Diffstat
-rw-r--r--.mailmap4
-rw-r--r--CREDITS7
-rw-r--r--Documentation/ABI/stable/sysfs-class-udc93
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-hid11
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-midi12
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-etb1024
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x253
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel12
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio23
-rw-r--r--Documentation/ABI/testing/sysfs-class-net8
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu47
-rw-r--r--Documentation/ABI/testing/sysfs-platform-dell-laptop60
-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/CodingStyle70
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/alsa-driver-api.tmpl31
-rw-r--r--Documentation/DocBook/crypto-API.tmpl1253
-rw-r--r--Documentation/DocBook/drm.tmpl434
-rw-r--r--Documentation/DocBook/media/dvb/dvbproperty.xml4
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml85
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml12
-rw-r--r--Documentation/DocBook/media/v4l/dev-subdev.xml109
-rw-r--r--Documentation/DocBook/media/v4l/io.xml5
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml1310
-rw-r--r--Documentation/DocBook/media/v4l/selections-common.xml16
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml326
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml11
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enuminput.xml8
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumoutput.xml8
-rw-r--r--Documentation/DocBook/uio-howto.tmpl2
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl23
-rw-r--r--Documentation/IPMI.txt74
-rw-r--r--Documentation/arm/memory.txt2
-rw-r--r--Documentation/block/biodoc.txt6
-rw-r--r--Documentation/cgroups/cgroups.txt4
-rw-r--r--Documentation/cgroups/cpusets.txt6
-rw-r--r--Documentation/cgroups/memory.txt8
-rw-r--r--Documentation/crypto/crypto-API-userspace.txt205
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt204
-rw-r--r--Documentation/devicetree/bindings/arm/gic-v3.txt39
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt53
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt11
-rw-r--r--Documentation/devicetree/bindings/ata/marvell.txt6
-rw-r--r--Documentation/devicetree/bindings/ata/sata_rcar.txt11
-rw-r--r--Documentation/devicetree/bindings/btmrvl.txt29
-rw-r--r--Documentation/devicetree/bindings/bus/bcma.txt21
-rw-r--r--Documentation/devicetree/bindings/chosen.txt46
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt14
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,flexgen.txt2
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/atmel-xdma.txt54
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_bam_dma.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/sun6i-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt)0
-rw-r--r--Documentation/devicetree/bindings/drm/imx/hdmi.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt)0
-rw-r--r--Documentation/devicetree/bindings/drm/imx/ldb.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/ldb.txt)0
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-74xx-mmio.txt30
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-vf610.txt55
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt40
-rw-r--r--Documentation/devicetree/bindings/gpio/pl061-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt4
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpu/st,stih4xx.txt29
-rw-r--r--Documentation/devicetree/bindings/hwrng/atmel-trng.txt16
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-designware.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-img-scb.txt26
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx.txt11
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-meson.txt24
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt14
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt8
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt46
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt2
-rw-r--r--Documentation/devicetree/bindings/input/cap11xx.txt (renamed from Documentation/devicetree/bindings/input/cap1106.txt)26
-rw-r--r--Documentation/devicetree/bindings/input/elan_i2c.txt34
-rw-r--r--Documentation/devicetree/bindings/input/elants_i2c.txt33
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt10
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt55
-rw-r--r--Documentation/devicetree/bindings/iommu/rockchip,iommu.txt26
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp8860.txt29
-rw-r--r--Documentation/devicetree/bindings/mailbox/omap-mailbox.txt23
-rw-r--r--Documentation/devicetree/bindings/media/meson-ir.txt14
-rw-r--r--Documentation/devicetree/bindings/media/rcar_vin.txt2
-rw-r--r--Documentation/devicetree/bindings/media/si4713.txt30
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt37
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/bmips.txt8
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt11
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/usb.txt11
-rw-r--r--Documentation/devicetree/bindings/mips/cpu_irq.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt6
-rw-r--r--Documentation/devicetree/bindings/mtd/diskonchip.txt15
-rw-r--r--Documentation/devicetree/bindings/mtd/gpio-control-nand.txt14
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt45
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe.txt12
-rw-r--r--Documentation/devicetree/bindings/net/can/c_can.txt5
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt9
-rw-r--r--Documentation/devicetree/bindings/net/micrel.txt35
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt3
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt1
-rw-r--r--Documentation/devicetree/bindings/panel/auo,b116xw03.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt7
-rw-r--r--Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt49
-rw-r--r--Documentation/devicetree/bindings/phy/berlin-sata-phy.txt4
-rw-r--r--Documentation/devicetree/bindings/phy/berlin-usb-phy.txt16
-rw-r--r--Documentation/devicetree/bindings/phy/phy-miphy28lp.txt128
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mvebu.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/samsung-phy.txt6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt96
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt215
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt162
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ste,abx500.txt184
-rw-r--r--Documentation/devicetree/bindings/power_supply/gpio-charger.txt27
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/fman.txt534
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt29
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt30
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-opal.txt16
-rw-r--r--Documentation/devicetree/bindings/serial/bcm63xx-uart.txt30
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt10
-rw-r--r--Documentation/devicetree/bindings/serial/of-serial.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.txt42
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt69
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt9
-rw-r--r--Documentation/devicetree/bindings/serial/sirf-uart.txt16
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/bman-portals.txt56
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/bman.txt125
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/qman-portals.txt154
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/qman.txt165
-rw-r--r--Documentation/devicetree/bindings/sound/arndale.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/eukrea-tlv320.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.txt44
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.txt37
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-sai.txt66
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt61
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-spdif.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt45
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audmux.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/max98090.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,fsi.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/rt5631.txt48
-rw-r--r--Documentation/devicetree/bindings/sound/rt5677.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/samsung-i2s.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt13
-rw-r--r--Documentation/devicetree/bindings/sound/ts3a227e.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/wm8960.txt31
-rw-r--r--Documentation/devicetree/bindings/spi/spi-gpio.txt6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-img-spfi.txt37
-rw-r--r--Documentation/devicetree/bindings/spi/spi-meson.txt22
-rw-r--r--Documentation/devicetree/bindings/spi/spi-samsung.txt2
-rw-r--r--Documentation/devicetree/bindings/thermal/armada-thermal.txt8
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt68
-rw-r--r--Documentation/devicetree/bindings/thermal/tegra-soctherm.txt53
-rw-r--r--Documentation/devicetree/bindings/unittest.txt14
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt24
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/exynos-usb.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/pxa-usb.txt22
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ohci.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--Documentation/devicetree/bindings/video/adi,adv7511.txt88
-rw-r--r--Documentation/devicetree/bindings/video/backlight/lp855x.txt2
-rw-r--r--Documentation/devicetree/bindings/video/exynos_dsim.txt1
-rw-r--r--Documentation/devicetree/bindings/video/rockchip-drm.txt19
-rw-r--r--Documentation/devicetree/bindings/video/rockchip-vop.txt58
-rw-r--r--Documentation/devicetree/bindings/video/samsung-fimd.txt1
-rw-r--r--Documentation/devicetree/bindings/video/simple-framebuffer-sunxi.txt33
-rw-r--r--Documentation/devicetree/bindings/video/simple-framebuffer.txt68
-rw-r--r--Documentation/devicetree/of_selftest.txt20
-rw-r--r--Documentation/devicetree/overlay-notes.txt133
-rw-r--r--Documentation/devicetree/todo.txt1
-rw-r--r--Documentation/dmaengine/client.txt (renamed from Documentation/dmaengine.txt)0
-rw-r--r--Documentation/dmaengine/dmatest.txt (renamed from Documentation/dmatest.txt)0
-rw-r--r--Documentation/dmaengine/provider.txt366
-rw-r--r--Documentation/email-clients.txt11
-rw-r--r--Documentation/filesystems/proc.txt2
-rw-r--r--Documentation/filesystems/squashfs.txt8
-rw-r--r--Documentation/gpio/consumer.txt27
-rw-r--r--Documentation/gpio/driver.txt4
-rw-r--r--Documentation/i2c/busses/i2c-i8011
-rw-r--r--Documentation/i2c/upgrading-clients6
-rw-r--r--Documentation/i2c/writing-clients8
-rw-r--r--Documentation/ia64/kvm.txt83
-rw-r--r--Documentation/input/xpad.txt123
-rw-r--r--Documentation/kernel-parameters.txt48
-rw-r--r--Documentation/kobject.txt2
-rw-r--r--Documentation/kselftest.txt (renamed from tools/testing/selftests/README.txt)30
-rw-r--r--Documentation/local_ops.txt13
-rw-r--r--Documentation/mailbox.txt2
-rw-r--r--Documentation/memory-barriers.txt42
-rw-r--r--Documentation/mic/mpssd/Makefile2
-rw-r--r--Documentation/networking/bonding.txt7
-rw-r--r--Documentation/networking/fib_trie.txt4
-rw-r--r--Documentation/networking/ip-sysctl.txt23
-rw-r--r--Documentation/networking/ipvlan.txt107
-rw-r--r--Documentation/networking/ixgbe.txt2
-rw-r--r--Documentation/networking/stmmac.txt132
-rw-r--r--Documentation/networking/switchdev.txt59
-rw-r--r--Documentation/networking/timestamping.txt33
-rw-r--r--Documentation/networking/timestamping/txtimestamp.c90
-rw-r--r--Documentation/phy.txt60
-rw-r--r--Documentation/power/runtime_pm.txt10
-rw-r--r--Documentation/power/suspend-and-interrupts.txt2
-rw-r--r--Documentation/power/userland-swsusp.txt2
-rw-r--r--Documentation/ramoops.txt13
-rw-r--r--Documentation/s390/Debugging390.txt462
-rw-r--r--Documentation/scsi/scsi_eh.txt4
-rw-r--r--Documentation/security/IMA-templates.txt29
-rw-r--r--Documentation/serial/driver6
-rw-r--r--Documentation/sound/alsa/ControlNames.txt32
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt8
-rw-r--r--Documentation/sound/alsa/Procfile.txt17
-rw-r--r--Documentation/sysctl/kernel.txt10
-rw-r--r--Documentation/sysctl/net.txt34
-rw-r--r--Documentation/trace/coresight.txt299
-rw-r--r--Documentation/usb/gadget_configfs.txt2
-rw-r--r--Documentation/usb/gadget_hid.txt7
-rw-r--r--Documentation/usb/usb-serial.txt2
-rw-r--r--Documentation/video4linux/CARDLIST.cx238852
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx1
-rw-r--r--Documentation/video4linux/CARDLIST.saa71341
-rw-r--r--Documentation/video4linux/soc-camera.txt2
-rw-r--r--Documentation/video4linux/vivid.txt15
-rw-r--r--Documentation/virtual/kvm/api.txt102
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt10
-rw-r--r--Documentation/virtual/kvm/msr.txt2
-rw-r--r--Documentation/vm/hugetlbpage.txt4
-rw-r--r--Documentation/vm/page_owner.txt81
-rw-r--r--Documentation/x86/entry_64.txt7
-rw-r--r--MAINTAINERS232
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/barrier.h51
-rw-r--r--arch/alpha/include/uapi/asm/socket.h5
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/boot/dts/nsimosci.dts18
-rw-r--r--arch/arc/configs/fpga_noramfs_defconfig63
-rw-r--r--arch/arc/configs/nsim_700_defconfig (renamed from arch/arc/configs/fpga_defconfig)0
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/irqflags.h9
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Kconfig.debug63
-rw-r--r--arch/arm/boot/dts/am4372.dtsi4
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts15
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi11
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi21
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi53
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts9
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi21
-rw-r--r--arch/arm/boot/dts/bcm63138.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts4
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi3
-rw-r--r--arch/arm/boot/dts/hip04.dtsi717
-rw-r--r--arch/arm/boot/dts/meson.dtsi7
-rw-r--r--arch/arm/boot/dts/omap2430-sdp.dts32
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts28
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts28
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi75
-rw-r--r--arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi64
-rw-r--r--arch/arm/boot/dts/ste-href-ab8500.dtsi162
-rw-r--r--arch/arm/boot/dts/ste-href-ab8505.dtsi90
-rw-r--r--arch/arm/boot/dts/ste-href-family-pinctrl.dtsi230
-rw-r--r--arch/arm/boot/dts/ste-href-stuib.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618.dtsi12
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi20
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi42
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts10
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi30
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts36
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi23
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts44
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi66
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi25
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts199
-rw-r--r--arch/arm/common/sa1111.c14
-rw-r--r--arch/arm/configs/davinci_all_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig3
-rw-r--r--arch/arm/crypto/aes_glue.c4
-rw-r--r--arch/arm/crypto/sha1_glue.c2
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c2
-rw-r--r--arch/arm/crypto/sha512_neon_glue.c6
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/barrier.h4
-rw-r--r--arch/arm/include/asm/cacheflush.h10
-rw-r--r--arch/arm/include/asm/device.h1
-rw-r--r--arch/arm/include/asm/dma-mapping.h14
-rw-r--r--arch/arm/include/asm/fixmap.h31
-rw-r--r--arch/arm/include/asm/hardware/coresight.h157
-rw-r--r--arch/arm/include/asm/hardware/cp14.h542
-rw-r--r--arch/arm/include/asm/hw_irq.h1
-rw-r--r--arch/arm/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm/include/asm/kvm_host.h2
-rw-r--r--arch/arm/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm/include/asm/mcpm.h17
-rw-r--r--arch/arm/include/asm/percpu.h4
-rw-r--r--arch/arm/include/asm/pgalloc.h10
-rw-r--r--arch/arm/include/asm/pgtable-2level-hwdef.h2
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h1
-rw-r--r--arch/arm/include/asm/pgtable.h62
-rw-r--r--arch/arm/include/asm/ptrace.h5
-rw-r--r--arch/arm/include/asm/thread_info.h9
-rw-r--r--arch/arm/include/asm/vfp.h5
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h66
-rw-r--r--arch/arm/include/asm/xen/page.h4
-rw-r--r--arch/arm/kernel/Makefile5
-rw-r--r--arch/arm/kernel/atags_compat.c6
-rw-r--r--arch/arm/kernel/atags_parse.c5
-rw-r--r--arch/arm/kernel/atags_proc.c4
-rw-r--r--arch/arm/kernel/bios32.c2
-rw-r--r--arch/arm/kernel/dma-isa.c4
-rw-r--r--arch/arm/kernel/dma.c26
-rw-r--r--arch/arm/kernel/entry-common.S235
-rw-r--r--arch/arm/kernel/entry-ftrace.S243
-rw-r--r--arch/arm/kernel/etm.c654
-rw-r--r--arch/arm/kernel/fiq.c2
-rw-r--r--arch/arm/kernel/ftrace.c19
-rw-r--r--arch/arm/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm/kernel/io.c5
-rw-r--r--arch/arm/kernel/irq.c8
-rw-r--r--arch/arm/kernel/iwmmxt.S13
-rw-r--r--arch/arm/kernel/jump_label.c2
-rw-r--r--arch/arm/kernel/kgdb.c29
-rw-r--r--arch/arm/kernel/machine_kexec.c15
-rw-r--r--arch/arm/kernel/module.c2
-rw-r--r--arch/arm/kernel/patch.c92
-rw-r--r--arch/arm/kernel/patch.h12
-rw-r--r--arch/arm/kernel/process.c4
-rw-r--r--arch/arm/kernel/return_address.c3
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/signal.c1
-rw-r--r--arch/arm/kernel/smp.c15
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/kernel/stacktrace.c4
-rw-r--r--arch/arm/kernel/swp_emulate.c2
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c4
-rw-r--r--arch/arm/kernel/thumbee.c2
-rw-r--r--arch/arm/kernel/topology.c4
-rw-r--r--arch/arm/kernel/traps.c42
-rw-r--r--arch/arm/kernel/unwind.c3
-rw-r--r--arch/arm/kernel/vmlinux.lds.S19
-rw-r--r--arch/arm/kernel/xscale-cp0.c7
-rw-r--r--arch/arm/kvm/arm.c78
-rw-r--r--arch/arm/kvm/guest.c26
-rw-r--r--arch/arm/kvm/mmio.c15
-rw-r--r--arch/arm/kvm/mmu.c92
-rw-r--r--arch/arm/kvm/psci.c18
-rw-r--r--arch/arm/lib/copy_from_user.S5
-rw-r--r--arch/arm/lib/copy_template.S30
-rw-r--r--arch/arm/lib/copy_to_user.S5
-rw-r--r--arch/arm/lib/memcpy.S5
-rw-r--r--arch/arm/lib/memmove.S28
-rw-r--r--arch/arm/lib/memset.S12
-rw-r--r--arch/arm/lib/memzero.S12
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c4
-rw-r--r--arch/arm/mach-davinci/cpuidle.c1
-rw-r--r--arch/arm/mach-davinci/dm355.c7
-rw-r--r--arch/arm/mach-davinci/dm365.c7
-rw-r--r--arch/arm/mach-davinci/pm.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-imx/mmdc.c1
-rw-r--r--arch/arm/mach-mmp/gplugd.c1
-rw-r--r--arch/arm/mach-msm/clock-pcom.c1
-rw-r--r--arch/arm/mach-msm/smd.c1
-rw-r--r--arch/arm/mach-omap2/Kconfig8
-rw-r--r--arch/arm/mach-omap2/Makefile1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c71
-rw-r--r--arch/arm/mach-omap2/clock.h2
-rw-r--r--arch/arm/mach-omap2/devices.c31
-rw-r--r--arch/arm/mach-omap2/emu.c50
-rw-r--r--arch/arm/mach-omap2/id.c8
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-omap2/twl-common.c12
-rw-r--r--arch/arm/mach-prima2/pm.c1
-rw-r--r--arch/arm/mach-prima2/rstc.c1
-rw-r--r--arch/arm/mach-prima2/rtciobrg.c1
-rw-r--r--arch/arm/mach-pxa/pxa3xx-ulpi.c1
-rw-r--r--arch/arm/mach-pxa/spitz.c9
-rw-r--r--arch/arm/mach-pxa/tosa-bt.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-osiris-dvs.c1
-rw-r--r--arch/arm/mach-sa1100/clock.c43
-rw-r--r--arch/arm/mach-sa1100/collie.c55
-rw-r--r--arch/arm/mach-sa1100/include/mach/entry-macro.S41
-rw-r--r--arch/arm/mach-sa1100/include/mach/irqs.h102
-rw-r--r--arch/arm/mach-sa1100/irq.c229
-rw-r--r--arch/arm/mach-sa1100/neponset.c1
-rw-r--r--arch/arm/mach-shmobile/board-lager.c58
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c2
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c58
-rw-r--r--arch/arm/mach-u300/regulator.c1
-rw-r--r--arch/arm/mm/Kconfig21
-rw-r--r--arch/arm/mm/Makefile2
-rw-r--r--arch/arm/mm/alignment.c10
-rw-r--r--arch/arm/mm/cache-feroceon-l2.c6
-rw-r--r--arch/arm/mm/cache-tauros2.c12
-rw-r--r--arch/arm/mm/context.c58
-rw-r--r--arch/arm/mm/copypage-v6.c2
-rw-r--r--arch/arm/mm/dma-mapping.c84
-rw-r--r--arch/arm/mm/fault-armv.c6
-rw-r--r--arch/arm/mm/fault.c31
-rw-r--r--arch/arm/mm/flush.c2
-rw-r--r--arch/arm/mm/highmem.c15
-rw-r--r--arch/arm/mm/init.c153
-rw-r--r--arch/arm/mm/mmu.c127
-rw-r--r--arch/arm/mm/pageattr.c91
-rw-r--r--arch/arm/mm/proc-v7.S5
-rw-r--r--arch/arm/nwfpe/fpmodule.c8
-rw-r--r--arch/arm/plat-pxa/ssp.c1
-rw-r--r--arch/arm/plat-samsung/adc.c1
-rw-r--r--arch/arm/vfp/vfphw.S6
-rw-r--r--arch/arm/vfp/vfpmodule.c102
-rw-r--r--arch/arm/vfp/vfpsingle.c2
-rw-r--r--arch/arm/xen/Makefile2
-rw-r--r--arch/arm/xen/enlighten.c5
-rw-r--r--arch/arm/xen/mm.c121
-rw-r--r--arch/arm/xen/mm32.c202
-rw-r--r--arch/arm64/Kconfig3
-rw-r--r--arch/arm64/crypto/Kconfig4
-rw-r--r--arch/arm64/crypto/Makefile4
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c2
-rw-r--r--arch/arm64/crypto/aes-glue.c8
-rw-r--r--arch/arm64/crypto/crc32-arm64.c274
-rw-r--r--arch/arm64/include/asm/Kbuild2
-rw-r--r--arch/arm64/include/asm/barrier.h3
-rw-r--r--arch/arm64/include/asm/device.h1
-rw-r--r--arch/arm64/include/asm/dma-mapping.h7
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h6
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h44
-rw-r--r--arch/arm64/kernel/psci.c6
-rw-r--r--arch/arm64/kvm/guest.c26
-rw-r--r--arch/arm64/mm/dump.c5
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/uapi/asm/socket.h5
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/barrier.h51
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h7
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/uapi/asm/socket.h5
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/include/uapi/asm/socket.h5
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/Makefile1
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/barrier.h25
-rw-r--r--arch/ia64/include/asm/kvm_host.h609
-rw-r--r--arch/ia64/include/asm/pvclock-abi.h48
-rw-r--r--arch/ia64/include/uapi/asm/kvm.h268
-rw-r--r--arch/ia64/include/uapi/asm/socket.h5
-rw-r--r--arch/ia64/kernel/perfmon.c10
-rw-r--r--arch/ia64/kvm/Kconfig66
-rw-r--r--arch/ia64/kvm/Makefile67
-rw-r--r--arch/ia64/kvm/asm-offsets.c241
-rw-r--r--arch/ia64/kvm/irq.h33
-rw-r--r--arch/ia64/kvm/kvm-ia64.c1942
-rw-r--r--arch/ia64/kvm/kvm_fw.c674
-rw-r--r--arch/ia64/kvm/kvm_lib.c21
-rw-r--r--arch/ia64/kvm/kvm_minstate.h266
-rw-r--r--arch/ia64/kvm/lapic.h30
-rw-r--r--arch/ia64/kvm/memcpy.S1
-rw-r--r--arch/ia64/kvm/memset.S1
-rw-r--r--arch/ia64/kvm/misc.h94
-rw-r--r--arch/ia64/kvm/mmio.c336
-rw-r--r--arch/ia64/kvm/optvfault.S1090
-rw-r--r--arch/ia64/kvm/process.c1024
-rw-r--r--arch/ia64/kvm/trampoline.S1038
-rw-r--r--arch/ia64/kvm/vcpu.c2209
-rw-r--r--arch/ia64/kvm/vcpu.h752
-rw-r--r--arch/ia64/kvm/vmm.c99
-rw-r--r--arch/ia64/kvm/vmm_ivt.S1392
-rw-r--r--arch/ia64/kvm/vti.h290
-rw-r--r--arch/ia64/kvm/vtlb.c640
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/uapi/asm/socket.h5
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/barrier.h19
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/pgtable.h1
-rw-r--r--arch/microblaze/kernel/dma.c27
-rw-r--r--arch/microblaze/mm/consistent.c25
-rw-r--r--arch/mips/Kbuild.platforms2
-rw-r--r--arch/mips/Kconfig98
-rw-r--r--arch/mips/Kconfig.debug13
-rw-r--r--arch/mips/Makefile1
-rw-r--r--arch/mips/alchemy/common/clock.c7
-rw-r--r--arch/mips/alchemy/common/setup.c6
-rw-r--r--arch/mips/ar7/platform.c24
-rw-r--r--arch/mips/ath25/Kconfig16
-rw-r--r--arch/mips/ath25/Makefile16
-rw-r--r--arch/mips/ath25/Platform6
-rw-r--r--arch/mips/ath25/ar2315.c364
-rw-r--r--arch/mips/ath25/ar2315.h22
-rw-r--r--arch/mips/ath25/ar2315_regs.h410
-rw-r--r--arch/mips/ath25/ar5312.c393
-rw-r--r--arch/mips/ath25/ar5312.h22
-rw-r--r--arch/mips/ath25/ar5312_regs.h224
-rw-r--r--arch/mips/ath25/board.c234
-rw-r--r--arch/mips/ath25/devices.c125
-rw-r--r--arch/mips/ath25/devices.h43
-rw-r--r--arch/mips/ath25/early_printk.c44
-rw-r--r--arch/mips/ath25/prom.c26
-rw-r--r--arch/mips/ath79/irq.c1
-rw-r--r--arch/mips/ath79/prom.c38
-rw-r--r--arch/mips/ath79/setup.c5
-rw-r--r--arch/mips/bcm3384/Makefile1
-rw-r--r--arch/mips/bcm3384/Platform7
-rw-r--r--arch/mips/bcm3384/dma.c81
-rw-r--r--arch/mips/bcm3384/irq.c193
-rw-r--r--arch/mips/bcm3384/setup.c97
-rw-r--r--arch/mips/bcm47xx/bcm47xx_private.h6
-rw-r--r--arch/mips/bcm47xx/irq.c8
-rw-r--r--arch/mips/bcm47xx/nvram.c155
-rw-r--r--arch/mips/bcm47xx/setup.c91
-rw-r--r--arch/mips/bcm47xx/sprom.c82
-rw-r--r--arch/mips/bcm63xx/cpu.c2
-rw-r--r--arch/mips/boot/dts/Makefile1
-rw-r--r--arch/mips/boot/dts/bcm3384.dtsi109
-rw-r--r--arch/mips/boot/dts/bcm93384wvg.dts32
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c4
-rw-r--r--arch/mips/cavium-octeon/executive/octeon-model.c49
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c274
-rw-r--r--arch/mips/cavium-octeon/setup.c4
-rw-r--r--arch/mips/configs/bcm3384_defconfig78
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig3
-rw-r--r--arch/mips/fw/lib/cmdline.c8
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/atomic.h374
-rw-r--r--arch/mips/include/asm/barrier.h61
-rw-r--r--arch/mips/include/asm/bitops.h35
-rw-r--r--arch/mips/include/asm/bmips.h1
-rw-r--r--arch/mips/include/asm/bootinfo.h13
-rw-r--r--arch/mips/include/asm/clock.h3
-rw-r--r--arch/mips/include/asm/cmpxchg.h27
-rw-r--r--arch/mips/include/asm/compiler.h8
-rw-r--r--arch/mips/include/asm/cpu-features.h4
-rw-r--r--arch/mips/include/asm/cpu.h2
-rw-r--r--arch/mips/include/asm/edac.h6
-rw-r--r--arch/mips/include/asm/elf.h74
-rw-r--r--arch/mips/include/asm/fpu.h49
-rw-r--r--arch/mips/include/asm/futex.h27
-rw-r--r--arch/mips/include/asm/hpet.h73
-rw-r--r--arch/mips/include/asm/io.h8
-rw-r--r--arch/mips/include/asm/irq.h3
-rw-r--r--arch/mips/include/asm/irq_cpu.h4
-rw-r--r--arch/mips/include/asm/mach-ath25/ath25_platform.h73
-rw-r--r--arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h64
-rw-r--r--arch/mips/include/asm/mach-ath25/dma-coherence.h82
-rw-r--r--arch/mips/include/asm/mach-ath25/gpio.h16
-rw-r--r--arch/mips/include/asm/mach-ath25/war.h25
-rw-r--r--arch/mips/include/asm/mach-au1x00/ioremap.h10
-rw-r--r--arch/mips/include/asm/mach-bcm3384/dma-coherence.h48
-rw-r--r--arch/mips/include/asm/mach-bcm3384/war.h24
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h36
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/ioremap.h6
-rw-r--r--arch/mips/include/asm/mach-generic/ioremap.h4
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h6
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/boot_param.h49
-rw-r--r--arch/mips/include/asm/mach-loongson/dma-coherence.h6
-rw-r--r--arch/mips/include/asm/mach-loongson/irq.h3
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson_hwmon.h55
-rw-r--r--arch/mips/include/asm/mach-loongson/machine.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/topology.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/workarounds.h7
-rw-r--r--arch/mips/include/asm/mach-loongson1/cpufreq.h23
-rw-r--r--arch/mips/include/asm/mach-loongson1/loongson1.h8
-rw-r--r--arch/mips/include/asm/mach-loongson1/platform.h10
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-clk.h23
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-mux.h67
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-pwm.h29
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-wdt.h11
-rw-r--r--arch/mips/include/asm/mach-malta/irq.h1
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h25
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h64
-rw-r--r--arch/mips/include/asm/mach-ralink/pinmux.h55
-rw-r--r--arch/mips/include/asm/mach-ralink/ralink_regs.h7
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h35
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883.h16
-rw-r--r--arch/mips/include/asm/mach-sead3/irq.h1
-rw-r--r--arch/mips/include/asm/mach-tx39xx/ioremap.h4
-rw-r--r--arch/mips/include/asm/mach-tx49xx/ioremap.h4
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h24
-rw-r--r--arch/mips/include/asm/mips-boards/sead3int.h15
-rw-r--r--arch/mips/include/asm/mips-cm.h2
-rw-r--r--arch/mips/include/asm/mips-cpc.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h43
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h69
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h63
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h52
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h3
-rw-r--r--arch/mips/include/asm/paccess.h2
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/pgtable-32.h104
-rw-r--r--arch/mips/include/asm/pgtable-bits.h36
-rw-r--r--arch/mips/include/asm/pgtable.h18
-rw-r--r--arch/mips/include/asm/prom.h1
-rw-r--r--arch/mips/include/asm/r4kcache.h59
-rw-r--r--arch/mips/include/asm/spinlock.h50
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/time.h6
-rw-r--r--arch/mips/include/asm/types.h18
-rw-r--r--arch/mips/include/asm/uaccess.h27
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h7
-rw-r--r--arch/mips/include/uapi/asm/socket.h5
-rw-r--r--arch/mips/jz4740/setup.c2
-rw-r--r--arch/mips/kernel/Makefile10
-rw-r--r--arch/mips/kernel/cevt-gic.c105
-rw-r--r--arch/mips/kernel/cevt-r4k.c6
-rw-r--r--arch/mips/kernel/cpu-probe.c71
-rw-r--r--arch/mips/kernel/crash_dump.c4
-rw-r--r--arch/mips/kernel/csrc-gic.c40
-rw-r--r--arch/mips/kernel/elf.c191
-rw-r--r--arch/mips/kernel/i8259.c24
-rw-r--r--arch/mips/kernel/irq-gic.c402
-rw-r--r--arch/mips/kernel/irq_cpu.c48
-rw-r--r--arch/mips/kernel/mips-cm.c12
-rw-r--r--arch/mips/kernel/mips-cpc.c4
-rw-r--r--arch/mips/kernel/mips_ksyms.c4
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c30
-rw-r--r--arch/mips/kernel/process.c54
-rw-r--r--arch/mips/kernel/prom.c18
-rw-r--r--arch/mips/kernel/setup.c14
-rw-r--r--arch/mips/kernel/signal.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c114
-rw-r--r--arch/mips/kernel/smp-cmp.c2
-rw-r--r--arch/mips/kernel/smp-cps.c6
-rw-r--r--arch/mips/kernel/smp-gic.c2
-rw-r--r--arch/mips/kernel/smp-mt.c6
-rw-r--r--arch/mips/kernel/syscall.c2
-rw-r--r--arch/mips/kernel/traps.c66
-rw-r--r--arch/mips/kernel/vdso.c15
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c11
-rw-r--r--arch/mips/lantiq/irq.c56
-rw-r--r--arch/mips/lantiq/prom.c18
-rw-r--r--arch/mips/lantiq/xway/Makefile2
-rw-r--r--arch/mips/lantiq/xway/dcdc.c1
-rw-r--r--arch/mips/lantiq/xway/dma.c1
-rw-r--r--arch/mips/lantiq/xway/gptu.c1
-rw-r--r--arch/mips/lantiq/xway/reset.c70
-rw-r--r--arch/mips/lantiq/xway/vmmc.c69
-rw-r--r--arch/mips/lantiq/xway/xrx200_phy_fw.c24
-rw-r--r--arch/mips/lib/iomap.c18
-rw-r--r--arch/mips/lib/memset.S6
-rw-r--r--arch/mips/lib/mips-atomic.c20
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c11
-rw-r--r--arch/mips/lib/strlen_user.S3
-rw-r--r--arch/mips/loongson/Kconfig17
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_pci.c25
-rw-r--r--arch/mips/loongson/common/dma-swiotlb.c14
-rw-r--r--arch/mips/loongson/common/early_printk.c2
-rw-r--r--arch/mips/loongson/common/env.c28
-rw-r--r--arch/mips/loongson/common/gpio.c2
-rw-r--r--arch/mips/loongson/common/init.c1
-rw-r--r--arch/mips/loongson/common/machtype.c23
-rw-r--r--arch/mips/loongson/common/rtc.c2
-rw-r--r--arch/mips/loongson/common/serial.c66
-rw-r--r--arch/mips/loongson/common/setup.c1
-rw-r--r--arch/mips/loongson/common/time.c5
-rw-r--r--arch/mips/loongson/common/uart_base.c30
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c4
-rw-r--r--arch/mips/loongson/lemote-2f/reset.c2
-rw-r--r--arch/mips/loongson/loongson-3/Makefile4
-rw-r--r--arch/mips/loongson/loongson-3/hpet.c257
-rw-r--r--arch/mips/loongson/loongson-3/irq.c16
-rw-r--r--arch/mips/loongson/loongson-3/numa.c12
-rw-r--r--arch/mips/loongson/loongson-3/platform.c43
-rw-r--r--arch/mips/loongson/loongson-3/smp.c70
-rw-r--r--arch/mips/loongson1/Kconfig42
-rw-r--r--arch/mips/loongson1/common/Makefile2
-rw-r--r--arch/mips/loongson1/common/clock.c28
-rw-r--r--arch/mips/loongson1/common/platform.c141
-rw-r--r--arch/mips/loongson1/common/prom.c30
-rw-r--r--arch/mips/loongson1/common/reset.c20
-rw-r--r--arch/mips/loongson1/common/time.c226
-rw-r--r--arch/mips/loongson1/ls1b/board.c12
-rw-r--r--arch/mips/math-emu/cp1emu.c9
-rw-r--r--arch/mips/math-emu/ieee754dp.c2
-rw-r--r--arch/mips/math-emu/ieee754sp.c2
-rw-r--r--arch/mips/mm/Makefile10
-rw-r--r--arch/mips/mm/c-r4k.c43
-rw-r--r--arch/mips/mm/dma-default.c5
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/mm/ioremap.c18
-rw-r--r--arch/mips/mm/sc-r5k.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c18
-rw-r--r--arch/mips/mm/uasm-mips.c2
-rw-r--r--arch/mips/mm/uasm.c14
-rw-r--r--arch/mips/mti-malta/malta-init.c2
-rw-r--r--arch/mips/mti-malta/malta-int.c327
-rw-r--r--arch/mips/mti-malta/malta-time.c51
-rw-r--r--arch/mips/mti-sead3/leds-sead3.c1
-rw-r--r--arch/mips/mti-sead3/sead3-ehci.c8
-rw-r--r--arch/mips/mti-sead3/sead3-i2c-drv.c1
-rw-r--r--arch/mips/mti-sead3/sead3-int.c131
-rw-r--r--arch/mips/mti-sead3/sead3-net.c14
-rw-r--r--arch/mips/mti-sead3/sead3-platform.c18
-rw-r--r--arch/mips/mti-sead3/sead3-serial.c45
-rw-r--r--arch/mips/mti-sead3/sead3-time.c35
-rw-r--r--arch/mips/net/bpf_jit.c4
-rw-r--r--arch/mips/oprofile/Makefile1
-rw-r--r--arch/mips/oprofile/backtrace.c5
-rw-r--r--arch/mips/oprofile/common.c11
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c220
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c18
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/ops-bcm63xx.c2
-rw-r--r--arch/mips/pci/ops-nile4.c12
-rw-r--r--arch/mips/pci/ops-pmcmsp.c12
-rw-r--r--arch/mips/pci/pci-alchemy.c1
-rw-r--r--arch/mips/pci/pci-ar2315.c511
-rw-r--r--arch/mips/pci/pci-ar71xx.c14
-rw-r--r--arch/mips/pci/pci-ar724x.c24
-rw-r--r--arch/mips/pci/pci-lantiq.c1
-rw-r--r--arch/mips/pci/pci-octeon.c2
-rw-r--r--arch/mips/pci/pci-rt2880.c285
-rw-r--r--arch/mips/pci/pci-rt3883.c10
-rw-r--r--arch/mips/pci/pci-tx4939.c2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_prom.c2
-rw-r--r--arch/mips/ralink/Kconfig3
-rw-r--r--arch/mips/ralink/Makefile4
-rw-r--r--arch/mips/ralink/bootrom.c48
-rw-r--r--arch/mips/ralink/clk.c6
-rw-r--r--arch/mips/ralink/common.h19
-rw-r--r--arch/mips/ralink/early_printk.c45
-rw-r--r--arch/mips/ralink/ill_acc.c87
-rw-r--r--arch/mips/ralink/irq.c45
-rw-r--r--arch/mips/ralink/mt7620.c465
-rw-r--r--arch/mips/ralink/of.c32
-rw-r--r--arch/mips/ralink/prom.c1
-rw-r--r--arch/mips/ralink/rt288x.c65
-rw-r--r--arch/mips/ralink/rt305x.c153
-rw-r--r--arch/mips/ralink/rt3883.c174
-rw-r--r--arch/mips/ralink/timer.c1
-rw-r--r--arch/mips/rb532/gpio.c2
-rw-r--r--arch/mips/rb532/prom.c8
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c6
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c5
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c5
-rw-r--r--arch/mips/sibyte/common/cfe.c8
-rw-r--r--arch/mips/sibyte/swarm/platform.c2
-rw-r--r--arch/mips/sibyte/swarm/rtc_m41t81.c4
-rw-r--r--arch/mips/sibyte/swarm/rtc_xicor1241.c4
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c4
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c4
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c4
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c1
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/uapi/asm/socket.h5
-rw-r--r--arch/nios2/Makefile2
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/io.h2
-rw-r--r--arch/nios2/include/asm/uaccess.h4
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/uapi/asm/socket.h5
-rw-r--r--arch/parisc/lib/fixup.S4
-rw-r--r--arch/powerpc/Kconfig6
-rw-r--r--arch/powerpc/boot/dts/b4860emu.dts4
-rw-r--r--arch/powerpc/boot/dts/b4qds.dtsi23
-rw-r--r--arch/powerpc/boot/dts/bsc9131rdb.dtsi50
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-post.dtsi28
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-post.dtsi28
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi48
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi48
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi48
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi48
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-post.dtsi48
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi85
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi30
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi29
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi29
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts20
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts20
-rw-r--r--arch/powerpc/boot/dts/p5040ds.dts20
-rw-r--r--arch/powerpc/boot/dts/t104xrdb.dtsi7
-rw-r--r--arch/powerpc/boot/dts/t208xqds.dtsi11
-rw-r--r--arch/powerpc/boot/dts/t4240emu.dts4
-rw-r--r--arch/powerpc/boot/main.c15
-rw-r--r--arch/powerpc/boot/ops.h2
-rw-r--r--arch/powerpc/boot/serial.c6
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/gef_ppc9a_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc310_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc610_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/sbc8641d_defconfig1
-rw-r--r--arch/powerpc/configs/c2k_defconfig1
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_le_defconfig1
-rw-r--r--arch/powerpc/crypto/sha1.c4
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/barrier.h19
-rw-r--r--arch/powerpc/include/asm/bitops.h6
-rw-r--r--arch/powerpc/include/asm/cputable.h10
-rw-r--r--arch/powerpc/include/asm/eeh.h2
-rw-r--r--arch/powerpc/include/asm/elf.h3
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h5
-rw-r--r--arch/powerpc/include/asm/hardirq.h7
-rw-r--r--arch/powerpc/include/asm/hugetlb.h8
-rw-r--r--arch/powerpc/include/asm/io.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h3
-rw-r--r--arch/powerpc/include/asm/kvm_host.h18
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/include/asm/machdep.h19
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h2
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h22
-rw-r--r--arch/powerpc/include/asm/opal.h122
-rw-r--r--arch/powerpc/include/asm/paca.h7
-rw-r--r--arch/powerpc/include/asm/page.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h20
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64-4k.h16
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64-64k.h3
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h52
-rw-r--r--arch/powerpc/include/asm/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/pte-8xx.h7
-rw-r--r--arch/powerpc/include/asm/setup.h3
-rw-r--r--arch/powerpc/include/asm/thread_info.h5
-rw-r--r--arch/powerpc/include/asm/tlbflush.h10
-rw-r--r--arch/powerpc/include/asm/vga.h4
-rw-r--r--arch/powerpc/include/asm/xics.h8
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h5
-rw-r--r--arch/powerpc/kernel/align.c2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c9
-rw-r--r--arch/powerpc/kernel/crash_dump.c1
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/eeh.c41
-rw-r--r--arch/powerpc/kernel/eeh_driver.c10
-rw-r--r--arch/powerpc/kernel/entry_32.S12
-rw-r--r--arch/powerpc/kernel/entry_64.S35
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S34
-rw-r--r--arch/powerpc/kernel/ftrace.c73
-rw-r--r--arch/powerpc/kernel/head_8xx.S230
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c6
-rw-r--r--arch/powerpc/kernel/idle_power7.S12
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/kgdb.c2
-rw-r--r--arch/powerpc/kernel/kprobes.c6
-rw-r--r--arch/powerpc/kernel/mce.c24
-rw-r--r--arch/powerpc/kernel/mce_power.c4
-rw-r--r--arch/powerpc/kernel/of_platform.c1
-rw-r--r--arch/powerpc/kernel/pci-common.c3
-rw-r--r--arch/powerpc/kernel/pci_32.c4
-rw-r--r--arch/powerpc/kernel/pci_64.c1
-rw-r--r--arch/powerpc/kernel/process.c36
-rw-r--r--arch/powerpc/kernel/prom.c11
-rw-r--r--arch/powerpc/kernel/rtas-proc.c20
-rw-r--r--arch/powerpc/kernel/rtas.c4
-rw-r--r--arch/powerpc/kernel/rtas_pci.c1
-rw-r--r--arch/powerpc/kernel/setup-common.c6
-rw-r--r--arch/powerpc/kernel/setup_32.c11
-rw-r--r--arch/powerpc/kernel/setup_64.c35
-rw-r--r--arch/powerpc/kernel/smp.c6
-rw-r--r--arch/powerpc/kernel/sysfs.c4
-rw-r--r--arch/powerpc/kernel/time.c23
-rw-r--r--arch/powerpc/kernel/traps.c8
-rw-r--r--arch/powerpc/kernel/udbg_16550.c6
-rw-r--r--arch/powerpc/kernel/vdso.c1
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s.c8
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c224
-rw-r--r--arch/powerpc/kvm/book3s_hv.c438
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c139
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S39
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c150
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c36
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S305
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c8
-rw-r--r--arch/powerpc/kvm/book3s_pr.c5
-rw-r--r--arch/powerpc/kvm/book3s_xics.c30
-rw-r--r--arch/powerpc/kvm/book3s_xics.h1
-rw-r--r--arch/powerpc/kvm/e500.c22
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c6
-rw-r--r--arch/powerpc/kvm/e500mc.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c10
-rw-r--r--arch/powerpc/kvm/trace_book3s.h32
-rw-r--r--arch/powerpc/kvm/trace_booke.h47
-rw-r--r--arch/powerpc/kvm/trace_hv.h477
-rw-r--r--arch/powerpc/kvm/trace_pr.h25
-rw-r--r--arch/powerpc/lib/Makefile1
-rw-r--r--arch/powerpc/lib/alloc.c4
-rw-r--r--arch/powerpc/lib/copyuser_power7.S2
-rw-r--r--arch/powerpc/lib/devres.c43
-rw-r--r--arch/powerpc/lib/memcpy_power7.S2
-rw-r--r--arch/powerpc/lib/sstep.c6
-rw-r--r--arch/powerpc/mm/Makefile2
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/mm/gup.c235
-rw-r--r--arch/powerpc/mm/hash_low_64.S19
-rw-r--r--arch/powerpc/mm/hash_native_64.c41
-rw-r--r--arch/powerpc/mm/hash_utils_64.c116
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c60
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c6
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c6
-rw-r--r--arch/powerpc/mm/hugetlbpage.c51
-rw-r--r--arch/powerpc/mm/init_32.c10
-rw-r--r--arch/powerpc/mm/init_64.c1
-rw-r--r--arch/powerpc/mm/mem.c77
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c8
-rw-r--r--arch/powerpc/mm/mmu_decl.h2
-rw-r--r--arch/powerpc/mm/numa.c227
-rw-r--r--arch/powerpc/mm/pgtable_32.c5
-rw-r--r--arch/powerpc/mm/pgtable_64.c104
-rw-r--r--arch/powerpc/net/bpf_jit.h7
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c49
-rw-r--r--arch/powerpc/oprofile/backtrace.c6
-rw-r--r--arch/powerpc/perf/core-book3s.c22
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c6
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c9
-rw-r--r--arch/powerpc/platforms/52xx/efika.c3
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c1
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c1
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c8
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c2
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c5
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c1
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c4
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.c6
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_epci.c1
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c1
-rw-r--r--arch/powerpc/platforms/cell/celleb_setup.c4
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c6
-rw-r--r--arch/powerpc/platforms/cell/qpace_setup.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c5
-rw-r--r--arch/powerpc/platforms/cell/spufs/fault.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c3
-rw-r--r--arch/powerpc/platforms/maple/pci.c1
-rw-r--r--arch/powerpc/platforms/maple/setup.c4
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c1
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c6
-rw-r--r--arch/powerpc/platforms/powermac/pci.c1
-rw-r--r--arch/powerpc/platforms/powermac/setup.c3
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c16
-rw-r--r--arch/powerpc/platforms/powernv/opal-async.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c65
-rw-r--r--arch/powerpc/platforms/powernv/opal-tracepoints.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S6
-rw-r--r--arch/powerpc/platforms/powernv/opal.c21
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c217
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c44
-rw-r--r--arch/powerpc/platforms/powernv/pci.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.h2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c6
-rw-r--r--arch/powerpc/platforms/powernv/smp.c23
-rw-r--r--arch/powerpc/platforms/ps3/htab.c2
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c2
-rw-r--r--arch/powerpc/platforms/ps3/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c7
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c36
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S4
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c4
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c16
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c10
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c70
-rw-r--r--arch/powerpc/sysdev/axonram.c1
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c1
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c3
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c1
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c105
-rw-r--r--arch/powerpc/sysdev/fsl_rio.h13
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c5
-rw-r--r--arch/powerpc/sysdev/ipic.c1
-rw-r--r--arch/powerpc/sysdev/mpc5xxx_clocks.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c1
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c1
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c1
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c1
-rw-r--r--arch/powerpc/sysdev/pmi.c1
-rw-r--r--arch/powerpc/sysdev/ppc4xx_cpm.c8
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c1
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c1
-rw-r--r--arch/powerpc/sysdev/uic.c1
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c2
-rw-r--r--arch/powerpc/xmon/xmon.c82
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/crypto/des_s390.c4
-rw-r--r--arch/s390/crypto/ghash_s390.c2
-rw-r--r--arch/s390/crypto/sha1_s390.c2
-rw-r--r--arch/s390/crypto/sha256_s390.c4
-rw-r--r--arch/s390/crypto/sha512_s390.c4
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/barrier.h7
-rw-r--r--arch/s390/include/asm/cmpxchg.h240
-rw-r--r--arch/s390/include/asm/cputime.h46
-rw-r--r--arch/s390/include/asm/debug.h29
-rw-r--r--arch/s390/include/asm/ftrace.h54
-rw-r--r--arch/s390/include/asm/idle.h3
-rw-r--r--arch/s390/include/asm/io.h9
-rw-r--r--arch/s390/include/asm/irq.h11
-rw-r--r--arch/s390/include/asm/kprobes.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h99
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pci_io.h6
-rw-r--r--arch/s390/include/asm/pgalloc.h3
-rw-r--r--arch/s390/include/asm/pgtable.h33
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/sigp.h1
-rw-r--r--arch/s390/include/asm/spinlock.h9
-rw-r--r--arch/s390/include/asm/tlb.h1
-rw-r--r--arch/s390/include/uapi/asm/socket.h5
-rw-r--r--arch/s390/include/uapi/asm/unistd.h4
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/compat_linux.c2
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.c2
-rw-r--r--arch/s390/kernel/debug.c12
-rw-r--r--arch/s390/kernel/dumpstack.c3
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/entry.S424
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/entry64.S372
-rw-r--r--arch/s390/kernel/ftrace.c136
-rw-r--r--arch/s390/kernel/idle.c29
-rw-r--r--arch/s390/kernel/irq.c5
-rw-r--r--arch/s390/kernel/kprobes.c178
-rw-r--r--arch/s390/kernel/mcount.S1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c1
-rw-r--r--arch/s390/kernel/process.c3
-rw-r--r--arch/s390/kernel/ptrace.c115
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/time.c3
-rw-r--r--arch/s390/kernel/traps.c25
-rw-r--r--arch/s390/kvm/gaccess.c40
-rw-r--r--arch/s390/kvm/intercept.c20
-rw-r--r--arch/s390/kvm/interrupt.c1044
-rw-r--r--arch/s390/kvm/kvm-s390.c24
-rw-r--r--arch/s390/kvm/kvm-s390.h11
-rw-r--r--arch/s390/kvm/priv.c112
-rw-r--r--arch/s390/kvm/sigp.c305
-rw-r--r--arch/s390/mm/fault.c10
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/pgtable.c226
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci.c9
-rw-r--r--arch/s390/pci/pci_clp.c1
-rw-r--r--arch/s390/pci/pci_debug.c7
-rw-r--r--arch/s390/pci/pci_mmio.c115
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c2
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sparc/crypto/aes_glue.c2
-rw-r--r--arch/sparc/crypto/camellia_glue.c2
-rw-r--r--arch/sparc/crypto/crc32c_glue.c2
-rw-r--r--arch/sparc/crypto/des_glue.c2
-rw-r--r--arch/sparc/crypto/md5_glue.c2
-rw-r--r--arch/sparc/crypto/sha1_glue.c2
-rw-r--r--arch/sparc/crypto/sha256_glue.c6
-rw-r--r--arch/sparc/crypto/sha512_glue.c6
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/barrier_64.h7
-rw-r--r--arch/sparc/include/asm/ldc.h1
-rw-r--r--arch/sparc/include/asm/parport.h1
-rw-r--r--arch/sparc/include/asm/vio.h34
-rw-r--r--arch/sparc/include/uapi/asm/socket.h5
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/apc.c1
-rw-r--r--arch/sparc/kernel/auxio_64.c1
-rw-r--r--arch/sparc/kernel/central.c2
-rw-r--r--arch/sparc/kernel/chmc.c1
-rw-r--r--arch/sparc/kernel/ldc.c12
-rw-r--r--arch/sparc/kernel/leon_pci_grpci1.c1
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c1
-rw-r--r--arch/sparc/kernel/leon_smp.c2
-rw-r--r--arch/sparc/kernel/pci_fire.c1
-rw-r--r--arch/sparc/kernel/pci_psycho.c1
-rw-r--r--arch/sparc/kernel/pci_sabre.c1
-rw-r--r--arch/sparc/kernel/pci_schizo.c1
-rw-r--r--arch/sparc/kernel/pci_sun4v.c1
-rw-r--r--arch/sparc/kernel/pmc.c1
-rw-r--r--arch/sparc/kernel/power.c1
-rw-r--r--arch/sparc/kernel/syscalls.S10
-rw-r--r--arch/sparc/kernel/systbls_32.S1
-rw-r--r--arch/sparc/kernel/systbls_64.S2
-rw-r--r--arch/sparc/kernel/time_32.c1
-rw-r--r--arch/sparc/kernel/time_64.c3
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c11
-rw-r--r--arch/tile/configs/tilegx_defconfig1
-rw-r--r--arch/tile/configs/tilepro_defconfig1
-rw-r--r--arch/tile/gxio/mpipe.c4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/io.h5
-rw-r--r--arch/tile/include/asm/pgtable.h4
-rw-r--r--arch/tile/include/asm/pgtable_64.h2
-rw-r--r--arch/tile/include/uapi/asm/ptrace.h16
-rw-r--r--arch/tile/include/uapi/asm/sigcontext.h14
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/irq.c5
-rw-r--r--arch/tile/kernel/kgdb.c6
-rw-r--r--arch/tile/kernel/kprobes.c3
-rw-r--r--arch/tile/kernel/machine_kexec.c28
-rw-r--r--arch/tile/kernel/messaging.c5
-rw-r--r--arch/tile/kernel/module.c12
-rw-r--r--arch/tile/kernel/pci.c7
-rw-r--r--arch/tile/kernel/pci_gx.c95
-rw-r--r--arch/tile/kernel/process.c16
-rw-r--r--arch/tile/kernel/setup.c36
-rw-r--r--arch/tile/kernel/signal.c20
-rw-r--r--arch/tile/kernel/single_step.c6
-rw-r--r--arch/tile/kernel/smpboot.c5
-rw-r--r--arch/tile/kernel/stack.c7
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/tile/kernel/traps.c10
-rw-r--r--arch/tile/kernel/unaligned.c22
-rw-r--r--arch/tile/mm/fault.c34
-rw-r--r--arch/tile/mm/homecache.c6
-rw-r--r--arch/tile/mm/hugetlbpage.c18
-rw-r--r--arch/tile/mm/init.c32
-rw-r--r--arch/tile/mm/pgtable.c4
-rw-r--r--arch/um/drivers/line.c6
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/crypto/aes_glue.c4
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c10
-rw-r--r--arch/x86/crypto/blowfish_glue.c4
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c4
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c4
-rw-r--r--arch/x86/crypto/camellia_glue.c4
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c2
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c4
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c4
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c4
-rw-r--r--arch/x86/crypto/des3_ede_glue.c8
-rw-r--r--arch/x86/crypto/fpu.c3
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c2
-rw-r--r--arch/x86/crypto/salsa20_glue.c4
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c4
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c2
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c2
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c3
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c6
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c2
-rw-r--r--arch/x86/crypto/twofish_glue.c4
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c4
-rw-r--r--arch/x86/ia32/audit.c1
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/barrier.h70
-rw-r--r--arch/x86/include/asm/dma.h2
-rw-r--r--arch/x86/include/asm/hash.h7
-rw-r--r--arch/x86/include/asm/kvm_host.h37
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/platform_sst_audio.h62
-rw-r--r--arch/x86/include/asm/segment.h30
-rw-r--r--arch/x86/include/asm/vmx.h3
-rw-r--r--arch/x86/include/asm/xen/cpuid.h91
-rw-r--r--arch/x86/include/asm/xen/page-coherent.h4
-rw-r--r--arch/x86/include/asm/xen/page.h71
-rw-r--r--arch/x86/include/asm/xsave.h1
-rw-r--r--arch/x86/include/uapi/asm/vmx.h6
-rw-r--r--arch/x86/kernel/asm-offsets_32.c4
-rw-r--r--arch/x86/kernel/asm-offsets_64.c4
-rw-r--r--arch/x86/kernel/audit_64.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_iommu.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c6
-rw-r--r--arch/x86/kernel/e820.c4
-rw-r--r--arch/x86/kernel/early-quirks.c23
-rw-r--r--arch/x86/kernel/entry_32.S6
-rw-r--r--arch/x86/kernel/entry_64.S28
-rw-r--r--arch/x86/kernel/kvm.c9
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/process_64.c101
-rw-r--r--arch/x86/kernel/tls.c45
-rw-r--r--arch/x86/kernel/xsave.c1
-rw-r--r--arch/x86/kvm/Makefile7
-rw-r--r--arch/x86/kvm/assigned-dev.c (renamed from virt/kvm/assigned-dev.c)30
-rw-r--r--arch/x86/kvm/assigned-dev.h32
-rw-r--r--arch/x86/kvm/cpuid.c57
-rw-r--r--arch/x86/kvm/emulate.c408
-rw-r--r--arch/x86/kvm/ioapic.c (renamed from virt/kvm/ioapic.c)12
-rw-r--r--arch/x86/kvm/ioapic.h (renamed from virt/kvm/ioapic.h)21
-rw-r--r--arch/x86/kvm/iommu.c (renamed from virt/kvm/iommu.c)11
-rw-r--r--arch/x86/kvm/irq_comm.c (renamed from virt/kvm/irq_comm.c)45
-rw-r--r--arch/x86/kvm/lapic.c210
-rw-r--r--arch/x86/kvm/lapic.h14
-rw-r--r--arch/x86/kvm/mmu.c7
-rw-r--r--arch/x86/kvm/svm.c24
-rw-r--r--arch/x86/kvm/trace.h37
-rw-r--r--arch/x86/kvm/vmx.c608
-rw-r--r--arch/x86/kvm/x86.c226
-rw-r--r--arch/x86/kvm/x86.h3
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/hash.c92
-rw-r--r--arch/x86/mm/fault.c64
-rw-r--r--arch/x86/mm/init.c4
-rw-r--r--arch/x86/mm/pageattr.c22
-rw-r--r--arch/x86/net/bpf_jit_comp.c34
-rw-r--r--arch/x86/pci/xen.c31
-rw-r--r--arch/x86/platform/iris/iris.c1
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-pm.c2
-rw-r--r--arch/x86/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/um/asm/barrier.h20
-rw-r--r--arch/x86/um/sys_call_table_64.c1
-rw-r--r--arch/x86/xen/mmu.c40
-rw-r--r--arch/x86/xen/p2m.c1172
-rw-r--r--arch/x86/xen/setup.c441
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/Kconfig56
-rw-r--r--arch/xtensa/Kconfig.debug4
-rw-r--r--arch/xtensa/Makefile1
-rw-r--r--arch/xtensa/boot/boot-elf/boot.lds.S2
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S10
-rw-r--r--arch/xtensa/boot/boot-uboot/Makefile4
-rw-r--r--arch/xtensa/configs/iss_defconfig3
-rw-r--r--arch/xtensa/configs/s6105_defconfig615
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/cacheflush.h7
-rw-r--r--arch/xtensa/include/asm/highmem.h2
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h40
-rw-r--r--arch/xtensa/include/asm/mmu_context.h4
-rw-r--r--arch/xtensa/include/asm/nommu_context.h4
-rw-r--r--arch/xtensa/include/asm/page.h12
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--arch/xtensa/include/asm/uaccess.h4
-rw-r--r--arch/xtensa/include/asm/vectors.h7
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h6
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h5
-rw-r--r--arch/xtensa/kernel/head.S5
-rw-r--r--arch/xtensa/kernel/syscall.c2
-rw-r--r--arch/xtensa/mm/Makefile4
-rw-r--r--arch/xtensa/mm/init.c19
-rw-r--r--arch/xtensa/platforms/s6105/Makefile3
-rw-r--r--arch/xtensa/platforms/s6105/device.c161
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/gpio.h27
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/hardware.h11
-rw-r--r--arch/xtensa/platforms/s6105/include/platform/serial.h8
-rw-r--r--arch/xtensa/platforms/s6105/setup.c73
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h4
-rw-r--r--arch/xtensa/variants/s6000/Makefile4
-rw-r--r--arch/xtensa/variants/s6000/delay.c25
-rw-r--r--arch/xtensa/variants/s6000/dmac.c173
-rw-r--r--arch/xtensa/variants/s6000/gpio.c230
-rw-r--r--arch/xtensa/variants/s6000/include/variant/core.h431
-rw-r--r--arch/xtensa/variants/s6000/include/variant/dmac.h387
-rw-r--r--arch/xtensa/variants/s6000/include/variant/gpio.h6
-rw-r--r--arch/xtensa/variants/s6000/include/variant/hardware.h259
-rw-r--r--arch/xtensa/variants/s6000/include/variant/irq.h8
-rw-r--r--arch/xtensa/variants/s6000/include/variant/tie-asm.h304
-rw-r--r--arch/xtensa/variants/s6000/include/variant/tie.h191
-rw-r--r--arch/xtensa/variants/s6000/irq.c74
-rw-r--r--block/bio.c82
-rw-r--r--block/blk-core.c3
-rw-r--r--block/blk-mq-cpumap.c4
-rw-r--r--block/blk-mq-sysfs.c9
-rw-r--r--block/blk-mq-tag.c56
-rw-r--r--block/blk-mq.c126
-rw-r--r--block/blk-mq.h5
-rw-r--r--block/blk-settings.c4
-rw-r--r--block/blk-sysfs.c12
-rw-r--r--block/genhd.c11
-rw-r--r--crypto/842.c1
-rw-r--r--crypto/aes_generic.c2
-rw-r--r--crypto/af_alg.c8
-rw-r--r--crypto/algapi.c4
-rw-r--r--crypto/algif_hash.c10
-rw-r--r--crypto/algif_skcipher.c29
-rw-r--r--crypto/ansi_cprng.c2
-rw-r--r--crypto/anubis.c1
-rw-r--r--crypto/api.c4
-rw-r--r--crypto/arc4.c1
-rw-r--r--crypto/authenc.c1
-rw-r--r--crypto/authencesn.c1
-rw-r--r--crypto/blowfish_generic.c2
-rw-r--r--crypto/camellia_generic.c2
-rw-r--r--crypto/cast5_generic.c2
-rw-r--r--crypto/cast6_generic.c2
-rw-r--r--crypto/cbc.c1
-rw-r--r--crypto/ccm.c5
-rw-r--r--crypto/chainiv.c1
-rw-r--r--crypto/cmac.c1
-rw-r--r--crypto/crc32.c1
-rw-r--r--crypto/crc32c_generic.c2
-rw-r--r--crypto/crct10dif_generic.c2
-rw-r--r--crypto/cryptd.c1
-rw-r--r--crypto/crypto_null.c6
-rw-r--r--crypto/crypto_user.c6
-rw-r--r--crypto/ctr.c3
-rw-r--r--crypto/cts.c1
-rw-r--r--crypto/deflate.c2
-rw-r--r--crypto/des_generic.c2
-rw-r--r--crypto/drbg.c127
-rw-r--r--crypto/ecb.c1
-rw-r--r--crypto/eseqiv.c1
-rw-r--r--crypto/fcrypt.c1
-rw-r--r--crypto/gcm.c7
-rw-r--r--crypto/ghash-generic.c2
-rw-r--r--crypto/hmac.c1
-rw-r--r--crypto/khazad.c1
-rw-r--r--crypto/krng.c2
-rw-r--r--crypto/lrw.c1
-rw-r--r--crypto/lz4.c1
-rw-r--r--crypto/lz4hc.c1
-rw-r--r--crypto/lzo.c1
-rw-r--r--crypto/mcryptd.c1
-rw-r--r--crypto/md4.c2
-rw-r--r--crypto/md5.c1
-rw-r--r--crypto/michael_mic.c1
-rw-r--r--crypto/pcbc.c1
-rw-r--r--crypto/pcrypt.c1
-rw-r--r--crypto/rmd128.c1
-rw-r--r--crypto/rmd160.c1
-rw-r--r--crypto/rmd256.c1
-rw-r--r--crypto/rmd320.c1
-rw-r--r--crypto/salsa20_generic.c2
-rw-r--r--crypto/seed.c1
-rw-r--r--crypto/seqiv.c1
-rw-r--r--crypto/serpent_generic.c4
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c4
-rw-r--r--crypto/sha512_generic.c4
-rw-r--r--crypto/tcrypt.c32
-rw-r--r--crypto/tea.c4
-rw-r--r--crypto/testmgr.c3
-rw-r--r--crypto/tgr192.c4
-rw-r--r--crypto/twofish_generic.c2
-rw-r--r--crypto/vmac.c1
-rw-r--r--crypto/wp512.c4
-rw-r--r--crypto/xcbc.c1
-rw-r--r--crypto/xts.c1
-rw-r--r--crypto/zlib.c1
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/acpi_pad.c8
-rw-r--r--drivers/acpi/apei/ghes.c1
-rw-r--r--drivers/acpi/blacklist.c54
-rw-r--r--drivers/acpi/pci_root.c16
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/amba/tegra-ahb.c1
-rw-r--r--drivers/android/Kconfig37
-rw-r--r--drivers/android/Makefile3
-rw-r--r--drivers/android/binder.c (renamed from drivers/staging/android/binder.c)6
-rw-r--r--drivers/android/binder_trace.h (renamed from drivers/staging/android/binder_trace.h)0
-rw-r--r--drivers/ata/ahci_da850.c1
-rw-r--r--drivers/ata/ahci_imx.c1
-rw-r--r--drivers/ata/ahci_mvebu.c1
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/ahci_st.c1
-rw-r--r--drivers/ata/ahci_sunxi.c17
-rw-r--r--drivers/ata/ahci_xgene.c1
-rw-r--r--drivers/ata/libata-core.c20
-rw-r--r--drivers/ata/libata-eh.c7
-rw-r--r--drivers/ata/libata-scsi.c31
-rw-r--r--drivers/ata/libata-transport.c1
-rw-r--r--drivers/ata/pata_arasan_cf.c1
-rw-r--r--drivers/ata/pata_at32.c1
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_bf54x.c1
-rw-r--r--drivers/ata/pata_ep93xx.c1
-rw-r--r--drivers/ata/pata_imx.c1
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c1
-rw-r--r--drivers/ata/pata_mpc52xx.c1
-rw-r--r--drivers/ata/pata_octeon_cf.c1
-rw-r--r--drivers/ata/pata_of_platform.c1
-rw-r--r--drivers/ata/pata_palmld.c1
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_pxa.c1
-rw-r--r--drivers/ata/pata_rb532_cf.c1
-rw-r--r--drivers/ata/pata_samsung_cf.c1
-rw-r--r--drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_fsl.c1
-rw-r--r--drivers/ata/sata_highbank.c1
-rw-r--r--drivers/ata/sata_mv.c1
-rw-r--r--drivers/ata/sata_rcar.c1
-rw-r--r--drivers/atm/eni.c3
-rw-r--r--drivers/atm/fore200e.c1
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/bus.c8
-rw-r--r--drivers/base/cacheinfo.c539
-rw-r--r--drivers/base/core.c38
-rw-r--r--drivers/base/cpu.c59
-rw-r--r--drivers/base/devcoredump.c56
-rw-r--r--drivers/base/firmware_class.c7
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/platform.c23
-rw-r--r--drivers/base/topology.c71
-rw-r--r--drivers/bcma/bcma_private.h1
-rw-r--r--drivers/bcma/driver_chipcommon.c2
-rw-r--r--drivers/bcma/driver_gpio.c4
-rw-r--r--drivers/bcma/driver_mips.c24
-rw-r--r--drivers/bcma/driver_pci_host.c4
-rw-r--r--drivers/bcma/main.c79
-rw-r--r--drivers/bcma/scan.c1
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/drbd/drbd_actlog.c3
-rw-r--r--drivers/block/drbd/drbd_int.h39
-rw-r--r--drivers/block/drbd/drbd_main.c23
-rw-r--r--drivers/block/drbd/drbd_nl.c64
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/drbd/drbd_req.c25
-rw-r--r--drivers/block/drbd/drbd_state.c42
-rw-r--r--drivers/block/drbd/drbd_state.h5
-rw-r--r--drivers/block/drbd/drbd_worker.c5
-rw-r--r--drivers/block/mg_disk.c1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c5
-rw-r--r--drivers/block/null_blk.c52
-rw-r--r--drivers/block/nvme-core.c1594
-rw-r--r--drivers/block/nvme-scsi.c162
-rw-r--r--drivers/block/rbd.c11
-rw-r--r--drivers/block/rsxx/dev.c29
-rw-r--r--drivers/block/sunvdc.c222
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/virtio_blk.c81
-rw-r--r--drivers/block/xen-blkfront.c65
-rw-r--r--drivers/block/zram/zram_drv.c104
-rw-r--r--drivers/block/zram/zram_drv.h4
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/ath3k.c6
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c31
-rw-r--r--drivers/bluetooth/btmrvl_drv.h20
-rw-r--r--drivers/bluetooth/btmrvl_main.c68
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c304
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h5
-rw-r--r--drivers/bluetooth/btusb.c15
-rw-r--r--drivers/bluetooth/btwilink.c1
-rw-r--r--drivers/bluetooth/hci_ath.c2
-rw-r--r--drivers/bluetooth/hci_h5.c25
-rw-r--r--drivers/bus/brcmstb_gisb.c1
-rw-r--r--drivers/bus/imx-weim.c1
-rw-r--r--drivers/bus/omap-ocp2scp.c1
-rw-r--r--drivers/bus/omap_l3_noc.c1
-rw-r--r--drivers/char/agp/intel-gtt.c4
-rw-r--r--drivers/char/hangcheck-timer.c4
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c16
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c1
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c1
-rw-r--r--drivers/char/hw_random/core.c12
-rw-r--r--drivers/char/hw_random/exynos-rng.c1
-rw-r--r--drivers/char/hw_random/msm-rng.c1
-rw-r--r--drivers/char/hw_random/mxc-rnga.c1
-rw-r--r--drivers/char/hw_random/n2-drv.c1
-rw-r--r--drivers/char/hw_random/octeon-rng.c1
-rw-r--r--drivers/char/hw_random/omap-rng.c1
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c1
-rw-r--r--drivers/char/hw_random/pasemi-rng.c1
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c1
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c1
-rw-r--r--drivers/char/hw_random/tx4939-rng.c1
-rw-r--r--drivers/char/i8k.c26
-rw-r--r--drivers/char/ipmi/Kconfig14
-rw-r--r--drivers/char/ipmi/Makefile2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c560
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c310
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c498
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c1870
-rw-r--r--drivers/char/sonypi.c1
-rw-r--r--drivers/char/tb0219.c1
-rw-r--r--drivers/char/tpm/tpm_atmel.c1
-rw-r--r--drivers/char/tpm/tpm_nsc.c1
-rw-r--r--drivers/char/tpm/tpm_tis.c1
-rw-r--r--drivers/char/virtio_console.c39
-rw-r--r--drivers/char/xillybus/xillybus_of.c1
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/clk-axm5516.c1
-rw-r--r--drivers/clk/clk-ls1x.c109
-rw-r--r--drivers/clk/clk-ppc-corenet.c1
-rw-r--r--drivers/clk/clk-s2mps11.c1
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c1
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c1
-rw-r--r--drivers/clk/ux500/abx500-clk.c1
-rw-r--r--drivers/clk/x86/clk-lpt.c1
-rw-r--r--drivers/clocksource/Kconfig5
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/mips-gic-timer.c166
-rw-r--r--drivers/connector/connector.c6
-rw-r--r--drivers/coresight/Makefile11
-rw-r--r--drivers/coresight/coresight-etb10.c537
-rw-r--r--drivers/coresight/coresight-etm-cp14.c591
-rw-r--r--drivers/coresight/coresight-etm.h251
-rw-r--r--drivers/coresight/coresight-etm3x.c1928
-rw-r--r--drivers/coresight/coresight-funnel.c268
-rw-r--r--drivers/coresight/coresight-priv.h63
-rw-r--r--drivers/coresight/coresight-replicator.c137
-rw-r--r--drivers/coresight/coresight-tmc.c776
-rw-r--r--drivers/coresight/coresight-tpiu.c217
-rw-r--r--drivers/coresight/coresight.c717
-rw-r--r--drivers/coresight/of_coresight.c204
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c1
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c1
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c1
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c1
-rw-r--r--drivers/cpufreq/integrator-cpufreq.c1
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c1
-rw-r--r--drivers/cpufreq/omap-cpufreq.c1
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c1
-rw-r--r--drivers/cpufreq/spear-cpufreq.c1
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c1
-rw-r--r--drivers/cpuidle/cpuidle-at91.c1
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c1
-rw-r--r--drivers/cpuidle/cpuidle-clps711x.c1
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c1
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c1
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c3
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c1
-rw-r--r--drivers/cpuidle/cpuidle-zynq.c1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c1
-rw-r--r--drivers/crypto/atmel-aes.c1
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/atmel-tdes.c1
-rw-r--r--drivers/crypto/bfin_crc.c3
-rw-r--r--drivers/crypto/caam/caamalg.c1904
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/caam/desc_constr.h2
-rw-r--r--drivers/crypto/caam/error.c25
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/ccp/ccp-platform.c1
-rw-r--r--drivers/crypto/mv_cesa.c1
-rw-r--r--drivers/crypto/mxs-dcp.c1
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/nx/nx-842.c4
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c12
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c61
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c13
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c12
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c66
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c81
-rw-r--r--drivers/crypto/nx/nx-sha256.c208
-rw-r--r--drivers/crypto/nx/nx-sha512.c222
-rw-r--r--drivers/crypto/nx/nx.c127
-rw-r--r--drivers/crypto/nx/nx.h8
-rw-r--r--drivers/crypto/omap-aes.c1
-rw-r--r--drivers/crypto/omap-des.c1
-rw-r--r--drivers/crypto/omap-sham.c1
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c3
-rw-r--r--drivers/crypto/qat/qat_common/adf_dev_mgr.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c15
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_access_macros.h9
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c37
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c3
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_isr.c12
-rw-r--r--drivers/crypto/qce/core.c1
-rw-r--r--drivers/crypto/s5p-sss.c1
-rw-r--r--drivers/crypto/sahara.c795
-rw-r--r--drivers/crypto/talitos.c1
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c7
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c11
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c1
-rw-r--r--drivers/devfreq/exynos/exynos5_bus.c1
-rw-r--r--drivers/dma-buf/fence.c2
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/at_xdmac.c1524
-rw-r--r--drivers/dma/bcm2835-dma.c3
-rw-r--r--drivers/dma/bestcomm/bestcomm.c1
-rw-r--r--drivers/dma/cppi41.c12
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmaengine.c3
-rw-r--r--drivers/dma/edma.c1
-rw-r--r--drivers/dma/fsl-edma.c192
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/imx-dma.c1
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/ioat/dma_v3.c35
-rw-r--r--drivers/dma/iop-adma.c1
-rw-r--r--drivers/dma/ipu/ipu_idmac.c1
-rw-r--r--drivers/dma/k3dma.c4
-rw-r--r--drivers/dma/mmp_pdma.c1
-rw-r--r--drivers/dma/mmp_tdma.c1
-rw-r--r--drivers/dma/moxart-dma.c1
-rw-r--r--drivers/dma/mpc512x_dma.c13
-rw-r--r--drivers/dma/mv_xor.c1
-rw-r--r--drivers/dma/nbpfaxi.c1
-rw-r--r--drivers/dma/omap-dma.c3
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c106
-rw-r--r--drivers/dma/ppc4xx/adma.c1
-rw-r--r--drivers/dma/qcom_bam_dma.c231
-rw-r--r--drivers/dma/s3c24xx-dma.c1
-rw-r--r--drivers/dma/sa11x0-dma.c4
-rw-r--r--drivers/dma/sh/rcar-audmapp.c3
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c3
-rw-r--r--drivers/dma/sh/shdma-base.c4
-rw-r--r--drivers/dma/sh/shdma-of.c1
-rw-r--r--drivers/dma/sh/shdmac.c3
-rw-r--r--drivers/dma/sh/sudmac.c3
-rw-r--r--drivers/dma/sirf-dma.c6
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/sun6i-dma.c122
-rw-r--r--drivers/dma/tegra20-apb-dma.c1
-rw-r--r--drivers/dma/timb_dma.c1
-rw-r--r--drivers/dma/txx9dmac.c2
-rw-r--r--drivers/dma/txx9dmac.h4
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c13
-rw-r--r--drivers/edac/cell_edac.c1
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/ppc4xx_edac.c1
-rw-r--r--drivers/edac/sb_edac.c208
-rw-r--r--drivers/edac/tile_edac.c1
-rw-r--r--drivers/extcon/extcon-adc-jack.c1
-rw-r--r--drivers/extcon/extcon-arizona.c1
-rw-r--r--drivers/extcon/extcon-class.c14
-rw-r--r--drivers/extcon/extcon-gpio.c1
-rw-r--r--drivers/extcon/extcon-max14577.c3
-rw-r--r--drivers/extcon/extcon-max77693.c13
-rw-r--r--drivers/extcon/extcon-max8997.c1
-rw-r--r--drivers/extcon/extcon-palmas.c1
-rw-r--r--drivers/firewire/core-device.c3
-rw-r--r--drivers/firewire/ohci.c6
-rw-r--r--drivers/firewire/sbp2.c67
-rw-r--r--drivers/firmware/dcdbas.c1
-rw-r--r--drivers/gpio/Kconfig23
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c170
-rw-r--r--drivers/gpio/gpio-adp5520.c1
-rw-r--r--drivers/gpio/gpio-amd8111.c1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c5
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-crystalcove.c1
-rw-r--r--drivers/gpio/gpio-cs5535.c12
-rw-r--r--drivers/gpio/gpio-da9052.c1
-rw-r--r--drivers/gpio/gpio-da9055.c1
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-dwapb.c5
-rw-r--r--drivers/gpio/gpio-em.c12
-rw-r--r--drivers/gpio/gpio-ep93xx.c1
-rw-r--r--drivers/gpio/gpio-f7188x.c1
-rw-r--r--drivers/gpio/gpio-ge.c1
-rw-r--r--drivers/gpio/gpio-grgpio.c2
-rw-r--r--drivers/gpio/gpio-ich.c1
-rw-r--r--drivers/gpio/gpio-iop.c1
-rw-r--r--drivers/gpio/gpio-janz-ttl.c1
-rw-r--r--drivers/gpio/gpio-kempld.c1
-rw-r--r--drivers/gpio/gpio-lp3943.c1
-rw-r--r--drivers/gpio/gpio-lpc32xx.c1
-rw-r--r--drivers/gpio/gpio-lynxpoint.c1
-rw-r--r--drivers/gpio/gpio-mcp23s08.c41
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c1
-rw-r--r--drivers/gpio/gpio-moxart.c1
-rw-r--r--drivers/gpio/gpio-mpc5200.c2
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c27
-rw-r--r--drivers/gpio/gpio-msic.c1
-rw-r--r--drivers/gpio/gpio-msm-v1.c3
-rw-r--r--drivers/gpio/gpio-msm-v2.c1
-rw-r--r--drivers/gpio/gpio-mvebu.c100
-rw-r--r--drivers/gpio/gpio-mxc.c1
-rw-r--r--drivers/gpio/gpio-mxs.c14
-rw-r--r--drivers/gpio/gpio-octeon.c1
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pl061.c20
-rw-r--r--drivers/gpio/gpio-rc5t583.c1
-rw-r--r--drivers/gpio/gpio-rcar.c27
-rw-r--r--drivers/gpio/gpio-sch.c1
-rw-r--r--drivers/gpio/gpio-spear-spics.c3
-rw-r--r--drivers/gpio/gpio-sta2x11.c1
-rw-r--r--drivers/gpio/gpio-stp-xway.c9
-rw-r--r--drivers/gpio/gpio-syscon.c1
-rw-r--r--drivers/gpio/gpio-tb10x.c8
-rw-r--r--drivers/gpio/gpio-tegra.c5
-rw-r--r--drivers/gpio/gpio-timberdale.c1
-rw-r--r--drivers/gpio/gpio-tps65912.c1
-rw-r--r--drivers/gpio/gpio-ts5500.c1
-rw-r--r--drivers/gpio/gpio-twl4030.c1
-rw-r--r--drivers/gpio/gpio-twl6040.c1
-rw-r--r--drivers/gpio/gpio-tz1090-pdc.c1
-rw-r--r--drivers/gpio/gpio-tz1090.c3
-rw-r--r--drivers/gpio/gpio-vf610.c295
-rw-r--r--drivers/gpio/gpio-vr41xx.c5
-rw-r--r--drivers/gpio/gpio-vx855.c1
-rw-r--r--drivers/gpio/gpio-xgene.c1
-rw-r--r--drivers/gpio/gpio-xtensa.c1
-rw-r--r--drivers/gpio/gpio-zevio.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c68
-rw-r--r--drivers/gpio/gpiolib-legacy.c12
-rw-r--r--drivers/gpio/gpiolib-sysfs.c6
-rw-r--r--drivers/gpio/gpiolib.c241
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/README.drm43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_regs.h221
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c595
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h294
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c308
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c1062
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h146
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c256
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c356
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c176
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c353
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h69
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c159
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c346
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h91
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c565
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c96
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h405
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h107
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h600
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c410
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c343
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c85
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c1235
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h168
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h185
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c18
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c23
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c22
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c40
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c657
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c1966
-rw-r--r--drivers/gpu/drm/drm_crtc.c581
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c132
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c201
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c68
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c231
-rw-r--r--drivers/gpu/drm/drm_edid_load.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c135
-rw-r--r--drivers/gpu/drm/drm_flip_work.c105
-rw-r--r--drivers/gpu/drm/drm_fops.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c13
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c259
-rw-r--r--drivers/gpu/drm/drm_irq.c9
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c660
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c43
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c203
-rw-r--r--drivers/gpu/drm/drm_prime.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c132
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c42
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c253
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c129
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c266
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c150
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c126
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c195
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c75
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h12
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c31
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c170
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c20
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c49
-rw-r--r--drivers/gpu/drm/i2c/Kconfig6
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c1010
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h289
-rw-r--r--drivers/gpu/drm/i915/Makefile13
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c39
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c270
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1070
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c359
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h311
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c87
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c96
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c43
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1000
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h643
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c57
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h104
-rw-r--r--drivers/gpu/drm/i915/i915_ums.c14
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c463
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h10
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c719
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2851
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c985
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h212
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c44
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c381
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c279
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c120
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c338
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c136
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2653
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c481
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate.h1
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c792
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen9.c974
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c419
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h12
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1406
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c47
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c605
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c495
-rw-r--r--drivers/gpu/drm/imx/Kconfig (renamed from drivers/staging/imx-drm/Kconfig)7
-rw-r--r--drivers/gpu/drm/imx/Makefile (renamed from drivers/staging/imx-drm/Makefile)0
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c (renamed from drivers/staging/imx-drm/imx-drm-core.c)7
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h (renamed from drivers/staging/imx-drm/imx-drm.h)0
-rw-r--r--drivers/gpu/drm/imx/imx-hdmi.c (renamed from drivers/staging/imx-drm/imx-hdmi.c)1
-rw-r--r--drivers/gpu/drm/imx/imx-hdmi.h (renamed from drivers/staging/imx-drm/imx-hdmi.h)0
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c (renamed from drivers/staging/imx-drm/imx-ldb.c)6
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c (renamed from drivers/staging/imx-drm/imx-tve.c)9
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c (renamed from drivers/staging/imx-drm/ipuv3-crtc.c)5
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c (renamed from drivers/staging/imx-drm/ipuv3-plane.c)43
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h (renamed from drivers/staging/imx-drm/ipuv3-plane.h)2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c (renamed from drivers/staging/imx-drm/parallel-display.c)14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h26
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h247
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c91
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h2144
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c604
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h34
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c31
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h126
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h75
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h8
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c144
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h17
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c3
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c7
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c348
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c17
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h17
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c121
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c207
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h91
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c466
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c322
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h122
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c24
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c93
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c273
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h131
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c328
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c241
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h23
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c163
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h35
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c45
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c40
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h13
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c13
-rw-r--r--drivers/gpu/drm/nouveau/Makefile18
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c113
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm204.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h63
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h12
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c369
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/image.c78
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/npde.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/priv.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c270
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/timing.c42
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c813
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c97
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c221
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc111
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h738
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h863
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h828
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h754
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/base.c67
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c199
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c248
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c134
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c15
-rw-r--r--drivers/gpu/drm/panel/Kconfig13
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c13
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c30
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c464
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c134
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c36
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c3
-rw-r--r--drivers/gpu/drm/r128/r128_state.c4
-rw-r--r--drivers/gpu/drm/radeon/Makefile4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c752
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h8
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c214
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h136
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c42
-rw-r--r--drivers/gpu/drm/radeon/cikd.h93
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c24
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c17
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h18
-rw-r--r--drivers/gpu/drm/radeon/pptable.h8
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c6
-rw-r--r--drivers/gpu/drm/radeon/r600.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c9
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h162
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c121
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c268
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c92
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c563
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.h47
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h20
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c154
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c220
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c236
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c18
-rw-r--r--drivers/gpu/drm/radeon/si.c24
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c37
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c381
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h5
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/sid.h40
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h25
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h30
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig11
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c45
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h23
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c121
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c151
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c57
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig17
-rw-r--r--drivers/gpu/drm/rockchip/Makefile8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c551
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h68
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c201
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h28
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c210
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h (renamed from drivers/staging/android/binder.h)23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c294
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h54
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1455
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h201
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c1
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/Makefile4
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c21
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c242
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.c24
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c4
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c62
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c84
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h6
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1073
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp_lut.h373
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c18
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c17
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h3
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c104
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c31
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c596
-rw-r--r--drivers/gpu/drm/tegra/drm.c46
-rw-r--r--drivers/gpu/drm/tegra/drm.h18
-rw-r--r--drivers/gpu/drm/tegra/dsi.c811
-rw-r--r--drivers/gpu/drm/tegra/dsi.h14
-rw-r--r--drivers/gpu/drm/tegra/fb.c52
-rw-r--r--drivers/gpu/drm/tegra/gem.c366
-rw-r--r--drivers/gpu/drm/tegra/gem.h14
-rw-r--r--drivers/gpu/drm/tegra/output.c35
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c7
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c25
-rw-r--r--drivers/gpu/drm/udl/Makefile2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c276
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c97
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/host1x/cdma.c2
-rw-r--r--drivers/gpu/host1x/cdma.h2
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c10
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c12
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c4
-rw-r--r--drivers/gpu/host1x/job.h2
-rw-r--r--drivers/gpu/host1x/mipi.c148
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c66
-rw-r--r--drivers/hid/Kconfig22
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-core.c36
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input.c4
-rw-r--r--drivers/hid/hid-lenovo.c13
-rw-r--r--drivers/hid/hid-logitech-dj.c397
-rw-r--r--drivers/hid/hid-logitech-dj.h125
-rw-r--r--drivers/hid/hid-logitech-hidpp.c1241
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-multitouch.c27
-rw-r--r--drivers/hid/hid-plantronics.c55
-rw-r--r--drivers/hid/hid-rmi.c83
-rw-r--r--drivers/hid/hid-roccat-kone.c9
-rw-r--r--drivers/hid/hid-saitek.c4
-rw-r--r--drivers/hid/hid-sony.c150
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c16
-rw-r--r--drivers/hid/usbhid/hid-core.c39
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/wacom.h2
-rw-r--r--drivers/hid/wacom_sys.c68
-rw-r--r--drivers/hid/wacom_wac.c216
-rw-r--r--drivers/hid/wacom_wac.h3
-rw-r--r--drivers/hsi/clients/nokia-modem.c8
-rw-r--r--drivers/hsi/controllers/omap_ssi.c1
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c4
-rw-r--r--drivers/hv/channel_mgmt.c13
-rw-r--r--drivers/hv/hv_balloon.c10
-rw-r--r--drivers/hv/hv_kvp.c9
-rw-r--r--drivers/hv/hv_snapshot.c28
-rw-r--r--drivers/hwmon/abituguru.c1
-rw-r--r--drivers/hwmon/abituguru3.c1
-rw-r--r--drivers/hwmon/abx500.c1
-rw-r--r--drivers/hwmon/applesmc.c1
-rw-r--r--drivers/hwmon/coretemp.c1
-rw-r--r--drivers/hwmon/da9052-hwmon.c1
-rw-r--r--drivers/hwmon/da9055-hwmon.c1
-rw-r--r--drivers/hwmon/dme1737.c1
-rw-r--r--drivers/hwmon/f71805f.c1
-rw-r--r--drivers/hwmon/f71882fg.c1
-rw-r--r--drivers/hwmon/i5k_amb.c1
-rw-r--r--drivers/hwmon/ibmpowernv.c1
-rw-r--r--drivers/hwmon/iio_hwmon.c1
-rw-r--r--drivers/hwmon/it87.c1
-rw-r--r--drivers/hwmon/jz4740-hwmon.c1
-rw-r--r--drivers/hwmon/lm75.c9
-rw-r--r--drivers/hwmon/lm78.c1
-rw-r--r--drivers/hwmon/max197.c1
-rw-r--r--drivers/hwmon/mc13783-adc.c1
-rw-r--r--drivers/hwmon/menf21bmc_hwmon.c1
-rw-r--r--drivers/hwmon/nct6683.c1
-rw-r--r--drivers/hwmon/nct6775.c1
-rw-r--r--drivers/hwmon/ntc_thermistor.c7
-rw-r--r--drivers/hwmon/pc87360.c1
-rw-r--r--drivers/hwmon/pc87427.c1
-rw-r--r--drivers/hwmon/s3c-hwmon.c1
-rw-r--r--drivers/hwmon/sch5627.c1
-rw-r--r--drivers/hwmon/sch5636.c1
-rw-r--r--drivers/hwmon/sht15.c1
-rw-r--r--drivers/hwmon/sis5595.c1
-rw-r--r--drivers/hwmon/smsc47b397.c1
-rw-r--r--drivers/hwmon/smsc47m1.c1
-rw-r--r--drivers/hwmon/tmp102.c6
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c1
-rw-r--r--drivers/hwmon/ultra45_env.c1
-rw-r--r--drivers/hwmon/vexpress.c1
-rw-r--r--drivers/hwmon/via-cputemp.c1
-rw-r--r--drivers/hwmon/via686a.c1
-rw-r--r--drivers/hwmon/vt1211.c1
-rw-r--r--drivers/hwmon/vt8231.c1
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/hwmon/w83627hf.c1
-rw-r--r--drivers/hwmon/w83781d.c1
-rw-r--r--drivers/hwmon/wm831x-hwmon.c1
-rw-r--r--drivers/hwmon/wm8350-hwmon.c1
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c1
-rw-r--r--drivers/hwspinlock/u8500_hsem.c1
-rw-r--r--drivers/i2c/Kconfig10
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig20
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-at91.c128
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c1
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c25
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/i2c/busses/i2c-efm32.c1
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c5
-rw-r--r--drivers/i2c/busses/i2c-gpio.c1
-rw-r--r--drivers/i2c/busses/i2c-highlander.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c57
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c1412
-rw-r--r--drivers/i2c/busses/i2c-imx.c355
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c1
-rw-r--r--drivers/i2c/busses/i2c-isch.c1
-rw-r--r--drivers/i2c/busses/i2c-kempld.c1
-rw-r--r--drivers/i2c/busses/i2c-meson.c492
-rw-r--r--drivers/i2c/busses/i2c-mpc.c19
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c1
-rw-r--r--drivers/i2c/busses/i2c-mxs.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c1
-rw-r--r--drivers/i2c/busses/i2c-octeon.c1
-rw-r--r--drivers/i2c/busses/i2c-omap.c134
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c1
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c1
-rw-r--r--drivers/i2c/busses/i2c-pxa.c20
-rw-r--r--drivers/i2c/busses/i2c-qup.c1
-rw-r--r--drivers/i2c/busses/i2c-rcar.c125
-rw-r--r--drivers/i2c/busses/i2c-riic.c1
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c252
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c30
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c1
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c245
-rw-r--r--drivers/i2c/busses/i2c-simtec.c1
-rw-r--r--drivers/i2c/busses/i2c-sirf.c1
-rw-r--r--drivers/i2c/busses/i2c-st.c1
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c1
-rw-r--r--drivers/i2c/busses/i2c-tegra.c1
-rw-r--r--drivers/i2c/busses/i2c-versatile.c1
-rw-r--r--drivers/i2c/busses/i2c-wmt.c1
-rw-r--r--drivers/i2c/busses/i2c-xiic.c59
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/busses/scx200_acb.c1
-rw-r--r--drivers/i2c/i2c-core.c209
-rw-r--r--drivers/i2c/i2c-mux.c12
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c170
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c1
-rw-r--r--drivers/ide/au1xxx-ide.c1
-rw-r--r--drivers/ide/gayle.c1
-rw-r--r--drivers/ide/ide-floppy.c4
-rw-r--r--drivers/ide/ide.c3
-rw-r--r--drivers/ide/ide_platform.c1
-rw-r--r--drivers/ide/palm_bk3710.c1
-rw-r--r--drivers/ide/tx4938ide.c1
-rw-r--r--drivers/ide/tx4939ide.c1
-rw-r--r--drivers/iio/accel/st_accel.h3
-rw-r--r--drivers/iio/accel/st_accel_core.c22
-rw-r--r--drivers/iio/accel/st_accel_i2c.c3
-rw-r--r--drivers/iio/accel/st_accel_spi.c3
-rw-r--r--drivers/iio/adc/Kconfig14
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/exynos_adc.c62
-rw-r--r--drivers/iio/adc/mcp320x.c222
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c595
-rw-r--r--drivers/iio/adc/rockchip_saradc.c65
-rw-r--r--drivers/iio/adc/vf610_adc.c45
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c126
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c1
-rw-r--r--drivers/iio/gyro/st_gyro.h3
-rw-r--r--drivers/iio/gyro/st_gyro_core.c19
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c4
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/si7020.c161
-rw-r--r--drivers/iio/inkern.c33
-rw-r--r--drivers/iio/magnetometer/st_magn.h3
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c3
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c3
-rw-r--r--drivers/iio/pressure/Kconfig11
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280.c455
-rw-r--r--drivers/iio/pressure/st_pressure.h3
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c12
-rw-r--r--drivers/iio/pressure/st_pressure_core.c49
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c11
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c11
-rw-r--r--drivers/iio/proximity/as3935.c16
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/Makefile1
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/multicast.c11
-rw-r--r--drivers/infiniband/core/umem.c72
-rw-r--r--drivers/infiniband/core/umem_odp.c668
-rw-r--r--drivers/infiniband/core/umem_rbtree.c94
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c171
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c183
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c62
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c28
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c13
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c45
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c69
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h116
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c330
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c798
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c221
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c239
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c22
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c104
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h30
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c102
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c91
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/joystick/xpad.c8
-rw-r--r--drivers/input/keyboard/Kconfig8
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/adp5520-keys.c1
-rw-r--r--drivers/input/keyboard/amikbd.c48
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/bf54x-keys.c1
-rw-r--r--drivers/input/keyboard/cap1106.c341
-rw-r--r--drivers/input/keyboard/cap11xx.c376
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c1
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c1
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/goldfish_events.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c38
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c1
-rw-r--r--drivers/input/keyboard/imx_keypad.c1
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c1
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c1
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c93
-rw-r--r--drivers/input/keyboard/matrix_keypad.c1
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c42
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c1
-rw-r--r--drivers/input/keyboard/nspire-keypad.c1
-rw-r--r--drivers/input/keyboard/omap-keypad.c1
-rw-r--r--drivers/input/keyboard/omap4-keypad.c1
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c1
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c85
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c1
-rw-r--r--drivers/input/keyboard/samsung-keypad.c1
-rw-r--r--drivers/input/keyboard/spear-keyboard.c1
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c1
-rw-r--r--drivers/input/keyboard/tegra-kbc.c1
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c1
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c1
-rw-r--r--drivers/input/misc/88pm80x_onkey.c1
-rw-r--r--drivers/input/misc/88pm860x_onkey.c7
-rw-r--r--drivers/input/misc/ab8500-ponkey.c1
-rw-r--r--drivers/input/misc/ad714x-i2c.c6
-rw-r--r--drivers/input/misc/ad714x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c6
-rw-r--r--drivers/input/misc/adxl34x-spi.c6
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/bfin_rotary.c1
-rw-r--r--drivers/input/misc/cobalt_btns.c1
-rw-r--r--drivers/input/misc/da9052_onkey.c1
-rw-r--r--drivers/input/misc/da9055_onkey.c1
-rw-r--r--drivers/input/misc/dm355evm_keys.c1
-rw-r--r--drivers/input/misc/drv260x.c6
-rw-r--r--drivers/input/misc/drv2667.c6
-rw-r--r--drivers/input/misc/gp2ap002a00f.c6
-rw-r--r--drivers/input/misc/gpio-beeper.c1
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c1
-rw-r--r--drivers/input/misc/ideapad_slidebar.c1
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c1
-rw-r--r--drivers/input/misc/kxtj9.c6
-rw-r--r--drivers/input/misc/m68kspkr.c1
-rw-r--r--drivers/input/misc/max77693-haptic.c7
-rw-r--r--drivers/input/misc/max8925_onkey.c7
-rw-r--r--drivers/input/misc/max8997_haptic.c5
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c1
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c7
-rw-r--r--drivers/input/misc/pcap_keys.c1
-rw-r--r--drivers/input/misc/pcspkr.c1
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c5
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/pwm-beeper.c7
-rw-r--r--drivers/input/misc/rb532_button.c1
-rw-r--r--drivers/input/misc/retu-pwrbutton.c1
-rw-r--r--drivers/input/misc/rotary_encoder.c1
-rw-r--r--drivers/input/misc/sgi_btns.c1
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c5
-rw-r--r--drivers/input/misc/soc_button_array.c1
-rw-r--r--drivers/input/misc/sparcspkr.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c1
-rw-r--r--drivers/input/misc/twl4030-vibra.c7
-rw-r--r--drivers/input/misc/twl6040-vibra.c5
-rw-r--r--drivers/input/misc/wistron_btns.c1
-rw-r--r--drivers/input/misc/wm831x-on.c1
-rw-r--r--drivers/input/mouse/Kconfig30
-rw-r--r--drivers/input/mouse/Makefile5
-rw-r--r--drivers/input/mouse/amimouse.c1
-rw-r--r--drivers/input/mouse/cyapa.c289
-rw-r--r--drivers/input/mouse/elan_i2c.h86
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1137
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c611
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c514
-rw-r--r--drivers/input/mouse/gpio_mouse.c1
-rw-r--r--drivers/input/mouse/lifebook.h6
-rw-r--r--drivers/input/mouse/navpoint.c7
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/serio/altera_ps2.c82
-rw-r--r--drivers/input/serio/apbps2.c1
-rw-r--r--drivers/input/serio/arc_ps2.c1
-rw-r--r--drivers/input/serio/at32psif.c1
-rw-r--r--drivers/input/serio/ct82c710.c1
-rw-r--r--drivers/input/serio/i8042-sparcio.h1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h10
-rw-r--r--drivers/input/serio/i8042.c1
-rw-r--r--drivers/input/serio/maceps2.c1
-rw-r--r--drivers/input/serio/olpc_apsp.c1
-rw-r--r--drivers/input/serio/q40kbd.c1
-rw-r--r--drivers/input/serio/rpckbd.c1
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/serio/serio_raw.c4
-rw-r--r--drivers/input/serio/xilinx_ps2.c1
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c1
-rw-r--r--drivers/input/touchscreen/Kconfig25
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c6
-rw-r--r--drivers/input/touchscreen/ads7846.c6
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c6
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c6
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c7
-rw-r--r--drivers/input/touchscreen/da9034-ts.c1
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c6
-rw-r--r--drivers/input/touchscreen/eeti_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c6
-rw-r--r--drivers/input/touchscreen/elants_i2c.c1271
-rw-r--r--drivers/input/touchscreen/goodix.c395
-rw-r--r--drivers/input/touchscreen/ili210x.c6
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c1
-rw-r--r--drivers/input/touchscreen/ipaq-micro-ts.c6
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c1
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c1
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c1
-rw-r--r--drivers/input/touchscreen/mms114.c6
-rw-r--r--drivers/input/touchscreen/pcap_ts.c1
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c1
-rw-r--r--drivers/input/touchscreen/st1232.c7
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c1
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c1
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c1
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c1
-rw-r--r--drivers/input/touchscreen/tsc2005.c6
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c7
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c1
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c1
-rw-r--r--drivers/input/touchscreen/zforce_ts.c6
-rw-r--r--drivers/iommu/Kconfig29
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_v2.c153
-rw-r--r--drivers/iommu/arm-smmu.c136
-rw-r--r--drivers/iommu/dmar.c532
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/fsl_pamu.c1
-rw-r--r--drivers/iommu/intel-iommu.c307
-rw-r--r--drivers/iommu/intel_irq_remapping.c249
-rw-r--r--drivers/iommu/iommu.c22
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/iommu/msm_iommu.c3
-rw-r--r--drivers/iommu/msm_iommu_dev.c10
-rw-r--r--drivers/iommu/of_iommu.c89
-rw-r--r--drivers/iommu/omap-iommu-debug.c242
-rw-r--r--drivers/iommu/omap-iommu.c312
-rw-r--r--drivers/iommu/omap-iommu.h98
-rw-r--r--drivers/iommu/omap-iommu2.c337
-rw-r--r--drivers/iommu/rockchip-iommu.c1038
-rw-r--r--drivers/iommu/shmobile-ipmmu.c1
-rw-r--r--drivers/iommu/tegra-gart.c1
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-gic-v2m.c333
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1425
-rw-r--r--drivers/irqchip/irq-gic-v3.c156
-rw-r--r--drivers/irqchip/irq-gic.c81
-rw-r--r--drivers/irqchip/irq-keystone.c1
-rw-r--r--drivers/irqchip/irq-mips-gic.c789
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c163
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c1
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c1
-rw-r--r--drivers/isdn/capi/capiutil.c2
-rw-r--r--drivers/isdn/gigaset/Kconfig2
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c77
-rw-r--r--drivers/isdn/hisax/hfc_2bs0.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c3
-rw-r--r--drivers/isdn/hisax/hfc_usb.c5
-rw-r--r--drivers/isdn/hisax/ipacx.c2
-rw-r--r--drivers/isdn/hisax/isdnl1.c2
-rw-r--r--drivers/isdn/hisax/isdnl3.c2
-rw-r--r--drivers/isdn/hysdn/hycapi.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_codec.c6
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/isdn/pcbit/layer2.c1
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c29
-rw-r--r--drivers/leds/led-core.c36
-rw-r--r--drivers/leds/led-triggers.c16
-rw-r--r--drivers/leds/leds-88pm860x.c1
-rw-r--r--drivers/leds/leds-adp5520.c1
-rw-r--r--drivers/leds/leds-asic3.c1
-rw-r--r--drivers/leds/leds-clevo-mail.c1
-rw-r--r--drivers/leds/leds-cobalt-qube.c1
-rw-r--r--drivers/leds/leds-cobalt-raq.c1
-rw-r--r--drivers/leds/leds-da903x.c1
-rw-r--r--drivers/leds/leds-da9052.c1
-rw-r--r--drivers/leds/leds-gpio.c1
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/leds/leds-lm3533.c1
-rw-r--r--drivers/leds/leds-lp8788.c1
-rw-r--r--drivers/leds/leds-lp8860.c491
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-max8997.c1
-rw-r--r--drivers/leds/leds-mc13783.c1
-rw-r--r--drivers/leds/leds-menf21bmc.c1
-rw-r--r--drivers/leds/leds-net48xx.c1
-rw-r--r--drivers/leds/leds-netxbig.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/leds/leds-ot200.c1
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/leds/leds-rb532.c1
-rw-r--r--drivers/leds/leds-regulator.c19
-rw-r--r--drivers/leds/leds-s3c24xx.c1
-rw-r--r--drivers/leds/leds-sunfire.c2
-rw-r--r--drivers/leds/leds-syscon.c71
-rw-r--r--drivers/leds/leds-wm831x-status.c1
-rw-r--r--drivers/leds/leds-wm8350.c1
-rw-r--r--drivers/leds/leds-wrap.c1
-rw-r--r--drivers/leds/leds.h20
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c8
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c2
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c6
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c4
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c10
-rw-r--r--drivers/lguest/lguest_device.c17
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/macintosh/therm_pm72.c1
-rw-r--r--drivers/macintosh/therm_windtunnel.c1
-rw-r--r--drivers/macintosh/windfarm_pm81.c1
-rw-r--r--drivers/macintosh/windfarm_pm91.c1
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/mailbox.c19
-rw-r--r--drivers/mailbox/mailbox.h14
-rw-r--r--drivers/mailbox/omap-mailbox.c347
-rw-r--r--drivers/mailbox/pcc.c403
-rw-r--r--drivers/md/bcache/request.c23
-rw-r--r--drivers/md/dm.c13
-rw-r--r--drivers/md/md.c44
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/common/cx2341x.c29
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c2
-rw-r--r--drivers/media/common/siano/smsir.c3
-rw-r--r--drivers/media/common/tveeprom.c36
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_net.c4
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/af9033.c140
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h11
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c117
-rw-r--r--drivers/media/dvb-frontends/cx22700.c3
-rw-r--r--drivers/media/dvb-frontends/cx24110.c50
-rw-r--r--drivers/media/dvb-frontends/cx24117.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c9
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c3
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c9
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c267
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h181
-rw-r--r--drivers/media/dvb-frontends/mn88472.h38
-rw-r--r--drivers/media/dvb-frontends/mn88473.h38
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c60
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h11
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c8
-rw-r--r--drivers/media/dvb-frontends/si2168.c75
-rw-r--r--drivers/media/dvb-frontends/si2168.h4
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h1
-rw-r--r--drivers/media/dvb-frontends/sp2.c21
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c196
-rw-r--r--drivers/media/dvb-frontends/stv090x.h44
-rw-r--r--drivers/media/firewire/firedtv-ci.c3
-rw-r--r--drivers/media/firewire/firedtv.h2
-rw-r--r--drivers/media/i2c/Kconfig9
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7170.c16
-rw-r--r--drivers/media/i2c/adv7175.c16
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7183.c6
-rw-r--r--drivers/media/i2c/adv7511.c229
-rw-r--r--drivers/media/i2c/adv7604.c109
-rw-r--r--drivers/media/i2c/adv7842.c40
-rw-r--r--drivers/media/i2c/ak881x.c8
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c14
-rw-r--r--drivers/media/i2c/cx25840/cx25840-firmware.c11
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c3
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c6
-rw-r--r--drivers/media/i2c/ml86v7667.c6
-rw-r--r--drivers/media/i2c/mt9m032.c6
-rw-r--r--drivers/media/i2c/mt9p031.c8
-rw-r--r--drivers/media/i2c/mt9t001.c8
-rw-r--r--drivers/media/i2c/mt9v011.c6
-rw-r--r--drivers/media/i2c/mt9v032.c12
-rw-r--r--drivers/media/i2c/noon010pc30.c12
-rw-r--r--drivers/media/i2c/ov7670.c16
-rw-r--r--drivers/media/i2c/ov9650.c10
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h6
-rw-r--r--drivers/media/i2c/s5k4ecgx.c4
-rw-r--r--drivers/media/i2c/s5k5baf.c14
-rw-r--r--drivers/media/i2c/s5k6a3.c2
-rw-r--r--drivers/media/i2c/s5k6aa.c8
-rw-r--r--drivers/media/i2c/saa6752hs.c6
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c280
-rw-r--r--drivers/media/i2c/smiapp-pll.h21
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c259
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h8
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c8
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c14
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c70
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c10
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c22
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c26
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c54
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c8
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c58
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c20
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c40
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c12
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c54
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c10
-rw-r--r--drivers/media/i2c/sr030pc30.c14
-rw-r--r--drivers/media/i2c/tvp514x.c12
-rw-r--r--drivers/media/i2c/tvp5150.c6
-rw-r--r--drivers/media/i2c/tvp7002.c10
-rw-r--r--drivers/media/i2c/vs6624.c18
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile3
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c12
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c18
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h3
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h3
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c9
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c131
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c15
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c691
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c31
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885.h8
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c12
-rw-r--r--drivers/media/pci/cx88/Kconfig5
-rw-r--r--drivers/media/pci/cx88/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c112
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c563
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c71
-rw-r--r--drivers/media/pci/cx88/cx88-core.c119
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c156
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c164
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c211
-rw-r--r--drivers/media/pci/cx88/cx88-video.c875
-rw-r--r--drivers/media/pci/cx88/cx88.h106
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/meye/meye.c3
-rw-r--r--drivers/media/pci/pt1/pt1.c13
-rw-r--r--drivers/media/pci/pt3/pt3.c75
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c18
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c101
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c13
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-types.h4
-rw-r--r--drivers/media/pci/saa7164/saa7164.h4
-rw-r--r--drivers/media/pci/smipcie/Kconfig17
-rw-r--r--drivers/media/pci/smipcie/Makefile6
-rw-r--r--drivers/media/pci/smipcie/smipcie.c1099
-rw-r--r--drivers/media/pci/smipcie/smipcie.h299
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c88
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h1
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c2
-rw-r--r--drivers/media/pci/tw68/tw68-core.c15
-rw-r--r--drivers/media/pci/tw68/tw68-video.c9
-rw-r--r--drivers/media/pci/tw68/tw68.h1
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c5
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/Makefile5
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c35
-rw-r--r--drivers/media/platform/coda/Makefile2
-rw-r--r--drivers/media/platform/coda/coda-bit.c322
-rw-r--r--drivers/media/platform/coda/coda-common.c608
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c238
-rw-r--r--drivers/media/platform/coda/coda.h24
-rw-r--r--drivers/media/platform/coda/coda_regs.h7
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c1
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c1
-rw-r--r--drivers/media/platform/davinci/isif.c1
-rw-r--r--drivers/media/platform/davinci/vpbe.c22
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c618
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c9
-rw-r--r--drivers/media/platform/davinci/vpif.c1
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c3
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1
-rw-r--r--drivers/media/platform/davinci/vpss.c1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c24
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c11
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c26
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.c14
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c1
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c15
-rw-r--r--drivers/media/platform/fsl-viu.c4
-rw-r--r--drivers/media/platform/m2m-deinterlace.c1
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c85
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h3
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/platform/mx2_emmaprp.c10
-rw-r--r--drivers/media/platform/omap/Kconfig3
-rw-r--r--drivers/media/platform/omap/omap_vout.c11
-rw-r--r--drivers/media/platform/omap3isp/isp.c1
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c112
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c18
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c42
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c60
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c19
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c95
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h10
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c10
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c8
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c11
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c10
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c50
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c122
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c65
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c13
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c32
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c3
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c1
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c3
-rw-r--r--drivers/media/platform/sh_veu.c1
-rw-r--r--drivers/media/platform/sh_vou.c12
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c23
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c26
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c7
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c36
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c16
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c481
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c21
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c39
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c3
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c78
-rw-r--r--drivers/media/platform/ti-vpe/csc.c10
-rw-r--r--drivers/media/platform/ti-vpe/sc.c10
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c1
-rw-r--r--drivers/media/platform/timblogiw.c1
-rw-r--r--drivers/media/platform/via-camera.c12
-rw-r--r--drivers/media/platform/vim2m.c (renamed from drivers/media/platform/mem2mem_testdev.c)223
-rw-r--r--drivers/media/platform/vivid/vivid-core.c21
-rw-r--r--drivers/media/platform/vivid/vivid-core.h16
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c165
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.c704
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.h4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c327
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h38
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c38
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c31
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c42
-rw-r--r--drivers/media/radio/radio-si476x.c1
-rw-r--r--drivers/media/radio/radio-timb.c1
-rw-r--r--drivers/media/radio/radio-wl1273.c5
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c29
-rw-r--r--drivers/media/radio/si4713/si4713.c164
-rw-r--r--drivers/media/radio/si4713/si4713.h15
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/rc/Kconfig26
-rw-r--r--drivers/media/rc/Makefile2
-rw-r--r--drivers/media/rc/gpio-ir-recv.c1
-rw-r--r--drivers/media/rc/igorplugusb.c261
-rw-r--r--drivers/media/rc/img-ir/Kconfig1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c1
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c28
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h6
-rw-r--r--drivers/media/rc/ir-lirc-codec.c12
-rw-r--r--drivers/media/rc/lirc_dev.c3
-rw-r--r--drivers/media/rc/meson-ir.c216
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/sunxi-cir.c1
-rw-r--r--drivers/media/tuners/Kconfig8
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/m88rs6000t.c744
-rw-r--r--drivers/media/tuners/m88rs6000t.h29
-rw-r--r--drivers/media/tuners/m88ts2022.c2
-rw-r--r--drivers/media/tuners/mxl5007t.c30
-rw-r--r--drivers/media/tuners/r820t.c12
-rw-r--r--drivers/media/tuners/si2157.c44
-rw-r--r--drivers/media/tuners/si2157.h2
-rw-r--r--drivers/media/tuners/si2157_priv.h8
-rw-r--r--drivers/media/tuners/tda18271-common.c2
-rw-r--r--drivers/media/tuners/xc5000.c17
-rw-r--r--drivers/media/tuners/xc5000.h1
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c5
-rw-r--r--drivers/media/usb/au0828/au0828-core.c8
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c14
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c59
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c97
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c331
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c257
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c165
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c159
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c132
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c8
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c47
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c48
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c89
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h41
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c438
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c22
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c231
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h7
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c3
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c230
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c19
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c71
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c41
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c95
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c17
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h3
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h1
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c98
-rw-r--r--drivers/media/usb/em28xx/em28xx.h27
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c25
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c3
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c51
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c161
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c1009
-rw-r--r--drivers/media/usb/uvc/uvc_video.c23
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c125
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c87
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c34
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c21
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c49
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c71
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c425
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c194
-rw-r--r--drivers/memory/atmel-sdramc.c1
-rw-r--r--drivers/memory/fsl-corenet-cf.c1
-rw-r--r--drivers/memory/fsl_ifc.c13
-rw-r--r--drivers/memory/mvebu-devbus.c1
-rw-r--r--drivers/memory/omap-gpmc.c1
-rw-r--r--drivers/memory/tegra20-mc.c1
-rw-r--r--drivers/memory/ti-aemif.c1
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c1
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c1
-rw-r--r--drivers/mfd/ab3100-otp.c1
-rw-r--r--drivers/mfd/ab8500-core.c1
-rw-r--r--drivers/mfd/ab8500-debugfs.c1
-rw-r--r--drivers/mfd/ab8500-gpadc.c1
-rw-r--r--drivers/mfd/ab8500-sysctrl.c1
-rw-r--r--drivers/mfd/davinci_voicecodec.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/intel_msic.c1
-rw-r--r--drivers/mfd/jz4740-adc.c1
-rw-r--r--drivers/mfd/kempld-core.c1
-rw-r--r--drivers/mfd/mcp-sa11x0.c1
-rw-r--r--drivers/mfd/omap-usb-host.c1
-rw-r--r--drivers/mfd/omap-usb-tll.c1
-rw-r--r--drivers/mfd/pm8921-core.c1
-rw-r--r--drivers/mfd/sm501.c1
-rw-r--r--drivers/mfd/ssbi.c1
-rw-r--r--drivers/mfd/sta2x11-mfd.c4
-rw-r--r--drivers/mfd/sun6i-prcm.c1
-rw-r--r--drivers/mfd/syscon.c1
-rw-r--r--drivers/mfd/t7l66xb.c1
-rw-r--r--drivers/mfd/tc6393xb.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c1
-rw-r--r--drivers/mfd/tps65911-comparator.c1
-rw-r--r--drivers/mfd/twl4030-audio.c1
-rw-r--r--drivers/mfd/twl4030-power.c1
-rw-r--r--drivers/mfd/vexpress-sysreg.c2
-rw-r--r--drivers/misc/arm-charlcd.c1
-rw-r--r--drivers/misc/atmel-ssc.c5
-rw-r--r--drivers/misc/carma/Kconfig6
-rw-r--r--drivers/misc/carma/carma-fpga-program.c105
-rw-r--r--drivers/misc/carma/carma-fpga.c99
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/cxl/cxl.h15
-rw-r--r--drivers/misc/cxl/fault.c8
-rw-r--r--drivers/misc/cxl/irq.c144
-rw-r--r--drivers/misc/cxl/native.c14
-rw-r--r--drivers/misc/eeprom/at24.c35
-rw-r--r--drivers/misc/eeprom/sunxi_sid.c1
-rw-r--r--drivers/misc/fuse/Makefile1
-rw-r--r--drivers/misc/genwqe/card_utils.c2
-rw-r--r--drivers/misc/mei/amthif.c34
-rw-r--r--drivers/misc/mei/bus.c4
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/misc/mei/debugfs.c8
-rw-r--r--drivers/misc/mei/hbm.c23
-rw-r--r--drivers/misc/mei/hbm.h4
-rw-r--r--drivers/misc/mei/hw-me-regs.h12
-rw-r--r--drivers/misc/mei/hw-me.c49
-rw-r--r--drivers/misc/mei/hw-me.h10
-rw-r--r--drivers/misc/mei/hw-txe.c7
-rw-r--r--drivers/misc/mei/init.c38
-rw-r--r--drivers/misc/mei/interrupt.c12
-rw-r--r--drivers/misc/mei/main.c43
-rw-r--r--drivers/misc/mei/mei_dev.h39
-rw-r--r--drivers/misc/mei/nfc.c52
-rw-r--r--drivers/misc/mei/pci-me.c12
-rw-r--r--drivers/misc/mei/pci-txe.c1
-rw-r--r--drivers/misc/mei/wd.c9
-rw-r--r--drivers/misc/mic/card/mic_virtio.c14
-rw-r--r--drivers/misc/mic/card/mic_x100.c1
-rw-r--r--drivers/misc/pch_phub.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c17
-rw-r--r--drivers/mmc/host/au1xmmc.c1
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c1
-rw-r--r--drivers/mmc/host/tmio_mmc.c1
-rw-r--r--drivers/mmc/host/usdhi6rol0.c1
-rw-r--r--drivers/mmc/host/wbsd.c1
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c1
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c28
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c3
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c1
-rw-r--r--drivers/mtd/devices/docg3.c123
-rw-r--r--drivers/mtd/devices/m25p80.c30
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c6
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/devices/pmc551.c3
-rw-r--r--drivers/mtd/devices/spear_smi.c1
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c1
-rw-r--r--drivers/mtd/inftlmount.c2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c1
-rw-r--r--drivers/mtd/maps/ixp4xx.c1
-rw-r--r--drivers/mtd/maps/lantiq-flash.c1
-rw-r--r--drivers/mtd/maps/physmap.c1
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/maps/plat-ram.c1
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c1
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c1
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/maps/sun_uflash.c1
-rw-r--r--drivers/mtd/nand/Kconfig12
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/ams-delta.c1
-rw-r--r--drivers/mtd/nand/atmel_nand.c122
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h4
-rw-r--r--drivers/mtd/nand/au1550nd.c1
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c1
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c1
-rw-r--r--drivers/mtd/nand/cafe_nand.c45
-rw-r--r--drivers/mtd/nand/davinci_nand.c1
-rw-r--r--drivers/mtd/nand/denali_dt.c1
-rw-r--r--drivers/mtd/nand/docg4.c1
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c1
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c11
-rw-r--r--drivers/mtd/nand/fsl_upm.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c1
-rw-r--r--drivers/mtd/nand/gpio.c5
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c153
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c201
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/jz4740_nand.c1
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c1
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c1
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c12
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c42
-rw-r--r--drivers/mtd/nand/ndfc.c1
-rw-r--r--drivers/mtd/nand/nuc900_nand.c1
-rw-r--r--drivers/mtd/nand/omap2.c25
-rw-r--r--drivers/mtd/nand/omap_elm.c1
-rw-r--r--drivers/mtd/nand/orion_nand.c40
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/plat_nand.c1
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/mtd/nand/sh_flctl.c1
-rw-r--r--drivers/mtd/nand/sharpsl.c1
-rw-r--r--drivers/mtd/nand/socrates_nand.c1
-rw-r--r--drivers/mtd/nand/sunxi_nand.c1432
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c1
-rw-r--r--drivers/mtd/onenand/generic.c1
-rw-r--r--drivers/mtd/onenand/omap2.c1
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c24
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c313
-rw-r--r--drivers/mtd/tests/oobtest.c77
-rw-r--r--drivers/mtd/tests/torturetest.c4
-rw-r--r--drivers/mtd/ubi/attach.c126
-rw-r--r--drivers/mtd/ubi/block.c41
-rw-r--r--drivers/mtd/ubi/build.c126
-rw-r--r--drivers/mtd/ubi/cdev.c36
-rw-r--r--drivers/mtd/ubi/debug.c10
-rw-r--r--drivers/mtd/ubi/eba.c53
-rw-r--r--drivers/mtd/ubi/fastmap.c96
-rw-r--r--drivers/mtd/ubi/io.c150
-rw-r--r--drivers/mtd/ubi/kapi.c6
-rw-r--r--drivers/mtd/ubi/misc.c4
-rw-r--r--drivers/mtd/ubi/ubi.h13
-rw-r--r--drivers/mtd/ubi/upd.c10
-rw-r--r--drivers/mtd/ubi/vmt.c69
-rw-r--r--drivers/mtd/ubi/vtbl.c71
-rw-r--r--drivers/mtd/ubi/wl.c80
-rw-r--r--drivers/net/Kconfig20
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_3ad.c106
-rw-r--r--drivers/net/bonding/bond_alb.c23
-rw-r--r--drivers/net/bonding/bond_debugfs.c4
-rw-r--r--drivers/net/bonding/bond_main.c36
-rw-r--r--drivers/net/bonding/bond_netlink.c2
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c2
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c13
-rw-r--r--drivers/net/can/c_can/c_can.h25
-rw-r--r--drivers/net/can/c_can/c_can_platform.c202
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/cc770/cc770_isa.c1
-rw-r--r--drivers/net/can/cc770/cc770_platform.c1
-rw-r--r--drivers/net/can/dev.c78
-rw-r--r--drivers/net/can/flexcan.c102
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c1
-rw-r--r--drivers/net/can/mscan/mscan.c48
-rw-r--r--drivers/net/can/rcar_can.c1
-rw-r--r--drivers/net/can/sja1000/sja1000.c51
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c1
-rw-r--r--drivers/net/can/slcan.c7
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c14
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c23
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h48
-rw-r--r--drivers/net/can/vcan.c5
-rw-r--r--drivers/net/can/xilinx_can.c1
-rw-r--r--drivers/net/dsa/Kconfig17
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/bcm_sf2.c23
-rw-r--r--drivers/net/dsa/mv88e6060.c5
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c25
-rw-r--r--drivers/net/dsa/mv88e6131.c12
-rw-r--r--drivers/net/dsa/mv88e6171.c26
-rw-r--r--drivers/net/dsa/mv88e6352.c788
-rw-r--r--drivers/net/dsa/mv88e6xxx.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx.h16
-rw-r--r--drivers/net/dummy.c20
-rw-r--r--drivers/net/ethernet/3com/typhoon.c4
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c1
-rw-r--r--drivers/net/ethernet/8390/ne.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c1
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h63
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c208
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c446
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c535
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c89
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c86
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h112
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c12
-rw-r--r--drivers/net/ethernet/apple/macmace.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig5
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c37
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c75
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c170
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h29
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c126
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c1
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c37
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig4
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c1
-rw-r--r--drivers/net/ethernet/cadence/macb.c450
-rw-r--r--drivers/net/ethernet/cadence/macb.h36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h46
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c158
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h52
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c611
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c303
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c537
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h160
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h90
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2271
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c138
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c289
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c555
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c28
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c38
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c61
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rss.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h20
-rw-r--r--drivers/net/ethernet/davicom/Kconfig2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c20
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c66
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.h237
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c157
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c148
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h38
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/hp/hp100.c7
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c25
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2184
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c101
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c252
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c129
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c127
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c488
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c198
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h62
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h15
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2136
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c47
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c143
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c100
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c39
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c183
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c547
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c275
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c134
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h329
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c85
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c1432
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h43
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c757
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/jme.c12
-rw-r--r--drivers/net/ethernet/lantiq_etop.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c258
-rw-r--r--drivers/net/ethernet/marvell/skge.c3
-rw-r--r--drivers/net/ethernet/marvell/sky2.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c425
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c557
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c303
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c593
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c553
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h118
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c303
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c15
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c5
-rw-r--r--drivers/net/ethernet/netx-eth.c1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c1
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c11
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/8139too.c22
-rw-r--r--drivers/net/ethernet/realtek/atp.h246
-rw-r--r--drivers/net/ethernet/realtek/r8169.c100
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c28
-rw-r--r--drivers/net/ethernet/rocker/Kconfig27
-rw-r--r--drivers/net/ethernet/rocker/Makefile5
-rw-r--r--drivers/net/ethernet/rocker/rocker.c4375
-rw-r--r--drivers/net/ethernet/rocker/rocker.h428
-rw-r--r--drivers/net/ethernet/s6gmac.c1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c18
-rw-r--r--drivers/net/ethernet/sfc/falcon.c10
-rw-r--r--drivers/net/ethernet/sfc/farch.c27
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h19
-rw-r--r--drivers/net/ethernet/sfc/nic.h112
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c269
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c211
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c135
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c68
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h28
-rw-r--r--drivers/net/ethernet/sun/cassini.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunqe.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c775
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h17
-rw-r--r--drivers/net/ethernet/ti/Kconfig8
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c1
-rw-r--r--drivers/net/ethernet/via/via-velocity.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c7
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c7
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c3
-rw-r--r--drivers/net/fddi/defxx.c187
-rw-r--r--drivers/net/fddi/defxx.h4
-rw-r--r--drivers/net/hamradio/6pack.c3
-rw-r--r--drivers/net/hyperv/netvsc.c10
-rw-r--r--drivers/net/hyperv/netvsc_drv.c9
-rw-r--r--drivers/net/hyperv/rndis_filter.c15
-rw-r--r--drivers/net/ieee802154/Kconfig10
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/at86rf230.c445
-rw-r--r--drivers/net/ieee802154/cc2520.c73
-rw-r--r--drivers/net/ieee802154/fakehard.c430
-rw-r--r--drivers/net/ieee802154/fakelb.c92
-rw-r--r--drivers/net/ieee802154/mrf24j40.c104
-rw-r--r--drivers/net/ipvlan/Makefile7
-rw-r--r--drivers/net/ipvlan/ipvlan.h121
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c607
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c795
-rw-r--r--drivers/net/irda/act200l-sir.c11
-rw-r--r--drivers/net/irda/actisys-sir.c3
-rw-r--r--drivers/net/irda/ali-ircc.c264
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/donauboe.c96
-rw-r--r--drivers/net/irda/girbil-sir.c14
-rw-r--r--drivers/net/irda/irda-usb.c142
-rw-r--r--drivers/net/irda/irtty-sir.c18
-rw-r--r--drivers/net/irda/litelink-sir.c8
-rw-r--r--drivers/net/irda/ma600-sir.c20
-rw-r--r--drivers/net/irda/mcp2120-sir.c14
-rw-r--r--drivers/net/irda/mcs7780.c53
-rw-r--r--drivers/net/irda/nsc-ircc.c229
-rw-r--r--drivers/net/irda/old_belkin-sir.c8
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c1
-rw-r--r--drivers/net/irda/sir_dev.c79
-rw-r--r--drivers/net/irda/sir_dongle.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c255
-rw-r--r--drivers/net/irda/tekram-sir.c15
-rw-r--r--drivers/net/irda/toim3232-sir.c12
-rw-r--r--drivers/net/irda/via-ircc.c175
-rw-r--r--drivers/net/irda/vlsi_ir.c145
-rw-r--r--drivers/net/irda/vlsi_ir.h3
-rw-r--r--drivers/net/irda/w83977af_ir.c77
-rw-r--r--drivers/net/macvlan.c19
-rw-r--r--drivers/net/macvtap.c232
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c18
-rw-r--r--drivers/net/phy/amd.c17
-rw-r--r--drivers/net/phy/at803x.c14
-rw-r--r--drivers/net/phy/bcm63xx.c15
-rw-r--r--drivers/net/phy/bcm7xxx.c151
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/broadcom.c15
-rw-r--r--drivers/net/phy/cicada.c15
-rw-r--r--drivers/net/phy/davicom.c15
-rw-r--r--drivers/net/phy/et1011c.c17
-rw-r--r--drivers/net/phy/fixed_phy.c (renamed from drivers/net/phy/fixed.c)1
-rw-r--r--drivers/net/phy/icplus.c15
-rw-r--r--drivers/net/phy/lxt.c15
-rw-r--r--drivers/net/phy/marvell.c67
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c1
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c38
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c1
-rw-r--r--drivers/net/phy/mdio-octeon.c1
-rw-r--r--drivers/net/phy/micrel.c344
-rw-r--r--drivers/net/phy/national.c17
-rw-r--r--drivers/net/phy/qsemi.c17
-rw-r--r--drivers/net/phy/realtek.c13
-rw-r--r--drivers/net/phy/smsc.c14
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c15
-rw-r--r--drivers/net/phy/vitesse.c14
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c3
-rw-r--r--drivers/net/tun.c322
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/cdc-phonet.c6
-rw-r--r--drivers/net/usb/cdc_ether.c19
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/hso.c3
-rw-r--r--drivers/net/usb/r8152.c229
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/virtio_net.c161
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c15
-rw-r--r--drivers/net/vxlan.c39
-rw-r--r--drivers/net/wireless/ath/ath.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c92
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c154
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h100
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c1154
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h46
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h53
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c1256
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c746
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c579
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h207
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1179
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h401
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig14
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c234
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h28
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h13
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile9
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c181
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h498
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h144
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c338
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c (renamed from drivers/net/wireless/ath/ath9k/spectral.c)165
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h (renamed from drivers/net/wireless/ath/ath9k/spectral.h)29
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c463
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c98
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c73
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c361
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h38
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c37
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c136
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h26
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c10
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/btcoex.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bus.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h)11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c)338
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h)11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/common.c168
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/commonring.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c)47
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd.h)10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/debug.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c)6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/debug.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h)6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c400
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c102
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h95
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/of.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c)55
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h)8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h75
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/vendor.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c180
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c31
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c16
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h2
-rw-r--r--drivers/net/wireless/cw1200/scan.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c15
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h5
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c15
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c21
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h5
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c51
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h308
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h244
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h25
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c618
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c200
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h9
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h297
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h179
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c74
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c106
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c531
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h154
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/offloading.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c123
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c150
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c786
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c303
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tdls.c569
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c146
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c75
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c37
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c404
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c99
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c661
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h28
-rw-r--r--drivers/net/wireless/mwifiex/11n.c4
-rw-r--r--drivers/net/wireless/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c6
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c130
-rw-r--r--drivers/net/wireless/mwifiex/decl.h19
-rw-r--r--drivers/net/wireless/mwifiex/fw.h34
-rw-r--r--drivers/net/wireless/mwifiex/init.c22
-rw-r--r--drivers/net/wireless/mwifiex/join.c4
-rw-r--r--drivers/net/wireless/mwifiex/main.c123
-rw-r--r--drivers/net/wireless/mwifiex/main.h63
-rw-r--r--drivers/net/wireless/mwifiex/scan.c78
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h7
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c14
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c3
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c6
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c288
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c35
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c5
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c11
-rw-r--r--drivers/net/wireless/mwifiex/usb.c51
-rw-r--r--drivers/net/wireless/mwifiex/usb.h3
-rw-r--r--drivers/net/wireless/mwifiex/util.c38
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c42
-rw-r--r--drivers/net/wireless/mwl8k.c7
-rw-r--r--drivers/net/wireless/p54/net2280.h451
-rw-r--r--drivers/net/wireless/p54/p54usb.h13
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c45
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h41
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c24
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/Makefile5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/phy.c12
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c6
-rw-r--r--drivers/net/xen-netback/common.h4
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c55
-rw-r--r--drivers/net/xen-netback/xenbus.c21
-rw-r--r--drivers/net/xen-netfront.c267
-rw-r--r--drivers/nfc/nfcwilink.c1
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/i2c.c41
-rw-r--r--drivers/nfc/st21nfca/st21nfca.c32
-rw-r--r--drivers/nfc/st21nfca/st21nfca.h2
-rw-r--r--drivers/nfc/st21nfca/st21nfca_dep.c47
-rw-r--r--drivers/nfc/st21nfca/st21nfca_dep.h4
-rw-r--r--drivers/nfc/st21nfcb/i2c.c39
-rw-r--r--drivers/nfc/st21nfcb/ndlc.c2
-rw-r--r--drivers/of/Kconfig11
-rw-r--r--drivers/of/Makefile5
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/base.c135
-rw-r--r--drivers/of/dynamic.c218
-rw-r--r--drivers/of/fdt.c111
-rw-r--r--drivers/of/irq.c1
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/overlay.c562
-rw-r--r--drivers/of/pdt.c27
-rw-r--r--drivers/of/platform.c115
-rw-r--r--drivers/of/resolver.c128
-rw-r--r--drivers/of/testcase-data/testcases.dts50
-rw-r--r--drivers/of/unittest-data/testcases.dts79
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi (renamed from drivers/of/testcase-data/tests-interrupts.dtsi)0
-rw-r--r--drivers/of/unittest-data/tests-match.dtsi (renamed from drivers/of/testcase-data/tests-match.dtsi)0
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi180
-rw-r--r--drivers/of/unittest-data/tests-phandle.dtsi (renamed from drivers/of/testcase-data/tests-phandle.dtsi)0
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi (renamed from drivers/of/testcase-data/tests-platform.dtsi)0
-rw-r--r--drivers/of/unittest.c (renamed from drivers/of/selftest.c)633
-rw-r--r--drivers/parport/parport_amiga.c1
-rw-r--r--drivers/parport/parport_ax88796.c1
-rw-r--r--drivers/parport/parport_pc.c8
-rw-r--r--drivers/parport/parport_serial.c10
-rw-r--r--drivers/parport/parport_sunbpp.c1
-rw-r--r--drivers/pci/host/pci-dra7xx.c1
-rw-r--r--drivers/pci/host/pci-exynos.c1
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-imx6.c1
-rw-r--r--drivers/pci/host/pci-keystone.c1
-rw-r--r--drivers/pci/host/pci-mvebu.c1
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/host/pci-tegra.c1
-rw-r--r--drivers/pci/host/pci-xgene.c1
-rw-r--r--drivers/pci/host/pcie-rcar.c1
-rw-r--r--drivers/pci/host/pcie-spear13xx.c1
-rw-r--r--drivers/pci/host/pcie-xilinx.c1
-rw-r--r--drivers/pci/pci-sysfs.c39
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/at91_cf.c1
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/pcmcia/electra_cf.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/m32r_cfc.c1
-rw-r--r--drivers/pcmcia/m32r_pcc.c1
-rw-r--r--drivers/pcmcia/omap_cf.c1
-rw-r--r--drivers/pcmcia/pxa2xx_base.c1
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c1
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1111_badge4.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c11
-rw-r--r--drivers/pcmcia/sa1111_generic.h4
-rw-r--r--drivers/pcmcia/sa1111_jornada720.c3
-rw-r--r--drivers/pcmcia/sa11xx_base.c14
-rw-r--r--drivers/pcmcia/soc_common.c4
-rw-r--r--drivers/pcmcia/tcic.c1
-rw-r--r--drivers/pcmcia/vrc4171_card.c1
-rw-r--r--drivers/pcmcia/xxs1500_ss.c1
-rw-r--r--drivers/phy/Kconfig23
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/phy-armada375-usb2.c158
-rw-r--r--drivers/phy/phy-bcm-kona-usb2.c2
-rw-r--r--drivers/phy/phy-berlin-sata.c36
-rw-r--r--drivers/phy/phy-berlin-usb.c223
-rw-r--r--drivers/phy/phy-core.c115
-rw-r--r--drivers/phy/phy-exynos-dp-video.c2
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c2
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c139
-rw-r--r--drivers/phy/phy-exynos5250-sata.c2
-rw-r--r--drivers/phy/phy-hix5hd2-sata.c7
-rw-r--r--drivers/phy/phy-miphy28lp.c1283
-rw-r--r--drivers/phy/phy-miphy365x.c7
-rw-r--r--drivers/phy/phy-mvebu-sata.c2
-rw-r--r--drivers/phy/phy-omap-usb2.c16
-rw-r--r--drivers/phy/phy-qcom-apq8064-sata.c3
-rw-r--r--drivers/phy/phy-qcom-ipq806x-sata.c3
-rw-r--r--drivers/phy/phy-rcar-gen2.c2
-rw-r--r--drivers/phy/phy-samsung-usb2.c3
-rw-r--r--drivers/phy/phy-spear1310-miphy.c2
-rw-r--r--drivers/phy/phy-spear1340-miphy.c2
-rw-r--r--drivers/phy/phy-stih407-usb.c2
-rw-r--r--drivers/phy/phy-stih41x-usb.c7
-rw-r--r--drivers/phy/phy-sun4i-usb.c11
-rw-r--r--drivers/phy/phy-ti-pipe3.c2
-rw-r--r--drivers/phy/phy-twl4030-usb.c15
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/pinctrl/Kconfig13
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c1
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c1
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c72
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c25
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c1
-rw-r--r--drivers/pinctrl/intel/Kconfig27
-rw-r--r--drivers/pinctrl/intel/Makefile4
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c (renamed from drivers/pinctrl/pinctrl-baytrail.c)8
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c1519
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c34
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c20
-rw-r--r--drivers/pinctrl/pinconf-generic.c71
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91.c23
-rw-r--r--drivers/pinctrl/pinctrl-at91.h72
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c6
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c1
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c1
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c117
-rw-r--r--drivers/pinctrl/pinctrl-single.c1
-rw-r--r--drivers/pinctrl/pinctrl-st.c3
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c9
-rw-r--r--drivers/pinctrl/pinctrl-tegra-xusb.c23
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c1
-rw-r--r--drivers/pinctrl/pinctrl-tegra124.c1
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c1
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c1
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c1
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c1
-rw-r--r--drivers/pinctrl/pinctrl-xway.c1
-rw-r--r--drivers/pinctrl/qcom/Kconfig13
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c933
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c949
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c380
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c30
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c31
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c132
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h82
-rw-r--r--drivers/pinctrl/sh-pfc/core.c1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c1
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c3
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c1
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c1
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c1
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c1
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c1
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c749
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h1
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-vt8500.c1
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8505.c1
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8650.c1
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c1
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8850.c1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c1
-rw-r--r--drivers/platform/x86/Kconfig7
-rw-r--r--drivers/platform/x86/acer-wmi.c1
-rw-r--r--drivers/platform/x86/acerhdf.c266
-rw-r--r--drivers/platform/x86/alienware-wmi.c1
-rw-r--r--drivers/platform/x86/amilo-rfkill.c1
-rw-r--r--drivers/platform/x86/asus-laptop.c4
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c3
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c1058
-rw-r--r--drivers/platform/x86/dell-smo8800.c10
-rw-r--r--drivers/platform/x86/dell-wmi.c176
-rw-r--r--drivers/platform/x86/eeepc-laptop.c214
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c7
-rw-r--r--drivers/platform/x86/hdaps.c1
-rw-r--r--drivers/platform/x86/hp-wireless.c3
-rw-r--r--drivers/platform/x86/hp-wmi.c1
-rw-r--r--drivers/platform/x86/hp_accel.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/intel_ips.c4
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c1
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c1
-rw-r--r--drivers/platform/x86/intel_oaktrail.c4
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c1
-rw-r--r--drivers/platform/x86/msi-laptop.c3
-rw-r--r--drivers/platform/x86/msi-wmi.c3
-rw-r--r--drivers/platform/x86/samsung-q10.c1
-rw-r--r--drivers/platform/x86/sony-laptop.c7
-rw-r--r--drivers/platform/x86/tc1100-wmi.c1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c118
-rw-r--r--drivers/platform/x86/toshiba_acpi.c166
-rw-r--r--drivers/platform/x86/xo1-rfkill.c1
-rw-r--r--drivers/power/88pm860x_battery.c1
-rw-r--r--drivers/power/88pm860x_charger.c1
-rw-r--r--drivers/power/ab8500_btemp.c1
-rw-r--r--drivers/power/ab8500_charger.c1
-rw-r--r--drivers/power/ab8500_fg.c1
-rw-r--r--drivers/power/abx500_chargalg.c1
-rw-r--r--drivers/power/bq27x00_battery.c1
-rw-r--r--drivers/power/charger-manager.c1
-rw-r--r--drivers/power/da9030_battery.c1
-rw-r--r--drivers/power/da9052-battery.c1
-rw-r--r--drivers/power/ds2782_battery.c8
-rw-r--r--drivers/power/generic-adc-battery.c1
-rw-r--r--drivers/power/gpio-charger.c73
-rw-r--r--drivers/power/intel_mid_battery.c1
-rw-r--r--drivers/power/jz4740-battery.c1
-rw-r--r--drivers/power/lp8788-charger.c1
-rw-r--r--drivers/power/max14577_charger.c1
-rw-r--r--drivers/power/max8903_charger.c1
-rw-r--r--drivers/power/max8997_charger.c1
-rw-r--r--drivers/power/max8998_charger.c1
-rw-r--r--drivers/power/olpc_battery.c1
-rw-r--r--drivers/power/reset/as3722-poweroff.c1
-rw-r--r--drivers/power/reset/axxia-reset.c21
-rw-r--r--drivers/power/reset/brcmstb-reboot.c29
-rw-r--r--drivers/power/reset/gpio-poweroff.c1
-rw-r--r--drivers/power/reset/gpio-restart.c1
-rw-r--r--drivers/power/reset/hisi-reboot.c20
-rw-r--r--drivers/power/reset/keystone-reset.c21
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c1
-rw-r--r--drivers/power/reset/qnap-poweroff.c1
-rw-r--r--drivers/power/reset/restart-poweroff.c1
-rw-r--r--drivers/power/reset/syscon-reboot.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c40
-rw-r--r--drivers/power/reset/xgene-reboot.c56
-rw-r--r--drivers/power/rx51_battery.c1
-rw-r--r--drivers/power/tps65090-charger.c1
-rw-r--r--drivers/power/twl4030_charger.c1
-rw-r--r--drivers/power/wm97xx_battery.c1
-rw-r--r--drivers/pps/clients/pps-gpio.c1
-rw-r--r--drivers/pwm/Kconfig22
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-ab8500.c1
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c299
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c1
-rw-r--r--drivers/pwm/pwm-bcm2835.c205
-rw-r--r--drivers/pwm/pwm-bfin.c1
-rw-r--r--drivers/pwm/pwm-clps711x.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c64
-rw-r--r--drivers/pwm/pwm-imx.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c1
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc32xx.c1
-rw-r--r--drivers/pwm/pwm-mxs.c1
-rw-r--r--drivers/pwm/pwm-puv3.c1
-rw-r--r--drivers/pwm/pwm-pxa.c1
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c1
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-spear.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c1
-rw-r--r--drivers/pwm/pwm-tipwmss.c1
-rw-r--r--drivers/pwm/pwm-vt8500.c1
-rw-r--r--drivers/regulator/88pm800.c1
-rw-r--r--drivers/regulator/88pm8607.c1
-rw-r--r--drivers/regulator/aat2870-regulator.c1
-rw-r--r--drivers/regulator/ab3100.c1
-rw-r--r--drivers/regulator/ab8500-ext.c1
-rw-r--r--drivers/regulator/ab8500.c1
-rw-r--r--drivers/regulator/anatop-regulator.c1
-rw-r--r--drivers/regulator/arizona-ldo1.c1
-rw-r--r--drivers/regulator/arizona-micsupp.c1
-rw-r--r--drivers/regulator/as3711-regulator.c1
-rw-r--r--drivers/regulator/as3722-regulator.c1
-rw-r--r--drivers/regulator/axp20x-regulator.c1
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1
-rw-r--r--drivers/regulator/da903x.c1
-rw-r--r--drivers/regulator/da9052-regulator.c1
-rw-r--r--drivers/regulator/da9055-regulator.c1
-rw-r--r--drivers/regulator/da9063-regulator.c1
-rw-r--r--drivers/regulator/db8500-prcmu.c1
-rw-r--r--drivers/regulator/dummy.c1
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/hi6421-regulator.c1
-rw-r--r--drivers/regulator/lp8788-buck.c1
-rw-r--r--drivers/regulator/lp8788-ldo.c2
-rw-r--r--drivers/regulator/max14577.c1
-rw-r--r--drivers/regulator/max77686.c1
-rw-r--r--drivers/regulator/max77693.c1
-rw-r--r--drivers/regulator/max77802.c1
-rw-r--r--drivers/regulator/max8907-regulator.c1
-rw-r--r--drivers/regulator/max8925-regulator.c1
-rw-r--r--drivers/regulator/max8997.c1
-rw-r--r--drivers/regulator/max8998.c1
-rw-r--r--drivers/regulator/mc13783-regulator.c1
-rw-r--r--drivers/regulator/mc13892-regulator.c1
-rw-r--r--drivers/regulator/palmas-regulator.c1
-rw-r--r--drivers/regulator/pbias-regulator.c1
-rw-r--r--drivers/regulator/pcap-regulator.c1
-rw-r--r--drivers/regulator/pwm-regulator.c1
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c1
-rw-r--r--drivers/regulator/rc5t583-regulator.c1
-rw-r--r--drivers/regulator/rk808-regulator.c1
-rw-r--r--drivers/regulator/s2mpa01.c1
-rw-r--r--drivers/regulator/s2mps11.c1
-rw-r--r--drivers/regulator/s5m8767.c1
-rw-r--r--drivers/regulator/stw481x-vmmc.c1
-rw-r--r--drivers/regulator/ti-abb-regulator.c1
-rw-r--r--drivers/regulator/tps6105x-regulator.c1
-rw-r--r--drivers/regulator/tps6507x-regulator.c1
-rw-r--r--drivers/regulator/tps65090-regulator.c1
-rw-r--r--drivers/regulator/tps65218-regulator.c1
-rw-r--r--drivers/regulator/tps6586x-regulator.c1
-rw-r--r--drivers/regulator/tps65910-regulator.c1
-rw-r--r--drivers/regulator/tps65912-regulator.c1
-rw-r--r--drivers/regulator/tps80031-regulator.c1
-rw-r--r--drivers/regulator/twl-regulator.c1
-rw-r--r--drivers/regulator/vexpress.c1
-rw-r--r--drivers/regulator/virtual.c1
-rw-r--r--drivers/regulator/wm831x-dcdc.c4
-rw-r--r--drivers/regulator/wm831x-isink.c1
-rw-r--r--drivers/regulator/wm831x-ldo.c3
-rw-r--r--drivers/regulator/wm8994-regulator.c1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c1
-rw-r--r--drivers/remoteproc/omap_remoteproc.c52
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c11
-rw-r--r--drivers/remoteproc/ste_modem_rproc.c1
-rw-r--r--drivers/reset/reset-socfpga.c1
-rw-r--r--drivers/reset/reset-sunxi.c1
-rw-r--r--drivers/reset/sti/reset-stih415.c1
-rw-r--r--drivers/reset/sti/reset-stih416.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c44
-rw-r--r--drivers/rtc/Kconfig11
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-88pm80x.c1
-rw-r--r--drivers/rtc/rtc-88pm860x.c1
-rw-r--r--drivers/rtc/rtc-ab3100.c1
-rw-r--r--drivers/rtc/rtc-ab8500.c1
-rw-r--r--drivers/rtc/rtc-at32ap700x.c1
-rw-r--r--drivers/rtc/rtc-at91rm9200.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-au1xxx.c1
-rw-r--r--drivers/rtc/rtc-bfin.c1
-rw-r--r--drivers/rtc/rtc-bq4802.c1
-rw-r--r--drivers/rtc/rtc-coh901331.c1
-rw-r--r--drivers/rtc/rtc-da9052.c1
-rw-r--r--drivers/rtc/rtc-da9055.c1
-rw-r--r--drivers/rtc/rtc-da9063.c1
-rw-r--r--drivers/rtc/rtc-davinci.c1
-rw-r--r--drivers/rtc/rtc-dm355evm.c1
-rw-r--r--drivers/rtc/rtc-ds1216.c1
-rw-r--r--drivers/rtc/rtc-ds1286.c1
-rw-r--r--drivers/rtc/rtc-ds1302.c1
-rw-r--r--drivers/rtc/rtc-ds1511.c1
-rw-r--r--drivers/rtc/rtc-ds1553.c1
-rw-r--r--drivers/rtc/rtc-ds1742.c1
-rw-r--r--drivers/rtc/rtc-ds2404.c1
-rw-r--r--drivers/rtc/rtc-efi.c1
-rw-r--r--drivers/rtc/rtc-ep93xx.c1
-rw-r--r--drivers/rtc/rtc-generic.c1
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c1
-rw-r--r--drivers/rtc/rtc-imxdi.c1
-rw-r--r--drivers/rtc/rtc-jz4740.c1
-rw-r--r--drivers/rtc/rtc-lp8788.c1
-rw-r--r--drivers/rtc/rtc-lpc32xx.c1
-rw-r--r--drivers/rtc/rtc-ls1x.c1
-rw-r--r--drivers/rtc/rtc-m48t35.c1
-rw-r--r--drivers/rtc/rtc-m48t59.c1
-rw-r--r--drivers/rtc/rtc-m48t86.c1
-rw-r--r--drivers/rtc/rtc-max77686.c1
-rw-r--r--drivers/rtc/rtc-max77802.c1
-rw-r--r--drivers/rtc/rtc-max8907.c1
-rw-r--r--drivers/rtc/rtc-max8925.c1
-rw-r--r--drivers/rtc/rtc-max8997.c1
-rw-r--r--drivers/rtc/rtc-max8998.c1
-rw-r--r--drivers/rtc/rtc-mc13xxx.c1
-rw-r--r--drivers/rtc/rtc-moxart.c1
-rw-r--r--drivers/rtc/rtc-mpc5121.c1
-rw-r--r--drivers/rtc/rtc-msm6242.c1
-rw-r--r--drivers/rtc/rtc-mv.c1
-rw-r--r--drivers/rtc/rtc-mxc.c1
-rw-r--r--drivers/rtc/rtc-nuc900.c1
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-opal.c261
-rw-r--r--drivers/rtc/rtc-palmas.c1
-rw-r--r--drivers/rtc/rtc-pcap.c1
-rw-r--r--drivers/rtc/rtc-pm8xxx.c1
-rw-r--r--drivers/rtc/rtc-ps3.c1
-rw-r--r--drivers/rtc/rtc-puv3.c1
-rw-r--r--drivers/rtc/rtc-rc5t583.c1
-rw-r--r--drivers/rtc/rtc-rp5c01.c1
-rw-r--r--drivers/rtc/rtc-rs5c313.c1
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/rtc/rtc-s5m.c1
-rw-r--r--drivers/rtc/rtc-sh.c1
-rw-r--r--drivers/rtc/rtc-sirfsoc.c1
-rw-r--r--drivers/rtc/rtc-snvs.c12
-rw-r--r--drivers/rtc/rtc-starfire.c1
-rw-r--r--drivers/rtc/rtc-stk17ta8.c1
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c1
-rw-r--r--drivers/rtc/rtc-sun4v.c1
-rw-r--r--drivers/rtc/rtc-sunxi.c1
-rw-r--r--drivers/rtc/rtc-tegra.c1
-rw-r--r--drivers/rtc/rtc-test.c1
-rw-r--r--drivers/rtc/rtc-tile.c1
-rw-r--r--drivers/rtc/rtc-tps6586x.c1
-rw-r--r--drivers/rtc/rtc-tps65910.c1
-rw-r--r--drivers/rtc/rtc-tps80031.c1
-rw-r--r--drivers/rtc/rtc-twl.c1
-rw-r--r--drivers/rtc/rtc-tx4939.c1
-rw-r--r--drivers/rtc/rtc-v3020.c1
-rw-r--r--drivers/rtc/rtc-vr41xx.c1
-rw-r--r--drivers/rtc/rtc-vt8500.c1
-rw-r--r--drivers/rtc/rtc-xgene.c1
-rw-r--r--drivers/s390/block/dasd.c31
-rw-r--r--drivers/s390/block/dasd_genhd.c26
-rw-r--r--drivers/s390/block/dcssblk.c1
-rw-r--r--drivers/s390/block/scm_blk.c222
-rw-r--r--drivers/s390/block/scm_blk.h6
-rw-r--r--drivers/s390/block/scm_blk_cluster.c69
-rw-r--r--drivers/s390/block/xpram.c1
-rw-r--r--drivers/s390/char/Kconfig10
-rw-r--r--drivers/s390/char/monwriter.c1
-rw-r--r--drivers/s390/char/sclp.c1
-rw-r--r--drivers/s390/char/sclp_async.c3
-rw-r--r--drivers/s390/char/tape_3590.c4
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/kvm/kvm_virtio.c11
-rw-r--r--drivers/s390/kvm/virtio_ccw.c203
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c8
-rw-r--r--drivers/s390/net/lcs.c11
-rw-r--r--drivers/s390/net/qeth_core.h16
-rw-r--r--drivers/s390/net/qeth_core_main.c24
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3.h4
-rw-r--r--drivers/s390/net/qeth_l3_main.c10
-rw-r--r--drivers/sbus/char/bbc_i2c.c1
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c1
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/a3000.c1
-rw-r--r--drivers/scsi/a4000t.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c10
-rw-r--r--drivers/scsi/bvme6000_scsi.c1
-rw-r--r--drivers/scsi/csiostor/csio_attr.c8
-rw-r--r--drivers/scsi/csiostor/csio_hw.c58
-rw-r--r--drivers/scsi/csiostor/csio_hw.h1
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h49
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c15
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c21
-rw-r--r--drivers/scsi/csiostor/csio_init.c78
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c20
-rw-r--r--drivers/scsi/csiostor/csio_mb.c343
-rw-r--r--drivers/scsi/csiostor/csio_mb.h12
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c26
-rw-r--r--drivers/scsi/csiostor/csio_wr.h2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c251
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c4
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h4
-rw-r--r--drivers/scsi/fcoe/fcoe.c6
-rw-r--r--drivers/scsi/isci/request.c4
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/jazz_esp.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c66
-rw-r--r--drivers/scsi/mac_esp.c1
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mvme16x_scsi.c1
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/ps3rom.c1
-rw-r--r--drivers/scsi/qlogicpti.c1
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/sgiwd93.c1
-rw-r--r--drivers/scsi/sni_53c710.c1
-rw-r--r--drivers/scsi/sun3x_esp.c1
-rw-r--r--drivers/scsi/sun_esp.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c1
-rw-r--r--drivers/scsi/virtio_scsi.c50
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c1
-rw-r--r--drivers/soc/ti/knav_dma.c1
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c1
-rw-r--r--drivers/spi/Kconfig17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-adi-v3.c1
-rw-r--r--drivers/spi/spi-altera.c1
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c116
-rw-r--r--drivers/spi/spi-au1550.c1
-rw-r--r--drivers/spi/spi-bcm2835.c1
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c1
-rw-r--r--drivers/spi/spi-bcm63xx.c1
-rw-r--r--drivers/spi/spi-bfin-sport.c1
-rw-r--r--drivers/spi/spi-bfin5xx.c1
-rw-r--r--drivers/spi/spi-cadence.c33
-rw-r--r--drivers/spi/spi-clps711x.c1
-rw-r--r--drivers/spi/spi-davinci.c1
-rw-r--r--drivers/spi/spi-dw-mid.c114
-rw-r--r--drivers/spi/spi-dw-mmio.c1
-rw-r--r--drivers/spi/spi-dw.h2
-rw-r--r--drivers/spi/spi-efm32.c1
-rw-r--r--drivers/spi/spi-ep93xx.c1
-rw-r--r--drivers/spi/spi-falcon.c1
-rw-r--r--drivers/spi/spi-fsl-cpm.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c3
-rw-r--r--drivers/spi/spi-fsl-espi.c60
-rw-r--r--drivers/spi/spi-fsl-lib.c59
-rw-r--r--drivers/spi/spi-fsl-lib.h10
-rw-r--r--drivers/spi/spi-fsl-spi.c19
-rw-r--r--drivers/spi/spi-gpio.c38
-rw-r--r--drivers/spi/spi-img-spfi.c746
-rw-r--r--drivers/spi/spi-imx.c1
-rw-r--r--drivers/spi/spi-meson-spifc.c462
-rw-r--r--drivers/spi/spi-mpc512x-psc.c1
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c1
-rw-r--r--drivers/spi/spi-mpc52xx.c1
-rw-r--r--drivers/spi/spi-mxs.c13
-rw-r--r--drivers/spi/spi-nuc900.c1
-rw-r--r--drivers/spi/spi-oc-tiny.c1
-rw-r--r--drivers/spi/spi-octeon.c1
-rw-r--r--drivers/spi/spi-omap-100k.c1
-rw-r--r--drivers/spi/spi-omap-uwire.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c1
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-ppc4xx.c1
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c8
-rw-r--r--drivers/spi/spi-pxa2xx.c305
-rw-r--r--drivers/spi/spi-pxa2xx.h16
-rw-r--r--drivers/spi/spi-qup.c1
-rw-r--r--drivers/spi/spi-rockchip.c3
-rw-r--r--drivers/spi/spi-rspi.c1
-rw-r--r--drivers/spi/spi-s3c24xx.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c42
-rw-r--r--drivers/spi/spi-sh-hspi.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-sh-sci.c1
-rw-r--r--drivers/spi/spi-sh.c1
-rw-r--r--drivers/spi/spi-sirf.c13
-rw-r--r--drivers/spi/spi-sun4i.c1
-rw-r--r--drivers/spi/spi-sun6i.c1
-rw-r--r--drivers/spi/spi-tegra114.c1
-rw-r--r--drivers/spi/spi-tegra20-sflash.c1
-rw-r--r--drivers/spi/spi-tegra20-slink.c1
-rw-r--r--drivers/spi/spi-ti-qspi.c1
-rw-r--r--drivers/spi/spi-topcliff-pch.c1
-rw-r--r--drivers/spi/spi-txx9.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c1
-rw-r--r--drivers/spi/spi.c308
-rw-r--r--drivers/spi/spidev.c16
-rw-r--r--drivers/spmi/spmi-pmic-arb.c1
-rw-r--r--drivers/ssb/driver_mipscore.c14
-rw-r--r--drivers/ssb/pcihost_wrapper.c33
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile3
-rw-r--r--drivers/staging/android/Kconfig30
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO7
-rw-r--r--drivers/staging/android/ashmem.c5
-rw-r--r--drivers/staging/android/ion/ion.c10
-rw-r--r--drivers/staging/android/ion/ion.h2
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c6
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h4
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c6
-rw-r--r--drivers/staging/android/sync_debug.c5
-rw-r--r--drivers/staging/android/timed_gpio.c8
-rw-r--r--drivers/staging/bcm/Adapter.h474
-rw-r--r--drivers/staging/bcm/Bcmchar.c2652
-rw-r--r--drivers/staging/bcm/Bcmnet.c240
-rw-r--r--drivers/staging/bcm/CmHost.c2254
-rw-r--r--drivers/staging/bcm/CmHost.h62
-rw-r--r--drivers/staging/bcm/DDRInit.c1355
-rw-r--r--drivers/staging/bcm/DDRInit.h9
-rw-r--r--drivers/staging/bcm/Debug.h242
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c241
-rw-r--r--drivers/staging/bcm/HostMIBSInterface.h192
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c476
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h85
-rw-r--r--drivers/staging/bcm/InterfaceAdapter.h79
-rw-r--r--drivers/staging/bcm/InterfaceDld.c317
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c274
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.h15
-rw-r--r--drivers/staging/bcm/InterfaceInit.c729
-rw-r--r--drivers/staging/bcm/InterfaceInit.h26
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c190
-rw-r--r--drivers/staging/bcm/InterfaceIsr.h15
-rw-r--r--drivers/staging/bcm/InterfaceMacros.h18
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c247
-rw-r--r--drivers/staging/bcm/InterfaceMisc.h42
-rw-r--r--drivers/staging/bcm/InterfaceRx.c289
-rw-r--r--drivers/staging/bcm/InterfaceRx.h7
-rw-r--r--drivers/staging/bcm/InterfaceTx.c213
-rw-r--r--drivers/staging/bcm/InterfaceTx.h7
-rw-r--r--drivers/staging/bcm/Ioctl.h226
-rw-r--r--drivers/staging/bcm/Kconfig6
-rw-r--r--drivers/staging/bcm/LeakyBucket.c364
-rw-r--r--drivers/staging/bcm/Macros.h352
-rw-r--r--drivers/staging/bcm/Makefile12
-rw-r--r--drivers/staging/bcm/Misc.c1587
-rw-r--r--drivers/staging/bcm/PHSDefines.h94
-rw-r--r--drivers/staging/bcm/PHSModule.c1703
-rw-r--r--drivers/staging/bcm/PHSModule.h59
-rw-r--r--drivers/staging/bcm/Protocol.h128
-rw-r--r--drivers/staging/bcm/Prototypes.h217
-rw-r--r--drivers/staging/bcm/Qos.c1200
-rw-r--r--drivers/staging/bcm/Queue.h29
-rw-r--r--drivers/staging/bcm/TODO26
-rw-r--r--drivers/staging/bcm/Transmit.c271
-rw-r--r--drivers/staging/bcm/Typedefs.h47
-rw-r--r--drivers/staging/bcm/cntrl_SignalingInterface.h311
-rw-r--r--drivers/staging/bcm/headers.h78
-rw-r--r--drivers/staging/bcm/hostmibs.c164
-rw-r--r--drivers/staging/bcm/led_control.c952
-rw-r--r--drivers/staging/bcm/led_control.h84
-rw-r--r--drivers/staging/bcm/nvm.c4661
-rw-r--r--drivers/staging/bcm/nvm.h286
-rw-r--r--drivers/staging/bcm/sort.c52
-rw-r--r--drivers/staging/bcm/target_params.h57
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c145
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h18
-rw-r--r--drivers/staging/clocking-wizard/Kconfig9
-rw-r--r--drivers/staging/clocking-wizard/Makefile1
-rw-r--r--drivers/staging/clocking-wizard/TODO12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c341
-rw-r--r--drivers/staging/clocking-wizard/dt-binding.txt30
-rw-r--r--drivers/staging/comedi/Kconfig28
-rw-r--r--drivers/staging/comedi/Makefile7
-rw-r--r--drivers/staging/comedi/comedi.h13
-rw-r--r--drivers/staging/comedi/comedi_buf.c157
-rw-r--r--drivers/staging/comedi/comedi_compat32.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c279
-rw-r--r--drivers/staging/comedi/comedi_pci.c16
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c16
-rw-r--r--drivers/staging/comedi/comedi_usb.c16
-rw-r--r--drivers/staging/comedi/comedidev.h139
-rw-r--r--drivers/staging/comedi/drivers.c102
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c274
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h144
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c360
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c480
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c28
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c381
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c2050
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c3003
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c77
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c5
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c117
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c268
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c32
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c1153
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c125
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c7
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_tcw.h56
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c32
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c1
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c40
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c202
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c76
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c338
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c471
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c8
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c1
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c34
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236_common.c5
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c53
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c193
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c2
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c1
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c283
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c295
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c83
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c9
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h43
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c63
-rw-r--r--drivers/staging/comedi/drivers/dac02.c1
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c1
-rw-r--r--drivers/staging/comedi/drivers/das08.c3
-rw-r--r--drivers/staging/comedi/drivers/das16.c23
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c7
-rw-r--r--drivers/staging/comedi/drivers/das1800.c62
-rw-r--r--drivers/staging/comedi/drivers/das6402.c202
-rw-r--r--drivers/staging/comedi/drivers/das800.c38
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c702
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c1
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c1
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c2
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c62
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c9
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c4
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c4
-rw-r--r--drivers/staging/comedi/drivers/fl512.c1
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c8
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c9
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1
-rw-r--r--drivers/staging/comedi/drivers/me4000.c120
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c5
-rw-r--r--drivers/staging/comedi/drivers/mf6x4.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.c9
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c21
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c56
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h6
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c152
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c166
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c14
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h16
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c2
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c25
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c40
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c26
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c34
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c32
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c34
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c38
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c77
-rw-r--r--drivers/staging/comedi/drivers/rti800.c1
-rw-r--r--drivers/staging/comedi/drivers/rti802.c1
-rw-r--r--drivers/staging/comedi/drivers/s526.c1
-rw-r--r--drivers/staging/comedi/drivers/s626.c38
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c4
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c400
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c145
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c350
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c8
-rw-r--r--drivers/staging/comedi/range.c36
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c2
-rw-r--r--drivers/staging/dgap/dgap.c7462
-rw-r--r--drivers/staging/dgap/dgap.h3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c30
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c33
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h61
-rw-r--r--drivers/staging/dgnc/dgnc_kcompat.h18
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c30
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c37
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c146
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c8
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/boot.h34
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h30
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c50
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c212
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c935
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c1204
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c400
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c436
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h60
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c97
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h4
-rw-r--r--drivers/staging/fwserial/fwserial.c3
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c10
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h8
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c6
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c57
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c81
-rw-r--r--drivers/staging/iio/Documentation/iio_event_monitor.c32
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h7
-rw-r--r--drivers/staging/iio/Documentation/lsiio.c20
-rw-r--r--drivers/staging/iio/accel/Kconfig39
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c3
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c1
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/staging/iio/adc/ad7816.c3
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c3
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c7
-rw-r--r--drivers/staging/iio/adc/spear_adc.c5
-rw-r--r--drivers/staging/iio/addac/Kconfig9
-rw-r--r--drivers/staging/iio/addac/adt7316.h3
-rw-r--r--drivers/staging/iio/gyro/Kconfig5
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c17
-rw-r--r--drivers/staging/iio/meter/Kconfig15
-rw-r--r--drivers/staging/iio/trigger/Kconfig5
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c4
-rw-r--r--drivers/staging/imx-drm/TODO17
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h12
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c17
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c34
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c60
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c89
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c15
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c18
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c98
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c52
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c34
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c75
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c19
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c63
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c8
-rw-r--r--drivers/staging/lustre/lustre/Kconfig2
-rw-r--r--drivers/staging/lustre/lustre/include/dt_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h8
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_capa.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h7
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c219
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c63
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c78
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c45
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c18
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c24
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c28
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c32
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c38
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c96
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_capa.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h30
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c116
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c9
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_rmtacl.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c89
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c41
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c35
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c3
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c12
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/dt_object.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c28
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c38
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c176
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c51
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c48
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c115
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c37
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c33
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c59
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c22
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c90
-rw-r--r--drivers/staging/media/Kconfig10
-rw-r--r--drivers/staging/media/Makefile7
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c7
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c18
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c88
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h58
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c104
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c101
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h6
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c106
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c19
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c8
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c5
-rw-r--r--drivers/staging/media/lirc/Kconfig6
-rw-r--r--drivers/staging/media/lirc/Makefile1
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c14
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c508
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c37
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c1
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c14
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c1
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c19
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c145
-rw-r--r--drivers/staging/media/mn88472/Kconfig7
-rw-r--r--drivers/staging/media/mn88472/Makefile5
-rw-r--r--drivers/staging/media/mn88472/TODO21
-rw-r--r--drivers/staging/media/mn88472/mn88472.c523
-rw-r--r--drivers/staging/media/mn88472/mn88472_priv.h36
-rw-r--r--drivers/staging/media/mn88473/Kconfig7
-rw-r--r--drivers/staging/media/mn88473/Makefile5
-rw-r--r--drivers/staging/media/mn88473/TODO21
-rw-r--r--drivers/staging/media/mn88473/mn88473.c464
-rw-r--r--drivers/staging/media/mn88473/mn88473_priv.h36
-rw-r--r--drivers/staging/media/omap24xx/Kconfig35
-rw-r--r--drivers/staging/media/omap24xx/Makefile5
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam-dma.c598
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.c1882
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.h596
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.c938
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.h200
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.c164
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.h305
-rw-r--r--drivers/staging/media/omap4iss/iss.c5
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c64
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c16
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c28
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c26
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c78
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h10
-rw-r--r--drivers/staging/media/parport/Kconfig (renamed from drivers/media/parport/Kconfig)24
-rw-r--r--drivers/staging/media/parport/Makefile (renamed from drivers/media/parport/Makefile)0
-rw-r--r--drivers/staging/media/parport/bw-qcam.c (renamed from drivers/media/parport/bw-qcam.c)0
-rw-r--r--drivers/staging/media/parport/c-qcam.c (renamed from drivers/media/parport/c-qcam.c)0
-rw-r--r--drivers/staging/media/parport/pms.c (renamed from drivers/media/parport/pms.c)0
-rw-r--r--drivers/staging/media/parport/w9966.c (renamed from drivers/media/parport/w9966.c)0
-rw-r--r--drivers/staging/media/tlg2300/Kconfig (renamed from drivers/media/usb/tlg2300/Kconfig)6
-rw-r--r--drivers/staging/media/tlg2300/Makefile (renamed from drivers/media/usb/tlg2300/Makefile)0
-rw-r--r--drivers/staging/media/tlg2300/pd-alsa.c (renamed from drivers/media/usb/tlg2300/pd-alsa.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-common.h (renamed from drivers/media/usb/tlg2300/pd-common.h)0
-rw-r--r--drivers/staging/media/tlg2300/pd-dvb.c (renamed from drivers/media/usb/tlg2300/pd-dvb.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-main.c (renamed from drivers/media/usb/tlg2300/pd-main.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-radio.c (renamed from drivers/media/usb/tlg2300/pd-radio.c)0
-rw-r--r--drivers/staging/media/tlg2300/pd-video.c (renamed from drivers/media/usb/tlg2300/pd-video.c)0
-rw-r--r--drivers/staging/media/tlg2300/vendorcmds.h (renamed from drivers/media/usb/tlg2300/vendorcmds.h)0
-rw-r--r--drivers/staging/media/vino/Kconfig24
-rw-r--r--drivers/staging/media/vino/Makefile3
-rw-r--r--drivers/staging/media/vino/indycam.c (renamed from drivers/media/platform/indycam.c)0
-rw-r--r--drivers/staging/media/vino/indycam.h (renamed from drivers/media/platform/indycam.h)0
-rw-r--r--drivers/staging/media/vino/saa7191.c (renamed from drivers/media/i2c/saa7191.c)0
-rw-r--r--drivers/staging/media/vino/saa7191.h (renamed from drivers/media/i2c/saa7191.h)0
-rw-r--r--drivers/staging/media/vino/vino.c (renamed from drivers/media/platform/vino.c)6
-rw-r--r--drivers/staging/media/vino/vino.h (renamed from drivers/media/platform/vino.h)0
-rw-r--r--drivers/staging/netlogic/xlr_net.c1
-rw-r--r--drivers/staging/nvec/nvec.c1
-rw-r--r--drivers/staging/nvec/nvec_kbd.c1
-rw-r--r--drivers/staging/nvec/nvec_paz00.c1
-rw-r--r--drivers/staging/nvec/nvec_power.c1
-rw-r--r--drivers/staging/nvec/nvec_ps2.c1
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c70
-rw-r--r--drivers/staging/octeon/ethernet-rx.c156
-rw-r--r--drivers/staging/octeon/ethernet-tx.c11
-rw-r--r--drivers/staging/octeon/ethernet.c9
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c3
-rw-r--r--drivers/staging/ozwpan/ozhcd.c23
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c12
-rw-r--r--drivers/staging/panel/TODO1
-rw-r--r--drivers/staging/panel/panel.c807
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c9
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c47
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c34
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c24
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c16
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c8
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c30
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h3
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h20
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h31
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h8
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h26
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h36
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c31
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c11
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c41
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c35
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c8
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c24
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c26
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c11
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c22
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c91
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c6
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c20
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h8
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c1
-rw-r--r--drivers/staging/rtl8712/hal_init.c11
-rw-r--r--drivers/staging/rtl8712/ieee80211.c17
-rw-r--r--drivers/staging/rtl8712/osdep_service.h8
-rw-r--r--drivers/staging/rtl8712/recv_linux.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c43
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c58
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c23
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c27
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c19
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c15
-rw-r--r--drivers/staging/rtl8712/usb_intf.c16
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c4
-rw-r--r--drivers/staging/rtl8723au/Makefile3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c159
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c181
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c23
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_led.c1893
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c76
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c67
-rw-r--r--drivers/staging/rtl8723au/core/rtw_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c165
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c98
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sreset.c2
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c15
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c94
-rw-r--r--drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c2
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c4
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c13
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c44
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c7
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c493
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c3
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c229
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_xmit.c31
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_led.c124
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723au_xmit.c33
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c479
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c21
-rw-r--r--drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h2
-rw-r--r--drivers/staging/rtl8723au/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8723au/include/odm_debug.h30
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_dm.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_hal.h11
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_led.h30
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_recv.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_xmit.h1
-rw-r--r--drivers/staging/rtl8723au/include/rtw_cmd.h4
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ht.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtw_led.h181
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h3
-rw-r--r--drivers/staging/rtl8723au/include/rtw_xmit.h18
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops.h2
-rw-r--r--drivers/staging/rtl8723au/include/usb_ops_linux.h4
-rw-r--r--drivers/staging/rtl8723au/include/wlan_bssdef.h21
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c24
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c37
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c23
-rw-r--r--drivers/staging/rts5208/ms.c4
-rw-r--r--drivers/staging/rts5208/rtsx.c4
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c459
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c3
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c8
-rw-r--r--drivers/staging/rts5208/rtsx_transport.h2
-rw-r--r--drivers/staging/skein/Kconfig24
-rw-r--r--drivers/staging/skein/Makefile13
-rw-r--r--drivers/staging/skein/skein_api.c2
-rw-r--r--drivers/staging/skein/skein_api.h2
-rw-r--r--drivers/staging/skein/skein_base.c (renamed from drivers/staging/skein/skein.c)23
-rw-r--r--drivers/staging/skein/skein_base.h (renamed from drivers/staging/skein/skein.h)39
-rw-r--r--drivers/staging/skein/skein_block.c932
-rw-r--r--drivers/staging/skein/skein_block.h2
-rw-r--r--drivers/staging/skein/skein_generic.c216
-rw-r--r--drivers/staging/skein/skein_iv.h2
-rw-r--r--drivers/staging/skein/threefish_api.h2
-rw-r--r--drivers/staging/slicoss/slicoss.c46
-rw-r--r--drivers/staging/speakup/kobjects.c6
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c2
-rw-r--r--drivers/staging/unisys/channels/channel.c114
-rw-r--r--drivers/staging/unisys/channels/chanstub.c8
-rw-r--r--drivers/staging/unisys/channels/chanstub.h8
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel.h505
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/channel_guid.h27
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlframework.h47
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h623
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/diagchannel.h168
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/iochannel.h292
-rw-r--r--drivers/staging/unisys/common-spar/include/channels/vbuschannel.h80
-rw-r--r--drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h37
-rw-r--r--drivers/staging/unisys/common-spar/include/vmcallinterface.h47
-rw-r--r--drivers/staging/unisys/include/timskmod.h17
-rw-r--r--drivers/staging/unisys/include/uisqueue.h75
-rw-r--r--drivers/staging/unisys/include/uisutils.h162
-rw-r--r--drivers/staging/unisys/include/vbushelper.h16
-rw-r--r--drivers/staging/unisys/uislib/uislib.c521
-rw-r--r--drivers/staging/unisys/uislib/uisqueue.c8
-rw-r--r--drivers/staging/unisys/uislib/uisutils.c109
-rw-r--r--drivers/staging/unisys/virthba/virthba.c104
-rw-r--r--drivers/staging/unisys/virthba/virthba.h4
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.c332
-rw-r--r--drivers/staging/unisys/virtpci/virtpci.h20
-rw-r--r--drivers/staging/unisys/visorchannel/globals.h1
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel.h2
-rw-r--r--drivers/staging/unisys/visorchannel/visorchannel_funcs.c301
-rw-r--r--drivers/staging/unisys/visorchipset/file.c6
-rw-r--r--drivers/staging/unisys/visorchipset/parser.c52
-rw-r--r--drivers/staging/unisys/visorchipset/testing.h5
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h275
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c702
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.c40
-rw-r--r--drivers/staging/unisys/visorutil/charqueue.h17
-rw-r--r--drivers/staging/unisys/visorutil/easyproc.c6
-rw-r--r--drivers/staging/unisys/visorutil/memregion.h28
-rw-r--r--drivers/staging/unisys/visorutil/memregion_direct.c64
-rw-r--r--drivers/staging/unisys/visorutil/periodic_work.c19
-rw-r--r--drivers/staging/unisys/visorutil/procobjecttree.c15
-rw-r--r--drivers/staging/unisys/visorutil/visorkmodutils.c26
-rw-r--r--drivers/staging/vme/devices/Kconfig3
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c4
-rw-r--r--drivers/staging/vt6655/80211hdr.h318
-rw-r--r--drivers/staging/vt6655/80211mgr.c1019
-rw-r--r--drivers/staging/vt6655/80211mgr.h725
-rw-r--r--drivers/staging/vt6655/IEEE11h.c141
-rw-r--r--drivers/staging/vt6655/IEEE11h.h42
-rw-r--r--drivers/staging/vt6655/Kconfig4
-rw-r--r--drivers/staging/vt6655/Makefile23
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c374
-rw-r--r--drivers/staging/vt6655/aes_ccmp.h37
-rw-r--r--drivers/staging/vt6655/baseband.c777
-rw-r--r--drivers/staging/vt6655/baseband.h39
-rw-r--r--drivers/staging/vt6655/bssdb.c1512
-rw-r--r--drivers/staging/vt6655/bssdb.h326
-rw-r--r--drivers/staging/vt6655/card.c1259
-rw-r--r--drivers/staging/vt6655/card.h120
-rw-r--r--drivers/staging/vt6655/channel.c844
-rw-r--r--drivers/staging/vt6655/channel.h22
-rw-r--r--drivers/staging/vt6655/country.h161
-rw-r--r--drivers/staging/vt6655/datarate.c410
-rw-r--r--drivers/staging/vt6655/datarate.h78
-rw-r--r--drivers/staging/vt6655/desc.h134
-rw-r--r--drivers/staging/vt6655/device.h476
-rw-r--r--drivers/staging/vt6655/device_cfg.h2
-rw-r--r--drivers/staging/vt6655/device_main.c2533
-rw-r--r--drivers/staging/vt6655/dpc.c1312
-rw-r--r--drivers/staging/vt6655/dpc.h10
-rw-r--r--drivers/staging/vt6655/hostap.c765
-rw-r--r--drivers/staging/vt6655/hostap.h58
-rw-r--r--drivers/staging/vt6655/iocmd.h408
-rw-r--r--drivers/staging/vt6655/ioctl.c658
-rw-r--r--drivers/staging/vt6655/ioctl.h36
-rw-r--r--drivers/staging/vt6655/iowpa.h130
-rw-r--r--drivers/staging/vt6655/iwctl.c1937
-rw-r--r--drivers/staging/vt6655/iwctl.h206
-rw-r--r--drivers/staging/vt6655/key.c838
-rw-r--r--drivers/staging/vt6655/key.h131
-rw-r--r--drivers/staging/vt6655/mac.c780
-rw-r--r--drivers/staging/vt6655/mac.h53
-rw-r--r--drivers/staging/vt6655/mib.c423
-rw-r--r--drivers/staging/vt6655/mib.h261
-rw-r--r--drivers/staging/vt6655/michael.c148
-rw-r--r--drivers/staging/vt6655/michael.h52
-rw-r--r--drivers/staging/vt6655/power.c208
-rw-r--r--drivers/staging/vt6655/power.h16
-rw-r--r--drivers/staging/vt6655/rc4.c88
-rw-r--r--drivers/staging/vt6655/rc4.h47
-rw-r--r--drivers/staging/vt6655/rf.c381
-rw-r--r--drivers/staging/vt6655/rf.h9
-rw-r--r--drivers/staging/vt6655/rxtx.c2112
-rw-r--r--drivers/staging/vt6655/rxtx.h52
-rw-r--r--drivers/staging/vt6655/srom.c252
-rw-r--r--drivers/staging/vt6655/srom.h49
-rw-r--r--drivers/staging/vt6655/tcrc.c191
-rw-r--r--drivers/staging/vt6655/tcrc.h50
-rw-r--r--drivers/staging/vt6655/tether.c105
-rw-r--r--drivers/staging/vt6655/tether.h192
-rw-r--r--drivers/staging/vt6655/tkip.c268
-rw-r--r--drivers/staging/vt6655/tkip.h57
-rw-r--r--drivers/staging/vt6655/tmacro.h2
-rw-r--r--drivers/staging/vt6655/ttype.h42
-rw-r--r--drivers/staging/vt6655/upc.h1
-rw-r--r--drivers/staging/vt6655/vntconfiguration.dat1
-rw-r--r--drivers/staging/vt6655/vntwifi.c700
-rw-r--r--drivers/staging/vt6655/vntwifi.h273
-rw-r--r--drivers/staging/vt6655/wcmd.c1023
-rw-r--r--drivers/staging/vt6655/wcmd.h123
-rw-r--r--drivers/staging/vt6655/wctl.c233
-rw-r--r--drivers/staging/vt6655/wctl.h105
-rw-r--r--drivers/staging/vt6655/wmgr.c4602
-rw-r--r--drivers/staging/vt6655/wmgr.h420
-rw-r--r--drivers/staging/vt6655/wpa.c300
-rw-r--r--drivers/staging/vt6655/wpa.h83
-rw-r--r--drivers/staging/vt6655/wpa2.c359
-rw-r--r--drivers/staging/vt6655/wpa2.h77
-rw-r--r--drivers/staging/vt6655/wpactl.c896
-rw-r--r--drivers/staging/vt6655/wpactl.h64
-rw-r--r--drivers/staging/vt6655/wroute.c187
-rw-r--r--drivers/staging/vt6655/wroute.h45
-rw-r--r--drivers/staging/vt6656/main_usb.c17
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h3
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c6
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c9
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c2
-rw-r--r--drivers/staging/xgifb/vb_def.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c2
-rw-r--r--drivers/staging/xgifb/vb_util.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c12
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/tc/tc.c36
-rw-r--r--drivers/thermal/Kconfig32
-rw-r--r--drivers/thermal/Makefile5
-rw-r--r--drivers/thermal/armada_thermal.c21
-rw-r--r--drivers/thermal/clock_cooling.c485
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c1
-rw-r--r--drivers/thermal/db8500_thermal.c1
-rw-r--r--drivers/thermal/dove_thermal.c1
-rw-r--r--drivers/thermal/imx_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c12
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c80
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c12
-rw-r--r--drivers/thermal/kirkwood_thermal.c1
-rw-r--r--drivers/thermal/of-thermal.c148
-rw-r--r--drivers/thermal/rockchip_thermal.c693
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.h1
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c693
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h123
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c239
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h159
-rw-r--r--drivers/thermal/spear_thermal.c1
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c1
-rw-r--r--drivers/thermal/st/st_thermal_syscfg.c1
-rw-r--r--drivers/thermal/tegra_soctherm.c476
-rw-r--r--drivers/thermal/thermal_core.c8
-rw-r--r--drivers/thermal/thermal_core.h18
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c8
-rw-r--r--drivers/tty/amiserial.c1
-rw-r--r--drivers/tty/ehv_bytechan.c5
-rw-r--r--drivers/tty/goldfish.c6
-rw-r--r--drivers/tty/hvc/hvc_opal.c1
-rw-r--r--drivers/tty/hvc/hvc_tile.c1
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/hvc/hvsi_lib.c3
-rw-r--r--drivers/tty/ipwireless/hardware.c12
-rw-r--r--drivers/tty/isicom.c14
-rw-r--r--drivers/tty/n_tty.c81
-rw-r--r--drivers/tty/pty.c87
-rw-r--r--drivers/tty/serial/8250/8250.h9
-rw-r--r--drivers/tty/serial/8250/8250_core.c113
-rw-r--r--drivers/tty/serial/8250/8250_dma.c39
-rw-r--r--drivers/tty/serial/8250/8250_dw.c70
-rw-r--r--drivers/tty/serial/8250/8250_em.c5
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c8
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c4
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c4
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1281
-rw-r--r--drivers/tty/serial/8250/8250_pci.c198
-rw-r--r--drivers/tty/serial/8250/Kconfig11
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig26
-rw-r--r--drivers/tty/serial/altera_jtaguart.c1
-rw-r--r--drivers/tty/serial/altera_uart.c1
-rw-r--r--drivers/tty/serial/amba-pl010.c60
-rw-r--r--drivers/tty/serial/amba-pl011.c48
-rw-r--r--drivers/tty/serial/apbuart.c1
-rw-r--r--drivers/tty/serial/ar933x_uart.c31
-rw-r--r--drivers/tty/serial/arc_uart.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c231
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c56
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c5
-rw-r--r--drivers/tty/serial/bfin_uart.c6
-rw-r--r--drivers/tty/serial/clps711x.c6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c49
-rw-r--r--drivers/tty/serial/crisv10.c12
-rw-r--r--drivers/tty/serial/crisv10.h1
-rw-r--r--drivers/tty/serial/earlycon.c2
-rw-r--r--drivers/tty/serial/efm32-uart.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c26
-rw-r--r--drivers/tty/serial/icom.c4
-rw-r--r--drivers/tty/serial/imx.c154
-rw-r--r--drivers/tty/serial/ip22zilog.c18
-rw-r--r--drivers/tty/serial/jsm/Makefile2
-rw-r--r--drivers/tty/serial/jsm/jsm.h101
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c982
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c156
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c36
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c39
-rw-r--r--drivers/tty/serial/lantiq.c5
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c1
-rw-r--r--drivers/tty/serial/m32r_sio.c42
-rw-r--r--drivers/tty/serial/max310x.c77
-rw-r--r--drivers/tty/serial/mcf.c43
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/meson_uart.c1
-rw-r--r--drivers/tty/serial/mfd.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c1
-rw-r--r--drivers/tty/serial/mpsc.c40
-rw-r--r--drivers/tty/serial/mrst_max3110.c27
-rw-r--r--drivers/tty/serial/msm_serial.c112
-rw-r--r--drivers/tty/serial/msm_serial.h2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c1
-rw-r--r--drivers/tty/serial/mxs-auart.c251
-rw-r--r--drivers/tty/serial/netx-serial.c1
-rw-r--r--drivers/tty/serial/of_serial.c76
-rw-r--r--drivers/tty/serial/omap-serial.c93
-rw-r--r--drivers/tty/serial/pmac_zilog.c10
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c49
-rw-r--r--drivers/tty/serial/pxa.c4
-rw-r--r--drivers/tty/serial/sa1100.c46
-rw-r--r--drivers/tty/serial/samsung.c91
-rw-r--r--drivers/tty/serial/sc16is7xx.c57
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/serial-tegra.c36
-rw-r--r--drivers/tty/serial/serial_core.c116
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c12
-rw-r--r--drivers/tty/serial/serial_txx9.c1
-rw-r--r--drivers/tty/serial/sh-sci.c6
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c35
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h4
-rw-r--r--drivers/tty/serial/st-asc.c1
-rw-r--r--drivers/tty/serial/sunhv.c1
-rw-r--r--drivers/tty/serial/sunsab.c37
-rw-r--r--drivers/tty/serial/sunsu.c40
-rw-r--r--drivers/tty/serial/sunzilog.c25
-rw-r--r--drivers/tty/serial/timbuart.c3
-rw-r--r--drivers/tty/serial/ucc_uart.c1
-rw-r--r--drivers/tty/serial/vr41xx_siu.c1
-rw-r--r--drivers/tty/serial/vt8500_serial.c1
-rw-r--r--drivers/tty/serial/xilinx_uartps.c22
-rw-r--r--drivers/tty/tty_buffer.c10
-rw-r--r--drivers/tty/tty_io.c440
-rw-r--r--drivers/tty/tty_ioctl.c38
-rw-r--r--drivers/tty/tty_ldisc.c111
-rw-r--r--drivers/tty/tty_mutex.c49
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/vt/keyboard.c6
-rw-r--r--drivers/tty/vt/vt.c22
-rw-r--r--drivers/uio/uio.c4
-rw-r--r--drivers/uio/uio_dmem_genirq.c1
-rw-r--r--drivers/uio/uio_pdrv_genirq.c1
-rw-r--r--drivers/uio/uio_pruss.c1
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c1
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/ci.h10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c46
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c9
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c115
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c1
-rw-r--r--drivers/usb/chipidea/core.c226
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/host.c78
-rw-r--r--drivers/usb/chipidea/otg_fsm.c41
-rw-r--r--drivers/usb/chipidea/udc.c22
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c7
-rw-r--r--drivers/usb/class/cdc-acm.c17
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/common/usb-otg-fsm.c8
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/Kconfig68
-rw-r--r--drivers/usb/dwc2/Makefile32
-rw-r--r--drivers/usb/dwc2/core.c10
-rw-r--r--drivers/usb/dwc2/core.h197
-rw-r--r--drivers/usb/dwc2/core_intr.c17
-rw-r--r--drivers/usb/dwc2/gadget.c387
-rw-r--r--drivers/usb/dwc2/hcd.c94
-rw-r--r--drivers/usb/dwc2/hcd.h10
-rw-r--r--drivers/usb/dwc2/pci.c7
-rw-r--r--drivers/usb/dwc2/platform.c44
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c251
-rw-r--r--drivers/usb/dwc3/core.h89
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c44
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c6
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c20
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c35
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/ep0.c26
-rw-r--r--drivers/usb/dwc3/gadget.c68
-rw-r--r--drivers/usb/dwc3/host.c22
-rw-r--r--drivers/usb/dwc3/platform_data.h20
-rw-r--r--drivers/usb/dwc3/trace.h20
-rw-r--r--drivers/usb/gadget/Kconfig61
-rw-r--r--drivers/usb/gadget/composite.c64
-rw-r--r--drivers/usb/gadget/configfs.c5
-rw-r--r--drivers/usb/gadget/function/Makefile4
-rw-r--r--drivers/usb/gadget/function/f_hid.c376
-rw-r--r--drivers/usb/gadget/function/f_midi.c364
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/gadget/function/f_phonet.c6
-rw-r--r--drivers/usb/gadget/function/f_rndis.c3
-rw-r--r--drivers/usb/gadget/function/u_hid.h42
-rw-r--r--drivers/usb/gadget/function/u_midi.h40
-rw-r--r--drivers/usb/gadget/function/u_uac1.c3
-rw-r--r--drivers/usb/gadget/legacy/Kconfig2
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c26
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c43
-rw-r--r--drivers/usb/gadget/legacy/hid.c80
-rw-r--r--drivers/usb/gadget/legacy/printer.c65
-rw-r--r--drivers/usb/gadget/legacy/zero.c2
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c12
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c90
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c26
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c14
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig21
-rw-r--r--drivers/usb/gadget/udc/bdc/Makefile8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h490
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c376
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.h29
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c533
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c123
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.h37
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2024
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.h22
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c132
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c587
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c38
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c4
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c14
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c20
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c6
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c9
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c12
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c6
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c6
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c33
-rw-r--r--drivers/usb/gadget/udc/net2272.c67
-rw-r--r--drivers/usb/gadget/udc/net2280.c65
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c7
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c11
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c44
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c153
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c10
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c17
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c7
-rw-r--r--drivers/usb/gadget/udc/udc-core.c28
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c3
-rw-r--r--drivers/usb/host/Kconfig18
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-atmel.c13
-rw-r--r--drivers/usb/host/ehci-exynos.c13
-rw-r--r--drivers/usb/host/ehci-fsl.c12
-rw-r--r--drivers/usb/host/ehci-hcd.c14
-rw-r--r--drivers/usb/host/ehci-hub.c45
-rw-r--r--drivers/usb/host/ehci-msm.c10
-rw-r--r--drivers/usb/host/ehci-mv.c13
-rw-r--r--drivers/usb/host/ehci-mxc.c11
-rw-r--r--drivers/usb/host/ehci-octeon.c188
-rw-r--r--drivers/usb/host/ehci-orion.c15
-rw-r--r--drivers/usb/host/ehci-platform.c28
-rw-r--r--drivers/usb/host/ehci-sead3.c5
-rw-r--r--drivers/usb/host/ehci-sh.c16
-rw-r--r--drivers/usb/host/ehci-spear.c9
-rw-r--r--drivers/usb/host/ehci-sysfs.c2
-rw-r--r--drivers/usb/host/ehci-tegra.c11
-rw-r--r--drivers/usb/host/ehci-w90x900.c18
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci-hcd.c1
-rw-r--r--drivers/usb/host/fotg210-hcd.c2
-rw-r--r--drivers/usb/host/fotg210.h62
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c1
-rw-r--r--drivers/usb/host/fusbh200-hcd.c2
-rw-r--r--drivers/usb/host/fusbh200.h62
-rw-r--r--drivers/usb/host/imx21-hcd.c6
-rw-r--r--drivers/usb/host/isp116x-hcd.c1
-rw-r--r--drivers/usb/host/isp1362-hcd.c1
-rw-r--r--drivers/usb/host/isp1760-if.c1
-rw-r--r--drivers/usb/host/octeon2-common.c200
-rw-r--r--drivers/usb/host/ohci-at91.c16
-rw-r--r--drivers/usb/host/ohci-da8xx.c7
-rw-r--r--drivers/usb/host/ohci-exynos.c12
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-jz4740.c13
-rw-r--r--drivers/usb/host/ohci-nxp.c1
-rw-r--r--drivers/usb/host/ohci-octeon.c202
-rw-r--r--drivers/usb/host/ohci-omap.c3
-rw-r--r--drivers/usb/host/ohci-platform.c54
-rw-r--r--drivers/usb/host/ohci-pxa27x.c12
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/host/ohci-spear.c12
-rw-r--r--drivers/usb/host/ohci.h19
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c1
-rw-r--r--drivers/usb/host/pci-quirks.c12
-rw-r--r--drivers/usb/host/r8a66597-hcd.c1
-rw-r--r--drivers/usb/host/u132-hcd.c1
-rw-r--r--drivers/usb/host/uhci-platform.c6
-rw-r--r--drivers/usb/host/xhci-plat.c10
-rw-r--r--drivers/usb/host/xhci-ring.c113
-rw-r--r--drivers/usb/host/xhci.c7
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/adutux.c5
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c3
-rw-r--r--drivers/usb/misc/usb3503.c9
-rw-r--r--drivers/usb/misc/yurex.c6
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/am35x.c32
-rw-r--r--drivers/usb/musb/blackfin.c74
-rw-r--r--drivers/usb/musb/da8xx.c30
-rw-r--r--drivers/usb/musb/davinci.c22
-rw-r--r--drivers/usb/musb/jz4740.c2
-rw-r--r--drivers/usb/musb/musb_core.c336
-rw-r--r--drivers/usb/musb/musb_core.h87
-rw-r--r--drivers/usb/musb/musb_cppi41.c10
-rw-r--r--drivers/usb/musb/musb_debugfs.c57
-rw-r--r--drivers/usb/musb/musb_dsps.c37
-rw-r--r--drivers/usb/musb/musb_gadget.c58
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/musb/musb_io.h106
-rw-r--r--drivers/usb/musb/musb_regs.h26
-rw-r--r--drivers/usb/musb/musb_virthub.c23
-rw-r--r--drivers/usb/musb/musbhsdma.c7
-rw-r--r--drivers/usb/musb/omap2430.c52
-rw-r--r--drivers/usb/musb/tusb6010.c102
-rw-r--r--drivers/usb/musb/ux500.c16
-rw-r--r--drivers/usb/musb/ux500_dma.c11
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c34
-rw-r--r--drivers/usb/phy/phy-am335x-control.c5
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c27
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c17
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c105
-rw-r--r--drivers/usb/phy/phy-keystone.c1
-rw-r--r--drivers/usb/phy/phy-msm-usb.c107
-rw-r--r--drivers/usb/phy/phy-mv-usb.c57
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c5
-rw-r--r--drivers/usb/phy/phy-omap-otg.c1
-rw-r--r--drivers/usb/phy/phy-rcar-gen2-usb.c4
-rw-r--r--drivers/usb/phy/phy-rcar-usb.c12
-rw-r--r--drivers/usb/phy/phy-tahvo.c35
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c10
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c1
-rw-r--r--drivers/usb/phy/phy-ulpi.c6
-rw-r--r--drivers/usb/phy/phy.c16
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/common.h7
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c111
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h10
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c41
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c8
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c73
-rw-r--r--drivers/usb/serial/Kconfig5
-rw-r--r--drivers/usb/serial/keyspan_pda.c16
-rw-r--r--drivers/usb/serial/kobil_sct.c2
-rw-r--r--drivers/usb/serial/mos7720.c8
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/qcserial.c33
-rw-r--r--drivers/usb/serial/usb-serial-simple.c12
-rw-r--r--drivers/usb/storage/debug.c7
-rw-r--r--drivers/usb/storage/debug.h10
-rw-r--r--drivers/usb/storage/initializers.c3
-rw-r--r--drivers/usb/storage/transport.c17
-rw-r--r--drivers/usb/storage/uas.c64
-rw-r--r--drivers/usb/storage/usb.c36
-rw-r--r--drivers/usb/usbip/stub_dev.c26
-rw-r--r--drivers/usb/usbip/vhci_hcd.c10
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c2
-rw-r--r--drivers/usb/wusbcore/security.c8
-rw-r--r--drivers/uwb/rsv.c4
-rw-r--r--drivers/vfio/Kconfig2
-rw-r--r--drivers/vfio/pci/Kconfig8
-rw-r--r--drivers/vfio/pci/vfio_pci.c5
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c7
-rw-r--r--drivers/vhost/net.c39
-rw-r--r--drivers/vhost/scsi.c22
-rw-r--r--drivers/vhost/vhost.c93
-rw-r--r--drivers/vhost/vhost.h41
-rw-r--r--drivers/video/backlight/lp855x_bl.c55
-rw-r--r--drivers/video/backlight/pwm_bl.c5
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/amba-clcd.c1
-rw-r--r--drivers/video/fbdev/amifb.c1
-rw-r--r--drivers/video/fbdev/arkfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c1
-rw-r--r--drivers/video/fbdev/au1100fb.c1
-rw-r--r--drivers/video/fbdev/au1200fb.c1
-rw-r--r--drivers/video/fbdev/auo_k1900fb.c1
-rw-r--r--drivers/video/fbdev/auo_k1901fb.c1
-rw-r--r--drivers/video/fbdev/bf537-lq035.c1
-rw-r--r--drivers/video/fbdev/bf54x-lq043fb.c1
-rw-r--r--drivers/video/fbdev/bfin-t350mcqb-fb.c1
-rw-r--r--drivers/video/fbdev/broadsheetfb.c1
-rw-r--r--drivers/video/fbdev/bw2.c1
-rw-r--r--drivers/video/fbdev/cg14.c1
-rw-r--r--drivers/video/fbdev/cg3.c1
-rw-r--r--drivers/video/fbdev/cg6.c1
-rw-r--r--drivers/video/fbdev/clps711x-fb.c1
-rw-r--r--drivers/video/fbdev/clps711xfb.c1
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c1
-rw-r--r--drivers/video/fbdev/da8xx-fb.c1
-rw-r--r--drivers/video/fbdev/efifb.c1
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c1
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi.c3
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c2
-rw-r--r--drivers/video/fbdev/ffb.c1
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c1
-rw-r--r--drivers/video/fbdev/grvga.c1
-rw-r--r--drivers/video/fbdev/hecubafb.c1
-rw-r--r--drivers/video/fbdev/hitfb.c1
-rw-r--r--drivers/video/fbdev/imxfb.c1
-rw-r--r--drivers/video/fbdev/leo.c1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c1
-rw-r--r--drivers/video/fbdev/metronomefb.c1
-rw-r--r--drivers/video/fbdev/mmp/core.c6
-rw-r--r--drivers/video/fbdev/mmp/fb/mmpfb.c1
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c4
-rw-r--r--drivers/video/fbdev/mx3fb.c4
-rw-r--r--drivers/video/fbdev/mxsfb.c19
-rw-r--r--drivers/video/fbdev/nuc900fb.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_htcherald.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1510.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1610.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_palmte.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_palmz71.c1
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-dvi.c10
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-hdmi.c100
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c2
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c58
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-dpi.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/Kconfig7
-rw-r--r--drivers/video/fbdev/omap2/dss/Makefile2
-rw-r--r--drivers/video/fbdev/omap2/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c21
-rw-r--r--drivers/video/fbdev/omap2/dss/dpi.c328
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c660
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c58
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.c124
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.h225
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.c42
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.h12
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi.h71
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4.c339
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.c14
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.h4
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5.c339
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.c11
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.h2
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_common.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_phy.c31
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c313
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_wp.c16
-rw-r--r--drivers/video/fbdev/omap2/dss/output.c19
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c378
-rw-r--r--drivers/video/fbdev/omap2/dss/rfbi.c1
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/venc.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c1
-rw-r--r--drivers/video/fbdev/p9100.c1
-rw-r--r--drivers/video/fbdev/platinumfb.c1
-rw-r--r--drivers/video/fbdev/pxa168fb.c1
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c1
-rw-r--r--drivers/video/fbdev/pxafb.c1
-rw-r--r--drivers/video/fbdev/s3c-fb.c1
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/sa1100fb.c31
-rw-r--r--drivers/video/fbdev/sa1100fb.h1
-rw-r--r--drivers/video/fbdev/sh7760fb.c1
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/fbdev/sh_mobile_meram.c1
-rw-r--r--drivers/video/fbdev/simplefb.c163
-rw-r--r--drivers/video/fbdev/sis/sis_main.c14
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/smscufx.c6
-rw-r--r--drivers/video/fbdev/sunxvr1000.c1
-rw-r--r--drivers/video/fbdev/tcx.c1
-rw-r--r--drivers/video/fbdev/udlfb.c9
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/vesafb.c1
-rw-r--r--drivers/video/fbdev/via/viafbdev.c3
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c1
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/video/fbdev/wm8505fb.c1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/video/fbdev/xilinxfb.c1
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c102
-rw-r--r--drivers/virtio/virtio_balloon.c57
-rw-r--r--drivers/virtio/virtio_mmio.c18
-rw-r--r--drivers/virtio/virtio_pci.c802
-rw-r--r--drivers/virtio/virtio_pci_common.c464
-rw-r--r--drivers/virtio/virtio_pci_common.h136
-rw-r--r--drivers/virtio/virtio_pci_legacy.c326
-rw-r--r--drivers/virtio/virtio_ring.c109
-rw-r--r--drivers/w1/masters/ds2490.c2
-rw-r--r--drivers/w1/masters/mxc_w1.c1
-rw-r--r--drivers/w1/masters/w1-gpio.c1
-rw-r--r--drivers/w1/slaves/w1_bq27000.c4
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/watchdog/acquirewdt.c1
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c1
-rw-r--r--drivers/watchdog/at91sam9_wdt.c1
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c1
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c1
-rw-r--r--drivers/watchdog/bfin_wdt.c1
-rw-r--r--drivers/watchdog/coh901327_wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c1
-rw-r--r--drivers/watchdog/davinci_wdt.c1
-rw-r--r--drivers/watchdog/dw_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/gpio_wdt.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c1
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c48
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c1
-rw-r--r--drivers/watchdog/ks8695_wdt.c1
-rw-r--r--drivers/watchdog/lantiq_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c1
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c1
-rw-r--r--drivers/watchdog/mv64x60_wdt.c1
-rw-r--r--drivers/watchdog/nuc900_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c1
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c1
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sunxi_wdt.c1
-rw-r--r--drivers/watchdog/tegra_wdt.c1
-rw-r--r--drivers/watchdog/ts72xx_wdt.c1
-rw-r--r--drivers/watchdog/twl4030_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/watchdog/xen_wdt.c1
-rw-r--r--drivers/xen/swiotlb-xen.c19
-rw-r--r--drivers/xen/xen-pciback/passthrough.c14
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c112
-rw-r--r--drivers/xen/xen-pciback/pciback.h7
-rw-r--r--drivers/xen/xen-pciback/vpci.c14
-rw-r--r--drivers/xen/xen-pciback/xenbus.c4
-rw-r--r--drivers/zorro/zorro.c1
-rw-r--r--fs/Kconfig.binfmt3
-rw-r--r--fs/Makefile2
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/amigaffs.c28
-rw-r--r--fs/affs/file.c76
-rw-r--r--fs/afs/rxrpc.c14
-rw-r--r--fs/aio.c33
-rw-r--r--fs/befs/linuxvfs.c4
-rw-r--r--fs/binfmt_elf.c238
-rw-r--r--fs/binfmt_em86.c4
-rw-r--r--fs/binfmt_misc.c4
-rw-r--r--fs/binfmt_script.c10
-rw-r--r--fs/btrfs/check-integrity.c163
-rw-r--r--fs/btrfs/compression.c18
-rw-r--r--fs/btrfs/ctree.c2
-rw-r--r--fs/btrfs/ctree.h85
-rw-r--r--fs/btrfs/dev-replace.c32
-rw-r--r--fs/btrfs/dir-item.c10
-rw-r--r--fs/btrfs/disk-io.c49
-rw-r--r--fs/btrfs/extent-tree.c211
-rw-r--r--fs/btrfs/extent_io.c41
-rw-r--r--fs/btrfs/extent_io.h1
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/file.c51
-rw-r--r--fs/btrfs/free-space-cache.c117
-rw-r--r--fs/btrfs/free-space-cache.h2
-rw-r--r--fs/btrfs/inode-map.c4
-rw-r--r--fs/btrfs/inode.c152
-rw-r--r--fs/btrfs/ioctl.c36
-rw-r--r--fs/btrfs/ordered-data.c49
-rw-r--r--fs/btrfs/ordered-data.h12
-rw-r--r--fs/btrfs/raid56.c763
-rw-r--r--fs/btrfs/raid56.h16
-rw-r--r--fs/btrfs/scrub.c893
-rw-r--r--fs/btrfs/send.c49
-rw-r--r--fs/btrfs/super.c94
-rw-r--r--fs/btrfs/sysfs.c34
-rw-r--r--fs/btrfs/transaction.c166
-rw-r--r--fs/btrfs/transaction.h6
-rw-r--r--fs/btrfs/tree-log.c50
-rw-r--r--fs/btrfs/volumes.c90
-rw-r--r--fs/btrfs/volumes.h32
-rw-r--r--fs/btrfs/xattr.c150
-rw-r--r--fs/ceph/addr.c273
-rw-r--r--fs/ceph/caps.c132
-rw-r--r--fs/ceph/dir.c27
-rw-r--r--fs/ceph/file.c97
-rw-r--r--fs/ceph/inode.c59
-rw-r--r--fs/ceph/locks.c64
-rw-r--r--fs/ceph/mds_client.c41
-rw-r--r--fs/ceph/mds_client.h10
-rw-r--r--fs/ceph/snap.c37
-rw-r--r--fs/ceph/super.c16
-rw-r--r--fs/ceph/super.h55
-rw-r--r--fs/ceph/xattr.c7
-rw-r--r--fs/coda/dir.c4
-rw-r--r--fs/debugfs/file.c54
-rw-r--r--fs/drop_caches.c11
-rw-r--r--fs/exec.c113
-rw-r--r--fs/ext4/ext4.h41
-rw-r--r--fs/ext4/extents.c223
-rw-r--r--fs/ext4/extents_status.c321
-rw-r--r--fs/ext4/extents_status.h82
-rw-r--r--fs/ext4/file.c220
-rw-r--r--fs/ext4/inline.c35
-rw-r--r--fs/ext4/inode.c37
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/mballoc.c15
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c8
-rw-r--r--fs/ext4/namei.c1
-rw-r--r--fs/ext4/resize.c6
-rw-r--r--fs/ext4/super.c51
-rw-r--r--fs/fat/fat.h1
-rw-r--r--fs/fat/file.c3
-rw-r--r--fs/fat/inode.c12
-rw-r--r--fs/fs-writeback.c29
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/dev.c29
-rw-r--r--fs/fuse/dir.c538
-rw-r--r--fs/fuse/file.c230
-rw-r--r--fs/fuse/fuse_i.h45
-rw-r--r--fs/fuse/inode.c39
-rw-r--r--fs/hfsplus/catalog.c89
-rw-r--r--fs/hfsplus/dir.c11
-rw-r--r--fs/hfsplus/hfsplus_fs.h4
-rw-r--r--fs/hfsplus/super.c4
-rw-r--r--fs/hugetlbfs/inode.c14
-rw-r--r--fs/inode.c13
-rw-r--r--fs/internal.h5
-rw-r--r--fs/ioctl.c2
-rw-r--r--fs/isofs/rock.c6
-rw-r--r--fs/jbd2/journal.c3
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/summary.c1
-rw-r--r--fs/kernfs/file.c73
-rw-r--r--fs/lockd/mon.c2
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/mount.h3
-rw-r--r--fs/namei.c100
-rw-r--r--fs/namespace.c71
-rw-r--r--fs/nfsd/nfs4proc.c57
-rw-r--r--fs/nfsd/nfs4state.c74
-rw-r--r--fs/nfsd/nfs4xdr.c34
-rw-r--r--fs/nfsd/nfscache.c4
-rw-r--r--fs/nfsd/nfsctl.c6
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/state.h19
-rw-r--r--fs/nfsd/vfs.c37
-rw-r--r--fs/nfsd/vfs.h2
-rw-r--r--fs/nfsd/xdr4.h9
-rw-r--r--fs/notify/dnotify/dnotify.c4
-rw-r--r--fs/notify/fdinfo.c6
-rw-r--r--fs/notify/fsnotify.c4
-rw-r--r--fs/notify/fsnotify.h12
-rw-r--r--fs/notify/inode_mark.c113
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c2
-rw-r--r--fs/notify/inotify/inotify_user.c10
-rw-r--r--fs/notify/mark.c97
-rw-r--r--fs/notify/vfsmount_mark.c109
-rw-r--r--fs/nsfs.c161
-rw-r--r--fs/ocfs2/alloc.c28
-rw-r--r--fs/ocfs2/alloc.h2
-rw-r--r--fs/ocfs2/aops.c16
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c12
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/open.c16
-rw-r--r--fs/pnode.c1
-rw-r--r--fs/proc/base.c53
-rw-r--r--fs/proc/inode.c10
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/meminfo.c15
-rw-r--r--fs/proc/namespaces.c153
-rw-r--r--fs/pstore/ram.c14
-rw-r--r--fs/pstore/ram_core.c31
-rw-r--r--fs/read_write.c24
-rw-r--r--fs/reiserfs/journal.c2
-rw-r--r--fs/reiserfs/super.c3
-rw-r--r--fs/seq_file.c6
-rw-r--r--fs/squashfs/Kconfig15
-rw-r--r--fs/squashfs/Makefile1
-rw-r--r--fs/squashfs/decompressor.c7
-rw-r--r--fs/squashfs/decompressor.h4
-rw-r--r--fs/squashfs/lz4_wrapper.c142
-rw-r--r--fs/squashfs/squashfs_fs.h1
-rw-r--r--fs/sysfs/file.c59
-rw-r--r--fs/ubifs/file.c1
-rw-r--r--fs/ubifs/journal.c7
-rw-r--r--fs/xfs/libxfs/xfs_ag.h281
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c1
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h3
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/libxfs/xfs_attr.c3
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c2
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c77
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_da_format.c2
-rw-r--r--fs/xfs/libxfs/xfs_dinode.h243
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c20
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h140
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c11
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c12
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c14
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h140
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c13
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c2
-rw-r--r--fs/xfs/libxfs/xfs_format.h1107
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c43
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c3
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c4
-rw-r--r--fs/xfs/libxfs/xfs_inum.h60
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h2
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c2
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c3
-rw-r--r--fs/xfs/libxfs/xfs_sb.c2
-rw-r--r--fs/xfs/libxfs/xfs_sb.h584
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c2
-rw-r--r--fs/xfs/xfs_acl.c2
-rw-r--r--fs/xfs/xfs_acl.h36
-rw-r--r--fs/xfs/xfs_aops.c3
-rw-r--r--fs/xfs/xfs_attr_inactive.c3
-rw-r--r--fs/xfs/xfs_attr_list.c3
-rw-r--r--fs/xfs/xfs_bmap_util.c3
-rw-r--r--fs/xfs/xfs_buf.c27
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_dir2_readdir.c21
-rw-r--r--fs/xfs/xfs_discard.c1
-rw-r--r--fs/xfs/xfs_dquot.c2
-rw-r--r--fs/xfs/xfs_dquot_item.c2
-rw-r--r--fs/xfs/xfs_error.c2
-rw-r--r--fs/xfs/xfs_export.c3
-rw-r--r--fs/xfs/xfs_extent_busy.c1
-rw-r--r--fs/xfs/xfs_extfree_item.c3
-rw-r--r--fs/xfs/xfs_file.c9
-rw-r--r--fs/xfs/xfs_filestream.c3
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_icache.c4
-rw-r--r--fs/xfs/xfs_icache.h8
-rw-r--r--fs/xfs/xfs_icreate_item.c3
-rw-r--r--fs/xfs/xfs_inode.c29
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_inode_item.c3
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_iomap.c18
-rw-r--r--fs/xfs/xfs_iops.c5
-rw-r--r--fs/xfs/xfs_itable.c6
-rw-r--r--fs/xfs/xfs_linux.h6
-rw-r--r--fs/xfs/xfs_log.c8
-rw-r--r--fs/xfs/xfs_log_cil.c3
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_message.c3
-rw-r--r--fs/xfs/xfs_mount.c33
-rw-r--r--fs/xfs/xfs_mount.h8
-rw-r--r--fs/xfs/xfs_qm.c14
-rw-r--r--fs/xfs/xfs_qm_bhv.c2
-rw-r--r--fs/xfs/xfs_qm_syscalls.c27
-rw-r--r--fs/xfs/xfs_quotaops.c2
-rw-r--r--fs/xfs/xfs_rtalloc.c3
-rw-r--r--fs/xfs/xfs_super.c19
-rw-r--r--fs/xfs/xfs_symlink.c3
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trans.c2
-rw-r--r--fs/xfs/xfs_trans_ail.c3
-rw-r--r--fs/xfs/xfs_trans_buf.c137
-rw-r--r--fs/xfs/xfs_trans_dquot.c2
-rw-r--r--fs/xfs/xfs_trans_extfree.c3
-rw-r--r--fs/xfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/xfs_xattr.c2
-rw-r--r--include/asm-generic/barrier.h8
-rw-r--r--include/asm-generic/hash.h9
-rw-r--r--include/asm-generic/pgtable.h11
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/hash.h492
-rw-r--r--include/crypto/if_alg.h1
-rw-r--r--include/crypto/rng.h80
-rw-r--r--include/drm/drmP.h27
-rw-r--r--include/drm/drm_atomic.h69
-rw-r--r--include/drm/drm_atomic_helper.h126
-rw-r--r--include/drm/drm_crtc.h327
-rw-r--r--include/drm/drm_crtc_helper.h13
-rw-r--r--include/drm/drm_displayid.h76
-rw-r--r--include/drm/drm_dp_helper.h26
-rw-r--r--include/drm/drm_dp_mst_helper.h8
-rw-r--r--include/drm/drm_edid.h109
-rw-r--r--include/drm/drm_fb_helper.h6
-rw-r--r--include/drm/drm_flip_work.h33
-rw-r--r--include/drm/drm_gem.h7
-rw-r--r--include/drm/drm_gem_cma_helper.h30
-rw-r--r--include/drm/drm_mipi_dsi.h94
-rw-r--r--include/drm/drm_modeset_lock.h5
-rw-r--r--include/drm/drm_plane_helper.h44
-rw-r--r--include/drm/i915_pciids.h17
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h9
-rw-r--r--include/dt-bindings/dma/at91.h25
-rw-r--r--include/dt-bindings/interrupt-controller/mips-gic.h9
-rw-r--r--include/dt-bindings/phy/phy.h19
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h142
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h44
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h13
-rw-r--r--include/kvm/arm_arch_timer.h10
-rw-r--r--include/kvm/arm_vgic.h12
-rw-r--r--include/linux/amba/bus.h11
-rw-r--r--include/linux/ath9k_platform.h3
-rw-r--r--include/linux/bcma/bcma.h2
-rw-r--r--include/linux/bcma/bcma_driver_mips.h4
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h5
-rw-r--r--include/linux/bitmap.h39
-rw-r--r--include/linux/blk-mq.h10
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bpf.h11
-rw-r--r--include/linux/cacheinfo.h100
-rw-r--r--include/linux/can/dev.h3
-rw-r--r--include/linux/ceph/auth.h26
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/ceph_fs.h10
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/messenger.h9
-rw-r--r--include/linux/ceph/msgr.h11
-rw-r--r--include/linux/ceph/osd_client.h13
-rw-r--r--include/linux/ceph/pagelist.h4
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/cma.h1
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/coresight.h263
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/cpumask.h17
-rw-r--r--include/linux/cpuset.h37
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h1112
-rw-r--r--include/linux/debugfs.h17
-rw-r--r--include/linux/device.h40
-rw-r--r--include/linux/dma-mapping.h13
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/dmar.h50
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/elf.h5
-rw-r--r--include/linux/etherdevice.h12
-rw-r--r--include/linux/ethtool.h42
-rw-r--r--include/linux/fault-inject.h17
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/fs.h29
-rw-r--r--include/linux/fsl_ifc.h21
-rw-r--r--include/linux/fsnotify_backend.h31
-rw-r--r--include/linux/ftrace.h7
-rw-r--r--include/linux/gfp.h7
-rw-r--r--include/linux/gpio.h7
-rw-r--r--include/linux/gpio/consumer.h40
-rw-r--r--include/linux/gpio/driver.h8
-rw-r--r--include/linux/hash.h35
-rw-r--r--include/linux/hdmi.h21
-rw-r--r--include/linux/hid.h43
-rw-r--r--include/linux/hugetlb.h46
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c.h31
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/ieee80211.h71
-rw-r--r--include/linux/ieee802154.h (renamed from include/net/ieee802154.h)61
-rw-r--r--include/linux/if_bridge.h31
-rw-r--r--include/linux/if_vlan.h107
-rw-r--r--include/linux/iio/common/st_sensors.h10
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/integrity.h6
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/io.h4
-rw-r--r--include/linux/iommu.h11
-rw-r--r--include/linux/ipc_namespace.h23
-rw-r--r--include/linux/ipmi.h6
-rw-r--r--include/linux/ipmi_smi.h10
-rw-r--r--include/linux/ipv6.h11
-rw-r--r--include/linux/irqchip/arm-gic-v3.h128
-rw-r--r--include/linux/irqchip/arm-gic.h4
-rw-r--r--include/linux/irqchip/mips-gic.h (renamed from arch/mips/include/asm/gic.h)267
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kernfs.h8
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h106
-rw-r--r--include/linux/kvm_types.h27
-rw-r--r--include/linux/leds.h40
-rw-r--r--include/linux/libata.h6
-rw-r--r--include/linux/list.h34
-rw-r--r--include/linux/mailbox_client.h3
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/memcontrol.h16
-rw-r--r--include/linux/mfd/arizona/core.h1
-rw-r--r--include/linux/mfd/davinci_voicecodec.h7
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mlx4/cmd.h31
-rw-r--r--include/linux/mlx4/device.h127
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mlx5/device.h78
-rw-r--r--include/linux/mlx5/driver.h22
-rw-r--r--include/linux/mlx5/qp.h65
-rw-r--r--include/linux/mm.h53
-rw-r--r--include/linux/mm_types.h12
-rw-r--r--include/linux/mmu_notifier.h90
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/mtd/nand.h18
-rw-r--r--include/linux/mtd/spi-nor.h11
-rw-r--r--include/linux/namei.h25
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h134
-rw-r--r--include/linux/nl802154.h4
-rw-r--r--include/linux/ns_common.h12
-rw-r--r--include/linux/nvme.h18
-rw-r--r--include/linux/of.h124
-rw-r--r--include/linux/of_address.h4
-rw-r--r--include/linux/of_iommu.h23
-rw-r--r--include/linux/of_pdt.h3
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/omap-mailbox.h16
-rw-r--r--include/linux/oom.h11
-rw-r--r--include/linux/page-debug-flags.h32
-rw-r--r--include/linux/page_ext.h84
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/percpu-refcount.h4
-rw-r--r--include/linux/phy.h26
-rw-r--r--include/linux/phy/phy.h52
-rw-r--r--include/linux/phy_fixed.h2
-rw-r--r--include/linux/pid_namespace.h3
-rw-r--r--include/linux/platform_data/asoc-s3c.h1
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/dma-imx.h1
-rw-r--r--include/linux/platform_data/dwc3-exynos.h24
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/rcar-du.h74
-rw-r--r--include/linux/platform_data/st21nfca.h1
-rw-r--r--include/linux/platform_data/st21nfcb.h1
-rw-r--r--include/linux/platform_device.h12
-rw-r--r--include/linux/plist.h10
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/proc_ns.h43
-rw-r--r--include/linux/pstore_ram.h4
-rw-r--r--include/linux/pxa168_eth.h3
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/ratelimit.h12
-rw-r--r--include/linux/rculist.h17
-rw-r--r--include/linux/rhashtable.h15
-rw-r--r--include/linux/rtnetlink.h14
-rw-r--r--include/linux/sched.h11
-rw-r--r--include/linux/serial_8250.h3
-rw-r--r--include/linux/serial_bcm63xx.h2
-rw-r--r--include/linux/serial_core.h67
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/skbuff.h188
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/socket.h24
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spmi.h3
-rw-r--r--include/linux/stacktrace.h5
-rw-r--r--include/linux/sunrpc/svc.h34
-rw-r--r--include/linux/sunrpc/svc_xprt.h7
-rw-r--r--include/linux/swap.h8
-rw-r--r--include/linux/syscalls.h11
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/tcp.h8
-rw-r--r--include/linux/thermal.h73
-rw-r--r--include/linux/tty.h28
-rw-r--r--include/linux/uio.h1
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/usb/chipidea.h5
-rw-r--r--include/linux/usb/composite.h7
-rw-r--r--include/linux/usb/ehci-dbgp.h83
-rw-r--r--include/linux/usb/ehci_def.h65
-rw-r--r--include/linux/usb/gadget.h5
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/usb/otg.h7
-rw-r--r--include/linux/usb/phy.h6
-rw-r--r--include/linux/usb/renesas_usbhs.h4
-rw-r--r--include/linux/user_namespace.h15
-rw-r--r--include/linux/utsname.h3
-rw-r--r--include/linux/virtio.h14
-rw-r--r--include/linux/virtio_byteorder.h59
-rw-r--r--include/linux/virtio_config.h103
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmw_vmci_api.h5
-rw-r--r--include/media/davinci/vpbe.h2
-rw-r--r--include/media/davinci/vpbe_display.h21
-rw-r--r--include/media/davinci/vpbe_venc.h5
-rw-r--r--include/media/exynos-fimc.h2
-rw-r--r--include/media/lirc_dev.h8
-rw-r--r--include/media/radio-si4713.h30
-rw-r--r--include/media/si4713.h4
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/soc_mediabus.h6
-rw-r--r--include/media/v4l2-common.h17
-rw-r--r--include/media/v4l2-ctrls.h25
-rw-r--r--include/media/v4l2-image-sizes.h9
-rw-r--r--include/media/v4l2-mediabus.h6
-rw-r--r--include/media/v4l2-subdev.h2
-rw-r--r--include/media/videobuf2-core.h42
-rw-r--r--include/media/videobuf2-dma-sg.h3
-rw-r--r--include/net/6lowpan.h12
-rw-r--r--include/net/af_ieee802154.h4
-rw-r--r--include/net/af_vsock.h6
-rw-r--r--include/net/bluetooth/hci.h68
-rw-r--r--include/net/bluetooth/hci_core.h78
-rw-r--r--include/net/bluetooth/l2cap.h50
-rw-r--r--include/net/bluetooth/mgmt.h24
-rw-r--r--include/net/bond_3ad.h (renamed from drivers/net/bonding/bond_3ad.h)6
-rw-r--r--include/net/bond_alb.h (renamed from drivers/net/bonding/bond_alb.h)6
-rw-r--r--include/net/bond_options.h (renamed from drivers/net/bonding/bond_options.h)6
-rw-r--r--include/net/bonding.h (renamed from drivers/net/bonding/bonding.h)18
-rw-r--r--include/net/cfg80211.h241
-rw-r--r--include/net/cfg802154.h161
-rw-r--r--include/net/checksum.h16
-rw-r--r--include/net/compat.h5
-rw-r--r--include/net/dsa.h33
-rw-r--r--include/net/fou.h19
-rw-r--r--include/net/gue.h103
-rw-r--r--include/net/ieee802154_netdev.h18
-rw-r--r--include/net/inet6_hashtables.h10
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_fib.h4
-rw-r--r--include/net/ip_tunnels.h16
-rw-r--r--include/net/ipx.h5
-rw-r--r--include/net/irda/irda.h15
-rw-r--r--include/net/irda/irlap.h2
-rw-r--r--include/net/irda/parameters.h6
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_s_st.h2
-rw-r--r--include/net/mac80211.h319
-rw-r--r--include/net/mac802154.h158
-rw-r--r--include/net/mpls.h39
-rw-r--r--include/net/neighbour.h14
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h15
-rw-r--r--include/net/netfilter/nf_nat_redirect.h12
-rw-r--r--include/net/netfilter/nf_tables_bridge.h7
-rw-r--r--include/net/netfilter/nft_redir.h21
-rw-r--r--include/net/netlink.h10
-rw-r--r--include/net/netns/xfrm.h4
-rw-r--r--include/net/nfc/digital.h13
-rw-r--r--include/net/nfc/hci.h4
-rw-r--r--include/net/nfc/nci.h37
-rw-r--r--include/net/nfc/nci_core.h9
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/nl802154.h226
-rw-r--r--include/net/ping.h2
-rw-r--r--include/net/regulatory.h12
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/sock.h46
-rw-r--r--include/net/switchdev.h37
-rw-r--r--include/net/tc_act/tc_vlan.h27
-rw-r--r--include/net/tcp.h25
-rw-r--r--include/net/udplite.h10
-rw-r--r--include/net/wpan-phy.h105
-rw-r--r--include/rdma/ib_umem.h34
-rw-r--r--include/rdma/ib_umem_odp.h160
-rw-r--r--include/rdma/ib_verbs.h54
-rw-r--r--include/scsi/libsas.h11
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/jack.h26
-rw-r--r--include/sound/omap-hdmi-audio.h43
-rw-r--r--include/sound/pcm.h254
-rw-r--r--include/sound/rcar_snd.h8
-rw-r--r--include/sound/rt5645.h4
-rw-r--r--include/sound/rt5677.h10
-rw-r--r--include/sound/seq_kernel.h4
-rw-r--r--include/sound/soc-dai.h7
-rw-r--r--include/sound/soc-dapm.h9
-rw-r--r--include/sound/soc.h116
-rw-r--r--include/sound/uda134x.h12
-rw-r--r--include/trace/events/asoc.h25
-rw-r--r--include/trace/events/ext4.h17
-rw-r--r--include/trace/events/host1x.h27
-rw-r--r--include/trace/events/sunrpc.h120
-rw-r--r--include/uapi/asm-generic/socket.h5
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/i915_drm.h7
-rw-r--r--include/uapi/linux/Kbuild95
-rw-r--r--include/uapi/linux/android/Kbuild2
-rw-r--r--include/uapi/linux/android/binder.h (renamed from drivers/staging/android/uapi/binder.h)1
-rw-r--r--include/uapi/linux/audit.h17
-rw-r--r--include/uapi/linux/bpf.h14
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/can/error.h1
-rw-r--r--include/uapi/linux/ethtool.h28
-rw-r--r--include/uapi/linux/hyperv.h1
-rw-r--r--include/uapi/linux/if_alg.h2
-rw-r--r--include/uapi/linux/if_bridge.h1
-rw-r--r--include/uapi/linux/if_link.h18
-rw-r--r--include/uapi/linux/if_tun.h18
-rw-r--r--include/uapi/linux/if_tunnel.h1
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kcmp.h (renamed from include/linux/kcmp.h)6
-rw-r--r--include/uapi/linux/kfd_ioctl.h154
-rw-r--r--include/uapi/linux/kvm.h11
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/media-bus-format.h125
-rw-r--r--include/uapi/linux/msg.h28
-rw-r--r--include/uapi/linux/neighbour.h6
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h8
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h18
-rw-r--r--include/uapi/linux/netfilter/xt_set.h13
-rw-r--r--include/uapi/linux/nfc.h23
-rw-r--r--include/uapi/linux/nl80211.h147
-rw-r--r--include/uapi/linux/nvme.h46
-rw-r--r--include/uapi/linux/openvswitch.h53
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/sem.h18
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/serial_reg.h1
-rw-r--r--include/uapi/linux/snmp.h5
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h35
-rw-r--r--include/uapi/linux/thermal.h35
-rw-r--r--include/uapi/linux/tipc_netlink.h244
-rw-r--r--include/uapi/linux/tty_flags.h19
-rw-r--r--include/uapi/linux/v4l2-common.h2
-rw-r--r--include/uapi/linux/v4l2-mediabus.h219
-rw-r--r--include/uapi/linux/v4l2-subdev.h6
-rw-r--r--include/uapi/linux/videodev2.h101
-rw-r--r--include/uapi/linux/virtio_balloon.h1
-rw-r--r--include/uapi/linux/virtio_blk.h15
-rw-r--r--include/uapi/linux/virtio_config.h9
-rw-r--r--include/uapi/linux/virtio_console.h7
-rw-r--r--include/uapi/linux/virtio_net.h15
-rw-r--r--include/uapi/linux/virtio_ring.h45
-rw-r--r--include/uapi/linux/virtio_scsi.h (renamed from include/linux/virtio_scsi.h)106
-rw-r--r--include/uapi/linux/virtio_types.h46
-rw-r--r--include/uapi/linux/vt.h3
-rw-r--r--include/uapi/rdma/ib_user_verbs.h29
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/uapi/sound/compress_offload.h17
-rw-r--r--include/uapi/sound/firewire.h3
-rw-r--r--include/uapi/sound/hdspm.h12
-rw-r--r--include/video/omapdss.h45
-rw-r--r--include/xen/interface/features.h3
-rw-r--r--include/xen/interface/grant_table.h19
-rw-r--r--init/main.c19
-rw-r--r--init/version.c5
-rw-r--r--ipc/Makefile2
-rw-r--r--ipc/ipc_sysctl.c93
-rw-r--r--ipc/ipcns_notifier.c92
-rw-r--r--ipc/msg.c36
-rw-r--r--ipc/msgutil.c5
-rw-r--r--ipc/namespace.c54
-rw-r--r--ipc/sem.c13
-rw-r--r--ipc/shm.c21
-rw-r--r--ipc/util.c40
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/audit_tree.c16
-rw-r--r--kernel/bpf/Makefile2
-rw-r--r--kernel/bpf/arraymap.c156
-rw-r--r--kernel/bpf/hashtab.c367
-rw-r--r--kernel/bpf/helpers.c89
-rw-r--r--kernel/bpf/syscall.c6
-rw-r--r--kernel/bpf/test_stub.c56
-rw-r--r--kernel/bpf/verifier.c171
-rw-r--r--kernel/cgroup.c175
-rw-r--r--kernel/cpuset.c162
-rw-r--r--kernel/events/uprobes.c8
-rw-r--r--kernel/exit.c21
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/gcov/Kconfig5
-rw-r--r--kernel/groups.c11
-rw-r--r--kernel/irq_work.c4
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kprobes.c18
-rw-r--r--kernel/nsproxy.c10
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/pid_namespace.c29
-rw-r--r--kernel/printk/printk.c10
-rw-r--r--kernel/stacktrace.c32
-rw-r--r--kernel/sys_ni.c5
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c1
-rw-r--r--kernel/trace/blktrace.c3
-rw-r--r--kernel/trace/trace.c27
-rw-r--r--kernel/trace/trace.h14
-rw-r--r--kernel/trace/trace_events.c42
-rw-r--r--kernel/trace/trace_syscalls.c7
-rw-r--r--kernel/uid16.c2
-rw-r--r--kernel/user.c6
-rw-r--r--kernel/user_namespace.c153
-rw-r--r--kernel/utsname.c31
-rw-r--r--kernel/workqueue.c30
-rw-r--r--lib/Kconfig.debug16
-rw-r--r--lib/Makefile2
-rw-r--r--lib/audit.c3
-rw-r--r--lib/bitmap.c53
-rw-r--r--lib/decompress.c4
-rw-r--r--lib/decompress_bunzip2.c2
-rw-r--r--lib/devres.c4
-rw-r--r--lib/fault-inject.c21
-rw-r--r--lib/hash.c39
-rw-r--r--lib/iovec.c25
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/rhashtable.c88
-rw-r--r--lib/show_mem.c6
-rw-r--r--lib/test_bpf.c53
-rw-r--r--mm/Kconfig.debug10
-rw-r--r--mm/Makefile2
-rw-r--r--mm/cma.c26
-rw-r--r--mm/debug-pagealloc.c45
-rw-r--r--mm/fadvise.c6
-rw-r--r--mm/filemap.c10
-rw-r--r--mm/filemap_xip.c23
-rw-r--r--mm/fremap.c6
-rw-r--r--mm/gup.c81
-rw-r--r--mm/huge_memory.c14
-rw-r--r--mm/hugetlb.c35
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memblock.c43
-rw-r--r--mm/memcontrol.c180
-rw-r--r--mm/memory-failure.c15
-rw-r--r--mm/memory.c26
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/migrate.c31
-rw-r--r--mm/mincore.c7
-rw-r--r--mm/mmap.c24
-rw-r--r--mm/mmu_notifier.c25
-rw-r--r--mm/mremap.c9
-rw-r--r--mm/nommu.c50
-rw-r--r--mm/oom_kill.c17
-rw-r--r--mm/page_alloc.c149
-rw-r--r--mm/page_ext.c403
-rw-r--r--mm/page_owner.c311
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/rmap.c20
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slub.c16
-rw-r--r--mm/vmacache.c2
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c221
-rw-r--r--mm/vmstat.c102
-rw-r--r--mm/zbud.c4
-rw-r--r--mm/zsmalloc.c390
-rw-r--r--mm/zswap.c11
-rw-r--r--net/6lowpan/iphc.c103
-rw-r--r--net/8021q/vlan_dev.c28
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile5
-rw-r--r--net/appletalk/ddp.c4
-rw-r--r--net/atm/common.c16
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/bluetooth/6lowpan.c307
-rw-r--r--net/bluetooth/Kconfig21
-rw-r--r--net/bluetooth/Makefile2
-rw-r--r--net/bluetooth/a2mp.c4
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/amp.c25
-rw-r--r--net/bluetooth/bnep/Kconfig2
-rw-r--r--net/bluetooth/cmtp/Kconfig2
-rw-r--r--net/bluetooth/ecc.c816
-rw-r--r--net/bluetooth/ecc.h54
-rw-r--r--net/bluetooth/hci_conn.c17
-rw-r--r--net/bluetooth/hci_core.c475
-rw-r--r--net/bluetooth/hci_event.c316
-rw-r--r--net/bluetooth/hci_sock.c6
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/l2cap_core.c105
-rw-r--r--net/bluetooth/l2cap_sock.c37
-rw-r--r--net/bluetooth/mgmt.c1081
-rw-r--r--net/bluetooth/rfcomm/Kconfig2
-rw-r--r--net/bluetooth/rfcomm/core.c12
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bluetooth/smp.c1632
-rw-r--r--net/bluetooth/smp.h60
-rw-r--r--net/bridge/br_fdb.c144
-rw-r--r--net/bridge/br_forward.c5
-rw-r--r--net/bridge/br_input.c60
-rw-r--r--net/bridge/br_netfilter.c1
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h20
-rw-r--r--net/bridge/br_stp.c7
-rw-r--r--net/bridge/br_sysfs_if.c2
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c88
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c52
-rw-r--r--net/caif/caif_socket.c10
-rw-r--r--net/can/af_can.c7
-rw-r--r--net/can/bcm.c37
-rw-r--r--net/can/gw.c2
-rw-r--r--net/can/raw.c8
-rw-r--r--net/ceph/auth_x.c76
-rw-r--r--net/ceph/auth_x.h1
-rw-r--r--net/ceph/buffer.c4
-rw-r--r--net/ceph/ceph_common.c21
-rw-r--r--net/ceph/messenger.c34
-rw-r--r--net/ceph/osd_client.c118
-rw-r--r--net/compat.c87
-rw-r--r--net/core/datagram.c301
-rw-r--r--net/core/dev.c172
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/dev_ioctl.c9
-rw-r--r--net/core/dst.c24
-rw-r--r--net/core/ethtool.c81
-rw-r--r--net/core/filter.c97
-rw-r--r--net/core/iovec.c47
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/core/neighbour.c279
-rw-r--r--net/core/net-sysfs.c44
-rw-r--r--net/core/net_namespace.c39
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/rtnetlink.c159
-rw-r--r--net/core/scm.c3
-rw-r--r--net/core/skbuff.c347
-rw-r--r--net/core/sock.c44
-rw-r--r--net/core/sysctl_net_core.c21
-rw-r--r--net/core/utils.c3
-rw-r--r--net/dccp/ackvec.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/feat.c4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/proto.c9
-rw-r--r--net/decnet/af_decnet.c4
-rw-r--r--net/decnet/dn_neigh.c4
-rw-r--r--net/dsa/Kconfig11
-rw-r--r--net/dsa/dsa.c171
-rw-r--r--net/dsa/slave.c90
-rw-r--r--net/dsa/tag_dsa.c2
-rw-r--r--net/dsa/tag_edsa.c2
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/ieee802154/6lowpan_rtnl.c149
-rw-r--r--net/ieee802154/Makefile4
-rw-r--r--net/ieee802154/af802154.h4
-rw-r--r--net/ieee802154/af_ieee802154.c8
-rw-r--r--net/ieee802154/core.c321
-rw-r--r--net/ieee802154/core.h46
-rw-r--r--net/ieee802154/dgram.c11
-rw-r--r--net/ieee802154/header_ops.c3
-rw-r--r--net/ieee802154/ieee802154.h6
-rw-r--r--net/ieee802154/netlink.c11
-rw-r--r--net/ieee802154/nl-mac.c269
-rw-r--r--net/ieee802154/nl-phy.c32
-rw-r--r--net/ieee802154/nl802154.c957
-rw-r--r--net/ieee802154/nl802154.h7
-rw-r--r--net/ieee802154/nl_policy.c4
-rw-r--r--net/ieee802154/raw.c9
-rw-r--r--net/ieee802154/rdev-ops.h89
-rw-r--r--net/ieee802154/reassembly.c8
-rw-r--r--net/ieee802154/reassembly.h4
-rw-r--r--net/ieee802154/sysfs.c128
-rw-r--r--net/ieee802154/sysfs.h9
-rw-r--r--net/ieee802154/wpan-class.c230
-rw-r--r--net/ipv4/Kconfig9
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/cipso_ipv4.c8
-rw-r--r--net/ipv4/esp4.c14
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/fou.c423
-rw-r--r--net/ipv4/geneve.c44
-rw-r--r--net/ipv4/gre_offload.c3
-rw-r--r--net/ipv4/icmp.c51
-rw-r--r--net/ipv4/igmp.c34
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ip_fragment.c19
-rw-r--r--net/ipv4/ip_gre.c11
-rw-r--r--net/ipv4/ip_output.c16
-rw-r--r--net/ipv4/ip_sockglue.c28
-rw-r--r--net/ipv4/ip_tunnel.c122
-rw-r--r--net/ipv4/ipconfig.c19
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/netfilter/Kconfig9
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/nf_log_arp.c24
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c12
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c1
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c77
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c7
-rw-r--r--net/ipv4/ping.c13
-rw-r--r--net/ipv4/proc.c11
-rw-r--r--net/ipv4/raw.c109
-rw-r--r--net/ipv4/syncookies.c86
-rw-r--r--net/ipv4/sysctl_net_ipv4.c7
-rw-r--r--net/ipv4/tcp.c75
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_cubic.c31
-rw-r--r--net/ipv4/tcp_input.c49
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/tcp_offload.c2
-rw-r--r--net/ipv4/tcp_output.c152
-rw-r--r--net/ipv4/tcp_timer.c18
-rw-r--r--net/ipv4/udp.c210
-rw-r--r--net/ipv4/udp_offload.c69
-rw-r--r--net/ipv6/addrconf.c74
-rw-r--r--net/ipv6/ah6.c14
-rw-r--r--net/ipv6/datagram.c31
-rw-r--r--net/ipv6/esp6.c17
-rw-r--r--net/ipv6/exthdrs.c20
-rw-r--r--net/ipv6/icmp.c31
-rw-r--r--net/ipv6/ip6_flowlabel.c13
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c22
-rw-r--r--net/ipv6/ip6_tunnel.c109
-rw-r--r--net/ipv6/ip6_vti.c31
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c6
-rw-r--r--net/ipv6/mcast.c6
-rw-r--r--net/ipv6/mip6.c11
-rw-r--r--net/ipv6/ndisc.c11
-rw-r--r--net/ipv6/netfilter.c2
-rw-r--r--net/ipv6/netfilter/Kconfig9
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c77
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c7
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/proc.c1
-rw-r--r--net/ipv6/raw.c124
-rw-r--r--net/ipv6/reassembly.c15
-rw-r--r--net/ipv6/route.c94
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/ipv6/syncookies.c13
-rw-r--r--net/ipv6/tcp_ipv6.c32
-rw-r--r--net/ipv6/udp.c170
-rw-r--r--net/ipv6/udp_offload.c4
-rw-r--r--net/ipx/af_ipx.c8
-rw-r--r--net/ipx/ipx_proc.c18
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/ipx/sysctl_net_ipx.c4
-rw-r--r--net/irda/af_irda.c253
-rw-r--r--net/irda/discovery.c12
-rw-r--r--net/irda/ircomm/ircomm_core.c55
-rw-r--r--net/irda/ircomm/ircomm_event.c28
-rw-r--r--net/irda/ircomm/ircomm_lmp.c30
-rw-r--r--net/irda/ircomm/ircomm_param.c52
-rw-r--r--net/irda/ircomm/ircomm_ttp.c30
-rw-r--r--net/irda/ircomm/ircomm_tty.c116
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c122
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c21
-rw-r--r--net/irda/irda_device.c34
-rw-r--r--net/irda/iriap.c119
-rw-r--r--net/irda/iriap_event.c42
-rw-r--r--net/irda/irias_object.c78
-rw-r--r--net/irda/irlan/irlan_client.c86
-rw-r--r--net/irda/irlan/irlan_client_event.c66
-rw-r--r--net/irda/irlan/irlan_common.c80
-rw-r--r--net/irda/irlan/irlan_eth.c26
-rw-r--r--net/irda/irlan/irlan_event.c4
-rw-r--r--net/irda/irlan/irlan_filter.c23
-rw-r--r--net/irda/irlan/irlan_provider.c42
-rw-r--r--net/irda/irlan/irlan_provider_event.c16
-rw-r--r--net/irda/irlap.c74
-rw-r--r--net/irda/irlap_event.c234
-rw-r--r--net/irda/irlap_frame.c117
-rw-r--r--net/irda/irlmp.c186
-rw-r--r--net/irda/irlmp_event.c133
-rw-r--r--net/irda/irlmp_frame.c56
-rw-r--r--net/irda/irmod.c12
-rw-r--r--net/irda/irnetlink.c4
-rw-r--r--net/irda/irqueue.c14
-rw-r--r--net/irda/irsysctl.c9
-rw-r--r--net/irda/irttp.c164
-rw-r--r--net/irda/parameters.c85
-rw-r--r--net/irda/qos.c89
-rw-r--r--net/irda/wrapper.c28
-rw-r--r--net/iucv/af_iucv.c8
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_eth.c1
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c4
-rw-r--r--net/l2tp/l2tp_ppp.c5
-rw-r--r--net/lapb/lapb_iface.c17
-rw-r--r--net/llc/af_llc.c5
-rw-r--r--net/llc/llc_c_st.c708
-rw-r--r--net/llc/llc_conn.c4
-rw-r--r--net/llc/llc_if.c2
-rw-r--r--net/llc/llc_s_st.c20
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/mac80211/Kconfig18
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-tx.c12
-rw-r--r--net/mac80211/cfg.c193
-rw-r--r--net/mac80211/chan.c32
-rw-r--r--net/mac80211/debug.h10
-rw-r--r--net/mac80211/debugfs_key.c12
-rw-r--r--net/mac80211/debugfs_sta.c11
-rw-r--r--net/mac80211/driver-ops.h157
-rw-r--r--net/mac80211/ieee80211_i.h122
-rw-r--r--net/mac80211/iface.c33
-rw-r--r--net/mac80211/key.c13
-rw-r--r--net/mac80211/main.c44
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_pathtbl.c31
-rw-r--r--net/mac80211/mlme.c334
-rw-r--r--net/mac80211/ocb.c250
-rw-r--r--net/mac80211/rate.c8
-rw-r--r--net/mac80211/rate.h24
-rw-r--r--net/mac80211/rc80211_minstrel.c7
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c334
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h40
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c43
-rw-r--r--net/mac80211/rx.c192
-rw-r--r--net/mac80211/scan.c129
-rw-r--r--net/mac80211/sta_info.c19
-rw-r--r--net/mac80211/sta_info.h11
-rw-r--r--net/mac80211/status.c171
-rw-r--r--net/mac80211/tdls.c800
-rw-r--r--net/mac80211/trace.h248
-rw-r--r--net/mac80211/tx.c316
-rw-r--r--net/mac80211/util.c203
-rw-r--r--net/mac80211/vht.c2
-rw-r--r--net/mac80211/wep.c2
-rw-r--r--net/mac80211/wme.c72
-rw-r--r--net/mac80211/wme.h2
-rw-r--r--net/mac80211/wpa.c5
-rw-r--r--net/mac802154/Kconfig2
-rw-r--r--net/mac802154/Makefile4
-rw-r--r--net/mac802154/cfg.c210
-rw-r--r--net/mac802154/cfg.h9
-rw-r--r--net/mac802154/driver-ops.h222
-rw-r--r--net/mac802154/ieee802154_dev.c415
-rw-r--r--net/mac802154/ieee802154_i.h (renamed from net/mac802154/mac802154.h)132
-rw-r--r--net/mac802154/iface.c586
-rw-r--r--net/mac802154/llsec.c23
-rw-r--r--net/mac802154/mac_cmd.c88
-rw-r--r--net/mac802154/main.c217
-rw-r--r--net/mac802154/mib.c271
-rw-r--r--net/mac802154/monitor.c117
-rw-r--r--net/mac802154/rx.c310
-rw-r--r--net/mac802154/tx.c154
-rw-r--r--net/mac802154/util.c84
-rw-r--r--net/mac802154/wpan.c599
-rw-r--r--net/mpls/mpls_gso.c3
-rw-r--r--net/netfilter/Kconfig17
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/core.c1
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h101
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c1
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_pe.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c11
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c14
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_log.c40
-rw-r--r--net/netfilter/nf_nat_redirect.c127
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/nfnetlink_log.c18
-rw-r--r--net/netfilter/nft_hash.c10
-rw-r--r--net/netfilter/nft_meta.c7
-rw-r--r--net/netfilter/nft_redir.c99
-rw-r--r--net/netfilter/xt_DSCP.c6
-rw-r--r--net/netfilter/xt_REDIRECT.c83
-rw-r--r--net/netfilter/xt_connlimit.c25
-rw-r--r--net/netfilter/xt_recent.c64
-rw-r--r--net/netfilter/xt_set.c73
-rw-r--r--net/netlink/af_netlink.c77
-rw-r--r--net/netrom/af_netrom.c4
-rw-r--r--net/nfc/digital_dep.c834
-rw-r--r--net/nfc/hci/command.c3
-rw-r--r--net/nfc/hci/core.c56
-rw-r--r--net/nfc/llcp_commands.c10
-rw-r--r--net/nfc/llcp_core.c5
-rw-r--r--net/nfc/llcp_sock.c8
-rw-r--r--net/nfc/nci/core.c150
-rw-r--r--net/nfc/nci/data.c24
-rw-r--r--net/nfc/nci/ntf.c152
-rw-r--r--net/nfc/netlink.c77
-rw-r--r--net/nfc/rawsock.c4
-rw-r--r--net/nonet.c26
-rw-r--r--net/openvswitch/Kconfig23
-rw-r--r--net/openvswitch/Makefile14
-rw-r--r--net/openvswitch/actions.c355
-rw-r--r--net/openvswitch/datapath.c346
-rw-r--r--net/openvswitch/datapath.h24
-rw-r--r--net/openvswitch/flow.c38
-rw-r--r--net/openvswitch/flow.h88
-rw-r--r--net/openvswitch/flow_netlink.c633
-rw-r--r--net/openvswitch/flow_netlink.h18
-rw-r--r--net/openvswitch/flow_table.c27
-rw-r--r--net/openvswitch/flow_table.h10
-rw-r--r--net/openvswitch/vport-geneve.c46
-rw-r--r--net/openvswitch/vport-gre.c47
-rw-r--r--net/openvswitch/vport-internal_dev.c22
-rw-r--r--net/openvswitch/vport-netdev.c16
-rw-r--r--net/openvswitch/vport-netdev.h3
-rw-r--r--net/openvswitch/vport-vxlan.c47
-rw-r--r--net/openvswitch/vport.c171
-rw-r--r--net/openvswitch/vport.h34
-rw-r--r--net/packet/af_packet.c80
-rw-r--r--net/phonet/af_phonet.c9
-rw-r--r--net/phonet/datagram.c4
-rw-r--r--net/phonet/pep-gprs.c3
-rw-r--r--net/phonet/pep.c16
-rw-r--r--net/rds/ib.h3
-rw-r--r--net/rds/ib_recv.c37
-rw-r--r--net/rds/iw.h3
-rw-r--r--net/rds/iw_recv.c37
-rw-r--r--net/rds/message.c78
-rw-r--r--net/rds/rds.h9
-rw-r--r--net/rds/recv.c6
-rw-r--r--net/rds/send.c6
-rw-r--r--net/rds/tcp.h3
-rw-r--r--net/rds/tcp_recv.c38
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/rfkill/rfkill-regulator.c1
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/rxrpc/ar-output.c10
-rw-r--r--net/rxrpc/ar-recvmsg.c2
-rw-r--r--net/sched/Kconfig18
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_ipt.c2
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_pedit.c2
-rw-r--r--net/sched/act_police.c2
-rw-r--r--net/sched/act_simple.c2
-rw-r--r--net/sched/act_vlan.c207
-rw-r--r--net/sched/cls_basic.c26
-rw-r--r--net/sched/cls_bpf.c26
-rw-r--r--net/sched/cls_cgroup.c14
-rw-r--r--net/sched/cls_flow.c14
-rw-r--r--net/sched/cls_fw.c8
-rw-r--r--net/sched/cls_route.c8
-rw-r--r--net/sched/cls_rsvp.h8
-rw-r--r--net/sched/cls_tcindex.c15
-rw-r--r--net/sched/cls_u32.c5
-rw-r--r--net/sched/sch_fq.c9
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_netem.c27
-rw-r--r--net/sctp/chunk.c9
-rw-r--r--net/sctp/output.c4
-rw-r--r--net/sctp/proc.c12
-rw-r--r--net/sctp/sm_make_chunk.c20
-rw-r--r--net/sctp/socket.c19
-rw-r--r--net/sctp/ulpqueue.c5
-rw-r--r--net/socket.c200
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/cache.c26
-rw-r--r--net/sunrpc/svc.c42
-rw-r--r--net/sunrpc/svc_xprt.c292
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/xdr.c9
-rw-r--r--net/switchdev/Kconfig13
-rw-r--r--net/switchdev/Makefile5
-rw-r--r--net/switchdev/switchdev.c52
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/bcast.c230
-rw-r--r--net/tipc/bcast.h6
-rw-r--r--net/tipc/bearer.c447
-rw-r--r--net/tipc/bearer.h16
-rw-r--r--net/tipc/core.h2
-rw-r--r--net/tipc/link.c981
-rw-r--r--net/tipc/link.h55
-rw-r--r--net/tipc/msg.c133
-rw-r--r--net/tipc/msg.h16
-rw-r--r--net/tipc/name_distr.c181
-rw-r--r--net/tipc/name_distr.h1
-rw-r--r--net/tipc/name_table.c373
-rw-r--r--net/tipc/name_table.h30
-rw-r--r--net/tipc/net.c106
-rw-r--r--net/tipc/net.h8
-rw-r--r--net/tipc/netlink.c133
-rw-r--r--net/tipc/netlink.h (renamed from net/tipc/node_subscr.h)35
-rw-r--r--net/tipc/node.c108
-rw-r--r--net/tipc/node.h16
-rw-r--r--net/tipc/node_subscr.c96
-rw-r--r--net/tipc/socket.c419
-rw-r--r--net/tipc/socket.h3
-rw-r--r--net/tipc/subscr.c1
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/vmw_vsock/af_vsock.c6
-rw-r--r--net/vmw_vsock/vmci_transport.c18
-rw-r--r--net/wireless/Kconfig2
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/chan.c19
-rw-r--r--net/wireless/core.c95
-rw-r--r--net/wireless/core.h13
-rw-r--r--net/wireless/nl80211.c964
-rw-r--r--net/wireless/ocb.c88
-rw-r--r--net/wireless/rdev-ops.h79
-rw-r--r--net/wireless/reg.c174
-rw-r--r--net/wireless/sme.c13
-rw-r--r--net/wireless/trace.h155
-rw-r--r--net/wireless/util.c5
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/xfrm/xfrm_policy.c52
-rw-r--r--net/xfrm/xfrm_user.c12
-rw-r--r--samples/bpf/Makefile23
-rw-r--r--samples/bpf/bpf_helpers.h40
-rw-r--r--samples/bpf/bpf_load.c203
-rw-r--r--samples/bpf/bpf_load.h24
-rw-r--r--samples/bpf/libbpf.c31
-rw-r--r--samples/bpf/libbpf.h17
-rw-r--r--samples/bpf/sock_example.c101
-rw-r--r--samples/bpf/sockex1_kern.c25
-rw-r--r--samples/bpf/sockex1_user.c49
-rw-r--r--samples/bpf/sockex2_kern.c215
-rw-r--r--samples/bpf/sockex2_user.c44
-rw-r--r--samples/bpf/test_maps.c291
-rw-r--r--samples/bpf/test_verifier.c60
-rw-r--r--scripts/checkkconfigsymbols.py139
-rwxr-xr-xscripts/checkkconfigsymbols.sh59
-rw-r--r--scripts/coccinelle/api/platform_no_drv_owner.cocci106
-rw-r--r--scripts/kconfig/list.h6
-rw-r--r--scripts/recordmcount.c2
-rwxr-xr-xscripts/recordmcount.pl2
-rw-r--r--security/integrity/digsig.c38
-rw-r--r--security/integrity/evm/evm_main.c11
-rw-r--r--security/integrity/iint.c88
-rw-r--r--security/integrity/ima/Kconfig27
-rw-r--r--security/integrity/ima/ima_api.c7
-rw-r--r--security/integrity/ima/ima_crypto.c35
-rw-r--r--security/integrity/ima/ima_fs.c37
-rw-r--r--security/integrity/ima/ima_init.c17
-rw-r--r--security/integrity/ima/ima_main.c5
-rw-r--r--security/integrity/ima/ima_policy.c42
-rw-r--r--security/integrity/ima/ima_template.c103
-rw-r--r--security/integrity/integrity.h19
-rw-r--r--security/keys/encrypted-keys/encrypted.c5
-rw-r--r--security/keys/key.c10
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/smack/smack_access.c17
-rw-r--r--security/smack/smack_lsm.c13
-rw-r--r--sound/aoa/codecs/onyx.c8
-rw-r--r--sound/aoa/codecs/tas.c10
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c6
-rw-r--r--sound/arm/pxa2xx-ac97.c1
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c4
-rw-r--r--sound/atmel/abdac.c19
-rw-r--r--sound/atmel/ac97c.c13
-rw-r--r--sound/core/Makefile3
-rw-r--r--sound/core/control.c71
-rw-r--r--sound/core/init.c33
-rw-r--r--sound/core/pcm.c48
-rw-r--r--sound/core/pcm_lib.c148
-rw-r--r--sound/core/pcm_native.c214
-rw-r--r--sound/core/pcm_trace.h110
-rw-r--r--sound/core/seq/oss/seq_oss_init.c9
-rw-r--r--sound/core/seq/seq.c5
-rw-r--r--sound/core/seq/seq_device.c103
-rw-r--r--sound/core/sgbuf.c3
-rw-r--r--sound/core/sound.c9
-rw-r--r--sound/drivers/aloop.c1
-rw-r--r--sound/drivers/dummy.c1
-rw-r--r--sound/drivers/ml403-ac97cr.c1
-rw-r--r--sound/drivers/mpu401/mpu401.c1
-rw-r--r--sound/drivers/mtpav.c1
-rw-r--r--sound/drivers/mts64.c19
-rw-r--r--sound/drivers/pcsp/pcsp.c1
-rw-r--r--sound/drivers/portman2x4.c1
-rw-r--r--sound/drivers/serial-u16550.c1
-rw-r--r--sound/drivers/virmidi.c22
-rw-r--r--sound/drivers/vx/vx_core.c10
-rw-r--r--sound/drivers/vx/vx_mixer.c35
-rw-r--r--sound/firewire/Kconfig26
-rw-r--r--sound/firewire/Makefile7
-rw-r--r--sound/firewire/amdtp.c8
-rw-r--r--sound/firewire/amdtp.h24
-rw-r--r--sound/firewire/bebob/bebob.h4
-rw-r--r--sound/firewire/bebob/bebob_focusrite.c10
-rw-r--r--sound/firewire/bebob/bebob_maudio.c59
-rw-r--r--sound/firewire/bebob/bebob_terratec.c4
-rw-r--r--sound/firewire/bebob/bebob_yamaha.c2
-rw-r--r--sound/firewire/cmp.c2
-rw-r--r--sound/firewire/dice.c1511
-rw-r--r--sound/firewire/dice/Makefile3
-rw-r--r--sound/firewire/dice/dice-hwdep.c190
-rw-r--r--sound/firewire/dice/dice-interface.h (renamed from sound/firewire/dice-interface.h)0
-rw-r--r--sound/firewire/dice/dice-midi.c157
-rw-r--r--sound/firewire/dice/dice-pcm.c422
-rw-r--r--sound/firewire/dice/dice-proc.c252
-rw-r--r--sound/firewire/dice/dice-stream.c407
-rw-r--r--sound/firewire/dice/dice-transaction.c382
-rw-r--r--sound/firewire/dice/dice.c361
-rw-r--r--sound/firewire/dice/dice.h189
-rw-r--r--sound/firewire/isight.c10
-rw-r--r--sound/firewire/oxfw/Makefile3
-rw-r--r--sound/firewire/oxfw/oxfw-command.c153
-rw-r--r--sound/firewire/oxfw/oxfw-control.c283
-rw-r--r--sound/firewire/oxfw/oxfw-hwdep.c190
-rw-r--r--sound/firewire/oxfw/oxfw-midi.c207
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c424
-rw-r--r--sound/firewire/oxfw/oxfw-proc.c113
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c685
-rw-r--r--sound/firewire/oxfw/oxfw.c317
-rw-r--r--sound/firewire/oxfw/oxfw.h146
-rw-r--r--sound/firewire/speakers.c792
-rw-r--r--sound/i2c/other/ak4xxx-adda.c26
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c10
-rw-r--r--sound/isa/es1688/es1688_lib.c13
-rw-r--r--sound/isa/es18xx.c78
-rw-r--r--sound/isa/msnd/msnd_pinnacle_mixer.c11
-rw-r--r--sound/isa/sb/emu8000_synth.c3
-rw-r--r--sound/isa/sb/sb16_main.c10
-rw-r--r--sound/isa/sb/sb_common.c3
-rw-r--r--sound/isa/sb/sb_mixer.c31
-rw-r--r--sound/isa/wss/wss_lib.c16
-rw-r--r--sound/mips/hal2.c1
-rw-r--r--sound/mips/sgio2audio.c12
-rw-r--r--sound/oss/dmasound/dmasound_paula.c1
-rw-r--r--sound/oss/uart401.c11
-rw-r--r--sound/parisc/harmony.c12
-rw-r--r--sound/pci/ac97/ac97_codec.c10
-rw-r--r--sound/pci/ac97/ac97_patch.c198
-rw-r--r--sound/pci/ac97/ac97_patch.h2
-rw-r--r--sound/pci/asihpi/asihpi.c377
-rw-r--r--sound/pci/asihpi/hpi.h16
-rw-r--r--sound/pci/asihpi/hpi6205.c43
-rw-r--r--sound/pci/asihpi/hpi_internal.h20
-rw-r--r--sound/pci/asihpi/hpicmn.c107
-rw-r--r--sound/pci/asihpi/hpicmn.h19
-rw-r--r--sound/pci/asihpi/hpimsginit.c30
-rw-r--r--sound/pci/asihpi/hpimsgx.c15
-rw-r--r--sound/pci/asihpi/hpioctl.c126
-rw-r--r--sound/pci/asihpi/hpios.h8
-rw-r--r--sound/pci/atiixp.c4
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0.c35
-rw-r--r--sound/pci/au88x0/au88x0.h4
-rw-r--r--sound/pci/au88x0/au88x0_a3d.c21
-rw-r--r--sound/pci/au88x0/au88x0_core.c102
-rw-r--r--sound/pci/au88x0/au88x0_eq.c3
-rw-r--r--sound/pci/au88x0/au88x0_game.c3
-rw-r--r--sound/pci/au88x0/au88x0_mpu401.c2
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c8
-rw-r--r--sound/pci/au88x0/au88x0_synth.c17
-rw-r--r--sound/pci/aw2/aw2-alsa.c13
-rw-r--r--sound/pci/azt3328.c13
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c40
-rw-r--r--sound/pci/ctxfi/ctatc.c10
-rw-r--r--sound/pci/ctxfi/ctdaio.c30
-rw-r--r--sound/pci/ctxfi/cttimer.c4
-rw-r--r--sound/pci/echoaudio/darla20_dsp.c5
-rw-r--r--sound/pci/echoaudio/darla24_dsp.c13
-rw-r--r--sound/pci/echoaudio/echo3g_dsp.c5
-rw-r--r--sound/pci/echoaudio/echoaudio.c145
-rw-r--r--sound/pci/echoaudio/echoaudio.h31
-rw-r--r--sound/pci/echoaudio/echoaudio_3g.c27
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c109
-rw-r--r--sound/pci/echoaudio/echoaudio_gml.c11
-rw-r--r--sound/pci/echoaudio/gina20_dsp.c9
-rw-r--r--sound/pci/echoaudio/gina24_dsp.c30
-rw-r--r--sound/pci/echoaudio/indigo_dsp.c11
-rw-r--r--sound/pci/echoaudio/indigo_express_dsp.c6
-rw-r--r--sound/pci/echoaudio/indigodj_dsp.c11
-rw-r--r--sound/pci/echoaudio/indigodjx_dsp.c5
-rw-r--r--sound/pci/echoaudio/indigoio_dsp.c8
-rw-r--r--sound/pci/echoaudio/indigoiox_dsp.c5
-rw-r--r--sound/pci/echoaudio/layla20_dsp.c28
-rw-r--r--sound/pci/echoaudio/layla24_dsp.c26
-rw-r--r--sound/pci/echoaudio/mia_dsp.c15
-rw-r--r--sound/pci/echoaudio/midi.c23
-rw-r--r--sound/pci/echoaudio/mona_dsp.c29
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c9
-rw-r--r--sound/pci/emu10k1/emu10k1x.c2
-rw-r--r--sound/pci/emu10k1/emufx.c3
-rw-r--r--sound/pci/emu10k1/emumixer.c58
-rw-r--r--sound/pci/emu10k1/p16v.c20
-rw-r--r--sound/pci/es1938.c10
-rw-r--r--sound/pci/es1968.c15
-rw-r--r--sound/pci/fm801.c10
-rw-r--r--sound/pci/hda/hda_auto_parser.c66
-rw-r--r--sound/pci/hda/hda_beep.c38
-rw-r--r--sound/pci/hda/hda_codec.c298
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c181
-rw-r--r--sound/pci/hda/hda_intel.c150
-rw-r--r--sound/pci/hda/hda_jack.c60
-rw-r--r--sound/pci/hda/hda_jack.h5
-rw-r--r--sound/pci/hda/hda_priv.h16
-rw-r--r--sound/pci/hda/hda_sysfs.c35
-rw-r--r--sound/pci/hda/patch_analog.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c11
-rw-r--r--sound/pci/hda/patch_realtek.c109
-rw-r--r--sound/pci/ice1712/aureon.c46
-rw-r--r--sound/pci/ice1712/ews.c16
-rw-r--r--sound/pci/ice1712/hoontech.c6
-rw-r--r--sound/pci/ice1712/ice1712.c52
-rw-r--r--sound/pci/ice1712/ice1724.c18
-rw-r--r--sound/pci/ice1712/juli.c5
-rw-r--r--sound/pci/ice1712/maya44.c20
-rw-r--r--sound/pci/ice1712/phase.c12
-rw-r--r--sound/pci/ice1712/pontis.c8
-rw-r--r--sound/pci/ice1712/prodigy192.c22
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c11
-rw-r--r--sound/pci/ice1712/quartet.c32
-rw-r--r--sound/pci/ice1712/revo.c4
-rw-r--r--sound/pci/ice1712/se.c9
-rw-r--r--sound/pci/korg1212/korg1212.c26
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/lola/lola_mixer.c3
-rw-r--r--sound/pci/lx6464es/lx_defs.h2
-rw-r--r--sound/pci/mixart/mixart_hwdep.c9
-rw-r--r--sound/pci/pcxhr/pcxhr.c54
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c10
-rw-r--r--sound/pci/pcxhr/pcxhr_mixer.c18
-rw-r--r--sound/pci/rme32.c34
-rw-r--r--sound/pci/rme96.c62
-rw-r--r--sound/pci/rme9652/hdsp.c178
-rw-r--r--sound/pci/rme9652/hdspm.c84
-rw-r--r--sound/pci/rme9652/rme9652.c58
-rw-r--r--sound/pci/sonicvibes.c10
-rw-r--r--sound/pci/trident/trident_main.c3
-rw-r--r--sound/pci/via82xx.c10
-rw-r--r--sound/pci/vx222/vx222_ops.c5
-rw-r--r--sound/pcmcia/vx/vxpocket.c1
-rw-r--r--sound/ppc/pmac.c3
-rw-r--r--sound/ppc/powermac.c1
-rw-r--r--sound/ppc/tumbler.c11
-rw-r--r--sound/sh/aica.c1
-rw-r--r--sound/sh/sh_dac_audio.c1
-rw-r--r--sound/soc/Makefile6
-rw-r--r--sound/soc/adi/axi-i2s.c1
-rw-r--r--sound/soc/adi/axi-spdif.c1
-rw-r--r--sound/soc/atmel/Kconfig9
-rw-r--r--sound/soc/atmel/Makefile1
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c4
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/atmel/atmel_wm8904.c1
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c1
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c1
-rw-r--r--sound/soc/atmel/snd-soc-afeb9260.c151
-rw-r--r--sound/soc/au1x/ac97c.c3
-rw-r--r--sound/soc/au1x/db1000.c1
-rw-r--r--sound/soc/au1x/db1200.c1
-rw-r--r--sound/soc/au1x/dbdma2.c1
-rw-r--r--sound/soc/au1x/dma.c1
-rw-r--r--sound/soc/au1x/i2sc.c1
-rw-r--r--sound/soc/au1x/psc-ac97.c3
-rw-r--r--sound/soc/au1x/psc-i2s.c1
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c1
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c1
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c3
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c1
-rw-r--r--sound/soc/blackfin/bf5xx-ad1980.c2
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c1
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c1
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c1
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1373.c1
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1701.c1
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1x61.c1
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1x81.c1
-rw-r--r--sound/soc/blackfin/bfin-eval-adav80x.c1
-rw-r--r--sound/soc/cirrus/Kconfig3
-rw-r--r--sound/soc/cirrus/edb93xx.c1
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c3
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c1
-rw-r--r--sound/soc/cirrus/simone.c1
-rw-r--r--sound/soc/cirrus/snappercl15.c1
-rw-r--r--sound/soc/codecs/88pm860x-codec.c1
-rw-r--r--sound/soc/codecs/Kconfig47
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ab8500-codec.c33
-rw-r--r--sound/soc/codecs/ac97.c19
-rw-r--r--sound/soc/codecs/ad193x.c14
-rw-r--r--sound/soc/codecs/ad1980.c213
-rw-r--r--sound/soc/codecs/ad1980.h26
-rw-r--r--sound/soc/codecs/ad73311.c1
-rw-r--r--sound/soc/codecs/adau1373.c6
-rw-r--r--sound/soc/codecs/adau1701.c86
-rw-r--r--sound/soc/codecs/adau1761.c25
-rw-r--r--sound/soc/codecs/adau1781.c33
-rw-r--r--sound/soc/codecs/adau17x1.c71
-rw-r--r--sound/soc/codecs/adau17x1.h10
-rw-r--r--sound/soc/codecs/adav80x.c4
-rw-r--r--sound/soc/codecs/ads117x.c1
-rw-r--r--sound/soc/codecs/ak4535.c31
-rw-r--r--sound/soc/codecs/ak4554.c1
-rw-r--r--sound/soc/codecs/ak4641.c33
-rw-r--r--sound/soc/codecs/ak4642.c16
-rw-r--r--sound/soc/codecs/ak4671.c13
-rw-r--r--sound/soc/codecs/ak5386.c1
-rw-r--r--sound/soc/codecs/alc5623.c22
-rw-r--r--sound/soc/codecs/alc5632.c22
-rw-r--r--sound/soc/codecs/arizona.c34
-rw-r--r--sound/soc/codecs/bt-sco.c1
-rw-r--r--sound/soc/codecs/cq93vc.c34
-rw-r--r--sound/soc/codecs/cs4265.c2
-rw-r--r--sound/soc/codecs/cs4271-i2c.c62
-rw-r--r--sound/soc/codecs/cs4271-spi.c55
-rw-r--r--sound/soc/codecs/cs4271.c155
-rw-r--r--sound/soc/codecs/cs4271.h11
-rw-r--r--sound/soc/codecs/cs42l51.c6
-rw-r--r--sound/soc/codecs/cs42l73.c6
-rw-r--r--sound/soc/codecs/cx20442.c1
-rw-r--r--sound/soc/codecs/dmic.c1
-rw-r--r--sound/soc/codecs/hdmi.c3
-rw-r--r--sound/soc/codecs/jz4740.c1
-rw-r--r--sound/soc/codecs/lm49453.c8
-rw-r--r--sound/soc/codecs/max98088.c31
-rw-r--r--sound/soc/codecs/max98090.c201
-rw-r--r--sound/soc/codecs/max98090.h8
-rw-r--r--sound/soc/codecs/max98095.c23
-rw-r--r--sound/soc/codecs/max9850.c22
-rw-r--r--sound/soc/codecs/mc13783.c1
-rw-r--r--sound/soc/codecs/pcm3008.c1
-rw-r--r--sound/soc/codecs/rt286.c231
-rw-r--r--sound/soc/codecs/rt5631.c38
-rw-r--r--sound/soc/codecs/rt5645.c200
-rw-r--r--sound/soc/codecs/rt5645.h19
-rw-r--r--sound/soc/codecs/rt5670.c136
-rw-r--r--sound/soc/codecs/rt5670.h6
-rw-r--r--sound/soc/codecs/rt5677-spi.c130
-rw-r--r--sound/soc/codecs/rt5677-spi.h21
-rw-r--r--sound/soc/codecs/rt5677.c1198
-rw-r--r--sound/soc/codecs/rt5677.h162
-rw-r--r--sound/soc/codecs/sgtl5000.c111
-rw-r--r--sound/soc/codecs/si476x.c1
-rw-r--r--sound/soc/codecs/sigmadsp-i2c.c81
-rw-r--r--sound/soc/codecs/sigmadsp-regmap.c46
-rw-r--r--sound/soc/codecs/sigmadsp.c711
-rw-r--r--sound/soc/codecs/sigmadsp.h59
-rw-r--r--sound/soc/codecs/sirf-audio-codec.c7
-rw-r--r--sound/soc/codecs/sn95031.c16
-rw-r--r--sound/soc/codecs/spdif_receiver.c1
-rw-r--r--sound/soc/codecs/spdif_transmitter.c1
-rw-r--r--sound/soc/codecs/ssm4567.c128
-rw-r--r--sound/soc/codecs/sta32x.c21
-rw-r--r--sound/soc/codecs/sta350.c21
-rw-r--r--sound/soc/codecs/sta529.c35
-rw-r--r--sound/soc/codecs/stac9766.c61
-rw-r--r--sound/soc/codecs/tas2552.c10
-rw-r--r--sound/soc/codecs/tfa9879.c328
-rw-r--r--sound/soc/codecs/tfa9879.h202
-rw-r--r--sound/soc/codecs/tlv320aic23.c21
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c31
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c24
-rw-r--r--sound/soc/codecs/tlv320aic3x.c228
-rw-r--r--sound/soc/codecs/tlv320aic3x.h1
-rw-r--r--sound/soc/codecs/tlv320dac33.c2
-rw-r--r--sound/soc/codecs/ts3a227e.c314
-rw-r--r--sound/soc/codecs/ts3a227e.h17
-rw-r--r--sound/soc/codecs/twl4030.c3
-rw-r--r--sound/soc/codecs/twl6040.c24
-rw-r--r--sound/soc/codecs/uda134x.c33
-rw-r--r--sound/soc/codecs/uda1380.c20
-rw-r--r--sound/soc/codecs/wl1273.c11
-rw-r--r--sound/soc/codecs/wm5102.c19
-rw-r--r--sound/soc/codecs/wm5110.c1
-rw-r--r--sound/soc/codecs/wm8350.c22
-rw-r--r--sound/soc/codecs/wm8400.c35
-rw-r--r--sound/soc/codecs/wm8510.c26
-rw-r--r--sound/soc/codecs/wm8523.c29
-rw-r--r--sound/soc/codecs/wm8580.c4
-rw-r--r--sound/soc/codecs/wm8711.c27
-rw-r--r--sound/soc/codecs/wm8727.c1
-rw-r--r--sound/soc/codecs/wm8728.c34
-rw-r--r--sound/soc/codecs/wm8731.c37
-rw-r--r--sound/soc/codecs/wm8737.c49
-rw-r--r--sound/soc/codecs/wm8750.c25
-rw-r--r--sound/soc/codecs/wm8776.c31
-rw-r--r--sound/soc/codecs/wm8782.c1
-rw-r--r--sound/soc/codecs/wm8804.c3
-rw-r--r--sound/soc/codecs/wm8900.c8
-rw-r--r--sound/soc/codecs/wm8903.c43
-rw-r--r--sound/soc/codecs/wm8940.c22
-rw-r--r--sound/soc/codecs/wm8955.c33
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c12
-rw-r--r--sound/soc/codecs/wm8960.c113
-rw-r--r--sound/soc/codecs/wm8961.c34
-rw-r--r--sound/soc/codecs/wm8962.c11
-rw-r--r--sound/soc/codecs/wm8974.c25
-rw-r--r--sound/soc/codecs/wm8978.c10
-rw-r--r--sound/soc/codecs/wm8983.c27
-rw-r--r--sound/soc/codecs/wm8985.c28
-rw-r--r--sound/soc/codecs/wm8988.c27
-rw-r--r--sound/soc/codecs/wm8990.c24
-rw-r--r--sound/soc/codecs/wm8991.c32
-rw-r--r--sound/soc/codecs/wm8993.c12
-rw-r--r--sound/soc/codecs/wm8994.c5
-rw-r--r--sound/soc/codecs/wm8994.h2
-rw-r--r--sound/soc/codecs/wm8995.c17
-rw-r--r--sound/soc/codecs/wm8997.c1
-rw-r--r--sound/soc/codecs/wm9081.c7
-rw-r--r--sound/soc/codecs/wm9090.c32
-rw-r--r--sound/soc/codecs/wm9705.c47
-rw-r--r--sound/soc/codecs/wm9712.c220
-rw-r--r--sound/soc/codecs/wm9713.c231
-rw-r--r--sound/soc/codecs/wm_adsp.c97
-rw-r--r--sound/soc/davinci/davinci-evm.c1
-rw-r--r--sound/soc/davinci/davinci-i2s.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c340
-rw-r--r--sound/soc/davinci/davinci-mcasp.h17
-rw-r--r--sound/soc/davinci/davinci-vcif.c1
-rw-r--r--sound/soc/dwc/designware_i2s.c47
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c6
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c19
-rw-r--r--sound/soc/fsl/fsl_dma.c10
-rw-r--r--sound/soc/fsl/fsl_esai.c13
-rw-r--r--sound/soc/fsl/fsl_sai.c1
-rw-r--r--sound/soc/fsl/fsl_spdif.c1
-rw-r--r--sound/soc/fsl/fsl_ssi.c18
-rw-r--r--sound/soc/fsl/imx-audmux.c1
-rw-r--r--sound/soc/fsl/imx-mc13783.c1
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c7
-rw-r--r--sound/soc/fsl/imx-spdif.c4
-rw-r--r--sound/soc/fsl/imx-ssi.c3
-rw-r--r--sound/soc/fsl/imx-wm8962.c7
-rw-r--r--sound/soc/fsl/mpc5200_dma.c3
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c7
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c1
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c1
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c1
-rw-r--r--sound/soc/fsl/p1022_ds.c1
-rw-r--r--sound/soc/fsl/p1022_rdk.c1
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c1
-rw-r--r--sound/soc/generic/simple-card.c161
-rw-r--r--sound/soc/intel/Kconfig44
-rw-r--r--sound/soc/intel/Makefile7
-rw-r--r--sound/soc/intel/broadwell.c65
-rw-r--r--sound/soc/intel/byt-max98090.c1
-rw-r--r--sound/soc/intel/byt-rt5640.c1
-rw-r--r--sound/soc/intel/bytcr_dpcm_rt5640.c230
-rw-r--r--sound/soc/intel/cht_bsw_rt5672.c285
-rw-r--r--sound/soc/intel/haswell.c15
-rw-r--r--sound/soc/intel/mfld_machine.c1
-rw-r--r--sound/soc/intel/sst-acpi.c1
-rw-r--r--sound/soc/intel/sst-atom-controls.c1208
-rw-r--r--sound/soc/intel/sst-atom-controls.h428
-rw-r--r--sound/soc/intel/sst-baytrail-dsp.c24
-rw-r--r--sound/soc/intel/sst-baytrail-pcm.c1
-rw-r--r--sound/soc/intel/sst-dsp-priv.h136
-rw-r--r--sound/soc/intel/sst-dsp.c31
-rw-r--r--sound/soc/intel/sst-dsp.h28
-rw-r--r--sound/soc/intel/sst-firmware.c937
-rw-r--r--sound/soc/intel/sst-haswell-dsp.c295
-rw-r--r--sound/soc/intel/sst-haswell-ipc.c413
-rw-r--r--sound/soc/intel/sst-haswell-ipc.h25
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c419
-rw-r--r--sound/soc/intel/sst-mfld-platform-compress.c8
-rw-r--r--sound/soc/intel/sst-mfld-platform-pcm.c155
-rw-r--r--sound/soc/intel/sst-mfld-platform.h5
-rw-r--r--sound/soc/intel/sst/Makefile7
-rw-r--r--sound/soc/intel/sst/sst.c437
-rw-r--r--sound/soc/intel/sst/sst.h546
-rw-r--r--sound/soc/intel/sst/sst_acpi.c383
-rw-r--r--sound/soc/intel/sst/sst_drv_interface.c686
-rw-r--r--sound/soc/intel/sst/sst_ipc.c373
-rw-r--r--sound/soc/intel/sst/sst_loader.c456
-rw-r--r--sound/soc/intel/sst/sst_pci.c209
-rw-r--r--sound/soc/intel/sst/sst_pvt.c449
-rw-r--r--sound/soc/intel/sst/sst_stream.c437
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c1
-rw-r--r--sound/soc/jz4740/qi_lb60.c12
-rw-r--r--sound/soc/kirkwood/armada-370-db.c1
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c1
-rw-r--r--sound/soc/mxs/mxs-saif.c3
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c8
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c1
-rw-r--r--sound/soc/omap/Kconfig34
-rw-r--r--sound/soc/omap/Makefile6
-rw-r--r--sound/soc/omap/ams-delta.c1
-rw-r--r--sound/soc/omap/mcbsp.c3
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c1
-rw-r--r--sound/soc/omap/omap-dmic.c1
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c407
-rw-r--r--sound/soc/omap/omap-hdmi-card.c87
-rw-r--r--sound/soc/omap/omap-hdmi.c364
-rw-r--r--sound/soc/omap/omap-hdmi.h38
-rw-r--r--sound/soc/omap/omap-mcbsp.c1
-rw-r--r--sound/soc/omap/omap-mcpdm.c1
-rw-r--r--sound/soc/omap/omap-twl4030.c1
-rw-r--r--sound/soc/omap/rx51.c1
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/corgi.c1
-rw-r--r--sound/soc/pxa/e740_wm9705.c1
-rw-r--r--sound/soc/pxa/e750_wm9705.c1
-rw-r--r--sound/soc/pxa/e800_wm9712.c1
-rw-r--r--sound/soc/pxa/hx4700.c1
-rw-r--r--sound/soc/pxa/imote2.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c8
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/poodle.c1
-rw-r--r--sound/soc/pxa/pxa-ssp.c17
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c7
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c1
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/pxa/spitz.c52
-rw-r--r--sound/soc/pxa/tosa.c1
-rw-r--r--sound/soc/pxa/ttc-dkb.c1
-rw-r--r--sound/soc/rockchip/Kconfig9
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c1
-rw-r--r--sound/soc/samsung/Kconfig8
-rw-r--r--sound/soc/samsung/Makefile2
-rw-r--r--sound/soc/samsung/ac97.c5
-rw-r--r--sound/soc/samsung/arndale_rt5631.c150
-rw-r--r--sound/soc/samsung/bells.c1
-rw-r--r--sound/soc/samsung/i2s-regs.h10
-rw-r--r--sound/soc/samsung/i2s.c245
-rw-r--r--sound/soc/samsung/idma.c1
-rw-r--r--sound/soc/samsung/littlemill.c1
-rw-r--r--sound/soc/samsung/lowland.c1
-rw-r--r--sound/soc/samsung/odroidx2_max98090.c5
-rw-r--r--sound/soc/samsung/pcm.c1
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c1
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c1
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_hermes.c1
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c1
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c1
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c1
-rw-r--r--sound/soc/samsung/snow.c1
-rw-r--r--sound/soc/samsung/spdif.c1
-rw-r--r--sound/soc/samsung/speyside.c1
-rw-r--r--sound/soc/samsung/tobermory.c1
-rw-r--r--sound/soc/sh/dma-sh7760.c1
-rw-r--r--sound/soc/sh/fsi.c9
-rw-r--r--sound/soc/sh/hac.c3
-rw-r--r--sound/soc/sh/rcar/adg.c2
-rw-r--r--sound/soc/sh/rcar/core.c236
-rw-r--r--sound/soc/sh/rcar/dvc.c215
-rw-r--r--sound/soc/sh/rcar/gen.c30
-rw-r--r--sound/soc/sh/rcar/rsnd.h92
-rw-r--r--sound/soc/sh/rcar/src.c101
-rw-r--r--sound/soc/sh/rcar/ssi.c233
-rw-r--r--sound/soc/sh/siu_dai.c1
-rw-r--r--sound/soc/sh/ssi.c1
-rw-r--r--sound/soc/sirf/sirf-audio-port.c1
-rw-r--r--sound/soc/sirf/sirf-audio.c1
-rw-r--r--sound/soc/sirf/sirf-usp.c1
-rw-r--r--sound/soc/soc-ac97.c256
-rw-r--r--sound/soc/soc-cache.c149
-rw-r--r--sound/soc/soc-compress.c11
-rw-r--r--sound/soc/soc-core.c1643
-rw-r--r--sound/soc/soc-dapm.c755
-rw-r--r--sound/soc/soc-jack.c11
-rw-r--r--sound/soc/soc-ops.c952
-rw-r--r--sound/soc/soc-pcm.c23
-rw-r--r--sound/soc/soc-utils.c1
-rw-r--r--sound/soc/spear/spdif_in.c1
-rw-r--r--sound/soc/spear/spdif_out.c1
-rw-r--r--sound/soc/tegra/tegra20_ac97.c3
-rw-r--r--sound/soc/tegra/tegra20_das.c1
-rw-r--r--sound/soc/tegra/tegra20_i2s.c1
-rw-r--r--sound/soc/tegra/tegra20_spdif.c1
-rw-r--r--sound/soc/tegra/tegra30_ahub.c1
-rw-r--r--sound/soc/tegra/tegra30_i2s.c1
-rw-r--r--sound/soc/tegra/tegra_alc5632.c1
-rw-r--r--sound/soc/tegra/tegra_max98090.c1
-rw-r--r--sound/soc/tegra/tegra_rt5640.c7
-rw-r--r--sound/soc/tegra/tegra_wm8753.c1
-rw-r--r--sound/soc/tegra/tegra_wm8903.c1
-rw-r--r--sound/soc/tegra/tegra_wm9712.c1
-rw-r--r--sound/soc/tegra/trimslice.c1
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c3
-rw-r--r--sound/soc/txx9/txx9aclc-generic.c1
-rw-r--r--sound/soc/txx9/txx9aclc.c3
-rw-r--r--sound/soc/ux500/mop500.c9
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c1
-rw-r--r--sound/sparc/amd7930.c1
-rw-r--r--sound/sparc/cs4231.c13
-rw-r--r--sound/sparc/dbri.c1
-rw-r--r--sound/usb/6fire/control.c22
-rw-r--r--sound/usb/6fire/firmware.c2
-rw-r--r--sound/usb/6fire/pcm.c17
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/card.c109
-rw-r--r--sound/usb/endpoint.c16
-rw-r--r--sound/usb/endpoint.h2
-rw-r--r--sound/usb/midi.c2
-rw-r--r--sound/usb/misc/ua101.c18
-rw-r--r--sound/usb/mixer.c241
-rw-r--r--sound/usb/mixer.h40
-rw-r--r--sound/usb/mixer_maps.c9
-rw-r--r--sound/usb/mixer_quirks.c920
-rw-r--r--sound/usb/mixer_scarlett.c1004
-rw-r--r--sound/usb/mixer_scarlett.h6
-rw-r--r--sound/usb/pcm.c5
-rw-r--r--sound/usb/quirks-table.h298
-rw-r--r--sound/usb/quirks.c72
-rw-r--r--sound/usb/quirks.h3
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c9
-rw-r--r--tools/hv/hv_fcopy_daemon.c33
-rw-r--r--tools/hv/hv_kvp_daemon.c48
-rw-r--r--tools/hv/hv_vss_daemon.c95
-rw-r--r--tools/perf/util/include/asm/hash.h6
-rw-r--r--tools/testing/selftests/Makefile15
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c10
-rw-r--r--tools/testing/selftests/exec/.gitignore9
-rw-r--r--tools/testing/selftests/exec/Makefile25
-rw-r--r--tools/testing/selftests/exec/execveat.c397
-rw-r--r--tools/testing/selftests/ipc/msgque.c26
-rw-r--r--tools/testing/selftests/kcmp/Makefile22
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c27
-rw-r--r--tools/testing/selftests/kselftest.h62
-rw-r--r--tools/testing/selftests/mount/unprivileged-remount-test.c204
-rw-r--r--tools/testing/selftests/net/Makefile8
-rwxr-xr-xtools/testing/selftests/net/test_bpf.sh10
-rw-r--r--tools/testing/selftests/size/.gitignore1
-rw-r--r--tools/testing/selftests/size/Makefile12
-rw-r--r--tools/testing/selftests/size/get_size.c100
-rw-r--r--tools/testing/selftests/timers/posix_timers.c14
-rw-r--r--tools/testing/selftests/user/Makefile8
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh10
-rw-r--r--tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c2
-rw-r--r--tools/usb/usbip/libsrc/list.h2
-rw-r--r--tools/usb/usbip/src/usbipd.c2
-rw-r--r--tools/virtio/linux/virtio.h22
-rw-r--r--tools/virtio/linux/virtio_config.h2
-rw-r--r--tools/virtio/virtio_test.c5
-rw-r--r--tools/virtio/vringh_test.c16
-rw-r--r--tools/vm/Makefile4
-rw-r--r--tools/vm/page_owner_sort.c144
-rw-r--r--usr/Kconfig24
-rw-r--r--virt/kvm/arm/arch_timer.c30
-rw-r--r--virt/kvm/arm/vgic.c116
-rw-r--r--virt/kvm/eventfd.c7
-rw-r--r--virt/kvm/kvm_main.c133
7638 files changed, 310723 insertions, 209719 deletions
diff --git a/.mailmap b/.mailmap
index 1ad68731fb47..ada8ad696b2e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,7 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
-Andrew Morton <akpm@osdl.org>
+Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
Archit Taneja <archit@ti.com>
@@ -102,6 +102,8 @@ Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sam Ravnborg <sam@mars.ravnborg.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Santosh Shilimkar <santosh.shilimkar@oracle.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
diff --git a/CREDITS b/CREDITS
index bb6278884f89..c56d8aa10131 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1197,6 +1197,13 @@ S: R. Tocantins, 89 - Cristo Rei
S: 80050-430 - Curitiba - Paraná
S: Brazil
+N: Oded Gabbay
+E: oded.gabbay@gmail.com
+D: AMD KFD maintainer
+S: 12 Shraga Raphaeli
+S: Petah-Tikva, 4906418
+S: Israel
+
N: Kumar Gala
E: galak@kernel.crashing.org
D: Embedded PowerPC 6xx/7xx/74xx/82xx/83xx/85xx support
diff --git a/Documentation/ABI/stable/sysfs-class-udc b/Documentation/ABI/stable/sysfs-class-udc
new file mode 100644
index 000000000000..85d3dac2e204
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-udc
@@ -0,0 +1,93 @@
+What: /sys/class/udc/<udc>/a_alt_hnp_support
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates if an OTG A-Host supports HNP at an alternate port.
+Users:
+
+What: /sys/class/udc/<udc>/a_hnp_support
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates if an OTG A-Host supports HNP at this port.
+Users:
+
+What: /sys/class/udc/<udc>/b_hnp_enable
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates if an OTG A-Host enabled HNP support.
+Users:
+
+What: /sys/class/udc/<udc>/current_speed
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates the current negotiated speed at this port.
+Users:
+
+What: /sys/class/udc/<udc>/is_a_peripheral
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates that this port is the default Host on an OTG session
+ but HNP was used to switch roles.
+Users:
+
+What: /sys/class/udc/<udc>/is_otg
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates that this port supports OTG.
+Users:
+
+What: /sys/class/udc/<udc>/maximum_speed
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates the maximum USB speed supported by this port.
+Users:
+
+What: /sys/class/udc/<udc>/soft_connect
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Allows users to disconnect data pullup resistors thus causing a
+ logical disconnection from the USB Host.
+Users:
+
+What: /sys/class/udc/<udc>/srp
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Allows users to manually start Session Request Protocol.
+Users:
+
+What: /sys/class/udc/<udc>/state
+Date: June 2011
+KernelVersion: 3.1
+Contact: Felipe Balbi <balbi@kernel.org>
+Description:
+ Indicates current state of the USB Device Controller. Valid
+ states are: 'not-attached', 'attached', 'powered',
+ 'reconnecting', 'unauthenticated', 'default', 'addressed',
+ 'configured', and 'suspended'; however not all USB Device
+ Controllers support reporting all states.
+Users:
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-hid b/Documentation/ABI/testing/configfs-usb-gadget-hid
new file mode 100644
index 000000000000..f12e00e6baa3
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-hid
@@ -0,0 +1,11 @@
+What: /config/usb-gadget/gadget/functions/hid.name
+Date: Nov 2014
+KernelVersion: 3.19
+Description:
+ The attributes:
+
+ protocol - HID protocol to use
+ report_desc - blob corresponding to HID report descriptors
+ except the data passed through /dev/hidg<N>
+ report_length - HID report length
+ subclass - HID device subclass to use
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi b/Documentation/ABI/testing/configfs-usb-gadget-midi
new file mode 100644
index 000000000000..6b341df7249c
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-midi
@@ -0,0 +1,12 @@
+What: /config/usb-gadget/gadget/functions/midi.name
+Date: Nov 2014
+KernelVersion: 3.19
+Description:
+ The attributes:
+
+ index - index value for the USB MIDI adapter
+ id - ID string for the USB MIDI adapter
+ buflen - MIDI buffer length
+ qlen - USB read request queue length
+ in_ports - number of MIDI input ports
+ out_ports - number of MIDI output ports
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
new file mode 100644
index 000000000000..4b8d6ec92e2b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
@@ -0,0 +1,24 @@
+What: /sys/bus/coresight/devices/<memory_map>.etb/enable_sink
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Add/remove a sink from a trace path. There can be multiple
+ sources for a single sink.
+ ex: echo 1 > /sys/bus/coresight/devices/20010000.etb/enable_sink
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/status
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Lists various control and status registers. The specific
+ layout and content are driver specific.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/trigger_cntr
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Disables write access to the Trace RAM by stopping the
+ formatter after a defined number of words have been stored
+ following the trigger event. The number of 32-bit words written
+ into the Trace RAM following the trigger event is equal to the
+ value stored in this register+1 (from ARM ETB-TRM).
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
new file mode 100644
index 000000000000..b4d0b99afffb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
@@ -0,0 +1,253 @@
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/enable_source
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Enable/disable tracing on this specific trace entity.
+ Enabling a source implies the source has been configured
+ properly and a sink has been identified for it. The path
+ of coresight components linking the source to the sink is
+ configured and managed automatically by the coresight framework.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/status
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Lists various control and status registers. The specific
+ layout and content are driver specific.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_idx
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: Select which address comparator or pair (of comparators) to
+ work with.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_acctype
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with @addr_idx. Specifies
+ characteristics about the address comparator being configured,
+ for example the access type, the kind of instruction to trace,
+ processor context ID to trigger on, etc. Individual fields in
+ the access type register may vary with the version of the trace
+ entity.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_range
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with @addr_idx. Specifies the range of
+ addresses to trigger on. Inclusion or exclusion is specified
+ in the corresponding access type register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_single
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with @addr_idx. Specifies the single
+ address to trigger on, highly influenced by the configuration
+ options of the corresponding access type register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_start
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with @addr_idx. Specifies the single
+ address to start tracing on, highly influenced by the
+ configuration options of the corresponding access type register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_stop
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with @addr_idx. Specifies the single
+ address to stop tracing on, highly influenced by the
+ configuration options of the corresponding access type register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cntr_idx
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Specifies the counter to work on.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cntr_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with cntr_idx, gives access to the
+ counter event register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cntr_val
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with cntr_idx, gives access to the
+ counter value register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cntr_rld_val
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with cntr_idx, gives access to the
+ counter reload value register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cntr_rld_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with cntr_idx, gives access to the
+ counter reload event register.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_idx
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Specifies the index of the context ID register to be
+ selected.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_mask
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Mask to apply to all the context ID comparators.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_val
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used in conjunction with ctxid_idx, specifies which
+ context ID to trigger on.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/enable_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines which event triggers a trace.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/etmsr
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Gives access to the ETM status register, which holds
+ programming information and status on certain events.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/fifofull_level
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Number of bytes left in the FIFO before it is considered
+ full. Depending on the tracer's version, this can also hold
+ the threshold for data suppression.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mode
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Interfaces with the driver's 'mode' field, controlling
+ various aspects of the trace entity such as timestamping,
+ context ID size and cycle-accurate tracing. Driver specific
+ and bound to change depending on the driver.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/nr_addr_cmp
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Provides the number of address comparator pairs accessible
+ on a trace unit, as specified by bits 3:0 of register ETMCCR.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/nr_cntr
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Provides the number of counters accessible on a trace unit,
+ as specified by bits 15:13 of register ETMCCR.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/nr_ctxid_cmp
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Provides the number of context ID comparators available on a
+ trace unit, as specified by bits 25:24 of register ETMCCR.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/reset
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (W) Cancels all configuration on a trace unit and sets it back
+ to its boot configuration.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_12_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 1 to state 2.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_13_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 1 to state 3.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_21_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 2 to state 1.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_23_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 2 to state 3.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_31_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 3 to state 1.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/seq_32_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines the event that causes the sequencer to transition
+ from state 3 to state 2.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/curr_seq_state
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Holds the current state of the sequencer.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/sync_freq
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Holds the trace synchronization frequency value - must be
+ programmed with the various implementation behaviors in mind.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/timestamp_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines an event that requests the insertion of a timestamp
+ into the trace stream.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/traceid
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Holds the trace ID that will appear in the trace stream
+ coming from this trace entity.
+
+What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/trigger_event
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Define the event that controls the trigger.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel b/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel
new file mode 100644
index 000000000000..d75acda5e1b3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-funnel
@@ -0,0 +1,12 @@
+What: /sys/bus/coresight/devices/<memory_map>.funnel/funnel_ctrl
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Enables the slave ports and defines the hold time of the
+ slave ports.
+
+What: /sys/bus/coresight/devices/<memory_map>.funnel/priority
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Defines input port priority order.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
new file mode 100644
index 000000000000..f38cded5fa22
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
@@ -0,0 +1,8 @@
+What: /sys/bus/coresight/devices/<memory_map>.tmc/trigger_cntr
+Date: November 2014
+KernelVersion: 3.19
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Disables write access to the Trace RAM by stopping the
+ formatter after a defined number of words have been stored
+ following the trigger event. Additional interfaces for this
+ driver are expected to be added as it matures.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index d760b0224ef7..117521dbf2b3 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -200,6 +200,13 @@ Description:
Raw pressure measurement from channel Y. Units after
application of scale and offset are kilopascal.
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_input
+What: /sys/bus/iio/devices/iio:deviceX/in_pressure_input
+KernelVersion: 3.8
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scaled pressure measurement from channel Y, in kilopascal.
+
What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_raw
KernelVersion: 3.14
Contact: linux-iio@vger.kernel.org
@@ -231,6 +238,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_tempY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_temp_offset
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_offset
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_offset
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -251,6 +259,7 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_scale
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale
@@ -266,6 +275,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_rot_from_north_magnetic_tilt_comp_sca
What: /sys/bus/iio/devices/iio:deviceX/in_rot_from_north_true_tilt_comp_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_scale
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_scale
+What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -328,6 +338,10 @@ Description:
are listed in this attribute.
What /sys/bus/iio/devices/iio:deviceX/out_voltageY_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_red_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_green_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_blue_hardwaregain
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_clear_hardwaregain
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -1028,3 +1042,12 @@ Contact: linux-iio@vger.kernel.org
Description:
Raw value of rotation from true/magnetic north measured with
or without compensation from tilt sensors.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_currentX_raw
+KernelVersion: 3.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw current measurement from channel X. Units are in milliamps
+ after application of scale and offset. If no offset or scale is
+ present, output should be considered as processed with the
+ unit in milliamps.
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index e1b2e785bba8..beb8ec4dabbc 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -216,3 +216,11 @@ Contact: netdev@vger.kernel.org
Description:
Indicates the interface protocol type as a decimal value. See
include/uapi/linux/if_arp.h for all possible values.
+
+What: /sys/class/net/<iface>/phys_switch_id
+Date: November 2014
+KernelVersion: 3.19
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the unique physical switch identifier of a switch this
+ port belongs to, as a string.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index acb9bfc89b48..99983e67c13c 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -224,3 +224,50 @@ Description: Parameters for the Intel P-state driver
frequency range.
More details can be found in Documentation/cpu-freq/intel-pstate.txt
+
+What: /sys/devices/system/cpu/cpu*/cache/index*/<set_of_attributes_mentioned_below>
+Date: July 2014 (documented; existed before August 2008)
+Contact: Sudeep Holla <sudeep.holla@arm.com>
+ Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Parameters for the CPU cache attributes
+
+ allocation_policy:
+ - WriteAllocate: allocate a memory location to a cache line
+ on a cache miss because of a write
+ - ReadAllocate: allocate a memory location to a cache line
+ on a cache miss because of a read
+ - ReadWriteAllocate: both writeallocate and readallocate
+
+ attributes: LEGACY, used only on IA64; same as write_policy
+
+ coherency_line_size: the minimum amount of data in bytes that gets
+ transferred from memory to cache
+
+ level: the cache hierarchy level in the multi-level cache configuration
+
+ number_of_sets: total number of sets in the cache, a set is a
+ collection of cache lines with the same cache index
+
+ physical_line_partition: number of physical cache lines per cache tag
+
+ shared_cpu_list: the list of logical cpus sharing the cache
+
+ shared_cpu_map: logical cpu mask containing the list of cpus sharing
+ the cache
+
+ size: the total cache size in kB
+
+ type:
+ - Instruction: cache that only holds instructions
+ - Data: cache that only caches data
+ - Unified: cache that holds both data and instructions
+
+ ways_of_associativity: degree of freedom in placing a particular block
+ of memory in the cache
+
+ write_policy:
+ - WriteThrough: data is written to both the cache line
+ and to the block in the lower-level memory
+ - WriteBack: data is written only to the cache line and
+ the modified cache line is written to main
+ memory only when it is replaced
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
new file mode 100644
index 000000000000..7969443ef0ef
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dell-laptop
@@ -0,0 +1,60 @@
+What: /sys/class/leds/dell::kbd_backlight/als_setting
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file controls the automatic keyboard illumination
+ mode on some systems that have an ambient light sensor.
+ Write 1 to this file to enable the automatic mode,
+ 0 to disable it.
+
+What: /sys/class/leds/dell::kbd_backlight/start_triggers
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file controls the input triggers that turn the
+ keyboard backlight illumination back on after it has
+ been disabled because of inactivity.
+ Read the file to see the available triggers. Enabled
+ triggers are preceded by '+', disabled ones by '-'.
+
+ To enable a trigger, write its name preceded by '+' to
+ this file. To disable a trigger, write its name preceded
+ by '-' instead.
+
+ For example, to enable the keyboard as a trigger, run:
+ echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+ To disable it:
+ echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+
+ Note that not all the available triggers can be configured.
+
+What: /sys/class/leds/dell::kbd_backlight/stop_timeout
+Date: December 2014
+KernelVersion: 3.19
+Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+ Pali Rohár <pali.rohar@gmail.com>
+Description:
+ This file specifies the interval after which the
+ keyboard illumination is disabled because of inactivity.
+ The timeouts are expressed in seconds, minutes, hours and
+ days, for which the symbols are 's', 'm', 'h' and 'd'
+ respectively.
+
+ To configure the timeout, write to this file a value along
+ with any of the above units. If no unit is specified, the value
+ is assumed to be expressed in seconds.
+
+ For example, to set the timeout to 10 minutes, run:
+ echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
+
+ Note that when this file is read, the returned value might be
+ expressed in a different unit than the one used when the timeout
+ was set.
+
+ Also note that only some timeouts are supported and that
+ some systems might fall back to a specific timeout in case
+ an invalid timeout is written to this file.
diff --git a/Documentation/Changes b/Documentation/Changes
index 1de131bb49fb..74bdda9272a4 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -383,7 +383,7 @@ o <http://www.iptables.org/downloads.html>
Ip-route2
---------
-o <ftp://ftp.tux.org/pub/net/ip-routing/iproute2-2.2.4-now-ss991023.tar.gz>
+o <https://www.kernel.org/pub/linux/utils/net/iproute2/>
OProfile
--------
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 3171822c22a5..618a33c940df 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -392,7 +392,12 @@ The goto statement comes in handy when a function exits from multiple
locations and some common work such as cleanup has to be done. If there is no
cleanup needed then just return directly.
-The rationale is:
+Choose label names which say what the goto does or why the goto exists. An
+example of a good name could be "out_buffer:" if the goto frees "buffer". Avoid
+using GW-BASIC names like "err1:" and "err2:". Also don't name them after the
+goto location like "err_kmalloc_failed:"
+
+The rationale for using gotos is:
- unconditional statements are easier to understand and follow
- nesting is reduced
@@ -403,9 +408,10 @@ The rationale is:
int fun(int a)
{
int result = 0;
- char *buffer = kmalloc(SIZE);
+ char *buffer;
- if (buffer == NULL)
+ buffer = kmalloc(SIZE, GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;
if (condition1) {
@@ -413,14 +419,25 @@ int fun(int a)
...
}
result = 1;
- goto out;
+ goto out_buffer;
}
...
-out:
+out_buffer:
kfree(buffer);
return result;
}
+A common type of bug to be aware of is "one err bugs" which look like this:
+
+err:
+ kfree(foo->bar);
+ kfree(foo);
+ return ret;
+
+The bug in this code is that on some exit paths "foo" is NULL. Normally the
+fix for this is to split it up into two error labels "err_bar:" and "err_foo:".
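+
+For example, the corrected version could look roughly like the sketch below;
+failures after both allocations use "goto err_bar", earlier failures use
+"goto err_foo":
+
+	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
+	if (!foo)
+		return -ENOMEM;
+
+	foo->bar = kmalloc(SIZE, GFP_KERNEL);
+	if (!foo->bar) {
+		ret = -ENOMEM;
+		goto err_foo;		/* "bar" was never allocated */
+	}
+	...
+	return 0;
+
+err_bar:
+	kfree(foo->bar);
+err_foo:
+	kfree(foo);
+	return ret;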
+
+
Chapter 8: Commenting
Comments are good, but there is also a danger of over-commenting. NEVER
@@ -845,6 +862,49 @@ next instruction in the assembly output:
: /* outputs */ : /* inputs */ : /* clobbers */);
+ Chapter 20: Conditional Compilation
+
+Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
+files; doing so makes code harder to read and logic harder to follow. Instead,
+use such conditionals in a header file defining functions for use in those .c
+files, providing no-op stub versions in the #else case, and then call those
+functions unconditionally from .c files. The compiler will avoid generating
+any code for the stub calls, producing identical results, but the logic will
+remain easy to follow.
+
+Prefer to compile out entire functions, rather than portions of functions or
+portions of expressions. Rather than putting an ifdef in an expression, factor
+out part or all of the expression into a separate helper function and apply the
+conditional to that function.
+
+If you have a function or variable which may potentially go unused in a
+particular configuration, and the compiler would warn about its definition
+going unused, mark the definition as __maybe_unused rather than wrapping it in
+a preprocessor conditional. (However, if a function or variable *always* goes
+unused, delete it.)
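+
+For instance, a helper that is only called from code compiled under
+CONFIG_SOMETHING might be marked as follows (a sketch with placeholder names):
+
+	/* Only called when CONFIG_SOMETHING=y; avoids an unused warning otherwise. */
+	static __maybe_unused void something_debug_dump(struct something *s)
+	{
+		...
+	}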
+
+Within code, where possible, use the IS_ENABLED macro to convert a Kconfig
+symbol into a C boolean expression, and use it in a normal C conditional:
+
+ if (IS_ENABLED(CONFIG_SOMETHING)) {
+ ...
+ }
+
+The compiler will constant-fold the conditional away, and include or exclude
+the block of code just as with an #ifdef, so this will not add any runtime
+overhead. However, this approach still allows the C compiler to see the code
+inside the block, and check it for correctness (syntax, types, symbol
+references, etc). Thus, you still have to use an #ifdef if the code inside the
+block references symbols that will not exist if the condition is not met.
+
+At the end of any non-trivial #if or #ifdef block (more than a few lines),
+place a comment after the #endif on the same line, noting the conditional
+expression used. For instance:
+
+#ifdef CONFIG_SOMETHING
+...
+#endif /* CONFIG_SOMETHING */
+
Appendix I: References
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index bec06659e0eb..9c7d92d03f62 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -15,7 +15,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
tracepoint.xml drm.xml media_api.xml w1.xml \
- writing_musb_glue_layer.xml
+ writing_musb_glue_layer.xml crypto-API.xml
include Documentation/DocBook/media/Makefile
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
index 0230a96f0564..71f9246127ec 100644
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ b/Documentation/DocBook/alsa-driver-api.tmpl
@@ -57,6 +57,7 @@
!Esound/core/pcm.c
!Esound/core/pcm_lib.c
!Esound/core/pcm_native.c
+!Iinclude/sound/pcm.h
</sect1>
<sect1><title>PCM Format Helpers</title>
!Esound/core/pcm_misc.c
@@ -64,6 +65,10 @@
<sect1><title>PCM Memory Management</title>
!Esound/core/pcm_memory.c
</sect1>
+ <sect1><title>PCM DMA Engine API</title>
+!Esound/core/pcm_dmaengine.c
+!Iinclude/sound/dmaengine_pcm.h
+ </sect1>
</chapter>
<chapter><title>Control/Mixer API</title>
<sect1><title>General Control Interface</title>
@@ -91,12 +96,38 @@
!Esound/core/info.c
</sect1>
</chapter>
+ <chapter><title>Compress Offload</title>
+ <sect1><title>Compress Offload API</title>
+!Esound/core/compress_offload.c
+!Iinclude/uapi/sound/compress_offload.h
+!Iinclude/uapi/sound/compress_params.h
+!Iinclude/sound/compress_driver.h
+ </sect1>
+ </chapter>
+ <chapter><title>ASoC</title>
+ <sect1><title>ASoC Core API</title>
+!Iinclude/sound/soc.h
+!Esound/soc/soc-core.c
+!Esound/soc/soc-cache.c
+!Esound/soc/soc-devres.c
+!Esound/soc/soc-io.c
+!Esound/soc/soc-pcm.c
+ </sect1>
+ <sect1><title>ASoC DAPM API</title>
+!Esound/soc/soc-dapm.c
+ </sect1>
+ <sect1><title>ASoC DMA Engine API</title>
+!Esound/soc/soc-generic-dmaengine-pcm.c
+ </sect1>
+ </chapter>
<chapter><title>Miscellaneous Functions</title>
<sect1><title>Hardware-Dependent Devices API</title>
!Esound/core/hwdep.c
</sect1>
<sect1><title>Jack Abstraction Layer API</title>
+!Iinclude/sound/jack.h
!Esound/core/jack.c
+!Esound/soc/soc-jack.c
</sect1>
<sect1><title>ISA DMA Helpers</title>
!Esound/core/isadma.c
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
new file mode 100644
index 000000000000..c763d30f4893
--- /dev/null
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -0,0 +1,1253 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="KernelCryptoAPI">
+ <bookinfo>
+ <title>Linux Kernel Crypto API</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Stephan</firstname>
+ <surname>Mueller</surname>
+ <affiliation>
+ <address>
+ <email>smueller@chronox.de</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Marek</firstname>
+ <surname>Vasut</surname>
+ <affiliation>
+ <address>
+ <email>marek@denx.de</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2014</year>
+ <holder>Stephan Mueller</holder>
+ </copyright>
+
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="Intro">
+ <title>Kernel Crypto API Interface Specification</title>
+
+ <sect1><title>Introduction</title>
+
+ <para>
+ The kernel crypto API offers a rich set of cryptographic ciphers as
+ well as other data transformation mechanisms and methods to invoke
+ these. This document contains a description of the API and provides
+ example code.
+ </para>
+
+ <para>
+ To understand and properly use the kernel crypto API a brief
+ explanation of its structure is given. Based on the architecture,
+ the API can be separated into different components. Following the
+ architecture specification, hints to developers of ciphers are
+ provided. Pointers to the API function call documentation are
+ given at the end.
+ </para>
+
+ <para>
+ The kernel crypto API refers to all algorithms as "transformations".
+ Therefore, a cipher handle variable usually has the name "tfm".
+ Besides cryptographic operations, the kernel crypto API also knows
+ compression transformations and handles them the same way as ciphers.
+ </para>
+
+ <para>
+ The kernel crypto API serves the following entity types:
+
+ <itemizedlist>
+ <listitem>
+ <para>consumers requesting cryptographic services</para>
+ </listitem>
+ <listitem>
+ <para>data transformation implementations (typically ciphers)
+ that can be called by consumers using the kernel crypto
+ API</para>
+ </listitem>
+ </itemizedlist>
+ </para>
+
+ <para>
+ This specification is intended for consumers of the kernel crypto
+ API as well as for developers implementing ciphers. This API
+ specification, however, does not discuss all API calls available
+ to data transformation implementations (i.e. implementations of
+ ciphers and other transformations (such as CRC or even compression
+ algorithms) that can register with the kernel crypto API).
+ </para>
+
+ <para>
+ Note: The terms "transformation" and cipher algorithm are used
+ interchangably.
+ </para>
+ </sect1>
+
+ <sect1><title>Terminology</title>
+ <para>
+ The transformation implementation is the actual code or an interface
+ to hardware which implements a certain transformation with precisely
+ defined behavior.
+ </para>
+
+ <para>
+ The transformation object (TFM) is an instance of a transformation
+ implementation. There can be multiple transformation objects
+ associated with a single transformation implementation. Each of
+ those transformation objects is held by a crypto API consumer or
+ another transformation. A transformation object is allocated when a
+ crypto API consumer requests a transformation implementation.
+ The consumer is then provided with a structure, which contains
+ a transformation object (TFM).
+ </para>
+
+ <para>
+ The structure that contains transformation objects may also be
+ referred to as a "cipher handle". Such a cipher handle is always
+ subject to the following phases that are reflected in the API calls
+ applicable to such a cipher handle:
+ </para>
+
+ <orderedlist>
+ <listitem>
+ <para>Initialization of a cipher handle.</para>
+ </listitem>
+ <listitem>
+ <para>Execution of all intended cipher operations applicable
+ for the handle where the cipher handle must be furnished to
+ every API call.</para>
+ </listitem>
+ <listitem>
+ <para>Destruction of a cipher handle.</para>
+ </listitem>
+ </orderedlist>
+
+ <para>
+ When using the initialization API calls, a cipher handle is
+ created and returned to the consumer. Therefore, please refer
+ to all initialization API calls that refer to the data
+ structure type a consumer is expected to receive and subsequently
+ to use. The initialization API calls all follow the naming
+ convention crypto_alloc_*.
+ </para>
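+
+    <para>
+      As an illustration of these three phases for a single block cipher,
+      a minimal sketch (error handling shortened, cipher name and key chosen
+      purely as an example) could look like this:
+    </para>
+
+    <programlisting>
+    static int demo_cipher_handle(void)
+    {
+            struct crypto_cipher *tfm;
+            u8 key[16] = { 0 };     /* all-zero example key for AES-128 */
+            u8 in[16] = { 0 }, out[16];
+            int ret;
+
+            /* 1. Initialization: allocate the cipher handle. */
+            tfm = crypto_alloc_cipher("aes", 0, 0);
+            if (IS_ERR(tfm))
+                    return PTR_ERR(tfm);
+
+            /* 2. Operations: the cipher handle is furnished to every call. */
+            ret = crypto_cipher_setkey(tfm, key, sizeof(key));
+            if (!ret)
+                    crypto_cipher_encrypt_one(tfm, out, in);
+
+            /* 3. Destruction: release the cipher handle. */
+            crypto_free_cipher(tfm);
+            return ret;
+    }
+    </programlisting>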
+
+ <para>
+ The transformation context is private data associated with
+ the transformation object.
+ </para>
+ </sect1>
+ </chapter>
+
+ <chapter id="Architecture"><title>Kernel Crypto API Architecture</title>
+ <sect1><title>Cipher algorithm types</title>
+ <para>
+ The kernel crypto API provides different API calls for the
+ following cipher types:
+
+ <itemizedlist>
+ <listitem><para>Symmetric ciphers</para></listitem>
+ <listitem><para>AEAD ciphers</para></listitem>
+ <listitem><para>Message digest, including keyed message digest</para></listitem>
+ <listitem><para>Random number generation</para></listitem>
+ <listitem><para>User space interface</para></listitem>
+ </itemizedlist>
+ </para>
+ </sect1>
+
+ <sect1><title>Ciphers And Templates</title>
+ <para>
+ The kernel crypto API provides implementations of single block
+ ciphers and message digests. In addition, the kernel crypto API
+ provides numerous "templates" that can be used in conjunction
+ with the single block ciphers and message digests. Templates
+ include all types of block chaining mode, the HMAC mechanism, etc.
+ </para>
+
+ <para>
+ Single block ciphers and message digests can either be directly
+ used by a caller or invoked together with a template to form
+ multi-block ciphers or keyed message digests.
+ </para>
+
+ <para>
+ A single block cipher may even be called with multiple templates.
+ However, templates cannot be used without a single cipher.
+ </para>
+
+ <para>
+ See /proc/crypto and search for "name". For example:
+
+ <itemizedlist>
+ <listitem><para>aes</para></listitem>
+ <listitem><para>ecb(aes)</para></listitem>
+ <listitem><para>cmac(aes)</para></listitem>
+ <listitem><para>ccm(aes)</para></listitem>
+ <listitem><para>rfc4106(gcm(aes))</para></listitem>
+ <listitem><para>sha1</para></listitem>
+ <listitem><para>hmac(sha1)</para></listitem>
+ <listitem><para>authenc(hmac(sha1),cbc(aes))</para></listitem>
+ </itemizedlist>
+ </para>
+
+ <para>
+ In these examples, "aes" and "sha1" are the ciphers and all
+ others are the templates.
+ </para>
+ </sect1>
+
+ <sect1><title>Synchronous And Asynchronous Operation</title>
+ <para>
+ The kernel crypto API provides synchronous and asynchronous
+ API operations.
+ </para>
+
+ <para>
+ When using the synchronous API operation, the caller invokes
+ a cipher operation which is performed synchronously by the
+ kernel crypto API. That means, the caller waits until the
+ cipher operation completes. Therefore, the kernel crypto API
+ calls work like regular function calls. For synchronous
+ operation, the set of API calls is small and conceptually
+ similar to any other crypto library.
+ </para>
+
+ <para>
+ Asynchronous operation is also provided by the kernel crypto
+ API, which implies that the invocation of a cipher operation
+ will return almost instantly.
+ cipher operation but it does not signal its completion. Before
+ invoking a cipher operation, the caller must provide a callback
+ function the kernel crypto API can invoke to signal the
+ completion of the cipher operation. Furthermore, the caller
+ must ensure it can handle such asynchronous events by applying
+ appropriate locking around its data. The kernel crypto API
+ does not perform any special serialization operation to protect
+ the caller's data integrity.
+ </para>
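+
+    <para>
+      As a sketch, such a completion callback could be as simple as the
+      following (the name is a placeholder). It would be registered, for
+      instance, with ablkcipher_request_set_callback() together with a
+      pointer to a struct completion as the callback data; the caller then
+      waits on that completion whenever the cipher call returns -EINPROGRESS
+      or -EBUSY:
+    </para>
+
+    <programlisting>
+    static void demo_complete(struct crypto_async_request *req, int err)
+    {
+            /*
+             * req-&gt;data is whatever was passed as callback data, here a
+             * struct completion. A real driver would also record err.
+             */
+            if (err == -EINPROGRESS)
+                    return;         /* backlog notification, not completion */
+            complete(req-&gt;data);
+    }
+    </programlisting>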
+ </sect1>
+
+ <sect1><title>Crypto API Cipher References And Priority</title>
+ <para>
+ A cipher is referenced by the caller with a string. That string
+ has the following semantics:
+
+ <programlisting>
+ template(single block cipher)
+ </programlisting>
+
+ where "template" and "single block cipher" is the aforementioned
+ template and single block cipher, respectively. If applicable,
+ additional templates may enclose other templates, such as
+
+ <programlisting>
+ template1(template2(single block cipher))
+ </programlisting>
+ </para>
+
+ <para>
+ The kernel crypto API may provide multiple implementations of a
+ template or a single block cipher. For example, AES on newer
+ Intel hardware has the following implementations: AES-NI,
+ assembler implementation, or straight C. Now, when using the
+ string "aes" with the kernel crypto API, which cipher
+ implementation is used? The answer to that question is the
+ priority number assigned to each cipher implementation by the
+ kernel crypto API. When a caller uses the string to refer to a
+ cipher during initialization of a cipher handle, the kernel
+ crypto API looks up all implementations providing an
+ implementation with that name and selects the implementation
+ with the highest priority.
+ </para>
+
+ <para>
+ Now, a caller may have the need to refer to a specific cipher
+ implementation and thus does not want to rely on the
+ priority-based selection. To accommodate this scenario, the
+ kernel crypto API allows the cipher implementation to register
+ a unique name in addition to common names. When using that
+ unique name, a caller is therefore always sure to refer to
+ the intended cipher implementation.
+ </para>
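+
+    <para>
+      As a sketch, both allocations below are valid. The first is subject to
+      the priority-based selection described above, while the second pins the
+      C reference implementation of AES via its unique driver name (driver
+      names can be read from the "driver" lines in /proc/crypto):
+    </para>
+
+    <programlisting>
+    struct crypto_cipher *tfm;
+
+    /* Generic name: the highest-priority "aes" implementation is selected. */
+    tfm = crypto_alloc_cipher("aes", 0, 0);
+
+    /* Or: unique driver name, always the generic C implementation. */
+    tfm = crypto_alloc_cipher("aes-generic", 0, 0);
+    </programlisting>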
+
+ <para>
+ The list of available ciphers is given in /proc/crypto. However,
+ that list does not specify all possible permutations of
+ templates and ciphers. Each block listed in /proc/crypto may
+ contain the following information -- if one of the components
+ listed below is not applicable to a cipher, it is not
+ displayed:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>name: the generic name of the cipher that is subject
+ to the priority-based selection -- this name can be used by
+ the cipher allocation API calls (all names listed above are
+ examples for such generic names)</para>
+ </listitem>
+ <listitem>
+ <para>driver: the unique name of the cipher -- this name can
+ be used by the cipher allocation API calls</para>
+ </listitem>
+ <listitem>
+ <para>module: the kernel module providing the cipher
+ implementation (or "kernel" for statically linked ciphers)</para>
+ </listitem>
+ <listitem>
+ <para>priority: the priority value of the cipher implementation</para>
+ </listitem>
+ <listitem>
+ <para>refcnt: the reference count of the respective cipher
+ (i.e. the number of current consumers of this cipher)</para>
+ </listitem>
+ <listitem>
+ <para>selftest: specification whether the self test for the
+ cipher passed</para>
+ </listitem>
+ <listitem>
+ <para>type:
+ <itemizedlist>
+ <listitem>
+ <para>blkcipher for synchronous block ciphers</para>
+ </listitem>
+ <listitem>
+ <para>ablkcipher for asynchronous block ciphers</para>
+ </listitem>
+ <listitem>
+ <para>cipher for single block ciphers that may be used with
+ an additional template</para>
+ </listitem>
+ <listitem>
+ <para>shash for synchronous message digest</para>
+ </listitem>
+ <listitem>
+ <para>ahash for asynchronous message digest</para>
+ </listitem>
+ <listitem>
+ <para>aead for AEAD cipher type</para>
+ </listitem>
+ <listitem>
+ <para>compression for compression type transformations</para>
+ </listitem>
+ <listitem>
+ <para>rng for random number generator</para>
+ </listitem>
+ <listitem>
+ <para>givcipher for cipher with associated IV generator
+ (see the geniv entry below for the specification of the
+ IV generator type used by the cipher implementation)</para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ <listitem>
+ <para>blocksize: blocksize of cipher in bytes</para>
+ </listitem>
+ <listitem>
+ <para>keysize: key size in bytes</para>
+ </listitem>
+ <listitem>
+ <para>ivsize: IV size in bytes</para>
+ </listitem>
+ <listitem>
+ <para>seedsize: required size of seed data for random number
+ generator</para>
+ </listitem>
+ <listitem>
+ <para>digestsize: output size of the message digest</para>
+ </listitem>
+ <listitem>
+ <para>geniv: IV generation type:
+ <itemizedlist>
+ <listitem>
+ <para>eseqiv for encrypted sequence number based IV
+ generation</para>
+ </listitem>
+ <listitem>
+ <para>seqiv for sequence number based IV generation</para>
+ </listitem>
+ <listitem>
+ <para>chainiv for chain iv generation</para>
+ </listitem>
+ <listitem>
+ <para>&lt;builtin&gt; is a marker that the cipher implements
+ IV generation and handling as it is specific to the given
+ cipher</para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </itemizedlist>
+ </sect1>
+
+ <sect1><title>Key Sizes</title>
+ <para>
+ When allocating a cipher handle, the caller only specifies the
+ cipher type. Symmetric ciphers, however, typically support
+ multiple key sizes (e.g. AES-128 vs. AES-192 vs. AES-256).
+ These key sizes are determined with the length of the provided
+ key. Thus, the kernel crypto API does not provide a separate
+ way to select the particular symmetric cipher key size.
+ </para>
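+
+    <para>
+      For example, assuming tfm is an "aes" cipher handle allocated as
+      sketched earlier, the length of the key alone selects the AES variant:
+    </para>
+
+    <programlisting>
+    u8 key[32];     /* 32 bytes selects AES-256; 16 or 24 bytes would
+                       select AES-128 or AES-192 instead */
+    int ret;
+
+    get_random_bytes(key, sizeof(key));
+    ret = crypto_cipher_setkey(tfm, key, sizeof(key));
+    </programlisting>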
+ </sect1>
+
+ <sect1><title>Cipher Allocation Type And Masks</title>
+ <para>
+ The different cipher handle allocation functions allow the
+ specification of a type and mask flag. Both parameters have
+ the following meaning (and are therefore not covered in the
+ subsequent sections).
+ </para>
+
+ <para>
+ The type flag specifies the type of the cipher algorithm.
+ The caller usually provides a 0 when the caller wants the
+ default handling. Otherwise, the caller may provide the
+ following selections which match the aforementioned
+ cipher types:
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_CIPHER Single block cipher</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_COMPRESS Compression</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with
+ Associated Data (MAC)</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block
+ cipher packed together with an IV generator (see geniv field
+ in the /proc/crypto listing for the known IV generators)</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_DIGEST Raw message digest</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_RNG Random Number Generation</para>
+ </listitem>
+ <listitem>
+ <para>CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
+ CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
+ decompression instead of performing the operation on one
+ segment only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
+ CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.</para>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+ The mask flag restricts the type of cipher. The only allowed
+ flag is CRYPTO_ALG_ASYNC to restrict the cipher lookup function
+ to asynchronous ciphers. Usually, a caller provides a 0 for the
+ mask flag.
+ </para>
+
+ <para>
+ When the caller provides a mask and type specification, the
+ caller limits the search the kernel crypto API can perform for
+ a suitable cipher implementation for the given cipher name.
+ That means, even when a caller uses a cipher name that exists
+ during its initialization call, the kernel crypto API may not
+ select it due to the used type and mask field.
+ </para>
+ </sect1>
+ </chapter>
+
+ <chapter id="Development"><title>Developing Cipher Algorithms</title>
+ <sect1><title>Registering And Unregistering Transformation</title>
+ <para>
+ There are three distinct types of registration functions in
+ the Crypto API. One is used to register a generic cryptographic
+ transformation, while the other two are specific to HASH
+ transformations and COMPRESSion. We will discuss the latter
+ two in a separate chapter, here we will only look at the
+ generic ones.
+ </para>
+
+ <para>
+ Before discussing the register functions, the data structure
+ to be filled with each, struct crypto_alg, must be considered
+ -- see below for a description of this data structure.
+ </para>
+
+ <para>
+ The generic registration functions can be found in
+ include/linux/crypto.h and their definition can be seen below.
+ The former function registers a single transformation, while
+ the latter works on an array of transformation descriptions.
+ The latter is useful when registering transformations in bulk.
+ </para>
+
+ <programlisting>
+ int crypto_register_alg(struct crypto_alg *alg);
+ int crypto_register_algs(struct crypto_alg *algs, int count);
+ </programlisting>
+
+ <para>
+ The counterparts to those functions are listed below.
+ </para>
+
+ <programlisting>
+ int crypto_unregister_alg(struct crypto_alg *alg);
+ int crypto_unregister_algs(struct crypto_alg *algs, int count);
+ </programlisting>
+
+ <para>
+ Notice that both registration and unregistration functions
+ do return a value, so make sure to handle errors. A return
+ code of zero implies success. Any return code &lt; 0 implies
+ an error.
+ </para>
+
+ <para>
+ The bulk registration / unregistration functions require an
+ array of count entries of struct crypto_alg. These
+ functions simply loop over that array and register /
+ unregister each individual algorithm. If an error occurs,
+ the loop is terminated at the offending algorithm definition.
+ That means, the algorithms prior to the offending algorithm
+ are successfully registered. Note, the caller has no way of
+ knowing which cipher implementations have successfully
+ registered. If this is important to know, the caller should
+ loop through the different implementations using the single
+ instance *_alg functions for each individual implementation.
+ </para>
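+
+    <para>
+      A minimal sketch of such a loop, which unwinds only those registrations
+      that are known to have succeeded (algs and count are supplied by the
+      driver), is given below:
+    </para>
+
+    <programlisting>
+    static int demo_register_algs(struct crypto_alg *algs, int count)
+    {
+            int i, ret;
+
+            for (i = 0; i != count; i++) {
+                    ret = crypto_register_alg(algs + i);
+                    if (ret)
+                            goto err;
+            }
+            return 0;
+
+    err:
+            /* Only the algorithms known to be registered are removed again. */
+            while (i--)
+                    crypto_unregister_alg(algs + i);
+            return ret;
+    }
+    </programlisting>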
+ </sect1>
+
+ <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
+ <para>
+ Example of transformations: aes, arc4, ...
+ </para>
+
+ <para>
+ This section describes the simplest of all transformation
+ implementations, that being the CIPHER type used for symmetric
+ ciphers. The CIPHER type is used for transformations which
+ operate on exactly one block at a time and there are no
+ dependencies between blocks at all.
+ </para>
+
+ <sect2><title>Registration specifics</title>
+ <para>
+ The registration of a [CIPHER] algorithm is specific in that
+ struct crypto_alg field .cra_type is empty. The .cra_u.cipher
+ has to be filled in with proper callbacks to implement this
+ transformation.
+ </para>
+
+ <para>
+ See struct cipher_alg below.
+ </para>
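+
+    <para>
+      As an illustrative sketch only (all names and values below are
+      placeholders, not taken from an existing driver), such a definition
+      might look as follows. Note that .cra_type is deliberately left
+      unset, as explained above:
+    </para>
+
+    <programlisting>
+    static struct crypto_alg demo_cipher_alg = {
+            .cra_name               = "demo",
+            .cra_driver_name        = "demo-generic",
+            .cra_priority           = 100,
+            .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+            .cra_blocksize          = 16,
+            .cra_ctxsize            = sizeof(struct demo_ctx),
+            .cra_module             = THIS_MODULE,
+            .cra_u                  = {
+                    .cipher = {
+                            .cia_min_keysize = 16,
+                            .cia_max_keysize = 32,
+                            .cia_setkey      = demo_setkey,
+                            .cia_encrypt     = demo_encrypt,
+                            .cia_decrypt     = demo_decrypt,
+                    },
+            },
+    };
+    </programlisting>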
+ </sect2>
+
+ <sect2><title>Cipher Definition With struct cipher_alg</title>
+ <para>
+ Struct cipher_alg defines a single block cipher.
+ </para>
+
+ <para>
+ Here are schematics of how these functions are called when
+ operated from other parts of the kernel. Note that the
+ .cia_setkey() call might happen before or after any of these
+ schematics happen, but must not happen while any of them
+ are in flight.
+ </para>
+
+ <para>
+ <programlisting>
+ KEY ---. PLAINTEXT ---.
+ v v
+ .cia_setkey() -&gt; .cia_encrypt()
+ |
+ '-----&gt; CIPHERTEXT
+ </programlisting>
+ </para>
+
+ <para>
+ Please note that a pattern where .cia_setkey() is called
+ multiple times is also valid:
+ </para>
+
+ <para>
+ <programlisting>
+
+ KEY1 --. PLAINTEXT1 --. KEY2 --. PLAINTEXT2 --.
+ v v v v
+ .cia_setkey() -&gt; .cia_encrypt() -&gt; .cia_setkey() -&gt; .cia_encrypt()
+ | |
+ '---&gt; CIPHERTEXT1 '---&gt; CIPHERTEXT2
+ </programlisting>
+ </para>
+
+ </sect2>
+ </sect1>
+
+ <sect1><title>Multi-Block Ciphers [BLKCIPHER] [ABLKCIPHER]</title>
+ <para>
+ Example of transformations: cbc(aes), ecb(arc4), ...
+ </para>
+
+ <para>
+ This section describes the multi-block cipher transformation
+ implementations for both synchronous [BLKCIPHER] and
+ asynchronous [ABLKCIPHER] case. The multi-block ciphers are
+ used for transformations which operate on scatterlists of
+ data supplied to the transformation functions. They output
+ the result into a scatterlist of data as well.
+ </para>
+
+ <sect2><title>Registration Specifics</title>
+
+ <para>
+ The registration of [BLKCIPHER] or [ABLKCIPHER] algorithms
+ is one of the most standard procedures throughout the crypto API.
+ </para>
+
+ <para>
+ Note, if a cipher implementation requires a proper alignment
+ of data, the caller should use the functions
+ crypto_blkcipher_alignmask() or crypto_ablkcipher_alignmask(),
+ respectively, to identify the memory alignment mask. The kernel
+ crypto API is able to process requests that are unaligned.
+ This implies, however, additional overhead as the kernel
+ crypto API needs to perform the realignment of the data which
+ may imply moving of data.
+ </para>
+ </sect2>
+
+ <sect2><title>Cipher Definition With struct blkcipher_alg and ablkcipher_alg</title>
+ <para>
+ Struct blkcipher_alg defines a synchronous block cipher whereas
+ struct ablkcipher_alg defines an asynchronous block cipher.
+ </para>
+
+ <para>
+ Please refer to the single block cipher description for schematics
+ of the block cipher usage. The usage patterns are exactly the same
+ for [ABLKCIPHER] and [BLKCIPHER] as they are for plain [CIPHER].
+ </para>
+ </sect2>
+
+ <sect2><title>Specifics Of Asynchronous Multi-Block Cipher</title>
+ <para>
+ There are a couple of specifics to the [ABLKCIPHER] interface.
+ </para>
+
+ <para>
+ First of all, some drivers will want to use the generic
+ ScatterWalk interface when the hardware needs to be fed separate
+ chunks of the scatterlist that contains the plaintext and will
+ contain the ciphertext. Please refer to the ScatterWalk interface
+ offered by the Linux kernel scatter / gather list implementation.
+ </para>
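+
+ <para>
+ As an illustration, such a driver could use scatterwalk_map_and_copy()
+ from that interface to move one chunk of data between the scatterlist
+ and a contiguous bounce buffer. This is only a sketch; req is an
+ ablkcipher request and nbytes is an assumed chunk size that fits into
+ the bounce buffer.
+ </para>
+
+ <programlisting>
+ #include &lt;crypto/scatterwalk.h&gt;
+
+ u8 bounce[64];
+
+ /* copy nbytes of plaintext out of the source scatterlist */
+ scatterwalk_map_and_copy(bounce, req-&gt;src, 0, nbytes, 0);
+
+ /* the hardware processes the bounce buffer here */
+
+ /* copy the resulting ciphertext back into the destination scatterlist */
+ scatterwalk_map_and_copy(bounce, req-&gt;dst, 0, nbytes, 1);
+ </programlisting>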
+ </sect2>
+ </sect1>
+
+ <sect1><title>Hashing [HASH]</title>
+
+ <para>
+ Example of transformations: crc32, md5, sha1, sha256,...
+ </para>
+
+ <sect2><title>Registering And Unregistering The Transformation</title>
+
+ <para>
+ There are multiple ways to register a HASH transformation,
+ depending on whether the transformation is synchronous [SHASH]
+ or asynchronous [AHASH] and the number of HASH transformations
+ we are registering. You can find the prototypes defined in
+ include/crypto/internal/hash.h:
+ </para>
+
+ <programlisting>
+ int crypto_register_ahash(struct ahash_alg *alg);
+
+ int crypto_register_shash(struct shash_alg *alg);
+ int crypto_register_shashes(struct shash_alg *algs, int count);
+ </programlisting>
+
+ <para>
+ The respective counterparts for unregistering the HASH
+ transformation are as follows:
+ </para>
+
+ <programlisting>
+ int crypto_unregister_ahash(struct ahash_alg *alg);
+
+ int crypto_unregister_shash(struct shash_alg *alg);
+ int crypto_unregister_shashes(struct shash_alg *algs, int count);
+ </programlisting>
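+
+ <para>
+ As an illustration only, registering a synchronous message digest could
+ look like the following sketch. The my_hash_* callbacks and the
+ my_hash_state structure are hypothetical placeholders, not part of the
+ kernel API.
+ </para>
+
+ <programlisting>
+ static struct shash_alg my_shash_alg = {
+	.digestsize	= 32,
+	.init		= my_hash_init,
+	.update		= my_hash_update,
+	.final		= my_hash_final,
+	.descsize	= sizeof(struct my_hash_state),
+	.base		= {
+		.cra_name		= "myhash",
+		.cra_driver_name	= "myhash-generic",
+		.cra_blocksize		= 64,
+		.cra_module		= THIS_MODULE,
+	}
+ };
+
+ ret = crypto_register_shash(&amp;my_shash_alg);
+ </programlisting>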
+ </sect2>
+
+ <sect2><title>Cipher Definition With struct shash_alg and ahash_alg</title>
+ <para>
+ Here are schematics of how these functions are called when
+ operated from other parts of the kernel. Note that the .setkey()
+ call might happen before or after any of these schematics, but
+ must not happen while any of them are in-flight. Please note
+ that calling .init() followed immediately by .final() is also a
+ perfectly valid transformation.
+ </para>
+
+ <programlisting>
+ I)   DATA -----------.
+                      v
+       .init() -&gt; .update() -&gt; .final()      ! .update() might not be called
+                   ^    |         |            at all in this scenario.
+                   '----'         '---&gt; HASH
+
+ II)  DATA -----------.-----------.
+                      v           v
+       .init() -&gt; .update() -&gt; .finup()      ! .update() may not be called
+                   ^    |         |            at all in this scenario.
+                   '----'         '---&gt; HASH
+
+ III) DATA -----------.
+                      v
+       .digest()                             ! The entire process is handled
+          |                                    by the .digest() call.
+          '---------------&gt; HASH
+ </programlisting>
+
+ <para>
+ Here is a schematic of how the .export()/.import() functions are
+ called when used from another part of the kernel.
+ </para>
+
+ <programlisting>
+ KEY--.                 DATA--.
+      v                       v                    ! .update() may not be called
+  .setkey() -&gt; .init() -&gt; .update() -&gt; .export()     at all in this scenario.
+                            ^     |         |
+                            '-----'         '--&gt; PARTIAL_HASH
+
+ ----------- other transformations happen here -----------
+
+ PARTIAL_HASH--.   DATA1--.
+               v          v
+  .import -&gt; .update() -&gt; .final()                 ! .update() may not be called
+              ^    |         |                       at all in this scenario.
+              '----'         '--&gt; HASH1
+
+ PARTIAL_HASH--.   DATA2-.
+               v         v
+  .import -&gt; .finup()
+                 |
+                 '---------------&gt; HASH2
+ </programlisting>
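+
+ <para>
+ For the synchronous [SHASH] case, the .export()/.import() schematic
+ above roughly corresponds to the following sketch. desc and desc2 are
+ assumed to be set up struct shash_desc instances backed by the same
+ algorithm, state is a buffer of at least crypto_shash_statesize()
+ bytes, and error handling is omitted.
+ </para>
+
+ <programlisting>
+ crypto_shash_init(desc);
+ crypto_shash_update(desc, data, datalen);
+ crypto_shash_export(desc, state);		/* save PARTIAL_HASH */
+
+ /* other transformations happen here */
+
+ crypto_shash_import(desc2, state);		/* resume from PARTIAL_HASH */
+ crypto_shash_finup(desc2, data2, data2len, digest);
+ </programlisting>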
+ </sect2>
+
+ <sect2><title>Specifics Of Asynchronous HASH Transformation</title>
+ <para>
+ Some of the drivers will want to use the Generic ScatterWalk
+ in case the implementation needs to be fed separate chunks of the
+ scatterlist which contains the input data. The buffer containing
+ the resulting hash will always be properly aligned to
+ .cra_alignmask so there is no need to worry about this.
+ </para>
+ </sect2>
+ </sect1>
+ </chapter>
+
+ <chapter id="API"><title>Programming Interface</title>
+ <sect1><title>Block Cipher Context Data Structures</title>
+!Pinclude/linux/crypto.h Block Cipher Context Data Structures
+!Finclude/linux/crypto.h aead_request
+ </sect1>
+ <sect1><title>Block Cipher Algorithm Definitions</title>
+!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
+!Finclude/linux/crypto.h crypto_alg
+!Finclude/linux/crypto.h ablkcipher_alg
+!Finclude/linux/crypto.h aead_alg
+!Finclude/linux/crypto.h blkcipher_alg
+!Finclude/linux/crypto.h cipher_alg
+!Finclude/linux/crypto.h rng_alg
+ </sect1>
+ <sect1><title>Asynchronous Block Cipher API</title>
+!Pinclude/linux/crypto.h Asynchronous Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_ablkcipher
+!Finclude/linux/crypto.h crypto_free_ablkcipher
+!Finclude/linux/crypto.h crypto_has_ablkcipher
+!Finclude/linux/crypto.h crypto_ablkcipher_ivsize
+!Finclude/linux/crypto.h crypto_ablkcipher_blocksize
+!Finclude/linux/crypto.h crypto_ablkcipher_setkey
+!Finclude/linux/crypto.h crypto_ablkcipher_reqtfm
+!Finclude/linux/crypto.h crypto_ablkcipher_encrypt
+!Finclude/linux/crypto.h crypto_ablkcipher_decrypt
+ </sect1>
+ <sect1><title>Asynchronous Cipher Request Handle</title>
+!Pinclude/linux/crypto.h Asynchronous Cipher Request Handle
+!Finclude/linux/crypto.h crypto_ablkcipher_reqsize
+!Finclude/linux/crypto.h ablkcipher_request_set_tfm
+!Finclude/linux/crypto.h ablkcipher_request_alloc
+!Finclude/linux/crypto.h ablkcipher_request_free
+!Finclude/linux/crypto.h ablkcipher_request_set_callback
+!Finclude/linux/crypto.h ablkcipher_request_set_crypt
+ </sect1>
+ <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
+!Pinclude/linux/crypto.h Authenticated Encryption With Associated Data (AEAD) Cipher API
+!Finclude/linux/crypto.h crypto_alloc_aead
+!Finclude/linux/crypto.h crypto_free_aead
+!Finclude/linux/crypto.h crypto_aead_ivsize
+!Finclude/linux/crypto.h crypto_aead_authsize
+!Finclude/linux/crypto.h crypto_aead_blocksize
+!Finclude/linux/crypto.h crypto_aead_setkey
+!Finclude/linux/crypto.h crypto_aead_setauthsize
+!Finclude/linux/crypto.h crypto_aead_encrypt
+!Finclude/linux/crypto.h crypto_aead_decrypt
+ </sect1>
+ <sect1><title>Asynchronous AEAD Request Handle</title>
+!Pinclude/linux/crypto.h Asynchronous AEAD Request Handle
+!Finclude/linux/crypto.h crypto_aead_reqsize
+!Finclude/linux/crypto.h aead_request_set_tfm
+!Finclude/linux/crypto.h aead_request_alloc
+!Finclude/linux/crypto.h aead_request_free
+!Finclude/linux/crypto.h aead_request_set_callback
+!Finclude/linux/crypto.h aead_request_set_crypt
+!Finclude/linux/crypto.h aead_request_set_assoc
+ </sect1>
+ <sect1><title>Synchronous Block Cipher API</title>
+!Pinclude/linux/crypto.h Synchronous Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_blkcipher
+!Finclude/linux/crypto.h crypto_free_blkcipher
+!Finclude/linux/crypto.h crypto_has_blkcipher
+!Finclude/linux/crypto.h crypto_blkcipher_name
+!Finclude/linux/crypto.h crypto_blkcipher_ivsize
+!Finclude/linux/crypto.h crypto_blkcipher_blocksize
+!Finclude/linux/crypto.h crypto_blkcipher_setkey
+!Finclude/linux/crypto.h crypto_blkcipher_encrypt
+!Finclude/linux/crypto.h crypto_blkcipher_encrypt_iv
+!Finclude/linux/crypto.h crypto_blkcipher_decrypt
+!Finclude/linux/crypto.h crypto_blkcipher_decrypt_iv
+!Finclude/linux/crypto.h crypto_blkcipher_set_iv
+!Finclude/linux/crypto.h crypto_blkcipher_get_iv
+ </sect1>
+ <sect1><title>Single Block Cipher API</title>
+!Pinclude/linux/crypto.h Single Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_cipher
+!Finclude/linux/crypto.h crypto_free_cipher
+!Finclude/linux/crypto.h crypto_has_cipher
+!Finclude/linux/crypto.h crypto_cipher_blocksize
+!Finclude/linux/crypto.h crypto_cipher_setkey
+!Finclude/linux/crypto.h crypto_cipher_encrypt_one
+!Finclude/linux/crypto.h crypto_cipher_decrypt_one
+ </sect1>
+ <sect1><title>Synchronous Message Digest API</title>
+!Pinclude/linux/crypto.h Synchronous Message Digest API
+!Finclude/linux/crypto.h crypto_alloc_hash
+!Finclude/linux/crypto.h crypto_free_hash
+!Finclude/linux/crypto.h crypto_has_hash
+!Finclude/linux/crypto.h crypto_hash_blocksize
+!Finclude/linux/crypto.h crypto_hash_digestsize
+!Finclude/linux/crypto.h crypto_hash_init
+!Finclude/linux/crypto.h crypto_hash_update
+!Finclude/linux/crypto.h crypto_hash_final
+!Finclude/linux/crypto.h crypto_hash_digest
+!Finclude/linux/crypto.h crypto_hash_setkey
+ </sect1>
+ <sect1><title>Message Digest Algorithm Definitions</title>
+!Pinclude/crypto/hash.h Message Digest Algorithm Definitions
+!Finclude/crypto/hash.h hash_alg_common
+!Finclude/crypto/hash.h ahash_alg
+!Finclude/crypto/hash.h shash_alg
+ </sect1>
+ <sect1><title>Asynchronous Message Digest API</title>
+!Pinclude/crypto/hash.h Asynchronous Message Digest API
+!Finclude/crypto/hash.h crypto_alloc_ahash
+!Finclude/crypto/hash.h crypto_free_ahash
+!Finclude/crypto/hash.h crypto_ahash_init
+!Finclude/crypto/hash.h crypto_ahash_digestsize
+!Finclude/crypto/hash.h crypto_ahash_reqtfm
+!Finclude/crypto/hash.h crypto_ahash_reqsize
+!Finclude/crypto/hash.h crypto_ahash_setkey
+!Finclude/crypto/hash.h crypto_ahash_finup
+!Finclude/crypto/hash.h crypto_ahash_final
+!Finclude/crypto/hash.h crypto_ahash_digest
+!Finclude/crypto/hash.h crypto_ahash_export
+!Finclude/crypto/hash.h crypto_ahash_import
+ </sect1>
+ <sect1><title>Asynchronous Hash Request Handle</title>
+!Pinclude/crypto/hash.h Asynchronous Hash Request Handle
+!Finclude/crypto/hash.h ahash_request_set_tfm
+!Finclude/crypto/hash.h ahash_request_alloc
+!Finclude/crypto/hash.h ahash_request_free
+!Finclude/crypto/hash.h ahash_request_set_callback
+!Finclude/crypto/hash.h ahash_request_set_crypt
+ </sect1>
+ <sect1><title>Synchronous Message Digest API</title>
+!Pinclude/crypto/hash.h Synchronous Message Digest API
+!Finclude/crypto/hash.h crypto_alloc_shash
+!Finclude/crypto/hash.h crypto_free_shash
+!Finclude/crypto/hash.h crypto_shash_blocksize
+!Finclude/crypto/hash.h crypto_shash_digestsize
+!Finclude/crypto/hash.h crypto_shash_descsize
+!Finclude/crypto/hash.h crypto_shash_setkey
+!Finclude/crypto/hash.h crypto_shash_digest
+!Finclude/crypto/hash.h crypto_shash_export
+!Finclude/crypto/hash.h crypto_shash_import
+!Finclude/crypto/hash.h crypto_shash_init
+!Finclude/crypto/hash.h crypto_shash_update
+!Finclude/crypto/hash.h crypto_shash_final
+!Finclude/crypto/hash.h crypto_shash_finup
+ </sect1>
+ <sect1><title>Crypto API Random Number API</title>
+!Pinclude/crypto/rng.h Random number generator API
+!Finclude/crypto/rng.h crypto_alloc_rng
+!Finclude/crypto/rng.h crypto_rng_alg
+!Finclude/crypto/rng.h crypto_free_rng
+!Finclude/crypto/rng.h crypto_rng_get_bytes
+!Finclude/crypto/rng.h crypto_rng_reset
+!Finclude/crypto/rng.h crypto_rng_seedsize
+!Cinclude/crypto/rng.h
+ </sect1>
+ </chapter>
+
+ <chapter id="Code"><title>Code Examples</title>
+ <sect1><title>Code Example For Asynchronous Block Cipher Operation</title>
+ <programlisting>
+
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+/* tie all data structures together */
+struct ablkcipher_def {
+ struct scatterlist sg;
+ struct crypto_ablkcipher *tfm;
+ struct ablkcipher_request *req;
+ struct tcrypt_result result;
+};
+
+/* Callback function */
+static void test_ablkcipher_cb(struct crypto_async_request *req, int error)
+{
+ struct tcrypt_result *result = req-&gt;data;
+
+ if (error == -EINPROGRESS)
+ return;
+ result-&gt;err = error;
+ complete(&amp;result-&gt;completion);
+ pr_info("Encryption finished successfully\n");
+}
+
+/* Perform cipher operation */
+static unsigned int test_ablkcipher_encdec(struct ablkcipher_def *ablk,
+ int enc)
+{
+ int rc = 0;
+
+ if (enc)
+ rc = crypto_ablkcipher_encrypt(ablk-&gt;req);
+ else
+ rc = crypto_ablkcipher_decrypt(ablk-&gt;req);
+
+ switch (rc) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ rc = wait_for_completion_interruptible(
+ &amp;ablk-&gt;result.completion);
+ if (!rc &amp;&amp; !ablk-&gt;result.err) {
+ reinit_completion(&amp;ablk-&gt;result.completion);
+ break;
+ }
+ default:
+ pr_info("ablkcipher encrypt returned with %d result %d\n",
+ rc, ablk-&gt;result.err);
+ break;
+ }
+ init_completion(&amp;ablk-&gt;result.completion);
+
+ return rc;
+}
+
+/* Initialize and trigger cipher operation */
+static int test_ablkcipher(void)
+{
+ struct ablkcipher_def ablk;
+ struct crypto_ablkcipher *ablkcipher = NULL;
+ struct ablkcipher_request *req = NULL;
+ char *scratchpad = NULL;
+ char *ivdata = NULL;
+ unsigned char key[32];
+ int ret = -EFAULT;
+
+ ablkcipher = crypto_alloc_ablkcipher("cbc-aes-aesni", 0, 0);
+ if (IS_ERR(ablkcipher)) {
+ pr_info("could not allocate ablkcipher handle\n");
+ return PTR_ERR(ablkcipher);
+ }
+
+ req = ablkcipher_request_alloc(ablkcipher, GFP_KERNEL);
+	if (!req) {
+		pr_info("could not allocate request queue\n");
+		ret = -ENOMEM;
+ goto out;
+ }
+
+ ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ test_ablkcipher_cb,
+ &amp;ablk.result);
+
+ /* AES 256 with random key */
+ get_random_bytes(&amp;key, 32);
+ if (crypto_ablkcipher_setkey(ablkcipher, key, 32)) {
+ pr_info("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* IV will be random */
+ ivdata = kmalloc(16, GFP_KERNEL);
+ if (!ivdata) {
+ pr_info("could not allocate ivdata\n");
+ goto out;
+ }
+ get_random_bytes(ivdata, 16);
+
+ /* Input data will be random */
+ scratchpad = kmalloc(16, GFP_KERNEL);
+ if (!scratchpad) {
+ pr_info("could not allocate scratchpad\n");
+ goto out;
+ }
+ get_random_bytes(scratchpad, 16);
+
+ ablk.tfm = ablkcipher;
+ ablk.req = req;
+
+ /* We encrypt one block */
+ sg_init_one(&amp;ablk.sg, scratchpad, 16);
+ ablkcipher_request_set_crypt(req, &amp;ablk.sg, &amp;ablk.sg, 16, ivdata);
+ init_completion(&amp;ablk.result.completion);
+
+ /* encrypt data */
+ ret = test_ablkcipher_encdec(&amp;ablk, 1);
+ if (ret)
+ goto out;
+
+ pr_info("Encryption triggered successfully\n");
+
+out:
+ if (ablkcipher)
+ crypto_free_ablkcipher(ablkcipher);
+ if (req)
+ ablkcipher_request_free(req);
+ if (ivdata)
+ kfree(ivdata);
+ if (scratchpad)
+ kfree(scratchpad);
+ return ret;
+}
+ </programlisting>
+ </sect1>
+
+ <sect1><title>Code Example For Synchronous Block Cipher Operation</title>
+ <programlisting>
+
+static int test_blkcipher(void)
+{
+ struct crypto_blkcipher *blkcipher = NULL;
+ char *cipher = "cbc(aes)";
+ // AES 128
+	char *key =
+"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
+	char *iv =
+"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
+ unsigned int ivsize = 0;
+ char *scratchpad = NULL; // holds plaintext and ciphertext
+ struct scatterlist sg;
+ struct blkcipher_desc desc;
+ int ret = -EFAULT;
+
+ blkcipher = crypto_alloc_blkcipher(cipher, 0, 0);
+ if (IS_ERR(blkcipher)) {
+ printk("could not allocate blkcipher handle for %s\n", cipher);
+		return PTR_ERR(blkcipher);
+ }
+
+ if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) {
+ printk("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ivsize = crypto_blkcipher_ivsize(blkcipher);
+ if (ivsize) {
+ if (ivsize != strlen(iv))
+ printk("IV length differs from expected length\n");
+ crypto_blkcipher_set_iv(blkcipher, iv, ivsize);
+ }
+
+ scratchpad = kmalloc(crypto_blkcipher_blocksize(blkcipher), GFP_KERNEL);
+ if (!scratchpad) {
+ printk("could not allocate scratchpad for %s\n", cipher);
+ goto out;
+ }
+ /* get some random data that we want to encrypt */
+ get_random_bytes(scratchpad, crypto_blkcipher_blocksize(blkcipher));
+
+ desc.flags = 0;
+ desc.tfm = blkcipher;
+ sg_init_one(&amp;sg, scratchpad, crypto_blkcipher_blocksize(blkcipher));
+
+ /* encrypt data in place */
+ crypto_blkcipher_encrypt(&amp;desc, &amp;sg, &amp;sg,
+ crypto_blkcipher_blocksize(blkcipher));
+
+	/* decrypt data in place
+	 * crypto_blkcipher_decrypt(&amp;desc, &amp;sg, &amp;sg,
+	 *			    crypto_blkcipher_blocksize(blkcipher));
+	 */
+
+
+	printk("Cipher operation completed\n");
+	ret = 0;
+
+out:
+ if (blkcipher)
+ crypto_free_blkcipher(blkcipher);
+ if (scratchpad)
+ kzfree(scratchpad);
+ return ret;
+}
+ </programlisting>
+ </sect1>
+
+ <sect1><title>Code Example For Use of Operational State Memory With SHASH</title>
+ <programlisting>
+
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc-&gt;shash.tfm = alg;
+ sdesc-&gt;shash.flags = 0x0;
+ return sdesc;
+}
+
+static int calc_hash(struct crypto_shash *alg,
+		     const unsigned char *data, unsigned int datalen,
+		     unsigned char *digest) {
+	struct sdesc *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(alg);
+ if (IS_ERR(sdesc)) {
+		pr_info("calc_hash: can't allocate sdesc\n");
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&amp;sdesc-&gt;shash, data, datalen, digest);
+ kfree(sdesc);
+ return ret;
+}
+ </programlisting>
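+
+ <para>
+ A caller could use the helpers above, for instance, as in the
+ following sketch. The "sha256" algorithm name is only an example,
+ and data/datalen are assumed to exist in the surrounding code.
+ </para>
+
+ <programlisting>
+#include &lt;crypto/sha.h&gt;
+
+struct crypto_shash *alg;
+u8 digest[SHA256_DIGEST_SIZE];
+int ret;
+
+alg = crypto_alloc_shash("sha256", 0, 0);
+if (IS_ERR(alg))
+	return PTR_ERR(alg);
+
+ret = calc_hash(alg, data, datalen, digest);
+
+crypto_free_shash(alg);
+ </programlisting>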
+ </sect1>
+
+ <sect1><title>Code Example For Random Number Generator Usage</title>
+ <programlisting>
+
+static int get_random_numbers(u8 *buf, unsigned int len)
+{
+	struct crypto_rng *rng = NULL;
+	char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
+ int ret;
+
+ if (!buf || !len) {
+ pr_debug("No output buffer provided\n");
+ return -EINVAL;
+ }
+
+ rng = crypto_alloc_rng(drbg, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_debug("could not allocate RNG handle for %s\n", drbg);
+		return PTR_ERR(rng);
+ }
+
+ ret = crypto_rng_get_bytes(rng, buf, len);
+ if (ret &lt; 0)
+ pr_debug("generation of random numbers failed\n");
+ else if (ret == 0)
+		pr_debug("RNG returned no data\n");
+ else
+ pr_debug("RNG returned %d bytes of data\n", ret);
+
+ crypto_free_rng(rng);
+ return ret;
+}
+ </programlisting>
+ </sect1>
+ </chapter>
+ </book>
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index be35bc328b77..4b592ffbafee 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -492,10 +492,10 @@ char *date;</synopsis>
<sect2>
<title>The Translation Table Manager (TTM)</title>
<para>
- TTM design background and information belongs here.
+ TTM design background and information belongs here.
</para>
<sect3>
- <title>TTM initialization</title>
+ <title>TTM initialization</title>
<warning><para>This section is outdated.</para></warning>
<para>
Drivers wishing to support TTM must fill out a drm_bo_driver
@@ -503,42 +503,42 @@ char *date;</synopsis>
pointers for initializing the TTM, allocating and freeing memory,
waiting for command completion and fence synchronization, and memory
migration. See the radeon_ttm.c file for an example of usage.
- </para>
- <para>
- The ttm_global_reference structure is made up of several fields:
- </para>
- <programlisting>
- struct ttm_global_reference {
- enum ttm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct ttm_global_reference *);
- void (*release) (struct ttm_global_reference *);
- };
- </programlisting>
- <para>
- There should be one global reference structure for your memory
- manager as a whole, and there will be others for each object
- created by the memory manager at runtime. Your global TTM should
- have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
- object should be sizeof(struct ttm_mem_global), and the init and
- release hooks should point at your driver-specific init and
- release routines, which probably eventually call
- ttm_mem_global_init and ttm_mem_global_release, respectively.
- </para>
- <para>
- Once your global TTM accounting structure is set up and initialized
- by calling ttm_global_item_ref() on it,
- you need to create a buffer object TTM to
- provide a pool for buffer object allocation by clients and the
- kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
- and its size should be sizeof(struct ttm_bo_global). Again,
- driver-specific init and release functions may be provided,
- likely eventually calling ttm_bo_global_init() and
- ttm_bo_global_release(), respectively. Also, like the previous
- object, ttm_global_item_ref() is used to create an initial reference
- count for the TTM, which will call your initialization function.
- </para>
+ </para>
+ <para>
+ The ttm_global_reference structure is made up of several fields:
+ </para>
+ <programlisting>
+ struct ttm_global_reference {
+ enum ttm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct ttm_global_reference *);
+ void (*release) (struct ttm_global_reference *);
+ };
+ </programlisting>
+ <para>
+ There should be one global reference structure for your memory
+ manager as a whole, and there will be others for each object
+ created by the memory manager at runtime. Your global TTM should
+ have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
+ object should be sizeof(struct ttm_mem_global), and the init and
+ release hooks should point at your driver-specific init and
+ release routines, which probably eventually call
+ ttm_mem_global_init and ttm_mem_global_release, respectively.
+ </para>
+ <para>
+ Once your global TTM accounting structure is set up and initialized
+ by calling ttm_global_item_ref() on it,
+ you need to create a buffer object TTM to
+ provide a pool for buffer object allocation by clients and the
+ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
+ and its size should be sizeof(struct ttm_bo_global). Again,
+ driver-specific init and release functions may be provided,
+ likely eventually calling ttm_bo_global_init() and
+ ttm_bo_global_release(), respectively. Also, like the previous
+ object, ttm_global_item_ref() is used to create an initial reference
+ count for the TTM, which will call your initialization function.
+ </para>
</sect3>
</sect2>
<sect2 id="drm-gem">
@@ -566,19 +566,19 @@ char *date;</synopsis>
using driver-specific ioctls.
</para>
<para>
- On a fundamental level, GEM involves several operations:
- <itemizedlist>
- <listitem>Memory allocation and freeing</listitem>
- <listitem>Command execution</listitem>
- <listitem>Aperture management at command execution time</listitem>
- </itemizedlist>
- Buffer object allocation is relatively straightforward and largely
+ On a fundamental level, GEM involves several operations:
+ <itemizedlist>
+ <listitem>Memory allocation and freeing</listitem>
+ <listitem>Command execution</listitem>
+ <listitem>Aperture management at command execution time</listitem>
+ </itemizedlist>
+ Buffer object allocation is relatively straightforward and largely
provided by Linux's shmem layer, which provides memory to back each
object.
</para>
<para>
Device-specific operations, such as command execution, pinning, buffer
- read &amp; write, mapping, and domain ownership transfers are left to
+ read &amp; write, mapping, and domain ownership transfers are left to
driver-specific ioctls.
</para>
<sect3>
@@ -738,16 +738,16 @@ char *date;</synopsis>
respectively. The conversion is handled by the DRM core without any
driver-specific support.
</para>
- <para>
- GEM also supports buffer sharing with dma-buf file descriptors through
- PRIME. GEM-based drivers must use the provided helpers functions to
- implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
- Since sharing file descriptors is inherently more secure than the
- easily guessable and global GEM names it is the preferred buffer
- sharing mechanism. Sharing buffers through GEM names is only supported
- for legacy userspace. Furthermore PRIME also allows cross-device
- buffer sharing since it is based on dma-bufs.
- </para>
+ <para>
+ GEM also supports buffer sharing with dma-buf file descriptors through
+      PRIME. GEM-based drivers must use the provided helper functions to
+ implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
+ Since sharing file descriptors is inherently more secure than the
+ easily guessable and global GEM names it is the preferred buffer
+ sharing mechanism. Sharing buffers through GEM names is only supported
+ for legacy userspace. Furthermore PRIME also allows cross-device
+ buffer sharing since it is based on dma-bufs.
+ </para>
</sect3>
<sect3 id="drm-gem-objects-mapping">
<title>GEM Objects Mapping</title>
@@ -852,7 +852,7 @@ char *date;</synopsis>
<sect3>
<title>Command Execution</title>
<para>
- Perhaps the most important GEM function for GPU devices is providing a
+ Perhaps the most important GEM function for GPU devices is providing a
command execution interface to clients. Client programs construct
command buffers containing references to previously allocated memory
objects, and then submit them to GEM. At that point, GEM takes care to
@@ -874,95 +874,101 @@ char *date;</synopsis>
<title>GEM Function Reference</title>
!Edrivers/gpu/drm/drm_gem.c
</sect3>
- </sect2>
- <sect2>
- <title>VMA Offset Manager</title>
+ </sect2>
+ <sect2>
+ <title>VMA Offset Manager</title>
!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
!Edrivers/gpu/drm/drm_vma_manager.c
!Iinclude/drm/drm_vma_manager.h
- </sect2>
- <sect2 id="drm-prime-support">
- <title>PRIME Buffer Sharing</title>
- <para>
- PRIME is the cross device buffer sharing framework in drm, originally
- created for the OPTIMUS range of multi-gpu platforms. To userspace
- PRIME buffers are dma-buf based file descriptors.
- </para>
- <sect3>
- <title>Overview and Driver Interface</title>
- <para>
- Similar to GEM global names, PRIME file descriptors are
- also used to share buffer objects across processes. They offer
- additional security: as file descriptors must be explicitly sent over
- UNIX domain sockets to be shared between applications, they can't be
- guessed like the globally unique GEM names.
- </para>
- <para>
- Drivers that support the PRIME
- API must set the DRIVER_PRIME bit in the struct
- <structname>drm_driver</structname>
- <structfield>driver_features</structfield> field, and implement the
- <methodname>prime_handle_to_fd</methodname> and
- <methodname>prime_fd_to_handle</methodname> operations.
- </para>
- <para>
- <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags, int *prime_fd);
+ </sect2>
+ <sect2 id="drm-prime-support">
+ <title>PRIME Buffer Sharing</title>
+ <para>
+ PRIME is the cross device buffer sharing framework in drm, originally
+ created for the OPTIMUS range of multi-gpu platforms. To userspace
+ PRIME buffers are dma-buf based file descriptors.
+ </para>
+ <sect3>
+ <title>Overview and Driver Interface</title>
+ <para>
+ Similar to GEM global names, PRIME file descriptors are
+ also used to share buffer objects across processes. They offer
+ additional security: as file descriptors must be explicitly sent over
+ UNIX domain sockets to be shared between applications, they can't be
+ guessed like the globally unique GEM names.
+ </para>
+ <para>
+ Drivers that support the PRIME
+ API must set the DRIVER_PRIME bit in the struct
+ <structname>drm_driver</structname>
+ <structfield>driver_features</structfield> field, and implement the
+ <methodname>prime_handle_to_fd</methodname> and
+ <methodname>prime_fd_to_handle</methodname> operations.
+ </para>
+ <para>
+ <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags, int *prime_fd);
int (*prime_fd_to_handle)(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle);</synopsis>
- Those two operations convert a handle to a PRIME file descriptor and
- vice versa. Drivers must use the kernel dma-buf buffer sharing framework
- to manage the PRIME file descriptors. Similar to the mode setting
- API PRIME is agnostic to the underlying buffer object manager, as
- long as handles are 32bit unsigned integers.
- </para>
- <para>
- While non-GEM drivers must implement the operations themselves, GEM
- drivers must use the <function>drm_gem_prime_handle_to_fd</function>
- and <function>drm_gem_prime_fd_to_handle</function> helper functions.
- Those helpers rely on the driver
- <methodname>gem_prime_export</methodname> and
- <methodname>gem_prime_import</methodname> operations to create a dma-buf
- instance from a GEM object (dma-buf exporter role) and to create a GEM
- object from a dma-buf instance (dma-buf importer role).
- </para>
- <para>
- <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle);</synopsis>
+ Those two operations convert a handle to a PRIME file descriptor and
+ vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+ to manage the PRIME file descriptors. Similar to the mode setting
+ API PRIME is agnostic to the underlying buffer object manager, as
+ long as handles are 32bit unsigned integers.
+ </para>
+ <para>
+ While non-GEM drivers must implement the operations themselves, GEM
+ drivers must use the <function>drm_gem_prime_handle_to_fd</function>
+ and <function>drm_gem_prime_fd_to_handle</function> helper functions.
+ Those helpers rely on the driver
+ <methodname>gem_prime_export</methodname> and
+ <methodname>gem_prime_import</methodname> operations to create a dma-buf
+ instance from a GEM object (dma-buf exporter role) and to create a GEM
+ object from a dma-buf instance (dma-buf importer role).
+ </para>
+ <para>
+ <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);</synopsis>
- These two operations are mandatory for GEM drivers that support
- PRIME.
- </para>
- </sect3>
- <sect3>
- <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+ struct dma_buf *dma_buf);</synopsis>
+ These two operations are mandatory for GEM drivers that support
+ PRIME.
+ </para>
</sect3>
- </sect2>
- <sect2>
- <title>PRIME Function References</title>
+ <sect3>
+ <title>PRIME Helper Functions</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+ </sect3>
+ </sect2>
+ <sect2>
+ <title>PRIME Function References</title>
!Edrivers/gpu/drm/drm_prime.c
- </sect2>
- <sect2>
- <title>DRM MM Range Allocator</title>
- <sect3>
- <title>Overview</title>
+ </sect2>
+ <sect2>
+ <title>DRM MM Range Allocator</title>
+ <sect3>
+ <title>Overview</title>
!Pdrivers/gpu/drm/drm_mm.c Overview
- </sect3>
- <sect3>
- <title>LRU Scan/Eviction Support</title>
+ </sect3>
+ <sect3>
+ <title>LRU Scan/Eviction Support</title>
!Pdrivers/gpu/drm/drm_mm.c lru scan roaster
- </sect3>
+ </sect3>
</sect2>
- <sect2>
- <title>DRM MM Range Allocator Function References</title>
+ <sect2>
+ <title>DRM MM Range Allocator Function References</title>
!Edrivers/gpu/drm/drm_mm.c
!Iinclude/drm/drm_mm.h
- </sect2>
+ </sect2>
+ <sect2>
+ <title>CMA Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
+!Edrivers/gpu/drm/drm_gem_cma_helper.c
+!Iinclude/drm/drm_gem_cma_helper.h
+ </sect2>
</sect1>
<!-- Internals: mode setting -->
@@ -996,6 +1002,10 @@ int max_width, max_height;</synopsis>
!Edrivers/gpu/drm/drm_modes.c
</sect2>
<sect2>
+ <title>Atomic Mode Setting Function Reference</title>
+!Edrivers/gpu/drm/drm_atomic.c
+ </sect2>
+ <sect2>
<title>Frame Buffer Creation</title>
<synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
@@ -1827,6 +1837,10 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_crtc.c
</sect2>
<sect2>
+ <title>KMS Data Structures</title>
+!Iinclude/drm/drm_crtc.h
+ </sect2>
+ <sect2>
<title>KMS Locking</title>
!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
!Iinclude/drm/drm_modeset_lock.h
@@ -1933,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
and then retrieves a list of modes by calling the connector
<methodname>get_modes</methodname> helper operation.
</para>
+ <para>
+ If the helper operation returns no mode, and if the connector status
+ is connector_status_connected, standard VESA DMT modes up to
+ 1024x768 are automatically added to the modes list by a call to
+ <function>drm_add_modes_noedid</function>.
+ </para>
<para>
- The function filters out modes larger than
+ The function then filters out modes larger than
<parameter>max_width</parameter> and <parameter>max_height</parameter>
- if specified. It then calls the optional connector
+ if specified. It finally calls the optional connector
<methodname>mode_valid</methodname> helper operation for each mode in
the probed list to check whether the mode is valid for the connector.
</para>
@@ -2076,12 +2096,20 @@ void intel_crt_init(struct drm_device *dev)
<synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
<para>
Fill the connector's <structfield>probed_modes</structfield> list
- by parsing EDID data with <function>drm_add_edid_modes</function> or
- calling <function>drm_mode_probed_add</function> directly for every
+ by parsing EDID data with <function>drm_add_edid_modes</function>,
+ adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+ or calling <function>drm_mode_probed_add</function> directly for every
supported mode and return the number of modes it has detected. This
operation is mandatory.
</para>
<para>
+ Note that the caller function will automatically add standard VESA
+ DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+ helper operation returns no mode and if the connector status is
+ connector_status_connected. There is no need to call
+ <function>drm_add_edid_modes</function> manually in that case.
+ </para>
+ <para>
When adding modes manually the driver creates each mode with a call to
<function>drm_mode_create</function> and must fill the following fields.
<itemizedlist>
@@ -2278,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
<function>drm_helper_probe_single_connector_modes</function>.
</para>
<para>
- When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+ When parsing EDID data, <function>drm_add_edid_modes</function> fills the
connector <structfield>display_info</structfield>
<structfield>width_mm</structfield> and
<structfield>height_mm</structfield> fields. When creating modes
@@ -2316,8 +2344,26 @@ void intel_crt_init(struct drm_device *dev)
</itemizedlist>
</sect2>
<sect2>
+ <title>Atomic Modeset Helper Functions Reference</title>
+ <sect3>
+ <title>Overview</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c overview
+ </sect3>
+ <sect3>
+ <title>Implementing Asynchronous Atomic Commit</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+ </sect3>
+ <sect3>
+ <title>Atomic State Reset and Initialization</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
+ </sect3>
+!Iinclude/drm/drm_atomic_helper.h
+!Edrivers/gpu/drm/drm_atomic_helper.c
+ </sect2>
+ <sect2>
<title>Modeset Helper Functions Reference</title>
!Edrivers/gpu/drm/drm_crtc_helper.c
+!Pdrivers/gpu/drm/drm_crtc_helper.c overview
</sect2>
<sect2>
<title>Output Probing Helper Functions Reference</title>
@@ -2343,6 +2389,12 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_dp_mst_topology.c
</sect2>
<sect2>
+ <title>MIPI DSI Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
+!Iinclude/drm/drm_mipi_dsi.h
+!Edrivers/gpu/drm/drm_mipi_dsi.c
+ </sect2>
+ <sect2>
<title>EDID Helper Functions Reference</title>
!Edrivers/gpu/drm/drm_edid.c
</sect2>
@@ -2371,7 +2423,12 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
<sect2>
<title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c Plane Helpers
+!Edrivers/gpu/drm/drm_plane_helper.c
+!Pdrivers/gpu/drm/drm_plane_helper.c overview
+ </sect2>
+ <sect2>
+ <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
</sect2>
</sect1>
@@ -2507,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="21" valign="top" >DRM</td>
- <td rowspan="2" valign="top" >Generic</td>
+ <td rowspan="25" valign="top" >DRM</td>
+ <td rowspan="4" valign="top" >Generic</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@@ -2523,6 +2580,20 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Contains DPMS operation mode value.</td>
</tr>
<tr>
+ <td valign="top" >“PATH”</td>
+ <td valign="top" >BLOB | IMMUTABLE</td>
+ <td valign="top" >0</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >Contains topology path to a connector.</td>
+ </tr>
+ <tr>
+ <td valign="top" >“TILE”</td>
+ <td valign="top" >BLOB | IMMUTABLE</td>
+ <td valign="top" >0</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >Contains tiling information for a connector.</td>
+ </tr>
+ <tr>
<td rowspan="1" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
@@ -2638,6 +2709,21 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >TBD</td>
</tr>
<tr>
+ <td rowspan="2" valign="top" >Virtual GPU</td>
+ <td valign="top" >“suggested X”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=0xffffffff</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >property to suggest an X offset for a connector</td>
+ </tr>
+ <tr>
+ <td valign="top" >“suggested Y”</td>
+ <td valign="top" >RANGE</td>
+ <td valign="top" >Min=0, Max=0xffffffff</td>
+ <td valign="top" >Connector</td>
+	<td valign="top" >property to suggest a Y offset for a connector</td>
+ </tr>
+ <tr>
<td rowspan="3" valign="top" >Optional</td>
<td valign="top" >“scaling mode”</td>
<td valign="top" >ENUM</td>
@@ -3788,6 +3874,26 @@ int num_ioctls;</synopsis>
those have basic support through the gma500 drm driver.
</para>
<sect1>
+ <title>Core Driver Infrastructure</title>
+ <para>
+ This section covers core driver infrastructure used by both the display
+ and the GEM parts of the driver.
+ </para>
+ <sect2>
+ <title>Runtime Power Management</title>
+!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
+!Idrivers/gpu/drm/i915/intel_runtime_pm.c
+ </sect2>
+ <sect2>
+ <title>Interrupt Handling</title>
+!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
+ </sect2>
+ </sect1>
+ <sect1>
<title>Display Hardware Handling</title>
<para>
This section covers everything related to the display hardware including
@@ -3804,6 +3910,18 @@ int num_ioctls;</synopsis>
</para>
</sect2>
<sect2>
+ <title>Frontbuffer Tracking</title>
+!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
+!Idrivers/gpu/drm/i915/intel_frontbuffer.c
+!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
+!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
+ </sect2>
+ <sect2>
+ <title>Display FIFO Underrun Reporting</title>
+!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
+!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
+ </sect2>
+ <sect2>
<title>Plane Configuration</title>
<para>
This section covers plane configuration and composition with the
@@ -3823,6 +3941,16 @@ int num_ioctls;</synopsis>
</para>
</sect2>
<sect2>
+ <title>High Definition Audio</title>
+!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
+!Idrivers/gpu/drm/i915/intel_audio.c
+ </sect2>
+ <sect2>
+ <title>Panel Self Refresh PSR (PSR/SRD)</title>
+!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
+!Idrivers/gpu/drm/i915/intel_psr.c
+ </sect2>
+ <sect2>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
@@ -3931,6 +4059,28 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
</sect1>
+
+ <sect1>
+ <title> Tracing </title>
+ <para>
+      This section covers all things related to the tracepoints implemented in
+ the i915 driver.
+ </para>
+ <sect2>
+ <title> i915_ppgtt_create and i915_ppgtt_release </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
+ </sect2>
+ <sect2>
+ <title> i915_context_create and i915_context_free </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
+ </sect2>
+ <sect2>
+ <title> switch_mm </title>
+!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
+ </sect2>
+ </sect1>
+
</chapter>
+!Cdrivers/gpu/drm/i915/i915_irq.c
</part>
</book>
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
index 948ddaab592e..3018564ddfd9 100644
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -120,8 +120,8 @@ struct dtv_properties {
</para>
<informaltable><tgroup cols="1"><tbody><row><entry
align="char">
-<para>This ioctl call sets one or more frontend properties. This call only
- requires read-only access to the device.</para>
+<para>This ioctl call sets one or more frontend properties. This call
+ requires read/write access to the device.</para>
</entry>
</row></tbody></tgroup></informaltable>
<para>SYNOPSIS
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
index d2eb79e41a01..7ff01a23c2fe 100644
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ b/Documentation/DocBook/media/v4l/biblio.xml
@@ -178,6 +178,75 @@ Signal - NTSC for Studio Applications"</title>
1125-Line High-Definition Production"</title>
</biblioentry>
+ <biblioentry id="srgb">
+ <abbrev>sRGB</abbrev>
+ <authorgroup>
+ <corpauthor>International Electrotechnical Commission
+(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
+ </authorgroup>
+ <title>IEC 61966-2-1 ed1.0 "Multimedia systems and equipment - Colour measurement
+and management - Part 2-1: Colour management - Default RGB colour space - sRGB"</title>
+ </biblioentry>
+
+ <biblioentry id="sycc">
+ <abbrev>sYCC</abbrev>
+ <authorgroup>
+ <corpauthor>International Electrotechnical Commission
+(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
+ </authorgroup>
+ <title>IEC 61966-2-1-am1 ed1.0 "Amendment 1 - Multimedia systems and equipment - Colour measurement
+and management - Part 2-1: Colour management - Default RGB colour space - sRGB"</title>
+ </biblioentry>
+
+ <biblioentry id="xvycc">
+ <abbrev>xvYCC</abbrev>
+ <authorgroup>
+ <corpauthor>International Electrotechnical Commission
+(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
+ </authorgroup>
+ <title>IEC 61966-2-4 ed1.0 "Multimedia systems and equipment - Colour measurement
+and management - Part 2-4: Colour management - Extended-gamut YCC colour space for video
+applications - xvYCC"</title>
+ </biblioentry>
+
+ <biblioentry id="adobergb">
+ <abbrev>AdobeRGB</abbrev>
+ <authorgroup>
+ <corpauthor>Adobe Systems Incorporated (<ulink url="http://www.adobe.com">http://www.adobe.com</ulink>)</corpauthor>
+ </authorgroup>
+ <title>Adobe&copy; RGB (1998) Color Image Encoding Version 2005-05</title>
+ </biblioentry>
+
+ <biblioentry id="oprgb">
+ <abbrev>opRGB</abbrev>
+ <authorgroup>
+ <corpauthor>International Electrotechnical Commission
+(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
+ </authorgroup>
+ <title>IEC 61966-2-5 "Multimedia systems and equipment - Colour measurement
+and management - Part 2-5: Colour management - Optional RGB colour space - opRGB"</title>
+ </biblioentry>
+
+ <biblioentry id="itu2020">
+ <abbrev>ITU&nbsp;BT.2020</abbrev>
+ <authorgroup>
+ <corpauthor>International Telecommunication Union (<ulink
+url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
+ </authorgroup>
+ <title>ITU-R Recommendation BT.2020 (08/2012) "Parameter values for ultra-high
+definition television systems for production and international programme exchange"
+</title>
+ </biblioentry>
+
+ <biblioentry id="tech3213">
+ <abbrev>EBU&nbsp;Tech&nbsp;3213</abbrev>
+ <authorgroup>
+      <corpauthor>European Broadcasting Union (<ulink
+url="http://www.ebu.ch">http://www.ebu.ch</ulink>)</corpauthor>
+ </authorgroup>
+      <title>E.B.U. Standard for Chromaticity Tolerances for Studio Monitors</title>
+ </biblioentry>
+
<biblioentry id="iec62106">
<abbrev>IEC&nbsp;62106</abbrev>
<authorgroup>
@@ -266,4 +335,20 @@ in the frequency range from 87,5 to 108,0 MHz</title>
<subtitle>Version 1, Revision 2</subtitle>
</biblioentry>
+ <biblioentry id="poynton">
+ <abbrev>poynton</abbrev>
+ <authorgroup>
+ <corpauthor>Charles Poynton</corpauthor>
+ </authorgroup>
+ <title>Digital Video and HDTV, Algorithms and Interfaces</title>
+ </biblioentry>
+
+ <biblioentry id="colimg">
+ <abbrev>colimg</abbrev>
+ <authorgroup>
+ <corpauthor>Erik Reinhard et al.</corpauthor>
+ </authorgroup>
+ <title>Color Imaging: Fundamentals and Applications</title>
+ </biblioentry>
+
</bibliography>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 0a2debfa68f6..350dfb3d71ea 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2579,6 +2579,18 @@ fields changed from _s32 to _u32.
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 3.19</title>
+ <orderedlist>
+ <listitem>
+ <para>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding;
+and &v4l2-quantization; fields to &v4l2-pix-format;, &v4l2-pix-format-mplane;
+and &v4l2-mbus-framefmt;.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
+
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/media/v4l/dev-subdev.xml b/Documentation/DocBook/media/v4l/dev-subdev.xml
index d15aaf83f56f..4f0ba58c9bd9 100644
--- a/Documentation/DocBook/media/v4l/dev-subdev.xml
+++ b/Documentation/DocBook/media/v4l/dev-subdev.xml
@@ -195,53 +195,59 @@
<title>Sample Pipeline Configuration</title>
<tgroup cols="3">
<colspec colname="what"/>
- <colspec colname="sensor-0" />
- <colspec colname="frontend-0" />
- <colspec colname="frontend-1" />
- <colspec colname="scaler-0" />
- <colspec colname="scaler-1" />
+ <colspec colname="sensor-0 format" />
+ <colspec colname="frontend-0 format" />
+ <colspec colname="frontend-1 format" />
+ <colspec colname="scaler-0 format" />
+ <colspec colname="scaler-0 compose" />
+ <colspec colname="scaler-1 format" />
<thead>
<row>
<entry></entry>
- <entry>Sensor/0</entry>
- <entry>Frontend/0</entry>
- <entry>Frontend/1</entry>
- <entry>Scaler/0</entry>
- <entry>Scaler/1</entry>
+ <entry>Sensor/0 format</entry>
+ <entry>Frontend/0 format</entry>
+ <entry>Frontend/1 format</entry>
+ <entry>Scaler/0 format</entry>
+ <entry>Scaler/0 compose selection rectangle</entry>
+ <entry>Scaler/1 format</entry>
</row>
</thead>
<tbody valign="top">
<row>
<entry>Initial state</entry>
- <entry>2048x1536</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
</row>
<row>
- <entry>Configure frontend input</entry>
- <entry>2048x1536</entry>
- <entry><emphasis>2048x1536</emphasis></entry>
- <entry><emphasis>2046x1534</emphasis></entry>
- <entry>-</entry>
- <entry>-</entry>
+ <entry>Configure frontend sink format</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry><emphasis>2048x1536/SGRBG8_1X8</emphasis></entry>
+ <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
+ <entry>(default)</entry>
</row>
<row>
- <entry>Configure scaler input</entry>
- <entry>2048x1536</entry>
- <entry>2048x1536</entry>
- <entry>2046x1534</entry>
- <entry><emphasis>2046x1534</emphasis></entry>
- <entry><emphasis>2046x1534</emphasis></entry>
+ <entry>Configure scaler sink format</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry>2046x1534/SGRBG8_1X8</entry>
+ <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
+ <entry><emphasis>0,0/2046x1534</emphasis></entry>
+ <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
</row>
<row>
- <entry>Configure scaler output</entry>
- <entry>2048x1536</entry>
- <entry>2048x1536</entry>
- <entry>2046x1534</entry>
- <entry>2046x1534</entry>
- <entry><emphasis>1280x960</emphasis></entry>
+ <entry>Configure scaler sink compose selection</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry>2048x1536/SGRBG8_1X8</entry>
+ <entry>2046x1534/SGRBG8_1X8</entry>
+ <entry>2046x1534/SGRBG8_1X8</entry>
+ <entry><emphasis>0,0/1280x960</emphasis></entry>
+ <entry><emphasis>1280x960/SGRBG8_1X8</emphasis></entry>
</row>
</tbody>
</tgroup>
@@ -249,19 +255,30 @@
<para>
<orderedlist>
- <listitem><para>Initial state. The sensor output is set to its native 3MP
- resolution. Resolutions on the host frontend and scaler input and output
- pads are undefined.</para></listitem>
- <listitem><para>The application configures the frontend input pad resolution to
- 2048x1536. The driver propagates the format to the frontend output pad.
- Note that the propagated output format can be different, as in this case,
- than the input format, as the hardware might need to crop pixels (for
- instance when converting a Bayer filter pattern to RGB or YUV).</para></listitem>
- <listitem><para>The application configures the scaler input pad resolution to
- 2046x1534 to match the frontend output resolution. The driver propagates
- the format to the scaler output pad.</para></listitem>
- <listitem><para>The application configures the scaler output pad resolution to
- 1280x960.</para></listitem>
+ <listitem><para>Initial state. The sensor source pad format is
+ set to its native 3MP size and V4L2_MBUS_FMT_SGRBG8_1X8
+ media bus code. Formats on the host frontend and scaler sink
+ and source pads have the default values, as well as the
+ compose rectangle on the scaler's sink pad.</para></listitem>
+
+ <listitem><para>The application configures the frontend sink
+ pad format's size to 2048x1536 and its media bus code to
+      V4L2_MBUS_FMT_SGRBG8_1X8. The driver propagates the format to
+ the frontend source pad.</para></listitem>
+
+ <listitem><para>The application configures the scaler sink pad
+ format's size to 2046x1534 and the media bus code to
+      V4L2_MBUS_FMT_SGRBG8_1X8 to match the frontend source size and
+      media bus code. The driver propagates the size to the
+ compose selection rectangle on the scaler's sink pad, and the
+ format to the scaler source pad.</para></listitem>
+
+ <listitem><para>The application configures the size of the compose
+      selection rectangle of the scaler's sink pad to 1280x960. The driver
+ propagates the size to the scaler's source pad
+ format.</para></listitem>
+
</orderedlist>
</para>
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index e5e8325aa3d7..1c17f802b471 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -1422,7 +1422,10 @@ one of the <constant>V4L2_FIELD_NONE</constant>,
<constant>V4L2_FIELD_BOTTOM</constant>, or
<constant>V4L2_FIELD_INTERLACED</constant> formats is acceptable.
Drivers choose depending on hardware capabilities or e.&nbsp;g. the
-requested image size, and return the actual field order. &v4l2-buffer;
+requested image size, and return the actual field order. Drivers must
+never return <constant>V4L2_FIELD_ANY</constant>. If multiple
+field orders are possible the driver must choose one of the possible
+field orders during &VIDIOC-S-FMT; or &VIDIOC-TRY-FMT;. &v4l2-buffer;
<structfield>field</structfield> can never be
<constant>V4L2_FIELD_ANY</constant>.</entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index df5b23d46552..d5eca4b8f74b 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -138,9 +138,25 @@ applicable values.</para></entry>
<row>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
- <entry>Flags set by the application or driver, see <xref
+ <entry>Flags set by the application or driver, see <xref
linkend="format-flags" />.</entry>
</row>
+ <row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -232,9 +248,25 @@ codes can be used.</entry>
<entry>Flags set by the application or driver, see <xref
linkend="format-flags" />.</entry>
</row>
+ <row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
<row>
<entry>__u8</entry>
- <entry><structfield>reserved[10]</structfield></entry>
+ <entry><structfield>reserved[8]</structfield></entry>
<entry>Reserved for future extensions. Should be zeroed by the
application.</entry>
</row>
@@ -296,345 +328,1005 @@ in the 2-planar version or with each component in its own buffer in the
<section id="colorspaces">
<title>Colorspaces</title>
- <para>[intro]</para>
+ <para>'Color' is a very complex concept and depends on physics, chemistry and
+biology. Just because you have three numbers that describe the 'red', 'green'
+and 'blue' components of the color of a pixel does not mean that you can accurately
+display that color. A colorspace defines what it actually <emphasis>means</emphasis>
+to have an RGB value of e.g. (255,&nbsp;0,&nbsp;0). That is, which color should be
+reproduced on the screen in a perfectly calibrated environment.</para>
- <!-- See proposal by Billy Biggs, video4linux-list@redhat.com
-on 11 Oct 2002, subject: "Re: [V4L] Re: v4l2 api", and
-http://vektor.theorem.ca/graphics/ycbcr/ and
-http://www.poynton.com/notes/colour_and_gamma/ColorFAQ.html -->
+ <para>In order to do that we first need to have a good definition of
+color, i.e. some way to uniquely and unambiguously define a color so that someone
+else can reproduce it. Human color vision is trichromatic since the human eye has
+color receptors that are sensitive to three different wavelengths of light. Hence
+the need to use three numbers to describe color. Be glad you are not a mantis shrimp
+as those are sensitive to 12 different wavelengths, so instead of RGB we would be
+using the ABCDEFGHIJKL colorspace...</para>
- <para>
- <variablelist>
- <varlistentry>
- <term>Gamma Correction</term>
- <listitem>
- <para>[to do]</para>
- <para>E'<subscript>R</subscript> = f(R)</para>
- <para>E'<subscript>G</subscript> = f(G)</para>
- <para>E'<subscript>B</subscript> = f(B)</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Construction of luminance and color-difference
-signals</term>
- <listitem>
- <para>[to do]</para>
- <para>E'<subscript>Y</subscript> =
-Coeff<subscript>R</subscript> E'<subscript>R</subscript>
-+ Coeff<subscript>G</subscript> E'<subscript>G</subscript>
-+ Coeff<subscript>B</subscript> E'<subscript>B</subscript></para>
- <para>(E'<subscript>R</subscript> - E'<subscript>Y</subscript>) = E'<subscript>R</subscript>
-- Coeff<subscript>R</subscript> E'<subscript>R</subscript>
-- Coeff<subscript>G</subscript> E'<subscript>G</subscript>
-- Coeff<subscript>B</subscript> E'<subscript>B</subscript></para>
- <para>(E'<subscript>B</subscript> - E'<subscript>Y</subscript>) = E'<subscript>B</subscript>
-- Coeff<subscript>R</subscript> E'<subscript>R</subscript>
-- Coeff<subscript>G</subscript> E'<subscript>G</subscript>
-- Coeff<subscript>B</subscript> E'<subscript>B</subscript></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Re-normalized color-difference signals</term>
- <listitem>
- <para>The color-difference signals are scaled back to unity
-range [-0.5;+0.5]:</para>
- <para>K<subscript>B</subscript> = 0.5 / (1 - Coeff<subscript>B</subscript>)</para>
- <para>K<subscript>R</subscript> = 0.5 / (1 - Coeff<subscript>R</subscript>)</para>
- <para>P<subscript>B</subscript> =
-K<subscript>B</subscript> (E'<subscript>B</subscript> - E'<subscript>Y</subscript>) =
- 0.5 (Coeff<subscript>R</subscript> / Coeff<subscript>B</subscript>) E'<subscript>R</subscript>
-+ 0.5 (Coeff<subscript>G</subscript> / Coeff<subscript>B</subscript>) E'<subscript>G</subscript>
-+ 0.5 E'<subscript>B</subscript></para>
- <para>P<subscript>R</subscript> =
-K<subscript>R</subscript> (E'<subscript>R</subscript> - E'<subscript>Y</subscript>) =
- 0.5 E'<subscript>R</subscript>
-+ 0.5 (Coeff<subscript>G</subscript> / Coeff<subscript>R</subscript>) E'<subscript>G</subscript>
-+ 0.5 (Coeff<subscript>B</subscript> / Coeff<subscript>R</subscript>) E'<subscript>B</subscript></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Quantization</term>
- <listitem>
- <para>[to do]</para>
- <para>Y' = (Lum. Levels - 1) &middot; E'<subscript>Y</subscript> + Lum. Offset</para>
- <para>C<subscript>B</subscript> = (Chrom. Levels - 1)
-&middot; P<subscript>B</subscript> + Chrom. Offset</para>
- <para>C<subscript>R</subscript> = (Chrom. Levels - 1)
-&middot; P<subscript>R</subscript> + Chrom. Offset</para>
- <para>Rounding to the nearest integer and clamping to the range
-[0;255] finally yields the digital color components Y'CbCr
-stored in YUV images.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </para>
-
- <example>
- <title>ITU-R Rec. BT.601 color conversion</title>
-
- <para>Forward Transformation</para>
-
- <programlisting>
-int ER, EG, EB; /* gamma corrected RGB input [0;255] */
-int Y1, Cb, Cr; /* output [0;255] */
-
-double r, g, b; /* temporaries */
-double y1, pb, pr;
-
-int
-clamp (double x)
-{
- int r = x; /* round to nearest */
-
- if (r &lt; 0) return 0;
- else if (r &gt; 255) return 255;
- else return r;
-}
-
-r = ER / 255.0;
-g = EG / 255.0;
-b = EB / 255.0;
-
-y1 = 0.299 * r + 0.587 * g + 0.114 * b;
-pb = -0.169 * r - 0.331 * g + 0.5 * b;
-pr = 0.5 * r - 0.419 * g - 0.081 * b;
-
-Y1 = clamp (219 * y1 + 16);
-Cb = clamp (224 * pb + 128);
-Cr = clamp (224 * pr + 128);
-
-/* or shorter */
-
-y1 = 0.299 * ER + 0.587 * EG + 0.114 * EB;
-
-Y1 = clamp ( (219 / 255.0) * y1 + 16);
-Cb = clamp (((224 / 255.0) / (2 - 2 * 0.114)) * (EB - y1) + 128);
-Cr = clamp (((224 / 255.0) / (2 - 2 * 0.299)) * (ER - y1) + 128);
- </programlisting>
-
- <para>Inverse Transformation</para>
-
- <programlisting>
-int Y1, Cb, Cr; /* gamma pre-corrected input [0;255] */
-int ER, EG, EB; /* output [0;255] */
-
-double r, g, b; /* temporaries */
-double y1, pb, pr;
-
-int
-clamp (double x)
-{
- int r = x; /* round to nearest */
-
- if (r &lt; 0) return 0;
- else if (r &gt; 255) return 255;
- else return r;
-}
-
-y1 = (Y1 - 16) / 219.0;
-pb = (Cb - 128) / 224.0;
-pr = (Cr - 128) / 224.0;
-
-r = 1.0 * y1 + 0 * pb + 1.402 * pr;
-g = 1.0 * y1 - 0.344 * pb - 0.714 * pr;
-b = 1.0 * y1 + 1.772 * pb + 0 * pr;
-
-ER = clamp (r * 255); /* [ok? one should prob. limit y1,pb,pr] */
-EG = clamp (g * 255);
-EB = clamp (b * 255);
- </programlisting>
- </example>
-
- <table pgwide="1" id="v4l2-colorspace" orient="land">
- <title>enum v4l2_colorspace</title>
- <tgroup cols="11" align="center">
- <colspec align="left" />
- <colspec align="center" />
- <colspec align="left" />
- <colspec colname="cr" />
- <colspec colname="cg" />
- <colspec colname="cb" />
- <colspec colname="wp" />
- <colspec colname="gc" />
- <colspec colname="lum" />
- <colspec colname="qy" />
- <colspec colname="qc" />
- <spanspec namest="cr" nameend="cb" spanname="chrom" />
- <spanspec namest="qy" nameend="qc" spanname="quant" />
- <spanspec namest="lum" nameend="qc" spanname="spam" />
+ <para>Color exists only in the eye and brain and is the result of how strongly
+color receptors are stimulated. This is based on the Spectral
+Power Distribution (SPD) which is a graph showing the intensity (radiant power)
+of the light at wavelengths covering the visible spectrum as it enters the eye.
+The science of colorimetry is about the relationship between the SPD and color as
+perceived by the human brain.</para>
+
+	<para>Since the human eye has only three color receptors it is perfectly
+possible that different SPDs will result in the same stimulation of those receptors
+and so be perceived as the same color, even though the SPDs of the light are
+different.</para>
+
+	<para>In the 1920s experiments were devised to determine the relationship
+between SPDs and perceived color, and they resulted in the CIE 1931 standard
+which defines spectral weighting functions that model the perception of color.
+Specifically, that standard defines functions that take an SPD and calculate
+the stimulus for each color receptor. After some further mathematical transforms
+these stimuli are known as the <emphasis>CIE XYZ tristimulus</emphasis> values,
+and these X, Y and Z values unambiguously describe a color as perceived by a human.
+The X, Y and Z values are all in the range [0&hellip;1].</para>
+
+ <para>The Y value in the CIE XYZ colorspace corresponds to luminance. Often
+the CIE XYZ colorspace is transformed to the normalized CIE xyY colorspace:</para>
+
+ <para>x = X / (X + Y + Z)</para>
+ <para>y = Y / (X + Y + Z)</para>
+
+ <para>The x and y values are the chromaticity coordinates and can be used to
+define a color without the luminance component Y. It is very confusing to
+have such similar names for these colorspaces. Just be aware that if colors
+are specified with lower case 'x' and 'y', then the CIE xyY colorspace is
+used. Upper case 'X' and 'Y' refer to the CIE XYZ colorspace. Also, y has nothing
+to do with luminance. Together x and y specify a color, and Y the luminance.
+That is really all you need to remember from a practical point of view. At
+the end of this section you will find reading resources that go into much more
+detail if you are interested.
+</para>
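+
+	<para>As a small illustration, the following C sketch converts CIE XYZ
+tristimulus values to the xyY chromaticity coordinates described above. It is not
+part of any V4L2 API, and the guard against a zero denominator is an implementation
+choice, not part of the CIE definition:</para>
+
+	<programlisting>
+/* Convert CIE XYZ tristimulus values to CIE xyY. */
+void
+xyz_to_xyy (double X, double Y, double Z,
+            double *x, double *y, double *luminance)
+{
+	double sum = X + Y + Z;
+
+	if (sum &lt;= 0.0) {
+		*x = 0.0;	/* black: chromaticity is undefined */
+		*y = 0.0;
+	} else {
+		*x = X / sum;
+		*y = Y / sum;
+	}
+	*luminance = Y;	/* the Y component carries the luminance unchanged */
+}
+	</programlisting>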
+
+ <para>A monitor or TV will reproduce colors by emitting light at three
+different wavelengths, the combination of which will stimulate the color receptors
+in the eye and thus cause the perception of color. Historically these wavelengths
+were defined by the red, green and blue phosphors used in the displays. These
+<emphasis>color primaries</emphasis> are part of what defines a colorspace.</para>
+
+	<para>Different display devices will have different primaries and some
+primaries are more suitable for some display technologies than others. This has
+resulted in a variety of colorspaces that are used for different display
+technologies or applications. To define a colorspace you need to define not only
+the three color primaries (these are typically given as x,&nbsp;y chromaticity
+coordinates in the CIE xyY colorspace) but also the white reference: the color
+obtained when all three primaries are at maximum power. This determines the
+relative power or energy of the primaries. The white reference is usually chosen
+to be close to daylight, which has been defined as the CIE D65 Illuminant.</para>
+
+ <para>To recapitulate: the CIE XYZ colorspace uniquely identifies colors.
+Other colorspaces are defined by three chromaticity coordinates defined in the
+CIE xyY colorspace. Based on those a 3x3 matrix can be constructed that
+transforms CIE XYZ colors to colors in the new colorspace.
+</para>
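+
+	<para>As an illustration, the following C sketch applies such a 3x3 matrix to
+go from CIE XYZ to linear RGB. The coefficients shown are the commonly published
+values for the sRGB/Rec. 709 primaries with a D65 white reference and are given here
+only as an example; other primaries lead to other matrices:</para>
+
+	<programlisting>
+/* CIE XYZ (D65 white reference) to linear RGB with sRGB/Rec. 709 primaries. */
+void
+xyz_to_linear_rgb (double X, double Y, double Z,
+                   double *R, double *G, double *B)
+{
+	*R =  3.2406 * X - 1.5372 * Y - 0.4986 * Z;
+	*G = -0.9689 * X + 1.8758 * Y + 0.0415 * Z;
+	*B =  0.0557 * X - 0.2040 * Y + 1.0570 * Z;
+	/* Values outside [0..1] are outside the RGB gamut and would
+	   have to be clamped. */
+}
+	</programlisting>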
+
+	<para>Both the CIE XYZ colorspace and the RGB colorspaces derived from
+specific chromaticity primaries are linear colorspaces. But neither the eye
+nor display technology is linear. Doubling the values of all components in
+the linear colorspace will not be perceived as twice the intensity of the color.
+So each colorspace also defines a transfer function that takes a linear color
+component value and transforms it to the non-linear component value, which is a
+closer match to the non-linear performance of both the eye and displays. Linear
+component values are denoted RGB, non-linear values are denoted R'G'B'. In general
+colors used in graphics are all R'G'B', except in OpenGL which uses linear RGB.
+Special care should be taken when dealing with OpenGL to provide linear RGB colors
+or to use the built-in OpenGL support to apply the inverse transfer function.</para>
+
+	<para>The final piece that defines a colorspace is a function that
+transforms non-linear R'G'B' to non-linear Y'CbCr. This function is determined
+by the so-called luma coefficients. Multiple Y'CbCr encodings may be
+allowed for the same colorspace. Many encodings of color
+prefer to use luma (Y') and chroma (CbCr) instead of R'G'B'. Since the human
+eye is more sensitive to differences in luminance than in color, this encoding
+allows one to reduce the amount of color information relative to the luma
+data. Note that the luma (Y') is unrelated to the Y in the CIE XYZ colorspace.
+Also note that Y'CbCr is often called YCbCr or YUV, even though those names are,
+strictly speaking, wrong.</para>
+
+	<para>Sometimes Y'CbCr is mistaken for a colorspace. It is not:
+it is just an encoding of an R'G'B' color into luma and chroma
+values. The underlying colorspace that is associated with the R'G'B' color
+is also associated with the Y'CbCr color.</para>
+
+ <para>The final step is how the RGB, R'G'B' or Y'CbCr values are
+quantized. The CIE XYZ colorspace where X, Y and Z are in the range
+[0&hellip;1] describes all colors that humans can perceive, but the transform to
+another colorspace will produce colors that are outside the [0&hellip;1] range.
+Once clamped to the [0&hellip;1] range those colors can no longer be reproduced
+in that colorspace. This clamping is what reduces the extent or gamut of the
+colorspace. How the range of [0&hellip;1] is translated to integer values in the
+range of [0&hellip;255] (or higher, depending on the color depth) is called the
+quantization. This is <emphasis>not</emphasis> part of the colorspace
+definition. In practice RGB or R'G'B' values are full range, i.e. they
+use the full [0&hellip;255] range. Y'CbCr values on the other hand are limited
+range with Y' using [16&hellip;235] and Cb and Cr using [16&hellip;240].</para>
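+
+	<para>A small C sketch of the quantizations described above, assuming an
+8-bit color depth (the rounding and clamping helpers are implementation
+details):</para>
+
+	<programlisting>
+int
+clamp8 (double v)
+{
+	int i = (int)(v + 0.5);	/* round to nearest */
+
+	if (i &lt; 0) return 0;
+	if (i &gt; 255) return 255;
+	return i;
+}
+
+/* Full range: an R', G' or B' value in [0..1] maps to [0..255]. */
+int
+quantize_full_range (double v)
+{
+	return clamp8 (v * 255.0);
+}
+
+/* Limited range: Y' in [0..1] maps to [16..235],
+   Cb and Cr in [-0.5..0.5] map to [16..240]. */
+int
+quantize_limited_luma (double y)
+{
+	return clamp8 (219.0 * y + 16.0);
+}
+
+int
+quantize_limited_chroma (double c)
+{
+	return clamp8 (224.0 * c + 128.0);
+}
+	</programlisting>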
+
+ <para>Unfortunately, in some cases limited range RGB is also used
+where the components use the range [16&hellip;235]. And full range Y'CbCr also exists
+using the [0&hellip;255] range.</para>
+
+ <para>In order to correctly interpret a color you need to know the
+quantization range, whether it is R'G'B' or Y'CbCr, the used Y'CbCr encoding
+and the colorspace.
+From that information you can calculate the corresponding CIE XYZ color
+and map that again to whatever colorspace your display device uses.</para>
+
+ <para>The colorspace definition itself consists of the three
+chromaticity primaries, the white reference chromaticity, a transfer
+function and the luma coefficients needed to transform R'G'B' to Y'CbCr. While
+some colorspace standards correctly define all four, quite often the colorspace
+standard only defines some, and you have to rely on other standards for
+the missing pieces. The fact that colorspaces are often a mix of different
+standards also led to very confusing naming conventions where the name of
+a standard was used to name a colorspace when in fact that standard was
+part of various other colorspaces as well.</para>
+
+ <para>If you want to read more about colors and colorspaces, then the
+following resources are useful: <xref linkend="poynton" /> is a good practical
+book for video engineers, <xref linkend="colimg" /> has a much broader scope and
+describes many more aspects of color (physics, chemistry, biology, etc.).
+The <ulink url="http://www.brucelindbloom.com">http://www.brucelindbloom.com</ulink>
+website is an excellent resource, especially with respect to the mathematics behind
+colorspace conversions. The Wikipedia <ulink url="http://en.wikipedia.org/wiki/CIE_1931_color_space#CIE_xy_chromaticity_diagram_and_the_CIE_xyY_color_space">CIE 1931 colorspace</ulink> article
+is also very useful.</para>
+ </section>
+
+ <section>
+ <title>Defining Colorspaces in V4L2</title>
+ <para>In V4L2 colorspaces are defined by three values. The first is the colorspace
+identifier (&v4l2-colorspace;) which defines the chromaticities, the transfer
+function, the default Y'CbCr encoding and the default quantization method. The second
+is the Y'CbCr encoding identifier (&v4l2-ycbcr-encoding;) to specify non-standard
+Y'CbCr encodings and the third is the quantization identifier (&v4l2-quantization;)
+to specify non-standard quantization methods. Most of the time only the colorspace
+field of &v4l2-pix-format; or &v4l2-pix-format-mplane; needs to be filled in. Note
+that the default R'G'B' quantization is always full range for all colorspaces,
+so this won't be mentioned explicitly for each colorspace description.</para>
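+
+	<para>For example, an application setting up an output stream could fill in
+these fields as part of &VIDIOC-S-FMT;. This is only a sketch: the buffer type,
+resolution and pixel format are arbitrary assumptions and all error handling is
+omitted. Note that <constant>V4L2_YCBCR_ENC_DEFAULT</constant> and
+<constant>V4L2_QUANTIZATION_DEFAULT</constant> are both zero, so clearing the
+structure already selects the colorspace defaults:</para>
+
+	<programlisting>
+struct v4l2_format fmt;
+
+memset (&amp;fmt, 0, sizeof (fmt));
+fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+fmt.fmt.pix.width = 1280;
+fmt.fmt.pix.height = 720;
+fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+fmt.fmt.pix.field = V4L2_FIELD_NONE;
+/* Usually only the colorspace needs to be filled in... */
+fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+/* ...the encoding and quantization defaults derived from it are normally fine. */
+fmt.fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+fmt.fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ioctl (fd, VIDIOC_S_FMT, &amp;fmt);
+	</programlisting>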
+
+ <table pgwide="1" frame="none" id="v4l2-colorspace">
+ <title>V4L2 Colorspaces</title>
+ <tgroup cols="2" align="left">
+ &cs-def;
<thead>
<row>
- <entry morerows="1">Identifier</entry>
- <entry morerows="1">Value</entry>
- <entry morerows="1">Description</entry>
- <entry spanname="chrom">Chromaticities<footnote>
- <para>The coordinates of the color primaries are
-given in the CIE system (1931)</para>
- </footnote></entry>
- <entry morerows="1">White Point</entry>
- <entry morerows="1">Gamma Correction</entry>
- <entry morerows="1">Luminance E'<subscript>Y</subscript></entry>
- <entry spanname="quant">Quantization</entry>
- </row>
- <row>
- <entry>Red</entry>
- <entry>Green</entry>
- <entry>Blue</entry>
- <entry>Y'</entry>
- <entry>Cb, Cr</entry>
+ <entry>Identifier</entry>
+ <entry>Details</entry>
</row>
</thead>
<tbody valign="top">
<row>
<entry><constant>V4L2_COLORSPACE_SMPTE170M</constant></entry>
- <entry>1</entry>
- <entry>NTSC/PAL according to <xref linkend="smpte170m" />,
-<xref linkend="itu601" /></entry>
- <entry>x&nbsp;=&nbsp;0.630, y&nbsp;=&nbsp;0.340</entry>
- <entry>x&nbsp;=&nbsp;0.310, y&nbsp;=&nbsp;0.595</entry>
- <entry>x&nbsp;=&nbsp;0.155, y&nbsp;=&nbsp;0.070</entry>
- <entry>x&nbsp;=&nbsp;0.3127, y&nbsp;=&nbsp;0.3290,
- Illuminant D<subscript>65</subscript></entry>
- <entry>E' = 4.5&nbsp;I&nbsp;for&nbsp;I&nbsp;&le;0.018,
-1.099&nbsp;I<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&lt;&nbsp;I</entry>
- <entry>0.299&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.587&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.114&nbsp;E'<subscript>B</subscript></entry>
- <entry>219&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry>See <xref linkend="col-smpte-170m" />.</entry>
</row>
<row>
- <entry><constant>V4L2_COLORSPACE_SMPTE240M</constant></entry>
- <entry>2</entry>
- <entry>1125-Line (US) HDTV, see <xref
-linkend="smpte240m" /></entry>
- <entry>x&nbsp;=&nbsp;0.630, y&nbsp;=&nbsp;0.340</entry>
- <entry>x&nbsp;=&nbsp;0.310, y&nbsp;=&nbsp;0.595</entry>
- <entry>x&nbsp;=&nbsp;0.155, y&nbsp;=&nbsp;0.070</entry>
- <entry>x&nbsp;=&nbsp;0.3127, y&nbsp;=&nbsp;0.3290,
- Illuminant D<subscript>65</subscript></entry>
- <entry>E' = 4&nbsp;I&nbsp;for&nbsp;I&nbsp;&le;0.0228,
-1.1115&nbsp;I<superscript>0.45</superscript>&nbsp;-&nbsp;0.1115&nbsp;for&nbsp;0.0228&nbsp;&lt;&nbsp;I</entry>
- <entry>0.212&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.701&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.087&nbsp;E'<subscript>B</subscript></entry>
- <entry>219&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry><constant>V4L2_COLORSPACE_REC709</constant></entry>
+ <entry>See <xref linkend="col-rec709" />.</entry>
</row>
<row>
- <entry><constant>V4L2_COLORSPACE_REC709</constant></entry>
- <entry>3</entry>
- <entry>HDTV and modern devices, see <xref
-linkend="itu709" /></entry>
- <entry>x&nbsp;=&nbsp;0.640, y&nbsp;=&nbsp;0.330</entry>
- <entry>x&nbsp;=&nbsp;0.300, y&nbsp;=&nbsp;0.600</entry>
- <entry>x&nbsp;=&nbsp;0.150, y&nbsp;=&nbsp;0.060</entry>
- <entry>x&nbsp;=&nbsp;0.3127, y&nbsp;=&nbsp;0.3290,
- Illuminant D<subscript>65</subscript></entry>
- <entry>E' = 4.5&nbsp;I&nbsp;for&nbsp;I&nbsp;&le;0.018,
-1.099&nbsp;I<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&lt;&nbsp;I</entry>
- <entry>0.2125&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.7154&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.0721&nbsp;E'<subscript>B</subscript></entry>
- <entry>219&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry><constant>V4L2_COLORSPACE_SRGB</constant></entry>
+ <entry>See <xref linkend="col-srgb" />.</entry>
</row>
<row>
- <entry><constant>V4L2_COLORSPACE_BT878</constant></entry>
- <entry>4</entry>
- <entry>Broken Bt878 extents<footnote>
- <para>The ubiquitous Bt878 video capture chip
-quantizes E'<subscript>Y</subscript> to 238 levels, yielding a range
-of Y' = 16 &hellip; 253, unlike Rec. 601 Y' = 16 &hellip;
-235. This is not a typo in the Bt878 documentation, it has been
-implemented in silicon. The chroma extents are unclear.</para>
- </footnote>, <xref linkend="itu601" /></entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>0.299&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.587&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.114&nbsp;E'<subscript>B</subscript></entry>
- <entry><emphasis>237</emphasis>&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128 (probably)</entry>
+ <entry><constant>V4L2_COLORSPACE_ADOBERGB</constant></entry>
+ <entry>See <xref linkend="col-adobergb" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORSPACE_BT2020</constant></entry>
+ <entry>See <xref linkend="col-bt2020" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORSPACE_SMPTE240M</constant></entry>
+ <entry>See <xref linkend="col-smpte-240m" />.</entry>
</row>
<row>
<entry><constant>V4L2_COLORSPACE_470_SYSTEM_M</constant></entry>
- <entry>5</entry>
- <entry>M/NTSC<footnote>
- <para>No identifier exists for M/PAL which uses
-the chromaticities of M/NTSC, the remaining parameters are equal to B and
-G/PAL.</para>
- </footnote> according to <xref linkend="itu470" />, <xref
- linkend="itu601" /></entry>
- <entry>x&nbsp;=&nbsp;0.67, y&nbsp;=&nbsp;0.33</entry>
- <entry>x&nbsp;=&nbsp;0.21, y&nbsp;=&nbsp;0.71</entry>
- <entry>x&nbsp;=&nbsp;0.14, y&nbsp;=&nbsp;0.08</entry>
- <entry>x&nbsp;=&nbsp;0.310, y&nbsp;=&nbsp;0.316, Illuminant C</entry>
- <entry>?</entry>
- <entry>0.299&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.587&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.114&nbsp;E'<subscript>B</subscript></entry>
- <entry>219&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry>See <xref linkend="col-sysm" />.</entry>
</row>
<row>
<entry><constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant></entry>
- <entry>6</entry>
- <entry>625-line PAL and SECAM systems according to <xref
-linkend="itu470" />, <xref linkend="itu601" /></entry>
- <entry>x&nbsp;=&nbsp;0.64, y&nbsp;=&nbsp;0.33</entry>
- <entry>x&nbsp;=&nbsp;0.29, y&nbsp;=&nbsp;0.60</entry>
- <entry>x&nbsp;=&nbsp;0.15, y&nbsp;=&nbsp;0.06</entry>
- <entry>x&nbsp;=&nbsp;0.313, y&nbsp;=&nbsp;0.329,
-Illuminant D<subscript>65</subscript></entry>
- <entry>?</entry>
- <entry>0.299&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.587&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.114&nbsp;E'<subscript>B</subscript></entry>
- <entry>219&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16</entry>
- <entry>224&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry>See <xref linkend="col-sysbg" />.</entry>
</row>
<row>
<entry><constant>V4L2_COLORSPACE_JPEG</constant></entry>
- <entry>7</entry>
- <entry>JPEG Y'CbCr, see <xref linkend="jfif" />, <xref linkend="itu601" /></entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>?</entry>
- <entry>0.299&nbsp;E'<subscript>R</subscript>
-+&nbsp;0.587&nbsp;E'<subscript>G</subscript>
-+&nbsp;0.114&nbsp;E'<subscript>B</subscript></entry>
- <entry>256&nbsp;E'<subscript>Y</subscript>&nbsp;+&nbsp;16<footnote>
- <para>Note JFIF quantizes
-Y'P<subscript>B</subscript>P<subscript>R</subscript> in range [0;+1] and
-[-0.5;+0.5] to <emphasis>257</emphasis> levels, however Y'CbCr signals
-are still clamped to [0;255].</para>
- </footnote></entry>
- <entry>256&nbsp;P<subscript>B,R</subscript>&nbsp;+&nbsp;128</entry>
+ <entry>See <xref linkend="col-jpeg" />.</entry>
</row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-ycbcr-encoding">
+ <title>V4L2 Y'CbCr Encodings</title>
+ <tgroup cols="2" align="left">
+ &cs-def;
+ <thead>
<row>
- <entry><constant>V4L2_COLORSPACE_SRGB</constant></entry>
- <entry>8</entry>
- <entry>[?]</entry>
- <entry>x&nbsp;=&nbsp;0.640, y&nbsp;=&nbsp;0.330</entry>
- <entry>x&nbsp;=&nbsp;0.300, y&nbsp;=&nbsp;0.600</entry>
- <entry>x&nbsp;=&nbsp;0.150, y&nbsp;=&nbsp;0.060</entry>
- <entry>x&nbsp;=&nbsp;0.3127, y&nbsp;=&nbsp;0.3290,
- Illuminant D<subscript>65</subscript></entry>
- <entry>E' = 4.5&nbsp;I&nbsp;for&nbsp;I&nbsp;&le;0.018,
-1.099&nbsp;I<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&lt;&nbsp;I</entry>
- <entry spanname="spam">n/a</entry>
+ <entry>Identifier</entry>
+ <entry>Details</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_DEFAULT</constant></entry>
+ <entry>Use the default Y'CbCr encoding as defined by the colorspace.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_601</constant></entry>
+ <entry>Use the BT.601 Y'CbCr encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_709</constant></entry>
+ <entry>Use the Rec. 709 Y'CbCr encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_XV601</constant></entry>
+ <entry>Use the extended gamut xvYCC BT.601 encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_XV709</constant></entry>
+ <entry>Use the extended gamut xvYCC Rec. 709 encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_SYCC</constant></entry>
+ <entry>Use the extended gamut sYCC encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_BT2020</constant></entry>
+ <entry>Use the default non-constant luminance BT.2020 Y'CbCr encoding.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_YCBCR_ENC_BT2020_CONST_LUM</constant></entry>
+ <entry>Use the constant luminance BT.2020 Yc'CbcCrc encoding.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-quantization">
+ <title>V4L2 Quantization Methods</title>
+ <tgroup cols="2" align="left">
+ &cs-def;
+ <thead>
+ <row>
+ <entry>Identifier</entry>
+ <entry>Details</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_QUANTIZATION_DEFAULT</constant></entry>
+ <entry>Use the default quantization encoding as defined by the colorspace.
+This is always full range for R'G'B' and usually limited range for Y'CbCr.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_QUANTIZATION_FULL_RANGE</constant></entry>
+ <entry>Use the full range quantization encoding. I.e. the range [0&hellip;1]
+is mapped to [0&hellip;255] (with possible clipping to [1&hellip;254] to avoid the
+0x00 and 0xff values). Cb and Cr are mapped from [-0.5&hellip;0.5] to [0&hellip;255]
+(with possible clipping to [1&hellip;254] to avoid the 0x00 and 0xff values).</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_QUANTIZATION_LIM_RANGE</constant></entry>
+ <entry>Use the limited range quantization encoding. I.e. the range [0&hellip;1]
+is mapped to [16&hellip;235]. Cb and Cr are mapped from [-0.5&hellip;0.5] to [16&hellip;240].
+</entry>
</row>
</tbody>
</tgroup>
</table>
</section>
+ <section>
+ <title>Detailed Colorspace Descriptions</title>
+ <section>
+ <title id="col-smpte-170m">Colorspace SMPTE 170M (<constant>V4L2_COLORSPACE_SMPTE170M</constant>)</title>
+ <para>The <xref linkend="smpte170m" /> standard defines the colorspace used by NTSC and PAL and by SDTV
+in general. The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>.
+The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and
+the white reference are:</para>
+ <table frame="none">
+ <title>SMPTE 170M Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.630</entry>
+ <entry>0.340</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.310</entry>
+ <entry>0.595</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.155</entry>
+ <entry>0.070</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <para>The red, green and blue chromaticities are also often referred to
+as the SMPTE C set, so this colorspace is sometimes called SMPTE C as well.</para>
+ <variablelist>
+ <varlistentry>
+ <term>The transfer function defined for SMPTE 170M is the same as the
+one defined in Rec. 709. Normally L is in the range [0&hellip;1], but for the extended
+gamut xvYCC encoding values outside that range are allowed.</term>
+ <listitem>
+ <para>L' = -1.099(-L)<superscript>0.45</superscript>&nbsp;+&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&le;&nbsp;-0.018</para>
+ <para>L' = 4.5L&nbsp;for&nbsp;-0.018&nbsp;&lt;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
+ <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&ge;&nbsp;0.018</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = -((L'&nbsp;-&nbsp;0.099)&nbsp;/&nbsp;-1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&le;&nbsp;-0.081</para>
+ <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;-0.081&nbsp;&lt;&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
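+	<para>A C sketch of this transfer function and its inverse; pow() from the C
+library is assumed, and the negative branches are only needed for the extended gamut
+xvYCC encodings:</para>
+	<programlisting>
+#include &lt;math.h&gt;
+
+double
+rec709_transfer (double L)
+{
+	if (L &lt;= -0.018)
+		return -1.099 * pow (-L, 0.45) + 0.099;
+	if (L &lt; 0.018)
+		return 4.5 * L;
+	return 1.099 * pow (L, 0.45) - 0.099;
+}
+
+double
+rec709_transfer_inv (double E)
+{
+	if (E &lt;= -0.081)
+		return -pow ((E - 0.099) / -1.099, 1.0 / 0.45);
+	if (E &lt; 0.081)
+		return E / 4.5;
+	return pow ((E + 0.099) / 1.099, 1.0 / 0.45);
+}
+	</programlisting>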
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with
+the following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. This conversion to Y'CbCr is identical to the one
+defined in the <xref linkend="itu601" /> standard and this colorspace is sometimes called BT.601 as well, even
+though BT.601 does not mention any color primaries.</para>
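+	<para>A C sketch of this encoding, including the clamping step (the helper
+names are made up for this example):</para>
+	<programlisting>
+double
+clamp_range (double v, double lo, double hi)
+{
+	return v &lt; lo ? lo : (v &gt; hi ? hi : v);
+}
+
+/* Non-linear R', G', B' in [0..1] to BT.601 Y'CbCr (V4L2_YCBCR_ENC_601). */
+void
+rgb_to_ycbcr_601 (double r, double g, double b,
+                  double *y, double *cb, double *cr)
+{
+	*y  = clamp_range ( 0.299 * r + 0.587 * g + 0.114 * b,  0.0, 1.0);
+	*cb = clamp_range (-0.169 * r - 0.331 * g + 0.5   * b, -0.5, 0.5);
+	*cr = clamp_range ( 0.5   * r - 0.419 * g - 0.081 * b, -0.5, 0.5);
+}
+	</programlisting>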
+ <para>The default quantization is limited range, but full range is possible although
+rarely seen.</para>
+ <para>The <constant>V4L2_YCBCR_ENC_601</constant> encoding as described above is the
+default for this colorspace, but it can be overridden with <constant>V4L2_YCBCR_ENC_709</constant>,
+in which case the Rec. 709 Y'CbCr encoding is used.</para>
+ <variablelist>
+ <varlistentry>
+ <term>The xvYCC 601 encoding (<constant>V4L2_YCBCR_ENC_XV601</constant>, <xref linkend="xvycc" />) is similar
+to the BT.601 encoding, but it allows for R', G' and B' values that are outside the range
+[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;255)</para>
+ <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B')</para>
+ <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B')</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
+to the range [-0.5&hellip;0.5]. The non-standard xvYCC 709 encoding can also be used by selecting
+<constant>V4L2_YCBCR_ENC_XV709</constant>. The xvYCC encodings always use full range
+quantization.</para>
+ </section>
+
+ <section>
+ <title id="col-rec709">Colorspace Rec. 709 (<constant>V4L2_COLORSPACE_REC709</constant>)</title>
+ <para>The <xref linkend="itu709" /> standard defines the colorspace used by HDTV in general. The default
+Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_709</constant>. The default Y'CbCr quantization is
+limited range. The chromaticities of the primary colors and the white reference are:</para>
+ <table frame="none">
+ <title>Rec. 709 Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.640</entry>
+ <entry>0.330</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.300</entry>
+ <entry>0.600</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.150</entry>
+ <entry>0.060</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <para>The full name of this standard is Rec. ITU-R BT.709-5.</para>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function. Normally L is in the range [0&hellip;1], but for the extended
+gamut xvYCC encoding values outside that range are allowed.</term>
+ <listitem>
+ <para>L' = -1.099(-L)<superscript>0.45</superscript>&nbsp;+&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&le;&nbsp;-0.018</para>
+ <para>L' = 4.5L&nbsp;for&nbsp;-0.018&nbsp;&lt;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
+ <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&ge;&nbsp;0.018</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = -((L'&nbsp;-&nbsp;0.099)&nbsp;/&nbsp;-1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&le;&nbsp;-0.081</para>
+ <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;-0.081&nbsp;&lt;&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the following
+<constant>V4L2_YCBCR_ENC_709</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5].</para>
+ <para>The default quantization is limited range, but full range is possible although
+rarely seen.</para>
+ <para>The <constant>V4L2_YCBCR_ENC_709</constant> encoding described above is the default
+for this colorspace, but it can be overridden with <constant>V4L2_YCBCR_ENC_601</constant>, in which
+case the BT.601 Y'CbCr encoding is used.</para>
+ <variablelist>
+ <varlistentry>
+ <term>The xvYCC 709 encoding (<constant>V4L2_YCBCR_ENC_XV709</constant>, <xref linkend="xvycc" />)
+is similar to the Rec. 709 encoding, but it allows for R', G' and B' values that are outside the range
+[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;255)</para>
+ <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B')</para>
+ <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;255)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B')</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
+to the range [-0.5&hellip;0.5]. The non-standard xvYCC 601 encoding can also be used by
+selecting <constant>V4L2_YCBCR_ENC_XV601</constant>. The xvYCC encodings always use full
+range quantization.</para>
+ </section>
+
+ <section>
+ <title id="col-srgb">Colorspace sRGB (<constant>V4L2_COLORSPACE_SRGB</constant>)</title>
+ <para>The <xref linkend="srgb" /> standard defines the colorspace used by most webcams and computer graphics. The
+default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SYCC</constant>. The default Y'CbCr quantization
+is full range. The chromaticities of the primary colors and the white reference are:</para>
+ <table frame="none">
+ <title>sRGB Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.640</entry>
+ <entry>0.330</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.300</entry>
+ <entry>0.600</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.150</entry>
+ <entry>0.060</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <para>These chromaticities are identical to the Rec. 709 colorspace.</para>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function. Note that negative values for L are only used by the Y'CbCr conversion.</term>
+ <listitem>
+ <para>L' = -1.055(-L)<superscript>1/2.4</superscript>&nbsp;+&nbsp;0.055&nbsp;for&nbsp;L&nbsp;&lt;&nbsp;-0.0031308</para>
+ <para>L' = 12.92L&nbsp;for&nbsp;-0.0031308&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;0.0031308</para>
+ <para>L' = 1.055L<superscript>1/2.4</superscript>&nbsp;-&nbsp;0.055&nbsp;for&nbsp;0.0031308&nbsp;&lt;&nbsp;L&nbsp;&le;&nbsp;1</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = -((-L'&nbsp;+&nbsp;0.055)&nbsp;/&nbsp;1.055)<superscript>2.4</superscript>&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;-0.04045</para>
+ <para>L = L'&nbsp;/&nbsp;12.92&nbsp;for&nbsp;-0.04045&nbsp;&le;&nbsp;L'&nbsp;&le;&nbsp;0.04045</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.055)&nbsp;/&nbsp;1.055)<superscript>2.4</superscript>&nbsp;for&nbsp;L'&nbsp;&gt;&nbsp;0.04045</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
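+	<para>A C sketch of the sRGB transfer function and its inverse; pow() from
+the C library is assumed:</para>
+	<programlisting>
+#include &lt;math.h&gt;
+
+double
+srgb_transfer (double L)
+{
+	if (L &lt; -0.0031308)
+		return -1.055 * pow (-L, 1.0 / 2.4) + 0.055;
+	if (L &lt;= 0.0031308)
+		return 12.92 * L;
+	return 1.055 * pow (L, 1.0 / 2.4) - 0.055;
+}
+
+double
+srgb_transfer_inv (double E)
+{
+	if (E &lt; -0.04045)
+		return -pow ((-E + 0.055) / 1.055, 2.4);
+	if (E &lt;= 0.04045)
+		return E / 12.92;
+	return pow ((E + 0.055) / 1.055, 2.4);
+}
+	</programlisting>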
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the following
+<constant>V4L2_YCBCR_ENC_SYCC</constant> encoding as defined by <xref linkend="sycc" />:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.2990R'&nbsp;+&nbsp;0.5870G'&nbsp;+&nbsp;0.1140B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.1687R'&nbsp;-&nbsp;0.3313G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4187G'&nbsp;-&nbsp;0.0813B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
+to the range [-0.5&hellip;0.5]. The <constant>V4L2_YCBCR_ENC_SYCC</constant> quantization is always
+full range. Although this Y'CbCr encoding looks very similar to the <constant>V4L2_YCBCR_ENC_XV601</constant>
+encoding, it is not. The <constant>V4L2_YCBCR_ENC_XV601</constant> scales and offsets the Y'CbCr
+values before quantization, but this encoding does not do that.</para>
+ </section>
+
+ <section>
+ <title id="col-adobergb">Colorspace Adobe RGB (<constant>V4L2_COLORSPACE_ADOBERGB</constant>)</title>
+ <para>The <xref linkend="adobergb" /> standard defines the colorspace used by computer graphics
+that use the AdobeRGB colorspace. This is also known as the <xref linkend="oprgb" /> standard.
+The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr
+quantization is limited range. The chromaticities of the primary colors and the white reference
+are:</para>
+ <table frame="none">
+ <title>Adobe RGB Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.6400</entry>
+ <entry>0.3300</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.2100</entry>
+ <entry>0.7100</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.1500</entry>
+ <entry>0.0600</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function:</term>
+ <listitem>
+ <para>L' = L<superscript>1/2.19921875</superscript></para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'<superscript>2.19921875</superscript></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
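+	<para>In C this pure power function and its inverse are simply (pow() from
+the C library is assumed):</para>
+	<programlisting>
+#include &lt;math.h&gt;
+
+double
+adobergb_transfer (double L)
+{
+	return pow (L, 1.0 / 2.19921875);
+}
+
+double
+adobergb_transfer_inv (double E)
+{
+	return pow (E, 2.19921875);
+}
+	</programlisting>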
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
+following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. This transform is identical to one defined in
+SMPTE 170M/BT.601. The Y'CbCr quantization is limited range.</para>
+ </section>
+
+ <section>
+ <title id="col-bt2020">Colorspace BT.2020 (<constant>V4L2_COLORSPACE_BT2020</constant>)</title>
+ <para>The <xref linkend="itu2020" /> standard defines the colorspace used by Ultra-high definition
+television (UHDTV). The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_BT2020</constant>.
+The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and
+the white reference are:</para>
+ <table frame="none">
+ <title>BT.2020 Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.708</entry>
+ <entry>0.292</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.170</entry>
+ <entry>0.797</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.131</entry>
+ <entry>0.046</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function (same as Rec. 709):</term>
+ <listitem>
+ <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
+ <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
+following <constant>V4L2_YCBCR_ENC_BT2020</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.2627R'&nbsp;+&nbsp;0.6789G'&nbsp;+&nbsp;0.0593B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.1396R'&nbsp;-&nbsp;0.3604G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4598G'&nbsp;-&nbsp;0.0402B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.</para>
+ <para>There is also an alternate constant luminance R'G'B' to Yc'CbcCrc
+(<constant>V4L2_YCBCR_ENC_BT2020_CONST_LUM</constant>) encoding:</para>
+ <variablelist>
+ <varlistentry>
+ <term>Luma:</term>
+ <listitem>
+ <para>Yc'&nbsp;=&nbsp;(0.2627R&nbsp;+&nbsp;0.6789G&nbsp;+&nbsp;0.0593B)'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>B'&nbsp;-&nbsp;Yc'&nbsp;&le;&nbsp;0:</term>
+ <listitem>
+	    <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.9404</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>B'&nbsp;-&nbsp;Yc'&nbsp;&gt;&nbsp;0:</term>
+ <listitem>
+	    <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.5816</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>R'&nbsp;-&nbsp;Yc'&nbsp;&le;&nbsp;0:</term>
+ <listitem>
+	    <para>Crc&nbsp;=&nbsp;(R'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.7184</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>R'&nbsp;-&nbsp;Yc'&nbsp;&gt;&nbsp;0:</term>
+ <listitem>
+	    <para>Crc&nbsp;=&nbsp;(R'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;0.9936</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Yc' is clamped to the range [0&hellip;1] and Cbc and Crc are
+clamped to the range [-0.5&hellip;0.5]. The Yc'CbcCrc quantization is limited range.</para>
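+	<para>A C sketch of this constant luminance encoding. The transfer function
+applied to the linear-light luma is the BT.2020 transfer function given above, and
+the helper names are made up for this example:</para>
+	<programlisting>
+#include &lt;math.h&gt;
+
+double
+bt2020_transfer (double L)
+{
+	return L &lt; 0.018 ? 4.5 * L : 1.099 * pow (L, 0.45) - 0.099;
+}
+
+/* Linear R, G, B in [0..1] to the constant luminance Yc'CbcCrc encoding
+   (V4L2_YCBCR_ENC_BT2020_CONST_LUM). */
+void
+rgb_to_yc_cbc_crc (double R, double G, double B,
+                   double *yc, double *cbc, double *crc)
+{
+	double Rp = bt2020_transfer (R);	/* R' */
+	double Bp = bt2020_transfer (B);	/* B' */
+
+	*yc = bt2020_transfer (0.2627 * R + 0.6789 * G + 0.0593 * B);
+	*cbc = (Bp - *yc) / (Bp - *yc &lt;= 0.0 ? 1.9404 : 1.5816);
+	*crc = (Rp - *yc) / (Rp - *yc &lt;= 0.0 ? 1.7184 : 0.9936);
+	/* Yc' is then clamped to [0..1], Cbc and Crc to [-0.5..0.5]. */
+}
+	</programlisting>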
+ </section>
+
+ <section>
+ <title id="col-smpte-240m">Colorspace SMPTE 240M (<constant>V4L2_COLORSPACE_SMPTE240M</constant>)</title>
+ <para>The <xref linkend="smpte240m" /> standard was an interim standard used during the early days of HDTV (1988-1998).
+It has been superseded by Rec. 709. The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SMPTE240M</constant>.
+The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and the
+white reference are:</para>
+ <table frame="none">
+ <title>SMPTE 240M Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.630</entry>
+ <entry>0.340</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.310</entry>
+ <entry>0.595</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.155</entry>
+ <entry>0.070</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <para>These chromaticities are identical to the SMPTE 170M colorspace.</para>
+ <variablelist>
+ <varlistentry>
+ <term>Transfer function:</term>
+ <listitem>
+ <para>L' = 4L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.0228</para>
+ <para>L' = 1.1115L<superscript>0.45</superscript>&nbsp;-&nbsp;0.1115&nbsp;for&nbsp;0.0228&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'&nbsp;/&nbsp;4&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L'&nbsp;&lt;&nbsp;0.0913</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.1115)&nbsp;/&nbsp;1.1115)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.0913</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
+following <constant>V4L2_YCBCR_ENC_SMPTE240M</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.2122R'&nbsp;+&nbsp;0.7013G'&nbsp;+&nbsp;0.0865B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.1161R'&nbsp;-&nbsp;0.3839G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4451G'&nbsp;-&nbsp;0.0549B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+	<para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.</para>
+ </section>
+
+ <section>
+ <title id="col-sysm">Colorspace NTSC 1953 (<constant>V4L2_COLORSPACE_470_SYSTEM_M</constant>)</title>
+ <para>This standard defines the colorspace used by NTSC in 1953. In practice this
+colorspace is obsolete and SMPTE 170M should be used instead. The default Y'CbCr encoding
+is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr quantization is limited range.
+The chromaticities of the primary colors and the white reference are:</para>
+ <table frame="none">
+ <title>NTSC 1953 Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.67</entry>
+ <entry>0.33</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.21</entry>
+ <entry>0.71</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.14</entry>
+ <entry>0.08</entry>
+ </row>
+ <row>
+ <entry>White Reference (C)</entry>
+ <entry>0.310</entry>
+ <entry>0.316</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <para>Note that this colorspace uses Illuminant C instead of D65 as the
+white reference. To correctly convert an image in this colorspace to another
+that uses D65 you need to apply a chromatic adaptation algorithm such as the
+Bradford method.</para>
+ <variablelist>
+ <varlistentry>
+ <term>The transfer function was never properly defined for NTSC 1953. The
+Rec. 709 transfer function is recommended in the literature:</term>
+ <listitem>
+ <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
+ <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
+following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.
+This transform is identical to one defined in SMPTE 170M/BT.601.</para>
+ </section>
+
+ <section>
+ <title id="col-sysbg">Colorspace EBU Tech. 3213 (<constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant>)</title>
+ <para>The <xref linkend="tech3213" /> standard defines the colorspace used by PAL/SECAM in 1975. In practice this
+colorspace is obsolete and SMPTE 170M should be used instead. The default Y'CbCr encoding
+is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr quantization is limited range.
+The chromaticities of the primary colors and the white reference are:</para>
+ <table frame="none">
+ <title>EBU Tech. 3213 Chromaticities</title>
+ <tgroup cols="3" align="left">
+ &cs-str;
+ <thead>
+ <row>
+ <entry>Color</entry>
+ <entry>x</entry>
+ <entry>y</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry>Red</entry>
+ <entry>0.64</entry>
+ <entry>0.33</entry>
+ </row>
+ <row>
+ <entry>Green</entry>
+ <entry>0.29</entry>
+ <entry>0.60</entry>
+ </row>
+ <row>
+ <entry>Blue</entry>
+ <entry>0.15</entry>
+ <entry>0.06</entry>
+ </row>
+ <row>
+ <entry>White Reference (D65)</entry>
+ <entry>0.3127</entry>
+ <entry>0.3290</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <variablelist>
+ <varlistentry>
+ <term>The transfer function was never properly defined for this colorspace.
+The Rec. 709 transfer function is recommended in the literature:</term>
+ <listitem>
+ <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
+ <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Inverse Transfer function:</term>
+ <listitem>
+ <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
+ <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <variablelist>
+ <varlistentry>
+ <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
+following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
+ <listitem>
+ <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
+ <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
+ <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
+clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.
+This transform is identical to one defined in SMPTE 170M/BT.601.</para>
+ </section>
+
+ <section>
+ <title id="col-jpeg">Colorspace JPEG (<constant>V4L2_COLORSPACE_JPEG</constant>)</title>
+	<para>This colorspace is used by most (Motion-)JPEG formats. The chromaticities
+of the primary colors and the white reference are identical to sRGB. The Y'CbCr encoding is
+<constant>V4L2_YCBCR_ENC_601</constant> with full range quantization where
+Y' is scaled to [0&hellip;255] and Cb/Cr are scaled to [-128&hellip;128] and
+then clipped to [-128&hellip;127].</para>
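+	<para>A C sketch of this quantization; the R'G'B' to Y'CbCr conversion itself
+is the BT.601 encoding shown in <xref linkend="col-smpte-170m" />, and the rounding is
+an implementation detail:</para>
+	<programlisting>
+#include &lt;math.h&gt;
+
+/* Y' in [0..1] is scaled to [0..255]. */
+int
+jpeg_quantize_luma (double y)
+{
+	int v = (int) floor (y * 255.0 + 0.5);
+
+	if (v &lt; 0) return 0;
+	if (v &gt; 255) return 255;
+	return v;
+}
+
+/* Cb/Cr in [-0.5..0.5] are scaled to [-128..128], then clipped to [-128..127]. */
+int
+jpeg_quantize_chroma (double c)
+{
+	int v = (int) floor (c * 256.0 + 0.5);
+
+	if (v &lt; -128) return -128;
+	if (v &gt; 127) return 127;
+	return v;
+}
+	</programlisting>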
+ <para>Note that the JPEG standard does not actually store colorspace information.
+So if something other than sRGB is used, then the driver will have to set that information
+explicitly. Effectively <constant>V4L2_COLORSPACE_JPEG</constant> can be considered to be
+an abbreviation for <constant>V4L2_COLORSPACE_SRGB</constant>, <constant>V4L2_YCBCR_ENC_601</constant>
+and <constant>V4L2_QUANTIZATION_FULL_RANGE</constant>.</para>
+ </section>
+
+ </section>
+
<section id="pixfmt-indexed">
<title>Indexed Format</title>
diff --git a/Documentation/DocBook/media/v4l/selections-common.xml b/Documentation/DocBook/media/v4l/selections-common.xml
index 7502f784b8cc..d6d56fb6f9c0 100644
--- a/Documentation/DocBook/media/v4l/selections-common.xml
+++ b/Documentation/DocBook/media/v4l/selections-common.xml
@@ -63,6 +63,22 @@
<entry>Yes</entry>
</row>
<row>
+ <entry><constant>V4L2_SEL_TGT_NATIVE_SIZE</constant></entry>
+ <entry>0x0003</entry>
+ <entry>The native size of the device, e.g. a sensor's
+	  pixel array. The <structfield>left</structfield> and
+ <structfield>top</structfield> fields are zero for this
+ target. Setting the native size will generally only make
+	  sense for memory-to-memory devices where the software can
+	  create a canvas of a given size in which, for example, a
+ video frame can be composed. In that case
+ V4L2_SEL_TGT_NATIVE_SIZE can be used to configure the size
+ of that canvas.
+ </entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
<entry><constant>V4L2_SEL_TGT_COMPOSE</constant></entry>
<entry>0x0100</entry>
<entry>Compose rectangle. Used to configure scaling
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index b2d5a0363cba..c5ea868e3909 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -34,8 +34,24 @@
<xref linkend="colorspaces" /> for details.</entry>
</row>
<row>
+ <entry>&v4l2-ycbcr-encoding;</entry>
+ <entry><structfield>ycbcr_enc</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-quantization;</entry>
+ <entry><structfield>quantization</structfield></entry>
+ <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+ </row>
+ <row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[7]</entry>
+ <entry><structfield>reserved</structfield>[6]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
@@ -86,7 +102,7 @@
green and 5-bit blue values padded on the high bit, transferred as 2 8-bit
samples per pixel with the most significant bits (padding, red and half of
the green value) transferred first will be named
- <constant>V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE</constant>.
+ <constant>MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE</constant>.
</para>
<para>The following tables list existing packed RGB formats.</para>
@@ -176,8 +192,8 @@
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-RGB444-2X8-PADHI-BE">
- <entry>V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE</entry>
+ <row id="MEDIA-BUS-FMT-RGB444-2X8-PADHI-BE">
+ <entry>MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE</entry>
<entry>0x1001</entry>
<entry></entry>
&dash-ent-24;
@@ -204,8 +220,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB444-2X8-PADHI-LE">
- <entry>V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE</entry>
+ <row id="MEDIA-BUS-FMT-RGB444-2X8-PADHI-LE">
+ <entry>MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE</entry>
<entry>0x1002</entry>
<entry></entry>
&dash-ent-24;
@@ -232,8 +248,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB555-2X8-PADHI-BE">
- <entry>V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE</entry>
+ <row id="MEDIA-BUS-FMT-RGB555-2X8-PADHI-BE">
+ <entry>MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE</entry>
<entry>0x1003</entry>
<entry></entry>
&dash-ent-24;
@@ -260,8 +276,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB555-2X8-PADHI-LE">
- <entry>V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE</entry>
+ <row id="MEDIA-BUS-FMT-RGB555-2X8-PADHI-LE">
+ <entry>MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE</entry>
<entry>0x1004</entry>
<entry></entry>
&dash-ent-24;
@@ -288,8 +304,8 @@
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-BGR565-2X8-BE">
- <entry>V4L2_MBUS_FMT_BGR565_2X8_BE</entry>
+ <row id="MEDIA-BUS-FMT-BGR565-2X8-BE">
+ <entry>MEDIA_BUS_FMT_BGR565_2X8_BE</entry>
<entry>0x1005</entry>
<entry></entry>
&dash-ent-24;
@@ -316,8 +332,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-BGR565-2X8-LE">
- <entry>V4L2_MBUS_FMT_BGR565_2X8_LE</entry>
+ <row id="MEDIA-BUS-FMT-BGR565-2X8-LE">
+ <entry>MEDIA_BUS_FMT_BGR565_2X8_LE</entry>
<entry>0x1006</entry>
<entry></entry>
&dash-ent-24;
@@ -344,8 +360,8 @@
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB565-2X8-BE">
- <entry>V4L2_MBUS_FMT_RGB565_2X8_BE</entry>
+ <row id="MEDIA-BUS-FMT-RGB565-2X8-BE">
+ <entry>MEDIA_BUS_FMT_RGB565_2X8_BE</entry>
<entry>0x1007</entry>
<entry></entry>
&dash-ent-24;
@@ -372,8 +388,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB565-2X8-LE">
- <entry>V4L2_MBUS_FMT_RGB565_2X8_LE</entry>
+ <row id="MEDIA-BUS-FMT-RGB565-2X8-LE">
+ <entry>MEDIA_BUS_FMT_RGB565_2X8_LE</entry>
<entry>0x1008</entry>
<entry></entry>
&dash-ent-24;
@@ -400,8 +416,8 @@
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB666-1X18">
- <entry>V4L2_MBUS_FMT_RGB666_1X18</entry>
+ <row id="MEDIA-BUS-FMT-RGB666-1X18">
+ <entry>MEDIA_BUS_FMT_RGB666_1X18</entry>
<entry>0x1009</entry>
<entry></entry>
&dash-ent-14;
@@ -424,8 +440,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB888-1X24">
- <entry>V4L2_MBUS_FMT_RGB888_1X24</entry>
+ <row id="MEDIA-BUS-FMT-RGB888-1X24">
+ <entry>MEDIA_BUS_FMT_RGB888_1X24</entry>
<entry>0x100a</entry>
<entry></entry>
&dash-ent-8;
@@ -454,8 +470,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB888-2X12-BE">
- <entry>V4L2_MBUS_FMT_RGB888_2X12_BE</entry>
+ <row id="MEDIA-BUS-FMT-RGB888-2X12-BE">
+ <entry>MEDIA_BUS_FMT_RGB888_2X12_BE</entry>
<entry>0x100b</entry>
<entry></entry>
&dash-ent-20;
@@ -490,8 +506,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-RGB888-2X12-LE">
- <entry>V4L2_MBUS_FMT_RGB888_2X12_LE</entry>
+ <row id="MEDIA-BUS-FMT-RGB888-2X12-LE">
+ <entry>MEDIA_BUS_FMT_RGB888_2X12_LE</entry>
<entry>0x100c</entry>
<entry></entry>
&dash-ent-20;
@@ -526,8 +542,8 @@
<entry>g<subscript>5</subscript></entry>
<entry>g<subscript>4</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-ARGB888-1X32">
- <entry>V4L2_MBUS_FMT_ARGB888_1X32</entry>
+ <row id="MEDIA-BUS-FMT-ARGB888-1X32">
+ <entry>MEDIA_BUS_FMT_ARGB888_1X32</entry>
<entry>0x100d</entry>
<entry></entry>
<entry>a<subscript>7</subscript></entry>
@@ -600,7 +616,7 @@
<para>For instance, a format with uncompressed 10-bit Bayer components
arranged in a red, green, green, blue pattern transferred as 2 8-bit
samples per pixel with the least significant bits transferred first will
- be named <constant>V4L2_MBUS_FMT_SRGGB10_2X8_PADHI_LE</constant>.
+ be named <constant>MEDIA_BUS_FMT_SRGGB10_2X8_PADHI_LE</constant>.
</para>
<figure id="bayer-patterns">
@@ -663,8 +679,8 @@
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-SBGGR8-1X8">
- <entry>V4L2_MBUS_FMT_SBGGR8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR8-1X8">
+ <entry>MEDIA_BUS_FMT_SBGGR8_1X8</entry>
<entry>0x3001</entry>
<entry></entry>
<entry>-</entry>
@@ -680,8 +696,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGBRG8-1X8">
- <entry>V4L2_MBUS_FMT_SGBRG8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGBRG8-1X8">
+ <entry>MEDIA_BUS_FMT_SGBRG8_1X8</entry>
<entry>0x3013</entry>
<entry></entry>
<entry>-</entry>
@@ -697,8 +713,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGRBG8-1X8">
- <entry>V4L2_MBUS_FMT_SGRBG8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGRBG8-1X8">
+ <entry>MEDIA_BUS_FMT_SGRBG8_1X8</entry>
<entry>0x3002</entry>
<entry></entry>
<entry>-</entry>
@@ -714,8 +730,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SRGGB8-1X8">
- <entry>V4L2_MBUS_FMT_SRGGB8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SRGGB8-1X8">
+ <entry>MEDIA_BUS_FMT_SRGGB8_1X8</entry>
<entry>0x3014</entry>
<entry></entry>
<entry>-</entry>
@@ -731,8 +747,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-ALAW8-1X8">
- <entry>V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-ALAW8-1X8">
+ <entry>MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8</entry>
<entry>0x3015</entry>
<entry></entry>
<entry>-</entry>
@@ -748,8 +764,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGBRG10-ALAW8-1X8">
- <entry>V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGBRG10-ALAW8-1X8">
+ <entry>MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8</entry>
<entry>0x3016</entry>
<entry></entry>
<entry>-</entry>
@@ -765,8 +781,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGRBG10-ALAW8-1X8">
- <entry>V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGRBG10-ALAW8-1X8">
+ <entry>MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8</entry>
<entry>0x3017</entry>
<entry></entry>
<entry>-</entry>
@@ -782,8 +798,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SRGGB10-ALAW8-1X8">
- <entry>V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SRGGB10-ALAW8-1X8">
+ <entry>MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8</entry>
<entry>0x3018</entry>
<entry></entry>
<entry>-</entry>
@@ -799,8 +815,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8">
- <entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-DPCM8-1X8">
+ <entry>MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8</entry>
<entry>0x300b</entry>
<entry></entry>
<entry>-</entry>
@@ -816,8 +832,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGBRG10-DPCM8-1X8">
- <entry>V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGBRG10-DPCM8-1X8">
+ <entry>MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8</entry>
<entry>0x300c</entry>
<entry></entry>
<entry>-</entry>
@@ -833,8 +849,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGRBG10-DPCM8-1X8">
- <entry>V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SGRBG10-DPCM8-1X8">
+ <entry>MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8</entry>
<entry>0x3009</entry>
<entry></entry>
<entry>-</entry>
@@ -850,8 +866,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SRGGB10-DPCM8-1X8">
- <entry>V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-SRGGB10-DPCM8-1X8">
+ <entry>MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8</entry>
<entry>0x300d</entry>
<entry></entry>
<entry>-</entry>
@@ -867,8 +883,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-2X8-PADHI-BE">
- <entry>V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-BE">
+ <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE</entry>
<entry>0x3003</entry>
<entry></entry>
<entry>-</entry>
@@ -901,8 +917,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-2X8-PADHI-LE">
- <entry>V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-LE">
+ <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE</entry>
<entry>0x3004</entry>
<entry></entry>
<entry>-</entry>
@@ -935,8 +951,8 @@
<entry>b<subscript>9</subscript></entry>
<entry>b<subscript>8</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-2X8-PADLO-BE">
- <entry>V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-BE">
+ <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE</entry>
<entry>0x3005</entry>
<entry></entry>
<entry>-</entry>
@@ -969,8 +985,8 @@
<entry>0</entry>
<entry>0</entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-2X8-PADLO-LE">
- <entry>V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-LE">
+ <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE</entry>
<entry>0x3006</entry>
<entry></entry>
<entry>-</entry>
@@ -1003,8 +1019,8 @@
<entry>b<subscript>3</subscript></entry>
<entry>b<subscript>2</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR10-1X10">
- <entry>V4L2_MBUS_FMT_SBGGR10_1X10</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR10-1X10">
+ <entry>MEDIA_BUS_FMT_SBGGR10_1X10</entry>
<entry>0x3007</entry>
<entry></entry>
<entry>-</entry>
@@ -1020,8 +1036,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGBRG10-1X10">
- <entry>V4L2_MBUS_FMT_SGBRG10_1X10</entry>
+ <row id="MEDIA-BUS-FMT-SGBRG10-1X10">
+ <entry>MEDIA_BUS_FMT_SGBRG10_1X10</entry>
<entry>0x300e</entry>
<entry></entry>
<entry>-</entry>
@@ -1037,8 +1053,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGRBG10-1X10">
- <entry>V4L2_MBUS_FMT_SGRBG10_1X10</entry>
+ <row id="MEDIA-BUS-FMT-SGRBG10-1X10">
+ <entry>MEDIA_BUS_FMT_SGRBG10_1X10</entry>
<entry>0x300a</entry>
<entry></entry>
<entry>-</entry>
@@ -1054,8 +1070,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SRGGB10-1X10">
- <entry>V4L2_MBUS_FMT_SRGGB10_1X10</entry>
+ <row id="MEDIA-BUS-FMT-SRGGB10-1X10">
+ <entry>MEDIA_BUS_FMT_SRGGB10_1X10</entry>
<entry>0x300f</entry>
<entry></entry>
<entry>-</entry>
@@ -1071,8 +1087,8 @@
<entry>r<subscript>1</subscript></entry>
<entry>r<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SBGGR12-1X12">
- <entry>V4L2_MBUS_FMT_SBGGR12_1X12</entry>
+ <row id="MEDIA-BUS-FMT-SBGGR12-1X12">
+ <entry>MEDIA_BUS_FMT_SBGGR12_1X12</entry>
<entry>0x3008</entry>
<entry></entry>
<entry>b<subscript>11</subscript></entry>
@@ -1088,8 +1104,8 @@
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGBRG12-1X12">
- <entry>V4L2_MBUS_FMT_SGBRG12_1X12</entry>
+ <row id="MEDIA-BUS-FMT-SGBRG12-1X12">
+ <entry>MEDIA_BUS_FMT_SGBRG12_1X12</entry>
<entry>0x3010</entry>
<entry></entry>
<entry>g<subscript>11</subscript></entry>
@@ -1105,8 +1121,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SGRBG12-1X12">
- <entry>V4L2_MBUS_FMT_SGRBG12_1X12</entry>
+ <row id="MEDIA-BUS-FMT-SGRBG12-1X12">
+ <entry>MEDIA_BUS_FMT_SGRBG12_1X12</entry>
<entry>0x3011</entry>
<entry></entry>
<entry>g<subscript>11</subscript></entry>
@@ -1122,8 +1138,8 @@
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-SRGGB12-1X12">
- <entry>V4L2_MBUS_FMT_SRGGB12_1X12</entry>
+ <row id="MEDIA-BUS-FMT-SRGGB12-1X12">
+ <entry>MEDIA_BUS_FMT_SRGGB12_1X12</entry>
<entry>0x3012</entry>
<entry></entry>
<entry>r<subscript>11</subscript></entry>
@@ -1175,7 +1191,7 @@
<para>For instance, a format where pixels are encoded as 8-bit YUV values
downsampled to 4:2:2 and transferred as 2 8-bit bus samples per pixel in the
- U, Y, V, Y order will be named <constant>V4L2_MBUS_FMT_UYVY8_2X8</constant>.
+ U, Y, V, Y order will be named <constant>MEDIA_BUS_FMT_UYVY8_2X8</constant>.
</para>
<para><xref linkend="v4l2-mbus-pixelcode-yuv8"/> lists existing packed YUV
@@ -1280,8 +1296,8 @@
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-Y8-1X8">
- <entry>V4L2_MBUS_FMT_Y8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-Y8-1X8">
+ <entry>MEDIA_BUS_FMT_Y8_1X8</entry>
<entry>0x2001</entry>
<entry></entry>
&dash-ent-24;
@@ -1294,8 +1310,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UV8-1X8">
- <entry>V4L2_MBUS_FMT_UV8_1X8</entry>
+ <row id="MEDIA-BUS-FMT-UV8-1X8">
+ <entry>MEDIA_BUS_FMT_UV8_1X8</entry>
<entry>0x2015</entry>
<entry></entry>
&dash-ent-24;
@@ -1322,8 +1338,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY8-1_5X8">
- <entry>V4L2_MBUS_FMT_UYVY8_1_5X8</entry>
+ <row id="MEDIA-BUS-FMT-UYVY8-1_5X8">
+ <entry>MEDIA_BUS_FMT_UYVY8_1_5X8</entry>
<entry>0x2002</entry>
<entry></entry>
&dash-ent-24;
@@ -1406,8 +1422,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY8-1_5X8">
- <entry>V4L2_MBUS_FMT_VYUY8_1_5X8</entry>
+ <row id="MEDIA-BUS-FMT-VYUY8-1_5X8">
+ <entry>MEDIA_BUS_FMT_VYUY8_1_5X8</entry>
<entry>0x2003</entry>
<entry></entry>
&dash-ent-24;
@@ -1490,8 +1506,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV8-1_5X8">
- <entry>V4L2_MBUS_FMT_YUYV8_1_5X8</entry>
+ <row id="MEDIA-BUS-FMT-YUYV8-1_5X8">
+ <entry>MEDIA_BUS_FMT_YUYV8_1_5X8</entry>
<entry>0x2004</entry>
<entry></entry>
&dash-ent-24;
@@ -1574,8 +1590,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU8-1_5X8">
- <entry>V4L2_MBUS_FMT_YVYU8_1_5X8</entry>
+ <row id="MEDIA-BUS-FMT-YVYU8-1_5X8">
+ <entry>MEDIA_BUS_FMT_YVYU8_1_5X8</entry>
<entry>0x2005</entry>
<entry></entry>
&dash-ent-24;
@@ -1658,8 +1674,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY8-2X8">
- <entry>V4L2_MBUS_FMT_UYVY8_2X8</entry>
+ <row id="MEDIA-BUS-FMT-UYVY8-2X8">
+ <entry>MEDIA_BUS_FMT_UYVY8_2X8</entry>
<entry>0x2006</entry>
<entry></entry>
&dash-ent-24;
@@ -1714,8 +1730,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY8-2X8">
- <entry>V4L2_MBUS_FMT_VYUY8_2X8</entry>
+ <row id="MEDIA-BUS-FMT-VYUY8-2X8">
+ <entry>MEDIA_BUS_FMT_VYUY8_2X8</entry>
<entry>0x2007</entry>
<entry></entry>
&dash-ent-24;
@@ -1770,8 +1786,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV8-2X8">
- <entry>V4L2_MBUS_FMT_YUYV8_2X8</entry>
+ <row id="MEDIA-BUS-FMT-YUYV8-2X8">
+ <entry>MEDIA_BUS_FMT_YUYV8_2X8</entry>
<entry>0x2008</entry>
<entry></entry>
&dash-ent-24;
@@ -1826,8 +1842,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU8-2X8">
- <entry>V4L2_MBUS_FMT_YVYU8_2X8</entry>
+ <row id="MEDIA-BUS-FMT-YVYU8-2X8">
+ <entry>MEDIA_BUS_FMT_YVYU8_2X8</entry>
<entry>0x2009</entry>
<entry></entry>
&dash-ent-24;
@@ -1882,8 +1898,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-Y10-1X10">
- <entry>V4L2_MBUS_FMT_Y10_1X10</entry>
+ <row id="MEDIA-BUS-FMT-Y10-1X10">
+ <entry>MEDIA_BUS_FMT_Y10_1X10</entry>
<entry>0x200a</entry>
<entry></entry>
&dash-ent-22;
@@ -1898,8 +1914,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY10-2X10">
- <entry>V4L2_MBUS_FMT_UYVY10_2X10</entry>
+ <row id="MEDIA-BUS-FMT-UYVY10-2X10">
+ <entry>MEDIA_BUS_FMT_UYVY10_2X10</entry>
<entry>0x2018</entry>
<entry></entry>
&dash-ent-22;
@@ -1962,8 +1978,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY10-2X10">
- <entry>V4L2_MBUS_FMT_VYUY10_2X10</entry>
+ <row id="MEDIA-BUS-FMT-VYUY10-2X10">
+ <entry>MEDIA_BUS_FMT_VYUY10_2X10</entry>
<entry>0x2019</entry>
<entry></entry>
&dash-ent-22;
@@ -2026,8 +2042,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV10-2X10">
- <entry>V4L2_MBUS_FMT_YUYV10_2X10</entry>
+ <row id="MEDIA-BUS-FMT-YUYV10-2X10">
+ <entry>MEDIA_BUS_FMT_YUYV10_2X10</entry>
<entry>0x200b</entry>
<entry></entry>
&dash-ent-22;
@@ -2090,8 +2106,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU10-2X10">
- <entry>V4L2_MBUS_FMT_YVYU10_2X10</entry>
+ <row id="MEDIA-BUS-FMT-YVYU10-2X10">
+ <entry>MEDIA_BUS_FMT_YVYU10_2X10</entry>
<entry>0x200c</entry>
<entry></entry>
&dash-ent-22;
@@ -2154,8 +2170,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-Y12-1X12">
- <entry>V4L2_MBUS_FMT_Y12_1X12</entry>
+ <row id="MEDIA-BUS-FMT-Y12-1X12">
+ <entry>MEDIA_BUS_FMT_Y12_1X12</entry>
<entry>0x2013</entry>
<entry></entry>
&dash-ent-20;
@@ -2172,8 +2188,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY8-1X16">
- <entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
+ <row id="MEDIA-BUS-FMT-UYVY8-1X16">
+ <entry>MEDIA_BUS_FMT_UYVY8_1X16</entry>
<entry>0x200f</entry>
<entry></entry>
&dash-ent-16;
@@ -2216,8 +2232,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY8-1X16">
- <entry>V4L2_MBUS_FMT_VYUY8_1X16</entry>
+ <row id="MEDIA-BUS-FMT-VYUY8-1X16">
+ <entry>MEDIA_BUS_FMT_VYUY8_1X16</entry>
<entry>0x2010</entry>
<entry></entry>
&dash-ent-16;
@@ -2260,8 +2276,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV8-1X16">
- <entry>V4L2_MBUS_FMT_YUYV8_1X16</entry>
+ <row id="MEDIA-BUS-FMT-YUYV8-1X16">
+ <entry>MEDIA_BUS_FMT_YUYV8_1X16</entry>
<entry>0x2011</entry>
<entry></entry>
&dash-ent-16;
@@ -2304,8 +2320,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU8-1X16">
- <entry>V4L2_MBUS_FMT_YVYU8_1X16</entry>
+ <row id="MEDIA-BUS-FMT-YVYU8-1X16">
+ <entry>MEDIA_BUS_FMT_YVYU8_1X16</entry>
<entry>0x2012</entry>
<entry></entry>
&dash-ent-16;
@@ -2348,8 +2364,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YDYUYDYV8-1X16">
- <entry>V4L2_MBUS_FMT_YDYUYDYV8_1X16</entry>
+ <row id="MEDIA-BUS-FMT-YDYUYDYV8-1X16">
+ <entry>MEDIA_BUS_FMT_YDYUYDYV8_1X16</entry>
<entry>0x2014</entry>
<entry></entry>
&dash-ent-16;
@@ -2436,8 +2452,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY10-1X20">
- <entry>V4L2_MBUS_FMT_UYVY10_1X20</entry>
+ <row id="MEDIA-BUS-FMT-UYVY10-1X20">
+ <entry>MEDIA_BUS_FMT_UYVY10_1X20</entry>
<entry>0x201a</entry>
<entry></entry>
&dash-ent-12;
@@ -2488,8 +2504,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY10-1X20">
- <entry>V4L2_MBUS_FMT_VYUY10_1X20</entry>
+ <row id="MEDIA-BUS-FMT-VYUY10-1X20">
+ <entry>MEDIA_BUS_FMT_VYUY10_1X20</entry>
<entry>0x201b</entry>
<entry></entry>
&dash-ent-12;
@@ -2540,8 +2556,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV10-1X20">
- <entry>V4L2_MBUS_FMT_YUYV10_1X20</entry>
+ <row id="MEDIA-BUS-FMT-YUYV10-1X20">
+ <entry>MEDIA_BUS_FMT_YUYV10_1X20</entry>
<entry>0x200d</entry>
<entry></entry>
&dash-ent-12;
@@ -2592,8 +2608,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU10-1X20">
- <entry>V4L2_MBUS_FMT_YVYU10_1X20</entry>
+ <row id="MEDIA-BUS-FMT-YVYU10-1X20">
+ <entry>MEDIA_BUS_FMT_YVYU10_1X20</entry>
<entry>0x200e</entry>
<entry></entry>
&dash-ent-12;
@@ -2644,8 +2660,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUV10-1X30">
- <entry>V4L2_MBUS_FMT_YUV10_1X30</entry>
+ <row id="MEDIA-BUS-FMT-YUV10-1X30">
+ <entry>MEDIA_BUS_FMT_YUV10_1X30</entry>
<entry>0x2016</entry>
<entry></entry>
<entry>-</entry>
@@ -2681,8 +2697,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-AYUV8-1X32">
- <entry>V4L2_MBUS_FMT_AYUV8_1X32</entry>
+ <row id="MEDIA-BUS-FMT-AYUV8-1X32">
+ <entry>MEDIA_BUS_FMT_AYUV8_1X32</entry>
<entry>0x2017</entry>
<entry></entry>
<entry>a<subscript>7</subscript></entry>
@@ -2718,8 +2734,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY12-2X12">
- <entry>V4L2_MBUS_FMT_UYVY12_2X12</entry>
+ <row id="MEDIA-BUS-FMT-UYVY12-2X12">
+ <entry>MEDIA_BUS_FMT_UYVY12_2X12</entry>
<entry>0x201c</entry>
<entry></entry>
&dash-ent-20;
@@ -2790,8 +2806,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY12-2X12">
- <entry>V4L2_MBUS_FMT_VYUY12_2X12</entry>
+ <row id="MEDIA-BUS-FMT-VYUY12-2X12">
+ <entry>MEDIA_BUS_FMT_VYUY12_2X12</entry>
<entry>0x201d</entry>
<entry></entry>
&dash-ent-20;
@@ -2862,8 +2878,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV12-2X12">
- <entry>V4L2_MBUS_FMT_YUYV12_2X12</entry>
+ <row id="MEDIA-BUS-FMT-YUYV12-2X12">
+ <entry>MEDIA_BUS_FMT_YUYV12_2X12</entry>
<entry>0x201e</entry>
<entry></entry>
&dash-ent-20;
@@ -2934,8 +2950,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU12-2X12">
- <entry>V4L2_MBUS_FMT_YVYU12_2X12</entry>
+ <row id="MEDIA-BUS-FMT-YVYU12-2X12">
+ <entry>MEDIA_BUS_FMT_YVYU12_2X12</entry>
<entry>0x201f</entry>
<entry></entry>
&dash-ent-20;
@@ -3006,8 +3022,8 @@
<entry>u<subscript>1</subscript></entry>
<entry>u<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-UYVY12-1X24">
- <entry>V4L2_MBUS_FMT_UYVY12_1X24</entry>
+ <row id="MEDIA-BUS-FMT-UYVY12-1X24">
+ <entry>MEDIA_BUS_FMT_UYVY12_1X24</entry>
<entry>0x2020</entry>
<entry></entry>
&dash-ent-8;
@@ -3066,8 +3082,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-VYUY12-1X24">
- <entry>V4L2_MBUS_FMT_VYUY12_1X24</entry>
+ <row id="MEDIA-BUS-FMT-VYUY12-1X24">
+ <entry>MEDIA_BUS_FMT_VYUY12_1X24</entry>
<entry>0x2021</entry>
<entry></entry>
&dash-ent-8;
@@ -3126,8 +3142,8 @@
<entry>y<subscript>1</subscript></entry>
<entry>y<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YUYV12-1X24">
- <entry>V4L2_MBUS_FMT_YUYV12_1X24</entry>
+ <row id="MEDIA-BUS-FMT-YUYV12-1X24">
+ <entry>MEDIA_BUS_FMT_YUYV12_1X24</entry>
<entry>0x2022</entry>
<entry></entry>
&dash-ent-8;
@@ -3186,8 +3202,8 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
- <row id="V4L2-MBUS-FMT-YVYU12-1X24">
- <entry>V4L2_MBUS_FMT_YVYU12_1X24</entry>
+ <row id="MEDIA-BUS-FMT-YVYU12-1X24">
+ <entry>MEDIA_BUS_FMT_YVYU12_1X24</entry>
<entry>0x2023</entry>
<entry></entry>
&dash-ent-8;
@@ -3366,8 +3382,8 @@
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-AHSV8888-1X32">
- <entry>V4L2_MBUS_FMT_AHSV8888_1X32</entry>
+ <row id="MEDIA-BUS-FMT-AHSV8888-1X32">
+ <entry>MEDIA_BUS_FMT_AHSV8888_1X32</entry>
<entry>0x6001</entry>
<entry></entry>
<entry>a<subscript>7</subscript></entry>
@@ -3422,7 +3438,7 @@
</para>
<para>For instance, for a JPEG baseline process and an 8-bit bus width
- the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
+ the format will be named <constant>MEDIA_BUS_FMT_JPEG_1X8</constant>.
</para>
<para>The following table lists existing JPEG compressed formats.</para>
@@ -3441,8 +3457,8 @@
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-JPEG-1X8">
- <entry>V4L2_MBUS_FMT_JPEG_1X8</entry>
+ <row id="MEDIA-BUS-FMT-JPEG-1X8">
+ <entry>MEDIA_BUS_FMT_JPEG_1X8</entry>
<entry>0x4001</entry>
              <entry>Besides its usage for the parallel bus this format is
recommended for transmission of JPEG data over MIPI CSI bus
@@ -3484,8 +3500,8 @@ interface and may change in the future.</para>
</row>
</thead>
<tbody valign="top">
- <row id="V4L2-MBUS-FMT-S5C-UYVY-JPEG-1X8">
- <entry>V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8</entry>
+ <row id="MEDIA-BUS-FMT-S5C-UYVY-JPEG-1X8">
+ <entry>MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8</entry>
<entry>0x5001</entry>
<entry>
Interleaved raw UYVY and JPEG image format with embedded
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 7cfe618f754d..ac0f8d9d2a49 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -152,6 +152,15 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.19</revnumber>
+ <date>2014-12-05</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding; and &v4l2-quantization; fields
+to &v4l2-pix-format;, &v4l2-pix-format-mplane; and &v4l2-mbus-framefmt;.
+ </revremark>
+ </revision>
+
+ <revision>
<revnumber>3.17</revnumber>
<date>2014-08-04</date>
<authorinitials>lp, hv</authorinitials>
@@ -539,7 +548,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.17</subtitle>
+ <subtitle>Revision 3.19</subtitle>
<chapter id="common">
&sub-common;
diff --git a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml b/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
index 493a39a8ef21..603fecef9083 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
@@ -287,6 +287,14 @@ input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
<entry>0x00000004</entry>
<entry>This input supports setting the TV standard by using VIDIOC_S_STD.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_IN_CAP_NATIVE_SIZE</constant></entry>
+ <entry>0x00000008</entry>
+ <entry>This input supports setting the native size using
+ the <constant>V4L2_SEL_TGT_NATIVE_SIZE</constant>
+ selection target, see <xref
+ linkend="v4l2-selections-common"/>.</entry>
+ </row>
</tbody>
</tgroup>
</table>
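The V4L2_IN_CAP_NATIVE_SIZE capability above is reported by VIDIOC_ENUMINPUT;
when it is set, the native size can be configured through the selection API.
A brief C sketch follows; the helper name is illustrative and a video capture
buffer type is assumed (the V4L2_OUT_CAP_NATIVE_SIZE case is symmetric):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <string.h>

static int set_native_size(int fd, unsigned int input, unsigned int w, unsigned int h)
{
	struct v4l2_input inp;
	struct v4l2_selection sel;

	memset(&inp, 0, sizeof(inp));
	inp.index = input;
	if (ioctl(fd, VIDIOC_ENUMINPUT, &inp) < 0)
		return -1;
	if (!(inp.capabilities & V4L2_IN_CAP_NATIVE_SIZE))
		return -1;	/* native size selection not supported */

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* assumption: capture queue */
	sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
	sel.r.width = w;
	sel.r.height = h;
	return ioctl(fd, VIDIOC_S_SELECTION, &sel);
}
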
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml b/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
index 2654e097df39..773fb1258c24 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
@@ -172,6 +172,14 @@ input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
<entry>0x00000004</entry>
<entry>This output supports setting the TV standard by using VIDIOC_S_STD.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_OUT_CAP_NATIVE_SIZE</constant></entry>
+ <entry>0x00000008</entry>
+ <entry>This output supports setting the native size using
+ the <constant>V4L2_SEL_TGT_NATIVE_SIZE</constant>
+ selection target, see <xref
+ linkend="v4l2-selections-common"/>.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index bbe9c1fd5cef..1fdc246e4256 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -540,7 +540,7 @@ appears in sysfs.
</para></listitem>
<listitem><para>
-<varname>unsigned long size</varname>: Fill in the size of the
+<varname>resource_size_t size</varname>: Fill in the size of the
memory block that <varname>addr</varname> points to. If <varname>size</varname>
is zero, the mapping is considered unused. Note that you
<emphasis>must</emphasis> initialize <varname>size</varname> with zero for
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index 784793df81ed..84ef6a90131c 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -3658,6 +3658,29 @@ struct _snd_pcm_runtime {
</para>
<para>
+ The above callback can be simplified with a helper function,
+ <function>snd_ctl_enum_info</function>. The final code
+      looks like the following.
+ (You can pass ARRAY_SIZE(texts) instead of 4 in the third
+ argument; it's a matter of taste.)
+
+ <informalexample>
+ <programlisting>
+<![CDATA[
+ static int snd_myctl_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+ {
+ static char *texts[4] = {
+ "First", "Second", "Third", "Fourth"
+ };
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
+ }
+]]>
+ </programlisting>
+ </informalexample>
+ </para>
+
+ <para>
Some common info callbacks are available for your convenience:
<function>snd_ctl_boolean_mono_info()</function> and
<function>snd_ctl_boolean_stereo_info()</function>.
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index f13c9132e9f2..653d5d739d7f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -42,7 +42,13 @@ The driver interface depends on your hardware. If your system
properly provides the SMBIOS info for IPMI, the driver will detect it
and just work. If you have a board with a standard interface (These
will generally be either "KCS", "SMIC", or "BT", consult your hardware
-manual), choose the 'IPMI SI handler' option.
+manual), choose the 'IPMI SI handler' option. A driver also exists
+for direct I2C access to the IPMI management controller. Some boards
+support this, but it is unknown if it will work on every board. For
+this, choose 'IPMI SMBus handler', but be ready to try to do some
+figuring to see if it will work on your system if the SMBIOS/ACPI
+information is wrong or not present. It is fairly safe to have both
+these enabled and let the drivers auto-detect what is present.
You should generally enable ACPI on your system, as systems with IPMI
can have ACPI tables describing them.
@@ -52,7 +58,8 @@ their job correctly, the IPMI controller should be automatically
detected (via ACPI or SMBIOS tables) and should just work. Sadly,
many boards do not have this information. The driver attempts
standard defaults, but they may not work. If you fall into this
-situation, you need to read the section below named 'The SI Driver'.
+situation, you need to read the section below named 'The SI Driver' or
+"The SMBus Driver" on how to hand-configure your system.
IPMI defines a standard watchdog timer. You can enable this with the
'IPMI Watchdog Timer' config option. If you compile the driver into
@@ -97,7 +104,12 @@ driver, each open file for this device ties in to the message handler
as an IPMI user.
ipmi_si - A driver for various system interfaces. This supports KCS,
-SMIC, and BT interfaces.
+SMIC, and BT interfaces. Unless you have an SMBus interface or your
+own custom interface, you probably need to use this.
+
+ipmi_ssif - A driver for accessing BMCs on the SMBus. It uses the
+I2C kernel driver's SMBus interfaces to send and receive IPMI messages
+over the SMBus.
ipmi_watchdog - IPMI requires systems to have a very capable watchdog
timer. This driver implements the standard Linux watchdog timer
@@ -476,6 +488,62 @@ for specifying an interface. Note that when removing an interface,
only the first three parameters (si type, address type, and address)
are used for the comparison. Any options are ignored for removing.
+The SMBus Driver (SSIF)
+-----------------------
+
+The SMBus driver allows up to 4 SMBus devices to be configured in the
+system. By default, the driver will only register with something it
+finds in DMI or ACPI tables. You can change this
+at module load time (for a module) with:
+
+ modprobe ipmi_ssif.o
+ addr=<i2caddr1>[,<i2caddr2>[,...]]
+ adapter=<adapter1>[,<adapter2>[...]]
+ dbg=<flags1>,<flags2>...
+ slave_addrs=<addr1>,<addr2>,...
+ [dbg_probe=1]
+
+The addresses are normal I2C addresses. The adapter is the string
+name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+It is *NOT* i2c-<n> itself.
+
+The debug flags are bit flags for each BMC found; they are:
+IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
+
+Setting dbg_probe to 1 will enable debugging of the probing and
+detection process for BMCs on the SMBusses.
+
+The slave_addrs specifies the IPMI address of the local BMC. This is
+usually 0x20 and the driver defaults to that, but in case it's not, it
+can be specified when the driver starts up.
+
+Discovering the IPMI compliant BMC on the SMBus can cause devices on
+the I2C bus to fail. The SMBus driver writes a "Get Device ID" IPMI
+message as a block write to the I2C bus and waits for a response.
+This action can be detrimental to some I2C devices. It is highly
+recommended that the known I2C address be given to the SMBus driver in
+the addr parameter unless you have DMI or ACPI data to tell the
+driver what to use.
+
+When compiled into the kernel, the addresses can be specified on the
+kernel command line as:
+
+ ipmi_ssif.addr=<i2caddr1>[,<i2caddr2>[...]]
+ ipmi_ssif.adapter=<adapter1>[,<adapter2>[...]]
+ ipmi_ssif.dbg=<flags1>[,<flags2>[...]]
+ ipmi_ssif.dbg_probe=1
+ ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+
+These are the same options as on the module command line.
+
+The I2C driver does not support non-blocking access or polling, so
+this driver cannot do IPMI panic events, extend the watchdog at panic
+time, or perform other panic-related IPMI functions without special kernel
+patches and driver modifications. You can get those at the openipmi
+web page.
+
+The driver supports hot add and remove of interfaces through the I2C
+sysfs interface.
Other Pieces
------------
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 38dc06d0a791..4178ebda6e66 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,7 +41,7 @@ fffe8000 fffeffff DTCM mapping area for platforms with
fffe0000 fffe7fff ITCM mapping area for platforms with
ITCM mounted inside the CPU.
-ffc00000 ffdfffff Fixmap mapping region. Addresses provided
+ffc00000 ffefffff Fixmap mapping region. Addresses provided
by fix_to_virt() will be located here.
fee00000 feffffff Mapping of PCI I/O space. This is a static
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6b972b287795..5aabc08de811 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -942,7 +942,11 @@ elevator_allow_merge_fn called whenever the block layer determines
request safely. The io scheduler may still
want to stop a merge at this point if it
results in some sort of conflict internally,
- this hook allows it to do that.
+ this hook allows it to do that. Note however
+                                                  that two *requests* can still be merged at a later
+ time. Currently the io scheduler has no way to
+ prevent that. It can only learn about the fact
+                                                  from the elevator_merge_req_fn callback.
elevator_dispatch_fn* fills the dispatch queue with ready requests.
I/O schedulers are free to postpone requests by
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 10c949b293e4..f935fac1e73b 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -312,10 +312,10 @@ the "cpuset" cgroup subsystem, the steps are something like:
2) mkdir /sys/fs/cgroup/cpuset
3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
4) Create the new cgroup by doing mkdir's and write's (or echo's) in
- the /sys/fs/cgroup virtual file system.
+ the /sys/fs/cgroup/cpuset virtual file system.
5) Start a task that will be the "founding father" of the new job.
6) Attach that task to the new cgroup by writing its PID to the
- /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+ /sys/fs/cgroup/cpuset tasks file for that cgroup.
7) fork, exec or clone the job tasks from this founding father task.
For example, the following sequence of commands will setup a cgroup
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 3c94ff3f9693..f2235a162529 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -445,7 +445,7 @@ across partially overlapping sets of CPUs would risk unstable dynamics
that would be beyond our understanding. So if each of two partially
overlapping cpusets enables the flag 'cpuset.sched_load_balance', then we
form a single sched domain that is a superset of both. We won't move
-a task to a CPU outside it cpuset, but the scheduler load balancing
+a task to a CPU outside its cpuset, but the scheduler load balancing
code might waste some compute cycles considering that possibility.
This mismatch is why there is not a simple one-to-one relation
@@ -552,8 +552,8 @@ otherwise initial value -1 that indicates the cpuset has no request.
1 : search siblings (hyperthreads in a core).
2 : search cores in a package.
3 : search cpus in a node [= system wide on non-NUMA system]
- ( 4 : search nodes in a chunk of node [on NUMA system] )
- ( 5 : search system wide [on NUMA system] )
+ 4 : search nodes in a chunk of node [on NUMA system]
+ 5 : search system wide [on NUMA system]
The system default is architecture dependent. The system default
can be changed using the relax_domain_level= boot parameter.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 46b2b5080317..a22df3ad35ff 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -326,7 +326,7 @@ per cgroup, instead of globally.
* tcp memory pressure: sockets memory pressure for the tcp protocol.
-2.7.3 Common use cases
+2.7.2 Common use cases
Because the "kmem" counter is fed to the main user counter, kernel memory can
never be limited completely independently of user memory. Say "U" is the user
@@ -354,19 +354,19 @@ set:
3. User Interface
-0. Configuration
+3.0. Configuration
a. Enable CONFIG_CGROUPS
b. Enable CONFIG_MEMCG
c. Enable CONFIG_MEMCG_SWAP (to use swap extension)
d. Enable CONFIG_MEMCG_KMEM (to use kmem extension)
-1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
+3.1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
# mount -t tmpfs none /sys/fs/cgroup
# mkdir /sys/fs/cgroup/memory
# mount -t cgroup none /sys/fs/cgroup/memory -o memory
-2. Make the new group and move bash into it
+3.2. Make the new group and move bash into it
# mkdir /sys/fs/cgroup/memory/0
# echo $$ > /sys/fs/cgroup/memory/0/tasks
diff --git a/Documentation/crypto/crypto-API-userspace.txt b/Documentation/crypto/crypto-API-userspace.txt
new file mode 100644
index 000000000000..ac619cd90300
--- /dev/null
+++ b/Documentation/crypto/crypto-API-userspace.txt
@@ -0,0 +1,205 @@
+Introduction
+============
+
+The concepts of the kernel crypto API visible to kernel space are fully
+applicable to the user space interface as well. Therefore, the kernel crypto API
+high level discussion for the in-kernel use cases applies here as well.
+
+The major difference, however, is that user space can only act as a consumer
+and never as a provider of a transformation or cipher algorithm.
+
+The following covers the user space interface exported by the kernel crypto
+API. A working example of this description is libkcapi that can be obtained from
+[1]. That library can be used by user space applications that require
+cryptographic services from the kernel.
+
+Some aspects of the in-kernel crypto API do not
+apply to user space, however. This includes the difference between synchronous
+and asynchronous invocations. The user space API call is fully synchronous.
+In addition, only a subset of all cipher types are available as documented
+below.
+
+
+User space API general remarks
+==============================
+
+The kernel crypto API is accessible from user space. Currently, the following
+ciphers are accessible:
+
+ * Message digests, including keyed message digests (HMAC, CMAC)
+
+ * Symmetric ciphers
+
+Note, AEAD ciphers are currently not supported via the symmetric cipher
+interface.
+
+The interface is provided via Netlink using the type AF_ALG. In addition, the
+setsockopt option type is SOL_ALG. In case the user space header files do not
+export these flags yet, use the following macros:
+
+#ifndef AF_ALG
+#define AF_ALG 38
+#endif
+#ifndef SOL_ALG
+#define SOL_ALG 279
+#endif
+
+A cipher is accessed with the same name as done for the in-kernel API calls.
+This includes the generic vs. unique naming schema for ciphers as well as the
+enforcement of priorities for generic names.
+
+To interact with the kernel crypto API, a Netlink socket must be created by
+the user space application. User space invokes the cipher operation with the
+send/write system call family. The result of the cipher operation is obtained
+with the read/recv system call family.
+
+The following API calls assume that the Netlink socket descriptor is already
+opened by the user space application and discuss only the kernel crypto API
+specific invocations.
+
+To initialize a Netlink interface, the following sequence has to be performed
+by the consumer:
+
+ 1. Create a socket of type AF_ALG with the struct sockaddr_alg parameter
+ specified below for the different cipher types.
+
+ 2. Invoke bind with the socket descriptor
+
+ 3. Invoke accept with the socket descriptor. The accept system call
+ returns a new file descriptor that is to be used to interact with
+ the particular cipher instance. When invoking send/write or recv/read
+ system calls to send data to the kernel or obtain data from the
+ kernel, the file descriptor returned by accept must be used.
+
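+A minimal C sketch of this sequence could look as follows; the helper name
+alg_open() is only illustrative, error handling is omitted, and the AF_ALG
+fallback define from above applies if the headers do not provide it:
+
+#include <sys/socket.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/if_alg.h>	/* struct sockaddr_alg */
+
+static int alg_open(const char *type, const char *name)
+{
+	struct sockaddr_alg sa;
+	int tfmfd, opfd;
+
+	memset(&sa, 0, sizeof(sa));
+	sa.salg_family = AF_ALG;
+	strncpy((char *)sa.salg_type, type, sizeof(sa.salg_type) - 1);
+	strncpy((char *)sa.salg_name, name, sizeof(sa.salg_name) - 1);
+
+	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);		/* step 1 */
+	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));	/* step 2 */
+	opfd = accept(tfmfd, NULL, 0);				/* step 3 */
+
+	/* opfd is used with the send()/recv() system call family; a real
+	 * program keeps tfmfd open for the lifetime of the transform. */
+	return opfd;
+}
+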
+In-place cipher operation
+=========================
+
+Just like the in-kernel operation of the kernel crypto API, the user space
+interface allows the cipher operation in-place. That means that the input buffer
+used for the send/write system call and the output buffer used by the read/recv
+system call may be one and the same. This is of particular interest for
+symmetric cipher operations where copying of the output data to its final
+destination can be avoided.
+
+If a consumer on the other hand wants to maintain the plaintext and the
+ciphertext in different memory locations, all a consumer needs to do is to
+provide different memory pointers for the encryption and decryption operation.
+
+Message digest API
+==================
+
+The message digest type to be used for the cipher operation is selected when
+invoking the bind syscall. bind requires the caller to provide a filled
+struct sockaddr data structure. This data structure must be filled as follows:
+
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash", /* this selects the hash logic in the kernel */
+ .salg_name = "sha1" /* this is the cipher name */
+};
+
+The salg_type value "hash" applies to message digests and keyed message digests.
+However, a keyed message digest is referenced by the appropriate salg_name.
+Please see below for the setsockopt interface that explains how the key can be
+set for a keyed message digest.
+
+Using the send() system call, the application provides the data that should be
+processed with the message digest. The send system call allows the following
+flags to be specified:
+
+ * MSG_MORE: If this flag is set, the send system call acts like a
+ message digest update function where the final hash is not
+ yet calculated. If the flag is not set, the send system call
+ calculates the final message digest immediately.
+
+With the recv() system call, the application can read the message digest from
+the kernel crypto API. If the buffer is too small for the message digest, the
+flag MSG_TRUNC is set by the kernel.
+
+In order to set a message digest key, the calling application must use the
+setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC operation is
+performed without the initial HMAC state change caused by the key.
+
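+For example, a one-shot SHA-1 over a buffer could be sketched as below,
+reusing the illustrative alg_open() helper and the headers from the general
+remarks section; error handling is again omitted:
+
+static int sha1_oneshot(const void *data, size_t len, unsigned char digest[20])
+{
+	int opfd = alg_open("hash", "sha1");
+	ssize_t ret;
+
+	/* no MSG_MORE: the digest is finalized by this single send() */
+	send(opfd, data, len, 0);
+	ret = recv(opfd, digest, 20, 0);
+	close(opfd);
+	return ret == 20 ? 0 : -1;
+}
+
+When the data is fed in several chunks, every intermediate send() would pass
+MSG_MORE and only the final send() would omit it, as described above.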
+
+Symmetric cipher API
+====================
+
+The operation is very similar to the message digest discussion. During
+initialization, the struct sockaddr data structure must be filled as follows:
+
+struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher", /* this selects the symmetric cipher */
+ .salg_name = "cbc(aes)" /* this is the cipher name */
+};
+
+Before data can be sent to the kernel using the write/send system call family,
+the consumer must set the key. The key setting is described with the setsockopt
+invocation below.
+
+Using the sendmsg() system call, the application provides the data that should
+be processed for encryption or decryption. In addition, the IV is specified
+with the data structure provided by the sendmsg() system call.
+
+The IV and the cipher operation type are passed as ancillary data (struct
+cmsghdr) embedded into the struct msghdr parameter of the sendmsg system
+call. See recv(2) and cmsg(3) for more information on how the cmsghdr data
+structure is used together with the send/recv system call family. That
+ancillary data holds the following information, specified with separate
+header instances:
+
+ * specification of the cipher operation type with one of these flags:
+ ALG_OP_ENCRYPT - encryption of data
+ ALG_OP_DECRYPT - decryption of data
+
+ * specification of the IV information marked with the flag ALG_SET_IV
+
+The send system call family allows the following flag to be specified:
+
+ * MSG_MORE: If this flag is set, the send system call acts like a
+ cipher update function where more input data is expected
+ with a subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller must
+make sure that all data matches the constraints given in /proc/crypto for the
+selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be large
+enough to hold all blocks of the encrypted or decrypted data. If the output
+buffer is smaller, only as many blocks as fit into it are returned.
+
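+As an illustration, encrypting one 16-byte block with cbc(aes) could be
+sketched as follows. The buffer names are placeholders, error handling is
+omitted, and the sketch assumes the key was already set with setsockopt()
+and ALG_SET_KEY on the bound socket before accept(), as in common AF_ALG
+examples; the SOL_ALG fallback define from above applies if needed:
+
+#include <sys/uio.h>
+
+static void aes_cbc_encrypt_block(int opfd, const unsigned char iv[16],
+				  const unsigned char in[16], unsigned char out[16])
+{
+	char cbuf[CMSG_SPACE(sizeof(__u32)) + CMSG_SPACE(4 + 16)] = { 0 };
+	struct msghdr msg = { 0 };
+	struct cmsghdr *cmsg;
+	struct af_alg_iv *ivh;
+	struct iovec iov;
+
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+
+	/* first ancillary header: the operation type */
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_ALG;
+	cmsg->cmsg_type = ALG_SET_OP;
+	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
+	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
+
+	/* second ancillary header: the IV (4 byte length + IV bytes) */
+	cmsg = CMSG_NXTHDR(&msg, cmsg);
+	cmsg->cmsg_level = SOL_ALG;
+	cmsg->cmsg_type = ALG_SET_IV;
+	cmsg->cmsg_len = CMSG_LEN(4 + 16);
+	ivh = (struct af_alg_iv *)CMSG_DATA(cmsg);
+	ivh->ivlen = 16;
+	memcpy(ivh->iv, iv, 16);
+
+	/* the plaintext goes in the iovec, the ciphertext is read back */
+	iov.iov_base = (void *)in;
+	iov.iov_len = 16;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	sendmsg(opfd, &msg, 0);
+	read(opfd, out, 16);
+}
+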
+Setsockopt interface
+====================
+
+In addition to the read/recv and send/write system call handling to send and
+retrieve data subject to the cipher operation, a consumer also needs to set
+the additional information for the cipher operation. This additional information
+is set using the setsockopt system call that must be invoked with the file
+descriptor of the open cipher (i.e. the file descriptor returned by the
+accept system call).
+
+Each setsockopt invocation must use the level SOL_ALG.
+
+The setsockopt interface allows setting the following data using the mentioned
+optname:
+
+ * ALG_SET_KEY -- Setting the key. Key setting is applicable to:
+
+ - the skcipher cipher type (symmetric ciphers)
+
+ - the hash cipher type (keyed message digests)
+
+User space API example
+======================
+
+Please see [1] for libkcapi which provides an easy-to-use wrapper around the
+aforementioned Netlink kernel interface. [1] also contains a test application
+that invokes all libkcapi API calls.
+
+[1] http://www.chronox.de/libkcapi.html
+
+Author
+======
+
+Stephan Mueller <smueller@chronox.de>
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
new file mode 100644
index 000000000000..d790f49066f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -0,0 +1,204 @@
+* CoreSight Components:
+
+CoreSight components are compliant with the ARM CoreSight architecture
+specification and can be connected in various topologies to suit a particular
+SoC's tracing needs. These trace components can generally be classified as
+sinks, links and sources. Trace data produced by one or more sources flows
+through the intermediate links connecting the source to the currently selected
+sink. Each CoreSight component device should use these properties to describe
+its hardware characteristics.
+
+* Required properties for all components *except* non-configurable replicators:
+
+ * compatible: These have to be supplemented with "arm,primecell" as
+ drivers are using the AMBA bus interface. Possible values include:
+ - "arm,coresight-etb10", "arm,primecell";
+ - "arm,coresight-tpiu", "arm,primecell";
+ - "arm,coresight-tmc", "arm,primecell";
+ - "arm,coresight-funnel", "arm,primecell";
+ - "arm,coresight-etm3x", "arm,primecell";
+
+ * reg: physical base address and length of the register
+ set(s) of the component.
+
+ * clocks: the clock associated to this component.
+
+ * clock-names: the name of the clock as referenced by the code.
+ Since we are using the AMBA framework, the name should be
+ "apb_pclk".
+
+ * port or ports: The representation of the component's port
+ layout using the generic DT graph presentation found in
+ "bindings/graph.txt".
+
+* Required properties for devices that don't show up on the AMBA bus, such as
+ non-configurable replicators:
+
+ * compatible: Currently supported value is (note the absence of the
+	  AMBA marker):
+ - "arm,coresight-replicator"
+
+ * id: a unique number that will identify this replicator.
+
+ * port or ports: same as above.
+
+* Optional properties for ETM/PTMs:
+
+ * arm,cp14: must be present if the system accesses ETM/PTM management
+ registers via co-processor 14.
+
+ * cpu: the cpu phandle this ETM/PTM is affined to. When omitted the
+ source is considered to belong to CPU0.
+
+* Optional property for TMC:
+
+ * arm,buffer-size: size of contiguous buffer space for TMC ETR
+ (embedded trace router)
+
+
+Example:
+
+1. Sinks
+ etb@20010000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0x20010000 0 0x1000>;
+
+ coresight-default-sink;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ etb_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+
+ tpiu@20030000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0 0x20030000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ tpiu_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+2. Links
+ replicator {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+ /* this will show up in debugfs as "0.replicator" */
+ id = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&etb_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out_port0>;
+ };
+ };
+ };
+ };
+
+ funnel@20040000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x20040000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm1_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out_port>;
+ };
+ };
+
+ };
+ };
+
+3. Sources
+ ptm@2201c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2201c000 0 0x1000>;
+
+ cpu = <&cpu0>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port0>;
+ };
+ };
+ };
+
+ ptm@2201d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2201d000 0 0x1000>;
+
+ cpu = <&cpu1>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index 33cd05e6c125..ddfade40ac59 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -49,11 +49,29 @@ Optional
occupied by the redistributors. Required if more than one such
region is present.
+Sub-nodes:
+
+GICv3 has one or more Interrupt Translation Services (ITS) that are
+used to route Message Signalled Interrupts (MSI) to the CPUs.
+
+These nodes must have the following properties:
+- compatible : Should at least contain "arm,gic-v3-its".
+- msi-controller : Boolean property. Identifies the node as an MSI controller
+- reg: Specifies the base physical address and size of the ITS
+ registers.
+
+The main GIC node must contain the appropriate #address-cells,
+#size-cells and ranges properties for the reg property of all ITS
+nodes.
+
Examples:
gic: interrupt-controller@2cf00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
reg = <0x0 0x2f000000 0 0x10000>, // GICD
<0x0 0x2f100000 0 0x200000>, // GICR
@@ -61,11 +79,20 @@ Examples:
<0x0 0x2c010000 0 0x2000>, // GICH
<0x0 0x2c020000 0 0x2000>; // GICV
interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c200000 0 0x200000>;
+ };
};
gic: interrupt-controller@2c010000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
redistributor-stride = <0x0 0x40000>; // 256kB stride
#redistributor-regions = <2>;
@@ -76,4 +103,16 @@ Examples:
<0x0 0x2c060000 0 0x2000>, // GICH
<0x0 0x2c080000 0 0x2000>; // GICV
interrupts = <1 9 4>;
+
+ gic-its@2c200000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c200000 0 0x200000>;
+ };
+
+ gic-its@2c400000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x2c400000 0 0x200000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index b38608af66db..8112d0c3675a 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -97,3 +97,56 @@ Example:
<0x2c006000 0x2000>;
interrupts = <1 9 0xf04>;
};
+
+
+* GICv2m extension for MSI/MSI-x support (Optional)
+
+Certain revisions of GIC-400 support MSI/MSI-x via V2M register frame(s).
+This is enabled by specifying v2m sub-node(s).
+
+Required properties:
+
+- compatible : The value here should contain "arm,gic-v2m-frame".
+
+- msi-controller : Identifies the node as an MSI controller.
+
+- reg : GICv2m MSI interface register base and size
+
+Optional properties:
+
+- arm,msi-base-spi : When the MSI_TYPER register contains an incorrect
+ value, this property should contain the SPI base of
+ the MSI frame, overriding the HW value.
+
+- arm,msi-num-spis : When the MSI_TYPER register contains an incorrect
+ value, this property should contain the number of
+ SPIs assigned to the frame, overriding the HW value.
+
+Example:
+
+ interrupt-controller@e1101000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-controller;
+ interrupts = <1 8 0xf04>;
+ ranges = <0 0 0 0xe1100000 0 0x100000>;
+ reg = <0x0 0xe1110000 0 0x01000>,
+ <0x0 0xe112f000 0 0x02000>,
+ <0x0 0xe1140000 0 0x10000>,
+ <0x0 0xe1160000 0 0x10000>;
+ v2m0: v2m@0x8000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x80000 0 0x1000>;
+ };
+
+ ....
+
+ v2mN: v2m@0x9000 {
+ compatible = "arm,gic-v2m-frame";
+ msi-controller;
+ reg = <0x0 0x90000 0 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
new file mode 100644
index 000000000000..d680b07ec6e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
@@ -0,0 +1,28 @@
+Mediatek 65xx/81xx sysirq
+
+Mediatek SoCs' sysirq supports a controllable IRQ inverter for each GIC SPI
+interrupt.
+
+Required properties:
+- compatible: should be one of:
+ "mediatek,mt8135-sysirq"
+ "mediatek,mt8127-sysirq"
+ "mediatek,mt6589-sysirq"
+ "mediatek,mt6582-sysirq"
+ "mediatek,mt6577-sysirq"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Use the same format as specified by GIC in
+ Documentation/devicetree/bindings/arm/gic.txt
+- interrupt-parent: phandle of irq parent for sysirq. The parent must
+ use the same interrupt-cells format as GIC.
+- reg: Physical base address of the intpol registers and length of memory
+ mapped region.
+
+Example:
+ sysirq: interrupt-controller@10200100 {
+ compatible = "mediatek,mt6589-sysirq", "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200100 0 0x1c>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 709efaa30841..f46ca9a316a2 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -16,6 +16,8 @@ Required properties:
future controllers.
Must be "samsung,exynos3250-adc" for
controllers compatible with ADC of Exynos3250.
+ Must be "samsung,exynos7-adc" for
+ the ADC in Exynos7 and compatibles
Must be "samsung,s3c2410-adc" for
the ADC in s3c2410 and compatibles
Must be "samsung,s3c2416-adc" for
@@ -43,13 +45,16 @@ Required properties:
compatible ADC block)
- vdd-supply VDD input supply.
+- samsung,syscon-phandle Contains the PMU system controller node
+ (To access the ADC_PHY register on Exynos5250/5420/5800/3250)
+
Note: child nodes can be added for auto probing from device tree.
Example: adding device info in dtsi file
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>, <0x10040718 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
#io-channel-cells = <1>;
io-channel-ranges;
@@ -58,13 +63,14 @@ adc: adc@12D10000 {
clock-names = "adc";
vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
};
Example: adding device info in dtsi file for Exynos3250 with additional sclk
adc: adc@126C0000 {
	compatible = "samsung,exynos3250-adc", "samsung,exynos-adc-v2";
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupts = <0 137 0>;
#io-channel-cells = <1>;
io-channel-ranges;
@@ -73,6 +79,7 @@ adc: adc@126C0000 {
clock-names = "adc", "sclk";
vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
};
Example: Adding child nodes in dts file
diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt
index 1c8351604d38..b460edd12766 100644
--- a/Documentation/devicetree/bindings/ata/marvell.txt
+++ b/Documentation/devicetree/bindings/ata/marvell.txt
@@ -6,11 +6,17 @@ Required Properties:
- interrupts : Interrupt the controller is using
- nr-ports : Number of SATA ports in use.
+Optional Properties:
+- phys : List of phandles to sata phys
+- phy-names : Should be "0", "1", etc., one number per phandle
+
Example:
sata@80000 {
compatible = "marvell,orion-sata";
reg = <0x80000 0x5000>;
interrupts = <21>;
+ phys = <&sata_phy0>, <&sata_phy1>;
+ phy-names = "0", "1";
nr-ports = <2>;
}
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 80ae87a0784b..2493a5a31655 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -3,18 +3,21 @@
Required properties:
- compatible : should contain one of the following:
- "renesas,sata-r8a7779" for R-Car H1
+ ("renesas,rcar-sata" is deprecated)
- "renesas,sata-r8a7790-es1" for R-Car H2 ES1
- "renesas,sata-r8a7790" for R-Car H2 other than ES1
- "renesas,sata-r8a7791" for R-Car M2-W
- "renesas,sata-r8a7793" for R-Car M2-N
- reg : address and length of the SATA registers;
- interrupts : must consist of one interrupt specifier.
+- clocks : must contain a reference to the functional clock.
Example:
-sata: sata@fc600000 {
- compatible = "renesas,sata-r8a7779";
- reg = <0xfc600000 0x2000>;
+sata0: sata@ee300000 {
+ compatible = "renesas,sata-r8a7791";
+ reg = <0 0xee300000 0 0x2000>;
interrupt-parent = <&gic>;
- interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
};
diff --git a/Documentation/devicetree/bindings/btmrvl.txt b/Documentation/devicetree/bindings/btmrvl.txt
new file mode 100644
index 000000000000..58f964bb0a52
--- /dev/null
+++ b/Documentation/devicetree/bindings/btmrvl.txt
@@ -0,0 +1,29 @@
+btmrvl
+------
+
+Required properties:
+
+ - compatible : must be "btmrvl,cfgdata"
+
+Optional properties:
+
+ - btmrvl,cal-data : Calibration data downloaded to the device during
+ initialization. This is an array of 28 values (u8).
+
+ - btmrvl,gpio-gap : gpio and gap (in msecs) combination to be
+ configured.
+
+Example:
+
+In the example below, GPIO pin 13 is configured as a wakeup source and the GAP
+is set to 100 msecs.
+
+btmrvl {
+ compatible = "btmrvl,cfgdata";
+
+ btmrvl,cal-data = /bits/ 8 <
+ 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
+ 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
+ 0x00 0x00 0xf0 0x00>;
+ btmrvl,gpio-gap = <0x0d64>;
+};
diff --git a/Documentation/devicetree/bindings/bus/bcma.txt b/Documentation/devicetree/bindings/bus/bcma.txt
index 62a48348ac15..edd44d802139 100644
--- a/Documentation/devicetree/bindings/bus/bcma.txt
+++ b/Documentation/devicetree/bindings/bus/bcma.txt
@@ -8,6 +8,11 @@ Required properties:
The cores on the AXI bus are automatically detected by bcma with the
memory ranges they are using and they get registered afterwards.
+Automatic detection of the IRQ number does not work on
+BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide
+them manually through the device tree. Use an interrupt-map to specify
+the IRQ used by the devices on the bus. The first address is just an
+index, because we do not have any special register.
The top-level axi bus may contain children representing attached cores
(devices). This is needed since some hardware details can't be auto
@@ -22,6 +27,22 @@ Example:
ranges = <0x00000000 0x18000000 0x00100000>;
#address-cells = <1>;
#size-cells = <1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x000fffff 0xffff>;
+ interrupt-map =
+ /* Ethernet Controller 0 */
+ <0x00024000 0 &gic GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+
+ /* Ethernet Controller 1 */
+ <0x00025000 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+
+ /* PCIe Controller 0 */
+ <0x00012000 0 &gic GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00012000 1 &gic GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00012000 2 &gic GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00012000 3 &gic GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00012000 4 &gic GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00012000 5 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
chipcommon {
reg = <0x00000000 0x1000>;
diff --git a/Documentation/devicetree/bindings/chosen.txt b/Documentation/devicetree/bindings/chosen.txt
new file mode 100644
index 000000000000..ed838f453f7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/chosen.txt
@@ -0,0 +1,46 @@
+The chosen node
+---------------
+
+The chosen node does not represent a real device, but serves as a place
+for passing data between firmware and the operating system, like boot
+arguments. Data in the chosen node does not represent the hardware.
+
+
+stdout-path property
+--------------------
+
+Device trees may specify the device to be used for boot console output
+with a stdout-path property under /chosen, as described in ePAPR, e.g.
+
+/ {
+ chosen {
+ stdout-path = "/serial@f00:115200";
+ };
+
+ serial@f00 {
+ compatible = "vendor,some-uart";
+ reg = <0xf00 0x10>;
+ };
+};
+
+If the character ":" is present in the value, this terminates the path.
+The meaning of any characters following the ":" is device-specific, and
+must be specified in the relevant binding documentation.
+
+For UART devices, the preferred binding is a string in the form:
+
+ <baud>{<parity>{<bits>{<flow>}}}
+
+where
+
+ baud - baud rate in decimal
+ parity - 'n' (none), 'o' (odd) or 'e' (even)
+ bits - number of data bits
+ flow - 'r' (rts)
+
+For example: 115200n8r
+
+Implementation note: Linux will look for the property "linux,stdout-path" or
+on PowerPC "stdout" if "stdout-path" is not found. However, the
+"linux,stdout-path" and "stdout" properties are deprecated. New platforms
+should only use the "stdout-path" property.
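A minimal sketch combining the path form with the UART options string described
above; the serial node and the 115200n8r settings are illustrative assumptions
rather than values taken from this patch:

	/ {
		chosen {
			/* 115200 baud, no parity, 8 data bits, RTS flow control */
			stdout-path = "/serial@f00:115200n8r";
		};

		serial@f00 {
			compatible = "vendor,some-uart";
			reg = <0xf00 0x10>;
		};
	};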
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 5666812fc42b..266ff9d23229 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -62,6 +62,8 @@ Required properties:
It takes parent's clock-frequency as its clock.
* "fsl,qoriq-sysclk-2.0": for input system clock (v2.0).
It takes parent's clock-frequency as its clock.
+ * "fsl,qoriq-platform-pll-1.0" for the platform PLL clock (v1.0)
+ * "fsl,qoriq-platform-pll-2.0" for the platform PLL clock (v2.0)
- #clock-cells: From common clock binding. The number of cells in a
clock-specifier. Should be <0> for "fsl,qoriq-sysclk-[1,2].0"
clocks, or <1> for "fsl,qoriq-core-pll-[1,2].0" clocks.
@@ -128,8 +130,16 @@ Example for clock block and clock provider:
clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
clock-output-names = "cmux1";
};
+
+ platform-pll: platform-pll@c00 {
+ #clock-cells = <1>;
+ reg = <0xc00 0x4>;
+ compatible = "fsl,qoriq-platform-pll-1.0";
+ clocks = <&sysclk>;
+ clock-output-names = "platform-pll", "platform-pll-div2";
+ };
};
- }
+};
Example for clock consumer:
@@ -139,4 +149,4 @@ Example for clock consumer:
clocks = <&mux0>;
...
};
- }
+};
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
index 1d3ace088172..b7ee5c7e0f75 100644
--- a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
@@ -11,7 +11,7 @@ Please find an example below:
Clockgen block diagram
-------------------------------------------------------------------
- | Flexgen stucture |
+ | Flexgen structure |
| --------------------------------------------- |
| | ------- -------- -------- | |
clk_sysin | | | | | | | | |
diff --git a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
index 5c65eccd0e56..e8a35c71e947 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
@@ -1,5 +1,5 @@
Freescale SAHARA Cryptographic Accelerator included in some i.MX chips.
-Currently only i.MX27 is supported.
+Currently only i.MX27 and i.MX53 are supported.
Required properties:
- compatible : Should be "fsl,<soc>-sahara"
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
new file mode 100644
index 000000000000..0eb2b3207e08
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -0,0 +1,54 @@
+* Atmel Extensible Direct Memory Access Controller (XDMAC)
+
+* XDMA Controller
+Required properties:
+- compatible: Should be "atmel,<chip>-dma".
+ <chip> compatible description:
+ - sama5d4: first SoC adding the XDMAC
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain DMA interrupt.
+- #dma-cells: Must be <1>, used to represent the number of integer cells in
+the dmas property of client devices.
+ - The 1st cell specifies the channel configuration register:
+ - bit 13: SIF, source interface identifier, used to get the memory
+ interface identifier,
+ - bit 14: DIF, destination interface identifier, used to get the peripheral
+ interface identifier,
+ - bit 30-24: PERID, peripheral identifier.
+
+Example:
+
+dma1: dma-controller@f0004000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0004000 0x200>;
+ interrupts = <50 4 0>;
+ #dma-cells = <1>;
+};
+
+
+* DMA clients
+DMA clients connected to the Atmel XDMA controller must use the format
+described in the dma.txt file, using a one-cell specifier for each channel,
+i.e. a phandle plus one configuration cell. The two cells, in order, are:
+1. A phandle pointing to the DMA controller.
+2. Channel configuration register. Configurable fields are:
+ - bit 13: SIF, source interface identifier, used to get the memory
+ interface identifier,
+ - bit 14: DIF, destination interface identifier, used to get the peripheral
+ interface identifier,
+ - bit 30-24: PERID, peripheral identifier.
+
+Example:
+
+i2c2: i2c@f8024000 {
+ compatible = "atmel,at91sam9x5-i2c";
+ reg = <0xf8024000 0x4000>;
+ interrupts = <34 4 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(6))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(7))>;
+ dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 4659fd952301..dc8d3aac1aa9 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -48,6 +48,7 @@ The full ID of peripheral types can be found below.
21 ESAI
22 SSI Dual FIFO (needs firmware ver >= 2)
23 Shared ASRC
+ 24 SAI
The third cell specifies the transfer priority as below.
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index d75a9d767022..f8c3311b7153 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -1,7 +1,9 @@
QCOM BAM DMA controller
Required properties:
-- compatible: must contain "qcom,bam-v1.4.0" for MSM8974
+- compatible: must be one of the following:
+ * "qcom,bam-v1.4.0" for MSM8974, APQ8074 and APQ8084
+ * "qcom,bam-v1.3.0" for APQ8064, IPQ8064 and MSM8960
- reg: Address range for DMA registers
- interrupts: Should contain the one interrupt shared by all channels
- #dma-cells: must be <1>, the cell in the dmas property of the client device
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index 3e145c1675b1..9cdcba24d7c3 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -4,7 +4,7 @@ This driver follows the generic DMA bindings defined in dma.txt.
Required properties:
-- compatible: Must be "allwinner,sun6i-a31-dma"
+- compatible: Must be "allwinner,sun6i-a31-dma" or "allwinner,sun8i-a23-dma"
- reg: Should contain the registers base address and length
- interrupts: Should contain a reference to the interrupt used by this device
- clocks: Should contain a reference to the parent AHB clock
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549fff..e75f0e549fff 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
index 1b756cf9afb0..1b756cf9afb0 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt b/Documentation/devicetree/bindings/drm/imx/ldb.txt
index 443bcb6134d5..443bcb6134d5 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
+++ b/Documentation/devicetree/bindings/drm/imx/ldb.txt
diff --git a/Documentation/devicetree/bindings/gpio/gpio-74xx-mmio.txt b/Documentation/devicetree/bindings/gpio/gpio-74xx-mmio.txt
new file mode 100644
index 000000000000..7bb1a9d60133
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-74xx-mmio.txt
@@ -0,0 +1,30 @@
+* 74XX MMIO GPIO driver
+
+Required properties:
+- compatible: Should contain one of the following:
+ "ti,741g125": for 741G125 (1-bit Input),
+ "ti,741g174": for 741G74 (1-bit Output),
+ "ti,742g125": for 742G125 (2-bit Input),
+ "ti,7474" : for 7474 (2-bit Output),
+ "ti,74125" : for 74125 (4-bit Input),
+ "ti,74175" : for 74175 (4-bit Output),
+ "ti,74365" : for 74365 (6-bit Input),
+ "ti,74174" : for 74174 (6-bit Output),
+ "ti,74244" : for 74244 (8-bit Input),
+ "ti,74273" : for 74273 (8-bit Output),
+ "ti,741624" : for 741624 (16-bit Input),
+ "ti,7416374": for 7416374 (16-bit Output).
+- reg: Physical base address and length where IC resides.
+- gpio-controller: Marks the device node as a gpio controller.
+- #gpio-cells: Should be two. The first cell is the pin number and
+ the second cell is used to specify the GPIO polarity:
+ 0 = Active High,
+ 1 = Active Low.
+
+Example:
+ ctrl: gpio@30008004 {
+ compatible = "ti,74174";
+ reg = <0x30008004 0x1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt b/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt
index c306a2d0f2b1..f3332b9a8ed4 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt
@@ -57,6 +57,8 @@ Optional device specific properties:
occurred on. If it is not set, the interrupt are only generated for the
bank they belong to.
On devices with only one interrupt output this property is useless.
+- microchip,irq-active-high: Sets the INTPOL flag in the IOCON register. This
+ configures the IRQ output polarity as active high.
Example I2C (with interrupt):
gpiom1: gpio@20 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-vf610.txt b/Documentation/devicetree/bindings/gpio/gpio-vf610.txt
new file mode 100644
index 000000000000..436cc99c6598
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-vf610.txt
@@ -0,0 +1,55 @@
+* Freescale VF610 PORT/GPIO module
+
+The Freescale PORT/GPIO modules are two adjacent modules providing GPIO
+functionality. Each pair serves 32 GPIOs. The VF610 has 5 instances of
+each, and each PORT module has its own interrupt.
+
+Required properties for GPIO node:
+- compatible : Should be "fsl,<soc>-gpio", currently "fsl,vf610-gpio"
+- reg : The first reg tuple represents the PORT module, the second tuple
+ the GPIO module.
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2. The first cell is the GPIO number.
+ The second cell bits[3:0] is used to specify trigger type and level flags:
+ 1 = low-to-high edge triggered.
+ 2 = high-to-low edge triggered.
+ 4 = active high level-sensitive.
+ 8 = active low level-sensitive.
+
+Note: Each GPIO port should have an alias correctly numbered in "aliases"
+node.
+
+Examples:
+
+aliases {
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+};
+
+gpio1: gpio@40049000 {
+ compatible = "fsl,vf610-gpio";
+ reg = <0x40049000 0x1000 0x400ff000 0x40>;
+ interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 0 32>;
+};
+
+gpio2: gpio@4004a000 {
+ compatible = "fsl,vf610-gpio";
+ reg = <0x4004a000 0x1000 0x400ff040 0x40>;
+ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 32 32>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 3fb8f53071b8..b9bd1d64cfa6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -13,13 +13,22 @@ properties, each containing a 'gpio-list':
gpio-specifier : Array of #gpio-cells specifying specific gpio
(controller specific)
-GPIO properties should be named "[<name>-]gpios". The exact
-meaning of each gpios property must be documented in the device tree
-binding for each device.
+GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
+of this GPIO for the device. While a non-existent <name> is considered valid
+for compatibility reasons (resolving to the "gpios" property), it is not allowed
+for new bindings.
-For example, the following could be used to describe GPIO pins used
-as chip select lines; with chip selects 0, 1 and 3 populated, and chip
-select 2 left empty:
+GPIO properties can contain one or more GPIO phandles, but only in exceptional
+cases should they contain more than one. If your device uses several GPIOs with
+distinct functions, reference each of them under its own property, giving it a
+meaningful name. The only case where an array of GPIOs is accepted is when
+several GPIOs serve the same function (e.g. a parallel data line).
+
+The exact purpose of each gpios property must be documented in the device tree
+binding of the device.
+
+The following example could be used to describe GPIO pins used as device enable
+and bit-banged data signals:
gpio1: gpio1 {
gpio-controller
@@ -30,10 +39,12 @@ select 2 left empty:
#gpio-cells = <1>;
};
[...]
- chipsel-gpios = <&gpio1 12 0>,
- <&gpio1 13 0>,
- <0>, /* holes are permitted, means no GPIO 2 */
- <&gpio2 2>;
+
+ enable-gpios = <&gpio2 2>;
+ data-gpios = <&gpio1 12 0>,
+ <&gpio1 13 0>,
+ <&gpio1 14 0>,
+ <&gpio1 15 0>;
Note that gpio-specifier length is controller dependent. In the
above example, &gpio1 uses 2 cells to specify a gpio, while &gpio2
@@ -42,16 +53,17 @@ only uses one.
gpio-specifier may encode: bank, pin position inside the bank,
whether pin is open-drain and whether pin is logically inverted.
Exact meaning of each specifier cell is controller specific, and must
-be documented in the device tree binding for the device.
+be documented in the device tree binding for the device. Use the macros
+defined in include/dt-bindings/gpio/gpio.h whenever possible:
Example of a node using GPIOs:
node {
- gpios = <&qe_pio_e 18 0>;
+ enable-gpios = <&qe_pio_e 18 GPIO_ACTIVE_HIGH>;
};
-In this example gpio-specifier is "18 0" and encodes GPIO pin number,
-and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
+GPIO_ACTIVE_HIGH is 0, so in this example gpio-specifier is "18 0" and encodes
+GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
1.1) GPIO specifier best practices
----------------------------------
diff --git a/Documentation/devicetree/bindings/gpio/pl061-gpio.txt b/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
index a2c416bcbccc..89058d375b7c 100644
--- a/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
@@ -7,4 +7,4 @@ Required properties:
- bit 0 specifies polarity (0 for normal, 1 for inverted)
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt mapping for GPIO IRQ.
-
+- gpio-ranges : Interaction with the PINCTRL subsystem.
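A minimal sketch of a PL061 bank using the new gpio-ranges property; the
register address, interrupt number and pinctrl phandle/range below are
assumptions, not values from this patch:

	gpio0: gpio@101e4000 {
		compatible = "arm,pl061", "arm,primecell";
		reg = <0x101e4000 0x1000>;
		interrupts = <6>;
		gpio-controller;
		#gpio-cells = <2>;
		/* map the 8 pins of this bank onto pinctrl pins 0..7 */
		gpio-ranges = <&pinctrl 0 0 8>;
	};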
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 941a26aa4322..38fb86f28ba2 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -6,7 +6,9 @@ Required Properties:
- "renesas,gpio-r8a7778": for R8A7778 (R-Mobile M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
- - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2) compatible GPIO controller.
+ - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
+ - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
+ - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- "renesas,gpio-rcar": for generic R-Car GPIO controller.
- reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index b48f4ef31d93..4c32ef0b7db8 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -191,6 +191,8 @@ of the following host1x client modules:
- nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- nvidia,edid: supplies a binary EDID blob
- nvidia,panel: phandle of a display panel
+ - nvidia,ganged-mode: contains a phandle to a second DSI controller to gang
+ up with in order to support up to 8 data lanes
- sor: serial output resource
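A minimal sketch of two DSI controllers ganged via the new nvidia,ganged-mode
property; the unit addresses are assumed and the other required host1x client
properties are omitted:

	dsia: dsi@54300000 {
		/* ... other required DSI properties ... */
		nvidia,ganged-mode = <&dsib>;
	};

	dsib: dsi@54400000 {
		/* ... other required DSI properties ... */
	};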
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 2d150c311a05..c99eb34e640b 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -68,7 +68,7 @@ STMicroelectronics stih4xx platforms
number of clocks may depend of the SoC type.
- clock-names: names of the clocks listed in clocks property in the same
order.
- - hdmi,hpd-gpio: gpio id to detect if an hdmi cable is plugged or not.
+ - ddc: phandle of an I2C controller used for DDC EDID probing
sti-hda:
Required properties:
@@ -83,6 +83,22 @@ sti-hda:
- clock-names: names of the clocks listed in clocks property in the same
order.
+sti-hqvdp:
+ must be a child of sti-display-subsystem
+ Required properties:
+ - compatible: "st,stih<chip>-hqvdp"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - clocks: from common clock binding: handle hardware IP needed clocks, the
+ number of clocks may depend of the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+ - resets: resets to be used by the device
+ See ../reset/reset.txt for details.
+ - reset-names: names of the resets listed in resets property in the same
+ order.
+ - st,vtg: phandle on vtg main device node.
+
Example:
/ {
@@ -173,7 +189,6 @@ Example:
interrupt-names = "irq";
clock-names = "pix", "tmds", "phy", "audio";
clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
- hdmi,hpd-gpio = <&PIO2 5>;
};
sti-hda@fe85a000 {
@@ -184,6 +199,16 @@ Example:
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
};
+
+ sti-hqvdp@9c000000 {
+ compatible = "st,stih407-hqvdp";
+ reg = <0x9c000000 0x100000>;
+ clock-names = "hqvdp", "pix_main";
+ clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>;
+ reset-names = "hqvdp";
+ resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
+ st,vtg = <&vtg_main>;
+ };
};
...
};
diff --git a/Documentation/devicetree/bindings/hwrng/atmel-trng.txt b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
new file mode 100644
index 000000000000..4ac5aaa2d024
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
@@ -0,0 +1,16 @@
+Atmel TRNG (True Random Number Generator) block
+
+Required properties:
+- compatible : Should be "atmel,at91sam9g45-trng"
+- reg : Offset and length of the register set of this block
+- interrupts : the interrupt number for the TRNG block
+- clocks: should contain the TRNG clk source
+
+Example:
+
+trng@fffcc000 {
+ compatible = "atmel,at91sam9g45-trng";
+ reg = <0xfffcc000 0x4000>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&trng_clk>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
index 5199b0c8cf7a..fee26dc3e858 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-designware.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
@@ -14,10 +14,10 @@ Optional properties :
- i2c-sda-hold-time-ns : should contain the SDA hold time in nanoseconds.
This option is only supported in hardware blocks version 1.11a or newer.
- - i2c-scl-falling-time : should contain the SCL falling time in nanoseconds.
+ - i2c-scl-falling-time-ns : should contain the SCL falling time in nanoseconds.
This value which is by default 300ns is used to compute the tLOW period.
- - i2c-sda-falling-time : should contain the SDA falling time in nanoseconds.
+ - i2c-sda-falling-time-ns : should contain the SDA falling time in nanoseconds.
This value which is by default 300ns is used to compute the tHIGH period.
Example :
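The file's own example is truncated by this hunk; as a sketch only, a node
using the renamed timing properties could look like this (register address,
interrupt and timing values are assumptions):

	i2c@f0000000 {
		compatible = "snps,designware-i2c";
		reg = <0xf0000000 0x1000>;
		interrupts = <11>;
		clock-frequency = <400000>;
		i2c-sda-hold-time-ns = <300>;
		i2c-scl-falling-time-ns = <300>;
		i2c-sda-falling-time-ns = <300>;
		#address-cells = <1>;
		#size-cells = <0>;
	};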
diff --git a/Documentation/devicetree/bindings/i2c/i2c-img-scb.txt b/Documentation/devicetree/bindings/i2c/i2c-img-scb.txt
new file mode 100644
index 000000000000..b6461602dca5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-img-scb.txt
@@ -0,0 +1,26 @@
+IMG Serial Control Bus (SCB) I2C Controller
+
+Required Properties:
+- compatible: "img,scb-i2c"
+- reg: Physical base address and length of controller registers
+- interrupts: Interrupt number used by the controller
+- clocks : Should contain a clock specifier for each entry in clock-names
+- clock-names : Should contain the following entries:
+ "scb", for the SCB core clock.
+ "sys", for the system clock.
+- clock-frequency: The I2C bus frequency in Hz
+- #address-cells: Should be <1>
+- #size-cells: Should be <0>
+
+Example:
+
+i2c@18100000 {
+ compatible = "img,scb-i2c";
+ reg = <0x18100000 0x200>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2c0_clk>, <&system_clk>;
+ clock-names = "scb", "sys";
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
index 4a8513e44740..52d37fd8d3e5 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
@@ -11,6 +11,8 @@ Required properties:
Optional properties:
- clock-frequency : Contains the desired I2C/HS-I2C bus clock frequency in Hz.
The absence of the property indicates the default frequency 100 kHz.
+- dmas: A list of two dma specifiers, one for each entry in dma-names.
+- dma-names: should contain "tx" and "rx".
Examples:
@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */
interrupts = <64>;
clock-frequency = <400000>;
};
+
+i2c0: i2c@40066000 { /* i2c0 on vf610 */
+ compatible = "fsl,vf610-i2c";
+ reg = <0x40066000 0x1000>;
+ interrupts =<0 71 0x04>;
+ dmas = <&edma0 0 50>,
+ <&edma0 0 51>;
+ dma-names = "rx","tx";
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-meson.txt b/Documentation/devicetree/bindings/i2c/i2c-meson.txt
new file mode 100644
index 000000000000..682f9a6f766e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-meson.txt
@@ -0,0 +1,24 @@
+Amlogic Meson I2C controller
+
+Required properties:
+ - compatible: must be "amlogic,meson6-i2c"
+ - reg: physical address and length of the device registers
+ - interrupts: a single interrupt specifier
+ - clocks: clock for the device
+ - #address-cells: should be <1>
+ - #size-cells: should be <0>
+
+Optional properties:
+- clock-frequency: the desired I2C bus clock frequency in Hz; in
+ absence of this property the default value is used (100 kHz).
+
+Examples:
+
+ i2c@c8100500 {
+ compatible = "amlogic,meson6-i2c";
+ reg = <0xc8100500 0x20>;
+ interrupts = <0 92 1>;
+ clocks = <&clk81>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
index d2153ce36fa8..2bfc6e7ed094 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
@@ -2,6 +2,15 @@ Device tree configuration for Renesas IIC (sh_mobile) driver
Required properties:
- compatible : "renesas,iic-<soctype>". "renesas,rmobile-iic" as fallback
+ Examples with soctypes are:
+ - "renesas,iic-r8a73a4" (R-Mobile APE6)
+ - "renesas,iic-r8a7740" (R-Mobile A1)
+ - "renesas,iic-r8a7790" (R-Car H2)
+ - "renesas,iic-r8a7791" (R-Car M2-W)
+ - "renesas,iic-r8a7792" (R-Car V2H)
+ - "renesas,iic-r8a7793" (R-Car M2-N)
+ - "renesas,iic-r8a7794" (R-Car E2)
+ - "renesas,iic-sh73a0" (SH-Mobile AG5)
- reg : address start and address range size of device
- interrupts : interrupt of device
- clocks : clock for device
@@ -10,6 +19,11 @@ Required properties:
Optional properties:
- clock-frequency : frequency of bus clock in Hz. Default 100kHz if unset.
+- dmas : Must contain a list of two references to DMA
+ specifiers, one for transmission, and one for
+ reception.
+- dma-names : Must contain a list of two DMA names, "tx" and "rx".
+
Pinctrl properties might be needed, too. See there.
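A sketch of an IIC node using the new dmas/dma-names properties; the register
range, interrupt, clock and DMA specifier values are assumptions for an R-Car
H2 class SoC, not values from this patch:

	iic0: i2c@e6500000 {
		compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
		reg = <0 0xe6500000 0 0x425>;
		interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
		dma-names = "tx", "rx";
		#address-cells = <1>;
		#size-cells = <0>;
	};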
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 605dcca5dbec..9f4e3824e71e 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -17,6 +17,9 @@ adi,adt7473 +/-1C TDM Extended Temp Range I.C
adi,adt7475 +/-1C TDM Extended Temp Range I.C
adi,adt7476 +/-1C TDM Extended Temp Range I.C
adi,adt7490 +/-1C TDM Extended Temp Range I.C
+adi,adxl345 Three-Axis Digital Accelerometer
+adi,adxl346 Three-Axis Digital Accelerometer
+adi,adxl34x Three-Axis Digital Accelerometer
at,24c08 i2c serial eeprom (24cxx)
atmel,24c00 i2c serial eeprom (24cxx)
atmel,24c01 i2c serial eeprom (24cxx)
@@ -76,7 +79,12 @@ ovti,ov5642 OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI an
pericom,pt7c4338 Real-time Clock Module
plx,pex8648 48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
ramtron,24c64 i2c serial eeprom (24cxx)
+ricoh,r2025sd I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+ricoh,r2221tl I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
ricoh,rs5c372a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+ricoh,rs5c372b I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+ricoh,rv5c386 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
sii,s35390a 2-wire CMOS real-time clock
st-micro,24c256 i2c serial eeprom (24cxx)
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt
new file mode 100644
index 000000000000..4e36d6e2f7b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.txt
@@ -0,0 +1,46 @@
+Qualcomm's SPMI PMIC current ADC
+
+The QPNP PMIC current ADC (IADC) provides an interface for clients to read
+current. A 16-bit ADC is used for current measurements. The IADC can measure
+the current through an external resistor (channel 1) or an internal (built-in)
+resistor (channel 0). When using an external resistor, it must be described by
+the qcom,external-resistor-micro-ohms property.
+
+IADC node:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain "qcom,spmi-iadc".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: IADC base address and length in the SPMI PMIC register map
+
+- interrupts:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: End of ADC conversion.
+
+- qcom,external-resistor-micro-ohms:
+ Usage: optional
+ Value type: <u32>
+ Definition: Sense resistor value in micro Ohms.
+ If not defined, a value of 10000 micro Ohms will be used.
+
+Example:
+ /* IADC node */
+ pmic_iadc: iadc@3600 {
+ compatible = "qcom,spmi-iadc";
+ reg = <0x3600 0x100>;
+ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
+ qcom,external-resistor-micro-ohms = <10000>;
+ #io-channel-cells = <1>;
+ };
+
+ /* IIO client node */
+ bat {
+ io-channels = <&pmic_iadc 0>;
+ io-channel-names = "iadc";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index 5d3ec1df226d..a9a5fe19ff2a 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -1,7 +1,7 @@
Rockchip Successive Approximation Register (SAR) A/D Converter bindings
Required properties:
-- compatible: Should be "rockchip,saradc"
+- compatible: Should be "rockchip,saradc" or "rockchip,rk3066-tsadc"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/input/cap1106.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
index 4b463904cba0..7d0a3009771b 100644
--- a/Documentation/devicetree/bindings/input/cap1106.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -1,14 +1,16 @@
-Device tree bindings for Microchip CAP1106, 6 channel capacitive touch sensor
+Device tree bindings for Microchip CAP11xx based capacitive touch sensors
-The node for this driver must be a child of a I2C controller node, as the
+The node for this device must be a child of an I2C controller node, as the
device communicates via I2C only.
Required properties:
- compatible: Must be "microchip,cap1106"
+ compatible: Must contain one of:
+ "microchip,cap1106"
+ "microchip,cap1126"
+ "microchip,cap1188"
reg: The I2C slave address of the device.
- Only 0x28 is valid.
interrupts: Property describing the interrupt line the
device's ALERT#/CM_IRQ# pin is connected to.
@@ -26,6 +28,10 @@ Optional properties:
Valid values are 1, 2, 4, and 8.
By default, a gain of 1 is set.
+ microchip,irq-active-high: By default the interrupt pin is active low
+ open drain. This property allows using the active
+ high push-pull output.
+
linux,keycodes: Specifies an array of numeric keycode values to
be used for the channels. If this property is
omitted, KEY_A, KEY_B, etc are used as
@@ -43,11 +49,11 @@ i2c_controller {
autorepeat;
microchip,sensor-gain = <2>;
- linux,keycodes = <103 /* KEY_UP */
- 106 /* KEY_RIGHT */
- 108 /* KEY_DOWN */
- 105 /* KEY_LEFT */
- 109 /* KEY_PAGEDOWN */
- 104>; /* KEY_PAGEUP */
+ linux,keycodes = <103>, /* KEY_UP */
+ <106>, /* KEY_RIGHT */
+ <108>, /* KEY_DOWN */
+ <105>, /* KEY_LEFT */
+ <109>, /* KEY_PAGEDOWN */
+ <104>; /* KEY_PAGEUP */
};
}
diff --git a/Documentation/devicetree/bindings/input/elan_i2c.txt b/Documentation/devicetree/bindings/input/elan_i2c.txt
new file mode 100644
index 000000000000..ee3242c4ba67
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elan_i2c.txt
@@ -0,0 +1,34 @@
+Elantech I2C Touchpad
+
+Required properties:
+- compatible: must be "elan,ekth3000".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+ binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+ binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+ pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+ &i2c1 {
+ /* ... */
+
+ touchpad@15 {
+ compatible = "elan,ekth3000";
+ reg = <0x15>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
new file mode 100644
index 000000000000..a765232e6446
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -0,0 +1,33 @@
+Elantech I2C Touchscreen
+
+Required properties:
+- compatible: must be "elan,ekth3500".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+ binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+ binding[0]).
+
+Optional properties:
+- wakeup-source: touchscreen can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+ pinctrl binding [1]).
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+ &i2c1 {
+ /* ... */
+
+ touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 5c2c02140a62..a4a38fcf2ed6 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,10 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- - gpios: OF device-tree gpio specification.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
+Required mutual exclusive subnode-properties:
+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input
+
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
If not specified defaults to <1> == EV_KEY.
@@ -33,4 +36,9 @@ Example nodes:
linux,code = <103>;
gpios = <&gpio1 0 1>;
};
+ button@22 {
+ label = "GPIO Key DOWN";
+ linux,code = <108>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ };
...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
new file mode 100644
index 000000000000..5a65478e5d40
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -0,0 +1,55 @@
+MIPS Global Interrupt Controller (GIC)
+
+The MIPS GIC routes external interrupts to individual VPEs and IRQ pins.
+It also supports local (per-processor) interrupts and software-generated
+interrupts which can be used as IPIs. The GIC also includes a free-running
+global timer, per-CPU count/compare timers, and a watchdog.
+
+Required properties:
+- compatible : Should be "mti,gic".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt specifier. Should be 3.
+ - The first cell is the type of interrupt, local or shared.
+ See <include/dt-bindings/interrupt-controller/mips-gic.h>.
+ - The second cell is the GIC interrupt number.
+ - The third cell encodes the interrupt flags.
+ See <include/dt-bindings/interrupt-controller/irq.h> for a list of valid
+ flags.
+
+Optional properties:
+- reg : Base address and length of the GIC registers. If not present,
+ the base address reported by the hardware GCR_GIC_BASE will be used.
+- mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors
+ to which the GIC may not route interrupts. Valid values are 2 - 7.
+ This property is ignored if the CPU is started in EIC mode.
+
+Required properties for timer sub-node:
+- compatible : Should be "mti,gic-timer".
+- interrupts : Interrupt for the GIC local timer.
+- clock-frequency : Clock frequency at which the GIC timers operate.
+
+Example:
+
+ gic: interrupt-controller@1bdc0000 {
+ compatible = "mti,gic";
+ reg = <0x1bdc0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ mti,reserved-cpu-vectors = <7>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ uart@18101400 {
+ ...
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644
index 000000000000..9a55ac3735e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -0,0 +1,26 @@
+Rockchip IOMMU
+==============
+
+A Rockchip DRM iommu translates io virtual addresses to physical addresses for
+its master device. Each slave device is bound to a single master device, and
+shares its clocks, power domain and irq.
+
+Required properties:
+- compatible : Should be "rockchip,iommu"
+- reg : Address space for the configuration registers
+- interrupts : Interrupt specifier for the IOMMU instance
+- interrupt-names : Interrupt name for the IOMMU instance
+- #iommu-cells : Should be <0>. This indicates the iommu is a
+ "single-master" device, and needs no additional information
+ to associate with its master device. See:
+ Documentation/devicetree/bindings/iommu/iommu.txt
+
+Example:
+
+ vopl_mmu: iommu@ff940300 {
+ compatible = "rockchip,iommu";
+ reg = <0xff940300 0x100>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vopl_mmu";
+ #iommu-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-lp8860.txt b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
new file mode 100644
index 000000000000..aad38dd94d4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
@@ -0,0 +1,29 @@
+* Texas Instruments - lp8860 4-Channel LED Driver
+
+The LP8860-Q1 is a high-efficiency LED
+driver with a boost controller. It has 4 high-precision
+current sinks that can be controlled by a PWM input
+signal, an SPI/I2C master, or both.
+
+Required properties:
+ - compatible:
+ "ti,lp8860"
+ - reg - I2C slave address
+ - label - Used for naming LEDs
+
+Optional properties:
+ - enable-gpio - gpio pin to enable/disable the device.
+ - supply - "vled" - LED supply
+
+Example:
+
+leds: leds@6 {
+ compatible = "ti,lp8860";
+ reg = <0x2d>;
+ label = "display_cluster";
+ enable-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/lp8860-q1
diff --git a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
index 48edc4b92afb..d1a043339c11 100644
--- a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
@@ -43,6 +43,9 @@ Required properties:
device. The format is dependent on which interrupt
controller the OMAP device uses
- ti,hwmods: Name of the hwmod associated with the mailbox
+- #mbox-cells: Common mailbox binding property to identify the number
+ of cells required for the mailbox specifier. Should be
+ 1
- ti,mbox-num-users: Number of targets (processor devices) that the mailbox
device can interrupt
- ti,mbox-num-fifos: Number of h/w fifo queues within the mailbox IP block
@@ -72,6 +75,18 @@ data that represent the following:
Cell #3 (usr_id) - mailbox user id for identifying the interrupt line
associated with generating a tx/rx fifo interrupt.
+Mailbox Users:
+==============
+A device needing to communicate with a target processor device should specify
+them using the common mailbox binding properties, "mboxes" and the optional
+"mbox-names" (please see Documentation/devicetree/bindings/mailbox/mailbox.txt
+for details). Each value of the mboxes property should contain a phandle to the
+mailbox controller device node and an args specifier that will be the phandle to
+the intended sub-mailbox child node to be used for communication. The equivalent
+"mbox-names" property value can be used to give a name to the communication channel
+to be used by the client user.
+
+
Example:
--------
@@ -81,6 +96,7 @@ mailbox: mailbox@4a0f4000 {
reg = <0x4a0f4000 0x200>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "mailbox";
+ #mbox-cells = <1>;
ti,mbox-num-users = <3>;
ti,mbox-num-fifos = <8>;
mbox_ipu: mbox_ipu {
@@ -93,12 +109,19 @@ mailbox: mailbox@4a0f4000 {
};
};
+dsp {
+ ...
+ mboxes = <&mailbox &mbox_dsp>;
+ ...
+};
+
/* AM33xx */
mailbox: mailbox@480C8000 {
compatible = "ti,omap4-mailbox";
reg = <0x480C8000 0x200>;
interrupts = <77>;
ti,hwmods = "mailbox";
+ #mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <8>;
mbox_wkupm3: wkup_m3 {
diff --git a/Documentation/devicetree/bindings/media/meson-ir.txt b/Documentation/devicetree/bindings/media/meson-ir.txt
new file mode 100644
index 000000000000..407848e85f31
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/meson-ir.txt
@@ -0,0 +1,14 @@
+* Amlogic Meson IR remote control receiver
+
+Required properties:
+ - compatible : should be "amlogic,meson6-ir"
+ - reg : physical base address and length of the device registers
+ - interrupts : a single specifier for the interrupt from the device
+
+Example:
+
+ ir-receiver@c8100480 {
+ compatible= "amlogic,meson6-ir";
+ reg = <0xc8100480 0x20>;
+ interrupts = <0 15 1>;
+ };
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index ba61782c2af9..9dafe6b06cd2 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -6,6 +6,8 @@ family of devices. The current blocks are always slaves and suppot one input
channel which can be either RGB, YUYV or BT656.
- compatible: Must be one of the following
+ - "renesas,vin-r8a7794" for the R8A7794 device
+ - "renesas,vin-r8a7793" for the R8A7793 device
- "renesas,vin-r8a7791" for the R8A7791 device
- "renesas,vin-r8a7790" for the R8A7790 device
- "renesas,vin-r8a7779" for the R8A7779 device
diff --git a/Documentation/devicetree/bindings/media/si4713.txt b/Documentation/devicetree/bindings/media/si4713.txt
new file mode 100644
index 000000000000..5ee5552d3465
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/si4713.txt
@@ -0,0 +1,30 @@
+* Silicon Labs FM Radio transmitter
+
+The Silicon Labs Si4713 is an FM radio transmitter with receive power scan
+supporting 76-108 MHz. It includes an RDS encoder and has both a stereo-analog
+and a digital interface, which supports I2S, left-justified and a custom
+DSP-mode format. It is programmable through an I2C interface.
+
+Required Properties:
+- compatible: Should contain "silabs,si4713"
+- reg: the I2C address of the device
+
+Optional Properties:
+- interrupts-extended: Interrupt specifier for the chip's interrupt
+- reset-gpios: GPIO specifier for the chip's reset line
+- vdd-supply: phandle for Vdd regulator
+- vio-supply: phandle for Vio regulator
+
+Example:
+
+&i2c2 {
+ fmtx: si4713@63 {
+ compatible = "silabs,si4713";
+ reg = <0x63>;
+
+ interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */
+ reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */
+ vio-supply = <&vio>;
+ vdd-supply = <&vaux1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt b/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
new file mode 100644
index 000000000000..d4e0141d3620
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
@@ -0,0 +1,37 @@
+* Interrupt Controller
+
+Properties:
+- compatible: "brcm,bcm3384-intc"
+
+ Compatibility with BCM3384 and possibly other BCM33xx/BCM63xx SoCs.
+
+- reg: Address/length pairs for each mask/status register set. Length must
+ be 8. If multiple register sets are specified, the first set will
+ handle IRQ offsets 0..31, the second set 32..63, and so on.
+
+- interrupt-controller: This is an interrupt controller.
+
+- #interrupt-cells: Must be <1>. Just a simple IRQ offset; no level/edge
+ or polarity configuration is possible with this controller.
+
+- interrupt-parent: This controller is cascaded from a MIPS CPU HW IRQ, or
+ from another INTC.
+
+- interrupts: The IRQ on the parent controller.
+
+Example:
+ periph_intc: periph_intc@14e00038 {
+ compatible = "brcm,bcm3384-intc";
+
+ /*
+ * IRQs 0..31: mask reg 0x14e00038, status reg 0x14e0003c
+ * IRQs 32..63: mask reg 0x14e00340, status reg 0x14e00344
+ */
+ reg = <0x14e00038 0x8 0x14e00340 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/brcm/bmips.txt b/Documentation/devicetree/bindings/mips/brcm/bmips.txt
new file mode 100644
index 000000000000..8ef71b4085ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/bmips.txt
@@ -0,0 +1,8 @@
+* Broadcom MIPS (BMIPS) CPUs
+
+Required properties:
+- compatible: "brcm,bmips3300", "brcm,bmips4350", "brcm,bmips4380",
+ "brcm,bmips5000"
+
+- mips-hpt-frequency: This is common to all CPUs in the system so it lives
+ under the "cpus" node.
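A sketch of how these properties might be laid out under the cpus node; the
CPU core type, count and HPT frequency are assumptions:

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;
		mips-hpt-frequency = <163125000>;

		cpu@0 {
			compatible = "brcm,bmips4350";
			device_type = "cpu";
			reg = <0>;
		};

		cpu@1 {
			compatible = "brcm,bmips4350";
			device_type = "cpu";
			reg = <1>;
		};
	};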
diff --git a/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt b/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
new file mode 100644
index 000000000000..8a139cb3c0b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
@@ -0,0 +1,11 @@
+* Broadcom cable/DSL platforms
+
+SoCs:
+
+Required properties:
+- compatible: "brcm,bcm3384", "brcm,bcm33843"
+
+Boards:
+
+Required properties:
+- compatible: "brcm,bcm93384wvg"
diff --git a/Documentation/devicetree/bindings/mips/brcm/usb.txt b/Documentation/devicetree/bindings/mips/brcm/usb.txt
new file mode 100644
index 000000000000..452c45c7bf29
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/usb.txt
@@ -0,0 +1,11 @@
+* Broadcom USB controllers
+
+Required properties:
+- compatible: "brcm,bcm3384-ohci", "brcm,bcm3384-ehci"
+
+ These currently use the generic-ohci and generic-ehci drivers. On some
+ systems, special handling may be needed in the following cases:
+
+ - Restoring state after systemwide power save modes
+ - Sharing PHYs with the USBD (UDC) hardware
+ - Figuring out which controllers are disabled on ASIC bondout variants
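A sketch of an EHCI node pairing the Broadcom compatible with the generic-ehci
fallback; the register address, interrupt parent and interrupt number are
assumed placeholders:

	ehci@15400000 {
		compatible = "brcm,bcm3384-ehci", "generic-ehci";
		reg = <0x15400000 0x100>;
		interrupt-parent = <&periph_intc>;
		interrupts = <41>;
	};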
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
index 13aa4b62c62a..fc149f326dae 100644
--- a/Documentation/devicetree/bindings/mips/cpu_irq.txt
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -1,6 +1,6 @@
MIPS CPU interrupt controller
-On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU
+On MIPS the mips_cpu_irq_of_init() helper can be used to initialize the 8 CPU
IRQs from a devicetree file and create a irq_domain for IRQ controller.
With the irq_domain in place we can describe how the 8 IRQs are wired to the
@@ -36,7 +36,7 @@ Example devicetree:
Example platform irq.c:
static struct of_device_id __initdata of_irq_ids[] = {
- { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
{ .compatible = "ralink,rt2880-intc", .data = intc_of_init },
{},
};
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 6edc3b616e98..1fe6dde98499 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -5,7 +5,9 @@ Required properties:
- reg : should specify localbus address and size used for the chip,
and hardware ECC controller if available.
If the hardware ECC is PMECC, it should contain address and size for
- PMECC, PMECC Error Location controller and ROM which has lookup tables.
+ PMECC and PMECC Error Location controller.
+ The PMECC lookup table address and size in ROM are optional. If not
+ specified, the driver will build it at runtime.
- atmel,nand-addr-offset : offset for the address latch.
- atmel,nand-cmd-offset : offset for the command latch.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
@@ -27,7 +29,7 @@ Optional properties:
are: 512, 1024.
- atmel,pmecc-lookup-table-offset : includes two offsets of lookup table in ROM
for different sector size. First one is for sector size 512, the next is for
- sector size 1024.
+ sector size 1024. If not specified, the driver will build the table at runtime.
- nand-bus-width : 8 or 16 bus width if not present 8
- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
- Nand Flash Controller(NFC) is a slave driver under Atmel nand flash
diff --git a/Documentation/devicetree/bindings/mtd/diskonchip.txt b/Documentation/devicetree/bindings/mtd/diskonchip.txt
new file mode 100644
index 000000000000..3e13bfdbea5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/diskonchip.txt
@@ -0,0 +1,15 @@
+M-Systems and Sandisk DiskOnChip devices
+
+M-System DiskOnChip G3
+======================
+The Sandisk (formerly M-Systems) docg3 is a NAND device of 64MB to 256MB.
+
+Required properties:
+ - compatible: should be "m-systems,diskonchip-g3"
+ - reg: register base and size
+
+Example:
+ docg3: flash@0 {
+ compatible = "m-systems,diskonchip-g3";
+ reg = <0x0 0x2000>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
index 36ef07d3c90f..af8915b41ccf 100644
--- a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -11,8 +11,8 @@ Required properties:
are made in native endianness.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
-- gpios : specifies the gpio pins to control the NAND device. nwp is an
- optional gpio and may be set to 0 if not present.
+- gpios : Specifies the GPIO pins to control the NAND device. The order of
+ GPIO references is: RDY, nCE, ALE, CLE, and an optional nWP.
Optional properties:
- bank-width : Width (in bytes) of the device. If not present, the width
@@ -35,11 +35,11 @@ gpio-nand@1,0 {
reg = <1 0x0000 0x2>;
#address-cells = <1>;
#size-cells = <1>;
- gpios = <&banka 1 0 /* rdy */
- &banka 2 0 /* nce */
- &banka 3 0 /* ale */
- &banka 4 0 /* cle */
- 0 /* nwp */>;
+ gpios = <&banka 1 0>, /* RDY */
+ <&banka 2 0>, /* nCE */
+ <&banka 3 0>, /* ALE */
+ <&banka 4 0>, /* CLE */
+ <0>; /* nWP */
partition@0 {
...
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
new file mode 100644
index 000000000000..0273adb8638c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -0,0 +1,45 @@
+Allwinner NAND Flash Controller (NFC)
+
+Required properties:
+- compatible : "allwinner,sun4i-a10-nand".
+- reg : shall contain registers location and length for data and reg.
+- interrupts : shall define the nand controller interrupt.
+- #address-cells: shall be set to 1. Encode the nand CS.
+- #size-cells : shall be set to 0.
+- clocks : shall reference nand controller clocks.
+- clock-names : nand controller internal clock names. Shall contain :
+ * "ahb" : AHB gating clock
+ * "mod" : nand controller clock
+
+Optional children nodes:
+Children nodes represent the available nand chips.
+
+Optional properties:
+- allwinner,rb : shall contain the native Ready/Busy ids.
+ or
+- rb-gpios : shall contain the gpios used as R/B pins.
+- nand-ecc-mode : one of the supported ECC modes ("hw", "hw_syndrome", "soft",
+ "soft_bch" or "none")
+
+See Documentation/devicetree/bindings/mtd/nand.txt for generic NAND bindings.
+
+
+Examples:
+nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <0 37 1>;
+ clocks = <&ahb_gates 13>, <&nand_clk>;
+ clock-names = "ahb", "mod";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+ allwinner,rb = <0>;
+ nand-ecc-mode = "soft_bch";
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 41354f730beb..26efd526d16c 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -7,7 +7,10 @@ Required properties:
- PCS registers
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
-- interrupts: Should contain the amd-xgbe interrupt
+- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
+ listed is required and is the general device interrupt. If the optional
+ amd,per-channel-interrupt property is specified, then one additional
+ interrupt for each DMA channel supported by the device should be specified
- clocks:
- DMA clock for the amd-xgbe device (used for calculating the
correct Rx interrupt watchdog timer value on a DMA channel
@@ -23,6 +26,9 @@ Optional properties:
- mac-address: mac address to be assigned to the device. Can be overridden
by UEFI.
- dma-coherent: Present if dma operations are coherent
+- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
+ a unique interrupt for each DMA channel - this requires an additional
+ interrupt to be configured for each DMA channel.
Example:
xgbe@e0700000 {
@@ -30,7 +36,9 @@ Example:
reg = <0 0xe0700000 0 0x80000>,
<0 0xe0780000 0 0x80000>;
interrupt-parent = <&gic>;
- interrupts = <0 325 4>;
+ interrupts = <0 325 4>,
+ <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+ amd,per-channel-interrupt;
clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
clock-names = "dma_clk", "ptp_clk";
phy-handle = <&phy>;
diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt
index 8f1ae81228e3..5a1d8b0c39e9 100644
--- a/Documentation/devicetree/bindings/net/can/c_can.txt
+++ b/Documentation/devicetree/bindings/net/can/c_can.txt
@@ -4,6 +4,8 @@ Bosch C_CAN/D_CAN controller Device Tree Bindings
Required properties:
- compatible : Should be "bosch,c_can" for C_CAN controllers and
"bosch,d_can" for D_CAN controllers.
+ Can be "ti,dra7-d_can", "ti,am3352-d_can" or
+ "ti,am4372-d_can".
- reg : physical base address and size of the C_CAN/D_CAN
registers map
- interrupts : property with a value describing the interrupt
@@ -12,6 +14,9 @@ Required properties:
Optional properties:
- ti,hwmods : Must be "d_can<n>" or "c_can<n>", n being the
instance number
+- syscon-raminit : Phandle to the system control region that contains the
+ RAMINIT register, the register offset to the RAMINIT
+ register and the CAN instance number (0 offset); see
+ the example fragment below.
Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
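+
+An illustrative node using the new property (the syscon label, RAMINIT
+register offset and instance number are placeholders, not values from a
+real board):
+
+	dcan0: can@481cc000 {
+		compatible = "ti,am3352-d_can";
+		reg = <0x481cc000 0x2000>;
+		interrupts = <52>;
+		/* <syscon phandle, RAMINIT register offset, CAN instance> */
+		syscon-raminit = <&scm_conf 0x644 0>;
+	};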
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index a62c889aafca..e124847443f8 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -10,7 +10,7 @@ Required properties:
- dsa,ethernet : Should be a phandle to a valid Ethernet device node
- dsa,mii-bus : Should be a phandle to a valid MDIO bus device node
-Optionnal properties:
+Optional properties:
- interrupts : property with a value describing the switch
interrupt number (not supported by the driver)
@@ -23,6 +23,13 @@ Each of these switch child nodes should have the following required properties:
- #address-cells : Must be 1
- #size-cells : Must be 0
+A switch child node has the following optional property:
+
+- eeprom-length : Set to the length of an EEPROM connected to the
+ switch. Must be set if the switch cannot detect
+ the presence and/or size of a connected EEPROM,
+ otherwise optional (see the example below).
+
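+A switch child node using this property might look like the following
+sketch (the EEPROM size is illustrative; other required properties are
+elided):
+
+	switch@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		eeprom-length = <512>;	/* EEPROM size in bytes */
+		...
+	};
+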
A switch may have multiple "port" children nodes
Each port children node must have the following mandatory properties:
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
index e1d99b95c4ec..87496a8c64ab 100644
--- a/Documentation/devicetree/bindings/net/micrel.txt
+++ b/Documentation/devicetree/bindings/net/micrel.txt
@@ -6,19 +6,32 @@ Optional properties:
- micrel,led-mode : LED mode value to set for PHYs with configurable LEDs.
- Configure the LED mode with single value. The list of PHYs and
- the bits that are currently supported:
+ Configure the LED mode with single value. The list of PHYs and the
+ bits that are currently supported:
- KSZ8001: register 0x1e, bits 15..14
- KSZ8041: register 0x1e, bits 15..14
- KSZ8021: register 0x1f, bits 5..4
- KSZ8031: register 0x1f, bits 5..4
- KSZ8051: register 0x1f, bits 5..4
+ KSZ8001: register 0x1e, bits 15..14
+ KSZ8041: register 0x1e, bits 15..14
+ KSZ8021: register 0x1f, bits 5..4
+ KSZ8031: register 0x1f, bits 5..4
+ KSZ8051: register 0x1f, bits 5..4
+ KSZ8081: register 0x1f, bits 5..4
+ KSZ8091: register 0x1f, bits 5..4
- See the respective PHY datasheet for the mode values.
+ See the respective PHY datasheet for the mode values.
+
+ - micrel,rmii-reference-clock-select-25-mhz: RMII Reference Clock Select
+ bit selects 25 MHz mode
+
+ Setting the RMII Reference Clock Select bit enables 25 MHz rather
+ than 50 MHz clock mode.
+
+ Note that this option is only needed for certain PHY revisions with a
+ non-standard, inverted function of this configuration bit.
+ Specifically, a clock reference ("rmii-ref" below) is always needed to
+ actually select a mode.
- clocks, clock-names: contains clocks according to the common clock bindings.
- supported clocks:
- - KSZ8021, KSZ8031: "rmii-ref": The RMII refence input clock. Used
- to determine the XI input clock.
+ supported clocks:
+ - KSZ8021, KSZ8031, KSZ8081, KSZ8091: "rmii-ref": The RMII reference
+ input clock. Used to determine the XI input clock.
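+
+ An illustrative KSZ8081 PHY node combining these properties (the MDIO
+ address, clock phandle and led-mode value are placeholders):
+
+	ethernet-phy@0 {
+		reg = <0>;
+		clocks = <&rmii_ref_clk>;
+		clock-names = "rmii-ref";
+		micrel,led-mode = <1>;
+		micrel,rmii-reference-clock-select-25-mhz;
+	};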
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 5b8c58903077..40831fbaff72 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,7 +19,6 @@ Optional Properties:
specifications. If neither of these are specified, the default is to
assume clause 22. The compatible list may also contain other
elements.
-- max-speed: Maximum PHY supported speed (10, 100, 1000...)
If the phy's identifier is known then the list may contain an entry
of the form: "ethernet-phy-idAAAA.BBBB" where
@@ -29,6 +28,8 @@ Optional Properties:
4 hex digits. This is the chip vendor OUI bits 19:24,
followed by 10 bits of a vendor specific ID.
+- max-speed: Maximum PHY supported speed (10, 100, 1000...)
+
Example:
ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 34d4db1a4e25..2f6ec85fda8e 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -9,6 +9,7 @@ Required properties:
"renesas,ether-r8a7779" if the device is a part of R8A7779 SoC.
"renesas,ether-r8a7790" if the device is a part of R8A7790 SoC.
"renesas,ether-r8a7791" if the device is a part of R8A7791 SoC.
+ "renesas,ether-r8a7793" if the device is a part of R8A7793 SoC.
"renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
"renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
- reg: offset and length of (1) the E-DMAC/feLic register block (required),
diff --git a/Documentation/devicetree/bindings/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
new file mode 100644
index 000000000000..690d0a568ef3
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+
+Required properties:
+- compatible: should be "auo,b116xw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
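+
+An illustrative panel node (the regulator and backlight phandles are
+placeholders; both properties come from the simple-panel binding):
+
+	panel: panel {
+		compatible = "auo,b116xw03";
+		power-supply = <&panel_regulator>;
+		backlight = <&backlight>;
+	};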
diff --git a/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
new file mode 100644
index 000000000000..7da1d5c038ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
@@ -0,0 +1,7 @@
+HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "hannstar,hsd070pww1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
new file mode 100644
index 000000000000..04caaae19af6
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
@@ -0,0 +1,7 @@
+Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "hit,tx23d38vm0caa"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
new file mode 100644
index 000000000000..2743b07cd2f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g121i1-l01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
new file mode 100644
index 000000000000..f522bb8e47e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
@@ -0,0 +1,49 @@
+Sharp Microelectronics 10.1" WQXGA TFT LCD panel
+
+This panel requires a dual-channel DSI host to operate. It supports two modes:
+- left-right: each channel drives the left or right half of the screen
+- even-odd: each channel drives the even or odd lines of the screen
+
+Each of the DSI channels controls a separate DSI peripheral. The peripheral
+driven by the first link (DSI-LINK1), left or even, is considered the primary
+peripheral and controls the device. The 'link2' property contains a phandle
+to the peripheral driven by the second link (DSI-LINK2, right or odd).
+
+Note that in video mode the DSI-LINK1 interface always provides the left/even
+pixels and DSI-LINK2 always provides the right/odd pixels. In command mode it
+is possible to program either link to drive the left/even or right/odd pixels
+but for the sake of consistency this binding assumes that the same assignment
+is chosen as for video mode.
+
+Required properties:
+- compatible: should be "sharp,lq101r1sx01"
+- reg: DSI virtual channel of the peripheral
+
+Required properties (for DSI-LINK1 only):
+- link2: phandle to the DSI peripheral on the secondary link. Note that the
+ presence of this property marks the containing node as DSI-LINK1.
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties (for DSI-LINK1 only):
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ dsi@54300000 {
+ panel: panel@0 {
+ compatible = "sharp,lq101r1sx01";
+ reg = <0>;
+
+ link2 = <&secondary>;
+
+ power-supply = <...>;
+ backlight = <...>;
+ };
+ };
+
+ dsi@54400000 {
+ secondary: panel@0 {
+ compatible = "sharp,lq101r1sx01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt b/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt
index 88f8c23384c0..c0155f842f62 100644
--- a/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/berlin-sata-phy.txt
@@ -2,7 +2,9 @@ Berlin SATA PHY
---------------
Required properties:
-- compatible: should be "marvell,berlin2q-sata-phy"
+- compatible: should be one of
+ "marvell,berlin2-sata-phy"
+ "marvell,berlin2q-sata-phy"
- address-cells: should be 1
- size-cells: should be 0
- phy-cells: from the generic PHY bindings, must be 1
diff --git a/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt b/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt
new file mode 100644
index 000000000000..be33780f668e
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/berlin-usb-phy.txt
@@ -0,0 +1,16 @@
+* Marvell Berlin USB PHY
+
+Required properties:
+- compatible: "marvell,berlin2-usb-phy" or "marvell,berlin2cd-usb-phy"
+- reg: base address and length of the registers
+- #phy-cells: should be 0
+- resets: reference to the reset controller
+
+Example:
+
+ usb-phy@f774000 {
+ compatible = "marvell,berlin2-usb-phy";
+ reg = <0xf774000 0x128>;
+ #phy-cells = <0>;
+ resets = <&chip 0x104 14>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt b/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt
new file mode 100644
index 000000000000..46a135dae6b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-miphy28lp.txt
@@ -0,0 +1,128 @@
+STMicroelectronics STi MIPHY28LP PHY binding
+============================================
+
+This binding describes a miphy device that is used to control PHY hardware
+for SATA, PCIe or USB3.
+
+Required properties (controller (parent) node):
+- compatible : Should be "st,miphy28lp-phy".
+- st,syscfg : Should be a phandle of the system configuration register group
+ which contains the SATA, PCIe or USB3 mode setting bits.
+
+Required nodes : A sub-node is required for each channel the controller
+ provides. Address range information, including the usual
+ 'reg' and 'reg-names' properties, is used inside these
+ nodes to describe the controller's topology. These nodes
+ are translated by the driver's .xlate() function.
+
+Required properties (port (child) node):
+- #phy-cells : Should be 1 (See second example)
+ Cell after port phandle is device type from:
+ - PHY_TYPE_SATA
+ - PHY_TYPE_PCI
+ - PHY_TYPE_USB3
+- reg : Address and length of the register set for the device.
+- reg-names : The names of the register addresses corresponding to the registers
+ filled in "reg". It can also contain the offset of the system configuration
+ registers used as glue-logic to setup the device for SATA/PCIe or USB3
+ devices.
+- resets : phandle to the parent reset controller.
+- reset-names : Associated name must be "miphy-sw-rst".
+
+Optional properties (port (child) node):
+- st,osc-rdy : to check the MIPHY0_OSC_RDY status in the glue-logic. This
+  bit is not available in all MiPHYs. For example, on STiH407 only
+  MiPHY0 has it.
+- st,osc-force-ext : to select the external oscillator. This can differ
+  between MiPHY instances inside the same SoC.
+- st,sata_gen : to select which SATA_SPDMODE has to be set in the SATA system config
+ register.
+- st,px_rx_pol_inv : to invert polarity of RXn/RXp (respectively negative line and positive
+ line).
+- st,ssc-on : enable SSC (spread spectrum clocking) to reduce the effects of
+  EMI (SATA or PCIe only).
+- st,tx-impedance-comp : to compensate TX impedance, avoiding out-of-range
+  values.
+
+example:
+
+ miphy28lp_phy: miphy28lp@9b22000 {
+ compatible = "st,miphy28lp-phy";
+ st,syscfg = <&syscfg_core>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ phy_port0: port@9b22000 {
+ reg = <0x9b22000 0xff>,
+ <0x9b09000 0xff>,
+ <0x9b04000 0xff>,
+ <0x114 0x4>, /* sysctrl MiPHY cntrl */
+ <0x818 0x4>, /* sysctrl MiPHY status*/
+ <0xe0 0x4>, /* sysctrl PCIe */
+ <0xec 0x4>; /* sysctrl SATA */
+ reg-names = "sata-up",
+ "pcie-up",
+ "pipew",
+ "miphy-ctrl-glue",
+ "miphy-status-glue",
+ "pcie-glue",
+ "sata-glue";
+ #phy-cells = <1>;
+ st,osc-rdy;
+ reset-names = "miphy-sw-rst";
+ resets = <&softreset STIH407_MIPHY0_SOFTRESET>;
+ };
+
+ phy_port1: port@9b2a000 {
+ reg = <0x9b2a000 0xff>,
+ <0x9b19000 0xff>,
+ <0x9b14000 0xff>,
+ <0x118 0x4>,
+ <0x81c 0x4>,
+ <0xe4 0x4>,
+ <0xf0 0x4>;
+ reg-names = "sata-up",
+ "pcie-up",
+ "pipew",
+ "miphy-ctrl-glue",
+ "miphy-status-glue",
+ "pcie-glue",
+ "sata-glue";
+ #phy-cells = <1>;
+ st,osc-force-ext;
+ reset-names = "miphy-sw-rst";
+ resets = <&softreset STIH407_MIPHY1_SOFTRESET>;
+ };
+
+ phy_port2: port@8f95000 {
+ reg = <0x8f95000 0xff>,
+ <0x8f90000 0xff>,
+ <0x11c 0x4>,
+ <0x820 0x4>;
+ reg-names = "pipew",
+ "usb3-up",
+ "miphy-ctrl-glue",
+ "miphy-status-glue";
+ #phy-cells = <1>;
+ reset-names = "miphy-sw-rst";
+ resets = <&softreset STIH407_MIPHY2_SOFTRESET>;
+ };
+ };
+
+
+Specifying phy control of devices
+=================================
+
+Device nodes should specify the configuration required in their "phys"
+property, containing a phandle to the miphy device node and an index
+specifying which configuration to use, as described in phy-bindings.txt.
+
+example:
+ sata0: sata@9b20000 {
+ ...
+ phys = <&phy_port0 PHY_TYPE_SATA>;
+ ...
+ };
+
+Macro definitions for the supported miphy configuration can be found in:
+
+include/dt-bindings/phy/phy-miphy28lp.h
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu.txt b/Documentation/devicetree/bindings/phy/phy-mvebu.txt
new file mode 100644
index 000000000000..f95b6260a3b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu.txt
@@ -0,0 +1,43 @@
+* Marvell MVEBU SATA PHY
+
+Power control for the SATA phy found on Marvell MVEBU SoCs.
+
+This document extends the binding described in phy-bindings.txt
+
+Required properties :
+
+ - reg : Offset and length of the register set for the SATA device
+ - compatible : Should be "marvell,mvebu-sata-phy"
+ - clocks : phandle of clock and specifier that supplies the device
+ - clock-names : Should be "sata"
+
+Example:
+ sata-phy@84000 {
+ compatible = "marvell,mvebu-sata-phy";
+ reg = <0x84000 0x0334>;
+ clocks = <&gate_clk 15>;
+ clock-names = "sata";
+ #phy-cells = <0>;
+ status = "ok";
+ };
+
+Armada 375 USB cluster
+----------------------
+
+Armada 375 comes with a USB2 host and device controller and a USB3
+controller. The USB cluster control register allows managing common
+features of both USB controllers.
+
+Required properties:
+
+- compatible: "marvell,armada-375-usb-cluster"
+- reg: Should contain usb cluster register location and length.
+- #phy-cells : from the generic phy bindings, must be 1. The possible
+phy specifier values are 1 (USB2) and 2 (USB3).
+
+Example:
+ usbcluster: usb-cluster@18400 {
+ compatible = "marvell,armada-375-usb-cluster";
+ reg = <0x18400 0x4>;
+ #phy-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt
index 15e0f2c7130f..d5bad920827f 100644
--- a/Documentation/devicetree/bindings/phy/samsung-phy.txt
+++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -128,6 +128,7 @@ Required properties:
- compatible : Should be set to one of the following supported values:
- "samsung,exynos5250-usbdrd-phy" - for exynos5250 SoC,
- "samsung,exynos5420-usbdrd-phy" - for exynos5420 SoC.
+ - "samsung,exynos7-usbdrd-phy" - for exynos7 SoC.
- reg : Register offset and length of USB DRD PHY register set;
- clocks: Clock IDs array as required by the controller
- clock-names: names of clocks correseponding to IDs in the clock property;
@@ -138,6 +139,11 @@ Required properties:
PHY operations, associated by phy name. It is used to
determine bit values for clock settings register.
For Exynos5420 this is given as 'sclk_usbphy30' in CMU.
+ - optional clocks: the Exynos7 SoC now has the following additional
+ gate clocks available (see the clocks fragment below):
+ - phy_pipe: for PIPE3 phy
+ - phy_utmi: for UTMI+ phy
+ - itp: for ITP generation
- samsung,pmu-syscon: phandle for PMU system controller interface, used to
control pmu registers for power isolation.
- #phy-cells : from the generic PHY bindings, must be 1;
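+
+An illustrative clocks fragment for an Exynos7 PHY node; the "phy" and "ref"
+names are assumed from the required clocks of this binding, and the clock
+controller phandle and indices are placeholders:
+
+	usbdrd_phy: phy@15500000 {
+		compatible = "samsung,exynos7-usbdrd-phy";
+		...
+		clocks = <&clock_fsys0 1>, <&clock_fsys0 2>, <&clock_fsys0 3>,
+			 <&clock_fsys0 4>, <&clock_fsys0 5>;
+		clock-names = "phy", "ref", "phy_pipe", "phy_utmi", "itp";
+		...
+	};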
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
index 49d0e6050940..509faa87ad0e 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
@@ -67,7 +67,7 @@ Valid values for pin and group names are:
They also all support the some form of muxing. Any pins which are contained
in one of the mux groups (see below) can be muxed only to the functions
supported by the mux group. All other pins can be muxed to the "perip"
- function which which enables them with their intended peripheral.
+ function which enables them with their intended peripheral.
Different pins in the same mux group cannot be muxed to different functions,
however it is possible to mux only a subset of the pins in a mux group to a
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
new file mode 100644
index 000000000000..17e7240c6998
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -0,0 +1,96 @@
+== Amlogic Meson pinmux controller ==
+
+Required properties for the root node:
+ - compatible: "amlogic,meson8-pinctrl"
+ - reg: address and size of registers controlling irq functionality
+
+=== GPIO sub-nodes ===
+
+The 2 power domains of the controller (regular and always-on) are
+represented as sub-nodes and each of them acts as a GPIO controller.
+
+Required properties for sub-nodes are:
+ - reg: should contain address and size for mux, pull-enable, pull and
+ gpio register sets
+ - reg-names: an array of strings describing the "reg" entries. Must
+ contain "mux", "pull" and "gpio". "pull-enable" is optional and
+ when it is missing the "pull" registers are used instead
+ - gpio-controller: identifies the node as a gpio controller
+ - #gpio-cells: must be 2
+
+Valid sub-node names are:
+ - "banks" for the regular domain
+ - "ao-bank" for the always-on domain
+
+=== Other sub-nodes ===
+
+Child nodes without the "gpio-controller" property represent some desired
+configuration for a pin or a group. Those nodes can be pinmux nodes or
+configuration nodes.
+
+Required properties for pinmux nodes are:
+ - groups: a list of pinmux groups. The list of all available groups
+ depends on the SoC and can be found in driver sources.
+ - function: the name of a function to activate for the specified set
+ of groups. The list of all available functions depends on the SoC
+ and can be found in driver sources.
+
+Required properties for configuration nodes:
+ - pins: a list of pin names
+
+Configuration nodes support the generic properties "bias-disable",
+"bias-pull-up" and "bias-pull-down", described in file
+pinctrl-bindings.txt
+
+=== Example ===
+
+ pinctrl: pinctrl@c1109880 {
+ compatible = "amlogic,meson8-pinctrl";
+ reg = <0xc1109880 0x10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio: banks@c11080b0 {
+ reg = <0xc11080b0 0x28>,
+ <0xc11080e8 0x18>,
+ <0xc1108120 0x18>,
+ <0xc1108030 0x30>;
+ reg-names = "mux", "pull", "pull-enable", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio_ao: ao-bank@c1108030 {
+ reg = <0xc8100014 0x4>,
+ <0xc810002c 0x4>,
+ <0xc8100024 0x8>;
+ reg-names = "mux", "pull", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ nand {
+ mux {
+ groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
+ "nand_io_rb0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs",
+ "nand_ce2", "nand_ce3";
+ function = "nand";
+ };
+ };
+
+ uart_ao_a {
+ mux {
+ groups = "uart_tx_ao_a", "uart_rx_ao_a",
+ "uart_cts_ao_a", "uart_rts_ao_a";
+ function = "uart_ao";
+ };
+
+ conf {
+ pins = "GPIOAO_0", "GPIOAO_1",
+ "GPIOAO_2", "GPIOAO_3";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 98eb94d91a1c..47d84b6ee91b 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -216,4 +216,4 @@ arguments are described below.
or 0 to disable debouncing
More in-depth documentation on these parameters can be found in
-<include/linux/pinctrl/pinconfig-generic.h>
+<include/linux/pinctrl/pinconf-generic.h>
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
new file mode 100644
index 000000000000..7ed08048516a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -0,0 +1,215 @@
+Qualcomm PMIC GPIO block
+
+This binding describes the GPIO block(s) found in the 8xxx series of
+PMICs from Qualcomm.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,pm8018-gpio"
+ "qcom,pm8038-gpio"
+ "qcom,pm8058-gpio"
+ "qcom,pm8917-gpio"
+ "qcom,pm8921-gpio"
+ "qcom,pm8941-gpio"
+ "qcom,pma8084-gpio"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base of the GPIO block and length.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must contain an array of encoded interrupt specifiers for
+ each available GPIO
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: Mark the device node as a GPIO controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 2;
+ the first cell will be used to define the gpio number and the
+ second denotes the flags for this gpio
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin or a list of pins. This configuration can include the
+mux function to select on those pin(s), and various pin configuration
+parameters, as listed below.
+
+
+SUBNODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio1-gpio6 for pm8018
+ gpio1-gpio12 for pm8038
+ gpio1-gpio40 for pm8058
+ gpio1-gpio38 for pm8917
+ gpio1-gpio44 for pm8921
+ gpio1-gpio36 for pm8941
+ gpio1-gpio22 for pma8084
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Valid values are:
+ "normal",
+ "paired",
+ "func1",
+ "func2",
+ "dtest1",
+ "dtest2",
+ "dtest3",
+ "dtest4"
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- qcom,pull-up-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the strength to use for pull up, if selected.
+ Valid values are, as defined in
+ <dt-bindings/pinctrl/qcom,pmic-gpio.h>:
+ 1: 30uA (PMIC_GPIO_PULL_UP_30)
+ 2: 1.5uA (PMIC_GPIO_PULL_UP_1P5)
+ 3: 31.5uA (PMIC_GPIO_PULL_UP_31P5)
+ 4: 1.5uA + 30uA boost (PMIC_GPIO_PULL_UP_1P5_30)
+ If this property is omitted, 30uA strength will be used if
+ pull up is selected (a short sketch follows the example below).
+
+- bias-high-impedance:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins will be put in high-Z mode and disabled.
+
+- input-enable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are put in input mode.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+
+- power-source:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the power source for the specified pins. Valid
+ power sources are defined per chip in
+ <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+- qcom,drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins. Valid
+ drive strengths are:
+ 0: no (PMIC_GPIO_STRENGTH_NO)
+ 1: high (PMIC_GPIO_STRENGTH_HIGH) 0.9mA @ 1.8V - 1.9mA @ 2.6V
+ 2: medium (PMIC_GPIO_STRENGTH_MED) 0.6mA @ 1.8V - 1.25mA @ 2.6V
+ 3: low (PMIC_GPIO_STRENGTH_LOW) 0.15mA @ 1.8V - 0.3mA @ 2.6V
+ as defined in <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+- drive-push-pull:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in push-pull mode.
+
+- drive-open-drain:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in open-drain mode.
+
+- drive-open-source:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in open-source mode.
+
+Example:
+
+ pm8921_gpio: gpio@150 {
+ compatible = "qcom,pm8921-gpio";
+ reg = <0x150 0x160>;
+ interrupts = <192 1>, <193 1>, <194 1>,
+ <195 1>, <196 1>, <197 1>,
+ <198 1>, <199 1>, <200 1>,
+ <201 1>, <202 1>, <203 1>,
+ <204 1>, <205 1>, <206 1>,
+ <207 1>, <208 1>, <209 1>,
+ <210 1>, <211 1>, <212 1>,
+ <213 1>, <214 1>, <215 1>,
+ <216 1>, <217 1>, <218 1>,
+ <219 1>, <220 1>, <221 1>,
+ <222 1>, <223 1>, <224 1>,
+ <225 1>, <226 1>, <227 1>,
+ <228 1>, <229 1>, <230 1>,
+ <231 1>, <232 1>, <233 1>,
+ <234 1>, <235 1>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pm8921_gpio_keys: gpio-keys {
+ volume-keys {
+ pins = "gpio20", "gpio21";
+ function = "normal";
+
+ input-enable;
+ bias-pull-up;
+ drive-push-pull;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+ };
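+
+	A smaller, illustrative sketch of qcom,pull-up-strength (the node and
+	pin names are placeholders):
+
+		pull_up_cfg: pull-up-cfg {
+			pins = "gpio5";
+			function = "normal";
+
+			bias-pull-up;
+			qcom,pull-up-strength = <PMIC_GPIO_PULL_UP_1P5>;
+		};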
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
new file mode 100644
index 000000000000..854774b194ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -0,0 +1,162 @@
+Qualcomm PMIC Multi-Purpose Pin (MPP) block
+
+This binding describes the MPP block(s) found in the 8xxx series
+of PMICs from Qualcomm.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should contain one of:
+ "qcom,pm8841-mpp",
+ "qcom,pm8941-mpp",
+ "qcom,pma8084-mpp",
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base of the MPP block and length.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Must contain an array of encoded interrupt specifiers for
+ each available MPP
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: Mark the device node as a GPIO controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 2;
+ the first cell will be used to define the MPP number and the
+ second denotes the flags for this MPP
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin or a list of pins. This configuration can include the
+mux function to select on those pin(s), and various pin configuration
+parameters, as listed below.
+
+SUBNODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of MPP pins affected by the properties specified in
+ this subnode. Valid pins are:
+ mpp1-mpp4 for pm8841
+ mpp1-mpp8 for pm8941
+ mpp1-mpp4 for pma8084
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Valid values are:
+ "normal",
+ "paired",
+ "dtest1",
+ "dtest2",
+ "dtest3",
+ "dtest4"
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <u32>
+ Definition: The specified pins should be configured as pull up.
+ Valid values are 600, 10000 and 30000 in bidirectional mode
+ only, i.e. when operating in qcom,analog-mode and input and
+ outputs are enabled. The hardware ignores the configuration
+ when operating in other modes.
+
+- bias-high-impedance:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins will be put in high-Z mode and disabled.
+
+- input-enable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are put in input mode, i.e. their input
+ buffer is enabled
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+
+- power-source:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the power source for the specified pins. Valid power
+ sources are defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+- qcom,analog-mode:
+ Usage: optional
+ Value type: <none>
+ Definition: Selects analog mode of operation. Combined with input-enable
+ and/or output-high/output-low, the MPP can operate as
+ bidirectional logic, analog input or analog output.
+
+- qcom,amux-route:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the source for analog input. Valid values are
+ defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+ PMIC_MPP_AMUX_ROUTE_CH5, PMIC_MPP_AMUX_ROUTE_CH6...
+
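+An analog-input configuration might combine these properties as in the
+following sketch (the node and pin names are illustrative):
+
+	adc_in: adc-in {
+		pins = "mpp4";
+		function = "normal";
+
+		input-enable;
+		qcom,analog-mode;
+		qcom,amux-route = <PMIC_MPP_AMUX_ROUTE_CH5>;
+	};
+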
+Example:
+
+ mpps@a000 {
+ compatible = "qcom,pm8841-mpp";
+ reg = <0xa000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <4 0xa0 0 0>, <4 0xa1 0 0>, <4 0xa2 0 0>, <4 0xa3 0 0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pm8841_default>;
+
+ pm8841_default: default {
+ gpio {
+ pins = "mpp1", "mpp2", "mpp3", "mpp4";
+ function = "normal";
+ input-enable;
+ power-source = <PM8841_MPP_S3>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index e82aaf492517..8425838a6dff 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -18,6 +18,7 @@ Required Properties:
- "samsung,exynos5250-pinctrl": for Exynos5250 compatible pin-controller.
- "samsung,exynos5260-pinctrl": for Exynos5260 compatible pin-controller.
- "samsung,exynos5420-pinctrl": for Exynos5420 compatible pin-controller.
+ - "samsung,exynos7-pinctrl": for Exynos7 compatible pin-controller.
- reg: Base address of the pin controller hardware module and length of
the address space it occupies.
@@ -136,6 +137,8 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
found on Samsung S3C64xx SoCs,
- samsung,exynos4210-wakeup-eint: represents wakeup interrupt controller
found on Samsung Exynos4210 and S5PC110/S5PV210 SoCs.
+ - samsung,exynos7-wakeup-eint: represents wakeup interrupt controller
+ found on Samsung Exynos7 SoC.
- interrupt-parent: phandle of the interrupt parent to which the external
wakeup interrupts are forwarded to.
- interrupts: interrupt used by multiplexed wakeup interrupts.
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,abx500.txt b/Documentation/devicetree/bindings/pinctrl/ste,abx500.txt
index e3865e136067..87697420439e 100644
--- a/Documentation/devicetree/bindings/pinctrl/ste,abx500.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ste,abx500.txt
@@ -8,42 +8,8 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
phrase "pin configuration node".
-ST Ericsson's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as input, output, pull up, pull down...
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Required subnode-properties:
-- ste,pins : An array of strings. Each string contains the name of a pin or
- group.
-
-Optional subnode-properties:
-- ste,function: A string containing the name of the function to mux to the
- pin or group.
-
-- generic pin configuration option to use. Example :
-
- default_cfg {
- ste,pins = "GPIO1";
- bias-disable;
- };
-
-- ste,config: Handle of pin configuration node containing the generic
- pinconfig options to use, as described in pinctrl-bindings.txt in
- this directory. Example :
-
- pcfg_bias_disable: pcfg_bias_disable {
- bias-disable;
- };
-
- default_cfg {
- ste,pins = "GPIO1";
- ste.config = <&pcfg_bias_disable>;
- };
+ST Ericsson's pin configuration nodes use the generic pin multiplexing
+and pin configuration bindings, see pinctrl-bindings.txt
Example board file extract:
@@ -54,11 +20,11 @@ Example board file extract:
sysclkreq2 {
sysclkreq2_default_mode: sysclkreq2_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq2_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq2_d_1";
};
default_cfg {
- ste,pins = "GPIO1";
+ pins = "GPIO1";
bias-disable;
};
};
@@ -66,11 +32,11 @@ Example board file extract:
sysclkreq3 {
sysclkreq3_default_mode: sysclkreq3_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq3_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq3_d_1";
};
default_cfg {
- ste,pins = "GPIO2";
+ pins = "GPIO2";
output-low;
};
};
@@ -78,11 +44,11 @@ Example board file extract:
gpio3 {
gpio3_default_mode: gpio3_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio3_a_1";
+ function = "gpio";
+ groups = "gpio3_a_1";
};
default_cfg {
- ste,pins = "GPIO3";
+ pins = "GPIO3";
output-low;
};
};
@@ -90,11 +56,11 @@ Example board file extract:
sysclkreq6 {
sysclkreq6_default_mode: sysclkreq6_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq6_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq6_d_1";
};
default_cfg {
- ste,pins = "GPIO4";
+ pins = "GPIO4";
bias-disable;
};
};
@@ -102,11 +68,11 @@ Example board file extract:
pwmout1 {
pwmout1_default_mode: pwmout1_default {
default_mux {
- ste,function = "pwmout";
- ste,pins = "pwmout1_d_1";
+ function = "pwmout";
+ groups = "pwmout1_d_1";
};
default_cfg {
- ste,pins = "GPIO14";
+ pins = "GPIO14";
output-low;
};
};
@@ -114,11 +80,11 @@ Example board file extract:
pwmout2 {
pwmout2_default_mode: pwmout2_default {
pwmout2_default_mux {
- ste,function = "pwmout";
- ste,pins = "pwmout2_d_1";
+ function = "pwmout";
+ groups = "pwmout2_d_1";
};
pwmout2_default_cfg {
- ste,pins = "GPIO15";
+ pins = "GPIO15";
output-low;
};
};
@@ -126,11 +92,11 @@ Example board file extract:
pwmout3 {
pwmout3_default_mode: pwmout3_default {
pwmout3_default_mux {
- ste,function = "pwmout";
- ste,pins = "pwmout3_d_1";
+ function = "pwmout";
+ groups = "pwmout3_d_1";
};
pwmout3_default_cfg {
- ste,pins = "GPIO16";
+ pins = "GPIO16";
output-low;
};
};
@@ -139,15 +105,15 @@ Example board file extract:
adi1_default_mode: adi1_default {
adi1_default_mux {
- ste,function = "adi1";
- ste,pins = "adi1_d_1";
+ function = "adi1";
+ groups = "adi1_d_1";
};
adi1_default_cfg1 {
- ste,pins = "GPIO17","GPIO19","GPIO20";
+ pins = "GPIO17","GPIO19","GPIO20";
bias-disable;
};
adi1_default_cfg2 {
- ste,pins = "GPIO18";
+ pins = "GPIO18";
output-low;
};
};
@@ -155,15 +121,15 @@ Example board file extract:
dmic12 {
dmic12_default_mode: dmic12_default {
dmic12_default_mux {
- ste,function = "dmic";
- ste,pins = "dmic12_d_1";
+ function = "dmic";
+ groups = "dmic12_d_1";
};
dmic12_default_cfg1 {
- ste,pins = "GPIO27";
+ pins = "GPIO27";
output-low;
};
dmic12_default_cfg2 {
- ste,pins = "GPIO28";
+ pins = "GPIO28";
bias-disable;
};
};
@@ -171,15 +137,15 @@ Example board file extract:
dmic34 {
dmic34_default_mode: dmic34_default {
dmic34_default_mux {
- ste,function = "dmic";
- ste,pins = "dmic34_d_1";
+ function = "dmic";
+ groups = "dmic34_d_1";
};
dmic34_default_cfg1 {
- ste,pins = "GPIO29";
+ pins = "GPIO29";
output-low;
};
dmic34_default_cfg2 {
- ste,pins = "GPIO30";
+ pins = "GPIO30";
bias-disable;{
};
@@ -188,15 +154,15 @@ Example board file extract:
dmic56 {
dmic56_default_mode: dmic56_default {
dmic56_default_mux {
- ste,function = "dmic";
- ste,pins = "dmic56_d_1";
+ function = "dmic";
+ groups = "dmic56_d_1";
};
dmic56_default_cfg1 {
- ste,pins = "GPIO31";
+ pins = "GPIO31";
output-low;
};
dmic56_default_cfg2 {
- ste,pins = "GPIO32";
+ pins = "GPIO32";
bias-disable;
};
};
@@ -204,11 +170,11 @@ Example board file extract:
sysclkreq5 {
sysclkreq5_default_mode: sysclkreq5_default {
sysclkreq5_default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq5_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq5_d_1";
};
sysclkreq5_default_cfg {
- ste,pins = "GPIO42";
+ pins = "GPIO42";
output-low;
};
};
@@ -216,11 +182,11 @@ Example board file extract:
batremn {
batremn_default_mode: batremn_default {
batremn_default_mux {
- ste,function = "batremn";
- ste,pins = "batremn_d_1";
+ function = "batremn";
+ groups = "batremn_d_1";
};
batremn_default_cfg {
- ste,pins = "GPIO43";
+ pins = "GPIO43";
bias-disable;
};
};
@@ -228,11 +194,11 @@ Example board file extract:
service {
service_default_mode: service_default {
service_default_mux {
- ste,function = "service";
- ste,pins = "service_d_1";
+ function = "service";
+ groups = "service_d_1";
};
service_default_cfg {
- ste,pins = "GPIO44";
+ pins = "GPIO44";
bias-disable;
};
};
@@ -240,13 +206,13 @@ Example board file extract:
pwrctrl0 {
pwrctrl0_default_mux: pwrctrl0_mux {
pwrctrl0_default_mux {
- ste,function = "pwrctrl";
- ste,pins = "pwrctrl0_d_1";
+ function = "pwrctrl";
+ groups = "pwrctrl0_d_1";
};
};
pwrctrl0_default_mode: pwrctrl0_default {
pwrctrl0_default_cfg {
- ste,pins = "GPIO45";
+ pins = "GPIO45";
bias-disable;
};
};
@@ -254,13 +220,13 @@ Example board file extract:
pwrctrl1 {
pwrctrl1_default_mux: pwrctrl1_mux {
pwrctrl1_default_mux {
- ste,function = "pwrctrl";
- ste,pins = "pwrctrl1_d_1";
+ function = "pwrctrl";
+ groups = "pwrctrl1_d_1";
};
};
pwrctrl1_default_mode: pwrctrl1_default {
pwrctrl1_default_cfg {
- ste,pins = "GPIO46";
+ pins = "GPIO46";
bias-disable;
};
};
@@ -268,11 +234,11 @@ Example board file extract:
pwmextvibra1 {
pwmextvibra1_default_mode: pwmextvibra1_default {
pwmextvibra1_default_mux {
- ste,function = "pwmextvibra";
- ste,pins = "pwmextvibra1_d_1";
+ function = "pwmextvibra";
+ groups = "pwmextvibra1_d_1";
};
pwmextvibra1_default_cfg {
- ste,pins = "GPIO47";
+ pins = "GPIO47";
bias-disable;
};
};
@@ -280,11 +246,11 @@ Example board file extract:
pwmextvibra2 {
pwmextvibra2_default_mode: pwmextvibra2_default {
pwmextvibra2_default_mux {
- ste,function = "pwmextvibra";
- ste,pins = "pwmextvibra2_d_1";
+ function = "pwmextvibra";
+ groups = "pwmextvibra2_d_1";
};
pwmextvibra1_default_cfg {
- ste,pins = "GPIO48";
+ pins = "GPIO48";
bias-disable;
};
};
@@ -292,11 +258,11 @@ Example board file extract:
gpio51 {
gpio51_default_mode: gpio51_default {
gpio51_default_mux {
- ste,function = "gpio";
- ste,pins = "gpio51_a_1";
+ function = "gpio";
+ groups = "gpio51_a_1";
};
gpio51_default_cfg {
- ste,pins = "GPIO51";
+ pins = "GPIO51";
output-low;
};
};
@@ -304,11 +270,11 @@ Example board file extract:
gpio52 {
gpio52_default_mode: gpio52_default {
gpio52_default_mux {
- ste,function = "gpio";
- ste,pins = "gpio52_a_1";
+ function = "gpio";
+ groups = "gpio52_a_1";
};
gpio52_default_cfg {
- ste,pins = "GPIO52";
+ pins = "GPIO52";
bias-pull-down;
};
};
@@ -316,11 +282,11 @@ Example board file extract:
gpio53 {
gpio53_default_mode: gpio53_default {
gpio53_default_mux {
- ste,function = "gpio";
- ste,pins = "gpio53_a_1";
+ function = "gpio";
+ groups = "gpio53_a_1";
};
gpio53_default_cfg {
- ste,pins = "GPIO53";
+ pins = "GPIO53";
bias-pull-down;
};
};
@@ -328,11 +294,11 @@ Example board file extract:
gpio54 {
gpio54_default_mode: gpio54_default {
gpio54_default_mux {
- ste,function = "gpio";
- ste,pins = "gpio54_a_1";
+ function = "gpio";
+ groups = "gpio54_a_1";
};
gpio54_default_cfg {
- ste,pins = "GPIO54";
+ pins = "GPIO54";
output-low;
};
};
@@ -340,11 +306,11 @@ Example board file extract:
pdmclkdat {
pdmclkdat_default_mode: pdmclkdat_default {
pdmclkdat_default_mux {
- ste,function = "pdm";
- ste,pins = "pdmclkdat_d_1";
+ function = "pdm";
+ groups = "pdmclkdat_d_1";
};
pdmclkdat_default_cfg {
- ste,pins = "GPIO55", "GPIO56";
+ pins = "GPIO55", "GPIO56";
bias-disable;
};
};
diff --git a/Documentation/devicetree/bindings/power_supply/gpio-charger.txt b/Documentation/devicetree/bindings/power_supply/gpio-charger.txt
new file mode 100644
index 000000000000..adbb5dc5b6e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/gpio-charger.txt
@@ -0,0 +1,27 @@
+gpio-charger
+
+Required properties :
+ - compatible : "gpio-charger"
+ - gpios : GPIO indicating the charger presence.
+ See the GPIO binding in bindings/gpio/gpio.txt.
+ - charger-type : power supply type, one of
+ unknown
+ battery
+ ups
+ mains
+ usb-sdp (USB standard downstream port)
+ usb-dcp (USB dedicated charging port)
+ usb-cdp (USB charging downstream port)
+ usb-aca (USB accessory charger adapter)
+
+Example:
+
+ usb_charger: charger {
+ compatible = "gpio-charger";
+ charger-type = "usb-sdp";
+ gpios = <&gpf0 2 0 0 0>;
+ };
+
+ battery {
+ power-supplies = <&usb_charger>;
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
new file mode 100644
index 000000000000..edeea160ca39
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
@@ -0,0 +1,534 @@
+=============================================================================
+Freescale Frame Manager Device Bindings
+
+CONTENTS
+ - FMan Node
+ - FMan Port Node
+ - FMan MURAM Node
+ - FMan dTSEC/XGEC/mEMAC Node
+ - FMan IEEE 1588 Node
+ - Example
+
+=============================================================================
+FMan Node
+
+DESCRIPTION
+
+Because the FMan is an aggregation of sub-engines (ports, MACs, etc.), the
+FMan node will have child nodes for each of them.
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must include "fsl,fman"
+ FMan version can be determined via FM_IP_REV_1 register in the
+ FMan block. The offset is 0xc4 from the beginning of the
+ Frame Processing Manager memory map (0xc3000 from the
+ beginning of the FMan node).
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Specifies the index of the FMan unit.
+
+ The cell-index value may be used by the SoC to identify the
+ FMan unit in the SoC memory map. The table below describes
+ the cell-index use in each SoC:
+
+ - P1023:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DEVDISR[1] 1 0
+
+ - P2041, P3041, P4080, P5020, P5040:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DCFG_DEVDISR2[6] 1 0
+ DCFG_DEVDISR2[14] 2 1
+ (Second FM available only in P4080 and P5040)
+
+ - B4860, T1040, T2080, T4240:
+ register[bit] FMan unit cell-index
+ ============================================================
+ DCFG_CCSR_DEVDISR2[24] 1 0
+ DCFG_CCSR_DEVDISR2[25] 2 1
+ (Second FM available only in T4240)
+
+ DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
+ the specific SoC "Device Configuration/Pin Control" Memory
+ Map.
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the offset of the
+ following configuration registers:
+ - BMI configuration registers.
+ - QMI configuration registers.
+ - DMA configuration registers.
+ - FPM configuration registers.
+ - FMan controller configuration registers.
+
+- ranges
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property.
+
+- clocks
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: phandle for the fman input clock.
+
+- clock-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: "fmanclk" for the fman input clock.
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A pair of IRQs are specified in this property.
+ The first element is associated with the event interrupts and
+ the second element is associated with the error interrupts.
+
+- fsl,qman-channel-range
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Specifies the range of the available dedicated
+ channels in the FMan. The first cell specifies the beginning
+ of the range and the second cell specifies the number of
+ channels.
+ Further information available at:
+ "Work Queue (WQ) Channel Assignments in the QMan" section
+ in DPAA Reference Manual.
+
+- fsl,qman
+- fsl,bman
+ Usage: required
+ Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
+
+=============================================================================
+FMan MURAM Node
+
+DESCRIPTION
+
+FMan Internal memory - shared between all the FMan modules.
+It contains data structures that are common and written to or read by
+the modules.
+FMan internal memory is split into the following parts:
+ Packet buffering (Tx/Rx FIFOs)
+ Frames internal context
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must include "fsl,fman-muram"
+
+- ranges
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property.
+ Specifies the multi-user memory offset and the size within
+ the FMan.
+
+EXAMPLE
+
+muram@0 {
+ compatible = "fsl,fman-muram";
+ ranges = <0 0x000000 0x28000>;
+};
+
+=============================================================================
+FMan Port Node
+
+DESCRIPTION
+
+The Frame Manager (FMan) supports several types of hardware ports:
+ Ethernet receiver (RX)
+ Ethernet transmitter (TX)
+ Offline/Host command (O/H)
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: A standard property.
+ Must include one of the following:
+ - "fsl,fman-v2-port-oh" for FManV2 OH ports
+ - "fsl,fman-v2-port-rx" for FManV2 RX ports
+ - "fsl,fman-v2-port-tx" for FManV2 TX ports
+ - "fsl,fman-v3-port-oh" for FManV3 OH ports
+ - "fsl,fman-v3-port-rx" for FManV3 RX ports
+ - "fsl,fman-v3-port-tx" for FManV3 TX ports
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Specifies the hardware port id.
+ Each hardware port on the FMan has its own hardware PortID.
+ The superset of all hardware Port IDs is available in the FMan
+ Reference Manual, in the "FMan Hardware Ports in Freescale
+ Devices" table.
+
+ Each hardware port is assigned a 4KB, port-specific page in
+ the FMan hardware port memory region (which is part of the
+ FMan memory map). The first 4 KB in the FMan hardware ports
+ memory region is used for what are called common registers.
+ The subsequent 63 4KB pages are allocated to the hardware
+ ports.
+ The page of a specific port is determined by the cell-index.
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: There is one reg region describing the port
+ configuration registers.
+
+EXAMPLE
+
+port@a8000 {
+ cell-index = <0x28>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa8000 0x1000>;
+};
+
+port@88000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x88000 0x1000>;
+};
+
+port@81000 {
+ cell-index = <0x1>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x81000 0x1000>;
+};
+
+=============================================================================
+FMan dTSEC/XGEC/mEMAC Node
+
+DESCRIPTION
+
+mEMAC/dTSEC/XGEC are the Ethernet network interfaces
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: A standard property.
+ Must include one of the following:
+ - "fsl,fman-dtsec" for dTSEC MAC
+ - "fsl,fman-xgec" for XGEC MAC
+ - "fsl,fman-memac for mEMAC MAC
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Specifies the MAC id.
+
+ The cell-index value may be used by the FMan or the SoC to
+ identify the MAC unit in the FMan (or SoC) memory map.
+ The two tables below describe the cell-index use: the first
+ describes its use by the FMan, the second its use by the SoC:
+
+ 1. FMan Registers
+
+ FManV2:
+ register[bit] MAC cell-index
+ ============================================================
+ FM_EPI[16] XGEC 8
+ FM_EPI[16+n] dTSECn n-1
+ FM_NPI[11+n] dTSECn n-1
+ n = 1,..,5
+
+ FManV3:
+ register[bit] MAC cell-index
+ ============================================================
+ FM_EPI[16+n] mEMACn n-1
+ FM_EPI[25] mEMAC10 9
+
+ FM_NPI[11+n] mEMACn n-1
+ FM_NPI[10] mEMAC10 9
+ FM_NPI[11] mEMAC9 8
+ n = 1,..8
+
+ FM_EPI and FM_NPI are located in the FMan memory map.
+
+ 2. SoC registers:
+
+ - P2041, P3041, P4080, P5020, P5040:
+ register[bit] FMan MAC cell
+ Unit index
+ ============================================================
+ DCFG_DEVDISR2[7] 1 XGEC 8
+ DCFG_DEVDISR2[7+n] 1 dTSECn n-1
+ DCFG_DEVDISR2[15] 2 XGEC 8
+ DCFG_DEVDISR2[15+n] 2 dTSECn n-1
+ n = 1,..5
+
+ - T1040, T2080, T4240, B4860:
+ register[bit] FMan MAC cell
+ Unit index
+ ============================================================
+ DCFG_CCSR_DEVDISR2[n-1] 1 mEMACn n-1
+ DCFG_CCSR_DEVDISR2[11+n] 2 mEMACn n-1
+ n = 1,..6,9,10
+
+ DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
+ the specific SoC "Device Configuration/Pin Control" Memory
+ Map.
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property.
+
+- fsl,fman-ports
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: An array of two phandles - the first references
+ the FMan RX port and the second references the TX port used
+ by this MAC.
+
+- ptp-timer
+ Usage: required
+ Value type: <phandle>
+ Definition: A phandle for the IEEE 1588 timer.
+
+EXAMPLE
+
+fman1_tx28: port@a8000 {
+ cell-index = <0x28>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa8000 0x1000>;
+};
+
+fman1_rx8: port@88000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x88000 0x1000>;
+};
+
+ptp-timer: ptp_timer@fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0xfe000 0x1000>;
+};
+
+ethernet@e0000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <0>;
+ reg = <0xe0000 0x1000>;
+ fsl,fman-ports = <&fman1_rx8 &fman1_tx28>;
+ ptp-timer = <&ptp-timer>;
+};
+
+============================================================================
+FMan IEEE 1588 Node
+
+DESCRIPTION
+
+The FMan interface to support IEEE 1588
+
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: A standard property.
+ Must include "fsl,fman-ptp-timer".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property.
+
+EXAMPLE
+
+ptp-timer@fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0xfe000 0x1000>;
+};
+
+=============================================================================
+Example
+
+fman@400000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ compatible = "fsl,fman"
+ ranges = <0 0x400000 0x100000>;
+ reg = <0x400000 0x100000>;
+ clocks = <&fman_clk>;
+ clock-names = "fmanclk";
+ interrupts = <
+ 96 2 0 0
+ 16 2 1 1>;
+ fsl,qman-channel-range = <0x40 0xc>;
+
+ muram@0 {
+ compatible = "fsl,fman-muram";
+ reg = <0x0 0x28000>;
+ };
+
+ port@81000 {
+ cell-index = <1>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x81000 0x1000>;
+ };
+
+ port@82000 {
+ cell-index = <2>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x82000 0x1000>;
+ };
+
+ port@83000 {
+ cell-index = <3>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x83000 0x1000>;
+ };
+
+ port@84000 {
+ cell-index = <4>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x84000 0x1000>;
+ };
+
+ port@85000 {
+ cell-index = <5>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x85000 0x1000>;
+ };
+
+ port@86000 {
+ cell-index = <6>;
+ compatible = "fsl,fman-v2-port-oh";
+ reg = <0x86000 0x1000>;
+ };
+
+ fman1_rx_0x8: port@88000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x88000 0x1000>;
+ };
+
+ fman1_rx_0x9: port@89000 {
+ cell-index = <0x9>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x89000 0x1000>;
+ };
+
+ fman1_rx_0xa: port@8a000 {
+ cell-index = <0xa>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x8a000 0x1000>;
+ };
+
+ fman1_rx_0xb: port@8b000 {
+ cell-index = <0xb>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x8b000 0x1000>;
+ };
+
+ fman1_rx_0xc: port@8c000 {
+ cell-index = <0xc>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x8c000 0x1000>;
+ };
+
+ fman1_rx_0x10: port@90000 {
+ cell-index = <0x10>;
+ compatible = "fsl,fman-v2-port-rx";
+ reg = <0x90000 0x1000>;
+ };
+
+ fman1_tx_0x28: port@a8000 {
+ cell-index = <0x28>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa8000 0x1000>;
+ };
+
+ fman1_tx_0x29: port@a9000 {
+ cell-index = <0x29>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xa9000 0x1000>;
+ };
+
+ fman1_tx_0x2a: port@aa000 {
+ cell-index = <0x2a>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xaa000 0x1000>;
+ };
+
+ fman1_tx_0x2b: port@ab000 {
+ cell-index = <0x2b>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xab000 0x1000>;
+ };
+
+ fman1_tx_0x2c: port@ac000 {
+ cell-index = <0x2c>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xac000 0x1000>;
+ };
+
+ fman1_tx_0x30: port@b0000 {
+ cell-index = <0x30>;
+ compatible = "fsl,fman-v2-port-tx";
+ reg = <0xb0000 0x1000>;
+ };
+
+ ethernet@e0000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <0>;
+ reg = <0xe0000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>;
+ };
+
+ ethernet@e2000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <1>;
+ reg = <0xe2000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0x9 &fman1_tx_0x29>;
+ };
+
+ ethernet@e4000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <2>;
+ reg = <0xe4000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0xa &fman1_tx_0x2a>;
+ };
+
+ ethernet@e6000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <3>;
+ reg = <0xe6000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0xb &fman1_tx_0x2b>;
+ };
+
+ ethernet@e8000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <4>;
+ reg = <0xe8000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0xc &fman1_tx_0x2c>;
+ };
+
+ ethernet@f0000 {
+ cell-index = <8>;
+ compatible = "fsl,fman-xgec";
+ reg = <0xf0000 0x1000>;
+ fsl,fman-ports = <&fman1_rx_0x10 &fman1_tx_0x30>;
+ };
+
+ ptp-timer@fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0xfe000 0x1000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
new file mode 100644
index 000000000000..cfda0d57d302
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for Atmel's HLCDC (High-end LCD Controller) PWM driver
+
+The Atmel HLCDC PWM is a subdevice of the HLCDC MFD device.
+See ../mfd/atmel-hlcdc.txt for more details.
+
+Required properties:
+ - compatible: value should be one of the following:
+ "atmel,hlcdc-pwm"
+ - pinctrl-names: the pin control state names. Should contain "default".
+ - pinctrl-0: should contain the pinctrl states described by pinctrl
+ default.
+ - #pwm-cells: should be set to 3. This PWM chip uses the default three-cell
+ binding defined in pwm.txt in this directory.
+
+Example:
+
+ hlcdc: hlcdc@f0030000 {
+ compatible = "atmel,sama5d3-hlcdc";
+ reg = <0xf0030000 0x2000>;
+ clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clock-names = "periph_clk","sys_clk", "slow_clk";
+
+ hlcdc_pwm: hlcdc-pwm {
+ compatible = "atmel,hlcdc-pwm";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_pwm>;
+ #pwm-cells = <3>;
+ };
+ };
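+
+The PWM can then be referenced by a consumer using the three-cell specifier
+(PWM index, period in nanoseconds, flags) described in pwm.txt. The backlight
+node below is only a hypothetical sketch of such a consumer; its values and
+the PWM_POLARITY_INVERTED flag (from dt-bindings/pwm/pwm.h) are illustrative,
+not part of this binding:
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ /* PWM 0 of the HLCDC, 50 us period, inverted polarity */
+ pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };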
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
new file mode 100644
index 000000000000..fb6fb31bc4c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -0,0 +1,30 @@
+BCM2835 PWM controller (Raspberry Pi controller)
+
+Required properties:
+- compatible: should be "brcm,bcm2835-pwm"
+- reg: physical base address and length of the controller's registers
+- clocks: This clock defines the base clock frequency of the PWM hardware
+ system; the period and the duty_cycle of the PWM signal are multiples of
+ the base period.
+- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+ the cells format.
+
+Examples:
+
+pwm@2020c000 {
+ compatible = "brcm,bcm2835-pwm";
+ reg = <0x2020c000 0x28>;
+ clocks = <&clk_pwm>;
+ #pwm-cells = <2>;
+};
+
+clocks {
+ ....
+ clk_pwm: pwm {
+ compatible = "fixed-clock";
+ reg = <3>;
+ #clock-cells = <0>;
+ clock-frequency = <9200000>;
+ };
+ ....
+};
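+
+With #pwm-cells = <2>, a consumer passes the PWM channel and the period in
+nanoseconds. The node below is a hypothetical consumer sketch only; it assumes
+the controller node above has been given a label such as "pwm", and the values
+are purely illustrative:
+
+backlight {
+ compatible = "pwm-backlight";
+ /* channel 0 of the BCM2835 PWM, 5 ms period */
+ pwms = <&pwm 0 5000000>;
+ brightness-levels = <0 16 64 255>;
+ default-brightness-level = <2>;
+};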
diff --git a/Documentation/devicetree/bindings/rtc/rtc-opal.txt b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
new file mode 100644
index 000000000000..af87e5ecac54
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
@@ -0,0 +1,16 @@
+IBM OPAL real-time clock
+------------------------
+
+Required properties:
+- compatible: Should be "ibm,opal-rtc"
+
+Optional properties:
+- has-tpo: Indicates whether wakeup is supported.
+
+Example:
+ rtc {
+ compatible = "ibm,opal-rtc";
+ has-tpo;
+ phandle = <0x10000029>;
+ linux,phandle = <0x10000029>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/bcm63xx-uart.txt b/Documentation/devicetree/bindings/serial/bcm63xx-uart.txt
new file mode 100644
index 000000000000..5c52e5eef16d
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/bcm63xx-uart.txt
@@ -0,0 +1,30 @@
+* BCM63xx UART
+
+Required properties:
+
+- compatible: "brcm,bcm6345-uart"
+
+- reg: The base address of the UART register bank.
+
+- interrupts: A single interrupt specifier.
+
+- clocks: Clock driving the hardware; used to figure out the baud rate
+ divisor.
+
+Example:
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ };
+
+ clocks {
+ periph_clk: periph_clk@0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt
index 59a40f18d551..7c408c87e613 100644
--- a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt
@@ -11,8 +11,13 @@ Required properties:
- dma-names: "rx" for RX channel, "tx" for TX channel.
Optional properties:
-- fsl,uart-has-rtscts : Indicate the UART has RTS and CTS lines,
+- fsl,uart-has-rtscts : Indicates the UART has RTS and CTS lines
+ for hardware flow control,
it also means you enable the DMA support for this UART.
+- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for the RTS/CTS/DTR/DSR/RI/DCD
+ line respectively. The specified GPIO will be used instead of the peripheral
+ function pin for that USART signal.
+ If unsure, don't specify this property.
Example:
auart0: serial@8006a000 {
@@ -21,6 +26,9 @@ auart0: serial@8006a000 {
interrupts = <112>;
dmas = <&dma_apbx 8>, <&dma_apbx 9>;
dma-names = "rx", "tx";
+ cts-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+ dsr-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+ dcd-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};
Note: Each auart port should have an alias correctly numbered in "aliases"
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/of-serial.txt
index 8c4fd0332028..b52b98234b9b 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/of-serial.txt
@@ -10,6 +10,7 @@ Required properties:
- "ns16850"
- "nvidia,tegra20-uart"
- "nxp,lpc3220-uart"
+ - "ralink,rt2880-uart"
- "ibm,qpace-nwp-serial"
- "altr,16550-FIFO32"
- "altr,16550-FIFO64"
diff --git a/Documentation/devicetree/bindings/serial/pl011.txt b/Documentation/devicetree/bindings/serial/pl011.txt
index 5d2e840ae65c..ba3ecb8cb5a1 100644
--- a/Documentation/devicetree/bindings/serial/pl011.txt
+++ b/Documentation/devicetree/bindings/serial/pl011.txt
@@ -6,12 +6,46 @@ Required properties:
- interrupts: exactly one interrupt specifier
Optional properties:
-- pinctrl: When present, must have one state named "sleep"
- and one state named "default"
-- clocks: When present, must refer to exactly one clock named
+- pinctrl:
+ When present, must have one state named "default",
+ and may contain a second state named "sleep". The former
+ state sets up pins for ordinary operation whereas
+ the latter state will put the associated pins to sleep
+ when the UART is unused
+- clocks:
+ When present, the first clock listed must correspond to
+ the clock named UARTCLK on the IP block, i.e. the clock
+ to the external serial line, whereas the second clock
+ must correspond to the PCLK clocking the internal logic
+ of the block. Just listing one clock (the first one) is
+ deprecated.
+- clock-names:
+ When present, the first clock listed must be named
+ "uartclk" and the second clock listed must be named
"apb_pclk"
-- dmas: When present, may have one or two dma channels.
+- dmas:
+ When present, may have one or two dma channels.
The first one must be named "rx", the second one
must be named "tx".
+- auto-poll:
+ Enables polling when using RX DMA.
+- poll-rate-ms:
+ Rate at which poll occurs when auto-poll is set,
+ default 100ms.
+- poll-timeout-ms:
+ Poll timeout when auto-poll is set, default
+ 3000ms.
See also bindings/arm/primecell.txt
+
+Example:
+
+uart@80120000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80120000 0x1000>;
+ interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dma 13 0 0x2>, <&dma 13 0 0x0>;
+ dma-names = "rx", "tx";
+ clocks = <&foo_clk>, <&bar_clk>;
+ clock-names = "uartclk", "apb_pclk";
+};
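+
+A hypothetical variant with RX DMA polling enabled (auto-poll is used as a
+boolean flag here; the address, interrupt and DMA values are made up for
+illustration and the poll values shown are the documented defaults):
+
+uart@80121000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80121000 0x1000>;
+ interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dma 14 0 0x2>, <&dma 14 0 0x0>;
+ dma-names = "rx", "tx";
+ clocks = <&foo_clk>, <&bar_clk>;
+ clock-names = "uartclk", "apb_pclk";
+ auto-poll;
+ poll-rate-ms = <100>;
+ poll-timeout-ms = <3000>;
+};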
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
index ffa5b784c66e..a2114c217376 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
@@ -27,27 +27,52 @@ Optional properties:
- dmas: Should contain dma specifiers for transmit and receive channels
- dma-names: Should contain "tx" for transmit and "rx" for receive channels
+Note: Aliases may be defined to ensure the correct ordering of the UARTs.
+The alias serialN will result in the UART being assigned port N. If any
+serialN alias exists, then an alias must exist for each enabled UART. The
+serialN aliases should be in a .dts file instead of in a .dtsi file.
+
Examples:
-A uartdm v1.4 device with dma capabilities.
-
-serial@f991e000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0xf991e000 0x1000>;
- interrupts = <0 108 0x0>;
- clocks = <&blsp1_uart2_apps_cxc>, <&blsp1_ahb_cxc>;
- clock-names = "core", "iface";
- dmas = <&dma0 0>, <&dma0 1>;
- dma-names = "tx", "rx";
-};
-
-A uartdm v1.3 device without dma capabilities and part of a GSBI complex.
-
-serial@19c40000 {
- compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
- reg = <0x19c40000 0x1000>,
- <0x19c00000 0x1000>;
- interrupts = <0 195 0x0>;
- clocks = <&gsbi5_uart_cxc>, <&gsbi5_ahb_cxc>;
- clock-names = "core", "iface";
-};
+- A uartdm v1.4 device with dma capabilities.
+
+ serial@f991e000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf991e000 0x1000>;
+ interrupts = <0 108 0x0>;
+ clocks = <&blsp1_uart2_apps_cxc>, <&blsp1_ahb_cxc>;
+ clock-names = "core", "iface";
+ dmas = <&dma0 0>, <&dma0 1>;
+ dma-names = "tx", "rx";
+ };
+
+- A uartdm v1.3 device without dma capabilities and part of a GSBI complex.
+
+ serial@19c40000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x19c40000 0x1000>,
+ <0x19c00000 0x1000>;
+ interrupts = <0 195 0x0>;
+ clocks = <&gsbi5_uart_cxc>, <&gsbi5_ahb_cxc>;
+ clock-names = "core", "iface";
+ };
+
+- serialN alias.
+
+ aliases {
+ serial0 = &uarta;
+ serial1 = &uartc;
+ serial2 = &uartb;
+ };
+
+ uarta: serial@12490000 {
+ status = "ok";
+ };
+
+ uartb: serial@16340000 {
+ status = "ok";
+ };
+
+ uartc: serial@1a240000 {
+ status = "ok";
+ };
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index b3556609a06f..ae73bb0e9ad9 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -4,8 +4,7 @@ Required properties:
- compatible: Must contain one of the following:
- - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
- - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
+ - "renesas,scif-r7s72100" for R7S72100 (RZ/A1H) SCIF compatible UART.
- "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
@@ -20,6 +19,12 @@ Required properties:
- "renesas,scifa-r8a7791" for R8A7791 (R-Car M2) SCIFA compatible UART.
- "renesas,scifb-r8a7791" for R8A7791 (R-Car M2) SCIFB compatible UART.
- "renesas,hscif-r8a7791" for R8A7791 (R-Car M2) HSCIF compatible UART.
+ - "renesas,scif-r8a7794" for R8A7794 (R-Car E2) SCIF compatible UART.
+ - "renesas,scifa-r8a7794" for R8A7794 (R-Car E2) SCIFA compatible UART.
+ - "renesas,scifb-r8a7794" for R8A7794 (R-Car E2) SCIFB compatible UART.
+ - "renesas,hscif-r8a7794" for R8A7794 (R-Car E2) HSCIF compatible UART.
+ - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
+ - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
- "renesas,scif" for generic SCIF compatible UART.
- "renesas,scifa" for generic SCIFA compatible UART.
- "renesas,scifb" for generic SCIFB compatible UART.
diff --git a/Documentation/devicetree/bindings/serial/sirf-uart.txt b/Documentation/devicetree/bindings/serial/sirf-uart.txt
index a2dfc6522a91..3acdd969edf1 100644
--- a/Documentation/devicetree/bindings/serial/sirf-uart.txt
+++ b/Documentation/devicetree/bindings/serial/sirf-uart.txt
@@ -1,7 +1,9 @@
* CSR SiRFprimaII/atlasVI Universal Synchronous Asynchronous Receiver/Transmitter *
Required properties:
-- compatible : Should be "sirf,prima2-uart" or "sirf, prima2-usp-uart"
+- compatible : Should be "sirf,prima2-uart", "sirf, prima2-usp-uart",
+ "sirf,marco-uart" or "sirf,marco-bt-uart" which means
+ uart located in BT module and used for BT.
- reg : Offset and length of the register set for the device
- interrupts : Should contain uart interrupt
- fifosize : Should define hardware rx/tx fifo size
@@ -31,3 +33,15 @@ usp@b0090000 {
rts-gpios = <&gpio 15 0>;
cts-gpios = <&gpio 46 0>;
};
+
+For UART use in the BT module:
+uart6: uart@11000000 {
+ cell-index = <6>;
+ compatible = "sirf,marco-bt-uart", "sirf,marco-uart";
+ reg = <0x11000000 0x1000>;
+ interrupts = <0 100 0>;
+ clocks = <&clks 138>, <&clks 140>, <&clks 141>;
+ clock-names = "uart", "general", "noc";
+ fifosize = <128>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt b/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt
new file mode 100644
index 000000000000..2a00e14e11e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt
@@ -0,0 +1,56 @@
+QorIQ DPAA Buffer Manager Portals Device Tree Binding
+
+Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
+
+CONTENTS
+
+ - BMan Portal
+ - Example
+
+BMan Portal Node
+
+Portals are memory-mapped interfaces to BMan that allow software running on
+processor cores, accelerators and network interfaces to interact with BMan
+in a low-latency, lock-less manner
+
+PROPERTIES
+
+- compatible
+ Usage: Required
+ Value type: <stringlist>
+ Definition: Must include "fsl,bman-portal-<hardware revision>"
+ May include "fsl,<SoC>-bman-portal" or "fsl,bman-portal"
+
+- reg
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Two regions. The first is the cache-enabled region of
+ the portal. The second is the cache-inhibited region of
+ the portal
+
+- interrupts
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Standard property
+
+EXAMPLE
+
+The example below shows a (P4080) BMan portals container/bus node with two portals
+
+ bman-portals@ff4000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0xf 0xf4000000 0x200000>;
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal-1.0.0", "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal-1.0.0", "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/bman.txt b/Documentation/devicetree/bindings/soc/fsl/bman.txt
new file mode 100644
index 000000000000..9f80bf8709ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/bman.txt
@@ -0,0 +1,125 @@
+QorIQ DPAA Buffer Manager Device Tree Bindings
+
+Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
+
+CONTENTS
+
+ - BMan Node
+ - BMan Private Memory Node
+ - Example
+
+BMan Node
+
+The Buffer Manager is part of the Data-Path Acceleration Architecture (DPAA).
+BMan supports hardware allocation and deallocation of buffers belonging to pools
+originally created by software with configurable depletion thresholds. This
+binding covers the CCSR space programming model
+
+PROPERTIES
+
+- compatible
+ Usage: Required
+ Value type: <stringlist>
+ Definition: Must include "fsl,bman"
+ May include "fsl,<SoC>-bman"
+
+- reg
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Registers region within the CCSR address space
+
+The BMan revision information is located in the BMAN_IP_REV_1/2 registers which
+are located at offsets 0xbf8 and 0xbfc
+
+- interrupts
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Standard property. The error interrupt
+
+- fsl,liodn
+ Usage: See pamu.txt
+ Value type: <prop-encoded-array>
+ Definition: PAMU property used for static LIODN assignment
+
+- fsl,iommu-parent
+ Usage: See pamu.txt
+ Value type: <phandle>
+ Definition: PAMU property used for dynamic LIODN assignment
+
+ For additional details about the PAMU/LIODN binding(s) see pamu.txt
+
+Devices connected to a BMan instance via Direct Connect Portals (DCP) must link
+to the respective BMan instance
+
+- fsl,bman
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Description: List of phandle and DCP index pairs, to the BMan instance
+ to which this device is connected via the DCP
+
+BMan Private Memory Node
+
+BMan requires a contiguous range of physical memory used for the backing store
+for BMan Free Buffer Proxy Records (FBPR). This memory is reserved/allocated as a
+node under the /reserved-memory node
+
+The BMan FBPR memory node must be named "bman-fbpr"
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must include "fsl,bman-fbpr"
+
+The following constraints are relevant to the FBPR private memory:
+ - The size must be 2^(size + 1), with size = 11..33. That is 4 KiB to
+ 16 GiB
+ - The alignment must be a multiple of the memory size
+
+For example, the 16 MiB (2^24) FBPR region shown in the example below
+corresponds to size = 23 and must be aligned on a 16 MiB boundary
+
+The size of the FBPR must be chosen by observing the hardware features configured
+via the Reset Configuration Word (RCW) that are relevant to a specific board
+(e.g. number of MAC(s) pinned-out, number of offline/host command FMan ports,
+etc.). The size configured in the DT must reflect the hardware capabilities and
+not the specific needs of an application
+
+For additional details about reserved memory regions see reserved-memory.txt
+
+EXAMPLE
+
+The example below shows a BMan FBPR dynamic allocation memory node
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0xf 0xffffffff>;
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
+The example below shows a (P4080) BMan CCSR-space node
+
+ crypto@300000 {
+ ...
+ fsl,bman = <&bman 2>;
+ ...
+ };
+
+ bman: bman@31a000 {
+ compatible = "fsl,bman";
+ reg = <0x31a000 0x1000>;
+ interrupts = <16 2 1 2>;
+ fsl,liodn = <0x17>;
+ memory-region = <&bman_fbpr>;
+ };
+
+ fman@400000 {
+ ...
+ fsl,bman = <&bman 0>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt b/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt
new file mode 100644
index 000000000000..48c4dae5d6f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt
@@ -0,0 +1,154 @@
+QorIQ DPAA Queue Manager Portals Device Tree Binding
+
+Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
+
+CONTENTS
+
+ - QMan Portal
+ - QMan Pool Channel
+ - Example
+
+QMan Portal Node
+
+Portals are memory-mapped interfaces to QMan that allow software running on
+processor cores, accelerators and network interfaces to interact with QMan
+in a low-latency, lock-less manner
+
+PROPERTIES
+
+- compatible
+ Usage: Required
+ Value type: <stringlist>
+ Definition: Must include "fsl,qman-portal-<hardware revision>"
+ May include "fsl,<SoC>-qman-portal" or "fsl,qman-portal"
+
+- reg
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Two regions. The first is the cache-enabled region of
+ the portal. The second is the cache-inhibited region of
+ the portal
+
+- interrupts
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Standard property
+
+- fsl,liodn
+ Usage: See pamu.txt
+ Value type: <prop-encoded-array>
+ Definition: Two LIODN(s). DQRR LIODN (DLIODN) and Frame LIODN
+ (FLIODN)
+
+- fsl,iommu-parent
+ Usage: See pamu.txt
+ Value type: <phandle>
+ Definition: PAMU property used for dynamic LIODN assignment
+
+ For additional details about the PAMU/LIODN binding(s) see pamu.txt
+
+- fsl,qman-channel-id
+ Usage: Required
+ Value type: <u32>
+ Definition: The hardware index of the channel. This can also be
+ determined by dividing any of the channel's 8 work queue
+ IDs by 8
+
+In addition to these properties the qman-portals should have sub-nodes to
+represent the HW devices/portals that are connected to the software portal
+described here
+
+The currently supported sub-nodes are:
+ * fman0
+ * fman1
+ * pme
+ * crypto
+
+These subnodes should have the following properties:
+
+- fsl,liodn
+ Usage: See pamu.txt
+ Value type: <prop-encoded-array>
+ Definition: PAMU property used for static LIODN assignment
+
+- fsl,iommu-parent
+ Usage: See pamu.txt
+ Value type: <phandle>
+ Definition: PAMU property used for dynamic LIODN assignment
+
+- dev-handle
+ Usage: Required
+ Value type: <phandle>
+ Definition: The phandle to the particular hardware device that this
+ portal is connected to.
+
+DPAA QMan Pool Channel Nodes
+
+Pool Channels are defined with the following properties.
+
+PROPERTIES
+
+- compatible
+ Usage: Required
+ Value type: <stringlist>
+ Definition: Must include "fsl,qman-pool-channel"
+ May include "fsl,<SoC>-qman-pool-channel"
+
+- fsl,qman-channel-id
+ Usage: Required
+ Value type: <u32>
+ Definition: The hardware index of the channel. This can also be
+ determined by dividing any of the channel's 8 work queue
+ IDs by 8
+
+EXAMPLE
+
+The example below shows a (P4080) QMan portals container/bus node with two portals
+
+ qman-portals@ff4200000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0xf 0xf4200000 0x200000>;
+
+ qman-portal@0 {
+ compatible = "fsl,qman-portal-1.2.0", "fsl,qman-portal";
+ reg = <0 0x4000>, <0x100000 0x1000>;
+ interrupts = <104 2 0 0>;
+ fsl,liodn = <1 2>;
+ fsl,qman-channel-id = <0>;
+
+ fman0 {
+ fsl,liodn = <0x21>;
+ dev-handle = <&fman0>;
+ };
+ fman1 {
+ fsl,liodn = <0xa1>;
+ dev-handle = <&fman1>;
+ };
+ crypto {
+ fsl,liodn = <0x41 0x66>;
+ dev-handle = <&crypto>;
+ };
+ };
+ qman-portal@4000 {
+ compatible = "fsl,qman-portal-1.2.0", "fsl,qman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <106 2 0 0>;
+ fsl,liodn = <3 4>;
+ fsl,qman-channel-id = <1>;
+
+ fman0 {
+ fsl,liodn = <0x22>;
+ dev-handle = <&fman0>;
+ };
+ fman1 {
+ fsl,liodn = <0xa2>;
+ dev-handle = <&fman1>;
+ };
+ crypto {
+ fsl,liodn = <0x42 0x67>;
+ dev-handle = <&crypto>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/fsl/qman.txt b/Documentation/devicetree/bindings/soc/fsl/qman.txt
new file mode 100644
index 000000000000..063e3a0b9d04
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/qman.txt
@@ -0,0 +1,165 @@
+QorIQ DPAA Queue Manager Device Tree Binding
+
+Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
+
+CONTENTS
+
+ - QMan Node
+ - QMan Private Memory Nodes
+ - Example
+
+QMan Node
+
+The Queue Manager is part of the Data-Path Acceleration Architecture (DPAA). QMan
+supports queuing and QoS scheduling of frames to CPUs, network interfaces and
+DPAA logic modules, and maintains packet ordering within flows. Besides providing
+flow-level queuing, it is also responsible for congestion management functions
+such as RED/WRED, congestion notifications and tail discards. This binding covers
+the CCSR space programming model
+
+PROPERTIES
+
+- compatible
+ Usage: Required
+ Value type: <stringlist>
+ Definition: Must include "fsl,qman"
+ May include "fsl,<SoC>-qman"
+
+- reg
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Registers region within the CCSR address space
+
+The QMan revision information is located in the QMAN_IP_REV_1/2 registers which
+are located at offsets 0xbf8 and 0xbfc
+
+- interrupts
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Definition: Standard property. The error interrupt
+
+- fsl,liodn
+ Usage: See pamu.txt
+ Value type: <prop-encoded-array>
+ Definition: PAMU property used for static LIODN assignment
+
+- fsl,iommu-parent
+ Usage: See pamu.txt
+ Value type: <phandle>
+ Definition: PAMU property used for dynamic LIODN assignment
+
+ For additional details about the PAMU/LIODN binding(s) see pamu.txt
+
+- clocks
+ Usage: See clock-bindings.txt and qoriq-clock.txt
+ Value type: <prop-encoded-array>
+ Definition: Reference input clock. Its frequency is half of the
+ platform clock
+
+Devices connected to a QMan instance via Direct Connect Portals (DCP) must link
+to the respective QMan instance
+
+- fsl,qman
+ Usage: Required
+ Value type: <prop-encoded-array>
+ Description: List of phandle and DCP index pairs, to the QMan instance
+ to which this device is connected via the DCP
+
+QMan Private Memory Nodes
+
+QMan requires two contiguous ranges of physical memory used as the backing store
+for QMan Frame Queue Descriptors (FQD) and Packed Frame Descriptor Records (PFDR).
+This memory is reserved/allocated as nodes under the /reserved-memory node
+
+The QMan FQD memory node must be named "qman-fqd"
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must inclide "fsl,qman-fqd"
+
+The QMan PFDR memory node must be named "qman-pfdr"
+
+PROPERTIES
+
+- compatible
+ Usage: required
+ Value type: <stringlist>
+ Definition: Must inclide "fsl,qman-pfdr"
+
+The following constraints are relevant to the FQD and PFDR private memory:
+ - The size must be 2^(size + 1), with size = 11..29. That is 4 KiB to
+ 1 GiB
+ - The alignment must be a multiple of the memory size
+
+The size of the FQD and PFDR must be chosen by observing the hardware features
+configured via the Reset Configuration Word (RCW) that are relevant to a
+specific board (e.g. number of MAC(s) pinned-out, number of offline/host command
+FMan ports, etc.). The size configured in the DT must reflect the hardware
+capabilities and not the specific needs of an application
+
+For additional details about reserved memory regions see reserved-memory.txt
+
+EXAMPLE
+
+The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ qman_fqd: qman-fqd {
+ compatible = "fsl,qman-fqd";
+ alloc-ranges = <0 0 0xf 0xffffffff>;
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+ qman_pfdr: qman-pfdr {
+ compatible = "fsl,qman-pfdr";
+ alloc-ranges = <0 0 0xf 0xffffffff>;
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
+The example below shows a (P4080) QMan CCSR-space node
+
+ clockgen: global-utilities@e1000 {
+ ...
+ sysclk: sysclk {
+ ...
+ };
+ ...
+ platform_pll: platform-pll@c00 {
+ #clock-cells = <1>;
+ reg = <0xc00 0x4>;
+ compatible = "fsl,qoriq-platform-pll-1.0";
+ clocks = <&sysclk>;
+ clock-output-names = "platform-pll", "platform-pll-div2";
+ };
+ ...
+ };
+
+ crypto@300000 {
+ ...
+ fsl,qman = <&qman 2>;
+ ...
+ };
+
+ qman: qman@318000 {
+ compatible = "fsl,qman";
+ reg = <0x318000 0x1000>;
+ interrupts = <16 2 1 3>;
+ fsl,liodn = <0x16>;
+ memory-region = <&qman_fqd &qman_pfdr>;
+ clocks = <&platform_pll 1>;
+ };
+
+ fman@400000 {
+ ...
+ fsl,qman = <&qman 0>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/sound/arndale.txt b/Documentation/devicetree/bindings/sound/arndale.txt
new file mode 100644
index 000000000000..0e76946385ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/arndale.txt
@@ -0,0 +1,24 @@
+Audio Binding for Arndale boards
+
+Required properties:
+- compatible : Can be the following,
+ "samsung,arndale-rt5631"
+
+- samsung,audio-cpu: The phandle of the Samsung I2S controller
+- samsung,audio-codec: The phandle of the audio codec
+
+Optional:
+- samsung,model: The name of the sound-card
+
+Arndale boards have several audio daughter cards; one of them is based on the
+rt5631/alc5631 codec. The example below shows the audio bindings for an
+rt5631/alc5631 based codec.
+
+Example:
+
+sound {
+ compatible = "samsung,arndale-rt5631";
+
+ samsung,audio-cpu = <&i2s0>;
+ samsung,audio-codec = <&rt5631>;
+};
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 60ca07996458..46bc9829c71a 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -32,7 +32,7 @@ Optional properties:
- rx-num-evt : FIFO levels.
- sram-size-playback : size of sram to be allocated during playback
- sram-size-capture : size of sram to be allocated during capture
-- interrupts : Interrupt numbers for McASP, currently not used by the driver
+- interrupts : Interrupt numbers for McASP
- interrupt-names : Known interrupt names are "tx" and "rx"
- pinctrl-0: Should specify pin control group used for this controller.
- pinctrl-names: Should contain only one value - "default", for more details
diff --git a/Documentation/devicetree/bindings/sound/eukrea-tlv320.txt b/Documentation/devicetree/bindings/sound/eukrea-tlv320.txt
index 0d7985c864af..6dfa88c4dc1e 100644
--- a/Documentation/devicetree/bindings/sound/eukrea-tlv320.txt
+++ b/Documentation/devicetree/bindings/sound/eukrea-tlv320.txt
@@ -1,11 +1,16 @@
Audio complex for Eukrea boards with tlv320aic23 codec.
Required properties:
-- compatible : "eukrea,asoc-tlv320"
-- eukrea,model : The user-visible name of this sound complex.
-- ssi-controller : The phandle of the SSI controller.
-- fsl,mux-int-port : The internal port of the i.MX audio muxer (AUDMUX).
-- fsl,mux-ext-port : The external port of the i.MX audio muxer.
+
+ - compatible : "eukrea,asoc-tlv320"
+
+ - eukrea,model : The user-visible name of this sound complex.
+
+ - ssi-controller : The phandle of the SSI controller.
+
+ - fsl,mux-int-port : The internal port of the i.MX audio muxer (AUDMUX).
+
+ - fsl,mux-ext-port : The external port of the i.MX audio muxer.
Note: The AUDMUX port numbering should start at 1, which is consistent with
hardware manual.
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt
index 52f5b6bf3e8e..d3b6b5f48010 100644
--- a/Documentation/devicetree/bindings/sound/fsl,esai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,esai.txt
@@ -7,37 +7,39 @@ other DSPs. It has up to six transmitters and four receivers.
Required properties:
- - compatible : Compatible list, must contain "fsl,imx35-esai" or
- "fsl,vf610-esai"
+ - compatible : Compatible list, must contain "fsl,imx35-esai" or
+ "fsl,vf610-esai"
- - reg : Offset and length of the register set for the device.
+ - reg : Offset and length of the register set for the device.
- - interrupts : Contains the spdif interrupt.
+ - interrupts : Contains the spdif interrupt.
- - dmas : Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
+ - dmas : Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
- - dma-names : Two dmas have to be defined, "tx" and "rx".
+ - dma-names : Two dmas have to be defined, "tx" and "rx".
- - clocks: Contains an entry for each entry in clock-names.
+ - clocks : Contains an entry for each entry in clock-names.
- - clock-names : Includes the following entries:
- "core" The core clock used to access registers
- "extal" The esai baud clock for esai controller used to derive
- HCK, SCK and FS.
- "fsys" The system clock derived from ahb clock used to derive
- HCK, SCK and FS.
+ - clock-names : Includes the following entries:
+ "core" The core clock used to access registers
+ "extal" The esai baud clock for esai controller used to
+ derive HCK, SCK and FS.
+ "fsys" The system clock derived from ahb clock used to
+ derive HCK, SCK and FS.
- - fsl,fifo-depth: The number of elements in the transmit and receive FIFOs.
- This number is the maximum allowed value for TFCR[TFWM] or RFCR[RFWM].
+ - fsl,fifo-depth : The number of elements in the transmit and receive
+ FIFOs. This number is the maximum allowed value for
+ TFCR[TFWM] or RFCR[RFWM].
- fsl,esai-synchronous: This is a boolean property. If present, indicating
- that ESAI would work in the synchronous mode, which means all the settings
- for Receiving would be duplicated from Transmition related registers.
+ that ESAI would work in the synchronous mode, which
+ means all the settings for Receiving would be
+ duplicated from Transmission related registers.
- - big-endian : If this property is absent, the native endian mode will
- be in use as default, or the big endian mode will be in use for all the
- device registers.
+ - big-endian : If this property is absent, the native endian mode
+ will be in use as default, or the big endian mode
+ will be in use for all the device registers.
Example:
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.txt b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
index 3e9e82c8eab3..b5ee32ee3706 100644
--- a/Documentation/devicetree/bindings/sound/fsl,spdif.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
@@ -6,32 +6,31 @@ a fibre cable.
Required properties:
- - compatible : Compatible list, must contain "fsl,imx35-spdif".
+ - compatible : Compatible list, must contain "fsl,imx35-spdif".
- - reg : Offset and length of the register set for the device.
+ - reg : Offset and length of the register set for the device.
- - interrupts : Contains the spdif interrupt.
+ - interrupts : Contains the spdif interrupt.
- - dmas : Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
+ - dmas : Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
- - dma-names : Two dmas have to be defined, "tx" and "rx".
+ - dma-names : Two dmas have to be defined, "tx" and "rx".
- - clocks : Contains an entry for each entry in clock-names.
+ - clocks : Contains an entry for each entry in clock-names.
- - clock-names : Includes the following entries:
- "core" The core clock of spdif controller
- "rxtx<0-7>" Clock source list for tx and rx clock.
- This clock list should be identical to
- the source list connecting to the spdif
- clock mux in "SPDIF Transceiver Clock
- Diagram" of SoC reference manual. It
- can also be referred to TxClk_Source
- bit of register SPDIF_STC.
+ - clock-names : Includes the following entries:
+ "core" The core clock of spdif controller.
+ "rxtx<0-7>" Clock source list for tx and rx clock.
+ This clock list should be identical to the source
+ list connecting to the spdif clock mux in "SPDIF
+ Transceiver Clock Diagram" of SoC reference manual.
+ It can also be referred to TxClk_Source bit of
+ register SPDIF_STC.
- - big-endian : If this property is absent, the native endian mode will
- be in use as default, or the big endian mode will be in use for all the
- device registers.
+ - big-endian : If this property is absent, the native endian mode
+ will be in use as default, or the big endian mode
+ will be in use for all the device registers.
Example:
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index 4956b14d4b06..044e5d76e2dd 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -5,32 +5,48 @@ which provides a synchronous audio interface that supports fullduplex
serial interfaces with frame synchronization such as I2S, AC97, TDM, and
codec/DSP interfaces.
-
Required properties:
-- compatible: Compatible list, contains "fsl,vf610-sai" or "fsl,imx6sx-sai".
-- reg: Offset and length of the register set for the device.
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names : Must include the "bus" for register access and "mclk1" "mclk2"
- "mclk3" for bit clock and frame clock providing.
-- dmas : Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
-- dma-names : Two dmas have to be defined, "tx" and "rx".
-- pinctrl-names: Must contain a "default" entry.
-- pinctrl-NNN: One property must exist for each entry in pinctrl-names.
- See ../pinctrl/pinctrl-bindings.txt for details of the property values.
-- big-endian: Boolean property, required if all the FTM_PWM registers
- are big-endian rather than little-endian.
-- lsb-first: Configures whether the LSB or the MSB is transmitted first for
- the fifo data. If this property is absent, the MSB is transmitted first as
- default, or the LSB is transmitted first.
-- fsl,sai-synchronous-rx: This is a boolean property. If present, indicating
- that SAI will work in the synchronous mode (sync Tx with Rx) which means
- both the transimitter and receiver will send and receive data by following
- receiver's bit clocks and frame sync clocks.
-- fsl,sai-asynchronous: This is a boolean property. If present, indicating
- that SAI will work in the asynchronous mode, which means both transimitter
- and receiver will send and receive data by following their own bit clocks
- and frame sync clocks separately.
+
+ - compatible : Compatible list, contains "fsl,vf610-sai" or
+ "fsl,imx6sx-sai".
+
+ - reg : Offset and length of the register set for the device.
+
+ - clocks : Must contain an entry for each entry in clock-names.
+
+ - clock-names : Must include the "bus" for register access and
+ "mclk1", "mclk2", "mclk3" for bit clock and frame
+ clock providing.
+ - dmas : Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
+
+ - dma-names : Two dmas have to be defined, "tx" and "rx".
+
+ - pinctrl-names : Must contain a "default" entry.
+
+ - pinctrl-NNN : One property must exist for each entry in
+ pinctrl-names. See ../pinctrl/pinctrl-bindings.txt
+ for details of the property values.
+
+ - big-endian : Boolean property, required if all the SAI
+ registers are big-endian rather than little-endian.
+
+ - lsb-first : Configures whether the LSB or the MSB is transmitted
+ first for the fifo data. If this property is absent,
+ the MSB is transmitted first as default, or the LSB
+ is transmitted first.
+
+ - fsl,sai-synchronous-rx: This is a boolean property. If present, indicating
+ that SAI will work in the synchronous mode (sync Tx
+ with Rx) which means both the transmitter and the
+ receiver will send and receive data by following
+ receiver's bit clocks and frame sync clocks.
+
+ - fsl,sai-asynchronous: This is a boolean property. If present, indicating
+ that SAI will work in the asynchronous mode, which
+ means both transmitter and receiver will send and
+ receive data by following their own bit clocks and
+ frame sync clocks separately.
Note:
- If both fsl,sai-asynchronous and fsl,sai-synchronous-rx are absent, the
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt b/Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt
index e4acdd891e49..2f89db88fd57 100644
--- a/Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt
@@ -1,33 +1,40 @@
Freescale i.MX audio complex with SGTL5000 codec
Required properties:
-- compatible : "fsl,imx-audio-sgtl5000"
-- model : The user-visible name of this sound complex
-- ssi-controller : The phandle of the i.MX SSI controller
-- audio-codec : The phandle of the SGTL5000 audio codec
-- audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names could be power
- supplies, SGTL5000 pins, and the jacks on the board:
-
- Power supplies:
- * Mic Bias
-
- SGTL5000 pins:
- * MIC_IN
- * LINE_IN
- * HP_OUT
- * LINE_OUT
-
- Board connectors:
- * Mic Jack
- * Line In Jack
- * Headphone Jack
- * Line Out Jack
- * Ext Spk
-
-- mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
-- mux-ext-port : The external port of the i.MX audio muxer
+
+ - compatible : "fsl,imx-audio-sgtl5000"
+
+ - model : The user-visible name of this sound complex
+
+ - ssi-controller : The phandle of the i.MX SSI controller
+
+ - audio-codec : The phandle of the SGTL5000 audio codec
+
+ - audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, SGTL5000
+ pins, and the jacks on the board:
+
+ Power supplies:
+ * Mic Bias
+
+ SGTL5000 pins:
+ * MIC_IN
+ * LINE_IN
+ * HP_OUT
+ * LINE_OUT
+
+ Board connectors:
+ * Mic Jack
+ * Line In Jack
+ * Headphone Jack
+ * Line Out Jack
+ * Ext Spk
+
+ - mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
+
+ - mux-ext-port : The external port of the i.MX audio muxer
Note: The AUDMUX port numbering should start at 1, which is consistent with
hardware manual.
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt b/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
index 7d13479f9c3c..da84a442ccea 100644
--- a/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
@@ -2,23 +2,25 @@ Freescale i.MX audio complex with S/PDIF transceiver
Required properties:
- - compatible : "fsl,imx-audio-spdif"
+ - compatible : "fsl,imx-audio-spdif"
- - model : The user-visible name of this sound complex
+ - model : The user-visible name of this sound complex
- - spdif-controller : The phandle of the i.MX S/PDIF controller
+ - spdif-controller : The phandle of the i.MX S/PDIF controller
Optional properties:
- - spdif-out : This is a boolean property. If present, the transmitting
- function of S/PDIF will be enabled, indicating there's a physical
- S/PDIF out connector/jack on the board or it's connecting to some
- other IP block, such as an HDMI encoder/display-controller.
+ - spdif-out : This is a boolean property. If present, the
+ transmitting function of S/PDIF will be enabled,
+ indicating there's a physical S/PDIF out connector
+ or jack on the board or it's connecting to some
+ other IP block, such as an HDMI encoder or
+ display-controller.
- - spdif-in : This is a boolean property. If present, the receiving
- function of S/PDIF will be enabled, indicating there's a physical
- S/PDIF in connector/jack on the board.
+ - spdif-in : This is a boolean property. If present, the receiving
+ function of S/PDIF will be enabled, indicating there
+ is a physical S/PDIF in connector/jack on the board.
* Note: At least one of these two properties should be set in the DT binding.
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt b/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt
index f49450a87890..acea71bee34f 100644
--- a/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt
@@ -1,25 +1,32 @@
Freescale i.MX audio complex with WM8962 codec
Required properties:
-- compatible : "fsl,imx-audio-wm8962"
-- model : The user-visible name of this sound complex
-- ssi-controller : The phandle of the i.MX SSI controller
-- audio-codec : The phandle of the WM8962 audio codec
-- audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names could be power
- supplies, WM8962 pins, and the jacks on the board:
-
- Power supplies:
- * Mic Bias
-
- Board connectors:
- * Mic Jack
- * Headphone Jack
- * Ext Spk
-
-- mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
-- mux-ext-port : The external port of the i.MX audio muxer
+
+ - compatible : "fsl,imx-audio-wm8962"
+
+ - model : The user-visible name of this sound complex
+
+ - ssi-controller : The phandle of the i.MX SSI controller
+
+ - audio-codec : The phandle of the WM8962 audio codec
+
+ - audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, WM8962
+ pins, and the jacks on the board:
+
+ Power supplies:
+ * Mic Bias
+
+ Board connectors:
+ * Mic Jack
+ * Headphone Jack
+ * Ext Spk
+
+ - mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
+
+ - mux-ext-port : The external port of the i.MX audio muxer
Note: The AUDMUX port numbering should start at 1, which is consistent with
hardware manual.
diff --git a/Documentation/devicetree/bindings/sound/imx-audmux.txt b/Documentation/devicetree/bindings/sound/imx-audmux.txt
index f88a00e54c63..b30a737e209e 100644
--- a/Documentation/devicetree/bindings/sound/imx-audmux.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audmux.txt
@@ -1,18 +1,24 @@
Freescale Digital Audio Mux (AUDMUX) device
Required properties:
-- compatible : "fsl,imx21-audmux" for AUDMUX version firstly used on i.MX21,
- or "fsl,imx31-audmux" for the version firstly used on i.MX31.
-- reg : Should contain AUDMUX registers location and length
+
+ - compatible : "fsl,imx21-audmux" for AUDMUX version firstly used
+ on i.MX21, or "fsl,imx31-audmux" for the version
+ firstly used on i.MX31.
+
+ - reg : Should contain AUDMUX registers location and length.
An initial configuration can be setup using child nodes.
Required properties of optional child nodes:
-- fsl,audmux-port : Integer of the audmux port that is configured by this
- child node.
-- fsl,port-config : List of configuration options for the specific port. For
- imx31-audmux and above, it is a list of tuples <ptcr pdcr>. For
- imx21-audmux it is a list of pcr values.
+
+ - fsl,audmux-port : Integer of the audmux port that is configured by this
+ child node.
+
+ - fsl,port-config : List of configuration options for the specific port.
+ For imx31-audmux and above, it is a list of tuples
+ <ptcr pdcr>. For imx21-audmux it is a list of pcr
+ values.
Example:
diff --git a/Documentation/devicetree/bindings/sound/max98090.txt b/Documentation/devicetree/bindings/sound/max98090.txt
index c454e67f54bb..aa802a274520 100644
--- a/Documentation/devicetree/bindings/sound/max98090.txt
+++ b/Documentation/devicetree/bindings/sound/max98090.txt
@@ -16,6 +16,8 @@ Optional properties:
- clock-names: Should be "mclk"
+- maxim,dmic-freq: Frequency at which to clock DMIC
+
Pins on the device (for linking into audio routes):
* MIC1
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.txt b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
index c5be003f413e..0d0ab51105b0 100644
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
@@ -1,11 +1,16 @@
Renesas FSI
Required properties:
-- compatible : "renesas,sh_fsi2" or "renesas,sh_fsi"
+- compatible : "renesas,fsi2-<soctype>",
+ "renesas,sh_fsi2" or "renesas,sh_fsi" as
+ fallback.
+ Examples with soctypes are:
+ - "renesas,fsi2-r8a7740" (R-Mobile A1)
+ - "renesas,fsi2-sh73a0" (SH-Mobile AG5)
- reg : Should contain the register physical address and length
- interrupts : Should contain FSI interrupt
-- fsia,spdif-connection : FSI is connected by S/PDFI
+- fsia,spdif-connection : FSI is connected by S/PDIF
- fsia,stream-mode-support : FSI supports 16bit stream mode.
- fsia,use-internal-clock : FSI uses internal clock when master mode.
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index aa697abf337e..2dd690bc19cc 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -1,8 +1,12 @@
Renesas R-Car sound
Required properties:
-- compatible : "renesas,rcar_sound-gen1" if generation1
+- compatible : "renesas,rcar_sound-<soctype>", fallbacks
+ "renesas,rcar_sound-gen1" if generation1, and
"renesas,rcar_sound-gen2" if generation2
+ Examples with soctypes are:
+ - "renesas,rcar_sound-r8a7790" (R-Car H2)
+ - "renesas,rcar_sound-r8a7791" (R-Car M2-W)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
@@ -35,9 +39,9 @@ DAI subnode properties:
Example:
-rcar_sound: rcar_sound@0xffd90000 {
+rcar_sound: rcar_sound@ec500000 {
#sound-dai-cells = <1>;
- compatible = "renesas,rcar_sound-gen2";
+ compatible = "renesas,rcar_sound-r8a7791", "renesas,rcar_sound-gen2";
reg = <0 0xec500000 0 0x1000>, /* SCU */
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
diff --git a/Documentation/devicetree/bindings/sound/rt5631.txt b/Documentation/devicetree/bindings/sound/rt5631.txt
new file mode 100644
index 000000000000..92b986ca337b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5631.txt
@@ -0,0 +1,48 @@
+ALC5631/RT5631 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+ - compatible : "realtek,alc5631" or "realtek,rt5631"
+
+ - reg : the I2C address of the device.
+
+Pins on the device (for linking into audio routes):
+
+ * SPK_OUT_R_P
+ * SPK_OUT_R_N
+ * SPK_OUT_L_P
+ * SPK_OUT_L_N
+ * HP_OUT_L
+ * HP_OUT_R
+ * AUX_OUT2_LP
+ * AUX_OUT2_RN
+ * AUX_OUT1_LP
+ * AUX_OUT1_RN
+ * AUX_IN_L_JD
+ * AUX_IN_R_JD
+ * MONO_IN_P
+ * MONO_IN_N
+ * MIC1_P
+ * MIC1_N
+ * MIC2_P
+ * MIC2_N
+ * MONO_OUT_P
+ * MONO_OUT_N
+ * MICBIAS1
+ * MICBIAS2
+
+Example:
+
+alc5631: alc5631@1a {
+ compatible = "realtek,alc5631";
+ reg = <0x1a>;
+};
+
+or
+
+rt5631: rt5631@1a {
+ compatible = "realtek,rt5631";
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5677.txt b/Documentation/devicetree/bindings/sound/rt5677.txt
index 0701b834fc73..740ff771aa8b 100644
--- a/Documentation/devicetree/bindings/sound/rt5677.txt
+++ b/Documentation/devicetree/bindings/sound/rt5677.txt
@@ -27,6 +27,21 @@ Optional properties:
Boolean. Indicate MIC1/2 input and LOUT1/2/3 outputs are differential,
rather than single-ended.
+- realtek,gpio-config
+ Array of six 8bit elements that configures GPIO.
+ 0 - floating (reset value)
+ 1 - pull down
+ 2 - pull up
+
+- realtek,jd1-gpio
+ Configures GPIO Mic Jack detection 1.
+ Select 0 ~ 3 as OFF, GPIO1, GPIO2 and GPIO3 respectively.
+
+- realtek,jd2-gpio
+- realtek,jd3-gpio
+ Configures GPIO Mic Jack detection 2 and 3.
+ Select 0 ~ 3 as OFF, GPIO4, GPIO5 and GPIO6 respectively.
+
Pins on the device (for linking into audio routes):
* IN1P
@@ -56,4 +71,6 @@ rt5677 {
realtek,pow-ldo2-gpio =
<&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
realtek,in1-differential = "true";
+ realtek,gpio-config = /bits/ 8 <0 0 0 0 0 2>; /* pull up GPIO6 */
+ realtek,jd2-gpio = <3>; /* Enables Jack detection for GPIO6 */
};
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
index 7386d444ada1..d188296bb6ec 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
@@ -6,10 +6,17 @@ Required SoC Specific Properties:
- samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
- samsung,s5pv210-i2s: for 8/16/24bit multichannel(5.1) I2S with
secondary fifo, s/w reset control and internal mux for root clk src.
- - samsung,exynos5420-i2s: for 8/16/24bit multichannel(7.1) I2S with
- secondary fifo, s/w reset control, internal mux for root clk src and
- TDM support. TDM (Time division multiplexing) is to allow transfer of
- multiple channel audio data on single data line.
+ - samsung,exynos5420-i2s: for 8/16/24bit multichannel(5.1) I2S for
+ playback, stereo channel capture, secondary fifo using internal
+ or external dma, s/w reset control, internal mux for root clk src
+ and 7.1 channel TDM support for playback. TDM (Time division multiplexing)
+ allows transfer of multiple channel audio data on a single data line.
+ - samsung,exynos7-i2s: with all the available features of exynos5 i2s,
+ exynos7 I2S has 7.1 channel TDM support for capture, secondary fifo
+ with only external dma and a larger number of root clk sampling frequencies.
+ - samsung,exynos7-i2s1: I2S1 on previous samsung platforms supports
+ stereo channels. exynos7 I2S1 is upgraded to 5.1 multichannel with
+ slightly modified bit offsets.
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index d556dcb8816b..0e5e4eb3ef1b 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -7,6 +7,17 @@ Required properties:
- clocks : the clock provider of SYS_MCLK
+- micbias-resistor-k-ohms : the bias resistor to be used in kOhms
+ The resistor can take values of 2k, 4k or 8k.
+ If set to 0 it will be off.
+ If this property is not mentioned or the value is unknown, then
+ the micbias resistor is set to 4k.
+
+- micbias-voltage-m-volts : the bias voltage to be used in mVolts
+ The voltage can take values from 1.25V to 3V by 250mV steps
+ If this property is not mentioned or the value is unknown, then
+ the value is set to 1.25V.
+
- VDDA-supply : the regulator provider of VDDA
- VDDIO-supply: the regulator provider of VDDIO
@@ -21,6 +32,8 @@ codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
clocks = <&clks 150>;
+ micbias-resistor-k-ohms = <2>;
+ micbias-voltage-m-volts = <2250>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
};
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
new file mode 100644
index 000000000000..e8bf23eb1803
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -0,0 +1,26 @@
+Texas Instruments TS3A227E
+Autonomous Audio Accessory Detection and Configuration Switch
+
+The TS3A227E detects headsets of the 3-ring and 4-ring standards and
+switches automatically to route the microphone correctly. It also
+handles key press detection in accordance with the Android audio
+headset specification v1.0.
+
+Required properties:
+
+ - compatible: Should contain "ti,ts3a227e".
+ - reg: The i2c address. Should contain <0x3b>.
+ - interrupt-parent: The parent interrupt controller
+ - interrupts: Interrupt number for /INT pin from the 227e
+
+
+Examples:
+
+ i2c {
+ ts3a227e@3b {
+ compatible = "ti,ts3a227e";
+ reg = <0x3b>;
+ interrupt-parent = <&gpio>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/wm8960.txt b/Documentation/devicetree/bindings/sound/wm8960.txt
new file mode 100644
index 000000000000..2deb8a3da9c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8960.txt
@@ -0,0 +1,31 @@
+WM8960 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+ - compatible : "wlf,wm8960"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+ - wlf,shared-lrclk: This is a boolean property. If present, the LRCM bit of
+ R24 (Additional control 2) gets set, indicating that ADCLRC and DACLRC pins
+ will be disabled only when ADC (Left and Right) and DAC (Left and Right)
+ are disabled.
+ When the wm8960 works in synchronous mode and the DACLRC pin is used to
+ supply the frame clock, there is no frame clock for capture unless the DAC
+ is enabled to drive the DACLRC pin. If shared-lrclk is present, there is no
+ need to enable the DAC for capture.
+
+ - wlf,capless: This is a boolean property. If present, OUT3 pin will be
+ enabled and disabled together with HP_L and HP_R pins in response to jack
+ detect events.
+
+Example:
+
+codec: wm8960@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+
+ wlf,shared-lrclk;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-gpio.txt b/Documentation/devicetree/bindings/spi/spi-gpio.txt
index 8a824be15754..a95603bcf6ff 100644
--- a/Documentation/devicetree/bindings/spi/spi-gpio.txt
+++ b/Documentation/devicetree/bindings/spi/spi-gpio.txt
@@ -8,8 +8,10 @@ Required properties:
- gpio-sck: GPIO spec for the SCK line to use
- gpio-miso: GPIO spec for the MISO line to use
- gpio-mosi: GPIO spec for the MOSI line to use
- - cs-gpios: GPIOs to use for chipselect lines
- - num-chipselects: number of chipselect lines
+ - cs-gpios: GPIOs to use for chipselect lines.
+ Not needed if num-chipselects = <0>.
+ - num-chipselects: Number of chipselect lines. Should be <0> if a single device
+ with no chip select is connected.
Example:
diff --git a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
new file mode 100644
index 000000000000..c7dd50fb8eb2
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
@@ -0,0 +1,37 @@
+IMG Synchronous Peripheral Flash Interface (SPFI) controller
+
+Required properties:
+- compatible: Must be "img,spfi".
+- reg: Must contain the base address and length of the SPFI registers.
+- interrupts: Must contain the SPFI interrupt.
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - spfi: SPI operating clock
+ - sys: SPI system interface clock
+- dmas: Must contain an entry for each entry in dma-names.
+ See ../dma/dma.txt for details.
+- dma-names: Must include the following entries:
+ - rx
+ - tx
+- #address-cells: Must be 1.
+- #size-cells: Must be 0.
+
+Optional properties:
+- img,supports-quad-mode: Should be set if the interface supports quad mode
+ SPI transfers.
+
+Example:
+
+spi@18100f00 {
+ compatible = "img,spfi";
+ reg = <0x18100f00 0x100>;
+ interrupts = <GIC_SHARED 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&spi_clk>, <&system_clk>;
+ clock-names = "spfi", "sys";
+ dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+ dma-names = "rx", "tx";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
new file mode 100644
index 000000000000..bb52a86f3365
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -0,0 +1,22 @@
+Amlogic Meson SPI controllers
+
+* SPIFC (SPI Flash Controller)
+
+The Meson SPIFC is a controller optimized for communication with SPI
+NOR memories. It has no DMA support and uses a 64-byte unified
+transmit / receive buffer.
+
+Required properties:
+ - compatible: should be "amlogic,meson6-spifc"
+ - reg: physical base address and length of the controller registers
+ - clocks: phandle of the input clock for the baud rate generator
+ - #address-cells: should be 1
+ - #size-cells: should be 0
+
+ spi@c1108c80 {
+ compatible = "amlogic,meson6-spifc";
+ reg = <0xc1108c80 0x80>;
+ clocks = <&clk81>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-samsung.txt b/Documentation/devicetree/bindings/spi/spi-samsung.txt
index 1e8a8578148f..6dbdeb3c361a 100644
--- a/Documentation/devicetree/bindings/spi/spi-samsung.txt
+++ b/Documentation/devicetree/bindings/spi/spi-samsung.txt
@@ -9,7 +9,7 @@ Required SoC Specific Properties:
- samsung,s3c2443-spi: for s3c2443, s3c2416 and s3c2450 platforms
- samsung,s3c6410-spi: for s3c6410 platforms
- samsung,s5pv210-spi: for s5pv210 and s5pc110 platforms
- - samsung,exynos4210-spi: for exynos4 and exynos5 platforms
+ - samsung,exynos7-spi: for exynos7 platforms
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index 4cf024929a3f..4698e0edc205 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -5,17 +5,9 @@ Required properties:
- compatible: Should be set to one of the following:
marvell,armada370-thermal
marvell,armada375-thermal
- marvell,armada375-z1-thermal
marvell,armada380-thermal
marvell,armadaxp-thermal
- Note: As the name suggests, "marvell,armada375-z1-thermal"
- applies for the SoC Z1 stepping only. On such stepping
- some quirks need to be done and the register offset differs
- from the one in the A0 stepping.
- The operating system may auto-detect the SoC stepping and
- update the compatible and register offsets at runtime.
-
- reg: Device's register space.
Two entries are expected, see the examples below.
The first one is required for the sensor register;
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
new file mode 100644
index 000000000000..ef802de4957a
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -0,0 +1,68 @@
+* Temperature Sensor ADC (TSADC) on rockchip SoCs
+
+Required properties:
+- compatible : "rockchip,rk3288-tsadc"
+- reg : physical base address of the controller and length of memory mapped
+ region.
+- interrupts : The interrupt number to the cpu. The interrupt specifier format
+ depends on the interrupt controller.
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Shall be "tsadc" for the converter-clock, and "apb_pclk" for
+ the peripheral clock.
+- resets : Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names : Must include the name "tsadc-apb".
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+- rockchip,hw-tshut-temp : The hardware-controlled shutdown temperature value.
+- rockchip,hw-tshut-mode : The hardware-controlled shutdown mode 0:CRU 1:GPIO.
+- rockchip,hw-tshut-polarity : The hardware-controlled active polarity 0:LOW
+ 1:HIGH.
+
+Example:
+tsadc: tsadc@ff280000 {
+ compatible = "rockchip,rk3288-tsadc";
+ reg = <0xff280000 0x100>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ resets = <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb";
+ pinctrl-names = "default";
+ pinctrl-0 = <&otp_out>;
+ #thermal-sensor-cells = <1>;
+ rockchip,hw-tshut-temp = <95000>;
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <0>;
+};
+
+Example: referring to thermal sensors:
+thermal-zones {
+ cpu_thermal: cpu_thermal {
+ polling-delay-passive = <1000>; /* milliseconds */
+ polling-delay = <5000>; /* milliseconds */
+
+ /* sensor ID */
+ thermal-sensors = <&tsadc 1>;
+
+ trips {
+ cpu_alert0: cpu_alert {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu_crit: cpu_crit {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
new file mode 100644
index 000000000000..ecf3ed76cd46
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
@@ -0,0 +1,53 @@
+Tegra124 SOCTHERM thermal management system
+
+The SOCTHERM IP block contains thermal sensors, support for polled
+or interrupt-based thermal monitoring, CPU and GPU throttling based
+on temperature trip points, and handling of external overcurrent
+notifications. It is also used to manage emergency shutdown in an
+overheating situation.
+
+Required properties :
+- compatible : "nvidia,tegra124-soctherm".
+- reg : Should contain 1 entry:
+ - SOCTHERM register set
+- interrupts : Defines the interrupt used by SOCTHERM
+- clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+ - tsensor
+ - soctherm
+- resets : Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names : Must include the following entries:
+ - soctherm
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description
+ of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
+ list of valid values when referring to thermal sensors.
+
+
+Example :
+
+ soctherm@0,700e2000 {
+ compatible = "nvidia,tegra124-soctherm";
+ reg = <0x0 0x700e2000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+ <&tegra_car TEGRA124_CLK_SOC_THERM>;
+ clock-names = "tsensor", "soctherm";
+ resets = <&tegra_car 78>;
+ reset-names = "soctherm";
+
+ #thermal-sensor-cells = <1>;
+ };
+
+Example: referring to thermal sensors :
+
+ thermal-zones {
+ cpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/unittest.txt b/Documentation/devicetree/bindings/unittest.txt
new file mode 100644
index 000000000000..0f92a22fddfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/unittest.txt
@@ -0,0 +1,14 @@
+* OF selftest platform device
+
+** selftest
+
+Required properties:
+- compatible: must be "selftest"
+
+All other properties are optional.
+
+Example:
+ selftest {
+ compatible = "selftest";
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
new file mode 100644
index 000000000000..27f8b1e5ee46
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -0,0 +1,24 @@
+* USB2 ChipIdea USB controller for ci13xxx
+
+Required properties:
+- compatible: should be "chipidea,usb2"
+- reg: base address and length of the registers
+- interrupts: interrupt for the USB controller
+
+Optional properties:
+- clocks: reference to the USB clock
+- phys: reference to the USB PHY
+- phy-names: should be "usb-phy"
+- vbus-supply: reference to the VBUS regulator
+
+Example:
+
+ usb@f7ed0000 {
+ compatible = "chipidea,usb2";
+ reg = <0xf7ed0000 0x10000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&chip CLKID_USB0>;
+ phys = <&usb_phy0>;
+ phy-names = "usb-phy";
+ vbus-supply = <&reg_usb0_vbus>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 471366d6a129..cd7f0454e13a 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -14,6 +14,29 @@ Optional properties:
- phys: from the *Generic PHY* bindings
- phy-names: from the *Generic PHY* bindings
- tx-fifo-resize: determines if the FIFO *has* to be reallocated.
+ - snps,disable_scramble_quirk: true when SW should disable data scrambling.
+ Only really useful for FPGA builds.
+ - snps,has-lpm-erratum: true when DWC3 was configured with LPM Erratum enabled
+ - snps,lpm-nyet-threshold: LPM NYET threshold
+ - snps,u2exit_lfps_quirk: set if we want to enable u2exit lfps quirk
+ - snps,u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
+ - snps,req_p1p2p3_quirk: when set, the core will always request the
+ P1/P2/P3 transition sequence.
+ - snps,del_p1p2p3_quirk: when set, the core will delay P1/P2/P3 until a
+ certain number of 8B10B errors occur.
+ - snps,del_phy_power_chg_quirk: when set, the core will delay the PHY power
+ change from P0 to P1/P2/P3.
+ - snps,lfps_filter_quirk: when set, the core will filter LFPS reception.
+ - snps,rx_detect_poll_quirk: when set, the core will disable a 400us delay to
+ start polling LFPS after RX.Detect.
+ - snps,tx_de_emphasis_quirk: when set, the core will set the Tx de-emphasis
+ value.
+ - snps,tx_de_emphasis: the value driven to the PHY is controlled by the
+ LTSSM during USB3 Compliance mode.
+ - snps,dis_u3_susphy_quirk: when set, the core will disable the USB3 suspend phy.
+ - snps,dis_u2_susphy_quirk: when set, the core will disable the USB2 suspend phy.
+ - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
+ utmi_l1_suspend_n, false when asserts utmi_sleep_n
+ - snps,hird-threshold: HIRD threshold
This is usually a subnode to DWC3 glue to which it is connected.
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index a3b5990d0f2c..9b4dbe3b2acc 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -82,8 +82,10 @@ Example:
DWC3
Required properties:
- - compatible: should be "samsung,exynos5250-dwusb3" for USB 3.0 DWC3
- controller.
+ - compatible: should be one of the following -
+ "samsung,exynos5250-dwusb3": for USB 3.0 DWC3 controller on
+ Exynos5250/5420.
+ "samsung,exynos7-dwusb3": for USB 3.0 DWC3 controller on Exynos7.
- #address-cells, #size-cells : should be '1' if the device has sub-nodes
with 'reg' property.
- ranges: allows valid 1:1 translation between child's address space and
diff --git a/Documentation/devicetree/bindings/usb/pxa-usb.txt b/Documentation/devicetree/bindings/usb/pxa-usb.txt
index 79729a948d5a..9c331799b87c 100644
--- a/Documentation/devicetree/bindings/usb/pxa-usb.txt
+++ b/Documentation/devicetree/bindings/usb/pxa-usb.txt
@@ -29,3 +29,25 @@ Example:
marvell,port-mode = <2>; /* PMM_GLOBAL_MODE */
};
+UDC
+
+Required properties:
+ - compatible: Should be "marvell,pxa270-udc" for USB controllers
+ used in device mode.
+ - reg: usb device MMIO address space
+ - interrupts: single interrupt generated by the UDC IP
+ - clocks: input clock of the UDC IP (see clock-bindings.txt)
+
+Optional properties:
+ - gpios:
+ - gpio activated to control the USB D+ pullup (see gpio.txt)
+
+Example:
+
+ pxa27x_udc: udc@40600000 {
+ compatible = "marvell,pxa270-udc";
+ reg = <0x40600000 0x10000>;
+ interrupts = <11>;
+ clocks = <&pxa2xx_clks 11>;
+ gpios = <&gpio 22 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
index b968a1aea995..19233b7365e1 100644
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ohci.txt
@@ -9,6 +9,8 @@ Optional properties:
- big-endian-regs : boolean, set this for hcds with big-endian registers
- big-endian-desc : boolean, set this for hcds with big-endian descriptors
- big-endian : boolean, for hcds with big-endian-regs + big-endian-desc
+- no-big-frame-no : boolean, set if frame_no lives in bits [15:0] of HCCA
+- num-ports : u32, to override the detected port count
- clocks : a list of phandle + clock specifier pairs
- phys : phandle + phy specifier pair
- phy-names : "usb"
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2417cb0b493b..b1df0ad1306c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -47,6 +47,7 @@ dlink D-Link Corporation
dmo Data Modul AG
ebv EBV Elektronik
edt Emerging Display Technologies
+elan Elan Microelectronic Corp.
emmicro EM Microelectronic
energymicro Silicon Laboratories (formerly Energy Micro AS)
epcos EPCOS AG
@@ -66,8 +67,10 @@ gmt Global Mixed-mode Technology, Inc.
google Google, Inc.
gumstix Gumstix, Inc.
gw Gateworks Corporation
+hannstar HannStar Display Corporation
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
+hit Hitachi Ltd.
honeywell Honeywell
hp Hewlett Packard
i2se I2SE GmbH
@@ -101,6 +104,7 @@ mitsubishi Mitsubishi Electric Corporation
mosaixtech Mosaix Technologies, Inc.
moxa Moxa
mpl MPL AG
+mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
mundoreader Mundo Reader S.L.
murata Murata Manufacturing Co., Ltd.
mxicy Macronix International Co., Ltd.
diff --git a/Documentation/devicetree/bindings/video/adi,adv7511.txt b/Documentation/devicetree/bindings/video/adi,adv7511.txt
new file mode 100644
index 000000000000..96c25ee01501
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/adi,adv7511.txt
@@ -0,0 +1,88 @@
+Analog Devices ADV7511(W)/13 HDMI Encoders
+------------------------------------------
+
+The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
+S/PDIF, CEC and HDCP.
+
+Required properties:
+
+- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- reg: I2C slave address
+
+The ADV7511 supports a large number of input data formats that differ by their
+color depth, color format, clock mode, bit justification and random
+arrangement of components on the data bus. The combination of the following
+properties describes the input and maps directly to the video input tables of
+the ADV7511 datasheet that document all the supported combinations.
+
+- adi,input-depth: Number of bits per color component at the input (8, 10 or
+ 12).
+- adi,input-colorspace: The input color space, one of "rgb", "yuv422" or
+ "yuv444".
+- adi,input-clock: The input clock type, one of "1x" (one clock cycle per
+ pixel), "2x" (two clock cycles per pixel), "ddr" (one clock cycle per pixel,
+ data driven on both edges).
+
+The following input format properties are required except in "rgb 1x" and
+"yuv444 1x" modes, in which case they must not be specified.
+
+- adi,input-style: The input components arrangement variant (1, 2 or 3), as
+ listed in the input format tables in the datasheet.
+- adi,input-justification: The input bit justification ("left", "evenly",
+ "right").
+
+Optional properties:
+
+- interrupts: Specifier for the ADV7511 interrupt
+- pd-gpios: Specifier for the GPIO connected to the power down signal
+
+- adi,clock-delay: Video data clock delay relative to the pixel clock, in ps
+ (-1200 ps .. 1600 ps). Defaults to no delay.
+- adi,embedded-sync: The input uses synchronization signals embedded in the
+ data stream (similar to BT.656). Defaults to separate H/V synchronization
+ signals.
+
+Required nodes:
+
+The ADV7511 has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for the RGB or YUV input
+- Video port 1 for the HDMI output
+
+
+Example
+-------
+
+ adv7511w: hdmi@39 {
+ compatible = "adi,adv7511w";
+ reg = <0x39>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7511w_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7511_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/video/backlight/lp855x.txt b/Documentation/devicetree/bindings/video/backlight/lp855x.txt
index 96e83a56048e..0a3ecbc3a1b9 100644
--- a/Documentation/devicetree/bindings/video/backlight/lp855x.txt
+++ b/Documentation/devicetree/bindings/video/backlight/lp855x.txt
@@ -12,6 +12,7 @@ Optional properties:
- pwm-period: PWM period value. Set only PWM input mode used (u32)
- rom-addr: Register address of ROM area to be updated (u8)
- rom-val: Register value to be updated (u8)
+ - power-supply: Regulator which controls the 3V rail
Example:
@@ -56,6 +57,7 @@ Example:
backlight@2c {
compatible = "ti,lp8557";
reg = <0x2c>;
+ power-supply = <&backlight_vdd>;
dev-ctrl = /bits/ 8 <0x41>;
init-brt = /bits/ 8 <0x0a>;
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index e74243b4b317..ca2b4aacd9af 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+ "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
- reg: physical base address and length of the registers set for the device
- interrupts: should contain DSI interrupt
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644
index 000000000000..7fff582495a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt
@@ -0,0 +1,19 @@
+Rockchip DRM master device
+================================
+
+The Rockchip DRM master device is a virtual device needed to list all
+vop devices or other display interface nodes that comprise the
+graphics subsystem.
+
+Required properties:
+- compatible: Should be "rockchip,display-subsystem"
+- ports: Should contain a list of phandles pointing to the display interface
+ ports of vop devices. vop definitions are as defined in
+ Documentation/devicetree/bindings/video/rockchip-vop.txt
+
+example:
+
+display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+};
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644
index 000000000000..d15351f2313d
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt
@@ -0,0 +1,58 @@
+Device-tree bindings for the Rockchip SoC display controller (VOP)
+
+VOP (Visual Output Processor) is the Display Controller for the Rockchip
+series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be one of the following
+ "rockchip,rk3288-vop";
+
+- interrupts: should contain a list of all VOP IP block interrupts in the
+ order: VSYNC, LCD_SYSTEM. The interrupt specifier
+ format depends on the interrupt controller used.
+
+- clocks: must include clock specifiers corresponding to entries in the
+ clock-names property.
+
+- clock-names: Must contain
+ aclk_vop: for ddr buffer transfer.
+ hclk_vop: for ahb bus to R/W the phy regs.
+ dclk_vop: pixel clock.
+
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - axi
+ - ahb
+ - dclk
+
+- iommus: required, a phandle to the iommu node
+
+- port: A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+SoC specific DT entry:
+ vopb: vopb@ff930000 {
+ compatible = "rockchip,rk3288-vop";
+ reg = <0xff930000 0x19c>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
+ reset-names = "axi", "ahb", "dclk";
+ iommus = <&vopb_mmu>;
+ vopb_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vopb_out_edp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint=<&edp_in_vopb>;
+ };
+ vopb_out_hdmi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint=<&hdmi_in_vopb>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 4e6c77c85546..cf1af6371021 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -11,6 +11,7 @@ Required properties:
"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
+ "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
"samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
- reg: physical base address and length of the FIMD registers set.
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer-sunxi.txt b/Documentation/devicetree/bindings/video/simple-framebuffer-sunxi.txt
new file mode 100644
index 000000000000..c46ba641a1df
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/simple-framebuffer-sunxi.txt
@@ -0,0 +1,33 @@
+Sunxi specific Simple Framebuffer bindings
+
+This binding documents sunxi specific extensions to the simple-framebuffer
+bindings. The sunxi simplefb u-boot code relies on the devicetree containing
+pre-populated simplefb nodes.
+
+These extensions are intended so that u-boot can select the right node based
+on which pipeline is being used. As such they are solely intended for
+firmware / bootloader use, and the OS should ignore them.
+
+Required properties:
+- compatible: "allwinner,simple-framebuffer"
+- allwinner,pipeline, one of:
+ "de_be0-lcd0"
+ "de_be1-lcd1"
+ "de_be0-lcd0-hdmi"
+ "de_be1-lcd1-hdmi"
+
+Example:
+
+chosen {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@0 {
+ compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+ allwinner,pipeline = "de_be0-lcd0-hdmi";
+ clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>;
+ status = "disabled";
+ };
+};
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
index 70c26f3a5b9a..4474ef6e0b95 100644
--- a/Documentation/devicetree/bindings/video/simple-framebuffer.txt
+++ b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
@@ -1,8 +1,40 @@
Simple Framebuffer
-A simple frame-buffer describes a raw memory region that may be rendered to,
-with the assumption that the display hardware has already been set up to scan
-out from that buffer.
+A simple frame-buffer describes a frame-buffer set up by firmware or
+the bootloader, with the assumption that the display hardware has already
+been set up to scan out from the memory pointed to by the reg property.
+
+Since simplefb nodes represent runtime information they must be sub-nodes of
+the chosen node (*). Simplefb nodes must be named "framebuffer@<address>".
+
+If the devicetree contains nodes for the display hardware used by a simplefb,
+then the simplefb node must contain a property called "display", which
+contains a phandle pointing to the primary display hw node, so that the OS
+knows which simplefb to disable when handing over control to a driver for the
+real hardware. The bindings for the hw nodes must specify which node is
+considered the primary node.
+
+It is advised to add display# aliases to help the OS determine how to number
+things. If display# aliases are used and the simplefb node contains a
+"display" property, then the /aliases/display# path must point to the display
+hw node the "display" property points to; otherwise it must point directly
+to the simplefb node.
+
+If a simplefb node represents the preferred console for user interaction,
+then the chosen node's stdout-path property should point to it, or to the
+primary display hw node, as with display# aliases. If display aliases are
+used then it should be set to the alias instead.
+
+It is advised that devicetree files contain pre-filled, disabled framebuffer
+nodes, so that the firmware only needs to update the mode information and
+enable them. This way, if e.g. support for more display clocks gets added
+later on, the simplefb nodes will already contain this info and the firmware
+does not need to be updated.
+
+If pre-filled framebuffer nodes are used, the firmware may need extra
+information to find the right node. In that case an extra platform specific
+compatible and platform specific properties should be used and documented,
+see e.g. simple-framebuffer-sunxi.txt .
Required properties:
- compatible: "simple-framebuffer"
@@ -14,13 +46,41 @@ Required properties:
- r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
- a8b8g8r8 (32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r).
+Optional properties:
+- clocks : List of clocks used by the framebuffer. Clocks listed here
+ are expected to already be configured correctly. The OS must
+ ensure these clocks are not modified or disabled while the
+ simple framebuffer remains active.
+- display : phandle pointing to the primary display hardware node
+
Example:
- framebuffer {
+aliases {
+ display0 = &lcdc0;
+}
+
+chosen {
+ framebuffer0: framebuffer@1d385000 {
compatible = "simple-framebuffer";
reg = <0x1d385000 (1600 * 1200 * 2)>;
width = <1600>;
height = <1200>;
stride = <(1600 * 2)>;
format = "r5g6b5";
+ clocks = <&ahb_gates 36>, <&ahb_gates 43>, <&ahb_gates 44>;
+ display = <&lcdc0>;
+ };
+ stdout-path = "display0";
+};
+
+soc@01c00000 {
+ lcdc0: lcdc@1c0c000 {
+ compatible = "allwinner,sun4i-a10-lcdc";
+ ...
};
+};
+
+
+*) Older devicetree files may have a compatible = "simple-framebuffer" node
+in a different place; operating systems must first enumerate any compatible
+nodes found under chosen and then check for other compatible nodes.
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
index 1e3d5c92b5e3..57a808b588bf 100644
--- a/Documentation/devicetree/of_selftest.txt
+++ b/Documentation/devicetree/of_selftest.txt
@@ -63,7 +63,6 @@ struct device_node {
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
- struct device_node *allnext; /* next in list of all nodes */
...
};
@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null
Figure 1: Generic structure of un-flattened device tree
-*allnext: it is used to link all the nodes of DT into a list. So, for the
- above tree the list would be as follows:
-
-root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2->
-child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null
-
Before executing OF selftest, it is required to attach the test data to
machine's device tree (if present). So, when selftest_data_add() is called,
at first it reads the flattened device tree data linked into the kernel image
@@ -131,11 +124,6 @@ root ('/')
test-child01 null null null
-allnext list:
-
-root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
-->test-sibling3->null
-
Figure 2: Example test data tree to be attached to live tree.
According to the scenario above, the live tree is already present so it isn't
@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the
whole tree). selftest_data_remove() calls detach_node_and_children() that uses
of_detach_node() to detach the nodes from the live device tree.
-To detach a node, of_detach_node() first updates all_next linked list, by
-attaching the previous node's allnext to current node's allnext pointer. And
-then, it either updates the child pointer of given node's parent to its
-sibling or attaches the previous sibling to the given node's sibling, as
-appropriate. That is it :)
+To detach a node, of_detach_node() either updates the child pointer of the
+given node's parent to its sibling or attaches the previous sibling to the
+given node's sibling, as appropriate. That is it :)
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
new file mode 100644
index 000000000000..30ae758e3eef
--- /dev/null
+++ b/Documentation/devicetree/overlay-notes.txt
@@ -0,0 +1,133 @@
+Device Tree Overlay Notes
+-------------------------
+
+This document describes the implementation of the in-kernel
+device tree overlay functionality residing in drivers/of/overlay.c and is a
+companion document to Documentation/devicetree/dt-object-internal.txt[1] &
+Documentation/devicetree/dynamic-resolution-notes.txt[2]
+
+How overlays work
+-----------------
+
+The purpose of a Device Tree overlay is to modify the kernel's live tree so
+that the state of the kernel reflects the changes.
+Since the kernel mainly deals with devices, any new device node that results
+in an active device should have that device created, while if a device node
+is either disabled or removed altogether, the affected device should be
+deregistered.
+
+Let's take an example where we have a foo board with the following base tree,
+which is taken from [1].
+
+---- foo.dts -----------------------------------------------------------------
+ /* FOO platform */
+ / {
+ compatible = "corp,foo";
+
+ /* shared resources */
+ res: res {
+ };
+
+ /* On chip peripherals */
+ ocp: ocp {
+ /* peripherals that are always instantiated */
+ peripheral1 { ... };
+ }
+ };
+---- foo.dts -----------------------------------------------------------------
+
+The overlay bar.dts, when loaded (and resolved as described in [2]), should
+
+---- bar.dts -----------------------------------------------------------------
+/plugin/; /* allow undefined label references and record them */
+/ {
+ .... /* various properties for loader use; i.e. part id etc. */
+ fragment@0 {
+ target = <&ocp>;
+ __overlay__ {
+ /* bar peripheral */
+ bar {
+ compatible = "corp,bar";
+ ... /* various properties and child nodes */
+ }
+ };
+ };
+};
+---- bar.dts -----------------------------------------------------------------
+
+result in foo+bar.dts
+
+---- foo+bar.dts -------------------------------------------------------------
+ /* FOO platform + bar peripheral */
+ / {
+ compatible = "corp,foo";
+
+ /* shared resources */
+ res: res {
+ };
+
+ /* On chip peripherals */
+ ocp: ocp {
+ /* peripherals that are always instantiated */
+ peripheral1 { ... };
+
+ /* bar peripheral */
+ bar {
+ compatible = "corp,bar";
+ ... /* various properties and child nodes */
+ }
+ }
+ };
+---- foo+bar.dts -------------------------------------------------------------
+
+As a result of the overlay, a new device node (bar) has been created,
+so a bar platform device will be registered and, if a matching device driver
+is loaded, the device will be created as expected.
+
+Overlay in-kernel API
+--------------------------------
+
+The API is quite easy to use.
+
+1. Call of_overlay_create() to create and apply an overlay. The return value
+is a cookie identifying this overlay.
+
+2. Call of_overlay_destroy() to remove and clean up the overlay previously
+created via the call to of_overlay_create(). Removal of an overlay that has
+another overlay stacked on top of it will not be permitted.
+
+Finally, if you need to remove all overlays in one-go, just call
+of_overlay_destroy_all() which will remove every single one in the correct
+order.
+
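+For illustration, here is a minimal C sketch of how a driver might use this
+API. It assumes a hypothetical, already unflattened and resolved device_node
+called "overlay_root"; the example_* names are made up, only
+of_overlay_create() and of_overlay_destroy() are real:
+
+	#include <linux/of.h>
+
+	static int example_ov_id;
+
+	static int example_apply_overlay(struct device_node *overlay_root)
+	{
+		int ret = of_overlay_create(overlay_root);
+
+		if (ret < 0)
+			return ret;	/* creation/application failed */
+
+		/* the returned cookie identifies this overlay */
+		example_ov_id = ret;
+		return 0;
+	}
+
+	static void example_remove_overlay(void)
+	{
+		of_overlay_destroy(example_ov_id);
+	}
+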
+Overlay DTS Format
+------------------
+
+The DTS of an overlay should have the following format:
+
+{
+ /* properties ignored by the overlay; i.e. part id etc. for loader use */
+
+ fragment@0 { /* first child node */
+
+ target=<phandle>; /* phandle target of the overlay */
+ or
+ target-path="/path"; /* target path of the overlay */
+
+ __overlay__ {
+ property-a; /* add property-a to the target */
+ node-a { /* add to an existing, or create a node-a */
+ ...
+ };
+ };
+ }
+ fragment@1 { /* second child node */
+ ...
+ };
+ /* more fragments follow */
+}
+
+Using the non-phandle based target method allows one to use a base DT which does
+not contain a __symbols__ node, i.e. it was not compiled with the -@ option.
+The __symbols__ node is only required for the target=<phandle> method, since it
+contains the information required to map from a phandle to a tree location.
diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt
index c3cf0659bd19..b5139d1de811 100644
--- a/Documentation/devicetree/todo.txt
+++ b/Documentation/devicetree/todo.txt
@@ -2,7 +2,6 @@ Todo list for devicetree:
=== General structure ===
- Switch from custom lists to (h)list_head for nodes and properties structure
-- Remove of_allnodes list and iterate using list of child nodes alone
=== CONFIG_OF_DYNAMIC ===
- Switch to RCU for tree updates and get rid of global spinlock
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine/client.txt
index 11fb87ff6cd0..11fb87ff6cd0 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine/client.txt
diff --git a/Documentation/dmatest.txt b/Documentation/dmaengine/dmatest.txt
index dd77a81bdb80..dd77a81bdb80 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmaengine/dmatest.txt
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
new file mode 100644
index 000000000000..766658ccf235
--- /dev/null
+++ b/Documentation/dmaengine/provider.txt
@@ -0,0 +1,366 @@
+DMAengine controller documentation
+==================================
+
+Hardware Introduction
++++++++++++++++++++++
+
+Most of the Slave DMA controllers have the same general principles of
+operation.
+
+They have a given number of channels to use for the DMA transfers, and
+a given number of request lines.
+
+Requests and channels are pretty much orthogonal. Channels can be used
+to serve any of several requests. To put it simply, channels are the
+entities that will be doing the copy, and requests define which endpoints
+are involved.
+
+The request lines actually correspond to physical lines going from the
+DMA-eligible devices to the controller itself. Whenever the device
+will want to start a transfer, it will assert a DMA request (DRQ) by
+asserting that request line.
+
+A very simple DMA controller would only take into account a single
+parameter: the transfer size. At each clock cycle, it would transfer a
+byte of data from one buffer to another, until the transfer size has
+been reached.
+
+That wouldn't work well in the real world, since slave devices might
+require a specific number of bits to be transferred in a single
+cycle. For example, we may want to transfer as much data as the
+physical bus allows to maximize performance when doing a simple
+memory copy operation, but our audio device could have a narrower FIFO
+that requires data to be written exactly 16 or 24 bits at a time. This
+is why most if not all of the DMA controllers can adjust this, using a
+parameter called the transfer width.
+
+Moreover, some DMA controllers, whenever the RAM is used as a source
+or destination, can group the reads or writes in memory into a buffer,
+so instead of having a lot of small memory accesses, which is not
+really efficient, you'll get several bigger transfers. This is done
+using a parameter called the burst size, that defines how many single
+reads/writes it's allowed to do without the controller splitting the
+transfer into smaller sub-transfers.
+
+Our theoretical DMA controller would then only be able to do transfers
+that involve a single contiguous block of data. However, some of the
+transfers we usually need are not like that: we want to copy data from
+non-contiguous buffers to a contiguous buffer, which is called
+scatter-gather.
+
+DMAEngine, at least for mem2dev transfers, requires support for
+scatter-gather. So we're left with two cases here: either we have a
+quite simple DMA controller that doesn't support it, and we'll have to
+implement it in software, or we have a more advanced DMA controller
+that implements scatter-gather in hardware.
+
+The latter are usually programmed using a collection of chunks to
+transfer, and whenever the transfer is started, the controller will go
+over that collection, doing whatever we programmed there.
+
+This collection is usually either a table or a linked list. You will
+then push either the address of the table and its number of elements,
+or the first item of the list, to one channel of the DMA controller,
+and whenever a DRQ is asserted, the controller will go through the
+collection to know where to fetch the data from.
+
+Either way, the format of this collection is completely dependent on
+your hardware. Each DMA controller will require a different structure,
+but all of them will require, for every chunk, at least the source and
+destination addresses, whether it should increment these addresses or
+not and the three parameters we saw earlier: the burst size, the
+transfer width and the transfer size.
+
+One last thing: usually, slave devices won't issue a DRQ by default,
+and you first have to enable this in your slave device driver
+whenever you want to use DMA.
+
+These were just the general memory-to-memory (also called mem2mem) or
+memory-to-device (mem2dev) kinds of transfers. Most devices also
+support other kinds of transfers or memory operations that dmaengine
+supports; these will be detailed later in this document.
+
+DMA Support in Linux
+++++++++++++++++++++
+
+Historically, DMA controller drivers have been implemented using the
+async TX API, to offload operations such as memory copy, XOR,
+cryptography, etc., basically any memory to memory operation.
+
+Over time, the need for memory to device transfers arose, and
+dmaengine was extended. Nowadays, the async TX API is written as a
+layer on top of dmaengine, and acts as a client. Still, dmaengine
+accommodates that API in some cases, and made some design choices to
+ensure that it stayed compatible.
+
+For more information on the Async TX API, please refer to the relevant
+documentation file, Documentation/crypto/async-tx-api.txt.
+
+DMAEngine Registration
+++++++++++++++++++++++
+
+struct dma_device Initialization
+--------------------------------
+
+Just like any other kernel framework, the whole DMAEngine registration
+relies on the driver filling a structure and registering against the
+framework. In our case, that structure is dma_device.
+
+The first thing you need to do in your driver is to allocate this
+structure. Any of the usual memory allocators will do, but you'll also
+need to initialize a few fields in there:
+
+ * channels: should be initialized as a list using the
+ INIT_LIST_HEAD macro for example
+
+ * dev: should hold the pointer to the struct device associated
+ to your current driver instance.
+
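+As an illustration, here is a minimal sketch of such an initialization,
+assuming a hypothetical driver-private "struct foo_dma" wrapper; the
+capability flags and callbacks used here are covered in the next
+subsections, and dma_async_device_register() is the framework's
+registration call:
+
+	#include <linux/dmaengine.h>
+	#include <linux/device.h>
+	#include <linux/list.h>
+	#include <linux/slab.h>
+
+	struct foo_dma {
+		struct dma_device ddev;
+		/* driver private state goes here */
+	};
+
+	static int foo_dma_setup(struct device *dev)
+	{
+		struct foo_dma *fd = devm_kzalloc(dev, sizeof(*fd), GFP_KERNEL);
+
+		if (!fd)
+			return -ENOMEM;
+
+		/* the channel list must be initialized before adding channels */
+		INIT_LIST_HEAD(&fd->ddev.channels);
+		fd->ddev.dev = dev;
+
+		/* advertise the transaction types this controller handles */
+		dma_cap_set(DMA_SLAVE, fd->ddev.cap_mask);
+
+		/* ... set up channels and the device_* callbacks here ... */
+
+		return dma_async_device_register(&fd->ddev);
+	}
+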
+Supported transaction types
+---------------------------
+
+The next thing you need is to set which transaction types your device
+(and driver) supports.
+
+Our dma_device structure has a field called cap_mask that holds the
+various types of transaction supported, and you need to modify this
+mask using the dma_cap_set function, with various flags depending on
+the transaction types you support as an argument.
+
+All those capabilities are defined in the dma_transaction_type enum,
+in include/linux/dmaengine.h
+
+Currently, the types available are:
+ * DMA_MEMCPY
+ - The device is able to do memory to memory copies
+
+ * DMA_XOR
+ - The device is able to perform XOR operations on memory areas
+ - Used to accelerate XOR intensive tasks, such as RAID5
+
+ * DMA_XOR_VAL
+ - The device is able to perform parity check using the XOR
+ algorithm against a memory buffer.
+
+ * DMA_PQ
+ - The device is able to perform RAID6 P+Q computations, P being a
+ simple XOR, and Q being a Reed-Solomon algorithm.
+
+ * DMA_PQ_VAL
+ - The device is able to perform parity check using RAID6 P+Q
+ algorithm against a memory buffer.
+
+ * DMA_INTERRUPT
+ - The device is able to trigger a dummy transfer that will
+ generate periodic interrupts
+ - Used by the client drivers to register a callback that will be
+ called on a regular basis through the DMA controller interrupt
+
+ * DMA_SG
+ - The device supports memory to memory scatter-gather
+ transfers.
+ - Even though a plain memcpy can look like a particular case of a
+ scatter-gather transfer, with a single chunk to transfer, it's a
+ distinct transaction type in the mem2mem transfers case
+
+ * DMA_PRIVATE
+ - The device only supports slave transfers, and as such isn't
+ available for async transfers.
+
+ * DMA_ASYNC_TX
+ - Must not be set by the device, and will be set by the framework
+ if needed
+ - /* TODO: What is it about? */
+
+ * DMA_SLAVE
+ - The device can handle device to memory transfers, including
+ scatter-gather transfers.
+ - While in the mem2mem case we were having two distinct types to
+ deal with a single chunk to copy or a collection of them, here,
+ we just have a single transaction type that is supposed to
+ handle both.
+ - If you want to transfer a single contiguous memory buffer,
+ simply build a scatter list with only one item.
+
+ * DMA_CYCLIC
+ - The device can handle cyclic transfers.
+ - A cyclic transfer is a transfer where the chunk collection will
+ loop over itself, with the last item pointing to the first.
+ - It's usually used for audio transfers, where you want to operate
+ on a single ring buffer that you will fill with your audio data.
+
+ * DMA_INTERLEAVE
+ - The device supports interleaved transfers.
+ - These transfers can transfer data from a non-contiguous buffer
+ to a non-contiguous buffer, as opposed to DMA_SLAVE, which can
+ transfer data from a non-contiguous data set to a contiguous
+ destination buffer.
+ - It's usually used for 2d content transfers, in which case you
+ want to transfer a portion of uncompressed data directly to the
+ display to show it.
+
+These various types will also affect how the source and destination
+addresses change over time.
+
+Addresses pointing to RAM are typically incremented (or decremented)
+after each transfer. In case of a ring buffer, they may loop
+(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
+are typically fixed.
+
+Device operations
+-----------------
+
+Now that we have described the operations we are able to perform, our
+dma_device structure also requires a few function pointers in order to
+implement the actual logic.
+
+The functions that we have to fill in there, and hence have to
+implement, obviously depend on the transaction types you reported as
+supported.
+
+ * device_alloc_chan_resources
+ * device_free_chan_resources
+ - These functions will be called whenever a driver calls
+ dma_request_channel or dma_release_channel for the first/last
+ time on the channel associated with that driver.
+ - They are in charge of allocating/freeing all the needed
+ resources in order for that channel to be useful for your
+ driver.
+ - These functions can sleep.
+
+ * device_prep_dma_*
+ - These functions match the capabilities you registered
+ previously.
+ - These functions all take the buffer or the scatterlist relevant
+ for the transfer being prepared, and should create a hardware
+ descriptor or a list of hardware descriptors from it
+ - These functions can be called from an interrupt context
+ - Any allocation you might do should be using the GFP_NOWAIT
+ flag, in order not to potentially sleep, but without depleting
+ the emergency pool either.
+ - Drivers should try to pre-allocate any memory they might need
+ during the transfer setup at probe time to avoid putting too
+ much pressure on the nowait allocator.
+
+ - It should return a unique instance of the
+ dma_async_tx_descriptor structure, that further represents this
+ particular transfer.
+
+ - This structure can be initialized using the function
+ dma_async_tx_descriptor_init.
+ - You'll also need to set two fields in this structure:
+ + flags:
+ TODO: Can it be modified by the driver itself, or
+ should it be always the flags passed in the arguments
+
+ + tx_submit: A pointer to a function you have to implement,
+ that is supposed to push the current
+ transaction descriptor to a pending queue, waiting
+ for issue_pending to be called (a minimal sketch of a
+ prep callback and its tx_submit follows this list).
+
+ * device_issue_pending
+ - Takes the first transaction descriptor in the pending queue,
+ and starts the transfer. Whenever that transfer is done, it
+ should move to the next transaction in the list.
+ - This function can be called in an interrupt context
+
+ * device_tx_status
+ - Should report the bytes left to go over on the given channel
+ - Should only care about the transaction descriptor passed as
+ argument, not the currently active one on a given channel
+ - The tx_state argument might be NULL
+ - Should use dma_set_residue to report it
+ - In the case of a cyclic transfer, it should only take into
+ account the current period.
+ - This function can be called in an interrupt context.
+
+ * device_control
+ - Used by client drivers to control and configure the channel it
+ has a handle on.
+ - Called with a command and an argument
+ + The command is one of the values listed by the enum
+ dma_ctrl_cmd. The valid commands are:
+ + DMA_PAUSE
+ + Pauses a transfer on the channel
+ + This command should operate synchronously on the channel,
+ pausing right away the work of the given channel
+ + DMA_RESUME
+ + Restarts a transfer on the channel
+ + This command should operate synchronously on the channel,
+ resuming right away the work of the given channel
+ + DMA_TERMINATE_ALL
+ + Aborts all the pending and ongoing transfers on the
+ channel
+ + This command should operate synchronously on the channel,
+ terminating right away all the transfers of the given channel
+ + DMA_SLAVE_CONFIG
+ + Reconfigures the channel with passed configuration
+ + This command should NOT perform synchronously, or on any
+ currently queued transfers, but only on subsequent ones
+ + In this case, the function will receive a
+ dma_slave_config structure pointer as an argument, that
+ will detail which configuration to use.
+ + Even though that structure contains a direction field,
+ this field is deprecated in favor of the direction
+ argument given to the prep_* functions
+ + FSLDMA_EXTERNAL_START
+ + TODO: Why does that even exist?
+ + The argument is an opaque unsigned long. This actually is a
+ pointer to a struct dma_slave_config that should be used only
+ in the DMA_SLAVE_CONFIG.
+
+ * device_slave_caps
+ - Called through the framework by client drivers in order to have
+ an idea of what the properties of the channel allocated to
+ them are.
+ - Such properties are the buswidth, available directions, etc.
+ - Required for every generic layer doing DMA transfers, such as
+ ASoC.
+
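+As promised above, here is a minimal sketch of a prep callback and its
+tx_submit. Everything named foo_* is hypothetical; dma_async_tx_descriptor_init()
+is the real initialization helper, and dma_cookie_assign() comes from the
+driver-internal drivers/dma/dmaengine.h helper header:
+
+	#include <linux/dmaengine.h>
+	#include <linux/slab.h>
+	#include "dmaengine.h"		/* driver-internal cookie helpers */
+
+	struct foo_desc {
+		struct dma_async_tx_descriptor tx;
+		/* hardware descriptors / chunk list would go here */
+	};
+
+	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
+	{
+		/* a real driver would also move the descriptor to its
+		 * pending list here; the transfer itself only starts once
+		 * device_issue_pending() is called */
+		return dma_cookie_assign(tx);
+	}
+
+	static struct dma_async_tx_descriptor *
+	foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			  unsigned int sg_len, enum dma_transfer_direction dir,
+			  unsigned long flags, void *context)
+	{
+		/* GFP_NOWAIT, as explained above: may run in interrupt context */
+		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
+
+		if (!d)
+			return NULL;
+
+		/* ... translate sgl into hardware chunks here ... */
+
+		dma_async_tx_descriptor_init(&d->tx, chan);
+		d->tx.flags = flags;		/* simply keep the passed flags */
+		d->tx.tx_submit = foo_tx_submit;
+
+		return &d->tx;
+	}
+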
+Misc notes (stuff that should be documented, but don't really know
+where to put them)
+------------------------------------------------------------------
+ * dma_run_dependencies
+ - Should be called at the end of an async TX transfer, and can be
+ ignored in the slave transfers case.
+ - Makes sure that dependent operations are run before marking it
+ as complete.
+
+ * dma_cookie_t
+ - it's a DMA transaction ID that will increment over time.
+ - Not really relevant any more since the introduction of virt-dma
+ that abstracts it away.
+
+ * DMA_CTRL_ACK
+ - Undocumented feature
+ - No one really has an idea of what it's about, besides being
+ related to reusing the DMA transaction descriptors or having
+ additional transactions added to it in the async-tx API
+ - Useless in the case of the slave API
+
+General Design Notes
+--------------------
+
+Most of the DMAEngine drivers you'll see are based on a similar design
+that handles the end of transfer interrupts in the handler, but defer
+most work to a tasklet, including the start of a new transfer whenever
+the previous transfer ended.
+
+This is a rather inefficient design though, because the inter-transfer
+latency will be not only the interrupt latency, but also the
+scheduling latency of the tasklet, which leaves the channel idle
+in between and slows down the global transfer rate.
+
+You should avoid this kind of practice: instead of electing a new
+transfer in your tasklet, move that part to the interrupt handler in
+order to have a shorter idle window (which we can't really avoid
+anyway).
+
+Glossary
+--------
+
+Burst: A number of consecutive read or write operations
+ that can be queued to buffers before being flushed to
+ memory.
+Chunk: A contiguous collection of bursts
+Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 9af538be3751..eede6088f978 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -77,6 +77,17 @@ should appear, and then pressing CTRL-R let you specify the patch file
to insert into the message.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Claws Mail (GUI)
+
+Works. Some people use this successfully for patches.
+
+To insert a patch use Message->Insert File (CTRL+i) or an external editor.
+
+If the inserted patch has to be edited in the Claws composition window,
+"Auto wrapping" in Configuration->Preferences->Compose->Wrapping should be
+disabled.
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Evolution (GUI)
Some people use this successfully for patches.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index eb8a10e22f7c..aae9dd13c91f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1272,7 +1272,7 @@ softirq.
1.9 Ext4 file system parameters
-------------------------------
+-------------------------------
Information about mounted ext4 file systems can be found in
/proc/fs/ext4. Each mounted filesystem will have a directory in
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index 403c090aca39..e5274f84dc56 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -2,10 +2,10 @@ SQUASHFS 4.0 FILESYSTEM
=======================
Squashfs is a compressed read-only filesystem for Linux.
-It uses zlib/lzo/xz compression to compress files, inodes and directories.
-Inodes in the system are very small and all blocks are packed to minimise
-data overhead. Block sizes greater than 4K are supported up to a maximum
-of 1Mbytes (default block size 128K).
+It uses zlib, lz4, lzo, or xz compression to compress files, inodes and
+directories. Inodes in the system are very small and all blocks are packed to
+minimise data overhead. Block sizes greater than 4K are supported up to a
+maximum of 1Mbytes (default block size 128K).
Squashfs is intended for general read-only filesystem use, for archival
use (i.e. in cases where a .tar.gz file may be used), and in constrained
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 859918db36b8..d85fbae451ea 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -199,6 +199,33 @@ The active-low state of a GPIO can also be queried using the following call:
Note that these functions should only be used with great moderation ; a driver
should not have to care about the physical line level.
+
+Set multiple GPIO outputs with a single function call
+-----------------------------------------------------
+The following functions set the output values of an array of GPIOs:
+
+ void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+ void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+ void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+ void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+
+The array can be an arbitrary set of GPIOs. The functions will try to set
+GPIOs belonging to the same bank or chip simultaneously if supported by the
+corresponding chip driver. In that case a significant performance
+improvement can be expected. If simultaneous setting is not possible, the
+GPIOs will be set sequentially.
+Note that for optimal performance GPIOs belonging to the same chip should be
+contiguous within the array of descriptors.
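+
+For example, a minimal sketch (assuming the three descriptors below were
+already obtained with gpiod_get() and configured as outputs; the LED usage
+is only illustrative):
+
+	struct gpio_desc *led_descs[3];	/* filled in by gpiod_get() calls */
+	int led_values[3] = { 1, 0, 1 };
+
+	/* set all three outputs, simultaneously if the chip supports it */
+	gpiod_set_array(ARRAY_SIZE(led_descs), led_descs, led_values);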
+
+
GPIOs mapped to IRQs
--------------------
GPIO lines can quite often be used as IRQs. You can get the IRQ number
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index 31e0b5db55d8..90d0f6aba7a6 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -158,12 +158,12 @@ Locking IRQ usage
Input GPIOs can be used as IRQ signals. When this happens, a driver is requested
to mark the GPIO as being used as an IRQ:
- int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
This will prevent the use of non-irq related GPIO APIs until the GPIO IRQ lock
is released:
- void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
When implementing an irqchip inside a GPIO driver, these two functions should
typically be called in the .startup() and .shutdown() callbacks from the
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 793c83dac738..82f48f774afb 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -29,6 +29,7 @@ Supported adapters:
* Intel Wildcat Point-LP (PCH)
* Intel BayTrail (SOC)
* Intel Sunrise Point-H (PCH)
+ * Intel Sunrise Point-LP (PCH)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/i2c/upgrading-clients b/Documentation/i2c/upgrading-clients
index 8e5fbd88c7d1..ccba3ffd6e80 100644
--- a/Documentation/i2c/upgrading-clients
+++ b/Documentation/i2c/upgrading-clients
@@ -79,11 +79,10 @@ static struct i2c_driver example_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "example",
+ .pm = &example_pm_ops,
},
.attach_adapter = example_attach_adapter,
.detach_client = example_detach,
- .suspend = example_suspend,
- .resume = example_resume,
};
@@ -272,10 +271,9 @@ static struct i2c_driver example_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "example",
+ .pm = &example_pm_ops,
},
.id_table = example_idtable,
.probe = example_probe,
.remove = example_remove,
- .suspend = example_suspend,
- .resume = example_resume,
};
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 6b344b516bff..a755b141fa4a 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -36,6 +36,7 @@ MODULE_DEVICE_TABLE(i2c, foo_idtable);
static struct i2c_driver foo_driver = {
.driver = {
.name = "foo",
+ .pm = &foo_pm_ops, /* optional */
},
.id_table = foo_idtable,
@@ -47,8 +48,6 @@ static struct i2c_driver foo_driver = {
.address_list = normal_i2c,
.shutdown = foo_shutdown, /* optional */
- .suspend = foo_suspend, /* optional */
- .resume = foo_resume, /* optional */
.command = foo_command, /* optional, deprecated */
}
@@ -279,8 +278,9 @@ Power Management
If your I2C device needs special handling when entering a system low
power state -- like putting a transceiver into a low power mode, or
-activating a system wakeup mechanism -- do that in the suspend() method.
-The resume() method should reverse what the suspend() method does.
+activating a system wakeup mechanism -- do that by implementing the
+appropriate callbacks in the driver's dev_pm_ops (such as suspend
+and resume).
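+
+For example, a minimal sketch of the foo_pm_ops referenced above (assuming
+the driver only needs system sleep callbacks):
+
+static int foo_suspend(struct device *dev)
+{
+	/* put the transceiver into low power mode, arm a wakeup, ... */
+	return 0;
+}
+
+static int foo_resume(struct device *dev)
+{
+	/* reverse whatever foo_suspend() did */
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
+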
These are standard driver model calls, and they work just like they
would for any other driver stack. The calls can sleep, and can use
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
deleted file mode 100644
index ffb5c80bec3e..000000000000
--- a/Documentation/ia64/kvm.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Currently, kvm module is in EXPERIMENTAL stage on IA64. This means that
-interfaces are not stable enough to use. So, please don't run critical
-applications in virtual machine.
-We will try our best to improve it in future versions!
-
- Guide: How to boot up guests on kvm/ia64
-
-This guide is to describe how to enable kvm support for IA-64 systems.
-
-1. Get the kvm source from git.kernel.org.
- Userspace source:
- git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
- Kernel Source:
- git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
-
-2. Compile the source code.
- 2.1 Compile userspace code:
- (1)cd ./kvm-userspace
- (2)./configure
- (3)cd kernel
- (4)make sync LINUX= $kernel_dir (kernel_dir is the directory of kernel source.)
- (5)cd ..
- (6)make qemu
- (7)cd qemu; make install
-
- 2.2 Compile kernel source code:
- (1) cd ./$kernel_dir
- (2) Make menuconfig
- (3) Enter into virtualization option, and choose kvm.
- (4) make
- (5) Once (4) done, make modules_install
- (6) Make initrd, and use new kernel to reboot up host machine.
- (7) Once (6) done, cd $kernel_dir/arch/ia64/kvm
- (8) insmod kvm.ko; insmod kvm-intel.ko
-
-Note: For step 2, please make sure that host page size == TARGET_PAGE_SIZE of qemu, otherwise, may fail.
-
-3. Get Guest Firmware named as Flash.fd, and put it under right place:
- (1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
-
- (2) If you have no firmware at hand, Please download its source from
- hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
- you can get the firmware's binary in the directory of efi-vfirmware.hg/binaries.
-
- (3) Rename the firmware you owned to Flash.fd, and copy it to /usr/local/share/qemu
-
-4. Boot up Linux or Windows guests:
- 4.1 Create or install a image for guest boot. If you have xen experience, it should be easy.
-
- 4.2 Boot up guests use the following command.
- /usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
- (xx is the number of virtual processors for the guest, now the maximum value is 4)
-
-5. Known possible issue on some platforms with old Firmware.
-
-In the event of strange host crash issues, try to solve it through either of the following ways:
-
-(1): Upgrade your Firmware to the latest one.
-
-(2): Applying the below patch to kernel source.
-diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
-index 0b53344..f02b0f7 100644
---- a/arch/ia64/kernel/pal.S
-+++ b/arch/ia64/kernel/pal.S
-@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
- mov ar.pfs = loc1
- mov rp = loc0
- ;;
-- srlz.d // serialize restoration of psr.l
-+ srlz.i // serialize restoration of psr.l
-+ ;;
- br.ret.sptk.many b0
- END(ia64_pal_call_static)
-
-6. Bug report:
- If you found any issues when use kvm/ia64, Please post the bug info to kvm-ia64-devel mailing list.
- https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
-
-Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
-
-
- Xiantao Zhang <xiantao.zhang@intel.com>
- 2008.3.10
diff --git a/Documentation/input/xpad.txt b/Documentation/input/xpad.txt
index 7cc9a436e6a1..d1b23f295db4 100644
--- a/Documentation/input/xpad.txt
+++ b/Documentation/input/xpad.txt
@@ -1,18 +1,22 @@
-xpad - Linux USB driver for X-Box gamepads
+xpad - Linux USB driver for Xbox compatible controllers
-This is the very first release of a driver for X-Box gamepads.
-Basically, this was hacked away in just a few hours, so don't expect
-miracles.
+This driver supports all first-party and third-party Xbox compatible
+controllers. It has a long history and has enjoyed considerable usage
+as Windows' xinput library caused most PC games to focus on Xbox
+controller compatibility.
-In particular, there is currently NO support for the rumble pack.
-You won't find many ff-aware linux applications anyway.
+Due to backwards compatibility all buttons are reported as digital.
+This only affects Original Xbox controllers. All later controller models
+have only digital face buttons.
+
+Rumble is supported on some models of Xbox 360 controllers but not on
+Original Xbox controllers nor on Xbox One controllers. As of this writing
+the Xbox One's rumble protocol has not been reverse engineered, but it
+could be supported in the future.
0. Notes
--------
-
-Driver updated for kernel 2.6.17.11. (Based on a patch for 2.6.11.4.)
-
The number of buttons/axes reported varies based on 3 things:
- if you are using a known controller
- if you are using a known dance pad
@@ -20,12 +24,16 @@ The number of buttons/axes reported varies based on 3 things:
module configuration for "Map D-PAD to buttons rather than axes for unknown
pads" (module option dpad_to_buttons)
-If you set dpad_to_buttons to 0 and you are using an unknown device (one
-not listed below), the driver will map the directional pad to axes (X/Y),
-if you said N it will map the d-pad to buttons, which is needed for dance
-style games to function correctly. The default is Y.
+If you set dpad_to_buttons to N and you are using an unknown device,
+the driver will map the directional pad to axes (X/Y).
+If you said Y it will map the d-pad to buttons, which is needed for dance
+style games to function correctly. The default is Y.
+
+dpad_to_buttons has no effect for known pads. An erroneous commit message
+claimed dpad_to_buttons could be used to force behavior on known devices.
+This is not true. Both dpad_to_buttons and triggers_to_buttons only affect
+unknown controllers.
-dpad_to_buttons has no effect for known pads.
0.1 Normal Controllers
----------------------
@@ -80,17 +88,29 @@ to the list of supported devices, ensuring that it will work out of the
box in the future.
-1. USB adapter
+1. USB adapters
--------------
+All generations of Xbox controllers speak USB over the wire.
+- Original Xbox controllers use a proprietary connector and require adapters.
+- Wireless Xbox 360 controllers require an 'Xbox 360 Wireless Gaming Receiver
+  for Windows'.
+- Wired Xbox 360 controllers use standard USB connectors.
+- Xbox One controllers can be wireless but speak Wi-Fi Direct and are not
+ yet supported.
+- Xbox One controllers can be wired and use standard Micro-USB connectors.
+
-Before you can actually use the driver, you need to get yourself an
-adapter cable to connect the X-Box controller to your Linux-Box. You
-can buy these online fairly cheap, or build your own.
+
+1.1 Original Xbox USB adapters
+------------------------------
+Using this driver with an Original Xbox controller requires an
+adapter cable to break out the proprietary connector's pins to USB.
+You can buy these online fairly cheap, or build your own.
Such a cable is pretty easy to build. The Controller itself is a USB
compound device (a hub with three ports for two expansion slots and
the controller device) with the only difference in a nonstandard connector
-(5 pins vs. 4 on standard USB connector).
+(5 pins vs. 4 on standard USB 1.0 connectors).
You just need to solder a USB connector onto the cable and keep the
yellow wire unconnected. The other pins have the same order on both
@@ -102,26 +122,41 @@ original one. You can buy an extension cable and cut that instead. That way,
you can still use the controller with your X-Box, if you have one ;)
+
2. Driver Installation
----------------------
-Once you have the adapter cable and the controller is connected, you need
-to load your USB subsystem and should cat /proc/bus/usb/devices.
-There should be an entry like the one at the end [4].
+Once you have the adapter cable, if needed, and the controller connected,
+the xpad module should be auto-loaded. To confirm, you can cat
+/proc/bus/usb/devices. There should be an entry like the one at the end [4].
+
+
-Currently (as of version 0.0.6), the following devices are included:
- original Microsoft XBOX controller (US), vendor=0x045e, product=0x0202
- smaller Microsoft XBOX controller (US), vendor=0x045e, product=0x0289
+3. Supported Controllers
+------------------------
+For a full list of supported controllers and associated vendor and product
+IDs, see the xpad_device[] array [6].
+
+As of the historic version 0.0.6 (2006-10-10) the following devices
+were supported:
+ original Microsoft XBOX controller (US), vendor=0x045e, product=0x0202
+ smaller Microsoft XBOX controller (US), vendor=0x045e, product=0x0289
original Microsoft XBOX controller (Japan), vendor=0x045e, product=0x0285
- InterAct PowerPad Pro (Germany), vendor=0x05fd, product=0x107a
- RedOctane Xbox Dance Pad (US), vendor=0x0c12, product=0x8809
+ InterAct PowerPad Pro (Germany), vendor=0x05fd, product=0x107a
+ RedOctane Xbox Dance Pad (US), vendor=0x0c12, product=0x8809
+
+Unrecognized models of Xbox controllers should function as Generic
+Xbox controllers. Unrecognized Dance Pad controllers require setting
+the module option 'dpad_to_buttons'.
+
+If you have an unrecognized controller, please see 0.3 - Unknown Controllers
-The driver should work with xbox pads not listed above as well, however
-you will need to do something extra for dance pads to work.
-If you have a controller not listed above, see 0.3 - Unknown Controllers
+4. Manual Testing
+-----------------
+To test this driver's functionality you may use 'jstest'.
-If you compiled and installed the driver, test the functionality:
+For example:
> modprobe xpad
> modprobe joydev
> jstest /dev/js0
@@ -134,7 +169,8 @@ show 20 inputs (6 axes, 14 buttons).
It works? Voila, you're done ;)
-3. Thanks
+
+5. Thanks
---------
I have to thank ITO Takayuki for the detailed info on his site
@@ -145,14 +181,14 @@ His useful info and both the usb-skeleton as well as the iforce input driver
the basic functionality.
-4. References
--------------
-1. http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki)
-2. http://xpad.xbox-scene.com/
-3. http://www.markosweb.com/www/xboxhackz.com/
+6. References
+-------------
-4. /proc/bus/usb/devices - dump from InterAct PowerPad Pro (Germany):
+[1]: http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki)
+[2]: http://xpad.xbox-scene.com/
+[3]: http://www.markosweb.com/www/xboxhackz.com/
+[4]: /proc/bus/usb/devices - dump from InterAct PowerPad Pro (Germany):
T: Bus=01 Lev=03 Prnt=04 Port=00 Cnt=01 Dev#= 5 Spd=12 MxCh= 0
D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=32 #Cfgs= 1
@@ -162,7 +198,7 @@ I: If#= 0 Alt= 0 #EPs= 2 Cls=58(unk. ) Sub=42 Prot=00 Driver=(none)
E: Ad=81(I) Atr=03(Int.) MxPS= 32 Ivl= 10ms
E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl= 10ms
-5. /proc/bus/usb/devices - dump from Redoctane Xbox Dance Pad (US):
+[5]: /proc/bus/usb/devices - dump from Redoctane Xbox Dance Pad (US):
T: Bus=01 Lev=02 Prnt=09 Port=00 Cnt=01 Dev#= 10 Spd=12 MxCh= 0
D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
@@ -173,7 +209,12 @@ I: If#= 0 Alt= 0 #EPs= 2 Cls=58(unk. ) Sub=42 Prot=00 Driver=xpad
E: Ad=82(I) Atr=03(Int.) MxPS= 32 Ivl=4ms
E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl=4ms
---
+[6]: http://lxr.free-electrons.com/ident?i=xpad_device
+
+
+
+7. Historic Edits
+-----------------
Marko Friedemann <mfr@bmx-chemnitz.de>
2002-07-16
- original doc
@@ -181,3 +222,5 @@ Marko Friedemann <mfr@bmx-chemnitz.de>
Dominic Cerquetti <binary1230@yahoo.com>
2005-03-19
- added stuff for dance pads, new d-pad->axes mappings
+
+Later changes may be viewed with 'git log Documentation/input/xpad.txt'
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 10b8cc1bda8d..4df73da11adc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -829,6 +829,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CONFIG_DEBUG_PAGEALLOC, hence this option will not help
tracking down these problems.
+ debug_pagealloc=
+ [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+ parameter enables the feature at boot time. It is
+ disabled by default. Leaving it disabled avoids
+ allocating a huge chunk of memory for debug pagealloc,
+ and the system then behaves mostly the same as a
+ kernel built without CONFIG_DEBUG_PAGEALLOC.
+ on: enable the feature
+
debugpat [X86] Enable PAT debugging
decnet.addr= [HW,NET]
@@ -1228,9 +1237,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
multiple times interleaved with hugepages= to reserve
huge pages of different sizes. Valid pages sizes on
x86-64 are 2M (when the CPU supports "pse") and 1G
- (when the CPU supports the "pdpe1gb" cpuinfo flag)
- Note that 1GB pages can only be allocated at boot time
- using hugepages= and not freed afterwards.
+ (when the CPU supports the "pdpe1gb" cpuinfo flag).
hvc_iucv= [S390] Number of z/VM IUCV hypervisor console (HVC)
terminal devices. Valid values: 0..8
@@ -1369,6 +1376,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Formats: { "ima" | "ima-ng" }
Default: "ima-ng"
+ ima_template_fmt=
+ [IMA] Define a custom template format.
+ Format: { "field1|...|fieldN" }
+
ima.ahash_minsize= [IMA] Minimum file size for asynchronous hash usage
Format: <min_file_size>
Set the minimal file size for using asynchronous hash.
@@ -2515,6 +2526,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
OSS [HW,OSS]
See Documentation/sound/oss/oss-parameters.txt
+ page_owner= [KNL] Boot-time page_owner enabling option.
+ Storage of the information about who allocated
+ each page is disabled by default. With this switch,
+ it can be turned on.
+ on: enable the feature
+
panic= [KNL] Kernel behaviour on panic: delay <timeout>
timeout > 0: seconds before rebooting
timeout = 0: wait forever
@@ -3443,6 +3460,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
neutralize any effect of /proc/sys/kernel/sysrq.
Useful for debugging.
+ tcpmhash_entries= [KNL,NET]
+ Set the number of tcp_metrics_hash slots.
+ Default value is 8192 or 16384 depending on total
+ ram pages. This is used to specify the TCP metrics
+ cache size. See Documentation/networking/ip-sysctl.txt
+ "tcp_no_metrics_save" section for more details.
+
tdfx= [HW,DRM]
test_suspend= [SUSPEND][,N]
@@ -3555,6 +3579,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See also Documentation/trace/ftrace.txt "trace options"
section.
+ tp_printk[FTRACE]
+ Have the tracepoints sent to printk as well as the
+ tracing ring buffer. This is useful for early boot up
+ where the system hangs or reboots and does not give the
+ option for reading the tracing buffer or performing a
+ ftrace_dump_on_oops.
+
+ To turn off having tracepoints sent to printk,
+ echo 0 > /proc/sys/kernel/tracepoint_printk
+ Note, echoing 1 into this file without the
+ tracepoint_printk kernel cmdline option has no effect.
+
+ ** CAUTION **
+
+ Having tracepoints sent to printk() and activating high
+ frequency tracepoints such as irq or sched can cause
+ the system to live lock.
+
traceoff_on_warning
[FTRACE] enable this option to disable tracing when a
warning is hit. This turns off "tracing_on". Tracing can
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index f87241dfed87..1be59a3a521c 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -173,7 +173,7 @@ This should be done only after any attributes or children of the kobject
have been initialized properly, as userspace will instantly start to look
for them when this call happens.
-When the kobject is removed from the kernel (details on how to do that is
+When the kobject is removed from the kernel (details on how to do that are
below), the uevent for KOBJ_REMOVE will be automatically created by the
kobject core, so the caller does not have to worry about doing that by
hand.
diff --git a/tools/testing/selftests/README.txt b/Documentation/kselftest.txt
index 2660d5ff9179..a87d840bacfe 100644
--- a/tools/testing/selftests/README.txt
+++ b/Documentation/kselftest.txt
@@ -15,37 +15,45 @@ Running the selftests (hotplug tests are run in limited mode)
=============================================================
To build the tests:
-
$ make -C tools/testing/selftests
To run the tests:
-
$ make -C tools/testing/selftests run_tests
+To build and run the tests with a single command, use:
+ $ make kselftest
+
- note that some tests will require root privileges.
-To run only tests targeted for a single subsystem: (including
-hotplug targets in limited mode)
- $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
+Running a subset of selftests
+========================================
+You can use the "TARGETS" variable on the make command line to specify
+a single test to run, or a list of tests to run.
+
+To run only tests targeted for a single subsystem:
+ $ make -C tools/testing/selftests TARGETS=ptrace run_tests
+
+You can specify multiple tests to build and run:
+ $ make TARGETS="size timers" kselftest
+
+See the top-level tools/testing/selftests/Makefile for the list of all
+possible targets.
-See the top-level tools/testing/selftests/Makefile for the list of all possible
-targets.
Running the full range hotplug selftests
========================================
-To build the tests:
-
+To build the hotplug tests:
$ make -C tools/testing/selftests hotplug
-To run the tests:
-
+To run the hotplug tests:
$ make -C tools/testing/selftests run_hotplug
- note that some tests will require root privileges.
+
Contributing new tests
======================
diff --git a/Documentation/local_ops.txt b/Documentation/local_ops.txt
index 300da4bdfdbd..407576a23317 100644
--- a/Documentation/local_ops.txt
+++ b/Documentation/local_ops.txt
@@ -8,6 +8,11 @@ to implement them for any given architecture and shows how they can be used
properly. It also stresses on the precautions that must be taken when reading
those local variables across CPUs when the order of memory writes matters.
+Note that local_t based operations are not recommended for general kernel use.
+Please use the this_cpu operations instead unless there is really a special purpose.
+Most uses of local_t in the kernel have been replaced by this_cpu operations.
+this_cpu operations combine the relocation with the local_t-like semantics in
+a single instruction and yield more compact and faster executing code.
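+
+For example, a per-cpu event counter can simply be handled like this
+(a minimal sketch; the counter name is illustrative):
+
+	static DEFINE_PER_CPU(unsigned long, my_counter);
+
+	/* relocation to this CPU's copy and the increment in one operation */
+	this_cpu_inc(my_counter);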
* Purpose of local atomic operations
@@ -87,10 +92,10 @@ the per cpu variable. For instance :
local_inc(&get_cpu_var(counters));
put_cpu_var(counters);
-If you are already in a preemption-safe context, you can directly use
-__get_cpu_var() instead.
+If you are already in a preemption-safe context, you can use
+this_cpu_ptr() instead.
- local_inc(&__get_cpu_var(counters));
+ local_inc(this_cpu_ptr(&counters));
@@ -134,7 +139,7 @@ static void test_each(void *info)
{
/* Increment the counter from a non preemptible context */
printk("Increment on cpu %d\n", smp_processor_id());
- local_inc(&__get_cpu_var(counters));
+ local_inc(this_cpu_ptr(&counters));
/* This is what incrementing the variable would look like within a
* preemptible context (it disables preemption) :
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
index 60f43ff629aa..1092ad9578da 100644
--- a/Documentation/mailbox.txt
+++ b/Documentation/mailbox.txt
@@ -53,7 +53,7 @@ static void message_from_remote(struct mbox_client *cl, void *mssg)
{
struct demo_client *dc = container_of(mbox_client,
struct demo_client, cl);
- if (dc->aysnc) {
+ if (dc->async) {
if (is_an_ack(mssg)) {
/* An ACK to our last sample sent */
return; /* Or do something else here */
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 7ee2ae6d5451..70a09f8a0383 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1633,6 +1633,48 @@ There are some more advanced barrier functions:
operations" subsection for information on where to use these.
+ (*) dma_wmb();
+ (*) dma_rmb();
+
+ These are for use with consistent memory to guarantee the ordering
+ of writes or reads of shared memory accessible to both the CPU and a
+ DMA capable device.
+
+ For example, consider a device driver that shares memory with a device
+ and uses a descriptor status value to indicate if the descriptor belongs
+ to the device or the CPU, and a doorbell to notify it when new
+ descriptors are available:
+
+ if (desc->status != DEVICE_OWN) {
+ /* do not read data until we own descriptor */
+ dma_rmb();
+
+ /* read/modify data */
+ read_data = desc->data;
+ desc->data = write_data;
+
+ /* flush modifications before status update */
+ dma_wmb();
+
+ /* assign ownership */
+ desc->status = DEVICE_OWN;
+
+ /* force memory to sync before notifying device via MMIO */
+ wmb();
+
+ /* notify device of new descriptors */
+ writel(DESC_NOTIFY, doorbell);
+ }
+
+ The dma_rmb() allows us to guarantee the device has released ownership
+ before we read the data from the descriptor, and the dma_wmb() allows
+ us to guarantee the data is written to the descriptor before the device
+ can see it now has ownership. The wmb() is needed to guarantee that the
+ cache coherent memory writes have completed before attempting a write to
+ the cache incoherent MMIO region.
+
+ See Documentation/DMA-API.txt for more information on consistent memory.
+
MMIO WRITE BARRIER
------------------
diff --git a/Documentation/mic/mpssd/Makefile b/Documentation/mic/mpssd/Makefile
index 0f3156888048..f47fe6ba7300 100644
--- a/Documentation/mic/mpssd/Makefile
+++ b/Documentation/mic/mpssd/Makefile
@@ -1,5 +1,5 @@
# List of programs to build
-hostprogs-y := mpssd
+hostprogs-$(CONFIG_X86_64) := mpssd
mpssd-objs := mpssd.o sysfs.o
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index eeb5b2e97bed..83bf4986baea 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2230,11 +2230,8 @@ balance-rr: This mode is the only mode that will permit a single
It is possible to adjust TCP/IP's congestion limits by
altering the net.ipv4.tcp_reordering sysctl parameter. The
- usual default value is 3, and the maximum useful value is 127.
- For a four interface balance-rr bond, expect that a single
- TCP/IP stream will utilize no more than approximately 2.3
- interface's worth of throughput, even after adjusting
- tcp_reordering.
+ usual default value is 3. But keep in mind TCP stack is able
+ to automatically increase this when it detects reorders.
Note that the fraction of packets that will be delivered out of
order is highly variable, and is unlikely to be zero. The level
diff --git a/Documentation/networking/fib_trie.txt b/Documentation/networking/fib_trie.txt
index 0723db7f8495..fe719388518b 100644
--- a/Documentation/networking/fib_trie.txt
+++ b/Documentation/networking/fib_trie.txt
@@ -73,8 +73,8 @@ trie_leaf_remove()
trie_rebalance()
The key function for the dynamic trie after any change in the trie
- it is run to optimize and reorganize. Tt will walk the trie upwards
- towards the root from a given tnode, doing a resize() at each step
+ it is run to optimize and reorganize. It will walk the trie upwards
+ towards the root from a given tnode, doing a resize() at each step
to implement level compression.
resize()
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a476b08a43e0..9bffdfc648dc 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -383,9 +383,17 @@ tcp_orphan_retries - INTEGER
may consume significant resources. Cf. tcp_max_orphans.
tcp_reordering - INTEGER
- Maximal reordering of packets in a TCP stream.
+ Initial reordering level of packets in a TCP stream.
+ The TCP stack can then dynamically adjust the flow's reordering level
+ between this initial value and tcp_max_reordering.
Default: 3
+tcp_max_reordering - INTEGER
+ Maximal reordering level of packets in a TCP stream.
+ 300 is a fairly conservative value, but you might increase it
+ if paths are using per-packet load balancing (like bonding rr mode).
+ Default: 300
+
tcp_retrans_collapse - BOOLEAN
Bug-to-bug compatibility with some broken printers.
On retransmit try to send bigger packets to work around bugs in
@@ -1466,6 +1474,19 @@ suppress_frag_ndisc - INTEGER
1 - (default) discard fragmented neighbor discovery packets
0 - allow fragmented neighbor discovery packets
+optimistic_dad - BOOLEAN
+ Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
+ 0: disabled (default)
+ 1: enabled
+
+use_optimistic - BOOLEAN
+ If enabled, do not classify optimistic addresses as deprecated during
+ source address selection. Preferred addresses will still be chosen
+ before optimistic addresses, subject to other ranking in the source
+ address selection algorithm.
+ 0: disabled (default)
+ 1: enabled
+
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
new file mode 100644
index 000000000000..cf996394e466
--- /dev/null
+++ b/Documentation/networking/ipvlan.txt
@@ -0,0 +1,107 @@
+
+ IPVLAN Driver HOWTO
+
+Initial Release:
+ Mahesh Bandewar <maheshb AT google.com>
+
+1. Introduction:
+ This is conceptually very similar to the macvlan driver with one major
+exception of using L3 for mux-ing /demux-ing among slaves. This property makes
+the master device share the L2 with it's slave devices. I have developed this
+driver in conjuntion with network namespaces and not sure if there is use case
+outside of it.
+
+
+2. Building and Installation:
+ In order to build the driver, please select the config item CONFIG_IPVLAN.
+The driver can be built into the kernel (CONFIG_IPVLAN=y) or as a module
+(CONFIG_IPVLAN=m).
+
+
+3. Configuration:
+ There are no module parameters for this driver and it can be configured
+using IProute2/ip utility.
+
+ ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | L3 }
+
+ e.g. ip link add link ipvl0 eth0 type ipvlan mode l2
+
+
+4. Operating modes:
+ IPvlan has two modes of operation - L2 and L3. For a given master device,
+you can select one of these two modes and all slaves on that master will
+operate in the same (selected) mode. The RX mode is almost identical except
+that in L3 mode the slaves won't receive any multicast / broadcast traffic.
+L3 mode is more restrictive since routing is controlled from the other (mostly)
+default namespace.
+
+4.1 L2 mode:
+ In this mode TX processing happens on the stack instance attached to the
+slave device and packets are switched and queued to the master device to send
+out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
+as well.
+
+4.2 L3 mode:
+ In this mode TX processing up to L3 happens on the stack instance attached
+to the slave device and packets are switched to the stack instance of the
+master device for the L2 processing and routing from that instance will be
+used before packets are queued on the outbound device. In this mode the slaves
+will neither receive nor send multicast / broadcast traffic.
+
+
+5. What to choose (macvlan vs. ipvlan)?
+ These two devices are very similar in many regards and the specific use
+case could very well define which device to choose. If one of the following
+situations defines your use case then you can choose to use ipvlan -
+ (a) The Linux host that is connected to the external switch / router has
+a policy configured that allows only one MAC per port.
+ (b) The number of virtual devices created on a master exceeds the MAC
+capacity, putting the NIC in promiscuous mode, and degraded performance is a
+concern.
+ (c) The slave device is to be put into a hostile / untrusted network
+namespace where L2 on the slave could be changed / misused.
+
+
+6. Example configuration:
+
+ +=============================================================+
+ | Host: host1 |
+ | |
+ | +----------------------+ +----------------------+ |
+ | | NS:ns0 | | NS:ns1 | |
+ | | | | | |
+ | | | | | |
+ | | ipvl0 | | ipvl1 | |
+ | +----------#-----------+ +-----------#----------+ |
+ | # # |
+ | ################################ |
+ | # eth0 |
+ +==============================#==============================+
+
+
+ (a) Create two network namespaces - ns0, ns1
+ ip netns add ns0
+ ip netns add ns1
+
+ (b) Create two ipvlan slaves on eth0 (master device)
+ ip link add link eth0 ipvl0 type ipvlan mode l2
+ ip link add link eth0 ipvl1 type ipvlan mode l2
+
+ (c) Assign slaves to the respective network namespaces
+ ip link set dev ipvl0 netns ns0
+ ip link set dev ipvl1 netns ns1
+
+ (d) Now switch to the namespace (ns0 or ns1) to configure the slave devices
+ - For ns0
+ (1) ip netns exec ns0 bash
+ (2) ip link set dev ipvl0 up
+ (3) ip link set dev lo up
+ (4) ip -4 addr add 127.0.0.1 dev lo
+ (5) ip -4 addr add $IPADDR dev ipvl0
+ (6) ip -4 route add default via $ROUTER dev ipvl0
+ - For ns1
+ (1) ip netns exec ns1 bash
+ (2) ip link set dev ipvl1 up
+ (3) ip link set dev lo up
+ (4) ip -4 addr add 127.0.0.1 dev lo
+ (5) ip -4 addr add $IPADDR dev ipvl1
+ (6) ip -4 route add default via $ROUTER dev ipvl1
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index 96cccebb839b..0ace6e776ac8 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -138,7 +138,7 @@ Other ethtool Commands:
To enable Flow Director
ethtool -K ethX ntuple on
To add a filter
- Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a
+ Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 10.0.128.23
action 1
To see the list of filters currently present:
ethtool -u ethX
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 2090895b08d4..e655e2453c98 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -1,12 +1,12 @@
STMicroelectronics 10/100/1000 Synopsys Ethernet driver
-Copyright (C) 2007-2013 STMicroelectronics Ltd
+Copyright (C) 2007-2014 STMicroelectronics Ltd
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
(Synopsys IP blocks).
-Currently this network device driver is for all STM embedded MAC/GMAC
+Currently this network device driver is for all STi embedded MAC/GMAC
(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
FF1152AMT0221 D1215994A VIRTEX FPGA board.
@@ -22,6 +22,9 @@ The kernel configuration option is STMMAC_ETH:
Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) --->
STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH)
+CONFIG_STMMAC_PLATFORM: is to enable the platform driver.
+CONFIG_STMMAC_PCI: is to enable the pci driver.
+
2) Driver parameters list:
debug: message level (0: no output, 16: all);
phyaddr: to manually provide the physical address to the PHY device;
@@ -45,10 +48,11 @@ Driver parameters can be also passed in command line by using:
The xmit method is invoked when the kernel needs to transmit a packet; it sets
the descriptors in the ring and informs the DMA engine that there is a packet
ready to be transmitted.
-Once the controller has finished transmitting the packet, an interrupt is
-triggered; So the driver will be able to release the socket buffers.
By default, the driver sets the NETIF_F_SG bit in the features field of the
-net_device structure enabling the scatter/gather feature.
+net_device structure enabling the scatter-gather feature. This is true on
+chips and configurations where the checksum can be done in hardware.
+Once the controller has finished transmitting the packet, napi will be
+scheduled to release the transmit resources.
4.2) Receive process
When one or more packets are received, an interrupt happens. The interrupts
@@ -58,20 +62,12 @@ This is based on NAPI so the interrupt handler signals only if there is work
to be done, and it exits.
Then the poll method will be scheduled at some future point.
The incoming packets are stored, by the DMA, in a list of pre-allocated socket
-buffers in order to avoid the memcpy (Zero-copy).
+buffers in order to avoid the memcpy (zero-copy).
4.3) Interrupt Mitigation
The driver is able to mitigate the number of its DMA interrupts
using NAPI for the reception on chips older than the 3.50.
New chips have an HW RX-Watchdog used for this mitigation.
-
-On Tx-side, the mitigation schema is based on a SW timer that calls the
-tx function (stmmac_tx) to reclaim the resource after transmitting the
-frames.
-Also there is another parameter (like a threshold) used to program
-the descriptors avoiding to set the interrupt on completion bit in
-when the frame is sent (xmit).
-
Mitigation parameters can be tuned by ethtool.
4.4) WOL
@@ -79,7 +75,7 @@ Wake up on Lan feature through Magic and Unicast frames are supported for the
GMAC core.
4.5) DMA descriptors
-Driver handles both normal and enhanced descriptors. The latter has been only
+Driver handles both normal and alternate descriptors. The latter has been only
tested on DWC Ether MAC 10/100/1000 Universal version 3.41a and later.
STMMAC supports DMA descriptor to operate both in dual buffer (RING)
@@ -91,9 +87,20 @@ In CHAINED mode each descriptor will have pointer to next descriptor in
the list, hence creating the explicit chaining in the descriptor itself,
whereas such explicit chaining is not possible in RING mode.
+4.5.1) Extended descriptors
+ The extended descriptors give us information about the Ethernet payload
+ when it is carrying PTP packets or TCP/UDP/ICMP over IP.
+ These are not available on GMAC Synopsys chips older than the 3.50.
+ At probe time the driver will decide if these can be actually used.
+ This support also is mandatory for PTPv2 because the extra descriptors
+ are used for saving the hardware timestamps and Extended Status.
+
4.6) Ethtool support
-Ethtool is supported. Driver statistics and internal errors can be taken using:
-ethtool -S ethX command. It is possible to dump registers etc.
+Ethtool is supported.
+
+For example, driver statistics (including RMON) and internal errors can be
+obtained using:
+ # ethtool -S ethX
4.7) Jumbo and Segmentation Offloading
Jumbo frames are supported and tested for the GMAC.
@@ -101,12 +108,11 @@ The GSO has been also added but it's performed in software.
LRO is not supported.
4.8) Physical
-The driver is compatible with PAL to work with PHY and GPHY devices.
+The driver is compatible with the Physical Abstraction Layer and can be
+connected to PHY and GPHY devices.
4.9) Platform information
-Several driver's information can be passed through the platform
-These are included in the include/linux/stmmac.h header file
-and detailed below as well:
+Several pieces of information can be passed through the platform data and the
+device-tree.
struct plat_stmmacenet_data {
char *phy_bus_name;
@@ -125,15 +131,18 @@ struct plat_stmmacenet_data {
int force_sf_dma_mode;
int force_thresh_dma_mode;
int riwt_off;
+ int max_speed;
+ int maxmtu;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
void *(*setup)(struct platform_device *pdev);
+ void (*free)(struct platform_device *pdev, void *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
void *custom_cfg;
void *custom_data;
void *bsp_priv;
- };
+};
Where:
o phy_bus_name: phy bus name to attach to the stmmac.
@@ -258,32 +267,43 @@ and the second one, with a real PHY device attached to the bus,
by using the stmmac_mdio_bus_data structure (to provide the id, the
reset procedure etc).
-4.10) List of source files:
- o Kconfig
- o Makefile
- o stmmac_main.c: main network device driver;
- o stmmac_mdio.c: mdio functions;
- o stmmac_pci: PCI driver;
- o stmmac_platform.c: platform driver
- o stmmac_ethtool.c: ethtool support;
- o stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
- (only tested on ST40 platforms based);
+Note that, on newer chips where the HW capability register is available, many
+configurations are discovered at run-time, for example whether EEE, HW csum,
+PTP, enhanced descriptors etc. are actually available. As a strategy adopted
+in this driver, the information from the HW capability register can replace
+what has been passed from the platform.
+
+4.10) Device-tree support.
+
+Please see the following document:
+ Documentation/devicetree/bindings/net/stmmac.txt
+
+and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
+
+4.11) This is a summary of the content of some relevant files:
+ o stmmac_main.c: to implement the main network device driver;
+ o stmmac_mdio.c: to provide mdio functions;
+ o stmmac_pci: this is the PCI driver;
+ o stmmac_platform.c: this is the platform driver (OF supported);
+ o stmmac_ethtool.c: to implement the ethtool support;
o stmmac.h: private driver structure;
o common.h: common definitions and VFTs;
o descs.h: descriptor structure definitions;
- o dwmac1000_core.c: GMAC core functions;
- o dwmac1000_dma.c: dma functions for the GMAC chip;
- o dwmac1000.h: specific header file for the GMAC;
- o dwmac100_core: MAC 100 core and dma code;
- o dwmac100_dma.c: dma functions for the MAC chip;
+ o dwmac1000_core.c: dwmac GiGa core functions;
+ o dwmac1000_dma.c: dma functions for the GMAC chip;
+ o dwmac1000.h: specific header file for the dwmac GiGa;
+ o dwmac100_core: dwmac 100 core code;
+ o dwmac100_dma.c: dma functions for the dwmac 100 chip;
o dwmac1000.h: specific header file for the MAC;
- o dwmac_lib.c: generic DMA functions shared among chips;
+ o dwmac_lib.c: generic DMA functions;
o enh_desc.c: functions for handling enhanced descriptors;
o norm_desc.c: functions for handling normal descriptors;
o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes;
o mmc_core.c/mmc.h: Management MAC Counters;
- o stmmac_hwtstamp.c: HW timestamp support for PTP
- o stmmac_ptp.c: PTP 1588 clock
+ o stmmac_hwtstamp.c: HW timestamp support for PTP;
+ o stmmac_ptp.c: PTP 1588 clock;
+ o dwmac-<XXX>.c: these are the platform glue-logic files; e.g. dwmac-sti.c
+ for STMicroelectronics SoCs.
5) Debug Information
@@ -298,23 +318,14 @@ to get statistics: e.g. using: ethtool -S ethX
(that shows the Management counters (MMC) if supported)
or sees the MAC/DMA registers: e.g. using: ethtool -d ethX
-Compiling the Kernel with CONFIG_DEBUG_FS and enabling the
-STMMAC_DEBUG_FS option the driver will export the following
+When compiling the kernel with CONFIG_DEBUG_FS, the driver will export the following
debugfs entries:
/sys/kernel/debug/stmmaceth/descriptors_status
To show the DMA TX/RX descriptor rings
-Developer can also use the "debug" module parameter to get
-further debug information.
-
-In the end, there are other macros (that cannot be enabled
-via menuconfig) to turn-on the RX/TX DMA debugging,
-specific MAC core debug printk etc. Others to enable the
-debug in the TX and RX processes.
-All these are only useful during the developing stage
-and should never enabled inside the code for general usage.
-In fact, these can generate an huge amount of debug messages.
+Developers can also use the "debug" module parameter to get further debug
+information (please see: NETIF Msg Level).
6) Energy Efficient Ethernet
@@ -337,15 +348,7 @@ To enter in Tx LPI mode the driver needs to have a software timer
that enable and disable the LPI mode when there is nothing to be
transmitted.
-7) Extended descriptors
-The extended descriptors give us information about the receive Ethernet payload
-when it is carrying PTP packets or TCP/UDP/ICMP over IP.
-These are not available on GMAC Synopsys chips older than the 3.50.
-At probe time the driver will decide if these can be actually used.
-This support also is mandatory for PTPv2 because the extra descriptors 6 and 7
-are used for saving the hardware timestamps.
-
-8) Precision Time Protocol (PTP)
+7) Precision Time Protocol (PTP)
The driver supports the IEEE 1588-2002, Precision Time Protocol (PTP),
which enables precise synchronization of clocks in measurement and
control systems implemented with technologies such as network
@@ -355,7 +358,7 @@ In addition to the basic timestamp features mentioned in IEEE 1588-2002
Timestamps, new GMAC cores support the advanced timestamp features.
IEEE 1588-2008 that can be enabled when configure the Kernel.
-9) SGMII/RGMII supports
+8) SGMII/RGMII supports
New GMAC devices provide own way to manage RGMII/SGMII.
This information is available at run-time by looking at the
HW capability register. This means that the stmmac can manage
@@ -364,8 +367,3 @@ In fact, the HW provides a subset of extended registers to
restart the ANE, verify Full/Half duplex mode and Speed.
Also thanks to these registers it is possible to look at the
Auto-negotiated Link Parter Ability.
-
-10) TODO:
- o XGMAC is not supported.
- o Complete the TBI & RTBI support.
- o extend VLAN support for 3.70a SYNP GMAC.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
new file mode 100644
index 000000000000..f981a9295a39
--- /dev/null
+++ b/Documentation/networking/switchdev.txt
@@ -0,0 +1,59 @@
+Switch (and switch-ish) device drivers HOWTO
+============================================
+
+Please note that the word "switch" is used here in a very generic meaning.
+This includes devices supporting L2/L3 but also various flow offloading chips,
+including switches embedded into SR-IOV NICs.
+
+Let's describe a topology a bit. Imagine the following example:
+
+ +----------------------------+ +---------------+
+ | SOME switch chip | | CPU |
+ +----------------------------+ +---------------+
+ port1 port2 port3 port4 MNGMNT | PCI-E |
+ | | | | | +---------------+
+ PHY PHY | | | | NIC0 NIC1
+ | | | | | |
+ | | +- PCI-E -+ | |
+ | +------- MII -------+ |
+ +------------- MII ------------+
+
+In this example, there are two independent lines between the switch silicon
+and the CPU. The NIC0 and NIC1 drivers are not aware of the switch's presence.
+They are separate from the switch driver. The SOME switch chip is managed by a
+driver via the PCI-E device MNGMNT. Note that the MNGMNT device, NIC0 and NIC1
+may be connected to some other type of bus.
+
+Now, for the previous example, this is the representation in the kernel:
+
+ +----------------------------+ +---------------+
+ | SOME switch chip | | CPU |
+ +----------------------------+ +---------------+
+ sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT | PCI-E |
+ | | | | | +---------------+
+ PHY PHY | | | | eth0 eth1
+ | | | | | |
+ | | +- PCI-E -+ | |
+ | +------- MII -------+ |
+ +------------- MII ------------+
+
+Let's call the example switch driver for the SOME switch chip "SOMEswitch". This
+driver takes care of the PCI-E device MNGMNT. There is a netdevice instance sw0pX
+created for each port of the switch. These netdevices are instances
+of the "SOMEswitch" driver. The sw0pX netdevices serve as a "representation"
+of the switch chip. eth0 and eth1 are instances of some other existing driver.
+
+The only difference between a switch-port netdevice and an ordinary netdevice
+is that it implements a couple more NDOs (a short sketch follows the list
+below):
+
+ ndo_switch_parent_id_get - This returns the same ID for two port netdevices
+ of the same physical switch chip. This is
+ mandatory to be implemented by all switch drivers
+ and serves the caller for recognition of a port
+ netdevice.
+ ndo_switch_parent_* - Functions that serve for a manipulation of the switch
+ chip itself (it can be thought of as a "parent" of the
+ port, therefore the name). They are not port-specific.
+ Caller might use arbitrary port netdevice of the same
+ switch and it will make no difference.
+ ndo_switch_port_* - Functions that serve for a port-specific manipulation.
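+
+A minimal sketch of how a port driver might wire up the parent ID callback
+follows. The prototype is assumed here to use struct netdev_phys_item_id (the
+type used for physical port IDs); the someswitch_* names and the chip id
+fields are placeholders:
+
+static int someswitch_parent_id_get(struct net_device *dev,
+				    struct netdev_phys_item_id *psid)
+{
+	struct someswitch_port *port = netdev_priv(dev);
+
+	/* report the same ID for every port netdevice of this chip */
+	psid->id_len = port->chip->id_len;
+	memcpy(psid->id, port->chip->id, psid->id_len);
+	return 0;
+}
+
+static const struct net_device_ops someswitch_netdev_ops = {
+	/* ... usual ndo_open/ndo_stop/ndo_start_xmit ... */
+	.ndo_switch_parent_id_get = someswitch_parent_id_get,
+};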
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1d6d02d6ba52..a5c784c89312 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -122,7 +122,7 @@ SOF_TIMESTAMPING_RAW_HARDWARE:
1.3.3 Timestamp Options
-The interface supports one option
+The interface supports the options
SOF_TIMESTAMPING_OPT_ID:
@@ -130,19 +130,36 @@ SOF_TIMESTAMPING_OPT_ID:
have multiple concurrent timestamping requests outstanding. Packets
can be reordered in the transmit path, for instance in the packet
scheduler. In that case timestamps will be queued onto the error
- queue out of order from the original send() calls. This option
- embeds a counter that is incremented at send() time, to order
- timestamps within a flow.
+ queue out of order from the original send() calls. It is then not
+ always possible to uniquely match timestamps to the original send()
+ calls based on timestamp order or payload inspection alone.
+
+ This option associates each packet at send() with a unique
+ identifier and returns that along with the timestamp. The identifier
+ is derived from a per-socket u32 counter (that wraps). For datagram
+ sockets, the counter increments with each sent packet. For stream
+ sockets, it increments with every byte.
+
+ The counter starts at zero. It is initialized the first time that
+ the socket option is enabled. It is reset each time the option is
+ enabled after having been disabled. Resetting the counter does not
+ change the identifiers of existing packets in the system.
This option is implemented only for transmit timestamps. There, the
timestamp is always looped along with a struct sock_extended_err.
The option modifies field ee_data to pass an id that is unique
among all possibly concurrently outstanding timestamp requests for
- that socket. In practice, it is a monotonically increasing u32
- (that wraps).
+ that socket.
+
+
+SOF_TIMESTAMPING_OPT_CMSG:
- In datagram sockets, the counter increments on each send call. In
- stream sockets, it increments with every byte.
+ Support recv() cmsg for all timestamped packets. Control messages
+ are already supported unconditionally on all packets with receive
+ timestamps and on IPv6 packets with transmit timestamp. This option
+ extends them to IPv4 packets with transmit timestamp. One use case
+ is to correlate packets with their egress device, by enabling socket
+ option IP_PKTINFO simultaneously.
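+
+  For example, a minimal sketch of enabling both options together with
+  software transmit timestamps on an existing socket fd (error handling
+  omitted):
+
+    int val = SOF_TIMESTAMPING_TX_SOFTWARE |
+              SOF_TIMESTAMPING_SOFTWARE |
+              SOF_TIMESTAMPING_OPT_ID |
+              SOF_TIMESTAMPING_OPT_CMSG;
+    int one = 1;
+
+    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+    /* correlate transmit timestamps with the egress device */
+    setsockopt(fd, SOL_IP, IP_PKTINFO, &one, sizeof(one));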
1.4 Bytestream Timestamps
diff --git a/Documentation/networking/timestamping/txtimestamp.c b/Documentation/networking/timestamping/txtimestamp.c
index b32fc2a07734..876f71c5625a 100644
--- a/Documentation/networking/timestamping/txtimestamp.c
+++ b/Documentation/networking/timestamping/txtimestamp.c
@@ -46,6 +46,7 @@
#include <netpacket/packet.h>
#include <poll.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
@@ -58,6 +59,14 @@
#include <time.h>
#include <unistd.h>
+/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */
+#ifndef in6_pktinfo
+struct in6_pktinfo {
+ struct in6_addr ipi6_addr;
+ int ipi6_ifindex;
+};
+#endif
+
/* command line parameters */
static int cfg_proto = SOCK_STREAM;
static int cfg_ipproto = IPPROTO_TCP;
@@ -65,6 +74,8 @@ static int cfg_num_pkts = 4;
static int do_ipv4 = 1;
static int do_ipv6 = 1;
static int cfg_payload_len = 10;
+static bool cfg_show_payload;
+static bool cfg_do_pktinfo;
static uint16_t dest_port = 9000;
static struct sockaddr_in daddr;
@@ -131,6 +142,30 @@ static void print_timestamp(struct scm_timestamping *tss, int tstype,
__print_timestamp(tsname, &tss->ts[0], tskey, payload_len);
}
+/* TODO: convert to check_and_print payload once API is stable */
+static void print_payload(char *data, int len)
+{
+ int i;
+
+ if (len > 70)
+ len = 70;
+
+ fprintf(stderr, "payload: ");
+ for (i = 0; i < len; i++)
+ fprintf(stderr, "%02hhx ", data[i]);
+ fprintf(stderr, "\n");
+}
+
+static void print_pktinfo(int family, int ifindex, void *saddr, void *daddr)
+{
+ char sa[INET6_ADDRSTRLEN], da[INET6_ADDRSTRLEN];
+
+ fprintf(stderr, " pktinfo: ifindex=%u src=%s dst=%s\n",
+ ifindex,
+ saddr ? inet_ntop(family, saddr, sa, sizeof(sa)) : "unknown",
+ daddr ? inet_ntop(family, daddr, da, sizeof(da)) : "unknown");
+}
+
static void __poll(int fd)
{
struct pollfd pollfd;
@@ -156,10 +191,9 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
cm->cmsg_type == SCM_TIMESTAMPING) {
tss = (void *) CMSG_DATA(cm);
} else if ((cm->cmsg_level == SOL_IP &&
- cm->cmsg_type == IP_RECVERR) ||
- (cm->cmsg_level == SOL_IPV6 &&
- cm->cmsg_type == IPV6_RECVERR)) {
-
+ cm->cmsg_type == IP_RECVERR) ||
+ (cm->cmsg_level == SOL_IPV6 &&
+ cm->cmsg_type == IPV6_RECVERR)) {
serr = (void *) CMSG_DATA(cm);
if (serr->ee_errno != ENOMSG ||
serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
@@ -168,6 +202,16 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
serr->ee_origin);
serr = NULL;
}
+ } else if (cm->cmsg_level == SOL_IP &&
+ cm->cmsg_type == IP_PKTINFO) {
+ struct in_pktinfo *info = (void *) CMSG_DATA(cm);
+ print_pktinfo(AF_INET, info->ipi_ifindex,
+ &info->ipi_spec_dst, &info->ipi_addr);
+ } else if (cm->cmsg_level == SOL_IPV6 &&
+ cm->cmsg_type == IPV6_PKTINFO) {
+ struct in6_pktinfo *info6 = (void *) CMSG_DATA(cm);
+ print_pktinfo(AF_INET6, info6->ipi6_ifindex,
+ NULL, &info6->ipi6_addr);
} else
fprintf(stderr, "unknown cmsg %d,%d\n",
cm->cmsg_level, cm->cmsg_type);
@@ -206,7 +250,11 @@ static int recv_errmsg(int fd)
if (ret == -1 && errno != EAGAIN)
error(1, errno, "recvmsg");
- __recv_errmsg_cmsg(&msg, ret);
+ if (ret > 0) {
+ __recv_errmsg_cmsg(&msg, ret);
+ if (cfg_show_payload)
+ print_payload(data, cfg_payload_len);
+ }
free(data);
return ret == -1;
@@ -215,9 +263,9 @@ static int recv_errmsg(int fd)
static void do_test(int family, unsigned int opt)
{
char *buf;
- int fd, i, val, total_len;
+ int fd, i, val = 1, total_len;
- if (family == IPPROTO_IPV6 && cfg_proto != SOCK_STREAM) {
+ if (family == AF_INET6 && cfg_proto != SOCK_STREAM) {
/* due to lack of checksum generation code */
fprintf(stderr, "test: skipping datagram over IPv6\n");
return;
@@ -239,7 +287,6 @@ static void do_test(int family, unsigned int opt)
error(1, errno, "socket");
if (cfg_proto == SOCK_STREAM) {
- val = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
(char*) &val, sizeof(val)))
error(1, 0, "setsockopt no nagle");
@@ -253,7 +300,20 @@ static void do_test(int family, unsigned int opt)
}
}
+ if (cfg_do_pktinfo) {
+ if (family == AF_INET6) {
+ if (setsockopt(fd, SOL_IPV6, IPV6_RECVPKTINFO,
+ &val, sizeof(val)))
+ error(1, errno, "setsockopt pktinfo ipv6");
+ } else {
+ if (setsockopt(fd, SOL_IP, IP_PKTINFO,
+ &val, sizeof(val)))
+ error(1, errno, "setsockopt pktinfo ipv4");
+ }
+ }
+
opt |= SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_OPT_CMSG |
SOF_TIMESTAMPING_OPT_ID;
if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
(char *) &opt, sizeof(opt)))
@@ -262,8 +322,6 @@ static void do_test(int family, unsigned int opt)
for (i = 0; i < cfg_num_pkts; i++) {
memset(&ts_prev, 0, sizeof(ts_prev));
memset(buf, 'a' + i, total_len);
- buf[total_len - 2] = '\n';
- buf[total_len - 1] = '\0';
if (cfg_proto == SOCK_RAW) {
struct udphdr *udph;
@@ -324,11 +382,13 @@ static void __attribute__((noreturn)) usage(const char *filepath)
" -4: only IPv4\n"
" -6: only IPv6\n"
" -h: show this message\n"
+ " -I: request PKTINFO\n"
" -l N: send N bytes at a time\n"
" -r: use raw\n"
" -R: use raw (IP_HDRINCL)\n"
" -p N: connect to port N\n"
- " -u: use udp\n",
+ " -u: use udp\n"
+ " -x: show payload (up to 70 bytes)\n",
filepath);
exit(1);
}
@@ -338,7 +398,7 @@ static void parse_opt(int argc, char **argv)
int proto_count = 0;
char c;
- while ((c = getopt(argc, argv, "46hl:p:rRu")) != -1) {
+ while ((c = getopt(argc, argv, "46hIl:p:rRux")) != -1) {
switch (c) {
case '4':
do_ipv6 = 0;
@@ -346,6 +406,9 @@ static void parse_opt(int argc, char **argv)
case '6':
do_ipv4 = 0;
break;
+ case 'I':
+ cfg_do_pktinfo = true;
+ break;
case 'r':
proto_count++;
cfg_proto = SOCK_RAW;
@@ -367,6 +430,9 @@ static void parse_opt(int argc, char **argv)
case 'p':
dest_port = strtoul(optarg, NULL, 10);
break;
+ case 'x':
+ cfg_show_payload = true;
+ break;
case 'h':
default:
usage(argv[0]);
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index c6594af94d25..371361c69a4b 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -54,18 +54,14 @@ The PHY driver should create the PHY in order for other peripheral controllers
to make use of it. The PHY framework provides 2 APIs to create the PHY.
struct phy *phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data);
+ const struct phy_ops *ops);
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data);
+ const struct phy_ops *ops);
The PHY drivers can use one of the above 2 APIs to create the PHY by passing
-the device pointer, phy ops and init_data.
+the device pointer and phy ops.
phy_ops is a set of function pointers for performing PHY operations such as
-init, exit, power_on and power_off. *init_data* is mandatory to get a reference
-to the PHY in the case of non-dt boot. See section *Board File Initialization*
-on how init_data should be used.
+init, exit, power_on and power_off.
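+
+As a brief, illustrative sketch only (the my_phy_* names below are made up
+and are not part of the framework), a provider could create its PHY like
+this:
+
+static const struct phy_ops my_phy_ops = {
+	.init		= my_phy_init,		/* driver supplied callbacks */
+	.power_on	= my_phy_power_on,
+	.power_off	= my_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static int my_phy_probe(struct platform_device *pdev)
+{
+	struct phy *phy;
+
+	phy = devm_phy_create(&pdev->dev, pdev->dev.of_node, &my_phy_ops);
+	if (IS_ERR(phy))
+		return PTR_ERR(phy);
+
+	return 0;
+}
+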
Inorder to dereference the private data (in phy_ops), the phy provider driver
can use phy_set_drvdata() after creating the PHY and use phy_get_drvdata() in
@@ -137,42 +133,18 @@ There are exported APIs like phy_pm_runtime_get, phy_pm_runtime_get_sync,
phy_pm_runtime_put, phy_pm_runtime_put_sync, phy_pm_runtime_allow and
phy_pm_runtime_forbid for performing PM operations.
-8. Board File Initialization
-
-Certain board file initialization is necessary in order to get a reference
-to the PHY in the case of non-dt boot.
-Say we have a single device that implements 3 PHYs that of USB, SATA and PCIe,
-then in the board file the following initialization should be done.
-
-struct phy_consumer consumers[] = {
- PHY_CONSUMER("dwc3.0", "usb"),
- PHY_CONSUMER("pcie.0", "pcie"),
- PHY_CONSUMER("sata.0", "sata"),
-};
-PHY_CONSUMER takes 2 parameters, first is the device name of the controller
-(PHY consumer) and second is the port name.
-
-struct phy_init_data init_data = {
- .consumers = consumers,
- .num_consumers = ARRAY_SIZE(consumers),
-};
-
-static const struct platform_device pipe3_phy_dev = {
- .name = "pipe3-phy",
- .id = -1,
- .dev = {
- .platform_data = {
- .init_data = &init_data,
- },
- },
-};
-
-then, while doing phy_create, the PHY driver should pass this init_data
- phy_create(dev, ops, pdata->init_data);
-
-and the controller driver (phy consumer) should pass the port name along with
-the device to get a reference to the PHY
- phy_get(dev, "pcie");
+8. PHY Mappings
+
+In order to get a reference to a PHY without help from DeviceTree, the
+framework offers lookups which can be compared to clkdev, which allows clk
+structures to be bound to devices. A lookup can be made during runtime when a
+handle to the struct phy already exists.
+
+The framework offers the following API for registering and unregistering the
+lookups.
+
+int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
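+
+As an illustrative sketch only (the device name "dwc3.0" and connection id
+"usb" are example values), a PHY provider could register a lookup at probe
+time, where "phy" is the handle returned by phy_create()/devm_phy_create(),
+so that the consumer can later call phy_get(dev, "usb"):
+
+	int ret;
+
+	ret = phy_create_lookup(phy, "usb", "dwc3.0");
+	if (ret)
+		return ret;
+
+	/* ... and drop the lookup again on removal */
+	phy_remove_lookup(phy, "usb", "dwc3.0");
+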
9. DeviceTree Binding
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index f32ce5419573..44fe1d28a163 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -229,13 +229,13 @@ defined in include/linux/pm.h:
- if set, the value of child_count is ignored (but still updated)
unsigned int disable_depth;
- - used for disabling the helper funcions (they work normally if this is
+ - used for disabling the helper functions (they work normally if this is
equal to zero); the initial value of it is 1 (i.e. runtime PM is
initially disabled for all devices)
int runtime_error;
- if set, there was a fatal error (one of the callbacks returned error code
- as described in Section 2), so the helper funtions will not work until
+ as described in Section 2), so the helper functions will not work until
this flag is cleared; this is the error code returned by the failing
callback
@@ -468,6 +468,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
- set the power.irq_safe flag for the device, causing the runtime-PM
callbacks to be invoked with interrupts off
+ bool pm_runtime_is_irq_safe(struct device *dev);
+ - return true if power.irq_safe flag was set for the device, causing
+ the runtime-PM callbacks to be invoked with interrupts off
+
void pm_runtime_mark_last_busy(struct device *dev);
- set the power.last_busy field to the current time
@@ -524,7 +528,7 @@ pm_runtime_put_sync_autosuspend()
5. Runtime PM Initialization, Device Probing and Removal
Initially, the runtime PM is disabled for all devices, which means that the
-majority of the runtime PM helper funtions described in Section 4 will return
+majority of the runtime PM helper functions described in Section 4 will return
-EAGAIN until pm_runtime_enable() is called for the device.
In addition to that, the initial runtime PM status of all devices is
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
index 69663640dea5..2f9c5a5fcb25 100644
--- a/Documentation/power/suspend-and-interrupts.txt
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -77,7 +77,7 @@ Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
in a special way. Namely, the IRQ remains enabled, by on the first interrupt
it will be disabled, marked as pending and "suspended" so that it will be
re-enabled by resume_device_irqs() during the subsequent system resume. Also
-the PM core is notified about the event which casues the system suspend in
+the PM core is notified about the event which causes the system suspend in
progress to be aborted (that doesn't have to happen immediately, but at one
of the points where the suspend thread looks for pending wakeup events).
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 0e870825c1b9..bbfcd1bbedc5 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -99,7 +99,7 @@ SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
The device's read() operation can be used to transfer the snapshot image from
the kernel. It has the following limitations:
- you cannot read() more than one virtual memory page at a time
-- read()s across page boundaries are impossible (ie. if ypu read() 1/2 of
+- read()s across page boundaries are impossible (ie. if you read() 1/2 of
a page in the previous call, you will only be able to read()
_at_ _most_ 1/2 of the page in the next call)
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
index 69b3cac4749d..5d8675615e59 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/ramoops.txt
@@ -14,11 +14,19 @@ survive after a restart.
1. Ramoops concepts
-Ramoops uses a predefined memory area to store the dump. The start and size of
-the memory area are set using two variables:
+Ramoops uses a predefined memory area to store the dump. The start and size
+and type of the memory area are set using three variables:
* "mem_address" for the start
* "mem_size" for the size. The memory size will be rounded down to a
power of two.
+ * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
+
+Typically the default value of mem_type=0 should be used as that sets the pstore
+mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
+pgprot_noncached, which only works on some platforms. This is because pstore
+depends on atomic operations. At least on ARM, pgprot_noncached causes the
+memory to be mapped strongly ordered, and atomic operations on strongly ordered
+memory are implementation defined, and won't work on many ARMs such as omaps.
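+
+As a purely illustrative example (the address and sizes below are made up),
+the three variables can also be given as ramoops module parameters on the
+kernel command line:
+
+	ramoops.mem_address=0x8f000000 ramoops.mem_size=0x100000 ramoops.mem_type=0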
The memory area is divided into "record_size" chunks (also rounded down to
power of two) and each oops/panic writes a "record_size" chunk of
@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
static struct ramoops_platform_data ramoops_data = {
.mem_size = <...>,
.mem_address = <...>,
+ .mem_type = <...>,
.record_size = <...>,
.dump_oops = <...>,
.ecc = <...>,
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 462321c1aeea..08911b5c6b0e 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -26,11 +26,6 @@ The Linux for s/390 & z/Architecture Kernel Task Structure
Register Usage & Stackframes on Linux for s/390 & z/Architecture
A sample program with comments
Compiling programs for debugging on Linux for s/390 & z/Architecture
-Figuring out gcc compile errors
-Debugging Tools
-objdump
-strace
-Performance Debugging
Debugging under VM
s/390 & z/Architecture IO Overview
Debugging IO on s/390 & z/Architecture under VM
@@ -114,28 +109,25 @@ s/390 z/Architecture
16-17 16-17 Address Space Control
- 00 Primary Space Mode when DAT on
- The linux kernel currently runs in this mode, CR1 is affiliated with
- this mode & points to the primary segment table origin etc.
-
- 01 Access register mode this mode is used in functions to
- copy data between kernel & user space.
-
- 10 Secondary space mode not used in linux however CR7 the
- register affiliated with this mode is & this & normally
- CR13=CR7 to allow us to copy data between kernel & user space.
- We do this as follows:
- We set ar2 to 0 to designate its
- affiliated gpr ( gpr2 )to point to primary=kernel space.
- We set ar4 to 1 to designate its
- affiliated gpr ( gpr4 ) to point to secondary=home=user space
- & then essentially do a memcopy(gpr2,gpr4,size) to
- copy data between the address spaces, the reason we use home space for the
- kernel & don't keep secondary space free is that code will not run in
- secondary space.
-
- 11 Home Space Mode all user programs run in this mode.
- it is affiliated with CR13.
+ 00 Primary Space Mode:
+ The register CR1 contains the primary address-space control ele-
+ ment (PASCE), which points to the primary space region/segment
+ table origin.
+
+ 01 Access register mode
+
+ 10 Secondary Space Mode:
+ The register CR7 contains the secondary address-space control
+ element (SASCE), which points to the secondary space region or
+ segment table origin.
+
+ 11 Home Space Mode:
+ The register CR13 contains the home space address-space control
+ element (HASCE), which points to the home space region/segment
+ table origin.
+
+ See "Address Spaces on Linux for s/390 & z/Architecture" below
+ for more information about address space usage in Linux.
18-19 18-19 Condition codes (CC)
@@ -249,9 +241,9 @@ currently 4TB of physical memory currently on z/Architecture.
Address Spaces on Linux for s/390 & z/Architecture
==================================================
-Our addressing scheme is as follows
-
+Our addressing scheme is basically as follows:
+ Primary Space Home Space
Himem 0x7fffffff 2GB on s/390 ***************** ****************
currently 0x3ffffffffff (2^42)-1 * User Stack * * *
on z/Architecture. ***************** * *
@@ -264,9 +256,46 @@ on z/Architecture. ***************** * *
* Sections * * *
0x00000000 ***************** ****************
-This also means that we need to look at the PSW problem state bit
-or the addressing mode to decide whether we are looking at
-user or kernel space.
+This also means that we need to look at the PSW problem state bit and the
+addressing mode to decide whether we are looking at user or kernel space.
+
+User space runs in primary address mode (or access register mode within
+the vdso code).
+
+The kernel usually also runs in home space mode; however, when accessing
+user space the kernel switches to primary or secondary address mode if
+the mvcos instruction is not available or if a compare-and-swap (futex)
+instruction on a user space address is performed.
+
+When also looking at the ASCE control registers, this means:
+
+User space:
+- runs in primary or access register mode
+- cr1 contains the user asce
+- cr7 contains the user asce
+- cr13 contains the kernel asce
+
+Kernel space:
+- runs in home space mode
+- cr1 contains the user or kernel asce
+ -> the kernel asce is loaded when a uaccess requires primary or
+ secondary address mode
+- cr7 contains the user or kernel asce, (changed with set_fs())
+- cr13 contains the kernel asce
+
+In case of uaccess the kernel changes to:
+- primary space mode in case of a uaccess (copy_to_user) and uses
+ e.g. the mvcp instruction to access user space. However the kernel
+ will stay in home space mode if the mvcos instruction is available
+- secondary space mode in case of futex atomic operations, so that the
+ instructions come from primary address space and data from secondary
+ space
+
+In case of KVM, the kernel runs in home space mode, but cr1 gets switched
+to contain the gmap asce before the SIE instruction gets executed. When
+the SIE instruction is finished, cr1 will be switched back to contain the
+user asce.
+
Virtual Addresses on s/390 & z/Architecture
===========================================
@@ -706,376 +735,7 @@ Debugging with optimisation has since much improved after fixing
some bugs, please make sure you are using gdb-5.0 or later developed
after Nov'2000.
-Figuring out gcc compile errors
-===============================
-If you are getting a lot of syntax errors compiling a program & the problem
-isn't blatantly obvious from the source.
-It often helps to just preprocess the file, this is done with the -E
-option in gcc.
-What this does is that it runs through the very first phase of compilation
-( compilation in gcc is done in several stages & gcc calls many programs to
-achieve its end result ) with the -E option gcc just calls the gcc preprocessor (cpp).
-The c preprocessor does the following, it joins all the files #included together
-recursively ( #include files can #include other files ) & also the c file you wish to compile.
-It puts a fully qualified path of the #included files in a comment & it
-does macro expansion.
-This is useful for debugging because
-1) You can double check whether the files you expect to be included are the ones
-that are being included ( e.g. double check that you aren't going to the i386 asm directory ).
-2) Check that macro definitions aren't clashing with typedefs,
-3) Check that definitions aren't being used before they are being included.
-4) Helps put the line emitting the error under the microscope if it contains macros.
-
-For convenience the Linux kernel's makefile will do preprocessing automatically for you
-by suffixing the file you want built with .i ( instead of .o )
-
-e.g.
-from the linux directory type
-make arch/s390/kernel/signal.i
-this will build
-
-s390-gcc -D__KERNEL__ -I/home1/barrow/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
--fno-strict-aliasing -D__SMP__ -pipe -fno-strength-reduce -E arch/s390/kernel/signal.c
-> arch/s390/kernel/signal.i
-
-Now look at signal.i you should see something like.
-
-
-# 1 "/home1/barrow/linux/include/asm/types.h" 1
-typedef unsigned short umode_t;
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-If instead you are getting errors further down e.g.
-unknown instruction:2515 "move.l" or better still unknown instruction:2515
-"Fixme not implemented yet, call Martin" you are probably are attempting to compile some code
-meant for another architecture or code that is simply not implemented, with a fixme statement
-stuck into the inline assembly code so that the author of the file now knows he has work to do.
-To look at the assembly emitted by gcc just before it is about to call gas ( the gnu assembler )
-use the -S option.
-Again for your convenience the Linux kernel's Makefile will hold your hand &
-do all this donkey work for you also by building the file with the .s suffix.
-e.g.
-from the Linux directory type
-make arch/s390/kernel/signal.s
-
-s390-gcc -D__KERNEL__ -I/home1/barrow/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
--fno-strict-aliasing -D__SMP__ -pipe -fno-strength-reduce -S arch/s390/kernel/signal.c
--o arch/s390/kernel/signal.s
-
-
-This will output something like, ( please note the constant pool & the useful comments
-in the prologue to give you a hand at interpreting it ).
-
-.LC54:
- .string "misaligned (__u16 *) in __xchg\n"
-.LC57:
- .string "misaligned (__u32 *) in __xchg\n"
-.L$PG1: # Pool sys_sigsuspend
-.LC192:
- .long -262401
-.LC193:
- .long -1
-.LC194:
- .long schedule-.L$PG1
-.LC195:
- .long do_signal-.L$PG1
- .align 4
-.globl sys_sigsuspend
- .type sys_sigsuspend,@function
-sys_sigsuspend:
-# leaf function 0
-# automatics 16
-# outgoing args 0
-# need frame pointer 0
-# call alloca 0
-# has varargs 0
-# incoming args (stack) 0
-# function length 168
- STM 8,15,32(15)
- LR 0,15
- AHI 15,-112
- BASR 13,0
-.L$CO1: AHI 13,.L$PG1-.L$CO1
- ST 0,0(15)
- LR 8,2
- N 5,.LC192-.L$PG1(13)
-
-Adding -g to the above output makes the output even more useful
-e.g. typing
-make CC:="s390-gcc -g" kernel/sched.s
-
-which compiles.
-s390-gcc -g -D__KERNEL__ -I/home/barrow/linux-2.3/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing -pipe -fno-strength-reduce -S kernel/sched.c -o kernel/sched.s
-
-also outputs stabs ( debugger ) info, from this info you can find out the
-offsets & sizes of various elements in structures.
-e.g. the stab for the structure
-struct rlimit {
- unsigned long rlim_cur;
- unsigned long rlim_max;
-};
-is
-.stabs "rlimit:T(151,2)=s8rlim_cur:(0,5),0,32;rlim_max:(0,5),32,32;;",128,0,0,0
-from this stab you can see that
-rlimit_cur starts at bit offset 0 & is 32 bits in size
-rlimit_max starts at bit offset 32 & is 32 bits in size.
-
-
-Debugging Tools:
-================
-
-objdump
-=======
-This is a tool with many options the most useful being ( if compiled with -g).
-objdump --source <victim program or object file> > <victims debug listing >
-
-
-The whole kernel can be compiled like this ( Doing this will make a 17MB kernel
-& a 200 MB listing ) however you have to strip it before building the image
-using the strip command to make it a more reasonable size to boot it.
-
-A source/assembly mixed dump of the kernel can be done with the line
-objdump --source vmlinux > vmlinux.lst
-Also, if the file isn't compiled -g, this will output as much debugging information
-as it can (e.g. function names). This is very slow as it spends lots
-of time searching for debugging info. The following self explanatory line should be used
-instead if the code isn't compiled -g, as it is much faster:
-objdump --disassemble-all --syms vmlinux > vmlinux.lst
-
-As hard drive space is valuable most of us use the following approach.
-1) Look at the emitted psw on the console to find the crash address in the kernel.
-2) Look at the file System.map ( in the linux directory ) produced when building
-the kernel to find the closest address less than the current PSW to find the
-offending function.
-3) use grep or similar to search the source tree looking for the source file
- with this function if you don't know where it is.
-4) rebuild this object file with -g on, as an example suppose the file was
-( /arch/s390/kernel/signal.o )
-5) Assuming the file with the erroneous function is signal.c Move to the base of the
-Linux source tree.
-6) rm /arch/s390/kernel/signal.o
-7) make /arch/s390/kernel/signal.o
-8) watch the gcc command line emitted
-9) type it in again or alternatively cut & paste it on the console adding the -g option.
-10) objdump --source arch/s390/kernel/signal.o > signal.lst
-This will output the source & the assembly intermixed, as the snippet below shows
-This will unfortunately output addresses which aren't the same
-as the kernel ones you should be able to get around the mental arithmetic
-by playing with the --adjust-vma parameter to objdump.
-
-
-
-
-static inline void spin_lock(spinlock_t *lp)
-{
- a0: 18 34 lr %r3,%r4
- a2: a7 3a 03 bc ahi %r3,956
- __asm__ __volatile(" lhi 1,-1\n"
- a6: a7 18 ff ff lhi %r1,-1
- aa: 1f 00 slr %r0,%r0
- ac: ba 01 30 00 cs %r0,%r1,0(%r3)
- b0: a7 44 ff fd jm aa <sys_sigsuspend+0x2e>
- saveset = current->blocked;
- b4: d2 07 f0 68 mvc 104(8,%r15),972(%r4)
- b8: 43 cc
- return (set->sig[0] & mask) != 0;
-}
-
-6) If debugging under VM go down to that section in the document for more info.
-
-
-I now have a tool which takes the pain out of --adjust-vma
-& you are able to do something like
-make /arch/s390/kernel/traps.lst
-& it automatically generates the correctly relocated entries for
-the text segment in traps.lst.
-This tool is now standard in linux distro's in scripts/makelst
-
-strace:
--------
-Q. What is it ?
-A. It is a tool for intercepting calls to the kernel & logging them
-to a file & on the screen.
-
-Q. What use is it ?
-A. You can use it to find out what files a particular program opens.
-
-
-Example 1
----------
-If you wanted to know does ping work but didn't have the source
-strace ping -c 1 127.0.0.1
-& then look at the man pages for each of the syscalls below,
-( In fact this is sometimes easier than looking at some spaghetti
-source which conditionally compiles for several architectures ).
-Not everything that it throws out needs to make sense immediately.
-
-Just looking quickly you can see that it is making up a RAW socket
-for the ICMP protocol.
-Doing an alarm(10) for a 10 second timeout
-& doing a gettimeofday call before & after each read to see
-how long the replies took, & writing some text to stdout so the user
-has an idea what is going on.
-
-socket(PF_INET, SOCK_RAW, IPPROTO_ICMP) = 3
-getuid() = 0
-setuid(0) = 0
-stat("/usr/share/locale/C/libc.cat", 0xbffff134) = -1 ENOENT (No such file or directory)
-stat("/usr/share/locale/libc/C", 0xbffff134) = -1 ENOENT (No such file or directory)
-stat("/usr/local/share/locale/C/libc.cat", 0xbffff134) = -1 ENOENT (No such file or directory)
-getpid() = 353
-setsockopt(3, SOL_SOCKET, SO_BROADCAST, [1], 4) = 0
-setsockopt(3, SOL_SOCKET, SO_RCVBUF, [49152], 4) = 0
-fstat(1, {st_mode=S_IFCHR|0620, st_rdev=makedev(3, 1), ...}) = 0
-mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x40008000
-ioctl(1, TCGETS, {B9600 opost isig icanon echo ...}) = 0
-write(1, "PING 127.0.0.1 (127.0.0.1): 56 d"..., 42PING 127.0.0.1 (127.0.0.1): 56 data bytes
-) = 42
-sigaction(SIGINT, {0x8049ba0, [], SA_RESTART}, {SIG_DFL}) = 0
-sigaction(SIGALRM, {0x8049600, [], SA_RESTART}, {SIG_DFL}) = 0
-gettimeofday({948904719, 138951}, NULL) = 0
-sendto(3, "\10\0D\201a\1\0\0\17#\2178\307\36"..., 64, 0, {sin_family=AF_INET,
-sin_port=htons(0), sin_addr=inet_addr("127.0.0.1")}, 16) = 64
-sigaction(SIGALRM, {0x8049600, [], SA_RESTART}, {0x8049600, [], SA_RESTART}) = 0
-sigaction(SIGALRM, {0x8049ba0, [], SA_RESTART}, {0x8049600, [], SA_RESTART}) = 0
-alarm(10) = 0
-recvfrom(3, "E\0\0T\0005\0\0@\1|r\177\0\0\1\177"..., 192, 0,
-{sin_family=AF_INET, sin_port=htons(50882), sin_addr=inet_addr("127.0.0.1")}, [16]) = 84
-gettimeofday({948904719, 160224}, NULL) = 0
-recvfrom(3, "E\0\0T\0006\0\0\377\1\275p\177\0"..., 192, 0,
-{sin_family=AF_INET, sin_port=htons(50882), sin_addr=inet_addr("127.0.0.1")}, [16]) = 84
-gettimeofday({948904719, 166952}, NULL) = 0
-write(1, "64 bytes from 127.0.0.1: icmp_se"...,
-5764 bytes from 127.0.0.1: icmp_seq=0 ttl=255 time=28.0 ms
-
-Example 2
----------
-strace passwd 2>&1 | grep open
-produces the following output
-open("/etc/ld.so.cache", O_RDONLY) = 3
-open("/opt/kde/lib/libc.so.5", O_RDONLY) = -1 ENOENT (No such file or directory)
-open("/lib/libc.so.5", O_RDONLY) = 3
-open("/dev", O_RDONLY) = 3
-open("/var/run/utmp", O_RDONLY) = 3
-open("/etc/passwd", O_RDONLY) = 3
-open("/etc/shadow", O_RDONLY) = 3
-open("/etc/login.defs", O_RDONLY) = 4
-open("/dev/tty", O_RDONLY) = 4
-
-The 2>&1 is done to redirect stderr to stdout & grep is then filtering this input
-through the pipe for each line containing the string open.
-
-
-Example 3
----------
-Getting sophisticated
-telnetd crashes & I don't know why
-
-Steps
------
-1) Replace the following line in /etc/inetd.conf
-telnet stream tcp nowait root /usr/sbin/in.telnetd -h
-with
-telnet stream tcp nowait root /blah
-
-2) Create the file /blah with the following contents to start tracing telnetd
-#!/bin/bash
-/usr/bin/strace -o/t1 -f /usr/sbin/in.telnetd -h
-3) chmod 700 /blah to make it executable only to root
-4)
-killall -HUP inetd
-or ps aux | grep inetd
-get inetd's process id
-& kill -HUP inetd to restart it.
-
-Important options
------------------
--o is used to tell strace to output to a file in our case t1 in the root directory
--f is to follow children i.e.
-e.g in our case above telnetd will start the login process & subsequently a shell like bash.
-You will be able to tell which is which from the process ID's listed on the left hand side
-of the strace output.
--p<pid> will tell strace to attach to a running process, yup this can be done provided
- it isn't being traced or debugged already & you have enough privileges,
-the reason 2 processes cannot trace or debug the same program is that strace
-becomes the parent process of the one being debugged & processes ( unlike people )
-can have only one parent.
-
-
-However the file /t1 will get big quite quickly
-to test it telnet 127.0.0.1
-
-now look at what files in.telnetd execve'd
-413 execve("/usr/sbin/in.telnetd", ["/usr/sbin/in.telnetd", "-h"], [/* 17 vars */]) = 0
-414 execve("/bin/login", ["/bin/login", "-h", "localhost", "-p"], [/* 2 vars */]) = 0
-
-Whey it worked!.
-
-
-Other hints:
-------------
-If the program is not very interactive ( i.e. not much keyboard input )
-& is crashing in one architecture but not in another you can do
-an strace of both programs under as identical a scenario as you can
-on both architectures outputting to a file then.
-do a diff of the two traces using the diff program
-i.e.
-diff output1 output2
-& maybe you'll be able to see where the call paths differed, this
-is possibly near the cause of the crash.
-
-More info
----------
-Look at man pages for strace & the various syscalls
-e.g. man strace, man alarm, man socket.
-
-
-Performance Debugging
-=====================
-gcc is capable of compiling in profiling code just add the -p option
-to the CFLAGS, this obviously affects program size & performance.
-This can be used by the gprof gnu profiling tool or the
-gcov the gnu code coverage tool ( code coverage is a means of testing
-code quality by checking if all the code in an executable in exercised by
-a tester ).
-
-
-Using top to find out where processes are sleeping in the kernel
-----------------------------------------------------------------
-To do this copy the System.map from the root directory where
-the linux kernel was built to the /boot directory on your
-linux machine.
-Start top
-Now type fU<return>
-You should see a new field called WCHAN which
-tells you where each process is sleeping here is a typical output.
-
- 6:59pm up 41 min, 1 user, load average: 0.00, 0.00, 0.00
-28 processes: 27 sleeping, 1 running, 0 zombie, 0 stopped
-CPU states: 0.0% user, 0.1% system, 0.0% nice, 99.8% idle
-Mem: 254900K av, 45976K used, 208924K free, 0K shrd, 28636K buff
-Swap: 0K av, 0K used, 0K free 8620K cached
-
- PID USER PRI NI SIZE RSS SHARE WCHAN STAT LIB %CPU %MEM TIME COMMAND
- 750 root 12 0 848 848 700 do_select S 0 0.1 0.3 0:00 in.telnetd
- 767 root 16 0 1140 1140 964 R 0 0.1 0.4 0:00 top
- 1 root 8 0 212 212 180 do_select S 0 0.0 0.0 0:00 init
- 2 root 9 0 0 0 0 down_inte SW 0 0.0 0.0 0:00 kmcheck
-
-The time command
-----------------
-Another related command is the time command which gives you an indication
-of where a process is spending the majority of its time.
-e.g.
-time ping -c 5 nc
-outputs
-real 0m4.054s
-user 0m0.010s
-sys 0m0.010s
Debugging under VM
==================
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index a0c85110a07e..8638f61c8c9d 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -172,7 +172,7 @@ ways.
- eh_strategy_handler() callback
This is one big callback which should perform whole error
- handling. As such, it should do all choirs SCSI midlayer
+ handling. As such, it should do all chores the SCSI midlayer
performs during recovery. This will be discussed in [2-2].
Once recovery is complete, SCSI EH resumes normal operation by
@@ -428,7 +428,7 @@ scmd->allowed.
scsi_unjam_host() and it is responsible for whole recovery process.
On completion, the handler should have made lower layers forget about
all failed scmds and either ready for new commands or offline. Also,
-it should perform SCSI EH maintenance choirs to maintain integrity of
+it should perform SCSI EH maintenance chores to maintain integrity of
SCSI midlayer. IOW, of the steps described in [2-1-2], all steps
except for #1 must be implemented by eh_strategy_handler().
diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.txt
index a4e102dddfea..839b5dad9226 100644
--- a/Documentation/security/IMA-templates.txt
+++ b/Documentation/security/IMA-templates.txt
@@ -27,25 +27,22 @@ Managing templates with these structures is very simple. To support
a new data type, developers define the field identifier and implement
two functions, init() and show(), respectively to generate and display
measurement entries. Defining a new template descriptor requires
-specifying the template format, a string of field identifiers separated
-by the '|' character. While in the current implementation it is possible
-to define new template descriptors only by adding their definition in the
-template specific code (ima_template.c), in a future version it will be
-possible to register a new template on a running kernel by supplying to IMA
-the desired format string. In this version, IMA initializes at boot time
-all defined template descriptors by translating the format into an array
-of template fields structures taken from the set of the supported ones.
+specifying the template format (a string of field identifiers separated
+by the '|' character) through the 'ima_template_fmt' kernel command line
+parameter. At boot time, IMA initializes the chosen template descriptor
+by translating the format into an array of template fields structures taken
+from the set of the supported ones.
After the initialization step, IMA will call ima_alloc_init_template()
(new function defined within the patches for the new template management
mechanism) to generate a new measurement entry by using the template
descriptor chosen through the kernel configuration or through the newly
-introduced 'ima_template=' kernel command line parameter. It is during this
-phase that the advantages of the new architecture are clearly shown:
-the latter function will not contain specific code to handle a given template
-but, instead, it simply calls the init() method of the template fields
-associated to the chosen template descriptor and store the result (pointer
-to allocated data and data length) in the measurement entry structure.
+introduced 'ima_template' and 'ima_template_fmt' kernel command line parameters.
+It is during this phase that the advantages of the new architecture are
+clearly shown: the latter function will not contain specific code to handle
+a given template but, instead, it simply calls the init() method of the template
+fields associated with the chosen template descriptor and stores the result
+(pointer to allocated data and data length) in the measurement entry structure.
The same mechanism is employed to display measurements entries.
The functions ima[_ascii]_measurements_show() retrieve, for each entry,
@@ -86,4 +83,6 @@ currently the following methods are supported:
- select a template descriptor among those supported in the kernel
configuration ('ima-ng' is the default choice);
- specify a template descriptor name from the kernel command line through
- the 'ima_template=' parameter.
+ the 'ima_template=' parameter;
+ - register a new template descriptor with custom format through the kernel
+ command line parameter 'ima_template_fmt='.
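+
+    For instance (purely as an illustration, reusing field identifiers from
+    the existing 'ima-ng' template), a custom format could be passed as:
+
+        ima_template_fmt="d-ng|n-ng"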
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index ba64e4b892e9..c415b0ef4493 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -59,7 +59,9 @@ The core driver uses the info->tmpbuf_sem lock to prevent multi-threaded
access to the info->tmpbuf bouncebuffer used for port writes.
The port_sem semaphore is used to protect against ports being added/
-removed or reconfigured at inappropriate times.
+removed or reconfigured at inappropriate times. Since v2.6.27, this
+semaphore has been the 'mutex' member of the tty_port struct, and is
+commonly referred to as the port mutex (or port->mutex).
uart_ops
@@ -248,7 +250,7 @@ hardware.
Other flags may be used (eg, xon/xoff characters) if your
hardware supports hardware "soft" flow control.
- Locking: none.
+ Locking: caller holds port->mutex
Interrupts: caller dependent.
This call must not sleep
diff --git a/Documentation/sound/alsa/ControlNames.txt b/Documentation/sound/alsa/ControlNames.txt
index fea65bb6269e..79a6127863ca 100644
--- a/Documentation/sound/alsa/ControlNames.txt
+++ b/Documentation/sound/alsa/ControlNames.txt
@@ -1,6 +1,6 @@
This document describes standard names of mixer controls.
-Syntax: SOURCE [DIRECTION] FUNCTION
+Syntax: [LOCATION] SOURCE [CHANNEL] [DIRECTION] FUNCTION
DIRECTION:
<nothing> (both directions)
@@ -14,12 +14,29 @@ FUNCTION:
Volume
Route (route control, hardware specific)
+CHANNEL:
+ <nothing> (channel independent, or applies to all channels)
+ Front
+ Surround (rear left/right in 4.0/5.1 surround)
+ CLFE
+ Center
+ LFE
+ Side (side left/right for 7.1 surround)
+
+LOCATION: (physical location of source)
+ Front
+ Rear
+ Dock (docking station)
+ Internal
+
SOURCE:
Master
Master Mono
Hardware Master
Speaker (internal speaker)
+ Bass Speaker (internal LFE speaker)
Headphone
+ Line Out
Beep (beep generator)
Phone
Phone Input
@@ -27,14 +44,14 @@ SOURCE:
Synth
FM
Mic
- Line
+ Headset Mic (mic part of combined headset jack - 4-pin headphone + mic)
+ Headphone Mic (mic part of either/or - 3-pin headphone or mic)
+ Line (input only, use "Line Out" for output)
CD
Video
Zoom Video
Aux
PCM
- PCM Front
- PCM Rear
PCM Pan
Loopback
Analog Loopback (D/A -> A/D loopback)
@@ -47,8 +64,13 @@ SOURCE:
Music
I2S
IEC958
+ HDMI
+ SPDIF (output only)
+ SPDIF In
+ Digital In
+ HDMI/DP (either HDMI or DisplayPort)
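+
+Some illustrative names that follow the syntax above (examples only):
+  Master Playback Volume              (SOURCE DIRECTION FUNCTION)
+  Headphone Playback Switch           (SOURCE DIRECTION FUNCTION)
+  Front Mic Capture Volume            (LOCATION SOURCE DIRECTION FUNCTION)
+  Speaker Surround Playback Volume    (SOURCE CHANNEL DIRECTION FUNCTION)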
-Exceptions:
+Exceptions (deprecated):
[Digital] Capture Source
[Digital] Capture Switch (aka input gain switch)
[Digital] Capture Volume (aka input gain volume)
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index a5e754714344..5a3163cac6c3 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -113,14 +113,10 @@ AD1984
AD1986A
=======
- 6stack 6-jack, separate surrounds (default)
3stack 3-stack, shared surrounds
laptop 2-channel only (FSC V2060, Samsung M50)
- laptop-eapd 2-channel with EAPD (ASUS A6J)
- laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
- ultra 2-channel with EAPD (Samsung Ultra tablet PC)
- samsung 2-channel with EAPD (Samsung R65)
- samsung-p50 2-channel with HP-automute (Samsung P50)
+ laptop-imic 2-channel with built-in mic
+ eapd Turn on EAPD constantly
AD1988/AD1988B/AD1989A/AD1989B
==============================
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt
index 7fcd1ad96fcc..7f8a0d325905 100644
--- a/Documentation/sound/alsa/Procfile.txt
+++ b/Documentation/sound/alsa/Procfile.txt
@@ -101,10 +101,6 @@ card*/pcm*/xrun_debug
bit 0 = Enable XRUN/jiffies debug messages
bit 1 = Show stack trace at XRUN / jiffies check
bit 2 = Enable additional jiffies check
- bit 3 = Log hwptr update at each period interrupt
- bit 4 = Log hwptr update at each snd_pcm_update_hw_ptr()
- bit 5 = Show last 10 positions on error
- bit 6 = Do above only once
When the bit 0 is set, the driver will show the messages to
kernel log when an xrun is detected. The debug message is
@@ -121,15 +117,6 @@ card*/pcm*/xrun_debug
buggy) hardware that doesn't give smooth pointer updates.
This feature is enabled via the bit 2.
- Bits 3 and 4 are for logging the hwptr records. Note that
- these will give flood of kernel messages.
-
- When bit 5 is set, the driver logs the last 10 xrun errors and
- the proc file shows each jiffies, position, period_size,
- buffer_size, old_hw_ptr, and hw_ptr_base values.
-
- When bit 6 is set, the full xrun log is shown only once.
-
card*/pcm*/sub*/info
The general information of this PCM sub-stream.
@@ -146,6 +133,10 @@ card*/pcm*/sub*/sw_params
card*/pcm*/sub*/prealloc
The buffer pre-allocation information.
+card*/pcm*/sub*/xrun_injection
+ Triggers an XRUN to the running stream when any value is
+ written to this proc file. Used for fault injection.
+ This entry is write-only.
AC97 Codec Information
----------------------
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index b5d0c8501a18..75511efefc64 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -116,10 +116,12 @@ set during run time.
auto_msgmni:
-Enables/Disables automatic recomputing of msgmni upon memory add/remove
-or upon ipc namespace creation/removal (see the msgmni description
-above). Echoing "1" into this file enables msgmni automatic recomputing.
-Echoing "0" turns it off. auto_msgmni default value is 1.
+This variable has no effect and may be removed in future kernel
+releases. Reading it always returns 0.
+Up to Linux 3.17, it enabled/disabled automatic recomputing of msgmni
+upon memory add/remove or upon ipc namespace creation/removal.
+Echoing "1" into this file enabled msgmni automatic recomputing.
+Echoing "0" turned it off. auto_msgmni default value was 1.
==============================================================
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 04892b821157..666594b43cff 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -120,10 +120,14 @@ seconds.
warnings
--------
-This controls console messages from the networking stack that can occur because
-of problems on the network like duplicate address or bad checksums. Normally,
-this should be enabled, but if the problem persists the messages can be
-disabled.
+This sysctl is now unused.
+
+This was used to control console messages from the networking stack that
+occur because of problems on the network like duplicate address or bad
+checksums.
+
+These messages are now emitted at KERN_DEBUG and can generally be enabled
+and controlled by the dynamic_debug facility.
netdev_budget
-------------
@@ -138,6 +142,28 @@ netdev_max_backlog
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.
+netdev_rss_key
+--------------
+
+RSS (Receive Side Scaling) enabled drivers use a 40-byte host key that is
+randomly generated.
+Some user space might need to gather its content even if drivers do not
+provide ethtool -x support yet.
+
+myhost:~# cat /proc/sys/net/core/netdev_rss_key
+84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8: ... (52 bytes total)
+
+The file contains nul bytes if no driver has ever called the netdev_rss_key_fill() function.
+Note:
+/proc/sys/net/core/netdev_rss_key contains 52 bytes of key,
+but most drivers only use 40 bytes of it.
+
+myhost:~# ethtool -x eth0
+RX flow hash indirection table for eth0 with 8 RX ring(s):
+ 0: 0 1 2 3 4 5 6 7
+RSS hash key:
+84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89
+
netdev_tstamp_prequeue
----------------------
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
new file mode 100644
index 000000000000..bba7dbfc49ed
--- /dev/null
+++ b/Documentation/trace/coresight.txt
@@ -0,0 +1,299 @@
+ Coresight - HW Assisted Tracing on ARM
+ ======================================
+
+ Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ Date: September 11th, 2014
+
+Introduction
+------------
+
+Coresight is an umbrella of technologies allowing for the debugging of ARM
+based SoCs. It includes solutions for JTAG and HW assisted tracing. This
+document is concerned with the latter.
+
+HW assisted tracing is becoming increasingly useful when dealing with systems
+that have many SoCs and other components like GPU and DMA engines. ARM has
+developed a HW assisted tracing solution by means of different components, each
+being added to a design at synthesis time to cater to specific tracing needs.
+Components are generally categorised as sources, links and sinks and are
+(usually) discovered using the AMBA bus.
+
+"Sources" generate a compressed stream representing the processor instruction
+path based on tracing scenarios as configured by users. From there the stream
+flows through the coresight system (via ATB bus) using links that are connecting
+the emanating source to a sink(s). Sinks serve as endpoints to the coresight
+implementation, either storing the compressed stream in a memory buffer or
+creating an interface to the outside world where data can be transferred to a
+host without fear of filling up the onboard coresight memory buffer.
+
+A typical coresight system would look like this:
+
+ *****************************************************************
+ **************************** AMBA AXI ****************************===||
+ ***************************************************************** ||
+ ^ ^ | ||
+ | | * **
+ 0000000 ::::: 0000000 ::::: ::::: @@@@@@@ ||||||||||||
+ 0 CPU 0<-->: C : 0 CPU 0<-->: C : : C : @ STM @ || System ||
+ |->0000000 : T : |->0000000 : T : : T :<--->@@@@@ || Memory ||
+ | #######<-->: I : | #######<-->: I : : I : @@@<-| ||||||||||||
+ | # ETM # ::::: | # PTM # ::::: ::::: @ |
+ | ##### ^ ^ | ##### ^ ! ^ ! . | |||||||||
+ | |->### | ! | |->### | ! | ! . | || DAP ||
+ | | # | ! | | # | ! | ! . | |||||||||
+ | | . | ! | | . | ! | ! . | | |
+ | | . | ! | | . | ! | ! . | | *
+ | | . | ! | | . | ! | ! . | | SWD/
+ | | . | ! | | . | ! | ! . | | JTAG
+ *****************************************************************<-|
+ *************************** AMBA Debug ABP ************************
+ *****************************************************************
+ | . ! . ! ! . |
+ | . * . * * . |
+ *****************************************************************
+ ******************** Cross Trigger Matrix (CTM) *******************
+ *****************************************************************
+ | . ^ . . |
+ | * ! * * |
+ *****************************************************************
+ ****************** AMBA Advanced Trace Bus (ATB) ******************
+ *****************************************************************
+ | ! =============== |
+ | * ===== F =====<---------|
+ | ::::::::: ==== U ====
+ |-->:: CTI ::<!! === N ===
+ | ::::::::: ! == N ==
+ | ^ * == E ==
+ | ! &&&&&&&&& IIIIIII == L ==
+ |------>&& ETB &&<......II I =======
+ | ! &&&&&&&&& II I .
+ | ! I I .
+ | ! I REP I<..........
+ | ! I I
+ | !!>&&&&&&&&& II I *Source: ARM ltd.
+ |------>& TPIU &<......II I DAP = Debug Access Port
+ &&&&&&&&& IIIIIII ETM = Embedded Trace Macrocell
+ ; PTM = Program Trace Macrocell
+ ; CTI = Cross Trigger Interface
+ * ETB = Embedded Trace Buffer
+ To trace port TPIU= Trace Port Interface Unit
+ SWD = Serial Wire Debug
+
+While on-target configuration of the components is done via the APB bus,
+all trace data are carried out-of-band on the ATB bus. The CTM provides
+a way to aggregate and distribute signals between CoreSight components.
+
+The coresight framework provides a central point to represent, configure and
+manage coresight devices on a platform. This first implementation centers on
+the basic tracing functionality, enabling components such as ETM/PTM, funnel,
+replicator, TMC, TPIU and ETB. Future work will enable more
+intricate IP blocks such as STM and CTI.
+
+
+Acronyms and Classification
+---------------------------
+
+Acronyms:
+
+PTM: Program Trace Macrocell
+ETM: Embedded Trace Macrocell
+STM: System trace Macrocell
+ETB: Embedded Trace Buffer
+ITM: Instrumentation Trace Macrocell
+TPIU: Trace Port Interface Unit
+TMC-ETR: Trace Memory Controller, configured as Embedded Trace Router
+TMC-ETF: Trace Memory Controller, configured as Embedded Trace FIFO
+CTI: Cross Trigger Interface
+
+Classification:
+
+Source:
+ ETMv3.x, ETMv4, PTMv1.0, PTMv1.1, STM, STM500, ITM
+Link:
+ Funnel, replicator (intelligent or not), TMC-ETR
+Sinks:
+ ETBv1.0, ETB1.1, TPIU, TMC-ETF
+Misc:
+ CTI
+
+
+Device Tree Bindings
+----------------------
+
+See Documentation/devicetree/bindings/arm/coresight.txt for details.
+
+As of this writing, drivers for ITM, STMs and CTIs are not provided but are
+expected to be added as the solution matures.
+
+
+Framework and implementation
+----------------------------
+
+The coresight framework provides a central point to represent, configure and
+manage coresight devices on a platform. Any coresight compliant device can
+register with the framework as long as it uses the right APIs:
+
+struct coresight_device *coresight_register(struct coresight_desc *desc);
+void coresight_unregister(struct coresight_device *csdev);
+
+The registering function takes a "struct coresight_desc *desc" and
+registers the device with the core framework. The unregister function takes
+a reference to a "struct coresight_device", obtained at registration time.
+
+If everything goes well during the registration process, the new devices will
+show up under /sys/bus/coresight/devices, as shown here for a TC2 platform:
+
+root:~# ls /sys/bus/coresight/devices/
+replicator 20030000.tpiu 2201c000.ptm 2203c000.etm 2203e000.etm
+20010000.etb 20040000.funnel 2201d000.ptm 2203d000.etm
+root:~#
+
+The registering function takes a "struct coresight_desc", which looks like this:
+
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+};
+
+
+The "coresight_dev_type" identifies what the device is, i.e, source link or
+sink while the "coresight_dev_subtype" will characterise that type further.
+
+The "struct coresight_ops" is mandatory and will tell the framework how to
+perform base operations related to the components, each component having
+a different set of requirements. For that, "struct coresight_ops_sink",
+"struct coresight_ops_link" and "struct coresight_ops_source" have been
+provided.
+
+The next field, "struct coresight_platform_data *pdata" is acquired by calling
+"of_get_coresight_platform_data()", as part of the driver's _probe routine and
+"struct device *dev" gets the device reference embedded in the "amba_device":
+
+static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ ...
+ ...
+ drvdata->dev = &adev->dev;
+ ...
+}
+
+Each specific class of device (source, link, or sink) has generic operations
+that can be performed on them (see "struct coresight_ops"). The
+"**groups" is a list of sysfs entries pertaining to operations
+specific to that component only. "Implementation defined" customisations are
+expected to be accessed and controlled using those entries.
+
+Last but not least, "struct module *owner" is expected to be set to reflect
+the information carried in "THIS_MODULE".
+
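+A condensed, illustrative sketch of a sink driver filling a descriptor and
+registering it (the etb_* symbols below are placeholders rather than the
+exact names used by any in-tree driver):
+
+static int etb_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	struct coresight_desc *desc;
+	...
+	desc = devm_kzalloc(&adev->dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->type = CORESIGHT_DEV_TYPE_SINK;
+	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+	desc->ops = &etb_cs_ops;
+	desc->pdata = of_get_coresight_platform_data(&adev->dev,
+						     adev->dev.of_node);
+	desc->dev = &adev->dev;
+	desc->groups = etb_groups;
+
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+	...
+}
+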
+How to use
+----------
+
+Before trace collection can start, a coresight sink needs to be identified.
+There is no limit on the number of sinks (or sources) that can be enabled at
+any given moment. As a generic operation, all devices pertaining to the sink
+class will have an "enable_sink" entry in sysfs:
+
+root:/sys/bus/coresight/devices# ls
+replicator 20030000.tpiu 2201c000.ptm 2203c000.etm 2203e000.etm
+20010000.etb 20040000.funnel 2201d000.ptm 2203d000.etm
+root:/sys/bus/coresight/devices# ls 20010000.etb
+enable_sink status trigger_cntr
+root:/sys/bus/coresight/devices# echo 1 > 20010000.etb/enable_sink
+root:/sys/bus/coresight/devices# cat 20010000.etb/enable_sink
+1
+root:/sys/bus/coresight/devices#
+
+At boot time the current etm3x driver will configure the first address
+comparator with "_stext" and "_etext", essentially tracing any instruction
+that falls within that range. As such "enabling" a source will immediately
+trigger a trace capture:
+
+root:/sys/bus/coresight/devices# echo 1 > 2201c000.ptm/enable_source
+root:/sys/bus/coresight/devices# cat 2201c000.ptm/enable_source
+1
+root:/sys/bus/coresight/devices# cat 20010000.etb/status
+Depth: 0x2000
+Status: 0x1
+RAM read ptr: 0x0
+RAM wrt ptr: 0x19d3 <----- The write pointer is moving
+Trigger cnt: 0x0
+Control: 0x1
+Flush status: 0x0
+Flush ctrl: 0x2001
+root:/sys/bus/coresight/devices#
+
+Trace collection is stopped the same way:
+
+root:/sys/bus/coresight/devices# echo 0 > 2201c000.ptm/enable_source
+root:/sys/bus/coresight/devices#
+
+The content of the ETB buffer can be harvested directly from /dev:
+
+root:/sys/bus/coresight/devices# dd if=/dev/20010000.etb \
+of=~/cstrace.bin
+
+64+0 records in
+64+0 records out
+32768 bytes (33 kB) copied, 0.00125258 s, 26.2 MB/s
+root:/sys/bus/coresight/devices#
+
+The file cstrace.bin can be decompressed using "ptm2human", DS-5 or Trace32.
+
+Following is a DS-5 output of an experimental loop that increments a variable up
+to a certain value. The example is simple and yet provides a glimpse of the
+wealth of possibilities that coresight provides.
+
+Info Tracing enabled
+Instruction 106378866 0x8026B53C E52DE004 false PUSH {lr}
+Instruction 0 0x8026B540 E24DD00C false SUB sp,sp,#0xc
+Instruction 0 0x8026B544 E3A03000 false MOV r3,#0
+Instruction 0 0x8026B548 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Timestamp Timestamp: 17106715833
+Instruction 319 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Instruction 9 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Instruction 7 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Instruction 7 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Instruction 10 0x8026B54C E59D3004 false LDR r3,[sp,#4]
+Instruction 0 0x8026B550 E3530004 false CMP r3,#4
+Instruction 0 0x8026B554 E2833001 false ADD r3,r3,#1
+Instruction 0 0x8026B558 E58D3004 false STR r3,[sp,#4]
+Instruction 0 0x8026B55C DAFFFFFA true BLE {pc}-0x10 ; 0x8026b54c
+Instruction 6 0x8026B560 EE1D3F30 false MRC p15,#0x0,r3,c13,c0,#1
+Instruction 0 0x8026B564 E1A0100D false MOV r1,sp
+Instruction 0 0x8026B568 E3C12D7F false BIC r2,r1,#0x1fc0
+Instruction 0 0x8026B56C E3C2203F false BIC r2,r2,#0x3f
+Instruction 0 0x8026B570 E59D1004 false LDR r1,[sp,#4]
+Instruction 0 0x8026B574 E59F0010 false LDR r0,[pc,#16] ; [0x8026B58C] = 0x80550368
+Instruction 0 0x8026B578 E592200C false LDR r2,[r2,#0xc]
+Instruction 0 0x8026B57C E59221D0 false LDR r2,[r2,#0x1d0]
+Instruction 0 0x8026B580 EB07A4CF true BL {pc}+0x1e9344 ; 0x804548c4
+Info Tracing enabled
+Instruction 13570831 0x8026B584 E28DD00C false ADD sp,sp,#0xc
+Instruction 0 0x8026B588 E8BD8000 true LDM sp!,{pc}
+Timestamp Timestamp: 17107041535
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index 4cf53e406613..635e57493709 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -376,7 +376,7 @@ functions and binds them. This way the whole gadget is bound.
configured, so config_groups for particular functions are defined
in the functions implementation files drivers/usb/gadget/f_*.c.
-5. Funciton's code is written in such a way that it uses
+5. Function's code is written in such a way that it uses
usb_get_function_instance(), which, in turn, calls request_module.
So, provided that modprobe works, modules for particular functions
diff --git a/Documentation/usb/gadget_hid.txt b/Documentation/usb/gadget_hid.txt
index 12696c2e43fb..7a0fb8e16e27 100644
--- a/Documentation/usb/gadget_hid.txt
+++ b/Documentation/usb/gadget_hid.txt
@@ -74,6 +74,13 @@ static struct platform_device my_hid = {
You can add as many HID functions as you want, only limited by
the amount of interrupt endpoints your gadget driver supports.
+Configuration with configfs
+
+ Instead of adding fake platform devices and drivers in order to pass
+ some data to the kernel, if HID is a part of a gadget composed with
+ configfs, the hidg_func_descriptor.report_desc is passed to the kernel
+ by writing the appropriate stream of bytes to a configfs attribute.
+
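+ As a hedged illustration (the configfs mount point, gadget name "g1" and
+ function instance "hid.usb0" are example values, and the report descriptor
+ is truncated), the bytes could be written from a small C program:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	static const unsigned char report_desc[] = {
+		0x05, 0x01,	/* Usage Page (Generic Desktop) */
+		0x09, 0x06,	/* Usage (Keyboard) */
+		/* ... remaining report descriptor bytes ... */
+	};
+
+	int main(void)
+	{
+		int fd = open("/sys/kernel/config/usb_gadget/g1/functions/"
+			      "hid.usb0/report_desc", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+		write(fd, report_desc, sizeof(report_desc));
+		close(fd);
+		return 0;
+	}
+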
Send and receive HID reports
HID reports can be sent/received using read/write on the
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index 5bd7926185e8..947fa62bccf2 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -145,7 +145,7 @@ Keyspan PDA Serial Adapter
Single port DB-9 serial adapter, pushed as a PDA adapter for iMacs (mostly
sold in Macintosh catalogs, comes in a translucent white/green dongle).
Fairly simple device. Firmware is homebrew.
- This driver also works for the Xircom/Entrgra single port serial adapter.
+ This driver also works for the Xircom/Entrega single port serial adapter.
Current status:
Things that work:
diff --git a/Documentation/video4linux/CARDLIST.cx23885 b/Documentation/video4linux/CARDLIST.cx23885
index a74eeccfe700..4c84ec853265 100644
--- a/Documentation/video4linux/CARDLIST.cx23885
+++ b/Documentation/video4linux/CARDLIST.cx23885
@@ -43,3 +43,5 @@
42 -> Leadtek Winfast PxPVR2200 [107d:6f21]
43 -> Hauppauge ImpactVCB-e [0070:7133]
44 -> DViCO FusionHDTV DVB-T Dual Express2 [18ac:db98]
+ 45 -> DVBSky T9580 [4254:9580]
+ 46 -> DVBSky T980C [4254:980c]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index bc3351bb48b4..3700edb81db2 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -93,3 +93,4 @@
92 -> PCTV DVB-S2 Stick (461e) (em28178)
93 -> KWorld USB ATSC TV Stick UB435-Q V3 (em2874) [1b80:e34c]
94 -> PCTV tripleStick (292e) (em28178)
+ 95 -> Leadtek VC100 (em2861) [0413:6f07]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 8df17d063499..a93d86455233 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -191,3 +191,4 @@
190 -> Asus My Cinema PS3-100 [1043:48cd]
191 -> Hawell HW-9004V1
192 -> AverMedia AverTV Satellite Hybrid+FM A706 [1461:2055]
+193 -> WIS Voyager or compatible [1905:7007]
diff --git a/Documentation/video4linux/soc-camera.txt b/Documentation/video4linux/soc-camera.txt
index daa9e2ac162c..84f41cf1f3e8 100644
--- a/Documentation/video4linux/soc-camera.txt
+++ b/Documentation/video4linux/soc-camera.txt
@@ -151,7 +151,7 @@ they are transferred over a media bus. Soc-camera provides support to
conveniently manage these formats. A table of standard transformations is
maintained by soc-camera core, which describes, what FOURCC pixel format will
be obtained, if a media-bus pixel format is stored in memory according to
-certain rules. E.g. if V4L2_MBUS_FMT_YUYV8_2X8 data is sampled with 8 bits per
+certain rules. E.g. if MEDIA_BUS_FMT_YUYV8_2X8 data is sampled with 8 bits per
sample and stored in memory in the little-endian order with no gaps between
bytes, data in memory will represent the V4L2_PIX_FMT_YUYV FOURCC format. These
standard transformations will be used by soc-camera or by camera host drivers to
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index e5a940e3d304..6cfc8541a362 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -640,6 +640,21 @@ Colorspace: selects which colorspace should be used when generating the image.
Changing the colorspace will result in the V4L2_EVENT_SOURCE_CHANGE
to be sent since it emulates a detected colorspace change.
+Y'CbCr Encoding: selects which Y'CbCr encoding should be used when generating
+ a Y'CbCr image. This only applies if the CSC Colorbar test pattern is
+ selected, and if the format is set to a Y'CbCr format as opposed to an
+ RGB format.
+
+ Changing the Y'CbCr encoding will result in the V4L2_EVENT_SOURCE_CHANGE
+ to be sent since it emulates a detected colorspace change.
+
+Quantization: selects which quantization should be used for the RGB or Y'CbCr
+ encoding when generating the test pattern. This only applies if the CSC
+ Colorbar test pattern is selected.
+
+ Changing the quantization will result in the V4L2_EVENT_SOURCE_CHANGE
+ to be sent since it emulates a detected colorspace change.
+
Limited RGB Range (16-235): selects if the RGB range of the HDMI source should
be limited or full range. This combines with the Digital Video 'Rx RGB
Quantization Range' control and can be used to test what happens if
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7610eaa4d491..0007fef4ed81 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -68,9 +68,12 @@ description:
Capability: which KVM extension provides this ioctl. Can be 'basic',
which means that it will be provided by any kernel that supports
- API version 12 (see section 4.1), or a KVM_CAP_xyz constant, which
+ API version 12 (see section 4.1), a KVM_CAP_xyz constant, which
means availability needs to be checked with KVM_CHECK_EXTENSION
- (see section 4.4).
+ (see section 4.4), or 'none' which means that while not all kernels
+ support this ioctl, there's no capability bit to check its
+ availability: for kernels that don't support the ioctl,
+ the ioctl returns -ENOTTY.
Architectures: which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
@@ -604,7 +607,7 @@ struct kvm_fpu {
4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
-Architectures: x86, ia64, ARM, arm64, s390
+Architectures: x86, ARM, arm64, s390
Type: vm ioctl
Parameters: none
Returns: 0 on success, -1 on error
@@ -612,7 +615,7 @@ Returns: 0 on success, -1 on error
Creates an interrupt controller model in the kernel. On x86, creates a virtual
ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
+only go to the IOAPIC. On ARM/arm64, a GIC is
created. On s390, a dummy irq routing table is created.
Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
@@ -622,7 +625,7 @@ before KVM_CREATE_IRQCHIP can be used.
4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, arm, arm64
+Architectures: x86, arm, arm64
Type: vm ioctl
Parameters: struct kvm_irq_level
Returns: 0 on success, -1 on error
@@ -676,7 +679,7 @@ struct kvm_irq_level {
4.26 KVM_GET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqchip (in/out)
Returns: 0 on success, -1 on error
@@ -698,7 +701,7 @@ struct kvm_irqchip {
4.27 KVM_SET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqchip (in)
Returns: 0 on success, -1 on error
@@ -991,7 +994,7 @@ for vm-wide capabilities.
4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
Type: vcpu ioctl
Parameters: struct kvm_mp_state (out)
Returns: 0 on success; -1 on error
@@ -1005,16 +1008,15 @@ uniprocessor guests).
Possible values are:
- - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86, ia64]
+ - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86]
- KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
- which has not yet received an INIT signal [x86,
- ia64]
+ which has not yet received an INIT signal [x86]
- KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
- now ready for a SIPI [x86, ia64]
+ now ready for a SIPI [x86]
- KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and
- is waiting for an interrupt [x86, ia64]
+ is waiting for an interrupt [x86]
- KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
- accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+ accessible via KVM_GET_VCPU_EVENTS) [x86]
- KVM_MP_STATE_STOPPED: the vcpu is stopped [s390]
- KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390]
- KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted)
@@ -1022,7 +1024,7 @@ Possible values are:
- KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state
[s390]
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
@@ -1030,7 +1032,7 @@ these architectures.
4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
Type: vcpu ioctl
Parameters: struct kvm_mp_state (in)
Returns: 0 on success; -1 on error
@@ -1038,7 +1040,7 @@ Returns: 0 on success; -1 on error
Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
arguments.
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
@@ -1065,7 +1067,7 @@ documentation when it pops into existence).
4.41 KVM_SET_BOOT_CPU_ID
Capability: KVM_CAP_SET_BOOT_CPU_ID
-Architectures: x86, ia64
+Architectures: x86
Type: vm ioctl
Parameters: unsigned long vcpu_id
Returns: 0 on success, -1 on error
@@ -1257,8 +1259,8 @@ The flags bitmap is defined as:
4.48 KVM_ASSIGN_PCI_DEVICE
-Capability: KVM_CAP_DEVICE_ASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_pci_dev (in)
Returns: 0 on success, -1 on error
@@ -1298,25 +1300,36 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
device assignment. The user requesting this ioctl must have read/write
access to the PCI sysfs resource files associated with the device.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
-Capability: KVM_CAP_DEVICE_DEASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_pci_dev (in)
Returns: 0 on success, -1 on error
Ends PCI device assignment, releasing all associated resources.
-See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+See KVM_ASSIGN_PCI_DEVICE for the data structure. Only assigned_dev_id is
used in kvm_assigned_pci_dev to identify the device.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
4.50 KVM_ASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_irq (in)
Returns: 0 on success, -1 on error
@@ -1346,11 +1359,17 @@ The following flags are defined:
It is not valid to specify multiple types per host or guest IRQ. However, the
IRQ type of host and guest can differ or can even be null.
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.51 KVM_DEASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_irq (in)
Returns: 0 on success, -1 on error
@@ -1365,7 +1384,7 @@ KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
-Architectures: x86 ia64 s390
+Architectures: x86 s390
Type: vm ioctl
Parameters: struct kvm_irq_routing (in)
Returns: 0 on success, -1 on error
@@ -1423,8 +1442,8 @@ struct kvm_irq_routing_s390_adapter {
4.53 KVM_ASSIGN_SET_MSIX_NR
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_msix_nr (in)
Returns: 0 on success, -1 on error
@@ -1445,8 +1464,8 @@ struct kvm_assigned_msix_nr {
4.54 KVM_ASSIGN_SET_MSIX_ENTRY
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
Type: vm ioctl
Parameters: struct kvm_assigned_msix_entry (in)
Returns: 0 on success, -1 on error
@@ -1461,6 +1480,12 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
+Errors:
+ ENOTTY: kernel does not support this ioctl
+
+ Other error conditions may be defined by individual device types or
+ have their standard meanings.
+
4.55 KVM_SET_TSC_KHZ
@@ -2453,9 +2478,15 @@ return ENOEXEC for that vcpu.
Note that because some registers reflect machine topology, all vcpus
should be created before this ioctl is invoked.
+Userspace can call this function multiple times for a given vcpu, including
+after the vcpu has been run. This will reset the vcpu to its initial
+state. All calls to this function after the initial call must use the same
+target and same set of feature flags, otherwise EINVAL will be returned.
+
Possible features:
- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
- Depends on KVM_CAP_ARM_PSCI.
+ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
+ and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
@@ -2951,6 +2982,15 @@ HVC instruction based PSCI call from the vcpu. The 'type' field describes
the system-level event type. The 'flags' field describes architecture
specific flags for the system-level event.
+Valid values for 'type' are:
+ KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+ VM. Userspace is not obliged to honour this, and if it does, it need
+ not destroy the VM synchronously (i.e. it may call KVM_RUN again
+ before the shutdown finally occurs).
+ KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+ As with SHUTDOWN, userspace can choose to ignore the request, or
+ to schedule the reset to occur in the future and may call KVM_RUN again.
+
/* Fix the size of the union. */
char padding[256];
};
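
As an illustration of the capability conventions documented above (a sketch,
not part of the patch): ioctls tagged with a KVM_CAP_xyz constant are queried
via KVM_CHECK_EXTENSION, while 'none'-capability ioctls are simply attempted
and -1 with errno ENOTTY is taken to mean "unsupported". Error handling is
trimmed for brevity.

  /* Illustrative sketch only: probe ioctl availability both ways. */
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);
          int vm = ioctl(kvm, KVM_CREATE_VM, 0);

          /* Style 1: a KVM_CAP_xyz constant exists, so ask the kernel. */
          if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
                  ioctl(vm, KVM_CREATE_IRQCHIP, 0);

          /* Style 2: capability 'none' -- just try the ioctl and treat
           * -1/ENOTTY as "this kernel does not support it". */
          struct kvm_assigned_pci_dev dev = { 0 };
          if (ioctl(vm, KVM_ASSIGN_PCI_DEVICE, &dev) < 0 && errno == ENOTTY)
                  printf("KVM_ASSIGN_PCI_DEVICE not supported here\n");

          return 0;
  }

On kernels without device assignment the second ioctl fails with ENOTTY,
exactly as the new "Errors:" sections describe.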
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 0d16f96c0eac..d426fc87fe93 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -12,14 +12,14 @@ specific.
1. GROUP: KVM_S390_VM_MEM_CTRL
Architectures: s390
-1.1. ATTRIBUTE: KVM_S390_VM_MEM_CTRL
+1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA
Parameters: none
-Returns: -EBUSY if already a vcpus is defined, otherwise 0
+Returns: -EBUSY if a vcpu is already defined, otherwise 0
-Enables CMMA for the virtual machine
+Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
-1.2. ATTRIBUTE: KVM_S390_VM_CLR_CMMA
-Parameteres: none
+1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
+Parameters: none
Returns: 0
Clear the CMMA status for all guest pages, so any pages the guest marked
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 6d470ae7b073..2a71c8f29f68 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -168,7 +168,7 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
64 byte memory area which must be in guest RAM and must be
zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
when asynchronous page faults are enabled on the vcpu 0 when
- disabled. Bit 2 is 1 if asynchronous page faults can be injected
+ disabled. Bit 1 is 1 if asynchronous page faults can be injected
when vcpu is in cpl == 0.
First 4 byte of 64 byte memory location will be written to by
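
To make the bit layout concrete, a guest-side sketch (assumptions: kernel
context, an existing 64-byte zeroed per-cpu area, and the x86 KVM_ASYNC_PF_*
defines from asm/kvm_para.h); it roughly mirrors what the Linux guest does
when enabling async page faults, and is not part of this patch.

  /* Illustrative guest-side sketch. */
  #include <linux/types.h>
  #include <asm/kvm_para.h>   /* KVM_ASYNC_PF_ENABLED, KVM_ASYNC_PF_SEND_ALWAYS */
  #include <asm/msr.h>        /* wrmsrl() */

  static void enable_async_pf(u64 pa_of_apf_area)
  {
          u64 val = pa_of_apf_area;          /* bits 63-6: address of the area */

          val |= KVM_ASYNC_PF_ENABLED;       /* bit 0: enable */
          val |= KVM_ASYNC_PF_SEND_ALWAYS;   /* bit 1: inject even at cpl == 0 */

          wrmsrl(MSR_KVM_ASYNC_PF_EN, val);
  }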
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index b64e0af9cc56..f2d3a100fe38 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -1,8 +1,8 @@
The intent of this file is to give a brief summary of hugetlbpage support in
the Linux kernel. This support is built on top of multiple page size support
-that is provided by most modern architectures. For example, i386
-architecture supports 4K and 4M (2M in PAE mode) page sizes, ia64
+that is provided by most modern architectures. For example, x86 CPUs normally
+support 4K and 2M (1G if architecturally supported) page sizes, ia64
architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
translations. Typically this is a very scarce resource on processor.
diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt
new file mode 100644
index 000000000000..8f3ce9b3aa11
--- /dev/null
+++ b/Documentation/vm/page_owner.txt
@@ -0,0 +1,81 @@
+page owner: Tracking who allocated each page
+-----------------------------------------------------------
+
+* Introduction
+
+page owner is for tracking who allocated each page.
+It can be used to debug memory leaks or to find memory hogs.
+When an allocation happens, information about it, such as the call stack
+and the order of the pages, is stored per page.
+When we need to know the status of all pages, we can get and analyze
+this information.
+
+Although we already have tracepoints for tracing page allocation/free,
+using them to analyze who allocated each page is rather complex. We need
+to enlarge the trace buffer to prevent overwriting until the userspace
+program is launched. And the launched program must continually dump out
+the trace buffer for later analysis, which perturbs system behaviour more
+than just keeping the information in memory, so it is bad for debugging.
+
+page owner can also be used for various other purposes. For example,
+accurate fragmentation statistics can be obtained through the gfp flag
+information of each page. This is already implemented and activated if
+page owner is enabled. Other uses are more than welcome.
+
+page owner is disabled by default. So, if you'd like to use it, you need
+to add "page_owner=on" to your boot cmdline. If the kernel is built with
+page owner but page owner is disabled at runtime because the enabling
+boot option is missing, the runtime overhead is marginal. When disabled
+at runtime, it doesn't require memory to store owner information, so
+there is no runtime memory overhead either. Page owner inserts just two
+unlikely branches into the page allocator hotpath, and if they return
+false the allocation proceeds as in a kernel without page owner. These
+two unlikely branches should not affect allocation performance. The
+following is the kernel's code size change due to this facility.
+
+- Without page owner
+ text data bss dec hex filename
+ 40662 1493 644 42799 a72f mm/page_alloc.o
+
+- With page owner
+ text data bss dec hex filename
+ 40892 1493 644 43029 a815 mm/page_alloc.o
+ 1427 24 8 1459 5b3 mm/page_ext.o
+ 2722 50 0 2772 ad4 mm/page_owner.o
+
+Although roughly 4 KB of code is added in total, page_alloc.o increases
+by only 230 bytes, and only half of that is in the hotpath. Building the
+kernel with page owner and turning it on only when needed is a good
+option for debugging kernel memory problems.
+
+There is one caveat caused by an implementation detail. page owner stores
+its information in memory obtained from the struct page extension. On
+sparse memory systems this memory is initialized some time after the page
+allocator starts, so until that initialization many pages can be allocated
+without owner information. To fix this up, these early allocated pages are
+investigated and marked as allocated during the initialization phase.
+Although this doesn't mean they carry the right owner information, at
+least we can tell more accurately whether a page is allocated or not. On
+an x86-64 VM with 2GB of memory, 13343 early allocated pages are caught
+and marked this way, although most of them are allocated by the struct
+page extension feature itself. After that, no page is left in an
+untracked state.
+
+* Usage
+
+1) Build user-space helper
+ cd tools/vm
+ make page_owner_sort
+
+2) Enable page owner
+ Add "page_owner=on" to boot cmdline.
+
+3) Do the job you want to debug
+
+4) Analyze information from page owner
+ cat /sys/kernel/debug/page_owner > page_owner_full.txt
+ grep -v ^PFN page_owner_full.txt > page_owner.txt
+ ./page_owner_sort page_owner.txt sorted_page_owner.txt
+
+   See who allocated each page in the resulting
+   sorted_page_owner.txt.
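
If the page_owner_sort helper is unavailable, the raw dump can also be
summarised directly. A minimal sketch (not part of this patch), assuming
each record begins with a "Page allocated via order N, ..." header line as
printed by mm/page_owner.c:

  /* Count page_owner records per allocation order. */
  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/sys/kernel/debug/page_owner", "r");
          unsigned long hist[16] = { 0 };
          unsigned int order;
          char line[1024];

          if (!f)
                  return 1;
          while (fgets(line, sizeof(line), f))
                  if (sscanf(line, "Page allocated via order %u", &order) == 1 &&
                      order < 16)
                          hist[order]++;
          fclose(f);

          for (order = 0; order < 16; order++)
                  if (hist[order])
                          printf("order %2u: %lu records\n", order, hist[order]);
          return 0;
  }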
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index bc7226ef5055..4a1c5c2dc5a9 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -7,9 +7,12 @@ http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu>
The x86 architecture has quite a few different ways to jump into
kernel code. Most of these entry points are registered in
arch/x86/kernel/traps.c and implemented in arch/x86/kernel/entry_64.S
-and arch/x86/ia32/ia32entry.S.
+for 64-bit, arch/x86/kernel/entry_32.S for 32-bit and finally
+arch/x86/ia32/ia32entry.S which implements the 32-bit compatibility
+syscall entry points and thus provides 32-bit processes with the
+ability to execute syscalls when running on 64-bit kernels.
-The IDT vector assignments are listed in arch/x86/include/irq_vectors.h.
+The IDT vector assignments are listed in arch/x86/include/asm/irq_vectors.h.
Some of these entries are:
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c63cfecc6bf..08f671dad3e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -618,6 +618,16 @@ S: Maintained
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
+AMD KFD
+M: Oded Gabbay <oded.gabbay@amd.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~gabbayo/linux.git
+S: Supported
+F: drivers/gpu/drm/amd/amdkfd/
+F: drivers/gpu/drm/radeon/radeon_kfd.c
+F: drivers/gpu/drm/radeon/radeon_kfd.h
+F: include/uapi/linux/kfd_ioctl.h
+
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
@@ -850,6 +860,7 @@ ARM/Amlogic MesonX SoC support
M: Carlo Caione <carlo@caione.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: drivers/media/rc/meson-ir.c
N: meson[x68]
ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
@@ -919,6 +930,15 @@ M: Hubert Feurstein <hubert.feurstein@contec.at>
S: Maintained
F: arch/arm/mach-ep93xx/micro9.c
+ARM/CORESIGHT FRAMEWORK AND DRIVERS
+M: Mathieu Poirier <mathieu.poirier@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/coresight/*
+F: Documentation/trace/coresight.txt
+F: Documentation/devicetree/bindings/arm/coresight.txt
+F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
+
ARM/CORGI MACHINE SUPPORT
M: Richard Purdie <rpurdie@rpsys.net>
S: Maintained
@@ -1721,6 +1741,13 @@ F: drivers/dma/at_hdmac.c
F: drivers/dma/at_hdmac_regs.h
F: include/linux/platform_data/dma-atmel.h
+ATMEL XDMA DRIVER
+M: Ludovic Desroches <ludovic.desroches@atmel.com>
+L: linux-arm-kernel@lists.infradead.org
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/at_xdmac.c
+
ATMEL I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
L: linux-i2c@vger.kernel.org
@@ -1793,10 +1820,11 @@ S: Supported
F: drivers/scsi/esas2r
AUDIT SUBSYSTEM
+M: Paul Moore <paul@paul-moore.com>
M: Eric Paris <eparis@redhat.com>
-L: linux-audit@redhat.com (subscribers-only)
+L: linux-audit@redhat.com (moderated for non-subscribers)
W: http://people.redhat.com/sgrubb/audit/
-T: git git://git.infradead.org/users/eparis/audit.git
+T: git git://git.infradead.org/users/pcmoore/audit
S: Maintained
F: include/linux/audit.h
F: include/uapi/linux/audit.h
@@ -1869,7 +1897,6 @@ F: drivers/net/wireless/b43legacy/
BACKLIGHT CLASS/SUBSYSTEM
M: Jingoo Han <jg1.han@samsung.com>
-M: Bryan Wu <cooloney@gmail.com>
M: Lee Jones <lee.jones@linaro.org>
S: Maintained
F: drivers/video/backlight/
@@ -1898,13 +1925,6 @@ W: http://bcache.evilpiepirate.org
S: Maintained
F: drivers/md/bcache/
-BECEEM BCS200/BCS220-3/BCSM250 WIMAX SUPPORT
-M: Kevin McKinney <klmckinney1@gmail.com>
-M: Matthias Beyer <mail@beyermatthias.de>
-L: devel@driverdev.osuosl.org
-S: Maintained
-F: drivers/staging/bcm*
-
BEFS FILE SYSTEM
S: Orphan
F: Documentation/filesystems/befs.txt
@@ -2085,6 +2105,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
S: Maintained
N: bcm2835
+BROADCOM BCM33XX MIPS ARCHITECTURE
+M: Kevin Cernekee <cernekee@gmail.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: arch/mips/bcm3384/*
+F: arch/mips/include/asm/mach-bcm3384/*
+F: arch/mips/kernel/*bmips*
+
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
L: linux-arm-kernel@lists.infradead.org
@@ -2101,6 +2129,12 @@ S: Maintained
F: arch/arm/mach-bcm/bcm63xx.c
F: arch/arm/include/debug/bcm63xx.S
+BROADCOM BCM63XX/BCM33XX UDC DRIVER
+M: Kevin Cernekee <cernekee@gmail.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/gadget/udc/bcm63xx_udc.*
+
BROADCOM BCM7XXX ARM ARCHITECTURE
M: Marc Carino <marc.ceeeee@gmail.com>
M: Brian Norris <computersforpeace@gmail.com>
@@ -2112,6 +2146,18 @@ F: arch/arm/mach-bcm/*brcmstb*
F: arch/arm/boot/dts/bcm7*.dts*
F: drivers/bus/brcmstb_gisb.c
+BROADCOM BMIPS MIPS ARCHITECTURE
+M: Kevin Cernekee <cernekee@gmail.com>
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: arch/mips/bmips/*
+F: arch/mips/include/asm/mach-bmips/*
+F: arch/mips/kernel/*bmips*
+F: arch/mips/boot/dts/bcm*.dts*
+F: drivers/irqchip/irq-bcm7*
+F: drivers/irqchip/irq-brcmstb*
+
BROADCOM TG3 GIGABIT ETHERNET DRIVER
M: Prashant Sreedharan <prashant@broadcom.com>
M: Michael Chan <mchan@broadcom.com>
@@ -2332,6 +2378,14 @@ F: security/capability.c
F: security/commoncap.c
F: kernel/capability.c
+CC2520 IEEE-802.15.4 RADIO DRIVER
+M: Varka Bhadram <varkabhadram@gmail.com>
+L: linux-wpan@vger.kernel.org
+S: Maintained
+F: drivers/net/ieee802154/cc2520.c
+F: include/linux/spi/cc2520.h
+F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+
CELL BROADBAND ENGINE ARCHITECTURE
M: Arnd Bergmann <arnd@arndb.de>
L: linuxppc-dev@lists.ozlabs.org
@@ -2513,6 +2567,13 @@ F: fs/coda/
F: include/linux/coda*.h
F: include/uapi/linux/coda*.h
+CODA V4L2 MEM2MEM DRIVER
+M: Philipp Zabel <p.zabel@pengutronix.de>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/coda.txt
+F: drivers/media/platform/coda/
+
COMMON CLK FRAMEWORK
M: Mike Turquette <mturquette@linaro.org>
L: linux-kernel@vger.kernel.org
@@ -3121,7 +3182,8 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Maintained
F: drivers/dma/
F: include/linux/dma*
-T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
+F: Documentation/dmaengine/
+T: git git://git.infradead.org/users/vkoul/slave-dma.git
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
@@ -3238,6 +3300,13 @@ F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
+DRM DRIVERS FOR FREESCALE IMX
+M: Philipp Zabel <p.zabel@pengutronix.de>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/imx/
+F: Documentation/devicetree/bindings/drm/imx/
+
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
M: Terje Bergström <tbergstrom@nvidia.com>
@@ -3996,7 +4065,7 @@ F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@tabi.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
-M: Xiubo Li <Li.Xiubo@freescale.com>
+M: Xiubo Li <Xiubo.Lee@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@@ -4186,6 +4255,12 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/usb/go7007/
+GOODIX TOUCHSCREEN
+M: Bastien Nocera <hadess@hadess.net>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/touchscreen/goodix.c
+
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Alexandre Courbot <gnurou@gmail.com>
@@ -4607,6 +4682,7 @@ W: https://i2c.wiki.kernel.org/
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
S: Maintained
+F: Documentation/devicetree/bindings/i2c/
F: Documentation/i2c/
F: drivers/i2c/
F: include/linux/i2c.h
@@ -4745,8 +4821,21 @@ S: Maintained
F: net/ieee802154/
F: net/mac802154/
F: drivers/net/ieee802154/
+F: include/linux/nl802154.h
+F: include/linux/ieee802154.h
+F: include/net/nl802154.h
+F: include/net/mac802154.h
+F: include/net/af_ieee802154.h
+F: include/net/cfg802154.h
+F: include/net/ieee802154_netdev.h
F: Documentation/networking/ieee802154.txt
+IGORPLUG-USB IR RECEIVER
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/igorplugusb.c
+
IGUANAWORKS USB IR TRANSCEIVER
M: Sean Young <sean@mess.org>
L: linux-media@vger.kernel.org
@@ -4804,6 +4893,11 @@ L: linux-security-module@vger.kernel.org
S: Supported
F: security/integrity/ima/
+IMGTEC IR DECODER DRIVER
+M: James Hogan <james.hogan@imgtec.com>
+S: Maintained
+F: drivers/media/rc/img-ir/
+
IMS TWINTURBO FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
S: Orphan
@@ -5407,15 +5501,6 @@ S: Supported
F: arch/powerpc/include/asm/kvm*
F: arch/powerpc/kvm/
-KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
-M: Xiantao Zhang <xiantao.zhang@intel.com>
-L: kvm-ia64@vger.kernel.org
-W: http://kvm.qumranet.com
-S: Supported
-F: Documentation/ia64/kvm.txt
-F: arch/ia64/include/asm/kvm*
-F: arch/ia64/kvm/
-
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -5919,6 +6004,11 @@ M: Russell King <rmk+kernel@arm.linux.org.uk>
S: Maintained
F: drivers/gpu/drm/armada/
+MARVELL 88E6352 DSA support
+M: Guenter Roeck <linux@roeck-us.net>
+S: Maintained
+F: drivers/net/dsa/mv88e6352.c
+
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -6158,6 +6248,28 @@ S: Supported
F: include/linux/mlx5/
F: drivers/infiniband/hw/mlx5/
+MN88472 MEDIA DRIVER
+M: Antti Palosaari <crope@iki.fi>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+W: http://palosaari.fi/linux/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/anttip/media_tree.git
+S: Maintained
+F: drivers/staging/media/mn88472/
+F: drivers/media/dvb-frontends/mn88472.h
+
+MN88473 MEDIA DRIVER
+M: Antti Palosaari <crope@iki.fi>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+W: http://palosaari.fi/linux/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/anttip/media_tree.git
+S: Maintained
+F: drivers/staging/media/mn88473/
+F: drivers/media/dvb-frontends/mn88473.h
+
MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
@@ -6504,19 +6616,8 @@ L: netdev@vger.kernel.org
S: Maintained
NETWORKING [WIRELESS]
-M: "John W. Linville" <linville@tuxdriver.com>
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
-S: Maintained
-F: net/mac80211/
-F: net/rfkill/
-F: net/wireless/
-F: include/net/ieee80211*
-F: include/linux/wireless.h
-F: include/uapi/linux/wireless.h
-F: include/net/iw_handler.h
-F: drivers/net/wireless/
NETWORKING DRIVERS
L: netdev@vger.kernel.org
@@ -6537,6 +6638,14 @@ F: include/linux/inetdevice.h
F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
+NETWORKING DRIVERS (WIRELESS)
+M: Kalle Valo <kvalo@codeaurora.org>
+L: linux-wireless@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/
+S: Maintained
+F: drivers/net/wireless/
+
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
@@ -6648,6 +6757,12 @@ S: Supported
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
+NXP TFA9879 DRIVER
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/codecs/tfa9879*
+
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@@ -7291,7 +7406,13 @@ PIN CONTROLLER - ATMEL AT91
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/pinctrl/pinctrl-at91.c
+F: drivers/pinctrl/pinctrl-at91.*
+
+PIN CONTROLLER - INTEL
+M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+S: Maintained
+F: drivers/pinctrl/intel/
PIN CONTROLLER - RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -7910,6 +8031,13 @@ F: drivers/hid/hid-roccat*
F: include/linux/hid-roccat*
F: Documentation/ABI/*/sysfs-driver-hid-roccat*
+ROCKER DRIVER
+M: Jiri Pirko <jiri@resnulli.us>
+M: Scott Feldman <sfeldma@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/rocker/
+
ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
@@ -7917,6 +8045,12 @@ S: Maintained
F: Documentation/serial/rocket.txt
F: drivers/tty/rocket*
+ROCKETPORT EXPRESS/INFINITY DRIVER
+M: Kevin Cernekee <cernekee@gmail.com>
+L: linux-serial@vger.kernel.org
+S: Odd Fixes
+F: drivers/tty/serial/rp2.*
+
ROSE NETWORK LAYER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
@@ -7957,11 +8091,10 @@ S: Maintained
F: drivers/media/dvb-frontends/rtl2832_sdr*
RTL8180 WIRELESS DRIVER
-M: "John W. Linville" <linville@tuxdriver.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
-S: Maintained
+S: Orphan
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
@@ -8546,6 +8679,14 @@ F: drivers/media/usb/siano/
F: drivers/media/usb/siano/
F: drivers/media/mmc/siano/
+SIMPLEFB FB DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/video/simple-framebuffer.txt
+F: drivers/video/fbdev/simplefb.c
+F: include/linux/platform_data/simplefb.h
+
SH_VEU V4L2 MEM2MEM DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -9106,6 +9247,13 @@ F: lib/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
+SWITCHDEV
+M: Jiri Pirko <jiri@resnulli.us>
+L: netdev@vger.kernel.org
+S: Supported
+F: net/switchdev/
+F: include/net/switchdev.h
+
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
S: Supported
@@ -9362,6 +9510,7 @@ Q: https://patchwork.kernel.org/project/linux-pm/list/
S: Supported
F: drivers/thermal/
F: include/linux/thermal.h
+F: include/uapi/linux/thermal.h
F: include/linux/cpu_cooling.h
F: Documentation/devicetree/bindings/thermal/
@@ -9466,7 +9615,7 @@ F: include/uapi/linux/tipc*.h
F: net/tipc/
TILE ARCHITECTURE
-M: Chris Metcalf <cmetcalf@tilera.com>
+M: Chris Metcalf <cmetcalf@ezchip.com>
W: http://www.tilera.com/scm/
S: Supported
F: arch/tile/
@@ -10094,13 +10243,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
-VIVI VIRTUAL VIDEO DRIVER
+VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
S: Maintained
-F: drivers/media/platform/vivi*
+F: drivers/media/platform/vivid/*
VLAN (802.1Q)
M: Patrick McHardy <kaber@trash.net>
@@ -10363,6 +10512,13 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
+X86 VDSO
+M: Andy Lutomirski <luto@amacapital.net>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
+S: Maintained
+F: arch/x86/vdso/
+
XC2028/3028 TUNER DRIVER
M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 25b49725df07..76aeb8fa551a 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,7 +3,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
index 3832bdb794fe..77516c87255d 100644
--- a/arch/alpha/include/asm/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
@@ -7,6 +7,57 @@
#define rmb() __asm__ __volatile__("mb": : :"memory")
#define wmb() __asm__ __volatile__("wmb": : :"memory")
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
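
The first example in the comment above corresponds to the following
publisher/consumer sketch (illustrative only; real kernel code would
normally use rcu_assign_pointer()/rcu_dereference(), which wrap these
primitives):

  /* Pointer-publication pattern from the comment above (kernel context). */
  static int b;
  static int *p;

  /* CPU 0: publisher */
  static void publish(void)
  {
          b = 2;
          wmb();                     /* order the store to b before the store to p */
          p = &b;
  }

  /* CPU 1: consumer */
  static int consume(void)
  {
          int *q = p;

          read_barrier_depends();    /* needed on Alpha before the dependent load */
          return q ? *q : 0;         /* once q == &b, guaranteed to see b == 2 */
  }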
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 3de1394bcab8..9a20821b111c 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -87,4 +87,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _UAPI_ASM_SOCKET_H */
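
For context on the new defines (a sketch, not part of the patch):
SO_ATTACH_BPF expects the file descriptor of an eBPF program of type
BPF_PROG_TYPE_SOCKET_FILTER, previously loaded with bpf(BPF_PROG_LOAD, ...),
while SO_DETACH_BPF simply aliases SO_DETACH_FILTER.

  /* Attach an already-loaded socket-filter eBPF program to a socket. */
  #include <sys/socket.h>
  #include <stdio.h>

  #ifndef SO_ATTACH_BPF
  #define SO_ATTACH_BPF 50   /* fallback for older libc headers; matches the
                              * value added above for alpha */
  #endif

  static int attach_bpf(int sock, int prog_fd)
  {
          if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
                         &prog_fd, sizeof(prog_fd)) < 0) {
                  perror("SO_ATTACH_BPF");
                  return -1;
          }
          return 0;
  }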
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fe44b2494609..df94ac1f75b6 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -428,3 +428,4 @@ source "arch/arc/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+source "kernel/power/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 10bc3d4e8a44..db72fec0e160 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -12,7 +12,7 @@ ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := arc-linux-uclibc-
endif
-KBUILD_DEFCONFIG := fpga_defconfig
+KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index cfaedd9c61c9..1c169dc74ad1 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
};
aliases {
@@ -41,9 +41,9 @@
#interrupt-cells = <1>;
};
- uart0: serial@c0000000 {
+ uart0: serial@f0000000 {
compatible = "ns8250";
- reg = <0xc0000000 0x2000>;
+ reg = <0xf0000000 0x2000>;
interrupts = <11>;
clock-frequency = <3686400>;
baud = <115200>;
@@ -52,21 +52,21 @@
no-loopback-test = <1>;
};
- pgu0: pgu@c9000000 {
+ pgu0: pgu@f9000000 {
compatible = "snps,arcpgufb";
- reg = <0xc9000000 0x400>;
+ reg = <0xf9000000 0x400>;
};
- ps2: ps2@c9001000 {
+ ps2: ps2@f9001000 {
compatible = "snps,arc_ps2";
- reg = <0xc9000400 0x14>;
+ reg = <0xf9000400 0x14>;
interrupts = <13>;
interrupt-names = "arc_ps2_irq";
};
- eth0: ethernet@c0003000 {
+ eth0: ethernet@f0003000 {
compatible = "snps,oscilan";
- reg = <0xc0003000 0x44>;
+ reg = <0xf0003000 0x44>;
interrupts = <7>, <8>;
interrupt-names = "rx", "tx";
};
diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig
deleted file mode 100644
index 49c93011ab96..000000000000
--- a/arch/arc/configs/fpga_noramfs_defconfig
+++ /dev/null
@@ -1,63 +0,0 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_HAS_RTSC is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
-CONFIG_PREEMPT=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-CONFIG_ARC_EMAC=y
-CONFIG_LXT_PHY=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_ARC=y
-CONFIG_SERIAL_ARC_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/nsim_700_defconfig
index ef4d3bc7b6c0..ef4d3bc7b6c0 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index b8fffc1a2ac2..be0c39e76f7c 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index 742816f1b210..27ecc6975a58 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -41,6 +41,15 @@
/******************************************************************
* IRQ Control Macros
+ *
+ * All of them have a "memory" clobber (compiler barrier), which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse a stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
+ * Reasoning : https://lkml.org/lkml/2013/4/8/15
+ *
******************************************************************/
/*
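
A schematic sketch of the point made in the comment above (illustrative
only, not ARC-specific): because arch_local_irq_save()/arch_local_irq_restore()
carry the "memory" clobber, the compiler cannot cache the shared value in a
register from before the IRQ-off region or sink the update past it.

  /* Kernel-context sketch: the clobber keeps the R-M-W inside the region. */
  static unsigned long counter;

  static void bump_counter(void)
  {
          unsigned long flags;

          flags = arch_local_irq_save();   /* asm with "memory" clobber */
          counter++;                       /* load/modify/store stays IRQ-safe */
          arch_local_irq_restore(flags);   /* asm with "memory" clobber */
  }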
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index d01df0c517a2..20ebb602ea2f 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -26,8 +26,10 @@
#include <asm/setup.h>
#include <asm/mach_desc.h>
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+#endif
struct plat_smp_ops plat_smp_ops;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8424a85bc04..97d07ed60a0b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_USE_BUILTIN_BSWAP
@@ -687,7 +688,9 @@ config ARCH_SA1100
select CPU_SA1100
select GENERIC_CLOCKEVENTS
select HAVE_IDE
+ select IRQ_DOMAIN
select ISA
+ select MULTI_IRQ_HANDLER
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f9295a4e1036..5ddd4906f7a7 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1452,14 +1452,6 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
-config OC_ETM
- bool "On-chip ETM and ETB"
- depends on ARM_AMBA
- help
- Enables the on-chip embedded trace macrocell and embedded trace
- buffer driver that will allow you to collect traces of the
- kernel code.
-
config ARM_KPROBES_TEST
tristate "Kprobes test module"
depends on KPROBES && MODULES
@@ -1486,4 +1478,59 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
+menuconfig CORESIGHT
+ bool "CoreSight Tracing Support"
+ select ARM_AMBA
+ help
+ This framework provides a kernel interface for the CoreSight debug
+ and trace drivers to register themselves with. It's intended to build
+ a topological view of the CoreSight components based on a DT
+ specification and configure the right series of components when a
+ trace source gets enabled.
+
+if CORESIGHT
+config CORESIGHT_LINKS_AND_SINKS
+ bool "CoreSight Link and Sink drivers"
+ help
+ This enables support for CoreSight link and sink drivers that are
+ responsible for transporting and collecting the trace data
+ respectively. Links and sinks are dynamically aggregated with a trace
+ entity at run time to form a complete trace path.
+
+config CORESIGHT_LINK_AND_SINK_TMC
+ bool "Coresight generic TMC driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Trace Memory Controller driver. Depending
+ on its configuration the device can act as a link (embedded trace router
+ - ETR) or sink (embedded trace FIFO). The driver complies with the
+ generic implementation of the component without special enhancement or
+ added features.
+
+config CORESIGHT_SINK_TPIU
+ bool "Coresight generic TPIU driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Trace Port Interface Unit driver, responsible
+ for bridging the gap between the on-chip coresight components and a trace
+ port collection engine, typically connected to an external host for use
+ cases capturing more traces than the on-board coresight memory can handle.
+
+config CORESIGHT_SINK_ETBV10
+ bool "Coresight ETBv1.0 driver"
+ depends on CORESIGHT_LINKS_AND_SINKS
+ help
+ This enables support for the Embedded Trace Buffer version 1.0 driver
+ that complies with the generic implementation of the component without
+ special enhancement or added features.
+
+config CORESIGHT_SOURCE_ETM3X
+ bool "CoreSight Embedded Trace Macrocell 3.x driver"
+ select CORESIGHT_LINKS_AND_SINKS
+ help
+ This driver provides support for processor ETM3.x and PTM1.x modules,
+ which allow tracing the instructions that a processor is executing.
+ This is primarily useful for instruction-level tracing. Depending on
+ the ETM version, data tracing may also be available.
+endif
endmenu
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d42d7865dd53..b62a1cd776cd 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -843,6 +843,8 @@
maximum-speed = "high-speed";
dr_mode = "otg";
status = "disabled";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -865,6 +867,8 @@
maximum-speed = "high-speed";
dr_mode = "otg";
status = "disabled";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 87aa4f3b8b3d..53bbfc90b26a 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -100,7 +100,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "newhaven,nhd-4.3-480272ef-atxl", "panel-dpi";
label = "lcd";
pinctrl-names = "default";
@@ -112,11 +112,11 @@
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
- hfront-porch = <8>;
- hback-porch = <43>;
- hsync-len = <4>;
- vback-porch = <12>;
- vfront-porch = <4>;
+ hfront-porch = <2>;
+ hback-porch = <2>;
+ hsync-len = <41>;
+ vfront-porch = <2>;
+ vback-porch = <2>;
vsync-len = <10>;
hsync-active = <0>;
vsync-active = <0>;
@@ -320,8 +320,7 @@
lcd_pins: lcd_pins {
pinctrl-single,pins = <
- /* GPIO 5_8 to select LCD / HDMI */
- 0x238 (PIN_OUTPUT_PULLUP | MUX_MODE7)
+ 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpcm_ad7.gpio1_7 */
>;
};
};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 9721e55384ce..50096d3427eb 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -14,6 +14,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
@@ -348,6 +349,12 @@
#clock-cells = <1>;
};
+ usbcluster: usb-cluster@18400 {
+ compatible = "marvell,armada-375-usb-cluster";
+ reg = <0x18400 0x4>;
+ #phy-cells = <1>;
+ };
+
mbusc: mbus-controller@20000 {
compatible = "marvell,mbus-controller";
reg = <0x20000 0x100>, <0x20180 0x20>;
@@ -398,6 +405,8 @@
reg = <0x50000 0x500>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 18>;
+ phys = <&usbcluster PHY_TYPE_USB2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -414,6 +423,8 @@
reg = <0x58000 0x20000>,<0x5b880 0x80>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 16>;
+ phys = <&usbcluster PHY_TYPE_USB3>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index b5b84006469e..9198b719d0ef 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -9,12 +9,12 @@
* licensing only applies to this file, and not this project as a
* whole.
*
- * a) This library is free software; you can redistribute it and/or
+ * a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb100b03a362..dd1313cbc314 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -956,6 +956,14 @@
};
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
@@ -966,6 +974,12 @@
atmel,idle-halt;
status = "disabled";
};
+
+ gpbr: syscon@fffffd50 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd50 0x10>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index a81aab4281a7..cdb9ed612109 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -828,12 +828,26 @@
clocks = <&mck>;
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
watchdog@fffffd40 {
compatible = "atmel,at91sam9260-wdt";
reg = <0xfffffd40 0x10>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
status = "disabled";
};
+
+ gpbr: syscon@fffffd50 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd50 0x10>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 653e4395b7cb..1467750e3377 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -922,6 +922,27 @@
pinctrl-0 = <&pinctrl_can_rx_tx>;
clocks = <&can_clk>;
clock-names = "can_clk";
+ };
+
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
+ rtc@fffffd50 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd50 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&slow_xtal>;
+ status = "disabled";
+ };
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x50>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index d2919108e92d..dfaacb113f2e 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -112,9 +112,23 @@
};
};
+ shdwc@fffffd10 {
+ atmel,wakeup-counter = <10>;
+ atmel,wakeup-rtt-timer;
+ };
+
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ status = "okay";
+ };
+
watchdog@fffffd40 {
status = "okay";
};
+
+ gpbr: syscon@fffffd50 {
+ status = "okay";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index d3f65130a1f8..2a8da8a884b4 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -492,6 +492,27 @@
};
};
+ isi {
+ pinctrl_isi: isi-0 {
+ atmel,pins = <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* D8 */
+ AT91_PIOB 9 AT91_PERIPH_B AT91_PINCTRL_NONE /* D9 */
+ AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* D10 */
+ AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE /* D11 */
+ AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* D0 */
+ AT91_PIOB 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* D1 */
+ AT91_PIOB 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* D2 */
+ AT91_PIOB 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* D3 */
+ AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* D4 */
+ AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* D5 */
+ AT91_PIOB 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* D6 */
+ AT91_PIOB 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* D7 */
+ AT91_PIOB 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PCK */
+ AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* VSYNC */
+ AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE /* HSYNC */
+ AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_NONE /* MCK */>;
+ };
+ };
+
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
@@ -940,6 +961,13 @@
status = "disabled";
};
+ trng@fffcc000 {
+ compatible = "atmel,at91sam9g45-trng";
+ reg = <0xfffcc000 0x4000>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&trng_clk>;
+ };
+
i2c0: i2c@fff84000 {
compatible = "atmel,at91sam9g10-i2c";
reg = <0xfff84000 0x100>;
@@ -1028,6 +1056,17 @@
};
};
+ isi@fffb4000 {
+ compatible = "atmel,at91sam9g45-isi";
+ reg = <0xfffb4000 0x4000>;
+ interrupts = <26 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&isi_clk>;
+ clock-names = "isi_clk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isi>;
+ status = "disabled";
+ };
+
pwm0: pwm@fffb8000 {
compatible = "atmel,at91sam9rl-pwm";
reg = <0xfffb8000 0x300>;
@@ -1192,12 +1231,26 @@
};
};
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
rtc@fffffdb0 {
compatible = "atmel,at91rm9200-rtc";
reg = <0xfffffdb0 0x30>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
status = "disabled";
};
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x10>;
+ status = "disabled";
+ };
};
fb0: fb@0x00500000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index d8dd22651090..33ce7ca2c404 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -161,6 +161,15 @@
pinctrl-0 = <&pinctrl_pwm_leds>;
};
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ status = "okay";
+ };
+
+ gpbr: syscon@fffffd60 {
+ status = "okay";
+ };
+
rtc@fffffdb0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index f0b4352650ed..72424371413e 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -1059,6 +1059,27 @@
clocks = <&slow_rc_osc &slow_osc>;
};
};
+
+ rtc@fffffeb0 {
+ compatible = "atmel,at91rm9200-rtc";
+ reg = <0xfffffeb0 0x40>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ status = "disabled";
+ };
+
+ rtc@fffffd20 {
+ compatible = "atmel,at91sam9260-rtt";
+ reg = <0xfffffd20 0x10>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k>;
+ status = "disabled";
+ };
+
+ gpbr: syscon@fffffd60 {
+ compatible = "atmel,at91sam9260-gpbr", "syscon";
+ reg = <0xfffffd60 0x10>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index f3bb2dd6269e..d2d8e94e0aa2 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -102,7 +102,7 @@
twd_watchdog: watchdog@1e620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x1e620 0x20>;
- interupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 736092b1a535..10b725c7bfc0 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -304,7 +304,7 @@
/* VDD_GPU - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <12500000>;
+ regulator-max-microvolt = <1250000>;
regulator-always-on;
regulator-boot-on;
};
@@ -313,7 +313,7 @@
/* CORE_VDD */
regulator-name = "smps7";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1030000>;
+ regulator-max-microvolt = <1060000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 63bf99be1762..22771bc1643a 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -742,7 +742,7 @@
};
wdt2: wdt@4ae14000 {
- compatible = "ti,omap4-wdt";
+ compatible = "ti,omap3-wdt";
reg = <0x4ae14000 0x80>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "wd_timer2";
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index afc74fd4bb5e..89085d066c65 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -160,7 +160,7 @@
/* VDD_CORE */
regulator-name = "smps2";
regulator-min-microvolt = <850000>;
- regulator-max-microvolt = <1030000>;
+ regulator-max-microvolt = <1060000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 2c05b3f017fa..4bdcbd61ce47 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1042,7 +1042,7 @@
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01a4>;
+ reg = <0x0164>;
};
mlb_clk: mlb_clk {
@@ -1084,14 +1084,14 @@
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01d0>;
+ reg = <0x0168>;
};
video2_dpll_clk_mux: video2_dpll_clk_mux {
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&sys_clkin1>, <&sys_clkin2>;
- reg = <0x01d4>;
+ reg = <0x016c>;
};
wkupaon_iclk_mux: wkupaon_iclk_mux {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 242ddda0a8cd..22465494b796 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -311,12 +311,13 @@
adc: adc@126C0000 {
compatible = "samsung,exynos3250-adc",
"samsung,exynos-adc-v2";
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupts = <0 137 0>;
clock-names = "adc", "sclk";
clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>;
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index 2e9f1f7be77b..93b70402e943 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -108,13 +108,14 @@
adc: adc@126C0000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x126C0000 0x100>, <0x10020718 0x4>;
+ reg = <0x126C0000 0x100>;
interrupt-parent = <&combiner>;
interrupts = <10 3>;
clocks = <&clock CLK_TSADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index d45a07ea3402..0a229fcd7acf 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -754,12 +754,13 @@
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>, <0x10040718 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
clocks = <&clock CLK_ADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 90bf4011e319..517e50f6760b 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -541,12 +541,13 @@
adc: adc@12D10000 {
compatible = "samsung,exynos-adc-v2";
- reg = <0x12D10000 0x100>, <0x10040720 0x4>;
+ reg = <0x12D10000 0x100>;
interrupts = <0 106 0>;
clocks = <&clock CLK_TSADC>;
clock-names = "adc";
#io-channel-cells = <1>;
io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi
index 93b6c909e991..238814596a87 100644
--- a/arch/arm/boot/dts/hip04.dtsi
+++ b/arch/arm/boot/dts/hip04.dtsi
@@ -190,6 +190,12 @@
clock-frequency = <168000000>;
};
+ clk_375m: clk_375m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <375000000>;
+ };
+
soc {
/* It's a 32-bit SoC. */
#address-cells = <1>;
@@ -264,4 +270,715 @@
};
};
+
+ etb@0,e3c42000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0xe3c42000 0 0x1000>;
+
+ coresight-default-sink;
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ port {
+ etb0_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator0_out_port0>;
+ };
+ };
+ };
+
+ etb@0,e3c82000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0xe3c82000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ port {
+ etb1_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator1_out_port0>;
+ };
+ };
+ };
+
+ etb@0,e3cc2000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0xe3cc2000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ port {
+ etb2_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator2_out_port0>;
+ };
+ };
+ };
+
+ etb@0,e3d02000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0xe3d02000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ port {
+ etb3_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator3_out_port0>;
+ };
+ };
+ };
+
+ tpiu@0,e3c05000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0 0xe3c05000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ port {
+ tpiu_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&funnel4_out_port0>;
+ };
+ };
+ };
+
+ replicator0 {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator0_out_port0: endpoint {
+ remote-endpoint = <&etb0_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator0_out_port1: endpoint {
+ remote-endpoint = <&funnel4_in_port0>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator0_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel0_out_port0>;
+ };
+ };
+ };
+ };
+
+ replicator1 {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator1_out_port0: endpoint {
+ remote-endpoint = <&etb1_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator1_out_port1: endpoint {
+ remote-endpoint = <&funnel4_in_port1>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator1_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel1_out_port0>;
+ };
+ };
+ };
+ };
+
+ replicator2 {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator2_out_port0: endpoint {
+ remote-endpoint = <&etb2_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator2_out_port1: endpoint {
+ remote-endpoint = <&funnel4_in_port2>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator2_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel2_out_port0>;
+ };
+ };
+ };
+ };
+
+ replicator3 {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator3_out_port0: endpoint {
+ remote-endpoint = <&etb3_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator3_out_port1: endpoint {
+ remote-endpoint = <&funnel4_in_port3>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator3_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel3_out_port0>;
+ };
+ };
+ };
+ };
+
+ funnel@0,e3c41000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0xe3c41000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel0_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator0_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel0_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel0_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm1_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel0_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm2_out_port>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel0_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm3_out_port>;
+ };
+ };
+ };
+ };
+
+ funnel@0,e3c81000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0xe3c81000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel1_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator1_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel1_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm4_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel1_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm5_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel1_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm6_out_port>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel1_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm7_out_port>;
+ };
+ };
+ };
+ };
+
+ funnel@0,e3cc1000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0xe3cc1000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel2_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator2_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel2_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm8_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel2_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm9_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel2_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm10_out_port>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel2_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm11_out_port>;
+ };
+ };
+ };
+ };
+
+ funnel@0,e3d01000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0xe3d01000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel3_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator3_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel3_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm12_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel3_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm13_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel3_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm14_out_port>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel3_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm15_out_port>;
+ };
+ };
+ };
+ };
+
+ funnel@0,e3c04000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0xe3c04000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel4_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel4_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator0_out_port1>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel4_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator1_out_port1>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel4_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator2_out_port1>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel4_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator3_out_port1>;
+ };
+ };
+ };
+ };
+
+ ptm@0,e3c7c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3c7c000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU0>;
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,e3c7d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3c7d000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU1>;
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port1>;
+ };
+ };
+ };
+
+ ptm@0,e3c7e000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3c7e000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU2>;
+ port {
+ ptm2_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port2>;
+ };
+ };
+ };
+
+ ptm@0,e3c7f000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3c7f000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU3>;
+ port {
+ ptm3_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port3>;
+ };
+ };
+ };
+
+ ptm@0,e3cbc000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cbc000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU4>;
+ port {
+ ptm4_out_port: endpoint {
+ remote-endpoint = <&funnel1_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,e3cbd000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cbd000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU5>;
+ port {
+ ptm5_out_port: endpoint {
+ remote-endpoint = <&funnel1_in_port1>;
+ };
+ };
+ };
+
+ ptm@0,e3cbe000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cbe000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU6>;
+ port {
+ ptm6_out_port: endpoint {
+ remote-endpoint = <&funnel1_in_port2>;
+ };
+ };
+ };
+
+ ptm@0,e3cbf000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cbf000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU7>;
+ port {
+ ptm7_out_port: endpoint {
+ remote-endpoint = <&funnel1_in_port3>;
+ };
+ };
+ };
+
+ ptm@0,e3cfc000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cfc000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU8>;
+ port {
+ ptm8_out_port: endpoint {
+ remote-endpoint = <&funnel2_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,e3cfd000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cfd000 0 0x1000>;
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU9>;
+ port {
+ ptm9_out_port: endpoint {
+ remote-endpoint = <&funnel2_in_port1>;
+ };
+ };
+ };
+
+ ptm@0,e3cfe000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cfe000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU10>;
+ port {
+ ptm10_out_port: endpoint {
+ remote-endpoint = <&funnel2_in_port2>;
+ };
+ };
+ };
+
+ ptm@0,e3cff000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3cff000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU11>;
+ port {
+ ptm11_out_port: endpoint {
+ remote-endpoint = <&funnel2_in_port3>;
+ };
+ };
+ };
+
+ ptm@0,e3d3c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3d3c000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU12>;
+ port {
+ ptm12_out_port: endpoint {
+ remote-endpoint = <&funnel3_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,e3d3d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3d3d000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU13>;
+ port {
+ ptm13_out_port: endpoint {
+ remote-endpoint = <&funnel3_in_port1>;
+ };
+ };
+ };
+
+ ptm@0,e3d3e000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3d3e000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU14>;
+ port {
+ ptm14_out_port: endpoint {
+ remote-endpoint = <&funnel3_in_port2>;
+ };
+ };
+ };
+
+ ptm@0,e3d3f000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0xe3d3f000 0 0x1000>;
+
+ clocks = <&clk_375m>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU15>;
+ port {
+ ptm15_out_port: endpoint {
+ remote-endpoint = <&funnel3_in_port3>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 03bcff87bd27..b67ede515bcd 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -143,5 +143,12 @@
#size-cells = <0>;
status = "disabled";
};
+
+ ir_receiver: ir-receiver@c8100480 {
+ compatible = "amlogic,meson6-ir";
+ reg = <0xc8100480 0x20>;
+ interrupts = <0 15 1>;
+ status = "disabled";
+ };
};
}; /* end of / */
diff --git a/arch/arm/boot/dts/omap2430-sdp.dts b/arch/arm/boot/dts/omap2430-sdp.dts
index 05eca2e4430f..6b36ede58488 100644
--- a/arch/arm/boot/dts/omap2430-sdp.dts
+++ b/arch/arm/boot/dts/omap2430-sdp.dts
@@ -48,22 +48,22 @@
gpmc,device-width = <1>;
gpmc,cycle2cycle-samecsen = <1>;
gpmc,cycle2cycle-diffcsen = <1>;
- gpmc,cs-on-ns = <7>;
- gpmc,cs-rd-off-ns = <233>;
- gpmc,cs-wr-off-ns = <233>;
- gpmc,adv-on-ns = <22>;
- gpmc,adv-rd-off-ns = <60>;
- gpmc,adv-wr-off-ns = <60>;
- gpmc,oe-on-ns = <67>;
- gpmc,oe-off-ns = <210>;
- gpmc,we-on-ns = <67>;
- gpmc,we-off-ns = <210>;
- gpmc,rd-cycle-ns = <233>;
- gpmc,wr-cycle-ns = <233>;
- gpmc,access-ns = <233>;
- gpmc,page-burst-access-ns = <30>;
- gpmc,bus-turnaround-ns = <30>;
- gpmc,cycle2cycle-delay-ns = <30>;
+ gpmc,cs-on-ns = <6>;
+ gpmc,cs-rd-off-ns = <187>;
+ gpmc,cs-wr-off-ns = <187>;
+ gpmc,adv-on-ns = <18>;
+ gpmc,adv-rd-off-ns = <48>;
+ gpmc,adv-wr-off-ns = <48>;
+ gpmc,oe-on-ns = <60>;
+ gpmc,oe-off-ns = <169>;
+ gpmc,we-on-ns = <66>;
+ gpmc,we-off-ns = <169>;
+ gpmc,rd-cycle-ns = <187>;
+ gpmc,wr-cycle-ns = <187>;
+ gpmc,access-ns = <187>;
+ gpmc,page-burst-access-ns = <24>;
+ gpmc,bus-turnaround-ns = <24>;
+ gpmc,cycle2cycle-delay-ns = <24>;
gpmc,wait-monitoring-ns = <0>;
gpmc,clk-activation-ns = <0>;
gpmc,wr-data-mux-bus-ns = <0>;
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 06a8aec4e6ea..25f7b0a22114 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -145,6 +145,34 @@
};
};
};
+
+ etb@5401b000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0x5401b000 0x1000>;
+
+ coresight-default-sink;
+ clocks = <&emu_src_ck>;
+ clock-names = "apb_pclk";
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm_out>;
+ };
+ };
+ };
+
+ etm@54010000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x54010000 0x1000>;
+
+ clocks = <&emu_src_ck>;
+ clock-names = "apb_pclk";
+ port {
+ etm_out: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+ };
};
&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a9aae88b74f5..c792391ef090 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -140,6 +140,34 @@
};
};
};
+
+ etb@5401b000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0x5401b000 0x1000>;
+
+ coresight-default-sink;
+ clocks = <&emu_src_ck>;
+ clock-names = "apb_pclk";
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm_out>;
+ };
+ };
+ };
+
+ etm@54010000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x54010000 0x1000>;
+
+ clocks = <&emu_src_ck>;
+ clock-names = "apb_pclk";
+ port {
+ etm_out: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+ };
};
&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index e0157b0f075c..1b0f30c2c4a5 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -9,12 +9,12 @@
* licensing only applies to this file, and not this project as a
* whole.
*
- * a) This library is free software; you can redistribute it and/or
+ * a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
@@ -45,6 +45,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/clock/at91.h>
+#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
@@ -302,6 +303,15 @@
#size-cells = <1>;
ranges;
+ dma1: dma-controller@f0004000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0004000 0x200>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ clocks = <&dma1_clk>;
+ clock-names = "dma_clk";
+ };
+
ramc0: ramc@f0010000 {
compatible = "atmel,sama5d3-ddramc";
reg = <0xf0010000 0x200>;
@@ -309,6 +319,15 @@
clock-names = "ddrck", "mpddr";
};
+ dma0: dma-controller@f0014000 {
+ compatible = "atmel,sama5d4-dma";
+ reg = <0xf0014000 0x200>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ clocks = <&dma0_clk>;
+ clock-names = "dma_clk";
+ };
+
pmc: pmc@f0018000 {
compatible = "atmel,sama5d3-pmc";
reg = <0xf0018000 0x120>;
@@ -761,6 +780,10 @@
compatible = "atmel,hsmci";
reg = <0xf8000000 0x600>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(0))>;
+ dma-names = "rxtx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3>;
status = "disabled";
@@ -776,6 +799,13 @@
compatible = "atmel,at91rm9200-spi";
reg = <0xf8010000 0x100>;
interrupts = <37 IRQ_TYPE_LEVEL_HIGH 3>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(10))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(11))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
clocks = <&spi0_clk>;
@@ -787,6 +817,13 @@
compatible = "atmel,at91sam9x5-i2c";
reg = <0xf8014000 0x4000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(2))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(3))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
#address-cells = <1>;
@@ -817,7 +854,14 @@
i2c2: i2c@f8024000 {
compatible = "atmel,at91sam9x5-i2c";
reg = <0xf8024000 0x4000>;
- interrupts = <34 4 6>;
+ interrupts = <34 IRQ_TYPE_LEVEL_HIGH 6>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(6))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(7))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
#address-cells = <1>;
@@ -830,6 +874,10 @@
compatible = "atmel,hsmci";
reg = <0xfc000000 0x600>;
interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(1))>;
+ dma-names = "rxtx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3>;
status = "disabled";
@@ -843,6 +891,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc008000 0x100>;
interrupts = <29 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(16))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(17))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart2 &pinctrl_usart2_rts &pinctrl_usart2_cts>;
clocks = <&usart2_clk>;
@@ -854,6 +909,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc00c000 0x100>;
interrupts = <30 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(18))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(19))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart3>;
clocks = <&usart3_clk>;
@@ -865,6 +927,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc010000 0x100>;
interrupts = <31 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(20))>,
+ <&dma1
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(21))>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart4>;
clocks = <&usart4_clk>;
diff --git a/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi b/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi
index e0799966bc25..52dba2e39c71 100644
--- a/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi
+++ b/arch/arm/boot/dts/ste-ccu8540-pinctrl.dtsi
@@ -16,31 +16,31 @@
uart0 {
uart0_default_mux: uart0_mux {
default_mux {
- ste,function = "u0";
- ste,pins = "u0_a_1";
+ function = "u0";
+ groups = "u0_a_1";
};
};
uart0_default_mode: uart0_default {
default_cfg1 {
- ste,pins = "GPIO0", "GPIO2";
+ pins = "GPIO0", "GPIO2";
ste,config = <&in_pu>;
};
default_cfg2 {
- ste,pins = "GPIO1", "GPIO3";
+ pins = "GPIO1", "GPIO3";
ste,config = <&out_hi>;
};
};
uart0_sleep_mode: uart0_sleep {
sleep_cfg1 {
- ste,pins = "GPIO0", "GPIO2";
+ pins = "GPIO0", "GPIO2";
ste,config = <&slpm_in_pu>;
};
sleep_cfg2 {
- ste,pins = "GPIO1", "GPIO3";
+ pins = "GPIO1", "GPIO3";
ste,config = <&slpm_out_hi>;
};
};
@@ -49,29 +49,29 @@
uart2 {
uart2_default_mode: uart2_default {
default_mux {
- ste,function = "u2";
- ste,pins = "u2txrx_a_1";
+ function = "u2";
+ groups = "u2txrx_a_1";
};
default_cfg1 {
- ste,pins = "GPIO120";
+ pins = "GPIO120";
ste,config = <&in_pu>;
};
default_cfg2 {
- ste,pins = "GPIO121";
+ pins = "GPIO121";
ste,config = <&out_hi>;
};
};
uart2_sleep_mode: uart2_sleep {
sleep_cfg1 {
- ste,pins = "GPIO120";
+ pins = "GPIO120";
ste,config = <&slpm_in_pu>;
};
sleep_cfg2 {
- ste,pins = "GPIO121";
+ pins = "GPIO121";
ste,config = <&slpm_out_hi>;
};
};
@@ -80,21 +80,21 @@
i2c0 {
i2c0_default_mux: i2c_mux {
default_mux {
- ste,function = "i2c0";
- ste,pins = "i2c0_a_1";
+ function = "i2c0";
+ groups = "i2c0_a_1";
};
};
i2c0_default_mode: i2c_default {
default_cfg1 {
- ste,pins = "GPIO147", "GPIO148";
+ pins = "GPIO147", "GPIO148";
ste,config = <&in_pu>;
};
};
i2c0_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO147", "GPIO148";
+ pins = "GPIO147", "GPIO148";
ste,config = <&slpm_in_pu>;
};
};
@@ -103,21 +103,21 @@
i2c1 {
i2c1_default_mux: i2c_mux {
default_mux {
- ste,function = "i2c1";
- ste,pins = "i2c1_b_2";
+ function = "i2c1";
+ groups = "i2c1_b_2";
};
};
i2c1_default_mode: i2c_default {
default_cfg1 {
- ste,pins = "GPIO16", "GPIO17";
+ pins = "GPIO16", "GPIO17";
ste,config = <&in_pu>;
};
};
i2c1_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO16", "GPIO17";
+ pins = "GPIO16", "GPIO17";
ste,config = <&slpm_in_pu>;
};
};
@@ -126,21 +126,21 @@
i2c2 {
i2c2_default_mux: i2c_mux {
default_mux {
- ste,function = "i2c2";
- ste,pins = "i2c2_b_2";
+ function = "i2c2";
+ groups = "i2c2_b_2";
};
};
i2c2_default_mode: i2c_default {
default_cfg1 {
- ste,pins = "GPIO10", "GPIO11";
+ pins = "GPIO10", "GPIO11";
ste,config = <&in_pu>;
};
};
i2c2_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO11", "GPIO11";
+ pins = "GPIO11", "GPIO11";
ste,config = <&slpm_in_pu>;
};
};
@@ -149,21 +149,21 @@
i2c4 {
i2c4_default_mux: i2c_mux {
default_mux {
- ste,function = "i2c4";
- ste,pins = "i2c4_b_2";
+ function = "i2c4";
+ groups = "i2c4_b_2";
};
};
i2c4_default_mode: i2c_default {
default_cfg1 {
- ste,pins = "GPIO122", "GPIO123";
+ pins = "GPIO122", "GPIO123";
ste,config = <&in_pu>;
};
};
i2c4_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO122", "GPIO123";
+ pins = "GPIO122", "GPIO123";
ste,config = <&slpm_in_pu>;
};
};
@@ -172,21 +172,21 @@
i2c5 {
i2c5_default_mux: i2c_mux {
default_mux {
- ste,function = "i2c5";
- ste,pins = "i2c5_c_2";
+ function = "i2c5";
+ groups = "i2c5_c_2";
};
};
i2c5_default_mode: i2c_default {
default_cfg1 {
- ste,pins = "GPIO118", "GPIO119";
+ pins = "GPIO118", "GPIO119";
ste,config = <&in_pu>;
};
};
i2c5_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO118", "GPIO119";
+ pins = "GPIO118", "GPIO119";
ste,config = <&slpm_in_pu>;
};
};
diff --git a/arch/arm/boot/dts/ste-href-ab8500.dtsi b/arch/arm/boot/dts/ste-href-ab8500.dtsi
index 30f8601da323..9b69bce9297d 100644
--- a/arch/arm/boot/dts/ste-href-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-href-ab8500.dtsi
@@ -47,11 +47,11 @@
gpio2 {
gpio2_default_mode: gpio2_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio2_a_1";
+ function = "gpio";
+ groups = "gpio2_a_1";
};
default_cfg {
- ste,pins = "GPIO2_T9";
+ pins = "GPIO2_T9";
input-enable;
bias-pull-down;
};
@@ -60,11 +60,11 @@
gpio4 {
gpio4_default_mode: gpio4_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio4_a_1";
+ function = "gpio";
+ groups = "gpio4_a_1";
};
default_cfg {
- ste,pins = "GPIO4_W2";
+ pins = "GPIO4_W2";
input-enable;
bias-pull-down;
};
@@ -73,11 +73,11 @@
gpio10 {
gpio10_default_mode: gpio10_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio10_d_1";
+ function = "gpio";
+ groups = "gpio10_d_1";
};
default_cfg {
- ste,pins = "GPIO10_U17";
+ pins = "GPIO10_U17";
input-enable;
bias-pull-down;
};
@@ -86,11 +86,11 @@
gpio11 {
gpio11_default_mode: gpio11_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio11_d_1";
+ function = "gpio";
+ groups = "gpio11_d_1";
};
default_cfg {
- ste,pins = "GPIO11_AA18";
+ pins = "GPIO11_AA18";
input-enable;
bias-pull-down;
};
@@ -99,11 +99,11 @@
gpio12 {
gpio12_default_mode: gpio12_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio12_d_1";
+ function = "gpio";
+ groups = "gpio12_d_1";
};
default_cfg {
- ste,pins = "GPIO12_U16";
+ pins = "GPIO12_U16";
input-enable;
bias-pull-down;
};
@@ -112,11 +112,11 @@
gpio13 {
gpio13_default_mode: gpio13_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio13_d_1";
+ function = "gpio";
+ groups = "gpio13_d_1";
};
default_cfg {
- ste,pins = "GPIO13_W17";
+ pins = "GPIO13_W17";
input-enable;
bias-pull-down;
};
@@ -125,11 +125,11 @@
gpio16 {
gpio16_default_mode: gpio16_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio16_a_1";
+ function = "gpio";
+ groups = "gpio16_a_1";
};
default_cfg {
- ste,pins = "GPIO16_F15";
+ pins = "GPIO16_F15";
input-enable;
bias-pull-down;
};
@@ -138,11 +138,11 @@
gpio24 {
gpio24_default_mode: gpio24_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio24_a_1";
+ function = "gpio";
+ groups = "gpio24_a_1";
};
default_cfg {
- ste,pins = "GPIO24_T14";
+ pins = "GPIO24_T14";
input-enable;
bias-pull-down;
};
@@ -151,11 +151,11 @@
gpio25 {
gpio25_default_mode: gpio25_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio25_a_1";
+ function = "gpio";
+ groups = "gpio25_a_1";
};
default_cfg {
- ste,pins = "GPIO25_R16";
+ pins = "GPIO25_R16";
input-enable;
bias-pull-down;
};
@@ -164,11 +164,11 @@
gpio36 {
gpio36_default_mode: gpio36_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio36_a_1";
+ function = "gpio";
+ groups = "gpio36_a_1";
};
default_cfg {
- ste,pins = "GPIO36_A17";
+ pins = "GPIO36_A17";
input-enable;
bias-pull-down;
};
@@ -177,11 +177,11 @@
gpio37 {
gpio37_default_mode: gpio37_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio37_a_1";
+ function = "gpio";
+ groups = "gpio37_a_1";
};
default_cfg {
- ste,pins = "GPIO37_E15";
+ pins = "GPIO37_E15";
input-enable;
bias-pull-down;
};
@@ -190,11 +190,11 @@
gpio38 {
gpio38_default_mode: gpio38_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio38_a_1";
+ function = "gpio";
+ groups = "gpio38_a_1";
};
default_cfg {
- ste,pins = "GPIO38_C17";
+ pins = "GPIO38_C17";
input-enable;
bias-pull-down;
};
@@ -203,11 +203,11 @@
gpio39 {
gpio39_default_mode: gpio39_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio39_a_1";
+ function = "gpio";
+ groups = "gpio39_a_1";
};
default_cfg {
- ste,pins = "GPIO39_E16";
+ pins = "GPIO39_E16";
input-enable;
bias-pull-down;
};
@@ -216,11 +216,11 @@
gpio42 {
gpio42_default_mode: gpio42_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio42_a_1";
+ function = "gpio";
+ groups = "gpio42_a_1";
};
default_cfg {
- ste,pins = "GPIO42_U2";
+ pins = "GPIO42_U2";
input-enable;
bias-pull-down;
};
@@ -232,11 +232,11 @@
gpio26 {
gpio26_default_mode: gpio26_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio26_d_1";
+ function = "gpio";
+ groups = "gpio26_d_1";
};
default_cfg {
- ste,pins = "GPIO26_M16";
+ pins = "GPIO26_M16";
output-low;
};
};
@@ -244,11 +244,11 @@
gpio35 {
gpio35_default_mode: gpio35_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio35_d_1";
+ function = "gpio";
+ groups = "gpio35_d_1";
};
default_cfg {
- ste,pins = "GPIO35_W15";
+ pins = "GPIO35_W15";
output-low;
};
};
@@ -260,11 +260,11 @@
ycbcr {
ycbcr_default_mode: ycbcr_default {
default_mux {
- ste,function = "ycbcr";
- ste,pins = "ycbcr0123_d_1";
+ function = "ycbcr";
+ groups = "ycbcr0123_d_1";
};
default_cfg {
- ste,pins = "GPIO6_Y18",
+ pins = "GPIO6_Y18",
"GPIO7_AA20",
"GPIO8_W18",
"GPIO9_AA19";
@@ -277,11 +277,11 @@
pwm {
pwm_default_mode: pwm_default {
default_mux {
- ste,function = "pwmout";
- ste,pins = "pwmout1_d_1", "pwmout2_d_1";
+ function = "pwmout";
+ groups = "pwmout1_d_1", "pwmout2_d_1";
};
default_cfg {
- ste,pins = "GPIO14_F14",
+ pins = "GPIO14_F14",
"GPIO15_B17";
input-enable;
bias-pull-down;
@@ -292,11 +292,11 @@
adi1 {
adi1_default_mode: adi1_default {
default_mux {
- ste,function = "adi1";
- ste,pins = "adi1_d_1";
+ function = "adi1";
+ groups = "adi1_d_1";
};
default_cfg {
- ste,pins = "GPIO17_P5",
+ pins = "GPIO17_P5",
"GPIO18_R5",
"GPIO19_U5",
"GPIO20_T5";
@@ -309,11 +309,11 @@
usbuicc {
usbuicc_default_mode: usbuicc_default {
default_mux {
- ste,function = "usbuicc";
- ste,pins = "usbuicc_d_1";
+ function = "usbuicc";
+ groups = "usbuicc_d_1";
};
default_cfg {
- ste,pins = "GPIO21_H19",
+ pins = "GPIO21_H19",
"GPIO22_G20",
"GPIO23_G19";
input-enable;
@@ -325,13 +325,13 @@
dmic {
dmic_default_mode: dmic_default {
default_mux {
- ste,function = "dmic";
- ste,pins = "dmic12_d_1",
+ function = "dmic";
+ groups = "dmic12_d_1",
"dmic34_d_1",
"dmic56_d_1";
};
default_cfg {
- ste,pins = "GPIO27_J6",
+ pins = "GPIO27_J6",
"GPIO28_K6",
"GPIO29_G6",
"GPIO30_H6",
@@ -345,11 +345,11 @@
extcpena {
extcpena_default_mode: extcpena_default {
default_mux {
- ste,function = "extcpena";
- ste,pins = "extcpena_d_1";
+ function = "extcpena";
+ groups = "extcpena_d_1";
};
default_cfg {
- ste,pins = "GPIO34_R17";
+ pins = "GPIO34_R17";
input-enable;
bias-pull-down;
};
@@ -359,11 +359,11 @@
modsclsda {
modsclsda_default_mode: modsclsda_default {
default_mux {
- ste,function = "modsclsda";
- ste,pins = "modsclsda_d_1";
+ function = "modsclsda";
+ groups = "modsclsda_d_1";
};
default_cfg {
- ste,pins = "GPIO40_T19",
+ pins = "GPIO40_T19",
"GPIO41_U19";
input-enable;
bias-pull-down;
@@ -376,22 +376,22 @@
sysclkreq2 {
sysclkreq2_default_mode: sysclkreq2_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq2_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq2_d_1";
};
default_cfg {
- ste,pins = "GPIO1_T10";
+ pins = "GPIO1_T10";
input-enable;
bias-disable;
};
};
sysclkreq2_sleep_mode: sysclkreq2_sleep {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio1_a_1";
+ function = "gpio";
+ groups = "gpio1_a_1";
};
default_cfg {
- ste,pins = "GPIO1_T10";
+ pins = "GPIO1_T10";
input-enable;
bias-pull-down;
};
@@ -400,22 +400,22 @@
sysclkreq4 {
sysclkreq4_default_mode: sysclkreq4_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq4_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq4_d_1";
};
default_cfg {
- ste,pins = "GPIO3_U9";
+ pins = "GPIO3_U9";
input-enable;
bias-disable;
};
};
sysclkreq4_sleep_mode: sysclkreq4_sleep {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio3_a_1";
+ function = "gpio";
+ groups = "gpio3_a_1";
};
default_cfg {
- ste,pins = "GPIO3_U9";
+ pins = "GPIO3_U9";
input-enable;
bias-pull-down;
};
diff --git a/arch/arm/boot/dts/ste-href-ab8505.dtsi b/arch/arm/boot/dts/ste-href-ab8505.dtsi
index 6006d62086a2..ccf37a9df050 100644
--- a/arch/arm/boot/dts/ste-href-ab8505.dtsi
+++ b/arch/arm/boot/dts/ste-href-ab8505.dtsi
@@ -35,11 +35,11 @@
gpio2 {
gpio2_default_mode: gpio2_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio2_a_1";
+ function = "gpio";
+ groups = "gpio2_a_1";
};
default_cfg {
- ste,pins = "GPIO2_R5";
+ pins = "GPIO2_R5";
input-enable;
bias-pull-down;
};
@@ -48,11 +48,11 @@
gpio10 {
gpio10_default_mode: gpio10_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio10_d_1";
+ function = "gpio";
+ groups = "gpio10_d_1";
};
default_cfg {
- ste,pins = "GPIO10_B16";
+ pins = "GPIO10_B16";
input-enable;
bias-pull-down;
};
@@ -61,11 +61,11 @@
gpio11 {
gpio11_default_mode: gpio11_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio11_d_1";
+ function = "gpio";
+ groups = "gpio11_d_1";
};
default_cfg {
- ste,pins = "GPIO11_B17";
+ pins = "GPIO11_B17";
input-enable;
bias-pull-down;
};
@@ -74,11 +74,11 @@
gpio13 {
gpio13_default_mode: gpio13_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio13_d_1";
+ function = "gpio";
+ groups = "gpio13_d_1";
};
default_cfg {
- ste,pins = "GPIO13_D17";
+ pins = "GPIO13_D17";
input-enable;
bias-disable;
};
@@ -87,11 +87,11 @@
gpio34 {
gpio34_default_mode: gpio34_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio34_a_1";
+ function = "gpio";
+ groups = "gpio34_a_1";
};
default_cfg {
- ste,pins = "GPIO34_H14";
+ pins = "GPIO34_H14";
input-enable;
bias-pull-down;
};
@@ -100,11 +100,11 @@
gpio50 {
gpio50_default_mode: gpio50_default {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio50_d_1";
+ function = "gpio";
+ groups = "gpio50_d_1";
};
default_cfg {
- ste,pins = "GPIO50_L4";
+ pins = "GPIO50_L4";
input-enable;
bias-disable;
};
@@ -114,11 +114,11 @@
pwm {
pwm_default_mode: pwm_default {
default_mux {
- ste,function = "pwmout";
- ste,pins = "pwmout1_d_1";
+ function = "pwmout";
+ groups = "pwmout1_d_1";
};
default_cfg {
- ste,pins = "GPIO14_C16";
+ pins = "GPIO14_C16";
input-enable;
bias-pull-down;
};
@@ -128,11 +128,11 @@
adi2 {
adi2_default_mode: adi2_default {
default_mux {
- ste,function = "adi2";
- ste,pins = "adi2_d_1";
+ function = "adi2";
+ groups = "adi2_d_1";
};
default_cfg {
- ste,pins = "GPIO17_P2",
+ pins = "GPIO17_P2",
"GPIO18_N3",
"GPIO19_T1",
"GPIO20_P3";
@@ -145,11 +145,11 @@
modsclsda {
modsclsda_default_mode: modsclsda_default {
default_mux {
- ste,function = "modsclsda";
- ste,pins = "modsclsda_d_1";
+ function = "modsclsda";
+ groups = "modsclsda_d_1";
};
default_cfg {
- ste,pins = "GPIO40_J15",
+ pins = "GPIO40_J15",
"GPIO41_J14";
input-enable;
bias-pull-down;
@@ -159,11 +159,11 @@
resethw {
resethw_default_mode: resethw_default {
default_mux {
- ste,function = "resethw";
- ste,pins = "resethw_d_1";
+ function = "resethw";
+ groups = "resethw_d_1";
};
default_cfg {
- ste,pins = "GPIO52_D16";
+ pins = "GPIO52_D16";
input-enable;
bias-pull-down;
};
@@ -172,11 +172,11 @@
service {
service_default_mode: service_default {
default_mux {
- ste,function = "service";
- ste,pins = "service_d_1";
+ function = "service";
+ groups = "service_d_1";
};
default_cfg {
- ste,pins = "GPIO53_D15";
+ pins = "GPIO53_D15";
input-enable;
bias-pull-down;
};
@@ -188,22 +188,22 @@
sysclkreq2 {
sysclkreq2_default_mode: sysclkreq2_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq2_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq2_d_1";
};
default_cfg {
- ste,pins = "GPIO1_N4";
+ pins = "GPIO1_N4";
input-enable;
bias-disable;
};
};
sysclkreq2_sleep_mode: sysclkreq2_sleep {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio1_a_1";
+ function = "gpio";
+ groups = "gpio1_a_1";
};
default_cfg {
- ste,pins = "GPIO1_N4";
+ pins = "GPIO1_N4";
input-enable;
bias-pull-down;
};
@@ -212,22 +212,22 @@
sysclkreq4 {
sysclkreq4_default_mode: sysclkreq4_default {
default_mux {
- ste,function = "sysclkreq";
- ste,pins = "sysclkreq4_d_1";
+ function = "sysclkreq";
+ groups = "sysclkreq4_d_1";
};
default_cfg {
- ste,pins = "GPIO3_P5";
+ pins = "GPIO3_P5";
input-enable;
bias-disable;
};
};
sysclkreq4_sleep_mode: sysclkreq4_sleep {
default_mux {
- ste,function = "gpio";
- ste,pins = "gpio3_a_1";
+ function = "gpio";
+ groups = "gpio3_a_1";
};
default_cfg {
- ste,pins = "GPIO3_P5";
+ pins = "GPIO3_P5";
input-enable;
bias-pull-down;
};
diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
index addfcc7c2750..5c5cea232743 100644
--- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
+++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
@@ -18,33 +18,33 @@
uart0 {
uart0_default_mode: uart0_default {
default_mux {
- ste,function = "u0";
- ste,pins = "u0_a_1";
+ function = "u0";
+ groups = "u0_a_1";
};
default_cfg1 {
- ste,pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+ pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
ste,config = <&in_pu>;
};
default_cfg2 {
- ste,pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
+ pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
ste,config = <&out_hi>;
};
};
uart0_sleep_mode: uart0_sleep {
sleep_cfg1 {
- ste,pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+ pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
ste,config = <&slpm_in_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins = "GPIO1_AJ3"; /* RTS */
+ pins = "GPIO1_AJ3"; /* RTS */
ste,config = <&slpm_out_hi_wkup_pdis>;
};
sleep_cfg3 {
- ste,pins = "GPIO3_AH3"; /* TXD */
+ pins = "GPIO3_AH3"; /* TXD */
ste,config = <&slpm_out_wkup_pdis>;
};
};
@@ -53,28 +53,28 @@
uart1 {
uart1_default_mode: uart1_default {
default_mux {
- ste,function = "u1";
- ste,pins = "u1rxtx_a_1";
+ function = "u1";
+ groups = "u1rxtx_a_1";
};
default_cfg1 {
- ste,pins = "GPIO4_AH6"; /* RXD */
+ pins = "GPIO4_AH6"; /* RXD */
ste,config = <&in_pu>;
};
default_cfg2 {
- ste,pins = "GPIO5_AG6"; /* TXD */
+ pins = "GPIO5_AG6"; /* TXD */
ste,config = <&out_hi>;
};
};
uart1_sleep_mode: uart1_sleep {
sleep_cfg1 {
- ste,pins = "GPIO4_AH6"; /* RXD */
+ pins = "GPIO4_AH6"; /* RXD */
ste,config = <&slpm_in_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins = "GPIO5_AG6"; /* TXD */
+ pins = "GPIO5_AG6"; /* TXD */
ste,config = <&slpm_out_wkup_pdis>;
};
};
@@ -83,28 +83,28 @@
uart2 {
uart2_default_mode: uart2_default {
default_mux {
- ste,function = "u2";
- ste,pins = "u2rxtx_c_1";
+ function = "u2";
+ groups = "u2rxtx_c_1";
};
default_cfg1 {
- ste,pins = "GPIO29_W2"; /* RXD */
+ pins = "GPIO29_W2"; /* RXD */
ste,config = <&in_pu>;
};
default_cfg2 {
- ste,pins = "GPIO30_W3"; /* TXD */
+ pins = "GPIO30_W3"; /* TXD */
ste,config = <&out_hi>;
};
};
uart2_sleep_mode: uart2_sleep {
sleep_cfg1 {
- ste,pins = "GPIO29_W2"; /* RXD */
+ pins = "GPIO29_W2"; /* RXD */
ste,config = <&in_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins = "GPIO30_W3"; /* TXD */
+ pins = "GPIO30_W3"; /* TXD */
ste,config = <&out_wkup_pdis>;
};
};
@@ -114,18 +114,18 @@
i2c0 {
i2c0_default_mode: i2c_default {
default_mux {
- ste,function = "i2c0";
- ste,pins = "i2c0_a_1";
+ function = "i2c0";
+ groups = "i2c0_a_1";
};
default_cfg1 {
- ste,pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
+ pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
ste,config = <&in_pu>;
};
};
i2c0_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
+ pins = "GPIO147_C15", "GPIO148_B16"; /* SDA/SCL */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -134,18 +134,18 @@
i2c1 {
i2c1_default_mode: i2c_default {
default_mux {
- ste,function = "i2c1";
- ste,pins = "i2c1_b_2";
+ function = "i2c1";
+ groups = "i2c1_b_2";
};
default_cfg1 {
- ste,pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
+ pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
ste,config = <&in_pu>;
};
};
i2c1_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
+ pins = "GPIO16_AD3", "GPIO17_AD4"; /* SDA/SCL */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -154,18 +154,18 @@
i2c2 {
i2c2_default_mode: i2c_default {
default_mux {
- ste,function = "i2c2";
- ste,pins = "i2c2_b_2";
+ function = "i2c2";
+ groups = "i2c2_b_2";
};
default_cfg1 {
- ste,pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
+ pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
ste,config = <&in_pu>;
};
};
i2c2_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
+ pins = "GPIO10_AF5", "GPIO11_AG4"; /* SDA/SCL */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -174,18 +174,18 @@
i2c3 {
i2c3_default_mode: i2c_default {
default_mux {
- ste,function = "i2c3";
- ste,pins = "i2c3_c_2";
+ function = "i2c3";
+ groups = "i2c3_c_2";
};
default_cfg1 {
- ste,pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
+ pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
ste,config = <&in_pu>;
};
};
i2c3_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
+ pins = "GPIO229_AG7", "GPIO230_AF7"; /* SDA/SCL */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -198,18 +198,18 @@
i2c4 {
i2c4_default_mode: i2c_default {
default_mux {
- ste,function = "i2c4";
- ste,pins = "i2c4_b_1";
+ function = "i2c4";
+ groups = "i2c4_b_1";
};
default_cfg1 {
- ste,pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
+ pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
ste,config = <&in_pu>;
};
};
i2c4_sleep_mode: i2c_sleep {
sleep_cfg1 {
- ste,pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
+ pins = "GPIO4_AH6", "GPIO5_AG6"; /* SDA/SCL */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -219,19 +219,19 @@
spi2 {
spi2_default_mode: spi_default {
default_mux {
- ste,function = "spi2";
- ste,pins = "spi2_oc1_2";
+ function = "spi2";
+ groups = "spi2_oc1_2";
};
default_cfg1 {
- ste,pins = "GPIO216_AG12"; /* FRM */
+ pins = "GPIO216_AG12"; /* FRM */
ste,config = <&gpio_out_hi>;
};
default_cfg2 {
- ste,pins = "GPIO218_AH11"; /* RXD */
+ pins = "GPIO218_AH11"; /* RXD */
ste,config = <&in_pd>;
};
default_cfg3 {
- ste,pins =
+ pins =
"GPIO215_AH13", /* TXD */
"GPIO217_AH12"; /* CLK */
ste,config = <&out_lo>;
@@ -245,32 +245,32 @@
* as we do not state any muxing.
*/
idle_cfg1 {
- ste,pins = "GPIO218_AH11"; /* RXD */
+ pins = "GPIO218_AH11"; /* RXD */
ste,config = <&slpm_in_pdis>;
};
idle_cfg2 {
- ste,pins = "GPIO215_AH13"; /* TXD */
+ pins = "GPIO215_AH13"; /* TXD */
ste,config = <&slpm_out_lo_pdis>;
};
idle_cfg3 {
- ste,pins = "GPIO217_AH12"; /* CLK */
+ pins = "GPIO217_AH12"; /* CLK */
ste,config = <&slpm_pdis>;
};
};
spi2_sleep_mode: spi_sleep {
sleep_cfg1 {
- ste,pins =
+ pins =
"GPIO216_AG12", /* FRM */
"GPIO218_AH11"; /* RXD */
ste,config = <&slpm_in_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins = "GPIO215_AH13"; /* TXD */
+ pins = "GPIO215_AH13"; /* TXD */
ste,config = <&slpm_out_lo_wkup_pdis>;
};
sleep_cfg3 {
- ste,pins = "GPIO217_AH12"; /* CLK */
+ pins = "GPIO217_AH12"; /* CLK */
ste,config = <&slpm_wkup_pdis>;
};
};
@@ -281,26 +281,26 @@
/* This is the external SD card slot, 4 bits wide */
sdi0_default_mode: sdi0_default {
default_mux {
- ste,function = "mc0";
- ste,pins = "mc0_a_1";
+ function = "mc0";
+ groups = "mc0_a_1";
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO18_AC2", /* CMDDIR */
"GPIO19_AC1", /* DAT0DIR */
"GPIO20_AB4"; /* DAT2DIR */
ste,config = <&out_hi>;
};
default_cfg2 {
- ste,pins = "GPIO22_AA3"; /* FBCLK */
+ pins = "GPIO22_AA3"; /* FBCLK */
ste,config = <&in_nopull>;
};
default_cfg3 {
- ste,pins = "GPIO23_AA4"; /* CLK */
+ pins = "GPIO23_AA4"; /* CLK */
ste,config = <&out_lo>;
};
default_cfg4 {
- ste,pins =
+ pins =
"GPIO24_AB2", /* CMD */
"GPIO25_Y4", /* DAT0 */
"GPIO26_Y2", /* DAT1 */
@@ -312,14 +312,14 @@
sdi0_sleep_mode: sdi0_sleep {
sleep_cfg1 {
- ste,pins =
+ pins =
"GPIO18_AC2", /* CMDDIR */
"GPIO19_AC1", /* DAT0DIR */
"GPIO20_AB4"; /* DAT2DIR */
ste,config = <&slpm_out_hi_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins =
+ pins =
"GPIO22_AA3", /* FBCLK */
"GPIO24_AB2", /* CMD */
"GPIO25_Y4", /* DAT0 */
@@ -329,7 +329,7 @@
ste,config = <&slpm_in_wkup_pdis>;
};
sleep_cfg3 {
- ste,pins = "GPIO23_AA4"; /* CLK */
+ pins = "GPIO23_AA4"; /* CLK */
ste,config = <&slpm_out_lo_wkup_pdis>;
};
};
@@ -339,19 +339,19 @@
/* This is the WLAN SDIO 4 bits wide */
sdi1_default_mode: sdi1_default {
default_mux {
- ste,function = "mc1";
- ste,pins = "mc1_a_1";
+ function = "mc1";
+ groups = "mc1_a_1";
};
default_cfg1 {
- ste,pins = "GPIO208_AH16"; /* CLK */
+ pins = "GPIO208_AH16"; /* CLK */
ste,config = <&out_lo>;
};
default_cfg2 {
- ste,pins = "GPIO209_AG15"; /* FBCLK */
+ pins = "GPIO209_AG15"; /* FBCLK */
ste,config = <&in_nopull>;
};
default_cfg3 {
- ste,pins =
+ pins =
"GPIO210_AJ15", /* CMD */
"GPIO211_AG14", /* DAT0 */
"GPIO212_AF13", /* DAT1 */
@@ -363,11 +363,11 @@
sdi1_sleep_mode: sdi1_sleep {
sleep_cfg1 {
- ste,pins = "GPIO208_AH16"; /* CLK */
+ pins = "GPIO208_AH16"; /* CLK */
ste,config = <&slpm_out_lo_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins =
+ pins =
"GPIO209_AG15", /* FBCLK */
"GPIO210_AJ15", /* CMD */
"GPIO211_AG14", /* DAT0 */
@@ -383,19 +383,19 @@
/* This is the eMMC 8 bits wide, usually PoP eMMC */
sdi2_default_mode: sdi2_default {
default_mux {
- ste,function = "mc2";
- ste,pins = "mc2_a_1";
+ function = "mc2";
+ groups = "mc2_a_1";
};
default_cfg1 {
- ste,pins = "GPIO128_A5"; /* CLK */
+ pins = "GPIO128_A5"; /* CLK */
ste,config = <&out_lo>;
};
default_cfg2 {
- ste,pins = "GPIO130_C8"; /* FBCLK */
+ pins = "GPIO130_C8"; /* FBCLK */
ste,config = <&in_nopull>;
};
default_cfg3 {
- ste,pins =
+ pins =
"GPIO129_B4", /* CMD */
"GPIO131_A12", /* DAT0 */
"GPIO132_C10", /* DAT1 */
@@ -411,17 +411,17 @@
sdi2_sleep_mode: sdi2_sleep {
sleep_cfg1 {
- ste,pins = "GPIO128_A5"; /* CLK */
+ pins = "GPIO128_A5"; /* CLK */
ste,config = <&out_lo_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins =
+ pins =
"GPIO130_C8", /* FBCLK */
"GPIO129_B4"; /* CMD */
ste,config = <&in_wkup_pdis_en>;
};
sleep_cfg3 {
- ste,pins =
+ pins =
"GPIO131_A12", /* DAT0 */
"GPIO132_C10", /* DAT1 */
"GPIO133_B10", /* DAT2 */
@@ -439,19 +439,19 @@
/* This is the eMMC 8 bits wide, usually PCB-mounted eMMC */
sdi4_default_mode: sdi4_default {
default_mux {
- ste,function = "mc4";
- ste,pins = "mc4_a_1";
+ function = "mc4";
+ groups = "mc4_a_1";
};
default_cfg1 {
- ste,pins = "GPIO203_AE23"; /* CLK */
+ pins = "GPIO203_AE23"; /* CLK */
ste,config = <&out_lo>;
};
default_cfg2 {
- ste,pins = "GPIO202_AF25"; /* FBCLK */
+ pins = "GPIO202_AF25"; /* FBCLK */
ste,config = <&in_nopull>;
};
default_cfg3 {
- ste,pins =
+ pins =
"GPIO201_AF24", /* CMD */
"GPIO200_AH26", /* DAT0 */
"GPIO199_AH23", /* DAT1 */
@@ -467,11 +467,11 @@
sdi4_sleep_mode: sdi4_sleep {
sleep_cfg1 {
- ste,pins = "GPIO203_AE23"; /* CLK */
+ pins = "GPIO203_AE23"; /* CLK */
ste,config = <&out_lo_wkup_pdis>;
};
sleep_cfg2 {
- ste,pins =
+ pins =
"GPIO202_AF25", /* FBCLK */
"GPIO201_AF24", /* CMD */
"GPIO200_AH26", /* DAT0 */
@@ -494,11 +494,11 @@
msp0 {
msp0_default_mode: msp0_default {
default_msp0_mux {
- ste,function = "msp0";
- ste,pins = "msp0txrx_a_1", "msp0tfstck_a_1";
+ function = "msp0";
+ groups = "msp0txrx_a_1", "msp0tfstck_a_1";
};
default_msp0_cfg {
- ste,pins =
+ pins =
"GPIO12_AC4", /* TXD */
"GPIO15_AC3", /* RXD */
"GPIO13_AF3", /* TFS */
@@ -511,15 +511,15 @@
msp1 {
msp1_default_mode: msp1_default {
default_mux {
- ste,function = "msp1";
- ste,pins = "msp1txrx_a_1", "msp1_a_1";
+ function = "msp1";
+ groups = "msp1txrx_a_1", "msp1_a_1";
};
default_cfg1 {
- ste,pins = "GPIO33_AF2";
+ pins = "GPIO33_AF2";
ste,config = <&out_lo>;
};
default_cfg2 {
- ste,pins =
+ pins =
"GPIO34_AE1",
"GPIO35_AE2",
"GPIO36_AG2";
@@ -533,18 +533,18 @@
msp2_default_mode: msp2_default {
/* MSP2 usually used for HDMI audio */
default_mux {
- ste,function = "msp2";
- ste,pins = "msp2_a_1";
+ function = "msp2";
+ groups = "msp2_a_1";
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO193_AH27", /* TXD */
"GPIO194_AF27", /* TCK */
"GPIO195_AG28"; /* TFS */
ste,config = <&in_pd>;
};
default_cfg2 {
- ste,pins = "GPIO196_AG26"; /* RXD */
+ pins = "GPIO196_AG26"; /* RXD */
ste,config = <&out_lo>;
};
};
@@ -554,11 +554,11 @@
musb {
musb_default_mode: musb_default {
default_mux {
- ste,function = "usb";
- ste,pins = "usb_a_1";
+ function = "usb";
+ groups = "usb_a_1";
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO256_AF28", /* NXT */
"GPIO258_AD29", /* XCLK */
"GPIO259_AC29", /* DIR */
@@ -573,25 +573,25 @@
ste,config = <&in_nopull>;
};
default_cfg2 {
- ste,pins = "GPIO257_AE29"; /* STP */
+ pins = "GPIO257_AE29"; /* STP */
ste,config = <&out_hi>;
};
};
musb_sleep_mode: musb_sleep {
sleep_cfg1 {
- ste,pins =
+ pins =
"GPIO256_AF28", /* NXT */
"GPIO258_AD29", /* XCLK */
"GPIO259_AC29"; /* DIR */
ste,config = <&slpm_wkup_pdis_en>;
};
sleep_cfg2 {
- ste,pins = "GPIO257_AE29"; /* STP */
+ pins = "GPIO257_AE29"; /* STP */
ste,config = <&slpm_out_hi_wkup_pdis>;
};
sleep_cfg3 {
- ste,pins =
+ pins =
"GPIO260_AD28", /* DAT7 */
"GPIO261_AD26", /* DAT6 */
"GPIO262_AE26", /* DAT5 */
@@ -609,8 +609,8 @@
lcd_default_mode: lcd_default {
default_mux {
/* Mux in VSI0 and all the data lines */
- ste,function = "lcd";
- ste,pins =
+ function = "lcd";
+ groups =
"lcdvsi0_a_1", /* VSI0 for LCD */
"lcd_d0_d7_a_1", /* Data lines */
"lcd_d8_d11_a_1", /* TV-out */
@@ -618,7 +618,7 @@
"lcdvsi1_a_1"; /* VSI1 for HDMI */
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO68_E1", /* VSI0 */
"GPIO69_E2"; /* VSI1 */
ste,config = <&in_pu>;
@@ -626,7 +626,7 @@
};
lcd_sleep_mode: lcd_sleep {
sleep_cfg1 {
- ste,pins = "GPIO69_E2"; /* VSI1 */
+ pins = "GPIO69_E2"; /* VSI1 */
ste,config = <&slpm_in_wkup_pdis>;
};
};
@@ -636,11 +636,11 @@
/* SKE keys on position 2 in an 8x8 matrix */
ske_kpa2_default_mode: ske_kpa2_default {
default_mux {
- ste,function = "kp";
- ste,pins = "kp_a_2";
+ function = "kp";
+ groups = "kp_a_2";
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO153_B17", /* I7 */
"GPIO154_C16", /* I6 */
"GPIO155_C19", /* I5 */
@@ -652,7 +652,7 @@
ste,config = <&in_pd>;
};
default_cfg2 {
- ste,pins =
+ pins =
"GPIO157_A18", /* O7 */
"GPIO158_C18", /* O6 */
"GPIO159_B19", /* O5 */
@@ -666,7 +666,7 @@
};
ske_kpa2_sleep_mode: ske_kpa2_sleep {
sleep_cfg1 {
- ste,pins =
+ pins =
"GPIO153_B17", /* I7 */
"GPIO154_C16", /* I6 */
"GPIO155_C19", /* I5 */
@@ -678,7 +678,7 @@
ste,config = <&slpm_in_pu_wkup_pdis_en>;
};
sleep_cfg2 {
- ste,pins =
+ pins =
"GPIO157_A18", /* O7 */
"GPIO158_C18", /* O6 */
"GPIO159_B19", /* O5 */
@@ -696,11 +696,11 @@
*/
ske_kpaoc1_default_mode: ske_kpaoc1_default {
default_mux {
- ste,function = "kp";
- ste,pins = "kp_a_1", "kp_oc1_1";
+ function = "kp";
+ groups = "kp_a_1", "kp_oc1_1";
};
default_cfg1 {
- ste,pins =
+ pins =
"GPIO91_B6", /* KP_O0 */
"GPIO90_A3", /* KP_O1 */
"GPIO87_B3", /* KP_O2 */
@@ -710,7 +710,7 @@
ste,config = <&out_lo>;
};
default_cfg2 {
- ste,pins =
+ pins =
"GPIO93_B7", /* KP_I0 */
"GPIO92_D6", /* KP_I1 */
"GPIO89_E6", /* KP_I2 */
@@ -729,13 +729,13 @@
* These are plain GPIO pins used by WLAN
*/
default_cfg1 {
- ste,pins =
+ pins =
"GPIO226_AF8", /* WLAN_PMU_EN */
"GPIO85_D5"; /* WLAN_ENA */
ste,config = <&gpio_out_lo>;
};
default_cfg2 {
- ste,pins = "GPIO4_AH6"; /* WLAN_IRQ on UART1 */
+ pins = "GPIO4_AH6"; /* WLAN_IRQ on UART1 */
ste,config = <&gpio_in_pu>;
};
};
diff --git a/arch/arm/boot/dts/ste-href-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi
index 84d7c5d883f2..7d4f8184c522 100644
--- a/arch/arm/boot/dts/ste-href-stuib.dtsi
+++ b/arch/arm/boot/dts/ste-href-stuib.dtsi
@@ -103,7 +103,7 @@
prox {
prox_stuib_mode: prox_stuib {
stuib_cfg {
- ste,pins = "GPIO217_AH12";
+ pins = "GPIO217_AH12";
ste,config = <&gpio_in_pu>;
};
};
@@ -111,7 +111,7 @@
hall {
hall_stuib_mode: stuib_tvk {
stuib_cfg {
- ste,pins = "GPIO145_C13";
+ pins = "GPIO145_C13";
ste,config = <&gpio_in_pu>;
};
};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
index 18b65d1b14f2..062c6aae3afa 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -130,7 +130,7 @@
tc35893 {
tc35893_tvk_mode: tc35893_tvk {
tvk_cfg {
- ste,pins = "GPIO218_AH11";
+ pins = "GPIO218_AH11";
ste,config = <&gpio_in_pu>;
};
};
@@ -138,7 +138,7 @@
prox {
prox_tvk_mode: prox_tvk {
tvk_cfg {
- ste,pins = "GPIO217_AH12";
+ pins = "GPIO217_AH12";
ste,config = <&gpio_in_pu>;
};
};
@@ -146,7 +146,7 @@
hall {
hall_tvk_mode: hall_tvk {
tvk_cfg {
- ste,pins = "GPIO145_C13";
+ pins = "GPIO145_C13";
ste,config = <&gpio_in_pu>;
};
};
@@ -155,7 +155,7 @@
accel_tvk_mode: accel_tvk {
/* Accelerometer interrupt lines 1 & 2 */
tvk_cfg {
- ste,pins = "GPIO82_C1", "GPIO83_D3";
+ pins = "GPIO82_C1", "GPIO83_D3";
ste,config = <&gpio_in_pu>;
};
};
@@ -164,11 +164,11 @@
magneto_tvk_mode: magneto_tvk {
/* Magnetometer uses GPIO 31 and 32, pull these up/down respectively */
tvk_cfg1 {
- ste,pins = "GPIO31_V3";
+ pins = "GPIO31_V3";
ste,config = <&gpio_in_pu>;
};
tvk_cfg2 {
- ste,pins = "GPIO32_V2";
+ pins = "GPIO32_V2";
ste,config = <&gpio_in_pd>;
};
};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index abc762e24fcb..7f3975b58d16 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -79,11 +79,11 @@
ssp0 {
ssp0_hrefprev60_mode: ssp0_hrefprev60_default {
hrefprev60_mux {
- ste,function = "ssp0";
- ste,pins = "ssp0_a_1";
+ function = "ssp0";
+ groups = "ssp0_a_1";
};
hrefprev60_cfg1 {
- ste,pins = "GPIO145_C13"; /* RXD */
+ pins = "GPIO145_C13"; /* RXD */
ste,config = <&in_pd>;
};
@@ -93,11 +93,11 @@
/* This additional pin needed on early MOP500 and HREFs previous to v60 */
sdi0_default_mode: sdi0_default {
hrefprev60_mux {
- ste,function = "mc0";
- ste,pins = "mc0dat31dir_a_1";
+ function = "mc0";
+ groups = "mc0dat31dir_a_1";
};
hrefprev60_cfg1 {
- ste,pins = "GPIO21_AB3"; /* DAT31DIR */
+ pins = "GPIO21_AB3"; /* DAT31DIR */
ste,config = <&out_hi>;
};
@@ -106,7 +106,7 @@
tc35892 {
tc35892_hrefprev60_mode: tc35892_hrefprev60 {
hrefprev60_cfg {
- ste,pins = "GPIO217_AH12";
+ pins = "GPIO217_AH12";
ste,config = <&gpio_in_pu>;
};
};
@@ -114,11 +114,11 @@
ipgpio {
ipgpio_hrefprev60_mode: ipgpio_hrefprev60 {
hrefprev60_mux {
- ste,function = "ipgpio";
- ste,pins = "ipgpio0_c_1", "ipgpio1_c_1";
+ function = "ipgpio";
+ groups = "ipgpio0_c_1", "ipgpio1_c_1";
};
hrefprev60_cfg1 {
- ste,pins = "GPIO6_AF6", "GPIO7_AG5";
+ pins = "GPIO6_AF6", "GPIO7_AG5";
ste,config = <&in_pu>;
};
};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index bcc1f0c37f49..a4bc9e77d640 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -49,7 +49,7 @@
/* SD card detect GPIO pin, extend default state */
sdi0_default_mode: sdi0_default {
default_hrefv60_cfg1 {
- ste,pins = "GPIO95_E8";
+ pins = "GPIO95_E8";
ste,config = <&gpio_in_pu>;
};
};
@@ -64,19 +64,19 @@
*/
ipgpio_hrefv60_mode: ipgpio_hrefv60 {
hrefv60_mux {
- ste,function = "ipgpio";
- ste,pins = "ipgpio0_c_1", "ipgpio1_c_1", "ipgpio4_c_1";
+ function = "ipgpio";
+ groups = "ipgpio0_c_1", "ipgpio1_c_1", "ipgpio4_c_1";
};
hrefv60_cfg1 {
- ste,pins = "GPIO6_AF6", "GPIO7_AG5";
+ pins = "GPIO6_AF6", "GPIO7_AG5";
ste,config = <&in_pu>;
};
hrefv60_cfg2 {
- ste,pins = "GPIO21_AB3";
+ pins = "GPIO21_AB3";
ste,config = <&gpio_out_lo>;
};
hrefv60_cfg3 {
- ste,pins = "GPIO64_F3";
+ pins = "GPIO64_F3";
ste,config = <&out_lo>;
};
};
@@ -89,7 +89,7 @@
*/
etm_hrefv60_mode: etm_hrefv60 {
hrefv60_cfg1 {
- ste,pins =
+ pins =
"GPIO70_G5",
"GPIO71_G4",
"GPIO72_H4",
@@ -103,11 +103,11 @@
nahj_hrefv60_mode: nahj_hrefv60 {
/* NAHJ CTRL on GPIO76 to low, CTRL_INV on GPIO216 to high */
hrefv60_cfg1 {
- ste,pins = "GPIO76_J2";
+ pins = "GPIO76_J2";
ste,config = <&gpio_out_lo>;
};
hrefv60_cfg2 {
- ste,pins = "GPIO216_AG12";
+ pins = "GPIO216_AG12";
ste,config = <&gpio_out_hi>;
};
};
@@ -116,13 +116,13 @@
nfc_hrefv60_mode: nfc_hrefv60 {
/* NFC ENA and RESET to low, pulldown IRQ line */
hrefv60_cfg1 {
- ste,pins =
+ pins =
"GPIO77_H1", /* NFC_ENA */
"GPIO142_C11"; /* NFC_RESET */
ste,config = <&gpio_out_lo>;
};
hrefv60_cfg2 {
- ste,pins = "GPIO144_B13"; /* NFC_IRQ */
+ pins = "GPIO144_B13"; /* NFC_IRQ */
ste,config = <&gpio_in_pd>;
};
};
@@ -130,11 +130,11 @@
force {
force_hrefv60_mode: force_hrefv60 {
hrefv60_cfg1 {
- ste,pins = "GPIO91_B6"; /* FORCE_SENSING_INT */
+ pins = "GPIO91_B6"; /* FORCE_SENSING_INT */
ste,config = <&gpio_in_pu>;
};
hrefv60_cfg2 {
- ste,pins =
+ pins =
"GPIO92_D6", /* FORCE_SENSING_RST */
"GPIO97_D9"; /* FORCE_SENSING_WU */
ste,config = <&gpio_out_lo>;
@@ -144,7 +144,7 @@
dipro {
dipro_hrefv60_mode: dipro_hrefv60 {
hrefv60_cfg1 {
- ste,pins = "GPIO139_C9"; /* DIPRO_INT */
+ pins = "GPIO139_C9"; /* DIPRO_INT */
ste,config = <&gpio_in_pu>;
};
};
@@ -153,7 +153,7 @@
vaudio_hf_hrefv60_mode: vaudio_hf_hrefv60 {
/* Audio Amplifier HF enable GPIO */
hrefv60_cfg1 {
- ste,pins = "GPIO149_B14"; /* VAUDIO_HF_EN, enable MAX8968 */
+ pins = "GPIO149_B14"; /* VAUDIO_HF_EN, enable MAX8968 */
ste,config = <&gpio_out_hi>;
};
};
@@ -165,7 +165,7 @@
* pull low to reset state
*/
hrefv60_cfg1 {
- ste,pins = "GPIO171_D23"; /* GBF_ENA_RESET */
+ pins = "GPIO171_D23"; /* GBF_ENA_RESET */
ste,config = <&gpio_out_lo>;
};
};
@@ -174,7 +174,7 @@
hdtv_hrefv60_mode: hdtv_hrefv60 {
/* MSP : HDTV INTERFACE GPIO line */
hrefv60_cfg1 {
- ste,pins = "GPIO192_AJ27";
+ pins = "GPIO192_AJ27";
ste,config = <&gpio_in_pd>;
};
};
@@ -187,11 +187,11 @@
* reset signals low.
*/
hrefv60_cfg1 {
- ste,pins = "GPIO143_D12", "GPIO146_D13";
+ pins = "GPIO143_D12", "GPIO146_D13";
ste,config = <&gpio_out_lo>;
};
hrefv60_cfg2 {
- ste,pins = "GPIO67_G2";
+ pins = "GPIO67_G2";
ste,config = <&gpio_in_pu>;
};
};
@@ -204,11 +204,11 @@
* Drive DISP1 reset high (not reset), driver DISP2 reset low (reset)
*/
hrefv60_cfg1 {
- ste,pins ="GPIO65_F1";
+ pins ="GPIO65_F1";
ste,config = <&gpio_out_hi>;
};
hrefv60_cfg2 {
- ste,pins ="GPIO66_G3";
+ pins ="GPIO66_G3";
ste,config = <&gpio_out_lo>;
};
};
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index e411ff7769fe..85d3b95dfdba 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -30,12 +30,12 @@
cd_default_mode: cd_default {
cd_default_cfg1 {
/* CD input GPIO */
- ste,pins = "GPIO111_H21";
+ pins = "GPIO111_H21";
ste,input = <0>;
};
cd_default_cfg2 {
/* CD GPIO biasing */
- ste,pins = "GPIO112_J21";
+ pins = "GPIO112_J21";
ste,output = <0>;
};
};
@@ -43,7 +43,7 @@
gpioi2c {
gpioi2c_default_mode: gpioi2c_default {
gpioi2c_default_cfg {
- ste,pins = "GPIO73_C21", "GPIO74_C20";
+ pins = "GPIO73_C21", "GPIO74_C20";
ste,input = <0>;
};
};
@@ -51,7 +51,7 @@
user-led {
user_led_default_mode: user_led_default {
user_led_default_cfg {
- ste,pins = "GPIO2_C5";
+ pins = "GPIO2_C5";
ste,output = <1>;
};
};
@@ -59,7 +59,7 @@
user-button {
user_button_default_mode: user_button_default {
user_button_default_cfg {
- ste,pins = "GPIO3_A4";
+ pins = "GPIO3_A4";
ste,input = <0>;
};
};
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index f435ff20aefe..f182f6538e90 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -100,41 +100,41 @@
uart0 {
uart0_default_mux: uart0_mux {
u0_default_mux {
- ste,function = "u0";
- ste,pins = "u0_a_1";
+ function = "u0";
+ groups = "u0_a_1";
};
};
};
uart1 {
uart1_default_mux: uart1_mux {
u1_default_mux {
- ste,function = "u1";
- ste,pins = "u1_a_1";
+ function = "u1";
+ groups = "u1_a_1";
};
};
};
mmcsd {
mmcsd_default_mux: mmcsd_mux {
mmcsd_default_mux {
- ste,function = "mmcsd";
- ste,pins = "mmcsd_a_1", "mmcsd_b_1";
+ function = "mmcsd";
+ groups = "mmcsd_a_1", "mmcsd_b_1";
};
};
mmcsd_default_mode: mmcsd_default {
mmcsd_default_cfg1 {
/* MCCLK */
- ste,pins = "GPIO8_B10";
+ pins = "GPIO8_B10";
ste,output = <0>;
};
mmcsd_default_cfg2 {
/* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
- ste,pins = "GPIO10_C11", "GPIO15_A12",
+ pins = "GPIO10_C11", "GPIO15_A12",
"GPIO16_C13", "GPIO23_D15";
ste,output = <1>;
};
mmcsd_default_cfg3 {
/* MCCMD, MCDAT3-0, MCMSFBCLK */
- ste,pins = "GPIO9_A10", "GPIO11_B11",
+ pins = "GPIO9_A10", "GPIO11_B11",
"GPIO12_A11", "GPIO13_C12",
"GPIO14_B12", "GPIO24_C15";
ste,input = <1>;
@@ -144,13 +144,13 @@
i2c0 {
i2c0_default_mux: i2c0_mux {
i2c0_default_mux {
- ste,function = "i2c0";
- ste,pins = "i2c0_a_1";
+ function = "i2c0";
+ groups = "i2c0_a_1";
};
};
i2c0_default_mode: i2c0_default {
i2c0_default_cfg {
- ste,pins = "GPIO62_D3", "GPIO63_D2";
+ pins = "GPIO62_D3", "GPIO63_D2";
ste,input = <0>;
};
};
@@ -158,13 +158,13 @@
i2c1 {
i2c1_default_mux: i2c1_mux {
i2c1_default_mux {
- ste,function = "i2c1";
- ste,pins = "i2c1_a_1";
+ function = "i2c1";
+ groups = "i2c1_a_1";
};
};
i2c1_default_mode: i2c1_default {
i2c1_default_cfg {
- ste,pins = "GPIO53_L4", "GPIO54_L3";
+ pins = "GPIO53_L4", "GPIO54_L3";
ste,input = <0>;
};
};
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 3e97a669f15e..206826a855c0 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -404,17 +404,17 @@
*/
eth_snowball_mode: eth_snowball {
snowball_mux {
- ste,function = "sm";
- ste,pins = "sm_b_1";
+ function = "sm";
+ groups = "sm_b_1";
};
/* LAN IRQ pin */
snowball_cfg1 {
- ste,pins = "GPIO140_B11";
+ pins = "GPIO140_B11";
ste,config = <&in_nopull>;
};
/* LAN reset pin */
snowball_cfg2 {
- ste,pins = "GPIO141_C12";
+ pins = "GPIO141_C12";
ste,config = <&gpio_out_hi>;
};
@@ -423,11 +423,11 @@
sdi0 {
sdi0_default_mode: sdi0_default {
snowball_mux {
- ste,function = "mc0";
- ste,pins = "mc0dat31dir_a_1";
+ function = "mc0";
+ groups = "mc0dat31dir_a_1";
};
snowball_cfg1 {
- ste,pins = "GPIO21_AB3"; /* DAT31DIR */
+ pins = "GPIO21_AB3"; /* DAT31DIR */
ste,config = <&out_hi>;
};
@@ -436,19 +436,19 @@
ssp0 {
ssp0_snowball_mode: ssp0_snowball_default {
snowball_mux {
- ste,function = "ssp0";
- ste,pins = "ssp0_a_1";
+ function = "ssp0";
+ groups = "ssp0_a_1";
};
snowball_cfg1 {
- ste,pins = "GPIO144_B13"; /* FRM */
+ pins = "GPIO144_B13"; /* FRM */
ste,config = <&gpio_out_hi>;
};
snowball_cfg2 {
- ste,pins = "GPIO145_C13"; /* RXD */
+ pins = "GPIO145_C13"; /* RXD */
ste,config = <&in_pd>;
};
snowball_cfg3 {
- ste,pins =
+ pins =
"GPIO146_D13", /* TXD */
"GPIO143_D12"; /* CLK */
ste,config = <&out_lo>;
@@ -459,7 +459,7 @@
gpio_led {
gpioled_snowball_mode: gpioled_default {
snowball_cfg1 {
- ste,pins = "GPIO142_C11";
+ pins = "GPIO142_C11";
ste,config = <&gpio_out_hi>;
};
@@ -469,7 +469,7 @@
accel_snowball_mode: accel_snowball {
/* Accelerometer lines */
snowball_cfg1 {
- ste,pins =
+ pins =
"GPIO163_C20", /* ACCEL_IRQ1 */
"GPIO164_B21"; /* ACCEL_IRQ2 */
ste,config = <&gpio_in_pu>;
@@ -479,7 +479,7 @@
magnetometer {
magneto_snowball_mode: magneto_snowball {
snowball_cfg1 {
- ste,pins = "GPIO165_C21"; /* MAG_DRDY */
+ pins = "GPIO165_C21"; /* MAG_DRDY */
ste,config = <&gpio_in_pu>;
};
};
@@ -491,7 +491,7 @@
* pull low to reset state
*/
snowball_cfg1 {
- ste,pins = "GPIO171_D23"; /* GBF_ENA_RESET */
+ pins = "GPIO171_D23"; /* GBF_ENA_RESET */
ste,config = <&gpio_out_lo>;
};
};
@@ -503,13 +503,13 @@
* These are plain GPIO pins used by WLAN
*/
snowball_cfg1 {
- ste,pins =
+ pins =
"GPIO161_D21", /* WLAN_PMU_EN */
"GPIO215_AH13"; /* WLAN_ENA */
ste,config = <&gpio_out_lo>;
};
snowball_cfg2 {
- ste,pins = "GPIO216_AG12"; /* WLAN_IRQ */
+ pins = "GPIO216_AG12"; /* WLAN_IRQ */
ste,config = <&gpio_in_pu>;
};
};
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 222f3b3f4dd5..4296b5398bf5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -1,5 +1,6 @@
#include <dt-bindings/clock/tegra114-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra114-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -50,6 +51,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
rgb {
@@ -67,6 +70,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
rgb {
@@ -498,15 +503,15 @@
reset-names = "fuse";
};
- iommu@70019010 {
- compatible = "nvidia,tegra114-smmu", "nvidia,tegra30-smmu";
- reg = <0x70019010 0x02c
- 0x700191f0 0x010
- 0x70019228 0x074>;
- nvidia,#asids = <4>;
- dma-window = <0 0x40000000>;
- nvidia,swgroups = <0x18659fe>;
- nvidia,ahb = <&ahb>;
+ mc: memory-controller@70019000 {
+ compatible = "nvidia,tegra114-mc";
+ reg = <0x70019000 0x1000>;
+ clocks = <&tegra_car TEGRA114_CLK_MC>;
+ clock-names = "mc";
+
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+
+ #iommu-cells = <1>;
};
ahub@70080000 {
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 51b373ff1065..4eb540be368f 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1942,4 +1942,48 @@
<&tegra_car TEGRA124_CLK_EXTERN1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
};
+
+ thermal-zones {
+ cpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+
+ mem {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+
+ gpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /* There are currently no cooling maps because there are no cooling devices */
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index df2b06b29985..4be06c6ea0c8 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -1,8 +1,10 @@
#include <dt-bindings/clock/tegra124-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra124-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "skeleton.dtsi"
@@ -102,6 +104,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
};
@@ -115,6 +119,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
};
@@ -275,7 +281,8 @@
pinmux: pinmux@0,70000868 {
compatible = "nvidia,tegra124-pinmux";
reg = <0x0 0x70000868 0x0 0x164>, /* Pad control registers */
- <0x0 0x70003000 0x0 0x434>; /* Mux registers */
+ <0x0 0x70003000 0x0 0x434>, /* Mux registers */
+ <0x0 0x70000820 0x0 0x008>; /* MIPI pad control */
};
/*
@@ -551,6 +558,17 @@
reset-names = "fuse";
};
+ mc: memory-controller@0,70019000 {
+ compatible = "nvidia,tegra124-mc";
+ reg = <0x0 0x70019000 0x0 0x1000>;
+ clocks = <&tegra_car TEGRA124_CLK_MC>;
+ clock-names = "mc";
+
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+
+ #iommu-cells = <1>;
+ };
+
sata@0,70020000 {
compatible = "nvidia,tegra124-ahci";
@@ -640,6 +658,18 @@
status = "disabled";
};
+ soctherm: thermal-sensor@0,700e2000 {
+ compatible = "nvidia,tegra124-soctherm";
+ reg = <0x0 0x700e2000 0x0 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+ <&tegra_car TEGRA124_CLK_SOC_THERM>;
+ clock-names = "tsensor", "soctherm";
+ resets = <&tegra_car 78>;
+ reset-names = "soctherm";
+ #thermal-sensor-cells = <1>;
+ };
+
ahub@0,70300000 {
compatible = "nvidia,tegra124-ahub";
reg = <0x0 0x70300000 0x0 0x200>,
@@ -881,6 +911,40 @@
};
};
+ thermal-zones {
+ cpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+ };
+
+ mem {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+ };
+
+ gpu {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+ };
+
+ pllx {
+ polling-delay-passive = <1000>;
+ polling-delay = <1000>;
+
+ thermal-sensors =
+ <&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+ };
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index b270b9e3d455..99475f6e76a3 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -1,5 +1,6 @@
#include <dt-bindings/clock/tegra30-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/memory/tegra30-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -166,6 +167,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DC>;
+
nvidia,head = <0>;
rgb {
@@ -183,6 +186,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ iommus = <&mc TEGRA_SWGROUP_DCB>;
+
nvidia,head = <1>;
rgb {
@@ -615,23 +620,15 @@
clock-names = "pclk", "clk32k_in";
};
- memory-controller@7000f000 {
+ mc: memory-controller@7000f000 {
compatible = "nvidia,tegra30-mc";
- reg = <0x7000f000 0x010
- 0x7000f03c 0x1b4
- 0x7000f200 0x028
- 0x7000f284 0x17c>;
+ reg = <0x7000f000 0x400>;
+ clocks = <&tegra_car TEGRA30_CLK_MC>;
+ clock-names = "mc";
+
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- };
- iommu@7000f010 {
- compatible = "nvidia,tegra30-smmu";
- reg = <0x7000f010 0x02c
- 0x7000f1f0 0x010
- 0x7000f228 0x05c>;
- nvidia,#asids = <4>; /* # of ASIDs */
- dma-window = <0 0x40000000>; /* IOVA start & length */
- nvidia,ahb = <&ahb>;
+ #iommu-cells = <1>;
};
fuse@7000f800 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 322fd1519b09..33920df03640 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -358,6 +358,205 @@
};
};
+ etb@0,20010000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0 0x20010000 0 0x1000>;
+
+ coresight-default-sink;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ etb_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+
+ tpiu@0,20030000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0 0x20030000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ tpiu_in_port: endpoint@0 {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ replicator {
+ /* non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&etb_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out_port0>;
+ };
+ };
+ };
+ };
+
+ funnel@0,20040000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x20040000 0 0x1000>;
+
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel output port */
+ port@0 {
+ reg = <0>;
+ funnel_out_port0: endpoint {
+ remote-endpoint =
+ <&replicator_in_port0>;
+ };
+ };
+
+ /* funnel input ports */
+ port@1 {
+ reg = <0>;
+ funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm1_out_port>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out_port>;
+ };
+ };
+
+ /* Input port #3 is for ITM, not supported here */
+
+ port@4 {
+ reg = <4>;
+ funnel_in_port4: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out_port>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ funnel_in_port5: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out_port>;
+ };
+ };
+ };
+ };
+
+ ptm@0,2201c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2201c000 0 0x1000>;
+
+ cpu = <&cpu0>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,2201d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2201d000 0 0x1000>;
+
+ cpu = <&cpu1>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@0,2203c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2203c000 0 0x1000>;
+
+ cpu = <&cpu2>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ etm0_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port2>;
+ };
+ };
+ };
+
+ etm@0,2203d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2203d000 0 0x1000>;
+
+ cpu = <&cpu3>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ etm1_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port4>;
+ };
+ };
+ };
+
+ etm@0,2203e000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0 0x2203e000 0 0x1000>;
+
+ cpu = <&cpu4>;
+ clocks = <&oscclk6a>;
+ clock-names = "apb_pclk";
+ port {
+ etm2_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port5>;
+ };
+ };
+ };
+
smb {
compatible = "simple-bus";
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index e57d7e5bf96a..5cc779c8e9c6 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -282,8 +282,8 @@ static int sa1111_retrigger_lowirq(struct irq_data *d)
}
if (i == 8)
- printk(KERN_ERR "Danger Will Robinson: failed to "
- "re-trigger IRQ%d\n", d->irq);
+ pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
+ d->irq);
return i == 8 ? -1 : 0;
}
@@ -384,8 +384,8 @@ static int sa1111_retrigger_highirq(struct irq_data *d)
}
if (i == 8)
- printk(KERN_ERR "Danger Will Robinson: failed to "
- "re-trigger IRQ%d\n", d->irq);
+ pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
+ d->irq);
return i == 8 ? -1 : 0;
}
@@ -740,9 +740,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
goto err_unmap;
}
- printk(KERN_INFO "SA1111 Microprocessor Companion Chip: "
- "silicon revision %lx, metal revision %lx\n",
- (id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK));
+ pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n",
+ (id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK);
/*
* We found it. Wake the chip up, and initialise.
@@ -1057,7 +1056,6 @@ static struct platform_driver sa1111_device_driver = {
.resume = sa1111_resume,
.driver = {
.name = "sa1111",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index f95f72d62db7..759f9b0053e2 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -97,7 +97,6 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index d7896580f3bb..2328fe752e9c 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -479,4 +479,4 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_CRYPTO_DEV_TEGRA_AES=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 3e09286f7ff1..c2c3a852af9f 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -127,6 +127,8 @@ CONFIG_SRAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_MD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 63fb5316ff02..df2c0f514b0a 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -146,7 +146,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=y
CONFIG_DMADEVICES=y
CONFIG_SH_DMAE=y
-CONFIG_RCAR_AUDMAC_PP=y
CONFIG_RCAR_DMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PWM=y
@@ -178,5 +177,5 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_THERMAL=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
CONFIG_REGULATOR_DA9210=y
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 3003fa1f6fb4..0409b8f89782 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -93,6 +93,6 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 84f2a756588b..e31b0440c613 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -171,5 +171,5 @@ module_exit(sha1_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 6f1b411b1d55..0b0083757d47 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -194,4 +194,4 @@ module_exit(sha1_neon_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
index 0d2758ff5e12..b124dce838d6 100644
--- a/arch/arm/crypto/sha512_neon_glue.c
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -241,7 +241,7 @@ static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
sha512_neon_final(desc, D);
memcpy(hash, D, SHA384_DIGEST_SIZE);
- memset(D, 0, SHA512_DIGEST_SIZE);
+ memzero_explicit(D, SHA512_DIGEST_SIZE);
return 0;
}
@@ -301,5 +301,5 @@ module_exit(sha512_neon_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
-MODULE_ALIAS("sha512");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 70cd84eb7fda..fe74c0d1e485 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index c6a3e73a6e24..d2f81e6b8c1c 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -43,10 +43,14 @@
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
#define wmb() do { dsb(st); outer_sync(); } while (0)
+#define dma_rmb() dmb(osh)
+#define dma_wmb() dmb(oshst)
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
#endif
#ifndef CONFIG_SMP
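The new dma_rmb()/dma_wmb() definitions above map to dmb(osh)/dmb(oshst): they order CPU accesses to coherent DMA memory against each other without the full dsb()/outer_sync() cost of rmb()/wmb(). A minimal sketch of the intended descriptor-ring usage; struct rx_desc, RX_DONE and the two helpers are illustrative only, not from this series:

#include <linux/types.h>
#include <asm/barrier.h>

struct rx_desc {                /* illustrative layout only */
        u32 status;             /* device sets RX_DONE when the payload is valid */
        u32 len;
};
#define RX_DONE 0x1

static bool rx_desc_ready(struct rx_desc *desc)
{
        if (!(desc->status & RX_DONE))
                return false;
        /* Order the status read before any read of len or the payload. */
        dma_rmb();
        return true;
}

static void rx_desc_recycle(struct rx_desc *desc)
{
        desc->len = 0;
        /* Order the field updates before handing the descriptor back. */
        dma_wmb();
        desc->status = 0;
}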
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..2d46862e7bef 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+
#endif
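The mark_rodata_ro()/set_kernel_text_rw()/set_kernel_text_ro() declarations added above come with empty inline fallbacks, so code that patches kernel text needs no #ifdef CONFIG_DEBUG_RODATA. A sketch of that bracketing pattern with a hypothetical patch_kernel_word() caller; real text patching also needs locking and safer write primitives:

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical caller: briefly lift the text write protection, patch one
 * word, restore the protection, then keep the I/D caches in sync.  With
 * CONFIG_DEBUG_RODATA disabled the two helpers compile to nothing.
 */
static void patch_kernel_word(unsigned long addr, u32 insn)
{
        set_kernel_text_rw();
        *(u32 *)addr = insn;            /* illustrative direct store */
        set_kernel_text_ro();
        flush_icache_range(addr, addr + sizeof(insn));
}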
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..4111592f0130 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -17,6 +17,7 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
+ bool dma_coherent;
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 85738b200023..b52101d37ec7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -121,12 +121,18 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+#define arch_setup_dma_ops arch_setup_dma_ops
+extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent);
+
+#define arch_teardown_dma_ops arch_teardown_dma_ops
+extern void arch_teardown_dma_ops(struct device *dev);
+
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
{
- set_dma_ops(dev, &arm_coherent_dma_ops);
- return 0;
+ return dev->archdata.dma_coherent;
}
-#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
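arch_setup_dma_ops()/arch_teardown_dma_ops() replace the old set_arch_dma_coherent_ops() hook: bus or firmware code now passes the DMA window, an optional IOMMU and the coherency flag in one call, and is_device_dma_coherent() is meant for architecture code only, not drivers. A sketch of a hypothetical bus-layer caller; the window values and function names are illustrative:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * Hypothetical bus/firmware glue: a 4 GiB window at bus address 0,
 * no IOMMU, coherency as probed from the platform description.
 */
static void example_bus_setup_dma(struct device *dev, bool coherent)
{
        arch_setup_dma_ops(dev, 0, 4ULL * SZ_1G, NULL, coherent);
}

static void example_bus_teardown_dma(struct device *dev)
{
        arch_teardown_dma_ops(dev);
}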
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d79..0415eae1df27 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,27 +2,24 @@
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
-#define FIXADDR_TOP 0xffe00000UL
-#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END 0xfff00000UL
+#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
-#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
-extern void __this_fixmap_does_not_exist(void);
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_TEXT_POKE1,
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- if (idx >= FIX_KMAP_NR_PTES)
- __this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
-}
+ __end_of_fixed_addresses
+};
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
#endif
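The rewritten fixmap.h above drops the ad-hoc kmap-only layout in favour of the generic enum fixed_addresses plus asm-generic/fixmap.h, and reserves FIX_TEXT_POKE0/1 for writing to read-only kernel text through a temporary alias. A sketch of that use, assuming the set_fixmap()/clear_fixmap() wrappers provided by the generic header; locking, module addresses and cache-maintenance corner cases are omitted:

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

/*
 * Hypothetical text poke: map the page that holds 'addr' at the
 * FIX_TEXT_POKE0 slot with a writable protection, write through the
 * alias, unmap, then resync the instruction cache.
 */
static void poke_text(void *addr, u32 insn)
{
        unsigned long uaddr = (unsigned long)addr;
        struct page *page = virt_to_page(addr);
        u32 *waddr;

        set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
        waddr = (u32 *)(fix_to_virt(FIX_TEXT_POKE0) + (uaddr & ~PAGE_MASK));

        *waddr = insn;                  /* write via the writable alias */

        clear_fixmap(FIX_TEXT_POKE0);
        flush_icache_range(uaddr, uaddr + sizeof(insn));
}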
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
deleted file mode 100644
index ad774f37c47c..000000000000
--- a/arch/arm/include/asm/hardware/coresight.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * linux/arch/arm/include/asm/hardware/coresight.h
- *
- * CoreSight components' registers
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_HARDWARE_CORESIGHT_H
-#define __ASM_HARDWARE_CORESIGHT_H
-
-#define TRACER_ACCESSED_BIT 0
-#define TRACER_RUNNING_BIT 1
-#define TRACER_CYCLE_ACC_BIT 2
-#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
-#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
-#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
-
-#define TRACER_TIMEOUT 10000
-
-#define etm_writel(t, v, x) \
- (writel_relaxed((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
-
-/* CoreSight Management Registers */
-#define CSMR_LOCKACCESS 0xfb0
-#define CSMR_LOCKSTATUS 0xfb4
-#define CSMR_AUTHSTATUS 0xfb8
-#define CSMR_DEVID 0xfc8
-#define CSMR_DEVTYPE 0xfcc
-/* CoreSight Component Registers */
-#define CSCR_CLASS 0xff4
-
-#define CS_LAR_KEY 0xc5acce55
-
-/* ETM control register, "ETM Architecture", 3.3.1 */
-#define ETMR_CTRL 0
-#define ETMCTRL_POWERDOWN 1
-#define ETMCTRL_PROGRAM (1 << 10)
-#define ETMCTRL_PORTSEL (1 << 11)
-#define ETMCTRL_DO_CONTEXTID (3 << 14)
-#define ETMCTRL_PORTMASK1 (7 << 4)
-#define ETMCTRL_PORTMASK2 (1 << 21)
-#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
-#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
-#define ETMCTRL_DO_CPRT (1 << 1)
-#define ETMCTRL_DATAMASK (3 << 2)
-#define ETMCTRL_DATA_DO_DATA (1 << 2)
-#define ETMCTRL_DATA_DO_ADDR (1 << 3)
-#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
-#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
-#define ETMCTRL_CYCLEACCURATE (1 << 12)
-
-/* ETM configuration code register */
-#define ETMR_CONFCODE (0x04)
-
-/* ETM trace start/stop resource control register */
-#define ETMR_TRACESSCTRL (0x18)
-
-/* ETM trigger event register */
-#define ETMR_TRIGEVT (0x08)
-
-/* address access type register bits, "ETM architecture",
- * table 3-27 */
-/* - access type */
-#define ETMAAT_IFETCH 0
-#define ETMAAT_IEXEC 1
-#define ETMAAT_IEXECPASS 2
-#define ETMAAT_IEXECFAIL 3
-#define ETMAAT_DLOADSTORE 4
-#define ETMAAT_DLOAD 5
-#define ETMAAT_DSTORE 6
-/* - comparison access size */
-#define ETMAAT_JAVA (0 << 3)
-#define ETMAAT_THUMB (1 << 3)
-#define ETMAAT_ARM (3 << 3)
-/* - data value comparison control */
-#define ETMAAT_NOVALCMP (0 << 5)
-#define ETMAAT_VALMATCH (1 << 5)
-#define ETMAAT_VALNOMATCH (3 << 5)
-/* - exact match */
-#define ETMAAT_EXACTMATCH (1 << 7)
-/* - context id comparator control */
-#define ETMAAT_IGNCONTEXTID (0 << 8)
-#define ETMAAT_VALUE1 (1 << 8)
-#define ETMAAT_VALUE2 (2 << 8)
-#define ETMAAT_VALUE3 (3 << 8)
-/* - security level control */
-#define ETMAAT_IGNSECURITY (0 << 10)
-#define ETMAAT_NSONLY (1 << 10)
-#define ETMAAT_SONLY (2 << 10)
-
-#define ETMR_COMP_VAL(x) (0x40 + (x) * 4)
-#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4)
-
-/* ETM status register, "ETM Architecture", 3.3.2 */
-#define ETMR_STATUS (0x10)
-#define ETMST_OVERFLOW BIT(0)
-#define ETMST_PROGBIT BIT(1)
-#define ETMST_STARTSTOP BIT(2)
-#define ETMST_TRIGGER BIT(3)
-
-#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
-#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
-#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
-
-#define ETMR_TRACEENCTRL2 0x1c
-#define ETMR_TRACEENCTRL 0x24
-#define ETMTE_INCLEXCL BIT(24)
-#define ETMR_TRACEENEVT 0x20
-#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
- ETMCTRL_DATA_DO_ADDR | \
- ETMCTRL_BRANCH_OUTPUT | \
- ETMCTRL_DO_CONTEXTID)
-
-/* ETM management registers, "ETM Architecture", 3.5.24 */
-#define ETMMR_OSLAR 0x300
-#define ETMMR_OSLSR 0x304
-#define ETMMR_OSSRR 0x308
-#define ETMMR_PDSR 0x314
-
-/* ETB registers, "CoreSight Components TRM", 9.3 */
-#define ETBR_DEPTH 0x04
-#define ETBR_STATUS 0x0c
-#define ETBR_READMEM 0x10
-#define ETBR_READADDR 0x14
-#define ETBR_WRITEADDR 0x18
-#define ETBR_TRIGGERCOUNT 0x1c
-#define ETBR_CTRL 0x20
-#define ETBR_FORMATTERCTRL 0x304
-#define ETBFF_ENFTC 1
-#define ETBFF_ENFCONT BIT(1)
-#define ETBFF_FONFLIN BIT(4)
-#define ETBFF_MANUAL_FLUSH BIT(6)
-#define ETBFF_TRIGIN BIT(8)
-#define ETBFF_TRIGEVT BIT(9)
-#define ETBFF_TRIGFL BIT(10)
-
-#define etb_writel(t, v, x) \
- (writel_relaxed((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
-
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
- do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etb_unlock(t) \
- do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#endif /* __ASM_HARDWARE_CORESIGHT_H */
-
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
new file mode 100644
index 000000000000..61576dc58ede
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cp14.h
@@ -0,0 +1,542 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_CP14_H
+#define __ASM_HARDWARE_CP14_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+#define etm_read(reg) RCP14_##reg()
+#define etm_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 and MCR14 */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+u32 val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv7
+ * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR
+ *
+ * Available only in DBGv7.1
+ * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR,
+ * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGECR() MRC14(0, c0, c9, 0)
+#define RCP14_DBGDSCCR() MRC14(0, c0, c10, 0)
+#define RCP14_DBGDSMCR() MRC14(0, c0, c11, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGDRCR() MRC14(0, c0, c4, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGECR(val) MCR14(val, 0, c0, c9, 0)
+#define WCP14_DBGDSCCR(val) MCR14(val, 0, c0, c10, 0)
+#define WCP14_DBGDSMCR(val) MCR14(val, 0, c0, c11, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGDRCR(val) MCR14(val, 0, c0, c4, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+/*
+ * ETM Registers
+ *
+ * Available only in ETMv3.3, 3.4, 3.5
+ * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3,
+ * ETMDCVRn, ETMDCMRn
+ *
+ * Available only in ETMv3.5 as read only
+ * ETMIDR2
+ *
+ * Available only in ETMv3.5, PFTv1.0, 1.1
+ * ETMTSEVR, ETMVMIDCVR, ETMPDCR
+ *
+ * Read only
+ * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR
+ * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6,
+ * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR2, ETMPIDR3, ETMCIDR0,
+ * ETMCIDR1, ETMCIDR2, ETMCIDR3
+ *
+ * Write only
+ * ETMOSLAR, ETMLAR
+ * Note: ETMCCER[11] controls WO nature of certain regs. Refer ETM arch spec.
+ */
+#define RCP14_ETMCR() MRC14(1, c0, c0, 0)
+#define RCP14_ETMCCR() MRC14(1, c0, c1, 0)
+#define RCP14_ETMTRIGGER() MRC14(1, c0, c2, 0)
+#define RCP14_ETMASICCR() MRC14(1, c0, c3, 0)
+#define RCP14_ETMSR() MRC14(1, c0, c4, 0)
+#define RCP14_ETMSCR() MRC14(1, c0, c5, 0)
+#define RCP14_ETMTSSCR() MRC14(1, c0, c6, 0)
+#define RCP14_ETMTECR2() MRC14(1, c0, c7, 0)
+#define RCP14_ETMTEEVR() MRC14(1, c0, c8, 0)
+#define RCP14_ETMTECR1() MRC14(1, c0, c9, 0)
+#define RCP14_ETMFFRR() MRC14(1, c0, c10, 0)
+#define RCP14_ETMFFLR() MRC14(1, c0, c11, 0)
+#define RCP14_ETMVDEVR() MRC14(1, c0, c12, 0)
+#define RCP14_ETMVDCR1() MRC14(1, c0, c13, 0)
+#define RCP14_ETMVDCR2() MRC14(1, c0, c14, 0)
+#define RCP14_ETMVDCR3() MRC14(1, c0, c15, 0)
+#define RCP14_ETMACVR0() MRC14(1, c0, c0, 1)
+#define RCP14_ETMACVR1() MRC14(1, c0, c1, 1)
+#define RCP14_ETMACVR2() MRC14(1, c0, c2, 1)
+#define RCP14_ETMACVR3() MRC14(1, c0, c3, 1)
+#define RCP14_ETMACVR4() MRC14(1, c0, c4, 1)
+#define RCP14_ETMACVR5() MRC14(1, c0, c5, 1)
+#define RCP14_ETMACVR6() MRC14(1, c0, c6, 1)
+#define RCP14_ETMACVR7() MRC14(1, c0, c7, 1)
+#define RCP14_ETMACVR8() MRC14(1, c0, c8, 1)
+#define RCP14_ETMACVR9() MRC14(1, c0, c9, 1)
+#define RCP14_ETMACVR10() MRC14(1, c0, c10, 1)
+#define RCP14_ETMACVR11() MRC14(1, c0, c11, 1)
+#define RCP14_ETMACVR12() MRC14(1, c0, c12, 1)
+#define RCP14_ETMACVR13() MRC14(1, c0, c13, 1)
+#define RCP14_ETMACVR14() MRC14(1, c0, c14, 1)
+#define RCP14_ETMACVR15() MRC14(1, c0, c15, 1)
+#define RCP14_ETMACTR0() MRC14(1, c0, c0, 2)
+#define RCP14_ETMACTR1() MRC14(1, c0, c1, 2)
+#define RCP14_ETMACTR2() MRC14(1, c0, c2, 2)
+#define RCP14_ETMACTR3() MRC14(1, c0, c3, 2)
+#define RCP14_ETMACTR4() MRC14(1, c0, c4, 2)
+#define RCP14_ETMACTR5() MRC14(1, c0, c5, 2)
+#define RCP14_ETMACTR6() MRC14(1, c0, c6, 2)
+#define RCP14_ETMACTR7() MRC14(1, c0, c7, 2)
+#define RCP14_ETMACTR8() MRC14(1, c0, c8, 2)
+#define RCP14_ETMACTR9() MRC14(1, c0, c9, 2)
+#define RCP14_ETMACTR10() MRC14(1, c0, c10, 2)
+#define RCP14_ETMACTR11() MRC14(1, c0, c11, 2)
+#define RCP14_ETMACTR12() MRC14(1, c0, c12, 2)
+#define RCP14_ETMACTR13() MRC14(1, c0, c13, 2)
+#define RCP14_ETMACTR14() MRC14(1, c0, c14, 2)
+#define RCP14_ETMACTR15() MRC14(1, c0, c15, 2)
+#define RCP14_ETMDCVR0() MRC14(1, c0, c0, 3)
+#define RCP14_ETMDCVR2() MRC14(1, c0, c2, 3)
+#define RCP14_ETMDCVR4() MRC14(1, c0, c4, 3)
+#define RCP14_ETMDCVR6() MRC14(1, c0, c6, 3)
+#define RCP14_ETMDCVR8() MRC14(1, c0, c8, 3)
+#define RCP14_ETMDCVR10() MRC14(1, c0, c10, 3)
+#define RCP14_ETMDCVR12() MRC14(1, c0, c12, 3)
+#define RCP14_ETMDCVR14() MRC14(1, c0, c14, 3)
+#define RCP14_ETMDCMR0() MRC14(1, c0, c0, 4)
+#define RCP14_ETMDCMR2() MRC14(1, c0, c2, 4)
+#define RCP14_ETMDCMR4() MRC14(1, c0, c4, 4)
+#define RCP14_ETMDCMR6() MRC14(1, c0, c6, 4)
+#define RCP14_ETMDCMR8() MRC14(1, c0, c8, 4)
+#define RCP14_ETMDCMR10() MRC14(1, c0, c10, 4)
+#define RCP14_ETMDCMR12() MRC14(1, c0, c12, 4)
+#define RCP14_ETMDCMR14() MRC14(1, c0, c14, 4)
+#define RCP14_ETMCNTRLDVR0() MRC14(1, c0, c0, 5)
+#define RCP14_ETMCNTRLDVR1() MRC14(1, c0, c1, 5)
+#define RCP14_ETMCNTRLDVR2() MRC14(1, c0, c2, 5)
+#define RCP14_ETMCNTRLDVR3() MRC14(1, c0, c3, 5)
+#define RCP14_ETMCNTENR0() MRC14(1, c0, c4, 5)
+#define RCP14_ETMCNTENR1() MRC14(1, c0, c5, 5)
+#define RCP14_ETMCNTENR2() MRC14(1, c0, c6, 5)
+#define RCP14_ETMCNTENR3() MRC14(1, c0, c7, 5)
+#define RCP14_ETMCNTRLDEVR0() MRC14(1, c0, c8, 5)
+#define RCP14_ETMCNTRLDEVR1() MRC14(1, c0, c9, 5)
+#define RCP14_ETMCNTRLDEVR2() MRC14(1, c0, c10, 5)
+#define RCP14_ETMCNTRLDEVR3() MRC14(1, c0, c11, 5)
+#define RCP14_ETMCNTVR0() MRC14(1, c0, c12, 5)
+#define RCP14_ETMCNTVR1() MRC14(1, c0, c13, 5)
+#define RCP14_ETMCNTVR2() MRC14(1, c0, c14, 5)
+#define RCP14_ETMCNTVR3() MRC14(1, c0, c15, 5)
+#define RCP14_ETMSQ12EVR() MRC14(1, c0, c0, 6)
+#define RCP14_ETMSQ21EVR() MRC14(1, c0, c1, 6)
+#define RCP14_ETMSQ23EVR() MRC14(1, c0, c2, 6)
+#define RCP14_ETMSQ31EVR() MRC14(1, c0, c3, 6)
+#define RCP14_ETMSQ32EVR() MRC14(1, c0, c4, 6)
+#define RCP14_ETMSQ13EVR() MRC14(1, c0, c5, 6)
+#define RCP14_ETMSQR() MRC14(1, c0, c7, 6)
+#define RCP14_ETMEXTOUTEVR0() MRC14(1, c0, c8, 6)
+#define RCP14_ETMEXTOUTEVR1() MRC14(1, c0, c9, 6)
+#define RCP14_ETMEXTOUTEVR2() MRC14(1, c0, c10, 6)
+#define RCP14_ETMEXTOUTEVR3() MRC14(1, c0, c11, 6)
+#define RCP14_ETMCIDCVR0() MRC14(1, c0, c12, 6)
+#define RCP14_ETMCIDCVR1() MRC14(1, c0, c13, 6)
+#define RCP14_ETMCIDCVR2() MRC14(1, c0, c14, 6)
+#define RCP14_ETMCIDCMR() MRC14(1, c0, c15, 6)
+#define RCP14_ETMIMPSPEC0() MRC14(1, c0, c0, 7)
+#define RCP14_ETMIMPSPEC1() MRC14(1, c0, c1, 7)
+#define RCP14_ETMIMPSPEC2() MRC14(1, c0, c2, 7)
+#define RCP14_ETMIMPSPEC3() MRC14(1, c0, c3, 7)
+#define RCP14_ETMIMPSPEC4() MRC14(1, c0, c4, 7)
+#define RCP14_ETMIMPSPEC5() MRC14(1, c0, c5, 7)
+#define RCP14_ETMIMPSPEC6() MRC14(1, c0, c6, 7)
+#define RCP14_ETMIMPSPEC7() MRC14(1, c0, c7, 7)
+#define RCP14_ETMSYNCFR() MRC14(1, c0, c8, 7)
+#define RCP14_ETMIDR() MRC14(1, c0, c9, 7)
+#define RCP14_ETMCCER() MRC14(1, c0, c10, 7)
+#define RCP14_ETMEXTINSELR() MRC14(1, c0, c11, 7)
+#define RCP14_ETMTESSEICR() MRC14(1, c0, c12, 7)
+#define RCP14_ETMEIBCR() MRC14(1, c0, c13, 7)
+#define RCP14_ETMTSEVR() MRC14(1, c0, c14, 7)
+#define RCP14_ETMAUXCR() MRC14(1, c0, c15, 7)
+#define RCP14_ETMTRACEIDR() MRC14(1, c1, c0, 0)
+#define RCP14_ETMIDR2() MRC14(1, c1, c2, 0)
+#define RCP14_ETMVMIDCVR() MRC14(1, c1, c0, 1)
+#define RCP14_ETMOSLSR() MRC14(1, c1, c1, 4)
+/* Not available in PFTv1.1 */
+#define RCP14_ETMOSSRR() MRC14(1, c1, c2, 4)
+#define RCP14_ETMPDCR() MRC14(1, c1, c4, 4)
+#define RCP14_ETMPDSR() MRC14(1, c1, c5, 4)
+#define RCP14_ETMITCTRL() MRC14(1, c7, c0, 4)
+#define RCP14_ETMCLAIMSET() MRC14(1, c7, c8, 6)
+#define RCP14_ETMCLAIMCLR() MRC14(1, c7, c9, 6)
+#define RCP14_ETMLSR() MRC14(1, c7, c13, 6)
+#define RCP14_ETMAUTHSTATUS() MRC14(1, c7, c14, 6)
+#define RCP14_ETMDEVID() MRC14(1, c7, c2, 7)
+#define RCP14_ETMDEVTYPE() MRC14(1, c7, c3, 7)
+#define RCP14_ETMPIDR4() MRC14(1, c7, c4, 7)
+#define RCP14_ETMPIDR5() MRC14(1, c7, c5, 7)
+#define RCP14_ETMPIDR6() MRC14(1, c7, c6, 7)
+#define RCP14_ETMPIDR7() MRC14(1, c7, c7, 7)
+#define RCP14_ETMPIDR0() MRC14(1, c7, c8, 7)
+#define RCP14_ETMPIDR1() MRC14(1, c7, c9, 7)
+#define RCP14_ETMPIDR2() MRC14(1, c7, c10, 7)
+#define RCP14_ETMPIDR3() MRC14(1, c7, c11, 7)
+#define RCP14_ETMCIDR0() MRC14(1, c7, c12, 7)
+#define RCP14_ETMCIDR1() MRC14(1, c7, c13, 7)
+#define RCP14_ETMCIDR2() MRC14(1, c7, c14, 7)
+#define RCP14_ETMCIDR3() MRC14(1, c7, c15, 7)
+
+#define WCP14_ETMCR(val) MCR14(val, 1, c0, c0, 0)
+#define WCP14_ETMTRIGGER(val) MCR14(val, 1, c0, c2, 0)
+#define WCP14_ETMASICCR(val) MCR14(val, 1, c0, c3, 0)
+#define WCP14_ETMSR(val) MCR14(val, 1, c0, c4, 0)
+#define WCP14_ETMTSSCR(val) MCR14(val, 1, c0, c6, 0)
+#define WCP14_ETMTECR2(val) MCR14(val, 1, c0, c7, 0)
+#define WCP14_ETMTEEVR(val) MCR14(val, 1, c0, c8, 0)
+#define WCP14_ETMTECR1(val) MCR14(val, 1, c0, c9, 0)
+#define WCP14_ETMFFRR(val) MCR14(val, 1, c0, c10, 0)
+#define WCP14_ETMFFLR(val) MCR14(val, 1, c0, c11, 0)
+#define WCP14_ETMVDEVR(val) MCR14(val, 1, c0, c12, 0)
+#define WCP14_ETMVDCR1(val) MCR14(val, 1, c0, c13, 0)
+#define WCP14_ETMVDCR2(val) MCR14(val, 1, c0, c14, 0)
+#define WCP14_ETMVDCR3(val) MCR14(val, 1, c0, c15, 0)
+#define WCP14_ETMACVR0(val) MCR14(val, 1, c0, c0, 1)
+#define WCP14_ETMACVR1(val) MCR14(val, 1, c0, c1, 1)
+#define WCP14_ETMACVR2(val) MCR14(val, 1, c0, c2, 1)
+#define WCP14_ETMACVR3(val) MCR14(val, 1, c0, c3, 1)
+#define WCP14_ETMACVR4(val) MCR14(val, 1, c0, c4, 1)
+#define WCP14_ETMACVR5(val) MCR14(val, 1, c0, c5, 1)
+#define WCP14_ETMACVR6(val) MCR14(val, 1, c0, c6, 1)
+#define WCP14_ETMACVR7(val) MCR14(val, 1, c0, c7, 1)
+#define WCP14_ETMACVR8(val) MCR14(val, 1, c0, c8, 1)
+#define WCP14_ETMACVR9(val) MCR14(val, 1, c0, c9, 1)
+#define WCP14_ETMACVR10(val) MCR14(val, 1, c0, c10, 1)
+#define WCP14_ETMACVR11(val) MCR14(val, 1, c0, c11, 1)
+#define WCP14_ETMACVR12(val) MCR14(val, 1, c0, c12, 1)
+#define WCP14_ETMACVR13(val) MCR14(val, 1, c0, c13, 1)
+#define WCP14_ETMACVR14(val) MCR14(val, 1, c0, c14, 1)
+#define WCP14_ETMACVR15(val) MCR14(val, 1, c0, c15, 1)
+#define WCP14_ETMACTR0(val) MCR14(val, 1, c0, c0, 2)
+#define WCP14_ETMACTR1(val) MCR14(val, 1, c0, c1, 2)
+#define WCP14_ETMACTR2(val) MCR14(val, 1, c0, c2, 2)
+#define WCP14_ETMACTR3(val) MCR14(val, 1, c0, c3, 2)
+#define WCP14_ETMACTR4(val) MCR14(val, 1, c0, c4, 2)
+#define WCP14_ETMACTR5(val) MCR14(val, 1, c0, c5, 2)
+#define WCP14_ETMACTR6(val) MCR14(val, 1, c0, c6, 2)
+#define WCP14_ETMACTR7(val) MCR14(val, 1, c0, c7, 2)
+#define WCP14_ETMACTR8(val) MCR14(val, 1, c0, c8, 2)
+#define WCP14_ETMACTR9(val) MCR14(val, 1, c0, c9, 2)
+#define WCP14_ETMACTR10(val) MCR14(val, 1, c0, c10, 2)
+#define WCP14_ETMACTR11(val) MCR14(val, 1, c0, c11, 2)
+#define WCP14_ETMACTR12(val) MCR14(val, 1, c0, c12, 2)
+#define WCP14_ETMACTR13(val) MCR14(val, 1, c0, c13, 2)
+#define WCP14_ETMACTR14(val) MCR14(val, 1, c0, c14, 2)
+#define WCP14_ETMACTR15(val) MCR14(val, 1, c0, c15, 2)
+#define WCP14_ETMDCVR0(val) MCR14(val, 1, c0, c0, 3)
+#define WCP14_ETMDCVR2(val) MCR14(val, 1, c0, c2, 3)
+#define WCP14_ETMDCVR4(val) MCR14(val, 1, c0, c4, 3)
+#define WCP14_ETMDCVR6(val) MCR14(val, 1, c0, c6, 3)
+#define WCP14_ETMDCVR8(val) MCR14(val, 1, c0, c8, 3)
+#define WCP14_ETMDCVR10(val) MCR14(val, 1, c0, c10, 3)
+#define WCP14_ETMDCVR12(val) MCR14(val, 1, c0, c12, 3)
+#define WCP14_ETMDCVR14(val) MCR14(val, 1, c0, c14, 3)
+#define WCP14_ETMDCMR0(val) MCR14(val, 1, c0, c0, 4)
+#define WCP14_ETMDCMR2(val) MCR14(val, 1, c0, c2, 4)
+#define WCP14_ETMDCMR4(val) MCR14(val, 1, c0, c4, 4)
+#define WCP14_ETMDCMR6(val) MCR14(val, 1, c0, c6, 4)
+#define WCP14_ETMDCMR8(val) MCR14(val, 1, c0, c8, 4)
+#define WCP14_ETMDCMR10(val) MCR14(val, 1, c0, c10, 4)
+#define WCP14_ETMDCMR12(val) MCR14(val, 1, c0, c12, 4)
+#define WCP14_ETMDCMR14(val) MCR14(val, 1, c0, c14, 4)
+#define WCP14_ETMCNTRLDVR0(val) MCR14(val, 1, c0, c0, 5)
+#define WCP14_ETMCNTRLDVR1(val) MCR14(val, 1, c0, c1, 5)
+#define WCP14_ETMCNTRLDVR2(val) MCR14(val, 1, c0, c2, 5)
+#define WCP14_ETMCNTRLDVR3(val) MCR14(val, 1, c0, c3, 5)
+#define WCP14_ETMCNTENR0(val) MCR14(val, 1, c0, c4, 5)
+#define WCP14_ETMCNTENR1(val) MCR14(val, 1, c0, c5, 5)
+#define WCP14_ETMCNTENR2(val) MCR14(val, 1, c0, c6, 5)
+#define WCP14_ETMCNTENR3(val) MCR14(val, 1, c0, c7, 5)
+#define WCP14_ETMCNTRLDEVR0(val) MCR14(val, 1, c0, c8, 5)
+#define WCP14_ETMCNTRLDEVR1(val) MCR14(val, 1, c0, c9, 5)
+#define WCP14_ETMCNTRLDEVR2(val) MCR14(val, 1, c0, c10, 5)
+#define WCP14_ETMCNTRLDEVR3(val) MCR14(val, 1, c0, c11, 5)
+#define WCP14_ETMCNTVR0(val) MCR14(val, 1, c0, c12, 5)
+#define WCP14_ETMCNTVR1(val) MCR14(val, 1, c0, c13, 5)
+#define WCP14_ETMCNTVR2(val) MCR14(val, 1, c0, c14, 5)
+#define WCP14_ETMCNTVR3(val) MCR14(val, 1, c0, c15, 5)
+#define WCP14_ETMSQ12EVR(val) MCR14(val, 1, c0, c0, 6)
+#define WCP14_ETMSQ21EVR(val) MCR14(val, 1, c0, c1, 6)
+#define WCP14_ETMSQ23EVR(val) MCR14(val, 1, c0, c2, 6)
+#define WCP14_ETMSQ31EVR(val) MCR14(val, 1, c0, c3, 6)
+#define WCP14_ETMSQ32EVR(val) MCR14(val, 1, c0, c4, 6)
+#define WCP14_ETMSQ13EVR(val) MCR14(val, 1, c0, c5, 6)
+#define WCP14_ETMSQR(val) MCR14(val, 1, c0, c7, 6)
+#define WCP14_ETMEXTOUTEVR0(val) MCR14(val, 1, c0, c8, 6)
+#define WCP14_ETMEXTOUTEVR1(val) MCR14(val, 1, c0, c9, 6)
+#define WCP14_ETMEXTOUTEVR2(val) MCR14(val, 1, c0, c10, 6)
+#define WCP14_ETMEXTOUTEVR3(val) MCR14(val, 1, c0, c11, 6)
+#define WCP14_ETMCIDCVR0(val) MCR14(val, 1, c0, c12, 6)
+#define WCP14_ETMCIDCVR1(val) MCR14(val, 1, c0, c13, 6)
+#define WCP14_ETMCIDCVR2(val) MCR14(val, 1, c0, c14, 6)
+#define WCP14_ETMCIDCMR(val) MCR14(val, 1, c0, c15, 6)
+#define WCP14_ETMIMPSPEC0(val) MCR14(val, 1, c0, c0, 7)
+#define WCP14_ETMIMPSPEC1(val) MCR14(val, 1, c0, c1, 7)
+#define WCP14_ETMIMPSPEC2(val) MCR14(val, 1, c0, c2, 7)
+#define WCP14_ETMIMPSPEC3(val) MCR14(val, 1, c0, c3, 7)
+#define WCP14_ETMIMPSPEC4(val) MCR14(val, 1, c0, c4, 7)
+#define WCP14_ETMIMPSPEC5(val) MCR14(val, 1, c0, c5, 7)
+#define WCP14_ETMIMPSPEC6(val) MCR14(val, 1, c0, c6, 7)
+#define WCP14_ETMIMPSPEC7(val) MCR14(val, 1, c0, c7, 7)
+/* Read-only in ETMv3.4 and ETMv3.5 */
+#define WCP14_ETMSYNCFR(val) MCR14(val, 1, c0, c8, 7)
+#define WCP14_ETMEXTINSELR(val) MCR14(val, 1, c0, c11, 7)
+#define WCP14_ETMTESSEICR(val) MCR14(val, 1, c0, c12, 7)
+#define WCP14_ETMEIBCR(val) MCR14(val, 1, c0, c13, 7)
+#define WCP14_ETMTSEVR(val) MCR14(val, 1, c0, c14, 7)
+#define WCP14_ETMAUXCR(val) MCR14(val, 1, c0, c15, 7)
+#define WCP14_ETMTRACEIDR(val) MCR14(val, 1, c1, c0, 0)
+#define WCP14_ETMIDR2(val) MCR14(val, 1, c1, c2, 0)
+#define WCP14_ETMVMIDCVR(val) MCR14(val, 1, c1, c0, 1)
+#define WCP14_ETMOSLAR(val) MCR14(val, 1, c1, c0, 4)
+/* Not available in PFTv1.1 */
+#define WCP14_ETMOSSRR(val) MCR14(val, 1, c1, c2, 4)
+#define WCP14_ETMPDCR(val) MCR14(val, 1, c1, c4, 4)
+#define WCP14_ETMPDSR(val) MCR14(val, 1, c1, c5, 4)
+#define WCP14_ETMITCTRL(val) MCR14(val, 1, c7, c0, 4)
+#define WCP14_ETMCLAIMSET(val) MCR14(val, 1, c7, c8, 6)
+#define WCP14_ETMCLAIMCLR(val) MCR14(val, 1, c7, c9, 6)
+/* Writes to this register from the CP14 interface are ignored */
+#define WCP14_ETMLAR(val) MCR14(val, 1, c7, c12, 6)
+
+#endif
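A minimal sketch of how these CP14 accessors compose, assuming the MRC14/MCR14 building blocks defined earlier in this header; the helper name is illustrative and not part of the patch:

/* Illustrative only: clear the ETM OS lock, then read the power-down
 * status. isb() comes from <asm/barrier.h>. */
static inline u32 example_etm_os_unlock_and_status(void)
{
	WCP14_ETMOSLAR(0x0);	/* a non-key value clears the OS lock */
	isb();
	return RCP14_ETMPDSR();	/* ETM power-down status */
}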
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index a71b417b1856..af79da40af2a 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -8,6 +8,7 @@ static inline void ack_bad_irq(int irq)
{
extern unsigned long irq_err_count;
irq_err_count++;
+ pr_crit("unexpected IRQ trap at vector %02x\n", irq);
}
void set_irq_flags(unsigned int irq, unsigned int flags);
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269c6e61..66ce17655bb9 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e21756b..254e0650e48b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d5712716..63e0ecc04901 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
+void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size)
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu))
+ if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
/*
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index d428e386c88e..3446f6a1d9fa 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -219,6 +219,23 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);
+/**
+ * mcpm_sync_init - Initialize the cluster synchronization support
+ *
+ * @power_up_setup: platform-specific function invoked during the very
+ * early CPU/cluster bringup stage.
+ *
+ * This prepares memory used by vlocks and the MCPM state machine used
+ * across CPUs that may have their caches active or inactive. Must be
+ * called only after a successful call to mcpm_platform_register().
+ *
+ * The power_up_setup argument is a pointer to assembly code called when
+ * the MMU and caches are still disabled during boot and no stack space is
+ * available. The affinity level passed to that code corresponds to the
+ * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
+ * CPU level). Proper exclusion mechanisms are already activated at that
+ * point.
+ */
int __init mcpm_sync_init(
void (*power_up_setup)(unsigned int affinity_level));
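A hedged usage sketch for mcpm_sync_init(); the my_soc_* names and ops structure are hypothetical, and only the required ordering after mcpm_platform_register() comes from the comment above:

/* Hypothetical platform init; my_soc_* identifiers are illustrative. */
extern void my_soc_power_up_setup(unsigned int affinity_level);	/* asm helper */
extern const struct mcpm_platform_ops my_soc_power_ops;

static int __init my_soc_mcpm_init(void)
{
	int ret;

	ret = mcpm_platform_register(&my_soc_power_ops);
	if (ret)
		return ret;

	/* valid only after successful registration, as documented above */
	return mcpm_sync_init(my_soc_power_up_setup);
}
early_initcall(my_soc_mcpm_init);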
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 209e6504922e..a89b4076cde4 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,14 +30,14 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- register unsigned long *sp asm ("sp");
/*
* Read TPIDRPRW.
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+ asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a779361682..19cfab526d13 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -157,7 +157,15 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ extern pmdval_t user_pmd_table;
+ pmdval_t prot;
+
+ if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
+ prot = user_pmd_table;
+ else
+ prot = _PAGE_USER_TABLE;
+
+ __pmd_populate(pmdp, page_to_phys(ptep), prot);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
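user_pmd_table is only declared here; a sketch of the kind of initialisation the declaration implies follows. The condition and location are assumptions (the real setup belongs to the wider PXN series, not this hunk):

/* Assumed setup, for illustration only: mark user page-table pointers with
 * the PXN table attribute on ARMv7 classic-MMU systems. */
pmdval_t user_pmd_table = _PAGE_USER_TABLE;

static void __init example_enable_user_pxn(int cpu_arch)
{
	if (cpu_arch >= CPU_ARCH_ARMv7 && !IS_ENABLED(CONFIG_ARM_LPAE))
		user_pmd_table |= PMD_PXNTABLE;	/* see the 2-level hwdef hunk below */
}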
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401..5e68278e953e 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,14 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
*/
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 9fd61c72a33a..f8f1cff62065 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -76,6 +76,7 @@
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3b30062975b2..d5cac545ba33 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -252,17 +252,57 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte_ext(ptep, pteval, ext);
}
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
-
-PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
-PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
-PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN);
-PTE_BIT_FUNC(mknexec, |= L_PTE_XN);
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= ~pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(L_PTE_XN));
+}
+
+static inline pte_t pte_mknexec(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(L_PTE_XN));
+}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 601264d983fa..51622ba7c4a6 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -154,9 +154,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
-#define current_pt_regs(void) ({ \
- register unsigned long sp asm ("sp"); \
- (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
+#define current_pt_regs(void) ({ (struct pt_regs *) \
+ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
#endif /* __ASSEMBLY__ */
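The arithmetic in current_pt_regs() is compact; an illustrative expansion of the same computation (no new behaviour) may help:

/* sp | (THREAD_SIZE - 1) is the address of the last byte of the current
 * kernel stack; subtracting 7 gives the 8-byte-reserved stack top
 * (THREAD_START_SP); the final "- 1", in pt_regs units, steps back over
 * the register frame saved there on kernel entry. */
struct pt_regs *regs = (struct pt_regs *)
	((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;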
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index ce73ab635414..d890e41f5520 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -90,14 +90,19 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/*
+ * how to get the current stack pointer in C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34fd4f72..ee5f3084243c 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -22,6 +22,7 @@
#define FPSID_NODOUBLE (1<<20)
#define FPSID_ARCH_BIT (16)
#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
+#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT)
#define FPSID_PART_BIT (8)
#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
#define FPSID_VARIANT_BIT (4)
@@ -75,6 +76,10 @@
/* MVFR0 bits */
#define MVFR0_A_SIMD_BIT (0)
#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT)
+#define MVFR0_SP_BIT (4)
+#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT)
+#define MVFR0_DP_BIT (8)
+#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index e8275ea88e88..efd562412850 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,18 @@
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+ /* Dom0 is mapped 1:1, so if pfn == mfn the page is local; otherwise
+ * it is a foreign page grant-mapped in dom0. If the page is local we
+ * can safely call the native dma_ops function, otherwise we call
+ * the Xen-specific function. */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ struct dma_attrs *attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
+ * always return false. If the page is local we can safely call the
+ * native dma_ops function, otherwise we call the Xen-specific
+ * function. */
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->unmap_page)
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ } else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
-void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (__generic_dma_ops(hwdev)->sync_single_for_device)
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
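A short sketch, not part of the patch, of why the two paths above test for a local page differently: xen_dma_map_page() still has the struct page, so it can compare the page frame with the bus address, while unmap/sync only hold the dma handle and fall back to pfn_valid(), which is false for foreign grant-mapped mfns in dom0:

/* Illustrative helpers only; the inline functions above open-code these. */
static inline bool xen_dma_page_is_local(struct page *page, dma_addr_t dev_addr)
{
	return PFN_DOWN(dev_addr) == page_to_pfn(page);	/* dom0 is mapped 1:1 */
}

static inline bool xen_dma_handle_is_local(dma_addr_t handle)
{
	return pfn_valid(PFN_DOWN(handle));	/* foreign mfn is never a valid local pfn */
}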
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))
+bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn);
+
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8dcbed5016ac..fb2b71ebe3f2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -30,7 +30,6 @@ else
obj-y += entry-armv.o
endif
-obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
@@ -47,6 +46,7 @@ endif
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
+obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
@@ -67,7 +67,7 @@ test-kprobes-objs += kprobes-test-arm.o
endif
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += kgdb.o patch.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_OF) += devtree.o
@@ -84,6 +84,7 @@ obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
+CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/atags_compat.c b/arch/arm/kernel/atags_compat.c
index 5236ad38f417..05c28b12353c 100644
--- a/arch/arm/kernel/atags_compat.c
+++ b/arch/arm/kernel/atags_compat.c
@@ -97,8 +97,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist)
struct tag *tag = taglist;
if (params->u1.s.page_size != PAGE_SIZE) {
- printk(KERN_WARNING "Warning: bad configuration page, "
- "trying to continue\n");
+ pr_warn("Warning: bad configuration page, trying to continue\n");
return;
}
@@ -109,8 +108,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist)
params->u1.s.nr_pages != 0x04000 &&
params->u1.s.nr_pages != 0x08000 &&
params->u1.s.nr_pages != 0x10000) {
- printk(KERN_WARNING "Warning: bad NeTTrom parameters "
- "detected, using defaults\n");
+ pr_warn("Warning: bad NeTTrom parameters detected, using defaults\n");
params->u1.s.nr_pages = 0x1000; /* 16MB */
params->u1.s.ramdisk_size = 0;
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 528f8af2addb..68c6ae0b9e4c 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -167,8 +167,7 @@ static void __init parse_tags(const struct tag *t)
{
for (; t->hdr.size; t = tag_next(t))
if (!parse_tag(t))
- printk(KERN_WARNING
- "Ignoring unrecognised tag 0x%08x\n",
+ pr_warn("Ignoring unrecognised tag 0x%08x\n",
t->hdr.tag);
}
@@ -193,7 +192,7 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
*/
for_each_machine_desc(p)
if (machine_nr == p->nr) {
- printk("Machine: %s\n", p->name);
+ pr_info("Machine: %s\n", p->name);
mdesc = p;
break;
}
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index c7ff8073416f..5a3379055f55 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -41,7 +41,7 @@ static int __init init_atags_procfs(void)
size_t size;
if (tag->hdr.tag != ATAG_CORE) {
- printk(KERN_INFO "No ATAGs?");
+ pr_info("No ATAGs?");
return -EINVAL;
}
@@ -68,7 +68,7 @@ static int __init init_atags_procfs(void)
nomem:
kfree(b);
- printk(KERN_ERR "Exporting ATAGs: not enough memory\n");
+ pr_err("Exporting ATAGs: not enough memory\n");
return -ENOMEM;
}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index daaff73bc776..a4effd6d8f2f 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -364,7 +364,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
/*
* Report what we did for this bus
*/
- printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
+ pr_info("PCI: bus%d: Fast back to back transfers %sabled\n",
bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
EXPORT_SYMBOL(pcibios_fixup_bus);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 360bb6d701f5..84363fe7bad2 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -213,8 +213,8 @@ void __init isa_init_dma(void)
for (chan = 0; chan < 8; chan++) {
int ret = isa_dma_add(chan, &isa_dma[chan]);
if (ret)
- printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
- chan, ret);
+ pr_err("ISADMA%u: unable to register: %d\n",
+ chan, ret);
}
request_dma(DMA_ISA_CASCADE, "cascade");
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7b829d9663b1..e651c4d0a0d9 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -79,7 +79,7 @@ int request_dma(unsigned int chan, const char *device_id)
return ret;
bad_dma:
- printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
+ pr_err("dma: trying to allocate DMA%d\n", chan);
return -EINVAL;
busy:
@@ -100,7 +100,7 @@ void free_dma(unsigned int chan)
goto bad_dma;
if (dma->active) {
- printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
+ pr_err("dma%d: freeing active DMA\n", chan);
dma->d_ops->disable(chan, dma);
dma->active = 0;
}
@@ -111,11 +111,11 @@ void free_dma(unsigned int chan)
return;
}
- printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
+ pr_err("dma%d: trying to free free DMA\n", chan);
return;
bad_dma:
- printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
+ pr_err("dma: trying to free DMA%d\n", chan);
}
EXPORT_SYMBOL(free_dma);
@@ -126,8 +126,7 @@ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
dma_t *dma = dma_channel(chan);
if (dma->active)
- printk(KERN_ERR "dma%d: altering DMA SG while "
- "DMA active\n", chan);
+ pr_err("dma%d: altering DMA SG while DMA active\n", chan);
dma->sg = sg;
dma->sgcount = nr_sg;
@@ -144,8 +143,7 @@ void __set_dma_addr (unsigned int chan, void *addr)
dma_t *dma = dma_channel(chan);
if (dma->active)
- printk(KERN_ERR "dma%d: altering DMA address while "
- "DMA active\n", chan);
+ pr_err("dma%d: altering DMA address while DMA active\n", chan);
dma->sg = NULL;
dma->addr = addr;
@@ -162,8 +160,7 @@ void set_dma_count (unsigned int chan, unsigned long count)
dma_t *dma = dma_channel(chan);
if (dma->active)
- printk(KERN_ERR "dma%d: altering DMA count while "
- "DMA active\n", chan);
+ pr_err("dma%d: altering DMA count while DMA active\n", chan);
dma->sg = NULL;
dma->count = count;
@@ -178,8 +175,7 @@ void set_dma_mode (unsigned int chan, unsigned int mode)
dma_t *dma = dma_channel(chan);
if (dma->active)
- printk(KERN_ERR "dma%d: altering DMA mode while "
- "DMA active\n", chan);
+ pr_err("dma%d: altering DMA mode while DMA active\n", chan);
dma->dma_mode = mode;
dma->invalid = 1;
@@ -202,7 +198,7 @@ void enable_dma (unsigned int chan)
return;
free_dma:
- printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
+ pr_err("dma%d: trying to enable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(enable_dma);
@@ -223,7 +219,7 @@ void disable_dma (unsigned int chan)
return;
free_dma:
- printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
+ pr_err("dma%d: trying to disable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(disable_dma);
@@ -240,7 +236,7 @@ EXPORT_SYMBOL(dma_channel_active);
void set_dma_page(unsigned int chan, char pagenr)
{
- printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
+ pr_err("dma%d: trying to set_dma_page\n", chan);
}
EXPORT_SYMBOL(set_dma_page);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6bb09d4abdea..f8ccc21fa032 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -109,241 +109,6 @@ ENDPROC(ret_from_fork)
#undef CALL
#define CALL(x) .long x
-#ifdef CONFIG_FUNCTION_TRACER
-/*
- * When compiling with -pg, gcc inserts a call to the mcount routine at the
- * start of every function. In mcount, apart from the function's address (in
- * lr), we need to get hold of the function's caller's address.
- *
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- * bl mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- * mov ip, sp
- * push {fp, ip, lr, pc}
- * sub fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
- *
- * push {lr}
- * bl __gnu_mcount_nc
- *
- * With these compilers, frame pointers are not necessary.
- *
- * mcount can be thought of as a function called in the middle of a subroutine
- * call. As such, it needs to be transparent for both the caller and the
- * callee: the original lr needs to be restored when leaving mcount, and no
- * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
- * clobber the ip register. This is OK because the ARM calling convention
- * allows it to be clobbered in subroutines and doesn't use it to hold
- * parameters.)
- *
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
- */
-
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
-.macro mcount_adjust_addr rd, rn
- bic \rd, \rn, #1 @ clear the Thumb bit if present
- sub \rd, \rd, #MCOUNT_INSN_SIZE
-.endm
-
-.macro __mcount suffix
- mcount_enter
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, .Lftrace_stub
- cmp r0, r2
- bne 1f
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ldr r1, =ftrace_graph_return
- ldr r2, [r1]
- cmp r0, r2
- bne ftrace_graph_caller\suffix
-
- ldr r1, =ftrace_graph_entry
- ldr r2, [r1]
- ldr r0, =ftrace_graph_entry_stub
- cmp r0, r2
- bne ftrace_graph_caller\suffix
-#endif
-
- mcount_exit
-
-1: mcount_get_lr r1 @ lr of instrumented func
- mcount_adjust_addr r0, lr @ instrumented function
- adr lr, BSYM(2f)
- mov pc, r2
-2: mcount_exit
-.endm
-
-.macro __ftrace_caller suffix
- mcount_enter
-
- mcount_get_lr r1 @ lr of instrumented func
- mcount_adjust_addr r0, lr @ instrumented function
-
- .globl ftrace_call\suffix
-ftrace_call\suffix:
- bl ftrace_stub
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl ftrace_graph_call\suffix
-ftrace_graph_call\suffix:
- mov r0, r0
-#endif
-
- mcount_exit
-.endm
-
-.macro __ftrace_graph_caller
- sub r0, fp, #4 @ &lr of instrumented routine (&parent)
-#ifdef CONFIG_DYNAMIC_FTRACE
- @ called from __ftrace_caller, saved in mcount_enter
- ldr r1, [sp, #16] @ instrumented routine (func)
- mcount_adjust_addr r1, r1
-#else
- @ called from __mcount, untouched in lr
- mcount_adjust_addr r1, lr @ instrumented routine (func)
-#endif
- mov r2, fp @ frame pointer
- bl prepare_ftrace_return
- mcount_exit
-.endm
-
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
- stmdb sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
- ldr lr, [fp, #-4]
- ldmia sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- stmdb sp!, {lr}
- ldr lr, [fp, #-4]
- ldmia sp!, {pc}
-#else
- __mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
- __ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
- __ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
-/*
- * __gnu_mcount_nc
- */
-
-.macro mcount_enter
-/*
- * This pad compensates for the push {lr} at the call site. Note that we are
- * unable to unwind through a function which does not otherwise save its lr.
- */
- UNWIND(.pad #4)
- stmdb sp!, {r0-r3, lr}
- UNWIND(.save {r0-r3, lr})
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [sp, #20]
-.endm
-
-.macro mcount_exit
- ldmia sp!, {r0-r3, ip, lr}
- ret ip
-.endm
-
-ENTRY(__gnu_mcount_nc)
-UNWIND(.fnstart)
-#ifdef CONFIG_DYNAMIC_FTRACE
- mov ip, lr
- ldmia sp!, {lr}
- ret ip
-#else
- __mcount
-#endif
-UNWIND(.fnend)
-ENDPROC(__gnu_mcount_nc)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller)
-UNWIND(.fnstart)
- __ftrace_caller
-UNWIND(.fnend)
-ENDPROC(ftrace_caller)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-UNWIND(.fnstart)
- __ftrace_graph_caller
-UNWIND(.fnend)
-ENDPROC(ftrace_graph_caller)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
- stmdb sp!, {r0-r3}
- mov r0, fp @ frame pointer
- bl ftrace_return_to_handler
- mov lr, r0 @ r0 has real ret addr
- ldmia sp!, {r0-r3}
- ret lr
-#endif
-
-ENTRY(ftrace_stub)
-.Lftrace_stub:
- ret lr
-ENDPROC(ftrace_stub)
-
-#endif /* CONFIG_FUNCTION_TRACER */
-
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..fe57c73e70a4
--- /dev/null
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -0,0 +1,243 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/assembler.h>
+#include <asm/ftrace.h>
+#include <asm/unwind.h>
+
+#include "entry-header.S"
+
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function. In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ * bl mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ * mov ip, sp
+ * push {fp, ip, lr, pc}
+ * sub fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ * push {lr}
+ * bl __gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call. As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register. This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
+ */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+.macro mcount_adjust_addr rd, rn
+ bic \rd, \rn, #1 @ clear the Thumb bit if present
+ sub \rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
+.macro __mcount suffix
+ mcount_enter
+ ldr r0, =ftrace_trace_function
+ ldr r2, [r0]
+ adr r0, .Lftrace_stub
+ cmp r0, r2
+ bne 1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ldr r1, =ftrace_graph_return
+ ldr r2, [r1]
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+
+ ldr r1, =ftrace_graph_entry
+ ldr r2, [r1]
+ ldr r0, =ftrace_graph_entry_stub
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+#endif
+
+ mcount_exit
+
+1: mcount_get_lr r1 @ lr of instrumented func
+ mcount_adjust_addr r0, lr @ instrumented function
+ adr lr, BSYM(2f)
+ mov pc, r2
+2: mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+ mcount_enter
+
+ mcount_get_lr r1 @ lr of instrumented func
+ mcount_adjust_addr r0, lr @ instrumented function
+
+ .globl ftrace_call\suffix
+ftrace_call\suffix:
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+ mov r0, r0
+#endif
+
+ mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+ sub r0, fp, #4 @ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ @ called from __ftrace_caller, saved in mcount_enter
+ ldr r1, [sp, #16] @ instrumented routine (func)
+ mcount_adjust_addr r1, r1
+#else
+ @ called from __mcount, untouched in lr
+ mcount_adjust_addr r1, lr @ instrumented routine (func)
+#endif
+ mov r2, fp @ frame pointer
+ bl prepare_ftrace_return
+ mcount_exit
+.endm
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+ stmdb sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+ ldr lr, [fp, #-4]
+ ldmia sp!, {r0-r3, pc}
+.endm
+
+ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ stmdb sp!, {lr}
+ ldr lr, [fp, #-4]
+ ldmia sp!, {pc}
+#else
+ __mcount _old
+#endif
+ENDPROC(mcount)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller_old)
+ __ftrace_caller _old
+ENDPROC(ftrace_caller_old)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+ __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
+ stmdb sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
+.endm
+
+.macro mcount_get_lr reg
+ ldr \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
+ ldmia sp!, {r0-r3, ip, lr}
+ ret ip
+.endm
+
+ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ mov ip, lr
+ ldmia sp!, {lr}
+ ret ip
+#else
+ __mcount
+#endif
+UNWIND(.fnend)
+ENDPROC(__gnu_mcount_nc)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+UNWIND(.fnstart)
+ __ftrace_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
+ __ftrace_graph_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_caller)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl return_to_handler
+return_to_handler:
+ stmdb sp!, {r0-r3}
+ mov r0, fp @ frame pointer
+ bl ftrace_return_to_handler
+ mov lr, r0 @ r0 has real ret addr
+ ldmia sp!, {r0-r3}
+ ret lr
+#endif
+
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+ ret lr
+ENDPROC(ftrace_stub)
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
deleted file mode 100644
index 131a6ab5f355..000000000000
--- a/arch/arm/kernel/etm.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- * linux/arch/arm/kernel/etm.c
- *
- * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/amba/bus.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <asm/hardware/coresight.h>
-#include <asm/sections.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alexander Shishkin");
-
-/*
- * ETM tracer state
- */
-struct tracectx {
- unsigned int etb_bufsz;
- void __iomem *etb_regs;
- void __iomem *etm_regs;
- unsigned long flags;
- int ncmppairs;
- int etm_portsz;
- struct device *dev;
- struct clk *emu_clk;
- struct mutex mutex;
-};
-
-static struct tracectx tracer;
-
-static inline bool trace_isrunning(struct tracectx *t)
-{
- return !!(t->flags & TRACER_RUNNING);
-}
-
-static int etm_setup_address_range(struct tracectx *t, int n,
- unsigned long start, unsigned long end, int exclude, int data)
-{
- u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
- ETMAAT_NOVALCMP;
-
- if (n < 1 || n > t->ncmppairs)
- return -EINVAL;
-
- /* comparators and ranges are numbered starting with 1 as opposed
- * to bits in a word */
- n--;
-
- if (data)
- flags |= ETMAAT_DLOADSTORE;
- else
- flags |= ETMAAT_IEXEC;
-
- /* first comparator for the range */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
- etm_writel(t, start, ETMR_COMP_VAL(n * 2));
-
- /* second comparator is right next to it */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
- etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
- flags = exclude ? ETMTE_INCLEXCL : 0;
- etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
-
- return 0;
-}
-
-static int trace_start(struct tracectx *t)
-{
- u32 v;
- unsigned long timeout = TRACER_TIMEOUT;
-
- etb_unlock(t);
-
- etb_writel(t, 0, ETBR_FORMATTERCTRL);
- etb_writel(t, 1, ETBR_CTRL);
-
- etb_lock(t);
-
- /* configure etm */
- v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
-
- if (t->flags & TRACER_CYCLE_ACC)
- v |= ETMCTRL_CYCLEACCURATE;
-
- etm_unlock(t);
-
- etm_writel(t, v, ETMR_CTRL);
-
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_setup_address_range(t, 1, (unsigned long)_stext,
- (unsigned long)_etext, 0, 0);
- etm_writel(t, 0, ETMR_TRACEENCTRL2);
- etm_writel(t, 0, ETMR_TRACESSCTRL);
- etm_writel(t, 0x6f, ETMR_TRACEENEVT);
-
- v &= ~ETMCTRL_PROGRAM;
- v |= ETMCTRL_PORTSEL;
-
- etm_writel(t, v, ETMR_CTRL);
-
- timeout = TRACER_TIMEOUT;
- while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_lock(t);
-
- t->flags |= TRACER_RUNNING;
-
- return 0;
-}
-
-static int trace_stop(struct tracectx *t)
-{
- unsigned long timeout = TRACER_TIMEOUT;
-
- etm_unlock(t);
-
- etm_writel(t, 0x440, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_lock(t);
-
- etb_unlock(t);
- etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
-
- timeout = TRACER_TIMEOUT;
- while (etb_readl(t, ETBR_FORMATTERCTRL) &
- ETBFF_MANUAL_FLUSH && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for formatter flush to commence "
- "timed out\n");
- etb_lock(t);
- return -EFAULT;
- }
-
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_lock(t);
-
- t->flags &= ~TRACER_RUNNING;
-
- return 0;
-}
-
-static int etb_getdatalen(struct tracectx *t)
-{
- u32 v;
- int rp, wp;
-
- v = etb_readl(t, ETBR_STATUS);
-
- if (v & 1)
- return t->etb_bufsz;
-
- rp = etb_readl(t, ETBR_READADDR);
- wp = etb_readl(t, ETBR_WRITEADDR);
-
- if (rp > wp) {
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- return 0;
- }
-
- return wp - rp;
-}
-
-/* sysrq+v will always stop the running trace and leave it at that */
-static void etm_dump(void)
-{
- struct tracectx *t = &tracer;
- u32 first = 0;
- int length;
-
- if (!t->etb_regs) {
- printk(KERN_INFO "No tracing hardware found\n");
- return;
- }
-
- if (trace_isrunning(t))
- trace_stop(t);
-
- etb_unlock(t);
-
- length = etb_getdatalen(t);
-
- if (length == t->etb_bufsz)
- first = etb_readl(t, ETBR_WRITEADDR);
-
- etb_writel(t, first, ETBR_READADDR);
-
- printk(KERN_INFO "Trace buffer contents length: %d\n", length);
- printk(KERN_INFO "--- ETB buffer begin ---\n");
- for (; length; length--)
- printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
- printk(KERN_INFO "\n--- ETB buffer end ---\n");
-
- /* deassert the overflow bit */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- etb_lock(t);
-}
-
-static void sysrq_etm_dump(int key)
-{
- dev_dbg(tracer.dev, "Dumping ETB buffer\n");
- etm_dump();
-}
-
-static struct sysrq_key_op sysrq_etm_op = {
- .handler = sysrq_etm_dump,
- .help_msg = "etm-buffer-dump(v)",
- .action_msg = "etm",
-};
-
-static int etb_open(struct inode *inode, struct file *file)
-{
- if (!tracer.etb_regs)
- return -ENODEV;
-
- file->private_data = &tracer;
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t etb_read(struct file *file, char __user *data,
- size_t len, loff_t *ppos)
-{
- int total, i;
- long length;
- struct tracectx *t = file->private_data;
- u32 first = 0;
- u32 *buf;
-
- mutex_lock(&t->mutex);
-
- if (trace_isrunning(t)) {
- length = 0;
- goto out;
- }
-
- etb_unlock(t);
-
- total = etb_getdatalen(t);
- if (total == t->etb_bufsz)
- first = etb_readl(t, ETBR_WRITEADDR);
-
- etb_writel(t, first, ETBR_READADDR);
-
- length = min(total * 4, (int)len);
- buf = vmalloc(length);
-
- dev_dbg(t->dev, "ETB buffer length: %d\n", total);
- dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
- for (i = 0; i < length / 4; i++)
- buf[i] = etb_readl(t, ETBR_READMEM);
-
- /* the only way to deassert overflow bit in ETB status is this */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_WRITEADDR);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
- etb_lock(t);
-
- length -= copy_to_user(data, buf, length);
- vfree(buf);
-
-out:
- mutex_unlock(&t->mutex);
-
- return length;
-}
-
-static int etb_release(struct inode *inode, struct file *file)
-{
- /* there's nothing to do here, actually */
- return 0;
-}
-
-static const struct file_operations etb_fops = {
- .owner = THIS_MODULE,
- .read = etb_read,
- .open = etb_open,
- .release = etb_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice etb_miscdev = {
- .name = "tracebuf",
- .minor = 0,
- .fops = &etb_fops,
-};
-
-static int etb_probe(struct amba_device *dev, const struct amba_id *id)
-{
- struct tracectx *t = &tracer;
- int ret = 0;
-
- ret = amba_request_regions(dev, NULL);
- if (ret)
- goto out;
-
- t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etb_regs) {
- ret = -ENOMEM;
- goto out_release;
- }
-
- amba_set_drvdata(dev, t);
-
- etb_miscdev.parent = &dev->dev;
-
- ret = misc_register(&etb_miscdev);
- if (ret)
- goto out_unmap;
-
- t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
- if (IS_ERR(t->emu_clk)) {
- dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
- return -EFAULT;
- }
-
- clk_enable(t->emu_clk);
-
- etb_unlock(t);
- t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
- dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
-
- /* make sure trace capture is disabled */
- etb_writel(t, 0, ETBR_CTRL);
- etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
- etb_lock(t);
-
- dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
-
-out:
- return ret;
-
-out_unmap:
- iounmap(t->etb_regs);
-
-out_release:
- amba_release_regions(dev);
-
- return ret;
-}
-
-static int etb_remove(struct amba_device *dev)
-{
- struct tracectx *t = amba_get_drvdata(dev);
-
- iounmap(t->etb_regs);
- t->etb_regs = NULL;
-
- clk_disable(t->emu_clk);
- clk_put(t->emu_clk);
-
- amba_release_regions(dev);
-
- return 0;
-}
-
-static struct amba_id etb_ids[] = {
- {
- .id = 0x0003b907,
- .mask = 0x0007ffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver etb_driver = {
- .drv = {
- .name = "etb",
- .owner = THIS_MODULE,
- },
- .probe = etb_probe,
- .remove = etb_remove,
- .id_table = etb_ids,
-};
-
-/* use a sysfs file "trace_running" to start/stop tracing */
-static ssize_t trace_running_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%x\n", trace_isrunning(&tracer));
-}
-
-static ssize_t trace_running_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned int value;
- int ret;
-
- if (sscanf(buf, "%u", &value) != 1)
- return -EINVAL;
-
- mutex_lock(&tracer.mutex);
- ret = value ? trace_start(&tracer) : trace_stop(&tracer);
- mutex_unlock(&tracer.mutex);
-
- return ret ? : n;
-}
-
-static struct kobj_attribute trace_running_attr =
- __ATTR(trace_running, 0644, trace_running_show, trace_running_store);
-
-static ssize_t trace_info_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
- int datalen;
-
- etb_unlock(&tracer);
- datalen = etb_getdatalen(&tracer);
- etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
- etb_ra = etb_readl(&tracer, ETBR_READADDR);
- etb_st = etb_readl(&tracer, ETBR_STATUS);
- etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
- etb_lock(&tracer);
-
- etm_unlock(&tracer);
- etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
- etm_st = etm_readl(&tracer, ETMR_STATUS);
- etm_lock(&tracer);
-
- return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
- "ETBR_WRITEADDR:\t%08x\n"
- "ETBR_READADDR:\t%08x\n"
- "ETBR_STATUS:\t%08x\n"
- "ETBR_FORMATTERCTRL:\t%08x\n"
- "ETMR_CTRL:\t%08x\n"
- "ETMR_STATUS:\t%08x\n",
- datalen,
- tracer.ncmppairs,
- etb_wa,
- etb_ra,
- etb_st,
- etb_fc,
- etm_ctrl,
- etm_st
- );
-}
-
-static struct kobj_attribute trace_info_attr =
- __ATTR(trace_info, 0444, trace_info_show, NULL);
-
-static ssize_t trace_mode_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d %d\n",
- !!(tracer.flags & TRACER_CYCLE_ACC),
- tracer.etm_portsz);
-}
-
-static ssize_t trace_mode_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned int cycacc, portsz;
-
- if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
- return -EINVAL;
-
- mutex_lock(&tracer.mutex);
- if (cycacc)
- tracer.flags |= TRACER_CYCLE_ACC;
- else
- tracer.flags &= ~TRACER_CYCLE_ACC;
-
- tracer.etm_portsz = portsz & 0x0f;
- mutex_unlock(&tracer.mutex);
-
- return n;
-}
-
-static struct kobj_attribute trace_mode_attr =
- __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-
-static int etm_probe(struct amba_device *dev, const struct amba_id *id)
-{
- struct tracectx *t = &tracer;
- int ret = 0;
-
- if (t->etm_regs) {
- dev_dbg(&dev->dev, "ETM already initialized\n");
- ret = -EBUSY;
- goto out;
- }
-
- ret = amba_request_regions(dev, NULL);
- if (ret)
- goto out;
-
- t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etm_regs) {
- ret = -ENOMEM;
- goto out_release;
- }
-
- amba_set_drvdata(dev, t);
-
- mutex_init(&t->mutex);
- t->dev = &dev->dev;
- t->flags = TRACER_CYCLE_ACC;
- t->etm_portsz = 1;
-
- etm_unlock(t);
- (void)etm_readl(t, ETMMR_PDSR);
- /* dummy first read */
- (void)etm_readl(&tracer, ETMMR_OSSRR);
-
- t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
- etm_writel(t, 0x440, ETMR_CTRL);
- etm_lock(t);
-
- ret = sysfs_create_file(&dev->dev.kobj,
- &trace_running_attr.attr);
- if (ret)
- goto out_unmap;
-
- /* failing to create any of these two is not fatal */
- ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
- if (ret)
- dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
-
- ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
- if (ret)
- dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
-
- dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
-
-out:
- return ret;
-
-out_unmap:
- iounmap(t->etm_regs);
-
-out_release:
- amba_release_regions(dev);
-
- return ret;
-}
-
-static int etm_remove(struct amba_device *dev)
-{
- struct tracectx *t = amba_get_drvdata(dev);
-
- iounmap(t->etm_regs);
- t->etm_regs = NULL;
-
- amba_release_regions(dev);
-
- sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
- return 0;
-}
-
-static struct amba_id etm_ids[] = {
- {
- .id = 0x0003b921,
- .mask = 0x0007ffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver etm_driver = {
- .drv = {
- .name = "etm",
- .owner = THIS_MODULE,
- },
- .probe = etm_probe,
- .remove = etm_remove,
- .id_table = etm_ids,
-};
-
-static int __init etm_init(void)
-{
- int retval;
-
- retval = amba_driver_register(&etb_driver);
- if (retval) {
- printk(KERN_ERR "Failed to register etb\n");
- return retval;
- }
-
- retval = amba_driver_register(&etm_driver);
- if (retval) {
- amba_driver_unregister(&etb_driver);
- printk(KERN_ERR "Failed to probe etm\n");
- return retval;
- }
-
- /* not being able to install this handler is not fatal */
- (void)register_sysrq_key('v', &sysrq_etm_op);
-
- return 0;
-}
-
-device_initcall(etm_init);
-
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index b37752a96652..059c3da0fee3 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -124,7 +124,7 @@ int claim_fiq(struct fiq_handler *f)
void release_fiq(struct fiq_handler *f)
{
if (current_fiq != f) {
- printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
+ pr_err("%s FIQ trying to release %s FIQ\n",
f->name, current_fiq->name);
dump_stack();
return;
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e..b8c75e45a950 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/opcodes.h>
@@ -35,6 +36,22 @@
#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+static int __ftrace_modify_code(void *data)
+{
+ int *command = data;
+
+ set_kernel_text_rw();
+ ftrace_modify_all_code(*command);
+ set_kernel_text_ro();
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
int ftrace_arch_code_modify_post_process(void)
{
set_all_modules_text_ro();
+ /* Make sure any TLB misses during machine stop are cleared. */
+ flush_tlb_all();
return 0;
}
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b5b452f90f76..7fc70ae21185 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -29,6 +29,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
+#include <linux/coresight.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -36,7 +37,6 @@
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
-#include <asm/hardware/coresight.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -976,7 +976,7 @@ static void reset_ctrl_regs(void *unused)
* Unconditionally clear the OS lock by writing a value
* other than CS_LAR_KEY to the access register.
*/
- ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
+ ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
isb();
/*
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index 9203cf883330..eedefe050022 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -51,6 +51,7 @@ void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
from++;
}
}
+EXPORT_SYMBOL(_memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
@@ -66,6 +67,7 @@ void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
to++;
}
}
+EXPORT_SYMBOL(_memcpy_toio);
/*
* "memset" on IO memory space.
@@ -79,7 +81,4 @@ void _memset_io(volatile void __iomem *dst, int c, size_t count)
dst++;
}
}
-
-EXPORT_SYMBOL(_memcpy_fromio);
-EXPORT_SYMBOL(_memcpy_toio);
EXPORT_SYMBOL(_memset_io);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7c81ec428b9b..ad857bada96c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,6 +31,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@@ -82,7 +83,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (irq >= nr_irqs) {
- printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
+ pr_err("Trying to set irq flags for IRQ%d\n", irq);
return;
}
@@ -135,7 +136,6 @@ int __init arch_probe_nr_irqs(void)
#endif
#ifdef CONFIG_HOTPLUG_CPU
-
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -187,8 +187,8 @@ void migrate_irqs(void)
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
- if (affinity_broken && printk_ratelimit())
- pr_warn("IRQ%u no longer affine to CPU%u\n",
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, smp_processor_id());
}
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index ad58e565fe98..49fadbda8c63 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -58,6 +58,7 @@
#define MMX_SIZE (0x98)
.text
+ .arm
/*
* Lazy switching of Concan coprocessor context
@@ -182,6 +183,8 @@ concan_load:
tmcr wCon, r2
ret lr
+ENDPROC(iwmmxt_task_enable)
+
/*
* Back up Concan regs to save area and disable access to them
* (mainly for gdb or sleep mode usage)
@@ -232,6 +235,8 @@ ENTRY(iwmmxt_task_disable)
1: msr cpsr_c, ip @ restore interrupt mode
ldmfd sp!, {r4, pc}
+ENDPROC(iwmmxt_task_disable)
+
/*
* Copy Concan state to given memory address
*
@@ -268,6 +273,8 @@ ENTRY(iwmmxt_task_copy)
msr cpsr_c, ip @ restore interrupt mode
ret r3
+ENDPROC(iwmmxt_task_copy)
+
/*
* Restore Concan state from given memory address
*
@@ -304,6 +311,8 @@ ENTRY(iwmmxt_task_restore)
msr cpsr_c, ip @ restore interrupt mode
ret r3
+ENDPROC(iwmmxt_task_restore)
+
/*
* Concan handling on task switch
*
@@ -335,6 +344,8 @@ ENTRY(iwmmxt_task_switch)
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
+ENDPROC(iwmmxt_task_switch)
+
/*
* Remove Concan ownership of given task
*
@@ -353,6 +364,8 @@ ENTRY(iwmmxt_task_release)
msr cpsr_c, r2 @ restore interrupts
ret lr
+ENDPROC(iwmmxt_task_release)
+
.data
concan_owner:
.word 0
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 4ce4f789446d..afeeb9ea6f43 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
insn = arm_gen_nop();
if (is_static)
- __patch_text(addr, insn);
+ __patch_text_early(addr, insn);
else
patch_text(addr, insn);
}
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a74b53c1b7df..07db2f8a1b45 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -12,8 +12,12 @@
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
#include <asm/traps.h>
+#include "patch.h"
+
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
unregister_die_notifier(&kgdb_notifier);
}
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+
+ /* patch_text() only supports int-sized breakpoints */
+ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
+
+ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+
+ patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+
+ return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+
+ return 0;
+}
+
/*
* Register our undef instruction hooks with ARM undef core.
* We register a hook specifically looking for the KGDB break inst
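
One hedged note on the new kgdb_arch_set_breakpoint() above: probe_kernel_read() copies like memcpy() but returns an error (typically -EFAULT) instead of faulting if the source address is not mapped, so saving the original instruction can fail gracefully before anything is patched. A minimal sketch of that save step, with an illustrative helper name:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int save_original_insn_sketch(void *dst, const void *text_addr, size_t size)
{
	int err = probe_kernel_read(dst, text_addr, size);

	if (err)
		return err;	/* nothing was patched; the caller can just bail out */
	return 0;
}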
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8cf0996aa1a8..de2b085ad753 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
+static unsigned long dt_mem;
/*
* Provide a dummy crash_notes definition while crash dump arrives to arm.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
return err;
if (be32_to_cpu(header) == OF_DT_HEADER)
- kexec_boot_atags = current_segment->mem;
+ dt_mem = current_segment->mem;
}
return 0;
}
@@ -126,12 +127,12 @@ void machine_crash_shutdown(struct pt_regs *regs)
msecs--;
}
if (atomic_read(&waiting_for_crash_ipi) > 0)
- printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
+ pr_warn("Non-crashing CPUs did not react to IPI\n");
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
- printk(KERN_INFO "Loading crashdump kernel...\n");
+ pr_info("Loading crashdump kernel...\n");
}
/*
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
/* Prepare parameters for reboot_code_buffer*/
+ set_kernel_text_rw();
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
- if (!kexec_boot_atags)
- kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
+ kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
+ + KEXEC_ARM_ATAGS_OFFSET;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
@@ -177,7 +178,7 @@ void machine_kexec(struct kimage *image)
reboot_entry_phys = (unsigned long)reboot_entry +
(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
- printk(KERN_INFO "Bye!\n");
+ pr_info("Bye!\n");
if (kexec_reinit)
kexec_reinit();
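
For readers unfamiliar with the "dt_mem ?: ..." form introduced above: "a ?: b" is the GNU C conditional shorthand for "a ? a : b", with "a" evaluated only once. A small userspace example with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long dt_mem = 0;			/* hypothetical: no DTB segment was found */
	unsigned long atags_fallback = 0x10000100;	/* hypothetical ATAGS address */
	unsigned long boot_atags = dt_mem ?: atags_fallback;

	printf("%#lx\n", boot_atags);			/* prints 0x10000100 */
	return 0;
}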
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6a4dffefd357..bea7db9e5b80 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -251,7 +251,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
#endif
default:
- printk(KERN_ERR "%s: unknown relocation: %u\n",
+ pr_err("%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
return -ENOEXEC;
}
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af47733..5038960e3c55 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -1,8 +1,11 @@
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#include <linux/kprobes.h>
+#include <linux/mm.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
@@ -13,21 +16,77 @@ struct patch {
unsigned int insn;
};
-void __kprobes __patch_text(void *addr, unsigned int insn)
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+{
+ unsigned int uintaddr = (uintptr_t) addr;
+ bool module = !core_kernel_text(uintaddr);
+ struct page *page;
+
+ if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ page = vmalloc_to_page(addr);
+ else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+ page = virt_to_page(addr);
+ else
+ return addr;
+
+ if (flags)
+ spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+ set_fixmap(fixmap, page_to_phys(page));
+
+ return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ __releases(&patch_lock)
+{
+ clear_fixmap(fixmap);
+
+ if (flags)
+ spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+}
+
+void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+ unsigned int uintaddr = (uintptr_t) addr;
+ bool twopage = false;
+ unsigned long flags;
+ void *waddr = addr;
int size;
+ if (remap)
+ waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
+ else
+ __acquire(&patch_lock);
+
if (thumb2 && __opcode_is_thumb16(insn)) {
- *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+ *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
size = sizeof(u16);
- } else if (thumb2 && ((uintptr_t)addr & 2)) {
+ } else if (thumb2 && (uintaddr & 2)) {
u16 first = __opcode_thumb32_first(insn);
u16 second = __opcode_thumb32_second(insn);
- u16 *addrh = addr;
+ u16 *addrh0 = waddr;
+ u16 *addrh1 = waddr + 2;
+
+ twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
+ if (twopage && remap)
+ addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
+
+ *addrh0 = __opcode_to_mem_thumb16(first);
+ *addrh1 = __opcode_to_mem_thumb16(second);
- addrh[0] = __opcode_to_mem_thumb16(first);
- addrh[1] = __opcode_to_mem_thumb16(second);
+ if (twopage && addrh1 != addr + 2) {
+ flush_kernel_vmap_range(addrh1, 2);
+ patch_unmap(FIX_TEXT_POKE1, NULL);
+ }
size = sizeof(u32);
} else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
else
insn = __opcode_to_mem_arm(insn);
- *(u32 *)addr = insn;
+ *(u32 *)waddr = insn;
size = sizeof(u32);
}
+ if (waddr != addr) {
+ flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+ patch_unmap(FIX_TEXT_POKE0, &flags);
+ } else
+ __release(&patch_lock);
+
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
}
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
.insn = insn,
};
- if (cache_ops_need_broadcast()) {
- stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
- } else {
- bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
- && __opcode_is_thumb32(insn)
- && ((uintptr_t)addr & 2);
-
- if (straddles_word)
- stop_machine(patch_text_stop_machine, &patch, NULL);
- else
- __patch_text(addr, insn);
- }
+ stop_machine(patch_text_stop_machine, &patch, NULL);
}
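
A hedged sketch of the idea behind the new patch_map()/patch_unmap() pair above (the _sketch names are illustrative): when kernel or module text is mapped read-only, the page holding the target instruction is temporarily aliased at a writable fixmap slot and the write goes through that alias instead of the read-only mapping.

#include <linux/mm.h>
#include <linux/types.h>
#include <asm/fixmap.h>

static void *text_poke_alias_sketch(void *addr, int fixmap)
{
	struct page *page = virt_to_page(addr);		/* core kernel text case */
	unsigned long offset = (uintptr_t)addr & ~PAGE_MASK;

	set_fixmap(fixmap, page_to_phys(page));		/* writable alias of the page */
	return (void *)(__fix_to_virt(fixmap) + offset);
}

static void text_poke_unalias_sketch(int fixmap)
{
	clear_fixmap(fixmap);	/* tear the temporary alias down again */
}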
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
index b4731f2dac38..77e054c2f6cd 100644
--- a/arch/arm/kernel/patch.h
+++ b/arch/arm/kernel/patch.h
@@ -2,6 +2,16 @@
#define _ARM_KERNEL_PATCH_H
void patch_text(void *addr, unsigned int insn);
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, false);
+}
#endif
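
A short hedged illustration of when each entry point is meant to be used, mirroring the jump_label change earlier in this series: __patch_text_early() is for boot-time patching while the text is still writable, patch_text() for patching a live SMP kernel (stop_machine plus the fixmap remap).

#include <linux/types.h>
#include "patch.h"		/* the arch-local header shown above */

static void jump_label_patch_sketch(void *addr, unsigned int insn, bool is_static)
{
	if (is_static)
		__patch_text_early(addr, insn);	/* init time: text still writable */
	else
		patch_text(addr, insn);		/* live kernel: stop_machine + fixmap */
}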
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fe972a2f3df3..fdfa3a78ec8c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -51,8 +51,8 @@ EXPORT_SYMBOL(__stack_chk_guard);
static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
- "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
- "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
+ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
+ "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};
static const char *isa_modes[] __maybe_unused = {
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 98ea4b7eb406..24b4a04846eb 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -39,13 +39,12 @@ void *return_address(unsigned int level)
{
struct return_address_data data;
struct stackframe frame;
- register unsigned long current_sp asm ("sp");
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)return_address;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c03106378b49..f9c863911038 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -18,6 +18,7 @@
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -806,6 +807,7 @@ static int __init customize_machine(void)
* machine from the device tree, if no callback is provided,
* otherwise we would always need an init_machine callback.
*/
+ of_iommu_init();
if (machine_desc->init_machine)
machine_desc->init_machine();
#ifdef CONFIG_OF
@@ -900,6 +902,7 @@ void __init setup_arch(char **cmdline_p)
mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
machine_desc = mdesc;
machine_name = mdesc->name;
+ dump_stack_set_arch_desc("%s", mdesc->name);
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index bd1983437205..8aa6f1b87c9e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -592,7 +592,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
}
syscall = 0;
} else if (thread_flags & _TIF_UPROBE) {
- clear_thread_flag(TIF_UPROBE);
uprobe_notify_resume(regs);
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 13396d3d600e..5e6052e18850 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -225,7 +225,7 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ pr_notice("CPU%u: shutdown\n", cpu);
/*
* platform_cpu_kill() is generally expected to do the powering off
@@ -235,7 +235,7 @@ void __cpu_die(unsigned int cpu)
* the requesting CPU and the dying CPU actually losing power.
*/
if (!platform_cpu_kill(cpu))
- printk("CPU%u: unable to kill\n", cpu);
+ pr_err("CPU%u: unable to kill\n", cpu);
}
/*
@@ -351,7 +351,7 @@ asmlinkage void secondary_start_kernel(void)
cpu_init();
- printk("CPU%u: Booted secondary processor\n", cpu);
+ pr_debug("CPU%u: Booted secondary processor\n", cpu);
preempt_disable();
trace_hardirqs_off();
@@ -387,9 +387,6 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
- printk(KERN_INFO "SMP: Total of %d processors activated.\n",
- num_online_cpus());
-
hyp_mode_check();
}
@@ -521,7 +518,7 @@ static void ipi_cpu_stop(unsigned int cpu)
if (system_state == SYSTEM_BOOTING ||
system_state == SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
- printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ pr_crit("CPU%u: stopping\n", cpu);
dump_stack();
raw_spin_unlock(&stop_lock);
}
@@ -615,8 +612,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
- cpu, ipinr);
+ pr_crit("CPU%u: Unknown IPI message 0x%x\n",
+ cpu, ipinr);
break;
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 93090213c71c..172c6a05d27f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -199,7 +199,7 @@ static void twd_calibrate_rate(void)
* the timer ticks
*/
if (twd_timer_rate == 0) {
- printk(KERN_INFO "Calibrating local timer... ");
+ pr_info("Calibrating local timer... ");
/* Wait for a tick to start */
waitjiffies = get_jiffies_64() + 1;
@@ -223,7 +223,7 @@ static void twd_calibrate_rate(void)
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
- printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+ pr_cont("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
(twd_timer_rate / 10000) % 100);
}
}
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index f065eb05d254..92b72375c4c7 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -134,12 +134,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.pc = thread_saved_pc(tsk);
#endif
} else {
- register unsigned long current_sp asm ("sp");
-
/* We don't want this function nor the caller */
data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)__save_stack_trace;
}
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 587fdfe1a72c..afdd51e30bec 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -260,7 +260,7 @@ static int __init swp_emulation_init(void)
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
- printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
+ pr_notice("Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);
return 0;
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index e90a3148f385..b83f3b7737fb 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -400,7 +400,7 @@ asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
return sys_sendto(fd, buff, len, flags, addr, addrlen);
}
-asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags)
{
struct sockaddr __user *addr;
int msg_namelen;
@@ -446,7 +446,7 @@ asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
break;
case SYS_SENDMSG:
if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
- r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
+ r = sys_oabi_sendmsg(a[0], (struct user_msghdr __user *)a[1], a[2]);
break;
default:
r = sys_socketcall(call, args);
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 80f0d69205e7..8ff8dbfbe9fb 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -72,7 +72,7 @@ static int __init thumbee_init(void)
if ((pfr0 & 0x0000f000) != 0x00001000)
return 0;
- printk(KERN_INFO "ThumbEE CPU extension supported.\n");
+ pr_info("ThumbEE CPU extension supported.\n");
elf_hwcap |= HWCAP_THUMBEE;
thread_register_notifier(&thumbee_notifier_block);
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 89cfdd6e50cb..08b7847bf912 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -165,7 +165,7 @@ static void update_cpu_capacity(unsigned int cpu)
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
- printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
+ pr_info("CPU%u: update cpu_capacity %lu\n",
cpu, arch_scale_cpu_capacity(NULL, cpu));
}
@@ -269,7 +269,7 @@ void store_cpu_topology(unsigned int cpuid)
update_cpu_capacity(cpuid);
- printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
cpu_topology[cpuid].core_id,
cpu_topology[cpuid].socket_id, mpidr);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 9f5d81881eb6..788e23fe64d8 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -198,14 +198,14 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
}
if (!fp) {
- printk("no frame pointer");
+ pr_cont("no frame pointer");
ok = 0;
} else if (verify_stack(fp)) {
- printk("invalid frame pointer 0x%08x", fp);
+ pr_cont("invalid frame pointer 0x%08x", fp);
ok = 0;
} else if (fp < (unsigned long)end_of_stack(tsk))
- printk("frame pointer underflow");
- printk("\n");
+ pr_cont("frame pointer underflow");
+ pr_cont("\n");
if (ok)
c_backtrace(fp, mode);
@@ -240,8 +240,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
static int die_counter;
int ret;
- printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
- S_ISA "\n", str, err, ++die_counter);
+ pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
+ str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -250,8 +250,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
print_modules();
__show_regs(regs);
- printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
+ pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
@@ -446,7 +446,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
- printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
+ pr_info("%s (%d): undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
__show_regs(regs);
dump_instr(KERN_INFO, regs);
@@ -496,7 +496,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
console_verbose();
- printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);
+ pr_crit("Bad mode in %s handler detected\n", handler[reason]);
die("Oops - bad mode", regs, 0);
local_irq_disable();
@@ -516,7 +516,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_SYSCALL) {
- printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
+ pr_err("[%d] %s: obsolete system call %08x.\n",
task_pid_nr(current), current->comm, n);
dump_instr(KERN_ERR, regs);
}
@@ -694,7 +694,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
* something catastrophic has happened
*/
if (user_debug & UDBG_SYSCALL) {
- printk("[%d] %s: arm syscall %d\n",
+ pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
dump_instr("", regs);
if (user_mode(regs)) {
@@ -753,8 +753,8 @@ late_initcall(arm_mrc_hook_init);
void __bad_xchg(volatile void *ptr, int size)
{
- printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
- __builtin_return_address(0), ptr, size);
+ pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
+ __builtin_return_address(0), ptr, size);
BUG();
}
EXPORT_SYMBOL(__bad_xchg);
@@ -771,8 +771,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_BADABORT) {
- printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
- task_pid_nr(current), current->comm, code, instr);
+ pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
+ task_pid_nr(current), current->comm, code, instr);
dump_instr(KERN_ERR, regs);
show_pte(current->mm, addr);
}
@@ -788,29 +788,29 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
void __readwrite_bug(const char *fn)
{
- printk("%s called, but not implemented\n", fn);
+ pr_err("%s called, but not implemented\n", fn);
BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
void __pte_error(const char *file, int line, pte_t pte)
{
- printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
+ pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}
void __pmd_error(const char *file, int line, pmd_t pmd)
{
- printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
+ pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}
void __pgd_error(const char *file, int line, pgd_t pgd)
{
- printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
+ pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
asmlinkage void __div0(void)
{
- printk("Division by zero in kernel.\n");
+ pr_err("Division by zero in kernel.\n");
dump_stack();
}
EXPORT_SYMBOL(__div0);
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index cbb85c5fabf9..0bee233fef9a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -471,7 +471,6 @@ int unwind_frame(struct stackframe *frame)
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -485,7 +484,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->ARM_lr;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)unwind_backtrace;
} else {
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457a..b31aa73e8076 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
#define PROC_INFO \
. = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -112,6 +120,9 @@ SECTIONS
ARM_CPU_KEEP(PROC_INFO)
}
+#ifdef CONFIG_DEBUG_RODATA
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -145,7 +156,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+# else
. = ALIGN(PAGE_SIZE);
+# endif
__init_begin = .;
#endif
/*
@@ -219,7 +234,11 @@ SECTIONS
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
#else
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#else
. = ALIGN(THREAD_SIZE);
+#endif
__init_end = .;
__data_loc = .;
#endif
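
A small worked example of the alignment the linker-script hunks above add, assuming a 1 MiB section size (SECTION_SHIFT of 20 on classic ARM page tables; with LPAE it would be 2 MiB):

#include <stdio.h>

int main(void)
{
	unsigned long align = 1ul << 20;	/* assumed SECTION_SHIFT = 20, i.e. 1 MiB */
	unsigned long loc = 0x80123456;		/* hypothetical location counter */
	unsigned long aligned = (loc + align - 1) & ~(align - 1);

	printf("%#lx\n", aligned);		/* prints 0x80200000 */
	return 0;
}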
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index e42adc6bcdb1..bdbb8853a19b 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -157,15 +157,14 @@ static int __init xscale_cp0_init(void)
if (cpu_has_iwmmxt()) {
#ifndef CONFIG_IWMMXT
- printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
- "detected, but kernel support is missing.\n");
+ pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
- printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
+ pr_info("XScale iWMMXt coprocessor detected.\n");
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
#endif
} else {
- printk(KERN_INFO "XScale DSP coprocessor detected.\n");
+ pr_info("XScale DSP coprocessor detected.\n");
thread_register_notifier(&dsp_notifier_block);
cp_access |= 1;
}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e193c8a959e..2d6d91001062 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
int err;
struct kvm_vcpu *vcpu;
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+ err = -EBUSY;
+ goto out;
+ }
+
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
@@ -263,6 +268,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
@@ -419,6 +425,7 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
int ret;
if (likely(vcpu->arch.has_run_once))
@@ -427,15 +434,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
/*
- * Initialize the VGIC before running a vcpu the first time on
- * this VM.
+ * Map the VGIC hardware resources before running a vcpu the first
+ * time on this VM.
*/
- if (unlikely(!vgic_initialized(vcpu->kvm))) {
- ret = kvm_vgic_init(vcpu->kvm);
+ if (unlikely(!vgic_ready(kvm))) {
+ ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
}
+ /*
+ * Enable the arch timers only if we have an in-kernel VGIC
+ * and it has been properly initialized, since we cannot handle
+ * interrupts from the virtual timer with a userspace gic.
+ */
+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+ kvm_timer_enable(kvm);
+
return 0;
}
@@ -649,6 +664,48 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned int i;
+ int phys_target = kvm_target_cpu();
+
+ if (init->target != phys_target)
+ return -EINVAL;
+
+ /*
+ * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+ * use the same target.
+ */
+ if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+ return -EINVAL;
+
+ /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+ for (i = 0; i < sizeof(init->features) * 8; i++) {
+ bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+ if (set && i >= KVM_VCPU_MAX_FEATURES)
+ return -ENOENT;
+
+ /*
+ * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+ * use the same feature set.
+ */
+ if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+ test_bit(i, vcpu->arch.features) != set)
+ return -EINVAL;
+
+ if (set)
+ set_bit(i, vcpu->arch.features);
+ }
+
+ vcpu->arch.target = phys_target;
+
+ /* Now we know what it is, we can reset it. */
+ return kvm_reset_vcpu(vcpu);
+}
+
+
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
struct kvm_vcpu_init *init)
{
@@ -659,10 +716,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
return ret;
/*
+ * Ensure a rebooted VM will fault in RAM pages and detect if the
+ * guest MMU is turned off and flush the caches as needed.
+ */
+ if (vcpu->arch.has_run_once)
+ stage2_unmap_vm(vcpu->kvm);
+
+ vcpu_reset_hcr(vcpu);
+
+ /*
* Handle the "start in power-off" case by marking the VCPU as paused.
*/
- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
vcpu->arch.pause = true;
+ else
+ vcpu->arch.pause = false;
return 0;
}
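
A worked example (hypothetical numbers) of the features[i / 32] and 1 << (i % 32) indexing used by the new kvm_vcpu_set_target() above:

#include <stdio.h>

int main(void)
{
	unsigned int i = 40;			/* hypothetical feature bit number */
	unsigned int word = i / 32;		/* lives in init->features[1] */
	unsigned int mask = 1u << (i % 32);	/* bit 8 of that word, i.e. 0x100 */

	printf("features[%u] & %#x\n", word, mask);
	return 0;
}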
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index cc0b78769bd8..384bab67c462 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- vcpu->arch.hcr = HCR_GUEST_MASK;
return 0;
}
@@ -274,31 +273,6 @@ int __attribute_const__ kvm_target_cpu(void)
}
}
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
-
- /* We can only cope with guest==host and only on A15/A7 (for now). */
- if (init->target != kvm_target_cpu())
- return -EINVAL;
-
- vcpu->arch.target = init->target;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- if (test_bit(i, (void *)init->features)) {
- if (i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
- set_bit(i, vcpu->arch.features);
- }
- }
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 4cb5a93182e9..5d3bfc0eb3f0 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -187,15 +187,18 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
rt = vcpu->arch.mmio_decode.rt;
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
- trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
- KVM_TRACE_MMIO_READ_UNSATISFIED,
- mmio.len, fault_ipa,
- (mmio.is_write) ? data : 0);
+ if (mmio.is_write) {
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
+ mmio.len);
- if (mmio.is_write)
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
+ fault_ipa, data);
mmio_write_buf(mmio.data, mmio.len, data);
+ } else {
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+ fault_ipa, 0);
+ }
if (vgic_handle_mmio(vcpu, run, &mmio))
return 1;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8664ff17cbbe..1dc9778a00af 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -612,6 +612,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
unmap_range(kvm, kvm->arch.pgd, start, size);
}
+static void stage2_unmap_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ hva_t hva = memslot->userspace_addr;
+ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+ phys_addr_t size = PAGE_SIZE * memslot->npages;
+ hva_t reg_end = hva + size;
+
+ /*
+ * A memory region could potentially cover multiple VMAs, and any holes
+ * between them, so iterate over all of them to find out if we should
+ * unmap any of them.
+ *
+ * +--------------------------------------------+
+ * +---------------+----------------+ +----------------+
+ * | : VMA 1 | VMA 2 | | VMA 3 : |
+ * +---------------+----------------+ +----------------+
+ * | memory region |
+ * +--------------------------------------------+
+ */
+ do {
+ struct vm_area_struct *vma = find_vma(current->mm, hva);
+ hva_t vm_start, vm_end;
+
+ if (!vma || vma->vm_start >= reg_end)
+ break;
+
+ /*
+ * Take the intersection of this VMA with the memory region
+ */
+ vm_start = max(hva, vma->vm_start);
+ vm_end = min(reg_end, vma->vm_end);
+
+ if (!(vma->vm_flags & VM_PFNMAP)) {
+ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+ }
+ hva = vm_end;
+ } while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots)
+ stage2_unmap_memslot(kvm, memslot);
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
/**
* kvm_free_stage2_pgd - free all stage-2 tables
* @kvm: The KVM struct pointer for the VM.
@@ -853,6 +918,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct vm_area_struct *vma;
pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
+ bool fault_ipa_uncached;
write_fault = kvm_is_write_fault(vcpu);
if (fault_status == FSC_PERM && !write_fault) {
@@ -919,6 +985,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!hugetlb && !force_pte)
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+ fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
if (hugetlb) {
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
new_pmd = pmd_mkhuge(new_pmd);
@@ -926,7 +994,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+ coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+ fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -934,7 +1003,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+ coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+ fault_ipa_uncached);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
}
@@ -1294,11 +1364,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva = vm_end;
} while (hva < reg_end);
- if (ret) {
- spin_lock(&kvm->mmu_lock);
+ spin_lock(&kvm->mmu_lock);
+ if (ret)
unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
- spin_unlock(&kvm->mmu_lock);
- }
+ else
+ stage2_flush_memslot(kvm, memslot);
+ spin_unlock(&kvm->mmu_lock);
return ret;
}
@@ -1310,6 +1381,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
+ /*
+ * Readonly memslots are not incoherent with the caches by definition,
+ * but in practice, they are used mostly to emulate ROMs or NOR flashes
+ * that the guest may consider devices and hence map as uncached.
+ * To prevent incoherency issues in these cases, tag all readonly
+ * regions as incoherent.
+ */
+ if (slot->flags & KVM_MEM_READONLY)
+ slot->flags |= KVM_MEMSLOT_INCOHERENT;
return 0;
}
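
A worked example (all addresses hypothetical) of the memslot/VMA intersection that stage2_unmap_memslot() computes above:

#include <stdio.h>

int main(void)
{
	unsigned long hva = 0x40000000, reg_end = 0x40800000;		/* memory region */
	unsigned long vma_start = 0x3ff00000, vma_end = 0x40200000;	/* one VMA */

	unsigned long vm_start = hva > vma_start ? hva : vma_start;
	unsigned long vm_end = reg_end < vma_end ? reg_end : vma_end;

	/* Only this slice of the VMA overlaps the region and is unmapped from stage-2. */
	printf("unmap [%#lx, %#lx)\n", vm_start, vm_end);
	return 0;
}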
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf37737ee2..58cb3248d277 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>
@@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
+ int i;
+ struct kvm_vcpu *tmp;
+
+ /*
+ * The KVM ABI specifies that a system event exit may call KVM_RUN
+ * again and may perform shutdown/reboot at a later time than when the
+ * actual request is made. Since we are implementing PSCI and a
+ * caller of PSCI reboot and shutdown expects that the system shuts
+ * down or reboots immediately, let's make sure that VCPUs are not run
+ * after this call is handled and before the VCPUs have been
+ * re-initialized.
+ */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ tmp->arch.pause = true;
+ kvm_vcpu_kick(tmp);
+ }
+
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 66a477a3e3cc..7a235b9952be 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
/*
* Prototype:
@@ -77,6 +78,10 @@
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
.endm
+ .macro usave reg1 reg2
+ UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
+ .endm
+
.macro exit reg1 reg2
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 3bc8eb811a73..652e4d98cd47 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -53,6 +53,12 @@
* data as needed by the implementation including this code. Called
* upon code entry.
*
+ * usave reg1 reg2
+ *
+ * The unwind annotation macro corresponding to the 'enter' macro.
+ * It tells the unwinder that a prior 'enter' macro preserved the
+ * provided registers and additional data on the stack.
+ *
* exit reg1 reg2
*
* Restore registers with the values previously saved with the
@@ -67,7 +73,12 @@
*/
+ UNWIND( .fnstart )
enter r4, lr
+ UNWIND( .fnend )
+
+ UNWIND( .fnstart )
+ usave r4, lr @ in first stmdb block
subs r2, r2, #4
blt 8f
@@ -79,6 +90,11 @@
1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
+ UNWIND( .fnend )
+
+ UNWIND( .fnstart )
+ usave r4, lr
+ UNWIND( .save {r5 - r8} ) @ in second stmfd block
blt 5f
CALGN( ands ip, r0, #31 )
@@ -144,7 +160,10 @@
CALGN( bcs 2b )
7: ldmfd sp!, {r5 - r8}
+ UNWIND( .fnend ) @ end of second stmfd block
+ UNWIND( .fnstart )
+ usave r4, lr @ still in first stmdb block
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
@@ -173,10 +192,13 @@
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
+ UNWIND( .fnend )
.macro forward_copy_shift pull push
+ UNWIND( .fnstart )
+ usave r4, lr @ still in first stmdb block
subs r2, r2, #28
blt 14f
@@ -187,7 +209,11 @@
CALGN( bcc 15f )
11: stmfd sp!, {r5 - r9}
+ UNWIND( .fnend )
+ UNWIND( .fnstart )
+ usave r4, lr
+ UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
@@ -221,7 +247,10 @@
PLD( bge 13b )
ldmfd sp!, {r5 - r9}
+ UNWIND( .fnend ) @ end of the second stmfd block
+ UNWIND( .fnstart )
+ usave r4, lr @ still in first stmdb block
14: ands ip, r2, #28
beq 16f
@@ -236,6 +265,7 @@
16: sub r1, r1, #(\push / 8)
b 8b
+ UNWIND( .fnend )
.endm
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index d066df686e17..a9d3db16ecb5 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
/*
* Prototype:
@@ -80,6 +81,10 @@
stmdb sp!, {r0, r2, r3, \reg1, \reg2}
.endm
+ .macro usave reg1 reg2
+ UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
+ .endm
+
.macro exit reg1 reg2
add sp, sp, #8
ldmfd sp!, {r0, \reg1, \reg2}
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index a9b9e2287a09..7797e81e40e0 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@@ -48,6 +49,10 @@
stmdb sp!, {r0, \reg1, \reg2}
.endm
+ .macro usave reg1 reg2
+ UNWIND( .save {r0, \reg1, \reg2} )
+ .endm
+
.macro exit reg1 reg2
ldmfd sp!, {r0, \reg1, \reg2}
.endm
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index d1fc0c0c342c..69a9d47fc5ab 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
@@ -27,12 +28,17 @@
*/
ENTRY(memmove)
+ UNWIND( .fnstart )
subs ip, r0, r1
cmphi r2, ip
bls memcpy
stmfd sp!, {r0, r4, lr}
+ UNWIND( .fnend )
+
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} ) @ in first stmfd block
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
@@ -45,6 +51,11 @@ ENTRY(memmove)
1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
+ UNWIND( .fnend )
+
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} )
+ UNWIND( .save {r5 - r8} ) @ in second stmfd block
blt 5f
CALGN( ands ip, r0, #31 )
@@ -97,6 +108,10 @@ ENTRY(memmove)
CALGN( bcs 2b )
7: ldmfd sp!, {r5 - r8}
+ UNWIND( .fnend ) @ end of second stmfd block
+
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
8: movs r2, r2, lsl #31
ldrneb r3, [r1, #-1]!
@@ -124,10 +139,13 @@ ENTRY(memmove)
ldr r3, [r1, #0]
beq 17f
blt 18f
+ UNWIND( .fnend )
.macro backward_copy_shift push pull
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
subs r2, r2, #28
blt 14f
@@ -137,6 +155,11 @@ ENTRY(memmove)
CALGN( bcc 15f )
11: stmfd sp!, {r5 - r9}
+ UNWIND( .fnend )
+
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} )
+ UNWIND( .save {r5 - r9} ) @ in new second stmfd block
PLD( pld [r1, #-4] )
PLD( subs r2, r2, #96 )
@@ -171,6 +194,10 @@ ENTRY(memmove)
PLD( bge 13b )
ldmfd sp!, {r5 - r9}
+ UNWIND( .fnend ) @ end of the second stmfd block
+
+ UNWIND( .fnstart )
+ UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
14: ands ip, r2, #28
beq 16f
@@ -186,6 +213,7 @@ ENTRY(memmove)
16: add r1, r1, #(\pull / 8)
b 8b
+ UNWIND( .fnend )
.endm
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 671455c854fa..a4ee97b5a2bf 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -11,11 +11,13 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
.align 5
ENTRY(memset)
+UNWIND( .fnstart )
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
@@ -34,6 +36,9 @@ ENTRY(memset)
* We need 2 extra registers for this loop - use r8 and the LR
*/
stmfd sp!, {r8, lr}
+UNWIND( .fnend )
+UNWIND( .fnstart )
+UNWIND( .save {r8, lr} )
mov r8, r1
mov lr, r1
@@ -53,6 +58,7 @@ ENTRY(memset)
tst r2, #16
stmneia ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}
+UNWIND( .fnend )
#else
@@ -62,6 +68,9 @@ ENTRY(memset)
*/
stmfd sp!, {r4-r8, lr}
+UNWIND( .fnend )
+UNWIND( .fnstart )
+UNWIND( .save {r4-r8, lr} )
mov r4, r1
mov r5, r1
mov r6, r1
@@ -94,9 +103,11 @@ ENTRY(memset)
tst r2, #16
stmneia ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}
+UNWIND( .fnend )
#endif
+UNWIND( .fnstart )
4: tst r2, #8
stmneia ip!, {r1, r3}
tst r2, #4
@@ -120,4 +131,5 @@ ENTRY(memset)
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
+UNWIND( .fnend )
ENDPROC(memset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 385ccb306fa2..0eded952e089 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
.align 5
@@ -18,6 +19,7 @@
* mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we
* don't bother; we use byte stores instead.
*/
+UNWIND( .fnstart )
1: subs r1, r1, #4 @ 1 do we have enough
blt 5f @ 1 bytes to align with?
cmp r3, #2 @ 1
@@ -47,6 +49,9 @@ ENTRY(__memzero)
* use the LR
*/
str lr, [sp, #-4]! @ 1
+UNWIND( .fnend )
+UNWIND( .fnstart )
+UNWIND( .save {lr} )
mov ip, r2 @ 1
mov lr, r2 @ 1
@@ -66,6 +71,7 @@ ENTRY(__memzero)
tst r1, #16 @ 1 16 bytes or more?
stmneia r0!, {r2, r3, ip, lr} @ 4
ldr lr, [sp], #4 @ 1
+UNWIND( .fnend )
#else
@@ -75,6 +81,9 @@ ENTRY(__memzero)
*/
stmfd sp!, {r4-r7, lr}
+UNWIND( .fnend )
+UNWIND( .fnstart )
+UNWIND( .save {r4-r7, lr} )
mov r4, r2
mov r5, r2
mov r6, r2
@@ -105,9 +114,11 @@ ENTRY(__memzero)
tst r1, #16
stmneia r0!, {r4-r7}
ldmfd sp!, {r4-r7, lr}
+UNWIND( .fnend )
#endif
+UNWIND( .fnstart )
4: tst r1, #8 @ 1 8 bytes or more?
stmneia r0!, {r2, r3} @ 2
tst r1, #4 @ 1 4 bytes or more?
@@ -122,4 +133,5 @@ ENTRY(__memzero)
tst r1, #1 @ 1 a byte left over
strneb r2, [r0], #1 @ 1
ret lr @ 1
+UNWIND( .fnend )
ENDPROC(__memzero)
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 06d63d5651f3..b46b4d25f93e 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -294,7 +294,7 @@ static struct vpbe_output dm355evm_vpbe_outputs[] = {
.default_mode = "ntsc",
.num_modes = ARRAY_SIZE(dm355evm_enc_preset_timing),
.modes = dm355evm_enc_preset_timing,
- .if_params = V4L2_MBUS_FMT_FIXED,
+ .if_params = MEDIA_BUS_FMT_FIXED,
},
};
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index e08a8684ead2..a756003595e9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -485,7 +485,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
.default_mode = "ntsc",
.num_modes = ARRAY_SIZE(dm365evm_enc_std_timing),
.modes = dm365evm_enc_std_timing,
- .if_params = V4L2_MBUS_FMT_FIXED,
+ .if_params = MEDIA_BUS_FMT_FIXED,
},
{
.output = {
@@ -498,7 +498,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
.default_mode = "480p59_94",
.num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing),
.modes = dm365evm_enc_preset_timing,
- .if_params = V4L2_MBUS_FMT_FIXED,
+ .if_params = MEDIA_BUS_FMT_FIXED,
},
};
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index b4675fc28f83..e365c1bb1265 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -91,7 +91,6 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver davinci_cpuidle_driver = {
.driver = {
.name = "cpuidle-davinci",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 2f3ed3a58d57..9cbeda798584 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -785,14 +785,13 @@ static struct resource dm355_v4l2_disp_resources[] = {
},
};
-static int dm355_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
- int field)
+static int dm355_vpbe_setup_pinmux(u32 if_type, int field)
{
switch (if_type) {
- case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
davinci_cfg_reg(DM355_VOUT_FIELD_G70);
break;
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
if (field)
davinci_cfg_reg(DM355_VOUT_FIELD);
else
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 0ae8114f5cc9..e3a3c54b6832 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1306,16 +1306,15 @@ static struct resource dm365_v4l2_disp_resources[] = {
},
};
-static int dm365_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
- int field)
+static int dm365_vpbe_setup_pinmux(u32 if_type, int field)
{
switch (if_type) {
- case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
davinci_cfg_reg(DM365_VOUT_FIELD_G81);
davinci_cfg_reg(DM365_VOUT_COUTL_EN);
davinci_cfg_reg(DM365_VOUT_COUTH_EN);
break;
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
if (field)
davinci_cfg_reg(DM365_VOUT_FIELD);
else
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index a508fe587af7..07e23ba61f3a 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -148,7 +148,6 @@ static int __exit davinci_pm_remove(struct platform_device *pdev)
static struct platform_driver davinci_pm_driver = {
.driver = {
.name = "pm-davinci",
- .owner = THIS_MODULE,
},
.remove = __exit_p(davinci_pm_remove),
};
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index bb9c56e44a7b..603820e5aba7 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -34,7 +34,7 @@ config ARCH_EXYNOS3
bool "SAMSUNG EXYNOS3"
select ARM_CPU_SUSPEND if PM
help
- Samsung EXYNOS3 (Crotex-A7) SoC based systems
+ Samsung EXYNOS3 (Cortex-A7) SoC based systems
config ARCH_EXYNOS4
bool "SAMSUNG EXYNOS4"
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 3729d90cfa46..a377f95033ae 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -76,7 +76,6 @@ static struct of_device_id imx_mmdc_dt_ids[] = {
static struct platform_driver imx_mmdc_driver = {
.driver = {
.name = "imx-mmdc",
- .owner = THIS_MODULE,
.of_match_table = imx_mmdc_dt_ids,
},
.probe = imx_mmdc_probe,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d81b2475e67e..22762a1f9f72 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -158,6 +158,7 @@ struct pxa168_eth_platform_data gplugd_eth_platform_data = {
.port_number = 0,
.phy_addr = 0,
.speed = 0, /* Autonegotiation */
+ .intf = PHY_INTERFACE_MODE_RMII,
.init = gplugd_eth_init,
};
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index 9a80449518e6..f5b69d736ee5 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -169,7 +169,6 @@ static struct platform_driver msm_clock_pcom_driver = {
.probe = msm_clock_pcom_probe,
.driver = {
.name = "msm-clock-pcom",
- .owner = THIS_MODULE,
},
};
module_platform_driver(msm_clock_pcom_driver);
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index b1588a1ea2f8..7550f5a08956 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -1019,7 +1019,6 @@ static struct platform_driver msm_smd_driver = {
.probe = msm_smd_probe,
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 6e249324fdd7..f0edec199cd4 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -278,14 +278,6 @@ config MACH_SBC3530
default y
select OMAP_PACKAGE_CUS
-config OMAP3_EMU
- bool "OMAP3 debugging peripherals"
- depends on ARCH_OMAP3
- select ARM_AMBA
- select OC_ETM
- help
- Say Y here to enable debugging hardware of omap3
-
config OMAP3_SDRC_AC_TIMING
bool "Enable SDRC AC timing register changes"
depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 08cc94474d17..5d27dfdef66b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -228,7 +228,6 @@ obj-$(CONFIG_SOC_OMAP5) += omap_hwmod_54xx_data.o
obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o
# EMU peripherals
-obj-$(CONFIG_OMAP3_EMU) += emu.o
obj-$(CONFIG_HW_PERF_EVENTS) += pmu.o
iommu-$(CONFIG_OMAP_IOMMU) := omap-iommu.o
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 03502abe4f2e..14edcd7a2a1d 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,6 +23,7 @@
#include <linux/regulator/machine.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
#include <linux/omap-gpmc.h>
#include <linux/mmc/host.h>
#include <linux/power/isp1704_charger.h>
@@ -38,7 +39,6 @@
#include <sound/tlv320aic3x.h>
#include <sound/tpa6130a2-plat.h>
-#include <media/radio-si4713.h>
#include <media/si4713.h>
#include <linux/platform_data/leds-lp55xx.h>
@@ -756,46 +756,17 @@ static struct regulator_init_data rx51_vintdig = {
},
};
-static const char * const si4713_supply_names[] = {
- "vio",
- "vdd",
-};
-
-static struct si4713_platform_data rx51_si4713_i2c_data __initdata_or_module = {
- .supplies = ARRAY_SIZE(si4713_supply_names),
- .supply_names = si4713_supply_names,
- .gpio_reset = RX51_FMTX_RESET_GPIO,
-};
-
-static struct i2c_board_info rx51_si4713_board_info __initdata_or_module = {
- I2C_BOARD_INFO("si4713", SI4713_I2C_ADDR_BUSEN_HIGH),
- .platform_data = &rx51_si4713_i2c_data,
-};
-
-static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module = {
- .i2c_bus = 2,
- .subdev_board_info = &rx51_si4713_board_info,
-};
-
-static struct platform_device rx51_si4713_dev __initdata_or_module = {
- .name = "radio-si4713",
- .id = -1,
- .dev = {
- .platform_data = &rx51_si4713_data,
+static struct gpiod_lookup_table rx51_fmtx_gpios_table = {
+ .dev_id = "2-0063",
+ .table = {
+ GPIO_LOOKUP("gpio.6", 3, "reset", GPIO_ACTIVE_HIGH), /* 163 */
+ { },
},
};
-static __init void rx51_init_si4713(void)
+static __init void rx51_gpio_init(void)
{
- int err;
-
- err = gpio_request_one(RX51_FMTX_IRQ, GPIOF_DIR_IN, "si4713 irq");
- if (err) {
- printk(KERN_ERR "Cannot request si4713 irq gpio. %d\n", err);
- return;
- }
- rx51_si4713_board_info.irq = gpio_to_irq(RX51_FMTX_IRQ);
- platform_device_register(&rx51_si4713_dev);
+ gpiod_add_lookup_table(&rx51_fmtx_gpios_table);
}
static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
@@ -1025,7 +996,19 @@ static struct aic3x_pdata rx51_aic3x_data2 = {
.gpio_reset = 60,
};
+#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
+static struct si4713_platform_data rx51_si4713_platform_data = {
+ .is_platform_device = true
+};
+#endif
+
static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
+ {
+ I2C_BOARD_INFO("si4713", 0x63),
+ .platform_data = &rx51_si4713_platform_data,
+ },
+#endif
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
.platform_data = &rx51_aic3x_data,
@@ -1066,6 +1049,10 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
static int __init rx51_i2c_init(void)
{
+#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
+ int err;
+#endif
+
if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
system_rev >= SYSTEM_REV_B_USES_VAUX3) {
rx51_twldata.vaux3 = &rx51_vaux3_mmc;
@@ -1083,6 +1070,14 @@ static int __init rx51_i2c_init(void)
rx51_twldata.vdac->constraints.name = "VDAC";
omap_pmic_init(1, 2200, "twl5030", 7 + OMAP_INTC_START, &rx51_twldata);
+#if IS_ENABLED(CONFIG_I2C_SI4713) && IS_ENABLED(CONFIG_PLATFORM_SI4713)
+ err = gpio_request_one(RX51_FMTX_IRQ, GPIOF_DIR_IN, "si4713 irq");
+ if (err) {
+ printk(KERN_ERR "Cannot request si4713 irq gpio. %d\n", err);
+ return err;
+ }
+ rx51_peripherals_i2c_board_info_2[0].irq = gpio_to_irq(RX51_FMTX_IRQ);
+#endif
omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
@@ -1269,13 +1264,13 @@ static void __init rx51_init_omap3_rom_rng(void)
void __init rx51_peripherals_init(void)
{
+ rx51_gpio_init();
rx51_i2c_init();
regulator_has_full_constraints();
gpmc_onenand_init(board_onenand_data);
rx51_add_gpio_keys();
rx51_init_wl1251();
rx51_init_tsc2005();
- rx51_init_si4713();
rx51_init_lirc();
spi_register_board_info(rx51_peripherals_spi_board_info,
ARRAY_SIZE(rx51_peripherals_spi_board_info));
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 641337c6cde9..a4282e79143e 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -270,8 +270,6 @@ extern const struct clksel_rate div31_1to31_rates[];
extern void __iomem *clk_memmaps[];
-extern int am33xx_clk_init(void);
-
extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index a7bc4ce81e19..1afb50d6d636 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -67,28 +67,6 @@ static int __init omap3_l3_init(void)
}
omap_postcore_initcall(omap3_l3_init);
-#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
-
-static struct resource omap2cam_resources[] = {
- {
- .start = OMAP24XX_CAMERA_BASE,
- .end = OMAP24XX_CAMERA_BASE + 0xfff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 24 + OMAP_INTC_START,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device omap2cam_device = {
- .name = "omap24xxcam",
- .id = -1,
- .num_resources = ARRAY_SIZE(omap2cam_resources),
- .resource = omap2cam_resources,
-};
-#endif
-
#if defined(CONFIG_IOMMU_API)
#include <linux/platform_data/iommu-omap.h>
@@ -211,14 +189,6 @@ int omap3_init_camera(struct isp_platform_data *pdata)
#endif
-static inline void omap_init_camera(void)
-{
-#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
- if (cpu_is_omap24xx())
- platform_device_register(&omap2cam_device);
-#endif
-}
-
#if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE)
static inline void __init omap_init_mbox(void)
{
@@ -397,7 +367,6 @@ static int __init omap2_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
omap_init_audio();
- omap_init_camera();
/* If dtb is there, the devices will be created dynamically */
if (!of_have_populated_dt()) {
omap_init_mbox();
diff --git a/arch/arm/mach-omap2/emu.c b/arch/arm/mach-omap2/emu.c
deleted file mode 100644
index cbeaca2d7695..000000000000
--- a/arch/arm/mach-omap2/emu.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * emu.c
- *
- * ETM and ETB CoreSight components' resources as found in OMAP3xxx.
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include "soc.h"
-#include "iomap.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alexander Shishkin");
-
-/* Cortex CoreSight components within omap3xxx EMU */
-#define ETM_BASE (L4_EMU_34XX_PHYS + 0x10000)
-#define DBG_BASE (L4_EMU_34XX_PHYS + 0x11000)
-#define ETB_BASE (L4_EMU_34XX_PHYS + 0x1b000)
-#define DAPCTL (L4_EMU_34XX_PHYS + 0x1d000)
-
-static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL);
-static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL);
-
-static int __init emu_init(void)
-{
- if (!cpu_is_omap34xx())
- return -ENODEV;
-
- amba_device_register(&omap3_etb_device, &iomem_resource);
- amba_device_register(&omap3_etm_device, &iomem_resource);
-
- return 0;
-}
-
-omap_subsys_initcall(emu_init);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 53841dea80ea..c25feba05818 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -471,11 +471,15 @@ void __init omap3xxx_check_revision(void)
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
- default:
omap_revision = AM437X_REV_ES1_1;
cpu_rev = "1.1";
break;
+ case 2:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = AM437X_REV_ES1_2;
+ cpu_rev = "1.2";
+ break;
}
break;
case 0xb8f2:
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 4376f59626d1..c1a3b4416311 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -446,6 +446,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define AM437X_CLASS 0x43700000
#define AM437X_REV_ES1_0 (AM437X_CLASS | (0x10 << 8))
#define AM437X_REV_ES1_1 (AM437X_CLASS | (0x11 << 8))
+#define AM437X_REV_ES1_2 (AM437X_CLASS | (0x12 << 8))
#define OMAP443X_CLASS 0x44300044
#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index b0d54dae1bcb..4457e731f7a4 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -91,18 +91,8 @@ void __init omap_pmic_late_init(void)
}
#if defined(CONFIG_ARCH_OMAP3)
-struct phy_consumer consumers[] = {
- PHY_CONSUMER("musb-hdrc.0", "usb"),
-};
-
-struct phy_init_data init_data = {
- .consumers = consumers,
- .num_consumers = ARRAY_SIZE(consumers),
-};
-
static struct twl4030_usb_data omap3_usb_pdata = {
- .usb_mode = T2_USB_MODE_ULPI,
- .init_data = &init_data,
+ .usb_mode = T2_USB_MODE_ULPI,
};
static int omap3_batt_table[] = {
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 96e9bc102117..d99d08eeb966 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -135,7 +135,6 @@ static struct platform_driver sirfsoc_memc_driver = {
.probe = sirfsoc_memc_probe,
.driver = {
.name = "sirfsoc-memc",
- .owner = THIS_MODULE,
.of_match_table = memc_ids,
},
};
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 3dffcb2d714e..e1f1f86f6a95 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -114,7 +114,6 @@ static struct platform_driver sirfsoc_rstc_driver = {
.probe = sirfsoc_rstc_probe,
.driver = {
.name = "sirfsoc_rstc",
- .owner = THIS_MODULE,
.of_match_table = rstc_ids,
},
};
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
index a17c88b74fa1..70a0b475062b 100644
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ b/arch/arm/mach-prima2/rtciobrg.c
@@ -123,7 +123,6 @@ static struct platform_driver sirfsoc_rtciobrg_driver = {
.probe = sirfsoc_rtciobrg_probe,
.driver = {
.name = "sirfsoc-rtciobrg",
- .owner = THIS_MODULE,
.of_match_table = rtciobrg_ids,
},
};
diff --git a/arch/arm/mach-pxa/pxa3xx-ulpi.c b/arch/arm/mach-pxa/pxa3xx-ulpi.c
index 614003e8b081..1c85275cb768 100644
--- a/arch/arm/mach-pxa/pxa3xx-ulpi.c
+++ b/arch/arm/mach-pxa/pxa3xx-ulpi.c
@@ -379,7 +379,6 @@ static int pxa3xx_u2d_remove(struct platform_device *pdev)
static struct platform_driver pxa3xx_u2d_ulpi_driver = {
.driver = {
.name = "pxa3xx-u2d",
- .owner = THIS_MODULE,
},
.probe = pxa3xx_u2d_probe,
.remove = pxa3xx_u2d_remove,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 840c3a48e720..962a7f31f596 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -924,6 +924,14 @@ static inline void spitz_i2c_init(void) {}
#endif
/******************************************************************************
+ * Audio devices
+ ******************************************************************************/
+static inline void spitz_audio_init(void)
+{
+ platform_device_register_simple("spitz-audio", -1, NULL, 0);
+}
+
+/******************************************************************************
* Machine init
******************************************************************************/
static void spitz_poweroff(void)
@@ -970,6 +978,7 @@ static void __init spitz_init(void)
spitz_nor_init();
spitz_nand_init();
spitz_i2c_init();
+ spitz_audio_init();
}
static void __init spitz_fixup(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index fc3646c2c694..685deff861d2 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -129,7 +129,6 @@ static struct platform_driver tosa_bt_driver = {
.driver = {
.name = "tosa-bt",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index 33afb9190091..ce2db235dbaf 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -171,7 +171,6 @@ static struct platform_driver osiris_dvs_driver = {
.remove = osiris_dvs_remove,
.driver = {
.name = "osiris-dvs",
- .owner = THIS_MODULE,
.pm = &osiris_dvs_pm,
},
};
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index 9fa6a990cf03..03c75a811cb0 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -15,10 +15,12 @@
#include <linux/clkdev.h>
#include <mach/hardware.h>
+#include <mach/generic.h>
struct clkops {
void (*enable)(struct clk *);
void (*disable)(struct clk *);
+ unsigned long (*get_rate)(struct clk *);
};
struct clk {
@@ -33,13 +35,6 @@ struct clk clk_##_name = { \
static DEFINE_SPINLOCK(clocks_lock);
-/* Dummy clk routine to build generic kernel parts that may be using them */
-unsigned long clk_get_rate(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
static void clk_gpio27_enable(struct clk *clk)
{
/*
@@ -58,6 +53,19 @@ static void clk_gpio27_disable(struct clk *clk)
GAFR &= ~GPIO_32_768kHz;
}
+static void clk_cpu_enable(struct clk *clk)
+{
+}
+
+static void clk_cpu_disable(struct clk *clk)
+{
+}
+
+static unsigned long clk_cpu_get_rate(struct clk *clk)
+{
+ return sa11x0_getspeed(0) * 1000;
+}
+
int clk_enable(struct clk *clk)
{
unsigned long flags;
@@ -87,16 +95,37 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (clk && clk->ops && clk->ops->get_rate)
+ return clk->ops->get_rate(clk);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
const struct clkops clk_gpio27_ops = {
.enable = clk_gpio27_enable,
.disable = clk_gpio27_disable,
};
+const struct clkops clk_cpu_ops = {
+ .enable = clk_cpu_enable,
+ .disable = clk_cpu_disable,
+ .get_rate = clk_cpu_get_rate,
+};
+
static DEFINE_CLK(gpio27, &clk_gpio27_ops);
+static DEFINE_CLK(cpu, &clk_cpu_ops);
+
static struct clk_lookup sa11xx_clkregs[] = {
CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27),
CLKDEV_INIT("sa1100-rtc", NULL, NULL),
+ CLKDEV_INIT("sa11x0-fb", NULL, &clk_cpu),
+ CLKDEV_INIT("sa11x0-pcmcia", NULL, &clk_cpu),
+ /* sa1111 names devices using internal offsets, PCMCIA is at 0x1800 */
+ CLKDEV_INIT("1800", NULL, &clk_cpu),
};
static int __init sa11xx_clk_init(void)
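Editor's note: with the clk_cpu clock and its get_rate hook wired into sa11xx_clkregs above, clk_get_rate() stops returning the old dummy 0 for those consumers. A minimal sketch of a consumer, assuming a driver bound to one of the device names in the lookup table (the probe function itself is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	unsigned long rate;

	/* NULL con_id: matched purely by device name, as in CLKDEV_INIT(). */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Dispatches to clk_cpu_get_rate(), i.e. sa11x0_getspeed(0) * 1000. */
	rate = clk_get_rate(clk);
	dev_info(&pdev->dev, "core clock runs at %lu Hz\n", rate);

	clk_put(clk);
	return 0;
}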
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 108939f8d053..b90c7d828391 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -30,7 +30,7 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
-#include <linux/pda_power.h>
+#include <linux/power/gpio-charger.h>
#include <video/sa1100fb.h>
@@ -131,62 +131,24 @@ static struct irda_platform_data collie_ir_data = {
/*
* Collie AC IN
*/
-static int collie_power_init(struct device *dev)
-{
- int ret = gpio_request(COLLIE_GPIO_AC_IN, "ac in");
- if (ret)
- goto err_gpio_req;
-
- ret = gpio_direction_input(COLLIE_GPIO_AC_IN);
- if (ret)
- goto err_gpio_in;
-
- return 0;
-
-err_gpio_in:
- gpio_free(COLLIE_GPIO_AC_IN);
-err_gpio_req:
- return ret;
-}
-
-static void collie_power_exit(struct device *dev)
-{
- gpio_free(COLLIE_GPIO_AC_IN);
-}
-
-static int collie_power_ac_online(void)
-{
- return gpio_get_value(COLLIE_GPIO_AC_IN) == 2;
-}
-
static char *collie_ac_supplied_to[] = {
"main-battery",
"backup-battery",
};
-static struct pda_power_pdata collie_power_data = {
- .init = collie_power_init,
- .is_ac_online = collie_power_ac_online,
- .exit = collie_power_exit,
+
+static struct gpio_charger_platform_data collie_power_data = {
+ .name = "charger",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .gpio = COLLIE_GPIO_AC_IN,
.supplied_to = collie_ac_supplied_to,
.num_supplicants = ARRAY_SIZE(collie_ac_supplied_to),
};
-static struct resource collie_power_resource[] = {
- {
- .name = "ac",
- .flags = IORESOURCE_IRQ |
- IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWEDGE,
- },
-};
-
static struct platform_device collie_power_device = {
- .name = "pda-power",
+ .name = "gpio-charger",
.id = -1,
.dev.platform_data = &collie_power_data,
- .resource = collie_power_resource,
- .num_resources = ARRAY_SIZE(collie_power_resource),
};
#ifdef CONFIG_SHARP_LOCOMO
@@ -420,9 +382,6 @@ static void __init collie_init(void)
GPSR |= _COLLIE_GPIO_UCB1x00_RESET;
- collie_power_resource[0].start = gpio_to_irq(COLLIE_GPIO_AC_IN);
- collie_power_resource[0].end = gpio_to_irq(COLLIE_GPIO_AC_IN);
-
sa11x0_ppc_configure_mcp();
diff --git a/arch/arm/mach-sa1100/include/mach/entry-macro.S b/arch/arm/mach-sa1100/include/mach/entry-macro.S
deleted file mode 100644
index 8cf7630bf024..000000000000
--- a/arch/arm/mach-sa1100/include/mach/entry-macro.S
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for SA1100-based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
- .macro get_irqnr_preamble, base, tmp
- mov \base, #0xfa000000 @ ICIP = 0xfa050000
- add \base, \base, #0x00050000
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, [\base] @ get irqs
- ldr \irqnr, [\base, #4] @ ICMR = 0xfa050004
- ands \irqstat, \irqstat, \irqnr
- mov \irqnr, #0
- beq 1001f
- tst \irqstat, #0xff
- moveq \irqstat, \irqstat, lsr #8
- addeq \irqnr, \irqnr, #8
- tsteq \irqstat, #0xff
- moveq \irqstat, \irqstat, lsr #8
- addeq \irqnr, \irqnr, #8
- tsteq \irqstat, #0xff
- moveq \irqstat, \irqstat, lsr #8
- addeq \irqnr, \irqnr, #8
- tst \irqstat, #0x0f
- moveq \irqstat, \irqstat, lsr #4
- addeq \irqnr, \irqnr, #4
- tst \irqstat, #0x03
- moveq \irqstat, \irqstat, lsr #2
- addeq \irqnr, \irqnr, #2
- tst \irqstat, #0x01
- addeqs \irqnr, \irqnr, #1
-1001:
- .endm
-
diff --git a/arch/arm/mach-sa1100/include/mach/irqs.h b/arch/arm/mach-sa1100/include/mach/irqs.h
index 3790298b7142..de0983494c7e 100644
--- a/arch/arm/mach-sa1100/include/mach/irqs.h
+++ b/arch/arm/mach-sa1100/include/mach/irqs.h
@@ -8,56 +8,56 @@
* 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs.
*/
-#define IRQ_GPIO0 0
-#define IRQ_GPIO1 1
-#define IRQ_GPIO2 2
-#define IRQ_GPIO3 3
-#define IRQ_GPIO4 4
-#define IRQ_GPIO5 5
-#define IRQ_GPIO6 6
-#define IRQ_GPIO7 7
-#define IRQ_GPIO8 8
-#define IRQ_GPIO9 9
-#define IRQ_GPIO10 10
-#define IRQ_GPIO11_27 11
-#define IRQ_LCD 12 /* LCD controller */
-#define IRQ_Ser0UDC 13 /* Ser. port 0 UDC */
-#define IRQ_Ser1SDLC 14 /* Ser. port 1 SDLC */
-#define IRQ_Ser1UART 15 /* Ser. port 1 UART */
-#define IRQ_Ser2ICP 16 /* Ser. port 2 ICP */
-#define IRQ_Ser3UART 17 /* Ser. port 3 UART */
-#define IRQ_Ser4MCP 18 /* Ser. port 4 MCP */
-#define IRQ_Ser4SSP 19 /* Ser. port 4 SSP */
-#define IRQ_DMA0 20 /* DMA controller channel 0 */
-#define IRQ_DMA1 21 /* DMA controller channel 1 */
-#define IRQ_DMA2 22 /* DMA controller channel 2 */
-#define IRQ_DMA3 23 /* DMA controller channel 3 */
-#define IRQ_DMA4 24 /* DMA controller channel 4 */
-#define IRQ_DMA5 25 /* DMA controller channel 5 */
-#define IRQ_OST0 26 /* OS Timer match 0 */
-#define IRQ_OST1 27 /* OS Timer match 1 */
-#define IRQ_OST2 28 /* OS Timer match 2 */
-#define IRQ_OST3 29 /* OS Timer match 3 */
-#define IRQ_RTC1Hz 30 /* RTC 1 Hz clock */
-#define IRQ_RTCAlrm 31 /* RTC Alarm */
+#define IRQ_GPIO0 1
+#define IRQ_GPIO1 2
+#define IRQ_GPIO2 3
+#define IRQ_GPIO3 4
+#define IRQ_GPIO4 5
+#define IRQ_GPIO5 6
+#define IRQ_GPIO6 7
+#define IRQ_GPIO7 8
+#define IRQ_GPIO8 9
+#define IRQ_GPIO9 10
+#define IRQ_GPIO10 11
+#define IRQ_GPIO11_27 12
+#define IRQ_LCD 13 /* LCD controller */
+#define IRQ_Ser0UDC 14 /* Ser. port 0 UDC */
+#define IRQ_Ser1SDLC 15 /* Ser. port 1 SDLC */
+#define IRQ_Ser1UART 16 /* Ser. port 1 UART */
+#define IRQ_Ser2ICP 17 /* Ser. port 2 ICP */
+#define IRQ_Ser3UART 18 /* Ser. port 3 UART */
+#define IRQ_Ser4MCP 19 /* Ser. port 4 MCP */
+#define IRQ_Ser4SSP 20 /* Ser. port 4 SSP */
+#define IRQ_DMA0 21 /* DMA controller channel 0 */
+#define IRQ_DMA1 22 /* DMA controller channel 1 */
+#define IRQ_DMA2 23 /* DMA controller channel 2 */
+#define IRQ_DMA3 24 /* DMA controller channel 3 */
+#define IRQ_DMA4 25 /* DMA controller channel 4 */
+#define IRQ_DMA5 26 /* DMA controller channel 5 */
+#define IRQ_OST0 27 /* OS Timer match 0 */
+#define IRQ_OST1 28 /* OS Timer match 1 */
+#define IRQ_OST2 29 /* OS Timer match 2 */
+#define IRQ_OST3 30 /* OS Timer match 3 */
+#define IRQ_RTC1Hz 31 /* RTC 1 Hz clock */
+#define IRQ_RTCAlrm 32 /* RTC Alarm */
-#define IRQ_GPIO11 32
-#define IRQ_GPIO12 33
-#define IRQ_GPIO13 34
-#define IRQ_GPIO14 35
-#define IRQ_GPIO15 36
-#define IRQ_GPIO16 37
-#define IRQ_GPIO17 38
-#define IRQ_GPIO18 39
-#define IRQ_GPIO19 40
-#define IRQ_GPIO20 41
-#define IRQ_GPIO21 42
-#define IRQ_GPIO22 43
-#define IRQ_GPIO23 44
-#define IRQ_GPIO24 45
-#define IRQ_GPIO25 46
-#define IRQ_GPIO26 47
-#define IRQ_GPIO27 48
+#define IRQ_GPIO11 33
+#define IRQ_GPIO12 34
+#define IRQ_GPIO13 35
+#define IRQ_GPIO14 36
+#define IRQ_GPIO15 37
+#define IRQ_GPIO16 38
+#define IRQ_GPIO17 39
+#define IRQ_GPIO18 40
+#define IRQ_GPIO19 41
+#define IRQ_GPIO20 42
+#define IRQ_GPIO21 43
+#define IRQ_GPIO22 44
+#define IRQ_GPIO23 45
+#define IRQ_GPIO24 46
+#define IRQ_GPIO25 47
+#define IRQ_GPIO26 48
+#define IRQ_GPIO27 49
/*
* The next 16 interrupts are for board specific purposes. Since
@@ -65,8 +65,8 @@
* these. If you need more, increase IRQ_BOARD_END, but keep it
* within sensible limits. IRQs 49 to 64 are available.
*/
-#define IRQ_BOARD_START 49
-#define IRQ_BOARD_END 65
+#define IRQ_BOARD_START 50
+#define IRQ_BOARD_END 66
/*
* Figure out the MAX IRQ number.
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 2124f1fc2fbe..63e2901db416 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -14,17 +14,73 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/syscore_ops.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <asm/mach/irq.h>
+#include <asm/exception.h>
#include "generic.h"
/*
+ * We don't need to ACK IRQs on the SA1100 unless they're GPIOs
+ * this is for internal IRQs i.e. from IRQ LCD to RTCAlrm.
+ */
+static void sa1100_mask_irq(struct irq_data *d)
+{
+ ICMR &= ~BIT(d->hwirq);
+}
+
+static void sa1100_unmask_irq(struct irq_data *d)
+{
+ ICMR |= BIT(d->hwirq);
+}
+
+/*
+ * Apart from GPIOs, only the RTC alarm can be a wakeup event.
+ */
+static int sa1100_set_wake(struct irq_data *d, unsigned int on)
+{
+ if (BIT(d->hwirq) == IC_RTCAlrm) {
+ if (on)
+ PWER |= PWER_RTC;
+ else
+ PWER &= ~PWER_RTC;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct irq_chip sa1100_normal_chip = {
+ .name = "SC",
+ .irq_ack = sa1100_mask_irq,
+ .irq_mask = sa1100_mask_irq,
+ .irq_unmask = sa1100_unmask_irq,
+ .irq_set_wake = sa1100_set_wake,
+};
+
+static int sa1100_normal_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sa1100_normal_chip,
+ handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops sa1100_normal_irqdomain_ops = {
+ .map = sa1100_normal_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static struct irq_domain *sa1100_normal_irqdomain;
+
+/*
* SA1100 GPIO edge detection for IRQs:
* IRQs are generated on Falling-Edge, Rising-Edge, or both.
* Use this instead of directly setting GRER/GFER.
@@ -33,20 +89,11 @@ static int GPIO_IRQ_rising_edge;
static int GPIO_IRQ_falling_edge;
static int GPIO_IRQ_mask = (1 << 11) - 1;
-/*
- * To get the GPIO number from an IRQ number
- */
-#define GPIO_11_27_IRQ(i) ((i) - 21)
-#define GPIO11_27_MASK(irq) (1 << GPIO_11_27_IRQ(irq))
-
static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
{
unsigned int mask;
- if (d->irq <= 10)
- mask = 1 << d->irq;
- else
- mask = GPIO11_27_MASK(d->irq);
+ mask = BIT(d->hwirq);
if (type == IRQ_TYPE_PROBE) {
if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
@@ -70,41 +117,51 @@ static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
}
/*
- * GPIO IRQs must be acknowledged. This is for IRQs from 0 to 10.
+ * GPIO IRQs must be acknowledged.
*/
-static void sa1100_low_gpio_ack(struct irq_data *d)
-{
- GEDR = (1 << d->irq);
-}
-
-static void sa1100_low_gpio_mask(struct irq_data *d)
-{
- ICMR &= ~(1 << d->irq);
-}
-
-static void sa1100_low_gpio_unmask(struct irq_data *d)
+static void sa1100_gpio_ack(struct irq_data *d)
{
- ICMR |= 1 << d->irq;
+ GEDR = BIT(d->hwirq);
}
-static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on)
+static int sa1100_gpio_wake(struct irq_data *d, unsigned int on)
{
if (on)
- PWER |= 1 << d->irq;
+ PWER |= BIT(d->hwirq);
else
- PWER &= ~(1 << d->irq);
+ PWER &= ~BIT(d->hwirq);
return 0;
}
+/*
+ * This is for IRQs from 0 to 10.
+ */
static struct irq_chip sa1100_low_gpio_chip = {
.name = "GPIO-l",
- .irq_ack = sa1100_low_gpio_ack,
- .irq_mask = sa1100_low_gpio_mask,
- .irq_unmask = sa1100_low_gpio_unmask,
+ .irq_ack = sa1100_gpio_ack,
+ .irq_mask = sa1100_mask_irq,
+ .irq_unmask = sa1100_unmask_irq,
.irq_set_type = sa1100_gpio_type,
- .irq_set_wake = sa1100_low_gpio_wake,
+ .irq_set_wake = sa1100_gpio_wake,
+};
+
+static int sa1100_low_gpio_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops sa1100_low_gpio_irqdomain_ops = {
+ .map = sa1100_low_gpio_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
};
+static struct irq_domain *sa1100_low_gpio_irqdomain;
+
/*
* IRQ11 (GPIO11 through 27) handler. We enter here with the
* irq_controller_lock held, and IRQs disabled. Decode the IRQ
@@ -141,16 +198,9 @@ sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc)
* In addition, the IRQs are all collected up into one bit in the
* interrupt controller registers.
*/
-static void sa1100_high_gpio_ack(struct irq_data *d)
-{
- unsigned int mask = GPIO11_27_MASK(d->irq);
-
- GEDR = mask;
-}
-
static void sa1100_high_gpio_mask(struct irq_data *d)
{
- unsigned int mask = GPIO11_27_MASK(d->irq);
+ unsigned int mask = BIT(d->hwirq);
GPIO_IRQ_mask &= ~mask;
@@ -160,7 +210,7 @@ static void sa1100_high_gpio_mask(struct irq_data *d)
static void sa1100_high_gpio_unmask(struct irq_data *d)
{
- unsigned int mask = GPIO11_27_MASK(d->irq);
+ unsigned int mask = BIT(d->hwirq);
GPIO_IRQ_mask |= mask;
@@ -168,61 +218,32 @@ static void sa1100_high_gpio_unmask(struct irq_data *d)
GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
}
-static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on)
-{
- if (on)
- PWER |= GPIO11_27_MASK(d->irq);
- else
- PWER &= ~GPIO11_27_MASK(d->irq);
- return 0;
-}
-
static struct irq_chip sa1100_high_gpio_chip = {
.name = "GPIO-h",
- .irq_ack = sa1100_high_gpio_ack,
+ .irq_ack = sa1100_gpio_ack,
.irq_mask = sa1100_high_gpio_mask,
.irq_unmask = sa1100_high_gpio_unmask,
.irq_set_type = sa1100_gpio_type,
- .irq_set_wake = sa1100_high_gpio_wake,
+ .irq_set_wake = sa1100_gpio_wake,
};
-/*
- * We don't need to ACK IRQs on the SA1100 unless they're GPIOs
- * this is for internal IRQs i.e. from 11 to 31.
- */
-static void sa1100_mask_irq(struct irq_data *d)
-{
- ICMR &= ~(1 << d->irq);
-}
-
-static void sa1100_unmask_irq(struct irq_data *d)
+static int sa1100_high_gpio_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
{
- ICMR |= (1 << d->irq);
-}
+ irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-/*
- * Apart form GPIOs, only the RTC alarm can be a wakeup event.
- */
-static int sa1100_set_wake(struct irq_data *d, unsigned int on)
-{
- if (d->irq == IRQ_RTCAlrm) {
- if (on)
- PWER |= PWER_RTC;
- else
- PWER &= ~PWER_RTC;
- return 0;
- }
- return -EINVAL;
+ return 0;
}
-static struct irq_chip sa1100_normal_chip = {
- .name = "SC",
- .irq_ack = sa1100_mask_irq,
- .irq_mask = sa1100_mask_irq,
- .irq_unmask = sa1100_unmask_irq,
- .irq_set_wake = sa1100_set_wake,
+static struct irq_domain_ops sa1100_high_gpio_irqdomain_ops = {
+ .map = sa1100_high_gpio_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
};
+static struct irq_domain *sa1100_high_gpio_irqdomain;
+
static struct resource irq_resource =
DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs");
@@ -291,10 +312,25 @@ static int __init sa1100irq_init_devicefs(void)
device_initcall(sa1100irq_init_devicefs);
-void __init sa1100_init_irq(void)
+static asmlinkage void __exception_irq_entry
+sa1100_handle_irq(struct pt_regs *regs)
{
- unsigned int irq;
+ uint32_t icip, icmr, mask;
+
+ do {
+ icip = (ICIP);
+ icmr = (ICMR);
+ mask = icip & icmr;
+
+ if (mask == 0)
+ break;
+
+ handle_IRQ(ffs(mask) - 1 + IRQ_GPIO0, regs);
+ } while (1);
+}
+void __init sa1100_init_irq(void)
+{
request_resource(&iomem_resource, &irq_resource);
/* disable all IRQs */
@@ -314,29 +350,24 @@ void __init sa1100_init_irq(void)
*/
ICCR = 1;
- for (irq = 0; irq <= 10; irq++) {
- irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
+ sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL,
+ 11, IRQ_GPIO0, 0,
+ &sa1100_low_gpio_irqdomain_ops, NULL);
- for (irq = 12; irq <= 31; irq++) {
- irq_set_chip_and_handler(irq, &sa1100_normal_chip,
- handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
+ sa1100_normal_irqdomain = irq_domain_add_legacy(NULL,
+ 21, IRQ_GPIO11_27, 11,
+ &sa1100_normal_irqdomain_ops, NULL);
- for (irq = 32; irq <= 48; irq++) {
- irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
- handle_edge_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
+ sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL,
+ 17, IRQ_GPIO11, 11,
+ &sa1100_high_gpio_irqdomain_ops, NULL);
/*
* Install handler for GPIO 11-27 edge detect interrupts
*/
- irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip);
irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);
+ set_handle_irq(sa1100_handle_irq);
+
sa1100_init_gpio();
}
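Editor's note: the conversion above replaces open-coded per-IRQ setup loops with legacy irq domains. The sketch below condenses that pattern — a fixed block of hwirqs is bound to pre-allocated Linux IRQ numbers, and each mapping gets its chip and flow handler from the .map callback. The example_* names are illustrative; sa1100_low_gpio_chip is the chip defined in the patch.

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int example_map(struct irq_domain *d, unsigned int irq,
		       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	return 0;
}

static const struct irq_domain_ops example_ops = {
	.map	= example_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

static void __init example_register_domain(void)
{
	/*
	 * 11 hwirqs (0..10) starting at Linux IRQ IRQ_GPIO0, which is now 1
	 * rather than 0 since Linux IRQ 0 conventionally means "no interrupt"
	 * — hence the wholesale renumbering in mach/irqs.h above.
	 */
	irq_domain_add_legacy(NULL, 11, IRQ_GPIO0, 0, &example_ops, NULL);
}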
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 400f80332046..169262e3040d 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -423,7 +423,6 @@ static struct platform_driver neponset_device_driver = {
.remove = neponset_remove,
.driver = {
.name = "neponset",
- .owner = THIS_MODULE,
.pm = PM_OPS,
},
};
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index b47262afb240..f8197eb6e566 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -32,7 +32,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/camera-rcar.h>
#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
#include <linux/platform_data/usb-rcar-gen2-phy.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
@@ -83,61 +82,6 @@
*
*/
-/* DU */
-static struct rcar_du_encoder_data lager_du_encoders[] = {
- {
- .type = RCAR_DU_ENCODER_VGA,
- .output = RCAR_DU_OUTPUT_DPAD0,
- }, {
- .type = RCAR_DU_ENCODER_NONE,
- .output = RCAR_DU_OUTPUT_LVDS1,
- .connector.lvds.panel = {
- .width_mm = 210,
- .height_mm = 158,
- .mode = {
- .pixelclock = 65000000,
- .hactive = 1024,
- .hfront_porch = 20,
- .hback_porch = 160,
- .hsync_len = 136,
- .vactive = 768,
- .vfront_porch = 3,
- .vback_porch = 29,
- .vsync_len = 6,
- },
- },
- },
-};
-
-static const struct rcar_du_platform_data lager_du_pdata __initconst = {
- .encoders = lager_du_encoders,
- .num_encoders = ARRAY_SIZE(lager_du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
- DEFINE_RES_MEM(0xfeb00000, 0x70000),
- DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
- DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
- DEFINE_RES_IRQ(gic_spi(256)),
- DEFINE_RES_IRQ(gic_spi(268)),
- DEFINE_RES_IRQ(gic_spi(269)),
-};
-
-static void __init lager_add_du_device(void)
-{
- struct platform_device_info info = {
- .name = "rcar-du-r8a7790",
- .id = -1,
- .res = du_resources,
- .num_res = ARRAY_SIZE(du_resources),
- .data = &lager_du_pdata,
- .size_data = sizeof(lager_du_pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- platform_device_register_full(&info);
-}
-
/* LEDS */
static struct gpio_led lager_leds[] = {
{
@@ -800,8 +744,6 @@ static void __init lager_add_standard_devices(void)
platform_device_register_full(&ether_info);
- lager_add_du_device();
-
platform_device_register_resndata(NULL, "qspi", 0,
qspi_resources,
ARRAY_SIZE(qspi_resources),
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index ed1087031c5d..a1c1dfb6a67a 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1149,7 +1149,7 @@ static struct soc_camera_platform_info camera_info = {
.format_name = "UYVY",
.format_depth = 16,
.format = {
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
.field = V4L2_FIELD_NONE,
.width = 640,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 994dc7d86ae2..598f704f76ae 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -27,7 +27,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/camera-rcar.h>
#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/rcar-du.h>
#include <linux/platform_data/usb-rcar-phy.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -171,62 +170,6 @@ static struct platform_device hspi_device = {
.num_resources = ARRAY_SIZE(hspi_resources),
};
-/*
- * DU
- *
- * The panel only specifies the [hv]display and [hv]total values. The position
- * and width of the sync pulses don't matter, they're copied from VESA timings.
- */
-static struct rcar_du_encoder_data du_encoders[] = {
- {
- .type = RCAR_DU_ENCODER_VGA,
- .output = RCAR_DU_OUTPUT_DPAD0,
- }, {
- .type = RCAR_DU_ENCODER_LVDS,
- .output = RCAR_DU_OUTPUT_DPAD1,
- .connector.lvds.panel = {
- .width_mm = 210,
- .height_mm = 158,
- .mode = {
- .pixelclock = 65000000,
- .hactive = 1024,
- .hfront_porch = 20,
- .hback_porch = 160,
- .hsync_len = 136,
- .vactive = 768,
- .vfront_porch = 3,
- .vback_porch = 29,
- .vsync_len = 6,
- },
- },
- },
-};
-
-static const struct rcar_du_platform_data du_pdata __initconst = {
- .encoders = du_encoders,
- .num_encoders = ARRAY_SIZE(du_encoders),
-};
-
-static const struct resource du_resources[] __initconst = {
- DEFINE_RES_MEM(0xfff80000, 0x40000),
- DEFINE_RES_IRQ(gic_iid(0x3f)),
-};
-
-static void __init marzen_add_du_device(void)
-{
- struct platform_device_info info = {
- .name = "rcar-du-r8a7779",
- .id = -1,
- .res = du_resources,
- .num_res = ARRAY_SIZE(du_resources),
- .data = &du_pdata,
- .size_data = sizeof(du_pdata),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- platform_device_register_full(&info);
-}
-
/* LEDS */
static struct gpio_led marzen_leds[] = {
{
@@ -385,7 +328,6 @@ static void __init marzen_init(void)
platform_device_register_full(&vin1_info);
platform_device_register_full(&vin3_info);
platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
- marzen_add_du_device();
}
static const char *marzen_boards_compat_dt[] __initdata = {
diff --git a/arch/arm/mach-u300/regulator.c b/arch/arm/mach-u300/regulator.c
index 0493a845b6bc..595b574c2c50 100644
--- a/arch/arm/mach-u300/regulator.c
+++ b/arch/arm/mach-u300/regulator.c
@@ -116,7 +116,6 @@ static const struct of_device_id s365_board_match[] = {
static struct platform_driver s365_board_driver = {
.driver = {
.name = "s365-board",
- .owner = THIS_MODULE,
.of_match_table = s365_board_match,
},
};
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab906b801047..03823e784f63 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
help
This option specifies the architecture can support big endian
operation.
+
+config ARM_KERNMEM_PERMS
+ bool "Restrict kernel memory permissions"
+ help
+ If this is set, kernel memory other than kernel text (and rodata)
+ will be made non-executable. The tradeoff is that each region is
+ padded to section-size (1MiB) boundaries (because their permissions
+ are different and splitting the 1M pages into 4K ones causes TLB
+ performance problems), wasting memory.
+
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ depends on ARM_KERNMEM_PERMS
+ default y
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ helps catch accidental or malicious attempts to change the kernel's
+ executable code. It additionally splits rodata from kernel text so
+ that rodata can be made explicitly non-executable, which adds another
+ section-size-padded region and therefore costs some more memory in
+ exchange for the read-only protection.
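Editor's note: the padding cost mentioned in the ARM_KERNMEM_PERMS help text follows directly from rounding every differently-permissioned region out to 1 MiB section boundaries. A small sketch of that rounding, using the generic round_up()/round_down() helpers (the function and constant names are illustrative):

#include <linux/kernel.h>
#include <linux/sizes.h>

#define EXAMPLE_SECTION_SIZE	SZ_1M	/* section granularity without LPAE */

/* Bytes wasted when a region must start and end on section boundaries so
 * that its permissions can differ from its neighbours'. */
static unsigned long example_section_padding(unsigned long start,
					     unsigned long end)
{
	unsigned long aligned_start = round_down(start, EXAMPLE_SECTION_SIZE);
	unsigned long aligned_end = round_up(end, EXAMPLE_SECTION_SIZE);

	return (aligned_end - aligned_start) - (end - start);
}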
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 91da64de440f..d3afdf9eb65a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
- mmap.o pgd.o mmu.o
+ mmap.o pgd.o mmu.o pageattr.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 83792f4324ea..2c0c541c60ca 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn)
new_usermode |= UM_FIXUP;
if (warn)
- printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+ pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
}
return new_usermode;
@@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
* processor for us.
*/
if (addr != eaddr) {
- printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+ pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
@@ -567,7 +567,7 @@ fault:
return TYPE_FAULT;
bad:
- printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+ pr_err("Alignment trap: not handling ldm with s-bit set\n");
return TYPE_ERROR;
}
@@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
return 0;
swp:
- printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
+ pr_err("Alignment trap: not handling swp instruction\n");
bad:
/*
* Oops, we didn't handle the instruction.
*/
- printk(KERN_ERR "Alignment trap: not handling instruction "
+ pr_err("Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e028a7f2ebcc..097181e08c25 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void)
*/
u = read_extra_features();
if (!(u & 0x01000000)) {
- printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+ pr_info("Feroceon L2: Disabling L2 prefetch.\n");
write_extra_features(u | 0x01000000);
}
}
@@ -326,7 +326,7 @@ static void __init enable_l2(void)
if (!(u & 0x00400000)) {
int i, d;
- printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+ pr_info("Feroceon L2: Enabling L2\n");
d = flush_and_disable_dcache();
i = invalidate_and_disable_icache();
@@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
enable_l2();
- printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+ pr_info("Feroceon L2: Cache support initialised%s.\n",
l2_wt_override ? ", in WT override mode" : "");
}
#ifdef CONFIG_OF
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index b273739e6359..1e373d268c04 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features)
u &= ~0x01000000;
else
u |= 0x01000000;
- printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
+ pr_info("Tauros2: %s L2 prefetch.\n",
(features & CACHE_TAUROS2_PREFETCH_ON)
? "Enabling" : "Disabling");
@@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features)
u |= 0x00100000;
else
u &= ~0x00100000;
- printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
+ pr_info("Tauros2: %s line fill burt8.\n",
(features & CACHE_TAUROS2_LINEFILL_BURST8)
? "Enabling" : "Disabling");
@@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features)
*/
feat = read_extra_features();
if (!(feat & 0x00400000)) {
- printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+ pr_info("Tauros2: Enabling L2 cache.\n");
write_extra_features(feat | 0x00400000);
}
@@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features)
*/
actlr = read_actlr();
if (!(actlr & 0x00000002)) {
- printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+ pr_info("Tauros2: Enabling L2 cache.\n");
write_actlr(actlr | 0x00000002);
}
@@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features)
#endif
if (mode == NULL) {
- printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+ pr_crit("Tauros2: Unable to detect CPU mode.\n");
return;
}
- printk(KERN_INFO "Tauros2: L2 cache support initialised "
+ pr_info("Tauros2: L2 cache support initialised "
"in %s mode.\n", mode);
}
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6eb97b3a7481..91892569710f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
- if (asid != 0 && is_reserved_asid(asid)) {
+ if (asid != 0) {
/*
- * Our current ASID was active during a rollover, we can
- * continue to use it and this was just a false alarm.
+ * If our current ASID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
*/
- asid = generation | (asid & ~ASID_MASK);
- } else {
+ if (is_reserved_asid(asid))
+ return generation | (asid & ~ASID_MASK);
+
/*
- * Allocate a free ASID. If we can't find one, take a
- * note of the currently active ASIDs and mark the TLBs
- * as requiring flushes. We always count from ASID #1,
- * as we reserve ASID #0 to switch via TTBR0 and to
- * avoid speculative page table walks from hitting in
- * any partial walk caches, which could be populated
- * from overlapping level-1 descriptors used to map both
- * the module area and the userspace stack.
+ * We had a valid ASID in a previous life, so try to re-use
+ * it if possible.
*/
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
- if (asid == NUM_USER_ASIDS) {
- generation = atomic64_add_return(ASID_FIRST_VERSION,
- &asid_generation);
- flush_context(cpu);
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
- }
- __set_bit(asid, asid_map);
- cur_idx = asid;
- asid |= generation;
- cpumask_clear(mm_cpumask(mm));
+ asid &= ~ASID_MASK;
+ if (!__test_and_set_bit(asid, asid_map))
+ goto bump_gen;
}
+ /*
+ * Allocate a free ASID. If we can't find one, take a note of the
+ * currently active ASIDs and mark the TLBs as requiring flushes.
+ * We always count from ASID #1, as we reserve ASID #0 to switch
+ * via TTBR0 and to avoid speculative page table walks from hitting
+ * in any partial walk caches, which could be populated from
+ * overlapping level-1 descriptors used to map both the module
+ * area and the userspace stack.
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+ if (asid == NUM_USER_ASIDS) {
+ generation = atomic64_add_return(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+ }
+
+ __set_bit(asid, asid_map);
+ cur_idx = asid;
+
+bump_gen:
+ asid |= generation;
+ cpumask_clear(mm_cpumask(mm));
return asid;
}
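Editor's note: the restructured new_context() above first tries to re-claim the ASID this mm held before the last rollover and only then falls back to scanning the allocation bitmap. A stripped-down sketch of that ordering, reusing the file's own asid_map/ASID_MASK/NUM_USER_ASIDS symbols and omitting generation bookkeeping and flush_context():

static u64 example_pick_asid(u64 old_asid)
{
	u64 asid;

	/* Fast path: the ASID used before a rollover may still be free. */
	if (old_asid != 0) {
		asid = old_asid & ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return asid;
	}

	/* Slow path: scan from ASID #1; ASID #0 stays reserved for the
	 * TTBR0 switch described in the comment above. */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	if (asid == NUM_USER_ASIDS)
		return 0;	/* caller bumps the generation and flushes */

	__set_bit(asid, asid_map);
	return asid;
}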
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index b9bcc9d79176..70423345da26 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto)
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
- "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+ "r" ((unsigned long)kto + PAGE_SIZE - 1)
: "cc");
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e8907117861e..7864797609b3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
* arm_iommu_create_mapping)
*
* Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. More than one client might be attached to
- * the same io address space mapping.
+ * More than one client might be attached to the same io address space
+ * mapping.
*/
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
@@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev,
kref_get(&mapping->kref);
dev->archdata.mapping = mapping;
- set_dma_ops(dev, &iommu_ops);
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
@@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
*/
void arm_iommu_detach_device(struct device *dev)
{
@@ -1989,10 +1986,83 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
dev->archdata.mapping = NULL;
- set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-#endif
+static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+{
+ return coherent ? &iommu_coherent_ops : &iommu_ops;
+}
+
+static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+{
+ struct dma_iommu_mapping *mapping;
+
+ if (!iommu)
+ return false;
+
+ mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+ if (IS_ERR(mapping)) {
+ pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
+ size, dev_name(dev));
+ return false;
+ }
+
+ if (arm_iommu_attach_device(dev, mapping)) {
+ pr_warn("Failed to attached device %s to IOMMU_mapping\n",
+ dev_name(dev));
+ arm_iommu_release_mapping(mapping);
+ return false;
+ }
+
+ return true;
+}
+
+static void arm_teardown_iommu_dma_ops(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+}
+
+#else
+
+static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+{
+ return false;
+}
+
+static void arm_teardown_iommu_dma_ops(struct device *dev) { }
+
+#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
+
+#endif /* CONFIG_ARM_DMA_USE_IOMMU */
+
+static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
+{
+ struct dma_map_ops *dma_ops;
+
+ dev->archdata.dma_coherent = coherent;
+ if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
+ dma_ops = arm_get_iommu_dma_map_ops(coherent);
+ else
+ dma_ops = arm_get_dma_map_ops(coherent);
+
+ set_dma_ops(dev, dma_ops);
+}
+
+void arch_teardown_dma_ops(struct device *dev)
+{
+ arm_teardown_iommu_dma_ops(dev);
+}
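Editor's note: after this change the choice between plain and IOMMU-backed dma_map_ops is made once, by whoever calls the new arch_setup_dma_ops() hook, instead of inside arm_iommu_attach_device(). A minimal sketch of such a caller — the function names and the way dma_base/size are obtained are assumptions, not part of this patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_configure_dma(struct device *dev, bool coherent,
				  struct iommu_ops *iommu)
{
	u64 dma_base = 0;
	u64 size = 1ULL << 32;	/* illustrative 4 GiB DMA window */

	/* Picks iommu_(coherent_)ops when an IOMMU mapping can be set up,
	 * otherwise arm_(coherent_)dma_ops, and installs them on the device. */
	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
}

static void example_release_dma(struct device *dev)
{
	arch_teardown_dma_ops(dev);
}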
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ff379ac115df..d9e0d00a6699 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void)
const char *reason;
unsigned long v = 1;
- printk(KERN_INFO "CPU: Testing write buffer coherency: ");
+ pr_info("CPU: Testing write buffer coherency: ");
page = alloc_page(GFP_KERNEL);
if (page) {
@@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void)
}
if (v) {
- printk("failed, %s\n", reason);
+ pr_cont("failed, %s\n", reason);
shared_pte_mask = L_PTE_MT_UNCACHED;
} else {
- printk("ok\n");
+ pr_cont("ok\n");
}
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb8830a4c5ed..a982dc3190df 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
if (!mm)
mm = &init_mm;
- printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pr_alert("pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
- printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+ pr_alert("[%08lx] *pgd=%08llx",
addr, (long long)pgd_val(*pgd));
do {
@@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
if (pgd_bad(*pgd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pud = pud_offset(pgd, addr);
if (PTRS_PER_PUD != 1)
- printk(", *pud=%08llx", (long long)pud_val(*pud));
+ pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
- printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+ pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
@@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%08llx", (long long)pte_val(*pte));
+ pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
- printk(", *ppte=%08llx",
+ pr_cont(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
@@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
- printk(KERN_ALERT
- "Unable to handle kernel %s at virtual address %08lx\n",
- (addr < PAGE_SIZE) ? "NULL pointer dereference" :
- "paging request", addr);
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
@@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
- printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
@@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
- printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 265b836b3bd1..34b66af516ea 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4"
:
- : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
+ : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
: "cc");
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index e17ed00828d7..b98895d9fe57 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
#include <asm/tlbflush.h>
#include "mm.h"
-pte_t *fixmap_page_table;
-
static inline void set_fixmap_pte(int idx, pte_t pte)
{
unsigned long vaddr = __fix_to_virt(idx);
- set_pte_ext(fixmap_page_table + idx, pte, 0);
+ pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+ set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
- unsigned long idx = __virt_to_fix(vaddr);
- return *(fixmap_page_table + idx);
+ pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+ return *ptep;
}
void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
* With debugging enabled, kunmap_atomic forces that entry to 0.
* Make sure it was indeed properly unmapped.
*/
- BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
/*
* When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9481f85c56e6..98ad9c79ea0e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -67,7 +68,7 @@ early_param("initrd", early_initrd);
static int __init parse_tag_initrd(const struct tag *tag)
{
- printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+ pr_warn("ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
@@ -544,7 +545,7 @@ void __init mem_init(void)
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
- printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
@@ -570,7 +571,7 @@ void __init mem_init(void)
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_TOP),
+ MLK(FIXADDR_START, FIXADDR_END),
MLM(VMALLOC_START, VMALLOC_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
}
}
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+ unsigned long start;
+ unsigned long end;
+ pmdval_t mask;
+ pmdval_t prot;
+ pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+ /* Make page tables, etc. before _stext RW (set NX). */
+ {
+ .start = PAGE_OFFSET,
+ .end = (unsigned long)_stext,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+ /* Make init RW (set NX). */
+ {
+ .start = (unsigned long)__init_begin,
+ .end = (unsigned long)_sdata,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+#ifdef CONFIG_DEBUG_RODATA
+ /* Make rodata NX (set RO in ro_perms below). */
+ {
+ .start = (unsigned long)__start_rodata,
+ .end = (unsigned long)__init_begin,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+ /* Make kernel code and rodata RX (set RO). */
+ {
+ .start = (unsigned long)_stext,
+ .end = (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+ .mask = ~PMD_SECT_RDONLY,
+ .prot = PMD_SECT_RDONLY,
+#else
+ .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+ .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+ .clear = PMD_SECT_AP_WRITE,
+#endif
+ },
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is only
+ * safe to call this with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+ pmdval_t prot)
+{
+ struct mm_struct *mm;
+ pmd_t *pmd;
+
+ mm = current->active_mm;
+ pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+ else
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+ flush_pmd_entry(pmd);
+ local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return false;
+
+ return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field) { \
+ size_t i; \
+ unsigned long addr; \
+ \
+ if (!arch_has_strict_perms()) \
+ return; \
+ \
+ for (i = 0; i < ARRAY_SIZE(perms); i++) { \
+ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
+ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
+ pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+ perms[i].start, perms[i].end, \
+ SECTION_SIZE); \
+ continue; \
+ } \
+ \
+ for (addr = perms[i].start; \
+ addr < perms[i].end; \
+ addr += SECTION_SIZE) \
+ section_update(addr, perms[i].mask, \
+ perms[i].field); \
+ } \
+}
+
+static inline void fix_kernmem_perms(void)
+{
+ set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+ set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
+}
+
+void free_initmem(void)
+{
+ fix_kernmem_perms();
+ free_tcmmem();
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
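Editor's note: once mark_rodata_ro() has run, kernel text is mapped read-only, so any code that legitimately rewrites text at runtime must bracket the write with the helpers added above. A sketch of the intended calling pattern — patch_site() is a hypothetical stand-in for the actual write, and the preemption/stop_machine() constraints noted at section_update() still apply:

static void example_patch_kernel_text(void (*patch_site)(void))
{
	set_kernel_text_rw();	/* apply the ro_perms 'clear' values: text writable */
	patch_site();		/* hypothetical helper performing the modification */
	set_kernel_text_ro();	/* restore the read-only section permissions */
}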
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page);
*/
pmd_t *top_pmd;
+pmdval_t user_pmd_table = _PAGE_USER_TABLE;
+
#define CPOLICY_UNCACHED 0
#define CPOLICY_BUFFERED 1
#define CPOLICY_WRITETHROUGH 2
@@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy);
static int __init early_nocache(char *__unused)
{
char *p = "buffered";
- printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+ pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
@@ -201,7 +204,7 @@ early_param("nocache", early_nocache);
static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
- printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+ pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
@@ -354,43 +357,28 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);
-#define PTE_SET_FN(_name, pteop) \
-static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
- void *data) \
-{ \
- pte_t pte = pteop(*ptep); \
-\
- set_pte_ext(ptep, pte, 0); \
- return 0; \
-} \
-
-#define SET_MEMORY_FN(_name, callback) \
-int set_memory_##_name(unsigned long addr, int numpages) \
-{ \
- unsigned long start = addr; \
- unsigned long size = PAGE_SIZE*numpages; \
- unsigned end = start + size; \
-\
- if (start < MODULES_VADDR || start >= MODULES_END) \
- return -EINVAL;\
-\
- if (end < MODULES_VADDR || end >= MODULES_END) \
- return -EINVAL; \
-\
- apply_to_page_range(&init_mm, start, size, callback, NULL); \
- flush_tlb_kernel_range(start, end); \
- return 0;\
-}
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+ unsigned long vaddr = __fix_to_virt(idx);
+ pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
-PTE_SET_FN(ro, pte_wrprotect)
-PTE_SET_FN(rw, pte_mkwrite)
-PTE_SET_FN(x, pte_mkexec)
-PTE_SET_FN(nx, pte_mknexec)
+ /* Make sure fixmap region does not exceed available allocation. */
+ BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+ FIXADDR_END);
+ BUG_ON(idx >= __end_of_fixed_addresses);
-SET_MEMORY_FN(ro, pte_set_ro)
-SET_MEMORY_FN(rw, pte_set_rw)
-SET_MEMORY_FN(x, pte_set_x)
-SET_MEMORY_FN(nx, pte_set_nx)
+ if (pgprot_val(prot))
+ set_pte_at(NULL, vaddr, pte,
+ pfn_pte(phys >> PAGE_SHIFT, prot));
+ else
+ pte_clear(NULL, vaddr, pte);
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
/*
* Adjust the PMD section entries according to the CPU in use.
@@ -528,14 +516,23 @@ static void __init build_mem_type_table(void)
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+#ifndef CONFIG_ARM_LPAE
/*
* We don't use domains on ARMv6 (since this causes problems with
* v6/v7 kernels), so we must use a separate memory type for user
* r/o, kernel r/w to map the vectors page.
*/
-#ifndef CONFIG_ARM_LPAE
if (cpu_arch == CPU_ARCH_ARMv6)
vecs_pgprot |= L_PTE_MT_VECTORS;
+
+ /*
+ * Check whether the CPU supports the PXN bit in the
+ * Short-descriptor translation table format descriptors.
+ */
+ if (cpu_arch == CPU_ARCH_ARMv7 &&
+ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+ user_pmd_table |= PMD_PXNTABLE;
+ }
#endif
/*
@@ -605,6 +602,11 @@ static void __init build_mem_type_table(void)
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
+
+ /*
+ * Set PXN for user mappings
+ */
+ user_pgprot |= PTE_EXT_PXN;
#endif
for (i = 0; i < 16; i++) {
@@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
length = PAGE_ALIGN(md->length);
if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
- printk(KERN_ERR "MM: CPU does not support supersection "
- "mapping for 0x%08llx at 0x%08lx\n",
+ pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
* of the actual domain assignments in use.
*/
if (type->domain) {
- printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%08llx at 0x%08lx\n",
+ pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
- printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
- " at 0x%08lx invalid alignment\n",
+ pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md)
pgd_t *pgd;
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
- " at 0x%08lx in user region\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- printk(KERN_WARNING "BUG: mapping for 0x%08llx"
- " at 0x%08lx out of vmalloc space\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
@@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md)
length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
- printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
- "be mapped using pages, ignoring.\n",
- (long long)__pfn_to_phys(md->pfn), addr);
+ pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+ (long long)__pfn_to_phys(md->pfn), addr);
return;
}
@@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg)
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
- printk(KERN_WARNING
- "vmalloc area too small, limiting to %luMB\n",
+ pr_warn("vmalloc area too small, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
- printk(KERN_WARNING
- "vmalloc area is too big, limiting to %luMB\n",
+ pr_warn("vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
@@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void)
if (highmem) {
pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
- &block_start, &block_end);
+ &block_start, &block_end);
memblock_remove(reg->base, reg->size);
continue;
}
@@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void)
phys_addr_t overlap_size = reg->size - size_limit;
pr_notice("Truncating RAM at %pa-%pa to -%pa",
- &block_start, &block_end, &vmalloc_limit);
+ &block_start, &block_end, &vmalloc_limit);
memblock_remove(vmalloc_limit, overlap_size);
block_end = vmalloc_limit;
}
@@ -1326,10 +1320,10 @@ static void __init kmap_init(void)
#ifdef CONFIG_HIGHMEM
pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
- fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
- FIXADDR_START, _PAGE_KERNEL_TABLE);
#endif
+
+ early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+ _PAGE_KERNEL_TABLE);
}
static void __init map_lowmem(void)
@@ -1349,13 +1343,20 @@ static void __init map_lowmem(void)
if (start >= end)
break;
- if (end < kernel_x_start || start >= kernel_x_end) {
+ if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RW;
+
+ create_mapping(&map);
} else {
/* This better cover the entire kernel */
if (start < kernel_x_start) {
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 000000000000..004e35cdcfff
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, cdata->clear_mask);
+ pte = set_pte_bit(pte, cdata->set_mask);
+
+ set_pte_ext(ptep, pte, 0);
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ unsigned long start = addr;
+ unsigned long size = PAGE_SIZE*numpages;
+ unsigned long end = start + size;
+ int ret;
+ struct page_change_data data;
+
+ if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+ start &= PAGE_MASK;
+ end = start + size;
+ WARN_ON_ONCE(1);
+ }
+
+ if (!is_module_address(start) || !is_module_address(end - 1))
+ return -EINVAL;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(L_PTE_RDONLY),
+ __pgprot(0));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(L_PTE_RDONLY));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(L_PTE_XN),
+ __pgprot(0));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(L_PTE_XN));
+}
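A hedged usage sketch for the new set_memory_*() helpers: the region is assumed to be page-aligned and to live inside the module area, since change_memory_common() rejects anything that fails is_module_address(); the declaring header is assumed to be asm/cacheflush.h.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>	/* set_memory_*() prototypes (assumed location) */

/* Hypothetical helper: make a module-resident region read-only and non-executable. */
static int harden_module_region(void *addr, size_t size)
{
	unsigned long start = (unsigned long)addr;
	int numpages = DIV_ROUND_UP(size, PAGE_SIZE);
	int ret;

	ret = set_memory_ro(start, numpages);	/* sets L_PTE_RDONLY on each PTE */
	if (ret)
		return ret;
	return set_memory_nx(start, numpages);	/* sets L_PTE_XN on each PTE */
}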
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 22ac2a6fbfe3..8b4ee5e81c14 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -591,9 +591,10 @@ __krait_proc_info:
/*
* Some Krait processors don't indicate support for SDIV and UDIV
* instructions in the ARM instruction set, even though they actually
- * do support them.
+ * do support them. They also don't indicate support for fused multiply
+ * instructions even though they actually do support them.
*/
- __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
.size __krait_proc_info, . - __krait_proc_info
/*
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index 4e729f055a81..ec717c190e2c 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -86,20 +86,20 @@ extern void nwfpe_enter(void);
static int __init fpe_init(void)
{
if (sizeof(FPA11) > sizeof(union fp_state)) {
- printk(KERN_ERR "nwfpe: bad structure size\n");
+ pr_err("nwfpe: bad structure size\n");
return -EINVAL;
}
if (sizeof(FPREG) != 12) {
- printk(KERN_ERR "nwfpe: bad register size\n");
+ pr_err("nwfpe: bad register size\n");
return -EINVAL;
}
if (fpe_type[0] && strcmp(fpe_type, "nwfpe"))
return 0;
/* Display title, version and copyright information. */
- printk(KERN_WARNING "NetWinder Floating Point Emulator V0.97 ("
- NWFPE_BITS " precision)\n");
+ pr_info("NetWinder Floating Point Emulator V0.97 ("
+ NWFPE_BITS " precision)\n");
thread_register_notifier(&nwfpe_notifier_block);
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 1f5ee17a10e8..ad9529cc4203 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -268,7 +268,6 @@ static struct platform_driver pxa_ssp_driver = {
.probe = pxa_ssp_probe,
.remove = pxa_ssp_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "pxa2xx-ssp",
.of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 468352633101..e2be70df06c6 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -505,7 +505,6 @@ static struct platform_driver s3c_adc_driver = {
.id_table = s3c_adc_driver_ids,
.driver = {
.name = "s3c-adc",
- .owner = THIS_MODULE,
.pm = &adc_pm_ops,
},
.probe = s3c_adc_probe,
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index cda654cbf2c2..f74a8f7e5f84 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -197,6 +197,12 @@ look_for_VFP_exceptions:
tst r5, #FPSCR_IXE
bne process_exception
+ tst r5, #FPSCR_LENGTH_MASK
+ beq skip
+ orr r1, r1, #FPEXC_DEX
+ b process_exception
+skip:
+
@ Fall through to hand on to the next handler - appropriate coproc instr
@ not recognised by VFP
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2f37e1d6cb45..f6e4d56eda00 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -738,63 +738,73 @@ static int __init vfp_init(void)
vfp_vector = vfp_null_entry;
pr_info("VFP support v0.3: ");
- if (VFP_arch)
+ if (VFP_arch) {
pr_cont("not present\n");
- else if (vfpsid & FPSID_NODOUBLE) {
- pr_cont("no double precision support\n");
- } else {
- hotcpu_notifier(vfp_hotplug, 0);
-
- VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
- pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
- (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
- (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
- (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
- (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
- (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
-
- vfp_vector = vfp_support_entry;
-
- thread_register_notifier(&vfp_notifier_block);
- vfp_pm_init();
-
- /*
- * We detected VFP, and the support code is
- * in place; report VFP support to userspace.
- */
- elf_hwcap |= HWCAP_VFP;
-#ifdef CONFIG_VFPv3
- if (VFP_arch >= 2) {
- elf_hwcap |= HWCAP_VFPv3;
-
- /*
- * Check for VFPv3 D16 and VFPv4 D16. CPUs in
- * this configuration only have 16 x 64bit
- * registers.
- */
- if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
- else
- elf_hwcap |= HWCAP_VFPD32;
- }
-#endif
+ return 0;
+ /* Extract the architecture version on the CPUID scheme */
+ } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+ VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
+ VFP_arch >>= FPSID_ARCH_BIT;
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
* precision floating point operations. Only check
* for NEON if the hardware has the MVFR registers.
*/
- if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-#ifdef CONFIG_NEON
- if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
- elf_hwcap |= HWCAP_NEON;
-#endif
-#ifdef CONFIG_VFPv3
+ if (IS_ENABLED(CONFIG_NEON) &&
+ (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ elf_hwcap |= HWCAP_NEON;
+
+ if (IS_ENABLED(CONFIG_VFPv3)) {
+ u32 mvfr0 = fmrx(MVFR0);
+ if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
+ ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
+ elf_hwcap |= HWCAP_VFPv3;
+ /*
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
+ */
+ if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
+ /* also v4-D16 */
+ elf_hwcap |= HWCAP_VFPv3D16;
+ else
+ elf_hwcap |= HWCAP_VFPD32;
+ }
+
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
-#endif
}
+ /* Extract the architecture version on pre-cpuid scheme */
+ } else {
+ if (vfpsid & FPSID_NODOUBLE) {
+ pr_cont("no double precision support\n");
+ return 0;
+ }
+
+ VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
}
+
+ hotcpu_notifier(vfp_hotplug, 0);
+
+ vfp_vector = vfp_support_entry;
+
+ thread_register_notifier(&vfp_notifier_block);
+ vfp_pm_init();
+
+ /*
+ * We detected VFP, and the support code is
+ * in place; report VFP support to userspace.
+ */
+ elf_hwcap |= HWCAP_VFP;
+
+ pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
+ (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
+ VFP_arch,
+ (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
+ (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
+ (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
return 0;
}
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index 4f96c1617aae..f0465ba0f221 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -290,7 +290,7 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
u32 z, a;
if ((significand & 0xc0000000) != 0x40000000) {
- printk(KERN_WARNING "VFP: estimate_sqrt: invalid significand\n");
+ pr_warn("VFP: estimate_sqrt: invalid significand\n");
}
a = significand << 1;
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 1f85bfe6b470..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0e15f011f9c8..c7ca936ebd99 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
xen_setup_features();
- if (!xen_feature(XENFEAT_grant_map_identity)) {
- pr_warn("Please upgrade your Xen.\n"
- "If your platform has any non-coherent DMA devices, they won't work properly.\n");
- }
-
if (xen_feature(XENFEAT_dom0))
xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
else
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..351b24a979d4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/export.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
@@ -8,6 +12,7 @@
#include <linux/swiotlb.h>
#include <xen/xen.h>
+#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>
@@ -16,6 +21,114 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
+enum dma_cache_op {
+ DMA_UNMAP,
+ DMA_MAP,
+};
+static bool hypercall_cflush = false;
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+ size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+{
+ struct gnttab_cache_flush cflush;
+ unsigned long pfn;
+ size_t left = size;
+
+ pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ do {
+ size_t len = left;
+
+ /* buffers in highmem or foreign pages cannot cross page
+ * boundaries */
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ cflush.op = 0;
+ cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+ cflush.offset = offset;
+ cflush.length = len;
+
+ if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
+ cflush.op = GNTTAB_CACHE_INVAL;
+ if (op == DMA_MAP) {
+ if (dir == DMA_FROM_DEVICE)
+ cflush.op = GNTTAB_CACHE_INVAL;
+ else
+ cflush.op = GNTTAB_CACHE_CLEAN;
+ }
+ if (cflush.op)
+ HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+
+ offset = 0;
+ pfn++;
+ left -= len;
+ } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+}
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (is_device_dma_coherent(hwdev))
+ return;
+ if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ return;
+
+ __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+
+{
+ if (is_device_dma_coherent(hwdev))
+ return;
+ if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ return;
+
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (is_device_dma_coherent(hwdev))
+ return;
+ __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (is_device_dma_coherent(hwdev))
+ return;
+ __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn)
+{
+ return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+}
+
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
@@ -56,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
int __init xen_mm_init(void)
{
+ struct gnttab_cache_flush cflush;
if (!xen_initial_domain())
return 0;
xen_swiotlb_init(1, false);
xen_dma_ops = &xen_swiotlb_dma_ops;
+
+ cflush.op = 0;
+ cflush.a.dev_bus_addr = 0;
+ cflush.offset = 0;
+ cflush.length = 0;
+ if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
+ hypercall_cflush = true;
return 0;
}
arch_initcall(xen_mm_init);
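The dma_cache_maint() loop above follows a common pattern: walk a bus-address range one page at a time, clamping the first chunk so that no single operation crosses a page boundary. A stripped-down sketch of just that walk (the callback type and helper name are illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/page.h>

typedef void (*page_op_t)(unsigned long pfn, size_t offset, size_t len);

/* Split [handle, handle + size) into per-page (pfn, offset, len) chunks. */
static void for_each_page_chunk(dma_addr_t handle, size_t size, page_op_t op)
{
	unsigned long pfn = handle >> PAGE_SHIFT;
	size_t offset = handle & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)	/* clamp to the end of this page */
			len = PAGE_SIZE - offset;

		op(pfn, offset, len);

		offset = 0;		/* later chunks start on a page boundary */
		pfn++;
		left -= len;
	} while (left);
}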
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index 3b99860fd7ae..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,202 +0,0 @@
-#include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <xen/features.h>
-
-static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
-static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
-
-static int alloc_xen_mm32_scratch_page(int cpu)
-{
- struct page *page;
- unsigned long virt;
- pmd_t *pmdp;
- pte_t *ptep;
-
- if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
- return 0;
-
- page = alloc_page(GFP_KERNEL);
- if (page == NULL) {
- pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
- return -ENOMEM;
- }
-
- virt = (unsigned long)__va(page_to_phys(page));
- pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
- ptep = pte_offset_kernel(pmdp, virt);
-
- per_cpu(xen_mm32_scratch_virt, cpu) = virt;
- per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
-
- return 0;
-}
-
-static int xen_mm32_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- switch (action) {
- case CPU_UP_PREPARE:
- if (alloc_xen_mm32_scratch_page(cpu))
- return NOTIFY_BAD;
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block xen_mm32_cpu_notifier = {
- .notifier_call = xen_mm32_cpu_notify,
-};
-
-static void* xen_mm32_remap_page(dma_addr_t handle)
-{
- unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
- pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
-
- *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
- local_flush_tlb_kernel_page(virt);
-
- return (void*)virt;
-}
-
-static void xen_mm32_unmap(void *vaddr)
-{
- put_cpu_var(xen_mm32_scratch_virt);
-}
-
-
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
- size_t size, enum dma_data_direction dir,
- void (*op)(const void *, size_t, int))
-{
- unsigned long pfn;
- size_t left = size;
-
- pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
-
- do {
- size_t len = left;
- void *vaddr;
-
- if (!pfn_valid(pfn))
- {
- /* Cannot map the page, we don't know its physical address.
- * Return and hope for the best */
- if (!xen_feature(XENFEAT_grant_map_identity))
- return;
- vaddr = xen_mm32_remap_page(handle) + offset;
- op(vaddr, len, dir);
- xen_mm32_unmap(vaddr - offset);
- } else {
- struct page *page = pfn_to_page(pfn);
-
- if (PageHighMem(page)) {
- if (len + offset > PAGE_SIZE)
- len = PAGE_SIZE - offset;
-
- if (cache_is_vipt_nonaliasing()) {
- vaddr = kmap_atomic(page);
- op(vaddr + offset, len, dir);
- kunmap_atomic(vaddr);
- } else {
- vaddr = kmap_high_get(page);
- if (vaddr) {
- op(vaddr + offset, len, dir);
- kunmap_high(page);
- }
- }
- } else {
- vaddr = page_address(page) + offset;
- op(vaddr, len, dir);
- }
- }
-
- offset = 0;
- pfn++;
- left -= len;
- } while (left);
-}
-
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- /* Cannot use __dma_page_dev_to_cpu because we don't have a
- * struct page for handle */
-
- if (dir != DMA_TO_DEVICE)
- outer_inv_range(handle, handle + size);
-
- dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
-}
-
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
-
- dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
-
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(handle, handle + size);
- } else {
- outer_clean_range(handle, handle + size);
- }
-}
-
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-
-{
- if (!__generic_dma_ops(hwdev)->unmap_page)
- return;
- if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- return;
-
- __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
- return;
- __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__generic_dma_ops(hwdev)->sync_single_for_device)
- return;
- __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
-int __init xen_mm32_init(void)
-{
- int cpu;
-
- if (!xen_initial_domain())
- return 0;
-
- register_cpu_notifier(&xen_mm32_cpu_notifier);
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (alloc_xen_mm32_scratch_page(cpu)) {
- put_online_cpus();
- unregister_cpu_notifier(&xen_mm32_cpu_notifier);
- return -ENOMEM;
- }
- }
- put_online_cpus();
-
- return 0;
-}
-arch_initcall(xen_mm32_init);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b1ebd964c10..b1f9a20a3677 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,6 +2,7 @@ config ARM64
def_bool y
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
@@ -13,7 +14,9 @@ config ARM64
select ARM_ARCH_TIMER
select ARM_GIC
select AUDIT_ARCH_COMPAT_GENERIC
+ select ARM_GIC_V2M if PCI_MSI
select ARM_GIC_V3
+ select ARM_GIC_V3_ITS if PCI_MSI
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a38b02ce5f9a..2cf32e9887e1 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -49,4 +49,8 @@ config CRYPTO_AES_ARM64_NEON_BLK
select CRYPTO_AES
select CRYPTO_ABLK_HELPER
+config CRYPTO_CRC32_ARM64
+ tristate "CRC32 and CRC32C using optional ARMv8 instructions"
+ depends on ARM64
+ select CRYPTO_HASH
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a3f935fde975..5720608c50b1 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -34,5 +34,9 @@ AFLAGS_aes-neon.o := -DINTERLEAVE=4
CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
+obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
+
+CFLAGS_crc32-arm64.o := -mcpu=generic+crc
+
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 0ac73b838fa3..6c348df5bf36 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -296,4 +296,4 @@ module_exit(aes_mod_exit);
MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ccm(aes)");
+MODULE_ALIAS_CRYPTO("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 801aae32841f..b1b5b893eb20 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -44,10 +44,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#define aes_xts_encrypt neon_aes_xts_encrypt
#define aes_xts_decrypt neon_aes_xts_decrypt
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
-MODULE_ALIAS("ecb(aes)");
-MODULE_ALIAS("cbc(aes)");
-MODULE_ALIAS("ctr(aes)");
-MODULE_ALIAS("xts(aes)");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
#endif
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
new file mode 100644
index 000000000000..9499199924ae
--- /dev/null
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -0,0 +1,274 @@
+/*
+ * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions
+ *
+ * Module based on crypto/crc32c_generic.c
+ *
+ * CRC32 loop taken from Ed Nevill's Hadoop CRC patch
+ * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E
+ *
+ * Using inline assembly instead of intrinsics in order to be backwards
+ * compatible with older compilers.
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions");
+MODULE_LICENSE("GPL v2");
+
+#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+
+static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+ s64 length = len;
+
+ while ((length -= sizeof(u64)) >= 0) {
+ CRC32X(crc, get_unaligned_le64(p));
+ p += sizeof(u64);
+ }
+
+ /* The following is more efficient than the straight loop */
+ if (length & sizeof(u32)) {
+ CRC32W(crc, get_unaligned_le32(p));
+ p += sizeof(u32);
+ }
+ if (length & sizeof(u16)) {
+ CRC32H(crc, get_unaligned_le16(p));
+ p += sizeof(u16);
+ }
+ if (length & sizeof(u8))
+ CRC32B(crc, *p);
+
+ return crc;
+}
+
+static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+ s64 length = len;
+
+ while ((length -= sizeof(u64)) >= 0) {
+ CRC32CX(crc, get_unaligned_le64(p));
+ p += sizeof(u64);
+ }
+
+ /* The following is more efficient than the straight loop */
+ if (length & sizeof(u32)) {
+ CRC32CW(crc, get_unaligned_le32(p));
+ p += sizeof(u32);
+ }
+ if (length & sizeof(u16)) {
+ CRC32CH(crc, get_unaligned_le16(p));
+ p += sizeof(u16);
+ }
+ if (length & sizeof(u8))
+ CRC32CB(crc, *p);
+
+ return crc;
+}
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+struct chksum_ctx {
+ u32 key;
+};
+
+struct chksum_desc_ctx {
+ u32 crc;
+};
+
+static int chksum_init(struct shash_desc *desc)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = mctx->key;
+
+ return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy.
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (keylen != sizeof(mctx->key)) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ mctx->key = get_unaligned_le32(key);
+ return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksumc_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ put_unaligned_le32(~ctx->crc, out);
+ return 0;
+}
+
+static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksum_finup(ctx->crc, data, len, out);
+}
+
+static int chksumc_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksumc_finup(ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksum_finup(mctx->key, data, length, out);
+}
+
+static int chksumc_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksumc_finup(mctx->key, data, length, out);
+}
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = ~0;
+ return 0;
+}
+
+static struct shash_alg crc32_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksum_update,
+ .final = chksum_final,
+ .finup = chksum_finup,
+ .digest = chksum_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-arm64-hw",
+ .cra_priority = 300,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_cra_init,
+ }
+};
+
+static struct shash_alg crc32c_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksumc_update,
+ .final = chksum_final,
+ .finup = chksumc_finup,
+ .digest = chksumc_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-arm64-hw",
+ .cra_priority = 300,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_cra_init,
+ }
+};
+
+static int __init crc32_mod_init(void)
+{
+ int err;
+
+ err = crypto_register_shash(&crc32_alg);
+
+ if (err)
+ return err;
+
+ err = crypto_register_shash(&crc32c_alg);
+
+ if (err) {
+ crypto_unregister_shash(&crc32_alg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit crc32_mod_exit(void)
+{
+ crypto_unregister_shash(&crc32_alg);
+ crypto_unregister_shash(&crc32c_alg);
+}
+
+module_cpu_feature_match(CRC32, crc32_mod_init);
+module_exit(crc32_mod_exit);
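Once registered, these algorithms are reachable through the generic shash API like any other "crc32c" provider. A hedged one-shot caller sketch (error handling trimmed to the essentials; the helper name is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

/* Compute CRC32C of a buffer via whichever "crc32c" driver wins by priority. */
static int crc32c_of(const u8 *data, unsigned int len, u8 out[4])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}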
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index dc770bd4f5a5..55103e50c51b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -14,7 +14,6 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += ftrace.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -28,6 +27,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
+generic-y += msi.h
generic-y += mutex.h
generic-y += pci.h
generic-y += pci-bridge.h
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 6389d60574d9..a5abb0062d6e 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -32,6 +32,9 @@
#define rmb() dsb(ld)
#define wmb() dsb(st)
+#define dma_rmb() dmb(oshld)
+#define dma_wmb() dmb(oshst)
+
#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()
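dma_rmb() and dma_wmb() order CPU accesses to device-visible (coherent DMA) memory without paying for a full dsb-based mb(). A sketch of the descriptor-ring pattern they are intended for; the struct layout and DESC_DONE flag are hypothetical:

#include <linux/types.h>
#include <asm/barrier.h>

#define DESC_DONE	0x1

/* Hypothetical DMA descriptor shared with a device. */
struct example_desc {
	u32 status;	/* device sets DESC_DONE when the buffer is ready */
	u32 len;
	u64 addr;
};

static bool poll_rx_desc(struct example_desc *d, u32 *len)
{
	if (!(d->status & DESC_DONE))
		return false;

	dma_rmb();	/* read status before reading the rest of the descriptor */

	*len = d->len;
	return true;
}

static void post_tx_desc(struct example_desc *d, u64 buf, u32 len)
{
	d->addr = buf;
	d->len = len;

	dma_wmb();	/* publish payload fields before handing ownership to the device */

	d->status = DESC_DONE;
}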
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index cf98b362094b..243ef256b8c9 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -21,6 +21,7 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+ bool dma_coherent;
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index adeae3f6f0fc..d34189bceff7 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -54,11 +54,18 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
+ dev->archdata.dma_coherent = true;
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return 0;
}
#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
+{
+ return dev->archdata.dma_coherent;
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5674a55b5518..8127e45e2637 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+}
+
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2012c4ba8d67..0b7dfdb931df 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -165,8 +165,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
@@ -200,6 +198,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0caf7a59f6a1..14a74f136272 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
+void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -243,9 +244,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size)
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu))
+ if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
if (!icache_is_aliasing()) { /* PIPT */
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index dde3fc9c49f0..2052102b4e02 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1,43 +1 @@
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
+#include <../../arm/include/asm/xen/page-coherent.h>
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 663da771580a..f1dbca7d5c96 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
static int psci_suspend_finisher(unsigned long index)
{
- struct psci_power_state *state = __get_cpu_var(psci_power_state);
+ struct psci_power_state *state = __this_cpu_read(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
@@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index)
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
- struct psci_power_state *state = __get_cpu_var(psci_power_state);
+ struct psci_power_state *state = __this_cpu_read(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
@@ -540,6 +540,8 @@ const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 76794692c20b..9535bd555d1d 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
return 0;
}
@@ -297,31 +296,6 @@ int __attribute_const__ kvm_target_cpu(void)
return -EINVAL;
}
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
- int phys_target = kvm_target_cpu();
-
- if (init->target != phys_target)
- return -EINVAL;
-
- vcpu->arch.target = phys_target;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- if (init->features[i / 32] & (1 << (i % 32))) {
- if (i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
- set_bit(i, vcpu->arch.features);
- }
- }
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index bf69601be546..cf33f33333cc 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -182,9 +182,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
- if (addr < LOWEST_ADDR)
- return;
-
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -272,7 +269,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
{
- pgd_t *pgd = pgd_offset(mm, 0);
+ pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
unsigned long addr;
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 2a71b1cb9848..528d70d47a54 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += futex.h
-generic-y += hash.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local.h
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 6e6cd159924b..2b65ed6b277c 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -80,4 +80,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 46ed6bb9c679..4bd3c3cfc9ab 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -10,7 +10,6 @@ generic-y += emergency-restart.h
generic-y += errno.h
generic-y += fb.h
generic-y += futex.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index 420006877998..dfb66fe88b34 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -22,6 +22,57 @@
# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
# define rmb() do { barrier(); smp_check_barrier(); } while (0)
# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+/*
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
#endif
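The two-CPU example in the comment above, written out as a compilable sketch; the globals and the publishing side are illustrative, and on most architectures read_barrier_depends() expands to nothing (only Alpha-style machines need it):

#include <linux/compiler.h>
#include <asm/barrier.h>

static int a, b = 1;
static int *p = &a;

/* CPU 0: publish b through p. */
static void publisher(void)
{
	b = 2;
	smp_wmb();			/* order the store to b before the store to p */
	ACCESS_ONCE(p) = &b;
}

/* CPU 1: consume p and dereference it. */
static int consumer(void)
{
	int *q = ACCESS_ONCE(p);

	read_barrier_depends();		/* order the load of p before the load of *q */
	return *q;			/* sees 2 once q == &b */
}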
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 2d90d62edc97..d00d732784b1 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -9,8 +9,11 @@
#ifndef __BFIN_ASM_SERIAL_H__
#define __BFIN_ASM_SERIAL_H__
+#include <linux/circ_buf.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <mach/anomaly.h>
#include <mach/bfin_serial.h>
@@ -25,10 +28,6 @@
# endif
#endif
-struct circ_buf;
-struct timer_list;
-struct work_struct;
-
struct bfin_serial_port {
struct uart_port port;
unsigned int old_status;
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index e77e0c1dbe75..2de73391b81e 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += futex.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += io.h
generic-y += ioctl.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 2ca489eaadd3..d5f124832fd1 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += linkage.h
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index ed94e5ed0a23..e2503d9f1869 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -82,6 +82,11 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 3caf05cabfc5..e3f81b53578e 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index ca2c6e6f31c6..4823ad125578 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -80,5 +80,10 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 5f234a5a2320..c7a99f860b40 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ce9531ed44db..074e52bf815c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -20,7 +20,6 @@ config IA64
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
select HAVE_DMA_ATTRS
- select HAVE_KVM
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
@@ -640,8 +639,6 @@ source "security/Kconfig"
source "crypto/Kconfig"
-source "arch/ia64/kvm/Kconfig"
-
source "lib/Kconfig"
config IOMMU_HELPER
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 5441b14994fc..970d0bd99621 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
-core-$(CONFIG_KVM) += arch/ia64/kvm/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 747320be9d0e..9b41b4bcc073 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += clkdev.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index a48957c7b445..f6769eb2bbf9 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -35,26 +35,25 @@
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
-#define mb() ia64_mf()
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
+#define mb() ia64_mf()
+#define rmb() mb()
+#define wmb() mb()
+
+#define dma_rmb() mb()
+#define dma_wmb() mb()
#ifdef CONFIG_SMP
# define smp_mb() mb()
-# define smp_rmb() rmb()
-# define smp_wmb() wmb()
-# define smp_read_barrier_depends() read_barrier_depends()
-
#else
-
# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
-
#endif
+#define smp_rmb() smp_mb()
+#define smp_wmb() smp_mb()
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
deleted file mode 100644
index 4729752b7256..000000000000
--- a/arch/ia64/include/asm/kvm_host.h
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * kvm_host.h: used for kvm module, and hold ia64-specific sections.
- *
- * Copyright (C) 2007, Intel Corporation.
- *
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#ifndef __ASM_KVM_HOST_H
-#define __ASM_KVM_HOST_H
-
-#define KVM_USER_MEM_SLOTS 32
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
-
-/* define exit reasons from vmm to kvm*/
-#define EXIT_REASON_VM_PANIC 0
-#define EXIT_REASON_MMIO_INSTRUCTION 1
-#define EXIT_REASON_PAL_CALL 2
-#define EXIT_REASON_SAL_CALL 3
-#define EXIT_REASON_SWITCH_RR6 4
-#define EXIT_REASON_VM_DESTROY 5
-#define EXIT_REASON_EXTERNAL_INTERRUPT 6
-#define EXIT_REASON_IPI 7
-#define EXIT_REASON_PTC_G 8
-#define EXIT_REASON_DEBUG 20
-
-/*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
-#define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000
-#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * Define vm_buffer, used by PAL Services, base address.
- * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
- */
-#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * kvm guest's data area looks as follow:
- *
- * +----------------------+ ------- KVM_VM_DATA_SIZE
- * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET
- * | | | / |
- * | .......... | | /vcpu's struct&stack |
- * | .......... | | /---------------------|---- 0
- * | vcpu[5]'s data | | / vpd |
- * | vcpu[4]'s data | |/-----------------------|
- * | vcpu[3]'s data | / vtlb |
- * | vcpu[2]'s data | /|------------------------|
- * | vcpu[1]'s data |/ | vhpt |
- * | vcpu[0]'s data |____________________________|
- * +----------------------+ |
- * | memory dirty log | |
- * +----------------------+ |
- * | vm's data struct | |
- * +----------------------+ |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | | |
- * | vm's p2m table | |
- * | | |
- * | | |
- * | | | |
- * vm's data->| | | |
- * +----------------------+ ------- 0
- * To support large memory, needs to increase the size of p2m.
- * To support more vcpus, needs to ensure it has enough space to
- * hold vcpus' data.
- */
-
-#define KVM_VM_DATA_SHIFT 26
-#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE)
-
-#define KVM_P2M_BASE KVM_VM_DATA_BASE
-#define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20)
-
-#define VHPT_SHIFT 16
-#define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
-
-#define VTLB_SHIFT 16
-#define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5))
-
-#define VPD_SHIFT 16
-#define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT)
-
-#define VCPU_STRUCT_SHIFT 16
-#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
-
-/*
- * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
- */
-#define KVM_STK_SHIFT 16
-#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
-
-#define KVM_VM_STRUCT_SHIFT 19
-#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
-
-#define KVM_MEM_DIRY_LOG_SHIFT 19
-#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-/*Define the max vcpus and memory for Guests.*/
-#define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
- KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
-#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
-
-#define VMM_LOG_LEN 256
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/page.h>
-
-struct kvm_vcpu_data {
- char vcpu_vhpt[VHPT_SIZE];
- char vcpu_vtlb[VTLB_SIZE];
- char vcpu_vpd[VPD_SIZE];
- char vcpu_struct[VCPU_STRUCT_SIZE];
-};
-
-struct kvm_vm_data {
- char kvm_p2m[KVM_P2M_SIZE];
- char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
- char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
- struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
-};
-
-#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, vcpu_data[n]))
-#define KVM_VM_BASE (KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, kvm_vm_struct))
-#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
-
-#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
-#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
-#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
-#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \
- offsetof(struct kvm_vcpu_data, vcpu_struct))
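For reference, the layout constants above fix the derived limits completely; the standalone sketch below (not part of the removed header) reproduces the arithmetic, assuming the shift values defined above and a 16 KiB page size (PAGE_SHIFT of 14 is an assumption, ia64 supports other page sizes):

/* Sketch of the vm-data layout arithmetic; values mirror the #defines above. */
#include <stdio.h>

#define MiB (1UL << 20)

int main(void)
{
	unsigned long vm_data_size   = 1UL << 26;       /* KVM_VM_DATA_SHIFT = 26  */
	unsigned long p2m_size       = 24 * MiB;        /* KVM_P2M_SIZE            */
	unsigned long vm_struct_size = 1UL << 19;       /* KVM_VM_STRUCT_SHIFT     */
	unsigned long dirty_log_size = 1UL << 19;       /* KVM_MEM_DIRTY_LOG_SIZE  */
	/* struct kvm_vcpu_data = vhpt + vtlb + vpd + vcpu_struct, 64 KiB each */
	unsigned long vcpu_data_size = 4 * (1UL << 16);

	unsigned long max_vcpus = (vm_data_size - p2m_size - vm_struct_size -
				   dirty_log_size) / vcpu_data_size;
	/* one 8-byte p2m entry per guest page, assumed 16 KiB pages */
	unsigned long max_mem   = (p2m_size >> 3) << 14;

	printf("KVM_MAX_VCPUS    = %lu\n", max_vcpus);          /* 156 */
	printf("KVM_MAX_MEM_SIZE = %lu MiB\n", max_mem / MiB);  /* 49152 (48 GiB) */
	return 0;
}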
-
-/*IO section definitions*/
-#define IOREQ_READ 1
-#define IOREQ_WRITE 0
-
-#define STATE_IOREQ_NONE 0
-#define STATE_IOREQ_READY 1
-#define STATE_IOREQ_INPROCESS 2
-#define STATE_IORESP_READY 3
-
-/*Guest Physical address layout.*/
-#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
-#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
-#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
-#define GPFN_PIB (3UL << 60) /* PIB base */
-#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
-#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
-#define GPFN_GFW (6UL << 60) /* Guest Firmware */
-#define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */
-
-#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
-#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
-#define INVALID_MFN (~0UL)
-#define MEM_G (1UL << 30)
-#define MEM_M (1UL << 20)
-#define MMIO_START (3 * MEM_G)
-#define MMIO_SIZE (512 * MEM_M)
-#define VGA_IO_START 0xA0000UL
-#define VGA_IO_SIZE 0x20000
-#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE (64 * MEM_M)
-#define IO_SAPIC_START 0xfec00000UL
-#define IO_SAPIC_SIZE 0x100000
-#define PIB_START 0xfee00000UL
-#define PIB_SIZE 0x200000
-#define GFW_START (4 * MEM_G - 16 * MEM_M)
-#define GFW_SIZE (16 * MEM_M)
-
-/* Delivery mode, defined for ioapic.c */
-#define dest_Fixed IOSAPIC_FIXED
-#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
-
-#define NMI_VECTOR 2
-#define ExtINT_VECTOR 0
-#define NULL_VECTOR (-1)
-#define IA64_SPURIOUS_INT_VECTOR 0x0f
-
-#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
-
-/*
- *Delivery mode
- */
-#define SAPIC_DELIV_SHIFT 8
-#define SAPIC_FIXED 0x0
-#define SAPIC_LOWEST_PRIORITY 0x1
-#define SAPIC_PMI 0x2
-#define SAPIC_NMI 0x4
-#define SAPIC_INIT 0x5
-#define SAPIC_EXTINT 0x7
-
-/*
- * vcpu->requests bit members for arch
- */
-#define KVM_REQ_PTC_G 32
-#define KVM_REQ_RESUME 33
-
-struct kvm_mmio_req {
- uint64_t addr; /* physical address */
- uint64_t size; /* size in bytes */
- uint64_t data; /* data (or paddr of data) */
- uint8_t state:4;
- uint8_t dir:1; /* 1=read, 0=write */
-};
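The STATE_IOREQ_* values above describe the handshake around an emulated MMIO access; kvm_set_mmio_data() later in this patch shows the host-side completion half. A minimal sketch of that completion step for a read request (the helper name is hypothetical, not from the removed code):

/* Sketch: complete an MMIO read once the emulated device produced 'value'. */
static void mmio_req_complete_read(struct kvm_mmio_req *req, uint64_t value)
{
	if (req->dir == IOREQ_READ)          /* 1 = read, per the field comment */
		req->data = value;           /* result handed back to the guest */
	req->state = STATE_IORESP_READY;     /* vcpu is resumed on this state  */
}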
-
-/*Pal data struct */
-struct kvm_pal_call{
- /*In area*/
- uint64_t gr28;
- uint64_t gr29;
- uint64_t gr30;
- uint64_t gr31;
- /*Out area*/
- struct ia64_pal_retval ret;
-};
-
-/* Sal data structure */
-struct kvm_sal_call{
- /*In area*/
- uint64_t in0;
- uint64_t in1;
- uint64_t in2;
- uint64_t in3;
- uint64_t in4;
- uint64_t in5;
- uint64_t in6;
- uint64_t in7;
- struct sal_ret_values ret;
-};
-
-/*Guest change rr6*/
-struct kvm_switch_rr6 {
- uint64_t old_rr;
- uint64_t new_rr;
-};
-
-union ia64_ipi_a{
- unsigned long val;
- struct {
- unsigned long rv : 3;
- unsigned long ir : 1;
- unsigned long eid : 8;
- unsigned long id : 8;
- unsigned long ib_base : 44;
- };
-};
-
-union ia64_ipi_d {
- unsigned long val;
- struct {
- unsigned long vector : 8;
- unsigned long dm : 3;
- unsigned long ig : 53;
- };
-};
-
-/*ipi check exit data*/
-struct kvm_ipi_data{
- union ia64_ipi_a addr;
- union ia64_ipi_d data;
-};
-
-/*global purge data*/
-struct kvm_ptc_g {
- unsigned long vaddr;
- unsigned long rr;
- unsigned long ps;
- struct kvm_vcpu *vcpu;
-};
-
-/*Exit control data */
-struct exit_ctl_data{
- uint32_t exit_reason;
- uint32_t vm_status;
- union {
- struct kvm_mmio_req ioreq;
- struct kvm_pal_call pal_data;
- struct kvm_sal_call sal_data;
- struct kvm_switch_rr6 rr_data;
- struct kvm_ipi_data ipi_data;
- struct kvm_ptc_g ptc_g_data;
- } u;
-};
-
-union pte_flags {
- unsigned long val;
- struct {
- unsigned long p : 1; /*0 */
- unsigned long : 1; /* 1 */
- unsigned long ma : 3; /* 2-4 */
- unsigned long a : 1; /* 5 */
- unsigned long d : 1; /* 6 */
- unsigned long pl : 2; /* 7-8 */
- unsigned long ar : 3; /* 9-11 */
- unsigned long ppn : 38; /* 12-49 */
- unsigned long : 2; /* 50-51 */
- unsigned long ed : 1; /* 52 */
- };
-};
-
-union ia64_pta {
- unsigned long val;
- struct {
- unsigned long ve : 1;
- unsigned long reserved0 : 1;
- unsigned long size : 6;
- unsigned long vf : 1;
- unsigned long reserved1 : 6;
- unsigned long base : 49;
- };
-};
-
-struct thash_cb {
- /* THASH base information */
- struct thash_data *hash; /* hash table pointer */
- union ia64_pta pta;
- int num;
-};
-
-struct kvm_vcpu_stat {
- u32 halt_wakeup;
-};
-
-struct kvm_vcpu_arch {
- int launched;
- int last_exit;
- int last_run_cpu;
- int vmm_tr_slot;
- int vm_tr_slot;
- int sn_rtc_tr_slot;
-
-#define KVM_MP_STATE_RUNNABLE 0
-#define KVM_MP_STATE_UNINITIALIZED 1
-#define KVM_MP_STATE_INIT_RECEIVED 2
-#define KVM_MP_STATE_HALTED 3
- int mp_state;
-
-#define MAX_PTC_G_NUM 3
- int ptc_g_count;
- struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
-
- /*halt timer to wake up sleepy vcpus*/
- struct hrtimer hlt_timer;
- long ht_active;
-
- struct kvm_lapic *apic; /* kernel irqchip context */
- struct vpd *vpd;
-
- /* Exit data for vmm_transition*/
- struct exit_ctl_data exit_data;
-
- cpumask_t cache_coherent_map;
-
- unsigned long vmm_rr;
- unsigned long host_rr6;
- unsigned long psbits[8];
- unsigned long cr_iipa;
- unsigned long cr_isr;
- unsigned long vsa_base;
- unsigned long dirty_log_lock_pa;
- unsigned long __gp;
- /* TR and TC. */
- struct thash_data itrs[NITRS];
- struct thash_data dtrs[NDTRS];
- /* Bit is set if there is a tr/tc for the region. */
- unsigned char itr_regions;
- unsigned char dtr_regions;
- unsigned char tc_regions;
- /* purge all */
- unsigned long ptce_base;
- unsigned long ptce_count[2];
- unsigned long ptce_stride[2];
- /* itc/itm */
- unsigned long last_itc;
- long itc_offset;
- unsigned long itc_check;
- unsigned long timer_check;
- unsigned int timer_pending;
- unsigned int timer_fired;
-
- unsigned long vrr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long insvc[4]; /* Interrupt in service. */
- unsigned long xtp;
-
- unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_saved_rr0; /* from kvm_arch */
- unsigned long metaphysical_saved_rr4; /* from kvm_arch */
- unsigned long fp_psr; /* used for lazy floating-point register switching */
- unsigned long saved_gp;
- /* for physical emulation */
- int mode_flags;
- struct thash_cb vtlb;
- struct thash_cb vhpt;
- char irq_check;
- char irq_new_pending;
-
- unsigned long opcode;
- unsigned long cause;
- char log_buf[VMM_LOG_LEN];
- union context host;
- union context guest;
-
- char mmio_data[8];
-};
-
-struct kvm_vm_stat {
- u64 remote_tlb_flush;
-};
-
-struct kvm_sal_data {
- unsigned long boot_ip;
- unsigned long boot_gp;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-struct kvm_arch {
- spinlock_t dirty_log_lock;
-
- unsigned long vm_base;
- unsigned long metaphysical_rr0;
- unsigned long metaphysical_rr4;
- unsigned long vmm_init_rr;
-
- int is_sn2;
-
- struct kvm_ioapic *vioapic;
- struct kvm_vm_stat stat;
- struct kvm_sal_data rdv_sal_data;
-
- struct list_head assigned_dev_head;
- struct iommu_domain *iommu_domain;
- bool iommu_noncoherent;
-
- unsigned long irq_sources_bitmap;
- unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
-};
-
-union cpuid3_t {
- u64 value;
- struct {
- u64 number : 8;
- u64 revision : 8;
- u64 model : 8;
- u64 family : 8;
- u64 archrev : 8;
- u64 rv : 24;
- };
-};
-
-struct kvm_pt_regs {
- /* The following registers are saved by SAVE_MIN: */
- unsigned long b6; /* scratch */
- unsigned long b7; /* scratch */
-
- unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
- unsigned long ar_ssd; /* reserved for future use (scratch) */
-
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
-
- unsigned long cr_ipsr; /* interrupted task's psr */
- unsigned long cr_iip; /* interrupted task's instruction pointer */
- unsigned long cr_ifs; /* interrupted task's function state */
-
- unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
- unsigned long ar_pfs; /* prev function state */
- unsigned long ar_rsc; /* RSE configuration */
- /* The following two are valid only if cr_ipsr.cpl > 0: */
- unsigned long ar_rnat; /* RSE NaT */
- unsigned long ar_bspstore; /* RSE bspstore */
-
- unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long loadrs; /* size of dirty partition << 16 */
-
- unsigned long r1; /* the gp pointer */
- unsigned long r12; /* interrupted task's memory stack pointer */
- unsigned long r13; /* thread pointer */
-
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long r15; /* scratch */
-
- /* The remaining registers are NOT saved for system calls. */
- unsigned long r14; /* scratch */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
- unsigned long ar_ccv; /* compare/exchange value (scratch) */
-
- /*
- * Floating point registers that the kernel considers scratch:
- */
- struct ia64_fpreg f6; /* scratch */
- struct ia64_fpreg f7; /* scratch */
- struct ia64_fpreg f8; /* scratch */
- struct ia64_fpreg f9; /* scratch */
- struct ia64_fpreg f10; /* scratch */
- struct ia64_fpreg f11; /* scratch */
-
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long pad0; /* alignment pad */
-};
-
-static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
-{
- return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
-}
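The pointer arithmetic in vcpu_regs() relies on struct kvm_vcpu sitting at the bottom of its 64 KiB stack area (KVM_STK_OFFSET above): subtracting one struct kvm_pt_regs from the top of that area yields the saved register frame. An equivalent, more explicit formulation (a sketch, not part of the removed header):

static inline struct kvm_pt_regs *vcpu_regs_explicit(struct kvm_vcpu *v)
{
	/* top of the vcpu's 64 KiB stack area ... */
	unsigned long stack_top = (unsigned long)v + KVM_STK_OFFSET;

	/* ... minus one register frame, which lives at the very top */
	return (struct kvm_pt_regs *)(stack_top - sizeof(struct kvm_pt_regs));
}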
-
-typedef int kvm_vmm_entry(void);
-typedef void kvm_tramp_entry(union context *host, union context *guest);
-
-struct kvm_vmm_info{
- struct module *module;
- kvm_vmm_entry *vmm_entry;
- kvm_tramp_entry *tramp_entry;
- unsigned long vmm_ivt;
- unsigned long patch_mov_ar;
- unsigned long patch_mov_ar_sn2;
-};
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-void kvm_sal_emul(struct kvm_vcpu *vcpu);
-
-#define __KVM_HAVE_ARCH_VM_ALLOC 1
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
-static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- enum kvm_mr_change change) {}
-static inline void kvm_arch_hardware_unsetup(void) {}
-
-#endif /* __ASSEMBLY__*/
-
-#endif
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
deleted file mode 100644
index 42b233bedeb5..000000000000
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Same structure as x86's.
- * Hopefully asm-x86/pvclock-abi.h will be moved somewhere more generic.
- * For now, duplicate the same definitions here.
- */
-
-#ifndef _ASM_IA64__PVCLOCK_ABI_H
-#define _ASM_IA64__PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
-
-/*
- * These structs MUST NOT be changed.
- * They are the ABI between hypervisor and guest OS.
- * KVM is using this.
- *
- * pvclock_vcpu_time_info holds the system time and the tsc timestamp
- * of the last update. So the guest can use the tsc delta to get a
- * more precise system time. There is one per virtual cpu.
- *
- * pvclock_wall_clock references the point in time when the system
- * time was zero (usually boot time), thus the guest calculates the
- * current wall clock by adding the system time.
- *
- * Protocol for the "version" fields: the hypervisor increments it
- * (making it odd) before it starts updating the fields and increments
- * it again (making it even) when it is done.  Thus the guest can make
- * sure the time values it read are consistent by checking the version
- * before and after reading them.
- */
-
-struct pvclock_vcpu_time_info {
- u32 version;
- u32 pad0;
- u64 tsc_timestamp;
- u64 system_time;
- u32 tsc_to_system_mul;
- s8 tsc_shift;
- u8 pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct pvclock_wall_clock {
- u32 version;
- u32 sec;
- u32 nsec;
-} __attribute__((__packed__));
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64__PVCLOCK_ABI_H */
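The version protocol described in the comment above is a sequence-counter scheme; a guest-side read would look roughly like the sketch below (the helper name and the exact barrier placement are assumptions, not part of the removed header):

/* Sketch: read a consistent system_time snapshot per the version protocol.
 * Retries while an update is in flight (odd version) or the version changed
 * underneath the read. */
static u64 pvclock_read_system_time(const volatile struct pvclock_vcpu_time_info *ti)
{
	u32 version;
	u64 system_time;

	do {
		version = ti->version;
		rmb();                        /* read version before the payload */
		system_time = ti->system_time;
		rmb();                        /* read payload before re-checking */
	} while ((version & 1) || version != ti->version);

	return system_time;
}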
diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h
deleted file mode 100644
index 99503c284400..000000000000
--- a/arch/ia64/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,268 +0,0 @@
-#ifndef __ASM_IA64_KVM_H
-#define __ASM_IA64_KVM_H
-
-/*
- * kvm structure definitions for ia64
- *
- * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* Select ia64 specific features in <linux/kvm.h> */
-#define __KVM_HAVE_IOAPIC
-#define __KVM_HAVE_IRQ_LINE
-
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
-#define KVM_IOAPIC_NUM_PINS 48
-
-struct kvm_ioapic_state {
- __u64 base_address;
- __u32 ioregsel;
- __u32 id;
- __u32 irr;
- __u32 pad;
- union {
- __u64 bits;
- struct {
- __u8 vector;
- __u8 delivery_mode:3;
- __u8 dest_mode:1;
- __u8 delivery_status:1;
- __u8 polarity:1;
- __u8 remote_irr:1;
- __u8 trig_mode:1;
- __u8 mask:1;
- __u8 reserve:7;
- __u8 reserved[4];
- __u8 dest_id;
- } fields;
- } redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER 0
-#define KVM_IRQCHIP_PIC_SLAVE 1
-#define KVM_IRQCHIP_IOAPIC 2
-#define KVM_NR_IRQCHIPS 3
-
-#define KVM_CONTEXT_SIZE (8 * 1024)
-
-struct kvm_fpreg {
- union {
- unsigned long bits[2];
- long double __dummy; /* force 16-byte alignment */
- } u;
-};
-
-union context {
- /* 8K size */
- char dummy[KVM_CONTEXT_SIZE];
- struct {
- unsigned long psr;
- unsigned long pr;
- unsigned long caller_unat;
- unsigned long pad;
- unsigned long gr[32];
- unsigned long ar[128];
- unsigned long br[8];
- unsigned long cr[128];
- unsigned long rr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long pkr[8];
- struct kvm_fpreg fr[128];
- };
-};
-
-struct thash_data {
- union {
- struct {
- unsigned long p : 1; /* 0 */
- unsigned long rv1 : 1; /* 1 */
- unsigned long ma : 3; /* 2-4 */
- unsigned long a : 1; /* 5 */
- unsigned long d : 1; /* 6 */
- unsigned long pl : 2; /* 7-8 */
- unsigned long ar : 3; /* 9-11 */
- unsigned long ppn : 38; /* 12-49 */
- unsigned long rv2 : 2; /* 50-51 */
- unsigned long ed : 1; /* 52 */
- unsigned long ig1 : 11; /* 53-63 */
- };
- struct {
- unsigned long __rv1 : 53; /* 0-52 */
- unsigned long contiguous : 1; /*53 */
- unsigned long tc : 1; /* 54 TR or TC */
- unsigned long cl : 1;
- /* 55 I side or D side cache line */
- unsigned long len : 4; /* 56-59 */
- unsigned long io : 1; /* 60 entry is for io or not */
- unsigned long nomap : 1;
- /* 61 entry can't be inserted into the machine TLB. */
- unsigned long checked : 1;
- /* 62 for VTLB/VHPT sanity check */
- unsigned long invalid : 1;
- /* 63 invalid entry */
- };
- unsigned long page_flags;
- }; /* same for VHPT and TLB */
-
- union {
- struct {
- unsigned long rv3 : 2;
- unsigned long ps : 6;
- unsigned long key : 24;
- unsigned long rv4 : 32;
- };
- unsigned long itir;
- };
- union {
- struct {
- unsigned long ig2 : 12;
- unsigned long vpn : 49;
- unsigned long vrn : 3;
- };
- unsigned long ifa;
- unsigned long vadr;
- struct {
- unsigned long tag : 63;
- unsigned long ti : 1;
- };
- unsigned long etag;
- };
- union {
- struct thash_data *next;
- unsigned long rid;
- unsigned long gpaddr;
- };
-};
-
-#define NITRS 8
-#define NDTRS 8
-
-struct saved_vpd {
- unsigned long vhpi;
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long vpsr;
- unsigned long vpr;
- union {
- unsigned long vcr[128];
- struct {
- unsigned long dcr;
- unsigned long itm;
- unsigned long iva;
- unsigned long rsv1[5];
- unsigned long pta;
- unsigned long rsv2[7];
- unsigned long ipsr;
- unsigned long isr;
- unsigned long rsv3;
- unsigned long iip;
- unsigned long ifa;
- unsigned long itir;
- unsigned long iipa;
- unsigned long ifs;
- unsigned long iim;
- unsigned long iha;
- unsigned long rsv4[38];
- unsigned long lid;
- unsigned long ivr;
- unsigned long tpr;
- unsigned long eoi;
- unsigned long irr[4];
- unsigned long itv;
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long rsv5[5];
- unsigned long lrr0;
- unsigned long lrr1;
- unsigned long rsv6[46];
- };
- };
-};
-
-struct kvm_regs {
- struct saved_vpd vpd;
- /*Arch-regs*/
- int mp_state;
- unsigned long vmm_rr;
- /* TR and TC. */
- struct thash_data itrs[NITRS];
- struct thash_data dtrs[NDTRS];
- /* Bit is set if there is a tr/tc for the region. */
- unsigned char itr_regions;
- unsigned char dtr_regions;
- unsigned char tc_regions;
-
- char irq_check;
- unsigned long saved_itc;
- unsigned long itc_check;
- unsigned long timer_check;
- unsigned long timer_pending;
- unsigned long last_itc;
-
- unsigned long vrr[8];
- unsigned long ibr[8];
- unsigned long dbr[8];
- unsigned long insvc[4]; /* Interrupt in service. */
- unsigned long xtp;
-
- unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
- unsigned long metaphysical_saved_rr0; /* from kvm_arch */
- unsigned long metaphysical_saved_rr4; /* from kvm_arch */
- unsigned long fp_psr; /* used for lazy floating-point register switching */
- unsigned long saved_gp;
- /* for physical emulation */
-
- union context saved_guest;
-
- unsigned long reserved[64]; /* for future use */
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-#define KVM_IA64_VCPU_STACK_SHIFT 16
-#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
-
-struct kvm_ia64_vcpu_stack {
- unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif
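struct kvm_regs above was the payload of the generic KVM_GET_REGS/KVM_SET_REGS vcpu ioctls; from userspace the save/restore path would have looked roughly like the sketch below (error handling trimmed, and the vcpu fd is assumed to come from the usual /dev/kvm, KVM_CREATE_VM, KVM_CREATE_VCPU sequence):

/* Sketch: fetch and re-apply the ia64 vcpu register state from userspace. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_restore_regs(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	/* ... inspect or migrate regs.vpd, regs.itrs[], regs.dtrs[], ... */
	if (ioctl(vcpu_fd, KVM_SET_REGS, &regs) < 0)
		return -1;
	return 0;
}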
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index a1b49bac7951..59be3d87f86d 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -89,4 +89,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_IA64_SOCKET_H */
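The new SO_ATTACH_BPF and SO_INCOMING_CPU options are used through the ordinary setsockopt()/getsockopt() interface; a userspace sketch follows (prog_fd is assumed to come from a prior bpf(BPF_PROG_LOAD, ...) call, which is not shown):

/* Sketch: attach an eBPF socket filter and query the RX cpu of a socket. */
#include <sys/socket.h>
#include <stdio.h>

static void demo_new_sockopts(int sock_fd, int prog_fd)
{
	int cpu;
	socklen_t len = sizeof(cpu);

	/* prog_fd: eBPF program fd from bpf(BPF_PROG_LOAD, ...), assumed */
	if (setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
		       &prog_fd, sizeof(prog_fd)) < 0)
		perror("SO_ATTACH_BPF");

	/* CPU that last processed packets for this socket */
	if (getsockopt(sock_fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("incoming cpu: %d\n", cpu);
}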
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index dc063fe6646a..5f4243f0acfa 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2145,22 +2145,12 @@ doit:
return 0;
}
-static int
-pfm_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- DPRINT(("pfm_no_open called\n"));
- return -ENXIO;
-}
-
-
-
static const struct file_operations pfm_file_ops = {
.llseek = no_llseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
.unlocked_ioctl = pfm_ioctl,
- .open = pfm_no_open, /* special open code to disallow open via /proc */
.fasync = pfm_fasync,
.release = pfm_close,
.flush = pfm_flush
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
deleted file mode 100644
index 3d50ea955c4c..000000000000
--- a/arch/ia64/kvm/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- depends on HAVE_KVM || IA64
- default y
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-config KVM
- tristate "Kernel-based Virtual Machine (KVM) support"
- depends on BROKEN
- depends on HAVE_KVM && MODULES
- select PREEMPT_NOTIFIERS
- select ANON_INODES
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
- select KVM_APIC_ARCHITECTURE
- select KVM_MMIO
- ---help---
- Support hosting fully virtualized guest machines using hardware
- virtualization extensions. You will need a fairly recent
- processor equipped with virtualization extensions. You will also
- need to select one or more of the processor modules below.
-
- This module provides access to the hardware capabilities through
- a character device node named /dev/kvm.
-
- To compile this as a module, choose M here: the module
- will be called kvm.
-
- If unsure, say N.
-
-config KVM_INTEL
- tristate "KVM for Intel Itanium 2 processors support"
- depends on KVM && m
- ---help---
- Provides support for KVM on Itanium 2 processors equipped with the VT
- extensions.
-
-config KVM_DEVICE_ASSIGNMENT
- bool "KVM legacy PCI device assignment support"
- depends on KVM && PCI && IOMMU_API
- default y
- ---help---
- Provide support for legacy PCI device assignment through KVM. The
- kernel now also supports a full featured userspace device driver
- framework through VFIO, which supersedes much of this support.
-
- If unsure, say Y.
-
-source drivers/vhost/Kconfig
-
-endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
deleted file mode 100644
index 18e45ec49bbf..000000000000
--- a/arch/ia64/kvm/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# This Makefile generates asm-offsets.h and builds the KVM sources.
-#
-
-# Generate asm-offsets.h for the vmm module build
-offsets-file := asm-offsets.h
-
-always := $(offsets-file)
-targets := $(offsets-file)
-targets += arch/ia64/kvm/asm-offsets.s
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN $@
-define cmd_offsets
- (set -e; \
- echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
- echo "#define __ASM_KVM_OFFSETS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Makefile"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
-# We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
- $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
- $(wildcard $(srctree)/include/linux/*.h)
- $(call if_changed_dep,cc_s_c)
-
-$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
- $(call cmd,offsets)
-
-FORCE : $(obj)/$(offsets-file)
-
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-KVM := ../../../virt/kvm
-
-common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
- $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
-
-ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
-endif
-
-kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
-obj-$(CONFIG_KVM) += kvm.o
-
-CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
-kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
- vtlb.o process.o kvm_lib.o
-# Link in memcpy and memset to avoid possible structure-assignment errors
-kvm-intel-objs += memcpy.o memset.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
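The sed-y rule above turns the marker lines the compiler emits for asm-offsets.c (lines of the form "->SYMBOL value expression" in asm-offsets.s) into plain preprocessor constants. The generated asm-offsets.h therefore ends up containing entries roughly like the excerpt below; the numeric value shown is illustrative, since the real number comes out of the build:

/* Illustrative excerpt of the generated asm-offsets.h */
#ifndef __ASM_KVM_OFFSETS_H__
#define __ASM_KVM_OFFSETS_H__

#define VMM_PT_REGS_R8_OFFSET 32 /* offsetof(struct kvm_pt_regs, r8) */

#endif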
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
deleted file mode 100644
index 9324c875caf5..000000000000
--- a/arch/ia64/kvm/asm-offsets.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * asm-offsets.c Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- *
- * Anthony Xu <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- * Copyright (c) 2007 Intel Corporation KVM support.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kbuild.h>
-
-#include "vcpu.h"
-
-void foo(void)
-{
- DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
- DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
-
- BLANK();
-
- DEFINE(VMM_VCPU_META_RR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
- DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
- offsetof(struct kvm_vcpu,
- arch.metaphysical_saved_rr0));
- DEFINE(VMM_VCPU_VRR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.vrr[0]));
- DEFINE(VMM_VPD_IRR0_OFFSET,
- offsetof(struct vpd, irr[0]));
- DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
- offsetof(struct kvm_vcpu, arch.itc_check));
- DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
- offsetof(struct kvm_vcpu, arch.irq_check));
- DEFINE(VMM_VPD_VHPI_OFFSET,
- offsetof(struct vpd, vhpi));
- DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
- offsetof(struct kvm_vcpu, arch.vsa_base));
- DEFINE(VMM_VCPU_VPD_OFFSET,
- offsetof(struct kvm_vcpu, arch.vpd));
- DEFINE(VMM_VCPU_IRQ_CHECK,
- offsetof(struct kvm_vcpu, arch.irq_check));
- DEFINE(VMM_VCPU_TIMER_PENDING,
- offsetof(struct kvm_vcpu, arch.timer_pending));
- DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
- offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
- DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
- offsetof(struct kvm_vcpu, arch.mode_flags));
- DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
- offsetof(struct kvm_vcpu, arch.itc_offset));
- DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
- offsetof(struct kvm_vcpu, arch.last_itc));
- DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
- offsetof(struct kvm_vcpu, arch.saved_gp));
-
- BLANK();
-
- DEFINE(VMM_PT_REGS_B6_OFFSET,
- offsetof(struct kvm_pt_regs, b6));
- DEFINE(VMM_PT_REGS_B7_OFFSET,
- offsetof(struct kvm_pt_regs, b7));
- DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
- offsetof(struct kvm_pt_regs, ar_csd));
- DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
- offsetof(struct kvm_pt_regs, ar_ssd));
- DEFINE(VMM_PT_REGS_R8_OFFSET,
- offsetof(struct kvm_pt_regs, r8));
- DEFINE(VMM_PT_REGS_R9_OFFSET,
- offsetof(struct kvm_pt_regs, r9));
- DEFINE(VMM_PT_REGS_R10_OFFSET,
- offsetof(struct kvm_pt_regs, r10));
- DEFINE(VMM_PT_REGS_R11_OFFSET,
- offsetof(struct kvm_pt_regs, r11));
- DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
- offsetof(struct kvm_pt_regs, cr_ipsr));
- DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
- offsetof(struct kvm_pt_regs, cr_iip));
- DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
- offsetof(struct kvm_pt_regs, cr_ifs));
- DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
- offsetof(struct kvm_pt_regs, ar_unat));
- DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
- offsetof(struct kvm_pt_regs, ar_pfs));
- DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
- offsetof(struct kvm_pt_regs, ar_rsc));
- DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
- offsetof(struct kvm_pt_regs, ar_rnat));
-
- DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
- offsetof(struct kvm_pt_regs, ar_bspstore));
- DEFINE(VMM_PT_REGS_PR_OFFSET,
- offsetof(struct kvm_pt_regs, pr));
- DEFINE(VMM_PT_REGS_B0_OFFSET,
- offsetof(struct kvm_pt_regs, b0));
- DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
- offsetof(struct kvm_pt_regs, loadrs));
- DEFINE(VMM_PT_REGS_R1_OFFSET,
- offsetof(struct kvm_pt_regs, r1));
- DEFINE(VMM_PT_REGS_R12_OFFSET,
- offsetof(struct kvm_pt_regs, r12));
- DEFINE(VMM_PT_REGS_R13_OFFSET,
- offsetof(struct kvm_pt_regs, r13));
- DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
- offsetof(struct kvm_pt_regs, ar_fpsr));
- DEFINE(VMM_PT_REGS_R15_OFFSET,
- offsetof(struct kvm_pt_regs, r15));
- DEFINE(VMM_PT_REGS_R14_OFFSET,
- offsetof(struct kvm_pt_regs, r14));
- DEFINE(VMM_PT_REGS_R2_OFFSET,
- offsetof(struct kvm_pt_regs, r2));
- DEFINE(VMM_PT_REGS_R3_OFFSET,
- offsetof(struct kvm_pt_regs, r3));
- DEFINE(VMM_PT_REGS_R16_OFFSET,
- offsetof(struct kvm_pt_regs, r16));
- DEFINE(VMM_PT_REGS_R17_OFFSET,
- offsetof(struct kvm_pt_regs, r17));
- DEFINE(VMM_PT_REGS_R18_OFFSET,
- offsetof(struct kvm_pt_regs, r18));
- DEFINE(VMM_PT_REGS_R19_OFFSET,
- offsetof(struct kvm_pt_regs, r19));
- DEFINE(VMM_PT_REGS_R20_OFFSET,
- offsetof(struct kvm_pt_regs, r20));
- DEFINE(VMM_PT_REGS_R21_OFFSET,
- offsetof(struct kvm_pt_regs, r21));
- DEFINE(VMM_PT_REGS_R22_OFFSET,
- offsetof(struct kvm_pt_regs, r22));
- DEFINE(VMM_PT_REGS_R23_OFFSET,
- offsetof(struct kvm_pt_regs, r23));
- DEFINE(VMM_PT_REGS_R24_OFFSET,
- offsetof(struct kvm_pt_regs, r24));
- DEFINE(VMM_PT_REGS_R25_OFFSET,
- offsetof(struct kvm_pt_regs, r25));
- DEFINE(VMM_PT_REGS_R26_OFFSET,
- offsetof(struct kvm_pt_regs, r26));
- DEFINE(VMM_PT_REGS_R27_OFFSET,
- offsetof(struct kvm_pt_regs, r27));
- DEFINE(VMM_PT_REGS_R28_OFFSET,
- offsetof(struct kvm_pt_regs, r28));
- DEFINE(VMM_PT_REGS_R29_OFFSET,
- offsetof(struct kvm_pt_regs, r29));
- DEFINE(VMM_PT_REGS_R30_OFFSET,
- offsetof(struct kvm_pt_regs, r30));
- DEFINE(VMM_PT_REGS_R31_OFFSET,
- offsetof(struct kvm_pt_regs, r31));
- DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
- offsetof(struct kvm_pt_regs, ar_ccv));
- DEFINE(VMM_PT_REGS_F6_OFFSET,
- offsetof(struct kvm_pt_regs, f6));
- DEFINE(VMM_PT_REGS_F7_OFFSET,
- offsetof(struct kvm_pt_regs, f7));
- DEFINE(VMM_PT_REGS_F8_OFFSET,
- offsetof(struct kvm_pt_regs, f8));
- DEFINE(VMM_PT_REGS_F9_OFFSET,
- offsetof(struct kvm_pt_regs, f9));
- DEFINE(VMM_PT_REGS_F10_OFFSET,
- offsetof(struct kvm_pt_regs, f10));
- DEFINE(VMM_PT_REGS_F11_OFFSET,
- offsetof(struct kvm_pt_regs, f11));
- DEFINE(VMM_PT_REGS_R4_OFFSET,
- offsetof(struct kvm_pt_regs, r4));
- DEFINE(VMM_PT_REGS_R5_OFFSET,
- offsetof(struct kvm_pt_regs, r5));
- DEFINE(VMM_PT_REGS_R6_OFFSET,
- offsetof(struct kvm_pt_regs, r6));
- DEFINE(VMM_PT_REGS_R7_OFFSET,
- offsetof(struct kvm_pt_regs, r7));
- DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
- offsetof(struct kvm_pt_regs, eml_unat));
- DEFINE(VMM_VCPU_IIPA_OFFSET,
- offsetof(struct kvm_vcpu, arch.cr_iipa));
- DEFINE(VMM_VCPU_OPCODE_OFFSET,
- offsetof(struct kvm_vcpu, arch.opcode));
- DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
- DEFINE(VMM_VCPU_ISR_OFFSET,
- offsetof(struct kvm_vcpu, arch.cr_isr));
- DEFINE(VMM_PT_REGS_R16_SLOT,
- (((offsetof(struct kvm_pt_regs, r16)
- - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
- DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
- offsetof(struct kvm_vcpu, arch.mode_flags));
- DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
- BLANK();
-
- DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
- DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
- DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
- offsetof(struct kvm_vcpu, arch.insvc[0]));
- DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
- DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
-
- DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
- DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
- DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
- DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
- DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
- DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
- DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
- DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
- DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
- DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
- DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
- DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
- DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
- DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
- DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
- DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
- DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
- DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
- DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
- DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
- DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
- DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
- DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
- DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
- DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
- DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
- DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
- DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
- DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
- DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
- DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
- DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
- DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
- DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
- BLANK();
-}
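The DEFINE() and BLANK() helpers used throughout foo() come from include/linux/kbuild.h; they emit the "->SYMBOL value" marker lines that the Makefile rule in this patch post-processes. Their definitions are roughly as follows (paraphrased, not part of the removed file):

/* Each DEFINE() forces the compiler to emit a "->SYM <constant> <expr>"
 * line into the generated .s output; BLANK() emits a separator line. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )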
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
deleted file mode 100644
index c0785a728271..000000000000
--- a/arch/ia64/kvm/irq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * irq.h: In-kernel interrupt controller related definitions
- * Copyright (c) 2008, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include "lapic.h"
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
- return 1;
-}
-
-#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
deleted file mode 100644
index dbe46f43884d..000000000000
--- a/arch/ia64/kvm/kvm-ia64.c
+++ /dev/null
@@ -1,1942 +0,0 @@
-/*
- * kvm-ia64.c: Basic KVM support on Itanium series processors
- *
- *
- * Copyright (C) 2007, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/percpu.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/bitops.h>
-#include <linux/hrtimer.h>
-#include <linux/uaccess.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/pci.h>
-
-#include <asm/pgtable.h>
-#include <asm/gcc_intrin.h>
-#include <asm/pal.h>
-#include <asm/cacheflush.h>
-#include <asm/div64.h>
-#include <asm/tlb.h>
-#include <asm/elf.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "misc.h"
-#include "vti.h"
-#include "iodev.h"
-#include "ioapic.h"
-#include "lapic.h"
-#include "irq.h"
-
-static unsigned long kvm_vmm_base;
-static unsigned long kvm_vsa_base;
-static unsigned long kvm_vm_buffer;
-static unsigned long kvm_vm_buffer_size;
-unsigned long kvm_vmm_gp;
-
-static long vp_env_info;
-
-static struct kvm_vmm_info *kvm_vmm_info;
-
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- { NULL }
-};
-
-static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (vcpu->kvm->arch.is_sn2)
- return rtc_time();
- else
-#endif
- return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-static void kvm_flush_icache(unsigned long start, unsigned long len)
-{
- int l;
-
- for (l = 0; l < (len + 32); l += 32)
- ia64_fc((void *)(start + l));
-
- ia64_sync_i();
- ia64_srlz_i();
-}
-
-static void kvm_flush_tlb_all(void)
-{
- unsigned long i, j, count0, count1, stride0, stride1, addr;
- long flags;
-
- addr = local_cpu_data->ptce_base;
- count0 = local_cpu_data->ptce_count[0];
- count1 = local_cpu_data->ptce_count[1];
- stride0 = local_cpu_data->ptce_stride[0];
- stride1 = local_cpu_data->ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-
-long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
- (u64)opt_handler);
-
- return iprv.status;
-}
-
-static DEFINE_SPINLOCK(vp_lock);
-
-int kvm_arch_hardware_enable(void)
-{
- long status;
- long tmp_base;
- unsigned long pte;
- unsigned long saved_psr;
- int slot;
-
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
- local_irq_save(saved_psr);
- slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- local_irq_restore(saved_psr);
- if (slot < 0)
- return -EINVAL;
-
- spin_lock(&vp_lock);
- status = ia64_pal_vp_init_env(kvm_vsa_base ?
- VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
- __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
- if (status != 0) {
- spin_unlock(&vp_lock);
- printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return -EINVAL;
- }
-
- if (!kvm_vsa_base) {
- kvm_vsa_base = tmp_base;
- printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
- }
- spin_unlock(&vp_lock);
- ia64_ptr_entry(0x3, slot);
-
- return 0;
-}
-
-void kvm_arch_hardware_disable(void)
-{
-
- long status;
- int slot;
- unsigned long pte;
- unsigned long saved_psr;
- unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
-
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
- PAGE_KERNEL));
-
- local_irq_save(saved_psr);
- slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- local_irq_restore(saved_psr);
- if (slot < 0)
- return;
-
- status = ia64_pal_vp_exit_env(host_iva);
- if (status)
- printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
- status);
- ia64_ptr_entry(0x3, slot);
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
- *(int *)rtn = 0;
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-
- int r;
-
- switch (ext) {
- case KVM_CAP_IRQCHIP:
- case KVM_CAP_MP_STATE:
- case KVM_CAP_IRQ_INJECT_STATUS:
- case KVM_CAP_IOAPIC_POLARITY_IGNORED:
- r = 1;
- break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
- case KVM_CAP_IOMMU:
- r = iommu_present(&pci_bus_type);
- break;
-#endif
- default:
- r = 0;
- }
- return r;
-
-}
-
-static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 1;
- return 0;
-}
-
-static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct kvm_mmio_req *p;
- struct kvm_io_device *mmio_dev;
- int r;
-
- p = kvm_get_vcpu_ioreq(vcpu);
-
- if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
- goto mmio;
- vcpu->mmio_needed = 1;
- vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
- vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
- vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
-
- if (vcpu->mmio_is_write)
- memcpy(vcpu->arch.mmio_data, &p->data, p->size);
- memcpy(kvm_run->mmio.data, &p->data, p->size);
- kvm_run->exit_reason = KVM_EXIT_MMIO;
- return 0;
-mmio:
- if (p->dir)
- r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
- p->size, &p->data);
- else
- r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
- p->size, &p->data);
- if (r)
- printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
- p->state = STATE_IORESP_READY;
-
- return 1;
-}
-
-static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_PAL_CALL)
- return kvm_pal_emul(vcpu, kvm_run);
- else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 2;
- return 0;
- }
-}
-
-static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- kvm_sal_emul(vcpu);
- return 1;
- } else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 3;
- return 0;
- }
-
-}
-
-static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (!test_and_set_bit(vector, &vpd->irr[0])) {
- vcpu->arch.irq_new_pending = 1;
- kvm_vcpu_kick(vcpu);
- return 1;
- }
- return 0;
-}
-
-/*
- * dm: delivery mode of the IPI.
- * vector: vector (or value) to deliver.
- */
-static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
- uint64_t vector)
-{
- switch (dm) {
- case SAPIC_FIXED:
- break;
- case SAPIC_NMI:
- vector = 2;
- break;
- case SAPIC_EXTINT:
- vector = 0;
- break;
- case SAPIC_INIT:
- case SAPIC_PMI:
- default:
- printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
- return;
- }
- __apic_accept_irq(vcpu, vector);
-}
-
-static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
- unsigned long eid)
-{
- union ia64_lid lid;
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- lid.val = VCPU_LID(vcpu);
- if (lid.id == id && lid.eid == eid)
- return vcpu;
- }
-
- return NULL;
-}
-
-static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
- struct kvm_vcpu *target_vcpu;
- struct kvm_pt_regs *regs;
- union ia64_ipi_a addr = p->u.ipi_data.addr;
- union ia64_ipi_d data = p->u.ipi_data.data;
-
- target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
- if (!target_vcpu)
- return handle_vm_error(vcpu, kvm_run);
-
- if (!target_vcpu->arch.launched) {
- regs = vcpu_regs(target_vcpu);
-
- regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
- regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
-
- target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- if (waitqueue_active(&target_vcpu->wq))
- wake_up_interruptible(&target_vcpu->wq);
- } else {
- vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
- if (target_vcpu != vcpu)
- kvm_vcpu_kick(target_vcpu);
- }
-
- return 1;
-}
-
-struct call_data {
- struct kvm_ptc_g ptc_g_data;
- struct kvm_vcpu *vcpu;
-};
-
-static void vcpu_global_purge(void *info)
-{
- struct call_data *p = (struct call_data *)info;
- struct kvm_vcpu *vcpu = p->vcpu;
-
- if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
- return;
-
- set_bit(KVM_REQ_PTC_G, &vcpu->requests);
- if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
- vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
- p->ptc_g_data;
- } else {
- clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
- vcpu->arch.ptc_g_count = 0;
- set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
- }
-}
-
-static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
- struct kvm *kvm = vcpu->kvm;
- struct call_data call_data;
- int i;
- struct kvm_vcpu *vcpui;
-
- call_data.ptc_g_data = p->u.ptc_g_data;
-
- kvm_for_each_vcpu(i, vcpui, kvm) {
- if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
- vcpu == vcpui)
- continue;
-
- if (waitqueue_active(&vcpui->wq))
- wake_up_interruptible(&vcpui->wq);
-
- if (vcpui->cpu != -1) {
- call_data.vcpu = vcpui;
- smp_call_function_single(vcpui->cpu,
- vcpu_global_purge, &call_data, 1);
- } else
- printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
-
- }
- return 1;
-}
-
-static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- return 1;
-}
-
-static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
-{
- unsigned long pte, rtc_phys_addr, map_addr;
- int slot;
-
- map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
- rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
- pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
- slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
- vcpu->arch.sn_rtc_tr_slot = slot;
- if (slot < 0) {
- printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
- slot = 0;
- }
- return slot;
-}
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-
- ktime_t kt;
- long itc_diff;
- unsigned long vcpu_now_itc;
- unsigned long expires;
- struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
- unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (irqchip_in_kernel(vcpu->kvm)) {
-
- vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
-
- if (time_after(vcpu_now_itc, vpd->itm)) {
- vcpu->arch.timer_check = 1;
- return 1;
- }
- itc_diff = vpd->itm - vcpu_now_itc;
- if (itc_diff < 0)
- itc_diff = -itc_diff;
-
- expires = div64_u64(itc_diff, cyc_per_usec);
- kt = ktime_set(0, 1000 * expires);
-
- vcpu->arch.ht_active = 1;
- hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
- kvm_vcpu_block(vcpu);
- hrtimer_cancel(p_ht);
- vcpu->arch.ht_active = 0;
-
- if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
- kvm_cpu_has_pending_timer(vcpu))
- if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
- return -EINTR;
- return 1;
- } else {
- printk(KERN_ERR"kvm: Unsupported userspace halt!");
- return 0;
- }
-}
-
-static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
- return 0;
-}
-
-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- return 1;
-}
-
-static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- printk("VMM: %s", vcpu->arch.log_buf);
- return 1;
-}
-
-static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run) = {
- [EXIT_REASON_VM_PANIC] = handle_vm_error,
- [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
- [EXIT_REASON_PAL_CALL] = handle_pal_call,
- [EXIT_REASON_SAL_CALL] = handle_sal_call,
- [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
- [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
- [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
- [EXIT_REASON_IPI] = handle_ipi,
- [EXIT_REASON_PTC_G] = handle_global_purge,
- [EXIT_REASON_DEBUG] = handle_vcpu_debug,
-
-};
-
-static const int kvm_vti_max_exit_handlers =
- sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-
-static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p_exit_data;
-
- p_exit_data = kvm_get_exit_data(vcpu);
- return p_exit_data->exit_reason;
-}
-
-/*
- * The guest has exited. See if we can fix it or if we need userspace
- * assistance.
- */
-static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- u32 exit_reason = kvm_get_exit_reason(vcpu);
- vcpu->arch.last_exit = exit_reason;
-
- if (exit_reason < kvm_vti_max_exit_handlers
- && kvm_vti_exit_handlers[exit_reason])
- return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
- else {
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = exit_reason;
- }
- return 0;
-}
-
-static inline void vti_set_rr6(unsigned long rr6)
-{
- ia64_set_rr(RR6, rr6);
- ia64_srlz_i();
-}
-
-static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
-{
- unsigned long pte;
- struct kvm *kvm = vcpu->kvm;
- int r;
-
- /*Insert a pair of tr to map vmm*/
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
- r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
- if (r < 0)
- goto out;
- vcpu->arch.vmm_tr_slot = r;
- /* Insert a pair of tr entries to map the vm's data */
- pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
- r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
- pte, KVM_VM_DATA_SHIFT);
- if (r < 0)
- goto out;
- vcpu->arch.vm_tr_slot = r;
-
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (kvm->arch.is_sn2) {
- r = kvm_sn2_setup_mappings(vcpu);
- if (r < 0)
- goto out;
- }
-#endif
-
- r = 0;
-out:
- return r;
-}
-
-static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
- ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- if (kvm->arch.is_sn2)
- ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
-#endif
-}
-
-static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
- int r;
- int cpu = smp_processor_id();
-
- if (vcpu->arch.last_run_cpu != cpu ||
- per_cpu(last_vcpu, cpu) != vcpu) {
- per_cpu(last_vcpu, cpu) = vcpu;
- vcpu->arch.last_run_cpu = cpu;
- kvm_flush_tlb_all();
- }
-
- vcpu->arch.host_rr6 = ia64_get_rr(RR6);
- vti_set_rr6(vcpu->arch.vmm_rr);
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- return r;
-}
-
-static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
-{
- kvm_purge_vmm_mapping(vcpu);
- vti_set_rr6(vcpu->arch.host_rr6);
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- union context *host_ctx, *guest_ctx;
- int r, idx;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-again:
- if (signal_pending(current)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- goto out;
- }
-
- preempt_disable();
- local_irq_disable();
-
- /*Get host and guest context with guest address space.*/
- host_ctx = kvm_get_host_context(vcpu);
- guest_ctx = kvm_get_guest_context(vcpu);
-
- clear_bit(KVM_REQ_KICK, &vcpu->requests);
-
- r = kvm_vcpu_pre_transition(vcpu);
- if (r < 0)
- goto vcpu_run_fail;
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- vcpu->mode = IN_GUEST_MODE;
- kvm_guest_enter();
-
- /*
- * Transition to the guest
- */
- kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-
- kvm_vcpu_post_transition(vcpu);
-
- vcpu->arch.launched = 1;
- set_bit(KVM_REQ_KICK, &vcpu->requests);
- local_irq_enable();
-
- /*
- * We must have an instruction between local_irq_enable() and
- * kvm_guest_exit(), so the timer interrupt isn't delayed by
- * the interrupt shadow. The stat.exits increment will do nicely.
- * But we need to prevent reordering, hence this barrier():
- */
- barrier();
- kvm_guest_exit();
- vcpu->mode = OUTSIDE_GUEST_MODE;
- preempt_enable();
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- r = kvm_handle_exit(kvm_run, vcpu);
-
- if (r > 0) {
- if (!need_resched())
- goto again;
- }
-
-out:
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- if (r > 0) {
- cond_resched();
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- goto again;
- }
-
- return r;
-
-vcpu_run_fail:
- local_irq_enable();
- preempt_enable();
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- goto out;
-}
-
-static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
-
- if (!vcpu->mmio_is_write)
- memcpy(&p->data, vcpu->arch.mmio_data, 8);
- p->state = STATE_IORESP_READY;
-}
-
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- int r;
- sigset_t sigsaved;
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
- if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
- kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
- r = -EAGAIN;
- goto out;
- }
-
- if (vcpu->mmio_needed) {
- memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
- kvm_set_mmio_data(vcpu);
- vcpu->mmio_read_completed = 1;
- vcpu->mmio_needed = 0;
- }
- r = __vcpu_run(vcpu, kvm_run);
-out:
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
- return r;
-}
-
-struct kvm *kvm_arch_alloc_vm(void)
-{
-
- struct kvm *kvm;
- uint64_t vm_base;
-
- BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
-
- vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
-
- if (!vm_base)
- return NULL;
-
- memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
- kvm = (struct kvm *)(vm_base +
- offsetof(struct kvm_vm_data, kvm_vm_struct));
- kvm->arch.vm_base = vm_base;
- printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
-
- return kvm;
-}
-
-struct kvm_ia64_io_range {
- unsigned long start;
- unsigned long size;
- unsigned long type;
-};
-
-static const struct kvm_ia64_io_range io_ranges[] = {
- {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
- {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
- {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
- {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
- {PIB_START, PIB_SIZE, GPFN_PIB},
-};
-
-static void kvm_build_io_pmt(struct kvm *kvm)
-{
- unsigned long i, j;
-
- /* Mark I/O ranges */
- for (i = 0; i < ARRAY_SIZE(io_ranges); i++) {
- for (j = io_ranges[i].start;
- j < io_ranges[i].start + io_ranges[i].size;
- j += PAGE_SIZE)
- kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
- io_ranges[i].type, 0);
- }
-
-}
-
-/*Use unused rids to virtualize guest rid.*/
-#define GUEST_PHYSICAL_RR0 0x1739
-#define GUEST_PHYSICAL_RR4 0x2739
-#define VMM_INIT_RR 0x1660
-
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
- BUG_ON(!kvm);
-
- if (type)
- return -EINVAL;
-
- kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
- kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
- kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
- kvm->arch.vmm_init_rr = VMM_INIT_RR;
-
- /*
- * Fill P2M entries for MMIO/IO ranges
- */
- kvm_build_io_pmt(kvm);
-
- INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
-
- /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
- set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-
- return 0;
-}
-
-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
- struct kvm_irqchip *chip)
-{
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_IOAPIC:
- r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- return r;
-}
-
-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
- int r;
-
- r = 0;
- switch (chip->chip_id) {
- case KVM_IRQCHIP_IOAPIC:
- r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
- break;
- default:
- r = -EINVAL;
- break;
- }
- return r;
-}
-
-#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- int i;
-
- for (i = 0; i < 16; i++) {
- vpd->vgr[i] = regs->vpd.vgr[i];
- vpd->vbgr[i] = regs->vpd.vbgr[i];
- }
- for (i = 0; i < 128; i++)
- vpd->vcr[i] = regs->vpd.vcr[i];
- vpd->vhpi = regs->vpd.vhpi;
- vpd->vnat = regs->vpd.vnat;
- vpd->vbnat = regs->vpd.vbnat;
- vpd->vpsr = regs->vpd.vpsr;
-
- vpd->vpr = regs->vpd.vpr;
-
- memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
-
- RESTORE_REGS(mp_state);
- RESTORE_REGS(vmm_rr);
- memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
- memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
- RESTORE_REGS(itr_regions);
- RESTORE_REGS(dtr_regions);
- RESTORE_REGS(tc_regions);
- RESTORE_REGS(irq_check);
- RESTORE_REGS(itc_check);
- RESTORE_REGS(timer_check);
- RESTORE_REGS(timer_pending);
- RESTORE_REGS(last_itc);
- for (i = 0; i < 8; i++) {
- vcpu->arch.vrr[i] = regs->vrr[i];
- vcpu->arch.ibr[i] = regs->ibr[i];
- vcpu->arch.dbr[i] = regs->dbr[i];
- }
- for (i = 0; i < 4; i++)
- vcpu->arch.insvc[i] = regs->insvc[i];
- RESTORE_REGS(xtp);
- RESTORE_REGS(metaphysical_rr0);
- RESTORE_REGS(metaphysical_rr4);
- RESTORE_REGS(metaphysical_saved_rr0);
- RESTORE_REGS(metaphysical_saved_rr4);
- RESTORE_REGS(fp_psr);
- RESTORE_REGS(saved_gp);
-
- vcpu->arch.irq_new_pending = 1;
- vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
- set_bit(KVM_REQ_RESUME, &vcpu->requests);
-
- return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
- bool line_status)
-{
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
- irq_event->irq, irq_event->level,
- line_status);
- return 0;
-}
-
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
- int r = -ENOTTY;
-
- switch (ioctl) {
- case KVM_CREATE_IRQCHIP:
- r = -EFAULT;
- r = kvm_ioapic_init(kvm);
- if (r)
- goto out;
- r = kvm_setup_default_irq_routing(kvm);
- if (r) {
- mutex_lock(&kvm->slots_lock);
- kvm_ioapic_destroy(kvm);
- mutex_unlock(&kvm->slots_lock);
- goto out;
- }
- break;
- case KVM_GET_IRQCHIP: {
- /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
-
- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
- goto out;
- r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &chip, sizeof chip))
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_IRQCHIP: {
- /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
-
- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
- goto out;
- r = -ENXIO;
- if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
- if (r)
- goto out;
- r = 0;
- break;
- }
- default:
- ;
- }
-out:
- return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
- struct kvm_translation *tr)
-{
-
- return -EINVAL;
-}
-
-static int kvm_alloc_vmm_area(void)
-{
- if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
- kvm_vmm_base = __get_free_pages(GFP_KERNEL,
- get_order(KVM_VMM_SIZE));
- if (!kvm_vmm_base)
- return -ENOMEM;
-
- memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
- kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
-
- printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
- kvm_vmm_base, kvm_vm_buffer);
- }
-
- return 0;
-}
-
-static void kvm_free_vmm_area(void)
-{
- if (kvm_vmm_base) {
- /* Zero this area before freeing it to avoid leaking stale data. */
- memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
- free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
- kvm_vmm_base = 0;
- kvm_vm_buffer = 0;
- kvm_vsa_base = 0;
- }
-}
-
-static int vti_init_vpd(struct kvm_vcpu *vcpu)
-{
- int i;
- union cpuid3_t cpuid3;
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (IS_ERR(vpd))
- return PTR_ERR(vpd);
-
- /* CPUID init */
- for (i = 0; i < 5; i++)
- vpd->vcpuid[i] = ia64_get_cpuid(i);
-
- /* Limit the CPUID number to 5 */
- cpuid3.value = vpd->vcpuid[3];
- cpuid3.number = 4; /* 5 - 1 */
- vpd->vcpuid[3] = cpuid3.value;
-
- /*Set vac and vdc fields*/
- vpd->vac.a_from_int_cr = 1;
- vpd->vac.a_to_int_cr = 1;
- vpd->vac.a_from_psr = 1;
- vpd->vac.a_from_cpuid = 1;
- vpd->vac.a_cover = 1;
- vpd->vac.a_bsw = 1;
- vpd->vac.a_int = 1;
- vpd->vdc.d_vmsw = 1;
-
- /*Set virtual buffer*/
- vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
-
- return 0;
-}
-
-static int vti_create_vp(struct kvm_vcpu *vcpu)
-{
- long ret;
- struct vpd *vpd = vcpu->arch.vpd;
- unsigned long vmm_ivt;
-
- vmm_ivt = kvm_vmm_info->vmm_ivt;
-
- printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
-
- ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
-
- if (ret) {
- printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static void init_ptce_info(struct kvm_vcpu *vcpu)
-{
- ia64_ptce_info_t ptce = {0};
-
- ia64_get_ptce(&ptce);
- vcpu->arch.ptce_base = ptce.base;
- vcpu->arch.ptce_count[0] = ptce.count[0];
- vcpu->arch.ptce_count[1] = ptce.count[1];
- vcpu->arch.ptce_stride[0] = ptce.stride[0];
- vcpu->arch.ptce_stride[1] = ptce.stride[1];
-}
-
-static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
-{
- struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
-
- if (hrtimer_cancel(p_ht))
- hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
-}
-
-static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
-{
- struct kvm_vcpu *vcpu;
- wait_queue_head_t *q;
-
- vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
- q = &vcpu->wq;
-
- if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
- goto out;
-
- if (waitqueue_active(q))
- wake_up_interruptible(q);
-
-out:
- vcpu->arch.timer_fired = 1;
- vcpu->arch.timer_check = 1;
- return HRTIMER_NORESTART;
-}
-
-#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
-
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
-{
- return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu *v;
- int r;
- int i;
- long itc_offset;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- union context *p_ctx = &vcpu->arch.guest;
- struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
-
- /*Init vcpu context for first run.*/
- if (IS_ERR(vmm_vcpu))
- return PTR_ERR(vmm_vcpu);
-
- if (kvm_vcpu_is_bsp(vcpu)) {
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- /*Set entry address for first run.*/
- regs->cr_iip = PALE_RESET_ENTRY;
-
- /*Initialize itc offset for vcpus*/
- itc_offset = 0UL - kvm_get_itc(vcpu);
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- v = (struct kvm_vcpu *)((char *)vcpu +
- sizeof(struct kvm_vcpu_data) * i);
- v->arch.itc_offset = itc_offset;
- v->arch.last_itc = 0;
- }
- } else
- vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
-
- r = -ENOMEM;
- vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
- if (!vcpu->arch.apic)
- goto out;
- vcpu->arch.apic->vcpu = vcpu;
-
- p_ctx->gr[1] = 0;
- p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
- p_ctx->gr[13] = (unsigned long)vmm_vcpu;
- p_ctx->psr = 0x1008522000UL;
- p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
- p_ctx->caller_unat = 0;
- p_ctx->pr = 0x0;
- p_ctx->ar[36] = 0x0; /*unat*/
- p_ctx->ar[19] = 0x0; /*rnat*/
- p_ctx->ar[18] = (unsigned long)vmm_vcpu +
- ((sizeof(struct kvm_vcpu)+15) & ~15);
- p_ctx->ar[64] = 0x0; /*pfs*/
- p_ctx->cr[0] = 0x7e04UL;
- p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
- p_ctx->cr[8] = 0x3c;
-
- /*Initialize region register*/
- p_ctx->rr[0] = 0x30;
- p_ctx->rr[1] = 0x30;
- p_ctx->rr[2] = 0x30;
- p_ctx->rr[3] = 0x30;
- p_ctx->rr[4] = 0x30;
- p_ctx->rr[5] = 0x30;
- p_ctx->rr[7] = 0x30;
-
- /*Initialize branch register 0*/
- p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
-
- vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
- vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
- vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
-
- hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vcpu->arch.hlt_timer.function = hlt_timer_fn;
-
- vcpu->arch.last_run_cpu = -1;
- vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
- vcpu->arch.vsa_base = kvm_vsa_base;
- vcpu->arch.__gp = kvm_vmm_gp;
- vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
- vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
- vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
- init_ptce_info(vcpu);
-
- r = 0;
-out:
- return r;
-}
-
-static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
-{
- unsigned long psr;
- int r;
-
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- if (r)
- goto fail;
- r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
- if (r)
- goto fail;
-
- r = vti_init_vpd(vcpu);
- if (r) {
- printk(KERN_DEBUG"kvm: vpd init error!!\n");
- goto uninit;
- }
-
- r = vti_create_vp(vcpu);
- if (r)
- goto uninit;
-
- kvm_purge_vmm_mapping(vcpu);
-
- return 0;
-uninit:
- kvm_vcpu_uninit(vcpu);
-fail:
- return r;
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
- unsigned int id)
-{
- struct kvm_vcpu *vcpu;
- unsigned long vm_base = kvm->arch.vm_base;
- int r;
- int cpu;
-
- BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
-
- r = -EINVAL;
- if (id >= KVM_MAX_VCPUS) {
- printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
- KVM_MAX_VCPUS);
- goto fail;
- }
-
- r = -ENOMEM;
- if (!vm_base) {
- printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
- goto fail;
- }
- vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
- vcpu_data[id].vcpu_struct));
- vcpu->kvm = kvm;
-
- cpu = get_cpu();
- r = vti_vcpu_setup(vcpu, id);
- put_cpu();
-
- if (r) {
- printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
- goto fail;
- }
-
- return vcpu;
-fail:
- return ERR_PTR(r);
-}
-
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
-void kvm_arch_free_vm(struct kvm *kvm)
-{
- unsigned long vm_base = kvm->arch.vm_base;
-
- if (vm_base) {
- memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
- free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
- }
-
-}
-
-static void kvm_release_vm_pages(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int j;
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- for (j = 0; j < memslot->npages; j++) {
- if (memslot->rmap[j])
- put_page((struct page *)memslot->rmap[j]);
- }
- }
-}
-
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
- kvm_iommu_unmap_guest(kvm);
- kvm_free_all_assigned_devices(kvm);
- kfree(kvm->arch.vioapic);
- kvm_release_vm_pages(kvm);
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- if (cpu != vcpu->cpu) {
- vcpu->cpu = cpu;
- if (vcpu->arch.ht_active)
- kvm_migrate_hlt_timer(vcpu);
- }
-}
-
-#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
- int i;
-
- vcpu_load(vcpu);
-
- for (i = 0; i < 16; i++) {
- regs->vpd.vgr[i] = vpd->vgr[i];
- regs->vpd.vbgr[i] = vpd->vbgr[i];
- }
- for (i = 0; i < 128; i++)
- regs->vpd.vcr[i] = vpd->vcr[i];
- regs->vpd.vhpi = vpd->vhpi;
- regs->vpd.vnat = vpd->vnat;
- regs->vpd.vbnat = vpd->vbnat;
- regs->vpd.vpsr = vpd->vpsr;
- regs->vpd.vpr = vpd->vpr;
-
- memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
-
- SAVE_REGS(mp_state);
- SAVE_REGS(vmm_rr);
- memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
- memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
- SAVE_REGS(itr_regions);
- SAVE_REGS(dtr_regions);
- SAVE_REGS(tc_regions);
- SAVE_REGS(irq_check);
- SAVE_REGS(itc_check);
- SAVE_REGS(timer_check);
- SAVE_REGS(timer_pending);
- SAVE_REGS(last_itc);
- for (i = 0; i < 8; i++) {
- regs->vrr[i] = vcpu->arch.vrr[i];
- regs->ibr[i] = vcpu->arch.ibr[i];
- regs->dbr[i] = vcpu->arch.dbr[i];
- }
- for (i = 0; i < 4; i++)
- regs->insvc[i] = vcpu->arch.insvc[i];
- regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
- SAVE_REGS(xtp);
- SAVE_REGS(metaphysical_rr0);
- SAVE_REGS(metaphysical_rr4);
- SAVE_REGS(metaphysical_saved_rr0);
- SAVE_REGS(metaphysical_saved_rr4);
- SAVE_REGS(fp_psr);
- SAVE_REGS(saved_gp);
-
- vcpu_put(vcpu);
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
- struct kvm_ia64_vcpu_stack *stack)
-{
- memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
- struct kvm_ia64_vcpu_stack *stack)
-{
- memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
- sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
-
- vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
- return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-
- hrtimer_cancel(&vcpu->arch.hlt_timer);
- kfree(vcpu->arch.apic);
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm_vcpu *vcpu = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct kvm_ia64_vcpu_stack *stack = NULL;
- long r;
-
- switch (ioctl) {
- case KVM_IA64_VCPU_GET_STACK: {
- struct kvm_ia64_vcpu_stack __user *user_stack;
- void __user *first_p = argp;
-
- r = -EFAULT;
- if (copy_from_user(&user_stack, first_p, sizeof(void *)))
- goto out;
-
- if (!access_ok(VERIFY_WRITE, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
- "Illegal user destination address for stack\n");
- goto out;
- }
- stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
- if (!stack) {
- r = -ENOMEM;
- goto out;
- }
-
- r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
- if (r)
- goto out;
-
- if (copy_to_user(user_stack, stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- r = -EFAULT;
- goto out;
- }
-
- break;
- }
- case KVM_IA64_VCPU_SET_STACK: {
- struct kvm_ia64_vcpu_stack __user *user_stack;
- void __user *first_p = argp;
-
- r = -EFAULT;
- if (copy_from_user(&user_stack, first_p, sizeof(void *)))
- goto out;
-
- if (!access_ok(VERIFY_READ, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack))) {
- printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
- "Illegal user address for stack\n");
- goto out;
- }
- stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
- if (!stack) {
- r = -ENOMEM;
- goto out;
- }
- if (copy_from_user(stack, user_stack,
- sizeof(struct kvm_ia64_vcpu_stack)))
- goto out;
-
- r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
- break;
- }
-
- default:
- r = -EINVAL;
- }
-
-out:
- kfree(stack);
- return r;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
-{
- unsigned long i;
- unsigned long pfn;
- int npages = memslot->npages;
- unsigned long base_gfn = memslot->base_gfn;
-
- if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
- return -ENOMEM;
-
- for (i = 0; i < npages; i++) {
- pfn = gfn_to_pfn(kvm, base_gfn + i);
- if (!kvm_is_reserved_pfn(pfn)) {
- kvm_set_pmt_entry(kvm, base_gfn + i,
- pfn << PAGE_SHIFT,
- _PAGE_AR_RWX | _PAGE_MA_WB);
- memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
- } else {
- kvm_set_pmt_entry(kvm, base_gfn + i,
- GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
- _PAGE_MA_UC);
- memslot->rmap[i] = 0;
- }
- }
-
- return 0;
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- kvm_arch_flush_shadow_all(kvm);
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- return -EINVAL;
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_vcpu_uninit(vcpu);
-}
-
-static int vti_cpu_has_kvm_support(void)
-{
- long avail = 1, status = 1, control = 1;
- long ret;
-
- ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
- if (ret)
- goto out;
-
- if (!(avail & PAL_PROC_VM_BIT))
- goto out;
-
- printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
-
- ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
- if (ret)
- goto out;
- printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
-
- if (!(vp_env_info & VP_OPCODE)) {
- printk(KERN_WARNING"kvm: No opcode ability on hardware, "
- "vm_env_info:0x%lx\n", vp_env_info);
- }
-
- return 1;
-out:
- return 0;
-}
-
-
-/*
- * On SN2, the ITC isn't stable, so copy in fast path code to use the
- * SN2 RTC, replacing the ITC-based default version.
- */
-static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
-{
- unsigned long new_ar, new_ar_sn2;
- unsigned long module_base;
-
- if (!ia64_platform_is("sn2"))
- return;
-
- module_base = (unsigned long)module->module_core;
-
- new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
- new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
-
- printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
- "as source\n");
-
- /*
- * Copy the SN2 version of mov_ar into place. They are both
- * the same size, so 6 bundles is sufficient (6 * 0x10).
- */
- memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
-}
-
-static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
-{
- unsigned long module_base;
- unsigned long vmm_size;
-
- unsigned long vmm_offset, func_offset, fdesc_offset;
- struct fdesc *p_fdesc;
-
- BUG_ON(!module);
-
- if (!kvm_vmm_base) {
- printk("kvm: kvm area hasn't been initialized yet!!\n");
- return -EFAULT;
- }
-
- /*Calculate new position of relocated vmm module.*/
- module_base = (unsigned long)module->module_core;
- vmm_size = module->core_size;
- if (unlikely(vmm_size > KVM_VMM_SIZE))
- return -EFAULT;
-
- memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
- kvm_patch_vmm(vmm_info, module);
- kvm_flush_icache(kvm_vmm_base, vmm_size);
-
- /*Recalculate kvm_vmm_info based on new VMM*/
- vmm_offset = vmm_info->vmm_ivt - module_base;
- kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
- printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
- kvm_vmm_info->vmm_ivt);
-
- fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
- kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
- fdesc_offset);
- func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
- p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
- p_fdesc->ip = KVM_VMM_BASE + func_offset;
- p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
-
- printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
- KVM_VMM_BASE+func_offset);
-
- fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
- kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
- fdesc_offset);
- func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
- p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
- p_fdesc->ip = KVM_VMM_BASE + func_offset;
- p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
-
- kvm_vmm_gp = p_fdesc->gp;
-
- printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
- kvm_vmm_info->vmm_entry);
- printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
- KVM_VMM_BASE + func_offset);
-
- return 0;
-}
-
-int kvm_arch_init(void *opaque)
-{
- int r;
- struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
-
- if (!vti_cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
- r = -EOPNOTSUPP;
- goto out;
- }
-
- if (kvm_vmm_info) {
- printk(KERN_ERR "kvm: Already loaded VMM module!\n");
- r = -EEXIST;
- goto out;
- }
-
- r = -ENOMEM;
- kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
- if (!kvm_vmm_info)
- goto out;
-
- if (kvm_alloc_vmm_area())
- goto out_free0;
-
- r = kvm_relocate_vmm(vmm_info, vmm_info->module);
- if (r)
- goto out_free1;
-
- return 0;
-
-out_free1:
- kvm_free_vmm_area();
-out_free0:
- kfree(kvm_vmm_info);
-out:
- return r;
-}
-
-void kvm_arch_exit(void)
-{
- kvm_free_vmm_area();
- kfree(kvm_vmm_info);
- kvm_vmm_info = NULL;
-}
-
-static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- int i;
- long base;
- unsigned long n;
- unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
- offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
-
- n = kvm_dirty_bitmap_bytes(memslot);
- base = memslot->base_gfn / BITS_PER_LONG;
-
- spin_lock(&kvm->arch.dirty_log_lock);
- for (i = 0; i < n/sizeof(long); ++i) {
- memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
- dirty_bitmap[base + i] = 0;
- }
- spin_unlock(&kvm->arch.dirty_log_lock);
-}
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
-{
- int r;
- unsigned long n;
- struct kvm_memory_slot *memslot;
- int is_dirty = 0;
-
- mutex_lock(&kvm->slots_lock);
-
- r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
- goto out;
-
- memslot = id_to_memslot(kvm->memslots, log->slot);
- r = -ENOENT;
- if (!memslot->dirty_bitmap)
- goto out;
-
- kvm_ia64_sync_dirty_log(kvm, memslot);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
- goto out;
-
- /* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
- kvm_flush_remote_tlbs(kvm);
- n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
- }
- r = 0;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-int kvm_arch_hardware_setup(void)
-{
- return 0;
-}
-
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
-{
- return __apic_accept_irq(vcpu, irq->vector);
-}
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
-{
- return apic->vcpu->vcpu_id == dest;
-}
-
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
-{
- return 0;
-}
-
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
-{
- return vcpu1->arch.xtp - vcpu2->arch.xtp;
-}
-
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode)
-{
- struct kvm_lapic *target = vcpu->arch.apic;
- return (dest_mode == 0) ?
- kvm_apic_match_physical_addr(target, dest) :
- kvm_apic_match_logical_addr(target, dest);
-}
-
-static int find_highest_bits(int *dat)
-{
- u32 bits, bitnum;
- int i;
-
- /* loop for all 256 bits */
- for (i = 7; i >= 0 ; i--) {
- bits = dat[i];
- if (bits) {
- bitnum = fls(bits);
- return i * 32 + bitnum - 1;
- }
- }
-
- return -1;
-}
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
-{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (vpd->irr[0] & (1UL << NMI_VECTOR))
- return NMI_VECTOR;
- if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return find_highest_bits((int *)&vpd->irr[0]);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.timer_fired;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
- (kvm_highest_pending_irq(vcpu) != -1);
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
- return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- mp_state->mp_state = vcpu->arch.mp_state;
- return 0;
-}
-
-static int vcpu_reset(struct kvm_vcpu *vcpu)
-{
- int r;
- long psr;
- local_irq_save(psr);
- r = kvm_insert_vmm_mapping(vcpu);
- local_irq_restore(psr);
- if (r)
- goto fail;
-
- vcpu->arch.launched = 0;
- kvm_arch_vcpu_uninit(vcpu);
- r = kvm_arch_vcpu_init(vcpu);
- if (r)
- goto fail;
-
- kvm_purge_vmm_mapping(vcpu);
- r = 0;
-fail:
- return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- int r = 0;
-
- vcpu->arch.mp_state = mp_state->mp_state;
- if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
- r = vcpu_reset(vcpu);
- return r;
-}
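For reference, the removed find_highest_bits()/kvm_highest_pending_irq() pair above scans a 256-bit IRR from the top word down and converts the highest set bit into a vector number with fls(). A minimal, self-contained sketch of that scan follows; it is an editorial illustration in plain C with made-up names, not part of the patch, and fls_u32() is a portable stand-in for the kernel's fls().

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based position of the
 * highest set bit, 0 when no bit is set. */
static int fls_u32(unsigned int x)
{
	int pos = 0;

	while (x) {
		x >>= 1;
		pos++;
	}
	return pos;
}

/* Scan a 256-bit interrupt request register stored as eight 32-bit words
 * (word 0 holds vectors 0..31) and return the highest pending vector,
 * or -1 when nothing is pending. */
static int highest_pending_vector(const unsigned int irr[8])
{
	int i;

	for (i = 7; i >= 0; i--) {
		if (irr[i])
			return i * 32 + fls_u32(irr[i]) - 1;
	}
	return -1;
}

int main(void)
{
	unsigned int irr[8] = { 0 };

	irr[1] |= 1u << 7;	/* mark vector 39 pending */
	printf("highest pending vector: %d\n", highest_pending_vector(irr));
	return 0;
}

The kernel version additionally short-circuits on the NMI and ExtINT vectors before falling back to this scan.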
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
deleted file mode 100644
index cb548ee9fcae..000000000000
--- a/arch/ia64/kvm/kvm_fw.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/*
- * PAL/SAL call delegation
- *
- * Copyright (c) 2004 Li Susie <susie.li@intel.com>
- * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
- * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/smp.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "vti.h"
-#include "misc.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/tlb.h>
-
-/*
- * Handy macros to make sure that the PAL return values start out
- * as something meaningful.
- */
-#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
- { \
- x.status = PAL_STATUS_UNIMPLEMENTED; \
- x.v0 = 0; \
- x.v1 = 0; \
- x.v2 = 0; \
- }
-
-#define INIT_PAL_STATUS_SUCCESS(x) \
- { \
- x.status = PAL_STATUS_SUCCESS; \
- x.v0 = 0; \
- x.v1 = 0; \
- x.v2 = 0; \
- }
-
-static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
- u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
- struct exit_ctl_data *p;
-
- if (vcpu) {
- p = &vcpu->arch.exit_data;
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- *gr28 = p->u.pal_data.gr28;
- *gr29 = p->u.pal_data.gr29;
- *gr30 = p->u.pal_data.gr30;
- *gr31 = p->u.pal_data.gr31;
- return ;
- }
- }
- printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n");
-}
-
-static void set_pal_result(struct kvm_vcpu *vcpu,
- struct ia64_pal_retval result) {
-
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- p->u.pal_data.ret = result;
- return ;
- }
- INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
-}
-
-static void set_sal_result(struct kvm_vcpu *vcpu,
- struct sal_ret_values result) {
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- p->u.sal_data.ret = result;
- return ;
- }
- printk(KERN_WARNING"Failed to set sal result!!\n");
-}
-
-struct cache_flush_args {
- u64 cache_type;
- u64 operation;
- u64 progress;
- long status;
-};
-
-cpumask_t cpu_cache_coherent_map;
-
-static void remote_pal_cache_flush(void *data)
-{
- struct cache_flush_args *args = data;
- long status;
- u64 progress = args->progress;
-
- status = ia64_pal_cache_flush(args->cache_type, args->operation,
- &progress, NULL);
- if (status != 0)
- args->status = status;
-}
-
-static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
-{
- u64 gr28, gr29, gr30, gr31;
- struct ia64_pal_retval result = {0, 0, 0, 0};
- struct cache_flush_args args = {0, 0, 0, 0};
- long psr;
-
- gr28 = gr29 = gr30 = gr31 = 0;
- kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
-
- if (gr31 != 0)
- printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
-
- /* Always call Host Pal in int=1 */
- gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
- args.cache_type = gr29;
- args.operation = gr30;
- smp_call_function(remote_pal_cache_flush,
- (void *)&args, 1);
- if (args.status != 0)
- printk(KERN_ERR"pal_cache_flush error!,"
- "status:0x%lx\n", args.status);
- /*
- * Call Host PAL cache flush
- * Clear psr.ic when call PAL_CACHE_FLUSH
- */
- local_irq_save(psr);
- result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
- &result.v0);
- local_irq_restore(psr);
- if (result.status != 0)
- printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
- "in1:%lx,in2:%lx\n",
- vcpu, result.status, gr29, gr30);
-
-#if 0
- if (gr29 == PAL_CACHE_TYPE_COHERENT) {
- cpus_setall(vcpu->arch.cache_coherent_map);
- cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
- cpus_setall(cpu_cache_coherent_map);
- cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
- }
-#endif
- return result;
-}
-
-struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
- return result;
-}
-
-static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
-
- /*
- * PAL_FREQ_BASE may not be implemented in some platforms,
- * call SAL instead.
- */
- if (result.v0 == 0) {
- result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &result.v0,
- &result.v1);
- result.v2 = 0;
- }
-
- return result;
-}
-
-/*
- * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
- * RTC is used instead. This function patches the ratios from SAL
- * to match the RTC before providing them to the guest.
- */
-static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
-{
- struct pal_freq_ratio *ratio;
- unsigned long sal_freq, sal_drift, factor;
-
- result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
- &sal_freq, &sal_drift);
- ratio = (struct pal_freq_ratio *)&result->v2;
- factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
- sn_rtc_cycles_per_second;
-
- ratio->num = 3;
- ratio->den = factor;
-}
-
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
-
- if (vcpu->kvm->arch.is_sn2)
- sn2_patch_itc_freq_ratios(&result);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
-
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- return result;
-}
-
-static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result;
-
- INIT_PAL_STATUS_SUCCESS(result);
- return result;
-}
-
-static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result = {0, 0, 0, 0};
- u64 in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
- &result.v2, in2);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
-{
-
- struct ia64_pal_retval result = {0, 0, 0, 0};
- u64 in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
-
- return result;
-}
-
-static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
-{
-
- pal_cache_config_info_t ci;
- long status;
- unsigned long in0, in1, in2, in3, r9, r10;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- status = ia64_pal_cache_config_info(in1, in2, &ci);
- r9 = ci.pcci_info_1.pcci1_data;
- r10 = ci.pcci_info_2.pcci2_data;
- return ((struct ia64_pal_retval){status, r9, r10, 0});
-}
-
-#define GUEST_IMPL_VA_MSB 59
-#define GUEST_RID_BITS 18
-
-static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
-{
-
- pal_vm_info_1_u_t vminfo1;
- pal_vm_info_2_u_t vminfo2;
- struct ia64_pal_retval result;
-
- PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
- if (!result.status) {
- vminfo1.pvi1_val = result.v0;
- vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
- vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
- result.v0 = vminfo1.pvi1_val;
- vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
- vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
- result.v1 = vminfo2.pvi2_val;
- }
-
- return result;
-}
-
-static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result;
- unsigned long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
- result.status = ia64_pal_vm_info(in1, in2,
- (pal_tc_info_u_t *)&result.v1, &result.v2);
-
- return result;
-}
-
-static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
-{
- u64 index = 0;
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
- if (p->exit_reason == EXIT_REASON_PAL_CALL)
- index = p->u.pal_data.gr28;
-
- return index;
-}
-
-static void prepare_for_halt(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.timer_pending = 1;
- vcpu->arch.timer_fired = 0;
-}
-
-static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
-{
- long status;
- unsigned long in0, in1, in2, in3, r9;
- unsigned long pm_buffer[16];
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- status = ia64_pal_perf_mon_info(pm_buffer,
- (pal_perf_mon_info_u_t *) &r9);
- if (status != 0) {
- printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
- } else {
- if (in1)
- memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
- else {
- status = PAL_STATUS_EINVAL;
- printk(KERN_WARNING"Invalid parameters "
- "for PAL call:0x%lx!\n", in0);
- }
- }
- return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
-{
- unsigned long in0, in1, in2, in3;
- long status;
- unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
- | (1UL << 61) | (1UL << 60);
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- if (in1) {
- memcpy((void *)in1, &res, sizeof(res));
- status = 0;
- } else{
- status = PAL_STATUS_EINVAL;
- printk(KERN_WARNING"Invalid parameters "
- "for PAL call:0x%lx!\n", in0);
- }
-
- return (struct ia64_pal_retval){status, 0, 0, 0};
-}
-
-static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
-{
- unsigned long r9;
- long status;
-
- status = ia64_pal_mem_attrib(&r9);
-
- return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static void remote_pal_prefetch_visibility(void *v)
-{
- s64 trans_type = (s64)v;
- ia64_pal_prefetch_visibility(trans_type);
-}
-
-static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result = {0, 0, 0, 0};
- unsigned long in0, in1, in2, in3;
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
- result.status = ia64_pal_prefetch_visibility(in1);
- if (result.status == 0) {
- /* Must be performed on all remote processors
- in the coherence domain. */
- smp_call_function(remote_pal_prefetch_visibility,
- (void *)in1, 1);
- /* Unnecessary on remote processor for other vcpus!*/
- result.status = 1;
- }
- return result;
-}
-
-static void remote_pal_mc_drain(void *v)
-{
- ia64_pal_mc_drain();
-}
-
-static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
-{
- struct ia64_pal_retval result = {0, 0, 0, 0};
- unsigned long in0, in1, in2, in3;
-
- kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
- if (in1 == 0 && in2) {
- char brand_info[128];
- result.status = ia64_pal_get_brand_info(brand_info);
- if (result.status == PAL_STATUS_SUCCESS)
- memcpy((void *)in2, brand_info, 128);
- } else {
- result.status = PAL_STATUS_REQUIRES_MEMORY;
- printk(KERN_WARNING"Invalid parameters for "
- "PAL call:0x%lx!\n", in0);
- }
-
- return result;
-}
-
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-
- u64 gr28;
- struct ia64_pal_retval result;
- int ret = 1;
-
- gr28 = kvm_get_pal_call_index(vcpu);
- switch (gr28) {
- case PAL_CACHE_FLUSH:
- result = pal_cache_flush(vcpu);
- break;
- case PAL_MEM_ATTRIB:
- result = pal_mem_attrib(vcpu);
- break;
- case PAL_CACHE_SUMMARY:
- result = pal_cache_summary(vcpu);
- break;
- case PAL_PERF_MON_INFO:
- result = pal_perf_mon_info(vcpu);
- break;
- case PAL_HALT_INFO:
- result = pal_halt_info(vcpu);
- break;
- case PAL_HALT_LIGHT:
- {
- INIT_PAL_STATUS_SUCCESS(result);
- prepare_for_halt(vcpu);
- if (kvm_highest_pending_irq(vcpu) == -1)
- ret = kvm_emulate_halt(vcpu);
- }
- break;
-
- case PAL_PREFETCH_VISIBILITY:
- result = pal_prefetch_visibility(vcpu);
- break;
- case PAL_MC_DRAIN:
- result.status = ia64_pal_mc_drain();
- /* FIXME: All vcpus likely call PAL_MC_DRAIN,
- which causes congestion. */
- smp_call_function(remote_pal_mc_drain, NULL, 1);
- break;
-
- case PAL_FREQ_RATIOS:
- result = pal_freq_ratios(vcpu);
- break;
-
- case PAL_FREQ_BASE:
- result = pal_freq_base(vcpu);
- break;
-
- case PAL_LOGICAL_TO_PHYSICAL :
- result = pal_logical_to_physica(vcpu);
- break;
-
- case PAL_VM_SUMMARY :
- result = pal_vm_summary(vcpu);
- break;
-
- case PAL_VM_INFO :
- result = pal_vm_info(vcpu);
- break;
- case PAL_PLATFORM_ADDR :
- result = pal_platform_addr(vcpu);
- break;
- case PAL_CACHE_INFO:
- result = pal_cache_info(vcpu);
- break;
- case PAL_PTCE_INFO:
- INIT_PAL_STATUS_SUCCESS(result);
- result.v1 = (1L << 32) | 1L;
- break;
- case PAL_REGISTER_INFO:
- result = pal_register_info(vcpu);
- break;
- case PAL_VM_PAGE_SIZE:
- result.status = ia64_pal_vm_page_size(&result.v0,
- &result.v1);
- break;
- case PAL_RSE_INFO:
- result.status = ia64_pal_rse_info(&result.v0,
- (pal_hints_u_t *)&result.v1);
- break;
- case PAL_PROC_GET_FEATURES:
- result = pal_proc_get_features(vcpu);
- break;
- case PAL_DEBUG_INFO:
- result.status = ia64_pal_debug_info(&result.v0,
- &result.v1);
- break;
- case PAL_VERSION:
- result.status = ia64_pal_version(
- (pal_version_u_t *)&result.v0,
- (pal_version_u_t *)&result.v1);
- break;
- case PAL_FIXED_ADDR:
- result.status = PAL_STATUS_SUCCESS;
- result.v0 = vcpu->vcpu_id;
- break;
- case PAL_BRAND_INFO:
- result = pal_get_brand_info(vcpu);
- break;
- case PAL_GET_PSTATE:
- case PAL_CACHE_SHARED_INFO:
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- break;
- default:
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
- printk(KERN_WARNING"kvm: Unsupported pal call,"
- " index:0x%lx\n", gr28);
- }
- set_pal_result(vcpu, result);
- return ret;
-}
-
-static struct sal_ret_values sal_emulator(struct kvm *kvm,
- long index, unsigned long in1,
- unsigned long in2, unsigned long in3,
- unsigned long in4, unsigned long in5,
- unsigned long in6, unsigned long in7)
-{
- unsigned long r9 = 0;
- unsigned long r10 = 0;
- long r11 = 0;
- long status;
-
- status = 0;
- switch (index) {
- case SAL_FREQ_BASE:
- status = ia64_sal_freq_base(in1, &r9, &r10);
- break;
- case SAL_PCI_CONFIG_READ:
- printk(KERN_WARNING"kvm: Not allowed to call here!"
- " SAL_PCI_CONFIG_READ\n");
- break;
- case SAL_PCI_CONFIG_WRITE:
- printk(KERN_WARNING"kvm: Not allowed to call here!"
- " SAL_PCI_CONFIG_WRITE\n");
- break;
- case SAL_SET_VECTORS:
- if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
- if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
- status = -2;
- } else {
- kvm->arch.rdv_sal_data.boot_ip = in2;
- kvm->arch.rdv_sal_data.boot_gp = in3;
- }
- printk("Rendvous called! iip:%lx\n\n", in2);
- } else
- printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
- "ignored...\n", in1);
- break;
- case SAL_GET_STATE_INFO:
- /* No more info. */
- status = -5;
- r9 = 0;
- break;
- case SAL_GET_STATE_INFO_SIZE:
- /* Return a dummy size. */
- status = 0;
- r9 = 128;
- break;
- case SAL_CLEAR_STATE_INFO:
- /* Noop. */
- break;
- case SAL_MC_RENDEZ:
- printk(KERN_WARNING
- "kvm: called SAL_MC_RENDEZ. ignored...\n");
- break;
- case SAL_MC_SET_PARAMS:
- printk(KERN_WARNING
- "kvm: called SAL_MC_SET_PARAMS.ignored!\n");
- break;
- case SAL_CACHE_FLUSH:
- if (1) {
- /*Flush using SAL.
- This method is faster but has a side
- effect on other vcpu running on
- this cpu. */
- status = ia64_sal_cache_flush(in1);
- } else {
- /*Maybe need to implement the method
- without side effect!*/
- status = 0;
- }
- break;
- case SAL_CACHE_INIT:
- printk(KERN_WARNING
- "kvm: called SAL_CACHE_INIT. ignored...\n");
- break;
- case SAL_UPDATE_PAL:
- printk(KERN_WARNING
- "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
- break;
- default:
- printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
- " index:%ld\n", index);
- status = -1;
- break;
- }
- return ((struct sal_ret_values) {status, r9, r10, r11});
-}
-
-static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
- u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
-
- struct exit_ctl_data *p;
-
- p = kvm_get_exit_data(vcpu);
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- *in0 = p->u.sal_data.in0;
- *in1 = p->u.sal_data.in1;
- *in2 = p->u.sal_data.in2;
- *in3 = p->u.sal_data.in3;
- *in4 = p->u.sal_data.in4;
- *in5 = p->u.sal_data.in5;
- *in6 = p->u.sal_data.in6;
- *in7 = p->u.sal_data.in7;
- return ;
- }
- *in0 = 0;
-}
-
-void kvm_sal_emul(struct kvm_vcpu *vcpu)
-{
-
- struct sal_ret_values result;
- u64 index, in1, in2, in3, in4, in5, in6, in7;
-
- kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
- &in3, &in4, &in5, &in6, &in7);
- result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
- in4, in5, in6, in7);
- set_sal_result(vcpu, result);
-}
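The sn2_patch_itc_freq_ratios() helper above rewrites the frequency ratio reported to the guest so that it is consistent with the SN2 RTC rather than the unstable ITC, fixing the numerator at 3 and computing the denominator with round-to-nearest integer division. The following is a small sketch of that arithmetic only, with hypothetical frequencies standing in for the SAL/RTC values; it is illustrative and not part of the patch.

#include <stdio.h>

/* Given a platform base frequency and an RTC frequency, compute a
 * numerator/denominator pair such that num/den ~= rtc_freq/base_freq,
 * using the same round-to-nearest division as the removed code:
 * den = (base_freq * 3 + rtc_freq / 2) / rtc_freq, with num fixed at 3. */
static void compute_ratio(unsigned long base_freq, unsigned long rtc_freq,
			  unsigned long *num, unsigned long *den)
{
	*num = 3;
	*den = (base_freq * 3 + rtc_freq / 2) / rtc_freq;
}

int main(void)
{
	unsigned long num, den;

	/* Hypothetical 200 MHz platform base clock, 50 MHz RTC. */
	compute_ratio(200000000UL, 50000000UL, &num, &den);
	printf("ratio num=%lu den=%lu\n", num, den);	/* expect 3/12 */
	return 0;
}

With these example inputs the guest would see a 3/12 ratio, i.e. an emulated timer running at one quarter of the base frequency.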
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
deleted file mode 100644
index f1268b8e6f9e..000000000000
--- a/arch/ia64/kvm/kvm_lib.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * kvm_lib.c: Compile some libraries for kvm-intel module.
- *
- * Just include kernel's library, and disable symbols export.
- * Copyright (C) 2008, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#undef CONFIG_MODULES
-#include <linux/module.h>
-#undef CONFIG_KALLSYMS
-#undef EXPORT_SYMBOL
-#undef EXPORT_SYMBOL_GPL
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#include "../../../lib/vsprintf.c"
-#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
deleted file mode 100644
index b2bcaa2787aa..000000000000
--- a/arch/ia64/kvm/kvm_minstate.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * kvm_minstate.h: min save macros
- * Copyright (c) 2007, Intel Corporation.
- *
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/types.h>
-#include <asm/kregs.h>
-#include <asm/kvm_host.h>
-
-#include "asm-offsets.h"
-
-#define KVM_MINSTATE_START_SAVE_MIN \
- mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
- ;; \
- mov.m r28 = ar.rnat; \
- addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
- ;; \
- lfetch.fault.excl.nt1 [r22]; \
- addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \
- mov r23 = ar.bspstore; /* save ar.bspstore */ \
- ;; \
- mov ar.bspstore = r22; /* switch to kernel RBS */\
- ;; \
- mov r18 = ar.bsp; \
- mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
-
-
-
-#define KVM_MINSTATE_END_SAVE_MIN \
- bsw.1; /* switch back to bank 1 (must be last in insn group) */\
- ;;
-
-
-#define PAL_VSA_SYNC_READ \
- /* begin to call pal vps sync_read */ \
-{.mii; \
- add r25 = VMM_VPD_BASE_OFFSET, r21; \
- nop 0x0; \
- mov r24=ip; \
- ;; \
-} \
-{.mmb \
- add r24=0x20, r24; \
- ld8 r25 = [r25]; /* read vpd base */ \
- br.cond.sptk kvm_vps_sync_read; /*call the service*/ \
- ;; \
-}; \
-
-
-#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
-
-/*
- * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * r2 = points to &pt_regs.r16
- * r8 = contents of ar.ccv
- * r9 = contents of ar.csd
- * r10 = contents of ar.ssd
- * r11 = FPSR_DEFAULT
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-
-
-#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
-
-#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
- KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
- mov r27 = ar.rsc; /* M */ \
- mov r20 = r1; /* A */ \
- mov r25 = ar.unat; /* M */ \
- mov r29 = cr.ipsr; /* M */ \
- mov r26 = ar.pfs; /* I */ \
- mov r18 = cr.isr; \
- COVER; /* B;; (or nothing) */ \
- ;; \
- tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
- mov r1 = r16; \
-/* mov r21=r16; */ \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- ;; \
- KVM_MINSTATE_START_SAVE_MIN \
- adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
- adds r16 = PT(CR_IPSR),r1; \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- st8 [r16] = r29; /* save cr.ipsr */ \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
- mov r29 = b0 \
- ;; \
- adds r16 = PT(R8),r1; /* initialize first base pointer */\
- adds r17 = PT(R9),r1; /* initialize second base pointer */\
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r8,16; \
-.mem.offset 8,0; st8.spill [r17] = r9,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r10,24; \
-.mem.offset 8,0; st8.spill [r17] = r11,24; \
- ;; \
- mov r9 = cr.iip; /* M */ \
- mov r10 = ar.fpsr; /* M */ \
- ;; \
- st8 [r16] = r9,16; /* save cr.iip */ \
- st8 [r17] = r30,16; /* save cr.ifs */ \
- sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
- ;; \
- st8 [r16] = r25,16; /* save ar.unat */ \
- st8 [r17] = r26,16; /* save ar.pfs */ \
- shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\
- ;; \
- st8 [r16] = r27,16; /* save ar.rsc */ \
- st8 [r17] = r28,16; /* save ar.rnat */ \
- ;; /* avoid RAW on r16 & r17 */ \
- st8 [r16] = r23,16; /* save ar.bspstore */ \
- st8 [r17] = r31,16; /* save predicates */ \
- ;; \
- st8 [r16] = r29,16; /* save b0 */ \
- st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
-.mem.offset 8,0; st8.spill [r17] = r12,16; \
- adds r12 = -16,r1; /* switch to kernel memory stack */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r13,16; \
-.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
- mov r13 = r21; /* establish `current' */ \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r15,16; \
-.mem.offset 8,0; st8.spill [r17] = r14,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r16] = r2,16; \
-.mem.offset 8,0; st8.spill [r17] = r3,16; \
- adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
- ;; \
- adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
- adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
- mov r26 = cr.iipa; \
- mov r27 = cr.isr; \
- ;; \
- st8 [r16] = r26; \
- st8 [r17] = r27; \
- ;; \
- EXTRA; \
- mov r8 = ar.ccv; \
- mov r9 = ar.csd; \
- mov r10 = ar.ssd; \
- movl r11 = FPSR_DEFAULT; /* L-unit */ \
- adds r17 = VMM_VCPU_GP_OFFSET,r13; \
- ;; \
- ld8 r1 = [r17];/* establish kernel global pointer */ \
- ;; \
- PAL_VSA_SYNC_READ \
- KVM_MINSTATE_END_SAVE_MIN
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- * psr.ic: on
- * r2: points to &pt_regs.f6
- * r3: points to &pt_regs.f7
- * r8: contents of ar.ccv
- * r9: contents of ar.csd
- * r10: contents of ar.ssd
- * r11: FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define KVM_SAVE_REST \
-.mem.offset 0,0; st8.spill [r2] = r16,16; \
-.mem.offset 8,0; st8.spill [r3] = r17,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r18,16; \
-.mem.offset 8,0; st8.spill [r3] = r19,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r20,16; \
-.mem.offset 8,0; st8.spill [r3] = r21,16; \
- mov r18=b6; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r22,16; \
-.mem.offset 8,0; st8.spill [r3] = r23,16; \
- mov r19 = b7; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r24,16; \
-.mem.offset 8,0; st8.spill [r3] = r25,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r26,16; \
-.mem.offset 8,0; st8.spill [r3] = r27,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r28,16; \
-.mem.offset 8,0; st8.spill [r3] = r29,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r30,16; \
-.mem.offset 8,0; st8.spill [r3] = r31,32; \
- ;; \
- mov ar.fpsr = r11; \
- st8 [r2] = r8,8; \
- adds r24 = PT(B6)-PT(F7),r3; \
- adds r25 = PT(B7)-PT(F7),r3; \
- ;; \
- st8 [r24] = r18,16; /* b6 */ \
- st8 [r25] = r19,16; /* b7 */ \
- adds r2 = PT(R4)-PT(F6),r2; \
- adds r3 = PT(R5)-PT(F7),r3; \
- ;; \
- st8 [r24] = r9; /* ar.csd */ \
- st8 [r25] = r10; /* ar.ssd */ \
- ;; \
- mov r18 = ar.unat; \
- adds r19 = PT(EML_UNAT)-PT(R4),r2; \
- ;; \
- st8 [r19] = r18; /* eml_unat */ \
-
-
-#define KVM_SAVE_EXTRA \
-.mem.offset 0,0; st8.spill [r2] = r4,16; \
-.mem.offset 8,0; st8.spill [r3] = r5,16; \
- ;; \
-.mem.offset 0,0; st8.spill [r2] = r6,16; \
-.mem.offset 8,0; st8.spill [r3] = r7; \
- ;; \
- mov r26 = ar.unat; \
- ;; \
- st8 [r2] = r26;/* eml_unat */ \
-
-#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
-#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
-#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
deleted file mode 100644
index c5f92a926a9a..000000000000
--- a/arch/ia64/kvm/lapic.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __KVM_IA64_LAPIC_H
-#define __KVM_IA64_LAPIC_H
-
-#include <linux/kvm_host.h>
-
-/*
- * vlsapic
- */
-struct kvm_lapic{
- struct kvm_vcpu *vcpu;
- uint64_t insvc[4];
- uint64_t vhpi;
- uint8_t xtp;
- uint8_t pal_init_pending;
- uint8_t pad[2];
-};
-
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
-void kvm_free_lapic(struct kvm_vcpu *vcpu);
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
-#define kvm_apic_present(x) (true)
-#define kvm_lapic_enabled(x) (true)
-
-#endif
diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S
deleted file mode 100644
index c04cdbe9f80f..000000000000
--- a/arch/ia64/kvm/memcpy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memcpy.S"
diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S
deleted file mode 100644
index 83c3066d844a..000000000000
--- a/arch/ia64/kvm/memset.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memset.S"
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
deleted file mode 100644
index dd979e00b574..000000000000
--- a/arch/ia64/kvm/misc.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef __KVM_IA64_MISC_H
-#define __KVM_IA64_MISC_H
-
-#include <linux/kvm_host.h>
-/*
- * misc.h
- * Copyright (C) 2007, Intel Corporation.
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * Return the P2M base address on the host side.
- */
-static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
-{
- return (uint64_t *)(kvm->arch.vm_base +
- offsetof(struct kvm_vm_data, kvm_p2m));
-}
-
-static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
- u64 paddr, u64 mem_flags)
-{
- uint64_t *pmt_base = kvm_host_get_pmt(kvm);
- unsigned long pte;
-
- pte = PAGE_ALIGN(paddr) | mem_flags;
- pmt_base[gfn] = pte;
-}
-
-/*Function for translating host address to guest address*/
-
-static inline void *to_guest(struct kvm *kvm, void *addr)
-{
- return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
- KVM_VM_DATA_BASE);
-}
-
-/*Function for translating guest address to host address*/
-
-static inline void *to_host(struct kvm *kvm, void *addr)
-{
- return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
- + kvm->arch.vm_base);
-}
-
-/* Get host context of the vcpu */
-static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
-{
- union context *ctx = &vcpu->arch.host;
- return to_guest(vcpu->kvm, ctx);
-}
-
-/* Get guest context of the vcpu */
-static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
-{
- union context *ctx = &vcpu->arch.guest;
- return to_guest(vcpu->kvm, ctx);
-}
-
-/* Get the exit data recorded by the guest VMM (GVMM). */
-static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.exit_data;
-}
-
-/* Get the vcpu's MMIO/I-O request for the kvm module. */
-static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p_ctl_data;
-
- if (vcpu) {
- p_ctl_data = kvm_get_exit_data(vcpu);
- if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
- return &p_ctl_data->u.ioreq;
- }
-
- return NULL;
-}
-
-#endif
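The to_guest()/to_host() helpers in misc.h above are fixed-offset rebasing between the host allocation (kvm->arch.vm_base) and the address at which the VMM maps the same data area (KVM_VM_DATA_BASE). The toy sketch below shows only that rebasing, with made-up base values standing in for the kernel constants; names and numbers are illustrative, not from the patch.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for kvm->arch.vm_base and KVM_VM_DATA_BASE. */
#define HOST_VM_BASE   0x10000000UL
#define GUEST_VM_BASE  0x60000000UL

/* Rebase a host-side address into the guest/VMM mapping of the same area. */
static uintptr_t to_guest_addr(uintptr_t host_addr)
{
	return host_addr - HOST_VM_BASE + GUEST_VM_BASE;
}

/* And back again. */
static uintptr_t to_host_addr(uintptr_t guest_addr)
{
	return guest_addr - GUEST_VM_BASE + HOST_VM_BASE;
}

int main(void)
{
	uintptr_t host = HOST_VM_BASE + 0x1234;
	uintptr_t guest = to_guest_addr(host);

	printf("host 0x%lx -> guest 0x%lx -> host 0x%lx\n",
	       (unsigned long)host, (unsigned long)guest,
	       (unsigned long)to_host_addr(guest));
	return 0;
}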
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
deleted file mode 100644
index f1e17d3d6cd9..000000000000
--- a/arch/ia64/kvm/mmio.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * mmio.c: MMIO emulation components.
- * Copyright (c) 2004, Intel Corporation.
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
- *
- * Copyright (c) 2007 Intel Corporation KVM support.
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-
-#include "vcpu.h"
-
-static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
-{
- VLSAPIC_XTP(v) = val;
-}
-
-/*
- * LSAPIC OFFSET
- */
-#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
-#define PIB_OFST_INTA 0x1E0000
-#define PIB_OFST_XTP 0x1E0008
-
-/*
- * execute write IPI op.
- */
-static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
- uint64_t addr, uint64_t data)
-{
- struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
- unsigned long psr;
-
- local_irq_save(psr);
-
- p->exit_reason = EXIT_REASON_IPI;
- p->u.ipi_data.addr.val = addr;
- p->u.ipi_data.data.val = data;
- vmm_transition(current_vcpu);
-
- local_irq_restore(psr);
-
-}
-
-void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
- unsigned long length, unsigned long val)
-{
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- panic_vm(v, "Undefined write on PIB INTA\n");
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- vlsapic_write_xtp(v, val);
- } else {
- panic_vm(v, "Undefined write on PIB XTP\n");
- }
- break;
- default:
- if (PIB_LOW_HALF(addr)) {
- /*Lower half */
- if (length != 8)
- panic_vm(v, "Can't LHF write with size %ld!\n",
- length);
- else
- vlsapic_write_ipi(v, addr, val);
- } else { /*Upper half */
- panic_vm(v, "IPI-UHF write %lx\n", addr);
- }
- break;
- }
-}
-
-unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
- unsigned long length)
-{
- uint64_t result = 0;
-
- addr &= (PIB_SIZE - 1);
-
- switch (addr) {
- case PIB_OFST_INTA:
- if (length == 1) /* 1 byte load */
- ; /* There is no i8259, there is no INTA access*/
- else
- panic_vm(v, "Undefined read on PIB INTA\n");
-
- break;
- case PIB_OFST_XTP:
- if (length == 1) {
- result = VLSAPIC_XTP(v);
- } else {
- panic_vm(v, "Undefined read on PIB XTP\n");
- }
- break;
- default:
- panic_vm(v, "Undefined addr access for lsapic!\n");
- break;
- }
- return result;
-}
-
-static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
- u16 s, int ma, int dir)
-{
- unsigned long iot;
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long psr;
-
- iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
-
- local_irq_save(psr);
-
- /* Intercept accesses to the PIB range */
- if (iot == GPFN_PIB) {
- if (!dir)
- lsapic_write(vcpu, src_pa, s, *dest);
- else
- *dest = lsapic_read(vcpu, src_pa, s);
- goto out;
- }
- p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
- p->u.ioreq.addr = src_pa;
- p->u.ioreq.size = s;
- p->u.ioreq.dir = dir;
- if (dir == IOREQ_WRITE)
- p->u.ioreq.data = *dest;
- p->u.ioreq.state = STATE_IOREQ_READY;
- vmm_transition(vcpu);
-
- if (p->u.ioreq.state == STATE_IORESP_READY) {
- if (dir == IOREQ_READ)
- /* it's necessary to ensure zero extending */
- *dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
- } else
- panic_vm(vcpu, "Unhandled mmio access returned!\n");
-out:
- local_irq_restore(psr);
- return ;
-}
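
The read path above masks the returned data with ~0UL >> (64 - (s*8)) so that a sub-8-byte MMIO result is zero-extended into the 64-bit destination. A minimal sketch of just that masking step (standalone helper, not the kernel function):

#include <assert.h>
#include <stdint.h>

/* Zero-extend the low 'size' bytes of 'raw' into a 64-bit value. */
static uint64_t zero_extend(uint64_t raw, unsigned int size)
{
        return raw & (~(uint64_t)0 >> (64 - size * 8));
}

int main(void)
{
        /* a 2-byte read whose upper bytes happen to contain stale data */
        assert(zero_extend(0xdeadbeefcafe1234UL, 2) == 0x1234);
        /* an 8-byte read passes through unchanged */
        assert(zero_extend(0xdeadbeefcafe1234UL, 8) == 0xdeadbeefcafe1234UL);
        return 0;
}
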
-
-/*
- dir 1: read 0:write
- inst_type 0:integer 1:floating point
- */
-#define SL_INTEGER 0 /* store/load integer */
-#define SL_FLOATING 1 /* store/load floating point */
-
-void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
-{
- struct kvm_pt_regs *regs;
- IA64_BUNDLE bundle;
- int slot, dir = 0;
- int inst_type = -1;
- u16 size = 0;
- u64 data, slot1a, slot1b, temp, update_reg;
- s32 imm;
- INST64 inst;
-
- regs = vcpu_regs(vcpu);
-
- if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
- /* if fetching the code fails, return and try again */
- return;
- }
- slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
- if (!slot)
- inst.inst = bundle.slot0;
- else if (slot == 1) {
- slot1a = bundle.slot1a;
- slot1b = bundle.slot1b;
- inst.inst = slot1a + (slot1b << 18);
- } else if (slot == 2)
- inst.inst = bundle.slot2;
-
- /* Integer Load/Store */
- if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
- inst_type = SL_INTEGER;
- size = (inst.M1.x6 & 0x3);
- if ((inst.M1.x6 >> 2) > 0xb) {
- /*write*/
- dir = IOREQ_WRITE;
- data = vcpu_get_gr(vcpu, inst.M4.r2);
- } else if ((inst.M1.x6 >> 2) < 0xb) {
- /*read*/
- dir = IOREQ_READ;
- }
- } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
- /* Integer Load + Reg update */
- inst_type = SL_INTEGER;
- dir = IOREQ_READ;
- size = (inst.M2.x6 & 0x3);
- temp = vcpu_get_gr(vcpu, inst.M2.r3);
- update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
- temp += update_reg;
- vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
- } else if (inst.M3.major == 5) {
- /*Integer Load/Store + Imm update*/
- inst_type = SL_INTEGER;
- size = (inst.M3.x6&0x3);
- if ((inst.M5.x6 >> 2) > 0xb) {
- /*write*/
- dir = IOREQ_WRITE;
- data = vcpu_get_gr(vcpu, inst.M5.r2);
- temp = vcpu_get_gr(vcpu, inst.M5.r3);
- imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
- (inst.M5.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
-
- } else if ((inst.M3.x6 >> 2) < 0xb) {
- /*read*/
- dir = IOREQ_READ;
- temp = vcpu_get_gr(vcpu, inst.M3.r3);
- imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
- (inst.M3.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
-
- }
- } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
- && inst.M9.m == 0 && inst.M9.x == 0) {
- /* Floating-point spill*/
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
- /* Write high word. FIXME: this is a kludge! */
- v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
- ma, IOREQ_WRITE);
- data = v.u.bits[0];
- size = 3;
- } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
- /* Floating-point spill + Imm update */
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- temp = vcpu_get_gr(vcpu, inst.M10.r3);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
- (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
-
- /* Write high word. FIXME: this is a kludge! */
- v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
- 8, ma, IOREQ_WRITE);
- data = v.u.bits[0];
- size = 3;
- } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
- /* Floating-point stf8 + Imm update */
- struct ia64_fpreg v;
- inst_type = SL_FLOATING;
- dir = IOREQ_WRITE;
- size = 3;
- vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
- data = v.u.bits[0]; /* Significand. */
- temp = vcpu_get_gr(vcpu, inst.M10.r3);
- imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
- (inst.M10.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
- } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
- && inst.M15.x6 <= 0x2f) {
- temp = vcpu_get_gr(vcpu, inst.M15.r3);
- imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
- (inst.M15.imm7 << 23);
- temp += imm >> 23;
- vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
-
- vcpu_increment_iip(vcpu);
- return;
- } else if (inst.M12.major == 6 && inst.M12.m == 1
- && inst.M12.x == 1 && inst.M12.x6 == 1) {
- /* Floating-point Load Pair + Imm ldfp8 M12*/
- struct ia64_fpreg v;
-
- inst_type = SL_FLOATING;
- dir = IOREQ_READ;
- size = 8; /*ldfd*/
- mmio_access(vcpu, padr, &data, size, ma, dir);
- v.u.bits[0] = data;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
- padr += 8;
- mmio_access(vcpu, padr, &data, size, ma, dir);
- v.u.bits[0] = data;
- v.u.bits[1] = 0x1003E;
- vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
- padr += 8;
- vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
- vcpu_increment_iip(vcpu);
- return;
- } else {
- inst_type = -1;
- panic_vm(vcpu, "Unsupported MMIO access instruction! "
- "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
- bundle.i64[0], bundle.i64[1]);
- }
-
- size = 1 << size;
- if (dir == IOREQ_WRITE) {
- mmio_access(vcpu, padr, &data, size, ma, dir);
- } else {
- mmio_access(vcpu, padr, &data, size, ma, dir);
- if (inst_type == SL_INTEGER)
- vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
- else
- panic_vm(vcpu, "Unsupported instruction type!\n");
-
- }
- vcpu_increment_iip(vcpu);
-}
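
Several of the decode paths above (M3, M5, M10, M15) rebuild a signed 9-bit immediate by packing s, i and imm7 into the top of a 32-bit signed value and arithmetic-shifting it back down by 23. A standalone sketch of that trick, with hypothetical field values; like the kernel code, it relies on arithmetic right shift of signed integers:

#include <assert.h>
#include <stdint.h>

/* Reassemble a sign-extended 9-bit immediate from its s, i and imm7 fields. */
static int32_t decode_imm9(uint32_t s, uint32_t i, uint32_t imm7)
{
        int32_t imm = (int32_t)((s << 31) | (i << 30) | (imm7 << 23));
        return imm >> 23;       /* arithmetic shift replicates the sign bit */
}

int main(void)
{
        assert(decode_imm9(0, 0, 8) == 8);       /* small positive update */
        assert(decode_imm9(1, 1, 0x7f) == -1);   /* all ones: minus one */
        assert(decode_imm9(1, 0, 0) == -256);    /* most negative 9-bit value */
        return 0;
}
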
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
deleted file mode 100644
index f793be3effff..000000000000
--- a/arch/ia64/kvm/optvfault.S
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * arch/ia64/kvm/optvfault.S
- * optimize virtualization fault handler
- *
- * Copyright (C) 2006 Intel Co
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Copyright (C) 2008 Intel Co
- * Add support for Tukwila processors.
- * Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/kvm_host.h>
-
-#include "vti.h"
-#include "asm-offsets.h"
-
-#define ACCE_MOV_FROM_AR
-#define ACCE_MOV_FROM_RR
-#define ACCE_MOV_TO_RR
-#define ACCE_RSM
-#define ACCE_SSM
-#define ACCE_MOV_TO_PSR
-#define ACCE_THASH
-
-#define VMX_VPS_SYNC_READ \
- add r16=VMM_VPD_BASE_OFFSET,r21; \
- mov r17 = b0; \
- mov r18 = r24; \
- mov r19 = r25; \
- mov r20 = r31; \
- ;; \
-{.mii; \
- ld8 r16 = [r16]; \
- nop 0x0; \
- mov r24 = ip; \
- ;; \
-}; \
-{.mmb; \
- add r24=0x20, r24; \
- mov r25 =r16; \
- br.sptk.many kvm_vps_sync_read; \
-}; \
- mov b0 = r17; \
- mov r24 = r18; \
- mov r25 = r19; \
- mov r31 = r20
-
-ENTRY(kvm_vps_entry)
- adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
- ;;
- ld8 r29 = [r29]
- ;;
- add r29 = r29, r30
- ;;
- mov b0 = r29
- br.sptk.many b0
-END(kvm_vps_entry)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_read)
- movl r30 = PAL_VPS_SYNC_READ
- ;;
- br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_read)
-
-/*
- * Inputs:
- * r24 : return address
- * r25 : vpd
- * r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_write)
- movl r30 = PAL_VPS_SYNC_WRITE
- ;;
- br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_write)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- *
- */
-GLOBAL_ENTRY(kvm_vps_resume_normal)
- movl r30 = PAL_VPS_RESUME_NORMAL
- ;;
- mov pr=r23,-2
- br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_normal)
-
-/*
- * Inputs:
- * r23 : pr
- * r24 : guest b0
- * r25 : vpd
- * r17 : isr
- */
-GLOBAL_ENTRY(kvm_vps_resume_handler)
- movl r30 = PAL_VPS_RESUME_HANDLER
- ;;
- ld8 r26=[r25]
- shr r17=r17,IA64_ISR_IR_BIT
- ;;
- dep r26=r17,r26,63,1 // bit 63 of r26 indicates whether to enable CFLE
- mov pr=r23,-2
- br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_handler)
-
-//mov r1=ar3
-GLOBAL_ENTRY(kvm_asm_mov_from_ar)
-#ifndef ACCE_MOV_FROM_AR
- br.many kvm_virtualization_fault_back
-#endif
- add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
- add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
- extr.u r17=r25,6,7
- ;;
- ld8 r18=[r18]
- mov r19=ar.itc
- mov r24=b0
- ;;
- add r19=r19,r18
- addl r20=@gprel(asm_mov_to_reg),gp
- ;;
- st8 [r16] = r19
- adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
- shladd r17=r17,4,r20
- ;;
- mov b0=r17
- br.sptk.few b0
- ;;
-END(kvm_asm_mov_from_ar)
-
-/*
- * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
- * clock as its source for emulating the ITC. This version will be
- * copied on top of the original version if the host is determined to
- * be an SN2.
- */
-GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
- add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
- movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
-
- add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
- extr.u r17=r25,6,7
- mov r24=b0
- ;;
- ld8 r18=[r18]
- ld8 r19=[r19]
- addl r20=@gprel(asm_mov_to_reg),gp
- ;;
- add r19=r19,r18
- shladd r17=r17,4,r20
- ;;
- adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
- st8 [r16] = r19
- mov b0=r17
- br.sptk.few b0
- ;;
-END(kvm_asm_mov_from_ar_sn2)
-
-
-
-// mov r1=rr[r3]
-GLOBAL_ENTRY(kvm_asm_mov_from_rr)
-#ifndef ACCE_MOV_FROM_RR
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r16=r25,20,7
- extr.u r17=r25,6,7
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20
- mov r24=b0
- ;;
- add r27=VMM_VCPU_VRR0_OFFSET,r21
- mov b0=r16
- br.many b0
- ;;
-kvm_asm_mov_from_rr_back_1:
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
- shr.u r26=r19,61
- ;;
- shladd r17=r17,4,r22
- shladd r27=r26,3,r27
- ;;
- ld8 r19=[r27]
- mov b0=r17
- br.many b0
-END(kvm_asm_mov_from_rr)
-
-
-// mov rr[r3]=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_rr)
-#ifndef ACCE_MOV_TO_RR
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r16=r25,20,7
- extr.u r17=r25,13,7
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
- shladd r16=r16,4,r20
- mov r22=b0
- ;;
- add r27=VMM_VCPU_VRR0_OFFSET,r21
- mov b0=r16
- br.many b0
- ;;
-kvm_asm_mov_to_rr_back_1:
- adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
- shr.u r23=r19,61
- shladd r17=r17,4,r20
- ;;
- //if rr6, go back
- cmp.eq p6,p0=6,r23
- mov b0=r22
- (p6) br.cond.dpnt.many kvm_virtualization_fault_back
- ;;
- mov r28=r19
- mov b0=r17
- br.many b0
-kvm_asm_mov_to_rr_back_2:
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- shladd r27=r23,3,r27
- ;; // vrr.rid<<4 |0xe
- st8 [r27]=r19
- mov b0=r30
- ;;
- extr.u r16=r19,8,26
- extr.u r18 =r19,2,6
- mov r17 =0xe
- ;;
- shladd r16 = r16, 4, r17
- extr.u r19 =r19,0,8
- ;;
- shl r16 = r16,8
- ;;
- add r19 = r19, r16
- ;; //set ve 1
- dep r19=-1,r19,0,1
- cmp.lt p6,p0=14,r18
- ;;
- (p6) mov r18=14
- ;;
- (p6) dep r19=r18,r19,2,6
- ;;
- cmp.eq p6,p0=0,r23
- ;;
- cmp.eq.or p6,p0=4,r23
- ;;
- adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- ;;
- ld4 r16=[r16]
- cmp.eq p7,p0=r0,r0
- (p6) shladd r17=r23,1,r17
- ;;
- (p6) st8 [r17]=r19
- (p6) tbit.nz p6,p7=r16,0
- ;;
- (p7) mov rr[r28]=r19
- mov r24=r22
- br.many b0
-END(kvm_asm_mov_to_rr)
-
-
-//rsm
-GLOBAL_ENTRY(kvm_asm_rsm)
-#ifndef ACCE_RSM
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
- ;;
- extr.u r28=r25,36,1
- dep r26=r27,r26,21,2
- ;;
- add r17=VPD_VPSR_START_OFFSET,r16
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- //r26 is imm24
- dep r26=r28,r26,23,1
- ;;
- ld8 r18=[r17]
- movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
- ld4 r23=[r22]
- sub r27=-1,r26
- mov r24=b0
- ;;
- mov r20=cr.ipsr
- or r28=r27,r28
- and r19=r18,r27
- ;;
- st8 [r17]=r19
- and r20=r20,r28
- /* Commented out due to lack of fp lazy algorithm support
- adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
- ;;
- ld8 r27=[r27]
- ;;
- tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
- ;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
- */
- ;;
- mov cr.ipsr=r20
- tbit.nz p6,p0=r23,0
- ;;
- tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
- (p6) br.dptk kvm_resume_to_guest_with_sync
- ;;
- add r26=VMM_VCPU_META_RR0_OFFSET,r21
- add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
- dep r23=-1,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_rsm)
-
-
-//ssm
-GLOBAL_ENTRY(kvm_asm_ssm)
-#ifndef ACCE_SSM
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
- ;;
- extr.u r28=r25,36,1
- dep r26=r27,r26,21,2
- ;; //r26 is imm24
- add r27=VPD_VPSR_START_OFFSET,r16
- dep r26=r28,r26,23,1
- ;; //r19 vpsr
- ld8 r29=[r27]
- mov r24=b0
- ;;
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- mov r20=cr.ipsr
- or r19=r29,r26
- ;;
- ld4 r23=[r22]
- st8 [r27]=r19
- or r20=r20,r26
- ;;
- mov cr.ipsr=r20
- movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
- ;;
- and r19=r28,r19
- tbit.z p6,p0=r23,0
- ;;
- cmp.ne.or p6,p0=r28,r19
- (p6) br.dptk kvm_asm_ssm_1
- ;;
- add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
- dep r23=0,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- ;;
-kvm_asm_ssm_1:
- tbit.nz p6,p0=r29,IA64_PSR_I_BIT
- ;;
- tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
- (p6) br.dptk kvm_resume_to_guest_with_sync
- ;;
- add r29=VPD_VTPR_START_OFFSET,r16
- add r30=VPD_VHPI_START_OFFSET,r16
- ;;
- ld8 r29=[r29]
- ld8 r30=[r30]
- ;;
- extr.u r17=r29,4,4
- extr.u r18=r29,16,1
- ;;
- dep r17=r18,r17,4,1
- ;;
- cmp.gt p6,p0=r30,r17
- (p6) br.dpnt.few kvm_asm_dispatch_vexirq
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_ssm)
-
-
-//mov psr.l=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_psr)
-#ifndef ACCE_MOV_TO_PSR
- br.many kvm_virtualization_fault_back
-#endif
- VMX_VPS_SYNC_READ
- ;;
- extr.u r26=r25,13,7 //r2
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
- shladd r26=r26,4,r20
- mov r24=b0
- ;;
- add r27=VPD_VPSR_START_OFFSET,r16
- mov b0=r26
- br.many b0
- ;;
-kvm_asm_mov_to_psr_back:
- ld8 r17=[r27]
- add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
- dep r19=0,r19,32,32
- ;;
- ld4 r23=[r22]
- dep r18=0,r17,0,32
- ;;
- add r30=r18,r19
- movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
- ;;
- st8 [r27]=r30
- and r27=r28,r30
- and r29=r28,r17
- ;;
- cmp.eq p5,p0=r29,r27
- cmp.eq p6,p7=r28,r27
- (p5) br.many kvm_asm_mov_to_psr_1
- ;;
- //virtual to physical
- (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
- (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
- (p7) dep r23=-1,r23,0,1
- ;;
- //physical to virtual
- (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
- (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
- (p6) dep r23=0,r23,0,1
- ;;
- ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
- dep.z r28=4,61,3
- ;;
- mov rr[r0]=r26
- ;;
- mov rr[r28]=r27
- ;;
- srlz.d
- ;;
-kvm_asm_mov_to_psr_1:
- mov r20=cr.ipsr
- movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
- ;;
- or r19=r19,r28
- dep r20=0,r20,0,32
- ;;
- add r20=r19,r20
- mov b0=r24
- ;;
- /* Commented out due to lack of fp lazy algorithm support
- adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
- ;;
- ld8 r27=[r27]
- ;;
- tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
- ;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
- ;;
- */
- mov cr.ipsr=r20
- cmp.ne p6,p0=r0,r0
- ;;
- tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
- tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
- (p6) br.dpnt.few kvm_resume_to_guest_with_sync
- ;;
- add r29=VPD_VTPR_START_OFFSET,r16
- add r30=VPD_VHPI_START_OFFSET,r16
- ;;
- ld8 r29=[r29]
- ld8 r30=[r30]
- ;;
- extr.u r17=r29,4,4
- extr.u r18=r29,16,1
- ;;
- dep r17=r18,r17,4,1
- ;;
- cmp.gt p6,p0=r30,r17
- (p6) br.dpnt.few kvm_asm_dispatch_vexirq
- br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_mov_to_psr)
-
-
-ENTRY(kvm_asm_dispatch_vexirq)
-//increment iip
- mov r17 = b0
- mov r18 = r31
-{.mii
- add r25=VMM_VPD_BASE_OFFSET,r21
- nop 0x0
- mov r24 = ip
- ;;
-}
-{.mmb
- add r24 = 0x20, r24
- ld8 r25 = [r25]
- br.sptk.many kvm_vps_sync_write
-}
- mov b0 =r17
- mov r16=cr.ipsr
- mov r31 = r18
- mov r19 = 37
- ;;
- extr.u r17=r16,IA64_PSR_RI_BIT,2
- tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
- ;;
- (p6) mov r18=cr.iip
- (p6) mov r17=r0
- (p7) add r17=1,r17
- ;;
- (p6) add r18=0x10,r18
- dep r16=r17,r16,IA64_PSR_RI_BIT,2
- ;;
- (p6) mov cr.iip=r18
- mov cr.ipsr=r16
- mov r30 =1
- br.many kvm_dispatch_vexirq
-END(kvm_asm_dispatch_vexirq)
-
-// thash
-// TODO: add support when pta.vf = 1
-GLOBAL_ENTRY(kvm_asm_thash)
-#ifndef ACCE_THASH
- br.many kvm_virtualization_fault_back
-#endif
- extr.u r17=r25,20,7 // get r3 from opcode in r25
- extr.u r18=r25,6,7 // get r1 from opcode in r25
- addl r20=@gprel(asm_mov_from_reg),gp
- ;;
- adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
- shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
- adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
- ;;
- mov r24=b0
- ;;
- ld8 r16=[r16] // get VPD addr
- mov b0=r17
- br.many b0 // r19 return value
- ;;
-kvm_asm_thash_back1:
- shr.u r23=r19,61 // get RR number
- adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
- adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
- ;;
- shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr
- ld8 r17=[r16] // get PTA
- mov r26=1
- ;;
- extr.u r29=r17,2,6 // get pta.size
- ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value
- ;;
- mov b0=r24
- //Fallback to C if pta.vf is set
- tbit.nz p6,p0=r17, 8
- ;;
- (p6) mov r24=EVENT_THASH
- (p6) br.cond.dpnt.many kvm_virtualization_fault_back
- extr.u r28=r28,2,6 // get rr.ps
- shl r22=r26,r29 // 1UL << pta.size
- ;;
- shr.u r23=r19,r28 // vaddr >> rr.ps
- adds r26=3,r29 // pta.size + 3
- shl r27=r17,3 // pta << 3
- ;;
- shl r23=r23,3 // (vaddr >> rr.ps) << 3
- shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
- movl r16=7<<61
- ;;
- adds r22=-1,r22 // (1UL << pta.size) - 1
- shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
- and r19=r19,r16 // vaddr & VRN_MASK
- ;;
- and r22=r22,r23 // vhpt_offset
- or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
- adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
- ;;
- or r19=r19,r22 // calc pval
- shladd r17=r18,4,r26
- adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
- ;;
- mov b0=r17
- br.many b0
-END(kvm_asm_thash)
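
The comments in kvm_asm_thash above spell out the hash it computes when pta.vf is clear: the VHPT base is carved out of PTA, and the entry offset comes from (vaddr >> rr.ps) << 3 masked to the VHPT size. A C restatement of that arithmetic under the bit layouts noted in the comments (illustrative values only, not the kernel's thash helper):

#include <stdint.h>
#include <stdio.h>

/*
 * pval = (vaddr & VRN_MASK)
 *      | (((pta << 3) >> (pta.size + 3)) << pta.size)          -- VHPT base
 *      | (((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1))   -- entry offset
 */
static uint64_t thash(uint64_t vaddr, uint64_t pta, unsigned int rr_ps)
{
        unsigned int pta_size = (pta >> 2) & 0x3f;      /* pta.size, bits 7:2 */
        uint64_t vrn_mask = 7UL << 61;                  /* region bits 63:61 */
        uint64_t vhpt_base = ((pta << 3) >> (pta_size + 3)) << pta_size;
        uint64_t vhpt_off = ((vaddr >> rr_ps) << 3) & ((1UL << pta_size) - 1);

        return (vaddr & vrn_mask) | vhpt_base | vhpt_off;
}

int main(void)
{
        /* hypothetical setup: 16KB pages (ps = 14), a 32KB VHPT (pta.size = 15) */
        uint64_t pta = (0x1000UL << 15) | (15 << 2) | 1;        /* base | size | ve */

        printf("hash = 0x%llx\n",
               (unsigned long long)thash(0x2000000000001234UL, pta, 14));
        return 0;
}
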
-
-#define MOV_TO_REG0 \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- nop.b 0x0; \
- ;; \
-};
-
-
-#define MOV_TO_REG(n) \
-{; \
- mov r##n##=r19; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_FROM_REG(n) \
-{; \
- mov r19=r##n##; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-};
-
-
-#define MOV_TO_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- mov r2=r19; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r##n##=r2; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r2=r26; \
- mov b0=r30; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_to_bank0_reg##n##)
-
-
-#define MOV_FROM_BANK0_REG(n) \
-ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
-{; \
- mov r26=r2; \
- nop.b 0x0; \
- bsw.1; \
- ;; \
-}; \
-{; \
- mov r2=r##n##; \
- nop.b 0x0; \
- bsw.0; \
- ;; \
-}; \
-{; \
- mov r19=r2; \
- mov r2=r26; \
- mov b0=r30; \
-}; \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many b0; \
- ;; \
-}; \
-END(asm_mov_from_bank0_reg##n##)
-
-
-#define JMP_TO_MOV_TO_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_to_bank0_reg##n##; \
- ;; \
-}
-
-
-#define JMP_TO_MOV_FROM_BANK0_REG(n) \
-{; \
- nop.b 0x0; \
- nop.b 0x0; \
- br.sptk.many asm_mov_from_bank0_reg##n##; \
- ;; \
-}
-
-
-MOV_FROM_BANK0_REG(16)
-MOV_FROM_BANK0_REG(17)
-MOV_FROM_BANK0_REG(18)
-MOV_FROM_BANK0_REG(19)
-MOV_FROM_BANK0_REG(20)
-MOV_FROM_BANK0_REG(21)
-MOV_FROM_BANK0_REG(22)
-MOV_FROM_BANK0_REG(23)
-MOV_FROM_BANK0_REG(24)
-MOV_FROM_BANK0_REG(25)
-MOV_FROM_BANK0_REG(26)
-MOV_FROM_BANK0_REG(27)
-MOV_FROM_BANK0_REG(28)
-MOV_FROM_BANK0_REG(29)
-MOV_FROM_BANK0_REG(30)
-MOV_FROM_BANK0_REG(31)
-
-
-// mov from reg table
-ENTRY(asm_mov_from_reg)
- MOV_FROM_REG(0)
- MOV_FROM_REG(1)
- MOV_FROM_REG(2)
- MOV_FROM_REG(3)
- MOV_FROM_REG(4)
- MOV_FROM_REG(5)
- MOV_FROM_REG(6)
- MOV_FROM_REG(7)
- MOV_FROM_REG(8)
- MOV_FROM_REG(9)
- MOV_FROM_REG(10)
- MOV_FROM_REG(11)
- MOV_FROM_REG(12)
- MOV_FROM_REG(13)
- MOV_FROM_REG(14)
- MOV_FROM_REG(15)
- JMP_TO_MOV_FROM_BANK0_REG(16)
- JMP_TO_MOV_FROM_BANK0_REG(17)
- JMP_TO_MOV_FROM_BANK0_REG(18)
- JMP_TO_MOV_FROM_BANK0_REG(19)
- JMP_TO_MOV_FROM_BANK0_REG(20)
- JMP_TO_MOV_FROM_BANK0_REG(21)
- JMP_TO_MOV_FROM_BANK0_REG(22)
- JMP_TO_MOV_FROM_BANK0_REG(23)
- JMP_TO_MOV_FROM_BANK0_REG(24)
- JMP_TO_MOV_FROM_BANK0_REG(25)
- JMP_TO_MOV_FROM_BANK0_REG(26)
- JMP_TO_MOV_FROM_BANK0_REG(27)
- JMP_TO_MOV_FROM_BANK0_REG(28)
- JMP_TO_MOV_FROM_BANK0_REG(29)
- JMP_TO_MOV_FROM_BANK0_REG(30)
- JMP_TO_MOV_FROM_BANK0_REG(31)
- MOV_FROM_REG(32)
- MOV_FROM_REG(33)
- MOV_FROM_REG(34)
- MOV_FROM_REG(35)
- MOV_FROM_REG(36)
- MOV_FROM_REG(37)
- MOV_FROM_REG(38)
- MOV_FROM_REG(39)
- MOV_FROM_REG(40)
- MOV_FROM_REG(41)
- MOV_FROM_REG(42)
- MOV_FROM_REG(43)
- MOV_FROM_REG(44)
- MOV_FROM_REG(45)
- MOV_FROM_REG(46)
- MOV_FROM_REG(47)
- MOV_FROM_REG(48)
- MOV_FROM_REG(49)
- MOV_FROM_REG(50)
- MOV_FROM_REG(51)
- MOV_FROM_REG(52)
- MOV_FROM_REG(53)
- MOV_FROM_REG(54)
- MOV_FROM_REG(55)
- MOV_FROM_REG(56)
- MOV_FROM_REG(57)
- MOV_FROM_REG(58)
- MOV_FROM_REG(59)
- MOV_FROM_REG(60)
- MOV_FROM_REG(61)
- MOV_FROM_REG(62)
- MOV_FROM_REG(63)
- MOV_FROM_REG(64)
- MOV_FROM_REG(65)
- MOV_FROM_REG(66)
- MOV_FROM_REG(67)
- MOV_FROM_REG(68)
- MOV_FROM_REG(69)
- MOV_FROM_REG(70)
- MOV_FROM_REG(71)
- MOV_FROM_REG(72)
- MOV_FROM_REG(73)
- MOV_FROM_REG(74)
- MOV_FROM_REG(75)
- MOV_FROM_REG(76)
- MOV_FROM_REG(77)
- MOV_FROM_REG(78)
- MOV_FROM_REG(79)
- MOV_FROM_REG(80)
- MOV_FROM_REG(81)
- MOV_FROM_REG(82)
- MOV_FROM_REG(83)
- MOV_FROM_REG(84)
- MOV_FROM_REG(85)
- MOV_FROM_REG(86)
- MOV_FROM_REG(87)
- MOV_FROM_REG(88)
- MOV_FROM_REG(89)
- MOV_FROM_REG(90)
- MOV_FROM_REG(91)
- MOV_FROM_REG(92)
- MOV_FROM_REG(93)
- MOV_FROM_REG(94)
- MOV_FROM_REG(95)
- MOV_FROM_REG(96)
- MOV_FROM_REG(97)
- MOV_FROM_REG(98)
- MOV_FROM_REG(99)
- MOV_FROM_REG(100)
- MOV_FROM_REG(101)
- MOV_FROM_REG(102)
- MOV_FROM_REG(103)
- MOV_FROM_REG(104)
- MOV_FROM_REG(105)
- MOV_FROM_REG(106)
- MOV_FROM_REG(107)
- MOV_FROM_REG(108)
- MOV_FROM_REG(109)
- MOV_FROM_REG(110)
- MOV_FROM_REG(111)
- MOV_FROM_REG(112)
- MOV_FROM_REG(113)
- MOV_FROM_REG(114)
- MOV_FROM_REG(115)
- MOV_FROM_REG(116)
- MOV_FROM_REG(117)
- MOV_FROM_REG(118)
- MOV_FROM_REG(119)
- MOV_FROM_REG(120)
- MOV_FROM_REG(121)
- MOV_FROM_REG(122)
- MOV_FROM_REG(123)
- MOV_FROM_REG(124)
- MOV_FROM_REG(125)
- MOV_FROM_REG(126)
- MOV_FROM_REG(127)
-END(asm_mov_from_reg)
-
-
-/* must be in bank 0
- * parameter:
- * r31: pr
- * r24: b0
- */
-ENTRY(kvm_resume_to_guest_with_sync)
- adds r19=VMM_VPD_BASE_OFFSET,r21
- mov r16 = r31
- mov r17 = r24
- ;;
-{.mii
- ld8 r25 =[r19]
- nop 0x0
- mov r24 = ip
- ;;
-}
-{.mmb
- add r24 =0x20, r24
- nop 0x0
- br.sptk.many kvm_vps_sync_write
-}
-
- mov r31 = r16
- mov r24 =r17
- ;;
- br.sptk.many kvm_resume_to_guest
-END(kvm_resume_to_guest_with_sync)
-
-ENTRY(kvm_resume_to_guest)
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- ld8 r1 =[r16]
- adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
- ;;
- mov r16=cr.ipsr
- ;;
- ld8 r20 = [r20]
- adds r19=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r25=[r19]
- extr.u r17=r16,IA64_PSR_RI_BIT,2
- tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
- ;;
- (p6) mov r18=cr.iip
- (p6) mov r17=r0
- ;;
- (p6) add r18=0x10,r18
- (p7) add r17=1,r17
- ;;
- (p6) mov cr.iip=r18
- dep r16=r17,r16,IA64_PSR_RI_BIT,2
- ;;
- mov cr.ipsr=r16
- adds r19= VPD_VPSR_START_OFFSET,r25
- add r28=PAL_VPS_RESUME_NORMAL,r20
- add r29=PAL_VPS_RESUME_HANDLER,r20
- ;;
- ld8 r19=[r19]
- mov b0=r29
- mov r27=cr.isr
- ;;
- tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic
- shr r27=r27,IA64_ISR_IR_BIT
- ;;
- (p6) ld8 r26=[r25]
- (p7) mov b0=r28
- ;;
- (p6) dep r26=r27,r26,63,1
- mov pr=r31,-2
- br.sptk.many b0 // call pal service
- ;;
-END(kvm_resume_to_guest)
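
Both kvm_asm_dispatch_vexirq and kvm_resume_to_guest above step the guest to its next instruction slot: psr.ri cycles 0, 1, 2 and, on wrap-around, iip advances by one 16-byte bundle. A plain C restatement of that stepping (illustrative struct, not the kernel's register layout):

#include <assert.h>
#include <stdint.h>

struct guest_ip {
        uint64_t iip;           /* bundle address */
        unsigned int ri;        /* slot within the bundle: 0, 1 or 2 */
};

/* Advance to the next instruction slot, moving to the next bundle on wrap. */
static void advance_slot(struct guest_ip *ip)
{
        if (ip->ri == 2) {
                ip->ri = 0;
                ip->iip += 16;  /* IA-64 bundles are 16 bytes */
        } else {
                ip->ri++;
        }
}

int main(void)
{
        struct guest_ip ip = { .iip = 0x1000, .ri = 2 };

        advance_slot(&ip);
        assert(ip.iip == 0x1010 && ip.ri == 0);
        advance_slot(&ip);
        assert(ip.iip == 0x1010 && ip.ri == 1);
        return 0;
}
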
-
-
-MOV_TO_BANK0_REG(16)
-MOV_TO_BANK0_REG(17)
-MOV_TO_BANK0_REG(18)
-MOV_TO_BANK0_REG(19)
-MOV_TO_BANK0_REG(20)
-MOV_TO_BANK0_REG(21)
-MOV_TO_BANK0_REG(22)
-MOV_TO_BANK0_REG(23)
-MOV_TO_BANK0_REG(24)
-MOV_TO_BANK0_REG(25)
-MOV_TO_BANK0_REG(26)
-MOV_TO_BANK0_REG(27)
-MOV_TO_BANK0_REG(28)
-MOV_TO_BANK0_REG(29)
-MOV_TO_BANK0_REG(30)
-MOV_TO_BANK0_REG(31)
-
-
-// mov to reg table
-ENTRY(asm_mov_to_reg)
- MOV_TO_REG0
- MOV_TO_REG(1)
- MOV_TO_REG(2)
- MOV_TO_REG(3)
- MOV_TO_REG(4)
- MOV_TO_REG(5)
- MOV_TO_REG(6)
- MOV_TO_REG(7)
- MOV_TO_REG(8)
- MOV_TO_REG(9)
- MOV_TO_REG(10)
- MOV_TO_REG(11)
- MOV_TO_REG(12)
- MOV_TO_REG(13)
- MOV_TO_REG(14)
- MOV_TO_REG(15)
- JMP_TO_MOV_TO_BANK0_REG(16)
- JMP_TO_MOV_TO_BANK0_REG(17)
- JMP_TO_MOV_TO_BANK0_REG(18)
- JMP_TO_MOV_TO_BANK0_REG(19)
- JMP_TO_MOV_TO_BANK0_REG(20)
- JMP_TO_MOV_TO_BANK0_REG(21)
- JMP_TO_MOV_TO_BANK0_REG(22)
- JMP_TO_MOV_TO_BANK0_REG(23)
- JMP_TO_MOV_TO_BANK0_REG(24)
- JMP_TO_MOV_TO_BANK0_REG(25)
- JMP_TO_MOV_TO_BANK0_REG(26)
- JMP_TO_MOV_TO_BANK0_REG(27)
- JMP_TO_MOV_TO_BANK0_REG(28)
- JMP_TO_MOV_TO_BANK0_REG(29)
- JMP_TO_MOV_TO_BANK0_REG(30)
- JMP_TO_MOV_TO_BANK0_REG(31)
- MOV_TO_REG(32)
- MOV_TO_REG(33)
- MOV_TO_REG(34)
- MOV_TO_REG(35)
- MOV_TO_REG(36)
- MOV_TO_REG(37)
- MOV_TO_REG(38)
- MOV_TO_REG(39)
- MOV_TO_REG(40)
- MOV_TO_REG(41)
- MOV_TO_REG(42)
- MOV_TO_REG(43)
- MOV_TO_REG(44)
- MOV_TO_REG(45)
- MOV_TO_REG(46)
- MOV_TO_REG(47)
- MOV_TO_REG(48)
- MOV_TO_REG(49)
- MOV_TO_REG(50)
- MOV_TO_REG(51)
- MOV_TO_REG(52)
- MOV_TO_REG(53)
- MOV_TO_REG(54)
- MOV_TO_REG(55)
- MOV_TO_REG(56)
- MOV_TO_REG(57)
- MOV_TO_REG(58)
- MOV_TO_REG(59)
- MOV_TO_REG(60)
- MOV_TO_REG(61)
- MOV_TO_REG(62)
- MOV_TO_REG(63)
- MOV_TO_REG(64)
- MOV_TO_REG(65)
- MOV_TO_REG(66)
- MOV_TO_REG(67)
- MOV_TO_REG(68)
- MOV_TO_REG(69)
- MOV_TO_REG(70)
- MOV_TO_REG(71)
- MOV_TO_REG(72)
- MOV_TO_REG(73)
- MOV_TO_REG(74)
- MOV_TO_REG(75)
- MOV_TO_REG(76)
- MOV_TO_REG(77)
- MOV_TO_REG(78)
- MOV_TO_REG(79)
- MOV_TO_REG(80)
- MOV_TO_REG(81)
- MOV_TO_REG(82)
- MOV_TO_REG(83)
- MOV_TO_REG(84)
- MOV_TO_REG(85)
- MOV_TO_REG(86)
- MOV_TO_REG(87)
- MOV_TO_REG(88)
- MOV_TO_REG(89)
- MOV_TO_REG(90)
- MOV_TO_REG(91)
- MOV_TO_REG(92)
- MOV_TO_REG(93)
- MOV_TO_REG(94)
- MOV_TO_REG(95)
- MOV_TO_REG(96)
- MOV_TO_REG(97)
- MOV_TO_REG(98)
- MOV_TO_REG(99)
- MOV_TO_REG(100)
- MOV_TO_REG(101)
- MOV_TO_REG(102)
- MOV_TO_REG(103)
- MOV_TO_REG(104)
- MOV_TO_REG(105)
- MOV_TO_REG(106)
- MOV_TO_REG(107)
- MOV_TO_REG(108)
- MOV_TO_REG(109)
- MOV_TO_REG(110)
- MOV_TO_REG(111)
- MOV_TO_REG(112)
- MOV_TO_REG(113)
- MOV_TO_REG(114)
- MOV_TO_REG(115)
- MOV_TO_REG(116)
- MOV_TO_REG(117)
- MOV_TO_REG(118)
- MOV_TO_REG(119)
- MOV_TO_REG(120)
- MOV_TO_REG(121)
- MOV_TO_REG(122)
- MOV_TO_REG(123)
- MOV_TO_REG(124)
- MOV_TO_REG(125)
- MOV_TO_REG(126)
- MOV_TO_REG(127)
-END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
deleted file mode 100644
index b0398740b48d..000000000000
--- a/arch/ia64/kvm/process.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * process.c: handle interruption injection for guests.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- */
-#include "vcpu.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/fpswa.h>
-#include <asm/kregs.h>
-#include <asm/tlb.h>
-
-fpswa_interface_t *vmm_fpswa_interface;
-
-#define IA64_VHPT_TRANS_VECTOR 0x0000
-#define IA64_INST_TLB_VECTOR 0x0400
-#define IA64_DATA_TLB_VECTOR 0x0800
-#define IA64_ALT_INST_TLB_VECTOR 0x0c00
-#define IA64_ALT_DATA_TLB_VECTOR 0x1000
-#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
-#define IA64_INST_KEY_MISS_VECTOR 0x1800
-#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
-#define IA64_DIRTY_BIT_VECTOR 0x2000
-#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
-#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
-#define IA64_BREAK_VECTOR 0x2c00
-#define IA64_EXTINT_VECTOR 0x3000
-#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
-#define IA64_KEY_PERMISSION_VECTOR 0x5100
-#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
-#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
-#define IA64_GENEX_VECTOR 0x5400
-#define IA64_DISABLED_FPREG_VECTOR 0x5500
-#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
-#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
-#define IA64_DEBUG_VECTOR 0x5900
-#define IA64_UNALIGNED_REF_VECTOR 0x5a00
-#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
-#define IA64_FP_FAULT_VECTOR 0x5c00
-#define IA64_FP_TRAP_VECTOR 0x5d00
-#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
-#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
-#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-
-/* SDM vol2 5.5 - IVA based interruption handling */
-#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
- IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
- IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
-
-#define DOMN_PAL_REQUEST 0x110000
-#define DOMN_SAL_REQUEST 0x110001
-
-static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
- 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
- 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
- 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
- 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
- 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
- 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
- 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
-};
-
-static void collect_interruption(struct kvm_vcpu *vcpu)
-{
- u64 ipsr;
- u64 vdcr;
- u64 vifs;
- unsigned long vpsr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- vpsr = vcpu_get_psr(vcpu);
- vcpu_bsw0(vcpu);
- if (vpsr & IA64_PSR_IC) {
-
- /* Sync mpsr id/da/dd/ss/ed bits to vipsr,
- * since after the guest does rfi we still want these bits
- * set in mpsr
- */
-
- ipsr = regs->cr_ipsr;
- vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
- | IA64_PSR_DD | IA64_PSR_SS
- | IA64_PSR_ED));
- vcpu_set_ipsr(vcpu, vpsr);
-
- /* Currently, for a trap, we do not advance IIP to the next
- * instruction, because we assume the caller has already
- * set up IIP correctly
- */
-
- vcpu_set_iip(vcpu , regs->cr_iip);
-
- /* set vifs.v to zero */
- vifs = VCPU(vcpu, ifs);
- vifs &= ~IA64_IFS_V;
- vcpu_set_ifs(vcpu, vifs);
-
- vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
- }
-
- vdcr = VCPU(vcpu, dcr);
-
- /* Set guest psr
- * up/mfl/mfh/pk/dt/rt/mc/it are kept unchanged
- * be: set to the value of dcr.be
- * pp: set to the value of dcr.pp
- */
- vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
- vpsr |= (vdcr & IA64_DCR_BE);
-
- /* VDCR pp bit position is different from VPSR pp bit */
- if (vdcr & IA64_DCR_PP) {
- vpsr |= IA64_PSR_PP;
- } else {
- vpsr &= ~IA64_PSR_PP;
- }
-
- vcpu_set_psr(vcpu, vpsr);
-
-}
-
-void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
-{
- u64 viva;
- struct kvm_pt_regs *regs;
- union ia64_isr pt_isr;
-
- regs = vcpu_regs(vcpu);
-
- /* clear cr.isr.ir (incomplete register frame)*/
- pt_isr.val = VMX(vcpu, cr_isr);
- pt_isr.ir = 0;
- VMX(vcpu, cr_isr) = pt_isr.val;
-
- collect_interruption(vcpu);
-
- viva = vcpu_get_iva(vcpu);
- regs->cr_iip = viva + vec;
-}
-
-static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
-{
- union ia64_rr rr, rr1;
-
- rr.val = vcpu_get_rr(vcpu, ifa);
- rr1.val = 0;
- rr1.ps = rr.ps;
- rr1.rid = rr.rid;
- return (rr1.val);
-}
-
-/*
- * Set vIFA & vITIR & vIHA, when vPSR.ic =1
- * Parameter:
- * set_ifa: if true, set vIFA
- * set_itir: if true, set vITIR
- * set_iha: if true, set vIHA
- */
-void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
- int set_ifa, int set_itir, int set_iha)
-{
- long vpsr;
- u64 value;
-
- vpsr = VCPU(vcpu, vpsr);
- /* Vol2, Table 8-1 */
- if (vpsr & IA64_PSR_IC) {
- if (set_ifa)
- vcpu_set_ifa(vcpu, vadr);
- if (set_itir) {
- value = vcpu_get_itir_on_fault(vcpu, vadr);
- vcpu_set_itir(vcpu, value);
- }
-
- if (set_iha) {
- value = vcpu_thash(vcpu, vadr);
- vcpu_set_iha(vcpu, value);
- }
- }
-}
-
-/*
- * Data TLB Fault
- * @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
-}
-
-/*
- * Instruction TLB Fault
- * @ Instruction TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
-}
-
-/*
- * Data Nested TLB Fault
- * @ Data Nested TLB Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void nested_dtlb(struct kvm_vcpu *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
-}
-
-/*
- * Alternate Data TLB Fault
- * @ Alternate Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
-}
-
-/*
- * Data TLB Fault
- * @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
-}
-
-/* Deal with:
- * VHPT Translation Vector
- */
-static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR, IHA*/
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
- inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-}
-
-/*
- * VHPT Instruction Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * VHPT Data Fault
- * @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _vhpt_fault(vcpu, vadr);
-}
-
-/*
- * Deal with:
- * General Exception vector
- */
-void _general_exception(struct kvm_vcpu *vcpu)
-{
- inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
-}
-
-/*
- * Illegal Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_op(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Illegal Dependency Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_dep(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Reserved Register/Field Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rsv_reg_field(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-/*
- * Privileged Operation Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-
-void privilege_op(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Unimplemented Data Address Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void unimpl_daddr(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/*
- * Privileged Register Fault
- * @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void privilege_reg(struct kvm_vcpu *vcpu)
-{
- _general_exception(vcpu);
-}
-
-/* Deal with
- * Nat consumption vector
- * Parameter:
- * vaddr: Optional, if t == REGISTER
- */
-static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
- enum tlb_miss_type t)
-{
- /* If vPSR.ic && t == DATA/INST, IFA */
- if (t == DATA || t == INSTRUCTION) {
- /* IFA */
- set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
- }
-
- inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
-}
-
-/*
- * Instruction Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
-}
-
-/*
- * Register Nat Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rnat_consumption(struct kvm_vcpu *vcpu)
-{
- _nat_consumption_fault(vcpu, 0, REGISTER);
-}
-
-/*
- * Data Nat Page Consumption Fault
- * @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
- _nat_consumption_fault(vcpu, vadr, DATA);
-}
-
-/* Deal with
- * Page not present vector
- */
-static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
-}
-
-void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
- __page_not_present(vcpu, vadr);
-}
-
-/* Deal with
- * Data access rights vector
- */
-void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
-{
- /* If vPSR.ic, IFA, ITIR */
- set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
- inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
-}
-
-fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
- unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
- unsigned long *ifs, struct kvm_pt_regs *regs)
-{
- fp_state_t fp_state;
- fpswa_ret_t ret;
- struct kvm_vcpu *vcpu = current_vcpu;
-
- uint64_t old_rr7 = ia64_get_rr(7UL<<61);
-
- if (!vmm_fpswa_interface)
- return (fpswa_ret_t) {-1, 0, 0, 0};
-
- memset(&fp_state, 0, sizeof(fp_state_t));
-
- /*
- * compute fp_state. only FP registers f6 - f11 are used by the
- * vmm, so set those bits in the mask and set the low volatile
- * pointer to point to these registers.
- */
- fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
-
- fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
-
- /*
- * unsigned long (*EFI_FPSWA) (
- * unsigned long trap_type,
- * void *Bundle,
- * unsigned long *pipsr,
- * unsigned long *pfsr,
- * unsigned long *pisr,
- * unsigned long *ppreds,
- * unsigned long *pifs,
- * void *fp_state);
- */
- /* Call the host fpswa interface directly to virtualize
- * the guest's fpswa request.
- */
- ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
- ia64_srlz_d();
-
- ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
- ipsr, fpsr, isr, pr, ifs, &fp_state);
- ia64_set_rr(7UL << 61, old_rr7);
- ia64_srlz_d();
- return ret;
-}
-
-/*
- * Handle floating-point assist faults and traps for domain.
- */
-unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
- unsigned long isr)
-{
- struct kvm_vcpu *v = current_vcpu;
- IA64_BUNDLE bundle;
- unsigned long fault_ip;
- fpswa_ret_t ret;
-
- fault_ip = regs->cr_iip;
- /*
- * When an FP trap occurs, the trapping instruction has completed.
- * If ipsr.ri == 0, the trapping instruction is in the previous
- * bundle.
- */
- if (!fp_fault && (ia64_psr(regs)->ri == 0))
- fault_ip -= 16;
-
- if (fetch_code(v, fault_ip, &bundle))
- return -EAGAIN;
-
- if (!bundle.i64[0] && !bundle.i64[1])
- return -EACCES;
-
- ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
- &isr, &regs->pr, &regs->cr_ifs, regs);
- return ret.status;
-}
-
-void reflect_interruption(u64 ifa, u64 isr, u64 iim,
- u64 vec, struct kvm_pt_regs *regs)
-{
- u64 vector;
- int status;
- struct kvm_vcpu *vcpu = current_vcpu;
- u64 vpsr = VCPU(vcpu, vpsr);
-
- vector = vec2off[vec];
-
- if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
- panic_vm(vcpu, "Interruption with vector :0x%lx occurs "
- "with psr.ic = 0\n", vector);
- return;
- }
-
- switch (vec) {
- case 32: /*IA64_FP_FAULT_VECTOR*/
- status = vmm_handle_fpu_swa(1, regs, isr);
- if (!status) {
- vcpu_increment_iip(vcpu);
- return;
- } else if (-EAGAIN == status)
- return;
- break;
- case 33: /*IA64_FP_TRAP_VECTOR*/
- status = vmm_handle_fpu_swa(0, regs, isr);
- if (!status)
- return ;
- break;
- }
-
- VCPU(vcpu, isr) = isr;
- VCPU(vcpu, iipa) = regs->cr_iip;
- if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- VCPU(vcpu, iim) = iim;
- else
- set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
-
- inject_guest_interruption(vcpu, vector);
-}
-
-static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
- unsigned long arg)
-{
- struct thash_data *data;
- unsigned long gpa, poff;
-
- if (!is_physical_mode(vcpu)) {
- /* Depends on caller to provide the DTR or DTC mapping.*/
- data = vtlb_lookup(vcpu, arg, D_TLB);
- if (data)
- gpa = data->page_flags & _PAGE_PPN_MASK;
- else {
- data = vhpt_lookup(arg);
- if (!data)
- return 0;
- gpa = data->gpaddr & _PAGE_PPN_MASK;
- }
-
- poff = arg & (PSIZE(data->ps) - 1);
- arg = PAGEALIGN(gpa, data->ps) | poff;
- }
- arg = kvm_gpa_to_mpa(arg << 1 >> 1);
-
- return (unsigned long)__va(arg);
-}
-
-static void set_pal_call_data(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long gr28 = vcpu_get_gr(vcpu, 28);
- unsigned long gr29 = vcpu_get_gr(vcpu, 29);
- unsigned long gr30 = vcpu_get_gr(vcpu, 30);
-
- /* FIXME: For the static and stacked conventions, the firmware
- * has put the parameters in gr28-gr31 before
- * breaking to the vmm. */
-
- switch (gr28) {
- case PAL_PERF_MON_INFO:
- case PAL_HALT_INFO:
- p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
- p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
- break;
- case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;
- p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
- break;
- default:
- p->u.pal_data.gr29 = gr29;
- p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
- }
- p->u.pal_data.gr28 = gr28;
- p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
-
- p->exit_reason = EXIT_REASON_PAL_CALL;
-}
-
-static void get_pal_call_result(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (p->exit_reason == EXIT_REASON_PAL_CALL) {
- vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
- vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
- vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
- vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
- } else
- panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-static void set_sal_call_data(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
- p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
- p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
- p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
- p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
- p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
- p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
- p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
- p->exit_reason = EXIT_REASON_SAL_CALL;
-}
-
-static void get_sal_call_result(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (p->exit_reason == EXIT_REASON_SAL_CALL) {
- vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
- vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
- vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
- vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
- } else
- panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
- unsigned long isr, unsigned long iim)
-{
- struct kvm_vcpu *v = current_vcpu;
- long psr;
-
- if (ia64_psr(regs)->cpl == 0) {
- /* Allow hypercalls only when cpl = 0. */
- if (iim == DOMN_PAL_REQUEST) {
- local_irq_save(psr);
- set_pal_call_data(v);
- vmm_transition(v);
- get_pal_call_result(v);
- vcpu_increment_iip(v);
- local_irq_restore(psr);
- return;
- } else if (iim == DOMN_SAL_REQUEST) {
- local_irq_save(psr);
- set_sal_call_data(v);
- vmm_transition(v);
- get_sal_call_result(v);
- vcpu_increment_iip(v);
- local_irq_restore(psr);
- return;
- }
- }
- reflect_interruption(ifa, isr, iim, 11, regs);
-}
-
-void check_pending_irq(struct kvm_vcpu *vcpu)
-{
- int mask, h_pending, h_inservice;
- u64 isr;
- unsigned long vpsr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- h_pending = highest_pending_irq(vcpu);
- if (h_pending == NULL_VECTOR) {
- update_vhpi(vcpu, NULL_VECTOR);
- return;
- }
- h_inservice = highest_inservice_irq(vcpu);
-
- vpsr = VCPU(vcpu, vpsr);
- mask = irq_masked(vcpu, h_pending, h_inservice);
- if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
- isr = vpsr & IA64_PSR_RI;
- update_vhpi(vcpu, h_pending);
- reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
- } else if (mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- } else {
- /* masked by vpsr.i or vtpr.*/
- update_vhpi(vcpu, h_pending);
- }
-}
-
-static void generate_exirq(struct kvm_vcpu *vcpu)
-{
- unsigned vpsr;
- uint64_t isr;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- vpsr = VCPU(vcpu, vpsr);
- isr = vpsr & IA64_PSR_RI;
- if (!(vpsr & IA64_PSR_IC))
- panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
- reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
-}
-
-void vhpi_detection(struct kvm_vcpu *vcpu)
-{
- uint64_t threshold, vhpi;
- union ia64_tpr vtpr;
- struct ia64_psr vpsr;
-
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- vtpr.val = VCPU(vcpu, tpr);
-
- threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
- vhpi = VCPU(vcpu, vhpi);
- if (vhpi > threshold) {
- /* interrupt activated */
- generate_exirq(vcpu);
- }
-}
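
vhpi_detection above folds the guest's interrupt-enable bit and TPR fields into one threshold and injects an external interrupt only when the pending priority class (vhpi) exceeds it. A sketch of that comparison; the vhpi encoding itself lives in the VPD and is only stubbed here:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a pending interrupt of class 'vhpi' may be delivered. */
static bool vhpi_deliverable(bool psr_i, unsigned int tpr_mmi,
                             unsigned int tpr_mic, unsigned int vhpi)
{
        unsigned int threshold = ((!psr_i) << 5) | ((tpr_mmi & 1) << 4)
                                 | (tpr_mic & 0xf);
        return vhpi > threshold;
}

int main(void)
{
        /* interrupts enabled, TPR wide open: class 1 gets through */
        printf("%d\n", vhpi_deliverable(true, 0, 0, 1));        /* 1 */
        /* psr.i clear: the threshold (>= 32) masks every class */
        printf("%d\n", vhpi_deliverable(false, 0, 0, 15));      /* 0 */
        /* tpr.mic = 5 masks classes 5 and below */
        printf("%d\n", vhpi_deliverable(true, 0, 5, 5));        /* 0 */
        return 0;
}
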
-
-void leave_hypervisor_tail(void)
-{
- struct kvm_vcpu *v = current_vcpu;
-
- if (VMX(v, timer_check)) {
- VMX(v, timer_check) = 0;
- if (VMX(v, itc_check)) {
- if (vcpu_get_itc(v) > VCPU(v, itm)) {
- if (!(VCPU(v, itv) & (1 << 16))) {
- vcpu_pend_interrupt(v, VCPU(v, itv)
- & 0xff);
- VMX(v, itc_check) = 0;
- } else {
- v->arch.timer_pending = 1;
- }
- VMX(v, last_itc) = VCPU(v, itm) + 1;
- }
- }
- }
-
- rmb();
- if (v->arch.irq_new_pending) {
- v->arch.irq_new_pending = 0;
- VMX(v, irq_check) = 0;
- check_pending_irq(v);
- return;
- }
- if (VMX(v, irq_check)) {
- VMX(v, irq_check) = 0;
- vhpi_detection(v);
- }
-}
-
-static inline void handle_lds(struct kvm_pt_regs *regs)
-{
- regs->cr_ipsr |= IA64_PSR_ED;
-}
-
-void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
-{
- unsigned long pte;
- union ia64_rr rr;
-
- rr.val = ia64_get_rr(vadr);
- pte = vadr & _PAGE_PPN_MASK;
- pte = pte | PHY_PAGE_WB;
- thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
- return;
-}
-
-void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
-{
- unsigned long vpsr;
- int type;
-
- u64 vhpt_adr, gppa, pteval, rr, itir;
- union ia64_isr misr;
- union ia64_pta vpta;
- struct thash_data *data;
- struct kvm_vcpu *v = current_vcpu;
-
- vpsr = VCPU(v, vpsr);
- misr.val = VMX(v, cr_isr);
-
- type = vec;
-
- if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
- if (vec == 2) {
- if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
- emulate_io_inst(v, ((vadr << 1) >> 1), 4);
- return;
- }
- }
- physical_tlb_miss(v, vadr, type);
- return;
- }
- data = vtlb_lookup(v, vadr, type);
- if (data != 0) {
- if (type == D_TLB) {
- gppa = (vadr & ((1UL << data->ps) - 1))
- + (data->ppn >> (data->ps - 12) << data->ps);
- if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
- if (data->pl >= ((regs->cr_ipsr >>
- IA64_PSR_CPL0_BIT) & 3))
- emulate_io_inst(v, gppa, data->ma);
- else {
- vcpu_set_isr(v, misr.val);
- data_access_rights(v, vadr);
- }
- return ;
- }
- }
- thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
-
- } else if (type == D_TLB) {
- if (misr.sp) {
- handle_lds(regs);
- return;
- }
-
- rr = vcpu_get_rr(v, vadr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
-
- if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- alt_dtlb(v, vadr);
- } else {
- nested_dtlb(v);
- }
- return ;
- }
-
- vpta.val = vcpu_get_pta(v);
- /* avoid recursively walking (short format) VHPT */
-
- vhpt_adr = vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (!(pteval & _PAGE_P)) {
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dtlb_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
- thash_purge_and_insert(v, pteval, itir,
- vadr, D_TLB);
- } else if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dtlb_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- } else {
- /* Can't read VHPT. */
- if (vpsr & IA64_PSR_IC) {
- vcpu_set_isr(v, misr.val);
- dvhpt_fault(v, vadr);
- } else {
- nested_dtlb(v);
- }
- }
- } else if (type == I_TLB) {
- if (!(vpsr & IA64_PSR_IC))
- misr.ni = 1;
- if (!vhpt_enabled(v, vadr, INST_REF)) {
- vcpu_set_isr(v, misr.val);
- alt_itlb(v, vadr);
- return;
- }
-
- vpta.val = vcpu_get_pta(v);
-
- vhpt_adr = vcpu_thash(v, vadr);
- if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
- /* VHPT successfully read. */
- if (pteval & _PAGE_P) {
- if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
- vcpu_set_isr(v, misr.val);
- itlb_fault(v, vadr);
- return ;
- }
- rr = vcpu_get_rr(v, vadr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
- thash_purge_and_insert(v, pteval, itir,
- vadr, I_TLB);
- } else {
- vcpu_set_isr(v, misr.val);
- inst_page_not_present(v, vadr);
- }
- } else {
- vcpu_set_isr(v, misr.val);
- ivhpt_fault(v, vadr);
- }
- }
-}
-
-void kvm_vexirq(struct kvm_vcpu *vcpu)
-{
- u64 vpsr, isr;
- struct kvm_pt_regs *regs;
-
- regs = vcpu_regs(vcpu);
- vpsr = VCPU(vcpu, vpsr);
- isr = vpsr & IA64_PSR_RI;
- reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
-}
-
-void kvm_ia64_handle_irq(struct kvm_vcpu *v)
-{
- struct exit_ctl_data *p = &v->arch.exit_data;
- long psr;
-
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
- vmm_transition(v);
- local_irq_restore(psr);
-
- VMX(v, timer_check) = 1;
-
-}
-
-static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
-{
- u64 oldrid, moldrid, oldpsbits, vaddr;
- struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
- vaddr = p->vaddr;
-
- oldrid = VMX(v, vrr[0]);
- VMX(v, vrr[0]) = p->rr;
- oldpsbits = VMX(v, psbits[0]);
- VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
- moldrid = ia64_get_rr(0x0);
- ia64_set_rr(0x0, vrrtomrr(p->rr));
- ia64_srlz_d();
-
- vaddr = PAGEALIGN(vaddr, p->ps);
- thash_purge_entries_remote(v, vaddr, p->ps);
-
- VMX(v, vrr[0]) = oldrid;
- VMX(v, psbits[0]) = oldpsbits;
- ia64_set_rr(0x0, moldrid);
- ia64_dv_serialize_data();
-}
-
-static void vcpu_do_resume(struct kvm_vcpu *vcpu)
-{
- /* Re-init the VHPT and VTLB once when resuming */
- vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
- thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
- vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
- thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
-
- ia64_set_pta(vcpu->arch.vhpt.pta.val);
-}
-
-static void vmm_sanity_check(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
- if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
- panic_vm(vcpu, "Failed to do vmm sanity check,"
- "it maybe caused by crashed vmm!!\n\n");
- }
-}
-
-static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
-{
- vmm_sanity_check(vcpu); /* Guarantee the vcpu is running on a healthy vmm */
-
- if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
- vcpu_do_resume(vcpu);
- return;
- }
-
- if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
- thash_purge_all(vcpu);
- return;
- }
-
- if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
- while (vcpu->arch.ptc_g_count > 0)
- ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
- }
-}
-
-void vmm_transition(struct kvm_vcpu *vcpu)
-{
- ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
- 1, 0, 0, 0, 0, 0);
- vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
- ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
- 1, 0, 0, 0, 0, 0);
- kvm_do_resume_op(vcpu);
-}
-
-void vmm_panic_handler(u64 vec)
-{
- struct kvm_vcpu *vcpu = current_vcpu;
- vmm_sanity = 0;
- panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
- vec2off[vec]);
-}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
deleted file mode 100644
index 30897d44d61e..000000000000
--- a/arch/ia64/kvm/trampoline.S
+++ /dev/null
@@ -1,1038 +0,0 @@
-/* Save all processor states
- *
- * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
- * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include "asm-offsets.h"
-
-
-#define CTX(name) VMM_CTX_##name##_OFFSET
-
- /*
- * r32: context_t base address
- */
-#define SAVE_BRANCH_REGS \
- add r2 = CTX(B0),r32; \
- add r3 = CTX(B1),r32; \
- mov r16 = b0; \
- mov r17 = b1; \
- ;; \
- st8 [r2]=r16,16; \
- st8 [r3]=r17,16; \
- ;; \
- mov r16 = b2; \
- mov r17 = b3; \
- ;; \
- st8 [r2]=r16,16; \
- st8 [r3]=r17,16; \
- ;; \
- mov r16 = b4; \
- mov r17 = b5; \
- ;; \
- st8 [r2]=r16; \
- st8 [r3]=r17; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_BRANCH_REGS \
- add r2 = CTX(B0),r33; \
- add r3 = CTX(B1),r33; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov b0 = r16; \
- mov b1 = r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov b2 = r16; \
- mov b3 = r17; \
- ;; \
- ld8 r16=[r2]; \
- ld8 r17=[r3]; \
- ;; \
- mov b4=r16; \
- mov b5=r17; \
- ;;
-
-
- /*
- * r32: context_t base address
- * bsw == 1
- * Save all bank1 general registers, r4 ~ r7
- */
-#define SAVE_GENERAL_REGS \
- add r2=CTX(R4),r32; \
- add r3=CTX(R5),r32; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r4,16; \
-.mem.offset 8,0; \
- st8.spill [r3]=r5,16; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r6,48; \
-.mem.offset 8,0; \
- st8.spill [r3]=r7,48; \
- ;; \
-.mem.offset 0,0; \
- st8.spill [r2]=r12; \
-.mem.offset 8,0; \
- st8.spill [r3]=r13; \
- ;;
-
- /*
- * r33: context_t base address
- * bsw == 1
- */
-#define RESTORE_GENERAL_REGS \
- add r2=CTX(R4),r33; \
- add r3=CTX(R5),r33; \
- ;; \
- ld8.fill r4=[r2],16; \
- ld8.fill r5=[r3],16; \
- ;; \
- ld8.fill r6=[r2],48; \
- ld8.fill r7=[r3],48; \
- ;; \
- ld8.fill r12=[r2]; \
- ld8.fill r13 =[r3]; \
- ;;
-
-
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_KERNEL_REGS \
- add r2 = CTX(KR0),r32; \
- add r3 = CTX(KR1),r32; \
- mov r16 = ar.k0; \
- mov r17 = ar.k1; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k2; \
- mov r17 = ar.k3; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k4; \
- mov r17 = ar.k5; \
- ;; \
- st8 [r2] = r16,16; \
- st8 [r3] = r17,16; \
- ;; \
- mov r16 = ar.k6; \
- mov r17 = ar.k7; \
- ;; \
- st8 [r2] = r16; \
- st8 [r3] = r17; \
- ;;
-
-
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_KERNEL_REGS \
- add r2 = CTX(KR0),r33; \
- add r3 = CTX(KR1),r33; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k0=r16; \
- mov ar.k1=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k2=r16; \
- mov ar.k3=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k4=r16; \
- mov ar.k5=r17; \
- ;; \
- ld8 r16=[r2],16; \
- ld8 r17=[r3],16; \
- ;; \
- mov ar.k6=r16; \
- mov ar.k7=r17; \
- ;;
-
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_APP_REGS \
- add r2 = CTX(BSPSTORE),r32; \
- mov r16 = ar.bspstore; \
- ;; \
- st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
- mov r16 = ar.rnat; \
- ;; \
- st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \
- mov r16 = ar.fcr; \
- ;; \
- st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \
- mov r16 = ar.eflag; \
- ;; \
- st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \
- mov r16 = ar.cflg; \
- ;; \
- st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \
- mov r16 = ar.fsr; \
- ;; \
- st8 [r2] = r16,CTX(FIR)-CTX(FSR); \
- mov r16 = ar.fir; \
- ;; \
- st8 [r2] = r16,CTX(FDR)-CTX(FIR); \
- mov r16 = ar.fdr; \
- ;; \
- st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \
- mov r16 = ar.unat; \
- ;; \
- st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \
- mov r16 = ar.fpsr; \
- ;; \
- st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \
- mov r16 = ar.pfs; \
- ;; \
- st8 [r2] = r16,CTX(LC)-CTX(PFS); \
- mov r16 = ar.lc; \
- ;; \
- st8 [r2] = r16; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_APP_REGS \
- add r2=CTX(BSPSTORE),r33; \
- ;; \
- ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \
- ;; \
- mov ar.bspstore=r16; \
- ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \
- ;; \
- mov ar.rnat=r16; \
- ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \
- ;; \
- mov ar.fcr=r16; \
- ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \
- ;; \
- mov ar.eflag=r16; \
- ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \
- ;; \
- mov ar.cflg=r16; \
- ld8 r16=[r2],CTX(FIR)-CTX(FSR); \
- ;; \
- mov ar.fsr=r16; \
- ld8 r16=[r2],CTX(FDR)-CTX(FIR); \
- ;; \
- mov ar.fir=r16; \
- ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \
- ;; \
- mov ar.fdr=r16; \
- ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \
- ;; \
- mov ar.unat=r16; \
- ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \
- ;; \
- mov ar.fpsr=r16; \
- ld8 r16=[r2],CTX(LC)-CTX(PFS); \
- ;; \
- mov ar.pfs=r16; \
- ld8 r16=[r2]; \
- ;; \
- mov ar.lc=r16; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_CTL_REGS \
- add r2 = CTX(DCR),r32; \
- mov r16 = cr.dcr; \
- ;; \
- st8 [r2] = r16,CTX(IVA)-CTX(DCR); \
- ;; \
- mov r16 = cr.iva; \
- ;; \
- st8 [r2] = r16,CTX(PTA)-CTX(IVA); \
- ;; \
- mov r16 = cr.pta; \
- ;; \
- st8 [r2] = r16 ; \
- ;;
-
- /*
- * r33: context_t base address
- */
-#define RESTORE_CTL_REGS \
- add r2 = CTX(DCR),r33; \
- ;; \
- ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \
- ;; \
- mov cr.dcr = r16; \
- dv_serialize_data; \
- ;; \
- ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \
- ;; \
- mov cr.iva = r16; \
- dv_serialize_data; \
- ;; \
- ld8 r16 = [r2]; \
- ;; \
- mov cr.pta = r16; \
- dv_serialize_data; \
- ;;
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_REGION_REGS \
- add r2=CTX(RR0),r32; \
- mov r16=rr[r0]; \
- dep.z r18=1,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=2,61,3; \
- ;; \
- st8 [r2]=r17,8; \
- mov r16=rr[r18]; \
- dep.z r18=3,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=4,61,3; \
- ;; \
- st8 [r2]=r17,8; \
- mov r16=rr[r18]; \
- dep.z r18=5,61,3; \
- ;; \
- st8 [r2]=r16,8; \
- mov r17=rr[r18]; \
- dep.z r18=7,61,3; \
- ;; \
- st8 [r2]=r17,16; \
- mov r16=rr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- ;;
-
- /*
- * r33:context_t base address
- */
-#define RESTORE_REGION_REGS \
- add r2=CTX(RR0),r33;\
- mov r18=r0; \
- ;; \
- ld8 r20=[r2],8; \
- ;; /* rr0 */ \
- ld8 r21=[r2],8; \
- ;; /* rr1 */ \
- ld8 r22=[r2],8; \
- ;; /* rr2 */ \
- ld8 r23=[r2],8; \
- ;; /* rr3 */ \
- ld8 r24=[r2],8; \
- ;; /* rr4 */ \
- ld8 r25=[r2],16; \
- ;; /* rr5 */ \
- ld8 r27=[r2]; \
- ;; /* rr7 */ \
- mov rr[r18]=r20; \
- dep.z r18=1,61,3; \
- ;; /* rr1 */ \
- mov rr[r18]=r21; \
- dep.z r18=2,61,3; \
- ;; /* rr2 */ \
- mov rr[r18]=r22; \
- dep.z r18=3,61,3; \
- ;; /* rr3 */ \
- mov rr[r18]=r23; \
- dep.z r18=4,61,3; \
- ;; /* rr4 */ \
- mov rr[r18]=r24; \
- dep.z r18=5,61,3; \
- ;; /* rr5 */ \
- mov rr[r18]=r25; \
- dep.z r18=7,61,3; \
- ;; /* rr7 */ \
- mov rr[r18]=r27; \
- ;; \
- srlz.i; \
- ;;
-
-
-
- /*
- * r32: context_t base address
- * r36~r39:scratch registers
- */
-#define SAVE_DEBUG_REGS \
- add r2=CTX(IBR0),r32; \
- add r3=CTX(DBR0),r32; \
- mov r16=ibr[r0]; \
- mov r17=dbr[r0]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=1,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=2,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=2,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=3,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=4,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=5,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=6,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- add r18=7,r0; \
- ;; \
- mov r16=ibr[r18]; \
- mov r17=dbr[r18]; \
- ;; \
- st8 [r2]=r16,8; \
- st8 [r3]=r17,8; \
- ;;
-
-
-/*
- * r33: points to the context_t structure
- * ar.lc is corrupted.
- */
-#define RESTORE_DEBUG_REGS \
- add r2=CTX(IBR0),r33; \
- add r3=CTX(DBR0),r33; \
- mov r16=7; \
- mov r17=r0; \
- ;; \
- mov ar.lc = r16; \
- ;; \
-1: \
- ld8 r18=[r2],8; \
- ld8 r19=[r3],8; \
- ;; \
- mov ibr[r17]=r18; \
- mov dbr[r17]=r19; \
- ;; \
- srlz.i; \
- ;; \
- add r17=1,r17; \
- br.cloop.sptk 1b; \
- ;;
-
-
- /*
- * r32: context_t base address
- */
-#define SAVE_FPU_LOW \
- add r2=CTX(F2),r32; \
- add r3=CTX(F3),r32; \
- ;; \
- stf.spill.nta [r2]=f2,32; \
- stf.spill.nta [r3]=f3,32; \
- ;; \
- stf.spill.nta [r2]=f4,32; \
- stf.spill.nta [r3]=f5,32; \
- ;; \
- stf.spill.nta [r2]=f6,32; \
- stf.spill.nta [r3]=f7,32; \
- ;; \
- stf.spill.nta [r2]=f8,32; \
- stf.spill.nta [r3]=f9,32; \
- ;; \
- stf.spill.nta [r2]=f10,32; \
- stf.spill.nta [r3]=f11,32; \
- ;; \
- stf.spill.nta [r2]=f12,32; \
- stf.spill.nta [r3]=f13,32; \
- ;; \
- stf.spill.nta [r2]=f14,32; \
- stf.spill.nta [r3]=f15,32; \
- ;; \
- stf.spill.nta [r2]=f16,32; \
- stf.spill.nta [r3]=f17,32; \
- ;; \
- stf.spill.nta [r2]=f18,32; \
- stf.spill.nta [r3]=f19,32; \
- ;; \
- stf.spill.nta [r2]=f20,32; \
- stf.spill.nta [r3]=f21,32; \
- ;; \
- stf.spill.nta [r2]=f22,32; \
- stf.spill.nta [r3]=f23,32; \
- ;; \
- stf.spill.nta [r2]=f24,32; \
- stf.spill.nta [r3]=f25,32; \
- ;; \
- stf.spill.nta [r2]=f26,32; \
- stf.spill.nta [r3]=f27,32; \
- ;; \
- stf.spill.nta [r2]=f28,32; \
- stf.spill.nta [r3]=f29,32; \
- ;; \
- stf.spill.nta [r2]=f30; \
- stf.spill.nta [r3]=f31; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_FPU_HIGH \
- add r2=CTX(F32),r32; \
- add r3=CTX(F33),r32; \
- ;; \
- stf.spill.nta [r2]=f32,32; \
- stf.spill.nta [r3]=f33,32; \
- ;; \
- stf.spill.nta [r2]=f34,32; \
- stf.spill.nta [r3]=f35,32; \
- ;; \
- stf.spill.nta [r2]=f36,32; \
- stf.spill.nta [r3]=f37,32; \
- ;; \
- stf.spill.nta [r2]=f38,32; \
- stf.spill.nta [r3]=f39,32; \
- ;; \
- stf.spill.nta [r2]=f40,32; \
- stf.spill.nta [r3]=f41,32; \
- ;; \
- stf.spill.nta [r2]=f42,32; \
- stf.spill.nta [r3]=f43,32; \
- ;; \
- stf.spill.nta [r2]=f44,32; \
- stf.spill.nta [r3]=f45,32; \
- ;; \
- stf.spill.nta [r2]=f46,32; \
- stf.spill.nta [r3]=f47,32; \
- ;; \
- stf.spill.nta [r2]=f48,32; \
- stf.spill.nta [r3]=f49,32; \
- ;; \
- stf.spill.nta [r2]=f50,32; \
- stf.spill.nta [r3]=f51,32; \
- ;; \
- stf.spill.nta [r2]=f52,32; \
- stf.spill.nta [r3]=f53,32; \
- ;; \
- stf.spill.nta [r2]=f54,32; \
- stf.spill.nta [r3]=f55,32; \
- ;; \
- stf.spill.nta [r2]=f56,32; \
- stf.spill.nta [r3]=f57,32; \
- ;; \
- stf.spill.nta [r2]=f58,32; \
- stf.spill.nta [r3]=f59,32; \
- ;; \
- stf.spill.nta [r2]=f60,32; \
- stf.spill.nta [r3]=f61,32; \
- ;; \
- stf.spill.nta [r2]=f62,32; \
- stf.spill.nta [r3]=f63,32; \
- ;; \
- stf.spill.nta [r2]=f64,32; \
- stf.spill.nta [r3]=f65,32; \
- ;; \
- stf.spill.nta [r2]=f66,32; \
- stf.spill.nta [r3]=f67,32; \
- ;; \
- stf.spill.nta [r2]=f68,32; \
- stf.spill.nta [r3]=f69,32; \
- ;; \
- stf.spill.nta [r2]=f70,32; \
- stf.spill.nta [r3]=f71,32; \
- ;; \
- stf.spill.nta [r2]=f72,32; \
- stf.spill.nta [r3]=f73,32; \
- ;; \
- stf.spill.nta [r2]=f74,32; \
- stf.spill.nta [r3]=f75,32; \
- ;; \
- stf.spill.nta [r2]=f76,32; \
- stf.spill.nta [r3]=f77,32; \
- ;; \
- stf.spill.nta [r2]=f78,32; \
- stf.spill.nta [r3]=f79,32; \
- ;; \
- stf.spill.nta [r2]=f80,32; \
- stf.spill.nta [r3]=f81,32; \
- ;; \
- stf.spill.nta [r2]=f82,32; \
- stf.spill.nta [r3]=f83,32; \
- ;; \
- stf.spill.nta [r2]=f84,32; \
- stf.spill.nta [r3]=f85,32; \
- ;; \
- stf.spill.nta [r2]=f86,32; \
- stf.spill.nta [r3]=f87,32; \
- ;; \
- stf.spill.nta [r2]=f88,32; \
- stf.spill.nta [r3]=f89,32; \
- ;; \
- stf.spill.nta [r2]=f90,32; \
- stf.spill.nta [r3]=f91,32; \
- ;; \
- stf.spill.nta [r2]=f92,32; \
- stf.spill.nta [r3]=f93,32; \
- ;; \
- stf.spill.nta [r2]=f94,32; \
- stf.spill.nta [r3]=f95,32; \
- ;; \
- stf.spill.nta [r2]=f96,32; \
- stf.spill.nta [r3]=f97,32; \
- ;; \
- stf.spill.nta [r2]=f98,32; \
- stf.spill.nta [r3]=f99,32; \
- ;; \
- stf.spill.nta [r2]=f100,32; \
- stf.spill.nta [r3]=f101,32; \
- ;; \
- stf.spill.nta [r2]=f102,32; \
- stf.spill.nta [r3]=f103,32; \
- ;; \
- stf.spill.nta [r2]=f104,32; \
- stf.spill.nta [r3]=f105,32; \
- ;; \
- stf.spill.nta [r2]=f106,32; \
- stf.spill.nta [r3]=f107,32; \
- ;; \
- stf.spill.nta [r2]=f108,32; \
- stf.spill.nta [r3]=f109,32; \
- ;; \
- stf.spill.nta [r2]=f110,32; \
- stf.spill.nta [r3]=f111,32; \
- ;; \
- stf.spill.nta [r2]=f112,32; \
- stf.spill.nta [r3]=f113,32; \
- ;; \
- stf.spill.nta [r2]=f114,32; \
- stf.spill.nta [r3]=f115,32; \
- ;; \
- stf.spill.nta [r2]=f116,32; \
- stf.spill.nta [r3]=f117,32; \
- ;; \
- stf.spill.nta [r2]=f118,32; \
- stf.spill.nta [r3]=f119,32; \
- ;; \
- stf.spill.nta [r2]=f120,32; \
- stf.spill.nta [r3]=f121,32; \
- ;; \
- stf.spill.nta [r2]=f122,32; \
- stf.spill.nta [r3]=f123,32; \
- ;; \
- stf.spill.nta [r2]=f124,32; \
- stf.spill.nta [r3]=f125,32; \
- ;; \
- stf.spill.nta [r2]=f126; \
- stf.spill.nta [r3]=f127; \
- ;;
-
- /*
- * r33: points to the context_t structure
- */
-#define RESTORE_FPU_LOW \
- add r2 = CTX(F2), r33; \
- add r3 = CTX(F3), r33; \
- ;; \
- ldf.fill.nta f2 = [r2], 32; \
- ldf.fill.nta f3 = [r3], 32; \
- ;; \
- ldf.fill.nta f4 = [r2], 32; \
- ldf.fill.nta f5 = [r3], 32; \
- ;; \
- ldf.fill.nta f6 = [r2], 32; \
- ldf.fill.nta f7 = [r3], 32; \
- ;; \
- ldf.fill.nta f8 = [r2], 32; \
- ldf.fill.nta f9 = [r3], 32; \
- ;; \
- ldf.fill.nta f10 = [r2], 32; \
- ldf.fill.nta f11 = [r3], 32; \
- ;; \
- ldf.fill.nta f12 = [r2], 32; \
- ldf.fill.nta f13 = [r3], 32; \
- ;; \
- ldf.fill.nta f14 = [r2], 32; \
- ldf.fill.nta f15 = [r3], 32; \
- ;; \
- ldf.fill.nta f16 = [r2], 32; \
- ldf.fill.nta f17 = [r3], 32; \
- ;; \
- ldf.fill.nta f18 = [r2], 32; \
- ldf.fill.nta f19 = [r3], 32; \
- ;; \
- ldf.fill.nta f20 = [r2], 32; \
- ldf.fill.nta f21 = [r3], 32; \
- ;; \
- ldf.fill.nta f22 = [r2], 32; \
- ldf.fill.nta f23 = [r3], 32; \
- ;; \
- ldf.fill.nta f24 = [r2], 32; \
- ldf.fill.nta f25 = [r3], 32; \
- ;; \
- ldf.fill.nta f26 = [r2], 32; \
- ldf.fill.nta f27 = [r3], 32; \
- ;; \
- ldf.fill.nta f28 = [r2], 32; \
- ldf.fill.nta f29 = [r3], 32; \
- ;; \
- ldf.fill.nta f30 = [r2], 32; \
- ldf.fill.nta f31 = [r3], 32; \
- ;;
-
-
-
- /*
- * r33: points to the context_t structure
- */
-#define RESTORE_FPU_HIGH \
- add r2 = CTX(F32), r33; \
- add r3 = CTX(F33), r33; \
- ;; \
- ldf.fill.nta f32 = [r2], 32; \
- ldf.fill.nta f33 = [r3], 32; \
- ;; \
- ldf.fill.nta f34 = [r2], 32; \
- ldf.fill.nta f35 = [r3], 32; \
- ;; \
- ldf.fill.nta f36 = [r2], 32; \
- ldf.fill.nta f37 = [r3], 32; \
- ;; \
- ldf.fill.nta f38 = [r2], 32; \
- ldf.fill.nta f39 = [r3], 32; \
- ;; \
- ldf.fill.nta f40 = [r2], 32; \
- ldf.fill.nta f41 = [r3], 32; \
- ;; \
- ldf.fill.nta f42 = [r2], 32; \
- ldf.fill.nta f43 = [r3], 32; \
- ;; \
- ldf.fill.nta f44 = [r2], 32; \
- ldf.fill.nta f45 = [r3], 32; \
- ;; \
- ldf.fill.nta f46 = [r2], 32; \
- ldf.fill.nta f47 = [r3], 32; \
- ;; \
- ldf.fill.nta f48 = [r2], 32; \
- ldf.fill.nta f49 = [r3], 32; \
- ;; \
- ldf.fill.nta f50 = [r2], 32; \
- ldf.fill.nta f51 = [r3], 32; \
- ;; \
- ldf.fill.nta f52 = [r2], 32; \
- ldf.fill.nta f53 = [r3], 32; \
- ;; \
- ldf.fill.nta f54 = [r2], 32; \
- ldf.fill.nta f55 = [r3], 32; \
- ;; \
- ldf.fill.nta f56 = [r2], 32; \
- ldf.fill.nta f57 = [r3], 32; \
- ;; \
- ldf.fill.nta f58 = [r2], 32; \
- ldf.fill.nta f59 = [r3], 32; \
- ;; \
- ldf.fill.nta f60 = [r2], 32; \
- ldf.fill.nta f61 = [r3], 32; \
- ;; \
- ldf.fill.nta f62 = [r2], 32; \
- ldf.fill.nta f63 = [r3], 32; \
- ;; \
- ldf.fill.nta f64 = [r2], 32; \
- ldf.fill.nta f65 = [r3], 32; \
- ;; \
- ldf.fill.nta f66 = [r2], 32; \
- ldf.fill.nta f67 = [r3], 32; \
- ;; \
- ldf.fill.nta f68 = [r2], 32; \
- ldf.fill.nta f69 = [r3], 32; \
- ;; \
- ldf.fill.nta f70 = [r2], 32; \
- ldf.fill.nta f71 = [r3], 32; \
- ;; \
- ldf.fill.nta f72 = [r2], 32; \
- ldf.fill.nta f73 = [r3], 32; \
- ;; \
- ldf.fill.nta f74 = [r2], 32; \
- ldf.fill.nta f75 = [r3], 32; \
- ;; \
- ldf.fill.nta f76 = [r2], 32; \
- ldf.fill.nta f77 = [r3], 32; \
- ;; \
- ldf.fill.nta f78 = [r2], 32; \
- ldf.fill.nta f79 = [r3], 32; \
- ;; \
- ldf.fill.nta f80 = [r2], 32; \
- ldf.fill.nta f81 = [r3], 32; \
- ;; \
- ldf.fill.nta f82 = [r2], 32; \
- ldf.fill.nta f83 = [r3], 32; \
- ;; \
- ldf.fill.nta f84 = [r2], 32; \
- ldf.fill.nta f85 = [r3], 32; \
- ;; \
- ldf.fill.nta f86 = [r2], 32; \
- ldf.fill.nta f87 = [r3], 32; \
- ;; \
- ldf.fill.nta f88 = [r2], 32; \
- ldf.fill.nta f89 = [r3], 32; \
- ;; \
- ldf.fill.nta f90 = [r2], 32; \
- ldf.fill.nta f91 = [r3], 32; \
- ;; \
- ldf.fill.nta f92 = [r2], 32; \
- ldf.fill.nta f93 = [r3], 32; \
- ;; \
- ldf.fill.nta f94 = [r2], 32; \
- ldf.fill.nta f95 = [r3], 32; \
- ;; \
- ldf.fill.nta f96 = [r2], 32; \
- ldf.fill.nta f97 = [r3], 32; \
- ;; \
- ldf.fill.nta f98 = [r2], 32; \
- ldf.fill.nta f99 = [r3], 32; \
- ;; \
- ldf.fill.nta f100 = [r2], 32; \
- ldf.fill.nta f101 = [r3], 32; \
- ;; \
- ldf.fill.nta f102 = [r2], 32; \
- ldf.fill.nta f103 = [r3], 32; \
- ;; \
- ldf.fill.nta f104 = [r2], 32; \
- ldf.fill.nta f105 = [r3], 32; \
- ;; \
- ldf.fill.nta f106 = [r2], 32; \
- ldf.fill.nta f107 = [r3], 32; \
- ;; \
- ldf.fill.nta f108 = [r2], 32; \
- ldf.fill.nta f109 = [r3], 32; \
- ;; \
- ldf.fill.nta f110 = [r2], 32; \
- ldf.fill.nta f111 = [r3], 32; \
- ;; \
- ldf.fill.nta f112 = [r2], 32; \
- ldf.fill.nta f113 = [r3], 32; \
- ;; \
- ldf.fill.nta f114 = [r2], 32; \
- ldf.fill.nta f115 = [r3], 32; \
- ;; \
- ldf.fill.nta f116 = [r2], 32; \
- ldf.fill.nta f117 = [r3], 32; \
- ;; \
- ldf.fill.nta f118 = [r2], 32; \
- ldf.fill.nta f119 = [r3], 32; \
- ;; \
- ldf.fill.nta f120 = [r2], 32; \
- ldf.fill.nta f121 = [r3], 32; \
- ;; \
- ldf.fill.nta f122 = [r2], 32; \
- ldf.fill.nta f123 = [r3], 32; \
- ;; \
- ldf.fill.nta f124 = [r2], 32; \
- ldf.fill.nta f125 = [r3], 32; \
- ;; \
- ldf.fill.nta f126 = [r2], 32; \
- ldf.fill.nta f127 = [r3], 32; \
- ;;
-
- /*
- * r32: context_t base address
- */
-#define SAVE_PTK_REGS \
- add r2=CTX(PKR0), r32; \
- mov r16=7; \
- ;; \
- mov ar.lc=r16; \
- mov r17=r0; \
- ;; \
-1: \
- mov r18=pkr[r17]; \
- ;; \
- srlz.i; \
- ;; \
- st8 [r2]=r18, 8; \
- ;; \
- add r17 =1,r17; \
- ;; \
- br.cloop.sptk 1b; \
- ;;
-
-/*
- * r33: points to the context_t structure
- * ar.lc is corrupted.
- */
-#define RESTORE_PTK_REGS \
- add r2=CTX(PKR0), r33; \
- mov r16=7; \
- ;; \
- mov ar.lc=r16; \
- mov r17=r0; \
- ;; \
-1: \
- ld8 r18=[r2], 8; \
- ;; \
- mov pkr[r17]=r18; \
- ;; \
- srlz.i; \
- ;; \
- add r17 =1,r17; \
- ;; \
- br.cloop.sptk 1b; \
- ;;
-
-
-/*
- * void vmm_trampoline( context_t * from,
- * context_t * to)
- *
- * from: r32
- * to: r33
- * note: interrupts are disabled before calling this function.
- */
-GLOBAL_ENTRY(vmm_trampoline)
- mov r16 = psr
- adds r2 = CTX(PSR), r32
- ;;
- st8 [r2] = r16, 8 // psr
- mov r17 = pr
- ;;
- st8 [r2] = r17, 8 // pr
- mov r18 = ar.unat
- ;;
- st8 [r2] = r18
- mov r17 = ar.rsc
- ;;
- adds r2 = CTX(RSC),r32
- ;;
- st8 [r2]= r17
- mov ar.rsc =0
- flushrs
- ;;
- SAVE_GENERAL_REGS
- ;;
- SAVE_KERNEL_REGS
- ;;
- SAVE_APP_REGS
- ;;
- SAVE_BRANCH_REGS
- ;;
- SAVE_CTL_REGS
- ;;
- SAVE_REGION_REGS
- ;;
- //SAVE_DEBUG_REGS
- ;;
- rsm psr.dfl
- ;;
- srlz.d
- ;;
- SAVE_FPU_LOW
- ;;
- rsm psr.dfh
- ;;
- srlz.d
- ;;
- SAVE_FPU_HIGH
- ;;
- SAVE_PTK_REGS
- ;;
- RESTORE_PTK_REGS
- ;;
- RESTORE_FPU_HIGH
- ;;
- RESTORE_FPU_LOW
- ;;
- //RESTORE_DEBUG_REGS
- ;;
- RESTORE_REGION_REGS
- ;;
- RESTORE_CTL_REGS
- ;;
- RESTORE_BRANCH_REGS
- ;;
- RESTORE_APP_REGS
- ;;
- RESTORE_KERNEL_REGS
- ;;
- RESTORE_GENERAL_REGS
- ;;
- adds r2=CTX(PSR), r33
- ;;
- ld8 r16=[r2], 8 // psr
- ;;
- mov psr.l=r16
- ;;
- srlz.d
- ;;
- ld8 r16=[r2], 8 // pr
- ;;
- mov pr =r16,-1
- ld8 r16=[r2] // unat
- ;;
- mov ar.unat=r16
- ;;
- adds r2=CTX(RSC),r33
- ;;
- ld8 r16 =[r2]
- ;;
- mov ar.rsc = r16
- ;;
- br.ret.sptk.few b0
-END(vmm_trampoline)
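Taken together, the save/restore macros above give vmm_trampoline() a complete context switch: the caller's state is spilled into the 'from' context_t, the 'to' context is reloaded, and control returns through the restored b0. In C terms the entry point has the shape sketched below, restating the header comment and matching the call made from vmm.c earlier in this patch:

	/* both arguments are context_t save areas; the call effectively
	 * returns on whichever context was passed as 'to' */
	void vmm_trampoline(context_t *from, context_t *to);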
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
deleted file mode 100644
index 958815c9787d..000000000000
--- a/arch/ia64/kvm/vcpu.c
+++ /dev/null
@@ -1,2209 +0,0 @@
-/*
- * kvm_vcpu.c: handling all virtual cpu related things.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Shaofan Li (Susue Li) <susie.li@intel.com>
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <linux/kvm_host.h>
-#include <linux/types.h>
-
-#include <asm/processor.h>
-#include <asm/ia64regs.h>
-#include <asm/gcc_intrin.h>
-#include <asm/kregs.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-
-#include "asm-offsets.h"
-#include "vcpu.h"
-
-/*
- * Special notes:
- * - Indexed by the (it, dt, rt) bit sequence
- * - Only existing mode transitions are allowed in this table
- * - RSE is placed in lazy mode when emulating guest partial mode
- * - If the gva happens to fall in rr0 or rr4, the only allowed case is
- *   identity mapping (gva == gpa); otherwise panic! (How?)
- */
-int mm_switch_table[8][8] = {
- /* 2004/09/12(Kevin): Allow switch to self */
- /*
- * (it,dt,rt): (0,0,0) -> (1,1,1)
- * This kind of transition usually occurs in the very early
- * stage of the Linux boot-up procedure. Another case is in efi
- * and pal calls. (see "arch/ia64/kernel/head.S")
- *
- * (it,dt,rt): (0,0,0) -> (0,1,1)
- * This kind of transition is found when OSYa exits efi boot
- * service. Because gva = gpa in this case (same region),
- * data accesses can be satisfied even though the itlb entry for
- * physical emulation is hit.
- */
- {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (0,1,1) -> (1,1,1)
- * This kind of transition is found in OSYa.
- *
- * (it,dt,rt): (0,1,1) -> (0,0,0)
- * This kind of transition is found in OSYa
- */
- {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
- /* (1,0,0)->(1,1,1) */
- {0, 0, 0, 0, 0, 0, 0, SW_P2V},
- /*
- * (it,dt,rt): (1,0,1) -> (1,1,1)
- * This kind of transition usually occurs when Linux returns
- * from the low level TLB miss handlers.
- * (see "arch/ia64/kernel/ivt.S")
- */
- {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
- {0, 0, 0, 0, 0, 0, 0, 0},
- /*
- * (it,dt,rt): (1,1,1) -> (1,0,1)
- * This kind of transition usually occurs in Linux low level
- * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
- *
- * (it,dt,rt): (1,1,1) -> (0,0,0)
- * This kind of transition usually occurs in pal and efi calls,
- * which requires running in physical mode.
- * (see "arch/ia64/kernel/head.S")
- * (1,1,1)->(1,0,0)
- */
-
- {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
-};
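The rows and columns of mm_switch_table are selected by folding the guest's (it, dt, rt) bits into a single 3-bit value; the real macro (MODE_IND(), defined elsewhere in this directory) is assumed here, but the bit ordering is implied by the transition comments above. A minimal standalone sketch:

	/* hypothetical helper: it is the most significant bit, rt the least */
	static inline int mode_index(int it, int dt, int rt)
	{
		return (it << 2) | (dt << 1) | rt;
	}
	/* e.g. (0,0,0) -> 0 and (1,1,1) -> 7, so the boot-time
	 * (0,0,0) -> (1,1,1) transition described above lands on SW_P2V */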
-
-void physical_mode_init(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.mode_flags = GUEST_IN_PHY;
-}
-
-void switch_to_physical_rid(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- /* Save original virtual mode rr[0] and rr[4] */
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
- ia64_srlz_d();
- ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
- ia64_srlz_d();
-
- ia64_set_psr(psr);
- return;
-}
-
-void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- psr = ia64_clear_ic();
- ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
- ia64_srlz_d();
- ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
- ia64_srlz_d();
- ia64_set_psr(psr);
- return;
-}
-
-static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
-{
- return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
-}
-
-void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
- struct ia64_psr new_psr)
-{
- int act;
- act = mm_switch_action(old_psr, new_psr);
- switch (act) {
- case SW_V2P:
- /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
- old_psr.val, new_psr.val);*/
- switch_to_physical_rid(vcpu);
- /*
- * Set rse to enforced lazy mode, to prevent active rse
- * save/restore while in guest physical mode.
- */
- vcpu->arch.mode_flags |= GUEST_IN_PHY;
- break;
- case SW_P2V:
- switch_to_virtual_rid(vcpu);
- /*
- * recover the old mode, which was saved when entering
- * guest physical mode
- */
- vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
- break;
- case SW_SELF:
- break;
- case SW_NOP:
- break;
- default:
- /* Sanity check */
- break;
- }
- return;
-}
-
-/*
- * In physical mode, tc/tr inserts for regions 0 and 4 use
- * RID[0] and RID[4], which are used for physical mode emulation.
- * However, what those inserted tc/tr entries want is the rid for
- * virtual mode, so the original virtual rid needs to be restored
- * before the insert.
- *
- * Operations which require such a switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All of the above need the actual virtual rid for the destination entry.
- */
-
-void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
- struct ia64_psr new_psr)
-{
-
- if ((old_psr.dt != new_psr.dt)
- || (old_psr.it != new_psr.it)
- || (old_psr.rt != new_psr.rt))
- switch_mm_mode(vcpu, old_psr, new_psr);
-
- return;
-}
-
-
-/*
- * In physical mode, tc/tr inserts for regions 0 and 4 use
- * RID[0] and RID[4], which are used for physical mode emulation.
- * However, what those inserted tc/tr entries want is the rid for
- * virtual mode, so the original virtual rid needs to be restored
- * before the insert.
- *
- * Operations which require such a switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All of the above need the actual virtual rid for the destination entry.
- */
-
-void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
-{
- if (is_physical_mode(vcpu)) {
- vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
- switch_to_virtual_rid(vcpu);
- }
- return;
-}
-
-/* Recover always follows prepare */
-void recover_if_physical_mode(struct kvm_vcpu *vcpu)
-{
- if (is_physical_mode(vcpu))
- switch_to_physical_rid(vcpu);
- vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
- return;
-}
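The long comment above spells out when the rid swap matters; in practice the two helpers simply bracket the emulation of an insert or purge, roughly like this (illustrative usage, not a copy of any caller):

	prepare_if_physical_mode(vcpu);	/* temporarily restore the virtual rids */
	/* ... emulate itc/itr/ptc/ptr/tpa on behalf of the guest ... */
	recover_if_physical_mode(vcpu);	/* back to the metaphysical rids */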
-
-#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
-
-static u16 gr_info[32] = {
- 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
- RPT(r1), RPT(r2), RPT(r3),
- RPT(r4), RPT(r5), RPT(r6), RPT(r7),
- RPT(r8), RPT(r9), RPT(r10), RPT(r11),
- RPT(r12), RPT(r13), RPT(r14), RPT(r15),
- RPT(r16), RPT(r17), RPT(r18), RPT(r19),
- RPT(r20), RPT(r21), RPT(r22), RPT(r23),
- RPT(r24), RPT(r25), RPT(r26), RPT(r27),
- RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-#define IA64_FIRST_STACKED_GR 32
-#define IA64_FIRST_ROTATING_FR 32
-
-static inline unsigned long
-rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
-{
- reg += rrb;
- if (reg >= sor)
- reg -= sor;
- return reg;
-}
-
-/*
- * Return the (rotated) index for floating point register REGNUM
- * (REGNUM must range from 32-127; the result is in the
- * range 0-95).
- */
-static inline unsigned long fph_index(struct kvm_pt_regs *regs,
- long regnum)
-{
- unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
- return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
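A quick worked example of the rotation (illustrative numbers): with cr.ifs.rrb.fr == 2, virtual f32 maps to rotated index (0 + 2) % 96 == 2, and virtual f127 wraps around to (95 + 2) % 96 == 1.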
-
-/*
- * The inverse of the above: given bspstore and the number of
- * registers, calculate ar.bsp.
- */
-static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
- long num_regs)
-{
- long delta = ia64_rse_slot_num(addr) + num_regs;
- int i = 0;
-
- if (num_regs < 0)
- delta -= 0x3e;
- if (delta < 0) {
- while (delta <= -0x3f) {
- i--;
- delta += 0x3f;
- }
- } else {
- while (delta >= 0x3f) {
- i++;
- delta -= 0x3f;
- }
- }
-
- return addr + num_regs + i;
-}
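The extra +/-1 steps above account for the RSE backing-store layout, in which every 64th 8-byte slot holds an RNAT collection rather than a register. As a worked case under that assumption: starting at slot 60 and skipping 10 registers crosses one RNAT slot, so the result lands 11 slots (88 bytes) further on.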
-
-static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
- unsigned long *val, int *nat)
-{
- unsigned long *bsp, *addr, *rnat_addr, *bspstore;
- unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
- new_rsc = old_rsc&(~(0x3));
- ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
-
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- bsp = kbs + (regs->loadrs >> 19);
-
- addr = kvm_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- if (addr >= bspstore) {
- ia64_flushrs();
- ia64_mf();
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- }
- *val = *addr;
- if (nat) {
- if (bspstore < rnat_addr)
- *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
- & nat_mask);
- else
- *nat = (int)!!((*rnat_addr) & nat_mask);
- ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
- }
-}
-
-void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
- unsigned long val, unsigned long nat)
-{
- unsigned long *bsp, *bspstore, *addr, *rnat_addr;
- unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
- unsigned long nat_mask;
- unsigned long old_rsc, new_rsc, psr;
- unsigned long rnat;
- long sof = (regs->cr_ifs) & 0x7f;
- long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
- long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
- long ridx = r1 - 32;
-
- if (ridx < sor)
- ridx = rotate_reg(sor, rrb_gr, ridx);
-
- old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
- /* put RSC to lazy mode, and set loadrs 0 */
- new_rsc = old_rsc & (~0x3fff0003);
- ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
- bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
-
- addr = kvm_rse_skip_regs(bsp, -sof + ridx);
- nat_mask = 1UL << ia64_rse_slot_num(addr);
- rnat_addr = ia64_rse_rnat_addr(addr);
-
- local_irq_save(psr);
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- if (addr >= bspstore) {
-
- ia64_flushrs();
- ia64_mf();
- *addr = val;
- bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
- rnat = ia64_getreg(_IA64_REG_AR_RNAT);
- if (bspstore < rnat_addr)
- rnat = rnat & (~nat_mask);
- else
- *rnat_addr = (*rnat_addr)&(~nat_mask);
-
- ia64_mf();
- ia64_loadrs();
- ia64_setreg(_IA64_REG_AR_RNAT, rnat);
- } else {
- rnat = ia64_getreg(_IA64_REG_AR_RNAT);
- *addr = val;
- if (bspstore < rnat_addr)
- rnat = rnat&(~nat_mask);
- else
- *rnat_addr = (*rnat_addr) & (~nat_mask);
-
- ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
- ia64_setreg(_IA64_REG_AR_RNAT, rnat);
- }
- local_irq_restore(psr);
- ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
-}
-
-void getreg(unsigned long regnum, unsigned long *val,
- int *nat, struct kvm_pt_regs *regs)
-{
- unsigned long addr, *unat;
- if (regnum >= IA64_FIRST_STACKED_GR) {
- get_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- addr = (unsigned long)regs;
- unat = &regs->eml_unat;
-
- addr += gr_info[regnum];
-
- *val = *(unsigned long *)addr;
- /*
- * do it only when requested
- */
- if (nat)
- *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
-}
-
-void setreg(unsigned long regnum, unsigned long val,
- int nat, struct kvm_pt_regs *regs)
-{
- unsigned long addr;
- unsigned long bitmask;
- unsigned long *unat;
-
- /*
- * First takes care of stacked registers
- */
- if (regnum >= IA64_FIRST_STACKED_GR) {
- set_rse_reg(regs, regnum, val, nat);
- return;
- }
-
- /*
- * Now look at registers in [0-31] range and init correct UNAT
- */
- addr = (unsigned long)regs;
- unat = &regs->eml_unat;
- /*
- * add offset from base of struct
- * and do it !
- */
- addr += gr_info[regnum];
-
- *(unsigned long *)addr = val;
-
- /*
- * We need to update the corresponding UNAT bit to fully emulate the load:
- * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
- */
- bitmask = 1UL << ((addr >> 3) & 0x3f);
- if (nat)
- *unat |= bitmask;
- else
- *unat &= ~bitmask;
-
-}
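The bit position used here is simply the doubleword slot of the save address within its 512-byte aligned window. For a (hypothetical) save address ending in 0xe0a8, for instance, bit_pos = (0xe0a8 >> 3) & 0x3f == 21, so bit 21 of eml_unat tracks that register's NaT.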
-
-u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long val;
-
- if (!reg)
- return 0;
- getreg(reg, &val, 0, regs);
- return val;
-}
-
-void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- long sof = (regs->cr_ifs) & 0x7f;
-
- if (!reg)
- return;
- if (reg >= sof + 32)
- return;
- setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
-}
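Note that the sof check above silently drops writes outside the current register frame: with cr.ifs.sof == 8, for example, only r32-r39 are valid targets and a write to r40 is ignored.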
-
-void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct kvm_pt_regs *regs)
-{
- /* Take floating register rotation into consideration*/
- if (regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-#define CASE_FIXED_FP(reg) \
- case (reg) : \
- ia64_stf_spill(fpval, reg); \
- break
-
- switch (regnum) {
- CASE_FIXED_FP(0);
- CASE_FIXED_FP(1);
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_FIXED_FP(6);
- CASE_FIXED_FP(7);
- CASE_FIXED_FP(8);
- CASE_FIXED_FP(9);
- CASE_FIXED_FP(10);
- CASE_FIXED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-#undef CASE_FIXED_FP
-}
-
-void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
- struct kvm_pt_regs *regs)
-{
- /* Take floating register rotation into consideration*/
- if (regnum >= IA64_FIRST_ROTATING_FR)
- regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-
-#define CASE_FIXED_FP(reg) \
- case (reg) : \
- ia64_ldf_fill(reg, fpval); \
- break
-
- switch (regnum) {
- CASE_FIXED_FP(2);
- CASE_FIXED_FP(3);
- CASE_FIXED_FP(4);
- CASE_FIXED_FP(5);
-
- CASE_FIXED_FP(6);
- CASE_FIXED_FP(7);
- CASE_FIXED_FP(8);
- CASE_FIXED_FP(9);
- CASE_FIXED_FP(10);
- CASE_FIXED_FP(11);
-
- CASE_FIXED_FP(12);
- CASE_FIXED_FP(13);
- CASE_FIXED_FP(14);
- CASE_FIXED_FP(15);
- CASE_FIXED_FP(16);
- CASE_FIXED_FP(17);
- CASE_FIXED_FP(18);
- CASE_FIXED_FP(19);
- CASE_FIXED_FP(20);
- CASE_FIXED_FP(21);
- CASE_FIXED_FP(22);
- CASE_FIXED_FP(23);
- CASE_FIXED_FP(24);
- CASE_FIXED_FP(25);
- CASE_FIXED_FP(26);
- CASE_FIXED_FP(27);
- CASE_FIXED_FP(28);
- CASE_FIXED_FP(29);
- CASE_FIXED_FP(30);
- CASE_FIXED_FP(31);
- CASE_FIXED_FP(32);
- CASE_FIXED_FP(33);
- CASE_FIXED_FP(34);
- CASE_FIXED_FP(35);
- CASE_FIXED_FP(36);
- CASE_FIXED_FP(37);
- CASE_FIXED_FP(38);
- CASE_FIXED_FP(39);
- CASE_FIXED_FP(40);
- CASE_FIXED_FP(41);
- CASE_FIXED_FP(42);
- CASE_FIXED_FP(43);
- CASE_FIXED_FP(44);
- CASE_FIXED_FP(45);
- CASE_FIXED_FP(46);
- CASE_FIXED_FP(47);
- CASE_FIXED_FP(48);
- CASE_FIXED_FP(49);
- CASE_FIXED_FP(50);
- CASE_FIXED_FP(51);
- CASE_FIXED_FP(52);
- CASE_FIXED_FP(53);
- CASE_FIXED_FP(54);
- CASE_FIXED_FP(55);
- CASE_FIXED_FP(56);
- CASE_FIXED_FP(57);
- CASE_FIXED_FP(58);
- CASE_FIXED_FP(59);
- CASE_FIXED_FP(60);
- CASE_FIXED_FP(61);
- CASE_FIXED_FP(62);
- CASE_FIXED_FP(63);
- CASE_FIXED_FP(64);
- CASE_FIXED_FP(65);
- CASE_FIXED_FP(66);
- CASE_FIXED_FP(67);
- CASE_FIXED_FP(68);
- CASE_FIXED_FP(69);
- CASE_FIXED_FP(70);
- CASE_FIXED_FP(71);
- CASE_FIXED_FP(72);
- CASE_FIXED_FP(73);
- CASE_FIXED_FP(74);
- CASE_FIXED_FP(75);
- CASE_FIXED_FP(76);
- CASE_FIXED_FP(77);
- CASE_FIXED_FP(78);
- CASE_FIXED_FP(79);
- CASE_FIXED_FP(80);
- CASE_FIXED_FP(81);
- CASE_FIXED_FP(82);
- CASE_FIXED_FP(83);
- CASE_FIXED_FP(84);
- CASE_FIXED_FP(85);
- CASE_FIXED_FP(86);
- CASE_FIXED_FP(87);
- CASE_FIXED_FP(88);
- CASE_FIXED_FP(89);
- CASE_FIXED_FP(90);
- CASE_FIXED_FP(91);
- CASE_FIXED_FP(92);
- CASE_FIXED_FP(93);
- CASE_FIXED_FP(94);
- CASE_FIXED_FP(95);
- CASE_FIXED_FP(96);
- CASE_FIXED_FP(97);
- CASE_FIXED_FP(98);
- CASE_FIXED_FP(99);
- CASE_FIXED_FP(100);
- CASE_FIXED_FP(101);
- CASE_FIXED_FP(102);
- CASE_FIXED_FP(103);
- CASE_FIXED_FP(104);
- CASE_FIXED_FP(105);
- CASE_FIXED_FP(106);
- CASE_FIXED_FP(107);
- CASE_FIXED_FP(108);
- CASE_FIXED_FP(109);
- CASE_FIXED_FP(110);
- CASE_FIXED_FP(111);
- CASE_FIXED_FP(112);
- CASE_FIXED_FP(113);
- CASE_FIXED_FP(114);
- CASE_FIXED_FP(115);
- CASE_FIXED_FP(116);
- CASE_FIXED_FP(117);
- CASE_FIXED_FP(118);
- CASE_FIXED_FP(119);
- CASE_FIXED_FP(120);
- CASE_FIXED_FP(121);
- CASE_FIXED_FP(122);
- CASE_FIXED_FP(123);
- CASE_FIXED_FP(124);
- CASE_FIXED_FP(125);
- CASE_FIXED_FP(126);
- CASE_FIXED_FP(127);
- }
-}
-
-void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
-}
-
-void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- if (reg > 1)
- setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
-}
-
-/*
- * The Altix RTC is mapped specially here for the vmm module
- */
-#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
-static long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
- struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
-
- if (kvm->arch.is_sn2)
- return (*SN_RTC_BASE);
- else
-#endif
- return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-/************************************************************************
- * lsapic timer
- ***********************************************************************/
-u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
-{
- unsigned long guest_itc;
- guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
-
- if (guest_itc >= VMX(vcpu, last_itc)) {
- VMX(vcpu, last_itc) = guest_itc;
- return guest_itc;
- } else
- return VMX(vcpu, last_itc);
-}
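vcpu_get_itc() is the usual monotonic-clock clamp: guest time is the host counter plus a per-vcpu offset, never allowed to run backwards. A minimal standalone sketch of the same pattern (names are illustrative, not the kernel's):

	static unsigned long last_seen;

	static unsigned long guest_clock(unsigned long host_counter, long offset)
	{
		unsigned long now = host_counter + offset;

		if (now < last_seen)		/* never let time go backwards */
			return last_seen;
		last_seen = now;
		return now;
	}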
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
-static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
-{
- struct kvm_vcpu *v;
- struct kvm *kvm;
- int i;
- long itc_offset = val - kvm_get_itc(vcpu);
- unsigned long vitv = VCPU(vcpu, itv);
-
- kvm = (struct kvm *)KVM_VM_BASE;
-
- if (kvm_vcpu_is_bsp(vcpu)) {
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
- v = (struct kvm_vcpu *)((char *)vcpu +
- sizeof(struct kvm_vcpu_data) * i);
- VMX(v, itc_offset) = itc_offset;
- VMX(v, last_itc) = 0;
- }
- }
- VMX(vcpu, last_itc) = 0;
- if (VCPU(vcpu, itm) <= val) {
- VMX(vcpu, itc_check) = 0;
- vcpu_unpend_interrupt(vcpu, vitv);
- } else {
- VMX(vcpu, itc_check) = 1;
- vcpu_set_itm(vcpu, VCPU(vcpu, itm));
- }
-
-}
-
-static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, itm));
-}
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
-{
- unsigned long vitv = VCPU(vcpu, itv);
- VCPU(vcpu, itm) = val;
-
- if (val > vcpu_get_itc(vcpu)) {
- VMX(vcpu, itc_check) = 1;
- vcpu_unpend_interrupt(vcpu, vitv);
- VMX(vcpu, timer_pending) = 0;
- } else
- VMX(vcpu, itc_check) = 0;
-}
-
-#define ITV_VECTOR(itv) (itv&0xff)
-#define ITV_IRQ_MASK(itv) (itv&(1<<16))
-
-static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, itv) = val;
- if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
- vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
- vcpu->arch.timer_pending = 0;
- }
-}
-
-static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
-{
- int vec;
-
- vec = highest_inservice_irq(vcpu);
- if (vec == NULL_VECTOR)
- return;
- VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
- VCPU(vcpu, eoi) = 0;
- vcpu->arch.irq_new_pending = 1;
-
-}
-
-/* See Table 5-8 in SDM vol2 for the definition */
-int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
-{
- union ia64_tpr vtpr;
-
- vtpr.val = VCPU(vcpu, tpr);
-
- if (h_inservice == NMI_VECTOR)
- return IRQ_MASKED_BY_INSVC;
-
- if (h_pending == NMI_VECTOR) {
- /* Non Maskable Interrupt */
- return IRQ_NO_MASKED;
- }
-
- if (h_inservice == ExtINT_VECTOR)
- return IRQ_MASKED_BY_INSVC;
-
- if (h_pending == ExtINT_VECTOR) {
- if (vtpr.mmi) {
- /* mask all external IRQ */
- return IRQ_MASKED_BY_VTPR;
- } else
- return IRQ_NO_MASKED;
- }
-
- if (is_higher_irq(h_pending, h_inservice)) {
- if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
- return IRQ_NO_MASKED;
- else
- return IRQ_MASKED_BY_VTPR;
- } else {
- return IRQ_MASKED_BY_INSVC;
- }
-}
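The vtpr checks above follow the architectural TPR rules: tpr.mmi masks every external interrupt, and otherwise a vector is deliverable only if its 4-bit priority class (vector >> 4) is higher than tpr.mic. A compact sketch of that final test, under those assumptions and ignoring the in-service comparison:

	static int deliverable_by_tpr(int vec, int mic, int mmi)
	{
		if (mmi)
			return 0;		/* mmi masks all external interrupts */
		return (vec >> 4) > mic;	/* priority class must exceed tpr.mic */
	}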
-
-void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
- long spsr;
- int ret;
-
- local_irq_save(spsr);
- ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
- local_irq_restore(spsr);
-
- vcpu->arch.irq_new_pending = 1;
-}
-
-void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
- long spsr;
- int ret;
-
- local_irq_save(spsr);
- ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
- local_irq_restore(spsr);
- if (ret) {
- vcpu->arch.irq_new_pending = 1;
- wmb();
- }
-}
-
-void update_vhpi(struct kvm_vcpu *vcpu, int vec)
-{
- u64 vhpi;
-
- if (vec == NULL_VECTOR)
- vhpi = 0;
- else if (vec == NMI_VECTOR)
- vhpi = 32;
- else if (vec == ExtINT_VECTOR)
- vhpi = 16;
- else
- vhpi = vec >> 4;
-
- VCPU(vcpu, vhpi) = vhpi;
- if (VCPU(vcpu, vac).a_int)
- ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
- (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
-}
-
-u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
-{
- int vec, h_inservice, mask;
-
- vec = highest_pending_irq(vcpu);
- h_inservice = highest_inservice_irq(vcpu);
- mask = irq_masked(vcpu, vec, h_inservice);
- if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
- if (VCPU(vcpu, vhpi))
- update_vhpi(vcpu, NULL_VECTOR);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- if (mask == IRQ_MASKED_BY_VTPR) {
- update_vhpi(vcpu, vec);
- return IA64_SPURIOUS_INT_VECTOR;
- }
- VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
- vcpu_unpend_interrupt(vcpu, vec);
- return (u64)vec;
-}
-
-/**************************************************************************
- Privileged operation emulation routines
- **************************************************************************/
-u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
-{
- union ia64_pta vpta;
- union ia64_rr vrr;
- u64 pval;
- u64 vhpt_offset;
-
- vpta.val = vcpu_get_pta(vcpu);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
- if (vpta.vf) {
- pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
- vpta.val, 0, 0, 0, 0);
- } else {
- pval = (vadr & VRN_MASK) | vhpt_offset |
- (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
- }
- return pval;
-}
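When the PTA selects the short-format VHPT (vpta.vf == 0), the hash is purely arithmetic: keep the region bits of the address, turn the virtual page number into an 8-byte index, and wrap it inside the 2^pta.size table at the PTA base. A hypothetical standalone version of the same computation:

	/* pta_base is assumed to be aligned to 1UL << vhpt_size */
	static unsigned long short_vhpt_hash(unsigned long vadr, int page_shift,
					     int vhpt_size, unsigned long pta_base)
	{
		unsigned long off = ((vadr >> page_shift) << 3) &
				    ((1UL << vhpt_size) - 1);

		return (vadr & (0x7UL << 61)) | pta_base | off;	/* keep VRN bits */
	}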
-
-u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
-{
- union ia64_rr vrr;
- union ia64_pta vpta;
- u64 pval;
-
- vpta.val = vcpu_get_pta(vcpu);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- if (vpta.vf) {
- pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
- 0, 0, 0, 0, 0);
- } else
- pval = 1;
-
- return pval;
-}
-
-u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
-{
- struct thash_data *data;
- union ia64_pta vpta;
- u64 key;
-
- vpta.val = vcpu_get_pta(vcpu);
- if (vpta.vf == 0) {
- key = 1;
- return key;
- }
- data = vtlb_lookup(vcpu, vadr, D_TLB);
- if (!data || !data->p)
- key = 1;
- else
- key = data->key;
-
- return key;
-}
-
-void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long thash, vadr;
-
- vadr = vcpu_get_gr(vcpu, inst.M46.r3);
- thash = vcpu_thash(vcpu, vadr);
- vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
-}
-
-void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long tag, vadr;
-
- vadr = vcpu_get_gr(vcpu, inst.M46.r3);
- tag = vcpu_ttag(vcpu, vadr);
- vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
-}
-
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
-{
- struct thash_data *data;
- union ia64_isr visr, pt_isr;
- struct kvm_pt_regs *regs;
- struct ia64_psr vpsr;
-
- regs = vcpu_regs(vcpu);
- pt_isr.val = VMX(vcpu, cr_isr);
- visr.val = 0;
- visr.ei = pt_isr.ei;
- visr.ir = pt_isr.ir;
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- visr.na = 1;
-
- data = vhpt_lookup(vadr);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu, visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else {
- *padr = (data->gpaddr >> data->ps << data->ps) |
- (vadr & (PSIZE(data->ps) - 1));
- return IA64_NO_FAULT;
- }
- }
-
- data = vtlb_lookup(vcpu, vadr, D_TLB);
- if (data) {
- if (data->p == 0) {
- vcpu_set_isr(vcpu, visr.val);
- data_page_not_present(vcpu, vadr);
- return IA64_FAULT;
- } else if (data->ma == VA_MATTR_NATPAGE) {
- vcpu_set_isr(vcpu, visr.val);
- dnat_page_consumption(vcpu, vadr);
- return IA64_FAULT;
- } else{
- *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
- | (vadr & (PSIZE(data->ps) - 1));
- return IA64_NO_FAULT;
- }
- }
- if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- alt_dtlb(vcpu, vadr);
- return IA64_FAULT;
- } else {
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- } else {
- if (vpsr.ic) {
- vcpu_set_isr(vcpu, visr.val);
- dvhpt_fault(vcpu, vadr);
- return IA64_FAULT;
- } else{
- nested_dtlb(vcpu);
- return IA64_FAULT;
- }
- }
-
- return IA64_NO_FAULT;
-}
-
-int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1, r3;
-
- r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-
- if (vcpu_tpa(vcpu, r3, &r1))
- return IA64_FAULT;
-
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
- return(IA64_NO_FAULT);
-}
-
-void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1, r3;
-
- r3 = vcpu_get_gr(vcpu, inst.M46.r3);
- r1 = vcpu_tak(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-}
-
-/************************************
- * Insert/Purge translation register/cache
- ************************************/
-void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
- thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
-}
-
-void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
- thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
-}
-
-void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
- u64 ps, va, rid;
- struct thash_data *p_itr;
-
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- pte &= ~PAGE_FLAGS_RV_MASK;
- rid = vcpu_get_rr(vcpu, ifa);
- rid = rid & RR_RID_MASK;
- p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
- vcpu_set_tr(p_itr, pte, itir, va, rid);
- vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
-}
-
-
-void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
- u64 gpfn;
- u64 ps, va, rid;
- struct thash_data *p_dtr;
-
- ps = itir_ps(itir);
- va = PAGEALIGN(ifa, ps);
- pte &= ~PAGE_FLAGS_RV_MASK;
-
- if (ps != _PAGE_SIZE_16M)
- thash_purge_entries(vcpu, va, ps);
- gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
- if (__gpfn_is_io(gpfn))
- pte |= VTLB_PTE_IO;
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
- vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
- pte, itir, va, rid);
- vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
-}
-
-void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
- vcpu->arch.dtrs[index].page_flags = 0;
-
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
- int index;
- u64 va;
-
- va = PAGEALIGN(ifa, ps);
- while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
- vcpu->arch.itrs[index].page_flags = 0;
-
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- va = PAGEALIGN(va, ps);
- thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
-{
- thash_purge_all(vcpu);
-}
-
-void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- long psr;
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_PTC_G;
-
- p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
- p->u.ptc_g_data.vaddr = va;
- p->u.ptc_g_data.ps = ps;
- vmm_transition(vcpu);
- /* Do Local Purge Here*/
- vcpu_ptc_l(vcpu, va, ps);
- local_irq_restore(psr);
-}
-
-
-void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
- vcpu_ptc_ga(vcpu, va, ps);
-}
-
-void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- vcpu_ptc_e(vcpu, ifa);
-}
-
-void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long ifa, itir;
-
- ifa = vcpu_get_gr(vcpu, inst.M45.r3);
- itir = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte, slot;
-
- slot = vcpu_get_gr(vcpu, inst.M45.r3);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- vcpu_itr_d(vcpu, slot, pte, itir, ifa);
-}
-
-
-
-void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte, slot;
-
- slot = vcpu_get_gr(vcpu, inst.M45.r3);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte;
-
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long itir, ifa, pte;
-
- itir = vcpu_get_itir(vcpu);
- ifa = vcpu_get_ifa(vcpu);
- pte = vcpu_get_gr(vcpu, inst.M45.r2);
- vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
- *************************************/
-
-void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long imm;
-
- if (inst.M30.s)
- imm = -inst.M30.imm;
- else
- imm = inst.M30.imm;
-
- vcpu_set_itc(vcpu, imm);
-}
-
-void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r2;
-
- r2 = vcpu_get_gr(vcpu, inst.M29.r2);
- vcpu_set_itc(vcpu, r2);
-}
-
-void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r1;
-
- r1 = vcpu_get_itc(vcpu);
- vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
-}
-
-/**************************************************************************
- struct kvm_vcpu protection key register access routines
- **************************************************************************/
-
-unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- return ((unsigned long)ia64_get_pkr(reg));
-}
-
-void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
-{
- ia64_set_pkr(reg, val);
-}
-
-/********************************
- * Moves to privileged registers
- ********************************/
-unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
- unsigned long val)
-{
- union ia64_rr oldrr, newrr;
- unsigned long rrval;
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- unsigned long psr;
-
- oldrr.val = vcpu_get_rr(vcpu, reg);
- newrr.val = val;
- vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
-
- switch ((unsigned long)(reg >> VRN_SHIFT)) {
- case VRN6:
- vcpu->arch.vmm_rr = vrrtomrr(val);
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_SWITCH_RR6;
- vmm_transition(vcpu);
- local_irq_restore(psr);
- break;
- case VRN4:
- rrval = vrrtomrr(val);
- vcpu->arch.metaphysical_saved_rr4 = rrval;
- if (!is_physical_mode(vcpu))
- ia64_set_rr(reg, rrval);
- break;
- case VRN0:
- rrval = vrrtomrr(val);
- vcpu->arch.metaphysical_saved_rr0 = rrval;
- if (!is_physical_mode(vcpu))
- ia64_set_rr(reg, rrval);
- break;
- default:
- ia64_set_rr(reg, vrrtomrr(val));
- break;
- }
-
- return (IA64_NO_FAULT);
-}
-
-void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_rr(vcpu, r3, r2);
-}
-
-void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pmc(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pmd(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- u64 r3, r2;
-
- r3 = vcpu_get_gr(vcpu, inst.M42.r3);
- r2 = vcpu_get_gr(vcpu, inst.M42.r2);
- vcpu_set_pkr(vcpu, r3, r2);
-}
-
-void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_rr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_pkr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_dbr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_ibr(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_pmc(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
-{
- /* FIXME: This could get called as a result of a rsvd-reg fault */
- if (reg > (ia64_get_cpuid(3) & 0xff))
- return 0;
- else
- return ia64_get_cpuid(reg);
-}
-
-void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r3, r1;
-
- r3 = vcpu_get_gr(vcpu, inst.M43.r3);
- r1 = vcpu_get_cpuid(vcpu, r3);
- vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
-{
- VCPU(vcpu, tpr) = val;
- vcpu->arch.irq_check = 1;
-}
-
-unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long r2;
-
- r2 = vcpu_get_gr(vcpu, inst.M32.r2);
- VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
-
- switch (inst.M32.cr3) {
- case 0:
- vcpu_set_dcr(vcpu, r2);
- break;
- case 1:
- vcpu_set_itm(vcpu, r2);
- break;
- case 66:
- vcpu_set_tpr(vcpu, r2);
- break;
- case 67:
- vcpu_set_eoi(vcpu, r2);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long tgt = inst.M33.r1;
- unsigned long val;
-
- switch (inst.M33.cr3) {
- case 65:
- val = vcpu_get_ivr(vcpu);
- vcpu_set_gr(vcpu, tgt, val, 0);
- break;
-
- case 67:
- vcpu_set_gr(vcpu, tgt, 0L, 0);
- break;
- default:
- val = VCPU(vcpu, vcr[inst.M33.cr3]);
- vcpu_set_gr(vcpu, tgt, val, 0);
- break;
- }
-
- return 0;
-}
-
-void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
-{
-
- unsigned long mask;
- struct kvm_pt_regs *regs;
- struct ia64_psr old_psr, new_psr;
-
- old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- regs = vcpu_regs(vcpu);
- /* We only support guests with:
- * vpsr.pk = 0
- * vpsr.is = 0
- * Otherwise, panic.
- */
- if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
- panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
- "& vpsr.is=0\n");
-
- /*
- * For these IA64_PSR bits: id/da/dd/ss/ed/ia.
- * Since these bits become 0 after successful execution of each
- * instruction, we do not keep them in vpsr but set them in mIA64_PSR instead.
- */
- VCPU(vcpu, vpsr) = val
- & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
-
- if (!old_psr.i && (val & IA64_PSR_I)) {
- /* vpsr.i 0->1 */
- vcpu->arch.irq_check = 1;
- }
- new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- /*
- * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
- * except for the following bits:
- * ic/i/dt/si/rt/mc/it/bn/vm
- */
- mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
- IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
- IA64_PSR_VM;
-
- regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
-
- check_mm_mode_switch(vcpu, old_psr, new_psr);
-
- return ;
-}
-
-unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
-{
- struct ia64_psr vpsr;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
- if (!vpsr.ic)
- VCPU(vcpu, ifs) = regs->cr_ifs;
- regs->cr_ifs = IA64_IFS_V;
- return (IA64_NO_FAULT);
-}
-
-
-
-/**************************************************************************
- VCPU banked general register access routines
- **************************************************************************/
-#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
- do { \
- __asm__ __volatile__ ( \
- ";;extr.u %0 = %3,%6,16;;\n" \
- "dep %1 = %0, %1, 0, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 16, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
- "r"(*runat), "r"(b1unat), "r"(runat), \
- "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
- } while (0)
-
-void vcpu_bsw0(struct kvm_vcpu *vcpu)
-{
- unsigned long i;
-
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
- unsigned long *b1 = &VCPU(vcpu, vgr[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &VCPU(vcpu, vbnat);
- unsigned long *b1unat = &VCPU(vcpu, vnat);
-
-
- if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
- for (i = 0; i < 16; i++) {
- *b1++ = *r;
- *r++ = *b0++;
- }
- vcpu_bsw0_unat(i, b0unat, b1unat, runat,
- VMM_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
- }
-}
-
-#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
- do { \
- __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
- "dep %1 = %0, %1, 16, 16;;\n" \
- "st8 [%4] = %1\n" \
- "extr.u %0 = %2, 0, 16;;\n" \
- "dep %3 = %0, %3, %6, 16;;\n" \
- "st8 [%5] = %3\n" \
- ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
- "r"(*runat), "r"(b0unat), "r"(runat), \
- "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
- } while (0)
-
-void vcpu_bsw1(struct kvm_vcpu *vcpu)
-{
- unsigned long i;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- unsigned long *r = &regs->r16;
- unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
- unsigned long *b1 = &VCPU(vcpu, vgr[0]);
- unsigned long *runat = &regs->eml_unat;
- unsigned long *b0unat = &VCPU(vcpu, vbnat);
- unsigned long *b1unat = &VCPU(vcpu, vnat);
-
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
- for (i = 0; i < 16; i++) {
- *b0++ = *r;
- *r++ = *b1++;
- }
- vcpu_bsw1_unat(i, b0unat, b1unat, runat,
- VMM_PT_REGS_R16_SLOT);
- VCPU(vcpu, vpsr) |= IA64_PSR_BN;
- }
-}
-
-void vcpu_rfi(struct kvm_vcpu *vcpu)
-{
- unsigned long ifs, psr;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- psr = VCPU(vcpu, ipsr);
- if (psr & IA64_PSR_BN)
- vcpu_bsw1(vcpu);
- else
- vcpu_bsw0(vcpu);
- vcpu_set_psr(vcpu, psr);
- ifs = VCPU(vcpu, ifs);
- if (ifs >> 63)
- regs->cr_ifs = ifs;
- regs->cr_iip = VCPU(vcpu, iip);
-}
-
-/*
- * VPSR cannot track the guest PSR bits masked below, so this
- * function assembles the complete guest PSR from vpsr and cr.ipsr.
- */
-
-unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
-{
- unsigned long mask;
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
- mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
- IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
- return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
-}
-
-void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long vpsr;
- unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
- | inst.M44.imm;
-
- vpsr = vcpu_get_psr(vcpu);
- vpsr &= (~imm24);
- vcpu_set_psr(vcpu, vpsr);
-}
-
-void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long vpsr;
- unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
- | inst.M44.imm;
-
- vpsr = vcpu_get_psr(vcpu);
- vpsr |= imm24;
- vcpu_set_psr(vcpu, vpsr);
-}
-
-/* Generate Mask
- * Parameter:
- * bit -- starting bit
- * len -- how many bits
- */
-#define MASK(bit,len) \
-({ \
- __u64 ret; \
- \
- __asm __volatile("dep %0=-1, r0, %1, %2"\
- : "=r" (ret): \
- "M" (bit), \
- "M" (len)); \
- ret; \
-})
-
-void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
-{
- val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
- vcpu_set_psr(vcpu, val);
-}
-
-void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long val;
-
- val = vcpu_get_gr(vcpu, inst.M35.r2);
- vcpu_set_psr_l(vcpu, val);
-}
-
-void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
- unsigned long val;
-
- val = vcpu_get_psr(vcpu);
- val = (val & MASK(0, 32)) | (val & MASK(35, 2));
- vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
-}
-
-void vcpu_increment_iip(struct kvm_vcpu *vcpu)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
- if (ipsr->ri == 2) {
- ipsr->ri = 0;
- regs->cr_iip += 16;
- } else
- ipsr->ri++;
-}
-
-void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
-{
- struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-
- if (ipsr->ri == 0) {
- ipsr->ri = 2;
- regs->cr_iip -= 16;
- } else
- ipsr->ri--;
-}
-
-/** Emulate a privileged operation.
- *
- * @param vcpu virtual cpu
- * @cause the reason that caused the virtualization fault
- * @opcode the instruction that caused the virtualization fault
- */
-
-void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
-{
- unsigned long status, cause, opcode ;
- INST64 inst;
-
- status = IA64_NO_FAULT;
- cause = VMX(vcpu, cause);
- opcode = VMX(vcpu, opcode);
- inst.inst = opcode;
- /*
- * Switch to actual virtual rid in rr0 and rr4,
- * which is required by some tlb related instructions.
- */
- prepare_if_physical_mode(vcpu);
-
- switch (cause) {
- case EVENT_RSM:
- kvm_rsm(vcpu, inst);
- break;
- case EVENT_SSM:
- kvm_ssm(vcpu, inst);
- break;
- case EVENT_MOV_TO_PSR:
- kvm_mov_to_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PSR:
- kvm_mov_from_psr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CR:
- kvm_mov_from_cr(vcpu, inst);
- break;
- case EVENT_MOV_TO_CR:
- kvm_mov_to_cr(vcpu, inst);
- break;
- case EVENT_BSW_0:
- vcpu_bsw0(vcpu);
- break;
- case EVENT_BSW_1:
- vcpu_bsw1(vcpu);
- break;
- case EVENT_COVER:
- vcpu_cover(vcpu);
- break;
- case EVENT_RFI:
- vcpu_rfi(vcpu);
- break;
- case EVENT_ITR_D:
- kvm_itr_d(vcpu, inst);
- break;
- case EVENT_ITR_I:
- kvm_itr_i(vcpu, inst);
- break;
- case EVENT_PTR_D:
- kvm_ptr_d(vcpu, inst);
- break;
- case EVENT_PTR_I:
- kvm_ptr_i(vcpu, inst);
- break;
- case EVENT_ITC_D:
- kvm_itc_d(vcpu, inst);
- break;
- case EVENT_ITC_I:
- kvm_itc_i(vcpu, inst);
- break;
- case EVENT_PTC_L:
- kvm_ptc_l(vcpu, inst);
- break;
- case EVENT_PTC_G:
- kvm_ptc_g(vcpu, inst);
- break;
- case EVENT_PTC_GA:
- kvm_ptc_ga(vcpu, inst);
- break;
- case EVENT_PTC_E:
- kvm_ptc_e(vcpu, inst);
- break;
- case EVENT_MOV_TO_RR:
- kvm_mov_to_rr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_RR:
- kvm_mov_from_rr(vcpu, inst);
- break;
- case EVENT_THASH:
- kvm_thash(vcpu, inst);
- break;
- case EVENT_TTAG:
- kvm_ttag(vcpu, inst);
- break;
- case EVENT_TPA:
- status = kvm_tpa(vcpu, inst);
- break;
- case EVENT_TAK:
- kvm_tak(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR_IMM:
- kvm_mov_to_ar_imm(vcpu, inst);
- break;
- case EVENT_MOV_TO_AR:
- kvm_mov_to_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_FROM_AR:
- kvm_mov_from_ar_reg(vcpu, inst);
- break;
- case EVENT_MOV_TO_DBR:
- kvm_mov_to_dbr(vcpu, inst);
- break;
- case EVENT_MOV_TO_IBR:
- kvm_mov_to_ibr(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMC:
- kvm_mov_to_pmc(vcpu, inst);
- break;
- case EVENT_MOV_TO_PMD:
- kvm_mov_to_pmd(vcpu, inst);
- break;
- case EVENT_MOV_TO_PKR:
- kvm_mov_to_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_DBR:
- kvm_mov_from_dbr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_IBR:
- kvm_mov_from_ibr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PMC:
- kvm_mov_from_pmc(vcpu, inst);
- break;
- case EVENT_MOV_FROM_PKR:
- kvm_mov_from_pkr(vcpu, inst);
- break;
- case EVENT_MOV_FROM_CPUID:
- kvm_mov_from_cpuid(vcpu, inst);
- break;
- case EVENT_VMSW:
- status = IA64_FAULT;
- break;
- default:
- break;
- };
-	/* Assume all status values are IA64_NO_FAULT? */
- if (status == IA64_NO_FAULT && cause != EVENT_RFI)
- vcpu_increment_iip(vcpu);
-
- recover_if_physical_mode(vcpu);
-}
-
-void init_vcpu(struct kvm_vcpu *vcpu)
-{
- int i;
-
- vcpu->arch.mode_flags = GUEST_IN_PHY;
- VMX(vcpu, vrr[0]) = 0x38;
- VMX(vcpu, vrr[1]) = 0x38;
- VMX(vcpu, vrr[2]) = 0x38;
- VMX(vcpu, vrr[3]) = 0x38;
- VMX(vcpu, vrr[4]) = 0x38;
- VMX(vcpu, vrr[5]) = 0x38;
- VMX(vcpu, vrr[6]) = 0x38;
- VMX(vcpu, vrr[7]) = 0x38;
- VCPU(vcpu, vpsr) = IA64_PSR_BN;
- VCPU(vcpu, dcr) = 0;
- /* pta.size must not be 0. The minimum is 15 (32k) */
- VCPU(vcpu, pta) = 15 << 2;
- VCPU(vcpu, itv) = 0x10000;
- VCPU(vcpu, itm) = 0;
- VMX(vcpu, last_itc) = 0;
-
- VCPU(vcpu, lid) = VCPU_LID(vcpu);
- VCPU(vcpu, ivr) = 0;
- VCPU(vcpu, tpr) = 0x10000;
- VCPU(vcpu, eoi) = 0;
- VCPU(vcpu, irr[0]) = 0;
- VCPU(vcpu, irr[1]) = 0;
- VCPU(vcpu, irr[2]) = 0;
- VCPU(vcpu, irr[3]) = 0;
- VCPU(vcpu, pmv) = 0x10000;
- VCPU(vcpu, cmcv) = 0x10000;
- VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
- VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
- update_vhpi(vcpu, NULL_VECTOR);
- VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
-
- for (i = 0; i < 4; i++)
- VLSAPIC_INSVC(vcpu, i) = 0;
-}
-
-void kvm_init_all_rr(struct kvm_vcpu *vcpu)
-{
- unsigned long psr;
-
- local_irq_save(psr);
-
-	/* WARNING: virtual mode and physical mode must not coexist
-	 * in the same region
-	 */
-
- vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
- vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
-
- if (is_physical_mode(vcpu)) {
- if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
- panic_vm(vcpu, "Machine Status conflicts!\n");
-
- ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
- ia64_dv_serialize_data();
- } else {
- ia64_set_rr((VRN0 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr0);
- ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr4);
- ia64_dv_serialize_data();
- }
- ia64_set_rr((VRN1 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN1])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN2 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN2])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN3 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN3])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN5 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN5])));
- ia64_dv_serialize_data();
- ia64_set_rr((VRN7 << VRN_SHIFT),
- vrrtomrr(VMX(vcpu, vrr[VRN7])));
- ia64_dv_serialize_data();
- ia64_srlz_d();
- ia64_set_psr(psr);
-}
-
-int vmm_entry(void)
-{
- struct kvm_vcpu *v;
- v = current_vcpu;
-
- ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
- 0, 0, 0, 0, 0, 0);
- kvm_init_vtlb(v);
- kvm_init_vhpt(v);
- init_vcpu(v);
- kvm_init_all_rr(v);
- vmm_reset_entry();
-
- return 0;
-}
-
-static void kvm_show_registers(struct kvm_pt_regs *regs)
-{
- unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
-
- struct kvm_vcpu *vcpu = current_vcpu;
- if (vcpu != NULL)
- printk("vcpu 0x%p vcpu %d\n",
- vcpu, vcpu->vcpu_id);
-
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
- regs->cr_ipsr, regs->cr_ifs, ip);
-
- printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
- regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
- printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
- regs->ar_rnat, regs->ar_bspstore, regs->pr);
- printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
- regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
- printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
- printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
- regs->b6, regs->b7);
- printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
- regs->f6.u.bits[1], regs->f6.u.bits[0],
- regs->f7.u.bits[1], regs->f7.u.bits[0]);
- printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
- regs->f8.u.bits[1], regs->f8.u.bits[0],
- regs->f9.u.bits[1], regs->f9.u.bits[0]);
- printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
- regs->f10.u.bits[1], regs->f10.u.bits[0],
- regs->f11.u.bits[1], regs->f11.u.bits[0]);
-
- printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
- regs->r2, regs->r3);
- printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
- regs->r9, regs->r10);
- printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
- regs->r12, regs->r13);
- printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
- regs->r15, regs->r16);
- printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
- regs->r18, regs->r19);
- printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
- regs->r21, regs->r22);
- printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
- regs->r24, regs->r25);
- printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
- regs->r27, regs->r28);
- printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
- regs->r30, regs->r31);
-
-}
-
-void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- struct kvm_pt_regs *regs = vcpu_regs(v);
- struct exit_ctl_data *p = &v->arch.exit_data;
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- printk(buf);
- kvm_show_registers(regs);
- p->exit_reason = EXIT_REASON_VM_PANIC;
- vmm_transition(v);
-	/* Never returns */
- while (1);
-}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
deleted file mode 100644
index 988911b4cc7a..000000000000
--- a/arch/ia64/kvm/vcpu.h
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * vcpu.h: vcpu routines
- * Copyright (c) 2005, Intel Corporation.
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- *
- * Copyright (c) 2007, Intel Corporation.
- * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#ifndef __KVM_VCPU_H__
-#define __KVM_VCPU_H__
-
-#include <asm/types.h>
-#include <asm/fpu.h>
-#include <asm/processor.h>
-
-#ifndef __ASSEMBLY__
-#include "vti.h"
-
-#include <linux/kvm_host.h>
-#include <linux/spinlock.h>
-
-typedef unsigned long IA64_INST;
-
-typedef union U_IA64_BUNDLE {
- unsigned long i64[2];
- struct { unsigned long template:5, slot0:41, slot1a:18,
- slot1b:23, slot2:41; };
-	/* NOTE: the following doesn't work because bitfields can't cross natural
-	   size boundaries:
-	   struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
-} IA64_BUNDLE;
-
-typedef union U_INST64_A5 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
- imm9d:9, s:1, major:4; };
-} INST64_A5;
-
-typedef union U_INST64_B4 {
- IA64_INST inst;
- struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
- wh:2, d:1, un1:1, major:4; };
-} INST64_B4;
-
-typedef union U_INST64_B8 {
- IA64_INST inst;
- struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
-} INST64_B8;
-
-typedef union U_INST64_B9 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
-} INST64_B9;
-
-typedef union U_INST64_I19 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
-} INST64_I19;
-
-typedef union U_INST64_I26 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I26;
-
-typedef union U_INST64_I27 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
-} INST64_I27;
-
-typedef union U_INST64_I28 { /* not privileged (mov from AR) */
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I28;
-
-typedef union U_INST64_M28 {
- IA64_INST inst;
- struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M28;
-
-typedef union U_INST64_M29 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M29;
-
-typedef union U_INST64_M30 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
- x3:3, s:1, major:4; };
-} INST64_M30;
-
-typedef union U_INST64_M31 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M31;
-
-typedef union U_INST64_M32 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M32;
-
-typedef union U_INST64_M33 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M33;
-
-typedef union U_INST64_M35 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
-} INST64_M35;
-
-typedef union U_INST64_M36 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
-} INST64_M36;
-
-typedef union U_INST64_M37 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
- i:1, major:4; };
-} INST64_M37;
-
-typedef union U_INST64_M41 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-} INST64_M41;
-
-typedef union U_INST64_M42 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M42;
-
-typedef union U_INST64_M43 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M43;
-
-typedef union U_INST64_M44 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
-} INST64_M44;
-
-typedef union U_INST64_M45 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M45;
-
-typedef union U_INST64_M46 {
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
- x3:3, un1:1, major:4; };
-} INST64_M46;
-
-typedef union U_INST64_M47 {
- IA64_INST inst;
- struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M47;
-
-typedef union U_INST64_M1{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M1;
-
-typedef union U_INST64_M2{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M2;
-
-typedef union U_INST64_M3{
- IA64_INST inst;
- struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M3;
-
-typedef union U_INST64_M4 {
- IA64_INST inst;
- struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M4;
-
-typedef union U_INST64_M5 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M5;
-
-typedef union U_INST64_M6 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M6;
-
-typedef union U_INST64_M9 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M9;
-
-typedef union U_INST64_M10 {
- IA64_INST inst;
- struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M10;
-
-typedef union U_INST64_M12 {
- IA64_INST inst;
- struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
- x6:6, m:1, major:4; };
-} INST64_M12;
-
-typedef union U_INST64_M15 {
- IA64_INST inst;
- struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
- x6:6, s:1, major:4; };
-} INST64_M15;
-
-typedef union U_INST64 {
- IA64_INST inst;
- struct { unsigned long :37, major:4; } generic;
- INST64_A5 A5; /* used in build_hypercall_bundle only */
- INST64_B4 B4; /* used in build_hypercall_bundle only */
- INST64_B8 B8; /* rfi, bsw.[01] */
- INST64_B9 B9; /* break.b */
- INST64_I19 I19; /* used in build_hypercall_bundle only */
- INST64_I26 I26; /* mov register to ar (I unit) */
- INST64_I27 I27; /* mov immediate to ar (I unit) */
- INST64_I28 I28; /* mov from ar (I unit) */
- INST64_M1 M1; /* ld integer */
- INST64_M2 M2;
- INST64_M3 M3;
- INST64_M4 M4; /* st integer */
- INST64_M5 M5;
-	INST64_M6 M6; /* ldfd floating point */
-	INST64_M9 M9; /* stfd floating point */
-	INST64_M10 M10; /* stfd floating point */
-	INST64_M12 M12; /* ldfd pair floating point */
- INST64_M15 M15; /* lfetch + imm update */
- INST64_M28 M28; /* purge translation cache entry */
- INST64_M29 M29; /* mov register to ar (M unit) */
- INST64_M30 M30; /* mov immediate to ar (M unit) */
- INST64_M31 M31; /* mov from ar (M unit) */
- INST64_M32 M32; /* mov reg to cr */
- INST64_M33 M33; /* mov from cr */
- INST64_M35 M35; /* mov to psr */
- INST64_M36 M36; /* mov from psr */
- INST64_M37 M37; /* break.m */
- INST64_M41 M41; /* translation cache insert */
- INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
- INST64_M43 M43; /* mov from indirect reg */
- INST64_M44 M44; /* set/reset system mask */
- INST64_M45 M45; /* translation purge */
- INST64_M46 M46; /* translation access (tpa,tak) */
- INST64_M47 M47; /* purge translation entry */
-} INST64;
-
-#define MASK_41 ((unsigned long)0x1ffffffffff)
-
-/* Virtual address memory attributes encoding */
-#define VA_MATTR_WB 0x0
-#define VA_MATTR_UC 0x4
-#define VA_MATTR_UCE 0x5
-#define VA_MATTR_WC 0x6
-#define VA_MATTR_NATPAGE 0x7
-
-#define PMASK(size) (~((size) - 1))
-#define PSIZE(size) (1UL<<(size))
-#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
-#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
-#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
-#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
-
-#define ARCH_PAGE_SHIFT 12
-
-#define INVALID_TI_TAG (1UL << 63)
-
-#define VTLB_PTE_P_BIT 0
-#define VTLB_PTE_IO_BIT 60
-#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
-#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
-
-#define vcpu_quick_region_check(_tr_regions,_ifa) \
- (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
-
-#define vcpu_quick_region_set(_tr_regions,_ifa) \
- do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
-
-static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
- u64 va, u64 rid)
-{
- trp->page_flags = pte;
- trp->itir = itir;
- trp->vadr = va;
- trp->rid = rid;
-}
-
-extern u64 kvm_get_mpt_entry(u64 gpfn);
-
-/* Return the I/O type of the gpfn, or 0 if it is not an I/O page */
-static inline u64 __gpfn_is_io(u64 gpfn)
-{
- u64 pte;
- pte = kvm_get_mpt_entry(gpfn);
- if (!(pte & GPFN_INV_MASK)) {
- pte = pte & GPFN_IO_MASK;
- if (pte != GPFN_PHYS_MMIO)
- return pte;
- }
- return 0;
-}
-#endif
-#define IA64_NO_FAULT 0
-#define IA64_FAULT 1
-
-#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
-
-#define SW_BAD 0 /* Bad mode transition */
-#define SW_V2P 1 /* Physical emulation is activated */
-#define SW_P2V 2 /* Exit physical mode emulation */
-#define SW_SELF 3 /* No mode transition */
-#define SW_NOP 4 /* Mode transition, but without action required */
-
-#define GUEST_IN_PHY 0x1
-#define GUEST_PHY_EMUL 0x2
-
-#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
-
-#define VRN_SHIFT 61
-#define VRN_MASK 0xe000000000000000
-#define VRN0 0x0UL
-#define VRN1 0x1UL
-#define VRN2 0x2UL
-#define VRN3 0x3UL
-#define VRN4 0x4UL
-#define VRN5 0x5UL
-#define VRN6 0x6UL
-#define VRN7 0x7UL
-
-#define IRQ_NO_MASKED 0
-#define IRQ_MASKED_BY_VTPR 1
-#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
-
-#define PTA_BASE_SHIFT 15
-
-#define IA64_PSR_VM_BIT 46
-#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
-
-/* Interruption Function State */
-#define IA64_IFS_V_BIT 63
-#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
-
-#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
-#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
-
-#ifndef __ASSEMBLY__
-
-#include <asm/gcc_intrin.h>
-
-#define is_physical_mode(v) \
- ((v->arch.mode_flags) & GUEST_IN_PHY)
-
-#define is_virtual_mode(v) \
- (!is_physical_mode(v))
-
-#define MODE_IND(psr) \
- (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
-
-#ifndef CONFIG_SMP
-#define _vmm_raw_spin_lock(x) do {}while(0)
-#define _vmm_raw_spin_unlock(x) do {}while(0)
-#else
-typedef struct {
- volatile unsigned int lock;
-} vmm_spinlock_t;
-#define _vmm_raw_spin_lock(x) \
- do { \
- __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
- __u64 ia64_spinlock_val; \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
- if (unlikely(ia64_spinlock_val)) { \
- do { \
- while (*ia64_spinlock_ptr) \
- ia64_barrier(); \
- ia64_spinlock_val = \
- ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
- } while (ia64_spinlock_val); \
- } \
- } while (0)
-
-#define _vmm_raw_spin_unlock(x) \
- do { barrier(); \
- ((vmm_spinlock_t *)x)->lock = 0; } \
-while (0)
-#endif
-
-void vmm_spin_lock(vmm_spinlock_t *lock);
-void vmm_spin_unlock(vmm_spinlock_t *lock);
-enum {
- I_TLB = 1,
- D_TLB = 2
-};
-
-union kvm_va {
- struct {
- unsigned long off : 60; /* intra-region offset */
- unsigned long reg : 4; /* region number */
- } f;
- unsigned long l;
- void *p;
-};
-
-#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
- _v.f.reg = 0; _v.l; })
-#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
- _v.f.reg = -1; _v.p; })
-
-#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.rid; })
-#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.ps; })
-#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
- _v.ve; })
-
-enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF };
-enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
-
-#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
-#define VMX(_v, _x) ((_v)->arch._x)
-
-#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
-#define VLSAPIC_XTP(_v) VMX(_v, xtp)
-
-static inline unsigned long itir_ps(unsigned long itir)
-{
- return ((itir >> 2) & 0x3f);
-}
-
-
-/**************************************************************************
- VCPU control register access routines
- **************************************************************************/
-
-static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, itir));
-}
-
-static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, itir) = val;
-}
-
-static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, ifa));
-}
-
-static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ifa) = val;
-}
-
-static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, iva));
-}
-
-static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, pta));
-}
-
-static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, lid));
-}
-
-static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, tpr));
-}
-
-static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
-{
- return (0UL); /*reads of eoi always return 0 */
-}
-
-static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[0]));
-}
-
-static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[1]));
-}
-
-static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[2]));
-}
-
-static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
-{
- return ((u64)VCPU(vcpu, irr[3]));
-}
-
-static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
-{
- ia64_setreg(_IA64_REG_CR_DCR, val);
-}
-
-static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, isr) = val;
-}
-
-static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, lid) = val;
-}
-
-static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ipsr) = val;
-}
-
-static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iip) = val;
-}
-
-static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, ifs) = val;
-}
-
-static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iipa) = val;
-}
-
-static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
-{
- VCPU(vcpu, iha) = val;
-}
-
-
-static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return vcpu->arch.vrr[reg>>61];
-}
-
-/**************************************************************************
- VCPU debug breakpoint register access routines
- **************************************************************************/
-
-static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- __ia64_set_dbr(reg, val);
-}
-
-static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- ia64_set_ibr(reg, val);
-}
-
-static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return ((u64)__ia64_get_dbr(reg));
-}
-
-static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
-{
- return ((u64)ia64_get_ibr(reg));
-}
-
-/**************************************************************************
- VCPU performance monitor register access routines
- **************************************************************************/
-static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- /* NOTE: Writes to unimplemented PMC registers are discarded */
- ia64_set_pmc(reg, val);
-}
-
-static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
-{
- /* NOTE: Writes to unimplemented PMD registers are discarded */
- ia64_set_pmd(reg, val);
-}
-
-static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
-{
- /* NOTE: Reads from unimplemented PMC registers return zero */
- return ((u64)ia64_get_pmc(reg));
-}
-
-static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
-{
- /* NOTE: Reads from unimplemented PMD registers return zero */
- return ((u64)ia64_get_pmd(reg));
-}
-
-static inline unsigned long vrrtomrr(unsigned long val)
-{
- union ia64_rr rr;
- rr.val = val;
- rr.rid = (rr.rid << 4) | 0xe;
- if (rr.ps > PAGE_SHIFT)
- rr.ps = PAGE_SHIFT;
- rr.ve = 1;
- return rr.val;
-}
-
-
-static inline int highest_bits(int *dat)
-{
- u32 bits, bitnum;
- int i;
-
- /* loop for all 256 bits */
- for (i = 7; i >= 0 ; i--) {
- bits = dat[i];
- if (bits) {
- bitnum = fls(bits);
- return i * 32 + bitnum - 1;
- }
- }
- return NULL_VECTOR;
-}
-
-/*
- * Return whether the pending irq is higher than the in-service one.
- */
-static inline int is_higher_irq(int pending, int inservice)
-{
- return ((pending > inservice)
- || ((pending != NULL_VECTOR)
- && (inservice == NULL_VECTOR)));
-}
-
-static inline int is_higher_class(int pending, int mic)
-{
- return ((pending >> 4) > mic);
-}
-
-/*
- * Return 0-255 for the highest pending irq,
- * or NULL_VECTOR when none is pending.
- */
-static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
-{
- if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
- return NMI_VECTOR;
- if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return highest_bits((int *)&VCPU(vcpu, irr[0]));
-}
-
-static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
-{
- if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
- return NMI_VECTOR;
- if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
- return ExtINT_VECTOR;
-
- return highest_bits((int *)&(VMX(vcpu, insvc[0])));
-}
-
-extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
- struct ia64_fpreg *val);
-extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
-extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
- u64 val, int nat);
-extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
-extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
-extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
-extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
-extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
- u64 itir, u64 va, int type);
-extern struct thash_data *vhpt_lookup(u64 va);
-extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
-extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
-extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
-extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
- u64 itir, u64 ifa, int type);
-extern void thash_purge_all(struct kvm_vcpu *v);
-extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
- u64 va, int is_data);
-extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
- u64 ps, int is_data);
-
-extern void vcpu_increment_iip(struct kvm_vcpu *v);
-extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
-extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
-extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
-extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
-extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
-extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
-extern void nested_dtlb(struct kvm_vcpu *vcpu);
-extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
-extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
-
-extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
-extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
-
-extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
-extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
-extern void vmm_transition(struct kvm_vcpu *vcpu);
-extern void vmm_trampoline(union context *from, union context *to);
-extern int vmm_entry(void);
-extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
-
-extern void vmm_reset_entry(void);
-void kvm_init_vtlb(struct kvm_vcpu *v);
-void kvm_init_vhpt(struct kvm_vcpu *v);
-void thash_init(struct thash_cb *hcb, u64 sz);
-
-void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-u64 kvm_gpa_to_mpa(u64 gpa);
-extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7);
-
-extern long vmm_sanity;
-
-#endif
-#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
deleted file mode 100644
index 176a12cd56de..000000000000
--- a/arch/ia64/kvm/vmm.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * vmm.c: vmm module interface with kvm module
- *
- * Copyright (c) 2007, Intel Corporation.
- *
- * Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/fpswa.h>
-
-#include "vcpu.h"
-
-MODULE_AUTHOR("Intel");
-MODULE_LICENSE("GPL");
-
-extern char kvm_ia64_ivt;
-extern char kvm_asm_mov_from_ar;
-extern char kvm_asm_mov_from_ar_sn2;
-extern fpswa_interface_t *vmm_fpswa_interface;
-
-long vmm_sanity = 1;
-
-struct kvm_vmm_info vmm_info = {
- .module = THIS_MODULE,
- .vmm_entry = vmm_entry,
- .tramp_entry = vmm_trampoline,
- .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
- .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
- .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
-};
-
-static int __init kvm_vmm_init(void)
-{
-
- vmm_fpswa_interface = fpswa_interface;
-
-	/* Register vmm data with the kvm side */
- return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
-}
-
-static void __exit kvm_vmm_exit(void)
-{
- kvm_exit();
- return ;
-}
-
-void vmm_spin_lock(vmm_spinlock_t *lock)
-{
- _vmm_raw_spin_lock(lock);
-}
-
-void vmm_spin_unlock(vmm_spinlock_t *lock)
-{
- _vmm_raw_spin_unlock(lock);
-}
-
-static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
-{
- struct exit_ctl_data *p = &vcpu->arch.exit_data;
- long psr;
-
- local_irq_save(psr);
- p->exit_reason = EXIT_REASON_DEBUG;
- vmm_transition(vcpu);
- local_irq_restore(psr);
-}
-
-asmlinkage int printk(const char *fmt, ...)
-{
- struct kvm_vcpu *vcpu = current_vcpu;
- va_list args;
- int r;
-
- memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
- va_start(args, fmt);
- r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
- va_end(args);
- vcpu_debug_exit(vcpu);
- return r;
-}
-
-module_init(kvm_vmm_init)
-module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
deleted file mode 100644
index 397e34a63e18..000000000000
--- a/arch/ia64/kvm/vmm_ivt.S
+++ /dev/null
@@ -1,1392 +0,0 @@
-/*
- * arch/ia64/kvm/vmm_ivt.S
- *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- * David Mosberger <davidm@hpl.hp.com>
- * Copyright (C) 2000, 2002-2003 Intel Co
- * Asit Mallick <asit.k.mallick@intel.com>
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Kenneth Chen <kenneth.w.chen@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- *
- *
- * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
- * for SMP
- * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
- * handler now uses virtual PT.
- *
- * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Supporting Intel virtualization architecture
- *
- */
-
-/*
- * This file defines the interruption vector table used by the CPU.
- * It does not include one entry per possible cause of interruption.
- *
- * The first 20 entries of the table contain 64 bundles each while the
- * remaining 48 entries contain only 16 bundles each.
- *
- * The 64 bundles are used to allow inlining the whole handler for
- * critical interruptions like TLB misses.
- *
- * For each entry, the comment is as follows:
- *
- *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
- *	entry offset ----/     /         /                  /  /
- *	entry number ---------/         /                  /  /
- *	size of the entry -------------/                  /  /
- *	vector name -------------------------------------/  /
- *	interruptions triggering this vector ----------------/
- *
- * The table is 32KB in size and must be aligned on 32KB
- * boundary.
- * (The CPU ignores the 15 lower bits of the address)
- *
- * Table is based upon EAS2.6 (Oct 1999)
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/cache.h>
-#include <asm/pgtable.h>
-
-#include "asm-offsets.h"
-#include "vcpu.h"
-#include "kvm_minstate.h"
-#include "vti.h"
-
-#if 0
-# define PSR_DEFAULT_BITS psr.ac
-#else
-# define PSR_DEFAULT_BITS 0
-#endif
-
-#define KVM_FAULT(n) \
- kvm_fault_##n:; \
- mov r19=n;; \
- br.sptk.many kvm_vmm_panic; \
- ;; \
-
-#define KVM_REFLECT(n) \
- mov r31=pr; \
- mov r19=n; /* prepare to save predicates */ \
- mov r29=cr.ipsr; \
- ;; \
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
-(p7) br.sptk.many kvm_dispatch_reflection; \
- br.sptk.many kvm_vmm_panic; \
-
-GLOBAL_ENTRY(kvm_vmm_panic)
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,1,0
- mov out0=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- br.call.sptk.many b6=vmm_panic_handler;
-END(kvm_vmm_panic)
-
- .section .text..ivt,"ax"
-
- .align 32768 // align on 32KB boundary
- .global kvm_ia64_ivt
-kvm_ia64_ivt:
-///////////////////////////////////////////////////////////////
-// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-ENTRY(kvm_vhpt_miss)
- KVM_FAULT(0)
-END(kvm_vhpt_miss)
-
- .org kvm_ia64_ivt+0x400
-////////////////////////////////////////////////////////////////
-// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-ENTRY(kvm_itlb_miss)
- mov r31 = pr
- mov r29=cr.ipsr;
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk kvm_alt_itlb_miss
- mov r19 = 1
- br.sptk kvm_itlb_miss_dispatch
- KVM_FAULT(1);
-END(kvm_itlb_miss)
-
- .org kvm_ia64_ivt+0x0800
-//////////////////////////////////////////////////////////////////
-// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-ENTRY(kvm_dtlb_miss)
- mov r31 = pr
- mov r29=cr.ipsr;
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk kvm_alt_dtlb_miss
- br.sptk kvm_dtlb_miss_dispatch
-END(kvm_dtlb_miss)
-
- .org kvm_ia64_ivt+0x0c00
-////////////////////////////////////////////////////////////////////
-// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-ENTRY(kvm_alt_itlb_miss)
- mov r16=cr.ifa // get address that caused the TLB miss
- ;;
- movl r17=PAGE_KERNEL
- mov r24=cr.ipsr
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- ;;
- or r19=r17,r19 // insert PTE control bits into r19
- ;;
- movl r20=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r20
- ;;
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(kvm_alt_itlb_miss)
-
- .org kvm_ia64_ivt+0x1000
-/////////////////////////////////////////////////////////////////////
-// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-ENTRY(kvm_alt_dtlb_miss)
- mov r16=cr.ifa // get address that caused the TLB miss
- ;;
- movl r17=PAGE_KERNEL
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r24=cr.ipsr
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- ;;
- or r19=r19,r17 // insert PTE control bits into r19
- ;;
- movl r20=IA64_GRANULE_SHIFT<<2
- ;;
- mov cr.itir=r20
- ;;
- itc.d r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
-END(kvm_alt_dtlb_miss)
-
- .org kvm_ia64_ivt+0x1400
-//////////////////////////////////////////////////////////////////////
-// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-ENTRY(kvm_nested_dtlb_miss)
- KVM_FAULT(5)
-END(kvm_nested_dtlb_miss)
-
- .org kvm_ia64_ivt+0x1800
-/////////////////////////////////////////////////////////////////////
-// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-ENTRY(kvm_ikey_miss)
- KVM_REFLECT(6)
-END(kvm_ikey_miss)
-
- .org kvm_ia64_ivt+0x1c00
-/////////////////////////////////////////////////////////////////////
-// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-ENTRY(kvm_dkey_miss)
- KVM_REFLECT(7)
-END(kvm_dkey_miss)
-
- .org kvm_ia64_ivt+0x2000
-////////////////////////////////////////////////////////////////////
-// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-ENTRY(kvm_dirty_bit)
- KVM_REFLECT(8)
-END(kvm_dirty_bit)
-
- .org kvm_ia64_ivt+0x2400
-////////////////////////////////////////////////////////////////////
-// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-ENTRY(kvm_iaccess_bit)
- KVM_REFLECT(9)
-END(kvm_iaccess_bit)
-
- .org kvm_ia64_ivt+0x2800
-///////////////////////////////////////////////////////////////////
-// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-ENTRY(kvm_daccess_bit)
- KVM_REFLECT(10)
-END(kvm_daccess_bit)
-
- .org kvm_ia64_ivt+0x2c00
-/////////////////////////////////////////////////////////////////
-// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-ENTRY(kvm_break_fault)
- mov r31=pr
- mov r19=11
- mov r29=cr.ipsr
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- ;;
- alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
- mov out0=cr.ifa
- mov out2=cr.isr // FIXME: pity to make this slow access twice
- mov out3=cr.iim // FIXME: pity to make this slow access twice
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15)ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out1=16,sp
- br.call.sptk.many b6=kvm_ia64_handle_break
- ;;
-END(kvm_break_fault)
-
- .org kvm_ia64_ivt+0x3000
-/////////////////////////////////////////////////////////////////
-// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-ENTRY(kvm_interrupt)
- mov r31=pr // prepare to save predicates
- mov r19=12
- mov r29=cr.ipsr
- ;;
- tbit.z p6,p7=r29,IA64_PSR_VM_BIT
- tbit.z p0,p15=r29,IA64_PSR_I_BIT
- ;;
-(p7) br.sptk kvm_dispatch_interrupt
- ;;
- mov r27=ar.rsc /* M */
- mov r20=r1 /* A */
- mov r25=ar.unat /* M */
- mov r26=ar.pfs /* I */
- mov r28=cr.iip /* M */
- cover /* B (or nothing) */
- ;;
- mov r1=sp
- ;;
- invala /* M */
- mov r30=cr.ifs
- ;;
- addl r1=-VMM_PT_REGS_SIZE,r1
- ;;
- adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
- adds r16=PT(CR_IPSR),r1
- ;;
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
- st8 [r16]=r29 /* save cr.ipsr */
- ;;
- lfetch.fault.excl.nt1 [r17]
- mov r29=b0
- ;;
- adds r16=PT(R8),r1 /* initialize first base pointer */
- adds r17=PT(R9),r1 /* initialize second base pointer */
- mov r18=r0 /* make sure r18 isn't NaT */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r8,16
-.mem.offset 8,0; st8.spill [r17]=r9,16
- ;;
-.mem.offset 0,0; st8.spill [r16]=r10,24
-.mem.offset 8,0; st8.spill [r17]=r11,24
- ;;
- st8 [r16]=r28,16 /* save cr.iip */
- st8 [r17]=r30,16 /* save cr.ifs */
- mov r8=ar.fpsr /* M */
- mov r9=ar.csd
- mov r10=ar.ssd
- movl r11=FPSR_DEFAULT /* L-unit */
- ;;
- st8 [r16]=r25,16 /* save ar.unat */
- st8 [r17]=r26,16 /* save ar.pfs */
- shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
- ;;
- st8 [r16]=r27,16 /* save ar.rsc */
- adds r17=16,r17 /* skip over ar_rnat field */
- ;;
- st8 [r17]=r31,16 /* save predicates */
- adds r16=16,r16 /* skip over ar_bspstore field */
- ;;
- st8 [r16]=r29,16 /* save b0 */
- st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
-.mem.offset 8,0; st8.spill [r17]=r12,16
- adds r12=-16,r1
- /* switch to kernel memory stack (with 16 bytes of scratch) */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r13,16
-.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
- ;;
-.mem.offset 0,0; st8.spill [r16]=r15,16
-.mem.offset 8,0; st8.spill [r17]=r14,16
- dep r14=-1,r0,60,4
- ;;
-.mem.offset 0,0; st8.spill [r16]=r2,16
-.mem.offset 8,0; st8.spill [r17]=r3,16
- adds r2=VMM_PT_REGS_R16_OFFSET,r1
- adds r14 = VMM_VCPU_GP_OFFSET,r13
- ;;
- mov r8=ar.ccv
- ld8 r14 = [r14]
- ;;
- mov r1=r14 /* establish kernel global pointer */
- ;; \
- bsw.1
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
- mov out0=r13
- ;;
- ssm psr.ic
- ;;
- srlz.i
- ;;
- //(p15) ssm psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- srlz.i // ensure everybody knows psr.ic is back on
- ;;
-.mem.offset 0,0; st8.spill [r2]=r16,16
-.mem.offset 8,0; st8.spill [r3]=r17,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r18,16
-.mem.offset 8,0; st8.spill [r3]=r19,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r20,16
-.mem.offset 8,0; st8.spill [r3]=r21,16
- mov r18=b6
- ;;
-.mem.offset 0,0; st8.spill [r2]=r22,16
-.mem.offset 8,0; st8.spill [r3]=r23,16
- mov r19=b7
- ;;
-.mem.offset 0,0; st8.spill [r2]=r24,16
-.mem.offset 8,0; st8.spill [r3]=r25,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r26,16
-.mem.offset 8,0; st8.spill [r3]=r27,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r28,16
-.mem.offset 8,0; st8.spill [r3]=r29,16
- ;;
-.mem.offset 0,0; st8.spill [r2]=r30,16
-.mem.offset 8,0; st8.spill [r3]=r31,32
- ;;
- mov ar.fpsr=r11 /* M-unit */
- st8 [r2]=r8,8 /* ar.ccv */
- adds r24=PT(B6)-PT(F7),r3
- ;;
- stf.spill [r2]=f6,32
- stf.spill [r3]=f7,32
- ;;
- stf.spill [r2]=f8,32
- stf.spill [r3]=f9,32
- ;;
- stf.spill [r2]=f10
- stf.spill [r3]=f11
- adds r25=PT(B7)-PT(F11),r3
- ;;
- st8 [r24]=r18,16 /* b6 */
- st8 [r25]=r19,16 /* b7 */
- ;;
- st8 [r24]=r9 /* ar.csd */
- st8 [r25]=r10 /* ar.ssd */
- ;;
- srlz.d // make sure we see the effect of cr.ivr
- addl r14=@gprel(ia64_leave_nested),gp
- ;;
- mov rp=r14
- br.call.sptk.many b6=kvm_ia64_handle_irq
- ;;
-END(kvm_interrupt)
-
- .global kvm_dispatch_vexirq
- .org kvm_ia64_ivt+0x3400
-//////////////////////////////////////////////////////////////////////
-// 0x3400 Entry 13 (size 64 bundles) Reserved
-ENTRY(kvm_virtual_exirq)
- mov r31=pr
- mov r19=13
- mov r30 =r0
- ;;
-kvm_dispatch_vexirq:
- cmp.eq p6,p0 = 1,r30
- ;;
-(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
-(p6) ld8 r1 = [r29]
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,1,0
- mov out0=r13
-
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- KVM_SAVE_REST
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- mov rp=r14
- br.call.sptk.many b6=kvm_vexirq
-END(kvm_virtual_exirq)
-
- .org kvm_ia64_ivt+0x3800
-/////////////////////////////////////////////////////////////////////
-// 0x3800 Entry 14 (size 64 bundles) Reserved
- KVM_FAULT(14)
- // this code segment is from 2.6.16.13
-
- .org kvm_ia64_ivt+0x3c00
-///////////////////////////////////////////////////////////////////////
-// 0x3c00 Entry 15 (size 64 bundles) Reserved
- KVM_FAULT(15)
-
- .org kvm_ia64_ivt+0x4000
-///////////////////////////////////////////////////////////////////////
-// 0x4000 Entry 16 (size 64 bundles) Reserved
- KVM_FAULT(16)
-
- .org kvm_ia64_ivt+0x4400
-//////////////////////////////////////////////////////////////////////
-// 0x4400 Entry 17 (size 64 bundles) Reserved
- KVM_FAULT(17)
-
- .org kvm_ia64_ivt+0x4800
-//////////////////////////////////////////////////////////////////////
-// 0x4800 Entry 18 (size 64 bundles) Reserved
- KVM_FAULT(18)
-
- .org kvm_ia64_ivt+0x4c00
-//////////////////////////////////////////////////////////////////////
-// 0x4c00 Entry 19 (size 64 bundles) Reserved
- KVM_FAULT(19)
-
- .org kvm_ia64_ivt+0x5000
-//////////////////////////////////////////////////////////////////////
-// 0x5000 Entry 20 (size 16 bundles) Page Not Present
-ENTRY(kvm_page_not_present)
- KVM_REFLECT(20)
-END(kvm_page_not_present)
-
- .org kvm_ia64_ivt+0x5100
-///////////////////////////////////////////////////////////////////////
-// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
-ENTRY(kvm_key_permission)
- KVM_REFLECT(21)
-END(kvm_key_permission)
-
- .org kvm_ia64_ivt+0x5200
-//////////////////////////////////////////////////////////////////////
-// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-ENTRY(kvm_iaccess_rights)
- KVM_REFLECT(22)
-END(kvm_iaccess_rights)
-
- .org kvm_ia64_ivt+0x5300
-//////////////////////////////////////////////////////////////////////
-// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-ENTRY(kvm_daccess_rights)
- KVM_REFLECT(23)
-END(kvm_daccess_rights)
-
- .org kvm_ia64_ivt+0x5400
-/////////////////////////////////////////////////////////////////////
-// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-ENTRY(kvm_general_exception)
- KVM_REFLECT(24)
- KVM_FAULT(24)
-END(kvm_general_exception)
-
- .org kvm_ia64_ivt+0x5500
-//////////////////////////////////////////////////////////////////////
-// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-ENTRY(kvm_disabled_fp_reg)
- KVM_REFLECT(25)
-END(kvm_disabled_fp_reg)
-
- .org kvm_ia64_ivt+0x5600
-////////////////////////////////////////////////////////////////////
-// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-ENTRY(kvm_nat_consumption)
- KVM_REFLECT(26)
-END(kvm_nat_consumption)
-
- .org kvm_ia64_ivt+0x5700
-/////////////////////////////////////////////////////////////////////
-// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-ENTRY(kvm_speculation_vector)
- KVM_REFLECT(27)
-END(kvm_speculation_vector)
-
- .org kvm_ia64_ivt+0x5800
-/////////////////////////////////////////////////////////////////////
-// 0x5800 Entry 28 (size 16 bundles) Reserved
- KVM_FAULT(28)
-
- .org kvm_ia64_ivt+0x5900
-///////////////////////////////////////////////////////////////////
-// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-ENTRY(kvm_debug_vector)
- KVM_FAULT(29)
-END(kvm_debug_vector)
-
- .org kvm_ia64_ivt+0x5a00
-///////////////////////////////////////////////////////////////
-// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-ENTRY(kvm_unaligned_access)
- KVM_REFLECT(30)
-END(kvm_unaligned_access)
-
- .org kvm_ia64_ivt+0x5b00
-//////////////////////////////////////////////////////////////////////
-// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-ENTRY(kvm_unsupported_data_reference)
- KVM_REFLECT(31)
-END(kvm_unsupported_data_reference)
-
- .org kvm_ia64_ivt+0x5c00
-////////////////////////////////////////////////////////////////////
-// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
-ENTRY(kvm_floating_point_fault)
- KVM_REFLECT(32)
-END(kvm_floating_point_fault)
-
- .org kvm_ia64_ivt+0x5d00
-/////////////////////////////////////////////////////////////////////
-// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-ENTRY(kvm_floating_point_trap)
- KVM_REFLECT(33)
-END(kvm_floating_point_trap)
-
- .org kvm_ia64_ivt+0x5e00
-//////////////////////////////////////////////////////////////////////
-// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-ENTRY(kvm_lower_privilege_trap)
- KVM_REFLECT(34)
-END(kvm_lower_privilege_trap)
-
- .org kvm_ia64_ivt+0x5f00
-//////////////////////////////////////////////////////////////////////
-// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-ENTRY(kvm_taken_branch_trap)
- KVM_REFLECT(35)
-END(kvm_taken_branch_trap)
-
- .org kvm_ia64_ivt+0x6000
-////////////////////////////////////////////////////////////////////
-// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-ENTRY(kvm_single_step_trap)
- KVM_REFLECT(36)
-END(kvm_single_step_trap)
- .global kvm_virtualization_fault_back
- .org kvm_ia64_ivt+0x6100
-/////////////////////////////////////////////////////////////////////
-// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
-ENTRY(kvm_virtualization_fault)
- mov r31=pr
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- st8 [r16] = r1
- adds r17 = VMM_VCPU_GP_OFFSET, r21
- ;;
- ld8 r1 = [r17]
- cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
- cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
- cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
- cmp.eq p9,p0=EVENT_RSM,r24
- cmp.eq p10,p0=EVENT_SSM,r24
- cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
- cmp.eq p12,p0=EVENT_THASH,r24
-(p6) br.dptk.many kvm_asm_mov_from_ar
-(p7) br.dptk.many kvm_asm_mov_from_rr
-(p8) br.dptk.many kvm_asm_mov_to_rr
-(p9) br.dptk.many kvm_asm_rsm
-(p10) br.dptk.many kvm_asm_ssm
-(p11) br.dptk.many kvm_asm_mov_to_psr
-(p12) br.dptk.many kvm_asm_thash
- ;;
-kvm_virtualization_fault_back:
- adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
- ;;
- ld8 r1 = [r16]
- ;;
- mov r19=37
- adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
- adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
- ;;
- st8 [r16] = r24
- st8 [r17] = r25
- ;;
- cmp.ne p6,p0=EVENT_RFI, r24
-(p6) br.sptk kvm_dispatch_virtualization_fault
- ;;
- adds r18=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18]
- ;;
- adds r18=VMM_VPD_VIFS_OFFSET,r18
- ;;
- ld8 r18=[r18]
- ;;
- tbit.z p6,p0=r18,63
-(p6) br.sptk kvm_dispatch_virtualization_fault
- ;;
-// if vifs.v=1, discard the current register frame
- alloc r18=ar.pfs,0,0,0,0
- br.sptk kvm_dispatch_virtualization_fault
-END(kvm_virtualization_fault)
-
- .org kvm_ia64_ivt+0x6200
-//////////////////////////////////////////////////////////////
-// 0x6200 Entry 38 (size 16 bundles) Reserved
- KVM_FAULT(38)
-
- .org kvm_ia64_ivt+0x6300
-/////////////////////////////////////////////////////////////////
-// 0x6300 Entry 39 (size 16 bundles) Reserved
- KVM_FAULT(39)
-
- .org kvm_ia64_ivt+0x6400
-/////////////////////////////////////////////////////////////////
-// 0x6400 Entry 40 (size 16 bundles) Reserved
- KVM_FAULT(40)
-
- .org kvm_ia64_ivt+0x6500
-//////////////////////////////////////////////////////////////////
-// 0x6500 Entry 41 (size 16 bundles) Reserved
- KVM_FAULT(41)
-
- .org kvm_ia64_ivt+0x6600
-//////////////////////////////////////////////////////////////////
-// 0x6600 Entry 42 (size 16 bundles) Reserved
- KVM_FAULT(42)
-
- .org kvm_ia64_ivt+0x6700
-//////////////////////////////////////////////////////////////////
-// 0x6700 Entry 43 (size 16 bundles) Reserved
- KVM_FAULT(43)
-
- .org kvm_ia64_ivt+0x6800
-//////////////////////////////////////////////////////////////////
-// 0x6800 Entry 44 (size 16 bundles) Reserved
- KVM_FAULT(44)
-
- .org kvm_ia64_ivt+0x6900
-///////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
-//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-ENTRY(kvm_ia32_exception)
- KVM_FAULT(45)
-END(kvm_ia32_exception)
-
- .org kvm_ia64_ivt+0x6a00
-////////////////////////////////////////////////////////////////////
-// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-ENTRY(kvm_ia32_intercept)
- KVM_FAULT(47)
-END(kvm_ia32_intercept)
-
- .org kvm_ia64_ivt+0x6c00
-/////////////////////////////////////////////////////////////////////
-// 0x6c00 Entry 48 (size 16 bundles) Reserved
- KVM_FAULT(48)
-
- .org kvm_ia64_ivt+0x6d00
-//////////////////////////////////////////////////////////////////////
-// 0x6d00 Entry 49 (size 16 bundles) Reserved
- KVM_FAULT(49)
-
- .org kvm_ia64_ivt+0x6e00
-//////////////////////////////////////////////////////////////////////
-// 0x6e00 Entry 50 (size 16 bundles) Reserved
- KVM_FAULT(50)
-
- .org kvm_ia64_ivt+0x6f00
-/////////////////////////////////////////////////////////////////////
-// 0x6f00 Entry 51 (size 16 bundles) Reserved
- KVM_FAULT(52)
-
- .org kvm_ia64_ivt+0x7100
-////////////////////////////////////////////////////////////////////
-// 0x7100 Entry 53 (size 16 bundles) Reserved
- KVM_FAULT(53)
-
- .org kvm_ia64_ivt+0x7200
-/////////////////////////////////////////////////////////////////////
-// 0x7200 Entry 54 (size 16 bundles) Reserved
- KVM_FAULT(54)
-
- .org kvm_ia64_ivt+0x7300
-////////////////////////////////////////////////////////////////////
-// 0x7300 Entry 55 (size 16 bundles) Reserved
- KVM_FAULT(55)
-
- .org kvm_ia64_ivt+0x7400
-////////////////////////////////////////////////////////////////////
-// 0x7400 Entry 56 (size 16 bundles) Reserved
- KVM_FAULT(56)
-
- .org kvm_ia64_ivt+0x7500
-/////////////////////////////////////////////////////////////////////
-// 0x7500 Entry 57 (size 16 bundles) Reserved
- KVM_FAULT(57)
-
- .org kvm_ia64_ivt+0x7600
-/////////////////////////////////////////////////////////////////////
-// 0x7600 Entry 58 (size 16 bundles) Reserved
- KVM_FAULT(58)
-
- .org kvm_ia64_ivt+0x7700
-////////////////////////////////////////////////////////////////////
-// 0x7700 Entry 59 (size 16 bundles) Reserved
- KVM_FAULT(59)
-
- .org kvm_ia64_ivt+0x7800
-////////////////////////////////////////////////////////////////////
-// 0x7800 Entry 60 (size 16 bundles) Reserved
- KVM_FAULT(60)
-
- .org kvm_ia64_ivt+0x7900
-/////////////////////////////////////////////////////////////////////
-// 0x7900 Entry 61 (size 16 bundles) Reserved
- KVM_FAULT(61)
-
- .org kvm_ia64_ivt+0x7a00
-/////////////////////////////////////////////////////////////////////
-// 0x7a00 Entry 62 (size 16 bundles) Reserved
- KVM_FAULT(62)
-
- .org kvm_ia64_ivt+0x7b00
-/////////////////////////////////////////////////////////////////////
-// 0x7b00 Entry 63 (size 16 bundles) Reserved
- KVM_FAULT(63)
-
- .org kvm_ia64_ivt+0x7c00
-////////////////////////////////////////////////////////////////////
-// 0x7c00 Entry 64 (size 16 bundles) Reserved
- KVM_FAULT(64)
-
- .org kvm_ia64_ivt+0x7d00
-/////////////////////////////////////////////////////////////////////
-// 0x7d00 Entry 65 (size 16 bundles) Reserved
- KVM_FAULT(65)
-
- .org kvm_ia64_ivt+0x7e00
-/////////////////////////////////////////////////////////////////////
-// 0x7e00 Entry 66 (size 16 bundles) Reserved
- KVM_FAULT(66)
-
- .org kvm_ia64_ivt+0x7f00
-////////////////////////////////////////////////////////////////////
-// 0x7f00 Entry 67 (size 16 bundles) Reserved
- KVM_FAULT(67)
-
- .org kvm_ia64_ivt+0x8000
-// There is no particular reason for this code to be here, other than that
-// there happens to be space here that would go unused otherwise. If this
-// fault ever gets "unreserved", simply move the following code to a more
-// suitable spot...
-
-
-ENTRY(kvm_dtlb_miss_dispatch)
- mov r19 = 2
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
- ;;
- KVM_SAVE_REST
- KVM_SAVE_EXTRA
- mov rp=r14
- ;;
- adds out2=16,r12
- br.call.sptk.many b6=kvm_page_fault
-END(kvm_dtlb_miss_dispatch)
-
-ENTRY(kvm_itlb_miss_dispatch)
-
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12
- br.call.sptk.many b6=kvm_page_fault
-END(kvm_itlb_miss_dispatch)
-
-ENTRY(kvm_dispatch_reflection)
-/*
- * Input:
- * psr.ic: off
- * r19: intr type (offset into ivt, see ia64_int.h)
- * r31: contains saved predicates (pr)
- */
- KVM_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out0=cr.ifa
- mov out1=cr.isr
- mov out2=cr.iim
- mov out3=r15
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- adds out4=16,r12
- br.call.sptk.many b6=reflect_interruption
-END(kvm_dispatch_reflection)
-
-ENTRY(kvm_dispatch_virtualization_fault)
- adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
- adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
- ;;
- st8 [r16] = r24
- st8 [r17] = r25
- ;;
- KVM_SAVE_MIN_WITH_COVER_R19
- ;;
- alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
- mov out0=r13 //vcpu
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
- (p15) ssm psr.i // restore psr.i
- addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
- ;;
- KVM_SAVE_REST
- KVM_SAVE_EXTRA
- mov rp=r14
- ;;
- adds out1=16,sp //regs
- br.call.sptk.many b6=kvm_emulate
-END(kvm_dispatch_virtualization_fault)
-
-
-ENTRY(kvm_dispatch_interrupt)
- KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
- ;;
- alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- ssm psr.ic
- ;;
- srlz.i
- ;;
- (p15) ssm psr.i
- addl r14=@gprel(ia64_leave_hypervisor),gp
- ;;
- KVM_SAVE_REST
- mov rp=r14
- ;;
- mov out0=r13 // pass current vcpu (r13) as the first arg
- br.call.sptk.many b6=kvm_ia64_handle_irq
-END(kvm_dispatch_interrupt)
-
-GLOBAL_ENTRY(ia64_leave_nested)
- rsm psr.i
- ;;
- adds r21=PT(PR)+16,r12
- ;;
- lfetch [r21],PT(CR_IPSR)-PT(PR)
- adds r2=PT(B6)+16,r12
- adds r3=PT(R16)+16,r12
- ;;
- lfetch [r21]
- ld8 r28=[r2],8 // load b6
- adds r29=PT(R24)+16,r12
-
- ld8.fill r16=[r3]
- adds r3=PT(AR_CSD)-PT(R16),r3
- adds r30=PT(AR_CCV)+16,r12
- ;;
- ld8.fill r24=[r29]
- ld8 r15=[r30] // load ar.ccv
- ;;
- ld8 r29=[r2],16 // load b7
- ld8 r30=[r3],16 // load ar.csd
- ;;
- ld8 r31=[r2],16 // load ar.ssd
- ld8.fill r8=[r3],16
- ;;
- ld8.fill r9=[r2],16
- ld8.fill r10=[r3],PT(R17)-PT(R10)
- ;;
- ld8.fill r11=[r2],PT(R18)-PT(R11)
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- mov ar.csd=r30
- mov ar.ssd=r31
- ;;
- rsm psr.i | psr.ic
- // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- srlz.i
- ;;
- ld8.fill r22=[r2],24
- ld8.fill r23=[r3],24
- mov b6=r28
- ;;
- ld8.fill r25=[r2],16
- ld8.fill r26=[r3],16
- mov b7=r29
- ;;
- ld8.fill r27=[r2],16
- ld8.fill r28=[r3],16
- ;;
- ld8.fill r29=[r2],16
- ld8.fill r30=[r3],24
- ;;
- ld8.fill r31=[r2],PT(F9)-PT(R31)
- adds r3=PT(F10)-PT(F6),r3
- ;;
- ldf.fill f9=[r2],PT(F6)-PT(F9)
- ldf.fill f10=[r3],PT(F8)-PT(F10)
- ;;
- ldf.fill f6=[r2],PT(F7)-PT(F6)
- ;;
- ldf.fill f7=[r2],PT(F11)-PT(F7)
- ldf.fill f8=[r3],32
- ;;
- srlz.i // ensure interruption collection is off
- mov ar.ccv=r15
- ;;
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
- ;;
- ldf.fill f11=[r2]
-// mov r18=r13
-// mov r21=r13
- adds r16=PT(CR_IPSR)+16,r12
- adds r17=PT(CR_IIP)+16,r12
- ;;
- ld8 r29=[r16],16 // load cr.ipsr
- ld8 r28=[r17],16 // load cr.iip
- ;;
- ld8 r30=[r16],16 // load cr.ifs
- ld8 r25=[r17],16 // load ar.unat
- ;;
- ld8 r26=[r16],16 // load ar.pfs
- ld8 r27=[r17],16 // load ar.rsc
- cmp.eq p9,p0=r0,r0
- // set p9 to indicate that we should restore cr.ifs
- ;;
- ld8 r24=[r16],16 // load ar.rnat (may be garbage)
- ld8 r23=[r17],16// load ar.bspstore (may be garbage)
- ;;
- ld8 r31=[r16],16 // load predicates
- ld8 r22=[r17],16 // load b0
- ;;
- ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 // load r1
- ;;
- ld8.fill r12=[r16],16
- ld8.fill r13=[r17],16
- ;;
- ld8 r20=[r16],16 // ar.fpsr
- ld8.fill r15=[r17],16
- ;;
- ld8.fill r14=[r16],16
- ld8.fill r2=[r17]
- ;;
- ld8.fill r3=[r16]
- ;;
- mov r16=ar.bsp // get existing backing store pointer
- ;;
- mov b0=r22
- mov ar.pfs=r26
- mov cr.ifs=r30
- mov cr.ipsr=r29
- mov ar.fpsr=r20
- mov cr.iip=r28
- ;;
- mov ar.rsc=r27
- mov ar.unat=r25
- mov pr=r31,-1
- rfi
-END(ia64_leave_nested)
-
-GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
-/*
- * work.need_resched etc. mustn't get changed by this CPU before it
- * returns to user- or fsys-mode, hence we disable interrupts early on:
- */
- adds r2 = PT(R4)+16,r12
- adds r3 = PT(R5)+16,r12
- adds r8 = PT(EML_UNAT)+16,r12
- ;;
- ld8 r8 = [r8]
- ;;
- mov ar.unat=r8
- ;;
- ld8.fill r4=[r2],16 //load r4
- ld8.fill r5=[r3],16 //load r5
- ;;
- ld8.fill r6=[r2] //load r6
- ld8.fill r7=[r3] //load r7
- ;;
-END(ia64_leave_hypervisor_prepare)
-//fall through
-GLOBAL_ENTRY(ia64_leave_hypervisor)
- rsm psr.i
- ;;
- br.call.sptk.many b0=leave_hypervisor_tail
- ;;
- adds r20=PT(PR)+16,r12
- adds r8=PT(EML_UNAT)+16,r12
- ;;
- ld8 r8=[r8]
- ;;
- mov ar.unat=r8
- ;;
- lfetch [r20],PT(CR_IPSR)-PT(PR)
- adds r2 = PT(B6)+16,r12
- adds r3 = PT(B7)+16,r12
- ;;
- lfetch [r20]
- ;;
- ld8 r24=[r2],16 /* B6 */
- ld8 r25=[r3],16 /* B7 */
- ;;
- ld8 r26=[r2],16 /* ar_csd */
- ld8 r27=[r3],16 /* ar_ssd */
- mov b6 = r24
- ;;
- ld8.fill r8=[r2],16
- ld8.fill r9=[r3],16
- mov b7 = r25
- ;;
- mov ar.csd = r26
- mov ar.ssd = r27
- ;;
- ld8.fill r10=[r2],PT(R15)-PT(R10)
- ld8.fill r11=[r3],PT(R14)-PT(R11)
- ;;
- ld8.fill r15=[r2],PT(R16)-PT(R15)
- ld8.fill r14=[r3],PT(R17)-PT(R14)
- ;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
- ;;
- ld8.fill r18=[r2],16
- ld8.fill r19=[r3],16
- ;;
- ld8.fill r20=[r2],16
- ld8.fill r21=[r3],16
- ;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
- ;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
- ;;
- ld8.fill r30=[r2],PT(F6)-PT(R30)
- ld8.fill r31=[r3],PT(F7)-PT(R31)
- ;;
- rsm psr.i | psr.ic
- // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
- ;;
- srlz.i // ensure interruption collection is off
- ;;
- bsw.0
- ;;
- adds r16 = PT(CR_IPSR)+16,r12
- adds r17 = PT(CR_IIP)+16,r12
- mov r21=r13 // get current
- ;;
- ld8 r31=[r16],16 // load cr.ipsr
- ld8 r30=[r17],16 // load cr.iip
- ;;
- ld8 r29=[r16],16 // load cr.ifs
- ld8 r28=[r17],16 // load ar.unat
- ;;
- ld8 r27=[r16],16 // load ar.pfs
- ld8 r26=[r17],16 // load ar.rsc
- ;;
- ld8 r25=[r16],16 // load ar.rnat
- ld8 r24=[r17],16 // load ar.bspstore
- ;;
- ld8 r23=[r16],16 // load predicates
- ld8 r22=[r17],16 // load b0
- ;;
- ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
- ld8.fill r1=[r17],16 //load r1
- ;;
- ld8.fill r12=[r16],16 //load r12
- ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
- ;;
- ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
- ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
- ;;
- ld8.fill r3=[r16] //load r3
- ld8 r18=[r17] //load ar_ccv
- ;;
- mov ar.fpsr=r19
- mov ar.ccv=r18
- shr.u r18=r20,16
- ;;
-kvm_rbs_switch:
- mov r19=96
-
-kvm_dont_preserve_current_frame:
-/*
- * To prevent leaking bits between the hypervisor and guest domain,
- * we must clear the stacked registers in the "invalid" partition here.
- * Not pretty, but at least it's fast (about 5 registers/cycle on McKinley).
- */
-# define pRecurse p6
-# define pReturn p7
-# define Nregs 14
-
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
- sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
- ;;
- mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
- shladd in0=loc1,3,r19
- mov in1=0
- ;;
- TEXT_ALIGN(32)
-kvm_rse_clear_invalid:
- alloc loc0=ar.pfs,2,Nregs-2,2,0
- cmp.lt pRecurse,p0=Nregs*8,in0
- // if more than Nregs regs left to clear, (re)curse
- add out0=-Nregs*8,in0
- add out1=1,in1 // increment recursion count
- mov loc1=0
- mov loc2=0
- ;;
- mov loc3=0
- mov loc4=0
- mov loc5=0
- mov loc6=0
- mov loc7=0
-(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
- ;;
- mov loc8=0
- mov loc9=0
- cmp.ne pReturn,p0=r0,in1
- // if recursion count != 0, we need to do a br.ret
- mov loc10=0
- mov loc11=0
-(pReturn) br.ret.dptk.many b0
-
-# undef pRecurse
-# undef pReturn
-
-// loadrs has already been shifted
- alloc r16=ar.pfs,0,0,0,0 // drop current register frame
- ;;
- loadrs
- ;;
- mov ar.bspstore=r24
- ;;
- mov ar.unat=r28
- mov ar.rnat=r25
- mov ar.rsc=r26
- ;;
- mov cr.ipsr=r31
- mov cr.iip=r30
- mov cr.ifs=r29
- mov ar.pfs=r27
- adds r18=VMM_VPD_BASE_OFFSET,r21
- ;;
- ld8 r18=[r18] //vpd
- adds r17=VMM_VCPU_ISR_OFFSET,r21
- ;;
- ld8 r17=[r17]
- adds r19=VMM_VPD_VPSR_OFFSET,r18
- ;;
- ld8 r19=[r19] //vpsr
- mov r25=r18
- adds r16= VMM_VCPU_GP_OFFSET,r21
- ;;
- ld8 r16= [r16] // load gp into r16 (used below to compute the return address)
- movl r24=@gprel(ia64_vmm_entry) // calculate return address
- ;;
- add r24=r24,r16
- ;;
- br.sptk.many kvm_vps_sync_write // call the service
- ;;
-END(ia64_leave_hypervisor)
-// fall through
-GLOBAL_ENTRY(ia64_vmm_entry)
-/*
- * must be at bank 0
- * parameters:
- * r17: cr.isr
- * r18: vpd
- * r19: vpsr
- * r22: b0
- * r23: predicates
- */
- mov r24=r22
- mov r25=r18
- tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
-(p1) br.cond.sptk.few kvm_vps_resume_normal
-(p2) br.cond.sptk.many kvm_vps_resume_handler
- ;;
-END(ia64_vmm_entry)
-
-/*
- * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
- * u64 arg3, u64 arg4, u64 arg5,
- * u64 arg6, u64 arg7);
- *
- * XXX: The currently defined services use at most 4 args. The
- * rest are not consumed.
- */
-GLOBAL_ENTRY(ia64_call_vsa)
- .regstk 4,4,0,0
-
-rpsave = loc0
-pfssave = loc1
-psrsave = loc2
-entry = loc3
-hostret = r24
-
- alloc pfssave=ar.pfs,4,4,0,0
- mov rpsave=rp
- adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
- ;;
- ld8 entry=[entry]
-1: mov hostret=ip
- mov r25=in1 // copy arguments
- mov r26=in2
- mov r27=in3
- mov psrsave=psr
- ;;
- tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
- tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
- ;;
- add hostret=2f-1b,hostret // calculate return address
- add entry=entry,in0
- ;;
- rsm psr.i | psr.ic
- ;;
- srlz.i
- mov b6=entry
- br.cond.sptk b6 // call the service
-2:
-// Architectural sequence for enabling interrupts if necessary
-(p7) ssm psr.ic
- ;;
-(p7) srlz.i
- ;;
-(p6) ssm psr.i
- ;;
- mov rp=rpsave
- mov ar.pfs=pfssave
- mov r8=r31
- ;;
- srlz.d
- br.ret.sptk rp
-
-END(ia64_call_vsa)
-
-#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
-
-GLOBAL_ENTRY(vmm_reset_entry)
- // set up ipsr, iip, vpd.vpsr, dcr
- // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
- // For DCR: all bits 0
- bsw.0
- ;;
- mov r21 =r13
- adds r14=-VMM_PT_REGS_SIZE, r12
- ;;
- movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
- movl r10=0x8000000000000000
- adds r16=PT(CR_IIP), r14
- adds r20=PT(R1), r14
- ;;
- rsm psr.ic | psr.i
- ;;
- srlz.i
- ;;
- mov ar.rsc = 0
- ;;
- flushrs
- ;;
- mov ar.bspstore = 0
- // clear BSPSTORE
- ;;
- mov cr.ipsr=r6
- mov cr.ifs=r10
- ld8 r4 = [r16] // Set init iip for first run.
- ld8 r1 = [r20]
- ;;
- mov cr.iip=r4
- adds r16=VMM_VPD_BASE_OFFSET,r13
- ;;
- ld8 r18=[r16]
- ;;
- adds r19=VMM_VPD_VPSR_OFFSET,r18
- ;;
- ld8 r19=[r19]
- mov r17=r0
- mov r22=r0
- mov r23=r0
- br.cond.sptk ia64_vmm_entry
- br.ret.sptk b0
-END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
deleted file mode 100644
index b214b5b0432d..000000000000
--- a/arch/ia64/kvm/vti.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * vti.h: prototypes for the general VT-related interface
- * Copyright (c) 2004, Intel Corporation.
- *
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Fred Yang (fred.yang@intel.com)
- * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
- *
- * Copyright (c) 2007, Intel Corporation.
- * Zhang xiantao <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-#ifndef _KVM_VT_I_H
-#define _KVM_VT_I_H
-
-#ifndef __ASSEMBLY__
-#include <asm/page.h>
-
-#include <linux/kvm_host.h>
-
-/* define itr.i and itr.d in ia64_itr function */
-#define ITR 0x01
-#define DTR 0x02
-#define IaDTR 0x03
-
-#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
-#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
-
-#define RR6 (6UL<<61)
-#define RR7 (7UL<<61)
-
-
-/* config_options in pal_vp_init_env */
-#define VP_INITIALIZE 1UL
-#define VP_FR_PMC (1UL<<1)
-#define VP_OPCODE (1UL<<8)
-#define VP_CAUSE (1UL<<9)
-#define VP_FW_ACC (1UL<<63)
-
-/* init vp env with initializing vm_buffer */
-#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
- VP_OPCODE | VP_CAUSE | VP_FW_ACC)
-/* init vp env without initializing vm_buffer */
-#define VP_INIT_ENV (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)
-
-#define PAL_VP_CREATE 265
-/* Stacked Virt. Initializes a new VPD for the operation of
- * a new virtual processor in the virtual environment.
- */
-#define PAL_VP_ENV_INFO 266
-/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
-#define PAL_VP_EXIT_ENV 267
-/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
-#define PAL_VP_INIT_ENV 268
-/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
-#define PAL_VP_REGISTER 269
-/*Stacked Virt. Register a different host IVT for the virtual processor.*/
-#define PAL_VP_RESUME 270
-/* Renamed from PAL_VP_RESUME */
-#define PAL_VP_RESTORE 270
-/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
-#define PAL_VP_SUSPEND 271
-/* Renamed from PAL_VP_SUSPEND */
-#define PAL_VP_SAVE 271
-/* Stacked Virt. Suspends operation for the specified virtual processor on
- * the logical processor.
- */
-#define PAL_VP_TERMINATE 272
-/* Stacked Virt. Terminates operation for the specified virtual processor.*/
-
-union vac {
- unsigned long value;
- struct {
- unsigned int a_int:1;
- unsigned int a_from_int_cr:1;
- unsigned int a_to_int_cr:1;
- unsigned int a_from_psr:1;
- unsigned int a_from_cpuid:1;
- unsigned int a_cover:1;
- unsigned int a_bsw:1;
- long reserved:57;
- };
-};
-
-union vdc {
- unsigned long value;
- struct {
- unsigned int d_vmsw:1;
- unsigned int d_extint:1;
- unsigned int d_ibr_dbr:1;
- unsigned int d_pmc:1;
- unsigned int d_to_pmd:1;
- unsigned int d_itm:1;
- long reserved:58;
- };
-};
-
-struct vpd {
- union vac vac;
- union vdc vdc;
- unsigned long virt_env_vaddr;
- unsigned long reserved1[29];
- unsigned long vhpi;
- unsigned long reserved2[95];
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long reserved3[11];
- unsigned long vpsr;
- unsigned long vpr;
- unsigned long reserved4[76];
- union {
- unsigned long vcr[128];
- struct {
- unsigned long dcr;
- unsigned long itm;
- unsigned long iva;
- unsigned long rsv1[5];
- unsigned long pta;
- unsigned long rsv2[7];
- unsigned long ipsr;
- unsigned long isr;
- unsigned long rsv3;
- unsigned long iip;
- unsigned long ifa;
- unsigned long itir;
- unsigned long iipa;
- unsigned long ifs;
- unsigned long iim;
- unsigned long iha;
- unsigned long rsv4[38];
- unsigned long lid;
- unsigned long ivr;
- unsigned long tpr;
- unsigned long eoi;
- unsigned long irr[4];
- unsigned long itv;
- unsigned long pmv;
- unsigned long cmcv;
- unsigned long rsv5[5];
- unsigned long lrr0;
- unsigned long lrr1;
- unsigned long rsv6[46];
- };
- };
- unsigned long reserved5[128];
- unsigned long reserved6[3456];
- unsigned long vmm_avail[128];
- unsigned long reserved7[4096];
-};
-
-#define PAL_PROC_VM_BIT (1UL << 40)
-#define PAL_PROC_VMSW_BIT (1UL << 54)
-
-static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
- u64 *vp_env_info)
-{
- struct ia64_pal_retval iprv;
- PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
- *buffer_size = iprv.v0;
- *vp_env_info = iprv.v1;
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_exit_env(u64 iva)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
- u64 vbase_addr, u64 *vsa_base)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
- vbase_addr);
- *vsa_base = iprv.v0;
-
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
-
- return iprv.status;
-}
-
-static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
-{
- struct ia64_pal_retval iprv;
-
- PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
-
- return iprv.status;
-}
-
-#endif
-
-/* VPD field offsets */
-#define VPD_VAC_START_OFFSET 0
-#define VPD_VDC_START_OFFSET 8
-#define VPD_VHPI_START_OFFSET 256
-#define VPD_VGR_START_OFFSET 1024
-#define VPD_VBGR_START_OFFSET 1152
-#define VPD_VNAT_START_OFFSET 1280
-#define VPD_VBNAT_START_OFFSET 1288
-#define VPD_VCPUID_START_OFFSET 1296
-#define VPD_VPSR_START_OFFSET 1424
-#define VPD_VPR_START_OFFSET 1432
-#define VPD_VRSE_CFLE_START_OFFSET 1440
-#define VPD_VCR_START_OFFSET 2048
-#define VPD_VTPR_START_OFFSET 2576
-#define VPD_VRR_START_OFFSET 3072
-#define VPD_VMM_VAIL_START_OFFSET 31744
-
-/* Virtualization faults */
-
-#define EVENT_MOV_TO_AR 1
-#define EVENT_MOV_TO_AR_IMM 2
-#define EVENT_MOV_FROM_AR 3
-#define EVENT_MOV_TO_CR 4
-#define EVENT_MOV_FROM_CR 5
-#define EVENT_MOV_TO_PSR 6
-#define EVENT_MOV_FROM_PSR 7
-#define EVENT_ITC_D 8
-#define EVENT_ITC_I 9
-#define EVENT_MOV_TO_RR 10
-#define EVENT_MOV_TO_DBR 11
-#define EVENT_MOV_TO_IBR 12
-#define EVENT_MOV_TO_PKR 13
-#define EVENT_MOV_TO_PMC 14
-#define EVENT_MOV_TO_PMD 15
-#define EVENT_ITR_D 16
-#define EVENT_ITR_I 17
-#define EVENT_MOV_FROM_RR 18
-#define EVENT_MOV_FROM_DBR 19
-#define EVENT_MOV_FROM_IBR 20
-#define EVENT_MOV_FROM_PKR 21
-#define EVENT_MOV_FROM_PMC 22
-#define EVENT_MOV_FROM_CPUID 23
-#define EVENT_SSM 24
-#define EVENT_RSM 25
-#define EVENT_PTC_L 26
-#define EVENT_PTC_G 27
-#define EVENT_PTC_GA 28
-#define EVENT_PTR_D 29
-#define EVENT_PTR_I 30
-#define EVENT_THASH 31
-#define EVENT_TTAG 32
-#define EVENT_TPA 33
-#define EVENT_TAK 34
-#define EVENT_PTC_E 35
-#define EVENT_COVER 36
-#define EVENT_RFI 37
-#define EVENT_BSW_0 38
-#define EVENT_BSW_1 39
-#define EVENT_VMSW 40
-
-/* PAL virtual service offsets */
-#define PAL_VPS_RESUME_NORMAL 0x0000
-#define PAL_VPS_RESUME_HANDLER 0x0400
-#define PAL_VPS_SYNC_READ 0x0800
-#define PAL_VPS_SYNC_WRITE 0x0c00
-#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
-#define PAL_VPS_THASH 0x1400
-#define PAL_VPS_TTAG 0x1800
-#define PAL_VPS_RESTORE 0x1c00
-#define PAL_VPS_SAVE 0x2000
-
-#endif /* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
deleted file mode 100644
index a7869f8f49a6..000000000000
--- a/arch/ia64/kvm/vtlb.c
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * vtlb.c: guest virtual tlb handling module.
- * Copyright (c) 2004, Intel Corporation.
- * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- *
- * Copyright (c) 2007, Intel Corporation.
- * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include "vcpu.h"
-
-#include <linux/rwsem.h>
-
-#include <asm/tlb.h>
-
-/*
- * Check to see if the address rid:va is translated by the TLB
- */
-
-static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
-{
- return ((trp->p) && (trp->rid == rid)
- && ((va-trp->vadr) < PSIZE(trp->ps)));
-}
-
-/*
- * Only for GUEST TR format.
- */
-static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
-{
- u64 sa1, ea1;
-
- if (!trp->p || trp->rid != rid)
- return 0;
-
- sa1 = trp->vadr;
- ea1 = sa1 + PSIZE(trp->ps) - 1;
- eva -= 1;
- if ((sva > ea1) || (sa1 > eva))
- return 0;
- else
- return 1;
-
-}
-
-void machine_tlb_purge(u64 va, u64 ps)
-{
- ia64_ptcl(va, ps << 2);
-}
-
-void local_flush_tlb_all(void)
-{
- int i, j;
- unsigned long flags, count0, count1;
- unsigned long stride0, stride1, addr;
-
- addr = current_vcpu->arch.ptce_base;
- count0 = current_vcpu->arch.ptce_count[0];
- count1 = current_vcpu->arch.ptce_count[1];
- stride0 = current_vcpu->arch.ptce_stride[0];
- stride1 = current_vcpu->arch.ptce_stride[1];
-
- local_irq_save(flags);
- for (i = 0; i < count0; ++i) {
- for (j = 0; j < count1; ++j) {
- ia64_ptce(addr);
- addr += stride1;
- }
- addr += stride0;
- }
- local_irq_restore(flags);
- ia64_srlz_i(); /* srlz.i implies srlz.d */
-}
-
-int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
-{
- union ia64_rr vrr;
- union ia64_pta vpta;
- struct ia64_psr vpsr;
-
- vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
- vrr.val = vcpu_get_rr(vcpu, vadr);
- vpta.val = vcpu_get_pta(vcpu);
-
- if (vrr.ve & vpta.ve) {
- switch (ref) {
- case DATA_REF:
- case NA_REF:
- return vpsr.dt;
- case INST_REF:
- return vpsr.dt && vpsr.it && vpsr.ic;
- case RSE_REF:
- return vpsr.dt && vpsr.rt;
-
- }
- }
- return 0;
-}
-
-struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
-{
- u64 index, pfn, rid, pfn_bits;
-
- pfn_bits = vpta.size - 5 - 8;
- pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
- rid = _REGION_ID(vrr);
- index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
- *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
-
- return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
- (index << 5));
-}
-
-struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
-{
-
- struct thash_data *trp;
- int i;
- u64 rid;
-
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- if (type == D_TLB) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
- i < NDTRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va))
- return trp;
- }
- }
- } else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
- i < NITRS; i++, trp++) {
- if (__is_tr_translated(trp, rid, va))
- return trp;
- }
- }
- }
-
- return NULL;
-}
-
-static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
-{
- union ia64_rr rr;
- struct thash_data *head;
- unsigned long ps, gpaddr;
-
- ps = itir_ps(itir);
- rr.val = ia64_get_rr(ifa);
-
- gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
- (ifa & ((1UL << ps) - 1));
-
- head = (struct thash_data *)ia64_thash(ifa);
- head->etag = INVALID_TI_TAG;
- ia64_mf();
- head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
- head->itir = rr.ps << 2;
- head->etag = ia64_ttag(ifa);
- head->gpaddr = gpaddr;
-}
-
-void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
-{
- u64 i, dirty_pages = 1;
- u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
- vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
- void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
-
- dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
-
- vmm_spin_lock(lock);
- for (i = 0; i < dirty_pages; i++) {
- /* avoid RMW */
- if (!test_bit(base_gfn + i, dirty_bitmap))
- set_bit(base_gfn + i, dirty_bitmap);
- }
- vmm_spin_unlock(lock);
-}
-
-void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
-{
- u64 phy_pte, psr;
- union ia64_rr mrr;
-
- mrr.val = ia64_get_rr(va);
- phy_pte = translate_phy_pte(&pte, itir, va);
-
- if (itir_ps(itir) >= mrr.ps) {
- vhpt_insert(phy_pte, itir, va, pte);
- } else {
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
- psr = ia64_clear_ic();
- ia64_itc(type, va, phy_pte, itir_ps(itir));
- paravirt_dv_serialize_data();
- ia64_set_psr(psr);
- }
-
- if (!(pte&VTLB_PTE_IO))
- mark_pages_dirty(v, pte, itir_ps(itir));
-}
-
-/*
- * vhpt lookup
- */
-struct thash_data *vhpt_lookup(u64 va)
-{
- struct thash_data *head;
- u64 tag;
-
- head = (struct thash_data *)ia64_thash(va);
- tag = ia64_ttag(va);
- if (head->etag == tag)
- return head;
- return NULL;
-}
-
-u64 guest_vhpt_lookup(u64 iha, u64 *pte)
-{
- u64 ret;
- struct thash_data *data;
-
- data = __vtr_lookup(current_vcpu, iha, D_TLB);
- if (data != NULL)
- thash_vhpt_insert(current_vcpu, data->page_flags,
- data->itir, iha, D_TLB);
-
- asm volatile ("rsm psr.ic|psr.i;;"
- "srlz.d;;"
- "ld8.s r9=[%1];;"
- "tnat.nz p6,p7=r9;;"
- "(p6) mov %0=1;"
- "(p6) mov r9=r0;"
- "(p7) extr.u r9=r9,0,53;;"
- "(p7) mov %0=r0;"
- "(p7) st8 [%2]=r9;;"
- "ssm psr.ic;;"
- "srlz.d;;"
- "ssm psr.i;;"
- "srlz.d;;"
- : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
-
- return ret;
-}
-
-/*
- * purge software guest tlb
- */
-
-static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- struct thash_data *cur;
- u64 start, curadr, size, psbits, tag, rr_ps, num;
- union ia64_rr vrr;
- struct thash_cb *hcb = &v->arch.vtlb;
-
- vrr.val = vcpu_get_rr(v, va);
- psbits = VMX(v, psbits[(va >> 61)]);
- start = va & ~((1UL << ps) - 1);
- while (psbits) {
- curadr = start;
- rr_ps = __ffs(psbits);
- psbits &= ~(1UL << rr_ps);
- num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
- size = PSIZE(rr_ps);
- vrr.ps = rr_ps;
- while (num) {
- cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
- if (cur->etag == tag && cur->ps == rr_ps)
- cur->etag = INVALID_TI_TAG;
- curadr += size;
- num--;
- }
- }
-}
-
-
-/*
- * purge VHPT and machine TLB
- */
-static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- struct thash_data *cur;
- u64 start, size, tag, num;
- union ia64_rr rr;
-
- start = va & ~((1UL << ps) - 1);
- rr.val = ia64_get_rr(va);
- size = PSIZE(rr.ps);
- num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
- while (num) {
- cur = (struct thash_data *)ia64_thash(start);
- tag = ia64_ttag(start);
- if (cur->etag == tag)
- cur->etag = INVALID_TI_TAG;
- start += size;
- num--;
- }
- machine_tlb_purge(va, ps);
-}
-
-/*
- * Insert an entry into hash TLB or VHPT.
- * NOTES:
- * 1: When inserting into the VHPT/thash, "va" must be an address
- * covered by the inserted machine VHPT entry.
- * 2: The entry is always in TLB format.
- * 3: The caller needs to make sure the new entry does not overlap
- * with any existing entry.
- */
-void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
-{
- struct thash_data *head;
- union ia64_rr vrr;
- u64 tag;
- struct thash_cb *hcb = &v->arch.vtlb;
-
- vrr.val = vcpu_get_rr(v, va);
- vrr.ps = itir_ps(itir);
- VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
- head = vsa_thash(hcb->pta, va, vrr.val, &tag);
- head->page_flags = pte;
- head->itir = itir;
- head->etag = tag;
-}
-
-int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
-{
- struct thash_data *trp;
- int i;
- u64 end, rid;
-
- rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;
- end = va + PSIZE(ps);
- if (type == D_TLB) {
- if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
- i < NDTRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end))
- return i;
- }
- }
- } else {
- if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
- for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
- i < NITRS; i++, trp++) {
- if (__is_tr_overlap(trp, rid, va, end))
- return i;
- }
- }
- }
- return -1;
-}
-
-/*
- * Purge entries in VTLB and VHPT
- */
-void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- if (vcpu_quick_region_check(v->arch.tc_regions, va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
-{
- u64 old_va = va;
- va = REGION_OFFSET(va);
- if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
- vtlb_purge(v, va, ps);
- vhpt_purge(v, va, ps);
-}
-
-u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
-{
- u64 ps, ps_mask, paddr, maddr, io_mask;
- union pte_flags phy_pte;
-
- ps = itir_ps(itir);
- ps_mask = ~((1UL << ps) - 1);
- phy_pte.val = *pte;
- paddr = *pte;
- paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
- maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
- io_mask = maddr & GPFN_IO_MASK;
- if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
- *pte |= VTLB_PTE_IO;
- return -1;
- }
- maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
- (paddr & ~PAGE_MASK);
- phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
- return phy_pte.val;
-}
-
-/*
- * Purge overlapping TCs and then insert the new entry to emulate itc ops.
- * Note: only TC entries can be purged and inserted.
- */
-void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
- u64 ifa, int type)
-{
- u64 ps;
- u64 phy_pte, io_mask, index;
- union ia64_rr vrr, mrr;
-
- ps = itir_ps(itir);
- vrr.val = vcpu_get_rr(v, ifa);
- mrr.val = ia64_get_rr(ifa);
-
- index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
- io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
- phy_pte = translate_phy_pte(&pte, itir, ifa);
-
- /* Ensure the WB attribute if the pte refers to a normal memory page,
- * which is required by VGA acceleration since QEMU maps the shared
- * VRAM buffer with WB.
- */
- if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
- io_mask != GPFN_PHYS_MMIO) {
- pte &= ~_PAGE_MA_MASK;
- phy_pte &= ~_PAGE_MA_MASK;
- }
-
- vtlb_purge(v, ifa, ps);
- vhpt_purge(v, ifa, ps);
-
- if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
- vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- }
- if (pte & VTLB_PTE_IO)
- return;
-
- if (ps >= mrr.ps)
- vhpt_insert(phy_pte, itir, ifa, pte);
- else {
- u64 psr;
- phy_pte &= ~PAGE_FLAGS_RV_MASK;
- psr = ia64_clear_ic();
- ia64_itc(type, ifa, phy_pte, ps);
- paravirt_dv_serialize_data();
- ia64_set_psr(psr);
- }
- if (!(pte&VTLB_PTE_IO))
- mark_pages_dirty(v, pte, ps);
-
-}
-
-/*
- * Purge all TC and VHPT entries, including those in the hash table.
- */
-
-void thash_purge_all(struct kvm_vcpu *v)
-{
- int i;
- struct thash_data *head;
- struct thash_cb *vtlb, *vhpt;
- vtlb = &v->arch.vtlb;
- vhpt = &v->arch.vhpt;
-
- for (i = 0; i < 8; i++)
- VMX(v, psbits[i]) = 0;
-
- head = vtlb->hash;
- for (i = 0; i < vtlb->num; i++) {
- head->page_flags = 0;
- head->etag = INVALID_TI_TAG;
- head->itir = 0;
- head->next = 0;
- head++;
- }
-
- head = vhpt->hash;
- for (i = 0; i < vhpt->num; i++) {
- head->page_flags = 0;
- head->etag = INVALID_TI_TAG;
- head->itir = 0;
- head->next = 0;
- head++;
- }
-
- local_flush_tlb_all();
-}
-
-/*
- * Look up the hash table and its collision chain to find an entry
- * covering the address rid:va.
- *
- * INPUT:
- * entries are in TLB format for both VHPT & TLB.
- */
-struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
-{
- struct thash_data *cch;
- u64 psbits, ps, tag;
- union ia64_rr vrr;
-
- struct thash_cb *hcb = &v->arch.vtlb;
-
- cch = __vtr_lookup(v, va, is_data);
- if (cch)
- return cch;
-
- if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
- return NULL;
-
- psbits = VMX(v, psbits[(va >> 61)]);
- vrr.val = vcpu_get_rr(v, va);
- while (psbits) {
- ps = __ffs(psbits);
- psbits &= ~(1UL << ps);
- vrr.ps = ps;
- cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
- if (cch->etag == tag && cch->ps == ps)
- return cch;
- }
-
- return NULL;
-}
-
-/*
- * Initialize internal control data before service.
- */
-void thash_init(struct thash_cb *hcb, u64 sz)
-{
- int i;
- struct thash_data *head;
-
- hcb->pta.val = (unsigned long)hcb->hash;
- hcb->pta.vf = 1;
- hcb->pta.ve = 1;
- hcb->pta.size = sz;
- head = hcb->hash;
- for (i = 0; i < hcb->num; i++) {
- head->page_flags = 0;
- head->itir = 0;
- head->etag = INVALID_TI_TAG;
- head->next = 0;
- head++;
- }
-}
-
-u64 kvm_get_mpt_entry(u64 gpfn)
-{
- u64 *base = (u64 *) KVM_P2M_BASE;
-
- if (gpfn >= (KVM_P2M_SIZE >> 3))
- panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
-
- return *(base + gpfn);
-}
-
-u64 kvm_lookup_mpa(u64 gpfn)
-{
- u64 maddr;
- maddr = kvm_get_mpt_entry(gpfn);
- return maddr&_PAGE_PPN_MASK;
-}
-
-u64 kvm_gpa_to_mpa(u64 gpa)
-{
- u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
- return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
-}
-
-/*
- * Fetch guest bundle code.
- * INPUT:
- * gip: guest ip
- * pbundle: used to return fetched bundle.
- */
-int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
-{
- u64 gpip = 0; /* guest physical IP*/
- u64 *vpa;
- struct thash_data *tlb;
- u64 maddr;
-
- if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
- /* I-side physical mode */
- gpip = gip;
- } else {
- tlb = vtlb_lookup(vcpu, gip, I_TLB);
- if (tlb)
- gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
- (gip & (PSIZE(tlb->ps) - 1));
- }
- if (gpip) {
- maddr = kvm_gpa_to_mpa(gpip);
- } else {
- tlb = vhpt_lookup(gip);
- if (tlb == NULL) {
- ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
- return IA64_FAULT;
- }
- maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
- | (gip & (PSIZE(tlb->ps) - 1));
- }
- vpa = (u64 *)__kvm_va(maddr);
-
- pbundle->i64[0] = *vpa++;
- pbundle->i64[1] = *vpa;
-
- return IA64_NO_FAULT;
-}
-
-void kvm_init_vhpt(struct kvm_vcpu *v)
-{
- v->arch.vhpt.num = VHPT_NUM_ENTRIES;
- thash_init(&v->arch.vhpt, VHPT_SHIFT);
- ia64_set_pta(v->arch.vhpt.pta.val);
- /*Enable VHPT here?*/
-}
-
-void kvm_init_vtlb(struct kvm_vcpu *v)
-{
- v->arch.vtlb.num = VTLB_NUM_ENTRIES;
- thash_init(&v->arch.vtlb, VTLB_SHIFT);
-}
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 3796801d6e0c..2edc793372fc 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -2,7 +2,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += module.h
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 6c9a24b3aefa..7bc4cb273856 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -80,4 +80,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index dbaf9f3065e8..9b6c691874bd 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -6,7 +6,6 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 7b8111c8f937..0bf5d525b945 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -13,7 +13,6 @@ generic-y += fb.h
generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index c7591e80067c..d703d8e26a65 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -4,8 +4,6 @@
#include <asm/metag_mem.h>
#define nop() asm volatile ("NOP")
-#define mb() wmb()
-#define rmb() barrier()
#ifdef CONFIG_METAG_META21
@@ -41,13 +39,13 @@ static inline void wr_fence(void)
#endif /* !CONFIG_METAG_META21 */
-static inline void wmb(void)
-{
- /* flush writes through the write combiner */
- wr_fence();
-}
+/* flush writes through the write combiner */
+#define mb() wr_fence()
+#define rmb() barrier()
+#define wmb() mb()
-#define read_barrier_depends() do { } while (0)
+#define dma_rmb() rmb()
+#define dma_wmb() wmb()
#ifndef CONFIG_SMP
#define fence() do { } while (0)
@@ -82,7 +80,10 @@ static inline void fence(void)
#define smp_wmb() barrier()
#endif
#endif
-#define smp_read_barrier_depends() do { } while (0)
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define smp_store_release(p, v) \
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a7736fa0580c..0bce820428fc 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,5 +1,6 @@
config MICROBLAZE
def_bool y
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 448143b8cabd..ab564a6db5c3 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -4,7 +4,6 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 95cef0b5f836..df19d0c47be8 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -565,6 +565,7 @@ void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
+unsigned long consistent_virt_to_pfn(void *vaddr);
void setup_memory(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 4633c36c1b32..ed7ba8a11822 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -154,9 +154,36 @@ dma_direct_sync_sg_for_device(struct device *dev,
__dma_sync(sg->dma_address, sg->length, direction);
}
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+
+ if (off >= count || user_count > (count - off))
+ return -ENXIO;
+
+#ifdef NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = consistent_virt_to_pfn(cpu_addr);
+#else
+ pfn = virt_to_pfn(cpu_addr);
+#endif
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+#else
+ return -ENXIO;
+#endif
+}
+
struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_direct_map_sg,
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index e10ad930895e..b06c3a7faf20 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -156,6 +156,25 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
}
EXPORT_SYMBOL(consistent_alloc);
+#ifdef CONFIG_MMU
+static pte_t *consistent_virt_to_pte(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
+}
+
+unsigned long consistent_virt_to_pfn(void *vaddr)
+{
+ pte_t *ptep = consistent_virt_to_pte(vaddr);
+
+ if (pte_none(*ptep) || !pte_present(*ptep))
+ return 0;
+
+ return pte_pfn(*ptep);
+}
+#endif
+
/*
* free page(s) as defined by the above mapping.
*/
@@ -181,13 +200,9 @@ void consistent_free(size_t size, void *vaddr)
} while (size -= PAGE_SIZE);
#else
do {
- pte_t *ptep;
+ pte_t *ptep = consistent_virt_to_pte(vaddr);
unsigned long pfn;
- ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
- (unsigned int)vaddr),
- (unsigned int)vaddr),
- (unsigned int)vaddr);
if (!pte_none(*ptep) && pte_present(*ptep)) {
pfn = pte_pfn(*ptep);
pte_clear(&init_mm, (unsigned int)vaddr, ptep);
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index f5e18bf3275e..e5fc463b36d0 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -2,7 +2,9 @@
platforms += alchemy
platforms += ar7
+platforms += ath25
platforms += ath79
+platforms += bcm3384
platforms += bcm47xx
platforms += bcm63xx
platforms += cavium-octeon
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 9536ef912f59..3289969ee423 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -53,6 +53,7 @@ config MIPS
select HAVE_CC_STACKPROTECTOR
select CPU_PM if CPU_IDLE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_BINFMT_ELF_STATE
menu "Machine selection"
@@ -62,7 +63,7 @@ choice
config MIPS_ALCHEMY
bool "Alchemy processor based machines"
- select 64BIT_PHYS_ADDR
+ select ARCH_PHYS_ADDR_T_64BIT
select CEVT_R4K
select CSRC_R4K
select IRQ_CPU
@@ -96,6 +97,20 @@ config AR7
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
+config ATH25
+ bool "Atheros AR231x/AR531x SoC support"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select IRQ_DOMAIN
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_HAS_EARLY_PRINTK
+ help
+ Support for Atheros AR231x and Atheros AR531x based boards
+
config ATH79
bool "Atheros AR71XX/AR724X/AR913X based boards"
select ARCH_REQUIRE_GPIOLIB
@@ -115,6 +130,32 @@ config ATH79
help
Support for the Atheros AR71XX/AR724X/AR913X SoCs.
+config BCM3384
+ bool "Broadcom BCM3384 based boards"
+ select BOOT_RAW
+ select NO_EXCEPT_FILL
+ select USE_OF
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYNC_R4K
+ select COMMON_CLK
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_CPU_BMIPS5000
+ select SWAP_IO_SPACE
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_OHCI_BIG_ENDIAN_DESC
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ help
+ Support for BCM3384 based boards. BCM3384/BCM33843 is a cable modem
+ chipset with a Linux application processor that is often used to
+ provide Samba services, a CUPS print server, and/or advanced routing
+ features.
+
config BCM47XX
bool "Broadcom BCM47XX based boards"
select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -269,6 +310,8 @@ config LANTIQ
select USE_OF
select PINCTRL
select PINCTRL_LANTIQ
+ select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
config LASAT
bool "LASAT Networks platforms"
@@ -315,17 +358,18 @@ config MIPS_MALTA
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
- select CSRC_GIC
+ select CLKSRC_MIPS_GIC
select DMA_MAYBE_COHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
select IRQ_CPU
- select IRQ_GIC
+ select MIPS_GIC
select HW_HAS_PCI
select I8253
select I8259
select MIPS_BONITO64
select MIPS_CPU_SCACHE
+ select MIPS_L1_CACHE_SHIFT_6
select PCI_GT64XXX_PCI0
select MIPS_MSC
select SWAP_IO_SPACE
@@ -340,6 +384,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MICROMIPS
select SYS_SUPPORTS_MIPS_CMP
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_MIPS16
@@ -357,12 +402,12 @@ config MIPS_SEAD3
select BUILTIN_DTB
select CEVT_R4K
select CSRC_R4K
- select CSRC_GIC
+ select CLKSRC_MIPS_GIC
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select DMA_NONCOHERENT
select IRQ_CPU
- select IRQ_GIC
+ select MIPS_GIC
select LIBFDT
select MIPS_MSC
select SYS_HAS_CPU_MIPS32_R1
@@ -726,7 +771,7 @@ config MIKROTIK_RB532
config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
- select 64BIT_PHYS_ADDR
+ select ARCH_PHYS_ADDR_T_64BIT
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -768,7 +813,7 @@ config NLM_XLR_BOARD
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
- select 64BIT_PHYS_ADDR
+ select ARCH_PHYS_ADDR_T_64BIT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select DMA_COHERENT
@@ -794,7 +839,7 @@ config NLM_XLP_BOARD
select HW_HAS_PCI
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
- select 64BIT_PHYS_ADDR
+ select ARCH_PHYS_ADDR_T_64BIT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
@@ -835,6 +880,7 @@ config MIPS_PARAVIRT
endchoice
source "arch/mips/alchemy/Kconfig"
+source "arch/mips/ath25/Kconfig"
source "arch/mips/ath79/Kconfig"
source "arch/mips/bcm47xx/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
@@ -907,10 +953,6 @@ config CEVT_GT641XX
config CEVT_R4K
bool
-config CEVT_GIC
- select MIPS_CM
- bool
-
config CEVT_SB1250
bool
@@ -926,10 +968,6 @@ config CSRC_IOASIC
config CSRC_R4K
bool
-config CSRC_GIC
- select MIPS_CM
- bool
-
config CSRC_SB1250
bool
@@ -941,7 +979,7 @@ config FW_CFE
bool
config ARCH_DMA_ADDR_T_64BIT
- def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT
+ def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT
config DMA_MAYBE_COHERENT
select DMA_NONCOHERENT
@@ -975,6 +1013,7 @@ config SYS_SUPPORTS_HOTPLUG_CPU
config I8259
bool
+ select IRQ_DOMAIN
config MIPS_BONITO64
bool
@@ -1055,6 +1094,7 @@ config MIPS_HUGE_TLB_SUPPORT
config IRQ_CPU
bool
+ select IRQ_DOMAIN
config IRQ_CPU_RM7K
bool
@@ -1071,10 +1111,6 @@ config IRQ_TXX9
config IRQ_GT641XX
bool
-config IRQ_GIC
- select MIPS_CM
- bool
-
config PCI_GT64XXX_PCI0
bool
@@ -1574,6 +1610,7 @@ config CPU_LOONGSON1
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_CPUFREQ
config CPU_BMIPS32_3300
select SMP_UP if SMP
@@ -1586,12 +1623,14 @@ config CPU_BMIPS4350
config CPU_BMIPS4380
bool
+ select MIPS_L1_CACHE_SHIFT_6
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_HOTPLUG_CPU
config CPU_BMIPS5000
bool
select MIPS_CPU_SCACHE
+ select MIPS_L1_CACHE_SHIFT_7
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_HOTPLUG_CPU
@@ -1886,15 +1925,6 @@ config FORCE_MAX_ZONEORDER
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.
-config CEVT_GIC
- bool "Use GIC global counter for clock events"
- depends on IRQ_GIC && !MIPS_SEAD3
- help
- Use the GIC global counter for the clock events. The R4K clock
- event driver is always present, so if the platform ends up not
- detecting a GIC, it will fall back to the R4K timer for the
- generation of clock events.
-
config BOARD_SCACHE
bool
@@ -1908,7 +1938,6 @@ config IP22_CPU_SCACHE
config MIPS_CPU_SCACHE
bool
select BOARD_SCACHE
- select MIPS_L1_CACHE_SHIFT_6
config R5000_CPU_SCACHE
bool
@@ -2095,11 +2124,8 @@ config SB1_PASS_2_1_WORKAROUNDS
default y
-config 64BIT_PHYS_ADDR
- bool
-
config ARCH_PHYS_ADDR_T_64BIT
- def_bool 64BIT_PHYS_ADDR
+ bool
choice
prompt "SmartMIPS or microMIPS ASE support"
@@ -2122,7 +2148,7 @@ config CPU_HAS_SMARTMIPS
here.
config CPU_MICROMIPS
- depends on SYS_SUPPORTS_MICROMIPS
+ depends on 32BIT && SYS_SUPPORTS_MICROMIPS
bool "microMIPS"
help
When this option is enabled the kernel will be built using the
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3a2b775e8458..88a9f433f6fc 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,4 +122,17 @@ config SPINLOCK_TEST
help
Add several files to the debugfs to test spinlock speed.
+config FP32XX_HYBRID_FPRS
+ bool "Run FP32 & FPXX code with hybrid FPRs"
+ depends on MIPS_O32_FP64_SUPPORT
+ help
+ The hybrid FPR scheme is normally used only when a program needs to
+ execute a mix of FP32 & FP64A code, since the trapping & emulation
+ that it entails is expensive. When enabled, this option will lead
+ to the kernel running programs which use the FP32 & FPXX FP ABIs
+ using the hybrid FPR scheme, which can be useful for debugging
+ purposes.
+
+ If unsure, say N.
+
endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 58076472bdd8..2563a088d3b8 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -380,6 +380,7 @@ define archhelp
echo ' vmlinux.ecoff - ECOFF boot image'
echo ' vmlinux.bin - Raw binary boot image'
echo ' vmlinux.srec - SREC boot image'
+ echo ' vmlinux.32 - 64-bit boot image wrapped in 32bits (IP22/IP32)'
echo ' vmlinuz - Compressed boot(zboot) image'
echo ' vmlinuz.ecoff - ECOFF zboot image'
echo ' vmlinuz.bin - Raw binary zboot image'
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index d7557cde271a..203e4403c366 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -37,7 +37,6 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clk-private.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -397,10 +396,10 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
break;
/* if this parent is currently unused, remember it.
- * XXX: I know it's a layering violation, but it works
- * so well.. (if (!clk_has_active_children(pc)) )
+ * XXX: we would actually want clk_has_active_children()
+ * but this is a good-enough approximation for now.
*/
- if (pc->prepare_count == 0) {
+ if (!__clk_is_prepared(pc)) {
if (!free)
free = pc;
}
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index ea8f41869e56..4e72daf12c32 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -70,9 +70,9 @@ void __init plat_mem_setup(void)
iomem_resource.end = IOMEM_RESOURCE_END;
}
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI)
/* This routine should be valid for all Au1x based boards */
-phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
unsigned long start = ALCHEMY_PCI_MEMWIN_START;
unsigned long end = ALCHEMY_PCI_MEMWIN_END;
@@ -83,7 +83,7 @@ phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
/* Check for PCI memory window */
if (phys_addr >= start && (phys_addr + size - 1) <= end)
- return (phys_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
+ return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
/* default nop */
return phys_addr;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 7e2356fd5fd6..af2441dbfc12 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -311,8 +311,7 @@ static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
&dev_addr[0], &dev_addr[1],
&dev_addr[2], &dev_addr[3],
&dev_addr[4], &dev_addr[5]) != 6) {
- pr_warning("cannot parse mac address, "
- "using random address\n");
+ pr_warn("cannot parse mac address, using random address\n");
eth_random_addr(dev_addr);
}
} else
@@ -665,7 +664,7 @@ static int __init ar7_register_devices(void)
res = platform_device_register(&physmap_flash);
if (res)
- pr_warning("unable to register physmap-flash: %d\n", res);
+ pr_warn("unable to register physmap-flash: %d\n", res);
if (ar7_is_titan())
titan_fixup_devices();
@@ -673,13 +672,13 @@ static int __init ar7_register_devices(void)
ar7_device_disable(vlynq_low_data.reset_bit);
res = platform_device_register(&vlynq_low);
if (res)
- pr_warning("unable to register vlynq-low: %d\n", res);
+ pr_warn("unable to register vlynq-low: %d\n", res);
if (ar7_has_high_vlynq()) {
ar7_device_disable(vlynq_high_data.reset_bit);
res = platform_device_register(&vlynq_high);
if (res)
- pr_warning("unable to register vlynq-high: %d\n", res);
+ pr_warn("unable to register vlynq-high: %d\n", res);
}
if (ar7_has_high_cpmac()) {
@@ -689,9 +688,10 @@ static int __init ar7_register_devices(void)
res = platform_device_register(&cpmac_high);
if (res)
- pr_warning("unable to register cpmac-high: %d\n", res);
+ pr_warn("unable to register cpmac-high: %d\n",
+ res);
} else
- pr_warning("unable to add cpmac-high phy: %d\n", res);
+ pr_warn("unable to add cpmac-high phy: %d\n", res);
} else
cpmac_low_data.phy_mask = 0xffffffff;
@@ -700,18 +700,18 @@ static int __init ar7_register_devices(void)
cpmac_get_mac(0, cpmac_low_data.dev_addr);
res = platform_device_register(&cpmac_low);
if (res)
- pr_warning("unable to register cpmac-low: %d\n", res);
+ pr_warn("unable to register cpmac-low: %d\n", res);
} else
- pr_warning("unable to add cpmac-low phy: %d\n", res);
+ pr_warn("unable to add cpmac-low phy: %d\n", res);
detect_leds();
res = platform_device_register(&ar7_gpio_leds);
if (res)
- pr_warning("unable to register leds: %d\n", res);
+ pr_warn("unable to register leds: %d\n", res);
res = platform_device_register(&ar7_udc);
if (res)
- pr_warning("unable to register usb slave: %d\n", res);
+ pr_warn("unable to register usb slave: %d\n", res);
/* Register watchdog only if enabled in hardware */
bootcr = ioremap_nocache(AR7_REGS_DCL, 4);
@@ -726,7 +726,7 @@ static int __init ar7_register_devices(void)
ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
res = platform_device_register(&ar7_wdt);
if (res)
- pr_warning("unable to register watchdog: %d\n", res);
+ pr_warn("unable to register watchdog: %d\n", res);
}
return 0;
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
new file mode 100644
index 000000000000..fc19dd57e42d
--- /dev/null
+++ b/arch/mips/ath25/Kconfig
@@ -0,0 +1,16 @@
+config SOC_AR5312
+ bool "Atheros AR5312/AR2312+ SoC support"
+ depends on ATH25
+ default y
+
+config SOC_AR2315
+ bool "Atheros AR2315+ SoC support"
+ depends on ATH25
+ default y
+
+config PCI_AR2315
+ bool "Atheros AR2315 PCI controller support"
+ depends on SOC_AR2315
+ select HW_HAS_PCI
+ select PCI
+ default y
diff --git a/arch/mips/ath25/Makefile b/arch/mips/ath25/Makefile
new file mode 100644
index 000000000000..eabad7da446a
--- /dev/null
+++ b/arch/mips/ath25/Makefile
@@ -0,0 +1,16 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2006 FON Technology, SL.
+# Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+# Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+#
+
+obj-y += board.o prom.o devices.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_SOC_AR5312) += ar5312.o
+obj-$(CONFIG_SOC_AR2315) += ar2315.o
diff --git a/arch/mips/ath25/Platform b/arch/mips/ath25/Platform
new file mode 100644
index 000000000000..ef3f81fa080b
--- /dev/null
+++ b/arch/mips/ath25/Platform
@@ -0,0 +1,6 @@
+#
+# Atheros AR531X/AR231X WiSoC
+#
+platform-$(CONFIG_ATH25) += ath25/
+cflags-$(CONFIG_ATH25) += -I$(srctree)/arch/mips/include/asm/mach-ath25
+load-$(CONFIG_ATH25) += 0xffffffff80041000
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
new file mode 100644
index 000000000000..2befa7d766a6
--- /dev/null
+++ b/arch/mips/ath25/ar2315.c
@@ -0,0 +1,364 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
+ */
+
+/*
+ * Platform devices for Atheros AR2315 SoCs
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+
+#include "devices.h"
+#include "ar2315.h"
+#include "ar2315_regs.h"
+
+static void __iomem *ar2315_rst_base;
+static struct irq_domain *ar2315_misc_irq_domain;
+
+static inline u32 ar2315_rst_reg_read(u32 reg)
+{
+ return __raw_readl(ar2315_rst_base + reg);
+}
+
+static inline void ar2315_rst_reg_write(u32 reg, u32 val)
+{
+ __raw_writel(val, ar2315_rst_base + reg);
+}
+
+static inline void ar2315_rst_reg_mask(u32 reg, u32 mask, u32 val)
+{
+ u32 ret = ar2315_rst_reg_read(reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar2315_rst_reg_write(reg, ret);
+}
+
+static irqreturn_t ar2315_ahb_err_handler(int cpl, void *dev_id)
+{
+ ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
+ ar2315_rst_reg_read(AR2315_AHB_ERR1);
+
+ pr_emerg("AHB fatal error\n");
+ machine_restart("AHB error"); /* Catastrophic failure */
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction ar2315_ahb_err_interrupt = {
+ .handler = ar2315_ahb_err_handler,
+ .name = "ar2315-ahb-error",
+};
+
+static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
+ ar2315_rst_reg_read(AR2315_IMR);
+ unsigned nr, misc_irq = 0;
+
+ if (pending) {
+ struct irq_domain *domain = irq_get_handler_data(irq);
+
+ nr = __ffs(pending);
+ misc_irq = irq_find_mapping(domain, nr);
+ }
+
+ if (misc_irq) {
+ if (nr == AR2315_MISC_IRQ_GPIO)
+ ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
+ else if (nr == AR2315_MISC_IRQ_WATCHDOG)
+ ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
+ generic_handle_irq(misc_irq);
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static void ar2315_misc_irq_unmask(struct irq_data *d)
+{
+ ar2315_rst_reg_mask(AR2315_IMR, 0, BIT(d->hwirq));
+}
+
+static void ar2315_misc_irq_mask(struct irq_data *d)
+{
+ ar2315_rst_reg_mask(AR2315_IMR, BIT(d->hwirq), 0);
+}
+
+static struct irq_chip ar2315_misc_irq_chip = {
+ .name = "ar2315-misc",
+ .irq_unmask = ar2315_misc_irq_unmask,
+ .irq_mask = ar2315_misc_irq_mask,
+};
+
+static int ar2315_misc_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar2315_misc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static struct irq_domain_ops ar2315_misc_irq_domain_ops = {
+ .map = ar2315_misc_irq_map,
+};
+
+/*
+ * Called when an interrupt is received, this function
+ * determines exactly which interrupt it was, and it
+ * invokes the appropriate handler.
+ *
+ * Implicitly, we also define interrupt priority by
+ * choosing which to dispatch first.
+ */
+static void ar2315_irq_dispatch(void)
+{
+ u32 pending = read_c0_status() & read_c0_cause();
+
+ if (pending & CAUSEF_IP3)
+ do_IRQ(AR2315_IRQ_WLAN0);
+#ifdef CONFIG_PCI_AR2315
+ else if (pending & CAUSEF_IP5)
+ do_IRQ(AR2315_IRQ_LCBUS_PCI);
+#endif
+ else if (pending & CAUSEF_IP2)
+ do_IRQ(AR2315_IRQ_MISC);
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(ATH25_IRQ_CPU_CLOCK);
+ else
+ spurious_interrupt();
+}
+
+void __init ar2315_arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ unsigned irq;
+
+ ath25_irq_dispatch = ar2315_irq_dispatch;
+
+ domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT,
+ &ar2315_misc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add IRQ domain");
+
+ irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB);
+ setup_irq(irq, &ar2315_ahb_err_interrupt);
+
+ irq_set_chained_handler(AR2315_IRQ_MISC, ar2315_misc_irq_handler);
+ irq_set_handler_data(AR2315_IRQ_MISC, domain);
+
+ ar2315_misc_irq_domain = domain;
+}
+
+void __init ar2315_init_devices(void)
+{
+ /* Find board configuration */
+ ath25_find_config(AR2315_SPI_READ_BASE, AR2315_SPI_READ_SIZE);
+
+ ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0);
+}
+
+static void ar2315_restart(char *command)
+{
+ void (*mips_reset_vec)(void) = (void *)0xbfc00000;
+
+ local_irq_disable();
+
+ /* try to reset the system via the reset control register */
+ ar2315_rst_reg_write(AR2315_COLD_RESET, AR2317_RESET_SYSTEM);
+
+ /* Cold reset does not work on the AR2315/6; use the GPIO reset bits
+ * as a workaround. Give it some time to attempt a GPIO-based hardware
+ * reset (Atheros reference design workaround) */
+
+ /* TODO: implement the GPIO reset workaround */
+
+ /* Some boards (e.g. Senao EOC-2610) don't implement the reset logic
+ * workaround. Attempt to jump to the MIPS reset vector -
+ * the boot loader itself might be able to recover the system */
+ mips_reset_vec();
+}
+
+/*
+ * This table is indexed by the PLLC_CTL reference divider field
+ * to determine the predivisor value.
+ */
+static int clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
+static int pllc_divide_table[5] __initdata = { 2, 3, 4, 6, 3 };
+
+static unsigned __init ar2315_sys_clk(u32 clock_ctl)
+{
+ unsigned int pllc_ctrl, cpu_div;
+ unsigned int pllc_out, refdiv, fdiv, divby2;
+ unsigned int clk_div;
+
+ pllc_ctrl = ar2315_rst_reg_read(AR2315_PLLC_CTL);
+ refdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_REF_DIV);
+ refdiv = clockctl1_predivide_table[refdiv];
+ fdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV);
+ divby2 = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_ADD_FDBACK_DIV) + 1;
+ pllc_out = (40000000 / refdiv) * (2 * divby2) * fdiv;
+
+ /* clkm input selected */
+ switch (clock_ctl & AR2315_CPUCLK_CLK_SEL_M) {
+ case 0:
+ case 1:
+ clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKM_DIV);
+ clk_div = pllc_divide_table[clk_div];
+ break;
+ case 2:
+ clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKC_DIV);
+ clk_div = pllc_divide_table[clk_div];
+ break;
+ default:
+ pllc_out = 40000000;
+ clk_div = 1;
+ break;
+ }
+
+ cpu_div = ATH25_REG_MS(clock_ctl, AR2315_CPUCLK_CLK_DIV);
+ cpu_div = cpu_div * 2 ?: 1;
+
+ return pllc_out / (clk_div * cpu_div);
+}
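To make the arithmetic in ar2315_sys_clk() concrete, take some hypothetical PLLC_CTL field values: a reference-divider field of 1 (table entry 2), the add-feedback bit clear (divby2 = 1), fdiv = 20, a CLKM divider field of 2 (table entry 4) and a CPU divider field of 1 (so cpu_div = 2). Then pllc_out = (40000000 / 2) * 2 * 20 = 800 MHz, and the function returns 800 MHz / (4 * 2) = 100 MHz.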
+
+static inline unsigned ar2315_cpu_frequency(void)
+{
+ return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_CPUCLK));
+}
+
+static inline unsigned ar2315_apb_frequency(void)
+{
+ return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_AMBACLK));
+}
+
+void __init ar2315_plat_time_init(void)
+{
+ mips_hpt_frequency = ar2315_cpu_frequency() / 2;
+}
+
+void __init ar2315_plat_mem_setup(void)
+{
+ void __iomem *sdram_base;
+ u32 memsize, memcfg;
+ u32 devid;
+ u32 config;
+
+ /* Detect memory size */
+ sdram_base = ioremap_nocache(AR2315_SDRAMCTL_BASE,
+ AR2315_SDRAMCTL_SIZE);
+ memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG);
+ memsize = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH);
+ memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH);
+ memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH);
+ memsize <<= 3;
+ add_memory_region(0, memsize, BOOT_MEM_RAM);
+ iounmap(sdram_base);
+
+ ar2315_rst_base = ioremap_nocache(AR2315_RST_BASE, AR2315_RST_SIZE);
+
+ /* Detect the hardware based on the device ID */
+ devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP;
+ switch (devid) {
+ case 0x91: /* Need to check */
+ ath25_soc = ATH25_SOC_AR2318;
+ break;
+ case 0x90:
+ ath25_soc = ATH25_SOC_AR2317;
+ break;
+ case 0x87:
+ ath25_soc = ATH25_SOC_AR2316;
+ break;
+ case 0x86:
+ default:
+ ath25_soc = ATH25_SOC_AR2315;
+ break;
+ }
+ ath25_board.devid = devid;
+
+ /* Clear any lingering AHB errors */
+ config = read_c0_config();
+ write_c0_config(config & ~0x3);
+ ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
+ ar2315_rst_reg_read(AR2315_AHB_ERR1);
+ ar2315_rst_reg_write(AR2315_WDT_CTRL, AR2315_WDT_CTRL_IGNORE);
+
+ _machine_restart = ar2315_restart;
+}
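As a worked (hypothetical) example of the SDRAM sizing at the top of ar2315_plat_mem_setup(): with MEM_CFG fields DATA_WIDTH = 1, COL_WIDTH = 7 and ROW_WIDTH = 11, the computation gives memsize = (1 + 1) << (1 + 7) << (1 + 11) << 3 = 2 << 8 << 12 << 3 = 16 MiB, which is then registered with add_memory_region() as BOOT_MEM_RAM.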
+
+#ifdef CONFIG_PCI_AR2315
+static struct resource ar2315_pci_res[] = {
+ {
+ .name = "ar2315-pci-ctrl",
+ .flags = IORESOURCE_MEM,
+ .start = AR2315_PCI_BASE,
+ .end = AR2315_PCI_BASE + AR2315_PCI_SIZE - 1,
+ },
+ {
+ .name = "ar2315-pci-ext",
+ .flags = IORESOURCE_MEM,
+ .start = AR2315_PCI_EXT_BASE,
+ .end = AR2315_PCI_EXT_BASE + AR2315_PCI_EXT_SIZE - 1,
+ },
+ {
+ .name = "ar2315-pci",
+ .flags = IORESOURCE_IRQ,
+ .start = AR2315_IRQ_LCBUS_PCI,
+ .end = AR2315_IRQ_LCBUS_PCI,
+ },
+};
+#endif
+
+void __init ar2315_arch_init(void)
+{
+ unsigned irq = irq_create_mapping(ar2315_misc_irq_domain,
+ AR2315_MISC_IRQ_UART0);
+
+ ath25_serial_setup(AR2315_UART0_BASE, irq, ar2315_apb_frequency());
+
+#ifdef CONFIG_PCI_AR2315
+ if (ath25_soc == ATH25_SOC_AR2315) {
+ /* Reset PCI DMA logic */
+ ar2315_rst_reg_mask(AR2315_RESET, 0, AR2315_RESET_PCIDMA);
+ msleep(20);
+ ar2315_rst_reg_mask(AR2315_RESET, AR2315_RESET_PCIDMA, 0);
+ msleep(20);
+
+ /* Configure endians */
+ ar2315_rst_reg_mask(AR2315_ENDIAN_CTL, 0, AR2315_CONFIG_PCIAHB |
+ AR2315_CONFIG_PCIAHB_BRIDGE);
+
+ /* Configure as PCI host with DMA */
+ ar2315_rst_reg_write(AR2315_PCICLK, AR2315_PCICLK_PLLC_CLKM |
+ (AR2315_PCICLK_IN_FREQ_DIV_6 <<
+ AR2315_PCICLK_DIV_S));
+ ar2315_rst_reg_mask(AR2315_AHB_ARB_CTL, 0, AR2315_ARB_PCI);
+ ar2315_rst_reg_mask(AR2315_IF_CTL, AR2315_IF_PCI_CLK_MASK |
+ AR2315_IF_MASK, AR2315_IF_PCI |
+ AR2315_IF_PCI_HOST | AR2315_IF_PCI_INTR |
+ (AR2315_IF_PCI_CLK_OUTPUT_CLK <<
+ AR2315_IF_PCI_CLK_SHIFT));
+
+ platform_device_register_simple("ar2315-pci", -1,
+ ar2315_pci_res,
+ ARRAY_SIZE(ar2315_pci_res));
+ }
+#endif
+}
diff --git a/arch/mips/ath25/ar2315.h b/arch/mips/ath25/ar2315.h
new file mode 100644
index 000000000000..877afe63eed5
--- /dev/null
+++ b/arch/mips/ath25/ar2315.h
@@ -0,0 +1,22 @@
+#ifndef __AR2315_H
+#define __AR2315_H
+
+#ifdef CONFIG_SOC_AR2315
+
+void ar2315_arch_init_irq(void);
+void ar2315_init_devices(void);
+void ar2315_plat_time_init(void);
+void ar2315_plat_mem_setup(void);
+void ar2315_arch_init(void);
+
+#else
+
+static inline void ar2315_arch_init_irq(void) {}
+static inline void ar2315_init_devices(void) {}
+static inline void ar2315_plat_time_init(void) {}
+static inline void ar2315_plat_mem_setup(void) {}
+static inline void ar2315_arch_init(void) {}
+
+#endif
+
+#endif /* __AR2315_H */
diff --git a/arch/mips/ath25/ar2315_regs.h b/arch/mips/ath25/ar2315_regs.h
new file mode 100644
index 000000000000..16e86149cb74
--- /dev/null
+++ b/arch/mips/ath25/ar2315_regs.h
@@ -0,0 +1,410 @@
+/*
+ * Register definitions for AR2315+
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2008 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#ifndef __ASM_MACH_ATH25_AR2315_REGS_H
+#define __ASM_MACH_ATH25_AR2315_REGS_H
+
+/*
+ * IRQs
+ */
+#define AR2315_IRQ_MISC (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
+#define AR2315_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
+#define AR2315_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
+#define AR2315_IRQ_LCBUS_PCI (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
+#define AR2315_IRQ_WLAN0_POLL (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
+
+/*
+ * Miscellaneous interrupts, which share IP2.
+ */
+#define AR2315_MISC_IRQ_UART0 0
+#define AR2315_MISC_IRQ_I2C_RSVD 1
+#define AR2315_MISC_IRQ_SPI 2
+#define AR2315_MISC_IRQ_AHB 3
+#define AR2315_MISC_IRQ_APB 4
+#define AR2315_MISC_IRQ_TIMER 5
+#define AR2315_MISC_IRQ_GPIO 6
+#define AR2315_MISC_IRQ_WATCHDOG 7
+#define AR2315_MISC_IRQ_IR_RSVD 8
+#define AR2315_MISC_IRQ_COUNT 9
+
+/*
+ * Address map
+ */
+#define AR2315_SPI_READ_BASE 0x08000000 /* SPI flash */
+#define AR2315_SPI_READ_SIZE 0x01000000
+#define AR2315_WLAN0_BASE 0x10000000 /* Wireless MMR */
+#define AR2315_PCI_BASE 0x10100000 /* PCI MMR */
+#define AR2315_PCI_SIZE 0x00001000
+#define AR2315_SDRAMCTL_BASE 0x10300000 /* SDRAM MMR */
+#define AR2315_SDRAMCTL_SIZE 0x00000020
+#define AR2315_LOCAL_BASE 0x10400000 /* Local bus MMR */
+#define AR2315_ENET0_BASE 0x10500000 /* Ethernet MMR */
+#define AR2315_RST_BASE 0x11000000 /* Reset control MMR */
+#define AR2315_RST_SIZE 0x00000100
+#define AR2315_UART0_BASE 0x11100000 /* UART MMR */
+#define AR2315_SPI_MMR_BASE 0x11300000 /* SPI flash MMR */
+#define AR2315_SPI_MMR_SIZE 0x00000010
+#define AR2315_PCI_EXT_BASE 0x80000000 /* PCI external */
+#define AR2315_PCI_EXT_SIZE 0x40000000
+
+/*
+ * Configuration registers
+ */
+
+/* Cold reset register */
+#define AR2315_COLD_RESET 0x0000
+
+#define AR2315_RESET_COLD_AHB 0x00000001
+#define AR2315_RESET_COLD_APB 0x00000002
+#define AR2315_RESET_COLD_CPU 0x00000004
+#define AR2315_RESET_COLD_CPUWARM 0x00000008
+#define AR2315_RESET_SYSTEM (AR2315_RESET_COLD_CPU |\
+ AR2315_RESET_COLD_APB |\
+ AR2315_RESET_COLD_AHB) /* full system */
+#define AR2317_RESET_SYSTEM 0x00000010
+
+/* Reset register */
+#define AR2315_RESET 0x0004
+
+#define AR2315_RESET_WARM_WLAN0_MAC 0x00000001 /* warm reset WLAN0 MAC */
+#define AR2315_RESET_WARM_WLAN0_BB 0x00000002 /* warm reset WLAN0 BB */
+#define AR2315_RESET_MPEGTS_RSVD 0x00000004 /* warm reset MPEG-TS */
+#define AR2315_RESET_PCIDMA 0x00000008 /* warm reset PCI ahb/dma */
+#define AR2315_RESET_MEMCTL 0x00000010 /* warm reset mem control */
+#define AR2315_RESET_LOCAL 0x00000020 /* warm reset local bus */
+#define AR2315_RESET_I2C_RSVD 0x00000040 /* warm reset I2C bus */
+#define AR2315_RESET_SPI 0x00000080 /* warm reset SPI iface */
+#define AR2315_RESET_UART0 0x00000100 /* warm reset UART0 */
+#define AR2315_RESET_IR_RSVD 0x00000200 /* warm reset IR iface */
+#define AR2315_RESET_EPHY0 0x00000400 /* cold reset ENET0 phy */
+#define AR2315_RESET_ENET0 0x00000800 /* cold reset ENET0 MAC */
+
+/* AHB master arbitration control */
+#define AR2315_AHB_ARB_CTL 0x0008
+
+#define AR2315_ARB_CPU 0x00000001 /* CPU, default */
+#define AR2315_ARB_WLAN 0x00000002 /* WLAN */
+#define AR2315_ARB_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
+#define AR2315_ARB_LOCAL 0x00000008 /* Local bus */
+#define AR2315_ARB_PCI 0x00000010 /* PCI bus */
+#define AR2315_ARB_ETHERNET 0x00000020 /* Ethernet */
+#define AR2315_ARB_RETRY 0x00000100 /* Retry policy (debug) */
+
+/* Config Register */
+#define AR2315_ENDIAN_CTL 0x000c
+
+#define AR2315_CONFIG_AHB 0x00000001 /* EC-AHB bridge endian */
+#define AR2315_CONFIG_WLAN 0x00000002 /* WLAN byteswap */
+#define AR2315_CONFIG_MPEGTS_RSVD 0x00000004 /* MPEG-TS byteswap */
+#define AR2315_CONFIG_PCI 0x00000008 /* PCI byteswap */
+#define AR2315_CONFIG_MEMCTL 0x00000010 /* Mem controller endian */
+#define AR2315_CONFIG_LOCAL 0x00000020 /* Local bus byteswap */
+#define AR2315_CONFIG_ETHERNET 0x00000040 /* Ethernet byteswap */
+#define AR2315_CONFIG_MERGE 0x00000200 /* CPU write buffer merge */
+#define AR2315_CONFIG_CPU 0x00000400 /* CPU big endian */
+#define AR2315_CONFIG_BIG 0x00000400
+#define AR2315_CONFIG_PCIAHB 0x00000800
+#define AR2315_CONFIG_PCIAHB_BRIDGE 0x00001000
+#define AR2315_CONFIG_SPI 0x00008000 /* SPI byteswap */
+#define AR2315_CONFIG_CPU_DRAM 0x00010000
+#define AR2315_CONFIG_CPU_PCI 0x00020000
+#define AR2315_CONFIG_CPU_MMR 0x00040000
+
+/* NMI control */
+#define AR2315_NMI_CTL 0x0010
+
+#define AR2315_NMI_EN 1
+
+/* Revision Register - Initial value is 0x3010 (WMAC 3.0, AR231X 1.0). */
+#define AR2315_SREV 0x0014
+
+#define AR2315_REV_MAJ 0x000000f0
+#define AR2315_REV_MAJ_S 4
+#define AR2315_REV_MIN 0x0000000f
+#define AR2315_REV_MIN_S 0
+#define AR2315_REV_CHIP (AR2315_REV_MAJ | AR2315_REV_MIN)
+
+/* Interface Enable */
+#define AR2315_IF_CTL 0x0018
+
+#define AR2315_IF_MASK 0x00000007
+#define AR2315_IF_DISABLED 0 /* Disable all */
+#define AR2315_IF_PCI 1 /* PCI */
+#define AR2315_IF_TS_LOCAL 2 /* Local bus */
+#define AR2315_IF_ALL 3 /* Emulation only */
+#define AR2315_IF_LOCAL_HOST 0x00000008
+#define AR2315_IF_PCI_HOST 0x00000010
+#define AR2315_IF_PCI_INTR 0x00000020
+#define AR2315_IF_PCI_CLK_MASK 0x00030000
+#define AR2315_IF_PCI_CLK_INPUT 0
+#define AR2315_IF_PCI_CLK_OUTPUT_LOW 1
+#define AR2315_IF_PCI_CLK_OUTPUT_CLK 2
+#define AR2315_IF_PCI_CLK_OUTPUT_HIGH 3
+#define AR2315_IF_PCI_CLK_SHIFT 16
+
+/* APB Interrupt control */
+#define AR2315_ISR 0x0020
+#define AR2315_IMR 0x0024
+#define AR2315_GISR 0x0028
+
+#define AR2315_ISR_UART0 0x00000001 /* high speed UART */
+#define AR2315_ISR_I2C_RSVD 0x00000002 /* I2C bus */
+#define AR2315_ISR_SPI 0x00000004 /* SPI bus */
+#define AR2315_ISR_AHB 0x00000008 /* AHB error */
+#define AR2315_ISR_APB 0x00000010 /* APB error */
+#define AR2315_ISR_TIMER 0x00000020 /* Timer */
+#define AR2315_ISR_GPIO 0x00000040 /* GPIO */
+#define AR2315_ISR_WD 0x00000080 /* Watchdog */
+#define AR2315_ISR_IR_RSVD 0x00000100 /* IR */
+
+#define AR2315_GISR_MISC 0x00000001 /* Misc */
+#define AR2315_GISR_WLAN0 0x00000002 /* WLAN0 */
+#define AR2315_GISR_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
+#define AR2315_GISR_LOCALPCI 0x00000008 /* Local/PCI bus */
+#define AR2315_GISR_WMACPOLL 0x00000010
+#define AR2315_GISR_TIMER 0x00000020
+#define AR2315_GISR_ETHERNET 0x00000040 /* Ethernet */
+
+/* Generic timer */
+#define AR2315_TIMER 0x0030
+#define AR2315_RELOAD 0x0034
+
+/* Watchdog timer */
+#define AR2315_WDT_TIMER 0x0038
+#define AR2315_WDT_CTRL 0x003c
+
+#define AR2315_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
+#define AR2315_WDT_CTRL_NMI 0x00000001 /* NMI on watchdog */
+#define AR2315_WDT_CTRL_RESET 0x00000002 /* reset on watchdog */
+
+/* CPU Performance Counters */
+#define AR2315_PERFCNT0 0x0048
+#define AR2315_PERFCNT1 0x004c
+
+#define AR2315_PERF0_DATAHIT 0x00000001 /* Count Data Cache Hits */
+#define AR2315_PERF0_DATAMISS 0x00000002 /* Count Data Cache Misses */
+#define AR2315_PERF0_INSTHIT 0x00000004 /* Count Instruction Cache Hits */
+#define AR2315_PERF0_INSTMISS 0x00000008 /* Count Instruction Cache Misses */
+#define AR2315_PERF0_ACTIVE 0x00000010 /* Count Active Processor Cycles */
+#define AR2315_PERF0_WBHIT 0x00000020 /* Count CPU Write Buffer Hits */
+#define AR2315_PERF0_WBMISS 0x00000040 /* Count CPU Write Buffer Misses */
+
+#define AR2315_PERF1_EB_ARDY 0x00000001 /* Count EB_ARdy signal */
+#define AR2315_PERF1_EB_AVALID 0x00000002 /* Count EB_AValid signal */
+#define AR2315_PERF1_EB_WDRDY 0x00000004 /* Count EB_WDRdy signal */
+#define AR2315_PERF1_EB_RDVAL 0x00000008 /* Count EB_RdVal signal */
+#define AR2315_PERF1_VRADDR 0x00000010 /* Count valid read address cycles*/
+#define AR2315_PERF1_VWADDR 0x00000020 /* Count valid write address cycl.*/
+#define AR2315_PERF1_VWDATA 0x00000040 /* Count valid write data cycles */
+
+/* AHB Error Reporting */
+#define AR2315_AHB_ERR0 0x0050 /* error */
+#define AR2315_AHB_ERR1 0x0054 /* haddr */
+#define AR2315_AHB_ERR2 0x0058 /* hwdata */
+#define AR2315_AHB_ERR3 0x005c /* hrdata */
+#define AR2315_AHB_ERR4 0x0060 /* status */
+
+#define AR2315_AHB_ERROR_DET 1 /* AHB Error has been detected, */
+ /* write 1 to clear all bits in ERR0 */
+#define AR2315_AHB_ERROR_OVR 2 /* AHB Error overflow has been detected */
+#define AR2315_AHB_ERROR_WDT 4 /* AHB Error due to wdt instead of hresp */
+
+#define AR2315_PROCERR_HMAST 0x0000000f
+#define AR2315_PROCERR_HMAST_DFLT 0
+#define AR2315_PROCERR_HMAST_WMAC 1
+#define AR2315_PROCERR_HMAST_ENET 2
+#define AR2315_PROCERR_HMAST_PCIENDPT 3
+#define AR2315_PROCERR_HMAST_LOCAL 4
+#define AR2315_PROCERR_HMAST_CPU 5
+#define AR2315_PROCERR_HMAST_PCITGT 6
+#define AR2315_PROCERR_HMAST_S 0
+#define AR2315_PROCERR_HWRITE 0x00000010
+#define AR2315_PROCERR_HSIZE 0x00000060
+#define AR2315_PROCERR_HSIZE_S 5
+#define AR2315_PROCERR_HTRANS 0x00000180
+#define AR2315_PROCERR_HTRANS_S 7
+#define AR2315_PROCERR_HBURST 0x00000e00
+#define AR2315_PROCERR_HBURST_S 9
+
+/* Clock Control */
+#define AR2315_PLLC_CTL 0x0064
+#define AR2315_PLLV_CTL 0x0068
+#define AR2315_CPUCLK 0x006c
+#define AR2315_AMBACLK 0x0070
+#define AR2315_SYNCCLK 0x0074
+#define AR2315_DSL_SLEEP_CTL 0x0080
+#define AR2315_DSL_SLEEP_DUR 0x0084
+
+/* PLLc Control fields */
+#define AR2315_PLLC_REF_DIV_M 0x00000003
+#define AR2315_PLLC_REF_DIV_S 0
+#define AR2315_PLLC_FDBACK_DIV_M 0x0000007c
+#define AR2315_PLLC_FDBACK_DIV_S 2
+#define AR2315_PLLC_ADD_FDBACK_DIV_M 0x00000080
+#define AR2315_PLLC_ADD_FDBACK_DIV_S 7
+#define AR2315_PLLC_CLKC_DIV_M 0x0001c000
+#define AR2315_PLLC_CLKC_DIV_S 14
+#define AR2315_PLLC_CLKM_DIV_M 0x00700000
+#define AR2315_PLLC_CLKM_DIV_S 20
+
+/* CPU CLK Control fields */
+#define AR2315_CPUCLK_CLK_SEL_M 0x00000003
+#define AR2315_CPUCLK_CLK_SEL_S 0
+#define AR2315_CPUCLK_CLK_DIV_M 0x0000000c
+#define AR2315_CPUCLK_CLK_DIV_S 2
+
+/* AMBA CLK Control fields */
+#define AR2315_AMBACLK_CLK_SEL_M 0x00000003
+#define AR2315_AMBACLK_CLK_SEL_S 0
+#define AR2315_AMBACLK_CLK_DIV_M 0x0000000c
+#define AR2315_AMBACLK_CLK_DIV_S 2
+
+/* PCI Clock Control */
+#define AR2315_PCICLK 0x00a4
+
+#define AR2315_PCICLK_INPUT_M 0x00000003
+#define AR2315_PCICLK_INPUT_S 0
+#define AR2315_PCICLK_PLLC_CLKM 0
+#define AR2315_PCICLK_PLLC_CLKM1 1
+#define AR2315_PCICLK_PLLC_CLKC 2
+#define AR2315_PCICLK_REF_CLK 3
+#define AR2315_PCICLK_DIV_M 0x0000000c
+#define AR2315_PCICLK_DIV_S 2
+#define AR2315_PCICLK_IN_FREQ 0
+#define AR2315_PCICLK_IN_FREQ_DIV_6 1
+#define AR2315_PCICLK_IN_FREQ_DIV_8 2
+#define AR2315_PCICLK_IN_FREQ_DIV_10 3
+
+/* Observation Control Register */
+#define AR2315_OCR 0x00b0
+
+#define AR2315_OCR_GPIO0_IRIN 0x00000040
+#define AR2315_OCR_GPIO1_IROUT 0x00000080
+#define AR2315_OCR_GPIO3_RXCLR 0x00000200
+
+/* General Clock Control */
+#define AR2315_MISCCLK 0x00b4
+
+#define AR2315_MISCCLK_PLLBYPASS_EN 0x00000001
+#define AR2315_MISCCLK_PROCREFCLK 0x00000002
+
+/*
+ * SDRAM Controller
+ * - No read or write buffers are included.
+ */
+#define AR2315_MEM_CFG 0x0000
+#define AR2315_MEM_CTRL 0x000c
+#define AR2315_MEM_REF 0x0010
+
+#define AR2315_MEM_CFG_DATA_WIDTH_M 0x00006000
+#define AR2315_MEM_CFG_DATA_WIDTH_S 13
+#define AR2315_MEM_CFG_COL_WIDTH_M 0x00001e00
+#define AR2315_MEM_CFG_COL_WIDTH_S 9
+#define AR2315_MEM_CFG_ROW_WIDTH_M 0x000001e0
+#define AR2315_MEM_CFG_ROW_WIDTH_S 5
+#define AR2315_MEM_CFG_BANKADDR_BITS_M 0x00000018
+#define AR2315_MEM_CFG_BANKADDR_BITS_S 3
+
+/*
+ * Local Bus Interface Registers
+ */
+#define AR2315_LB_CONFIG 0x0000
+
+#define AR2315_LBCONF_OE 0x00000001 /* =1 OE is low-true */
+#define AR2315_LBCONF_CS0 0x00000002 /* =1 first CS is low-true */
+#define AR2315_LBCONF_CS1 0x00000004 /* =1 2nd CS is low-true */
+#define AR2315_LBCONF_RDY 0x00000008 /* =1 RDY is low-true */
+#define AR2315_LBCONF_WE 0x00000010 /* =1 Write En is low-true */
+#define AR2315_LBCONF_WAIT 0x00000020 /* =1 WAIT is low-true */
+#define AR2315_LBCONF_ADS 0x00000040 /* =1 Adr Strobe is low-true */
+#define AR2315_LBCONF_MOT 0x00000080 /* =0 Intel, =1 Motorola */
+#define AR2315_LBCONF_8CS 0x00000100 /* =1 8 bits CS, 0= 16bits */
+#define AR2315_LBCONF_8DS 0x00000200 /* =1 8 bits Data S, 0=16bits */
+#define AR2315_LBCONF_ADS_EN 0x00000400 /* =1 Enable ADS */
+#define AR2315_LBCONF_ADR_OE 0x00000800 /* =1 Adr cap on OE, WE or DS */
+#define AR2315_LBCONF_ADDT_MUX 0x00001000 /* =1 Adr and Data share bus */
+#define AR2315_LBCONF_DATA_OE 0x00002000 /* =1 Data cap on OE, WE, DS */
+#define AR2315_LBCONF_16DATA 0x00004000 /* =1 Data is 16 bits wide */
+#define AR2315_LBCONF_SWAPDT 0x00008000 /* =1 Byte swap data */
+#define AR2315_LBCONF_SYNC 0x00010000 /* =1 Bus synchronous to clk */
+#define AR2315_LBCONF_INT 0x00020000 /* =1 Intr is low true */
+#define AR2315_LBCONF_INT_CTR0 0x00000000 /* GND high-Z, Vdd is high-Z */
+#define AR2315_LBCONF_INT_CTR1 0x00040000 /* GND drive, Vdd is high-Z */
+#define AR2315_LBCONF_INT_CTR2 0x00080000 /* GND high-Z, Vdd drive */
+#define AR2315_LBCONF_INT_CTR3 0x000c0000 /* GND drive, Vdd drive */
+#define AR2315_LBCONF_RDY_WAIT 0x00100000 /* =1 RDY is negative of WAIT */
+#define AR2315_LBCONF_INT_PULSE 0x00200000 /* =1 Interrupt is a pulse */
+#define AR2315_LBCONF_ENABLE 0x00400000 /* =1 Falcon respond to LB */
+
+#define AR2315_LB_CLKSEL 0x0004
+
+#define AR2315_LBCLK_EXT 0x00000001 /* use external clk for lb */
+
+#define AR2315_LB_1MS 0x0008
+
+#define AR2315_LB1MS_MASK 0x0003ffff /* # of AHB clk cycles in 1ms */
+
+#define AR2315_LB_MISCCFG 0x000c
+
+#define AR2315_LBM_TXD_EN 0x00000001 /* Enable TXD for fragments */
+#define AR2315_LBM_RX_INTEN 0x00000002 /* Enable LB ints on RX ready */
+#define AR2315_LBM_MBOXWR_INTEN 0x00000004 /* Enable LB ints on mbox wr */
+#define AR2315_LBM_MBOXRD_INTEN 0x00000008 /* Enable LB ints on mbox rd */
+#define AR2315_LMB_DESCSWAP_EN 0x00000010 /* Byte swap desc enable */
+#define AR2315_LBM_TIMEOUT_M 0x00ffff80
+#define AR2315_LBM_TIMEOUT_S 7
+#define AR2315_LBM_PORTMUX 0x07000000
+
+#define AR2315_LB_RXTSOFF 0x0010
+
+#define AR2315_LB_TX_CHAIN_EN 0x0100
+
+#define AR2315_LB_TXEN_0 0x00000001
+#define AR2315_LB_TXEN_1 0x00000002
+#define AR2315_LB_TXEN_2 0x00000004
+#define AR2315_LB_TXEN_3 0x00000008
+
+#define AR2315_LB_TX_CHAIN_DIS 0x0104
+#define AR2315_LB_TX_DESC_PTR 0x0200
+
+#define AR2315_LB_RX_CHAIN_EN 0x0400
+
+#define AR2315_LB_RXEN 0x00000001
+
+#define AR2315_LB_RX_CHAIN_DIS 0x0404
+#define AR2315_LB_RX_DESC_PTR 0x0408
+
+#define AR2315_LB_INT_STATUS 0x0500
+
+#define AR2315_LB_INT_TX_DESC 0x00000001
+#define AR2315_LB_INT_TX_OK 0x00000002
+#define AR2315_LB_INT_TX_ERR 0x00000004
+#define AR2315_LB_INT_TX_EOF 0x00000008
+#define AR2315_LB_INT_RX_DESC 0x00000010
+#define AR2315_LB_INT_RX_OK 0x00000020
+#define AR2315_LB_INT_RX_ERR 0x00000040
+#define AR2315_LB_INT_RX_EOF 0x00000080
+#define AR2315_LB_INT_TX_TRUNC 0x00000100
+#define AR2315_LB_INT_TX_STARVE 0x00000200
+#define AR2315_LB_INT_LB_TIMEOUT 0x00000400
+#define AR2315_LB_INT_LB_ERR 0x00000800
+#define AR2315_LB_INT_MBOX_WR 0x00001000
+#define AR2315_LB_INT_MBOX_RD 0x00002000
+
+/* Bit definitions for INT MASK are the same as INT_STATUS */
+#define AR2315_LB_INT_MASK 0x0504
+
+#define AR2315_LB_INT_EN 0x0508
+#define AR2315_LB_MBOX 0x0600
+
+#endif /* __ASM_MACH_ATH25_AR2315_REGS_H */
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
new file mode 100644
index 000000000000..b6887f75144c
--- /dev/null
+++ b/arch/mips/ath25/ar5312.c
@@ -0,0 +1,393 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
+ */
+
+/*
+ * Platform devices for Atheros AR5312 SoCs
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+
+#include "devices.h"
+#include "ar5312.h"
+#include "ar5312_regs.h"
+
+static void __iomem *ar5312_rst_base;
+static struct irq_domain *ar5312_misc_irq_domain;
+
+static inline u32 ar5312_rst_reg_read(u32 reg)
+{
+ return __raw_readl(ar5312_rst_base + reg);
+}
+
+static inline void ar5312_rst_reg_write(u32 reg, u32 val)
+{
+ __raw_writel(val, ar5312_rst_base + reg);
+}
+
+static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val)
+{
+ u32 ret = ar5312_rst_reg_read(reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar5312_rst_reg_write(reg, ret);
+}
+
+static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id)
+{
+ u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1);
+ u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */
+ u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1);
+ u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */
+
+ pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n",
+ proc_addr, proc1, dma_addr, dma1);
+
+ machine_restart("AHB error"); /* Catastrophic failure */
+ return IRQ_HANDLED;
+}
+
+static struct irqaction ar5312_ahb_err_interrupt = {
+ .handler = ar5312_ahb_err_handler,
+ .name = "ar5312-ahb-error",
+};
+
+static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
+ ar5312_rst_reg_read(AR5312_IMR);
+ unsigned nr, misc_irq = 0;
+
+ if (pending) {
+ struct irq_domain *domain = irq_get_handler_data(irq);
+
+ nr = __ffs(pending);
+ misc_irq = irq_find_mapping(domain, nr);
+ }
+
+ if (misc_irq) {
+ generic_handle_irq(misc_irq);
+ if (nr == AR5312_MISC_IRQ_TIMER)
+ ar5312_rst_reg_read(AR5312_TIMER);
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/* Enable the specified AR5312_MISC_IRQ interrupt */
+static void ar5312_misc_irq_unmask(struct irq_data *d)
+{
+ ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq));
+}
+
+/* Disable the specified AR5312_MISC_IRQ interrupt */
+static void ar5312_misc_irq_mask(struct irq_data *d)
+{
+ ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0);
+ ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */
+}
+
+static struct irq_chip ar5312_misc_irq_chip = {
+ .name = "ar5312-misc",
+ .irq_unmask = ar5312_misc_irq_unmask,
+ .irq_mask = ar5312_misc_irq_mask,
+};
+
+static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static struct irq_domain_ops ar5312_misc_irq_domain_ops = {
+ .map = ar5312_misc_irq_map,
+};
+
+static void ar5312_irq_dispatch(void)
+{
+ u32 pending = read_c0_status() & read_c0_cause();
+
+ if (pending & CAUSEF_IP2)
+ do_IRQ(AR5312_IRQ_WLAN0);
+ else if (pending & CAUSEF_IP5)
+ do_IRQ(AR5312_IRQ_WLAN1);
+ else if (pending & CAUSEF_IP6)
+ do_IRQ(AR5312_IRQ_MISC);
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(ATH25_IRQ_CPU_CLOCK);
+ else
+ spurious_interrupt();
+}
+
+void __init ar5312_arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ unsigned irq;
+
+ ath25_irq_dispatch = ar5312_irq_dispatch;
+
+ domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
+ &ar5312_misc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add IRQ domain");
+
+ irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
+ setup_irq(irq, &ar5312_ahb_err_interrupt);
+
+ irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler);
+ irq_set_handler_data(AR5312_IRQ_MISC, domain);
+
+ ar5312_misc_irq_domain = domain;
+}
+
+static struct physmap_flash_data ar5312_flash_data = {
+ .width = 2,
+};
+
+static struct resource ar5312_flash_resource = {
+ .start = AR5312_FLASH_BASE,
+ .end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ar5312_physmap_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &ar5312_flash_data,
+ .resource = &ar5312_flash_resource,
+ .num_resources = 1,
+};
+
+static void __init ar5312_flash_init(void)
+{
+ void __iomem *flashctl_base;
+ u32 ctl;
+
+ flashctl_base = ioremap_nocache(AR5312_FLASHCTL_BASE,
+ AR5312_FLASHCTL_SIZE);
+
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0);
+ ctl &= AR5312_FLASHCTL_MW;
+
+ /* fixup flash width */
+ switch (ctl) {
+ case AR5312_FLASHCTL_MW16:
+ ar5312_flash_data.width = 2;
+ break;
+ case AR5312_FLASHCTL_MW8:
+ default:
+ ar5312_flash_data.width = 1;
+ break;
+ }
+
+ /*
+ * Configure flash bank 0.
+ * Assume 8M window size. Flash will be aliased if it's smaller
+ */
+ ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE;
+ ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S;
+ ctl |= 0x07 << AR5312_FLASHCTL_WST1_S;
+ ctl |= 0x07 << AR5312_FLASHCTL_WST2_S;
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0);
+
+ /* Disable other flash banks */
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1);
+ ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1);
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2);
+ ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2);
+
+ iounmap(flashctl_base);
+}
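A worked value for the bank-0 programming above, assuming the boot loader left the part configured as x16 flash (so the preserved MW bits are AR5312_FLASHCTL_MW16): E | AC_8M | RBLE | (1 << IDCY_S) | (7 << WST1_S) | (7 << WST2_S) = 0x00080000 | 0x00060000 | 0x00000400 | 0x1 | 0xe0 | 0x3800 = 0x000e3ce1, so the value written to FLASHCTL0 works out to 0x100e3ce1.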
+
+void __init ar5312_init_devices(void)
+{
+ struct ath25_boarddata *config;
+
+ ar5312_flash_init();
+
+ /* Locate board/radio config data */
+ ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE);
+ config = ath25_board.config;
+
+ /* AR2313 has CPU minor rev. 10 */
+ if ((current_cpu_data.processor_id & 0xff) == 0x0a)
+ ath25_soc = ATH25_SOC_AR2313;
+
+ /* AR2312 shares the same Silicon ID as AR5312 */
+ else if (config->flags & BD_ISCASPER)
+ ath25_soc = ATH25_SOC_AR2312;
+
+ /* Everything else is probably AR5312 or compatible */
+ else
+ ath25_soc = ATH25_SOC_AR5312;
+
+ platform_device_register(&ar5312_physmap_flash);
+
+ switch (ath25_soc) {
+ case ATH25_SOC_AR5312:
+ if (!ath25_board.radio)
+ return;
+
+ if (!(config->flags & BD_WLAN0))
+ break;
+
+ ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0);
+ break;
+ case ATH25_SOC_AR2312:
+ case ATH25_SOC_AR2313:
+ if (!ath25_board.radio)
+ return;
+ break;
+ default:
+ break;
+ }
+
+ if (config->flags & BD_WLAN1)
+ ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1);
+}
+
+static void ar5312_restart(char *command)
+{
+ /* reset the system */
+ local_irq_disable();
+ while (1)
+ ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM);
+}
+
+/*
+ * This table is indexed by bits 5..4 of the CLOCKCTL1 register
+ * to determine the predivisor value.
+ */
+static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
+
+static unsigned __init ar5312_cpu_frequency(void)
+{
+ u32 scratch, devid, clock_ctl1;
+ u32 predivide_mask, multiplier_mask, doubler_mask;
+ unsigned predivide_shift, multiplier_shift;
+ unsigned predivide_select, predivisor, multiplier;
+
+ /* Trust the bootrom's idea of cpu frequency. */
+ scratch = ar5312_rst_reg_read(AR5312_SCRATCH);
+ if (scratch)
+ return scratch;
+
+ devid = ar5312_rst_reg_read(AR5312_REV);
+ devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S;
+ if (devid == AR5312_REV_MAJ_AR2313) {
+ predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK;
+ predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT;
+ multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK;
+ multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT;
+ doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK;
+ } else { /* AR5312 and AR2312 */
+ predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK;
+ predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT;
+ multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK;
+ multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT;
+ doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK;
+ }
+
+ /*
+ * Clocking is derived from a fixed 40MHz input clock.
+ *
+ * cpu_freq = input_clock * MULT (where MULT is PLL multiplier)
+ * sys_freq = cpu_freq / 4 (used for APB clock, serial,
+ * flash, Timer, Watchdog Timer)
+ *
+ * cnt_freq = cpu_freq / 2 (used for CPU count/compare)
+ *
+ * So, for example, with a PLL multiplier of 5, we have
+ *
+ * cpu_freq = 200MHz
+ * sys_freq = 50MHz
+ * cnt_freq = 100MHz
+ *
+ * We compute the CPU frequency, based on PLL settings.
+ */
+
+ clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1);
+ predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift;
+ predivisor = clockctl1_predivide_table[predivide_select];
+ multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift;
+
+ if (clock_ctl1 & doubler_mask)
+ multiplier <<= 1;
+
+ return (40000000 / predivisor) * multiplier;
+}
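As a worked (hypothetical) example of the decode above, assuming the SCRATCH register reads zero on an AR5312: CLOCKCTL1 = 0x00000500 gives a predivide field of 0 (table entry 1), a multiplier of 5 and a clear doubler bit, so the function returns (40000000 / 1) * 5 = 200 MHz - the same figure used in the clocking comment above.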
+
+static inline unsigned ar5312_sys_frequency(void)
+{
+ return ar5312_cpu_frequency() / 4;
+}
+
+void __init ar5312_plat_time_init(void)
+{
+ mips_hpt_frequency = ar5312_cpu_frequency() / 2;
+}
+
+void __init ar5312_plat_mem_setup(void)
+{
+ void __iomem *sdram_base;
+ u32 memsize, memcfg, bank0_ac, bank1_ac;
+ u32 devid;
+
+ /* Detect memory size */
+ sdram_base = ioremap_nocache(AR5312_SDRAMCTL_BASE,
+ AR5312_SDRAMCTL_SIZE);
+ memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1);
+ bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0);
+ bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1);
+ memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) +
+ (bank1_ac ? (1 << (bank1_ac + 1)) : 0);
+ memsize <<= 20;
+ add_memory_region(0, memsize, BOOT_MEM_RAM);
+ iounmap(sdram_base);
+
+ ar5312_rst_base = ioremap_nocache(AR5312_RST_BASE, AR5312_RST_SIZE);
+
+ devid = ar5312_rst_reg_read(AR5312_REV);
+ devid >>= AR5312_REV_WMAC_MIN_S;
+ devid &= AR5312_REV_CHIP;
+ ath25_board.devid = (u16)devid;
+
+ /* Clear any lingering AHB errors */
+ ar5312_rst_reg_read(AR5312_PROCADDR);
+ ar5312_rst_reg_read(AR5312_DMAADDR);
+ ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE);
+
+ _machine_restart = ar5312_restart;
+}
+
+void __init ar5312_arch_init(void)
+{
+ unsigned irq = irq_create_mapping(ar5312_misc_irq_domain,
+ AR5312_MISC_IRQ_UART0);
+
+ ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency());
+}
diff --git a/arch/mips/ath25/ar5312.h b/arch/mips/ath25/ar5312.h
new file mode 100644
index 000000000000..470abb0052bd
--- /dev/null
+++ b/arch/mips/ath25/ar5312.h
@@ -0,0 +1,22 @@
+#ifndef __AR5312_H
+#define __AR5312_H
+
+#ifdef CONFIG_SOC_AR5312
+
+void ar5312_arch_init_irq(void);
+void ar5312_init_devices(void);
+void ar5312_plat_time_init(void);
+void ar5312_plat_mem_setup(void);
+void ar5312_arch_init(void);
+
+#else
+
+static inline void ar5312_arch_init_irq(void) {}
+static inline void ar5312_init_devices(void) {}
+static inline void ar5312_plat_time_init(void) {}
+static inline void ar5312_plat_mem_setup(void) {}
+static inline void ar5312_arch_init(void) {}
+
+#endif
+
+#endif /* __AR5312_H */
diff --git a/arch/mips/ath25/ar5312_regs.h b/arch/mips/ath25/ar5312_regs.h
new file mode 100644
index 000000000000..4b947f967439
--- /dev/null
+++ b/arch/mips/ath25/ar5312_regs.h
@@ -0,0 +1,224 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#ifndef __ASM_MACH_ATH25_AR5312_REGS_H
+#define __ASM_MACH_ATH25_AR5312_REGS_H
+
+/*
+ * IRQs
+ */
+#define AR5312_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
+#define AR5312_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
+#define AR5312_IRQ_ENET1 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
+#define AR5312_IRQ_WLAN1 (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
+#define AR5312_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
+
+/*
+ * Miscellaneous interrupts, which share IP6.
+ */
+#define AR5312_MISC_IRQ_TIMER 0
+#define AR5312_MISC_IRQ_AHB_PROC 1
+#define AR5312_MISC_IRQ_AHB_DMA 2
+#define AR5312_MISC_IRQ_GPIO 3
+#define AR5312_MISC_IRQ_UART0 4
+#define AR5312_MISC_IRQ_UART0_DMA 5
+#define AR5312_MISC_IRQ_WATCHDOG 6
+#define AR5312_MISC_IRQ_LOCAL 7
+#define AR5312_MISC_IRQ_SPI 8
+#define AR5312_MISC_IRQ_COUNT 9
+
+/*
+ * Address Map
+ *
+ * The AR5312 supports 2 enet MACs, even though many reference boards only
+ * actually use 1 of them (i.e. only MAC 0 is actually connected to an enet
+ * PHY or PHY switch). The AR2312 supports 1 enet MAC.
+ */
+#define AR5312_WLAN0_BASE 0x18000000
+#define AR5312_ENET0_BASE 0x18100000
+#define AR5312_ENET1_BASE 0x18200000
+#define AR5312_SDRAMCTL_BASE 0x18300000
+#define AR5312_SDRAMCTL_SIZE 0x00000010
+#define AR5312_FLASHCTL_BASE 0x18400000
+#define AR5312_FLASHCTL_SIZE 0x00000010
+#define AR5312_WLAN1_BASE 0x18500000
+#define AR5312_UART0_BASE 0x1c000000 /* UART MMR */
+#define AR5312_GPIO_BASE 0x1c002000
+#define AR5312_GPIO_SIZE 0x00000010
+#define AR5312_RST_BASE 0x1c003000
+#define AR5312_RST_SIZE 0x00000100
+#define AR5312_FLASH_BASE 0x1e000000
+#define AR5312_FLASH_SIZE 0x00800000
+
+/*
+ * Need these defines to determine true number of ethernet MACs
+ */
+#define AR5312_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
+#define AR5312_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
+#define AR5312_AR2313_REV8 0x0058 /* AR2313 WMAC (AP43-030) */
+
+/* Reset/Timer Block Address Map */
+#define AR5312_TIMER 0x0000 /* countdown timer */
+#define AR5312_RELOAD 0x0004 /* timer reload value */
+#define AR5312_WDT_CTRL 0x0008 /* watchdog cntrl */
+#define AR5312_WDT_TIMER 0x000c /* watchdog timer */
+#define AR5312_ISR 0x0010 /* Intr Status Reg */
+#define AR5312_IMR 0x0014 /* Intr Mask Reg */
+#define AR5312_RESET 0x0020
+#define AR5312_CLOCKCTL1 0x0064
+#define AR5312_SCRATCH 0x006c
+#define AR5312_PROCADDR 0x0070
+#define AR5312_PROC1 0x0074
+#define AR5312_DMAADDR 0x0078
+#define AR5312_DMA1 0x007c
+#define AR5312_ENABLE 0x0080 /* interface enb */
+#define AR5312_REV 0x0090 /* revision */
+
+/* AR5312_WDT_CTRL register bit field definitions */
+#define AR5312_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
+#define AR5312_WDT_CTRL_NMI 0x00000001
+#define AR5312_WDT_CTRL_RESET 0x00000002
+
+/* AR5312_ISR register bit field definitions */
+#define AR5312_ISR_TIMER 0x00000001
+#define AR5312_ISR_AHBPROC 0x00000002
+#define AR5312_ISR_AHBDMA 0x00000004
+#define AR5312_ISR_GPIO 0x00000008
+#define AR5312_ISR_UART0 0x00000010
+#define AR5312_ISR_UART0DMA 0x00000020
+#define AR5312_ISR_WD 0x00000040
+#define AR5312_ISR_LOCAL 0x00000080
+
+/* AR5312_RESET register bit field definitions */
+#define AR5312_RESET_SYSTEM 0x00000001 /* cold reset full system */
+#define AR5312_RESET_PROC 0x00000002 /* cold reset MIPS core */
+#define AR5312_RESET_WLAN0 0x00000004 /* cold reset WLAN MAC/BB */
+#define AR5312_RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
+#define AR5312_RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
+#define AR5312_RESET_ENET0 0x00000020 /* cold reset ENET0 MAC */
+#define AR5312_RESET_ENET1 0x00000040 /* cold reset ENET1 MAC */
+#define AR5312_RESET_UART0 0x00000100 /* cold reset UART0 */
+#define AR5312_RESET_WLAN1 0x00000200 /* cold reset WLAN MAC/BB */
+#define AR5312_RESET_APB 0x00000400 /* cold reset APB ar5312 */
+#define AR5312_RESET_WARM_PROC 0x00001000 /* warm reset MIPS core */
+#define AR5312_RESET_WARM_WLAN0_MAC 0x00002000 /* warm reset WLAN0 MAC */
+#define AR5312_RESET_WARM_WLAN0_BB 0x00004000 /* warm reset WLAN0 BB */
+#define AR5312_RESET_NMI 0x00010000 /* send an NMI to the CPU */
+#define AR5312_RESET_WARM_WLAN1_MAC 0x00020000 /* warm reset WLAN1 MAC */
+#define AR5312_RESET_WARM_WLAN1_BB 0x00040000 /* warm reset WLAN1 BB */
+#define AR5312_RESET_LOCAL_BUS 0x00080000 /* reset local bus */
+#define AR5312_RESET_WDOG 0x00100000 /* last reset was a wdt */
+
+#define AR5312_RESET_WMAC0_BITS (AR5312_RESET_WLAN0 |\
+ AR5312_RESET_WARM_WLAN0_MAC |\
+ AR5312_RESET_WARM_WLAN0_BB)
+
+#define AR5312_RESET_WMAC1_BITS (AR5312_RESET_WLAN1 |\
+ AR5312_RESET_WARM_WLAN1_MAC |\
+ AR5312_RESET_WARM_WLAN1_BB)
+
+/* AR5312_CLOCKCTL1 register bit field definitions */
+/* Valid for AR5312 and AR2312 */
+#define AR5312_CLOCKCTL1_PREDIVIDE_MASK 0x00000030
+#define AR5312_CLOCKCTL1_PREDIVIDE_SHIFT 4
+#define AR5312_CLOCKCTL1_MULTIPLIER_MASK 0x00001f00
+#define AR5312_CLOCKCTL1_MULTIPLIER_SHIFT 8
+#define AR5312_CLOCKCTL1_DOUBLER_MASK 0x00010000
+
+/* Valid for AR2313 */
+#define AR2313_CLOCKCTL1_PREDIVIDE_MASK 0x00003000
+#define AR2313_CLOCKCTL1_PREDIVIDE_SHIFT 12
+#define AR2313_CLOCKCTL1_MULTIPLIER_MASK 0x001f0000
+#define AR2313_CLOCKCTL1_MULTIPLIER_SHIFT 16
+#define AR2313_CLOCKCTL1_DOUBLER_MASK 0x00000000
+
+/* AR5312_ENABLE register bit field definitions */
+#define AR5312_ENABLE_WLAN0 0x00000001
+#define AR5312_ENABLE_ENET0 0x00000002
+#define AR5312_ENABLE_ENET1 0x00000004
+#define AR5312_ENABLE_UART_AND_WLAN1_PIO 0x00000008 /* UART & WLAN1 PIO */
+#define AR5312_ENABLE_WLAN1_DMA 0x00000010 /* WLAN1 DMAs */
+#define AR5312_ENABLE_WLAN1 (AR5312_ENABLE_UART_AND_WLAN1_PIO |\
+ AR5312_ENABLE_WLAN1_DMA)
+
+/* AR5312_REV register bit field definitions */
+#define AR5312_REV_WMAC_MAJ 0x0000f000
+#define AR5312_REV_WMAC_MAJ_S 12
+#define AR5312_REV_WMAC_MIN 0x00000f00
+#define AR5312_REV_WMAC_MIN_S 8
+#define AR5312_REV_MAJ 0x000000f0
+#define AR5312_REV_MAJ_S 4
+#define AR5312_REV_MIN 0x0000000f
+#define AR5312_REV_MIN_S 0
+#define AR5312_REV_CHIP (AR5312_REV_MAJ|AR5312_REV_MIN)
+
+/* Major revision numbers, bits 7..4 of Revision ID register */
+#define AR5312_REV_MAJ_AR5312 0x4
+#define AR5312_REV_MAJ_AR2313 0x5
+
+/* Minor revision numbers, bits 3..0 of Revision ID register */
+#define AR5312_REV_MIN_DUAL 0x0 /* Dual WLAN version */
+#define AR5312_REV_MIN_SINGLE 0x1 /* Single WLAN version */
+
+/*
+ * ARM Flash Controller -- 3 flash banks with either x8 or x16 devices
+ */
+#define AR5312_FLASHCTL0 0x0000
+#define AR5312_FLASHCTL1 0x0004
+#define AR5312_FLASHCTL2 0x0008
+
+/* AR5312_FLASHCTL register bit field definitions */
+#define AR5312_FLASHCTL_IDCY 0x0000000f /* Idle cycle turnaround time */
+#define AR5312_FLASHCTL_IDCY_S 0
+#define AR5312_FLASHCTL_WST1 0x000003e0 /* Wait state 1 */
+#define AR5312_FLASHCTL_WST1_S 5
+#define AR5312_FLASHCTL_RBLE 0x00000400 /* Read byte lane enable */
+#define AR5312_FLASHCTL_WST2 0x0000f800 /* Wait state 2 */
+#define AR5312_FLASHCTL_WST2_S 11
+#define AR5312_FLASHCTL_AC 0x00070000 /* Flash addr check (added) */
+#define AR5312_FLASHCTL_AC_S 16
+#define AR5312_FLASHCTL_AC_128K 0x00000000
+#define AR5312_FLASHCTL_AC_256K 0x00010000
+#define AR5312_FLASHCTL_AC_512K 0x00020000
+#define AR5312_FLASHCTL_AC_1M 0x00030000
+#define AR5312_FLASHCTL_AC_2M 0x00040000
+#define AR5312_FLASHCTL_AC_4M 0x00050000
+#define AR5312_FLASHCTL_AC_8M 0x00060000
+#define AR5312_FLASHCTL_AC_RES 0x00070000 /* 16MB is not supported */
+#define AR5312_FLASHCTL_E 0x00080000 /* Flash bank enable (added) */
+#define AR5312_FLASHCTL_BUSERR 0x01000000 /* Bus transfer error flag */
+#define AR5312_FLASHCTL_WPERR 0x02000000 /* Write protect error flag */
+#define AR5312_FLASHCTL_WP 0x04000000 /* Write protect */
+#define AR5312_FLASHCTL_BM 0x08000000 /* Burst mode */
+#define AR5312_FLASHCTL_MW 0x30000000 /* Mem width */
+#define AR5312_FLASHCTL_MW8 0x00000000 /* Mem width x8 */
+#define AR5312_FLASHCTL_MW16 0x10000000 /* Mem width x16 */
+#define AR5312_FLASHCTL_MW32 0x20000000 /* Mem width x32 (not supp) */
+#define AR5312_FLASHCTL_ATNR 0x00000000 /* Access == no retry */
+#define AR5312_FLASHCTL_ATR 0x80000000 /* Access == retry every */
+#define AR5312_FLASHCTL_ATR4 0xc0000000 /* Access == retry every 4 */
+
+/*
+ * ARM SDRAM Controller -- just enough to determine memory size
+ */
+#define AR5312_MEM_CFG1 0x0004
+
+#define AR5312_MEM_CFG1_AC0_M 0x00000700 /* bank 0: SDRAM addr check */
+#define AR5312_MEM_CFG1_AC0_S 8
+#define AR5312_MEM_CFG1_AC1_M 0x00007000 /* bank 1: SDRAM addr check */
+#define AR5312_MEM_CFG1_AC1_S 12
+
+#endif /* __ASM_MACH_ATH25_AR5312_REGS_H */
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
new file mode 100644
index 000000000000..b8bb78282d6a
--- /dev/null
+++ b/arch/mips/ath25/board.c
@@ -0,0 +1,234 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/irq_cpu.h>
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+#include "devices.h"
+#include "ar5312.h"
+#include "ar2315.h"
+
+void (*ath25_irq_dispatch)(void);
+
+static inline bool check_radio_magic(const void __iomem *addr)
+{
+ addr += 0x7a; /* offset for flash magic */
+ return (__raw_readb(addr) == 0x5a) && (__raw_readb(addr + 1) == 0xa5);
+}
+
+static inline bool check_notempty(const void __iomem *addr)
+{
+ return __raw_readl(addr) != 0xffffffff;
+}
+
+static inline bool check_board_data(const void __iomem *addr, bool broken)
+{
+ /* config magic found */
+ if (__raw_readl(addr) == ATH25_BD_MAGIC)
+ return true;
+
+ if (!broken)
+ return false;
+
+ /* broken board data detected; use the radio data magic to find the
+ * offset (the user will fix this up later) */
+
+ if (check_radio_magic(addr + 0x1000))
+ return true;
+ if (check_radio_magic(addr + 0xf8))
+ return true;
+
+ return false;
+}
+
+static const void __iomem * __init find_board_config(const void __iomem *limit,
+ const bool broken)
+{
+ const void __iomem *addr;
+ const void __iomem *begin = limit - 0x1000;
+ const void __iomem *end = limit - 0x30000;
+
+ for (addr = begin; addr >= end; addr -= 0x1000)
+ if (check_board_data(addr, broken))
+ return addr;
+
+ return NULL;
+}
+
+static const void __iomem * __init find_radio_config(const void __iomem *limit,
+ const void __iomem *bcfg)
+{
+ const void __iomem *rcfg, *begin, *end;
+
+ /*
+ * Now find the start of Radio Configuration data, using heuristics:
+ * Search forward from Board Configuration data by 0x1000 bytes
+ * at a time until we find non-0xffffffff.
+ */
+ begin = bcfg + 0x1000;
+ end = limit;
+ for (rcfg = begin; rcfg < end; rcfg += 0x1000)
+ if (check_notempty(rcfg) && check_radio_magic(rcfg))
+ return rcfg;
+
+ /* AR2316 relocates radio config to new location */
+ begin = bcfg + 0xf8;
+ end = limit - 0x1000 + 0xf8;
+ for (rcfg = begin; rcfg < end; rcfg += 0x1000)
+ if (check_notempty(rcfg) && check_radio_magic(rcfg))
+ return rcfg;
+
+ return NULL;
+}
+
+/*
+ * NB: Search region size could be larger than the actual flash size,
+ * but this shouldn't be a problem here, because the flash
+ * will simply be mapped multiple times.
+ */
+int __init ath25_find_config(phys_addr_t base, unsigned long size)
+{
+ const void __iomem *flash_base, *flash_limit;
+ struct ath25_boarddata *config;
+ unsigned int rcfg_size;
+ int broken_boarddata = 0;
+ const void __iomem *bcfg, *rcfg;
+ u8 *board_data;
+ u8 *radio_data;
+ u8 *mac_addr;
+ u32 offset;
+
+ flash_base = ioremap_nocache(base, size);
+ flash_limit = flash_base + size;
+
+ ath25_board.config = NULL;
+ ath25_board.radio = NULL;
+
+ /* Copy the board and radio data to RAM, because accessing the mapped
+ * memory of the flash directly after booting is not safe */
+
+ /* Try to find valid board and radio data */
+ bcfg = find_board_config(flash_limit, false);
+
+ /* If that fails, try to at least find valid radio data */
+ if (!bcfg) {
+ bcfg = find_board_config(flash_limit, true);
+ broken_boarddata = 1;
+ }
+
+ if (!bcfg) {
+ pr_warn("WARNING: No board configuration data found!\n");
+ goto error;
+ }
+
+ board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+ ath25_board.config = (struct ath25_boarddata *)board_data;
+ memcpy_fromio(board_data, bcfg, 0x100);
+ if (broken_boarddata) {
+ pr_warn("WARNING: broken board data detected\n");
+ config = ath25_board.config;
+ if (is_zero_ether_addr(config->enet0_mac)) {
+ pr_info("Fixing up empty mac addresses\n");
+ config->reset_config_gpio = 0xffff;
+ config->sys_led_gpio = 0xffff;
+ random_ether_addr(config->wlan0_mac);
+ config->wlan0_mac[0] &= ~0x06;
+ random_ether_addr(config->enet0_mac);
+ random_ether_addr(config->enet1_mac);
+ }
+ }
+
+ /* Radio config starts 0x100 bytes after board config, regardless
+ * of what the physical layout on the flash chip looks like */
+
+ rcfg = find_radio_config(flash_limit, bcfg);
+ if (!rcfg) {
+ pr_warn("WARNING: Could not find Radio Configuration data\n");
+ goto error;
+ }
+
+ radio_data = board_data + 0x100 + ((rcfg - bcfg) & 0xfff);
+ ath25_board.radio = radio_data;
+ offset = radio_data - board_data;
+ pr_info("Radio config found at offset 0x%x (0x%x)\n", rcfg - bcfg,
+ offset);
+ rcfg_size = BOARD_CONFIG_BUFSZ - offset;
+ memcpy_fromio(radio_data, rcfg, rcfg_size);
+
+ mac_addr = &radio_data[0x1d * 2];
+ if (is_broadcast_ether_addr(mac_addr)) {
+ pr_info("Radio MAC is blank; using board-data\n");
+ ether_addr_copy(mac_addr, ath25_board.config->wlan0_mac);
+ }
+
+ iounmap(flash_base);
+
+ return 0;
+
+error:
+ iounmap(flash_base);
+ return -ENODEV;
+}
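As a concrete (hypothetical) example of the offset handling in ath25_find_config(): if the radio config is found 0x10f8 bytes after the board config in flash, then radio_data sits at board_data + 0x100 + (0x10f8 & 0xfff) = board_data + 0x1f8, so offset = 0x1f8 and rcfg_size = BOARD_CONFIG_BUFSZ - 0x1f8 bytes are copied from the flash mapping into the RAM buffer.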
+
+static void ath25_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+void __init plat_mem_setup(void)
+{
+ _machine_halt = ath25_halt;
+ pm_power_off = ath25_halt;
+
+ if (is_ar5312())
+ ar5312_plat_mem_setup();
+ else
+ ar2315_plat_mem_setup();
+
+ /* Disable data watchpoints */
+ write_c0_watchlo0(0);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ ath25_irq_dispatch();
+}
+
+void __init plat_time_init(void)
+{
+ if (is_ar5312())
+ ar5312_plat_time_init();
+ else
+ ar2315_plat_time_init();
+}
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init arch_init_irq(void)
+{
+ clear_c0_status(ST0_IM);
+ mips_cpu_irq_init();
+
+ /* Initialize interrupt controllers */
+ if (is_ar5312())
+ ar5312_arch_init_irq();
+ else
+ ar2315_arch_init_irq();
+}
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
new file mode 100644
index 000000000000..7a64567d1ac3
--- /dev/null
+++ b/arch/mips/ath25/devices.c
@@ -0,0 +1,125 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <asm/bootinfo.h>
+
+#include <ath25_platform.h>
+#include "devices.h"
+#include "ar5312.h"
+#include "ar2315.h"
+
+struct ar231x_board_config ath25_board;
+enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN;
+
+static struct resource ath25_wmac0_res[] = {
+ {
+ .name = "wmac0_membase",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "wmac0_irq",
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource ath25_wmac1_res[] = {
+ {
+ .name = "wmac1_membase",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "wmac1_irq",
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device ath25_wmac[] = {
+ {
+ .id = 0,
+ .name = "ar231x-wmac",
+ .resource = ath25_wmac0_res,
+ .num_resources = ARRAY_SIZE(ath25_wmac0_res),
+ .dev.platform_data = &ath25_board,
+ },
+ {
+ .id = 1,
+ .name = "ar231x-wmac",
+ .resource = ath25_wmac1_res,
+ .num_resources = ARRAY_SIZE(ath25_wmac1_res),
+ .dev.platform_data = &ath25_board,
+ },
+};
+
+static const char * const soc_type_strings[] = {
+ [ATH25_SOC_AR5312] = "Atheros AR5312",
+ [ATH25_SOC_AR2312] = "Atheros AR2312",
+ [ATH25_SOC_AR2313] = "Atheros AR2313",
+ [ATH25_SOC_AR2315] = "Atheros AR2315",
+ [ATH25_SOC_AR2316] = "Atheros AR2316",
+ [ATH25_SOC_AR2317] = "Atheros AR2317",
+ [ATH25_SOC_AR2318] = "Atheros AR2318",
+ [ATH25_SOC_UNKNOWN] = "Atheros (unknown)",
+};
+
+const char *get_system_type(void)
+{
+ if ((ath25_soc >= ARRAY_SIZE(soc_type_strings)) ||
+ !soc_type_strings[ath25_soc])
+ return soc_type_strings[ATH25_SOC_UNKNOWN];
+ return soc_type_strings[ath25_soc];
+}
+
+void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
+{
+ struct uart_port s;
+
+ memset(&s, 0, sizeof(s));
+
+ s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP;
+ s.iotype = UPIO_MEM32;
+ s.irq = irq;
+ s.regshift = 2;
+ s.mapbase = mapbase;
+ s.uartclk = uartclk;
+
+ early_serial_setup(&s);
+}
+
+int __init ath25_add_wmac(int nr, u32 base, int irq)
+{
+ struct resource *res;
+
+ ath25_wmac[nr].dev.platform_data = &ath25_board;
+ res = &ath25_wmac[nr].resource[0];
+ res->start = base;
+ res->end = base + 0x10000 - 1;
+ res++;
+ res->start = irq;
+ res->end = irq;
+ return platform_device_register(&ath25_wmac[nr]);
+}
+
+static int __init ath25_register_devices(void)
+{
+ if (is_ar5312())
+ ar5312_init_devices();
+ else
+ ar2315_init_devices();
+
+ return 0;
+}
+
+device_initcall(ath25_register_devices);
+
+static int __init ath25_arch_init(void)
+{
+ if (is_ar5312())
+ ar5312_arch_init();
+ else
+ ar2315_arch_init();
+
+ return 0;
+}
+
+arch_initcall(ath25_arch_init);
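
ath25_add_wmac() above maps each wireless MAC to a 64 KiB register window plus a single IRQ before registering the platform device. The following userspace mock only exercises the start/end arithmetic; the base address and IRQ number are invented and the struct is a stand-in for the kernel's struct resource.

  /* Mock of the resource arithmetic in ath25_add_wmac(); illustration only. */
  #include <stdio.h>

  struct mock_resource {
      unsigned int start;
      unsigned int end;
  };

  static void fill_wmac_resources(struct mock_resource res[2],
                                  unsigned int base, int irq)
  {
      res[0].start = base;
      res[0].end = base + 0x10000 - 1;  /* 64 KiB register window, inclusive end */
      res[1].start = irq;
      res[1].end = irq;                 /* single IRQ line */
  }

  int main(void)
  {
      struct mock_resource res[2];

      fill_wmac_resources(res, 0xb0000000u, 2);  /* made-up base and irq */
      printf("mem 0x%08x-0x%08x irq %u\n", res[0].start, res[0].end, res[1].start);
      return 0;
  }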
diff --git a/arch/mips/ath25/devices.h b/arch/mips/ath25/devices.h
new file mode 100644
index 000000000000..04d414115356
--- /dev/null
+++ b/arch/mips/ath25/devices.h
@@ -0,0 +1,43 @@
+#ifndef __ATH25_DEVICES_H
+#define __ATH25_DEVICES_H
+
+#include <linux/cpu.h>
+
+#define ATH25_REG_MS(_val, _field) (((_val) & _field##_M) >> _field##_S)
+
+#define ATH25_IRQ_CPU_CLOCK (MIPS_CPU_IRQ_BASE + 7) /* C0_CAUSE: 0x8000 */
+
+enum ath25_soc_type {
+ /* handled by ar5312.c */
+ ATH25_SOC_AR2312,
+ ATH25_SOC_AR2313,
+ ATH25_SOC_AR5312,
+
+ /* handled by ar2315.c */
+ ATH25_SOC_AR2315,
+ ATH25_SOC_AR2316,
+ ATH25_SOC_AR2317,
+ ATH25_SOC_AR2318,
+
+ ATH25_SOC_UNKNOWN
+};
+
+extern enum ath25_soc_type ath25_soc;
+extern struct ar231x_board_config ath25_board;
+extern void (*ath25_irq_dispatch)(void);
+
+int ath25_find_config(phys_addr_t offset, unsigned long size);
+void ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk);
+int ath25_add_wmac(int nr, u32 base, int irq);
+
+static inline bool is_ar2315(void)
+{
+ return (current_cpu_data.cputype == CPU_4KEC);
+}
+
+static inline bool is_ar5312(void)
+{
+ return !is_ar2315();
+}
+
+#endif
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
new file mode 100644
index 000000000000..36035b628161
--- /dev/null
+++ b/arch/mips/ath25/early_printk.c
@@ -0,0 +1,44 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include "devices.h"
+#include "ar2315_regs.h"
+#include "ar5312_regs.h"
+
+static inline void prom_uart_wr(void __iomem *base, unsigned reg,
+ unsigned char ch)
+{
+ __raw_writel(ch, base + 4 * reg);
+}
+
+static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
+{
+ return __raw_readl(base + 4 * reg);
+}
+
+void prom_putchar(unsigned char ch)
+{
+ static void __iomem *base;
+
+ if (unlikely(base == NULL)) {
+ if (is_ar2315())
+ base = (void __iomem *)(KSEG1ADDR(AR2315_UART0_BASE));
+ else
+ base = (void __iomem *)(KSEG1ADDR(AR5312_UART0_BASE));
+ }
+
+ while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+ prom_uart_wr(base, UART_TX, ch);
+ while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+}
diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c
new file mode 100644
index 000000000000..edf82be8870d
--- /dev/null
+++ b/arch/mips/ath25/prom.c
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright MontaVista Software Inc
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ */
+
+/*
+ * Prom setup file for AR5312/AR231x SoCs
+ */
+
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+
+void __init prom_init(void)
+{
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 9c0e1761773f..6adae366f11a 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -359,7 +359,6 @@ void __init arch_init_irq(void)
BUG();
}
- cp0_perfcount_irq = ATH79_MISC_IRQ(5);
mips_cpu_irq_init();
ath79_misc_irq_init();
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
index e9cbd7c2918f..e1fe63051136 100644
--- a/arch/mips/ath79/prom.c
+++ b/arch/mips/ath79/prom.c
@@ -13,42 +13,24 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
+#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
#include "common.h"
-static inline int is_valid_ram_addr(void *addr)
-{
- if (((u32) addr > KSEG0) &&
- ((u32) addr < (KSEG0 + ATH79_MEM_SIZE_MAX)))
- return 1;
-
- if (((u32) addr > KSEG1) &&
- ((u32) addr < (KSEG1 + ATH79_MEM_SIZE_MAX)))
- return 1;
-
- return 0;
-}
-
-static __init void ath79_prom_init_cmdline(int argc, char **argv)
-{
- int i;
-
- if (!is_valid_ram_addr(argv))
- return;
-
- for (i = 0; i < argc; i++)
- if (is_valid_ram_addr(argv[i])) {
- strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
- strlcat(arcs_cmdline, argv[i], sizeof(arcs_cmdline));
- }
-}
-
void __init prom_init(void)
{
- ath79_prom_init_cmdline(fw_arg0, (char **)fw_arg1);
+ fw_init_cmdline();
+
+ /* Read the initrd address from the firmware environment */
+ initrd_start = fw_getenvl("initrd_start");
+ if (initrd_start) {
+ initrd_start = KSEG0ADDR(initrd_start);
+ initrd_end = initrd_start + fw_getenvl("initrd_size");
+ }
}
void __init prom_free_prom_memory(void)
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 64807a4809d0..a73c93c3d44a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -182,6 +182,11 @@ const char *get_system_type(void)
return ath79_sys_type;
}
+int get_c0_perfcount_int(void)
+{
+ return ATH79_MISC_IRQ(5);
+}
+
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/bcm3384/Makefile b/arch/mips/bcm3384/Makefile
new file mode 100644
index 000000000000..a393955cba08
--- /dev/null
+++ b/arch/mips/bcm3384/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o irq.o dma.o
diff --git a/arch/mips/bcm3384/Platform b/arch/mips/bcm3384/Platform
new file mode 100644
index 000000000000..8e1ca0819e1b
--- /dev/null
+++ b/arch/mips/bcm3384/Platform
@@ -0,0 +1,7 @@
+#
+# Broadcom BCM3384 boards
+#
+platform-$(CONFIG_BCM3384) += bcm3384/
+cflags-$(CONFIG_BCM3384) += \
+ -I$(srctree)/arch/mips/include/asm/mach-bcm3384/
+load-$(CONFIG_BCM3384) := 0xffffffff80010000
diff --git a/arch/mips/bcm3384/dma.c b/arch/mips/bcm3384/dma.c
new file mode 100644
index 000000000000..ea42012fd4f5
--- /dev/null
+++ b/arch/mips/bcm3384/dma.c
@@ -0,0 +1,81 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <dma-coherence.h>
+
+/*
+ * The BCM3384 has configurable address translation windows which allow the
+ * peripherals' DMA addresses to differ from the Zephyr-visible physical
+ * addresses, e.g. usb_dma_addr = zephyr_pa ^ 0x08000000.
+ *
+ * If our DT "memory" node has a "dma-xor-mask" property, we enable this
+ * translation using the provided mask.
+ */
+static u32 bcm3384_dma_xor_mask;
+static u32 bcm3384_dma_xor_limit = 0xffffffff;
+
+/*
+ * PCI collapses the memory hole at 0x10000000 - 0x1fffffff.
+ * On systems with a dma-xor-mask, this range is guaranteed to live above
+ * the dma-xor-limit.
+ */
+#define BCM3384_MEM_HOLE_PA 0x10000000
+#define BCM3384_MEM_HOLE_SIZE 0x10000000
+
+static dma_addr_t bcm3384_phys_to_dma(struct device *dev, phys_addr_t pa)
+{
+ if (dev && dev_is_pci(dev) &&
+ pa >= (BCM3384_MEM_HOLE_PA + BCM3384_MEM_HOLE_SIZE))
+ return pa - BCM3384_MEM_HOLE_SIZE;
+ if (pa <= bcm3384_dma_xor_limit)
+ return pa ^ bcm3384_dma_xor_mask;
+ return pa;
+}
+
+dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+{
+ return bcm3384_phys_to_dma(dev, virt_to_phys(addr));
+}
+
+dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+{
+ return bcm3384_phys_to_dma(dev, page_to_phys(page));
+}
+
+unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ if (dev && dev_is_pci(dev) &&
+ dma_addr >= BCM3384_MEM_HOLE_PA)
+ return dma_addr + BCM3384_MEM_HOLE_SIZE;
+ if ((dma_addr ^ bcm3384_dma_xor_mask) <= bcm3384_dma_xor_limit)
+ return dma_addr ^ bcm3384_dma_xor_mask;
+ return dma_addr;
+}
+
+static int __init bcm3384_init_dma_xor(void)
+{
+ struct device_node *np = of_find_node_by_type(NULL, "memory");
+
+ if (!np)
+ return 0;
+
+ of_property_read_u32(np, "dma-xor-mask", &bcm3384_dma_xor_mask);
+ of_property_read_u32(np, "dma-xor-limit", &bcm3384_dma_xor_limit);
+
+ of_node_put(np);
+ return 0;
+}
+arch_initcall(bcm3384_init_dma_xor);
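
Outside the PCI memory hole, the two translations above are inverses of each other: phys-to-dma XORs addresses at or below dma-xor-limit with dma-xor-mask, and dma-to-phys applies the same XOR whenever the result would land back under the limit. The userspace sketch below round-trips a few sample addresses using the mask/limit values from the bcm93384wvg.dts further down; the PCI hole adjustment is left out for brevity, and no kernel APIs are involved.

  /* Userspace sketch of the BCM3384 XOR-based DMA address translation. */
  #include <stdio.h>
  #include <stdint.h>

  static const uint32_t xor_mask  = 0x08000000;  /* from bcm93384wvg.dts */
  static const uint32_t xor_limit = 0x0fffffff;

  static uint32_t phys_to_dma(uint32_t pa)
  {
      if (pa <= xor_limit)
          return pa ^ xor_mask;
      return pa;
  }

  static uint32_t dma_to_phys(uint32_t dma)
  {
      if ((dma ^ xor_mask) <= xor_limit)
          return dma ^ xor_mask;
      return dma;
  }

  int main(void)
  {
      uint32_t samples[] = { 0x00000000, 0x04000000, 0x0c000000, 0x20000000 };
      int i;

      for (i = 0; i < 4; i++) {
          uint32_t dma = phys_to_dma(samples[i]);

          printf("pa 0x%08x -> dma 0x%08x -> pa 0x%08x\n",
                 (unsigned int)samples[i], (unsigned int)dma,
                 (unsigned int)dma_to_phys(dma));
      }
      return 0;
  }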
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
new file mode 100644
index 000000000000..0fb5134fb832
--- /dev/null
+++ b/arch/mips/bcm3384/irq.c
@@ -0,0 +1,193 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Partially based on arch/mips/ralink/irq.c
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/bmips.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+/* INTC register offsets */
+#define INTC_REG_ENABLE 0x00
+#define INTC_REG_STATUS 0x04
+
+#define MAX_WORDS 2
+#define IRQS_PER_WORD 32
+
+struct bcm3384_intc {
+ int n_words;
+ void __iomem *reg[MAX_WORDS];
+ u32 enable[MAX_WORDS];
+ spinlock_t lock;
+};
+
+static void bcm3384_intc_irq_unmask(struct irq_data *d)
+{
+ struct bcm3384_intc *priv = d->domain->host_data;
+ unsigned long flags;
+ int idx = d->hwirq / IRQS_PER_WORD;
+ int bit = d->hwirq % IRQS_PER_WORD;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->enable[idx] |= BIT(bit);
+ __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void bcm3384_intc_irq_mask(struct irq_data *d)
+{
+ struct bcm3384_intc *priv = d->domain->host_data;
+ unsigned long flags;
+ int idx = d->hwirq / IRQS_PER_WORD;
+ int bit = d->hwirq % IRQS_PER_WORD;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->enable[idx] &= ~BIT(bit);
+ __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct irq_chip bcm3384_intc_irq_chip = {
+ .name = "INTC",
+ .irq_unmask = bcm3384_intc_irq_unmask,
+ .irq_mask = bcm3384_intc_irq_mask,
+ .irq_mask_ack = bcm3384_intc_irq_mask,
+};
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static void bcm3384_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_get_handler_data(irq);
+ struct bcm3384_intc *priv = domain->host_data;
+ unsigned long flags;
+ unsigned int idx;
+
+ for (idx = 0; idx < priv->n_words; idx++) {
+ unsigned long pending;
+ int hwirq;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pending = __raw_readl(priv->reg[idx] + INTC_REG_STATUS) &
+ priv->enable[idx];
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ generic_handle_irq(irq_find_mapping(domain,
+ hwirq + idx * IRQS_PER_WORD));
+ }
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending =
+ (read_c0_status() & read_c0_cause() & ST0_IM) >> STATUSB_IP0;
+ int bit;
+
+ for_each_set_bit(bit, &pending, 8)
+ do_IRQ(MIPS_CPU_IRQ_BASE + bit);
+}
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &bcm3384_intc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = intc_map,
+};
+
+static int __init ioremap_one_pair(struct bcm3384_intc *priv,
+ struct device_node *node,
+ int idx)
+{
+ struct resource res;
+
+ if (of_address_to_resource(node, idx, &res))
+ return 0;
+
+ if (request_mem_region(res.start, resource_size(&res),
+ res.name) < 0)
+ pr_err("Failed to request INTC register region\n");
+
+ priv->reg[idx] = ioremap_nocache(res.start, resource_size(&res));
+ if (!priv->reg[idx])
+ panic("Failed to ioremap INTC register range");
+
+ /* start up with everything masked before we hook the parent IRQ */
+ __raw_writel(0, priv->reg[idx] + INTC_REG_ENABLE);
+ priv->enable[idx] = 0;
+
+ return IRQS_PER_WORD;
+}
+
+static int __init intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain;
+ unsigned int parent_irq, n_irqs = 0;
+ struct bcm3384_intc *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ panic("Failed to allocate bcm3384_intc struct");
+
+ spin_lock_init(&priv->lock);
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq)
+ panic("Failed to get INTC IRQ");
+
+ n_irqs += ioremap_one_pair(priv, node, 0);
+ n_irqs += ioremap_one_pair(priv, node, 1);
+
+ if (!n_irqs)
+ panic("Failed to map INTC registers");
+
+ priv->n_words = n_irqs / IRQS_PER_WORD;
+ domain = irq_domain_add_linear(node, n_irqs, &irq_domain_ops, priv);
+ if (!domain)
+ panic("Failed to add irqdomain");
+
+ irq_set_chained_handler(parent_irq, bcm3384_intc_irq_handler);
+ irq_set_handler_data(parent_irq, domain);
+
+ return 0;
+}
+
+static struct of_device_id of_irq_ids[] __initdata = {
+ { .compatible = "mti,cpu-interrupt-controller",
+ .data = mips_cpu_intc_init },
+ { .compatible = "brcm,bcm3384-intc",
+ .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ bmips_tp1_irqs = 0;
+ of_irq_init(of_irq_ids);
+}
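
The mask/unmask helpers above drive up to two 32-bit enable registers, so each hardware IRQ number is decomposed into a register (word) index and a bit position using IRQS_PER_WORD. A quick stand-alone check of that decomposition, with no irq_chip or register access involved:

  /* Stand-alone check of the hwirq -> (enable word, bit) split used above. */
  #include <stdio.h>

  #define IRQS_PER_WORD 32

  int main(void)
  {
      int hwirqs[] = { 0, 2, 29, 31, 32, 41, 63 };
      int i;

      for (i = 0; i < 7; i++) {
          int idx = hwirqs[i] / IRQS_PER_WORD;  /* which register pair */
          int bit = hwirqs[i] % IRQS_PER_WORD;  /* bit within that register */

          printf("hwirq %2d -> word %d, bit %2d (mask 0x%08x)\n",
                 hwirqs[i], idx, bit, 1u << bit);
      }
      return 0;
  }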
diff --git a/arch/mips/bcm3384/setup.c b/arch/mips/bcm3384/setup.c
new file mode 100644
index 000000000000..d84b8400b874
--- /dev/null
+++ b/arch/mips/bcm3384/setup.c
@@ -0,0 +1,97 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/clk-provider.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <asm/addrspace.h>
+#include <asm/bmips.h>
+#include <asm/bootinfo.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+
+void __init prom_init(void)
+{
+ register_bmips_smp_ops();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+const char *get_system_type(void)
+{
+ return "BCM3384";
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+
+ np = of_find_node_by_name(NULL, "cpus");
+ if (!np)
+ panic("missing 'cpus' DT node");
+ if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
+ panic("missing 'mips-hpt-frequency' property");
+ of_node_put(np);
+
+ mips_hpt_frequency = freq;
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb = __dtb_start;
+
+ set_io_port_base(0);
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ /* intended to somewhat resemble ARM; see Documentation/arm/Booting */
+ if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
+ dtb = phys_to_virt(fw_arg2);
+
+ __dt_setup_arch(dtb);
+
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+void __init device_tree_init(void)
+{
+ struct device_node *np;
+
+ unflatten_and_copy_device_tree();
+
+ /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
+ np = of_find_node_by_name(NULL, "cpus");
+ if (np && of_get_available_child_count(np) <= 1)
+ bmips_smp_enabled = 0;
+ of_node_put(np);
+}
+
+int __init plat_of_setup(void)
+{
+ return __dt_register_buses("brcm,bcm3384", "simple-bus");
+}
+
+arch_initcall(plat_of_setup);
+
+static int __init plat_dev_init(void)
+{
+ of_clk_init(NULL);
+ return 0;
+}
+
+device_initcall(plat_dev_init);
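
plat_mem_setup() above keeps the built-in DTB unless the bootloader follows the ARM-like convention of passing fw_arg0 == 0, fw_arg1 == 0xffffffff and a physical DTB pointer in fw_arg2. Below is a condensed model of that selection only; phys_to_virt() is replaced by a KSEG0-style stand-in and the argument values are invented.

  /* Sketch of the DTB-selection convention used in plat_mem_setup() above. */
  #include <stdio.h>
  #include <stdint.h>

  static char builtin_dtb[16];  /* stands in for __dtb_start */

  static void *fake_phys_to_virt(uint32_t pa)
  {
      return (void *)(uintptr_t)(0x80000000u | pa);  /* assumed KSEG0-style map */
  }

  static void *pick_dtb(uint32_t arg0, uint32_t arg1, uint32_t arg2)
  {
      void *dtb = builtin_dtb;

      /* ARM-like convention: r0 = 0, r1 = ~0, r2 = physical DTB address */
      if (arg0 == 0 && arg1 == 0xffffffff)
          dtb = fake_phys_to_virt(arg2);
      return dtb;
  }

  int main(void)
  {
      printf("built-in: %p\n", pick_dtb(1, 2, 3));
      printf("firmware: %p\n", pick_dtb(0, 0xffffffff, 0x01000000));
      return 0;
  }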
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index f1cc9d0495d8..ea909a56a3ee 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -6,12 +6,18 @@
/* prom.c */
void __init bcm47xx_prom_highmem_init(void);
+/* sprom.c */
+void bcm47xx_sprom_register_fallbacks(void);
+
/* buttons.c */
int __init bcm47xx_buttons_register(void);
/* leds.c */
void __init bcm47xx_leds_register(void);
+/* setup.c */
+void __init bcm47xx_bus_setup(void);
+
/* workarounds.c */
void __init bcm47xx_workarounds(void);
diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c
index e0585b76ec19..21b4497f09be 100644
--- a/arch/mips/bcm47xx/irq.c
+++ b/arch/mips/bcm47xx/irq.c
@@ -22,6 +22,8 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "bcm47xx_private.h"
+
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -65,6 +67,12 @@ DEFINE_HWx_IRQDISPATCH(7)
void __init arch_init_irq(void)
{
+ /*
+ * This is the first arch callback after mm_init (we can use kmalloc),
+ * so let's finish bus initialization now.
+ */
+ bcm47xx_bus_setup();
+
#ifdef CONFIG_BCM47XX_BCMA
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core,
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 2bed73a684ae..c5c381c43f17 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -13,24 +13,35 @@
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/ssb/ssb.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <asm/addrspace.h>
+#include <linux/mtd/mtd.h>
#include <bcm47xx_nvram.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
+
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_SPACE 0x8000
+
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+struct nvram_header {
+ u32 magic;
+ u32 len;
+ u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ u32 config_ncdl; /* ncdl values for memc */
+};
static char nvram_buf[NVRAM_SPACE];
static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
-static u32 find_nvram_size(u32 end)
+static u32 find_nvram_size(void __iomem *end)
{
- struct nvram_header *header;
+ struct nvram_header __iomem *header;
int i;
for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
- header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]);
- if (header->magic == NVRAM_HEADER)
+ header = (struct nvram_header *)(end - nvram_sizes[i]);
+ if (header->magic == NVRAM_MAGIC)
return nvram_sizes[i];
}
@@ -38,36 +49,40 @@ static u32 find_nvram_size(u32 end)
}
/* Probe for NVRAM header */
-static int nvram_find_and_copy(u32 base, u32 lim)
+static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
{
- struct nvram_header *header;
+ struct nvram_header __iomem *header;
int i;
u32 off;
u32 *src, *dst;
u32 size;
+ if (nvram_buf[0]) {
+ pr_warn("nvram already initialized\n");
+ return -EEXIST;
+ }
+
/* TODO: when nvram is on nand flash check for bad blocks first. */
off = FLASH_MIN;
while (off <= lim) {
/* Windowed flash access */
- size = find_nvram_size(base + off);
+ size = find_nvram_size(iobase + off);
if (size) {
- header = (struct nvram_header *)KSEG1ADDR(base + off -
- size);
+ header = (struct nvram_header *)(iobase + off - size);
goto found;
}
off <<= 1;
}
/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
- header = (struct nvram_header *) KSEG1ADDR(base + 4096);
- if (header->magic == NVRAM_HEADER) {
+ header = (struct nvram_header *)(iobase + 4096);
+ if (header->magic == NVRAM_MAGIC) {
size = NVRAM_SPACE;
goto found;
}
- header = (struct nvram_header *) KSEG1ADDR(base + 1024);
- if (header->magic == NVRAM_HEADER) {
+ header = (struct nvram_header *)(iobase + 1024);
+ if (header->magic == NVRAM_MAGIC) {
size = NVRAM_SPACE;
goto found;
}
@@ -94,71 +109,73 @@ found:
return 0;
}
-#ifdef CONFIG_BCM47XX_SSB
-static int nvram_init_ssb(void)
+/*
+ * On bcm47xx we need access to the NVRAM very early, so we can't use the MTD
+ * subsystem to access the flash. We can't even use a platform device / driver
+ * to store the memory offset.
+ * To handle this we provide the following symbol. It's supposed to be called
+ * as soon as we get info about the flash device, before any NVRAM entry is needed.
+ */
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
- struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
- u32 base;
- u32 lim;
-
- if (mcore->pflash.present) {
- base = mcore->pflash.window;
- lim = mcore->pflash.window_size;
- } else {
- pr_err("Couldn't find supported flash memory\n");
- return -ENXIO;
- }
+ void __iomem *iobase;
+ int err;
- return nvram_find_and_copy(base, lim);
-}
-#endif
+ iobase = ioremap_nocache(base, lim);
+ if (!iobase)
+ return -ENOMEM;
-#ifdef CONFIG_BCM47XX_BCMA
-static int nvram_init_bcma(void)
-{
- struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc;
- u32 base;
- u32 lim;
-
-#ifdef CONFIG_BCMA_NFLASH
- if (cc->nflash.boot) {
- base = BCMA_SOC_FLASH1;
- lim = BCMA_SOC_FLASH1_SZ;
- } else
-#endif
- if (cc->pflash.present) {
- base = cc->pflash.window;
- lim = cc->pflash.window_size;
-#ifdef CONFIG_BCMA_SFLASH
- } else if (cc->sflash.present) {
- base = cc->sflash.window;
- lim = cc->sflash.size;
-#endif
- } else {
- pr_err("Couldn't find supported flash memory\n");
- return -ENXIO;
- }
+ err = nvram_find_and_copy(iobase, lim);
+
+ iounmap(iobase);
- return nvram_find_and_copy(base, lim);
+ return err;
}
-#endif
static int nvram_init(void)
{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- return nvram_init_ssb();
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- return nvram_init_bcma();
-#endif
+#ifdef CONFIG_MTD
+ struct mtd_info *mtd;
+ struct nvram_header header;
+ size_t bytes_read;
+ int err, i;
+
+ mtd = get_mtd_device_nm("nvram");
+ if (IS_ERR(mtd))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+ loff_t from = mtd->size - nvram_sizes[i];
+
+ if (from < 0)
+ continue;
+
+ err = mtd_read(mtd, from, sizeof(header), &bytes_read,
+ (uint8_t *)&header);
+ if (!err && header.magic == NVRAM_MAGIC) {
+ u8 *dst = (uint8_t *)nvram_buf;
+ size_t len = header.len;
+
+ if (header.len > NVRAM_SPACE) {
+ pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory; copying only the first %i bytes\n",
+ header.len, NVRAM_SPACE);
+ len = NVRAM_SPACE;
+ }
+
+ err = mtd_read(mtd, from, len, &bytes_read, dst);
+ if (err)
+ return err;
+ memset(dst + bytes_read, 0x0, NVRAM_SPACE - bytes_read);
+
+ return 0;
+ }
}
+#endif
+
return -ENXIO;
}
-int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
{
char *var, *value, *end, *eq;
int err;
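
nvram_find_and_copy() above doubles the probe offset starting from FLASH_MIN and, for each candidate window end, checks the three possible NVRAM sizes for a header. The sketch below only prints that probe order; the window limit is made up and no flash is accessed.

  /* Sketch of the offset-doubling NVRAM scan performed above; probe order only. */
  #include <stdio.h>
  #include <stdint.h>

  #define FLASH_MIN 0x00020000  /* minimum flash size, as above */

  int main(void)
  {
      const uint32_t sizes[] = { 0x8000, 0xF000, 0x10000 };
      uint32_t lim = 0x00800000;  /* assume an 8 MiB window */
      uint32_t off;
      int i;

      for (off = FLASH_MIN; off <= lim; off <<= 1)
          for (i = 0; i < 3; i++)
              printf("probe header at 0x%08x (window end 0x%08x - size 0x%x)\n",
                     (unsigned int)(off - sizes[i]), (unsigned int)off,
                     (unsigned int)sizes[i]);
      return 0;
  }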
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index c00585d915bc..e43b5046cb30 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -102,23 +102,6 @@ static void bcm47xx_machine_halt(void)
}
#ifdef CONFIG_BCM47XX_SSB
-static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
-{
- char prefix[10];
-
- if (bus->bustype == SSB_BUSTYPE_PCI) {
- memset(out, 0, sizeof(struct ssb_sprom));
- snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
- bus->host_pci->bus->number + 1,
- PCI_SLOT(bus->host_pci->devfn));
- bcm47xx_fill_sprom(out, prefix, false);
- return 0;
- } else {
- printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n");
- return -EINVAL;
- }
-}
-
static int bcm47xx_get_invariants(struct ssb_bus *bus,
struct ssb_init_invariants *iv)
{
@@ -144,11 +127,6 @@ static void __init bcm47xx_register_ssb(void)
char buf[100];
struct ssb_mipscore *mcore;
- err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb);
- if (err)
- printk(KERN_WARNING "bcm47xx: someone else already registered"
- " a ssb SPROM callback handler (err %d)\n", err);
-
err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE,
bcm47xx_get_invariants);
if (err)
@@ -171,56 +149,21 @@ static void __init bcm47xx_register_ssb(void)
#endif
#ifdef CONFIG_BCM47XX_BCMA
-static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
-{
- char prefix[10];
- struct bcma_device *core;
-
- switch (bus->hosttype) {
- case BCMA_HOSTTYPE_PCI:
- memset(out, 0, sizeof(struct ssb_sprom));
- snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
- bus->host_pci->bus->number + 1,
- PCI_SLOT(bus->host_pci->devfn));
- bcm47xx_fill_sprom(out, prefix, false);
- return 0;
- case BCMA_HOSTTYPE_SOC:
- memset(out, 0, sizeof(struct ssb_sprom));
- core = bcma_find_core(bus, BCMA_CORE_80211);
- if (core) {
- snprintf(prefix, sizeof(prefix), "sb/%u/",
- core->core_index);
- bcm47xx_fill_sprom(out, prefix, true);
- } else {
- bcm47xx_fill_sprom(out, NULL, false);
- }
- return 0;
- default:
- pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
- return -EINVAL;
- }
-}
-
static void __init bcm47xx_register_bcma(void)
{
int err;
- err = bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma);
- if (err)
- pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err);
-
err = bcma_host_soc_register(&bcm47xx_bus.bcma);
if (err)
panic("Failed to register BCMA bus (err %d)", err);
-
- err = bcma_host_soc_init(&bcm47xx_bus.bcma);
- if (err)
- panic("Failed to initialize BCMA bus (err %d)", err);
-
- bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, NULL);
}
#endif
+/*
+ * Memory setup is done in the early part of MIPS's arch_mem_init. It's supposed
+ * to detect memory and record it with add_memory_region.
+ * Any extra initialization performed here must not use kmalloc or bootmem.
+ */
void __init plat_mem_setup(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -229,6 +172,7 @@ void __init plat_mem_setup(void)
printk(KERN_INFO "bcm47xx: using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
+ bcm47xx_sprom_register_fallbacks();
bcm47xx_register_bcma();
bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
#ifdef CONFIG_HIGHMEM
@@ -239,6 +183,7 @@ void __init plat_mem_setup(void)
printk(KERN_INFO "bcm47xx: using ssb bus\n");
#ifdef CONFIG_BCM47XX_SSB
bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
+ bcm47xx_sprom_register_fallbacks();
bcm47xx_register_ssb();
bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id);
#endif
@@ -247,6 +192,28 @@ void __init plat_mem_setup(void)
_machine_restart = bcm47xx_machine_restart;
_machine_halt = bcm47xx_machine_halt;
pm_power_off = bcm47xx_machine_halt;
+}
+
+/*
+ * This finishes bus initialization, doing things that were not possible without
+ * kmalloc. Make sure to call it late enough (after mm_init).
+ */
+void __init bcm47xx_bus_setup(void)
+{
+#ifdef CONFIG_BCM47XX_BCMA
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
+ int err;
+
+ err = bcma_host_soc_init(&bcm47xx_bus.bcma);
+ if (err)
+ panic("Failed to initialize BCMA bus (err %d)", err);
+
+ bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo,
+ NULL);
+ }
+#endif
+
+ /* With bus initialized we can access NVRAM and detect the board */
bcm47xx_board_detect();
mips_set_machine_name(bcm47xx_board_get_name());
}
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 41226b68de3d..2eff7fe99c6b 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -136,6 +136,20 @@ static void nvram_read_leddc(const char *prefix, const char *name,
*leddc_off_time = (val >> 16) & 0xff;
}
+static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
+{
+ if (strchr(buf, ':'))
+ sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
+ &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+ &macaddr[5]);
+ else if (strchr(buf, '-'))
+ sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
+ &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+ &macaddr[5]);
+ else
+ pr_warn("Cannot parse MAC address: %s\n", buf);
+}
+
static void nvram_read_macaddr(const char *prefix, const char *name,
u8 val[6], bool fallback)
{
@@ -801,3 +815,71 @@ void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
}
#endif
+
+#if defined(CONFIG_BCM47XX_SSB)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+
+ if (bus->bustype == SSB_BUSTYPE_PCI) {
+ memset(out, 0, sizeof(struct ssb_sprom));
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix, false);
+ return 0;
+ } else {
+ pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+#endif
+
+#if defined(CONFIG_BCM47XX_BCMA)
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+ struct bcma_device *core;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix, false);
+ return 0;
+ case BCMA_HOSTTYPE_SOC:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ core = bcma_find_core(bus, BCMA_CORE_80211);
+ if (core) {
+ snprintf(prefix, sizeof(prefix), "sb/%u/",
+ core->core_index);
+ bcm47xx_fill_sprom(out, prefix, true);
+ } else {
+ bcm47xx_fill_sprom(out, NULL, false);
+ }
+ return 0;
+ default:
+ pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+#endif
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handlers very early, so we
+ * can't use anything like a platform device / driver for this.
+ */
+void bcm47xx_sprom_register_fallbacks(void)
+{
+#if defined(CONFIG_BCM47XX_SSB)
+ if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+ pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if defined(CONFIG_BCM47XX_BCMA)
+ if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+ pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+}
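
bcm47xx_nvram_parse_macaddr() above accepts both colon- and dash-separated addresses by choosing the sscanf() format from whichever separator appears in the string. The same logic as a self-contained program, with the one difference that it reports a parse failure to the caller instead of only warning:

  /* Self-contained version of the MAC-address parsing logic used above. */
  #include <stdio.h>
  #include <string.h>

  static int parse_macaddr(const char *buf, unsigned char macaddr[6])
  {
      int n = 0;

      if (strchr(buf, ':'))
          n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
                     &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
                     &macaddr[5]);
      else if (strchr(buf, '-'))
          n = sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
                     &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
                     &macaddr[5]);
      return n == 6 ? 0 : -1;  /* unlike the kernel helper, report failure */
  }

  int main(void)
  {
      unsigned char mac[6];

      if (!parse_macaddr("00-11-22-aa-bb-cc", mac))
          printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
      return 0;
  }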
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 536f64443031..307ec8b8e41c 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -263,7 +263,7 @@ static unsigned int detect_memory_size(void)
if (BCMCPU_IS_6345()) {
val = bcm_sdram_readl(SDRAM_MBASE_REG);
- return (val * 8 * 1024 * 1024);
+ return val * 8 * 1024 * 1024;
}
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index ca9c90e2cabf..4f49fa477f14 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_BCM3384) += bcm93384wvg.dtb
dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb
diff --git a/arch/mips/boot/dts/bcm3384.dtsi b/arch/mips/boot/dts/bcm3384.dtsi
new file mode 100644
index 000000000000..21b074a99c94
--- /dev/null
+++ b/arch/mips/boot/dts/bcm3384.dtsi
@@ -0,0 +1,109 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3384", "brcm,bcm33843";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* On BMIPS5000 this is 1/8th of the CPU core clock */
+ mips-hpt-frequency = <100000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ periph_clk: periph_clk@0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: cpu_intc@0 {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ periph_intc: periph_intc@14e00038 {
+ compatible = "brcm,bcm3384-intc";
+ reg = <0x14e00038 0x8 0x14e00340 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
+
+ zmips_intc: zmips_intc@104b0060 {
+ compatible = "brcm,bcm3384-intc";
+ reg = <0x104b0060 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <29>;
+ };
+
+ iop_intc: iop_intc@14e00058 {
+ compatible = "brcm,bcm3384-intc";
+ reg = <0x14e00058 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <6>;
+ };
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@15400300 {
+ compatible = "brcm,bcm3384-ehci", "generic-ehci";
+ reg = <0x15400300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ ohci0: usb@15400400 {
+ compatible = "brcm,bcm3384-ohci", "generic-ohci";
+ reg = <0x15400400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/bcm93384wvg.dts b/arch/mips/boot/dts/bcm93384wvg.dts
new file mode 100644
index 000000000000..831741179212
--- /dev/null
+++ b/arch/mips/boot/dts/bcm93384wvg.dts
@@ -0,0 +1,32 @@
+/dts-v1/;
+
+/include/ "bcm3384.dtsi"
+
+/ {
+ compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
+ model = "Broadcom BCM93384WVG";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x04000000>;
+ dma-xor-mask = <0x08000000>;
+ dma-xor-limit = <0x0fffffff>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 02f244475207..3778655c4a37 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -262,8 +262,8 @@ char *octeon_swiotlb;
void __init plat_swiotlb_setup(void)
{
int i;
- phys_t max_addr;
- phys_t addr_size;
+ phys_addr_t max_addr;
+ phys_addr_t addr_size;
size_t swiotlbsize;
unsigned long swiotlb_nslabs;
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index f4c1b36fdf65..e15b049b3bd7 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -28,22 +28,23 @@
#include <asm/octeon/octeon.h>
/**
- * Given the chip processor ID from COP0, this function returns a
- * string representing the chip model number. The string is of the
- * form CNXXXXpX.X-FREQ-SUFFIX.
- * - XXXX = The chip model number
- * - X.X = Chip pass number
- * - FREQ = Current frequency in Mhz
- * - SUFFIX = NSP, EXP, SCP, SSP, or CP
- *
- * @chip_id: Chip ID
+ * Read a byte of fuse data
+ * @byte_addr: address to read
*
- * Returns Model string
+ * Returns fuse value: 0 or 1
*/
-const char *octeon_model_get_string(uint32_t chip_id)
+static uint8_t __init cvmx_fuse_read_byte(int byte_addr)
{
- static char buffer[32];
- return octeon_model_get_string_buffer(chip_id, buffer);
+ union cvmx_mio_fus_rcmd read_cmd;
+
+ read_cmd.u64 = 0;
+ read_cmd.s.addr = byte_addr;
+ read_cmd.s.pend = 1;
+ cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+ while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
+ && read_cmd.s.pend)
+ ;
+ return read_cmd.s.dat;
}
/*
@@ -51,7 +52,8 @@ const char *octeon_model_get_string(uint32_t chip_id)
* as running early in u-boot static/global variables don't work when
* running from flash.
*/
-const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
+static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
+ char *buffer)
{
const char *family;
const char *core_model;
@@ -407,3 +409,22 @@ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
return buffer;
}
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in Mhz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *__init octeon_model_get_string(uint32_t chip_id)
+{
+ static char buffer[32];
+ return octeon_model_get_string_buffer(chip_id, buffer);
+}
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 6df0f4d8f197..b67ddf0f8bcd 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -7,22 +7,27 @@
* Copyright (C) 2008 Wind River Systems
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/usb.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-rnm-defs.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
@@ -68,6 +73,229 @@ device_initcall(octeon_rng_device_init);
#ifdef CONFIG_USB
+static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
+
+static int octeon2_usb_clock_start_cnt;
+
+static void octeon2_usb_clocks_start(void)
+{
+ u64 div;
+ union cvmx_uctlx_if_ena if_ena;
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
+ union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
+ int i;
+ unsigned long io_clk_64_to_ns;
+
+
+ mutex_lock(&octeon2_usb_clocks_mutex);
+
+ octeon2_usb_clock_start_cnt++;
+ if (octeon2_usb_clock_start_cnt != 1)
+ goto exit;
+
+ io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+
+ /*
+ * Step 1: Wait for the voltages to stabilize. That has surely
+ * happened before the kernel starts.
+ *
+ * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
+ */
+ if_ena.u64 = 0;
+ if_ena.s.en = 1;
+ cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+
+ /* Step 3: Configure the reference clock, PHY, and HCLK */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+
+ /*
+ * If the UCTL looks like it has already been started, skip
+ * the initialization; otherwise bus errors will occur.
+ */
+ if (clk_rst_ctl.s.hrst)
+ goto end_clock;
+ /* 3a */
+ clk_rst_ctl.s.p_por = 1;
+ clk_rst_ctl.s.hrst = 0;
+ clk_rst_ctl.s.p_prst = 0;
+ clk_rst_ctl.s.h_clkdiv_rst = 0;
+ clk_rst_ctl.s.o_clkdiv_rst = 0;
+ clk_rst_ctl.s.h_clkdiv_en = 0;
+ clk_rst_ctl.s.o_clkdiv_en = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3b */
+ /* 12MHz crystal. */
+ clk_rst_ctl.s.p_refclk_sel = 0;
+ clk_rst_ctl.s.p_refclk_div = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3c */
+ div = octeon_get_io_clock_rate() / 130000000ull;
+
+ switch (div) {
+ case 0:
+ div = 1;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ break;
+ case 5:
+ div = 4;
+ break;
+ case 6:
+ case 7:
+ div = 6;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ div = 8;
+ break;
+ default:
+ div = 12;
+ break;
+ }
+ clk_rst_ctl.s.h_div = div;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Read it back. */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ clk_rst_ctl.s.h_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* 3d */
+ clk_rst_ctl.s.h_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3e: delay 64 io clocks */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 4: Program the power-on reset field in the UCTL
+ * clock-reset-control register.
+ */
+ clk_rst_ctl.s.p_por = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 5: Wait 1 ms for the PHY clock to start. */
+ mdelay(1);
+
+ /*
+ * Step 6: Program the reset input from automatic test
+ * equipment field in the UPHY CSR
+ */
+ uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
+ uphy_ctl_status.s.ate_reset = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /* Step 7: Wait for at least 10ns. */
+ ndelay(10);
+
+ /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
+ uphy_ctl_status.s.ate_reset = 0;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /*
+ * Step 9: Wait for at least 20ns for UPHY to output PHY clock
+ * signals and OHCI_CLK48
+ */
+ ndelay(20);
+
+ /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
+ /* 10a */
+ clk_rst_ctl.s.o_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10b */
+ clk_rst_ctl.s.o_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10c */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 11: Program the PHY reset field:
+ * UCTL0_CLK_RST_CTL[P_PRST] = 1
+ */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 12: Wait 1 uS. */
+ udelay(1);
+
+ /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
+ clk_rst_ctl.s.hrst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+end_clock:
+ /* Now we can set some other registers. */
+
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain a compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
+ /* Set uSOF cycle period to 60,000 bits. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+exit:
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static void octeon2_usb_clocks_stop(void)
+{
+ mutex_lock(&octeon2_usb_clocks_mutex);
+ octeon2_usb_clock_start_cnt--;
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static int octeon_ehci_power_on(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_start();
+ return 0;
+}
+
+static void octeon_ehci_power_off(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static struct usb_ehci_pdata octeon_ehci_pdata = {
+ /* Octeon EHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ .big_endian_mmio = 1,
+#endif
+ .power_on = octeon_ehci_power_on,
+ .power_off = octeon_ehci_power_off,
+};
+
+static void __init octeon_ehci_hw_start(void)
+{
+ union cvmx_uctlx_ehci_ctl ehci_ctl;
+
+ octeon2_usb_clocks_start();
+
+ ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
+ /* Use 64-bit addressing. */
+ ehci_ctl.s.ehci_64b_addr_en = 1;
+ ehci_ctl.s.l2c_addr_msb = 0;
+ ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
+
+ octeon2_usb_clocks_stop();
+}
+
+static u64 octeon_ehci_dma_mask = DMA_BIT_MASK(64);
+
static int __init octeon_ehci_device_init(void)
{
struct platform_device *pd;
@@ -88,7 +316,7 @@ static int __init octeon_ehci_device_init(void)
if (octeon_is_simulation() || usb_disabled())
return 0; /* No USB in the simulator. */
- pd = platform_device_alloc("octeon-ehci", 0);
+ pd = platform_device_alloc("ehci-platform", 0);
if (!pd) {
ret = -ENOMEM;
goto out;
@@ -105,6 +333,10 @@ static int __init octeon_ehci_device_init(void)
if (ret)
goto fail;
+ pd->dev.dma_mask = &octeon_ehci_dma_mask;
+ pd->dev.platform_data = &octeon_ehci_pdata;
+ octeon_ehci_hw_start();
+
ret = platform_device_add(pd);
if (ret)
goto fail;
@@ -117,6 +349,41 @@ out:
}
device_initcall(octeon_ehci_device_init);
+static int octeon_ohci_power_on(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_start();
+ return 0;
+}
+
+static void octeon_ohci_power_off(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static struct usb_ohci_pdata octeon_ohci_pdata = {
+ /* Octeon OHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ .big_endian_mmio = 1,
+#endif
+ .power_on = octeon_ohci_power_on,
+ .power_off = octeon_ohci_power_off,
+};
+
+static void __init octeon_ohci_hw_start(void)
+{
+ union cvmx_uctlx_ohci_ctl ohci_ctl;
+
+ octeon2_usb_clocks_start();
+
+ ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
+ ohci_ctl.s.l2c_addr_msb = 0;
+ ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+ cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
+
+ octeon2_usb_clocks_stop();
+}
+
static int __init octeon_ohci_device_init(void)
{
struct platform_device *pd;
@@ -137,7 +404,7 @@ static int __init octeon_ohci_device_init(void)
if (octeon_is_simulation() || usb_disabled())
return 0; /* No USB in the simulator. */
- pd = platform_device_alloc("octeon-ohci", 0);
+ pd = platform_device_alloc("ohci-platform", 0);
if (!pd) {
ret = -ENOMEM;
goto out;
@@ -154,6 +421,9 @@ static int __init octeon_ohci_device_init(void)
if (ret)
goto fail;
+ pd->dev.platform_data = &octeon_ohci_pdata;
+ octeon_ohci_hw_start();
+
ret = platform_device_add(pd);
if (ret)
goto fail;
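
Step 3c above derives a raw divider from the I/O clock rate (targeting an HCLK of at most 130 MHz) and then snaps it to one of the divider values the UCTL supports. The quantization extracted into a plain function, with arbitrary sample clock rates and no CSR access:

  /* The HCLK divider quantization from step 3c above, as a plain function. */
  #include <stdio.h>
  #include <stdint.h>

  static uint64_t pick_h_div(uint64_t io_clock_hz)
  {
      uint64_t div = io_clock_hz / 130000000ull;

      switch (div) {
      case 0:
          div = 1;
          break;
      case 1:
      case 2:
      case 3:
      case 4:
          break;          /* already a supported divider */
      case 5:
          div = 4;
          break;
      case 6:
      case 7:
          div = 6;
          break;
      case 8:
      case 9:
      case 10:
      case 11:
          div = 8;
          break;
      default:
          div = 12;
          break;
      }
      return div;
  }

  int main(void)
  {
      uint64_t rates[] = { 100000000ull, 500000000ull, 800000000ull, 1600000000ull };
      int i;

      for (i = 0; i < 4; i++)
          printf("io clk %4llu MHz -> h_div %llu\n",
                 (unsigned long long)(rates[i] / 1000000),
                 (unsigned long long)pick_h_div(rates[i]));
      return 0;
  }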
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 5ebdb32d9a2b..94f888d3384e 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -1092,7 +1092,7 @@ static int __init edac_devinit(void)
name = edac_device_names[i];
dev = platform_device_register_simple(name, -1, NULL, 0);
if (IS_ERR(dev)) {
- pr_err("Registation of %s failed!\n", name);
+ pr_err("Registration of %s failed!\n", name);
err = PTR_ERR(dev);
}
}
@@ -1103,7 +1103,7 @@ static int __init edac_devinit(void)
dev = platform_device_register_simple("octeon_lmc_edac",
i, NULL, 0);
if (IS_ERR(dev)) {
- pr_err("Registation of octeon_lmc_edac %d failed!\n", i);
+ pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
err = PTR_ERR(dev);
}
}
diff --git a/arch/mips/configs/bcm3384_defconfig b/arch/mips/configs/bcm3384_defconfig
new file mode 100644
index 000000000000..88711c28ff32
--- /dev/null
+++ b/arch/mips/configs/bcm3384_defconfig
@@ -0,0 +1,78 @@
+CONFIG_BCM3384=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_EXPERT=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_BLK_DEV is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_USB_USBNET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_EARLYCON_FORCE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_CIFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index b2476a1c4aaa..e57058d4ec22 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -120,6 +120,9 @@ CONFIG_SPI_OCTEON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
+CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_STAGING=y
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index ffd0345780ae..6ecda64ad184 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -68,7 +68,7 @@ char *fw_getenv(char *envname)
result = fw_envp(index + 1);
break;
} else if (fw_envp(index)[i] == '=') {
- result = (fw_envp(index + 1) + i);
+ result = fw_envp(index) + i + 1;
break;
}
}
@@ -88,13 +88,13 @@ unsigned long fw_getenvl(char *envname)
{
unsigned long envl = 0UL;
char *str;
- long val;
int tmp;
str = fw_getenv(envname);
if (str) {
- tmp = kstrtol(str, 0, &val);
- envl = (unsigned long)val;
+ tmp = kstrtoul(str, 0, &envl);
+ if (tmp)
+ envl = 0;
}
return envl;
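
The fw_getenv() fix above makes the "name=value" case return a pointer just past the '=' inside the matching entry itself (fw_envp(index) + i + 1) instead of indexing off the following entry. Below is a simplified userspace model of the lookup; the two-slot "name", "value" convention mirrors the other branch visible in the hunk, and the matching loop is condensed, so treat it as a sketch rather than the kernel's implementation.

  /* Userspace model of the corrected environment lookup: entries are either
   * "name" / "value" pairs in consecutive slots or single "name=value" strings. */
  #include <stdio.h>
  #include <string.h>

  static const char *env_getenv(const char *envp[], const char *name)
  {
      size_t len = strlen(name);
      int i;

      for (i = 0; envp[i]; i++) {
          if (strncmp(envp[i], name, len))
              continue;
          if (envp[i][len] == '\0')      /* "name" followed by a "value" slot */
              return envp[i + 1];
          if (envp[i][len] == '=')       /* "name=value" in one slot */
              return envp[i] + len + 1;  /* point just past the '=' */
      }
      return NULL;
  }

  int main(void)
  {
      const char *envp[] = { "memsize", "0x08000000",
                             "initrd_start=0x00800000", NULL };

      printf("memsize      -> %s\n", env_getenv(envp, "memsize"));
      printf("initrd_start -> %s\n", env_getenv(envp, "initrd_start"));
      return 0;
  }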
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 72e1cf1cab00..200efeac4181 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -3,7 +3,6 @@ generic-y += cputime.h
generic-y += current.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..857da84cfc92 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
+#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>
@@ -40,95 +41,97 @@
*/
#define atomic_set(v, i) ((v)->counter = (i))
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
-{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- int temp; \
- \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- "1: ll %0, %1 # atomic_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " beqzl %0, 1b \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- " ll %0, %1 # atomic_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-} \
-
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
-{ \
- int result; \
- \
- smp_mb__before_llsc(); \
- \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- int temp; \
- \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- "1: ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- " ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- smp_llsc_mb(); \
- \
- return result; \
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %0, %1 # atomic_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %0, %1 # atomic_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
}
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
@@ -315,96 +320,98 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define atomic64_set(v, i) ((v)->counter = (i))
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
-{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- long temp; \
- \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- "1: lld %0, %1 # atomic64_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " beqzl %0, 1b \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- " lld %0, %1 # atomic64_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-} \
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
-{ \
- long result; \
- \
- smp_mb__before_llsc(); \
- \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- long temp; \
- \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set arch=r4000 \n" \
- " lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
- : "Ir" (i), "m" (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- smp_llsc_mb(); \
- \
- return result; \
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %0, %1 # atomic64_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %0, %1 # atomic64_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+}
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
ATOMIC64_OP_RETURN(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
@@ -415,7 +422,8 @@ ATOMIC64_OPS(sub, -=, dsubu)
#undef ATOMIC64_OP
/*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+ * variable
* @i: integer value to subtract
* @v: pointer of type atomic64_t
*
@@ -443,8 +451,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "=" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
long temp;
@@ -461,7 +470,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
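
The atomic.h hunks above only change the inline-asm memory constraints (the old "m"/"R" operands become "+"/"=" GCC_OFF12_ASM()); the macro-generation scheme itself is untouched. For readers unfamiliar with that scheme, here is a minimal user-space sketch of how ATOMIC_OPS() stamps out atomic_add()/atomic_add_return() and friends via token pasting. It uses a plain C body with no locking at all, so it is a sketch of the generation pattern only, not of the SMP-safe LL/SC implementation in the patch.

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Sketch of the generator pattern: the real kernel bodies are LL/SC
 * inline assembly (using the GCC_OFF12_ASM() constraint) or an
 * irq-disabled fallback; this plain version is not SMP-safe. */
#define ATOMIC_OP(op, c_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)              \
{                                                               \
        v->counter c_op i;                                      \
}

#define ATOMIC_OP_RETURN(op, c_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)      \
{                                                               \
        v->counter c_op i;                                      \
        return v->counter;                                      \
}

#define ATOMIC_OPS(op, c_op)                                    \
        ATOMIC_OP(op, c_op)                                     \
        ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

int main(void)
{
        atomic_t v = { .counter = 5 };

        atomic_add(3, &v);                        /* v.counter == 8 */
        printf("%d\n", atomic_sub_return(2, &v)); /* prints 6 */
        return 0;
}
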
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index d0101dd0575e..2b8bbbcb9be0 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,58 +10,6 @@
#include <asm/addrspace.h>
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
@@ -127,20 +75,21 @@
#include <asm/wbflush.h>
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
#define mb() wbflush()
#define iob() wbflush()
#else /* !CONFIG_CPU_HAS_WB */
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
#define mb() fast_mb()
#define iob() fast_iob()
#endif /* !CONFIG_CPU_HAS_WB */
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define dma_wmb() fast_wmb()
+#define dma_rmb() fast_rmb()
+
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
# define smp_mb() __sync()
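
The barrier.h hunk adds dma_wmb()/dma_rmb() and, on MIPS, simply maps them to the existing fast_wmb()/fast_rmb(). They are meant to order CPU accesses to coherent DMA memory, the classic case being writing a descriptor's payload before setting its ownership/valid flag. Below is a hedged user-space analogue of that publish/consume ordering using C11 fences rather than the kernel macros; it only illustrates why the write-then-flag ordering matters, not the kernel API itself.

#include <stdatomic.h>
#include <stdio.h>

/* Shared "descriptor": payload plus an ownership/valid flag. */
static int payload;
static atomic_int ready;

static void producer(void)
{
        payload = 42;                               /* fill the descriptor */
        atomic_thread_fence(memory_order_release);  /* analogue of dma_wmb() */
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

static void consumer(void)
{
        if (atomic_load_explicit(&ready, memory_order_relaxed)) {
                atomic_thread_fence(memory_order_acquire); /* analogue of dma_rmb() */
                printf("%d\n", payload);
        }
}

int main(void)
{
        producer();
        consumer();
        return 0;
}
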
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index bae6b0fa8ab5..6663bcca9d0c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
+#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
@@ -78,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
+ : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
@@ -87,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
" " __LL "%0, %1 # set_bit \n"
" " __INS "%0, %3, %2, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (bit), "r" (~0));
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
@@ -99,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
@@ -130,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
@@ -139,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" " __LL "%0, %1 # clear_bit \n"
" " __INS "%0, $0, %2, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (bit));
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
@@ -151,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else
@@ -196,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -209,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
@@ -244,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
@@ -258,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr,
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -312,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -354,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
#ifdef CONFIG_CPU_MIPSR2
@@ -368,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" " __EXT "%2, %0, %3, 1 \n"
" " __INS "%0, $0, %3, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "ir" (bit)
: "memory");
} while (unlikely(!temp));
@@ -385,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -427,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
@@ -441,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr,
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index cbaccebf5065..30939b02e3ff 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -84,6 +84,7 @@ extern char bmips_smp_int_vec_end;
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
extern cpumask_t bmips_booted_mask;
+extern unsigned long bmips_tp1_irqs;
extern void bmips_ebase_setup(void);
extern asmlinkage void plat_wired_tlb_setup(void);
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 1f7ca8b00404..b603804caac5 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -70,10 +70,7 @@ enum loongson_machine_type {
MACH_DEXXON_GDIUM2F10,
MACH_LEMOTE_NAS,
MACH_LEMOTE_LL2F,
- MACH_LEMOTE_A1004,
- MACH_LEMOTE_A1101,
- MACH_LEMOTE_A1201,
- MACH_LEMOTE_A1205,
+ MACH_LOONGSON_GENERIC,
MACH_LOONGSON_END
};
@@ -101,16 +98,16 @@ extern unsigned long mips_machtype;
struct boot_mem_map {
int nr_map;
struct boot_mem_map_entry {
- phys_t addr; /* start of memory segment */
- phys_t size; /* size of memory segment */
+ phys_addr_t addr; /* start of memory segment */
+ phys_addr_t size; /* size of memory segment */
long type; /* type of memory segment */
} map[BOOT_MEM_MAP_MAX];
};
extern struct boot_mem_map boot_mem_map;
-extern void add_memory_region(phys_t start, phys_t size, long type);
-extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
+extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type);
+extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
extern void prom_init(void);
extern void prom_free_prom_memory(void);
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
index 778e32d817bc..4809c29a4890 100644
--- a/arch/mips/include/asm/clock.h
+++ b/arch/mips/include/asm/clock.h
@@ -35,9 +35,6 @@ struct clk {
#define CLK_ALWAYS_ENABLED (1 << 0)
#define CLK_RATE_PROPAGATES (1 << 1)
-/* Should be defined by processor-specific code */
-void arch_init_clk_ops(struct clk_ops **, int type);
-
int clk_init(void);
int __clk_enable(struct clk *);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eefcaa363a87..28b1edf19501 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -10,6 +10,7 @@
#include <linux/bug.h>
#include <linux/irqflags.h>
+#include <asm/compiler.h>
#include <asm/war.h>
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
@@ -30,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
@@ -45,8 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
" .set arch=r4000 \n"
" sc %2, %1 \n"
" .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
@@ -80,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
" scd %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
@@ -93,8 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ "=&r" (dummy)
+ : GCC_OFF12_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
@@ -155,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" beqzl $1, 1b \n" \
"2: \n" \
" .set pop \n" \
- : "=&r" (__ret), "=R" (*m) \
- : "R" (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+ : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else if (kernel_uses_llsc) { \
__asm__ __volatile__( \
@@ -172,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
- : "=&r" (__ret), "=R" (*m) \
- : "R" (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
+ : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else { \
unsigned long __flags; \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index 71f5c5cfc58a..c73815e0123a 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,4 +16,12 @@
#define GCC_REG_ACCUM "accum"
#endif
+#ifndef CONFIG_CPU_MICROMIPS
+#define GCC_OFF12_ASM() "R"
+#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+#define GCC_OFF12_ASM() "ZC"
+#else
+#error "microMIPS compilation unsupported with GCC older than 4.9"
+#endif
+
#endif /* _ASM_COMPILER_H */
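
GCC_OFF12_ASM() expands to a constraint string literal ("R" normally, "ZC" for microMIPS with GCC 4.9 or newer), and every call site in the hunks above relies on C's adjacent-string-literal concatenation to build the final asm constraint. A tiny sketch of that concatenation; the macro body mirrors the hunk, and the printf is purely illustrative:

#include <stdio.h>

#ifndef CONFIG_CPU_MICROMIPS
#define GCC_OFF12_ASM() "R"
#else
#define GCC_OFF12_ASM() "ZC"
#endif

int main(void)
{
        /* Adjacent string literals concatenate, so an operand written as
         * "+" GCC_OFF12_ASM() (v->counter) is seen by the compiler as
         * "+R"(v->counter), or "+ZC"(...) on microMIPS. */
        printf("output constraint: %s\n", "+" GCC_OFF12_ASM());
        printf("input  constraint: %s\n", GCC_OFF12_ASM());
        return 0;
}
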
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 3325f3eb248c..2897cfafcaf0 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -344,4 +344,8 @@
# define cpu_has_msa 0
#endif
+#ifndef cpu_has_fre
+# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dfdc77ed1839..33866fce4d63 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -142,6 +142,7 @@
#define PRID_IMP_BMIPS3300_BUG 0x0000
#define PRID_IMP_BMIPS43XX 0xa000
#define PRID_IMP_BMIPS5000 0x5a00
+#define PRID_IMP_BMIPS5200 0x5b00
#define PRID_REV_BMIPS4380_LO 0x0040
#define PRID_REV_BMIPS4380_HI 0x006f
@@ -368,6 +369,7 @@ enum cpu_type_enum {
#define MIPS_CPU_HTW 0x100000000ull /* CPU support Hardware Page Table Walker */
#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
+#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index 4da0c1fe30d9..ae6fedcb0060 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -1,6 +1,8 @@
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
+#include <asm/compiler.h>
+
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static inline void atomic_scrub(void *va, u32 size)
@@ -24,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
" sc %0, %1 \n"
" beqz %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*virt_addr)
- : "m" (*virt_addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
+ : GCC_OFF12_ASM() (*virt_addr));
virt_addr++;
}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 1d38fe0edd2d..eb4d95de619c 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -8,6 +8,8 @@
#ifndef _ASM_ELF_H
#define _ASM_ELF_H
+#include <linux/fs.h>
+#include <uapi/linux/elf.h>
/* ELF header e_flags defines. */
/* MIPS architecture level. */
@@ -28,6 +30,7 @@
#define PT_MIPS_REGINFO 0x70000000
#define PT_MIPS_RTPROC 0x70000001
#define PT_MIPS_OPTIONS 0x70000002
+#define PT_MIPS_ABIFLAGS 0x70000003
/* Flags in the e_flags field of the header */
#define EF_MIPS_NOREORDER 0x00000001
@@ -174,6 +177,30 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+struct mips_elf_abiflags_v0 {
+ uint16_t version; /* Version of flags structure */
+ uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */
+ uint8_t isa_rev; /* The revision of ISA: 0 for MIPS V and below,
+ 1-n otherwise */
+ uint8_t gpr_size; /* The size of general purpose registers */
+ uint8_t cpr1_size; /* The size of co-processor 1 registers */
+ uint8_t cpr2_size; /* The size of co-processor 2 registers */
+ uint8_t fp_abi; /* The floating-point ABI */
+ uint32_t isa_ext; /* Mask of processor-specific extensions */
+ uint32_t ases; /* Mask of ASEs used */
+ uint32_t flags1; /* Mask of general flags */
+ uint32_t flags2;
+};
+
+#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
+#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
+#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
+#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
+#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_XX 5 /* -mfpxx */
+#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
+
#ifdef CONFIG_32BIT
/*
@@ -262,16 +289,13 @@ extern struct mips_abi mips_abi_n32;
#ifdef CONFIG_32BIT
-#define SET_PERSONALITY(ex) \
+#define SET_PERSONALITY2(ex, state) \
do { \
- if ((ex).e_flags & EF_MIPS_FP64) \
- clear_thread_flag(TIF_32BIT_FPREGS); \
- else \
- set_thread_flag(TIF_32BIT_FPREGS); \
- \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
+ mips_set_personality_fp(state); \
+ \
current->thread.abi = &mips_abi; \
} while (0)
@@ -291,44 +315,44 @@ do { \
#endif
#ifdef CONFIG_MIPS32_O32
-#define __SET_PERSONALITY32_O32(ex) \
+#define __SET_PERSONALITY32_O32(ex, state) \
do { \
set_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
\
- if (!((ex).e_flags & EF_MIPS_FP64)) \
- set_thread_flag(TIF_32BIT_FPREGS); \
+ mips_set_personality_fp(state); \
\
current->thread.abi = &mips_abi_32; \
} while (0)
#else
-#define __SET_PERSONALITY32_O32(ex) \
+#define __SET_PERSONALITY32_O32(ex, state) \
do { } while (0)
#endif
#ifdef CONFIG_MIPS32_COMPAT
-#define __SET_PERSONALITY32(ex) \
+#define __SET_PERSONALITY32(ex, state) \
do { \
if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
((ex).e_flags & EF_MIPS_ABI) == 0) \
__SET_PERSONALITY32_N32(); \
else \
- __SET_PERSONALITY32_O32(ex); \
+ __SET_PERSONALITY32_O32(ex, state); \
} while (0)
#else
-#define __SET_PERSONALITY32(ex) do { } while (0)
+#define __SET_PERSONALITY32(ex, state) do { } while (0)
#endif
-#define SET_PERSONALITY(ex) \
+#define SET_PERSONALITY2(ex, state) \
do { \
unsigned int p; \
\
clear_thread_flag(TIF_32BIT_REGS); \
clear_thread_flag(TIF_32BIT_FPREGS); \
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
clear_thread_flag(TIF_32BIT_ADDR); \
\
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- __SET_PERSONALITY32(ex); \
+ __SET_PERSONALITY32(ex, state); \
else \
current->thread.abi = &mips_abi; \
\
@@ -390,4 +414,24 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+struct arch_elf_state {
+ int fp_abi;
+ int interp_fp_abi;
+ int overall_abi;
+};
+
+#define INIT_ARCH_ELF_STATE { \
+ .fp_abi = -1, \
+ .interp_fp_abi = -1, \
+ .overall_abi = -1, \
+}
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter,
+ struct arch_elf_state *state);
+
+extern void mips_set_personality_fp(struct arch_elf_state *state);
+
#endif /* _ASM_ELF_H */
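
The elf.h additions introduce a per-exec struct arch_elf_state that records the FP ABI of the ELF image and of its interpreter before mips_set_personality_fp() acts on it. A minimal sketch of how that state is declared and seeded; the struct and initializer are copied from the hunk, while the MIPS_ABI_FP_DOUBLE assignment is only a hypothetical stand-in for what arch_elf_pt_proc() might record from a PT_MIPS_ABIFLAGS program header:

#include <stdio.h>

struct arch_elf_state {
        int fp_abi;
        int interp_fp_abi;
        int overall_abi;
};

#define INIT_ARCH_ELF_STATE {   \
        .fp_abi = -1,           \
        .interp_fp_abi = -1,    \
        .overall_abi = -1,      \
}

#define MIPS_ABI_FP_ANY    0
#define MIPS_ABI_FP_DOUBLE 1

int main(void)
{
        struct arch_elf_state state = INIT_ARCH_ELF_STATE;

        /* hypothetical value taken from a PT_MIPS_ABIFLAGS header */
        state.fp_abi = MIPS_ABI_FP_DOUBLE;

        printf("fp_abi=%d interp=%d overall=%d\n",
               state.fp_abi, state.interp_fp_abi, state.overall_abi);
        return 0;
}
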
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd562414cd5e..994d21939676 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -36,14 +36,16 @@ extern void _restore_fp(struct task_struct *);
/*
* This enum specifies a mode in which we want the FPU to operate, for cores
- * which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT
- * purposefully have the values 0 & 1 respectively, so that an integer value
- * of Status.FR can be trivially casted to the corresponding enum fpu_mode.
+ * which implement the Status.FR bit. Note that the bottom bit of the value
+ * purposefully matches the desired value of the Status.FR bit.
*/
enum fpu_mode {
FPU_32BIT = 0, /* FR = 0 */
- FPU_64BIT, /* FR = 1 */
+ FPU_64BIT, /* FR = 1, FRE = 0 */
FPU_AS_IS,
+ FPU_HYBRID, /* FR = 1, FRE = 1 */
+
+#define FPU_FR_MASK 0x1
};
static inline int __enable_fpu(enum fpu_mode mode)
@@ -57,6 +59,14 @@ static inline int __enable_fpu(enum fpu_mode mode)
enable_fpu_hazard();
return 0;
+ case FPU_HYBRID:
+ if (!cpu_has_fre)
+ return SIGFPE;
+
+ /* set FRE */
+ write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
+ goto fr_common;
+
case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
@@ -64,8 +74,11 @@ static inline int __enable_fpu(enum fpu_mode mode)
#endif
/* fall through */
case FPU_32BIT:
+ /* clear FRE */
+ write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
+fr_common:
/* set CU1 & change FR appropriately */
- fr = (int)mode;
+ fr = (int)mode & FPU_FR_MASK;
change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
enable_fpu_hazard();
@@ -102,13 +115,17 @@ static inline int __own_fpu(void)
enum fpu_mode mode;
int ret;
- mode = !test_thread_flag(TIF_32BIT_FPREGS);
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ mode = FPU_HYBRID;
+ else
+ mode = !test_thread_flag(TIF_32BIT_FPREGS);
+
ret = __enable_fpu(mode);
if (ret)
return ret;
KSTK_STATUS(current) |= ST0_CU1;
- if (mode == FPU_64BIT)
+ if (mode == FPU_64BIT || mode == FPU_HYBRID)
KSTK_STATUS(current) |= ST0_FR;
else /* mode == FPU_32BIT */
KSTK_STATUS(current) &= ~ST0_FR;
@@ -166,8 +183,24 @@ static inline int init_fpu(void)
if (cpu_has_fpu) {
ret = __own_fpu();
- if (!ret)
+ if (!ret) {
+ unsigned int config5 = read_c0_config5();
+
+ /*
+ * Ensure FRE is clear whilst running _init_fpu, since
+ * single precision FP instructions are used. If FRE
+ * was set then we'll just end up initialising all 32
+ * 64b registers.
+ */
+ write_c0_config5(config5 & ~MIPS_CONF5_FRE);
+ enable_fpu_hazard();
+
_init_fpu();
+
+ /* Restore FRE */
+ write_c0_config5(config5);
+ enable_fpu_hazard();
+ }
} else
fpu_emulator_init_fpu();
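
The reworked enum fpu_mode encodes the desired Status.FR value in its bottom bit, which is why __enable_fpu() can now mask with FPU_FR_MASK instead of casting the whole enum value. A small user-space sketch verifying that mapping, with the enumerators and mask copied from the hunk:

#include <stdio.h>

/* Bottom bit of the mode encodes the desired Status.FR value. */
enum fpu_mode {
        FPU_32BIT = 0,  /* FR = 0 */
        FPU_64BIT,      /* FR = 1, FRE = 0 */
        FPU_AS_IS,
        FPU_HYBRID,     /* FR = 1, FRE = 1 */
};
#define FPU_FR_MASK 0x1

static int fr_bit(enum fpu_mode mode)
{
        return (int)mode & FPU_FR_MASK;
}

int main(void)
{
        printf("FPU_32BIT  -> FR=%d\n", fr_bit(FPU_32BIT));   /* 0 */
        printf("FPU_64BIT  -> FR=%d\n", fr_bit(FPU_64BIT));   /* 1 */
        printf("FPU_HYBRID -> FR=%d\n", fr_bit(FPU_HYBRID));  /* 1 */
        return 0;
}
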
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 194cda0396a3..ef9987a61d88 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include <asm/asm-eva.h>
#include <asm/barrier.h>
+#include <asm/compiler.h>
#include <asm/errno.h>
#include <asm/war.h>
@@ -32,6 +33,7 @@
" beqzl $1, 1b \n" \
__WEAK_LLSC_MB \
"3: \n" \
+ " .insn \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
@@ -42,8 +44,10 @@
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
- : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
- : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF12_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
__asm__ __volatile__( \
@@ -58,6 +62,7 @@
" beqz $1, 1b \n" \
__WEAK_LLSC_MB \
"3: \n" \
+ " .insn \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
@@ -68,8 +73,10 @@
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
- : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
- : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF12_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
: "memory"); \
} else \
ret = -ENOSYS; \
@@ -157,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" beqzl $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
+ " .insn \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %6 \n"
@@ -166,8 +174,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
- : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+ : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__(
@@ -184,6 +193,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" beqz $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
+ " .insn \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %6 \n"
@@ -193,8 +203,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
- : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
+ : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
: "memory");
} else
return -ENOSYS;
diff --git a/arch/mips/include/asm/hpet.h b/arch/mips/include/asm/hpet.h
new file mode 100644
index 000000000000..18a8f778bfaa
--- /dev/null
+++ b/arch/mips/include/asm/hpet.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_HPET_H
+#define _ASM_HPET_H
+
+#ifdef CONFIG_RS780_HPET
+
+#define HPET_MMAP_SIZE 1024
+
+#define HPET_ID 0x000
+#define HPET_PERIOD 0x004
+#define HPET_CFG 0x010
+#define HPET_STATUS 0x020
+#define HPET_COUNTER 0x0f0
+
+#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
+#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
+#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n)
+
+#define HPET_T0_IRS 0x001
+#define HPET_T1_IRS 0x002
+#define HPET_T3_IRS 0x004
+
+#define HPET_T0_CFG 0x100
+#define HPET_T0_CMP 0x108
+#define HPET_T0_ROUTE 0x110
+#define HPET_T1_CFG 0x120
+#define HPET_T1_CMP 0x128
+#define HPET_T1_ROUTE 0x130
+#define HPET_T2_CFG 0x140
+#define HPET_T2_CMP 0x148
+#define HPET_T2_ROUTE 0x150
+
+#define HPET_ID_REV 0x000000ff
+#define HPET_ID_NUMBER 0x00001f00
+#define HPET_ID_64BIT 0x00002000
+#define HPET_ID_LEGSUP 0x00008000
+#define HPET_ID_VENDOR 0xffff0000
+#define HPET_ID_NUMBER_SHIFT 8
+#define HPET_ID_VENDOR_SHIFT 16
+
+#define HPET_CFG_ENABLE 0x001
+#define HPET_CFG_LEGACY 0x002
+#define HPET_LEGACY_8254 2
+#define HPET_LEGACY_RTC 8
+
+#define HPET_TN_LEVEL 0x0002
+#define HPET_TN_ENABLE 0x0004
+#define HPET_TN_PERIODIC 0x0008
+#define HPET_TN_PERIODIC_CAP 0x0010
+#define HPET_TN_64BIT_CAP 0x0020
+#define HPET_TN_SETVAL 0x0040
+#define HPET_TN_32BIT 0x0100
+#define HPET_TN_ROUTE 0x3e00
+#define HPET_TN_FSB 0x4000
+#define HPET_TN_FSB_CAP 0x8000
+#define HPET_TN_ROUTE_SHIFT 9
+
+/* Max HPET Period is 10^8 femto sec as in HPET spec */
+#define HPET_MAX_PERIOD 100000000UL
+/*
+ * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
+ * then the 32-bit HPET counter wraps around in less than 0.5 sec.
+ */
+#define HPET_MIN_PERIOD 100000UL
+
+#define HPET_ADDR 0x20000
+#define HPET_MMIO_ADDR 0x90000e0000020000
+#define HPET_FREQ 14318780
+#define HPET_COMPARE_VAL ((HPET_FREQ + HZ / 2) / HZ)
+#define HPET_T0_IRQ 0
+
+extern void __init setup_hpet_timer(void);
+#endif /* CONFIG_RS780_HPET */
+#endif /* _ASM_HPET_H */
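
HPET_COMPARE_VAL in the new header is a round-to-nearest division of the HPET frequency by HZ, giving the number of HPET ticks per timer interrupt. A sketch of that arithmetic; the HZ value used here is an assumption for illustration only (the real value comes from the kernel configuration):

#include <stdio.h>

#define HPET_FREQ 14318780
#define HZ 250  /* assumed tick rate, not taken from the patch */

/* Same rounding-to-nearest expression as the new header. */
#define HPET_COMPARE_VAL ((HPET_FREQ + HZ / 2) / HZ)

int main(void)
{
        printf("HPET ticks per jiffy: %d\n", HPET_COMPARE_VAL); /* 57275 with HZ=250 */
        return 0;
}
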
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 933b50e125a0..9e777cd42b67 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -167,7 +167,7 @@ static inline void * isa_bus_to_virt(unsigned long address)
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
+extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
#ifndef CONFIG_PCI
@@ -175,7 +175,7 @@ struct pci_dev;
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
#endif
-static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
+static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
void __iomem *addr = plat_ioremap(offset, size, flags);
@@ -183,7 +183,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
if (addr)
return addr;
-#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
+#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
if (cpu_has_64bit_addresses) {
u64 base = UNCAC_BASE;
@@ -197,7 +197,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
return (void __iomem *) (unsigned long) (base + offset);
} else if (__builtin_constant_p(offset) &&
__builtin_constant_p(size) && __builtin_constant_p(flags)) {
- phys_t phys_addr, last_addr;
+ phys_addr_t phys_addr, last_addr;
phys_addr = fixup_bigphys_addr(offset, size);
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 39f07aec640c..5a4e1bb8fb1b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -48,4 +48,7 @@ extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
+void arch_trigger_all_cpu_backtrace(bool);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
#endif /* _ASM_IRQ_H */
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index 3f11fdb3ed8c..39a160bb41dc 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -19,8 +19,8 @@ extern void rm9k_cpu_irq_init(void);
#ifdef CONFIG_IRQ_DOMAIN
struct device_node;
-extern int mips_cpu_intc_init(struct device_node *of_node,
- struct device_node *parent);
+extern int mips_cpu_irq_of_init(struct device_node *of_node,
+ struct device_node *parent);
#endif
#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/mach-ath25/ath25_platform.h b/arch/mips/include/asm/mach-ath25/ath25_platform.h
new file mode 100644
index 000000000000..4f4ee4f9e5ec
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/ath25_platform.h
@@ -0,0 +1,73 @@
+#ifndef __ASM_MACH_ATH25_PLATFORM_H
+#define __ASM_MACH_ATH25_PLATFORM_H
+
+#include <linux/etherdevice.h>
+
+/*
+ * This is board-specific data that is stored in a "fixed" location in flash.
+ * It is shared across operating systems, so it should not be changed lightly.
+ * The main reason we need it is in order to extract the ethernet MAC
+ * address(es).
+ */
+struct ath25_boarddata {
+ u32 magic; /* board data is valid */
+#define ATH25_BD_MAGIC 0x35333131 /* "5311", for all 531x/231x platforms */
+ u16 cksum; /* checksum (starting with BD_REV 2) */
+ u16 rev; /* revision of this struct */
+#define BD_REV 4
+ char board_name[64]; /* Name of board */
+ u16 major; /* Board major number */
+ u16 minor; /* Board minor number */
+ u32 flags; /* Board configuration */
+#define BD_ENET0 0x00000001 /* ENET0 is stuffed */
+#define BD_ENET1 0x00000002 /* ENET1 is stuffed */
+#define BD_UART1 0x00000004 /* UART1 is stuffed */
+#define BD_UART0 0x00000008 /* UART0 is stuffed (dma) */
+#define BD_RSTFACTORY 0x00000010 /* Reset factory defaults stuffed */
+#define BD_SYSLED 0x00000020 /* System LED stuffed */
+#define BD_EXTUARTCLK 0x00000040 /* External UART clock */
+#define BD_CPUFREQ 0x00000080 /* cpu freq is valid in nvram */
+#define BD_SYSFREQ 0x00000100 /* sys freq is set in nvram */
+#define BD_WLAN0 0x00000200 /* Enable WLAN0 */
+#define BD_MEMCAP 0x00000400 /* CAP SDRAM @ mem_cap for testing */
+#define BD_DISWATCHDOG 0x00000800 /* disable system watchdog */
+#define BD_WLAN1 0x00001000 /* Enable WLAN1 (ar5212) */
+#define BD_ISCASPER 0x00002000 /* FLAG for AR2312 */
+#define BD_WLAN0_2G_EN 0x00004000 /* FLAG for radio0_2G */
+#define BD_WLAN0_5G_EN 0x00008000 /* FLAG for radio0_5G */
+#define BD_WLAN1_2G_EN 0x00020000 /* FLAG for radio1_2G */
+#define BD_WLAN1_5G_EN 0x00040000 /* FLAG for radio1_5G */
+ u16 reset_config_gpio; /* Reset factory GPIO pin */
+ u16 sys_led_gpio; /* System LED GPIO pin */
+
+ u32 cpu_freq; /* CPU core frequency in Hz */
+ u32 sys_freq; /* System frequency in Hz */
+ u32 cnt_freq; /* Calculated C0_COUNT frequency */
+
+ u8 wlan0_mac[ETH_ALEN];
+ u8 enet0_mac[ETH_ALEN];
+ u8 enet1_mac[ETH_ALEN];
+
+ u16 pci_id; /* Pseudo PCIID for common code */
+ u16 mem_cap; /* cap bank1 in MB */
+
+ /* version 3 */
+ u8 wlan1_mac[ETH_ALEN]; /* (ar5212) */
+};
+
+#define BOARD_CONFIG_BUFSZ 0x1000
+
+/*
+ * Platform device information for the Wireless MAC
+ */
+struct ar231x_board_config {
+ u16 devid;
+
+ /* board config data */
+ struct ath25_boarddata *config;
+
+ /* radio calibration data */
+ const char *radio;
+};
+
+#endif /* __ASM_MACH_ATH25_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
new file mode 100644
index 000000000000..ade0356df257
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
@@ -0,0 +1,64 @@
+/*
+ * Atheros AR231x/AR531x SoC specific CPU feature overrides
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * The Atheros AR531x/AR231x SoCs have MIPS 4Kc/4KEc core.
+ */
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_ejtag 1
+
+#if !defined(CONFIG_SOC_AR5312)
+# define cpu_has_llsc 1
+#else
+/*
+ * The MIPS 4Kc V0.9 core in the AR5312/AR2312 has problems with the
+ * ll/sc instructions.
+ */
+# define cpu_has_llsc 0
+#endif
+
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+
+#if !defined(CONFIG_SOC_AR5312)
+# define cpu_has_mips32r2 1
+#endif
+
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+#define cpu_has_64bit_addresses 0
+
+#endif /* __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
new file mode 100644
index 000000000000..d8009c93a465
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/dma-coherence.h
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ *
+ */
+#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
+#define __ASM_MACH_ATH25_DMA_COHERENCE_H
+
+#include <linux/device.h>
+
+/*
+ * We need some arbitrary non-zero value to be programmed into the BAR1 register
+ * of the PCI host controller to enable DMA. The same value should be used as the
+ * offset when calculating the physical address of a DMA buffer for PCI devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
+static inline dma_addr_t ath25_dev_offset(struct device *dev)
+{
+#ifdef CONFIG_PCI
+ extern struct bus_type pci_bus_type;
+
+ if (dev && dev->bus == &pci_bus_type)
+ return AR2315_PCI_HOST_SDRAM_BASEADDR;
+#endif
+ return 0;
+}
+
+static inline dma_addr_t
+plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+{
+ return virt_to_phys(addr) + ath25_dev_offset(dev);
+}
+
+static inline dma_addr_t
+plat_map_dma_mem_page(struct device *dev, struct page *page)
+{
+ return page_to_phys(page) + ath25_dev_offset(dev);
+}
+
+static inline unsigned long
+plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr - ath25_dev_offset(dev);
+}
+
+static inline void
+plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int plat_device_is_coherent(struct device *dev)
+{
+#ifdef CONFIG_DMA_COHERENT
+ return 1;
+#endif
+#ifdef CONFIG_DMA_NONCOHERENT
+ return 0;
+#endif
+}
+
+#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h
new file mode 100644
index 000000000000..713564b8e8ef
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/gpio.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_MACH_ATH25_GPIO_H
+#define __ASM_MACH_ATH25_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+static inline int irq_to_gpio(unsigned irq)
+{
+ return -EINVAL;
+}
+
+#endif /* __ASM_MACH_ATH25_GPIO_H */
diff --git a/arch/mips/include/asm/mach-ath25/war.h b/arch/mips/include/asm/mach-ath25/war.h
new file mode 100644
index 000000000000..e3a5250ebd67
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
+ */
+#ifndef __ASM_MACH_ATH25_WAR_H
+#define __ASM_MACH_ATH25_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MACH_ATH25_WAR_H */
diff --git a/arch/mips/include/asm/mach-au1x00/ioremap.h b/arch/mips/include/asm/mach-au1x00/ioremap.h
index 75a94ad3ac91..99fea1fbb4f5 100644
--- a/arch/mips/include/asm/mach-au1x00/ioremap.h
+++ b/arch/mips/include/asm/mach-au1x00/ioremap.h
@@ -11,10 +11,10 @@
#include <linux/types.h>
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI)
-extern phys_t __fixup_bigphys_addr(phys_t, phys_t);
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI)
+extern phys_addr_t __fixup_bigphys_addr(phys_addr_t, phys_addr_t);
#else
-static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return phys_addr;
}
@@ -23,12 +23,12 @@ static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
/*
* Allow physical addresses to be fixed up to help 36-bit peripherals.
*/
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return __fixup_bigphys_addr(phys_addr, size);
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
return NULL;
diff --git a/arch/mips/include/asm/mach-bcm3384/dma-coherence.h b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h
new file mode 100644
index 000000000000..a3be8e50e1f0
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_MACH_BCM3384_DMA_COHERENCE_H
+#define __ASM_MACH_BCM3384_DMA_COHERENCE_H
+
+struct device;
+
+extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
+extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
+extern unsigned long plat_dma_addr_to_phys(struct device *dev,
+ dma_addr_t dma_addr);
+
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction)
+{
+}
+
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA.
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline int plat_device_is_coherent(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* __ASM_MACH_BCM3384_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-bcm3384/war.h b/arch/mips/include/asm/mach-bcm3384/war.h
new file mode 100644
index 000000000000..59d7599059b0
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm3384/war.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_BCM3384_WAR_H
+#define __ASM_MIPS_MACH_BCM3384_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_BCM3384_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
index 36a3fc1aa3ae..ee59ffe99922 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
@@ -14,40 +14,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
-struct nvram_header {
- u32 magic;
- u32 len;
- u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
- u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
- u32 config_ncdl; /* ncdl values for memc */
-};
-
-#define NVRAM_HEADER 0x48534C46 /* 'FLSH' */
-#define NVRAM_VERSION 1
-#define NVRAM_HEADER_SIZE 20
-#define NVRAM_SPACE 0x8000
-
-#define FLASH_MIN 0x00020000 /* Minimum flash size */
-
-#define NVRAM_MAX_VALUE_LEN 255
-#define NVRAM_MAX_PARAM_LEN 64
-
-extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len);
-
-static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
-{
- if (strchr(buf, ':'))
- sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
- &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
- &macaddr[5]);
- else if (strchr(buf, '-'))
- sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
- &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
- &macaddr[5]);
- else
- printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
-}
-
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
#endif /* __BCM47XX_NVRAM_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index ff15e3b14e7a..aea6e64b828f 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -3,12 +3,12 @@
#include <bcm63xx_cpu.h>
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return phys_addr;
}
-static inline int is_bcm63xx_internal_registers(phys_t offset)
+static inline int is_bcm63xx_internal_registers(phys_addr_t offset)
{
switch (bcm63xx_get_cpu_id()) {
case BCM3368_CPU_ID:
@@ -32,7 +32,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
return 0;
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
if (is_bcm63xx_internal_registers(offset))
diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h
index b379938d47f0..513371f7c39c 100644
--- a/arch/mips/include/asm/mach-generic/ioremap.h
+++ b/arch/mips/include/asm/mach-generic/ioremap.h
@@ -15,12 +15,12 @@
* Allow physical addresses to be fixed up to help peripherals located
* outside the low 32-bit range -- generic pass-through version.
*/
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return phys_addr;
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
return NULL;
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index 139cd200e79d..050e18bb1a04 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -36,4 +36,10 @@
#endif /* CONFIG_IRQ_CPU */
+#ifdef CONFIG_MIPS_GIC
+#ifndef MIPS_GIC_IRQ_BASE
+#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#endif
+#endif /* CONFIG_MIPS_GIC */
+
#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index f196cceb7322..4e5ae6523cb4 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -48,6 +48,8 @@ extern struct clk *clk_get_ppe(void);
extern unsigned char ltq_boot_select(void);
/* find out what caused the last cpu reset */
extern int ltq_reset_cause(void);
+/* find out the soc type */
+extern int ltq_soc_type(void);
#define IOPORT_RESOURCE_START 0x10000000
#define IOPORT_RESOURCE_END 0xffffffff
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
index 3388fc53599e..fa802926523f 100644
--- a/arch/mips/include/asm/mach-loongson/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson/boot_param.h
@@ -10,7 +10,8 @@
#define VIDEO_ROM 7
#define ADAPTER_ROM 8
#define ACPI_TABLE 9
-#define MAX_MEMORY_TYPE 10
+#define SMBIOS_TABLE 10
+#define MAX_MEMORY_TYPE 11
#define LOONGSON3_BOOT_MEM_MAP_MAX 128
struct efi_memory_map_loongson {
@@ -42,15 +43,49 @@ struct efi_cpuinfo_loongson {
u32 processor_id; /* PRID, e.g. 6305, 6306 */
u32 cputype; /* Loongson_3A/3B, etc. */
u32 total_node; /* num of total numa nodes */
- u32 cpu_startup_core_id; /* Core id */
+ u16 cpu_startup_core_id; /* Boot core id */
+ u16 reserved_cores_mask;
u32 cpu_clock_freq; /* cpu_clock */
u32 nr_cpus;
} __packed;
+#define MAX_UARTS 64
+struct uart_device {
+ u32 iotype; /* see include/linux/serial_core.h */
+ u32 uartclk;
+ u32 int_offset;
+ u64 uart_base;
+} __packed;
+
+#define MAX_SENSORS 64
+#define SENSOR_TEMPER 0x00000001
+#define SENSOR_VOLTAGE 0x00000002
+#define SENSOR_FAN 0x00000004
+struct sensor_device {
+ char name[32]; /* a formal name */
+ char label[64]; /* a flexible description */
+ u32 type; /* SENSOR_* */
+ u32 id; /* instance id of a sensor-class */
+ u32 fan_policy; /* see loongson_hwmon.h */
+ u32 fan_percent;/* only for constant speed policy */
+ u64 base_addr; /* base address of device registers */
+} __packed;
+
struct system_loongson {
u16 vers; /* version of system_loongson */
u32 ccnuma_smp; /* 0: no numa; 1: has numa */
u32 sing_double_channel; /* 1:single; 2:double */
+ u32 nr_uarts;
+ struct uart_device uarts[MAX_UARTS];
+ u32 nr_sensors;
+ struct sensor_device sensors[MAX_SENSORS];
+ char has_ec;
+ char ec_name[32];
+ u64 ec_base_addr;
+ char has_tcm;
+ char tcm_name[32];
+ u64 tcm_base_addr;
+ u64 workarounds; /* see workarounds.h */
} __packed;
struct irq_source_routing_table {
@@ -149,6 +184,8 @@ struct loongson_system_configuration {
u32 nr_nodes;
int cores_per_node;
int cores_per_package;
+ u16 boot_cpu_id;
+ u16 reserved_cpus_mask;
enum loongson_cpu_type cputype;
u64 ht_control_base;
u64 pci_mem_start_addr;
@@ -159,9 +196,15 @@ struct loongson_system_configuration {
u64 suspend_addr;
u64 vgabios_addr;
u32 dma_mask_bits;
+ char ecname[32];
+ u32 nr_uarts;
+ struct uart_device uarts[MAX_UARTS];
+ u32 nr_sensors;
+ struct sensor_device sensors[MAX_SENSORS];
+ u64 workarounds;
};
extern struct efi_memory_map_loongson *loongson_memmap;
extern struct loongson_system_configuration loongson_sysconf;
-extern int cpuhotplug_workaround;
+
#endif
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index 6a902751cc7f..a90534161bd2 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -23,7 +23,7 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
#ifdef CONFIG_CPU_LOONGSON3
- return virt_to_phys(addr);
+ return phys_to_dma(dev, virt_to_phys(addr));
#else
return virt_to_phys(addr) | 0x80000000;
#endif
@@ -33,7 +33,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
#ifdef CONFIG_CPU_LOONGSON3
- return page_to_phys(page);
+ return phys_to_dma(dev, page_to_phys(page));
#else
return page_to_phys(page) | 0x80000000;
#endif
@@ -43,7 +43,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
- return dma_addr;
+ return dma_to_phys(dev, dma_addr);
#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
#else
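
With the hunks above, Loongson-3 DMA addresses go through the phys_to_dma()/dma_to_phys() hooks instead of being identity-mapped, so any platform-specific offset is applied in one place. A hedged sketch of the expected invariant; the helper below is not from the patch:

	static inline void demo_dma_roundtrip(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t daddr = plat_map_dma_mem(dev, buf, size);

		/* dma_to_phys() is expected to undo phys_to_dma() on Loongson-3 */
		WARN_ON(plat_dma_addr_to_phys(dev, daddr) != virt_to_phys(buf));
	}
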
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
index 34560bda6626..a281cca5f2fb 100644
--- a/arch/mips/include/asm/mach-loongson/irq.h
+++ b/arch/mips/include/asm/mach-loongson/irq.h
@@ -32,8 +32,7 @@
#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a)
#define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18)
-#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */
-#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */
+#define LOONGSON_INT_COREx_INTy(x, y) (1<<(x) | 1<<(y+4)) /* route to int y of core x */
#endif
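
For reference, the new routing macro reproduces the two constants it replaces. A small compile-time check, shown here only as an illustration (BUILD_BUG_ON comes from <linux/bug.h> and must sit inside a function):

	static inline void demo_check_int_routing(void)
	{
		BUILD_BUG_ON(LOONGSON_INT_COREx_INTy(0, 0) != 0x11);	/* old LOONGSON_INT_CORE0_INT0 */
		BUILD_BUG_ON(LOONGSON_INT_COREx_INTy(0, 1) != 0x21);	/* old LOONGSON_INT_CORE0_INT1 */
	}
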
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 92bf76c21441..5459ac09679f 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -35,7 +35,7 @@ extern void __init prom_init_cmdline(void);
extern void __init prom_init_machtype(void);
extern void __init prom_init_env(void);
#ifdef CONFIG_LOONGSON_UART_BASE
-extern unsigned long _loongson_uart_base, loongson_uart_base;
+extern unsigned long _loongson_uart_base[], loongson_uart_base[];
extern void prom_init_loongson_uart_base(void);
#endif
diff --git a/arch/mips/include/asm/mach-loongson/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h
new file mode 100644
index 000000000000..4431fc54a36c
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h
@@ -0,0 +1,55 @@
+#ifndef __LOONGSON_HWMON_H_
+#define __LOONGSON_HWMON_H_
+
+#include <linux/types.h>
+
+#define MIN_TEMP 0
+#define MAX_TEMP 255
+#define NOT_VALID_TEMP 999
+
+typedef int (*get_temp_fun)(int);
+extern int loongson3_cpu_temp(int);
+
+/* 0:Max speed, 1:Manual, 2:Auto */
+enum fan_control_mode {
+ FAN_FULL_MODE = 0,
+ FAN_MANUAL_MODE = 1,
+ FAN_AUTO_MODE = 2,
+ FAN_MODE_END
+};
+
+struct temp_range {
+ u8 low;
+ u8 high;
+ u8 level;
+};
+
+#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
+#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
+#define KERNEL_HELPER_POLICY 2 /* kernel acts as a helper for fan control */
+
+#define MAX_STEP_NUM 16
+#define MAX_FAN_LEVEL 255
+
+/* loongson_fan_policy takes effect when the fan works in FAN_AUTO_MODE */
+struct loongson_fan_policy {
+ u8 type;
+
+ /* percent only used when type is CONSTANT_SPEED_POLICY */
+ u8 percent;
+
+ /* period between two checks (unit: seconds) */
+ u8 adjust_period;
+
+ /* fan adjustment usually depends on a temperature input */
+ get_temp_fun depend_temp;
+
+ /* up_step/down_step used when type is STEP_SPEED_POLICY */
+ u8 up_step_num;
+ u8 down_step_num;
+ struct temp_range up_step[MAX_STEP_NUM];
+ struct temp_range down_step[MAX_STEP_NUM];
+ struct delayed_work work;
+};
+
+#endif /* __LOONGSON_HWMON_H_*/
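
A hedged example of filling in the structures above for a board whose fan runs at a fixed duty cycle; the 60% figure and the 5-second period are made up for illustration:

	static struct loongson_fan_policy demo_fan_policy = {
		.type		= CONSTANT_SPEED_POLICY,
		.percent	= 60,			/* illustrative duty cycle */
		.adjust_period	= 5,			/* seconds */
		.depend_temp	= loongson3_cpu_temp,	/* declared in this header */
	};
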
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 228e37847a36..cb2b60249cd2 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -26,7 +26,7 @@
#ifdef CONFIG_LOONGSON_MACH3X
-#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101
+#define LOONGSON_MACHTYPE MACH_LOONGSON_GENERIC
#endif /* CONFIG_LOONGSON_MACH3X */
diff --git a/arch/mips/include/asm/mach-loongson/topology.h b/arch/mips/include/asm/mach-loongson/topology.h
index 5598ba77d2ef..0d8f3b55bdbc 100644
--- a/arch/mips/include/asm/mach-loongson/topology.h
+++ b/arch/mips/include/asm/mach-loongson/topology.h
@@ -3,7 +3,7 @@
#ifdef CONFIG_NUMA
-#define cpu_to_node(cpu) ((cpu) >> 2)
+#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
#define parent_node(node) (node)
#define cpumask_of_node(node) (&__node_data[(node)]->cpumask)
diff --git a/arch/mips/include/asm/mach-loongson/workarounds.h b/arch/mips/include/asm/mach-loongson/workarounds.h
new file mode 100644
index 000000000000..e180c1422eae
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/workarounds.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_MACH_LOONGSON_WORKAROUNDS_H_
+#define __ASM_MACH_LOONGSON_WORKAROUNDS_H_
+
+#define WORKAROUND_CPUFREQ 0x00000001
+#define WORKAROUND_CPUHOTPLUG 0x00000002
+
+#endif
diff --git a/arch/mips/include/asm/mach-loongson1/cpufreq.h b/arch/mips/include/asm/mach-loongson1/cpufreq.h
new file mode 100644
index 000000000000..e7765ce30bcf
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/cpufreq.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 CPUFreq platform support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+#ifndef __ASM_MACH_LOONGSON1_CPUFREQ_H
+#define __ASM_MACH_LOONGSON1_CPUFREQ_H
+
+struct plat_ls1x_cpufreq {
+ const char *clk_name; /* CPU clk */
+ const char *osc_clk_name; /* OSC clk */
+ unsigned int max_freq; /* in kHz */
+ unsigned int min_freq; /* in kHz */
+};
+
+#endif /* __ASM_MACH_LOONGSON1_CPUFREQ_H */
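
A sketch of the platform data a board file might hand to the matching cpufreq driver; the clock names and frequency limits below are placeholders, not values defined by this header:

	static struct plat_ls1x_cpufreq demo_cpufreq_pdata = {
		.clk_name	= "cpu_clk",		/* assumed CPU clock name */
		.osc_clk_name	= "osc_33m_clk",	/* assumed oscillator clock name */
		.max_freq	= 266 * 1000,		/* kHz */
		.min_freq	= 33 * 1000,		/* kHz */
	};
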
diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h
index 5c437c2ba6b3..20e0c2b155dd 100644
--- a/arch/mips/include/asm/mach-loongson1/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson1/loongson1.h
@@ -16,6 +16,7 @@
#define DEFAULT_MEMSIZE 256 /* If no memsize provided */
/* Loongson 1 Register Bases */
+#define LS1X_MUX_BASE 0x1fd00420
#define LS1X_INTC_BASE 0x1fd01040
#define LS1X_EHCI_BASE 0x1fe00000
#define LS1X_OHCI_BASE 0x1fe08000
@@ -31,7 +32,10 @@
#define LS1X_I2C0_BASE 0x1fe58000
#define LS1X_I2C1_BASE 0x1fe68000
#define LS1X_I2C2_BASE 0x1fe70000
-#define LS1X_PWM_BASE 0x1fe5c000
+#define LS1X_PWM0_BASE 0x1fe5c000
+#define LS1X_PWM1_BASE 0x1fe5c010
+#define LS1X_PWM2_BASE 0x1fe5c020
+#define LS1X_PWM3_BASE 0x1fe5c030
#define LS1X_WDT_BASE 0x1fe5c060
#define LS1X_RTC_BASE 0x1fe64000
#define LS1X_AC97_BASE 0x1fe74000
@@ -39,6 +43,8 @@
#define LS1X_CLK_BASE 0x1fe78030
#include <regs-clk.h>
+#include <regs-mux.h>
+#include <regs-pwm.h>
#include <regs-wdt.h>
#endif /* __ASM_MACH_LOONGSON1_LOONGSON1_H */
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
index 30c13e508fff..47de55e0c835 100644
--- a/arch/mips/include/asm/mach-loongson1/platform.h
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -13,10 +13,12 @@
#include <linux/platform_device.h>
-extern struct platform_device ls1x_uart_device;
-extern struct platform_device ls1x_eth0_device;
-extern struct platform_device ls1x_ehci_device;
-extern struct platform_device ls1x_rtc_device;
+extern struct platform_device ls1x_uart_pdev;
+extern struct platform_device ls1x_cpufreq_pdev;
+extern struct platform_device ls1x_eth0_pdev;
+extern struct platform_device ls1x_eth1_pdev;
+extern struct platform_device ls1x_ehci_pdev;
+extern struct platform_device ls1x_rtc_pdev;
extern void __init ls1x_clk_init(void);
extern void __init ls1x_serial_setup(struct platform_device *pdev);
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
index fb6a3ff9318f..ee2445b10fc3 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -20,15 +20,32 @@
/* Clock PLL Divisor Register Bits */
#define DIV_DC_EN (0x1 << 31)
+#define DIV_DC_RST (0x1 << 30)
#define DIV_CPU_EN (0x1 << 25)
+#define DIV_CPU_RST (0x1 << 24)
#define DIV_DDR_EN (0x1 << 19)
+#define DIV_DDR_RST (0x1 << 18)
+#define RST_DC_EN (0x1 << 5)
+#define RST_DC (0x1 << 4)
+#define RST_DDR_EN (0x1 << 3)
+#define RST_DDR (0x1 << 2)
+#define RST_CPU_EN (0x1 << 1)
+#define RST_CPU 0x1
#define DIV_DC_SHIFT 26
#define DIV_CPU_SHIFT 20
#define DIV_DDR_SHIFT 14
-#define DIV_DC_WIDTH 5
-#define DIV_CPU_WIDTH 5
-#define DIV_DDR_WIDTH 5
+#define DIV_DC_WIDTH 4
+#define DIV_CPU_WIDTH 4
+#define DIV_DDR_WIDTH 4
+
+#define BYPASS_DC_SHIFT 12
+#define BYPASS_DDR_SHIFT 10
+#define BYPASS_CPU_SHIFT 8
+
+#define BYPASS_DC_WIDTH 1
+#define BYPASS_DDR_WIDTH 1
+#define BYPASS_CPU_WIDTH 1
#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
diff --git a/arch/mips/include/asm/mach-loongson1/regs-mux.h b/arch/mips/include/asm/mach-loongson1/regs-mux.h
new file mode 100644
index 000000000000..fb1e36efaa19
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-mux.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 MUX Register Definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON1_REGS_MUX_H
+#define __ASM_MACH_LOONGSON1_REGS_MUX_H
+
+#define LS1X_MUX_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_MUX_BASE + (x)))
+
+#define LS1X_MUX_CTRL0 LS1X_MUX_REG(0x0)
+#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4)
+
+/* MUX CTRL0 Register Bits */
+#define UART0_USE_PWM23 (0x1 << 28)
+#define UART0_USE_PWM01 (0x1 << 27)
+#define UART1_USE_LCD0_5_6_11 (0x1 << 26)
+#define I2C2_USE_CAN1 (0x1 << 25)
+#define I2C1_USE_CAN0 (0x1 << 24)
+#define NAND3_USE_UART5 (0x1 << 23)
+#define NAND3_USE_UART4 (0x1 << 22)
+#define NAND3_USE_UART1_DAT (0x1 << 21)
+#define NAND3_USE_UART1_CTS (0x1 << 20)
+#define NAND3_USE_PWM23 (0x1 << 19)
+#define NAND3_USE_PWM01 (0x1 << 18)
+#define NAND2_USE_UART5 (0x1 << 17)
+#define NAND2_USE_UART4 (0x1 << 16)
+#define NAND2_USE_UART1_DAT (0x1 << 15)
+#define NAND2_USE_UART1_CTS (0x1 << 14)
+#define NAND2_USE_PWM23 (0x1 << 13)
+#define NAND2_USE_PWM01 (0x1 << 12)
+#define NAND1_USE_UART5 (0x1 << 11)
+#define NAND1_USE_UART4 (0x1 << 10)
+#define NAND1_USE_UART1_DAT (0x1 << 9)
+#define NAND1_USE_UART1_CTS (0x1 << 8)
+#define NAND1_USE_PWM23 (0x1 << 7)
+#define NAND1_USE_PWM01 (0x1 << 6)
+#define GMAC1_USE_UART1 (0x1 << 4)
+#define GMAC1_USE_UART0 (0x1 << 3)
+#define LCD_USE_UART0_DAT (0x1 << 2)
+#define LCD_USE_UART15 (0x1 << 1)
+#define LCD_USE_UART0 0x1
+
+/* MUX CTRL1 Register Bits */
+#define USB_RESET (0x1 << 31)
+#define SPI1_CS_USE_PWM01 (0x1 << 24)
+#define SPI1_USE_CAN (0x1 << 23)
+#define DISABLE_DDR_CONFSPACE (0x1 << 20)
+#define DDR32TO16EN (0x1 << 16)
+#define GMAC1_SHUT (0x1 << 13)
+#define GMAC0_SHUT (0x1 << 12)
+#define USB_SHUT (0x1 << 11)
+#define UART1_3_USE_CAN1 (0x1 << 5)
+#define UART1_2_USE_CAN0 (0x1 << 4)
+#define GMAC1_USE_TXCLK (0x1 << 3)
+#define GMAC0_USE_TXCLK (0x1 << 2)
+#define GMAC1_USE_PWM23 (0x1 << 1)
+#define GMAC0_USE_PWM01 0x1
+
+#endif /* __ASM_MACH_LOONGSON1_REGS_MUX_H */
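
Illustrative use of the MUX definitions above, not taken from the patch: a read-modify-write of LS1X_MUX_CTRL0 to route UART0 onto the PWM0/1 pads.

	static void __init demo_mux_uart0_on_pwm01(void)
	{
		u32 val = __raw_readl(LS1X_MUX_CTRL0);

		__raw_writel(val | UART0_USE_PWM01, LS1X_MUX_CTRL0);
	}
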
diff --git a/arch/mips/include/asm/mach-loongson1/regs-pwm.h b/arch/mips/include/asm/mach-loongson1/regs-pwm.h
new file mode 100644
index 000000000000..99f2bcc586f0
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-pwm.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 PWM Register Definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON1_REGS_PWM_H
+#define __ASM_MACH_LOONGSON1_REGS_PWM_H
+
+/* Loongson 1 PWM Timer Register Definitions */
+#define PWM_CNT 0x0
+#define PWM_HRC 0x4
+#define PWM_LRC 0x8
+#define PWM_CTRL 0xc
+
+/* PWM Control Register Bits */
+#define CNT_RST (0x1 << 7)
+#define INT_SR (0x1 << 6)
+#define INT_EN (0x1 << 5)
+#define PWM_SINGLE (0x1 << 4)
+#define PWM_OE (0x1 << 3)
+#define CNT_EN 0x1
+
+#endif /* __ASM_MACH_LOONGSON1_REGS_PWM_H */
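
Since the PWM registers are plain offsets, a driver would typically ioremap() one of the LS1X_PWMx_BASE addresses and add them. A rough sketch; the programming order and bit polarity are assumptions, not taken from a datasheet:

	static void demo_pwm_start(void __iomem *pwm_base, u32 period, u32 duty)
	{
		writel(0, pwm_base + PWM_CNT);		/* restart the counter */
		writel(duty, pwm_base + PWM_HRC);	/* high-level reference */
		writel(period, pwm_base + PWM_LRC);	/* low-level reference / period */
		writel(PWM_OE | CNT_EN, pwm_base + PWM_CTRL);
	}
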
diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
index 6574568c2084..c39ee982ad3b 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-wdt.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * Loongson 1 watchdog register definitions.
+ * Loongson 1 Watchdog Register Definitions.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -12,11 +12,8 @@
#ifndef __ASM_MACH_LOONGSON1_REGS_WDT_H
#define __ASM_MACH_LOONGSON1_REGS_WDT_H
-#define LS1X_WDT_REG(x) \
- ((void __iomem *)KSEG1ADDR(LS1X_WDT_BASE + (x)))
-
-#define LS1X_WDT_EN LS1X_WDT_REG(0x0)
-#define LS1X_WDT_SET LS1X_WDT_REG(0x4)
-#define LS1X_WDT_TIMER LS1X_WDT_REG(0x8)
+#define WDT_EN 0x0
+#define WDT_TIMER 0x4
+#define WDT_SET 0x8
#endif /* __ASM_MACH_LOONGSON1_REGS_WDT_H */
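
The watchdog block follows the same pattern: the KSEG1-mapped absolute addresses are gone and WDT_EN/WDT_TIMER/WDT_SET are now offsets for a driver to add to its own ioremap() of LS1X_WDT_BASE. A hedged sketch with the register semantics assumed:

	static void demo_wdt_kick(void __iomem *wdt_base, u32 timeout_ticks)
	{
		writel(1, wdt_base + WDT_EN);
		writel(timeout_ticks, wdt_base + WDT_TIMER);
		writel(1, wdt_base + WDT_SET);
	}
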
diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h
index f2c13d211abb..47cfe64efbb0 100644
--- a/arch/mips/include/asm/mach-malta/irq.h
+++ b/arch/mips/include/asm/mach-malta/irq.h
@@ -2,7 +2,6 @@
#define __ASM_MACH_MIPS_IRQ_H
-#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index fc946c835995..2e54b4bff5cf 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -49,6 +49,7 @@
#include <linux/types.h>
+#include <asm/compiler.h>
#include <asm/war.h>
#ifndef R10000_LLSC_WAR
@@ -84,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (~mask), "ir" (value), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
}
/*
@@ -105,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (mask), GCC_OFF12_ASM() (*addr));
}
/*
@@ -126,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (~mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (~mask), GCC_OFF12_ASM() (*addr));
}
/*
@@ -147,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=m" (*addr)
- : "ir" (mask), "m" (*addr));
+ : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
+ : "ir" (mask), GCC_OFF12_ASM() (*addr));
}
/*
@@ -219,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
" .set arch=r4000 \n" \
"1: ll %0, %1 #custom_read_reg32 \n" \
" .set pop \n" \
- : "=r" (tmp), "=m" (*address) \
- : "m" (*address))
+ : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+ : GCC_OFF12_ASM() (*address))
#define custom_write_reg32(address, tmp) \
__asm__ __volatile__( \
@@ -230,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
" "__beqz"%0, 1b \n" \
" nop \n" \
" .set pop \n" \
- : "=&r" (tmp), "=m" (*address) \
- : "0" (tmp), "m" (*address))
+ : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \
+ : "0" (tmp), GCC_OFF12_ASM() (*address))
#endif /* __ASM_REGOPS_H__ */
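
The GCC_OFF12_ASM() changes above replace the plain "m" memory constraint with one, provided by <asm/compiler.h>, whose offset range the LL/SC instructions can actually encode. A hedged, stand-alone illustration of the same pattern; it is shortened (no .set push/pop or arch directives) and is not code from this file:

	static inline void demo_ll_sc_set_bits(volatile u32 *addr, u32 mask)
	{
		u32 tmp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		\n"
		"	or	%0, %2		\n"
		"	sc	%0, %1		\n"
		"	beqz	%0, 1b		\n"
		: "=&r" (tmp), "+" GCC_OFF12_ASM() (*addr)
		: "ir" (mask));
	}
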
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 6f9b24f51157..1976fb815fd1 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -13,6 +13,13 @@
#ifndef _MT7620_REGS_H_
#define _MT7620_REGS_H_
+enum mt762x_soc_type {
+ MT762X_SOC_UNKNOWN = 0,
+ MT762X_SOC_MT7620A,
+ MT762X_SOC_MT7620N,
+ MT762X_SOC_MT7628AN,
+};
+
#define MT7620_SYSC_BASE 0x10000000
#define SYSC_REG_CHIP_NAME0 0x00
@@ -25,11 +32,9 @@
#define SYSC_REG_CPLL_CONFIG0 0x54
#define SYSC_REG_CPLL_CONFIG1 0x58
-#define MT7620N_CHIP_NAME0 0x33365452
-#define MT7620N_CHIP_NAME1 0x20203235
-
-#define MT7620A_CHIP_NAME0 0x3637544d
-#define MT7620A_CHIP_NAME1 0x20203032
+#define MT7620_CHIP_NAME0 0x3637544d
+#define MT7620_CHIP_NAME1 0x20203032
+#define MT7628_CHIP_NAME1 0x20203832
#define SYSCFG0_XTAL_FREQ_SEL BIT(6)
@@ -74,6 +79,9 @@
#define SYSCFG0_DRAM_TYPE_DDR1 1
#define SYSCFG0_DRAM_TYPE_DDR2 2
+#define SYSCFG0_DRAM_TYPE_DDR2_MT7628 0
+#define SYSCFG0_DRAM_TYPE_DDR1_MT7628 1
+
#define MT7620_DRAM_BASE 0x0
#define MT7620_SDRAM_SIZE_MIN 2
#define MT7620_SDRAM_SIZE_MAX 64
@@ -82,7 +90,6 @@
#define MT7620_DDR2_SIZE_MIN 32
#define MT7620_DDR2_SIZE_MAX 256
-#define MT7620_GPIO_MODE_I2C BIT(0)
#define MT7620_GPIO_MODE_UART0_SHIFT 2
#define MT7620_GPIO_MODE_UART0_MASK 0x7
#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
@@ -94,15 +101,40 @@
#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
#define MT7620_GPIO_MODE_GPIO_I2S 0x6
#define MT7620_GPIO_MODE_GPIO 0x7
-#define MT7620_GPIO_MODE_UART1 BIT(5)
-#define MT7620_GPIO_MODE_MDIO BIT(8)
-#define MT7620_GPIO_MODE_RGMII1 BIT(9)
-#define MT7620_GPIO_MODE_RGMII2 BIT(10)
-#define MT7620_GPIO_MODE_SPI BIT(11)
-#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12)
-#define MT7620_GPIO_MODE_WLED BIT(13)
-#define MT7620_GPIO_MODE_JTAG BIT(15)
-#define MT7620_GPIO_MODE_EPHY BIT(15)
-#define MT7620_GPIO_MODE_WDT BIT(22)
+
+#define MT7620_GPIO_MODE_NAND 0
+#define MT7620_GPIO_MODE_SD 1
+#define MT7620_GPIO_MODE_ND_SD_GPIO 2
+#define MT7620_GPIO_MODE_ND_SD_MASK 0x3
+#define MT7620_GPIO_MODE_ND_SD_SHIFT 18
+
+#define MT7620_GPIO_MODE_PCIE_RST 0
+#define MT7620_GPIO_MODE_PCIE_REF 1
+#define MT7620_GPIO_MODE_PCIE_GPIO 2
+#define MT7620_GPIO_MODE_PCIE_MASK 0x3
+#define MT7620_GPIO_MODE_PCIE_SHIFT 16
+
+#define MT7620_GPIO_MODE_WDT_RST 0
+#define MT7620_GPIO_MODE_WDT_REF 1
+#define MT7620_GPIO_MODE_WDT_GPIO 2
+#define MT7620_GPIO_MODE_WDT_MASK 0x3
+#define MT7620_GPIO_MODE_WDT_SHIFT 21
+
+#define MT7620_GPIO_MODE_I2C 0
+#define MT7620_GPIO_MODE_UART1 5
+#define MT7620_GPIO_MODE_MDIO 8
+#define MT7620_GPIO_MODE_RGMII1 9
+#define MT7620_GPIO_MODE_RGMII2 10
+#define MT7620_GPIO_MODE_SPI 11
+#define MT7620_GPIO_MODE_SPI_REF_CLK 12
+#define MT7620_GPIO_MODE_WLED 13
+#define MT7620_GPIO_MODE_JTAG 15
+#define MT7620_GPIO_MODE_EPHY 15
+#define MT7620_GPIO_MODE_PA 20
+
+static inline int mt7620_get_eco(void)
+{
+ return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK;
+}
#endif
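
A sketch of how detection code might combine the new enum, the merged chip-name constants, and mt7620_get_eco(); which register distinguishes MT7620A from MT7620N is not shown in this hunk, so that decision is left as a comment, and the variable and function names are illustrative:

	static enum mt762x_soc_type demo_soc __initdata = MT762X_SOC_UNKNOWN;

	static void __init demo_detect_soc(void)
	{
		u32 n0 = rt_sysc_r32(SYSC_REG_CHIP_NAME0);
		u32 n1 = rt_sysc_r32(SYSC_REG_CHIP_NAME1);

		if (n0 == MT7620_CHIP_NAME0 && n1 == MT7620_CHIP_NAME1)
			demo_soc = MT762X_SOC_MT7620A;	/* A vs. N decided elsewhere */
		else if (n0 == MT7620_CHIP_NAME0 && n1 == MT7628_CHIP_NAME1)
			demo_soc = MT762X_SOC_MT7628AN;

		pr_info("ECO revision %d\n", mt7620_get_eco());
	}
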
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
new file mode 100644
index 000000000000..be106cb2e26d
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/pinmux.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.

+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_PINMUX_H__
+#define _RT288X_PINMUX_H__
+
+#define FUNC(name, value, pin_first, pin_count) \
+ { name, value, pin_first, pin_count }
+
+#define GRP(_name, _func, _mask, _shift) \
+ { .name = _name, .mask = _mask, .shift = _shift, \
+ .func = _func, .gpio = _mask, \
+ .func_count = ARRAY_SIZE(_func) }
+
+#define GRP_G(_name, _func, _mask, _gpio, _shift) \
+ { .name = _name, .mask = _mask, .shift = _shift, \
+ .func = _func, .gpio = _gpio, \
+ .func_count = ARRAY_SIZE(_func) }
+
+struct rt2880_pmx_group;
+
+struct rt2880_pmx_func {
+ const char *name;
+ const char value;
+
+ int pin_first;
+ int pin_count;
+ int *pins;
+
+ int *groups;
+ int group_count;
+
+ int enabled;
+};
+
+struct rt2880_pmx_group {
+ const char *name;
+ int enabled;
+
+ const u32 shift;
+ const char mask;
+ const char gpio;
+
+ struct rt2880_pmx_func *func;
+ int func_count;
+};
+
+extern struct rt2880_pmx_group *rt2880_pinmux_data;
+
+#endif
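
A hedged example of describing one pin group with the FUNC()/GRP() helpers above; the group name, pin range, and the 1-bit field at shift 0 are illustrative values only. A SoC setup file would then point rt2880_pinmux_data at such an array.

	static struct rt2880_pmx_func demo_i2c_func[] = {
		FUNC("i2c", 0, 1, 2),			/* value 0, pins 1..2 */
	};

	static struct rt2880_pmx_group demo_pinmux_data[] = {
		GRP("i2c", demo_i2c_func, 1, 0),	/* 1-bit wide field at shift 0 */
		{ 0 },
	};
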
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
index 5a508f9f9432..bd93014490df 100644
--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -26,6 +26,13 @@ static inline u32 rt_sysc_r32(unsigned reg)
return __raw_readl(rt_sysc_membase + reg);
}
+static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg)
+{
+ u32 val = rt_sysc_r32(reg) & ~clr;
+
+ __raw_writel(val | set, rt_sysc_membase + reg);
+}
+
static inline void rt_memc_w32(u32 val, unsigned reg)
{
__raw_writel(val, rt_memc_membase + reg);
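
rt_sysc_m32() is a small clear-then-set helper over the existing accessors. An illustrative call; the register offset and bit positions below are placeholders, not real MT7620 definitions:

	static void __init demo_rmw_sysc(void)
	{
		/* clear bit 2, set bit 0 in a hypothetical system-control register at 0x60 */
		rt_sysc_m32(BIT(2), BIT(0), 0x60);
	}
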
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 069bf37a6010..96f731bac79a 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -125,24 +125,29 @@ static inline int soc_is_rt5350(void)
#define RT305X_GPIO_GE0_TXD0 40
#define RT305X_GPIO_GE0_RXCLK 51
-#define RT305X_GPIO_MODE_I2C BIT(0)
-#define RT305X_GPIO_MODE_SPI BIT(1)
#define RT305X_GPIO_MODE_UART0_SHIFT 2
#define RT305X_GPIO_MODE_UART0_MASK 0x7
#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
-#define RT305X_GPIO_MODE_UARTF 0x0
-#define RT305X_GPIO_MODE_PCM_UARTF 0x1
-#define RT305X_GPIO_MODE_PCM_I2S 0x2
-#define RT305X_GPIO_MODE_I2S_UARTF 0x3
-#define RT305X_GPIO_MODE_PCM_GPIO 0x4
-#define RT305X_GPIO_MODE_GPIO_UARTF 0x5
-#define RT305X_GPIO_MODE_GPIO_I2S 0x6
-#define RT305X_GPIO_MODE_GPIO 0x7
-#define RT305X_GPIO_MODE_UART1 BIT(5)
-#define RT305X_GPIO_MODE_JTAG BIT(6)
-#define RT305X_GPIO_MODE_MDIO BIT(7)
-#define RT305X_GPIO_MODE_SDRAM BIT(8)
-#define RT305X_GPIO_MODE_RGMII BIT(9)
+#define RT305X_GPIO_MODE_UARTF 0
+#define RT305X_GPIO_MODE_PCM_UARTF 1
+#define RT305X_GPIO_MODE_PCM_I2S 2
+#define RT305X_GPIO_MODE_I2S_UARTF 3
+#define RT305X_GPIO_MODE_PCM_GPIO 4
+#define RT305X_GPIO_MODE_GPIO_UARTF 5
+#define RT305X_GPIO_MODE_GPIO_I2S 6
+#define RT305X_GPIO_MODE_GPIO 7
+
+#define RT305X_GPIO_MODE_I2C 0
+#define RT305X_GPIO_MODE_SPI 1
+#define RT305X_GPIO_MODE_UART1 5
+#define RT305X_GPIO_MODE_JTAG 6
+#define RT305X_GPIO_MODE_MDIO 7
+#define RT305X_GPIO_MODE_SDRAM 8
+#define RT305X_GPIO_MODE_RGMII 9
+#define RT5350_GPIO_MODE_PHY_LED 14
+#define RT5350_GPIO_MODE_SPI_CS1 21
+#define RT3352_GPIO_MODE_LNA 18
+#define RT3352_GPIO_MODE_PA 20
#define RT3352_SYSC_REG_SYSCFG0 0x010
#define RT3352_SYSC_REG_SYSCFG1 0x014
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
index 058382f37f92..0fbe6f9257cd 100644
--- a/arch/mips/include/asm/mach-ralink/rt3883.h
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -112,8 +112,6 @@
#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
-#define RT3883_GPIO_MODE_I2C BIT(0)
-#define RT3883_GPIO_MODE_SPI BIT(1)
#define RT3883_GPIO_MODE_UART0_SHIFT 2
#define RT3883_GPIO_MODE_UART0_MASK 0x7
#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
@@ -125,11 +123,15 @@
#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
#define RT3883_GPIO_MODE_GPIO_I2S 0x6
#define RT3883_GPIO_MODE_GPIO 0x7
-#define RT3883_GPIO_MODE_UART1 BIT(5)
-#define RT3883_GPIO_MODE_JTAG BIT(6)
-#define RT3883_GPIO_MODE_MDIO BIT(7)
-#define RT3883_GPIO_MODE_GE1 BIT(9)
-#define RT3883_GPIO_MODE_GE2 BIT(10)
+
+#define RT3883_GPIO_MODE_I2C 0
+#define RT3883_GPIO_MODE_SPI 1
+#define RT3883_GPIO_MODE_UART1 5
+#define RT3883_GPIO_MODE_JTAG 6
+#define RT3883_GPIO_MODE_MDIO 7
+#define RT3883_GPIO_MODE_GE1 9
+#define RT3883_GPIO_MODE_GE2 10
+
#define RT3883_GPIO_MODE_PCI_SHIFT 11
#define RT3883_GPIO_MODE_PCI_MASK 0x7
#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
index d8106f75b9af..5d154cfbcf4c 100644
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ b/arch/mips/include/asm/mach-sead3/irq.h
@@ -1,7 +1,6 @@
#ifndef __ASM_MACH_MIPS_IRQ_H
#define __ASM_MACH_MIPS_IRQ_H
-#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
#define NR_IRQS 256
diff --git a/arch/mips/include/asm/mach-tx39xx/ioremap.h b/arch/mips/include/asm/mach-tx39xx/ioremap.h
index 93c6c04ffda3..0874cd2b06d7 100644
--- a/arch/mips/include/asm/mach-tx39xx/ioremap.h
+++ b/arch/mips/include/asm/mach-tx39xx/ioremap.h
@@ -15,12 +15,12 @@
* Allow physical addresses to be fixed up to help peripherals located
* outside the low 32-bit range -- generic pass-through version.
*/
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return phys_addr;
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
#define TXX9_DIRECTMAP_BASE 0xff000000ul
diff --git a/arch/mips/include/asm/mach-tx49xx/ioremap.h b/arch/mips/include/asm/mach-tx49xx/ioremap.h
index 1e7beae72229..4b6a8441b25f 100644
--- a/arch/mips/include/asm/mach-tx49xx/ioremap.h
+++ b/arch/mips/include/asm/mach-tx49xx/ioremap.h
@@ -15,12 +15,12 @@
* Allow physical addresses to be fixed up to help peripherals located
* outside the low 32-bit range -- generic pass-through version.
*/
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
return phys_addr;
}
-static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
#ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index e330732ddf98..987ff580466b 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -10,7 +10,7 @@
#ifndef _MIPS_MALTAINT_H
#define _MIPS_MALTAINT_H
-#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#include <linux/irqchip/mips-gic.h>
/*
* Interrupts 0..15 are used for Malta ISA compatible interrupts
@@ -22,29 +22,28 @@
#define MIPSCPU_INT_SW1 1
#define MIPSCPU_INT_MB0 2
#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
+#define MIPSCPU_INT_GIC MIPSCPU_INT_MB0 /* GIC chained interrupt */
#define MIPSCPU_INT_MB1 3
#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
-#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
#define MIPSCPU_INT_MB2 4
-#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
#define MIPSCPU_INT_MB3 5
#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
#define MIPSCPU_INT_MB4 6
#define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4
/*
- * Interrupts 64..127 are used for Soc-it Classic interrupts
+ * Interrupts 96..127 are used for Soc-it Classic interrupts
*/
-#define MSC01C_INT_BASE 64
+#define MSC01C_INT_BASE 96
/* SOC-it Classic interrupt offsets */
#define MSC01C_INT_TMR 0
#define MSC01C_INT_PCI 1
/*
- * Interrupts 64..127 are used for Soc-it EIC interrupts
+ * Interrupts 96..127 are used for Soc-it EIC interrupts
*/
-#define MSC01E_INT_BASE 64
+#define MSC01E_INT_BASE 96
/* SOC-it EIC interrupt offsets */
#define MSC01E_INT_SW0 1
@@ -63,14 +62,7 @@
#define MSC01E_INT_PERFCTR 10
#define MSC01E_INT_CPUCTR 11
-/* External Interrupts used for IPI */
-#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
-#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
-#define GIC_IPI_EXT_INTR_RESCHED_VPE1 18
-#define GIC_IPI_EXT_INTR_CALLFNC_VPE1 19
-#define GIC_IPI_EXT_INTR_RESCHED_VPE2 20
-#define GIC_IPI_EXT_INTR_CALLFNC_VPE2 21
-#define GIC_IPI_EXT_INTR_RESCHED_VPE3 22
-#define GIC_IPI_EXT_INTR_CALLFNC_VPE3 23
+/* GIC external interrupts */
+#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3)
#endif /* !(_MIPS_MALTAINT_H) */
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
index 6b17aaf7d901..8932c7de0419 100644
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ b/arch/mips/include/asm/mips-boards/sead3int.h
@@ -10,10 +10,23 @@
#ifndef _MIPS_SEAD3INT_H
#define _MIPS_SEAD3INT_H
+#include <linux/irqchip/mips-gic.h>
+
/* SEAD-3 GIC address space definitions. */
#define GIC_BASE_ADDR 0x1b1c0000
#define GIC_ADDRSPACE_SZ (128 * 1024)
-#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 0)
+/* CPU interrupt offsets */
+#define CPU_INT_GIC 2
+#define CPU_INT_EHCI 2
+#define CPU_INT_UART0 4
+#define CPU_INT_UART1 4
+#define CPU_INT_NET 6
+
+/* GIC interrupt offsets */
+#define GIC_INT_NET GIC_SHARED_TO_HWIRQ(0)
+#define GIC_INT_UART1 GIC_SHARED_TO_HWIRQ(2)
+#define GIC_INT_UART0 GIC_SHARED_TO_HWIRQ(3)
+#define GIC_INT_EHCI GIC_SHARED_TO_HWIRQ(5)
#endif /* !(_MIPS_SEAD3INT_H) */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 6a9d2dd005ca..b95a827d763e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -30,7 +30,7 @@ extern void __iomem *mips_cm_l2sync_base;
* different way by defining a function with the same prototype except for the
* name mips_cm_phys_base (without underscores).
*/
-extern phys_t __mips_cm_phys_base(void);
+extern phys_addr_t __mips_cm_phys_base(void);
/**
* mips_cm_probe - probe for a Coherence Manager
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index e139a534e0fd..1cebe8c79051 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -25,7 +25,7 @@ extern void __iomem *mips_cpc_base;
* memory mapped registers. This is platform dependent & must therefore be
* implemented per-platform.
*/
-extern phys_t mips_cpc_default_phys_base(void);
+extern phys_addr_t mips_cpc_default_phys_base(void);
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
@@ -35,7 +35,7 @@ extern phys_t mips_cpc_default_phys_base(void);
* is present. It may be overridden by individual platforms which determine
* this address in a different way.
*/
-extern phys_t __weak mips_cpc_phys_base(void);
+extern phys_addr_t __weak mips_cpc_phys_base(void);
/**
* mips_cpc_probe - probe for a Cluster Power Controller
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 22a135ac91de..5e4aef304b02 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,9 @@
#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
+#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
+#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
+#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
@@ -694,6 +697,7 @@
#define MIPS_FPIR_W (_ULCAST_(1) << 20)
#define MIPS_FPIR_L (_ULCAST_(1) << 21)
#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
+#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
/*
* Bits in the MIPS32 Memory Segmentation registers.
@@ -994,6 +998,39 @@ do { \
local_irq_restore(__flags); \
} while (0)
+#define __readx_32bit_c0_register(source) \
+({ \
+ unsigned int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips32r2 \n" \
+ " .insn \n" \
+ " # mfhc0 $1, %1 \n" \
+ " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (source)); \
+ __res; \
+})
+
+#define __writex_32bit_c0_register(register, value) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips32r2 \n" \
+ " move $1, %0 \n" \
+ " # mthc0 $1, %1 \n" \
+ " .insn \n" \
+ " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (value), "i" (register)); \
+} while (0)
+
#define read_c0_index() __read_32bit_c0_register($0, 0)
#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
@@ -1003,9 +1040,15 @@ do { \
#define read_c0_entrylo0() __read_ulong_c0_register($2, 0)
#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val)
+#define readx_c0_entrylo0() __readx_32bit_c0_register(2)
+#define writex_c0_entrylo0(val) __writex_32bit_c0_register(2, val)
+
#define read_c0_entrylo1() __read_ulong_c0_register($3, 0)
#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val)
+#define readx_c0_entrylo1() __readx_32bit_c0_register(3)
+#define writex_c0_entrylo1(val) __writex_32bit_c0_register(3, val)
+
#define read_c0_conf() __read_32bit_c0_register($3, 0)
#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val)
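
The readx/writex accessors wrap the MFHC0/MTHC0 encodings to reach the upper half of EntryLo when physical addresses exceed 32 bits. A hedged pairing with the existing accessors, assuming a CPU that implements those instructions; the helper name is made up:

	static inline void demo_write_wide_entrylo0(unsigned long lo, unsigned int hi)
	{
		write_c0_entrylo0(lo);		/* lower 32 bits, unchanged behaviour */
		writex_c0_entrylo0(hi);		/* upper physical-address bits via MTHC0 */
	}
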
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 024a71b2bff9..75739c83f07e 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -76,6 +76,8 @@
#include <linux/prefetch.h>
+#include <asm/compiler.h>
+
#include <asm/octeon/cvmx-fpa.h>
/**
* By default we disable the max depth support. Most programs
@@ -273,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
" lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
- [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
[my_ticket] "=r"(my_ticket)
);
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 4b4d0ecfd9eb..2188e65afb86 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -1066,7 +1066,7 @@ static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
uint64_t switch_complete;
CVMX_MF_CHORD(switch_complete);
if (!switch_complete)
- pr_warning("%s called with tag switch in progress\n", function);
+ pr_warn("%s called with tag switch in progress\n", function);
}
/**
@@ -1084,8 +1084,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
if (unlikely(switch_complete))
break;
if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
- pr_warning("Tag switch is taking a long time, "
- "possible deadlock\n");
+ pr_warn("Tag switch is taking a long time, possible deadlock\n");
start_cycle = -MAX_CYCLES - 1;
}
}
@@ -1296,19 +1295,16 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
__cvmx_pow_warn_if_pending_switch(__func__);
current_tag = cvmx_pow_get_current_tag();
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag\n", __func__);
+ pr_warn("%s called with NULL tag\n", __func__);
if ((current_tag.s.type == tag_type)
&& (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to the "
- "same tag\n",
- __func__);
+ pr_warn("%s called to perform a tag switch to the same tag\n",
+ __func__);
if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
+ pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
}
/*
@@ -1407,23 +1403,19 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
__cvmx_pow_warn_if_pending_switch(__func__);
current_tag = cvmx_pow_get_current_tag();
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
if ((current_tag.s.type == tag_type)
&& (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to "
- "the same tag\n",
- __func__);
+ pr_warn("%s called to perform a tag switch to the same tag\n",
+ __func__);
if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
+ pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
if (wqp != cvmx_phys_to_ptr(0x80))
if (wqp != cvmx_pow_get_current_wqp())
- pr_warning("%s passed WQE(%p) doesn't match "
- "the address in the POW(%p)\n",
- __func__, wqp,
- cvmx_pow_get_current_wqp());
+ pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
+ __func__, wqp,
+ cvmx_pow_get_current_wqp());
}
/*
@@ -1507,12 +1499,10 @@ static inline void cvmx_pow_tag_sw_null_nocheck(void)
__cvmx_pow_warn_if_pending_switch(__func__);
current_tag = cvmx_pow_get_current_tag();
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called when we already have a "
- "NULL tag\n",
- __func__);
+ pr_warn("%s called when we already have a NULL tag\n",
+ __func__);
}
tag_req.u64 = 0;
@@ -1725,17 +1715,14 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
__cvmx_pow_warn_if_pending_switch(__func__);
current_tag = cvmx_pow_get_current_tag();
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "allowed from NULL state\n",
- __func__);
+ pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n",
+ __func__);
if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
&& (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
- pr_warning("%s called where neither the before or "
- "after tag is ATOMIC\n",
- __func__);
+ pr_warn("%s called where neither the before or after tag is ATOMIC\n",
+ __func__);
}
tag_req.u64 = 0;
@@ -1832,12 +1819,10 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
__cvmx_pow_warn_if_pending_switch(__func__);
current_tag = cvmx_pow_get_current_tag();
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "expected from NULL state\n",
- __func__);
+ pr_warn("%s called with NULL tag. Deschedule not expected from NULL state\n",
+ __func__);
}
/* Need to make sure any writes to the work queue entry are complete */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index f991e7701d3d..33db1c806b01 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -451,67 +451,4 @@ static inline uint32_t cvmx_octeon_num_cores(void)
return cvmx_pop(ciu_fuse);
}
-/**
- * Read a byte of fuse data
- * @byte_addr: address to read
- *
- * Returns fuse value: 0 or 1
- */
-static uint8_t cvmx_fuse_read_byte(int byte_addr)
-{
- union cvmx_mio_fus_rcmd read_cmd;
-
- read_cmd.u64 = 0;
- read_cmd.s.addr = byte_addr;
- read_cmd.s.pend = 1;
- cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
- while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
- && read_cmd.s.pend)
- ;
- return read_cmd.s.dat;
-}
-
-/**
- * Read a single fuse bit
- *
- * @fuse: Fuse number (0-1024)
- *
- * Returns fuse value: 0 or 1
- */
-static inline int cvmx_fuse_read(int fuse)
-{
- return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
-}
-
-static inline int cvmx_octeon_model_CN36XX(void)
-{
- return OCTEON_IS_MODEL(OCTEON_CN38XX)
- && !cvmx_octeon_is_pass1()
- && cvmx_fuse_read(264);
-}
-
-static inline int cvmx_octeon_zip_present(void)
-{
- return octeon_has_feature(OCTEON_FEATURE_ZIP);
-}
-
-static inline int cvmx_octeon_dfa_present(void)
-{
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
- && !OCTEON_IS_MODEL(OCTEON_CN31XX)
- && !OCTEON_IS_MODEL(OCTEON_CN58XX))
- return 0;
- else if (OCTEON_IS_MODEL(OCTEON_CN3020))
- return 0;
- else if (cvmx_octeon_is_pass1())
- return 1;
- else
- return !cvmx_fuse_read(120);
-}
-
-static inline int cvmx_octeon_crypto_present(void)
-{
- return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
-}
-
#endif /* __CVMX_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 90e05a8d4b15..c4fe81f47f53 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -86,8 +86,6 @@ enum octeon_feature {
OCTEON_MAX_FEATURE
};
-static inline int cvmx_fuse_read(int fuse);
-
/**
* Determine if the current Octeon supports a specific feature. These
* checks have been optimized to be fairly quick, but they should still
@@ -105,33 +103,6 @@ static inline int octeon_has_feature(enum octeon_feature feature)
case OCTEON_FEATURE_SAAD:
return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
- case OCTEON_FEATURE_ZIP:
- if (OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX))
- return 0;
- else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
- return 1;
- else
- return !cvmx_fuse_read(121);
-
- case OCTEON_FEATURE_CRYPTO:
- if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
- union cvmx_mio_fus_dat2 fus_2;
- fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
- if (fus_2.s.nocrypto || fus_2.s.nomul) {
- return 0;
- } else if (!fus_2.s.dorm_crypto) {
- return 1;
- } else {
- union cvmx_rnm_ctl_status st;
- st.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
- return st.s.eer_val;
- }
- } else {
- return !cvmx_fuse_read(90);
- }
-
case OCTEON_FEATURE_DORM_CRYPTO:
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_mio_fus_dat2 fus_2;
@@ -188,29 +159,6 @@ static inline int octeon_has_feature(enum octeon_feature feature)
&& !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
&& !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X);
- case OCTEON_FEATURE_DFA:
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
- && !OCTEON_IS_MODEL(OCTEON_CN31XX)
- && !OCTEON_IS_MODEL(OCTEON_CN58XX))
- return 0;
- else if (OCTEON_IS_MODEL(OCTEON_CN3020))
- return 0;
- else
- return !cvmx_fuse_read(120);
-
- case OCTEON_FEATURE_HFA:
- if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
- return 0;
- else
- return !cvmx_fuse_read(90);
-
- case OCTEON_FEATURE_DFM:
- if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)
- || OCTEON_IS_MODEL(OCTEON_CN66XX)))
- return 0;
- else
- return !cvmx_fuse_read(90);
-
case OCTEON_FEATURE_MDIO_CLAUSE_45:
return !(OCTEON_IS_MODEL(OCTEON_CN3XXX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index e2c122c6a657..e8a1c2fd52cd 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -326,8 +326,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
#define OCTEON_IS_COMMON_BINARY() 1
#undef OCTEON_MODEL
-const char *octeon_model_get_string(uint32_t chip_id);
-const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer);
+const char *__init octeon_model_get_string(uint32_t chip_id);
/*
* Return the octeon family, i.e., ProcessorID of the PrID register.
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
index 2474fc5d1751..af81ab0da55f 100644
--- a/arch/mips/include/asm/paccess.h
+++ b/arch/mips/include/asm/paccess.h
@@ -56,6 +56,7 @@ struct __large_pstruct { unsigned long buf[100]; };
"1:\t" insn "\t%1,%2\n\t" \
"move\t%0,$0\n" \
"2:\n\t" \
+ ".insn\n\t" \
".section\t.fixup,\"ax\"\n" \
"3:\tli\t%0,%3\n\t" \
"move\t%1,$0\n\t" \
@@ -94,6 +95,7 @@ extern void __get_dbe_unknown(void);
"1:\t" insn "\t%1,%2\n\t" \
"move\t%0,$0\n" \
"2:\n\t" \
+ ".insn\n\t" \
".section\t.fixup,\"ax\"\n" \
"3:\tli\t%0,%3\n\t" \
"j\t2b\n\t" \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 3be81803595d..154b70a10483 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -116,7 +116,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
/*
* These are used to make use of C type-checking..
*/
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 974b0e308963..69529624a005 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -84,7 +84,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc, resource_size_t *start,
resource_size_t *end)
{
- phys_t size = resource_size(rsrc);
+ phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index cd7d6064bcbe..68984b612f9d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -69,7 +69,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
@@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp)
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t
@@ -126,7 +126,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
@@ -155,73 +155,75 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
/* Swap entries must have VALID bit cleared. */
-#define __swp_type(x) (((x).val >> 10) & 0x1f)
-#define __swp_offset(x) ((x).val >> 15)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __swp_type(x) (((x).val >> 10) & 0x1f)
+#define __swp_offset(x) ((x).val >> 15)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+ * Encode and decode a nonlinear file mapping entry
*/
-#define PTE_FILE_MAX_BITS 28
-
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
- (((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
+ (((_pte).pte >> 2 ) & 0x38) | \
+ (((_pte).pte >> 10) << 6 ))
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
- (((off) & 0x38) << 2 ) | \
- (((off) >> 6 ) << 10) | \
- _PAGE_FILE })
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
+ (((off) & 0x38) << 2 ) | \
+ (((off) >> 6 ) << 10) | \
+ _PAGE_FILE })
+/*
+ * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS 28
#else
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
/* Swap entries must have VALID and GLOBAL bits cleared. */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
-#else
-#define __swp_type(x) (((x).val >> 8) & 0x1f)
-#define __swp_offset(x) ((x).val >> 13)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((x).val >> 7)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
* Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
*/
-#define PTE_FILE_MAX_BITS 30
-
-#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
+#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
+#define PTE_FILE_MAX_BITS 30
#else
/*
- * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+ * Constraints:
+ * _PAGE_PRESENT at bit 0
+ * _PAGE_MODIFIED at bit 4
+ * _PAGE_GLOBAL at bit 6
+ * _PAGE_VALID at bit 7
*/
-#define PTE_FILE_MAX_BITS 28
+#define __swp_type(x) (((x).val >> 8) & 0x1f)
+#define __swp_offset(x) ((x).val >> 13)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
- (((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
+ (((_pte).pte >> 2) & 0x8) | \
+ (((_pte).pte >> 8) << 4))
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
- (((off) & 0x8) << 2) | \
- (((off) >> 4) << 8) | \
- _PAGE_FILE })
-#endif
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
+ (((off) & 0x8) << 2) | \
+ (((off) >> 4) << 8) | \
+ _PAGE_FILE })
-#endif
+#define PTE_FILE_MAX_BITS 28
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
-#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
-#else
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#endif
+#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
#endif /* _ASM_PGTABLE_32_H */
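
The reshuffled definitions above keep the encode/decode pair self-consistent. A hedged sanity sketch for the plain 32-bit PTE case; it only holds when the type fits in 5 bits and the offset in the remaining field, and the helper itself is not part of the patch:

	static inline void demo_swp_roundtrip(unsigned long type, unsigned long offset)
	{
		swp_entry_t e = __swp_entry(type, offset);
		pte_t pte = __swp_entry_to_pte(e);

		WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
		WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
	}
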
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index e747bfa0be7e..ca11f14f40a3 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,39 +32,41 @@
* unpredictable things. The code (when it is written) to deal with
* this problem will be in the update_mmu_cache() code for the r4k.
*/
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/*
* The following bits are directly used by the TLB hardware
*/
-#define _PAGE_R4KBUG (1 << 0) /* workaround for r4k bug */
-#define _PAGE_GLOBAL (1 << 0)
-#define _PAGE_VALID_SHIFT 1
+#define _PAGE_GLOBAL_SHIFT 0
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ (1 << 1) /* synonym */
-#define _PAGE_DIRTY_SHIFT 2
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
-#define _PAGE_SILENT_WRITE (1 << 2)
-#define _CACHE_SHIFT 3
-#define _CACHE_MASK (7 << 3)
+#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK (7 << _CACHE_SHIFT)
/*
* The following bits are implemented in software
*
* _PAGE_FILE semantics: set:pagecache unset:swap
*/
-#define _PAGE_PRESENT_SHIFT 6
+#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT 7
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT 8
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT 9
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT 10
+#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE (1 << 10)
+#define _PAGE_SILENT_READ _PAGE_VALID
+#define _PAGE_SILENT_WRITE _PAGE_DIRTY
+#define _PAGE_FILE _PAGE_MODIFIED
+
+#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -172,7 +174,7 @@
#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
#ifndef _PFN_SHIFT
#define _PFN_SHIFT PAGE_SHIFT
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index d6d1928539b1..62a6ba383d4f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -125,7 +125,7 @@ do { \
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pteval);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
@@ -227,7 +227,7 @@ extern pgd_t swapper_pg_dir[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
@@ -297,13 +297,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
- pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
+ pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
- pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+ pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
return pte;
}
@@ -382,13 +382,13 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_high &= ~0x3f;
+ pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot);
- pte.pte_high |= pgprot_val(newprot) & 0x3f;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#else
@@ -419,7 +419,7 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
@@ -428,7 +428,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long size,
pgprot_t prot)
{
- phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+ phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index a9494c0141fb..eaa26270a5e5 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -22,6 +22,7 @@ extern void device_tree_init(void);
struct boot_param_header;
extern void __dt_setup_arch(void *bph);
+extern int __dt_register_buses(const char *bus0, const char *bus1);
#define dt_setup_arch(sym) \
({ \
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index cd6e0afc6833..e293a8d89a6d 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -47,79 +47,20 @@ extern void (*r4k_blast_icache)(void);
#ifdef CONFIG_MIPS_MT
-/*
- * Optionally force single-threaded execution during I-cache flushes.
- */
-#define PROTECT_CACHE_FLUSHES 1
-
-#ifdef PROTECT_CACHE_FLUSHES
-
-extern int mt_protiflush;
-extern int mt_protdflush;
-extern void mt_cflush_lockdown(void);
-extern void mt_cflush_release(void);
-
-#define BEGIN_MT_IPROT \
- unsigned long flags = 0; \
- unsigned long mtflags = 0; \
- if(mt_protiflush) { \
- local_irq_save(flags); \
- ehb(); \
- mtflags = dvpe(); \
- mt_cflush_lockdown(); \
- }
-
-#define END_MT_IPROT \
- if(mt_protiflush) { \
- mt_cflush_release(); \
- evpe(mtflags); \
- local_irq_restore(flags); \
- }
-
-#define BEGIN_MT_DPROT \
- unsigned long flags = 0; \
- unsigned long mtflags = 0; \
- if(mt_protdflush) { \
- local_irq_save(flags); \
- ehb(); \
- mtflags = dvpe(); \
- mt_cflush_lockdown(); \
- }
-
-#define END_MT_DPROT \
- if(mt_protdflush) { \
- mt_cflush_release(); \
- evpe(mtflags); \
- local_irq_restore(flags); \
- }
-
-#else
-
-#define BEGIN_MT_IPROT
-#define BEGIN_MT_DPROT
-#define END_MT_IPROT
-#define END_MT_DPROT
-
-#endif /* PROTECT_CACHE_FLUSHES */
-
#define __iflush_prologue \
unsigned long redundance; \
extern int mt_n_iflushes; \
- BEGIN_MT_IPROT \
for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
#define __iflush_epilogue \
- END_MT_IPROT \
}
#define __dflush_prologue \
unsigned long redundance; \
extern int mt_n_dflushes; \
- BEGIN_MT_DPROT \
for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
#define __dflush_epilogue \
- END_MT_DPROT \
}
#define __inv_dflush_prologue __dflush_prologue
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 78d201fb6c87..c6d06d383ef9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <asm/barrier.h>
+#include <asm/compiler.h>
#include <asm/war.h>
/*
@@ -88,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
@@ -121,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
@@ -163,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
@@ -187,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+m" (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
@@ -234,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -244,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" bltz %1, 1b \n"
" addu %1, 1 \n"
"2: sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -268,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -277,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
"1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -298,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -308,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
"2: sc %1, %0 \n"
- : "=m" (rw->lock), "=&r" (tmp)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -348,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
@@ -365,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
}
@@ -392,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -405,8 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" sc %1, %0 \n"
" li %2, 1 \n"
"2: \n"
- : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
- : "m" (rw->lock)
+ : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+ "=&r" (ret)
+ : GCC_OFF12_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 7de865805deb..99eea59604e9 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
+#define TIF_HYBRID_FPREGS 28 /* 64b FP registers, odd singles in bits 63:32 of even doubles */
#define TIF_USEDMSA 29 /* MSA has been used this quantum */
#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
@@ -135,6 +136,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
+#define _TIF_HYBRID_FPREGS (1<<TIF_HYBRID_FPREGS)
#define _TIF_USEDMSA (1<<TIF_USEDMSA)
#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 8f3047d611ee..8ab2874225c4 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -46,19 +46,17 @@ extern unsigned int mips_hpt_frequency;
* so it lives here.
*/
extern int (*perf_irq)(void);
+extern int __weak get_c0_perfcount_int(void);
/*
* Initialize the calling CPU's compare interrupt as clockevent device
*/
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
-extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
-#if defined(CONFIG_CEVT_GIC)
- return (gic_clockevent_init() | r4k_clockevent_init());
-#elif defined(CONFIG_CEVT_R4K)
+#ifdef CONFIG_CEVT_R4K
return r4k_clockevent_init();
#else
return -ENXIO;
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index a845aafedee4..148d42a17f30 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -11,23 +11,7 @@
#ifndef _ASM_TYPES_H
#define _ASM_TYPES_H
-# include <asm-generic/int-ll64.h>
+#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifndef __ASSEMBLY__
-
-/*
- * Don't use phys_t. You've been warned.
- */
-#ifdef CONFIG_64BIT_PHYS_ADDR
-typedef unsigned long long phys_t;
-#else
-typedef unsigned long phys_t;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_TYPES_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 22a5624e2fd2..bf8b32450ef6 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1325,33 +1325,6 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
return res;
}
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-static inline long __strlen_user(const char __user *s)
-{
- long res;
-
- if (segment_eq(get_fs(), get_ds())) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_kernel_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
- }
-
- return res;
-}
-
/*
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 708c5d414905..fc1cdd25fcda 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -136,9 +136,11 @@ Ip_u1s2(_lui);
Ip_u2s3u1(_lw);
Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0);
+Ip_u1u2u3(_mfhc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u1u2u3(_mtc0);
+Ip_u1u2u3(_mthc0);
Ip_u3u1u2(_mul);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4bfdb9d4c186..89c22433b1c6 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -108,9 +108,10 @@ enum rt_op {
*/
enum cop_op {
mfc_op = 0x00, dmfc_op = 0x01,
- cfc_op = 0x02, mfhc_op = 0x03,
- mtc_op = 0x04, dmtc_op = 0x05,
- ctc_op = 0x06, mthc_op = 0x07,
+ cfc_op = 0x02, mfhc0_op = 0x02,
+ mfhc_op = 0x03, mtc_op = 0x04,
+ dmtc_op = 0x05, ctc_op = 0x06,
+ mthc0_op = 0x06, mthc_op = 0x07,
bc_op = 0x08, cop_op = 0x10,
copm_op = 0x18
};
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index a14baa218c76..dec3c850f36b 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -98,4 +98,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 76eafcb79c89..ef796f97b996 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -32,7 +32,7 @@ static void __init jz4740_detect_mem(void)
{
void __iomem *jz_emc_base;
u32 ctrl, bus, bank, rows, cols;
- phys_t size;
+ phys_addr_t size;
jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100);
ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL);
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 008a2fed0584..92987d1bbe5f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,9 +4,10 @@
extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
- prom.o ptrace.o reset.o setup.o signal.o syscall.o \
- time.o topology.o traps.o unaligned.o watch.o vdso.o
+obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+ process.o prom.o ptrace.o reset.o setup.o signal.o \
+ syscall.o time.o topology.o traps.o unaligned.o watch.o \
+ vdso.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
@@ -18,12 +19,10 @@ endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
-obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
-obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
@@ -68,7 +67,6 @@ obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
-obj-$(CONFIG_IRQ_GIC) += irq-gic.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
deleted file mode 100644
index 6093716980b9..000000000000
--- a/arch/mips/kernel/cevt-gic.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2013 Imagination Technologies Ltd.
- */
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-
-#include <asm/time.h>
-#include <asm/gic.h>
-#include <asm/mips-boards/maltaint.h>
-
-DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
-int gic_timer_irq_installed;
-
-
-static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- u64 cnt;
- int res;
-
- cnt = gic_read_count();
- cnt += (u64)delta;
- gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
- res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
- return res;
-}
-
-void gic_set_clock_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
-irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *cd;
- int cpu = smp_processor_id();
-
- gic_write_compare(gic_read_compare());
- cd = &per_cpu(gic_clockevent_device, cpu);
- cd->event_handler(cd);
- return IRQ_HANDLED;
-}
-
-struct irqaction gic_compare_irqaction = {
- .handler = gic_compare_interrupt,
- .flags = IRQF_PERCPU | IRQF_TIMER,
- .name = "timer",
-};
-
-
-void gic_event_handler(struct clock_event_device *dev)
-{
-}
-
-int gic_clockevent_init(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- unsigned int irq;
-
- if (!cpu_has_counter || !gic_frequency)
- return -ENXIO;
-
- irq = MIPS_GIC_IRQ_BASE;
-
- cd = &per_cpu(gic_clockevent_device, cpu);
-
- cd->name = "MIPS GIC";
- cd->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_C3STOP;
-
- clockevent_set_clock(cd, gic_frequency);
-
- /* Calculate the min / max delta */
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
-
- cd->rating = 300;
- cd->irq = irq;
- cd->cpumask = cpumask_of(cpu);
- cd->set_next_event = gic_next_event;
- cd->set_mode = gic_set_clock_mode;
- cd->event_handler = gic_event_handler;
-
- clockevents_register_device(cd);
-
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
-
- if (gic_timer_irq_installed)
- return 0;
-
- gic_timer_irq_installed = 1;
-
- setup_irq(irq, &gic_compare_irqaction);
- irq_set_handler(irq, handle_percpu_irq);
- return 0;
-}
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index bc127e22fdab..6acaad0480af 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -11,10 +11,10 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/irqchip/mips-gic.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
-#include <asm/gic.h>
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -85,8 +85,8 @@ void mips_event_handler(struct clock_event_device *dev)
*/
static int c0_compare_int_pending(void)
{
-#ifdef CONFIG_IRQ_GIC
- if (cpu_has_veic)
+#ifdef CONFIG_MIPS_GIC
+ if (gic_present)
return gic_get_timer_pending();
#endif
return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dc49cf30c2db..5342674842f5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -69,6 +69,63 @@ static int __init htw_disable(char *s)
__setup("nohtw", htw_disable);
+static int mips_ftlb_disabled;
+static int mips_has_ftlb_configured;
+
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+
+static int __init ftlb_disable(char *s)
+{
+ unsigned int config4, mmuextdef;
+
+ /*
+ * If the core hasn't done any FTLB configuration, there is nothing
+ * for us to do here.
+ */
+ if (!mips_has_ftlb_configured)
+ return 1;
+
+ /* Disable it in the boot cpu */
+ set_ftlb_enable(&cpu_data[0], 0);
+
+ back_to_back_c0_hazard();
+
+ config4 = read_c0_config4();
+
+ /* Check that FTLB has been disabled */
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ /* MMUSIZEEXT == VTLB ON, FTLB OFF */
+ if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) {
+ /* This should never happen */
+ pr_warn("FTLB could not be disabled!\n");
+ return 1;
+ }
+
+ mips_ftlb_disabled = 1;
+ mips_has_ftlb_configured = 0;
+
+ /*
+ * noftlb is mainly used for debug purposes so print
+ * an informative message instead of using pr_debug()
+ */
+ pr_info("FTLB has been disabled\n");
+
+ /*
+ * Some of these bits are duplicated in the decode_config4.
+ * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case
+ * once FTLB has been disabled so undo what decode_config4 did.
+ */
+ cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
+ cpu_data[0].tlbsizeftlbsets;
+ cpu_data[0].tlbsizeftlbsets = 0;
+ cpu_data[0].tlbsizeftlbways = 0;
+
+ return 1;
+}
+
+__setup("noftlb", ftlb_disable);
+
+
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -140,7 +197,7 @@ static inline unsigned long cpu_get_fpu_id(void)
*/
static inline int __cpu_has_fpu(void)
{
- return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
+ return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
}
static inline unsigned long cpu_get_msa_id(void)
@@ -399,6 +456,8 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
/* fall through */
case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ if (mips_ftlb_disabled)
+ break;
newcf4 = (config4 & ~ftlb_page) |
(page_size_ftlb(mmuextdef) <<
MIPS_CONF4_FTLBPAGESIZE_SHIFT);
@@ -418,6 +477,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+ mips_has_ftlb_configured = 1;
break;
}
}
@@ -432,7 +492,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
unsigned int config5;
config5 = read_c0_config5();
- config5 &= ~MIPS_CONF5_UFR;
+ config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
write_c0_config5(config5);
if (config5 & MIPS_CONF5_EVA)
@@ -453,8 +513,8 @@ static void decode_configs(struct cpuinfo_mips *c)
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
- /* Enable FTLB if present */
- set_ftlb_enable(c, 1);
+ /* Enable FTLB if present and not disabled */
+ set_ftlb_enable(c, !mips_ftlb_disabled);
ok = decode_config0(c); /* Read Config registers. */
BUG_ON(!ok); /* Arch spec violation! */
@@ -1058,6 +1118,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
case PRID_IMP_BMIPS5000:
+ case PRID_IMP_BMIPS5200:
c->cputype = CPU_BMIPS5000;
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
@@ -1288,6 +1349,8 @@ void cpu_probe(void)
MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_FREP)
+ c->options |= MIPS_CPU_FRE;
}
}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index f291cf99b03a..6fe7790e5868 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -38,7 +38,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
kunmap_atomic(vaddr);
} else {
if (!kdump_buf_page) {
- pr_warning("Kdump: Kdump buffer page not allocated\n");
+ pr_warn("Kdump: Kdump buffer page not allocated\n");
return -EFAULT;
}
@@ -57,7 +57,7 @@ static int __init kdump_buf_page_init(void)
kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!kdump_buf_page) {
- pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+ pr_warn("Kdump: Failed to allocate kdump buffer page\n");
ret = -ENOMEM;
}
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
deleted file mode 100644
index e02620901117..000000000000
--- a/arch/mips/kernel/csrc-gic.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/time.h>
-
-#include <asm/gic.h>
-
-static cycle_t gic_hpt_read(struct clocksource *cs)
-{
- return gic_read_count();
-}
-
-static struct clocksource gic_clocksource = {
- .name = "GIC",
- .read = gic_hpt_read,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-void __init gic_clocksource_init(unsigned int frequency)
-{
- unsigned int config, bits;
-
- /* Calculate the clocksource mask. */
- GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
- bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
- (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
-
- /* Set clocksource mask. */
- gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
-
- /* Calculate a somewhat reasonable rating value. */
- gic_clocksource.rating = 200 + frequency / 10000000;
-
- clocksource_register_hz(&gic_clocksource, frequency);
-}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
new file mode 100644
index 000000000000..c92b15df6893
--- /dev/null
+++ b/arch/mips/kernel/elf.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/elf.h>
+#include <linux/sched.h>
+
+enum {
+ FP_ERROR = -1,
+ FP_DOUBLE_64A = -2,
+};
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ struct elfhdr *ehdr = _ehdr;
+ struct elf_phdr *phdr = _phdr;
+ struct mips_elf_abiflags_v0 abiflags;
+ int ret;
+
+ if (config_enabled(CONFIG_64BIT) &&
+ (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+ return 0;
+ if (phdr->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+ if (phdr->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+
+ ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
+ sizeof(abiflags));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(abiflags))
+ return -EIO;
+
+ /* Record the required FP ABIs for use by mips_check_elf */
+ if (is_interp)
+ state->interp_fp_abi = abiflags.fp_abi;
+ else
+ state->fp_abi = abiflags.fp_abi;
+
+ return 0;
+}
+
+static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+{
+ /* If the ABI requirement is provided, simply return that */
+ if (in_abi != -1)
+ return in_abi;
+
+ /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
+ if (ehdr->e_flags & EF_MIPS_FP64)
+ return MIPS_ABI_FP_64;
+
+ /* Default to MIPS_ABI_FP_DOUBLE */
+ return MIPS_ABI_FP_DOUBLE;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter,
+ struct arch_elf_state *state)
+{
+ struct elfhdr *ehdr = _ehdr;
+ unsigned fp_abi, interp_fp_abi, abi0, abi1;
+
+ /* Ignore non-O32 binaries */
+ if (config_enabled(CONFIG_64BIT) &&
+ (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+ return 0;
+
+ fp_abi = get_fp_abi(ehdr, state->fp_abi);
+
+ if (has_interpreter) {
+ interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+
+ abi0 = min(fp_abi, interp_fp_abi);
+ abi1 = max(fp_abi, interp_fp_abi);
+ } else {
+ abi0 = abi1 = fp_abi;
+ }
+
+ state->overall_abi = FP_ERROR;
+
+ if (abi0 == abi1) {
+ state->overall_abi = abi0;
+ } else if (abi0 == MIPS_ABI_FP_ANY) {
+ state->overall_abi = abi1;
+ } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
+ switch (abi1) {
+ case MIPS_ABI_FP_XX:
+ state->overall_abi = MIPS_ABI_FP_DOUBLE;
+ break;
+
+ case MIPS_ABI_FP_64A:
+ state->overall_abi = FP_DOUBLE_64A;
+ break;
+ }
+ } else if (abi0 == MIPS_ABI_FP_SINGLE ||
+ abi0 == MIPS_ABI_FP_SOFT) {
+ /* Cannot link with other ABIs */
+ } else if (abi0 == MIPS_ABI_FP_OLD_64) {
+ switch (abi1) {
+ case MIPS_ABI_FP_XX:
+ case MIPS_ABI_FP_64:
+ case MIPS_ABI_FP_64A:
+ state->overall_abi = MIPS_ABI_FP_64;
+ break;
+ }
+ } else if (abi0 == MIPS_ABI_FP_XX ||
+ abi0 == MIPS_ABI_FP_64 ||
+ abi0 == MIPS_ABI_FP_64A) {
+ state->overall_abi = MIPS_ABI_FP_64;
+ }
+
+ switch (state->overall_abi) {
+ case MIPS_ABI_FP_64:
+ case MIPS_ABI_FP_64A:
+ case FP_DOUBLE_64A:
+ if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return -ELIBBAD;
+ break;
+
+ case FP_ERROR:
+ return -ELIBBAD;
+ }
+
+ return 0;
+}
+
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
+ /*
+ * Use hybrid FPRs for all code which can correctly execute
+ * with that mode.
+ */
+ switch (state->overall_abi) {
+ case MIPS_ABI_FP_DOUBLE:
+ case MIPS_ABI_FP_SINGLE:
+ case MIPS_ABI_FP_SOFT:
+ case MIPS_ABI_FP_XX:
+ case MIPS_ABI_FP_ANY:
+ /* FR=1, FRE=1 */
+ clear_thread_flag(TIF_32BIT_FPREGS);
+ set_thread_flag(TIF_HYBRID_FPREGS);
+ return;
+ }
+ }
+
+ switch (state->overall_abi) {
+ case MIPS_ABI_FP_DOUBLE:
+ case MIPS_ABI_FP_SINGLE:
+ case MIPS_ABI_FP_SOFT:
+ /* FR=0 */
+ set_thread_flag(TIF_32BIT_FPREGS);
+ clear_thread_flag(TIF_HYBRID_FPREGS);
+ break;
+
+ case FP_DOUBLE_64A:
+ /* FR=1, FRE=1 */
+ clear_thread_flag(TIF_32BIT_FPREGS);
+ set_thread_flag(TIF_HYBRID_FPREGS);
+ break;
+
+ case MIPS_ABI_FP_64:
+ case MIPS_ABI_FP_64A:
+ /* FR=1, FRE=0 */
+ clear_thread_flag(TIF_32BIT_FPREGS);
+ clear_thread_flag(TIF_HYBRID_FPREGS);
+ break;
+
+ case MIPS_ABI_FP_XX:
+ case MIPS_ABI_FP_ANY:
+ if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ set_thread_flag(TIF_32BIT_FPREGS);
+ else
+ clear_thread_flag(TIF_32BIT_FPREGS);
+
+ clear_thread_flag(TIF_HYBRID_FPREGS);
+ break;
+
+ default:
+ case FP_ERROR:
+ BUG();
+ }
+}
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 50b364897dda..a74ec3ae557c 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
@@ -308,6 +309,19 @@ static struct resource pic2_io_resource = {
.flags = IORESOURCE_BUSY
};
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static struct irq_domain_ops i8259A_ops = {
+ .map = i8259A_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
/*
* On systems with i8259-style interrupt controllers we assume for
* driver compatibility reasons interrupts 0 - 15 to be the i8259
@@ -315,17 +329,17 @@ static struct resource pic2_io_resource = {
*/
void __init init_i8259_irqs(void)
{
- int i;
+ struct irq_domain *domain;
insert_resource(&ioport_resource, &pic1_io_resource);
insert_resource(&ioport_resource, &pic2_io_resource);
init_8259A(0);
- for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
- irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
- irq_set_probe(i);
- }
+ domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
+ if (!domain)
+ panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
deleted file mode 100644
index 9e9d8b9a5b97..000000000000
--- a/arch/mips/kernel/irq-gic.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/bitmap.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-
-#include <asm/io.h>
-#include <asm/gic.h>
-#include <asm/setup.h>
-#include <asm/traps.h>
-#include <linux/hardirq.h>
-#include <asm-generic/bitops/find.h>
-
-unsigned int gic_frequency;
-unsigned int gic_present;
-unsigned long _gic_base;
-unsigned int gic_irq_base;
-unsigned int gic_irq_flags[GIC_NUM_INTRS];
-
-/* The index into this array is the vector # of the interrupt. */
-struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
-
-struct gic_pcpu_mask {
- DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
-};
-
-struct gic_pending_regs {
- DECLARE_BITMAP(pending, GIC_NUM_INTRS);
-};
-
-struct gic_intrmask_regs {
- DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
-};
-
-static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
-static struct gic_pending_regs pending_regs[NR_CPUS];
-static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
-
-#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
-cycle_t gic_read_count(void)
-{
- unsigned int hi, hi2, lo;
-
- do {
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
- } while (hi2 != hi);
-
- return (((cycle_t) hi) << 32) + lo;
-}
-
-void gic_write_compare(cycle_t cnt)
-{
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
- (int)(cnt >> 32));
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
- (int)(cnt & 0xffffffff));
-}
-
-void gic_write_cpu_compare(cycle_t cnt, int cpu)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
- (int)(cnt >> 32));
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
- (int)(cnt & 0xffffffff));
-
- local_irq_restore(flags);
-}
-
-cycle_t gic_read_compare(void)
-{
- unsigned int hi, lo;
-
- GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
- GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
-
- return (((cycle_t) hi) << 32) + lo;
-}
-#endif
-
-unsigned int gic_get_timer_pending(void)
-{
- unsigned int vpe_pending;
-
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
- GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
- return (vpe_pending & GIC_VPE_PEND_TIMER_MSK);
-}
-
-void gic_bind_eic_interrupt(int irq, int set)
-{
- /* Convert irq vector # to hw int # */
- irq -= GIC_PIN_TO_VEC_OFFSET;
-
- /* Set irq to use shadow set */
- GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
-}
-
-void gic_send_ipi(unsigned int intr)
-{
- GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
-}
-
-static void gic_eic_irq_dispatch(void)
-{
- unsigned int cause = read_c0_cause();
- int irq;
-
- irq = (cause & ST0_IM) >> STATUSB_IP2;
- if (irq == 0)
- irq = -1;
-
- if (irq >= 0)
- do_IRQ(gic_irq_base + irq);
- else
- spurious_interrupt();
-}
-
-static void __init vpe_local_setup(unsigned int numvpes)
-{
- unsigned long timer_intr = GIC_INT_TMR;
- unsigned long perf_intr = GIC_INT_PERFCTR;
- unsigned int vpe_ctl;
- int i;
-
- if (cpu_has_veic) {
- /*
- * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
- * map to pin X+2-1 (since GIC adds 1)
- */
- timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
- /*
- * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
- * map to pin X+2-1 (since GIC adds 1)
- */
- perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
- }
-
- /*
- * Setup the default performance counter timer interrupts
- * for all VPEs
- */
- for (i = 0; i < numvpes; i++) {
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-
- /* Are Interrupts locally routable? */
- GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
- if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
- GIC_MAP_TO_PIN_MSK | timer_intr);
- if (cpu_has_veic) {
- set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
- gic_eic_irq_dispatch);
- gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
- }
-
- if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
- GIC_MAP_TO_PIN_MSK | perf_intr);
- if (cpu_has_veic) {
- set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
- gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
- }
- }
-}
-
-unsigned int gic_compare_int(void)
-{
- unsigned int pending;
-
- GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
- if (pending & GIC_VPE_PEND_CMP_MSK)
- return 1;
- else
- return 0;
-}
-
-void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
-{
- unsigned int i;
- unsigned long *pending, *intrmask, *pcpu_mask;
- unsigned long *pending_abs, *intrmask_abs;
-
- /* Get per-cpu bitmaps */
- pending = pending_regs[smp_processor_id()].pending;
- intrmask = intrmask_regs[smp_processor_id()].intrmask;
- pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
-
- pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
- GIC_SH_PEND_31_0_OFS);
- intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
- GIC_SH_MASK_31_0_OFS);
-
- for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
- GICREAD(*pending_abs, pending[i]);
- GICREAD(*intrmask_abs, intrmask[i]);
- pending_abs++;
- intrmask_abs++;
- }
-
- bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
- bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
- bitmap_and(dst, src, pending, GIC_NUM_INTRS);
-}
-
-unsigned int gic_get_int(void)
-{
- DECLARE_BITMAP(interrupts, GIC_NUM_INTRS);
-
- bitmap_fill(interrupts, GIC_NUM_INTRS);
- gic_get_int_mask(interrupts, interrupts);
-
- return find_first_bit(interrupts, GIC_NUM_INTRS);
-}
-
-static void gic_mask_irq(struct irq_data *d)
-{
- GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
-}
-
-static void gic_unmask_irq(struct irq_data *d)
-{
- GIC_SET_INTR_MASK(d->irq - gic_irq_base);
-}
-
-#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(gic_lock);
-
-static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
- bool force)
-{
- unsigned int irq = (d->irq - gic_irq_base);
- cpumask_t tmp = CPU_MASK_NONE;
- unsigned long flags;
- int i;
-
- cpumask_and(&tmp, cpumask, cpu_online_mask);
- if (cpus_empty(tmp))
- return -1;
-
- /* Assumption : cpumask refers to a single CPU */
- spin_lock_irqsave(&gic_lock, flags);
-
- /* Re-route this IRQ */
- GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
-
- /* Update the pcpu_masks */
- for (i = 0; i < NR_CPUS; i++)
- clear_bit(irq, pcpu_masks[i].pcpu_mask);
- set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
-
- cpumask_copy(d->affinity, cpumask);
- spin_unlock_irqrestore(&gic_lock, flags);
-
- return IRQ_SET_MASK_OK_NOCOPY;
-}
-#endif
-
-static struct irq_chip gic_irq_controller = {
- .name = "MIPS GIC",
- .irq_ack = gic_irq_ack,
- .irq_mask = gic_mask_irq,
- .irq_mask_ack = gic_mask_irq,
- .irq_unmask = gic_unmask_irq,
- .irq_eoi = gic_finish_irq,
-#ifdef CONFIG_SMP
- .irq_set_affinity = gic_set_affinity,
-#endif
-};
-
-static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
- unsigned int pin, unsigned int polarity, unsigned int trigtype,
- unsigned int flags)
-{
- struct gic_shared_intr_map *map_ptr;
-
- /* Setup Intr to Pin mapping */
- if (pin & GIC_MAP_TO_NMI_MSK) {
- int i;
-
- GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
- /* FIXME: hack to route NMI to all cpu's */
- for (i = 0; i < NR_CPUS; i += 32) {
- GICWRITE(GIC_REG_ADDR(SHARED,
- GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
- 0xffffffff);
- }
- } else {
- GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
- GIC_MAP_TO_PIN_MSK | pin);
- /* Setup Intr to CPU mapping */
- GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
- if (cpu_has_veic) {
- set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
- gic_eic_irq_dispatch);
- map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET];
- if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR)
- BUG();
- map_ptr->intr_list[map_ptr->num_shared_intr++] = intr;
- }
- }
-
- /* Setup Intr Polarity */
- GIC_SET_POLARITY(intr, polarity);
-
- /* Setup Intr Trigger Type */
- GIC_SET_TRIGGER(intr, trigtype);
-
- /* Init Intr Masks */
- GIC_CLR_INTR_MASK(intr);
-
- /* Initialise per-cpu Interrupt software masks */
- set_bit(intr, pcpu_masks[cpu].pcpu_mask);
-
- if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
- GIC_SET_INTR_MASK(intr);
- if (trigtype == GIC_TRIG_EDGE)
- gic_irq_flags[intr] |= GIC_TRIG_EDGE;
-}
-
-static void __init gic_basic_init(int numintrs, int numvpes,
- struct gic_intr_map *intrmap, int mapsize)
-{
- unsigned int i, cpu;
- unsigned int pin_offset = 0;
-
- board_bind_eic_interrupt = &gic_bind_eic_interrupt;
-
- /* Setup defaults */
- for (i = 0; i < numintrs; i++) {
- GIC_SET_POLARITY(i, GIC_POL_POS);
- GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
- GIC_CLR_INTR_MASK(i);
- if (i < GIC_NUM_INTRS) {
- gic_irq_flags[i] = 0;
- gic_shared_intr_map[i].num_shared_intr = 0;
- gic_shared_intr_map[i].local_intr_mask = 0;
- }
- }
-
- /*
- * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
- * one because the GIC will add one (since 0=no intr).
- */
- if (cpu_has_veic)
- pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
-
- /* Setup specifics */
- for (i = 0; i < mapsize; i++) {
- cpu = intrmap[i].cpunum;
- if (cpu == GIC_UNUSED)
- continue;
- gic_setup_intr(i,
- intrmap[i].cpunum,
- intrmap[i].pin + pin_offset,
- intrmap[i].polarity,
- intrmap[i].trigtype,
- intrmap[i].flags);
- }
-
- vpe_local_setup(numvpes);
-}
-
-void __init gic_init(unsigned long gic_base_addr,
- unsigned long gic_addrspace_size,
- struct gic_intr_map *intr_map, unsigned int intr_map_size,
- unsigned int irqbase)
-{
- unsigned int gicconfig;
- int numvpes, numintrs;
-
- _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
- gic_addrspace_size);
- gic_irq_base = irqbase;
-
- GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
- numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
- GIC_SH_CONFIG_NUMINTRS_SHF;
- numintrs = ((numintrs + 1) * 8);
-
- numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
- GIC_SH_CONFIG_NUMVPES_SHF;
- numvpes = numvpes + 1;
-
- gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
-
- gic_platform_init(numintrs, &gic_irq_controller);
-}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index e498f2b3646a..590c2c980fd3 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -36,6 +36,7 @@
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
+#include <asm/setup.h>
static inline void unmask_mips_irq(struct irq_data *d)
{
@@ -94,28 +95,24 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
.irq_eoi = unmask_mips_irq,
};
-void __init mips_cpu_irq_init(void)
+asmlinkage void __weak plat_irq_dispatch(void)
{
- int irq_base = MIPS_CPU_IRQ_BASE;
- int i;
+ unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
+ int irq;
- /* Mask interrupts. */
- clear_c0_status(ST0_IM);
- clear_c0_cause(CAUSEF_IP);
-
- /* Software interrupts are used for MT/CMT IPI */
- for (i = irq_base; i < irq_base + 2; i++)
- irq_set_chip_and_handler(i, cpu_has_mipsmt ?
- &mips_mt_cpu_irq_controller :
- &mips_cpu_irq_controller,
- handle_percpu_irq);
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
- for (i = irq_base + 2; i < irq_base + 8; i++)
- irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
- handle_percpu_irq);
+ pending >>= CAUSEB_IP;
+ while (pending) {
+ irq = fls(pending) - 1;
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ pending &= ~BIT(irq);
+ }
}
-#ifdef CONFIG_IRQ_DOMAIN
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
@@ -128,6 +125,9 @@ static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
chip = &mips_cpu_irq_controller;
}
+ if (cpu_has_vint)
+ set_vi_handler(hw, plat_irq_dispatch);
+
irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
return 0;
@@ -138,8 +138,7 @@ static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-int __init mips_cpu_intc_init(struct device_node *of_node,
- struct device_node *parent)
+static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
struct irq_domain *domain;
@@ -151,7 +150,16 @@ int __init mips_cpu_intc_init(struct device_node *of_node,
&mips_cpu_intc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add irqdomain for MIPS CPU");
+}
+void __init mips_cpu_irq_init(void)
+{
+ __mips_cpu_irq_init(NULL);
+}
+
+int __init mips_cpu_irq_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ __mips_cpu_irq_init(of_node);
return 0;
}
-#endif /* CONFIG_IRQ_DOMAIN */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index f76f7a08412d..85bbe9b96759 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -16,7 +16,7 @@
void __iomem *mips_cm_base;
void __iomem *mips_cm_l2sync_base;
-phys_t __mips_cm_phys_base(void)
+phys_addr_t __mips_cm_phys_base(void)
{
u32 config3 = read_c0_config3();
u32 cmgcr;
@@ -30,10 +30,10 @@ phys_t __mips_cm_phys_base(void)
return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
}
-phys_t mips_cm_phys_base(void)
+phys_addr_t mips_cm_phys_base(void)
__attribute__((weak, alias("__mips_cm_phys_base")));
-phys_t __mips_cm_l2sync_phys_base(void)
+phys_addr_t __mips_cm_l2sync_phys_base(void)
{
u32 base_reg;
@@ -49,13 +49,13 @@ phys_t __mips_cm_l2sync_phys_base(void)
return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
}
-phys_t mips_cm_l2sync_phys_base(void)
+phys_addr_t mips_cm_l2sync_phys_base(void)
__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
static void mips_cm_probe_l2sync(void)
{
unsigned major_rev;
- phys_t addr;
+ phys_addr_t addr;
/* L2-only sync was introduced with CM major revision 6 */
major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
@@ -78,7 +78,7 @@ static void mips_cm_probe_l2sync(void)
int mips_cm_probe(void)
{
- phys_t addr;
+ phys_addr_t addr;
u32 base_reg;
addr = mips_cm_phys_base();
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index ba473608a347..11964501c4b0 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
-phys_t __weak mips_cpc_phys_base(void)
+phys_addr_t __weak mips_cpc_phys_base(void)
{
u32 cpc_base;
@@ -44,7 +44,7 @@ phys_t __weak mips_cpc_phys_base(void)
int mips_cpc_probe(void)
{
- phys_t addr;
+ phys_addr_t addr;
unsigned cpu;
for_each_possible_cpu(cpu)
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2607c3a4ff7e..17eaf0cf760c 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -24,9 +24,7 @@ extern long __strncpy_from_user_nocheck_asm(char *__to,
const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
long __len);
-extern long __strlen_kernel_nocheck_asm(const char *s);
extern long __strlen_kernel_asm(const char *s);
-extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
extern long __strnlen_kernel_nocheck_asm(const char *s);
extern long __strnlen_kernel_asm(const char *s);
@@ -62,9 +60,7 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_kernel_asm);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
-EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
EXPORT_SYMBOL(__strlen_kernel_asm);
-EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
EXPORT_SYMBOL(__strnlen_kernel_asm);
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a8f9cdc6f8b0..9466184d0039 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -561,8 +561,8 @@ static int mipspmu_get_irq(void)
IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
"mips_perf_pmu", NULL);
if (err) {
- pr_warning("Unable to request IRQ%d for MIPS "
- "performance counters!\n", mipspmu.irq);
+ pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
+ mipspmu.irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
@@ -572,8 +572,7 @@ static int mipspmu_get_irq(void)
perf_irq = mipsxx_pmu_handle_shared_irq;
err = 0;
} else {
- pr_warning("The platform hasn't properly defined its "
- "interrupt controller.\n");
+ pr_warn("The platform hasn't properly defined its interrupt controller\n");
err = -ENOENT;
}
@@ -1614,22 +1613,13 @@ init_hw_perf_events(void)
counters = counters_total_to_per_cpu(counters);
#endif
-#ifdef MSC01E_INT_BASE
- if (cpu_has_veic) {
- /*
- * Using platform specific interrupt controller defines.
- */
- irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
- } else {
-#endif
- if ((cp0_perfcount_irq >= 0) &&
- (cp0_compare_irq != cp0_perfcount_irq))
- irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- else
- irq = -1;
-#ifdef MSC01E_INT_BASE
- }
-#endif
+ if (get_c0_perfcount_int)
+ irq = get_c0_perfcount_int();
+ else if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 636b0745d7c7..eb76434828e8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -42,6 +42,7 @@
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
+#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
@@ -187,21 +188,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
*/
if (mm_insn_16bit(ip->halfword[0])) {
mmi.word = (ip->halfword[0] << 16);
- return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
- mmi.mm16_r5_format.rt == 31) ||
- (mmi.mm16_m_format.opcode == mm_pool16c_op &&
- mmi.mm16_m_format.func == mm_swm16_op));
+ return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+ mmi.mm16_r5_format.rt == 31) ||
+ (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+ mmi.mm16_m_format.func == mm_swm16_op);
}
else {
mmi.halfword[0] = ip->halfword[1];
mmi.halfword[1] = ip->halfword[0];
- return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
- mmi.mm_m_format.rd > 9 &&
- mmi.mm_m_format.base == 29 &&
- mmi.mm_m_format.func == mm_swm32_func) ||
- (mmi.i_format.opcode == mm_sw32_op &&
- mmi.i_format.rs == 29 &&
- mmi.i_format.rt == 31));
+ return (mmi.mm_m_format.opcode == mm_pool32b_op &&
+ mmi.mm_m_format.rd > 9 &&
+ mmi.mm_m_format.base == 29 &&
+ mmi.mm_m_format.func == mm_swm32_func) ||
+ (mmi.i_format.opcode == mm_sw32_op &&
+ mmi.i_format.rs == 29 &&
+ mmi.i_format.rt == 31);
}
#else
/* sw / sd $ra, offset($sp) */
@@ -233,7 +234,7 @@ static inline int is_jump_ins(union mips_instruction *ip)
if (ip->r_format.opcode != mm_pool32a_op ||
ip->r_format.func != mm_pool32axf_op)
return 0;
- return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+ return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
if (ip->j_format.opcode == j_op)
return 1;
@@ -260,13 +261,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
union mips_instruction mmi;
mmi.word = (ip->halfword[0] << 16);
- return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
- mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
- (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
- mmi.mm16_r5_format.rt == 29));
+ return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+ mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
+ (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+ mmi.mm16_r5_format.rt == 29);
}
- return (ip->mm_i_format.opcode == mm_addiu32_op &&
- ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+ return ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
@@ -532,3 +533,20 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
+
+static void arch_dump_stack(void *info)
+{
+ struct pt_regs *regs;
+
+ regs = get_irq_regs();
+
+ if (regs)
+ show_regs(regs);
+
+ dump_stack();
+}
+
+void arch_trigger_all_cpu_backtrace(bool include_self)
+{
+ smp_call_function(arch_dump_stack, NULL, 1);
+}
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5d39bb85bf35..452d4350ce42 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -54,4 +55,21 @@ void __init __dt_setup_arch(void *bph)
mips_set_machine_name(of_flat_dt_get_machine_name());
}
+
+int __init __dt_register_buses(const char *bus0, const char *bus1)
+{
+ static struct of_device_id of_ids[3];
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
+ strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible));
+
+ if (of_platform_populate(NULL, of_ids, NULL, NULL))
+ panic("failed to populate DT");
+
+ return 0;
+}
+
#endif
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f3b635f86c39..058929041368 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -82,14 +82,14 @@ static struct resource data_resource = { .name = "Kernel data", };
static void *detect_magic __initdata = detect_memory_region;
-void __init add_memory_region(phys_t start, phys_t size, long type)
+void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
int x = boot_mem_map.nr_map;
int i;
/* Sanity check */
if (start + size < start) {
- pr_warning("Trying to add an invalid memory region, skipped\n");
+ pr_warn("Trying to add an invalid memory region, skipped\n");
return;
}
@@ -127,10 +127,10 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
boot_mem_map.nr_map++;
}
-void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
void *dm = &detect_magic;
- phys_t size;
+ phys_addr_t size;
for (size = sz_min; size < sz_max; size <<= 1) {
if (!memcmp(dm, dm + size, sizeof(detect_magic)))
@@ -493,7 +493,7 @@ static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
- phys_t start, size;
+ phys_addr_t start, size;
/*
* If a user specifies memory size, we
@@ -545,9 +545,9 @@ static int __init early_parse_elfcorehdr(char *p)
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
-static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
+static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
- phys_t size;
+ phys_addr_t size;
int i;
size = end - mem;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 16f1e4f2bf3c..545bf11bd2ed 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -530,7 +530,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
void *vdso;
- unsigned int tmp = (unsigned int)current->mm->context.vdso;
+ unsigned long tmp = (unsigned long)current->mm->context.vdso;
set_isa16_mode(tmp);
vdso = (void *)tmp;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 06bb5ed6d80a..b8bd9340c9c7 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -35,6 +35,7 @@
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>
+#include <asm/cpu-features.h>
static int __maybe_unused max_cpus = 1;
@@ -42,6 +43,12 @@ static int __maybe_unused max_cpus = 1;
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;
+unsigned long bmips_tp1_irqs = IE_IRQ1;
+
+#define RESET_FROM_KSEG0 0x80080800
+#define RESET_FROM_KSEG1 0xa0080800
+
+static void bmips_set_reset_vec(int cpu, u32 val);
#ifdef CONFIG_SMP
@@ -194,6 +201,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
pr_info("SMP: Booting CPU%d...\n", cpu);
if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
+ /* kseg1 might not exist if this CPU enabled XKS01 */
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
@@ -203,8 +213,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
bmips5000_send_ipi_single(cpu, 0);
break;
}
- }
- else {
+ } else {
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
@@ -213,17 +224,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
set_c0_brcm_cmt_ctrl(0x01);
break;
case CPU_BMIPS5000:
- if (cpu & 0x01)
- write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
- else {
- /*
- * core N thread 0 was already booted; just
- * pulse the NMI line
- */
- bmips_write_zscm_reg(0x210, 0xc0000000);
- udelay(10);
- bmips_write_zscm_reg(0x210, 0x00);
- }
+ write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
break;
}
cpumask_set_cpu(cpu, &bmips_booted_mask);
@@ -235,31 +236,12 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
- /* move NMI vector to kseg0, in case XKS01 is enabled */
-
- void __iomem *cbr;
- unsigned long old_vec;
- unsigned long relo_vector;
- int boot_cpu;
-
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
- cbr = BMIPS_GET_CBR();
-
- boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
- relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
- BMIPS_RELO_VECTOR_CONTROL_1;
-
- old_vec = __raw_readl(cbr + relo_vector);
- __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
-
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
break;
case CPU_BMIPS5000:
- write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
- (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
-
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
break;
}
@@ -276,7 +258,7 @@ static void bmips_smp_finish(void)
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
irq_enable_hazard();
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+ set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE);
irq_enable_hazard();
}
@@ -381,6 +363,7 @@ static int bmips_cpu_disable(void)
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
+ clear_c0_status(IE_IRQ5);
local_flush_tlb_all();
local_flush_icache_range(0, ~0);
@@ -405,7 +388,8 @@ void __ref play_dead(void)
* IRQ handlers; this clears ST0_IE and returns immediately.
*/
clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
- change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
+ change_c0_status(
+ IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
irq_disable_hazard();
@@ -473,10 +457,61 @@ static inline void bmips_nmi_handler_setup(void)
&bmips_smp_int_vec_end);
}
+struct reset_vec_info {
+ int cpu;
+ u32 val;
+};
+
+static void bmips_set_reset_vec_remote(void *vinfo)
+{
+ struct reset_vec_info *info = vinfo;
+ int shift = info->cpu & 0x01 ? 16 : 0;
+ u32 mask = ~(0xffff << shift), val = info->val >> 16;
+
+ preempt_disable();
+ if (smp_processor_id() > 0) {
+ smp_call_function_single(0, &bmips_set_reset_vec_remote,
+ info, 1);
+ } else {
+ if (info->cpu & 0x02) {
+ /* BMIPS5200 "should" use mask/shift, but it's buggy */
+ bmips_write_zscm_reg(0xa0, (val << 16) | val);
+ bmips_read_zscm_reg(0xa0);
+ } else {
+ write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) |
+ (val << shift));
+ }
+ }
+ preempt_enable();
+}
+
+static void bmips_set_reset_vec(int cpu, u32 val)
+{
+ struct reset_vec_info info;
+
+ if (current_cpu_type() == CPU_BMIPS5000) {
+ /* this needs to run from CPU0 (which is always online) */
+ info.cpu = cpu;
+ info.val = val;
+ bmips_set_reset_vec_remote(&info);
+ } else {
+ void __iomem *cbr = BMIPS_GET_CBR();
+
+ if (cpu == 0)
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+ else {
+ if (current_cpu_type() != CPU_BMIPS4380)
+ return;
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ }
+ }
+ __sync();
+ back_to_back_c0_hazard();
+}
+
void bmips_ebase_setup(void)
{
unsigned long new_ebase = ebase;
- void __iomem __maybe_unused *cbr;
BUG_ON(ebase != CKSEG0);
@@ -496,15 +531,14 @@ void bmips_ebase_setup(void)
&bmips_smp_int_vec, 0x80);
__sync();
return;
+ case CPU_BMIPS3300:
case CPU_BMIPS4380:
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_0400: normal vectors
*/
new_ebase = 0x80000400;
- cbr = BMIPS_GET_CBR();
- __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
- __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
break;
case CPU_BMIPS5000:
/*
@@ -512,10 +546,8 @@ void bmips_ebase_setup(void)
* 0x8000_1000: normal vectors
*/
new_ebase = 0x80001000;
- write_c0_brcm_bootvec(0xa0088008);
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
write_c0_ebase(new_ebase);
- if (max_cpus > 2)
- bmips_write_zscm_reg(0xa0, 0xa008a008);
break;
default:
return;
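
For reference, the BMIPS5000 path above funnels the boot-vector update to CPU0 via smp_call_function_single(), since (per the comment in bmips_set_reset_vec()) it has to run from CPU0. A minimal sketch of that pattern, not taken from the patch; touch_cpu0_only_register() is a hypothetical stand-in for the actual register write:

#include <linux/smp.h>
#include <linux/preempt.h>

static void run_on_cpu0(void *arg)
{
	preempt_disable();
	if (smp_processor_id() != 0)
		/* bounce ourselves to CPU0 and wait for completion (last arg = 1) */
		smp_call_function_single(0, run_on_cpu0, arg, 1);
	else
		touch_cpu0_only_register(arg);	/* hypothetical CPU0-only write */
	preempt_enable();
}
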
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index fc8a51553426..1e0a93c5a3e7 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -24,6 +24,7 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
@@ -37,7 +38,6 @@
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/amon.h>
-#include <asm/gic.h>
static void cmp_init_secondary(void)
{
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e6e16a1d4add..bed7590e475f 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -9,13 +9,13 @@
*/
#include <linux/io.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/bcache.h>
-#include <asm/gic.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
@@ -273,8 +273,8 @@ static void cps_init_secondary(void)
if (cpu_has_mipsmt)
dmt();
- change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
- STATUSF_IP6 | STATUSF_IP7);
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
index 3b21a96d1ccb..5f0ab5bcd01e 100644
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -12,9 +12,9 @@
* option) any later version.
*/
+#include <linux/irqchip/mips-gic.h>
#include <linux/printk.h>
-#include <asm/gic.h>
#include <asm/mips-cpc.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 21f23add04f4..ad86951b73bd 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/compiler.h>
#include <linux/smp.h>
@@ -34,7 +35,6 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
-#include <asm/gic.h>
static void __init smvp_copy_vpe_config(void)
{
@@ -119,7 +119,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
unsigned long flags;
int vpflags;
-#ifdef CONFIG_IRQ_GIC
+#ifdef CONFIG_MIPS_GIC
if (gic_present) {
gic_send_ipi_single(cpu, action);
return;
@@ -158,7 +158,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
static void vsmp_init_secondary(void)
{
-#ifdef CONFIG_IRQ_GIC
+#ifdef CONFIG_MIPS_GIC
/* This is Malta specific: IPI, performance and timer interrupts */
if (gic_present)
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 4a4f9dda5658..604b558809c4 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -117,6 +117,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
"2: sc %[tmp], (%[addr]) \n"
" beqzl %[tmp], 1b \n"
"3: \n"
+ " .insn \n"
" .section .fixup,\"ax\" \n"
"4: li %[err], %[efault] \n"
" j 3b \n"
@@ -142,6 +143,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
"2: sc %[tmp], (%[addr]) \n"
" bnez %[tmp], 4f \n"
"3: \n"
+ " .insn \n"
" .subsection 2 \n"
"4: b 1b \n"
" .previous \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 22b19c275044..ad3d2031c327 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -724,6 +724,50 @@ int process_fpemu_return(int sig, void __user *fault_addr)
}
}
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ union mips_instruction inst = { .word = opcode };
+ void __user *fault_addr = NULL;
+ int sig;
+
+ /* If it's obviously not an FP instruction, skip it */
+ switch (inst.i_format.opcode) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case ldc1_op:
+ case swc1_op:
+ case sdc1_op:
+ break;
+
+ default:
+ return -1;
+ }
+
+ /*
+ * do_ri skipped over the instruction via compute_return_epc, undo
+ * that for the FPU emulator.
+ */
+ regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
+
+ /* Save the FP context to struct thread_struct */
+ lose_fpu(1);
+
+ /* Run the emulator */
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+
+ /* If something went wrong, signal */
+ process_fpemu_return(sig, fault_addr);
+
+ /* Restore the hardware register state */
+ own_fpu(1);
+
+ return 0;
+}
+
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
@@ -1016,6 +1060,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
if (status < 0)
status = simulate_sync(regs, opcode);
+
+ if (status < 0)
+ status = simulate_fp(regs, opcode, old_epc, old31);
}
if (status < 0)
@@ -1380,12 +1427,19 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
show_regs(regs);
if (multi_match) {
- printk("Index : %0x\n", read_c0_index());
- printk("Pagemask: %0x\n", read_c0_pagemask());
- printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
- printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
- printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
- printk("\n");
+ pr_err("Index : %0x\n", read_c0_index());
+ pr_err("Pagemask: %0x\n", read_c0_pagemask());
+ pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
+ pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+ pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+ pr_err("Wired : %0x\n", read_c0_wired());
+ pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
+ if (cpu_has_htw) {
+ pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
+ pr_err("PWSize : %0*lx\n", field, read_c0_pwsize());
+ pr_err("PWCtl : %0x\n", read_c0_pwctl());
+ }
+ pr_err("\n");
dump_tlb_all();
}
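
The do_ri() hunk above slots simulate_fp() into an existing chain of opcode simulators: each simulate_*() helper returns 0 when it recognised and emulated the opcode, and a negative value to let the next candidate try. Schematically (condensed from the surrounding function; the final signal delivery is shown only for illustration):

	int status = -1;

	/* ... earlier simulators elided ... */
	if (status < 0)
		status = simulate_sync(regs, opcode);
	if (status < 0)
		status = simulate_fp(regs, opcode, old_epc, old31);

	if (status < 0)
		force_sig(SIGILL, current);	/* nothing claimed the opcode */
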
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 0f1af58b036a..ed2a278722a9 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -16,9 +16,11 @@
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
+#include <linux/random.h>
#include <asm/vdso.h>
#include <asm/uasm.h>
+#include <asm/processor.h>
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
@@ -67,7 +69,18 @@ subsys_initcall(init_vdso);
static unsigned long vdso_addr(unsigned long start)
{
- return STACK_TOP;
+ unsigned long offset = 0UL;
+
+ if (current->flags & PF_RANDOMIZE) {
+ offset = get_random_int();
+ offset <<= PAGE_SHIFT;
+ if (TASK_IS_32BIT_ADDR)
+ offset &= 0xfffffful;
+ else
+ offset &= 0xffffffful;
+ }
+
+ return STACK_TOP + offset;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
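
The new vdso_addr() keeps the random offset page-aligned by shifting before masking; assuming 4 KiB pages (PAGE_SHIFT == 12), the masks bound the offset to a little under 16 MiB for 32-bit tasks and 256 MiB for 64-bit tasks. A worked example with a sample get_random_int() value of 0x12345:

	unsigned long off32 = (0x12345UL << 12) & 0xffffffUL;  /* 0x345000: page-aligned, < 16 MiB  */
	unsigned long off64 = (0x12345UL << 12) & 0xfffffffUL; /* 0x2345000: page-aligned, < 256 MiB */
	/* the vDSO base becomes STACK_TOP + offset instead of always STACK_TOP */
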
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 468ffa043607..7edcd4946fc1 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -49,6 +49,7 @@
/* Activation Status Register */
#define ACTS_ASC0_ACT 0x00001000
+#define ACTS_SSC0 0x00002000
#define ACTS_ASC1_ACT 0x00000800
#define ACTS_I2C_ACT 0x00004000
#define ACTS_P0 0x00010000
@@ -147,12 +148,11 @@ static void falcon_gpe_enable(void)
if (status & (1 << (GPPC_OFFSET + 1)))
return;
- if (status_r32(STATUS_CONFIG) == 0)
+ freq = (status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+ if (freq == 0)
freq = 1; /* use 625MHz on unfused chip */
- else
- freq = (status_r32(STATUS_CONFIG) &
- GPEFREQ_MASK) >>
- GPEFREQ_OFFSET;
/* apply new frequency */
sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
@@ -260,5 +260,6 @@ void __init ltq_soc_init(void)
clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT);
+ clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0);
clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 030568a70ac4..6ab10573490d 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -70,6 +70,7 @@ static struct resource ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
+static int ltq_perfcount_irq;
int ltq_eiu_get_irq(int exin)
{
@@ -378,30 +379,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
panic("Failed to remap icu memory");
}
- /* the external interrupts are optional and xway only */
- eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
- if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
- /* find out how many external irq sources we have */
- exin_avail = of_irq_count(eiu_node);
-
- if (exin_avail > MAX_EIU)
- exin_avail = MAX_EIU;
-
- ret = of_irq_to_resource_table(eiu_node,
- ltq_eiu_irq, exin_avail);
- if (ret != exin_avail)
- panic("failed to load external irq resources");
-
- if (request_mem_region(res.start, resource_size(&res),
- res.name) < 0)
- pr_err("Failed to request eiu memory");
-
- ltq_eiu_membase = ioremap_nocache(res.start,
- resource_size(&res));
- if (!ltq_eiu_membase)
- panic("Failed to remap eiu memory");
- }
-
/* turn off all irqs by default */
for (i = 0; i < MAX_IM; i++) {
/* make sure all irqs are turned off by default */
@@ -449,7 +426,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
#endif
/* tell oprofile which irq to use */
- cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/*
* if the timer irq is not one of the mips irqs we need to
@@ -458,9 +435,38 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
if (MIPS_CPU_TIMER_IRQ != 7)
irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
+ /* the external interrupts are optional and xway only */
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
+ if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
+ /* find out how many external irq sources we have */
+ exin_avail = of_irq_count(eiu_node);
+
+ if (exin_avail > MAX_EIU)
+ exin_avail = MAX_EIU;
+
+ ret = of_irq_to_resource_table(eiu_node,
+ ltq_eiu_irq, exin_avail);
+ if (ret != exin_avail)
+ panic("failed to load external irq resources");
+
+ if (request_mem_region(res.start, resource_size(&res),
+ res.name) < 0)
+ pr_err("Failed to request eiu memory");
+
+ ltq_eiu_membase = ioremap_nocache(res.start,
+ resource_size(&res));
+ if (!ltq_eiu_membase)
+ panic("Failed to remap eiu memory");
+ }
+
return 0;
}
+int get_c0_perfcount_int(void)
+{
+ return ltq_perfcount_irq;
+}
+
unsigned int get_c0_compare_int(void)
{
return MIPS_CPU_TIMER_IRQ;
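
The irq.c change stops writing cp0_perfcount_irq directly and instead exposes the mapped virq through get_c0_perfcount_int(). A hedged sketch of how a performance-counter consumer might use the hook; the handler and init function names are hypothetical, only get_c0_perfcount_int() comes from the hunk above:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>

int get_c0_perfcount_int(void);		/* provided by the platform, as above */

static irqreturn_t perf_ovf_handler(int irq, void *dev_id)
{
	/* acknowledge and service the counter overflow here */
	return IRQ_HANDLED;
}

static int __init perf_irq_setup(void)
{
	int irq = get_c0_perfcount_int();

	if (irq < 0)
		return -ENODEV;
	return request_irq(irq, perf_ovf_handler, IRQF_SHARED,
			   "perf counters", perf_ovf_handler);
}
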
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 7447d322d14e..39ab3e786e59 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -36,6 +36,11 @@ const char *get_system_type(void)
return soc_info.sys_type;
}
+int ltq_soc_type(void)
+{
+ return soc_info.type;
+}
+
void prom_free_prom_memory(void)
{
}
@@ -72,6 +77,8 @@ void __init plat_mem_setup(void)
* parsed resulting in our memory appearing
*/
__dt_setup_arch(__dtb_start);
+
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
}
void __init device_tree_init(void)
@@ -97,16 +104,7 @@ void __init prom_init(void)
int __init plat_of_setup(void)
{
- static struct of_device_id of_ids[3];
-
- if (!of_have_populated_dt())
- panic("device tree not present");
-
- strlcpy(of_ids[0].compatible, soc_info.compatible,
- sizeof(of_ids[0].compatible));
- strncpy(of_ids[1].compatible, "simple-bus",
- sizeof(of_ids[1].compatible));
- return of_platform_populate(NULL, of_ids, NULL, NULL);
+ return __dt_register_buses(soc_info.compatible, "simple-bus");
}
arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index 087497d97357..a2edc538f477 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,3 +1,5 @@
obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o
+obj-y += vmmc.o
+
obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
index 7688ac0f06d0..ae8e930f5283 100644
--- a/arch/mips/lantiq/xway/dcdc.c
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -46,7 +46,6 @@ static struct platform_driver dcdc_driver = {
.probe = dcdc_probe,
.driver = {
.name = "dcdc-xrx200",
- .owner = THIS_MODULE,
.of_match_table = dcdc_match,
},
};
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 78a91fa41944..34a116e840d8 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -261,7 +261,6 @@ static struct platform_driver dma_driver = {
.probe = ltq_dma_init,
.driver = {
.name = "dma-xway",
- .owner = THIS_MODULE,
.of_match_table = dma_match,
},
};
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index 850821df924c..f1492b2db017 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -193,7 +193,6 @@ static struct platform_driver dma_driver = {
.probe = gptu_probe,
.driver = {
.name = "gptu-xway",
- .owner = THIS_MODULE,
.of_match_table = gptu_match,
},
};
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 1fa0f175357e..fe68f9ae47c1 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/reset-controller.h>
#include <asm/reboot.h>
@@ -113,10 +114,77 @@ void ltq_reset_once(unsigned int module, ulong u)
ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
}
+static int ltq_assert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ u32 val;
+
+ if (id < 8)
+ return -1;
+
+ val = ltq_rcu_r32(RCU_RST_REQ);
+ val |= BIT(id);
+ ltq_rcu_w32(val, RCU_RST_REQ);
+
+ return 0;
+}
+
+static int ltq_deassert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ u32 val;
+
+ if (id < 8)
+ return -1;
+
+ val = ltq_rcu_r32(RCU_RST_REQ);
+ val &= ~BIT(id);
+ ltq_rcu_w32(val, RCU_RST_REQ);
+
+ return 0;
+}
+
+static int ltq_reset_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ ltq_assert_device(rcdev, id);
+ return ltq_deassert_device(rcdev, id);
+}
+
+static struct reset_control_ops reset_ops = {
+ .reset = ltq_reset_device,
+ .assert = ltq_assert_device,
+ .deassert = ltq_deassert_device,
+};
+
+static struct reset_controller_dev reset_dev = {
+ .ops = &reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+ .of_reset_n_cells = 1,
+};
+
+void ltq_rst_init(void)
+{
+ reset_dev.of_node = of_find_compatible_node(NULL, NULL,
+ "lantiq,xway-reset");
+ if (!reset_dev.of_node)
+ pr_err("Failed to find reset controller node");
+ else
+ reset_controller_register(&reset_dev);
+}
+
static void ltq_machine_restart(char *command)
{
+ u32 val = ltq_rcu_r32(RCU_RST_REQ);
+
+ if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200"))
+ val |= RCU_RD_GPHY1_XRX200 | RCU_RD_GPHY0_XRX200;
+
+ val |= RCU_RD_SRST;
+
local_irq_disable();
- ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
+ ltq_rcu_w32(val, RCU_RST_REQ);
unreachable();
}
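
With of_reset_n_cells = 1, consumers reference the controller registered by ltq_rst_init() with a single index. A sketch of the consumer side, assuming a hypothetical driver and a device-tree property such as resets = <&reset0 22>; the reset_control_* calls are the generic kernel API this controller plugs into:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* indices below 8 are rejected by ltq_assert_device() above */
	reset_control_assert(rst);	/* sets BIT(id) in RCU_RST_REQ */
	reset_control_deassert(rst);	/* clears it again */
	return 0;
}
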
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
new file mode 100644
index 000000000000..696cd57f6f13
--- /dev/null
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+
+#include <lantiq_soc.h>
+
+static unsigned int *cp1_base;
+
+unsigned int *ltq_get_cp1_base(void)
+{
+ if (!cp1_base)
+ panic("no cp1 base was set\n");
+
+ return cp1_base;
+}
+EXPORT_SYMBOL(ltq_get_cp1_base);
+
+static int vmmc_probe(struct platform_device *pdev)
+{
+#define CP1_SIZE (1 << 20)
+ int gpio_count;
+ dma_addr_t dma;
+
+ cp1_base =
+ (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
+ &dma, GFP_ATOMIC));
+
+ gpio_count = of_gpio_count(pdev->dev.of_node);
+ while (gpio_count > 0) {
+ enum of_gpio_flags flags;
+ int gpio = of_get_gpio_flags(pdev->dev.of_node,
+ --gpio_count, &flags);
+ if (gpio_request(gpio, "vmmc-relay"))
+ continue;
+ dev_info(&pdev->dev, "requested GPIO %d\n", gpio);
+ gpio_direction_output(gpio,
+ (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
+ }
+
+	dev_info(&pdev->dev, "reserved %dMB at 0x%p\n", CP1_SIZE >> 20, cp1_base);
+
+ return 0;
+}
+
+static const struct of_device_id vmmc_match[] = {
+ { .compatible = "lantiq,vmmc-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vmmc_match);
+
+static struct platform_driver vmmc_driver = {
+ .probe = vmmc_probe,
+ .driver = {
+ .name = "lantiq,vmmc",
+ .owner = THIS_MODULE,
+ .of_match_table = vmmc_match,
+ },
+};
+
+module_platform_driver(vmmc_driver);
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index d4d9d31f152e..199094a40c15 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -24,7 +24,28 @@ static dma_addr_t xway_gphy_load(struct platform_device *pdev)
void *fw_addr;
size_t size;
- if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) {
+ if (of_get_property(pdev->dev.of_node, "firmware1", NULL) ||
+ of_get_property(pdev->dev.of_node, "firmware2", NULL)) {
+ switch (ltq_soc_type()) {
+ case SOC_TYPE_VR9:
+ if (of_property_read_string(pdev->dev.of_node,
+ "firmware1", &fw_name)) {
+ dev_err(&pdev->dev,
+ "failed to load firmware filename\n");
+ return 0;
+ }
+ break;
+ case SOC_TYPE_VR9_2:
+ if (of_property_read_string(pdev->dev.of_node,
+ "firmware2", &fw_name)) {
+ dev_err(&pdev->dev,
+ "failed to load firmware filename\n");
+ return 0;
+ }
+ break;
+ }
+ } else if (of_property_read_string(pdev->dev.of_node,
+ "firmware", &fw_name)) {
dev_err(&pdev->dev, "failed to load firmware filename\n");
return 0;
}
@@ -85,7 +106,6 @@ static struct platform_driver xway_phy_driver = {
.probe = xway_phy_fw_probe,
.driver = {
.name = "phy-xrx200",
- .owner = THIS_MODULE,
.of_match_table = xway_phy_match,
},
};
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
index e3acb2dad33a..8e7e378ce51c 100644
--- a/arch/mips/lib/iomap.c
+++ b/arch/mips/lib/iomap.c
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(iowrite32be);
/*
* These are the "repeat MMIO read/write" functions.
- * Note the "__raw" accesses, since we don't want to
- * convert to CPU byte order. We write in "IO byte
- * order" (we also don't have IO barriers).
+ * Note the "__mem" accesses, since we want to convert
+ * to CPU byte order if the host bus happens to not match the
+ * endianness of PCI/ISA (see mach-generic/mangle-port.h).
*/
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
- u8 data = __raw_readb(addr);
+ u8 data = __mem_readb(addr);
*dst = data;
dst++;
}
@@ -113,7 +113,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
- u16 data = __raw_readw(addr);
+ u16 data = __mem_readw(addr);
*dst = data;
dst++;
}
@@ -122,7 +122,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
- u32 data = __raw_readl(addr);
+ u32 data = __mem_readl(addr);
*dst = data;
dst++;
}
@@ -131,7 +131,7 @@ static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
- __raw_writeb(*src, addr);
+ __mem_writeb(*src, addr);
src++;
}
}
@@ -139,7 +139,7 @@ static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
- __raw_writew(*src, addr);
+ __mem_writew(*src, addr);
src++;
}
}
@@ -147,7 +147,7 @@ static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
- __raw_writel(*src, addr);
+ __mem_writel(*src, addr);
src++;
}
}
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 7b0e5462ca51..c8fe6b1968fb 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -114,8 +114,7 @@
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
-#endif
-#ifdef __MIPSEL__
+#else
EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
#endif
PTR_SUBU a0, t0 /* long align ptr */
@@ -164,8 +163,7 @@
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
-#endif
-#ifdef __MIPSEL__
+#else
EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
1: jr ra
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 57bcdaf1f1c8..be777d9a3f85 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -42,15 +42,11 @@ notrace void arch_local_irq_disable(void)
__asm__ __volatile__(
" .set push \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2)
- /* see irqflags.h for inline function */
-#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
-#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: /* no outputs */
@@ -72,15 +68,11 @@ notrace unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2)
- /* see irqflags.h for inline function */
-#else
" mfc0 %[flags], $12 \n"
" ori $1, %[flags], 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
-#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (flags)
@@ -103,18 +95,12 @@ notrace void arch_local_irq_restore(unsigned long flags)
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
- /* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
- /* see irqflags.h for inline function */
-#else
" mfc0 $1, $12 \n"
" andi %[flags], 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $12 \n"
-#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (__tmp1)
@@ -136,18 +122,12 @@ notrace void __arch_local_irq_restore(unsigned long flags)
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
- /* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
- /* see irqflags.h for inline function */
-#else
" mfc0 $1, $12 \n"
" andi %[flags], 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $12 \n"
-#endif
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
: [flags] "=r" (__tmp1)
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 1ef365ab3cd3..975a13855116 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
unsigned int asid;
unsigned long entryhi, entrylo0;
- asid = read_c0_entryhi() & 0xfc0;
+ asid = read_c0_entryhi() & ASID_MASK;
for (i = first; i <= last; i++) {
write_c0_index(i<<8);
@@ -34,8 +35,8 @@ static void dump_tlb(int first, int last)
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
- if ((entryhi & 0xfffff000) != 0x80000000
- && (entryhi & 0xfc0) == asid) {
+ if ((entryhi & PAGE_MASK) != KSEG0
+ && (entryhi & ASID_MASK) == asid) {
/*
* Only print entries in use
*/
@@ -43,8 +44,8 @@ static void dump_tlb(int first, int last)
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
- (entryhi & 0xfffff000),
- entryhi & 0xfc0,
+ entryhi & PAGE_MASK,
+ entryhi & ASID_MASK,
entrylo0 & PAGE_MASK,
(entrylo0 & (1 << 11)) ? 1 : 0,
(entrylo0 & (1 << 10)) ? 1 : 0,
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index bef65c98df59..929bbacd697e 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -28,7 +28,6 @@ LEAF(__strlen_\func\()_asm)
and v0, a0
bnez v0, .Lfault\@
-FEXPORT(__strlen_\func\()_nocheck_asm)
move v0, a0
.ifeqs "\func", "kernel"
1: EX(lbu, v1, (v0), .Lfault\@)
@@ -48,9 +47,7 @@ FEXPORT(__strlen_\func\()_nocheck_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strlen_user_asm
- .global __strlen_user_nocheck_asm
.set __strlen_user_asm, __strlen_kernel_asm
- .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm
#endif
__BUILD_STRLEN_ASM kernel
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 1b91fc6a921b..156de85b82cd 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -86,6 +86,7 @@ config LOONGSON_MACH3X
select LOONGSON_MC146818
select ZONE_DMA32
select LEFI_FIRMWARE_INTERFACE
+ select PHYS48_TO_HT40
help
Generic Loongson 3 family machines utilize the 3A/3B revision
of Loongson processor and RS780/SBX00 chipset.
@@ -107,6 +108,18 @@ config CS5536_MFGPT
If unsure, say Yes.
+config RS780_HPET
+ bool "RS780/SBX00 HPET Timer"
+ depends on LOONGSON_MACH3X
+ select MIPS_EXTERNAL_TIMER
+ help
+	  This option enables the HPET timer of the AMD RS780/SBX00.
+
+	  If you want to enable the Loongson-3 CPUFreq driver, enable this
+	  option first; otherwise the system time will be wrong.
+
+ If unsure, say Yes.
+
config LOONGSON_SUSPEND
bool
default y
@@ -131,6 +144,10 @@ config SWIOTLB
select NEED_SG_DMA_LENGTH
select NEED_DMA_MAP_STATE
+config PHYS48_TO_HT40
+ bool
+ default y if CPU_LOONGSON3
+
config LOONGSON_MC146818
bool
default n
diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c
index 81bed9d18061..b739723205f8 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_pci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c
@@ -21,6 +21,7 @@
*/
#include <linux/types.h>
+#include <cs5536/cs5536_pci.h>
#include <cs5536/cs5536_vsm.h>
enum {
@@ -35,21 +36,21 @@ enum {
};
static const cs5536_pci_vsm_write vsm_conf_write[] = {
- [CS5536_ISA_FUNC] pci_isa_write_reg,
- [reserved_func] NULL,
- [CS5536_IDE_FUNC] pci_ide_write_reg,
- [CS5536_ACC_FUNC] pci_acc_write_reg,
- [CS5536_OHCI_FUNC] pci_ohci_write_reg,
- [CS5536_EHCI_FUNC] pci_ehci_write_reg,
+ [CS5536_ISA_FUNC] = pci_isa_write_reg,
+ [reserved_func] = NULL,
+ [CS5536_IDE_FUNC] = pci_ide_write_reg,
+ [CS5536_ACC_FUNC] = pci_acc_write_reg,
+ [CS5536_OHCI_FUNC] = pci_ohci_write_reg,
+ [CS5536_EHCI_FUNC] = pci_ehci_write_reg,
};
static const cs5536_pci_vsm_read vsm_conf_read[] = {
- [CS5536_ISA_FUNC] pci_isa_read_reg,
- [reserved_func] NULL,
- [CS5536_IDE_FUNC] pci_ide_read_reg,
- [CS5536_ACC_FUNC] pci_acc_read_reg,
- [CS5536_OHCI_FUNC] pci_ohci_read_reg,
- [CS5536_EHCI_FUNC] pci_ehci_read_reg,
+ [CS5536_ISA_FUNC] = pci_isa_read_reg,
+ [reserved_func] = NULL,
+ [CS5536_IDE_FUNC] = pci_ide_read_reg,
+ [CS5536_ACC_FUNC] = pci_acc_read_reg,
+ [CS5536_OHCI_FUNC] = pci_ohci_read_reg,
+ [CS5536_EHCI_FUNC] = pci_ehci_read_reg,
};
/*
diff --git a/arch/mips/loongson/common/dma-swiotlb.c b/arch/mips/loongson/common/dma-swiotlb.c
index c2be01f91575..2c6b989c1bc4 100644
--- a/arch/mips/loongson/common/dma-swiotlb.c
+++ b/arch/mips/loongson/common/dma-swiotlb.c
@@ -105,11 +105,25 @@ static int loongson_dma_set_mask(struct device *dev, u64 mask)
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
+ long nid;
+#ifdef CONFIG_PHYS48_TO_HT40
+	/* Extract the 2-bit node id (bits 44~47, only bits 44~45 used now) from
+	 * Loongson-3's 48-bit address space and embed it into the 40-bit DMA address */
+ nid = (paddr >> 44) & 0x3;
+ paddr = ((nid << 44) ^ paddr) | (nid << 37);
+#endif
return paddr;
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
+ long nid;
+#ifdef CONFIG_PHYS48_TO_HT40
+	/* Undo the relocation: recover the 2-bit node id from bits 37~38 of the
+	 * 40-bit DMA address and put it back into Loongson-3's 48-bit space */
+ nid = (daddr >> 37) & 0x3;
+ daddr = ((nid << 37) ^ daddr) | (nid << 44);
+#endif
return daddr;
}
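
The two helpers above move the Loongson-3 node id between bits 44-45 of the physical address and bits 37-38 of the 40-bit HyperTransport window, and the transforms are inverses of each other. A hypothetical self-test, not part of the patch, that walks one node-1 address through the round trip:

#include <linux/bug.h>
#include <linux/types.h>

static void __init phys48_to_ht40_selftest(void)	/* hypothetical helper */
{
	phys_addr_t paddr = 0x100012345678ULL;		/* node 1: bit 44 set */
	dma_addr_t daddr = phys_to_dma(NULL, paddr);

	BUG_ON(daddr != 0x2012345678ULL);		/* node id now in bit 37 */
	BUG_ON(dma_to_phys(NULL, daddr) != paddr);	/* transforms round-trip */
}
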
diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c
index ced461b39069..6ca632e529dc 100644
--- a/arch/mips/loongson/common/early_printk.c
+++ b/arch/mips/loongson/common/early_printk.c
@@ -30,7 +30,7 @@ void prom_putchar(char c)
int timeout;
unsigned char *uart_base;
- uart_base = (unsigned char *)_loongson_uart_base;
+ uart_base = (unsigned char *)_loongson_uart_base[0];
timeout = 1024;
while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) &&
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index f15228550a22..045ea3d47c87 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -21,6 +21,7 @@
#include <asm/bootinfo.h>
#include <loongson.h>
#include <boot_param.h>
+#include <workarounds.h>
u32 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
@@ -31,7 +32,6 @@ u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180};
u64 loongson_freqctrl[MAX_PACKAGES];
unsigned long long smp_group[4];
-int cpuhotplug_workaround = 0;
#define parse_even_earlier(res, option, p) \
do { \
@@ -67,6 +67,7 @@ void __init prom_init_env(void)
#else
struct boot_params *boot_p;
struct loongson_params *loongson_p;
+ struct system_loongson *esys;
struct efi_cpuinfo_loongson *ecpu;
struct irq_source_routing_table *eirq_source;
@@ -74,6 +75,8 @@ void __init prom_init_env(void)
boot_p = (struct boot_params *)fw_arg2;
loongson_p = &(boot_p->efi.smbios.lp);
+ esys = (struct system_loongson *)
+ ((u64)loongson_p + loongson_p->system_offset);
ecpu = (struct efi_cpuinfo_loongson *)
((u64)loongson_p + loongson_p->cpu_offset);
eirq_source = (struct irq_source_routing_table *)
@@ -95,6 +98,7 @@ void __init prom_init_env(void)
loongson_chipcfg[2] = 0x900020001fe00180;
loongson_chipcfg[3] = 0x900030001fe00180;
loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
+ loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
} else if (ecpu->cputype == Loongson_3B) {
loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */
loongson_sysconf.cores_per_package = 8;
@@ -111,7 +115,7 @@ void __init prom_init_env(void)
loongson_freqctrl[2] = 0x900040001fe001d0;
loongson_freqctrl[3] = 0x900060001fe001d0;
loongson_sysconf.ht_control_base = 0x90001EFDFB000000;
- cpuhotplug_workaround = 1;
+ loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
} else {
loongson_sysconf.cores_per_node = 1;
loongson_sysconf.cores_per_package = 1;
@@ -119,6 +123,8 @@ void __init prom_init_env(void)
}
loongson_sysconf.nr_cpus = ecpu->nr_cpus;
+ loongson_sysconf.boot_cpu_id = ecpu->cpu_startup_core_id;
+ loongson_sysconf.reserved_cpus_mask = ecpu->reserved_cores_mask;
if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
loongson_sysconf.nr_cpus = NR_CPUS;
loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus +
@@ -141,6 +147,24 @@ void __init prom_init_env(void)
pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
loongson_sysconf.vgabios_addr);
+
+ memset(loongson_sysconf.ecname, 0, 32);
+ if (esys->has_ec)
+ memcpy(loongson_sysconf.ecname, esys->ec_name, 32);
+ loongson_sysconf.workarounds |= esys->workarounds;
+
+ loongson_sysconf.nr_uarts = esys->nr_uarts;
+ if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS)
+ loongson_sysconf.nr_uarts = 1;
+ memcpy(loongson_sysconf.uarts, esys->uarts,
+ sizeof(struct uart_device) * loongson_sysconf.nr_uarts);
+
+ loongson_sysconf.nr_sensors = esys->nr_sensors;
+ if (loongson_sysconf.nr_sensors > MAX_SENSORS)
+ loongson_sysconf.nr_sensors = 0;
+ if (loongson_sysconf.nr_sensors)
+ memcpy(loongson_sysconf.sensors, esys->sensors,
+ sizeof(struct sensor_device) * loongson_sysconf.nr_sensors);
#endif
if (cpu_clock_freq == 0) {
processor_id = (&current_cpu_data)->processor_id;
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
index 21869908aaa4..29dbaa253061 100644
--- a/arch/mips/loongson/common/gpio.c
+++ b/arch/mips/loongson/common/gpio.c
@@ -37,7 +37,7 @@ int gpio_get_value(unsigned gpio)
val = LOONGSON_GPIODATA;
spin_unlock(&gpio_lock);
- return ((val & mask) != 0);
+ return (val & mask) != 0;
}
EXPORT_SYMBOL(gpio_get_value);
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index f6af3aba4c86..9b987fe98b5b 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -9,6 +9,7 @@
*/
#include <linux/bootmem.h>
+#include <asm/bootinfo.h>
#include <asm/smp-ops.h>
#include <loongson.h>
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 1a4797984b8d..f2807bc662a3 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -19,19 +19,16 @@
#define MACHTYPE_LEN 50
static const char *system_types[] = {
- [MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
- [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
- [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box",
- [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
- [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
- [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
- [MACH_LEMOTE_NAS] "lemote-nas-2f",
- [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
- [MACH_LEMOTE_A1004] "lemote-3a-notebook-a1004",
- [MACH_LEMOTE_A1101] "lemote-3a-itx-a1101",
- [MACH_LEMOTE_A1201] "lemote-2gq-notebook-a1201",
- [MACH_LEMOTE_A1205] "lemote-2gq-aio-a1205",
- [MACH_LOONGSON_END] NULL,
+ [MACH_LOONGSON_UNKNOWN] = "unknown loongson machine",
+ [MACH_LEMOTE_FL2E] = "lemote-fuloong-2e-box",
+ [MACH_LEMOTE_FL2F] = "lemote-fuloong-2f-box",
+ [MACH_LEMOTE_ML2F7] = "lemote-mengloong-2f-7inches",
+ [MACH_LEMOTE_YL2F89] = "lemote-yeeloong-2f-8.9inches",
+ [MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f",
+ [MACH_LEMOTE_NAS] = "lemote-nas-2f",
+ [MACH_LEMOTE_LL2F] = "lemote-lynloong-2f",
+ [MACH_LOONGSON_GENERIC] = "generic-loongson-machine",
+ [MACH_LOONGSON_END] = NULL,
};
const char *get_system_type(void)
diff --git a/arch/mips/loongson/common/rtc.c b/arch/mips/loongson/common/rtc.c
index a90d87c01555..b5709af09f7f 100644
--- a/arch/mips/loongson/common/rtc.c
+++ b/arch/mips/loongson/common/rtc.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/mc146818rtc.h>
-struct resource loongson_rtc_resources[] = {
+static struct resource loongson_rtc_resources[] = {
{
.start = RTC_PORT(0),
.end = RTC_PORT(1),
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
index bd2b7095b6dc..c23fa1373729 100644
--- a/arch/mips/loongson/common/serial.c
+++ b/arch/mips/loongson/common/serial.c
@@ -38,20 +38,17 @@
.regshift = 0, \
}
-static struct plat_serial8250_port uart8250_data[][2] = {
- [MACH_LOONGSON_UNKNOWN] {},
- [MACH_LEMOTE_FL2E] {PORT(4, 1843200), {} },
- [MACH_LEMOTE_FL2F] {PORT(3, 1843200), {} },
- [MACH_LEMOTE_ML2F7] {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_YL2F89] {PORT_M(3, 3686400), {} },
- [MACH_DEXXON_GDIUM2F10] {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_NAS] {PORT_M(3, 3686400), {} },
- [MACH_LEMOTE_LL2F] {PORT(3, 1843200), {} },
- [MACH_LEMOTE_A1004] {PORT_M(2, 33177600), {} },
- [MACH_LEMOTE_A1101] {PORT_M(2, 25000000), {} },
- [MACH_LEMOTE_A1201] {PORT_M(2, 25000000), {} },
- [MACH_LEMOTE_A1205] {PORT_M(2, 25000000), {} },
- [MACH_LOONGSON_END] {},
+static struct plat_serial8250_port uart8250_data[][MAX_UARTS + 1] = {
+ [MACH_LOONGSON_UNKNOWN] = {},
+ [MACH_LEMOTE_FL2E] = {PORT(4, 1843200), {} },
+ [MACH_LEMOTE_FL2F] = {PORT(3, 1843200), {} },
+ [MACH_LEMOTE_ML2F7] = {PORT_M(3, 3686400), {} },
+ [MACH_LEMOTE_YL2F89] = {PORT_M(3, 3686400), {} },
+ [MACH_DEXXON_GDIUM2F10] = {PORT_M(3, 3686400), {} },
+ [MACH_LEMOTE_NAS] = {PORT_M(3, 3686400), {} },
+ [MACH_LEMOTE_LL2F] = {PORT(3, 1843200), {} },
+ [MACH_LOONGSON_GENERIC] = {PORT_M(2, 25000000), {} },
+ [MACH_LOONGSON_END] = {},
};
static struct platform_device uart8250_device = {
@@ -61,17 +58,52 @@ static struct platform_device uart8250_device = {
static int __init serial_init(void)
{
+ int i;
unsigned char iotype;
iotype = uart8250_data[mips_machtype][0].iotype;
- if (UPIO_MEM == iotype)
+ if (UPIO_MEM == iotype) {
+ uart8250_data[mips_machtype][0].mapbase =
+ loongson_uart_base[0];
uart8250_data[mips_machtype][0].membase =
- (void __iomem *)_loongson_uart_base;
+ (void __iomem *)_loongson_uart_base[0];
+ }
else if (UPIO_PORT == iotype)
uart8250_data[mips_machtype][0].iobase =
- loongson_uart_base - LOONGSON_PCIIO_BASE;
+ loongson_uart_base[0] - LOONGSON_PCIIO_BASE;
+ if (loongson_sysconf.uarts[0].uartclk)
+ uart8250_data[mips_machtype][0].uartclk =
+ loongson_sysconf.uarts[0].uartclk;
+
+ for (i = 1; i < loongson_sysconf.nr_uarts; i++) {
+ iotype = loongson_sysconf.uarts[i].iotype;
+ uart8250_data[mips_machtype][i].iotype = iotype;
+ loongson_uart_base[i] = loongson_sysconf.uarts[i].uart_base;
+
+ if (UPIO_MEM == iotype) {
+ uart8250_data[mips_machtype][i].irq =
+ MIPS_CPU_IRQ_BASE + loongson_sysconf.uarts[i].int_offset;
+ uart8250_data[mips_machtype][i].mapbase =
+ loongson_uart_base[i];
+ uart8250_data[mips_machtype][i].membase =
+ ioremap_nocache(loongson_uart_base[i], 8);
+ } else if (UPIO_PORT == iotype) {
+ uart8250_data[mips_machtype][i].irq =
+ loongson_sysconf.uarts[i].int_offset;
+ uart8250_data[mips_machtype][i].iobase =
+ loongson_uart_base[i] - LOONGSON_PCIIO_BASE;
+ }
+
+ uart8250_data[mips_machtype][i].uartclk =
+ loongson_sysconf.uarts[i].uartclk;
+ uart8250_data[mips_machtype][i].flags =
+ UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
+ }
+
+ memset(&uart8250_data[mips_machtype][loongson_sysconf.nr_uarts],
+ 0, sizeof(struct plat_serial8250_port));
uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
return platform_device_register(&uart8250_device);
diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c
index bb4ac922e47a..d477dd6bb326 100644
--- a/arch/mips/loongson/common/setup.c
+++ b/arch/mips/loongson/common/setup.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <asm/wbflush.h>
+#include <asm/bootinfo.h>
#include <loongson.h>
diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c
index 262a1f65b05e..e1a5382ad47e 100644
--- a/arch/mips/loongson/common/time.c
+++ b/arch/mips/loongson/common/time.c
@@ -12,6 +12,7 @@
*/
#include <asm/mc146818-time.h>
#include <asm/time.h>
+#include <asm/hpet.h>
#include <loongson.h>
#include <cs5536/cs5536_mfgpt.h>
@@ -21,7 +22,11 @@ void __init plat_time_init(void)
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
+#ifdef CONFIG_RS780_HPET
+ setup_hpet_timer();
+#else
setup_mfgpt0_timer();
+#endif
}
void read_persistent_clock(struct timespec *ts)
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
index 1e1eeea73fde..9de559d58e1f 100644
--- a/arch/mips/loongson/common/uart_base.c
+++ b/arch/mips/loongson/common/uart_base.c
@@ -13,22 +13,27 @@
#include <loongson.h>
-/* ioremapped */
-unsigned long _loongson_uart_base;
-EXPORT_SYMBOL(_loongson_uart_base);
/* raw */
-unsigned long loongson_uart_base;
+unsigned long loongson_uart_base[MAX_UARTS] = {};
+/* ioremapped */
+unsigned long _loongson_uart_base[MAX_UARTS] = {};
+
EXPORT_SYMBOL(loongson_uart_base);
+EXPORT_SYMBOL(_loongson_uart_base);
void prom_init_loongson_uart_base(void)
{
switch (mips_machtype) {
+ case MACH_LOONGSON_GENERIC:
+ /* The CPU provided serial port (CPU) */
+ loongson_uart_base[0] = LOONGSON_REG_BASE + 0x1e0;
+ break;
case MACH_LEMOTE_FL2E:
- loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8;
+ loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x3f8;
break;
case MACH_LEMOTE_FL2F:
case MACH_LEMOTE_LL2F:
- loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8;
+ loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x2f8;
break;
case MACH_LEMOTE_ML2F7:
case MACH_LEMOTE_YL2F89:
@@ -36,17 +41,10 @@ void prom_init_loongson_uart_base(void)
case MACH_LEMOTE_NAS:
default:
/* The CPU provided serial port (LPC) */
- loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
- break;
- case MACH_LEMOTE_A1004:
- case MACH_LEMOTE_A1101:
- case MACH_LEMOTE_A1201:
- case MACH_LEMOTE_A1205:
- /* The CPU provided serial port (CPU) */
- loongson_uart_base = LOONGSON_REG_BASE + 0x1e0;
+ loongson_uart_base[0] = LOONGSON_LIO1_BASE + 0x3f8;
break;
}
- _loongson_uart_base =
- (unsigned long)ioremap_nocache(loongson_uart_base, 8);
+ _loongson_uart_base[0] =
+ (unsigned long)ioremap_nocache(loongson_uart_base[0], 8);
}
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 6f8682e44483..cab5f43e0e29 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -93,13 +93,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
return IRQ_HANDLED;
}
-struct irqaction ip6_irqaction = {
+static struct irqaction ip6_irqaction = {
.handler = ip6_action,
.name = "cascade",
.flags = IRQF_SHARED | IRQF_NO_THREAD,
};
-struct irqaction cascade_irqaction = {
+static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c
index 79ac694fe744..a26ca7fcd7e0 100644
--- a/arch/mips/loongson/lemote-2f/reset.c
+++ b/arch/mips/loongson/lemote-2f/reset.c
@@ -76,7 +76,7 @@ static void fl2f_shutdown(void)
/* reset support for yeeloong2f and mengloong2f notebook */
-void ml2f_reboot(void)
+static void ml2f_reboot(void)
{
reset_cpu();
diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile
index b4df775b9f30..622fead5ebc9 100644
--- a/arch/mips/loongson/loongson-3/Makefile
+++ b/arch/mips/loongson/loongson-3/Makefile
@@ -1,8 +1,10 @@
#
# Makefile for Loongson-3 family machines
#
-obj-y += irq.o cop2-ex.o
+obj-y += irq.o cop2-ex.o platform.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_NUMA) += numa.o
+
+obj-$(CONFIG_RS780_HPET) += hpet.o
diff --git a/arch/mips/loongson/loongson-3/hpet.c b/arch/mips/loongson/loongson-3/hpet.c
new file mode 100644
index 000000000000..e898d68668a9
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/hpet.c
@@ -0,0 +1,257 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/hpet.h>
+#include <asm/time.h>
+
+#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000)
+#define SMBUS_PCI_REG40 0x40
+#define SMBUS_PCI_REG64 0x64
+#define SMBUS_PCI_REGB4 0xb4
+
+static DEFINE_SPINLOCK(hpet_lock);
+DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+
+static unsigned int smbus_read(int offset)
+{
+ return *(volatile unsigned int *)(SMBUS_CFG_BASE + offset);
+}
+
+static void smbus_write(int offset, int data)
+{
+ *(volatile unsigned int *)(SMBUS_CFG_BASE + offset) = data;
+}
+
+static void smbus_enable(int offset, int bit)
+{
+ unsigned int cfg = smbus_read(offset);
+
+ cfg |= bit;
+ smbus_write(offset, cfg);
+}
+
+static int hpet_read(int offset)
+{
+ return *(volatile unsigned int *)(HPET_MMIO_ADDR + offset);
+}
+
+static void hpet_write(int offset, int data)
+{
+ *(volatile unsigned int *)(HPET_MMIO_ADDR + offset) = data;
+}
+
+static void hpet_start_counter(void)
+{
+ unsigned int cfg = hpet_read(HPET_CFG);
+
+ cfg |= HPET_CFG_ENABLE;
+ hpet_write(HPET_CFG, cfg);
+}
+
+static void hpet_stop_counter(void)
+{
+ unsigned int cfg = hpet_read(HPET_CFG);
+
+ cfg &= ~HPET_CFG_ENABLE;
+ hpet_write(HPET_CFG, cfg);
+}
+
+static void hpet_reset_counter(void)
+{
+ hpet_write(HPET_COUNTER, 0);
+ hpet_write(HPET_COUNTER + 4, 0);
+}
+
+static void hpet_restart_counter(void)
+{
+ hpet_stop_counter();
+ hpet_reset_counter();
+ hpet_start_counter();
+}
+
+static void hpet_enable_legacy_int(void)
+{
+ /* Do nothing on Loongson-3 */
+}
+
+static void hpet_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ int cfg = 0;
+
+ spin_lock(&hpet_lock);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("set clock event to periodic mode!\n");
+ /* stop counter */
+ hpet_stop_counter();
+
+		/* enable timer0 to generate a periodic interrupt */
+ cfg = hpet_read(HPET_T0_CFG);
+ cfg &= ~HPET_TN_LEVEL;
+ cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
+ HPET_TN_SETVAL | HPET_TN_32BIT;
+ hpet_write(HPET_T0_CFG, cfg);
+
+ /* set the comparator */
+ hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+ udelay(1);
+ hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+
+ /* start counter */
+ hpet_start_counter();
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ cfg = hpet_read(HPET_T0_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_write(HPET_T0_CFG, cfg);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_info("set clock event to one shot mode!\n");
+ cfg = hpet_read(HPET_T0_CFG);
+ /* set timer0 type
+ * 1 : periodic interrupt
+		 * 0 : non-periodic (oneshot) interrupt
+ */
+ cfg &= ~HPET_TN_PERIODIC;
+ cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+ hpet_write(HPET_T0_CFG, cfg);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ hpet_enable_legacy_int();
+ break;
+ }
+ spin_unlock(&hpet_lock);
+}
+
+static int hpet_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+ cnt = hpet_read(HPET_COUNTER);
+ cnt += delta;
+ hpet_write(HPET_T0_CMP, cnt);
+
+ res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
+ return res;
+}
+
+static irqreturn_t hpet_irq_handler(int irq, void *data)
+{
+ int is_irq;
+ struct clock_event_device *cd;
+ unsigned int cpu = smp_processor_id();
+
+ is_irq = hpet_read(HPET_STATUS);
+ if (is_irq & HPET_T0_IRS) {
+ /* clear the TIMER0 irq status register */
+ hpet_write(HPET_STATUS, HPET_T0_IRS);
+ cd = &per_cpu(hpet_clockevent_device, cpu);
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static struct irqaction hpet_irq = {
+ .handler = hpet_irq_handler,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .name = "hpet",
+};
+
+/*
+ * HPET address assignment and IRQ setup should be done by the BIOS,
+ * but PMON doesn't do this, so we set it up here directly.
+ * The code below would be the usual way to do it; unfortunately,
+ * hpet_setup() runs before PCI is initialized.
+ *
+ * {
+ * struct pci_dev *pdev;
+ *
+ * pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ * pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR);
+ *
+ * ...
+ * }
+ */
+static void hpet_setup(void)
+{
+ /* set hpet base address */
+ smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
+
+	/* enable decoding of accesses to HPET MMIO */
+ smbus_enable(SMBUS_PCI_REG40, (1 << 28));
+
+ /* HPET irq enable */
+ smbus_enable(SMBUS_PCI_REG64, (1 << 10));
+
+ hpet_enable_legacy_int();
+}
+
+void __init setup_hpet_timer(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ hpet_setup();
+
+ cd = &per_cpu(hpet_clockevent_device, cpu);
+ cd->name = "hpet";
+ cd->rating = 320;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ cd->set_mode = hpet_set_mode;
+ cd->set_next_event = hpet_next_event;
+ cd->irq = HPET_T0_IRQ;
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, HPET_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 5000;
+
+ clockevents_register_device(cd);
+ setup_irq(HPET_T0_IRQ, &hpet_irq);
+ pr_info("hpet clock event device register\n");
+}
+
+static cycle_t hpet_read_counter(struct clocksource *cs)
+{
+ return (cycle_t)hpet_read(HPET_COUNTER);
+}
+
+static void hpet_suspend(struct clocksource *cs)
+{
+}
+
+static void hpet_resume(struct clocksource *cs)
+{
+ hpet_setup();
+ hpet_restart_counter();
+}
+
+static struct clocksource csrc_hpet = {
+ .name = "hpet",
+	/* The MIPS clocksource rating is less than 300, so the HPET is preferred. */
+ .rating = 300,
+ .read = hpet_read_counter,
+ .mask = CLOCKSOURCE_MASK(32),
+	/* oneshot mode works normally with this flag */
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .suspend = hpet_suspend,
+ .resume = hpet_resume,
+ .mult = 0,
+ .shift = 10,
+};
+
+int __init init_hpet_clocksource(void)
+{
+ csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift);
+ return clocksource_register_hz(&csrc_hpet, HPET_FREQ);
+}
+
+arch_initcall(init_hpet_clocksource);
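
hpet_next_event() above detects a comparator that has already been overtaken by using a signed 32-bit difference, which stays correct across counter wrap-around. A small illustration of that check, not part of the patch:

#include <linux/errno.h>

/* -ETIME when the counter value 'now' has already passed the comparator 'cmp' */
static int hpet_cmp_missed(unsigned int now, unsigned int cmp)
{
	return ((int)(now - cmp) > 0) ? -ETIME : 0;
}

/* e.g. cmp = 0xfffffff0 + 0x20 wraps to 0x10: now = 0x18 gives -ETIME (missed),
 * while now = 0x08 gives 0 because the interrupt is still pending. */
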
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
index ca1c62af5188..21221edda7a9 100644
--- a/arch/mips/loongson/loongson-3/irq.c
+++ b/arch/mips/loongson/loongson-3/irq.c
@@ -9,7 +9,7 @@
#include "smp.h"
-unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
+unsigned int ht_irq[] = {0, 1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
static void ht_irqdispatch(void)
{
@@ -55,8 +55,8 @@ static inline void mask_loongson_irq(struct irq_data *d)
/* Workaround: the UART IRQ may be delivered to any core */
if (d->irq == LOONGSON_UART_IRQ) {
int cpu = smp_processor_id();
- int node_id = cpu / loongson_sysconf.cores_per_node;
- int core_id = cpu % loongson_sysconf.cores_per_node;
+ int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
+ int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
u64 intenclr_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_INTENCLR);
u64 introuter_lpc_addr = smp_group[node_id] |
@@ -72,8 +72,8 @@ static inline void unmask_loongson_irq(struct irq_data *d)
/* Workaround: the UART IRQ may be delivered to any core */
if (d->irq == LOONGSON_UART_IRQ) {
int cpu = smp_processor_id();
- int node_id = cpu / loongson_sysconf.cores_per_node;
- int core_id = cpu % loongson_sysconf.cores_per_node;
+ int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
+ int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
u64 intenset_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_INTENSET);
u64 introuter_lpc_addr = smp_group[node_id] |
@@ -102,10 +102,12 @@ void irq_router_init(void)
int i;
/* route LPC int to cpu core0 int 0 */
- LOONGSON_INT_ROUTER_LPC = LOONGSON_INT_CORE0_INT0;
+ LOONGSON_INT_ROUTER_LPC =
+ LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 0);
/* route HT1 int0 ~ int7 to cpu core0 INT1*/
for (i = 0; i < 8; i++)
- LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1;
+ LOONGSON_INT_ROUTER_HT1(i) =
+ LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 1);
/* enable HT1 interrupt */
LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
/* enable router interrupt intenset */
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 42323bcc5d28..6cae0e75de27 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -224,7 +224,7 @@ static void __init node_mem_init(unsigned int node)
static __init void prom_meminit(void)
{
- unsigned int node, cpu;
+ unsigned int node, cpu, active_cpu = 0;
cpu_node_probe();
init_topology_matrix();
@@ -240,8 +240,14 @@ static __init void prom_meminit(void)
node = cpu / loongson_sysconf.cores_per_node;
if (node >= num_online_nodes())
node = 0;
- pr_info("NUMA: set cpumask cpu %d on node %d\n", cpu, node);
- cpu_set(cpu, __node_data[(node)]->cpumask);
+
+ if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
+ continue;
+
+ cpu_set(active_cpu, __node_data[(node)]->cpumask);
+ pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
+
+ active_cpu++;
}
}
diff --git a/arch/mips/loongson/loongson-3/platform.c b/arch/mips/loongson/loongson-3/platform.c
new file mode 100644
index 000000000000..25a97cc0ee33
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/platform.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ * Xiang Yu, xiangy@lemote.com
+ * Chen Huacai, chenhc@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <asm/bootinfo.h>
+#include <boot_param.h>
+#include <loongson_hwmon.h>
+#include <workarounds.h>
+
+static int __init loongson3_platform_init(void)
+{
+ int i;
+ struct platform_device *pdev;
+
+ if (loongson_sysconf.ecname[0] != '\0')
+ platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0);
+
+ for (i = 0; i < loongson_sysconf.nr_sensors; i++) {
+ if (loongson_sysconf.sensors[i].type > SENSOR_FAN)
+ continue;
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ pdev->name = loongson_sysconf.sensors[i].name;
+ pdev->id = loongson_sysconf.sensors[i].id;
+ pdev->dev.platform_data = &loongson_sysconf.sensors[i];
+ platform_device_register(pdev);
+ }
+
+ return 0;
+}
+
+arch_initcall(loongson3_platform_init);
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index d8c63af6c7cc..e2eb688b5434 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -25,6 +25,7 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <loongson.h>
+#include <workarounds.h>
#include "smp.h"
@@ -239,7 +240,7 @@ static void ipi_mailbox_buf_init(void)
*/
static void loongson3_send_ipi_single(int cpu, unsigned int action)
{
- loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
+ loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]);
}
static void
@@ -248,7 +249,7 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
unsigned int i;
for_each_cpu(i, mask)
- loongson3_ipi_write32((u32)action, ipi_set0_regs[i]);
+ loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(i)]);
}
void loongson3_ipi_interrupt(struct pt_regs *regs)
@@ -257,10 +258,10 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
unsigned int action, c0count;
/* Load the ipi register to figure out what we're supposed to do */
- action = loongson3_ipi_read32(ipi_status0_regs[cpu]);
+ action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
/* Clear the ipi register to clear the interrupt */
- loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu]);
+ loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
@@ -291,12 +292,14 @@ static void loongson3_init_secondary(void)
/* Set interrupt mask, but don't enable */
change_c0_status(ST0_IM, imask);
- for (i = 0; i < loongson_sysconf.nr_cpus; i++)
- loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]);
+ for (i = 0; i < num_possible_cpus(); i++)
+ loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);
- cpu_data[cpu].package = cpu / loongson_sysconf.cores_per_package;
- cpu_data[cpu].core = cpu % loongson_sysconf.cores_per_package;
per_cpu(cpu_state, cpu) = CPU_ONLINE;
+ cpu_data[cpu].core =
+ cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].package =
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
i = 0;
__this_cpu_write(core0_c0count, 0);
@@ -314,37 +317,50 @@ static void loongson3_init_secondary(void)
static void loongson3_smp_finish(void)
{
+ int cpu = smp_processor_id();
+
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
local_irq_enable();
loongson3_ipi_write64(0,
- (void *)(ipi_mailbox_buf[smp_processor_id()]+0x0));
+ (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
pr_info("CPU#%d finished, CP0_ST=%x\n",
smp_processor_id(), read_c0_status());
}
static void __init loongson3_smp_setup(void)
{
- int i, num;
+ int i = 0, num = 0; /* i: physical id, num: logical id */
init_cpu_possible(cpu_none_mask);
- set_cpu_possible(0, true);
-
- __cpu_number_map[0] = 0;
- __cpu_logical_map[0] = 0;
/* For a unified kernel, NR_CPUS is the maximum possible value,
* while loongson_sysconf.nr_cpus is the number actually present */
- for (i = 1, num = 0; i < loongson_sysconf.nr_cpus; i++) {
- set_cpu_possible(i, true);
- __cpu_number_map[i] = ++num;
- __cpu_logical_map[num] = i;
+ while (i < loongson_sysconf.nr_cpus) {
+ if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
+ /* Reserved physical CPU cores */
+ __cpu_number_map[i] = -1;
+ } else {
+ __cpu_number_map[i] = num;
+ __cpu_logical_map[num] = i;
+ set_cpu_possible(num, true);
+ num++;
+ }
+ i++;
}
+ pr_info("Detected %i available CPU(s)\n", num);
+
+ while (num < loongson_sysconf.nr_cpus) {
+ __cpu_logical_map[num] = -1;
+ num++;
+ }
+
ipi_set0_regs_init();
ipi_clear0_regs_init();
ipi_status0_regs_init();
ipi_en0_regs_init();
ipi_mailbox_buf_init();
- pr_info("Detected %i available secondary CPU(s)\n", num);
+ cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+ cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
}
static void __init loongson3_prepare_cpus(unsigned int max_cpus)
@@ -371,10 +387,14 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle)
pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
cpu, startargs[0], startargs[1], startargs[2]);
- loongson3_ipi_write64(startargs[3], (void *)(ipi_mailbox_buf[cpu]+0x18));
- loongson3_ipi_write64(startargs[2], (void *)(ipi_mailbox_buf[cpu]+0x10));
- loongson3_ipi_write64(startargs[1], (void *)(ipi_mailbox_buf[cpu]+0x8));
- loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0));
+ loongson3_ipi_write64(startargs[3],
+ (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18));
+ loongson3_ipi_write64(startargs[2],
+ (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10));
+ loongson3_ipi_write64(startargs[1],
+ (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8));
+ loongson3_ipi_write64(startargs[0],
+ (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -568,7 +588,7 @@ void loongson3_disable_clock(int cpu)
if (loongson_sysconf.cputype == Loongson_3A) {
LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
} else if (loongson_sysconf.cputype == Loongson_3B) {
- if (!cpuhotplug_workaround)
+ if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
}
}
@@ -581,7 +601,7 @@ void loongson3_enable_clock(int cpu)
if (loongson_sysconf.cputype == Loongson_3A) {
LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
} else if (loongson_sysconf.cputype == Loongson_3B) {
- if (!cpuhotplug_workaround)
+ if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
}
}
diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig
index e23c25d09963..a2b796eaf3c3 100644
--- a/arch/mips/loongson1/Kconfig
+++ b/arch/mips/loongson1/Kconfig
@@ -5,8 +5,8 @@ choice
config LOONGSON1_LS1B
bool "Loongson LS1B board"
- select CEVT_R4K
- select CSRC_R4K
+ select CEVT_R4K if !MIPS_EXTERNAL_TIMER
+ select CSRC_R4K if !MIPS_EXTERNAL_TIMER
select SYS_HAS_CPU_LOONGSON1B
select DMA_NONCOHERENT
select BOOT_ELF32
@@ -16,8 +16,46 @@ config LOONGSON1_LS1B
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_MIPS16
select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK
endchoice
+menuconfig CEVT_CSRC_LS1X
+ bool "Use PWM Timer for clockevent/clocksource"
+ select MIPS_EXTERNAL_TIMER
+ depends on CPU_LOONGSON1
+ help
+ This option changes the default clockevent/clocksource to the PWM Timer,
+ and is required for Loongson1 CPUFreq support.
+
+ If unsure, say N.
+
+choice
+ prompt "Select clockevent/clocksource"
+ depends on CEVT_CSRC_LS1X
+ default TIMER_USE_PWM0
+
+config TIMER_USE_PWM0
+ bool "Use PWM Timer 0"
+ help
+ Use PWM Timer 0 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM1
+ bool "Use PWM Timer 1"
+ help
+ Use PWM Timer 1 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM2
+ bool "Use PWM Timer 2"
+ help
+ Use PWM Timer 2 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM3
+ bool "Use PWM Timer 3"
+ help
+ Use PWM Timer 3 as the default clockevent/clocksource.
+
+endchoice
+
endif # MACH_LOONGSON1
diff --git a/arch/mips/loongson1/common/Makefile b/arch/mips/loongson1/common/Makefile
index b2797709ef5b..723b4ce3b8f0 100644
--- a/arch/mips/loongson1/common/Makefile
+++ b/arch/mips/loongson1/common/Makefile
@@ -2,4 +2,4 @@
# Makefile for common code of loongson1 based machines.
#
-obj-y += clock.o irq.o platform.o prom.o reset.o setup.o
+obj-y += time.o irq.o platform.o prom.o reset.o setup.o
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
deleted file mode 100644
index b4437f19c3d9..000000000000
--- a/arch/mips/loongson1/common/clock.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <asm/time.h>
-#include <platform.h>
-
-void __init plat_time_init(void)
-{
- struct clk *clk;
-
- /* Initialize LS1X clocks */
- ls1x_clk_init();
-
- /* setup mips r4k timer */
- clk = clk_get(NULL, "cpu");
- if (IS_ERR(clk))
- panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
-
- mips_hpt_frequency = clk_get_rate(clk) / 2;
-}
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
index fdf8cb5987a4..ddf1d4cbf31e 100644
--- a/arch/mips/loongson1/common/platform.c
+++ b/arch/mips/loongson1/common/platform.c
@@ -16,8 +16,10 @@
#include <linux/usb/ehci_pdriver.h>
#include <asm-generic/sizes.h>
+#include <cpufreq.h>
#include <loongson1.h>
+/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \
{ \
.mapbase = LS1X_UART ## _id ## _BASE, \
@@ -27,7 +29,7 @@
.type = PORT_16550A, \
}
-static struct plat_serial8250_port ls1x_serial8250_port[] = {
+static struct plat_serial8250_port ls1x_serial8250_pdata[] = {
LS1X_UART(0),
LS1X_UART(1),
LS1X_UART(2),
@@ -35,11 +37,11 @@ static struct plat_serial8250_port ls1x_serial8250_port[] = {
{},
};
-struct platform_device ls1x_uart_device = {
+struct platform_device ls1x_uart_pdev = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
- .platform_data = ls1x_serial8250_port,
+ .platform_data = ls1x_serial8250_pdata,
},
};
@@ -48,16 +50,97 @@ void __init ls1x_serial_setup(struct platform_device *pdev)
struct clk *clk;
struct plat_serial8250_port *p;
- clk = clk_get(NULL, pdev->name);
- if (IS_ERR(clk))
- panic("unable to get %s clock, err=%ld",
- pdev->name, PTR_ERR(clk));
+ clk = clk_get(&pdev->dev, pdev->name);
+ if (IS_ERR(clk)) {
+ pr_err("unable to get %s clock, err=%ld",
+ pdev->name, PTR_ERR(clk));
+ return;
+ }
+ clk_prepare_enable(clk);
for (p = pdev->dev.platform_data; p->flags != 0; ++p)
p->uartclk = clk_get_rate(clk);
}
+/* CPUFreq */
+static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
+ .clk_name = "cpu_clk",
+ .osc_clk_name = "osc_33m_clk",
+ .max_freq = 266 * 1000,
+ .min_freq = 33 * 1000,
+};
+
+struct platform_device ls1x_cpufreq_pdev = {
+ .name = "ls1x-cpufreq",
+ .dev = {
+ .platform_data = &ls1x_cpufreq_pdata,
+ },
+};
+
/* Synopsys Ethernet GMAC */
+static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
+ .phy_mask = 0,
+};
+
+static struct stmmac_dma_cfg ls1x_eth_dma_cfg = {
+ .pbl = 1,
+};
+
+int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = NULL;
+ u32 val;
+
+ val = __raw_readl(LS1X_MUX_CTRL1);
+
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (plat_dat->bus_id) {
+ __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
+ GMAC1_USE_UART0, LS1X_MUX_CTRL0);
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ val |= (GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+ plat_dat->interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC1_SHUT;
+ } else {
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ val |= (GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+ plat_dat->interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC0_SHUT;
+ }
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+ return 0;
+}
+
+static struct plat_stmmacenet_data ls1x_eth0_pdata = {
+ .bus_id = 0,
+ .phy_addr = -1,
+ .interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .init = ls1x_eth_mux_init,
+};
+
static struct resource ls1x_eth0_resources[] = {
[0] = {
.start = LS1X_GMAC0_BASE,
@@ -71,25 +154,47 @@ static struct resource ls1x_eth0_resources[] = {
},
};
-static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
- .phy_mask = 0,
+struct platform_device ls1x_eth0_pdev = {
+ .name = "stmmaceth",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ls1x_eth0_resources),
+ .resource = ls1x_eth0_resources,
+ .dev = {
+ .platform_data = &ls1x_eth0_pdata,
+ },
};
-static struct plat_stmmacenet_data ls1x_eth_data = {
- .bus_id = 0,
+static struct plat_stmmacenet_data ls1x_eth1_pdata = {
+ .bus_id = 1,
.phy_addr = -1,
+ .interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
+ .init = ls1x_eth_mux_init,
};
-struct platform_device ls1x_eth0_device = {
+static struct resource ls1x_eth1_resources[] = {
+ [0] = {
+ .start = LS1X_GMAC1_BASE,
+ .end = LS1X_GMAC1_BASE + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "macirq",
+ .start = LS1X_GMAC1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ls1x_eth1_pdev = {
.name = "stmmaceth",
- .id = 0,
- .num_resources = ARRAY_SIZE(ls1x_eth0_resources),
- .resource = ls1x_eth0_resources,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(ls1x_eth1_resources),
+ .resource = ls1x_eth1_resources,
.dev = {
- .platform_data = &ls1x_eth_data,
+ .platform_data = &ls1x_eth1_pdata,
},
};
@@ -111,7 +216,7 @@ static struct resource ls1x_ehci_resources[] = {
static struct usb_ehci_pdata ls1x_ehci_pdata = {
};
-struct platform_device ls1x_ehci_device = {
+struct platform_device ls1x_ehci_pdev = {
.name = "ehci-platform",
.id = -1,
.num_resources = ARRAY_SIZE(ls1x_ehci_resources),
@@ -123,7 +228,7 @@ struct platform_device ls1x_ehci_device = {
};
/* Real Time Clock */
-struct platform_device ls1x_rtc_device = {
+struct platform_device ls1x_rtc_pdev = {
.name = "ls1x-rtc",
.id = -1,
};
diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c
index 2a47af5a55c3..68600980ea49 100644
--- a/arch/mips/loongson1/common/prom.c
+++ b/arch/mips/loongson1/common/prom.c
@@ -27,7 +27,7 @@ char *prom_getenv(char *envname)
i = strlen(envname);
while (*env) {
- if (strncmp(envname, *env, i) == 0 && *(*env+i) == '=')
+ if (strncmp(envname, *env, i) == 0 && *(*env + i) == '=')
return *env + i + 1;
env++;
}
@@ -49,7 +49,7 @@ void __init prom_init_cmdline(void)
for (i = 1; i < prom_argc; i++) {
strcpy(c, prom_argv[i]);
c += strlen(prom_argv[i]);
- if (i < prom_argc-1)
+ if (i < prom_argc - 1)
*c++ = ' ';
}
*c = 0;
@@ -57,6 +57,7 @@ void __init prom_init_cmdline(void)
void __init prom_init(void)
{
+ void __iomem *uart_base;
prom_argc = fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
@@ -65,23 +66,18 @@ void __init prom_init(void)
memsize = env_or_default("memsize", DEFAULT_MEMSIZE);
highmemsize = env_or_default("highmemsize", 0x0);
-}
-void __init prom_free_prom_memory(void)
-{
+ if (strstr(arcs_cmdline, "console=ttyS3"))
+ uart_base = ioremap_nocache(LS1X_UART3_BASE, 0x0f);
+ else if (strstr(arcs_cmdline, "console=ttyS2"))
+ uart_base = ioremap_nocache(LS1X_UART2_BASE, 0x0f);
+ else if (strstr(arcs_cmdline, "console=ttyS1"))
+ uart_base = ioremap_nocache(LS1X_UART1_BASE, 0x0f);
+ else
+ uart_base = ioremap_nocache(LS1X_UART0_BASE, 0x0f);
+ setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
}
-#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset))
-
-void prom_putchar(char c)
+void __init prom_free_prom_memory(void)
{
- int timeout;
-
- timeout = 1024;
-
- while (((readb(PORT(UART_LSR)) & UART_LSR_THRE) == 0)
- && (timeout-- > 0))
- ;
-
- writeb(c, PORT(UART_TX));
}
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index 547f34b69e4c..c41e4ca56ab4 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -14,12 +14,7 @@
#include <loongson1.h>
-static void ls1x_restart(char *command)
-{
- __raw_writel(0x1, LS1X_WDT_EN);
- __raw_writel(0x5000000, LS1X_WDT_TIMER);
- __raw_writel(0x1, LS1X_WDT_SET);
-}
+static void __iomem *wdt_base;
static void ls1x_halt(void)
{
@@ -29,6 +24,15 @@ static void ls1x_halt(void)
}
}
+static void ls1x_restart(char *command)
+{
+ __raw_writel(0x1, wdt_base + WDT_EN);
+ __raw_writel(0x1, wdt_base + WDT_TIMER);
+ __raw_writel(0x1, wdt_base + WDT_SET);
+
+ ls1x_halt();
+}
+
static void ls1x_power_off(void)
{
ls1x_halt();
@@ -36,6 +40,10 @@ static void ls1x_power_off(void)
static int __init ls1x_reboot_setup(void)
{
+ wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f);
+ if (!wdt_base)
+ panic("Failed to remap watchdog registers");
+
_machine_restart = ls1x_restart;
_machine_halt = ls1x_halt;
pm_power_off = ls1x_power_off;
diff --git a/arch/mips/loongson1/common/time.c b/arch/mips/loongson1/common/time.c
new file mode 100644
index 000000000000..df0f850d6a5f
--- /dev/null
+++ b/arch/mips/loongson1/common/time.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <asm/time.h>
+
+#include <loongson1.h>
+#include <platform.h>
+
+#ifdef CONFIG_CEVT_CSRC_LS1X
+
+#if defined(CONFIG_TIMER_USE_PWM1)
+#define LS1X_TIMER_BASE LS1X_PWM1_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ
+
+#elif defined(CONFIG_TIMER_USE_PWM2)
+#define LS1X_TIMER_BASE LS1X_PWM2_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ
+
+#elif defined(CONFIG_TIMER_USE_PWM3)
+#define LS1X_TIMER_BASE LS1X_PWM3_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ
+
+#else
+#define LS1X_TIMER_BASE LS1X_PWM0_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ
+#endif
+
+DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
+
+static void __iomem *timer_base;
+static uint32_t ls1x_jiffies_per_tick;
+
+static inline void ls1x_pwmtimer_set_period(uint32_t period)
+{
+ __raw_writel(period, timer_base + PWM_HRC);
+ __raw_writel(period, timer_base + PWM_LRC);
+}
+
+static inline void ls1x_pwmtimer_restart(void)
+{
+ __raw_writel(0x0, timer_base + PWM_CNT);
+ __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+}
+
+void __init ls1x_pwmtimer_init(void)
+{
+ timer_base = ioremap(LS1X_TIMER_BASE, 0xf);
+ if (!timer_base)
+ panic("Failed to remap timer registers");
+
+ ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
+
+ ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+ ls1x_pwmtimer_restart();
+}
+
+static cycle_t ls1x_clocksource_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ /* read the count */
+ count = __raw_readl(timer_base + PWM_CNT);
+
+ /*
+ * It's possible for count to appear to go the wrong way for this
+ * reason:
+ *
+ * The timer counter underflows, but we haven't handled the resulting
+ * interrupt and incremented jiffies yet.
+ *
+ * Previous attempts to handle these cases intelligently were buggy, so
+ * we just do the simple thing now.
+ */
+ if (count < old_count && jifs == old_jifs)
+ count = old_count;
+
+ old_count = count;
+ old_jifs = jifs;
+
+ raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
+
+ return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
+}
+
+static struct clocksource ls1x_clocksource = {
+ .name = "ls1x-pwmtimer",
+ .read = ls1x_clocksource_read,
+ .mask = CLOCKSOURCE_MASK(24),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static irqreturn_t ls1x_clockevent_isr(int irq, void *devid)
+{
+ struct clock_event_device *cd = devid;
+
+ ls1x_pwmtimer_restart();
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+ ls1x_pwmtimer_restart();
+ case CLOCK_EVT_MODE_RESUME:
+ __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ __raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
+ timer_base + PWM_CTRL);
+ break;
+ default:
+ break;
+ }
+ raw_spin_unlock(&ls1x_timer_lock);
+}
+
+static int ls1x_clockevent_set_next(unsigned long evt,
+ struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_set_period(evt);
+ ls1x_pwmtimer_restart();
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static struct clock_event_device ls1x_clockevent = {
+ .name = "ls1x-pwmtimer",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = LS1X_TIMER_IRQ,
+ .set_next_event = ls1x_clockevent_set_next,
+ .set_mode = ls1x_clockevent_set_mode,
+};
+
+static struct irqaction ls1x_pwmtimer_irqaction = {
+ .name = "ls1x-pwmtimer",
+ .handler = ls1x_clockevent_isr,
+ .dev_id = &ls1x_clockevent,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+};
+
+static void __init ls1x_time_init(void)
+{
+ struct clock_event_device *cd = &ls1x_clockevent;
+ int ret;
+
+ if (!mips_hpt_frequency)
+ panic("Invalid timer clock rate");
+
+ ls1x_pwmtimer_init();
+
+ clockevent_set_clock(cd, mips_hpt_frequency);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x000300, cd);
+ cd->cpumask = cpumask_of(smp_processor_id());
+ clockevents_register_device(cd);
+
+ ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000;
+ ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency);
+ if (ret)
+ panic("Failed to register clocksource: %d\n", ret);
+
+ setup_irq(LS1X_TIMER_IRQ, &ls1x_pwmtimer_irqaction);
+}
+#endif /* CONFIG_CEVT_CSRC_LS1X */
+
+void __init plat_time_init(void)
+{
+ struct clk *clk = NULL;
+
+ /* initialize LS1X clocks */
+ ls1x_clk_init();
+
+#ifdef CONFIG_CEVT_CSRC_LS1X
+ /* setup LS1X PWM timer */
+ clk = clk_get(NULL, "ls1x_pwmtimer");
+ if (IS_ERR(clk))
+ panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
+
+ mips_hpt_frequency = clk_get_rate(clk);
+ ls1x_time_init();
+#else
+ /* setup mips r4k timer */
+ clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(clk))
+ panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
+
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+#endif /* CONFIG_CEVT_CSRC_LS1X */
+}
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
index b26b10dac70a..58daeea25739 100644
--- a/arch/mips/loongson1/ls1b/board.c
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -10,17 +10,19 @@
#include <platform.h>
static struct platform_device *ls1b_platform_devices[] __initdata = {
- &ls1x_uart_device,
- &ls1x_eth0_device,
- &ls1x_ehci_device,
- &ls1x_rtc_device,
+ &ls1x_uart_pdev,
+ &ls1x_cpufreq_pdev,
+ &ls1x_eth0_pdev,
+ &ls1x_eth1_pdev,
+ &ls1x_ehci_pdev,
+ &ls1x_rtc_pdev,
};
static int __init ls1b_platform_init(void)
{
int err;
- ls1x_serial_setup(&ls1x_uart_device);
+ ls1x_serial_setup(&ls1x_uart_pdev);
err = platform_add_devices(ls1b_platform_devices,
ARRAY_SIZE(ls1b_platform_devices));
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index cac529a405b8..9dfcd7fc1bc3 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -643,9 +643,14 @@ static inline int cop1_64bit(struct pt_regs *xcp)
return !test_thread_flag(TIF_32BIT_FPREGS);
}
+static inline bool hybrid_fprs(void)
+{
+ return test_thread_flag(TIF_HYBRID_FPREGS);
+}
+
#define SIFROMREG(si, x) \
do { \
- if (cop1_64bit(xcp)) \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) \
(si) = (int)get_fpr32(&ctx->fpr[x], 0); \
else \
(si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
@@ -653,7 +658,7 @@ do { \
#define SITOREG(si, x) \
do { \
- if (cop1_64bit(xcp)) { \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) { \
unsigned i; \
set_fpr32(&ctx->fpr[x], 0, si); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index fd134675fc2e..068f45a415fc 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -38,7 +38,7 @@ int ieee754dp_isnan(union ieee754dp x)
static inline int ieee754dp_issnan(union ieee754dp x)
{
assert(ieee754dp_isnan(x));
- return ((DPMANT(x) & DP_MBIT(DP_FBITS-1)) == DP_MBIT(DP_FBITS-1));
+ return (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
}
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index d348efe91445..ba88301579c2 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -38,7 +38,7 @@ int ieee754sp_isnan(union ieee754sp x)
static inline int ieee754sp_issnan(union ieee754sp x)
{
assert(ieee754sp_isnan(x));
- return (SPMANT(x) & SP_MBIT(SP_FBITS-1));
+ return SPMANT(x) & SP_MBIT(SP_FBITS - 1);
}
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 7f4f93ab22b7..67ede4ef9b8d 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,13 @@
obj-y += cache.o dma-default.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
- tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o
+ tlbex.o tlbex-fault.o tlb-funcs.o
+
+ifdef CONFIG_CPU_MICROMIPS
+obj-y += uasm-micromips.o
+else
+obj-y += uasm-mips.o
+endif
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,5 +28,3 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
-
-obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fbcd8674ff1d..dd261df005c2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -917,6 +917,18 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
}
}
+static void b5k_instruction_hazard(void)
+{
+ __sync();
+ __sync();
+ __asm__ __volatile__(
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ : : : "memory");
+}
+
static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
@@ -1683,6 +1695,37 @@ void r4k_cache_init(void)
coherency_setup();
board_cache_error_setup = r4k_cache_error_setup;
+
+ /*
+ * Per-CPU overrides
+ */
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* No IPI is needed because all CPUs share the same D$ */
+ flush_data_cache_page = r4k_blast_dcache_page;
+ break;
+ case CPU_BMIPS5000:
+ /* We lose our superpowers if L2 is disabled */
+ if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
+ break;
+
+ /* I$ fills from D$ just by emptying the write buffers */
+ flush_cache_page = (void *)b5k_instruction_hazard;
+ flush_cache_range = (void *)b5k_instruction_hazard;
+ flush_cache_sigtramp = (void *)b5k_instruction_hazard;
+ local_flush_data_cache_page = (void *)b5k_instruction_hazard;
+ flush_data_cache_page = (void *)b5k_instruction_hazard;
+ flush_icache_range = (void *)b5k_instruction_hazard;
+ local_flush_icache_range = (void *)b5k_instruction_hazard;
+
+ /* Cache aliases are handled in hardware; allow HIGHMEM */
+ current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;
+
+ /* Optimization: an L2 flush implicitly flushes the L1 */
+ current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
+ break;
+ }
}
static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33ba3c558fe4..af5f046e627e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -61,6 +61,11 @@ static inline struct page *dma_addr_to_page(struct device *dev,
* Warning on the terminology - Linux calls an uncached area coherent;
* MIPS terminology calls memory areas with hardware maintained coherency
* coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this
+ * condition. However, this function is only called on non-I/O-coherent
+ * systems, and only the R10000 and R12000 are used in such systems: the
+ * SGI IP28 Indigo² and the SGI IP32 (aka O2), respectively.
*/
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 06ce17c2a905..7cba480568c8 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -17,7 +17,7 @@
static inline pte_t gup_get_pte(pte_t *ptep)
{
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
pte_t pte;
retry:
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index f42e35e42790..448cde372af0 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -95,7 +95,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
idx += in_interrupt() ? FIX_N_COLOURS : 0;
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
entrylo = pte_to_entrylo(pte_val(pte));
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 7f840bc08abf..8d5008cbdc0f 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -17,9 +17,9 @@
#include <asm/tlbflush.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address,
- phys_t size, phys_t phys_addr, unsigned long flags)
+ phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
- phys_t end;
+ phys_addr_t end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
| __WRITEABLE | flags);
@@ -43,9 +43,9 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
}
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
- phys_t size, phys_t phys_addr, unsigned long flags)
+ phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
- phys_t end;
+ phys_addr_t end;
address &= ~PGDIR_MASK;
end = address + size;
@@ -64,8 +64,8 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
return 0;
}
-static int remap_area_pages(unsigned long address, phys_t phys_addr,
- phys_t size, unsigned long flags)
+static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
+ phys_addr_t size, unsigned long flags)
{
int error;
pgd_t * dir;
@@ -111,13 +111,13 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
* caller shouldn't need to know that small detail.
*/
-#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
+#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
-void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
+void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
struct vm_struct * area;
unsigned long offset;
- phys_t last_addr;
+ phys_addr_t last_addr;
void * addr;
phys_addr = fixup_bigphys_addr(phys_addr, size);
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 0216ed6eaa2a..751b5cd18bf2 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -81,7 +81,7 @@ static inline int __init r5k_sc_probe(void)
unsigned long config = read_c0_config();
if (config & CONF_SC)
- return(0);
+ return 0;
scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c3917e251f59..e90b2e899291 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -332,7 +332,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
ptep = pte_offset_map(pmdp, address);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e3328a96e809..3978a3d81366 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
if (cpu_has_rixi) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1009,7 +1009,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
* 64bit address support (36bit on a 32bit CPU) in a 32bit
* Kernel is a special case. Only a few CPUs use it.
*/
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits) {
uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
@@ -1510,14 +1510,14 @@ static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_lld(p, pte, 0, ptr);
else
# endif
UASM_i_LL(p, pte, 0, ptr);
#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_ld(p, pte, 0, ptr);
else
@@ -1530,13 +1530,13 @@ static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_scd(p, pte, 0, ptr);
else
@@ -1548,7 +1548,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
else
uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
/* no uasm_i_nop needed */
uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
@@ -1563,14 +1563,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
uasm_i_nop(p);
# endif
#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_sd(p, pte, 0, ptr);
else
# endif
UASM_i_SW(p, pte, 0, ptr);
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 6708a2dbf934..8e02291cfc0c 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -96,9 +96,11 @@ static struct insn insn_table[] = {
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mfhc0, M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
{ insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index a01b0d6cedd2..4adf30284813 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -51,12 +51,12 @@ enum opcode {
insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
- insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
- insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
- insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
- insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
- insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
- insn_xor, insn_xori, insn_yield,
+ insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
+ insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe,
+ insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt,
+ insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
+ insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
+ insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
};
struct insn {
@@ -284,9 +284,11 @@ I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
+I_u1u2u3(_mfhc0)
I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
+I_u1u2u3(_mthc0)
I_u3u1u2(_mul)
I_u2u1u3(_ori)
I_u3u1u2(_or)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 0f60256d3784..6849f533154f 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -111,7 +111,7 @@ static void __init mips_ejtag_setup(void)
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
-phys_t mips_cpc_default_phys_base(void)
+phys_addr_t mips_cpc_default_phys_base(void)
{
return CPC_BASE_ADDR;
}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e4f43baa8f67..d1392f8f5811 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/random.h>
@@ -33,19 +34,13 @@
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/msc01_pci.h>
#include <asm/msc01_ic.h>
-#include <asm/gic.h>
#include <asm/setup.h>
#include <asm/rtlx.h>
-static unsigned long _msc01_biu_base;
-static unsigned int ipi_map[NR_CPUS];
+static void __iomem *_msc01_biu_base;
static DEFINE_RAW_SPINLOCK(mips_irq_lock);
-#ifdef CONFIG_MIPS_GIC_IPI
-DECLARE_BITMAP(ipi_ints, GIC_NUM_INTRS);
-#endif
-
static inline int mips_pcibios_iack(void)
{
int irq;
@@ -127,24 +122,10 @@ static void malta_hw0_irqdispatch(void)
#endif
}
-static void malta_ipi_irqdispatch(void)
+static irqreturn_t i8259_handler(int irq, void *dev_id)
{
-#ifdef CONFIG_MIPS_GIC_IPI
- unsigned long irq;
- DECLARE_BITMAP(pending, GIC_NUM_INTRS);
-
- gic_get_int_mask(pending, ipi_ints);
-
- irq = find_first_bit(pending, GIC_NUM_INTRS);
-
- while (irq < GIC_NUM_INTRS) {
- do_IRQ(MIPS_GIC_IRQ_BASE + irq);
-
- irq = find_next_bit(pending, GIC_NUM_INTRS, irq + 1);
- }
-#endif
- if (gic_compare_int())
- do_IRQ(MIPS_GIC_IRQ_BASE);
+ malta_hw0_irqdispatch();
+ return IRQ_HANDLED;
}
static void corehi_irqdispatch(void)
@@ -203,95 +184,10 @@ static void corehi_irqdispatch(void)
die("CoreHi interrupt", regs);
}
-static inline int clz(unsigned long x)
-{
- __asm__(
- " .set push \n"
- " .set mips32 \n"
- " clz %0, %1 \n"
- " .set pop \n"
- : "=r" (x)
- : "r" (x));
-
- return x;
-}
-
-/*
- * Version of ffs that only looks at bits 12..15.
- */
-static inline unsigned int irq_ffs(unsigned int pending)
-{
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
- return -clz(pending) + 31 - CAUSEB_IP;
-#else
- unsigned int a0 = 7;
- unsigned int t0;
-
- t0 = pending & 0xf000;
- t0 = t0 < 1;
- t0 = t0 << 2;
- a0 = a0 - t0;
- pending = pending << t0;
-
- t0 = pending & 0xc000;
- t0 = t0 < 1;
- t0 = t0 << 1;
- a0 = a0 - t0;
- pending = pending << t0;
-
- t0 = pending & 0x8000;
- t0 = t0 < 1;
- /* t0 = t0 << 2; */
- a0 = a0 - t0;
- /* pending = pending << t0; */
-
- return a0;
-#endif
-}
-
-/*
- * IRQs on the Malta board look basically (barring software IRQs which we
- * don't use at all and all external interrupt sources are combined together
- * on hardware interrupt 0 (MIPS IRQ 2)) like:
- *
- * MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Combined hardware interrupt (hw0)
- * 3 Hardware (ignored)
- * 4 Hardware (ignored)
- * 5 Hardware (ignored)
- * 6 Hardware (ignored)
- * 7 R4k timer (what we use)
- *
- * We handle the IRQ according to _our_ priority which is:
- *
- * Highest ---- R4k Timer
- * Lowest ---- Combined hardware interrupt
- *
- * then we just return, if multiple IRQs are pending then we will just take
- * another exception, big deal.
- */
-
-asmlinkage void plat_irq_dispatch(void)
+static irqreturn_t corehi_handler(int irq, void *dev_id)
{
- unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
- int irq;
-
- if (unlikely(!pending)) {
- spurious_interrupt();
- return;
- }
-
- irq = irq_ffs(pending);
-
- if (irq == MIPSCPU_INT_I8259A)
- malta_hw0_irqdispatch();
- else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
- malta_ipi_irqdispatch();
- else
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ corehi_irqdispatch();
+ return IRQ_HANDLED;
}
#ifdef CONFIG_MIPS_MT_SMP
@@ -312,13 +208,6 @@ static void ipi_call_dispatch(void)
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
-#endif /* CONFIG_MIPS_MT_SMP */
-
-#ifdef CONFIG_MIPS_GIC_IPI
-
-#define GIC_MIPS_CPU_IPI_RESCHED_IRQ 3
-#define GIC_MIPS_CPU_IPI_CALL_IRQ 4
-
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
@@ -349,31 +238,16 @@ static struct irqaction irq_call = {
.flags = IRQF_PERCPU,
.name = "IPI_call"
};
-#endif /* CONFIG_MIPS_GIC_IPI */
-
-static int gic_resched_int_base;
-static int gic_call_int_base;
-#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu))
-#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu))
-
-unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
-{
- return GIC_CALL_INT(cpu);
-}
-
-unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
-{
- return GIC_RESCHED_INT(cpu);
-}
+#endif /* CONFIG_MIPS_MT_SMP */
static struct irqaction i8259irq = {
- .handler = no_action,
+ .handler = i8259_handler,
.name = "XT-PIC cascade",
.flags = IRQF_NO_THREAD,
};
static struct irqaction corehi_irqaction = {
- .handler = no_action,
+ .handler = corehi_handler,
.name = "CoreHi",
.flags = IRQF_NO_THREAD,
};
@@ -399,60 +273,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = {
static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
-/*
- * This GIC specific tabular array defines the association between External
- * Interrupts and CPUs/Core Interrupts. The nature of the External
- * Interrupts is also defined here - polarity/trigger.
- */
-
-#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
-#define X GIC_UNUSED
-
-static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
- { X, X, X, X, 0 },
- { X, X, X, X, 0 },
- { X, X, X, X, 0 },
- { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { X, X, X, X, 0 },
- { X, X, X, X, 0 },
- { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { X, X, X, X, 0 },
- /* The remainder of this table is initialised by fill_ipi_map */
-};
-#undef X
-
-#ifdef CONFIG_MIPS_GIC_IPI
-static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
-{
- int intr = baseintr + cpu;
- gic_intr_map[intr].cpunum = cpu;
- gic_intr_map[intr].pin = cpupin;
- gic_intr_map[intr].polarity = GIC_POL_POS;
- gic_intr_map[intr].trigtype = GIC_TRIG_EDGE;
- gic_intr_map[intr].flags = 0;
- ipi_map[cpu] |= (1 << (cpupin + 2));
- bitmap_set(ipi_ints, intr, 1);
-}
-
-static void __init fill_ipi_map(void)
-{
- int cpu;
-
- for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
- fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
- fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
- }
-}
-#endif
-
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
@@ -461,6 +281,8 @@ void __init arch_init_ipiirq(int irq, struct irqaction *action)
void __init arch_init_irq(void)
{
+ int corehi_irq, i8259_irq;
+
init_i8259_irqs();
if (!cpu_has_veic)
@@ -471,12 +293,12 @@ void __init arch_init_irq(void)
gic_present = 1;
} else {
if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
- _msc01_biu_base = (unsigned long)
- ioremap_nocache(MSC01_BIU_REG_BASE,
+ _msc01_biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
MSC01_BIU_ADDRSPACE_SZ);
- gic_present = (REG(_msc01_biu_base, MSC01_SC_CFG) &
- MSC01_SC_CFG_GICPRES_MSK) >>
- MSC01_SC_CFG_GICPRES_SHF;
+ gic_present =
+ (__raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS) &
+ MSC01_SC_CFG_GICPRES_MSK) >>
+ MSC01_SC_CFG_GICPRES_SHF;
}
}
if (gic_present)
@@ -507,63 +329,20 @@ void __init arch_init_irq(void)
msc_nr_irqs);
}
- if (cpu_has_veic) {
- set_vi_handler(MSC01E_INT_I8259A, malta_hw0_irqdispatch);
- set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
- setup_irq(MSC01E_INT_BASE+MSC01E_INT_I8259A, &i8259irq);
- setup_irq(MSC01E_INT_BASE+MSC01E_INT_COREHI, &corehi_irqaction);
- } else if (cpu_has_vint) {
- set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
- set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch);
- setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
- setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
- &corehi_irqaction);
- } else {
- setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
- setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
- &corehi_irqaction);
- }
-
if (gic_present) {
- /* FIXME */
int i;
-#if defined(CONFIG_MIPS_GIC_IPI)
- gic_call_int_base = GIC_NUM_INTRS -
- (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
- gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
- fill_ipi_map();
-#endif
- gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
- ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
+
+ gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, MIPSCPU_INT_GIC,
+ MIPS_GIC_IRQ_BASE);
if (!mips_cm_present()) {
/* Enable the GIC */
- i = REG(_msc01_biu_base, MSC01_SC_CFG);
- REG(_msc01_biu_base, MSC01_SC_CFG) =
- (i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
+ i = __raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS);
+ __raw_writel(i | (0x1 << MSC01_SC_CFG_GICENA_SHF),
+ _msc01_biu_base + MSC01_SC_CFG_OFS);
pr_debug("GIC Enabled\n");
}
-#if defined(CONFIG_MIPS_GIC_IPI)
- /* set up ipi interrupts */
- if (cpu_has_vint) {
- set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
- set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
- }
- /* Argh.. this really needs sorting out.. */
- pr_info("CPU%d: status register was %08x\n",
- smp_processor_id(), read_c0_status());
- write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
- pr_info("CPU%d: status register now %08x\n",
- smp_processor_id(), read_c0_status());
- write_c0_status(0x1100dc00);
- pr_info("CPU%d: status register frc %08x\n",
- smp_processor_id(), read_c0_status());
- for (i = 0; i < nr_cpu_ids; i++) {
- arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
- GIC_RESCHED_INT(i), &irq_resched);
- arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
- GIC_CALL_INT(i), &irq_call);
- }
-#endif
+ i8259_irq = MIPS_GIC_IRQ_BASE + GIC_INT_I8259A;
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
} else {
#if defined(CONFIG_MIPS_MT_SMP)
/* set up ipi interrupts */
@@ -573,12 +352,6 @@ void __init arch_init_irq(void)
cpu_ipi_resched_irq = MSC01E_INT_SW0;
cpu_ipi_call_irq = MSC01E_INT_SW1;
} else {
- if (cpu_has_vint) {
- set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ,
- ipi_resched_dispatch);
- set_vi_handler (MIPS_CPU_IPI_CALL_IRQ,
- ipi_call_dispatch);
- }
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
@@ -587,7 +360,21 @@ void __init arch_init_irq(void)
arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
#endif
+ if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_I8259A,
+ malta_hw0_irqdispatch);
+ set_vi_handler(MSC01E_INT_COREHI,
+ corehi_irqdispatch);
+ i8259_irq = MSC01E_INT_BASE + MSC01E_INT_I8259A;
+ corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
+ } else {
+ i8259_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_I8259A;
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
+ }
}
+
+ setup_irq(i8259_irq, &i8259irq);
+ setup_irq(corehi_irq, &corehi_irqaction);
}
void malta_be_init(void)
@@ -714,37 +501,3 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
return retval;
}
-
-void gic_enable_interrupt(int irq_vec)
-{
- GIC_SET_INTR_MASK(irq_vec);
-}
-
-void gic_disable_interrupt(int irq_vec)
-{
- GIC_CLR_INTR_MASK(irq_vec);
-}
-
-void gic_irq_ack(struct irq_data *d)
-{
- int irq = (d->irq - gic_irq_base);
-
- GIC_CLR_INTR_MASK(irq);
-
- if (gic_irq_flags[irq] & GIC_TRIG_EDGE)
- GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
-}
-
-void gic_finish_irq(struct irq_data *d)
-{
- /* Enable interrupts. */
- GIC_SET_INTR_MASK(d->irq - gic_irq_base);
-}
-
-void __init gic_platform_init(int irqs, struct irq_chip *irq_controller)
-{
- int i;
-
- for (i = gic_irq_base; i < (gic_irq_base + irqs); i++)
- irq_set_chip(i, irq_controller);
-}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 3778a359f3ad..ce02dbdedc62 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/timex.h>
#include <linux/mc146818rtc.h>
@@ -37,7 +38,6 @@
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
-#include <asm/gic.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/maltaint.h>
@@ -46,6 +46,8 @@ static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
extern int cp0_perfcount_irq;
+static unsigned int gic_frequency;
+
static void mips_timer_dispatch(void)
{
do_IRQ(mips_cpu_timer_irq);
@@ -70,9 +72,7 @@ static void __init estimate_frequencies(void)
{
unsigned long flags;
unsigned int count, start;
-#ifdef CONFIG_IRQ_GIC
- unsigned int giccount = 0, gicstart = 0;
-#endif
+ cycle_t giccount = 0, gicstart = 0;
#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
@@ -87,32 +87,26 @@ static void __init estimate_frequencies(void)
/* Initialize counters. */
start = read_c0_count();
-#ifdef CONFIG_IRQ_GIC
if (gic_present)
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
-#endif
+ gicstart = gic_read_count();
/* Read counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
count = read_c0_count();
-#ifdef CONFIG_IRQ_GIC
if (gic_present)
- GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
-#endif
+ giccount = gic_read_count();
local_irq_restore(flags);
count -= start;
mips_hpt_frequency = count;
-#ifdef CONFIG_IRQ_GIC
if (gic_present) {
giccount -= gicstart;
gic_frequency = giccount;
}
-#endif
}
void read_persistent_clock(struct timespec *ts)
@@ -121,35 +115,30 @@ void read_persistent_clock(struct timespec *ts)
ts->tv_nsec = 0;
}
-static void __init plat_perf_setup(void)
+int get_c0_perfcount_int(void)
{
-#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
- } else
-#endif
- if (cp0_perfcount_irq >= 0) {
- if (cpu_has_vint)
- set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
+ } else if (gic_present) {
+ mips_cpu_perf_irq = gic_get_c0_perfcount_int();
+ } else if (cp0_perfcount_irq >= 0) {
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
-#ifdef CONFIG_SMP
- irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq);
-#endif
+ } else {
+ mips_cpu_perf_irq = -1;
}
+
+ return mips_cpu_perf_irq;
}
unsigned int get_c0_compare_int(void)
{
-#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
- } else
-#endif
- {
- if (cpu_has_vint)
- set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
+ } else if (gic_present) {
+ mips_cpu_timer_irq = gic_get_c0_compare_int();
+ } else {
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}
@@ -191,16 +180,14 @@ void __init plat_time_init(void)
setup_pit_timer();
#endif
-#ifdef CONFIG_IRQ_GIC
+#ifdef CONFIG_MIPS_GIC
if (gic_present) {
freq = freqround(gic_frequency, 5000);
printk("GIC frequency %d.%02d MHz\n", freq/1000000,
(freq%1000000)*100/1000000);
-#ifdef CONFIG_CSRC_GIC
+#ifdef CONFIG_CLKSRC_MIPS_GIC
gic_clocksource_init(gic_frequency);
#endif
}
#endif
-
- plat_perf_setup();
}
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 0a168c948b01..3abe47b316aa 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -70,7 +70,6 @@ static struct platform_driver sead3_led_driver = {
.remove = sead3_led_remove,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mips/mti-sead3/sead3-ehci.c b/arch/mips/mti-sead3/sead3-ehci.c
index 772fc056a92d..014dd7ba4d68 100644
--- a/arch/mips/mti-sead3/sead3-ehci.c
+++ b/arch/mips/mti-sead3/sead3-ehci.c
@@ -9,6 +9,9 @@
#include <linux/irq.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/irqchip/mips-gic.h>
+
+#include <asm/mips-boards/sead3int.h>
struct resource ehci_resources[] = {
{
@@ -17,7 +20,6 @@ struct resource ehci_resources[] = {
.flags = IORESOURCE_MEM
},
{
- .start = MIPS_CPU_IRQ_BASE + 2,
.flags = IORESOURCE_IRQ
}
};
@@ -37,6 +39,10 @@ static struct platform_device ehci_device = {
static int __init ehci_init(void)
{
+ if (gic_present)
+ ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
+ else
+ ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
return platform_device_register(&ehci_device);
}
diff --git a/arch/mips/mti-sead3/sead3-i2c-drv.c b/arch/mips/mti-sead3/sead3-i2c-drv.c
index 1f787a6a7878..2bebf0974e39 100644
--- a/arch/mips/mti-sead3/sead3-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-i2c-drv.c
@@ -380,7 +380,6 @@ static int sead3_i2c_platform_resume(struct platform_device *pdev)
static struct platform_driver sead3_i2c_platform_driver = {
.driver = {
.name = "sead3-i2c",
- .owner = THIS_MODULE,
},
.probe = sead3_i2c_platform_probe,
.remove = sead3_i2c_platform_remove,
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index 6a560ac03def..e31e17f81eef 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -7,9 +7,9 @@
*/
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/io.h>
-#include <asm/gic.h>
#include <asm/irq_cpu.h>
#include <asm/setup.h>
@@ -20,138 +20,23 @@
#define SEAD_CONFIG_BASE 0x1b100110
#define SEAD_CONFIG_SIZE 4
-static unsigned long sead3_config_reg;
-
-/*
- * This table defines the setup for each external GIC interrupt. It is
- * indexed by interrupt number.
- */
-#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
-static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
- { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
- { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
-};
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
- int irq;
-
- irq = (fls(pending) - CAUSEB_IP - 1);
- if (irq >= 0)
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- else
- spurious_interrupt();
-}
+static void __iomem *sead3_config_reg;
void __init arch_init_irq(void)
{
- int i;
-
- if (!cpu_has_veic) {
+ if (!cpu_has_veic)
mips_cpu_irq_init();
- if (cpu_has_vint) {
- /* install generic handler */
- for (i = 0; i < 8; i++)
- set_vi_handler(i, plat_irq_dispatch);
- }
- }
-
- sead3_config_reg = (unsigned long)ioremap_nocache(SEAD_CONFIG_BASE,
- SEAD_CONFIG_SIZE);
- gic_present = (REG32(sead3_config_reg) & SEAD_CONFIG_GIC_PRESENT_MSK) >>
+ sead3_config_reg = ioremap_nocache(SEAD_CONFIG_BASE, SEAD_CONFIG_SIZE);
+ gic_present = (__raw_readl(sead3_config_reg) &
+ SEAD_CONFIG_GIC_PRESENT_MSK) >>
SEAD_CONFIG_GIC_PRESENT_SHF;
pr_info("GIC: %spresent\n", (gic_present) ? "" : "not ");
pr_info("EIC: %s\n",
(current_cpu_data.options & MIPS_CPU_VEIC) ? "on" : "off");
if (gic_present)
- gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
- ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
-}
-
-void gic_enable_interrupt(int irq_vec)
-{
- unsigned int i, irq_source;
-
- /* enable all the interrupts associated with this vector */
- for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) {
- irq_source = gic_shared_intr_map[irq_vec].intr_list[i];
- GIC_SET_INTR_MASK(irq_source);
- }
- /* enable all local interrupts associated with this vector */
- if (gic_shared_intr_map[irq_vec].local_intr_mask) {
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_SMASK),
- gic_shared_intr_map[irq_vec].local_intr_mask);
- }
+ gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, CPU_INT_GIC,
+ MIPS_GIC_IRQ_BASE);
}
-void gic_disable_interrupt(int irq_vec)
-{
- unsigned int i, irq_source;
-
- /* disable all the interrupts associated with this vector */
- for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) {
- irq_source = gic_shared_intr_map[irq_vec].intr_list[i];
- GIC_CLR_INTR_MASK(irq_source);
- }
- /* disable all local interrupts associated with this vector */
- if (gic_shared_intr_map[irq_vec].local_intr_mask) {
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_RMASK),
- gic_shared_intr_map[irq_vec].local_intr_mask);
- }
-}
-
-void gic_irq_ack(struct irq_data *d)
-{
- GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
-}
-
-void gic_finish_irq(struct irq_data *d)
-{
- unsigned int irq = (d->irq - gic_irq_base);
- unsigned int i, irq_source;
-
- /* Clear edge detectors. */
- for (i = 0; i < gic_shared_intr_map[irq].num_shared_intr; i++) {
- irq_source = gic_shared_intr_map[irq].intr_list[i];
- if (gic_irq_flags[irq_source] & GIC_TRIG_EDGE)
- GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq_source);
- }
-
- /* Enable interrupts. */
- GIC_SET_INTR_MASK(irq);
-}
-
-void __init gic_platform_init(int irqs, struct irq_chip *irq_controller)
-{
- int i;
-
- /*
- * For non-EIC mode, we want to setup the GIC in pass-through
- * mode, as if the GIC didn't exist. Do not map any interrupts
- * for an external interrupt controller.
- */
- if (!cpu_has_veic)
- return;
-
- for (i = gic_irq_base; i < (gic_irq_base + irqs); i++)
- irq_set_chip_and_handler(i, irq_controller, handle_percpu_irq);
-}
diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c
index dd11e7eb771c..46176b804576 100644
--- a/arch/mips/mti-sead3/sead3-net.c
+++ b/arch/mips/mti-sead3/sead3-net.c
@@ -7,9 +7,12 @@
*/
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
+#include <asm/mips-boards/sead3int.h>
+
static struct smsc911x_platform_config sead3_smsc911x_data = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
@@ -17,14 +20,13 @@ static struct smsc911x_platform_config sead3_smsc911x_data = {
.phy_interface = PHY_INTERFACE_MODE_MII,
};
-struct resource sead3_net_resourcess[] = {
+struct resource sead3_net_resources[] = {
{
.start = 0x1f010000,
.end = 0x1f01ffff,
.flags = IORESOURCE_MEM
},
{
- .start = MIPS_CPU_IRQ_BASE + 6,
.flags = IORESOURCE_IRQ
}
};
@@ -35,12 +37,16 @@ static struct platform_device sead3_net_device = {
.dev = {
.platform_data = &sead3_smsc911x_data,
},
- .num_resources = ARRAY_SIZE(sead3_net_resourcess),
- .resource = sead3_net_resourcess
+ .num_resources = ARRAY_SIZE(sead3_net_resources),
+ .resource = sead3_net_resources
};
static int __init sead3_net_init(void)
{
+ if (gic_present)
+ sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
+ else
+ sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
return platform_device_register(&sead3_net_device);
}
diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c
index 6c3b33dbed18..53ee6f1f018d 100644
--- a/arch/mips/mti-sead3/sead3-platform.c
+++ b/arch/mips/mti-sead3/sead3-platform.c
@@ -7,12 +7,15 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/irqchip/mips-gic.h>
#include <linux/serial_8250.h>
-#define UART(base, int) \
+#include <asm/mips-boards/sead3int.h>
+
+#define UART(base) \
{ \
.mapbase = base, \
- .irq = int, \
+ .irq = -1, \
.uartclk = 14745600, \
.iotype = UPIO_MEM32, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
@@ -20,8 +23,8 @@
}
static struct plat_serial8250_port uart8250_data[] = {
- UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */
- UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */
+ UART(0x1f000900), /* ttyS0 = USB */
+ UART(0x1f000800), /* ttyS1 = RS232 */
{ },
};
@@ -35,6 +38,13 @@ static struct platform_device uart8250_device = {
static int __init uart8250_init(void)
{
+ if (gic_present) {
+ uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0;
+ uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1;
+ } else {
+ uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0;
+ uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1;
+ }
return platform_device_register(&uart8250_device);
}
diff --git a/arch/mips/mti-sead3/sead3-serial.c b/arch/mips/mti-sead3/sead3-serial.c
deleted file mode 100644
index bc52705bbee4..000000000000
--- a/arch/mips/mti-sead3/sead3-serial.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/serial_8250.h>
-
-#define UART(base, int) \
-{ \
- .mapbase = base, \
- .irq = int, \
- .uartclk = 14745600, \
- .iotype = UPIO_MEM32, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
- .regshift = 2, \
-}
-
-static struct plat_serial8250_port uart8250_data[] = {
- UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */
- UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */
- { },
-};
-
-static struct platform_device uart8250_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = uart8250_data,
- },
-};
-
-static int __init uart8250_init(void)
-{
- return platform_device_register(&uart8250_device);
-}
-
-module_init(uart8250_init);
-
-MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("8250 UART probe driver for the SEAD-3 platform");
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 678d03d53c60..ec1dd2491f96 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -6,6 +6,7 @@
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
+#include <linux/irqchip/mips-gic.h>
#include <asm/cpu.h>
#include <asm/setup.h>
@@ -13,19 +14,6 @@
#include <asm/irq.h>
#include <asm/mips-boards/generic.h>
-static int mips_cpu_timer_irq;
-static int mips_cpu_perf_irq;
-
-static void mips_timer_dispatch(void)
-{
- do_IRQ(mips_cpu_timer_irq);
-}
-
-static void mips_perf_dispatch(void)
-{
- do_IRQ(mips_cpu_perf_irq);
-}
-
static void __iomem *status_reg = (void __iomem *)0xbf000410;
/*
@@ -81,21 +69,20 @@ void read_persistent_clock(struct timespec *ts)
ts->tv_nsec = 0;
}
-static void __init plat_perf_setup(void)
+int get_c0_perfcount_int(void)
{
- if (cp0_perfcount_irq >= 0) {
- if (cpu_has_vint)
- set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
- mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- }
+ if (gic_present)
+ return gic_get_c0_perfcount_int();
+ if (cp0_perfcount_irq >= 0)
+ return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ return -1;
}
unsigned int get_c0_compare_int(void)
{
- if (cpu_has_vint)
- set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
- mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
- return mips_cpu_timer_irq;
+ if (gic_present)
+ return gic_get_c0_compare_int();
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}
void __init plat_time_init(void)
@@ -108,6 +95,4 @@ void __init plat_time_init(void)
(est_freq % 1000000) * 100 / 1000000);
mips_scroll_message();
-
- plat_perf_setup();
}
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9b55143d19db..9fd6834a2172 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -426,7 +426,7 @@ static inline void emit_mod(unsigned int dst, unsigned int src,
u32 *p = &ctx->target[ctx->idx];
uasm_i_divu(&p, dst, src);
p = &ctx->target[ctx->idx + 1];
- uasm_i_mflo(&p, dst);
+ uasm_i_mfhi(&p, dst);
}
ctx->idx += 2; /* 2 insts */
}
@@ -971,7 +971,7 @@ load_ind:
break;
case BPF_ALU | BPF_MOD | BPF_K:
/* A %= k */
- if (k == 1 || optimize_div(&k)) {
+ if (k == 1) {
ctx->flags |= SEEN_A;
emit_jit_reg_move(r_A, r_zero, ctx);
} else {
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 9c0a6782c091..070afdb297df 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -14,3 +14,4 @@ oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
+oprofile-$(CONFIG_CPU_LOONGSON3) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 83a1dfd8f0e3..5e645c9a3162 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -65,7 +65,7 @@ static inline int is_end_of_function_marker(union mips_instruction *ip)
* - handle cases where the stack is adjusted inside a function
* (generally doesn't happen)
* - find optimal value for max_instr_check
- * - try to find a way to handle leaf functions
+ * - try to find a better way to handle leaf functions
*/
static inline int unwind_user_frame(struct stackframe *old_frame,
@@ -104,7 +104,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
}
if (!ra_offset || !stack_size)
- return -1;
+ goto done;
if (ra_offset) {
new_frame.ra = old_frame->sp + ra_offset;
@@ -121,6 +121,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
if (new_frame.sp > old_frame->sp)
return -2;
+done:
new_frame.pc = old_frame->ra;
*old_frame = new_frame;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index e74732449478..a26cbe372e06 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -18,6 +18,7 @@
extern struct op_mips_model op_model_mipsxx_ops __weak;
extern struct op_mips_model op_model_loongson2_ops __weak;
+extern struct op_mips_model op_model_loongson3_ops __weak;
static struct op_mips_model *model;
@@ -104,8 +105,17 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_LOONGSON2:
lmodel = &op_model_loongson2_ops;
break;
+ case CPU_LOONGSON3:
+ lmodel = &op_model_loongson3_ops;
+ break;
};
+ /*
+ * Always set the backtrace. This allows unsupported CPU types to still
+ * use timer-based oprofile.
+ */
+ ops->backtrace = op_mips_backtrace;
+
if (!lmodel)
return -ENODEV;
@@ -121,7 +131,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->start = op_mips_start;
ops->stop = op_mips_stop;
ops->cpu_type = lmodel->cpu_type;
- ops->backtrace = op_mips_backtrace;
printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
lmodel->cpu_type);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
new file mode 100644
index 000000000000..8bcf7fc40f0d
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -0,0 +1,220 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/proc_fs.h>
+#include <linux/oprofile.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+#include <irq.h>
+#include <loongson.h>
+#include "op_impl.h"
+
+#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63)
+
+#define LOONGSON3_PERFCTRL_EXL (1UL << 0)
+#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON3_PERFCTRL_USER (1UL << 3)
+#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON3_PERFCTRL_W (1UL << 30)
+#define LOONGSON3_PERFCTRL_M (1UL << 31)
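+/* Event select field: 6 bits for counter 0, 4 bits for counter 1 */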
+#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
+ (((event) & (idx ? 0x0f : 0x3f)) << 5)
+
+/* Loongson-3 PerfCount performance counter1 register */
+#define read_c0_perflo1() __read_64bit_c0_register($25, 0)
+#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val)
+#define read_c0_perfhi1() __read_64bit_c0_register($25, 1)
+#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val)
+
+/* Loongson-3 PerfCount performance counter2 register */
+#define read_c0_perflo2() __read_64bit_c0_register($25, 2)
+#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val)
+#define read_c0_perfhi2() __read_64bit_c0_register($25, 3)
+#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val)
+
+static int (*save_perf_irq)(void);
+
+static struct loongson3_register_config {
+ unsigned int control1;
+ unsigned int control2;
+ unsigned long long reset_counter1;
+ unsigned long long reset_counter2;
+ int ctr1_enable, ctr2_enable;
+} reg;
+
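+/* Stop both counters (the ENABLE bit stays clear) and zero their counts */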
+static void reset_counters(void *arg)
+{
+ write_c0_perfhi1(0);
+ write_c0_perfhi2(0);
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+}
+
+/* Compute all of the registers in preparation for enabling profiling. */
+static void loongson3_reg_setup(struct op_counter_config *ctr)
+{
+ unsigned int control1 = 0;
+ unsigned int control2 = 0;
+
+ reg.reset_counter1 = 0;
+ reg.reset_counter2 = 0;
+ /* Compute the performance counter control word. */
+ /* For now count kernel and user mode */
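+ /* Preload the count so that bit 63 (overflow) is set after 'count' events */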
+ if (ctr[0].enabled) {
+ control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
+ LOONGSON3_PERFCTRL_ENABLE;
+ if (ctr[0].kernel)
+ control1 |= LOONGSON3_PERFCTRL_KERNEL;
+ if (ctr[0].user)
+ control1 |= LOONGSON3_PERFCTRL_USER;
+ reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
+ }
+
+ if (ctr[1].enabled) {
+ control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
+ LOONGSON3_PERFCTRL_ENABLE;
+ if (ctr[1].kernel)
+ control2 |= LOONGSON3_PERFCTRL_KERNEL;
+ if (ctr[1].user)
+ control2 |= LOONGSON3_PERFCTRL_USER;
+ reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
+ }
+
+ if (ctr[0].enabled)
+ control1 |= LOONGSON3_PERFCTRL_EXL;
+ if (ctr[1].enabled)
+ control2 |= LOONGSON3_PERFCTRL_EXL;
+
+ reg.control1 = control1;
+ reg.control2 = control2;
+ reg.ctr1_enable = ctr[0].enabled;
+ reg.ctr2_enable = ctr[1].enabled;
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+static void loongson3_cpu_setup(void *args)
+{
+ uint64_t perfcount1, perfcount2;
+
+ perfcount1 = reg.reset_counter1;
+ perfcount2 = reg.reset_counter2;
+ write_c0_perfhi1(perfcount1);
+ write_c0_perfhi2(perfcount2);
+}
+
+static void loongson3_cpu_start(void *args)
+{
+ /* Start all counters on current CPU */
+ reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
+ reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
+
+ if (reg.ctr1_enable)
+ write_c0_perflo1(reg.control1);
+ if (reg.ctr2_enable)
+ write_c0_perflo2(reg.control2);
+}
+
+static void loongson3_cpu_stop(void *args)
+{
+ /* Stop all counters on current CPU */
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ memset(&reg, 0, sizeof(reg));
+}
+
+static int loongson3_perfcount_handler(void)
+{
+ unsigned long flags;
+ uint64_t counter1, counter2;
+ uint32_t cause, handled = IRQ_NONE;
+ struct pt_regs *regs = get_irq_regs();
+
+ cause = read_c0_cause();
+ if (!(cause & CAUSEF_PCI))
+ return handled;
+
+ counter1 = read_c0_perfhi1();
+ counter2 = read_c0_perfhi2();
+
+ local_irq_save(flags);
+
+ if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
+ if (reg.ctr1_enable)
+ oprofile_add_sample(regs, 0);
+ counter1 = reg.reset_counter1;
+ }
+ if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
+ if (reg.ctr2_enable)
+ oprofile_add_sample(regs, 1);
+ counter2 = reg.reset_counter2;
+ }
+
+ local_irq_restore(flags);
+
+ write_c0_perfhi1(counter1);
+ write_c0_perfhi2(counter2);
+
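+ /* Do not claim the interrupt if the CP0 timer (CAUSEF_TI) is also pending */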
+ if (!(cause & CAUSEF_TI))
+ handled = IRQ_HANDLED;
+
+ return handled;
+}
+
+static int loongson3_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ write_c0_perflo1(reg.control1);
+ write_c0_perflo2(reg.control2);
+ break;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block loongson3_notifier_block = {
+ .notifier_call = loongson3_cpu_callback
+};
+
+static int __init loongson3_init(void)
+{
+ on_each_cpu(reset_counters, NULL, 1);
+ register_hotcpu_notifier(&loongson3_notifier_block);
+ save_perf_irq = perf_irq;
+ perf_irq = loongson3_perfcount_handler;
+
+ return 0;
+}
+
+static void loongson3_exit(void)
+{
+ on_each_cpu(reset_counters, NULL, 1);
+ unregister_hotcpu_notifier(&loongson3_notifier_block);
+ perf_irq = save_perf_irq;
+}
+
+struct op_mips_model op_model_loongson3_ops = {
+ .reg_setup = loongson3_reg_setup,
+ .cpu_setup = loongson3_cpu_setup,
+ .init = loongson3_init,
+ .exit = loongson3_exit,
+ .cpu_start = loongson3_cpu_start,
+ .cpu_stop = loongson3_cpu_stop,
+ .cpu_type = "mips/loongson3",
+ .num_counters = 2
+};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 42821ae2d77e..01f721a85c5b 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
+#include <asm/time.h>
#include "op_impl.h"
@@ -35,6 +36,7 @@
#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
static int (*save_perf_irq)(void);
+static int perfcount_irq;
/*
* XLR has only one set of counters per core. Designate the
@@ -431,8 +433,16 @@ static int __init mipsxx_init(void)
save_perf_irq = perf_irq;
perf_irq = mipsxx_perfcount_handler;
- if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
- return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+ if (get_c0_perfcount_int)
+ perfcount_irq = get_c0_perfcount_int();
+ else if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
+ perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ perfcount_irq = -1;
+
+ if (perfcount_irq >= 0)
+ return request_irq(perfcount_irq, mipsxx_perfcount_int,
0, "Perfcounter", save_perf_irq);
return 0;
@@ -442,8 +452,8 @@ static void mipsxx_exit(void)
{
int counters = op_model_mipsxx_ops.num_counters;
- if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
- free_irq(cp0_perfcount_irq, save_perf_irq);
+ if (perfcount_irq >= 0)
+ free_irq(perfcount_irq, save_perf_irq);
counters = counters_per_cpu_to_total(counters);
on_each_cpu(reset_counters, (void *)(long)counters, 1);
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 6523d558ff5a..300591c6278d 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
ops-bcm63xx.o
obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
+obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o
obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 13eea696bbe7..d02eb9d16b55 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -469,7 +469,7 @@ static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
{
switch (bus->number) {
case PCIE_BUS_BRIDGE:
- return (PCI_SLOT(devfn) == 0);
+ return PCI_SLOT(devfn) == 0;
case PCIE_BUS_DEVICE:
if (PCI_SLOT(devfn) == 0)
return bcm_pcie_readl(PCIE_DLSTATUS_REG)
diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c
index a1a7c9f4096e..b9d1fd0ff7e2 100644
--- a/arch/mips/pci/ops-nile4.c
+++ b/arch/mips/pci/ops-nile4.c
@@ -13,8 +13,6 @@
volatile unsigned long *const vrc_pciregs = (void *) Vrc5074_BASE;
-static DEFINE_SPINLOCK(nile4_pci_lock);
-
static int nile4_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 *val)
{
@@ -76,7 +74,6 @@ static int nile4_pcibios_config_access(unsigned char access_type,
static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- unsigned long flags;
u32 data = 0;
int err;
@@ -85,11 +82,8 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
- spin_lock_irqsave(&nile4_pci_lock, flags);
err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
- &data);
- spin_unlock_irqrestore(&nile4_pci_lock, flags);
-
+ &data);
if (err)
return err;
@@ -106,7 +100,6 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- unsigned long flags;
u32 data = 0;
int err;
@@ -115,11 +108,8 @@ static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn,
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
- spin_lock_irqsave(&nile4_pci_lock, flags);
err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
&data);
- spin_unlock_irqrestore(&nile4_pci_lock, flags);
-
if (err)
return err;
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 50034f985be1..dd2d9f7e9412 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -193,8 +193,6 @@ static void pci_proc_init(void)
}
#endif /* CONFIG_PROC_FS && PCI_COUNTERS */
-static DEFINE_SPINLOCK(bpci_lock);
-
/*****************************************************************************
*
* STRUCT: pci_io_resource
@@ -368,7 +366,6 @@ int msp_pcibios_config_access(unsigned char access_type,
struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
unsigned char bus_num = bus->number;
unsigned char dev_fn = (unsigned char)devfn;
- unsigned long flags;
unsigned long intr;
unsigned long value;
static char pciirqflag;
@@ -401,10 +398,7 @@ int msp_pcibios_config_access(unsigned char access_type,
}
#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
- local_irq_save(flags);
vpe_status = dvpe();
-#else
- spin_lock_irqsave(&bpci_lock, flags);
#endif
/*
@@ -457,9 +451,6 @@ int msp_pcibios_config_access(unsigned char access_type,
#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
evpe(vpe_status);
- local_irq_restore(flags);
-#else
- spin_unlock_irqrestore(&bpci_lock, flags);
#endif
return -1;
@@ -467,9 +458,6 @@ int msp_pcibios_config_access(unsigned char access_type,
#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
evpe(vpe_status);
- local_irq_restore(flags);
-#else
- spin_unlock_irqrestore(&bpci_lock, flags);
#endif
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index c19600a03460..28952637a862 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -505,7 +505,6 @@ static struct platform_driver alchemy_pcictl_driver = {
.probe = alchemy_pci_probe,
.driver = {
.name = "alchemy-pci",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
new file mode 100644
index 000000000000..bd2b3b60da83
--- /dev/null
+++ b/arch/mips/pci/pci-ar2315.c
@@ -0,0 +1,511 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * Both the AR2315 and AR2316 chips have a PCI interface unit which supports
+ * DMA and interrupts. The PCI interface supports the MMIO access method, but
+ * does not seem to support I/O ports.
+ *
+ * Read/write operation in the region 0x80000000-0xBFFFFFFF causes
+ * a memory read/write command on the PCI bus. 30 LSBs of address on
+ * the bus are taken from memory read/write request and 2 MSBs are
+ * determined by PCI unit configuration.
+ *
+ * To work with the configuration space instead of memory, it is necessary to
+ * set the CFG_SEL bit in the PCI_MISC_CONFIG register.
+ *
+ * Devices on the bus can perform DMA requests via chip BAR1. PCI host
+ * controller BARs are programmed as if an external device were being
+ * programmed, which means that during configuration the IDSEL pin of the
+ * chip should be asserted.
+ *
+ * We know (and support) only one board that uses the PCI interface -
+ * the Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the
+ * AR2315 PCI bus. The IDSEL pin of the USB controller is connected to the
+ * AD[13] line, and the IDSEL pin of the AR2315 to the AD[16] line.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <asm/paccess.h>
+
+/*
+ * PCI Bus Interface Registers
+ */
+#define AR2315_PCI_1MS_REG 0x0008
+
+#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */
+
+#define AR2315_PCI_MISC_CONFIG 0x000c
+
+#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */
+#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */
+#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */
+#define AR2315_PCIMISC_RST_MODE 0x00000030
+#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */
+#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */
+#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */
+#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */
+#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */
+#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */
+#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */
+#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache
+ * disable */
+
+#define AR2315_PCI_OUT_TSTAMP 0x0010
+
+#define AR2315_PCI_UNCACHE_CFG 0x0014
+
+#define AR2315_PCI_IN_EN 0x0100
+
+#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */
+#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */
+#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */
+#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */
+
+#define AR2315_PCI_IN_DIS 0x0104
+
+#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */
+#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */
+#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */
+#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */
+
+#define AR2315_PCI_IN_PTR 0x0200
+
+#define AR2315_PCI_OUT_EN 0x0400
+
+#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */
+
+#define AR2315_PCI_OUT_DIS 0x0404
+
+#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */
+
+#define AR2315_PCI_OUT_PTR 0x0408
+
+/* PCI interrupt status (write one to clear) */
+#define AR2315_PCI_ISR 0x0500
+
+#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */
+#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */
+#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */
+#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */
+#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */
+#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */
+#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */
+#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */
+#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */
+#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */
+#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */
+#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */
+
+/* PCI interrupt mask */
+#define AR2315_PCI_IMR 0x0504
+
+/* Global PCI interrupt enable */
+#define AR2315_PCI_IER 0x0508
+
+#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */
+#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */
+
+#define AR2315_PCI_HOST_IN_EN 0x0800
+#define AR2315_PCI_HOST_IN_DIS 0x0804
+#define AR2315_PCI_HOST_IN_PTR 0x0810
+#define AR2315_PCI_HOST_OUT_EN 0x0900
+#define AR2315_PCI_HOST_OUT_DIS 0x0904
+#define AR2315_PCI_HOST_OUT_PTR 0x0908
+
+/*
+ * PCI interrupts, which share IP5
+ * Keep ordered according to AR2315_PCI_INT_XXX bits
+ */
+#define AR2315_PCI_IRQ_EXT 25
+#define AR2315_PCI_IRQ_ABORT 26
+#define AR2315_PCI_IRQ_COUNT 27
+
+/* Arbitrary size of memory region to access the configuration space */
+#define AR2315_PCI_CFG_SIZE 0x00100000
+
+#define AR2315_PCI_HOST_SLOT 3
+#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
+
+/* ??? access BAR */
+#define AR2315_PCI_HOST_MBAR0 0x10000000
+/* RAM access BAR */
+#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR
+/* ??? access BAR */
+#define AR2315_PCI_HOST_MBAR2 0x30000000
+
+struct ar2315_pci_ctrl {
+ void __iomem *cfg_mem;
+ void __iomem *mmr_mem;
+ unsigned irq;
+ unsigned irq_ext;
+ struct irq_domain *domain;
+ struct pci_controller pci_ctrl;
+ struct resource mem_res;
+ struct resource io_res;
+};
+
+static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl);
+}
+
+static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg)
+{
+ return __raw_readl(apc->mmr_mem + reg);
+}
+
+static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg,
+ u32 val)
+{
+ __raw_writel(val, apc->mmr_mem + reg);
+}
+
+static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg,
+ u32 mask, u32 val)
+{
+ u32 ret = ar2315_pci_reg_read(apc, reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar2315_pci_reg_write(apc, reg, ret);
+}
+
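+/*
+ * Generate a configuration access to the device at 'devfn': its IDSEL is
+ * driven via AD[13 + dev] (see the AD[13]/AD[16] wiring above), and the
+ * size-byte field at 'where' is extracted from or merged into the 32-bit
+ * configuration word.
+ */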
+static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn,
+ int where, int size, u32 *ptr, bool write)
+{
+ int func = PCI_FUNC(devfn);
+ int dev = PCI_SLOT(devfn);
+ u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3);
+ u32 mask = 0xffffffff >> 8 * (4 - size);
+ u32 sh = (where & 3) * 8;
+ u32 value, isr;
+
+ /* Prevent access past the remapped area */
+ if (addr >= AR2315_PCI_CFG_SIZE || dev > 18)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear pending errors */
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
+ /* Select Configuration access */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0,
+ AR2315_PCIMISC_CFG_SEL);
+
+ mb(); /* PCI must see space change before we begin */
+
+ value = __raw_readl(apc->cfg_mem + addr);
+
+ isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
+
+ if (isr & AR2315_PCI_INT_ABORT)
+ goto exit_err;
+
+ if (write) {
+ value = (value & ~(mask << sh)) | *ptr << sh;
+ __raw_writel(value, apc->cfg_mem + addr);
+ isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
+ if (isr & AR2315_PCI_INT_ABORT)
+ goto exit_err;
+ } else {
+ *ptr = (value >> sh) & mask;
+ }
+
+ goto exit;
+
+exit_err:
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
+ if (!write)
+ *ptr = 0xffffffff;
+
+exit:
+ /* Select Memory access */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL,
+ 0);
+
+ return isr & AR2315_PCI_INT_ABORT ? PCIBIOS_DEVICE_NOT_FOUND :
+ PCIBIOS_SUCCESSFUL;
+}
+
+static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc,
+ unsigned devfn, int where, u32 *val)
+{
+ return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val,
+ false);
+}
+
+static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc,
+ unsigned devfn, int where, u32 val)
+{
+ return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val,
+ true);
+}
+
+static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where,
+ int size, u32 *value)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
+
+ if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return ar2315_pci_cfg_access(apc, devfn, where, size, value, false);
+}
+
+static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where,
+ int size, u32 value)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
+
+ if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true);
+}
+
+static struct pci_ops ar2315_pci_ops = {
+ .read = ar2315_pci_cfg_read,
+ .write = ar2315_pci_cfg_write,
+};
+
+static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
+{
+ unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0);
+ int res;
+ u32 id;
+
+ res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id);
+ if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID)
+ return -ENODEV;
+
+ /* Program MBARs */
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0,
+ AR2315_PCI_HOST_MBAR0);
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1,
+ AR2315_PCI_HOST_MBAR1);
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2,
+ AR2315_PCI_HOST_MBAR2);
+
+ /* Run */
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
+
+ return 0;
+}
+
+static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct ar2315_pci_ctrl *apc = irq_get_handler_data(irq);
+ u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
+ ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
+ unsigned pci_irq = 0;
+
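+ /* Only the lowest pending, unmasked source is dispatched per invocation */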
+ if (pending)
+ pci_irq = irq_find_mapping(apc->domain, __ffs(pending));
+
+ if (pci_irq)
+ generic_handle_irq(pci_irq);
+ else
+ spurious_interrupt();
+}
+
+static void ar2315_pci_irq_mask(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0);
+}
+
+static void ar2315_pci_irq_mask_ack(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+ u32 m = BIT(d->hwirq);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0);
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m);
+}
+
+static void ar2315_pci_irq_unmask(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq));
+}
+
+static struct irq_chip ar2315_pci_irq_chip = {
+ .name = "AR2315-PCI",
+ .irq_mask = ar2315_pci_irq_mask,
+ .irq_mask_ack = ar2315_pci_irq_mask_ack,
+ .irq_unmask = ar2315_pci_irq_unmask,
+};
+
+static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static struct irq_domain_ops ar2315_pci_irq_domain_ops = {
+ .map = ar2315_pci_irq_map,
+};
+
+static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc)
+{
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0);
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT |
+ AR2315_PCI_INT_EXT), 0);
+
+ apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT);
+
+ irq_set_chained_handler(apc->irq, ar2315_pci_irq_handler);
+ irq_set_handler_data(apc->irq, apc);
+
+ /* Clear any pending Abort or external Interrupts
+ * and enable interrupt processing */
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT |
+ AR2315_PCI_INT_EXT);
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE);
+}
+
+static int ar2315_pci_probe(struct platform_device *pdev)
+{
+ struct ar2315_pci_ctrl *apc;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, err;
+
+ apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+ apc->irq = irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ar2315-pci-ctrl");
+ apc->mmr_mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(apc->mmr_mem))
+ return PTR_ERR(apc->mmr_mem);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ar2315-pci-ext");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.name = "AR2315 PCI mem space";
+ apc->mem_res.parent = res;
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ /* Remap PCI config space */
+ apc->cfg_mem = devm_ioremap_nocache(dev, res->start,
+ AR2315_PCI_CFG_SIZE);
+ if (!apc->cfg_mem) {
+ dev_err(dev, "failed to remap PCI config space\n");
+ return -ENOMEM;
+ }
+
+ /* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
+ AR2315_PCIMISC_RST_MODE,
+ AR2315_PCIRST_LOW);
+ msleep(100);
+
+ /* Bring the PCI out of reset */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
+ AR2315_PCIMISC_RST_MODE,
+ AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8);
+
+ ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG,
+ 0x1E | /* 1GB uncached */
+ (1 << 5) | /* Enable uncached */
+ (0x2 << 30) /* Base: 0x80000000 */);
+ ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG);
+
+ msleep(500);
+
+ err = ar2315_pci_host_setup(apc);
+ if (err)
+ return err;
+
+ apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT,
+ &ar2315_pci_irq_domain_ops, apc);
+ if (!apc->domain) {
+ dev_err(dev, "failed to add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ ar2315_pci_irq_init(apc);
+
+ /* PCI controller does not support I/O ports */
+ apc->io_res.name = "AR2315 IO space";
+ apc->io_res.start = 0;
+ apc->io_res.end = 0;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ apc->pci_ctrl.pci_ops = &ar2315_pci_ops;
+ apc->pci_ctrl.mem_resource = &apc->mem_res;
+ apc->pci_ctrl.io_resource = &apc->io_res;
+
+ register_pci_controller(&apc->pci_ctrl);
+
+ dev_info(dev, "registered PCI controller\n");
+
+ return 0;
+}
+
+static struct platform_driver ar2315_pci_driver = {
+ .probe = ar2315_pci_probe,
+ .driver = {
+ .name = "ar2315-pci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar2315_pci_init(void)
+{
+ return platform_driver_register(&ar2315_pci_driver);
+}
+arch_initcall(ar2315_pci_init);
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus);
+
+ return slot ? 0 : apc->irq_ext;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index d471a26dd5f8..9e62ad31d4b5 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -50,7 +50,6 @@
struct ar71xx_pci_controller {
void __iomem *cfg_base;
- spinlock_t lock;
int irq;
int irq_base;
struct pci_controller pci_ctrl;
@@ -182,7 +181,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
{
struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
void __iomem *base = apc->cfg_base;
- unsigned long flags;
u32 data;
int err;
int ret;
@@ -190,8 +188,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
ret = PCIBIOS_SUCCESSFUL;
data = ~0;
- spin_lock_irqsave(&apc->lock, flags);
-
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_READ);
if (err)
@@ -199,8 +195,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
else
data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
- spin_unlock_irqrestore(&apc->lock, flags);
-
*value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
return ret;
@@ -211,15 +205,12 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
{
struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
void __iomem *base = apc->cfg_base;
- unsigned long flags;
int err;
int ret;
value = value << (8 * (where & 3));
ret = PCIBIOS_SUCCESSFUL;
- spin_lock_irqsave(&apc->lock, flags);
-
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_WRITE);
if (err)
@@ -227,8 +218,6 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
else
__raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
- spin_unlock_irqrestore(&apc->lock, flags);
-
return ret;
}
@@ -360,8 +349,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
if (!apc)
return -ENOMEM;
- spin_lock_init(&apc->lock);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(apc->cfg_base))
@@ -416,7 +403,6 @@ static struct platform_driver ar71xx_pci_driver = {
.probe = ar71xx_pci_probe,
.driver = {
.name = "ar71xx-pci",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 785b2659b519..a1b7d2a1b0d5 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -9,7 +9,6 @@
* by the Free Software Foundation.
*/
-#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/module.h>
@@ -48,8 +47,6 @@ struct ar724x_pci_controller {
bool bar0_is_cached;
u32 bar0_value;
- spinlock_t lock;
-
struct pci_controller pci_controller;
struct resource io_res;
struct resource mem_res;
@@ -75,7 +72,6 @@ pci_bus_to_ar724x_controller(struct pci_bus *bus)
static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
int where, int size, u32 value)
{
- unsigned long flags;
void __iomem *base;
u32 data;
int s;
@@ -86,8 +82,6 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
return PCIBIOS_DEVICE_NOT_FOUND;
base = apc->crp_base;
-
- spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -105,14 +99,12 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
data = value;
break;
default:
- spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
- spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -121,7 +113,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
{
struct ar724x_pci_controller *apc;
- unsigned long flags;
void __iomem *base;
u32 data;
@@ -133,8 +124,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_DEVICE_NOT_FOUND;
base = apc->devcfg_base;
-
- spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -153,13 +142,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
case 4:
break;
default:
- spin_unlock_irqrestore(&apc->lock, flags);
-
return PCIBIOS_BAD_REGISTER_NUMBER;
}
- spin_unlock_irqrestore(&apc->lock, flags);
-
if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
apc->bar0_is_cached) {
/* use the cached value */
@@ -175,7 +160,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t value)
{
struct ar724x_pci_controller *apc;
- unsigned long flags;
void __iomem *base;
u32 data;
int s;
@@ -209,8 +193,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
}
base = apc->devcfg_base;
-
- spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -228,15 +210,12 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
data = value;
break;
default:
- spin_unlock_irqrestore(&apc->lock, flags);
-
return PCIBIOS_BAD_REGISTER_NUMBER;
}
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
- spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -380,8 +359,6 @@ static int ar724x_pci_probe(struct platform_device *pdev)
if (apc->irq < 0)
return -EINVAL;
- spin_lock_init(&apc->lock);
-
res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
if (!res)
return -EINVAL;
@@ -423,7 +400,6 @@ static struct platform_driver ar724x_pci_driver = {
.probe = ar724x_pci_probe,
.driver = {
.name = "ar724x-pci",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index d3ed15b2b2d1..8b117e638306 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -242,7 +242,6 @@ static struct platform_driver ltq_pci_driver = {
.probe = ltq_pci_probe,
.driver = {
.name = "pci-xway",
- .owner = THIS_MODULE,
.of_match_table = ltq_pci_match,
},
};
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 59cccd95688b..d07e04121cc6 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -708,7 +708,7 @@ static int __init octeon_pci_setup(void)
if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
-1, NULL, 0)))
- pr_err("Registation of co_pci_edac failed!\n");
+ pr_err("Registration of co_pci_edac failed!\n");
octeon_pci_dma_init();
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
new file mode 100644
index 000000000000..a4574947e698
--- /dev/null
+++ b/arch/mips/pci/pci-rt2880.c
@@ -0,0 +1,285 @@
+/*
+ * Ralink RT288x SoC PCI register definitions
+ *
+ * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+#include <asm/mach-ralink/rt288x.h>
+
+#define RT2880_PCI_BASE 0x00440000
+#define RT288X_CPU_IRQ_PCI 4
+
+#define RT2880_PCI_MEM_BASE 0x20000000
+#define RT2880_PCI_MEM_SIZE 0x10000000
+#define RT2880_PCI_IO_BASE 0x00460000
+#define RT2880_PCI_IO_SIZE 0x00010000
+
+#define RT2880_PCI_REG_PCICFG_ADDR 0x00
+#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c
+#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10
+#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18
+#define RT2880_PCI_REG_CONFIG_ADDR 0x20
+#define RT2880_PCI_REG_CONFIG_DATA 0x24
+#define RT2880_PCI_REG_MEMBASE 0x28
+#define RT2880_PCI_REG_IOBASE 0x2c
+#define RT2880_PCI_REG_ID 0x30
+#define RT2880_PCI_REG_CLASS 0x34
+#define RT2880_PCI_REG_SUBID 0x38
+#define RT2880_PCI_REG_ARBCTL 0x80
+
+static void __iomem *rt2880_pci_base;
+static DEFINE_SPINLOCK(rt2880_pci_lock);
+
+static u32 rt2880_pci_reg_read(u32 reg)
+{
+ return readl(rt2880_pci_base + reg);
+}
+
+static void rt2880_pci_reg_write(u32 val, u32 reg)
+{
+ writel(val, rt2880_pci_base + reg);
+}
+
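+/*
+ * Layout of RT2880_PCI_REG_CONFIG_ADDR: bit 31 enables the configuration
+ * cycle, bits 23:16 hold the bus number, 15:11 the device, 10:8 the
+ * function and 7:2 the dword-aligned register offset.
+ */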
+static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+ unsigned int func, unsigned int where)
+{
+ return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) |
+ 0x80000000);
+}
+
+static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ unsigned long flags;
+ u32 address;
+ u32 data;
+
+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ switch (size) {
+ case 1:
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ break;
+ case 2:
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ break;
+ case 4:
+ *val = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ unsigned long flags;
+ u32 address;
+ u32 data;
+
+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+
+ switch (size) {
+ case 1:
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 2:
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+
+ rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rt2880_pci_ops = {
+ .read = rt2880_pci_config_read,
+ .write = rt2880_pci_config_write,
+};
+
+static struct resource rt2880_pci_mem_resource = {
+ .name = "PCI MEM space",
+ .start = RT2880_PCI_MEM_BASE,
+ .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource rt2880_pci_io_resource = {
+ .name = "PCI IO space",
+ .start = RT2880_PCI_IO_BASE,
+ .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1,
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller rt2880_pci_controller = {
+ .pci_ops = &rt2880_pci_ops,
+ .mem_resource = &rt2880_pci_mem_resource,
+ .io_resource = &rt2880_pci_io_resource,
+};
+
+static inline u32 rt2880_pci_read_u32(unsigned long reg)
+{
+ unsigned long flags;
+ u32 address;
+ u32 ret;
+
+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ return ret;
+}
+
+static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
+{
+ unsigned long flags;
+ u32 address;
+
+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ u16 cmd;
+ int irq = -1;
+
+ if (dev->bus->number != 0)
+ return irq;
+
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0x00:
+ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
+ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+ break;
+ case 0x11:
+ irq = RT288X_CPU_IRQ_PCI;
+ break;
+ default:
+ pr_err("%s:%s[%d] trying to alloc unknown pci irq\n",
+ __FILE__, __func__, __LINE__);
+ BUG();
+ break;
+ }
+
+ pci_write_config_byte((struct pci_dev *) dev,
+ PCI_CACHE_LINE_SIZE, 0x14);
+ pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
+ pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
+ PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
+ pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
+ pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
+ dev->irq);
+ return irq;
+}
+
+static int rt288x_pci_probe(struct platform_device *pdev)
+{
+ void __iomem *io_map_base;
+ int i;
+
+ rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
+
+ io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE);
+ rt2880_pci_controller.io_map_base = (unsigned long) io_map_base;
+ set_io_port_base((unsigned long) io_map_base);
+
+ ioport_resource.start = RT2880_PCI_IO_BASE;
+ ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
+
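+ /* Clear PCICFG, then busy-wait briefly before programming the controller */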
+ rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
+ for (i = 0; i < 0xfffff; i++)
+ ;
+
+ rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
+ rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
+ rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE);
+ rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE);
+ rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR);
+ rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID);
+ rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS);
+ rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID);
+ rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR);
+
+ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
+ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+
+ register_pci_controller(&rt2880_pci_controller);
+ return 0;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static const struct of_device_id rt288x_pci_match[] = {
+ { .compatible = "ralink,rt288x-pci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt288x_pci_match);
+
+static struct platform_driver rt288x_pci_driver = {
+ .probe = rt288x_pci_probe,
+ .driver = {
+ .name = "rt288x-pci",
+ .owner = THIS_MODULE,
+ .of_match_table = rt288x_pci_match,
+ },
+};
+
+int __init pcibios_init(void)
+{
+ int ret = platform_driver_register(&rt288x_pci_driver);
+
+ if (ret)
+ pr_err("rt288x-pci: Error registering platform driver!\n");
+
+ return ret;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 72919aeef42b..ec9be8ca4ada 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -61,7 +61,6 @@
struct rt3883_pci_controller {
void __iomem *base;
- spinlock_t lock;
struct device_node *intc_of_node;
struct irq_domain *irq_domain;
@@ -111,10 +110,8 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
- spin_lock_irqsave(&rpc->lock, flags);
rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
- spin_unlock_irqrestore(&rpc->lock, flags);
return ret;
}
@@ -128,10 +125,8 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
- spin_lock_irqsave(&rpc->lock, flags);
rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
- spin_unlock_irqrestore(&rpc->lock, flags);
}
static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -252,10 +247,8 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn,
address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
- spin_lock_irqsave(&rpc->lock, flags);
rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
- spin_unlock_irqrestore(&rpc->lock, flags);
switch (size) {
case 1:
@@ -288,7 +281,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
- spin_lock_irqsave(&rpc->lock, flags);
rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
@@ -307,7 +299,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
}
rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA);
- spin_unlock_irqrestore(&rpc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -598,7 +589,6 @@ static struct platform_driver rt3883_pci_driver = {
.probe = rt3883_pci_probe,
.driver = {
.name = "rt3883-pci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rt3883_pci_ids),
},
};
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c
index c10fbf2a19dc..cd8ed09c4f53 100644
--- a/arch/mips/pci/pci-tx4939.c
+++ b/arch/mips/pci/pci-tx4939.c
@@ -103,5 +103,5 @@ void __init tx4939_setup_pcierr_irq(void)
tx4927_pcierr_interrupt,
0, "PCI error",
(void *)TX4939_PCIC_REG))
- pr_warning("Failed to request irq for PCIERR\n");
+ pr_warn("Failed to request irq for PCIERR\n");
}
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index 1c9897531660..ef620a4c82a5 100644
--- a/arch/mips/pmcs-msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -295,7 +295,7 @@ char *prom_getenv(char *env_name)
while (*var) {
if (strncmp(env_name, *var, i) == 0) {
- return (*var + strlen(env_name) + 1);
+ return *var + strlen(env_name) + 1;
}
var++;
}
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 77e8a9620e18..b1c52ca580f9 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -16,6 +16,7 @@ choice
config SOC_RT288X
bool "RT288x"
select MIPS_L1_CACHE_SHIFT_4
+ select HW_HAS_PCI
config SOC_RT305X
bool "RT305x"
@@ -26,7 +27,7 @@ choice
select HW_HAS_PCI
config SOC_MT7620
- bool "MT7620"
+ bool "MT7620/8"
endchoice
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 2c09c8aa0ae2..a6c9d0061326 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -10,9 +10,13 @@ obj-y := prom.o of.o reset.o clk.o irq.o timer.o
obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o
+obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o
+
obj-$(CONFIG_SOC_RT288X) += rt288x.o
obj-$(CONFIG_SOC_RT305X) += rt305x.o
obj-$(CONFIG_SOC_RT3883) += rt3883.o
obj-$(CONFIG_SOC_MT7620) += mt7620.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_DEBUG_FS) += bootrom.o
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
new file mode 100644
index 000000000000..5403468394fb
--- /dev/null
+++ b/arch/mips/ralink/bootrom.c
@@ -0,0 +1,48 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define BOOTROM_OFFSET 0x10118000
+#define BOOTROM_SIZE 0x8000
+
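+/* The 32 KiB boot ROM is mapped uncached via KSEG1 and dumped via debugfs */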
+static void __iomem *membase = (void __iomem *) KSEG1ADDR(BOOTROM_OFFSET);
+
+static int bootrom_show(struct seq_file *s, void *unused)
+{
+ seq_write(s, membase, BOOTROM_SIZE);
+
+ return 0;
+}
+
+static int bootrom_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bootrom_show, NULL);
+}
+
+static const struct file_operations bootrom_file_ops = {
+ .open = bootrom_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int bootrom_setup(void)
+{
+ if (!debugfs_create_file("bootrom", 0444,
+ NULL, NULL, &bootrom_file_ops)) {
+ pr_err("Failed to create bootrom debugfs file\n");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+postcore_initcall(bootrom_setup);
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index 5d0983d47161..feb5a9bf98b4 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -56,6 +56,12 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return -1;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
void __init plat_time_init(void)
{
struct clk *clk;
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 42dfd6100a2d..8e7d8e618fb9 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -11,25 +11,6 @@
#define RAMIPS_SYS_TYPE_LEN 32
-struct ralink_pinmux_grp {
- const char *name;
- u32 mask;
- int gpio_first;
- int gpio_last;
-};
-
-struct ralink_pinmux {
- struct ralink_pinmux_grp *mode;
- struct ralink_pinmux_grp *uart;
- int uart_shift;
- u32 uart_mask;
- void (*wdt_reset)(void);
- struct ralink_pinmux_grp *pci;
- int pci_shift;
- u32 pci_mask;
-};
-extern struct ralink_pinmux rt_gpio_pinmux;
-
struct ralink_soc_info {
unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
unsigned char *compatible;
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index b46d0419d09b..255d695ec8c6 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -12,21 +12,24 @@
#include <asm/addrspace.h>
#ifdef CONFIG_SOC_RT288X
-#define EARLY_UART_BASE 0x300c00
+#define EARLY_UART_BASE 0x300c00
+#define CHIPID_BASE 0x300004
+#elif defined(CONFIG_SOC_MT7621)
+#define EARLY_UART_BASE 0x1E000c00
+#define CHIPID_BASE 0x1E000004
#else
-#define EARLY_UART_BASE 0x10000c00
+#define EARLY_UART_BASE 0x10000c00
+#define CHIPID_BASE 0x10000004
#endif
-#define UART_REG_RX 0x00
-#define UART_REG_TX 0x04
-#define UART_REG_IER 0x08
-#define UART_REG_IIR 0x0c
-#define UART_REG_FCR 0x10
-#define UART_REG_LCR 0x14
-#define UART_REG_MCR 0x18
-#define UART_REG_LSR 0x1c
+#define MT7628_CHIP_NAME1 0x20203832
+
+#define UART_REG_TX 0x04
+#define UART_REG_LSR 0x14
+#define UART_REG_LSR_RT2880 0x1c
static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
+static __iomem void *chipid_membase = (__iomem void *) KSEG1ADDR(CHIPID_BASE);
static inline void uart_w32(u32 val, unsigned reg)
{
@@ -38,11 +41,23 @@ static inline u32 uart_r32(unsigned reg)
return __raw_readl(uart_membase + reg);
}
+static inline int soc_is_mt7628(void)
+{
+ return IS_ENABLED(CONFIG_SOC_MT7620) &&
+ (__raw_readl(chipid_membase) == MT7628_CHIP_NAME1);
+}
+
void prom_putchar(unsigned char ch)
{
- while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
- ;
- uart_w32(ch, UART_REG_TX);
- while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
- ;
+ if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
+ uart_w32(ch, UART_TX);
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+ } else {
+ while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
+ ;
+ uart_w32(ch, UART_REG_TX);
+ while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
+ ;
+ }
}
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
new file mode 100644
index 000000000000..e20b02e3ae28
--- /dev/null
+++ b/arch/mips/ralink/ill_acc.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define REG_ILL_ACC_ADDR 0x10
+#define REG_ILL_ACC_TYPE 0x14
+
+#define ILL_INT_STATUS BIT(31)
+#define ILL_ACC_WRITE BIT(30)
+#define ILL_ACC_LEN_M 0xff
+#define ILL_ACC_OFF_M 0xf
+#define ILL_ACC_OFF_S 16
+#define ILL_ACC_ID_M 0x7
+#define ILL_ACC_ID_S 8
+
+#define DRV_NAME "ill_acc"
+
+static const char * const ill_acc_ids[] = {
+ "cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb",
+};
+
+static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
+{
+ struct device *dev = (struct device *) _priv;
+ u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR);
+ u32 type = rt_memc_r32(REG_ILL_ACC_TYPE);
+
+ dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n",
+ (type & ILL_ACC_WRITE) ? ("write") : ("read"),
+ ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M],
+ addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
+ type & ILL_ACC_LEN_M);
+
+ rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ill_acc_of_setup(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ int irq;
+
+ /* somehow this driver breaks on RT5350 */
+ if (of_machine_is_compatible("ralink,rt5350-soc"))
+ return -EINVAL;
+
+ np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc");
+ if (!np)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("%s: failed to lookup pdev\n", np->name);
+ return -EINVAL;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return -EINVAL;
+ }
+
+ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
+
+ dev_info(&pdev->dev, "irq registered\n");
+
+ return 0;
+}
+
+arch_initcall(ill_acc_of_setup);
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 781b3d14a489..7cf91b92e9d1 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -20,14 +20,6 @@
#include "common.h"
-/* INTC register offsets */
-#define INTC_REG_STATUS0 0x00
-#define INTC_REG_STATUS1 0x04
-#define INTC_REG_TYPE 0x20
-#define INTC_REG_RAW_STATUS 0x30
-#define INTC_REG_ENABLE 0x34
-#define INTC_REG_DISABLE 0x38
-
#define INTC_INT_GLOBAL BIT(31)
#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
@@ -44,16 +36,36 @@
#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
+enum rt_intc_regs_enum {
+ INTC_REG_STATUS0 = 0,
+ INTC_REG_STATUS1,
+ INTC_REG_TYPE,
+ INTC_REG_RAW_STATUS,
+ INTC_REG_ENABLE,
+ INTC_REG_DISABLE,
+};
+
+static u32 rt_intc_regs[] = {
+ [INTC_REG_STATUS0] = 0x00,
+ [INTC_REG_STATUS1] = 0x04,
+ [INTC_REG_TYPE] = 0x20,
+ [INTC_REG_RAW_STATUS] = 0x30,
+ [INTC_REG_ENABLE] = 0x34,
+ [INTC_REG_DISABLE] = 0x38,
+};
+
static void __iomem *rt_intc_membase;
+static int rt_perfcount_irq;
+
static inline void rt_intc_w32(u32 val, unsigned reg)
{
- __raw_writel(val, rt_intc_membase + reg);
+ __raw_writel(val, rt_intc_membase + rt_intc_regs[reg]);
}
static inline u32 rt_intc_r32(unsigned reg)
{
- return __raw_readl(rt_intc_membase + reg);
+ return __raw_readl(rt_intc_membase + rt_intc_regs[reg]);
}
static void ralink_intc_irq_unmask(struct irq_data *d)
@@ -73,6 +85,11 @@ static struct irq_chip ralink_intc_irq_chip = {
.irq_mask_ack = ralink_intc_irq_mask,
};
+int get_c0_perfcount_int(void)
+{
+ return rt_perfcount_irq;
+}
+
unsigned int get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
@@ -134,6 +151,10 @@ static int __init intc_of_init(struct device_node *node,
struct irq_domain *domain;
int irq;
+ if (!of_property_read_u32_array(node, "ralink,intc-registers",
+ rt_intc_regs, 6))
+ pr_info("intc: using register map from devicetree\n");
+
irq = irq_of_parse_and_map(node, 0);
if (!irq)
panic("Failed to get INTC IRQ");
@@ -167,13 +188,13 @@ static int __init intc_of_init(struct device_node *node,
irq_set_handler_data(irq, domain);
/* tell the kernel which irq is used for performance monitoring */
- cp0_perfcount_irq = irq_create_mapping(domain, 9);
+ rt_perfcount_irq = irq_create_mapping(domain, 9);
return 0;
}
static struct of_device_id __initdata of_irq_ids[] = {
- { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
{ .compatible = "ralink,rt2880-intc", .data = intc_of_init },
{},
};
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index a3ad56c2372d..2ea5ff6dc22e 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -17,124 +17,214 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
+#include <asm/mach-ralink/pinmux.h>
#include "common.h"
+/* analog */
+#define PMU0_CFG 0x88
+#define PMU_SW_SET BIT(28)
+#define A_DCDC_EN BIT(24)
+#define A_SSC_PERI BIT(19)
+#define A_SSC_GEN BIT(18)
+#define A_SSC_M 0x3
+#define A_SSC_S 16
+#define A_DLY_M 0x7
+#define A_DLY_S 8
+#define A_VTUNE_M 0xff
+
+/* digital */
+#define PMU1_CFG 0x8C
+#define DIG_SW_SEL BIT(25)
+
+/* is this an MT7620 or an MT7628 */
+enum mt762x_soc_type mt762x_soc;
+
/* does the board have sdram or ddram */
static int dram_type;
-static struct ralink_pinmux_grp mode_mux[] = {
- {
- .name = "i2c",
- .mask = MT7620_GPIO_MODE_I2C,
- .gpio_first = 1,
- .gpio_last = 2,
- }, {
- .name = "spi",
- .mask = MT7620_GPIO_MODE_SPI,
- .gpio_first = 3,
- .gpio_last = 6,
- }, {
- .name = "uartlite",
- .mask = MT7620_GPIO_MODE_UART1,
- .gpio_first = 15,
- .gpio_last = 16,
- }, {
- .name = "wdt",
- .mask = MT7620_GPIO_MODE_WDT,
- .gpio_first = 17,
- .gpio_last = 17,
- }, {
- .name = "mdio",
- .mask = MT7620_GPIO_MODE_MDIO,
- .gpio_first = 22,
- .gpio_last = 23,
- }, {
- .name = "rgmii1",
- .mask = MT7620_GPIO_MODE_RGMII1,
- .gpio_first = 24,
- .gpio_last = 35,
- }, {
- .name = "spi refclk",
- .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
- .gpio_first = 37,
- .gpio_last = 39,
- }, {
- .name = "jtag",
- .mask = MT7620_GPIO_MODE_JTAG,
- .gpio_first = 40,
- .gpio_last = 44,
- }, {
- /* shared lines with jtag */
- .name = "ephy",
- .mask = MT7620_GPIO_MODE_EPHY,
- .gpio_first = 40,
- .gpio_last = 44,
- }, {
- .name = "nand",
- .mask = MT7620_GPIO_MODE_JTAG,
- .gpio_first = 45,
- .gpio_last = 59,
- }, {
- .name = "rgmii2",
- .mask = MT7620_GPIO_MODE_RGMII2,
- .gpio_first = 60,
- .gpio_last = 71,
- }, {
- .name = "wled",
- .mask = MT7620_GPIO_MODE_WLED,
- .gpio_first = 72,
- .gpio_last = 72,
- }, {0}
+static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
+static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
+static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
+static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
+static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
+static struct rt2880_pmx_func uartf_grp[] = {
+ FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
+};
+static struct rt2880_pmx_func wdt_grp[] = {
+ FUNC("wdt rst", 0, 17, 1),
+ FUNC("wdt refclk", 0, 17, 1),
+ };
+static struct rt2880_pmx_func pcie_rst_grp[] = {
+ FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
+ FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
+};
+static struct rt2880_pmx_func nd_sd_grp[] = {
+ FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
+ FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
+};
+
+static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
+ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
+ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
+ MT7620_GPIO_MODE_UART0_SHIFT),
+ GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
+ GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
+ MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
+ GRP("mdio", mdio_grp, 1, MT7620_GPIO_MODE_MDIO),
+ GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
+ GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
+ GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
+ MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
+ GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
+ MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
+ GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
+ GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
+ GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
+ GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
+ { 0 }
+};
+
+static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
+ FUNC("sdcx", 3, 19, 1),
+ FUNC("utif", 2, 19, 1),
+ FUNC("gpio", 1, 19, 1),
+ FUNC("pwm", 0, 19, 1),
+};
+
+static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
+ FUNC("sdcx", 3, 18, 1),
+ FUNC("utif", 2, 18, 1),
+ FUNC("gpio", 1, 18, 1),
+ FUNC("pwm", 0, 18, 1),
+};
+
+static struct rt2880_pmx_func uart2_grp_mt7628[] = {
+ FUNC("sdcx", 3, 20, 2),
+ FUNC("pwm", 2, 20, 2),
+ FUNC("gpio", 1, 20, 2),
+ FUNC("uart", 0, 20, 2),
+};
+
+static struct rt2880_pmx_func uart1_grp_mt7628[] = {
+ FUNC("sdcx", 3, 45, 2),
+ FUNC("pwm", 2, 45, 2),
+ FUNC("gpio", 1, 45, 2),
+ FUNC("uart", 0, 45, 2),
+};
+
+static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+ FUNC("-", 3, 4, 2),
+ FUNC("debug", 2, 4, 2),
+ FUNC("gpio", 1, 4, 2),
+ FUNC("i2c", 0, 4, 2),
+};
+
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+
+static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
+ FUNC("jtag", 3, 22, 8),
+ FUNC("utif", 2, 22, 8),
+ FUNC("gpio", 1, 22, 8),
+ FUNC("sdcx", 0, 22, 8),
+};
+
+static struct rt2880_pmx_func uart0_grp_mt7628[] = {
+ FUNC("-", 3, 12, 2),
+ FUNC("-", 2, 12, 2),
+ FUNC("gpio", 1, 12, 2),
+ FUNC("uart", 0, 12, 2),
+};
+
+static struct rt2880_pmx_func i2s_grp_mt7628[] = {
+ FUNC("antenna", 3, 0, 4),
+ FUNC("pcm", 2, 0, 4),
+ FUNC("gpio", 1, 0, 4),
+ FUNC("i2s", 0, 0, 4),
+};
+
+static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
+ FUNC("-", 3, 6, 1),
+ FUNC("refclk", 2, 6, 1),
+ FUNC("gpio", 1, 6, 1),
+ FUNC("spi", 0, 6, 1),
+};
+
+static struct rt2880_pmx_func spis_grp_mt7628[] = {
+ FUNC("pwm", 3, 14, 4),
+ FUNC("util", 2, 14, 4),
+ FUNC("gpio", 1, 14, 4),
+ FUNC("spis", 0, 14, 4),
};
-static struct ralink_pinmux_grp uart_mux[] = {
- {
- .name = "uartf",
- .mask = MT7620_GPIO_MODE_UARTF,
- .gpio_first = 7,
- .gpio_last = 14,
- }, {
- .name = "pcm uartf",
- .mask = MT7620_GPIO_MODE_PCM_UARTF,
- .gpio_first = 7,
- .gpio_last = 14,
- }, {
- .name = "pcm i2s",
- .mask = MT7620_GPIO_MODE_PCM_I2S,
- .gpio_first = 7,
- .gpio_last = 14,
- }, {
- .name = "i2s uartf",
- .mask = MT7620_GPIO_MODE_I2S_UARTF,
- .gpio_first = 7,
- .gpio_last = 14,
- }, {
- .name = "pcm gpio",
- .mask = MT7620_GPIO_MODE_PCM_GPIO,
- .gpio_first = 11,
- .gpio_last = 14,
- }, {
- .name = "gpio uartf",
- .mask = MT7620_GPIO_MODE_GPIO_UARTF,
- .gpio_first = 7,
- .gpio_last = 10,
- }, {
- .name = "gpio i2s",
- .mask = MT7620_GPIO_MODE_GPIO_I2S,
- .gpio_first = 7,
- .gpio_last = 10,
- }, {
- .name = "gpio",
- .mask = MT7620_GPIO_MODE_GPIO,
- }, {0}
+static struct rt2880_pmx_func gpio_grp_mt7628[] = {
+ FUNC("pcie", 3, 11, 1),
+ FUNC("refclk", 2, 11, 1),
+ FUNC("gpio", 1, 11, 1),
+ FUNC("gpio", 0, 11, 1),
};
-struct ralink_pinmux rt_gpio_pinmux = {
- .mode = mode_mux,
- .uart = uart_mux,
- .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
- .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+#define MT7628_GPIO_MODE_MASK 0x3
+
+#define MT7628_GPIO_MODE_PWM1 30
+#define MT7628_GPIO_MODE_PWM0 28
+#define MT7628_GPIO_MODE_UART2 26
+#define MT7628_GPIO_MODE_UART1 24
+#define MT7628_GPIO_MODE_I2C 20
+#define MT7628_GPIO_MODE_REFCLK 18
+#define MT7628_GPIO_MODE_PERST 16
+#define MT7628_GPIO_MODE_WDT 14
+#define MT7628_GPIO_MODE_SPI 12
+#define MT7628_GPIO_MODE_SDMODE 10
+#define MT7628_GPIO_MODE_UART0 8
+#define MT7628_GPIO_MODE_I2S 6
+#define MT7628_GPIO_MODE_CS1 4
+#define MT7628_GPIO_MODE_SPIS 2
+#define MT7628_GPIO_MODE_GPIO 0
+
+static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
+ GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_PWM1),
+ GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_PWM0),
+ GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART2),
+ GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART1),
+ GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_I2C),
+ GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
+ GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
+ GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
+ GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
+ GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_SDMODE),
+ GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART0),
+ GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_I2S),
+ GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_CS1),
+ GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_SPIS),
+ GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_GPIO),
+ { 0 }
};
static __init u32
@@ -287,29 +377,42 @@ void __init ralink_clk_init(void)
xtal_rate = mt7620_get_xtal_rate();
- cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate);
- pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate);
-
- cpu_rate = mt7620_get_cpu_rate(pll_rate);
- dram_rate = mt7620_get_dram_rate(pll_rate);
- sys_rate = mt7620_get_sys_rate(cpu_rate);
- periph_rate = mt7620_get_periph_rate(xtal_rate);
-
#define RFMT(label) label ":%lu.%03luMHz "
#define RINT(x) ((x) / 1000000)
#define RFRAC(x) (((x) / 1000) % 1000)
- pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"),
- RINT(xtal_rate), RFRAC(xtal_rate),
- RINT(cpu_pll_rate), RFRAC(cpu_pll_rate),
- RINT(pll_rate), RFRAC(pll_rate));
+ if (mt762x_soc == MT762X_SOC_MT7628AN) {
+ if (xtal_rate == MHZ(40))
+ cpu_rate = MHZ(580);
+ else
+ cpu_rate = MHZ(575);
+ dram_rate = sys_rate = cpu_rate / 3;
+ periph_rate = MHZ(40);
+
+ ralink_clk_add("10000d00.uartlite", periph_rate);
+ ralink_clk_add("10000e00.uartlite", periph_rate);
+ } else {
+ cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate);
+ pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate);
+
+ cpu_rate = mt7620_get_cpu_rate(pll_rate);
+ dram_rate = mt7620_get_dram_rate(pll_rate);
+ sys_rate = mt7620_get_sys_rate(cpu_rate);
+ periph_rate = mt7620_get_periph_rate(xtal_rate);
+
+ pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"),
+ RINT(xtal_rate), RFRAC(xtal_rate),
+ RINT(cpu_pll_rate), RFRAC(cpu_pll_rate),
+ RINT(pll_rate), RFRAC(pll_rate));
+
+ ralink_clk_add("10000500.uart", periph_rate);
+ }
pr_debug(RFMT("CPU") RFMT("DRAM") RFMT("SYS") RFMT("PERIPH"),
RINT(cpu_rate), RFRAC(cpu_rate),
RINT(dram_rate), RFRAC(dram_rate),
RINT(sys_rate), RFRAC(sys_rate),
RINT(periph_rate), RFRAC(periph_rate));
-
#undef RFRAC
#undef RINT
#undef RFMT
@@ -317,9 +420,9 @@ void __init ralink_clk_init(void)
ralink_clk_add("cpu", cpu_rate);
ralink_clk_add("10000100.timer", periph_rate);
ralink_clk_add("10000120.watchdog", periph_rate);
- ralink_clk_add("10000500.uart", periph_rate);
ralink_clk_add("10000b00.spi", sys_rate);
ralink_clk_add("10000c00.uartlite", periph_rate);
+ ralink_clk_add("10180000.wmac", xtal_rate);
}
void __init ralink_of_remap(void)
@@ -331,6 +434,52 @@ void __init ralink_of_remap(void)
panic("Failed to remap core resources");
}
+static __init void
+mt7620_dram_init(struct ralink_soc_info *soc_info)
+{
+ switch (dram_type) {
+ case SYSCFG0_DRAM_TYPE_SDRAM:
+ pr_info("Board has SDRAM\n");
+ soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR1:
+ pr_info("Board has DDR1\n");
+ soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR2:
+ pr_info("Board has DDR2\n");
+ soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static __init void
+mt7628_dram_init(struct ralink_soc_info *soc_info)
+{
+ switch (dram_type) {
+ case SYSCFG0_DRAM_TYPE_DDR1_MT7628:
+ pr_info("Board has DDR1\n");
+ soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR2_MT7628:
+ pr_info("Board has DDR2\n");
+ soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+ break;
+ default:
+ BUG();
+ }
+}
+
void prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
@@ -339,22 +488,36 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
u32 n1;
u32 rev;
u32 cfg0;
+ u32 pmu0;
+ u32 pmu1;
+ u32 bga;
n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
-
- if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
- name = "MT7620N";
- soc_info->compatible = "ralink,mt7620n-soc";
- } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
- name = "MT7620A";
- soc_info->compatible = "ralink,mt7620a-soc";
+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+ bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
+
+ if (n0 == MT7620_CHIP_NAME0 && n1 == MT7620_CHIP_NAME1) {
+ if (bga) {
+ mt762x_soc = MT762X_SOC_MT7620A;
+ name = "MT7620A";
+ soc_info->compatible = "ralink,mt7620a-soc";
+ } else {
+ mt762x_soc = MT762X_SOC_MT7620N;
+ name = "MT7620N";
+ soc_info->compatible = "ralink,mt7620n-soc";
+#ifdef CONFIG_PCI
+ panic("mt7620n is only supported for non pci kernels");
+#endif
+ }
+ } else if (n0 == MT7620_CHIP_NAME0 && n1 == MT7628_CHIP_NAME1) {
+ mt762x_soc = MT762X_SOC_MT7628AN;
+ name = "MT7628AN";
+ soc_info->compatible = "ralink,mt7628an-soc";
} else {
- panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ panic("mt762x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
}
- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
-
snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
"Ralink %s ver:%u eco:%u",
name,
@@ -364,26 +527,22 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
- switch (dram_type) {
- case SYSCFG0_DRAM_TYPE_SDRAM:
- pr_info("Board has SDRAM\n");
- soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
- soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
- break;
-
- case SYSCFG0_DRAM_TYPE_DDR1:
- pr_info("Board has DDR1\n");
- soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
- soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
- break;
-
- case SYSCFG0_DRAM_TYPE_DDR2:
- pr_info("Board has DDR2\n");
- soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
- soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
- break;
- default:
- BUG();
- }
soc_info->mem_base = MT7620_DRAM_BASE;
+ if (mt762x_soc == MT762X_SOC_MT7628AN)
+ mt7628_dram_init(soc_info);
+ else
+ mt7620_dram_init(soc_info);
+
+ pmu0 = __raw_readl(sysc + PMU0_CFG);
+ pmu1 = __raw_readl(sysc + PMU1_CFG);
+
+ pr_info("Analog PMU set to %s control\n",
+ (pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
+ pr_info("Digital PMU set to %s control\n",
+ (pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
+
+ if (mt762x_soc == MT762X_SOC_MT7628AN)
+ rt2880_pinmux_data = mt7628an_pinmux_data;
+ else
+ rt2880_pinmux_data = mt7620a_pinmux_data;
}
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 7c4598cb6de8..0d30dcd63246 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -53,6 +53,17 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
+static int memory_dtb;
+
+static int __init early_init_dt_find_memory(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ if (depth == 1 && !strcmp(uname, "memory@0"))
+ memory_dtb = 1;
+
+ return 0;
+}
+
void __init plat_mem_setup(void)
{
set_io_port_base(KSEG1);
@@ -63,7 +74,12 @@ void __init plat_mem_setup(void)
*/
__dt_setup_arch(__dtb_start);
- if (soc_info.mem_size)
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+ of_scan_flat_dt(early_init_dt_find_memory, NULL);
+ if (memory_dtb)
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ else if (soc_info.mem_size)
add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
BOOT_MEM_RAM);
else
@@ -74,19 +90,9 @@ void __init plat_mem_setup(void)
static int __init plat_of_setup(void)
{
- static struct of_device_id of_ids[3];
- int len = sizeof(of_ids[0].compatible);
-
- if (!of_have_populated_dt())
- panic("device tree not present");
-
- strlcpy(of_ids[0].compatible, soc_info.compatible, len);
- strlcpy(of_ids[1].compatible, "palmbus", len);
-
- if (of_platform_populate(NULL, of_ids, NULL, NULL))
- panic("failed to populate DT");
+ __dt_register_buses(soc_info.compatible, "palmbus");
- /* make sure ithat the reset controller is setup early */
+ /* make sure that the reset controller is setup early */
ralink_rst_init();
return 0;
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 9c64f029d047..09419f67da39 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -18,6 +18,7 @@
#include "common.h"
struct ralink_soc_info soc_info;
+struct rt2880_pmx_group *rt2880_pinmux_data = NULL;
const char *get_system_type(void)
{
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index f87de1ab2198..738cec865f41 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -17,46 +17,27 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt288x.h>
+#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct ralink_pinmux_grp mode_mux[] = {
- {
- .name = "i2c",
- .mask = RT2880_GPIO_MODE_I2C,
- .gpio_first = 1,
- .gpio_last = 2,
- }, {
- .name = "spi",
- .mask = RT2880_GPIO_MODE_SPI,
- .gpio_first = 3,
- .gpio_last = 6,
- }, {
- .name = "uartlite",
- .mask = RT2880_GPIO_MODE_UART0,
- .gpio_first = 7,
- .gpio_last = 14,
- }, {
- .name = "jtag",
- .mask = RT2880_GPIO_MODE_JTAG,
- .gpio_first = 17,
- .gpio_last = 21,
- }, {
- .name = "mdio",
- .mask = RT2880_GPIO_MODE_MDIO,
- .gpio_first = 22,
- .gpio_last = 23,
- }, {
- .name = "sdram",
- .mask = RT2880_GPIO_MODE_SDRAM,
- .gpio_first = 24,
- .gpio_last = 39,
- }, {
- .name = "pci",
- .mask = RT2880_GPIO_MODE_PCI,
- .gpio_first = 40,
- .gpio_last = 71,
- }, {0}
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
+
+static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
+ GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
+ GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
+ GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
+ GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
+ { 0 }
};
static void rt288x_wdt_reset(void)
@@ -69,14 +50,9 @@ static void rt288x_wdt_reset(void)
rt_sysc_w32(t, SYSC_REG_CLKCFG);
}
-struct ralink_pinmux rt_gpio_pinmux = {
- .mode = mode_mux,
- .wdt_reset = rt288x_wdt_reset,
-};
-
void __init ralink_clk_init(void)
{
- unsigned long cpu_rate;
+ unsigned long cpu_rate, wmac_rate = 40000000;
u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
@@ -101,6 +77,7 @@ void __init ralink_clk_init(void)
ralink_clk_add("300500.uart", cpu_rate / 2);
ralink_clk_add("300c00.uartlite", cpu_rate / 2);
ralink_clk_add("400000.ethernet", cpu_rate / 2);
+ ralink_clk_add("480000.wmac", wmac_rate);
}
void __init ralink_of_remap(void)
@@ -140,4 +117,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_base = RT2880_SDRAM_BASE;
soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+
+ rt2880_pinmux_data = rt2880_pinmux_data_act;
}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index bb82a82da9e7..c40776ab67db 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -17,90 +17,78 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt305x.h>
+#include <asm/mach-ralink/pinmux.h>
#include "common.h"
enum rt305x_soc_type rt305x_soc;
-static struct ralink_pinmux_grp mode_mux[] = {
- {
- .name = "i2c",
- .mask = RT305X_GPIO_MODE_I2C,
- .gpio_first = RT305X_GPIO_I2C_SD,
- .gpio_last = RT305X_GPIO_I2C_SCLK,
- }, {
- .name = "spi",
- .mask = RT305X_GPIO_MODE_SPI,
- .gpio_first = RT305X_GPIO_SPI_EN,
- .gpio_last = RT305X_GPIO_SPI_CLK,
- }, {
- .name = "uartlite",
- .mask = RT305X_GPIO_MODE_UART1,
- .gpio_first = RT305X_GPIO_UART1_TXD,
- .gpio_last = RT305X_GPIO_UART1_RXD,
- }, {
- .name = "jtag",
- .mask = RT305X_GPIO_MODE_JTAG,
- .gpio_first = RT305X_GPIO_JTAG_TDO,
- .gpio_last = RT305X_GPIO_JTAG_TDI,
- }, {
- .name = "mdio",
- .mask = RT305X_GPIO_MODE_MDIO,
- .gpio_first = RT305X_GPIO_MDIO_MDC,
- .gpio_last = RT305X_GPIO_MDIO_MDIO,
- }, {
- .name = "sdram",
- .mask = RT305X_GPIO_MODE_SDRAM,
- .gpio_first = RT305X_GPIO_SDRAM_MD16,
- .gpio_last = RT305X_GPIO_SDRAM_MD31,
- }, {
- .name = "rgmii",
- .mask = RT305X_GPIO_MODE_RGMII,
- .gpio_first = RT305X_GPIO_GE0_TXD0,
- .gpio_last = RT305X_GPIO_GE0_RXCLK,
- }, {0}
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
+};
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
+static struct rt2880_pmx_func rt5350_cs1_func[] = {
+ FUNC("spi_cs1", 0, 27, 1),
+ FUNC("wdg_cs1", 1, 27, 1),
+};
+static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct rt2880_pmx_func rt3352_rgmii_func[] = {
+ FUNC("rgmii", 0, 24, 12)
+};
+static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
+static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
+static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
+static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
+
+static struct rt2880_pmx_group rt3050_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
+ { 0 }
};
-static struct ralink_pinmux_grp uart_mux[] = {
- {
- .name = "uartf",
- .mask = RT305X_GPIO_MODE_UARTF,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
- }, {
- .name = "pcm uartf",
- .mask = RT305X_GPIO_MODE_PCM_UARTF,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
- }, {
- .name = "pcm i2s",
- .mask = RT305X_GPIO_MODE_PCM_I2S,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
- }, {
- .name = "i2s uartf",
- .mask = RT305X_GPIO_MODE_I2S_UARTF,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_14,
- }, {
- .name = "pcm gpio",
- .mask = RT305X_GPIO_MODE_PCM_GPIO,
- .gpio_first = RT305X_GPIO_10,
- .gpio_last = RT305X_GPIO_14,
- }, {
- .name = "gpio uartf",
- .mask = RT305X_GPIO_MODE_GPIO_UARTF,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_10,
- }, {
- .name = "gpio i2s",
- .mask = RT305X_GPIO_MODE_GPIO_I2S,
- .gpio_first = RT305X_GPIO_7,
- .gpio_last = RT305X_GPIO_10,
- }, {
- .name = "gpio",
- .mask = RT305X_GPIO_MODE_GPIO,
- }, {0}
+static struct rt2880_pmx_group rt3352_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
+ GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
+ GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
+ { 0 }
+};
+
+static struct rt2880_pmx_group rt5350_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
+ { 0 }
};
static void rt305x_wdt_reset(void)
@@ -114,14 +102,6 @@ static void rt305x_wdt_reset(void)
rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
}
-struct ralink_pinmux rt_gpio_pinmux = {
- .mode = mode_mux,
- .uart = uart_mux,
- .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
- .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
- .wdt_reset = rt305x_wdt_reset,
-};
-
static unsigned long rt5350_get_mem_size(void)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
@@ -290,11 +270,14 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_base = RT305X_SDRAM_BASE;
if (soc_is_rt5350()) {
soc_info->mem_size = rt5350_get_mem_size();
+ rt2880_pinmux_data = rt5350_pinmux_data;
} else if (soc_is_rt305x() || soc_is_rt3350()) {
soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+ rt2880_pinmux_data = rt3050_pinmux_data;
} else if (soc_is_rt3352()) {
soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+ rt2880_pinmux_data = rt3352_pinmux_data;
}
}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index b474ac284b83..86a535c770d8 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -17,132 +17,50 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt3883.h>
+#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct ralink_pinmux_grp mode_mux[] = {
- {
- .name = "i2c",
- .mask = RT3883_GPIO_MODE_I2C,
- .gpio_first = RT3883_GPIO_I2C_SD,
- .gpio_last = RT3883_GPIO_I2C_SCLK,
- }, {
- .name = "spi",
- .mask = RT3883_GPIO_MODE_SPI,
- .gpio_first = RT3883_GPIO_SPI_CS0,
- .gpio_last = RT3883_GPIO_SPI_MISO,
- }, {
- .name = "uartlite",
- .mask = RT3883_GPIO_MODE_UART1,
- .gpio_first = RT3883_GPIO_UART1_TXD,
- .gpio_last = RT3883_GPIO_UART1_RXD,
- }, {
- .name = "jtag",
- .mask = RT3883_GPIO_MODE_JTAG,
- .gpio_first = RT3883_GPIO_JTAG_TDO,
- .gpio_last = RT3883_GPIO_JTAG_TCLK,
- }, {
- .name = "mdio",
- .mask = RT3883_GPIO_MODE_MDIO,
- .gpio_first = RT3883_GPIO_MDIO_MDC,
- .gpio_last = RT3883_GPIO_MDIO_MDIO,
- }, {
- .name = "ge1",
- .mask = RT3883_GPIO_MODE_GE1,
- .gpio_first = RT3883_GPIO_GE1_TXD0,
- .gpio_last = RT3883_GPIO_GE1_RXCLK,
- }, {
- .name = "ge2",
- .mask = RT3883_GPIO_MODE_GE2,
- .gpio_first = RT3883_GPIO_GE2_TXD0,
- .gpio_last = RT3883_GPIO_GE2_RXCLK,
- }, {
- .name = "pci",
- .mask = RT3883_GPIO_MODE_PCI,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {
- .name = "lna a",
- .mask = RT3883_GPIO_MODE_LNA_A,
- .gpio_first = RT3883_GPIO_LNA_PE_A0,
- .gpio_last = RT3883_GPIO_LNA_PE_A2,
- }, {
- .name = "lna g",
- .mask = RT3883_GPIO_MODE_LNA_G,
- .gpio_first = RT3883_GPIO_LNA_PE_G0,
- .gpio_last = RT3883_GPIO_LNA_PE_G2,
- }, {0}
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
};
-
-static struct ralink_pinmux_grp uart_mux[] = {
- {
- .name = "uartf",
- .mask = RT3883_GPIO_MODE_UARTF,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_14,
- }, {
- .name = "pcm uartf",
- .mask = RT3883_GPIO_MODE_PCM_UARTF,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_14,
- }, {
- .name = "pcm i2s",
- .mask = RT3883_GPIO_MODE_PCM_I2S,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_14,
- }, {
- .name = "i2s uartf",
- .mask = RT3883_GPIO_MODE_I2S_UARTF,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_14,
- }, {
- .name = "pcm gpio",
- .mask = RT3883_GPIO_MODE_PCM_GPIO,
- .gpio_first = RT3883_GPIO_11,
- .gpio_last = RT3883_GPIO_14,
- }, {
- .name = "gpio uartf",
- .mask = RT3883_GPIO_MODE_GPIO_UARTF,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_10,
- }, {
- .name = "gpio i2s",
- .mask = RT3883_GPIO_MODE_GPIO_I2S,
- .gpio_first = RT3883_GPIO_7,
- .gpio_last = RT3883_GPIO_10,
- }, {
- .name = "gpio",
- .mask = RT3883_GPIO_MODE_GPIO,
- }, {0}
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func pci_func[] = {
+ FUNC("pci-dev", 0, 40, 32),
+ FUNC("pci-host2", 1, 40, 32),
+ FUNC("pci-host1", 2, 40, 32),
+ FUNC("pci-fnc", 3, 40, 32)
};
-
-static struct ralink_pinmux_grp pci_mux[] = {
- {
- .name = "pci-dev",
- .mask = 0,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {
- .name = "pci-host2",
- .mask = 1,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {
- .name = "pci-host1",
- .mask = 2,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {
- .name = "pci-fnc",
- .mask = 3,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {
- .name = "pci-gpio",
- .mask = 7,
- .gpio_first = RT3883_GPIO_PCI_AD0,
- .gpio_last = RT3883_GPIO_PCI_AD31,
- }, {0}
+static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+
+static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
+ RT3883_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
+ GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
+ GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
+ GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
+ RT3883_GPIO_MODE_PCI_SHIFT),
+ GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
+ GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
+ { 0 }
};
static void rt3883_wdt_reset(void)
@@ -155,17 +73,6 @@ static void rt3883_wdt_reset(void)
rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
}
-struct ralink_pinmux rt_gpio_pinmux = {
- .mode = mode_mux,
- .uart = uart_mux,
- .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
- .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
- .wdt_reset = rt3883_wdt_reset,
- .pci = pci_mux,
- .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
- .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
-};
-
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, sys_rate;
@@ -204,6 +111,7 @@ void __init ralink_clk_init(void)
ralink_clk_add("10000b00.spi", sys_rate);
ralink_clk_add("10000c00.uartlite", 40000000);
ralink_clk_add("10100000.ethernet", sys_rate);
+ ralink_clk_add("10180000.wmac", 40000000);
}
void __init ralink_of_remap(void)
@@ -243,4 +151,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_base = RT3883_SDRAM_BASE;
soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+
+ rt2880_pinmux_data = rt3883_pinmux_data;
}
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index 5bb29b3790ff..82c72a15bf75 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -173,7 +173,6 @@ static struct platform_driver rt_timer_driver = {
.remove = rt_timer_remove,
.driver = {
.name = "rt-timer",
- .owner = THIS_MODULE,
.of_match_table = rt_timer_match
},
};
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index a18007613c30..5aa3df853082 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -79,7 +79,7 @@ static inline void rb532_set_bit(unsigned bitval,
*/
static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
{
- return (readl(ioaddr) & (1 << offset));
+ return readl(ioaddr) & (1 << offset);
}
/*
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index a757ded437cd..657210e767c2 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -122,8 +122,8 @@ void __init prom_setup_cmdline(void)
void __init prom_init(void)
{
struct ddr_ram __iomem *ddr;
- phys_t memsize;
- phys_t ddrbase;
+ phys_addr_t memsize;
+ phys_addr_t ddrbase;
ddr = ioremap_nocache(ddr_reg[0].start,
ddr_reg[0].end - ddr_reg[0].start);
@@ -133,8 +133,8 @@ void __init prom_init(void)
return;
}
- ddrbase = (phys_t)&ddr->ddrbase;
- memsize = (phys_t)&ddr->ddrmask;
+ ddrbase = (phys_addr_t)&ddr->ddrbase;
+ memsize = (phys_addr_t)&ddr->ddrmask;
memsize = 0 - memsize;
prom_setup_cmdline();
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index 7cec0a4e527d..6b009c45abed 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -24,14 +24,12 @@ EXPORT_SYMBOL(sgimc);
static inline unsigned long get_bank_addr(unsigned int memconfig)
{
- return ((memconfig & SGIMC_MCONFIG_BASEADDR) <<
- ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22));
+ return (memconfig & SGIMC_MCONFIG_BASEADDR) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22);
}
static inline unsigned long get_bank_size(unsigned int memconfig)
{
- return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) <<
- ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
+ return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
}
static inline unsigned int get_bank_config(int bank)
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 3f47346608d7..712cc0f6a58d 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -338,7 +338,7 @@ static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
PHYS_TO_XKSEG_UNCACHED(pte);
a = (a & 0x3f) << 6; /* PFN */
a += vaddr & ((1 << pgsz) - 1);
- return (cpu_err_addr == a);
+ return cpu_err_addr == a;
}
}
}
@@ -351,7 +351,7 @@ static int check_vdma_memaddr(void)
u32 a = sgimc->maddronly;
if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
- return (cpu_err_addr == a);
+ return cpu_err_addr == a;
if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
@@ -367,7 +367,7 @@ static int check_vdma_gioaddr(void)
if (gio_err_stat & GIO_ERRMASK) {
u32 a = sgimc->gio_dma_trans;
a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
- return (gio_err_addr == a);
+ return gio_err_addr == a;
}
return 0;
}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 7a53b1e28a93..ecbb62f339c5 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -125,8 +125,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode)
#endif
offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
- return (TO_NODE(nasid, offset) >> PAGE_SHIFT);
+ return TO_NODE(nasid, offset) >> PAGE_SHIFT;
else
- return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >>
- PAGE_SHIFT);
+ return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a304bcc37e4f..0b68469e063f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -42,8 +42,7 @@ static int fine_mode;
static int is_fine_dirmode(void)
{
- return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
- >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
+ return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE;
}
static hubreg_t get_region(cnodeid_t cnode)
@@ -288,7 +287,7 @@ static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
if (size <= 128) {
if (slot % 4 == 0) {
size <<= 20; /* size in bytes */
- return(size >> PAGE_SHIFT);
+ return size >> PAGE_SHIFT;
} else
return 0;
} else {
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 588e1806a1a3..c1a11a11db7f 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -38,7 +38,7 @@
#define MAX_RAM_SIZE (~0ULL)
#else
#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define MAX_RAM_SIZE (~0ULL)
#else
#define MAX_RAM_SIZE (0xffffffffULL)
@@ -49,8 +49,8 @@
#endif
#define SIBYTE_MAX_MEM_REGIONS 8
-phys_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS];
-phys_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS];
+phys_addr_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS];
+phys_addr_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS];
unsigned int board_mem_region_count;
int cfe_cons_handle;
@@ -96,7 +96,7 @@ static void __noreturn cfe_linux_halt(void)
static __init void prom_meminit(void)
{
- u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */
+ u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */
int mem_flags = 0;
unsigned int idx;
int rd_flag;
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 9480c14ec66a..1cecdcf85cf1 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -50,7 +50,7 @@ static struct platform_device swarm_pata_device = {
static int __init swarm_pata_init(void)
{
u8 __iomem *base;
- phys_t offset, size;
+ phys_addr_t offset, size;
struct resource *r;
if (!SIBYTE_HAVE_IDE)
diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c
index b732600b47f5..e62466445f08 100644
--- a/arch/mips/sibyte/swarm/rtc_m41t81.c
+++ b/arch/mips/sibyte/swarm/rtc_m41t81.c
@@ -109,7 +109,7 @@ static int m41t81_read(uint8_t addr)
return -1;
}
- return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
+ return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
}
static int m41t81_write(uint8_t addr, int b)
@@ -229,5 +229,5 @@ int m41t81_probe(void)
tmp = m41t81_read(M41T81REG_SC);
m41t81_write(M41T81REG_SC, tmp & 0x7f);
- return (m41t81_read(M41T81REG_SC) != -1);
+ return m41t81_read(M41T81REG_SC) != -1;
}
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index 178a824b28d4..50a82c495427 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -84,7 +84,7 @@ static int xicor_read(uint8_t addr)
return -1;
}
- return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
+ return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
}
static int xicor_write(uint8_t addr, int b)
@@ -206,5 +206,5 @@ unsigned long xicor_get_time(void)
int xicor_probe(void)
{
- return (xicor_read(X1241REG_SC) != -1);
+ return xicor_read(X1241REG_SC) != -1;
}
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 3462c831d0ea..494fb0a475ac 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -76,7 +76,7 @@ int swarm_be_handler(struct pt_regs *regs, int is_fixup)
printk("DBE physical address: %010Lx\n",
__read_64bit_c0_register($26, 1));
}
- return (is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL);
+ return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
}
enum swarm_rtc_type {
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index e714d6ce9a82..a4664cb6c1e1 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -29,8 +29,8 @@ static void __init tx4927_wdr_init(void)
{
/* report watchdog reset status */
if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST)
- pr_warning("Watchdog reset detected at 0x%lx\n",
- read_c0_errorepc());
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
/* clear WatchDogReset (W1C) */
tx4927_ccfg_set(TX4927_CCFG_WDRST);
/* do reset on watchdog */
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 0a3bf2dfaba1..58cdb2aba5e1 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -31,8 +31,8 @@ static void __init tx4938_wdr_init(void)
{
/* report watchdog reset status */
if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST)
- pr_warning("Watchdog reset detected at 0x%lx\n",
- read_c0_errorepc());
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
/* clear WatchDogReset (W1C) */
tx4938_ccfg_set(TX4938_CCFG_WDRST);
/* do reset on watchdog */
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index b7eccbd17bf7..e3733cde50d6 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -35,8 +35,8 @@ static void __init tx4939_wdr_init(void)
{
/* report watchdog reset status */
if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST)
- pr_warning("Watchdog reset detected at 0x%lx\n",
- read_c0_errorepc());
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
/* clear WatchDogReset (W1C) */
tx4939_ccfg_set(TX4939_CCFG_WDRST);
/* do reset on watchdog */
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 2da5f25f98bc..37030409745c 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -245,7 +245,6 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
static struct platform_driver rbtx4939_led_driver = {
.driver = {
.name = "rbtx4939-led",
- .owner = THIS_MODULE,
},
};
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 54a062cb9f2c..f892d9de47d9 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -3,7 +3,6 @@ generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 6aa3ce1854aa..cab7d6d50051 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -80,4 +80,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index e142c9ee51fa..2328f82ba2a8 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -14,6 +14,8 @@
# Nios2 port by Wind River Systems Inc trough:
# fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+KBUILD_DEFCONFIG := 3c120_defconfig
+
UTS_SYSNAME = Linux
export MMU
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index bb160be0dc28..01c75f36e8b3 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -19,7 +19,6 @@ generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index 9102bfd3fa1c..6e24d7cceb0c 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -45,6 +45,8 @@ static inline void iounmap(void __iomem *addr)
__iounmap(addr);
}
+#define ioremap_wc ioremap_nocache
+
/* Pages to physical address... */
#define page_to_phys(page) virt_to_phys(page_to_virt(page))
#define page_to_bus(page) page_to_virt(page)
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index acedc0a2860e..caa51ff85a3c 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -168,7 +168,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
unsigned long __gu_val; \
__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
- (x) = (__typeof__(x))__gu_val; \
+ (x) = (__force __typeof__(x))__gu_val; \
__gu_err; \
})
@@ -180,7 +180,7 @@ do { \
if (access_ok(VERIFY_READ, __gu_ptr, sizeof(*__gu_ptr))) \
__get_user_common(__gu_val, sizeof(*__gu_ptr), \
__gu_ptr, __gu_err); \
- (x) = (__typeof__(x))__gu_val; \
+ (x) = (__force __typeof__(x))__gu_val; \
__gu_err; \
})
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 89b61d7dc790..91f1f360a7c4 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -25,7 +25,6 @@ generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index ffb024b8423f..8686237a3c3c 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -7,7 +7,6 @@ generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index fe35ceacf0e7..a5cd40cd8ee1 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -79,4 +79,9 @@
#define SO_BPF_EXTENSIONS 0x4029
+#define SO_INCOMING_CPU 0x402A
+
+#define SO_ATTACH_BPF 0x402B
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index f8c45cc2947d..536ef66bb94b 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -38,14 +38,14 @@
LDREGX \t2(\t1),\t2
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t1
- /* t1 = &__get_cpu_var(exception_data) */
+ /* t1 = this_cpu_ptr(&exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
LDREG EXCDATA_IP(\t1), \t1
.endm
#else
.macro get_fault_ip t1 t2
- /* t1 = &__get_cpu_var(exception_data) */
+ /* t1 = this_cpu_ptr(&exception_data) */
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 88eace4e28c3..a2a168e2dfe7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -88,6 +88,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
+ select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
@@ -128,6 +129,7 @@ config PPC
select HAVE_BPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAS_GCOV_PROFILE_ALL
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
select GENERIC_TIME_VSYSCALL_OLD
@@ -148,6 +150,8 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+ select NO_BOOTMEM
+ select HAVE_GENERIC_RCU_GUP
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -549,7 +553,7 @@ config PPC_4K_PAGES
bool "4k page size"
config PPC_16K_PAGES
- bool "16k page size" if 44x
+ bool "16k page size" if 44x || PPC_8xx
config PPC_64K_PAGES
bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
index 85646b4f96e1..2aa5cd318ce8 100644
--- a/arch/powerpc/boot/dts/b4860emu.dts
+++ b/arch/powerpc/boot/dts/b4860emu.dts
@@ -193,9 +193,9 @@
fsl,liodn-bits = <12>;
};
- clockgen: global-utilities@e1000 {
+/include/ "fsl/qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
- reg = <0xe1000 0x1000>;
};
/include/ "fsl/qoriq-dma-0.dtsi"
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index 8b47edcfabf0..e5bde0b85135 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -152,6 +152,29 @@
reg = <0x68>;
};
};
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ adt7461@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+ };
};
};
diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dtsi b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
index 9e6c01339ccc..45efcbadb23c 100644
--- a/arch/powerpc/boot/dts/bsc9131rdb.dtsi
+++ b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
@@ -40,31 +40,6 @@
compatible = "fsl,ifc-nand";
reg = <0x0 0x0 0x4000>;
- partition@0 {
- /* This location must not be altered */
- /* 3MB for u-boot Bootloader Image */
- reg = <0x0 0x00300000>;
- label = "NAND U-Boot Image";
- read-only;
- };
-
- partition@300000 {
- /* 1MB for DTB Image */
- reg = <0x00300000 0x00100000>;
- label = "NAND DTB Image";
- };
-
- partition@400000 {
- /* 8MB for Linux Kernel Image */
- reg = <0x00400000 0x00800000>;
- label = "NAND Linux Kernel Image";
- };
-
- partition@c00000 {
- /* Rest space for Root file System Image */
- reg = <0x00c00000 0x07400000>;
- label = "NAND RFS Image";
- };
};
};
@@ -82,31 +57,6 @@
reg = <0>;
spi-max-frequency = <50000000>;
- /* 512KB for u-boot Bootloader Image */
- partition@0 {
- reg = <0x0 0x00080000>;
- label = "SPI Flash U-Boot Image";
- read-only;
- };
-
- /* 512KB for DTB Image */
- partition@80000 {
- reg = <0x00080000 0x00080000>;
- label = "SPI Flash DTB Image";
- };
-
- /* 4MB for Linux Kernel Image */
- partition@100000 {
- reg = <0x00100000 0x00400000>;
- label = "SPI Flash Kernel Image";
- };
-
- /*11MB for RFS Image */
- partition@500000 {
- reg = <0x00500000 0x00B00000>;
- label = "SPI Flash RFS Image";
- };
-
};
};
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
index d67894459ac8..86161ae6c966 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
@@ -80,33 +80,9 @@
compatible = "fsl,b4420-device-config", "fsl,qoriq-device-config-2.0";
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,b4420-clockgen", "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
mux0: mux0@0 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 582381dba1d7..65100b9636b7 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -124,33 +124,9 @@
compatible = "fsl,b4860-device-config", "fsl,qoriq-device-config-2.0";
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,b4860-clockgen", "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
mux0: mux0@0 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 69ce1026c948..efd74db4f9b0 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -305,53 +305,9 @@
#sleep-cells = <2>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen1.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
mux2: mux2@40 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index cd63cb1b1042..d7425ef1ae41 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -332,53 +332,9 @@
#sleep-cells = <2>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen1.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
mux2: mux2@40 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 12947ccddf25..7005a4a4cef0 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -352,35 +352,9 @@
#sleep-cells = <2>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen1.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
pll2: pll2@840 {
#clock-cells = <1>;
@@ -398,24 +372,6 @@
clock-output-names = "pll3", "pll3-div2";
};
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
-
mux2: mux2@40 {
#clock-cells = <0>;
reg = <0x40 0x4>;
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 4c4a2b0436b2..55834211bd28 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -337,53 +337,9 @@
#sleep-cells = <2>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen1.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 67296fdd9698..6e4cd6ce363c 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -297,53 +297,9 @@
#sleep-cells = <2>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen1.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
mux2: mux2@40 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
new file mode 100644
index 000000000000..4ece1edbff63
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
@@ -0,0 +1,85 @@
+/*
+ * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ]
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+global-utilities@e1000 {
+ compatible = "fsl,qoriq-clockgen-1.0";
+ ranges = <0x0 0xe1000 0x1000>;
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysclk: sysclk {
+ #clock-cells = <0>;
+ compatible = "fsl,qoriq-sysclk-1.0", "fixed-clock";
+ clock-output-names = "sysclk";
+ };
+ pll0: pll0@800 {
+ #clock-cells = <1>;
+ reg = <0x800 0x4>;
+ compatible = "fsl,qoriq-core-pll-1.0";
+ clocks = <&sysclk>;
+ clock-output-names = "pll0", "pll0-div2";
+ };
+ pll1: pll1@820 {
+ #clock-cells = <1>;
+ reg = <0x820 0x4>;
+ compatible = "fsl,qoriq-core-pll-1.0";
+ clocks = <&sysclk>;
+ clock-output-names = "pll1", "pll1-div2";
+ };
+ mux0: mux0@0 {
+ #clock-cells = <0>;
+ reg = <0x0 0x4>;
+ compatible = "fsl,qoriq-core-mux-1.0";
+ clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
+ clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
+ clock-output-names = "cmux0";
+ };
+ mux1: mux1@20 {
+ #clock-cells = <0>;
+ reg = <0x20 0x4>;
+ compatible = "fsl,qoriq-core-mux-1.0";
+ clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
+ clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
+ clock-output-names = "cmux1";
+ };
+ platform_pll: platform-pll@c00 {
+ #clock-cells = <1>;
+ reg = <0xc00 0x4>;
+ compatible = "fsl,qoriq-platform-pll-1.0";
+ clocks = <&sysclk>;
+ clock-output-names = "platform-pll", "platform-pll-div2";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
new file mode 100644
index 000000000000..48e0b6e4ce33
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
@@ -0,0 +1,68 @@
+/*
+ * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ]
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+global-utilities@e1000 {
+ compatible = "fsl,qoriq-clockgen-2.0";
+ ranges = <0x0 0xe1000 0x1000>;
+ reg = <0xe1000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysclk: sysclk {
+ #clock-cells = <0>;
+ compatible = "fsl,qoriq-sysclk-2.0", "fixed-clock";
+ clock-output-names = "sysclk";
+ };
+ pll0: pll0@800 {
+ #clock-cells = <1>;
+ reg = <0x800 0x4>;
+ compatible = "fsl,qoriq-core-pll-2.0";
+ clocks = <&sysclk>;
+ clock-output-names = "pll0", "pll0-div2", "pll0-div4";
+ };
+ pll1: pll1@820 {
+ #clock-cells = <1>;
+ reg = <0x820 0x4>;
+ compatible = "fsl,qoriq-core-pll-2.0";
+ clocks = <&sysclk>;
+ clock-output-names = "pll1", "pll1-div2", "pll1-div4";
+ };
+ platform_pll: platform-pll@c00 {
+ #clock-cells = <1>;
+ reg = <0xc00 0x4>;
+ compatible = "fsl,qoriq-platform-pll-2.0";
+ clocks = <&sysclk>;
+ clock-output-names = "platform-pll", "platform-pll-div2";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 12e597eea3c8..15ae462e758f 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -281,35 +281,9 @@
fsl,liodn-bits = <12>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0";
- clock-output-names = "sysclk", "fixed-clock";
- };
-
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
mux0: mux0@0 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index aecee9690a88..1ce91e3485a9 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -305,34 +305,9 @@
fsl,liodn-bits = <12>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0";
- clock-output-names = "sysclk", "fixed-clock";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
mux0: mux0@0 {
#clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 7e2fc7cdce48..0e96fcabe812 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -368,34 +368,9 @@
fsl,liodn-bits = <12>;
};
- clockgen: global-utilities@e1000 {
+/include/ "qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
- reg = <0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0";
- clock-output-names = "sysclk";
- };
-
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
-
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
pll2: pll2@840 {
#clock-cells = <1>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 2fed3bc0b990..394ea9c943c9 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -98,6 +98,26 @@
reg = <0x68>;
interrupts = <0x1 0x1 0 0>;
};
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ ina220@41 {
+ compatible = "ti,ina220";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ ina220@44 {
+ compatible = "ti,ina220";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ ina220@45 {
+ compatible = "ti,ina220";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
adt7461@4c {
compatible = "adi,adt7461";
reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index 2869fea717dd..b7f3057cd894 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -98,6 +98,26 @@
reg = <0x68>;
interrupts = <0x1 0x1 0 0>;
};
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ ina220@41 {
+ compatible = "ti,ina220";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ ina220@44 {
+ compatible = "ti,ina220";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ ina220@45 {
+ compatible = "ti,ina220";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
adt7461@4c {
compatible = "adi,adt7461";
reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
index 860b5ccf76c0..7e04bf487c04 100644
--- a/arch/powerpc/boot/dts/p5040ds.dts
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -95,6 +95,26 @@
reg = <0x68>;
interrupts = <0x1 0x1 0 0>;
};
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ ina220@41 {
+ compatible = "ti,ina220";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ ina220@44 {
+ compatible = "ti,ina220";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ ina220@45 {
+ compatible = "ti,ina220";
+ reg = <0x45>;
+ shunt-resistor = <1000>;
+ };
adt7461@4c {
compatible = "adi,adt7461";
reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
index 1cf0f3c5f7e5..187add885cae 100644
--- a/arch/powerpc/boot/dts/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -83,6 +83,13 @@
};
};
+ i2c@118000 {
+ adt7461@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+ };
+
i2c@118100 {
pca9546@77 {
compatible = "nxp,pca9546";
diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi
index 555dc6e03d89..59061834d54e 100644
--- a/arch/powerpc/boot/dts/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/t208xqds.dtsi
@@ -169,6 +169,17 @@
shunt-resistor = <1000>;
};
};
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ adt7461@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+ };
};
};
diff --git a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts
index bc12127a03fb..decaf357db9c 100644
--- a/arch/powerpc/boot/dts/t4240emu.dts
+++ b/arch/powerpc/boot/dts/t4240emu.dts
@@ -250,9 +250,9 @@
fsl,liodn-bits = <12>;
};
- clockgen: global-utilities@e1000 {
+/include/ "fsl/qoriq-clockgen2.dtsi"
+ global-utilities@e1000 {
compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
- reg = <0xe1000 0x1000>;
};
/include/ "fsl/qoriq-dma-0.dtsi"
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d367a0aece2a..d80161b633f4 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -144,13 +144,24 @@ static char cmdline[BOOT_COMMAND_LINE_SIZE]
static void prep_cmdline(void *chosen)
{
+ unsigned int getline_timeout = 5000;
+ int v;
+ int n;
+
+ /* Wait-for-input time */
+ n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v));
+ if (n == sizeof(v))
+ getline_timeout = v;
+
if (cmdline[0] == '\0')
getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
printf("\n\rLinux/PowerPC load: %s", cmdline);
+
/* If possible, edit the command line */
- if (console_ops.edit_cmdline)
- console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
+ if (console_ops.edit_cmdline && getline_timeout)
+ console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE, getline_timeout);
+
printf("\n\r");
/* Put the command line back into the devtree for the kernel */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 8aad3c55aeda..5e75e1c5518e 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -58,7 +58,7 @@ extern struct dt_ops dt_ops;
struct console_ops {
int (*open)(void);
void (*write)(const char *buf, int len);
- void (*edit_cmdline)(char *buf, int len);
+ void (*edit_cmdline)(char *buf, int len, unsigned int getline_timeout);
void (*close)(void);
void *data;
};
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index f2156f07571f..167ee9433de6 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -33,7 +33,7 @@ static void serial_write(const char *buf, int len)
scdp->putc(*buf++);
}
-static void serial_edit_cmdline(char *buf, int len)
+static void serial_edit_cmdline(char *buf, int len, unsigned int timeout)
{
int timer = 0, count;
char ch, *cp;
@@ -44,7 +44,7 @@ static void serial_edit_cmdline(char *buf, int len)
cp = &buf[count];
count++;
- while (timer++ < 5*1000) {
+ do {
if (scdp->tstc()) {
while (((ch = scdp->getc()) != '\n') && (ch != '\r')) {
/* Test for backspace/delete */
@@ -70,7 +70,7 @@ static void serial_edit_cmdline(char *buf, int len)
break; /* Exit 'timer' loop */
}
udelay(1000); /* 1 msec */
- }
+ } while (timer++ < timeout);
*cp = 0;
}
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index dc939de9b5b0..b4c4b469e320 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -100,7 +100,6 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
# CONFIG_NET_VENDOR_3COM is not set
CONFIG_FS_ENET=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index e5a648115ada..7cb9719abf3d 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -113,7 +113,6 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index 8317b6010ba6..ecabf625d249 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -114,7 +114,6 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 124d66f0282c..4a4a86fb0d3d 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -165,7 +165,6 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig
index 1e151594c691..99ea8746bbaf 100644
--- a/arch/powerpc/configs/86xx/sbc8641d_defconfig
+++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig
@@ -167,7 +167,6 @@ CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index 59734916986a..8a08d6dcb0b4 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -211,7 +211,6 @@ CONFIG_MV643XX_ETH=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_ATM_DRIVERS is not set
CONFIG_NETCONSOLE=m
-CONFIG_NETPOLL_TRAP=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 688e9e4d29a1..611efe99faeb 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -144,6 +144,7 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_UIO=y
CONFIG_STAGING=y
+CONFIG_MEMORY=y
CONFIG_VIRT_DRIVERS=y
CONFIG_FSL_HV_MANAGER=y
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 6db97e4414b2..be24a18c0d96 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -118,6 +118,7 @@ CONFIG_FSL_DMA=y
CONFIG_VIRT_DRIVERS=y
CONFIG_FSL_HV_MANAGER=y
CONFIG_FSL_CORENET_CF=y
+CONFIG_MEMORY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=m
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d2c415489f72..02395fab19bd 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -215,6 +215,7 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
+CONFIG_MEMORY=y
# CONFIG_NET_DMA is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 87460083dbc7..b5d1b82a1b43 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -216,6 +216,7 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
+CONFIG_MEMORY=y
# CONFIG_NET_DMA is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 20bc5e2d368d..5830d735c5c3 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -154,7 +154,6 @@ CONFIG_WINDFARM_PM121=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index c3a3269b0865..67885b2d70aa 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -103,7 +103,6 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
CONFIG_VORTEX=y
CONFIG_ACENIC=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index fec5870f1818..ad6d6b5af7d7 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -629,7 +629,6 @@ CONFIG_SLIP_SMART=y
CONFIG_NET_FC=y
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_VIRTIO_NET=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index dd2a9cab4b50..1f97364017c7 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -133,7 +133,6 @@ CONFIG_DM_UEVENT=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index d2008887eb8c..ac7ca5852827 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -134,7 +134,6 @@ CONFIG_DM_UEVENT=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=m
CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index f9e8b9491efc..d3feba5a275f 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -66,7 +66,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
src = data + done;
} while (done + 63 < len);
- memset(temp, 0, sizeof(temp));
+ memzero_explicit(temp, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
@@ -154,4 +154,4 @@ module_exit(sha1_powerpc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
-MODULE_ALIAS("sha1-powerpc");
+MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 31e8f59aff38..382b28e364dc 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,6 +1,5 @@
generic-y += clkdev.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index bab79a110c7b..a3bf5be111ff 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -33,12 +33,9 @@
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
-#ifdef CONFIG_SMP
-
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
#else
@@ -46,20 +43,26 @@
#endif
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define dma_rmb() __lwsync()
+#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_lwsync() __lwsync()
#define smp_mb() mb()
#define smp_rmb() __lwsync()
#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends() read_barrier_depends()
#else
-#define __lwsync() barrier()
+#define smp_lwsync() barrier()
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
/*
* This is a barrier which prevents following instructions from being
* started until the value of the argument x is known. For example, if
@@ -72,7 +75,7 @@
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- __lwsync(); \
+ smp_lwsync(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
@@ -80,7 +83,7 @@ do { \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- __lwsync(); \
+ smp_lwsync(); \
___p1; \
})
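
The reshuffle introduces dma_rmb()/dma_wmb() for ordering accesses to coherent DMA memory and a single smp_lwsync() helper, so smp_store_release()/smp_load_acquire() compile to lwsync on SMP and a plain compiler barrier on UP. A minimal standalone sketch (C11 atomics standing in for the kernel macros) of the release/acquire message-passing pattern these primitives implement:

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;            /* ordinary data */
    static _Atomic int ready;      /* flag published with release semantics */

    static void producer(void)
    {
        payload = 42;
        /* kernel: smp_store_release(&ready, 1);  -> lwsync; store */
        atomic_store_explicit(&ready, 1, memory_order_release);
    }

    static void consumer(void)
    {
        /* kernel: smp_load_acquire(&ready);      -> load; lwsync  */
        if (atomic_load_explicit(&ready, memory_order_acquire))
            printf("%d\n", payload);    /* guaranteed to observe 42 */
    }

    int main(void)
    {
        producer();
        consumer();
        return 0;
    }
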
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index bd3bd573d0ae..59abc620f8e8 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -14,9 +14,9 @@
*
* The bitop functions are defined to work on unsigned longs, so for a
* ppc64 system the bits end up numbered:
- * |63..............0|127............64|191...........128|255...........196|
+ * |63..............0|127............64|191...........128|255...........192|
* and on ppc32:
- * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
*
* There are a few little-endian macros used mostly for filesystem
* bitmaps, these work on similar bit arrays layouts, but
@@ -213,7 +213,7 @@ static __inline__ unsigned long ffz(unsigned long x)
return __ilog2(x & -x);
}
-static __inline__ int __ffs(unsigned long x)
+static __inline__ unsigned long __ffs(unsigned long x)
{
return __ilog2(x & -x);
}
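
Besides correcting two bit labels in the layout comment, the change gives __ffs() the unsigned long return type that generic code such as for_each_set_bit() expects. A minimal standalone sketch (using the GCC/Clang builtin) of what __ffs() computes, the zero-based index of the lowest set bit:

    #include <assert.h>

    /* __ffs(x): zero-based index of the lowest set bit; undefined for x == 0. */
    static unsigned long my_ffs(unsigned long x)
    {
        return (unsigned long)__builtin_ctzl(x);    /* count trailing zeros */
    }

    int main(void)
    {
        assert(my_ffs(0x1)   == 0);
        assert(my_ffs(0x80)  == 7);
        assert(my_ffs(0x300) == 8);    /* lowest set bit wins */
        return 0;
    }
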
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index daa5af91163c..22d5a7da9e68 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -448,13 +448,9 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
-#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
- CPU_FTR_ICSWX | CPU_FTR_DABRX )
-
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
-#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2)
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
@@ -505,13 +501,13 @@ enum {
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
-#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2)
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
- CPU_FTRS_POWER8_DD1 & CPU_FTRS_POSSIBLE)
+ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE)
#endif
#else
enum {
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index ca07f9c27335..0652ebe117af 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -39,6 +39,7 @@ struct device_node;
#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
#define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */
+#define EEH_EARLY_DUMP_LOG 0x20 /* Dump log immediately */
/*
* Delay for PE reset, all in ms
@@ -72,6 +73,7 @@ struct device_node;
#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */
+#define EEH_PE_RESET (1 << 3) /* PE reset in progress */
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 888d8f3f2524..57d289acb803 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -28,8 +28,7 @@
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
+#define ELF_ET_DYN_BASE 0x20000000
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index 77ced0b3d81d..43b6bb1a4a9c 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -68,7 +68,10 @@ struct ccsr_guts {
u8 res0b4[0xc0 - 0xb4];
__be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
Called 'elbcvselcr' on 86xx SOCs */
- u8 res0c4[0x224 - 0xc4];
+ u8 res0c4[0x100 - 0xc4];
+ __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
+ There are 16 registers */
+ u8 res140[0x224 - 0x140];
__be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
__be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
u8 res22c[0x604 - 0x22c];
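
Describing the sixteen Reset Control Word status registers in struct ccsr_guts lets platform code read back boot-time configuration directly. A minimal sketch (assumes the caller already has the guts block ioremapped, as existing 85xx platform code does) of reading one RCW status word:

    #include <asm/io.h>
    #include <asm/fsl_guts.h>

    /* Read RCW status word n (0..15); the registers are big-endian MMIO. */
    static u32 read_rcw_word(struct ccsr_guts __iomem *guts, unsigned int n)
    {
        return in_be32(&guts->rcwsr[n]);
    }
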
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 1bbb3013d6aa..8add8b861e8d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
static inline void ack_bad_irq(unsigned int irq)
{
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 766b77d527ac..1d53a65b4ec1 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -48,7 +48,7 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
#endif /* CONFIG_PPC_BOOK3S_64 */
-static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned pdshift)
{
/*
@@ -58,9 +58,9 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
*/
unsigned long idx = 0;
- pte_t *dir = hugepd_page(*hpdp);
+ pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
- idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
+ idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif
return dir + idx;
@@ -193,7 +193,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
}
#define hugepd_shift(x) 0
-static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned pdshift)
{
return 0;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 9eaf301ac952..a8d2ef30d473 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -855,9 +855,6 @@ static inline void * bus_to_virt(unsigned long address)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
-void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
- size_t size, unsigned long flags);
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 6acf0c2a0f99..942c7b1678e3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -170,8 +170,6 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel,
pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 0aa817933e6a..2d81e202bdcc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
@@ -148,7 +147,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* This covers 14..54 bits of va*/
rb = (v & ~0x7fUL) << 16; /* AVA field */
- rb |= v >> (62 - 8); /* B field */
+ rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
/*
* AVA in v had cleared lower 23 bits. We need to derive
* that from pteg index
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 047855619cc4..7efd666a3fa7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -180,11 +180,6 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvm_rma_info {
- atomic_t use_count;
- unsigned long base_pfn;
-};
-
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
@@ -214,16 +209,9 @@ struct revmap_entry {
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
-/* Low-order bits in memslot->arch.slot_phys[] */
-#define KVMPPC_PAGE_ORDER_MASK 0x1f
-#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
-#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
-#define KVMPPC_GOT_PAGE 0x80
-
struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long *rmap;
- unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
@@ -242,14 +230,12 @@ struct kvm_arch {
struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
- int using_mmu_notifiers;
u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
unsigned long hpt_npte;
unsigned long hpt_mask;
atomic_t hpte_mod_interest;
- spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -297,6 +283,7 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
spinlock_t lock;
wait_queue_head_t wq;
+ spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
struct kvm_vcpu *runner;
@@ -308,6 +295,7 @@ struct kvmppc_vcore {
ulong dpdes; /* doorbell state (POWER8) */
void *mpp_buffer; /* Micro Partition Prefetch buffer */
bool mpp_buffer_is_valid;
+ ulong conferring_threads;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -664,6 +652,8 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+
+ u32 emul_inst;
#endif
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a6dcdb6d13c1..46bf652c9169 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -170,8 +170,6 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba);
-extern struct kvm_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 307347f8ddbd..c8175a3fe560 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -42,7 +42,7 @@ struct machdep_calls {
unsigned long newpp,
unsigned long vpn,
int bpsize, int apsize,
- int ssize, int local);
+ int ssize, unsigned long flags);
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
@@ -60,7 +60,7 @@ struct machdep_calls {
void (*hugepage_invalidate)(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize);
+ int psize, int ssize, int local);
/* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*hpte_clear_all)(void);
@@ -142,7 +142,6 @@ struct machdep_calls {
#endif
void (*restart)(char *cmd);
- void (*power_off)(void);
void (*halt)(void);
void (*panic)(char *str);
void (*cpu_die)(void);
@@ -292,10 +291,6 @@ struct machdep_calls {
#ifdef CONFIG_ARCH_RANDOM
int (*get_random_long)(unsigned long *v);
#endif
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
- int (*remove_memory)(u64, u64);
-#endif
};
extern void e500_idle(void);
@@ -343,16 +338,6 @@ extern sys_ctrler_t sys_ctrler;
#endif /* CONFIG_PPC_PMAC */
-
-/* Functions to produce codes on the leds.
- * The SRC code should be unique for the message category and should
- * be limited to the lower 24 bits (the upper 8 are set by these funcs),
- * and (for boot & dump) should be sorted numerically in the order
- * the events occur.
- */
-/* Print a boot progress message. */
-void ppc64_boot_msg(unsigned int src, const char *msg);
-
static inline void log_error(char *buf, unsigned int err_type, int fatal)
{
if (ppc_md.log_error)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3d11d3ce79ec..986b9e1e1044 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -56,6 +56,7 @@
* additional information from the MI_EPN, and MI_TWC registers.
*/
#define SPRN_MI_RPN 790
+#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
/* Define an RPN value for mapping kernel memory to large virtual
* pages for boot initialization. This has real page number of 0,
@@ -129,6 +130,7 @@
* additional information from the MD_EPN, and MD_TWC registers.
*/
#define SPRN_MD_RPN 798
+#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
/* This is a temporary storage register that could be used to save
* a processor working register during a tablewalk.
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index aeebc94b2bce..4f13c3ed7acf 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -316,27 +316,33 @@ static inline unsigned long hpt_hash(unsigned long vpn,
return hash & 0x7fffffffffUL;
}
+#define HPTE_LOCAL_UPDATE 0x1
+#define HPTE_NOHPTE_UPDATE 0x2
+
extern int __hash_page_4K(unsigned long ea, unsigned long access,
unsigned long vsid, pte_t *ptep, unsigned long trap,
- unsigned int local, int ssize, int subpage_prot);
+ unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
unsigned long vsid, pte_t *ptep, unsigned long trap,
- unsigned int local, int ssize);
+ unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
-extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap);
-extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
+extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap,
+ unsigned long flags);
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+ unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
- pte_t *ptep, unsigned long trap, int local, int ssize,
- unsigned int shift, unsigned int mmu_psize);
+ pte_t *ptep, unsigned long trap, unsigned long flags,
+ int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
unsigned long vsid, pmd_t *pmdp, unsigned long trap,
- int local, int ssize, unsigned int psize);
+ unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
unsigned long vsid, pmd_t *pmdp,
- unsigned long trap, int local,
+ unsigned long trap, unsigned long flags,
int ssize, unsigned int psize)
{
BUG();
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9124b0ede1fc..5cd8d2fddba9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -154,6 +154,10 @@ struct opal_sg_list {
#define OPAL_HANDLE_HMI 98
#define OPAL_REGISTER_DUMP_REGION 101
#define OPAL_UNREGISTER_DUMP_REGION 102
+#define OPAL_WRITE_TPO 103
+#define OPAL_READ_TPO 104
+#define OPAL_IPMI_SEND 107
+#define OPAL_IPMI_RECV 108
#ifndef __ASSEMBLY__
@@ -284,62 +288,6 @@ enum OpalMessageType {
OPAL_MSG_TYPE_MAX,
};
-/* Machine check related definitions */
-enum OpalMCE_Version {
- OpalMCE_V1 = 1,
-};
-
-enum OpalMCE_Severity {
- OpalMCE_SEV_NO_ERROR = 0,
- OpalMCE_SEV_WARNING = 1,
- OpalMCE_SEV_ERROR_SYNC = 2,
- OpalMCE_SEV_FATAL = 3,
-};
-
-enum OpalMCE_Disposition {
- OpalMCE_DISPOSITION_RECOVERED = 0,
- OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalMCE_Initiator {
- OpalMCE_INITIATOR_UNKNOWN = 0,
- OpalMCE_INITIATOR_CPU = 1,
-};
-
-enum OpalMCE_ErrorType {
- OpalMCE_ERROR_TYPE_UNKNOWN = 0,
- OpalMCE_ERROR_TYPE_UE = 1,
- OpalMCE_ERROR_TYPE_SLB = 2,
- OpalMCE_ERROR_TYPE_ERAT = 3,
- OpalMCE_ERROR_TYPE_TLB = 4,
-};
-
-enum OpalMCE_UeErrorType {
- OpalMCE_UE_ERROR_INDETERMINATE = 0,
- OpalMCE_UE_ERROR_IFETCH = 1,
- OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
- OpalMCE_UE_ERROR_LOAD_STORE = 3,
- OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
-};
-
-enum OpalMCE_SlbErrorType {
- OpalMCE_SLB_ERROR_INDETERMINATE = 0,
- OpalMCE_SLB_ERROR_PARITY = 1,
- OpalMCE_SLB_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_EratErrorType {
- OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
- OpalMCE_ERAT_ERROR_PARITY = 1,
- OpalMCE_ERAT_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_TlbErrorType {
- OpalMCE_TLB_ERROR_INDETERMINATE = 0,
- OpalMCE_TLB_ERROR_PARITY = 1,
- OpalMCE_TLB_ERROR_MULTIHIT = 2,
-};
-
enum OpalThreadStatus {
OPAL_THREAD_INACTIVE = 0x0,
OPAL_THREAD_STARTED = 0x1,
@@ -452,52 +400,15 @@ struct opal_msg {
__be64 params[8];
};
-struct opal_machine_check_event {
- enum OpalMCE_Version version:8; /* 0x00 */
- uint8_t in_use; /* 0x01 */
- enum OpalMCE_Severity severity:8; /* 0x02 */
- enum OpalMCE_Initiator initiator:8; /* 0x03 */
- enum OpalMCE_ErrorType error_type:8; /* 0x04 */
- enum OpalMCE_Disposition disposition:8; /* 0x05 */
- uint8_t reserved_1[2]; /* 0x06 */
- uint64_t gpr3; /* 0x08 */
- uint64_t srr0; /* 0x10 */
- uint64_t srr1; /* 0x18 */
- union { /* 0x20 */
- struct {
- enum OpalMCE_UeErrorType ue_error_type:8;
- uint8_t effective_address_provided;
- uint8_t physical_address_provided;
- uint8_t reserved_1[5];
- uint64_t effective_address;
- uint64_t physical_address;
- uint8_t reserved_2[8];
- } ue_error;
-
- struct {
- enum OpalMCE_SlbErrorType slb_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
- } slb_error;
-
- struct {
- enum OpalMCE_EratErrorType erat_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
- } erat_error;
+enum {
+ OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
+};
- struct {
- enum OpalMCE_TlbErrorType tlb_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
- } tlb_error;
- } u;
+struct opal_ipmi_msg {
+ uint8_t version;
+ uint8_t netfn;
+ uint8_t cmd;
+ uint8_t data[];
};
/* FSP memory errors handling */
@@ -819,6 +730,9 @@ int64_t opal_rtc_read(__be32 *year_month_day,
__be64 *hour_minute_second_millisecond);
int64_t opal_rtc_write(uint32_t year_month_day,
uint64_t hour_minute_second_millisecond);
+int64_t opal_tpo_read(uint64_t token, __be32 *year_mon_day, __be32 *hour_min);
+int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
+ uint32_t hour_min);
int64_t opal_cec_power_down(uint64_t request);
int64_t opal_cec_reboot(void);
int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
@@ -963,6 +877,10 @@ int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
+int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
+ uint64_t msg_len);
+int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
+ uint64_t *msg_len);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -992,8 +910,6 @@ extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
struct rtc_time;
-extern int opal_set_rtc_time(struct rtc_time *tm);
-extern void opal_get_rtc_time(struct rtc_time *tm);
extern unsigned long opal_get_boot_time(void);
extern void opal_nvram_init(void);
extern void opal_flash_init(void);
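
For reference, the new IPMI entry points above take an interface id plus a length-prefixed opal_ipmi_msg; a minimal caller could look like the sketch below. This is only an illustration (example_ipmi_send, interface_id and the request parameters are made up, and error handling is reduced to the allocation check); the in-tree consumer is the powernv IPMI driver added alongside these calls.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Sketch only: hand an already-formatted IPMI request to firmware. */
static int64_t example_ipmi_send(uint64_t interface_id, uint8_t netfn, uint8_t cmd,
                                 const uint8_t *req, size_t req_len)
{
        struct opal_ipmi_msg *msg;
        int64_t rc;

        msg = kmalloc(sizeof(*msg) + req_len, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
        msg->netfn = netfn;
        msg->cmd = cmd;
        memcpy(msg->data, req, req_len);

        rc = opal_ipmi_send(interface_id, msg, sizeof(*msg) + req_len);
        kfree(msg);
        return rc;
}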
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5139ea6910b..24a386cbb928 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
-struct opal_machine_check_event;
/*
* Defines the layout of the paca.
@@ -153,12 +152,6 @@ struct paca_struct {
u64 tm_scratch; /* TM scratch area for reclaim */
#endif
-#ifdef CONFIG_PPC_POWERNV
- /* Pointer to OPAL machine check event structure set by the
- * early exception handler for use by high level C handler
- */
- struct opal_machine_check_event *opal_mc_evt;
-#endif
#ifdef CONFIG_PPC_BOOK3S_64
/* Exclusive emergency stack pointer for machine check exception. */
void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 26fe1ae15212..69c059887a2c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -379,12 +379,14 @@ static inline int hugepd_ok(hugepd_t hpd)
}
#endif
-#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
+#define is_hugepd(hpd) (hugepd_ok(hpd))
+#define pgd_huge pgd_huge
int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep) 0
#define pgd_huge(pgd) 0
#endif /* CONFIG_HUGETLB_PAGE */
+#define __hugepd(x) ((hugepd_t) { (x) })
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 945e47adf7db..234e07c47803 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -170,6 +170,25 @@ static inline unsigned long pte_update(pte_t *p,
#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
+#ifdef CONFIG_PPC_8xx
+ unsigned long tmp2;
+
+ __asm__ __volatile__("\
+1: lwarx %0,0,%4\n\
+ andc %1,%0,%5\n\
+ or %1,%1,%6\n\
+ /* 0x200 == Extended encoding, bit 22 */ \
+ /* Bit 22 has to be 1 if neither _PAGE_USER nor _PAGE_RW are set */ \
+ rlwimi %1,%1,32-2,0x200\n /* get _PAGE_USER */ \
+ rlwinm %3,%1,32-1,0x200\n /* get _PAGE_RW */ \
+ or %1,%3,%1\n\
+ xori %1,%1,0x200\n"
+" stwcx. %1,0,%4\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+#else /* CONFIG_PPC_8xx */
__asm__ __volatile__("\
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
@@ -180,6 +199,7 @@ static inline unsigned long pte_update(pte_t *p,
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
: "cc" );
+#endif /* CONFIG_PPC_8xx */
#else /* PTE_ATOMIC_UPDATES */
unsigned long old = pte_val(*p);
*p = __pte((old & ~clr) | set);
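
The 8xx branch added above has to keep one extra bit consistent on every atomic PTE update: bit 0x200 (named _PAGE_KNLRO in the pte-8xx.h hunk below) must end up set exactly when neither _PAGE_USER nor _PAGE_RW is set. The rlwimi/rlwinm/xori trio does that in three instructions; a C rendering of the same bit manipulation, as a reading aid only (fixup_knlro_8xx is not in the patch, the values are the 8xx ones):

/* Reading aid: recompute the 0x200 "kernel read-only" bit after an update. */
static inline unsigned long fixup_knlro_8xx(unsigned long pte)
{
        unsigned long user = (pte & 0x0800) >> 2;       /* _PAGE_USER moved to 0x200 */
        unsigned long rw   = (pte & 0x0400) >> 1;       /* _PAGE_RW   moved to 0x200 */

        /* 0x200 = !(_PAGE_USER | _PAGE_RW), everything else unchanged */
        return (pte & ~0x200UL) | (((user | rw) & 0x200) ^ 0x200);
}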
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 7b935683f268..132ee1d482c2 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -57,7 +57,21 @@
#define pgd_present(pgd) (pgd_val(pgd) != 0)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
-#define pgd_page(pgd) virt_to_page(pgd_page_vaddr(pgd))
+
+#ifndef __ASSEMBLY__
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+ return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+ return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#endif /* !__ASSEMBLY__ */
#define pud_offset(pgdp, addr) \
(((pud_t *) pgd_page_vaddr(*(pgdp))) + \
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index a56b82fb0609..1de35bbd02a6 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -38,4 +38,7 @@
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS 0x1ff
+#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
+#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
+
#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 9b4b1904efae..b9dcc936e2d1 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -152,7 +152,7 @@
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
|| (pmd_val(pmd) & PMD_BAD_BITS))
-#define pmd_present(pmd) (pmd_val(pmd) != 0)
+#define pmd_present(pmd) (!pmd_none(pmd))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);
@@ -164,9 +164,21 @@ extern struct page *pmd_page(pmd_t pmd);
#define pud_present(pud) (pud_val(pud) != 0)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
-#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
+extern struct page *pud_page(pud_t pud);
+
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+#define pud_write(pud) pte_write(pud_pte(pud))
#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_write(pgd) pte_write(pgd_pte(pgd))
/*
* Find an entry in a page-table-directory. We combine the address region
@@ -422,7 +434,22 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
-
+/*
+ *
+ * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs
+ * page. The hugetlbfs page table walking and mangling paths are totally
+ * separated from the core VM paths and they're differentiated by
+ * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build
+ * time in such case.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ *
+ */
static inline int pmd_trans_huge(pmd_t pmd)
{
/*
@@ -431,16 +458,6 @@ static inline int pmd_trans_huge(pmd_t pmd)
return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}
-static inline int pmd_large(pmd_t pmd)
-{
- /*
- * leaf pte for huge page, bottom two bits != 00
- */
- if (pmd_trans_huge(pmd))
- return pmd_val(pmd) & _PAGE_PRESENT;
- return 0;
-}
-
static inline int pmd_trans_splitting(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
@@ -451,6 +468,14 @@ static inline int pmd_trans_splitting(pmd_t pmd)
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_large(pmd_t pmd)
+{
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pmd_val(pmd) & 0x3) != 0x0);
+}
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
@@ -576,6 +601,5 @@ static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
*/
return true;
}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
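
The net effect of moving pmd_large() out of the CONFIG_TRANSPARENT_HUGEPAGE block, together with the new comment, is a two-level test: pmd_large() says whether the entry is a huge-page leaf at all (bottom two bits non-zero), while pmd_trans_huge() narrows that to THP entries, which additionally carry _PAGE_THP_HUGE and per-subpage tracking. A small illustration of how a walker would tell the cases apart (classify_pmd is made up; the helpers are the ones defined above):

/* Illustration only. */
static void classify_pmd(pmd_t pmd)
{
        if (!pmd_large(pmd)) {
                /* regular pmd: points at a PTE page, keep walking */
        } else if (pmd_trans_huge(pmd)) {
                /* THP leaf: subpage details are tracked at the pmd level */
        } else {
                /* explicit hugetlbfs leaf: handled by the VM_HUGETLB paths */
        }
}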
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 316f9a5da173..a8805fee0df9 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -274,11 +274,9 @@ extern void paging_init(void);
*/
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr);
-
extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr);
+ unsigned long end, int write,
+ struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#define has_transparent_hugepage() 0
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6f8536208049..1a5287759fc8 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -204,6 +204,7 @@
#define PPC_INST_ERATSX_DOT 0x7c000127
/* Misc instructions for BPF compiler */
+#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LHBRX 0x7c00062c
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index dda7ac4c80bd..29c3798cf800 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -451,7 +451,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
-extern void power7_nap(int check_irq);
+extern unsigned long power7_nap(int check_irq);
extern void power7_sleep(void);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index d44826e4ff97..daa4616e61c4 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -48,19 +48,22 @@
*/
#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */
#define _PAGE_USER 0x0800 /* msb PP bits */
+/* set when neither _PAGE_USER nor _PAGE_RW are set */
+#define _PAGE_KNLRO 0x0200
#define _PMD_PRESENT 0x0001
#define _PMD_BAD 0x0ff0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
-#define _PTE_NONE_MASK _PAGE_ACCESSED
+#define _PTE_NONE_MASK _PAGE_KNLRO
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO (_PAGE_SHARED)
+#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_KNLRO)
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_KNLRO)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 11ba86e17631..fbdf18cf954c 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,7 +8,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern int init_bootmem_done; /* set once bootmem is available */
extern unsigned long long memory_limit;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -24,7 +23,7 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
void check_for_initrd(void);
-void do_init_bootmem(void);
+void initmem_init(void);
void setup_panic(void);
#define ARCH_PANIC_TIMEOUT 180
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index b034ecdb7c74..ebc4f165690a 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -71,13 +71,12 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
+register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm("r1");
-
/* gcc4, at least, is smart enough to turn this into a single
* rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
+ return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
}
#endif /* __ASSEMBLY__ */
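
current_thread_info() is still just an alignment mask on the stack pointer; the global register variable only lets the compiler read r1 directly instead of materialising a local copy, so the whole function can remain a single rlwinm/clrrdi. A worked example, assuming 16KB kernel stacks (THREAD_SIZE == 0x4000):

/* __current_r1             = 0xc000000001234d68
 * THREAD_SIZE - 1          = 0x0000000000003fff
 * r1 & ~(THREAD_SIZE - 1)  = 0xc000000001234000   <- base of the stack allocation,
 *                                                    where thread_info lives
 */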
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 2def01ed0cb2..23d351ca0303 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
static inline void arch_enter_lazy_mmu_mode(void)
{
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
static inline void arch_leave_lazy_mmu_mode(void)
{
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->index)
__flush_tlb_pending(batch);
@@ -125,9 +125,11 @@ static inline void arch_leave_lazy_mmu_mode(void)
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
- int ssize, int local);
+ int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
-
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+ pmd_t *pmdp, unsigned int psize, int ssize,
+ unsigned long flags);
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
index a2eac409c1ec..e5f8dd366212 100644
--- a/arch/powerpc/include/asm/vga.h
+++ b/arch/powerpc/include/asm/vga.h
@@ -38,12 +38,10 @@ static inline u16 scr_readw(volatile const u16 *addr)
#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
-extern unsigned long vgacon_remap_base;
-
#ifdef __powerpc64__
#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s))
#else
-#define VGA_MAP_MEM(x,s) (x + vgacon_remap_base)
+#define VGA_MAP_MEM(x,s) (x)
#endif
#define vga_readb(x) (*(x))
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0d050ea37a04..6997f4a271df 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
static inline void xics_push_cppr(unsigned int vec)
{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
static inline unsigned char xics_pop_cppr(void)
{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
if (WARN_ON(os_cppr->index < 1))
return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
static inline void xics_set_base_cppr(unsigned char cppr)
{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
/* we only really want to set the priority when there's
* just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
static inline unsigned char xics_cppr_top(void)
{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
return os_cppr->stack[os_cppr->index];
}
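
The xics.h hunks above, like the tlbflush.h, dbell.c, hw_breakpoint.c, kprobes.c and mce.c hunks elsewhere in this diff, are one mechanical conversion: the deprecated __get_cpu_var() accessor is replaced by the this_cpu API. The general shape of that conversion, shown on a made-up per-CPU variable (example_stat and touch_stat are not from the patch):

#include <linux/percpu.h>
#include <linux/printk.h>

struct example_stat { unsigned long events; };
static DEFINE_PER_CPU(struct example_stat, example_stat);

static void touch_stat(void)
{
        /* old: struct example_stat *s = &__get_cpu_var(example_stat); */
        struct example_stat *s = this_cpu_ptr(&example_stat);

        s->events++;

        /* old: __get_cpu_var(example_stat).events++;  -- or, without a pointer: */
        __this_cpu_inc(example_stat.events);

        /* old: pr_debug("%lu\n", __get_cpu_var(example_stat).events); */
        pr_debug("events=%lu\n", __this_cpu_read(example_stat.events));
}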
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a9c3e2e18c05..c046666038f8 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -87,4 +87,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_POWERPC_SOCKET_H */
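
SO_ATTACH_BPF is the per-architecture mirror of the new core socket option: unlike SO_ATTACH_FILTER it takes the file descriptor of an eBPF program that was already loaded with the bpf() syscall, and SO_DETACH_BPF is deliberately an alias for SO_DETACH_FILTER. A minimal userspace sketch (attach_filter is made up; prog_fd is assumed to come from a prior BPF_PROG_LOAD of a BPF_PROG_TYPE_SOCKET_FILTER program):

#include <sys/socket.h>

static int attach_filter(int sock, int prog_fd)
{
        /* The kernel resolves prog_fd to the loaded program and attaches it to sock. */
        return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
                          &prog_fd, sizeof(prog_fd));
}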
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 34f55524d456..86150fbb42c3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -908,7 +908,7 @@ int fix_alignment(struct pt_regs *regs)
flush_fp_to_thread(current);
}
- if ((nb == 16)) {
+ if (nb == 16) {
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9d7dede2847c..24d78e1871c9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -489,7 +489,6 @@ int main(void)
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
- DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
@@ -499,6 +498,7 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+ DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -726,12 +726,5 @@ int main(void)
arch.timing_last_enter.tv32.tbl));
#endif
-#ifdef CONFIG_PPC_POWERNV
- DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
- DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
- DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
- DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
-#endif
-
return 0;
}
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index c78e6dac4d7d..cfa0f81a5bb0 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -12,7 +12,6 @@
#undef DEBUG
#include <linux/crash_dump.h>
-#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index d55c76c571f3..f4217819cc31 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
- __get_cpu_var(irq_stat).doorbell_irqs++;
+ __this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 2248a1999c64..e1b6d8e17289 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str)
{
if (!strcmp(str, "off"))
eeh_add_flag(EEH_FORCE_DISABLED);
+ else if (!strcmp(str, "early_log"))
+ eeh_add_flag(EEH_EARLY_DUMP_LOG);
return 1;
}
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
int eeh_reset_pe(struct eeh_pe *pe)
{
int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
- int i, rc;
+ int i, state, ret;
+
+ /* Mark as reset and block config space */
+ eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
/* Take three shots at resetting the bus */
- for (i=0; i<3; i++) {
+ for (i = 0; i < 3; i++) {
eeh_reset_pe_once(pe);
/*
* EEH_PE_ISOLATED is expected to be removed after
* BAR restore.
*/
- rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if ((rc & flags) == flags)
- return 0;
+ state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+ if ((state & flags) == flags) {
+ ret = 0;
+ goto out;
+ }
- if (rc < 0) {
- pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+ if (state < 0) {
+ pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
__func__, pe->phb->global_number, pe->addr);
- return -1;
+ ret = -ENOTRECOVERABLE;
+ goto out;
}
- pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
- i+1, pe->phb->global_number, pe->addr, rc);
+
+ /* We might run out of credits */
+ ret = -EIO;
+ pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
+ __func__, state, pe->phb->global_number, pe->addr, (i + 1));
}
- return -1;
+out:
+ eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+ return ret;
}
/**
@@ -920,11 +933,8 @@ int eeh_init(void)
pr_warn("%s: Platform EEH operation not found\n",
__func__);
return -EEXIST;
- } else if ((ret = eeh_ops->init())) {
- pr_warn("%s: Failed to call platform init function (%d)\n",
- __func__, ret);
+ } else if ((ret = eeh_ops->init()))
return ret;
- }
/* Initialize EEH event */
ret = eeh_event_init();
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
static struct pci_device_id eeh_reset_ids[] = {
{ PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */
{ PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */
+ { PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */
{ 0 }
};
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6535936bdf27..b17e793ba67e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
eeh_pe_dev_traverse(pe, eeh_report_error, &result);
/* Issue reset */
- eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
ret = eeh_reset_pe(pe);
if (ret) {
- eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
return ret;
}
- eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
/* Unfreeze the PE */
ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
* config accesses. So we prefer to block them. However, controlled
* PCI config accesses initiated from EEH itself are allowed.
*/
- eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
rc = eeh_reset_pe(pe);
- if (rc) {
- eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ if (rc)
return rc;
- }
pci_lock_rescan_remove();
/* Restore PE */
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
- eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 22b45a4955cd..10a093579191 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1424,12 +1424,18 @@ _GLOBAL(ftrace_graph_caller)
lwz r4, 44(r1)
subi r4, r4, MCOUNT_INSN_SIZE
- /* get the parent address */
- addi r3, r1, 52
+ /* Grab the LR out of the caller stack frame */
+ lwz r3,52(r1)
bl prepare_ftrace_return
nop
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR in the caller's stack frame to this.
+ */
+ stw r3,52(r1)
+
MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr
@@ -1457,4 +1463,4 @@ _GLOBAL(return_to_handler)
blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_MCOUNT */
+#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0905c8da90f1..194e46dcf08d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1227,13 +1227,20 @@ _GLOBAL(ftrace_graph_caller)
ld r4, 128(r1)
subi r4, r4, MCOUNT_INSN_SIZE
- /* get the parent address */
+ /* Grab the LR out of the caller stack frame */
ld r11, 112(r1)
- addi r3, r11, 16
+ ld r3, 16(r11)
bl prepare_ftrace_return
nop
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR in the callers stack frame to this.
+ */
+ ld r11, 112(r1)
+ std r3, 16(r11)
+
ld r0, 128(r1)
mtlr r0
addi r1, r1, 112
@@ -1241,28 +1248,6 @@ _GLOBAL(ftrace_graph_caller)
_GLOBAL(return_to_handler)
/* need to save return values */
- std r4, -24(r1)
- std r3, -16(r1)
- std r31, -8(r1)
- mr r31, r1
- stdu r1, -112(r1)
-
- bl ftrace_return_to_handler
- nop
-
- /* return value has real return address */
- mtlr r3
-
- ld r1, 0(r1)
- ld r4, -24(r1)
- ld r3, -16(r1)
- ld r31, -8(r1)
-
- /* Jump back to real return address */
- blr
-
-_GLOBAL(mod_return_to_handler)
- /* need to save return values */
std r4, -32(r1)
std r3, -24(r1)
/* save TOC */
@@ -1272,7 +1257,7 @@ _GLOBAL(mod_return_to_handler)
stdu r1, -112(r1)
/*
- * We are in a module using the module's TOC.
+ * We might be called from a module.
* Switch to our TOC to run inside the core kernel.
*/
ld r2, PACATOC(r13)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 72e783ea0681..db08382e19f1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -131,6 +131,8 @@ BEGIN_FTR_SECTION
1:
#endif
+ /* Return SRR1 from power7_nap() */
+ mfspr r3,SPRN_SRR1
beq cr1,2f
b power7_wakeup_noloss
2: b power7_wakeup_loss
@@ -292,15 +294,26 @@ decrementer_pSeries:
. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
- HMT_MEDIUM
+ /*
+ * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
+ * that support it) before changing to HMT_MEDIUM. That allows the KVM
+ * code to save that value into the guest state (it is the guest's PPR
+ * value). Otherwise just change to HMT_MEDIUM as userspace has
+ * already saved the PPR.
+ */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
SET_SCRATCH0(r13)
GET_PACA(r13)
std r9,PACA_EXGEN+EX_R9(r13)
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
+ HMT_MEDIUM;
std r10,PACA_EXGEN+EX_R10(r13)
+ OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
mfcr r9
KVMTEST(0xc00)
GET_SCRATCH0(r13)
+#else
+ HMT_MEDIUM;
#endif
SYSCALL_PSERIES_1
SYSCALL_PSERIES_2_RFID
@@ -1301,23 +1314,6 @@ hmi_exception_after_realmode:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b hmi_exception_hv
-#ifdef CONFIG_PPC_POWERNV
-_GLOBAL(opal_mc_secondary_handler)
- HMT_MEDIUM_PPR_DISCARD
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- clrldi r3,r3,2
- tovirt(r3,r3)
- std r3,PACA_OPAL_MC_EVT(r13)
- ld r13,OPAL_MC_SRR0(r3)
- mtspr SPRN_SRR0,r13
- ld r13,OPAL_MC_SRR1(r3)
- mtspr SPRN_SRR1,r13
- ld r3,OPAL_MC_GPR3(r3)
- GET_SCRATCH0(r13)
- b machine_check_pSeries
-#endif /* CONFIG_PPC_POWERNV */
-
#define MACHINE_CHECK_HANDLER_WINDUP \
/* Clear MSR_RI before setting SRR0 and SRR1. */\
@@ -1571,9 +1567,11 @@ do_hash_page:
* r3 contains the faulting address
* r4 contains the required access permissions
* r5 contains the trap number
+ * r6 contains dsisr
*
* at return r3 = 0 for success, 1 for page fault, negative for error
*/
+ ld r6,_DSISR(r1)
bl hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index e66af6d265e8..44d4d8eb3c85 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -510,79 +510,36 @@ int ftrace_disable_ftrace_graph_caller(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
-#ifdef CONFIG_PPC64
-extern void mod_return_to_handler(void);
-#endif
-
/*
* Hook the return address and push it in the stack of return addrs
- * in current thread info.
+ * in current thread info. Return the address we want to divert to.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
- unsigned long old;
- int faulted;
struct ftrace_graph_ent trace;
- unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long return_hooker;
if (unlikely(ftrace_graph_is_dead()))
- return;
+ goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
- return;
-
-#ifdef CONFIG_PPC64
- /* non core kernel code needs to save and restore the TOC */
- if (REGION_ID(self_addr) != KERNEL_REGION_ID)
- return_hooker = (unsigned long)&mod_return_to_handler;
-#endif
-
- return_hooker = ppc_function_entry((void *)return_hooker);
+ goto out;
- /*
- * Protect against fault, even if it shouldn't
- * happen. This tool is too much intrusive to
- * ignore such a protection.
- */
- asm volatile(
- "1: " PPC_LL "%[old], 0(%[parent])\n"
- "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
- " li %[faulted], 0\n"
- "3:\n"
-
- ".section .fixup, \"ax\"\n"
- "4: li %[faulted], 1\n"
- " b 3b\n"
- ".previous\n"
-
- ".section __ex_table,\"a\"\n"
- PPC_LONG_ALIGN "\n"
- PPC_LONG "1b,4b\n"
- PPC_LONG "2b,4b\n"
- ".previous"
-
- : [old] "=&r" (old), [faulted] "=r" (faulted)
- : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
- : "memory"
- );
-
- if (unlikely(faulted)) {
- ftrace_graph_stop();
- WARN_ON(1);
- return;
- }
+ return_hooker = ppc_function_entry(return_to_handler);
- trace.func = self_addr;
+ trace.func = ip;
trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- *parent = old;
- return;
- }
+ if (!ftrace_graph_entry(&trace))
+ goto out;
+
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
+ goto out;
- if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
- *parent = old;
+ parent = return_hooker;
+out:
+ return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fafff8dbd5d9..d99aac0d69f1 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -33,13 +33,31 @@
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
-#define DO_8xx_CPU6(val, reg) \
- li reg, val; \
- stw reg, 12(r0); \
- lwz reg, 12(r0);
+#define SPRN_MI_TWC_ADDR 0x2b80
+#define SPRN_MI_RPN_ADDR 0x2d80
+#define SPRN_MD_TWC_ADDR 0x3b80
+#define SPRN_MD_RPN_ADDR 0x3d80
+
+#define MTSPR_CPU6(spr, reg, treg) \
+ li treg, spr##_ADDR; \
+ stw treg, 12(r0); \
+ lwz treg, 12(r0); \
+ mtspr spr, reg
#else
-#define DO_8xx_CPU6(val, reg)
+#define MTSPR_CPU6(spr, reg, treg) \
+ mtspr spr, reg
#endif
+
+/*
+ * Value for the bits that have fixed value in RPN entries.
+ * Also used for tagging DAR for DTLBerror.
+ */
+#ifdef CONFIG_PPC_16K_PAGES
+#define RPN_PATTERN (0x00f0 | MD_SPS16K)
+#else
+#define RPN_PATTERN 0x00f0
+#endif
+
__HEAD
_ENTRY(_stext);
_ENTRY(_start);
@@ -65,13 +83,6 @@ _ENTRY(_start);
* 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
* the "internal" processor registers before MMU_init is called.
*
- * The TLB code currently contains a major hack. Since I use the condition
- * code register, I have to save and restore it. I am out of registers, so
- * I just store it in memory location 0 (the TLB handlers are not reentrant).
- * To avoid making any decisions, I need to use the "segment" valid bit
- * in the first level table, but that would require many changes to the
- * Linux page directory/table functions that I don't want to do right now.
- *
* -- Dan
*/
.globl __start
@@ -211,7 +222,7 @@ MachineCheck:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- li r5,0x00f0
+ li r5,RPN_PATTERN
mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
@@ -219,30 +230,16 @@ MachineCheck:
EXC_XFER_STD(0x200, machine_check_exception)
/* Data access exception.
- * This is "never generated" by the MPC8xx. We jump to it for other
- * translation errors.
+ * This is "never generated" by the MPC8xx.
*/
. = 0x300
DataAccess:
- EXCEPTION_PROLOG
- mfspr r10,SPRN_DSISR
- stw r10,_DSISR(r11)
- mr r5,r10
- mfspr r4,SPRN_DAR
- li r10,0x00f0
- mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
- EXC_XFER_LITE(0x300, handle_page_fault)
/* Instruction access exception.
- * This is "never generated" by the MPC8xx. We jump to it for other
- * translation errors.
+ * This is "never generated" by the MPC8xx.
*/
. = 0x400
InstructionAccess:
- EXCEPTION_PROLOG
- mr r4,r12
- mr r5,r9
- EXC_XFER_LITE(0x400, handle_page_fault)
/* External interrupt */
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
@@ -253,7 +250,7 @@ Alignment:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- li r5,0x00f0
+ li r5,RPN_PATTERN
mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
@@ -292,8 +289,8 @@ SystemCall:
. = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
- * TLB. It is modelled after the example in the Motorola manual. The task
- * switch loads the M_TWB register with the pointer to the first level table.
+ * TLB. The task switch loads the M_TW register with the pointer to the first
+ * level table.
* If we discover there is no second level table (value is zero) or if there
* is an invalid pte, we load that into the TLB, which causes another fault
* into the TLB Error interrupt where we can handle such problems.
@@ -302,20 +299,17 @@ SystemCall:
*/
InstructionTLBMiss:
#ifdef CONFIG_8xx_CPU6
- stw r3, 8(r0)
+ mtspr SPRN_DAR, r3
#endif
EXCEPTION_PROLOG_0
mtspr SPRN_SPRG_SCRATCH2, r10
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
- addi r11, r10, 0x1000
+ addi r11, r10, PAGE_SIZE
tlbie r11
- addi r11, r10, -0x1000
+ addi r11, r10, -PAGE_SIZE
tlbie r11
#endif
- DO_8xx_CPU6(0x3780, r3)
- mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
- mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -323,32 +317,37 @@ InstructionTLBMiss:
#ifdef CONFIG_MODULES
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
- andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
+ andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
+#endif
+ mfspr r11, SPRN_M_TW /* Get level 1 table base address */
+#ifdef CONFIG_MODULES
beq 3f
- lis r11, swapper_pg_dir@h
- ori r11, r11, swapper_pg_dir@l
- rlwimi r10, r11, 0, 2, 19
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
+ ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
3:
#endif
- lwz r11, 0(r10) /* Get the level 1 entry */
+ /* Extract level 1 index */
+ rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwzx r11, r10, r11 /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
/* We have a pte table, so load the MI_TWC with the attributes
* for this "segment."
*/
- ori r11,r11,1 /* Set valid bit */
- DO_8xx_CPU6(0x2b80, r3)
- mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
- DO_8xx_CPU6(0x3b80, r3)
- mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
- mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
- lwz r10, 0(r11) /* Get the pte */
+ MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */
+ mfspr r11, SPRN_SRR0 /* Get effective address of fault */
+ /* Extract level 2 index */
+ rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+ lwzx r10, r10, r11 /* Get the pte */
#ifdef CONFIG_SWAP
andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
+ li r11, RPN_PATTERN
bne- cr0, 2f
+#else
+ li r11, RPN_PATTERN
#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 21 and 28 must be clear.
@@ -356,62 +355,63 @@ InstructionTLBMiss:
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
- li r11, 0x00f0
rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */
- DO_8xx_CPU6(0x2d80, r3)
- mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
+ MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0)
+ mfspr r3, SPRN_DAR
+ mtspr SPRN_DAR, r11 /* Tag DAR */
#endif
mfspr r10, SPRN_SPRG_SCRATCH2
EXCEPTION_EPILOG_0
rfi
2:
- mfspr r11, SPRN_SRR1
+ mfspr r10, SPRN_SRR1
/* clear all error bits as TLB Miss
* sets a few unconditionally
*/
- rlwinm r11, r11, 0, 0xffff
- mtspr SPRN_SRR1, r11
+ rlwinm r10, r10, 0, 0xffff
+ mtspr SPRN_SRR1, r10
/* Restore registers */
#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0)
+ mfspr r3, SPRN_DAR
+ mtspr SPRN_DAR, r11 /* Tag DAR */
#endif
mfspr r10, SPRN_SPRG_SCRATCH2
- EXCEPTION_EPILOG_0
- b InstructionAccess
+ b InstructionTLBError1
. = 0x1200
DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
- stw r3, 8(r0)
+ mtspr SPRN_DAR, r3
#endif
EXCEPTION_PROLOG_0
mtspr SPRN_SPRG_SCRATCH2, r10
- mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
+ mfspr r10, SPRN_MD_EPN
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- andi. r11, r10, 0x0800
+ andis. r11, r10, 0x8000
+ mfspr r11, SPRN_M_TW /* Get level 1 table base address */
beq 3f
- lis r11, swapper_pg_dir@h
- ori r11, r11, swapper_pg_dir@l
- rlwimi r10, r11, 0, 2, 19
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
+ ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
3:
- lwz r11, 0(r10) /* Get the level 1 entry */
+ /* Extract level 1 index */
+ rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwzx r11, r10, r11 /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
/* We have a pte table, so load fetch the pte from the table.
*/
- ori r11, r11, 1 /* Set valid bit in physical L2 page */
- DO_8xx_CPU6(0x3b80, r3)
- mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
- mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
+ mfspr r10, SPRN_MD_EPN /* Get address of fault */
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+ rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
/* Insert the Guarded flag into the TWC from the Linux PTE.
@@ -425,8 +425,7 @@ DataStoreTLBMiss:
* It is bit 25 in the Linux PTE and bit 30 in the TWC
*/
rlwimi r11, r10, 32-5, 30, 30
- DO_8xx_CPU6(0x3b80, r3)
- mtspr SPRN_MD_TWC, r11
+ MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
* We also need to know if the insn is a load/store, so:
@@ -442,14 +441,8 @@ DataStoreTLBMiss:
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
#endif
- /* Honour kernel RO, User NA */
- /* 0x200 == Extended encoding, bit 22 */
- rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
- /* r11 = (r10 & _PAGE_RW) >> 1 */
- rlwinm r11, r10, 32-1, 0x200
- or r10, r11, r10
- /* invert RW and 0x200 bits */
- xori r10, r10, _PAGE_RW | 0x200
+ /* invert RW */
+ xori r10, r10, _PAGE_RW
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 22 and 28 must be clear.
@@ -457,14 +450,13 @@ DataStoreTLBMiss:
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
-2: li r11, 0x00f0
+2: li r11, RPN_PATTERN
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
- DO_8xx_CPU6(0x3d80, r3)
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0)
+ mfspr r3, SPRN_DAR
#endif
mtspr SPRN_DAR, r11 /* Tag DAR */
mfspr r10, SPRN_SPRG_SCRATCH2
@@ -477,7 +469,17 @@ DataStoreTLBMiss:
*/
. = 0x1300
InstructionTLBError:
- b InstructionAccess
+ EXCEPTION_PROLOG_0
+InstructionTLBError1:
+ EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2
+ mr r4,r12
+ mr r5,r9
+ andis. r10,r5,0x4000
+ beq+ 1f
+ tlbie r4
+ /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
+1: EXC_XFER_LITE(0x400, handle_page_fault)
/* This is the data TLB error on the MPC8xx. This could be due to
* many reasons, including a dirty update to a pte. We bail out to
@@ -488,11 +490,21 @@ DataTLBError:
EXCEPTION_PROLOG_0
mfspr r11, SPRN_DAR
- cmpwi cr0, r11, 0x00f0
+ cmpwi cr0, r11, RPN_PATTERN
beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
- EXCEPTION_EPILOG_0
- b DataAccess
+ EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2
+ mfspr r5,SPRN_DSISR
+ stw r5,_DSISR(r11)
+ mfspr r4,SPRN_DAR
+ andis. r10,r5,0x4000
+ beq+ 1f
+ tlbie r4
+1: li r10,RPN_PATTERN
+ mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
+ /* 0x300 is DataAccess exception, needed by bad_page_fault() */
+ EXC_XFER_LITE(0x300, handle_page_fault)
EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
@@ -521,29 +533,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */
#define NO_SELF_MODIFYING_CODE
FixupDAR:/* Entry point for dcbx workaround. */
#ifdef CONFIG_8xx_CPU6
- stw r3, 8(r0)
+ mtspr SPRN_DAR, r3
#endif
mtspr SPRN_SPRG_SCRATCH2, r10
/* fetch instruction from memory. */
mfspr r10, SPRN_SRR0
andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
- DO_8xx_CPU6(0x3780, r3)
- mtspr SPRN_MD_EPN, r10
- mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */
+ mfspr r11, SPRN_M_TW /* Get level 1 table base address */
beq- 3f /* Branch if user space */
lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
- rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */
-3: lwz r11, 0(r11) /* Get the level 1 entry */
- DO_8xx_CPU6(0x3b80, r3)
- mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
- mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
- lwz r11, 0(r11) /* Get the pte */
+ /* Extract level 1 index */
+3: rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ lwzx r11, r10, r11 /* Get the level 1 entry */
+ rlwinm r10, r11,0,0,19 /* Extract page descriptor page address */
+ mfspr r11, SPRN_SRR0 /* Get effective address of fault */
+ /* Extract level 2 index */
+ rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+ lwzx r11, r10, r11 /* Get the pte */
#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0) /* restore r3 from memory */
+ mfspr r3, SPRN_DAR
#endif
/* concat physical page address(r11) and page offset(r10) */
- rlwimi r11, r10, 0, 20, 31
+ mfspr r10, SPRN_SRR0
+ rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
lwz r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst does not generate DTLB Misses/Errors,
@@ -698,11 +711,11 @@ start_here:
#ifdef CONFIG_8xx_CPU6
lis r4, cpu6_errata_word@h
ori r4, r4, cpu6_errata_word@l
- li r3, 0x3980
+ li r3, 0x3f80
stw r3, 12(r4)
lwz r3, 12(r4)
#endif
- mtspr SPRN_M_TWB, r6
+ mtspr SPRN_M_TW, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
@@ -876,10 +889,10 @@ _GLOBAL(set_context)
lis r6, cpu6_errata_word@h
ori r6, r6, cpu6_errata_word@l
tophys (r4, r4)
- li r7, 0x3980
+ li r7, 0x3f80
stw r7, 12(r6)
lwz r7, 12(r6)
- mtspr SPRN_M_TWB, r4 /* Update MMU base address */
+ mtspr SPRN_M_TW, r4 /* Update MMU base address */
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
@@ -887,7 +900,7 @@ _GLOBAL(set_context)
#else
mtspr SPRN_M_CASID,r3 /* Update context */
tophys (r4, r4)
- mtspr SPRN_M_TWB, r4 /* and pgd */
+ mtspr SPRN_M_TW, r4 /* and pgd */
#endif
SYNC
blr
@@ -919,12 +932,13 @@ set_dec_cpu6:
.globl sdata
sdata:
.globl empty_zero_page
+ .align PAGE_SHIFT
empty_zero_page:
- .space 4096
+ .space PAGE_SIZE
.globl swapper_pg_dir
swapper_pg_dir:
- .space 4096
+ .space PGD_TABLE_SIZE
/* Room for two PTE table pointers, usually the kernel and current user
* pointer to their respective root page table (pgdir).
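
All of the rewritten TLB miss paths above share the same hand-rolled index extraction in place of the old M_TWB-assisted walk. With the default 4KB pages (PAGE_SHIFT == 12) the two rlwinm forms boil down to simple shift-and-mask arithmetic; the C below is a reading aid only, assuming 4KB pages and 4-byte table entries, and is not part of the patch:

/* rlwinm rX, ea, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 */
static inline unsigned long l1_byte_offset(unsigned long ea)
{
        return (ea >> 22) << 2;                 /* pgd index * 4 */
}

/* rlwinm rX, ea, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 */
static inline unsigned long l2_byte_offset(unsigned long ea)
{
        return ((ea >> 12) & 0x3ff) << 2;       /* pte index * 4 */
}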
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1f7d84e2e8b2..05e804cdecaa 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
*slot = bp;
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
- struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
if (*slot != bp) {
WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
*/
rcu_read_lock();
- bp = __get_cpu_var(bp_per_reg);
+ bp = __this_cpu_read(bp_per_reg);
if (!bp)
goto out;
info = counter_arch_bp(bp);
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index c0754bbf8118..18c0687e5ab3 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -212,6 +212,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
mtspr SPRN_SRR0,r5
rfid
+/*
+ * R3 here contains the value that will be returned to the caller
+ * of power7_nap.
+ */
_GLOBAL(power7_wakeup_loss)
ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
@@ -219,15 +223,19 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
REST_NVGPRS(r1)
REST_GPR(2, r1)
- ld r3,_CCR(r1)
+ ld r6,_CCR(r1)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
addi r1,r1,INT_FRAME_SIZE
- mtcr r3
+ mtcr r6
mtspr SPRN_SRR1,r4
mtspr SPRN_SRR0,r5
rfid
+/*
+ * R3 here contains the value that will be returned to the caller
+ * of power7_nap.
+ */
_GLOBAL(power7_wakeup_noloss)
lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a83cf5ef6488..5d3968c4d799 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
* We don't need to disable preemption here because any CPU can
* safely use any IOMMU pool.
*/
- pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+ pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
if (largealloc)
pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c14383575fe8..45096033d37b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -50,7 +50,6 @@
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
-#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
@@ -114,7 +113,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
static inline notrace int decrementer_check_overflow(void)
{
u64 now = get_tb_or_rtc();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
return now >= *next_tb;
}
@@ -499,7 +498,7 @@ void __do_irq(struct pt_regs *regs)
/* And finally process it */
if (unlikely(irq == NO_IRQ))
- __get_cpu_var(irq_stat).spurious_irqs++;
+ __this_cpu_inc(irq_stat.spurious_irqs);
else
generic_handle_irq(irq);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8504657379f1..e77c3ccf8dcf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
struct thread_info *backup_current_thread_info =
- &__get_cpu_var(kgdb_thread_info);
+ this_cpu_ptr(&kgdb_thread_info);
if (user_mode(regs))
return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2f72af82513c..7c053f281406 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_msr = regs->msr;
}
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
ret = 1;
goto no_kprobe;
}
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a7fd4cb78b78..15c99b649b04 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
uint64_t nip, uint64_t addr)
{
uint64_t srr1;
- int index = __get_cpu_var(mce_nest_count)++;
- struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+ int index = __this_cpu_inc_return(mce_nest_count);
+ struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
/*
* Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
*/
int get_mce_event(struct machine_check_event *mce, bool release)
{
- int index = __get_cpu_var(mce_nest_count) - 1;
+ int index = __this_cpu_read(mce_nest_count) - 1;
struct machine_check_event *mc_evt;
int ret = 0;
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
/* Check if we have MCE info to process. */
if (index < MAX_MC_EVT) {
- mc_evt = &__get_cpu_var(mce_event[index]);
+ mc_evt = this_cpu_ptr(&mce_event[index]);
/* Copy the event structure and release the original */
if (mce)
*mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
}
/* Decrement the count to free the slot. */
if (release)
- __get_cpu_var(mce_nest_count)--;
+ __this_cpu_dec(mce_nest_count);
return ret;
}
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
- index = __get_cpu_var(mce_queue_count)++;
+ index = __this_cpu_inc_return(mce_queue_count);
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __get_cpu_var(mce_queue_count)--;
+ __this_cpu_dec(mce_queue_count);
return;
}
- __get_cpu_var(mce_event_queue[index]) = evt;
+ memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
*/
- while (__get_cpu_var(mce_queue_count) > 0) {
- index = __get_cpu_var(mce_queue_count) - 1;
+ while (__this_cpu_read(mce_queue_count) > 0) {
+ index = __this_cpu_read(mce_queue_count) - 1;
machine_check_print_event_info(
- &__get_cpu_var(mce_event_queue[index]));
- __get_cpu_var(mce_queue_count)--;
+ this_cpu_ptr(&mce_event_queue[index]));
+ __this_cpu_dec(mce_queue_count);
}
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index aa9aff3d6ad3..b6f123ab90ed 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
}
if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
/* reset error bits */
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
}
@@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
break;
case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
handled = 1;
}
break;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index f87bc1b4bdda..2f35a72642c6 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -110,7 +110,6 @@ static struct platform_driver of_pci_phb_driver = {
.probe = of_pci_phb_probe,
.driver = {
.name = "of-pci",
- .owner = THIS_MODULE,
.of_match_table = of_pci_phb_ids,
},
};
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e5dad9a9edc0..37d512d35943 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -20,7 +20,6 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
@@ -1464,7 +1463,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
res = &hose->io_resource;
if (!res->flags) {
- printk(KERN_WARNING "PCI: I/O resource not set for host"
+ pr_info("PCI: I/O resource not set for host"
" bridge %s (domain %d)\n",
hose->dn->full_name, hose->global_number);
} else {
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 432459c817fa..1f7930037cb7 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -199,9 +199,7 @@ pci_create_OF_bus_map(void)
struct property* of_prop;
struct device_node *dn;
- of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
- if (!of_prop)
- return;
+ of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index b15194e2c5fc..60bb187cb46a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -17,7 +17,6 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/list.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 923cd2daba89..b4cc7bef6b16 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -37,9 +37,9 @@
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
- __get_cpu_var(current_brk) = *brk;
+ memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
if (cpu_has_feature(CPU_FTR_DAWR))
set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+ if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* Collect processor utilization data per process
*/
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+ struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
long unsigned start_tb, current_tb;
start_tb = old_thread->start_tb;
cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_BOOK3S_64
- batch = &__get_cpu_var(ppc64_tlb_batch);
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#ifdef CONFIG_PPC_BOOK3S_64
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
- batch = &__get_cpu_var(ppc64_tlb_batch);
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -921,12 +921,8 @@ static void show_instructions(struct pt_regs *regs)
pc = (unsigned long)phys_to_virt(pc);
#endif
- /* We use __get_user here *only* to avoid an OOPS on a
- * bad address because the pc *should* only be a
- * kernel address.
- */
if (!__kernel_text_address(pc) ||
- __get_user(instr, (unsigned int __user *)pc)) {
+ probe_kernel_address((unsigned int __user *)pc, instr)) {
printk(KERN_CONT "XXXXXXXX ");
} else {
if (regs->nip == pc)
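The show_instructions() hunk above replaces the __get_user()-on-a-kernel-pointer trick with probe_kernel_address(), which reads a word that may fault and returns non-zero on failure. A small sketch of that pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/uaccess.h>

/*
 * Illustrative helper: dump one instruction word, falling back to XXXXXXXX
 * when the address is not readable kernel text.  probe_kernel_address()
 * returns non-zero on a faulting read, so no __get_user() trickery is needed.
 */
static void example_show_insn(unsigned long pc)
{
	unsigned int instr;

	if (!__kernel_text_address(pc) ||
	    probe_kernel_address((const void *)pc, instr))
		pr_cont("XXXXXXXX ");
	else
		pr_cont("%08x ", instr);
}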
@@ -1531,13 +1527,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
int curr_frame = current->curr_ret_stack;
extern void return_to_handler(void);
unsigned long rth = (unsigned long)return_to_handler;
- unsigned long mrth = -1;
-#ifdef CONFIG_PPC64
- extern void mod_return_to_handler(void);
- rth = *(unsigned long *)rth;
- mrth = (unsigned long)mod_return_to_handler;
- mrth = *(unsigned long *)mrth;
-#endif
#endif
sp = (unsigned long) stack;
@@ -1562,7 +1551,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
if (!firstframe || ip != lr) {
printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+ if ((ip == rth) && curr_frame >= 0) {
printk(" (%pS)",
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
@@ -1665,12 +1654,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
return ret;
}
-unsigned long randomize_et_dyn(unsigned long base)
-{
- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < base)
- return base;
-
- return ret;
-}
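Most of the remaining process.c changes are mechanical conversions from __get_cpu_var() to the this_cpu accessor family, a pattern repeated in smp.c, sysfs.c, time.c and traps.c below. A minimal sketch of the idiom with made-up per-CPU variables (the callers above already run with preemption disabled):

#include <linux/percpu.h>

struct example_stat {
	unsigned long timer_irqs;
};

static DEFINE_PER_CPU(struct example_stat, example_stats);
static DEFINE_PER_CPU(unsigned long, example_flag);

/* Caller must not be preemptible, as in the interrupt and switch paths above. */
static void example_percpu_accessors(void)
{
	struct example_stat *st;

	st = this_cpu_ptr(&example_stats);	/* was: &__get_cpu_var(example_stats) */
	st->timer_irqs++;

	__this_cpu_write(example_flag, 1);	/* was: __get_cpu_var(example_flag) = 1 */
	if (__this_cpu_read(example_flag))	/* was: __get_cpu_var(example_flag) */
		__this_cpu_inc(example_stats.timer_irqs);
}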
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 099f27e6d1b0..6a799b3cc6b4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -160,6 +160,12 @@ static struct ibm_pa_feature {
{CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
{0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ /*
+ * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+ * which is 0 if the kernel doesn't support TM.
+ */
+ {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -696,10 +702,7 @@ void __init early_init_devtree(void *params)
reserve_crashkernel();
early_reserve_mem();
- /*
- * Ensure that total memory size is page-aligned, because otherwise
- * mark_bootmem() gets upset.
- */
+ /* Ensure that total memory size is page-aligned. */
limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 8777fb02349f..fb2fb3ea85e5 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -113,17 +113,6 @@
#define SENSOR_PREFIX "ibm,sensor-"
#define cel_to_fahr(x) ((x*9/5)+32)
-
-/* Globals */
-static struct rtas_sensors sensors;
-static struct device_node *rtas_node = NULL;
-static unsigned long power_on_time = 0; /* Save the time the user set */
-static char progress_led[MAX_LINELENGTH];
-
-static unsigned long rtas_tone_frequency = 1000;
-static unsigned long rtas_tone_volume = 0;
-
-/* ****************STRUCTS******************************************* */
struct individual_sensor {
unsigned int token;
unsigned int quant;
@@ -134,6 +123,15 @@ struct rtas_sensors {
unsigned int quant;
};
+/* Globals */
+static struct rtas_sensors sensors;
+static struct device_node *rtas_node = NULL;
+static unsigned long power_on_time = 0; /* Save the time the user set */
+static char progress_led[MAX_LINELENGTH];
+
+static unsigned long rtas_tone_frequency = 1000;
+static unsigned long rtas_tone_volume = 0;
+
/* ****************************************************************** */
/* Declarations */
static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8b4c857c1421..4af905e81ab0 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
}
/*
- * Call early during boot, before mem init or bootmem, to retrieve the RTAS
- * informations from the device-tree and allocate the RMO buffer for userland
+ * Call early during boot, before mem init, to retrieve the RTAS
+ * information from the device-tree and allocate the RMO buffer for userland
* accesses.
*/
void __init rtas_initialize(void)
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 7c55b86206b3..ce230da2c015 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -26,7 +26,6 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <asm/io.h>
#include <asm/pgtable.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1362cd62b3fa..44c8d03558ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -139,8 +139,8 @@ void machine_restart(char *cmd)
void machine_power_off(void)
{
machine_shutdown();
- if (ppc_md.power_off)
- ppc_md.power_off();
+ if (pm_power_off)
+ pm_power_off();
#ifdef CONFIG_SMP
smp_send_stop();
#endif
@@ -151,7 +151,7 @@ void machine_power_off(void)
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
-void (*pm_power_off)(void) = machine_power_off;
+void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
void machine_halt(void)
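The setup-common.c hunk above stops pre-seeding the generic pm_power_off hook with machine_power_off() and instead invokes it only when a platform has installed one. A hypothetical sketch of how board code would now register a power-off handler:

#include <linux/init.h>
#include <linux/pm.h>

/* Hypothetical board code: hook the generic power-off path. */
static void example_board_power_off(void)
{
	/* tell the board's power controller to cut power here */
}

static int __init example_board_setup(void)
{
	/* replaces the old ppc_md.power_off assignment */
	if (!pm_power_off)
		pm_power_off = example_board_power_off;
	return 0;
}
device_initcall(example_board_setup);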
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 07831ed0d9ef..bb02e9f6944e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/tty.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
@@ -53,11 +52,6 @@ unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
-#ifdef CONFIG_VGA_CONSOLE
-unsigned long vgacon_remap_base;
-EXPORT_SYMBOL(vgacon_remap_base);
-#endif
-
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.
@@ -311,9 +305,8 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init();
- /* set up the bootmem stuff with available memory */
- do_init_bootmem();
- if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
+ initmem_init();
+ if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4f3cfe1b6a33..49f553bbb360 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -660,13 +660,11 @@ static void __init emergency_stack_init(void)
}
/*
- * Called into from start_kernel this initializes bootmem, which is used
+ * Called into from start_kernel this initializes memblock, which is used
* to manage page allocation until mem_init is called.
*/
void __init setup_arch(char **cmdline_p)
{
- ppc64_boot_msg(0x12, "Setup Arch");
-
*cmdline_p = boot_command_line;
/*
@@ -691,9 +689,7 @@ void __init setup_arch(char **cmdline_p)
exc_lvl_early_init();
emergency_stack_init();
- /* set up the bootmem stuff with available memory */
- do_init_bootmem();
- sparse_init();
+ initmem_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
@@ -711,33 +707,6 @@ void __init setup_arch(char **cmdline_p)
if ((unsigned long)_stext & 0xffff)
panic("Kernelbase not 64K-aligned (0x%lx)!\n",
(unsigned long)_stext);
-
- ppc64_boot_msg(0x15, "Setup Done");
-}
-
-
-/* ToDo: do something useful if ppc_md is not yet setup. */
-#define PPC64_LINUX_FUNCTION 0x0f000000
-#define PPC64_IPL_MESSAGE 0xc0000000
-#define PPC64_TERM_MESSAGE 0xb0000000
-
-static void ppc64_do_msg(unsigned int src, const char *msg)
-{
- if (ppc_md.progress) {
- char buf[128];
-
- sprintf(buf, "%08X\n", src);
- ppc_md.progress(buf, 0);
- snprintf(buf, 128, "%s", msg);
- ppc_md.progress(buf, 0);
- }
-}
-
-/* Print a boot progress message. */
-void ppc64_boot_msg(unsigned int src, const char *msg)
-{
- ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
- printk("[boot]%04x %s\n", src, msg);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 71e186d5f331..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
irqreturn_t smp_ipi_demux(void)
{
- struct cpu_messages *info = &__get_cpu_var(ipi_message);
+ struct cpu_messages *info = this_cpu_ptr(&ipi_message);
unsigned int all;
mb(); /* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
idle_task_exit();
cpu = smp_processor_id();
printk(KERN_DEBUG "CPU%d offline\n", cpu);
- __get_cpu_var(cpu_state) = CPU_DEAD;
+ __this_cpu_write(cpu_state, CPU_DEAD);
smp_wmb();
- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+ while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 67fd2fd2620a..fa1fd8a0c867 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
ppc_set_pmu_inuse(1);
/* Only need to enable them once */
- if (__get_cpu_var(pmcs_enabled))
+ if (__this_cpu_read(pmcs_enabled))
return;
- __get_cpu_var(pmcs_enabled) = 1;
+ __this_cpu_write(pmcs_enabled, 1);
if (ppc_md.enable_pmcs)
ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..fa7c4f12104f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
#endif /* 32 vs 64 bit */
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
static void __timer_interrupt(void)
{
struct pt_regs *regs = get_irq_regs();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
- struct clock_event_device *evt = &__get_cpu_var(decrementers);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+ struct clock_event_device *evt = this_cpu_ptr(&decrementers);
u64 now;
trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
*next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
- __get_cpu_var(irq_stat).timer_irqs_event++;
+ __this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
/* We may have raced with new irq work */
if (test_irq_work_pending())
set_dec(1);
- __get_cpu_var(irq_stat).timer_irqs_others++;
+ __this_cpu_inc(irq_stat.timer_irqs_others);
}
#ifdef CONFIG_PPC64
/* collect purr register values often, for accurate calculations */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+ struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
cu->current_tb = mfspr(SPRN_PURR);
}
#endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
void timer_interrupt(struct pt_regs * regs)
{
struct pt_regs *old_regs;
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+ __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
set_dec(evt);
/* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
*next_tb = get_tb_or_rtc();
__timer_interrupt();
@@ -989,6 +989,7 @@ void GregorianDay(struct rtc_time * tm)
tm->tm_wday = day % 7;
}
+EXPORT_SYMBOL_GPL(GregorianDay);
void to_tm(int tim, struct rtc_time * tm)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0dc43f9932cf..e6595b72269b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
{
long handled = 0;
- __get_cpu_var(irq_stat).mce_exceptions++;
+ __this_cpu_inc(irq_stat.mce_exceptions);
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
long hmi_exception_realmode(struct pt_regs *regs)
{
- __get_cpu_var(irq_stat).hmi_exceptions++;
+ __this_cpu_inc(irq_stat.hmi_exceptions);
if (ppc_md.hmi_exception_early)
ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
int recover = 0;
- __get_cpu_var(irq_stat).mce_exceptions++;
+ __this_cpu_inc(irq_stat.mce_exceptions);
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
void performance_monitor_exception(struct pt_regs *regs)
{
- __get_cpu_var(irq_stat).pmu_irqs++;
+ __this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
}
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 6e7c4923b5ea..411116c38da4 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
static int udbg_uart_getc_poll(void)
{
- if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
+ if (!udbg_uart_in)
+ return -1;
+
+ if (udbg_uart_in(UART_LSR) & LSR_DR)
return udbg_uart_in(UART_RBR);
+
return -1;
}
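For reference, a self-contained sketch of the polled-read logic udbg_uart_getc_poll() is meant to implement: return a byte only when the line-status register reports data ready, otherwise -1. Names and register offsets below are illustrative, not taken from the driver:

#include <linux/types.h>

#define EX_UART_RBR	0	/* receiver buffer */
#define EX_UART_LSR	5	/* line status */
#define EX_LSR_DR	0x01	/* data ready */

static u8 (*ex_uart_in)(unsigned int reg);

/* Hypothetical 16550 poll helper: -1 when no byte is available. */
static int ex_uart_getc_poll(void)
{
	if (!ex_uart_in)
		return -1;

	if (ex_uart_in(EX_UART_LSR) & EX_LSR_DR)
		return ex_uart_in(EX_UART_RBR);

	return -1;
}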
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f174351842cf..305eb0d9b768 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -20,7 +20,6 @@
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 602eb51d20bc..f5769f19ae25 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -172,6 +172,7 @@ config KVM_XICS
depends on KVM_BOOK3S_64 && !KVM_MPIC
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
+ default y
---help---
Include support for the XICS (eXternal Interrupt Controller
Specification) interrupt controller architecture used on
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b32db4b95361..888bf466d8c6 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -64,14 +64,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index cd0b0730e29e..a2eb6d354a57 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -78,11 +78,6 @@ static inline bool sr_kp(u32 sr_raw)
return (sr_raw & 0x20000000) ? true: false;
}
-static inline bool sr_nx(u32 sr_raw)
-{
- return (sr_raw & 0x10000000) ? true: false;
-}
-
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data,
bool iswrite);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d40770248b6a..534acb3c6c3d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,8 +37,7 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970 63
+#include "trace_hv.h"
/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER 18
@@ -229,14 +228,9 @@ int kvmppc_mmu_hv_init(void)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
- /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
- if (cpu_has_feature(CPU_FTR_ARCH_206)) {
- host_lpid = mfspr(SPRN_LPID); /* POWER7 */
- rsvd_lpid = LPID_RSVD;
- } else {
- host_lpid = 0; /* PPC970 */
- rsvd_lpid = MAX_LPID_970;
- }
+ /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+ host_lpid = mfspr(SPRN_LPID);
+ rsvd_lpid = LPID_RSVD;
kvmppc_init_lpid(rsvd_lpid + 1);
@@ -259,130 +253,12 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, msr);
}
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
- struct kvm_memory_slot *memslot,
- unsigned long psize)
-{
- unsigned long start;
- long np, err;
- struct page *page, *hpage, *pages[1];
- unsigned long s, pgsize;
- unsigned long *physp;
- unsigned int is_io, got, pgorder;
- struct vm_area_struct *vma;
- unsigned long pfn, i, npages;
-
- physp = memslot->arch.slot_phys;
- if (!physp)
- return -EINVAL;
- if (physp[gfn - memslot->base_gfn])
- return 0;
-
- is_io = 0;
- got = 0;
- page = NULL;
- pgsize = psize;
- err = -EINVAL;
- start = gfn_to_hva_memslot(memslot, gfn);
-
- /* Instantiate and get the page we want access to */
- np = get_user_pages_fast(start, 1, 1, pages);
- if (np != 1) {
- /* Look up the vma for the page */
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, start);
- if (!vma || vma->vm_start > start ||
- start + psize > vma->vm_end ||
- !(vma->vm_flags & VM_PFNMAP))
- goto up_err;
- is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
- pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- /* check alignment of pfn vs. requested page size */
- if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
- goto up_err;
- up_read(&current->mm->mmap_sem);
-
- } else {
- page = pages[0];
- got = KVMPPC_GOT_PAGE;
-
- /* See if this is a large page */
- s = PAGE_SIZE;
- if (PageHuge(page)) {
- hpage = compound_head(page);
- s <<= compound_order(hpage);
- /* Get the whole large page if slot alignment is ok */
- if (s > psize && slot_is_aligned(memslot, s) &&
- !(memslot->userspace_addr & (s - 1))) {
- start &= ~(s - 1);
- pgsize = s;
- get_page(hpage);
- put_page(page);
- page = hpage;
- }
- }
- if (s < psize)
- goto out;
- pfn = page_to_pfn(page);
- }
-
- npages = pgsize >> PAGE_SHIFT;
- pgorder = __ilog2(npages);
- physp += (gfn - memslot->base_gfn) & ~(npages - 1);
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i) {
- if (!physp[i]) {
- physp[i] = ((pfn + i) << PAGE_SHIFT) +
- got + is_io + pgorder;
- got = 0;
- }
- }
- spin_unlock(&kvm->arch.slot_phys_lock);
- err = 0;
-
- out:
- if (got)
- put_page(page);
- return err;
-
- up_err:
- up_read(&current->mm->mmap_sem);
- return err;
-}
-
long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
{
- unsigned long psize, gpa, gfn;
- struct kvm_memory_slot *memslot;
long ret;
- if (kvm->arch.using_mmu_notifiers)
- goto do_insert;
-
- psize = hpte_page_size(pteh, ptel);
- if (!psize)
- return H_PARAMETER;
-
- pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
- /* Find the memslot (if any) for this address */
- gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
- gfn = gpa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (!slot_is_aligned(memslot, psize))
- return H_PARAMETER;
- if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
- return H_PARAMETER;
- }
-
- do_insert:
/* Protect linux PTE lookup from page table destruction */
rcu_read_lock_sched(); /* this disables preemption too */
ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
@@ -397,19 +273,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
}
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh,
- unsigned long ptel)
-{
- return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
- pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
@@ -494,7 +357,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
/* Storage key permission check for POWER7 */
- if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+ if (data && virtmode) {
int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
if (amrfield & 1)
gpte->may_read = 0;
@@ -622,14 +485,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
+ trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
+
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
- if (!kvm->arch.using_mmu_notifiers)
- return -EFAULT; /* should never get here */
-
/*
* This should never happen, because of the slot_is_aligned()
* check in kvmppc_do_h_enter().
@@ -641,6 +503,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
+ ret = -EFAULT;
is_io = 0;
pfn = 0;
page = NULL;
@@ -664,7 +527,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
up_read(&current->mm->mmap_sem);
if (!pfn)
- return -EFAULT;
+ goto out_put;
} else {
page = pages[0];
pfn = page_to_pfn(page);
@@ -694,14 +557,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
- ret = -EFAULT;
if (psize > pte_size)
goto out_put;
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_io)) {
if (is_io)
- return -EFAULT;
+ goto out_put;
+
/*
* Allow guest to map emulated device memory as
* uncacheable, but actually make it cacheable.
@@ -765,6 +628,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
+ trace_kvm_page_fault_exit(vcpu, hpte, ret);
+
if (page) {
/*
* We drop pages[0] here, not page because page might
@@ -895,8 +760,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
- if (kvm->arch.using_mmu_notifiers)
- hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
@@ -914,15 +778,13 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+ kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+ kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
return 0;
}
@@ -1004,8 +866,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
}
@@ -1042,15 +902,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- if (!kvm->arch.using_mmu_notifiers)
- return;
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
@@ -1117,8 +973,11 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
}
/* Now check and modify the HPTE */
- if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
+ if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
+ /* unlock and continue */
+ hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
continue;
+ }
/* need to make it temporarily absent so C is stable */
hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
@@ -1206,35 +1065,17 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
struct page *page, *pages[1];
int npages;
unsigned long hva, offset;
- unsigned long pa;
- unsigned long *physp;
int srcu_idx;
srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
goto err;
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- goto err;
- physp += gfn - memslot->base_gfn;
- pa = *physp;
- if (!pa) {
- if (kvmppc_get_guest_page(kvm, gfn, memslot,
- PAGE_SIZE) < 0)
- goto err;
- pa = *physp;
- }
- page = pfn_to_page(pa >> PAGE_SHIFT);
- get_page(page);
- } else {
- hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, 1, pages);
- if (npages < 1)
- goto err;
- page = pages[0];
- }
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ npages = get_user_pages_fast(hva, 1, 1, pages);
+ if (npages < 1)
+ goto err;
+ page = pages[0];
srcu_read_unlock(&kvm->srcu, srcu_idx);
offset = gpa & (PAGE_SIZE - 1);
@@ -1258,7 +1099,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
put_page(page);
- if (!dirty || !kvm->arch.using_mmu_notifiers)
+ if (!dirty)
return;
/* We need to mark this page dirty in the rmap chain */
@@ -1539,9 +1380,15 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
lbuf = (unsigned long __user *)buf;
for (j = 0; j < hdr.n_valid; ++j) {
+ __be64 hpte_v;
+ __be64 hpte_r;
+
err = -EFAULT;
- if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
+ if (__get_user(hpte_v, lbuf) ||
+ __get_user(hpte_r, lbuf + 1))
goto out;
+ v = be64_to_cpu(hpte_v);
+ r = be64_to_cpu(hpte_r);
err = -EINVAL;
if (!(v & HPTE_V_VALID))
goto out;
@@ -1652,10 +1499,7 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- vcpu->arch.slb_nr = 32; /* POWER7 */
- else
- vcpu->arch.slb_nr = 64;
+ vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
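The kvm_htab_write() hunk above reads HPTE words into __be64 temporaries and byte-swaps them explicitly rather than assuming a big-endian host. A minimal sketch of that pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/*
 * Hypothetical helper: read one HPTE (two 64-bit words, stored big-endian in
 * the user buffer) and convert it to host byte order.  The caller is assumed
 * to have already done access_ok() on the buffer, as kvm_htab_write() does.
 */
static int example_read_hpte(const unsigned long __user *lbuf,
			     unsigned long *v, unsigned long *r)
{
	__be64 hpte_v, hpte_r;

	if (__get_user(hpte_v, lbuf) || __get_user(hpte_r, lbuf + 1))
		return -EFAULT;

	*v = be64_to_cpu(hpte_v);
	*r = be64_to_cpu(hpte_r);
	return 0;
}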
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e63587d30b70..de4018a1bc4b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -58,6 +58,9 @@
#include "book3s.h"
+#define CREATE_TRACE_POINTS
+#include "trace_hv.h"
+
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
@@ -135,11 +138,10 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* stolen.
*
* Updates to busy_stolen are protected by arch.tbacct_lock;
- * updates to vc->stolen_tb are protected by the arch.tbacct_lock
- * of the vcpu that has taken responsibility for running the vcore
- * (i.e. vc->runner). The stolen times are measured in units of
- * timebase ticks. (Note that the != TB_NIL checks below are
- * purely defensive; they should never fail.)
+ * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
+ * lock. The stolen times are measured in units of timebase ticks.
+ * (Note that the != TB_NIL checks below are purely defensive;
+ * they should never fail.)
*/
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
@@ -147,12 +149,21 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
- vc->preempt_tb != TB_NIL) {
- vc->stolen_tb += mftb() - vc->preempt_tb;
- vc->preempt_tb = TB_NIL;
+ /*
+ * We can test vc->runner without taking the vcore lock,
+ * because only this task ever sets vc->runner to this
+ * vcpu, and once it is set to this vcpu, only this task
+ * ever sets it to NULL.
+ */
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ if (vc->preempt_tb != TB_NIL) {
+ vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->preempt_tb = TB_NIL;
+ }
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
@@ -166,9 +177,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = mftb();
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+ }
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -191,9 +205,6 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
if (arch_compat) {
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return -EINVAL; /* 970 has no compat mode support */
-
switch (arch_compat) {
case PVR_ARCH_205:
/*
@@ -505,25 +516,14 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
u64 p;
+ unsigned long flags;
- /*
- * If we are the task running the vcore, then since we hold
- * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
- * can't be updated, so we don't need the tbacct_lock.
- * If the vcore is inactive, it can't become active (since we
- * hold the vcore lock), so the vcpu load/put functions won't
- * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
- */
+ spin_lock_irqsave(&vc->stoltb_lock, flags);
+ p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
- vc->runner->arch.run_task != current) {
- spin_lock_irq(&vc->runner->arch.tbacct_lock);
- p = vc->stolen_tb;
- if (vc->preempt_tb != TB_NIL)
- p += now - vc->preempt_tb;
- spin_unlock_irq(&vc->runner->arch.tbacct_lock);
- } else {
- p = vc->stolen_tb;
- }
+ vc->preempt_tb != TB_NIL)
+ p += now - vc->preempt_tb;
+ spin_unlock_irqrestore(&vc->stoltb_lock, flags);
return p;
}
@@ -607,10 +607,45 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
}
}
+static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
+{
+ struct kvmppc_vcore *vcore = target->arch.vcore;
+
+ /*
+ * We expect to have been called by the real mode handler
+ * (kvmppc_rm_h_confer()) which would have directly returned
+ * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
+ * have useful work to do and should not confer) so we don't
+ * recheck that here.
+ */
+
+ spin_lock(&vcore->lock);
+ if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ vcore->vcore_state != VCORE_INACTIVE)
+ target = vcore->runner;
+ spin_unlock(&vcore->lock);
+
+ return kvm_vcpu_yield_to(target);
+}
+
+static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
+{
+ int yield_count = 0;
+ struct lppaca *lppaca;
+
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
+ if (lppaca)
+ yield_count = lppaca->yield_count;
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ return yield_count;
+}
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
+ int yield_count;
struct kvm_vcpu *tvcpu;
int idx, rc;
@@ -619,14 +654,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
- case H_ENTER:
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6),
- kvmppc_get_gpr(vcpu, 7));
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- break;
case H_CEDE:
break;
case H_PROD:
@@ -654,7 +681,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = H_PARAMETER;
break;
}
- kvm_vcpu_yield_to(tvcpu);
+ yield_count = kvmppc_get_gpr(vcpu, 5);
+ if (kvmppc_get_yield_count(tvcpu) != yield_count)
+ break;
+ kvm_arch_vcpu_yield_to(tvcpu);
break;
case H_REGISTER_VPA:
ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -769,6 +799,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ /* HMI is a hypervisor maintenance interrupt that the host has already handled. Resume the guest. */
+ case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
r = RESUME_GUEST;
break;
@@ -837,6 +869,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
* Accordingly return to Guest or Host.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
+ vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
+ swab32(vcpu->arch.emul_inst) :
+ vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(run, vcpu);
} else {
@@ -1357,6 +1393,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
+ spin_lock_init(&vcore->stoltb_lock);
init_waitqueue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
@@ -1694,9 +1731,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->n_woken = 0;
vc->nap_count = 0;
vc->entry_exit_count = 0;
+ vc->preempt_tb = TB_NIL;
vc->vcore_state = VCORE_STARTING;
vc->in_guest = 0;
vc->napping_threads = 0;
+ vc->conferring_threads = 0;
/*
* Updating any of the vpas requires calling kvmppc_pin_guest_page,
@@ -1726,6 +1765,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
}
/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -1734,6 +1774,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
+
+ trace_kvmppc_run_core(vc, 0);
+
spin_unlock(&vc->lock);
kvm_guest_enter();
@@ -1779,6 +1822,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
+ trace_kvm_guest_exit(vcpu);
+
ret = RESUME_GUEST;
if (vcpu->arch.trap)
ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
@@ -1804,6 +1849,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
wake_up(&vcpu->arch.cpu_run);
}
}
+
+ trace_kvmppc_run_core(vc, 1);
}
/*
@@ -1826,15 +1873,37 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
*/
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
+ struct kvm_vcpu *vcpu;
+ int do_sleep = 1;
+
DEFINE_WAIT(wait);
prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * Check one last time for pending exceptions and ceded state after
+ * we put ourselves on the wait queue
+ */
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+ if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
+ do_sleep = 0;
+ break;
+ }
+ }
+
+ if (!do_sleep) {
+ finish_wait(&vc->wq, &wait);
+ return;
+ }
+
vc->vcore_state = VCORE_SLEEPING;
+ trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
finish_wait(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_vcore_blocked(vc, 1);
}
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -1843,6 +1912,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn;
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
kvm_run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
@@ -1872,6 +1943,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
VCORE_EXIT_COUNT(vc) == 0) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
+ trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
}
@@ -1936,6 +2008,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
@@ -1962,7 +2035,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up HTAB and VRMA or RMA */
+ /* On the first time here, set up HTAB and VRMA */
if (!vcpu->kvm->arch.rma_setup_done) {
r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
@@ -1981,7 +2054,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
+ trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
+ trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -1997,98 +2072,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
- struct kvm_rma_info *ri = vma->vm_file->private_data;
-
- if (vmf->pgoff >= kvm_rma_pages)
- return VM_FAULT_SIGBUS;
-
- page = pfn_to_page(ri->base_pfn + vmf->pgoff);
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct kvm_rma_vm_ops = {
- .fault = kvm_rma_fault,
-};
-
-static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &kvm_rma_vm_ops;
- return 0;
-}
-
-static int kvm_rma_release(struct inode *inode, struct file *filp)
-{
- struct kvm_rma_info *ri = filp->private_data;
-
- kvm_release_rma(ri);
- return 0;
-}
-
-static const struct file_operations kvm_rma_fops = {
- .mmap = kvm_rma_mmap,
- .release = kvm_rma_release,
-};
-
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *ret)
-{
- long fd;
- struct kvm_rma_info *ri;
- /*
- * Only do this on PPC970 in HV mode
- */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return -EINVAL;
-
- if (!kvm_rma_pages)
- return -EINVAL;
-
- ri = kvm_alloc_rma();
- if (!ri)
- return -ENOMEM;
-
- fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
- if (fd < 0)
- kvm_release_rma(ri);
-
- ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
- return fd;
-}
-
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
int linux_psize)
{
@@ -2167,26 +2150,6 @@ out:
return r;
}
-static void unpin_slot(struct kvm_memory_slot *memslot)
-{
- unsigned long *physp;
- unsigned long j, npages, pfn;
- struct page *page;
-
- physp = memslot->arch.slot_phys;
- npages = memslot->npages;
- if (!physp)
- return;
- for (j = 0; j < npages; j++) {
- if (!(physp[j] & KVMPPC_GOT_PAGE))
- continue;
- pfn = physp[j] >> PAGE_SHIFT;
- page = pfn_to_page(pfn);
- SetPageDirty(page);
- put_page(page);
- }
-}
-
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
@@ -2194,11 +2157,6 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
vfree(free->arch.rmap);
free->arch.rmap = NULL;
}
- if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
- unpin_slot(free);
- vfree(free->arch.slot_phys);
- free->arch.slot_phys = NULL;
- }
}
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
@@ -2207,7 +2165,6 @@ static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
if (!slot->arch.rmap)
return -ENOMEM;
- slot->arch.slot_phys = NULL;
return 0;
}
@@ -2216,17 +2173,6 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem)
{
- unsigned long *phys;
-
- /* Allocate a slot_phys array if needed */
- phys = memslot->arch.slot_phys;
- if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
- phys = vzalloc(memslot->npages * sizeof(unsigned long));
- if (!phys)
- return -ENOMEM;
- memslot->arch.slot_phys = phys;
- }
-
return 0;
}
@@ -2284,17 +2230,11 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvm_rma_info *ri = NULL;
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
unsigned long lpcr = 0, senc;
- unsigned long lpcr_mask = 0;
unsigned long psize, porder;
- unsigned long rma_size;
- unsigned long rmls;
- unsigned long *physp;
- unsigned long i, npages;
int srcu_idx;
mutex_lock(&kvm->lock);
@@ -2329,88 +2269,25 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
psize = vma_kernel_pagesize(vma);
porder = __ilog2(psize);
- /* Is this one of our preallocated RMAs? */
- if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
- hva == vma->vm_start)
- ri = vma->vm_file->private_data;
-
up_read(&current->mm->mmap_sem);
- if (!ri) {
- /* On POWER7, use VRMA; on PPC970, give up */
- err = -EPERM;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- pr_err("KVM: CPU requires an RMO\n");
- goto out_srcu;
- }
+ /* We can handle 4k, 64k or 16M pages in the VRMA */
+ err = -EINVAL;
+ if (!(psize == 0x1000 || psize == 0x10000 ||
+ psize == 0x1000000))
+ goto out_srcu;
- /* We can handle 4k, 64k or 16M pages in the VRMA */
- err = -EINVAL;
- if (!(psize == 0x1000 || psize == 0x10000 ||
- psize == 0x1000000))
- goto out_srcu;
+ /* Update VRMASD field in the LPCR */
+ senc = slb_pgsize_encoding(psize);
+ kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
- /* Update VRMASD field in the LPCR */
- senc = slb_pgsize_encoding(psize);
- kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr_mask = LPCR_VRMASD;
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
+ /* Create HPTEs in the hash page table for the VRMA */
+ kvmppc_map_vrma(vcpu, memslot, porder);
- /* Create HPTEs in the hash page table for the VRMA */
- kvmppc_map_vrma(vcpu, memslot, porder);
-
- } else {
- /* Set up to use an RMO region */
- rma_size = kvm_rma_pages;
- if (rma_size > memslot->npages)
- rma_size = memslot->npages;
- rma_size <<= PAGE_SHIFT;
- rmls = lpcr_rmls(rma_size);
- err = -EINVAL;
- if ((long)rmls < 0) {
- pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
- goto out_srcu;
- }
- atomic_inc(&ri->use_count);
- kvm->arch.rma = ri;
-
- /* Update LPCR and RMOR */
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; insert RMLS value (split field) in HID4 */
- lpcr_mask = (1ul << HID4_RMLS0_SH) |
- (3ul << HID4_RMLS2_SH) | HID4_RMOR;
- lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
- ((rmls & 3) << HID4_RMLS2_SH);
- /* RMOR is also in HID4 */
- lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
- << HID4_RMOR_SH;
- } else {
- /* POWER7 */
- lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
- lpcr = rmls << LPCR_RMLS_SH;
- kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
- }
- pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
- ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
-
- /* Initialize phys addrs of pages in RMO */
- npages = kvm_rma_pages;
- porder = __ilog2(npages);
- physp = memslot->arch.slot_phys;
- if (physp) {
- if (npages > memslot->npages)
- npages = memslot->npages;
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i)
- physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
- porder;
- spin_unlock(&kvm->arch.slot_phys_lock);
- }
- }
-
- kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
@@ -2449,35 +2326,21 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
sizeof(kvm->arch.enabled_hcalls));
- kvm->arch.rma = NULL;
-
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; HID4 is effectively the LPCR */
- kvm->arch.host_lpid = 0;
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
- lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
- lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
- ((lpid & 0xf) << HID4_LPID5_SH);
- } else {
- /* POWER7; init LPCR for virtual RMA mode */
- kvm->arch.host_lpid = mfspr(SPRN_LPID);
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
- lpcr &= LPCR_PECE | LPCR_LPES;
- lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
- LPCR_VPM0 | LPCR_VPM1;
- kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- /* On POWER8 turn on online bit to enable PURR/SPURR */
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- lpcr |= LPCR_ONL;
- }
+ /* Init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VPM1;
+ kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* On POWER8 turn on online bit to enable PURR/SPURR */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr |= LPCR_ONL;
kvm->arch.lpcr = lpcr;
- kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
- spin_lock_init(&kvm->arch.slot_phys_lock);
-
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
@@ -2507,10 +2370,6 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
- if (kvm->arch.rma) {
- kvm_release_rma(kvm->arch.rma);
- kvm->arch.rma = NULL;
- }
kvmppc_free_hpt(kvm);
}
@@ -2536,7 +2395,8 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
static int kvmppc_core_check_processor_compat_hv(void)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_206))
return -EIO;
return 0;
}
@@ -2550,16 +2410,6 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
switch (ioctl) {
- case KVM_ALLOCATE_RMA: {
- struct kvm_allocate_rma rma;
- struct kvm *kvm = filp->private_data;
-
- r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
- if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
- r = -EFAULT;
- break;
- }
-
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4fdc27c80f4c..1f083ff8a61a 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -12,11 +12,11 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
+#include <linux/bitops.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
@@ -33,95 +33,9 @@
* By default we reserve 5% of memory for hash pagetable allocation.
*/
static unsigned long kvm_cma_resv_ratio = 5;
-/*
- * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
- * Each RMA has to be physically contiguous and of a size that the
- * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be allocate that
- * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot using CMA.
- * should be power of 2.
- */
-unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
-EXPORT_SYMBOL_GPL(kvm_rma_pages);
static struct cma *kvm_cma;
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int __init early_parse_rma_size(char *p)
-{
- unsigned long kvm_rma_size;
-
- pr_debug("%s(%s)\n", __func__, p);
- if (!p)
- return -EINVAL;
- kvm_rma_size = memparse(p, &p);
- /*
- * Check that the requested size is one supported in hardware
- */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return -EINVAL;
- }
- kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
- return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-struct kvm_rma_info *kvm_alloc_rma()
-{
- struct page *page;
- struct kvm_rma_info *ri;
-
- ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
- if (!ri)
- return NULL;
- page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
- if (!page)
- goto err_out;
- atomic_set(&ri->use_count, 1);
- ri->base_pfn = page_to_pfn(page);
- return ri;
-err_out:
- kfree(ri);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-
-void kvm_release_rma(struct kvm_rma_info *ri)
-{
- if (atomic_dec_and_test(&ri->use_count)) {
- cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
- kfree(ri);
- }
-}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
-
static int __init early_parse_kvm_cma_resv(char *p)
{
pr_debug("%s(%s)\n", __func__, p);
@@ -133,14 +47,9 @@ early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
- unsigned long align_pages = HPT_ALIGN_PAGES;
-
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
- /* Old CPUs require HPT aligned on a multiple of its size */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_pages = nr_pages;
- return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+ return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
@@ -154,7 +63,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt);
* kvm_cma_reserve() - reserve area for kvm hash pagetable
*
* This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
+ * called by arch specific code once the memblock allocator
* has been activated and all other subsystems have already allocated/reserved
* memory.
*/
@@ -181,22 +90,44 @@ void __init kvm_cma_reserve(void)
if (selected_size) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- /*
- * Old CPUs require HPT aligned on a multiple of its size. So for them
- * make the alignment as max size we could request.
- */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_size = __rounddown_pow_of_two(selected_size);
- else
- align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-
- align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+ align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
}
}
/*
+ * Real-mode H_CONFER implementation.
+ * We check if we are the only vcpu out of this virtual core
+ * still running in the guest and not ceded. If so, we pop up
+ * to the virtual-mode implementation; if not, just return to
+ * the guest.
+ */
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+ unsigned int yield_count)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ int threads_running;
+ int threads_ceded;
+ int threads_conferring;
+ u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+ int rv = H_SUCCESS; /* => don't yield */
+
+ set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+ threads_running = VCORE_ENTRY_COUNT(vc);
+ threads_ceded = hweight32(vc->napping_threads);
+ threads_conferring = hweight32(vc->conferring_threads);
+ if (threads_ceded + threads_conferring >= threads_running) {
+ rv = H_TOO_HARD; /* => do yield */
+ break;
+ }
+ }
+ clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+ return rv;
+}
+
+/*
* When running HV mode KVM we need to block certain operations while KVM VMs
* exist in the system. We use a counter of VMs to track this.
*
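The kvm_cma_reserve() hunk above simplifies the CMA alignment now that only POWER7/POWER8 are supported. For context, a hedged sketch of the generic CMA reserve/allocate pattern it relies on; sizes, alignment and names here are made up:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct cma *example_cma;

/* Illustrative only: declare a CMA area at early boot for later cma_alloc(). */
static void __init example_cma_reserve(void)
{
	cma_declare_contiguous(0, SZ_256M, 0, SZ_16M, 0, false, &example_cma);
}

/* Hand out physically contiguous pages from the reserved area. */
static struct page *example_cma_get(unsigned long nr_pages)
{
	return cma_alloc(example_cma, nr_pages, 0);
}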
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 731be7478b27..36540a99d178 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -52,10 +52,8 @@ _GLOBAL(__kvmppc_vcore_entry)
std r3, _CCR(r1)
/* Save host DSCR */
-BEGIN_FTR_SECTION
mfspr r3, SPRN_DSCR
std r3, HSTATE_DSCR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Save host DABR */
@@ -84,11 +82,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r5, 0
mtspr SPRN_MMCRA, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
@@ -113,20 +109,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_PMC4
mfspr r8, SPRN_PMC5
mfspr r9, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, HSTATE_PMC(r13)
stw r5, HSTATE_PMC + 4(r13)
stw r6, HSTATE_PMC + 8(r13)
stw r7, HSTATE_PMC + 12(r13)
stw r8, HSTATE_PMC + 16(r13)
stw r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- stw r10, HSTATE_PMC + 24(r13)
- stw r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
31:
/*
@@ -140,31 +128,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
-#ifdef CONFIG_SMP
- /*
- * On PPC970, if the guest vcpu has an external interrupt pending,
- * send ourselves an IPI so as to interrupt the guest once it
- * enables interrupts. (It must have interrupts disabled,
- * otherwise we would already have delivered the interrupt.)
- *
- * XXX If this is a UP build, smp_send_reschedule is not available,
- * so the interrupt will be delayed until the next time the vcpu
- * enters the guest with interrupts enabled.
- */
-BEGIN_FTR_SECTION
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r0, VCPU_PENDING_EXC(r4)
- li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
- oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
- and. r0, r0, r7
- beq 32f
- lhz r3, PACAPACAINDEX(r13)
- bl smp_send_reschedule
- nop
-32:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-#endif /* CONFIG_SMP */
-
/* Jump to partition switch code */
bl kvmppc_hv_entry_trampoline
nop
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index d562c8e2bc30..60081bd75847 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -138,8 +138,5 @@ out:
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return kvmppc_realmode_mc_power7(vcpu);
-
- return 0;
+ return kvmppc_realmode_mc_power7(vcpu);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 084ad54c73cd..510bdfbc4073 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -45,16 +45,12 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
* as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
* we can use tlbiel as long as we mark all other physical
* cores as potentially having stale TLB entries for this lpid.
- * If we're not using MMU notifiers, we never take pages away
- * from the guest, so we can use tlbiel if requested.
* Otherwise, don't use tlbiel.
*/
if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
global = 0;
- else if (kvm->arch.using_mmu_notifiers)
- global = 1;
else
- global = !(flags & H_LOCAL);
+ global = 1;
if (!global) {
/* any other core might now have stale TLB entries... */
@@ -170,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
struct revmap_entry *rev;
unsigned long g_ptel;
struct kvm_memory_slot *memslot;
- unsigned long *physp, pte_size;
+ unsigned long pte_size;
unsigned long is_io;
unsigned long *rmap;
pte_t pte;
@@ -198,9 +194,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
is_io = ~0ul;
rmap = NULL;
if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
- /* PPC970 can't do emulated MMIO */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return H_PARAMETER;
/* Emulated MMIO - mark this with key=31 */
pteh |= HPTE_V_ABSENT;
ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
@@ -213,37 +206,20 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
slot_fn = gfn - memslot->base_gfn;
rmap = &memslot->arch.rmap[slot_fn];
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- return H_PARAMETER;
- physp += slot_fn;
- if (realmode)
- physp = real_vmalloc_addr(physp);
- pa = *physp;
- if (!pa)
- return H_TOO_HARD;
- is_io = pa & (HPTE_R_I | HPTE_R_W);
- pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
- pa &= PAGE_MASK;
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Look up the Linux PTE for the backing page */
+ pte_size = psize;
+ pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+ if (pte_present(pte) && !pte_numa(pte)) {
+ if (writing && !pte_write(pte))
+ /* make the actual HPTE be read-only */
+ ptel = hpte_make_readonly(ptel);
+ is_io = hpte_cache_bits(pte_val(pte));
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (pte_size - 1);
pa |= gpa & ~PAGE_MASK;
- } else {
- /* Translate to host virtual address */
- hva = __gfn_to_hva_memslot(memslot, gfn);
-
- /* Look up the Linux PTE for the backing page */
- pte_size = psize;
- pte = lookup_linux_pte_and_update(pgdir, hva, writing,
- &pte_size);
- if (pte_present(pte) && !pte_numa(pte)) {
- if (writing && !pte_write(pte))
- /* make the actual HPTE be read-only */
- ptel = hpte_make_readonly(ptel);
- is_io = hpte_cache_bits(pte_val(pte));
- pa = pte_pfn(pte) << PAGE_SHIFT;
- pa |= hva & (pte_size - 1);
- pa |= gpa & ~PAGE_MASK;
- }
}
if (pte_size < psize)
@@ -337,8 +313,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (kvm->arch.using_mmu_notifiers &&
- mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -395,61 +370,11 @@ static inline int try_lock_tlbie(unsigned int *lock)
return old == 0;
}
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
- long npages, int global, bool need_sync)
-{
- long i;
-
- if (global) {
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbie %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbie %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbiel %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbiel %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("ptesync" : : : "memory");
- }
-}
-
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
long npages, int global, bool need_sync)
{
long i;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970 tlbie instruction is a bit different */
- do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
- return;
- }
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
@@ -667,40 +592,29 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
rev->guest_rpte = r;
note_hpte_modification(kvm, rev);
}
- r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
/* Update HPTE */
if (v & HPTE_V_VALID) {
- rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
- do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
- * If the host has this page as readonly but the guest
- * wants to make it read/write, reduce the permissions.
- * Checking the host permissions involves finding the
- * memslot and then the Linux PTE for the page.
+ * If the page is valid, don't let it transition from
+ * readonly to writable. If it should be writable, we'll
+ * take a trap and let the page fault code sort it out.
*/
- if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
- unsigned long psize, gfn, hva;
- struct kvm_memory_slot *memslot;
- pgd_t *pgdir = vcpu->arch.pgdir;
- pte_t pte;
-
- psize = hpte_page_size(v, r);
- gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
- memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
- if (memslot) {
- hva = __gfn_to_hva_memslot(memslot, gfn);
- pte = lookup_linux_pte_and_update(pgdir, hva,
- 1, &psize);
- if (pte_present(pte) && !pte_write(pte))
- r = hpte_make_readonly(r);
- }
+ pte = be64_to_cpu(hpte[1]);
+ r = (pte & ~mask) | bits;
+ if (hpte_is_writable(r) && !hpte_is_writable(pte))
+ r = hpte_make_readonly(r);
+ /* If the PTE is changing, invalidate it first */
+ if (r != pte) {
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+ HPTE_V_ABSENT);
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
+ true);
+ hpte[1] = cpu_to_be64(r);
}
}
- hpte[1] = cpu_to_be64(r);
- eieio();
- hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
+ unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
asm volatile("ptesync" : : : "memory");
return H_SUCCESS;
}
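The rewritten H_PROTECT path above never lets a valid HPTE go straight from read-only to writable; if the guest asks for write access, the entry keeps its read-only permission and the subsequent protection fault lets the page-fault code grant the write after checking the host PTE. A compact model of that rule is sketched below, with a made-up RPTE_WRITABLE flag and an empty invalidate helper standing in for the real mark-absent/tlbie sequence.

    #include <stdint.h>

    /* Illustrative flag value only -- not the architected HPTE PP layout. */
    #define RPTE_WRITABLE  0x2ull

    struct hpte_model {
        uint64_t v;   /* first doubleword: valid/absent bits, etc. */
        uint64_t r;   /* second doubleword: protection bits, RPN, ... */
    };

    /* Hypothetical helper standing in for the real-mode primitives. */
    static void invalidate_and_tlbie(struct hpte_model *h)
    {
        (void)h;  /* stand-in for: mark absent, tlbie, update in place */
    }

    /*
     * Model of the H_PROTECT update: never silently upgrade an existing
     * translation from read-only to writable; the page-fault path grants
     * write access later, after checking host permissions.
     */
    static void protect_update(struct hpte_model *h, uint64_t mask, uint64_t bits)
    {
        uint64_t old_r = h->r;
        uint64_t new_r = (old_r & ~mask) | bits;

        if ((new_r & RPTE_WRITABLE) && !(old_r & RPTE_WRITABLE))
            new_r &= ~RPTE_WRITABLE;        /* keep it read-only for now */

        if (new_r != old_r) {
            invalidate_and_tlbie(h);        /* invalidate before changing */
            h->r = new_r;
        }
    }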
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 3ee38e6e884f..7b066f6b02ad 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -183,8 +183,10 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* state update in HW (ie bus transactions) so we can handle them
* separately here as well.
*/
- if (resend)
+ if (resend) {
icp->rm_action |= XICS_RM_CHECK_RESEND;
+ icp->rm_resend_icp = icp;
+ }
}
@@ -254,10 +256,25 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* nothing needs to be done as there can be no XISR to
* reject.
*
+ * ICP state: Check_IPI
+ *
* If the CPPR is less favored, then we might be replacing
- * an interrupt, and thus need to possibly reject it as in
+ * an interrupt, and thus need to possibly reject it.
*
- * ICP state: Check_IPI
+ * ICP State: IPI
+ *
+ * Besides rejecting any pending interrupts, we also
+ * update XISR and pending_pri to mark IPI as pending.
+ *
+ * PAPR does not describe this state, but if the MFRR is being
+ * made less favored than its earlier value, there might be
+ * a previously-rejected interrupt needing to be resent.
+ * Ideally, we would want to resend only if
+ * prio(pending_interrupt) < mfrr &&
+ * prio(pending_interrupt) < cppr
+ * where the pending interrupt is the one that was rejected. But
+ * we don't have that state, so we simply trigger a resend
+ * whenever the MFRR is made less favored.
*/
do {
old_state = new_state = ACCESS_ONCE(icp->state);
@@ -270,13 +287,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
resend = false;
if (mfrr < new_state.cppr) {
/* Reject a pending interrupt if not an IPI */
- if (mfrr <= new_state.pending_pri)
+ if (mfrr <= new_state.pending_pri) {
reject = new_state.xisr;
- new_state.pending_pri = mfrr;
- new_state.xisr = XICS_IPI;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
}
- if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ if (mfrr > old_state.mfrr) {
resend = new_state.need_resend;
new_state.need_resend = 0;
}
@@ -289,8 +307,10 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
}
/* Pass resends to virtual mode */
- if (resend)
+ if (resend) {
this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+ this_icp->rm_resend_icp = icp;
+ }
return check_too_hard(xics, this_icp);
}
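The comment block above describes the resend heuristic that the real-mode and virtual-mode H_IPI handlers now share: because the ICP does not remember which interrupt it rejected, a resend is triggered whenever the MFRR is made less favored, which over-approximates the ideal prio(pending) < mfrr && prio(pending) < cppr test. Below is a user-space sketch of a single state update; the kernel performs this inside a cmpxchg retry loop on the packed ICP state, and the field layout and XICS_IPI value here are illustrative only.

    #include <stdint.h>
    #include <stdbool.h>

    #define XICS_IPI 2   /* illustrative source number for the IPI */

    /* Simplified ICP state; the kernel packs this into one atomic word. */
    struct icp_state {
        uint8_t  cppr;         /* current processor priority */
        uint8_t  mfrr;         /* most favored request register */
        uint8_t  pending_pri;  /* priority of the pending interrupt */
        uint32_t xisr;         /* pending interrupt source, 0 if none */
        bool     need_resend;
    };

    /*
     * Sketch of the H_IPI update: lower numbers are more favored.  The ICP
     * does not remember which interrupt it rejected earlier, so a resend is
     * requested whenever the MFRR becomes less favored than it was.
     */
    static void set_mfrr(struct icp_state *st, uint8_t mfrr,
                         uint32_t *reject, bool *resend)
    {
        uint8_t old_mfrr = st->mfrr;

        st->mfrr = mfrr;
        *reject = 0;
        *resend = false;

        if (mfrr < st->cppr && mfrr <= st->pending_pri) {
            /* The new IPI is more favored: displace the pending interrupt. */
            *reject = st->xisr;
            st->pending_pri = mfrr;
            st->xisr = XICS_IPI;
        }
        if (mfrr > old_mfrr) {
            *resend = st->need_resend;
            st->need_resend = false;
        }
    }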
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index edb2ccdbb2ba..10554df13852 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -94,20 +94,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r6, HSTATE_PMC + 12(r13)
lwz r8, HSTATE_PMC + 16(r13)
lwz r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- lwz r10, HSTATE_PMC + 24(r13)
- lwz r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r4
mtspr SPRN_PMC3, r5
mtspr SPRN_PMC4, r6
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, HSTATE_MMCR(r13)
ld r4, HSTATE_MMCR + 8(r13)
ld r5, HSTATE_MMCR + 16(r13)
@@ -153,11 +145,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
beq 11f
cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
beq cr2, 14f /* HMI check */
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* RFI into the highmem handler, or branch to interrupt handler */
mfmsr r6
@@ -166,7 +156,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- beqa 0x500 /* external interrupt (PPC970) */
beq cr1, 13f /* machine check */
RFI
@@ -201,8 +190,6 @@ kvmppc_primary_no_guest:
bge kvm_novcpu_exit /* another thread already exiting */
li r3, NAPPING_NOVCPU
stb r3, HSTATE_NAPPING(r13)
- li r3, 1
- stb r3, HSTATE_HWTHREAD_REQ(r13)
b kvm_do_nap
@@ -293,6 +280,8 @@ kvm_start_guest:
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
+kvm_secondary_got_guest:
+
/* Set HSTATE_DSCR(r13) to something sensible */
ld r6, PACA_DSCR(r13)
std r6, HSTATE_DSCR(r13)
@@ -318,27 +307,46 @@ kvm_start_guest:
stwcx. r3, 0, r4
bne 51b
+/*
+ * At this point we have finished executing in the guest.
+ * We need to wait for hwthread_req to become zero, since
+ * we may not turn on the MMU while hwthread_req is non-zero.
+ * While waiting we also need to check if we get given a vcpu to run.
+ */
kvm_no_guest:
- li r0, KVM_HWTHREAD_IN_NAP
+ lbz r3, HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r3, 0
+ bne 53f
+ HMT_MEDIUM
+ li r0, KVM_HWTHREAD_IN_KERNEL
stb r0, HSTATE_HWTHREAD_STATE(r13)
-kvm_do_nap:
- /* Clear the runlatch bit before napping */
- mfspr r2, SPRN_CTRLF
- clrrdi r2, r2, 1
- mtspr SPRN_CTRLT, r2
-
+ /* need to recheck hwthread_req after a barrier, to avoid race */
+ sync
+ lbz r3, HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r3, 0
+ bne 54f
+/*
+ * We jump to power7_wakeup_loss, which will return to the caller
+ * of power7_nap in the powernv cpu offline loop. The value we
+ * put in r3 becomes the return value for power7_nap.
+ */
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
- isync
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
- nap
- b .
+ li r3, 0
+ b power7_wakeup_loss
+
+53: HMT_LOW
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ beq kvm_no_guest
+ HMT_MEDIUM
+ b kvm_secondary_got_guest
+
+54: li r0, KVM_HWTHREAD_IN_KVM
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+ b kvm_no_guest
/******************************************************************************
* *
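The reworked kvm_no_guest path above uses a publish-then-recheck pattern: the secondary thread first checks hwthread_req, advertises that it is back in the kernel, then re-reads hwthread_req after a full barrier so a request that raced with the state change is not lost. A C11 sketch of that handshake follows; the state values and field names are illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative thread-state values, not the kernel's definitions. */
    enum hwthread_state { IN_KERNEL, IN_KVM, IN_NAP };

    struct hwthread {
        _Atomic int hwthread_req;     /* set by the primary to claim the thread */
        _Atomic int hwthread_state;   /* what this secondary thread is doing */
    };

    /*
     * Model of the secondary-thread loop: publish that we are back in the
     * kernel, then re-read the request flag after a full barrier so a claim
     * that raced with the state change is not missed.
     */
    static bool may_go_offline(struct hwthread *t)
    {
        if (atomic_load(&t->hwthread_req) != 0)
            return false;                        /* still wanted by KVM */

        atomic_store(&t->hwthread_state, IN_KERNEL);
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&t->hwthread_req) != 0) {
            atomic_store(&t->hwthread_state, IN_KVM);
            return false;                        /* raced with a new request */
        }
        return true;                             /* safe to nap / go offline */
    }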
@@ -374,11 +382,8 @@ kvmppc_hv_entry:
slbia
ptesync
-BEGIN_FTR_SECTION
- b 30f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 host -> guest partition switch code.
+ * POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
@@ -486,97 +491,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi r3,512 /* 1 microsecond */
li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
blt hdec_soon
- b 31f
-
- /*
- * PPC970 host -> guest partition switch code.
- * We have to lock against concurrent tlbies,
- * using native_tlbie_lock to lock against host tlbies
- * and kvm->arch.tlbie_lock to lock against guest tlbies.
- * We also have to invalidate the TLB since its
- * entries aren't tagged with the LPID.
- */
-30: ld r5,HSTATE_KVM_VCORE(r13)
- ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* first take native_tlbie_lock */
- .section ".toc","aw"
-toc_tlbie_lock:
- .tc native_tlbie_lock[TC],native_tlbie_lock
- .previous
- ld r3,toc_tlbie_lock@toc(r2)
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
-
- /* Take the guest's tlbie_lock */
- addi r3,r9,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
- ld r6,KVM_SDR1(r9)
- mtspr SPRN_SDR1,r6 /* switch to partition page table */
-
- /* Set up HID4 with the guest's LPID etc. */
- sync
- mtspr SPRN_HID4,r7
- isync
-
- /* drop the guest's tlbie_lock */
- li r0,0
- stw r0,0(r3)
- /* Check if HDEC expires soon */
- mfspr r3,SPRN_HDEC
- cmpwi r3,10
- li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- blt hdec_soon
-
- /* Enable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,1
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
-31:
/* Do we have a guest vcpu to run? */
cmpdi r4, 0
beq kvmppc_primary_no_guest
@@ -606,7 +521,6 @@ kvmppc_got_guest:
stb r6, VCPU_VPA_DIRTY(r4)
25:
-BEGIN_FTR_SECTION
/* Save purr/spurr */
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
@@ -616,7 +530,6 @@ BEGIN_FTR_SECTION
ld r8,VCPU_SPURR(r4)
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Set partition DABR */
@@ -625,9 +538,7 @@ BEGIN_FTR_SECTION
ld r6,VCPU_DABR(r4)
mtspr SPRN_DABRX,r5
mtspr SPRN_DABR,r6
- BEGIN_FTR_SECTION_NESTED(89)
isync
- END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -758,20 +669,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r7, VCPU_PMC + 12(r4)
lwz r8, VCPU_PMC + 16(r4)
lwz r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
- lwz r10, VCPU_PMC + 24(r4)
- lwz r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r5
mtspr SPRN_PMC3, r6
mtspr SPRN_PMC4, r7
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4)
@@ -818,14 +721,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)
-BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
mtspr SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
- /* Skip next section on POWER7 or PPC970 */
+ /* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
@@ -901,7 +802,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
-BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
ld r6,VCPU_UAMOR(r4)
@@ -909,7 +809,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r5
mtspr SPRN_UAMOR,r6
mtspr SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Restore state of CTRL run bit; assume 1 on entry */
lwz r5,VCPU_CTRL(r4)
@@ -944,13 +843,11 @@ deliver_guest_interrupt:
rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
cmpdi cr1, r0, 0
andi. r8, r11, MSR_EE
-BEGIN_FTR_SECTION
mfspr r8, SPRN_LPCR
/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
mtspr SPRN_LPCR, r8
isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
beq 5f
li r0, BOOK3S_INTERRUPT_EXTERNAL
bne cr1, 12f
@@ -1105,15 +1002,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
stw r12,VCPU_TRAP(r9)
- /* Save HEIR (HV emulation assist reg) in last_inst
+ /* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
-BEGIN_FTR_SECTION
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-11: stw r3,VCPU_LAST_INST(r9)
+11: stw r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */
mfctr r3
@@ -1121,13 +1016,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_CTR(r9)
stw r4, VCPU_XER(r9)
-BEGIN_FTR_SECTION
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
beq kvmppc_hdsi
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
@@ -1140,11 +1033,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
- /* Only handle external interrupts here on arch 206 and later */
-BEGIN_FTR_SECTION
- b ext_interrupt_to_host
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
-
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
bne+ ext_interrupt_to_host
@@ -1174,11 +1062,9 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
mfdsisr r7
std r6, VCPU_DAR(r9)
stw r7, VCPU_DSISR(r9)
-BEGIN_FTR_SECTION
/* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
@@ -1217,7 +1103,6 @@ mc_cont:
/*
* Save the guest PURR/SPURR
*/
-BEGIN_FTR_SECTION
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
ld r7,VCPU_PURR(r9)
@@ -1237,7 +1122,6 @@ BEGIN_FTR_SECTION
add r4,r4,r6
mtspr SPRN_PURR,r3
mtspr SPRN_SPURR,r4
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
/* Save DEC */
mfspr r5,SPRN_DEC
@@ -1287,22 +1171,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
mfspr r6,SPRN_UAMOR
std r5,VCPU_AMR(r9)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save non-volatile GPRs */
std r14, VCPU_GPR(R14)(r9)
@@ -1484,11 +1364,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r7, 0
mtspr SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
@@ -1513,10 +1391,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r6, SPRN_PMC4
mfspr r7, SPRN_PMC5
mfspr r8, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, VCPU_PMC(r9)
stw r4, VCPU_PMC + 4(r9)
stw r5, VCPU_PMC + 8(r9)
@@ -1524,10 +1398,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r7, VCPU_PMC + 16(r9)
stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
- stw r10, VCPU_PMC + 24(r9)
- stw r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
@@ -1547,11 +1417,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
ptesync
hdec_soon: /* r12 = trap, r13 = paca */
-BEGIN_FTR_SECTION
- b 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 guest -> host partition switch code.
+ * POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
*/
@@ -1679,87 +1546,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
16: ld r8,KVM_HOST_LPCR(r4)
mtspr SPRN_LPCR,r8
isync
- b 33f
-
- /*
- * PPC970 guest -> host partition switch code.
- * We have to lock against concurrent tlbies, and
- * we have to flush the whole TLB.
- */
-32: ld r5,HSTATE_KVM_VCORE(r13)
- ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* Take the guest's tlbie_lock */
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
- addi r3,r4,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop guest tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
-
- /* take native_tlbie_lock */
- ld r3,toc_tlbie_lock@toc(2)
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r6,KVM_HOST_SDR1(r4)
- mtspr SPRN_SDR1,r6 /* switch to host page table */
-
- /* Set up host HID4 value */
- sync
- mtspr SPRN_HID4,r7
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- lis r8,0x7fff /* MAX_INT@h */
- mtspr SPRN_HDEC,r8
-
- /* Disable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,0
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
/* load host SLB entries */
-33: ld r8,PACA_SLBSHADOWPTR(r13)
+ ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
li r3, SLBSHADOW_SAVEAREA
@@ -2028,7 +1817,7 @@ hcall_real_table:
.long 0 /* 0xd8 */
.long 0 /* 0xdc */
.long DOTSYM(kvmppc_h_cede) - hcall_real_table
- .long 0 /* 0xe4 */
+ .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
.long 0 /* 0xe8 */
.long 0 /* 0xec */
.long 0 /* 0xf0 */
@@ -2107,9 +1896,6 @@ _GLOBAL(kvmppc_h_cede)
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
-BEGIN_FTR_SECTION
- b kvm_cede_exit /* just send it up to host on 970 */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
* Set our bit in the bitmask of napping threads unless all the
@@ -2172,6 +1958,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
* runlatch bit before napping.
*/
+kvm_do_nap:
mfspr r2, SPRN_CTRLF
clrrdi r2, r2, 1
mtspr SPRN_CTRLT, r2
@@ -2435,7 +2222,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r3,VCPU_FPRS
bl store_fp_state
#ifdef CONFIG_ALTIVEC
@@ -2471,7 +2257,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r4,VCPU_FPRS
bl load_fp_state
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index bfb8035314e3..bd6ab1672ae6 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -352,14 +352,6 @@ static inline u32 inst_get_field(u32 inst, int msb, int lsb)
return kvmppc_get_field(inst, msb + 32, lsb + 32);
}
-/*
- * Replaces inst bits with ordering according to spec.
- */
-static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
-{
- return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
-}
-
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cf2eb16846d1..f57383941d03 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -644,11 +644,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static inline int get_fpr_index(int i)
-{
- return i * TS_FPRWIDTH;
-}
-
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index eaeb78047fb8..807351f76f84 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -613,10 +613,25 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* there might be a previously-rejected interrupt needing
* to be resent.
*
+ * ICP state: Check_IPI
+ *
* If the CPPR is less favored, then we might be replacing
- * an interrupt, and thus need to possibly reject it as in
+ * an interrupt, and thus need to possibly reject it.
*
- * ICP state: Check_IPI
+ * ICP State: IPI
+ *
+ * Besides rejecting any pending interrupts, we also
+ * update XISR and pending_pri to mark IPI as pending.
+ *
+ * PAPR does not describe this state, but if the MFRR is being
+ * made less favored than its earlier value, there might be
+ * a previously-rejected interrupt needing to be resent.
+ * Ideally, we would want to resend only if
+ * prio(pending_interrupt) < mfrr &&
+ * prio(pending_interrupt) < cppr
+ * where the pending interrupt is the one that was rejected. But
+ * we don't have that state, so we simply trigger a resend
+ * whenever the MFRR is made less favored.
*/
do {
old_state = new_state = ACCESS_ONCE(icp->state);
@@ -629,13 +644,14 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
resend = false;
if (mfrr < new_state.cppr) {
/* Reject a pending interrupt if not an IPI */
- if (mfrr <= new_state.pending_pri)
+ if (mfrr <= new_state.pending_pri) {
reject = new_state.xisr;
- new_state.pending_pri = mfrr;
- new_state.xisr = XICS_IPI;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
}
- if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ if (mfrr > old_state.mfrr) {
resend = new_state.need_resend;
new_state.need_resend = 0;
}
@@ -789,7 +805,7 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
if (icp->rm_action & XICS_RM_KICK_VCPU)
kvmppc_fast_vcpu_kick(icp->rm_kick_target);
if (icp->rm_action & XICS_RM_CHECK_RESEND)
- icp_check_resend(xics, icp);
+ icp_check_resend(xics, icp->rm_resend_icp);
if (icp->rm_action & XICS_RM_REJECT)
icp_deliver_irq(xics, icp, icp->rm_reject);
if (icp->rm_action & XICS_RM_NOTIFY_EOI)
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index e8aaa7a3f209..73f0f2723c07 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -74,6 +74,7 @@ struct kvmppc_icp {
#define XICS_RM_NOTIFY_EOI 0x8
u32 rm_action;
struct kvm_vcpu *rm_kick_target;
+ struct kvmppc_icp *rm_resend_icp;
u32 rm_reject;
u32 rm_eoied_irq;
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 2e02ed849f36..b29ce752c7d6 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
unsigned long sid;
int ret = -1;
- sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ sid = __this_cpu_inc_return(pcpu_last_used_sid);
if (sid < NUM_TIDS) {
- __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ __this_cpu_write(pcpu_sids.entry[sid], entry);
entry->val = sid;
- entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
ret = sid;
}
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
static inline int local_sid_lookup(struct id *entry)
{
if (entry && entry->val != 0 &&
- __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
- entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
+ entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
return entry->val;
return -1;
}
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
- __get_cpu_var(pcpu_last_used_sid) = 0;
- memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+ __this_cpu_write(pcpu_last_used_sid, 0);
+ memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}
static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -299,14 +299,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
-void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
-void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
-{
-}
-
static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 769778f855b0..cc536d4a75ef 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -661,7 +661,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
if (unlikely((pr && !(mas3 & MAS3_UX)) ||
(!pr && !(mas3 & MAS3_SX)))) {
pr_err_ratelimited(
- "%s: Instuction emulation from guest addres %08lx without execute permission\n",
+ "%s: Instruction emulation from guest address %08lx without execute permission\n",
__func__, geaddr);
return EMULATE_AGAIN;
}
@@ -673,7 +673,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
pr_err_ratelimited(
- "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n",
+ "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
__func__, geaddr);
return EMULATE_AGAIN;
}
@@ -686,7 +686,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
/* Guard against emulation from devices area */
if (unlikely(!page_is_ram(pfn))) {
- pr_err_ratelimited("%s: Instruction emulation from non-RAM host addres %08llx is not supported\n",
+ pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
__func__, addr);
return EMULATE_AGAIN;
}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 2fdc8722e324..cda695de8aa7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
+ __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
+ __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
}
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index c1f8f53cd312..c45eaab752b0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -527,18 +527,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 0;
break;
case KVM_CAP_PPC_RMA:
- r = hv_enabled;
- /* PPC970 requires an RMA */
- if (r && cpu_has_feature(CPU_FTR_ARCH_201))
- r = 2;
+ r = 0;
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (hv_enabled)
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
- else
- r = 0;
+ r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
new file mode 100644
index 000000000000..f647ce0f428b
--- /dev/null
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -0,0 +1,32 @@
+#if !defined(_TRACE_KVM_BOOK3S_H)
+#define _TRACE_KVM_BOOK3S_H
+
+/*
+ * Common defines used by the trace macros in trace_pr.h and trace_hv.h
+ */
+
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#endif
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index f7537cf26ce7..7ec534d1db9f 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -151,6 +151,47 @@ TRACE_EVENT(kvm_booke206_ref_release,
__entry->pfn, __entry->flags)
);
+#ifdef CONFIG_SPE_POSSIBLE
+#define kvm_trace_symbol_irqprio_spe \
+ {BOOKE_IRQPRIO_SPE_UNAVAIL, "SPE_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_SPE_FP_DATA, "SPE_FP_DATA"}, \
+ {BOOKE_IRQPRIO_SPE_FP_ROUND, "SPE_FP_ROUND"},
+#else
+#define kvm_trace_symbol_irqprio_spe
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#define kvm_trace_symbol_irqprio_e500mc \
+ {BOOKE_IRQPRIO_ALTIVEC_UNAVAIL, "ALTIVEC_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_ALTIVEC_ASSIST, "ALTIVEC_ASSIST"},
+#else
+#define kvm_trace_symbol_irqprio_e500mc
+#endif
+
+#define kvm_trace_symbol_irqprio \
+ kvm_trace_symbol_irqprio_spe \
+ kvm_trace_symbol_irqprio_e500mc \
+ {BOOKE_IRQPRIO_DATA_STORAGE, "DATA_STORAGE"}, \
+ {BOOKE_IRQPRIO_INST_STORAGE, "INST_STORAGE"}, \
+ {BOOKE_IRQPRIO_ALIGNMENT, "ALIGNMENT"}, \
+ {BOOKE_IRQPRIO_PROGRAM, "PROGRAM"}, \
+ {BOOKE_IRQPRIO_FP_UNAVAIL, "FP_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_SYSCALL, "SYSCALL"}, \
+ {BOOKE_IRQPRIO_AP_UNAVAIL, "AP_UNAVAIL"}, \
+ {BOOKE_IRQPRIO_DTLB_MISS, "DTLB_MISS"}, \
+ {BOOKE_IRQPRIO_ITLB_MISS, "ITLB_MISS"}, \
+ {BOOKE_IRQPRIO_MACHINE_CHECK, "MACHINE_CHECK"}, \
+ {BOOKE_IRQPRIO_DEBUG, "DEBUG"}, \
+ {BOOKE_IRQPRIO_CRITICAL, "CRITICAL"}, \
+ {BOOKE_IRQPRIO_WATCHDOG, "WATCHDOG"}, \
+ {BOOKE_IRQPRIO_EXTERNAL, "EXTERNAL"}, \
+ {BOOKE_IRQPRIO_FIT, "FIT"}, \
+ {BOOKE_IRQPRIO_DECREMENTER, "DECREMENTER"}, \
+ {BOOKE_IRQPRIO_PERFORMANCE_MONITOR, "PERFORMANCE_MONITOR"}, \
+ {BOOKE_IRQPRIO_EXTERNAL_LEVEL, "EXTERNAL_LEVEL"}, \
+ {BOOKE_IRQPRIO_DBELL, "DBELL"}, \
+ {BOOKE_IRQPRIO_DBELL_CRIT, "DBELL_CRIT"} \
+
TRACE_EVENT(kvm_booke_queue_irqprio,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
TP_ARGS(vcpu, priority),
@@ -167,8 +208,10 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
__entry->pending = vcpu->arch.pending_exceptions;
),
- TP_printk("vcpu=%x prio=%x pending=%lx",
- __entry->cpu_nr, __entry->priority, __entry->pending)
+ TP_printk("vcpu=%x prio=%s pending=%lx",
+ __entry->cpu_nr,
+ __print_symbolic(__entry->priority, kvm_trace_symbol_irqprio),
+ __entry->pending)
);
#endif
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
new file mode 100644
index 000000000000..33d9daff5783
--- /dev/null
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -0,0 +1,477 @@
+#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_HV_H
+
+#include <linux/tracepoint.h>
+#include "trace_book3s.h"
+#include <asm/hvcall.h>
+#include <asm/kvm_asm.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm_hv
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
+#define kvm_trace_symbol_hcall \
+ {H_REMOVE, "H_REMOVE"}, \
+ {H_ENTER, "H_ENTER"}, \
+ {H_READ, "H_READ"}, \
+ {H_CLEAR_MOD, "H_CLEAR_MOD"}, \
+ {H_CLEAR_REF, "H_CLEAR_REF"}, \
+ {H_PROTECT, "H_PROTECT"}, \
+ {H_GET_TCE, "H_GET_TCE"}, \
+ {H_PUT_TCE, "H_PUT_TCE"}, \
+ {H_SET_SPRG0, "H_SET_SPRG0"}, \
+ {H_SET_DABR, "H_SET_DABR"}, \
+ {H_PAGE_INIT, "H_PAGE_INIT"}, \
+ {H_SET_ASR, "H_SET_ASR"}, \
+ {H_ASR_ON, "H_ASR_ON"}, \
+ {H_ASR_OFF, "H_ASR_OFF"}, \
+ {H_LOGICAL_CI_LOAD, "H_LOGICAL_CI_LOAD"}, \
+ {H_LOGICAL_CI_STORE, "H_LOGICAL_CI_STORE"}, \
+ {H_LOGICAL_CACHE_LOAD, "H_LOGICAL_CACHE_LOAD"}, \
+ {H_LOGICAL_CACHE_STORE, "H_LOGICAL_CACHE_STORE"}, \
+ {H_LOGICAL_ICBI, "H_LOGICAL_ICBI"}, \
+ {H_LOGICAL_DCBF, "H_LOGICAL_DCBF"}, \
+ {H_GET_TERM_CHAR, "H_GET_TERM_CHAR"}, \
+ {H_PUT_TERM_CHAR, "H_PUT_TERM_CHAR"}, \
+ {H_REAL_TO_LOGICAL, "H_REAL_TO_LOGICAL"}, \
+ {H_HYPERVISOR_DATA, "H_HYPERVISOR_DATA"}, \
+ {H_EOI, "H_EOI"}, \
+ {H_CPPR, "H_CPPR"}, \
+ {H_IPI, "H_IPI"}, \
+ {H_IPOLL, "H_IPOLL"}, \
+ {H_XIRR, "H_XIRR"}, \
+ {H_PERFMON, "H_PERFMON"}, \
+ {H_MIGRATE_DMA, "H_MIGRATE_DMA"}, \
+ {H_REGISTER_VPA, "H_REGISTER_VPA"}, \
+ {H_CEDE, "H_CEDE"}, \
+ {H_CONFER, "H_CONFER"}, \
+ {H_PROD, "H_PROD"}, \
+ {H_GET_PPP, "H_GET_PPP"}, \
+ {H_SET_PPP, "H_SET_PPP"}, \
+ {H_PURR, "H_PURR"}, \
+ {H_PIC, "H_PIC"}, \
+ {H_REG_CRQ, "H_REG_CRQ"}, \
+ {H_FREE_CRQ, "H_FREE_CRQ"}, \
+ {H_VIO_SIGNAL, "H_VIO_SIGNAL"}, \
+ {H_SEND_CRQ, "H_SEND_CRQ"}, \
+ {H_COPY_RDMA, "H_COPY_RDMA"}, \
+ {H_REGISTER_LOGICAL_LAN, "H_REGISTER_LOGICAL_LAN"}, \
+ {H_FREE_LOGICAL_LAN, "H_FREE_LOGICAL_LAN"}, \
+ {H_ADD_LOGICAL_LAN_BUFFER, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {H_SEND_LOGICAL_LAN, "H_SEND_LOGICAL_LAN"}, \
+ {H_BULK_REMOVE, "H_BULK_REMOVE"}, \
+ {H_MULTICAST_CTRL, "H_MULTICAST_CTRL"}, \
+ {H_SET_XDABR, "H_SET_XDABR"}, \
+ {H_STUFF_TCE, "H_STUFF_TCE"}, \
+ {H_PUT_TCE_INDIRECT, "H_PUT_TCE_INDIRECT"}, \
+ {H_CHANGE_LOGICAL_LAN_MAC, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {H_VTERM_PARTNER_INFO, "H_VTERM_PARTNER_INFO"}, \
+ {H_REGISTER_VTERM, "H_REGISTER_VTERM"}, \
+ {H_FREE_VTERM, "H_FREE_VTERM"}, \
+ {H_RESET_EVENTS, "H_RESET_EVENTS"}, \
+ {H_ALLOC_RESOURCE, "H_ALLOC_RESOURCE"}, \
+ {H_FREE_RESOURCE, "H_FREE_RESOURCE"}, \
+ {H_MODIFY_QP, "H_MODIFY_QP"}, \
+ {H_QUERY_QP, "H_QUERY_QP"}, \
+ {H_REREGISTER_PMR, "H_REREGISTER_PMR"}, \
+ {H_REGISTER_SMR, "H_REGISTER_SMR"}, \
+ {H_QUERY_MR, "H_QUERY_MR"}, \
+ {H_QUERY_MW, "H_QUERY_MW"}, \
+ {H_QUERY_HCA, "H_QUERY_HCA"}, \
+ {H_QUERY_PORT, "H_QUERY_PORT"}, \
+ {H_MODIFY_PORT, "H_MODIFY_PORT"}, \
+ {H_DEFINE_AQP1, "H_DEFINE_AQP1"}, \
+ {H_GET_TRACE_BUFFER, "H_GET_TRACE_BUFFER"}, \
+ {H_DEFINE_AQP0, "H_DEFINE_AQP0"}, \
+ {H_RESIZE_MR, "H_RESIZE_MR"}, \
+ {H_ATTACH_MCQP, "H_ATTACH_MCQP"}, \
+ {H_DETACH_MCQP, "H_DETACH_MCQP"}, \
+ {H_CREATE_RPT, "H_CREATE_RPT"}, \
+ {H_REMOVE_RPT, "H_REMOVE_RPT"}, \
+ {H_REGISTER_RPAGES, "H_REGISTER_RPAGES"}, \
+ {H_DISABLE_AND_GETC, "H_DISABLE_AND_GETC"}, \
+ {H_ERROR_DATA, "H_ERROR_DATA"}, \
+ {H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \
+ {H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \
+ {H_MANAGE_TRACE, "H_MANAGE_TRACE"}, \
+ {H_FREE_LOGICAL_LAN_BUFFER, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {H_QUERY_INT_STATE, "H_QUERY_INT_STATE"}, \
+ {H_POLL_PENDING, "H_POLL_PENDING"}, \
+ {H_ILLAN_ATTRIBUTES, "H_ILLAN_ATTRIBUTES"}, \
+ {H_MODIFY_HEA_QP, "H_MODIFY_HEA_QP"}, \
+ {H_QUERY_HEA_QP, "H_QUERY_HEA_QP"}, \
+ {H_QUERY_HEA, "H_QUERY_HEA"}, \
+ {H_QUERY_HEA_PORT, "H_QUERY_HEA_PORT"}, \
+ {H_MODIFY_HEA_PORT, "H_MODIFY_HEA_PORT"}, \
+ {H_REG_BCMC, "H_REG_BCMC"}, \
+ {H_DEREG_BCMC, "H_DEREG_BCMC"}, \
+ {H_REGISTER_HEA_RPAGES, "H_REGISTER_HEA_RPAGES"}, \
+ {H_DISABLE_AND_GET_HEA, "H_DISABLE_AND_GET_HEA"}, \
+ {H_GET_HEA_INFO, "H_GET_HEA_INFO"}, \
+ {H_ALLOC_HEA_RESOURCE, "H_ALLOC_HEA_RESOURCE"}, \
+ {H_ADD_CONN, "H_ADD_CONN"}, \
+ {H_DEL_CONN, "H_DEL_CONN"}, \
+ {H_JOIN, "H_JOIN"}, \
+ {H_VASI_STATE, "H_VASI_STATE"}, \
+ {H_ENABLE_CRQ, "H_ENABLE_CRQ"}, \
+ {H_GET_EM_PARMS, "H_GET_EM_PARMS"}, \
+ {H_SET_MPP, "H_SET_MPP"}, \
+ {H_GET_MPP, "H_GET_MPP"}, \
+ {H_HOME_NODE_ASSOCIATIVITY, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {H_BEST_ENERGY, "H_BEST_ENERGY"}, \
+ {H_XIRR_X, "H_XIRR_X"}, \
+ {H_RANDOM, "H_RANDOM"}, \
+ {H_COP, "H_COP"}, \
+ {H_GET_MPP_X, "H_GET_MPP_X"}, \
+ {H_SET_MODE, "H_SET_MODE"}, \
+ {H_RTAS, "H_RTAS"}
+
+#define kvm_trace_symbol_kvmret \
+ {RESUME_GUEST, "RESUME_GUEST"}, \
+ {RESUME_GUEST_NV, "RESUME_GUEST_NV"}, \
+ {RESUME_HOST, "RESUME_HOST"}, \
+ {RESUME_HOST_NV, "RESUME_HOST_NV"}
+
+#define kvm_trace_symbol_hcall_rc \
+ {H_SUCCESS, "H_SUCCESS"}, \
+ {H_BUSY, "H_BUSY"}, \
+ {H_CLOSED, "H_CLOSED"}, \
+ {H_NOT_AVAILABLE, "H_NOT_AVAILABLE"}, \
+ {H_CONSTRAINED, "H_CONSTRAINED"}, \
+ {H_PARTIAL, "H_PARTIAL"}, \
+ {H_IN_PROGRESS, "H_IN_PROGRESS"}, \
+ {H_PAGE_REGISTERED, "H_PAGE_REGISTERED"}, \
+ {H_PARTIAL_STORE, "H_PARTIAL_STORE"}, \
+ {H_PENDING, "H_PENDING"}, \
+ {H_CONTINUE, "H_CONTINUE"}, \
+ {H_LONG_BUSY_START_RANGE, "H_LONG_BUSY_START_RANGE"}, \
+ {H_LONG_BUSY_ORDER_1_MSEC, "H_LONG_BUSY_ORDER_1_MSEC"}, \
+ {H_LONG_BUSY_ORDER_10_MSEC, "H_LONG_BUSY_ORDER_10_MSEC"}, \
+ {H_LONG_BUSY_ORDER_100_MSEC, "H_LONG_BUSY_ORDER_100_MSEC"}, \
+ {H_LONG_BUSY_ORDER_1_SEC, "H_LONG_BUSY_ORDER_1_SEC"}, \
+ {H_LONG_BUSY_ORDER_10_SEC, "H_LONG_BUSY_ORDER_10_SEC"}, \
+ {H_LONG_BUSY_ORDER_100_SEC, "H_LONG_BUSY_ORDER_100_SEC"}, \
+ {H_LONG_BUSY_END_RANGE, "H_LONG_BUSY_END_RANGE"}, \
+ {H_TOO_HARD, "H_TOO_HARD"}, \
+ {H_HARDWARE, "H_HARDWARE"}, \
+ {H_FUNCTION, "H_FUNCTION"}, \
+ {H_PRIVILEGE, "H_PRIVILEGE"}, \
+ {H_PARAMETER, "H_PARAMETER"}, \
+ {H_BAD_MODE, "H_BAD_MODE"}, \
+ {H_PTEG_FULL, "H_PTEG_FULL"}, \
+ {H_NOT_FOUND, "H_NOT_FOUND"}, \
+ {H_RESERVED_DABR, "H_RESERVED_DABR"}, \
+ {H_NO_MEM, "H_NO_MEM"}, \
+ {H_AUTHORITY, "H_AUTHORITY"}, \
+ {H_PERMISSION, "H_PERMISSION"}, \
+ {H_DROPPED, "H_DROPPED"}, \
+ {H_SOURCE_PARM, "H_SOURCE_PARM"}, \
+ {H_DEST_PARM, "H_DEST_PARM"}, \
+ {H_REMOTE_PARM, "H_REMOTE_PARM"}, \
+ {H_RESOURCE, "H_RESOURCE"}, \
+ {H_ADAPTER_PARM, "H_ADAPTER_PARM"}, \
+ {H_RH_PARM, "H_RH_PARM"}, \
+ {H_RCQ_PARM, "H_RCQ_PARM"}, \
+ {H_SCQ_PARM, "H_SCQ_PARM"}, \
+ {H_EQ_PARM, "H_EQ_PARM"}, \
+ {H_RT_PARM, "H_RT_PARM"}, \
+ {H_ST_PARM, "H_ST_PARM"}, \
+ {H_SIGT_PARM, "H_SIGT_PARM"}, \
+ {H_TOKEN_PARM, "H_TOKEN_PARM"}, \
+ {H_MLENGTH_PARM, "H_MLENGTH_PARM"}, \
+ {H_MEM_PARM, "H_MEM_PARM"}, \
+ {H_MEM_ACCESS_PARM, "H_MEM_ACCESS_PARM"}, \
+ {H_ATTR_PARM, "H_ATTR_PARM"}, \
+ {H_PORT_PARM, "H_PORT_PARM"}, \
+ {H_MCG_PARM, "H_MCG_PARM"}, \
+ {H_VL_PARM, "H_VL_PARM"}, \
+ {H_TSIZE_PARM, "H_TSIZE_PARM"}, \
+ {H_TRACE_PARM, "H_TRACE_PARM"}, \
+ {H_MASK_PARM, "H_MASK_PARM"}, \
+ {H_MCG_FULL, "H_MCG_FULL"}, \
+ {H_ALIAS_EXIST, "H_ALIAS_EXIST"}, \
+ {H_P_COUNTER, "H_P_COUNTER"}, \
+ {H_TABLE_FULL, "H_TABLE_FULL"}, \
+ {H_ALT_TABLE, "H_ALT_TABLE"}, \
+ {H_MR_CONDITION, "H_MR_CONDITION"}, \
+ {H_NOT_ENOUGH_RESOURCES, "H_NOT_ENOUGH_RESOURCES"}, \
+ {H_R_STATE, "H_R_STATE"}, \
+ {H_RESCINDED, "H_RESCINDED"}, \
+ {H_P2, "H_P2"}, \
+ {H_P3, "H_P3"}, \
+ {H_P4, "H_P4"}, \
+ {H_P5, "H_P5"}, \
+ {H_P6, "H_P6"}, \
+ {H_P7, "H_P7"}, \
+ {H_P8, "H_P8"}, \
+ {H_P9, "H_P9"}, \
+ {H_TOO_BIG, "H_TOO_BIG"}, \
+ {H_OVERLAP, "H_OVERLAP"}, \
+ {H_INTERRUPT, "H_INTERRUPT"}, \
+ {H_BAD_DATA, "H_BAD_DATA"}, \
+ {H_NOT_ACTIVE, "H_NOT_ACTIVE"}, \
+ {H_SG_LIST, "H_SG_LIST"}, \
+ {H_OP_MODE, "H_OP_MODE"}, \
+ {H_COP_HW, "H_COP_HW"}, \
+ {H_UNSUPPORTED_FLAG_START, "H_UNSUPPORTED_FLAG_START"}, \
+ {H_UNSUPPORTED_FLAG_END, "H_UNSUPPORTED_FLAG_END"}, \
+ {H_MULTI_THREADS_ACTIVE, "H_MULTI_THREADS_ACTIVE"}, \
+ {H_OUTSTANDING_COP_OPS, "H_OUTSTANDING_COP_OPS"}
+
+TRACE_EVENT(kvm_guest_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, pc)
+ __field(unsigned long, pending_exceptions)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pending_exceptions = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("VCPU %d: pc=0x%lx pexcp=0x%lx ceded=%d",
+ __entry->vcpu_id,
+ __entry->pc,
+ __entry->pending_exceptions, __entry->ceded)
+);
+
+TRACE_EVENT(kvm_guest_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, trap)
+ __field(unsigned long, pc)
+ __field(unsigned long, msr)
+ __field(u8, ceded)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->trap = vcpu->arch.trap;
+ __entry->ceded = vcpu->arch.ceded;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->msr = vcpu->arch.shregs.msr;
+ ),
+
+ TP_printk("VCPU %d: trap=%s pc=0x%lx msr=0x%lx, ceded=%d",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->trap, kvm_trace_symbol_exit),
+ __entry->pc, __entry->msr, __entry->ceded
+ )
+);
+
+TRACE_EVENT(kvm_page_fault_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
+ struct kvm_memory_slot *memslot, unsigned long ea,
+ unsigned long dsisr),
+
+ TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(unsigned long, gpte_r)
+ __field(unsigned long, ea)
+ __field(u64, base_gfn)
+ __field(u32, slot_flags)
+ __field(u32, dsisr)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->gpte_r = hptep[2];
+ __entry->ea = ea;
+ __entry->dsisr = dsisr;
+ __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
+ __entry->slot_flags = memslot ? memslot->flags : 0;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx guest=0x%lx ea=0x%lx,%x slot=0x%llx,0x%x",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->gpte_r,
+ __entry->ea, __entry->dsisr,
+ __entry->base_gfn, __entry->slot_flags)
+);
+
+TRACE_EVENT(kvm_page_fault_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
+
+ TP_ARGS(vcpu, hptep, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, hpte_v)
+ __field(unsigned long, hpte_r)
+ __field(long, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->hpte_v = hptep[0];
+ __entry->hpte_r = hptep[1];
+ __entry->ret = ret;
+ ),
+
+ TP_printk("VCPU %d: hpte=0x%lx:0x%lx ret=0x%lx",
+ __entry->vcpu_id,
+ __entry->hpte_v, __entry->hpte_r, __entry->ret)
+);
+
+TRACE_EVENT(kvm_hcall_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, req)
+ __field(unsigned long, gpr4)
+ __field(unsigned long, gpr5)
+ __field(unsigned long, gpr6)
+ __field(unsigned long, gpr7)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->req = kvmppc_get_gpr(vcpu, 3);
+ __entry->gpr4 = kvmppc_get_gpr(vcpu, 4);
+ __entry->gpr5 = kvmppc_get_gpr(vcpu, 5);
+ __entry->gpr6 = kvmppc_get_gpr(vcpu, 6);
+ __entry->gpr7 = kvmppc_get_gpr(vcpu, 7);
+ ),
+
+ TP_printk("VCPU %d: hcall=%s GPR4-7=0x%lx,0x%lx,0x%lx,0x%lx",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->req, kvm_trace_symbol_hcall),
+ __entry->gpr4, __entry->gpr5, __entry->gpr6, __entry->gpr7)
+);
+
+TRACE_EVENT(kvm_hcall_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, int ret),
+
+ TP_ARGS(vcpu, ret),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(unsigned long, ret)
+ __field(unsigned long, hcall_rc)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->ret = ret;
+ __entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
+ ),
+
+ TP_printk("VCPU %d: ret=%s hcall_rc=%s",
+ __entry->vcpu_id,
+ __print_symbolic(__entry->ret, kvm_trace_symbol_kvmret),
+ __print_symbolic(__entry->ret & RESUME_FLAG_HOST ?
+ H_TOO_HARD : __entry->hcall_rc,
+ kvm_trace_symbol_hcall_rc))
+);
+
+TRACE_EVENT(kvmppc_run_core,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu==%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_vcore_blocked,
+ TP_PROTO(struct kvmppc_vcore *vc, int where),
+
+ TP_ARGS(vc, where),
+
+ TP_STRUCT__entry(
+ __field(int, n_runnable)
+ __field(int, runner_vcpu)
+ __field(int, where)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->runner_vcpu = vc->runner->vcpu_id;
+ __entry->n_runnable = vc->n_runnable;
+ __entry->where = where;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("%s runner_vcpu=%d runnable=%d tgid=%d",
+ __entry->where ? "Exit" : "Enter",
+ __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(pid_t, tgid)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->tgid = current->tgid;
+ ),
+
+ TP_printk("VCPU %d: tgid=%d", __entry->vcpu_id, __entry->tgid)
+);
+
+TRACE_EVENT(kvmppc_run_vcpu_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+
+ TP_ARGS(vcpu, run),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(int, exit)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->exit = run->exit_reason;
+ __entry->ret = vcpu->arch.ret;
+ ),
+
+ TP_printk("VCPU %d: exit=%d, ret=%d",
+ __entry->vcpu_id, __entry->exit, __entry->ret)
+);
+
+#endif /* _TRACE_KVM_HV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
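The TP_printk() strings in this new header rely on __print_symbolic(), which resolves a numeric trap, hcall or return code against the {value, "NAME"} tables defined above when the trace buffer is read. A user-space model of that lookup is sketched below; the RESUME_* values are arbitrary example numbers rather than the kernel's definitions, and the in-kernel helper falls back to printing the raw value, not a fixed string, when no entry matches.

    #include <stdio.h>

    struct trace_sym {
        unsigned long value;
        const char   *name;
    };

    /* Returns the symbolic name, or a fallback when the value is unknown. */
    static const char *print_symbolic(unsigned long value,
                                      const struct trace_sym *table)
    {
        for (; table->name; table++)
            if (table->value == value)
                return table->name;
        return "UNKNOWN";
    }

    int main(void)
    {
        /* Example values only -- not the kernel's RESUME_* definitions. */
        static const struct trace_sym kvmret[] = {
            { 1, "RESUME_GUEST" },
            { 2, "RESUME_HOST" },
            { 0, NULL },
        };

        printf("%s\n", print_symbolic(2, kvmret));   /* prints RESUME_HOST */
        return 0;
    }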
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index e1357cd8dc1f..810507cb688a 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -3,36 +3,13 @@
#define _TRACE_KVM_PR_H
#include <linux/tracepoint.h>
+#include "trace_book3s.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr
-#define kvm_trace_symbol_exit \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
TP_ARGS(r, vcpu),
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 9f342f134ae4..597562f69b2d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -12,7 +12,6 @@ CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
crtsavres.o ppc_ksyms.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
-obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
usercopy_64.o mem_64.o string.o \
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index da22c84a8fed..4a6c2cf890d9 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -13,9 +13,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
if (mem_init_done)
p = kzalloc(size, mask);
else {
- p = alloc_bootmem(size);
- if (p)
- memset(p, 0, size);
+ p = memblock_virt_alloc(size, 0);
}
return p;
}
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index c46c876ac96a..92ee840529bc 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -718,4 +718,4 @@ err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
b exit_vmx_usercopy /* tail call optimise */
-#endif /* CONFiG_ALTIVEC */
+#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c
deleted file mode 100644
index 8df55fc3aad6..000000000000
--- a/arch/powerpc/lib/devres.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/device.h> /* devres_*(), devm_ioremap_release() */
-#include <linux/gfp.h>
-#include <linux/io.h> /* ioremap_prot() */
-#include <linux/export.h> /* EXPORT_SYMBOL() */
-
-/**
- * devm_ioremap_prot - Managed ioremap_prot()
- * @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
- * @size: Size of map
- * @flags: Page flags
- *
- * Managed ioremap_prot(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
- size_t size, unsigned long flags)
-{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = ioremap_prot(offset, size, flags);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
-}
-EXPORT_SYMBOL(devm_ioremap_prot);
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 2ff5c142f87b..0830587df16e 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -653,4 +653,4 @@ _GLOBAL(memcpy_power7)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b exit_vmx_copy /* tail call optimise */
-#endif /* CONFiG_ALTIVEC */
+#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 54651fc2d412..dc885b30f7a6 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1865,6 +1865,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
}
goto ldst_done;
+#ifdef CONFIG_PPC_FPU
case LOAD_FP:
if (regs->msr & MSR_LE)
return 0;
@@ -1873,7 +1874,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
else
err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
goto ldst_done;
-
+#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
if (regs->msr & MSR_LE)
@@ -1919,6 +1920,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
err = write_mem(op.val, op.ea, size, regs);
goto ldst_done;
+#ifdef CONFIG_PPC_FPU
case STORE_FP:
if (regs->msr & MSR_LE)
return 0;
@@ -1927,7 +1929,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
else
err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
goto ldst_done;
-
+#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
if (regs->msr & MSR_LE)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 325e861616a1..438dcd3fd0d1 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o gup.o mmap.o \
+obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(CONFIG_WORD_SIZE).o \
pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 08d659a9fcdb..eb79907f34fa 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -43,7 +43,6 @@
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
-#include <mm/mmu_decl.h>
#include "icswx.h"
@@ -380,12 +379,6 @@ good_area:
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
- /* 8xx sometimes need to load a invalid/non-present TLBs.
- * These must be invalidated separately as linux mm don't.
- */
- if (error_code & 0x40000000) /* no translation? */
- _tlbil_va(address, 0, 0, 0);
-
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
deleted file mode 100644
index d8746684f606..000000000000
--- a/arch/powerpc/mm/gup.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Lockless get_user_pages_fast for powerpc
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-#undef DEBUG
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-
-#ifdef __HAVE_ARCH_PTE_SPECIAL
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long mask, result;
- pte_t *ptep;
-
- result = _PAGE_PRESENT|_PAGE_USER;
- if (write)
- result |= _PAGE_RW;
- mask = result | _PAGE_SPECIAL;
-
- ptep = pte_offset_kernel(&pmd, addr);
- do {
- pte_t pte = ACCESS_ONCE(*ptep);
- struct page *page;
- /*
- * Similar to the PMD case, NUMA hinting must take slow path
- */
- if (pte_numa(pte))
- return 0;
-
- if ((pte_val(pte) & mask) != result)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- if (!page_cache_get_speculative(page))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(page);
- return 0;
- }
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = ACCESS_ONCE(*pmdp);
-
- next = pmd_addr_end(addr, end);
- /*
- * If we find a splitting transparent hugepage we
- * return zero. That will result in taking the slow
- * path which will call wait_split_huge_page()
- * if the pmd is still in splitting state
- */
- if (pmd_none(pmd) || pmd_trans_splitting(pmd))
- return 0;
- if (pmd_huge(pmd) || pmd_large(pmd)) {
- /*
- * NUMA hinting faults need to be handled in the GUP
- * slowpath for accounting purposes and so that they
- * can be serialised against THP migration.
- */
- if (pmd_numa(pmd))
- return 0;
-
- if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
- write, pages, nr))
- return 0;
- } else if (is_hugepd(pmdp)) {
- if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
- addr, next, write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = ACCESS_ONCE(*pudp);
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (pud_huge(pud)) {
- if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
- write, pages, nr))
- return 0;
- } else if (is_hugepd(pudp)) {
- if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
- addr, next, write, pages, nr))
- return 0;
- } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- start, len)))
- return 0;
-
- pr_devel(" aligned: %lx .. %lx\n", start, end);
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch size
- * will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables from being freed on powerpc.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_save(flags);
-
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = ACCESS_ONCE(*pgdp);
-
- pr_devel(" %016lx: normal pgd %p\n", addr,
- (void *)pgd_val(pgd));
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (pgd_huge(pgd)) {
- if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
- write, pages, &nr))
- break;
- } else if (is_hugepd(pgdp)) {
- if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
- addr, next, write, pages, &nr))
- break;
- } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
-
- local_irq_restore(flags);
-
- return nr;
-}
-
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- int nr, ret;
-
- start &= PAGE_MASK;
- nr = __get_user_pages_fast(start, nr_pages, write, pages);
- ret = nr;
-
- if (nr < nr_pages) {
- pr_devel(" slow path ! nr = %d\n", nr);
-
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- nr_pages - nr, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
- }
-
- return ret;
-}
-
-#endif /* __HAVE_ARCH_PTE_SPECIAL */
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 057cbbb4c576..463174a4a647 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -46,7 +46,8 @@
/*
* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- * pte_t *ptep, unsigned long trap, int local, int ssize)
+ * pte_t *ptep, unsigned long trap, unsigned long flags,
+ * int ssize)
*
* Adds a 4K page to the hash table in a segment of 4K pages only
*/
@@ -298,7 +299,7 @@ htab_modify_pte:
li r6,MMU_PAGE_4K /* base page size */
li r7,MMU_PAGE_4K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */
- ld r9,STK_PARAM(R8)(r1) /* get "local" param */
+ ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
.globl htab_call_hpte_updatepp
htab_call_hpte_updatepp:
bl . /* Patched by htab_finish_init() */
@@ -338,8 +339,8 @@ htab_pte_insert_failure:
*****************************************************************************/
/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- * pte_t *ptep, unsigned long trap, int local, int ssize,
- * int subpg_prot)
+ * pte_t *ptep, unsigned long trap, unsigned long flags,
+ * int ssize, int subpg_prot)
*/
/*
@@ -514,7 +515,7 @@ htab_insert_pte:
andis. r0,r31,_PAGE_4K_PFN@h
srdi r5,r31,PTE_RPN_SHIFT
bne- htab_special_pfn
- sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
+ sldi r5,r5,PAGE_FACTOR
add r5,r5,r25
htab_special_pfn:
sldi r5,r5,HW_PAGE_SHIFT
@@ -544,7 +545,7 @@ htab_call_hpte_insert1:
andis. r0,r31,_PAGE_4K_PFN@h
srdi r5,r31,PTE_RPN_SHIFT
bne- 3f
- sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
+ sldi r5,r5,PAGE_FACTOR
add r5,r5,r25
3: sldi r5,r5,HW_PAGE_SHIFT
@@ -594,7 +595,7 @@ htab_inval_old_hpte:
li r5,0 /* PTE.hidx */
li r6,MMU_PAGE_64K /* psize */
ld r7,STK_PARAM(R9)(r1) /* ssize */
- ld r8,STK_PARAM(R8)(r1) /* local */
+ ld r8,STK_PARAM(R8)(r1) /* flags */
bl flush_hash_page
/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
lis r0,_PAGE_HPTE_SUB@h
@@ -666,7 +667,7 @@ htab_modify_pte:
li r6,MMU_PAGE_4K /* base page size */
li r7,MMU_PAGE_4K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */
- ld r9,STK_PARAM(R8)(r1) /* get "local" param */
+ ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
.globl htab_call_hpte_updatepp
htab_call_hpte_updatepp:
bl . /* patched by htab_finish_init() */
@@ -962,7 +963,7 @@ ht64_modify_pte:
li r6,MMU_PAGE_64K /* base page size */
li r7,MMU_PAGE_64K /* actual page size */
ld r8,STK_PARAM(R9)(r1) /* segment size */
- ld r9,STK_PARAM(R8)(r1) /* get "local" param */
+ ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
.globl ht64_call_hpte_updatepp
ht64_call_hpte_updatepp:
bl . /* patched by htab_finish_init() */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ae4962a06476..9c4880ddecd6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -283,19 +283,17 @@ static long native_hpte_remove(unsigned long hpte_group)
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long vpn, int bpsize,
- int apsize, int ssize, int local)
+ int apsize, int ssize, unsigned long flags)
{
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
- int ret = 0;
+ int ret = 0, local = 0;
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
- native_lock_hpte(hptep);
-
hpte_v = be64_to_cpu(hptep->v);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -308,15 +306,30 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" -> miss\n");
ret = -1;
} else {
- DBG_LOW(" -> hit\n");
- /* Update the HPTE */
- hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+ native_lock_hpte(hptep);
+ /* recheck with locks held */
+ hpte_v = be64_to_cpu(hptep->v);
+ if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
+ !(hpte_v & HPTE_V_VALID))) {
+ ret = -1;
+ } else {
+ DBG_LOW(" -> hit\n");
+ /* Update the HPTE */
+ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+ ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N |
+ HPTE_R_C)));
+ }
+ native_unlock_hpte(hptep);
}
- native_unlock_hpte(hptep);
- /* Ensure it is out of the tlb too. */
- tlbie(vpn, bpsize, apsize, ssize, local);
+ if (flags & HPTE_LOCAL_UPDATE)
+ local = 1;
+ /*
+ * Ensure it is out of the tlb too if it is not a nohpte fault
+ */
+ if (!(flags & HPTE_NOHPTE_UPDATE))
+ tlbie(vpn, bpsize, apsize, ssize, local);
return ret;
}
@@ -419,7 +432,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
static void native_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize)
+ int psize, int ssize, int local)
{
int i;
struct hash_pte *hptep;
@@ -465,7 +478,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
* instruction compares entry_VA in tlb with the VA specified
* here
*/
- tlbie(vpn, psize, actual_psize, ssize, 0);
+ tlbie(vpn, psize, actual_psize, ssize, local);
}
local_irq_restore(flags);
}
@@ -629,7 +642,7 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long want_v;
unsigned long flags;
real_pte_t pte;
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
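
The native_hpte_updatepp() hunk above changes the locking so the per-HPTE lock is only taken after an unlocked compare hits, with a recheck once the lock is held; a miss never touches the lock at all. Below is a minimal user-space sketch of that check/lock/recheck flow — the struct, field, and helper names are made-up stand-ins for illustration, not kernel API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for one hashed page table entry (not the kernel's
 * layout): v carries the "which mapping is this" tag, r the permissions. */
struct fake_hpte {
    unsigned long v;
    unsigned long r;
};

/* Models the per-HPTE lock bit that native_lock_hpte()/native_unlock_hpte()
 * toggle in the real code. */
static pthread_mutex_t hpte_lock = PTHREAD_MUTEX_INITIALIZER;

/* New flow: cheap unlocked compare first, lock and recheck only on a hit.
 * Returns 0 if the entry was updated, -1 on a miss. */
static int update_pp(struct fake_hpte *h, unsigned long want_v,
                     unsigned long newpp)
{
    int ret = -1;

    if (h->v != want_v)                 /* unlocked peek: miss, no lock */
        return ret;

    pthread_mutex_lock(&hpte_lock);
    if (h->v == want_v) {               /* recheck with the lock held */
        h->r = newpp;
        ret = 0;
    }
    pthread_mutex_unlock(&hpte_lock);
    return ret;
}

int main(void)
{
    struct fake_hpte h = { .v = 42, .r = 0 };

    printf("hit -> %d, miss -> %d\n",
           update_pp(&h, 42, 7), update_pp(&h, 1, 7));
    return 0;
}

The recheck matters because another CPU may have invalidated or replaced the entry between the unlocked peek and acquiring the lock.
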
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d5339a3b9945..2c2022d16059 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -989,7 +989,9 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
* -1 - critical hash insertion error
* -2 - access not permitted by subpage protection mechanism
*/
-int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap)
+int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap,
+ unsigned long flags)
{
enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
@@ -997,7 +999,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
pte_t *ptep;
unsigned hugeshift;
const struct cpumask *tmp;
- int rc, user_region = 0, local = 0;
+ int rc, user_region = 0;
int psize, ssize;
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
@@ -1049,7 +1051,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
/* Check CPU locality */
tmp = cpumask_of(smp_processor_id());
if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
- local = 1;
+ flags |= HPTE_LOCAL_UPDATE;
#ifndef CONFIG_PPC_64K_PAGES
/* If we use 4K pages and our psize is not 4K, then we might
@@ -1086,11 +1088,11 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
if (hugeshift) {
if (pmd_trans_huge(*(pmd_t *)ptep))
rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
- trap, local, ssize, psize);
+ trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
else
rc = __hash_page_huge(ea, access, vsid, ptep, trap,
- local, ssize, hugeshift, psize);
+ flags, ssize, hugeshift, psize);
#else
else {
/*
@@ -1149,7 +1151,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
#ifdef CONFIG_PPC_HAS_HASH_64K
if (psize == MMU_PAGE_64K)
- rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+ rc = __hash_page_64K(ea, access, vsid, ptep, trap,
+ flags, ssize);
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
{
@@ -1158,7 +1161,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
rc = -2;
else
rc = __hash_page_4K(ea, access, vsid, ptep, trap,
- local, ssize, spp);
+ flags, ssize, spp);
}
/* Dump some info in case of hash insertion failure, they should
@@ -1181,14 +1184,19 @@ bail:
}
EXPORT_SYMBOL_GPL(hash_page_mm);
-int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+ unsigned long dsisr)
{
+ unsigned long flags = 0;
struct mm_struct *mm = current->mm;
if (REGION_ID(ea) == VMALLOC_REGION_ID)
mm = &init_mm;
- return hash_page_mm(mm, ea, access, trap);
+ if (dsisr & DSISR_NOHPTE)
+ flags |= HPTE_NOHPTE_UPDATE;
+
+ return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);
@@ -1200,7 +1208,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
pgd_t *pgdir;
pte_t *ptep;
unsigned long flags;
- int rc, ssize, local = 0;
+ int rc, ssize, update_flags = 0;
BUG_ON(REGION_ID(ea) != USER_REGION_ID);
@@ -1251,16 +1259,17 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Is that local to this CPU ? */
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- local = 1;
+ update_flags |= HPTE_LOCAL_UPDATE;
/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
if (mm->context.user_psize == MMU_PAGE_64K)
- rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+ rc = __hash_page_64K(ea, access, vsid, ptep, trap,
+ update_flags, ssize);
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
- rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
- subpage_protection(mm, ea));
+ rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
+ ssize, subpage_protection(mm, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
@@ -1278,9 +1287,10 @@ out_exit:
* do not forget to update the assembly call site !
*/
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
- int local)
+ unsigned long flags)
{
unsigned long hash, index, shift, hidx, slot;
+ int local = flags & HPTE_LOCAL_UPDATE;
DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
@@ -1315,6 +1325,78 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
#endif
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+ pmd_t *pmdp, unsigned int psize, int ssize,
+ unsigned long flags)
+{
+ int i, max_hpte_count, valid;
+ unsigned long s_addr;
+ unsigned char *hpte_slot_array;
+ unsigned long hidx, shift, vpn, hash, slot;
+ int local = flags & HPTE_LOCAL_UPDATE;
+
+ s_addr = addr & HPAGE_PMD_MASK;
+ hpte_slot_array = get_hpte_slot_array(pmdp);
+ /*
+ * If we try to do a huge PTE update after a withdraw is done,
+ * we will find the hpte_slot_array below NULL. This happens when
+ * we do split_huge_page_pmd().
+ */
+ if (!hpte_slot_array)
+ return;
+
+ if (ppc_md.hugepage_invalidate) {
+ ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+ psize, ssize, local);
+ goto tm_abort;
+ }
+ /*
+ * No bulk hpte removal support, invalidate each entry
+ */
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = HPAGE_PMD_SIZE >> shift;
+ for (i = 0; i < max_hpte_count; i++) {
+ /*
+ * 8 bits per hpte entry
+ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+ */
+ valid = hpte_valid(hpte_slot_array, i);
+ if (!valid)
+ continue;
+ hidx = hpte_hash_index(hpte_slot_array, i);
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ ppc_md.hpte_invalidate(slot, vpn, psize,
+ MMU_PAGE_16M, ssize, local);
+ }
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Transactions are not aborted by tlbiel, only tlbie.
+ * Without an abort, syncing a page back to a block device w/ PIO could pick up
+ * transactional data (bad!) so we force an abort here. Before the
+ * sync the page will be made read-only, which will flush_hash_page.
+ * BIG ISSUE here: if the kernel uses a page from userspace without
+ * unmapping it first, it may see the speculated version.
+ */
+ if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
+ MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ tm_enable();
+ tm_abort(TM_CAUSE_TLBI);
+ }
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
void flush_hash_range(unsigned long number, int local)
{
if (ppc_md.flush_hash_range)
@@ -1322,7 +1404,7 @@ void flush_hash_range(unsigned long number, int local)
else {
int i;
struct ppc64_tlb_batch *batch =
- &__get_cpu_var(ppc64_tlb_batch);
+ this_cpu_ptr(&ppc64_tlb_batch);
for (i = 0; i < number; i++)
flush_hash_page(batch->vpn[i], batch->pte[i],
@@ -1432,7 +1514,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
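
Throughout hash_utils_64.c the old int local argument is folded into an unsigned long flags word: HPTE_LOCAL_UPDATE marks a mapping that is only in use on the current CPU (so tlbiel suffices), and HPTE_NOHPTE_UPDATE marks a fault that saw no stale HPTE (so the tlbie can be skipped). A small stand-alone C sketch of that encoding follows; the bit values and helper are invented for illustration, not the kernel's definitions.

#include <stdio.h>

/* Illustrative flag bits; the real values live in the powerpc headers. */
#define HPTE_LOCAL_UPDATE_BIT   0x1UL   /* mapping only used on this CPU */
#define HPTE_NOHPTE_UPDATE_BIT  0x2UL   /* fault saw no HPTE, skip tlbie */

static void flush_one(unsigned long vpn, unsigned long flags)
{
    int local = (flags & HPTE_LOCAL_UPDATE_BIT) != 0;

    if (flags & HPTE_NOHPTE_UPDATE_BIT) {
        printf("vpn %#lx: no stale HPTE, skip TLB invalidate\n", vpn);
        return;
    }
    printf("vpn %#lx: invalidate (%s)\n", vpn,
           local ? "tlbiel, local" : "tlbie, global");
}

int main(void)
{
    flush_one(0x1000, 0);
    flush_one(0x2000, HPTE_LOCAL_UPDATE_BIT);
    flush_one(0x3000, HPTE_NOHPTE_UPDATE_BIT | HPTE_LOCAL_UPDATE_BIT);
    return 0;
}

Packing the independent booleans into one flags word keeps the call signatures stable as new conditions such as the nohpte case are added.
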
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 5f5e6328c21c..86686514ae13 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,60 +18,9 @@
#include <linux/mm.h>
#include <asm/machdep.h>
-static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
- pmd_t *pmdp, unsigned int psize, int ssize)
-{
- int i, max_hpte_count, valid;
- unsigned long s_addr;
- unsigned char *hpte_slot_array;
- unsigned long hidx, shift, vpn, hash, slot;
-
- s_addr = addr & HPAGE_PMD_MASK;
- hpte_slot_array = get_hpte_slot_array(pmdp);
- /*
- * IF we try to do a HUGE PTE update after a withdraw is done.
- * we will find the below NULL. This happens when we do
- * split_huge_page_pmd
- */
- if (!hpte_slot_array)
- return;
-
- if (ppc_md.hugepage_invalidate)
- return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
- psize, ssize);
- /*
- * No bluk hpte removal support, invalidate each entry
- */
- shift = mmu_psize_defs[psize].shift;
- max_hpte_count = HPAGE_PMD_SIZE >> shift;
- for (i = 0; i < max_hpte_count; i++) {
- /*
- * 8 bits per each hpte entries
- * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
- */
- valid = hpte_valid(hpte_slot_array, i);
- if (!valid)
- continue;
- hidx = hpte_hash_index(hpte_slot_array, i);
-
- /* get the vpn */
- addr = s_addr + (i * (1ul << shift));
- vpn = hpt_vpn(addr, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
- if (hidx & _PTEIDX_SECONDARY)
- hash = ~hash;
-
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, 0);
- }
-}
-
-
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
- pmd_t *pmdp, unsigned long trap, int local, int ssize,
- unsigned int psize)
+ pmd_t *pmdp, unsigned long trap, unsigned long flags,
+ int ssize, unsigned int psize)
{
unsigned int index, valid;
unsigned char *hpte_slot_array;
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* hash page table entries.
*/
if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
- invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+ flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
+ ssize, flags);
}
valid = hpte_valid(hpte_slot_array, index);
@@ -158,7 +108,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
slot += hidx & _PTEIDX_GROUP_IX;
ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
- psize, lpsize, ssize, local);
+ psize, lpsize, ssize, flags);
/*
* We failed to update, try to insert a new entry.
*/
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 5e4ee2573903..ba47aaf33a4b 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
- index = __get_cpu_var(next_tlbcam_idx);
+ index = this_cpu_read(next_tlbcam_idx);
/* Just round-robin the entries and wrap when we hit the end */
if (unlikely(index == ncams - 1))
- __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+ __this_cpu_write(next_tlbcam_idx, tlbcam_index);
else
- __get_cpu_var(next_tlbcam_idx)++;
+ __this_cpu_inc(next_tlbcam_idx);
return index;
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index a5bcf9301196..d94b1af53a93 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -19,8 +19,8 @@ extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long vflags, int psize, int ssize);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
- pte_t *ptep, unsigned long trap, int local, int ssize,
- unsigned int shift, unsigned int mmu_psize)
+ pte_t *ptep, unsigned long trap, unsigned long flags,
+ int ssize, unsigned int shift, unsigned int mmu_psize)
{
unsigned long vpn;
unsigned long old_pte, new_pte;
@@ -81,7 +81,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
slot += (old_pte & _PAGE_F_GIX) >> 12;
if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
- mmu_psize, ssize, local) == -1)
+ mmu_psize, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6a4a5fcb9730..5ff4e07d920a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -62,6 +62,9 @@ static unsigned nr_gpages;
/*
* We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
* 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
+ *
+ * Defined in such a way that we can optimize away the code block
+ * at build time if CONFIG_HUGETLB_PAGE=n.
*/
int pmd_huge(pmd_t pmd)
{
@@ -230,7 +233,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
return NULL;
- return hugepte_offset(hpdp, addr, pdshift);
+ return hugepte_offset(*hpdp, addr, pdshift);
}
#else
@@ -270,13 +273,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
return NULL;
- return hugepte_offset(hpdp, addr, pdshift);
+ return hugepte_offset(*hpdp, addr, pdshift);
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
*/
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
@@ -312,7 +315,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
* If gpages can be in highmem we can't use the trick of storing the
* data structure in the page; allocate space for this
*/
- m = alloc_bootmem(sizeof(struct huge_bootmem_page));
+ m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
@@ -352,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
if (size != 0) {
if (sscanf(val, "%lu", &npages) <= 0)
npages = 0;
+ if (npages > MAX_NUMBER_GPAGES) {
+ pr_warn("MMU: %lu pages requested for page "
+ "size %llu KB, limiting to "
+ __stringify(MAX_NUMBER_GPAGES) "\n",
+ npages, size / 1024);
+ npages = MAX_NUMBER_GPAGES;
+ }
gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
size = 0;
}
@@ -399,7 +409,7 @@ void __init reserve_hugetlb_gpages(void)
#else /* !PPC_FSL_BOOK3E */
/* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
*/
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
@@ -462,7 +472,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
struct hugepd_freelist **batchp;
- batchp = &get_cpu_var(hugepd_freelist_cur);
+ batchp = this_cpu_ptr(&hugepd_freelist_cur);
if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm),
@@ -536,7 +546,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
do {
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
- if (!is_hugepd(pmd)) {
+ if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
/*
* if it is not hugepd pointer, we should already find
* it cleared.
@@ -585,7 +595,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
do {
pud = pud_offset(pgd, addr);
next = pud_addr_end(addr, end);
- if (!is_hugepd(pud)) {
+ if (!is_hugepd(__hugepd(pud_val(*pud)))) {
if (pud_none_or_clear_bad(pud))
continue;
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -651,7 +661,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
do {
next = pgd_addr_end(addr, end);
pgd = pgd_offset(tlb->mm, addr);
- if (!is_hugepd(pgd)) {
+ if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
if (pgd_none_or_clear_bad(pgd))
continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
@@ -711,12 +721,11 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
return (__boundary - 1 < end - 1) ? __boundary : end;
}
-int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
- unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
+int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
+ unsigned long end, int write, struct page **pages, int *nr)
{
pte_t *ptep;
- unsigned long sz = 1UL << hugepd_shift(*hugepd);
+ unsigned long sz = 1UL << hugepd_shift(hugepd);
unsigned long next;
ptep = hugepte_offset(hugepd, addr, pdshift);
@@ -959,7 +968,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
else if (pgd_huge(pgd)) {
ret_pte = (pte_t *) pgdp;
goto out;
- } else if (is_hugepd(&pgd))
+ } else if (is_hugepd(__hugepd(pgd_val(pgd))))
hpdp = (hugepd_t *)&pgd;
else {
/*
@@ -976,7 +985,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
else if (pud_huge(pud)) {
ret_pte = (pte_t *) pudp;
goto out;
- } else if (is_hugepd(&pud))
+ } else if (is_hugepd(__hugepd(pud_val(pud))))
hpdp = (hugepd_t *)&pud;
else {
pdshift = PMD_SHIFT;
@@ -997,7 +1006,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
if (pmd_huge(pmd) || pmd_large(pmd)) {
ret_pte = (pte_t *) pmdp;
goto out;
- } else if (is_hugepd(&pmd))
+ } else if (is_hugepd(__hugepd(pmd_val(pmd))))
hpdp = (hugepd_t *)&pmd;
else
return pte_offset_kernel(&pmd, ea);
@@ -1006,7 +1015,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
if (!hpdp)
return NULL;
- ret_pte = hugepte_offset(hpdp, ea, pdshift);
+ ret_pte = hugepte_offset(*hpdp, ea, pdshift);
pdshift = hugepd_shift(*hpdp);
out:
if (shift)
@@ -1036,14 +1045,6 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
if ((pte_val(pte) & mask) != mask)
return 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- /*
- * check for splitting here
- */
- if (pmd_trans_splitting(pte_pmd(pte)))
- return 0;
-#endif
-
/* hugepages are never "special" */
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 415a51b028b9..a10be665b645 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
@@ -195,15 +194,6 @@ void __init MMU_init(void)
memblock_set_current_limit(lowmem_end_addr);
}
-/* This is only called until mem_init is done. */
-void __init *early_get_page(void)
-{
- if (init_bootmem_done)
- return alloc_bootmem_pages(PAGE_SIZE);
- else
- return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
-}
-
#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3481556a1880..10471f9bb63f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -34,7 +34,6 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8ebaac75c940..b7285a5870f8 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -35,6 +35,7 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -60,7 +61,6 @@
#define CPU_FTR_NOEXECUTE 0
#endif
-int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;
@@ -144,8 +144,17 @@ int arch_remove_memory(u64 start, u64 size)
zone = page_zone(pfn_to_page(start_pfn));
ret = __remove_pages(zone, start_pfn, nr_pages);
- if (!ret && (ppc_md.remove_memory))
- ret = ppc_md.remove_memory(start, size);
+ if (ret)
+ return ret;
+
+ /* Remove htab bolted mappings for this section of memory */
+ start = (unsigned long)__va(start);
+ ret = remove_section_mapping(start, start + size);
+
+ /* Ensure all vmalloc mappings are flushed in case they also
+ * hit that section of memory
+ */
+ vm_unmap_aliases();
return ret;
}
@@ -180,70 +189,23 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available. If we are using highmem, we only put the
- * lowmem into the bootmem system.
- */
#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init do_init_bootmem(void)
+void __init initmem_init(void)
{
- unsigned long start, bootmap_pages;
- unsigned long total_pages;
- struct memblock_region *reg;
- int boot_mapsize;
-
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
- total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+ min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
- total_pages = total_lowmem >> PAGE_SHIFT;
max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif
- /*
- * Find an area to use for the bootmem bitmap. Calculate the size of
- * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
- * Add 1 additional page in case the address isn't page-aligned.
- */
- bootmap_pages = bootmem_bootmap_pages(total_pages);
-
- start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
- min_low_pfn = MEMORY_START >> PAGE_SHIFT;
- boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
-
/* Place all memblock_regions in the same node and merge contiguous
* memblock_regions
*/
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
- /* Add all physical memory to the bootmem map, mark each area
- * present.
- */
-#ifdef CONFIG_HIGHMEM
- free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
-
- /* reserve the sections we're already using */
- for_each_memblock(reserved, reg) {
- unsigned long top = reg->base + reg->size - 1;
- if (top < lowmem_end_addr)
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
- else if (reg->base < lowmem_end_addr) {
- unsigned long trunc_size = lowmem_end_addr - reg->base;
- reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
- }
- }
-#else
- free_bootmem_with_active_regions(0, max_pfn);
-
- /* reserve the sections we're already using */
- for_each_memblock(reserved, reg)
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-#endif
/* XXX need to clip this if using highmem? */
sparse_memory_present_with_active_regions(0);
-
- init_bootmem_done = 1;
+ sparse_init();
}
/* mark pages that don't exist as nosave */
@@ -359,14 +321,6 @@ void __init paging_init(void)
mark_nonram_nosave();
}
-static void __init register_page_bootmem_info(void)
-{
- int i;
-
- for_each_online_node(i)
- register_page_bootmem_info_node(NODE_DATA(i));
-}
-
void __init mem_init(void)
{
/*
@@ -379,7 +333,6 @@ void __init mem_init(void)
swiotlb_init(0);
#endif
- register_page_bootmem_info();
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
free_all_bootmem();
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 928ebe79668b..9cba6cba2e50 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -421,12 +421,12 @@ void __init mmu_context_init(void)
/*
* Allocate the maps used by context management
*/
- context_map = alloc_bootmem(CTX_MAP_SIZE);
- context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
+ context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
+ context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
- stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
+ stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
- stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);
+ stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
register_cpu_notifier(&mmu_context_cpu_nb);
#endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 9615d82919b8..78c45f392f5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
{
__tlbil_va(address, pid);
}
-#endif /* CONIFG_8xx */
+#endif /* CONFIG_8xx */
#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b9d1dfdbe5bb..0257a7d659ef 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -134,28 +134,6 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn,
return 0;
}
-/*
- * get_node_active_region - Return active region containing pfn
- * Active range returned is empty if none found.
- * @pfn: The page to return the region for
- * @node_ar: Returned set to the active region containing @pfn
- */
-static void __init get_node_active_region(unsigned long pfn,
- struct node_active_region *node_ar)
-{
- unsigned long start_pfn, end_pfn;
- int i, nid;
-
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
- if (pfn >= start_pfn && pfn < end_pfn) {
- node_ar->nid = nid;
- node_ar->start_pfn = start_pfn;
- node_ar->end_pfn = end_pfn;
- break;
- }
- }
-}
-
static void reset_numa_cpu_lookup_table(void)
{
unsigned int cpu;
@@ -928,134 +906,48 @@ static void __init dump_numa_memory_topology(void)
}
}
-/*
- * Allocate some memory, satisfying the memblock or bootmem allocator where
- * required. nid is the preferred node and end is the physical address of
- * the highest address in the node.
- *
- * Returns the virtual address of the memory.
- */
-static void __init *careful_zallocation(int nid, unsigned long size,
- unsigned long align,
- unsigned long end_pfn)
-{
- void *ret;
- int new_nid;
- unsigned long ret_paddr;
-
- ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
-
- /* retry over all memory */
- if (!ret_paddr)
- ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
-
- if (!ret_paddr)
- panic("numa.c: cannot allocate %lu bytes for node %d",
- size, nid);
-
- ret = __va(ret_paddr);
-
- /*
- * We initialize the nodes in numeric order: 0, 1, 2...
- * and hand over control from the MEMBLOCK allocator to the
- * bootmem allocator. If this function is called for
- * node 5, then we know that all nodes <5 are using the
- * bootmem allocator instead of the MEMBLOCK allocator.
- *
- * So, check the nid from which this allocation came
- * and double check to see if we need to use bootmem
- * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
- * since it would be useless.
- */
- new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
- if (new_nid < nid) {
- ret = __alloc_bootmem_node(NODE_DATA(new_nid),
- size, align, 0);
-
- dbg("alloc_bootmem %p %lx\n", ret, size);
- }
-
- memset(ret, 0, size);
- return ret;
-}
-
static struct notifier_block ppc64_numa_nb = {
.notifier_call = cpu_numa_callback,
.priority = 1 /* Must run before sched domains notifier. */
};
-static void __init mark_reserved_regions_for_nid(int nid)
+/* Initialize NODE_DATA for a node on the local memory */
+static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
- struct pglist_data *node = NODE_DATA(nid);
- struct memblock_region *reg;
-
- for_each_memblock(reserved, reg) {
- unsigned long physbase = reg->base;
- unsigned long size = reg->size;
- unsigned long start_pfn = physbase >> PAGE_SHIFT;
- unsigned long end_pfn = PFN_UP(physbase + size);
- struct node_active_region node_ar;
- unsigned long node_end_pfn = pgdat_end_pfn(node);
-
- /*
- * Check to make sure that this memblock.reserved area is
- * within the bounds of the node that we care about.
- * Checking the nid of the start and end points is not
- * sufficient because the reserved area could span the
- * entire node.
- */
- if (end_pfn <= node->node_start_pfn ||
- start_pfn >= node_end_pfn)
- continue;
-
- get_node_active_region(start_pfn, &node_ar);
- while (start_pfn < end_pfn &&
- node_ar.start_pfn < node_ar.end_pfn) {
- unsigned long reserve_size = size;
- /*
- * if reserved region extends past active region
- * then trim size to active region
- */
- if (end_pfn > node_ar.end_pfn)
- reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
- - physbase;
- /*
- * Only worry about *this* node, others may not
- * yet have valid NODE_DATA().
- */
- if (node_ar.nid == nid) {
- dbg("reserve_bootmem %lx %lx nid=%d\n",
- physbase, reserve_size, node_ar.nid);
- reserve_bootmem_node(NODE_DATA(node_ar.nid),
- physbase, reserve_size,
- BOOTMEM_DEFAULT);
- }
- /*
- * if reserved region is contained in the active region
- * then done.
- */
- if (end_pfn <= node_ar.end_pfn)
- break;
-
- /*
- * reserved region extends past the active region
- * get next active region that contains this
- * reserved region
- */
- start_pfn = node_ar.end_pfn;
- physbase = start_pfn << PAGE_SHIFT;
- size = size - reserve_size;
- get_node_active_region(start_pfn, &node_ar);
- }
- }
+ u64 spanned_pages = end_pfn - start_pfn;
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+
+ if (spanned_pages)
+ pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start_pfn << PAGE_SHIFT,
+ (end_pfn << PAGE_SHIFT) - 1);
+ else
+ pr_info("Initmem setup node %d\n", nid);
+
+ nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd = __va(nd_pa);
+
+ /* report and initialize */
+ pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
+ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != nid)
+ pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+ memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+ NODE_DATA(nid)->node_id = nid;
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}
-
-void __init do_init_bootmem(void)
+void __init initmem_init(void)
{
int nid, cpu;
- min_low_pfn = 0;
max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
max_pfn = max_low_pfn;
@@ -1064,64 +956,18 @@ void __init do_init_bootmem(void)
else
dump_numa_memory_topology();
+ memblock_dump_all();
+
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
- void *bootmem_vaddr;
- unsigned long bootmap_pages;
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-
- /*
- * Allocate the node structure node local if possible
- *
- * Be careful moving this around, as it relies on all
- * previous nodes' bootmem to be initialized and have
- * all reserved areas marked.
- */
- NODE_DATA(nid) = careful_zallocation(nid,
- sizeof(struct pglist_data),
- SMP_CACHE_BYTES, end_pfn);
-
- dbg("node %d\n", nid);
- dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
-
- NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
- NODE_DATA(nid)->node_start_pfn = start_pfn;
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
-
- if (NODE_DATA(nid)->node_spanned_pages == 0)
- continue;
-
- dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
- dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
-
- bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmem_vaddr = careful_zallocation(nid,
- bootmap_pages << PAGE_SHIFT,
- PAGE_SIZE, end_pfn);
-
- dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
-
- init_bootmem_node(NODE_DATA(nid),
- __pa(bootmem_vaddr) >> PAGE_SHIFT,
- start_pfn, end_pfn);
-
- free_bootmem_with_active_regions(nid, end_pfn);
- /*
- * Be very careful about moving this around. Future
- * calls to careful_zallocation() depend on this getting
- * done correctly.
- */
- mark_reserved_regions_for_nid(nid);
+ setup_node_data(nid, start_pfn, end_pfn);
sparse_memory_present_with_active_regions(nid);
}
- init_bootmem_done = 1;
+ sparse_init();
- /*
- * Now bootmem is initialised we can create the node to cpumask
- * lookup tables and setup the cpu callback to populate them.
- */
setup_node_to_cpumask_map();
reset_numa_cpu_lookup_table();
@@ -1711,12 +1557,11 @@ static void stage_topology_update(int core_id)
static int dt_update_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct of_prop_reconfig *update;
+ struct of_reconfig_data *update = data;
int rc = NOTIFY_DONE;
switch (action) {
case OF_RECONFIG_UPDATE_PROPERTY:
- update = (struct of_prop_reconfig *)data;
if (!of_prop_cmp(update->dn->type, "cpu") &&
!of_prop_cmp(update->prop->name, "ibm,associativity")) {
u32 core_id;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cf11342bf519..50fad3801f30 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -100,12 +100,11 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
{
pte_t *pte;
extern int mem_init_done;
- extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
} else {
- pte = (pte_t *)early_get_page();
+ pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
clear_page(pte);
}
@@ -430,7 +429,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c8d709ab489d..4fe5f64cc179 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,9 +33,9 @@
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -51,6 +51,7 @@
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
+#include <asm/dma.h>
#include "mmu_decl.h"
@@ -75,11 +76,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
{
void *pt;
- if (init_bootmem_done)
- pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
- else
- pt = __va(memblock_alloc_base(size, size,
- __pa(MAX_DMA_ADDRESS)));
+ pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
memset(pt, 0, size);
return pt;
@@ -113,10 +110,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
__pgprot(flags)));
} else {
#ifdef CONFIG_PPC_MMU_NOHASH
- /* Warning ! This will blow up if bootmem is not initialized
- * which our ppc64 code is keen to do that, we'll need to
- * fix it and/or be more careful
- */
pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
if (pgd_none(*pgdp)) {
@@ -352,16 +345,31 @@ EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
+#ifndef __PAGETABLE_PUD_FOLDED
+/* 4 level page table */
+struct page *pgd_page(pgd_t pgd)
+{
+ if (pgd_huge(pgd))
+ return pte_page(pgd_pte(pgd));
+ return virt_to_page(pgd_page_vaddr(pgd));
+}
+#endif
+
+struct page *pud_page(pud_t pud)
+{
+ if (pud_huge(pud))
+ return pte_page(pud_pte(pud));
+ return virt_to_page(pud_page_vaddr(pud));
+}
+
/*
* For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
* For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
*/
struct page *pmd_page(pmd_t pmd)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_trans_huge(pmd))
+ if (pmd_trans_huge(pmd) || pmd_huge(pmd))
return pfn_to_page(pmd_pfn(pmd));
-#endif
return virt_to_page(pmd_page_vaddr(pmd));
}
@@ -731,29 +739,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long old_pmd)
{
- int ssize, i;
- unsigned long s_addr;
- int max_hpte_count;
- unsigned int psize, valid;
- unsigned char *hpte_slot_array;
- unsigned long hidx, vpn, vsid, hash, shift, slot;
-
- /*
- * Flush all the hptes mapping this hugepage
- */
- s_addr = addr & HPAGE_PMD_MASK;
- hpte_slot_array = get_hpte_slot_array(pmdp);
- /*
- * IF we try to do a HUGE PTE update after a withdraw is done.
- * we will find the below NULL. This happens when we do
- * split_huge_page_pmd
- */
- if (!hpte_slot_array)
- return;
+ int ssize;
+ unsigned int psize;
+ unsigned long vsid;
+ unsigned long flags = 0;
+ const struct cpumask *tmp;
/* get the base page size,vsid and segment size */
#ifdef CONFIG_DEBUG_VM
- psize = get_slice_psize(mm, s_addr);
+ psize = get_slice_psize(mm, addr);
BUG_ON(psize == MMU_PAGE_16M);
#endif
if (old_pmd & _PAGE_COMBO)
@@ -761,46 +755,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
else
psize = MMU_PAGE_64K;
- if (!is_kernel_addr(s_addr)) {
- ssize = user_segment_size(s_addr);
- vsid = get_vsid(mm->context.id, s_addr, ssize);
+ if (!is_kernel_addr(addr)) {
+ ssize = user_segment_size(addr);
+ vsid = get_vsid(mm->context.id, addr, ssize);
WARN_ON(vsid == 0);
} else {
- vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+ vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
- if (ppc_md.hugepage_invalidate)
- return ppc_md.hugepage_invalidate(vsid, s_addr,
- hpte_slot_array,
- psize, ssize);
- /*
- * No bluk hpte removal support, invalidate each entry
- */
- shift = mmu_psize_defs[psize].shift;
- max_hpte_count = HPAGE_PMD_SIZE >> shift;
- for (i = 0; i < max_hpte_count; i++) {
- /*
- * 8 bits per each hpte entries
- * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
- */
- valid = hpte_valid(hpte_slot_array, i);
- if (!valid)
- continue;
- hidx = hpte_hash_index(hpte_slot_array, i);
-
- /* get the vpn */
- addr = s_addr + (i * (1ul << shift));
- vpn = hpt_vpn(addr, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
- if (hidx & _PTEIDX_SECONDARY)
- hash = ~hash;
-
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, 0);
- }
+ tmp = cpumask_of(smp_processor_id());
+ if (cpumask_equal(mm_cpumask(mm), tmp))
+ flags |= HPTE_LOCAL_UPDATE;
+
+ return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 9aee27c582dc..c406aa95b2bc 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -87,6 +87,9 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
+
+#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
@@ -96,6 +99,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
/* Convenience helpers for the above with 'far' offsets: */
+#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LBZ(r, r, IMM_L(i)); } } while(0)
+
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
else { PPC_ADDIS(r, base, IMM_HA(i)); \
PPC_LD(r, r, IMM_L(i)); } } while(0)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index cbae2dfd053c..1ca125b9c226 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -181,6 +181,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
}
break;
case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
+ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
ctx->seen |= SEEN_XREG;
PPC_CMPWI(r_X, 0);
if (ctx->pc_ret0 != -1) {
@@ -190,9 +191,13 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_LI(r_ret, 0);
PPC_JMP(exit_addr);
}
- PPC_DIVWU(r_scratch1, r_A, r_X);
- PPC_MUL(r_scratch1, r_X, r_scratch1);
- PPC_SUB(r_A, r_A, r_scratch1);
+ if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
+ PPC_DIVWU(r_scratch1, r_A, r_X);
+ PPC_MUL(r_scratch1, r_X, r_scratch1);
+ PPC_SUB(r_A, r_A, r_scratch1);
+ } else {
+ PPC_DIVWU(r_A, r_A, r_X);
+ }
break;
case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
PPC_LI32(r_scratch2, K);
@@ -200,22 +205,6 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
break;
- case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
- ctx->seen |= SEEN_XREG;
- PPC_CMPWI(r_X, 0);
- if (ctx->pc_ret0 != -1) {
- PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
- } else {
- /*
- * Exit, returning 0; first pass hits here
- * (longer worst-case code size).
- */
- PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
- PPC_LI(r_ret, 0);
- PPC_JMP(exit_addr);
- }
- PPC_DIVWU(r_A, r_A, r_X);
- break;
case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
if (K == 1)
break;
@@ -361,6 +350,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
protocol));
break;
case BPF_ANC | SKF_AD_IFINDEX:
+ case BPF_ANC | SKF_AD_HATYPE:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ ifindex) != 4);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+ type) != 2);
PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
dev));
PPC_CMPDI(r_scratch1, 0);
@@ -368,14 +362,18 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
} else {
/* Exit, returning 0; first pass hits here. */
- PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+ PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
PPC_LI(r_ret, 0);
PPC_JMP(exit_addr);
}
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
- ifindex) != 4);
- PPC_LWZ_OFFS(r_A, r_scratch1,
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ PPC_LWZ_OFFS(r_A, r_scratch1,
offsetof(struct net_device, ifindex));
+ } else {
+ PPC_LHZ_OFFS(r_A, r_scratch1,
+ offsetof(struct net_device, type));
+ }
+
break;
case BPF_ANC | SKF_AD_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
@@ -407,6 +405,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
queue_mapping));
break;
+ case BPF_ANC | SKF_AD_PKTTYPE:
+ PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
+ PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
+ PPC_SRWI(r_A, r_A, 5);
+ break;
case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
/*
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 6adf55fa5d88..ecc66d5f02c9 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -10,7 +10,7 @@
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/compat.h>
#include <asm/oprofile_impl.h>
@@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
first_frame = 0;
}
} else {
+ pagefault_disable();
#ifdef CONFIG_PPC64
if (!is_32bit_task()) {
while (depth--) {
@@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
break;
first_frame = 0;
}
-
+ pagefault_enable();
return;
}
#endif
@@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
break;
first_frame = 0;
}
+ pagefault_enable();
}
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a6995d4e93d4..7c4f6690533a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
static void power_pmu_bhrb_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
static void power_pmu_bhrb_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled) {
/*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled)
goto out;
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
power_pmu_read(event);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
*/
static void power_pmu_start_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
*/
static void power_pmu_cancel_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
if (!ppmu)
return -EAGAIN;
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
struct cpu_hw_events *cpuhw;
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(cpuhw);
data.br_stack = &cpuhw->bhrb_stack;
}
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
static void perf_event_interrupt(struct pt_regs *regs)
{
int i, j;
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
unsigned long val[8];
int found, active;
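The __get_cpu_var() conversions in this file, and in the perf, cell, OPAL, PS3 and pseries hunks that follow, all map onto the same three this_cpu idioms. A hedged sketch with a made-up per-CPU variable (the __this_cpu_* forms assume preemption is already disabled, which matches the irq-off contexts in these hunks):

#include <linux/percpu.h>

struct demo_state {
	int depth;
};
static DEFINE_PER_CPU(struct demo_state, demo_state);
static DEFINE_PER_CPU(int, demo_counter);

static void demo_this_cpu_accessors(void)
{
	/* &__get_cpu_var(var)        ->  this_cpu_ptr(&var)        */
	struct demo_state *st = this_cpu_ptr(&demo_state);

	/* __get_cpu_var(var) read    ->  __this_cpu_read(var)      */
	int c = __this_cpu_read(demo_counter);

	/* __get_cpu_var(var) = x     ->  __this_cpu_write(var, x)  */
	__this_cpu_write(demo_counter, c + st->depth);
}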
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index d35ae52c69dc..4acaea01fe03 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
unsigned long flags;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
unsigned long flags;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled)
goto out;
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 82f2da28cd27..d2ac1c116454 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -220,7 +220,6 @@ config AKEBONO
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
select MMC_SDHCI
select MMC_SDHCI_PLTFM
- select MMC_SDHCI_OF_476GTR
select ATA
select SATA_AHCI_PLATFORM
help
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 58db9d083969..c11ce6516c8f 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -94,7 +94,7 @@ static int avr_probe(struct i2c_client *client,
{
avr_i2c_client = client;
ppc_md.restart = avr_reset_system;
- ppc_md.power_off = avr_power_off_system;
+ pm_power_off = avr_power_off_system;
return 0;
}
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index e996e007bc44..711f3d352af7 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -18,7 +18,7 @@
#include <linux/irq.h>
#include <linux/of_platform.h>
#include <linux/fsl-diu-fb.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <sysdev/fsl_soc.h>
#include <asm/cacheflush.h>
@@ -297,14 +297,13 @@ static void __init mpc512x_setup_diu(void)
* and so negatively affect boot time. Instead we reserve the
* already configured frame buffer area so that it won't be
* destroyed. The starting address of the area to reserve and
- * also it's length is passed to reserve_bootmem(). It will be
+ * also its length is passed to memblock_reserve(). It will be
* freed later on first open of fbdev, when splash image is not
* needed any more.
*/
if (diu_shared_fb.in_use) {
- ret = reserve_bootmem(diu_shared_fb.fb_phys,
- diu_shared_fb.fb_len,
- BOOTMEM_EXCLUSIVE);
+ ret = memblock_reserve(diu_shared_fb.fb_phys,
+ diu_shared_fb.fb_len);
if (ret) {
pr_err("%s: reserve bootmem failed\n", __func__);
diu_shared_fb.in_use = false;
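Reserving a firmware-initialised region with memblock is the usual early-boot way to keep it out of the page allocator until a driver takes over. A minimal sketch, assuming the physical base and length come from elsewhere (the helper name is illustrative):

#include <linux/memblock.h>
#include <linux/printk.h>

/* Keep a firmware-configured region (e.g. a splash framebuffer) intact. */
static int __init reserve_firmware_fb(phys_addr_t base, phys_addr_t len)
{
	int ret;

	ret = memblock_reserve(base, len);	/* 0 on success, negative errno otherwise */
	if (ret)
		pr_err("failed to reserve framebuffer at %pa (%d)\n", &base, ret);

	return ret;
}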
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 3feffde9128d..6af651e69129 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -212,6 +212,8 @@ static int __init efika_probe(void)
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
+ pm_power_off = rtas_power_off;
+
return 1;
}
@@ -225,7 +227,6 @@ define_machine(efika)
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
.restart = rtas_restart,
- .power_off = rtas_power_off,
.halt = rtas_halt,
.set_rtc_time = rtas_set_rtc_time,
.get_rtc_time = rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 692998244d2c..c949ca055712 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -782,7 +782,6 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
static struct platform_driver mpc52xx_gpt_driver = {
.driver = {
.name = "mpc52xx-gpt",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_gpt_match,
},
.probe = mpc52xx_gpt_probe,
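The repeated removal of ".owner = THIS_MODULE" from platform_driver definitions in these hunks relies on the driver core: platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register(), so the explicit assignment is redundant. A sketch of the resulting minimal driver (all names and the compatible string are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-device" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_match);

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = {
		.name		= "demo-device",
		/* no .owner: platform_driver_register() fills it in */
		.of_match_table	= demo_match,
	},
	.probe = demo_probe,
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");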
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index f8f0081759fb..251dcb90ef34 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -572,7 +572,6 @@ static const struct of_device_id mpc52xx_lpbfifo_match[] = {
static struct platform_driver mpc52xx_lpbfifo_driver = {
.driver = {
.name = "mpc52xx-lpbfifo",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_lpbfifo_match,
},
.probe = mpc52xx_lpbfifo_probe,
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 3d0c3a01143d..a0cb8bd41958 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -169,7 +169,6 @@ static const struct of_device_id ep8248e_mdio_match[] = {
static struct platform_driver ep8248e_mdio_driver = {
.driver = {
.name = "ep8248e-mdio-bitbang",
- .owner = THIS_MODULE,
.of_match_table = ep8248e_mdio_match,
},
.probe = ep8248e_mdio_probe,
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 463fa91ee5b6..15e8021ddef9 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -167,10 +167,10 @@ static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (ret)
goto err;
- /* XXX: this is potentially racy, but there is no lock for ppc_md */
- if (!ppc_md.power_off) {
+ /* XXX: this is potentially racy, but there is no lock for pm_power_off */
+ if (!pm_power_off) {
glob_mcu = mcu;
- ppc_md.power_off = mcu_power_off;
+ pm_power_off = mcu_power_off;
dev_info(&client->dev, "will provide power-off service\n");
}
@@ -197,7 +197,7 @@ static int mcu_remove(struct i2c_client *client)
device_remove_file(&client->dev, &dev_attr_status);
if (glob_mcu == mcu) {
- ppc_md.power_off = NULL;
+ pm_power_off = NULL;
glob_mcu = NULL;
}
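The ppc_md.power_off to pm_power_off conversions in these platform files move power-off registration to the generic global hook the rest of the kernel already uses. A hedged sketch of the claim/release pattern the MCU driver above follows (names are illustrative; as the comment in the hunk notes, there is no lock protecting pm_power_off):

#include <linux/pm.h>
#include <linux/errno.h>

static void demo_power_off(void)
{
	/* platform-specific power-off sequence would go here */
}

static int demo_claim_power_off(void)
{
	if (pm_power_off)		/* someone else already provides power-off */
		return -EBUSY;

	pm_power_off = demo_power_off;
	return 0;
}

static void demo_release_power_off(void)
{
	if (pm_power_off == demo_power_off)
		pm_power_off = NULL;
}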
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index eeb80e25214d..c9adbfb65006 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -435,7 +435,6 @@ static const struct of_device_id pmc_match[] = {
static struct platform_driver pmc_driver = {
.driver = {
.name = "mpc83xx-pmc",
- .owner = THIS_MODULE,
.of_match_table = pmc_match,
},
.probe = pmc_probe,
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index e56b89a792ed..1f309ccb096e 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -170,7 +170,7 @@ static int __init corenet_generic_probe(void)
ppc_md.get_irq = ehv_pic_get_irq;
ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
+ pm_power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
#ifdef CONFIG_SMP
/*
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 8162b0412117..79fd0dfd4b82 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -120,7 +120,7 @@ static int gpio_halt_probe(struct platform_device *pdev)
/* Register our halt function */
ppc_md.halt = gpio_halt_cb;
- ppc_md.power_off = gpio_halt_cb;
+ pm_power_off = gpio_halt_cb;
printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d"
" irq).\n", gpio, trigger, irq);
@@ -137,7 +137,7 @@ static int gpio_halt_remove(struct platform_device *pdev)
free_irq(irq, halt_node);
ppc_md.halt = NULL;
- ppc_md.power_off = NULL;
+ pm_power_off = NULL;
gpio_free(gpio);
@@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(of, gpio_halt_match);
static struct platform_driver gpio_halt_driver = {
.driver = {
.name = "gpio-halt",
- .owner = THIS_MODULE,
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index bd6f1a1cf922..157250426b56 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -1,6 +1,3 @@
-config FADS
- bool
-
config CPM1
bool
select CPM
@@ -13,7 +10,6 @@ choice
config MPC8XXFADS
bool "FADS"
- select FADS
config MPC86XADS
bool "MPC86XADS"
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 0883994df384..623bd961465a 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -437,7 +437,6 @@ static struct platform_driver axon_msi_driver = {
.shutdown = axon_msi_shutdown,
.driver = {
.name = "axon-msi",
- .owner = THIS_MODULE,
.of_match_table = axon_msi_device_id,
},
};
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index d4d245c0d787..bee9232fe619 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -186,7 +186,7 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int psize, int apsize,
- int ssize, int local)
+ int ssize, unsigned long flags)
{
unsigned long lpar_rc;
u64 dummy0, dummy1;
@@ -369,7 +369,7 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int psize, int apsize,
- int ssize, int local)
+ int ssize, unsigned long flags)
{
unsigned long lpar_rc;
unsigned long want_v;
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 2b98a36ef8fb..3ce70ded2d6a 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -29,7 +29,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pci_regs.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -401,11 +401,11 @@ error:
} else {
if (config && *config) {
size = 256;
- free_bootmem(__pa(*config), size);
+ memblock_free(__pa(*config), size);
}
if (res && *res) {
size = sizeof(struct celleb_pci_resource);
- free_bootmem(__pa(*res), size);
+ memblock_free(__pa(*res), size);
}
}
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index 844c0facb4f7..9438bbed402f 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -25,7 +25,6 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pci_regs.h>
-#include <linux/bootmem.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 4278acfa2ede..f22387598040 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index 34e8ce2976aa..90be8ec51686 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -142,6 +142,7 @@ static int __init celleb_probe_beat(void)
powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
hpte_init_beat_v3();
+ pm_power_off = beat_power_off;
return 1;
}
@@ -190,6 +191,7 @@ static int __init celleb_probe_native(void)
powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
hpte_init_native();
+ pm_power_off = rtas_power_off;
return 1;
}
@@ -204,7 +206,6 @@ define_machine(celleb_beat) {
.setup_arch = celleb_setup_arch_beat,
.show_cpuinfo = celleb_show_cpuinfo,
.restart = beat_restart,
- .power_off = beat_power_off,
.halt = beat_halt,
.get_rtc_time = beat_get_rtc_time,
.set_rtc_time = beat_set_rtc_time,
@@ -230,7 +231,6 @@ define_machine(celleb_native) {
.setup_arch = celleb_setup_arch_native,
.show_cpuinfo = celleb_show_cpuinfo,
.restart = rtas_restart,
- .power_off = rtas_power_off,
.halt = rtas_halt,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 8a106b4172e0..4c11421847be 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
static void iic_eoi(struct irq_data *d)
{
- struct iic *iic = &__get_cpu_var(cpu_iic);
+ struct iic *iic = this_cpu_ptr(&cpu_iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
BUG_ON(iic->eoi_ptr < 0);
}
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
struct iic *iic;
unsigned int virq;
- iic = &__get_cpu_var(cpu_iic);
+ iic = this_cpu_ptr(&cpu_iic);
*(unsigned long *) &pending =
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+ out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
}
u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 6e3409d590ac..d328140dc6f5 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -127,6 +127,7 @@ static int __init qpace_probe(void)
return 0;
hpte_init_native();
+ pm_power_off = rtas_power_off;
return 1;
}
@@ -137,7 +138,6 @@ define_machine(qpace) {
.setup_arch = qpace_setup_arch,
.show_cpuinfo = qpace_show_cpuinfo,
.restart = rtas_restart,
- .power_off = rtas_power_off,
.halt = rtas_halt,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6ae25fb62015..d62aa982d530 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -259,6 +259,7 @@ static int __init cell_probe(void)
return 0;
hpte_init_native();
+ pm_power_off = rtas_power_off;
return 1;
}
@@ -269,7 +270,6 @@ define_machine(cell) {
.setup_arch = cell_setup_arch,
.show_cpuinfo = cell_show_cpuinfo,
.restart = rtas_restart,
- .power_off = rtas_power_off,
.halt = rtas_halt,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index ffcbd242e669..f7af74f83693 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -181,7 +181,8 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
return 0;
}
-extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
+extern int hash_page(unsigned long ea, unsigned long access,
+ unsigned long trap, unsigned long dsisr); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
int ret;
@@ -196,7 +197,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
(REGION_ID(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
- ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+ ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
spin_lock(&spu->register_lock);
if (!ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e45894a08118..d98f845ac777 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -144,7 +144,7 @@ int spufs_handle_class1(struct spu_context *ctx)
access = (_PAGE_PRESENT | _PAGE_USER);
access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
local_irq_save(flags);
- ret = hash_page(ea, access, 0x300);
+ ret = hash_page(ea, access, 0x300, dsisr);
local_irq_restore(flags);
/* hashing failed, so try the actual fault handler */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 5b77b1919fd2..860a59eb8ea2 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -585,6 +585,8 @@ static int __init chrp_probe(void)
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
+ pm_power_off = rtas_power_off;
+
return 1;
}
@@ -597,7 +599,6 @@ define_machine(chrp) {
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
.restart = rtas_restart,
- .power_off = rtas_power_off,
.halt = rtas_halt,
.time_init = chrp_time_init,
.set_rtc_time = chrp_set_rtc_time,
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index bd4ba5d7d568..fe0ed6ee285e 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -67,6 +67,8 @@ static int __init gamecube_probe(void)
if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
return 0;
+ pm_power_off = gamecube_power_off;
+
return 1;
}
@@ -80,7 +82,6 @@ define_machine(gamecube) {
.probe = gamecube_probe,
.init_early = gamecube_init_early,
.restart = gamecube_restart,
- .power_off = gamecube_power_off,
.halt = gamecube_halt,
.init_IRQ = flipper_pic_probe,
.get_irq = flipper_pic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 168e1d80b2e5..540eeb58d3f0 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -147,6 +147,9 @@ static int __init linkstation_probe(void)
if (!of_flat_dt_is_compatible(root, "linkstation"))
return 0;
+
+ pm_power_off = linkstation_power_off;
+
return 1;
}
@@ -158,7 +161,6 @@ define_machine(linkstation){
.show_cpuinfo = linkstation_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = linkstation_restart,
- .power_off = linkstation_power_off,
.halt = linkstation_halt,
.calibrate_decr = generic_calibrate_decr,
};
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
index 20a8ed91962e..7feb325b636b 100644
--- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -247,7 +247,7 @@ void __init ug_udbg_init(void)
np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-exi");
if (!np) {
udbg_printf("%s: EXI node not found\n", __func__);
- goto done;
+ goto out;
}
exi_io_base = ug_udbg_setup_exi_io_base(np);
@@ -267,8 +267,8 @@ void __init ug_udbg_init(void)
}
done:
- if (np)
- of_node_put(np);
+ of_node_put(np);
+out:
return;
}
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 388e29bab8f6..352592d3e44e 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -211,6 +211,8 @@ static int __init wii_probe(void)
if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii"))
return 0;
+ pm_power_off = wii_power_off;
+
return 1;
}
@@ -226,7 +228,6 @@ define_machine(wii) {
.init_early = wii_init_early,
.setup_arch = wii_setup_arch,
.restart = wii_restart,
- .power_off = wii_power_off,
.halt = wii_halt,
.init_IRQ = wii_pic_probe,
.get_irq = flipper_pic_get_irq,
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index f7136aae8bbf..d3a13067ec42 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <asm/sections.h>
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index cb1b0b35a0c6..56b85cd61aaf 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -169,7 +169,7 @@ static void __init maple_use_rtas_reboot_and_halt_if_present(void)
if (rtas_service_present("system-reboot") &&
rtas_service_present("power-off")) {
ppc_md.restart = rtas_restart;
- ppc_md.power_off = rtas_power_off;
+ pm_power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
}
}
@@ -312,6 +312,7 @@ static int __init maple_probe(void)
alloc_dart_table();
hpte_init_native();
+ pm_power_off = maple_power_off;
return 1;
}
@@ -325,7 +326,6 @@ define_machine(maple) {
.pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
.restart = maple_restart,
- .power_off = maple_power_off,
.halt = maple_halt,
.get_boot_time = maple_get_boot_time,
.set_rtc_time = maple_set_rtc_time,
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index ada33358950d..ae3f47b25b18 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -305,7 +305,6 @@ static struct platform_driver gpio_mdio_driver =
.remove = gpio_mdio_remove,
.driver = {
.name = "gpio-mdio-bitbang",
- .owner = THIS_MODULE,
.of_match_table = gpio_mdio_match,
},
};
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 014d06e6d46b..60b03a1703d1 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -513,11 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
- nvram_image = alloc_bootmem(NVRAM_SIZE);
- if (nvram_image == NULL) {
- printk(KERN_ERR "nvram: can't allocate ram image\n");
- return -ENOMEM;
- }
+ nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
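The bootmem-to-memblock conversions here and in the powernv PCI code can drop their NULL checks because memblock_virt_alloc() returns zeroed memory and panics on failure rather than returning NULL. A minimal sketch (the helper name is illustrative; an alignment of 0 selects the default):

#include <linux/memblock.h>

static void * __init demo_early_alloc(unsigned long size)
{
	/*
	 * Never returns NULL: memblock_virt_alloc() panics if the
	 * allocation cannot be satisfied, and the memory is zeroed.
	 */
	return memblock_virt_alloc(size, 0);
}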
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 7e868ccf3b0d..04702db35d45 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/of_pci.h>
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index b127a29ac526..713d36d45d1d 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -632,6 +632,8 @@ static int __init pmac_probe(void)
smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
#endif /* CONFIG_PMAC_SMU */
+ pm_power_off = pmac_power_off;
+
return 1;
}
@@ -663,7 +665,6 @@ define_machine(powermac) {
.get_irq = NULL, /* changed later */
.pci_irq_fixup = pmac_pci_irq_fixup,
.restart = pmac_restart,
- .power_off = pmac_power_off,
.halt = pmac_halt,
.time_init = pmac_time_init,
.get_boot_time = pmac_get_boot_time,
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index eba9cb10619c..2809c9895288 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -354,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
} else if (!(pe->state & EEH_PE_ISOLATED)) {
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
}
return result;
@@ -373,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
* moving forward, we have to return operational
* state during PE reset.
*/
- if (pe->state & EEH_PE_CFG_BLOCKED) {
+ if (pe->state & EEH_PE_RESET) {
result = (EEH_STATE_MMIO_ACTIVE |
EEH_STATE_DMA_ACTIVE |
EEH_STATE_MMIO_ENABLED |
@@ -452,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
}
return result;
@@ -731,7 +736,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
char *drv_log, unsigned long len)
{
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
return 0;
}
@@ -1087,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
!((*pe)->state & EEH_PE_ISOLATED)) {
eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(*pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data((*pe)->phb,
+ (*pe)->data);
}
/*
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index e462ab947d16..693b6cdac691 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -71,6 +71,7 @@ int opal_async_get_token_interruptible(void)
return token;
}
+EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
int __opal_async_release_token(int token)
{
@@ -102,6 +103,7 @@ int opal_async_release_token(int token)
return 0;
}
+EXPORT_SYMBOL_GPL(opal_async_release_token);
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
@@ -120,6 +122,7 @@ int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
return 0;
}
+EXPORT_SYMBOL_GPL(opal_async_wait_response);
static int opal_async_comp_event(struct notifier_block *nb,
unsigned long msg_type, void *msg)
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 499707ddaa9c..37dbee15769f 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -15,6 +15,8 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <asm/opal.h>
#include <asm/firmware.h>
@@ -43,7 +45,7 @@ unsigned long __init opal_get_boot_time(void)
long rc = OPAL_BUSY;
if (!opal_check_token(OPAL_RTC_READ))
- goto out;
+ return 0;
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
@@ -53,62 +55,33 @@ unsigned long __init opal_get_boot_time(void)
mdelay(10);
}
if (rc != OPAL_SUCCESS)
- goto out;
+ return 0;
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, &tm);
return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
-out:
- ppc_md.get_rtc_time = NULL;
- ppc_md.set_rtc_time = NULL;
- return 0;
}
-void opal_get_rtc_time(struct rtc_time *tm)
+static __init int opal_time_init(void)
{
- long rc = OPAL_BUSY;
- u32 y_m_d;
- u64 h_m_s_ms;
- __be32 __y_m_d;
- __be64 __h_m_s_ms;
+ struct platform_device *pdev;
+ struct device_node *rtc;
- while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
- rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
- opal_poll_events(NULL);
+ rtc = of_find_node_by_path("/ibm,opal/rtc");
+ if (rtc) {
+ pdev = of_platform_device_create(rtc, "opal-rtc", NULL);
+ of_node_put(rtc);
+ } else {
+ if (opal_check_token(OPAL_RTC_READ) ||
+ opal_check_token(OPAL_READ_TPO))
+ pdev = platform_device_register_simple("opal-rtc", -1,
+ NULL, 0);
else
- mdelay(10);
+ return -ENODEV;
}
- if (rc != OPAL_SUCCESS)
- return;
- y_m_d = be32_to_cpu(__y_m_d);
- h_m_s_ms = be64_to_cpu(__h_m_s_ms);
- opal_to_tm(y_m_d, h_m_s_ms, tm);
-}
-
-int opal_set_rtc_time(struct rtc_time *tm)
-{
- long rc = OPAL_BUSY;
- u32 y_m_d = 0;
- u64 h_m_s_ms = 0;
-
- y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
- y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
- y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
- y_m_d |= ((u32)bin2bcd(tm->tm_mday));
-
- h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
- h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
- h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
- while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
- rc = opal_rtc_write(y_m_d, h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
- opal_poll_events(NULL);
- else
- mdelay(10);
- }
- return rc == OPAL_SUCCESS ? 0 : -EIO;
+ return PTR_ERR_OR_ZERO(pdev);
}
+machine_subsys_initcall(powernv, opal_time_init);
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index ae14c40b4b1c..e11273b2386d 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
local_irq_save(flags);
- depth = &__get_cpu_var(opal_trace_depth);
+ depth = this_cpu_ptr(&opal_trace_depth);
if (*depth)
goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
local_irq_save(flags);
- depth = &__get_cpu_var(opal_trace_depth);
+ depth = this_cpu_ptr(&opal_trace_depth);
if (*depth)
goto out;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index feb549aa3eea..0a299be588af 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -18,7 +18,7 @@
.section ".text"
#ifdef CONFIG_TRACEPOINTS
-#ifdef CONFIG_JUMP_LABEL
+#ifdef HAVE_JUMP_LABEL
#define OPAL_BRANCH(LABEL) \
ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
#else
@@ -250,3 +250,7 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
+OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
+OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
+OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
+OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index d019b081df9d..cb0b6de79cd4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -50,7 +50,6 @@ static int mc_recoverable_range_len;
struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
-extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
@@ -644,6 +643,16 @@ static void __init opal_dump_region_init(void)
pr_warn("DUMP: Failed to register kernel log buffer. "
"rc = %d\n", rc);
}
+
+static void opal_ipmi_init(struct device_node *opal_node)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(opal_node, np)
+ if (of_device_is_compatible(np, "ibm,opal-ipmi"))
+ of_platform_device_create(np, NULL, NULL);
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -707,6 +716,8 @@ static int __init opal_init(void)
opal_msglog_init();
}
+ opal_ipmi_init(opal_node);
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
@@ -742,6 +753,8 @@ void opal_shutdown(void)
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
+EXPORT_SYMBOL_GPL(opal_ipmi_send);
+EXPORT_SYMBOL_GPL(opal_ipmi_recv);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -805,3 +818,9 @@ void opal_free_sg_list(struct opal_sg_list *sg)
sg = NULL;
}
}
+
+EXPORT_SYMBOL_GPL(opal_poll_events);
+EXPORT_SYMBOL_GPL(opal_rtc_read);
+EXPORT_SYMBOL_GPL(opal_rtc_write);
+EXPORT_SYMBOL_GPL(opal_tpo_read);
+EXPORT_SYMBOL_GPL(opal_tpo_write);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3ba435ec3dcd..fac88ed8a915 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -91,6 +91,24 @@ static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
}
+static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
+{
+ if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
+ pr_warn("%s: Invalid PE %d on PHB#%x\n",
+ __func__, pe_no, phb->hose->global_number);
+ return;
+ }
+
+ if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
+ pr_warn("%s: PE %d was assigned on PHB#%x\n",
+ __func__, pe_no, phb->hose->global_number);
+ return;
+ }
+
+ phb->ioda.pe_array[pe_no].phb = phb;
+ phb->ioda.pe_array[pe_no].pe_number = pe_no;
+}
+
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
unsigned long pe;
@@ -172,7 +190,7 @@ fail:
return -EIO;
}
-static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
{
resource_size_t sgsz = phb->ioda.m64_segsize;
struct pci_dev *pdev;
@@ -185,16 +203,15 @@ static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
* instead of root bus.
*/
list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
- for (i = PCI_BRIDGE_RESOURCES;
- i <= PCI_BRIDGE_RESOURCE_END; i++) {
- r = &pdev->resource[i];
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
if (!r->parent ||
!pnv_pci_is_mem_pref_64(r->flags))
continue;
base = (r->start - phb->ioda.m64_base) / sgsz;
for (step = 0; step < resource_size(r) / sgsz; step++)
- set_bit(base + step, phb->ioda.pe_alloc);
+ pnv_ioda_reserve_pe(phb, base + step);
}
}
}
@@ -287,8 +304,6 @@ done:
while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
phb->ioda.total_pe) {
pe = &phb->ioda.pe_array[i];
- pe->phb = phb;
- pe->pe_number = i;
if (!master_pe) {
pe->flags |= PNV_IODA_PE_MASTER;
@@ -313,6 +328,12 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
const u32 *r;
u64 pci_addr;
+ /* FIXME: Support M64 for P7IOC */
+ if (phb->type != PNV_PHB_IODA2) {
+ pr_info(" Not support M64 window\n");
+ return;
+ }
+
if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
pr_info(" Firmware too old to support M64 window\n");
return;
@@ -325,12 +346,6 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
return;
}
- /* FIXME: Support M64 for P7IOC */
- if (phb->type != PNV_PHB_IODA2) {
- pr_info(" Not support M64 window\n");
- return;
- }
-
res = &hose->mem_resources[1];
res->start = of_translate_address(dn, r + 2);
res->end = res->start + of_read_number(r + 4, 2) - 1;
@@ -345,7 +360,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
/* Use last M64 BAR to cover M64 window */
phb->ioda.m64_bar_idx = 15;
phb->init_m64 = pnv_ioda2_init_m64;
- phb->alloc_m64_pe = pnv_ioda2_alloc_m64_pe;
+ phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
}
@@ -358,7 +373,9 @@ static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
/* Fetch master PE */
if (pe->flags & PNV_IODA_PE_SLAVE) {
pe = pe->master;
- WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
+ if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
+ return;
+
pe_no = pe->pe_number;
}
@@ -507,6 +524,106 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_MSI */
+static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
+ struct pnv_ioda_pe *parent,
+ struct pnv_ioda_pe *child,
+ bool is_add)
+{
+ const char *desc = is_add ? "adding" : "removing";
+ uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
+ OPAL_REMOVE_PE_FROM_DOMAIN;
+ struct pnv_ioda_pe *slave;
+ long rc;
+
+ /* Parent PE affects child PE */
+ rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
+ child->pe_number, op);
+ if (rc != OPAL_SUCCESS) {
+ pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
+ rc, desc);
+ return -ENXIO;
+ }
+
+ if (!(child->flags & PNV_IODA_PE_MASTER))
+ return 0;
+
+ /* Compound case: parent PE affects slave PEs */
+ list_for_each_entry(slave, &child->slaves, list) {
+ rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
+ slave->pe_number, op);
+ if (rc != OPAL_SUCCESS) {
+ pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
+ rc, desc);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pnv_ioda_set_peltv(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe,
+ bool is_add)
+{
+ struct pnv_ioda_pe *slave;
+ struct pci_dev *pdev;
+ int ret;
+
+ /*
+ * Clear PE frozen state. If it's a master PE, we need to
+ * clear the slave PEs' frozen state as well.
+ */
+ if (is_add) {
+ opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ if (pe->flags & PNV_IODA_PE_MASTER) {
+ list_for_each_entry(slave, &pe->slaves, list)
+ opal_pci_eeh_freeze_clear(phb->opal_id,
+ slave->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ }
+ }
+
+ /*
+ * Associate the PE in the PELT. We need to add the PE to the
+ * corresponding PELT-V as well; otherwise, an error
+ * originating from the PE might affect other
+ * PEs.
+ */
+ ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
+ if (ret)
+ return ret;
+
+ /* For compound PEs, any one affects all of them */
+ if (pe->flags & PNV_IODA_PE_MASTER) {
+ list_for_each_entry(slave, &pe->slaves, list) {
+ ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
+ pdev = pe->pbus->self;
+ else
+ pdev = pe->pdev->bus->self;
+ while (pdev) {
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *parent;
+
+ if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+ parent = &phb->ioda.pe_array[pdn->pe_number];
+ ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
+ if (ret)
+ return ret;
+ }
+
+ pdev = pdev->bus->self;
+ }
+
+ return 0;
+}
+
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -561,48 +678,36 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
return -ENXIO;
}
- rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
- pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
- if (rc)
- pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
- opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ /* Configure PELTV */
+ pnv_ioda_set_peltv(phb, pe, true);
- /* Add to all parents PELT-V */
- while (parent) {
- struct pci_dn *pdn = pci_get_pdn(parent);
- if (pdn && pdn->pe_number != IODA_INVALID_PE) {
- rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
- pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
- /* XXX What to do in case of error ? */
- }
- parent = parent->bus->self;
- }
/* Setup reverse map */
for (rid = pe->rid; rid < rid_end; rid++)
phb->ioda.pe_rmap[rid] = pe->pe_number;
/* Setup one MVTs on IODA1 */
- if (phb->type == PNV_PHB_IODA1) {
- pe->mve_number = pe->pe_number;
- rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
- pe->pe_number);
+ if (phb->type != PNV_PHB_IODA1) {
+ pe->mve_number = 0;
+ goto out;
+ }
+
+ pe->mve_number = pe->pe_number;
+ rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
+ if (rc != OPAL_SUCCESS) {
+ pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+ rc, pe->mve_number);
+ pe->mve_number = -1;
+ } else {
+ rc = opal_pci_set_mve_enable(phb->opal_id,
+ pe->mve_number, OPAL_ENABLE_MVE);
if (rc) {
- pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+ pe_err(pe, "OPAL error %ld enabling MVE %d\n",
rc, pe->mve_number);
pe->mve_number = -1;
- } else {
- rc = opal_pci_set_mve_enable(phb->opal_id,
- pe->mve_number, OPAL_ENABLE_MVE);
- if (rc) {
- pe_err(pe, "OPAL error %ld enabling MVE %d\n",
- rc, pe->mve_number);
- pe->mve_number = -1;
- }
}
- } else if (phb->type == PNV_PHB_IODA2)
- pe->mve_number = 0;
+ }
+out:
return 0;
}
@@ -837,8 +942,8 @@ static void pnv_pci_ioda_setup_PEs(void)
phb = hose->private_data;
/* M64 layout might affect PE allocation */
- if (phb->alloc_m64_pe)
- phb->alloc_m64_pe(phb);
+ if (phb->reserve_m64_pe)
+ phb->reserve_m64_pe(phb);
pnv_ioda_setup_PEs(hose->bus);
}
@@ -1834,19 +1939,14 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb_id = be64_to_cpup(prop64);
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
- phb = alloc_bootmem(sizeof(struct pnv_phb));
- if (!phb) {
- pr_err(" Out of memory !\n");
- return;
- }
+ phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
/* Allocate PCI controller */
- memset(phb, 0, sizeof(struct pnv_phb));
phb->hose = hose = pcibios_alloc_controller(np);
if (!phb->hose) {
pr_err(" Can't allocate PCI controller for %s\n",
np->full_name);
- free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
+ memblock_free(__pa(phb), sizeof(struct pnv_phb));
return;
}
@@ -1913,8 +2013,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
}
pemap_off = size;
size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
- aux = alloc_bootmem(size);
- memset(aux, 0, size);
+ aux = memblock_virt_alloc(size, 0);
phb->ioda.pe_alloc = aux;
phb->ioda.m32_segmap = aux + m32map_off;
if (phb->type == PNV_PHB_IODA1)
@@ -1999,8 +2098,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
}
- /* Configure M64 window */
- if (phb->init_m64 && phb->init_m64(phb))
+ /* Remove M64 resource if we can't configure it successfully */
+ if (!phb->init_m64 || phb->init_m64(phb))
hose->mem_resources[1].flags = 0;
}
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 94ce3481490b..6ef6d4d8e7e2 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -122,12 +122,9 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
return;
}
- phb = alloc_bootmem(sizeof(struct pnv_phb));
- if (phb) {
- memset(phb, 0, sizeof(struct pnv_phb));
- phb->hose = pcibios_alloc_controller(np);
- }
- if (!phb || !phb->hose) {
+ phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
+ phb->hose = pcibios_alloc_controller(np);
+ if (!phb->hose) {
pr_err(" Failed to allocate PCI controller\n");
return;
}
@@ -196,16 +193,27 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
hub_id = be64_to_cpup(prop64);
pr_info(" HUB-ID : 0x%016llx\n", hub_id);
+ /* Count child PHBs and calculate TCE space per PHB */
+ for_each_child_of_node(np, phbn) {
+ if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
+ of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
+ phb_count++;
+ }
+
+ if (phb_count <= 0) {
+ pr_info(" No PHBs for Hub %s\n", np->full_name);
+ return;
+ }
+
+ tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
+ pr_info(" Allocating %lld MB of TCE memory per PHB\n",
+ tce_per_phb >> 20);
+
/* Currently allocate 16M of TCE memory for every Hub
*
* XXX TODO: Make it chip local if possible
*/
- tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
- __pa(MAX_DMA_ADDRESS));
- if (!tce_mem) {
- pr_err(" Failed to allocate TCE Memory !\n");
- return;
- }
+ tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY);
pr_debug(" TCE : 0x%016lx..0x%016lx\n",
__pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
@@ -215,18 +223,6 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
return;
}
- /* Count child PHBs */
- for_each_child_of_node(np, phbn) {
- if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
- of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
- phb_count++;
- }
-
- /* Calculate how much TCE space we can give per PHB */
- tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
- pr_info(" Allocating %lld MB of TCE memory per PHB\n",
- tce_per_phb >> 20);
-
/* Initialize PHBs */
for_each_child_of_node(np, phbn) {
if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 540fc6dd56b3..4945e87f12dc 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 34d29eb2a4de..6c02ff8dd69f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -130,7 +130,7 @@ struct pnv_phb {
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
int (*init_m64)(struct pnv_phb *phb);
- void (*alloc_m64_pe)(struct pnv_phb *phb);
+ void (*reserve_m64_pe)(struct pnv_phb *phb);
int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 3f9546d8a51f..30b1c3e298a6 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -265,10 +265,8 @@ static unsigned long pnv_memory_block_size(void)
static void __init pnv_setup_machdep_opal(void)
{
ppc_md.get_boot_time = opal_get_boot_time;
- ppc_md.get_rtc_time = opal_get_rtc_time;
- ppc_md.set_rtc_time = opal_set_rtc_time;
ppc_md.restart = pnv_restart;
- ppc_md.power_off = pnv_power_off;
+ pm_power_off = pnv_power_off;
ppc_md.halt = pnv_halt;
ppc_md.machine_check_exception = opal_machine_check;
ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
@@ -285,7 +283,7 @@ static void __init pnv_setup_machdep_rtas(void)
ppc_md.set_rtc_time = rtas_set_rtc_time;
}
ppc_md.restart = rtas_restart;
- ppc_md.power_off = rtas_power_off;
+ pm_power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
}
#endif /* CONFIG_PPC_POWERNV_RTAS */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 4753958cd509..b716f666e48a 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -149,6 +149,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
+ unsigned long srr1;
/* Standard hot unplug procedure */
local_irq_disable();
@@ -165,13 +166,25 @@ static void pnv_smp_cpu_kill_self(void)
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
ppc64_runlatch_off();
- power7_nap(1);
+ srr1 = power7_nap(1);
ppc64_runlatch_on();
- /* Clear the IPI that woke us up */
- icp_native_flush_interrupt();
- local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
- mb();
+ /*
+ * If the SRR1 value indicates that we woke up due to
+ * an external interrupt, then clear the interrupt.
+ * We clear the interrupt before checking for the
+ * reason, so as to avoid a race where we wake up for
+ * some other reason, find nothing and clear the interrupt
+ * just as some other cpu is sending us an interrupt.
+ * If we returned from power7_nap as a result of
+ * having finished executing in a KVM guest, then srr1
+ * contains 0.
+ */
+ if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+ icp_native_flush_interrupt();
+ local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
+ smp_mb();
+ }
if (cpu_core_split_required())
continue;
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3e270e3412ae..2f95d33cf34a 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -110,7 +110,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long vpn, int psize, int apsize,
- int ssize, int local)
+ int ssize, unsigned long inv_flags)
{
int result;
u64 hpte_v, want_v, hpte_rs;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 5f3b23220b8e..a6c42f34303a 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
static unsigned int ps3_get_irq(void)
{
- struct ps3_private *pd = &__get_cpu_var(ps3_private);
+ struct ps3_private *pd = this_cpu_ptr(&ps3_private);
u64 x = (pd->bmp.status & pd->bmp.mask);
unsigned int plug;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 3f509f86432c..799c8580ab09 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -125,12 +125,7 @@ static void __init prealloc(struct ps3_prealloc *p)
if (!p->size)
return;
- p->address = __alloc_bootmem(p->size, p->align, __pa(MAX_DMA_ADDRESS));
- if (!p->address) {
- printk(KERN_ERR "%s: Cannot allocate %s\n", __func__,
- p->name);
- return;
- }
+ p->address = memblock_virt_alloc(p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
@@ -248,6 +243,7 @@ static int __init ps3_probe(void)
ps3_mm_init();
ps3_mm_vas_create(&htab_size);
ps3_hpte_init(htab_size);
+ pm_power_off = ps3_power_off;
DBG(" <- %s:%d\n", __func__, __LINE__);
return 1;
@@ -278,7 +274,6 @@ define_machine(ps3) {
.calibrate_decr = ps3_calibrate_decr,
.progress = ps3_progress,
.restart = ps3_restart,
- .power_off = ps3_power_off,
.halt = ps3_halt,
#if defined(CONFIG_KEXEC)
.kexec_cpu_down = ps3_kexec_cpu_down,
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 1062f71f5a85..39049e4884fb 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
*/
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
- struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+ struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
struct dtl_entry *wp = dtlr->write_ptr;
struct lppaca *vpa = local_paca->lppaca_ptr;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 5c375f93c669..f30cf4d136a4 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -340,16 +340,17 @@ static void pseries_remove_processor(struct device_node *np)
}
static int pseries_smp_notifier(struct notifier_block *nb,
- unsigned long action, void *node)
+ unsigned long action, void *data)
{
+ struct of_reconfig_data *rd = data;
int err = 0;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
- err = pseries_add_processor(node);
+ err = pseries_add_processor(rd->dn);
break;
case OF_RECONFIG_DETACH_NODE:
- pseries_remove_processor(node);
+ pseries_remove_processor(rd->dn);
break;
}
return notifier_from_errno(err);
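This and the following notifier hunks reflect that OF reconfiguration notifiers are now handed a struct of_reconfig_data, which carries the affected node and, for property updates, the property itself. A hedged sketch of a callback using it (names are illustrative; the property string matches the one checked in the memory hotplug hunk below):

#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/string.h>
#include <linux/printk.h>

static int demo_of_reconfig_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		pr_info("node %s attached\n", rd->dn->full_name);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			pr_info("dynamic memory layout updated\n");
		break;
	}

	return notifier_from_errno(err);
}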
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 3c4c0dcd90d3..fa41f0da5b6f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
-#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
@@ -66,22 +65,6 @@ unsigned long pseries_memory_block_size(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static int pseries_remove_memory(u64 start, u64 size)
-{
- int ret;
-
- /* Remove htab bolted mappings for this section of memory */
- start = (unsigned long)__va(start);
- ret = remove_section_mapping(start, start + size);
-
- /* Ensure all vmalloc mappings are flushed in case they also
- * hit that section of memory
- */
- vm_unmap_aliases();
-
- return ret;
-}
-
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
unsigned long block_sz, start_pfn;
@@ -183,7 +166,7 @@ static int pseries_add_mem_node(struct device_node *np)
return (ret < 0) ? -EINVAL : 0;
}
-static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
+static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
struct of_drconf_cell *new_drmem, *old_drmem;
unsigned long memblock_size;
@@ -232,22 +215,21 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
}
static int pseries_memory_notifier(struct notifier_block *nb,
- unsigned long action, void *node)
+ unsigned long action, void *data)
{
- struct of_prop_reconfig *pr;
+ struct of_reconfig_data *rd = data;
int err = 0;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
- err = pseries_add_mem_node(node);
+ err = pseries_add_mem_node(rd->dn);
break;
case OF_RECONFIG_DETACH_NODE:
- err = pseries_remove_mem_node(node);
+ err = pseries_remove_mem_node(rd->dn);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
- pr = (struct of_prop_reconfig *)node;
- if (!strcmp(pr->prop->name, "ibm,dynamic-memory"))
- err = pseries_update_drconf_memory(pr);
+ if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
+ err = pseries_update_drconf_memory(rd);
break;
}
return notifier_from_errno(err);
@@ -262,10 +244,6 @@ static int __init pseries_memory_hotplug_init(void)
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_mem_nb);
-#ifdef CONFIG_MEMORY_HOTREMOVE
- ppc_md.remove_memory = pseries_remove_memory;
-#endif
-
return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 3fda3f17b84e..ccd53f91e8aa 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -18,7 +18,7 @@
#ifdef CONFIG_TRACEPOINTS
-#ifndef CONFIG_JUMP_LABEL
+#ifndef HAVE_JUMP_LABEL
.section ".toc","aw"
.globl hcall_tracepoint_refcount
@@ -78,7 +78,7 @@ hcall_tracepoint_refcount:
mr r5,BUFREG; \
__HCALL_INST_POSTCALL
-#ifdef CONFIG_JUMP_LABEL
+#ifdef HAVE_JUMP_LABEL
#define HCALL_BRANCH(LABEL) \
ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
#else
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 4575f0c9e521..f02ec3ab428c 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
if (opcode > MAX_HCALL_OPCODE)
return;
- h = &__get_cpu_var(hcall_stats)[opcode / 4];
+ h = this_cpu_ptr(&hcall_stats[opcode / 4]);
h->tb_start = mftb();
h->purr_start = mfspr(SPRN_PURR);
}
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
if (opcode > MAX_HCALL_OPCODE)
return;
- h = &__get_cpu_var(hcall_stats)[opcode / 4];
+ h = this_cpu_ptr(&hcall_stats[opcode / 4]);
h->num_calls++;
h->tb_total += mftb() - h->tb_start;
h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e32e00976a94..1d3d52dc3ff3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
local_irq_save(flags); /* to protect tcep and the page behind it */
- tcep = __get_cpu_var(tce_page);
+ tcep = __this_cpu_read(tce_page);
/* This is safe to do since interrupts are off when we're called
* from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
- __get_cpu_var(tce_page) = tcep;
+ __this_cpu_write(tce_page, tcep);
}
rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
long l, limit;
local_irq_disable(); /* to protect tcep and the page behind it */
- tcep = __get_cpu_var(tce_page);
+ tcep = __this_cpu_read(tce_page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
local_irq_enable();
return -ENOMEM;
}
- __get_cpu_var(tce_page) = tcep;
+ __this_cpu_write(tce_page, tcep);
}
proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
@@ -574,8 +574,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
while (isa_dn && isa_dn != dn)
isa_dn = isa_dn->parent;
- if (isa_dn_orig)
- of_node_put(isa_dn_orig);
+ of_node_put(isa_dn_orig);
/* Count number of direct PCI children of the PHB. */
for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
@@ -1251,10 +1250,11 @@ static struct notifier_block iommu_mem_nb = {
.notifier_call = iommu_mem_notifier,
};
-static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
+static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
int err = NOTIFY_OK;
- struct device_node *np = node;
+ struct of_reconfig_data *rd = data;
+ struct device_node *np = rd->dn;
struct pci_dn *pci = PCI_DN(np);
struct direct_window *window;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f6880d2a40fb..469751d92004 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -284,7 +284,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int psize, int apsize,
- int ssize, int local)
+ int ssize, unsigned long inv_flags)
{
unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN;
@@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize)
+ int psize, int ssize, int local)
{
int i, index = 0;
unsigned long s_addr = addr;
@@ -515,7 +515,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
unsigned long vpn;
unsigned long i, pix, rc;
unsigned long flags = 0;
- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[9];
unsigned long hash, index, shift, hidx, slot;
@@ -705,7 +705,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
local_irq_save(flags);
- depth = &__get_cpu_var(hcall_trace_depth);
+ depth = this_cpu_ptr(&hcall_trace_depth);
if (*depth)
goto out;
@@ -730,7 +730,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
local_irq_save(flags);
- depth = &__get_cpu_var(hcall_trace_depth);
+ depth = this_cpu_ptr(&hcall_trace_depth);
if (*depth)
goto out;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 11a3b617ef5d..054a0ed5c7ee 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -715,6 +715,8 @@ static int nvram_pstore_init(void)
nvram_pstore_info.buf = oops_data;
nvram_pstore_info.bufsize = oops_data_sz;
+ spin_lock_init(&nvram_pstore_info.buf_lock);
+
rc = pstore_register(&nvram_pstore_info);
if (rc != 0)
pr_err("nvram: pstore_register() failed, defaults to "
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 67e48594040c..fe16a50700de 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -134,7 +134,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
of_node_put(pdn);
if (rc) {
- pr_err("no ibm,pcie-link-speed-stats property\n");
+ pr_debug("no ibm,pcie-link-speed-stats property\n");
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5a4d0fc03b03..c3b2a7e81ddb 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
if (!rtas_error_extended(h)) {
- memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
- errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+ memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
+ errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
} else {
int len, error_log_length;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 125c589eeef5..e445b6701f50 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -251,9 +251,10 @@ static void __init pseries_discover_pic(void)
" interrupt-controller\n");
}
-static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
+static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
- struct device_node *np = node;
+ struct of_reconfig_data *rd = data;
+ struct device_node *np = rd->dn;
struct pci_dn *pci = NULL;
int err = NOTIFY_OK;
@@ -499,7 +500,11 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
long rc;
- if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) {
+
+ rc = pSeries_enable_reloc_on_exc();
+ if (rc == H_P2) {
+ pr_info("Relocation on exceptions not supported\n");
+ } else if (rc != H_SUCCESS) {
pr_warn("Unable to enable relocation on exceptions: "
"%ld\n", rc);
}
@@ -659,6 +664,34 @@ static void __init pSeries_init_early(void)
pr_debug(" <- pSeries_init_early()\n");
}
+/**
+ * pseries_power_off - tell firmware about how to power off the system.
+ *
+ * This function calls either the power-off rtas token in normal cases
+ * or the ibm,power-off-ups token (if present & requested) in case of
+ * a power failure. If power-off token is used, power on will only be
+ * possible with power button press. If ibm,power-off-ups token is used
+ * it will allow auto poweron after power is restored.
+ */
+static void pseries_power_off(void)
+{
+ int rc;
+ int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");
+
+ if (rtas_flash_term_hook)
+ rtas_flash_term_hook(SYS_POWER_OFF);
+
+ if (rtas_poweron_auto == 0 ||
+ rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
+ rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
+ printk(KERN_INFO "RTAS power-off returned %d\n", rc);
+ } else {
+ rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
+ printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
+ }
+ for (;;);
+}
+
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
@@ -741,6 +774,8 @@ static int __init pSeries_probe(void)
else
hpte_init_native();
+ pm_power_off = pseries_power_off;
+
pr_debug("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
@@ -754,34 +789,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
return PCI_PROBE_NORMAL;
}
-/**
- * pSeries_power_off - tell firmware about how to power off the system.
- *
- * This function calls either the power-off rtas token in normal cases
- * or the ibm,power-off-ups token (if present & requested) in case of
- * a power failure. If power-off token is used, power on will only be
- * possible with power button press. If ibm,power-off-ups token is used
- * it will allow auto poweron after power is restored.
- */
-static void pSeries_power_off(void)
-{
- int rc;
- int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");
-
- if (rtas_flash_term_hook)
- rtas_flash_term_hook(SYS_POWER_OFF);
-
- if (rtas_poweron_auto == 0 ||
- rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
- rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
- printk(KERN_INFO "RTAS power-off returned %d\n", rc);
- } else {
- rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
- printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
- }
- for (;;);
-}
-
#ifndef CONFIG_PCI
void pSeries_final_fixup(void) { }
#endif
@@ -796,7 +803,6 @@ define_machine(pseries) {
.pcibios_fixup = pSeries_final_fixup,
.pci_probe_mode = pSeries_pci_probe_mode,
.restart = rtas_restart,
- .power_off = pSeries_power_off,
.halt = rtas_halt,
.panic = rtas_os_term,
.get_boot_time = rtas_get_boot_time,
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ad56edc39919..f532c92bf99d 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -326,7 +326,6 @@ static struct platform_driver axon_ram_driver = {
.remove = axon_ram_remove,
.driver = {
.name = AXON_RAM_MODULE_NAME,
- .owner = THIS_MODULE,
.of_match_table = axon_ram_device_id,
},
};
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 90545ad1626e..861cebf9c292 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -210,7 +210,6 @@ static const struct of_device_id mpc85xx_l2ctlr_of_match[] = {
static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = {
.driver = {
.name = "fsl-l2ctlr",
- .owner = THIS_MODULE,
.of_match_table = mpc85xx_l2ctlr_of_match,
},
.probe = mpc85xx_l2ctlr_of_probe,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 7aed8d0876b7..4bbb4b8dfd09 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -13,7 +13,6 @@
*
*/
#include <linux/irq.h>
-#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -578,7 +577,6 @@ static const struct of_device_id fsl_of_msi_ids[] = {
static struct platform_driver fsl_of_msi_driver = {
.driver = {
.name = "fsl-msi",
- .owner = THIS_MODULE,
.of_match_table = fsl_of_msi_ids,
},
.probe = fsl_of_msi_probe,
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 65d2ed4549e6..6455c1eada1a 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/slab.h>
@@ -152,7 +151,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
flags |= 0x10000000; /* enable relaxed ordering */
for (i = 0; size > 0; i++) {
- unsigned int bits = min(ilog2(size),
+ unsigned int bits = min_t(u32, ilog2(size),
__ffs(pci_addr | phys_addr));
if (index + i >= 5)
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 8cf4aa0e3a25..1d6fd7c59fe9 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -80,7 +80,6 @@ static const struct of_device_id pmc_ids[] = {
static struct platform_driver pmc_driver = {
.driver = {
.name = "fsl-pmc",
- .owner = THIS_MODULE,
.of_match_table = pmc_ids,
},
.probe = pmc_probe,
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index c04b718307c8..c1cd3698f534 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -58,6 +58,19 @@
#define RIO_ISR_AACR 0x10120
#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
+#define RIWTAR_TRAD_VAL_SHIFT 12
+#define RIWTAR_TRAD_MASK 0x00FFFFFF
+#define RIWBAR_BADD_VAL_SHIFT 12
+#define RIWBAR_BADD_MASK 0x003FFFFF
+#define RIWAR_ENABLE 0x80000000
+#define RIWAR_TGINT_LOCAL 0x00F00000
+#define RIWAR_RDTYP_NO_SNOOP 0x00040000
+#define RIWAR_RDTYP_SNOOP 0x00050000
+#define RIWAR_WRTYP_NO_SNOOP 0x00004000
+#define RIWAR_WRTYP_SNOOP 0x00005000
+#define RIWAR_WRTYP_ALLOC 0x00006000
+#define RIWAR_SIZE_MASK 0x0000003F
+
#define __fsl_read_rio_config(x, addr, err, op) \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
@@ -266,6 +279,89 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
return 0;
}
+static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
+{
+ int i;
+
+ /* close inbound windows */
+ for (i = 0; i < RIO_INB_ATMU_COUNT; i++)
+ out_be32(&priv->inb_atmu_regs[i].riwar, 0);
+}
+
+int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+ u64 rstart, u32 size, u32 flags)
+{
+ struct rio_priv *priv = mport->priv;
+ u32 base_size;
+ unsigned int base_size_log;
+ u64 win_start, win_end;
+ u32 riwar;
+ int i;
+
+ if ((size & (size - 1)) != 0)
+ return -EINVAL;
+
+ base_size_log = ilog2(size);
+ base_size = 1 << base_size_log;
+
+ /* check if addresses are aligned with the window size */
+ if (lstart & (base_size - 1))
+ return -EINVAL;
+ if (rstart & (base_size - 1))
+ return -EINVAL;
+
+ /* check for conflicting ranges */
+ for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
+ riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
+ if ((riwar & RIWAR_ENABLE) == 0)
+ continue;
+ win_start = ((u64)(in_be32(&priv->inb_atmu_regs[i].riwbar) & RIWBAR_BADD_MASK))
+ << RIWBAR_BADD_VAL_SHIFT;
+ win_end = win_start + ((1 << ((riwar & RIWAR_SIZE_MASK) + 1)) - 1);
+ if (rstart < win_end && (rstart + size) > win_start)
+ return -EINVAL;
+ }
+
+ /* find unused atmu */
+ for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
+ riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
+ if ((riwar & RIWAR_ENABLE) == 0)
+ break;
+ }
+ if (i >= RIO_INB_ATMU_COUNT)
+ return -ENOMEM;
+
+ out_be32(&priv->inb_atmu_regs[i].riwtar, lstart >> RIWTAR_TRAD_VAL_SHIFT);
+ out_be32(&priv->inb_atmu_regs[i].riwbar, rstart >> RIWBAR_BADD_VAL_SHIFT);
+ out_be32(&priv->inb_atmu_regs[i].riwar, RIWAR_ENABLE | RIWAR_TGINT_LOCAL |
+ RIWAR_RDTYP_SNOOP | RIWAR_WRTYP_SNOOP | (base_size_log - 1));
+
+ return 0;
+}
+
+void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart)
+{
+ u32 win_start_shift, base_start_shift;
+ struct rio_priv *priv = mport->priv;
+ u32 riwar, riwtar;
+ int i;
+
+ /* skip default window */
+ base_start_shift = lstart >> RIWTAR_TRAD_VAL_SHIFT;
+ for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
+ riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
+ if ((riwar & RIWAR_ENABLE) == 0)
+ continue;
+
+ riwtar = in_be32(&priv->inb_atmu_regs[i].riwtar);
+ win_start_shift = riwtar & RIWTAR_TRAD_MASK;
+ if (win_start_shift == base_start_shift) {
+ out_be32(&priv->inb_atmu_regs[i].riwar, riwar & ~RIWAR_ENABLE);
+ return;
+ }
+ }
+}
+
void fsl_rio_port_error_handler(int offset)
{
/*XXX: Error recovery is not implemented, we just clear errors */
@@ -389,6 +485,8 @@ int fsl_rio_setup(struct platform_device *dev)
ops->add_outb_message = fsl_add_outb_message;
ops->add_inb_buffer = fsl_add_inb_buffer;
ops->get_inb_message = fsl_get_inb_message;
+ ops->map_inb = fsl_map_inb_mem;
+ ops->unmap_inb = fsl_unmap_inb_mem;
rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
if (!rmu_node) {
@@ -602,6 +700,11 @@ int fsl_rio_setup(struct platform_device *dev)
RIO_ATMU_REGS_PORT2_OFFSET));
priv->maint_atmu_regs = priv->atmu_regs + 1;
+ priv->inb_atmu_regs = (struct rio_inb_atmu_regs __iomem *)
+ (priv->regs_win +
+ ((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET :
+ RIO_INB_ATMU_REGS_PORT2_OFFSET));
+
/* Set to receive any dist ID for serial RapidIO controller. */
if (port->phy_type == RIO_PHY_SERIAL)
@@ -620,6 +723,7 @@ int fsl_rio_setup(struct platform_device *dev)
rio_law_start = range_start;
fsl_rio_setup_rmu(port, rmu_np[i]);
+ fsl_rio_inbound_mem_init(priv);
dbell->mport[i] = port;
@@ -673,7 +777,6 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = {
static struct platform_driver fsl_of_rio_rpn_driver = {
.driver = {
.name = "fsl-of-rio",
- .owner = THIS_MODULE,
.of_match_table = fsl_of_rio_rpn_ids,
},
.probe = fsl_of_rio_rpn_probe,
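As a quick reference for the inbound ATMU code added above, here is a minimal stand-alone C sketch of the validation fsl_map_inb_mem() performs before programming a window; the helper name inb_window_request_ok() and its single-window signature are illustrative, not part of the patch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the checks in fsl_map_inb_mem(): power-of-two size, addresses
 * aligned to that size, and no overlap with an already enabled window.
 */
static bool inb_window_request_ok(uint64_t lstart, uint64_t rstart,
				  uint32_t size, uint64_t win_start,
				  uint64_t win_end)
{
	if (size == 0 || (size & (size - 1)) != 0)	/* size must be 2^n */
		return false;
	if ((lstart | rstart) & (size - 1))		/* size-aligned addresses */
		return false;
	if (rstart < win_end && rstart + size > win_start)	/* overlap test */
		return false;
	return true;
}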
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
index ae8e27405a0d..d53407a34f32 100644
--- a/arch/powerpc/sysdev/fsl_rio.h
+++ b/arch/powerpc/sysdev/fsl_rio.h
@@ -50,9 +50,12 @@
#define RIO_S_DBELL_REGS_OFFSET 0x13400
#define RIO_S_PW_REGS_OFFSET 0x134e0
#define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40
+#define RIO_INB_ATMU_REGS_PORT1_OFFSET 0x10d60
+#define RIO_INB_ATMU_REGS_PORT2_OFFSET 0x10f60
#define MAX_MSG_UNIT_NUM 2
#define MAX_PORT_NUM 4
+#define RIO_INB_ATMU_COUNT 4
struct rio_atmu_regs {
u32 rowtar;
@@ -63,6 +66,15 @@ struct rio_atmu_regs {
u32 pad2[3];
};
+struct rio_inb_atmu_regs {
+ u32 riwtar;
+ u32 pad1;
+ u32 riwbar;
+ u32 pad2;
+ u32 riwar;
+ u32 pad3[3];
+};
+
struct rio_dbell_ring {
void *virt;
dma_addr_t phys;
@@ -99,6 +111,7 @@ struct rio_priv {
void __iomem *regs_win;
struct rio_atmu_regs __iomem *atmu_regs;
struct rio_atmu_regs __iomem *maint_atmu_regs;
+ struct rio_inb_atmu_regs __iomem *inb_atmu_regs;
void __iomem *maint_win;
void *rmm_handle; /* RapidIO message manager(unit) Handle */
};
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index ffd1169ebaab..99269c041615 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -197,8 +197,7 @@ static int __init setup_rstcr(void)
if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
- if (np)
- of_node_put(np);
+ of_node_put(np);
return 0;
}
@@ -238,7 +237,7 @@ void fsl_hv_restart(char *cmd)
/*
* Halt the current partition
*
- * This function should be assigned to the ppc_md.power_off and ppc_md.halt
+ * This function should be assigned to the pm_power_off and ppc_md.halt
* function pointers, to shut down the partition when we're running under
* the Freescale hypervisor.
*/
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index b50f97811c25..b28733727ed3 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -20,7 +20,6 @@
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
index 5492dc5f56f4..f4f0301b9a60 100644
--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
+++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
@@ -26,8 +26,7 @@ unsigned long mpc5xxx_get_bus_frequency(struct device_node *node)
of_node_put(node);
node = np;
}
- if (node)
- of_node_put(node);
+ of_node_put(node);
return p_bus_freq ? *p_bus_freq : 0;
}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 89cec0ed6a58..c4648ad5c1f3 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -24,7 +24,6 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 7bdf3cc741e4..3f165d972a0e 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -270,7 +270,6 @@ static const struct of_device_id mpic_msgr_ids[] = {
static struct platform_driver mpic_msgr_driver = {
.driver = {
.name = "mpic-msgr",
- .owner = THIS_MODULE,
.of_match_table = mpic_msgr_ids,
},
.probe = mpic_msgr_probe,
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 45c114bc430b..a3f660eed6de 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -16,7 +16,6 @@
#undef DEBUG
#include <linux/irq.h>
-#include <linux/bootmem.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 0dff1cd44481..b2cef1809389 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -10,7 +10,6 @@
*/
#include <linux/irq.h>
-#include <linux/bootmem.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 13e67d93a7c1..8a0b77a3ec0c 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -210,7 +210,6 @@ static struct platform_driver pmi_of_platform_driver = {
.remove = pmi_of_remove,
.driver = {
.name = "pmi",
- .owner = THIS_MODULE,
.of_match_table = pmi_match,
},
};
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
index 82e2cfe35c62..ba95adf81d8d 100644
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -281,7 +281,7 @@ static int __init cpm_init(void)
printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
np->full_name);
ret = -EINVAL;
- goto out;
+ goto node_put;
}
cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
@@ -290,7 +290,7 @@ static int __init cpm_init(void)
printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
np->full_name);
ret = -EINVAL;
- goto out;
+ goto node_put;
}
/* All 4xx SoCs with a CPM controller have one of two
@@ -330,9 +330,9 @@ static int __init cpm_init(void)
if (cpm.standby || cpm.suspend)
suspend_set_ops(&cpm_suspend_ops);
+node_put:
+ of_node_put(np);
out:
- if (np)
- of_node_put(np);
return ret;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 908105f835d1..ed9970ff8d94 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -197,7 +197,6 @@ static struct platform_driver hsta_msi_driver = {
.probe = hsta_msi_probe,
.driver = {
.name = "hsta-msi",
- .owner = THIS_MODULE,
.of_match_table = hsta_msi_ids,
},
};
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 518eabbe0bdc..6e2e6aa378bb 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -22,7 +22,6 @@
*/
#include <linux/irq.h>
-#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
@@ -270,7 +269,6 @@ static struct platform_driver ppc4xx_msi_driver = {
.remove = ppc4xx_of_msi_remove,
.driver = {
.name = "ppc4xx-msi",
- .owner = THIS_MODULE,
.of_match_table = ppc4xx_msi_ids,
},
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index df6e2fc4ff92..086aca69ecae 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -22,7 +22,6 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 238a07b97f2c..1f29cee8da7b 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -22,7 +22,6 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -693,7 +692,6 @@ static const struct of_device_id qe_ids[] = {
static struct platform_driver qe_driver = {
.driver = {
.name = "fsl-qe",
- .owner = THIS_MODULE,
.of_match_table = qe_ids,
},
.probe = qe_probe,
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index b2b87c30e266..543765e1ef14 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -23,7 +23,6 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 92033936a8f7..7c37157d4c24 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index fe0cca477164..365249cd346b 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
void xics_teardown_cpu(void)
{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
/*
* we have to reset the cppr index to 0 because we're
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8efbb37d6e0..5b150f0c5df9 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -51,6 +51,12 @@
#include <asm/paca.h>
#endif
+#if defined(CONFIG_PPC_SPLPAR)
+#include <asm/plpar_wrappers.h>
+#else
+static inline long plapr_set_ciabr(unsigned long ciabr) {return 0; };
+#endif
+
#include "nonstdio.h"
#include "dis-asm.h"
@@ -88,10 +94,9 @@ struct bpt {
};
/* Bits in bpt.enabled */
-#define BP_IABR_TE 1 /* IABR translation enabled */
-#define BP_IABR 2
-#define BP_TRAP 8
-#define BP_DABR 0x10
+#define BP_CIABR 1
+#define BP_TRAP 2
+#define BP_DABR 4
#define NBPTS 256
static struct bpt bpts[NBPTS];
@@ -270,6 +275,45 @@ static inline void cinval(void *p)
asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
}
+/**
+ * write_ciabr() - write the CIABR SPR
+ * @ciabr: The value to write.
+ *
+ * This function writes a value to the CIABR register, either directly
+ * through the mtspr instruction if the kernel is in HV privilege mode,
+ * or via a hypervisor call to achieve the same when the kernel is in
+ * supervisor privilege mode.
+ */
+static void write_ciabr(unsigned long ciabr)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return;
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ mtspr(SPRN_CIABR, ciabr);
+ return;
+ }
+ plapr_set_ciabr(ciabr);
+}
+
+/**
+ * set_ciabr() - set the CIABR
+ * @addr: The value to set.
+ *
+ * This function sets the correct privilege value into the HW
+ * breakpoint address before writing it into the CIABR register.
+ */
+static void set_ciabr(unsigned long addr)
+{
+ addr &= ~CIABR_PRIV;
+
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ addr |= CIABR_PRIV_HYPER;
+ else
+ addr |= CIABR_PRIV_SUPER;
+ write_ciabr(addr);
+}
+
/*
* Disable surveillance (the service processor watchdog function)
* while we are in xmon.
@@ -727,7 +771,7 @@ static void insert_bpts(void)
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
- if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0)
+ if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0)
continue;
if (mread(bp->address, &bp->instr[0], 4) != 4) {
printf("Couldn't read instruction at %lx, "
@@ -742,7 +786,7 @@ static void insert_bpts(void)
continue;
}
store_inst(&bp->instr[0]);
- if (bp->enabled & BP_IABR)
+ if (bp->enabled & BP_CIABR)
continue;
if (mwrite(bp->address, &bpinstr, 4) != 4) {
printf("Couldn't write instruction at %lx, "
@@ -764,9 +808,9 @@ static void insert_cpu_bpts(void)
brk.len = 8;
__set_breakpoint(&brk);
}
- if (iabr && cpu_has_feature(CPU_FTR_IABR))
- mtspr(SPRN_IABR, iabr->address
- | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
+
+ if (iabr)
+ set_ciabr(iabr->address);
}
static void remove_bpts(void)
@@ -777,7 +821,7 @@ static void remove_bpts(void)
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
- if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP)
+ if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP)
continue;
if (mread(bp->address, &instr, 4) == 4
&& instr == bpinstr
@@ -792,8 +836,7 @@ static void remove_bpts(void)
static void remove_cpu_bpts(void)
{
hw_breakpoint_disable();
- if (cpu_has_feature(CPU_FTR_IABR))
- mtspr(SPRN_IABR, 0);
+ write_ciabr(0);
}
/* Command interpreting routine */
@@ -907,7 +950,7 @@ cmds(struct pt_regs *excp)
case 'u':
dump_segments();
break;
-#elif defined(CONFIG_4xx)
+#elif defined(CONFIG_44x)
case 'u':
dump_tlb_44x();
break;
@@ -981,7 +1024,8 @@ static void bootcmds(void)
else if (cmd == 'h')
ppc_md.halt();
else if (cmd == 'p')
- ppc_md.power_off();
+ if (pm_power_off)
+ pm_power_off();
}
static int cpu_cmd(void)
@@ -1127,7 +1171,7 @@ static char *breakpoint_help_string =
"b <addr> [cnt] set breakpoint at given instr addr\n"
"bc clear all breakpoints\n"
"bc <n/addr> clear breakpoint number n or at addr\n"
- "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n"
+ "bi <addr> [cnt] set hardware instr breakpoint (POWER8 only)\n"
"bd <addr> [cnt] set hardware data breakpoint\n"
"";
@@ -1166,13 +1210,13 @@ bpt_cmds(void)
break;
case 'i': /* bi - hardware instr breakpoint */
- if (!cpu_has_feature(CPU_FTR_IABR)) {
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
printf("Hardware instruction breakpoint "
"not supported on this cpu\n");
break;
}
if (iabr) {
- iabr->enabled &= ~(BP_IABR | BP_IABR_TE);
+ iabr->enabled &= ~BP_CIABR;
iabr = NULL;
}
if (!scanhex(&a))
@@ -1181,7 +1225,7 @@ bpt_cmds(void)
break;
bp = new_breakpoint(a);
if (bp != NULL) {
- bp->enabled |= BP_IABR | BP_IABR_TE;
+ bp->enabled |= BP_CIABR;
iabr = bp;
}
break;
@@ -1238,7 +1282,7 @@ bpt_cmds(void)
if (!bp->enabled)
continue;
printf("%2x %s ", BP_NUM(bp),
- (bp->enabled & BP_IABR)? "inst": "trap");
+ (bp->enabled & BP_CIABR) ? "inst": "trap");
xmon_print_symbol(bp->address, " ", "\n");
}
break;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f2cf1f90295b..68b68d755fdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -65,6 +65,7 @@ config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 47c8630c93cd..15c94246b600 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -511,7 +511,6 @@ static const struct dev_pm_ops appldata_pm_ops = {
static struct platform_driver appldata_pdrv = {
.driver = {
.name = "appldata",
- .owner = THIS_MODULE,
.pm = &appldata_pm_ops,
},
};
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 23223cd63e54..1f272b24fc0b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
module_init(aes_s390_init);
module_exit(aes_s390_fini);
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 7acb77f7ef1a..9e05cc453a40 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
module_init(des_s390_init);
module_exit(des_s390_exit);
-MODULE_ALIAS("des");
-MODULE_ALIAS("des3_ede");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index d43485d142e9..7940dc90e80b 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a1b3a9dc9d8a..5b2bee323694 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
module_init(sha1_s390_init);
module_exit(sha1_s390_fini);
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 9b853809a492..b74ff158108c 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
module_init(sha256_s390_init);
module_exit(sha256_s390_fini);
-MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 32a81383b69c..0c36989ba182 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
}
};
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha512");
static int sha384_init(struct shash_desc *desc)
{
@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
}
};
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha384");
static int __init init(void)
{
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 773f86676588..c631f98fd524 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += clkdev.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index b5dce6544d76..8d724718ec21 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -24,11 +24,14 @@
#define rmb() mb()
#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
+#define dma_rmb() rmb()
+#define dma_wmb() wmb()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..6259895fcd97 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
#include <linux/types.h>
#include <linux/bug.h>
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
-{
- unsigned long addr, old;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 4:
- asm volatile(
- " l %0,%3\n"
- "0: cs %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) ptr)
- : "d" (x), "Q" (*(int *) ptr)
- : "memory", "cc");
- return old;
-#ifdef CONFIG_64BIT
- case 8:
- asm volatile(
- " lg %0,%3\n"
- "0: csg %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=m" (*(long *) ptr)
- : "d" (x), "Q" (*(long *) ptr)
- : "memory", "cc");
- return old;
-#endif /* CONFIG_64BIT */
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
- __ret; \
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __o = (o); \
+ __typeof__(*(ptr)) __n = (n); \
+ (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
})
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long addr, prev, tmp;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
- : "d" ((old & 0xff) << shift),
- "d" ((new & 0xff) << shift),
- "d" (~(0xff << shift))
- : "memory", "cc");
- return prev >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
- : "d" ((old & 0xffff) << shift),
- "d" ((new & 0xffff) << shift),
- "d" (~(0xffff << shift))
- : "memory", "cc");
- return prev >> shift;
- case 4:
- asm volatile(
- " cs %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(int *) ptr)
- : "0" (old), "d" (new), "Q" (*(int *) ptr)
- : "memory", "cc");
- return prev;
-#ifdef CONFIG_64BIT
- case 8:
- asm volatile(
- " csg %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(long *) ptr)
- : "0" (old), "d" (new), "Q" (*(long *) ptr)
- : "memory", "cc");
- return prev;
-#endif /* CONFIG_64BIT */
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- __ret; \
-})
+#define cmpxchg64 cmpxchg
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg
-#ifdef CONFIG_64BIT
-#define cmpxchg64(ptr, o, n) \
+#define xchg(ptr, x) \
({ \
- cmpxchg((ptr), (o), (n)); \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old; \
+ do { \
+ __old = *__ptr; \
+ } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
+ __old; \
})
-#else /* CONFIG_64BIT */
-static inline unsigned long long __cmpxchg64(void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- register_pair rp_old = {.pair = old};
- register_pair rp_new = {.pair = new};
- unsigned long long *ullptr = ptr;
- asm volatile(
- " cds %0,%2,%1"
- : "+d" (rp_old), "+Q" (*ullptr)
- : "d" (rp_new)
- : "memory", "cc");
- return rp_old.pair;
-}
-
-#define cmpxchg64(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)); \
- __ret; \
-})
-#endif /* CONFIG_64BIT */
+#define __HAVE_ARCH_CMPXCHG
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
#define system_has_cmpxchg_double() 1
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(void *ptr,
- unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- case 2:
- case 4:
-#ifdef CONFIG_64BIT
- case 8:
-#endif
- return __cmpxchg(ptr, old, new, size);
- default:
- return __cmpxchg_local_generic(ptr, old, new, size);
- }
-
- return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
-})
-
-#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
-
#endif /* __ASM_CMPXCHG_H */
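The cmpxchg.h rewrite above replaces the hand-written compare-and-swap assembler with the compiler's __sync atomic builtins. As a user-space sketch (the function name is made up for illustration), the retry loop that the new xchg() macro is built on looks like this:

#include <stdio.h>

static unsigned int sketch_xchg(unsigned int *ptr, unsigned int new_val)
{
	unsigned int old;

	do {
		/* snapshot the current value, then try to install new_val
		 * atomically; retry if another thread changed *ptr meanwhile */
		old = *ptr;
	} while (!__sync_bool_compare_and_swap(ptr, old, new_val));

	return old;	/* the replaced value, as xchg() returns it */
}

int main(void)
{
	unsigned int v = 1, old;

	old = sketch_xchg(&v, 5);
	printf("old %u, now %u\n", old, v);
	return 0;
}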
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f8c196984853..b91e960e4045 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <asm/div64.h>
+#define CPUTIME_PER_USEC 4096ULL
+#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
@@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
*/
static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{
- return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
}
static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
- return (__force cputime_t)(jif * (4096000000ULL / HZ));
+ return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
}
static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
{
unsigned long long jif = (__force unsigned long long) cputime;
- do_div(jif, 4096000000ULL / HZ);
+ do_div(jif, CPUTIME_PER_SEC / HZ);
return jif;
}
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
- return (__force cputime64_t)(jif * (4096000000ULL / HZ));
+ return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
}
/*
@@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
static inline cputime_t usecs_to_cputime(const unsigned int m)
{
- return (__force cputime_t)(m * 4096ULL);
+ return (__force cputime_t)(m * CPUTIME_PER_USEC);
}
#define usecs_to_cputime64(m) usecs_to_cputime(m)
@@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
*/
static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
- return __div((__force unsigned long long) cputime, 2048000000) >> 1;
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
}
static inline cputime_t secs_to_cputime(const unsigned int s)
{
- return (__force cputime_t)(s * 4096000000ULL);
+ return (__force cputime_t)(s * CPUTIME_PER_SEC);
}
/*
@@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
*/
static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
- unsigned long long ret = value->tv_sec * 4096000000ULL;
- return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
}
static inline void cputime_to_timespec(const cputime_t cputime,
@@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime,
register_pair rp;
rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
- value->tv_nsec = rp.subreg.even * 1000 / 4096;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
+ value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
- value->tv_sec = __cputime / 4096000000ULL;
+ value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
#endif
}
@@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
*/
static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
- unsigned long long ret = value->tv_sec * 4096000000ULL;
- return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
}
static inline void cputime_to_timeval(const cputime_t cputime,
@@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime,
register_pair rp;
rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
- value->tv_usec = rp.subreg.even / 4096;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
+ value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = (__cputime % 4096000000ULL) / 4096;
- value->tv_sec = __cputime / 4096000000ULL;
+ value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
#endif
}
@@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, 4096000000ULL / USER_HZ);
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
return clock;
}
static inline cputime_t clock_t_to_cputime(unsigned long x)
{
- return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
+ return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
}
/*
@@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, 4096000000ULL / USER_HZ);
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
return clock;
}
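A quick sanity check on the constants introduced above: the s390 CPU timer counts in units of 2^-12 microseconds, so CPUTIME_PER_USEC is 4096 and CPUTIME_PER_SEC is 4096 * 1000000 = 4096000000, the magic number the old code spelled out everywhere. A trivial user-space sketch of the conversion arithmetic (illustrative only):

#include <stdio.h>

#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_SEC  (CPUTIME_PER_USEC * 1000000ULL)

int main(void)
{
	/* 3 seconds plus 250 microseconds, expressed in CPU-timer units */
	unsigned long long cputime = 3 * CPUTIME_PER_SEC + 250 * CPUTIME_PER_USEC;

	printf("%llu s\n", cputime / CPUTIME_PER_SEC);				/* 3 */
	printf("%llu us\n", (cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC);	/* 250 */
	return 0;
}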
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
__attribute__ ((format(printf, 3, 4)));
+#define debug_sprintf_event(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_event(__id, __level, \
+ _fmt, ## __VA_ARGS__); \
+ __ret; \
+})
static inline debug_entry_t*
debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
__attribute__ ((format(printf, 3, 4)));
+#define debug_sprintf_exception(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_exception(__id, __level, \
+ _fmt, ## __VA_ARGS__);\
+ __ret; \
+})
+
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
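The reason debug_sprintf_event() and debug_sprintf_exception() become macros above is that their format arguments are no longer evaluated when the level check fails. A compact illustration of that pattern in plain C (struct dbg_info, __dbg_event() and dbg_event() are hypothetical names, and the statement expression is a GCC extension):

#include <stdarg.h>
#include <stdio.h>

struct dbg_info { int level; };

/* varargs back end, only reached once the level check has passed */
static int __dbg_event(struct dbg_info *id, int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("[%d] ", level);
	vprintf(fmt, ap);
	va_end(ap);
	return 1;
}

/*
 * Statement-expression wrapper: if the check fails, the arguments
 * (which may be expensive calls) are never evaluated at all.
 */
#define dbg_event(_id, _level, _fmt, ...)				\
({									\
	struct dbg_info *__id = (_id);					\
	int __level = (_level);						\
	int __ret = 0;							\
	if (__id && __level <= __id->level)				\
		__ret = __dbg_event(__id, __level, _fmt, ## __VA_ARGS__); \
	__ret;								\
})

int main(void)
{
	struct dbg_info info = { .level = 1 };

	dbg_event(&info, 3, "suppressed, arguments never evaluated\n");
	dbg_event(&info, 1, "logged: %d\n", 42);
	return 0;
}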
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..abb618f1ead2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,69 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE 24
+#define MCOUNT_RETURN_FIXUP 18
+
#ifndef __ASSEMBLY__
-extern void _mcount(void);
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
struct dyn_arch_ftrace { };
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#define KPROBE_ON_FTRACE_NOP 0
+#define KPROBE_ON_FTRACE_CALL 1
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ /* jg .+24 */
+ insn->opc = 0xc0f4;
+ insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
-#define MCOUNT_INSN_SIZE 18
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ if (insn->disp == MCOUNT_INSN_SIZE / 2)
+ return 1;
+#endif
+ return 0;
+}
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+ unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ unsigned long target;
+
+ /* brasl r0,ftrace_caller */
+ target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+ insn->opc = 0xc005;
+ insn->disp = (target - ip) / 2;
+#endif
+}
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6af037f574b8..113cd963dbbe 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -9,9 +9,10 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/seqlock.h>
struct s390_idle_data {
- unsigned int sequence;
+ seqcount_t seqcount;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 6ad9013c67e7..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -39,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
{
}
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
/*
* s390 needs a private implementation of pci_iomap since ioremap with its
* offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index b0d5f0a97a01..343ea7c987aa 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
-#define EXT_INTERRUPT 1
-#define IO_INTERRUPT 2
-#define THIN_INTERRUPT 3
+#define EXT_INTERRUPT 0
+#define IO_INTERRUPT 1
+#define THIN_INTERRUPT 2
-#define NR_IRQS_BASE 4
+#define NR_IRQS_BASE 3
#ifdef CONFIG_PCI_NR_MSI
# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
# define NR_IRQS NR_IRQS_BASE
#endif
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ 0
-
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
#define EXT_IRQ_CLK_COMP 0x1004
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
struct arch_specific_insn {
/* copy of original instruction */
kprobe_opcode_t *insn;
+ unsigned int is_ftrace_insn : 1;
};
struct prev_kprobe {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2175f911a73a..9cba74d5d853 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -123,7 +123,7 @@ struct kvm_s390_sie_block {
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
__u8 icptcode; /* 0x0050 */
- __u8 reserved51; /* 0x0051 */
+ __u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54[2]; /* 0x0054 */
__u16 ipa; /* 0x0056 */
@@ -226,10 +226,17 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_sense_running;
u32 instruction_sigp_external_call;
u32 instruction_sigp_emergency;
+ u32 instruction_sigp_cond_emergency;
+ u32 instruction_sigp_start;
u32 instruction_sigp_stop;
+ u32 instruction_sigp_stop_store_status;
+ u32 instruction_sigp_store_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
+ u32 instruction_sigp_init_cpu_reset;
+ u32 instruction_sigp_cpu_reset;
+ u32 instruction_sigp_unknown;
u32 diagnose_10;
u32 diagnose_44;
u32 diagnose_9c;
@@ -288,6 +295,79 @@ struct kvm_vcpu_stat {
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
+/* irq types in order of priority */
+enum irq_types {
+ IRQ_PEND_MCHK_EX = 0,
+ IRQ_PEND_SVC,
+ IRQ_PEND_PROG,
+ IRQ_PEND_MCHK_REP,
+ IRQ_PEND_EXT_IRQ_KEY,
+ IRQ_PEND_EXT_MALFUNC,
+ IRQ_PEND_EXT_EMERGENCY,
+ IRQ_PEND_EXT_EXTERNAL,
+ IRQ_PEND_EXT_CLOCK_COMP,
+ IRQ_PEND_EXT_CPU_TIMER,
+ IRQ_PEND_EXT_TIMING,
+ IRQ_PEND_EXT_SERVICE,
+ IRQ_PEND_EXT_HOST,
+ IRQ_PEND_PFAULT_INIT,
+ IRQ_PEND_PFAULT_DONE,
+ IRQ_PEND_VIRTIO,
+ IRQ_PEND_IO_ISC_0,
+ IRQ_PEND_IO_ISC_1,
+ IRQ_PEND_IO_ISC_2,
+ IRQ_PEND_IO_ISC_3,
+ IRQ_PEND_IO_ISC_4,
+ IRQ_PEND_IO_ISC_5,
+ IRQ_PEND_IO_ISC_6,
+ IRQ_PEND_IO_ISC_7,
+ IRQ_PEND_SIGP_STOP,
+ IRQ_PEND_RESTART,
+ IRQ_PEND_SET_PREFIX,
+ IRQ_PEND_COUNT
+};
+
+/*
+ * Repressible (non-floating) machine check interrupts
+ * subclass bits in MCIC
+ */
+#define MCHK_EXTD_BIT 58
+#define MCHK_DEGR_BIT 56
+#define MCHK_WARN_BIT 55
+#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
+ (1UL << MCHK_EXTD_BIT) | \
+ (1UL << MCHK_WARN_BIT))
+
+/* Exigent machine check interrupts subclass bits in MCIC */
+#define MCHK_SD_BIT 63
+#define MCHK_PD_BIT 62
+#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
+
+#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \
+ (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
+ (1UL << IRQ_PEND_EXT_CPU_TIMER) | \
+ (1UL << IRQ_PEND_EXT_MALFUNC) | \
+ (1UL << IRQ_PEND_EXT_EMERGENCY) | \
+ (1UL << IRQ_PEND_EXT_EXTERNAL) | \
+ (1UL << IRQ_PEND_EXT_TIMING) | \
+ (1UL << IRQ_PEND_EXT_HOST) | \
+ (1UL << IRQ_PEND_EXT_SERVICE) | \
+ (1UL << IRQ_PEND_VIRTIO) | \
+ (1UL << IRQ_PEND_PFAULT_INIT) | \
+ (1UL << IRQ_PEND_PFAULT_DONE))
+
+#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
+ (1UL << IRQ_PEND_IO_ISC_1) | \
+ (1UL << IRQ_PEND_IO_ISC_2) | \
+ (1UL << IRQ_PEND_IO_ISC_3) | \
+ (1UL << IRQ_PEND_IO_ISC_4) | \
+ (1UL << IRQ_PEND_IO_ISC_5) | \
+ (1UL << IRQ_PEND_IO_ISC_6) | \
+ (1UL << IRQ_PEND_IO_ISC_7))
+
+#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
+ (1UL << IRQ_PEND_MCHK_EX))
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -306,14 +386,25 @@ struct kvm_s390_interrupt_info {
#define ACTION_STORE_ON_STOP (1<<0)
#define ACTION_STOP_ON_STOP (1<<1)
+struct kvm_s390_irq_payload {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_mchk_info mchk;
+};
+
struct kvm_s390_local_interrupt {
spinlock_t lock;
- struct list_head list;
- atomic_t active;
struct kvm_s390_float_interrupt *float_int;
wait_queue_head_t *wq;
atomic_t *cpuflags;
unsigned int action_bits;
+ DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
+ struct kvm_s390_irq_payload irq;
+ unsigned long pending_irqs;
};
struct kvm_s390_float_interrupt {
@@ -434,6 +525,8 @@ struct kvm_arch{
int user_cpu_state_ctrl;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
+ int ipte_lock_count;
+ struct mutex ipte_mutex;
spinlock_t start_stop_lock;
struct kvm_s390_crypto crypto;
};
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
__u32 softirq_pending; /* 0x02ec */
__u32 percpu_offset; /* 0x02f0 */
__u32 machine_flags; /* 0x02f4 */
- __u32 ftrace_func; /* 0x02f8 */
+ __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
__u32 spinlock_lockval; /* 0x02fc */
__u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
- __u64 ftrace_func; /* 0x0390 */
+ __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..ef803c202d42 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -50,10 +50,6 @@ struct zpci_fmb {
atomic64_t unmapped_pages;
} __packed __aligned(16);
-#define ZPCI_MSI_VEC_BITS 11
-#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
-#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
-
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
ZPCI_FN_STATE_STANDBY,
@@ -90,6 +86,7 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
+ unsigned int max_msi; /* maximum number of MSI's */
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned int aisb; /* number of the summary bit */
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..f664e96f48c7 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;
while (n > 0) {
- size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+ size = zpci_get_max_write_size((u64 __force) src,
+ (u64) dst, n, 8);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
rc = zpci_read_single(req, dst, offset, size);
if (rc)
@@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;
while (n > 0) {
- size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+ size = zpci_get_max_write_size((u64 __force) dst,
+ (u64) src, n, 128);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
if (size > 8) /* main path */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d39a31c3cdf2..3009c2ba46d2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,10 +22,9 @@ unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
- bool init_skey);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57c882761dea..5e102422c9ab 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
#define MODULES_LEN (1UL << 31)
#endif
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+ BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+ if (addr < (void *)MODULES_VADDR)
+ return 0;
+ if (addr > (void *)MODULES_END)
+ return 0;
+#endif
+ return 1;
+}
+
/*
* A 31 bit pagetable entry of S390 has following format:
* | PFRA | | OS |
@@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
return pmd;
}
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp, int full)
+{
+ pmd_t pmd = *pmdp;
+
+ if (!full)
+ pmdp_flush_lazy(mm, address, pmdp);
+ pmd_clear(pmdp);
+ return pmd;
+}
+
#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
-extern void s390_enable_skey(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
/*
* No page table caches to initialise
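
Note: two of the pgtable.h additions above deserve a remark. mm_forbids_zeropage keeps read faults from mapping the shared zero page once a guest enables storage keys (each page then needs its own key), and pmdp_get_and_clear_full skips the per-entry lazy flush when the whole address space is being torn down, since one final flush covers everything. A standalone model of that second pattern, with invented names:

    #include <stdio.h>

    struct entry { unsigned long val; };

    static void flush_one(struct entry *e)
    {
        printf("per-entry flush of %p\n", (void *)e);  /* stand-in for a TLB flush */
    }

    /*
     * Clear an entry and return the old value; pay for a per-entry flush only
     * when this is not a full teardown (the caller flushes everything once).
     */
    static unsigned long clear_entry(struct entry *e, int full)
    {
        unsigned long old = e->val;

        if (!full)
            flush_one(e);
        e->val = 0;
        return old;
    }

    int main(void)
    {
        struct entry e = { .val = 42 };

        clear_entry(&e, 1);  /* full teardown: no per-entry flush */
        clear_entry(&e, 0);  /* single clear: flush this entry now */
        return 0;
    }
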
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d559bdb03d18..bed05ea7ec27 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,8 +217,6 @@ static inline unsigned short stap(void)
*/
static inline void cpu_relax(void)
{
- if (MACHINE_HAS_DIAG44)
- asm volatile("diag 0,0,68");
barrier();
}
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 49576115dbb7..fad4ae23ece0 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -10,6 +10,7 @@
#define SIGP_RESTART 6
#define SIGP_STOP_AND_STORE_STATUS 9
#define SIGP_INITIAL_CPU_RESET 11
+#define SIGP_CPU_RESET 12
#define SIGP_SET_PREFIX 13
#define SIGP_STORE_STATUS_AT_ADDRESS 14
#define SIGP_SET_ARCHITECTURE 18
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d6bdf906caa5..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
- unsigned int old_expected = old;
-
- asm volatile(
- " cs %0,%3,%1"
- : "=d" (old), "=Q" (*lock)
- : "0" (old), "d" (new), "Q" (*lock)
- : "cc", "memory" );
- return old == old_expected;
+ return __sync_bool_compare_and_swap(lock, old, new);
}
/*
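
Note: the open-coded `cs` inline assembly in _raw_compare_and_swap is replaced by GCC's __sync_bool_compare_and_swap builtin, which emits the same compare-and-swap and returns true when the stored value matched the expected one, making the comparison against old_expected redundant. A small standalone illustration of the builtin's semantics, in plain C rather than kernel code:

    #include <stdio.h>

    int main(void)
    {
        unsigned int lock = 0;

        /* Succeeds: lock == 0, so 1 is stored atomically and true is returned. */
        int first = __sync_bool_compare_and_swap(&lock, 0, 1);

        /* Fails: lock is now 1, not 0, so nothing is stored and false is returned. */
        int second = __sync_bool_compare_and_swap(&lock, 0, 1);

        printf("first=%d second=%d lock=%u\n", first, second, lock);
        return 0;
    }
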
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 572c59949004..06d8741ad6f4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
#endif
}
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index e031332096d7..296942d56e6a 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -86,4 +86,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _ASM_SOCKET_H */
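
Note: SO_INCOMING_CPU and SO_ATTACH_BPF/SO_DETACH_BPF mirror the new generic socket options; the s390 header only picks up the common numbering. As a rough illustration of how SO_ATTACH_BPF is meant to be used from user space (hedged sketch: prog_fd is assumed to be an already-loaded BPF_PROG_TYPE_SOCKET_FILTER program obtained via the bpf() syscall, and the helper is not a complete program):

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_ATTACH_BPF
    #define SO_ATTACH_BPF 50    /* value added by this patch */
    #endif

    static int attach_bpf_filter(int sock_fd, int prog_fd)
    {
        /* The kernel takes the program fd here, not a classic sock_fprog. */
        if (setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
                       &prog_fd, sizeof(prog_fd)) < 0) {
            perror("setsockopt(SO_ATTACH_BPF)");
            return -1;
        }
        return 0;
    }
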
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4197c89c52d4..2b446cf0cc65 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -287,7 +287,9 @@
#define __NR_getrandom 349
#define __NR_memfd_create 350
#define __NR_bpf 351
-#define NR_syscalls 352
+#define __NR_s390_pci_mmio_write 352
+#define __NR_s390_pci_mmio_read 353
+#define NR_syscalls 354
/*
* There are some system calls that are not present on 64 bit, some
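
Note: the two new arch-specific system calls, s390_pci_mmio_write (352) and s390_pci_mmio_read (353), copy data between a user buffer and a PCI MMIO address, because PCI MMIO on s390 is accessed through special instructions rather than ordinary loads and stores. A hedged user-space sketch of the call shape (the address below is a placeholder, so the call is expected to fail; it only shows the argument order):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_s390_pci_mmio_read
    #define __NR_s390_pci_mmio_read 353    /* number reserved by this patch */
    #endif

    int main(void)
    {
        unsigned char buf[4];
        unsigned long mmio_addr = 0;    /* placeholder for a real mapped bar address */

        long rc = syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, sizeof(buf));
        if (rc < 0)
            perror("s390_pci_mmio_read");
        return 0;
    }
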
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ef279a136801..e07e91605353 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -17,8 +17,8 @@
* Make sure that the compiler is new enough. We want a compiler that
* is known to work with the "Q" assembler constraint.
*/
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
#endif
int main(void)
@@ -156,7 +156,6 @@ int main(void)
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
- DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
BLANK();
DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ca38139423ae..437e61159279 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -249,7 +249,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
struct group_info *group_info;
int retval;
- if (!capable(CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
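
Note: switching the 16-bit setgroups compat path from capable(CAP_SETGID) to may_setgroups() brings it in line with the generic sys_setgroups() hardening for user namespaces: once setgroups has been denied for a namespace via /proc/<pid>/setgroups, dropping supplementary groups this way is refused too. The standalone program below demonstrates that generic behaviour, which the compat path now also honours; it illustrates the semantics, not this function's internals:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <grp.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        if (unshare(CLONE_NEWUSER) < 0) {      /* enter a fresh user namespace */
            perror("unshare");
            return 1;
        }

        int fd = open("/proc/self/setgroups", O_WRONLY);
        if (fd >= 0) {
            (void)write(fd, "deny", 4);        /* forbid setgroups in this ns */
            close(fd);
        }

        gid_t gid = 0;
        if (setgroups(1, &gid) < 0)            /* now rejected with EPERM */
            perror("setgroups");
        return 0;
    }
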
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 009f5eb11125..34d5fa7b01b5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
/* Signal frames without vectors registers are short ! */
- __u16 __user *svc = (void *) frame + frame_size - 2;
+ __u16 __user *svc = (void __user *) frame + frame_size - 2;
if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
return -EFAULT;
restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index c4f7a3d655b8..d7fa2f0f1425 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char
COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ee8390da6ea7..c1f21aca76e7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1019,7 +1019,7 @@ debug_count_numargs(char *string)
*/
debug_entry_t*
-debug_sprintf_event(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
{
va_list ap;
int numargs,idx;
@@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
- if((!id) || (level > id->level))
- return NULL;
if (!debug_active || !id->areas)
return NULL;
numargs=debug_count_numargs(string);
@@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
return active;
}
-EXPORT_SYMBOL(debug_sprintf_event);
+EXPORT_SYMBOL(__debug_sprintf_event);
/*
* debug_sprintf_exception:
*/
debug_entry_t*
-debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
{
va_list ap;
int numargs,idx;
@@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
- if((!id) || (level > id->level))
- return NULL;
if (!debug_active || !id->areas)
return NULL;
@@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
return active;
}
-EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(__debug_sprintf_exception);
/*
* debug_register_view:
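
Note: debug_sprintf_event() and debug_sprintf_exception() gain a double-underscore prefix and lose their in-function level check, which strongly suggests the check moved to a caller-side wrapper in debug.h so that disabled trace levels no longer cost a function call and argument evaluation. That header change is not part of these hunks, so the sketch below only models the pattern with invented names:

    #include <stdio.h>

    struct dbg_info { int level; };

    /* Back end that would format and store the event; here it just prints. */
    static int __dbg_event(struct dbg_info *id, int level, const char *msg)
    {
        printf("level %d event: %s\n", level, msg);
        return 1;
    }

    /* Caller-side check: skip the call entirely when the level is disabled. */
    #define dbg_event(id, level, msg) \
        (((id) && (level) <= (id)->level) ? __dbg_event(id, level, msg) : 0)

    int main(void)
    {
        struct dbg_info info = { .level = 2 };

        dbg_event(&info, 1, "kept");     /* 1 <= 2: forwarded to the back end */
        dbg_event(&info, 5, "dropped");  /* 5 >  2: back end never called */
        return 0;
    }
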
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index acb412442e5e..a99852e96a77 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str)
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
- printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+ printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+ regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cef2879edff3..302ac1f7f8e7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/pfn.h>
@@ -490,8 +489,5 @@ void __init startup_init(void)
detect_machine_facilities();
setup_topology();
sclp_early_detect();
-#ifdef CONFIG_DYNAMIC_FTRACE
- S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
-#endif
lockdep_on();
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 70203265196f..398329b2b518 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lhardirqs_on)
+ l %r1,BASED(.Lc_hardirqs_on)
basr %r14,%r1 # call trace_hardirqs_on_caller
#endif
.endm
@@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lhardirqs_off)
+ l %r1,BASED(.Lc_hardirqs_off)
basr %r14,%r1 # call trace_hardirqs_off_caller
#endif
.endm
@@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
#ifdef CONFIG_LOCKDEP
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
- l %r1,BASED(.Llockdep_sys_exit)
+ l %r1,BASED(.Lc_lockdep_sys_exit)
basr %r14,%r1 # call lockdep_sys_exit
#endif
.endm
@@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP)
tmh %r8,0x0001 # interrupting from user ?
jnz 1f
lr %r14,%r9
- sl %r14,BASED(.Lcritical_start)
- cl %r14,BASED(.Lcritical_length)
+ sl %r14,BASED(.Lc_critical_start)
+ cl %r14,BASED(.Lc_critical_length)
jhe 0f
la %r11,\savearea # inside critical section, do cleanup
bras %r14,cleanup_critical
@@ -162,7 +162,7 @@ ENTRY(__switch_to)
lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
-__critical_start:
+.L__critical_start:
/*
* SVC interrupt handler routine. System calls are synchronous events and
* are executed with interrupts enabled.
@@ -170,145 +170,145 @@ __critical_start:
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-sysc_stm:
+.Lsysc_stm:
stm %r8,%r15,__LC_SAVE_AREA_SYNC
l %r12,__LC_THREAD_INFO
l %r13,__LC_SVC_NEW_PSW+4
lhi %r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
l %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
st %r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
- jnz sysc_nr_ok
+ jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
- jnl sysc_nr_ok
+ jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
lr %r8,%r1
sla %r8,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
l %r9,0(%r8,%r10) # get system call addr.
tm __TI_flags+3(%r12),_TIF_TRACE
- jnz sysc_tracesys
+ jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
st %r2,__PT_R2(%r11) # store return value
-sysc_return:
+.Lsysc_return:
LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno sysc_restore
+ jno .Lsysc_restore
tm __PT_FLAGS+3(%r11),_PIF_WORK
- jnz sysc_work
+ jnz .Lsysc_work
tm __TI_flags+3(%r12),_TIF_WORK
- jnz sysc_work # check for thread work
+ jnz .Lsysc_work # check for thread work
tm __LC_CPU_FLAGS+3,_CIF_WORK
- jnz sysc_work
-sysc_restore:
+ jnz .Lsysc_work
+.Lsysc_restore:
mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:
#
# One of the work bits is on. Find out which one.
#
-sysc_work:
+.Lsysc_work:
tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
- jo sysc_mcck_pending
+ jo .Lsysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- jo sysc_reschedule
+ jo .Lsysc_reschedule
tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
- jo sysc_singlestep
+ jo .Lsysc_singlestep
tm __TI_flags+3(%r12),_TIF_SIGPENDING
- jo sysc_sigpending
+ jo .Lsysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
- jo sysc_notify_resume
+ jo .Lsysc_notify_resume
tm __LC_CPU_FLAGS+3,_CIF_ASCE
- jo sysc_uaccess
- j sysc_return # beware of critical section cleanup
+ jo .Lsysc_uaccess
+ j .Lsysc_return # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
#
-sysc_reschedule:
- l %r1,BASED(.Lschedule)
- la %r14,BASED(sysc_return)
+.Lsysc_reschedule:
+ l %r1,BASED(.Lc_schedule)
+ la %r14,BASED(.Lsysc_return)
br %r1 # call schedule
#
# _CIF_MCCK_PENDING is set, call handler
#
-sysc_mcck_pending:
- l %r1,BASED(.Lhandle_mcck)
- la %r14,BASED(sysc_return)
+.Lsysc_mcck_pending:
+ l %r1,BASED(.Lc_handle_mcck)
+ la %r14,BASED(.Lsysc_return)
br %r1 # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
#
-sysc_uaccess:
+.Lsysc_uaccess:
ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce
- j sysc_return
+ j .Lsysc_return
#
# _TIF_SIGPENDING is set, call do_signal
#
-sysc_sigpending:
+.Lsysc_sigpending:
lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Ldo_signal)
+ l %r1,BASED(.Lc_do_signal)
basr %r14,%r1 # call do_signal
tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
- jno sysc_return
+ jno .Lsysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
- jnl sysc_nr_ok # invalid svc number -> do svc 0
+ jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
sla %r8,2
- j sysc_nr_ok # restart svc
+ j .Lsysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
-sysc_notify_resume:
+.Lsysc_notify_resume:
lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Ldo_notify_resume)
- la %r14,BASED(sysc_return)
+ l %r1,BASED(.Lc_do_notify_resume)
+ la %r14,BASED(.Lsysc_return)
br %r1 # call do_notify_resume
#
# _PIF_PER_TRAP is set, call do_per_trap
#
-sysc_singlestep:
+.Lsysc_singlestep:
ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Ldo_per_trap)
- la %r14,BASED(sysc_return)
+ l %r1,BASED(.Lc_do_per_trap)
+ la %r14,BASED(.Lsysc_return)
br %r1 # call do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
-sysc_tracesys:
- l %r1,BASED(.Ltrace_enter)
+.Lsysc_tracesys:
+ l %r1,BASED(.Lc_trace_enter)
lr %r2,%r11 # pass pointer to pt_regs
la %r3,0
xr %r0,%r0
@@ -316,22 +316,22 @@ sysc_tracesys:
st %r0,__PT_R2(%r11)
basr %r14,%r1 # call do_syscall_trace_enter
cl %r2,BASED(.Lnr_syscalls)
- jnl sysc_tracenogo
+ jnl .Lsysc_tracenogo
lr %r8,%r2
sll %r8,2
l %r9,0(%r8,%r10)
-sysc_tracego:
+.Lsysc_tracego:
lm %r3,%r7,__PT_R3(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
l %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r9 # call sys_xxx
st %r2,__PT_R2(%r11) # store return value
-sysc_tracenogo:
+.Lsysc_tracenogo:
tm __TI_flags+3(%r12),_TIF_TRACE
- jz sysc_return
- l %r1,BASED(.Ltrace_exit)
+ jz .Lsysc_return
+ l %r1,BASED(.Lc_trace_exit)
lr %r2,%r11 # pass pointer to pt_regs
- la %r14,BASED(sysc_return)
+ la %r14,BASED(.Lsysc_return)
br %r1 # call do_syscall_trace_exit
#
@@ -341,18 +341,18 @@ ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
l %r12,__LC_THREAD_INFO
l %r13,__LC_SVC_NEW_PSW+4
- l %r1,BASED(.Lschedule_tail)
+ l %r1,BASED(.Lc_schedule_tail)
basr %r14,%r1 # call schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne sysc_tracenogo
+ jne .Lsysc_tracenogo
# it's a kernel thread
lm %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
basr %r14,%r9
- j sysc_tracenogo
+ j .Lsysc_tracenogo
/*
* Program check handler routine
@@ -369,7 +369,7 @@ ENTRY(pgm_check_handler)
tmh %r8,0x4000 # PER bit set in old PSW ?
jnz 0f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jnz pgm_svcper # -> single stepped svc
+ jnz .Lpgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
@@ -386,42 +386,42 @@ ENTRY(pgm_check_handler)
jz 0f
l %r1,__TI_task(%r12)
tmh %r8,0x0001 # kernel per event ?
- jz pgm_kprobe
+ jz .Lpgm_kprobe
oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
0: REENABLE_IRQS
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Ljump_table)
+ l %r1,BASED(.Lc_jump_table)
la %r10,0x7f
n %r10,__PT_INT_CODE(%r11)
- je sysc_return
+ je .Lsysc_return
sll %r10,2
l %r1,0(%r10,%r1) # load address of handler routine
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
- j sysc_return
+ j .Lsysc_return
#
# PER event in supervisor state, must be kprobes
#
-pgm_kprobe:
+.Lpgm_kprobe:
REENABLE_IRQS
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Ldo_per_trap)
+ l %r1,BASED(.Lc_do_per_trap)
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_per_trap
- j sysc_return
+ j .Lsysc_return
#
# single stepped system call
#
-pgm_svcper:
+.Lpgm_svcper:
mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
- mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+ mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
+ lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
/*
* IO interrupt handler routine
@@ -435,9 +435,9 @@ ENTRY(io_int_handler)
l %r13,__LC_SVC_NEW_PSW+4
lm %r8,%r9,__LC_IO_OLD_PSW
tmh %r8,0x0001 # interrupting from user ?
- jz io_skip
+ jz .Lio_skip
UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-io_skip:
+.Lio_skip:
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -446,35 +446,35 @@ io_skip:
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-io_loop:
- l %r1,BASED(.Ldo_IRQ)
+.Lio_loop:
+ l %r1,BASED(.Lc_do_IRQ)
lr %r2,%r11 # pass pointer to pt_regs
lhi %r3,IO_INTERRUPT
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz io_call
+ jz .Lio_call
lhi %r3,THIN_INTERRUPT
-io_call:
+.Lio_call:
basr %r14,%r1 # call do_IRQ
tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
- jz io_return
+ jz .Lio_return
tpi 0
- jz io_return
+ jz .Lio_return
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j io_loop
-io_return:
+ j .Lio_loop
+.Lio_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
-io_tif:
+.Lio_tif:
tm __TI_flags+3(%r12),_TIF_WORK
- jnz io_work # there is work to do (signals etc.)
+ jnz .Lio_work # there is work to do (signals etc.)
tm __LC_CPU_FLAGS+3,_CIF_WORK
- jnz io_work
-io_restore:
+ jnz .Lio_work
+.Lio_restore:
mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_PSW
-io_done:
+.Lio_done:
#
# There is work todo, find out in which context we have been interrupted:
@@ -483,15 +483,15 @@ io_done:
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
-io_work:
+.Lio_work:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo io_work_user # yes -> do resched & signal
+ jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- jnz io_restore # preemption disabled
+ jnz .Lio_restore # preemption disabled
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- jno io_restore
+ jno .Lio_restore
# switch to kernel stack
l %r1,__PT_R15(%r11)
ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -499,20 +499,20 @@ io_work:
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1
- # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_ON already done at .Lio_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
- l %r1,BASED(.Lpreempt_irq)
+ l %r1,BASED(.Lc_preempt_irq)
basr %r14,%r1 # call preempt_schedule_irq
- j io_return
+ j .Lio_return
#else
- j io_restore
+ j .Lio_restore
#endif
#
# Need to do work before returning to userspace, switch to kernel stack
#
-io_work_user:
+.Lio_work_user:
l %r1,__LC_KERNEL_STACK
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -522,74 +522,74 @@ io_work_user:
#
# One of the work bits is on. Find out which one.
#
-io_work_tif:
+.Lio_work_tif:
tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
- jo io_mcck_pending
+ jo .Lio_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- jo io_reschedule
+ jo .Lio_reschedule
tm __TI_flags+3(%r12),_TIF_SIGPENDING
- jo io_sigpending
+ jo .Lio_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
- jo io_notify_resume
+ jo .Lio_notify_resume
tm __LC_CPU_FLAGS+3,_CIF_ASCE
- jo io_uaccess
- j io_return # beware of critical section cleanup
+ jo .Lio_uaccess
+ j .Lio_return # beware of critical section cleanup
#
# _CIF_MCCK_PENDING is set, call handler
#
-io_mcck_pending:
- # TRACE_IRQS_ON already done at io_return
- l %r1,BASED(.Lhandle_mcck)
+.Lio_mcck_pending:
+ # TRACE_IRQS_ON already done at .Lio_return
+ l %r1,BASED(.Lc_handle_mcck)
basr %r14,%r1 # TIF bit will be cleared by handler
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _CIF_ASCE is set, load user space asce
#
-io_uaccess:
+.Lio_uaccess:
ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce
- j io_return
+ j .Lio_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
-io_reschedule:
- # TRACE_IRQS_ON already done at io_return
- l %r1,BASED(.Lschedule)
+.Lio_reschedule:
+ # TRACE_IRQS_ON already done at .Lio_return
+ l %r1,BASED(.Lc_schedule)
ssm __LC_SVC_NEW_PSW # reenable interrupts
basr %r14,%r1 # call scheduler
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _TIF_SIGPENDING is set, call do_signal
#
-io_sigpending:
- # TRACE_IRQS_ON already done at io_return
- l %r1,BASED(.Ldo_signal)
+.Lio_sigpending:
+ # TRACE_IRQS_ON already done at .Lio_return
+ l %r1,BASED(.Lc_do_signal)
ssm __LC_SVC_NEW_PSW # reenable interrupts
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_signal
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _TIF_SIGPENDING is set, call do_signal
#
-io_notify_resume:
- # TRACE_IRQS_ON already done at io_return
- l %r1,BASED(.Ldo_notify_resume)
+.Lio_notify_resume:
+ # TRACE_IRQS_ON already done at .Lio_return
+ l %r1,BASED(.Lc_do_notify_resume)
ssm __LC_SVC_NEW_PSW # reenable interrupts
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
/*
* External interrupt handler routine
@@ -603,9 +603,9 @@ ENTRY(ext_int_handler)
l %r13,__LC_SVC_NEW_PSW+4
lm %r8,%r9,__LC_EXT_OLD_PSW
tmh %r8,0x0001 # interrupting from user ?
- jz ext_skip
+ jz .Lext_skip
UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-ext_skip:
+.Lext_skip:
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -614,29 +614,29 @@ ext_skip:
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- l %r1,BASED(.Ldo_IRQ)
+ l %r1,BASED(.Lc_do_IRQ)
lr %r2,%r11 # pass pointer to pt_regs
lhi %r3,EXT_INTERRUPT
basr %r14,%r1 # call do_IRQ
- j io_return
+ j .Lio_return
/*
- * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
st %r3,__SF_EMPTY(%r15)
basr %r1,0
- la %r1,psw_idle_lpsw+4-.(%r1)
+ la %r1,.Lpsw_idle_lpsw+4-.(%r1)
st %r1,__SF_EMPTY+4(%r15)
oi __SF_EMPTY+4(%r15),0x80
stck __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
-psw_idle_lpsw:
+.Lpsw_idle_lpsw:
lpsw __SF_EMPTY(%r15)
br %r14
-psw_idle_end:
+.Lpsw_idle_end:
-__critical_end:
+.L__critical_end:
/*
* Machine check handler routines
@@ -650,7 +650,7 @@ ENTRY(mcck_int_handler)
l %r13,__LC_SVC_NEW_PSW+4
lm %r8,%r9,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
- jo mcck_panic # yes -> rest of mcck code invalid
+ jo .Lmcck_panic # yes -> rest of mcck code invalid
la %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -668,22 +668,22 @@ ENTRY(mcck_int_handler)
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno mcck_panic # no -> skip cleanup critical
+ jno .Lmcck_panic # no -> skip cleanup critical
tm %r8,0x0001 # interrupting from user ?
- jz mcck_skip
+ jz .Lmcck_skip
UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
-mcck_skip:
+.Lmcck_skip:
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
stm %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Ldo_machine_check)
+ l %r1,BASED(.Lc_do_machine_check)
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call s390_do_machine_check
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno mcck_return
+ jno .Lmcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -691,12 +691,12 @@ mcck_skip:
lr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
- jno mcck_return
+ jno .Lmcck_return
TRACE_IRQS_OFF
- l %r1,BASED(.Lhandle_mcck)
+ l %r1,BASED(.Lc_handle_mcck)
basr %r14,%r1 # call s390_handle_mcck
TRACE_IRQS_ON
-mcck_return:
+.Lmcck_return:
mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
@@ -706,15 +706,15 @@ mcck_return:
0: lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_MCCK_PSW
-mcck_panic:
+.Lmcck_panic:
l %r14,__LC_PANIC_STACK
slr %r14,%r15
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
- j mcck_skip
+ j .Lmcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j mcck_skip
+ j .Lmcck_skip
#
# PSW restart interrupt handler
@@ -764,58 +764,58 @@ stack_overflow:
1: .long kernel_stack_overflow
#endif
-cleanup_table:
+.Lcleanup_table:
.long system_call + 0x80000000
- .long sysc_do_svc + 0x80000000
- .long sysc_tif + 0x80000000
- .long sysc_restore + 0x80000000
- .long sysc_done + 0x80000000
- .long io_tif + 0x80000000
- .long io_restore + 0x80000000
- .long io_done + 0x80000000
+ .long .Lsysc_do_svc + 0x80000000
+ .long .Lsysc_tif + 0x80000000
+ .long .Lsysc_restore + 0x80000000
+ .long .Lsysc_done + 0x80000000
+ .long .Lio_tif + 0x80000000
+ .long .Lio_restore + 0x80000000
+ .long .Lio_done + 0x80000000
.long psw_idle + 0x80000000
- .long psw_idle_end + 0x80000000
+ .long .Lpsw_idle_end + 0x80000000
cleanup_critical:
- cl %r9,BASED(cleanup_table) # system_call
+ cl %r9,BASED(.Lcleanup_table) # system_call
jl 0f
- cl %r9,BASED(cleanup_table+4) # sysc_do_svc
- jl cleanup_system_call
- cl %r9,BASED(cleanup_table+8) # sysc_tif
+ cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
+ jl .Lcleanup_system_call
+ cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
jl 0f
- cl %r9,BASED(cleanup_table+12) # sysc_restore
- jl cleanup_sysc_tif
- cl %r9,BASED(cleanup_table+16) # sysc_done
- jl cleanup_sysc_restore
- cl %r9,BASED(cleanup_table+20) # io_tif
+ cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
+ jl .Lcleanup_sysc_tif
+ cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
+ jl .Lcleanup_sysc_restore
+ cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
jl 0f
- cl %r9,BASED(cleanup_table+24) # io_restore
- jl cleanup_io_tif
- cl %r9,BASED(cleanup_table+28) # io_done
- jl cleanup_io_restore
- cl %r9,BASED(cleanup_table+32) # psw_idle
+ cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
+ jl .Lcleanup_io_tif
+ cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
+ jl .Lcleanup_io_restore
+ cl %r9,BASED(.Lcleanup_table+32) # psw_idle
jl 0f
- cl %r9,BASED(cleanup_table+36) # psw_idle_end
- jl cleanup_idle
+ cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
+ jl .Lcleanup_idle
0: br %r14
-cleanup_system_call:
+.Lcleanup_system_call:
# check if stpt has been executed
- cl %r9,BASED(cleanup_system_call_insn)
+ cl %r9,BASED(.Lcleanup_system_call_insn)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
chi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0: # check if stm has been executed
- cl %r9,BASED(cleanup_system_call_insn+4)
+ cl %r9,BASED(.Lcleanup_system_call_insn+4)
jh 0f
mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
0: # set up saved registers r12, and r13
st %r12,16(%r11) # r12 thread-info pointer
st %r13,20(%r11) # r13 literal-pool pointer
# check if the user time calculation has been done
- cl %r9,BASED(cleanup_system_call_insn+8)
+ cl %r9,BASED(.Lcleanup_system_call_insn+8)
jh 0f
l %r10,__LC_EXIT_TIMER
l %r15,__LC_EXIT_TIMER+4
@@ -824,7 +824,7 @@ cleanup_system_call:
st %r10,__LC_USER_TIMER
st %r15,__LC_USER_TIMER+4
0: # check if the system time calculation has been done
- cl %r9,BASED(cleanup_system_call_insn+12)
+ cl %r9,BASED(.Lcleanup_system_call_insn+12)
jh 0f
l %r10,__LC_LAST_UPDATE_TIMER
l %r15,__LC_LAST_UPDATE_TIMER+4
@@ -848,20 +848,20 @@ cleanup_system_call:
# setup saved register 15
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
- l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
+ l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
br %r14
-cleanup_system_call_insn:
+.Lcleanup_system_call_insn:
.long system_call + 0x80000000
- .long sysc_stm + 0x80000000
- .long sysc_vtime + 0x80000000 + 36
- .long sysc_vtime + 0x80000000 + 76
+ .long .Lsysc_stm + 0x80000000
+ .long .Lsysc_vtime + 0x80000000 + 36
+ .long .Lsysc_vtime + 0x80000000 + 76
-cleanup_sysc_tif:
- l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000
+.Lcleanup_sysc_tif:
+ l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
br %r14
-cleanup_sysc_restore:
- cl %r9,BASED(cleanup_sysc_restore_insn)
+.Lcleanup_sysc_restore:
+ cl %r9,BASED(.Lcleanup_sysc_restore_insn)
jhe 0f
l %r9,12(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -869,15 +869,15 @@ cleanup_sysc_restore:
lm %r0,%r7,__PT_R0(%r9)
0: lm %r8,%r9,__LC_RETURN_PSW
br %r14
-cleanup_sysc_restore_insn:
- .long sysc_done - 4 + 0x80000000
+.Lcleanup_sysc_restore_insn:
+ .long .Lsysc_done - 4 + 0x80000000
-cleanup_io_tif:
- l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000
+.Lcleanup_io_tif:
+ l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
br %r14
-cleanup_io_restore:
- cl %r9,BASED(cleanup_io_restore_insn)
+.Lcleanup_io_restore:
+ cl %r9,BASED(.Lcleanup_io_restore_insn)
jhe 0f
l %r9,12(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -885,10 +885,10 @@ cleanup_io_restore:
lm %r0,%r7,__PT_R0(%r9)
0: lm %r8,%r9,__LC_RETURN_PSW
br %r14
-cleanup_io_restore_insn:
- .long io_done - 4 + 0x80000000
+.Lcleanup_io_restore_insn:
+ .long .Lio_done - 4 + 0x80000000
-cleanup_idle:
+.Lcleanup_idle:
# copy interrupt clock & cpu timer
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -897,7 +897,7 @@ cleanup_idle:
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck has been executed
- cl %r9,BASED(cleanup_idle_insn)
+ cl %r9,BASED(.Lcleanup_idle_insn)
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
@@ -913,12 +913,12 @@ cleanup_idle:
stm %r9,%r10,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits
+ n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
l %r9,24(%r11) # return from psw_idle
br %r14
-cleanup_idle_insn:
- .long psw_idle_lpsw + 0x80000000
-cleanup_idle_wait:
+.Lcleanup_idle_insn:
+ .long .Lpsw_idle_lpsw + 0x80000000
+.Lcleanup_idle_wait:
.long 0xfcfdffff
/*
@@ -933,30 +933,30 @@ cleanup_idle_wait:
/*
* Symbol constants
*/
-.Ldo_machine_check: .long s390_do_machine_check
-.Lhandle_mcck: .long s390_handle_mcck
-.Ldo_IRQ: .long do_IRQ
-.Ldo_signal: .long do_signal
-.Ldo_notify_resume: .long do_notify_resume
-.Ldo_per_trap: .long do_per_trap
-.Ljump_table: .long pgm_check_table
-.Lschedule: .long schedule
+.Lc_do_machine_check: .long s390_do_machine_check
+.Lc_handle_mcck: .long s390_handle_mcck
+.Lc_do_IRQ: .long do_IRQ
+.Lc_do_signal: .long do_signal
+.Lc_do_notify_resume: .long do_notify_resume
+.Lc_do_per_trap: .long do_per_trap
+.Lc_jump_table: .long pgm_check_table
+.Lc_schedule: .long schedule
#ifdef CONFIG_PREEMPT
-.Lpreempt_irq: .long preempt_schedule_irq
+.Lc_preempt_irq: .long preempt_schedule_irq
#endif
-.Ltrace_enter: .long do_syscall_trace_enter
-.Ltrace_exit: .long do_syscall_trace_exit
-.Lschedule_tail: .long schedule_tail
-.Lsysc_per: .long sysc_per + 0x80000000
+.Lc_trace_enter: .long do_syscall_trace_enter
+.Lc_trace_exit: .long do_syscall_trace_exit
+.Lc_schedule_tail: .long schedule_tail
+.Lc_sysc_per: .long .Lsysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
-.Lhardirqs_on: .long trace_hardirqs_on_caller
-.Lhardirqs_off: .long trace_hardirqs_off_caller
+.Lc_hardirqs_on: .long trace_hardirqs_on_caller
+.Lc_hardirqs_off: .long trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit: .long lockdep_sys_exit
+.Lc_lockdep_sys_exit: .long lockdep_sys_exit
#endif
-.Lcritical_start: .long __critical_start + 0x80000000
-.Lcritical_length: .long __critical_end - __critical_start
+.Lc_critical_start: .long .L__critical_start + 0x80000000
+.Lc_critical_length: .long .L__critical_end - .L__critical_start
.section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 0554b9771c9f..8e61393c8275 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -74,4 +74,6 @@ struct old_sigaction;
long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
+long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
+long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7b2e03afd017..c329446a951d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
.if \reason==1
# Some program interrupts are suppressing (e.g. protection).
# We must also check the instruction after SIE in that case.
- # do_protection_exception will rewind to rewind_pad
+ # do_protection_exception will rewind to .Lrewind_pad
jh .+42
.else
jhe .+42
@@ -192,7 +192,7 @@ ENTRY(__switch_to)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
-__critical_start:
+.L__critical_start:
/*
* SVC interrupt handler routine. System calls are synchronous events and
* are executed with interrupts enabled.
@@ -200,15 +200,15 @@ __critical_start:
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-sysc_stmg:
+.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r13
stmg %r0,%r7,__PT_R0(%r11)
@@ -216,39 +216,39 @@ sysc_vtime:
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
- jnz sysc_nr_ok
+ jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
- jnl sysc_nr_ok
+ jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lgf %r9,0(%r8,%r10) # get system call add.
tm __TI_flags+7(%r12),_TIF_TRACE
- jnz sysc_tracesys
+ jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
-sysc_return:
+.Lsysc_return:
LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno sysc_restore
+ jno .Lsysc_restore
tm __PT_FLAGS+7(%r11),_PIF_WORK
- jnz sysc_work
+ jnz .Lsysc_work
tm __TI_flags+7(%r12),_TIF_WORK
- jnz sysc_work # check for work
+ jnz .Lsysc_work # check for work
tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz sysc_work
-sysc_restore:
+ jnz .Lsysc_work
+.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -256,101 +256,101 @@ sysc_restore:
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:
#
# One of the work bits is on. Find out which one.
#
-sysc_work:
+.Lsysc_work:
tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo sysc_mcck_pending
+ jo .Lsysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo sysc_reschedule
+ jo .Lsysc_reschedule
#ifdef CONFIG_UPROBES
tm __TI_flags+7(%r12),_TIF_UPROBE
- jo sysc_uprobe_notify
+ jo .Lsysc_uprobe_notify
#endif
tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- jo sysc_singlestep
+ jo .Lsysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo sysc_sigpending
+ jo .Lsysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo sysc_notify_resume
+ jo .Lsysc_notify_resume
tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo sysc_uaccess
- j sysc_return # beware of critical section cleanup
+ jo .Lsysc_uaccess
+ j .Lsysc_return # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
#
-sysc_reschedule:
- larl %r14,sysc_return
+.Lsysc_reschedule:
+ larl %r14,.Lsysc_return
jg schedule
#
# _CIF_MCCK_PENDING is set, call handler
#
-sysc_mcck_pending:
- larl %r14,sysc_return
+.Lsysc_mcck_pending:
+ larl %r14,.Lsysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
#
-sysc_uaccess:
+.Lsysc_uaccess:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j sysc_return
+ j .Lsysc_return
#
# _TIF_SIGPENDING is set, call do_signal
#
-sysc_sigpending:
+.Lsysc_sigpending:
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
- jno sysc_return
+ jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
- jnl sysc_nr_ok # invalid svc number -> do svc 0
+ jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
slag %r8,%r1,2
- j sysc_nr_ok # restart svc
+ j .Lsysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
-sysc_notify_resume:
+.Lsysc_notify_resume:
lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,sysc_return
+ larl %r14,.Lsysc_return
jg do_notify_resume
#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
-sysc_uprobe_notify:
+.Lsysc_uprobe_notify:
lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,sysc_return
+ larl %r14,.Lsysc_return
jg uprobe_notify_resume
#endif
#
# _PIF_PER_TRAP is set, call do_per_trap
#
-sysc_singlestep:
+.Lsysc_singlestep:
ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,sysc_return
+ larl %r14,.Lsysc_return
jg do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
-sysc_tracesys:
+.Lsysc_tracesys:
lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0
llgh %r0,__PT_INT_CODE+2(%r11)
@@ -358,20 +358,20 @@ sysc_tracesys:
brasl %r14,do_syscall_trace_enter
lghi %r0,NR_syscalls
clgr %r0,%r2
- jnh sysc_tracenogo
+ jnh .Lsysc_tracenogo
sllg %r8,%r2,2
lgf %r9,0(%r8,%r10)
-sysc_tracego:
+.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
-sysc_tracenogo:
+.Lsysc_tracenogo:
tm __TI_flags+7(%r12),_TIF_TRACE
- jz sysc_return
+ jz .Lsysc_return
lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,sysc_return
+ larl %r14,.Lsysc_return
jg do_syscall_trace_exit
#
@@ -384,13 +384,13 @@ ENTRY(ret_from_fork)
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne sysc_tracenogo
+ jne .Lsysc_tracenogo
# it's a kernel thread
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
basr %r14,%r9
- j sysc_tracenogo
+ j .Lsysc_tracenogo
/*
* Program check handler routine
@@ -409,7 +409,7 @@ ENTRY(pgm_check_handler)
tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 0f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jnz pgm_svcper # -> single stepped svc
+ jnz .Lpgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
@@ -432,7 +432,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f
tmhh %r8,0x0001 # kernel per event ?
- jz pgm_kprobe
+ jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
@@ -443,31 +443,31 @@ ENTRY(pgm_check_handler)
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
sll %r10,2
- je sysc_return
+ je .Lsysc_return
lgf %r1,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
- j sysc_return
+ j .Lsysc_return
#
# PER event in supervisor state, must be kprobes
#
-pgm_kprobe:
+.Lpgm_kprobe:
REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_per_trap
- j sysc_return
+ j .Lsysc_return
#
# single stepped system call
#
-pgm_svcper:
+.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- larl %r14,sysc_per
+ larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
/*
* IO interrupt handler routine
@@ -483,10 +483,10 @@ ENTRY(io_int_handler)
HANDLE_SIE_INTERCEPT %r14,2
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user?
- jz io_skip
+ jz .Lio_skip
UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
LAST_BREAK %r14
-io_skip:
+.Lio_skip:
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -494,29 +494,29 @@ io_skip:
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-io_loop:
+.Lio_loop:
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,IO_INTERRUPT
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz io_call
+ jz .Lio_call
lghi %r3,THIN_INTERRUPT
-io_call:
+.Lio_call:
brasl %r14,do_IRQ
tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
- jz io_return
+ jz .Lio_return
tpi 0
- jz io_return
+ jz .Lio_return
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j io_loop
-io_return:
+ j .Lio_loop
+.Lio_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
-io_tif:
+.Lio_tif:
tm __TI_flags+7(%r12),_TIF_WORK
- jnz io_work # there is work to do (signals etc.)
+ jnz .Lio_work # there is work to do (signals etc.)
tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz io_work
-io_restore:
+ jnz .Lio_work
+.Lio_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -524,7 +524,7 @@ io_restore:
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
-io_done:
+.Lio_done:
#
# There is work todo, find out in which context we have been interrupted:
@@ -535,15 +535,15 @@ io_done:
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
-io_work:
+.Lio_work:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo io_work_user # yes -> do resched & signal
+ jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- jnz io_restore # preemption is disabled
+ jnz .Lio_restore # preemption is disabled
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jno io_restore
+ jno .Lio_restore
# switch to kernel stack
lg %r1,__PT_R15(%r11)
aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -551,19 +551,19 @@ io_work:
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_ON already done at .Lio_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
brasl %r14,preempt_schedule_irq
- j io_return
+ j .Lio_return
#else
- j io_restore
+ j .Lio_restore
#endif
#
# Need to do work before returning to userspace, switch to kernel stack
#
-io_work_user:
+.Lio_work_user:
lg %r1,__LC_KERNEL_STACK
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -573,70 +573,70 @@ io_work_user:
#
# One of the work bits is on. Find out which one.
#
-io_work_tif:
+.Lio_work_tif:
tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo io_mcck_pending
+ jo .Lio_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo io_reschedule
+ jo .Lio_reschedule
tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo io_sigpending
+ jo .Lio_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo io_notify_resume
+ jo .Lio_notify_resume
tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo io_uaccess
- j io_return # beware of critical section cleanup
+ jo .Lio_uaccess
+ j .Lio_return # beware of critical section cleanup
#
# _CIF_MCCK_PENDING is set, call handler
#
-io_mcck_pending:
- # TRACE_IRQS_ON already done at io_return
+.Lio_mcck_pending:
+ # TRACE_IRQS_ON already done at .Lio_return
brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _CIF_ASCE is set, load user space asce
#
-io_uaccess:
+.Lio_uaccess:
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j io_return
+ j .Lio_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
-io_reschedule:
- # TRACE_IRQS_ON already done at io_return
+.Lio_reschedule:
+ # TRACE_IRQS_ON already done at .Lio_return
ssm __LC_SVC_NEW_PSW # reenable interrupts
brasl %r14,schedule # call scheduler
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _TIF_SIGPENDING or is set, call do_signal
#
-io_sigpending:
- # TRACE_IRQS_ON already done at io_return
+.Lio_sigpending:
+ # TRACE_IRQS_ON already done at .Lio_return
ssm __LC_SVC_NEW_PSW # reenable interrupts
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
#
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
-io_notify_resume:
- # TRACE_IRQS_ON already done at io_return
+.Lio_notify_resume:
+ # TRACE_IRQS_ON already done at .Lio_return
ssm __LC_SVC_NEW_PSW # reenable interrupts
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_return
+ j .Lio_return
/*
* External interrupt handler routine
@@ -652,10 +652,10 @@ ENTRY(ext_int_handler)
HANDLE_SIE_INTERCEPT %r14,3
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user ?
- jz ext_skip
+ jz .Lext_skip
UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
LAST_BREAK %r14
-ext_skip:
+.Lext_skip:
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -669,23 +669,23 @@ ext_skip:
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,EXT_INTERRUPT
brasl %r14,do_IRQ
- j io_return
+ j .Lio_return
/*
- * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
- larl %r1,psw_idle_lpsw+4
+ larl %r1,.Lpsw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
-psw_idle_lpsw:
+.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
br %r14
-psw_idle_end:
+.Lpsw_idle_end:
-__critical_end:
+.L__critical_end:
/*
* Machine check handler routines
@@ -701,7 +701,7 @@ ENTRY(mcck_int_handler)
lmg %r8,%r9,__LC_MCK_OLD_PSW
HANDLE_SIE_INTERCEPT %r14,4
tm __LC_MCCK_CODE,0x80 # system damage?
- jo mcck_panic # yes -> rest of mcck code invalid
+ jo .Lmcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -719,13 +719,13 @@ ENTRY(mcck_int_handler)
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno mcck_panic # no -> skip cleanup critical
+ jno .Lmcck_panic # no -> skip cleanup critical
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
tm %r8,0x0001 # interrupting from user ?
- jz mcck_skip
+ jz .Lmcck_skip
UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
LAST_BREAK %r14
-mcck_skip:
+.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),0(%r14)
@@ -735,7 +735,7 @@ mcck_skip:
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno mcck_return
+ jno .Lmcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -743,11 +743,11 @@ mcck_skip:
lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jno mcck_return
+ jno .Lmcck_return
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
-mcck_return:
+.Lmcck_return:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
@@ -758,14 +758,14 @@ mcck_return:
0: lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_MCCK_PSW
-mcck_panic:
+.Lmcck_panic:
lg %r14,__LC_PANIC_STACK
slgr %r14,%r15
srag %r14,%r14,PAGE_SHIFT
jz 0f
lg %r15,__LC_PANIC_STACK
0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j mcck_skip
+ j .Lmcck_skip
#
# PSW restart interrupt handler
@@ -815,69 +815,69 @@ stack_overflow:
#endif
.align 8
-cleanup_table:
+.Lcleanup_table:
.quad system_call
- .quad sysc_do_svc
- .quad sysc_tif
- .quad sysc_restore
- .quad sysc_done
- .quad io_tif
- .quad io_restore
- .quad io_done
+ .quad .Lsysc_do_svc
+ .quad .Lsysc_tif
+ .quad .Lsysc_restore
+ .quad .Lsysc_done
+ .quad .Lio_tif
+ .quad .Lio_restore
+ .quad .Lio_done
.quad psw_idle
- .quad psw_idle_end
+ .quad .Lpsw_idle_end
cleanup_critical:
- clg %r9,BASED(cleanup_table) # system_call
+ clg %r9,BASED(.Lcleanup_table) # system_call
jl 0f
- clg %r9,BASED(cleanup_table+8) # sysc_do_svc
- jl cleanup_system_call
- clg %r9,BASED(cleanup_table+16) # sysc_tif
+ clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
+ jl .Lcleanup_system_call
+ clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
jl 0f
- clg %r9,BASED(cleanup_table+24) # sysc_restore
- jl cleanup_sysc_tif
- clg %r9,BASED(cleanup_table+32) # sysc_done
- jl cleanup_sysc_restore
- clg %r9,BASED(cleanup_table+40) # io_tif
+ clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
+ jl .Lcleanup_sysc_tif
+ clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
+ jl .Lcleanup_sysc_restore
+ clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
jl 0f
- clg %r9,BASED(cleanup_table+48) # io_restore
- jl cleanup_io_tif
- clg %r9,BASED(cleanup_table+56) # io_done
- jl cleanup_io_restore
- clg %r9,BASED(cleanup_table+64) # psw_idle
+ clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
+ jl .Lcleanup_io_tif
+ clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
+ jl .Lcleanup_io_restore
+ clg %r9,BASED(.Lcleanup_table+64) # psw_idle
jl 0f
- clg %r9,BASED(cleanup_table+72) # psw_idle_end
- jl cleanup_idle
+ clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
+ jl .Lcleanup_idle
0: br %r14
-cleanup_system_call:
+.Lcleanup_system_call:
# check if stpt has been executed
- clg %r9,BASED(cleanup_system_call_insn)
+ clg %r9,BASED(.Lcleanup_system_call_insn)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0: # check if stmg has been executed
- clg %r9,BASED(cleanup_system_call_insn+8)
+ clg %r9,BASED(.Lcleanup_system_call_insn+8)
jh 0f
mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(cleanup_system_call_insn+16)
+ clg %r9,BASED(.Lcleanup_system_call_insn+16)
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
stg %r12,32(%r11) # r12 thread-info pointer
0: # check if the user time update has been done
- clg %r9,BASED(cleanup_system_call_insn+24)
+ clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
lg %r15,__LC_EXIT_TIMER
slg %r15,__LC_SYNC_ENTER_TIMER
alg %r15,__LC_USER_TIMER
stg %r15,__LC_USER_TIMER
0: # check if the system time update has been done
- clg %r9,BASED(cleanup_system_call_insn+32)
+ clg %r9,BASED(.Lcleanup_system_call_insn+32)
jh 0f
lg %r15,__LC_LAST_UPDATE_TIMER
slg %r15,__LC_EXIT_TIMER
@@ -904,21 +904,21 @@ cleanup_system_call:
# setup saved register r15
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
- larl %r9,sysc_do_svc
+ larl %r9,.Lsysc_do_svc
br %r14
-cleanup_system_call_insn:
+.Lcleanup_system_call_insn:
.quad system_call
- .quad sysc_stmg
- .quad sysc_per
- .quad sysc_vtime+18
- .quad sysc_vtime+42
+ .quad .Lsysc_stmg
+ .quad .Lsysc_per
+ .quad .Lsysc_vtime+18
+ .quad .Lsysc_vtime+42
-cleanup_sysc_tif:
- larl %r9,sysc_tif
+.Lcleanup_sysc_tif:
+ larl %r9,.Lsysc_tif
br %r14
-cleanup_sysc_restore:
- clg %r9,BASED(cleanup_sysc_restore_insn)
+.Lcleanup_sysc_restore:
+ clg %r9,BASED(.Lcleanup_sysc_restore_insn)
je 0f
lg %r9,24(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -926,15 +926,15 @@ cleanup_sysc_restore:
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
-cleanup_sysc_restore_insn:
- .quad sysc_done - 4
+.Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_done - 4
-cleanup_io_tif:
- larl %r9,io_tif
+.Lcleanup_io_tif:
+ larl %r9,.Lio_tif
br %r14
-cleanup_io_restore:
- clg %r9,BASED(cleanup_io_restore_insn)
+.Lcleanup_io_restore:
+ clg %r9,BASED(.Lcleanup_io_restore_insn)
je 0f
lg %r9,24(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -942,10 +942,10 @@ cleanup_io_restore:
lmg %r0,%r7,__PT_R0(%r9)
0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
-cleanup_io_restore_insn:
- .quad io_done - 4
+.Lcleanup_io_restore_insn:
+ .quad .Lio_done - 4
-cleanup_idle:
+.Lcleanup_idle:
# copy interrupt clock & cpu timer
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -954,7 +954,7 @@ cleanup_idle:
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck & stpt have been executed
- clg %r9,BASED(cleanup_idle_insn)
+ clg %r9,BASED(.Lcleanup_idle_insn)
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
@@ -973,17 +973,17 @@ cleanup_idle:
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
br %r14
-cleanup_idle_insn:
- .quad psw_idle_lpsw
+.Lcleanup_idle_insn:
+ .quad .Lpsw_idle_lpsw
/*
* Integer constants
*/
.align 8
.Lcritical_start:
- .quad __critical_start
+ .quad .L__critical_start
.Lcritical_length:
- .quad __critical_end - __critical_start
+ .quad .L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
@@ -1000,25 +1000,25 @@ ENTRY(sie64a)
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
lg %r14,__LC_GMAP # get gmap pointer
ltgr %r14,%r14
- jz sie_gmap
+ jz .Lsie_gmap
lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
-sie_gmap:
+.Lsie_gmap:
lg %r14,__SF_EMPTY(%r15) # get control block pointer
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
tm __SIE_PROG20+3(%r14),1 # last exit...
- jnz sie_done
+ jnz .Lsie_done
LPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
-sie_done:
+.Lsie_done:
LPP __SF_EMPTY+16(%r15) # set host id
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and sie_done should not cause program
+# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
-rewind_pad:
+.Lrewind_pad:
nop 0
.globl sie_exit
sie_exit:
@@ -1027,19 +1027,19 @@ sie_exit:
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
br %r14
-sie_fault:
+.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
j sie_exit
.align 8
.Lsie_critical:
- .quad sie_gmap
+ .quad .Lsie_gmap
.Lsie_critical_length:
- .quad sie_done - sie_gmap
+ .quad .Lsie_done - .Lsie_gmap
- EX_TABLE(rewind_pad,sie_fault)
- EX_TABLE(sie_exit,sie_fault)
+ EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(sie_exit,.Lsie_fault)
#endif
.section .rodata, "a"
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index ca1cabb3a96c..b86bb8823f15 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -7,6 +7,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
@@ -15,60 +16,39 @@
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
#include "entry.h"
-void mcount_replace_code(void);
-void ftrace_disable_code(void);
-void ftrace_enable_insn(void);
-
/*
* The mcount code looks like this:
* stg %r14,8(%r15) # offset 0
* larl %r1,<&counter> # offset 6
* brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18
- * Total length is 24 bytes. The complete mcount block initially gets replaced
- * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
- * only patch the jg/lg instruction within the block.
- * Note: we do not patch the first instruction to an unconditional branch,
- * since that would break kprobes/jprobes. It is easier to leave the larl
- * instruction in and only modify the second instruction.
+ * Total length is 24 bytes. Only the first instruction will be patched
+ * by ftrace_make_call / ftrace_make_nop.
* The enabled ftrace code block looks like this:
- * larl %r0,.+24 # offset 0
- * > lg %r1,__LC_FTRACE_FUNC # offset 6
- * br %r1 # offset 12
- * brcl 0,0 # offset 14
- * brc 0,0 # offset 20
+ * > brasl %r0,ftrace_caller # offset 0
+ * larl %r1,<&counter> # offset 6
+ * brasl %r14,_mcount # offset 12
+ * lg %r14,8(%r15) # offset 18
* The ftrace function gets called with a non-standard C function call ABI
* where r0 contains the return address. It is also expected that the called
* function only clobbers r0 and r1, but restores r2-r15.
+ * For module code we can't directly jump to the ftrace caller, but need a
+ * trampoline (ftrace_plt), which also clobbers r1.
* The return point of the ftrace function has offset 24, so execution
* continues behind the mcount block.
- * larl %r0,.+24 # offset 0
- * > jg .+18 # offset 6
- * br %r1 # offset 12
- * brcl 0,0 # offset 14
- * brc 0,0 # offset 20
+ * The disabled ftrace code block looks like this:
+ * > jg .+24 # offset 0
+ * larl %r1,<&counter> # offset 6
+ * brasl %r14,_mcount # offset 12
+ * lg %r14,8(%r15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
*/
-asm(
- " .align 4\n"
- "mcount_replace_code:\n"
- " larl %r0,0f\n"
- "ftrace_disable_code:\n"
- " jg 0f\n"
- " br %r1\n"
- " brcl 0,0\n"
- " brc 0,0\n"
- "0:\n"
- " .align 4\n"
- "ftrace_enable_insn:\n"
- " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define MCOUNT_BLOCK_SIZE 24
-#define MCOUNT_INSN_OFFSET 6
-#define FTRACE_INSN_SIZE 6
+
+unsigned long ftrace_plt;
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
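
The comment above describes the new patching scheme: only the first six bytes of the 24-byte mcount block are ever rewritten, either to a brasl into ftrace_caller (enabled) or to a jg that skips the whole block (disabled). A minimal sketch of the helpers used further down (ftrace_generate_nop_insn / ftrace_generate_call_insn), assuming the brcl/brasl opcodes 0xc0f4 and 0xc005 and a generic is_module_addr() helper, could look like this; the real definitions live in the ftrace/pgtable headers and are not part of this hunk:

struct ftrace_insn {
        u16 opc;
        s32 disp;
} __packed;

static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
        insn->opc = 0xc0f4;                /* brcl 15,... == "jg .+24" */
        insn->disp = MCOUNT_INSN_SIZE / 2; /* displacement counts halfwords */
}

static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
                                             unsigned long ip)
{
        unsigned long target;

        /* module code cannot reach ftrace_caller directly, go via the plt */
        target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
        insn->opc = 0xc005;                /* brasl %r0,<target> */
        insn->disp = (target - ip) / 2;
}
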
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- /* Initial replacement of the whole mcount block */
- if (addr == MCOUNT_ADDR) {
- if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
- mcount_replace_code,
- MCOUNT_BLOCK_SIZE))
- return -EPERM;
- return 0;
+ struct ftrace_insn insn;
+ unsigned short op;
+ void *from, *to;
+ size_t size;
+
+ ftrace_generate_nop_insn(&insn);
+ size = sizeof(insn);
+ from = &insn;
+ to = (void *) rec->ip;
+ if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ return -EFAULT;
+ /*
+ * If we find a breakpoint instruction, a kprobe has been placed
+ * at the beginning of the function. We write the constant
+ * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
+ * instruction so that the kprobes handler can execute a nop, if it
+ * reaches this breakpoint.
+ */
+ if (op == BREAKPOINT_INSTRUCTION) {
+ size -= 2;
+ from += 2;
+ to += 2;
+ insn.disp = KPROBE_ON_FTRACE_NOP;
}
- if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
- MCOUNT_INSN_SIZE))
+ if (probe_kernel_write(to, from, size))
return -EPERM;
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
- FTRACE_INSN_SIZE))
+ struct ftrace_insn insn;
+ unsigned short op;
+ void *from, *to;
+ size_t size;
+
+ ftrace_generate_call_insn(&insn, rec->ip);
+ size = sizeof(insn);
+ from = &insn;
+ to = (void *) rec->ip;
+ if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ return -EFAULT;
+ /*
+ * If we find a breakpoint instruction, a kprobe has been placed
+ * at the beginning of the function. We write the constant
+ * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
+ * instruction so that the kprobes handler can execute a brasl if it
+ * reaches this breakpoint.
+ */
+ if (op == BREAKPOINT_INSTRUCTION) {
+ size -= 2;
+ from += 2;
+ to += 2;
+ insn.disp = KPROBE_ON_FTRACE_CALL;
+ }
+ if (probe_kernel_write(to, from, size))
return -EPERM;
return 0;
}
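
Both functions above fold the kprobe case into the same write: when a breakpoint occupies the first two bytes, only the four displacement bytes are updated and set to one of the KPROBE_ON_FTRACE_* markers, so the kprobes code can tell which instruction it has to emulate. Purely as an illustration (this helper is hypothetical, not part of the patch), the resulting states decode as follows:

/* Hypothetical helper, for illustration only. */
static bool ftrace_site_enabled(struct ftrace_insn *insn)
{
        if (insn->opc == BREAKPOINT_INSTRUCTION)
                /* kprobed: the marker left in disp records the ftrace state */
                return insn->disp == KPROBE_ON_FTRACE_CALL;
        /* not kprobed: anything but the "jg .+24" nop is the live brasl */
        return !is_ftrace_nop(insn);
}
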
@@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
+static int __init ftrace_plt_init(void)
+{
+ unsigned int *ip;
+
+ ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
+ if (!ftrace_plt)
+ panic("cannot allocate ftrace plt\n");
+ ip = (unsigned int *) ftrace_plt;
+ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+ ip[1] = 0x100a0004;
+ ip[2] = 0x07f10000;
+ ip[3] = FTRACE_ADDR >> 32;
+ ip[4] = FTRACE_ADDR & 0xffffffff;
+ set_memory_ro(ftrace_plt, 1);
+ return 0;
+}
+device_initcall(ftrace_plt_init);
+
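
For readability, the five words written by ftrace_plt_init() decode to the stub announced in its comment (a sketch; the offsets are derived from the opcodes, not stated in this hunk):

/*
 * offset  0:  0d10            basr %r1,0        r1 = address of the lg below
 * offset  2:  e310 100a 0004  lg   %r1,10(%r1)  load the literal at offset 12
 * offset  8:  07f1            br   %r1          branch to ftrace_caller
 * offset 10:  0000            padding
 * offset 12:  .quad FTRACE_ADDR                 64-bit target address
 */
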
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
-unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
- unsigned long ip)
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
struct ftrace_graph_ent trace;
@@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
out:
return parent;
}
+NOKPROBE_SYMBOL(prepare_ftrace_return);
/*
* Patch the kernel code at ftrace_graph_caller location. The instruction
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 7559f1beab29..7a55c29b0b33 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -19,7 +19,7 @@
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-void __kprobes enabled_wait(void)
+void enabled_wait(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long long idle_time;
@@ -35,31 +35,32 @@ void __kprobes enabled_wait(void)
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
+ trace_hardirqs_off();
+
/* Account time spent with enabled wait psw loaded as idle time. */
- idle->sequence++;
- smp_wmb();
+ write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(idle_time);
- smp_wmb();
- idle->sequence++;
+ write_seqcount_end(&idle->seqcount);
}
+NOKPROBE_SYMBOL(enabled_wait);
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
- unsigned int sequence;
+ unsigned int seq;
do {
- sequence = ACCESS_ONCE(idle->sequence);
+ seq = read_seqcount_begin(&idle->seqcount);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ } while (read_seqcount_retry(&idle->seqcount, seq));
return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev,
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
- unsigned int sequence;
+ unsigned int seq;
do {
now = get_tod_clock();
- sequence = ACCESS_ONCE(idle->sequence);
+ seq = read_seqcount_begin(&idle->seqcount);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ } while (read_seqcount_retry(&idle->seqcount, seq));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
@@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
- unsigned int sequence;
+ unsigned int seq;
do {
now = get_tod_clock();
- sequence = ACCESS_ONCE(idle->sequence);
+ seq = read_seqcount_begin(&idle->seqcount);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ } while (read_seqcount_retry(&idle->seqcount, seq));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
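
The idle statistics above switch from a hand-rolled even/odd sequence counter to the generic seqcount API; readers retry until they observe a stable, even sequence. A generic (non-s390) sketch of the protocol, assuming the writer is already serialized per CPU as it is here:

#include <linux/seqlock.h>

struct stats {
        seqcount_t seqcount;
        unsigned long long a, b;
};

static void stats_update(struct stats *s, unsigned long long a,
                         unsigned long long b)
{
        write_seqcount_begin(&s->seqcount);     /* sequence becomes odd */
        s->a = a;
        s->b = b;
        write_seqcount_end(&s->seqcount);       /* sequence becomes even again */
}

static unsigned long long stats_read(struct stats *s)
{
        unsigned long long sum;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&s->seqcount);
                sum = s->a + s->b;
        } while (read_seqcount_retry(&s->seqcount, seq));
        return sum;
}
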
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1b8a38ab7861..f238720690f3 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
- goto out;
}
if (index < NR_IRQS) {
if (index >= NR_IRQS_BASE)
goto out;
- /* Adjust index to process irqclass_main_desc array entries */
- index--;
seq_printf(p, "%s: ", irqclass_main_desc[index].name);
irq = irqclass_main_desc[index].irq;
for_each_online_cpu(cpu)
@@ -158,7 +155,7 @@ out:
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
- return from < THIN_INTERRUPT ? THIN_INTERRUPT : from;
+ return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
}
/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 014d4729b134..1e4c710dfb92 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
+#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>
@@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
.insn_size = MAX_INSN_SIZE,
};
-static void __kprobes copy_instruction(struct kprobe *p)
+static void copy_instruction(struct kprobe *p)
{
+ unsigned long ip = (unsigned long) p->addr;
s64 disp, new_disp;
u64 addr, new_addr;
- memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
+ if (ftrace_location(ip) == ip) {
+ /*
+ * If kprobes patches the instruction that is morphed by
+ * ftrace make sure that kprobes always sees the branch
+ * "jg .+24" that skips the mcount block
+ */
+ ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
+ p->ainsn.is_ftrace_insn = 1;
+ } else
+ memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+ p->opcode = p->ainsn.insn[0];
if (!probe_is_insn_relative_long(p->ainsn.insn))
return;
/*
@@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p)
new_disp = ((addr + (disp * 2)) - new_addr) / 2;
*(s32 *)&p->ainsn.insn[1] = new_disp;
}
+NOKPROBE_SYMBOL(copy_instruction);
static inline int is_kernel_addr(void *addr)
{
return addr < (void *)_end;
}
-static inline int is_module_addr(void *addr)
-{
-#ifdef CONFIG_64BIT
- BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
- if (addr < (void *)MODULES_VADDR)
- return 0;
- if (addr > (void *)MODULES_END)
- return 0;
-#endif
- return 1;
-}
-
-static int __kprobes s390_get_insn_slot(struct kprobe *p)
+static int s390_get_insn_slot(struct kprobe *p)
{
/*
* Get an insn slot that is within the same 2GB area like the original
@@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
}
+NOKPROBE_SYMBOL(s390_get_insn_slot);
-static void __kprobes s390_free_insn_slot(struct kprobe *p)
+static void s390_free_insn_slot(struct kprobe *p)
{
if (!p->ainsn.insn)
return;
@@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p)
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
+NOKPROBE_SYMBOL(s390_free_insn_slot);
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x01)
return -EINVAL;
@@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -EINVAL;
if (s390_get_insn_slot(p))
return -ENOMEM;
- p->opcode = *p->addr;
copy_instruction(p);
return 0;
}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
-struct ins_replace_args {
- kprobe_opcode_t *ptr;
- kprobe_opcode_t opcode;
+int arch_check_ftrace_location(struct kprobe *p)
+{
+ return 0;
+}
+
+struct swap_insn_args {
+ struct kprobe *p;
+ unsigned int arm_kprobe : 1;
};
-static int __kprobes swap_instruction(void *aref)
+static int swap_instruction(void *data)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
- struct ins_replace_args *args = aref;
-
+ struct swap_insn_args *args = data;
+ struct ftrace_insn new_insn, *insn;
+ struct kprobe *p = args->p;
+ size_t len;
+
+ new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+ len = sizeof(new_insn.opc);
+ if (!p->ainsn.is_ftrace_insn)
+ goto skip_ftrace;
+ len = sizeof(new_insn);
+ insn = (struct ftrace_insn *) p->addr;
+ if (args->arm_kprobe) {
+ if (is_ftrace_nop(insn))
+ new_insn.disp = KPROBE_ON_FTRACE_NOP;
+ else
+ new_insn.disp = KPROBE_ON_FTRACE_CALL;
+ } else {
+ ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
+ if (insn->disp == KPROBE_ON_FTRACE_NOP)
+ ftrace_generate_nop_insn(&new_insn);
+ }
+skip_ftrace:
kcb->kprobe_status = KPROBE_SWAP_INST;
- probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
+ probe_kernel_write(p->addr, &new_insn, len);
kcb->kprobe_status = status;
return 0;
}
+NOKPROBE_SYMBOL(swap_instruction);
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
- struct ins_replace_args args;
+ struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
- args.ptr = p->addr;
- args.opcode = BREAKPOINT_INSTRUCTION;
stop_machine(swap_instruction, &args, NULL);
}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
{
- struct ins_replace_args args;
+ struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
- args.ptr = p->addr;
- args.opcode = p->opcode;
stop_machine(swap_instruction, &args, NULL);
}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
{
s390_free_insn_slot(p);
}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
-static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
- struct pt_regs *regs,
- unsigned long ip)
+static void enable_singlestep(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs,
+ unsigned long ip)
{
struct per_regs per_kprobe;
@@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip | PSW_ADDR_AMODE;
}
+NOKPROBE_SYMBOL(enable_singlestep);
-static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
- struct pt_regs *regs,
- unsigned long ip)
+static void disable_singlestep(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs,
+ unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
@@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip | PSW_ADDR_AMODE;
}
+NOKPROBE_SYMBOL(disable_singlestep);
/*
* Activate a kprobe by storing its pointer to current_kprobe. The
* previous kprobe is stored in kcb->prev_kprobe. A stack of up to
* two kprobes can be active, see KPROBE_REENTER.
*/
-static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
+static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
kcb->prev_kprobe.status = kcb->kprobe_status;
__this_cpu_write(current_kprobe, p);
}
+NOKPROBE_SYMBOL(push_kprobe);
/*
* Deactivate a kprobe by backing up to the previous state. If the
* current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
* for any other state prev_kprobe.kp will be NULL.
*/
-static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
+static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
+NOKPROBE_SYMBOL(pop_kprobe);
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
+void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
/* Replace the return addr with trampoline addr */
regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
-static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
- struct kprobe *p)
+static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
@@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
BUG();
}
}
+NOKPROBE_SYMBOL(kprobe_reenter_check);
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+static int kprobe_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb;
struct kprobe *p;
@@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_enable_no_resched();
return 0;
}
+NOKPROBE_SYMBOL(kprobe_handler);
/*
* Function return probe trampoline:
@@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void)
/*
* Called when the probe at kretprobe trampoline is hit
*/
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
- struct pt_regs *regs)
+static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
@@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
*/
return 1;
}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
/*
* Called after single-stepping. p->addr is the address of the
@@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
int fixup = probe_get_fixup_type(p->ainsn.insn);
+ /* Check if the kprobes location is an enabled ftrace caller */
+ if (p->ainsn.is_ftrace_insn) {
+ struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
+ struct ftrace_insn call_insn;
+
+ ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
+ /*
+ * A kprobe on an enabled ftrace call site actually single
+ * stepped an unconditional branch (ftrace nop equivalent).
+ * Now we need to fix things up and pretend that a brasl r0,...
+ * was executed instead.
+ */
+ if (insn->disp == KPROBE_ON_FTRACE_CALL) {
+ ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
+ regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
+ }
+ }
+
if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
@@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
disable_singlestep(kcb, regs, ip);
}
+NOKPROBE_SYMBOL(resume_execution);
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+static int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
@@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
return 1;
}
+NOKPROBE_SYMBOL(post_kprobe_handler);
-static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
+static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
@@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
}
return 0;
}
+NOKPROBE_SYMBOL(kprobe_trap_handler);
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
int ret;
@@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
return ret;
}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
* Wrapper routine for handling exceptions.
*/
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *) data;
struct pt_regs *regs = args->regs;
@@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
return 1;
}
+NOKPROBE_SYMBOL(setjmp_pre_handler);
-void __kprobes jprobe_return(void)
+void jprobe_return(void)
{
asm volatile(".word 0x0002");
}
+NOKPROBE_SYMBOL(jprobe_return);
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long stack;
@@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
preempt_enable_no_resched();
return 1;
}
+NOKPROBE_SYMBOL(longjmp_break_handler);
static struct kprobe trampoline = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
@@ -666,7 +725,8 @@ int __init arch_init_kprobes(void)
return register_kprobe(&trampoline);
}
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
{
return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
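
The pattern repeated throughout this file (and in idle.c, time.c and traps.c) replaces the __kprobes annotation, which moves a function into the .kprobes.text section, with NOKPROBE_SYMBOL(), which records the symbol address in the kprobe blacklist instead. A minimal sketch of the two styles:

#include <linux/kprobes.h>

/* old style: the attribute relocates the function into .kprobes.text */
static int __kprobes old_style_handler(struct pt_regs *regs)
{
        return 0;
}

/* new style: plain function, blacklisted by symbol after its definition */
static int new_style_handler(struct pt_regs *regs)
{
        return 0;
}
NOKPROBE_SYMBOL(new_style_handler);
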
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4300ea374826..b6dfc5bfcb89 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,6 +27,7 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
+ aghi %r0,MCOUNT_RETURN_FIXUP
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b878f12a9597..c3f8d157cb0d 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
cpuhw->lsctl.ed = 1;
/* Set in_use flag and store event */
- event->hw.idx = 0; /* only one sampling event per CPU supported */
cpuhw->event = event;
cpuhw->flags |= PMU_F_IN_USE;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ed84cc224899..aa7a83948c7b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-extern void __kprobes kernel_thread_starter(void);
+extern void kernel_thread_starter(void);
/*
* Free current thread data structures etc..
@@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
save_fp_ctl(&p->thread.fp_regs.fpc);
save_fp_regs(p->thread.fp_regs.fprs);
p->thread.fp_regs.pad = 0;
+ p->thread.vxrs = NULL;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = frame->childregs.gprs[6];
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 99a567b70d16..eabfb4594517 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
*/
tmp = 0;
+ } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
+ /*
+ * floating point control reg. is in the thread structure
+ */
+ tmp = child->thread.fp_regs.fpc;
+ tmp <<= BITS_PER_LONG - 32;
+
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
- /*
- * floating point regs. are stored in the thread structure
+ /*
+ * floating point regs. are either in child->thread.fp_regs
+ * or the child->thread.vxrs array
*/
- offset = addr - (addr_t) &dummy->regs.fp_regs;
- tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
- if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
- tmp <<= BITS_PER_LONG - 32;
+ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
+#ifdef CONFIG_64BIT
+ if (child->thread.vxrs)
+ tmp = *(addr_t *)
+ ((addr_t) child->thread.vxrs + 2*offset);
+ else
+#endif
+ tmp = *(addr_t *)
+ ((addr_t) &child->thread.fp_regs.fprs + offset);
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
*/
return 0;
+ } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
+ /*
+ * floating point control reg. is in the thread structure
+ */
+ if ((unsigned int) data != 0 ||
+ test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ return -EINVAL;
+ child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);
+
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
- * floating point regs. are stored in the thread structure
+ * floating point regs. are either in child->thread.fp_regs
+ * or the child->thread.vxrs array
*/
- if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
- if ((unsigned int) data != 0 ||
- test_fp_ctl(data >> (BITS_PER_LONG - 32)))
- return -EINVAL;
- offset = addr - (addr_t) &dummy->regs.fp_regs;
- *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
+ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
+#ifdef CONFIG_64BIT
+ if (child->thread.vxrs)
+ *(addr_t *)((addr_t)
+ child->thread.vxrs + 2*offset) = data;
+ else
+#endif
+ *(addr_t *)((addr_t)
+ &child->thread.fp_regs.fprs + offset) = data;
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
*/
tmp = 0;
+ } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
+ /*
+ * floating point control reg. is in the thread structure
+ */
+ tmp = child->thread.fp_regs.fpc;
+
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
- * floating point regs. are stored in the thread structure
+ * floating point regs. are either in child->thread.fp_regs
+ * or the child->thread.vxrs array
*/
- offset = addr - (addr_t) &dummy32->regs.fp_regs;
- tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
+ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
+#ifdef CONFIG_64BIT
+ if (child->thread.vxrs)
+ tmp = *(__u32 *)
+ ((addr_t) child->thread.vxrs + 2*offset);
+ else
+#endif
+ tmp = *(__u32 *)
+ ((addr_t) &child->thread.fp_regs.fprs + offset);
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/*
@@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child,
*/
return 0;
- } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+ } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
/*
- * floating point regs. are stored in the thread structure
+ * floating point control reg. is in the thread structure
*/
- if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
- test_fp_ctl(tmp))
+ if (test_fp_ctl(tmp))
return -EINVAL;
- offset = addr - (addr_t) &dummy32->regs.fp_regs;
- *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
+ child->thread.fp_regs.fpc = data;
+
+ } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+ /*
+ * floating point regs. are either in child->thread.fp_regs
+ * or the child->thread.vxrs array
+ */
+ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
+#ifdef CONFIG_64BIT
+ if (child->thread.vxrs)
+ *(__u32 *)((addr_t)
+ child->thread.vxrs + 2*offset) = tmp;
+ else
+#endif
+ *(__u32 *)((addr_t)
+ &child->thread.fp_regs.fprs + offset) = tmp;
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/*
@@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target,
return 0;
}
-static int s390_vxrs_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- return !!target->thread.vxrs;
-}
-
static int s390_vxrs_low_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target,
__u64 vxrs[__NUM_VXRS_LOW];
int i;
+ if (!MACHINE_HAS_VX)
+ return -ENODEV;
if (target->thread.vxrs) {
if (target == current)
save_vx_regs(target->thread.vxrs);
@@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target,
__u64 vxrs[__NUM_VXRS_LOW];
int i, rc;
+ if (!MACHINE_HAS_VX)
+ return -ENODEV;
if (!target->thread.vxrs) {
rc = alloc_vector_registers(target);
if (rc)
@@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
{
__vector128 vxrs[__NUM_VXRS_HIGH];
+ if (!MACHINE_HAS_VX)
+ return -ENODEV;
if (target->thread.vxrs) {
if (target == current)
save_vx_regs(target->thread.vxrs);
@@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target,
{
int rc;
+ if (!MACHINE_HAS_VX)
+ return -ENODEV;
if (!target->thread.vxrs) {
rc = alloc_vector_registers(target);
if (rc)
@@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = {
.n = __NUM_VXRS_LOW,
.size = sizeof(__u64),
.align = sizeof(__u64),
- .active = s390_vxrs_active,
.get = s390_vxrs_low_get,
.set = s390_vxrs_low_set,
},
@@ -1205,7 +1259,6 @@ static const struct user_regset s390_regsets[] = {
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
- .active = s390_vxrs_active,
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
@@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = {
.n = __NUM_VXRS_LOW,
.size = sizeof(__u64),
.align = sizeof(__u64),
- .active = s390_vxrs_active,
.get = s390_vxrs_low_get,
.set = s390_vxrs_low_set,
},
@@ -1428,7 +1480,6 @@ static const struct user_regset s390_compat_regsets[] = {
.n = __NUM_VXRS_HIGH,
.size = sizeof(__vector128),
.align = sizeof(__vector128),
- .active = s390_vxrs_active,
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
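
The repeated 2*offset arithmetic above relies on the register aliasing of the vector facility: floating point register i is the leftmost eight bytes of the 16-byte vector register i, so an 8-byte FPR slot at byte offset off in fp_regs.fprs sits at byte offset 2*off in the vxrs array. A hypothetical helper condensing the peek path (not part of the patch):

/* Hypothetical helper, mirrors the logic added to __peek_user() above. */
static addr_t peek_fp_reg(struct task_struct *child, addr_t off)
{
        if (child->thread.vxrs)
                /* FPR n lives in the high half of VR n */
                return *(addr_t *)((addr_t) child->thread.vxrs + 2 * off);
        return *(addr_t *)((addr_t) &child->thread.fp_regs.fprs + off);
}
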
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e80d9ff9a56d..4e532c67832f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -41,7 +41,6 @@
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
-#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void)
lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
- lc->ftrace_func = S390_lowcore.ftrace_func;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack += ASYNC_SIZE;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 0c1a0ff0a558..6a2ac257d98f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
} else {
/* Signal frames without vector registers are short! */
- __u16 __user *svc = (void *) frame + frame_size - 2;
+ __u16 __user *svc = (void __user *) frame + frame_size - 2;
if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
return -EFAULT;
restorer = (unsigned long) svc | PSW_ADDR_AMODE;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fd9e60101f1..0b499f5cbe19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
- lc->ftrace_func = S390_lowcore.ftrace_func;
lc->user_timer = lc->system_timer = lc->steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9f7087fd58de..a2987243bc76 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
+SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
+SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 005d665fe4a5..20660dddb2d6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
* Scheduler clock - returns current time in nanosec units.
*/
-unsigned long long notrace __kprobes sched_clock(void)
+unsigned long long notrace sched_clock(void)
{
return tod_to_ns(get_tod_clock_monotonic());
}
+NOKPROBE_SYMBOL(sched_clock);
/*
* Monotonic_clock - returns # of nanoseconds passed since time_init()
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 9ff5ecba26ab..f081cf1157c3 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
return;
if (!printk_ratelimit())
return;
- printk("User process fault: interruption code 0x%X ", regs->int_code);
+ printk("User process fault: interruption code %04x ilc:%d ",
+ regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
show_regs(regs);
@@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
}
}
-static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
- char *str)
+static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
do_report_trap(regs, si_signo, si_code, str);
}
+NOKPROBE_SYMBOL(do_trap);
-void __kprobes do_per_trap(struct pt_regs *regs)
+void do_per_trap(struct pt_regs *regs)
{
siginfo_t info;
@@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
(void __force __user *) current->thread.per_event.address;
force_sig_info(SIGTRAP, &info, current);
}
+NOKPROBE_SYMBOL(do_per_trap);
void default_trap_handler(struct pt_regs *regs)
{
@@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
-DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
- "translation exception")
#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
@@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-void __kprobes illegal_op(struct pt_regs *regs)
+void translation_exception(struct pt_regs *regs)
+{
+ /* May never happen. */
+ die(regs, "Translation exception");
+}
+
+void illegal_op(struct pt_regs *regs)
{
siginfo_t info;
__u8 opcode[6];
@@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
if (signal)
do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
-
+NOKPROBE_SYMBOL(illegal_op);
#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
@@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs)
do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
-void __kprobes kernel_stack_overflow(struct pt_regs * regs)
+void kernel_stack_overflow(struct pt_regs *regs)
{
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
@@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
bust_spinlocks(0);
panic("Corrupt kernel stack, can't continue.");
}
+NOKPROBE_SYMBOL(kernel_stack_overflow);
void __init trap_init(void)
{
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 0f961a1c64b3..8b9ccf02a2c5 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -207,8 +207,6 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
-static int ipte_lock_count;
-static DEFINE_MUTEX(ipte_mutex);
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -216,47 +214,51 @@ int ipte_lock_held(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->eca & 1)
return ic->kh != 0;
- return ipte_lock_count != 0;
+ return vcpu->kvm->arch.ipte_lock_count != 0;
}
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
- mutex_lock(&ipte_mutex);
- ipte_lock_count++;
- if (ipte_lock_count > 1)
+ mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+ vcpu->kvm->arch.ipte_lock_count++;
+ if (vcpu->kvm->arch.ipte_lock_count > 1)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
while (old.k) {
cond_resched();
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
}
new = old;
new.k = 1;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
out:
- mutex_unlock(&ipte_mutex);
+ mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
- mutex_lock(&ipte_mutex);
- ipte_lock_count--;
- if (ipte_lock_count)
+ mutex_lock(&vcpu->kvm->arch.ipte_mutex);
+ vcpu->kvm->arch.ipte_lock_count--;
+ if (vcpu->kvm->arch.ipte_lock_count)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- new = old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
+ new = old;
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
wake_up(&vcpu->kvm->arch.ipte_wq);
out:
- mutex_unlock(&ipte_mutex);
+ mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
@@ -265,10 +267,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
while (old.kg) {
cond_resched();
- old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
}
new = old;
new.k = 1;
@@ -282,7 +286,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
ic = &vcpu->kvm->arch.sca->ipte_control;
do {
- new = old = ACCESS_ONCE(*ic);
+ old = *ic;
+ barrier();
+ new = old;
new.kh--;
if (!new.kh)
new.k = 0;
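
Two things change above: the ipte lock state moves from file-scope globals into the per-VM kvm->arch, and the ACCESS_ONCE() reads of the ipte_control union are open-coded as a plain load plus barrier(), presumably because ACCESS_ONCE() is only well defined for scalar types. A sketch of the assumed one-time initialization of the new per-VM fields (done elsewhere in the series, e.g. at VM creation):

/* Sketch only -- the struct kvm_arch additions are not part of this hunk. */
static void sketch_init_ipte_lock(struct kvm *kvm)
{
        mutex_init(&kvm->arch.ipte_mutex);
        kvm->arch.ipte_lock_count = 0;
}
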
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index eaf46291d361..81c77ab8102e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -38,6 +38,19 @@ static const intercept_handler_t instruction_handlers[256] = {
[0xeb] = kvm_s390_handle_eb,
};
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+{
+ struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+ /* Use the length of the EXECUTE instruction if necessary */
+ if (sie_block->icptstatus & 1) {
+ ilc = (sie_block->icptstatus >> 4) & 0x6;
+ if (!ilc)
+ ilc = 4;
+ }
+ sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+}
+
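
kvm_s390_rewind_psw() above falls back to the interception status to get the EXECUTE instruction length and then delegates to __rewind_psw() from asm/processor.h. For reference, that helper roughly subtracts the length and wraps the address within the current addressing mode (a sketch from memory, not part of this diff):

static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
        unsigned long mask;

        /* 64-bit, 31-bit or 24-bit addressing mode */
        mask = (psw.mask & PSW_MASK_EA) ? -1UL :
               (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
                                          (1UL << 24) - 1;
        return (psw.addr - ilc) & mask;
}
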
static int handle_noop(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->icptcode) {
@@ -244,7 +257,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
u16 eic = vcpu->arch.sie_block->eic;
- struct kvm_s390_interrupt irq;
+ struct kvm_s390_irq irq;
psw_t newpsw;
int rc;
@@ -269,7 +282,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
if (kvm_s390_si_ext_call_pending(vcpu))
return 0;
irq.type = KVM_S390_INT_EXTERNAL_CALL;
- irq.parm = vcpu->arch.sie_block->extcpuaddr;
+ irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
break;
default:
return -EOPNOTSUPP;
@@ -288,7 +301,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
*/
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long srcaddr, dstaddr;
int reg1, reg2, rc;
@@ -310,7 +322,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
if (rc != 0)
return rc;
- psw->addr = __rewind_psw(*psw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
return 0;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a39838457f01..f00f31e66cd8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -16,6 +16,7 @@
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
+#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
@@ -27,8 +28,8 @@
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
-
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
+#define PFAULT_DONE 0x0680
+#define VIRTIO_PARAM 0x0d00
static int is_ioint(u64 type)
{
@@ -136,6 +137,31 @@ static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
return 0;
}
+static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.local_int.pending_irqs;
+}
+
+static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+{
+ unsigned long active_mask = pending_local_irqs(vcpu);
+
+ if (psw_extint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+ __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+ __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+ __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (psw_mchk_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_MCHK_MASK;
+
+ return active_mask;
+}
+
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
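
The new pending_irqs bitmap and the deliverable_local_irqs() mask above are what the per-type __deliver_*() helpers in the following hunks consume. As a purely hypothetical illustration of how the two fit together (the real injection paths are changed elsewhere in the series):

/* Hypothetical helper, for illustration only. */
static bool sketch_irq_deliverable(struct kvm_vcpu *vcpu, unsigned int irq_type)
{
        unsigned long deliverable = deliverable_local_irqs(vcpu);

        /*
         * deliverable_local_irqs() starts from pending_local_irqs(), so a set
         * bit means: queued on this vcpu and not masked off by the guest's
         * PSW or control registers.
         */
        return test_bit(irq_type, &deliverable);
}

For example, sketch_irq_deliverable(vcpu, IRQ_PEND_EXT_CLOCK_COMP) would ask whether a pending clock-comparator interrupt may be injected right now.
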
@@ -170,26 +196,45 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+ return;
+ if (psw_extint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR0;
+}
+
+static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+ return;
+ if (psw_mchk_disabled(vcpu))
+ vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR14;
+}
+
+/* Set interception request for non-deliverable local interrupts */
+static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+{
+ set_intercept_indicators_ext(vcpu);
+ set_intercept_indicators_mchk(vcpu);
+}
+
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- case KVM_S390_INT_EMERGENCY:
case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_CLOCK_COMP:
- case KVM_S390_INT_CPU_TIMER:
if (psw_extint_disabled(vcpu))
__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR0;
break;
- case KVM_S390_SIGP_STOP:
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
- break;
case KVM_S390_MCHK:
if (psw_mchk_disabled(vcpu))
vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -226,13 +271,236 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
}
}
-static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
- struct kvm_s390_pgm_info *pgm_info)
+static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+ 0, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+ 0, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
+ (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_ext_info ext;
+ int rc;
+
+ spin_lock(&li->lock);
+ ext = li->irq.ext;
+ clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+ li->irq.ext.ext_params2 = 0;
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
+ 0, ext.ext_params2);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_INIT,
+ 0, ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_mchk_info mchk;
+ int rc;
+
+ spin_lock(&li->lock);
+ mchk = li->irq.mchk;
+ /*
+ * If there was an exigent machine check pending, then any repressible
+ * machine checks that might have been pending are indicated along
+ * with it, so always clear both bits
+ */
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout, sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+ vcpu->stat.deliver_restart_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
+
+ rc = write_guest_lc(vcpu,
+ offsetof(struct _lowcore, restart_old_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+ vcpu->stat.deliver_stop_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
+ 0, 0);
+
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
+ return 0;
+}
+
+static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_prefix_info prefix;
+
+ spin_lock(&li->lock);
+ prefix = li->irq.prefix;
+ li->irq.prefix.address = 0;
+ clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
+ vcpu->stat.deliver_prefix_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_SIGP_SET_PREFIX,
+ prefix.address, 0);
+
+ kvm_s390_set_prefix(vcpu, prefix.address);
+ return 0;
+}
+
+static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+ int cpu_addr;
+
+ spin_lock(&li->lock);
+ cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+ clear_bit(cpu_addr, li->sigp_emerg_pending);
+ if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
+ clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+ vcpu->stat.deliver_emergency_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+ cpu_addr, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_extcall_info extcall;
+ int rc;
+
+ spin_lock(&li->lock);
+ extcall = li->irq.extcall;
+ li->irq.extcall.code = 0;
+ clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+ vcpu->stat.deliver_external_call++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_EXTERNAL_CALL,
+ extcall.code, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_pgm_info pgm_info;
int rc = 0;
u16 ilc = get_ilc(vcpu);
- switch (pgm_info->code & ~PGM_PER) {
+ spin_lock(&li->lock);
+ pgm_info = li->irq.pgm;
+ clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
+ memset(&li->irq.pgm, 0, sizeof(pgm_info));
+ spin_unlock(&li->lock);
+
+ VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+ pgm_info.code, ilc);
+ vcpu->stat.deliver_program_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+ pgm_info.code, 0);
+
+ switch (pgm_info.code & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
case PGM_EX_TRANSLATION:
@@ -243,7 +511,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
case PGM_SPACE_SWITCH:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
break;
case PGM_ALEN_TRANSLATION:
@@ -252,7 +520,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_ASTE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_EXTENDED_AUTHORITY:
- rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
case PGM_ASCE_TYPE:
@@ -261,247 +529,208 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_THIRD_TRANS:
case PGM_SEGMENT_TRANSLATION:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
- rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
break;
case PGM_MONITOR:
- rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
- (u64 *)__LC_MON_CLASS_NR);
- rc |= put_guest_lc(vcpu, pgm_info->mon_code,
+ rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
+ (u16 *)__LC_MON_CLASS_NR);
+ rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
case PGM_DATA:
- rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
break;
case PGM_PROTECTION:
- rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
+ rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
}
- if (pgm_info->code & PGM_PER) {
- rc |= put_guest_lc(vcpu, pgm_info->per_code,
+ if (pgm_info.code & PGM_PER) {
+ rc |= put_guest_lc(vcpu, pgm_info.per_code,
(u8 *) __LC_PER_CODE);
- rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
+ rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
(u8 *)__LC_PER_ATMID);
- rc |= put_guest_lc(vcpu, pgm_info->per_address,
+ rc |= put_guest_lc(vcpu, pgm_info.per_address,
(u64 *) __LC_PER_ADDRESS);
- rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
+ rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
(u8 *) __LC_PER_ACCESS_ID);
}
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
- rc |= put_guest_lc(vcpu, pgm_info->code,
+ rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
- return rc;
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+ inti->ext.ext_params);
+ vcpu->stat.deliver_service_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->ext.ext_params, 0);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ return rc ? -EFAULT : 0;
}
-static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
- int rc = 0;
+ int rc;
+
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_mchk_info *mchk = &inti->mchk;
+ int rc;
+
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk->mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
+ mchk->cr14, mchk->mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= put_guest_lc(vcpu, mchk->mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk->fixed_logout, sizeof(mchk->fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
+typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
+
+static const deliver_irq_t deliver_irq_funcs[] = {
+ [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_PROG] = __deliver_prog,
+ [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
+ [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
+ [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
+ [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
+ [IRQ_PEND_RESTART] = __deliver_restart,
+ [IRQ_PEND_SIGP_STOP] = __deliver_stop,
+ [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
+ [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+};
+
+static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt_info *inti)
+{
+ int rc;
switch (inti->type) {
- case KVM_S390_INT_EMERGENCY:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
- vcpu->stat.deliver_emergency_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->emerg.code, 0);
- rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, inti->emerg.code,
- (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- break;
- case KVM_S390_INT_EXTERNAL_CALL:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
- vcpu->stat.deliver_external_call++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->extcall.code, 0);
- rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, inti->extcall.code,
- (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- break;
- case KVM_S390_INT_CLOCK_COMP:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = deliver_ckc_interrupt(vcpu);
- break;
- case KVM_S390_INT_CPU_TIMER:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
- (u16 *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- break;
case KVM_S390_INT_SERVICE:
- VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
- vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
- rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- break;
- case KVM_S390_INT_PFAULT_INIT:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
- (u16 *) __LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *) __LC_EXT_PARAMS2);
+ rc = __deliver_service(vcpu, inti);
break;
case KVM_S390_INT_PFAULT_DONE:
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ rc = __deliver_pfault_done(vcpu, inti);
break;
case KVM_S390_INT_VIRTIO:
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
- rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
- break;
- case KVM_S390_SIGP_STOP:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
- vcpu->stat.deliver_stop_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- 0, 0);
- __set_intercept_indicator(vcpu, inti);
- break;
-
- case KVM_S390_SIGP_SET_PREFIX:
- VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
- inti->prefix.address);
- vcpu->stat.deliver_prefix_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->prefix.address, 0);
- kvm_s390_set_prefix(vcpu, inti->prefix.address);
- break;
-
- case KVM_S390_RESTART:
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
- vcpu->stat.deliver_restart_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- 0, 0);
- rc = write_guest_lc(vcpu,
- offsetof(struct _lowcore, restart_old_psw),
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __deliver_virtio(vcpu, inti);
break;
- case KVM_S390_PROGRAM_INT:
- VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
- inti->pgm.code,
- table[vcpu->arch.sie_block->ipa >> 14]);
- vcpu->stat.deliver_program_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->pgm.code, 0);
- rc = __deliver_prog_irq(vcpu, &inti->pgm);
- break;
-
case KVM_S390_MCHK:
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- inti->mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->mchk.cr14,
- inti->mchk.mcic);
- rc = kvm_s390_vcpu_store_status(vcpu,
- KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc = __deliver_mchk_floating(vcpu, inti);
break;
-
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- {
- __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr;
- __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- param0, param1);
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __deliver_io(vcpu, inti);
break;
- }
default:
BUG();
}
@@ -509,19 +738,6 @@ static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
return rc;
}
-static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
-{
- int rc;
-
- rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- return rc;
-}
-
/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -538,20 +754,11 @@ int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
struct kvm_s390_interrupt_info *inti;
- int rc = 0;
+ int rc;
- if (atomic_read(&li->active)) {
- spin_lock(&li->lock);
- list_for_each_entry(inti, &li->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&li->lock);
- }
+ rc = !!deliverable_local_irqs(vcpu);
if ((!rc) && atomic_read(&fi->active)) {
spin_lock(&fi->lock);
@@ -643,18 +850,15 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
spin_lock(&li->lock);
- list_for_each_entry_safe(inti, n, &li->list, list) {
- list_del(&inti->list);
- kfree(inti);
- }
- atomic_set(&li->active, 0);
+ li->pending_irqs = 0;
+ bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
+ memset(&li->irq, 0, sizeof(li->irq));
spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */
- atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
atomic_clear_mask(SIGP_CTRL_C,
&vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
@@ -664,34 +868,35 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
struct kvm_s390_interrupt_info *n, *inti = NULL;
+ deliver_irq_t func;
int deliver;
int rc = 0;
+ unsigned long irq_type;
+ unsigned long deliverable_irqs;
__reset_intercept_indicators(vcpu);
- if (atomic_read(&li->active)) {
- do {
- deliver = 0;
- spin_lock(&li->lock);
- list_for_each_entry_safe(inti, n, &li->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&li->list))
- atomic_set(&li->active, 0);
- spin_unlock(&li->lock);
- if (deliver) {
- rc = __do_deliver_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
- if (!rc && kvm_cpu_has_pending_timer(vcpu))
- rc = deliver_ckc_interrupt(vcpu);
+ /* pending ckc conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ if (kvm_cpu_has_pending_timer(vcpu))
+ set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+
+ do {
+ deliverable_irqs = deliverable_local_irqs(vcpu);
+ /* bits are in the order of interrupt priority */
+ irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ if (irq_type == IRQ_PEND_COUNT)
+ break;
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
+ } while (!rc && irq_type != IRQ_PEND_COUNT);
+
+ set_intercept_indicators_local(vcpu);
if (!rc && atomic_read(&fi->active)) {
do {
@@ -710,7 +915,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
atomic_set(&fi->active, 0);
spin_unlock(&fi->lock);
if (deliver) {
- rc = __do_deliver_interrupt(vcpu, inti);
+ rc = __deliver_floating_interrupt(vcpu, inti);
kfree(inti);
}
} while (!rc && deliver);
@@ -719,23 +924,26 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
return rc;
}
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+ li->irq.pgm = irq->u.pgm;
+ set_bit(IRQ_PEND_PROG, &li->pending_irqs);
+ return 0;
+}
- inti->type = KVM_S390_PROGRAM_INT;
- inti->pgm.code = code;
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq irq;
VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
+ 0, 1);
spin_lock(&li->lock);
- list_add(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ irq.u.pgm.code = code;
+ __inject_prog(vcpu, &irq);
BUG_ON(waitqueue_active(li->wq));
spin_unlock(&li->lock);
return 0;
@@ -745,27 +953,166 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
struct kvm_s390_pgm_info *pgm_info)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
-
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+ struct kvm_s390_irq irq;
+ int rc;
VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
pgm_info->code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
pgm_info->code, 0, 1);
-
- inti->type = KVM_S390_PROGRAM_INT;
- memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
spin_lock(&li->lock);
- list_add(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ irq.u.pgm = *pgm_info;
+ rc = __inject_prog(vcpu, &irq);
BUG_ON(waitqueue_active(li->wq));
spin_unlock(&li->lock);
+ return rc;
+}
+
+static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
+ irq->u.ext.ext_params, irq->u.ext.ext_params2);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
+ irq->u.ext.ext_params,
+ irq->u.ext.ext_params2, 2);
+
+ li->irq.ext = irq->u.ext;
+ set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
+int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+
+ VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+ irq->u.extcall.code);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
+ irq->u.extcall.code, 0, 2);
+
+ *extcall = irq->u.extcall;
+ set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
+
+ VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+ prefix->address);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
+ prefix->address, 0, 2);
+
+ *prefix = irq->u.prefix;
+ set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
+ struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+
+ set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
+
+ VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+ irq->u.emerg.code);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+ emerg->code, 0, 2);
+
+ set_bit(emerg->code, li->sigp_emerg_pending);
+ set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
+
+ VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+ mchk->mcic);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
+ mchk->mcic, 2);
+
+ /*
+ * Because repressible machine checks can be indicated along with
+ * exigent machine checks (PoP, Chapter 11, Interruption action),
+ * we need to combine cr14, mcic and the external damage code.
+ * The failing storage address and the logout area should not be or'ed
+ * together; we just indicate the last occurrence of the corresponding
+ * machine check.
+ */
+ mchk->cr14 |= irq->u.mchk.cr14;
+ mchk->mcic |= irq->u.mchk.mcic;
+ mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
+ mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
+ memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
+ sizeof(mchk->fixed_logout));
+ if (mchk->mcic & MCHK_EX_MASK)
+ set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ else if (mchk->mcic & MCHK_REP_MASK)
+ set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ return 0;
+}
+
+static int __inject_ckc(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
+ 0, 0, 2);
+
+ set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
+ 0, 0, 2);
+
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ return 0;
+}
+
+
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid)
{
@@ -851,7 +1198,17 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ switch (inti->type) {
+ case KVM_S390_MCHK:
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+ break;
+ default:
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ break;
+ }
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
@@ -920,92 +1277,85 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
__inject_vm(kvm, inti);
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt *s390int)
+int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_local_interrupt *li;
- struct kvm_s390_interrupt_info *inti;
+ irq->type = s390int->type;
+ switch (irq->type) {
+ case KVM_S390_PROGRAM_INT:
+ if (s390int->parm & 0xffff0000)
+ return -EINVAL;
+ irq->u.pgm.code = s390int->parm;
+ break;
+ case KVM_S390_SIGP_SET_PREFIX:
+ irq->u.prefix.address = s390int->parm;
+ break;
+ case KVM_S390_INT_EXTERNAL_CALL:
+ if (irq->u.extcall.code & 0xffff0000)
+ return -EINVAL;
+ irq->u.extcall.code = s390int->parm;
+ break;
+ case KVM_S390_INT_EMERGENCY:
+ if (irq->u.emerg.code & 0xffff0000)
+ return -EINVAL;
+ irq->u.emerg.code = s390int->parm;
+ break;
+ case KVM_S390_MCHK:
+ irq->u.mchk.mcic = s390int->parm64;
+ break;
+ }
+ return 0;
+}
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
- switch (s390int->type) {
+ spin_lock(&li->lock);
+ switch (irq->type) {
case KVM_S390_PROGRAM_INT:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- inti->type = s390int->type;
- inti->pgm.code = s390int->parm;
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
- s390int->parm);
+ irq->u.pgm.code);
+ rc = __inject_prog(vcpu, irq);
break;
case KVM_S390_SIGP_SET_PREFIX:
- inti->prefix.address = s390int->parm;
- inti->type = s390int->type;
- VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
- s390int->parm);
+ rc = __inject_set_prefix(vcpu, irq);
break;
case KVM_S390_SIGP_STOP:
+ rc = __inject_sigp_stop(vcpu, irq);
+ break;
case KVM_S390_RESTART:
+ rc = __inject_sigp_restart(vcpu, irq);
+ break;
case KVM_S390_INT_CLOCK_COMP:
+ rc = __inject_ckc(vcpu);
+ break;
case KVM_S390_INT_CPU_TIMER:
- VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
- inti->type = s390int->type;
+ rc = __inject_cpu_timer(vcpu);
break;
case KVM_S390_INT_EXTERNAL_CALL:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
- s390int->parm);
- inti->type = s390int->type;
- inti->extcall.code = s390int->parm;
+ rc = __inject_extcall(vcpu, irq);
break;
case KVM_S390_INT_EMERGENCY:
- if (s390int->parm & 0xffff0000) {
- kfree(inti);
- return -EINVAL;
- }
- VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
- inti->type = s390int->type;
- inti->emerg.code = s390int->parm;
+ rc = __inject_sigp_emergency(vcpu, irq);
break;
case KVM_S390_MCHK:
- VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
- s390int->parm64);
- inti->type = s390int->type;
- inti->mchk.mcic = s390int->parm64;
+ rc = __inject_mchk(vcpu, irq);
break;
case KVM_S390_INT_PFAULT_INIT:
- inti->type = s390int->type;
- inti->ext.ext_params2 = s390int->parm64;
+ rc = __inject_pfault_init(vcpu, irq);
break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
default:
- kfree(inti);
- return -EINVAL;
+ rc = -EINVAL;
}
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
- s390int->parm64, 2);
-
- li = &vcpu->arch.local_int;
- spin_lock(&li->lock);
- if (inti->type == KVM_S390_PROGRAM_INT)
- list_add(&inti->list, &li->list);
- else
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
- if (inti->type == KVM_S390_SIGP_STOP)
- li->action_bits |= ACTION_STOP_ON_STOP;
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
spin_unlock(&li->lock);
- kvm_s390_vcpu_wakeup(vcpu);
- return 0;
+ if (!rc)
+ kvm_s390_vcpu_wakeup(vcpu);
+ return rc;
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
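Taken together, the interrupt.c hunks above replace the kzalloc'd per-vcpu interrupt list with a pending_irqs bitmap whose bit order encodes delivery priority, dispatched through the deliver_irq_funcs table. The following is a minimal standalone sketch of that pattern, not kernel code: the IRQ names, handler stubs and the __builtin_ctzl() stand-in for find_first_bit() are illustrative only.

#include <stdio.h>

/* Illustrative IRQ types; bit position doubles as delivery priority (0 = highest). */
enum { IRQ_MCHK, IRQ_PROG, IRQ_EXT_CALL, IRQ_COUNT };

typedef int (*deliver_fn)(void);

static int deliver_mchk(void)     { puts("deliver machine check"); return 0; }
static int deliver_prog(void)     { puts("deliver program interrupt"); return 0; }
static int deliver_ext_call(void) { puts("deliver external call"); return 0; }

static const deliver_fn deliver_funcs[IRQ_COUNT] = {
	[IRQ_MCHK]     = deliver_mchk,
	[IRQ_PROG]     = deliver_prog,
	[IRQ_EXT_CALL] = deliver_ext_call,
};

/* Deliver every pending IRQ, highest priority (lowest set bit) first. */
static int deliver_pending(unsigned long *pending)
{
	int rc = 0;

	while (!rc && *pending) {
		int irq = __builtin_ctzl(*pending);	/* lowest set bit */
		deliver_fn fn = deliver_funcs[irq];

		*pending &= ~(1UL << irq);
		if (fn)
			rc = fn();
	}
	return rc;
}

int main(void)
{
	unsigned long pending = (1UL << IRQ_EXT_CALL) | (1UL << IRQ_MCHK);

	return deliver_pending(&pending);	/* delivers MCHK first, then the ext call */
}

In the patch itself the pending bit is cleared by each __deliver_*() handler while holding li->lock; the sketch clears it in the loop only for brevity.
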
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 55aade49b6d1..3e09801e3104 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -81,10 +81,17 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+ { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
+ { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+ { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
+ { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+ { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
+ { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
+ { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
{ "diagnose_10", VCPU_STAT(diagnose_10) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
@@ -271,7 +278,7 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_MEM_CLR_CMMA:
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
- page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
+ s390_reset_cmma(kvm->arch.gmap->mm);
srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
ret = 0;
@@ -453,6 +460,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.float_int.lock);
INIT_LIST_HEAD(&kvm->arch.float_int.list);
init_waitqueue_head(&kvm->arch.ipte_wq);
+ mutex_init(&kvm->arch.ipte_mutex);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
@@ -711,7 +719,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
}
spin_lock_init(&vcpu->arch.local_int.lock);
- INIT_LIST_HEAD(&vcpu->arch.local_int.list);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
vcpu->arch.local_int.wq = &vcpu->wq;
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
@@ -1114,13 +1121,15 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
unsigned long token)
{
struct kvm_s390_interrupt inti;
- inti.parm64 = token;
+ struct kvm_s390_irq irq;
if (start_token) {
- inti.type = KVM_S390_INT_PFAULT_INIT;
- WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
+ irq.u.ext.ext_params2 = token;
+ irq.type = KVM_S390_INT_PFAULT_INIT;
+ WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
} else {
inti.type = KVM_S390_INT_PFAULT_DONE;
+ inti.parm64 = token;
WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
}
}
@@ -1614,11 +1623,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
switch (ioctl) {
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
+ struct kvm_s390_irq s390irq;
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
break;
- r = kvm_s390_inject_vcpu(vcpu, &s390int);
+ if (s390int_to_s390irq(&s390int, &s390irq))
+ return -EINVAL;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
break;
}
case KVM_S390_STORE_STATUS:
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 244d02303182..a8f3d9b71c11 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -24,8 +24,6 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
/* declare vfacilities extern */
extern unsigned long *vfacilities;
-int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
-
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1 1
@@ -144,7 +142,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt *s390int);
+ struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid);
@@ -152,6 +150,10 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+/* implemented in intercept.c */
+void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
@@ -222,6 +224,9 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
+int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+ struct kvm_s390_irq *s390irq);
+
/* implemented in interrupt.c */
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 72bb2dd8b9cd..1be578d64dfc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -156,41 +156,42 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
return 0;
}
-static void __skey_check_enable(struct kvm_vcpu *vcpu)
+static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
+ int rc = 0;
if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
- return;
+ return rc;
- s390_enable_skey();
+ rc = s390_enable_skey();
trace_kvm_s390_skey_related_inst(vcpu);
vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+ return rc;
}
static int handle_skey(struct kvm_vcpu *vcpu)
{
- __skey_check_enable(vcpu);
+ int rc = __skey_check_enable(vcpu);
+ if (rc)
+ return rc;
vcpu->stat.instruction_storage_key++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- vcpu->arch.sie_block->gpsw.addr =
- __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
return 0;
}
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
-
vcpu->stat.instruction_ipte_interlock++;
- if (psw_bits(*psw).p)
+ if (psw_bits(vcpu->arch.sie_block->gpsw).p)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
- psw->addr = __rewind_psw(*psw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
return 0;
}
@@ -646,10 +647,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
- return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
- }
+ start = kvm_s390_logical_to_effective(vcpu, start);
switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
case 0x00000000:
@@ -665,6 +663,12 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
default:
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
}
+
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+ if (kvm_s390_check_low_addr_protection(vcpu, start))
+ return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+ }
+
while (start < end) {
unsigned long useraddr, abs_addr;
@@ -683,7 +687,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
- __skey_check_enable(vcpu);
+ int rc = __skey_check_enable(vcpu);
+
+ if (rc)
+ return rc;
if (set_guest_storage_key(current->mm, useraddr,
vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
@@ -718,8 +725,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Rewind PSW to repeat the ESSA instruction */
- vcpu->arch.sie_block->gpsw.addr =
- __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+ kvm_s390_rewind_psw(vcpu, 4);
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
down_read(&gmap->mm->mmap_sem);
@@ -762,8 +768,8 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u32 val = 0;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u32 ctl_array[16];
u64 ga;
vcpu->stat.instruction_lctl++;
@@ -779,19 +785,20 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
+ nr_regs = ((reg3 - reg1) & 0xf) + 1;
+ rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
+ nr_regs = 0;
do {
- rc = read_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
- vcpu->arch.sie_block->gcr[reg] |= val;
- ga += 4;
+ vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
@@ -799,9 +806,9 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int reg, rc, nr_regs;
+ u32 ctl_array[16];
u64 ga;
- u32 val;
- int reg, rc;
vcpu->stat.instruction_stctl++;
@@ -817,26 +824,24 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
reg = reg1;
+ nr_regs = 0;
do {
- val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
- rc = write_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- ga += 4;
+ ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
- return 0;
+ rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u64 ga, val;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u64 ctl_array[16];
+ u64 ga;
vcpu->stat.instruction_lctlg++;
@@ -848,22 +853,22 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- reg = reg1;
-
VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
+ nr_regs = ((reg3 - reg1) & 0xf) + 1;
+ rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ reg = reg1;
+ nr_regs = 0;
do {
- rc = read_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- vcpu->arch.sie_block->gcr[reg] = val;
- ga += 8;
+ vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
return 0;
}
@@ -871,8 +876,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- u64 ga, val;
- int reg, rc;
+ int reg, rc, nr_regs;
+ u64 ctl_array[16];
+ u64 ga;
vcpu->stat.instruction_stctg++;
@@ -884,23 +890,19 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- reg = reg1;
-
VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
+ reg = reg1;
+ nr_regs = 0;
do {
- val = vcpu->arch.sie_block->gcr[reg];
- rc = write_guest(vcpu, ga, &val, sizeof(val));
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
- ga += 8;
+ ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
if (reg == reg3)
break;
reg = (reg + 1) % 16;
} while (1);
-
- return 0;
+ rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static const intercept_handler_t eb_handlers[256] = {
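The priv.c hunks switch LCTL/STCTL and their 64-bit variants from one guest access per control register to a single batched read_guest()/write_guest() of nr_regs entries, where nr_regs uses the modulo-16 wraparound of the reg1..reg3 range. Below is a rough standalone sketch of that counting and copy pattern, assuming the buffer has already been filled from guest memory; the names are illustrative, not the kernel's guest-access API.

#include <stdint.h>
#include <stdio.h>

/* Number of control registers named by reg1..reg3, wrapping modulo 16. */
static int nr_ctl_regs(int reg1, int reg3)
{
	return ((reg3 - reg1) & 0xf) + 1;
}

/* Load the low halves of CRs reg1..reg3 from a buffer fetched from the guest. */
static void load_ctl_regs(uint64_t gcr[16], int reg1, int reg3,
			  const uint32_t *buf)
{
	int reg = reg1, i = 0;

	do {
		gcr[reg] &= 0xffffffff00000000ULL;	/* keep the high half */
		gcr[reg] |= buf[i++];			/* low 32 bits from the guest */
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
}

int main(void)
{
	uint64_t gcr[16] = { 0 };
	uint32_t buf[16];
	int reg1 = 14, reg3 = 1;		/* wraps around: 14, 15, 0, 1 */
	int n = nr_ctl_regs(reg1, reg3);	/* == 4 */
	int i;

	for (i = 0; i < n; i++)
		buf[i] = 0x1000 + i;		/* pretend read_guest() filled this */
	load_ctl_regs(gcr, reg1, reg3, buf);
	printf("loaded %d regs, gcr[0] = %llx\n", n,
	       (unsigned long long)gcr[0]);
	return 0;
}

For the load variants, batching also means a guest-access fault is injected before any control register is modified, instead of failing halfway through the range.
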
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cf243ba3d50f..6651f9f73973 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,20 +20,13 @@
#include "kvm-s390.h"
#include "trace.h"
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
int cpuflags;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
-
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags);
@@ -48,55 +41,53 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
rc = SIGP_CC_STATUS_STORED;
}
- VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+ VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
+ rc);
return rc;
}
-static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
{
- struct kvm_s390_interrupt s390int = {
+ struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EMERGENCY,
- .parm = vcpu->vcpu_id,
+ .u.emerg.code = vcpu->vcpu_id,
};
- struct kvm_vcpu *dst_vcpu = NULL;
int rc = 0;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
- rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (!rc)
- VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+ VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
+ dst_vcpu->vcpu_id);
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
+static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
+{
+ return __inject_sigp_emergency(vcpu, dst_vcpu);
+}
+
+static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu,
u16 asn, u64 *reg)
{
- struct kvm_vcpu *dst_vcpu = NULL;
const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
u16 p_asn, s_asn;
psw_t *psw;
u32 flags;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
psw = &dst_vcpu->arch.sie_block->gpsw;
p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
- /* Deliver the emergency signal? */
+ /* Inject the emergency signal? */
if (!(flags & CPUSTAT_STOPPED)
|| (psw->mask & psw_int_mask) != psw_int_mask
|| ((flags & CPUSTAT_WAIT) && psw->addr != 0)
|| (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
- return __sigp_emergency(vcpu, cpu_addr);
+ return __inject_sigp_emergency(vcpu, dst_vcpu);
} else {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -104,23 +95,19 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
}
}
-static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __sigp_external_call(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
{
- struct kvm_s390_interrupt s390int = {
+ struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL,
- .parm = vcpu->vcpu_id,
+ .u.extcall.code = vcpu->vcpu_id,
};
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
- rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
if (!rc)
- VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+ VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
+ dst_vcpu->vcpu_id);
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
@@ -128,29 +115,20 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
- struct kvm_s390_interrupt_info *inti;
int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
- if (!inti)
- return -ENOMEM;
- inti->type = KVM_S390_SIGP_STOP;
-
spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP) {
/* another SIGP STOP is pending */
- kfree(inti);
rc = SIGP_CC_BUSY;
goto out;
}
if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
- kfree(inti);
if ((action & ACTION_STORE_ON_STOP) != 0)
rc = -ESHUTDOWN;
goto out;
}
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
li->action_bits |= action;
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
kvm_s390_vcpu_wakeup(dst_vcpu);
@@ -160,23 +138,27 @@ out:
return rc;
}
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
+ rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
+ return rc;
+}
- rc = __inject_sigp_stop(dst_vcpu, action);
+static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
+{
+ int rc;
- VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+ rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
+ ACTION_STORE_ON_STOP);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+ dst_vcpu->vcpu_id);
- if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+ if (rc == -ESHUTDOWN) {
/* If the CPU has already been stopped, we still have
* to save the status when doing stop-and-store. This
* has to be done after unlocking all spinlocks. */
@@ -212,18 +194,12 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
return rc;
}
-static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
- u64 *reg)
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
+ u32 address, u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
- struct kvm_s390_interrupt_info *inti;
int rc;
- if (cpu_addr < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
/*
@@ -238,46 +214,34 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
return SIGP_CC_STATUS_STORED;
}
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return SIGP_CC_BUSY;
-
spin_lock(&li->lock);
/* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
rc = SIGP_CC_STATUS_STORED;
- kfree(inti);
goto out_li;
}
- inti->type = KVM_S390_SIGP_SET_PREFIX;
- inti->prefix.address = address;
-
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
+ li->irq.prefix.address = address;
+ set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
kvm_s390_vcpu_wakeup(dst_vcpu);
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
+ address);
out_li:
spin_unlock(&li->lock);
return rc;
}
-static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
- u32 addr, u64 *reg)
+static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu,
+ u32 addr, u64 *reg)
{
- struct kvm_vcpu *dst_vcpu = NULL;
int flags;
int rc;
- if (cpu_id < KVM_MAX_VCPUS)
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
spin_unlock(&dst_vcpu->arch.local_int.lock);
@@ -297,19 +261,12 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
return rc;
}
-static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
- u64 *reg)
+static int __sigp_sense_running(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_local_interrupt *li;
- struct kvm_vcpu *dst_vcpu = NULL;
int rc;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
-
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
/* running */
@@ -321,26 +278,19 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
rc = SIGP_CC_STATUS_STORED;
}
- VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
- rc);
+ VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
+ dst_vcpu->vcpu_id, rc);
return rc;
}
-/* Test whether the destination CPU is available and not busy */
-static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
+static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u8 order_code)
{
- struct kvm_s390_local_interrupt *li;
- int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
- struct kvm_vcpu *dst_vcpu = NULL;
-
- if (cpu_addr >= KVM_MAX_VCPUS)
- return SIGP_CC_NOT_OPERATIONAL;
+ struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
+ /* handle (RE)START in user space */
+ int rc = -EOPNOTSUPP;
- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
- li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
rc = SIGP_CC_BUSY;
@@ -349,90 +299,131 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
return rc;
}
-int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu, u8 order_code)
{
- int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
- int r3 = vcpu->arch.sie_block->ipa & 0x000f;
- u32 parameter;
- u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
- u8 order_code;
- int rc;
+ /* handle (INITIAL) CPU RESET in user space */
+ return -EOPNOTSUPP;
+}
- /* sigp in userspace can exit */
- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
- return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *dst_vcpu)
+{
+ /* handle unknown orders in user space */
+ return -EOPNOTSUPP;
+}
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+ u16 cpu_addr, u32 parameter, u64 *status_reg)
+{
+ int rc;
+ struct kvm_vcpu *dst_vcpu;
- if (r1 % 2)
- parameter = vcpu->run->s.regs.gprs[r1];
- else
- parameter = vcpu->run->s.regs.gprs[r1 + 1];
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
- trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
- rc = __sigp_sense(vcpu, cpu_addr,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
- rc = __sigp_external_call(vcpu, cpu_addr);
+ rc = __sigp_external_call(vcpu, dst_vcpu);
break;
case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
- rc = __sigp_emergency(vcpu, cpu_addr);
+ rc = __sigp_emergency(vcpu, dst_vcpu);
break;
case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
+ rc = __sigp_stop(vcpu, dst_vcpu);
break;
case SIGP_STOP_AND_STORE_STATUS:
- vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
- ACTION_STOP_ON_STOP);
+ vcpu->stat.instruction_sigp_stop_store_status++;
+ rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
break;
case SIGP_STORE_STATUS_AT_ADDRESS:
- rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
- break;
- case SIGP_SET_ARCHITECTURE:
- vcpu->stat.instruction_sigp_arch++;
- rc = __sigp_set_arch(vcpu, parameter);
+ vcpu->stat.instruction_sigp_store_status++;
+ rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
+ status_reg);
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
- rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
break;
case SIGP_COND_EMERGENCY_SIGNAL:
- rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
- &vcpu->run->s.regs.gprs[r1]);
+ vcpu->stat.instruction_sigp_cond_emergency++;
+ rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
+ status_reg);
break;
case SIGP_SENSE_RUNNING:
vcpu->stat.instruction_sigp_sense_running++;
- rc = __sigp_sense_running(vcpu, cpu_addr,
- &vcpu->run->s.regs.gprs[r1]);
+ rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
break;
case SIGP_START:
- rc = sigp_check_callable(vcpu, cpu_addr);
- if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
- rc = -EOPNOTSUPP; /* Handle START in user space */
+ vcpu->stat.instruction_sigp_start++;
+ rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
break;
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
- rc = sigp_check_callable(vcpu, cpu_addr);
- if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
- VCPU_EVENT(vcpu, 4,
- "sigp restart %x to handle userspace",
- cpu_addr);
- /* user space must know about restart */
- rc = -EOPNOTSUPP;
- }
+ rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ vcpu->stat.instruction_sigp_init_cpu_reset++;
+ rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+ break;
+ case SIGP_CPU_RESET:
+ vcpu->stat.instruction_sigp_cpu_reset++;
+ rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
+ break;
+ default:
+ vcpu->stat.instruction_sigp_unknown++;
+ rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
+ }
+
+ if (rc == -EOPNOTSUPP)
+ VCPU_EVENT(vcpu, 4,
+ "sigp order %u -> cpu %x: handled in user space",
+ order_code, dst_vcpu->vcpu_id);
+
+ return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+ int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+ u32 parameter;
+ u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+ u8 order_code;
+ int rc;
+
+ /* sigp in userspace can exit */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+ if (r1 % 2)
+ parameter = vcpu->run->s.regs.gprs[r1];
+ else
+ parameter = vcpu->run->s.regs.gprs[r1 + 1];
+
+ trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
+ switch (order_code) {
+ case SIGP_SET_ARCHITECTURE:
+ vcpu->stat.instruction_sigp_arch++;
+ rc = __sigp_set_arch(vcpu, parameter);
break;
default:
- return -EOPNOTSUPP;
+ rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
+ parameter,
+ &vcpu->run->s.regs.gprs[r1]);
}
if (rc < 0)
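The sigp.c rework hoists destination-CPU validation out of the individual order handlers into handle_sigp_dst(), so each __sigp_*() helper receives an already looked-up dst_vcpu, and orders that must be completed in user space simply return -EOPNOTSUPP. A toy sketch of that shape follows; the cpu table, order codes and error values are invented for illustration.

#include <stdio.h>
#include <errno.h>

#define MAX_CPUS 4

enum { ORDER_SENSE = 0x01, ORDER_STOP = 0x05, ORDER_RESTART = 0x06 };

struct cpu { int stopped; };

static struct cpu cpus[2] = { { 0 }, { 1 } };
static struct cpu *nr_to_cpu[MAX_CPUS] = { &cpus[0], &cpus[1] };

static int do_sense(struct cpu *dst)   { return dst->stopped ? 1 : 0; }
static int do_stop(struct cpu *dst)    { dst->stopped = 1; return 0; }
static int do_restart(struct cpu *dst) { (void)dst; return -EOPNOTSUPP; /* user space */ }

/* Validate the destination exactly once, then dispatch by order code. */
static int handle_order(unsigned int addr, int order)
{
	struct cpu *dst;

	if (addr >= MAX_CPUS || !(dst = nr_to_cpu[addr]))
		return -ENODEV;			/* "not operational" */

	switch (order) {
	case ORDER_SENSE:	return do_sense(dst);
	case ORDER_STOP:	return do_stop(dst);
	case ORDER_RESTART:	return do_restart(dst);
	default:		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("sense cpu 1   -> %d\n", handle_order(1, ORDER_SENSE));
	printf("restart cpu 0 -> %d\n", handle_order(0, ORDER_RESTART));
	printf("sense cpu 3   -> %d\n", handle_order(3, ORDER_SENSE));
	return 0;
}

The real handler keeps SIGP SET ARCHITECTURE outside this per-destination path, since that order has no destination CPU.
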
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a2b81d6ce8a5..811937bb90be 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -261,8 +261,8 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
return;
if (!printk_ratelimit())
return;
- printk(KERN_ALERT "User process fault: interruption code 0x%X ",
- regs->int_code);
+ printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+ regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk(KERN_CONT "\n");
printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
@@ -548,7 +548,7 @@ out:
return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs)
+void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
int fault;
@@ -574,8 +574,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
if (unlikely(fault))
do_fault_error(regs, fault);
}
+NOKPROBE_SYMBOL(do_protection_exception);
-void __kprobes do_dat_exception(struct pt_regs *regs)
+void do_dat_exception(struct pt_regs *regs)
{
int access, fault;
@@ -584,6 +585,7 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
if (unlikely(fault))
do_fault_error(regs, fault);
}
+NOKPROBE_SYMBOL(do_dat_exception);
#ifdef CONFIG_PFAULT
/*
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 3fef3b299665..426c9d462d1c 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
}
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
int nr, i, j;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..be99357d238c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
break;
/* Walk the process page table, lock and get pte pointer */
ptep = get_locked_pte(gmap->mm, addr, &ptl);
- if (unlikely(!ptep))
- continue;
+ VM_BUG_ON(!ptep);
/* Set notification bit in the pgste of the pte */
entry = *ptep;
if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
gaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
- spin_unlock(ptl);
+ pte_unmap_unlock(ptep, ptl);
}
up_read(&gmap->mm->mmap_sem);
return rc;
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
__free_page(page);
}
-static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- pte_t *start_pte, *pte;
- spinlock_t *ptl;
- pgste_t pgste;
-
- start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- pte = start_pte;
- do {
- pgste = pgste_get_lock(pte);
- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
- if (init_skey) {
- unsigned long address;
-
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
- PGSTE_GR_BIT | PGSTE_GC_BIT);
-
- /* skip invalid and not writable pages */
- if (pte_val(*pte) & _PAGE_INVALID ||
- !(pte_val(*pte) & _PAGE_WRITE)) {
- pgste_set_unlock(pte, pgste);
- continue;
- }
-
- address = pte_val(*pte) & PAGE_MASK;
- page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
- }
- pgste_set_unlock(pte, pgste);
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(start_pte, ptl);
-
- return addr;
-}
-
-static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- unsigned long next;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none_or_clear_bad(pmd))
- continue;
- next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
- } while (pmd++, addr = next, addr != end);
-
- return addr;
-}
-
-static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- unsigned long next;
- pud_t *pud;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
- } while (pud++, addr = next, addr != end);
-
- return addr;
-}
-
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
- unsigned long end, bool init_skey)
-{
- unsigned long addr, next;
- pgd_t *pgd;
-
- down_write(&mm->mmap_sem);
- if (init_skey && mm_use_skey(mm))
- goto out_up;
- addr = start;
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
- } while (pgd++, addr = next, addr != end);
- if (init_skey)
- current->mm->context.use_skey = 1;
-out_up:
- up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL(page_table_reset_pgste);
-
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq)
{
@@ -936,7 +844,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
down_read(&mm->mmap_sem);
retry:
- ptep = get_locked_pte(current->mm, addr, &ptl);
+ ptep = get_locked_pte(mm, addr, &ptl);
if (unlikely(!ptep)) {
up_read(&mm->mmap_sem);
return -EFAULT;
@@ -980,6 +888,45 @@ retry:
}
EXPORT_SYMBOL(set_guest_storage_key);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+{
+ spinlock_t *ptl;
+ pgste_t pgste;
+ pte_t *ptep;
+ uint64_t physaddr;
+ unsigned long key = 0;
+
+ down_read(&mm->mmap_sem);
+ ptep = get_locked_pte(mm, addr, &ptl);
+ if (unlikely(!ptep)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+ pgste = pgste_get_lock(ptep);
+
+ if (pte_val(*ptep) & _PAGE_INVALID) {
+ key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
+ key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+ } else {
+ physaddr = pte_val(*ptep) & PAGE_MASK;
+ key = page_get_storage_key(physaddr);
+
+ /* Reflect guest's logical view, not physical */
+ if (pgste_val(pgste) & PGSTE_GR_BIT)
+ key |= _PAGE_REFERENCED;
+ if (pgste_val(pgste) & PGSTE_GC_BIT)
+ key |= _PAGE_CHANGED;
+ }
+
+ pgste_set_unlock(ptep, pgste);
+ pte_unmap_unlock(ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return key;
+}
+EXPORT_SYMBOL(get_guest_storage_key);
+
#else /* CONFIG_PGSTE */
static inline int page_table_with_pgste(struct page *page)
@@ -992,11 +939,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
return NULL;
}
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
- unsigned long end, bool init_skey)
-{
-}
-
static inline void page_table_free_pgste(unsigned long *table)
{
}
@@ -1347,13 +1289,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
-void s390_enable_skey(void)
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+ unsigned long ptev;
+ pgste_t pgste;
+
+ pgste = pgste_get_lock(pte);
+	/*
+	 * Remove all zero page mappings; once the policy to forbid zero page
+	 * mappings is in place, subsequent faults for these pages will get
+	 * fresh anonymous pages.
+	 */
+ if (is_zero_pfn(pte_pfn(*pte))) {
+ ptep_flush_direct(walk->mm, addr, pte);
+ pte_val(*pte) = _PAGE_INVALID;
+ }
+ /* Clear storage key */
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+ PGSTE_GR_BIT | PGSTE_GC_BIT);
+ ptev = pte_val(*pte);
+ if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+ pgste_set_unlock(pte, pgste);
+ return 0;
+}
+
+int s390_enable_skey(void)
+{
+ struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int rc = 0;
+
+ down_write(&mm->mmap_sem);
+ if (mm_use_skey(mm))
+ goto out_up;
+
+ mm->context.use_skey = 1;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ MADV_UNMERGEABLE, &vma->vm_flags)) {
+ mm->context.use_skey = 0;
+ rc = -ENOMEM;
+ goto out_up;
+ }
+ }
+ mm->def_flags &= ~VM_MERGEABLE;
+
+ walk.mm = mm;
+ walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+ up_write(&mm->mmap_sem);
+ return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgste_t pgste;
+
+ pgste = pgste_get_lock(pte);
+ pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ pgste_set_unlock(pte, pgste);
+ return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+ struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+ down_write(&mm->mmap_sem);
+ walk.mm = mm;
+ walk_page_range(0, TASK_SIZE, &walk);
+ up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
* Test and reset if a guest page is dirty
*/
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index a9e1dc4ae442..805d8b29193a 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
- pci_event.o pci_debug.o pci_insn.o
+ pci_event.o pci_debug.o pci_insn.o pci_mmio.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index d59c82569750..3290f11ae1d9 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -369,8 +369,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
- msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
+ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
/* Allocate adapter summary indicator bit */
rc = -EIO;
@@ -474,7 +473,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
len = pci_resource_len(pdev, i);
if (!len)
continue;
- pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
+ pdev->resource[i].start =
+ (resource_size_t __force) pci_iomap(pdev, i, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
}
}
@@ -489,7 +489,8 @@ static void zpci_unmap_resources(struct zpci_dev *zdev)
len = pci_resource_len(pdev, i);
if (!len)
continue;
- pci_iounmap(pdev, (void *) pdev->resource[i].start);
+ pci_iounmap(pdev, (void __iomem __force *)
+ pdev->resource[i].start);
}
}
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 6e22a247de9b..d6e411ed8b1f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -62,6 +62,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
zdev->tlb_refresh = response->refresh;
zdev->dma_mask = response->dasm;
zdev->msi_addr = response->msia;
+ zdev->max_msi = response->noi;
zdev->fmb_update = response->mui;
switch (response->version) {
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index eec598c5939f..3229a2e570df 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -158,10 +158,7 @@ int __init zpci_debug_init(void)
void zpci_debug_exit(void)
{
- if (pci_debug_msg_id)
- debug_unregister(pci_debug_msg_id);
- if (pci_debug_err_id)
- debug_unregister(pci_debug_err_id);
-
+ debug_unregister(pci_debug_msg_id);
+ debug_unregister(pci_debug_err_id);
debugfs_remove(debugfs_root);
}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
new file mode 100644
index 000000000000..62c5ea6d8682
--- /dev/null
+++ b/arch/s390/pci/pci_mmio.c
@@ -0,0 +1,115 @@
+/*
+ * Access to PCI I/O memory from user space programs.
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+
+static long get_pfn(unsigned long user_addr, unsigned long access,
+ unsigned long *pfn)
+{
+ struct vm_area_struct *vma;
+ long ret;
+
+ down_read(&current->mm->mmap_sem);
+ ret = -EINVAL;
+ vma = find_vma(current->mm, user_addr);
+ if (!vma)
+ goto out;
+ ret = -EACCES;
+ if (!(vma->vm_flags & access))
+ goto out;
+ ret = follow_pfn(vma, user_addr, pfn);
+out:
+ up_read(&current->mm->mmap_sem);
+ return ret;
+}
+
+SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ const void __user *, user_buffer, size_t, length)
+{
+ u8 local_buf[64];
+ void __iomem *io_addr;
+ void *buf;
+ unsigned long pfn;
+ long ret;
+
+ if (!zpci_is_enabled())
+ return -ENODEV;
+
+ if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+ return -EINVAL;
+ if (length > 64) {
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ } else
+ buf = local_buf;
+
+ ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
+ if (ret)
+ goto out;
+ io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+
+ ret = -EFAULT;
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ goto out;
+
+ if (copy_from_user(buf, user_buffer, length))
+ goto out;
+
+ memcpy_toio(io_addr, buf, length);
+ ret = 0;
+out:
+ if (buf != local_buf)
+ kfree(buf);
+ return ret;
+}
+
+SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ void __user *, user_buffer, size_t, length)
+{
+ u8 local_buf[64];
+ void __iomem *io_addr;
+ void *buf;
+ unsigned long pfn;
+ long ret;
+
+ if (!zpci_is_enabled())
+ return -ENODEV;
+
+ if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+ return -EINVAL;
+ if (length > 64) {
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ } else
+ buf = local_buf;
+
+ ret = get_pfn(mmio_addr, VM_READ, &pfn);
+ if (ret)
+ goto out;
+ io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+
+ ret = -EFAULT;
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ goto out;
+
+ memcpy_fromio(buf, io_addr, length);
+
+ if (copy_to_user(user_buffer, buf, length))
+ goto out;
+
+ ret = 0;
+out:
+ if (buf != local_buf)
+ kfree(buf);
+ return ret;
+}
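For context, a minimal userspace sketch of how the two syscalls added above (s390_pci_mmio_write/s390_pci_mmio_read) might be driven. The syscall numbers 352/353 and the fallback defines below are assumptions for illustration; a real caller would pass an MMIO address obtained by mmap()ing a PCI BAR resource rather than address 0.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_pci_mmio_write
#define __NR_s390_pci_mmio_write 352	/* assumed s390 syscall number */
#endif
#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read 353	/* assumed s390 syscall number */
#endif

/* Thin wrappers around the raw syscalls; both take the MMIO address,
 * a user buffer and a length, mirroring the SYSCALL_DEFINE3 entries. */
static long pci_mmio_write(unsigned long mmio_addr, const void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
}

static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}

int main(void)
{
	uint32_t val = 0;

	/* A real MMIO address would come from mmap() of a PCI resource;
	 * address 0 here only exercises the error path (errno EINVAL, or
	 * ENODEV when zPCI is not enabled). */
	if (pci_mmio_read(0UL, &val, sizeof(val)) < 0)
		perror("s390_pci_mmio_read");
	return 0;
}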
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 46461c19f284..83ed116d414c 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -5,7 +5,6 @@ header-y +=
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index a1403470f80e..c6b6ee5f38b2 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,6 +16,7 @@ config SUPERH
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
+ select ARCH_HAS_GCOV_PROFILE_ALL
select PERF_USE_VMALLOC
select HAVE_DEBUG_KMEMLEAK
select HAVE_KERNEL_GZIP
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 5620e33c18a0..d4b01d4cc102 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -338,7 +338,7 @@ static struct soc_camera_platform_info camera_info = {
.format_name = "UYVY",
.format_depth = 16,
.format = {
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
.field = V4L2_FIELD_NONE,
.width = 640,
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 5a6c9acff0d2..654ebb6bd5d8 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -8,7 +8,6 @@ generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += fcntl.h
-generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index df922f52d76d..705408766ab0 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 888f6260b4ec..641f55cb61c3 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 5162fad912ce..d1064e46efe8 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
-MODULE_ALIAS("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 3065bc61f9d3..d11500972994 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 09a9ea1dfb69..64c7ff5f72a9 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
-MODULE_ALIAS("md5");
+MODULE_ALIAS_CRYPTO("md5");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 6cd5f29e1e0d..1b3e47accc74 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 04f555ab2680..285268ca9279 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -135,7 +135,7 @@ static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
sha256_sparc64_final(desc, D);
memcpy(hash, D, SHA224_DIGEST_SIZE);
- memset(D, 0, SHA256_DIGEST_SIZE);
+ memzero_explicit(D, SHA256_DIGEST_SIZE);
return 0;
}
@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
#include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index f04d1994d19a..11eb36c3fc8c 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -139,7 +139,7 @@ static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
sha512_sparc64_final(desc, D);
memcpy(hash, D, 48);
- memset(D, 0, 64);
+ memzero_explicit(D, 64);
return 0;
}
@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
#include "crop_devid.c"
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index f5f94ce1692c..94f36e7086a7 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,7 +6,6 @@ generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
-generic-y += hash.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 305dcc3dc721..76648941fea7 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -37,7 +37,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
-#define read_barrier_depends() do { } while(0)
+#define dma_rmb() rmb()
+#define dma_wmb() wmb()
+
#define set_mb(__var, __value) \
do { __var = __value; membar_safe("#StoreLoad"); } while(0)
@@ -51,7 +53,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define smp_wmb() __asm__ __volatile__("":::"memory")
#endif
-#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
#define smp_store_release(p, v) \
do { \
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 58ab64de25d2..6e9004aa6f25 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -61,6 +61,7 @@ void ldc_free(struct ldc_channel *lp);
/* Register TX and RX queues of the link with the hypervisor. */
int ldc_bind(struct ldc_channel *lp);
+void ldc_unbind(struct ldc_channel *lp);
/* For non-RAW protocols we need to complete a handshake before
* communication can proceed. ldc_connect() does that, if the
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index c55291e5b83e..f005ccac91cc 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -238,7 +238,6 @@ static const struct of_device_id ecpp_match[] = {
static struct platform_driver ecpp_driver = {
.driver = {
.name = "ecpp",
- .owner = THIS_MODULE,
.of_match_table = ecpp_match,
},
.probe = ecpp_probe,
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index d758c8d8f47d..8174f6cdbbbb 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -247,6 +247,25 @@ struct vio_net_desc {
struct ldc_trans_cookie cookies[0];
};
+struct vio_net_dext {
+ u8 flags;
+#define VNET_PKT_HASH 0x01
+#define VNET_PKT_HCK_IPV4_HDRCKSUM 0x02
+#define VNET_PKT_HCK_FULLCKSUM 0x04
+#define VNET_PKT_IPV4_LSO 0x08
+#define VNET_PKT_HCK_IPV4_HDRCKSUM_OK 0x10
+#define VNET_PKT_HCK_FULLCKSUM_OK 0x20
+
+ u8 vnet_hashval;
+ u16 ipv4_lso_mss;
+ u32 resv3;
+};
+
+static inline struct vio_net_dext *vio_net_ext(struct vio_net_desc *desc)
+{
+ return (struct vio_net_dext *)&desc->cookies[2];
+}
+
#define VIO_MAX_RING_COOKIES 24
struct vio_dring_state {
@@ -281,6 +300,21 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
((dr->prod - dr->cons) & (ring_size - 1)) - 1);
}
+static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
+{
+ if (++index == dr->num_entries)
+ index = 0;
+ return index;
+}
+
+static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
+{
+ if (index == 0)
+ return dr->num_entries - 1;
+ else
+ return index - 1;
+}
+
#define VIO_MAX_TYPE_LEN 32
#define VIO_MAX_COMPAT_LEN 64
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 54d9608681b6..e6a16c40be5f 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -76,6 +76,11 @@
#define SO_BPF_EXTENSIONS 0x0032
+#define SO_INCOMING_CPU 0x0033
+
+#define SO_ATTACH_BPF 0x0034
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 46d83842eddc..6f35f4df17f2 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -415,8 +415,9 @@
#define __NR_getrandom 347
#define __NR_memfd_create 348
#define __NR_bpf 349
+#define __NR_execveat 350
-#define NR_syscalls 350
+#define NR_syscalls 351
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index eefda32b595e..742f6c4436bf 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -178,7 +178,6 @@ MODULE_DEVICE_TABLE(of, apc_match);
static struct platform_driver apc_driver = {
.driver = {
.name = "apc",
- .owner = THIS_MODULE,
.of_match_table = apc_match,
},
.probe = apc_probe,
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 86e55778e4af..086435c17981 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -135,7 +135,6 @@ static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
- .owner = THIS_MODULE,
.of_match_table = auxio_match,
},
};
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 052b5a44318f..4696958299e9 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -152,7 +152,6 @@ static struct platform_driver clock_board_driver = {
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
- .owner = THIS_MODULE,
.of_match_table = clock_board_match,
},
};
@@ -257,7 +256,6 @@ static struct platform_driver fhc_driver = {
.probe = fhc_probe,
.driver = {
.name = "fhc",
- .owner = THIS_MODULE,
.of_match_table = fhc_match,
},
};
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index dbb210d74e21..0de4bcb8261f 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -810,7 +810,6 @@ MODULE_DEVICE_TABLE(of, us3mc_match);
static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
- .owner = THIS_MODULE,
.of_match_table = us3mc_match,
},
.probe = us3mc_probe,
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 4310332872d4..274a9f59d95c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1222,11 +1222,12 @@ out_err:
}
EXPORT_SYMBOL(ldc_alloc);
-void ldc_free(struct ldc_channel *lp)
+void ldc_unbind(struct ldc_channel *lp)
{
if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
free_irq(lp->cfg.rx_irq, lp);
free_irq(lp->cfg.tx_irq, lp);
+ lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
}
if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
@@ -1240,10 +1241,15 @@ void ldc_free(struct ldc_channel *lp)
lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
}
- hlist_del(&lp->list);
+ ldc_set_state(lp, LDC_STATE_INIT);
+}
+EXPORT_SYMBOL(ldc_unbind);
+void ldc_free(struct ldc_channel *lp)
+{
+ ldc_unbind(lp);
+ hlist_del(&lp->list);
kfree(lp->mssbuf);
-
ldc_iommu_release(lp);
kfree(lp);
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index c8bf26edfa7c..3382f7b3eeef 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -708,7 +708,6 @@ static struct of_device_id grpci1_of_match[] = {
static struct platform_driver grpci1_of_driver = {
.driver = {
.name = "grpci1",
- .owner = THIS_MODULE,
.of_match_table = grpci1_of_match,
},
.probe = grpci1_of_probe,
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index e433a4d69fe0..94e392bdee7d 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -900,7 +900,6 @@ static struct of_device_id grpci2_of_match[] = {
static struct platform_driver grpci2_of_driver = {
.driver = {
.name = "grpci2",
- .owner = THIS_MODULE,
.of_match_table = grpci2_of_match,
},
.probe = grpci2_of_probe,
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index ea2bad306f93..71e16f2241c2 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -368,7 +368,7 @@ static struct smp_funcall {
unsigned long arg5;
unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
-} ccall_info;
+} ccall_info __attribute__((aligned(8)));
static DEFINE_SPINLOCK(cross_call_lock);
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index e60fc6a67e9b..11a1f0d289d2 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -508,7 +508,6 @@ static const struct of_device_id fire_match[] = {
static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = fire_match,
},
.probe = fire_probe,
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index c647634ead2b..7dce27b3c761 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -604,7 +604,6 @@ static const struct of_device_id psycho_match[] = {
static struct platform_driver psycho_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = psycho_match,
},
.probe = psycho_probe,
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 6f00d27e8dac..00a616ffa35b 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -600,7 +600,6 @@ static const struct of_device_id sabre_match[] = {
static struct platform_driver sabre_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = sabre_match,
},
.probe = sabre_probe,
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index f9c6813c132d..c664d3e3aa8d 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1495,7 +1495,6 @@ static const struct of_device_id schizo_match[] = {
static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = schizo_match,
},
.probe = schizo_probe,
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 49d33b178793..47ddbd496a1e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -1010,7 +1010,6 @@ static const struct of_device_id pci_sun4v_match[] = {
static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = pci_sun4v_match,
},
.probe = pci_sun4v_probe,
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 8b7297faca79..97d123107ecb 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -82,7 +82,6 @@ MODULE_DEVICE_TABLE(of, pmc_match);
static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
- .owner = THIS_MODULE,
.of_match_table = pmc_match,
},
.probe = pmc_probe,
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 4cb23c41553f..1836cb965ff8 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -63,7 +63,6 @@ static struct platform_driver power_driver = {
.probe = power_probe,
.driver = {
.name = "power",
- .owner = THIS_MODULE,
.of_match_table = power_match,
},
};
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 33a17e7b3ccd..bb0008927598 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -6,6 +6,11 @@ sys64_execve:
jmpl %g1, %g0
flushw
+sys64_execveat:
+ set sys_execveat, %g1
+ jmpl %g1, %g0
+ flushw
+
#ifdef CONFIG_COMPAT
sunos_execv:
mov %g0, %o2
@@ -13,6 +18,11 @@ sys32_execve:
set compat_sys_execve, %g1
jmpl %g1, %g0
flushw
+
+sys32_execveat:
+ set compat_sys_execveat, %g1
+ jmpl %g1, %g0
+ flushw
#endif
.align 32
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ad0cdf497b78..e31a9056a303 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -87,3 +87,4 @@ sys_call_table:
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
+/*350*/ .long sys_execveat
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 580cde9370c9..d72f76ae70eb 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -88,6 +88,7 @@ sys_call_table32:
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
+/*350*/ .word sys32_execveat
#endif /* CONFIG_COMPAT */
@@ -167,3 +168,4 @@ sys_call_table:
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
+/*350*/ .word sys64_execveat
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 5923d1e4e7c9..2f80d23a0a44 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -322,7 +322,6 @@ static struct platform_driver clock_driver = {
.probe = clock_probe,
.driver = {
.name = "rtc",
- .owner = THIS_MODULE,
.of_match_table = clock_match,
},
};
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 59da0c3ea788..edbbeb157d46 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -466,7 +466,6 @@ static struct platform_driver rtc_driver = {
.probe = rtc_probe,
.driver = {
.name = "rtc",
- .owner = THIS_MODULE,
.of_match_table = rtc_match,
},
};
@@ -499,7 +498,6 @@ static struct platform_driver bq4802_driver = {
.probe = bq4802_probe,
.driver = {
.name = "bq4802",
- .owner = THIS_MODULE,
.of_match_table = bq4802_match,
},
};
@@ -563,7 +561,6 @@ static struct platform_driver mostek_driver = {
.probe = mostek_probe,
.driver = {
.name = "mostek",
- .owner = THIS_MODULE,
.of_match_table = mostek_match,
},
};
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 2d91c62f7f5f..3ea267c53320 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index be65f035d18a..5cbc96d801ff 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -460,10 +460,12 @@ static void __init sparc_context_init(int numctx)
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk)
{
+ unsigned long flags;
+
if (mm->context == NO_CONTEXT) {
- spin_lock(&srmmu_context_spinlock);
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
alloc_context(old_mm, mm);
- spin_unlock(&srmmu_context_spinlock);
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
}
@@ -986,14 +988,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
if (mm->context != NO_CONTEXT) {
flush_cache_mm(mm);
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
flush_tlb_mm(mm);
- spin_lock(&srmmu_context_spinlock);
+ spin_lock_irqsave(&srmmu_context_spinlock, flags);
free_context(mm->context);
- spin_unlock(&srmmu_context_spinlock);
+ spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
mm->context = NO_CONTEXT;
}
}
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 91de7dd7427f..37dc9364c4a1 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -218,7 +218,6 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=y
CONFIG_VETH=m
CONFIG_NET_DSA_MV88E6060=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index c7702b7ab7a5..76a2781dec2c 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -337,7 +337,6 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=y
CONFIG_VETH=m
CONFIG_NET_DSA_MV88E6060=y
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 320ff5e6e61e..6f00e9850636 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -463,6 +463,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
(uint64_t)ts->tv_nsec,
(uint64_t)cycles);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
struct timespec *ts)
@@ -485,11 +486,13 @@ int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
}
return ret;
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
{
return gxio_mpipe_adjust_timestamp_aux(context, delta);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
/* Get our internal context used for link name access. This context is
* special in that it is not associated with an mPIPE service domain.
@@ -542,6 +545,7 @@ int gxio_mpipe_link_instance(const char *link_name)
return gxio_mpipe_info_instance_aux(context, name);
}
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index e6462b8a6284..b4c488b65745 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -11,7 +11,6 @@ generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d372641054d9..6ef4ecab1df2 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -396,8 +396,7 @@ extern void ioport_unmap(void __iomem *addr);
static inline long ioport_panic(void)
{
#ifdef __tilegx__
- panic("PCI IO space support is disabled. Configure the kernel with"
- " CONFIG_TILE_PCI_IO to enable it");
+ panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
#else
panic("inb/outb and friends do not exist on tile");
#endif
@@ -406,7 +405,7 @@ static inline long ioport_panic(void)
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
+ pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
return NULL;
}
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 33587f16c152..5d1950788c69 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
+ pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
- pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))
/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 2c8a9cd102d3..e96cec52f6d8 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud)
}
#define pmd_ERROR(e) \
- pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
+ pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
static inline void pud_clear(pud_t *pudp)
{
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
index 7757e1985fb6..d03b829857e8 100644
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ b/arch/tile/include/uapi/asm/ptrace.h
@@ -52,12 +52,16 @@ typedef uint_reg_t pt_reg_t;
* system call or exception. "struct sigcontext" has the same shape.
*/
struct pt_regs {
- /* Saved main processor registers; 56..63 are special. */
- /* tp, sp, and lr must immediately follow regs[] for aliasing. */
- pt_reg_t regs[53];
- pt_reg_t tp; /* aliases regs[TREG_TP] */
- pt_reg_t sp; /* aliases regs[TREG_SP] */
- pt_reg_t lr; /* aliases regs[TREG_LR] */
+ union {
+ /* Saved main processor registers; 56..63 are special. */
+ pt_reg_t regs[56];
+ struct {
+ pt_reg_t __regs[53];
+ pt_reg_t tp; /* aliases regs[TREG_TP] */
+ pt_reg_t sp; /* aliases regs[TREG_SP] */
+ pt_reg_t lr; /* aliases regs[TREG_LR] */
+ };
+ };
/* Saved special registers. */
pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
diff --git a/arch/tile/include/uapi/asm/sigcontext.h b/arch/tile/include/uapi/asm/sigcontext.h
index 6348e59d3724..39ff5d1a232d 100644
--- a/arch/tile/include/uapi/asm/sigcontext.h
+++ b/arch/tile/include/uapi/asm/sigcontext.h
@@ -24,10 +24,16 @@
* but is simplified since we know the fault is from userspace.
*/
struct sigcontext {
- __uint_reg_t gregs[53]; /* General-purpose registers. */
- __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
- __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
- __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ __extension__ union {
+ /* General-purpose registers. */
+ __uint_reg_t gregs[56];
+ __extension__ struct {
+ __uint_reg_t __gregs[53];
+ __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
+ __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
+ __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ };
+ };
__uint_reg_t pc; /* Program counter. */
__uint_reg_t ics; /* In Interrupt Critical Section? */
__uint_reg_t faultnum; /* Fault number. */
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index aca6000bca75..c4646bb99342 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* to quiesce.
*/
if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected %s hardwall violation %#lx"
- " while teardown already in progress\n",
+ pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
cpu, hwt->name,
(long)mfspr_XDN(hwt, DIRECTION_PROTECT));
goto done;
@@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt,
struct thread_struct *ts = &task->thread;
if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing %s hardwall with"
- " an affinity mask containing %d cpus!\n",
+ pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
task->pid, task->comm, hwt->name,
cpumask_weight(&task->cpus_allowed));
BUG();
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index ba85765e1436..22044fc691ef 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
{
long sp = stack_pointer - (long) current_thread_info();
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("tile_dev_intr: "
- "stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
+ pr_emerg("%s: stack overflow: %ld\n",
+ __func__, sp - sizeof(struct thread_info));
dump_stack();
}
}
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index 4cd88381a83e..ff5335ae050d 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -125,9 +125,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
- int reg;
struct pt_regs *thread_regs;
- unsigned long *ptr = gdb_regs;
if (task == NULL)
return;
@@ -136,9 +134,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
memset(gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
- for (reg = 0; reg <= TREG_LAST_GPR; reg++)
- *(ptr++) = thread_regs->regs[reg];
-
+ memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
index 27cdcacbe81d..f8a45c51e9e4 100644
--- a/arch/tile/kernel/kprobes.c
+++ b/arch/tile/kernel/kprobes.c
@@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -EINVAL;
if (insn_has_control(*p->addr)) {
- pr_notice("Kprobes for control instructions are not "
- "supported\n");
+ pr_notice("Kprobes for control instructions are not supported\n");
return -EINVAL;
}
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index f0b54a934712..008aa2faef55 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
int machine_kexec_prepare(struct kimage *image)
{
if (num_online_cpus() > 1) {
- pr_warning("%s: detected attempt to kexec "
- "with num_online_cpus() > 1\n",
- __func__);
+ pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
+ __func__);
return -ENOSYS;
}
if (image->type != KEXEC_TYPE_DEFAULT) {
- pr_warning("%s: detected attempt to kexec "
- "with unsupported type: %d\n",
- __func__,
- image->type);
+ pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
+ __func__, image->type);
return -ENOSYS;
}
return 0;
@@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg)
*/
csum = ip_compute_csum(pg, bhdrp->b_size);
if (csum != 0) {
- pr_warning("%s: bad checksum %#x (size %d)\n",
- __func__, csum, bhdrp->b_size);
+ pr_warn("%s: bad checksum %#x (size %d)\n",
+ __func__, csum, bhdrp->b_size);
return 0;
}
@@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg)
while (*desc != '\0') {
desc++;
if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
- pr_info("%s: ran off end of page\n",
- __func__);
+ pr_info("%s: ran off end of page\n", __func__);
return 0;
}
}
@@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image)
}
if (command_line != 0) {
- pr_info("setting new command line to \"%s\"\n",
- command_line);
+ pr_info("setting new command line to \"%s\"\n", command_line);
hverr = hv_set_command_line(
(HV_VirtAddr) command_line, strlen(command_line));
kunmap_atomic(command_line);
} else {
- pr_info("%s: no command line found; making empty\n",
- __func__);
+ pr_info("%s: no command line found; making empty\n", __func__);
hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
}
if (hverr)
- pr_warning("%s: hv_set_command_line returned error: %d\n",
- __func__, hverr);
+ pr_warn("%s: hv_set_command_line returned error: %d\n",
+ __func__, hverr);
}
/*
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index ac950be1318e..7475af3aacec 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
{
long sp = stack_pointer - (long) current_thread_info();
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("hv_message_intr: "
- "stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
+ pr_emerg("%s: stack overflow: %ld\n",
+ __func__, sp - sizeof(struct thread_info));
dump_stack();
}
}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index d19b13e3a59f..96447c9160a0 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region)
static int validate_hw2_last(long value, struct module *me)
{
if (((value << 16) >> 16) != value) {
- pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
- me->name, value);
+ pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
+ me->name, value);
return 0;
}
return 1;
@@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
value -= (unsigned long) location; /* pc-relative */
value = (long) value >> 3; /* count by instrs */
if (!validate_jumpoff(value)) {
- pr_warning("module %s: Out of range jump to"
- " %#llx at %#llx (%p)\n", me->name,
- sym->st_value + rel[i].r_addend,
- rel[i].r_offset, location);
+ pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
+ me->name,
+ sym->st_value + rel[i].r_addend,
+ rel[i].r_offset, location);
return -ENOEXEC;
}
MUNGE(create_JumpOff_X1);
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 1f80a88c75a6..f70c7892fa25 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -178,8 +178,8 @@ int __init tile_pci_init(void)
continue;
hv_cfg_fd1 = tile_pcie_open(i, 1);
if (hv_cfg_fd1 < 0) {
- pr_err("PCI: Couldn't open config fd to HV "
- "for controller %d\n", i);
+ pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
+ i);
goto err_cont;
}
@@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (i = 0; i < 6; i++) {
r = &dev->resource[i];
if (r->flags & IORESOURCE_UNSET) {
- pr_err("PCI: Device %s not available "
- "because of resource collisions\n",
+ pr_err("PCI: Device %s not available because of resource collisions\n",
pci_name(dev));
return -EINVAL;
}
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index e717af20dada..2c95f37ebbed 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq)
count = cpumask_weight(&intr_cpus_map);
if (unlikely(count == 0)) {
- pr_warning("intr_cpus_map empty, interrupts will be"
- " delievered to dataplane tiles\n");
+		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
return irq % (smp_height * smp_width);
}
@@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index)
/* Get the properties of the PCIe ports on this TRIO instance. */
ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
if (ret < 0) {
- pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
- " on TRIO %d\n", ret, trio_index);
+ pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
+ ret, trio_index);
goto get_port_property_failure;
}
context->mmio_base_mac =
iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
if (context->mmio_base_mac == NULL) {
- pr_err("PCI: TRIO config space mapping failure, error %d,"
- " on TRIO %d\n", ret, trio_index);
+ pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
+ ret, trio_index);
ret = -ENOMEM;
goto trio_mmio_mapping_failure;
@@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
dev_control.max_read_req_sz,
mac);
if (err < 0) {
- pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
- "MAC %d on TRIO %d\n",
- mac, controller->trio_index);
+ pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
+ mac, controller->trio_index);
}
}
@@ -720,27 +718,24 @@ int __init pcibios_init(void)
reg_offset);
if (!port_status.dl_up) {
if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC TRIO init %d sec"
- " on MAC %d on TRIO %d\n",
+ pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
rc_delay[trio_index][mac], mac,
trio_index);
msleep(rc_delay[trio_index][mac] * 1000);
}
ret = gxio_trio_force_rc_link_up(trio_context, mac);
if (ret < 0)
- pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
- "MAC %d on TRIO %d\n", mac, trio_index);
+ pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
+ mac, trio_index);
}
- pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
- trio_index, controller->mac);
+ pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
+ i, trio_index, controller->mac);
/* Delay the bus probe if needed. */
if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC bus enumerating %d sec"
- " on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac,
- trio_index);
+ pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
+ rc_delay[trio_index][mac], mac, trio_index);
msleep(rc_delay[trio_index][mac] * 1000);
} else {
/*
@@ -758,11 +753,10 @@ int __init pcibios_init(void)
if (pcie_ports[trio_index].ports[mac].removable) {
pr_info("PCI: link is down, MAC %d on TRIO %d\n",
mac, trio_index);
- pr_info("This is expected if no PCIe card"
- " is connected to this link\n");
+ pr_info("This is expected if no PCIe card is connected to this link\n");
} else
pr_err("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
+ mac, trio_index);
continue;
}
@@ -829,8 +823,8 @@ int __init pcibios_init(void)
/* Alloc a PIO region for PCI config access per MAC. */
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
- "on TRIO %d, give up\n", mac, trio_index);
+ pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
+ mac, trio_index);
continue;
}
@@ -842,8 +836,8 @@ int __init pcibios_init(void)
trio_context->pio_cfg_index[mac],
mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
if (ret < 0) {
- pr_err("PCI: PCI CFG PIO init failure for mac %d "
- "on TRIO %d, give up\n", mac, trio_index);
+ pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
+ mac, trio_index);
continue;
}
@@ -865,7 +859,7 @@ int __init pcibios_init(void)
(TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
- mac, trio_index);
+ mac, trio_index);
continue;
}
@@ -925,9 +919,8 @@ int __init pcibios_init(void)
/* Alloc a PIO region for PCI memory access for each RC port. */
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -944,9 +937,8 @@ int __init pcibios_init(void)
0,
0);
if (ret < 0) {
- pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -957,9 +949,8 @@ int __init pcibios_init(void)
*/
ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
if (ret < 0) {
- pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -976,9 +967,8 @@ int __init pcibios_init(void)
0,
HV_TRIO_PIO_FLAG_IO_SPACE);
if (ret < 0) {
- pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
- "give up\n", controller->trio_index,
- controller->mac);
+ pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
+ controller->trio_index, controller->mac);
continue;
}
@@ -997,10 +987,9 @@ int __init pcibios_init(void)
ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
0);
if (ret < 0) {
- pr_err("PCI: Mem-Map alloc failure on TRIO %d "
- "mac %d for MC %d, give up\n",
- controller->trio_index,
- controller->mac, j);
+ pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
+ controller->trio_index, controller->mac,
+ j);
goto alloc_mem_map_failed;
}
@@ -1030,10 +1019,9 @@ int __init pcibios_init(void)
j,
GXIO_TRIO_ORDER_MODE_UNORDERED);
if (ret < 0) {
- pr_err("PCI: Mem-Map init failure on TRIO %d "
- "mac %d for MC %d, give up\n",
- controller->trio_index,
- controller->mac, j);
+ pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
+ controller->trio_index, controller->mac,
+ j);
goto alloc_mem_map_failed;
}
@@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
* Most PCIe endpoint devices do support 64-bit message addressing.
*/
if (desc->msi_attrib.is_64 == 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "64-bit MSI message address not supported, "
- "falling back to legacy interrupts.\n");
+ dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
ret = -ENOMEM;
goto is_64_failure;
@@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
/* SQ regions are out, allocate from map mem regions. */
mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
if (mem_map < 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "%s Mem-Map alloc failure. "
- "Failed to initialize MSI interrupts. "
- "Falling back to legacy interrupts.\n",
- desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+ dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
+ desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
ret = -ENOMEM;
goto msi_mem_map_alloc_failure;
}
@@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
mem_map, mem_map_base, mem_map_limit,
trio_context->asid);
if (ret < 0) {
- dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+ dev_info(&pdev->dev, "HV MSI config failed\n");
goto hv_msi_config_failure;
}
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0050cbc1d9de..48e5773dd0b7 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -52,7 +52,7 @@ static int __init idle_setup(char *str)
return -EINVAL;
if (!strcmp(str, "poll")) {
- pr_info("using polling idle threads.\n");
+ pr_info("using polling idle threads\n");
cpu_idle_poll_ctrl(true);
return 0;
} else if (!strcmp(str, "halt")) {
@@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs)
struct task_struct *tsk = validate_current();
int i;
- pr_err("\n");
if (tsk != &corrupt_current)
show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
for (i = 0; i < 17; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+ pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
i, regs->regs[i], i+18, regs->regs[i+18],
i+36, regs->regs[i+36]);
- pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
+ pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
regs->regs[17], regs->regs[35], regs->tp);
- pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
+ pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
#else
for (i = 0; i < 13; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
- " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+ pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
i, regs->regs[i], i+14, regs->regs[i+14],
i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
- pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
+ pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
- pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
+ pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
regs->pc, regs->ex1, regs->faultnum);
dump_stack_regs(regs);
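The show_regs() hunks above only add whitespace around REGFMT; the output is unchanged because adjacent string literals concatenate either way. A minimal standalone sketch, with REGFMT assumed here to be a "%016lx"-style specifier (the real definition lives in the tile headers and is not part of this patch):

```c
#include <stdio.h>

/*
 * REGFMT is a stand-in for the tile macro of the same name.  Adjacent
 * string literals fuse into one format string, so " pc : " REGFMT "\n"
 * behaves exactly like the old unspaced form; the spaces are only what
 * checkpatch asks for around a macro used inside a literal.
 */
#define REGFMT "%016lx"

int main(void)
{
	unsigned long pc = 0xdeadbeefUL;

	printf(" pc : " REGFMT " ex1: %ld\n", pc, 5L);
	return 0;
}
```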
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 7f079bbfdf4c..864eea69556d 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str)
maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used to no more than %dMB\n",
- maxmem_pfn >> (20 - PAGE_SHIFT));
+ maxmem_pfn >> (20 - PAGE_SHIFT));
return 0;
}
early_param("maxmem", setup_maxmem);
@@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str)
maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
(HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
- node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
+ node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
return 0;
}
early_param("maxnodemem", setup_maxnodemem);
@@ -417,8 +417,7 @@ static void __init setup_memory(void)
range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
range.size -= (range.start - start_pa);
range.size &= HPAGE_MASK;
- pr_err("Range not hugepage-aligned: %#llx..%#llx:"
- " now %#llx-%#llx\n",
+ pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
start_pa, start_pa + orig_size,
range.start, range.start + range.size);
}
@@ -437,8 +436,8 @@ static void __init setup_memory(void)
if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
int max_size = maxnodemem_pfn[i];
if (max_size > 0) {
- pr_err("Maxnodemem reduced node %d to"
- " %d pages\n", i, max_size);
+ pr_err("Maxnodemem reduced node %d to %d pages\n",
+ i, max_size);
range.size = PFN_PHYS(max_size);
} else {
pr_err("Maxnodemem disabled node %d\n", i);
@@ -490,8 +489,8 @@ static void __init setup_memory(void)
NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
if (end < pci_reserve_end_pfn + percpu_pages) {
end = pci_reserve_start_pfn;
- pr_err("PCI mapping region reduced node %d to"
- " %ld pages\n", i, end - start);
+ pr_err("PCI mapping region reduced node %d to %ld pages\n",
+ i, end - start);
}
}
#endif
@@ -555,10 +554,9 @@ static void __init setup_memory(void)
MAXMEM_PFN : mappable_physpages;
highmem_pages = (long) (physpages - lowmem_pages);
- pr_notice("%ldMB HIGHMEM available.\n",
- pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
- pr_notice("%ldMB LOWMEM available.\n",
- pages_to_mb(lowmem_pages));
+ pr_notice("%ldMB HIGHMEM available\n",
+ pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
+ pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
#else
/* Set max_low_pfn based on what node 0 can directly address. */
max_low_pfn = node_end_pfn[0];
@@ -571,8 +569,8 @@ static void __init setup_memory(void)
max_pfn = MAXMEM_PFN;
node_end_pfn[0] = MAXMEM_PFN;
} else {
- pr_notice("%ldMB memory available.\n",
- pages_to_mb(node_end_pfn[0]));
+ pr_notice("%ldMB memory available\n",
+ pages_to_mb(node_end_pfn[0]));
}
for (i = 1; i < MAX_NUMNODES; ++i) {
node_start_pfn[i] = 0;
@@ -587,8 +585,7 @@ static void __init setup_memory(void)
if (pages)
high_memory = pfn_to_kaddr(node_end_pfn[i]);
}
- pr_notice("%ldMB memory available.\n",
- pages_to_mb(lowmem_pages));
+ pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
#endif
#endif
}
@@ -1535,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
BUG_ON(pgd_addr_invalid(addr));
if (addr < VMALLOC_START || addr >= VMALLOC_END)
- panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
- " try increasing CONFIG_VMALLOC_RESERVE\n",
+ panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
addr, VMALLOC_START, VMALLOC_END);
pgd = swapper_pg_dir + pgd_index(addr);
@@ -1591,8 +1587,8 @@ void __init setup_per_cpu_areas(void)
lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
ptep = virt_to_kpte(lowmem_va);
if (pte_huge(*ptep)) {
- printk(KERN_DEBUG "early shatter of huge page"
- " at %#lx\n", lowmem_va);
+ printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
+ lowmem_va);
shatter_pmd((pmd_t *)ptep);
ptep = virt_to_kpte(lowmem_va);
BUG_ON(pte_huge(*ptep));
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 7c2fecc52177..bb0a9ce7ae23 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -45,8 +45,7 @@
int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
- int err = 0;
- int i;
+ int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -57,9 +56,7 @@ int restore_sigcontext(struct pt_regs *regs,
*/
BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
-
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ err = __copy_from_user(regs, sc, sizeof(*regs));
/* Ensure that the PL is always set to USER_PL. */
regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
@@ -110,12 +107,7 @@ badframe:
int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
- int i, err = 0;
-
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __put_user(regs->regs[i], &sc->gregs[i]);
-
- return err;
+ return __copy_to_user(sc, regs, sizeof(*regs));
}
/*
@@ -345,7 +337,6 @@ static void dump_mem(void __user *address)
int i, j, k;
int found_readable_mem = 0;
- pr_err("\n");
if (!access_ok(VERIFY_READ, address, 1)) {
pr_err("Not dumping at address 0x%lx (kernel address)\n",
(unsigned long)address);
@@ -367,7 +358,7 @@ static void dump_mem(void __user *address)
(unsigned long)address);
found_readable_mem = 1;
}
- j = sprintf(line, REGFMT":", (unsigned long)addr);
+ j = sprintf(line, REGFMT ":", (unsigned long)addr);
for (k = 0; k < bytes_per_line; ++k)
j += sprintf(&line[j], " %02x", buf[k]);
pr_err("%s\n", line);
@@ -411,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs,
case SIGFPE:
case SIGSEGV:
case SIGBUS:
- pr_err("User crash: signal %d,"
- " trap %ld, address 0x%lx\n",
+ pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
sig, regs->faultnum, address);
show_regs(regs);
dump_mem((void __user *)address);
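The restore_sigcontext()/setup_sigcontext() hunks earlier in this file replace the per-register __get_user()/__put_user() loops with one bulk copy, which is only safe because the two layouts are asserted to be identical. A toy userspace sketch of that reasoning, with invented struct names and field counts standing in for pt_regs and sigcontext:

```c
#include <stdio.h>
#include <string.h>

/*
 * toy_regs/toy_sigctx are made-up stand-ins.  The _Static_assert plays
 * the role of the BUILD_BUG_ON() in signal.c: once the layouts are
 * known to match, a single memcpy() (__copy_from_user()/__copy_to_user()
 * in the kernel) replaces the register-by-register loop.
 */
struct toy_regs   { unsigned long gregs[56]; };
struct toy_sigctx { unsigned long gregs[56]; };

int main(void)
{
	struct toy_sigctx sc = { .gregs = { [0] = 1, [55] = 2 } };
	struct toy_regs regs;

	_Static_assert(sizeof(regs) == sizeof(sc), "layouts must match");
	memcpy(&regs, &sc, sizeof(regs));
	printf("r0=%lu r55=%lu\n", regs.gregs[0], regs.gregs[55]);
	return 0;
}
```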
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 6cb2ce31b5a2..862973074bf9 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
}
if (unaligned_printk || unaligned_fixup_count == 0) {
- pr_info("Process %d/%s: PC %#lx: Fixup of"
- " unaligned %s at %#lx.\n",
+ pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
current->pid, current->comm, regs->pc,
- (mem_op == MEMOP_LOAD ||
- mem_op == MEMOP_LOAD_POSTINCR) ?
+ mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
"load" : "store",
(unsigned long)addr);
if (!unaligned_printk) {
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 0d59a1b60c74..20d52a98e171 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -127,8 +127,7 @@ static __init int reset_init_affinity(void)
{
long rc = sched_setaffinity(current->pid, &init_affinity);
if (rc != 0)
- pr_warning("couldn't reset init affinity (%ld)\n",
- rc);
+ pr_warn("couldn't reset init affinity (%ld)\n", rc);
return 0;
}
late_initcall(reset_init_affinity);
@@ -174,7 +173,7 @@ static void start_secondary(void)
/* Indicate that we're ready to come up. */
/* Must not do this before we're ready to receive messages */
if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
- pr_warning("CPU#%d already started!\n", cpuid);
+ pr_warn("CPU#%d already started!\n", cpuid);
for (;;)
local_irq_enable();
}
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c93977a62116..7ff5afdbd3aa 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
* then bust_spinlocks() spit out a space in front of us
* and it will mess up our KERN_ERR.
*/
- pr_err("\n");
- pr_err("Starting stack dump of tid %d, pid %d (%s)"
- " on cpu %d at cycle %lld\n",
+ pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
kbt->task->pid, kbt->task->tgid, kbt->task->comm,
raw_smp_processor_id(), get_cycles());
}
@@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
i++, address, namebuf, (unsigned long)(kbt->it.sp));
if (i >= 100) {
- pr_err("Stack dump truncated"
- " (%d frames)\n", i);
+ pr_err("Stack dump truncated (%d frames)\n", i);
break;
}
}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index b854a1cd0079..d412b0856c0a 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -98,8 +98,8 @@ void __init calibrate_delay(void)
{
loops_per_jiffy = get_clock_rate() / HZ;
pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
}
/* Called fairly late in init/main.c, but before we go smp. */
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 86900ccd4977..bf841ca517bb 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str)
return 0;
pr_info("Fixups for unaligned data accesses are %s\n",
- unaligned_fixup >= 0 ?
- (unaligned_fixup ? "enabled" : "disabled") :
- "completely disabled");
+ unaligned_fixup >= 0 ?
+ (unaligned_fixup ? "enabled" : "disabled") :
+ "completely disabled");
return 1;
}
__setup("unaligned_fixup=", setup_unaligned_fixup);
@@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_ILL:
if (copy_from_user(&instr, (void __user *)regs->pc,
sizeof(instr))) {
- pr_err("Unreadable instruction for INT_ILL:"
- " %#lx\n", regs->pc);
+ pr_err("Unreadable instruction for INT_ILL: %#lx\n",
+ regs->pc);
do_exit(SIGKILL);
return;
}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index c02ea2a45f67..7d9a83be0aca 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
unaligned_fixup_count++;
if (unaligned_printk) {
- pr_info("%s/%d. Unalign fixup for kernel access "
- "to userspace %lx.",
+ pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
current->comm, current->pid, regs->regs[ra]);
}
@@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
.si_addr = (unsigned char __user *)0
};
if (unaligned_printk)
- pr_info("Unalign bundle: unexp @%llx, %llx",
+ pr_info("Unalign bundle: unexp @%llx, %llx\n",
(unsigned long long)regs->pc,
(unsigned long long)bundle);
@@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
frag.bundle = bundle;
if (unaligned_printk) {
- pr_info("%s/%d, Unalign fixup: pc=%lx "
- "bundle=%lx %d %d %d %d %d %d %d %d.",
+ pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
current->comm, current->pid,
(unsigned long)frag.pc,
(unsigned long)frag.bundle,
@@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
(int)y1_lr, (int)y1_br, (int)x1_add);
for (k = 0; k < n; k += 2)
- pr_info("[%d] %016llx %016llx", k,
- (unsigned long long)frag.insn[k],
+ pr_info("[%d] %016llx %016llx\n",
+ k, (unsigned long long)frag.insn[k],
(unsigned long long)frag.insn[k+1]);
}
@@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
.si_addr = (void __user *)&jit_code_area[idx]
};
- pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx",
+ pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
current->pid, current->comm,
(unsigned long long)&jit_code_area[idx]);
@@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
/* If exception came from kernel, try fix it up. */
if (fixup_exception(regs)) {
if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx",
+ pr_info("Unalign fixup: %d %llx @%llx\n",
(int)unaligned_fixup,
(unsigned long long)regs->ex1,
(unsigned long long)regs->pc);
@@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
};
if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx",
+ pr_info("Unalign fixup: %d %llx @%llx\n",
(int)unaligned_fixup,
(unsigned long long)regs->ex1,
(unsigned long long)regs->pc);
@@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
0);
if (IS_ERR((void __force *)user_page)) {
- pr_err("Out of kernel pages trying do_mmap.\n");
+ pr_err("Out of kernel pages trying do_mmap\n");
return;
}
/* Save the address in the thread_info struct */
info->unalign_jit_base = user_page;
if (unaligned_printk)
- pr_info("Unalign bundle: %d:%d, allocate page @%llx",
+ pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
raw_smp_processor_id(), current->pid,
(unsigned long long)user_page);
}
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 6c0571216a9d..565e25a98334 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte)
while (pte_migrating(*pte)) {
barrier();
if (++retries > bound)
- panic("Hit migrating PTE (%#llx) and"
- " page PFN %#lx still migrating",
+ panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
pte->val, pte_pfn(*pte));
}
}
@@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs,
*/
stack_offset = stack_pointer & (THREAD_SIZE-1);
if (stack_offset < THREAD_SIZE / 8) {
- pr_alert("Potential stack overrun: sp %#lx\n",
- stack_pointer);
+ pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
show_regs(regs);
pr_alert("Killing current process %d/%s\n",
- tsk->pid, tsk->comm);
+ tsk->pid, tsk->comm);
do_group_exit(SIGKILL);
}
@@ -421,7 +419,7 @@ good_area:
} else if (write) {
#ifdef TEST_VERIFY_AREA
if (!is_page_fault && regs->cs == KERNEL_CS)
- pr_err("WP fault at "REGFMT"\n", regs->eip);
+ pr_err("WP fault at " REGFMT "\n", regs->eip);
#endif
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -519,16 +517,15 @@ no_context:
pte_t *pte = lookup_address(address);
if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
- pr_crit("kernel tried to execute"
- " non-executable page - exploit attempt?"
- " (uid: %d)\n", current->uid);
+ pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
+ current->uid);
}
#endif
if (address < PAGE_SIZE)
pr_alert("Unable to handle kernel NULL pointer dereference\n");
else
pr_alert("Unable to handle kernel paging request\n");
- pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
+ pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
address, regs->pc);
show_regs(regs);
@@ -575,9 +572,10 @@ do_sigbus:
#ifndef __tilegx__
/* We must release ICS before panicking or we won't get anywhere. */
-#define ics_panic(fmt, ...) do { \
- __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
- panic(fmt, __VA_ARGS__); \
+#define ics_panic(fmt, ...) \
+do { \
+ __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
+ panic(fmt, ##__VA_ARGS__); \
} while (0)
/*
@@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
fault_num != INT_DTLB_ACCESS)) {
unsigned long old_pc = regs->pc;
regs->pc = pc;
- ics_panic("Bad ICS page fault args:"
- " old PC %#lx, fault %d/%d at %#lx\n",
+ ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
old_pc, fault_num, write, address);
}
@@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
#endif
fixup = search_exception_tables(pc);
if (!fixup)
- ics_panic("ICS atomic fault not in table:"
- " PC %#lx, fault %d", pc, fault_num);
+ ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
+ pc, fault_num);
regs->pc = fixup->fixup;
regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
}
@@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
set_thread_flag(TIF_ASYNC_TLB);
if (async->fault_num != 0) {
- panic("Second async fault %d;"
- " old fault was %d (%#lx/%ld)",
+ panic("Second async fault %d; old fault was %d (%#lx/%ld)",
fault_num, async->fault_num,
address, write);
}
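The ics_panic() rework above switches from __VA_ARGS__ to ##__VA_ARGS__ so the macro also accepts a bare format string with no extra arguments. A minimal self-contained illustration of that GNU extension (the log_fail() macro is invented for the example):

```c
#include <stdio.h>

/*
 * With plain __VA_ARGS__, the first call below would leave a trailing
 * comma after the format string and fail to compile; the GNU
 * '##__VA_ARGS__' form swallows the comma when no variadic arguments
 * are given, which is what the ics_panic() change relies on.
 */
#define log_fail(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)

int main(void)
{
	log_fail("no arguments at all");
	log_fail("fault %d at %#lx", 3, 0x1000UL);
	return 0;
}
```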
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 33294fdc402e..cd3387370ebb 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
- pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
- " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
+ pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
cache_pa, cache_control, cache_cpumask, cache_buf,
(unsigned long)tlb_va, tlb_length, tlb_pgsize,
- tlb_cpumask, tlb_buf,
- asids, asidcount, rc);
+ tlb_cpumask, tlb_buf, asids, asidcount, rc);
panic("Unsafe to continue.");
}
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index e514899e1100..3270e0019266 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps)
int level, base_shift;
if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
- pr_warn("Not enabling %ld byte huge pages;"
- " must be a power of four.\n", ps);
+ pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
+ ps);
return -EINVAL;
}
if (ps > 64*1024*1024*1024UL) {
- pr_warn("Not enabling %ld MB huge pages;"
- " largest legal value is 64 GB .\n", ps >> 20);
+ pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
+ ps >> 20);
return -EINVAL;
} else if (ps >= PUD_SIZE) {
static long hv_jpage_size;
if (hv_jpage_size == 0)
hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
if (hv_jpage_size != PUD_SIZE) {
- pr_warn("Not enabling >= %ld MB huge pages:"
- " hypervisor reports size %ld\n",
+ pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
PUD_SIZE >> 20, hv_jpage_size);
return -EINVAL;
}
@@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps)
int shift_val = log_ps - base_shift;
if (huge_shift[level] != 0) {
int old_shift = base_shift + huge_shift[level];
- pr_warn("Not enabling %ld MB huge pages;"
- " already have size %ld MB.\n",
+ pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
ps >> 20, (1UL << old_shift) >> 20);
return -EINVAL;
}
if (hv_set_pte_super_shift(level, shift_val) != 0) {
- pr_warn("Not enabling %ld MB huge pages;"
- " no hypervisor support.\n", ps >> 20);
+ pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
+ ps >> 20);
return -EINVAL;
}
printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index caa270165f86..be240cc4978d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -357,11 +357,11 @@ static int __init setup_ktext(char *str)
cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
if (cpumask_weight(&ktext_mask) > 1) {
ktext_small = 1;
- pr_info("ktext: using caching neighborhood %s "
- "with small pages\n", buf);
+ pr_info("ktext: using caching neighborhood %s with small pages\n",
+ buf);
} else {
pr_info("ktext: caching on cpu %s with one huge page\n",
- buf);
+ buf);
}
}
@@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
int rc, i;
if (ktext_arg_seen && ktext_hash) {
- pr_warning("warning: \"ktext\" boot argument ignored"
- " if \"kcache_hash\" sets up text hash-for-home\n");
+ pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
ktext_small = 0;
}
if (kdata_arg_seen && kdata_hash) {
- pr_warning("warning: \"kdata\" boot argument ignored"
- " if \"kcache_hash\" sets up data hash-for-home\n");
+ pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
}
if (kdata_huge && !hash_default) {
- pr_warning("warning: disabling \"kdata=huge\"; requires"
- " kcache_hash=all or =allbutstack\n");
+ pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
kdata_huge = 0;
}
@@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
pte[pte_ofs] = pfn_pte(pfn, prot);
} else {
if (kdata_huge)
- printk(KERN_DEBUG "pre-shattered huge"
- " page at %#lx\n", address);
+ printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
+ address);
for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
pfn++, pte_ofs++, address += PAGE_SIZE) {
pgprot_t prot = init_pgprot(address);
@@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
pr_info("ktext: not using unavailable cpus %s\n", buf);
}
if (cpumask_empty(&ktext_mask)) {
- pr_warning("ktext: no valid cpus; caching on %d.\n",
- smp_processor_id());
+ pr_warn("ktext: no valid cpus; caching on %d\n",
+ smp_processor_id());
cpumask_copy(&ktext_mask,
cpumask_of(smp_processor_id()));
}
@@ -798,11 +795,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
/* check that fixmap and pkmap do not overlap */
if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
- pr_err("fixmap and kmap areas overlap"
- " - this will crash\n");
+ pr_err("fixmap and kmap areas overlap - this will crash\n");
pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
- FIXADDR_START);
+ PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
BUG();
}
#endif
@@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
unsigned long addr = (unsigned long) begin;
if (kdata_huge && !initfree) {
- pr_warning("Warning: ignoring initfree=0:"
- " incompatible with kdata=huge\n");
+ pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
initfree = 1;
}
end = (end + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 5e86eac4bfae..7bf2491a9c1f 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -44,9 +44,7 @@ void show_mem(unsigned int filter)
{
struct zone *zone;
- pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
- " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
- " pagecache:%lu swap:%lu\n",
+ pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
(global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE)),
(global_page_state(NR_INACTIVE_ANON) +
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 8035145f043b..62087028a9ce 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -632,6 +632,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
int fd = winch->fd;
int err;
char c;
+ struct pid *pgrp;
if (fd != -1) {
err = generic_read(fd, &c, NULL);
@@ -657,7 +658,10 @@ static irqreturn_t winch_interrupt(int irq, void *data)
if (line != NULL) {
chan_window_size(line, &tty->winsize.ws_row,
&tty->winsize.ws_col);
- kill_pgrp(tty->pgrp, SIGWINCH, 1);
+ pgrp = tty_get_pgrp(tty);
+ if (pgrp)
+ kill_pgrp(pgrp, SIGWINCH, 1);
+ put_pid(pgrp);
}
tty_kref_put(tty);
}
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 244b12c8cb39..9176fa11d49b 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -10,7 +10,6 @@ generic-y += exec.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += io.h
generic-y += irq_regs.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 5a2bb53faa42..3e0c19d0f4c5 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bea3a0159496..d69f1cd87fd9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,6 +24,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_FAST_MULTIPLIER
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_AOUT if X86_32
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index aafe8ce0d65d..e26984f7ab8d 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -66,5 +66,5 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 888950f29fd9..ae855f4f64b7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -43,10 +43,6 @@
#include <asm/crypto/glue_helper.h>
#endif
-#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
-#define HAS_PCBC
-#endif
-
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
@@ -547,7 +543,7 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
#endif
-#ifdef HAS_PCBC
+#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
@@ -1377,7 +1373,7 @@ static struct crypto_alg aesni_algs[] = { {
},
},
#endif
-#ifdef HAS_PCBC
+#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
.cra_name = "pcbc(aes)",
.cra_driver_name = "pcbc-aes-aesni",
@@ -1550,4 +1546,4 @@ module_exit(aesni_exit);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 8af519ed73d1..17c05531dfd1 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -478,5 +478,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("blowfish");
-MODULE_ALIAS("blowfish-asm");
+MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 4209a76fcdaa..9a07fafe3831 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 87a041a10f4a..ed38d959add6 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index c171dcbf192d..5c8b6266a394 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1725,5 +1725,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index e57e20ab5e0b..60ada677a928 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -491,4 +491,4 @@ module_exit(cast5_exit);
MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 09f3677393e4..0160f68a57ff 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -611,4 +611,4 @@ module_exit(cast6_exit);
MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 9d014a74ef96..1937fc1d8763 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("crc32");
-MODULE_ALIAS("crc32-pclmul");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-pclmul");
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 6812ad98355c..28640c3d6af7 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("crc32c");
-MODULE_ALIAS("crc32c-intel");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-intel");
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 7845d7fd54c0..b6c67bf30fdf 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("crct10dif");
-MODULE_ALIAS("crct10dif-pclmul");
+MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 0e9c0668fe4e..38a14f818ef1 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -502,8 +502,8 @@ module_exit(des3_ede_x86_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
-MODULE_ALIAS("des3_ede");
-MODULE_ALIAS("des3_ede-asm");
-MODULE_ALIAS("des");
-MODULE_ALIAS("des-asm");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-asm");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-asm");
MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 98d7a188f46b..f368ba261739 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/crypto.h>
#include <asm/i387.h>
struct crypto_fpu_ctx {
@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
{
crypto_unregister_template(&crypto_fpu_tmpl);
}
+
+MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 88bb7ba8b175..8253d85aa165 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
"acclerated by PCLMULQDQ-NI");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 5e8e67739bb5..399a29d067d6 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -119,5 +119,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
-MODULE_ALIAS("salsa20");
-MODULE_ALIAS("salsa20-asm");
+MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-asm");
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 2fae489b1524..437e47a4d302 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -558,5 +558,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
-MODULE_ALIAS("serpent");
-MODULE_ALIAS("serpent-asm");
+MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-asm");
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index ff4870870972..7e217398b4eb 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -617,4 +617,4 @@ module_exit(serpent_exit);
MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 8c95f8637306..bf025adaea01 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 99eefd812958..a225a5ca1037 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -204,8 +204,7 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
continue;
}
- if (ctx)
- ctx->status = HASH_CTX_STS_IDLE;
+ ctx->status = HASH_CTX_STS_IDLE;
return ctx;
}
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 74d16ef707c7..6c20fe04a738 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -278,4 +278,4 @@ module_exit(sha1_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f248546da1ca..8fad72f4dfd2 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -211,7 +211,7 @@ static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash)
sha256_ssse3_final(desc, D);
memcpy(hash, D, SHA224_DIGEST_SIZE);
- memset(D, 0, SHA256_DIGEST_SIZE);
+ memzero_explicit(D, SHA256_DIGEST_SIZE);
return 0;
}
@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
-MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 8626b03e83b7..0b6af26832bf 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -219,7 +219,7 @@ static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash)
sha512_ssse3_final(desc, D);
memcpy(hash, D, SHA384_DIGEST_SIZE);
- memset(D, 0, SHA512_DIGEST_SIZE);
+ memzero_explicit(D, SHA512_DIGEST_SIZE);
return 0;
}
@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
-MODULE_ALIAS("sha512");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 4e3c665be129..1ac531ea9bcc 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -579,4 +579,4 @@ module_exit(twofish_exit);
MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0a5202303501..77e06c2da83d 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -96,5 +96,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 13e63b3e1dfb..56d8a08ee479 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -495,5 +495,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/arch/x86/ia32/audit.c b/arch/x86/ia32/audit.c
index 5d7b381da692..2eccc8932ae6 100644
--- a/arch/x86/ia32/audit.c
+++ b/arch/x86/ia32/audit.c
@@ -35,6 +35,7 @@ int ia32_classify_syscall(unsigned syscall)
case __NR_socketcall:
return 4;
case __NR_execve:
+ case __NR_execveat:
return 5;
default:
return 1;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ffe71228fc10..82e8a1d44658 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -480,6 +480,7 @@ GLOBAL(\label)
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
PTREGSCALL stub32_sigreturn, sys32_sigreturn
PTREGSCALL stub32_execve, compat_sys_execve
+ PTREGSCALL stub32_execveat, compat_sys_execveat
PTREGSCALL stub32_fork, sys_fork
PTREGSCALL stub32_vfork, sys_vfork
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0f4460b5636d..2ab1eb33106e 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -24,78 +24,28 @@
#define wmb() asm volatile("sfence" ::: "memory")
#endif
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
+#define dma_rmb() rmb()
#else
-# define smp_rmb() barrier()
+#define dma_rmb() barrier()
#endif
+#define dma_wmb() barrier()
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
-#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
#if defined(CONFIG_X86_PPRO_FENCE)
/*
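The barrier.h hunk above introduces dma_rmb()/dma_wmb() for ordering accesses to coherent DMA memory; on x86 they reduce to a compiler barrier (or rmb() under CONFIG_X86_PPRO_FENCE). Below is a toy, single-threaded sketch of the intended usage pattern only; the descriptor layout and helper are invented, and the macros are re-declared locally instead of coming from <asm/barrier.h>:

```c
#include <stdio.h>

#define barrier()  __asm__ __volatile__("" ::: "memory")
#define dma_rmb()  barrier()	/* matches the non-PPRO case in the hunk */
#define dma_wmb()  barrier()

/* Invented descriptor layout: the device writes 'length' first and
 * sets the low bit of 'status' last. */
struct rx_desc {
	volatile unsigned int status;
	volatile unsigned int length;
};

static int poll_descriptor(struct rx_desc *desc, unsigned int *len)
{
	if (!(desc->status & 1))
		return 0;		/* device not finished yet */
	dma_rmb();			/* read status before reading length */
	*len = desc->length;
	return 1;
}

int main(void)
{
	struct rx_desc d = { .status = 1, .length = 64 };
	unsigned int len;

	if (poll_descriptor(&d, &len))
		printf("received %u bytes\n", len);
	return 0;
}
```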
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index 0bdb0c54d9a1..fe884e18fa6e 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -70,7 +70,7 @@
#define MAX_DMA_CHANNELS 8
/* 16MB ISA DMA zone */
-#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
deleted file mode 100644
index e8c58f88b1d4..000000000000
--- a/arch/x86/include/asm/hash.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_X86_HASH_H
-#define _ASM_X86_HASH_H
-
-struct fast_hash_ops;
-extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
-
-#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ed0c30d6a0c..d89c6b828c96 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -33,7 +33,7 @@
#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
-#define KVM_USER_MEM_SLOTS 125
+#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
@@ -51,6 +51,7 @@
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
+#define CR3_PCID_INVD (1UL << 63)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
@@ -361,6 +362,7 @@ struct kvm_vcpu_arch {
int mp_state;
u64 ia32_misc_enable_msr;
bool tpr_access_reporting;
+ u64 ia32_xss;
/*
* Paging state of the vcpu
@@ -542,7 +544,7 @@ struct kvm_apic_map {
struct rcu_head rcu;
u8 ldr_bits;
/* fields bellow are used to decode ldr values in different modes */
- u32 cid_shift, cid_mask, lid_mask;
+ u32 cid_shift, cid_mask, lid_mask, broadcast;
struct kvm_lapic *phys_map[256];
/* first index is cluster id second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
@@ -602,6 +604,9 @@ struct kvm_arch {
struct kvm_xen_hvm_config xen_hvm_config;
+ /* reads protected by irq_srcu, writes by irq_lock */
+ struct hlist_head mask_notifier_list;
+
/* fields used by HYPER-V emulation */
u64 hv_guest_os_id;
u64 hv_hypercall;
@@ -659,6 +664,16 @@ struct msr_data {
u64 data;
};
+struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+};
+
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -767,6 +782,7 @@ struct kvm_x86_ops {
enum x86_intercept_stage stage);
void (*handle_external_intr)(struct kvm_vcpu *vcpu);
bool (*mpx_supported)(void);
+ bool (*xsaves_supported)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
@@ -818,6 +834,19 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
+};
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ bool mask);
+
extern bool tdp_enabled;
u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
@@ -863,7 +892,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
-void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code);
@@ -895,6 +924,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t gfn, void *data, int offset, int len,
u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
static inline int __kvm_irq_line_state(unsigned long *irq_state,
int irq_source_id, int level)
@@ -1066,6 +1096,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index af447f95e3be..25bcd4a89517 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -452,6 +452,7 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
+extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags);
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
index 0a4e140315b6..7249e6d0902d 100644
--- a/arch/x86/include/asm/platform_sst_audio.h
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -16,6 +16,9 @@
#include <linux/sfi.h>
+#define MAX_NUM_STREAMS_MRFLD 25
+#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD
+
enum sst_audio_task_id_mrfld {
SST_TASK_ID_NONE = 0,
SST_TASK_ID_SBA = 1,
@@ -73,6 +76,65 @@ struct sst_platform_data {
unsigned int strm_map_size;
};
+struct sst_info {
+ u32 iram_start;
+ u32 iram_end;
+ bool iram_use;
+ u32 dram_start;
+ u32 dram_end;
+ bool dram_use;
+ u32 imr_start;
+ u32 imr_end;
+ bool imr_use;
+ u32 mailbox_start;
+ bool use_elf;
+ bool lpe_viewpt_rqd;
+ unsigned int max_streams;
+ u32 dma_max_len;
+ u8 num_probes;
+};
+
+struct sst_lib_dnld_info {
+ unsigned int mod_base;
+ unsigned int mod_end;
+ unsigned int mod_table_offset;
+ unsigned int mod_table_size;
+ bool mod_ddr_dnld;
+};
+
+struct sst_res_info {
+ unsigned int shim_offset;
+ unsigned int shim_size;
+ unsigned int shim_phy_addr;
+ unsigned int ssp0_offset;
+ unsigned int ssp0_size;
+ unsigned int dma0_offset;
+ unsigned int dma0_size;
+ unsigned int dma1_offset;
+ unsigned int dma1_size;
+ unsigned int iram_offset;
+ unsigned int iram_size;
+ unsigned int dram_offset;
+ unsigned int dram_size;
+ unsigned int mbox_offset;
+ unsigned int mbox_size;
+ unsigned int acpi_lpe_res_index;
+ unsigned int acpi_ddr_index;
+ unsigned int acpi_ipc_irq_index;
+};
+
+struct sst_ipc_info {
+ int ipc_offset;
+ unsigned int mbox_recv_off;
+};
+
+struct sst_platform_info {
+ const struct sst_info *probe_data;
+ const struct sst_ipc_info *ipc_info;
+ const struct sst_res_info *res_info;
+ const struct sst_lib_dnld_info *lib_info;
+ const char *platform;
+};
int add_sst_platform_device(void);
#endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 6f1c3a8a33ab..db257a58571f 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -23,6 +23,15 @@
#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2)
#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8)
+#define SEGMENT_RPL_MASK 0x3 /*
+ * Bottom two bits of selector give the ring
+ * privilege level
+ */
+#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */
+#define USER_RPL 0x3 /* User mode is privilege level 3 */
+#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */
+#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */
+
#ifdef CONFIG_X86_32
/*
* The layout of the per-CPU GDT under Linux:
@@ -125,16 +134,6 @@
#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
-/* Bottom two bits of selector give the ring privilege level */
-#define SEGMENT_RPL_MASK 0x3
-/* Bit 2 is table indicator (LDT/GDT) */
-#define SEGMENT_TI_MASK 0x4
-
-/* User mode is privilege level 3 */
-#define USER_RPL 0x3
-/* LDT segment has TI set, GDT has it cleared */
-#define SEGMENT_LDT 0x4
-#define SEGMENT_GDT 0x0
/*
* Matching rules for certain types of segments.
@@ -192,17 +191,6 @@
#define get_kernel_rpl() 0
#endif
-/* User mode is privilege level 3 */
-#define USER_RPL 0x3
-/* LDT segment has TI set, GDT has it cleared */
-#define SEGMENT_LDT 0x4
-#define SEGMENT_GDT 0x0
-
-/* Bottom two bits of selector give the ring privilege level */
-#define SEGMENT_RPL_MASK 0x3
-/* Bit 2 is table indicator (LDT/GDT) */
-#define SEGMENT_TI_MASK 0x4
-
#define IDT_ENTRIES 256
#define NUM_EXCEPTION_VECTORS 32
/* Bitmask of exception vectors which push an error code on the stack */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index bcbfade26d8d..45afaee9555c 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -69,6 +69,7 @@
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_XSAVES 0x00100000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -159,6 +160,8 @@ enum vmcs_field {
EOI_EXIT_BITMAP3_HIGH = 0x00002023,
VMREAD_BITMAP = 0x00002026,
VMWRITE_BITMAP = 0x00002028,
+ XSS_EXIT_BITMAP = 0x0000202C,
+ XSS_EXIT_BITMAP_HIGH = 0x0000202D,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
VMCS_LINK_POINTER = 0x00002800,
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
new file mode 100644
index 000000000000..0d809e9fc975
--- /dev/null
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * arch-x86/cpuid.h
+ *
+ * CPUID interface to Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ * Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g If viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ * are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ * of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ * to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ */
+
+/* EAX Features */
+/* Virtualized APIC registers */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
+/* Virtualized x2APIC accesses */
+#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+
+#define XEN_CPUID_MAX_NUM_LEAVES 4
+
+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 7f02fe4e2c7b..acd844c017d3 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -22,8 +22,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs) { }
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a5668..5eea09915a15 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -41,10 +41,12 @@ typedef struct xpaddr {
extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
+extern unsigned long *xen_p2m_addr;
+extern unsigned long xen_p2m_size;
+extern unsigned long xen_max_p2m_pfn;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
@@ -52,17 +54,52 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-extern int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-extern int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn);
-extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+/*
+ * Helper functions to write or read unsigned long values to/from
+ * memory, when the access may fault.
+ */
+static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
+{
+ return __put_user(val, (unsigned long __user *)addr);
+}
+
+static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+{
+ return __get_user(*val, (unsigned long __user *)addr);
+}
+
+/*
+ * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
+ * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
+ * bits (identity or foreign) are set.
+ * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
+ *   identity or foreign indicator will still be set. __pfn_to_mfn()
+ *   encapsulates get_phys_to_machine(), which is called in special cases only.
+ * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
+ *   cases needing extended handling.
+ */
+static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (pfn < xen_p2m_size)
+ mfn = xen_p2m_addr[pfn];
+ else if (unlikely(pfn < xen_max_p2m_pfn))
+ return get_phys_to_machine(pfn);
+ else
+ return IDENTITY_FRAME(pfn);
+
+ if (unlikely(mfn == INVALID_P2M_ENTRY))
+ return get_phys_to_machine(pfn);
+
+ return mfn;
+}
+
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
@@ -70,7 +107,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
- mfn = get_phys_to_machine(pfn);
+ mfn = __pfn_to_mfn(pfn);
if (mfn != INVALID_P2M_ENTRY)
mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
@@ -83,7 +120,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return 1;
- return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
+ return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}
static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
@@ -102,7 +139,7 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
if (ret < 0)
return ~0;
@@ -117,7 +154,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
return mfn;
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) != mfn) {
+ if (__pfn_to_mfn(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -133,8 +170,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
*/
- if (pfn == ~0 &&
- get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+ if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
pfn = mfn;
return pfn;
@@ -180,7 +216,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
return mfn;
pfn = mfn_to_pfn(mfn);
- if (get_phys_to_machine(pfn) != mfn)
+ if (__pfn_to_mfn(pfn) != mfn)
return -1; /* force !pfn_valid() */
return pfn;
}
@@ -236,4 +272,11 @@ void make_lowmem_page_readwrite(void *vaddr);
#define xen_remap(cookie, size) ioremap((cookie), (size));
#define xen_unmap(cookie) iounmap((cookie))
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn)
+{
+ return false;
+}
+
#endif /* _ASM_X86_XEN_PAGE_H */
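Editorial sketch (not part of the patch): the new __pfn_to_mfn() helper is a three-tier lookup: a direct read from the linear p2m array for frames below xen_p2m_size, a fall back to get_phys_to_machine() for frames below xen_max_p2m_pfn, and an identity mapping above that. A simplified user-space model of that decision ladder, with made-up table sizes and an IDENTITY_FRAME stand-in (the real code operates on the live p2m structures):

#include <stdint.h>
#include <stdio.h>

#define INVALID_P2M_ENTRY  (~0UL)
#define IDENTITY_BIT       (1UL << 62)          /* stand-in for IDENTITY_FRAME_BIT */
#define IDENTITY_FRAME(p)  ((p) | IDENTITY_BIT)

static unsigned long p2m_table[] = { 100, 101, INVALID_P2M_ENTRY, 103 };
static unsigned long p2m_size = 4;              /* models xen_p2m_size */
static unsigned long max_p2m_pfn = 8;           /* models xen_max_p2m_pfn */

/* Models the slow path (get_phys_to_machine) with a fixed answer. */
static unsigned long slow_lookup(unsigned long pfn)
{
        return 200 + pfn;
}

static unsigned long model_pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        if (pfn < p2m_size)
                mfn = p2m_table[pfn];           /* fast path: linear array */
        else if (pfn < max_p2m_pfn)
                return slow_lookup(pfn);        /* sparse/special region */
        else
                return IDENTITY_FRAME(pfn);     /* beyond the table: identity */

        if (mfn == INVALID_P2M_ENTRY)
                return slow_lookup(pfn);        /* array defers to the slow path */

        return mfn;
}

int main(void)
{
        unsigned long pfns[] = { 0, 2, 5, 100 };

        for (unsigned i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
                printf("pfn %lu -> mfn 0x%lx\n", pfns[i],
                       model_pfn_to_mfn(pfns[i]));
        return 0;
}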
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 7e7a79ada658..5fa9770035dc 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -16,6 +16,7 @@
#define XSTATE_Hi16_ZMM 0x80
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
+#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
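Editorial sketch (not part of the patch): XSTATE_AVX512 groups the three AVX-512 state components (opmask, upper halves of ZMM0-15, ZMM16-31), which are XCR0 bits 5-7. A hedged user-space check of those bits with XGETBV, assuming an x86 CPU and GCC-style inline asm; the bit values mirror the defines above:

#include <stdint.h>
#include <stdio.h>

#define XSTATE_OPMASK     0x20
#define XSTATE_ZMM_Hi256  0x40
#define XSTATE_Hi16_ZMM   0x80
#define XSTATE_AVX512     (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

static int osxsave_enabled(void)
{
        uint32_t eax, ebx, ecx, edx;

        asm volatile("cpuid"
                     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "a" (1), "c" (0));
        return (ecx >> 27) & 1;                 /* CPUID.1:ECX.OSXSAVE */
}

static uint64_t xgetbv0(void)
{
        uint32_t eax, edx;

        /* XGETBV with ECX=0 reads XCR0; needs CR4.OSXSAVE to be set. */
        asm volatile(".byte 0x0f, 0x01, 0xd0"   /* xgetbv */
                     : "=a" (eax), "=d" (edx)
                     : "c" (0));
        return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
        if (!osxsave_enabled()) {
                printf("OSXSAVE not enabled; cannot read XCR0\n");
                return 1;
        }

        uint64_t xcr0 = xgetbv0();

        if ((xcr0 & XSTATE_AVX512) == XSTATE_AVX512)
                printf("all AVX-512 state components enabled (XCR0=0x%llx)\n",
                       (unsigned long long)xcr0);
        else
                printf("AVX-512 state not fully enabled (XCR0=0x%llx)\n",
                       (unsigned long long)xcr0);
        return 0;
}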
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 990a2fe1588d..b813bf9da1e2 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -72,6 +72,8 @@
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
+#define EXIT_REASON_XSAVES 63
+#define EXIT_REASON_XRSTORS 64
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -116,6 +118,8 @@
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
{ EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVVPID, "INVVPID" }, \
- { EXIT_REASON_INVPCID, "INVPCID" }
+ { EXIT_REASON_INVPCID, "INVPCID" }, \
+ { EXIT_REASON_XSAVES, "XSAVES" }, \
+ { EXIT_REASON_XRSTORS, "XRSTORS" }
#endif /* _UAPIVMX_H */
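Editorial sketch (not part of the patch): VMX_EXIT_REASONS is a value/name pair list that KVM's tracepoints use to print exit reasons symbolically, so adding XSAVES/XRSTORS here lets traces name the new exit codes. A stand-alone illustration of the same pair-table lookup idea, with a hypothetical helper and only a few entries copied from the table above:

#include <stdio.h>

struct exit_reason_name {
        unsigned int code;
        const char *name;
};

static const struct exit_reason_name vmx_exit_names[] = {
        { 55, "XSETBV" },
        { 56, "APIC_WRITE" },
        { 58, "INVPCID" },
        { 63, "XSAVES" },
        { 64, "XRSTORS" },
};

static const char *exit_reason_to_name(unsigned int code)
{
        for (unsigned int i = 0;
             i < sizeof(vmx_exit_names) / sizeof(vmx_exit_names[0]); i++)
                if (vmx_exit_names[i].code == code)
                        return vmx_exit_names[i].name;
        return "UNKNOWN";
}

int main(void)
{
        printf("exit 63 -> %s\n", exit_reason_to_name(63));
        printf("exit 64 -> %s\n", exit_reason_to_name(64));
        return 0;
}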
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index d67c4be3e8b1..3b3b9d33ac1d 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -1,3 +1,7 @@
+#ifndef __LINUX_KBUILD_H
+# error "Please do not build this file directly, build asm-offsets.c instead"
+#endif
+
#include <asm/ucontext.h>
#include <linux/lguest.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 4f9359f36bb7..fdcbb4d27c9f 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,3 +1,7 @@
+#ifndef __LINUX_KBUILD_H
+# error "Please do not build this file directly, build asm-offsets.c instead"
+#endif
+
#include <asm/ia32.h>
#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
diff --git a/arch/x86/kernel/audit_64.c b/arch/x86/kernel/audit_64.c
index 06d3e5a14d9d..f3672508b249 100644
--- a/arch/x86/kernel/audit_64.c
+++ b/arch/x86/kernel/audit_64.c
@@ -50,6 +50,7 @@ int audit_classify_syscall(int abi, unsigned syscall)
case __NR_openat:
return 3;
case __NR_execve:
+ case __NR_execveat:
return 5;
default:
return 0;
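Editorial sketch (not part of the patch): classifying execveat alongside execve (class 5) gives the new syscall execve-style argument auditing. A hedged user-space example of invoking execveat directly through syscall(2), assuming a kernel with the syscall wired up and a libc that defines SYS_execveat:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef AT_EMPTY_PATH
#define AT_EMPTY_PATH 0x1000
#endif

int main(void)
{
        char *argv[] = { "true", NULL };
        char *envp[] = { NULL };
        int fd = open("/bin/true", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Execute the program referred to by fd; the pathname is empty. */
        syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);

        /* Only reached if execveat failed (e.g. ENOSYS on an old kernel). */
        perror("execveat");
        return 1;
}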
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
index 639d1289b1ba..97242a9242bd 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
@@ -130,10 +130,7 @@ static ssize_t _iommu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &iommu_cpumask);
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 30790d798e6b..cc6cedb8f25d 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -219,7 +219,6 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int n;
cpumask_t *active_mask;
struct pmu *pmu = dev_get_drvdata(dev);
@@ -230,10 +229,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
else
return 0;
- n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index d64f275fe274..673f930c700f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -365,11 +365,7 @@ static void rapl_pmu_event_read(struct perf_event *event)
static ssize_t rapl_get_attr_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask);
-
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9762dbd9f3f7..08f3fed2b0f2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -647,11 +647,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
static ssize_t uncore_get_attr_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
-
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 49f886481615..dd2f07ae9d0c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1114,8 +1114,8 @@ void __init memblock_find_dma_reserve(void)
* at first, and assume boot_mem will not take below MAX_DMA_PFN
*/
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
- start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
- end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+ start_pfn = min(start_pfn, MAX_DMA_PFN);
+ end_pfn = min(end_pfn, MAX_DMA_PFN);
nr_pages += end_pfn - start_pfn;
}
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 2e1a6853e00c..fe9f0b79a18b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
u32 (*base)(int num, int slot, int func, size_t size);
};
+static size_t __init gen9_stolen_size(int num, int slot, int func)
+{
+ u16 gmch_ctrl;
+
+ gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+ gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+ gmch_ctrl &= BDW_GMCH_GMS_MASK;
+
+ if (gmch_ctrl < 0xf0)
+ return gmch_ctrl << 25; /* 32 MB units */
+ else
+ /* 4MB increments starting at 0xf0 for 4MB */
+ return (gmch_ctrl - 0xf0 + 1) << 22;
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
.base = i830_stolen_base,
.size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
.size = gen8_stolen_size,
};
+static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
+ .base = intel_stolen_base,
+ .size = gen9_stolen_size,
+};
+
static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
+ INTEL_SKL_IDS(&gen9_stolen_funcs),
};
static void __init intel_graphics_stolen(int num, int slot, int func)
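Editorial sketch (not part of the patch): gen9_stolen_size() decodes the Skylake graphics-mode-select field in two ranges: values below 0xf0 count 32 MB units, values from 0xf0 upward count 4 MB increments (0xf0 itself meaning 4 MB). A quick stand-alone version of just that decoding step, with the PCI config read left out:

#include <stdio.h>
#include <stddef.h>

static size_t gen9_stolen_bytes(unsigned int gms)
{
        if (gms < 0xf0)
                return (size_t)gms << 25;               /* 32 MB units */
        return (size_t)(gms - 0xf0 + 1) << 22;          /* 4 MB increments */
}

int main(void)
{
        unsigned int samples[] = { 0x01, 0x02, 0xf0, 0xf1, 0xfe };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("gms=0x%02x -> %zu MB stolen\n", samples[i],
                       gen9_stolen_bytes(samples[i]) >> 20);
        return 0;
}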
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 344b63f18d14..1cf7c97ff175 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1191,10 +1191,10 @@ ENTRY(ftrace_graph_caller)
pushl %eax
pushl %ecx
pushl %edx
- movl 0xc(%esp), %edx
- lea 0x4(%ebp), %eax
+ movl 0xc(%esp), %eax
+ lea 0x4(%ebp), %edx
movl (%ebp), %ecx
- subl $MCOUNT_INSN_SIZE, %edx
+ subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return
popl %edx
popl %ecx
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c0226ab54106..90878aa38dbd 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -652,6 +652,20 @@ ENTRY(stub_execve)
CFI_ENDPROC
END(stub_execve)
+ENTRY(stub_execveat)
+ CFI_STARTPROC
+ addq $8, %rsp
+ PARTIAL_FRAME 0
+ SAVE_REST
+ FIXUP_TOP_OF_STACK %r11
+ call sys_execveat
+ RESTORE_TOP_OF_STACK %r11
+ movq %rax,RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+END(stub_execveat)
+
/*
* sigreturn is special because it needs to restore all registers on return.
* This cannot be done with SYSRET, so use the IRET return path instead.
@@ -697,6 +711,20 @@ ENTRY(stub_x32_execve)
CFI_ENDPROC
END(stub_x32_execve)
+ENTRY(stub_x32_execveat)
+ CFI_STARTPROC
+ addq $8, %rsp
+ PARTIAL_FRAME 0
+ SAVE_REST
+ FIXUP_TOP_OF_STACK %r11
+ call compat_sys_execveat
+ RESTORE_TOP_OF_STACK %r11
+ movq %rax,RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+END(stub_x32_execveat)
+
#endif
/*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f6945bef2cd1..94f643484300 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -283,7 +283,14 @@ NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
- pv_info.paravirt_enabled = 1;
+
+ /*
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
+ * guest kernel works like a bare metal kernel with additional
+ * features, and paravirt_enabled is about features that are
+ * missing.
+ */
+ pv_info.paravirt_enabled = 0;
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d9156ceecdff..42caaef897c8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -59,13 +59,12 @@ static void kvm_get_wallclock(struct timespec *now)
native_write_msr(msr_kvm_wall_clock, low, high);
- preempt_disable();
- cpu = smp_processor_id();
+ cpu = get_cpu();
vcpu_time = &hv_clock[cpu].pvti;
pvclock_read_wallclock(&wall_clock, vcpu_time, now);
- preempt_enable();
+ put_cpu();
}
static int kvm_set_wallclock(const struct timespec *now)
@@ -107,11 +106,10 @@ static unsigned long kvm_get_tsc_khz(void)
int cpu;
unsigned long tsc_khz;
- preempt_disable();
- cpu = smp_processor_id();
+ cpu = get_cpu();
src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src);
- preempt_enable();
+ put_cpu();
return tsc_khz;
}
@@ -263,7 +261,6 @@ void __init kvmclock_init(void)
#endif
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
- pv_info.paravirt_enabled = 1;
pv_info.name = "KVM";
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
@@ -284,23 +281,22 @@ int __init kvm_setup_vsyscall_timeinfo(void)
size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
- preempt_disable();
- cpu = smp_processor_id();
+ cpu = get_cpu();
vcpu_time = &hv_clock[cpu].pvti;
flags = pvclock_read_flags(vcpu_time);
if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
- preempt_enable();
+ put_cpu();
return 1;
}
if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
- preempt_enable();
+ put_cpu();
return ret;
}
- preempt_enable();
+ put_cpu();
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3ed4a68d4013..5a2c02913af3 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -283,24 +283,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
- /*
- * Reload esp0, LDT and the page table pointer:
- */
+ /* Reload esp0 and ss1. */
load_sp0(tss, next);
- /*
- * Switch DS and ES.
- * This won't pick up thread selector changes, but I guess that is ok.
- */
- savesegment(es, prev->es);
- if (unlikely(next->es | prev->es))
- loadsegment(es, next->es);
-
- savesegment(ds, prev->ds);
- if (unlikely(next->ds | prev->ds))
- loadsegment(ds, next->ds);
-
-
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
@@ -309,41 +294,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
savesegment(fs, fsindex);
savesegment(gs, gsindex);
+ /*
+ * Load TLS before restoring any segments so that segment loads
+ * reference the correct GDT entries.
+ */
load_TLS(next, cpu);
/*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
- * the GDT and LDT are properly updated, and must be
- * done before math_state_restore, so the TS bit is up
- * to date.
+ * Leave lazy mode, flushing any hypercalls made here. This
+ * must be done after loading TLS entries in the GDT but before
+ * loading segments that might reference them, and it must
+ * be done before math_state_restore, so the TS bit is up to
+ * date.
*/
arch_end_context_switch(next_p);
+ /* Switch DS and ES.
+ *
+ * Reading them only returns the selectors, but writing them (if
+ * nonzero) loads the full descriptor from the GDT or LDT. The
+ * LDT for next is loaded in switch_mm, and the GDT is loaded
+ * above.
+ *
+ * We therefore need to write new values to the segment
+ * registers on every context switch unless both the new and old
+ * values are zero.
+ *
+ * Note that we don't need to do anything for CS and SS, as
+ * those are saved and restored as part of pt_regs.
+ */
+ savesegment(es, prev->es);
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+
+ savesegment(ds, prev->ds);
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
/*
* Switch FS and GS.
*
- * Segment register != 0 always requires a reload. Also
- * reload when it has changed. When prev process used 64bit
- * base always reload to avoid an information leak.
+ * These are even more complicated than DS and ES: they have
+ * 64-bit bases that are controlled by arch_prctl. Those bases
+ * only differ from the values in the GDT or LDT if the selector
+ * is 0.
+ *
+ * Loading the segment register resets the hidden base part of
+ * the register to 0 or the value from the GDT / LDT. If the
+ * next base address is zero, writing 0 to the segment register is
+ * much faster than using wrmsr to explicitly zero the base.
+ *
+ * The thread_struct.fs and thread_struct.gs values are 0
+ * if the fs and gs bases respectively are not overridden
+ * from the values implied by fsindex and gsindex. They
+ * are nonzero, and store the nonzero base addresses, if
+ * the bases are overridden.
+ *
+ * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+ * be impossible.
+ *
+ * Therefore we need to reload the segment registers if either
+ * the old or new selector is nonzero, and we need to override
+ * the base address if the next thread expects it to be overridden.
+ *
+ * This code is unnecessarily slow in the case where the old and
+ * new indexes are zero and the new base is nonzero -- it will
+ * unnecessarily write 0 to the selector before writing the new
+ * base address.
+ *
+ * Note: This all depends on arch_prctl being the only way that
+ * user code can override the segment base. Once wrfsbase and
+ * wrgsbase are enabled, most of this code will need to change.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
+
/*
- * Check if the user used a selector != 0; if yes
- * clear 64bit base, since overloaded base is always
- * mapped to the Null selector
+ * If user code wrote a nonzero value to FS, then it also
+ * cleared the overridden base address.
+ *
+ * XXX: if user code wrote 0 to FS and cleared the base
+ * address itself, we won't notice and we'll incorrectly
+ * restore the prior base address next time we reschedule
+ * the process.
*/
if (fsindex)
prev->fs = 0;
}
- /* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
+
+ /* This works (and fails) the same way as fsindex above. */
if (gsindex)
prev->gs = 0;
}
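Editorial sketch (not part of the patch): the comment block above ties the saved thread_struct.fs/gs values to the bases a task sets through arch_prctl. For reference, reading the current FS/GS bases from user space looks roughly like this, assuming x86-64 Linux and the ARCH_GET_FS/ARCH_GET_GS codes from <asm/prctl.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>          /* ARCH_GET_FS, ARCH_GET_GS */

int main(void)
{
        unsigned long fsbase = 0, gsbase = 0;

        /* arch_prctl had no glibc wrapper at the time; go via syscall(). */
        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) ||
            syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase)) {
                perror("arch_prctl");
                return 1;
        }

        printf("FS base: 0x%lx (normally the TLS block set up by libc)\n", fsbase);
        printf("GS base: 0x%lx (normally 0 for user space)\n", gsbase);
        return 0;
}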
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index f7fec09e3e3a..3e551eee87b9 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -27,6 +27,43 @@ static int get_free_idx(void)
return -ESRCH;
}
+static bool tls_desc_okay(const struct user_desc *info)
+{
+ if (LDT_empty(info))
+ return true;
+
+ /*
+ * espfix is required for 16-bit data segments, but espfix
+ * only works for LDT segments.
+ */
+ if (!info->seg_32bit)
+ return false;
+
+ /* Only allow data segments in the TLS array. */
+ if (info->contents > 1)
+ return false;
+
+ /*
+ * Non-present segments with DPL 3 present an interesting attack
+ * surface. The kernel should handle such segments correctly,
+ * but TLS is very difficult to protect in a sandbox, so prevent
+ * such segments from being created.
+ *
+ * If userspace needs to remove a TLS entry, it can still delete
+ * it outright.
+ */
+ if (info->seg_not_present)
+ return false;
+
+#ifdef CONFIG_X86_64
+ /* The L bit makes no sense for data. */
+ if (info->lm)
+ return false;
+#endif
+
+ return true;
+}
+
static void set_tls_desc(struct task_struct *p, int idx,
const struct user_desc *info, int n)
{
@@ -66,6 +103,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;
+ if (!tls_desc_okay(&info))
+ return -EINVAL;
+
if (idx == -1)
idx = info.entry_number;
@@ -192,6 +232,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
{
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
const struct user_desc *info;
+ int i;
if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
(pos % sizeof(struct user_desc)) != 0 ||
@@ -205,6 +246,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
else
info = infobuf;
+ for (i = 0; i < count / sizeof(struct user_desc); i++)
+ if (!tls_desc_okay(info + i))
+ return -EINVAL;
+
set_tls_desc(target,
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
info, count / sizeof(struct user_desc));
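Editorial sketch (not part of the patch): tls_desc_okay() now rejects TLS descriptors that are 16-bit, non-data, or not-present with DPL 3. A hedged user-space example of the kind of set_thread_area() call that is expected to start failing with -EINVAL after this change, assuming x86 Linux and struct user_desc from <asm/ldt.h>:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>            /* struct user_desc */

int main(void)
{
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number   = -1;       /* let the kernel pick a free slot */
        desc.base_addr      = 0;
        desc.limit          = 0xfff;
        desc.seg_32bit      = 0;        /* 16-bit segment: now rejected */
        desc.contents       = 0;        /* data segment */
        desc.limit_in_pages = 0;
        desc.useable        = 1;

        if (syscall(SYS_set_thread_area, &desc) < 0)
                printf("set_thread_area rejected the descriptor: %s\n",
                       strerror(errno));
        else
                printf("descriptor installed at entry %u\n", desc.entry_number);
        return 0;
}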
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 4c540c4719d8..0de1fae2bdf0 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -738,3 +738,4 @@ void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
return (void *)xsave + xstate_comp_offsets[feature];
}
+EXPORT_SYMBOL_GPL(get_xsave_addr);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 25d22b2d6509..08f790dfadc9 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -7,14 +7,13 @@ CFLAGS_vmx.o := -I.
KVM := ../../../virt/kvm
-kvm-y += $(KVM)/kvm_main.o $(KVM)/ioapic.o \
- $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
+kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(KVM)/assigned-dev.o $(KVM)/iommu.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
- i8254.o cpuid.o pmu.o
+ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
+kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o
diff --git a/virt/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index e05000e200d2..6eb5c20ee373 100644
--- a/virt/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -20,6 +20,32 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include "irq.h"
+#include "assigned-dev.h"
+
+struct kvm_assigned_dev_kernel {
+ struct kvm_irq_ack_notifier ack_notifier;
+ struct list_head list;
+ int assigned_dev_id;
+ int host_segnr;
+ int host_busnr;
+ int host_devfn;
+ unsigned int entries_nr;
+ int host_irq;
+ bool host_irq_disabled;
+ bool pci_2_3;
+ struct msix_entry *host_msix_entries;
+ int guest_irq;
+ struct msix_entry *guest_msix_entries;
+ unsigned long irq_requested_type;
+ int irq_source_id;
+ int flags;
+ struct pci_dev *dev;
+ struct kvm *kvm;
+ spinlock_t intx_lock;
+ spinlock_t intx_mask_lock;
+ char irq_name[32];
+ struct pci_saved_state *pci_saved_state;
+};
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
int assigned_dev_id)
@@ -748,7 +774,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
if (r)
goto out_list_del;
}
- r = kvm_assign_device(kvm, match);
+ r = kvm_assign_device(kvm, match->dev);
if (r)
goto out_list_del;
@@ -790,7 +816,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
goto out;
}
- kvm_deassign_device(kvm, match);
+ kvm_deassign_device(kvm, match->dev);
kvm_free_assigned_device(kvm, match);
diff --git a/arch/x86/kvm/assigned-dev.h b/arch/x86/kvm/assigned-dev.h
new file mode 100644
index 000000000000..a428c1a211b2
--- /dev/null
+++ b/arch/x86/kvm/assigned-dev.h
@@ -0,0 +1,32 @@
+#ifndef ARCH_X86_KVM_ASSIGNED_DEV_H
+#define ARCH_X86_KVM_ASSIGNED_DEV_H
+
+#include <linux/kvm_host.h>
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev);
+int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev);
+
+int kvm_iommu_map_guest(struct kvm *kvm);
+int kvm_iommu_unmap_guest(struct kvm *kvm);
+
+long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+ unsigned long arg);
+
+void kvm_free_all_assigned_devices(struct kvm *kvm);
+#else
+static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
+
+#endif /* ARCH_X86_KVM_ASSIGNED_DEV_H */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 976e3a57f9ea..8a80737ee6e6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -23,7 +23,7 @@
#include "mmu.h"
#include "trace.h"
-static u32 xstate_required_size(u64 xstate_bv)
+static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
int feature_bit = 0;
u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
@@ -31,9 +31,10 @@ static u32 xstate_required_size(u64 xstate_bv)
xstate_bv &= XSTATE_EXTEND_MASK;
while (xstate_bv) {
if (xstate_bv & 0x1) {
- u32 eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx, offset;
cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
- ret = max(ret, eax + ebx);
+ offset = compacted ? ret : ebx;
+ ret = max(ret, offset + eax);
}
xstate_bv >>= 1;
@@ -53,6 +54,8 @@ u64 kvm_supported_xcr0(void)
return xcr0;
}
+#define F(x) bit(X86_FEATURE_##x)
+
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -64,13 +67,13 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
/* Update OSXSAVE bit */
if (cpu_has_xsave && best->function == 0x1) {
- best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+ best->ecx &= ~F(OSXSAVE);
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
- best->ecx |= bit(X86_FEATURE_OSXSAVE);
+ best->ecx |= F(OSXSAVE);
}
if (apic) {
- if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+ if (best->ecx & F(TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
else
apic->lapic_timer.timer_mode_mask = 1 << 17;
@@ -85,9 +88,13 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
(best->eax | ((u64)best->edx << 32)) &
kvm_supported_xcr0();
vcpu->arch.guest_xstate_size = best->ebx =
- xstate_required_size(vcpu->arch.xcr0);
+ xstate_required_size(vcpu->arch.xcr0, false);
}
+ best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
+ if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
/*
* The existing code assumes virtual address is 48-bit in the canonical
* address checks; exit if it is ever changed.
@@ -122,8 +129,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
break;
}
}
- if (entry && (entry->edx & bit(X86_FEATURE_NX)) && !is_efer_nx()) {
- entry->edx &= ~bit(X86_FEATURE_NX);
+ if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
+ entry->edx &= ~F(NX);
printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
@@ -227,8 +234,6 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->flags = 0;
}
-#define F(x) bit(X86_FEATURE_##x)
-
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
u32 func, u32 index, int *nent, int maxnent)
{
@@ -267,6 +272,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+ unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -317,7 +323,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
const u32 kvm_supported_word9_x86_features =
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
- F(ADX) | F(SMAP);
+ F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
+ F(AVX512CD);
+
+ /* cpuid 0xD.1.eax */
+ const u32 kvm_supported_word10_x86_features =
+ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -453,16 +464,34 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u64 supported = kvm_supported_xcr0();
entry->eax &= supported;
+ entry->ebx = xstate_required_size(supported, false);
+ entry->ecx = entry->ebx;
entry->edx &= supported >> 32;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ if (!supported)
+ break;
+
for (idx = 1, i = 1; idx < 64; ++idx) {
u64 mask = ((u64)1 << idx);
if (*nent >= maxnent)
goto out;
do_cpuid_1_ent(&entry[i], function, idx);
- if (entry[i].eax == 0 || !(supported & mask))
- continue;
+ if (idx == 1) {
+ entry[i].eax &= kvm_supported_word10_x86_features;
+ entry[i].ebx = 0;
+ if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
+ entry[i].ebx =
+ xstate_required_size(supported,
+ true);
+ } else {
+ if (entry[i].eax == 0 || !(supported & mask))
+ continue;
+ if (WARN_ON_ONCE(entry[i].ecx & 1))
+ continue;
+ }
+ entry[i].ecx = 0;
+ entry[i].edx = 0;
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
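Editorial sketch (not part of the patch): xstate_required_size() now handles two layouts: the standard XSAVE format places each component at the offset CPUID leaf 0xD reports in EBX, while the compacted (XSAVEC/XSAVES) format packs enabled components back to back after the 576-byte legacy-plus-header area. A user-space version of the same calculation using raw CPUID, assuming an x86 CPU with XSAVE and GCC-style inline asm:

#include <stdint.h>
#include <stdio.h>

#define XSAVE_BASE_SIZE 576      /* legacy area (512) + xsave header (64) */

static void cpuid_count(uint32_t leaf, uint32_t sub,
                        uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        asm volatile("cpuid"
                     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                     : "a" (leaf), "c" (sub));
}

/* Mirrors the logic of xstate_required_size(xstate_bv, compacted) above. */
static uint32_t xstate_required_size(uint64_t xstate_bv, int compacted)
{
        uint32_t ret = XSAVE_BASE_SIZE;
        int bit = 2;                    /* components 0/1 live in the base area */

        xstate_bv &= ~3ULL;
        xstate_bv >>= 2;
        while (xstate_bv) {
                if (xstate_bv & 1) {
                        uint32_t eax, ebx, ecx, edx, offset;

                        cpuid_count(0xD, bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        if (offset + eax > ret)
                                ret = offset + eax;
                }
                xstate_bv >>= 1;
                bit++;
        }
        return ret;
}

int main(void)
{
        uint32_t a, b, c, d;

        cpuid_count(0xD, 0, &a, &b, &c, &d);    /* EDX:EAX = supported XCR0 bits */
        uint64_t supported = ((uint64_t)d << 32) | a;

        printf("standard size:  %u bytes\n", xstate_required_size(supported, 0));
        printf("compacted size: %u bytes\n", xstate_required_size(supported, 1));
        return 0;
}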
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9f8a2faf5040..169b09d76ddd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -123,6 +123,7 @@
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
+#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
@@ -166,6 +167,8 @@
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
+#define NearBranch ((u64)1 << 52) /* Near branches */
+#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@@ -209,6 +212,7 @@ struct opcode {
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
+ const struct instr_dual *idual;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
@@ -231,6 +235,11 @@ struct escape {
struct opcode high[64];
};
+struct instr_dual {
+ struct opcode mod012;
+ struct opcode mod3;
+};
+
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
@@ -379,6 +388,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
+/* 2 operand, src and dest are reversed */
+#define FASTOP2R(op, name) \
+ FOP_START(name) \
+ FOP2E(op##b, dl, al) \
+ FOP2E(op##w, dx, ax) \
+ FOP2E(op##l, edx, eax) \
+ ON64(FOP2E(op##q, rdx, rax)) \
+ FOP_END
+
#define FOP3E(op, dst, src, src2) \
FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
@@ -477,9 +495,9 @@ address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
}
static inline unsigned long
-register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
+register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
- return address_mask(ctxt, reg);
+ return address_mask(ctxt, reg_read(ctxt, reg));
}
static void masked_increment(ulong *reg, ulong mask, int inc)
@@ -488,7 +506,7 @@ static void masked_increment(ulong *reg, ulong mask, int inc)
}
static inline void
-register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
+register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
ulong mask;
@@ -496,7 +514,7 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
mask = ~0UL;
else
mask = ad_mask(ctxt);
- masked_increment(reg, mask, inc);
+ masked_increment(reg_rmw(ctxt, reg), mask, inc);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
@@ -564,40 +582,6 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
-static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
- int cs_l)
-{
- switch (ctxt->op_bytes) {
- case 2:
- ctxt->_eip = (u16)dst;
- break;
- case 4:
- ctxt->_eip = (u32)dst;
- break;
-#ifdef CONFIG_X86_64
- case 8:
- if ((cs_l && is_noncanonical_address(dst)) ||
- (!cs_l && (dst >> 32) != 0))
- return emulate_gp(ctxt, 0);
- ctxt->_eip = dst;
- break;
-#endif
- default:
- WARN(1, "unsupported eip assignment size\n");
- }
- return X86EMUL_CONTINUE;
-}
-
-static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
-{
- return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
-}
-
-static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
- return assign_eip_near(ctxt, ctxt->_eip + rel);
-}
-
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
@@ -641,25 +625,24 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
return true;
}
-static int __linearize(struct x86_emulate_ctxt *ctxt,
- struct segmented_address addr,
- unsigned *max_size, unsigned size,
- bool write, bool fetch,
- ulong *linear)
+static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ unsigned *max_size, unsigned size,
+ bool write, bool fetch,
+ enum x86emul_mode mode, ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
- unsigned cpl;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
- switch (ctxt->mode) {
+ switch (mode) {
case X86EMUL_MODE_PROT64:
- if (((signed long)la << 16) >> 16 != la)
- return emulate_gp(ctxt, 0);
+ if (is_noncanonical_address(la))
+ goto bad;
*max_size = min_t(u64, ~0u, (1ull << 48) - la);
if (size > *max_size)
@@ -678,46 +661,20 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
- if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
- (ctxt->d & NoBigReal)) {
- /* la is between zero and 0xffff */
- if (la > 0xffff)
- goto bad;
- *max_size = 0x10000 - la;
- } else if ((desc.type & 8) || !(desc.type & 4)) {
- /* expand-up segment */
- if (addr.ea > lim)
- goto bad;
- *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
- } else {
+ if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
if (addr.ea <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
- if (addr.ea > lim)
- goto bad;
- *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
}
+ if (addr.ea > lim)
+ goto bad;
+ *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
if (size > *max_size)
goto bad;
- cpl = ctxt->ops->cpl(ctxt);
- if (!(desc.type & 8)) {
- /* data segment */
- if (cpl > desc.dpl)
- goto bad;
- } else if ((desc.type & 8) && !(desc.type & 4)) {
- /* nonconforming code segment */
- if (cpl != desc.dpl)
- goto bad;
- } else if ((desc.type & 8) && (desc.type & 4)) {
- /* conforming code segment */
- if (cpl < desc.dpl)
- goto bad;
- }
+ la &= (u32)-1;
break;
}
- if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
- la &= (u32)-1;
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
*linear = la;
@@ -735,9 +692,55 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ulong *linear)
{
unsigned max_size;
- return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+ return __linearize(ctxt, addr, &max_size, size, write, false,
+ ctxt->mode, linear);
+}
+
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+ enum x86emul_mode mode)
+{
+ ulong linear;
+ int rc;
+ unsigned max_size;
+ struct segmented_address addr = { .seg = VCPU_SREG_CS,
+ .ea = dst };
+
+ if (ctxt->op_bytes != sizeof(unsigned long))
+ addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+ if (rc == X86EMUL_CONTINUE)
+ ctxt->_eip = addr.ea;
+ return rc;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+ return assign_eip(ctxt, dst, ctxt->mode);
}
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+ const struct desc_struct *cs_desc)
+{
+ enum x86emul_mode mode = ctxt->mode;
+
+#ifdef CONFIG_X86_64
+ if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
+ u64 efer = 0;
+
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ if (efer & EFER_LMA)
+ mode = X86EMUL_MODE_PROT64;
+ }
+#endif
+ if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+ mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+ return assign_eip(ctxt, dst, mode);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
+}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
@@ -776,7 +779,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
- rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+ rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
+ &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@@ -911,6 +915,8 @@ FASTOP2W(btc);
FASTOP2(xadd);
+FASTOP2R(cmp, cmp_r);
+
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
@@ -1221,6 +1227,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
+ modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
@@ -1229,10 +1236,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
- case 0:
- if (ctxt->modrm_rm == 5)
- modrm_ea += insn_fetch(s32, ctxt);
- break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
@@ -1284,7 +1287,8 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
else
sv = (s64)ctxt->src.val & (s64)mask;
- ctxt->dst.addr.mem.ea += (sv >> 3);
+ ctxt->dst.addr.mem.ea = address_mask(ctxt,
+ ctxt->dst.addr.mem.ea + (sv >> 3));
}
/* only subword offset */
@@ -1610,6 +1614,9 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
+ if (is_noncanonical_address(get_desc_base(&seg_desc) |
+ ((u64)base3 << 32)))
+ return emulate_gp(ctxt, 0);
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
@@ -1807,6 +1814,10 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
+ if (ctxt->op_bytes == 4) {
+ rsp_increment(ctxt, -2);
+ ctxt->op_bytes = 2;
+ }
return em_push(ctxt);
}
@@ -1850,7 +1861,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- ctxt->src.val = (unsigned long)ctxt->eflags;
+ ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
return em_push(ctxt);
}
@@ -2035,7 +2046,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+ rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
@@ -2045,31 +2056,22 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
}
-static int em_grp45(struct x86_emulate_ctxt *ctxt)
+static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
- int rc = X86EMUL_CONTINUE;
+ return assign_eip_near(ctxt, ctxt->src.val);
+}
- switch (ctxt->modrm_reg) {
- case 2: /* call near abs */ {
- long int old_eip;
- old_eip = ctxt->_eip;
- rc = assign_eip_near(ctxt, ctxt->src.val);
- if (rc != X86EMUL_CONTINUE)
- break;
- ctxt->src.val = old_eip;
- rc = em_push(ctxt);
- break;
- }
- case 4: /* jmp abs */
- rc = assign_eip_near(ctxt, ctxt->src.val);
- break;
- case 5: /* jmp far */
- rc = em_jmp_far(ctxt);
- break;
- case 6: /* push */
- rc = em_push(ctxt);
- break;
- }
+static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
+{
+ int rc;
+ long int old_eip;
+
+ old_eip = ctxt->_eip;
+ rc = assign_eip_near(ctxt, ctxt->src.val);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
return rc;
}
@@ -2128,11 +2130,11 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
- rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, eip, new_desc.l);
+ rc = assign_eip_far(ctxt, eip, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
@@ -2316,6 +2318,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
+ ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
} else {
/* legacy mode */
@@ -2349,11 +2352,9 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
- /* XXX sysenter/sysexit have not been tested in 64bit mode.
- * Therefore, we inject an #UD.
- */
+ /* sysenter/sysexit have not been tested in 64bit mode. */
if (ctxt->mode == X86EMUL_MODE_PROT64)
- return emulate_ud(ctxt);
+ return X86EMUL_UNHANDLEABLE;
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -2425,6 +2426,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
+ rcx = (u32)rcx;
+ rdx = (u32)rdx;
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
@@ -2599,7 +2602,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
save_state_to_tss16(ctxt, &tss_seg);
@@ -2607,13 +2609,11 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
if (old_tss_sel != 0xffff) {
@@ -2624,7 +2624,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
- /* FIXME: need to provide precise fault address */
return ret;
}
@@ -2813,7 +2812,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
- * 3. jmp/call to TSS: Check against DPL of the TSS
+ * 3. jmp/call to TSS/task-gate: No check is performed since the
+ * hardware checks it before exiting.
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
@@ -2830,13 +2830,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
- } else if (reason != TASK_SWITCH_IRET) {
- int dpl = next_tss_desc.dpl;
- if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
- return emulate_gp(ctxt, tss_selector);
}
-
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
@@ -2913,8 +2908,8 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
{
int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
- register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
- op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
+ register_address_increment(ctxt, reg, df * op->bytes);
+ op->addr.mem.ea = register_address(ctxt, reg);
}
static int em_das(struct x86_emulate_ctxt *ctxt)
@@ -3025,7 +3020,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+ rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE)
goto fail;
@@ -3215,6 +3210,8 @@ static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+ if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
+ ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
@@ -3317,7 +3314,7 @@ static int em_sidt(struct x86_emulate_ctxt *ctxt)
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
-static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
struct desc_ptr desc_ptr;
int rc;
@@ -3329,12 +3326,23 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- ctxt->ops->set_gdt(ctxt, &desc_ptr);
+ if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+ is_noncanonical_address(desc_ptr.address))
+ return emulate_gp(ctxt, 0);
+ if (lgdt)
+ ctxt->ops->set_gdt(ctxt, &desc_ptr);
+ else
+ ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
+static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+{
+ return em_lgdt_lidt(ctxt, true);
+}
+
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
int rc;
@@ -3348,20 +3356,7 @@ static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
- struct desc_ptr desc_ptr;
- int rc;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ctxt->op_bytes = 8;
- rc = read_descriptor(ctxt, ctxt->src.addr.mem,
- &desc_ptr.size, &desc_ptr.address,
- ctxt->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- return rc;
- ctxt->ops->set_idt(ctxt, &desc_ptr);
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return X86EMUL_CONTINUE;
+ return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
@@ -3384,7 +3379,7 @@ static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
- register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ register_address_increment(ctxt, VCPU_REGS_RCX, -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
@@ -3554,7 +3549,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
- rsvd = CR3_L_MODE_RESERVED_BITS;
+ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
@@ -3596,8 +3591,15 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
- if (check_dr7_gd(ctxt))
+ if (check_dr7_gd(ctxt)) {
+ ulong dr6;
+
+ ctxt->ops->get_dr(ctxt, 6, &dr6);
+ dr6 &= ~15;
+ dr6 |= DR6_BD | DR6_RTM;
+ ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
+ }
return X86EMUL_CONTINUE;
}
@@ -3684,6 +3686,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
+#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
@@ -3780,11 +3783,11 @@ static const struct opcode group4[] = {
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
- I(SrcMem | Stack, em_grp45),
+ I(SrcMem | NearBranch, em_call_near_abs),
I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
- I(SrcMem | Stack, em_grp45),
- I(SrcMemFAddr | ImplicitOps, em_grp45),
- I(SrcMem | Stack, em_grp45), D(Undefined),
+ I(SrcMem | NearBranch, em_jmp_abs),
+ I(SrcMemFAddr | ImplicitOps, em_jmp_far),
+ I(SrcMem | Stack, em_push), D(Undefined),
};
static const struct opcode group6[] = {
@@ -3845,8 +3848,12 @@ static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
+static const struct instr_dual instr_dual_0f_2b = {
+ I(0, em_mov), N
+};
+
static const struct gprefix pfx_0f_2b = {
- I(0, em_mov), I(0, em_mov), N, N,
+ ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
@@ -3920,6 +3927,10 @@ static const struct escape escape_dd = { {
N, N, N, N, N, N, N, N,
} };
+static const struct instr_dual instr_dual_0f_c3 = {
+ I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
+};
+
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
@@ -3964,7 +3975,7 @@ static const struct opcode opcode_table[256] = {
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
- X16(D(SrcImmByte)),
+ X16(D(SrcImmByte | NearBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
@@ -3991,20 +4002,20 @@ static const struct opcode opcode_table[256] = {
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
- F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
+ F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
- F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
+ F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
- I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
- I(ImplicitOps | Stack, em_ret),
+ I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
+ I(ImplicitOps | NearBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
@@ -4024,13 +4035,14 @@ static const struct opcode opcode_table[256] = {
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
- X3(I(SrcImmByte, em_loop)),
- I(SrcImmByte, em_jcxz),
+ X3(I(SrcImmByte | NearBranch, em_loop)),
+ I(SrcImmByte | NearBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
- I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
- I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+ I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
+ I(SrcImmFAddr | No64, em_jmp_far),
+ D(SrcImmByte | ImplicitOps | NearBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
@@ -4090,7 +4102,7 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
- X16(D(SrcImm)),
+ X16(D(SrcImm | NearBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
@@ -4121,7 +4133,7 @@ static const struct opcode twobyte_table[256] = {
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
- N, D(DstMem | SrcReg | ModRM | Mov),
+ N, ID(0, &instr_dual_0f_c3),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
@@ -4134,12 +4146,20 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
+static const struct instr_dual instr_dual_0f_38_f0 = {
+ I(DstReg | SrcMem | Mov, em_movbe), N
+};
+
+static const struct instr_dual instr_dual_0f_38_f1 = {
+ I(DstMem | SrcReg | Mov, em_movbe), N
+};
+
static const struct gprefix three_byte_0f_38_f0 = {
- I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+ ID(0, &instr_dual_0f_38_f0), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
- I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+ ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
@@ -4152,8 +4172,8 @@ static const struct opcode opcode_map_0f_38[256] = {
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
- GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
- GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
+ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
@@ -4275,7 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
+ register_address(ctxt, VCPU_REGS_RDI);
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
@@ -4329,7 +4349,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
+ register_address(ctxt, VCPU_REGS_RSI);
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
@@ -4338,7 +4358,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
- register_address(ctxt,
+ address_mask(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
@@ -4510,8 +4530,7 @@ done_prefixes:
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
- (mode == X86EMUL_MODE_PROT64 ||
- (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+ (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
ctxt->d = NotImpl;
}
@@ -4549,6 +4568,12 @@ done_prefixes:
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
+ case InstrDual:
+ if ((ctxt->modrm >> 6) == 3)
+ opcode = opcode.u.idual->mod3;
+ else
+ opcode = opcode.u.idual->mod012;
+ break;
default:
return EMULATION_FAILED;
}
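
For context on the InstrDual case introduced above: the decoder now keeps two candidate decodings per opcode and picks one from the ModRM mod field, with mod 3 selecting the register form and mod 0-2 the memory form (the 0F C3 entry above, for example, now goes through such a pair, presumably MOVNTI for the memory form and #UD for the register form). The stand-alone C sketch below mirrors only that dispatch; the type and field names are invented for illustration and are not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    struct op {
        const char *mnemonic;
    };

    /* Two decodings for one opcode byte, selected by ModRM.mod
     * (hypothetical mirror of the kernel's struct instr_dual). */
    struct instr_dual {
        struct op mod012;   /* mod = 0, 1, 2: memory operand form */
        struct op mod3;     /* mod = 3: register operand form */
    };

    static struct op decode_dual(const struct instr_dual *d, uint8_t modrm)
    {
        return ((modrm >> 6) == 3) ? d->mod3 : d->mod012;
    }

    int main(void)
    {
        /* 0F C3: valid with a memory destination, undefined with mod = 3. */
        struct instr_dual dual_0f_c3 = { { "movnti" }, { "#UD" } };

        printf("%s\n", decode_dual(&dual_0f_c3, 0x05).mnemonic); /* movnti */
        printf("%s\n", decode_dual(&dual_0f_c3, 0xc3).mnemonic); /* #UD */
        return 0;
    }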
@@ -4567,7 +4592,8 @@ done_prefixes:
return EMULATION_FAILED;
if (unlikely(ctxt->d &
- (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+ (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
+ No16))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
@@ -4578,8 +4604,12 @@ done_prefixes:
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
- if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
- ctxt->op_bytes = 8;
+ if (mode == X86EMUL_MODE_PROT64) {
+ if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
+ ctxt->op_bytes = 8;
+ else if (ctxt->d & NearBranch)
+ ctxt->op_bytes = 8;
+ }
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
@@ -4588,6 +4618,9 @@ done_prefixes:
ctxt->op_bytes = 4;
}
+ if ((ctxt->d & No16) && ctxt->op_bytes == 2)
+ ctxt->op_bytes = 4;
+
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
@@ -4631,7 +4664,8 @@ done_prefixes:
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
- ctxt->memopp->addr.mem.ea += ctxt->_eip;
+ ctxt->memopp->addr.mem.ea = address_mask(ctxt,
+ ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -4775,6 +4809,12 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
+ /* Instruction can only be executed in protected mode */
+ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+ rc = emulate_ud(ctxt);
+ goto done;
+ }
+
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
@@ -4784,12 +4824,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- /* Instruction can only be executed in protected mode */
- if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
- rc = emulate_ud(ctxt);
- goto done;
- }
-
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
@@ -4974,8 +5008,7 @@ writeback:
count = ctxt->src.count;
else
count = ctxt->dst.count;
- register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
- -count);
+ register_address_increment(ctxt, VCPU_REGS_RCX, -count);
if (!string_insn_completed(ctxt)) {
/*
@@ -5053,11 +5086,6 @@ twobyte_insn:
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
- case 0xc3: /* movnti */
- ctxt->dst.bytes = ctxt->op_bytes;
- ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
- (u32) ctxt->src.val;
- break;
default:
goto cannot_emulate;
}
diff --git a/virt/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 0ba4057d271b..b1947e0f3e10 100644
--- a/virt/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -270,7 +270,6 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
spin_unlock(&ioapic->lock);
}
-#ifdef CONFIG_X86
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
@@ -279,12 +278,6 @@ void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
return;
kvm_make_scan_ioapic_request(kvm);
}
-#else
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
- return;
-}
-#endif
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
@@ -586,11 +579,6 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
case IOAPIC_REG_WINDOW:
ioapic_write_indirect(ioapic, data);
break;
-#ifdef CONFIG_IA64
- case IOAPIC_REG_EOI:
- __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
- break;
-#endif
default:
break;
diff --git a/virt/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index e23b70634f1e..3c9195535ffc 100644
--- a/virt/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -19,7 +19,6 @@ struct kvm_vcpu;
/* Direct registers. */
#define IOAPIC_REG_SELECT 0x00
#define IOAPIC_REG_WINDOW 0x10
-#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
/* Indirect registers. */
#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
@@ -45,6 +44,23 @@ struct rtc_status {
DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
};
+union kvm_ioapic_redirect_entry {
+ u64 bits;
+ struct {
+ u8 vector;
+ u8 delivery_mode:3;
+ u8 dest_mode:1;
+ u8 delivery_status:1;
+ u8 polarity:1;
+ u8 remote_irr:1;
+ u8 trig_mode:1;
+ u8 mask:1;
+ u8 reserve:7;
+ u8 reserved[4];
+ u8 dest_id;
+ } fields;
+};
+
struct kvm_ioapic {
u64 base_address;
u32 ioregsel;
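
The union added to ioapic.h above overlays the raw 64-bit redirection-table entry with its decoded fields. A minimal userspace sketch of the same pattern follows; it copies the field widths from the hunk, but C bitfield layout is implementation-defined, so this only matches under the same little-endian GCC ABI assumptions the kernel relies on.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of union kvm_ioapic_redirect_entry. */
    union redirect_entry {
        uint64_t bits;                    /* raw register value */
        struct {
            uint8_t vector;
            uint8_t delivery_mode:3;
            uint8_t dest_mode:1;
            uint8_t delivery_status:1;
            uint8_t polarity:1;
            uint8_t remote_irr:1;
            uint8_t trig_mode:1;
            uint8_t mask:1;
            uint8_t reserve:7;
            uint8_t reserved[4];
            uint8_t dest_id;
        } fields;                         /* decoded view of the same bits */
    };

    int main(void)
    {
        union redirect_entry e = { .bits = 0 };

        e.fields.vector = 0x31;   /* interrupt vector 0x31 */
        e.fields.mask = 1;        /* entry starts out masked */
        e.fields.dest_id = 2;     /* route to APIC ID 2 */

        printf("raw entry: %#llx\n", (unsigned long long)e.bits);
        return 0;
    }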
@@ -83,7 +99,7 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode);
+ int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
@@ -97,7 +113,6 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, unsigned long *dest_map);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
u32 *tmr);
diff --git a/virt/kvm/iommu.c b/arch/x86/kvm/iommu.c
index c1e6ae989a43..17b73eeac8a4 100644
--- a/virt/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -31,6 +31,7 @@
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
+#include "assigned-dev.h"
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
@@ -169,10 +170,8 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
return r;
}
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
- struct pci_dev *pdev = NULL;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int r;
bool noncoherent;
@@ -181,7 +180,6 @@ int kvm_assign_device(struct kvm *kvm,
if (!domain)
return 0;
- pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
@@ -212,17 +210,14 @@ out_unmap:
return r;
}
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
- struct pci_dev *pdev = NULL;
/* check if iommu exists and in use */
if (!domain)
return 0;
- pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
diff --git a/virt/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 963b8995a9e8..72298b3ac025 100644
--- a/virt/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -26,9 +26,6 @@
#include <trace/events/kvm.h>
#include <asm/msidef.h>
-#ifdef CONFIG_IA64
-#include <asm/iosapic.h>
-#endif
#include "irq.h"
@@ -38,12 +35,8 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
-#ifdef CONFIG_X86
struct kvm_pic *pic = pic_irqchip(kvm);
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
-#else
- return -1;
-#endif
}
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
@@ -57,12 +50,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
-#ifdef CONFIG_IA64
- return irq->delivery_mode ==
- (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
-#else
return irq->delivery_mode == APIC_DM_LOWEST;
-#endif
}
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
@@ -202,9 +190,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
}
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
set_bit(irq_source_id, bitmap);
unlock:
mutex_unlock(&kvm->irq_lock);
@@ -215,9 +201,7 @@ unlock:
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
mutex_lock(&kvm->irq_lock);
if (irq_source_id < 0 ||
@@ -230,9 +214,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
goto unlock;
kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
-#ifdef CONFIG_X86
kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
-#endif
unlock:
mutex_unlock(&kvm->irq_lock);
}
@@ -242,7 +224,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
{
mutex_lock(&kvm->irq_lock);
kimn->irq = irq;
- hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
+ hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
mutex_unlock(&kvm->irq_lock);
}
@@ -264,7 +246,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
+ hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
if (kimn->irq == gsi)
kimn->func(kimn, mask);
srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -322,16 +304,11 @@ out:
.u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
-#ifdef CONFIG_X86
-# define PIC_ROUTING_ENTRY(irq) \
+#define PIC_ROUTING_ENTRY(irq) \
{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
.u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
-# define ROUTING_ENTRY2(irq) \
+#define ROUTING_ENTRY2(irq) \
IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
-#else
-# define ROUTING_ENTRY2(irq) \
- IOAPIC_ROUTING_ENTRY(irq)
-#endif
static const struct kvm_irq_routing_entry default_routing[] = {
ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
@@ -346,20 +323,6 @@ static const struct kvm_irq_routing_entry default_routing[] = {
ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
-#ifdef CONFIG_IA64
- ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
- ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
- ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
- ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
- ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
- ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
- ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
- ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
- ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
- ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
- ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
- ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
-#endif
};
int kvm_setup_default_irq_routing(struct kvm *kvm)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b8345dd41b25..4f0c0b954686 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -68,6 +68,9 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
+#define APIC_BROADCAST 0xFF
+#define X2APIC_BROADCAST 0xFFFFFFFFul
+
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
@@ -129,8 +132,6 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
-#define KVM_X2APIC_CID_BITS 0
-
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -149,42 +150,56 @@ static void recalculate_apic_map(struct kvm *kvm)
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
+ new->broadcast = APIC_BROADCAST;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
- u16 cid, lid;
- u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
+ if (apic_x2apic_mode(apic)) {
+ new->ldr_bits = 32;
+ new->cid_shift = 16;
+ new->cid_mask = new->lid_mask = 0xffff;
+ new->broadcast = X2APIC_BROADCAST;
+ } else if (kvm_apic_get_reg(apic, APIC_LDR)) {
+ if (kvm_apic_get_reg(apic, APIC_DFR) ==
+ APIC_DFR_CLUSTER) {
+ new->cid_shift = 4;
+ new->cid_mask = 0xf;
+ new->lid_mask = 0xf;
+ } else {
+ new->cid_shift = 8;
+ new->cid_mask = 0;
+ new->lid_mask = 0xff;
+ }
+ }
+
/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building logical id lookup
- * table. After reset APICs are in xapic/flat mode, so if we
- * find apic with different setting we assume this is the mode
+ * table. After reset, APICs are in software-disabled mode, so if
+ * we find an APIC with a different setting, we assume this is the mode
* OS wants all apics to be in; build lookup table accordingly.
*/
- if (apic_x2apic_mode(apic)) {
- new->ldr_bits = 32;
- new->cid_shift = 16;
- new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
- new->lid_mask = 0xffff;
- } else if (kvm_apic_sw_enabled(apic) &&
- !new->cid_mask /* flat mode */ &&
- kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
- new->cid_shift = 4;
- new->cid_mask = 0xf;
- new->lid_mask = 0xf;
- }
+ if (kvm_apic_sw_enabled(apic))
+ break;
+ }
- new->phys_map[kvm_apic_id(apic)] = apic;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u16 cid, lid;
+ u32 ldr, aid;
+ aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
- if (lid)
+ if (aid < ARRAY_SIZE(new->phys_map))
+ new->phys_map[aid] = apic;
+ if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
@@ -201,11 +216,13 @@ out:
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
- u32 prev = kvm_apic_get_reg(apic, APIC_SPIV);
+ bool enabled = val & APIC_SPIV_APIC_ENABLED;
apic_set_reg(apic, APIC_SPIV, val);
- if ((prev ^ val) & APIC_SPIV_APIC_ENABLED) {
- if (val & APIC_SPIV_APIC_ENABLED) {
+
+ if (enabled != apic->sw_enabled) {
+ apic->sw_enabled = enabled;
+ if (enabled) {
static_key_slow_dec_deferred(&apic_sw_disabled);
recalculate_apic_map(apic->vcpu->kvm);
} else
@@ -237,21 +254,17 @@ static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}
static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}
static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
- return ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) ==
- APIC_LVT_TIMER_TSCDEADLINE);
+ return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}
static inline int apic_lvt_nmi_mode(u32 lvt_val)
@@ -326,8 +339,12 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
- apic->irr_pending = true;
apic_set_vector(vec, apic->regs + APIC_IRR);
+ /*
+ * irr_pending must be true if any interrupt is pending; set it after
+ * APIC_IRR to avoid race with apic_clear_irr
+ */
+ apic->irr_pending = true;
}
static inline int apic_search_irr(struct kvm_lapic *apic)
@@ -359,13 +376,15 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
vcpu = apic->vcpu;
- apic_clear_vector(vec, apic->regs + APIC_IRR);
- if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) {
/* try to update RVI */
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- else {
- vec = apic_search_irr(apic);
- apic->irr_pending = (vec != -1);
+ } else {
+ apic->irr_pending = false;
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+ if (apic_search_irr(apic) != -1)
+ apic->irr_pending = true;
}
}
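
The reordering above is about when the irr_pending hint is updated relative to the IRR bitmap: apic_set_irr now raises the hint only after setting the bit, and the non-APICv clear path drops the hint before clearing and re-scanning. The sketch below mirrors that ordering in a single-threaded toy, so it deliberately omits the memory-ordering concerns that motivate the real change; all names are invented.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_VECTORS 256

    struct irr {
        uint32_t map[NR_VECTORS / 32];
        bool pending;    /* "some vector may be set" hint */
    };

    static void irr_set(struct irr *irr, int vec)
    {
        irr->map[vec / 32] |= 1u << (vec % 32);
        /* Raise the hint only after the bitmap, mirroring apic_set_irr. */
        irr->pending = true;
    }

    static int irr_highest(const struct irr *irr)
    {
        for (int vec = NR_VECTORS - 1; vec >= 0; vec--)
            if (irr->map[vec / 32] & (1u << (vec % 32)))
                return vec;
        return -1;
    }

    static void irr_clear(struct irr *irr, int vec)
    {
        /* Mirror of the non-APICv path above: drop the hint, clear the
         * bit, then re-raise the hint if any vector is still set. */
        irr->pending = false;
        irr->map[vec / 32] &= ~(1u << (vec % 32));
        if (irr_highest(irr) != -1)
            irr->pending = true;
    }

    int main(void)
    {
        struct irr irr;

        memset(&irr, 0, sizeof(irr));
        irr_set(&irr, 0x31);
        irr_set(&irr, 0x20);
        irr_clear(&irr, 0x31);
        printf("pending=%d highest=%#x\n", irr.pending, irr_highest(&irr));
        return 0;
    }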
@@ -558,16 +577,25 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+static int kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+{
+ return dest == (apic_x2apic_mode(apic) ?
+ X2APIC_BROADCAST : APIC_BROADCAST);
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
{
- return dest == 0xff || kvm_apic_id(apic) == dest;
+ return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
}
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
int result = 0;
u32 logical_id;
+ if (kvm_apic_broadcast(apic, mda))
+ return 1;
+
if (apic_x2apic_mode(apic)) {
logical_id = kvm_apic_get_reg(apic, APIC_LDR);
return logical_id & mda;
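
The new broadcast handling above treats destination 0xFF as broadcast in xAPIC mode and 0xFFFFFFFF in x2APIC mode, which is why the destination parameters widen to u32. A small sketch of the matching logic, with hypothetical signatures in place of the kvm_lapic plumbing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define APIC_BROADCAST      0xFFu
    #define X2APIC_BROADCAST    0xFFFFFFFFu

    /* Hypothetical stand-ins for the kernel helpers above. */
    static bool apic_is_broadcast(bool x2apic_mode, uint32_t dest)
    {
        return dest == (x2apic_mode ? X2APIC_BROADCAST : APIC_BROADCAST);
    }

    static bool apic_match_physical(bool x2apic_mode, uint32_t apic_id,
                                    uint32_t dest)
    {
        return apic_id == dest || apic_is_broadcast(x2apic_mode, dest);
    }

    int main(void)
    {
        printf("%d\n", apic_match_physical(false, 0x05, 0xFF));       /* 1: xAPIC broadcast */
        printf("%d\n", apic_match_physical(true,  0x05, 0xFF));       /* 0: 0xFF is a normal x2APIC ID */
        printf("%d\n", apic_match_physical(true,  0x05, 0xFFFFFFFF)); /* 1: x2APIC broadcast */
        return 0;
    }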
@@ -595,7 +623,7 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode)
+ int short_hand, unsigned int dest, int dest_mode)
{
int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
@@ -657,15 +685,24 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (!map)
goto out;
+ if (irq->dest_id == map->broadcast)
+ goto out;
+
+ ret = true;
+
if (irq->dest_mode == 0) { /* physical mode */
- if (irq->delivery_mode == APIC_DM_LOWEST ||
- irq->dest_id == 0xff)
+ if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
goto out;
- dst = &map->phys_map[irq->dest_id & 0xff];
+
+ dst = &map->phys_map[irq->dest_id];
} else {
u32 mda = irq->dest_id << (32 - map->ldr_bits);
+ u16 cid = apic_cluster_id(map, mda);
+
+ if (cid >= ARRAY_SIZE(map->logical_map))
+ goto out;
- dst = map->logical_map[apic_cluster_id(map, mda)];
+ dst = map->logical_map[cid];
bitmap = apic_logical_id(map, mda);
@@ -691,8 +728,6 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
*r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
-
- ret = true;
out:
rcu_read_unlock();
return ret;
@@ -1034,6 +1069,26 @@ static void update_divide_count(struct kvm_lapic *apic)
apic->divide_count);
}
+static void apic_timer_expired(struct kvm_lapic *apic)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ wait_queue_head_t *q = &vcpu->wq;
+
+ /*
+ * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
+ * vcpu_enter_guest.
+ */
+ if (atomic_read(&apic->lapic_timer.pending))
+ return;
+
+ atomic_inc(&apic->lapic_timer.pending);
+ /* FIXME: this code should not know anything about vcpus */
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+
+ if (waitqueue_active(q))
+ wake_up_interruptible(q);
+}
+
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
@@ -1096,9 +1151,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
- }
- hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ hrtimer_start(&apic->lapic_timer.timer,
+ ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+ } else
+ apic_timer_expired(apic);
local_irq_restore(flags);
}
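
The restructured TSC-deadline path above arms the hrtimer only when the deadline is still ahead of the guest's TSC and otherwise fires the expiry handler immediately instead of programming a zero-length timer. A minimal userspace sketch of that decision, using invented helpers in place of the hrtimer API:

    #include <stdint.h>
    #include <stdio.h>

    static void timer_expired(void)
    {
        printf("deadline already passed: inject now\n");
    }

    static void arm_hrtimer_ns(uint64_t ns)
    {
        printf("arm one-shot timer for %llu ns\n", (unsigned long long)ns);
    }

    /* Mirror of the logic above: convert the remaining TSC ticks to
     * nanoseconds and arm a timer, or expire immediately if the guest
     * TSC is already past the programmed deadline. */
    static void start_deadline_timer(uint64_t tscdeadline, uint64_t guest_tsc,
                                     uint64_t tsc_khz)
    {
        if (tscdeadline > guest_tsc) {
            uint64_t ns = (tscdeadline - guest_tsc) * 1000000ull / tsc_khz;
            arm_hrtimer_ns(ns);
        } else {
            timer_expired();
        }
    }

    int main(void)
    {
        start_deadline_timer(2000000, 1000000, 2500000); /* future deadline */
        start_deadline_timer(1000000, 2000000, 2500000); /* already expired */
        return 0;
    }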
@@ -1203,17 +1259,20 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
- case APIC_LVTT:
- if ((kvm_apic_get_reg(apic, APIC_LVTT) &
- apic->lapic_timer.timer_mode_mask) !=
- (val & apic->lapic_timer.timer_mode_mask))
+ case APIC_LVTT: {
+ u32 timer_mode = val & apic->lapic_timer.timer_mode_mask;
+
+ if (apic->lapic_timer.timer_mode != timer_mode) {
+ apic->lapic_timer.timer_mode = timer_mode;
hrtimer_cancel(&apic->lapic_timer.timer);
+ }
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
apic_set_reg(apic, APIC_LVTT, val);
break;
+ }
case APIC_TMICT:
if (apic_lvtt_tscdeadline(apic))
@@ -1320,7 +1379,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_key_slow_dec_deferred(&apic_hw_disabled);
- if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
+ if (!apic->sw_enabled)
static_key_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
@@ -1355,9 +1414,6 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
return;
hrtimer_cancel(&apic->lapic_timer.timer);
- /* Inject here so clearing tscdeadline won't override new value */
- if (apic_has_pending_timer(vcpu))
- kvm_inject_apic_timer_irqs(vcpu);
apic->lapic_timer.tscdeadline = data;
start_apic_timer(apic);
}
@@ -1422,6 +1478,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
+ if ((value & MSR_IA32_APICBASE_ENABLE) &&
+ apic->base_address != APIC_DEFAULT_PHYS_BASE)
+ pr_warn_once("APIC base relocation is unsupported by KVM");
+
/* with FSB delivery interrupt, we can restart APIC functionality */
apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
"0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
@@ -1447,6 +1507,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
+ apic->lapic_timer.timer_mode = 0;
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
@@ -1538,23 +1599,8 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
- struct kvm_vcpu *vcpu = apic->vcpu;
- wait_queue_head_t *q = &vcpu->wq;
-
- /*
- * There is a race window between reading and incrementing, but we do
- * not care about potentially losing timer events in the !reinject
- * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
- * in vcpu_enter_guest.
- */
- if (!atomic_read(&ktimer->pending)) {
- atomic_inc(&ktimer->pending);
- /* FIXME: this code should not know anything about vcpus */
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
- }
- if (waitqueue_active(q))
- wake_up_interruptible(q);
+ apic_timer_expired(apic);
if (lapic_is_periodic(apic)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
@@ -1693,6 +1739,9 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
+ if (kvm_x86_ops->hwapic_irr_update)
+ kvm_x86_ops->hwapic_irr_update(vcpu,
+ apic_find_highest_irr(apic));
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
@@ -1837,8 +1886,11 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
+ if (reg == APIC_ICR2)
+ return 1;
+
/* if this is ICR write vector before command */
- if (msr == 0x830)
+ if (reg == APIC_ICR)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
@@ -1851,9 +1903,15 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
+ if (reg == APIC_DFR || reg == APIC_ICR2) {
+ apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
+ reg);
+ return 1;
+ }
+
if (apic_reg_read(apic, reg, 4, &low))
return 1;
- if (msr == 0x830)
+ if (reg == APIC_ICR)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
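
For the x2APIC MSR paths above: the ICR is a single 64-bit MSR in x2APIC mode, while the register file still holds it as two 32-bit registers, so reads stitch ICR2 and ICR together and writes split the value, storing the destination half before the half that triggers the IPI. The sketch below illustrates the packing; the MSR-to-register mapping and the register offsets are assumptions based on the usual xAPIC layout, not taken from this hunk.

    #include <stdint.h>
    #include <stdio.h>

    #define APIC_BASE_MSR   0x800u
    #define APIC_ICR        0x300u   /* assumed xAPIC register offsets */
    #define APIC_ICR2       0x310u

    /* x2APIC MSR index -> xAPIC register offset (assumed mapping). */
    static uint32_t x2apic_msr_to_reg(uint32_t msr)
    {
        return (msr - APIC_BASE_MSR) << 4;
    }

    int main(void)
    {
        uint32_t reg = x2apic_msr_to_reg(0x830);  /* the ICR MSR */
        uint32_t icr_lo = 0x000040f1;             /* fixed delivery, vector 0xf1 */
        uint32_t icr_hi = 0x00000003;             /* destination APIC ID 3 */

        /* 64-bit MSR read: high half from ICR2, low half from ICR. */
        uint64_t msr_val = ((uint64_t)icr_hi << 32) | icr_lo;

        /* 64-bit MSR write: split and store the destination half before
         * the command half, mirroring "write vector before command". */
        uint32_t new_hi = (uint32_t)(msr_val >> 32);
        uint32_t new_lo = (uint32_t)msr_val;

        printf("reg=%#x msr=%#llx hi=%#x lo=%#x\n",
               reg, (unsigned long long)msr_val, new_hi, new_lo);
        return 0;
    }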
@@ -1908,7 +1966,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- unsigned int sipi_vector;
+ u8 sipi_vector;
unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 6a11845fd8b9..c674fce53cf9 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -11,6 +11,7 @@
struct kvm_timer {
struct hrtimer timer;
s64 period; /* unit: ns */
+ u32 timer_mode;
u32 timer_mode_mask;
u64 tscdeadline;
atomic_t pending; /* accumulated triggered timers */
@@ -22,6 +23,7 @@ struct kvm_lapic {
struct kvm_timer lapic_timer;
u32 divide_count;
struct kvm_vcpu *vcpu;
+ bool sw_enabled;
bool irr_pending;
/* Number of bits set in ISR. */
s16 isr_count;
@@ -55,8 +57,8 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest);
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
@@ -119,11 +121,11 @@ static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
extern struct static_key_deferred apic_sw_disabled;
-static inline int kvm_apic_sw_enabled(struct kvm_lapic *apic)
+static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
if (static_key_false(&apic_sw_disabled.key))
- return kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
- return APIC_SPIV_APIC_ENABLED;
+ return apic->sw_enabled;
+ return true;
}
static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
@@ -152,8 +154,6 @@ static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
ldr >>= 32 - map->ldr_bits;
cid = (ldr >> map->cid_shift) & map->cid_mask;
- BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
-
return cid;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 978f402006ee..10fbed126b11 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -214,13 +214,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
#define MMIO_GEN_LOW_SHIFT 10
#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
-#define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
static u64 generation_mmio_spte_mask(unsigned int gen)
{
u64 mask;
- WARN_ON(gen > MMIO_MAX_GEN);
+ WARN_ON(gen & ~MMIO_GEN_MASK);
mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
@@ -263,13 +262,13 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) >> PAGE_SHIFT;
}
static unsigned get_mmio_spte_access(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) & ~PAGE_MASK;
}
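
The mmu.c change above drops MMIO_MAX_GEN and masks the generation with MMIO_GEN_MASK instead, because the generation number is scattered across two disjoint bit ranges of the shadow PTE. The sketch below shows that split-field packing and unpacking with made-up shift constants; the kernel's actual MMIO_SPTE_GEN_* positions and generation width are not visible in this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants only; the real spte bit positions differ. */
    #define GEN_LOW_SHIFT       10                    /* low 10 bits of the generation */
    #define GEN_LOW_MASK        ((1u << GEN_LOW_SHIFT) - 1)
    #define GEN_MASK            ((1u << 14) - 1)      /* assume a 14-bit generation */
    #define SPTE_GEN_LOW_SHIFT  3                     /* where the low part lives in the spte */
    #define SPTE_GEN_HIGH_SHIFT 52                    /* where the high part lives in the spte */

    /* Scatter a small generation number into two disjoint spte bit ranges. */
    static uint64_t generation_spte_mask(uint32_t gen)
    {
        uint64_t mask;

        gen &= GEN_MASK;
        mask  = (uint64_t)(gen & GEN_LOW_MASK) << SPTE_GEN_LOW_SHIFT;
        mask |= (uint64_t)(gen >> GEN_LOW_SHIFT) << SPTE_GEN_HIGH_SHIFT;
        return mask;
    }

    /* Recover the generation from an spte built with the mask above. */
    static uint32_t get_spte_generation(uint64_t spte)
    {
        uint32_t gen;

        gen  = (spte >> SPTE_GEN_LOW_SHIFT) & GEN_LOW_MASK;
        gen |= ((spte >> SPTE_GEN_HIGH_SHIFT) << GEN_LOW_SHIFT) & GEN_MASK;
        return gen;
    }

    int main(void)
    {
        uint32_t gen = 0x2a5f & GEN_MASK;
        uint64_t spte = generation_spte_mask(gen);

        printf("gen=%#x spte=%#llx back=%#x\n",
               gen, (unsigned long long)spte, get_spte_generation(spte));
        return 0;
    }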
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7527cefc5a43..41dd0387cccb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1056,9 +1056,11 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
{
struct vcpu_svm *svm = to_svm(vcpu);
- WARN_ON(adjustment < 0);
- if (host)
- adjustment = svm_scale_tsc(vcpu, adjustment);
+ if (host) {
+ if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
+ WARN_ON(adjustment < 0);
+ adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+ }
svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu))
@@ -2999,7 +3001,6 @@ static int dr_interception(struct vcpu_svm *svm)
{
int reg, dr;
unsigned long val;
- int err;
if (svm->vcpu.guest_debug == 0) {
/*
@@ -3019,12 +3020,15 @@ static int dr_interception(struct vcpu_svm *svm)
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
+ if (!kvm_require_dr(&svm->vcpu, dr - 16))
+ return 1;
val = kvm_register_read(&svm->vcpu, reg);
kvm_set_dr(&svm->vcpu, dr - 16, val);
} else {
- err = kvm_get_dr(&svm->vcpu, dr, &val);
- if (!err)
- kvm_register_write(&svm->vcpu, reg, val);
+ if (!kvm_require_dr(&svm->vcpu, dr))
+ return 1;
+ kvm_get_dr(&svm->vcpu, dr, &val);
+ kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
@@ -4123,6 +4127,11 @@ static bool svm_mpx_supported(void)
return false;
}
+static bool svm_xsaves_supported(void)
+{
+ return false;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -4410,6 +4419,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.rdtscp_supported = svm_rdtscp_supported,
.invpcid_supported = svm_invpcid_supported,
.mpx_supported = svm_mpx_supported,
+ .xsaves_supported = svm_xsaves_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 6b06ab8748dd..c2a34bb5ad93 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -5,6 +5,7 @@
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
+#include <asm/pvclock-abi.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -877,6 +878,42 @@ TRACE_EVENT(kvm_ple_window,
#define trace_kvm_ple_window_shrink(vcpu_id, new, old) \
trace_kvm_ple_window(false, vcpu_id, new, old)
+TRACE_EVENT(kvm_pvclock_update,
+ TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
+ TP_ARGS(vcpu_id, pvclock),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( __u32, version )
+ __field( __u64, tsc_timestamp )
+ __field( __u64, system_time )
+ __field( __u32, tsc_to_system_mul )
+ __field( __s8, tsc_shift )
+ __field( __u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->version = pvclock->version;
+ __entry->tsc_timestamp = pvclock->tsc_timestamp;
+ __entry->system_time = pvclock->system_time;
+ __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
+ __entry->tsc_shift = pvclock->tsc_shift;
+ __entry->flags = pvclock->flags;
+ ),
+
+ TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
+ "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
+ "flags 0x%x }",
+ __entry->vcpu_id,
+ __entry->version,
+ __entry->tsc_timestamp,
+ __entry->system_time,
+ __entry->tsc_to_system_mul,
+ __entry->tsc_shift,
+ __entry->flags)
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3e556c68351b..feb852b04598 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -99,13 +99,15 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
+static u64 __read_mostly host_xss;
+
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -214,6 +216,7 @@ struct __packed vmcs12 {
u64 virtual_apic_page_addr;
u64 apic_access_addr;
u64 ept_pointer;
+ u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
u64 guest_ia32_debugctl;
@@ -616,6 +619,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
@@ -720,12 +724,15 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip),
};
-static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
static inline short vmcs_field_to_offset(unsigned long field)
{
- if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
- return -1;
+ BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
+
+ if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
+ vmcs_field_to_offset_table[field] == 0)
+ return -ENOENT;
+
return vmcs_field_to_offset_table[field];
}
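
vmcs_field_to_offset() now returns -ENOENT instead of a bare -1, and the BUILD_BUG_ON documents that the table stays small enough for a short return type, so callers only need to test for a negative result. A self-contained sketch of the same lookup-with-sentinel pattern, using invented field numbers and offsets:

    #include <errno.h>
    #include <stdio.h>

    /* Sparse field -> offset table; 0 marks "no such field", as in the
     * kernel table, where offset 0 is never a valid payload offset. */
    static const short field_to_offset_table[] = {
        [0x00] = 8,
        [0x02] = 16,
        [0x10] = 24,
    };

    #define TABLE_SIZE (sizeof(field_to_offset_table) / sizeof(field_to_offset_table[0]))

    static short field_to_offset(unsigned long field)
    {
        if (field >= TABLE_SIZE || field_to_offset_table[field] == 0)
            return -ENOENT;
        return field_to_offset_table[field];
    }

    int main(void)
    {
        unsigned long fields[] = { 0x02, 0x07, 0x40 };

        for (unsigned int i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
            short off = field_to_offset(fields[i]);

            if (off < 0)
                printf("field %#lx: unsupported (%d)\n", fields[i], off);
            else
                printf("field %#lx: offset %d\n", fields[i], off);
        }
        return 0;
    }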
@@ -758,6 +765,7 @@ static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
+static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -1098,6 +1106,12 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}
+static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
+ vmx_xsaves_supported();
+}
+
static inline bool is_exception(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -1659,12 +1673,20 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
- /* On ept, can't emulate nx, and must switch nx atomically */
- if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+
+ /*
+ * On EPT, we can't emulate NX, so we must switch EFER atomically.
+ * On CPUs that support "load IA32_EFER", always switch EFER
+ * atomically, since it's faster than switching it manually.
+ */
+ if (cpu_has_load_ia32_efer ||
+ (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
- add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+ guest_efer, host_efer);
return false;
}
@@ -2377,12 +2399,13 @@ static __init void nested_vmx_setup_ctls_msrs(void)
nested_vmx_secondary_ctls_low = 0;
nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_WBINVD_EXITING;
+ SECONDARY_EXEC_WBINVD_EXITING |
+ SECONDARY_EXEC_XSAVES;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@@ -2558,6 +2581,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
if (!nested_vmx_allowed(vcpu))
return 1;
return vmx_get_vmx_msr(vcpu, msr_index, pdata);
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+ data = vcpu->arch.ia32_xss;
+ break;
case MSR_TSC_AUX:
if (!to_vmx(vcpu)->rdtscp_enabled)
return 1;
@@ -2649,6 +2677,22 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
return 1; /* they are read-only */
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+ /*
+ * The only supported bit as of Skylake is bit 8, but
+ * it is not supported by KVM.
+ */
+ if (data != 0)
+ return 1;
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+ vcpu->arch.ia32_xss, host_xss);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
case MSR_TSC_AUX:
if (!vmx->rdtscp_enabled)
return 1;
@@ -2884,7 +2928,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_SHADOW_VMCS;
+ SECONDARY_EXEC_SHADOW_VMCS |
+ SECONDARY_EXEC_XSAVES;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -3007,6 +3052,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
}
}
+ if (cpu_has_xsaves)
+ rdmsrl(MSR_IA32_XSS, host_xss);
+
return 0;
}
@@ -3110,76 +3158,6 @@ static __init int alloc_kvm_area(void)
return 0;
}
-static __init int hardware_setup(void)
-{
- if (setup_vmcs_config(&vmcs_config) < 0)
- return -EIO;
-
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
- if (!cpu_has_vmx_vpid())
- enable_vpid = 0;
- if (!cpu_has_vmx_shadow_vmcs())
- enable_shadow_vmcs = 0;
- if (enable_shadow_vmcs)
- init_vmcs_shadow_fields();
-
- if (!cpu_has_vmx_ept() ||
- !cpu_has_vmx_ept_4levels()) {
- enable_ept = 0;
- enable_unrestricted_guest = 0;
- enable_ept_ad_bits = 0;
- }
-
- if (!cpu_has_vmx_ept_ad_bits())
- enable_ept_ad_bits = 0;
-
- if (!cpu_has_vmx_unrestricted_guest())
- enable_unrestricted_guest = 0;
-
- if (!cpu_has_vmx_flexpriority()) {
- flexpriority_enabled = 0;
-
- /*
- * set_apic_access_page_addr() is used to reload apic access
- * page upon invalidation. No need to do anything if the
- * processor does not have the APIC_ACCESS_ADDR VMCS field.
- */
- kvm_x86_ops->set_apic_access_page_addr = NULL;
- }
-
- if (!cpu_has_vmx_tpr_shadow())
- kvm_x86_ops->update_cr8_intercept = NULL;
-
- if (enable_ept && !cpu_has_vmx_ept_2m_page())
- kvm_disable_largepages();
-
- if (!cpu_has_vmx_ple())
- ple_gap = 0;
-
- if (!cpu_has_vmx_apicv())
- enable_apicv = 0;
-
- if (enable_apicv)
- kvm_x86_ops->update_cr8_intercept = NULL;
- else {
- kvm_x86_ops->hwapic_irr_update = NULL;
- kvm_x86_ops->deliver_posted_interrupt = NULL;
- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
- }
-
- if (nested)
- nested_vmx_setup_ctls_msrs();
-
- return alloc_kvm_area();
-}
-
-static __exit void hardware_unsetup(void)
-{
- free_kvm_area();
-}
-
static bool emulation_required(struct kvm_vcpu *vcpu)
{
return emulate_invalid_guest_state && !guest_state_valid(vcpu);
@@ -4396,6 +4374,7 @@ static void ept_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}
+#define VMX_XSS_EXIT_BITMAP 0
/*
* Sets up the vmcs for emulated real mode.
*/
@@ -4505,6 +4484,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
+ if (vmx_xsaves_supported())
+ vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+
return 0;
}
@@ -5163,13 +5145,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- int dr, reg;
+ int dr, dr7, reg;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
+
+ /* First, if DR does not exist, trigger UD */
+ if (!kvm_require_dr(vcpu, dr))
+ return 1;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
if (!kvm_require_cpl(vcpu, 0))
return 1;
- dr = vmcs_readl(GUEST_DR7);
- if (dr & DR7_GD) {
+ dr7 = vmcs_readl(GUEST_DR7);
+ if (dr7 & DR7_GD) {
/*
* As the vm-exit takes precedence over the debug trap, we
* need to emulate the latter, either for the host or the
@@ -5177,17 +5166,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
- vcpu->run->debug.arch.dr7 = dr;
- vcpu->run->debug.arch.pc =
- vmcs_readl(GUEST_CS_BASE) +
- vmcs_readl(GUEST_RIP);
+ vcpu->run->debug.arch.dr7 = dr7;
+ vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu->run->debug.arch.exception = DB_VECTOR;
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
return 0;
} else {
- vcpu->arch.dr7 &= ~DR7_GD;
+ vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
- vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
@@ -5209,8 +5195,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
return 1;
}
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
@@ -5391,6 +5375,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_xsaves(struct kvm_vcpu *vcpu)
+{
+ skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
+static int handle_xrstors(struct kvm_vcpu *vcpu)
+{
+ skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
if (likely(fasteoi)) {
@@ -5492,7 +5490,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
}
/* clear all local breakpoint enable flags */
- vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
+ vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x155);
/*
* TODO: What about debug traps on tss switch?
@@ -5539,11 +5537,11 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
trace_kvm_page_fault(gpa, exit_qualification);
/* It is a write fault? */
- error_code = exit_qualification & (1U << 1);
+ error_code = exit_qualification & PFERR_WRITE_MASK;
/* It is a fetch fault? */
- error_code |= (exit_qualification & (1U << 2)) << 2;
+ error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
/* ept page table is present? */
- error_code |= (exit_qualification >> 3) & 0x1;
+ error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
vcpu->arch.exit_qualification = exit_qualification;
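
The EPT-violation handler above now builds the page-fault error code from named PFERR_* masks instead of magic shifts. The sketch below reproduces that translation; the exit-qualification bit positions and PFERR_* values are stated as assumptions in the comments rather than taken from headers shown here.

    #include <stdint.h>
    #include <stdio.h>

    /* Page-fault error-code bits (assumed, matching the x86 #PF layout). */
    #define PFERR_PRESENT_MASK  (1u << 0)
    #define PFERR_WRITE_MASK    (1u << 1)
    #define PFERR_FETCH_MASK    (1u << 4)

    /* EPT-violation exit qualification bits (assumed): bit 1 = write
     * access, bit 2 = instruction fetch, bit 3 = translation present. */
    static uint32_t ept_exit_qual_to_error_code(uint64_t exit_qualification)
    {
        uint32_t error_code;

        error_code  = exit_qualification & PFERR_WRITE_MASK;
        error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
        error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
        return error_code;
    }

    int main(void)
    {
        /* write access (bit 1) to a present translation (bit 3) */
        printf("%#x\n", ept_exit_qual_to_error_code((1u << 1) | (1u << 3))); /* 0x3 */
        /* instruction fetch (bit 2) from a non-present translation */
        printf("%#x\n", ept_exit_qual_to_error_code(1u << 2));               /* 0x10 */
        return 0;
    }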
@@ -5785,6 +5783,204 @@ static void update_ple_window_actual_max(void)
ple_window_grow, INT_MIN);
}
+static __init int hardware_setup(void)
+{
+ int r = -ENOMEM, i, msr;
+
+ rdmsrl_safe(MSR_EFER, &host_efer);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
+ kvm_define_shared_msr(i, vmx_msr_index[i]);
+
+ vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_io_bitmap_a)
+ return r;
+
+ vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_io_bitmap_b)
+ goto out;
+
+ vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy)
+ goto out1;
+
+ vmx_msr_bitmap_legacy_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_legacy_x2apic)
+ goto out2;
+
+ vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode)
+ goto out3;
+
+ vmx_msr_bitmap_longmode_x2apic =
+ (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_msr_bitmap_longmode_x2apic)
+ goto out4;
+ vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_vmread_bitmap)
+ goto out5;
+
+ vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!vmx_vmwrite_bitmap)
+ goto out6;
+
+ memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
+ memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+
+ /*
+ * Allow direct access to the PC debug port (it is often used for I/O
+ * delays, but the vmexits simply slow things down).
+ */
+ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
+ clear_bit(0x80, vmx_io_bitmap_a);
+
+ memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
+
+ memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
+ memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According to the SDM, in x2apic mode, the whole id reg is used.
+ * But in KVM, it only uses the highest eight bits. Need to
+ * intercept it. */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
+
+ if (enable_ept) {
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
+
+ update_ple_window_actual_max();
+
+ if (setup_vmcs_config(&vmcs_config) < 0) {
+ r = -EIO;
+ goto out7;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
+ if (!cpu_has_vmx_vpid())
+ enable_vpid = 0;
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
+
+ if (!cpu_has_vmx_ept() ||
+ !cpu_has_vmx_ept_4levels()) {
+ enable_ept = 0;
+ enable_unrestricted_guest = 0;
+ enable_ept_ad_bits = 0;
+ }
+
+ if (!cpu_has_vmx_ept_ad_bits())
+ enable_ept_ad_bits = 0;
+
+ if (!cpu_has_vmx_unrestricted_guest())
+ enable_unrestricted_guest = 0;
+
+ if (!cpu_has_vmx_flexpriority()) {
+ flexpriority_enabled = 0;
+
+ /*
+ * set_apic_access_page_addr() is used to reload apic access
+ * page upon invalidation. No need to do anything if the
+ * processor does not have the APIC_ACCESS_ADDR VMCS field.
+ */
+ kvm_x86_ops->set_apic_access_page_addr = NULL;
+ }
+
+ if (!cpu_has_vmx_tpr_shadow())
+ kvm_x86_ops->update_cr8_intercept = NULL;
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+
+ if (!cpu_has_vmx_ple())
+ ple_gap = 0;
+
+ if (!cpu_has_vmx_apicv())
+ enable_apicv = 0;
+
+ if (enable_apicv)
+ kvm_x86_ops->update_cr8_intercept = NULL;
+ else {
+ kvm_x86_ops->hwapic_irr_update = NULL;
+ kvm_x86_ops->deliver_posted_interrupt = NULL;
+ kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+ }
+
+ if (nested)
+ nested_vmx_setup_ctls_msrs();
+
+ return alloc_kvm_area();
+
+out7:
+ free_page((unsigned long)vmx_vmwrite_bitmap);
+out6:
+ free_page((unsigned long)vmx_vmread_bitmap);
+out5:
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+out4:
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+out2:
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
+out1:
+ free_page((unsigned long)vmx_io_bitmap_b);
+out:
+ free_page((unsigned long)vmx_io_bitmap_a);
+
+ return r;
+}
+
+static __exit void hardware_unsetup(void)
+{
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+ free_page((unsigned long)vmx_msr_bitmap_legacy);
+ free_page((unsigned long)vmx_msr_bitmap_longmode);
+ free_page((unsigned long)vmx_io_bitmap_b);
+ free_page((unsigned long)vmx_io_bitmap_a);
+ free_page((unsigned long)vmx_vmwrite_bitmap);
+ free_page((unsigned long)vmx_vmread_bitmap);
+
+ free_kvm_area();
+}
+
/*
* Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
* exiting, so only get here on cpu with PAUSE-Loop-Exiting.
@@ -6361,58 +6557,60 @@ static inline int vmcs_field_readonly(unsigned long field)
* some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
* 64-bit fields are to be returned).
*/
-static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
- unsigned long field, u64 *ret)
+static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
{
short offset = vmcs_field_to_offset(field);
char *p;
if (offset < 0)
- return 0;
+ return offset;
p = ((char *)(get_vmcs12(vcpu))) + offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*ret = *((natural_width *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U16:
*ret = *((u16 *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U32:
*ret = *((u32 *)p);
- return 1;
+ return 0;
case VMCS_FIELD_TYPE_U64:
*ret = *((u64 *)p);
- return 1;
+ return 0;
default:
- return 0; /* can never happen. */
+ WARN_ON(1);
+ return -ENOENT;
}
}
-static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
- unsigned long field, u64 field_value){
+static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 field_value){
short offset = vmcs_field_to_offset(field);
char *p = ((char *) get_vmcs12(vcpu)) + offset;
if (offset < 0)
- return false;
+ return offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
*(u16 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_U32:
*(u32 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_U64:
*(u64 *)p = field_value;
- return true;
+ return 0;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*(natural_width *)p = field_value;
- return true;
+ return 0;
default:
- return false; /* can never happen. */
+ WARN_ON(1);
+ return -ENOENT;
}
}
@@ -6445,6 +6643,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
field_value = vmcs_readl(field);
break;
+ default:
+ WARN_ON(1);
+ continue;
}
vmcs12_write_any(&vmx->vcpu, field, field_value);
}
@@ -6490,6 +6691,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
vmcs_writel(field, (long)field_value);
break;
+ default:
+ WARN_ON(1);
+ break;
}
}
}
@@ -6528,7 +6732,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
- if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
@@ -6598,7 +6802,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return 1;
}
- if (!vmcs12_write_any(vcpu, field, field_value)) {
+ if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
@@ -6802,6 +7006,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
[EXIT_REASON_INVEPT] = handle_invept,
[EXIT_REASON_INVVPID] = handle_invvpid,
+ [EXIT_REASON_XSAVES] = handle_xsaves,
+ [EXIT_REASON_XRSTORS] = handle_xrstors,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7089,6 +7295,14 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
+ case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
+ /*
+ * This should never happen, since it is not possible to
+ * set XSS to a non-zero value---neither in L1 nor in L2.
+ * If if it were, XSS would have to be checked against
+ * the XSS exit bitmap in vmcs12.
+ */
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
return 1;
}
@@ -7277,6 +7491,9 @@ static void vmx_set_rvi(int vector)
u16 status;
u8 old;
+ if (vector == -1)
+ vector = 0;
+
status = vmcs_read16(GUEST_INTR_STATUS);
old = (u8)status & 0xff;
if ((u8)vector != old) {
@@ -7288,22 +7505,23 @@ static void vmx_set_rvi(int vector)
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
+ if (!is_guest_mode(vcpu)) {
+ vmx_set_rvi(max_irr);
+ return;
+ }
+
if (max_irr == -1)
return;
/*
- * If a vmexit is needed, vmx_check_nested_events handles it.
+ * In guest mode: if a vmexit is needed, vmx_check_nested_events
+ * handles it.
*/
- if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ if (nested_exit_on_intr(vcpu))
return;
- if (!is_guest_mode(vcpu)) {
- vmx_set_rvi(max_irr);
- return;
- }
-
/*
- * Fall back to pre-APICv interrupt injection since L2
+ * Else, fall back to pre-APICv interrupt injection since L2
* is run without virtual interrupt delivery.
*/
if (!kvm_event_needs_reinjection(vcpu) &&
@@ -7400,6 +7618,12 @@ static bool vmx_mpx_supported(void)
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}
+static bool vmx_xsaves_supported(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_XSAVES;
+}
+
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -8135,6 +8359,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
@@ -8775,6 +9001,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (vmx_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
/* update exit information fields: */
@@ -9176,6 +9404,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.check_intercept = vmx_check_intercept,
.handle_external_intr = vmx_handle_external_intr,
.mpx_supported = vmx_mpx_supported,
+ .xsaves_supported = vmx_xsaves_supported,
.check_nested_events = vmx_check_nested_events,
@@ -9184,150 +9413,21 @@ static struct kvm_x86_ops vmx_x86_ops = {
static int __init vmx_init(void)
{
- int r, i, msr;
-
- rdmsrl_safe(MSR_EFER, &host_efer);
-
- for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
- kvm_define_shared_msr(i, vmx_msr_index[i]);
-
- vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_a)
- return -ENOMEM;
-
- r = -ENOMEM;
-
- vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_b)
- goto out;
-
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out3;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
- vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmread_bitmap)
- goto out5;
-
- vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_vmwrite_bitmap)
- goto out6;
-
- memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
- memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-
- /*
- * Allow direct access to the PC debug port (it is often used for I/O
- * delays, but the vmexits simply slow things down).
- */
- memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
- clear_bit(0x80, vmx_io_bitmap_a);
-
- memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
-
- memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-
- set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
-
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
- __alignof__(struct vcpu_vmx), THIS_MODULE);
+ int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out7;
+ return r;
#ifdef CONFIG_KEXEC
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
#endif
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
- if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
- (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
- (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
-
- update_ple_window_actual_max();
-
return 0;
-
-out7:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
- free_page((unsigned long)vmx_vmread_bitmap);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
-out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
- free_page((unsigned long)vmx_io_bitmap_b);
-out:
- free_page((unsigned long)vmx_io_bitmap_a);
- return r;
}
static void __exit vmx_exit(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
- free_page((unsigned long)vmx_io_bitmap_b);
- free_page((unsigned long)vmx_io_bitmap_a);
- free_page((unsigned long)vmx_vmwrite_bitmap);
- free_page((unsigned long)vmx_vmread_bitmap);
-
#ifdef CONFIG_KEXEC
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
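
[Editor's note] The vmx_init() hunk above removes a long goto-based unwind chain: each allocation failure jumps to a label that frees everything allocated so far, in reverse order, before returning the error. The allocations themselves appear to move out of module init (presumably into the hardware setup path), which is why the out1..out7 labels can go. As a reminder of the idiom, here is a minimal user-space sketch of staged allocation with reverse-order cleanup; the buffer names are made up and only stand in for the VMX bitmap pages:

#include <stdio.h>
#include <stdlib.h>

/* Three staged allocations with reverse-order cleanup, mirroring the
 * out1/out2/... unwind labels removed from vmx_init(). Illustrative only. */
static int init_buffers(void **a, void **b, void **c)
{
    *a = malloc(4096);
    if (!*a)
        return -1;

    *b = malloc(4096);
    if (!*b)
        goto out_a;

    *c = malloc(4096);
    if (!*c)
        goto out_b;

    return 0;               /* success: caller owns a, b and c */

out_b:
    free(*b);
out_a:
    free(*a);
    return -1;
}

int main(void)
{
    void *a, *b, *c;

    if (init_buffers(&a, &b, &c) == 0) {
        puts("all buffers allocated");
        free(c);
        free(b);
        free(a);
    }
    return 0;
}

The key property is that every label frees exactly the resources acquired before the jump, so each failure path stays one line long at the call site.
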
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0033df32a745..c259814200bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -27,6 +27,7 @@
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
+#include "assigned-dev.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -353,6 +354,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
if (!vcpu->arch.exception.pending) {
queue:
+ if (has_error && !is_protmode(vcpu))
+ has_error = false;
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
@@ -455,6 +458,16 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
+{
+ if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+ return true;
+
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_require_dr);
+
/*
* This function will be used to read from the physical memory of the currently
* running guest. The difference to kvm_read_guest_page is that this function
@@ -656,6 +669,12 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
return 1;
+ if (xcr0 & XSTATE_AVX512) {
+ if (!(xcr0 & XSTATE_YMM))
+ return 1;
+ if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+ return 1;
+ }
kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;
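
[Editor's note] The new XCR0 check above encodes two dependencies from the XSAVE architecture: AVX-512 state cannot be enabled without YMM state, and the three AVX-512 components (opmask, ZMM_Hi256, Hi16_ZMM) must be enabled together. A self-contained sketch of the same validation follows; the XSTATE_* masks are spelled out locally using the standard XCR0 bit layout rather than taken from kernel headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* XCR0 feature bits (standard x86 layout; defined locally for the demo). */
#define XSTATE_FP        (1ULL << 0)
#define XSTATE_SSE       (1ULL << 1)
#define XSTATE_YMM       (1ULL << 2)
#define XSTATE_OPMASK    (1ULL << 5)
#define XSTATE_ZMM_HI256 (1ULL << 6)
#define XSTATE_HI16_ZMM  (1ULL << 7)
#define XSTATE_AVX512    (XSTATE_OPMASK | XSTATE_ZMM_HI256 | XSTATE_HI16_ZMM)

/* Returns true if the AVX-512 related dependencies in xcr0 are consistent. */
static bool xcr0_avx512_valid(uint64_t xcr0)
{
    if (xcr0 & XSTATE_AVX512) {
        if (!(xcr0 & XSTATE_YMM))
            return false;                       /* AVX-512 needs YMM state */
        if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
            return false;                       /* all three components or none */
    }
    return true;
}

int main(void)
{
    printf("%d\n", xcr0_avx512_valid(XSTATE_FP | XSTATE_SSE | XSTATE_YMM |
                                     XSTATE_AVX512));   /* prints 1 */
    printf("%d\n", xcr0_avx512_valid(XSTATE_FP | XSTATE_SSE |
                                     XSTATE_OPMASK));   /* prints 0 */
    return 0;
}
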
@@ -732,6 +751,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+#ifdef CONFIG_X86_64
+ cr3 &= ~CR3_PCID_INVD;
+#endif
+
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -811,8 +834,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1; /* #UD */
/* fall through */
case 6:
if (val & 0xffffffff00000000ULL)
@@ -821,8 +842,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
kvm_update_dr6(vcpu);
break;
case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1; /* #UD */
/* fall through */
default: /* 7 */
if (val & 0xffffffff00000000ULL)
@@ -837,27 +856,21 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
- int res;
-
- res = __kvm_set_dr(vcpu, dr, val);
- if (res > 0)
- kvm_queue_exception(vcpu, UD_VECTOR);
- else if (res < 0)
+ if (__kvm_set_dr(vcpu, dr, val)) {
kvm_inject_gp(vcpu, 0);
-
- return res;
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);
-static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
break;
case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1;
/* fall through */
case 6:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
@@ -866,23 +879,11 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = kvm_x86_ops->get_dr6(vcpu);
break;
case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return 1;
/* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
}
-
- return 0;
-}
-
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
-{
- if (_kvm_get_dr(vcpu, dr, val)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
@@ -1237,21 +1238,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
- bool do_request = false;
struct kvm_arch *ka = &vcpu->kvm->arch;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&vcpu->kvm->online_vcpus));
- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
- if (!ka->use_master_clock)
- do_request = 1;
-
- if (!vcpus_matched && ka->use_master_clock)
- do_request = 1;
-
- if (do_request)
+ /*
+ * Once the masterclock is enabled, always perform request in
+ * order to update it.
+ *
+ * In order to enable masterclock, the host clocksource must be TSC
+ * and the vcpus need to have matched TSCs. When that happens,
+ * perform request to enable masterclock.
+ */
+ if (ka->use_master_clock ||
+ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -1637,16 +1639,16 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_guest_tsc = tsc_timestamp;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
+
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
- vcpu->hv_clock.version += 2;
-
- if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
- &guest_hv_clock, sizeof(guest_hv_clock))))
- return 0;
+ vcpu->hv_clock.version = guest_hv_clock.version + 2;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1662,6 +1664,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
+ trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
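
[Editor's note] The reordering above preserves the pvclock update protocol: the guest-visible version field is even while the contents are stable, and each update bumps it by 2, now computed from the version already present in the guest copy (read back via kvm_read_guest_cached() before anything is written). Readers treat it like a seqcount and retry while the version is odd or changed across the read. A minimal sketch of that reader/writer discipline, using a hypothetical record layout rather than the real pvclock structure:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical shared record illustrating the even/odd version protocol
 * of pvclock-style pages (not the real pvclock layout). */
struct clock_rec {
    volatile uint32_t version;      /* odd = update in progress */
    uint64_t value;
};

static void publish(struct clock_rec *c, uint64_t v)
{
    c->version++;                               /* now odd: readers must retry */
    __atomic_thread_fence(__ATOMIC_RELEASE);
    c->value = v;
    __atomic_thread_fence(__ATOMIC_RELEASE);
    c->version++;                               /* even again: contents stable */
}

static uint64_t read_stable(const struct clock_rec *c)
{
    uint32_t v0, v1;
    uint64_t val;

    do {
        v0 = c->version;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        val = c->value;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        v1 = c->version;
    } while ((v0 & 1) || v0 != v1);             /* retry on odd or changed version */

    return val;
}

int main(void)
{
    struct clock_rec c = { .version = 0, .value = 0 };

    publish(&c, 12345);
    printf("value=%llu version=%u\n",
           (unsigned long long)read_stable(&c), c.version);
    return 0;
}
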
@@ -2140,7 +2144,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
- u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+ s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
@@ -3106,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
unsigned long val;
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
- _kvm_get_dr(vcpu, 6, &val);
+ kvm_get_dr(vcpu, 6, &val);
dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
@@ -3128,15 +3132,89 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
+
+static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+{
+ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+ u64 valid;
+
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV */
+ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+ /*
+ * Copy each region from the possibly compacted offset to the
+ * non-compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FPSSE;
+ while (valid) {
+ u64 feature = valid & -valid;
+ int index = fls64(feature) - 1;
+ void *src = get_xsave_addr(xsave, feature);
+
+ if (src) {
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+ memcpy(dest + offset, src, size);
+ }
+
+ valid -= feature;
+ }
+}
+
+static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+{
+ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+ u64 valid;
+
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(xsave, src, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV and possibly XCOMP_BV. */
+ xsave->xsave_hdr.xstate_bv = xstate_bv;
+ if (cpu_has_xsaves)
+ xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
+
+ /*
+ * Copy each region from the non-compacted offset to the
+ * possibly compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FPSSE;
+ while (valid) {
+ u64 feature = valid & -valid;
+ int index = fls64(feature) - 1;
+ void *dest = get_xsave_addr(xsave, feature);
+
+ if (dest) {
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+ memcpy(dest, src + offset, size);
+ } else
+ WARN_ON_ONCE(1);
+
+ valid -= feature;
+ }
+}
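
[Editor's note] Both fill_xsave() and load_xsave() above walk the set bits of xstate_bv with the usual lowest-set-bit trick: valid & -valid isolates the lowest set bit, fls64() - 1 converts it to a bit index, and subtracting the isolated bit clears it for the next iteration. A standalone version of that loop, with the compiler builtin standing in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

/* Visit each set bit of 'mask' from lowest to highest, printing its index.
 * Mirrors the valid/feature loop in fill_xsave()/load_xsave(). */
static void for_each_set_bit64(uint64_t mask)
{
    while (mask) {
        uint64_t feature = mask & -mask;                /* lowest set bit */
        int index = 63 - __builtin_clzll(feature);      /* == fls64(feature) - 1 */

        printf("bit %d set\n", index);

        mask -= feature;                                /* clear that bit */
    }
}

int main(void)
{
    for_each_set_bit64((1ULL << 2) | (1ULL << 5) | (1ULL << 63));
    return 0;
}
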
+
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (cpu_has_xsave) {
- memcpy(guest_xsave->region,
- &vcpu->arch.guest_fpu.state->xsave,
- vcpu->arch.guest_xstate_size);
- *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
- vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+ memset(guest_xsave, 0, sizeof(struct kvm_xsave));
+ fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
@@ -3160,8 +3238,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
*/
if (xstate_bv & ~kvm_supported_xcr0())
return -EINVAL;
- memcpy(&vcpu->arch.guest_fpu.state->xsave,
- guest_xsave->region, vcpu->arch.guest_xstate_size);
+ load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
@@ -4004,7 +4081,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
default:
- ;
+ r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;
@@ -4667,7 +4744,7 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
+ return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
@@ -5211,21 +5288,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
- struct kvm_run *kvm_run = vcpu->run;
- unsigned long eip = vcpu->arch.emulate_ctxt.eip;
- u32 dr6 = 0;
-
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
(vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
- dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ struct kvm_run *kvm_run = vcpu->run;
+ unsigned long eip = kvm_get_linear_rip(vcpu);
+ u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
vcpu->arch.guest_debug_dr7,
vcpu->arch.eff_db);
if (dr6 != 0) {
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
- kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
-
+ kvm_run->debug.arch.pc = eip;
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
@@ -5235,7 +5308,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
!(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
- dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ unsigned long eip = kvm_get_linear_rip(vcpu);
+ u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
vcpu->arch.dr7,
vcpu->arch.db);
@@ -5365,7 +5439,9 @@ restart:
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
- __kvm_set_rflags(vcpu, ctxt->eflags);
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ __kvm_set_rflags(vcpu, ctxt->eflags);
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -5965,6 +6041,12 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
+ if (vcpu->arch.exception.nr == DB_VECTOR &&
+ (vcpu->arch.dr7 & DR7_GD)) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
+ }
+
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
@@ -6873,6 +6955,9 @@ int fx_init(struct kvm_vcpu *vcpu)
return err;
fpu_finit(&vcpu->arch.guest_fpu);
+ if (cpu_has_xsaves)
+ vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
+ host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
* Ensure guest xcr0 is valid for loading
@@ -7024,7 +7109,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_x86_ops->vcpu_reset(vcpu);
}
-void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
struct kvm_segment cs;
@@ -7256,6 +7341,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type)
return -EINVAL;
+ INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -7536,12 +7622,18 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
return kvm_x86_ops->interrupt_allowed(vcpu);
}
-bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
- unsigned long current_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ if (is_64_bit_mode(vcpu))
+ return kvm_rip_read(vcpu);
+ return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
+ kvm_rip_read(vcpu));
+}
+EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
- return current_rip == linear_rip;
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+ return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
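
[Editor's note] kvm_get_linear_rip() above centralizes the linear instruction-pointer computation and adds the mode distinction: in 64-bit mode the CS base is ignored and RIP is used as-is, otherwise the sum of CS base and RIP is truncated to 32 bits. The same computation as a pure function, with the mode and segment base passed in explicitly (the real helper reads them from the vCPU):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Linear instruction pointer from RIP and the CS segment base.
 * In 64-bit mode the CS base is ignored; otherwise the result wraps to 32 bits. */
static uint64_t linear_rip(uint64_t rip, uint64_t cs_base, bool is_64_bit_mode)
{
    if (is_64_bit_mode)
        return rip;
    return (uint32_t)(cs_base + rip);
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)linear_rip(0x1000, 0xffff0000, false));
    printf("%#llx\n", (unsigned long long)linear_rip(0x1000, 0xffff0000, true));
    return 0;
}
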
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7cb9c45a5fe0..cc1d61af6140 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -162,7 +162,8 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
- | XSTATE_BNDREGS | XSTATE_BNDCSR)
+ | XSTATE_BNDREGS | XSTATE_BNDCSR \
+ | XSTATE_AVX512)
extern u64 host_xcr0;
extern u64 kvm_supported_xcr0(void);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index db92793b7e23..1530afb07c85 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
-obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
deleted file mode 100644
index ff4fa51a5b1f..000000000000
--- a/arch/x86/lib/hash.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Some portions derived from code covered by the following notice:
- *
- * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/hash.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/cpufeature.h>
-#include <asm/hash.h>
-
-static inline u32 crc32_u32(u32 crc, u32 val)
-{
-#ifdef CONFIG_AS_CRC32
- asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
-#else
- asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
-#endif
- return crc;
-}
-
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
-{
- const u32 *p32 = (const u32 *) data;
- u32 i, tmp = 0;
-
- for (i = 0; i < len / 4; i++)
- seed = crc32_u32(seed, *p32++);
-
- switch (len & 3) {
- case 3:
- tmp |= *((const u8 *) p32 + 2) << 16;
- /* fallthrough */
- case 2:
- tmp |= *((const u8 *) p32 + 1) << 8;
- /* fallthrough */
- case 1:
- tmp |= *((const u8 *) p32);
- seed = crc32_u32(seed, tmp);
- break;
- }
-
- return seed;
-}
-
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
-{
- const u32 *p32 = (const u32 *) data;
- u32 i;
-
- for (i = 0; i < len; i++)
- seed = crc32_u32(seed, *p32++);
-
- return seed;
-}
-
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
- if (cpu_has_xmm4_2) {
- ops->hash = intel_crc4_2_hash;
- ops->hash2 = intel_crc4_2_hash2;
- }
-}
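
[Editor's note] For reference, the deleted backend fed the buffer to the SSE4.2 CRC32 instruction one 32-bit word at a time and packed the 1-3 trailing bytes into a final word. A user-space equivalent using the compiler intrinsic is sketched below (build with -msse4.2); it illustrates the removed algorithm only and is not a replacement for the arch_fast_hash interface:

#include <nmmintrin.h>          /* _mm_crc32_u32, SSE4.2 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* CRC32C-based hash over 'len' bytes, mirroring intel_crc4_2_hash():
 * full 32-bit words first, then the 1-3 leftover bytes packed into one word. */
static uint32_t crc32c_hash(const void *data, uint32_t len, uint32_t seed)
{
    const uint32_t *p32 = data;
    uint32_t i, tmp = 0;

    for (i = 0; i < len / 4; i++)
        seed = _mm_crc32_u32(seed, *p32++);

    switch (len & 3) {
    case 3:
        tmp |= ((const uint8_t *)p32)[2] << 16;
        /* fall through */
    case 2:
        tmp |= ((const uint8_t *)p32)[1] << 8;
        /* fall through */
    case 1:
        tmp |= ((const uint8_t *)p32)[0];
        seed = _mm_crc32_u32(seed, tmp);
        break;
    }

    return seed;
}

int main(void)
{
    const char msg[] = "hello, world";

    printf("%#x\n", crc32c_hash(msg, (uint32_t)strlen(msg), 0));
    return 0;
}
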
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d973e61e450d..38dcec403b46 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -844,11 +844,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
int code = BUS_ADRERR;
- up_read(&mm->mmap_sem);
-
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
@@ -879,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address, 0, 0);
return;
}
@@ -887,14 +883,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
return;
}
- up_read(&current->mm->mmap_sem);
-
/*
* We ran out of memory, call the OOM killer, and return the
* userspace (which will retry the fault, or kill us if we got
@@ -1062,7 +1055,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
- int fault;
+ int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
tsk = current;
@@ -1237,47 +1230,50 @@ good_area:
* we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
*/
fault = handle_mm_fault(mm, vma, address, flags);
+ major |= fault & VM_FAULT_MAJOR;
/*
- * If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
- * would already be released in __lock_page_or_retry in mm/filemap.c.
+ * If we need to retry the mmap_sem has already been released,
+ * and if there is a fatal signal pending there is no guarantee
+ * that we made any progress. Handle this case first.
*/
- if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ /* Retry at most once */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ if (!fatal_signal_pending(tsk))
+ goto retry;
+ }
+
+ /* User mode? Just return to handle the fatal exception */
+ if (flags & FAULT_FLAG_USER)
+ return;
+
+ /* Not returning to user mode? Handle exceptions or die: */
+ no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
+ }
+ up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
return;
}
/*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
+ * Major/minor page fault accounting. If any of the events
+ * returned VM_FAULT_MAJOR, we account it as a major fault.
*/
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (major) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
check_v8086_mode(regs, address, tsk);
-
- up_read(&mm->mmap_sem);
}
NOKPROBE_SYMBOL(__do_page_fault);
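
[Editor's note] The rewritten fault path above handles VM_FAULT_RETRY in a single block: the first retry clears FAULT_FLAG_ALLOW_RETRY, sets FAULT_FLAG_TRIED and loops back unless a fatal signal is pending, and the separate 'major' accumulator keeps a major fault from the first attempt counted even when the page is found in cache on the retry. A generic sketch of that retry-once control flow, using stand-in flags and a fake fault handler rather than the mm API:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_ALLOW_RETRY  0x1
#define FLAG_TRIED        0x2

#define FAULT_RETRY       0x1
#define FAULT_MAJOR       0x2

/* Stand-in for handle_mm_fault(): the first call asks for a retry and
 * reports a major fault, the second call succeeds. */
static int fake_fault(unsigned int flags)
{
    return (flags & FLAG_TRIED) ? 0 : (FAULT_RETRY | FAULT_MAJOR);
}

static void do_fault(void)
{
    unsigned int flags = FLAG_ALLOW_RETRY;
    int fault, major = 0;

retry:
    fault = fake_fault(flags);
    major |= fault & FAULT_MAJOR;       /* remember a major fault across retries */

    if (fault & FAULT_RETRY) {
        if (flags & FLAG_ALLOW_RETRY) {
            flags &= ~FLAG_ALLOW_RETRY; /* retry at most once */
            flags |= FLAG_TRIED;
            goto retry;
        }
        /* a second retry request would be treated as an error here */
        return;
    }

    printf("fault handled, accounted as %s fault\n", major ? "major" : "minor");
}

int main(void)
{
    do_fault();
    return 0;
}
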
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 82b41d56bb98..a97ee0801475 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -703,10 +703,10 @@ void __init zone_sizes_init(void)
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+ max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+ max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn);
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a3a5d46605d2..536ea2fb6e33 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -384,6 +384,26 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
}
/*
+ * Lookup the PMD entry for a virtual address. Return a pointer to the entry
+ * or NULL if not present.
+ */
+pmd_t *lookup_pmd_address(unsigned long address)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ pgd = pgd_offset_k(address);
+ if (pgd_none(*pgd))
+ return NULL;
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+ return NULL;
+
+ return pmd_offset(pud, address);
+}
+
+/*
* This is necessary because __pa() does not work on some
* kinds of memory, like vmalloc() or the alloc_remap()
* areas on 32-bit NUMA systems. The percpu areas can
@@ -1817,7 +1837,7 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3f627345d51c..987514396c1e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];
-static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
if (len == 1)
*ptr = bytes;
@@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT4_off32(b1, b2, b3, b4, off) \
do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
-static inline bool is_imm8(int value)
+static bool is_imm8(int value)
{
return value <= 127 && value >= -128;
}
-static inline bool is_simm32(s64 value)
+static bool is_simm32(s64 value)
{
return value == (s64) (s32) value;
}
@@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
#define X86_JGE 0x7D
#define X86_JG 0x7F
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_flush_icache(void *start, void *end)
{
mm_segment_t old_fs = get_fs();
@@ -133,24 +133,24 @@ static const int reg2hex[] = {
* which need extra byte of encoding.
* rax,rcx,...,rbp have simpler encoding
*/
-static inline bool is_ereg(u32 reg)
+static bool is_ereg(u32 reg)
{
- if (reg == BPF_REG_5 || reg == AUX_REG ||
- (reg >= BPF_REG_7 && reg <= BPF_REG_9))
- return true;
- else
- return false;
+ return (1 << reg) & (BIT(BPF_REG_5) |
+ BIT(AUX_REG) |
+ BIT(BPF_REG_7) |
+ BIT(BPF_REG_8) |
+ BIT(BPF_REG_9));
}
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
-static inline u8 add_1mod(u8 byte, u32 reg)
+static u8 add_1mod(u8 byte, u32 reg)
{
if (is_ereg(reg))
byte |= 1;
return byte;
}
-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
if (is_ereg(r1))
byte |= 1;
@@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
}
/* encode 'dst_reg' register into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 dst_reg)
+static u8 add_1reg(u8 byte, u32 dst_reg)
{
return byte + reg2hex[dst_reg];
}
/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
@@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size)
}
struct jit_context {
- unsigned int cleanup_addr; /* epilogue code offset */
+ int cleanup_addr; /* epilogue code offset */
bool seen_ld_abs;
};
@@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
struct bpf_insn *insn = bpf_prog->insnsi;
int insn_cnt = bpf_prog->len;
bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+ bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i;
int proglen = 0;
@@ -854,10 +855,11 @@ common_load:
goto common_load;
case BPF_JMP | BPF_EXIT:
- if (i != insn_cnt - 1) {
+ if (seen_exit) {
jmp_offset = ctx->cleanup_addr - addrs[i];
goto emit_jmp;
}
+ seen_exit = true;
/* update cleanup_addr */
ctx->cleanup_addr = proglen;
/* mov rbx, qword ptr [rbp-X] */
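
[Editor's note] The reworked is_ereg() above swaps a chain of comparisons for a single bitmask membership test: build a mask of the register numbers that need the extra REX bit and check (1 << reg) & mask. This only works while every candidate value is smaller than the mask width, which holds for the small BPF register numbers. A standalone version of the same idea with hypothetical register numbers:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Hypothetical register numbers for the demo (not the real BPF mapping). */
enum { REG_0, REG_1, REG_2, REG_3, REG_4, REG_5, REG_6, REG_7, REG_8, REG_9 };

/* True for registers that need the extra REX encoding bit. */
static bool needs_ext(unsigned int reg)
{
    return (1u << reg) & (BIT(REG_5) | BIT(REG_7) | BIT(REG_8) | BIT(REG_9));
}

int main(void)
{
    for (unsigned int r = REG_0; r <= REG_9; r++)
        printf("reg %u: %s\n", r, needs_ext(r) ? "extended" : "plain");
    return 0;
}
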
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1819a91bbb9f..c489ef2c1a39 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -23,6 +23,8 @@
#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
+#include <asm/xen/cpuid.h>
+#include <asm/apic.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
@@ -423,6 +425,28 @@ int __init pci_xen_init(void)
return 0;
}
+#ifdef CONFIG_PCI_MSI
+void __init xen_msi_init(void)
+{
+ if (!disable_apic) {
+ /*
+ * If hardware supports (x2)APIC virtualization (as indicated
+ * by hypervisor's leaf 4) then we don't need to use pirqs/
+ * event channels for MSI handling and instead use regular
+ * APIC processing
+ */
+ uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
+
+ if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
+ ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && cpu_has_apic))
+ return;
+ }
+
+ x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+}
+#endif
+
int __init pci_xen_hvm_init(void)
{
if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
@@ -437,8 +461,11 @@ int __init pci_xen_hvm_init(void)
#endif
#ifdef CONFIG_PCI_MSI
- x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
- x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ /*
+ * We need to wait until after x2apic is initialized
+ * before we can set MSI IRQ ops.
+ */
+ x86_platform.apic_post_init = xen_msi_init;
#endif
return 0;
}
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index 4d171e8640ef..735ba21efe91 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -86,7 +86,6 @@ static int iris_remove(struct platform_device *pdev)
static struct platform_driver iris_driver = {
.driver = {
.name = "iris",
- .owner = THIS_MODULE,
},
.probe = iris_probe,
.remove = iris_remove,
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index a9acde72d4ed..c5350fd27d70 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -170,7 +170,6 @@ static int xo1_pm_remove(struct platform_device *pdev)
static struct platform_driver cs5535_pms_driver = {
.driver = {
.name = "cs5535-pms",
- .owner = THIS_MODULE,
},
.probe = xo1_pm_probe,
.remove = xo1_pm_remove,
@@ -179,7 +178,6 @@ static struct platform_driver cs5535_pms_driver = {
static struct platform_driver cs5535_acpi_driver = {
.driver = {
.name = "olpc-xo1-pm-acpi",
- .owner = THIS_MODULE,
},
.probe = xo1_pm_probe,
.remove = xo1_pm_remove,
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 9fe1b5d002f0..b3560ece1c9f 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -364,3 +364,4 @@
355 i386 getrandom sys_getrandom
356 i386 memfd_create sys_memfd_create
357 i386 bpf sys_bpf
+358 i386 execveat sys_execveat stub32_execveat
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 281150b539a2..8d656fbb57aa 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -328,6 +328,7 @@
319 common memfd_create sys_memfd_create
320 common kexec_file_load sys_kexec_file_load
321 common bpf sys_bpf
+322 64 execveat stub_execveat
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -366,3 +367,4 @@
542 x32 getsockopt compat_sys_getsockopt
543 x32 io_setup compat_sys_io_setup
544 x32 io_submit compat_sys_io_submit
+545 x32 execveat stub_x32_execveat
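
[Editor's note] The three table entries added in these hunks expose the new execveat() system call (322 on x86_64, 358 on i386, 545 in the x32 range). Before a libc wrapper exists it can be exercised through syscall(2); a minimal example follows, with the __NR_execveat fallback taken from the x86_64 number in the table above and /bin/true used as an arbitrary target:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_execveat
#define __NR_execveat 322       /* x86_64 number added by this series */
#endif

int main(void)
{
    int dirfd = open("/bin", O_RDONLY | O_DIRECTORY);
    char *argv[] = { "true", NULL };
    char *envp[] = { NULL };

    if (dirfd < 0) {
        perror("open");
        return 1;
    }

    /* execveat(dirfd, "true", argv, envp, flags = 0): run /bin/true
     * relative to dirfd without building an absolute path. */
    syscall(__NR_execveat, dirfd, "true", argv, envp, 0);

    perror("execveat");         /* only reached if the call failed */
    return 1;
}
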
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index cc04e67bfd05..2d7d9a1f5b53 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -29,20 +29,18 @@
#endif /* CONFIG_X86_32 */
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-
-#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
-#define smp_rmb() rmb()
+#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
-#define smp_rmb() barrier()
+#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
+#define dma_wmb() barrier()
-#define smp_wmb() barrier()
+#ifdef CONFIG_SMP
-#define smp_read_barrier_depends() read_barrier_depends()
+#define smp_mb() mb()
+#define smp_rmb() dma_rmb()
+#define smp_wmb() barrier()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* CONFIG_SMP */
@@ -50,11 +48,13 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* CONFIG_SMP */
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index f2f0723070ca..20c3649d0691 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -31,6 +31,7 @@
#define stub_fork sys_fork
#define stub_vfork sys_vfork
#define stub_execve sys_execve
+#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 8c8298d78185..5c1f9ace7ae7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -387,7 +387,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
unsigned long mfn;
if (!xen_feature(XENFEAT_auto_translated_physmap))
- mfn = get_phys_to_machine(pfn);
+ mfn = __pfn_to_mfn(pfn);
else
mfn = pfn;
/*
@@ -1113,20 +1113,16 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
* instead of somewhere later and be confusing. */
xen_mc_flush();
}
-static void __init xen_pagetable_p2m_copy(void)
+
+static void __init xen_pagetable_p2m_free(void)
{
unsigned long size;
unsigned long addr;
- unsigned long new_mfn_list;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- new_mfn_list = xen_revector_p2m_tree();
/* No memory or already called. */
- if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
+ if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
return;
/* using __ka address and sticking INVALID_P2M_ENTRY! */
@@ -1144,8 +1140,6 @@ static void __init xen_pagetable_p2m_copy(void)
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
memblock_free(__pa(xen_start_info->mfn_list), size);
- /* And revector! Bye bye old array */
- xen_start_info->mfn_list = new_mfn_list;
/* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of
@@ -1169,17 +1163,35 @@ static void __init xen_pagetable_p2m_copy(void)
}
#endif
-static void __init xen_pagetable_init(void)
+static void __init xen_pagetable_p2m_setup(void)
{
- paging_init();
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ xen_vmalloc_p2m_tree();
+
#ifdef CONFIG_X86_64
- xen_pagetable_p2m_copy();
+ xen_pagetable_p2m_free();
#endif
+ /* And revector! Bye bye old array */
+ xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+}
+
+static void __init xen_pagetable_init(void)
+{
+ paging_init();
+ xen_post_allocator_init();
+
+ xen_pagetable_p2m_setup();
+
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
+ /* Remap memory freed due to conflicts with E820 map */
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ xen_remap_memory();
+
xen_setup_shared_info();
- xen_post_allocator_init();
}
static void xen_write_cr2(unsigned long cr2)
{
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b456b048eca9..edbc7a63fd73 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -3,21 +3,22 @@
* guests themselves, but it must also access and update the p2m array
* during suspend/resume when all the pages are reallocated.
*
- * The p2m table is logically a flat array, but we implement it as a
- * three-level tree to allow the address space to be sparse.
+ * The logical flat p2m table is mapped to a linear kernel memory area.
+ * For accesses by Xen a three-level tree linked via mfns only is set up to
+ * allow the address space to be sparse.
*
- * Xen
- * |
- * p2m_top p2m_top_mfn
- * / \ / \
- * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn
- * / \ / \ / /
- * p2m p2m p2m p2m p2m p2m p2m ...
+ * Xen
+ * |
+ * p2m_top_mfn
+ * / \
+ * p2m_mid_mfn p2m_mid_mfn
+ * / /
+ * p2m p2m p2m ...
*
* The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
*
- * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
- * maximum representable pseudo-physical address space is:
+ * The p2m_top_mfn level is limited to 1 page, so the maximum representable
+ * pseudo-physical address space is:
* P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
*
* P2M_PER_PAGE depends on the architecture, as a mfn is always
@@ -30,6 +31,9 @@
* leaf entries, or for the top root, or middle one, for which there is a void
* entry, we assume it is "missing". So (for example)
* pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
+ * We have a dedicated page p2m_missing with all entries being
+ * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
+ * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
*
* We also have the possibility of setting 1-1 mappings on certain regions, so
* that:
@@ -39,122 +43,20 @@
* PCI BARs, or ACPI spaces), we can create mappings easily because we
* get the PFN value to match the MFN.
*
- * For this to work efficiently we have one new page p2m_identity and
- * allocate (via reserved_brk) any other pages we need to cover the sides
- * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
- * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs,
- * no other fancy value).
+ * For this to work efficiently we have one new page p2m_identity. All entries
+ * in p2m_identity are set to INVALID_P2M_ENTRY type (Xen toolstack only
+ * recognizes that and MFNs, no other fancy value).
*
* On lookup we spot that the entry points to p2m_identity and return the
* identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
* If the entry points to an allocated page, we just proceed as before and
- * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
+ * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
* appropriate functions (pfn_to_mfn).
*
* The reason for having the IDENTITY_FRAME_BIT instead of just returning the
* PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
* non-identity pfn. To protect ourselves against we elect to set (and get) the
* IDENTITY_FRAME_BIT on all identity mapped PFNs.
- *
- * This simplistic diagram is used to explain the more subtle piece of code.
- * There is also a digram of the P2M at the end that can help.
- * Imagine your E820 looking as so:
- *
- * 1GB 2GB 4GB
- * /-------------------+---------\/----\ /----------\ /---+-----\
- * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
- * \-------------------+---------/\----/ \----------/ \---+-----/
- * ^- 1029MB ^- 2001MB
- *
- * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
- * 2048MB = 524288 (0x80000)]
- *
- * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
- * is actually not present (would have to kick the balloon driver to put it in).
- *
- * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
- * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
- * of the PFN and the end PFN (263424 and 512256 respectively). The first step
- * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
- * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
- * aligned on 512^2*PAGE_SIZE (1GB) we reserve_brk new middle and leaf pages as
- * required to split any existing p2m_mid_missing middle pages.
- *
- * With the E820 example above, 263424 is not 1GB aligned so we allocate a
- * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
- * Each entry in the allocate page is "missing" (points to p2m_missing).
- *
- * Next stage is to determine if we need to do a more granular boundary check
- * on the 4MB (or 2MB depending on architecture) off the start and end pfn's.
- * We check if the start pfn and end pfn violate that boundary check, and if
- * so reserve_brk a (p2m[x][y]) leaf page. This way we have a much finer
- * granularity of setting which PFNs are missing and which ones are identity.
- * In our example 263424 and 512256 both fail the check so we reserve_brk two
- * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
- * values) and assign them to p2m[1][2] and p2m[1][488] respectively.
- *
- * At this point we would at minimum reserve_brk one page, but could be up to
- * three. Each call to set_phys_range_identity has at maximum a three page
- * cost. If we were to query the P2M at this stage, all those entries from
- * start PFN through end PFN (so 1029MB -> 2001MB) would return
- * INVALID_P2M_ENTRY ("missing").
- *
- * The next step is to walk from the start pfn to the end pfn setting
- * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
- * If we find that the middle entry is pointing to p2m_missing we can swap it
- * over to p2m_identity - this way covering 4MB (or 2MB) PFN space (and
- * similarly swapping p2m_mid_missing for p2m_mid_identity for larger regions).
- * At this point we do not need to worry about boundary aligment (so no need to
- * reserve_brk a middle page, figure out which PFNs are "missing" and which
- * ones are identity), as that has been done earlier. If we find that the
- * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
- * that page (which covers 512 PFNs) and set the appropriate PFN with
- * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
- * set from p2m[1][2][256->511] and p2m[1][488][0->256] with
- * IDENTITY_FRAME_BIT set.
- *
- * All other regions that are void (or not filled) either point to p2m_missing
- * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
- * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
- * contain the INVALID_P2M_ENTRY value and are considered "missing."
- *
- * Finally, the region beyond the end of of the E820 (4 GB in this example)
- * is set to be identity (in case there are MMIO regions placed here).
- *
- * This is what the p2m ends up looking (for the E820 above) with this
- * fabulous drawing:
- *
- * p2m /--------------\
- * /-----\ | &mfn_list[0],| /-----------------\
- * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. |
- * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] |
- * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] |
- * |-----| \ | [p2m_identity]+\\ | .... |
- * | 2 |--\ \-------------------->| ... | \\ \----------------/
- * |-----| \ \---------------/ \\
- * | 3 |-\ \ \\ p2m_identity [1]
- * |-----| \ \-------------------->/---------------\ /-----------------\
- * | .. |\ | | [p2m_identity]+-->| ~0, ~0, ~0, ... |
- * \-----/ | | | [p2m_identity]+-->| ..., ~0 |
- * | | | .... | \-----------------/
- * | | +-[x], ~0, ~0.. +\
- * | | \---------------/ \
- * | | \-> /---------------\
- * | V p2m_mid_missing p2m_missing | IDENTITY[@0] |
- * | /-----------------\ /------------\ | IDENTITY[@256]|
- * | | [p2m_missing] +---->| ~0, ~0, ...| | ~0, ~0, .... |
- * | | [p2m_missing] +---->| ..., ~0 | \---------------/
- * | | ... | \------------/
- * | \-----------------/
- * |
- * | p2m_mid_identity
- * | /-----------------\
- * \-->| [p2m_identity] +---->[1]
- * | [p2m_identity] +---->[1]
- * | ... |
- * \-----------------/
- *
- * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
*/
#include <linux/init.h>
@@ -164,9 +66,11 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
+#include <linux/slab.h>
#include <asm/cache.h>
#include <asm/setup.h>
+#include <asm/uaccess.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
@@ -178,31 +82,26 @@
#include "multicalls.h"
#include "xen-ops.h"
+#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
+
static void __init m2p_override_init(void);
+unsigned long *xen_p2m_addr __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_addr);
+unsigned long xen_p2m_size __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
+EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+
+static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
-
-/* Placeholders for holes in the address space */
-static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
-
-RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-
-/* For each I/O range remapped we may lose up to two leaf pages for the boundary
- * violations and three mid pages to cover up to 3GB. With
- * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
- * remapped region.
- */
-RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
+static unsigned long *p2m_missing;
+static unsigned long *p2m_identity;
+static pte_t *p2m_missing_pte;
+static pte_t *p2m_identity_pte;
static inline unsigned p2m_top_index(unsigned long pfn)
{
@@ -220,14 +119,6 @@ static inline unsigned p2m_index(unsigned long pfn)
return pfn % P2M_PER_PAGE;
}
-static void p2m_top_init(unsigned long ***top)
-{
- unsigned i;
-
- for (i = 0; i < P2M_TOP_PER_PAGE; i++)
- top[i] = p2m_mid_missing;
-}
-
static void p2m_top_mfn_init(unsigned long *top)
{
unsigned i;
@@ -244,28 +135,43 @@ static void p2m_top_mfn_p_init(unsigned long **top)
top[i] = p2m_mid_missing_mfn;
}
-static void p2m_mid_init(unsigned long **mid, unsigned long *leaf)
+static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
unsigned i;
for (i = 0; i < P2M_MID_PER_PAGE; i++)
- mid[i] = leaf;
+ mid[i] = virt_to_mfn(leaf);
}
-static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
+static void p2m_init(unsigned long *p2m)
{
unsigned i;
- for (i = 0; i < P2M_MID_PER_PAGE; i++)
- mid[i] = virt_to_mfn(leaf);
+ for (i = 0; i < P2M_PER_PAGE; i++)
+ p2m[i] = INVALID_P2M_ENTRY;
}
-static void p2m_init(unsigned long *p2m)
+static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
unsigned i;
- for (i = 0; i < P2M_MID_PER_PAGE; i++)
- p2m[i] = INVALID_P2M_ENTRY;
+ for (i = 0; i < P2M_PER_PAGE; i++)
+ p2m[i] = IDENTITY_FRAME(pfn + i);
+}
+
+static void * __ref alloc_p2m_page(void)
+{
+ if (unlikely(!slab_is_available()))
+ return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+/* Only to be called in case of a race for a page just allocated! */
+static void free_p2m_page(void *p)
+{
+ BUG_ON(!slab_is_available());
+ free_page((unsigned long)p);
}
/*
@@ -280,40 +186,46 @@ static void p2m_init(unsigned long *p2m)
*/
void __ref xen_build_mfn_list_list(void)
{
- unsigned long pfn;
+ unsigned long pfn, mfn;
+ pte_t *ptep;
+ unsigned int level, topidx, mididx;
+ unsigned long *mid_mfn_p;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
/* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) {
- p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_missing_mfn = alloc_p2m_page();
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
- p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_p = alloc_p2m_page();
p2m_top_mfn_p_init(p2m_top_mfn_p);
- p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn = alloc_p2m_page();
p2m_top_mfn_init(p2m_top_mfn);
} else {
/* Reinitialise, mfn's all change after migration */
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
}
- for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
- unsigned long **mid;
- unsigned long *mid_mfn_p;
+ for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
+ pfn += P2M_PER_PAGE) {
+ topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
- mid = p2m_top[topidx];
mid_mfn_p = p2m_top_mfn_p[topidx];
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
+ &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
+ mfn = pte_mfn(*ptep);
+ ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
/* Don't bother allocating any mfn mid levels if
* they're just missing, just update the stored mfn,
* since all could have changed over a migrate.
*/
- if (mid == p2m_mid_missing) {
+ if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
BUG_ON(mididx);
BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
@@ -322,19 +234,14 @@ void __ref xen_build_mfn_list_list(void)
}
if (mid_mfn_p == p2m_mid_missing_mfn) {
- /*
- * XXX boot-time only! We should never find
- * missing parts of the mfn tree after
- * runtime.
- */
- mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ mid_mfn_p = alloc_p2m_page();
p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
p2m_top_mfn_p[topidx] = mid_mfn_p;
}
p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
- mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
+ mid_mfn_p[mididx] = mfn;
}
}
@@ -353,171 +260,235 @@ void xen_setup_mfn_list_list(void)
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
- unsigned long *mfn_list;
- unsigned long max_pfn;
unsigned long pfn;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
- mfn_list = (unsigned long *)xen_start_info->mfn_list;
- max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
- xen_max_p2m_pfn = max_pfn;
+ xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
+ xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
- p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m_missing);
- p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m_identity);
+ for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
+ xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
- p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(p2m_mid_missing, p2m_missing);
- p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(p2m_mid_identity, p2m_identity);
+ xen_max_p2m_pfn = xen_p2m_size;
+}
- p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_top_init(p2m_top);
+#define P2M_TYPE_IDENTITY 0
+#define P2M_TYPE_MISSING 1
+#define P2M_TYPE_PFN 2
+#define P2M_TYPE_UNKNOWN 3
- /*
- * The domain builder gives us a pre-constructed p2m array in
- * mfn_list for all the pages initially given to us, so we just
- * need to graft that into our tree structure.
- */
- for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
+static int xen_p2m_elem_type(unsigned long pfn)
+{
+ unsigned long mfn;
- if (p2m_top[topidx] == p2m_mid_missing) {
- unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_init(mid, p2m_missing);
+ if (pfn >= xen_p2m_size)
+ return P2M_TYPE_IDENTITY;
- p2m_top[topidx] = mid;
- }
+ mfn = xen_p2m_addr[pfn];
- /*
- * As long as the mfn_list has enough entries to completely
- * fill a p2m page, pointing into the array is ok. But if
- * not the entries beyond the last pfn will be undefined.
- */
- if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
- unsigned long p2midx;
+ if (mfn == INVALID_P2M_ENTRY)
+ return P2M_TYPE_MISSING;
- p2midx = max_pfn % P2M_PER_PAGE;
- for ( ; p2midx < P2M_PER_PAGE; p2midx++)
- mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
- }
- p2m_top[topidx][mididx] = &mfn_list[pfn];
- }
+ if (mfn & IDENTITY_FRAME_BIT)
+ return P2M_TYPE_IDENTITY;
- m2p_override_init();
+ return P2M_TYPE_PFN;
}
-#ifdef CONFIG_X86_64
-unsigned long __init xen_revector_p2m_tree(void)
+
+static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
- unsigned long va_start;
- unsigned long va_end;
+ unsigned int i, chunk;
unsigned long pfn;
- unsigned long pfn_free = 0;
- unsigned long *mfn_list = NULL;
- unsigned long size;
-
- va_start = xen_start_info->mfn_list;
- /*We copy in increments of P2M_PER_PAGE * sizeof(unsigned long),
- * so make sure it is rounded up to that */
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- va_end = va_start + size;
-
- /* If we were revectored already, don't do it again. */
- if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET)
- return 0;
+ unsigned long *mfns;
+ pte_t *ptep;
+ pmd_t *pmdp;
+ int type;
- mfn_list = alloc_bootmem_align(size, PAGE_SIZE);
- if (!mfn_list) {
- pr_warn("Could not allocate space for a new P2M tree!\n");
- return xen_start_info->mfn_list;
- }
- /* Fill it out with INVALID_P2M_ENTRY value */
- memset(mfn_list, 0xFF, size);
+ p2m_missing = alloc_p2m_page();
+ p2m_init(p2m_missing);
+ p2m_identity = alloc_p2m_page();
+ p2m_init(p2m_identity);
- for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx;
- unsigned long *mid_p;
+ p2m_missing_pte = alloc_p2m_page();
+ paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
+ p2m_identity_pte = alloc_p2m_page();
+ paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ set_pte(p2m_missing_pte + i,
+ pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
+ set_pte(p2m_identity_pte + i,
+ pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
+ }
- if (!p2m_top[topidx])
+ for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
+ /*
+ * Try to map missing/identity PMDs or p2m-pages if possible.
+ * We have to respect the structure of the mfn_list_list
+ * which will be built just afterwards.
+ * Chunk size to test is one p2m page if we are in the middle
+ * of a mfn_list_list mid page and the complete mid page area
+ * if we are at index 0 of the mid page. Please note that a
+ * mid page might cover more than one PMD, e.g. on 32 bit PAE
+ * kernels.
+ */
+ chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
+ P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;
+
+ type = xen_p2m_elem_type(pfn);
+ i = 0;
+ if (type != P2M_TYPE_PFN)
+ for (i = 1; i < chunk; i++)
+ if (xen_p2m_elem_type(pfn + i) != type)
+ break;
+ if (i < chunk)
+ /* Reset to minimal chunk size. */
+ chunk = P2M_PER_PAGE;
+
+ if (type == P2M_TYPE_PFN || i < chunk) {
+ /* Use initial p2m page contents. */
+#ifdef CONFIG_X86_64
+ mfns = alloc_p2m_page();
+ copy_page(mfns, xen_p2m_addr + pfn);
+#else
+ mfns = xen_p2m_addr + pfn;
+#endif
+ ptep = populate_extra_pte((unsigned long)(p2m + pfn));
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
continue;
+ }
- if (p2m_top[topidx] == p2m_mid_missing)
+ if (chunk == P2M_PER_PAGE) {
+ /* Map complete missing or identity p2m-page. */
+ mfns = (type == P2M_TYPE_MISSING) ?
+ p2m_missing : p2m_identity;
+ ptep = populate_extra_pte((unsigned long)(p2m + pfn));
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
continue;
+ }
- mididx = p2m_mid_index(pfn);
- mid_p = p2m_top[topidx][mididx];
- if (!mid_p)
- continue;
- if ((mid_p == p2m_missing) || (mid_p == p2m_identity))
- continue;
+ /* Complete missing or identity PMD(s) can be mapped. */
+ ptep = (type == P2M_TYPE_MISSING) ?
+ p2m_missing_pte : p2m_identity_pte;
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ pmdp = populate_extra_pmd(
+ (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
+ }
+ }
+}
- if ((unsigned long)mid_p == INVALID_P2M_ENTRY)
- continue;
+void __init xen_vmalloc_p2m_tree(void)
+{
+ static struct vm_struct vm;
- /* The old va. Rebase it on mfn_list */
- if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) {
- unsigned long *new;
+ vm.flags = VM_ALLOC;
+ vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+ PMD_SIZE * PMDS_PER_MID_PAGE);
+ vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
+ pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
- if (pfn_free > (size / sizeof(unsigned long))) {
- WARN(1, "Only allocated for %ld pages, but we want %ld!\n",
- size / sizeof(unsigned long), pfn_free);
- return 0;
- }
- new = &mfn_list[pfn_free];
+ xen_max_p2m_pfn = vm.size / sizeof(unsigned long);
- copy_page(new, mid_p);
- p2m_top[topidx][mididx] = &mfn_list[pfn_free];
+ xen_rebuild_p2m_list(vm.addr);
- pfn_free += P2M_PER_PAGE;
+ xen_p2m_addr = vm.addr;
+ xen_p2m_size = xen_max_p2m_pfn;
- }
- /* This should be the leafs allocated for identity from _brk. */
- }
- return (unsigned long)mfn_list;
+ xen_inv_extra_mem();
+ m2p_override_init();
}
-#else
-unsigned long __init xen_revector_p2m_tree(void)
-{
- return 0;
-}
-#endif
+
unsigned long get_phys_to_machine(unsigned long pfn)
{
- unsigned topidx, mididx, idx;
+ pte_t *ptep;
+ unsigned int level;
+
+ if (unlikely(pfn >= xen_p2m_size)) {
+ if (pfn < xen_max_p2m_pfn)
+ return xen_chk_extra_mem(pfn);
- if (unlikely(pfn >= MAX_P2M_PFN))
return IDENTITY_FRAME(pfn);
+ }
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
/*
* The INVALID_P2M_ENTRY is filled in both p2m_*identity
* and in p2m_*missing, so returning the INVALID_P2M_ENTRY
* would be wrong.
*/
- if (p2m_top[topidx][mididx] == p2m_identity)
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
return IDENTITY_FRAME(pfn);
- return p2m_top[topidx][mididx][idx];
+ return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
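
A rough user-space model of the new lookup path, not part of the patch: many slots of the virtual p2m table may be backed by one shared missing or identity page, and the reader distinguishes the cases by checking which page actually backs the slot (the kernel does this through lookup_address() and the pte). All vp2m_* names are hypothetical.

#include <stdio.h>

#define VP2M_PER_PAGE 4
#define VP2M_PAGES    3
#define VP2M_INVALID  (~0UL)
#define VP2M_IDENTITY(pfn) ((pfn) | (1UL << (8 * sizeof(unsigned long) - 1)))

static unsigned long vp2m_missing[VP2M_PER_PAGE] = {
	VP2M_INVALID, VP2M_INVALID, VP2M_INVALID, VP2M_INVALID
};
static unsigned long vp2m_identity[VP2M_PER_PAGE];
static unsigned long vp2m_real[VP2M_PER_PAGE] = { 0x100, 0x101, 0x102, 0x103 };

/* which page backs each VP2M_PER_PAGE sized slice of the "virtual" table */
static unsigned long *vp2m_backing[VP2M_PAGES] = {
	vp2m_real, vp2m_missing, vp2m_identity
};

static unsigned long vp2m_get(unsigned long pfn)
{
	unsigned long *page = vp2m_backing[pfn / VP2M_PER_PAGE];

	if (page == vp2m_identity)
		return VP2M_IDENTITY(pfn);	/* 1:1 mapped range */
	return page[pfn % VP2M_PER_PAGE];	/* real entry or INVALID */
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < VP2M_PAGES * VP2M_PER_PAGE; pfn++)
		printf("pfn %2lu -> %#lx\n", pfn, vp2m_get(pfn));
	return 0;
}
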
-static void *alloc_p2m_page(void)
+/*
+ * Allocate new pmd(s). It is checked whether the old pmd is still in place.
+ * If not, nothing is changed. This is okay as the only reason for allocating
+ * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
+ * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
+ */
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
{
- return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-}
+ pte_t *ptechk;
+ pte_t *pteret = ptep;
+ pte_t *pte_newpg[PMDS_PER_MID_PAGE];
+ pmd_t *pmdp;
+ unsigned int level;
+ unsigned long flags;
+ unsigned long vaddr;
+ int i;
-static void free_p2m_page(void *p)
-{
- free_page((unsigned long)p);
+ /* Do all allocations first to bail out in error case. */
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ pte_newpg[i] = alloc_p2m_page();
+ if (!pte_newpg[i]) {
+ for (i--; i >= 0; i--)
+ free_p2m_page(pte_newpg[i]);
+
+ return NULL;
+ }
+ }
+
+ vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
+
+ for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
+ copy_page(pte_newpg[i], pte_pg);
+ paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);
+
+ pmdp = lookup_pmd_address(vaddr);
+ BUG_ON(!pmdp);
+
+ spin_lock_irqsave(&p2m_update_lock, flags);
+
+ ptechk = lookup_address(vaddr, &level);
+ if (ptechk == pte_pg) {
+ set_pmd(pmdp,
+ __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
+ if (vaddr == (addr & ~(PMD_SIZE - 1)))
+ pteret = pte_offset_kernel(pmdp, addr);
+ pte_newpg[i] = NULL;
+ }
+
+ spin_unlock_irqrestore(&p2m_update_lock, flags);
+
+ if (pte_newpg[i]) {
+ paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
+ free_p2m_page(pte_newpg[i]);
+ }
+
+ vaddr += PMD_SIZE;
+ }
+
+ return pteret;
}
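
The allocate-first, recheck-under-lock pattern used by alloc_p2m_pmd() can be shown in isolation. A minimal user-space sketch, assuming a pthread mutex in place of the p2m_update_lock spinlock; the demo_* names are hypothetical and the code is not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_ENTRIES 8

static long demo_shared_missing[DEMO_ENTRIES];	/* shared template */
static long *demo_slot = demo_shared_missing;	/* currently installed table */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static long *demo_replace_shared(void)
{
	/* Allocate up front so the error path is trivial and nothing
	 * heavyweight happens while the lock is held. */
	long *new = malloc(sizeof(demo_shared_missing));

	if (!new)
		return NULL;
	memcpy(new, demo_shared_missing, sizeof(demo_shared_missing));

	pthread_mutex_lock(&demo_lock);
	if (demo_slot == demo_shared_missing) {
		demo_slot = new;	/* old entry still in place: install */
		new = NULL;
	}
	pthread_mutex_unlock(&demo_lock);

	/* Somebody else already installed a private table: drop our copy. */
	free(new);
	return demo_slot;
}

int main(void)
{
	long *t1 = demo_replace_shared();
	long *t2 = demo_replace_shared();

	printf("first call installed %p, second call reused %p\n",
	       (void *)t1, (void *)t2);
	return t1 == t2 ? 0 : 1;
}
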
/*
@@ -530,58 +501,62 @@ static void free_p2m_page(void *p)
static bool alloc_p2m(unsigned long pfn)
{
unsigned topidx, mididx;
- unsigned long ***top_p, **mid;
unsigned long *top_mfn_p, *mid_mfn;
- unsigned long *p2m_orig;
+ pte_t *ptep, *pte_pg;
+ unsigned int level;
+ unsigned long flags;
+ unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
+ unsigned long p2m_pfn;
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
- top_p = &p2m_top[topidx];
- mid = ACCESS_ONCE(*top_p);
+ ptep = lookup_address(addr, &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
+ pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
- if (mid == p2m_mid_missing) {
- /* Mid level is missing, allocate a new one */
- mid = alloc_p2m_page();
- if (!mid)
+ if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
+ /* PMD level is missing, allocate a new one */
+ ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ if (!ptep)
return false;
-
- p2m_mid_init(mid, p2m_missing);
-
- if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
- free_p2m_page(mid);
}
- top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ if (p2m_top_mfn) {
+ top_mfn_p = &p2m_top_mfn[topidx];
+ mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
- BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+ BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
- if (mid_mfn == p2m_mid_missing_mfn) {
- /* Separately check the mid mfn level */
- unsigned long missing_mfn;
- unsigned long mid_mfn_mfn;
- unsigned long old_mfn;
+ if (mid_mfn == p2m_mid_missing_mfn) {
+ /* Separately check the mid mfn level */
+ unsigned long missing_mfn;
+ unsigned long mid_mfn_mfn;
+ unsigned long old_mfn;
- mid_mfn = alloc_p2m_page();
- if (!mid_mfn)
- return false;
+ mid_mfn = alloc_p2m_page();
+ if (!mid_mfn)
+ return false;
- p2m_mid_mfn_init(mid_mfn, p2m_missing);
+ p2m_mid_mfn_init(mid_mfn, p2m_missing);
- missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
- mid_mfn_mfn = virt_to_mfn(mid_mfn);
- old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
- if (old_mfn != missing_mfn) {
- free_p2m_page(mid_mfn);
- mid_mfn = mfn_to_virt(old_mfn);
- } else {
- p2m_top_mfn_p[topidx] = mid_mfn;
+ missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+ mid_mfn_mfn = virt_to_mfn(mid_mfn);
+ old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
+ if (old_mfn != missing_mfn) {
+ free_p2m_page(mid_mfn);
+ mid_mfn = mfn_to_virt(old_mfn);
+ } else {
+ p2m_top_mfn_p[topidx] = mid_mfn;
+ }
}
+ } else {
+ mid_mfn = NULL;
}
- p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]);
- if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) {
+ p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+ if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
+ p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
/* p2m leaf page is missing */
unsigned long *p2m;
@@ -589,183 +564,36 @@ static bool alloc_p2m(unsigned long pfn)
if (!p2m)
return false;
- p2m_init(p2m);
-
- if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
- free_p2m_page(p2m);
+ if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
+ p2m_init(p2m);
else
- mid_mfn[mididx] = virt_to_mfn(p2m);
- }
-
- return true;
-}
-
-static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
-{
- unsigned topidx, mididx, idx;
- unsigned long *p2m;
-
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /* Pfff.. No boundary cross-over, lets get out. */
- if (!idx && check_boundary)
- return false;
-
- WARN(p2m_top[topidx][mididx] == p2m_identity,
- "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
- topidx, mididx);
-
- /*
- * Could be done by xen_build_dynamic_phys_to_machine..
- */
- if (p2m_top[topidx][mididx] != p2m_missing)
- return false;
-
- /* Boundary cross-over for the edges: */
- p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_init(p2m);
+ p2m_init_identity(p2m, pfn);
- p2m_top[topidx][mididx] = p2m;
+ spin_lock_irqsave(&p2m_update_lock, flags);
- return true;
-}
-
-static bool __init early_alloc_p2m_middle(unsigned long pfn)
-{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long **mid;
-
- mid = p2m_top[topidx];
- if (mid == p2m_mid_missing) {
- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid, p2m_missing);
-
- p2m_top[topidx] = mid;
- }
- return true;
-}
-
-/*
- * Skim over the P2M tree looking at pages that are either filled with
- * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
- * replace the P2M leaf with a p2m_missing or p2m_identity.
- * Stick the old page in the new P2M tree location.
- */
-static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
-{
- unsigned topidx;
- unsigned mididx;
- unsigned ident_pfns;
- unsigned inv_pfns;
- unsigned long *p2m;
- unsigned idx;
- unsigned long pfn;
-
- /* We only look when this entails a P2M middle layer */
- if (p2m_index(set_pfn))
- return false;
-
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
- topidx = p2m_top_index(pfn);
-
- if (!p2m_top[topidx])
- continue;
-
- if (p2m_top[topidx] == p2m_mid_missing)
- continue;
-
- mididx = p2m_mid_index(pfn);
- p2m = p2m_top[topidx][mididx];
- if (!p2m)
- continue;
-
- if ((p2m == p2m_missing) || (p2m == p2m_identity))
- continue;
-
- if ((unsigned long)p2m == INVALID_P2M_ENTRY)
- continue;
-
- ident_pfns = 0;
- inv_pfns = 0;
- for (idx = 0; idx < P2M_PER_PAGE; idx++) {
- /* IDENTITY_PFNs are 1:1 */
- if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
- ident_pfns++;
- else if (p2m[idx] == INVALID_P2M_ENTRY)
- inv_pfns++;
- else
- break;
+ if (pte_pfn(*ptep) == p2m_pfn) {
+ set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
+ if (mid_mfn)
+ mid_mfn[mididx] = virt_to_mfn(p2m);
+ p2m = NULL;
}
- if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
- goto found;
- }
- return false;
-found:
- /* Found one, replace old with p2m_identity or p2m_missing */
- p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
-
- /* Reset where we want to stick the old page in. */
- topidx = p2m_top_index(set_pfn);
- mididx = p2m_mid_index(set_pfn);
-
- /* This shouldn't happen */
- if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
- early_alloc_p2m_middle(set_pfn);
-
- if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
- return false;
-
- p2m_init(p2m);
- p2m_top[topidx][mididx] = p2m;
- return true;
-}
-bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- if (!early_alloc_p2m_middle(pfn))
- return false;
-
- if (early_can_reuse_p2m_middle(pfn))
- return __set_phys_to_machine(pfn, mfn);
-
- if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
- return false;
+ spin_unlock_irqrestore(&p2m_update_lock, flags);
- if (!__set_phys_to_machine(pfn, mfn))
- return false;
+ if (p2m)
+ free_p2m_page(p2m);
}
return true;
}
-static void __init early_split_p2m(unsigned long pfn)
-{
- unsigned long mididx, idx;
-
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /*
- * Allocate new middle and leaf pages if this pfn lies in the
- * middle of one.
- */
- if (mididx || idx)
- early_alloc_p2m_middle(pfn);
- if (idx)
- early_alloc_p2m(pfn, false);
-}
-
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
{
unsigned long pfn;
- if (unlikely(pfn_s >= MAX_P2M_PFN))
+ if (unlikely(pfn_s >= xen_p2m_size))
return 0;
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
@@ -774,101 +602,51 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
if (pfn_s > pfn_e)
return 0;
- if (pfn_e > MAX_P2M_PFN)
- pfn_e = MAX_P2M_PFN;
-
- early_split_p2m(pfn_s);
- early_split_p2m(pfn_e);
-
- for (pfn = pfn_s; pfn < pfn_e;) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
-
- if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
- break;
- pfn++;
-
- /*
- * If the PFN was set to a middle or leaf identity
- * page the remainder must also be identity, so skip
- * ahead to the next middle or leaf entry.
- */
- if (p2m_top[topidx] == p2m_mid_identity)
- pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE);
- else if (p2m_top[topidx][mididx] == p2m_identity)
- pfn = ALIGN(pfn, P2M_PER_PAGE);
- }
+ if (pfn_e > xen_p2m_size)
+ pfn_e = xen_p2m_size;
- WARN((pfn - pfn_s) != (pfn_e - pfn_s),
- "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
- (pfn_e - pfn_s) - (pfn - pfn_s));
+ for (pfn = pfn_s; pfn < pfn_e; pfn++)
+ xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
return pfn - pfn_s;
}
-/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- unsigned topidx, mididx, idx;
+ pte_t *ptep;
+ unsigned int level;
/* don't track P2M changes in autotranslate guests */
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
return true;
- if (unlikely(pfn >= MAX_P2M_PFN)) {
+ if (unlikely(pfn >= xen_p2m_size)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
}
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /* For sparse holes were the p2m leaf has real PFN along with
- * PCI holes, stick in the PFN as the MFN value.
- *
- * set_phys_range_identity() will have allocated new middle
- * and leaf pages as required so an existing p2m_mid_missing
- * or p2m_missing mean that whole range will be identity so
- * these can be switched to p2m_mid_identity or p2m_identity.
- */
- if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
- if (p2m_top[topidx] == p2m_mid_identity)
- return true;
-
- if (p2m_top[topidx] == p2m_mid_missing) {
- WARN_ON(cmpxchg(&p2m_top[topidx], p2m_mid_missing,
- p2m_mid_identity) != p2m_mid_missing);
- return true;
- }
-
- if (p2m_top[topidx][mididx] == p2m_identity)
- return true;
+ if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
+ return true;
- /* Swap over from MISSING to IDENTITY if needed. */
- if (p2m_top[topidx][mididx] == p2m_missing) {
- WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
- p2m_identity) != p2m_missing);
- return true;
- }
- }
+ ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
+ BUG_ON(!ptep || level != PG_LEVEL_4K);
- if (p2m_top[topidx][mididx] == p2m_missing)
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
return mfn == INVALID_P2M_ENTRY;
- p2m_top[topidx][mididx][idx] = mfn;
+ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
+ return mfn == IDENTITY_FRAME(pfn);
- return true;
+ return false;
}
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!alloc_p2m(pfn))
return false;
- if (!__set_phys_to_machine(pfn, mfn))
- return false;
+ return __set_phys_to_machine(pfn, mfn);
}
return true;
@@ -877,15 +655,16 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
-static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
+static struct list_head *m2p_overrides;
static DEFINE_SPINLOCK(m2p_override_lock);
static void __init m2p_override_init(void)
{
unsigned i;
- m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
- sizeof(unsigned long));
+ m2p_overrides = alloc_bootmem_align(
+ sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
+ sizeof(unsigned long));
for (i = 0; i < M2P_OVERRIDE_HASH; i++)
INIT_LIST_HEAD(&m2p_overrides[i]);
@@ -896,68 +675,9 @@ static unsigned long mfn_hash(unsigned long mfn)
return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}
-int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- int i, ret = 0;
- bool lazy = false;
- pte_t *pte;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
- if (kmap_ops &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
- for (i = 0; i < count; i++) {
- unsigned long mfn, pfn;
-
- /* Do not add to override if the map failed. */
- if (map_ops[i].status)
- continue;
-
- if (map_ops[i].flags & GNTMAP_contains_pte) {
- pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
- (map_ops[i].host_addr & ~PAGE_MASK));
- mfn = pte_mfn(*pte);
- } else {
- mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- }
- pfn = page_to_pfn(pages[i]);
-
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
- pages[i]->index = pfn_to_mfn(pfn);
-
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (kmap_ops) {
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
- if (ret)
- goto out;
- }
- }
-
-out:
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
-
/* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
+static int m2p_add_override(unsigned long mfn, struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
@@ -970,7 +690,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
address = (unsigned long)__va(pfn << PAGE_SHIFT);
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_add_override: pfn %lx not mapped", pfn))
+ "m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
@@ -1004,19 +724,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) == mfn)
+ if (__pfn_to_mfn(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
}
-EXPORT_SYMBOL_GPL(m2p_add_override);
-int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
+int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret = 0;
bool lazy = false;
+ pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
@@ -1029,35 +749,75 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
}
for (i = 0; i < count; i++) {
- unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i]));
- unsigned long pfn = page_to_pfn(pages[i]);
+ unsigned long mfn, pfn;
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
- ret = -EINVAL;
- goto out;
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
+ if (map_ops[i].flags & GNTMAP_contains_pte) {
+ pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+ (map_ops[i].host_addr & ~PAGE_MASK));
+ mfn = pte_mfn(*pte);
+ } else {
+ mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
+ pfn = page_to_pfn(pages[i]);
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
+ WARN_ON(PagePrivate(pages[i]));
+ SetPagePrivate(pages[i]);
+ set_page_private(pages[i], mfn);
+ pages[i]->index = pfn_to_mfn(pfn);
- if (kmap_ops)
- ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
- if (ret)
+ if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+ ret = -ENOMEM;
goto out;
+ }
+
+ if (kmap_ops) {
+ ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+ if (ret)
+ goto out;
+ }
}
out:
if (lazy)
arch_leave_lazy_mmu_mode();
+
return ret;
}
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
-int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn)
+static struct page *m2p_find_override(unsigned long mfn)
+{
+ unsigned long flags;
+ struct list_head *bucket;
+ struct page *p, *ret;
+
+ if (unlikely(!m2p_overrides))
+ return NULL;
+
+ ret = NULL;
+ bucket = &m2p_overrides[mfn_hash(mfn)];
+
+ spin_lock_irqsave(&m2p_override_lock, flags);
+
+ list_for_each_entry(p, bucket, lru) {
+ if (page_private(p) == mfn) {
+ ret = p;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+ return ret;
+}
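
For illustration only (not part of the patch): the m2p override bookkeeping is a plain hash table with one bucket list per hash of the mfn. A self-contained sketch with hypothetical ovr_* names; the kernel version additionally takes m2p_override_lock around the bucket walk and uses hash_long().

#include <stdio.h>
#include <stdlib.h>

#define OVR_HASH_SHIFT 3
#define OVR_HASH (1 << OVR_HASH_SHIFT)

struct ovr_entry {
	unsigned long mfn;
	unsigned long pfn;
	struct ovr_entry *next;
};

static struct ovr_entry *ovr_buckets[OVR_HASH];

static unsigned int ovr_hash(unsigned long mfn)
{
	return mfn & (OVR_HASH - 1);
}

static void ovr_add(unsigned long mfn, unsigned long pfn)
{
	struct ovr_entry *e = malloc(sizeof(*e));

	if (!e)
		exit(1);
	e->mfn = mfn;
	e->pfn = pfn;
	e->next = ovr_buckets[ovr_hash(mfn)];
	ovr_buckets[ovr_hash(mfn)] = e;
}

static struct ovr_entry *ovr_find(unsigned long mfn)
{
	struct ovr_entry *e;

	for (e = ovr_buckets[ovr_hash(mfn)]; e; e = e->next)
		if (e->mfn == mfn)
			return e;
	return NULL;
}

int main(void)
{
	ovr_add(0x1234, 42);
	ovr_add(0x1234 + OVR_HASH, 43);	/* same bucket, different mfn */

	printf("mfn 0x1234 -> pfn %lu\n", ovr_find(0x1234)->pfn);
	printf("mfn 0x%x -> pfn %lu\n", 0x1234 + OVR_HASH,
	       ovr_find(0x1234 + OVR_HASH)->pfn);
	return 0;
}
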
+
+static int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op,
+ unsigned long mfn)
{
unsigned long flags;
unsigned long pfn;
@@ -1072,7 +832,7 @@ int m2p_remove_override(struct page *page,
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
- "m2p_remove_override: pfn %lx not mapped", pfn))
+ "m2p_remove_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
@@ -1102,9 +862,8 @@ int m2p_remove_override(struct page *page,
* hypercall actually returned an error.
*/
if (kmap_op->handle == GNTST_general_error) {
- printk(KERN_WARNING "m2p_remove_override: "
- "pfn %lx mfn %lx, failed to modify kernel mappings",
- pfn, mfn);
+ pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
+ pfn, mfn);
put_balloon_scratch_page();
return -1;
}
@@ -1112,14 +871,14 @@ int m2p_remove_override(struct page *page,
xen_mc_batch();
mcs = __xen_mc_entry(
- sizeof(struct gnttab_unmap_and_replace));
+ sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr;
unmap_op->new_addr = scratch_page_address;
unmap_op->handle = kmap_op->handle;
MULTI_grant_table_op(mcs.mc,
- GNTTABOP_unmap_and_replace, unmap_op, 1);
+ GNTTABOP_unmap_and_replace, unmap_op, 1);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, scratch_page_address,
@@ -1145,35 +904,56 @@ int m2p_remove_override(struct page *page,
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
pfn = mfn_to_pfn_no_overrides(mfn);
- if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
return 0;
}
-EXPORT_SYMBOL_GPL(m2p_remove_override);
-struct page *m2p_find_override(unsigned long mfn)
+int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
- unsigned long flags;
- struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
- struct page *p, *ret;
+ int i, ret = 0;
+ bool lazy = false;
- ret = NULL;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
- spin_lock_irqsave(&m2p_override_lock, flags);
+ if (kmap_ops &&
+ !in_interrupt() &&
+ paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
- list_for_each_entry(p, bucket, lru) {
- if (page_private(p) == mfn) {
- ret = p;
- break;
+ for (i = 0; i < count; i++) {
+ unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
+ unsigned long pfn = page_to_pfn(pages[i]);
+
+ if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+ ret = -EINVAL;
+ goto out;
}
- }
- spin_unlock_irqrestore(&m2p_override_lock, flags);
+ set_page_private(pages[i], INVALID_P2M_ENTRY);
+ WARN_ON(!PagePrivate(pages[i]));
+ ClearPagePrivate(pages[i]);
+ set_phys_to_machine(pfn, pages[i]->index);
+
+ if (kmap_ops)
+ ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
+ if (ret)
+ goto out;
+ }
+out:
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
return ret;
}
+EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
@@ -1192,79 +972,29 @@ EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
- static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal", "error"};
-#define TYPE_IDENTITY 0
-#define TYPE_MISSING 1
-#define TYPE_PFN 2
-#define TYPE_UNKNOWN 3
static const char * const type_name[] = {
- [TYPE_IDENTITY] = "identity",
- [TYPE_MISSING] = "missing",
- [TYPE_PFN] = "pfn",
- [TYPE_UNKNOWN] = "abnormal"};
- unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
- unsigned int uninitialized_var(prev_level);
- unsigned int uninitialized_var(prev_type);
-
- if (!p2m_top)
- return 0;
-
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
- unsigned idx = p2m_index(pfn);
- unsigned lvl, type;
-
- lvl = 4;
- type = TYPE_UNKNOWN;
- if (p2m_top[topidx] == p2m_mid_missing) {
- lvl = 0; type = TYPE_MISSING;
- } else if (p2m_top[topidx] == NULL) {
- lvl = 0; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == NULL) {
- lvl = 1; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == p2m_identity) {
- lvl = 1; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx] == p2m_missing) {
- lvl = 1; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == 0) {
- lvl = 2; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
- lvl = 2; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
- lvl = 2; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == pfn) {
- lvl = 2; type = TYPE_PFN;
- } else if (p2m_top[topidx][mididx][idx] != pfn) {
- lvl = 2; type = TYPE_PFN;
- }
- if (pfn == 0) {
- prev_level = lvl;
- prev_type = type;
- }
- if (pfn == MAX_DOMAIN_PAGES-1) {
- lvl = 3;
- type = TYPE_UNKNOWN;
- }
- if (prev_type != type) {
- seq_printf(m, " [0x%lx->0x%lx] %s\n",
- prev_pfn_type, pfn, type_name[prev_type]);
- prev_pfn_type = pfn;
+ [P2M_TYPE_IDENTITY] = "identity",
+ [P2M_TYPE_MISSING] = "missing",
+ [P2M_TYPE_PFN] = "pfn",
+ [P2M_TYPE_UNKNOWN] = "abnormal"};
+ unsigned long pfn, first_pfn;
+ int type, prev_type;
+
+ prev_type = xen_p2m_elem_type(0);
+ first_pfn = 0;
+
+ for (pfn = 0; pfn < xen_p2m_size; pfn++) {
+ type = xen_p2m_elem_type(pfn);
+ if (type != prev_type) {
+ seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
+ type_name[prev_type]);
prev_type = type;
- }
- if (prev_level != lvl) {
- seq_printf(m, " [0x%lx->0x%lx] level %s\n",
- prev_pfn_level, pfn, level_name[prev_level]);
- prev_pfn_level = pfn;
- prev_level = lvl;
+ first_pfn = pfn;
}
}
+ seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
+ type_name[prev_type]);
return 0;
-#undef TYPE_IDENTITY
-#undef TYPE_MISSING
-#undef TYPE_PFN
-#undef TYPE_UNKNOWN
}
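
The debugfs dump above is a run-length style printout: one line per contiguous range of equal p2m type, with a final flush after the loop. A tiny stand-alone sketch of that pattern, not part of the patch, over a hypothetical type map:

#include <stdio.h>

int main(void)
{
	static const char t[] = "mmmmiiippppmmii";	/* hypothetical types */
	unsigned long pfn, first_pfn = 0;
	char prev = t[0];

	for (pfn = 0; t[pfn]; pfn++) {
		if (t[pfn] != prev) {
			printf(" [0x%lx->0x%lx] %c\n", first_pfn, pfn, prev);
			prev = t[pfn];
			first_pfn = pfn;
		}
	}
	printf(" [0x%lx->0x%lx] %c\n", first_pfn, pfn, prev);	/* final range */
	return 0;
}
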
static int p2m_dump_open(struct inode *inode, struct file *filp)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 29834b3fd87f..dfd77dec8e2b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -30,6 +30,7 @@
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
+#include "mmu.h"
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
@@ -47,8 +48,19 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
-/* Buffer used to remap identity mapped pages */
-unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+/*
+ * Buffer used to remap identity mapped pages. We only need the virtual space.
+ * The physical page behind this address is remapped as needed to different
+ * buffer pages.
+ */
+#define REMAP_SIZE (P2M_PER_PAGE - 3)
+static struct {
+ unsigned long next_area_mfn;
+ unsigned long target_pfn;
+ unsigned long size;
+ unsigned long mfns[REMAP_SIZE];
+} xen_remap_buf __initdata __aligned(PAGE_SIZE);
+static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
* The maximum amount of extra memory compared to the base size. The
@@ -64,7 +76,6 @@ unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
static void __init xen_add_extra_mem(u64 start, u64 size)
{
- unsigned long pfn;
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
@@ -84,75 +95,76 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
memblock_reserve(start, size);
+}
- xen_max_p2m_pfn = PFN_DOWN(start + size);
- for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
- unsigned long mfn = pfn_to_mfn(pfn);
-
- if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
- continue;
- WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
- pfn, mfn);
+static void __init xen_del_extra_mem(u64 start, u64 size)
+{
+ int i;
+ u64 start_r, size_r;
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ start_r = xen_extra_mem[i].start;
+ size_r = xen_extra_mem[i].size;
+
+ /* Start of region. */
+ if (start_r == start) {
+ BUG_ON(size > size_r);
+ xen_extra_mem[i].start += size;
+ xen_extra_mem[i].size -= size;
+ break;
+ }
+ /* End of region. */
+ if (start_r + size_r == start + size) {
+ BUG_ON(size > size_r);
+ xen_extra_mem[i].size -= size;
+ break;
+ }
+ /* Mid of region. */
+ if (start > start_r && start < start_r + size_r) {
+ BUG_ON(start + size > start_r + size_r);
+ xen_extra_mem[i].size = start - start_r;
+ /* Calling memblock_reserve() again is okay. */
+ xen_add_extra_mem(start + size, start_r + size_r -
+ (start + size));
+ break;
+ }
}
+ memblock_free(start, size);
}
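
xen_del_extra_mem() handles three cases: the removed range starts at the region start, ends at the region end, or sits in the middle and splits the region in two. A compact user-space sketch of the same case analysis, not part of the patch; the names are hypothetical and the memblock handling is left out.

#include <stdio.h>

struct region { unsigned long long start, size; };

static void del_range(struct region *r, struct region *tail,
		      unsigned long long start, unsigned long long size)
{
	tail->size = 0;

	if (start == r->start) {			/* start of region */
		r->start += size;
		r->size -= size;
	} else if (start + size == r->start + r->size) {	/* end of region */
		r->size -= size;
	} else if (start > r->start && start < r->start + r->size) {
		/* middle: keep the head in r, return the tail separately */
		tail->start = start + size;
		tail->size = r->start + r->size - (start + size);
		r->size = start - r->start;
	}
}

int main(void)
{
	struct region r = { 0x1000, 0x4000 }, tail;

	del_range(&r, &tail, 0x2000, 0x1000);	/* punch a hole in the middle */
	printf("head: %#llx+%#llx  tail: %#llx+%#llx\n",
	       r.start, r.size, tail.start, tail.size);
	return 0;
}
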
-static unsigned long __init xen_do_chunk(unsigned long start,
- unsigned long end, bool release)
+/*
+ * Called during boot before the p2m list can take entries beyond the
+ * hypervisor-supplied p2m list. Entries in extra mem are to be regarded as
+ * invalid.
+ */
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
- unsigned long len = 0;
- unsigned long pfn;
- int ret;
+ int i;
+ unsigned long addr = PFN_PHYS(pfn);
- for (pfn = start; pfn < end; pfn++) {
- unsigned long frame;
- unsigned long mfn = pfn_to_mfn(pfn);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (addr >= xen_extra_mem[i].start &&
+ addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
+ return INVALID_P2M_ENTRY;
+ }
- if (release) {
- /* Make sure pfn exists to start with */
- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
- continue;
- frame = mfn;
- } else {
- if (mfn != INVALID_P2M_ENTRY)
- continue;
- frame = pfn;
- }
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
+ return IDENTITY_FRAME(pfn);
+}
- ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
- &reservation);
- WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
- release ? "release" : "populate", pfn, ret);
+/*
+ * Mark all pfns of extra mem as invalid in p2m list.
+ */
+void __init xen_inv_extra_mem(void)
+{
+ unsigned long pfn, pfn_s, pfn_e;
+ int i;
- if (ret == 1) {
- if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
- if (release)
- break;
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation);
- break;
- }
- len++;
- } else
- break;
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ pfn_s = PFN_DOWN(xen_extra_mem[i].start);
+ pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
+ for (pfn = pfn_s; pfn < pfn_e; pfn++)
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
- if (len)
- printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
- release ? "Freeing" : "Populating",
- start, end, len,
- release ? "freed" : "added");
-
- return len;
}
/*
@@ -198,26 +210,62 @@ static unsigned long __init xen_find_pfn_range(
return done;
}
+static int __init xen_free_mfn(unsigned long mfn)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
+ set_xen_guest_handle(reservation.extent_start, &mfn);
+ reservation.nr_extents = 1;
+
+ return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+
/*
- * This releases a chunk of memory and then does the identity map. It's used as
+ * This releases a chunk of memory and then does the identity map. It's used
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
unsigned long *released)
{
+ unsigned long len = 0;
+ unsigned long pfn, end;
+ int ret;
+
WARN_ON(start_pfn > end_pfn);
+ end = min(end_pfn, nr_pages);
+ for (pfn = start_pfn; pfn < end; pfn++) {
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ /* Make sure pfn exists to start with */
+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+ continue;
+
+ ret = xen_free_mfn(mfn);
+ WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+
+ if (ret == 1) {
+ if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
+ break;
+ len++;
+ } else
+ break;
+ }
+
/* Need to release pages first */
- *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+ *released += len;
*identity += set_phys_range_identity(start_pfn, end_pfn);
}
/*
- * Helper function to update both the p2m and m2p tables.
+ * Helper function to update the p2m and m2p tables and kernel mapping.
*/
-static unsigned long __init xen_update_mem_tables(unsigned long pfn,
- unsigned long mfn)
+static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
struct mmu_update update = {
.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
@@ -225,161 +273,88 @@ static unsigned long __init xen_update_mem_tables(unsigned long pfn,
};
/* Update p2m */
- if (!early_set_phys_to_machine(pfn, mfn)) {
+ if (!set_phys_to_machine(pfn, mfn)) {
WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
pfn, mfn);
- return false;
+ BUG();
}
/* Update m2p */
if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
mfn, pfn);
- return false;
+ BUG();
}
- return true;
+ /* Update kernel mapping, but not for highmem. */
+ if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ return;
+
+ if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(mfn, PAGE_KERNEL), 0)) {
+ WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
+ mfn, pfn);
+ BUG();
+ }
}
/*
* This function updates the p2m and m2p tables with an identity map from
- * start_pfn to start_pfn+size and remaps the underlying RAM of the original
- * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
- * to not exhaust the reserved brk space. Doing it in properly aligned blocks
- * ensures we only allocate the minimum required leaf pages in the p2m table. It
- * copies the existing mfns from the p2m table under the 1:1 map, overwrites
- * them with the identity map and then updates the p2m and m2p tables with the
- * remapped memory.
+ * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
+ * original allocation at remap_pfn. The information needed for remapping is
+ * saved in the memory itself to avoid the need for allocating buffers. The
+ * complete remap information is contained in a linked list of buffer pages,
+ * each holding up to REMAP_SIZE MFNs and the target start PFN for the remap.
+ * This enables us to preserve the original mfn sequence while doing the
+ * remapping at a time when the memory management is capable of allocating
+ * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
+ * its callers.
*/
-static unsigned long __init xen_do_set_identity_and_remap_chunk(
+static void __init xen_do_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
+ unsigned long buf = (unsigned long)&xen_remap_buf;
+ unsigned long mfn_save, mfn;
unsigned long ident_pfn_iter, remap_pfn_iter;
- unsigned long ident_start_pfn_align, remap_start_pfn_align;
- unsigned long ident_end_pfn_align, remap_end_pfn_align;
- unsigned long ident_boundary_pfn, remap_boundary_pfn;
- unsigned long ident_cnt = 0;
- unsigned long remap_cnt = 0;
+ unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long mod;
- int i;
+ unsigned long ident_cnt = 0;
+ unsigned int i, chunk;
WARN_ON(size == 0);
BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
- /*
- * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
- * blocks. We need to keep track of both the existing pfn mapping and
- * the new pfn remapping.
- */
- mod = start_pfn % P2M_PER_PAGE;
- ident_start_pfn_align =
- mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
- mod = remap_pfn % P2M_PER_PAGE;
- remap_start_pfn_align =
- mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
- mod = (start_pfn + size) % P2M_PER_PAGE;
- ident_end_pfn_align = start_pfn + size - mod;
- mod = (remap_pfn + size) % P2M_PER_PAGE;
- remap_end_pfn_align = remap_pfn + size - mod;
-
- /* Iterate over each p2m leaf node in each range */
- for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
- ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
- ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
- /* Check we aren't past the end */
- BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
- BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
-
- /* Save p2m mappings */
- for (i = 0; i < P2M_PER_PAGE; i++)
- xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
-
- /* Set identity map which will free a p2m leaf */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + P2M_PER_PAGE);
+ mfn_save = virt_to_mfn(buf);
-#ifdef DEBUG
- /* Helps verify a p2m leaf has been freed */
- for (i = 0; i < P2M_PER_PAGE; i++) {
- unsigned int pfn = ident_pfn_iter + i;
- BUG_ON(pfn_to_mfn(pfn) != pfn);
- }
-#endif
- /* Now remap memory */
- for (i = 0; i < P2M_PER_PAGE; i++) {
- unsigned long mfn = xen_remap_buf[i];
-
- /* This will use the p2m leaf freed above */
- if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
- WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
- remap_pfn_iter + i, mfn);
- return 0;
- }
-
- remap_cnt++;
- }
-
- left -= P2M_PER_PAGE;
- }
-
- /* Max boundary space possible */
- BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+ for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
+ ident_pfn_iter < ident_end_pfn;
+ ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
+ chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
- /* Now handle the boundary conditions */
- ident_boundary_pfn = start_pfn;
- remap_boundary_pfn = remap_pfn;
- for (i = 0; i < left; i++) {
- unsigned long mfn;
+ /* Map first pfn to xen_remap_buf */
+ mfn = pfn_to_mfn(ident_pfn_iter);
+ set_pte_mfn(buf, mfn, PAGE_KERNEL);
- /* These two checks move from the start to end boundaries */
- if (ident_boundary_pfn == ident_start_pfn_align)
- ident_boundary_pfn = ident_pfn_iter;
- if (remap_boundary_pfn == remap_start_pfn_align)
- remap_boundary_pfn = remap_pfn_iter;
+ /* Save mapping information in page */
+ xen_remap_buf.next_area_mfn = xen_remap_mfn;
+ xen_remap_buf.target_pfn = remap_pfn_iter;
+ xen_remap_buf.size = chunk;
+ for (i = 0; i < chunk; i++)
+ xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
- /* Check we aren't past the end */
- BUG_ON(ident_boundary_pfn >= start_pfn + size);
- BUG_ON(remap_boundary_pfn >= remap_pfn + size);
-
- mfn = pfn_to_mfn(ident_boundary_pfn);
-
- if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
- WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
- remap_pfn_iter + i, mfn);
- return 0;
- }
- remap_cnt++;
+ /* Put remap buf into list. */
+ xen_remap_mfn = mfn;
- ident_boundary_pfn++;
- remap_boundary_pfn++;
- }
+ /* Set identity map */
+ ident_cnt += set_phys_range_identity(ident_pfn_iter,
+ ident_pfn_iter + chunk);
- /* Finish up the identity map */
- if (ident_start_pfn_align >= ident_end_pfn_align) {
- /*
- * In this case we have an identity range which does not span an
- * aligned block so everything needs to be identity mapped here.
- * If we didn't check this we might remap too many pages since
- * the align boundaries are not meaningful in this case.
- */
- ident_cnt += set_phys_range_identity(start_pfn,
- start_pfn + size);
- } else {
- /* Remapped above so check each end of the chunk */
- if (start_pfn < ident_start_pfn_align)
- ident_cnt += set_phys_range_identity(start_pfn,
- ident_start_pfn_align);
- if (start_pfn + size > ident_pfn_iter)
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- start_pfn + size);
+ left -= chunk;
}
- BUG_ON(ident_cnt != size);
- BUG_ON(remap_cnt != size);
-
- return size;
+ /* Restore old xen_remap_buf mapping */
+ set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
/*
@@ -396,8 +371,7 @@ static unsigned long __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *remapped,
- unsigned long *released)
+ unsigned long *identity, unsigned long *released)
{
unsigned long pfn;
unsigned long i = 0;
@@ -431,19 +405,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (size > remap_range_size)
size = remap_range_size;
- if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
- WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
- cur_pfn, size, remap_pfn);
- xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
- break;
- }
+ xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
*identity += size;
- *remapped += size;
}
/*
@@ -458,13 +425,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
return remap_pfn;
}
-static unsigned long __init xen_set_identity_and_remap(
+static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
unsigned long *released)
{
phys_addr_t start = 0;
unsigned long identity = 0;
- unsigned long remapped = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
@@ -494,8 +460,7 @@ static unsigned long __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &remapped,
- &num_released);
+ &identity, &num_released);
start = end;
}
}
@@ -503,12 +468,63 @@ static unsigned long __init xen_set_identity_and_remap(
*released = num_released;
pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
- pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
- last_pfn);
pr_info("Released %ld page(s)\n", num_released);
+}
+
+/*
+ * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
+ * The remap information (which mfn is remapped to which pfn) is contained
+ * in the memory to be remapped itself, in a linked list anchored at xen_remap_mfn.
+ * This scheme allows remapping the different chunks in arbitrary order while
+ * the resulting mapping will be independent of the order.
+ */
+void __init xen_remap_memory(void)
+{
+ unsigned long buf = (unsigned long)&xen_remap_buf;
+ unsigned long mfn_save, mfn, pfn;
+ unsigned long remapped = 0;
+ unsigned int i;
+ unsigned long pfn_s = ~0UL;
+ unsigned long len = 0;
+
+ mfn_save = virt_to_mfn(buf);
+
+ while (xen_remap_mfn != INVALID_P2M_ENTRY) {
+ /* Map the remap information */
+ set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
- return last_pfn;
+ BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
+
+ pfn = xen_remap_buf.target_pfn;
+ for (i = 0; i < xen_remap_buf.size; i++) {
+ mfn = xen_remap_buf.mfns[i];
+ xen_update_mem_tables(pfn, mfn);
+ remapped++;
+ pfn++;
+ }
+ if (pfn_s == ~0UL || pfn == pfn_s) {
+ pfn_s = xen_remap_buf.target_pfn;
+ len += xen_remap_buf.size;
+ } else if (pfn_s + len == xen_remap_buf.target_pfn) {
+ len += xen_remap_buf.size;
+ } else {
+ xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+ pfn_s = xen_remap_buf.target_pfn;
+ len = xen_remap_buf.size;
+ }
+
+ mfn = xen_remap_mfn;
+ xen_remap_mfn = xen_remap_buf.next_area_mfn;
+ }
+
+ if (pfn_s != ~0UL && len)
+ xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
+
+ set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
+
+ pr_info("Remapped %ld page(s)\n", remapped);
}
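
The remap bookkeeping introduced here stores its own metadata inside the memory that is going to be remapped: each chunk records the target pfn, the mfn list and a link to the previously recorded chunk, and xen_remap_memory() later walks that chain. A user-space model of the idea, not part of the patch; the rb_* names, RB_SIZE and the malloc-based "pages" are stand-ins for the mfn-linked buffer pages used by the real code.

#include <stdio.h>
#include <stdlib.h>

#define RB_SIZE 4	/* stand-in for REMAP_SIZE */

struct rb_chunk {
	struct rb_chunk *next;		/* previously recorded chunk */
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[RB_SIZE];
};

static struct rb_chunk *rb_head;	/* stand-in for xen_remap_mfn */

static void rb_record(unsigned long start_mfn, unsigned long target_pfn,
		      unsigned long size)
{
	struct rb_chunk *c = malloc(sizeof(*c));
	unsigned long i;

	if (!c)
		exit(1);
	c->next = rb_head;
	c->target_pfn = target_pfn;
	c->size = size;
	for (i = 0; i < size; i++)
		c->mfns[i] = start_mfn + i;
	rb_head = c;
}

static void rb_apply(void)
{
	while (rb_head) {
		struct rb_chunk *c = rb_head;
		unsigned long i;

		for (i = 0; i < c->size; i++)
			printf("remap mfn %lx -> pfn %lx\n",
			       c->mfns[i], c->target_pfn + i);

		rb_head = c->next;
		free(c);
	}
}

int main(void)
{
	rb_record(0x100, 0x2000, RB_SIZE);	/* two hypothetical chunks */
	rb_record(0x200, 0x2000 + RB_SIZE, 3);
	rb_apply();
	return 0;
}
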
+
static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages = MAX_DOMAIN_PAGES;
@@ -569,7 +585,6 @@ char * __init xen_memory_setup(void)
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
- unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
int i;
int op;
@@ -616,17 +631,14 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Set identity map on non-RAM pages and remap the underlying RAM.
+ * Set identity map on non-RAM pages and prepare remapping the
+ * underlying RAM.
*/
- last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+ &xen_released_pages);
extra_pages += xen_released_pages;
- if (last_pfn > max_pfn) {
- max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
- mem_end = PFN_PHYS(max_pfn);
- }
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
@@ -653,6 +665,7 @@ char * __init xen_memory_setup(void)
size = min(size, (u64)extra_pages * PAGE_SIZE);
extra_pages -= size / PAGE_SIZE;
xen_add_extra_mem(addr, size);
+ xen_max_p2m_pfn = PFN_DOWN(addr + size);
} else
type = E820_UNUSABLE;
}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 4ab9298c5e17..5686bd9d58cc 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -29,11 +29,13 @@ void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void xen_reserve_top(void);
-extern unsigned long xen_max_p2m_pfn;
void xen_mm_pin_all(void);
void xen_mm_unpin_all(void);
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
+void __init xen_inv_extra_mem(void);
+void __init xen_remap_memory(void);
char * __init xen_memory_setup(void);
char * xen_auto_xlated_memory_setup(void);
void __init xen_arch_setup(void);
@@ -46,7 +48,7 @@ void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
-unsigned long __init xen_revector_p2m_tree(void);
+void __init xen_vmalloc_p2m_tree(void);
void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 81f57e8c8f1b..e31d4949124a 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -98,12 +98,6 @@ config XTENSA_VARIANT_DC233C
help
This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
-config XTENSA_VARIANT_S6000
- bool "s6000 - Stretch software configurable processor"
- select VARIANT_IRQ_SWITCH
- select ARCH_REQUIRE_GPIOLIB
- select XTENSA_CALIBRATE_CCOUNT
-
config XTENSA_VARIANT_CUSTOM
bool "Custom Xtensa processor configuration"
select MAY_HAVE_SMP
@@ -126,7 +120,6 @@ config XTENSA_VARIANT_NAME
default "dc232b" if XTENSA_VARIANT_DC232B
default "dc233c" if XTENSA_VARIANT_DC233C
default "fsf" if XTENSA_VARIANT_FSF
- default "s6000" if XTENSA_VARIANT_S6000
default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM
config XTENSA_VARIANT_MMU
@@ -191,7 +184,6 @@ config HOTPLUG_CPU
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
bool "Initialize Xtensa MMU inside the Linux kernel code"
- depends on MMU
default y
help
Earlier version initialized the MMU in the exception vector
@@ -311,15 +303,10 @@ config XTENSA_PLATFORM_XT2000
XT2000 is the name of Tensilica's feature-rich emulation platform.
This hardware is capable of running a full Linux distribution.
-config XTENSA_PLATFORM_S6105
- bool "S6105"
- select HAVE_IDE
- select SERIAL_CONSOLE
- select NO_IOPORT_MAP
-
config XTENSA_PLATFORM_XTFPGA
bool "XTFPGA"
select ETHOC if ETHERNET
+ select PLATFORM_WANT_DEFAULT_MEM
select SERIAL_CONSOLE
select XTENSA_CALIBRATE_CCOUNT
help
@@ -406,6 +393,41 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
+config PLATFORM_WANT_DEFAULT_MEM
+ def_bool n
+
+config DEFAULT_MEM_START
+ hex "Physical address of the default memory area start"
+ depends on PLATFORM_WANT_DEFAULT_MEM
+ default 0x00000000 if MMU
+ default 0x40000000 if !MMU
+ help
+	  This is a fallback start address of the default memory area. It is
+	  used when no physical memory size is passed through the DTB or via a
+	  boot parameter from the bootloader.
+
+ In noMMU configuration the following parameters are derived from it:
+ - kernel load address;
+ - kernel entry point address;
+ - relocatable vectors base address;
+ - uBoot load address;
+ - TASK_SIZE.
+
+ If unsure, leave the default value here.
+
+config DEFAULT_MEM_SIZE
+ hex "Maximal size of the default memory area"
+ depends on PLATFORM_WANT_DEFAULT_MEM
+ default 0x04000000
+ help
+	  This is a fallback size of the default memory area. It is used when
+	  no physical memory size is passed through the DTB or via a boot
+	  parameter from the bootloader.
+
+ It's also used for TASK_SIZE calculation in noMMU configuration.
+
+ If unsure, leave the default value here.
+
endmenu
menu "Executable file formats"
@@ -414,6 +436,12 @@ source "fs/Kconfig.binfmt"
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index af7da74d535f..8430af27de0a 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -4,7 +4,7 @@ source "lib/Kconfig.debug"
config DEBUG_TLB_SANITY
bool "Debug TLB sanity"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && MMU
help
Enable this to turn on TLB sanity check on each entry to userspace.
This check can spot missing TLB invalidation/wrong PTE permissions/
@@ -14,7 +14,7 @@ config DEBUG_TLB_SANITY
config LD_NO_RELAX
bool "Disable linker relaxation"
- default n
+ default y
help
Enable this function to disable link-time optimizations.
The default linker behavior is to combine identical literal
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 472533064b46..f9e6a068aafd 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -35,7 +35,6 @@ endif
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
-platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
PLATFORM = $(platform-y)
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 932b58ef33d4..958b33af96b7 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -41,6 +41,7 @@ SECTIONS
__bss_end = .;
}
+#ifdef CONFIG_MMU
/*
* This is a remapped copy of the Reset Vector Code.
* It keeps gdb in sync with the PC after switching
@@ -51,4 +52,5 @@ SECTIONS
{
*(.ResetVector.remapped_text)
}
+#endif
}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 1388a499753b..9341a5750694 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
+#include <asm/vectors.h>
#include <linux/linkage.h>
.section .ResetVector.text, "ax"
@@ -34,12 +35,7 @@ _ResetVector:
.align 4
RomInitAddr:
-#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
- XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
- .word 0x00003000
-#else
- .word 0xd0003000
-#endif
+ .word LOAD_MEMORY_ADDRESS
RomBootParam:
.word _bootparam
_bootparam:
@@ -79,6 +75,7 @@ reset:
movi a4, 0
jx a0
+#ifdef CONFIG_MMU
.align 4
.section .ResetVector.remapped_text, "x"
@@ -102,3 +99,4 @@ _RemappedSetupMMU:
#endif
.end no-absolute-literals
+#endif
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index 545759819ef9..403fcf23405c 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,11 +4,15 @@
# for more details.
#
+ifdef CONFIG_MMU
ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
UIMAGE_LOADADDR = 0x00003000
else
UIMAGE_LOADADDR = 0xd0003000
endif
+else
+UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) )
+endif
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index b966baf82cae..e4d193e7a300 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -143,7 +143,6 @@ CONFIG_MMU=y
#
CONFIG_XTENSA_VARIANT_FSF=y
# CONFIG_XTENSA_VARIANT_DC232B is not set
-# CONFIG_XTENSA_VARIANT_S6000 is not set
# CONFIG_XTENSA_UNALIGNED_USER is not set
# CONFIG_PREEMPT is not set
CONFIG_XTENSA_CALIBRATE_CCOUNT=y
@@ -161,7 +160,6 @@ CONFIG_XTENSA_ISS_NETWORK=y
#
CONFIG_XTENSA_PLATFORM_ISS=y
# CONFIG_XTENSA_PLATFORM_XT2000 is not set
-# CONFIG_XTENSA_PLATFORM_S6105 is not set
# CONFIG_GENERIC_CALIBRATE_DELAY is not set
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
@@ -759,3 +757,4 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
CONFIG_NLATTR=y
+CONFIG_LD_NO_RELAX=y
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
deleted file mode 100644
index 9471265b8ca6..000000000000
--- a/arch/xtensa/configs/s6105_defconfig
+++ /dev/null
@@ -1,615 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.29-rc7-s6
-# Tue Mar 10 11:09:26 2009
-#
-# CONFIG_FRAME_POINTER is not set
-CONFIG_ZONE_DMA=y
-CONFIG_XTENSA=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT_MAP=y
-CONFIG_HZ=100
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
-
-#
-# RCU Subsystem
-#
-# CONFIG_CLASSIC_RCU is not set
-# CONFIG_TREE_RCU is not set
-CONFIG_PREEMPT_RCU=y
-# CONFIG_RCU_TRACE is not set
-# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_GROUP_SCHED is not set
-# CONFIG_CGROUPS is not set
-# CONFIG_SYSFS_DEPRECATED_V2 is not set
-# CONFIG_RELAY is not set
-# CONFIG_NAMESPACES is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL=y
-CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_AIO=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
-# CONFIG_SLOB is not set
-# CONFIG_PROFILING is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-# CONFIG_MODULES is not set
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_BLK_DEV_INTEGRITY is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-# CONFIG_FREEZER is not set
-# CONFIG_MMU is not set
-CONFIG_VARIANT_IRQ_SWITCH=y
-
-#
-# Processor type and features
-#
-# CONFIG_XTENSA_VARIANT_FSF is not set
-# CONFIG_XTENSA_VARIANT_DC232B is not set
-CONFIG_XTENSA_VARIANT_S6000=y
-# CONFIG_XTENSA_UNALIGNED_USER is not set
-CONFIG_PREEMPT=y
-# CONFIG_HIGHMEM is not set
-CONFIG_XTENSA_CALIBRATE_CCOUNT=y
-CONFIG_SERIAL_CONSOLE=y
-# CONFIG_XTENSA_ISS_NETWORK is not set
-
-#
-# Bus options
-#
-# CONFIG_PCI is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# Platform options
-#
-# CONFIG_XTENSA_PLATFORM_ISS is not set
-# CONFIG_XTENSA_PLATFORM_XT2000 is not set
-CONFIG_XTENSA_PLATFORM_S6105=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400 debug bootmem_debug loglevel=7"
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_PHYS_ADDR_T_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_VIRT_TO_BUS=y
-
-#
-# Executable file formats
-#
-CONFIG_KCORE_ELF=y
-CONFIG_BINFMT_FLAT=y
-# CONFIG_BINFMT_ZFLAT is not set
-# CONFIG_BINFMT_SHARED_FLAT is not set
-# CONFIG_HAVE_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_COMPAT_NET_DEV_OPS=y
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_NET_DSA is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_SCHED is not set
-# CONFIG_DCB is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
-# CONFIG_WIRELESS is not set
-# CONFIG_WIMAX is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-# CONFIG_BLK_DEV_XIP is not set
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-# CONFIG_BLK_DEV_HD is not set
-# CONFIG_MISC_DEVICES is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_MACVLAN is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_VETH is not set
-CONFIG_PHYLIB=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_MARVELL_PHY is not set
-# CONFIG_DAVICOM_PHY is not set
-# CONFIG_QSEMI_PHY is not set
-# CONFIG_LXT_PHY is not set
-# CONFIG_CICADA_PHY is not set
-# CONFIG_VITESSE_PHY is not set
-CONFIG_SMSC_PHY=y
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
-# CONFIG_REALTEK_PHY is not set
-# CONFIG_NATIONAL_PHY is not set
-# CONFIG_STE10XP is not set
-# CONFIG_LSI_ET1011C_PHY is not set
-# CONFIG_FIXED_PHY is not set
-# CONFIG_MDIO_BITBANG is not set
-# CONFIG_NET_ETHERNET is not set
-CONFIG_NETDEV_1000=y
-CONFIG_S6GMAC=y
-# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
-
-#
-# Enable WiMAX (Networking options) to see the WiMAX drivers
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_ISDN is not set
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-# CONFIG_INPUT is not set
-
-#
-# Hardware I/O ports
-#
-# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-# CONFIG_SPI is not set
-CONFIG_ARCH_REQUIRE_GPIOLIB=y
-CONFIG_GPIOLIB=y
-# CONFIG_DEBUG_GPIO is not set
-# CONFIG_GPIO_SYSFS is not set
-
-#
-# Memory mapped GPIO expanders:
-#
-
-#
-# I2C GPIO expanders:
-#
-
-#
-# PCI GPIO expanders:
-#
-
-#
-# SPI GPIO expanders:
-#
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
-# CONFIG_WATCHDOG is not set
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_TMIO is not set
-# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-# CONFIG_SOUND is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
-# CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_ACCESSIBILITY is not set
-CONFIG_RTC_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-
-#
-# RTC interfaces
-#
-# CONFIG_RTC_INTF_SYSFS is not set
-# CONFIG_RTC_INTF_PROC is not set
-# CONFIG_RTC_INTF_DEV is not set
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-# CONFIG_RTC_DRV_DS1307 is not set
-# CONFIG_RTC_DRV_DS1374 is not set
-# CONFIG_RTC_DRV_DS1672 is not set
-# CONFIG_RTC_DRV_MAX6900 is not set
-# CONFIG_RTC_DRV_RS5C372 is not set
-# CONFIG_RTC_DRV_ISL1208 is not set
-# CONFIG_RTC_DRV_X1205 is not set
-# CONFIG_RTC_DRV_PCF8563 is not set
-# CONFIG_RTC_DRV_PCF8583 is not set
-CONFIG_RTC_DRV_M41T80=y
-# CONFIG_RTC_DRV_M41T80_WDT is not set
-# CONFIG_RTC_DRV_S35390A is not set
-# CONFIG_RTC_DRV_FM3130 is not set
-# CONFIG_RTC_DRV_RX8581 is not set
-
-#
-# SPI RTC drivers
-#
-
-#
-# Platform RTC drivers
-#
-# CONFIG_RTC_DRV_DS1286 is not set
-# CONFIG_RTC_DRV_DS1511 is not set
-# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_DS1742 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
-# CONFIG_RTC_DRV_M48T86 is not set
-# CONFIG_RTC_DRV_M48T35 is not set
-# CONFIG_RTC_DRV_M48T59 is not set
-# CONFIG_RTC_DRV_BQ4802 is not set
-# CONFIG_RTC_DRV_V3020 is not set
-
-#
-# on-CPU RTC drivers
-#
-# CONFIG_DMADEVICES is not set
-# CONFIG_UIO is not set
-# CONFIG_STAGING is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
-# CONFIG_XFS_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-# CONFIG_NETWORK_FILESYSTEMS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
-# CONFIG_DLM is not set
-
-#
-# Kernel hacking
-#
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_DEBUG_NOMMU_REGIONS=y
-# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-
-#
-# Tracers
-#
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-# CONFIG_SAMPLES is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-CONFIG_GENERIC_FIND_LAST_BIT=y
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
-# CONFIG_CRC32 is not set
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_DMA=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 105d38922c44..86a9ab2e2ca9 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += errno.h
generic-y += exec.h
generic-y += fcntl.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += irq_work.h
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index e72aaca7a77f..5f67ace97b32 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -67,6 +67,8 @@ extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
@@ -84,7 +86,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
* (see also Documentation/cachetlb.txt)
*/
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
#ifdef CONFIG_SMP
void flush_cache_all(void);
@@ -150,7 +153,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 2c7901edffaf..01cef6b40829 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -25,7 +25,7 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
+#define kmap_prot PAGE_KERNEL_EXEC
#if DCACHE_WAY_SIZE > PAGE_SIZE
#define get_pkmap_color get_pkmap_color
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 600781edc8a3..e256f2270ec9 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -26,8 +26,16 @@
#include <asm/pgtable.h>
#include <asm/vectors.h>
+#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
#ifdef __ASSEMBLY__
@@ -75,7 +83,7 @@
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
- movi a2, 0x40000006
+ movi a2, 0x40000000 | XCHAL_SPANNING_WAY
idtlb a2
iitlb a2
isync
@@ -141,9 +149,6 @@
jx a4
1:
- movi a2, VECBASE_RESET_VADDR
- wsr a2, vecbase
-
/* Step 5: remove temporary mapping. */
idtlb a7
iitlb a7
@@ -156,6 +161,33 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+ /* Enable the data and instruction caches in the DEFAULT_MEMORY region
+ * if the processor has a DTLB and an ITLB.
+ */
+
+ movi a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a7, CA_WRITEBACK
+ movi a8, 0x20000000
+ movi a9, PLATFORM_DEFAULT_MEM_SIZE
+ j 2f
+1:
+ sub a9, a9, a8
+2:
+ rdtlb1 a3, a5
+ ritlb1 a4, a5
+ and a3, a3, a6
+ and a4, a4, a6
+ or a3, a3, a7
+ or a4, a4, a7
+ wdtlb a3, a5
+ witlb a4, a5
+ add a5, a5, a8
+ bltu a8, a9, 1b
+
+#endif
+
.endm
#endif /*__ASSEMBLY__*/
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index d33c71a8c9ec..04c8ebdc4517 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -50,11 +50,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
-#ifdef CONFIG_MMU
void init_mmu(void);
-#else
-static inline void init_mmu(void) { }
-#endif
static inline void set_rasid_register (unsigned long val)
{
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 3407cf7989b7..22984fd1d846 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -1,3 +1,7 @@
+static inline void init_mmu(void)
+{
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index abe24c6f8b2f..ad38500471fa 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -20,10 +20,10 @@
* Fixed TLB translations in the processor.
*/
-#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
-#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
-#define XCHAL_KSEG_PADDR 0x00000000
-#define XCHAL_KSEG_SIZE 0x08000000
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
/*
* PAGE_SHIFT determines the page size
@@ -37,7 +37,7 @@
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else
-#define PAGE_OFFSET 0
+#define PAGE_OFFSET __XTENSA_UL_CONST(0)
#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
@@ -145,7 +145,7 @@ extern void copy_page(void *to, void *from);
* some extra work
*/
-#if DCACHE_WAY_SIZE > PAGE_SIZE
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
unsigned long to_paddr, unsigned long from_paddr);
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0383aed59121..872bf0194e6d 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -178,6 +178,7 @@
#else /* no mmu */
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index c7211e7e182d..876eb380aa26 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -320,7 +320,7 @@ __asm__ __volatile__( \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -330,7 +330,7 @@ __asm__ __volatile__( \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index f74ddfbb92ef..a46c53f36113 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -19,6 +19,7 @@
#define _XTENSA_VECTORS_H
#include <variant/core.h>
+#include <platform/hardware.h>
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
@@ -51,13 +52,13 @@
/* MMU Not being used - Virtual == Physical */
/* VECBASE */
- #define VIRTUAL_MEMORY_ADDRESS 0x00002000
+ #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000)
/* Location of the start of the kernel text, _start */
- #define KERNELOFFSET 0x00003000
+ #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000)
/* Loaded just above possibly live vectors */
- #define LOAD_MEMORY_ADDRESS 0x00003000
+ #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
#endif /* CONFIG_MMU */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 00eed6786d7e..201aec0e0446 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -55,6 +55,12 @@
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
/*
* Flags for msync
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 39acec0cf0b1..4120af086160 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -91,4 +91,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index aeeb3cc8a410..15a461e2a0ed 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -112,6 +112,11 @@ ENTRY(_startup)
movi a0, 0
+#if XCHAL_HAVE_VECBASE
+ movi a2, VECBASE_RESET_VADDR
+ wsr a2, vecbase
+#endif
+
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5d3f7a119ed1..83cf49685373 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -57,6 +57,7 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
return sys_fadvise64_64(fd, offset, len, advice);
}
+#ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
@@ -93,3 +94,4 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = COLOUR_ALIGN(addr, pgoff);
}
}
+#endif
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f54f78e24d7b..e601e2fbe8e6 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -2,6 +2,6 @@
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
-obj-y := init.o cache.o misc.o
-obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-y := init.o misc.o
+obj-$(CONFIG_MMU) += cache.o fault.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 77ed20209ca5..9a9a5935bd36 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -239,6 +239,17 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
+ /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
+ * accounting doesn't work for pages below that address.
+ *
+ * If PLATFORM_DEFAULT_MEM_START is zero, reserve the page at address 0:
+ * successful allocations should never return NULL.
+ */
+ if (PLATFORM_DEFAULT_MEM_START)
+ mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0);
+ else
+ mem_reserve(0, 1, 0);
+
sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -332,18 +343,24 @@ void __init mem_init(void)
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
+#ifdef CONFIG_MMU
" vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
- " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#endif
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
+#ifdef CONFIG_MMU
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
+#else
+ min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
+#endif
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
diff --git a/arch/xtensa/platforms/s6105/Makefile b/arch/xtensa/platforms/s6105/Makefile
deleted file mode 100644
index 0be6194bcb72..000000000000
--- a/arch/xtensa/platforms/s6105/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# Makefile for the Stretch S6105 eval board
-
-obj-y := setup.o device.o
diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c
deleted file mode 100644
index 4f4fc971042f..000000000000
--- a/arch/xtensa/platforms/s6105/device.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * s6105 platform devices
- *
- * Copyright (c) 2009 emlix GmbH
- */
-
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#include <platform/gpio.h>
-
-#define GPIO3_INTNUM 3
-#define UART_INTNUM 4
-#define GMAC_INTNUM 5
-
-static const signed char gpio3_irq_mappings[] = {
- S6_INTC_GPIO(3),
- -1
-};
-
-static const signed char uart_irq_mappings[] = {
- S6_INTC_UART(0),
- S6_INTC_UART(1),
- -1,
-};
-
-static const signed char gmac_irq_mappings[] = {
- S6_INTC_GMAC_STAT,
- S6_INTC_GMAC_ERR,
- S6_INTC_DMA_HOSTTERMCNT(0),
- S6_INTC_DMA_HOSTTERMCNT(1),
- -1
-};
-
-const signed char *platform_irq_mappings[NR_IRQS] = {
- [GPIO3_INTNUM] = gpio3_irq_mappings,
- [UART_INTNUM] = uart_irq_mappings,
- [GMAC_INTNUM] = gmac_irq_mappings,
-};
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .membase = (void *)S6_REG_UART + 0x0000,
- .mapbase = S6_REG_UART + 0x0000,
- .irq = UART_INTNUM,
- .uartclk = S6_SCLK,
- .regshift = 2,
- .iotype = SERIAL_IO_MEM,
- .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST,
- },
- {
- .membase = (void *)S6_REG_UART + 0x1000,
- .mapbase = S6_REG_UART + 0x1000,
- .irq = UART_INTNUM,
- .uartclk = S6_SCLK,
- .regshift = 2,
- .iotype = SERIAL_IO_MEM,
- .flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST,
- },
- { },
-};
-
-static struct resource s6_gmac_resource[] = {
- {
- .name = "mem",
- .start = (resource_size_t)S6_REG_GMAC,
- .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "dma",
- .start = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX),
- .end = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1,
- .flags = IORESOURCE_DMA,
- },
- {
- .name = "dma",
- .start = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX),
- .end = (resource_size_t)
- DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1,
- .flags = IORESOURCE_DMA,
- },
- {
- .name = "io",
- .start = (resource_size_t)S6_MEM_GMAC,
- .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "irq",
- .start = (resource_size_t)GMAC_INTNUM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "irq",
- .start = (resource_size_t)PHY_POLL,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int __init prepare_phy_irq(int pin)
-{
- int irq;
- if (gpio_request(pin, "s6gmac_phy") < 0)
- goto fail;
- if (gpio_direction_input(pin) < 0)
- goto free;
- irq = gpio_to_irq(pin);
- if (irq < 0)
- goto free;
- if (irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0)
- goto free;
- return irq;
-free:
- gpio_free(pin);
-fail:
- return PHY_POLL;
-}
-
-static struct platform_device platform_devices[] = {
- {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
- },
- {
- .name = "s6gmac",
- .id = 0,
- .resource = s6_gmac_resource,
- .num_resources = ARRAY_SIZE(s6_gmac_resource),
- },
- {
- I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62),
- },
-};
-
-static int __init device_init(void)
-{
- int i;
-
- s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ);
-
- for (i = 0; i < ARRAY_SIZE(platform_devices); i++)
- platform_device_register(&platform_devices[i]);
- return 0;
-}
-arch_initcall_sync(device_init);
diff --git a/arch/xtensa/platforms/s6105/include/platform/gpio.h b/arch/xtensa/platforms/s6105/include/platform/gpio.h
deleted file mode 100644
index fa11aa4b61e9..000000000000
--- a/arch/xtensa/platforms/s6105/include/platform/gpio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_XTENSA_S6105_GPIO_H
-#define __ASM_XTENSA_S6105_GPIO_H
-
-#define GPIO_BP_TEMP_ALARM 0
-#define GPIO_PB_RESET_IN 1
-#define GPIO_EXP_IRQ 2
-#define GPIO_TRIGGER_IRQ 3
-#define GPIO_RTC_IRQ 4
-#define GPIO_PHY_IRQ 5
-#define GPIO_IMAGER_RESET 6
-#define GPIO_SD_IRQ 7
-#define GPIO_MINI_BOOT_INH 8
-#define GPIO_BOARD_RESET 9
-#define GPIO_EXP_PRESENT 10
-#define GPIO_LED1_NGREEN 12
-#define GPIO_LED1_RED 13
-#define GPIO_LED0_NGREEN 14
-#define GPIO_LED0_NRED 15
-#define GPIO_SPI_CS0 16
-#define GPIO_SPI_CS1 17
-#define GPIO_SPI_CS3 19
-#define GPIO_SPI_CS4 20
-#define GPIO_SD_WP 21
-#define GPIO_BP_RESET 22
-#define GPIO_ALARM_OUT 23
-
-#endif /* __ASM_XTENSA_S6105_GPIO_H */
diff --git a/arch/xtensa/platforms/s6105/include/platform/hardware.h b/arch/xtensa/platforms/s6105/include/platform/hardware.h
deleted file mode 100644
index d628efac7089..000000000000
--- a/arch/xtensa/platforms/s6105/include/platform/hardware.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __XTENSA_S6105_HARDWARE_H
-#define __XTENSA_S6105_HARDWARE_H
-
-#define PLATFORM_DEFAULT_MEM_START 0x40000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
-
-#define MAX_DMA_ADDRESS 0
-
-#define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x1000)
-
-#endif /* __XTENSA_S6105_HARDWARE_H */
diff --git a/arch/xtensa/platforms/s6105/include/platform/serial.h b/arch/xtensa/platforms/s6105/include/platform/serial.h
deleted file mode 100644
index c8a771e5981b..000000000000
--- a/arch/xtensa/platforms/s6105/include/platform/serial.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ASM_XTENSA_S6105_SERIAL_H
-#define __ASM_XTENSA_S6105_SERIAL_H
-
-#include <variant/hardware.h>
-
-#define BASE_BAUD (S6_SCLK / 16)
-
-#endif /* __ASM_XTENSA_S6105_SERIAL_H */
diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c
deleted file mode 100644
index 86ce730f7913..000000000000
--- a/arch/xtensa/platforms/s6105/setup.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * s6105 control routines
- *
- * Copyright (c) 2009 emlix GmbH
- */
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootparam.h>
-
-#include <variant/hardware.h>
-#include <variant/gpio.h>
-
-#include <platform/gpio.h>
-
-void platform_halt(void)
-{
- local_irq_disable();
- while (1)
- ;
-}
-
-void platform_power_off(void)
-{
- platform_halt();
-}
-
-void platform_restart(void)
-{
- platform_halt();
-}
-
-void __init platform_setup(char **cmdline)
-{
- unsigned long reg;
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
- S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
- reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
- S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
- writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
- reg &= ~(1 << S6_GREG1_BLOCK_SB);
- reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
- writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);
-
- reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
- reg |= 1 << S6_GREG1_BLOCK_SB;
- reg |= 1 << S6_GREG1_BLOCK_GMAC;
- writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);
-
- printk(KERN_NOTICE "S6105 on Stretch S6000 - "
- "Copyright (C) 2009 emlix GmbH <info@emlix.com>\n");
-}
-
-void __init platform_init(bp_tag_t *first)
-{
- s6_gpio_init(0);
- gpio_request(GPIO_LED1_NGREEN, "led1_green");
- gpio_request(GPIO_LED1_RED, "led1_red");
- gpio_direction_output(GPIO_LED1_NGREEN, 1);
-}
-
-void platform_heartbeat(void)
-{
- static unsigned int c;
-
- if (!(++c & 0x4F))
- gpio_direction_output(GPIO_LED1_RED, !(c & 0x10));
-}
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index aeb316b7ff88..6edd20bb4565 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -17,8 +17,8 @@
/* Memory configuration. */
-#define PLATFORM_DEFAULT_MEM_START 0x00000000
-#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
+#define PLATFORM_DEFAULT_MEM_START CONFIG_DEFAULT_MEM_START
+#define PLATFORM_DEFAULT_MEM_SIZE CONFIG_DEFAULT_MEM_SIZE
/* Interrupt configuration. */
diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile
deleted file mode 100644
index 3e7ef0a0c498..000000000000
--- a/arch/xtensa/variants/s6000/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# s6000 Makefile
-
-obj-y += irq.o gpio.o dmac.o
-obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o
diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c
deleted file mode 100644
index 39154563ee17..000000000000
--- a/arch/xtensa/variants/s6000/delay.c
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <asm/timex.h>
-#include <asm/io.h>
-#include <variant/hardware.h>
-
-#define LOOPS 10
-void platform_calibrate_ccount(void)
-{
- u32 uninitialized_var(a);
- u32 uninitialized_var(u);
- u32 b;
- u32 tstamp = S6_REG_GREG1 + S6_GREG1_GLOBAL_TIMER;
- int i = LOOPS+1;
- do {
- u32 t = u;
- asm volatile(
- "1: l32i %0, %2, 0 ;"
- " beq %0, %1, 1b ;"
- : "=&a"(u) : "a"(t), "a"(tstamp));
- b = get_ccount();
- if (i == LOOPS)
- a = b;
- } while (--i >= 0);
- b -= a;
- ccount_freq = b * (100000UL / LOOPS);
-}
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
deleted file mode 100644
index 340f5bb0b5ef..000000000000
--- a/arch/xtensa/variants/s6000/dmac.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- * (c) 2008 emlix GmbH http://www.emlix.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <asm/cacheflush.h>
-#include <variant/dmac.h>
-
-/* DMA engine lookup */
-
-struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
-
-
-/* DMA control, per engine */
-
-void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
-{
- if (xtensa_need_flush_dma_source(src)) {
- u32 base = src;
- u32 span = size;
- u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- if (chunk && (size > chunk)) {
- s32 skip =
- readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
- u32 gaps = (size+chunk-1)/chunk - 1;
- if (skip >= 0) {
- span += gaps * skip;
- } else if (-skip > chunk) {
- s32 decr = gaps * (chunk + skip);
- base += decr;
- span = chunk - decr;
- } else {
- span = max(span + gaps * skip,
- (chunk + skip) * gaps - skip);
- }
- }
- flush_dcache_unaligned(base, span);
- }
- if (xtensa_need_invalidate_dma_destination(dst)) {
- u32 base = dst;
- u32 span = size;
- u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- if (chunk && (size > chunk)) {
- s32 skip =
- readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
- u32 gaps = (size+chunk-1)/chunk - 1;
- if (skip >= 0) {
- span += gaps * skip;
- } else if (-skip > chunk) {
- s32 decr = gaps * (chunk + skip);
- base += decr;
- span = chunk - decr;
- } else {
- span = max(span + gaps * skip,
- (chunk + skip) * gaps - skip);
- }
- }
- invalidate_dcache_unaligned(base, span);
- }
- s6dmac_put_fifo(dmac, chan, src, dst, size);
-}
-
-void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
-{
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- _s6dmac_disable_error_irqs(dmac, mask);
- spin_unlock_irqrestore(spinl, flags);
-}
-
-u32 s6dmac_int_sources(u32 dmac, u32 channel)
-{
- u32 mask, ret, tmp;
- mask = 1 << channel;
-
- tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
- ret = tmp >> channel;
-
- tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
- ret |= (tmp >> channel) << 1;
-
- tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
- tmp &= mask;
- writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
- ret |= (tmp >> channel) << 2;
-
- tmp = readl(dmac + S6_DMA_INTRAW0);
- tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
- writel(tmp, dmac + S6_DMA_INTCLEAR0);
-
- if (tmp & (mask << S6_DMA_INT0_UNDER))
- ret |= 1 << 3;
- if (tmp & (mask << S6_DMA_INT0_OVER))
- ret |= 1 << 4;
-
- tmp = readl(dmac + S6_DMA_MASTERERRINFO);
- mask <<= S6_DMA_INT1_CHANNEL;
- if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << S6_DMA_INT1_MASTER;
- if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << (S6_DMA_INT1_MASTER + 1);
- if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
- == channel)
- mask |= 1 << (S6_DMA_INT1_MASTER + 2);
-
- tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
- writel(tmp, dmac + S6_DMA_INTCLEAR1);
- ret |= ((tmp >> channel) & 1) << 5;
- ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
-
- return ret;
-}
-
-void s6dmac_release_chan(u32 dmac, int chan)
-{
- if (chan >= 0)
- s6dmac_disable_chan(dmac, chan);
-}
-
-
-/* global init */
-
-static inline void __init dmac_init(u32 dmac, u8 chan_nb)
-{
- s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
- spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
- s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
- writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
- dmac + S6_DMA_INTCLEAR1);
-}
-
-static inline void __init dmac_master(u32 dmac,
- u32 m0start, u32 m0end, u32 m1start, u32 m1end)
-{
- writel(m0start, dmac + S6_DMA_MASTER0START);
- writel(m0end - 1, dmac + S6_DMA_MASTER0END);
- writel(m1start, dmac + S6_DMA_MASTER1START);
- writel(m1end - 1, dmac + S6_DMA_MASTER1END);
-}
-
-static void __init s6_dmac_init(void)
-{
- dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
- dmac_master(S6_REG_LMSDMA,
- S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
- dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
- dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
- dmac_master(S6_REG_DPDMA,
- S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
- dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
- dmac_master(S6_REG_HIFDMA,
- S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
-}
-
-arch_initcall(s6_dmac_init);
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
deleted file mode 100644
index da9e85c13b08..000000000000
--- a/arch/xtensa/variants/s6000/gpio.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * s6000 gpio driver
- *
- * Copyright (c) 2009 emlix GmbH
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Johannes Weiner <hannes@cmpxchg.org>
- * Daniel Gloeckner <dg@emlix.com>
- */
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-
-#include <variant/hardware.h>
-
-#define IRQ_BASE XTENSA_NR_IRQS
-
-#define S6_GPIO_DATA 0x000
-#define S6_GPIO_IS 0x404
-#define S6_GPIO_IBE 0x408
-#define S6_GPIO_IEV 0x40C
-#define S6_GPIO_IE 0x410
-#define S6_GPIO_RIS 0x414
-#define S6_GPIO_MIS 0x418
-#define S6_GPIO_IC 0x41C
-#define S6_GPIO_AFSEL 0x420
-#define S6_GPIO_DIR 0x800
-#define S6_GPIO_BANK(nr) ((nr) * 0x1000)
-#define S6_GPIO_MASK(nr) (4 << (nr))
-#define S6_GPIO_OFFSET(nr) \
- (S6_GPIO_BANK((nr) >> 3) + S6_GPIO_MASK((nr) & 7))
-
-static int direction_input(struct gpio_chip *chip, unsigned int off)
-{
- writeb(0, S6_REG_GPIO + S6_GPIO_DIR + S6_GPIO_OFFSET(off));
- return 0;
-}
-
-static int get(struct gpio_chip *chip, unsigned int off)
-{
- return readb(S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
-}
-
-static int direction_output(struct gpio_chip *chip, unsigned int off, int val)
-{
- unsigned rel = S6_GPIO_OFFSET(off);
- writeb(~0, S6_REG_GPIO + S6_GPIO_DIR + rel);
- writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + rel);
- return 0;
-}
-
-static void set(struct gpio_chip *chip, unsigned int off, int val)
-{
- writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
-}
-
-static int to_irq(struct gpio_chip *chip, unsigned offset)
-{
- if (offset < 8)
- return offset + IRQ_BASE;
- return -EINVAL;
-}
-
-static struct gpio_chip gpiochip = {
- .owner = THIS_MODULE,
- .direction_input = direction_input,
- .get = get,
- .direction_output = direction_output,
- .set = set,
- .to_irq = to_irq,
- .base = 0,
- .ngpio = 24,
- .can_sleep = 0, /* no blocking io needed */
- .exported = 0, /* no exporting to userspace */
-};
-
-int s6_gpio_init(u32 afsel)
-{
- writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
- writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
- writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
- return gpiochip_add(&gpiochip);
-}
-
-static void ack(struct irq_data *d)
-{
- writeb(1 << (d->irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
-}
-
-static void mask(struct irq_data *d)
-{
- u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
- r &= ~(1 << (d->irq - IRQ_BASE));
- writeb(r, S6_REG_GPIO + S6_GPIO_IE);
-}
-
-static void unmask(struct irq_data *d)
-{
- u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
- m |= 1 << (d->irq - IRQ_BASE);
- writeb(m, S6_REG_GPIO + S6_GPIO_IE);
-}
-
-static int set_type(struct irq_data *d, unsigned int type)
-{
- const u8 m = 1 << (d->irq - IRQ_BASE);
- irq_flow_handler_t handler;
- u8 reg;
-
- if (type == IRQ_TYPE_PROBE) {
- if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
- || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
- || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
- + S6_GPIO_MASK(irq - IRQ_BASE)))
- return 0;
- type = IRQ_TYPE_EDGE_BOTH;
- }
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
- reg |= m;
- handler = handle_level_irq;
- } else {
- reg &= ~m;
- handler = handle_edge_irq;
- }
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
- __irq_set_handler_locked(irq, handler);
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
- if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
- reg |= m;
- else
- reg &= ~m;
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
-
- reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
- if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- reg |= m;
- else
- reg &= ~m;
- writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
- return 0;
-}
-
-static struct irq_chip gpioirqs = {
- .name = "GPIO",
- .irq_ack = ack,
- .irq_mask = mask,
- .irq_unmask = unmask,
- .irq_set_type = set_type,
-};
-
-static u8 demux_masks[4];
-
-static void demux_irqs(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u8 *mask = irq_desc_get_handler_data(desc);
- u8 pending;
- int cirq;
-
- chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data);
- pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
- cirq = IRQ_BASE - 1;
- while (pending) {
- int n = ffs(pending);
- cirq += n;
- pending >>= n;
- generic_handle_irq(cirq);
- }
- chip->irq_unmask(&desc->irq_data);
-}
-
-extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
-
-void __init variant_init_irq(void)
-{
- int irq, n;
- writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
- for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
- const signed char *mapping = platform_irq_mappings[irq];
- int alone = 1;
- u8 mask;
- if (!mapping)
- continue;
- for(mask = 0; *mapping != -1; mapping++)
- switch (*mapping) {
- case S6_INTC_GPIO(0):
- mask |= 1 << 0;
- break;
- case S6_INTC_GPIO(1):
- mask |= 1 << 1;
- break;
- case S6_INTC_GPIO(2):
- mask |= 1 << 2;
- break;
- case S6_INTC_GPIO(3):
- mask |= 0x1f << 3;
- break;
- default:
- alone = 0;
- }
- if (mask) {
- int cirq, i;
- if (!alone) {
- printk(KERN_ERR "chained irq chips can't share"
- " parent irq %i\n", irq);
- continue;
- }
- demux_masks[n] = mask;
- cirq = IRQ_BASE - 1;
- do {
- i = ffs(mask);
- cirq += i;
- mask >>= i;
- irq_set_chip(cirq, &gpioirqs);
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
- } while (mask);
- irq_set_handler_data(irq, demux_masks + n);
- irq_set_chained_handler(irq, demux_irqs);
- if (++n == ARRAY_SIZE(demux_masks))
- break;
- }
- }
-}
diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
deleted file mode 100644
index af007953027e..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/core.h
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Xtensa processor core configuration information.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_CONFIGURATION_H
-#define _XTENSA_CORE_CONFIGURATION_H
-
-
-/****************************************************************************
- Parameters Useful for Any Code, USER or PRIVILEGED
- ****************************************************************************/
-
-/*
- * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
- * configured, and a value of 0 otherwise. These macros are always defined.
- */
-
-
-/*----------------------------------------------------------------------
- ISA
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
-#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
-#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
-#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
-#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
-#define XCHAL_HAVE_DEBUG 1 /* debug option */
-#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
-#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
-#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
-#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
-#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
-#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
-#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
-#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
-#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
-#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
-#define XCHAL_HAVE_L32R 1 /* L32R instruction */
-#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
-#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
-#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
-#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
-#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
-#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
-#define XCHAL_HAVE_ABS 1 /* ABS instruction */
-/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
-/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
-#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
-#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
-#define XCHAL_HAVE_SPECULATION 0 /* speculation */
-#define XCHAL_HAVE_FULL_RESET 0 /* all regs/state reset */
-#define XCHAL_NUM_CONTEXTS 1 /* */
-#define XCHAL_NUM_MISC_REGS 4 /* num of scratch regs (0..4) */
-#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
-#define XCHAL_HAVE_PRID 0 /* processor ID register */
-#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
-#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
-#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
-#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
-#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
-#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
-#define XCHAL_HAVE_FP 1 /* floating point pkg */
-#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
-#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
-#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
-
-
-/*----------------------------------------------------------------------
- MISC
- ----------------------------------------------------------------------*/
-
-#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
-#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
-#define XCHAL_DATA_WIDTH 16 /* data width in bytes */
-/* In T1050, applies to selected core load and store instructions (see ISA): */
-#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
-#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
-
-#define XCHAL_SW_VERSION 701001 /* sw version of this header */
-
-#define XCHAL_CORE_ID "stretch_bali" /* alphanum core name
- (CoreID) set in the Xtensa
- Processor Generator */
-
-#define XCHAL_BUILD_UNIQUE_ID 0x000104B9 /* 22-bit sw build ID */
-
-/*
- * These definitions describe the hardware targeted by this software.
- */
-#define XCHAL_HW_CONFIGID0 0xC2F3F9FE /* ConfigID hi 32 bits*/
-#define XCHAL_HW_CONFIGID1 0x054104B9 /* ConfigID lo 32 bits*/
-#define XCHAL_HW_VERSION_NAME "LX1.0.2" /* full version name */
-#define XCHAL_HW_VERSION_MAJOR 2100 /* major ver# of targeted hw */
-#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
-#define XCHAL_HW_VERSION 210002 /* major*100+minor */
-#define XCHAL_HW_REL_LX1 1
-#define XCHAL_HW_REL_LX1_0 1
-#define XCHAL_HW_REL_LX1_0_2 1
-#define XCHAL_HW_CONFIGID_RELIABLE 1
-/* If software targets a *range* of hardware versions, these are the bounds: */
-#define XCHAL_HW_MIN_VERSION_MAJOR 2100 /* major v of earliest tgt hw */
-#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
-#define XCHAL_HW_MIN_VERSION 210002 /* earliest targeted hw */
-#define XCHAL_HW_MAX_VERSION_MAJOR 2100 /* major v of latest tgt hw */
-#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
-#define XCHAL_HW_MAX_VERSION 210002 /* latest targeted hw */
-
-
-/*----------------------------------------------------------------------
- CACHE
- ----------------------------------------------------------------------*/
-
-#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
-#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
-#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
-
-#define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
-#define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
-
-#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
-
-
-
-
-/****************************************************************************
- Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
- ****************************************************************************/
-
-
-#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
-
-/*----------------------------------------------------------------------
- CACHE
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
-
-/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
-
-/* Number of cache sets in log2(lines per way): */
-#define XCHAL_ICACHE_SETWIDTH 9
-#define XCHAL_DCACHE_SETWIDTH 10
-
-/* Cache set associativity (number of ways): */
-#define XCHAL_ICACHE_WAYS 4
-#define XCHAL_DCACHE_WAYS 2
-
-/* Cache features: */
-#define XCHAL_ICACHE_LINE_LOCKABLE 1
-#define XCHAL_DCACHE_LINE_LOCKABLE 0
-#define XCHAL_ICACHE_ECC_PARITY 0
-#define XCHAL_DCACHE_ECC_PARITY 0
-
-/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
-#define XCHAL_CA_BITS 4
-
-
-/*----------------------------------------------------------------------
- INTERNAL I/D RAM/ROMs and XLMI
- ----------------------------------------------------------------------*/
-
-#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
-#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
-#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
-#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */
-#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
-#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */
-
-/* Data RAM 0: */
-#define XCHAL_DATARAM0_VADDR 0x3FFF0000
-#define XCHAL_DATARAM0_PADDR 0x3FFF0000
-#define XCHAL_DATARAM0_SIZE 65536
-#define XCHAL_DATARAM0_ECC_PARITY 0
-
-/* XLMI Port 0: */
-#define XCHAL_XLMI0_VADDR 0x37F80000
-#define XCHAL_XLMI0_PADDR 0x37F80000
-#define XCHAL_XLMI0_SIZE 262144
-#define XCHAL_XLMI0_ECC_PARITY 0
-
-
-/*----------------------------------------------------------------------
- INTERRUPTS and TIMERS
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
-#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
-#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
-#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
-#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
-#define XCHAL_NUM_INTERRUPTS 27 /* number of interrupts */
-#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
-#define XCHAL_NUM_EXTINTERRUPTS 20 /* num of external interrupts */
-#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
- (not including level zero) */
-#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
- /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
-
-/* Masks of interrupts at each interrupt level: */
-#define XCHAL_INTLEVEL1_MASK 0x01F07FFF
-#define XCHAL_INTLEVEL2_MASK 0x02018000
-#define XCHAL_INTLEVEL3_MASK 0x04060000
-#define XCHAL_INTLEVEL4_MASK 0x00000000
-#define XCHAL_INTLEVEL5_MASK 0x00080000
-#define XCHAL_INTLEVEL6_MASK 0x00000000
-#define XCHAL_INTLEVEL7_MASK 0x00000000
-
-/* Masks of interrupts at each range 1..n of interrupt levels: */
-#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x01F07FFF
-#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x03F1FFFF
-#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x07F7FFFF
-#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x07F7FFFF
-#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x07FFFFFF
-#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x07FFFFFF
-#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x07FFFFFF
-
-/* Level of each interrupt: */
-#define XCHAL_INT0_LEVEL 1
-#define XCHAL_INT1_LEVEL 1
-#define XCHAL_INT2_LEVEL 1
-#define XCHAL_INT3_LEVEL 1
-#define XCHAL_INT4_LEVEL 1
-#define XCHAL_INT5_LEVEL 1
-#define XCHAL_INT6_LEVEL 1
-#define XCHAL_INT7_LEVEL 1
-#define XCHAL_INT8_LEVEL 1
-#define XCHAL_INT9_LEVEL 1
-#define XCHAL_INT10_LEVEL 1
-#define XCHAL_INT11_LEVEL 1
-#define XCHAL_INT12_LEVEL 1
-#define XCHAL_INT13_LEVEL 1
-#define XCHAL_INT14_LEVEL 1
-#define XCHAL_INT15_LEVEL 2
-#define XCHAL_INT16_LEVEL 2
-#define XCHAL_INT17_LEVEL 3
-#define XCHAL_INT18_LEVEL 3
-#define XCHAL_INT19_LEVEL 5
-#define XCHAL_INT20_LEVEL 1
-#define XCHAL_INT21_LEVEL 1
-#define XCHAL_INT22_LEVEL 1
-#define XCHAL_INT23_LEVEL 1
-#define XCHAL_INT24_LEVEL 1
-#define XCHAL_INT25_LEVEL 2
-#define XCHAL_INT26_LEVEL 3
-#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
-#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
-#define XCHAL_NMILEVEL 5 /* NMI "level" (for use with
- EXCSAVE/EPS/EPC_n, RFI n) */
-
-/* Type of each interrupt: */
-#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT13_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT14_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
-#define XCHAL_INT19_TYPE XTHAL_INTTYPE_NMI
-#define XCHAL_INT20_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT21_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT22_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT23_TYPE XTHAL_INTTYPE_SOFTWARE
-#define XCHAL_INT24_TYPE XTHAL_INTTYPE_TIMER
-#define XCHAL_INT25_TYPE XTHAL_INTTYPE_TIMER
-#define XCHAL_INT26_TYPE XTHAL_INTTYPE_TIMER
-
-/* Masks of interrupts for each type of interrupt: */
-#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xF8000000
-#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00F00000
-#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000000
-#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0007FFFF
-#define XCHAL_INTTYPE_MASK_TIMER 0x07000000
-#define XCHAL_INTTYPE_MASK_NMI 0x00080000
-#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
-
-/* Interrupt numbers assigned to specific interrupt sources: */
-#define XCHAL_TIMER0_INTERRUPT 24 /* CCOMPARE0 */
-#define XCHAL_TIMER1_INTERRUPT 25 /* CCOMPARE1 */
-#define XCHAL_TIMER2_INTERRUPT 26 /* CCOMPARE2 */
-#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
-#define XCHAL_NMI_INTERRUPT 19 /* non-maskable interrupt */
-
-/* Interrupt numbers for levels at which only one interrupt is configured: */
-#define XCHAL_INTLEVEL5_NUM 19
-/* (There are many interrupts each at level(s) 1, 2, 3.) */
-
-
-/*
- * External interrupt vectors/levels.
- * These macros describe how Xtensa processor interrupt numbers
- * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
- * map to external BInterrupt<n> pins, for those interrupts
- * configured as external (level-triggered, edge-triggered, or NMI).
- * See the Xtensa processor databook for more details.
- */
-
-/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
-#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
-#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
-#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
-#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
-#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
-#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
-#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
-#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
-#define XCHAL_EXTINT8_NUM 8 /* (intlevel 1) */
-#define XCHAL_EXTINT9_NUM 9 /* (intlevel 1) */
-#define XCHAL_EXTINT10_NUM 10 /* (intlevel 1) */
-#define XCHAL_EXTINT11_NUM 11 /* (intlevel 1) */
-#define XCHAL_EXTINT12_NUM 12 /* (intlevel 1) */
-#define XCHAL_EXTINT13_NUM 13 /* (intlevel 1) */
-#define XCHAL_EXTINT14_NUM 14 /* (intlevel 1) */
-#define XCHAL_EXTINT15_NUM 15 /* (intlevel 2) */
-#define XCHAL_EXTINT16_NUM 16 /* (intlevel 2) */
-#define XCHAL_EXTINT17_NUM 17 /* (intlevel 3) */
-#define XCHAL_EXTINT18_NUM 18 /* (intlevel 3) */
-#define XCHAL_EXTINT19_NUM 19 /* (intlevel 5) */
-
-
-/*----------------------------------------------------------------------
- EXCEPTIONS and VECTORS
- ----------------------------------------------------------------------*/
-
-#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
- number: 1 == XEA1 (old)
- 2 == XEA2 (new)
- 0 == XEAX (extern) */
-#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
-#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
-#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
-#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
-#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
-#define XCHAL_HAVE_VECTOR_SELECT 0 /* relocatable vectors */
-#define XCHAL_HAVE_VECBASE 0 /* relocatable vectors */
-
-#define XCHAL_RESET_VECOFS 0x00000000
-#define XCHAL_RESET_VECTOR_VADDR 0x3FFE03D0
-#define XCHAL_RESET_VECTOR_PADDR 0x3FFE03D0
-#define XCHAL_USER_VECOFS 0x00000000
-#define XCHAL_USER_VECTOR_VADDR 0x40000220
-#define XCHAL_USER_VECTOR_PADDR 0x40000220
-#define XCHAL_KERNEL_VECOFS 0x00000000
-#define XCHAL_KERNEL_VECTOR_VADDR 0x40000200
-#define XCHAL_KERNEL_VECTOR_PADDR 0x40000200
-#define XCHAL_DOUBLEEXC_VECOFS 0x00000000
-#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x400002A0
-#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x400002A0
-#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
-#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
-#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
-#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
-#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
-#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
-#define XCHAL_WINDOW_VECTORS_VADDR 0x40000000
-#define XCHAL_WINDOW_VECTORS_PADDR 0x40000000
-#define XCHAL_INTLEVEL2_VECOFS 0x00000000
-#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000240
-#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000240
-#define XCHAL_INTLEVEL3_VECOFS 0x00000000
-#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x40000260
-#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x40000260
-#define XCHAL_INTLEVEL4_VECOFS 0x00000000
-#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x40000390
-#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x40000390
-#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL4_VECOFS
-#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
-#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
-#define XCHAL_NMI_VECOFS 0x00000000
-#define XCHAL_NMI_VECTOR_VADDR 0x400003B0
-#define XCHAL_NMI_VECTOR_PADDR 0x400003B0
-#define XCHAL_INTLEVEL5_VECOFS XCHAL_NMI_VECOFS
-#define XCHAL_INTLEVEL5_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
-#define XCHAL_INTLEVEL5_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
-
-
-/*----------------------------------------------------------------------
- DEBUG
- ----------------------------------------------------------------------*/
-
-#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
-#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
-#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
-#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
-
-
-/*----------------------------------------------------------------------
- MMU
- ----------------------------------------------------------------------*/
-
-/* See core-matmap.h header file for more details. */
-
-#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
-#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
-#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
-#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
-#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
-#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
-#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
- [autorefill] and protection)
- usable for an MMU-based OS */
-/* If none of the above last 4 are set, it's a custom TLB configuration. */
-
-#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
-#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
-#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
-
-#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
-
-
-#endif /* _XTENSA_CORE_CONFIGURATION_H */
-
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
deleted file mode 100644
index 3f88d9fc6897..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/dmac.h
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * include/asm-xtensa/variant-s6000/dmac.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Tensilica Inc.
- * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
- * Authors: Fabian Godehardt <fg@emlix.com>
- * Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- */
-
-#ifndef __ASM_XTENSA_S6000_DMAC_H
-#define __ASM_XTENSA_S6000_DMAC_H
-#include <linux/io.h>
-#include <variant/hardware.h>
-
-/* DMA global */
-
-#define S6_DMA_INTSTAT0 0x000
-#define S6_DMA_INTSTAT1 0x004
-#define S6_DMA_INTENABLE0 0x008
-#define S6_DMA_INTENABLE1 0x00C
-#define S6_DMA_INTRAW0 0x010
-#define S6_DMA_INTRAW1 0x014
-#define S6_DMA_INTCLEAR0 0x018
-#define S6_DMA_INTCLEAR1 0x01C
-#define S6_DMA_INTSET0 0x020
-#define S6_DMA_INTSET1 0x024
-#define S6_DMA_INT0_UNDER 0
-#define S6_DMA_INT0_OVER 16
-#define S6_DMA_INT1_CHANNEL 0
-#define S6_DMA_INT1_MASTER 16
-#define S6_DMA_INT1_MASTER_MASK 7
-#define S6_DMA_TERMCNTIRQSTAT 0x028
-#define S6_DMA_TERMCNTIRQCLR 0x02C
-#define S6_DMA_TERMCNTIRQSET 0x030
-#define S6_DMA_PENDCNTIRQSTAT 0x034
-#define S6_DMA_PENDCNTIRQCLR 0x038
-#define S6_DMA_PENDCNTIRQSET 0x03C
-#define S6_DMA_LOWWMRKIRQSTAT 0x040
-#define S6_DMA_LOWWMRKIRQCLR 0x044
-#define S6_DMA_LOWWMRKIRQSET 0x048
-#define S6_DMA_MASTERERRINFO 0x04C
-#define S6_DMA_MASTERERR_CHAN(n) (4*(n))
-#define S6_DMA_MASTERERR_CHAN_MASK 0xF
-#define S6_DMA_DESCRFIFO0 0x050
-#define S6_DMA_DESCRFIFO1 0x054
-#define S6_DMA_DESCRFIFO2 0x058
-#define S6_DMA_DESCRFIFO2_AUTODISABLE 24
-#define S6_DMA_DESCRFIFO3 0x05C
-#define S6_DMA_MASTER0START 0x060
-#define S6_DMA_MASTER0END 0x064
-#define S6_DMA_MASTER1START 0x068
-#define S6_DMA_MASTER1END 0x06C
-#define S6_DMA_NEXTFREE 0x070
-#define S6_DMA_NEXTFREE_CHAN 0
-#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F
-#define S6_DMA_NEXTFREE_ENA 16
-#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1)
-#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074)
-#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0
-#define S6_DMA_DPORTCTRLGRP_NRCHANS 1
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2
-#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3
-#define S6_DMA_DPORTCTRLGRP_ENA 31
-
-
-/* DMA per channel */
-
-#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100)
-#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF)
-#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000)
-#define S6_DMA_CHNCTRL 0x000
-#define S6_DMA_CHNCTRL_ENABLE 0
-#define S6_DMA_CHNCTRL_PAUSE 1
-#define S6_DMA_CHNCTRL_PRIO 2
-#define S6_DMA_CHNCTRL_PRIO_MASK 3
-#define S6_DMA_CHNCTRL_PERIPHXFER 4
-#define S6_DMA_CHNCTRL_PERIPHENA 5
-#define S6_DMA_CHNCTRL_SRCINC 6
-#define S6_DMA_CHNCTRL_DSTINC 7
-#define S6_DMA_CHNCTRL_BURSTLOG 8
-#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7
-#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12
-#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F
-#define S6_DMA_CHNCTRL_DESCFIFOFULL 17
-#define S6_DMA_CHNCTRL_BWCONSEL 18
-#define S6_DMA_CHNCTRL_BWCONENA 19
-#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20
-#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F
-#define S6_DMA_CHNCTRL_LOWWMARK 26
-#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF
-#define S6_DMA_CHNCTRL_TSTAMP 30
-#define S6_DMA_TERMCNTNB 0x004
-#define S6_DMA_TERMCNTNB_MASK 0xFFFF
-#define S6_DMA_TERMCNTTMO 0x008
-#define S6_DMA_TERMCNTSTAT 0x00C
-#define S6_DMA_TERMCNTSTAT_MASK 0xFF
-#define S6_DMA_CMONCHUNK 0x010
-#define S6_DMA_SRCSKIP 0x014
-#define S6_DMA_DSTSKIP 0x018
-#define S6_DMA_CUR_SRC 0x024
-#define S6_DMA_CUR_DST 0x028
-#define S6_DMA_TIMESTAMP 0x030
-
-/* DMA channel lists */
-
-#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel))
-#define S6_DPDMA_NB 16
-
-#define S6_HIFDMA_GMACTX 0
-#define S6_HIFDMA_GMACRX 1
-#define S6_HIFDMA_I2S0 2
-#define S6_HIFDMA_I2S1 3
-#define S6_HIFDMA_EGIB 4
-#define S6_HIFDMA_PCITX 5
-#define S6_HIFDMA_PCIRX 6
-#define S6_HIFDMA_NB 7
-
-#define S6_NIDMA_NB 4
-
-#define S6_LMSDMA_NB 12
-
-/* controller access */
-
-#define S6_DMAC_NB 4
-#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB)
-
-struct s6dmac_ctrl {
- u32 dmac;
- spinlock_t lock;
- u8 chan_nb;
-};
-
-extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
-
-
-/* DMA control, per channel */
-
-static inline int s6dmac_fifo_full(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1;
-}
-
-static inline int s6dmac_termcnt_irq(u32 dmac, int chan)
-{
- u32 m = 1 << chan;
- int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1;
- if (r)
- writel(m, dmac + S6_DMA_TERMCNTIRQCLR);
- return r;
-}
-
-static inline int s6dmac_pendcnt_irq(u32 dmac, int chan)
-{
- u32 m = 1 << chan;
- int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1;
- if (r)
- writel(m, dmac + S6_DMA_PENDCNTIRQCLR);
- return r;
-}
-
-static inline int s6dmac_lowwmark_irq(u32 dmac, int chan)
-{
- int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0;
- if (r)
- writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR);
- return r;
-}
-
-static inline u32 s6dmac_pending_count(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- >> S6_DMA_CHNCTRL_PENDGCNTSTAT)
- & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK;
-}
-
-static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n)
-{
- n &= S6_DMA_TERMCNTNB_MASK;
- n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)
- & ~S6_DMA_TERMCNTNB_MASK;
- writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
-}
-
-static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan)
-{
- return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB))
- & S6_DMA_TERMCNTNB_MASK;
-}
-
-static inline u32 s6dmac_timestamp(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP);
-}
-
-static inline u32 s6dmac_cur_src(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC);
-}
-
-static inline u32 s6dmac_cur_dst(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST);
-}
-
-static inline void s6dmac_disable_chan(u32 dmac, int chan)
-{
- u32 ctrl;
- writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
- & ~(1 << S6_DMA_CHNCTRL_ENABLE),
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- do
- ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE));
-}
-
-static inline void s6dmac_set_stride_skip(u32 dmac, int chan,
- int comchunk, /* 0: disable scatter/gather */
- int srcskip, int dstskip)
-{
- writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
- writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
- writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
-}
-
-static inline void s6dmac_enable_chan(u32 dmac, int chan,
- int prio, /* 0 (highest) .. 3 (lowest) */
- int periphxfer, /* <0: disable p.req.line, 0..1: mode */
- int srcinc, int dstinc, /* 0: dont increment src/dst address */
- int comchunk, /* 0: disable scatter/gather */
- int srcskip, int dstskip,
- int burstsize, /* 4 for I2S, 7 for everything else */
- int bandwidthconserve, /* <0: disable, 0..1: select */
- int lowwmark, /* 0..15 */
- int timestamp, /* 0: disable timestamp */
- int enable) /* 0: disable for now */
-{
- writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
- writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO);
- writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK,
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
- s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip);
- writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) |
- (prio << S6_DMA_CHNCTRL_PRIO) |
- (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) |
- (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) |
- ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) |
- ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) |
- (burstsize << S6_DMA_CHNCTRL_BURSTLOG) |
- (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) |
- (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) |
- (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) |
- ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP),
- DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
-}
-
-
-/* DMA control, per engine */
-
-static inline unsigned _dmac_addr_index(u32 dmac)
-{
- unsigned i = S6_DMAC_INDEX(dmac);
- if (s6dmac_ctrl[i].dmac != dmac)
- BUG();
- return i;
-}
-
-static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask)
-{
- writel(mask, dmac + S6_DMA_TERMCNTIRQCLR);
- writel(mask, dmac + S6_DMA_PENDCNTIRQCLR);
- writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR);
- writel(readl(dmac + S6_DMA_INTENABLE0)
- & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)),
- dmac + S6_DMA_INTENABLE0);
- writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL),
- dmac + S6_DMA_INTENABLE1);
- writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER),
- dmac + S6_DMA_INTCLEAR0);
- writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1);
-}
-
-/*
- * request channel from specified engine
- * with chan<0, accept any channel
- * further parameters see s6dmac_enable_chan
- * returns < 0 upon error, channel nb otherwise
- */
-static inline int s6dmac_request_chan(u32 dmac, int chan,
- int prio,
- int periphxfer,
- int srcinc, int dstinc,
- int comchunk,
- int srcskip, int dstskip,
- int burstsize,
- int bandwidthconserve,
- int lowwmark,
- int timestamp,
- int enable)
-{
- int r = chan;
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- if (r < 0) {
- r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN)
- & S6_DMA_NEXTFREE_CHAN_MASK;
- }
- if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) {
- if (chan < 0)
- r = -EBUSY;
- else
- r = -ENXIO;
- } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA)
- >> r) & 1) {
- r = -EBUSY;
- } else {
- s6dmac_enable_chan(dmac, r, prio, periphxfer,
- srcinc, dstinc, comchunk, srcskip, dstskip, burstsize,
- bandwidthconserve, lowwmark, timestamp, enable);
- }
- spin_unlock_irqrestore(spinl, flags);
- return r;
-}
-
-static inline void s6dmac_put_fifo(u32 dmac, int chan,
- u32 src, u32 dst, u32 size)
-{
- unsigned long flags;
- spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
- spin_lock_irqsave(spinl, flags);
- writel(src, dmac + S6_DMA_DESCRFIFO0);
- writel(dst, dmac + S6_DMA_DESCRFIFO1);
- writel(size, dmac + S6_DMA_DESCRFIFO2);
- writel(chan, dmac + S6_DMA_DESCRFIFO3);
- spin_unlock_irqrestore(spinl, flags);
-}
-
-static inline u32 s6dmac_channel_enabled(u32 dmac, int chan)
-{
- return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) &
- (1 << S6_DMA_CHNCTRL_ENABLE);
-}
-
-/*
- * group 1-4 data port channels
- * with port=0..3, nrch=1-4 channels,
- * frrep=0/1 (dis- or enable frame repeat)
- */
-static inline void s6dmac_dp_setup_group(u32 dmac, int port,
- int nrch, int frrep)
-{
- static const u8 mask[4] = {0, 3, 1, 2};
- BUG_ON(dmac != S6_REG_DPDMA);
- if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4))
- return;
- writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS)
- | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP),
- dmac + S6_DMA_DPORTCTRLGRP(port));
-}
-
-static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable)
-{
- u32 tmp;
- BUG_ON(dmac != S6_REG_DPDMA);
- tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port));
- if (enable)
- tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA);
- else
- tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA);
- writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port));
-}
-
-extern void s6dmac_put_fifo_cache(u32 dmac, int chan,
- u32 src, u32 dst, u32 size);
-extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask);
-extern u32 s6dmac_int_sources(u32 dmac, u32 channel);
-extern void s6dmac_release_chan(u32 dmac, int chan);
-
-#endif /* __ASM_XTENSA_S6000_DMAC_H */
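The header removed above is a register-level helper API; as a quick orientation, here is a minimal sketch (not taken from the tree) of how a driver would have driven it: request any free channel on the host-interface engine, queue one descriptor, and poll the terminal-count status. The choice of S6_REG_HIFDMA as the engine base (defined in the s6000 hardware.h removed below) and all parameter values are illustrative assumptions.

    #include <linux/errno.h>
    #include <variant/dmac.h>
    #include <variant/hardware.h>

    /* Copy len bytes from src to dst over the host-interface DMA engine. */
    static int example_hif_copy(u32 src, u32 dst, u32 len)
    {
        int chan;

        /*
         * Any free channel (-1), highest priority, peripheral request line
         * disabled, increment both addresses, no scatter/gather, burst size
         * log 7, bandwidth conservation off, watermark 0, no timestamp,
         * enable immediately.
         */
        chan = s6dmac_request_chan(S6_REG_HIFDMA, -1, 0, -1, 1, 1,
                                   0, 0, 0, 7, -1, 0, 0, 1);
        if (chan < 0)
            return chan;

        /* Queue one descriptor: source, destination, transfer size. */
        s6dmac_put_fifo(S6_REG_HIFDMA, chan, src, dst, len);

        /* A real driver would sleep on the interrupt instead of polling. */
        while (!s6dmac_termcnt_irq(S6_REG_HIFDMA, chan))
            ;

        s6dmac_release_chan(S6_REG_HIFDMA, chan);
        return 0;
    }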
diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h
deleted file mode 100644
index 8484ab0df461..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/gpio.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _XTENSA_VARIANT_S6000_GPIO_H
-#define _XTENSA_VARIANT_S6000_GPIO_H
-
-extern int s6_gpio_init(u32 afsel);
-
-#endif /* _XTENSA_VARIANT_S6000_GPIO_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/hardware.h b/arch/xtensa/variants/s6000/include/variant/hardware.h
deleted file mode 100644
index 5d9ba098d84a..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/hardware.h
+++ /dev/null
@@ -1,259 +0,0 @@
-#ifndef __XTENSA_S6000_HARDWARE_H
-#define __XTENSA_S6000_HARDWARE_H
-
-#define S6_SCLK 1843200
-
-#define S6_MEM_REG 0x20000000
-#define S6_MEM_EFI 0x33F00000
-#define S6_MEM_PCIE_DATARAM1 0x34000000
-#define S6_MEM_XLMI 0x37F80000
-#define S6_MEM_PIF_DATARAM1 0x37FFC000
-#define S6_MEM_GMAC 0x38000000
-#define S6_MEM_I2S 0x3A000000
-#define S6_MEM_EGIB 0x3C000000
-#define S6_MEM_PCIE_CFG 0x3E000000
-#define S6_MEM_PIF_DATARAM 0x3FFE0000
-#define S6_MEM_XLMI_DATARAM 0x3FFF0000
-#define S6_MEM_DDR 0x40000000
-#define S6_MEM_PCIE_APER 0xC0000000
-#define S6_MEM_AUX 0xF0000000
-
-/* Device addresses */
-
-#define S6_REG_SCB S6_MEM_REG
-#define S6_REG_NB (S6_REG_SCB + 0x10000)
-#define S6_REG_LMSDMA (S6_REG_SCB + 0x20000)
-#define S6_REG_NI (S6_REG_SCB + 0x30000)
-#define S6_REG_NIDMA (S6_REG_SCB + 0x40000)
-#define S6_REG_NS (S6_REG_SCB + 0x50000)
-#define S6_REG_DDR (S6_REG_SCB + 0x60000)
-#define S6_REG_GREG1 (S6_REG_SCB + 0x70000)
-#define S6_REG_DP (S6_REG_SCB + 0x80000)
-#define S6_REG_DPDMA (S6_REG_SCB + 0x90000)
-#define S6_REG_EGIB (S6_REG_SCB + 0xA0000)
-#define S6_REG_PCIE (S6_REG_SCB + 0xB0000)
-#define S6_REG_I2S (S6_REG_SCB + 0xC0000)
-#define S6_REG_GMAC (S6_REG_SCB + 0xD0000)
-#define S6_REG_HIFDMA (S6_REG_SCB + 0xE0000)
-#define S6_REG_GREG2 (S6_REG_SCB + 0xF0000)
-
-#define S6_REG_APB S6_REG_SCB
-#define S6_REG_UART (S6_REG_APB + 0x0000)
-#define S6_REG_INTC (S6_REG_APB + 0x2000)
-#define S6_REG_SPI (S6_REG_APB + 0x3000)
-#define S6_REG_I2C (S6_REG_APB + 0x4000)
-#define S6_REG_GPIO (S6_REG_APB + 0x8000)
-
-/* Global register block */
-
-#define S6_GREG1_PLL_LOCKCLEAR 0x000
-#define S6_GREG1_PLL_LOCK_SYS 0
-#define S6_GREG1_PLL_LOCK_IO 1
-#define S6_GREG1_PLL_LOCK_AIM 2
-#define S6_GREG1_PLL_LOCK_DP0 3
-#define S6_GREG1_PLL_LOCK_DP2 4
-#define S6_GREG1_PLL_LOCK_DDR 5
-#define S6_GREG1_PLL_LOCKSTAT 0x004
-#define S6_GREG1_PLL_LOCKSTAT_CURLOCK 0
-#define S6_GREG1_PLL_LOCKSTAT_EVERUNLCK 8
-#define S6_GREG1_PLLSEL 0x010
-#define S6_GREG1_PLLSEL_AIM 0
-#define S6_GREG1_PLLSEL_AIM_DDR2 0
-#define S6_GREG1_PLLSEL_AIM_300MHZ 1
-#define S6_GREG1_PLLSEL_AIM_240MHZ 2
-#define S6_GREG1_PLLSEL_AIM_200MHZ 3
-#define S6_GREG1_PLLSEL_AIM_150MHZ 4
-#define S6_GREG1_PLLSEL_AIM_120MHZ 5
-#define S6_GREG1_PLLSEL_AIM_40MHZ 6
-#define S6_GREG1_PLLSEL_AIM_PLLAIMREF 7
-#define S6_GREG1_PLLSEL_AIM_MASK 7
-#define S6_GREG1_PLLSEL_DDR 8
-#define S6_GREG1_PLLSEL_DDR_HS 0
-#define S6_GREG1_PLLSEL_DDR_333MHZ 1
-#define S6_GREG1_PLLSEL_DDR_250MHZ 2
-#define S6_GREG1_PLLSEL_DDR_200MHZ 3
-#define S6_GREG1_PLLSEL_DDR_167MHZ 4
-#define S6_GREG1_PLLSEL_DDR_100MHZ 5
-#define S6_GREG1_PLLSEL_DDR_33MHZ 6
-#define S6_GREG1_PLLSEL_DDR_PLLIOREF 7
-#define S6_GREG1_PLLSEL_DDR_MASK 7
-#define S6_GREG1_PLLSEL_GMAC 16
-#define S6_GREG1_PLLSEL_GMAC_125MHZ 0
-#define S6_GREG1_PLLSEL_GMAC_25MHZ 1
-#define S6_GREG1_PLLSEL_GMAC_2500KHZ 2
-#define S6_GREG1_PLLSEL_GMAC_EXTERN 3
-#define S6_GREG1_PLLSEL_GMAC_MASK 3
-#define S6_GREG1_PLLSEL_GMII 18
-#define S6_GREG1_PLLSEL_GMII_111MHZ 0
-#define S6_GREG1_PLLSEL_GMII_IOREF 1
-#define S6_GREG1_PLLSEL_GMII_NONE 2
-#define S6_GREG1_PLLSEL_GMII_125MHZ 3
-#define S6_GREG1_PLLSEL_GMII_MASK 3
-#define S6_GREG1_SYSUNLOCKCNT 0x020
-#define S6_GREG1_IOUNLOCKCNT 0x024
-#define S6_GREG1_AIMUNLOCKCNT 0x028
-#define S6_GREG1_DP0UNLOCKCNT 0x02C
-#define S6_GREG1_DP2UNLOCKCNT 0x030
-#define S6_GREG1_DDRUNLOCKCNT 0x034
-#define S6_GREG1_CLKBAL0 0x040
-#define S6_GREG1_CLKBAL0_LSGB 0
-#define S6_GREG1_CLKBAL0_LSPX 8
-#define S6_GREG1_CLKBAL0_MEMDO 16
-#define S6_GREG1_CLKBAL0_HSXT1 24
-#define S6_GREG1_CLKBAL1 0x044
-#define S6_GREG1_CLKBAL1_HSISEF 0
-#define S6_GREG1_CLKBAL1_HSNI 8
-#define S6_GREG1_CLKBAL1_HSNS 16
-#define S6_GREG1_CLKBAL1_HSISEFCFG 24
-#define S6_GREG1_CLKBAL2 0x048
-#define S6_GREG1_CLKBAL2_LSNB 0
-#define S6_GREG1_CLKBAL2_LSSB 8
-#define S6_GREG1_CLKBAL2_LSREST 24
-#define S6_GREG1_CLKBAL3 0x04C
-#define S6_GREG1_CLKBAL3_ISEFXAD 0
-#define S6_GREG1_CLKBAL3_ISEFLMS 8
-#define S6_GREG1_CLKBAL3_ISEFISEF 16
-#define S6_GREG1_CLKBAL3_DDRDD 24
-#define S6_GREG1_CLKBAL4 0x050
-#define S6_GREG1_CLKBAL4_DDRDP 0
-#define S6_GREG1_CLKBAL4_DDRDO 8
-#define S6_GREG1_CLKBAL4_DDRNB 16
-#define S6_GREG1_CLKBAL4_DDRLMS 24
-#define S6_GREG1_BLOCKENA 0x100
-#define S6_GREG1_BLOCK_DDR 0
-#define S6_GREG1_BLOCK_DP 1
-#define S6_GREG1_BLOCK_NSNI 2
-#define S6_GREG1_BLOCK_PCIE 3
-#define S6_GREG1_BLOCK_GMAC 4
-#define S6_GREG1_BLOCK_I2S 5
-#define S6_GREG1_BLOCK_EGIB 6
-#define S6_GREG1_BLOCK_SB 7
-#define S6_GREG1_BLOCK_XT1 8
-#define S6_GREG1_CLKGATE 0x104
-#define S6_GREG1_BGATE_AIMNORTH 9
-#define S6_GREG1_BGATE_AIMEAST 10
-#define S6_GREG1_BGATE_AIMWEST 11
-#define S6_GREG1_BGATE_AIMSOUTH 12
-#define S6_GREG1_CHIPRES 0x108
-#define S6_GREG1_CHIPRES_SOFTRES 0
-#define S6_GREG1_CHIPRES_LOSTLOCK 1
-#define S6_GREG1_RESETCAUSE 0x10C
-#define S6_GREG1_RESETCAUSE_RESETN 0
-#define S6_GREG1_RESETCAUSE_GLOBAL 1
-#define S6_GREG1_RESETCAUSE_WDOGTIMER 2
-#define S6_GREG1_RESETCAUSE_SWCHIP 3
-#define S6_GREG1_RESETCAUSE_PLLSYSLOSS 4
-#define S6_GREG1_RESETCAUSE_PCIE 5
-#define S6_GREG1_RESETCAUSE_CREATEDGLOB 6
-#define S6_GREG1_REFCLOCKCNT 0x110
-#define S6_GREG1_RESETTIMER 0x114
-#define S6_GREG1_NMITIMER 0x118
-#define S6_GREG1_GLOBAL_TIMER 0x11C
-#define S6_GREG1_TIMER0 0x180
-#define S6_GREG1_TIMER1 0x184
-#define S6_GREG1_UARTCLOCKSEL 0x204
-#define S6_GREG1_CHIPVERSPACKG 0x208
-#define S6_GREG1_CHIPVERSPACKG_CHIPVID 0
-#define S6_GREG1_CHIPVERSPACKG_PACKSEL 8
-#define S6_GREG1_ONDIETERMCTRL 0x20C
-#define S6_GREG1_ONDIETERMCTRL_WEST 0
-#define S6_GREG1_ONDIETERMCTRL_NORTH 2
-#define S6_GREG1_ONDIETERMCTRL_EAST 4
-#define S6_GREG1_ONDIETERMCTRL_SOUTH 6
-#define S6_GREG1_ONDIETERMCTRL_NONE 0
-#define S6_GREG1_ONDIETERMCTRL_75OHM 2
-#define S6_GREG1_ONDIETERMCTRL_MASK 3
-#define S6_GREG1_BOOT_CFG0 0x210
-#define S6_GREG1_BOOT_CFG0_AIMSTRONG 1
-#define S6_GREG1_BOOT_CFG0_MINIBOOTDL 2
-#define S6_GREG1_BOOT_CFG0_OCDGPIO8SET 5
-#define S6_GREG1_BOOT_CFG0_OCDGPIOENA 6
-#define S6_GREG1_BOOT_CFG0_DOWNSTREAM 7
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV 8
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_300MHZ 1
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_240MHZ 2
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_200MHZ 3
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_150MHZ 4
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_120MHZ 5
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_40MHZ 6
-#define S6_GREG1_BOOT_CFG0_PLLSYSDIV_MASK 7
-#define S6_GREG1_BOOT_CFG0_BALHSLMS 12
-#define S6_GREG1_BOOT_CFG0_BALHSNB 18
-#define S6_GREG1_BOOT_CFG0_BALHSXAD 24
-#define S6_GREG1_BOOT_CFG1 0x214
-#define S6_GREG1_BOOT_CFG1_PCIE1LANE 1
-#define S6_GREG1_BOOT_CFG1_MPLLPRESCALE 2
-#define S6_GREG1_BOOT_CFG1_MPLLNCY 4
-#define S6_GREG1_BOOT_CFG1_MPLLNCY5 9
-#define S6_GREG1_BOOT_CFG1_BALHSREST 14
-#define S6_GREG1_BOOT_CFG1_BALHSPSMEMS 20
-#define S6_GREG1_BOOT_CFG1_BALLSGI 26
-#define S6_GREG1_BOOT_CFG2 0x218
-#define S6_GREG1_BOOT_CFG2_PEID 0
-#define S6_GREG1_BOOT_CFG3 0x21C
-#define S6_GREG1_DRAMBUSYHOLDOF 0x220
-#define S6_GREG1_DRAMBUSYHOLDOF_XT0 0
-#define S6_GREG1_DRAMBUSYHOLDOF_XT1 4
-#define S6_GREG1_DRAMBUSYHOLDOF_XT_MASK 7
-#define S6_GREG1_PCIEBAR1SIZE 0x224
-#define S6_GREG1_PCIEBAR2SIZE 0x228
-#define S6_GREG1_PCIEVENDOR 0x22C
-#define S6_GREG1_PCIEDEVICE 0x230
-#define S6_GREG1_PCIEREV 0x234
-#define S6_GREG1_PCIECLASS 0x238
-#define S6_GREG1_XT1DCACHEMISS 0x240
-#define S6_GREG1_XT1ICACHEMISS 0x244
-#define S6_GREG1_HWSEMAPHORE(n) (0x400 + 4 * (n))
-#define S6_GREG1_HWSEMAPHORE_NB 16
-
-/* peripheral interrupt numbers */
-
-#define S6_INTC_GPIO(n) (n) /* 0..3 */
-#define S6_INTC_I2C 4
-#define S6_INTC_SPI 5
-#define S6_INTC_NB_ERR 6
-#define S6_INTC_DMA_LMSERR 7
-#define S6_INTC_DMA_LMSLOWWMRK(n) (8 + (n)) /* 0..11 */
-#define S6_INTC_DMA_LMSPENDCNT(n) (20 + (n)) /* 0..11 */
-#define S6_INTC_DMA_HOSTLOWWMRK(n) (32 + (n)) /* 0..6 */

-#define S6_INTC_DMA_HOSTPENDCNT(n) (39 + (n)) /* 0..6 */
-#define S6_INTC_DMA_HOSTERR 46
-#define S6_INTC_UART(n) (47 + (n)) /* 0..1 */
-#define S6_INTC_XAD 49
-#define S6_INTC_NI_ERR 50
-#define S6_INTC_NI_INFIFOFULL 51
-#define S6_INTC_DMA_NIERR 52
-#define S6_INTC_DMA_NILOWWMRK(n) (53 + (n)) /* 0..3 */
-#define S6_INTC_DMA_NIPENDCNT(n) (57 + (n)) /* 0..3 */
-#define S6_INTC_DDR 61
-#define S6_INTC_NS_ERR 62
-#define S6_INTC_EFI_CFGERR 63
-#define S6_INTC_EFI_ISEFTEST 64
-#define S6_INTC_EFI_WRITEERR 65
-#define S6_INTC_NMI_TIMER 66
-#define S6_INTC_PLLLOCK_SYS 67
-#define S6_INTC_PLLLOCK_IO 68
-#define S6_INTC_PLLLOCK_AIM 69
-#define S6_INTC_PLLLOCK_DP0 70
-#define S6_INTC_PLLLOCK_DP2 71
-#define S6_INTC_I2S_ERR 72
-#define S6_INTC_GMAC_STAT 73
-#define S6_INTC_GMAC_ERR 74
-#define S6_INTC_GIB_ERR 75
-#define S6_INTC_PCIE_ERR 76
-#define S6_INTC_PCIE_MSI(n) (77 + (n)) /* 0..3 */
-#define S6_INTC_PCIE_INTA 81
-#define S6_INTC_PCIE_INTB 82
-#define S6_INTC_PCIE_INTC 83
-#define S6_INTC_PCIE_INTD 84
-#define S6_INTC_SW(n) (85 + (n)) /* 0..9 */
-#define S6_INTC_SW_ENABLE(n) (85 + 256 + (n))
-#define S6_INTC_DMA_DP_ERR 95
-#define S6_INTC_DMA_DPLOWWMRK(n) (96 + (n)) /* 0..3 */
-#define S6_INTC_DMA_DPPENDCNT(n) (100 + (n)) /* 0..3 */
-#define S6_INTC_DMA_DPTERMCNT(n) (104 + (n)) /* 0..3 */
-#define S6_INTC_TIMER0 108
-#define S6_INTC_TIMER1 109
-#define S6_INTC_DMA_HOSTTERMCNT(n) (110 + (n)) /* 0..6 */
-
-#endif /* __XTENSA_S6000_HARDWARE_H */
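The header above is a flat memory map: each register is a 32-bit MMIO word at a block base plus one of the listed offsets, and single-bit fields are given as bit numbers (the same convention dmac.h uses with 1 << S6_DMA_CHNCTRL_ENABLE). Because the core configuration above is identity-mapped, the variant code uses these addresses directly. A small illustrative read, not part of the patch, assuming the bit-number convention also holds for the RESETCAUSE field:

    #include <linux/io.h>
    #include <linux/types.h>
    #include <variant/hardware.h>

    /* Did the last reset come from the watchdog timer? (illustrative only) */
    static bool example_reset_was_watchdog(void)
    {
        u32 cause = readl((void __iomem *)(S6_REG_GREG1 + S6_GREG1_RESETCAUSE));

        return cause & (1 << S6_GREG1_RESETCAUSE_WDOGTIMER);
    }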
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
deleted file mode 100644
index 39ca751a6255..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _XTENSA_S6000_IRQ_H
-#define _XTENSA_S6000_IRQ_H
-
-#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
-
-extern void variant_irq_enable(unsigned int irq);
-
-#endif /* __XTENSA_S6000_IRQ_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/tie-asm.h b/arch/xtensa/variants/s6000/include/variant/tie-asm.h
deleted file mode 100644
index f02d0a3a2e20..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/tie-asm.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * This header file contains assembly-language definitions (assembly
- * macros, etc.) for this specific Xtensa processor's TIE extensions
- * and options. It is customized to this Xtensa processor configuration.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_TIE_ASM_H
-#define _XTENSA_CORE_TIE_ASM_H
-
-/* Selection parameter values for save-area save/restore macros: */
-/* Option vs. TIE: */
-#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
-#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
-/* Whether used automatically by compiler: */
-#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
-#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
-/* ABI handling across function calls: */
-#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
-#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
-#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
-/* Misc */
-#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
-
-
-
-/* Macro to save all non-coprocessor (extra) custom TIE and optional state
- * (not including zero-overhead loop registers).
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
- */
- .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 1024-4, 4, 4
- rsr \at1, BR // boolean option
- s32i \at1, \ptr, .Lxchal_ofs_ + 0
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
- .endif
- .endm // xchal_ncp_store
-
-/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
- * (not including zero-overhead loop registers).
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
- */
- .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 1024-4, 4, 4
- l32i \at1, \ptr, .Lxchal_ofs_ + 0
- wsr \at1, BR // boolean option
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
- .endif
- .endm // xchal_ncp_load
-
-
-
-#define XCHAL_NCP_NUM_ATMPS 1
-
-
-
-/* Macro to save the state of TIE coprocessor FPU.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed)
- */
-#define xchal_cp_FPU_store xchal_cp0_store
-/* #define xchal_cp_FPU_store_a2 xchal_cp0_store a2 a3 a4 a5 a6 */
- .macro xchal_cp0_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- rur232 \at1 // FCR
- s32i \at1, \ptr, 0
- rur233 \at1 // FSR
- s32i \at1, \ptr, 4
- SSI f0, \ptr, 8
- SSI f1, \ptr, 12
- SSI f2, \ptr, 16
- SSI f3, \ptr, 20
- SSI f4, \ptr, 24
- SSI f5, \ptr, 28
- SSI f6, \ptr, 32
- SSI f7, \ptr, 36
- SSI f8, \ptr, 40
- SSI f9, \ptr, 44
- SSI f10, \ptr, 48
- SSI f11, \ptr, 52
- SSI f12, \ptr, 56
- SSI f13, \ptr, 60
- SSI f14, \ptr, 64
- SSI f15, \ptr, 68
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 72
- .endif
- .endm // xchal_cp0_store
-
-/* Macro to restore the state of TIE coprocessor FPU.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP0_NUM_ATMPS needed)
- */
-#define xchal_cp_FPU_load xchal_cp0_load
-/* #define xchal_cp_FPU_load_a2 xchal_cp0_load a2 a3 a4 a5 a6 */
- .macro xchal_cp0_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- l32i \at1, \ptr, 0
- wur232 \at1 // FCR
- l32i \at1, \ptr, 4
- wur233 \at1 // FSR
- LSI f0, \ptr, 8
- LSI f1, \ptr, 12
- LSI f2, \ptr, 16
- LSI f3, \ptr, 20
- LSI f4, \ptr, 24
- LSI f5, \ptr, 28
- LSI f6, \ptr, 32
- LSI f7, \ptr, 36
- LSI f8, \ptr, 40
- LSI f9, \ptr, 44
- LSI f10, \ptr, 48
- LSI f11, \ptr, 52
- LSI f12, \ptr, 56
- LSI f13, \ptr, 60
- LSI f14, \ptr, 64
- LSI f15, \ptr, 68
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 72
- .endif
- .endm // xchal_cp0_load
-
-#define XCHAL_CP0_NUM_ATMPS 1
-
-/* Macro to save the state of TIE coprocessor XAD.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed)
- */
-#define xchal_cp_XAD_store xchal_cp6_store
-/* #define xchal_cp_XAD_store_a2 xchal_cp6_store a2 a3 a4 a5 a6 */
- .macro xchal_cp6_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- rur0 \at1 // LDCBHI
- s32i \at1, \ptr, 0
- rur1 \at1 // LDCBLO
- s32i \at1, \ptr, 4
- rur2 \at1 // STCBHI
- s32i \at1, \ptr, 8
- rur3 \at1 // STCBLO
- s32i \at1, \ptr, 12
- rur8 \at1 // LDBRBASE
- s32i \at1, \ptr, 16
- rur9 \at1 // LDBROFF
- s32i \at1, \ptr, 20
- rur10 \at1 // LDBRINC
- s32i \at1, \ptr, 24
- rur11 \at1 // STBRBASE
- s32i \at1, \ptr, 28
- rur12 \at1 // STBROFF
- s32i \at1, \ptr, 32
- rur13 \at1 // STBRINC
- s32i \at1, \ptr, 36
- rur24 \at1 // SCRATCH0
- s32i \at1, \ptr, 40
- rur25 \at1 // SCRATCH1
- s32i \at1, \ptr, 44
- rur26 \at1 // SCRATCH2
- s32i \at1, \ptr, 48
- rur27 \at1 // SCRATCH3
- s32i \at1, \ptr, 52
- WRAS128I wra0, \ptr, 64
- WRAS128I wra1, \ptr, 80
- WRAS128I wra2, \ptr, 96
- WRAS128I wra3, \ptr, 112
- WRAS128I wra4, \ptr, 128
- WRAS128I wra5, \ptr, 144
- WRAS128I wra6, \ptr, 160
- WRAS128I wra7, \ptr, 176
- WRAS128I wra8, \ptr, 192
- WRAS128I wra9, \ptr, 208
- WRAS128I wra10, \ptr, 224
- WRAS128I wra11, \ptr, 240
- WRAS128I wra12, \ptr, 256
- WRAS128I wra13, \ptr, 272
- WRAS128I wra14, \ptr, 288
- WRAS128I wra15, \ptr, 304
- WRBS128I wrb0, \ptr, 320
- WRBS128I wrb1, \ptr, 336
- WRBS128I wrb2, \ptr, 352
- WRBS128I wrb3, \ptr, 368
- WRBS128I wrb4, \ptr, 384
- WRBS128I wrb5, \ptr, 400
- WRBS128I wrb6, \ptr, 416
- WRBS128I wrb7, \ptr, 432
- WRBS128I wrb8, \ptr, 448
- WRBS128I wrb9, \ptr, 464
- WRBS128I wrb10, \ptr, 480
- WRBS128I wrb11, \ptr, 496
- WRBS128I wrb12, \ptr, 512
- WRBS128I wrb13, \ptr, 528
- WRBS128I wrb14, \ptr, 544
- WRBS128I wrb15, \ptr, 560
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 576
- .endif
- .endm // xchal_cp6_store
-
-/* Macro to restore the state of TIE coprocessor XAD.
- * Save area ptr (clobbered): ptr (16 byte aligned)
- * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP6_NUM_ATMPS needed)
- */
-#define xchal_cp_XAD_load xchal_cp6_load
-/* #define xchal_cp_XAD_load_a2 xchal_cp6_load a2 a3 a4 a5 a6 */
- .macro xchal_cp6_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
- xchal_sa_start \continue, \ofs
- .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
- xchal_sa_align \ptr, 0, 0, 1, 16
- l32i \at1, \ptr, 0
- wur0 \at1 // LDCBHI
- l32i \at1, \ptr, 4
- wur1 \at1 // LDCBLO
- l32i \at1, \ptr, 8
- wur2 \at1 // STCBHI
- l32i \at1, \ptr, 12
- wur3 \at1 // STCBLO
- l32i \at1, \ptr, 16
- wur8 \at1 // LDBRBASE
- l32i \at1, \ptr, 20
- wur9 \at1 // LDBROFF
- l32i \at1, \ptr, 24
- wur10 \at1 // LDBRINC
- l32i \at1, \ptr, 28
- wur11 \at1 // STBRBASE
- l32i \at1, \ptr, 32
- wur12 \at1 // STBROFF
- l32i \at1, \ptr, 36
- wur13 \at1 // STBRINC
- l32i \at1, \ptr, 40
- wur24 \at1 // SCRATCH0
- l32i \at1, \ptr, 44
- wur25 \at1 // SCRATCH1
- l32i \at1, \ptr, 48
- wur26 \at1 // SCRATCH2
- l32i \at1, \ptr, 52
- wur27 \at1 // SCRATCH3
- WRBL128I wrb0, \ptr, 320
- WRBL128I wrb1, \ptr, 336
- WRBL128I wrb2, \ptr, 352
- WRBL128I wrb3, \ptr, 368
- WRBL128I wrb4, \ptr, 384
- WRBL128I wrb5, \ptr, 400
- WRBL128I wrb6, \ptr, 416
- WRBL128I wrb7, \ptr, 432
- WRBL128I wrb8, \ptr, 448
- WRBL128I wrb9, \ptr, 464
- WRBL128I wrb10, \ptr, 480
- WRBL128I wrb11, \ptr, 496
- WRBL128I wrb12, \ptr, 512
- WRBL128I wrb13, \ptr, 528
- WRBL128I wrb14, \ptr, 544
- WRBL128I wrb15, \ptr, 560
- WRAL128I wra0, \ptr, 64
- WRAL128I wra1, \ptr, 80
- WRAL128I wra2, \ptr, 96
- WRAL128I wra3, \ptr, 112
- WRAL128I wra4, \ptr, 128
- WRAL128I wra5, \ptr, 144
- WRAL128I wra6, \ptr, 160
- WRAL128I wra7, \ptr, 176
- WRAL128I wra8, \ptr, 192
- WRAL128I wra9, \ptr, 208
- WRAL128I wra10, \ptr, 224
- WRAL128I wra11, \ptr, 240
- WRAL128I wra12, \ptr, 256
- WRAL128I wra13, \ptr, 272
- WRAL128I wra14, \ptr, 288
- WRAL128I wra15, \ptr, 304
- .set .Lxchal_ofs_, .Lxchal_ofs_ + 576
- .endif
- .endm // xchal_cp6_load
-
-#define XCHAL_CP6_NUM_ATMPS 1
-#define XCHAL_SA_NUM_ATMPS 1
-
- /* Empty macros for unconfigured coprocessors: */
- .macro xchal_cp1_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
- .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
-
-#endif /*_XTENSA_CORE_TIE_ASM_H*/
-
diff --git a/arch/xtensa/variants/s6000/include/variant/tie.h b/arch/xtensa/variants/s6000/include/variant/tie.h
deleted file mode 100644
index be7ea843d5df..000000000000
--- a/arch/xtensa/variants/s6000/include/variant/tie.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * This header file describes this specific Xtensa processor's TIE extensions
- * that extend basic Xtensa core functionality. It is customized to this
- * Xtensa processor configuration.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2008 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CORE_TIE_H
-#define _XTENSA_CORE_TIE_H
-
-#define XCHAL_CP_NUM 2 /* number of coprocessors */
-#define XCHAL_CP_MAX 7 /* max CP ID + 1 (0 if none) */
-#define XCHAL_CP_MASK 0x41 /* bitmask of all CPs by ID */
-#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
-
-/* Basic parameters of each coprocessor: */
-#define XCHAL_CP0_NAME "FPU"
-#define XCHAL_CP0_IDENT FPU
-#define XCHAL_CP0_SA_SIZE 72 /* size of state save area */
-#define XCHAL_CP0_SA_ALIGN 4 /* min alignment of save area */
-#define XCHAL_CP_ID_FPU 0 /* coprocessor ID (0..7) */
-#define XCHAL_CP6_NAME "XAD"
-#define XCHAL_CP6_IDENT XAD
-#define XCHAL_CP6_SA_SIZE 576 /* size of state save area */
-#define XCHAL_CP6_SA_ALIGN 16 /* min alignment of save area */
-#define XCHAL_CP_ID_XAD 6 /* coprocessor ID (0..7) */
-
-/* Filler info for unassigned coprocessors, to simplify arrays etc: */
-#define XCHAL_CP1_SA_SIZE 0
-#define XCHAL_CP1_SA_ALIGN 1
-#define XCHAL_CP2_SA_SIZE 0
-#define XCHAL_CP2_SA_ALIGN 1
-#define XCHAL_CP3_SA_SIZE 0
-#define XCHAL_CP3_SA_ALIGN 1
-#define XCHAL_CP4_SA_SIZE 0
-#define XCHAL_CP4_SA_ALIGN 1
-#define XCHAL_CP5_SA_SIZE 0
-#define XCHAL_CP5_SA_ALIGN 1
-#define XCHAL_CP7_SA_SIZE 0
-#define XCHAL_CP7_SA_ALIGN 1
-
-/* Save area for non-coprocessor optional and custom (TIE) state: */
-#define XCHAL_NCP_SA_SIZE 4
-#define XCHAL_NCP_SA_ALIGN 4
-
-/* Total save area for optional and custom state (NCP + CPn): */
-#define XCHAL_TOTAL_SA_SIZE 672 /* with 16-byte align padding */
-#define XCHAL_TOTAL_SA_ALIGN 16 /* actual minimum alignment */
-
-/*
- * Detailed contents of save areas.
- * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
- * before expanding the XCHAL_xxx_SA_LIST() macros.
- *
- * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
- * dbnum,base,regnum,bitsz,gapsz,reset,x...)
- *
- * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
- * ccused = set if used by compiler without special options or code
- * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
- * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
- * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
- * name = lowercase reg name (no quotes)
- * galign = group byte alignment (power of 2) (galign >= align)
- * align = register byte alignment (power of 2)
- * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
- * (not including any pad bytes required to galign this or next reg)
- * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
- * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
- * regnum = reg index in regfile, or special/TIE-user reg number
- * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
- * gapsz = intervening bits, if bitsz bits not stored contiguously
- * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
- * reset = register reset value (or 0 if undefined at reset)
- * x = reserved for future use (0 until then)
- *
- * To filter out certain registers, e.g. to expand only the non-global
- * registers used by the compiler, you can do something like this:
- *
- * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
- * #define SELCC0(p...)
- * #define SELCC1(abikind,p...) SELAK##abikind(p)
- * #define SELAK0(p...) REG(p)
- * #define SELAK1(p...) REG(p)
- * #define SELAK2(p...)
- * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
- * ...what you want to expand...
- */
-
-#define XCHAL_NCP_SA_NUM 1
-#define XCHAL_NCP_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0)
-
-#define XCHAL_CP0_SA_NUM 18
-#define XCHAL_CP0_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,1,0, fcr, 4, 4, 4,0x03E8, ur,232, 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, fsr, 4, 4, 4,0x03E9, ur,233, 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f0, 4, 4, 4,0x0030, f,0 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f1, 4, 4, 4,0x0031, f,1 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f2, 4, 4, 4,0x0032, f,2 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f3, 4, 4, 4,0x0033, f,3 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f4, 4, 4, 4,0x0034, f,4 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f5, 4, 4, 4,0x0035, f,5 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f6, 4, 4, 4,0x0036, f,6 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f7, 4, 4, 4,0x0037, f,7 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f8, 4, 4, 4,0x0038, f,8 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f9, 4, 4, 4,0x0039, f,9 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f10, 4, 4, 4,0x003A, f,10 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f11, 4, 4, 4,0x003B, f,11 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f12, 4, 4, 4,0x003C, f,12 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f13, 4, 4, 4,0x003D, f,13 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f14, 4, 4, 4,0x003E, f,14 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, f15, 4, 4, 4,0x003F, f,15 , 32,0,0,0)
-
-#define XCHAL_CP1_SA_NUM 0
-#define XCHAL_CP1_SA_LIST(s) /* empty */
-
-#define XCHAL_CP2_SA_NUM 0
-#define XCHAL_CP2_SA_LIST(s) /* empty */
-
-#define XCHAL_CP3_SA_NUM 0
-#define XCHAL_CP3_SA_LIST(s) /* empty */
-
-#define XCHAL_CP4_SA_NUM 0
-#define XCHAL_CP4_SA_LIST(s) /* empty */
-
-#define XCHAL_CP5_SA_NUM 0
-#define XCHAL_CP5_SA_LIST(s) /* empty */
-
-#define XCHAL_CP6_SA_NUM 46
-#define XCHAL_CP6_SA_LIST(s) \
- XCHAL_SA_REG(s,0,0,1,0, ldcbhi,16, 4, 4,0x0300, ur,0 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldcblo, 4, 4, 4,0x0301, ur,1 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stcbhi, 4, 4, 4,0x0302, ur,2 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stcblo, 4, 4, 4,0x0303, ur,3 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbrbase, 4, 4, 4,0x0308, ur,8 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbroff, 4, 4, 4,0x0309, ur,9 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, ldbrinc, 4, 4, 4,0x030A, ur,10 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbrbase, 4, 4, 4,0x030B, ur,11 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbroff, 4, 4, 4,0x030C, ur,12 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, stbrinc, 4, 4, 4,0x030D, ur,13 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch0, 4, 4, 4,0x0318, ur,24 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch1, 4, 4, 4,0x0319, ur,25 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch2, 4, 4, 4,0x031A, ur,26 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,1,0, scratch3, 4, 4, 4,0x031B, ur,27 , 32,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra0,16,16,16,0x1010, wra,0 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra1,16,16,16,0x1011, wra,1 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra2,16,16,16,0x1012, wra,2 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra3,16,16,16,0x1013, wra,3 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra4,16,16,16,0x1014, wra,4 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra5,16,16,16,0x1015, wra,5 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra6,16,16,16,0x1016, wra,6 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra7,16,16,16,0x1017, wra,7 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra8,16,16,16,0x1018, wra,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra9,16,16,16,0x1019, wra,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra10,16,16,16,0x101A, wra,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra11,16,16,16,0x101B, wra,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra12,16,16,16,0x101C, wra,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra13,16,16,16,0x101D, wra,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra14,16,16,16,0x101E, wra,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wra15,16,16,16,0x101F, wra,15 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb0,16,16,16,0x1020, wrb,0 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb1,16,16,16,0x1021, wrb,1 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb2,16,16,16,0x1022, wrb,2 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb3,16,16,16,0x1023, wrb,3 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb4,16,16,16,0x1024, wrb,4 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb5,16,16,16,0x1025, wrb,5 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb6,16,16,16,0x1026, wrb,6 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb7,16,16,16,0x1027, wrb,7 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb8,16,16,16,0x1028, wrb,8 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb9,16,16,16,0x1029, wrb,9 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb10,16,16,16,0x102A, wrb,10 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb11,16,16,16,0x102B, wrb,11 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb12,16,16,16,0x102C, wrb,12 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb13,16,16,16,0x102D, wrb,13 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb14,16,16,16,0x102E, wrb,14 ,128,0,0,0) \
- XCHAL_SA_REG(s,0,0,2,0, wrb15,16,16,16,0x102F, wrb,15 ,128,0,0,0)
-
-#define XCHAL_CP7_SA_NUM 0
-#define XCHAL_CP7_SA_LIST(s) /* empty */
-
-/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
-#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8
-
-#endif /*_XTENSA_CORE_TIE_H*/
-
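The save-area comment block in the header above spells out a protocol: the consumer defines XCHAL_SA_REG and then expands one of the XCHAL_*_SA_LIST() macros. A minimal illustration (not from the tree) that turns the FPU list into a name/size table:

    #include <variant/tie.h>

    struct sa_reg_desc {
        const char *name;
        unsigned int size;
    };

    /*
     * One entry per register: stringify the 'name' field and keep the
     * allocated size 'asize'; the remaining fields are ignored here.
     */
    #define XCHAL_SA_REG(s, ccused, abikind, kind, opt, name, galign, align, \
                         asize, dbnum, base, regnum, bitsz, gapsz, reset, x...) \
        { #name, asize },

    static const struct sa_reg_desc cp0_sa_regs[] = {
        XCHAL_CP0_SA_LIST(0)    /* { "fcr", 4 }, { "fsr", 4 }, { "f0", 4 }, ... */
    };

    #undef XCHAL_SA_REG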
diff --git a/arch/xtensa/variants/s6000/irq.c b/arch/xtensa/variants/s6000/irq.c
deleted file mode 100644
index 81a241e79075..000000000000
--- a/arch/xtensa/variants/s6000/irq.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * s6000 irq crossbar
- *
- * Copyright (c) 2009 emlix GmbH
- * Authors: Johannes Weiner <hannes@cmpxchg.org>
- * Oskar Schirmer <oskar@scara.com>
- */
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <variant/hardware.h>
-
-/* S6_REG_INTC */
-#define INTC_STATUS 0x000
-#define INTC_RAW 0x010
-#define INTC_STATUS_AG 0x100
-#define INTC_CFG(n) (0x200 + 4 * (n))
-
-/*
- * The s6000 has a crossbar that multiplexes interrupt output lines
- * from the peripherals to input lines on the xtensa core.
- *
- * We leave the mapping decisions to the platform as it depends on the
- * actually connected peripherals which distribution makes sense.
- */
-extern const signed char *platform_irq_mappings[NR_IRQS];
-
-static unsigned long scp_to_intc_enable[] = {
-#define TO_INTC_ENABLE(n) (((n) << 1) + 1)
- TO_INTC_ENABLE(0),
- TO_INTC_ENABLE(1),
- TO_INTC_ENABLE(2),
- TO_INTC_ENABLE(3),
- TO_INTC_ENABLE(4),
- TO_INTC_ENABLE(5),
- TO_INTC_ENABLE(6),
- TO_INTC_ENABLE(7),
- TO_INTC_ENABLE(8),
- TO_INTC_ENABLE(9),
- TO_INTC_ENABLE(10),
- TO_INTC_ENABLE(11),
- TO_INTC_ENABLE(12),
- -1,
- -1,
- TO_INTC_ENABLE(13),
- -1,
- TO_INTC_ENABLE(14),
- -1,
- TO_INTC_ENABLE(15),
-#undef TO_INTC_ENABLE
-};
-
-static void irq_set(unsigned int irq, int enable)
-{
- unsigned long en;
- const signed char *m = platform_irq_mappings[irq];
-
- if (!m)
- return;
- en = enable ? scp_to_intc_enable[irq] : 0;
- while (*m >= 0) {
- writel(en, S6_REG_INTC + INTC_CFG(*m));
- m++;
- }
-}
-
-void variant_irq_enable(unsigned int irq)
-{
- irq_set(irq, 1);
-}
-
-void variant_irq_disable(unsigned int irq)
-{
- irq_set(irq, 0);
-}
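irq_set() above expects the platform to publish, for each core interrupt, a list of S6_INTC_* crossbar sources terminated by a negative value; NULL entries are skipped. A hypothetical table in that shape (the routing of the GMAC sources onto core line 3 is invented; the real table lived in the platform code):

    #include <asm/irq.h>
    #include <variant/hardware.h>

    /* Route both GMAC interrupt sources onto one core interrupt line. */
    static const signed char gmac_sources[] = {
        S6_INTC_GMAC_STAT,
        S6_INTC_GMAC_ERR,
        -1                      /* terminator expected by irq_set() */
    };

    const signed char *platform_irq_mappings[NR_IRQS] = {
        [3] = gmac_sources,     /* core irq line 3: invented for the example */
        /* entries left NULL are ignored by irq_set() */
    };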
diff --git a/block/bio.c b/block/bio.c
index 3e6e1986a5b2..471d7382c7d1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -748,6 +748,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
}
}
+ bio->bi_iter.bi_size += len;
goto done;
}
@@ -764,29 +765,32 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
return 0;
/*
- * we might lose a segment or two here, but rather that than
- * make this too complex.
+ * setup the new entry, we might clear it again later if we
+ * cannot add the page
+ */
+ bvec = &bio->bi_io_vec[bio->bi_vcnt];
+ bvec->bv_page = page;
+ bvec->bv_len = len;
+ bvec->bv_offset = offset;
+ bio->bi_vcnt++;
+ bio->bi_phys_segments++;
+ bio->bi_iter.bi_size += len;
+
+ /*
+ * Perform a recount if the number of segments is greater
+ * than queue_max_segments(q).
*/
- while (bio->bi_phys_segments >= queue_max_segments(q)) {
+ while (bio->bi_phys_segments > queue_max_segments(q)) {
if (retried_segments)
- return 0;
+ goto failed;
retried_segments = 1;
blk_recount_segments(q, bio);
}
/*
- * setup the new entry, we might clear it again later if we
- * cannot add the page
- */
- bvec = &bio->bi_io_vec[bio->bi_vcnt];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = offset;
-
- /*
* if queue has other restrictions (eg varying max sector size
* depending on offset), it can specify a merge_bvec_fn in the
* queue to get further control
@@ -795,7 +799,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_iter.bi_sector,
- .bi_size = bio->bi_iter.bi_size,
+ .bi_size = bio->bi_iter.bi_size - len,
.bi_rw = bio->bi_rw,
};
@@ -803,23 +807,25 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
- if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- return 0;
- }
+ if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len)
+ goto failed;
}
/* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
+ if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
- bio->bi_vcnt++;
- bio->bi_phys_segments++;
done:
- bio->bi_iter.bi_size += len;
return len;
+
+ failed:
+ bvec->bv_page = NULL;
+ bvec->bv_len = 0;
+ bvec->bv_offset = 0;
+ bio->bi_vcnt--;
+ bio->bi_iter.bi_size -= len;
+ blk_recount_segments(q, bio);
+ return 0;
}
/**
@@ -1739,6 +1745,34 @@ void bio_check_pages_dirty(struct bio *bio)
}
}
+void generic_start_io_acct(int rw, unsigned long sectors,
+ struct hd_struct *part)
+{
+ int cpu = part_stat_lock();
+
+ part_round_stats(cpu, part);
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, sectors[rw], sectors);
+ part_inc_in_flight(part, rw);
+
+ part_stat_unlock();
+}
+EXPORT_SYMBOL(generic_start_io_acct);
+
+void generic_end_io_acct(int rw, struct hd_struct *part,
+ unsigned long start_time)
+{
+ unsigned long duration = jiffies - start_time;
+ int cpu = part_stat_lock();
+
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part, rw);
+
+ part_stat_unlock();
+}
+EXPORT_SYMBOL(generic_end_io_acct);
+
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
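generic_start_io_acct()/generic_end_io_acct(), added above, factor out the per-partition accounting that bio-based drivers used to open-code. A sketch of a caller, assuming the make_request_fn signature of this kernel; the example_dev structure and its gd field are invented:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/jiffies.h>

    struct example_dev {
        struct gendisk *gd;     /* hypothetical driver state */
    };

    static void example_make_request(struct request_queue *q, struct bio *bio)
    {
        struct example_dev *dev = q->queuedata;
        unsigned long start = jiffies;
        int rw = bio_data_dir(bio);

        /* account the I/O against the whole-disk partition */
        generic_start_io_acct(rw, bio_sectors(bio), &dev->gd->part0);

        /* ... perform the transfer described by bio ... */

        generic_end_io_acct(rw, &dev->gd->part0, start);
        bio_endio(bio, 0);
    }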
diff --git a/block/blk-core.c b/block/blk-core.c
index ea1c4d0d7a44..30f6153a40c2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -525,6 +525,9 @@ void blk_cleanup_queue(struct request_queue *q)
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
+ if (q->mq_ops)
+ blk_mq_free_queue(q);
+
spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 1065d7c65fa1..5f13f4d0bcce 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -17,7 +17,7 @@
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
- return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
+ return cpu * nr_queues / nr_cpus;
}
static int get_first_sibling(unsigned int cpu)
@@ -90,7 +90,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
unsigned int *map;
/* If cpus are offline, map them to first hctx */
- map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
+ map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
set->numa_node);
if (!map)
return NULL;
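The new formula above spreads CPUs over all hardware queues, whereas the old rounded divisor could leave trailing queues unmapped. A standalone worked example with 4 possible CPUs and 3 queues (numbers chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int nr_cpus = 4, nr_queues = 3;
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
            unsigned int old_q = cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
            unsigned int new_q = cpu * nr_queues / nr_cpus;

            printf("cpu %u: old queue %u, new queue %u\n", cpu, old_q, new_q);
        }
        /* old mapping: 0 0 1 1 (queue 2 never used); new mapping: 0 0 1 2 */
        return 0;
    }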
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 371d8800b48a..1630a20d5dcf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -390,16 +390,15 @@ static void blk_mq_sysfs_init(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
- int i, j;
+ int i;
kobject_init(&q->mq_kobj, &blk_mq_ktype);
- queue_for_each_hw_ctx(q, hctx, i) {
+ queue_for_each_hw_ctx(q, hctx, i)
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
- hctx_for_each_ctx(hctx, ctx, j)
- kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
- }
+ queue_for_each_ctx(q, ctx, i)
+ kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
/* see blk_register_queue() */
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 728b9a4d5f56..32e8dbb9ad1c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -137,6 +137,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
int tag, org_last_tag, end;
+ bool wrap = last_tag != 0;
org_last_tag = last_tag;
end = bm->depth;
@@ -148,15 +149,16 @@ restart:
* We started with an offset, start from 0 to
* exhaust the map.
*/
- if (org_last_tag && last_tag) {
- end = last_tag;
+ if (wrap) {
+ wrap = false;
+ end = org_last_tag;
last_tag = 0;
goto restart;
}
return -1;
}
last_tag = tag + 1;
- } while (test_and_set_bit_lock(tag, &bm->word));
+ } while (test_and_set_bit(tag, &bm->word));
return tag;
}
@@ -254,6 +256,21 @@ static int bt_get(struct blk_mq_alloc_data *data,
if (tag != -1)
break;
+ /*
+ * We're out of tags on this hardware queue, kick any
+ * pending IO submits before going to sleep waiting for
+ * some to complete.
+ */
+ blk_mq_run_hw_queue(hctx, false);
+
+ /*
+ * Retry tag allocation after running the hardware queue,
+ * as running the queue may also have found completions.
+ */
+ tag = __bt_get(hctx, bt, last_tag);
+ if (tag != -1)
+ break;
+
blk_mq_put_ctx(data->ctx);
io_schedule();
@@ -340,11 +357,10 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
struct bt_wait_state *bs;
int wait_cnt;
- /*
- * The unlock memory barrier need to order access to req in free
- * path and clearing tag bit
- */
- clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
+ clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
+
+ /* Ensure that the wait list checks occur after clear_bit(). */
+ smp_mb();
bs = bt_wake_ptr(bt);
if (!bs)
@@ -360,21 +376,6 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
}
}
-static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
-{
- BUG_ON(tag >= tags->nr_tags);
-
- bt_clear_tag(&tags->bitmap_tags, tag);
-}
-
-static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
- unsigned int tag)
-{
- BUG_ON(tag >= tags->nr_reserved_tags);
-
- bt_clear_tag(&tags->breserved_tags, tag);
-}
-
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
unsigned int *last_tag)
{
@@ -383,10 +384,13 @@ void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
if (tag >= tags->nr_reserved_tags) {
const int real_tag = tag - tags->nr_reserved_tags;
- __blk_mq_put_tag(tags, real_tag);
+ BUG_ON(real_tag >= tags->nr_tags);
+ bt_clear_tag(&tags->bitmap_tags, real_tag);
*last_tag = real_tag;
- } else
- __blk_mq_put_reserved_tag(tags, tag);
+ } else {
+ BUG_ON(tag >= tags->nr_reserved_tags);
+ bt_clear_tag(&tags->breserved_tags, tag);
+ }
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
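After the rework above, __bt_get_word() scans from the cached last_tag to the end of the word and then wraps exactly once over the range it skipped, controlled by the new wrap flag. A single-threaded sketch of that scan (plain bit operations stand in for test_and_set_bit(), so this is not the kernel code itself):

    #include <stdbool.h>

    static int find_and_set(unsigned long *word, unsigned int depth,
                            unsigned int last_tag)
    {
        unsigned int end = depth, tag = last_tag;
        bool wrap = last_tag != 0;

        for (;;) {
            if (tag >= end) {
                if (!wrap)
                    return -1;          /* word exhausted */
                /* wrap once, restricted to the range we skipped */
                wrap = false;
                end = last_tag;
                tag = 0;
                continue;
            }
            if (!(*word & (1UL << tag))) {
                *word |= 1UL << tag;    /* non-atomic stand-in for test_and_set_bit() */
                return tag;
            }
            tag++;
        }
    }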
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 92ceef0d2ab9..da1ab5641227 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -279,17 +279,25 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
blk_mq_queue_exit(q);
}
-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
ctx->rq_completed[rq_is_sync(rq)]++;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
__blk_mq_free_request(hctx, ctx, rq);
+
+}
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
+
+void blk_mq_free_request(struct request *rq)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
+
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ blk_mq_free_hctx_request(hctx, rq);
}
+EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
@@ -591,7 +599,7 @@ static void blk_mq_rq_timer(unsigned long priv)
* If not software queues are currently mapped to this
* hardware queue, there's nothing to check
*/
- if (!hctx->nr_ctx || !hctx->tags)
+ if (!blk_mq_hw_queue_mapped(hctx))
continue;
blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
@@ -690,6 +698,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct request *rq;
LIST_HEAD(rq_list);
+ LIST_HEAD(driver_list);
+ struct list_head *dptr;
int queued;
WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
@@ -716,16 +726,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
}
/*
+ * Start off with dptr being NULL, so we start the first request
+ * immediately, even if we have more pending.
+ */
+ dptr = NULL;
+
+ /*
* Now process all the entries, sending them to the driver.
*/
queued = 0;
while (!list_empty(&rq_list)) {
+ struct blk_mq_queue_data bd;
int ret;
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
+ bd.rq = rq;
+ bd.list = dptr;
+ bd.last = list_empty(&rq_list);
+
+ ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_MQ_RQ_QUEUE_OK:
queued++;
@@ -744,6 +765,13 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
if (ret == BLK_MQ_RQ_QUEUE_BUSY)
break;
+
+ /*
+ * We've done the first request. If we have more than 1
+ * left in the list, set dptr to defer issue.
+ */
+ if (!dptr && rq_list.next != rq_list.prev)
+ dptr = &driver_list;
}
if (!queued)
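From this hunk on, ->queue_rq() takes a struct blk_mq_queue_data instead of separate request/last arguments, so every blk-mq driver has to be converted in lockstep. A compilable toy of the driver side after the conversion; the types below are reduced stand-ins that only mirror the three fields introduced here (rq, list, last), while the real definitions live in include/linux/blk-mq.h:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct request { int tag; };              /* toy stand-in */

/* Mirrors the fields added by the patch: rq, list, last. */
struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	int last;
};

/* Old style: the driver got (hctx, rq, last) as separate arguments.
 * New style: one descriptor; 'last' tells the driver whether it may
 * ring its doorbell now or expect more requests to follow at once.  */
static int toy_queue_rq(const struct blk_mq_queue_data *bd)
{
	printf("issue tag %d, last=%d\n", bd->rq->tag, bd->last);
	/* A driver that cannot take the request would return BUSY here. */
	return 0;
}

int main(void)
{
	struct request rq = { .tag = 7 };
	struct blk_mq_queue_data bd = { .rq = &rq, .list = NULL, .last = 1 };

	return toy_queue_rq(&bd);
}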
@@ -770,10 +798,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
- int cpu = hctx->next_cpu;
+ if (hctx->queue->nr_hw_queues == 1)
+ return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
- int next_cpu;
+ int cpu = hctx->next_cpu, next_cpu;
next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -781,26 +810,32 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+
+ return cpu;
}
- return cpu;
+ return hctx->next_cpu;
}
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
+ !blk_mq_hw_queue_mapped(hctx)))
return;
- if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
- __blk_mq_run_hw_queue(hctx);
- else if (hctx->queue->nr_hw_queues == 1)
- kblockd_schedule_delayed_work(&hctx->run_work, 0);
- else {
- unsigned int cpu;
+ if (!async) {
+ int cpu = get_cpu();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+ put_cpu();
+ return;
+ }
- cpu = blk_mq_hctx_next_cpu(hctx);
- kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
+ put_cpu();
}
+
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->run_work, 0);
}
void blk_mq_run_queues(struct request_queue *q, bool async)
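blk_mq_hctx_next_cpu() now returns the pre-advance CPU and only moves the round-robin cursor once a whole batch of dispatches has been handed out, so consecutive runs stay on one CPU for cache locality (and the single-hw-queue case short-circuits to WORK_CPU_UNBOUND before any of this runs). A stand-alone model of that batched round-robin; a plain array stands in for the cpumask and BATCH for BLK_MQ_CPU_WORK_BATCH:

#include <stdio.h>

#define BATCH 8                         /* stands in for BLK_MQ_CPU_WORK_BATCH */

static const int cpus[] = { 1, 3, 5 };  /* CPUs mapped to this hw queue */
static int next_idx;                    /* hctx->next_cpu analogue */
static int batch_left = BATCH;          /* hctx->next_cpu_batch analogue */

static int next_cpu(void)
{
	if (--batch_left <= 0) {
		int cpu = cpus[next_idx];

		/* Advance for the *next* batch, wrapping like cpumask_next(). */
		next_idx = (next_idx + 1) % (int)(sizeof(cpus) / sizeof(cpus[0]));
		batch_left = BATCH;
		return cpu;             /* the current batch finishes on the old CPU */
	}
	return cpus[next_idx];
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		printf("%d ", next_cpu());
	printf("\n");
	return 0;
}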
@@ -814,9 +849,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_run_queues);
@@ -843,9 +876,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, false);
- preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -870,9 +901,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
continue;
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@@ -898,16 +927,11 @@ static void blk_mq_delay_work_fn(struct work_struct *work)
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
- unsigned long tmo = msecs_to_jiffies(msecs);
-
- if (hctx->queue->nr_hw_queues == 1)
- kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
- else {
- unsigned int cpu;
+ if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
+ return;
- cpu = blk_mq_hctx_next_cpu(hctx);
- kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
- }
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);
@@ -1162,7 +1186,17 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
goto run_queue;
}
- if (is_sync) {
+ /*
+ * If the driver supports deferred issue based on 'last', then
+ * queue it up like normal since we can potentially save some
+ * CPU this way.
+ */
+ if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+ struct blk_mq_queue_data bd = {
+ .rq = rq,
+ .list = NULL,
+ .last = 1
+ };
int ret;
blk_mq_bio_to_request(rq, bio);
@@ -1172,7 +1206,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
* error (busy), just add it to our list as we previously
* would have done
*/
- ret = q->mq_ops->queue_rq(data.hctx, rq, true);
+ ret = q->mq_ops->queue_rq(data.hctx, &bd);
if (ret == BLK_MQ_RQ_QUEUE_OK)
goto done;
else {
@@ -1784,16 +1818,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (!ctx)
return ERR_PTR(-ENOMEM);
- /*
- * If a crashdump is active, then we are potentially in a very
- * memory constrained environment. Limit us to 1 queue and
- * 64 tags to prevent using too much memory.
- */
- if (is_kdump_kernel()) {
- set->nr_hw_queues = 1;
- set->queue_depth = min(64U, set->queue_depth);
- }
-
hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
set->numa_node);
@@ -2067,6 +2091,16 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
set->queue_depth = BLK_MQ_MAX_DEPTH;
}
+ /*
+ * If a crashdump is active, then we are potentially in a very
+ * memory constrained environment. Limit us to 1 queue and
+ * 64 tags to prevent using too much memory.
+ */
+ if (is_kdump_kernel()) {
+ set->nr_hw_queues = 1;
+ set->queue_depth = min(64U, set->queue_depth);
+ }
+
set->tags = kmalloc_node(set->nr_hw_queues *
sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d567d5283ffa..206230e64f79 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -115,4 +115,9 @@ static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
data->hctx = hctx;
}
+static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
+{
+ return hctx->nr_ctx && hctx->tags;
+}
+
#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index aa02247d227e..6ed2cbe5e8c9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -257,9 +257,7 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
__func__, max_hw_sectors);
}
- limits->max_hw_sectors = max_hw_sectors;
- limits->max_sectors = min_t(unsigned int, max_hw_sectors,
- BLK_DEF_MAX_SECTORS);
+ limits->max_sectors = limits->max_hw_sectors = max_hw_sectors;
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1fac43408911..935ea2aa0730 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -492,17 +492,15 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
* Currently, its primary task is to free all the &struct request
* structures that were allocated to the queue and the queue itself.
*
- * Caveat:
- * Hopefully the low level driver will have finished any
- * outstanding requests first...
+ * Note:
+ * The low level driver must have finished any outstanding requests first
+ * via blk_cleanup_queue().
**/
static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- blk_sync_queue(q);
-
blkcg_exit_queue(q);
if (q->elevator) {
@@ -517,9 +515,7 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
- if (q->mq_ops)
- blk_mq_free_queue(q);
- else
+ if (!q->mq_ops)
blk_free_flush_queue(q->fq);
blk_trace_shutdown(q);
diff --git a/block/genhd.c b/block/genhd.c
index bd3060684ab2..0a536dc05f3b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
struct disk_part_tbl *old_ptbl = disk->part_tbl;
struct disk_part_tbl *new_ptbl;
int len = old_ptbl ? old_ptbl->len : 0;
- int target = partno + 1;
+ int i, target;
size_t size;
- int i;
+
+ /*
+ * check for int overflow, since we can get here from blkpg_ioctl()
+ * with a user passed 'partno'.
+ */
+ target = partno + 1;
+ if (target < 0)
+ return -EINVAL;
/* disk_max_parts() is zero during initialization, ignore if so */
if (disk_max_parts(disk) && target > disk_max_parts(disk))
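The overflow check added to disk_expand_part_tbl() exists because partno arrives unchecked from the BLKPG ioctl, so partno + 1 can wrap. The kernel builds with signed-overflow wrapping effectively defined (-fno-strict-overflow), which is why checking the wrapped 'target < 0' is enough there; portable user-space code usually rejects the input before adding, as in this small sketch:

#include <limits.h>
#include <stdio.h>

/* Reject a user-supplied partition number before computing partno + 1. */
static int expand_to(int partno)
{
	if (partno < 0 || partno == INT_MAX)    /* would make partno + 1 wrap */
		return -1;                      /* -EINVAL in the kernel */

	int target = partno + 1;                /* now provably safe */

	printf("growing table to %d slots\n", target);
	return 0;
}

int main(void)
{
	expand_to(3);            /* ok */
	expand_to(INT_MAX);      /* rejected */
	expand_to(-5);           /* rejected */
	return 0;
}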
diff --git a/crypto/842.c b/crypto/842.c
index 65c7a89cfa09..b48f4f108c47 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("842 Compression Algorithm");
+MODULE_ALIAS_CRYPTO("842");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index fd0d6b454975..9b3c54c1cbe8 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1474,4 +1474,4 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 6a3ad8011585..1fa7bc31be63 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -399,7 +399,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
struct cmsghdr *cmsg;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_ALG)
@@ -421,6 +421,12 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
con->op = *(u32 *)CMSG_DATA(cmsg);
break;
+ case ALG_SET_AEAD_ASSOCLEN:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+ con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
+ break;
+
default:
return -EINVAL;
}
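The new ALG_SET_AEAD_ASSOCLEN control message lets user space tell an AF_ALG socket how many of the bytes it is about to send are associated data rather than plaintext or ciphertext; the AEAD socket type that actually consumes con->aead_assoclen is wired up separately. A hedged user-space sketch of building that cmsg; it assumes linux/if_alg.h exports the constant, and the fallback value 4 is my recollection of the header rather than something this patch guarantees:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif
#ifndef ALG_SET_AEAD_ASSOCLEN           /* present in newer linux/if_alg.h */
#define ALG_SET_AEAD_ASSOCLEN 4         /* assumed value, verify your header */
#endif

/* Attach the AAD length to a sendmsg() destined for an AF_ALG socket. */
static void set_assoclen(struct msghdr *msg, void *cbuf, size_t cbuflen,
			 uint32_t assoclen)
{
	struct cmsghdr *cmsg;

	memset(cbuf, 0, cbuflen);
	msg->msg_control = cbuf;
	msg->msg_controllen = cbuflen;

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(sizeof(assoclen));
	memcpy(CMSG_DATA(cmsg), &assoclen, sizeof(assoclen));
}

int main(void)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct msghdr msg = { 0 };

	set_assoclen(&msg, cbuf, sizeof(cbuf), 16);
	printf("controllen=%zu\n", (size_t)msg.msg_controllen);
	return 0;
}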
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e8d3a7dca8c4..71a8143e23b1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -509,8 +509,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
struct crypto_template *crypto_lookup_template(const char *name)
{
- return try_then_request_module(__crypto_lookup_template(name), "%s",
- name);
+ return try_then_request_module(__crypto_lookup_template(name),
+ "crypto-%s", name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 850246206b12..01f56eb7816e 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -42,7 +42,7 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned long iovlen;
- struct iovec *iov;
+ const struct iovec *iov;
long copied = 0;
int err;
@@ -58,7 +58,7 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
- for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
@@ -174,7 +174,7 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
goto unlock;
}
- err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+ err = memcpy_to_msg(msg, ctx->result, len);
unlock:
release_sock(sk);
@@ -258,8 +258,8 @@ static void hash_sock_destruct(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- sock_kfree_s(sk, ctx->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ sock_kzfree_s(sk, ctx->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 83187f497c7c..c12207c8dde9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -251,6 +251,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
struct af_alg_control con = {};
long copied = 0;
bool enc = 0;
+ bool init = 0;
int err;
int i;
@@ -259,6 +260,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
if (err)
return err;
+ init = 1;
switch (con.op) {
case ALG_OP_ENCRYPT:
enc = 1;
@@ -280,7 +282,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
if (!ctx->more && ctx->used)
goto unlock;
- if (!ctx->used) {
+ if (init) {
ctx->enc = enc;
if (con.iv)
memcpy(ctx->iv, con.iv->iv, ivsize);
@@ -298,9 +300,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
len = min_t(unsigned long, len,
PAGE_SIZE - sg->offset - sg->length);
- err = memcpy_fromiovec(page_address(sg_page(sg)) +
- sg->offset + sg->length,
- msg->msg_iov, len);
+ err = memcpy_from_msg(page_address(sg_page(sg)) +
+ sg->offset + sg->length,
+ msg, len);
if (err)
goto unlock;
@@ -337,8 +339,8 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
if (!sg_page(sg + i))
goto unlock;
- err = memcpy_fromiovec(page_address(sg_page(sg + i)),
- msg->msg_iov, plen);
+ err = memcpy_from_msg(page_address(sg_page(sg + i)),
+ msg, plen);
if (err) {
__free_page(sg_page(sg + i));
sg_assign_page(sg + i, NULL);
@@ -359,8 +361,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
err = 0;
ctx->more = msg->msg_flags & MSG_MORE;
- if (!ctx->more && !list_empty(&ctx->tsgl))
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
unlock:
skcipher_data_wakeup(sk);
@@ -408,8 +408,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
done:
ctx->more = flags & MSG_MORE;
- if (!ctx->more && !list_empty(&ctx->tsgl))
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
unlock:
skcipher_data_wakeup(sk);
@@ -429,13 +427,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
- struct iovec *iov;
+ const struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
- for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
@@ -448,14 +446,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
while (!sg->length)
sg++;
- used = ctx->used;
- if (!used) {
+ if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
- used = min_t(unsigned long, used, seglen);
+ used = min_t(unsigned long, ctx->used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
@@ -566,7 +563,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
skcipher_free_sgl(sk);
- sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 666f1962a160..b4485a108389 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -476,4 +476,4 @@ module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 008c8a4fb67c..4bb187c2a902 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("anubis");
diff --git a/crypto/api.c b/crypto/api.c
index a2b39c5f3649..2a81e98a0021 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
alg = crypto_alg_lookup(name, type, mask);
if (!alg) {
- request_module("%s", name);
+ request_module("crypto-%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
CRYPTO_ALG_NEED_FALLBACK))
- request_module("%s-all", name);
+ request_module("crypto-%s-all", name);
alg = crypto_alg_lookup(name, type, mask);
}
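All of the MODULE_ALIAS() to MODULE_ALIAS_CRYPTO() conversions that follow in crypto/ pair with this request_module("crypto-%s") change: the core now only autoloads modules that explicitly declare a crypto alias, so a user-supplied algorithm name can no longer be used to pull in arbitrary, unrelated modules by name. As far as I recall the helper is defined roughly like this in include/linux/crypto.h; treat the exact expansion as an assumption and check your tree:

/* Emits both the bare alias (kept for existing userspace) and the
 * "crypto-" prefixed one matched by request_module("crypto-%s", name). */
#define MODULE_ALIAS_CRYPTO(name)			\
		__MODULE_INFO(alias, alias_userbld, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

/* Usage, as in the conversions below: */
MODULE_ALIAS_CRYPTO("sha256");   /* modalias "sha256" and "crypto-sha256" */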
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 5a772c3657d5..f1a81925558f 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -166,3 +166,4 @@ module_exit(arc4_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
+MODULE_ALIAS_CRYPTO("arc4");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index e1223559d5df..78fb16cab13f 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
+MODULE_ALIAS_CRYPTO("authenc");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4be0dd4373a9..024bff2344fc 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
+MODULE_ALIAS_CRYPTO("authencesn");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 8baf5447d35b..7bd71f02d0dd 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -138,4 +138,4 @@ module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
-MODULE_ALIAS("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 26bcd7a2d6b4..1b74c5a3e891 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1098,4 +1098,4 @@ module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("camellia");
+MODULE_ALIAS_CRYPTO("camellia");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 5558f630a0eb..84c86db67ec7 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -549,4 +549,4 @@ module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index de732528a430..f408f0bd8de2 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -291,4 +291,4 @@ module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 61ac42e1e32b..780ee27b2d43 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cbc");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1df84217f7c9..003bbbd21a2b 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
-MODULE_ALIAS("ccm_base");
-MODULE_ALIAS("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm_base");
+MODULE_ALIAS_CRYPTO("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 9c294c8f9a07..63c17d5992f7 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");
+MODULE_ALIAS_CRYPTO("chainiv");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 50880cf17fad..7a8bfbd548f6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("cmac");
diff --git a/crypto/crc32.c b/crypto/crc32.c
index 9d1c41569898..187ded28cb0b 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32.c
@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index d9c7beba8e50..2a062025749d 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -170,5 +170,5 @@ module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c");
MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 877e7114ec5c..08bb4f504520 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -124,4 +124,4 @@ module_exit(crct10dif_mod_fini);
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index e592c90abebb..650afac10fd7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
+MODULE_ALIAS_CRYPTO("cryptd");
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 1dc54bb95a87..a20319132e33 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -145,9 +145,9 @@ static struct crypto_alg null_algs[3] = { {
.coa_decompress = null_compress } }
} };
-MODULE_ALIAS("compress_null");
-MODULE_ALIAS("digest_null");
-MODULE_ALIAS("cipher_null");
+MODULE_ALIAS_CRYPTO("compress_null");
+MODULE_ALIAS_CRYPTO("digest_null");
+MODULE_ALIAS_CRYPTO("cipher_null");
static int __init crypto_null_mod_init(void)
{
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index e2a34feec7a4..c5148a35ae0a 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -201,10 +201,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
- if (!p->cru_driver_name[0])
- return -EINVAL;
-
- alg = crypto_alg_match(p, 1);
+ alg = crypto_alg_match(p, 0);
if (!alg)
return -ENOENT;
@@ -537,3 +534,4 @@ module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
+MODULE_ALIAS("net-pf-16-proto-21");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index f2b94f27bb2c..2386f7313952 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");
-MODULE_ALIAS("rfc3686");
+MODULE_ALIAS_CRYPTO("rfc3686");
+MODULE_ALIAS_CRYPTO("ctr");
diff --git a/crypto/cts.c b/crypto/cts.c
index 133f0874c95e..bd9405820e8a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -351,3 +351,4 @@ module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
+MODULE_ALIAS_CRYPTO("cts");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index b57d70eb156b..95d8d37c5021 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-
+MODULE_ALIAS_CRYPTO("deflate");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 298d464ab7d2..42912948776b 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,7 +983,7 @@ static struct crypto_alg des_algs[2] = { {
.cia_decrypt = des3_ede_decrypt } }
} };
-MODULE_ALIAS("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede");
static int __init des_generic_mod_init(void)
{
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 54cfd4820abc..d748a1d0ca24 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
*/
#include <crypto/drbg.h>
+#include <linux/string.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@@ -283,38 +284,6 @@ static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
conversion->conv = cpu_to_be32(val);
}
-
-/*
- * Increment buffer
- *
- * @dst buffer to increment
- * @add value to add
- */
-static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
- const unsigned char *add, size_t addlen)
-{
- /* implied: dstlen > addlen */
- unsigned char *dstptr;
- const unsigned char *addptr;
- unsigned int remainder = 0;
- size_t len = addlen;
-
- dstptr = dst + (dstlen-1);
- addptr = add + (addlen-1);
- while (len) {
- remainder += *dstptr + *addptr;
- *dstptr = remainder & 0xff;
- remainder >>= 8;
- len--; dstptr--; addptr--;
- }
- len = dstlen - addlen;
- while (len && remainder > 0) {
- remainder = *dstptr + 1;
- *dstptr = remainder & 0xff;
- remainder >>= 8;
- len--; dstptr--;
- }
-}
#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
/******************************************************************
@@ -323,6 +292,13 @@ static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
#ifdef CONFIG_CRYPTO_DRBG_CTR
#define CRYPTO_DRBG_CTR_STRING "CTR "
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
+
static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
unsigned char *outval, const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
@@ -522,9 +498,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
ret = 0;
out:
- memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg));
- memset(pad, 0, drbg_blocklen(drbg));
+ memzero_explicit(iv, drbg_blocklen(drbg));
+ memzero_explicit(temp, drbg_statelen(drbg));
+ memzero_explicit(pad, drbg_blocklen(drbg));
return ret;
}
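memzero_explicit() is used in these DRBG hunks because the buffers being wiped are dead once the function returns, and a plain memset() of a dead local is exactly the kind of store compilers are allowed to drop. A user-space sketch of the same idea; the kernel's version relies on barrier_data(), and the inline-asm barrier below is the common GCC/Clang equivalent, an illustration rather than the kernel implementation:

#include <string.h>
#include <stdio.h>

static void wipe(void *s, size_t n)
{
	memset(s, 0, n);
	/* Pretend to the optimizer that 's' escapes, so the memset above
	 * cannot be eliminated even though the buffer is about to die.  */
	__asm__ __volatile__("" : : "r"(s) : "memory");
}

int main(void)
{
	unsigned char key[32] = { 0xde, 0xad, 0xbe, 0xef };

	/* ... use key ... */
	wipe(key, sizeof(key));
	printf("first byte after wipe: %d\n", key[0]);
	return 0;
}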
@@ -554,7 +530,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
unsigned int len = 0;
struct drbg_string cipherin;
- unsigned char prefix = DRBG_PREFIX1;
memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (3 > reseed)
@@ -574,7 +549,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
*/
while (len < (drbg_statelen(drbg))) {
/* 10.2.1.2 step 2.1 */
- drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
/*
* 10.2.1.2 step 2.2 */
ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
@@ -599,9 +574,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
ret = 0;
out:
- memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+ memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (2 != reseed)
- memset(df_data, 0, drbg_statelen(drbg));
+ memzero_explicit(df_data, drbg_statelen(drbg));
return ret;
}
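The drbg_add_buf(drbg->V, ..., &prefix, 1) calls replaced in this and the following hunks added a single prefix byte to the counter, which amounts to a plain increment, so crypto_inc() is a drop-in: a big-endian increment with carry. The kernel helper in crypto/algapi.c walks 32-bit chunks for speed; the byte loop below only models the semantics:

#include <stdio.h>
#include <stddef.h>

/* Big-endian increment with carry, i.e. what crypto_inc() computes. */
static void be_inc(unsigned char *buf, size_t len)
{
	for (size_t i = len; i-- > 0; )
		if (++buf[i] != 0)      /* stop once a byte did not wrap */
			break;
}

int main(void)
{
	unsigned char ctr[4] = { 0x00, 0x00, 0xff, 0xff };

	be_inc(ctr, sizeof(ctr));
	printf("%02x%02x%02x%02x\n", ctr[0], ctr[1], ctr[2], ctr[3]);  /* 00010000 */
	return 0;
}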
@@ -617,7 +592,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
int len = 0;
int ret = 0;
struct drbg_string data;
- unsigned char prefix = DRBG_PREFIX1;
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
@@ -629,7 +603,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
- drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
while (len < buflen) {
int outlen = 0;
@@ -643,7 +617,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
drbg_blocklen(drbg) : (buflen - len);
if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
/* 10.2.1.5.2 step 6 */
- drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
continue;
}
/* 10.2.1.5.2 step 4.3 */
@@ -651,7 +625,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
len += outlen;
/* 10.2.1.5.2 step 6 */
if (len < buflen)
- drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+ crypto_inc(drbg->V, drbg_blocklen(drbg));
}
/* 10.2.1.5.2 step 6 */
@@ -660,7 +634,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
len = ret;
out:
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+ memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
return len;
}
@@ -685,6 +659,15 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg);
#ifdef CONFIG_CRYPTO_DRBG_HMAC
#define CRYPTO_DRBG_HMAC_STRING "HMAC "
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
+
/* update function of HMAC DRBG as defined in 10.1.2.2 */
static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
@@ -796,6 +779,47 @@ static struct drbg_state_ops drbg_hmac_ops = {
#ifdef CONFIG_CRYPTO_DRBG_HASH
#define CRYPTO_DRBG_HASH_STRING "HASH "
+MODULE_ALIAS_CRYPTO("drbg_pr_sha512");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha512");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");
+
+/*
+ * Add a buffer to another buffer (byte-wise big-endian addition)
+ *
+ * @dst buffer to add to
+ * @add value to add
+ */
+static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
+ const unsigned char *add, size_t addlen)
+{
+ /* implied: dstlen > addlen */
+ unsigned char *dstptr;
+ const unsigned char *addptr;
+ unsigned int remainder = 0;
+ size_t len = addlen;
+
+ dstptr = dst + (dstlen-1);
+ addptr = add + (addlen-1);
+ while (len) {
+ remainder += *dstptr + *addptr;
+ *dstptr = remainder & 0xff;
+ remainder >>= 8;
+ len--; dstptr--; addptr--;
+ }
+ len = dstlen - addlen;
+ while (len && remainder > 0) {
+ remainder = *dstptr + 1;
+ *dstptr = remainder & 0xff;
+ remainder >>= 8;
+ len--; dstptr--;
+ }
+}
+
/*
* scratchpad usage: as drbg_hash_update and drbg_hash_df are used
* interlinked, the scratchpad is used as follows:
@@ -848,7 +872,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
}
out:
- memset(tmp, 0, drbg_blocklen(drbg));
+ memzero_explicit(tmp, drbg_blocklen(drbg));
return ret;
}
@@ -892,7 +916,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
out:
- memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+ memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
return ret;
}
@@ -927,7 +951,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
drbg->scratchpad, drbg_blocklen(drbg));
out:
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+ memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
return ret;
}
@@ -942,7 +966,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
LIST_HEAD(datalist);
- unsigned char prefix = DRBG_PREFIX1;
memset(src, 0, drbg_statelen(drbg));
memset(dst, 0, drbg_blocklen(drbg));
@@ -963,7 +986,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
if (!drbg_fips_continuous_test(drbg, dst)) {
- drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+ crypto_inc(src, drbg_statelen(drbg));
continue;
}
/* 10.1.1.4 step hashgen 4.2 */
@@ -971,11 +994,11 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
len += outlen;
/* 10.1.1.4 hashgen step 4.3 */
if (len < buflen)
- drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+ crypto_inc(src, drbg_statelen(drbg));
}
out:
- memset(drbg->scratchpad, 0,
+ memzero_explicit(drbg->scratchpad,
(drbg_statelen(drbg) + drbg_blocklen(drbg)));
return len;
}
@@ -1024,7 +1047,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
- memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+ memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
return len;
}
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 935cfef4aa84..12011aff0971 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("ecb");
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index bf7ab4a89493..f116fae766f8 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("eseqiv");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 021d7fec6bc8..77286ea28865 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_ALIAS_CRYPTO("fcrypt");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 276cdac567b6..2e403f6138c1 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Galois/Counter Mode");
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
-MODULE_ALIAS("gcm_base");
-MODULE_ALIAS("rfc4106");
-MODULE_ALIAS("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm_base");
+MODULE_ALIAS_CRYPTO("rfc4106");
+MODULE_ALIAS_CRYPTO("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 9d3f0c69a86f..4e97fae9666f 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -172,4 +172,4 @@ module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e392219ddc61..72e38c098bb3 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -268,3 +268,4 @@ module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("hmac");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 60e7cd66facc..873eb5ded6d7 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("khazad");
diff --git a/crypto/krng.c b/crypto/krng.c
index a2d2b72fc135..67c88b331210 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -62,4 +62,4 @@ module_exit(krng_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kernel Random Number Generator");
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ba42acc4deba..6f9908a7ebcb 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
+MODULE_ALIAS_CRYPTO("lrw");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 34d072b72a73..aefbceaf3104 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZ4 Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lz4");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 9218b3fed5e3..a1d3b5bd3d85 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lz4hc");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index a8ff2f778dc4..4b3e92525dac 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -107,3 +107,4 @@ module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lzo");
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index b39fbd530102..a8e870444ea9 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -703,3 +703,4 @@ module_exit(mcryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
+MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/md4.c b/crypto/md4.c
index 0477a6a01d58..3515af425cc9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
-
+MODULE_ALIAS_CRYPTO("md4");
diff --git a/crypto/md5.c b/crypto/md5.c
index 7febeaab923b..36f5e5b103f3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
+MODULE_ALIAS_CRYPTO("md5");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 079b761bc70d..46195e0d0f4d 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Michael MIC");
MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
+MODULE_ALIAS_CRYPTO("michael_mic");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index d1b8bdfb5855..f654965f0933 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("pcbc");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 309d345ead95..c305d4112735 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
+MODULE_ALIAS_CRYPTO("pcrypt");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 8a0f68b7f257..049486ede938 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd128");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 525d7bb752cf..de585e51d455 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd160");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 69293d9b56e0..4ec02a754e09 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd256");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 09f97dfdfbba..770f2cb369f8 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd320");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 9a4770c02284..3d0f9df30ac9 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -248,4 +248,4 @@ module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
-MODULE_ALIAS("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20");
diff --git a/crypto/seed.c b/crypto/seed.c
index 9c904d6d2151..c6ba8438be43 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -476,3 +476,4 @@ module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
+MODULE_ALIAS_CRYPTO("seed");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ee190fcedcd2..9daa854cc485 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("seqiv");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7ddbd7e88859..a53b5e2af335 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -665,5 +665,5 @@ module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
-MODULE_ALIAS("tnepres");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("tnepres");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 7bb047432782..039e58cfa155 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -153,4 +153,4 @@ module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 65e7b76b057f..5eb21b120033 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -384,5 +384,5 @@ module_exit(sha256_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 95db67197cd9..8d0b19ed4f4b 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -288,5 +288,5 @@ module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 890449e6e7ef..1d864e988ea9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1225,15 +1225,22 @@ static inline int tcrypt_test(const char *alg)
return ret;
}
-static int do_test(int m)
+static int do_test(const char *alg, u32 type, u32 mask, int m)
{
int i;
int ret = 0;
switch (m) {
case 0:
+ if (alg) {
+ if (!crypto_has_alg(alg, type,
+ mask ?: CRYPTO_ALG_TYPE_MASK))
+ ret = -ENOENT;
+ break;
+ }
+
for (i = 1; i < 200; i++)
- ret += do_test(i);
+ ret += do_test(NULL, 0, 0, i);
break;
case 1:
@@ -1752,6 +1759,11 @@ static int do_test(int m)
break;
case 300:
+ if (alg) {
+ test_hash_speed(alg, sec, generic_hash_speed_template);
+ break;
+ }
+
/* fall through */
case 301:
@@ -1838,6 +1850,11 @@ static int do_test(int m)
break;
case 400:
+ if (alg) {
+ test_ahash_speed(alg, sec, generic_hash_speed_template);
+ break;
+ }
+
/* fall through */
case 401:
@@ -2127,12 +2144,6 @@ static int do_test(int m)
return ret;
}
-static int do_alg_test(const char *alg, u32 type, u32 mask)
-{
- return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
- 0 : -ENOENT;
-}
-
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
@@ -2144,10 +2155,7 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
- if (alg)
- err = do_alg_test(alg, type, mask);
- else
- err = do_test(mode);
+ err = do_test(alg, type, mask, mode);
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
diff --git a/crypto/tea.c b/crypto/tea.c
index 0a572323ee4a..495be2d0077d 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,8 +270,8 @@ static void __exit tea_mod_fini(void)
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
-MODULE_ALIAS("xtea");
-MODULE_ALIAS("xeta");
+MODULE_ALIAS_CRYPTO("xtea");
+MODULE_ALIAS_CRYPTO("xeta");
module_init(tea_mod_init);
module_exit(tea_mod_fini);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29a0cbdd0d19..037368d34586 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3708,8 +3708,7 @@ test_done:
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
if (fips_enabled && !rc)
- pr_info(KERN_INFO "alg: self-tests for %s (%s) passed\n",
- driver, alg);
+ pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
return rc;
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 3c7af0d1ff7a..6e5651c66cf8 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,8 +676,8 @@ static void __exit tgr192_mod_fini(void)
crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
}
-MODULE_ALIAS("tgr160");
-MODULE_ALIAS("tgr128");
+MODULE_ALIAS_CRYPTO("tgr160");
+MODULE_ALIAS_CRYPTO("tgr128");
module_init(tgr192_mod_init);
module_exit(tgr192_mod_fini);
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 2d5000552d0f..523ad8c4e359 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -211,4 +211,4 @@ module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index d84c24bd7ff7..df76a816cfb2 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("vmac");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index ec64e7762fbb..0de42eb3d040 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,8 +1167,8 @@ static void __exit wp512_mod_fini(void)
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
-MODULE_ALIAS("wp384");
-MODULE_ALIAS("wp256");
+MODULE_ALIAS_CRYPTO("wp384");
+MODULE_ALIAS_CRYPTO("wp256");
module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index a5fbdf3738cf..df90b332554c 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("xcbc");
diff --git a/crypto/xts.c b/crypto/xts.c
index ca1608f44cb5..f6fd43f100c8 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
+MODULE_ALIAS_CRYPTO("xts");
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c9ee681d57fd..0eefa9d237ac 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zlib Compression Algorithm");
MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS_CRYPTO("zlib");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index af02a8a8ec4a..694d5a70d6ce 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -184,4 +184,6 @@ source "drivers/ras/Kconfig"
source "drivers/thunderbolt/Kconfig"
+source "drivers/android/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index ebee55537a05..67d2334dc41e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -161,3 +161,5 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+obj-$(CONFIG_CORESIGHT) += coresight/
+obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index f148a0580e04..c7b105c0e1d3 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -350,12 +350,10 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
static ssize_t acpi_pad_idlecpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0;
- n = cpumask_scnprintf(buf, PAGE_SIZE-2, to_cpumask(pad_busy_cpus_bits));
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(false, buf,
+ to_cpumask(pad_busy_cpus_bits));
}
+
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
acpi_pad_idlecpus_show,
acpi_pad_idlecpus_store);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 1b6aa514848f..e82d0976a5d0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1087,7 +1087,6 @@ static int ghes_remove(struct platform_device *ghes_dev)
static struct platform_driver ghes_platform_driver = {
.driver = {
.name = "GHES",
- .owner = THIS_MODULE,
},
.probe = ghes_probe,
.remove = ghes_remove,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 7556e7c4a055..9b693d54c743 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -305,60 +305,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
*/
/*
- * Lenovo has a mix of systems OSI(Linux) situations
- * and thus we can not wildcard the vendor.
- *
- * _OSI(Linux) helps sound
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- * T400, T500
- * _OSI(Linux) has Linux specific hooks
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- * _OSI(Linux) is a NOP:
- * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
- * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
- */
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad R61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad X61",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T400",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
- },
- },
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Lenovo ThinkPad T500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
- },
- },
- /*
* Without this, this EEEpc exports a non-working WMI interface; with
* it, it exports a working "good old" eeepc_laptop interface, fixing
* both brightness control and rfkill not working.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e038ea..c6bcb8c719d8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
+#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0;
+ bool hotadd = system_state != SYSTEM_BOOTING;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
+ if (hotadd && dmar_device_add(handle)) {
+ result = -ENXIO;
+ goto end;
+ }
+
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
- goto end;
+ goto remove_dmar;
}
if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- if (system_state != SYSTEM_BOOTING) {
+ if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove();
return 1;
+remove_dmar:
+ if (hotadd)
+ dmar_device_remove(handle);
end:
kfree(root);
return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus);
+ dmar_device_remove(device->handle);
+
pci_unlock_rescan_remove();
kfree(root);
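The new remove_dmar: label follows the usual kernel error-unwind idiom: resources acquired later are released first, and a step that only ran in the hot-add case is only undone in that case. A stand-alone skeleton of that pattern; the work is simulated with flags and only the control-flow shape matches the hunk above:

#include <stdio.h>
#include <stdbool.h>

static int add_root(bool hotadd, bool dmar_fails, bool scan_fails)
{
	int result;

	if (hotadd) {
		if (dmar_fails) {
			result = -6;              /* -ENXIO, nothing to undo yet */
			goto end;
		}
		printf("dmar_device_add()\n");
	}

	if (scan_fails) {
		result = -19;                     /* -ENODEV */
		goto remove_dmar;                 /* undo the dmar step first */
	}

	return 1;                                 /* success returns early */

remove_dmar:
	if (hotadd)
		printf("dmar_device_remove()\n");
end:
	printf("kfree(root)\n");                  /* cleanup shared by all errors */
	return result;
}

int main(void)
{
	return add_root(true, false, true) < 0 ? 0 : 1;
}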
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 973a3332a85f..52ddd9fbb55e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -95,8 +95,12 @@ static int amba_pm_runtime_suspend(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
int ret = pm_generic_runtime_suspend(dev);
- if (ret == 0 && dev->driver)
- clk_disable_unprepare(pcdev->pclk);
+ if (ret == 0 && dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ clk_disable(pcdev->pclk);
+ else
+ clk_disable_unprepare(pcdev->pclk);
+ }
return ret;
}
@@ -107,7 +111,10 @@ static int amba_pm_runtime_resume(struct device *dev)
int ret;
if (dev->driver) {
- ret = clk_prepare_enable(pcdev->pclk);
+ if (pm_runtime_is_irq_safe(dev))
+ ret = clk_enable(pcdev->pclk);
+ else
+ ret = clk_prepare_enable(pcdev->pclk);
/* Failure is probably fatal to the system, but... */
if (ret)
return ret;
@@ -115,7 +122,7 @@ static int amba_pm_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#endif
+#endif /* CONFIG_PM */
static const struct dev_pm_ops amba_pm = {
.suspend = pm_generic_suspend,
@@ -336,7 +343,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
amba_put_disable_pclk(dev);
- if (cid == AMBA_CID)
+ if (cid == AMBA_CID || cid == CORESIGHT_CID)
dev->periphid = pid;
if (!dev->periphid)
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index d8961ef4d2e7..c6dc3548e5d1 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -277,7 +277,6 @@ static struct platform_driver tegra_ahb_driver = {
.probe = tegra_ahb_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra_ahb_of_match,
.pm = &tegra_ahb_pm,
},
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
new file mode 100644
index 000000000000..bdfc6c6f4f5a
--- /dev/null
+++ b/drivers/android/Kconfig
@@ -0,0 +1,37 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ depends on MMU
+ default n
+ ---help---
+ Binder is used in Android both for communication between processes
+ and for remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
+config ANDROID_BINDER_IPC_32BIT
+ bool
+ depends on !64BIT && ANDROID_BINDER_IPC
+ default y
+ ---help---
+ The Binder API has been changed to support both 32 and 64bit
+ applications in a mixed environment.
+
+ Enable this to support an old 32-bit Android user-space (v4.4 and
+ earlier).
+
+ Note that enabling this will break newer Android user-space.
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
new file mode 100644
index 000000000000..3b7e4b072c58
--- /dev/null
+++ b/drivers/android/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
diff --git a/drivers/staging/android/binder.c b/drivers/android/binder.c
index c69c40d69d5c..8c43521d3f11 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/android/binder.c
@@ -38,7 +38,11 @@
#include <linux/slab.h>
#include <linux/pid_namespace.h>
-#include "binder.h"
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
diff --git a/drivers/staging/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..7f20f3dc8369 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index ad1e71ec10cf..ce8a7a6d6c7f 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -103,7 +103,6 @@ static struct platform_driver ahci_da850_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "ahci_da850",
- .owner = THIS_MODULE,
.pm = &ahci_da850_pm_ops,
},
};
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index f3970b4ed889..35d51c59a370 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -679,7 +679,6 @@ static struct platform_driver imx_ahci_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "ahci-imx",
- .owner = THIS_MODULE,
.of_match_table = imx_ahci_of_match,
.pm = &ahci_imx_pm_ops,
},
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 68672d2692ee..64bb08432b69 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -115,7 +115,6 @@ static struct platform_driver ahci_mvebu_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "ahci-mvebu",
- .owner = THIS_MODULE,
.of_match_table = ahci_mvebu_of_match,
},
};
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 06f1d59fa678..18d539837045 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -76,7 +76,6 @@ static struct platform_driver ahci_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "ahci",
- .owner = THIS_MODULE,
.of_match_table = ahci_of_match,
.pm = &ahci_pm_ops,
},
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 835d6eea84fd..2f9e8317cc16 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -230,7 +230,6 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = "st_ahci",
- .owner = THIS_MODULE,
.pm = &st_ahci_pm_ops,
.of_match_table = of_match_ptr(st_ahci_match),
},
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index e44d675a30ec..e2e0da539a2f 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -27,6 +27,12 @@
#include <linux/regulator/consumer.h>
#include "ahci.h"
+/* Insmod parameters */
+static bool enable_pmp;
+module_param(enable_pmp, bool, 0);
+MODULE_PARM_DESC(enable_pmp,
+ "Enable support for sata port multipliers, only use if you use a pmp!");
+
#define AHCI_BISTAFR 0x00a0
#define AHCI_BISTCR 0x00a4
#define AHCI_BISTFCTR 0x00a8
@@ -184,7 +190,15 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
goto disable_resources;
hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+ AHCI_HFLAG_YES_NCQ;
+
+ /*
+ * The sunxi sata controller seems to be unable to successfully do a
+ * soft reset if no pmp is attached, so disable pmp use unless
+ * requested, otherwise directly attached disks do not work.
+ */
+ if (!enable_pmp)
+ hpriv->flags |= AHCI_HFLAG_NO_PMP;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info);
if (rc)
@@ -238,7 +252,6 @@ static struct platform_driver ahci_sunxi_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "ahci-sunxi",
- .owner = THIS_MODULE,
.of_match_table = ahci_sunxi_of_match,
.pm = &ahci_sunxi_pm_ops,
},
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 0f8538f238b6..feeb8f1e2fe8 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -544,7 +544,6 @@ static struct platform_driver xgene_ahci_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "xgene-ahci",
- .owner = THIS_MODULE,
.of_match_table = xgene_ahci_of_match,
},
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c5ba15af87d3..5c84fb5c3372 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1043,8 +1043,8 @@ const char *sata_spd_string(unsigned int spd)
* None.
*
* RETURNS:
- * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
- * %ATA_DEV_UNKNOWN the event of failure.
+ * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
+ * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
*/
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
@@ -1089,6 +1089,11 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
return ATA_DEV_SEMB;
}
+ if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
+ DPRINTK("found ZAC device by sig\n");
+ return ATA_DEV_ZAC;
+ }
+
DPRINTK("unknown device\n");
return ATA_DEV_UNKNOWN;
}
@@ -1329,7 +1334,7 @@ static int ata_hpa_resize(struct ata_device *dev)
int rc;
/* do we need to do it? */
- if (dev->class != ATA_DEV_ATA ||
+ if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
!ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
(dev->horkage & ATA_HORKAGE_BROKEN_HPA))
return 0;
@@ -1889,6 +1894,7 @@ retry:
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
case ATA_DEV_ATA:
+ case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
break;
case ATA_DEV_ATAPI:
@@ -1980,7 +1986,7 @@ retry:
rc = -EINVAL;
reason = "device reports invalid type";
- if (class == ATA_DEV_ATA) {
+ if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
goto err_out;
if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
@@ -2015,7 +2021,8 @@ retry:
goto retry;
}
- if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
+ if ((flags & ATA_READID_POSTRESET) &&
+ (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
/*
* The exact sequence expected by certain pre-ATA4 drives is:
* SRST RESET
@@ -2280,7 +2287,7 @@ int ata_dev_configure(struct ata_device *dev)
sizeof(modelbuf));
/* ATA-specific feature tests */
- if (dev->class == ATA_DEV_ATA) {
+ if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
if (ata_id_is_cfa(id)) {
/* CPRM may make this media unusable */
if (id[ATA_ID_CFA_KEY_MGMT] & 1)
@@ -4033,6 +4040,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
if (ata_class_enabled(new_class) &&
new_class != ATA_DEV_ATA &&
new_class != ATA_DEV_ATAPI &&
+ new_class != ATA_DEV_ZAC &&
new_class != ATA_DEV_SEMB) {
ata_dev_info(dev, "class mismatch %u != %u\n",
dev->class, new_class);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dad83df555c4..3dbec8954c86 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1809,6 +1809,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
switch (qc->dev->class) {
case ATA_DEV_ATA:
+ case ATA_DEV_ZAC:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
@@ -3792,7 +3793,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
struct ata_eh_context *ehc = &link->eh_context;
unsigned long tmp;
- if (dev->class != ATA_DEV_ATA)
+ if (dev->class != ATA_DEV_ATA &&
+ dev->class != ATA_DEV_ZAC)
continue;
if (!(ehc->i.dev_action[dev->devno] &
ATA_EH_PARK))
@@ -3873,7 +3875,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
/* retry flush if necessary */
ata_for_each_dev(dev, link, ALL) {
- if (dev->class != ATA_DEV_ATA)
+ if (dev->class != ATA_DEV_ATA &&
+ dev->class != ATA_DEV_ZAC)
continue;
rc = ata_eh_maybe_retry_flush(dev);
if (rc)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index dd45c6a03e5d..e364e86e84d7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -235,7 +235,8 @@ static ssize_t ata_scsi_park_store(struct device *device,
rc = -ENODEV;
goto unlock;
}
- if (dev->class != ATA_DEV_ATA) {
+ if (dev->class != ATA_DEV_ATA &&
+ dev->class != ATA_DEV_ZAC) {
rc = -EOPNOTSUPP;
goto unlock;
}
@@ -1961,6 +1962,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
const u8 versions[] = {
+ 0x00,
0x60, /* SAM-3 (no version claimed) */
0x03,
@@ -1969,6 +1971,20 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x02,
0x60 /* SPC-3 (no version claimed) */
};
+ const u8 versions_zbc[] = {
+ 0x00,
+ 0xA0, /* SAM-5 (no version claimed) */
+
+ 0x04,
+ 0xC0, /* SBC-3 (no version claimed) */
+
+ 0x04,
+ 0x60, /* SPC-4 (no version claimed) */
+
+ 0x60,
+ 0x20, /* ZBC (no version claimed) */
+ };
+
u8 hdr[] = {
TYPE_DISK,
0,
@@ -1983,6 +1999,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
if (ata_id_removeable(args->id))
hdr[1] |= (1 << 7);
+ if (args->dev->class == ATA_DEV_ZAC) {
+ hdr[0] = TYPE_ZBC;
+ hdr[2] = 0x6; /* ZBC is defined in SPC-4 */
+ }
+
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
@@ -1995,7 +2016,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- memcpy(rbuf + 59, versions, sizeof(versions));
+ if (args->dev->class == ATA_DEV_ZAC)
+ memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
+ else
+ memcpy(rbuf + 58, versions, sizeof(versions));
return 0;
}
@@ -2564,7 +2588,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
DPRINTK("ATAPI request sense\n");
- /* FIXME: is this needed? */
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
#ifdef CONFIG_ATA_SFF
@@ -3405,7 +3428,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
ata_xlat_func_t xlat_func;
int rc = 0;
- if (dev->class == ATA_DEV_ATA) {
+ if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e37413228228..3227b7c8a05f 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -143,6 +143,7 @@ static struct {
{ ATA_DEV_PMP_UNSUP, "pmp" },
{ ATA_DEV_SEMB, "semb" },
{ ATA_DEV_SEMB_UNSUP, "semb" },
+ { ATA_DEV_ZAC, "zac" },
{ ATA_DEV_NONE, "none" }
};
ata_bitfield_name_search(class, ata_class_names)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 38216b991474..a9b0c820f2eb 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -961,7 +961,6 @@ static struct platform_driver arasan_cf_driver = {
.remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &arasan_cf_pm_ops,
.of_match_table = of_match_ptr(arasan_cf_id_table),
},
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index d59d5239405f..9aeb7a6dd4d4 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -389,7 +389,6 @@ static struct platform_driver pata_at32_driver = {
.remove = __exit_p(pata_at32_remove),
.driver = {
.name = "at32_ide",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 8a66f23af4c4..9e85937d36a9 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -444,7 +444,6 @@ static struct platform_driver pata_at91_driver = {
.remove = pata_at91_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 03f2f2bc83bd..dd7410019d15 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1657,7 +1657,6 @@ static struct platform_driver bfin_atapi_driver = {
.resume = bfin_atapi_resume,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 4d37c5415fc7..bd6b089c67a3 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -1021,7 +1021,6 @@ static int ep93xx_pata_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ep93xx_pata_probe,
.remove = ep93xx_pata_remove,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 989ff5ac69ec..139d20778b29 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -240,7 +240,6 @@ static struct platform_driver pata_imx_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
- .owner = THIS_MODULE,
.pm = &pata_imx_pm_ops,
},
};
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index ddf470c2341d..abda44183512 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -193,7 +193,6 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ixp4xx_pata_probe,
.remove = ata_platform_remove_one,
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index ccd1c83a05cc..252ba27fa63b 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -863,7 +863,6 @@ static struct platform_driver mpc52xx_ata_of_platform_driver = {
#endif
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_ata_of_match,
},
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2a97d3a531ec..80a80548ad0a 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1059,7 +1059,6 @@ static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = octeon_cf_match,
.shutdown = octeon_cf_shutdown
},
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 64965398914a..dcc408abe171 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -75,7 +75,6 @@ MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct platform_driver pata_of_platform_driver = {
.driver = {
.name = "pata_of_platform",
- .owner = THIS_MODULE,
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index df2bb7504fc8..8c0d7d736b7a 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -124,7 +124,6 @@ static int palmld_pata_remove(struct platform_device *dev)
static struct platform_driver palmld_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = palmld_pata_probe,
.remove = palmld_pata_remove,
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index f8cff3e247c5..1eedfe46d7c8 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -224,7 +224,6 @@ static struct platform_driver pata_platform_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 73259bfda1e3..c36b3e6531d8 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -385,7 +385,6 @@ static struct platform_driver pxa_ata_driver = {
.remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 3c5eb8fa6bd1..6d08446b877c 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -193,7 +193,6 @@ static struct platform_driver rb532_pata_platform_driver = {
.remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 1a24a5dc3940..fa44eb2872db 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -656,7 +656,6 @@ static struct platform_driver pata_s3c_driver = {
.id_table = pata_s3c_driver_ids,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &pata_s3c_pm_ops,
#endif
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0bb2cabd2197..c7ddef89e7b0 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1807,7 +1807,6 @@ MODULE_DEVICE_TABLE(of, sata_dwc_match);
static struct platform_driver sata_dwc_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 65071591b143..f9054cd36a72 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1624,7 +1624,6 @@ MODULE_DEVICE_TABLE(of, fsl_sata_match);
static struct platform_driver fsl_sata_driver = {
.driver = {
.name = "fsl-sata",
- .owner = THIS_MODULE,
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index ce2b99a1ed70..24e311fe2c1c 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -634,7 +634,6 @@ static struct platform_driver ahci_highbank_driver = {
.remove = ata_platform_remove_one,
.driver = {
.name = "highbank-ahci",
- .owner = THIS_MODULE,
.of_match_table = ahci_of_match,
.pm = &ahci_highbank_pm_ops,
},
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 391cfda1e83f..f9a0e34eb111 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4280,7 +4280,6 @@ static struct platform_driver mv_platform_driver = {
.resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mv_sata_dt_ids),
},
};
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ea1fbc1d4c5f..cb0d2e644af5 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1004,7 +1004,6 @@ static struct platform_driver sata_rcar_driver = {
.id_table = sata_rcar_id_table,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = sata_rcar_match,
#ifdef CONFIG_PM_SLEEP
.pm = &sata_rcar_pm_ops,
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index d65975aba4ec..c7fab3ee14ee 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -356,6 +356,8 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
if (skb) {
paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(eni_dev->pci_dev, paddr))
+ goto dma_map_error;
ENI_PRV_PADDR(skb) = paddr;
if (paddr & 3)
printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d has "
@@ -481,6 +483,7 @@ trouble:
if (paddr)
pci_unmap_single(eni_dev->pci_dev,paddr,skb->len,
PCI_DMA_FROMDEVICE);
+dma_map_error:
if (skb) dev_kfree_skb_irq(skb);
return -1;
}
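
The eni.c hunk above adds a pci_dma_mapping_error() check after pci_map_single() and routes failures to a new dma_map_error label placed before the skb free, so the error path frees the skb without trying to unmap a mapping that never existed. A minimal sketch of that pattern, with hypothetical names and no real hardware hookup, might look like this:

#include <linux/pci.h>
#include <linux/skbuff.h>

/*
 * Sketch of the mapping-error pattern added to do_rx_dma(): map the skb
 * data, and on failure bail out through a label that skips the unmap
 * (the mapping was never established) but still frees the skb.
 * Names are illustrative only.
 */
static int example_map_rx(struct pci_dev *pdev, struct sk_buff *skb)
{
        dma_addr_t paddr;

        paddr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, paddr))
                goto dma_map_error;

        /* ... hand paddr to the hardware ... */
        return 0;

dma_map_error:
        /* no pci_unmap_single() here: there is nothing to unmap */
        dev_kfree_skb_irq(skb);
        return -1;
}
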
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index d4725fc0395d..d5d9eafbbfcf 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2687,7 +2687,6 @@ MODULE_DEVICE_TABLE(of, fore200e_sba_match);
static struct platform_driver fore200e_sba_driver = {
.driver = {
.name = "fore_200e",
- .owner = THIS_MODULE,
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 53c3fe1aeb29..527d291706e8 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -4,7 +4,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o container.o property.o
+ topology.o container.o property.o cacheinfo.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 83e910a57563..876bae5ade33 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
+ int err = -EINVAL;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev)
return -ENODEV;
- if (bus_rescan_devices_helper(dev, NULL) != 0)
- return -EINVAL;
- return count;
+ if (bus_rescan_devices_helper(dev, NULL) == 0)
+ err = count;
+ put_device(dev);
+ return err;
}
static struct device *next_device(struct klist_iter *i)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
new file mode 100644
index 000000000000..6e64563361f0
--- /dev/null
+++ b/drivers/base/cacheinfo.c
@@ -0,0 +1,539 @@
+/*
+ * cacheinfo support - processor cache information via sysfs
+ *
+ * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+
+/* pointer to per cpu cacheinfo */
+static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
+#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
+#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
+#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
+{
+ return ci_cacheinfo(cpu);
+}
+
+#ifdef CONFIG_OF
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
+ struct cacheinfo *this_leaf;
+ struct device *cpu_dev = get_cpu_device(cpu);
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int index = 0;
+
+ /* skip if of_node is already populated */
+ if (this_cpu_ci->info_list->of_node)
+ return 0;
+
+ if (!cpu_dev) {
+ pr_err("No cpu device for CPU %d\n", cpu);
+ return -ENODEV;
+ }
+ np = cpu_dev->of_node;
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (np && index < cache_leaves(cpu)) {
+ this_leaf = this_cpu_ci->info_list + index;
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ this_leaf->of_node = np;
+ index++;
+ }
+ return 0;
+}
+
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ return sib_leaf->of_node == this_leaf->of_node;
+}
+#else
+static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ /*
+ * For non-DT systems, assume unique level 1 cache, system-wide
+ * shared caches for all other levels. This will be used only if
+ * arch specific code has not populated shared_cpu_map
+ */
+ return !(this_leaf->level == 1);
+}
+#endif
+
+static int cache_shared_cpu_map_setup(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int index;
+ int ret;
+
+ ret = cache_setup_of_node(cpu);
+ if (ret)
+ return ret;
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ unsigned int i;
+
+ this_leaf = this_cpu_ci->info_list + index;
+ /* skip if shared_cpu_map is already populated */
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
+ continue;
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
+ sib_leaf = sib_cpu_ci->info_list + index;
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void cache_shared_cpu_map_remove(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int sibling, index;
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
+ struct cpu_cacheinfo *sib_cpu_ci;
+
+ if (sibling == cpu) /* skip itself */
+ continue;
+ sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ sib_leaf = sib_cpu_ci->info_list + index;
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ }
+ of_node_put(this_leaf->of_node);
+ }
+}
+
+static void free_cache_attributes(unsigned int cpu)
+{
+ cache_shared_cpu_map_remove(cpu);
+
+ kfree(per_cpu_cacheinfo(cpu));
+ per_cpu_cacheinfo(cpu) = NULL;
+}
+
+int __weak init_cache_level(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+int __weak populate_cache_leaves(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+static int detect_cache_attributes(unsigned int cpu)
+{
+ int ret;
+
+ if (init_cache_level(cpu))
+ return -ENOENT;
+
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_KERNEL);
+ if (per_cpu_cacheinfo(cpu) == NULL)
+ return -ENOMEM;
+
+ ret = populate_cache_leaves(cpu);
+ if (ret)
+ goto free_ci;
+ /*
+ * For systems using DT for cache hierarchy, of_node and shared_cpu_map
+ * will be set up here only if they are not populated already
+ */
+ ret = cache_shared_cpu_map_setup(cpu);
+ if (ret)
+ goto free_ci;
+ return 0;
+
+free_ci:
+ free_cache_attributes(cpu);
+ return ret;
+}
+
+/* pointer to cpuX/cache device */
+static DEFINE_PER_CPU(struct device *, ci_cache_dev);
+#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
+
+static cpumask_t cache_dev_map;
+
+/* pointer to array of devices for cpuX/cache/indexY */
+static DEFINE_PER_CPU(struct device **, ci_index_dev);
+#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
+#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
+
+#define show_one(file_name, object) \
+static ssize_t file_name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", this_leaf->object); \
+}
+
+show_one(level, level);
+show_one(coherency_line_size, coherency_line_size);
+show_one(number_of_sets, number_of_sets);
+show_one(physical_line_partition, physical_line_partition);
+show_one(ways_of_associativity, ways_of_associativity);
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%uK\n", this_leaf->size >> 10);
+}
+
+static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+ return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return shared_cpumap_show_func(dev, false, buf);
+}
+
+static ssize_t shared_cpu_list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return shared_cpumap_show_func(dev, true, buf);
+}
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+ switch (this_leaf->type) {
+ case CACHE_TYPE_DATA:
+ return sprintf(buf, "Data\n");
+ case CACHE_TYPE_INST:
+ return sprintf(buf, "Instruction\n");
+ case CACHE_TYPE_UNIFIED:
+ return sprintf(buf, "Unified\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t allocation_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ int n = 0;
+
+ if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
+ n = sprintf(buf, "ReadWriteAllocate\n");
+ else if (ci_attr & CACHE_READ_ALLOCATE)
+ n = sprintf(buf, "ReadAllocate\n");
+ else if (ci_attr & CACHE_WRITE_ALLOCATE)
+ n = sprintf(buf, "WriteAllocate\n");
+ return n;
+}
+
+static ssize_t write_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ int n = 0;
+
+ if (ci_attr & CACHE_WRITE_THROUGH)
+ n = sprintf(buf, "WriteThrough\n");
+ else if (ci_attr & CACHE_WRITE_BACK)
+ n = sprintf(buf, "WriteBack\n");
+ return n;
+}
+
+static DEVICE_ATTR_RO(level);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(coherency_line_size);
+static DEVICE_ATTR_RO(ways_of_associativity);
+static DEVICE_ATTR_RO(number_of_sets);
+static DEVICE_ATTR_RO(size);
+static DEVICE_ATTR_RO(allocation_policy);
+static DEVICE_ATTR_RO(write_policy);
+static DEVICE_ATTR_RO(shared_cpu_map);
+static DEVICE_ATTR_RO(shared_cpu_list);
+static DEVICE_ATTR_RO(physical_line_partition);
+
+static struct attribute *cache_default_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_level.attr,
+ &dev_attr_shared_cpu_map.attr,
+ &dev_attr_shared_cpu_list.attr,
+ &dev_attr_coherency_line_size.attr,
+ &dev_attr_ways_of_associativity.attr,
+ &dev_attr_number_of_sets.attr,
+ &dev_attr_size.attr,
+ &dev_attr_allocation_policy.attr,
+ &dev_attr_write_policy.attr,
+ &dev_attr_physical_line_partition.attr,
+ NULL
+};
+
+static umode_t
+cache_default_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+ umode_t mode = attr->mode;
+
+ if ((attr == &dev_attr_type.attr) && this_leaf->type)
+ return mode;
+ if ((attr == &dev_attr_level.attr) && this_leaf->level)
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_coherency_line_size.attr) &&
+ this_leaf->coherency_line_size)
+ return mode;
+ if ((attr == &dev_attr_ways_of_associativity.attr) &&
+ this_leaf->size) /* allow 0 = full associativity */
+ return mode;
+ if ((attr == &dev_attr_number_of_sets.attr) &&
+ this_leaf->number_of_sets)
+ return mode;
+ if ((attr == &dev_attr_size.attr) && this_leaf->size)
+ return mode;
+ if ((attr == &dev_attr_write_policy.attr) &&
+ (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_allocation_policy.attr) &&
+ (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_physical_line_partition.attr) &&
+ this_leaf->physical_line_partition)
+ return mode;
+
+ return 0;
+}
+
+static const struct attribute_group cache_default_group = {
+ .attrs = cache_default_attrs,
+ .is_visible = cache_default_attrs_is_visible,
+};
+
+static const struct attribute_group *cache_default_groups[] = {
+ &cache_default_group,
+ NULL,
+};
+
+static const struct attribute_group *cache_private_groups[] = {
+ &cache_default_group,
+ NULL, /* Place holder for private group */
+ NULL,
+};
+
+const struct attribute_group *
+__weak cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ return NULL;
+}
+
+static const struct attribute_group **
+cache_get_attribute_groups(struct cacheinfo *this_leaf)
+{
+ const struct attribute_group *priv_group =
+ cache_get_priv_group(this_leaf);
+
+ if (!priv_group)
+ return cache_default_groups;
+
+ if (!cache_private_groups[1])
+ cache_private_groups[1] = priv_group;
+
+ return cache_private_groups;
+}
+
+/* Add/Remove cache interface for CPU device */
+static void cpu_cache_sysfs_exit(unsigned int cpu)
+{
+ int i;
+ struct device *ci_dev;
+
+ if (per_cpu_index_dev(cpu)) {
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ ci_dev = per_cache_index_dev(cpu, i);
+ if (!ci_dev)
+ continue;
+ device_unregister(ci_dev);
+ }
+ kfree(per_cpu_index_dev(cpu));
+ per_cpu_index_dev(cpu) = NULL;
+ }
+ device_unregister(per_cpu_cache_dev(cpu));
+ per_cpu_cache_dev(cpu) = NULL;
+}
+
+static int cpu_cache_sysfs_init(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ if (per_cpu_cacheinfo(cpu) == NULL)
+ return -ENOENT;
+
+ per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
+ if (IS_ERR(per_cpu_cache_dev(cpu)))
+ return PTR_ERR(per_cpu_cache_dev(cpu));
+
+ /* Allocate all required memory */
+ per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct device *), GFP_KERNEL);
+ if (unlikely(per_cpu_index_dev(cpu) == NULL))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ cpu_cache_sysfs_exit(cpu);
+ return -ENOMEM;
+}
+
+static int cache_add_dev(unsigned int cpu)
+{
+ unsigned int i;
+ int rc;
+ struct device *ci_dev, *parent;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ const struct attribute_group **cache_groups;
+
+ rc = cpu_cache_sysfs_init(cpu);
+ if (unlikely(rc < 0))
+ return rc;
+
+ parent = per_cpu_cache_dev(cpu);
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ this_leaf = this_cpu_ci->info_list + i;
+ if (this_leaf->disable_sysfs)
+ continue;
+ cache_groups = cache_get_attribute_groups(this_leaf);
+ ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
+ "index%1u", i);
+ if (IS_ERR(ci_dev)) {
+ rc = PTR_ERR(ci_dev);
+ goto err;
+ }
+ per_cache_index_dev(cpu, i) = ci_dev;
+ }
+ cpumask_set_cpu(cpu, &cache_dev_map);
+
+ return 0;
+err:
+ cpu_cache_sysfs_exit(cpu);
+ return rc;
+}
+
+static void cache_remove_dev(unsigned int cpu)
+{
+ if (!cpumask_test_cpu(cpu, &cache_dev_map))
+ return;
+ cpumask_clear_cpu(cpu, &cache_dev_map);
+
+ cpu_cache_sysfs_exit(cpu);
+}
+
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int rc = 0;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ rc = detect_cache_attributes(cpu);
+ if (!rc)
+ rc = cache_add_dev(cpu);
+ break;
+ case CPU_DEAD:
+ cache_remove_dev(cpu);
+ if (per_cpu_cacheinfo(cpu))
+ free_cache_attributes(cpu);
+ break;
+ }
+ return notifier_from_errno(rc);
+}
+
+static int __init cacheinfo_sysfs_init(void)
+{
+ int cpu, rc = 0;
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu) {
+ rc = detect_cache_attributes(cpu);
+ if (rc)
+ goto out;
+ rc = cache_add_dev(cpu);
+ if (rc) {
+ free_cache_attributes(cpu);
+ pr_err("error populating cacheinfo..cpu%d\n", cpu);
+ goto out;
+ }
+ }
+ __hotcpu_notifier(cacheinfo_cpu_callback, 0);
+
+out:
+ cpu_notifier_register_done();
+ return rc;
+}
+
+device_initcall(cacheinfo_sysfs_init);
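
The new cacheinfo core above leaves init_cache_level() and populate_cache_leaves() as weak stubs returning -ENOENT, so nothing appears under cpuX/cache until an architecture overrides them. Purely as an illustration (assuming the prototypes and struct fields are the ones this file already uses, and hard-coding a single private L1 data cache), an architecture hook could be sketched as:

#include <linux/cacheinfo.h>

/*
 * Hypothetical architecture backend for the new cacheinfo core: report one
 * per-CPU L1 data cache. A real implementation would read ID registers or
 * firmware tables instead of hard-coding values.
 */
int init_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

        this_cpu_ci->num_leaves = 1;    /* one leaf: the L1 data cache */
        return 0;
}

int populate_cache_leaves(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *leaf = this_cpu_ci->info_list;

        leaf->level = 1;
        leaf->type = CACHE_TYPE_DATA;
        leaf->size = 32 * 1024;                 /* 32K, made up */
        leaf->coherency_line_size = 64;
        leaf->number_of_sets = 128;
        leaf->ways_of_associativity = 4;
        /* shared_cpu_map is filled in by cache_shared_cpu_map_setup() */
        return 0;
}
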
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 842d04707de6..97e2baf6e5d8 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1021,18 +1021,6 @@ int device_add(struct device *dev)
if (error)
goto attrError;
- if (MAJOR(dev->devt)) {
- error = device_create_file(dev, &dev_attr_dev);
- if (error)
- goto ueventattrError;
-
- error = device_create_sys_dev_entry(dev);
- if (error)
- goto devtattrError;
-
- devtmpfs_create_node(dev);
- }
-
error = device_add_class_symlinks(dev);
if (error)
goto SymlinkError;
@@ -1047,6 +1035,18 @@ int device_add(struct device *dev)
goto DPMError;
device_pm_add(dev);
+ if (MAJOR(dev->devt)) {
+ error = device_create_file(dev, &dev_attr_dev);
+ if (error)
+ goto DevAttrError;
+
+ error = device_create_sys_dev_entry(dev);
+ if (error)
+ goto SysEntryError;
+
+ devtmpfs_create_node(dev);
+ }
+
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
@@ -1076,6 +1076,12 @@ int device_add(struct device *dev)
done:
put_device(dev);
return error;
+ SysEntryError:
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &dev_attr_dev);
+ DevAttrError:
+ device_pm_remove(dev);
+ dpm_sysfs_remove(dev);
DPMError:
bus_remove_device(dev);
BusError:
@@ -1083,14 +1089,6 @@ done:
AttrsError:
device_remove_class_symlinks(dev);
SymlinkError:
- if (MAJOR(dev->devt))
- devtmpfs_delete_node(dev);
- if (MAJOR(dev->devt))
- device_remove_sys_dev_entry(dev);
- devtattrError:
- if (MAJOR(dev->devt))
- device_remove_file(dev, &dev_attr_dev);
- ueventattrError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 006b1bc5297d..f829a4c71749 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -207,11 +207,8 @@ static ssize_t show_cpus_attr(struct device *dev,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
- int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, *ca->map);
}
#define _CPU_ATTR(name, map) \
@@ -366,6 +363,60 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *
+__cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->parent = parent;
+ dev->groups = groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, drvdata);
+
+ retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(cpu_device_create);
+
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 96614b04544c..1bd120a0b084 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -31,6 +31,11 @@
#include <linux/fs.h>
#include <linux/workqueue.h>
+static struct class devcd_class;
+
+/* global disable flag, for security purposes */
+static bool devcd_disabled;
+
/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT (HZ * 60 * 5)
@@ -121,11 +126,51 @@ static const struct attribute_group *devcd_dev_groups[] = {
&devcd_dev_group, NULL,
};
+static int devcd_free(struct device *dev, void *data)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ flush_delayed_work(&devcd->del_wk);
+ return 0;
+}
+
+static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", devcd_disabled);
+}
+
+static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ long tmp = simple_strtol(buf, NULL, 10);
+
+ /*
+ * This essentially makes the attribute write-once, since you can't
+ * go back to not having it disabled. This is intentional, it serves
+ * as a system lockdown feature.
+ */
+ if (tmp != 1)
+ return -EINVAL;
+
+ devcd_disabled = true;
+
+ class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+
+ return count;
+}
+
+static struct class_attribute devcd_class_attrs[] = {
+ __ATTR_RW(disabled),
+ __ATTR_NULL
+};
+
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
+ .class_attrs = devcd_class_attrs,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
@@ -192,6 +237,9 @@ void dev_coredumpm(struct device *dev, struct module *owner,
struct devcd_entry *devcd;
struct device *existing;
+ if (devcd_disabled)
+ goto free;
+
existing = class_find_device(&devcd_class, NULL, dev,
devcd_match_failing);
if (existing) {
@@ -249,14 +297,6 @@ static int __init devcoredump_init(void)
}
__initcall(devcoredump_init);
-static int devcd_free(struct device *dev, void *data)
-{
- struct devcd_entry *devcd = dev_to_devcd(dev);
-
- flush_delayed_work(&devcd->del_wk);
- return 0;
-}
-
static void __exit devcoredump_exit(void)
{
class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
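
The devcoredump change above adds a write-once "disabled" class attribute that only accepts "1" and, once set, flushes every pending dump and makes dev_coredumpm() drop new ones. A small illustrative user-space snippet (assuming the attribute shows up at the usual /sys/class/devcoredump/disabled path) to flip the knob:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative only: lock down device coredumps for the rest of the boot. */
int main(void)
{
        int fd = open("/sys/class/devcoredump/disabled", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) != 1)     /* anything other than "1" is rejected */
                perror("write");
        close(fd);
        return 0;
}
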
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3d785ebb48d3..58470c395301 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -591,8 +591,7 @@ static int fw_map_pages_buf(struct firmware_buf *buf)
if (!buf->is_paged_buf)
return 0;
- if (buf->data)
- vunmap(buf->data);
+ vunmap(buf->data);
buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
if (!buf->data)
return -ENOMEM;
@@ -925,7 +924,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
- wait_for_completion(&buf->completion);
+ retval = wait_for_completion_interruptible(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
if (is_fw_load_aborted(buf))
@@ -1004,7 +1003,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
break;
}
mutex_unlock(&fw_lock);
- wait_for_completion(&buf->completion);
+ ret = wait_for_completion_interruptible(&buf->completion);
mutex_lock(&fw_lock);
}
mutex_unlock(&fw_lock);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7c5d87191b28..85be040a21c8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -228,8 +228,8 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
struct page *first_page;
int ret;
- first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
- start_pfn = page_to_pfn(first_page);
+ start_pfn = phys_index << PFN_SECTION_SHIFT;
+ first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 472168cd0c97..a3b82e9c7f20 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -25,32 +25,26 @@ static struct bus_type node_subsys = {
};
-static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
struct node *node_dev = to_node(dev);
const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
- int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
- len = type?
- cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
- cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+ return cpumap_print_to_pagebuf(list, buf, mask);
}
static inline ssize_t node_read_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return node_read_cpumap(dev, 0, buf);
+ return node_read_cpumap(dev, false, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return node_read_cpumap(dev, 1, buf);
+ return node_read_cpumap(dev, true, buf);
}
static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b2afc29403f9..9421fed40905 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -580,9 +580,10 @@ void platform_driver_unregister(struct platform_driver *drv)
EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
- * platform_driver_probe - register driver for non-hotpluggable device
+ * __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
* @probe: the driver probe routine, probably from an __init section
+ * @module: module which will be the owner of the driver
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -598,8 +599,8 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
*/
-int __init_or_module platform_driver_probe(struct platform_driver *drv,
- int (*probe)(struct platform_device *))
+int __init_or_module __platform_driver_probe(struct platform_driver *drv,
+ int (*probe)(struct platform_device *), struct module *module)
{
int retval, code;
@@ -614,7 +615,7 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
- retval = code = platform_driver_register(drv);
+ retval = code = __platform_driver_register(drv, module);
/*
* Fixup that section violation, being paranoid about code scanning
@@ -633,27 +634,28 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
platform_driver_unregister(drv);
return retval;
}
-EXPORT_SYMBOL_GPL(platform_driver_probe);
+EXPORT_SYMBOL_GPL(__platform_driver_probe);
/**
- * platform_create_bundle - register driver and create corresponding device
+ * __platform_create_bundle - register driver and create corresponding device
* @driver: platform driver structure
* @probe: the driver probe routine, probably from an __init section
* @res: set of resources that needs to be allocated for the device
* @n_res: number of resources
* @data: platform specific data for this platform device
* @size: size of platform specific data
+ * @module: module which will be the owner of the driver
*
* Use this in legacy-style modules that probe hardware directly and
* register a single platform device and corresponding platform driver.
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device * __init_or_module platform_create_bundle(
+struct platform_device * __init_or_module __platform_create_bundle(
struct platform_driver *driver,
int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
- const void *data, size_t size)
+ const void *data, size_t size, struct module *module)
{
struct platform_device *pdev;
int error;
@@ -676,7 +678,7 @@ struct platform_device * __init_or_module platform_create_bundle(
if (error)
goto err_pdev_put;
- error = platform_driver_probe(driver, probe);
+ error = __platform_driver_probe(driver, probe, module);
if (error)
goto err_pdev_del;
@@ -689,7 +691,7 @@ err_pdev_put:
err_out:
return ERR_PTR(error);
}
-EXPORT_SYMBOL_GPL(platform_create_bundle);
+EXPORT_SYMBOL_GPL(__platform_create_bundle);
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
@@ -1006,6 +1008,7 @@ int __init platform_bus_init(void)
error = bus_register(&platform_bus_type);
if (error)
device_unregister(&platform_bus);
+ of_platform_register_reconfig_notifier();
return error;
}
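
The platform.c hunks above explain the .owner removals scattered through this series: platform_driver_register(), platform_driver_probe() and platform_create_bundle() are now wrappers around __-prefixed helpers that receive THIS_MODULE and set driver.owner themselves. A hedged sketch of what a converted driver then looks like (hypothetical names, nothing taken from a real driver):

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver: .owner is gone, the register wrapper supplies it. */
static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name   = "foo",
                /* no .owner: platform_driver_register() passes THIS_MODULE */
        },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");
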
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index be7c1fb7c0c9..6491f45200a7 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -29,75 +29,52 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_one_ro_named(_name, _func) \
- static DEVICE_ATTR(_name, 0444, _func, NULL)
-
-#define define_one_ro(_name) \
- static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
-
#define define_id_show_func(name) \
-static ssize_t show_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%d\n", topology_##name(dev->id)); \
}
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
- defined(topology_book_cpumask)
-static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
-{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
-
- if (len > 1) {
- n = type?
- cpulist_scnprintf(buf, len-2, mask) :
- cpumask_scnprintf(buf, len-2, mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
-}
-#endif
-
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
+#define define_siblings_show_map(name, mask) \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return show_cpumap(0, topology_##name(dev->id), buf); \
+ return cpumap_print_to_pagebuf(false, buf, topology_##mask(dev->id));\
}
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+#define define_siblings_show_list(name, mask) \
+static ssize_t name##_list_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
- return show_cpumap(1, topology_##name(dev->id), buf); \
+ return cpumap_print_to_pagebuf(true, buf, topology_##mask(dev->id));\
}
-#define define_siblings_show_func(name) \
- define_siblings_show_map(name); define_siblings_show_list(name)
+#define define_siblings_show_func(name, mask) \
+ define_siblings_show_map(name, mask); \
+ define_siblings_show_list(name, mask)
define_id_show_func(physical_package_id);
-define_one_ro(physical_package_id);
+static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(core_id);
-define_one_ro(core_id);
+static DEVICE_ATTR_RO(core_id);
-define_siblings_show_func(thread_cpumask);
-define_one_ro_named(thread_siblings, show_thread_cpumask);
-define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
+define_siblings_show_func(thread_siblings, thread_cpumask);
+static DEVICE_ATTR_RO(thread_siblings);
+static DEVICE_ATTR_RO(thread_siblings_list);
-define_siblings_show_func(core_cpumask);
-define_one_ro_named(core_siblings, show_core_cpumask);
-define_one_ro_named(core_siblings_list, show_core_cpumask_list);
+define_siblings_show_func(core_siblings, core_cpumask);
+static DEVICE_ATTR_RO(core_siblings);
+static DEVICE_ATTR_RO(core_siblings_list);
#ifdef CONFIG_SCHED_BOOK
define_id_show_func(book_id);
-define_one_ro(book_id);
-define_siblings_show_func(book_cpumask);
-define_one_ro_named(book_siblings, show_book_cpumask);
-define_one_ro_named(book_siblings_list, show_book_cpumask_list);
+static DEVICE_ATTR_RO(book_id);
+define_siblings_show_func(book_siblings, book_cpumask);
+static DEVICE_ATTR_RO(book_siblings);
+static DEVICE_ATTR_RO(book_siblings_list);
#endif
static struct attribute *default_attrs[] = {
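
cpu.c, node.c and topology.c above all swap the old cpumask_scnprintf()/cpulist_scnprintf() calls plus manual newline handling for the new cpumap_print_to_pagebuf(list, buf, mask) helper. An illustrative show() pair using the helper (made-up attribute names, printing the online mask) could look like:

#include <linux/cpumask.h>
#include <linux/device.h>

/*
 * The bool selects the hex-mask form ("ff") versus the list form ("0-7");
 * the helper appends the trailing newline itself, which is what the removed
 * buf[n++] = '\n' boilerplate used to do.
 */
static ssize_t example_cpus_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(false, buf, cpu_online_mask);
}

static ssize_t example_cpus_list_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
}

static DEVICE_ATTR_RO(example_cpus);
static DEVICE_ATTR_RO(example_cpus_list);
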
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index b6412b2d748d..314ae4032f3e 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -24,6 +24,7 @@ struct bcma_bus;
/* main.c */
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout);
+void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b068f98920a8..19f679667ca4 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -339,7 +339,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
return;
}
- irq = bcma_core_irq(cc->core);
+ irq = bcma_core_irq(cc->core, 0);
/* Determine the registers of the UARTs */
cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 706b9ae0dcfb..598a6cd9028a 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -152,7 +152,7 @@ static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
handle_simple_irq);
}
- hwirq = bcma_core_irq(cc->core);
+ hwirq = bcma_core_irq(cc->core, 0);
err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
cc);
if (err)
@@ -183,7 +183,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
return;
bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
- free_irq(bcma_core_irq(cc->core), cc);
+ free_irq(bcma_core_irq(cc->core, 0), cc);
for (gpio = 0; gpio < chip->ngpio; gpio++) {
int irq = irq_find_mapping(cc->irq_domain, gpio);
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 004d6aa671ce..04faf6df959f 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -20,6 +20,9 @@
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
+#ifdef CONFIG_BCM47XX
+#include <bcm47xx_nvram.h>
+#endif
enum bcma_boot_dev {
BCMA_BOOT_DEV_UNK = 0,
@@ -115,7 +118,7 @@ static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
* If disabled, 5 is returned.
* If not supported, 6 is returned.
*/
-static unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
struct bcma_device *mdev = dev->bus->drv_mips.core;
u32 irqflag;
@@ -133,13 +136,6 @@ static unsigned int bcma_core_mips_irq(struct bcma_device *dev)
return 5;
}
-unsigned int bcma_core_irq(struct bcma_device *dev)
-{
- unsigned int mips_irq = bcma_core_mips_irq(dev);
- return mips_irq <= 4 ? mips_irq + 2 : 0;
-}
-EXPORT_SYMBOL(bcma_core_irq);
-
static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
{
unsigned int oldirq = bcma_core_mips_irq(dev);
@@ -323,10 +319,16 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
switch (boot_dev) {
case BCMA_BOOT_DEV_PARALLEL:
case BCMA_BOOT_DEV_SERIAL:
- /* TODO: Init NVRAM using BCMA_SOC_FLASH2 window */
+#ifdef CONFIG_BCM47XX
+ bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH2,
+ BCMA_SOC_FLASH2_SZ);
+#endif
break;
case BCMA_BOOT_DEV_NAND:
- /* TODO: Init NVRAM using BCMA_SOC_FLASH1 window */
+#ifdef CONFIG_BCM47XX
+ bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH1,
+ BCMA_SOC_FLASH1_SZ);
+#endif
break;
default:
break;
@@ -423,7 +425,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
break;
default:
list_for_each_entry(core, &bus->cores, list) {
- core->irq = bcma_core_irq(core);
+ core->irq = bcma_core_irq(core, 0);
}
bcma_err(bus,
"Unknown device (0x%x) found, can not configure IRQs\n",
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index c3d7b03c2fdc..c8a6b741967b 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -593,7 +593,7 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
pr_info("PCI: Fixing up device %s\n", pci_name(dev));
/* Fix up interrupt lines */
- dev->irq = bcma_core_irq(pc_host->pdev->core);
+ dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
readrq = pcie_get_readrq(dev);
@@ -617,6 +617,6 @@ int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
pci_ops);
- return bcma_core_irq(pc_host->pdev->core);
+ return bcma_core_irq(pc_host->pdev->core, 0);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 1000955ce09d..534e1337766d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
@@ -153,6 +154,46 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par
return NULL;
}
+static int bcma_of_irq_parse(struct platform_device *parent,
+ struct bcma_device *core,
+ struct of_phandle_args *out_irq, int num)
+{
+ __be32 laddr[1];
+ int rc;
+
+ if (core->dev.of_node) {
+ rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
+ if (!rc)
+ return rc;
+ }
+
+ out_irq->np = parent->dev.of_node;
+ out_irq->args_count = 1;
+ out_irq->args[0] = num;
+
+ laddr[0] = cpu_to_be32(core->addr);
+ return of_irq_parse_raw(laddr, out_irq);
+}
+
+static unsigned int bcma_of_get_irq(struct platform_device *parent,
+ struct bcma_device *core, int num)
+{
+ struct of_phandle_args out_irq;
+ int ret;
+
+ if (!parent || !parent->dev.of_node)
+ return 0;
+
+ ret = bcma_of_irq_parse(parent, core, &out_irq, num);
+ if (ret) {
+ bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
+ ret);
+ return 0;
+ }
+
+ return irq_create_of_mapping(&out_irq);
+}
+
static void bcma_of_fill_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -161,18 +202,47 @@ static void bcma_of_fill_device(struct platform_device *parent,
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
+
+ core->irq = bcma_of_get_irq(parent, core, 0);
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
struct bcma_device *core)
{
}
+static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
+ struct bcma_device *core, int num)
+{
+ return 0;
+}
#endif /* CONFIG_OF */
-static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
+unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
- int err;
+ struct bcma_bus *bus = core->bus;
+ unsigned int mips_irq;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ return bus->host_pci->irq;
+ case BCMA_HOSTTYPE_SOC:
+ if (bus->drv_mips.core && num == 0) {
+ mips_irq = bcma_core_mips_irq(core);
+ return mips_irq <= 4 ? mips_irq + 2 : 0;
+ }
+ if (bus->host_pdev)
+ return bcma_of_get_irq(bus->host_pdev, core, num);
+ return 0;
+ case BCMA_HOSTTYPE_SDIO:
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(bcma_core_irq);
+
+void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
+{
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
@@ -196,6 +266,11 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
case BCMA_HOSTTYPE_SDIO:
break;
}
+}
+
+static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
+{
+ int err;
err = device_register(&core->dev);
if (err) {
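
With the bcma change above, bcma_core_irq() gains a second argument selecting which interrupt of the core to resolve (0 preserves the old single-IRQ behaviour), and OF-described SoC buses resolve it through the new bcma_of_get_irq() path. A hedged sketch of a client driver requesting its first IRQ under the new signature (handler and names are invented):

#include <linux/bcma/bcma.h>
#include <linux/interrupt.h>

/* Hypothetical bcma client: resolve and request the core's first IRQ. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_attach_irq(struct bcma_device *core, void *priv)
{
        unsigned int irq = bcma_core_irq(core, 0);      /* 0 = first hwirq */

        if (!irq)
                return -ENOENT;         /* no usable interrupt for this core */

        return request_irq(irq, example_isr, IRQF_SHARED, "example-bcma", priv);
}
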
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 14b56561a36f..917520776879 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -505,6 +505,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
bus->nr_cores++;
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
+ bcma_prepare_core(bus, core);
bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 758da2287d9a..5fd50a284168 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1864,7 +1864,6 @@ static int __exit amiga_floppy_remove(struct platform_device *pdev)
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..46c282fff104 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -395,7 +395,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
+ blk_queue_max_hw_sectors(q, 1024);
q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index a2dfa169237d..1318e3217cb0 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -827,8 +827,7 @@ static int update_sync_bits(struct drbd_device *device,
*
*/
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
- enum update_sync_bits_mode mode,
- const char *file, const unsigned int line)
+ enum update_sync_bits_mode mode)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9b22f8f01b57..b905e9888b88 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1454,7 +1454,6 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
-extern int drbd_msg_put_info(struct sk_buff *skb, const char *info);
extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
@@ -1558,52 +1557,31 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
-/* Yes, there is kernel_setsockopt, but only since 2.6.18.
- * So we have our own copy of it here. */
-static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
- char *optval, int optlen)
-{
- mm_segment_t oldfs = get_fs();
- char __user *uoptval;
- int err;
-
- uoptval = (char __user __force *)optval;
-
- set_fs(KERNEL_DS);
- if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, uoptval, optlen);
- else
- err = sock->ops->setsockopt(sock, level, optname, uoptval,
- optlen);
- set_fs(oldfs);
- return err;
-}
-
static inline void drbd_tcp_cork(struct socket *sock)
{
int val = 1;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+ (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
(char*)&val, sizeof(val));
}
static inline void drbd_tcp_uncork(struct socket *sock)
{
int val = 0;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+ (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
(char*)&val, sizeof(val));
}
static inline void drbd_tcp_nodelay(struct socket *sock)
{
int val = 1;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+ (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
(char*)&val, sizeof(val));
}
static inline void drbd_tcp_quickack(struct socket *sock)
{
int val = 2;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
+ (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
(char*)&val, sizeof(val));
}
@@ -1662,14 +1640,13 @@ extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long stil
enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
- enum update_sync_bits_mode mode,
- const char *file, const unsigned int line);
+ enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__)
+ __drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
- __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__)
+ __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
- __drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__)
+ __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_initialize_al(struct drbd_device *, void *);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 973c185c9cfe..1fc83427199c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2532,10 +2532,6 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op
if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
return -ENOMEM;
- /*
- retcode = ERR_NOMEM;
- drbd_msg_put_info("unable to allocate cpumask");
- */
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
@@ -2731,7 +2727,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
device = minor_to_device(minor);
if (device)
- return ERR_MINOR_EXISTS;
+ return ERR_MINOR_OR_VOLUME_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
@@ -2793,20 +2789,16 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
if (id < 0) {
- if (id == -ENOSPC) {
- err = ERR_MINOR_EXISTS;
- drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
- }
+ if (id == -ENOSPC)
+ err = ERR_MINOR_OR_VOLUME_EXISTS;
goto out_no_minor_idr;
}
kref_get(&device->kref);
id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
if (id < 0) {
- if (id == -ENOSPC) {
- err = ERR_MINOR_EXISTS;
- drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
- }
+ if (id == -ENOSPC)
+ err = ERR_MINOR_OR_VOLUME_EXISTS;
goto out_idr_remove_minor;
}
kref_get(&device->kref);
@@ -2825,10 +2817,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
if (id < 0) {
- if (id == -ENOSPC) {
+ if (id == -ENOSPC)
err = ERR_INVALID_REQUEST;
- drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already");
- }
goto out_idr_remove_from_resource;
}
kref_get(&connection->kref);
@@ -2836,7 +2826,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
if (init_submitter(device)) {
err = ERR_NOMEM;
- drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue");
goto out_idr_remove_vol;
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1cd47df44bda..74df8cfad414 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -92,7 +92,7 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
* reason it could fail was no space in skb, and there are 4k available. */
-int drbd_msg_put_info(struct sk_buff *skb, const char *info)
+static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
struct nlattr *nla;
int err = -EMSGSIZE;
@@ -588,7 +588,7 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
val.i = 0; val.role = new_role;
while (try++ < max_tries) {
- rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE);
+ rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
/* in case we first succeeded to outdate,
* but now suddenly could establish a connection */
@@ -2052,7 +2052,7 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
rcu_read_unlock();
- /* connection->volumes protected by genl_lock() here */
+ /* connection->peer_devices protected by genl_lock() here */
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
struct drbd_device *device = peer_device->device;
if (!device->bitmap) {
@@ -3483,7 +3483,7 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
* that first_peer_device(device)->connection and device->vnr match the request. */
if (adm_ctx.device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
- retcode = ERR_MINOR_EXISTS;
+ retcode = ERR_MINOR_OR_VOLUME_EXISTS;
/* else: still NO_ERROR */
goto out;
}
@@ -3530,6 +3530,27 @@ out:
return 0;
}
+static int adm_del_resource(struct drbd_resource *resource)
+{
+ struct drbd_connection *connection;
+
+ for_each_connection(connection, resource) {
+ if (connection->cstate > C_STANDALONE)
+ return ERR_NET_CONFIGURED;
+ }
+ if (!idr_is_empty(&resource->devices))
+ return ERR_RES_IN_USE;
+
+ list_del_rcu(&resource->resources);
+ /* Make sure all threads have actually stopped: state handling only
+ * does drbd_thread_stop_nowait(). */
+ list_for_each_entry(connection, &resource->connections, connections)
+ drbd_thread_stop(&connection->worker);
+ synchronize_rcu();
+ drbd_free_resource(resource);
+ return NO_ERROR;
+}
+
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
@@ -3575,14 +3596,6 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
}
}
- /* If we reach this, all volumes (of this connection) are Secondary,
- * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
- * actually stopped, state handling only does drbd_thread_stop_nowait(). */
- for_each_connection(connection, resource)
- drbd_thread_stop(&connection->worker);
-
- /* Now, nothing can fail anymore */
-
/* delete volumes */
idr_for_each_entry(&resource->devices, device, i) {
retcode = adm_del_minor(device);
@@ -3593,10 +3606,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
}
}
- list_del_rcu(&resource->resources);
- synchronize_rcu();
- drbd_free_resource(resource);
- retcode = NO_ERROR;
+ retcode = adm_del_resource(resource);
out:
mutex_unlock(&resource->adm_mutex);
finish:
@@ -3608,7 +3618,6 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
struct drbd_resource *resource;
- struct drbd_connection *connection;
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
@@ -3616,27 +3625,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
return retcode;
if (retcode != NO_ERROR)
goto finish;
-
resource = adm_ctx.resource;
- mutex_lock(&resource->adm_mutex);
- for_each_connection(connection, resource) {
- if (connection->cstate > C_STANDALONE) {
- retcode = ERR_NET_CONFIGURED;
- goto out;
- }
- }
- if (!idr_is_empty(&resource->devices)) {
- retcode = ERR_RES_IN_USE;
- goto out;
- }
- list_del_rcu(&resource->resources);
- for_each_connection(connection, resource)
- drbd_thread_stop(&connection->worker);
- synchronize_rcu();
- drbd_free_resource(resource);
- retcode = NO_ERROR;
-out:
+ mutex_lock(&resource->adm_mutex);
+ retcode = adm_del_resource(resource);
mutex_unlock(&resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6960fb064731..d169b4a79267 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2482,7 +2482,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
- || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
+ || curr_events - device->rs_last_events > 64) {
unsigned long rs_left;
int i;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5a01c53dddeb..34f2f0ba409b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -36,29 +36,15 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- const int rw = bio_data_dir(req->master_bio);
- int cpu;
- cpu = part_stat_lock();
- part_round_stats(cpu, &device->vdisk->part0);
- part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
- part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
- (void) cpu; /* The macro invocations above want the cpu argument, I do not like
- the compiler warning about cpu only assigned but never used... */
- part_inc_in_flight(&device->vdisk->part0, rw);
- part_stat_unlock();
+ generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
+ &device->vdisk->part0);
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- int rw = bio_data_dir(req->master_bio);
- unsigned long duration = jiffies - req->start_jif;
- int cpu;
- cpu = part_stat_lock();
- part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
- part_round_stats(cpu, &device->vdisk->part0);
- part_dec_in_flight(&device->vdisk->part0, rw);
- part_stat_unlock();
+ generic_end_io_acct(bio_data_dir(req->master_bio),
+ &device->vdisk->part0, req->start_jif);
}
static struct drbd_request *drbd_req_new(struct drbd_device *device,
@@ -1545,6 +1531,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
struct request_queue * const b =
device->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
+ bvm->bi_bdev = device->ldev->backing_bdev;
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
@@ -1628,7 +1615,7 @@ void request_timer_fn(unsigned long data)
time_after(now, req_peer->pre_send_jif + ent) &&
!time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
}
if (dt && oldest_submit_jif != now &&
time_after(now, oldest_submit_jif + dt) &&
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 84b11f887d73..2d7dd269b6a8 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -215,6 +215,18 @@ static bool no_peer_wf_report_params(struct drbd_connection *connection)
return rv;
}
+static void wake_up_all_devices(struct drbd_connection *connection)
+{
+ struct drbd_peer_device *peer_device;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ wake_up(&peer_device->device->state_wait);
+ rcu_read_unlock();
+
+}
+
/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
@@ -410,6 +422,22 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
return rv;
}
+enum drbd_state_rv
+_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ enum drbd_state_rv rv;
+
+ BUG_ON(f & CS_SERIALIZE);
+
+ wait_event_cmd(device->state_wait,
+ (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
+ mutex_unlock(device->state_mutex),
+ mutex_lock(device->state_mutex));
+
+ return rv;
+}
+
static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
@@ -629,14 +657,11 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c
if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
rv = SS_IN_TRANSIENT_STATE;
- /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
- rv = SS_IN_TRANSIENT_STATE; */
-
/* While establishing a connection only allow cstate to change.
- Delay/refuse role changes, detach attach etc... */
+ Delay/refuse role changes, detach, attach, etc. (they do not touch cstate) */
if (test_bit(STATE_SENT, &connection->flags) &&
- !(os.conn == C_WF_REPORT_PARAMS ||
- (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
+ (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
rv = SS_IN_TRANSIENT_STATE;
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
@@ -1032,8 +1057,10 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
- no_peer_wf_report_params(connection))
+ no_peer_wf_report_params(connection)) {
clear_bit(STATE_SENT, &connection->flags);
+ wake_up_all_devices(connection);
+ }
wake_up(&device->misc_wait);
wake_up(&device->state_wait);
@@ -1072,7 +1099,6 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
set_ov_position(device, ns.conn);
device->rs_start = now;
- device->rs_last_events = 0;
device->rs_last_sect_ev = 0;
device->ov_last_oos_size = 0;
device->ov_last_oos_start = 0;
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index cc41605ba21c..7f53c40823cd 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -117,6 +117,11 @@ extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
union drbd_state,
union drbd_state,
enum chg_state_flags);
+
+extern enum drbd_state_rv
+_drbd_request_state_holding_state_mutex(struct drbd_device *, union drbd_state,
+ union drbd_state, enum chg_state_flags);
+
extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
enum chg_state_flags,
struct completion *done);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d2d1f97511bd..d0fae55d871d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1592,11 +1592,15 @@ void drbd_resync_after_changed(struct drbd_device *device)
void drbd_rs_controller_reset(struct drbd_device *device)
{
+ struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
struct fifo_buffer *plan;
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
+ device->rs_last_events =
+ (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
@@ -1743,7 +1747,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
device->rs_failed = 0;
device->rs_paused = 0;
device->rs_same_csum = 0;
- device->rs_last_events = 0;
device->rs_last_sect_ev = 0;
device->rs_total = tw;
device->rs_start = now;
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e352cac707e8..145ce2aa2e78 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -1082,7 +1082,6 @@ static struct platform_driver mg_disk_driver = {
.remove = mg_remove,
.driver = {
.name = MG_DEV_NAME,
- .owner = THIS_MODULE,
.pm = &mg_pm,
}
};
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 1bd5f523f8fd..3bd7ca9853a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3775,9 +3775,10 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
return false;
}
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
- bool last)
+static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request *rq = bd->rq;
int ret;
if (unlikely(mtip_check_unal_depth(hctx, rq)))
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8001e812018b..ae9f615382f6 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -78,7 +78,33 @@ module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
static int queue_mode = NULL_Q_MQ;
-module_param(queue_mode, int, S_IRUGO);
+
+static int null_param_store_val(const char *str, int *val, int min, int max)
+{
+ int ret, new_val;
+
+ ret = kstrtoint(str, 10, &new_val);
+ if (ret)
+ return -EINVAL;
+
+ if (new_val < min || new_val > max)
+ return -EINVAL;
+
+ *val = new_val;
+ return 0;
+}
+
+static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
+{
+ return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
+}
+
+static struct kernel_param_ops null_queue_mode_param_ops = {
+ .set = null_set_queue_mode,
+ .get = param_get_int,
+};
+
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
static int gb = 250;
@@ -94,7 +120,19 @@ module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
static int irqmode = NULL_IRQ_SOFTIRQ;
-module_param(irqmode, int, S_IRUGO);
+
+static int null_set_irqmode(const char *str, const struct kernel_param *kp)
+{
+ return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
+ NULL_IRQ_TIMER);
+}
+
+static struct kernel_param_ops null_irqmode_param_ops = {
+ .set = null_set_irqmode,
+ .get = param_get_int,
+};
+
+device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
static int completion_nsec = 10000;
@@ -313,15 +351,15 @@ static void null_request_fn(struct request_queue *q)
}
}
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
- bool last)
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- cmd->rq = rq;
+ cmd->rq = bd->rq;
cmd->nq = hctx->driver_data;
- blk_mq_start_request(rq);
+ blk_mq_start_request(bd->rq);
null_handle_cmd(cmd);
return BLK_MQ_RQ_QUEUE_OK;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e2bb8afbeae5..b1d5d8797315 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -13,9 +13,9 @@
*/
#include <linux/nvme.h>
-#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
-#include <linux/percpu.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -42,12 +41,12 @@
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
-#include <trace/events/block.h>
-
#define NVME_Q_DEPTH 1024
+#define NVME_AQ_DEPTH 64
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT (admin_timeout * HZ)
+#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
#define IOD_TIMEOUT (retry_time * HZ)
static unsigned char admin_timeout = 60;
@@ -62,6 +61,10 @@ static unsigned char retry_time = 30;
module_param(retry_time, byte, 0644);
MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
+static unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -76,10 +79,12 @@ static wait_queue_head_t nvme_kthread_wait;
static struct notifier_block nvme_nb;
static void nvme_reset_failed_dev(struct work_struct *ws);
+static int nvme_process_cq(struct nvme_queue *nvmeq);
struct async_cmd_info {
struct kthread_work work;
struct kthread_worker *worker;
+ struct request *req;
u32 result;
int status;
void *ctx;
@@ -90,7 +95,7 @@ struct async_cmd_info {
* commands and one for I/O commands).
*/
struct nvme_queue {
- struct rcu_head r_head;
+ struct llist_node node;
struct device *q_dmadev;
struct nvme_dev *dev;
char irqname[24]; /* nvme4294967295-65535\0 */
@@ -99,10 +104,6 @@ struct nvme_queue {
volatile struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
- wait_queue_head_t sq_full;
- wait_queue_t sq_cong_wait;
- struct bio_list sq_cong;
- struct list_head iod_bio;
u32 __iomem *q_db;
u16 q_depth;
u16 cq_vector;
@@ -112,10 +113,8 @@ struct nvme_queue {
u16 qid;
u8 cq_phase;
u8 cqe_seen;
- u8 q_suspended;
- cpumask_var_t cpu_mask;
struct async_cmd_info cmdinfo;
- unsigned long cmdid_data[];
+ struct blk_mq_hw_ctx *hctx;
};
/*
@@ -143,62 +142,79 @@ typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
- unsigned long timeout;
int aborted;
+ struct nvme_queue *nvmeq;
};
-static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
{
- return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+ struct nvme_dev *dev = data;
+ struct nvme_queue *nvmeq = dev->queues[0];
+
+ WARN_ON(nvmeq->hctx);
+ nvmeq->hctx = hctx;
+ hctx->driver_data = nvmeq;
+ return 0;
}
-static unsigned nvme_queue_extra(int depth)
+static int nvme_admin_init_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
{
- return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
+ struct nvme_dev *dev = data;
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = dev->queues[0];
+
+ BUG_ON(!nvmeq);
+ cmd->nvmeq = nvmeq;
+ return 0;
}
-/**
- * alloc_cmdid() - Allocate a Command ID
- * @nvmeq: The queue that will be used for this command
- * @ctx: A pointer that will be passed to the handler
- * @handler: The function to call on completion
- *
- * Allocate a Command ID for a queue. The data passed in will
- * be passed to the completion handler. This is implemented by using
- * the bottom two bits of the ctx pointer to store the handler ID.
- * Passing in a pointer that's not 4-byte aligned will cause a BUG.
- * We can change this if it becomes a problem.
- *
- * May be called with local interrupts disabled and the q_lock held,
- * or with interrupts enabled and no locks held.
- */
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
- nvme_completion_fn handler, unsigned timeout)
+static void nvme_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- int cmdid;
+ struct nvme_queue *nvmeq = hctx->driver_data;
- do {
- cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
- if (cmdid >= depth)
- return -EBUSY;
- } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+ nvmeq->hctx = NULL;
+}
+
+static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_dev *dev = data;
+ struct nvme_queue *nvmeq = dev->queues[
+ (hctx_idx % dev->queue_count) + 1];
+
+ if (!nvmeq->hctx)
+ nvmeq->hctx = hctx;
- info[cmdid].fn = handler;
- info[cmdid].ctx = ctx;
- info[cmdid].timeout = jiffies + timeout;
- info[cmdid].aborted = 0;
- return cmdid;
+ /* nvmeq queues are shared between namespaces. We assume here that
+ * blk-mq map the tags so they match up with the nvme queue tags. */
+ WARN_ON(nvmeq->hctx->tags != hctx->tags);
+
+ hctx->driver_data = nvmeq;
+ return 0;
}
-static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
- nvme_completion_fn handler, unsigned timeout)
+static int nvme_init_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
{
- int cmdid;
- wait_event_killable(nvmeq->sq_full,
- (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
- return (cmdid < 0) ? -EINTR : cmdid;
+ struct nvme_dev *dev = data;
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+
+ BUG_ON(!nvmeq);
+ cmd->nvmeq = nvmeq;
+ return 0;
+}
+
+static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
+ nvme_completion_fn handler)
+{
+ cmd->fn = handler;
+ cmd->ctx = ctx;
+ cmd->aborted = 0;
}
/* Special values must be less than 0x1000 */
@@ -206,17 +222,12 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
-#define CMD_CTX_ABORT (0x318 + CMD_CTX_BASE)
static void special_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
if (ctx == CMD_CTX_CANCELLED)
return;
- if (ctx == CMD_CTX_ABORT) {
- ++nvmeq->dev->abort_limit;
- return;
- }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(nvmeq->q_dmadev,
"completed id %d twice on queue %d\n",
@@ -229,99 +240,89 @@ static void special_completion(struct nvme_queue *nvmeq, void *ctx,
cqe->command_id, le16_to_cpup(&cqe->sq_id));
return;
}
-
dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
}
-static void async_completion(struct nvme_queue *nvmeq, void *ctx,
- struct nvme_completion *cqe)
-{
- struct async_cmd_info *cmdinfo = ctx;
- cmdinfo->result = le32_to_cpup(&cqe->result);
- cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
- queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
- nvme_completion_fn *fn)
+static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
{
void *ctx;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
- if (fn)
- *fn = special_completion;
- return CMD_CTX_INVALID;
- }
if (fn)
- *fn = info[cmdid].fn;
- ctx = info[cmdid].ctx;
- info[cmdid].fn = special_completion;
- info[cmdid].ctx = CMD_CTX_COMPLETED;
- clear_bit(cmdid, nvmeq->cmdid_data);
- wake_up(&nvmeq->sq_full);
+ *fn = cmd->fn;
+ ctx = cmd->ctx;
+ cmd->fn = special_completion;
+ cmd->ctx = CMD_CTX_CANCELLED;
return ctx;
}
-static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
- nvme_completion_fn *fn)
+static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
+ struct nvme_completion *cqe)
{
- void *ctx;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- if (fn)
- *fn = info[cmdid].fn;
- ctx = info[cmdid].ctx;
- info[cmdid].fn = special_completion;
- info[cmdid].ctx = CMD_CTX_CANCELLED;
- return ctx;
-}
+ struct request *req = ctx;
-static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
-{
- return rcu_dereference_raw(dev->queues[qid]);
+ u32 result = le32_to_cpup(&cqe->result);
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+ if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+ ++nvmeq->dev->event_limit;
+ if (status == NVME_SC_SUCCESS)
+ dev_warn(nvmeq->q_dmadev,
+ "async event result %08x\n", result);
+
+ blk_mq_free_hctx_request(nvmeq->hctx, req);
}
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
+static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
+ struct nvme_completion *cqe)
{
- struct nvme_queue *nvmeq;
- unsigned queue_id = get_cpu_var(*dev->io_queue);
+ struct request *req = ctx;
- rcu_read_lock();
- nvmeq = rcu_dereference(dev->queues[queue_id]);
- if (nvmeq)
- return nvmeq;
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+ u32 result = le32_to_cpup(&cqe->result);
- rcu_read_unlock();
- put_cpu_var(*dev->io_queue);
- return NULL;
+ blk_mq_free_hctx_request(nvmeq->hctx, req);
+
+ dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+ ++nvmeq->dev->abort_limit;
}
-static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
+ struct nvme_completion *cqe)
{
- rcu_read_unlock();
- put_cpu_var(nvmeq->dev->io_queue);
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+ blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req);
}
-static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
- __acquires(RCU)
+static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
+ unsigned int tag)
{
- struct nvme_queue *nvmeq;
+ struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
+ struct request *req = blk_mq_tag_to_rq(hctx->tags, tag);
- rcu_read_lock();
- nvmeq = rcu_dereference(dev->queues[q_idx]);
- if (nvmeq)
- return nvmeq;
-
- rcu_read_unlock();
- return NULL;
+ return blk_mq_rq_to_pdu(req);
}
-static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
+ nvme_completion_fn *fn)
{
- rcu_read_unlock();
+ struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
+ void *ctx;
+ if (tag >= nvmeq->q_depth) {
+ *fn = special_completion;
+ return CMD_CTX_INVALID;
+ }
+ if (fn)
+ *fn = cmd->fn;
+ ctx = cmd->ctx;
+ cmd->fn = special_completion;
+ cmd->ctx = CMD_CTX_COMPLETED;
+ return ctx;
}
/**
@@ -331,26 +332,29 @@ static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
*
* Safe to use from interrupt context
*/
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
- unsigned long flags;
- u16 tail;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
- if (nvmeq->q_suspended) {
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
- return -EBUSY;
- }
- tail = nvmeq->sq_tail;
+ u16 tail = nvmeq->sq_tail;
+
memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
if (++tail == nvmeq->q_depth)
tail = 0;
writel(tail, nvmeq->q_db);
nvmeq->sq_tail = tail;
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
return 0;
}
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ unsigned long flags;
+ int ret;
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ ret = __nvme_submit_cmd(nvmeq, cmd);
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+ return ret;
+}
+
static __le64 **iod_list(struct nvme_iod *iod)
{
return ((void *)iod) + iod->offset;
@@ -361,17 +365,17 @@ static __le64 **iod_list(struct nvme_iod *iod)
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_npages(unsigned size)
+static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
- unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
- return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ unsigned nprps = DIV_ROUND_UP(size + dev->page_size, dev->page_size);
+ return DIV_ROUND_UP(8 * nprps, dev->page_size - 8);
}
static struct nvme_iod *
-nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, struct nvme_dev *dev, gfp_t gfp)
{
struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
- sizeof(__le64 *) * nvme_npages(nbytes) +
+ sizeof(__le64 *) * nvme_npages(nbytes, dev) +
sizeof(struct scatterlist) * nseg, gfp);
if (iod) {
@@ -380,7 +384,6 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->length = nbytes;
iod->nents = 0;
iod->first_dma = 0ULL;
- iod->start_time = jiffies;
}
return iod;
@@ -388,7 +391,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
- const int last_prp = PAGE_SIZE / 8 - 1;
+ const int last_prp = dev->page_size / 8 - 1;
int i;
__le64 **list = iod_list(iod);
dma_addr_t prp_dma = iod->first_dma;
@@ -404,65 +407,49 @@ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
kfree(iod);
}
-static void nvme_start_io_acct(struct bio *bio)
-{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- if (blk_queue_io_stat(disk->queue)) {
- const int rw = bio_data_dir(bio);
- int cpu = part_stat_lock();
- part_round_stats(cpu, &disk->part0);
- part_stat_inc(cpu, &disk->part0, ios[rw]);
- part_stat_add(cpu, &disk->part0, sectors[rw],
- bio_sectors(bio));
- part_inc_in_flight(&disk->part0, rw);
- part_stat_unlock();
- }
-}
-
-static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
+static int nvme_error_status(u16 status)
{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
- if (blk_queue_io_stat(disk->queue)) {
- const int rw = bio_data_dir(bio);
- unsigned long duration = jiffies - start_time;
- int cpu = part_stat_lock();
- part_stat_add(cpu, &disk->part0, ticks[rw], duration);
- part_round_stats(cpu, &disk->part0);
- part_dec_in_flight(&disk->part0, rw);
- part_stat_unlock();
+ switch (status & 0x7ff) {
+ case NVME_SC_SUCCESS:
+ return 0;
+ case NVME_SC_CAP_EXCEEDED:
+ return -ENOSPC;
+ default:
+ return -EIO;
}
}
-static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
+static void req_completion(struct nvme_queue *nvmeq, void *ctx,
struct nvme_completion *cqe)
{
struct nvme_iod *iod = ctx;
- struct bio *bio = iod->private;
+ struct request *req = iod->private;
+ struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
+
u16 status = le16_to_cpup(&cqe->status) >> 1;
- int error = 0;
if (unlikely(status)) {
- if (!(status & NVME_SC_DNR ||
- bio->bi_rw & REQ_FAILFAST_MASK) &&
- (jiffies - iod->start_time) < IOD_TIMEOUT) {
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
- list_add_tail(&iod->node, &nvmeq->iod_bio);
- wake_up(&nvmeq->sq_full);
+ if (!(status & NVME_SC_DNR || blk_noretry_request(req))
+ && (jiffies - req->start_time) < req->timeout) {
+ blk_mq_requeue_request(req);
+ blk_mq_kick_requeue_list(req->q);
return;
}
- error = -EIO;
- }
- if (iod->nents) {
- dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
- bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- nvme_end_io_acct(bio, iod->start_time);
- }
+ req->errors = nvme_error_status(status);
+ } else
+ req->errors = 0;
+
+ if (cmd_rq->aborted)
+ dev_warn(&nvmeq->dev->pci_dev->dev,
+ "completing aborted command with status:%04x\n",
+ status);
+
+ if (iod->nents)
+ dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
+ rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(nvmeq->dev, iod);
- trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
- bio_endio(bio, error);
+ blk_mq_complete_request(req);
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -479,26 +466,27 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
__le64 **list = iod_list(iod);
dma_addr_t prp_dma;
int nprps, i;
+ u32 page_size = dev->page_size;
- length -= (PAGE_SIZE - offset);
+ length -= (page_size - offset);
if (length <= 0)
return total_len;
- dma_len -= (PAGE_SIZE - offset);
+ dma_len -= (page_size - offset);
if (dma_len) {
- dma_addr += (PAGE_SIZE - offset);
+ dma_addr += (page_size - offset);
} else {
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- if (length <= PAGE_SIZE) {
+ if (length <= page_size) {
iod->first_dma = dma_addr;
return total_len;
}
- nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ nprps = DIV_ROUND_UP(length, page_size);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
iod->npages = 0;
@@ -511,13 +499,13 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
if (!prp_list) {
iod->first_dma = dma_addr;
iod->npages = -1;
- return (total_len - length) + PAGE_SIZE;
+ return (total_len - length) + page_size;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
i = 0;
for (;;) {
- if (i == PAGE_SIZE / 8) {
+ if (i == page_size >> 3) {
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
if (!prp_list)
@@ -528,9 +516,9 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
i = 1;
}
prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= PAGE_SIZE;
- dma_addr += PAGE_SIZE;
- length -= PAGE_SIZE;
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
if (length <= 0)
break;
if (dma_len > 0)
@@ -544,88 +532,25 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
return total_len;
}
-static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
- int len)
-{
- struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
- if (!split)
- return -ENOMEM;
-
- trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
- split->bi_iter.bi_sector);
- bio_chain(split, bio);
-
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, split);
- bio_list_add(&nvmeq->sq_cong, bio);
- wake_up(&nvmeq->sq_full);
-
- return 0;
-}
-
-/* NVMe scatterlists require no holes in the virtual address */
-#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
- (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
-
-static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
- struct bio *bio, enum dma_data_direction dma_dir, int psegs)
-{
- struct bio_vec bvec, bvprv;
- struct bvec_iter iter;
- struct scatterlist *sg = NULL;
- int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
- int first = 1;
-
- if (nvmeq->dev->stripe_size)
- split_len = nvmeq->dev->stripe_size -
- ((bio->bi_iter.bi_sector << 9) &
- (nvmeq->dev->stripe_size - 1));
-
- sg_init_table(iod->sg, psegs);
- bio_for_each_segment(bvec, bio, iter) {
- if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
- sg->length += bvec.bv_len;
- } else {
- if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
- return nvme_split_and_submit(bio, nvmeq,
- length);
-
- sg = sg ? sg + 1 : iod->sg;
- sg_set_page(sg, bvec.bv_page,
- bvec.bv_len, bvec.bv_offset);
- nsegs++;
- }
-
- if (split_len - length < bvec.bv_len)
- return nvme_split_and_submit(bio, nvmeq, split_len);
- length += bvec.bv_len;
- bvprv = bvec;
- first = 0;
- }
- iod->nents = nsegs;
- sg_mark_end(sg);
- if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
- return -ENOMEM;
-
- BUG_ON(length != bio->bi_iter.bi_size);
- return length;
-}
-
-static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct bio *bio, struct nvme_iod *iod, int cmdid)
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct request *req, struct nvme_iod *iod)
{
struct nvme_dsm_range *range =
(struct nvme_dsm_range *)iod_list(iod)[0];
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
+ range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.command_id = cmdid;
+ cmnd->dsm.command_id = req->tag;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
cmnd->dsm.nr = 0;
@@ -634,11 +559,9 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
-
- return 0;
}
-static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
@@ -651,49 +574,34 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
-
- return 0;
}
-static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
+ struct nvme_ns *ns)
{
- struct bio *bio = iod->private;
- struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ struct request *req = iod->private;
struct nvme_command *cmnd;
- int cmdid;
- u16 control;
- u32 dsmgmt;
-
- cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
- if (unlikely(cmdid < 0))
- return cmdid;
+ u16 control = 0;
+ u32 dsmgmt = 0;
- if (bio->bi_rw & REQ_DISCARD)
- return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
- if (bio->bi_rw & REQ_FLUSH)
- return nvme_submit_flush(nvmeq, ns, cmdid);
-
- control = 0;
- if (bio->bi_rw & REQ_FUA)
+ if (req->cmd_flags & REQ_FUA)
control |= NVME_RW_FUA;
- if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
control |= NVME_RW_LR;
- dsmgmt = 0;
- if (bio->bi_rw & REQ_RAHEAD)
+ if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
memset(cmnd, 0, sizeof(*cmnd));
- cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
- cmnd->rw.command_id = cmdid;
+ cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
- cmnd->rw.length =
- cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@ -704,45 +612,26 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
return 0;
}
-static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
-{
- struct bio *split = bio_clone(bio, GFP_ATOMIC);
- if (!split)
- return -ENOMEM;
-
- split->bi_iter.bi_size = 0;
- split->bi_phys_segments = 0;
- bio->bi_rw &= ~REQ_FLUSH;
- bio_chain(split, bio);
-
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, split);
- bio_list_add(&nvmeq->sq_cong, bio);
- wake_up_process(nvme_thread);
-
- return 0;
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct bio *bio)
+static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ struct request *req = bd->rq;
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_iod *iod;
- int psegs = bio_phys_segments(ns->queue, bio);
- int result;
+ int psegs = req->nr_phys_segments;
+ enum dma_data_direction dma_dir;
+ unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
+ sizeof(struct nvme_dsm_range);
- if ((bio->bi_rw & REQ_FLUSH) && psegs)
- return nvme_split_flush_data(nvmeq, bio);
-
- iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+ iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
if (!iod)
- return -ENOMEM;
+ return BLK_MQ_RQ_QUEUE_BUSY;
- iod->private = bio;
- if (bio->bi_rw & REQ_DISCARD) {
+ iod->private = req;
+
+ if (req->cmd_flags & REQ_DISCARD) {
void *range;
/*
* We reuse the small pool to allocate the 16-byte range here
@@ -752,35 +641,50 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
GFP_ATOMIC,
&iod->first_dma);
- if (!range) {
- result = -ENOMEM;
- goto free_iod;
- }
+ if (!range)
+ goto retry_cmd;
iod_list(iod)[0] = (__le64 *)range;
iod->npages = 0;
} else if (psegs) {
- result = nvme_map_bio(nvmeq, iod, bio,
- bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- psegs);
- if (result <= 0)
- goto free_iod;
- if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
- result) {
- result = -ENOMEM;
- goto free_iod;
+ dma_dir = rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ sg_init_table(iod->sg, psegs);
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ if (!iod->nents)
+ goto error_cmd;
+
+ if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
+ goto retry_cmd;
+
+ if (blk_rq_bytes(req) !=
+ nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+ dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+ iod->nents, dma_dir);
+ goto retry_cmd;
}
- nvme_start_io_acct(bio);
}
- if (unlikely(nvme_submit_iod(nvmeq, iod))) {
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- list_add_tail(&iod->node, &nvmeq->iod_bio);
- }
- return 0;
- free_iod:
+ blk_mq_start_request(req);
+
+ nvme_set_info(cmd, iod, req_completion);
+ spin_lock_irq(&nvmeq->q_lock);
+ if (req->cmd_flags & REQ_DISCARD)
+ nvme_submit_discard(nvmeq, ns, req, iod);
+ else if (req->cmd_flags & REQ_FLUSH)
+ nvme_submit_flush(nvmeq, ns, req->tag);
+ else
+ nvme_submit_iod(nvmeq, iod, ns);
+
+ nvme_process_cq(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ return BLK_MQ_RQ_QUEUE_OK;
+
+ error_cmd:
nvme_free_iod(nvmeq->dev, iod);
- return result;
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ retry_cmd:
+ nvme_free_iod(nvmeq->dev, iod);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
static int nvme_process_cq(struct nvme_queue *nvmeq)
@@ -801,8 +705,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
head = 0;
phase = !phase;
}
-
- ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+ ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
fn(nvmeq, ctx, &cqe);
}
@@ -823,29 +726,13 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
return 1;
}
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
+/* Admin queue isn't initialized as a request queue. If at some point this
+ * happens anyway, make sure to notify the user */
+static int nvme_admin_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct nvme_ns *ns = q->queuedata;
- struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
- int result = -EBUSY;
-
- if (!nvmeq) {
- bio_endio(bio, -EIO);
- return;
- }
-
- spin_lock_irq(&nvmeq->q_lock);
- if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
- result = nvme_submit_bio_queue(nvmeq, ns, bio);
- if (unlikely(result)) {
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- }
-
- nvme_process_cq(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
- put_nvmeq(nvmeq);
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -869,10 +756,11 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
+ cmd_info)
{
spin_lock_irq(&nvmeq->q_lock);
- cancel_cmdid(nvmeq, cmdid, NULL);
+ cancel_cmd_info(cmd_info, NULL);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -895,47 +783,40 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
- struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
- int cmdid, ret;
+ int ret;
struct sync_cmd_info cmdinfo;
- struct nvme_queue *nvmeq;
-
- nvmeq = lock_nvmeq(dev, q_idx);
- if (!nvmeq)
- return -ENODEV;
+ struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = cmd_rq->nvmeq;
cmdinfo.task = current;
cmdinfo.status = -EINTR;
- cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
- if (cmdid < 0) {
- unlock_nvmeq(nvmeq);
- return cmdid;
- }
- cmd->common.command_id = cmdid;
+ cmd->common.command_id = req->tag;
+
+ nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
set_current_state(TASK_KILLABLE);
ret = nvme_submit_cmd(nvmeq, cmd);
if (ret) {
- free_cmdid(nvmeq, cmdid, NULL);
- unlock_nvmeq(nvmeq);
+ nvme_finish_cmd(nvmeq, req->tag, NULL);
set_current_state(TASK_RUNNING);
- return ret;
}
- unlock_nvmeq(nvmeq);
- schedule_timeout(timeout);
-
- if (cmdinfo.status == -EINTR) {
- nvmeq = lock_nvmeq(dev, q_idx);
- if (nvmeq) {
- nvme_abort_command(nvmeq, cmdid);
- unlock_nvmeq(nvmeq);
- }
+ ret = schedule_timeout(timeout);
+
+ /*
+ * Ensure that sync_completion has either run, or that it will
+ * never run.
+ */
+ nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
+
+ /*
+ * We never got the completion
+ */
+ if (cmdinfo.status == -EINTR)
return -EINTR;
- }
if (result)
*result = cmdinfo.result;
@@ -943,59 +824,99 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
return cmdinfo.status;
}
-static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+static int nvme_submit_async_admin_req(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_command c;
+ struct nvme_cmd_info *cmd_info;
+ struct request *req;
+
+ req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ cmd_info = blk_mq_rq_to_pdu(req);
+ nvme_set_info(cmd_info, req, async_req_completion);
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_async_event;
+ c.common.command_id = req->tag;
+
+ return __nvme_submit_cmd(nvmeq, &c);
+}
+
+static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
struct nvme_command *cmd,
struct async_cmd_info *cmdinfo, unsigned timeout)
{
- int cmdid;
+ struct nvme_queue *nvmeq = dev->queues[0];
+ struct request *req;
+ struct nvme_cmd_info *cmd_rq;
+
+ req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
- cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
- if (cmdid < 0)
- return cmdid;
+ req->timeout = timeout;
+ cmd_rq = blk_mq_rq_to_pdu(req);
+ cmdinfo->req = req;
+ nvme_set_info(cmd_rq, cmdinfo, async_completion);
cmdinfo->status = -EINTR;
- cmd->common.command_id = cmdid;
+
+ cmd->common.command_id = req->tag;
+
return nvme_submit_cmd(nvmeq, cmd);
}
-int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
- u32 *result)
+static int __nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result, unsigned timeout)
{
- return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+ int res;
+ struct request *req;
+
+ req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ res = nvme_submit_sync_cmd(req, cmd, result, timeout);
+ blk_mq_free_request(req);
+ return res;
}
-int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
- return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
- NVME_IO_TIMEOUT);
+ return __nvme_submit_admin_cmd(dev, cmd, result, ADMIN_TIMEOUT);
}
-static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
- struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
+ struct nvme_command *cmd, u32 *result)
{
- return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
- ADMIN_TIMEOUT);
+ int res;
+ struct request *req;
+
+ req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT),
+ false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ res = nvme_submit_sync_cmd(req, cmd, result, NVME_IO_TIMEOUT);
+ blk_mq_free_request(req);
+ return res;
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
- int status;
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id);
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
+ return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
- int status;
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -1007,16 +928,12 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cq_flags = cpu_to_le16(flags);
c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
+ return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
- int status;
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
@@ -1028,10 +945,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
c.create_sq.sq_flags = cpu_to_le16(flags);
c.create_sq.cqid = cpu_to_le16(qid);
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
+ return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1087,28 +1001,27 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
- * nvme_abort_cmd - Attempt aborting a command
- * @cmdid: Command id of a timed out IO
- * @queue: The queue with timed out IO
+ * nvme_abort_req - Attempt aborting a request
*
* Schedule controller reset if the command was already aborted once before and
* still hasn't been returned to the driver, or if this is the admin queue.
*/
-static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+static void nvme_abort_req(struct request *req)
{
- int a_cmdid;
- struct nvme_command cmd;
+ struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = cmd_rq->nvmeq;
struct nvme_dev *dev = nvmeq->dev;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- struct nvme_queue *adminq;
+ struct request *abort_req;
+ struct nvme_cmd_info *abort_cmd;
+ struct nvme_command cmd;
- if (!nvmeq->qid || info[cmdid].aborted) {
+ if (!nvmeq->qid || cmd_rq->aborted) {
if (work_busy(&dev->reset_work))
return;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
- "I/O %d QID %d timeout, reset controller\n", cmdid,
- nvmeq->qid);
+ "I/O %d QID %d timeout, reset controller\n",
+ req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
return;
@@ -1117,120 +1030,110 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
if (!dev->abort_limit)
return;
- adminq = rcu_dereference(dev->queues[0]);
- a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
- ADMIN_TIMEOUT);
- if (a_cmdid < 0)
+ abort_req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC,
+ false);
+ if (IS_ERR(abort_req))
return;
+ abort_cmd = blk_mq_rq_to_pdu(abort_req);
+ nvme_set_info(abort_cmd, abort_req, abort_completion);
+
memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
- cmd.abort.cid = cmdid;
+ cmd.abort.cid = req->tag;
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
- cmd.abort.command_id = a_cmdid;
+ cmd.abort.command_id = abort_req->tag;
--dev->abort_limit;
- info[cmdid].aborted = 1;
- info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+ cmd_rq->aborted = 1;
- dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
nvmeq->qid);
- nvme_submit_cmd(adminq, &cmd);
+ if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
+ dev_warn(nvmeq->q_dmadev,
+ "Could not abort I/O %d QID %d",
+ req->tag, nvmeq->qid);
+ blk_mq_free_request(abort_req);
+ }
}
-/**
- * nvme_cancel_ios - Cancel outstanding I/Os
- * @queue: The queue to cancel I/Os on
- * @timeout: True to only cancel I/Os which have timed out
- */
-static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
+ struct request *req, void *data, bool reserved)
{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
+ struct nvme_queue *nvmeq = data;
+ void *ctx;
+ nvme_completion_fn fn;
+ struct nvme_cmd_info *cmd;
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
+ };
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
- };
+ cmd = blk_mq_rq_to_pdu(req);
- if (timeout && !time_after(now, info[cmdid].timeout))
- continue;
- if (info[cmdid].ctx == CMD_CTX_CANCELLED)
- continue;
- if (timeout && nvmeq->dev->initialized) {
- nvme_abort_cmd(cmdid, nvmeq);
- continue;
- }
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
- nvmeq->qid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq, ctx, &cqe);
- }
+ if (cmd->ctx == CMD_CTX_CANCELLED)
+ return;
+
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
+ req->tag, nvmeq->qid);
+ ctx = cancel_cmd_info(cmd, &fn);
+ fn(nvmeq, ctx, &cqe);
}
-static void nvme_free_queue(struct rcu_head *r)
+static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
- struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+ struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = cmd->nvmeq;
- spin_lock_irq(&nvmeq->q_lock);
- while (bio_list_peek(&nvmeq->sq_cong)) {
- struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
- bio_endio(bio, -EIO);
- }
- while (!list_empty(&nvmeq->iod_bio)) {
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(
- (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
- };
- struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
- struct nvme_iod,
- node);
- list_del(&iod->node);
- bio_completion(nvmeq, iod, &cqe);
- }
- spin_unlock_irq(&nvmeq->q_lock);
+ dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+ nvmeq->qid);
+ if (nvmeq->dev->initialized)
+ nvme_abort_req(req);
+
+ /*
+ * The aborted request will be completed once the abort itself completes,
+ * so re-arm the timer. If the same request times out a second time, a
+ * controller reset is scheduled, as the device is then considered faulty.
+ */
+ return BLK_EH_RESET_TIMER;
+}
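The handler above leans on the blk-mq timeout contract: returning BLK_EH_RESET_TIMER re-arms the request timer so the command can still complete normally once the asynchronous abort goes through, and only a second expiry (cmd_rq->aborted already set) escalates to a controller reset. A minimal sketch of that contract, assuming a hypothetical per-request pdu (demo_cmd) and abort/reset helpers standing in for the NVMe-specific ones:

struct demo_cmd {
	int aborted;			/* hypothetical flag, mirrors nvme_cmd_info->aborted */
};

static enum blk_eh_timer_return demo_timeout(struct request *req, bool reserved)
{
	struct demo_cmd *cmd = blk_mq_rq_to_pdu(req);

	if (!cmd->aborted) {
		cmd->aborted = 1;
		demo_send_abort(req);		/* assumed async abort helper */
		return BLK_EH_RESET_TIMER;	/* keep the request alive     */
	}

	/* Second expiry: treat the device as faulty and schedule a reset. */
	demo_schedule_reset(req->q->queuedata);	/* assumed reset helper */
	return BLK_EH_RESET_TIMER;
}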
+static void nvme_free_queue(struct nvme_queue *nvmeq)
+{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- if (nvmeq->qid)
- free_cpumask_var(nvmeq->cpu_mask);
kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
+ LLIST_HEAD(q_list);
+ struct nvme_queue *nvmeq, *next;
+ struct llist_node *entry;
int i;
for (i = dev->queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
- rcu_assign_pointer(dev->queues[i], NULL);
- call_rcu(&nvmeq->r_head, nvme_free_queue);
+ struct nvme_queue *nvmeq = dev->queues[i];
+ llist_add(&nvmeq->node, &q_list);
dev->queue_count--;
+ dev->queues[i] = NULL;
}
+ synchronize_rcu();
+ entry = llist_del_all(&q_list);
+ llist_for_each_entry_safe(nvmeq, next, entry, node)
+ nvme_free_queue(nvmeq);
}
/**
* nvme_suspend_queue - put queue into suspended state
* @nvmeq - queue to suspend
- *
- * Returns 1 if already suspended, 0 otherwise.
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
- if (nvmeq->q_suspended) {
- spin_unlock_irq(&nvmeq->q_lock);
- return 1;
- }
- nvmeq->q_suspended = 1;
nvmeq->dev->online_queues--;
spin_unlock_irq(&nvmeq->q_lock);
@@ -1242,15 +1145,18 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
+ struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
+
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
- nvme_cancel_ios(nvmeq, false);
+ if (hctx && hctx->tags)
+ blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
- struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
+ struct nvme_queue *nvmeq = dev->queues[qid];
if (!nvmeq)
return;
@@ -1270,25 +1176,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = nvme_queue_extra(depth);
- struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+ struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
if (!nvmeq)
return NULL;
- nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
- &nvmeq->cq_dma_addr, GFP_KERNEL);
+ nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
- memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
&nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
goto free_cqdma;
- if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
- goto free_sqdma;
-
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1296,23 +1197,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- init_waitqueue_head(&nvmeq->sq_full);
- init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
- bio_list_init(&nvmeq->sq_cong);
- INIT_LIST_HEAD(&nvmeq->iod_bio);
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
nvmeq->qid = qid;
- nvmeq->q_suspended = 1;
dev->queue_count++;
- rcu_assign_pointer(dev->queues[qid], nvmeq);
+ dev->queues[qid] = nvmeq;
return nvmeq;
- free_sqdma:
- dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
- nvmeq->sq_dma_addr);
free_cqdma:
dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
@@ -1335,17 +1228,15 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
struct nvme_dev *dev = nvmeq->dev;
- unsigned extra = nvme_queue_extra(nvmeq->q_depth);
+ spin_lock_irq(&nvmeq->q_lock);
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
- memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
- nvme_cancel_ios(nvmeq, false);
- nvmeq->q_suspended = 0;
dev->online_queues++;
+ spin_unlock_irq(&nvmeq->q_lock);
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1365,10 +1256,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_sq;
- spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock_irq(&nvmeq->q_lock);
-
return result;
release_sq:
@@ -1408,27 +1296,32 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
*/
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
- u32 cc = readl(&dev->bar->cc);
+ dev->ctrl_config &= ~NVME_CC_SHN_MASK;
+ dev->ctrl_config &= ~NVME_CC_ENABLE;
+ writel(dev->ctrl_config, &dev->bar->cc);
- if (cc & NVME_CC_ENABLE)
- writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
return nvme_wait_ready(dev, cap, false);
}
static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
+ dev->ctrl_config &= ~NVME_CC_SHN_MASK;
+ dev->ctrl_config |= NVME_CC_ENABLE;
+ writel(dev->ctrl_config, &dev->bar->cc);
+
return nvme_wait_ready(dev, cap, true);
}
static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
unsigned long timeout;
- u32 cc;
- cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
- writel(cc, &dev->bar->cc);
+ dev->ctrl_config &= ~NVME_CC_SHN_MASK;
+ dev->ctrl_config |= NVME_CC_SHN_NORMAL;
- timeout = 2 * HZ + jiffies;
+ writel(dev->ctrl_config, &dev->bar->cc);
+
+ timeout = SHUTDOWN_TIMEOUT + jiffies;
while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
NVME_CSTS_SHST_CMPLT) {
msleep(100);
@@ -1444,20 +1337,86 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
return 0;
}
+static struct blk_mq_ops nvme_mq_admin_ops = {
+ .queue_rq = nvme_admin_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_hctx = nvme_admin_init_hctx,
+ .exit_hctx = nvme_exit_hctx,
+ .init_request = nvme_admin_init_request,
+ .timeout = nvme_timeout,
+};
+
+static struct blk_mq_ops nvme_mq_ops = {
+ .queue_rq = nvme_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_hctx = nvme_init_hctx,
+ .exit_hctx = nvme_exit_hctx,
+ .init_request = nvme_init_request,
+ .timeout = nvme_timeout,
+};
+
+static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+{
+ if (!dev->admin_q) {
+ dev->admin_tagset.ops = &nvme_mq_admin_ops;
+ dev->admin_tagset.nr_hw_queues = 1;
+ dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
+ dev->admin_tagset.timeout = ADMIN_TIMEOUT;
+ dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+ dev->admin_tagset.cmd_size = sizeof(struct nvme_cmd_info);
+ dev->admin_tagset.driver_data = dev;
+
+ if (blk_mq_alloc_tag_set(&dev->admin_tagset))
+ return -ENOMEM;
+
+ dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
+ if (!dev->admin_q) {
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void nvme_free_admin_tags(struct nvme_dev *dev)
+{
+ if (dev->admin_q)
+ blk_mq_free_tag_set(&dev->admin_tagset);
+}
+
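nvme_alloc_admin_tags() follows the standard blk-mq bring-up sequence: describe the hardware context in a struct blk_mq_tag_set, allocate the set, then build a request queue on top of it; teardown runs in the reverse order. A generic sketch of that lifecycle, assuming placeholder demo_dev/demo_cmd types and a demo_mq_ops table, and following the IS_ERR() convention this patch uses for the namespace queues (the exact failure convention of blk_mq_init_queue() varies by kernel revision):

static int demo_setup_queue(struct demo_dev *dev)
{
	memset(&dev->tagset, 0, sizeof(dev->tagset));
	dev->tagset.ops = &demo_mq_ops;			/* queue_rq, timeout, ... */
	dev->tagset.nr_hw_queues = 1;
	dev->tagset.queue_depth = 64;
	dev->tagset.cmd_size = sizeof(struct demo_cmd);	/* per-request pdu */
	dev->tagset.numa_node = NUMA_NO_NODE;
	dev->tagset.driver_data = dev;

	if (blk_mq_alloc_tag_set(&dev->tagset))
		return -ENOMEM;

	dev->queue = blk_mq_init_queue(&dev->tagset);
	if (IS_ERR(dev->queue)) {
		blk_mq_free_tag_set(&dev->tagset);
		return PTR_ERR(dev->queue);
	}
	return 0;
}

static void demo_teardown_queue(struct demo_dev *dev)
{
	blk_cleanup_queue(dev->queue);		/* stop and drain the queue first */
	blk_mq_free_tag_set(&dev->tagset);	/* then release the tags          */
}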
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
u32 aqa;
u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
+ unsigned page_shift = PAGE_SHIFT;
+ unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
+ unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
+
+ if (page_shift < dev_page_min) {
+ dev_err(&dev->pci_dev->dev,
+ "Minimum device page size (%u) too large for "
+ "host (%u)\n", 1 << dev_page_min,
+ 1 << page_shift);
+ return -ENODEV;
+ }
+ if (page_shift > dev_page_max) {
+ dev_info(&dev->pci_dev->dev,
+ "Device maximum page size (%u) smaller than "
+ "host (%u); enabling work-around\n",
+ 1 << dev_page_max, 1 << page_shift);
+ page_shift = dev_page_max;
+ }
result = nvme_disable_ctrl(dev, cap);
if (result < 0)
return result;
- nvmeq = raw_nvmeq(dev, 0);
+ nvmeq = dev->queues[0];
if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
if (!nvmeq)
return -ENOMEM;
}
@@ -1465,27 +1424,35 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
- dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
- dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ dev->page_size = 1 << page_shift;
+
+ dev->ctrl_config = NVME_CC_CSS_NVM;
+ dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
- writel(dev->ctrl_config, &dev->bar->cc);
result = nvme_enable_ctrl(dev, cap);
if (result)
- return result;
+ goto free_nvmeq;
+
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto free_nvmeq;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
- return result;
+ goto free_tags;
- spin_lock_irq(&nvmeq->q_lock);
- nvme_init_queue(nvmeq, 0);
- spin_unlock_irq(&nvmeq->q_lock);
+ return result;
+
+ free_tags:
+ nvme_free_admin_tags(dev);
+ free_nvmeq:
+ nvme_free_queues(dev, 0);
return result;
}
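The page-size negotiation at the top of nvme_configure_admin_queue() is plain shift arithmetic: CAP.MPSMIN and CAP.MPSMAX encode the controller's supported memory page sizes as powers of two relative to 4 KiB, and CC.MPS stores the chosen shift minus 12. A small illustrative helper (not part of the patch; the values in the comments are examples only):

static unsigned demo_pick_page_shift(unsigned host_shift,
				     unsigned cap_mpsmin, unsigned cap_mpsmax)
{
	unsigned dev_min = cap_mpsmin + 12;	/* e.g. MPSMIN = 0 -> 4 KiB  */
	unsigned dev_max = cap_mpsmax + 12;	/* e.g. MPSMAX = 4 -> 64 KiB */

	if (host_shift < dev_min)
		return 0;		/* host pages too small: bail out, as above */
	if (host_shift > dev_max)
		return dev_max;		/* clamp to the device maximum, as above    */
	return host_shift;		/* CC.MPS is then (page_shift - 12)          */
}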
@@ -1516,7 +1483,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
}
err = -ENOMEM;
- iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ iod = nvme_alloc_iod(count, length, dev, GFP_KERNEL);
if (!iod)
goto put_pages;
@@ -1644,7 +1611,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (length != (io.nblocks + 1) << ns->lba_shift)
status = -ENOMEM;
else
- status = nvme_submit_io_cmd(dev, &c, NULL);
+ status = nvme_submit_io_cmd(dev, ns, &c, NULL);
if (meta_len) {
if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1676,10 +1643,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return status;
}
-static int nvme_user_admin_cmd(struct nvme_dev *dev,
- struct nvme_admin_cmd __user *ucmd)
+static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
+ struct nvme_passthru_cmd __user *ucmd)
{
- struct nvme_admin_cmd cmd;
+ struct nvme_passthru_cmd cmd;
struct nvme_command c;
int status, length;
struct nvme_iod *uninitialized_var(iod);
@@ -1716,10 +1683,23 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
ADMIN_TIMEOUT;
+
if (length != cmd.data_len)
status = -ENOMEM;
- else
- status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
+ else if (ns) {
+ struct request *req;
+
+ req = blk_mq_alloc_request(ns->queue, WRITE,
+ (GFP_KERNEL|__GFP_WAIT), false);
+ if (IS_ERR(req))
+ status = PTR_ERR(req);
+ else {
+ status = nvme_submit_sync_cmd(req, &c, &cmd.result,
+ timeout);
+ blk_mq_free_request(req);
+ }
+ } else
+ status = __nvme_submit_admin_cmd(dev, &c, &cmd.result, timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1743,7 +1723,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
force_successful_syscall_return();
return ns->ns_id;
case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
+ return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
+ case NVME_IOCTL_IO_CMD:
+ return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
case SG_GET_VERSION_NUM:
@@ -1759,11 +1741,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
-
switch (cmd) {
case SG_IO:
- return nvme_sg_io32(ns, arg);
+ return -ENOIOCTLCMD;
}
return nvme_ioctl(bdev, mode, cmd, arg);
}
@@ -1773,11 +1753,18 @@ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
- struct nvme_dev *dev = ns->dev;
+ int ret = 0;
+ struct nvme_ns *ns;
- kref_get(&dev->kref);
- return 0;
+ spin_lock(&dev_list_lock);
+ ns = bdev->bd_disk->private_data;
+ if (!ns)
+ ret = -ENXIO;
+ else if (!kref_get_unless_zero(&ns->dev->kref))
+ ret = -ENXIO;
+ spin_unlock(&dev_list_lock);
+
+ return ret;
}
static void nvme_free_dev(struct kref *kref);
@@ -1799,6 +1786,35 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
return 0;
}
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_id_ns *id;
+ dma_addr_t dma_addr;
+ int lbaf;
+
+ id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
+ GFP_KERNEL);
+ if (!id) {
+ dev_warn(&dev->pci_dev->dev, "%s: Memory allocation failure\n",
+ __func__);
+ return 0;
+ }
+
+ if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
+ goto free;
+
+ lbaf = id->flbas & 0xf;
+ ns->lba_shift = id->lbaf[lbaf].ds;
+
+ blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+ set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+ free:
+ dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
+ return 0;
+}
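The capacity math in nvme_revalidate_disk() is compact but worth unpacking: the Identify Namespace data reports the LBA data size as a power-of-two shift (lbaf[].ds) and the namespace size (nsze) in logical blocks, while set_capacity() expects 512-byte sectors, hence the "- 9". A worked example with illustrative values (a 4 KiB LBA format and a one-million-block namespace):

	unsigned lba_shift = 12;			/* id->lbaf[lbaf].ds          */
	u64 nsze = 1000000;				/* namespace size in blocks   */

	unsigned block_size = 1u << lba_shift;		/* 4096-byte logical blocks   */
	sector_t sectors = nsze << (lba_shift - 9);	/* 8,000,000 512-byte sectors */
	/* set_capacity(disk, sectors) then reports 4,096,000,000 bytes. */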
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
@@ -1806,43 +1822,9 @@ static const struct block_device_operations nvme_fops = {
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
+ .revalidate_disk = nvme_revalidate_disk,
};
-static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
-{
- struct nvme_iod *iod, *next;
-
- list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
- if (unlikely(nvme_submit_iod(nvmeq, iod)))
- break;
- list_del(&iod->node);
- if (bio_list_empty(&nvmeq->sq_cong) &&
- list_empty(&nvmeq->iod_bio))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
- }
-}
-
-static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
-{
- while (bio_list_peek(&nvmeq->sq_cong)) {
- struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
- struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
-
- if (bio_list_empty(&nvmeq->sq_cong) &&
- list_empty(&nvmeq->iod_bio))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
- if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- if (!waitqueue_active(&nvmeq->sq_full))
- add_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
- bio_list_add_head(&nvmeq->sq_cong, bio);
- break;
- }
- }
-}
-
static int nvme_kthread(void *data)
{
struct nvme_dev *dev, *next;
@@ -1858,28 +1840,26 @@ static int nvme_kthread(void *data)
continue;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
- "Failed status, reset controller\n");
+ "Failed status: %x, reset controller\n",
+ readl(&dev->bar->csts));
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
continue;
}
- rcu_read_lock();
for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq =
- rcu_dereference(dev->queues[i]);
+ struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
continue;
spin_lock_irq(&nvmeq->q_lock);
- if (nvmeq->q_suspended)
- goto unlock;
nvme_process_cq(nvmeq);
- nvme_cancel_ios(nvmeq, true);
- nvme_resubmit_bios(nvmeq);
- nvme_resubmit_iods(nvmeq);
- unlock:
+
+ while ((i == 0) && (dev->event_limit > 0)) {
+ if (nvme_submit_async_admin_req(dev))
+ break;
+ dev->event_limit--;
+ }
spin_unlock_irq(&nvmeq->q_lock);
}
- rcu_read_unlock();
}
spin_unlock(&dev_list_lock);
schedule_timeout(round_jiffies_relative(HZ));
@@ -1902,28 +1882,28 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
{
struct nvme_ns *ns;
struct gendisk *disk;
+ int node = dev_to_node(&dev->pci_dev->dev);
int lbaf;
if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
return NULL;
- ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return NULL;
- ns->queue = blk_alloc_queue(GFP_KERNEL);
- if (!ns->queue)
+ ns->queue = blk_mq_init_queue(&dev->tagset);
+ if (IS_ERR(ns->queue))
goto out_free_ns;
- ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
- blk_queue_make_request(ns->queue, nvme_make_request);
+ queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(0);
+ disk = alloc_disk_node(0, node);
if (!disk)
goto out_free_queue;
+
ns->ns_id = nsid;
ns->disk = disk;
lbaf = id->flbas & 0xf;
@@ -1932,6 +1912,8 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
if (dev->max_hw_sectors)
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+ if (dev->stripe_size)
+ blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
if (dev->vwc & NVME_CTRL_VWC_PRESENT)
blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
@@ -1957,143 +1939,19 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static int nvme_find_closest_node(int node)
-{
- int n, val, min_val = INT_MAX, best_node = node;
-
- for_each_online_node(n) {
- if (n == node)
- continue;
- val = node_distance(node, n);
- if (val < min_val) {
- min_val = val;
- best_node = n;
- }
- }
- return best_node;
-}
-
-static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
- int count)
-{
- int cpu;
- for_each_cpu(cpu, qmask) {
- if (cpumask_weight(nvmeq->cpu_mask) >= count)
- break;
- if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
- *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
- }
-}
-
-static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
- const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
-{
- int next_cpu;
- for_each_cpu(next_cpu, new_mask) {
- cpumask_or(mask, mask, get_cpu_mask(next_cpu));
- cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
- cpumask_and(mask, mask, unassigned_cpus);
- nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
- }
-}
-
static void nvme_create_io_queues(struct nvme_dev *dev)
{
- unsigned i, max;
+ unsigned i;
- max = min(dev->max_qid, num_online_cpus());
- for (i = dev->queue_count; i <= max; i++)
+ for (i = dev->queue_count; i <= dev->max_qid; i++)
if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
break;
- max = min(dev->queue_count - 1, num_online_cpus());
- for (i = dev->online_queues; i <= max; i++)
- if (nvme_create_queue(raw_nvmeq(dev, i), i))
+ for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
+ if (nvme_create_queue(dev->queues[i], i))
break;
}
-/*
- * If there are fewer queues than online cpus, this will try to optimally
- * assign a queue to multiple cpus by grouping cpus that are "close" together:
- * thread siblings, core, socket, closest node, then whatever else is
- * available.
- */
-static void nvme_assign_io_queues(struct nvme_dev *dev)
-{
- unsigned cpu, cpus_per_queue, queues, remainder, i;
- cpumask_var_t unassigned_cpus;
-
- nvme_create_io_queues(dev);
-
- queues = min(dev->online_queues - 1, num_online_cpus());
- if (!queues)
- return;
-
- cpus_per_queue = num_online_cpus() / queues;
- remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
-
- if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
- return;
-
- cpumask_copy(unassigned_cpus, cpu_online_mask);
- cpu = cpumask_first(unassigned_cpus);
- for (i = 1; i <= queues; i++) {
- struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
- cpumask_t mask;
-
- cpumask_clear(nvmeq->cpu_mask);
- if (!cpumask_weight(unassigned_cpus)) {
- unlock_nvmeq(nvmeq);
- break;
- }
-
- mask = *get_cpu_mask(cpu);
- nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
- if (cpus_weight(mask) < cpus_per_queue)
- nvme_add_cpus(&mask, unassigned_cpus,
- topology_thread_cpumask(cpu),
- nvmeq, cpus_per_queue);
- if (cpus_weight(mask) < cpus_per_queue)
- nvme_add_cpus(&mask, unassigned_cpus,
- topology_core_cpumask(cpu),
- nvmeq, cpus_per_queue);
- if (cpus_weight(mask) < cpus_per_queue)
- nvme_add_cpus(&mask, unassigned_cpus,
- cpumask_of_node(cpu_to_node(cpu)),
- nvmeq, cpus_per_queue);
- if (cpus_weight(mask) < cpus_per_queue)
- nvme_add_cpus(&mask, unassigned_cpus,
- cpumask_of_node(
- nvme_find_closest_node(
- cpu_to_node(cpu))),
- nvmeq, cpus_per_queue);
- if (cpus_weight(mask) < cpus_per_queue)
- nvme_add_cpus(&mask, unassigned_cpus,
- unassigned_cpus,
- nvmeq, cpus_per_queue);
-
- WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
- "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
- dev->instance, i);
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- nvmeq->cpu_mask);
- cpumask_andnot(unassigned_cpus, unassigned_cpus,
- nvmeq->cpu_mask);
- cpu = cpumask_next(cpu, unassigned_cpus);
- if (remainder && !--remainder)
- cpus_per_queue++;
- unlock_nvmeq(nvmeq);
- }
- WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
- dev->instance);
- i = 0;
- cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
- for_each_cpu(cpu, unassigned_cpus)
- *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
- free_cpumask_var(unassigned_cpus);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -2107,7 +1965,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
if (status > 0) {
dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n",
status);
- return -EBUSY;
+ return 0;
}
return min(result & 0xffff, result >> 16) + 1;
}
@@ -2117,39 +1975,15 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
-static void nvme_cpu_workfn(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, cpu_work);
- if (dev->initialized)
- nvme_assign_io_queues(dev);
-}
-
-static int nvme_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- struct nvme_dev *dev;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_DEAD:
- spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node)
- schedule_work(&dev->cpu_work);
- spin_unlock(&dev_list_lock);
- break;
- }
- return NOTIFY_OK;
-}
-
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = raw_nvmeq(dev, 0);
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, i, vecs, nr_io_queues, size;
nr_io_queues = num_possible_cpus();
result = set_queue_count(dev, nr_io_queues);
- if (result < 0)
+ if (result <= 0)
return result;
if (result < nr_io_queues)
nr_io_queues = result;
@@ -2172,6 +2006,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, adminq);
+ /*
+ * If we enabled MSI-X early because the device exposes no INTx line,
+ * disable it again before setting up the full vector range we need.
+ */
+ if (!pdev->irq)
+ pci_disable_msix(pdev);
+
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
@@ -2195,14 +2036,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->max_qid = nr_io_queues;
result = queue_request_irq(dev, adminq, adminq->irqname);
- if (result) {
- adminq->q_suspended = 1;
+ if (result)
goto free_queues;
- }
/* Free previously allocated queues that are no longer usable */
nvme_free_queues(dev, nr_io_queues + 1);
- nvme_assign_io_queues(dev);
+ nvme_create_io_queues(dev);
return 0;
@@ -2245,14 +2084,37 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc;
+ dev->event_limit = min(ctrl->aerl + 1, 8);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
- (pdev->device == 0x0953) && ctrl->vs[3])
+ (pdev->device == 0x0953) && ctrl->vs[3]) {
+ unsigned int max_hw_sectors;
+
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
+ max_hw_sectors = dev->stripe_size >> (shift - 9);
+ if (dev->max_hw_sectors) {
+ dev->max_hw_sectors = min(max_hw_sectors,
+ dev->max_hw_sectors);
+ } else
+ dev->max_hw_sectors = max_hw_sectors;
+ }
+
+ dev->tagset.ops = &nvme_mq_ops;
+ dev->tagset.nr_hw_queues = dev->online_queues - 1;
+ dev->tagset.timeout = NVME_IO_TIMEOUT;
+ dev->tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
+ dev->tagset.queue_depth =
+ min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ dev->tagset.cmd_size = sizeof(struct nvme_cmd_info);
+ dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
+ dev->tagset.driver_data = dev;
+
+ if (blk_mq_alloc_tag_set(&dev->tagset))
+ goto out;
id_ns = mem;
for (i = 1; i <= nn; i++) {
@@ -2293,6 +2155,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (!bars)
+ goto disable_pci;
+
if (pci_request_selected_regions(pdev, bars, "nvme"))
goto disable_pci;
@@ -2303,10 +2168,22 @@ static int nvme_dev_map(struct nvme_dev *dev)
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
+
if (readl(&dev->bar->csts) == -1) {
result = -ENODEV;
goto unmap;
}
+
+ /*
+ * Some devices don't advertise INTx interrupts; pre-enable a single
+ * MSI-X vector for setup. We'll adjust this later.
+ */
+ if (!pdev->irq) {
+ result = pci_enable_msix(pdev, dev->entry, 1);
+ if (result < 0)
+ goto unmap;
+ }
+
cap = readq(&dev->bar->cap);
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
@@ -2402,7 +2279,8 @@ static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
init_kthread_work(&nvmeq->cmdinfo.work, fn);
- return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+ return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo,
+ ADMIN_TIMEOUT);
}
static void nvme_del_cq_work_handler(struct kthread_work *work)
@@ -2465,7 +2343,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
atomic_set(&dq.refcount, 0);
dq.worker = &worker;
for (i = dev->queue_count - 1; i > 0; i--) {
- struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+ struct nvme_queue *nvmeq = dev->queues[i];
if (nvme_suspend_queue(nvmeq))
continue;
@@ -2501,13 +2379,16 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
+ u32 csts = -1;
dev->initialized = 0;
nvme_dev_list_remove(dev);
- if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ if (dev->bar)
+ csts = readl(&dev->bar->csts);
+ if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+ struct nvme_queue *nvmeq = dev->queues[i];
nvme_suspend_queue(nvmeq);
nvme_clear_queue(nvmeq);
}
@@ -2519,6 +2400,12 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_unmap(dev);
}
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->admin_q && !blk_queue_dying(dev->admin_q))
+ blk_cleanup_queue(dev->admin_q);
+}
+
static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns;
@@ -2590,6 +2477,11 @@ static void nvme_free_namespaces(struct nvme_dev *dev)
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
+
+ spin_lock(&dev_list_lock);
+ ns->disk->private_data = NULL;
+ spin_unlock(&dev_list_lock);
+
put_disk(ns->disk);
kfree(ns);
}
@@ -2599,8 +2491,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+ pci_dev_put(dev->pci_dev);
nvme_free_namespaces(dev);
- free_percpu(dev->io_queue);
+ nvme_release_instance(dev);
+ blk_mq_free_tag_set(&dev->tagset);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2625,9 +2519,16 @@ static int nvme_dev_release(struct inode *inode, struct file *f)
static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
struct nvme_dev *dev = f->private_data;
+ struct nvme_ns *ns;
+
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_admin_cmd(dev, (void __user *)arg);
+ return nvme_user_cmd(dev, NULL, (void __user *)arg);
+ case NVME_IOCTL_IO_CMD:
+ if (list_empty(&dev->namespaces))
+ return -ENOTTY;
+ ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
+ return nvme_user_cmd(dev, ns, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -2641,6 +2542,22 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq;
+ int i;
+
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];
+
+ if (!nvmeq->hctx)
+ continue;
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ nvmeq->hctx->cpumask);
+ }
+}
+
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
@@ -2664,7 +2581,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (start_thread) {
nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- wake_up(&nvme_kthread_wait);
+ wake_up_all(&nvme_kthread_wait);
} else
wait_event_killable(nvme_kthread_wait, nvme_thread);
@@ -2673,10 +2590,14 @@ static int nvme_dev_start(struct nvme_dev *dev)
goto disable;
}
+ nvme_init_queue(dev->queues[0], 0);
+
result = nvme_setup_io_queues(dev);
- if (result && result != -EBUSY)
+ if (result)
goto disable;
+ nvme_set_irq_hints(dev);
+
return result;
disable:
@@ -2693,7 +2614,7 @@ static int nvme_remove_dead_ctrl(void *arg)
struct pci_dev *pdev = dev->pci_dev;
if (pci_get_drvdata(pdev))
- pci_stop_and_remove_bus_device(pdev);
+ pci_stop_and_remove_bus_device_locked(pdev);
kref_put(&dev->kref, nvme_free_dev);
return 0;
}
@@ -2702,8 +2623,8 @@ static void nvme_remove_disks(struct work_struct *ws)
{
struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
- nvme_dev_remove(dev);
nvme_free_queues(dev, 1);
+ nvme_dev_remove(dev);
}
static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2711,9 +2632,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
int ret;
ret = nvme_dev_start(dev);
- if (ret && ret != -EBUSY)
+ if (ret)
return ret;
- if (ret == -EBUSY) {
+ if (dev->online_queues < 2) {
spin_lock(&dev_list_lock);
dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
@@ -2727,7 +2648,7 @@ static void nvme_dev_reset(struct nvme_dev *dev)
{
nvme_dev_shutdown(dev);
if (nvme_dev_resume(dev)) {
- dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ dev_warn(&dev->pci_dev->dev, "Device failed to resume\n");
kref_get(&dev->kref);
if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
dev->instance))) {
@@ -2752,33 +2673,33 @@ static void nvme_reset_workfn(struct work_struct *work)
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int result = -ENOMEM;
+ int node, result = -ENOMEM;
struct nvme_dev *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ node = dev_to_node(&pdev->dev);
+ if (node == NUMA_NO_NODE)
+ set_dev_node(&pdev->dev, 0);
+
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
- GFP_KERNEL);
+ dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
+ GFP_KERNEL, node);
if (!dev->entry)
goto free;
- dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
- GFP_KERNEL);
+ dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
+ GFP_KERNEL, node);
if (!dev->queues)
goto free;
- dev->io_queue = alloc_percpu(unsigned short);
- if (!dev->io_queue)
- goto free;
INIT_LIST_HEAD(&dev->namespaces);
dev->reset_workfn = nvme_reset_failed_dev;
INIT_WORK(&dev->reset_work, nvme_reset_workfn);
- INIT_WORK(&dev->cpu_work, nvme_cpu_workfn);
- dev->pci_dev = pdev;
+ dev->pci_dev = pci_dev_get(pdev);
pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
- goto free;
+ goto put_pci;
result = nvme_setup_prp_pools(dev);
if (result)
@@ -2786,17 +2707,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
kref_init(&dev->kref);
result = nvme_dev_start(dev);
- if (result) {
- if (result == -EBUSY)
- goto create_cdev;
+ if (result)
goto release_pools;
- }
- result = nvme_dev_add(dev);
+ if (dev->online_queues > 1)
+ result = nvme_dev_add(dev);
if (result)
goto shutdown;
- create_cdev:
scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
dev->miscdev.minor = MISC_DYNAMIC_MINOR;
dev->miscdev.parent = &pdev->dev;
@@ -2806,11 +2724,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
+ nvme_set_irq_hints(dev);
+
dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_dev_remove_admin(dev);
nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
@@ -2819,8 +2740,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
+ put_pci:
+ pci_dev_put(dev->pci_dev);
free:
- free_percpu(dev->io_queue);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2829,12 +2751,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
{
- struct nvme_dev *dev = pci_get_drvdata(pdev);
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
- if (prepare)
- nvme_dev_shutdown(dev);
- else
- nvme_dev_resume(dev);
+ if (prepare)
+ nvme_dev_shutdown(dev);
+ else
+ nvme_dev_resume(dev);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2853,13 +2775,12 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
- flush_work(&dev->cpu_work);
misc_deregister(&dev->miscdev);
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- rcu_barrier();
- nvme_release_instance(dev);
+ nvme_free_admin_tags(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2942,18 +2863,11 @@ static int __init nvme_init(void)
else if (result > 0)
nvme_major = result;
- nvme_nb.notifier_call = &nvme_cpu_notify;
- result = register_hotcpu_notifier(&nvme_nb);
- if (result)
- goto unregister_blkdev;
-
result = pci_register_driver(&nvme_driver);
if (result)
- goto unregister_hotcpu;
+ goto unregister_blkdev;
return 0;
- unregister_hotcpu:
- unregister_hotcpu_notifier(&nvme_nb);
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
kill_workq:
@@ -2973,6 +2887,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.9");
+MODULE_VERSION("1.0");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 0b4b2775600e..5e78568026c3 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -2105,7 +2105,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
nvme_offset += unit_num_blocks;
- nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
+ nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
if (nvme_sc != NVME_SC_SUCCESS) {
nvme_unmap_user_pages(dev,
(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2658,7 +2658,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
+ nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
goto out;
@@ -2686,7 +2686,7 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id);
- nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
+ nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -2894,7 +2894,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.dsm.nr = cpu_to_le32(ndesc - 1);
c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
+ nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
@@ -2915,6 +2915,14 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
+ /*
+ * Prime the hdr with good status for scsi commands that don't require
+ * an nvme command for translation.
+ */
+ retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+ if (retcode)
+ return retcode;
+
opcode = cmd[0];
switch (opcode) {
@@ -3016,152 +3024,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
-#ifdef CONFIG_COMPAT
-typedef struct sg_io_hdr32 {
- compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
- compat_int_t dxfer_direction; /* [i] data transfer direction */
- unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
- unsigned char mx_sb_len; /* [i] max length to write to sbp */
- unsigned short iovec_count; /* [i] 0 implies no scatter gather */
- compat_uint_t dxfer_len; /* [i] byte count of data transfer */
- compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
- or scatter gather list */
- compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
- compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
- compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
- compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
- compat_int_t pack_id; /* [i->o] unused internally (normally) */
- compat_uptr_t usr_ptr; /* [i->o] unused internally */
- unsigned char status; /* [o] scsi status */
- unsigned char masked_status; /* [o] shifted, masked scsi status */
- unsigned char msg_status; /* [o] messaging level data (optional) */
- unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
- unsigned short host_status; /* [o] errors from host adapter */
- unsigned short driver_status; /* [o] errors from software driver */
- compat_int_t resid; /* [o] dxfer_len - actual_transferred */
- compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
- compat_uint_t info; /* [o] auxiliary information */
-} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
-
-typedef struct sg_iovec32 {
- compat_uint_t iov_base;
- compat_uint_t iov_len;
-} sg_iovec32_t;
-
-static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
-{
- sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
- sg_iovec32_t __user *iov32 = dxferp;
- int i;
-
- for (i = 0; i < iovec_count; i++) {
- u32 base, len;
-
- if (get_user(base, &iov32[i].iov_base) ||
- get_user(len, &iov32[i].iov_len) ||
- put_user(compat_ptr(base), &iov[i].iov_base) ||
- put_user(len, &iov[i].iov_len))
- return -EFAULT;
- }
-
- if (put_user(iov, &sgio->dxferp))
- return -EFAULT;
- return 0;
-}
-
-int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
-{
- sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
- sg_io_hdr_t __user *sgio;
- u16 iovec_count;
- u32 data;
- void __user *dxferp;
- int err;
- int interface_id;
-
- if (get_user(interface_id, &sgio32->interface_id))
- return -EFAULT;
- if (interface_id != 'S')
- return -EINVAL;
-
- if (get_user(iovec_count, &sgio32->iovec_count))
- return -EFAULT;
-
- {
- void __user *top = compat_alloc_user_space(0);
- void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
- (iovec_count * sizeof(sg_iovec_t)));
- if (new > top)
- return -EINVAL;
-
- sgio = new;
- }
-
- /* Ok, now construct. */
- if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
- (2 * sizeof(int)) +
- (2 * sizeof(unsigned char)) +
- (1 * sizeof(unsigned short)) +
- (1 * sizeof(unsigned int))))
- return -EFAULT;
-
- if (get_user(data, &sgio32->dxferp))
- return -EFAULT;
- dxferp = compat_ptr(data);
- if (iovec_count) {
- if (sg_build_iovec(sgio, dxferp, iovec_count))
- return -EFAULT;
- } else {
- if (put_user(dxferp, &sgio->dxferp))
- return -EFAULT;
- }
-
- {
- unsigned char __user *cmdp;
- unsigned char __user *sbp;
-
- if (get_user(data, &sgio32->cmdp))
- return -EFAULT;
- cmdp = compat_ptr(data);
-
- if (get_user(data, &sgio32->sbp))
- return -EFAULT;
- sbp = compat_ptr(data);
-
- if (put_user(cmdp, &sgio->cmdp) ||
- put_user(sbp, &sgio->sbp))
- return -EFAULT;
- }
-
- if (copy_in_user(&sgio->timeout, &sgio32->timeout,
- 3 * sizeof(int)))
- return -EFAULT;
-
- if (get_user(data, &sgio32->usr_ptr))
- return -EFAULT;
- if (put_user(compat_ptr(data), &sgio->usr_ptr))
- return -EFAULT;
-
- err = nvme_sg_io(ns, sgio);
- if (err >= 0) {
- void __user *datap;
-
- if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
- sizeof(int)) ||
- get_user(datap, &sgio->usr_ptr) ||
- put_user((u32)(unsigned long)datap,
- &sgio32->usr_ptr) ||
- copy_in_user(&sgio32->status, &sgio->status,
- (4 * sizeof(unsigned char)) +
- (2 * sizeof(unsigned short)) +
- (3 * sizeof(int))))
- err = -EFAULT;
- }
-
- return err;
-}
-#endif
-
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 27b71a0b72d0..3ec85dfce124 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2370,8 +2370,12 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
opcode = CEPH_OSD_OP_READ;
}
- osd_req_op_extent_init(osd_request, num_ops, opcode, offset, length,
- 0, 0);
+ if (opcode == CEPH_OSD_OP_DELETE)
+ osd_req_op_init(osd_request, num_ops, opcode);
+ else
+ osd_req_op_extent_init(osd_request, num_ops, opcode,
+ offset, length, 0, 0);
+
if (obj_request->type == OBJ_REQUEST_BIO)
osd_req_op_extent_osd_data_bio(osd_request, num_ops,
obj_request->bio_list, length);
@@ -3405,8 +3409,7 @@ err_rq:
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- if (snapc)
- ceph_put_snap_context(snapc);
+ ceph_put_snap_context(snapc);
blk_end_request_all(rq, result);
}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 40ee7705df63..ac8c62cb4875 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,37 +112,16 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- struct hd_struct *part0 = &card->gendisk->part0;
- int rw = bio_data_dir(bio);
- int cpu;
-
- cpu = part_stat_lock();
-
- part_round_stats(cpu, part0);
- part_inc_in_flight(part0, rw);
-
- part_stat_unlock();
+ generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
+ &card->gendisk->part0);
}
static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- struct hd_struct *part0 = &card->gendisk->part0;
- unsigned long duration = jiffies - start_time;
- int rw = bio_data_dir(bio);
- int cpu;
-
- cpu = part_stat_lock();
-
- part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
- part_stat_inc(cpu, part0, ios[rw]);
- part_stat_add(cpu, part0, ticks[rw], duration);
-
- part_round_stats(cpu, part0);
- part_dec_in_flight(part0, rw);
-
- part_stat_unlock();
+ generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0,
+ start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0ebadf93b6c5..4b911ed96ea3 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -23,8 +23,8 @@
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 13, 2013"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "November 24, 2014"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+static struct workqueue_struct *sunvdc_wq;
+
struct vdc_req_entry {
struct request *req;
};
@@ -60,6 +62,10 @@ struct vdc_port {
u64 max_xfer_size;
u32 vdisk_block_size;
+ u64 ldc_timeout;
+ struct timer_list ldc_reset_timer;
+ struct work_struct ldc_reset_work;
+
/* The server fills these in for us in the disk attribute
* ACK packet.
*/
@@ -71,6 +77,10 @@ struct vdc_port {
char disk_name[32];
};
+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
return container_of(vio, struct vdc_port, vio);
@@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
.ioctl = vdc_ioctl,
};
+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ /* Restart the blk queue once the ring is at least half empty. This is
+ * also called when the handshake completes, which can happen before a
+ * disk has been allocated, hence the port->disk check.
+ */
+ if (port->disk && blk_queue_stopped(port->disk->queue) &&
+ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+ blk_start_queue(port->disk->queue);
+ }
+
+}
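vdc_blk_queue_start() is the restart half of the usual stop/start flow control for an old-style request queue: the submission path stops the queue when the TX descriptor ring is exhausted, and the completion path restarts it once at least half of the ring is free again, which gives some hysteresis. A generic sketch of both halves, with demo_ring_* helpers and DEMO_RING_SIZE standing in for the vdc dring accounting; blk_stop_queue() and blk_start_queue() must be called with the queue lock held:

static void demo_submit(struct request_queue *q, struct demo_ring *ring,
			struct request *req)
{
	if (!demo_ring_avail(ring)) {		/* assumed: free slots in the ring */
		blk_requeue_request(q, req);
		blk_stop_queue(q);		/* queue lock already held here    */
		return;
	}
	demo_ring_push(ring, req);		/* assumed: post req to the ring   */
}

static void demo_complete(struct request_queue *q, struct demo_ring *ring)
{
	if (blk_queue_stopped(q) &&
	    demo_ring_avail(ring) * 100 / DEMO_RING_SIZE >= 50)
		blk_start_queue(q);
}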
+
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
if (vio->cmp &&
@@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
+ struct vdc_port *port = to_vdc_port(vio);
+
+ del_timer(&port->ldc_reset_timer);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+ vdc_blk_queue_start(port);
}
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -269,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
desc->hdr.state = VIO_DESC_FREE;
- dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->cons = vio_dring_next(dr, index);
req = rqe->req;
if (req == NULL) {
@@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
- /* restart blk queue when ring is half emptied */
- if (blk_queue_stopped(port->disk->queue) &&
- vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
- blk_start_queue(port->disk->queue);
+ vdc_blk_queue_start(port);
}
static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event)
spin_lock_irqsave(&vio->lock, flags);
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
+ if (unlikely(event == LDC_EVENT_RESET)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ queue_work(sunvdc_wq, &port->ldc_reset_work);
+ goto out;
+ }
+
+ if (unlikely(event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ goto out;
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ pr_warn(PFX "Unexpected LDC event %d\n", event);
+ goto out;
}
err = 0;
@@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event)
}
if (err < 0)
vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
spin_unlock_irqrestore(&vio->lock, flags);
}
@@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
delay = 128;
} while (err == -EAGAIN);
+ if (err == -ENOTCONN)
+ vdc_ldc_reset(port);
return err;
}
@@ -472,7 +504,7 @@ static int __send_request(struct request *req)
printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
} else {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
}
return err;
@@ -626,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
err = __vdc_tx_trigger(port);
if (err >= 0) {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
spin_unlock_irqrestore(&port->vio.lock, flags);
wait_for_completion(&comp.com);
@@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
}
}
-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
{
struct vio_completion comp;
- struct request_queue *q;
- struct gendisk *g;
- int err;
init_completion(&comp.com);
comp.err = 0;
@@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port)
port->vio.cmp = &comp;
vio_port_up(&port->vio);
-
wait_for_completion(&comp.com);
- if (comp.err)
- return comp.err;
+ return comp.err;
+}
+
+static void vdc_port_down(struct vdc_port *port)
+{
+ ldc_disconnect(port->vio.lp);
+ ldc_unbind(port->vio.lp);
+ vdc_free_tx_ring(port);
+ vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+ struct request_queue *q;
+ struct gendisk *g;
+ int err;
+
+ err = vdc_port_up(port);
+ if (err)
+ return err;
if (vdc_version_supported(port, 1, 1)) {
/* vdisk_size should be set during the handshake, if it wasn't
@@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct mdesc_handle *hp;
struct vdc_port *port;
int err;
+ const u64 *ldc_timeout;
print_version();
@@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
port->vdisk_size = -1;
+ /* The actual wall time may be up to double the timeout, because
+ * do_generic_file_read() issues a readahead I/O first and, once that
+ * fails, retries by reading a single page.
+ */
+ ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+ port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+ setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+ (unsigned long)port);
+ INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
vdc_versions, ARRAY_SIZE(vdc_versions),
&vdc_vio_ops, port->disk_name);
@@ -896,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev)
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+ blk_stop_queue(port->disk->queue);
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ flush_work(&port->ldc_reset_work);
+ del_timer_sync(&port->ldc_reset_timer);
del_timer_sync(&port->vio.timer);
+ del_gendisk(port->disk);
+ blk_cleanup_queue(port->disk->queue);
+ put_disk(port->disk);
+ port->disk = NULL;
+
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
@@ -908,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)
return 0;
}
+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ u32 idx;
+
+ for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+ struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+ struct vdc_req_entry *rqe = &port->rq_arr[idx];
+ struct request *req;
+
+ ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+ desc->hdr.state = VIO_DESC_FREE;
+ dr->cons = vio_dring_next(dr, idx);
+
+ req = rqe->req;
+ if (req == NULL) {
+ vdc_end_special(port, desc);
+ continue;
+ }
+
+ rqe->req = NULL;
+ blk_requeue_request(port->disk->queue, req);
+ }
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+ struct request *req;
+
+ while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+ struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+ pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+ port->disk_name, port->ldc_timeout);
+ vdc_queue_drain(port);
+ vdc_blk_queue_start(port);
+ }
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+ struct vdc_port *port;
+ struct vio_driver_state *vio;
+ unsigned long flags;
+
+ port = container_of(work, struct vdc_port, ldc_reset_work);
+ vio = &port->vio;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ vdc_ldc_reset(port);
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+ int err;
+
+ assert_spin_locked(&port->vio.lock);
+
+ pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+ blk_stop_queue(port->disk->queue);
+ vdc_requeue_inflight(port);
+ vdc_port_down(port);
+
+ err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+ if (err) {
+ pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+ return;
+ }
+
+ err = vdc_alloc_tx_ring(port);
+ if (err) {
+ pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+ goto err_free_ldc;
+ }
+
+ if (port->ldc_timeout)
+ mod_timer(&port->ldc_reset_timer,
+ round_jiffies(jiffies + HZ * port->ldc_timeout));
+ mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+ return;
+
+err_free_ldc:
+ vio_ldc_free(&port->vio);
+}
+
static const struct vio_device_id vdc_port_match[] = {
{
.type = "vdc-port",
@@ -927,9 +1093,13 @@ static int __init vdc_init(void)
{
int err;
+ sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ if (!sunvdc_wq)
+ return -ENOMEM;
+
err = register_blkdev(0, VDCBLK_NAME);
if (err < 0)
- goto out_err;
+ goto out_free_wq;
vdc_major = err;
@@ -943,7 +1113,8 @@ out_unregister_blkdev:
unregister_blkdev(vdc_major, VDCBLK_NAME);
vdc_major = 0;
-out_err:
+out_free_wq:
+ destroy_workqueue(sunvdc_wq);
return err;
}
@@ -951,6 +1122,7 @@ static void __exit vdc_exit(void)
{
vio_unregister_driver(&vdc_port_driver);
unregister_blkdev(vdc_major, VDCBLK_NAME);
+ destroy_workqueue(sunvdc_wq);
}
module_init(vdc_init);
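The sunvdc changes above defer the LDC link reset to a timer plus a work item so the slow reset path runs outside interrupt context. A minimal, self-contained sketch of that pattern (hypothetical demo_* names; the system workqueue stands in for the driver's private sunvdc_wq; ~3.18-era setup_timer() API):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct timer_list demo_timeout;
static struct work_struct demo_reset_work;

static void demo_reset_fn(struct work_struct *work)
{
	/* Slow path: tear down and rebuild the link in process context. */
	pr_info("demo: performing link reset\n");
}

static void demo_timeout_fn(unsigned long secs)
{
	/* The link never came back within the budget: fail pending I/O. */
	pr_warn("demo: link down for %lu seconds, draining queue\n", secs);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_reset_work, demo_reset_fn);
	setup_timer(&demo_timeout, demo_timeout_fn, 10);

	/* On a link-down event: queue the reset and arm the give-up timer
	 * (the real driver cancels it once the handshake completes). */
	schedule_work(&demo_reset_work);
	mod_timer(&demo_timeout, round_jiffies(jiffies + 10 * HZ));
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_reset_work);
	del_timer_sync(&demo_timeout);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");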
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 6b44bbe528b7..b5afd495d482 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -971,7 +971,6 @@ static struct platform_driver swim_driver = {
.remove = swim_remove,
.driver = {
.name = CARDNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c6a27d54ad62..7ef7c098708f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -80,7 +80,7 @@ static int __virtblk_add_req(struct virtqueue *vq,
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
- int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
+ __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;
@@ -91,19 +91,19 @@ static int __virtblk_add_req(struct virtqueue *vq,
* block, and before the normal inhdr we put the sense data and the
* inhdr with additional status information.
*/
- if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
sgs[num_out++] = &cmd;
}
if (have_data) {
- if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
+ if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = data_sg;
else
sgs[num_out + num_in++] = data_sg;
}
- if (type == VIRTIO_BLK_T_SCSI_CMD) {
+ if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
@@ -119,12 +119,13 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
- req->resid_len = vbr->in_hdr.residual;
- req->sense_len = vbr->in_hdr.sense_len;
- req->errors = vbr->in_hdr.errors;
+ req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
+ req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
+ req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
req->errors = (error != 0);
}
@@ -158,10 +159,11 @@ static void virtblk_done(struct virtqueue *vq)
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
- bool last)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
@@ -173,25 +175,25 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
vbr->req = req;
if (req->cmd_flags & REQ_FLUSH) {
- vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
} else {
switch (req->cmd_type) {
case REQ_TYPE_FS:
vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = blk_rq_pos(vbr->req);
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_SPECIAL:
- vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
default:
/* We don't put anything else in the queue. */
@@ -204,9 +206,9 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
if (num) {
if (rq_data_dir(vbr->req) == WRITE)
- vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
- vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
@@ -222,7 +224,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
return BLK_MQ_RQ_QUEUE_ERROR;
}
- if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
+ if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
notify = true;
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
@@ -331,7 +333,8 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+
+static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
static void virtblk_config_changed_work(struct work_struct *work)
{
@@ -476,7 +479,8 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
struct virtio_blk_config, wce,
&writeback);
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
return writeback;
}
@@ -821,25 +825,34 @@ static const struct virtio_device_id id_table[] = {
{ 0 },
};
-static unsigned int features[] = {
+static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
+};
+static unsigned int features[] = {
+ VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
+ VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+ VIRTIO_BLK_F_TOPOLOGY,
+ VIRTIO_BLK_F_MQ,
};
static struct virtio_driver virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = virtblk_remove,
- .config_changed = virtblk_config_changed,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = virtblk_remove,
+ .config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
- .freeze = virtblk_freeze,
- .restore = virtblk_restore,
+ .freeze = virtblk_freeze,
+ .restore = virtblk_restore,
#endif
};
@@ -871,8 +884,8 @@ out_destroy_workqueue:
static void __exit fini(void)
{
- unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ unregister_blkdev(major, "virtblk");
destroy_workqueue(virtblk_wq);
}
module_init(init);
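The cpu_to_virtio32()/virtio64() conversions added above encode the virtio 1.0 byte-order rule: legacy devices use the guest's native endianness, while VIRTIO_F_VERSION_1 devices are always little-endian on the wire. A small userspace C sketch of that rule (demo_cpu_to_virtio32() is a hypothetical stand-in, not the kernel helper):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for cpu_to_virtio32(): legacy keeps CPU byte order,
 * virtio 1.0 always converts to little-endian. */
static uint32_t demo_cpu_to_virtio32(bool version_1, uint32_t val)
{
	return version_1 ? htole32(val) : val;
}

int main(void)
{
	/* On a little-endian host both calls return the same bytes;
	 * on a big-endian host only the legacy call keeps CPU order. */
	printf("legacy: %#x\n", demo_cpu_to_virtio32(false, 0x12345678));
	printf("v1.0:   %#x\n", demo_cpu_to_virtio32(true, 0x12345678));
	return 0;
}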
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5ac312f6e0be..2236c6f31608 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -126,7 +126,6 @@ struct blkfront_info
unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
- unsigned int flush_op;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
@@ -479,7 +478,19 @@ static int blkif_queue_request(struct request *req)
* way. (It's also a FLUSH+FUA, since it is
* guaranteed ordered WRT previous writes.)
*/
- ring_req->operation = info->flush_op;
+ switch (info->feature_flush &
+ ((REQ_FLUSH|REQ_FUA))) {
+ case REQ_FLUSH|REQ_FUA:
+ ring_req->operation =
+ BLKIF_OP_WRITE_BARRIER;
+ break;
+ case REQ_FLUSH:
+ ring_req->operation =
+ BLKIF_OP_FLUSH_DISKCACHE;
+ break;
+ default:
+ ring_req->operation = 0;
+ }
}
ring_req->u.rw.nr_segments = nseg;
}
@@ -582,12 +593,14 @@ static inline void flush_requests(struct blkfront_info *info)
notify_remote_via_irq(info->irq);
}
-static inline bool blkif_request_flush_valid(struct request *req,
- struct blkfront_info *info)
+static inline bool blkif_request_flush_invalid(struct request *req,
+ struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
- ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
- !info->flush_op));
+ ((req->cmd_flags & REQ_FLUSH) &&
+ !(info->feature_flush & REQ_FLUSH)) ||
+ ((req->cmd_flags & REQ_FUA) &&
+ !(info->feature_flush & REQ_FUA)));
}
/*
@@ -612,8 +625,8 @@ static void do_blkif_request(struct request_queue *rq)
blk_start_request(req);
- if (blkif_request_flush_valid(req, info)) {
- __blk_end_request_all(req, -EIO);
+ if (blkif_request_flush_invalid(req, info)) {
+ __blk_end_request_all(req, -EOPNOTSUPP);
continue;
}
@@ -683,20 +696,26 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return 0;
}
+static const char *flush_info(unsigned int feature_flush)
+{
+ switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
+ case REQ_FLUSH|REQ_FUA:
+ return "barrier: enabled;";
+ case REQ_FLUSH:
+ return "flush diskcache: enabled;";
+ default:
+ return "barrier or flush: disabled;";
+ }
+}
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_flush(info->rq, info->feature_flush);
- printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
- info->gd->disk_name,
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
- "flush diskcache" : "barrier or flush"),
- info->feature_flush ? "enabled;" : "disabled;",
- "persistent grants:",
- info->feature_persistent ? "enabled;" : "disabled;",
- "indirect descriptors:",
- info->max_indirect_segments ? "enabled;" : "disabled;");
+ pr_info("blkfront: %s: %s %s %s %s %s\n",
+ info->gd->disk_name, flush_info(info->feature_flush),
+ "persistent grants:", info->feature_persistent ?
+ "enabled;" : "disabled;", "indirect descriptors:",
+ info->max_indirect_segments ? "enabled;" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1188,7 +1207,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (error == -EOPNOTSUPP)
error = 0;
info->feature_flush = 0;
- info->flush_op = 0;
xlvbd_flush(info);
}
/* fall through */
@@ -1808,7 +1826,6 @@ static void blkfront_connect(struct blkfront_info *info)
physical_sector_size = sector_size;
info->feature_flush = 0;
- info->flush_op = 0;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-barrier", "%d", &barrier,
@@ -1821,10 +1838,8 @@ static void blkfront_connect(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (!err && barrier) {
+ if (!err && barrier)
info->feature_flush = REQ_FLUSH | REQ_FUA;
- info->flush_op = BLKIF_OP_WRITE_BARRIER;
- }
/*
* And if there is "feature-flush-cache" use that above
* barriers.
@@ -1833,10 +1848,8 @@ static void blkfront_connect(struct blkfront_info *info)
"feature-flush-cache", "%d", &flush,
NULL);
- if (!err && flush) {
+ if (!err && flush)
info->feature_flush = REQ_FLUSH;
- info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
- }
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-discard", "%d", &discard,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3920ee45aa59..bd8bda386e02 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,15 +44,14 @@ static const char *default_compressor = "lzo";
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name) \
-static ssize_t zram_attr_##name##_show(struct device *d, \
+static ssize_t name##_show(struct device *d, \
struct device_attribute *attr, char *b) \
{ \
struct zram *zram = dev_to_zram(d); \
return scnprintf(b, PAGE_SIZE, "%llu\n", \
(u64)atomic64_read(&zram->stats.name)); \
} \
-static struct device_attribute dev_attr_##name = \
- __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
+static DEVICE_ATTR_RO(name);
static inline int init_done(struct zram *zram)
{
@@ -287,19 +286,18 @@ static inline int is_partial_io(struct bio_vec *bvec)
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
+static inline int valid_io_request(struct zram *zram,
+ sector_t start, unsigned int size)
{
- u64 start, end, bound;
+ u64 end, bound;
/* unaligned request */
- if (unlikely(bio->bi_iter.bi_sector &
- (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
- if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
return 0;
- start = bio->bi_iter.bi_sector;
- end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
+ end = start + (size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
/* out of range */
if (unlikely(start >= bound || end > bound || start > end))
@@ -453,7 +451,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
+ u32 index, int offset)
{
int ret;
struct page *page;
@@ -645,14 +643,13 @@ out:
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, struct bio *bio)
+ int offset, int rw)
{
int ret;
- int rw = bio_data_dir(bio);
if (rw == READ) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ ret = zram_bvec_read(zram, bvec, index, offset);
} else {
atomic64_inc(&zram->stats.num_writes);
ret = zram_bvec_write(zram, bvec, index, offset);
@@ -853,7 +850,7 @@ out:
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset;
+ int offset, rw;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -868,6 +865,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
+ rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -882,15 +880,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -915,7 +913,8 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
if (unlikely(!init_done(zram)))
goto error;
- if (!valid_io_request(zram, bio)) {
+ if (!valid_io_request(zram, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
goto error;
}
@@ -945,25 +944,64 @@ static void zram_slot_free_notify(struct block_device *bdev,
atomic64_inc(&zram->stats.notify_free);
}
+static int zram_rw_page(struct block_device *bdev, sector_t sector,
+ struct page *page, int rw)
+{
+ int offset, err;
+ u32 index;
+ struct zram *zram;
+ struct bio_vec bv;
+
+ zram = bdev->bd_disk->private_data;
+ if (!valid_io_request(zram, sector, PAGE_SIZE)) {
+ atomic64_inc(&zram->stats.invalid_io);
+ return -EINVAL;
+ }
+
+ down_read(&zram->init_lock);
+ if (unlikely(!init_done(zram))) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ index = sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+
+ bv.bv_page = page;
+ bv.bv_len = PAGE_SIZE;
+ bv.bv_offset = 0;
+
+ err = zram_bvec_rw(zram, &bv, index, offset, rw);
+out_unlock:
+ up_read(&zram->init_lock);
+ /*
+ * If the I/O fails, just return the error (i.e. non-zero) without
+ * calling page_endio.
+ * The upper callers of rw_page (e.g. swap_readpage, __swap_writepage)
+ * will then resubmit the I/O as a bio request, and bio->bi_end_io
+ * will handle the error (e.g. SetPageError, set_page_dirty and
+ * related cleanup).
+ */
+ if (err == 0)
+ page_endio(page, rw, 0);
+ return err;
+}
+
static const struct block_device_operations zram_devops = {
.swap_slot_free_notify = zram_slot_free_notify,
+ .rw_page = zram_rw_page,
.owner = THIS_MODULE
};
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
- mem_limit_store);
-static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
- mem_used_max_store);
-static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
- max_comp_streams_show, max_comp_streams_store);
-static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
- comp_algorithm_show, comp_algorithm_store);
+static DEVICE_ATTR_RW(disksize);
+static DEVICE_ATTR_RO(initstate);
+static DEVICE_ATTR_WO(reset);
+static DEVICE_ATTR_RO(orig_data_size);
+static DEVICE_ATTR_RO(mem_used_total);
+static DEVICE_ATTR_RW(mem_limit);
+static DEVICE_ATTR_RW(mem_used_max);
+static DEVICE_ATTR_RW(max_comp_streams);
+static DEVICE_ATTR_RW(comp_algorithm);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
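The ZRAM_ATTR_RO rework above depends on the sysfs naming contract: DEVICE_ATTR_RO(foo) emits struct device_attribute dev_attr_foo and expects a show routine named foo_show, which is why the generated function loses its zram_attr_ prefix. A minimal sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical read-only attribute; a fixed value stands in for the
 * atomic64_read() of a real zram statistic. */
static ssize_t demo_count_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", 42ULL);
}
/* Expands to dev_attr_demo_count, wired to demo_count_show() above. */
static DEVICE_ATTR_RO(demo_count);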
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index c6ee271317f5..b05a816b09ac 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -66,8 +66,8 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
- ZRAM_ACCESS, /* page in now accessed */
+ ZRAM_ZERO = ZRAM_FLAG_SHIFT,
+ ZRAM_ACCESS, /* page is now accessed */
__NR_ZRAM_PAGEFLAGS,
};
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4547dc238fc7..364f080768d0 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -210,6 +210,7 @@ config BT_MRVL_SDIO
tristate "Marvell BT-over-SDIO driver"
depends on BT_MRVL && MMC
select FW_LOADER
+ select WANT_DEV_COREDUMP
help
The driver for Marvell Bluetooth chipsets with SDIO interface.
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index d85ced27ebd5..1ee27ac18de0 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -79,6 +79,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe05f) },
+ { USB_DEVICE(0x0489, 0xe078) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -86,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
+ { USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0930, 0x0227) },
@@ -105,6 +107,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3432) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -130,6 +133,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -137,6 +141,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
@@ -156,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 023d35e3c7a7..1828ed8cae7a 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -167,6 +167,35 @@ static const struct file_operations btmrvl_hscmd_fops = {
.llseek = default_llseek,
};
+static ssize_t btmrvl_fwdump_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct btmrvl_private *priv = file->private_data;
+ char buf[16];
+ bool result;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (strtobool(buf, &result))
+ return -EINVAL;
+
+ if (!result)
+ return -EINVAL;
+
+ btmrvl_firmware_dump(priv);
+
+ return count;
+}
+
+static const struct file_operations btmrvl_fwdump_fops = {
+ .write = btmrvl_fwdump_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
void btmrvl_debugfs_init(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
@@ -197,6 +226,8 @@ void btmrvl_debugfs_init(struct hci_dev *hdev)
priv, &btmrvl_hscmd_fops);
debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
priv, &btmrvl_hscfgcmd_fops);
+ debugfs_create_file("fw_dump", 0200, dbg->config_dir,
+ priv, &btmrvl_fwdump_fops);
dbg->status_dir = debugfs_create_dir("status", hdev->debugfs);
debugfs_create_u8("curpsmode", 0444, dbg->status_dir,
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 38ad66289ad6..330f8f84928d 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -32,6 +32,24 @@
/* Time to wait for command response in millisecond */
#define WAIT_UNTIL_CMD_RESP 5000
+enum rdwr_status {
+ RDWR_STATUS_SUCCESS = 0,
+ RDWR_STATUS_FAILURE = 1,
+ RDWR_STATUS_DONE = 2
+};
+
+#define FW_DUMP_MAX_NAME_LEN 8
+#define FW_DUMP_HOST_READY 0xEE
+#define FW_DUMP_DONE 0xFF
+#define FW_DUMP_READ_DONE 0xFE
+
+struct memory_type_mapping {
+ u8 mem_name[FW_DUMP_MAX_NAME_LEN];
+ u8 *mem_ptr;
+ u32 mem_size;
+ u8 done_flag;
+};
+
struct btmrvl_thread {
struct task_struct *task;
wait_queue_head_t wait_q;
@@ -81,6 +99,7 @@ struct btmrvl_private {
u8 *payload, u16 nb);
int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
int (*hw_process_int_status) (struct btmrvl_private *priv);
+ void (*firmware_dump)(struct btmrvl_private *priv);
spinlock_t driver_lock; /* spinlock used by driver */
#ifdef CONFIG_DEBUG_FS
void *debugfs_data;
@@ -151,6 +170,7 @@ int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
int btmrvl_enable_hs(struct btmrvl_private *priv);
+void btmrvl_firmware_dump(struct btmrvl_private *priv);
#ifdef CONFIG_DEBUG_FS
void btmrvl_debugfs_init(struct hci_dev *hdev);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 1d7db2064889..30939c993d94 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <linux/mmc/sdio_func.h>
#include "btmrvl_drv.h"
#include "btmrvl_sdio.h"
@@ -41,6 +42,11 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
priv->adapter->int_count++;
+ if (priv->adapter->hs_state == HS_ACTIVATED) {
+ BT_DBG("BT: HS DEACTIVATED in ISR!");
+ priv->adapter->hs_state = HS_DEACTIVATED;
+ }
+
wake_up_interruptible(&priv->main_thread.wait_q);
}
EXPORT_SYMBOL_GPL(btmrvl_interrupt);
@@ -209,7 +215,7 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
if (ret)
- BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
+ BT_ERR("module_cfg_cmd(%x) failed", subcmd);
return ret;
}
@@ -245,7 +251,7 @@ int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
if (ret)
- BT_ERR("HSCFG command failed\n");
+ BT_ERR("HSCFG command failed");
return ret;
}
@@ -263,7 +269,7 @@ int btmrvl_enable_ps(struct btmrvl_private *priv)
ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
if (ret)
- BT_ERR("PSMODE command failed\n");
+ BT_ERR("PSMODE command failed");
return 0;
}
@@ -276,7 +282,7 @@ int btmrvl_enable_hs(struct btmrvl_private *priv)
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
if (ret) {
- BT_ERR("Host sleep enable command failed\n");
+ BT_ERR("Host sleep enable command failed");
return ret;
}
@@ -323,12 +329,19 @@ int btmrvl_prepare_command(struct btmrvl_private *priv)
} else {
ret = priv->hw_wakeup_firmware(priv);
priv->adapter->hs_state = HS_DEACTIVATED;
+ BT_DBG("BT: HS DEACTIVATED due to host activity!");
}
}
return ret;
}
+void btmrvl_firmware_dump(struct btmrvl_private *priv)
+{
+ if (priv->firmware_dump)
+ priv->firmware_dump(priv);
+}
+
static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
int ret = 0;
@@ -487,34 +500,36 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv,
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
BT_CAL_HDR_LEN + len);
if (ret)
- BT_ERR("Failed to download caibration data\n");
+ BT_ERR("Failed to download caibration data");
return 0;
}
-static int btmrvl_cal_data_dt(struct btmrvl_private *priv)
+static int btmrvl_check_device_tree(struct btmrvl_private *priv)
{
struct device_node *dt_node;
u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
- const char name[] = "btmrvl_caldata";
- const char property[] = "btmrvl,caldata";
int ret;
-
- dt_node = of_find_node_by_name(NULL, name);
- if (!dt_node)
- return -ENODEV;
-
- ret = of_property_read_u8_array(dt_node, property,
- cal_data + BT_CAL_HDR_LEN,
- BT_CAL_DATA_SIZE);
- if (ret)
- return ret;
-
- BT_DBG("Use cal data from device tree");
- ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE);
- if (ret) {
- BT_ERR("Fail to download calibrate data");
- return ret;
+ u32 val;
+
+ for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") {
+ ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val);
+ if (!ret)
+ priv->btmrvl_dev.gpio_gap = val;
+
+ ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data",
+ cal_data + BT_CAL_HDR_LEN,
+ BT_CAL_DATA_SIZE);
+ if (ret)
+ return ret;
+
+ BT_DBG("Use cal data from device tree");
+ ret = btmrvl_download_cal_data(priv, cal_data,
+ BT_CAL_DATA_SIZE);
+ if (ret) {
+ BT_ERR("Fail to download calibrate data");
+ return ret;
+ }
}
return 0;
@@ -526,14 +541,15 @@ static int btmrvl_setup(struct hci_dev *hdev)
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
- btmrvl_cal_data_dt(priv);
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+
+ btmrvl_check_device_tree(priv);
btmrvl_pscan_window_reporting(priv, 0x01);
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
- priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
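btmrvl_check_device_tree() above iterates every node compatible with "btmrvl,cfgdata" and reads optional properties from each. A minimal sketch of that OF idiom (hypothetical compatible string and property name):

#include <linux/of.h>
#include <linux/kernel.h>

static void demo_scan_cfg_nodes(void)
{
	struct device_node *np;
	u32 val;

	/* The iterator drops each node reference as it advances. */
	for_each_compatible_node(np, NULL, "demo,cfgdata") {
		if (!of_property_read_u32(np, "demo,gpio-gap", &val))
			pr_info("demo: gpio-gap = %u\n", val);
	}
}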
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 550bce089fa6..0057c0b7a776 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -24,6 +24,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/module.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -33,6 +34,24 @@
#define VERSION "1.0"
+static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ {"ITCM", NULL, 0, 0xF0},
+ {"DTCM", NULL, 0, 0xF1},
+ {"SQRAM", NULL, 0, 0xF2},
+ {"APU", NULL, 0, 0xF3},
+ {"CIU", NULL, 0, 0xF4},
+ {"ICU", NULL, 0, 0xF5},
+ {"MAC", NULL, 0, 0xF6},
+ {"EXT7", NULL, 0, 0xF7},
+ {"EXT8", NULL, 0, 0xF8},
+ {"EXT9", NULL, 0, 0xF9},
+ {"EXT10", NULL, 0, 0xFA},
+ {"EXT11", NULL, 0, 0xFB},
+ {"EXT12", NULL, 0, 0xFC},
+ {"EXT13", NULL, 0, 0xFD},
+ {"EXTLAST", NULL, 0, 0xFE},
+};
+
/* The btmrvl_sdio_remove() callback function is called
* when user removes this module from kernel space or ejects
* the card from the slot. The driver handles these 2 cases
@@ -122,6 +141,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = {
.int_read_to_clear = true,
.host_int_rsr = 0x01,
.card_misc_cfg = 0xcc,
+ .fw_dump_ctrl = 0xe2,
+ .fw_dump_start = 0xe3,
+ .fw_dump_end = 0xea,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -130,6 +152,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.reg = &btmrvl_reg_8688,
.support_pscan_win_report = false,
.sd_blksz_fw_dl = 64,
+ .supports_fw_dump = false,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
@@ -138,6 +161,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.reg = &btmrvl_reg_87xx,
.support_pscan_win_report = false,
.sd_blksz_fw_dl = 256,
+ .supports_fw_dump = false,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
@@ -146,6 +170,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.reg = &btmrvl_reg_87xx,
.support_pscan_win_report = false,
.sd_blksz_fw_dl = 256,
+ .supports_fw_dump = false,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = {
@@ -154,6 +179,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = {
.reg = &btmrvl_reg_8887,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
+ .supports_fw_dump = false,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
@@ -162,6 +188,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.reg = &btmrvl_reg_8897,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
+ .supports_fw_dump = true,
};
static const struct sdio_device_id btmrvl_sdio_ids[] = {
@@ -764,8 +791,8 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
card = sdio_get_drvdata(func);
if (!card || !card->priv) {
- BT_ERR("sbi_interrupt(%p) card or priv is "
- "NULL, card=%p\n", func, card);
+ BT_ERR("sbi_interrupt(%p) card or priv is NULL, card=%p",
+ func, card);
return;
}
@@ -1080,6 +1107,277 @@ static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv)
return ret;
}
+static void btmrvl_sdio_dump_regs(struct btmrvl_private *priv)
+{
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+ int ret = 0;
+ unsigned int reg, reg_start, reg_end;
+ char buf[256], *ptr;
+ u8 loop, func, data;
+ int MAX_LOOP = 2;
+
+ btmrvl_sdio_wakeup_fw(priv);
+ sdio_claim_host(card->func);
+
+ for (loop = 0; loop < MAX_LOOP; loop++) {
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+
+ if (loop == 0) {
+ /* Read the registers of SDIO function0 */
+ func = loop;
+ reg_start = 0;
+ reg_end = 9;
+ } else {
+ func = 2;
+ reg_start = 0;
+ reg_end = 0x09;
+ }
+
+ ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ",
+ func, reg_start, reg_end);
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ if (func == 0)
+ data = sdio_f0_readb(card->func, reg, &ret);
+ else
+ data = sdio_readb(card->func, reg, &ret);
+
+ if (!ret) {
+ ptr += sprintf(ptr, "%02x ", data);
+ } else {
+ ptr += sprintf(ptr, "ERR");
+ break;
+ }
+ }
+
+ BT_INFO("%s", buf);
+ }
+
+ sdio_release_host(card->func);
+}
+
+/* This function performs the read/write handshake for a firmware dump */
+static enum
+rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv,
+ u8 doneflag)
+{
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+ int ret, tries;
+ u8 ctrl_data = 0;
+
+ sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
+ &ret);
+
+ if (ret) {
+ BT_ERR("SDIO write err");
+ return RDWR_STATUS_FAILURE;
+ }
+
+ for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+ ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
+ &ret);
+
+ if (ret) {
+ BT_ERR("SDIO read err");
+ return RDWR_STATUS_FAILURE;
+ }
+
+ if (ctrl_data == FW_DUMP_DONE)
+ break;
+ if (doneflag && ctrl_data == doneflag)
+ return RDWR_STATUS_DONE;
+ if (ctrl_data != FW_DUMP_HOST_READY) {
+ BT_INFO("The ctrl reg was changed, re-try again!");
+ sdio_writeb(card->func, FW_DUMP_HOST_READY,
+ card->reg->fw_dump_ctrl, &ret);
+ if (ret) {
+ BT_ERR("SDIO write err");
+ return RDWR_STATUS_FAILURE;
+ }
+ }
+ usleep_range(100, 200);
+ }
+
+ if (ctrl_data == FW_DUMP_HOST_READY) {
+ BT_ERR("Fail to pull ctrl_data");
+ return RDWR_STATUS_FAILURE;
+ }
+
+ return RDWR_STATUS_SUCCESS;
+}
+
+/* This function dumps the SDIO registers and firmware memory data */
+static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv)
+{
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+ int ret = 0;
+ unsigned int reg, reg_start, reg_end;
+ enum rdwr_status stat;
+ u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr;
+ u8 dump_num, idx, i, read_reg, doneflag = 0;
+ u32 memory_size, fw_dump_len = 0;
+
+ /* dump the SDIO registers first */
+ btmrvl_sdio_dump_regs(priv);
+
+ if (!card->supports_fw_dump) {
+ BT_ERR("Firmware dump not supported for this card!");
+ return;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
+ struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+ }
+
+ btmrvl_sdio_wakeup_fw(priv);
+ sdio_claim_host(card->func);
+
+ BT_INFO("== btmrvl firmware dump start ==");
+
+ stat = btmrvl_sdio_rdwr_firmware(priv, doneflag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+
+ reg = card->reg->fw_dump_start;
+ /* Read the number of memory regions to dump */
+ dump_num = sdio_readb(card->func, reg, &ret);
+
+ if (ret) {
+ BT_ERR("SDIO read memory length err");
+ goto done;
+ }
+
+ /* Read the length of each memory region to dump */
+ for (idx = 0; idx < dump_num; idx++) {
+ struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+ stat = btmrvl_sdio_rdwr_firmware(priv, doneflag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+
+ memory_size = 0;
+ reg = card->reg->fw_dump_start;
+ for (i = 0; i < 4; i++) {
+ read_reg = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ BT_ERR("SDIO read err");
+ goto done;
+ }
+ memory_size |= (read_reg << i*8);
+ reg++;
+ }
+
+ if (memory_size == 0) {
+ BT_INFO("Firmware dump finished!");
+ break;
+ }
+
+ BT_INFO("%s_SIZE=0x%x", entry->mem_name, memory_size);
+ entry->mem_ptr = vzalloc(memory_size + 1);
+ entry->mem_size = memory_size;
+ if (!entry->mem_ptr) {
+ BT_ERR("Vzalloc %s failed", entry->mem_name);
+ goto done;
+ }
+
+ fw_dump_len += (strlen("========Start dump ") +
+ strlen(entry->mem_name) +
+ strlen("========\n") +
+ (memory_size + 1) +
+ strlen("\n========End dump========\n"));
+
+ dbg_ptr = entry->mem_ptr;
+ end_ptr = dbg_ptr + memory_size;
+
+ doneflag = entry->done_flag;
+ BT_INFO("Start %s output, please wait...",
+ entry->mem_name);
+
+ do {
+ stat = btmrvl_sdio_rdwr_firmware(priv, doneflag);
+ if (stat == RDWR_STATUS_FAILURE)
+ goto done;
+
+ reg_start = card->reg->fw_dump_start;
+ reg_end = card->reg->fw_dump_end;
+ for (reg = reg_start; reg <= reg_end; reg++) {
+ *dbg_ptr = sdio_readb(card->func, reg, &ret);
+ if (ret) {
+ BT_ERR("SDIO read err");
+ goto done;
+ }
+ if (dbg_ptr < end_ptr)
+ dbg_ptr++;
+ else
+ BT_ERR("Allocated buffer not enough");
+ }
+
+ if (stat != RDWR_STATUS_DONE) {
+ continue;
+ } else {
+ BT_INFO("%s done: size=0x%tx",
+ entry->mem_name,
+ dbg_ptr - entry->mem_ptr);
+ break;
+ }
+ } while (1);
+ }
+
+ BT_INFO("== btmrvl firmware dump end ==");
+
+done:
+ sdio_release_host(card->func);
+
+ if (fw_dump_len == 0)
+ return;
+
+ fw_dump_data = vzalloc(fw_dump_len+1);
+ if (!fw_dump_data) {
+ BT_ERR("Vzalloc fw_dump_data fail!");
+ return;
+ }
+ fw_dump_ptr = fw_dump_data;
+
+ /* Dump all the memory data into a single file; a userspace script
+ * will be used to split it into multiple files. */
+ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start");
+ for (idx = 0; idx < dump_num; idx++) {
+ struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ strcpy(fw_dump_ptr, "========Start dump ");
+ fw_dump_ptr += strlen("========Start dump ");
+
+ strcpy(fw_dump_ptr, entry->mem_name);
+ fw_dump_ptr += strlen(entry->mem_name);
+
+ strcpy(fw_dump_ptr, "========\n");
+ fw_dump_ptr += strlen("========\n");
+
+ memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
+ fw_dump_ptr += entry->mem_size;
+
+ strcpy(fw_dump_ptr, "\n========End dump========\n");
+ fw_dump_ptr += strlen("\n========End dump========\n");
+
+ vfree(mem_type_mapping_tbl[idx].mem_ptr);
+ mem_type_mapping_tbl[idx].mem_ptr = NULL;
+ }
+ }
+
+ /* fw_dump_data will be freed by the device coredump release
+ * function after 5 minutes. */
+ dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data,
+ fw_dump_len, GFP_KERNEL);
+ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
+}
+
static int btmrvl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -1103,6 +1401,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
card->support_pscan_win_report = data->support_pscan_win_report;
+ card->supports_fw_dump = data->supports_fw_dump;
}
if (btmrvl_sdio_register_dev(card) < 0) {
@@ -1134,6 +1433,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
priv->hw_process_int_status = btmrvl_sdio_process_int_status;
+ priv->firmware_dump = btmrvl_sdio_dump_firmware;
if (btmrvl_register_hdev(priv)) {
BT_ERR("Register hdev failed!");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 453559f98a75..1a3bd064c442 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -81,6 +81,9 @@ struct btmrvl_sdio_card_reg {
bool int_read_to_clear;
u8 host_int_rsr;
u8 card_misc_cfg;
+ u8 fw_dump_ctrl;
+ u8 fw_dump_start;
+ u8 fw_dump_end;
};
struct btmrvl_sdio_card {
@@ -90,6 +93,7 @@ struct btmrvl_sdio_card {
const char *firmware;
const struct btmrvl_sdio_card_reg *reg;
bool support_pscan_win_report;
+ bool supports_fw_dump;
u16 sd_blksz_fw_dl;
u8 rx_unit;
struct btmrvl_private *priv;
@@ -101,6 +105,7 @@ struct btmrvl_sdio_device {
const struct btmrvl_sdio_card_reg *reg;
const bool support_pscan_win_report;
u16 sd_blksz_fw_dl;
+ bool supports_fw_dump;
};
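The firmware dump path above ends by handing the assembled buffer to dev_coredumpv(), which takes ownership of vmalloc-backed data and frees it with vfree() once the dump is read or its timeout expires. A minimal sketch of that hand-off (demo_emit_coredump() is hypothetical; dev would be the driver's struct device):

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static void demo_emit_coredump(struct device *dev)
{
	size_t len = 128;
	char *buf = vzalloc(len);

	if (!buf)
		return;

	snprintf(buf, len, "========Start dump DEMO========\n");

	/* Ownership of 'buf' passes to the devcoredump core here;
	 * the dump shows up under /sys/class/devcoredump until released. */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}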
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index edfc17bfcd44..19cf2cf22e87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -106,9 +106,12 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0b05, 0x17b5) },
{ USB_DEVICE(0x0b05, 0x17cb) },
{ USB_DEVICE(0x413c, 0x8197) },
+ { USB_DEVICE(0x13d3, 0x3404),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Foxconn - Hon Hai */
- { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
@@ -156,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -163,6 +167,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
@@ -182,6 +187,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
@@ -298,6 +304,8 @@ struct btusb_data {
unsigned int sco_num;
int isoc_altsetting;
int suspend_count;
+
+ int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
};
static inline void btusb_free_frags(struct btusb_data *data)
@@ -589,7 +597,7 @@ static void btusb_bulk_complete(struct urb *urb)
if (urb->status == 0) {
hdev->stat.byte_rx += urb->actual_length;
- if (btusb_recv_bulk(data, urb->transfer_buffer,
+ if (data->recv_bulk(data, urb->transfer_buffer,
urb->actual_length) < 0) {
BT_ERR("%s corrupted ACL packet", hdev->name);
hdev->stat.err_rx++;
@@ -2011,6 +2019,8 @@ static int btusb_probe(struct usb_interface *intf,
init_usb_anchor(&data->isoc_anchor);
spin_lock_init(&data->rxlock);
+ data->recv_bulk = btusb_recv_bulk;
+
hdev = hci_alloc_dev();
if (!hdev)
return -ENOMEM;
@@ -2034,6 +2044,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM_PATCHRAM) {
hdev->setup = btusb_setup_bcm_patchram;
hdev->set_bdaddr = btusb_set_bdaddr_bcm;
+ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_INTEL) {
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index f038dba19e36..55c135b7757a 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -349,7 +349,6 @@ static struct platform_driver btwilink_driver = {
.remove = bt_ti_remove,
.driver = {
.name = "btwilink",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 0bc8a6a6a148..2ff6dfd2d3f0 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -74,7 +74,7 @@ static int ath_wakeup_ar3k(struct tty_struct *tty)
status = tty->driver->ops->tiocmget(tty);
- /* Disable Automatic RTSCTS */
+ /* Enable Automatic RTSCTS */
ktermios.c_cflag |= CRTSCTS;
status = tty_set_termios(tty, &ktermios);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index a22838669b4e..ec0fa7732c0d 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -168,6 +168,27 @@ wakeup:
hci_uart_tx_wakeup(hu);
}
+static void h5_peer_reset(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ BT_ERR("Peer device has reset");
+
+ h5->state = H5_UNINITIALIZED;
+
+ del_timer(&h5->timer);
+
+ skb_queue_purge(&h5->rel);
+ skb_queue_purge(&h5->unrel);
+ skb_queue_purge(&h5->unack);
+
+ h5->tx_seq = 0;
+ h5->tx_ack = 0;
+
+ /* Send reset request to upper stack */
+ hci_reset_dev(hu->hdev);
+}
+
static int h5_open(struct hci_uart *hu)
{
struct h5 *h5;
@@ -283,8 +304,12 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
conf_req[2] = h5_cfg_field(h5);
if (memcmp(data, sync_req, 2) == 0) {
+ if (h5->state == H5_ACTIVE)
+ h5_peer_reset(hu);
h5_link_control(hu, sync_rsp, 2);
} else if (memcmp(data, sync_rsp, 2) == 0) {
+ if (h5->state == H5_ACTIVE)
+ h5_peer_reset(hu);
h5->state = H5_INITIALIZED;
h5_link_control(hu, conf_req, 3);
} else if (memcmp(data, conf_req, 2) == 0) {
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 46de8dc39eb4..738612c45266 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -389,7 +389,6 @@ static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
static struct platform_driver brcmstb_gisb_arb_driver = {
.driver = {
.name = "brcm-gisb-arb",
- .owner = THIS_MODULE,
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
},
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 75c9681f8021..0958b6981773 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -206,7 +206,6 @@ static int __init weim_probe(struct platform_device *pdev)
static struct platform_driver weim_driver = {
.driver = {
.name = "imx-weim",
- .owner = THIS_MODULE,
.of_match_table = weim_id_table,
},
};
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 5511f9814ddd..723ec06ad2c8 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -77,7 +77,6 @@ static struct platform_driver omap_ocp2scp_driver = {
.remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
},
};
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 17d86595951c..029bc73de001 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -358,7 +358,6 @@ static struct platform_driver omap_l3_driver = {
.probe = omap_l3_probe,
.driver = {
.name = "omap_l3_noc",
- .owner = THIS_MODULE,
.pm = L3_DEV_PM_OPS,
.of_match_table = of_match_ptr(l3_noc_match),
},
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9a024f899dd4..f3334829e55a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
__free_pages(page, 2);
return NULL;
}
- get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page;
}
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
return;
set_pages_wb(page, 4);
- put_page(page);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
- get_page(page);
set_pages_uc(page, 1);
if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
set_pages_wb(intel_private.scratch_page, 1);
pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
}
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index ebc4c73d8ca4..a7c5c59675f0 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -168,8 +168,8 @@ static int __init hangcheck_init(void)
printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
VERSION_STR, hangcheck_tick, hangcheck_margin);
hangcheck_tsc_margin =
- (unsigned long long)(hangcheck_margin + hangcheck_tick);
- hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
+ (unsigned long long)hangcheck_margin + hangcheck_tick;
+ hangcheck_tsc_margin *= TIMER_FREQ;
hangcheck_tsc = ktime_get_ns();
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 91a04ae8003c..de57b38809c7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -64,7 +64,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on ARCH_AT91 && HAVE_CLK
+ depends on ARCH_AT91 && HAVE_CLK && OF
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 851bc7e20ad2..0fcc9e69a346 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -67,7 +67,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (IS_ERR(trng->clk))
return PTR_ERR(trng->clk);
- ret = clk_enable(trng->clk);
+ ret = clk_prepare_enable(trng->clk);
if (ret)
return ret;
@@ -95,7 +95,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
hwrng_unregister(&trng->rng);
writel(TRNG_KEY, trng->base + TRNG_CR);
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return 0;
}
@@ -105,7 +105,7 @@ static int atmel_trng_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- clk_disable(trng->clk);
+ clk_disable_unprepare(trng->clk);
return 0;
}
@@ -114,7 +114,7 @@ static int atmel_trng_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- return clk_enable(trng->clk);
+ return clk_prepare_enable(trng->clk);
}
static const struct dev_pm_ops atmel_trng_pm_ops = {
@@ -123,15 +123,21 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
};
#endif /* CONFIG_PM */
+static const struct of_device_id atmel_trng_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-trng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
+
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &atmel_trng_pm_ops,
#endif /* CONFIG_PM */
+ .of_match_table = atmel_trng_dt_ids,
},
};
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index e900961cdd2e..7192ec25f667 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -99,7 +99,6 @@ MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index 36581ea562cb..ba6a65ac023b 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -162,7 +162,6 @@ static struct platform_driver bcm63xx_rng_driver = {
.remove = bcm63xx_rng_remove,
.driver = {
.name = "bcm63xx-rng",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aa30a25c8d49..1500cfd799a7 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -281,7 +281,6 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
char *buf)
{
int err;
- ssize_t ret = 0;
struct hwrng *rng;
err = mutex_lock_interruptible(&rng_mutex);
@@ -289,16 +288,13 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return -ERESTARTSYS;
buf[0] = '\0';
list_for_each_entry(rng, &rng_list, list) {
- strncat(buf, rng->name, PAGE_SIZE - ret - 1);
- ret += strlen(rng->name);
- strncat(buf, " ", PAGE_SIZE - ret - 1);
- ret++;
+ strlcat(buf, rng->name, PAGE_SIZE);
+ strlcat(buf, " ", PAGE_SIZE);
}
- strncat(buf, "\n", PAGE_SIZE - ret - 1);
- ret++;
+ strlcat(buf, "\n", PAGE_SIZE);
mutex_unlock(&rng_mutex);
- return ret;
+ return strlen(buf);
}
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 993efd7f6c7e..fed0830bf724 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -169,7 +169,6 @@ static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
static struct platform_driver exynos_rng_driver = {
.driver = {
.name = "exynos-rng",
- .owner = THIS_MODULE,
.pm = &exynos_rng_pm_ops,
},
.probe = exynos_rng_probe,
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 148521e51dc6..cea1c703d62f 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -185,7 +185,6 @@ static struct platform_driver msm_rng_driver = {
.remove = msm_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(msm_rng_of_match),
}
};
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 6a86b6f56af2..6cbb72ec6013 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -206,7 +206,6 @@ static int __exit mxc_rnga_remove(struct platform_device *pdev)
static struct platform_driver mxc_rnga_driver = {
.driver = {
.name = "mxc_rnga",
- .owner = THIS_MODULE,
},
.remove = __exit_p(mxc_rnga_remove),
};
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 292a5889f675..843d6f6aee7a 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -750,7 +750,6 @@ MODULE_DEVICE_TABLE(of, n2rng_match);
static struct platform_driver n2rng_driver = {
.driver = {
.name = "n2rng",
- .owner = THIS_MODULE,
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index b5cc3420c659..be1c3f607398 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -117,7 +117,6 @@ static int __exit octeon_rng_remove(struct platform_device *pdev)
static struct platform_driver octeon_rng_driver = {
.driver = {
.name = "octeon_rng",
- .owner = THIS_MODULE,
},
.probe = octeon_rng_probe,
.remove = __exit_p(octeon_rng_remove),
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f66ea258382f..d14dcf788f17 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -456,7 +456,6 @@ static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
static struct platform_driver omap_rng_driver = {
.driver = {
.name = "omap_rng",
- .owner = THIS_MODULE,
.pm = OMAP_RNG_PM,
.of_match_table = of_match_ptr(omap_rng_of_match),
},
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 6f2eaffed623..a405cdcd8dd2 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -126,7 +126,6 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
static struct platform_driver omap3_rom_rng_driver = {
.driver = {
.name = "omap3-rom-rng",
- .owner = THIS_MODULE,
},
.probe = omap3_rom_rng_probe,
.remove = omap3_rom_rng_remove,
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c0347d1dded0..3eb7bdd7f93b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -142,7 +142,6 @@ static struct of_device_id rng_match[] = {
static struct platform_driver rng_driver = {
.driver = {
.name = "pasemi-rng",
- .owner = THIS_MODULE,
.of_match_table = rng_match,
},
.probe = rng_probe,
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index 521f76b0934b..c85d31a5f9e3 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -133,7 +133,6 @@ static struct of_device_id ppc4xx_rng_match[] = {
static struct platform_driver ppc4xx_rng_driver = {
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
.of_match_table = ppc4xx_rng_match,
},
.probe = ppc4xx_rng_probe,
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index b6ab9ac3f34d..cf37db263ecd 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -200,7 +200,6 @@ MODULE_DEVICE_TABLE(of, timeriomem_rng_match);
static struct platform_driver timeriomem_rng_driver = {
.driver = {
.name = "timeriomem_rng",
- .owner = THIS_MODULE,
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index 09c5fbea2b93..a7b694913416 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -158,7 +158,6 @@ static int __exit tx4939_rng_remove(struct platform_device *dev)
static struct platform_driver tx4939_rng_driver = {
.driver = {
.name = "tx4939-rng",
- .owner = THIS_MODULE,
},
.remove = tx4939_rng_remove,
};
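Every hw_random hunk from bcm2835-rng.c down to tx4939-rng.c above is the same mechanical cleanup: the explicit .owner = THIS_MODULE assignment goes away because the platform core fills the owner in at registration time (platform_driver_register() passes THIS_MODULE on the caller's behalf), so setting it by hand is redundant. A minimal driver skeleton written under that assumption (the foo_rng names are placeholders, not any of the drivers touched above):

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical probe/remove pair; a real driver would register an hwrng here. */
static int foo_rng_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int foo_rng_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_rng_driver = {
	.driver = {
		.name = "foo-rng",
		/* no .owner: platform_driver_register() supplies THIS_MODULE */
	},
	.probe	= foo_rng_probe,
	.remove	= foo_rng_remove,
};
module_platform_driver(foo_rng_driver);

MODULE_DESCRIPTION("skeleton platform driver (illustrative only)");
MODULE_LICENSE("GPL");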
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 34174d01462e..e34a019eb930 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -298,7 +298,7 @@ static int i8k_get_temp(int sensor)
int temp;
#ifdef I8K_TEMPERATURE_BUG
- static int prev[4];
+ static int prev[4] = { I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1 };
#endif
regs.ebx = sensor & 0xff;
rc = i8k_smm(&regs);
@@ -317,10 +317,12 @@ static int i8k_get_temp(int sensor)
*/
if (temp > I8K_MAX_TEMP) {
temp = prev[sensor];
- prev[sensor] = I8K_MAX_TEMP;
+ prev[sensor] = I8K_MAX_TEMP+1;
} else {
prev[sensor] = temp;
}
+ if (temp > I8K_MAX_TEMP)
+ return -ERANGE;
#endif
return temp;
@@ -499,6 +501,8 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
int temp;
temp = i8k_get_temp(index);
+ if (temp == -ERANGE)
+ return -EINVAL;
if (temp < 0)
return temp;
return sprintf(buf, "%d\n", temp * 1000);
@@ -610,17 +614,17 @@ static int __init i8k_init_hwmon(void)
/* CPU temperature attributes, if temperature reading is OK */
err = i8k_get_temp(0);
- if (err >= 0)
+ if (err >= 0 || err == -ERANGE)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
/* check for additional temperature sensors */
err = i8k_get_temp(1);
- if (err >= 0)
+ if (err >= 0 || err == -ERANGE)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
err = i8k_get_temp(2);
- if (err >= 0)
+ if (err >= 0 || err == -ERANGE)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
err = i8k_get_temp(3);
- if (err >= 0)
+ if (err >= 0 || err == -ERANGE)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
/* Left fan attributes, if left fan is present */
@@ -711,6 +715,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
},
{
+ .ident = "Dell Latitude E6440",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540],
+ },
+ {
.ident = "Dell Latitude E6540",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -788,6 +800,8 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
{ }
};
+MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+
/*
* Probe for the presence of a supported laptop.
*/
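The i8k.c hunks change the cached-temperature workaround so that prev[] starts out above I8K_MAX_TEMP and an implausible SMM reading is reported as -ERANGE rather than being silently replaced by a zero or stale value; the hwmon wrapper then maps -ERANGE to -EINVAL, while the sensor-detection code still counts -ERANGE as "sensor present". A small self-contained sketch of that last-good-value-with-sentinel pattern; read_raw_temp() and MAX_TEMP are illustrative stubs, not taken from the driver:

#include <errno.h>
#include <stdio.h>

#define MAX_TEMP 127			/* illustrative limit, like I8K_MAX_TEMP */

/* Stub for the SMM call: produces bogus spikes on calls 0, 2 and 3. */
static int read_raw_temp(int sensor, int call)
{
	return (call == 0 || call >= 2) ? 200 : 40 + sensor;
}

static int get_temp(int sensor, int call)
{
	/* Sentinel-initialised cache: "no previous good value yet". */
	static int prev[4] = { MAX_TEMP + 1, MAX_TEMP + 1,
			       MAX_TEMP + 1, MAX_TEMP + 1 };
	int temp = read_raw_temp(sensor, call);

	if (temp > MAX_TEMP) {
		temp = prev[sensor];		/* fall back to the last good value */
		prev[sensor] = MAX_TEMP + 1;	/* ...but only for one reading */
	} else {
		prev[sensor] = temp;
	}
	if (temp > MAX_TEMP)
		return -ERANGE;			/* sensor exists, value unusable */
	return temp;
}

int main(void)
{
	int call;

	/* Expected: -ERANGE, 40, 40 (one spike tolerated), -ERANGE again. */
	for (call = 0; call < 4; call++)
		printf("call %d -> %d\n", call, get_temp(0, call));
	return 0;
}

Before the change prev[] started at zero, so the very first bad reading produced a bogus value of 0 instead of an error.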
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index db1c9b7adaa6..6ed9e9fe5233 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -62,6 +62,20 @@ config IPMI_SI_PROBE_DEFAULTS
only be available on older systems if the "ipmi_si_intf.trydefaults=1"
boot argument is passed.
+config IPMI_SSIF
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+ Provides a driver for an SMBus interface to a BMC, meaning that you
+ have a BMC that must be accessed over an I2C bus instead of a
+ standard interface. This module requires I2C support.
+
+config IPMI_POWERNV
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
+
config IPMI_WATCHDOG
tristate 'IPMI Watchdog Timer'
help
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 16a93648d54e..f3ffde1f5f1f 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -7,5 +7,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f816211f062f..5fa83f751378 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(ipmi_smi_t intf);
static void need_waiter(ipmi_smi_t intf);
+static int handle_one_recv_msg(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg);
static int initialized;
@@ -191,12 +193,12 @@ struct ipmi_proc_entry {
#endif
struct bmc_device {
- struct platform_device *dev;
+ struct platform_device pdev;
struct ipmi_device_id id;
unsigned char guid[16];
int guid_set;
-
- struct kref refcount;
+ char name[16];
+ struct kref usecount;
/* bmc device attributes */
struct device_attribute device_id_attr;
@@ -210,6 +212,7 @@ struct bmc_device {
struct device_attribute guid_attr;
struct device_attribute aux_firmware_rev_attr;
};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
/*
* Various statistics for IPMI, these index stats[] in the ipmi_smi
@@ -323,6 +326,9 @@ struct ipmi_smi {
struct kref refcount;
+ /* Set when the interface is being unregistered. */
+ bool in_shutdown;
+
/* Used for a list of interfaces. */
struct list_head link;
@@ -341,7 +347,6 @@ struct ipmi_smi {
struct bmc_device *bmc;
char *my_dev_name;
- char *sysfs_name;
/*
* This is the lower-layer's sender routine. Note that you
@@ -377,11 +382,16 @@ struct ipmi_smi {
* periodic timer interrupt. The tasklet is for handling received
* messages directly from the handler.
*/
- spinlock_t waiting_msgs_lock;
- struct list_head waiting_msgs;
+ spinlock_t waiting_rcv_msgs_lock;
+ struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
struct tasklet_struct recv_tasklet;
+ spinlock_t xmit_msgs_lock;
+ struct list_head xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ struct list_head hp_xmit_msgs;
+
/*
* The list of command receivers that are registered for commands
* on this interface.
@@ -474,6 +484,18 @@ static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+ if (src > SI_DEFAULT)
+ src = 0; /* Invalid */
+ return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
static int is_lan_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_LAN_ADDR_TYPE;
@@ -517,7 +539,7 @@ static void clean_up_interface_data(ipmi_smi_t intf)
tasklet_kill(&intf->recv_tasklet);
- free_smi_msg_list(&intf->waiting_msgs);
+ free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
@@ -1473,6 +1495,30 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
smi_msg->msgid = msgid;
}
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+{
+ int run_to_completion = intf->run_to_completion;
+ unsigned long flags;
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg) {
+ if (priority > 0)
+ list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+ else
+ list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+ smi_msg = NULL;
+ } else {
+ intf->curr_msg = smi_msg;
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+}
+
/*
* Separate from ipmi_request so that the user does not have to be
* supplied in certain circumstances (mainly at panic time). If
@@ -1497,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
unsigned long flags;
- struct ipmi_smi_handlers *handlers;
if (supplied_recv)
@@ -1520,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
}
rcu_read_lock();
- handlers = intf->handlers;
- if (!handlers) {
+ if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
@@ -1856,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
}
#endif
- handlers->sender(intf->send_info, smi_msg, priority);
+ smi_send(intf, intf->handlers, smi_msg, priority);
rcu_read_unlock();
return 0;
@@ -2153,7 +2197,7 @@ static void remove_proc_entries(ipmi_smi_t smi)
static int __find_bmc_guid(struct device *dev, void *data)
{
unsigned char *id = data;
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return memcmp(bmc->guid, id, 16) == 0;
}
@@ -2164,7 +2208,7 @@ static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
if (dev)
- return dev_get_drvdata(dev);
+ return to_bmc_device(dev);
else
return NULL;
}
@@ -2177,7 +2221,7 @@ struct prod_dev_id {
static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
struct prod_dev_id *id = data;
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return (bmc->id.product_id == id->product_id
&& bmc->id.device_id == id->device_id);
@@ -2195,7 +2239,7 @@ static struct bmc_device *ipmi_find_bmc_prod_dev_id(
dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
if (dev)
- return dev_get_drvdata(dev);
+ return to_bmc_device(dev);
else
return NULL;
}
@@ -2204,84 +2248,92 @@ static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
+DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
-static ssize_t provides_dev_sdrs_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t provides_device_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
+DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
+DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
-static ssize_t firmware_rev_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t firmware_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
+DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u.%u\n",
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
+DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
+DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
+DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
+DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
bmc->id.aux_firmware_revision[3],
@@ -2289,174 +2341,96 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
+DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 100, "%Lx%Lx\n",
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
+DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+
+static struct attribute *bmc_dev_attrs[] = {
+ &dev_attr_device_id.attr,
+ &dev_attr_provides_device_sdrs.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_ipmi_version.attr,
+ &dev_attr_additional_device_support.attr,
+ &dev_attr_manufacturer_id.attr,
+ &dev_attr_product_id.attr,
+ NULL
+};
-static void remove_files(struct bmc_device *bmc)
-{
- if (!bmc->dev)
- return;
+static struct attribute_group bmc_dev_attr_group = {
+ .attrs = bmc_dev_attrs,
+};
- device_remove_file(&bmc->dev->dev,
- &bmc->device_id_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->revision_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->version_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->product_id_attr);
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+ &bmc_dev_attr_group,
+ NULL
+};
- if (bmc->id.aux_firmware_revision_set)
- device_remove_file(&bmc->dev->dev,
- &bmc->aux_firmware_rev_attr);
- if (bmc->guid_set)
- device_remove_file(&bmc->dev->dev,
- &bmc->guid_attr);
+static struct device_type bmc_device_type = {
+ .groups = bmc_dev_attr_groups,
+};
+
+static void
+release_bmc_device(struct device *dev)
+{
+ kfree(to_bmc_device(dev));
}
static void
cleanup_bmc_device(struct kref *ref)
{
- struct bmc_device *bmc;
+ struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
- bmc = container_of(ref, struct bmc_device, refcount);
+ if (bmc->id.aux_firmware_revision_set)
+ device_remove_file(&bmc->pdev.dev,
+ &bmc->aux_firmware_rev_attr);
+ if (bmc->guid_set)
+ device_remove_file(&bmc->pdev.dev,
+ &bmc->guid_attr);
- remove_files(bmc);
- platform_device_unregister(bmc->dev);
- kfree(bmc);
+ platform_device_unregister(&bmc->pdev);
}
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
struct bmc_device *bmc = intf->bmc;
- if (intf->sysfs_name) {
- sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
- }
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
if (intf->my_dev_name) {
- sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
}
mutex_lock(&ipmidriver_mutex);
- kref_put(&bmc->refcount, cleanup_bmc_device);
+ kref_put(&bmc->usecount, cleanup_bmc_device);
intf->bmc = NULL;
mutex_unlock(&ipmidriver_mutex);
}
-static int create_files(struct bmc_device *bmc)
+static int create_bmc_files(struct bmc_device *bmc)
{
int err;
- bmc->device_id_attr.attr.name = "device_id";
- bmc->device_id_attr.attr.mode = S_IRUGO;
- bmc->device_id_attr.show = device_id_show;
- sysfs_attr_init(&bmc->device_id_attr.attr);
-
- bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
- bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
- bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
- sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
-
- bmc->revision_attr.attr.name = "revision";
- bmc->revision_attr.attr.mode = S_IRUGO;
- bmc->revision_attr.show = revision_show;
- sysfs_attr_init(&bmc->revision_attr.attr);
-
- bmc->firmware_rev_attr.attr.name = "firmware_revision";
- bmc->firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->firmware_rev_attr.show = firmware_rev_show;
- sysfs_attr_init(&bmc->firmware_rev_attr.attr);
-
- bmc->version_attr.attr.name = "ipmi_version";
- bmc->version_attr.attr.mode = S_IRUGO;
- bmc->version_attr.show = ipmi_version_show;
- sysfs_attr_init(&bmc->version_attr.attr);
-
- bmc->add_dev_support_attr.attr.name = "additional_device_support";
- bmc->add_dev_support_attr.attr.mode = S_IRUGO;
- bmc->add_dev_support_attr.show = add_dev_support_show;
- sysfs_attr_init(&bmc->add_dev_support_attr.attr);
-
- bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
- bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
- bmc->manufacturer_id_attr.show = manufacturer_id_show;
- sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
-
- bmc->product_id_attr.attr.name = "product_id";
- bmc->product_id_attr.attr.mode = S_IRUGO;
- bmc->product_id_attr.show = product_id_show;
- sysfs_attr_init(&bmc->product_id_attr.attr);
-
- bmc->guid_attr.attr.name = "guid";
- bmc->guid_attr.attr.mode = S_IRUGO;
- bmc->guid_attr.show = guid_show;
- sysfs_attr_init(&bmc->guid_attr.attr);
-
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
- bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
- sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
-
- err = device_create_file(&bmc->dev->dev,
- &bmc->device_id_attr);
- if (err)
- goto out;
- err = device_create_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
- if (err)
- goto out_devid;
- err = device_create_file(&bmc->dev->dev,
- &bmc->revision_attr);
- if (err)
- goto out_sdrs;
- err = device_create_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
- if (err)
- goto out_rev;
- err = device_create_file(&bmc->dev->dev,
- &bmc->version_attr);
- if (err)
- goto out_firm;
- err = device_create_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
- if (err)
- goto out_version;
- err = device_create_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
- if (err)
- goto out_add_dev;
- err = device_create_file(&bmc->dev->dev,
- &bmc->product_id_attr);
- if (err)
- goto out_manu;
if (bmc->id.aux_firmware_revision_set) {
- err = device_create_file(&bmc->dev->dev,
+ bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+ err = device_create_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
if (err)
- goto out_prod_id;
+ goto out;
}
if (bmc->guid_set) {
- err = device_create_file(&bmc->dev->dev,
+ bmc->guid_attr.attr.name = "guid";
+ err = device_create_file(&bmc->pdev.dev,
&bmc->guid_attr);
if (err)
goto out_aux_firm;
@@ -2466,44 +2440,17 @@ static int create_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
- device_remove_file(&bmc->dev->dev,
+ device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
-out_prod_id:
- device_remove_file(&bmc->dev->dev,
- &bmc->product_id_attr);
-out_manu:
- device_remove_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
-out_add_dev:
- device_remove_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
-out_version:
- device_remove_file(&bmc->dev->dev,
- &bmc->version_attr);
-out_firm:
- device_remove_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
-out_rev:
- device_remove_file(&bmc->dev->dev,
- &bmc->revision_attr);
-out_sdrs:
- device_remove_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
-out_devid:
- device_remove_file(&bmc->dev->dev,
- &bmc->device_id_attr);
out:
return err;
}
-static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
- const char *sysfs_name)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
{
int rv;
struct bmc_device *bmc = intf->bmc;
struct bmc_device *old_bmc;
- int size;
- char dummy[1];
mutex_lock(&ipmidriver_mutex);
@@ -2527,7 +2474,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
intf->bmc = old_bmc;
bmc = old_bmc;
- kref_get(&bmc->refcount);
+ kref_get(&bmc->usecount);
mutex_unlock(&ipmidriver_mutex);
printk(KERN_INFO
@@ -2537,12 +2484,12 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
bmc->id.product_id,
bmc->id.device_id);
} else {
- char name[14];
unsigned char orig_dev_id = bmc->id.device_id;
int warn_printed = 0;
- snprintf(name, sizeof(name),
+ snprintf(bmc->name, sizeof(bmc->name),
"ipmi_bmc.%4.4x", bmc->id.product_id);
+ bmc->pdev.name = bmc->name;
while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
bmc->id.product_id,
@@ -2566,23 +2513,16 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
}
}
- bmc->dev = platform_device_alloc(name, bmc->id.device_id);
- if (!bmc->dev) {
- mutex_unlock(&ipmidriver_mutex);
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to allocate platform device\n");
- return -ENOMEM;
- }
- bmc->dev->dev.driver = &ipmidriver.driver;
- dev_set_drvdata(&bmc->dev->dev, bmc);
- kref_init(&bmc->refcount);
+ bmc->pdev.dev.driver = &ipmidriver.driver;
+ bmc->pdev.id = bmc->id.device_id;
+ bmc->pdev.dev.release = release_bmc_device;
+ bmc->pdev.dev.type = &bmc_device_type;
+ kref_init(&bmc->usecount);
- rv = platform_device_add(bmc->dev);
+ rv = platform_device_register(&bmc->pdev);
mutex_unlock(&ipmidriver_mutex);
if (rv) {
- platform_device_put(bmc->dev);
- bmc->dev = NULL;
+ put_device(&bmc->pdev.dev);
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to register bmc device: %d\n",
@@ -2594,10 +2534,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- rv = create_files(bmc);
+ rv = create_bmc_files(bmc);
if (rv) {
mutex_lock(&ipmidriver_mutex);
- platform_device_unregister(bmc->dev);
+ platform_device_unregister(&bmc->pdev);
mutex_unlock(&ipmidriver_mutex);
return rv;
@@ -2614,44 +2554,26 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
* create symlink from system interface device to bmc device
* and back.
*/
- intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
- if (!intf->sysfs_name) {
- rv = -ENOMEM;
- printk(KERN_ERR
- "ipmi_msghandler: allocate link to BMC: %d\n",
- rv);
- goto out_err;
- }
-
- rv = sysfs_create_link(&intf->si_dev->kobj,
- &bmc->dev->dev.kobj, intf->sysfs_name);
+ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
printk(KERN_ERR
"ipmi_msghandler: Unable to create bmc symlink: %d\n",
rv);
goto out_err;
}
- size = snprintf(dummy, 0, "ipmi%d", ifnum);
- intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
if (!intf->my_dev_name) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
rv = -ENOMEM;
printk(KERN_ERR
"ipmi_msghandler: allocate link from BMC: %d\n",
rv);
goto out_err;
}
- snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
- rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
+ rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
intf->my_dev_name);
if (rv) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
printk(KERN_ERR
@@ -2850,7 +2772,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
- const char *sysfs_name,
unsigned char slave_addr)
{
int i, j;
@@ -2909,12 +2830,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#ifdef CONFIG_PROC_FS
mutex_init(&intf->proc_entry_lock);
#endif
- spin_lock_init(&intf->waiting_msgs_lock);
- INIT_LIST_HEAD(&intf->waiting_msgs);
+ spin_lock_init(&intf->waiting_rcv_msgs_lock);
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
tasklet_init(&intf->recv_tasklet,
smi_recv_tasklet,
(unsigned long) intf);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+ spin_lock_init(&intf->xmit_msgs_lock);
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
spin_lock_init(&intf->events_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -2984,7 +2908,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i, sysfs_name);
+ rv = ipmi_bmc_register(intf, i);
out:
if (rv) {
@@ -3014,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
}
EXPORT_SYMBOL(ipmi_register_smi);
+static void deliver_smi_err_response(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+{
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+ /* It's an error, so it will never requeue, no need to check return. */
+ handle_one_recv_msg(intf, msg);
+}
+
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
int i;
struct seq_table *ent;
+ struct ipmi_smi_msg *msg;
+ struct list_head *entry;
+ struct list_head tmplist;
+
+ /* Clear out our transmit queues and hold the messages. */
+ INIT_LIST_HEAD(&tmplist);
+ list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+ list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+ schedule_timeout(1);
+ }
/* No need for locks, the interface is down. */
+
+ /*
+ * Return errors for all pending messages in queue and in the
+ * tables waiting for remote responses.
+ */
+ while (!list_empty(&tmplist)) {
+ entry = tmplist.next;
+ list_del(entry);
+ msg = list_entry(entry, struct ipmi_smi_msg, link);
+ deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+ }
+
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
ent = &(intf->seq_table[i]);
if (!ent->inuse)
@@ -3031,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
int ipmi_unregister_smi(ipmi_smi_t intf)
{
struct ipmi_smi_watcher *w;
- int intf_num = intf->intf_num;
+ int intf_num = intf->intf_num;
+ ipmi_user_t user;
ipmi_bmc_unregister(intf);
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
- intf->handlers = NULL;
+ intf->in_shutdown = true;
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_rcu();
cleanup_smi_msgs(intf);
+ /* Clean up the effects of users on the lower-level software. */
+ mutex_lock(&ipmi_interfaces_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ module_put(intf->handlers->owner);
+ if (intf->handlers->dec_usecount)
+ intf->handlers->dec_usecount(intf->send_info);
+ }
+ rcu_read_unlock();
+ intf->handlers = NULL;
+ mutex_unlock(&ipmi_interfaces_mutex);
+
remove_proc_entries(intf);
/*
@@ -3134,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
ipmi_user_t user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
struct ipmi_recv_msg *recv_msg;
- struct ipmi_smi_handlers *handlers;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3188,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
}
#endif
rcu_read_lock();
- handlers = intf->handlers;
- if (handlers) {
- handlers->sender(intf->send_info, msg, 0);
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
/*
* We used the message, so return the value
* that causes it to not be freed or
@@ -3857,32 +3830,32 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- while (!list_empty(&intf->waiting_msgs)) {
- smi_msg = list_entry(intf->waiting_msgs.next,
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_rcv_msgs)) {
+ smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
- list_del(&smi_msg->link);
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
rv = handle_one_recv_msg(intf, smi_msg);
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (rv == 0) {
- /* Message handled */
- ipmi_free_smi_msg(smi_msg);
- } else if (rv < 0) {
- /* Fatal error on the message, del but don't free. */
- } else {
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ if (rv > 0) {
/*
* To preserve message order, quit if we
* can't handle a message.
*/
- list_add(&smi_msg->link, &intf->waiting_msgs);
break;
+ } else {
+ list_del(&smi_msg->link);
+ if (rv == 0)
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ /* If rv < 0, fatal error, del but don't free. */
}
}
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
/*
* If the pretimout count is non-zero, decrement one from it and
@@ -3903,7 +3876,41 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
static void smi_recv_tasklet(unsigned long val)
{
- handle_new_recv_msgs((ipmi_smi_t) val);
+ unsigned long flags = 0; /* keep us warning-free. */
+ ipmi_smi_t intf = (ipmi_smi_t) val;
+ int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *newmsg = NULL;
+
+ /*
+ * Start the next message if available.
+ *
+ * Do this here, not in the actual receiver, to avoid a deadlock:
+ * the lower layer is allowed to hold locks while calling message
+ * delivery.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg == NULL && !intf->in_shutdown) {
+ struct list_head *entry = NULL;
+
+ /* Pick the high priority queue first. */
+ if (!list_empty(&intf->hp_xmit_msgs))
+ entry = intf->hp_xmit_msgs.next;
+ else if (!list_empty(&intf->xmit_msgs))
+ entry = intf->xmit_msgs.next;
+
+ if (entry) {
+ list_del(entry);
+ newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+ intf->curr_msg = newmsg;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ if (newmsg)
+ intf->handlers->sender(intf->send_info, newmsg);
+
+ handle_new_recv_msgs(intf);
}
/* Handle a new message from the lower layer. */
@@ -3911,13 +3918,16 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion;
-
+ int run_to_completion = intf->run_to_completion;
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
+
+ if (intf->in_shutdown)
+ goto free_msg;
+
/*
* This is the local response to a command send, start
* the timer for these. The user_data will not be
@@ -3953,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
+free_msg:
ipmi_free_smi_msg(msg);
- goto out;
+ } else {
+ /*
+ * To preserve message order, we keep a queue and deliver from
+ * a tasklet.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
}
- /*
- * To preserve message order, if the list is not empty, we
- * tack this message onto the end of the list.
- */
- run_to_completion = intf->run_to_completion;
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (msg == intf->curr_msg)
+ intf->curr_msg = NULL;
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- tasklet_schedule(&intf->recv_tasklet);
- out:
- return;
+ if (run_to_completion)
+ smi_recv_tasklet((unsigned long) intf);
+ else
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
+ if (intf->in_shutdown)
+ return;
+
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
tasklet_schedule(&intf->recv_tasklet);
}
@@ -4017,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct ipmi_recv_msg *msg;
struct ipmi_smi_handlers *handlers;
- if (intf->intf_num == -1)
+ if (intf->in_shutdown)
return;
if (!ent->inuse)
@@ -4082,8 +4103,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
ipmi_inc_stat(intf,
retransmitted_ipmb_commands);
- intf->handlers->sender(intf->send_info,
- smi_msg, 0);
+ smi_send(intf, intf->handlers, smi_msg, 0);
} else
ipmi_free_smi_msg(smi_msg);
@@ -4145,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
static void ipmi_request_event(ipmi_smi_t intf)
{
- struct ipmi_smi_handlers *handlers;
-
/* No event requests when in maintenance mode. */
if (intf->maintenance_mode_enable)
return;
- handlers = intf->handlers;
- if (handlers)
- handlers->request_events(intf->send_info);
+ if (!intf->in_shutdown)
+ intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;
@@ -4548,6 +4565,7 @@ static int ipmi_init_msghandler(void)
proc_ipmi_root = proc_mkdir("ipmi", NULL);
if (!proc_ipmi_root) {
printk(KERN_ERR PFX "Unable to create IPMI proc dir");
+ driver_unregister(&ipmidriver.driver);
return -ENOMEM;
}
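The ipmi_msghandler.c changes above pull transmit scheduling up into the message handler: each interface now carries a normal and a high-priority transmit list plus a single in-flight curr_msg, smi_send() hands a message to the lower layer only when nothing is outstanding (queueing it otherwise), and the receive tasklet starts the next queued message once the current one completes. The sketch below models that "one message in flight, two priority queues" discipline in plain C; the queue and message types and the function names are simplified stand-ins, not the kernel structures, and the real code additionally guards the lists with xmit_msgs_lock unless run_to_completion is set.

#include <stddef.h>
#include <stdio.h>

/* Toy message and FIFO queue standing in for ipmi_smi_msg and list_head. */
struct msg { const char *name; struct msg *next; };
struct queue { struct msg *head, *tail; };

static void enqueue(struct queue *q, struct msg *m)
{
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
}

static struct msg *dequeue(struct queue *q)
{
	struct msg *m = q->head;

	if (m) {
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
	}
	return m;
}

/* One message in flight at a time; the rest wait on a normal or
 * high-priority queue (cf. xmit_msgs / hp_xmit_msgs / curr_msg). */
static struct queue xmit, hp_xmit;
static struct msg *curr;

static void lower_layer_send(struct msg *m)
{
	printf("sending %s\n", m->name);
}

/* Modeled on smi_send(): send immediately only if nothing is outstanding,
 * otherwise queue by priority. */
static void send_msg(struct msg *m, int priority)
{
	if (curr) {
		enqueue(priority > 0 ? &hp_xmit : &xmit, m);
		return;
	}
	curr = m;
	lower_layer_send(m);
}

/* Modeled on the tasklet side: when the in-flight message completes,
 * start the next one, high-priority queue first. */
static void message_done(void)
{
	struct msg *next;

	curr = NULL;
	next = dequeue(&hp_xmit);
	if (!next)
		next = dequeue(&xmit);
	if (next) {
		curr = next;
		lower_layer_send(next);
	}
}

int main(void)
{
	struct msg a = { "get-device-id" };
	struct msg b = { "get-msg-flags" };
	struct msg c = { "watchdog-reset (high prio)" };

	send_msg(&a, 0);	/* goes out immediately */
	send_msg(&b, 0);	/* queued: a is still in flight */
	send_msg(&c, 1);	/* queued on the high-priority list */
	message_done();		/* a done -> c is sent before b */
	message_done();		/* c done -> b is sent */
	message_done();		/* b done -> idle */
	return 0;
}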
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 000000000000..79524ed2a3cb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,310 @@
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#define pr_fmt(fmt) "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <asm/opal.h>
+
+
+struct ipmi_smi_powernv {
+ u64 interface_id;
+ struct ipmi_device_id ipmi_id;
+ ipmi_smi_t intf;
+ u64 event;
+ struct notifier_block event_nb;
+
+ /**
+ * We assume that there can only be one outstanding request, so
+ * keep the pending message in cur_msg. msg_lock protects cur_msg (and
+ * consequently opal_msg, which is in use while cur_msg is set) from
+ * concurrent updates by the send and recv paths.
+ */
+ spinlock_t msg_lock;
+ struct ipmi_smi_msg *cur_msg;
+ struct opal_ipmi_msg *opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ smi->intf = intf;
+ return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+ struct ipmi_smi_msg *msg, u8 completion_code)
+{
+ msg->rsp[0] = msg->data[0] | 0x4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = completion_code;
+ msg->rsp_size = 3;
+ ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+ struct opal_ipmi_msg *opal_msg;
+ unsigned long flags;
+ int comp, rc;
+ size_t size;
+
+ /* ensure data_len will fit in the opal_ipmi_msg buffer... */
+ if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+ comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+ goto err;
+ }
+
+ /* ... and that we at least have netfn and cmd bytes */
+ if (msg->data_size < 2) {
+ comp = IPMI_REQ_LEN_INVALID_ERR;
+ goto err;
+ }
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (smi->cur_msg) {
+ comp = IPMI_NODE_BUSY_ERR;
+ goto err_unlock;
+ }
+
+ /* format our data for the OPAL API */
+ opal_msg = smi->opal_msg;
+ opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+ opal_msg->netfn = msg->data[0];
+ opal_msg->cmd = msg->data[1];
+ if (msg->data_size > 2)
+ memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+ /* data_size already includes the netfn and cmd bytes */
+ size = sizeof(*opal_msg) + msg->data_size - 2;
+
+ pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+ smi->interface_id, opal_msg, size);
+ rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+ pr_devel("%s: -> %d\n", __func__, rc);
+
+ if (!rc) {
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return;
+ }
+
+ comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+ send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+ struct opal_ipmi_msg *opal_msg;
+ struct ipmi_smi_msg *msg;
+ unsigned long flags;
+ uint64_t size;
+ int rc;
+
+ pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+ smi->interface_id);
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (!smi->cur_msg) {
+ /* don't leave msg_lock held on the early-exit path */
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("no current message?\n");
+ return 0;
+ }
+
+ msg = smi->cur_msg;
+ opal_msg = smi->opal_msg;
+
+ size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+ rc = opal_ipmi_recv(smi->interface_id,
+ opal_msg,
+ &size);
+ size = be64_to_cpu(size);
+ pr_devel("%s: -> %d (size %lld)\n", __func__,
+ rc, rc == 0 ? size : 0);
+ if (rc) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_free_smi_msg(msg);
+ return 0;
+ }
+
+ if (size < sizeof(*opal_msg)) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message size %lld\n", size);
+ return 0;
+ }
+
+ if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message format (version %d)\n",
+ opal_msg->version);
+ return 0;
+ }
+
+ msg->rsp[0] = opal_msg->netfn;
+ msg->rsp[1] = opal_msg->cmd;
+ if (size > sizeof(*opal_msg))
+ memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+ msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_smi_msg_received(smi->intf, msg);
+ return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+ bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ ipmi_powernv_recv(smi);
+}
+
+static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = ipmi_powernv_start_processing,
+ .sender = ipmi_powernv_send,
+ .request_events = ipmi_powernv_request_events,
+ .set_run_to_completion = ipmi_powernv_set_run_to_completion,
+ .poll = ipmi_powernv_poll,
+};
+
+static int ipmi_opal_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ struct ipmi_smi_powernv *smi = container_of(nb,
+ struct ipmi_smi_powernv, event_nb);
+
+ if (events & smi->event)
+ ipmi_powernv_recv(smi);
+ return 0;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *ipmi;
+ struct device *dev;
+ u32 prop;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+
+ ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+ if (!ipmi)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmi->msg_lock);
+
+ rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+ &prop);
+ if (rc) {
+ dev_warn(dev, "No interface ID property\n");
+ goto err_free;
+ }
+ ipmi->interface_id = prop;
+
+ rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+ if (rc) {
+ dev_warn(dev, "No interrupts property\n");
+ goto err_free;
+ }
+
+ ipmi->event = 1ull << prop;
+ ipmi->event_nb.notifier_call = ipmi_opal_event;
+
+ rc = opal_notifier_register(&ipmi->event_nb);
+ if (rc) {
+ dev_warn(dev, "OPAL notifier registration failed (%d)\n", rc);
+ goto err_free;
+ }
+
+ ipmi->opal_msg = devm_kmalloc(dev,
+ sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+ GFP_KERNEL);
+ if (!ipmi->opal_msg) {
+ rc = -ENOMEM;
+ goto err_unregister;
+ }
+
+ /* todo: query actual ipmi_device_id */
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
+ &ipmi->ipmi_id, dev, 0);
+ if (rc) {
+ dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+ goto err_free_msg;
+ }
+
+ dev_set_drvdata(dev, ipmi);
+ return 0;
+
+err_free_msg:
+ devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+ opal_notifier_unregister(&ipmi->event_nb);
+err_free:
+ devm_kfree(dev, ipmi);
+ return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+ ipmi_unregister_smi(smi->intf);
+ opal_notifier_unregister(&smi->event_nb);
+ return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+ { .compatible = "ibm,opal-ipmi" },
+ { },
+};
+
+
+static struct platform_driver powernv_ipmi_driver = {
+ .driver = {
+ .name = "ipmi-powernv",
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_powernv_match,
+ },
+ .probe = ipmi_powernv_probe,
+ .remove = ipmi_powernv_remove,
+};
+
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
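The new OPAL driver's locking comment (see the struct near the top of the file) boils down to: one request may be outstanding at a time, send rejects a second request with IPMI_NODE_BUSY_ERR while cur_msg is set, and recv clears cur_msg under msg_lock and then delivers the response outside the lock. A stripped-down sketch of that pattern using a pthread mutex in place of the kernel spinlock; the names and the -EBUSY return are illustrative only:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* One request may be outstanding at a time; "lock" plays the role of
 * smi->msg_lock and "cur_msg" the role of smi->cur_msg. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *cur_msg;

/* cf. ipmi_powernv_send(): refuse a second request while one is pending. */
static int submit(const char *msg)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (cur_msg)
		rc = -EBUSY;	/* the driver answers with IPMI_NODE_BUSY_ERR */
	else
		cur_msg = msg;	/* the shared buffer now belongs to this request */
	pthread_mutex_unlock(&lock);
	return rc;
}

/* cf. ipmi_powernv_recv(): clear cur_msg under the lock, deliver outside it. */
static void complete(void)
{
	const char *msg;

	pthread_mutex_lock(&lock);
	msg = cur_msg;
	cur_msg = NULL;
	pthread_mutex_unlock(&lock);
	if (msg)
		printf("completed: %s\n", msg);
}

int main(void)
{
	printf("submit A -> %d\n", submit("A"));	/* 0 */
	printf("submit B -> %d\n", submit("B"));	/* busy while A is pending */
	complete();					/* completed: A */
	printf("submit B -> %d\n", submit("B"));	/* 0 */
	complete();
	return 0;
}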
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5c4e1f625bbb..967b73aa4e66 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -92,12 +92,9 @@ enum si_intf_state {
SI_GETTING_FLAGS,
SI_GETTING_EVENTS,
SI_CLEARING_FLAGS,
- SI_CLEARING_FLAGS_THEN_SET_IRQ,
SI_GETTING_MESSAGES,
- SI_ENABLE_INTERRUPTS1,
- SI_ENABLE_INTERRUPTS2,
- SI_DISABLE_INTERRUPTS1,
- SI_DISABLE_INTERRUPTS2
+ SI_CHECKING_ENABLES,
+ SI_SETTING_ENABLES
/* FIXME - add watchdog stuff. */
};
@@ -111,10 +108,6 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
-static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
- "ACPI", "SMBIOS", "PCI",
- "device-tree", "default" };
-
#define DEVICE_NAME "ipmi_si"
static struct platform_driver ipmi_driver;
@@ -174,8 +167,7 @@ struct smi_info {
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
- struct list_head xmit_msgs;
- struct list_head hp_xmit_msgs;
+ struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
@@ -254,9 +246,6 @@ struct smi_info {
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
- /* Used to gracefully stop the timer without race conditions. */
- atomic_t stop_operation;
-
/* Are we waiting for the events, pretimeouts, received msgs? */
atomic_t need_watch;
@@ -268,6 +257,16 @@ struct smi_info {
*/
bool interrupt_disabled;
+ /*
+ * Does the BMC support events?
+ */
+ bool supports_event_msg_buff;
+
+ /*
+ * Did we get an attention that we did not handle?
+ */
+ bool got_attn;
+
/* From the get device id response... */
struct ipmi_device_id device_id;
@@ -332,7 +331,10 @@ static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer. */
- ipmi_smi_msg_received(smi_info->intf, msg);
+ if (smi_info->intf)
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ else
+ ipmi_free_smi_msg(msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -356,28 +358,18 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
int rv;
- struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
- /* Pick the high priority queue first. */
- if (!list_empty(&(smi_info->hp_xmit_msgs))) {
- entry = smi_info->hp_xmit_msgs.next;
- } else if (!list_empty(&(smi_info->xmit_msgs))) {
- entry = smi_info->xmit_msgs.next;
- }
-
- if (!entry) {
+ if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
int err;
- list_del(entry);
- smi_info->curr_msg = list_entry(entry,
- struct ipmi_smi_msg,
- link);
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
@@ -401,30 +393,15 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
return rv;
}
-static void start_enable_irq(struct smi_info *smi_info)
+static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
- /*
- * If we are enabling interrupts, we have to tell the
- * BMC to use them.
- */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_ENABLE_INTERRUPTS1;
-}
-
-static void start_disable_irq(struct smi_info *smi_info)
-{
- unsigned char msg[2];
-
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
-
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_DISABLE_INTERRUPTS1;
+ smi_info->si_state = SI_CHECKING_ENABLES;
}
static void start_clear_flags(struct smi_info *smi_info)
@@ -440,6 +417,32 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+}
+
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
smi_info->last_timeout_jiffies = jiffies;
@@ -453,22 +456,45 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
*/
-static inline void disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
- start_disable_irq(smi_info);
smi_info->interrupt_disabled = true;
- if (!atomic_read(&smi_info->stop_operation))
- smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+ start_check_enables(smi_info);
+ return true;
}
+ return false;
}
-static inline void enable_si_irq(struct smi_info *smi_info)
+static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
- start_enable_irq(smi_info);
smi_info->interrupt_disabled = false;
+ start_check_enables(smi_info);
+ return true;
}
+ return false;
+}
+
+/*
+ * Allocate a message. If unable to allocate, start the interrupt
+ * disable process and return NULL. If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ if (!disable_si_irq(smi_info))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+ msg = NULL;
+ }
+ return msg;
}
static void handle_flags(struct smi_info *smi_info)
@@ -480,45 +506,22 @@ static void handle_flags(struct smi_info *smi_info)
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
- ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ if (smi_info->intf)
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg) {
- disable_si_irq(smi_info);
- smi_info->si_state = SI_NORMAL;
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
return;
- }
- enable_si_irq(smi_info);
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
- smi_info->curr_msg->data_size = 2;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_MESSAGES;
+ start_getting_msg_queue(smi_info);
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg) {
- disable_si_irq(smi_info);
- smi_info->si_state = SI_NORMAL;
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
return;
- }
- enable_si_irq(smi_info);
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
- smi_info->curr_msg->data_size = 2;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_EVENTS;
+ start_getting_events(smi_info);
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
@@ -527,6 +530,55 @@ static void handle_flags(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
}
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+ bool *irq_on)
+{
+ u8 enables = 0;
+
+ if (smi_info->supports_event_msg_buff)
+ enables |= IPMI_BMC_EVT_MSG_BUFF;
+ else
+ enables &= ~IPMI_BMC_EVT_MSG_BUFF;
+
+ if (smi_info->irq && !smi_info->interrupt_disabled)
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+ else
+ enables &= ~IPMI_BMC_RCV_MSG_INTR;
+
+ if (smi_info->supports_event_msg_buff &&
+ smi_info->irq && !smi_info->interrupt_disabled)
+ enables |= IPMI_BMC_EVT_MSG_INTR;
+ else
+ enables &= ~IPMI_BMC_EVT_MSG_INTR;
+
+ *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+ return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+ u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+ irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+ if ((bool)irqstate == irq_on)
+ return;
+
+ if (irq_on)
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ else
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
@@ -581,7 +633,6 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
case SI_CLEARING_FLAGS:
- case SI_CLEARING_FLAGS_THEN_SET_IRQ:
{
unsigned char msg[3];
@@ -592,10 +643,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
dev_warn(smi_info->dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
- if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
- start_enable_irq(smi_info);
- else
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_NORMAL;
break;
}
@@ -675,9 +723,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
- case SI_ENABLE_INTERRUPTS1:
+ case SI_CHECKING_ENABLES:
{
unsigned char msg[4];
+ u8 enables;
+ bool irq_on;
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
@@ -687,70 +737,53 @@ static void handle_transaction_done(struct smi_info *smi_info)
dev_warn(smi_info->dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
- } else {
+ break;
+ }
+ enables = current_global_enables(smi_info, 0, &irq_on);
+ if (smi_info->si_type == SI_BT)
+ /* BT has its own interrupt enable bit. */
+ check_bt_irq(smi_info, irq_on);
+ if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+ /* Enables are not correct, fix them. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = (msg[3] |
- IPMI_BMC_RCV_MSG_INTR |
- IPMI_BMC_EVT_MSG_INTR);
+ msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 3);
- smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+ smi_info->si_state = SI_SETTING_ENABLES;
+ } else if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_msg_queue(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
}
break;
}
- case SI_ENABLE_INTERRUPTS2:
+ case SI_SETTING_ENABLES:
{
unsigned char msg[4];
- /* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
+ if (msg[2] != 0)
dev_warn(smi_info->dev,
- "Couldn't set irq info: %x.\n", msg[2]);
- dev_warn(smi_info->dev,
- "Maybe ok, but ipmi might run very slowly.\n");
- } else
- smi_info->interrupt_disabled = false;
- smi_info->si_state = SI_NORMAL;
- break;
- }
-
- case SI_DISABLE_INTERRUPTS1:
- {
- unsigned char msg[4];
+ "Could not set the global enables: 0x%x.\n",
+ msg[2]);
- /* We got the flags from the SMI, now handle them. */
- smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- dev_warn(smi_info->dev, "Could not disable interrupts"
- ", failed get.\n");
- smi_info->si_state = SI_NORMAL;
+ if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_msg_queue(smi_info);
} else {
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = (msg[3] &
- ~(IPMI_BMC_RCV_MSG_INTR |
- IPMI_BMC_EVT_MSG_INTR));
- smi_info->handlers->start_transaction(
- smi_info->si_sm, msg, 3);
- smi_info->si_state = SI_DISABLE_INTERRUPTS2;
- }
- break;
- }
-
- case SI_DISABLE_INTERRUPTS2:
- {
- unsigned char msg[4];
-
- /* We got the flags from the SMI, now handle them. */
- smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- dev_warn(smi_info->dev, "Could not disable interrupts"
- ", failed set.\n");
+ smi_info->si_state = SI_NORMAL;
}
- smi_info->si_state = SI_NORMAL;
break;
}
}
@@ -808,25 +841,35 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
* We prefer handling attn over new messages. But don't do
* this if there is not yet an upper layer to handle anything.
*/
- if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
+ if (likely(smi_info->intf) &&
+ (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
unsigned char msg[2];
- smi_inc_stat(smi_info, attentions);
+ if (smi_info->si_state != SI_NORMAL) {
+ /*
+ * We got an ATTN, but we are doing something else.
+ * Handle the ATTN later.
+ */
+ smi_info->got_attn = true;
+ } else {
+ smi_info->got_attn = false;
+ smi_inc_stat(smi_info, attentions);
- /*
- * Got a attn, send down a get message flags to see
- * what's causing it. It would be better to handle
- * this in the upper layer, but due to the way
- * interrupts work with the SMI, that's not really
- * possible.
- */
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+ /*
+	 * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
- smi_info->handlers->start_transaction(
- smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
- goto restart;
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
}
/* If we are currently idle, try to start the next message. */
@@ -846,19 +889,21 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
*/
atomic_set(&smi_info->req_events, 0);
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg)
- goto out;
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
- smi_info->curr_msg->data_size = 2;
+ /*
+ * Take this opportunity to check the interrupt and
+ * message enable state for the BMC. The BMC can be
+ * asynchronously reset, and may thus get interrupts
+	 * disabled and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->irq) {
+ start_check_enables(smi_info);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ goto out;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_EVENTS;
+ start_getting_events(smi_info);
+ }
goto restart;
}
out:
@@ -879,8 +924,7 @@ static void check_start_timer_thread(struct smi_info *smi_info)
}
static void sender(void *send_info,
- struct ipmi_smi_msg *msg,
- int priority)
+ struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
@@ -889,14 +933,8 @@ static void sender(void *send_info,
struct timeval t;
#endif
- if (atomic_read(&smi_info->stop_operation)) {
- msg->rsp[0] = msg->data[0] | 4;
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
- deliver_recv_msg(smi_info, msg);
- return;
- }
+ BUG_ON(smi_info->waiting_msg);
+ smi_info->waiting_msg = msg;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
@@ -905,16 +943,16 @@ static void sender(void *send_info,
if (smi_info->run_to_completion) {
/*
- * If we are running to completion, then throw it in
- * the list and run transactions until everything is
- * clear. Priority doesn't matter here.
+ * If we are running to completion, start it and run
+ * transactions until everything is clear.
*/
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
/*
* Run to completion means we are single-threaded, no
* need for locks.
*/
- list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
@@ -926,11 +964,6 @@ static void sender(void *send_info,
}
spin_lock_irqsave(&smi_info->si_lock, flags);
- if (priority > 0)
- list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
- else
- list_add_tail(&msg->link, &smi_info->xmit_msgs);
-
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
@@ -1068,8 +1101,7 @@ static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
- if (atomic_read(&smi_info->stop_operation) ||
- !smi_info->has_event_buffer)
+ if (!smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
@@ -1697,7 +1729,7 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
}
*s = '\0';
s++;
- for (i = 0; hotmod_ops[i].name; i++) {
+ for (i = 0; v[i].name; i++) {
if (strcmp(*curr, v[i].name) == 0) {
*val = v[i].val;
*curr = s;
@@ -2133,6 +2165,9 @@ static int try_init_spmi(struct SPMITable *spmi)
case 3: /* BT */
info->si_type = SI_BT;
break;
+ case 4: /* SSIF, just ignore */
+ kfree(info);
+ return -EIO;
default:
printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
spmi->InterfaceType);
@@ -2250,6 +2285,8 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
case 3:
info->si_type = SI_BT;
break;
+ case 4: /* SSIF, just ignore */
+ goto err_free;
default:
dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
goto err_free;
@@ -2751,7 +2788,6 @@ static struct of_device_id ipmi_match[] =
static struct platform_driver ipmi_driver = {
.driver = {
.name = DEVICE_NAME,
- .owner = THIS_MODULE,
.of_match_table = ipmi_match,
},
.probe = ipmi_probe,
@@ -2913,9 +2949,11 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
goto out;
}
- if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
/* buffer is already enabled, nothing to do. */
+ smi_info->supports_event_msg_buff = true;
goto out;
+ }
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
@@ -2948,6 +2986,9 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
* that the event buffer is not supported.
*/
rv = -ENOENT;
+ else
+ smi_info->supports_event_msg_buff = true;
+
out:
kfree(resp);
return rv;
@@ -3188,15 +3229,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->intf) {
- /*
- * The timer and thread are only running if the
- * interface has been started up and registered.
- */
- if (smi_info->thread != NULL)
- kthread_stop(smi_info->thread);
+ if (smi_info->thread != NULL)
+ kthread_stop(smi_info->thread);
+ if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
- }
}
static struct ipmi_default_vals
@@ -3274,8 +3310,8 @@ static int add_smi(struct smi_info *new_smi)
int rv = 0;
printk(KERN_INFO PFX "Adding %s-specified %s state machine",
- ipmi_addr_src_to_str[new_smi->addr_source],
- si_to_str[new_smi->si_type]);
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_CONT " duplicate interface\n");
@@ -3305,7 +3341,7 @@ static int try_smi_init(struct smi_info *new_smi)
printk(KERN_INFO PFX "Trying %s-specified %s state"
" machine at %s address 0x%lx, slave address 0x%x,"
" irq %d\n",
- ipmi_addr_src_to_str[new_smi->addr_source],
+ ipmi_addr_src_to_str(new_smi->addr_source),
si_to_str[new_smi->si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
@@ -3371,8 +3407,7 @@ static int try_smi_init(struct smi_info *new_smi)
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
- INIT_LIST_HEAD(&(new_smi->xmit_msgs));
- INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+ new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = false;
@@ -3380,7 +3415,6 @@ static int try_smi_init(struct smi_info *new_smi)
atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = true;
- atomic_set(&new_smi->stop_operation, 0);
atomic_set(&new_smi->need_watch, 0);
new_smi->intf_num = smi_num;
smi_num++;
@@ -3394,9 +3428,15 @@ static int try_smi_init(struct smi_info *new_smi)
* timer to avoid racing with the timer.
*/
start_clear_flags(new_smi);
- /* IRQ is defined to be set when non-zero. */
- if (new_smi->irq)
- new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+ * cause a global flags check that will enable interrupts.
+ */
+ if (new_smi->irq) {
+ new_smi->interrupt_disabled = false;
+ atomic_set(&new_smi->req_events, 1);
+ }
if (!new_smi->dev) {
/*
@@ -3428,7 +3468,6 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi,
&new_smi->device_id,
new_smi->dev,
- "bmc",
new_smi->slave_addr);
if (rv) {
dev_err(new_smi->dev, "Unable to register device: error %d\n",
@@ -3466,15 +3505,15 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
out_err_stop_timer:
- atomic_inc(&new_smi->stop_operation);
wait_for_timer_and_thread(new_smi);
out_err:
new_smi->interrupt_disabled = true;
if (new_smi->intf) {
- ipmi_unregister_smi(new_smi->intf);
+ ipmi_smi_t intf = new_smi->intf;
new_smi->intf = NULL;
+ ipmi_unregister_smi(intf);
}
if (new_smi->irq_cleanup) {
@@ -3653,60 +3692,49 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
int rv = 0;
- unsigned long flags;
if (!to_clean)
return;
+ if (to_clean->intf) {
+ ipmi_smi_t intf = to_clean->intf;
+
+ to_clean->intf = NULL;
+ rv = ipmi_unregister_smi(intf);
+ if (rv) {
+ pr_err(PFX "Unable to unregister device: errno=%d\n",
+ rv);
+ }
+ }
+
if (to_clean->dev)
dev_set_drvdata(to_clean->dev, NULL);
list_del(&to_clean->link);
- /* Tell the driver that we are shutting down. */
- atomic_inc(&to_clean->stop_operation);
-
/*
- * Make sure the timer and thread are stopped and will not run
- * again.
+ * Make sure that interrupts, the timer and the thread are
+ * stopped and will not run again.
*/
+ if (to_clean->irq_cleanup)
+ to_clean->irq_cleanup(to_clean);
wait_for_timer_and_thread(to_clean);
/*
* Timeouts are stopped, now make sure the interrupts are off
- * for the device. A little tricky with locks to make sure
- * there are no races.
+ * in the BMC. Note that timers and CPU interrupts are off,
+ * so no need for locks.
*/
- spin_lock_irqsave(&to_clean->si_lock, flags);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
- spin_unlock_irqrestore(&to_clean->si_lock, flags);
poll(to_clean);
schedule_timeout_uninterruptible(1);
- spin_lock_irqsave(&to_clean->si_lock, flags);
}
disable_si_irq(to_clean);
- spin_unlock_irqrestore(&to_clean->si_lock, flags);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
}
- /* Clean up interrupts and make sure that everything is done. */
- if (to_clean->irq_cleanup)
- to_clean->irq_cleanup(to_clean);
- while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
- poll(to_clean);
- schedule_timeout_uninterruptible(1);
- }
-
- if (to_clean->intf)
- rv = ipmi_unregister_smi(to_clean->intf);
-
- if (rv) {
- printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
- rv);
- }
-
if (to_clean->handlers)
to_clean->handlers->cleanup(to_clean->si_sm);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 000000000000..e178ac27e73c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,1870 @@
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to an
+ * SMBus-compliant device, called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ * Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+/*
+ * TODO: Figure out how to use SMB alerts. This will require a new
+ * interface into the I2C driver, I believe.
+ */
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+
+#define PFX "ipmi_ssif: "
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
+
+#define SSIF_IPMI_REQUEST 2
+#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
+#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
+#define SSIF_IPMI_RESPONSE 3
+#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
+
+/* ssif_debug is a bit-field
+ * SSIF_DEBUG_MSG - commands and their responses
+ * SSIF_DEBUG_STATE - message states
+ * SSIF_DEBUG_TIMING - measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING 4
+#define SSIF_DEBUG_STATE 2
+#define SSIF_DEBUG_MSG 1
+#define SSIF_NODEBUG 0
+#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
+#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define SSIF_SEND_RETRIES 5
+#define SSIF_RECV_RETRIES 250
+
+#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
+enum ssif_intf_state {
+ SSIF_NORMAL,
+ SSIF_GETTING_FLAGS,
+ SSIF_GETTING_EVENTS,
+ SSIF_CLEARING_FLAGS,
+ SSIF_GETTING_MESSAGES,
+ /* FIXME - add watchdog stuff. */
+};
+
+#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
+ && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+ /* Number of total messages sent. */
+ SSIF_STAT_sent_messages = 0,
+
+ /*
+ * Number of message parts sent. Messages may be broken into
+ * parts if they are long.
+ */
+ SSIF_STAT_sent_messages_parts,
+
+ /*
+	 * Number of times a message was retried.
+ */
+ SSIF_STAT_send_retries,
+
+ /*
+ * Number of times the send of a message failed.
+ */
+ SSIF_STAT_send_errors,
+
+ /*
+ * Number of message responses received.
+ */
+ SSIF_STAT_received_messages,
+
+ /*
+ * Number of message fragments received.
+ */
+ SSIF_STAT_received_message_parts,
+
+ /*
+ * Number of times the receive of a message was retried.
+ */
+ SSIF_STAT_receive_retries,
+
+ /*
+ * Number of errors receiving messages.
+ */
+ SSIF_STAT_receive_errors,
+
+ /*
+ * Number of times a flag fetch was requested.
+ */
+ SSIF_STAT_flag_fetches,
+
+ /*
+ * Number of times the hardware didn't follow the state machine.
+ */
+ SSIF_STAT_hosed,
+
+ /*
+ * Number of received events.
+ */
+ SSIF_STAT_events,
+
+	/* Number of asynchronous messages received. */
+ SSIF_STAT_incoming_messages,
+
+ /* Number of watchdog pretimeouts. */
+ SSIF_STAT_watchdog_pretimeouts,
+
+ /* Always add statistics before this value, it must be last. */
+ SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+ unsigned short addr;
+ struct i2c_board_info binfo;
+ char *adapter_name;
+ int debug;
+ int slave_addr;
+ enum ipmi_addr_src addr_src;
+ union ipmi_smi_info_union addr_info;
+
+ struct mutex clients_mutex;
+ struct list_head clients;
+
+ struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+struct ssif_info {
+ ipmi_smi_t intf;
+ int intf_num;
+ spinlock_t lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum ssif_intf_state ssif_state;
+ unsigned long ssif_debug;
+
+ struct ipmi_smi_handlers handlers;
+
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+ unsigned char msg_flags;
+
+ bool has_event_buffer;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ bool req_events;
+
+ /*
+ * If set to true, this will request flags the next time the
+ * state machine is idle.
+ */
+ bool req_flags;
+
+ /*
+ * Used to perform timer operations when run-to-completion
+ * mode is on. This is a countdown timer.
+ */
+ int rtc_us_timer;
+
+ /* Used for sending/receiving data. +1 for the length. */
+ unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ unsigned int data_len;
+
+ /* Temp receive buffer, gets copied into data. */
+ unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+ struct i2c_client *client;
+ ssif_i2c_done done_handler;
+
+ /* Thread interface handling */
+ struct task_struct *thread;
+ struct completion wake_thread;
+ bool stopping;
+ int i2c_read_write;
+ int i2c_command;
+ unsigned char *i2c_data;
+ unsigned int i2c_size;
+
+ /* From the device id response. */
+ struct ipmi_device_id device_id;
+
+ struct timer_list retry_timer;
+ int retries_left;
+
+ /* Info from SSIF cmd */
+ unsigned char max_xmit_msg_size;
+ unsigned char max_recv_msg_size;
+ unsigned int multi_support;
+ int supports_pec;
+
+#define SSIF_NO_MULTI 0
+#define SSIF_MULTI_2_PART 1
+#define SSIF_MULTI_n_PART 2
+ unsigned char *multi_data;
+ unsigned int multi_len;
+ unsigned int multi_pos;
+
+ atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+ atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
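These helpers rely on preprocessor token pasting: ssif_inc_stat(info, hosed) expands to atomic_inc(&info->stats[SSIF_STAT_hosed]). A minimal stand-alone sketch of the same idiom (hypothetical names, plain ints instead of atomics) is:

    #include <stdio.h>

    enum { STAT_sent = 0, STAT_hosed, NUM_STATS };

    struct demo { int stats[NUM_STATS]; };

    /* Token pasting builds the enum index name from the second argument. */
    #define demo_inc_stat(d, s) ((d)->stats[STAT_ ## s]++)

    int main(void)
    {
            struct demo d = { { 0 } };

            demo_inc_stat(&d, hosed);  /* becomes d.stats[STAT_hosed]++ */
            printf("hosed = %d\n", d.stats[STAT_hosed]);
            return 0;
    }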
+
+static bool initialized;
+
+static atomic_t next_intf = ATOMIC_INIT(0);
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&ssif_info->lock, *flags);
+ return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ipmi_smi_t intf = ssif_info->intf;
+
+ if (!intf) {
+ ipmi_free_smi_msg(msg);
+ } else if (msg->rsp_size < 0) {
+ return_hosed_msg(ssif_info, msg);
+ pr_err(PFX
+ "Malformed message in deliver_recv_msg: rsp_size = %d\n",
+ msg->rsp_size);
+ } else {
+ ipmi_smi_msg_received(intf, msg);
+ }
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ssif_inc_stat(ssif_info, hosed);
+
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp_size = 3;
+
+ deliver_recv_msg(ssif_info, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char msg[3];
+
+ ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ if (start_send(ssif_info, msg, 3) != 0) {
+ /* Error, just go to normal state. */
+ ssif_info->ssif_state = SSIF_NORMAL;
+ }
+}
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char mb[2];
+
+ ssif_info->req_flags = false;
+ ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ if (start_send(ssif_info, mb, 2) != 0)
+ ssif_info->ssif_state = SSIF_NORMAL;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ struct ipmi_smi_msg *msg)
+{
+ if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+ unsigned long oflags;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->curr_msg = NULL;
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ ssif_info->req_events = false;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_GET_MSG_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ ipmi_smi_t intf = ssif_info->intf;
+ /* Watchdog pre-timeout */
+ ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+ start_clear_flags(ssif_info, flags);
+ if (intf)
+ ipmi_smi_watchdog_pretimeout(intf);
+ } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+ /* Messages available. */
+ start_recv_msg_fetch(ssif_info, flags);
+ else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+ /* Events available. */
+ start_event_fetch(ssif_info, flags);
+ else {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+ struct ssif_info *ssif_info = data;
+
+ while (!kthread_should_stop()) {
+ int result;
+
+ /* Wait for something to do */
+ wait_for_completion(&ssif_info->wake_thread);
+ init_completion(&ssif_info->wake_thread);
+
+ if (ssif_info->stopping)
+ break;
+
+ if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+ result = i2c_smbus_write_block_data(
+ ssif_info->client, SSIF_IPMI_REQUEST,
+ ssif_info->i2c_data[0],
+ ssif_info->i2c_data + 1);
+ ssif_info->done_handler(ssif_info, result, NULL, 0);
+ } else {
+ result = i2c_smbus_read_block_data(
+ ssif_info->client, SSIF_IPMI_RESPONSE,
+ ssif_info->i2c_data);
+ if (result < 0)
+ ssif_info->done_handler(ssif_info, result,
+ NULL, 0);
+ else
+ ssif_info->done_handler(ssif_info, 0,
+ ssif_info->i2c_data,
+ result);
+ }
+ }
+
+ return 0;
+}
+
+static int ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_i2c_done handler,
+ int read_write, int command,
+ unsigned char *data, unsigned int size)
+{
+ ssif_info->done_handler = handler;
+
+ ssif_info->i2c_read_write = read_write;
+ ssif_info->i2c_command = command;
+ ssif_info->i2c_data = data;
+ ssif_info->i2c_size = size;
+ complete(&ssif_info->wake_thread);
+ return 0;
+}
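Note that ssif_i2c_send() only records the request and wakes the worker; ipmi_ssif_thread() above performs the blocking SMBus transfer and then invokes the recorded done_handler with the result, which is how msg_written_handler() and msg_done_handler() below run after a write or read completes.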
+
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+static void retry_timeout(unsigned long data)
+{
+ struct ssif_info *ssif_info = (void *) data;
+ int rv;
+
+ if (ssif_info->stopping)
+ return;
+
+ ssif_info->rtc_us_timer = 0;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_RESPONSE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ /* request failed, just return the error. */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(5)\n");
+
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ }
+}
+
+static int start_resend(struct ssif_info *ssif_info);
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags, *flags;
+ int rv;
+
+ /*
+ * We are single-threaded here, so no need for a lock until we
+ * start messing with driver states or the queues.
+ */
+
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ ssif_inc_stat(ssif_info, receive_retries);
+
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
+ ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, receive_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error in msg_done_handler: %d\n", result);
+ len = 0;
+ goto continue_op;
+ }
+
+ if ((len > 1) && (ssif_info->multi_pos == 0)
+ && (data[0] == 0x00) && (data[1] == 0x01)) {
+ /* Start of multi-part read. Start the next transaction. */
+ int i;
+
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ /* Remove the multi-part read marker. */
+ for (i = 0; i < (len-2); i++)
+ ssif_info->data[i] = data[i+2];
+ len -= 2;
+ ssif_info->multi_len = len;
+ ssif_info->multi_pos = 1;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(1)\n");
+
+ result = -EIO;
+ } else
+ return;
+ } else if (ssif_info->multi_pos) {
+ /* Middle of multi-part read. Start the next transaction. */
+ int i;
+ unsigned char blocknum;
+
+ if (len == 0) {
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX "Middle message with no data\n");
+
+ goto continue_op;
+ }
+
+ blocknum = data[ssif_info->multi_len];
+
+ if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
+ /* Received message too big, abort the operation. */
+ result = -E2BIG;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Received message too big\n");
+
+ goto continue_op;
+ }
+
+ /* Remove the blocknum from the data. */
+ for (i = 0; i < (len-1); i++)
+ ssif_info->data[i+ssif_info->multi_len] = data[i+1];
+ len--;
+ ssif_info->multi_len += len;
+ if (blocknum == 0xff) {
+ /* End of read */
+ len = ssif_info->multi_len;
+ data = ssif_info->data;
+ } else if ((blocknum+1) != ssif_info->multi_pos) {
+ /*
+ * Out of sequence block, just abort. Block
+ * numbers start at zero for the second block,
+ * but multi_pos starts at one, so the +1.
+ */
+ result = -EIO;
+ } else {
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ ssif_info->multi_pos++;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler,
+ I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv,
+ I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX
+ "Error from i2c_non_blocking_op(2)\n");
+
+ result = -EIO;
+ } else
+ return;
+ }
+ }
+
+ if (result < 0) {
+ ssif_inc_stat(ssif_info, receive_errors);
+ } else {
+ ssif_inc_stat(ssif_info, received_messages);
+ ssif_inc_stat(ssif_info, received_message_parts);
+ }
+
+
+ continue_op:
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+ ssif_info->ssif_state, result);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
+ msg->rsp_size = len;
+ if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+ msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, msg->rsp_size);
+ ssif_info->curr_msg = NULL;
+ }
+
+ switch (ssif_info->ssif_state) {
+ case SSIF_NORMAL:
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (!msg)
+ break;
+
+ if (result < 0)
+ return_hosed_msg(ssif_info, msg);
+ else
+ deliver_recv_msg(ssif_info, msg);
+ break;
+
+ case SSIF_GETTING_FLAGS:
+ /* We got the flags from the SSIF, now handle them. */
+ if ((result < 0) || (len < 4) || (data[2] != 0)) {
+ /*
+ * Error fetching flags, or invalid length,
+ * just give up for now.
+ */
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ pr_warn(PFX "Error getting flags: %d %d, %x\n",
+ result, len, data[2]);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ pr_warn(PFX "Invalid response getting flags: %x %x\n",
+ data[0], data[1]);
+ } else {
+ ssif_inc_stat(ssif_info, flag_fetches);
+ ssif_info->msg_flags = data[3];
+ handle_flags(ssif_info, flags);
+ }
+ break;
+
+ case SSIF_CLEARING_FLAGS:
+ /* We cleared the flags. */
+ if ((result < 0) || (len < 3) || (data[2] != 0)) {
+ /* Error clearing flags */
+ pr_warn(PFX "Error clearing flags: %d %d, %x\n",
+ result, len, data[2]);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+ pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ data[0], data[1]);
+ }
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+
+ case SSIF_GETTING_EVENTS:
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+ pr_warn(PFX "Invalid response getting events: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else {
+ handle_flags(ssif_info, flags);
+ ssif_inc_stat(ssif_info, events);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ case SSIF_GETTING_MESSAGES:
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+			/* Error getting message, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+			pr_warn(PFX "Invalid response getting messages: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else {
+ ssif_inc_stat(ssif_info, incoming_messages);
+ handle_flags(ssif_info, flags);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+ }
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (ssif_info->req_events)
+ start_event_fetch(ssif_info, flags);
+ else if (ssif_info->req_flags)
+ start_flag_fetch(ssif_info, flags);
+ else
+ start_next_msg(ssif_info, flags);
+ } else
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ int rv;
+
+ /* We are single-threaded here, so no need for a lock. */
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ if (!start_resend(ssif_info)) {
+ ssif_inc_stat(ssif_info, send_retries);
+ return;
+ }
+ /* request failed, just return the error. */
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX
+ "Out of retries in msg_written_handler\n");
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, send_errors);
+
+ /*
+ * Got an error on transmit, let the done routine
+ * handle it.
+ */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error in msg_written_handler: %d\n", result);
+
+ msg_done_handler(ssif_info, result, NULL, 0);
+ return;
+ }
+
+ if (ssif_info->multi_data) {
+ /* In the middle of a multi-data write. */
+ int left;
+
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ left = ssif_info->multi_len - ssif_info->multi_pos;
+ if (left > 32)
+ left = 32;
+ /* Length byte. */
+ ssif_info->multi_data[ssif_info->multi_pos] = left;
+ ssif_info->multi_pos += left;
+ if (left < 32)
+ /*
+ * Write is finished. Note that we must end
+ * with a write of less than 32 bytes to
+ * complete the transaction, even if it is
+ * zero bytes.
+ */
+ ssif_info->multi_data = NULL;
+
+ rv = ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ ssif_info->multi_data + ssif_info->multi_pos,
+ I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ /* request failed, just return the error. */
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(3)\n");
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ }
+ } else {
+ ssif_inc_stat(ssif_info, sent_messages);
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ /* Wait a jiffie then request the next message */
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ return;
+ }
+}
+
+static int start_resend(struct ssif_info *ssif_info)
+{
+ int rv;
+ int command;
+
+ if (ssif_info->data_len > 32) {
+ command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+ ssif_info->multi_data = ssif_info->data;
+ ssif_info->multi_len = ssif_info->data_len;
+ /*
+ * Subtle thing, this is 32, not 33, because we will
+ * overwrite the thing at position 32 (which was just
+ * transmitted) with the new length.
+ */
+ ssif_info->multi_pos = 32;
+ ssif_info->data[0] = 32;
+ } else {
+ ssif_info->multi_data = NULL;
+ command = SSIF_IPMI_REQUEST;
+ ssif_info->data[0] = ssif_info->data_len;
+ }
+
+ rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+ if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
+ pr_info("Error from i2c_non_blocking_op(4)\n");
+ return rv;
+}
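Taken together, start_resend() and msg_written_handler() implement the SSIF multi-part request framing: a request of up to 32 bytes goes out as a single SSIF_IPMI_REQUEST block, while a longer request starts with a 32-byte SSIF_IPMI_MULTI_PART_REQUEST_START block followed by MIDDLE blocks, and the transaction ends with a block shorter than 32 bytes (possibly empty). A minimal sketch of that rule, ignoring retries and callbacks, with a hypothetical send_block() standing in for the SMBus block write:

    /* Illustrative sketch only, not driver code; send_block(cmd, buf, len)
     * is a hypothetical stand-in for i2c_smbus_write_block_data().
     */
    static void send_ssif_request(const unsigned char *data, unsigned int len)
    {
            unsigned int pos = 32;

            if (len <= 32) {
                    send_block(SSIF_IPMI_REQUEST, data, len);
                    return;
            }

            /* The first 32 bytes of the message go out with the START command. */
            send_block(SSIF_IPMI_MULTI_PART_REQUEST_START, data, 32);

            for (;;) {
                    unsigned int left = len - pos;

                    if (left > 32)
                            left = 32;
                    send_block(SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
                               data + pos, left);
                    pos += left;
                    if (left < 32)
                            break;  /* a short (possibly empty) block ends the transfer */
            }
    }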
+
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len)
+{
+ if (len > IPMI_MAX_MSG_LENGTH)
+ return -E2BIG;
+ if (len > ssif_info->max_xmit_msg_size)
+ return -E2BIG;
+
+ ssif_info->retries_left = SSIF_SEND_RETRIES;
+ memcpy(ssif_info->data+1, data, len);
+ ssif_info->data_len = len;
+ return start_resend(ssif_info);
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags;
+
+ restart:
+ if (!SSIF_IDLE(ssif_info)) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ if (!ssif_info->waiting_msg) {
+ ssif_info->curr_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ } else {
+ int rv;
+
+ ssif_info->curr_msg = ssif_info->waiting_msg;
+ ssif_info->waiting_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ rv = start_send(ssif_info,
+ ssif_info->curr_msg->data,
+ ssif_info->curr_msg->data_size);
+ if (rv) {
+ msg = ssif_info->curr_msg;
+ ssif_info->curr_msg = NULL;
+ return_hosed_msg(ssif_info, msg);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ goto restart;
+ }
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ unsigned long oflags, *flags;
+
+ BUG_ON(ssif_info->waiting_msg);
+ ssif_info->waiting_msg = msg;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ start_next_msg(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+ struct timeval t;
+
+ do_gettimeofday(&t);
+ pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
+ msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ }
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ data->addr_src = ssif_info->addr_source;
+ data->dev = &ssif_info->client->dev;
+ data->addr_info = ssif_info->addr_info;
+ get_device(data->dev);
+
+ return 0;
+}
+
+/*
+ * Instead of having our own timer to periodically check the message
+ * flags, we let the message handler drive us.
+ */
+static void request_events(void *send_info)
+{
+ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ unsigned long oflags, *flags;
+
+ if (!ssif_info->has_event_buffer)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ /*
+ * Request flags first, not events, because the lower layer
+ * doesn't have a way to send an attention. But make sure
+ * event checking still happens.
+ */
+ ssif_info->req_events = true;
+ if (SSIF_IDLE(ssif_info))
+ start_flag_fetch(ssif_info, flags);
+ else {
+ ssif_info->req_flags = true;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int inc_usecount(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ if (!i2c_get_adapter(ssif_info->client->adapter->nr))
+ return -ENODEV;
+
+ i2c_use_client(ssif_info->client);
+ return 0;
+}
+
+static void dec_usecount(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ i2c_release_client(ssif_info->client);
+ i2c_put_adapter(ssif_info->client->adapter);
+}
+
+static int ssif_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ ssif_info->intf = intf;
+
+ return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "The default IPMB slave address for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging. This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
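For example, an interface entry of dbg=3 turns on both message and state debugging (bits 0 and 1), while dbg=5 combines message and timing debugging.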
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static int use_thread;
+module_param(use_thread, int, 0);
+MODULE_PARM_DESC(use_thread, "Use the thread interface.");
+
+static bool ssif_tryacpi = 1;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = 1;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+static int ssif_remove(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ int rv;
+
+ if (!ssif_info)
+ return 0;
+
+ i2c_set_clientdata(client, NULL);
+
+ /*
+	 * After this point, we won't deliver anything asynchronously
+ * to the message handler. We can unregister ourself.
+ */
+ rv = ipmi_unregister_smi(ssif_info->intf);
+ if (rv) {
+ pr_err(PFX "Unable to unregister device: errno=%d\n", rv);
+ return rv;
+ }
+ ssif_info->intf = NULL;
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread) {
+ complete(&ssif_info->wake_thread);
+ kthread_stop(ssif_info->thread);
+ }
+
+ /*
+ * No message can be outstanding now, we have removed the
+ * upper layer and it permitted us to do so.
+ */
+ kfree(ssif_info);
+ return 0;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ int *resp_len, unsigned char *resp)
+{
+ int retry_cnt;
+ int ret;
+
+ retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0)
+ goto retry1;
+ return -ENODEV;
+ }
+
+ ret = -ENODEV;
+ retry_cnt = SSIF_RECV_RETRIES;
+ while (retry_cnt > 0) {
+ ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+ resp);
+ if (ret > 0)
+ break;
+ msleep(SSIF_MSG_MSEC);
+ retry_cnt--;
+ if (retry_cnt <= 0)
+ break;
+ }
+
+ if (ret > 0) {
+ /* Validate that the response is correct. */
+ if (ret < 3 ||
+ (resp[0] != (msg[0] | (1 << 2))) ||
+ (resp[1] != msg[1]))
+ ret = -EINVAL;
+ else {
+ *resp_len = ret;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ unsigned char *resp;
+ unsigned char msg[3];
+ int rv;
+ int len;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Do a Get Device ID command, since it is required. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ rv = -ENODEV;
+ else
+ strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ kfree(resp);
+ return rv;
+}
+
+static int smi_type_proc_show(struct seq_file *m, void *v)
+{
+ return seq_puts(m, "ssif\n");
+}
+
+static int smi_type_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_type_proc_show, inode->i_private);
+}
+
+static const struct file_operations smi_type_proc_ops = {
+ .open = smi_type_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+ struct ssif_info *ssif_info = m->private;
+
+ seq_printf(m, "sent_messages: %u\n",
+ ssif_get_stat(ssif_info, sent_messages));
+ seq_printf(m, "sent_messages_parts: %u\n",
+ ssif_get_stat(ssif_info, sent_messages_parts));
+ seq_printf(m, "send_retries: %u\n",
+ ssif_get_stat(ssif_info, send_retries));
+ seq_printf(m, "send_errors: %u\n",
+ ssif_get_stat(ssif_info, send_errors));
+ seq_printf(m, "received_messages: %u\n",
+ ssif_get_stat(ssif_info, received_messages));
+ seq_printf(m, "received_message_parts: %u\n",
+ ssif_get_stat(ssif_info, received_message_parts));
+ seq_printf(m, "receive_retries: %u\n",
+ ssif_get_stat(ssif_info, receive_retries));
+ seq_printf(m, "receive_errors: %u\n",
+ ssif_get_stat(ssif_info, receive_errors));
+ seq_printf(m, "flag_fetches: %u\n",
+ ssif_get_stat(ssif_info, flag_fetches));
+ seq_printf(m, "hosed: %u\n",
+ ssif_get_stat(ssif_info, hosed));
+ seq_printf(m, "events: %u\n",
+ ssif_get_stat(ssif_info, events));
+ seq_printf(m, "watchdog_pretimeouts: %u\n",
+ ssif_get_stat(ssif_info, watchdog_pretimeouts));
+ return 0;
+}
+
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_stats_proc_ops = {
+ .open = smi_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ char *adapter_name,
+ bool match_null_name)
+{
+ struct ssif_addr_info *info, *found = NULL;
+
+restart:
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr == addr) {
+ if (info->adapter_name || adapter_name) {
+ if (!info->adapter_name != !adapter_name) {
+ /* One is NULL and one is not */
+ continue;
+ }
+ if (strcmp(info->adapter_name, adapter_name))
+					/* Names do not match */
+ continue;
+ }
+ found = info;
+ break;
+ }
+ }
+
+ if (!found && match_null_name) {
+ /* Try to get an exact match first, then try with a NULL name */
+ adapter_name = NULL;
+ match_null_name = false;
+ goto restart;
+ }
+
+ return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_handle;
+
+ acpi_handle = ACPI_HANDLE(dev);
+ if (acpi_handle) {
+ ssif_info->addr_source = SI_ACPI;
+ ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ return true;
+ }
+#endif
+ return false;
+}
+
+static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ struct ssif_info *ssif_info;
+ int rv = 0;
+ int len;
+ int i;
+ u8 slave_addr = 0;
+ struct ssif_addr_info *addr_info = NULL;
+
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+ if (!ssif_info) {
+ kfree(resp);
+ return -ENOMEM;
+ }
+
+ if (!check_acpi(ssif_info, &client->dev)) {
+ addr_info = ssif_info_find(client->addr, client->adapter->name,
+ true);
+ if (!addr_info) {
+ /* Must have come in through sysfs. */
+ ssif_info->addr_source = SI_HOTMOD;
+ } else {
+ ssif_info->addr_source = addr_info->addr_src;
+ ssif_info->ssif_debug = addr_info->debug;
+ ssif_info->addr_info = addr_info->addr_info;
+ slave_addr = addr_info->slave_addr;
+ }
+ }
+
+ pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+ ipmi_addr_src_to_str(ssif_info->addr_source),
+ client->addr, client->adapter->name, slave_addr);
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ goto out;
+
+ rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
+ if (rv)
+ goto out;
+
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
+ /* Now check for system interface capabilities */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+ msg[2] = 0; /* SSIF */
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (!rv && (len >= 3) && (resp[2] == 0)) {
+ if (len < 7) {
+ if (ssif_dbg_probe)
+ pr_info(PFX "SSIF info too short: %d\n", len);
+ goto no_support;
+ }
+
+ /* Got a good SSIF response, handle it. */
+ ssif_info->max_xmit_msg_size = resp[5];
+ ssif_info->max_recv_msg_size = resp[6];
+ ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+ ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+ /* Sanitize the data */
+ switch (ssif_info->multi_support) {
+ case SSIF_NO_MULTI:
+ if (ssif_info->max_xmit_msg_size > 32)
+ ssif_info->max_xmit_msg_size = 32;
+ if (ssif_info->max_recv_msg_size > 32)
+ ssif_info->max_recv_msg_size = 32;
+ break;
+
+ case SSIF_MULTI_2_PART:
+ if (ssif_info->max_xmit_msg_size > 64)
+ ssif_info->max_xmit_msg_size = 64;
+ if (ssif_info->max_recv_msg_size > 62)
+ ssif_info->max_recv_msg_size = 62;
+ break;
+
+ case SSIF_MULTI_n_PART:
+ break;
+
+ default:
+ /* Data is not sane, just give up. */
+ goto no_support;
+ }
+ } else {
+ no_support:
+ /* Assume no multi-part or PEC support */
+ pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ rv, len, resp[2]);
+
+ ssif_info->max_xmit_msg_size = 32;
+ ssif_info->max_recv_msg_size = 32;
+ ssif_info->multi_support = SSIF_NO_MULTI;
+ ssif_info->supports_pec = 0;
+ }
+
+ /* Make sure the NMI timeout is cleared. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 3) || (resp[2] != 0))
+ pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
+ rv, len, resp[2]);
+
+ /* Attempt to enable the event buffer. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv || (len < 4) || (resp[2] != 0)) {
+ pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ ssif_info->has_event_buffer = true;
+ /* buffer is already enabled, nothing to do. */
+ goto found;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+		pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0)
+ /* A successful return means the event buffer is supported. */
+ ssif_info->has_event_buffer = true;
+
+ found:
+ ssif_info->intf_num = atomic_inc_return(&next_intf);
+
+ if (ssif_dbg_probe) {
+ pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+ client->addr);
+ }
+
+ spin_lock_init(&ssif_info->lock);
+ ssif_info->ssif_state = SSIF_NORMAL;
+ init_timer(&ssif_info->retry_timer);
+ ssif_info->retry_timer.data = (unsigned long) ssif_info;
+ ssif_info->retry_timer.function = retry_timeout;
+
+ for (i = 0; i < SSIF_NUM_STATS; i++)
+ atomic_set(&ssif_info->stats[i], 0);
+
+ if (ssif_info->supports_pec)
+ ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+ ssif_info->handlers.owner = THIS_MODULE;
+ ssif_info->handlers.start_processing = ssif_start_processing;
+ ssif_info->handlers.get_smi_info = get_smi_info;
+ ssif_info->handlers.sender = sender;
+ ssif_info->handlers.request_events = request_events;
+ ssif_info->handlers.inc_usecount = inc_usecount;
+ ssif_info->handlers.dec_usecount = dec_usecount;
+
+ {
+ unsigned int thread_num;
+
+ thread_num = ((ssif_info->client->adapter->nr << 8) |
+ ssif_info->client->addr);
+ init_completion(&ssif_info->wake_thread);
+ ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+ "kssif%4.4x", thread_num);
+ if (IS_ERR(ssif_info->thread)) {
+ rv = PTR_ERR(ssif_info->thread);
+ dev_notice(&ssif_info->client->dev,
+ "Could not start kernel thread: error %d\n",
+ rv);
+ goto out;
+ }
+ }
+
+ rv = ipmi_register_smi(&ssif_info->handlers,
+ ssif_info,
+ &ssif_info->device_id,
+ &ssif_info->client->dev,
+ slave_addr);
+ if (rv) {
+ pr_err(PFX "Unable to register device: error %d\n", rv);
+ goto out;
+ }
+
+ rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
+ &smi_type_proc_ops,
+ ssif_info);
+ if (rv) {
+ pr_err(PFX "Unable to create proc entry: %d\n", rv);
+ goto out_err_unreg;
+ }
+
+ rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats",
+ &smi_stats_proc_ops,
+ ssif_info);
+ if (rv) {
+ pr_err(PFX "Unable to create proc entry: %d\n", rv);
+ goto out_err_unreg;
+ }
+
+ out:
+ if (rv)
+ kfree(ssif_info);
+ kfree(resp);
+ return rv;
+
+ out_err_unreg:
+ ipmi_unregister_smi(ssif_info->intf);
+ goto out;
+}
+
+static int ssif_adapter_handler(struct device *adev, void *opaque)
+{
+ struct ssif_addr_info *addr_info = opaque;
+
+ if (adev->type != &i2c_adapter_type)
+ return 0;
+
+ i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+
+ if (!addr_info->adapter_name)
+ return 1; /* Only try the first I2C adapter by default. */
+ return 0;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+ int debug, int slave_addr,
+ enum ipmi_addr_src addr_src)
+{
+ struct ssif_addr_info *addr_info;
+ int rv = 0;
+
+ mutex_lock(&ssif_infos_mutex);
+ if (ssif_info_find(addr, adapter_name, false)) {
+ rv = -EEXIST;
+ goto out_unlock;
+ }
+
+ addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+ if (!addr_info) {
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (adapter_name) {
+ addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+ if (!addr_info->adapter_name) {
+ kfree(addr_info);
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+
+ strncpy(addr_info->binfo.type, DEVICE_NAME,
+ sizeof(addr_info->binfo.type));
+ addr_info->binfo.addr = addr;
+ addr_info->binfo.platform_data = addr_info;
+ addr_info->debug = debug;
+ addr_info->slave_addr = slave_addr;
+ addr_info->addr_src = addr_src;
+
+ list_add_tail(&addr_info->link, &ssif_infos);
+
+ if (initialized)
+ i2c_for_each_dev(addr_info, ssif_adapter_handler);
+ /* Otherwise address list will get it */
+
+out_unlock:
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+}
+
+static void free_ssif_clients(void)
+{
+ struct ssif_addr_info *info, *tmp;
+
+ mutex_lock(&ssif_infos_mutex);
+ list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+ list_del(&info->link);
+ kfree(info->adapter_name);
+ kfree(info);
+ }
+ mutex_unlock(&ssif_infos_mutex);
+}
+
+static unsigned short *ssif_address_list(void)
+{
+ struct ssif_addr_info *info;
+ unsigned int count = 0, i;
+ unsigned short *address_list;
+
+ list_for_each_entry(info, &ssif_infos, link)
+ count++;
+
+ address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL);
+ if (!address_list)
+ return NULL;
+
+ i = 0;
+ list_for_each_entry(info, &ssif_infos, link) {
+ unsigned short addr = info->binfo.addr;
+ int j;
+
+ for (j = 0; j < i; j++) {
+ if (address_list[j] == addr)
+ goto skip_addr;
+ }
+ address_list[i] = addr;
+skip_addr:
+ i++;
+ }
+ address_list[i] = I2C_CLIENT_END;
+
+ return address_list;
+}
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id ssif_acpi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/*
+ * Defined in the IPMI 2.0 spec.
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+ unsigned short myaddr;
+
+ if (num_addrs >= MAX_SSIF_BMCS)
+ return -1;
+
+ if (spmi->IPMIlegacy != 1) {
+ pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ if (spmi->InterfaceType != 4)
+ return -ENODEV;
+
+ if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) {
+ pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n",
+ spmi->addr.space_id);
+ return -EIO;
+ }
+
+ myaddr = spmi->addr.address >> 1;
+
+ return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
+}
+
+static void spmi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+ (struct acpi_table_header **)&spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_spmi(spmi);
+ }
+}
+#else
+static void spmi_find_bmc(void) { }
+#endif
+
+#ifdef CONFIG_DMI
+static int decode_dmi(const struct dmi_device *dmi_dev)
+{
+ struct dmi_header *dm = dmi_dev->device_data;
+ u8 *data = (u8 *) dm;
+ u8 len = dm->length;
+ unsigned short myaddr;
+ int slave_addr;
+
+ if (num_addrs >= MAX_SSIF_BMCS)
+ return -1;
+
+ if (len < 9)
+ return -1;
+
+ if (data[0x04] != 4) /* Not SSIF */
+ return -1;
+
+ if ((data[8] >> 1) == 0) {
+ /*
+ * Some broken systems put the I2C address in
+ * the slave address field. We try to
+ * accommodate them here.
+ */
+ myaddr = data[6] >> 1;
+ slave_addr = 0;
+ } else {
+ myaddr = data[8] >> 1;
+ slave_addr = data[6];
+ }
+
+	return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
+}
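
A worked illustration of the decode above (all values invented): a well-formed record with 0x40 at offset 8 resolves to BMC address 0x20 and takes the slave address from offset 6, while a broken record with 0 at offset 8 falls back to treating offset 6 as the BMC address itself.

/*
 * Hypothetical helper mirroring decode_dmi()'s fallback, for
 * illustration only.
 */
static unsigned short example_bmc_addr(const u8 *data)
{
	if ((data[8] >> 1) == 0)	/* broken firmware case */
		return data[6] >> 1;
	return data[8] >> 1;		/* normal case */
}
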
+
+static void dmi_iterator(void)
+{
+ const struct dmi_device *dev = NULL;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+ decode_dmi(dev);
+}
+#else
+static void dmi_iterator(void) { }
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_NAME
+ },
+ .probe = ssif_probe,
+ .remove = ssif_remove,
+ .id_table = ssif_id,
+ .detect = ssif_detect
+};
+
+static int init_ipmi_ssif(void)
+{
+ int i;
+ int rv;
+
+ if (initialized)
+ return 0;
+
+ pr_info("IPMI SSIF Interface driver\n");
+
+ /* build list for i2c from addr list */
+ for (i = 0; i < num_addrs; i++) {
+ rv = new_ssif_client(addr[i], adapter_name[i],
+ dbg[i], slave_addrs[i],
+ SI_HARDCODED);
+		if (rv)
+ pr_err(PFX
+ "Couldn't add hardcoded device at addr 0x%x\n",
+ addr[i]);
+ }
+
+ if (ssif_tryacpi)
+ ssif_i2c_driver.driver.acpi_match_table =
+ ACPI_PTR(ssif_acpi_match);
+ if (ssif_trydmi)
+ dmi_iterator();
+ if (ssif_tryacpi)
+ spmi_find_bmc();
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+
+ rv = i2c_add_driver(&ssif_i2c_driver);
+ if (!rv)
+ initialized = true;
+
+ return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void cleanup_ipmi_ssif(void)
+{
+ if (!initialized)
+ return;
+
+ initialized = false;
+
+ i2c_del_driver(&ssif_i2c_driver);
+
+ free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on an SMBus");
+MODULE_LICENSE("GPL");
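
For context on the registration above: the I2C core only auto-detects devices for a driver that provides an adapter class, an I2C_CLIENT_END-terminated address list and a detect() callback, which is exactly what ssif_i2c_driver sets up before i2c_add_driver() runs. A minimal sketch of that shape, with invented names:

static const unsigned short example_addrs[] = { 0x42, I2C_CLIENT_END };

static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	/* probe the device here; report its name on success */
	strlcpy(info->type, "example-dev", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_driver = {
	.class		= I2C_CLASS_HWMON,	/* adapters to scan */
	.driver		= { .name = "example-dev" },
	.detect		= example_detect,
	.address_list	= example_addrs,
};
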
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 7cc1fe2241fd..e496daefe9e0 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1482,7 +1482,6 @@ static void sonypi_shutdown(struct platform_device *dev)
static struct platform_driver sonypi_driver = {
.driver = {
.name = "sonypi",
- .owner = THIS_MODULE,
.pm = SONYPI_PM,
},
.probe = sonypi_probe,
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 47b9fdfcf083..480a777db577 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -337,7 +337,6 @@ static struct platform_driver tb0219_device_driver = {
.remove = tb0219_remove,
.driver = {
.name = "TB0219",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 6069d13ae4ac..435c8b9dd2f8 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -152,7 +152,6 @@ static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
static struct platform_driver atml_drv = {
.driver = {
.name = "tpm_atmel",
- .owner = THIS_MODULE,
.pm = &tpm_atml_pm,
},
};
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 3179ec9cffdc..4d0a17ea8cde 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -258,7 +258,6 @@ static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
static struct platform_driver nsc_drv = {
.driver = {
.name = "tpm_nsc",
- .owner = THIS_MODULE,
.pm = &tpm_nsc_pm,
},
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 2c46734b266d..6f1985496112 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -837,7 +837,6 @@ MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
- .owner = THIS_MODULE,
.pm = &tpm_tis_pm,
},
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index cf7a561fad7c..de03df9dd7c9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -355,7 +355,7 @@ static inline bool use_multiport(struct ports_device *portdev)
*/
if (!portdev->vdev)
return 0;
- return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
+ return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
}
static DEFINE_SPINLOCK(dma_bufs_lock);
@@ -566,9 +566,9 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
if (!use_multiport(portdev))
return 0;
- cpkt.id = port_id;
- cpkt.event = event;
- cpkt.value = value;
+ cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+ cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+ cpkt.value = cpu_to_virtio16(portdev->vdev, value);
vq = portdev->c_ovq;
@@ -669,8 +669,8 @@ done:
* Give out the data that's requested from the buffer that we have
* queued up.
*/
-static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
- bool to_user)
+static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+ size_t out_count, bool to_user)
{
struct port_buffer *buf;
unsigned long flags;
@@ -688,7 +688,8 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
if (ret)
return -EFAULT;
} else {
- memcpy(out_buf, buf->buf + buf->offset, out_count);
+ memcpy((__force char *)out_buf, buf->buf + buf->offset,
+ out_count);
}
buf->offset += out_count;
@@ -1162,7 +1163,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
- return fill_readbuf(port, buf, count, false);
+ return fill_readbuf(port, (__force char __user *)buf, count, false);
}
static void resize_console(struct port *port)
@@ -1602,7 +1603,8 @@ static void unplug_port(struct port *port)
}
/* Any private messages that the Host and Guest want to share */
-static void handle_control_message(struct ports_device *portdev,
+static void handle_control_message(struct virtio_device *vdev,
+ struct ports_device *portdev,
struct port_buffer *buf)
{
struct virtio_console_control *cpkt;
@@ -1612,15 +1614,16 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
- port = find_port_by_id(portdev, cpkt->id);
- if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
+ port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
+ if (!port &&
+ cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
return;
}
- switch (cpkt->event) {
+ switch (virtio16_to_cpu(vdev, cpkt->event)) {
case VIRTIO_CONSOLE_PORT_ADD:
if (port) {
dev_dbg(&portdev->vdev->dev,
@@ -1628,13 +1631,15 @@ static void handle_control_message(struct ports_device *portdev,
send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
break;
}
- if (cpkt->id >= portdev->config.max_nr_ports) {
+ if (virtio32_to_cpu(vdev, cpkt->id) >=
+ portdev->config.max_nr_ports) {
dev_warn(&portdev->vdev->dev,
- "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ "Request for adding port with "
+ "out-of-bound id %u, max. supported id: %u\n",
cpkt->id, portdev->config.max_nr_ports - 1);
break;
}
- add_port(portdev, cpkt->id);
+ add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
break;
case VIRTIO_CONSOLE_PORT_REMOVE:
unplug_port(port);
@@ -1670,7 +1675,7 @@ static void handle_control_message(struct ports_device *portdev,
break;
}
case VIRTIO_CONSOLE_PORT_OPEN:
- port->host_connected = cpkt->value;
+ port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
wake_up_interruptible(&port->waitqueue);
/*
* If the host port got closed and the host had any
@@ -1752,7 +1757,7 @@ static void control_work_handler(struct work_struct *work)
buf->len = len;
buf->offset = 0;
- handle_control_message(portdev, buf);
+ handle_control_message(vq->vdev, portdev, buf);
spin_lock(&portdev->c_ivq_lock);
if (add_inbuf(portdev->c_ivq, buf) < 0) {
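
The virtio_console hunks above switch the control-message path to the endian-aware virtio accessors: fields shared with the device stay in virtio byte order and are converted at each access with helpers that take the virtio_device, so the conversion follows the negotiated feature set. A minimal sketch of the same pattern, with an invented structure:

/* Invented structure, shown only to illustrate the conversion pattern. */
struct example_ctrl {
	__virtio32 id;
	__virtio16 event;
	__virtio16 value;
};

static void example_pack(struct virtio_device *vdev, struct example_ctrl *c,
			 u32 id, u16 event, u16 value)
{
	c->id    = cpu_to_virtio32(vdev, id);	/* convert on the way out */
	c->event = cpu_to_virtio16(vdev, event);
	c->value = cpu_to_virtio16(vdev, value);
}

static u16 example_event(struct virtio_device *vdev, struct example_ctrl *c)
{
	return virtio16_to_cpu(vdev, c->event);	/* convert on the way in */
}
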
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 1ca0c7a4f1be..2002a3a28146 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -179,7 +179,6 @@ static struct platform_driver xillybus_platform_driver = {
.remove = xilly_drv_remove,
.driver = {
.name = xillyname,
- .owner = THIS_MODULE,
.of_match_table = xillybus_of_match,
},
};
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 455fd17d938e..3f44f292d066 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -28,7 +28,7 @@ config COMMON_CLK_WM831X
depends on MFD_WM831X
---help---
Supports the clocking subsystem of the WM831x/2x series of
- PMICs from Wolfson Microlectronics.
+ PMICs from Wolfson Microelectronics.
source "drivers/clk/versatile/Kconfig"
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index d2f1e119b450..0f6368ceec4c 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -593,7 +593,6 @@ static struct platform_driver axmclk_driver = {
.remove = axmclk_remove,
.driver = {
.name = "clk-axm5516",
- .owner = THIS_MODULE,
.of_match_table = axmclk_match_table,
},
};
diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c
index f20b750235f6..ca80103ac188 100644
--- a/drivers/clk/clk-ls1x.c
+++ b/drivers/clk/clk-ls1x.c
@@ -15,7 +15,8 @@
#include <loongson1.h>
-#define OSC 33
+#define OSC (33 * 1000000)
+#define DIV_APB 2
static DEFINE_SPINLOCK(_lock);
@@ -29,13 +30,12 @@ static void ls1x_pll_clk_disable(struct clk_hw *hw)
}
static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
u32 pll, rate;
pll = __raw_readl(LS1X_CLK_PLL_FREQ);
- rate = ((12 + (pll & 0x3f)) * 1000000) +
- ((((pll >> 8) & 0x3ff) * 1000000) >> 10);
+ rate = 12 + (pll & 0x3f) + (((pll >> 8) & 0x3ff) >> 10);
rate *= OSC;
rate >>= 1;
@@ -48,8 +48,10 @@ static const struct clk_ops ls1x_pll_clk_ops = {
.recalc_rate = ls1x_pll_recalc_rate,
};
-static struct clk * __init clk_register_pll(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags)
+static struct clk *__init clk_register_pll(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags)
{
struct clk_hw *hw;
struct clk *clk;
@@ -78,34 +80,83 @@ static struct clk * __init clk_register_pll(struct device *dev,
return clk;
}
+static const char *cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", };
+static const char *ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", };
+static const char *dc_parents[] = { "dc_clk_div", "osc_33m_clk", };
+
void __init ls1x_clk_init(void)
{
struct clk *clk;
- clk = clk_register_pll(NULL, "pll_clk", NULL, CLK_IS_ROOT);
- clk_prepare_enable(clk);
-
- clk = clk_register_divider(NULL, "cpu_clk", "pll_clk",
- CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_CPU_SHIFT,
- DIV_CPU_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_prepare_enable(clk);
- clk_register_clkdev(clk, "cpu", NULL);
-
- clk = clk_register_divider(NULL, "dc_clk", "pll_clk",
- CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
- DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_prepare_enable(clk);
- clk_register_clkdev(clk, "dc", NULL);
-
- clk = clk_register_divider(NULL, "ahb_clk", "pll_clk",
- CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
- DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
- clk_prepare_enable(clk);
- clk_register_clkdev(clk, "ahb", NULL);
+ clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, CLK_IS_ROOT,
+ OSC);
+ clk_register_clkdev(clk, "osc_33m_clk", NULL);
+
+ /* clock derived from 33 MHz OSC clk */
+ clk = clk_register_pll(NULL, "pll_clk", "osc_33m_clk", 0);
+ clk_register_clkdev(clk, "pll_clk", NULL);
+
+ /* clock derived from PLL clk */
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ CPU CLK
+ * \___ PLL ___ CPU DIV ___| |
+ * |_____|
+ */
+ clk = clk_register_divider(NULL, "cpu_clk_div", "pll_clk",
+ CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
+ DIV_CPU_SHIFT, DIV_CPU_WIDTH,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST, &_lock);
+ clk_register_clkdev(clk, "cpu_clk_div", NULL);
+ clk = clk_register_mux(NULL, "cpu_clk", cpu_parents,
+ ARRAY_SIZE(cpu_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ DC CLK
+ * \___ PLL ___ DC DIV ___| |
+ * |_____|
+ */
+ clk = clk_register_divider(NULL, "dc_clk_div", "pll_clk",
+ 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
+ DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
+ clk_register_clkdev(clk, "dc_clk_div", NULL);
+ clk = clk_register_mux(NULL, "dc_clk", dc_parents,
+ ARRAY_SIZE(dc_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock);
+ clk_register_clkdev(clk, "dc_clk", NULL);
+
+ /* _____
+ * _______________________| |
+ * OSC ___/ | MUX |___ DDR CLK
+ * \___ PLL ___ DDR DIV ___| |
+ * |_____|
+ */
+ clk = clk_register_divider(NULL, "ahb_clk_div", "pll_clk",
+ 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
+ DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED,
+ &_lock);
+ clk_register_clkdev(clk, "ahb_clk_div", NULL);
+ clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+ ARRAY_SIZE(ahb_parents),
+ CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
+ BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
clk_register_clkdev(clk, "stmmaceth", NULL);
- clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1, 2);
- clk_prepare_enable(clk);
- clk_register_clkdev(clk, "apb", NULL);
+ /* clock derived from AHB clk */
+ /* APB clk is always half of the AHB clk */
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ DIV_APB);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+ clk_register_clkdev(clk, "ls1x_i2c", NULL);
+ clk_register_clkdev(clk, "ls1x_pwmtimer", NULL);
+ clk_register_clkdev(clk, "ls1x_spi", NULL);
+ clk_register_clkdev(clk, "ls1x_wdt", NULL);
clk_register_clkdev(clk, "serial8250", NULL);
}
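
A worked example of the PLL math above, with an assumed register value: if LS1X_CLK_PLL_FREQ holds 0x20 (multiplier 32, fractional field clear), ls1x_pll_recalc_rate() returns (12 + 32) * 33 MHz / 2 = 726 MHz, and apb_clk then runs at ahb_clk / DIV_APB.

	u32 pll = 0x20;			/* assumed register value */
	unsigned long rate;

	rate = 12 + (pll & 0x3f);	/* 44 */
	rate *= 33 * 1000000;		/* OSC */
	rate >>= 1;			/* 726000000 Hz */
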
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index 8e58edfeeb37..b6e6c85507a5 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -294,7 +294,6 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
static struct platform_driver ppc_corenet_clk_driver __initdata = {
.driver = {
.name = "ppc_corenet_clock",
- .owner = THIS_MODULE,
.of_match_table = ppc_clk_ids,
},
.probe = ppc_corenet_clk_probe,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7bb13af8e214..87a41038237d 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -312,7 +312,6 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
static struct platform_driver s2mps11_clk_driver = {
.driver = {
.name = "s2mps11-clk",
- .owner = THIS_MODULE,
},
.probe = s2mps11_clk_probe,
.remove = s2mps11_clk_remove,
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index ee52eb1c838a..e3ef90264214 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3589,7 +3589,6 @@ static struct platform_driver gcc_apq8084_driver = {
.remove = gcc_apq8084_remove,
.driver = {
.name = "gcc-apq8084",
- .owner = THIS_MODULE,
.of_match_table = gcc_apq8084_match_table,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 5cd62a709ac7..afed5eb0691e 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -2431,7 +2431,6 @@ static struct platform_driver gcc_ipq806x_driver = {
.remove = gcc_ipq806x_remove,
.driver = {
.name = "gcc-ipq806x",
- .owner = THIS_MODULE,
.of_match_table = gcc_ipq806x_match_table,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 0c4b727ae429..f366e68f7316 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2744,7 +2744,6 @@ static struct platform_driver gcc_msm8660_driver = {
.remove = gcc_msm8660_remove,
.driver = {
.name = "gcc-msm8660",
- .owner = THIS_MODULE,
.of_match_table = gcc_msm8660_match_table,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 007534f7a2d7..b0b562b9ce0e 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3519,7 +3519,6 @@ static struct platform_driver gcc_msm8960_driver = {
.remove = gcc_msm8960_remove,
.driver = {
.name = "gcc-msm8960",
- .owner = THIS_MODULE,
.of_match_table = gcc_msm8960_match_table,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 7af7c18d2144..a6937fe78d8a 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2737,7 +2737,6 @@ static struct platform_driver gcc_msm8974_driver = {
.remove = gcc_msm8974_remove,
.driver = {
.name = "gcc-msm8974",
- .owner = THIS_MODULE,
.of_match_table = gcc_msm8974_match_table,
},
};
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 13eae14c2cc2..acce708ace18 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -223,7 +223,6 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
static struct platform_driver exynos_audss_clk_driver = {
.driver = {
.name = "exynos-audss-clk",
- .owner = THIS_MODULE,
.of_match_table = exynos_audss_clk_of_match,
},
.probe = exynos_audss_clk_probe,
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index a8053b4aca56..de4455b75e8a 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -216,7 +216,6 @@ static const struct of_device_id s5pv210_audss_clk_of_match[] = {
static struct platform_driver s5pv210_audss_clk_driver = {
.driver = {
.name = "s5pv210-audss-clk",
- .owner = THIS_MODULE,
.of_match_table = s5pv210_audss_clk_of_match,
},
.probe = s5pv210_audss_clk_probe,
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index e7bd62cf60b3..3e5e05101302 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -121,7 +121,6 @@ static int abx500_clk_probe(struct platform_device *pdev)
static struct platform_driver abx500_clk_driver = {
.driver = {
.name = "abx500-clk",
- .owner = THIS_MODULE,
},
.probe = abx500_clk_probe,
};
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 812f83f8b0c6..f827083defc4 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -42,7 +42,6 @@ static int lpt_clk_probe(struct platform_device *pdev)
static struct platform_driver lpt_clk_driver = {
.driver = {
.name = "clk-lpt",
- .owner = THIS_MODULE,
},
.probe = lpt_clk_probe,
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f657a48d20eb..fc01ec27d3c8 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -224,4 +224,9 @@ config CLKSRC_VERSATILE
ARM Versatile, RealView and Versatile Express reference
platforms.
+config CLKSRC_MIPS_GIC
+ bool
+ depends on MIPS_GIC
+ select CLKSRC_OF
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fae0435cc23d..94d90b24b56b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
+obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
new file mode 100644
index 000000000000..3bd31b1321f6
--- /dev/null
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -0,0 +1,166 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/notifier.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+static int gic_timer_irq;
+static unsigned int gic_frequency;
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ u64 cnt;
+ int res;
+
+ cnt = gic_read_count();
+ cnt += (u64)delta;
+ gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
+ res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+static void gic_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+
+ gic_write_compare(gic_read_compare());
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+ .handler = gic_compare_interrupt,
+ .percpu_dev_id = &gic_clockevent_device,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "timer",
+};
+
+static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cd->name = "MIPS GIC";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP;
+
+ cd->rating = 350;
+ cd->irq = gic_timer_irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = gic_next_event;
+ cd->set_mode = gic_set_clock_mode;
+
+ clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);
+
+ enable_percpu_irq(gic_timer_irq, IRQ_TYPE_NONE);
+}
+
+static void gic_clockevent_cpu_exit(struct clock_event_device *cd)
+{
+ disable_percpu_irq(gic_timer_irq);
+}
+
+static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
+ break;
+ case CPU_DYING:
+ gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_nb = {
+ .notifier_call = gic_cpu_notifier,
+};
+
+static int gic_clockevent_init(void)
+{
+ if (!cpu_has_counter || !gic_frequency)
+ return -ENXIO;
+
+ setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+
+ register_cpu_notifier(&gic_cpu_nb);
+
+ gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
+
+ return 0;
+}
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+ return gic_read_count();
+}
+
+static struct clocksource gic_clocksource = {
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init __gic_clocksource_init(void)
+{
+ /* Set clocksource mask. */
+ gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
+
+ /* Calculate a somewhat reasonable rating value. */
+ gic_clocksource.rating = 200 + gic_frequency / 10000000;
+
+ clocksource_register_hz(&gic_clocksource, gic_frequency);
+
+ gic_clockevent_init();
+}
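
For reference, the rating heuristic above with an assumed frequency: a GIC counter clocked at 50 MHz gets rating 200 + 50000000 / 10000000 = 205, so faster counters rate slightly higher.

	unsigned int freq = 50000000;		/* assumed frequency */
	int rating = 200 + freq / 10000000;	/* 205 */
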
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+ gic_frequency = frequency;
+ gic_timer_irq = MIPS_GIC_IRQ_BASE +
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
+
+ __gic_clocksource_init();
+}
+
+static void __init gic_clocksource_of_init(struct device_node *node)
+{
+ if (WARN_ON(!gic_present || !node->parent ||
+ !of_device_is_compatible(node->parent, "mti,gic")))
+ return;
+
+ if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) {
+ pr_err("GIC frequency not specified.\n");
+ return;
+ }
+ gic_timer_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_timer_irq) {
+ pr_err("GIC timer IRQ not specified.\n");
+ return;
+ }
+
+ __gic_clocksource_init();
+}
+CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
+ gic_clocksource_of_init);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f612d68629dc..30f522848c73 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -141,12 +141,18 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
*/
static int cn_call_callback(struct sk_buff *skb)
{
+ struct nlmsghdr *nlh;
struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
+ /* verify msg->len is within skb */
+ nlh = nlmsg_hdr(skb);
+ if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
+ return -EINVAL;
+
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) {
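
The check added above rejects connector messages whose embedded cn_msg length claims more payload than the received netlink message can hold. Stated on its own (helper name invented):

/* Hypothetical restatement of the bound enforced above. */
static bool example_cn_payload_fits(const struct nlmsghdr *nlh,
				    const struct cn_msg *msg)
{
	/* netlink header + connector header + claimed payload must all
	 * fit inside the netlink message that was actually received */
	return nlh->nlmsg_len >= NLMSG_HDRLEN + sizeof(*msg) + msg->len;
}
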
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
new file mode 100644
index 000000000000..4b4bec890ef5
--- /dev/null
+++ b/drivers/coresight/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for CoreSight drivers.
+#
+obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
+obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
+obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
+ coresight-replicator.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/coresight/coresight-etb10.c
new file mode 100644
index 000000000000..c922d4aded8a
--- /dev/null
+++ b/drivers/coresight/coresight-etb10.c
@@ -0,0 +1,537 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/seq_file.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define ETB_RAM_DEPTH_REG 0x004
+#define ETB_STATUS_REG 0x00c
+#define ETB_RAM_READ_DATA_REG 0x010
+#define ETB_RAM_READ_POINTER 0x014
+#define ETB_RAM_WRITE_POINTER 0x018
+#define ETB_TRG 0x01c
+#define ETB_CTL_REG 0x020
+#define ETB_RWD_REG 0x024
+#define ETB_FFSR 0x300
+#define ETB_FFCR 0x304
+#define ETB_ITMISCOP0 0xee0
+#define ETB_ITTRFLINACK 0xee4
+#define ETB_ITTRFLIN 0xee8
+#define ETB_ITATBDATA0 0xeeC
+#define ETB_ITATBCTR2 0xef0
+#define ETB_ITATBCTR1 0xef4
+#define ETB_ITATBCTR0 0xef8
+
+/* register description */
+/* STS - 0x00C */
+#define ETB_STATUS_RAM_FULL BIT(0)
+/* CTL - 0x020 */
+#define ETB_CTL_CAPT_EN BIT(0)
+/* FFCR - 0x304 */
+#define ETB_FFCR_EN_FTC BIT(0)
+#define ETB_FFCR_FON_MAN BIT(6)
+#define ETB_FFCR_STOP_FI BIT(12)
+#define ETB_FFCR_STOP_TRIGGER BIT(13)
+
+#define ETB_FFCR_BIT 6
+#define ETB_FFSR_BIT 1
+#define ETB_FRAME_SIZE_WORDS 4
+
+/**
+ * struct etb_drvdata - specifics associated to an ETB component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated to this component.
+ * @csdev: component vitals needed by the framework.
+ * @miscdev: specifics to handle "/dev/xyz.etb" entry.
+ * @clk: the clock this component is associated to.
+ * @spinlock: serialise access to this structure.
+ * @in_use: synchronise user space access to etb buffer.
+ * @buf: area of memory where ETB buffer content gets sent.
+ * @buffer_depth: size of @buf.
+ * @enable: this ETB is being used.
+ * @trigger_cntr: number of words to store after a trigger.
+ */
+struct etb_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct miscdevice miscdev;
+ struct clk *clk;
+ spinlock_t spinlock;
+ atomic_t in_use;
+ u8 *buf;
+ u32 buffer_depth;
+ bool enable;
+ u32 trigger_cntr;
+};
+
+static int etb_get_buffer_depth(struct etb_drvdata *drvdata)
+{
+	int ret;
+	int depth = 0;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ /* RO registers don't need locking */
+ depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
+
+ clk_disable_unprepare(drvdata->clk);
+ return depth;
+}
+
+static void etb_enable_hw(struct etb_drvdata *drvdata)
+{
+ int i;
+ u32 depth;
+
+ CS_UNLOCK(drvdata->base);
+
+ depth = drvdata->buffer_depth;
+ /* reset write RAM pointer address */
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+ /* clear entire RAM buffer */
+ for (i = 0; i < depth; i++)
+ writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);
+
+ /* reset write RAM pointer address */
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+ /* reset read RAM pointer address */
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
+ writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
+ drvdata->base + ETB_FFCR);
+ /* ETB trace capture enable */
+ writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int etb_enable(struct coresight_device *csdev)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+ unsigned long flags;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ etb_enable_hw(drvdata);
+ drvdata->enable = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "ETB enabled\n");
+ return 0;
+}
+
+static void etb_disable_hw(struct etb_drvdata *drvdata)
+{
+ u32 ffcr;
+
+ CS_UNLOCK(drvdata->base);
+
+ ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
+ /* stop formatter when a stop has completed */
+ ffcr |= ETB_FFCR_STOP_FI;
+ writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
+ /* manually generate a flush of the system */
+ ffcr |= ETB_FFCR_FON_MAN;
+ writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
+
+ if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+ ETB_FFCR);
+ }
+
+ /* disable trace capture */
+ writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
+
+ if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+			ETB_FFSR);
+ }
+
+ CS_LOCK(drvdata->base);
+}
+
+static void etb_dump_hw(struct etb_drvdata *drvdata)
+{
+ int i;
+ u8 *buf_ptr;
+ u32 read_data, depth;
+ u32 read_ptr, write_ptr;
+ u32 frame_off, frame_endoff;
+
+ CS_UNLOCK(drvdata->base);
+
+ read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+ write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+ frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
+ frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
+ if (frame_off) {
+ dev_err(drvdata->dev,
+ "write_ptr: %lu not aligned to formatter frame size\n",
+ (unsigned long)write_ptr);
+ dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
+ (unsigned long)frame_off, (unsigned long)frame_endoff);
+ write_ptr += frame_endoff;
+ }
+
+ if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
+ & ETB_STATUS_RAM_FULL) == 0)
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+ else
+ writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+ depth = drvdata->buffer_depth;
+ buf_ptr = drvdata->buf;
+ for (i = 0; i < depth; i++) {
+ read_data = readl_relaxed(drvdata->base +
+ ETB_RAM_READ_DATA_REG);
+ *buf_ptr++ = read_data >> 0;
+ *buf_ptr++ = read_data >> 8;
+ *buf_ptr++ = read_data >> 16;
+ *buf_ptr++ = read_data >> 24;
+ }
+
+ if (frame_off) {
+ buf_ptr -= (frame_endoff * 4);
+ for (i = 0; i < frame_endoff; i++) {
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ }
+ }
+
+ writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+ CS_LOCK(drvdata->base);
+}
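
The alignment handling above pads the dump out to the 4-word formatter frame; a small worked case with an assumed pointer value:

	u32 write_ptr = 10;					/* assumed */
	u32 frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;	/* 2 */
	u32 frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;	/* 2 */

	/* two extra words are read and then zeroed so the dump ends
	 * on a frame boundary */
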
+
+static void etb_disable(struct coresight_device *csdev)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ etb_disable_hw(drvdata);
+ etb_dump_hw(drvdata);
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "ETB disabled\n");
+}
+
+static const struct coresight_ops_sink etb_sink_ops = {
+ .enable = etb_enable,
+ .disable = etb_disable,
+};
+
+static const struct coresight_ops etb_cs_ops = {
+ .sink_ops = &etb_sink_ops,
+};
+
+static void etb_dump(struct etb_drvdata *drvdata)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enable) {
+ etb_disable_hw(drvdata);
+ etb_dump_hw(drvdata);
+ etb_enable_hw(drvdata);
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "ETB dumped\n");
+}
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+ struct etb_drvdata *drvdata = container_of(file->private_data,
+ struct etb_drvdata, miscdev);
+
+ if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+ return -EBUSY;
+
+ dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ return 0;
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+ size_t len, loff_t *ppos)
+{
+ u32 depth;
+ struct etb_drvdata *drvdata = container_of(file->private_data,
+ struct etb_drvdata, miscdev);
+
+ etb_dump(drvdata);
+
+ depth = drvdata->buffer_depth;
+ if (*ppos + len > depth * 4)
+ len = depth * 4 - *ppos;
+
+ if (copy_to_user(data, drvdata->buf + *ppos, len)) {
+ dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
+ __func__, len, (int) (depth * 4 - *ppos));
+ return len;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+ struct etb_drvdata *drvdata = container_of(file->private_data,
+ struct etb_drvdata, miscdev);
+ atomic_set(&drvdata->in_use, 0);
+
+ dev_dbg(drvdata->dev, "%s: released\n", __func__);
+ return 0;
+}
+
+static const struct file_operations etb_fops = {
+ .owner = THIS_MODULE,
+ .open = etb_open,
+ .read = etb_read,
+ .release = etb_release,
+ .llseek = no_llseek,
+};
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long flags;
+ u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
+ u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
+ struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
+ etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+ etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+ etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+ etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
+ etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
+ etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
+ etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ return sprintf(buf,
+ "Depth:\t\t0x%x\n"
+ "Status:\t\t0x%x\n"
+ "RAM read ptr:\t0x%x\n"
+ "RAM wrt ptr:\t0x%x\n"
+ "Trigger cnt:\t0x%x\n"
+ "Control:\t0x%x\n"
+ "Flush status:\t0x%x\n"
+ "Flush ctrl:\t0x%x\n",
+ etb_rdr, etb_sr, etb_rrp, etb_rwp,
+ etb_trg, etb_cr, etb_ffsr, etb_ffcr);
+out:
+ return -EINVAL;
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t trigger_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->trigger_cntr;
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_cntr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->trigger_cntr = val;
+ return size;
+}
+static DEVICE_ATTR_RW(trigger_cntr);
+
+static struct attribute *coresight_etb_attrs[] = {
+ &dev_attr_trigger_cntr.attr,
+ &dev_attr_status.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etb);
+
+static int etb_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct etb_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ drvdata->clk = adev->pclk;
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+	ret = etb_get_buffer_depth(drvdata);
+	clk_disable_unprepare(drvdata->clk);
+
+	if (ret < 0)
+		return ret;
+	drvdata->buffer_depth = ret;
+
+ drvdata->buf = devm_kzalloc(dev,
+ drvdata->buffer_depth * 4, GFP_KERNEL);
+ if (!drvdata->buf)
+ return -ENOMEM;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_SINK;
+ desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc->ops = &etb_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->groups = coresight_etb_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ drvdata->miscdev.name = pdata->name;
+ drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+ drvdata->miscdev.fops = &etb_fops;
+ ret = misc_register(&drvdata->miscdev);
+ if (ret)
+ goto err_misc_register;
+
+ dev_info(dev, "ETB initialized\n");
+ return 0;
+
+err_misc_register:
+ coresight_unregister(drvdata->csdev);
+ return ret;
+}
+
+static int etb_remove(struct amba_device *adev)
+{
+ struct etb_drvdata *drvdata = amba_get_drvdata(adev);
+
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct amba_id etb_ids[] = {
+ {
+ .id = 0x0003b907,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver etb_driver = {
+ .drv = {
+ .name = "coresight-etb10",
+ .owner = THIS_MODULE,
+ },
+ .probe = etb_probe,
+ .remove = etb_remove,
+ .id_table = etb_ids,
+};
+
+static int __init etb_init(void)
+{
+ return amba_driver_register(&etb_driver);
+}
+module_init(etb_init);
+
+static void __exit etb_exit(void)
+{
+ amba_driver_unregister(&etb_driver);
+}
+module_exit(etb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/drivers/coresight/coresight-etm-cp14.c b/drivers/coresight/coresight-etm-cp14.c
new file mode 100644
index 000000000000..12a220682117
--- /dev/null
+++ b/drivers/coresight/coresight-etm-cp14.c
@@ -0,0 +1,591 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <asm/hardware/cp14.h>
+
+#include "coresight-etm.h"
+
+int etm_readl_cp14(u32 reg, unsigned int *val)
+{
+ switch (reg) {
+ case ETMCR:
+ *val = etm_read(ETMCR);
+ return 0;
+ case ETMCCR:
+ *val = etm_read(ETMCCR);
+ return 0;
+ case ETMTRIGGER:
+ *val = etm_read(ETMTRIGGER);
+ return 0;
+ case ETMSR:
+ *val = etm_read(ETMSR);
+ return 0;
+ case ETMSCR:
+ *val = etm_read(ETMSCR);
+ return 0;
+ case ETMTSSCR:
+ *val = etm_read(ETMTSSCR);
+ return 0;
+ case ETMTEEVR:
+ *val = etm_read(ETMTEEVR);
+ return 0;
+ case ETMTECR1:
+ *val = etm_read(ETMTECR1);
+ return 0;
+ case ETMFFLR:
+ *val = etm_read(ETMFFLR);
+ return 0;
+ case ETMACVRn(0):
+ *val = etm_read(ETMACVR0);
+ return 0;
+ case ETMACVRn(1):
+ *val = etm_read(ETMACVR1);
+ return 0;
+ case ETMACVRn(2):
+ *val = etm_read(ETMACVR2);
+ return 0;
+ case ETMACVRn(3):
+ *val = etm_read(ETMACVR3);
+ return 0;
+ case ETMACVRn(4):
+ *val = etm_read(ETMACVR4);
+ return 0;
+ case ETMACVRn(5):
+ *val = etm_read(ETMACVR5);
+ return 0;
+ case ETMACVRn(6):
+ *val = etm_read(ETMACVR6);
+ return 0;
+ case ETMACVRn(7):
+ *val = etm_read(ETMACVR7);
+ return 0;
+ case ETMACVRn(8):
+ *val = etm_read(ETMACVR8);
+ return 0;
+ case ETMACVRn(9):
+ *val = etm_read(ETMACVR9);
+ return 0;
+ case ETMACVRn(10):
+ *val = etm_read(ETMACVR10);
+ return 0;
+ case ETMACVRn(11):
+ *val = etm_read(ETMACVR11);
+ return 0;
+ case ETMACVRn(12):
+ *val = etm_read(ETMACVR12);
+ return 0;
+ case ETMACVRn(13):
+ *val = etm_read(ETMACVR13);
+ return 0;
+ case ETMACVRn(14):
+ *val = etm_read(ETMACVR14);
+ return 0;
+ case ETMACVRn(15):
+ *val = etm_read(ETMACVR15);
+ return 0;
+ case ETMACTRn(0):
+ *val = etm_read(ETMACTR0);
+ return 0;
+ case ETMACTRn(1):
+ *val = etm_read(ETMACTR1);
+ return 0;
+ case ETMACTRn(2):
+ *val = etm_read(ETMACTR2);
+ return 0;
+ case ETMACTRn(3):
+ *val = etm_read(ETMACTR3);
+ return 0;
+ case ETMACTRn(4):
+ *val = etm_read(ETMACTR4);
+ return 0;
+ case ETMACTRn(5):
+ *val = etm_read(ETMACTR5);
+ return 0;
+ case ETMACTRn(6):
+ *val = etm_read(ETMACTR6);
+ return 0;
+ case ETMACTRn(7):
+ *val = etm_read(ETMACTR7);
+ return 0;
+ case ETMACTRn(8):
+ *val = etm_read(ETMACTR8);
+ return 0;
+ case ETMACTRn(9):
+ *val = etm_read(ETMACTR9);
+ return 0;
+ case ETMACTRn(10):
+ *val = etm_read(ETMACTR10);
+ return 0;
+ case ETMACTRn(11):
+ *val = etm_read(ETMACTR11);
+ return 0;
+ case ETMACTRn(12):
+ *val = etm_read(ETMACTR12);
+ return 0;
+ case ETMACTRn(13):
+ *val = etm_read(ETMACTR13);
+ return 0;
+ case ETMACTRn(14):
+ *val = etm_read(ETMACTR14);
+ return 0;
+ case ETMACTRn(15):
+ *val = etm_read(ETMACTR15);
+ return 0;
+ case ETMCNTRLDVRn(0):
+ *val = etm_read(ETMCNTRLDVR0);
+ return 0;
+ case ETMCNTRLDVRn(1):
+ *val = etm_read(ETMCNTRLDVR1);
+ return 0;
+ case ETMCNTRLDVRn(2):
+ *val = etm_read(ETMCNTRLDVR2);
+ return 0;
+ case ETMCNTRLDVRn(3):
+ *val = etm_read(ETMCNTRLDVR3);
+ return 0;
+ case ETMCNTENRn(0):
+ *val = etm_read(ETMCNTENR0);
+ return 0;
+ case ETMCNTENRn(1):
+ *val = etm_read(ETMCNTENR1);
+ return 0;
+ case ETMCNTENRn(2):
+ *val = etm_read(ETMCNTENR2);
+ return 0;
+ case ETMCNTENRn(3):
+ *val = etm_read(ETMCNTENR3);
+ return 0;
+ case ETMCNTRLDEVRn(0):
+ *val = etm_read(ETMCNTRLDEVR0);
+ return 0;
+ case ETMCNTRLDEVRn(1):
+ *val = etm_read(ETMCNTRLDEVR1);
+ return 0;
+ case ETMCNTRLDEVRn(2):
+ *val = etm_read(ETMCNTRLDEVR2);
+ return 0;
+ case ETMCNTRLDEVRn(3):
+ *val = etm_read(ETMCNTRLDEVR3);
+ return 0;
+ case ETMCNTVRn(0):
+ *val = etm_read(ETMCNTVR0);
+ return 0;
+ case ETMCNTVRn(1):
+ *val = etm_read(ETMCNTVR1);
+ return 0;
+ case ETMCNTVRn(2):
+ *val = etm_read(ETMCNTVR2);
+ return 0;
+ case ETMCNTVRn(3):
+ *val = etm_read(ETMCNTVR3);
+ return 0;
+ case ETMSQ12EVR:
+ *val = etm_read(ETMSQ12EVR);
+ return 0;
+ case ETMSQ21EVR:
+ *val = etm_read(ETMSQ21EVR);
+ return 0;
+ case ETMSQ23EVR:
+ *val = etm_read(ETMSQ23EVR);
+ return 0;
+ case ETMSQ31EVR:
+ *val = etm_read(ETMSQ31EVR);
+ return 0;
+ case ETMSQ32EVR:
+ *val = etm_read(ETMSQ32EVR);
+ return 0;
+ case ETMSQ13EVR:
+ *val = etm_read(ETMSQ13EVR);
+ return 0;
+ case ETMSQR:
+ *val = etm_read(ETMSQR);
+ return 0;
+ case ETMEXTOUTEVRn(0):
+ *val = etm_read(ETMEXTOUTEVR0);
+ return 0;
+ case ETMEXTOUTEVRn(1):
+ *val = etm_read(ETMEXTOUTEVR1);
+ return 0;
+ case ETMEXTOUTEVRn(2):
+ *val = etm_read(ETMEXTOUTEVR2);
+ return 0;
+ case ETMEXTOUTEVRn(3):
+ *val = etm_read(ETMEXTOUTEVR3);
+ return 0;
+ case ETMCIDCVRn(0):
+ *val = etm_read(ETMCIDCVR0);
+ return 0;
+ case ETMCIDCVRn(1):
+ *val = etm_read(ETMCIDCVR1);
+ return 0;
+ case ETMCIDCVRn(2):
+ *val = etm_read(ETMCIDCVR2);
+ return 0;
+ case ETMCIDCMR:
+ *val = etm_read(ETMCIDCMR);
+ return 0;
+ case ETMIMPSPEC0:
+ *val = etm_read(ETMIMPSPEC0);
+ return 0;
+ case ETMIMPSPEC1:
+ *val = etm_read(ETMIMPSPEC1);
+ return 0;
+ case ETMIMPSPEC2:
+ *val = etm_read(ETMIMPSPEC2);
+ return 0;
+ case ETMIMPSPEC3:
+ *val = etm_read(ETMIMPSPEC3);
+ return 0;
+ case ETMIMPSPEC4:
+ *val = etm_read(ETMIMPSPEC4);
+ return 0;
+ case ETMIMPSPEC5:
+ *val = etm_read(ETMIMPSPEC5);
+ return 0;
+ case ETMIMPSPEC6:
+ *val = etm_read(ETMIMPSPEC6);
+ return 0;
+ case ETMIMPSPEC7:
+ *val = etm_read(ETMIMPSPEC7);
+ return 0;
+ case ETMSYNCFR:
+ *val = etm_read(ETMSYNCFR);
+ return 0;
+ case ETMIDR:
+ *val = etm_read(ETMIDR);
+ return 0;
+ case ETMCCER:
+ *val = etm_read(ETMCCER);
+ return 0;
+ case ETMEXTINSELR:
+ *val = etm_read(ETMEXTINSELR);
+ return 0;
+ case ETMTESSEICR:
+ *val = etm_read(ETMTESSEICR);
+ return 0;
+ case ETMEIBCR:
+ *val = etm_read(ETMEIBCR);
+ return 0;
+ case ETMTSEVR:
+ *val = etm_read(ETMTSEVR);
+ return 0;
+ case ETMAUXCR:
+ *val = etm_read(ETMAUXCR);
+ return 0;
+ case ETMTRACEIDR:
+ *val = etm_read(ETMTRACEIDR);
+ return 0;
+ case ETMVMIDCVR:
+ *val = etm_read(ETMVMIDCVR);
+ return 0;
+ case ETMOSLSR:
+ *val = etm_read(ETMOSLSR);
+ return 0;
+ case ETMOSSRR:
+ *val = etm_read(ETMOSSRR);
+ return 0;
+ case ETMPDCR:
+ *val = etm_read(ETMPDCR);
+ return 0;
+ case ETMPDSR:
+ *val = etm_read(ETMPDSR);
+ return 0;
+ default:
+ *val = 0;
+ return -EINVAL;
+ }
+}
+
+int etm_writel_cp14(u32 reg, u32 val)
+{
+ switch (reg) {
+ case ETMCR:
+ etm_write(val, ETMCR);
+ break;
+ case ETMTRIGGER:
+ etm_write(val, ETMTRIGGER);
+ break;
+ case ETMSR:
+ etm_write(val, ETMSR);
+ break;
+ case ETMTSSCR:
+ etm_write(val, ETMTSSCR);
+ break;
+ case ETMTEEVR:
+ etm_write(val, ETMTEEVR);
+ break;
+ case ETMTECR1:
+ etm_write(val, ETMTECR1);
+ break;
+ case ETMFFLR:
+ etm_write(val, ETMFFLR);
+ break;
+ case ETMACVRn(0):
+ etm_write(val, ETMACVR0);
+ break;
+ case ETMACVRn(1):
+ etm_write(val, ETMACVR1);
+ break;
+ case ETMACVRn(2):
+ etm_write(val, ETMACVR2);
+ break;
+ case ETMACVRn(3):
+ etm_write(val, ETMACVR3);
+ break;
+ case ETMACVRn(4):
+ etm_write(val, ETMACVR4);
+ break;
+ case ETMACVRn(5):
+ etm_write(val, ETMACVR5);
+ break;
+ case ETMACVRn(6):
+ etm_write(val, ETMACVR6);
+ break;
+ case ETMACVRn(7):
+ etm_write(val, ETMACVR7);
+ break;
+ case ETMACVRn(8):
+ etm_write(val, ETMACVR8);
+ break;
+ case ETMACVRn(9):
+ etm_write(val, ETMACVR9);
+ break;
+ case ETMACVRn(10):
+ etm_write(val, ETMACVR10);
+ break;
+ case ETMACVRn(11):
+ etm_write(val, ETMACVR11);
+ break;
+ case ETMACVRn(12):
+ etm_write(val, ETMACVR12);
+ break;
+ case ETMACVRn(13):
+ etm_write(val, ETMACVR13);
+ break;
+ case ETMACVRn(14):
+ etm_write(val, ETMACVR14);
+ break;
+ case ETMACVRn(15):
+ etm_write(val, ETMACVR15);
+ break;
+ case ETMACTRn(0):
+ etm_write(val, ETMACTR0);
+ break;
+ case ETMACTRn(1):
+ etm_write(val, ETMACTR1);
+ break;
+ case ETMACTRn(2):
+ etm_write(val, ETMACTR2);
+ break;
+ case ETMACTRn(3):
+ etm_write(val, ETMACTR3);
+ break;
+ case ETMACTRn(4):
+ etm_write(val, ETMACTR4);
+ break;
+ case ETMACTRn(5):
+ etm_write(val, ETMACTR5);
+ break;
+ case ETMACTRn(6):
+ etm_write(val, ETMACTR6);
+ break;
+ case ETMACTRn(7):
+ etm_write(val, ETMACTR7);
+ break;
+ case ETMACTRn(8):
+ etm_write(val, ETMACTR8);
+ break;
+ case ETMACTRn(9):
+ etm_write(val, ETMACTR9);
+ break;
+ case ETMACTRn(10):
+ etm_write(val, ETMACTR10);
+ break;
+ case ETMACTRn(11):
+ etm_write(val, ETMACTR11);
+ break;
+ case ETMACTRn(12):
+ etm_write(val, ETMACTR12);
+ break;
+ case ETMACTRn(13):
+ etm_write(val, ETMACTR13);
+ break;
+ case ETMACTRn(14):
+ etm_write(val, ETMACTR14);
+ break;
+ case ETMACTRn(15):
+ etm_write(val, ETMACTR15);
+ break;
+ case ETMCNTRLDVRn(0):
+ etm_write(val, ETMCNTRLDVR0);
+ break;
+ case ETMCNTRLDVRn(1):
+ etm_write(val, ETMCNTRLDVR1);
+ break;
+ case ETMCNTRLDVRn(2):
+ etm_write(val, ETMCNTRLDVR2);
+ break;
+ case ETMCNTRLDVRn(3):
+ etm_write(val, ETMCNTRLDVR3);
+ break;
+ case ETMCNTENRn(0):
+ etm_write(val, ETMCNTENR0);
+ break;
+ case ETMCNTENRn(1):
+ etm_write(val, ETMCNTENR1);
+ break;
+ case ETMCNTENRn(2):
+ etm_write(val, ETMCNTENR2);
+ break;
+ case ETMCNTENRn(3):
+ etm_write(val, ETMCNTENR3);
+ break;
+ case ETMCNTRLDEVRn(0):
+ etm_write(val, ETMCNTRLDEVR0);
+ break;
+ case ETMCNTRLDEVRn(1):
+ etm_write(val, ETMCNTRLDEVR1);
+ break;
+ case ETMCNTRLDEVRn(2):
+ etm_write(val, ETMCNTRLDEVR2);
+ break;
+ case ETMCNTRLDEVRn(3):
+ etm_write(val, ETMCNTRLDEVR3);
+ break;
+ case ETMCNTVRn(0):
+ etm_write(val, ETMCNTVR0);
+ break;
+ case ETMCNTVRn(1):
+ etm_write(val, ETMCNTVR1);
+ break;
+ case ETMCNTVRn(2):
+ etm_write(val, ETMCNTVR2);
+ break;
+ case ETMCNTVRn(3):
+ etm_write(val, ETMCNTVR3);
+ break;
+ case ETMSQ12EVR:
+ etm_write(val, ETMSQ12EVR);
+ break;
+ case ETMSQ21EVR:
+ etm_write(val, ETMSQ21EVR);
+ break;
+ case ETMSQ23EVR:
+ etm_write(val, ETMSQ23EVR);
+ break;
+ case ETMSQ31EVR:
+ etm_write(val, ETMSQ31EVR);
+ break;
+ case ETMSQ32EVR:
+ etm_write(val, ETMSQ32EVR);
+ break;
+ case ETMSQ13EVR:
+ etm_write(val, ETMSQ13EVR);
+ break;
+ case ETMSQR:
+ etm_write(val, ETMSQR);
+ break;
+ case ETMEXTOUTEVRn(0):
+ etm_write(val, ETMEXTOUTEVR0);
+ break;
+ case ETMEXTOUTEVRn(1):
+ etm_write(val, ETMEXTOUTEVR1);
+ break;
+ case ETMEXTOUTEVRn(2):
+ etm_write(val, ETMEXTOUTEVR2);
+ break;
+ case ETMEXTOUTEVRn(3):
+ etm_write(val, ETMEXTOUTEVR3);
+ break;
+ case ETMCIDCVRn(0):
+ etm_write(val, ETMCIDCVR0);
+ break;
+ case ETMCIDCVRn(1):
+ etm_write(val, ETMCIDCVR1);
+ break;
+ case ETMCIDCVRn(2):
+ etm_write(val, ETMCIDCVR2);
+ break;
+ case ETMCIDCMR:
+ etm_write(val, ETMCIDCMR);
+ break;
+ case ETMIMPSPEC0:
+ etm_write(val, ETMIMPSPEC0);
+ break;
+ case ETMIMPSPEC1:
+ etm_write(val, ETMIMPSPEC1);
+ break;
+ case ETMIMPSPEC2:
+ etm_write(val, ETMIMPSPEC2);
+ break;
+ case ETMIMPSPEC3:
+ etm_write(val, ETMIMPSPEC3);
+ break;
+ case ETMIMPSPEC4:
+ etm_write(val, ETMIMPSPEC4);
+ break;
+ case ETMIMPSPEC5:
+ etm_write(val, ETMIMPSPEC5);
+ break;
+ case ETMIMPSPEC6:
+ etm_write(val, ETMIMPSPEC6);
+ break;
+ case ETMIMPSPEC7:
+ etm_write(val, ETMIMPSPEC7);
+ break;
+ case ETMSYNCFR:
+ etm_write(val, ETMSYNCFR);
+ break;
+ case ETMEXTINSELR:
+ etm_write(val, ETMEXTINSELR);
+ break;
+ case ETMTESSEICR:
+ etm_write(val, ETMTESSEICR);
+ break;
+ case ETMEIBCR:
+ etm_write(val, ETMEIBCR);
+ break;
+ case ETMTSEVR:
+ etm_write(val, ETMTSEVR);
+ break;
+ case ETMAUXCR:
+ etm_write(val, ETMAUXCR);
+ break;
+ case ETMTRACEIDR:
+ etm_write(val, ETMTRACEIDR);
+ break;
+ case ETMVMIDCVR:
+ etm_write(val, ETMVMIDCVR);
+ break;
+ case ETMOSLAR:
+ etm_write(val, ETMOSLAR);
+ break;
+ case ETMOSSRR:
+ etm_write(val, ETMOSSRR);
+ break;
+ case ETMPDCR:
+ etm_write(val, ETMPDCR);
+ break;
+ case ETMPDSR:
+ etm_write(val, ETMPDSR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
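
Both accessors take the memory-mapped register offsets defined in coresight-etm.h and map them onto the matching CP14 operations, returning 0 on success and -EINVAL for offsets with no CP14 equivalent. A hypothetical caller, for illustration only:

	unsigned int etmidr;

	if (!etm_readl_cp14(ETMIDR, &etmidr))
		pr_debug("ETMIDR via CP14: %#x\n", etmidr);

	etm_writel_cp14(ETMTRACEIDR, 0x10 & ETM_TRACEID_MASK);
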
diff --git a/drivers/coresight/coresight-etm.h b/drivers/coresight/coresight-etm.h
new file mode 100644
index 000000000000..501c5fac8a45
--- /dev/null
+++ b/drivers/coresight/coresight-etm.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_ETM_H
+#define _CORESIGHT_CORESIGHT_ETM_H
+
+#include <linux/spinlock.h>
+#include "coresight-priv.h"
+
+/*
+ * Device registers:
+ * 0x000 - 0x2FC: Trace registers
+ * 0x300 - 0x314: Management registers
+ * 0x318 - 0xEFC: Trace registers
+ *
+ * Coresight registers
+ * 0xF00 - 0xF9C: Management registers
+ * 0xFA0 - 0xFA4: Management registers in PFTv1.0
+ * Trace registers in PFTv1.1
+ * 0xFA8 - 0xFFC: Management registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+#define ETMCR 0x000
+#define ETMCCR 0x004
+#define ETMTRIGGER 0x008
+#define ETMSR 0x010
+#define ETMSCR 0x014
+#define ETMTSSCR 0x018
+#define ETMTECR2 0x01c
+#define ETMTEEVR 0x020
+#define ETMTECR1 0x024
+#define ETMFFLR 0x02c
+#define ETMACVRn(n) (0x040 + (n * 4))
+#define ETMACTRn(n) (0x080 + (n * 4))
+#define ETMCNTRLDVRn(n) (0x140 + (n * 4))
+#define ETMCNTENRn(n) (0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n) (0x160 + (n * 4))
+#define ETMCNTVRn(n) (0x170 + (n * 4))
+#define ETMSQ12EVR 0x180
+#define ETMSQ21EVR 0x184
+#define ETMSQ23EVR 0x188
+#define ETMSQ31EVR 0x18c
+#define ETMSQ32EVR 0x190
+#define ETMSQ13EVR 0x194
+#define ETMSQR 0x19c
+#define ETMEXTOUTEVRn(n) (0x1a0 + (n * 4))
+#define ETMCIDCVRn(n) (0x1b0 + (n * 4))
+#define ETMCIDCMR 0x1bc
+#define ETMIMPSPEC0 0x1c0
+#define ETMIMPSPEC1 0x1c4
+#define ETMIMPSPEC2 0x1c8
+#define ETMIMPSPEC3 0x1cc
+#define ETMIMPSPEC4 0x1d0
+#define ETMIMPSPEC5 0x1d4
+#define ETMIMPSPEC6 0x1d8
+#define ETMIMPSPEC7 0x1dc
+#define ETMSYNCFR 0x1e0
+#define ETMIDR 0x1e4
+#define ETMCCER 0x1e8
+#define ETMEXTINSELR 0x1ec
+#define ETMTESSEICR 0x1f0
+#define ETMEIBCR 0x1f4
+#define ETMTSEVR 0x1f8
+#define ETMAUXCR 0x1fc
+#define ETMTRACEIDR 0x200
+#define ETMVMIDCVR 0x240
+/* Management registers (0x300-0x314) */
+#define ETMOSLAR 0x300
+#define ETMOSLSR 0x304
+#define ETMOSSRR 0x308
+#define ETMPDCR 0x310
+#define ETMPDSR 0x314
+#define ETM_MAX_ADDR_CMP 16
+#define ETM_MAX_CNTR 4
+#define ETM_MAX_CTXID_CMP 3
+
+/* Register definition */
+/* ETMCR - 0x00 */
+#define ETMCR_PWD_DWN BIT(0)
+#define ETMCR_STALL_MODE BIT(7)
+#define ETMCR_ETM_PRG BIT(10)
+#define ETMCR_ETM_EN BIT(11)
+#define ETMCR_CYC_ACC BIT(12)
+#define ETMCR_CTXID_SIZE (BIT(14)|BIT(15))
+#define ETMCR_TIMESTAMP_EN BIT(28)
+/* ETMCCR - 0x04 */
+#define ETMCCR_FIFOFULL BIT(23)
+/* ETMPDCR - 0x310 */
+#define ETMPDCR_PWD_UP BIT(3)
+/* ETMTECR1 - 0x024 */
+#define ETMTECR1_ADDR_COMP_1 BIT(0)
+#define ETMTECR1_INC_EXC BIT(24)
+#define ETMTECR1_START_STOP BIT(25)
+/* ETMCCER - 0x1E8 */
+#define ETMCCER_TIMESTAMP BIT(22)
+
+#define ETM_MODE_EXCLUDE BIT(0)
+#define ETM_MODE_CYCACC BIT(1)
+#define ETM_MODE_STALL BIT(2)
+#define ETM_MODE_TIMESTAMP BIT(3)
+#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_ALL 0x1f
+
+#define ETM_SQR_MASK 0x3
+#define ETM_TRACEID_MASK 0x3f
+#define ETM_EVENT_MASK 0x1ffff
+#define ETM_SYNC_MASK 0xfff
+#define ETM_ALL_MASK 0xffffffff
+
+#define ETMSR_PROG_BIT 1
+#define ETM_SEQ_STATE_MAX_VAL (0x2)
+#define PORT_SIZE_MASK (GENMASK(21, 21) | GENMASK(6, 4))
+
+#define ETM_HARD_WIRE_RES_A /* Hard wired, always true */ \
+ ((0x0f << 0) | \
+ /* Resource index A */ \
+ (0x06 << 4))
+
+#define ETM_ADD_COMP_0 /* Single addr comparator 1 */ \
+ ((0x00 << 7) | \
+ /* Resource index B */ \
+ (0x00 << 11))
+
+#define ETM_EVENT_NOT_A BIT(14) /* NOT(A) */
+
+#define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \
+ ETM_ADD_COMP_0 | \
+ ETM_EVENT_NOT_A)
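
Expanding the macros above: ETM_HARD_WIRE_RES_A selects the hard-wired always-true resource (0x6f), ETM_ADD_COMP_0 leaves resource B at address comparator 0, and ETM_EVENT_NOT_A sets the NOT(A) function bit, so the default event value works out to 0x406f.

/*
 * ETM_HARD_WIRE_RES_A   = (0x0f << 0) | (0x06 << 4)  = 0x006f
 * ETM_ADD_COMP_0        = (0x00 << 7) | (0x00 << 11) = 0x0000
 * ETM_EVENT_NOT_A       = BIT(14)                    = 0x4000
 * ETM_DEFAULT_EVENT_VAL                              = 0x406f
 */
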
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated to this component.
+ * @csdev: component vitals needed by the framework.
+ * @clk: the clock this component is associated to.
+ * @spinlock: serialise access to this structure.
+ * @cpu: the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch: ETM/PTM version number.
+ * @use_cp14: true if management registers need to be accessed via CP14.
+ * @enable: is this ETM/PTM currently tracing.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:true if we should start tracing at boot time.
+ * @os_unlock: true if access to management registers is allowed.
+ * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr: value of register ETMCCR.
+ * @etmccer: value of register ETMCCER.
+ * @traceid: value of the current ID for this component.
+ * @mode: controls various modes supported by this ETM/PTM.
+ * @ctrl: used in conjunction with @mode.
+ * @trigger_event: setting for register ETMTRIGGER.
+ * @startstop_ctrl: setting for register ETMTSSCR.
+ * @enable_event: setting for register ETMTEEVR.
+ * @enable_ctrl1: setting for register ETMTECR1.
+ * @fifofull_level: setting for register ETMFFLR.
+ * @addr_idx: index for the address comparator selection.
+ * @addr_val: value for address comparator register.
+ * @addr_acctype: access type for address comparator register.
+ * @addr_type: current status of the comparator register.
+ * @cntr_idx: index for the counter register selection.
+ * @cntr_rld_val: reload value of a counter register.
+ * @cntr_event: control for counter enable register.
+ * @cntr_rld_event: value for counter reload event register.
+ * @cntr_val: counter value register.
+ * @seq_12_event: event causing the transition from 1 to 2.
+ * @seq_21_event: event causing the transition from 2 to 1.
+ * @seq_23_event: event causing the transition from 2 to 3.
+ * @seq_31_event: event causing the transition from 3 to 1.
+ * @seq_32_event: event causing the transition from 3 to 2.
+ * @seq_13_event: event causing the transition from 1 to 3.
+ * @seq_curr_state: current value of the sequencer register.
+ * @ctxid_idx: index for the context ID registers.
+ * @ctxid_val: value for the context ID to trigger on.
+ * @ctxid_mask: mask applicable to all the context IDs.
+ * @sync_freq: Synchronisation frequency.
+ * @timestamp_event: Defines an event that requests the insertion
+ *		     of a timestamp into the trace stream.
+ */
+struct etm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ spinlock_t spinlock;
+ int cpu;
+ int port_size;
+ u8 arch;
+ bool use_cp14;
+ bool enable;
+ bool sticky_enable;
+ bool boot_enable;
+ bool os_unlock;
+ u8 nr_addr_cmp;
+ u8 nr_cntr;
+ u8 nr_ext_inp;
+ u8 nr_ext_out;
+ u8 nr_ctxid_cmp;
+ u32 etmccr;
+ u32 etmccer;
+ u32 traceid;
+ u32 mode;
+ u32 ctrl;
+ u32 trigger_event;
+ u32 startstop_ctrl;
+ u32 enable_event;
+ u32 enable_ctrl1;
+ u32 fifofull_level;
+ u8 addr_idx;
+ u32 addr_val[ETM_MAX_ADDR_CMP];
+ u32 addr_acctype[ETM_MAX_ADDR_CMP];
+ u32 addr_type[ETM_MAX_ADDR_CMP];
+ u8 cntr_idx;
+ u32 cntr_rld_val[ETM_MAX_CNTR];
+ u32 cntr_event[ETM_MAX_CNTR];
+ u32 cntr_rld_event[ETM_MAX_CNTR];
+ u32 cntr_val[ETM_MAX_CNTR];
+ u32 seq_12_event;
+ u32 seq_21_event;
+ u32 seq_23_event;
+ u32 seq_31_event;
+ u32 seq_32_event;
+ u32 seq_13_event;
+ u32 seq_curr_state;
+ u8 ctxid_idx;
+ u32 ctxid_val[ETM_MAX_CTXID_CMP];
+ u32 ctxid_mask;
+ u32 sync_freq;
+ u32 timestamp_event;
+};
+
+enum etm_addr_type {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+#endif
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/coresight/coresight-etm3x.c
new file mode 100644
index 000000000000..d9e3ed6aa857
--- /dev/null
+++ b/drivers/coresight/coresight-etm3x.c
@@ -0,0 +1,1928 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+
+#include "coresight-etm.h"
+
+#ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
+static int boot_enable = 1;
+#else
+static int boot_enable;
+#endif
+module_param_named(
+ boot_enable, boot_enable, int, S_IRUGO
+);
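+
+/*
+ * boot_enable is read-only from sysfs (S_IRUGO); it is consulted at probe
+ * time to decide whether this ETM/PTM should start tracing without user
+ * intervention (see etm_probe() and the CPU hotplug callback below).
+ */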
+
+/* The number of ETM/PTM currently registered */
+static int etm_count;
+static struct etm_drvdata *etmdrvdata[NR_CPUS];
+
+static inline void etm_writel(struct etm_drvdata *drvdata,
+ u32 val, u32 off)
+{
+ if (drvdata->use_cp14) {
+ if (etm_writel_cp14(off, val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ writel_relaxed(val, drvdata->base + off);
+ }
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+ u32 val;
+
+ if (drvdata->use_cp14) {
+ if (etm_readl_cp14(off, &val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ val = readl_relaxed(drvdata->base + off);
+ }
+
+ return val;
+}
+
+/*
+ * Memory mapped writes to clear os lock are not supported on some processors
+ * and OS lock must be unlocked before any memory mapped access on such
+ * processors, otherwise memory mapped reads/writes will be invalid.
+ */
+static void etm_os_unlock(void *info)
+{
+ struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
+ /* Writing any value to ETMOSLAR unlocks the trace registers */
+ etm_writel(drvdata, 0x0, ETMOSLAR);
+ isb();
+}
+
+static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
+{
+ u32 etmcr;
+
+ /* Ensure pending cp14 accesses complete before setting pwrdwn */
+ mb();
+ isb();
+ etmcr = etm_readl(drvdata, ETMCR);
+ etmcr |= ETMCR_PWD_DWN;
+ etm_writel(drvdata, etmcr, ETMCR);
+}
+
+static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
+{
+ u32 etmcr;
+
+ etmcr = etm_readl(drvdata, ETMCR);
+ etmcr &= ~ETMCR_PWD_DWN;
+ etm_writel(drvdata, etmcr, ETMCR);
+ /* Ensure pwrup completes before subsequent cp14 accesses */
+ mb();
+ isb();
+}
+
+static void etm_set_pwrup(struct etm_drvdata *drvdata)
+{
+ u32 etmpdcr;
+
+ etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+ etmpdcr |= ETMPDCR_PWD_UP;
+ writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+ /* Ensure pwrup completes before subsequent cp14 accesses */
+ mb();
+ isb();
+}
+
+static void etm_clr_pwrup(struct etm_drvdata *drvdata)
+{
+ u32 etmpdcr;
+
+ /* Ensure pending cp14 accesses complete before clearing pwrup */
+ mb();
+ isb();
+ etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+ etmpdcr &= ~ETMPDCR_PWD_UP;
+ writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+}
+
+/**
+ * coresight_timeout_etm - loop until a bit has changed to a specific state.
+ * @drvdata: etm's private data structure.
+ * @offset: offset of the register to poll, relative to @drvdata's base address.
+ * @position: the position of the bit of interest.
+ * @value: the value the bit should have.
+ *
+ * Basically the same as @coresight_timeout except for the register access
+ * method where we have to account for CP14 configurations.
+ *
+ * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
+ * TIMEOUT_US has elapsed, whichever happens first.
+ */
+static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
+ int position, int value)
+{
+ int i;
+ u32 val;
+
+ for (i = TIMEOUT_US; i > 0; i--) {
+ val = etm_readl(drvdata, offset);
+ /* Waiting on the bit to go from 0 to 1 */
+ if (value) {
+ if (val & BIT(position))
+ return 0;
+ /* Waiting on the bit to go from 1 to 0 */
+ } else {
+ if (!(val & BIT(position)))
+ return 0;
+ }
+
+ /*
+ * Delay is arbitrary - the specification doesn't say how long
+ * we are expected to wait. Extra check required to make sure
+ * we don't wait needlessly on the last iteration.
+ */
+ if (i - 1)
+ udelay(1);
+ }
+
+ return -EAGAIN;
+}
+
+
+static void etm_set_prog(struct etm_drvdata *drvdata)
+{
+ u32 etmcr;
+
+ etmcr = etm_readl(drvdata, ETMCR);
+ etmcr |= ETMCR_ETM_PRG;
+ etm_writel(drvdata, etmcr, ETMCR);
+ /*
+ * Recommended by spec for cp14 accesses to ensure etmcr write is
+ * complete before polling etmsr
+ */
+ isb();
+ if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n", ETMSR);
+ }
+}
+
+static void etm_clr_prog(struct etm_drvdata *drvdata)
+{
+ u32 etmcr;
+
+ etmcr = etm_readl(drvdata, ETMCR);
+ etmcr &= ~ETMCR_ETM_PRG;
+ etm_writel(drvdata, etmcr, ETMCR);
+ /*
+ * Recommended by spec for cp14 accesses to ensure etmcr write is
+ * complete before polling etmsr
+ */
+ isb();
+ if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n", ETMSR);
+ }
+}
+
+static void etm_set_default(struct etm_drvdata *drvdata)
+{
+ int i;
+
+ drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->enable_event = ETM_HARD_WIRE_RES_A;
+
+ drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ drvdata->cntr_rld_val[i] = 0x0;
+ drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+ drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+ drvdata->cntr_val[i] = 0x0;
+ }
+
+ drvdata->seq_curr_state = 0x0;
+ drvdata->ctxid_idx = 0x0;
+ for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+ drvdata->ctxid_val[i] = 0x0;
+ drvdata->ctxid_mask = 0x0;
+}
+
+static void etm_enable_hw(void *info)
+{
+ int i;
+ u32 etmcr;
+ struct etm_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Turn engine on */
+ etm_clr_pwrdwn(drvdata);
+ /* Apply power to trace registers */
+ etm_set_pwrup(drvdata);
+ /* Make sure all registers are accessible */
+ etm_os_unlock(drvdata);
+
+ etm_set_prog(drvdata);
+
+ etmcr = etm_readl(drvdata, ETMCR);
+ etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
+ etmcr |= drvdata->port_size;
+ etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
+ etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
+ etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
+ etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
+ etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
+ etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
+ etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
+ }
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
+ etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
+ etm_writel(drvdata, drvdata->cntr_rld_event[i],
+ ETMCNTRLDEVRn(i));
+ etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
+ }
+ etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
+ etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
+ etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
+ etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
+ etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
+ etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
+ etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
+ for (i = 0; i < drvdata->nr_ext_out; i++)
+ etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
+ for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+ etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+ etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
+ etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
+ /* No external input selected */
+ etm_writel(drvdata, 0x0, ETMEXTINSELR);
+ etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
+ /* No auxiliary control selected */
+ etm_writel(drvdata, 0x0, ETMAUXCR);
+ etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
+ /* No VMID comparator value selected */
+ etm_writel(drvdata, 0x0, ETMVMIDCVR);
+
+ /* Ensures trace output is enabled from this ETM */
+ etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
+
+ etm_clr_prog(drvdata);
+ CS_LOCK(drvdata->base);
+
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+}
+
+static int etm_trace_id_simple(struct etm_drvdata *drvdata)
+{
+ if (!drvdata->enable)
+ return drvdata->traceid;
+
+ return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+}
+
+static int etm_trace_id(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ int trace_id = -1;
+
+ if (!drvdata->enable)
+ return drvdata->traceid;
+
+ if (clk_prepare_enable(drvdata->clk))
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+out:
+ return trace_id;
+}
+
+static int etm_enable(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err_clk;
+
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * Configure the ETM only if the CPU is online. If it isn't online,
+ * the hw configuration will take place when 'CPU_STARTING' is received
+ * in @etm_cpu_callback.
+ */
+ if (cpu_online(drvdata->cpu)) {
+ ret = smp_call_function_single(drvdata->cpu,
+ etm_enable_hw, drvdata, 1);
+ if (ret)
+ goto err;
+ }
+
+ drvdata->enable = true;
+ drvdata->sticky_enable = true;
+
+ spin_unlock(&drvdata->spinlock);
+
+ dev_info(drvdata->dev, "ETM tracing enabled\n");
+ return 0;
+err:
+ spin_unlock(&drvdata->spinlock);
+ clk_disable_unprepare(drvdata->clk);
+err_clk:
+ return ret;
+}
+
+static void etm_disable_hw(void *info)
+{
+ int i;
+ struct etm_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+ etm_set_prog(drvdata);
+
+ /* Program trace enable to low by using always false event */
+ etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
+
+ /* Read back sequencer and counters for post trace analysis */
+ drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+
+ for (i = 0; i < drvdata->nr_cntr; i++)
+ drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+
+ etm_set_pwrdwn(drvdata);
+ CS_LOCK(drvdata->base);
+
+ dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+}
+
+static void etm_disable(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /*
+ * Taking the hotplug lock here protects against the clocks being
+ * disabled while tracing is left on (a crash scenario) when a user
+ * disable occurs after the cpu online mask indicates the cpu is
+ * offline but before the DYING hotplug callback is serviced by the
+ * ETM driver.
+ */
+ get_online_cpus();
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * Executing etm_disable_hw on the cpu whose ETM is being disabled
+ * ensures that register writes occur when cpu is powered.
+ */
+ smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
+ drvdata->enable = false;
+
+ spin_unlock(&drvdata->spinlock);
+ put_online_cpus();
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
+}
+
+static const struct coresight_ops_source etm_source_ops = {
+ .trace_id = etm_trace_id,
+ .enable = etm_enable,
+ .disable = etm_disable,
+};
+
+static const struct coresight_ops etm_cs_ops = {
+ .source_ops = &etm_source_ops,
+};
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_addr_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_cntr;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ctxid_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long flags, val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ val = etm_readl(drvdata, ETMSR);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i, ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val) {
+ spin_lock(&drvdata->spinlock);
+ drvdata->mode = ETM_MODE_EXCLUDE;
+ drvdata->ctrl = 0x0;
+ drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
+ drvdata->startstop_ctrl = 0x0;
+ drvdata->addr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ drvdata->addr_val[i] = 0x0;
+ drvdata->addr_acctype[i] = 0x0;
+ drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ drvdata->cntr_idx = 0x0;
+
+ etm_set_default(drvdata);
+ spin_unlock(&drvdata->spinlock);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->mode;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->mode = val & ETM_MODE_ALL;
+
+ if (drvdata->mode & ETM_MODE_EXCLUDE)
+ drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
+ else
+ drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+ if (drvdata->mode & ETM_MODE_CYCACC)
+ drvdata->ctrl |= ETMCR_CYC_ACC;
+ else
+ drvdata->ctrl &= ~ETMCR_CYC_ACC;
+
+ if (drvdata->mode & ETM_MODE_STALL) {
+ if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+ dev_warn(drvdata->dev, "stall mode not supported\n");
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ drvdata->ctrl |= ETMCR_STALL_MODE;
+ } else
+ drvdata->ctrl &= ~ETMCR_STALL_MODE;
+
+ if (drvdata->mode & ETM_MODE_TIMESTAMP) {
+ if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+ dev_warn(drvdata->dev, "timestamp not supported\n");
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
+ } else
+ drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
+
+ if (drvdata->mode & ETM_MODE_CTXID)
+ drvdata->ctrl |= ETMCR_CTXID_SIZE;
+ else
+ drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t trigger_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->trigger_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->trigger_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->enable_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->enable_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->fifofull_level;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->fifofull_level = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->addr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->addr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ val = drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val1 = drvdata->addr_val[idx];
+ val2 = drvdata->addr_val[idx + 1];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* Lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val1;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_val[idx + 1] = val2;
+ drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ drvdata->enable_ctrl1 |= (1 << (idx/2));
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
+ drvdata->startstop_ctrl |= (1 << idx);
+ drvdata->enable_ctrl1 |= BIT(25);
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = drvdata->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ drvdata->startstop_ctrl |= (1 << (idx + 16));
+ drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->addr_acctype[drvdata->addr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->addr_acctype[drvdata->addr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cntr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->cntr_rld_val[drvdata->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->cntr_event[drvdata->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->cntr_rld_event[drvdata->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+ u32 val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!drvdata->enable) {
+ spin_lock(&drvdata->spinlock);
+ for (i = 0; i < drvdata->nr_cntr; i++)
+ ret += sprintf(buf + ret, "counter %d: %x\n",
+ i, drvdata->cntr_val[i]);
+ spin_unlock(&drvdata->spinlock);
+ return ret;
+ }
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ val = etm_readl(drvdata, ETMCNTVRn(i));
+ ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+ }
+
+ return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cntr_val[drvdata->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_12_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_12_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_21_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_21_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_23_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_23_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_31_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_31_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_32_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_32_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->seq_13_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->seq_13_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long val, flags;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!drvdata->enable) {
+ val = drvdata->seq_curr_state;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+out:
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val > ETM_SEQ_STATE_MAX_VAL)
+ return -EINVAL;
+
+ drvdata->seq_curr_state = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->ctxid_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_ctxid_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ drvdata->ctxid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->ctxid_val[drvdata->ctxid_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_val);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->ctxid_mask;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->ctxid_mask = val;
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->sync_freq;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->sync_freq = val & ETM_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->timestamp_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->timestamp_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long flags;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ ret = sprintf(buf,
+ "ETMCCR: 0x%08x\n"
+ "ETMCCER: 0x%08x\n"
+ "ETMSCR: 0x%08x\n"
+ "ETMIDR: 0x%08x\n"
+ "ETMCR: 0x%08x\n"
+ "ETMTRACEIDR: 0x%08x\n"
+ "Enable event: 0x%08x\n"
+ "Enable start/stop: 0x%08x\n"
+ "Enable control: CR1 0x%08x CR2 0x%08x\n"
+ "CPU affinity: %d\n",
+ drvdata->etmccr, drvdata->etmccer,
+ etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
+ etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
+ etm_readl(drvdata, ETMTEEVR),
+ etm_readl(drvdata, ETMTSSCR),
+ etm_readl(drvdata, ETMTECR1),
+ etm_readl(drvdata, ETMTECR2),
+ drvdata->cpu);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ unsigned long val, flags;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!drvdata->enable) {
+ val = drvdata->traceid;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+out:
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->traceid = val & ETM_TRACEID_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ctxid_cmp.attr,
+ &dev_attr_etmsr.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_trigger_event.attr,
+ &dev_attr_enable_event.attr,
+ &dev_attr_fifofull_level.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_acctype.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntr_rld_val.attr,
+ &dev_attr_cntr_event.attr,
+ &dev_attr_cntr_rld_event.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_seq_12_event.attr,
+ &dev_attr_seq_21_event.attr,
+ &dev_attr_seq_23_event.attr,
+ &dev_attr_seq_31_event.attr,
+ &dev_attr_seq_32_event.attr,
+ &dev_attr_seq_13_event.attr,
+ &dev_attr_seq_curr_state.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_val.attr,
+ &dev_attr_ctxid_mask.attr,
+ &dev_attr_sync_freq.attr,
+ &dev_attr_timestamp_event.attr,
+ &dev_attr_status.attr,
+ &dev_attr_traceid.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etm);
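+
+/*
+ * The attributes above are handed to coresight_register() via desc->groups
+ * and should appear under the corresponding device on the coresight bus
+ * (typically /sys/bus/coresight/devices/); written values are parsed as
+ * hexadecimal.
+ */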
+
+static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (!etmdrvdata[cpu])
+ goto out;
+
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_STARTING:
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
+ }
+
+ if (etmdrvdata[cpu]->enable)
+ etm_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ break;
+
+ case CPU_ONLINE:
+ if (etmdrvdata[cpu]->boot_enable &&
+ !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ break;
+
+ case CPU_DYING:
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (etmdrvdata[cpu]->enable)
+ etm_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ break;
+ }
+out:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm_cpu_notifier = {
+ .notifier_call = etm_cpu_callback,
+};
+
+static bool etm_arch_supported(u8 arch)
+{
+ switch (arch) {
+ case ETM_ARCH_V3_3:
+ break;
+ case ETM_ARCH_V3_5:
+ break;
+ case PFT_ARCH_V1_0:
+ break;
+ case PFT_ARCH_V1_1:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void etm_init_arch_data(void *info)
+{
+ u32 etmidr;
+ u32 etmccr;
+ struct etm_drvdata *drvdata = info;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* First dummy read */
+ (void)etm_readl(drvdata, ETMPDSR);
+ /* Provide power to ETM: ETMPDCR[3] == 1 */
+ etm_set_pwrup(drvdata);
+ /*
+ * Clear power down bit since when this bit is set writes to
+ * certain registers might be ignored.
+ */
+ etm_clr_pwrdwn(drvdata);
+ /*
+ * Set the prog bit. It should already be set out of reset, but this
+ * is included to make sure it is set.
+ */
+ etm_set_prog(drvdata);
+
+ /* Find all capabilities */
+ etmidr = etm_readl(drvdata, ETMIDR);
+ drvdata->arch = BMVAL(etmidr, 4, 11);
+ drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
+
+ drvdata->etmccer = etm_readl(drvdata, ETMCCER);
+ etmccr = etm_readl(drvdata, ETMCCR);
+ drvdata->etmccr = etmccr;
+ drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+ drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
+ drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
+ drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
+ drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+ etm_set_pwrdwn(drvdata);
+ etm_clr_pwrup(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
+static void etm_init_default_data(struct etm_drvdata *drvdata)
+{
+ static int etm3x_traceid;
+
+ u32 flags = (1 << 0 | /* instruction execute */
+ 3 << 3 | /* ARM instruction */
+ 0 << 5 | /* No data value comparison */
+ 0 << 7 | /* No exact match */
+ 0 << 8 | /* Ignore context ID */
+ 0 << 10); /* Security ignored */
+
+ /*
+ * Initial configuration only - this guarantees that sources handled
+ * by this driver get a unique ID at startup, but not with respect to
+ * other types of sources. For that we rely on the core framework.
+ */
+ drvdata->traceid = etm3x_traceid++;
+ drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
+ drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
+ if (drvdata->nr_addr_cmp >= 2) {
+ drvdata->addr_val[0] = (u32) _stext;
+ drvdata->addr_val[1] = (u32) _etext;
+ drvdata->addr_acctype[0] = flags;
+ drvdata->addr_acctype[1] = flags;
+ drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+ }
+
+ etm_set_default(drvdata);
+}
+
+static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct etm_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ adev->dev.platform_data = pdata;
+ drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
+ }
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ drvdata->clk = adev->pclk;
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ drvdata->cpu = pdata ? pdata->cpu : 0;
+
+ get_online_cpus();
+ etmdrvdata[drvdata->cpu] = drvdata;
+
+ if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
+ drvdata->os_unlock = true;
+
+ if (smp_call_function_single(drvdata->cpu,
+ etm_init_arch_data, drvdata, 1))
+ dev_err(dev, "ETM arch init failed\n");
+
+ if (!etm_count++)
+ register_hotcpu_notifier(&etm_cpu_notifier);
+
+ put_online_cpus();
+
+ if (!etm_arch_supported(drvdata->arch)) {
+ ret = -EINVAL;
+ goto err_arch_supported;
+ }
+ etm_init_default_data(drvdata);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ desc->ops = &etm_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->groups = coresight_etm_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_arch_supported;
+ }
+
+ dev_info(dev, "ETM initialized\n");
+
+ if (boot_enable) {
+ coresight_enable(drvdata->csdev);
+ drvdata->boot_enable = true;
+ }
+
+ return 0;
+
+err_arch_supported:
+ clk_disable_unprepare(drvdata->clk);
+ if (--etm_count == 0)
+ unregister_hotcpu_notifier(&etm_cpu_notifier);
+ return ret;
+}
+
+static int etm_remove(struct amba_device *adev)
+{
+ struct etm_drvdata *drvdata = amba_get_drvdata(adev);
+
+ coresight_unregister(drvdata->csdev);
+ if (--etm_count == 0)
+ unregister_hotcpu_notifier(&etm_cpu_notifier);
+
+ return 0;
+}
+
+static struct amba_id etm_ids[] = {
+ { /* ETM 3.3 */
+ .id = 0x0003b921,
+ .mask = 0x0003ffff,
+ },
+ { /* ETM 3.5 */
+ .id = 0x0003b956,
+ .mask = 0x0003ffff,
+ },
+ { /* PTM 1.0 */
+ .id = 0x0003b950,
+ .mask = 0x0003ffff,
+ },
+ { /* PTM 1.1 */
+ .id = 0x0003b95f,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver etm_driver = {
+ .drv = {
+ .name = "coresight-etm3x",
+ .owner = THIS_MODULE,
+ },
+ .probe = etm_probe,
+ .remove = etm_remove,
+ .id_table = etm_ids,
+};
+
+int __init etm_init(void)
+{
+ return amba_driver_register(&etm_driver);
+}
+module_init(etm_init);
+
+void __exit etm_exit(void)
+{
+ amba_driver_unregister(&etm_driver);
+}
+module_exit(etm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
new file mode 100644
index 000000000000..2108edffe1f4
--- /dev/null
+++ b/drivers/coresight/coresight-funnel.c
@@ -0,0 +1,268 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define FUNNEL_FUNCTL 0x000
+#define FUNNEL_PRICTL 0x004
+
+#define FUNNEL_HOLDTIME_MASK 0xf00
+#define FUNNEL_HOLDTIME_SHFT 0x8
+#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
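+
+/*
+ * Bits [11:8] of FUNCTL carry the hold time field (see the mask and shift
+ * above); funnel_enable_hw() programs it to 0x7 together with the requested
+ * input port.
+ */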
+
+/**
+ * struct funnel_drvdata - specifics associated to a funnel component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated with this component.
+ * @csdev: component vitals needed by the framework.
+ * @clk: the clock this component is associated with.
+ * @priority: port selection order.
+ */
+struct funnel_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ unsigned long priority;
+};
+
+static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+{
+ u32 functl;
+
+ CS_UNLOCK(drvdata->base);
+
+ functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+ functl &= ~FUNNEL_HOLDTIME_MASK;
+ functl |= FUNNEL_HOLDTIME;
+ functl |= (1 << port);
+ writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+ writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int funnel_enable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ funnel_enable_hw(drvdata, inport);
+
+ dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
+ return 0;
+}
+
+static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
+{
+ u32 functl;
+
+ CS_UNLOCK(drvdata->base);
+
+ functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+ functl &= ~(1 << inport);
+ writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void funnel_disable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ funnel_disable_hw(drvdata, inport);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
+}
+
+static const struct coresight_ops_link funnel_link_ops = {
+ .enable = funnel_enable,
+ .disable = funnel_disable,
+};
+
+static const struct coresight_ops funnel_cs_ops = {
+ .link_ops = &funnel_link_ops,
+};
+
+static ssize_t priority_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->priority;
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t priority_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->priority = val;
+ return size;
+}
+static DEVICE_ATTR_RW(priority);
+
+static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
+{
+ u32 functl;
+
+ CS_UNLOCK(drvdata->base);
+ functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+ CS_LOCK(drvdata->base);
+
+ return functl;
+}
+
+static ssize_t funnel_ctrl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ val = get_funnel_ctrl_hw(drvdata);
+ clk_disable_unprepare(drvdata->clk);
+
+ return sprintf(buf, "%#x\n", val);
+}
+static DEVICE_ATTR_RO(funnel_ctrl);
+
+static struct attribute *coresight_funnel_attrs[] = {
+ &dev_attr_funnel_ctrl.attr,
+ &dev_attr_priority.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_funnel);
+
+static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct funnel_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ drvdata->clk = adev->pclk;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_LINK;
+ desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+ desc->ops = &funnel_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->groups = coresight_funnel_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "FUNNEL initialized\n");
+ return 0;
+}
+
+static int funnel_remove(struct amba_device *adev)
+{
+ struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct amba_id funnel_ids[] = {
+ {
+ .id = 0x0003b908,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver funnel_driver = {
+ .drv = {
+ .name = "coresight-funnel",
+ .owner = THIS_MODULE,
+ },
+ .probe = funnel_probe,
+ .remove = funnel_remove,
+ .id_table = funnel_ids,
+};
+
+static int __init funnel_init(void)
+{
+ return amba_driver_register(&funnel_driver);
+}
+module_init(funnel_init);
+
+static void __exit funnel_exit(void)
+{
+ amba_driver_unregister(&funnel_driver);
+}
+module_exit(funnel_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
new file mode 100644
index 000000000000..7b3372fca4f6
--- /dev/null
+++ b/drivers/coresight/coresight-priv.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_PRIV_H
+#define _CORESIGHT_PRIV_H
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/coresight.h>
+
+/*
+ * Coresight management registers (0xf00-0xfcc)
+ * 0xfa0 - 0xfa4: Management registers in PFTv1.0
+ * Trace registers in PFTv1.1
+ */
+#define CORESIGHT_ITCTRL 0xf00
+#define CORESIGHT_CLAIMSET 0xfa0
+#define CORESIGHT_CLAIMCLR 0xfa4
+#define CORESIGHT_LAR 0xfb0
+#define CORESIGHT_LSR 0xfb4
+#define CORESIGHT_AUTHSTATUS 0xfb8
+#define CORESIGHT_DEVID 0xfc8
+#define CORESIGHT_DEVTYPE 0xfcc
+
+#define TIMEOUT_US 100
+#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
+
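+/*
+ * CoreSight components are locked or unlocked through their Lock Access
+ * Register (LAR): writing the CORESIGHT_UNLOCK key (0xc5acce55) grants
+ * write access to the component registers, any other value locks them.
+ */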
+static inline void CS_LOCK(void __iomem *addr)
+{
+ /* Wait for things to settle */
+ mb();
+ writel_relaxed(0x0, addr + CORESIGHT_LAR);
+}
+
+static inline void CS_UNLOCK(void __iomem *addr)
+{
+ writel_relaxed(CORESIGHT_UNLOCK, addr + CORESIGHT_LAR);
+ /* Make sure everyone has seen this */
+ mb();
+}
+
+#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
+extern int etm_readl_cp14(u32 off, unsigned int *val);
+extern int etm_writel_cp14(u32 off, u32 val);
+#else
+static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
+static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
+#endif
+
+#endif
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/coresight/coresight-replicator.c
new file mode 100644
index 000000000000..a2dfcf903551
--- /dev/null
+++ b/drivers/coresight/coresight-replicator.c
@@ -0,0 +1,137 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+/**
+ * struct replicator_drvdata - specifics associated to a replicator component
+ * @dev: the device entity associated with this component
+ * @csdev: component vitals needed by the framework
+ */
+struct replicator_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+};
+
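+/*
+ * This driver handles the static (non-programmable) replicator: there are
+ * no registers to configure, so enable and disable merely report state.
+ */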
+static int replicator_enable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "REPLICATOR enabled\n");
+ return 0;
+}
+
+static void replicator_disable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "REPLICATOR disabled\n");
+}
+
+static const struct coresight_ops_link replicator_link_ops = {
+ .enable = replicator_enable,
+ .disable = replicator_disable,
+};
+
+static const struct coresight_ops replicator_cs_ops = {
+ .link_ops = &replicator_link_ops,
+};
+
+static int replicator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct replicator_drvdata *drvdata;
+ struct coresight_desc *desc;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_LINK;
+ desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc->ops = &replicator_cs_ops;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "REPLICATOR initialized\n");
+ return 0;
+}
+
+static int replicator_remove(struct platform_device *pdev)
+{
+ struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static const struct of_device_id replicator_match[] = {
+ {.compatible = "arm,coresight-replicator"},
+ {}
+};
+
+static struct platform_driver replicator_driver = {
+ .probe = replicator_probe,
+ .remove = replicator_remove,
+ .driver = {
+ .name = "coresight-replicator",
+ .of_match_table = replicator_match,
+ },
+};
+
+static int __init replicator_init(void)
+{
+ return platform_driver_register(&replicator_driver);
+}
+module_init(replicator_init);
+
+static void __exit replicator_exit(void)
+{
+ platform_driver_unregister(&replicator_driver);
+}
+module_exit(replicator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
new file mode 100644
index 000000000000..ce2c293f1707
--- /dev/null
+++ b/drivers/coresight/coresight-tmc.c
@@ -0,0 +1,776 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define TMC_RSZ 0x004
+#define TMC_STS 0x00c
+#define TMC_RRD 0x010
+#define TMC_RRP 0x014
+#define TMC_RWP 0x018
+#define TMC_TRG 0x01c
+#define TMC_CTL 0x020
+#define TMC_RWD 0x024
+#define TMC_MODE 0x028
+#define TMC_LBUFLEVEL 0x02c
+#define TMC_CBUFLEVEL 0x030
+#define TMC_BUFWM 0x034
+#define TMC_RRPHI 0x038
+#define TMC_RWPHI 0x03c
+#define TMC_AXICTL 0x110
+#define TMC_DBALO 0x118
+#define TMC_DBAHI 0x11c
+#define TMC_FFSR 0x300
+#define TMC_FFCR 0x304
+#define TMC_PSCR 0x308
+#define TMC_ITMISCOP0 0xee0
+#define TMC_ITTRFLIN 0xee8
+#define TMC_ITATBDATA0 0xeec
+#define TMC_ITATBCTR2 0xef0
+#define TMC_ITATBCTR1 0xef4
+#define TMC_ITATBCTR0 0xef8
+
+/* register description */
+/* TMC_CTL - 0x020 */
+#define TMC_CTL_CAPT_EN BIT(0)
+/* TMC_STS - 0x00C */
+#define TMC_STS_TRIGGERED BIT(1)
+/* TMC_AXICTL - 0x110 */
+#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
+#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
+#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
+#define TMC_AXICTL_WR_BURST_LEN 0xF00
+/* TMC_FFCR - 0x304 */
+#define TMC_FFCR_EN_FMT BIT(0)
+#define TMC_FFCR_EN_TI BIT(1)
+#define TMC_FFCR_FON_FLIN BIT(4)
+#define TMC_FFCR_FON_TRIG_EVT BIT(5)
+#define TMC_FFCR_FLUSHMAN BIT(6)
+#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
+#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+
+#define TMC_STS_TRIGGERED_BIT 2
+#define TMC_FFCR_FLUSHMAN_BIT 6
+
+enum tmc_config_type {
+ TMC_CONFIG_TYPE_ETB,
+ TMC_CONFIG_TYPE_ETR,
+ TMC_CONFIG_TYPE_ETF,
+};
+
+enum tmc_mode {
+ TMC_MODE_CIRCULAR_BUFFER,
+ TMC_MODE_SOFTWARE_FIFO,
+ TMC_MODE_HARDWARE_FIFO,
+};
+
+enum tmc_mem_intf_width {
+ TMC_MEM_INTF_WIDTH_32BITS = 0x2,
+ TMC_MEM_INTF_WIDTH_64BITS = 0x3,
+ TMC_MEM_INTF_WIDTH_128BITS = 0x4,
+ TMC_MEM_INTF_WIDTH_256BITS = 0x5,
+};
+
+/**
+ * struct tmc_drvdata - specifics associated with a TMC component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated with this component.
+ * @csdev: component vitals needed by the framework.
+ * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
+ * @clk: the clock this component is associated with.
+ * @spinlock: serialises accesses to this structure.
+ * @read_count: manages preparation of buffer for reading.
+ * @reading: whether the buffer is currently being read via @miscdev.
+ * @buf: area of memory where trace data gets sent.
+ * @paddr: DMA start location in RAM.
+ * @vaddr: virtual representation of @paddr.
+ * @size: @buf size.
+ * @enable: this TMC is being used.
+ * @config_type: TMC variant, must be of type @tmc_config_type.
+ * @trigger_cntr: amount of words to store after a trigger.
+ */
+struct tmc_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct miscdevice miscdev;
+ struct clk *clk;
+ spinlock_t spinlock;
+ int read_count;
+ bool reading;
+ char *buf;
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ u32 size;
+ bool enable;
+ enum tmc_config_type config_type;
+ u32 trigger_cntr;
+};
+
+static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
+{
+ /* Ensure formatter, unformatter and hardware fifo are empty */
+ if (coresight_timeout(drvdata->base,
+ TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+ TMC_STS);
+ }
+}
+
+static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
+{
+ u32 ffcr;
+
+ ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+ ffcr |= TMC_FFCR_FLUSHMAN;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+ /* Ensure flush completes */
+ if (coresight_timeout(drvdata->base,
+ TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
+ dev_err(drvdata->dev,
+ "timeout observed when probing at offset %#x\n",
+ TMC_FFCR);
+ }
+
+ tmc_wait_for_ready(drvdata);
+}
+
+static void tmc_enable_hw(struct tmc_drvdata *drvdata)
+{
+ writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
+}
+
+static void tmc_disable_hw(struct tmc_drvdata *drvdata)
+{
+ writel_relaxed(0x0, drvdata->base + TMC_CTL);
+}
+
+static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+ /* Zero out the memory to help with debug */
+ memset(drvdata->buf, 0, drvdata->size);
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+ TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+ TMC_FFCR_TRIGON_TRIGIN,
+ drvdata->base + TMC_FFCR);
+
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+{
+ u32 axictl;
+
+ /* Zero out the memory to help with debug */
+ memset(drvdata->vaddr, 0, drvdata->size);
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
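+ /*
+ * Program the AXI control register: maximum write burst length,
+ * scatter-gather mode disabled and transactions marked non-secure.
+ */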
+ axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+ axictl |= TMC_AXICTL_WR_BURST_LEN;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl = (axictl &
+ ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
+ TMC_AXICTL_PROT_CTL_B1;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+ writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
+ writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+ TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+ TMC_FFCR_TRIGON_TRIGIN,
+ drvdata->base + TMC_FFCR);
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
+ drvdata->base + TMC_FFCR);
+ writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+{
+ int ret;
+ unsigned long flags;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ clk_disable_unprepare(drvdata->clk);
+ return -EBUSY;
+ }
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_enable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ tmc_etr_enable_hw(drvdata);
+ } else {
+ if (mode == TMC_MODE_CIRCULAR_BUFFER)
+ tmc_etb_enable_hw(drvdata);
+ else
+ tmc_etf_enable_hw(drvdata);
+ }
+ drvdata->enable = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC enabled\n");
+ return 0;
+}
+
+static int tmc_enable_sink(struct coresight_device *csdev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
+}
+
+static int tmc_enable_link(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
+}
+
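+/*
+ * Drain the trace RAM through the RAM Read Data register, copying the
+ * data into the local buffer a memory-width at a time until the
+ * end-of-data marker (0xFFFFFFFF) is returned.
+ */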
+static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
+{
+ enum tmc_mem_intf_width memwidth;
+ u8 memwords;
+ char *bufp;
+ u32 read_data;
+ int i;
+
+ memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
+ if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
+ memwords = 1;
+ else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
+ memwords = 2;
+ else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
+ memwords = 4;
+ else
+ memwords = 8;
+
+ bufp = drvdata->buf;
+ while (1) {
+ for (i = 0; i < memwords; i++) {
+ read_data = readl_relaxed(drvdata->base + TMC_RRD);
+ if (read_data == 0xFFFFFFFF)
+ return;
+ memcpy(bufp, &read_data, 4);
+ bufp += 4;
+ }
+ }
+}
+
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_etb_dump_hw(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
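+/*
+ * Work out where valid trace data starts: if the buffer has wrapped
+ * (STS full bit set) the oldest data sits at the current write pointer,
+ * otherwise it sits at the beginning of the buffer.
+ */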
+static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+{
+ u32 rwp, val;
+
+ rwp = readl_relaxed(drvdata->base + TMC_RWP);
+ val = readl_relaxed(drvdata->base + TMC_STS);
+
+ /* How much memory do we still have */
+ if (val & BIT(0))
+ drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
+ else
+ drvdata->buf = drvdata->vaddr;
+}
+
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_etr_dump_hw(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading)
+ goto out;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_disable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ tmc_etr_disable_hw(drvdata);
+ } else {
+ if (mode == TMC_MODE_CIRCULAR_BUFFER)
+ tmc_etb_disable_hw(drvdata);
+ else
+ tmc_etf_disable_hw(drvdata);
+ }
+out:
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "TMC disabled\n");
+}
+
+static void tmc_disable_sink(struct coresight_device *csdev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
+}
+
+static void tmc_disable_link(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
+}
+
+static const struct coresight_ops_sink tmc_sink_ops = {
+ .enable = tmc_enable_sink,
+ .disable = tmc_disable_sink,
+};
+
+static const struct coresight_ops_link tmc_link_ops = {
+ .enable = tmc_enable_link,
+ .disable = tmc_disable_link,
+};
+
+static const struct coresight_ops tmc_etb_cs_ops = {
+ .sink_ops = &tmc_sink_ops,
+};
+
+static const struct coresight_ops tmc_etr_cs_ops = {
+ .sink_ops = &tmc_sink_ops,
+};
+
+static const struct coresight_ops tmc_etf_cs_ops = {
+ .sink_ops = &tmc_sink_ops,
+ .link_ops = &tmc_link_ops,
+};
+
+static int tmc_read_prepare(struct tmc_drvdata *drvdata)
+{
+ int ret;
+ unsigned long flags;
+ enum tmc_mode mode;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->enable)
+ goto out;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_disable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ tmc_etr_disable_hw(drvdata);
+ } else {
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+ tmc_etb_disable_hw(drvdata);
+ } else {
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+out:
+ drvdata->reading = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC read start\n");
+ return 0;
+err:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return ret;
+}
+
+static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
+{
+ unsigned long flags;
+ enum tmc_mode mode;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->enable)
+ goto out;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ tmc_etb_enable_hw(drvdata);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ tmc_etr_enable_hw(drvdata);
+ } else {
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode == TMC_MODE_CIRCULAR_BUFFER)
+ tmc_etb_enable_hw(drvdata);
+ }
+out:
+ drvdata->reading = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC read end\n");
+}
+
+static int tmc_open(struct inode *inode, struct file *file)
+{
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata, miscdev);
+ int ret = 0;
+
+ if (drvdata->read_count++)
+ goto out;
+
+ ret = tmc_read_prepare(drvdata);
+ if (ret) {
+ drvdata->read_count--;
+ return ret;
+ }
+out:
+ nonseekable_open(inode, file);
+
+ dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ return 0;
+}
+
+static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
+ loff_t *ppos)
+{
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata, miscdev);
+ char *bufp = drvdata->buf + *ppos;
+
+ if (*ppos + len > drvdata->size)
+ len = drvdata->size - *ppos;
+
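+ /*
+ * The ETR uses a circular DMA buffer: wrap the read position around
+ * the end of the buffer and clamp the length so a single copy never
+ * crosses the wrap point.
+ */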
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (bufp == (char *)(drvdata->vaddr + drvdata->size))
+ bufp = drvdata->vaddr;
+ else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
+ bufp -= drvdata->size;
+ if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
+ len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
+ }
+
+ if (copy_to_user(data, bufp, len)) {
+ dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
+ __func__, len, (int) (drvdata->size - *ppos));
+ return len;
+}
+
+static int tmc_release(struct inode *inode, struct file *file)
+{
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata, miscdev);
+
+ if (--drvdata->read_count) {
+ if (drvdata->read_count < 0) {
+ dev_err(drvdata->dev, "mismatched close\n");
+ drvdata->read_count = 0;
+ }
+ goto out;
+ }
+
+ tmc_read_unprepare(drvdata);
+out:
+ dev_dbg(drvdata->dev, "%s: released\n", __func__);
+ return 0;
+}
+
+static const struct file_operations tmc_fops = {
+ .owner = THIS_MODULE,
+ .open = tmc_open,
+ .read = tmc_read,
+ .release = tmc_release,
+ .llseek = no_llseek,
+};
+
+static ssize_t trigger_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->trigger_cntr;
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_cntr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->trigger_cntr = val;
+ return size;
+}
+static DEVICE_ATTR_RW(trigger_cntr);
+
+static struct attribute *coresight_etb_attrs[] = {
+ &dev_attr_trigger_cntr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etb);
+
+static struct attribute *coresight_etr_attrs[] = {
+ &dev_attr_trigger_cntr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etr);
+
+static struct attribute *coresight_etf_attrs[] = {
+ &dev_attr_trigger_cntr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etf);
+
+static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ u32 devid;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct tmc_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ drvdata->clk = adev->pclk;
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
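+ /* The TMC flavour (ETB, ETR or ETF) is encoded in bits [7:6] of DEVID */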
+ devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+ drvdata->config_type = BMVAL(devid, 6, 7);
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (np)
+ ret = of_property_read_u32(np,
+ "arm,buffer-size",
+ &drvdata->size);
+ if (ret)
+ drvdata->size = SZ_1M;
+ } else {
+ drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
+ }
+
+ clk_disable_unprepare(drvdata->clk);
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
+ &drvdata->paddr, GFP_KERNEL);
+ if (!drvdata->vaddr)
+ return -ENOMEM;
+
+ memset(drvdata->vaddr, 0, drvdata->size);
+ drvdata->buf = drvdata->vaddr;
+ } else {
+ drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
+ if (!drvdata->buf)
+ return -ENOMEM;
+ }
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto err_devm_kzalloc;
+ }
+
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ desc->type = CORESIGHT_DEV_TYPE_SINK;
+ desc->ops = &tmc_etb_cs_ops;
+ desc->groups = coresight_etb_groups;
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ desc->type = CORESIGHT_DEV_TYPE_SINK;
+ desc->ops = &tmc_etr_cs_ops;
+ desc->groups = coresight_etr_groups;
+ } else {
+ desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
+ desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
+ desc->ops = &tmc_etf_cs_ops;
+ desc->groups = coresight_etf_groups;
+ }
+
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_devm_kzalloc;
+ }
+
+ drvdata->miscdev.name = pdata->name;
+ drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+ drvdata->miscdev.fops = &tmc_fops;
+ ret = misc_register(&drvdata->miscdev);
+ if (ret)
+ goto err_misc_register;
+
+ dev_info(dev, "TMC initialized\n");
+ return 0;
+
+err_misc_register:
+ coresight_unregister(drvdata->csdev);
+err_devm_kzalloc:
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ dma_free_coherent(dev, drvdata->size,
+ drvdata->vaddr, drvdata->paddr);
+ return ret;
+}
+
+static int tmc_remove(struct amba_device *adev)
+{
+ struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
+
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ dma_free_coherent(drvdata->dev, drvdata->size,
+ drvdata->vaddr, drvdata->paddr);
+
+ return 0;
+}
+
+static struct amba_id tmc_ids[] = {
+ {
+ .id = 0x0003b961,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver tmc_driver = {
+ .drv = {
+ .name = "coresight-tmc",
+ .owner = THIS_MODULE,
+ },
+ .probe = tmc_probe,
+ .remove = tmc_remove,
+ .id_table = tmc_ids,
+};
+
+static int __init tmc_init(void)
+{
+ return amba_driver_register(&tmc_driver);
+}
+module_init(tmc_init);
+
+static void __exit tmc_exit(void)
+{
+ amba_driver_unregister(&tmc_driver);
+}
+module_exit(tmc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
new file mode 100644
index 000000000000..ae101082791a
--- /dev/null
+++ b/drivers/coresight/coresight-tpiu.c
@@ -0,0 +1,217 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define TPIU_SUPP_PORTSZ 0x000
+#define TPIU_CURR_PORTSZ 0x004
+#define TPIU_SUPP_TRIGMODES 0x100
+#define TPIU_TRIG_CNTRVAL 0x104
+#define TPIU_TRIG_MULT 0x108
+#define TPIU_SUPP_TESTPATM 0x200
+#define TPIU_CURR_TESTPATM 0x204
+#define TPIU_TEST_PATREPCNTR 0x208
+#define TPIU_FFSR 0x300
+#define TPIU_FFCR 0x304
+#define TPIU_FSYNC_CNTR 0x308
+#define TPIU_EXTCTL_INPORT 0x400
+#define TPIU_EXTCTL_OUTPORT 0x404
+#define TPIU_ITTRFLINACK 0xee4
+#define TPIU_ITTRFLIN 0xee8
+#define TPIU_ITATBDATA0 0xeec
+#define TPIU_ITATBCTR2 0xef0
+#define TPIU_ITATBCTR1 0xef4
+#define TPIU_ITATBCTR0 0xef8
+
+/** register definition **/
+/* FFCR - 0x304 */
+#define FFCR_FON_MAN BIT(6)
+
+/**
+ * struct tpiu_drvdata - specifics associated with a TPIU component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated with this component.
+ * @csdev: component vitals needed by the framework.
+ * @clk: the clock this component is associated with.
+ */
+struct tpiu_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+};
+
+static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* TODO: fill this up */
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tpiu_enable(struct coresight_device *csdev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ tpiu_enable_hw(drvdata);
+
+ dev_info(drvdata->dev, "TPIU enabled\n");
+ return 0;
+}
+
+static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* Clear the formatter control register */
+ writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+ /* Generate manual flush */
+ writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tpiu_disable(struct coresight_device *csdev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ tpiu_disable_hw(drvdata);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "TPIU disabled\n");
+}
+
+static const struct coresight_ops_sink tpiu_sink_ops = {
+ .enable = tpiu_enable,
+ .disable = tpiu_disable,
+};
+
+static const struct coresight_ops tpiu_cs_ops = {
+ .sink_ops = &tpiu_sink_ops,
+};
+
+static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct tpiu_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ drvdata->clk = adev->pclk;
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ /* Disable tpiu to support older devices */
+ tpiu_disable_hw(drvdata);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = CORESIGHT_DEV_TYPE_SINK;
+ desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
+ desc->ops = &tpiu_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "TPIU initialized\n");
+ return 0;
+}
+
+static int tpiu_remove(struct amba_device *adev)
+{
+ struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct amba_id tpiu_ids[] = {
+ {
+ .id = 0x0003b912,
+ .mask = 0x0003ffff,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver tpiu_driver = {
+ .drv = {
+ .name = "coresight-tpiu",
+ .owner = THIS_MODULE,
+ },
+ .probe = tpiu_probe,
+ .remove = tpiu_remove,
+ .id_table = tpiu_ids,
+};
+
+static int __init tpiu_init(void)
+{
+ return amba_driver_register(&tpiu_driver);
+}
+module_init(tpiu_init);
+
+static void __exit tpiu_exit(void)
+{
+ amba_driver_unregister(&tpiu_driver);
+}
+module_exit(tpiu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
new file mode 100644
index 000000000000..6e0181f84425
--- /dev/null
+++ b/drivers/coresight/coresight.c
@@ -0,0 +1,717 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+
+#include "coresight-priv.h"
+
+static DEFINE_MUTEX(coresight_mutex);
+
+static int coresight_id_match(struct device *dev, void *data)
+{
+ int trace_id, i_trace_id;
+ struct coresight_device *csdev, *i_csdev;
+
+ csdev = data;
+ i_csdev = to_coresight_device(dev);
+
+ /*
+ * No need to care about oneself and components that are not
+ * sources or not enabled
+ */
+ if (i_csdev == csdev || !i_csdev->enable ||
+ i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
+ return 0;
+
+ /* Get the source ID for both components */
+ trace_id = source_ops(csdev)->trace_id(csdev);
+ i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
+
+ /* All you need is one */
+ if (trace_id == i_trace_id)
+ return 1;
+
+ return 0;
+}
+
+static int coresight_source_is_unique(struct coresight_device *csdev)
+{
+ int trace_id = source_ops(csdev)->trace_id(csdev);
+
+ /* this shouldn't happen */
+ if (trace_id < 0)
+ return 0;
+
+ return !bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_id_match);
+}
+
+static int coresight_find_link_inport(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *parent;
+ struct coresight_connection *conn;
+
+ parent = container_of(csdev->path_link.next,
+ struct coresight_device, path_link);
+
+ for (i = 0; i < parent->nr_outport; i++) {
+ conn = &parent->conns[i];
+ if (conn->child_dev == csdev)
+ return conn->child_port;
+ }
+
+ dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
+ dev_name(&parent->dev), dev_name(&csdev->dev));
+
+ return 0;
+}
+
+static int coresight_find_link_outport(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *child;
+ struct coresight_connection *conn;
+
+ child = container_of(csdev->path_link.prev,
+ struct coresight_device, path_link);
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conn = &csdev->conns[i];
+ if (conn->child_dev == child)
+ return conn->outport;
+ }
+
+ dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
+ dev_name(&csdev->dev), dev_name(&child->dev));
+
+ return 0;
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (!csdev->enable) {
+ if (sink_ops(csdev)->enable) {
+ ret = sink_ops(csdev)->enable(csdev);
+ if (ret)
+ return ret;
+ }
+ csdev->enable = true;
+ }
+
+ atomic_inc(csdev->refcnt);
+
+ return 0;
+}
+
+static void coresight_disable_sink(struct coresight_device *csdev)
+{
+ if (atomic_dec_return(csdev->refcnt) == 0) {
+ if (sink_ops(csdev)->disable) {
+ sink_ops(csdev)->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+}
+
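+/*
+ * Links are reference counted per port: a merging link (funnel) counts
+ * users on its input port, a splitting link (replicator) on its output
+ * port, and the hardware is only programmed for the first user.
+ */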
+static int coresight_enable_link(struct coresight_device *csdev)
+{
+ int ret;
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+ link_subtype = csdev->subtype.link_subtype;
+
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ refport = inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ refport = outport;
+ else
+ refport = 0;
+
+ if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
+ if (link_ops(csdev)->enable) {
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (ret)
+ return ret;
+ }
+ }
+
+ csdev->enable = true;
+
+ return 0;
+}
+
+static void coresight_disable_link(struct coresight_device *csdev)
+{
+ int i, nr_conns;
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+ link_subtype = csdev->subtype.link_subtype;
+
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
+ refport = inport;
+ nr_conns = csdev->nr_inport;
+ } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
+ refport = outport;
+ nr_conns = csdev->nr_outport;
+ } else {
+ refport = 0;
+ nr_conns = 1;
+ }
+
+ if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
+ if (link_ops(csdev)->disable)
+ link_ops(csdev)->disable(csdev, inport, outport);
+ }
+
+ for (i = 0; i < nr_conns; i++)
+ if (atomic_read(&csdev->refcnt[i]) != 0)
+ return;
+
+ csdev->enable = false;
+}
+
+static int coresight_enable_source(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (!coresight_source_is_unique(csdev)) {
+ dev_warn(&csdev->dev, "traceID %d not unique\n",
+ source_ops(csdev)->trace_id(csdev));
+ return -EINVAL;
+ }
+
+ if (!csdev->enable) {
+ if (source_ops(csdev)->enable) {
+ ret = source_ops(csdev)->enable(csdev);
+ if (ret)
+ return ret;
+ }
+ csdev->enable = true;
+ }
+
+ atomic_inc(csdev->refcnt);
+
+ return 0;
+}
+
+static void coresight_disable_source(struct coresight_device *csdev)
+{
+ if (atomic_dec_return(csdev->refcnt) == 0) {
+ if (source_ops(csdev)->disable) {
+ source_ops(csdev)->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+}
+
+static int coresight_enable_path(struct list_head *path)
+{
+ int ret = 0;
+ struct coresight_device *cd;
+
+ list_for_each_entry(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ ret = coresight_enable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ /*
+ * Don't enable the source just yet - this needs to
+ * happen at the very end when all links and sink
+ * along the path have been configured properly.
+ */
+ ;
+ } else {
+ ret = coresight_enable_link(cd);
+ }
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ list_for_each_entry_continue_reverse(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ ;
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+
+ return ret;
+}
+
+static int coresight_disable_path(struct list_head *path)
+{
+ struct coresight_device *cd;
+
+ list_for_each_entry_reverse(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ /*
+ * The source has already been stopped, no need
+ * to do it again here.
+ */
+ ;
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+
+ return 0;
+}
+
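+/*
+ * Recursively walk the output connections of @csdev; whenever an
+ * activated sink is reached, enable or disable every device gathered
+ * on @path as a single unit.
+ */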
+static int coresight_build_paths(struct coresight_device *csdev,
+ struct list_head *path,
+ bool enable)
+{
+ int i, ret = -EINVAL;
+ struct coresight_connection *conn;
+
+ list_add(&csdev->path_link, path);
+
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) {
+ if (enable)
+ ret = coresight_enable_path(path);
+ else
+ ret = coresight_disable_path(path);
+ } else {
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conn = &csdev->conns[i];
+ if (coresight_build_paths(conn->child_dev,
+ path, enable) == 0)
+ ret = 0;
+ }
+ }
+
+ if (list_first_entry(path, struct coresight_device, path_link) != csdev)
+ dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+
+ list_del(&csdev->path_link);
+
+ return ret;
+}
+
+int coresight_enable(struct coresight_device *csdev)
+{
+ int ret = 0;
+ LIST_HEAD(path);
+
+ mutex_lock(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ ret = -EINVAL;
+ dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+ goto out;
+ }
+ if (csdev->enable)
+ goto out;
+
+ ret = coresight_build_paths(csdev, &path, true);
+ if (ret) {
+ dev_err(&csdev->dev, "building path(s) failed\n");
+ goto out;
+ }
+
+ ret = coresight_enable_source(csdev);
+ if (ret)
+ dev_err(&csdev->dev, "source enable failed\n");
+out:
+ mutex_unlock(&coresight_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(coresight_enable);
+
+void coresight_disable(struct coresight_device *csdev)
+{
+ LIST_HEAD(path);
+
+ mutex_lock(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+ goto out;
+ }
+ if (!csdev->enable)
+ goto out;
+
+ coresight_disable_source(csdev);
+ if (coresight_build_paths(csdev, &path, false))
+ dev_err(&csdev->dev, "releasing path(s) failed\n");
+
+out:
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_disable);
+
+static ssize_t enable_sink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
+}
+
+static ssize_t enable_sink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ csdev->activated = true;
+ else
+ csdev->activated = false;
+
+ return size;
+
+}
+static DEVICE_ATTR_RW(enable_sink);
+
+static ssize_t enable_source_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+}
+
+static ssize_t enable_source_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val) {
+ ret = coresight_enable(csdev);
+ if (ret)
+ return ret;
+ } else {
+ coresight_disable(csdev);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_source);
+
+static struct attribute *coresight_sink_attrs[] = {
+ &dev_attr_enable_sink.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute *coresight_source_attrs[] = {
+ &dev_attr_enable_source.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_source);
+
+static struct device_type coresight_dev_type[] = {
+ {
+ .name = "none",
+ },
+ {
+ .name = "sink",
+ .groups = coresight_sink_groups,
+ },
+ {
+ .name = "link",
+ },
+ {
+ .name = "linksink",
+ .groups = coresight_sink_groups,
+ },
+ {
+ .name = "source",
+ .groups = coresight_source_groups,
+ },
+};
+
+static void coresight_device_release(struct device *dev)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ kfree(csdev);
+}
+
+static int coresight_orphan_match(struct device *dev, void *data)
+{
+ int i;
+ bool still_orphan = false;
+ struct coresight_device *csdev, *i_csdev;
+ struct coresight_connection *conn;
+
+ csdev = data;
+ i_csdev = to_coresight_device(dev);
+
+ /* No need to check oneself */
+ if (csdev == i_csdev)
+ return 0;
+
+ /* Move on to another component if no connection is orphan */
+ if (!i_csdev->orphan)
+ return 0;
+ /*
+ * Cycle through all the connections of that component. If we find
+ * an orphan connection whose name matches @csdev, link it.
+ */
+ for (i = 0; i < i_csdev->nr_outport; i++) {
+ conn = &i_csdev->conns[i];
+
+ /* We have found at least one orphan connection */
+ if (conn->child_dev == NULL) {
+ /* Does it match this newly added device? */
+ if (!strcmp(dev_name(&csdev->dev), conn->child_name))
+ conn->child_dev = csdev;
+ } else {
+ /* Too bad, this component still has an orphan */
+ still_orphan = true;
+ }
+ }
+
+ i_csdev->orphan = still_orphan;
+
+ /*
+ * Returning '0' ensures that all known components on the
+ * bus will be checked.
+ */
+ return 0;
+}
+
+static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+{
+ /*
+ * No need to check for a return value as orphan connection(s)
+ * are hooked-up with each newly added component.
+ */
+ bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_orphan_match);
+}
+
+
+static int coresight_name_match(struct device *dev, void *data)
+{
+ char *to_match;
+ struct coresight_device *i_csdev;
+
+ to_match = data;
+ i_csdev = to_coresight_device(dev);
+
+ if (!strcmp(to_match, dev_name(&i_csdev->dev)))
+ return 1;
+
+ return 0;
+}
+
+static void coresight_fixup_device_conns(struct coresight_device *csdev)
+{
+ int i;
+ struct device *dev = NULL;
+ struct coresight_connection *conn;
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conn = &csdev->conns[i];
+ dev = bus_find_device(&coresight_bustype, NULL,
+ (void *)conn->child_name,
+ coresight_name_match);
+
+ if (dev) {
+ conn->child_dev = to_coresight_device(dev);
+ } else {
+ csdev->orphan = true;
+ conn->child_dev = NULL;
+ }
+ }
+}
+
+/**
+ * coresight_timeout - loop until a bit has changed to a specific state.
+ * @addr: base address of the area of interest.
+ * @offset: address of a register, starting from @addr.
+ * @position: the position of the bit of interest.
+ * @value: the value the bit should have.
+ *
+ * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
+ * TIMEOUT_US has elapsed, whichever happens first.
+ */
+
+int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
+{
+ int i;
+ u32 val;
+
+ for (i = TIMEOUT_US; i > 0; i--) {
+ val = __raw_readl(addr + offset);
+ /* waiting on the bit to go from 0 to 1 */
+ if (value) {
+ if (val & BIT(position))
+ return 0;
+ /* waiting on the bit to go from 1 to 0 */
+ } else {
+ if (!(val & BIT(position)))
+ return 0;
+ }
+
+ /*
+ * Delay is arbitrary - the specification doesn't say how long
+ * we are expected to wait. Extra check required to make sure
+ * we don't wait needlessly on the last iteration.
+ */
+ if (i - 1)
+ udelay(1);
+ }
+
+ return -EAGAIN;
+}
+
+struct bus_type coresight_bustype = {
+ .name = "coresight",
+};
+
+static int __init coresight_init(void)
+{
+ return bus_register(&coresight_bustype);
+}
+postcore_initcall(coresight_init);
+
+struct coresight_device *coresight_register(struct coresight_desc *desc)
+{
+ int i;
+ int ret;
+ int link_subtype;
+ int nr_refcnts = 1;
+ atomic_t *refcnts = NULL;
+ struct coresight_device *csdev;
+ struct coresight_connection *conns;
+
+ csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
+ if (!csdev) {
+ ret = -ENOMEM;
+ goto err_kzalloc_csdev;
+ }
+
+ if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
+ desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ link_subtype = desc->subtype.link_subtype;
+
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ nr_refcnts = desc->pdata->nr_inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ nr_refcnts = desc->pdata->nr_outport;
+ }
+
+ refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
+ if (!refcnts) {
+ ret = -ENOMEM;
+ goto err_kzalloc_refcnts;
+ }
+
+ csdev->refcnt = refcnts;
+
+ csdev->nr_inport = desc->pdata->nr_inport;
+ csdev->nr_outport = desc->pdata->nr_outport;
+ conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
+ if (!conns) {
+ ret = -ENOMEM;
+ goto err_kzalloc_conns;
+ }
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conns[i].outport = desc->pdata->outports[i];
+ conns[i].child_name = desc->pdata->child_names[i];
+ conns[i].child_port = desc->pdata->child_ports[i];
+ }
+
+ csdev->conns = conns;
+
+ csdev->type = desc->type;
+ csdev->subtype = desc->subtype;
+ csdev->ops = desc->ops;
+ csdev->orphan = false;
+
+ csdev->dev.type = &coresight_dev_type[desc->type];
+ csdev->dev.groups = desc->groups;
+ csdev->dev.parent = desc->dev;
+ csdev->dev.release = coresight_device_release;
+ csdev->dev.bus = &coresight_bustype;
+ dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+
+ ret = device_register(&csdev->dev);
+ if (ret)
+ goto err_device_register;
+
+ mutex_lock(&coresight_mutex);
+
+ coresight_fixup_device_conns(csdev);
+ coresight_fixup_orphan_conns(csdev);
+
+ mutex_unlock(&coresight_mutex);
+
+ return csdev;
+
+err_device_register:
+ kfree(conns);
+err_kzalloc_conns:
+ kfree(refcnts);
+err_kzalloc_refcnts:
+ kfree(csdev);
+err_kzalloc_csdev:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(coresight_register);
+
+void coresight_unregister(struct coresight_device *csdev)
+{
+ mutex_lock(&coresight_mutex);
+
+ kfree(csdev->conns);
+ device_unregister(&csdev->dev);
+
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
new file mode 100644
index 000000000000..5030c0734508
--- /dev/null
+++ b/drivers/coresight/of_coresight.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <asm/smp_plat.h>
+
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static struct device *
+of_coresight_get_endpoint_device(struct device_node *endpoint)
+{
+ struct device *dev = NULL;
+
+ /*
+ * If we have a non-configurable replicator, it will be found on the
+ * platform bus.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ endpoint, of_dev_node_match);
+ if (dev)
+ return dev;
+
+ /*
+ * We have a configurable component - circle through the AMBA bus
+ * looking for the device that matches the endpoint node.
+ */
+ return bus_find_device(&amba_bustype, NULL,
+ endpoint, of_dev_node_match);
+}
+
+static struct device_node *of_get_coresight_endpoint(
+ const struct device_node *parent, struct device_node *prev)
+{
+ struct device_node *node = of_graph_get_next_endpoint(parent, prev);
+
+ of_node_put(prev);
+ return node;
+}
+
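+/*
+ * Count the endpoints of a component: those flagged with the
+ * "slave-mode" property are input ports, all others are output ports.
+ */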
+static void of_coresight_get_ports(struct device_node *node,
+ int *nr_inport, int *nr_outport)
+{
+ struct device_node *ep = NULL;
+ int in = 0, out = 0;
+
+ do {
+ ep = of_get_coresight_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ if (of_property_read_bool(ep, "slave-mode"))
+ in++;
+ else
+ out++;
+
+ } while (ep);
+
+ *nr_inport = in;
+ *nr_outport = out;
+}
+
+static int of_coresight_alloc_memory(struct device *dev,
+ struct coresight_platform_data *pdata)
+{
+ /* List of output port on this component */
+ pdata->outports = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->outports),
+ GFP_KERNEL);
+ if (!pdata->outports)
+ return -ENOMEM;
+
+ /* Children connected to this component via @outport */
+ pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->child_names),
+ GFP_KERNEL);
+ if (!pdata->child_names)
+ return -ENOMEM;
+
+ /* Port number on the child this component is connected to */
+ pdata->child_ports = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->child_ports),
+ GFP_KERNEL);
+ if (!pdata->child_ports)
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node)
+{
+ int i = 0, ret = 0;
+ struct coresight_platform_data *pdata;
+ struct of_endpoint endpoint, rendpoint;
+ struct device *rdev;
+ struct device_node *cpu;
+ struct device_node *ep = NULL;
+ struct device_node *rparent = NULL;
+ struct device_node *rport = NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ /* Use device name as debugfs handle */
+ pdata->name = dev_name(dev);
+
+ /* Get the number of input and output port for this component */
+ of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
+
+ if (pdata->nr_outport) {
+ ret = of_coresight_alloc_memory(dev, pdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Iterate through each port to discover topology */
+ do {
+ /* Get a handle on a port */
+ ep = of_get_coresight_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ /*
+ * No need to deal with input ports; processing the output
+ * ports will deal with them.
+ */
+ if (of_find_property(ep, "slave-mode", NULL))
+ continue;
+
+ /* Get a handle on the local endpoint */
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+
+ if (ret)
+ continue;
+
+ /* The local out port number */
+ pdata->outports[i] = endpoint.id;
+
+ /*
+ * Get a handle on the remote port and parent
+ * attached to it.
+ */
+ rparent = of_graph_get_remote_port_parent(ep);
+ rport = of_graph_get_remote_port(ep);
+
+ if (!rparent || !rport)
+ continue;
+
+ if (of_graph_parse_endpoint(rport, &rendpoint))
+ continue;
+
+ rdev = of_coresight_get_endpoint_device(rparent);
+ if (!rdev)
+ continue;
+
+ pdata->child_names[i] = dev_name(rdev);
+ pdata->child_ports[i] = rendpoint.id;
+
+ i++;
+ } while (ep);
+ }
+
+ /* Affinity defaults to CPU0 */
+ pdata->cpu = 0;
+ cpu = of_parse_phandle(node, "cpu", 0);
+ if (cpu) {
+ const u32 *mpidr;
+ int len, index;
+
+ mpidr = of_get_property(cpu, "reg", &len);
+ if (mpidr && len == 4) {
+ index = get_logical_index(be32_to_cpup(mpidr));
+ if (index != -EINVAL)
+ pdata->cpu = index;
+ }
+ }
+
+ return pdata;
+}
+EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index ef0b3f1324d5..36d91dba2965 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -106,7 +106,6 @@ static int generic_bL_remove(struct platform_device *pdev)
static struct platform_driver generic_bL_platdrv = {
.driver = {
.name = "arm-bL-cpufreq-dt",
- .owner = THIS_MODULE,
},
.probe = generic_bL_probe,
.remove = generic_bL_remove,
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 9bc2720628a4..f56147a1daed 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -197,7 +197,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
if (ret) {
- pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
+ pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
return ret;
}
@@ -400,7 +400,6 @@ static int dt_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver dt_cpufreq_platdrv = {
.driver = {
.name = "cpufreq-dt",
- .owner = THIS_MODULE,
},
.probe = dt_cpufreq_probe,
.remove = dt_cpufreq_remove,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 28a16dc6e02e..7e336d20c184 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -169,7 +169,6 @@ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
- .owner = THIS_MODULE,
},
.remove = __exit_p(davinci_cpufreq_remove),
};
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 4bebc1b5db48..5c3ec1dd4921 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -69,7 +69,6 @@ static int dbx500_cpufreq_probe(struct platform_device *pdev)
static struct platform_driver dbx500_cpufreq_plat_driver = {
.driver = {
.name = "cpufreq-ux500",
- .owner = THIS_MODULE,
},
.probe = dbx500_cpufreq_probe,
};
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 1e0ec57bf6e3..f99a0b0b7c06 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -211,7 +211,6 @@ err_vdd_arm:
static struct platform_driver exynos_cpufreq_platdrv = {
.driver = {
.name = "exynos-cpufreq",
- .owner = THIS_MODULE,
},
.probe = exynos_cpufreq_probe,
};
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 27a57ed9eb2c..21a90ed7f3d8 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -442,7 +442,6 @@ static int exynos_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver exynos_cpufreq_platdrv = {
.driver = {
.name = "exynos5440-cpufreq",
- .owner = THIS_MODULE,
.of_match_table = exynos_cpufreq_match,
},
.probe = exynos_cpufreq_probe,
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 5da1d131f770..380a90d3c57e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -357,7 +357,6 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver imx6q_cpufreq_platdrv = {
.driver = {
.name = "imx6q-cpufreq",
- .owner = THIS_MODULE,
},
.probe = imx6q_cpufreq_probe,
.remove = imx6q_cpufreq_remove,
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 6bd69adc3c5e..129e266f7621 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -226,7 +226,6 @@ static const struct of_device_id integrator_cpufreq_match[] = {
static struct platform_driver integrator_cpufreq_driver = {
.driver = {
.name = "integrator-cpufreq",
- .owner = THIS_MODULE,
.of_match_table = integrator_cpufreq_match,
},
.remove = __exit_p(integrator_cpufreq_remove),
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 7906d4acfe40..be42f103db60 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -183,7 +183,6 @@ static struct platform_driver kirkwood_cpufreq_platform_driver = {
.remove = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 9fa177206032..fc897babab55 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -130,7 +130,6 @@ MODULE_DEVICE_TABLE(platform, platform_device_ids);
static struct platform_driver platform_driver = {
.driver = {
.name = "loongson2_cpufreq",
- .owner = THIS_MODULE,
},
.id_table = platform_device_ids,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 5f69c9aa703c..e3866e0d5bf8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -195,7 +195,6 @@ static int omap_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver omap_cpufreq_platdrv = {
.driver = {
.name = "omap-cpufreq",
- .owner = THIS_MODULE,
},
.probe = omap_cpufreq_probe,
.remove = omap_cpufreq_remove,
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 567caa6313ff..b0dac7d6ba31 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -656,7 +656,6 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
static struct platform_driver s5pv210_cpufreq_platdrv = {
.driver = {
.name = "s5pv210-cpufreq",
- .owner = THIS_MODULE,
},
.probe = s5pv210_cpufreq_probe,
};
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 38678396636d..4894924a3ca2 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -236,7 +236,6 @@ out_put_node:
static struct platform_driver spear_cpufreq_platdrv = {
.driver = {
.name = "spear-cpufreq",
- .owner = THIS_MODULE,
},
.probe = spear_cpufreq_probe,
};
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 7f7c9c01b44e..433e93fd4900 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -60,7 +60,6 @@ static int ve_spc_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver ve_spc_cpufreq_platdrv = {
.driver = {
.name = "vexpress-spc-cpufreq",
- .owner = THIS_MODULE,
},
.probe = ve_spc_cpufreq_probe,
.remove = ve_spc_cpufreq_remove,
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
index 1964ff07117c..aae7bfc1ea36 100644
--- a/drivers/cpuidle/cpuidle-at91.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -60,7 +60,6 @@ static int at91_cpuidle_probe(struct platform_device *dev)
static struct platform_driver at91_cpuidle_driver = {
.driver = {
.name = "cpuidle-at91",
- .owner = THIS_MODULE,
},
.probe = at91_cpuidle_probe,
};
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 6541b0bfdfaa..9445e6cc02be 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -72,7 +72,6 @@ static int calxeda_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver calxeda_cpuidle_plat_driver = {
.driver = {
.name = "cpuidle-calxeda",
- .owner = THIS_MODULE,
},
.probe = calxeda_cpuidle_probe,
};
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
index 5243811daa6e..18a7f7380508 100644
--- a/drivers/cpuidle/cpuidle-clps711x.c
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -54,7 +54,6 @@ static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver clps711x_cpuidle_driver = {
.driver = {
.name = CLPS711X_CPUIDLE_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe);
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 64d12a855ec6..4003a3160865 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -74,7 +74,6 @@ static struct platform_driver exynos_cpuidle_driver = {
.probe = exynos_cpuidle_probe,
.driver = {
.name = "exynos_cpuidle",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index d88f8d7c2143..cea0a6c4b1db 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -77,7 +77,6 @@ static struct platform_driver kirkwood_cpuidle_driver = {
.remove = kirkwood_cpuidle_remove,
.driver = {
.name = "kirkwood_cpuidle",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index dd4c176df2a3..38e68618513a 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -114,7 +114,6 @@ static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver armadaxp_cpuidle_plat_driver = {
.driver = {
.name = "cpuidle-armada-xp",
- .owner = THIS_MODULE,
},
.probe = mvebu_v7_cpuidle_probe,
};
@@ -124,7 +123,6 @@ module_platform_driver(armadaxp_cpuidle_plat_driver);
static struct platform_driver armada370_cpuidle_plat_driver = {
.driver = {
.name = "cpuidle-armada-370",
- .owner = THIS_MODULE,
},
.probe = mvebu_v7_cpuidle_probe,
};
@@ -134,7 +132,6 @@ module_platform_driver(armada370_cpuidle_plat_driver);
static struct platform_driver armada38x_cpuidle_plat_driver = {
.driver = {
.name = "cpuidle-armada-38x",
- .owner = THIS_MODULE,
},
.probe = mvebu_v7_cpuidle_probe,
};
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index 292e65a90308..66f81e410f0d 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -122,7 +122,6 @@ static int dbx500_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver dbx500_cpuidle_plat_driver = {
.driver = {
.name = "cpuidle-dbx500",
- .owner = THIS_MODULE,
},
.probe = dbx500_cpuidle_probe,
};
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 022dec86de8e..002b8c9f98f5 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -71,7 +71,6 @@ static int zynq_cpuidle_probe(struct platform_device *pdev)
static struct platform_driver zynq_cpuidle_driver = {
.driver = {
.name = "cpuidle-zynq",
- .owner = THIS_MODULE,
},
.probe = zynq_cpuidle_probe,
};
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index e4c6c58fbb03..d02b77150070 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1288,7 +1288,6 @@ static const struct of_device_id crypto4xx_match[] = {
static struct platform_driver crypto4xx_driver = {
.driver = {
.name = "crypto4xx",
- .owner = THIS_MODULE,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index a083474991ab..53d1c330f8a8 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1473,7 +1473,6 @@ static struct platform_driver atmel_aes_driver = {
.remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_aes_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 9a4f69eaa5e0..d94f07c78e19 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1529,7 +1529,6 @@ static struct platform_driver atmel_sha_driver = {
.remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_sha_dt_ids),
},
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index d3a9041938ea..5e7c896cde30 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1524,7 +1524,6 @@ static struct platform_driver atmel_tdes_driver = {
.remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
},
};
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index b099e33cb073..9ae149bddb6e 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -21,13 +21,13 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
-#include <linux/unaligned/access_ok.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <asm/unaligned.h>
#include <asm/dma.h>
#include <asm/portmux.h>
@@ -724,7 +724,6 @@ static struct platform_driver bfin_crypto_crc_driver = {
.resume = bfin_crypto_crc_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a80ea853701d..3187400daf31 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,6 +60,7 @@
#define CAAM_CRA_PRIORITY 3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH 16
@@ -70,17 +71,34 @@
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+/* Note: Nonce is counted in enckeylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+
#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
+#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
-#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -128,11 +146,13 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
/*
* For aead encrypt and decrypt, read iv for both classes
*/
-static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | ivsize);
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ivoffset << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}
/*
@@ -178,35 +198,60 @@ struct caam_ctx {
};
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline)
+ int keys_fit_inline, bool is_rfc3686)
{
+ u32 *nonce;
+ unsigned int enckeylen = ctx->enckeylen;
+
+ /*
+ * RFC3686 specific:
+ * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
if (keys_fit_inline) {
append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
ctx->split_key_len, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ ctx->split_key_pad_len, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
} else {
append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+ enckeylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline)
+ int keys_fit_inline, bool is_rfc3686)
{
u32 *key_jump_cmd;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_aead(desc, ctx, keys_fit_inline);
+ append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
set_jump_tgt_here(desc, key_jump_cmd);
}
@@ -406,10 +451,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+ const char *alg_name = crypto_tfm_alg_name(ctfm);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
+ bool keys_fit_inline;
u32 geniv, moveiv;
+ u32 ctx1_iv_off = 0;
u32 *desc;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
if (!ctx->authsize)
return 0;
@@ -419,18 +471,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return aead_null_set_sh_desc(aead);
/*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686)
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
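+ /*
+ * i.e. with the 4-byte RFC3686 nonce and 8-byte IV, CONTEXT1 bytes
+ * 16-19 hold the nonce, 20-27 the IV and 28-31 the block counter,
+ * so the IV is loaded at byte offset 20 and the counter at 28.
+ */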
+
+ /*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ keys_fit_inline = false;
if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
@@ -448,7 +518,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize);
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -482,14 +560,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
*/
keys_fit_inline = false;
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
@@ -506,9 +586,22 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize);
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
- append_dec_op1(desc, ctx->class1_alg_type);
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
/* Read and write cryptlen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -538,14 +631,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
*/
keys_fit_inline = false;
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
@@ -554,13 +649,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
/* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX |
- MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
/* Return to encryption */
append_operation(desc, ctx->class2_alg_type |
@@ -576,7 +674,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- /* Copy iv from class 1 ctx to class 2 fifo*/
+ /* Copy iv from outfifo to class 2 fifo */
moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
@@ -584,6 +682,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -630,6 +736,912 @@ static int aead_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd,
+ *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
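+ /*
+ * At this point REG1 = assoclen, REG2 = assoclen + cryptlen and
+ * REG3 = cryptlen; the variable sequence lengths below are derived
+ * from these registers.
+ */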
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 7);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to IV reading - IV is the only input data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* read IV - it is the only input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
+ /* write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ /* if assoclen is ZERO, skip reading assoc data */
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 4);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to ICV reading */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4106_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to OFIFO */
+ write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt and generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
+ FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, move_cmd);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store generated IV and encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+ u32 *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait for the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = (seqinlen - ivsize) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
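+ /*
+ * RFC4543 (AES-GMAC) authenticates the payload without encrypting it,
+ * so the payload is read as AAD for the GHASH and then moved to the
+ * output FIFO unchanged further down.
+ */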
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait for the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4543_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move generated IV to Math1 register */
+ append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Overwrite blank immediate AES-GMAC IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Copy generated IV to OFIFO */
+ append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
u32 authkeylen)
{
@@ -703,20 +1715,154 @@ badkey:
return -EINVAL;
}
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
int ret = 0;
u32 *key_jump_cmd;
u32 *desc;
+ u32 *nonce;
+ u32 geniv;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
@@ -729,7 +1875,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -739,11 +1885,31 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
set_jump_tgt_here(desc, key_jump_cmd);
/* Load iv */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
@@ -768,7 +1934,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -778,14 +1944,38 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
set_jump_tgt_here(desc, key_jump_cmd);
/* load IV */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
- append_dec_op1(desc, ctx->class1_alg_type);
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
/* Perform operation */
ablkcipher_append_src_dst(desc);
@@ -804,6 +1994,83 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ /* ablkcipher_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (crt->ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, crt->ivsize,
+ LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, (u32)1, LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
return ret;
}
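
Aside: the CONTEXT1 layout the comments in ablkcipher_setkey() describe can be pictured with a short standalone sketch. build_ctx1() and the 32-byte buffer are assumptions of the example; the offsets themselves (IV at byte 16, rfc3686 nonce placed in front of the IV, 32-bit big-endian counter starting at 1 right after the IV) follow the code above.

#include <stdint.h>
#include <string.h>

#define CTR_NONCE_LEN 4		/* RFC 3686 nonce */
#define CTR_IV_LEN    8		/* RFC 3686 per-request IV */

static void build_ctx1(uint8_t ctx1[32], const uint8_t *nonce,
		       const uint8_t *iv, int is_rfc3686)
{
	unsigned iv_off = 16;			/* CONTEXT1[255:128] = IV */

	memset(ctx1, 0, 32);
	if (is_rfc3686) {
		memcpy(ctx1 + iv_off, nonce, CTR_NONCE_LEN);
		iv_off += CTR_NONCE_LEN;	/* IV follows the nonce */
		memcpy(ctx1 + iv_off, iv, CTR_IV_LEN);
		ctx1[iv_off + CTR_IV_LEN + 3] = 1;	/* counter = 1, big endian */
	} else {
		memcpy(ctx1 + iv_off, iv, 16);	/* plain CTR: 16-byte IV */
	}
}
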
@@ -1088,6 +2355,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
+ bool is_gcm = false;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1106,11 +2374,19 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
desc_bytes(sh_desc), 1);
#endif
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = sg_dma_address(req->assoc);
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
@@ -1164,6 +2440,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
+ bool is_gcm = false;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1181,11 +2458,19 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
desc_bytes(sh_desc), 1);
#endif
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (contig & GIV_SRC_CONTIG) {
- src_dma = sg_dma_address(req->assoc);
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
@@ -1200,7 +2485,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
} else {
if (likely(req->src == req->dst)) {
dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- edesc->assoc_nents;
+ (edesc->assoc_nents +
+ (is_gcm ? 1 + edesc->src_nents : 0));
out_options = LDST_SGF;
} else {
dst_dma = edesc->sec4_sg_dma +
@@ -1272,6 +2558,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
}
/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (!edesc->src_nents) {
+ src_dma = sg_dma_address(req->src);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
+ if (iv_contig) {
+ dst_dma = edesc->iv_dma;
+ out_options = 0;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
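
Aside: the sequence-out pointer above covers req->nbytes + ivsize because the givencrypt result is laid out as the freshly generated IV followed by the ciphertext. A trivial consumer-side sketch; split_giv_output() is a made-up helper, not part of the patch.

#include <stdint.h>
#include <string.h>

static void split_giv_output(const uint8_t *out, unsigned ivsize,
			     unsigned nbytes, uint8_t *iv, uint8_t *ct)
{
	memcpy(iv, out, ivsize);		/* generated IV comes first */
	memcpy(ct, out + ivsize, nbytes);	/* ciphertext follows */
}
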
+
+/*
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -1292,6 +2626,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
+ bool is_gcm = false;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
@@ -1326,15 +2661,31 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return ERR_PTR(-ENOMEM);
}
- /* Check if data are contiguous */
- if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
- iv_dma || src_nents || iv_dma + ivsize !=
- sg_dma_address(req->src)) {
- all_contig = false;
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+ if (is_gcm)
+ all_contig = (!assoc_nents &&
+ iv_dma + ivsize == sg_dma_address(req->assoc) &&
+ !src_nents && sg_dma_address(req->assoc) +
+ req->assoclen == sg_dma_address(req->src));
+ else
+ all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
+ req->assoclen == iv_dma && !src_nents &&
+ iv_dma + ivsize == sg_dma_address(req->src));
+ if (!all_contig) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
sec4_sg_len = assoc_nents + 1 + src_nents;
}
+
sec4_sg_len += dst_nents;
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
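
Aside: the contiguity test above encodes one ordering rule per mode: GCM feeds the engine IV, AAD, text back to back, every other AEAD feeds AAD, IV, text, and a single contiguous run is only possible when each region ends exactly where the next begins. A generic standalone check; struct region and contiguous_in_order() are inventions for the example.

#include <stdbool.h>
#include <stdint.h>

struct region {
	uint64_t addr;	/* bus address of the region */
	unsigned len;
};

static bool contiguous_in_order(const struct region *r, unsigned n)
{
	for (unsigned i = 0; i + 1 < n; i++)
		if (r[i].addr + r[i].len != r[i + 1].addr)
			return false;
	return true;
}

/* GCM: order the array {iv, aad, text}; other AEADs: {aad, iv, text}. */
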
@@ -1361,14 +2712,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index = 0;
if (!all_contig) {
- sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ }
+
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ }
+
sg_to_sec4_sg_last(req->src,
(src_nents ? : 1),
edesc->sec4_sg +
@@ -1490,6 +2853,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
int ivsize = crypto_aead_ivsize(aead);
bool assoc_chained = false, src_chained = false, dst_chained = false;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ bool is_gcm = false;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
@@ -1516,24 +2880,53 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
return ERR_PTR(-ENOMEM);
}
- /* Check if data are contiguous */
- if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
- iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+
+ if (is_gcm) {
+ if (assoc_nents || iv_dma + ivsize !=
+ sg_dma_address(req->assoc) || src_nents ||
+ sg_dma_address(req->assoc) + req->assoclen !=
+ sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ } else {
+ if (assoc_nents ||
+ sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
+ src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ }
+
if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
contig &= ~GIV_DST_CONTIG;
- if (unlikely(req->src != req->dst)) {
- dst_nents = dst_nents ? : 1;
- sec4_sg_len += 1;
- }
+
if (!(contig & GIV_SRC_CONTIG)) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
sec4_sg_len += assoc_nents + 1 + src_nents;
- if (likely(req->src == req->dst))
+ if (req->src == req->dst &&
+ (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
contig &= ~GIV_DST_CONTIG;
}
- sec4_sg_len += dst_nents;
+
+ /*
+ * Add new sg entries for GCM output sequence.
+ * Expected output sequence: IV, encrypted text.
+ */
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
+ sec4_sg_len += 1 + src_nents;
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_len += 1 + dst_nents;
+ }
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -1559,18 +2952,36 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_index = 0;
if (!(contig & GIV_SRC_CONTIG)) {
- sg_to_sec4_sg(req->assoc, assoc_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += assoc_nents;
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
sec4_sg_index += src_nents;
}
+
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
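
Aside: when src == dst and the destination is not contiguous, the block above appends a GCM-specific output sequence of one IV entry followed by the payload entries, with the last entry flagged as the end of the table. A generic sketch with an invented entry format (struct sg_ent, SG_LAST), not the CAAM sec4_sg_entry layout.

#include <stdint.h>

struct sg_ent {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};
#define SG_LAST 0x1u

static unsigned build_giv_out_table(struct sg_ent *tbl,
				    uint64_t iv_dma, uint32_t ivsize,
				    const struct sg_ent *payload, unsigned n)
{
	unsigned i = 0;

	tbl[i++] = (struct sg_ent){ iv_dma, ivsize, 0 };	/* IV first */
	for (unsigned j = 0; j < n; j++)
		tbl[i++] = payload[j];				/* then the text */
	tbl[i - 1].flags |= SG_LAST;				/* mark the end */
	return i;
}
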
+
if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
@@ -1814,6 +3225,151 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
return ret;
}
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *greq,
+ int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct ablkcipher_request *req = &greq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
+
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ /*
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create scatterlist.
+ */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+ iv_contig = true;
+ else
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ sec4_sg_index = 0;
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sec4_sg_index += src_nents;
+ }
+
+ if (!iv_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
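
Aside: like the other allocators in this file, the function above carves the extended descriptor, the hardware job descriptor and the S/G table out of one allocation and derives the S/G pointer as base + sizeof(edesc) + desc_bytes. A reduced standalone model; struct my_edesc and edesc_alloc() are stand-ins, not the CAAM structures.

#include <stdint.h>
#include <stdlib.h>

struct my_edesc {
	int	 sg_bytes;
	void	*sg;		/* points into the same allocation */
	uint8_t	 hw_desc[];	/* flexible array: job descriptor lives here */
};

static struct my_edesc *edesc_alloc(int desc_bytes, int sg_bytes)
{
	struct my_edesc *e = malloc(sizeof(*e) + desc_bytes + sg_bytes);

	if (!e)
		return NULL;
	e->sg_bytes = sg_bytes;
	e->sg = (uint8_t *)e + sizeof(*e) + desc_bytes;	/* table sits after the desc */
	return e;
}
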
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor*/
+ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+ edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
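
Aside: the submission pattern above is the usual asynchronous-crypto one: on a successful enqueue the request is reported as in progress and the completion callback finishes it later, while on failure the submitter unwinds immediately. A toy model with invented names (struct job, queue_submit(), and -115 standing in for -EINPROGRESS).

#include <stdlib.h>

struct job {
	void (*done)(struct job *j, int status);	/* completion callback */
};

static int queue_submit(struct job *j)	/* toy queue: always accepts */
{
	(void)j;
	return 0;
}

static int submit_job(struct job *j)
{
	int ret = queue_submit(j);

	if (ret == 0)
		return -115;	/* "in progress": the callback will finish it */
	free(j);		/* submission failed: clean up synchronously */
	return ret;
}
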
+
#define template_aead template_u.aead
#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
@@ -2309,17 +3865,188 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ {
+ .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ /* Galois Counter Mode */
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = NULL,
+ .geniv = "<built-in>",
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
/* ablkcipher descriptor */
{
.name = "cbc(aes)",
.driver_name = "cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2330,12 +4057,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2346,17 +4074,53 @@ static struct caam_alg_template driver_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
}
};
@@ -2457,6 +4221,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
template->type;
switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f227922cea38..acd7743e2603 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
#include <crypto/algapi.h>
#include <crypto/null.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 31000c8c4a90..70f1e6f37336 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -720,7 +720,6 @@ MODULE_DEVICE_TABLE(of, caam_match);
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
- .owner = THIS_MODULE,
.of_match_table = caam_match,
},
.probe = caam_probe,
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 7eec20bb3849..9f79fd7bd4d7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -192,6 +192,8 @@ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
}
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
APPEND_CMD_LEN(seq_store, SEQ_STORE)
APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
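
Aside: the APPEND_CMD_LEN(seq_load, SEQ_LOAD) line added above generates the append_seq_load() helper used by the caamalg.c hunks. The demo below mirrors the token-pasting pattern with its own names; APPEND_CMD_LEN_DEMO, append_cmd() and the CMD_SEQ_LOAD value here are placeholders, not the real descriptor constants.

#include <stdint.h>

#define CMD_SEQ_LOAD 0x10000000u	/* placeholder opcode for the demo */

static void append_cmd(uint32_t *desc, uint32_t word)
{
	desc[++desc[0]] = word;		/* desc[0] tracks the current length */
}

#define APPEND_CMD_LEN_DEMO(cmd, op)					\
static void append_##cmd(uint32_t *desc, unsigned int len,		\
			 uint32_t options)				\
{									\
	append_cmd(desc, CMD_##op | len | options);			\
}

APPEND_CMD_LEN_DEMO(seq_load, SEQ_LOAD)
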
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6531054a44c8..66d73bf54166 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -213,27 +213,36 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
void (*report_ssed)(struct device *jrdev, const u32 status,
const char *error);
const char *error;
- } status_src[] = {
+ } status_src[16] = {
{ NULL, "No error" },
{ NULL, NULL },
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, NULL },
+ { NULL, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
};
u32 ssrc = status >> JRSTA_SSRC_SHIFT;
const char *error = status_src[ssrc].error;
/*
- * If there is no further error handling function, just
- * print the error code, error string and exit. Otherwise
- * call the handler function.
+ * If there is an error handling function, call it to report the error.
+ * Otherwise print the error source name.
*/
- if (!status_src[ssrc].report_ssed)
- dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
- else
+ if (status_src[ssrc].report_ssed)
status_src[ssrc].report_ssed(jrdev, status, error);
+ else if (error)
+ dev_err(jrdev, "%d: %s\n", ssrc, error);
+ else
+ dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
EXPORT_SYMBOL(caam_jr_strstatus);
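
Aside: the rewritten reporting logic sizes the table to 16 entries because the status source field is 4 bits wide, then prefers a registered handler, falls back to the source name, and finally to an unknown-source message. The same dispatch as a standalone sketch; SRC_SHIFT and the struct here are placeholders for the real JRSTA_* definitions.

#include <stdint.h>
#include <stdio.h>

#define SRC_SHIFT 28	/* placeholder; yields a value in 0..15 */

struct src_ent {
	void (*report)(uint32_t status, const char *name);
	const char *name;
};

static void report_status(uint32_t status, const struct src_ent tbl[16])
{
	uint32_t src = status >> SRC_SHIFT;
	const struct src_ent *e = &tbl[src];

	if (e->report)
		e->report(status, e->name);	/* detailed decoder */
	else if (e->name)
		printf("%u: %s\n", src, e->name);
	else
		printf("%u: unknown error source\n", src);
}
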
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4d18e27ffa9e..9b3ef1bc9bd7 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -181,8 +181,6 @@ static void caam_jr_dequeue(unsigned long devarg)
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
- smp_read_barrier_depends();
-
if (jrp->outring[hw_idx].desc ==
jrp->entinfo[sw_idx].desc_addr_dma)
break; /* found */
@@ -218,7 +216,6 @@ static void caam_jr_dequeue(unsigned long devarg)
if (sw_idx == tail) {
do {
tail = (tail + 1) & (JOBR_DEPTH - 1);
- smp_read_barrier_depends();
} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
jrp->entinfo[tail].desc_addr_dma == 0);
@@ -514,7 +511,6 @@ MODULE_DEVICE_TABLE(of, caam_jr_match);
static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
- .owner = THIS_MODULE,
.of_match_table = caam_jr_match,
},
.probe = caam_jr_probe,
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index b0a2806908f1..8c50bad25f7e 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -208,7 +208,6 @@ static const struct of_device_id ccp_platform_ids[] = {
static struct platform_driver ccp_platform_driver = {
.driver = {
.name = "AMD Cryptographic Coprocessor",
- .owner = THIS_MODULE,
.of_match_table = ccp_platform_ids,
},
.probe = ccp_platform_probe,
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 032c72c1f953..f91f15ddee92 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1180,7 +1180,6 @@ static struct platform_driver marvell_crypto = {
.probe = mv_probe,
.remove = mv_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "mv_crypto",
.of_match_table = mv_cesa_of_match_table,
},
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index b5f7e6db24d4..829d6394fb33 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -1090,7 +1090,6 @@ static struct platform_driver mxs_dcp_driver = {
.remove = mxs_dcp_remove,
.driver = {
.name = "mxs-dcp",
- .owner = THIS_MODULE,
.of_match_table = mxs_dcp_dt_ids,
},
};
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index f8e3207fecb1..afd136b45f49 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -2210,7 +2210,6 @@ MODULE_DEVICE_TABLE(of, n2_crypto_match);
static struct platform_driver n2_crypto_driver = {
.driver = {
.name = "n2cp",
- .owner = THIS_MODULE,
.of_match_table = n2_crypto_match,
},
.probe = n2_crypto_probe,
@@ -2238,7 +2237,6 @@ MODULE_DEVICE_TABLE(of, n2_mau_match);
static struct platform_driver n2_mau_driver = {
.driver = {
.name = "ncp",
- .owner = THIS_MODULE,
.of_match_table = n2_mau_match,
},
.probe = n2_mau_probe,
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 061407d59520..887196e9b50c 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1009,9 +1009,9 @@ error_out:
* notifier_to_errno() to decode this value
*/
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
- void *update)
+ void *data)
{
- struct of_prop_reconfig *upd = update;
+ struct of_reconfig_data *upd = data;
struct nx842_devdata *local_devdata;
struct device_node *node = NULL;
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index cc00b52306ba..a066cc3450ae 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -72,27 +72,19 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5ecd4c2414aa..67f80813a06f 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -181,6 +181,7 @@ static int generate_pat(u8 *iv,
unsigned int iauth_len = 0;
u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
int rc;
+ unsigned int max_sg_len;
/* zero the ctr value */
memset(iv + 15 - iv[0], 0, iv[0] + 1);
@@ -248,10 +249,19 @@ static int generate_pat(u8 *iv,
if (!req->assoclen) {
return rc;
} else if (req->assoclen <= 14) {
- nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
+ unsigned int len = 16;
+
+ nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
+
+ if (len != 16)
+ return -EINVAL;
+
+ nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
nx_ctx->ap->sglen);
+ if (len != 16)
+ return -EINVAL;
+
/* inlen should be negative, indicating to phyp that it's a
* pointer to an sg list */
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
@@ -273,21 +283,24 @@ static int generate_pat(u8 *iv,
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
} else {
- u32 max_sg_len;
unsigned int processed = 0, to_process;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
processed += iauth_len;
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
do {
to_process = min_t(u32, req->assoclen - processed,
nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+
+ nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+ nx_ctx->ap->sglen,
+ req->assoc, processed,
+ &to_process);
if ((to_process + processed) < req->assoclen) {
NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
@@ -297,10 +310,6 @@ static int generate_pat(u8 *iv,
~NX_FDM_INTERMEDIATE;
}
- nx_insg = nx_walk_and_build(nx_ctx->in_sg,
- nx_ctx->ap->sglen,
- req->assoc, processed,
- to_process);
nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
sizeof(struct nx_sg);
@@ -343,7 +352,6 @@ static int ccm_nx_decrypt(struct aead_request *req,
struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc = -1;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -360,19 +368,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
if (rc)
goto out;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
/* to_process: the AES_BLOCK_SIZE data chunk to process in this
* update. This value is bound by sg list limits.
*/
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = nbytes - processed;
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -382,7 +383,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- to_process, processed,
+ &to_process, processed,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -427,7 +428,6 @@ static int ccm_nx_encrypt(struct aead_request *req,
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc = -1;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -437,18 +437,11 @@ static int ccm_nx_encrypt(struct aead_request *req,
if (rc)
goto out;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
/* to process: the AES_BLOCK_SIZE data chunk to process in this
* update. This value is bound by sg list limits.
*/
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = nbytes - processed;
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -458,7 +451,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- to_process, processed,
+ &to_process, processed,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index a37d009dc75c..2617cd4d54dd 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -90,22 +90,14 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -143,6 +135,7 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
+ iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
desc->info = nx_ctx->priv.ctr.iv;
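
Aside: the hunk above prepares the RFC 3686 counter block as nonce || IV || 32-bit big-endian counter, clearing bytes 12-14 so a reused buffer cannot carry a stale counter value and starting the counter at 1. The same layout as a standalone helper; rfc3686_counter_block() is illustrative only.

#include <stdint.h>
#include <string.h>

static void rfc3686_counter_block(uint8_t blk[16],
				  const uint8_t nonce[4],
				  const uint8_t iv[8])
{
	memcpy(blk, nonce, 4);			/* bytes  0..3  : nonce */
	memcpy(blk + 4, iv, 8);			/* bytes  4..11 : per-request IV */
	blk[12] = blk[13] = blk[14] = 0;	/* bytes 12..14 : counter high bytes */
	blk[15] = 1;				/* byte  15     : initial counter = 1 */
}
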
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 85a8d23cf29d..cfdde8b8bc76 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -72,27 +72,19 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, NULL);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 025d9a8d5b19..88c562434bc0 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -131,7 +131,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct nx_sg *nx_sg = nx_ctx->in_sg;
unsigned int nbytes = req->assoclen;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
+ unsigned int max_sg_len;
if (nbytes <= AES_BLOCK_SIZE) {
scatterwalk_start(&walk, req->assoc);
@@ -143,8 +143,10 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
/* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
/*
@@ -156,13 +158,14 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
to_process = min_t(u64, to_process,
NX_PAGE_SIZE * (max_sg_len - 1));
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->assoc, processed, &to_process);
+
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
else
NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
- nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
- req->assoc, processed, to_process);
nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
* sizeof(struct nx_sg);
@@ -195,7 +198,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
struct nx_sg *nx_sg;
unsigned int nbytes = req->assoclen;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
+ unsigned int max_sg_len;
/* Set GMAC mode */
csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
@@ -203,8 +206,10 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
/* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
@@ -219,13 +224,14 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
to_process = min_t(u64, to_process,
NX_PAGE_SIZE * (max_sg_len - 1));
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->assoc, processed, &to_process);
+
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
- req->assoc, processed, to_process);
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
* sizeof(struct nx_sg);
@@ -264,6 +270,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
char out[AES_BLOCK_SIZE];
struct nx_sg *in_sg, *out_sg;
+ int len;
/* For scenarios where the input message is zero length, AES CTR mode
* may be used. Set the source data to be a single block (16B) of all
@@ -279,11 +286,22 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+ len = AES_BLOCK_SIZE;
+
/* Encrypt the counter/IV */
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
- AES_BLOCK_SIZE, nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+ &len, nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ len = sizeof(out);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
nx_ctx->ap->sglen);
+
+ if (len != sizeof(out))
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
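
Aside: across this patch nx_build_sg_list() switches to taking the length by pointer so it can report how much actually fit in the scatter list, and every caller now verifies that the full amount was accepted. A toy builder with the same contract; struct sg and build_list() are inventions, the real NX helper differs in detail.

#include <stdint.h>

struct sg {
	uint64_t addr;
	unsigned len;
};

/* toy builder: accepts at most max entries of chunk bytes each */
static struct sg *build_list(struct sg *sg, uint8_t *buf, unsigned *len,
			     unsigned max, unsigned chunk)
{
	unsigned done = 0;

	for (unsigned i = 0; i < max && done < *len; i++) {
		unsigned n = *len - done < chunk ? *len - done : chunk;

		*sg++ = (struct sg){ (uintptr_t)(buf + done), n };
		done += n;
	}
	*len = done;	/* tell the caller how much was really mapped */
	return sg;
}

/*
 * Caller pattern mirroring the hunks above:
 *	unsigned len = want;
 *	end = build_list(sg, buf, &len, max, chunk);
 *	if (len != want)
 *		return -1;	(the request did not fit; bail out)
 */
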
@@ -317,7 +335,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
- u32 max_sg_len;
int rc = -EINVAL;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -354,33 +371,24 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
}
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
- /*
- * to_process: the data chunk to process in this update.
- * This value is bound by sg list limits.
- */
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
-
- if ((to_process + processed) < nbytes)
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- else
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
- req->src, to_process, processed,
+ req->src, &to_process, processed,
csbcpb->cpb.aes_gcm.iv_or_cnt);
+
if (rc)
goto out;
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 03c4bf57d066..8c2faffab4a3 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -75,6 +75,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
u8 keys[2][AES_BLOCK_SIZE];
u8 key[32];
int rc = 0;
+ int len;
/* Change to ECB mode */
csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
@@ -86,11 +87,20 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
memset(keys[0], 0x01, sizeof(keys[0]));
memset(keys[1], 0x03, sizeof(keys[1]));
+ len = sizeof(keys);
/* Generate K1 and K3 encrypting the patterns */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
nx_ctx->ap->sglen);
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -103,12 +113,23 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
/* XOr K3 with the padding for a 0 length message */
keys[1][0] ^= 0x80;
+ len = sizeof(keys[1]);
+
/* Encrypt the final result */
memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+
+ if (len != sizeof(keys[1]))
+ return -EINVAL;
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -133,6 +154,7 @@ static int nx_xcbc_init(struct shash_desc *desc)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *out_sg;
+ int len;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -144,8 +166,13 @@ static int nx_xcbc_init(struct shash_desc *desc)
memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
+ len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+ &len, nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0;
@@ -159,10 +186,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
- u32 to_process, leftover, total;
- u32 max_sg_len;
+ u32 to_process = 0, leftover, total;
+ unsigned int max_sg_len;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -180,17 +208,15 @@ static int nx_xcbc_update(struct shash_desc *desc,
}
in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
-
- /* to_process: the AES_BLOCK_SIZE data chunk to process in this
- * update */
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
leftover = total - to_process;
/* the hardware will not accept a 0 byte operation for this
@@ -204,15 +230,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
}
if (sctx->count) {
+ data_len = sctx->count;
in_sg = nx_build_sg_list(nx_ctx->in_sg,
(u8 *) sctx->buffer,
- sctx->count,
+ &data_len,
max_sg_len);
+ if (data_len != sctx->count)
+ return -EINVAL;
}
+
+ data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- to_process - sctx->count,
+ &data_len,
max_sg_len);
+
+ if (data_len != to_process - sctx->count)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -263,6 +298,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
int rc = 0;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -285,11 +321,20 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ len = sctx->count;
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- sctx->count, nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+ &len, nx_ctx->ap->sglen);
+
+ if (len != sctx->count)
+ return -EINVAL;
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index da0b24a7633f..23621da624c3 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -23,6 +23,7 @@
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
+#include <asm/byteorder.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -32,7 +33,8 @@ static int nx_sha256_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
+ int len;
+ int rc;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
@@ -41,10 +43,28 @@ static int nx_sha256_init(struct shash_desc *desc)
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = SHA256_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ (u8 *) sctx->state,
+ NX_DS_SHA256);
+
+ if (rc)
+ goto out;
+
+ sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ sctx->state[1] = __cpu_to_be32(SHA256_H1);
+ sctx->state[2] = __cpu_to_be32(SHA256_H2);
+ sctx->state[3] = __cpu_to_be32(SHA256_H3);
+ sctx->state[4] = __cpu_to_be32(SHA256_H4);
+ sctx->state[5] = __cpu_to_be32(SHA256_H5);
+ sctx->state[6] = __cpu_to_be32(SHA256_H6);
+ sctx->state[7] = __cpu_to_be32(SHA256_H7);
+ sctx->count = 0;
+
+out:
return 0;
}
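
Aside: the init path above seeds sctx->state with the standard SHA-256 initial hash values (stored big-endian for the coprocessor) and zeroes the running byte count. For reference, the same constants in plain form:

#include <stdint.h>

static const uint32_t sha256_init_state[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};
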
@@ -54,11 +74,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
- u64 to_process, leftover, total;
- u32 max_sg_len;
+ u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
+ u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -66,16 +86,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
* 1: < SHA256_BLOCK_SIZE: copy into state, return 0
* 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
- total = sctx->count + len;
+ total = (sctx->count % SHA256_BLOCK_SIZE) + len;
if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + sctx->count, data, len);
+ memcpy(sctx->buf + buf_len, data, len);
sctx->count += len;
goto out;
}
- in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
+ memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
do {
/*
@@ -83,34 +103,42 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
* this update. This value is also restricted by the sg list
* limits.
*/
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - to_process;
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
- leftover = total - to_process;
- if (sctx->count) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buf,
- sctx->count, max_sg_len);
+ if (buf_len) {
+ data_len = buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA256);
+
+ if (rc || data_len != buf_len)
+ goto out;
}
- in_sg = nx_build_sg_list(in_sg, (u8 *) data,
- to_process - sctx->count,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /*
- * we've hit the nx chip previously and we're updating
- * again, so copy over the partial digest.
- */
- memcpy(csbcpb->cpb.sha256.input_partial_digest,
+
+ data_len = to_process - buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) data,
+ NX_DS_SHA256);
+
+ if (rc)
+ goto out;
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
csbcpb->cpb.sha256.message_digest,
SHA256_DIGEST_SIZE);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -122,22 +150,19 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
atomic_inc(&(nx_ctx->stats->sha256_ops));
- csbcpb->cpb.sha256.message_bit_length += (u64)
- (csbcpb->cpb.sha256.spbc * 8);
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
- in_sg = nx_ctx->in_sg;
+ data += to_process - buf_len;
+ buf_len = 0;
+
} while (leftover >= SHA256_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
memcpy(sctx->buf, data, leftover);
- sctx->count = leftover;
+
+ sctx->count += len;
+ memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
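The rewritten update path keeps sctx->count as a running byte total and only hands whole SHA256_BLOCK_SIZE multiples to the coprocessor; anything shorter than a block stays in sctx->buf for the next call (case 1 vs. case 2 in the comment above). A small standalone illustration of that bookkeeping, with made-up byte counts instead of the driver structures:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 64 /* SHA256_BLOCK_SIZE */

int main(void)
{
        uint64_t count = 100; /* bytes hashed so far (example value)     */
        uint64_t len = 300;   /* bytes handed to this update() (example) */

        uint64_t buf_len = count % BLOCK_SIZE;  /* already sitting in sctx->buf */
        uint64_t total = buf_len + len;         /* bytes available this call    */
        uint64_t to_process = total & ~(uint64_t)(BLOCK_SIZE - 1);
        uint64_t leftover = total - to_process; /* copied back into sctx->buf   */

        printf("buffered=%llu total=%llu to_process=%llu leftover=%llu\n",
               (unsigned long long)buf_len, (unsigned long long)total,
               (unsigned long long)to_process, (unsigned long long)leftover);
        return 0;
}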
@@ -148,34 +173,46 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg, *out_sg;
- u32 max_sg_len;
unsigned long irq_flags;
int rc;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count >= SHA256_BLOCK_SIZE) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest,
- csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
}
- /* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
+ len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA256);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
- sctx->count, max_sg_len);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+ goto out;
+
+ len = SHA256_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ out,
+ NX_DS_SHA256);
+
+ if (rc || len != SHA256_DIGEST_SIZE)
+ goto out;
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
@@ -189,8 +226,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
atomic_inc(&(nx_ctx->stats->sha256_ops));
- atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
- &(nx_ctx->stats->sha256_bytes));
+ atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
@@ -200,62 +236,18 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct sha256_state *octx = out;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- octx->count = sctx->count +
- (csbcpb->cpb.sha256.message_bit_length / 8);
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
- /* if no data has been processed yet, we need to export SHA256's
- * initial data, in case this context gets imported into a software
- * context */
- if (csbcpb->cpb.sha256.message_bit_length)
- memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
- SHA256_DIGEST_SIZE);
- else {
- octx->state[0] = SHA256_H0;
- octx->state[1] = SHA256_H1;
- octx->state[2] = SHA256_H2;
- octx->state[3] = SHA256_H3;
- octx->state[4] = SHA256_H4;
- octx->state[5] = SHA256_H5;
- octx->state[6] = SHA256_H6;
- octx->state[7] = SHA256_H7;
- }
+ memcpy(out, sctx, sizeof(*sctx));
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- const struct sha256_state *ictx = in;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ memcpy(sctx, in, sizeof(*sctx));
- sctx->count = ictx->count & 0x3f;
- csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
-
- if (csbcpb->cpb.sha256.message_bit_length) {
- memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
- SHA256_DIGEST_SIZE);
-
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- }
-
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5..b3adf1022673 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
+ int len;
+ int rc;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = SHA512_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ (u8 *)sctx->state,
+ NX_DS_SHA512);
+
+ if (rc || len != SHA512_DIGEST_SIZE)
+ goto out;
+
+ sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ sctx->state[1] = __cpu_to_be64(SHA512_H1);
+ sctx->state[2] = __cpu_to_be64(SHA512_H2);
+ sctx->state[3] = __cpu_to_be64(SHA512_H3);
+ sctx->state[4] = __cpu_to_be64(SHA512_H4);
+ sctx->state[5] = __cpu_to_be64(SHA512_H5);
+ sctx->state[6] = __cpu_to_be64(SHA512_H6);
+ sctx->state[7] = __cpu_to_be64(SHA512_H7);
+ sctx->count[0] = 0;
+
+out:
return 0;
}
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
- u64 to_process, leftover, total, spbc_bits;
- u32 max_sg_len;
+ u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
+ u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
* 1: < SHA512_BLOCK_SIZE: copy into state, return 0
* 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
- total = sctx->count[0] + len;
+ total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + sctx->count[0], data, len);
+ memcpy(sctx->buf + buf_len, data, len);
sctx->count[0] += len;
goto out;
}
- in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
+ memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
do {
/*
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
* this update. This value is also restricted by the sg list
* limits.
*/
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - leftover;
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
leftover = total - to_process;
- if (sctx->count[0]) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buf,
- sctx->count[0], max_sg_len);
+ if (buf_len) {
+ data_len = buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA512);
+
+ if (rc || data_len != buf_len)
+ goto out;
}
- in_sg = nx_build_sg_list(in_sg, (u8 *) data,
- to_process - sctx->count[0],
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /*
- * we've hit the nx chip previously and we're updating
- * again, so copy over the partial digest.
- */
- memcpy(csbcpb->cpb.sha512.input_partial_digest,
+
+ data_len = to_process - buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) data,
+ NX_DS_SHA512);
+
+ if (rc || data_len != (to_process - buf_len))
+ goto out;
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
csbcpb->cpb.sha512.message_digest,
SHA512_DIGEST_SIZE);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- spbc_bits = csbcpb->cpb.sha512.spbc * 8;
- csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
- if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
- csbcpb->cpb.sha512.message_bit_length_hi++;
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
total -= to_process;
- data += to_process - sctx->count[0];
- sctx->count[0] = 0;
- in_sg = nx_ctx->in_sg;
+ data += to_process - buf_len;
+ buf_len = 0;
+
} while (leftover >= SHA512_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
memcpy(sctx->buf, data, leftover);
- sctx->count[0] = leftover;
+ sctx->count[0] += len;
+ memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg, *out_sg;
- u32 max_sg_len;
u64 count0;
unsigned long irq_flags;
int rc;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest,
- csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+ SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
}
- /* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
count0 = sctx->count[0] * 8;
- csbcpb->cpb.sha512.message_bit_length_lo += count0;
- if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
- csbcpb->cpb.sha512.message_bit_length_hi++;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
- max_sg_len);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &len,
+ (u8 *)sctx->buf,
+ NX_DS_SHA512);
+
+ if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+ goto out;
+
+ len = SHA512_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ out,
+ NX_DS_SHA512);
+
+ if (rc)
+ goto out;
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
- &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -207,74 +241,18 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct sha512_state *octx = out;
- unsigned long irq_flags;
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(out, sctx, sizeof(*sctx));
- /* move message_bit_length (128 bits) into count and convert its value
- * to bytes */
- octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
- ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
- octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
-
- octx->count[0] += sctx->count[0];
- if (octx->count[0] < sctx->count[0])
- octx->count[1]++;
-
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
- /* if no data has been processed yet, we need to export SHA512's
- * initial data, in case this context gets imported into a software
- * context */
- if (csbcpb->cpb.sha512.message_bit_length_hi ||
- csbcpb->cpb.sha512.message_bit_length_lo)
- memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
- SHA512_DIGEST_SIZE);
- else {
- octx->state[0] = SHA512_H0;
- octx->state[1] = SHA512_H1;
- octx->state[2] = SHA512_H2;
- octx->state[3] = SHA512_H3;
- octx->state[4] = SHA512_H4;
- octx->state[5] = SHA512_H5;
- octx->state[6] = SHA512_H6;
- octx->state[7] = SHA512_H7;
- }
-
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- const struct sha512_state *ictx = in;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->count[0] = ictx->count[0] & 0x3f;
- csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
- << 3;
- csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
- ictx->count[0] >> 61;
-
- if (csbcpb->cpb.sha512.message_bit_length_hi ||
- csbcpb->cpb.sha512.message_bit_length_lo) {
- memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- }
+ memcpy(sctx, in, sizeof(*sctx));
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90d..a392465d3e3f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
*/
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
u8 *start_addr,
- unsigned int len,
+ unsigned int *len,
u32 sgmax)
{
unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
else
sg_addr = __pa(sg_addr);
- end_addr = sg_addr + len;
+ end_addr = sg_addr + *len;
/* each iteration will write one struct nx_sg element and add the
* length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
* Also when using vmalloc'ed data, every time that a system page
* boundary is crossed the physical address needs to be re-calculated.
*/
- for (sg = sg_head; sg_len < len; sg++) {
+ for (sg = sg_head; sg_len < *len; sg++) {
u64 next_page;
sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
is_vmalloc_addr(start_addr + sg_len)) {
sg_addr = page_to_phys(vmalloc_to_page(
start_addr + sg_len));
- end_addr = sg_addr + len - sg_len;
+ end_addr = sg_addr + *len - sg_len;
}
if ((sg - sg_head) == sgmax) {
pr_err("nx: scatter/gather list overflow, pid: %d\n",
current->pid);
- return NULL;
+ sg++;
+ break;
}
}
+ *len = sg_len;
/* return the moved sg_head pointer */
return sg;
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
unsigned int sglen,
struct scatterlist *sg_src,
unsigned int start,
- unsigned int src_len)
+ unsigned int *src_len)
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = src_len;
+ unsigned int n, offset = 0, len = *src_len;
char *dst;
/* we need to fast forward through @start bytes first */
@@ -182,27 +184,101 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* element we're currently looking at */
scatterwalk_advance(&walk, start - offset);
- while (len && nx_sg) {
+ while (len && (nx_sg - nx_dst) < sglen) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
- scatterwalk_start(&walk, sg_next(walk.sg));
+ /* In cases where we have a scatterlist chain,
+ * scatterwalk_sg_next() handles it properly */
+ scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
dst = scatterwalk_map(&walk);
- nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+ nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
len -= n;
scatterwalk_unmap(dst);
scatterwalk_advance(&walk, n);
scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
}
+ /* update to_process */
+ *src_len -= len;
/* return the moved destination pointer */
return nx_sg;
}
/**
+ * trim_sg_list - ensure the sg list stays within bounds.
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: the number of bytes we need to crop in order to bound the list.
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+{
+ while (delta && end > sg) {
+ struct nx_sg *last = end - 1;
+
+ if (last->len > delta) {
+ last->len -= delta;
+ delta = 0;
+ } else {
+ end--;
+ delta -= last->len;
+ }
+ }
+ return (sg - end) * sizeof(struct nx_sg);
+}
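trim_sg_list() walks backwards from the end of the just-built list and crops delta bytes, shrinking or dropping tail entries, then returns the list length in bytes as a negative number (as the comment in nx_build_sg_lists below notes, the negative length is what tells phyp it is being handed a scatterlist rather than a linear buffer). A simplified standalone model of that loop, using a plain length array and example sizes instead of struct nx_sg:

#include <stdio.h>

struct entry { unsigned int len; };

/* Crop 'delta' bytes off the tail of entries[0..*n), as trim_sg_list() does. */
static long trim(struct entry *entries, int *n, unsigned int delta)
{
        while (delta && *n > 0) {
                struct entry *last = &entries[*n - 1];

                if (last->len > delta) {
                        last->len -= delta;
                        delta = 0;
                } else {
                        delta -= last->len;
                        (*n)--;
                }
        }
        /* the driver returns (head - end) * sizeof(struct nx_sg), i.e. <= 0 */
        return -(long)(*n * sizeof(struct entry));
}

int main(void)
{
        struct entry e[3] = { { 4096 }, { 4096 }, { 100 } };
        int n = 3;
        long op_len = trim(e, &n, 150);

        printf("op len = %ld, entries left = %d, last len = %u\n",
               op_len, n, e[n - 1].len);
        return 0;
}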
+
+/**
+ * nx_sha_build_sg_list - walk and build an sg list for the SHA modes,
+ * using the right bounds and limits.
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @nx_sg: current sg list (in or out list)
+ * @op_len: current op_len to be used in order to build an sg list
+ * @nbytes: number of bytes to be processed
+ * @offset: buf offset
+ * @mode: SHA256 or SHA512
+ */
+int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+ struct nx_sg *nx_in_outsg,
+ s64 *op_len,
+ unsigned int *nbytes,
+ u8 *offset,
+ u32 mode)
+{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
+ struct nx_sg *nx_insg = nx_in_outsg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+ nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+
+ switch (mode) {
+ case NX_DS_SHA256:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+ break;
+ case NX_DS_SHA512:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+
+ return 0;
+}
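nx_sha_build_sg_list() first caps *nbytes by databytelen and the sg-list limits; if that cap left fewer bytes than the caller asked for, it computes how far past the last whole block the list runs so trim_sg_list() can cut it back, since the SHA units are only fed whole blocks outside of final(). A short standalone illustration of that delta arithmetic, with example numbers and the SHA-256 block size:

#include <stdio.h>

#define SHA256_BLOCK_SIZE 64

int main(void)
{
        unsigned int total = 100000; /* bytes the caller wanted (example)       */
        unsigned int nbytes = 65000; /* bytes the sg list could cover (example) */
        unsigned int delta = 0;

        if (nbytes < total)
                /* bytes beyond the last whole block; trim_sg_list() drops them */
                delta = nbytes - (nbytes & ~(SHA256_BLOCK_SIZE - 1));

        printf("nbytes=%u delta=%u -> %u bytes go to the coprocessor\n",
               nbytes, delta, nbytes - delta);
        return 0;
}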
+
+/**
* nx_build_sg_lists - walk the input scatterlists and build arrays of NX
* scatterlists based on them.
*
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
- unsigned int nbytes,
+ unsigned int *nbytes,
unsigned int offset,
u8 *iv)
{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
if (iv)
memcpy(iv, desc->info, AES_BLOCK_SIZE);
- nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
- offset, nbytes);
- nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
- offset, nbytes);
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+ nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+ offset, nbytes);
+ nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+ offset, nbytes);
+
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
- nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
return 0;
}
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* we need an extra page for csbcpb_aead for these modes */
if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
- nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
else
- nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index befda07ca1da..6c9ecaaead52 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,13 +153,15 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
-struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
+int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+ s64 *, unsigned int *, u8 *, u32);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int,
+ struct scatterlist *, struct scatterlist *, unsigned int *,
unsigned int, u8 *);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
- unsigned int);
+ unsigned int *);
#ifdef CONFIG_DEBUG_FS
#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index cb98fa54573d..f79dd410dede 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1314,7 +1314,6 @@ static struct platform_driver omap_aes_driver = {
.remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
- .owner = THIS_MODULE,
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
},
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index b8bc84be8741..e350f5be4d2e 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1222,7 +1222,6 @@ static struct platform_driver omap_des_driver = {
.remove = omap_des_remove,
.driver = {
.name = "omap-des",
- .owner = THIS_MODULE,
.pm = &omap_des_pm_ops,
.of_match_table = of_match_ptr(omap_des_of_match),
},
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 24ef48965e45..3c76696ee578 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2029,7 +2029,6 @@ static struct platform_driver omap_sham_driver = {
.remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
- .owner = THIS_MODULE,
.pm = &omap_sham_pm_ops,
.of_match_table = omap_sham_of_match,
},
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 633ba945e153..c178ed8c3908 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bace885634f2..95f7d27ce491 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("sha1-padlock");
-MODULE_ALIAS("sha256-padlock");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index fe7b3f06f6e6..2ed425664a16 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -56,8 +56,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
-#define ADF_DH895XCC_PMISC_BAR 1
-#define ADF_DH895XCC_ETR_BAR 2
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index c29d4c3926bf..10ce4a2854ab 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -90,7 +90,7 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
uint16_t ppdstat = 0, bridge_ctl = 0;
int pending = 0;
- pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+ pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
if (pending) {
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 244d73378f0e..7ee93f881db6 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -52,6 +52,7 @@
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
+#include <linux/crypto.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -487,4 +488,4 @@ module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_ALIAS("intel_qat");
+MODULE_ALIAS_CRYPTO("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index ae71555c0868..4a0a829d4500 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -129,12 +129,13 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
* Function returns acceleration device associated with the given pci device.
* To be used by QAT device specific drivers.
*
- * Return: pinter to accel_dev or NULL if not found.
+ * Return: pointer to accel_dev or NULL if not found.
*/
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
struct list_head *itr;
+ mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
@@ -144,6 +145,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
return ptr;
}
}
+ mutex_unlock(&table_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
@@ -152,6 +154,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
struct list_head *itr;
+ mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
@@ -161,6 +164,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
return ptr;
}
}
+ mutex_unlock(&table_lock);
return NULL;
}
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 9dd2cb72a4e8..7dd54aaee9fa 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -376,8 +376,9 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
return 0;
}
-static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
- const char *section, uint32_t bank_num_in_accel)
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+ const char *section,
+ uint32_t bank_num_in_accel)
{
if (adf_get_cfg_int(bank->accel_dev, section,
ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -396,7 +397,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
- uint32_t i, coalesc_enabled;
+ uint32_t i, coalesc_enabled = 0;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
@@ -407,10 +408,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
/* Enable IRQ coalescing always. This will allow to use
* the optimised flag and coalesc register.
* If it is disabled in the config file just use min time value */
- if (adf_get_cfg_int(accel_dev, "Accelerator0",
- ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
- bank_num, &coalesc_enabled) && coalesc_enabled)
- adf_enable_coalesc(bank, "Accelerator0", bank_num);
+ if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+ ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+ &coalesc_enabled) == 0) && coalesc_enabled)
+ adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
else
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 91d88d676580..160c9a36c919 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -83,14 +83,14 @@
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-/* Valid internal msg size values internal */
+/* Valid internal msg size values */
#define ADF_MSG_SIZE_32 0x01
#define ADF_MSG_SIZE_64 0x02
#define ADF_MSG_SIZE_128 0x04
#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-/* Size to bytes conversion macros for ring and msg values */
+/* Size to bytes conversion macros for ring and msg size values */
#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
@@ -100,8 +100,11 @@
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
- ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
#define BUILD_RING_CONFIG(size) \
((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 9e9619cd4a79..19eea1c832ac 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -161,7 +161,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
__be64 *hash512_state_out;
int i, offset;
- memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
+ memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
@@ -174,13 +174,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memcpy(ipad, buff, digest_size);
memcpy(opad, buff, digest_size);
- memset(ipad + digest_size, 0, block_size - digest_size);
- memset(opad + digest_size, 0, block_size - digest_size);
+ memzero_explicit(ipad + digest_size, block_size - digest_size);
+ memzero_explicit(opad + digest_size, block_size - digest_size);
} else {
memcpy(ipad, auth_key, auth_keylen);
memcpy(opad, auth_key, auth_keylen);
- memset(ipad + auth_keylen, 0, block_size - auth_keylen);
- memset(opad + auth_keylen, 0, block_size - auth_keylen);
+ memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
+ memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
}
for (i = 0; i < block_size; i++) {
@@ -254,6 +254,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
default:
return -EFAULT;
}
+ memzero_explicit(ipad, block_size);
+ memzero_explicit(opad, block_size);
return 0;
}
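The memset()-to-memzero_explicit() conversions in this hunk exist so the compiler cannot drop the clearing of ipad/opad key material as a dead store once the buffers go out of use. Outside the kernel the same effect is usually obtained with a volatile-qualified memset wrapper (or glibc's explicit_bzero() where available); a hedged sketch of that pattern, not of the kernel helper itself:

#include <stdio.h>
#include <string.h>

/* Calling memset through a volatile pointer keeps the compiler from
 * proving the store is dead and eliding it. */
static void *(*const volatile memset_v)(void *, int, size_t) = memset;

static void secure_zero(void *p, size_t n)
{
        memset_v(p, 0, n);
}

int main(void)
{
        unsigned char ipad[64];

        memset(ipad, 0x36, sizeof(ipad)); /* HMAC-style inner pad */
        /* ... key material would be used here ... */
        secure_zero(ipad, sizeof(ipad));  /* survives optimization */
        printf("ipad[0] = %u\n", ipad[0]);
        return 0;
}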
@@ -466,7 +468,6 @@ static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
break;
default:
goto bad_key;
- break;
}
if (qat_alg_init_enc_session(ctx, alg, &keys))
@@ -493,12 +494,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
- memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- memset(&ctx->enc_fw_req_tmpl, 0,
- sizeof(struct icp_qat_fw_la_bulk_req));
- memset(&ctx->dec_fw_req_tmpl, 0,
- sizeof(struct icp_qat_fw_la_bulk_req));
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memzero_explicit(&ctx->enc_fw_req_tmpl,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ memzero_explicit(&ctx->dec_fw_req_tmpl,
+ sizeof(struct icp_qat_fw_la_bulk_req));
} else {
/* new key */
int node = get_current_node();
@@ -535,10 +536,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
return 0;
out_free_all:
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
@@ -836,7 +839,7 @@ static int qat_alg_init(struct crypto_tfm *tfm,
{
struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
- memset(ctx, '\0', sizeof(*ctx));
+ memzero_explicit(ctx, sizeof(*ctx));
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
return -EFAULT;
@@ -876,12 +879,16 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
return;
dev = &GET_DEV(inst->accel_dev);
- if (ctx->enc_cd)
+ if (ctx->enc_cd) {
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
- if (ctx->dec_cd)
+ }
+ if (ctx->dec_cd) {
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
+ }
qat_crypto_put_instance(inst);
}
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 9b8a31521ff3..b818c19713bf 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -679,7 +679,8 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_loader_handle *handle;
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+ struct adf_bar *bar =
+ &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 65dd1ff93d3b..01e0be21e93a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,8 @@
#define ADF_DH895x_HW_DATA_H_
/* PCIe configuration space */
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d96ee21b9b77..fe8f89697ad8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -186,10 +186,8 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
accel_dev->accel_pci_dev.msix_entries.names = names;
return 0;
err:
- for (i = 0; i < msix_num_entries; i++) {
- if (*(names + i))
- kfree(*(names + i));
- }
+ for (i = 0; i < msix_num_entries; i++)
+ kfree(*(names + i));
kfree(entries);
kfree(names);
return -ENOMEM;
@@ -203,10 +201,8 @@ static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < msix_num_entries; i++) {
- if (*(names + i))
- kfree(*(names + i));
- }
+ for (i = 0; i < msix_num_entries; i++)
+ kfree(*(names + i));
kfree(names);
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 33ae3545dc48..718b32a3112e 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -273,7 +273,6 @@ static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
.remove = qce_crypto_remove,
.driver = {
- .owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
},
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 4197ad9a711b..f214a8755827 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -745,7 +745,6 @@ static struct platform_driver s5p_aes_crypto = {
.probe = s5p_aes_probe,
.remove = s5p_aes_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
},
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 164e1ec624e3..220b92f7eabc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -3,6 +3,7 @@
*
* Support for SAHARA cryptographic accelerator.
*
+ * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
* Copyright (c) 2013 Vista Silicon S.L.
* Author: Javier Martin <javier.martin@vista-silicon.com>
*
@@ -15,6 +16,10 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -22,12 +27,19 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#define SHA_BUFFER_LEN PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3 3
+#define SAHARA_VERSION_4 4
#define SAHARA_TIMEOUT_MS 1000
#define SAHARA_MAX_HW_DESC 2
#define SAHARA_MAX_HW_LINK 20
@@ -36,7 +48,6 @@
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_NEW_KEY BIT(3)
-#define FLAGS_BUSY 4
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -50,6 +61,23 @@
#define SAHARA_HDR_CHA_MDHA (2 << 28)
#define SAHARA_HDR_PARITY_BIT (1 << 31)
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
+#define SAHARA_HDR_MDHA_HASH 0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1 0
+#define SAHARA_HDR_MDHA_ALG_MD5 1
+#define SAHARA_HDR_MDHA_ALG_SHA256 2
+#define SAHARA_HDR_MDHA_ALG_SHA224 3
+#define SAHARA_HDR_MDHA_PDATA (1 << 2)
+#define SAHARA_HDR_MDHA_HMAC (1 << 3)
+#define SAHARA_HDR_MDHA_INIT (1 << 5)
+#define SAHARA_HDR_MDHA_IPAD (1 << 6)
+#define SAHARA_HDR_MDHA_OPAD (1 << 7)
+#define SAHARA_HDR_MDHA_SWAP (1 << 8)
+#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
+#define SAHARA_HDR_MDHA_SSL (1 << 10)
+
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH 1
@@ -117,31 +145,74 @@ struct sahara_hw_link {
};
struct sahara_ctx {
- struct sahara_dev *dev;
unsigned long flags;
+
+ /* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
struct crypto_ablkcipher *fallback;
+
+ /* SHA-specific context */
+ struct crypto_shash *shash_fallback;
};
struct sahara_aes_reqctx {
unsigned long mode;
};
+/*
+ * struct sahara_sha_reqctx - private data per request
+ * @buf: holds data for requests smaller than block_size
+ * @rembuf: used to prepare one block_size-aligned request
+ * @context: hw-specific context for request. Digest is extracted from this
+ * @mode: specifies what type of hw-descriptor needs to be built
+ * @digest_size: length of digest for this request
+ * @context_size: length of hw-context for this request.
+ * Always digest_size + 4
+ * @buf_cnt: number of bytes saved in buf
+ * @sg_in_idx: number of hw links
+ * @in_sg: scatterlist for input data
+ * @in_sg_chain: scatterlists for chained input data
+ * @in_sg_chained: specifies if chained scatterlists are used or not
+ * @total: total number of bytes for transfer
+ * @last: is this the last block
+ * @first: is this the first block
+ * @active: inside a transfer
+ */
+struct sahara_sha_reqctx {
+ u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
+ u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
+ u8 context[SHA256_DIGEST_SIZE + 4];
+ struct mutex mutex;
+ unsigned int mode;
+ unsigned int digest_size;
+ unsigned int context_size;
+ unsigned int buf_cnt;
+ unsigned int sg_in_idx;
+ struct scatterlist *in_sg;
+ struct scatterlist in_sg_chain[2];
+ bool in_sg_chained;
+ size_t total;
+ unsigned int last;
+ unsigned int first;
+ unsigned int active;
+};
+
struct sahara_dev {
struct device *device;
+ unsigned int version;
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
+ struct mutex queue_mutex;
+ struct task_struct *kthread;
+ struct completion dma_completion;
struct sahara_ctx *ctx;
spinlock_t lock;
struct crypto_queue queue;
unsigned long flags;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
-
struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
@@ -151,10 +222,12 @@ struct sahara_dev {
u8 *iv_base;
dma_addr_t iv_phys_base;
+ u8 *context_base;
+ dma_addr_t context_phys_base;
+
struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
- struct ablkcipher_request *req;
size_t total;
struct scatterlist *in_sg;
unsigned int nb_in_sg;
@@ -162,7 +235,6 @@ struct sahara_dev {
unsigned int nb_out_sg;
u32 error;
- struct timer_list watchdog;
};
static struct sahara_dev *dev_ptr;
@@ -401,34 +473,6 @@ static void sahara_dump_links(struct sahara_dev *dev)
dev_dbg(dev->device, "\n");
}
-static void sahara_aes_done_task(unsigned long data)
-{
- struct sahara_dev *dev = (struct sahara_dev *)data;
-
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_FROM_DEVICE);
-
- spin_lock(&dev->lock);
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
-
- dev->req->base.complete(&dev->req->base, dev->error);
-}
-
-static void sahara_watchdog(unsigned long data)
-{
- struct sahara_dev *dev = (struct sahara_dev *)data;
- unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
- unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
-
- sahara_decode_status(dev, stat);
- sahara_decode_error(dev, err);
- dev->error = -ETIMEDOUT;
- sahara_aes_done_task(data);
-}
-
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
struct sahara_ctx *ctx = dev->ctx;
@@ -512,9 +556,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
sahara_dump_descriptors(dev);
sahara_dump_links(dev);
- /* Start processing descriptor chain. */
- mod_timer(&dev->watchdog,
- jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
return 0;
@@ -529,37 +570,19 @@ unmap_in:
return -EINVAL;
}
-static void sahara_aes_queue_task(unsigned long data)
+static int sahara_aes_process(struct ablkcipher_request *req)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
- struct crypto_async_request *async_req, *backlog;
+ struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
- struct ablkcipher_request *req;
int ret;
- spin_lock(&dev->lock);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
- if (!async_req)
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
-
- if (!async_req)
- return;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ablkcipher_request_cast(async_req);
-
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
req->nbytes, req->src, req->dst);
/* assign new request to device */
- dev->req = req;
dev->total = req->nbytes;
dev->in_sg = req->src;
dev->out_sg = req->dst;
@@ -573,16 +596,25 @@ static void sahara_aes_queue_task(unsigned long data)
memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
/* assign new context to device */
- ctx->dev = dev;
dev->ctx = ctx;
+ reinit_completion(&dev->dma_completion);
+
ret = sahara_hw_descriptor_create(dev);
- if (ret < 0) {
- spin_lock(&dev->lock);
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
- dev->req->base.complete(&dev->req->base, ret);
+
+ ret = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
}
+
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_FROM_DEVICE);
+
+ return 0;
}
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -624,12 +656,9 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
- int busy;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -640,16 +669,13 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return -EINVAL;
}
- ctx->dev = dev;
-
rctx->mode = mode;
- spin_lock_bh(&dev->lock);
+
+ mutex_lock(&dev->queue_mutex);
err = ablkcipher_enqueue_request(&dev->queue, req);
- busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock_bh(&dev->lock);
+ mutex_unlock(&dev->queue_mutex);
- if (!busy)
- tasklet_schedule(&dev->queue_task);
+ wake_up_process(dev->kthread);
return err;
}
@@ -752,6 +778,484 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
ctx->fallback = NULL;
}
+static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx)
+{
+ u32 hdr = 0;
+
+ hdr = rctx->mode;
+
+ if (rctx->first) {
+ hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
+ hdr |= SAHARA_HDR_MDHA_INIT;
+ } else {
+ hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
+ }
+
+ if (rctx->last)
+ hdr |= SAHARA_HDR_MDHA_PDATA;
+
+ if (hweight_long(hdr) % 2 == 0)
+ hdr |= SAHARA_HDR_PARITY_BIT;
+
+ return hdr;
+}
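sahara_sha_init_hdr() finishes by forcing the descriptor header to odd parity: when the assembled header has an even number of set bits, it ORs in SAHARA_HDR_PARITY_BIT (bit 31). The same check in standalone C, with __builtin_popcount() standing in for hweight_long() and header values taken from the defines above:

#include <stdint.h>
#include <stdio.h>

#define PARITY_BIT (1u << 31) /* SAHARA_HDR_PARITY_BIT */

static uint32_t set_odd_parity(uint32_t hdr)
{
        if (__builtin_popcount(hdr) % 2 == 0)
                hdr |= PARITY_BIT;
        return hdr;
}

int main(void)
{
        uint32_t a = 0x208D0000;            /* SET_MODE_HASH: 5 bits, already odd */
        uint32_t b = 0x208D0000 | (1 << 5); /* + MDHA_INIT: 6 bits, needs bit 31  */

        printf("0x%08x -> 0x%08x\n", a, set_odd_parity(a));
        printf("0x%08x -> 0x%08x\n", b, set_odd_parity(b));
        return 0;
}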
+
+static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ int start)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+ int ret;
+
+ dev->in_sg = rctx->in_sg;
+
+ dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
+ if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
+ dev_err(dev->device, "not enough hw links (%d)\n",
+ dev->nb_in_sg + dev->nb_out_sg);
+ return -EINVAL;
+ }
+
+ if (rctx->in_sg_chained) {
+ i = start;
+ sg = dev->in_sg;
+ while (sg) {
+ ret = dma_map_sg(dev->device, sg, 1,
+ DMA_TO_DEVICE);
+ if (!ret)
+ return -EFAULT;
+
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ i += 1;
+ }
+ dev->hw_link[i-1]->next = 0;
+ } else {
+ sg = dev->in_sg;
+ ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ if (!ret)
+ return -EFAULT;
+
+ for (i = start; i < dev->nb_in_sg + start; i++) {
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg + start - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+ }
+ }
+
+ return i;
+}
+
+static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ struct ahash_request *req,
+ int index)
+{
+ unsigned result_len;
+ int i = index;
+
+ if (rctx->first)
+ /* Create initial descriptor: #8 */
+ dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+ else
+ /* Create hash descriptor: #10. Must follow #6. */
+ dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
+
+ dev->hw_desc[index]->len1 = rctx->total;
+ if (dev->hw_desc[index]->len1 == 0) {
+ /* if len1 is 0, p1 must be 0, too */
+ dev->hw_desc[index]->p1 = 0;
+ rctx->sg_in_idx = 0;
+ } else {
+ /* Create input links */
+ dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+ i = sahara_sha_hw_links_create(dev, rctx, index);
+
+ rctx->sg_in_idx = index;
+ if (i < 0)
+ return i;
+ }
+
+ dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
+
+ /* Save the context for the next operation */
+ result_len = rctx->context_size;
+ dev->hw_link[i]->p = dev->context_phys_base;
+
+ dev->hw_link[i]->len = result_len;
+ dev->hw_desc[index]->len2 = result_len;
+
+ dev->hw_link[i]->next = 0;
+
+ return 0;
+}
+
+/*
+ * Load descriptor aka #6
+ *
+ * To load a previously saved context back to the MDHA unit
+ *
+ * p1: Saved Context
+ * p2: NULL
+ *
+ */
+static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ struct ahash_request *req,
+ int index)
+{
+ dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+
+ dev->hw_desc[index]->len1 = rctx->context_size;
+ dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+ dev->hw_desc[index]->len2 = 0;
+ dev->hw_desc[index]->p2 = 0;
+
+ dev->hw_link[index]->len = rctx->context_size;
+ dev->hw_link[index]->p = dev->context_phys_base;
+ dev->hw_link[index]->next = 0;
+
+ return 0;
+}
+
+static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+{
+ if (!sg || !sg->length)
+ return nbytes;
+
+ while (nbytes && sg) {
+ if (nbytes <= sg->length) {
+ sg->length = nbytes;
+ sg_mark_end(sg);
+ break;
+ }
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nbytes;
+}
+
+static int sahara_sha_prepare_request(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned int hash_later;
+ unsigned int block_size;
+ unsigned int len;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ /* append bytes from previous operation */
+ len = rctx->buf_cnt + req->nbytes;
+
+ /* only the last transfer can be padded in hardware */
+ if (!rctx->last && (len < block_size)) {
+ /* too little data, save it for the next operation */
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
+ 0, req->nbytes, 0);
+ rctx->buf_cnt += req->nbytes;
+
+ return 0;
+ }
+
+ /* add data from previous operation first */
+ if (rctx->buf_cnt)
+ memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
+
+ /* data must always be a multiple of block_size */
+ hash_later = rctx->last ? 0 : len & (block_size - 1);
+ if (hash_later) {
+ unsigned int offset = req->nbytes - hash_later;
+ /* Save remaining bytes for later use */
+ scatterwalk_map_and_copy(rctx->buf, req->src, offset,
+ hash_later, 0);
+ }
+
+ /* nbytes should now be multiple of blocksize */
+ req->nbytes = req->nbytes - hash_later;
+
+ sahara_walk_and_recalc(req->src, req->nbytes);
+
+ /* have data from previous operation and current */
+ if (rctx->buf_cnt && req->nbytes) {
+ sg_init_table(rctx->in_sg_chain, 2);
+ sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+
+ scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+
+ rctx->total = req->nbytes + rctx->buf_cnt;
+ rctx->in_sg = rctx->in_sg_chain;
+
+ rctx->in_sg_chained = true;
+ req->src = rctx->in_sg_chain;
+ /* only data from previous operation */
+ } else if (rctx->buf_cnt) {
+ if (req->src)
+ rctx->in_sg = req->src;
+ else
+ rctx->in_sg = rctx->in_sg_chain;
+ /* buf was copied into rembuf above */
+ sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+ rctx->total = rctx->buf_cnt;
+ rctx->in_sg_chained = false;
+ /* no data from previous operation */
+ } else {
+ rctx->in_sg = req->src;
+ rctx->total = req->nbytes;
+ req->src = rctx->in_sg;
+ rctx->in_sg_chained = false;
+ }
+
+ /* on next call, we only have the remaining data in the buffer */
+ rctx->buf_cnt = hash_later;
+
+ return -EINPROGRESS;
+}
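Because only the final transfer can be padded by the MDHA, sahara_sha_prepare_request() always carves the input down to a whole number of blocks and parks the tail in rctx->buf for the next call (hash_later above). A small standalone illustration of that split, with example byte counts and the SHA-256 block size:

#include <stdio.h>

#define BLOCK_SIZE 64 /* SAHARA_MAX_SHA_BLOCK_SIZE == SHA256_BLOCK_SIZE */

int main(void)
{
        unsigned int buf_cnt = 20;  /* bytes left over from the previous call */
        unsigned int nbytes = 150;  /* bytes in this request (example)        */
        int last = 0;               /* not the final transfer                 */

        unsigned int len = buf_cnt + nbytes;
        /* only the last transfer may be padded in hardware, so keep any
         * tail that does not fill a whole block for the next operation  */
        unsigned int hash_later = last ? 0 : len & (BLOCK_SIZE - 1);

        printf("send %u bytes to the MDHA, keep %u in rctx->buf\n",
               len - hash_later, hash_later);
        return 0;
}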
+
+static void sahara_sha_unmap_sg(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx)
+{
+ struct scatterlist *sg;
+
+ if (rctx->in_sg_chained) {
+ sg = dev->in_sg;
+ while (sg) {
+ dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
+ sg = sg_next(sg);
+ }
+ } else {
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ }
+}
+
+static int sahara_sha_process(struct ahash_request *req)
+{
+ struct sahara_dev *dev = dev_ptr;
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ int ret = -EINPROGRESS;
+
+ ret = sahara_sha_prepare_request(req);
+ if (!ret)
+ return ret;
+
+ if (rctx->first) {
+ sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = 0;
+ rctx->first = 0;
+ } else {
+ memcpy(dev->context_base, rctx->context, rctx->context_size);
+
+ sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+ sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ dev->hw_desc[1]->next = 0;
+ }
+
+ sahara_dump_descriptors(dev);
+ sahara_dump_links(dev);
+
+ reinit_completion(&dev->dma_completion);
+
+ sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+ ret = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rctx->sg_in_idx)
+ sahara_sha_unmap_sg(dev, rctx);
+
+ memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+ if (req->result)
+ memcpy(req->result, rctx->context, rctx->digest_size);
+
+ return 0;
+}
+
+static int sahara_queue_manage(void *data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct crypto_async_request *async_req;
+ int ret = 0;
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&dev->queue_mutex);
+ async_req = crypto_dequeue_request(&dev->queue);
+ mutex_unlock(&dev->queue_mutex);
+
+ if (async_req) {
+ if (crypto_tfm_alg_type(async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AHASH) {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+
+ ret = sahara_sha_process(req);
+ } else {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(async_req);
+
+ ret = sahara_aes_process(req);
+ }
+
+ async_req->complete(async_req, ret);
+
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
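sahara_queue_manage() replaces the old tasklet pair: submitters append to the crypto queue under queue_mutex and wake_up_process() the kthread, which dequeues one request at a time, dispatches it to the AES or SHA path, and completes it. A rough userspace analogue of that producer/worker shape using pthreads (a condition variable stands in for wake_up_process()/schedule(); illustrative only, not the kernel kthread API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static int pending; /* stand-in for the crypto_queue contents */
static bool stop;

static void *queue_manage(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&queue_mutex);
        while (!stop || pending) {
                if (pending) {
                        int req = pending--;

                        pthread_mutex_unlock(&queue_mutex);
                        /* sahara_aes_process()/sahara_sha_process() here */
                        printf("processing request %d\n", req);
                        pthread_mutex_lock(&queue_mutex);
                        continue;
                }
                pthread_cond_wait(&queue_cond, &queue_mutex); /* schedule() */
        }
        pthread_mutex_unlock(&queue_mutex);
        return NULL;
}

static void enqueue(void)
{
        pthread_mutex_lock(&queue_mutex);
        pending++; /* crypto_enqueue_request() */
        pthread_mutex_unlock(&queue_mutex);
        pthread_cond_signal(&queue_cond); /* wake_up_process() */
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, queue_manage, NULL);
        enqueue();
        enqueue();

        pthread_mutex_lock(&queue_mutex);
        stop = true;
        pthread_mutex_unlock(&queue_mutex);
        pthread_cond_signal(&queue_cond);
        pthread_join(worker, NULL);
        return 0;
}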
+
+static int sahara_sha_enqueue(struct ahash_request *req, int last)
+{
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct sahara_dev *dev = dev_ptr;
+ int ret;
+
+ if (!req->nbytes && !last)
+ return 0;
+
+ mutex_lock(&rctx->mutex);
+ rctx->last = last;
+
+ if (!rctx->active) {
+ rctx->active = 1;
+ rctx->first = 1;
+ }
+
+ mutex_lock(&dev->queue_mutex);
+ ret = crypto_enqueue_request(&dev->queue, &req->base);
+ mutex_unlock(&dev->queue_mutex);
+
+ wake_up_process(dev->kthread);
+ mutex_unlock(&rctx->mutex);
+
+ return ret;
+}
+
+static int sahara_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
+ rctx->digest_size = SHA1_DIGEST_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
+ rctx->digest_size = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
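+ /* The hardware context is the digest plus the 4-byte message length field. */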
+ rctx->context_size = rctx->digest_size + 4;
+ rctx->active = 0;
+
+ mutex_init(&rctx->mutex);
+
+ return 0;
+}
+
+static int sahara_sha_update(struct ahash_request *req)
+{
+ return sahara_sha_enqueue(req, 0);
+}
+
+static int sahara_sha_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_finup(struct ahash_request *req)
+{
+ return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_digest(struct ahash_request *req)
+{
+ sahara_sha_init(req);
+
+ return sahara_sha_finup(req);
+}
+
+static int sahara_sha_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(struct sahara_ctx));
+ memcpy(out + sizeof(struct sahara_ctx), rctx,
+ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+}
+
+static int sahara_sha_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(struct sahara_ctx));
+ memcpy(rctx, in + sizeof(struct sahara_ctx),
+ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+}
+
+static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+{
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->shash_fallback = crypto_alloc_shash(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash_fallback)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->shash_fallback);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sahara_sha_reqctx) +
+ SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+ return 0;
+}
+
+static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->shash_fallback);
+ ctx->shash_fallback = NULL;
+}
+
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -797,14 +1301,66 @@ static struct crypto_alg aes_algs[] = {
}
};
+static struct ahash_alg sha_v3_algs[] = {
+{
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sahara-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ .cra_exit = sahara_sha_cra_exit,
+ }
+},
+};
+
+static struct ahash_alg sha_v4_algs[] = {
+{
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sahara-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ .cra_exit = sahara_sha_cra_exit,
+ }
+},
+};
+
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
struct sahara_dev *dev = (struct sahara_dev *)data;
unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
- del_timer(&dev->watchdog);
-
sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
SAHARA_REG_CMD);
@@ -819,7 +1375,7 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
dev->error = -EINVAL;
}
- tasklet_schedule(&dev->done_task);
+ complete(&dev->dma_completion);
return IRQ_HANDLED;
}
@@ -827,7 +1383,8 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
static int sahara_register_algs(struct sahara_dev *dev)
{
- int err, i, j;
+ int err;
+ unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
INIT_LIST_HEAD(&aes_algs[i].cra_list);
@@ -836,8 +1393,29 @@ static int sahara_register_algs(struct sahara_dev *dev)
goto err_aes_algs;
}
+ for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
+ err = crypto_register_ahash(&sha_v3_algs[k]);
+ if (err)
+ goto err_sha_v3_algs;
+ }
+
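+ /* SHA-256 is only available on SAHARA version 4 and later. */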
+ if (dev->version > SAHARA_VERSION_3)
+ for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
+ err = crypto_register_ahash(&sha_v4_algs[l]);
+ if (err)
+ goto err_sha_v4_algs;
+ }
+
return 0;
+err_sha_v4_algs:
+ for (j = 0; j < l; j++)
+ crypto_unregister_ahash(&sha_v4_algs[j]);
+
+err_sha_v3_algs:
+ for (j = 0; j < k; j++)
+ crypto_unregister_ahash(&sha_v3_algs[j]);
+
err_aes_algs:
for (j = 0; j < i; j++)
crypto_unregister_alg(&aes_algs[j]);
@@ -847,10 +1425,17 @@ err_aes_algs:
static void sahara_unregister_algs(struct sahara_dev *dev)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
+ crypto_unregister_ahash(&sha_v3_algs[i]);
+
+ if (dev->version > SAHARA_VERSION_3)
+ for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ crypto_unregister_ahash(&sha_v4_algs[i]);
}
static struct platform_device_id sahara_platform_ids[] = {
@@ -860,6 +1445,7 @@ static struct platform_device_id sahara_platform_ids[] = {
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
static struct of_device_id sahara_dt_ids[] = {
+ { .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
};
@@ -939,6 +1525,16 @@ static int sahara_probe(struct platform_device *pdev)
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
+ /* Allocate space for context: largest digest + message length field */
+ dev->context_base = dma_alloc_coherent(&pdev->dev,
+ SHA256_DIGEST_SIZE + 4,
+ &dev->context_phys_base, GFP_KERNEL);
+ if (!dev->context_base) {
+ dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+ err = -ENOMEM;
+ goto err_key;
+ }
+
/* Allocate space for HW links */
dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
@@ -956,28 +1552,40 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
+ spin_lock_init(&dev->lock);
+ mutex_init(&dev->queue_mutex);
+
dev_ptr = dev;
- tasklet_init(&dev->queue_task, sahara_aes_queue_task,
- (unsigned long)dev);
- tasklet_init(&dev->done_task, sahara_aes_done_task,
- (unsigned long)dev);
+ dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
+ if (IS_ERR(dev->kthread)) {
+ err = PTR_ERR(dev->kthread);
+ goto err_link;
+ }
- init_timer(&dev->watchdog);
- dev->watchdog.function = &sahara_watchdog;
- dev->watchdog.data = (unsigned long)dev;
+ init_completion(&dev->dma_completion);
clk_prepare_enable(dev->clk_ipg);
clk_prepare_enable(dev->clk_ahb);
version = sahara_read(dev, SAHARA_REG_VERSION);
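+ /* On i.MX53 (SAHARA v4) the version is reported in bits 15:8 of the register. */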
- if (version != SAHARA_VERSION_3) {
+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
+ if (version != SAHARA_VERSION_3)
+ err = -ENODEV;
+ } else if (of_device_is_compatible(pdev->dev.of_node,
+ "fsl,imx53-sahara")) {
+ if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
+ err = -ENODEV;
+ version = (version >> 8) & 0xff;
+ }
+ if (err == -ENODEV) {
dev_err(&pdev->dev, "SAHARA version %d not supported\n",
- version);
- err = -ENODEV;
+ version);
goto err_algs;
}
+ dev->version = version;
+
sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
SAHARA_REG_CMD);
sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
@@ -1000,11 +1608,15 @@ err_algs:
dev->hw_link[0], dev->hw_phys_link[0]);
clk_disable_unprepare(dev->clk_ipg);
clk_disable_unprepare(dev->clk_ahb);
+ kthread_stop(dev->kthread);
dev_ptr = NULL;
err_link:
dma_free_coherent(&pdev->dev,
2 * AES_KEYSIZE_128,
dev->key_base, dev->key_phys_base);
+ dma_free_coherent(&pdev->dev,
+ SHA256_DIGEST_SIZE + 4,
+ dev->context_base, dev->context_phys_base);
err_key:
dma_free_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
@@ -1027,8 +1639,7 @@ static int sahara_remove(struct platform_device *pdev)
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
dev->hw_desc[0], dev->hw_phys_desc[0]);
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
+ kthread_stop(dev->kthread);
sahara_unregister_algs(dev);
@@ -1045,7 +1656,6 @@ static struct platform_driver sahara_driver = {
.remove = sahara_remove,
.driver = {
.name = SAHARA_NAME,
- .owner = THIS_MODULE,
.of_match_table = sahara_dt_ids,
},
.id_table = sahara_platform_ids,
@@ -1055,4 +1665,5 @@ module_platform_driver(sahara_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 624b8be0c365..067ec2193d71 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2811,7 +2811,6 @@ MODULE_DEVICE_TABLE(of, talitos_match);
static struct platform_driver talitos_driver = {
.driver = {
.name = "talitos",
- .owner = THIS_MODULE,
.of_match_table = talitos_match,
},
.probe = talitos_probe,
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 92105f3dc8e0..f831bb952b2f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1688,6 +1688,7 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
}
+#ifdef CONFIG_PM_SLEEP
static int ux500_cryp_suspend(struct device *dev)
{
int ret;
@@ -1768,6 +1769,7 @@ static int ux500_cryp_resume(struct device *dev)
return ret;
}
+#endif
static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
@@ -1781,7 +1783,6 @@ static struct platform_driver cryp_driver = {
.remove = ux500_cryp_remove,
.shutdown = ux500_cryp_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "cryp1",
.of_match_table = ux500_cryp_match,
.pm = &ux500_cryp_pm,
@@ -1810,7 +1811,7 @@ module_exit(ux500_cryp_mod_fini);
module_param(cryp_mode, int, 0);
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
-MODULE_ALIAS("aes-all");
-MODULE_ALIAS("des-all");
+MODULE_ALIAS_CRYPTO("aes-all");
+MODULE_ALIAS_CRYPTO("des-all");
MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1c73f4fbc252..70a20871e998 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1881,6 +1881,7 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
__func__);
}
+#ifdef CONFIG_PM_SLEEP
/**
* ux500_hash_suspend - Function that suspends the hash device.
* @dev: Device to suspend.
@@ -1949,6 +1950,7 @@ static int ux500_hash_resume(struct device *dev)
return ret;
}
+#endif
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
@@ -1962,7 +1964,6 @@ static struct platform_driver hash_driver = {
.remove = ux500_hash_remove,
.shutdown = ux500_hash_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "hash1",
.of_match_table = ux500_hash_match,
.pm = &ux500_hash_pm,
@@ -1995,7 +1996,7 @@ module_exit(ux500_hash_mod_fini);
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("hmac-sha1-all");
-MODULE_ALIAS("hmac-sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index d9b08d3b6830..da9509205169 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -1034,7 +1034,6 @@ static struct platform_driver exynos4_busfreq_driver = {
.id_table = exynos4_busfreq_id,
.driver = {
.name = "exynos4-busfreq",
- .owner = THIS_MODULE,
.pm = &exynos4_busfreq_pm_ops,
},
};
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index 6cd0392e2798..297ea30d4159 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -393,7 +393,6 @@ static struct platform_driver exynos5_busfreq_int_driver = {
.remove = exynos5_busfreq_int_remove,
.driver = {
.name = "exynos5-bus-int",
- .owner = THIS_MODULE,
.pm = &exynos5_busfreq_int_pm_ops,
},
};
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7bb9d65d9a2c..e5541117b3e9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -283,7 +283,7 @@ EXPORT_SYMBOL(fence_add_callback);
* @cb: [in] the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
- * true if the callback is succesfully removed, or false if the fence has
+ * true if the callback is successfully removed, or false if the fence has
* already been signaled.
*
* *WARNING*:
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..f2b2c4e87aef 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
help
Support the Atmel AHB DMA controller.
+config AT_XDMAC
+ tristate "Atmel XDMA support"
+ depends on ARCH_AT91
+ select DMA_ENGINE
+ help
+ Support the Atmel XDMA controller.
+
config FSL_DMA
tristate "Freescale Elo series DMA support"
depends on FSL_SOC
@@ -395,12 +402,12 @@ config XILINX_VDMA
config DMA_SUN6I
tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || COMPILE_TEST
+ depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
depends on RESET_CONTROLLER
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the DMA engine for Allwinner A31 SoCs.
+ Support for the DMA engine first found in Allwinner A31 SoCs.
config NBPFAXI_DMA
tristate "Renesas Type-AXI NBPF DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE_BASE) += sh/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e34024b000a4..1364d00881dd 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2164,7 +2164,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
__func__, ret);
goto out_no_memcpy;
}
- pl08x->memcpy.chancnt = ret;
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
@@ -2175,7 +2174,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
__func__, ret);
goto out_no_slave;
}
- pl08x->slave.chancnt = ret;
ret = dma_async_device_register(&pl08x->memcpy);
if (ret) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..b60d77a22df6
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1524 @@
+/*
+ * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
+ *
+ * Copyright (C) 2014 Atmel Corporation
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/barrier.h>
+#include <dt-bindings/dma/at91.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include "dmaengine.h"
+
+/* Global registers */
+#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
+#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
+#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
+#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
+#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
+#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
+#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
+#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
+#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
+#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
+#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
+#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
+#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
+#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
+#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
+#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
+#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
+#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
+#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
+#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
+#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
+
+/* Channel relative registers offsets */
+#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
+#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
+#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
+#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
+#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
+#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
+#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
+#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
+#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
+#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
+#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
+#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
+#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
+#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
+#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
+#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
+#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
+#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
+#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
+#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
+#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
+#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
+#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
+#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
+#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
+#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
+#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
+#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
+#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
+#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
+#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
+#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
+#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
+#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
+#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
+#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
+#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
+#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
+#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
+#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
+#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
+#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
+#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
+#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
+#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
+#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
+#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
+#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
+#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
+#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
+#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
+#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
+#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
+#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
+#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
+#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
+#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
+#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
+#define AT_XDMAC_CC_DWIDTH_OFFSET 11
+#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
+#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
+#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
+#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
+#define AT_XDMAC_CC_DWIDTH_WORD 0x2
+#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
+#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
+#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
+#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
+#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
+#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
+#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
+#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
+#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
+#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
+#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
+#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
+#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
+#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
+#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
+#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
+#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
+#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
+#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
+#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
+#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
+#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
+#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
+#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
+#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
+#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
+
+#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
+
+/* Microblock control members */
+#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
+#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
+#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
+#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
+#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
+#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
+#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
+#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
+
+#define AT_XDMAC_MAX_CHAN 0x20
+
+enum atc_status {
+ AT_XDMAC_CHAN_IS_CYCLIC = 0,
+ AT_XDMAC_CHAN_IS_PAUSED,
+};
+
+/* ----- Channels ----- */
+struct at_xdmac_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u32 mask; /* Channel Mask */
+ u32 cfg[3]; /* Channel Configuration Register */
+ #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */
+ #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predefined dev to mem channel conf */
+ #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predefined mem to dev channel conf */
+ u8 perid; /* Peripheral ID */
+ u8 perif; /* Peripheral Interface */
+ u8 memif; /* Memory Interface */
+ u32 per_src_addr;
+ u32 per_dst_addr;
+ u32 save_cim;
+ u32 save_cnda;
+ u32 save_cndc;
+ unsigned long status;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+
+ struct list_head xfers_list;
+ struct list_head free_descs_list;
+};
+
+
+/* ----- Controller ----- */
+struct at_xdmac {
+ struct dma_device dma;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ u32 save_gim;
+ u32 save_gs;
+ struct dma_pool *at_xdmac_desc_pool;
+ struct at_xdmac_chan chan[0];
+};
+
+
+/* ----- Descriptors ----- */
+
+/* Linked List Descriptor */
+struct at_xdmac_lld {
+ dma_addr_t mbr_nda; /* Next Descriptor Member */
+ u32 mbr_ubc; /* Microblock Control Member */
+ dma_addr_t mbr_sa; /* Source Address Member */
+ dma_addr_t mbr_da; /* Destination Address Member */
+ u32 mbr_cfg; /* Configuration Register */
+};
+
+
+struct at_xdmac_desc {
+ struct at_xdmac_lld lld;
+ enum dma_transfer_direction direction;
+ struct dma_async_tx_descriptor tx_dma_desc;
+ struct list_head desc_node;
+ /* Following members are only used by the first descriptor */
+ bool active_xfer;
+ unsigned int xfer_size;
+ struct list_head descs_list;
+ struct list_head xfer_node;
+};
+
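+/* Channel register blocks start at offset 0x50 and are 0x40 bytes apart. */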
+static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
+{
+ return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
+}
+
+#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
+#define at_xdmac_write(atxdmac, reg, value) \
+ writel_relaxed((value), (atxdmac)->regs + (reg))
+
+#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
+#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
+
+static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
+{
+ return container_of(dchan, struct at_xdmac_chan, chan);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
+{
+ return container_of(ddev, struct at_xdmac, dma);
+}
+
+static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
+}
+
+static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
+{
+ return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+}
+
+static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
+{
+ return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+}
+
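+/*
+ * Convert a maxburst value (a power-of-two number of data transfers) into
+ * the log2 encoding expected by the CSIZE field; bursts larger than 16 are
+ * rejected.
+ */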
+static inline int at_xdmac_csize(u32 maxburst)
+{
+ int csize;
+
+ csize = ffs(maxburst) - 1;
+ if (csize > 4)
+ csize = -EINVAL;
+
+ return csize;
+};
+
+static inline u8 at_xdmac_get_dwidth(u32 cfg)
+{
+ return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
+};
+
+static unsigned int init_nr_desc_per_channel = 64;
+module_param(init_nr_desc_per_channel, uint, 0644);
+MODULE_PARM_DESC(init_nr_desc_per_channel,
+ "initial descriptors per channel (default: 64)");
+
+
+static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
+{
+ return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
+}
+
+static void at_xdmac_off(struct at_xdmac *atxdmac)
+{
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
+
+ /* Wait until all channels are disabled. */
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
+ cpu_relax();
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
+}
+
+/* Call with lock held. */
+static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
+ struct at_xdmac_desc *first)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ u32 reg;
+
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
+
+ if (at_xdmac_chan_is_enabled(atchan))
+ return;
+
+ /* Set transfer as active to not try to start it again. */
+ first->active_xfer = true;
+
+ /* Tell xdmac where to get the first descriptor. */
+ reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
+ | AT_XDMAC_CNDA_NDAIF(atchan->memif);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
+
+ /*
+ * When doing a memory-to-memory transfer, we need to use next
+ * descriptor view 2, since some fields of the configuration register
+ * depend on the transfer size and the src/dest addresses.
+ */
+ if (is_slave_direction(first->direction)) {
+ reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
+ if (first->direction == DMA_MEM_TO_DEV)
+ atchan->cfg[AT_XDMAC_CUR_CFG] =
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ else
+ atchan->cfg[AT_XDMAC_CUR_CFG] =
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC,
+ atchan->cfg[AT_XDMAC_CUR_CFG]);
+ } else {
+ /*
+ * No need to write AT_XDMAC_CC reg, it will be done when the
+ * descriptor is fetched.
+ */
+ reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
+ }
+
+ reg |= AT_XDMAC_CNDC_NDDUP
+ | AT_XDMAC_CNDC_NDSUP
+ | AT_XDMAC_CNDC_NDE;
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
+
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+ at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
+ reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+ /*
+ * There is no end of list when doing cyclic DMA, so we need to get
+ * an interrupt after each period.
+ */
+ if (at_xdmac_chan_is_cyclic(atchan))
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+ reg | AT_XDMAC_CIE_BIE);
+ else
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+ reg | AT_XDMAC_CIE_LIE);
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
+ wmb();
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+}
+
+static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct at_xdmac_desc *desc = txd_to_at_desc(tx);
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&atchan->lock);
+ cookie = dma_cookie_assign(tx);
+
+ dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
+ __func__, atchan, desc);
+ list_add_tail(&desc->xfer_node, &atchan->xfers_list);
+ if (list_is_singular(&atchan->xfers_list))
+ at_xdmac_start_xfer(atchan, desc);
+
+ spin_unlock_bh(&atchan->lock);
+ return cookie;
+}
+
+static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
+ gfp_t gfp_flags)
+{
+ struct at_xdmac_desc *desc;
+ struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
+ dma_addr_t phys;
+
+ desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ if (desc) {
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->descs_list);
+ dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
+ desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
+ desc->tx_dma_desc.phys = phys;
+ }
+
+ return desc;
+}
+
+/* Call must be protected by lock. */
+static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+
+ if (list_empty(&atchan->free_descs_list)) {
+ desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
+ } else {
+ desc = list_first_entry(&atchan->free_descs_list,
+ struct at_xdmac_desc, desc_node);
+ list_del(&desc->desc_node);
+ desc->active_xfer = false;
+ }
+
+ return desc;
+}
+
+static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ struct at_xdmac *atxdmac = of_dma->of_dma_data;
+ struct at_xdmac_chan *atchan;
+ struct dma_chan *chan;
+ struct device *dev = atxdmac->dma.dev;
+
+ if (dma_spec->args_count != 1) {
+ dev_err(dev, "dma phandler args: bad number of args\n");
+ return NULL;
+ }
+
+ chan = dma_get_any_slave_channel(&atxdmac->dma);
+ if (!chan) {
+ dev_err(dev, "can't get a dma channel\n");
+ return NULL;
+ }
+
+ atchan = to_at_xdmac_chan(chan);
+ atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
+ atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
+ atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
+ dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
+ atchan->memif, atchan->perif, atchan->perid);
+
+ return chan;
+}
+
+static int at_xdmac_set_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *sconfig)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ u8 dwidth;
+ int csize;
+
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_INCREMENTED_AM
+ | AT_XDMAC_CC_SAM_FIXED_AM
+ | AT_XDMAC_CC_DIF(atchan->memif)
+ | AT_XDMAC_CC_SIF(atchan->perif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_PER2MEM
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = at_xdmac_csize(sconfig->src_maxburst);
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(sconfig->src_addr_width) - 1;
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_FIXED_AM
+ | AT_XDMAC_CC_SAM_INCREMENTED_AM
+ | AT_XDMAC_CC_DIF(atchan->perif)
+ | AT_XDMAC_CC_SIF(atchan->memif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_MEM2PER
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = at_xdmac_csize(sconfig->dst_maxburst);
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(sconfig->dst_addr_width) - 1;
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+ /* Src and dst addr are needed to configure the link list descriptor. */
+ atchan->per_src_addr = sconfig->src_addr;
+ atchan->per_dst_addr = sconfig->dst_addr;
+
+ dev_dbg(chan2dev(chan),
+ "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
+ __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
+ atchan->per_src_addr, atchan->per_dst_addr);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ struct scatterlist *sg;
+ int i;
+ u32 cfg;
+ unsigned int xfer_size = 0;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(chan2dev(chan), "invalid DMA direction\n");
+ return NULL;
+ }
+
+ dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
+ __func__, sg_len,
+ direction == DMA_MEM_TO_DEV ? "to device" : "from device",
+ flags);
+
+ /* Protect dma_sconfig field that can be modified by set_slave_conf. */
+ spin_lock_bh(&atchan->lock);
+
+ /* Prepare descriptors. */
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct at_xdmac_desc *desc = NULL;
+ u32 len, mem;
+
+ len = sg_dma_len(sg);
+ mem = sg_dma_address(sg);
+ if (unlikely(!len)) {
+ dev_err(chan2dev(chan), "sg data length is zero\n");
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+ dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
+ __func__, i, len, mem);
+
+ desc = at_xdmac_get_desc(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+
+ /* Linked list descriptor setup. */
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_da = mem;
+ cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = mem;
+ desc->lld.mbr_da = atchan->per_dst_addr;
+ cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ }
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */
+ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
+ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
+ | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
+ | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ xfer_size += len;
+ }
+
+ spin_unlock_bh(&atchan->lock);
+
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = xfer_size;
+ first->direction = direction;
+
+ return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ unsigned int periods = buf_len / period_len;
+ int i;
+ u32 cfg;
+
+ dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
+ __func__, &buf_addr, buf_len, period_len,
+ direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(chan2dev(chan), "invalid DMA direction\n");
+ return NULL;
+ }
+
+ if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
+ dev_err(chan2dev(chan), "channel currently used\n");
+ return NULL;
+ }
+
+ for (i = 0; i < periods; i++) {
+ struct at_xdmac_desc *desc = NULL;
+
+ spin_lock_bh(&atchan->lock);
+ desc = at_xdmac_get_desc(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+ spin_unlock_bh(&atchan->lock);
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
+ __func__, desc, &desc->tx_dma_desc.phys);
+
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_da = buf_addr + i * period_len;
+ cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = buf_addr + i * period_len;
+ desc->lld.mbr_da = atchan->per_dst_addr;
+ cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ }
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
+ | AT_XDMAC_MBR_UBC_NDEN
+ | AT_XDMAC_MBR_UBC_NSEN
+ | AT_XDMAC_MBR_UBC_NDE
+ | period_len >> at_xdmac_get_dwidth(cfg);
+
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ }
+
+ prev->lld.mbr_nda = first->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = buf_len;
+ first->direction = direction;
+
+ return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ size_t remaining_size = len, xfer_size = 0, ublen;
+ dma_addr_t src_addr = src, dst_addr = dest;
+ u32 dwidth;
+ /*
+ * WARNING: The transfer direction is not known here, which means we
+ * cannot dynamically select the source and destination interfaces and
+ * have to use the same one for both. Only interface 0 allows EBI
+ * access. Fortunately, DDR can be accessed through both ports (at
+ * least on SAMA5D4x), so using the same interface for source and
+ * destination works around not knowing the direction.
+ */
+ u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+ | AT_XDMAC_CC_SAM_INCREMENTED_AM
+ | AT_XDMAC_CC_DIF(0)
+ | AT_XDMAC_CC_SIF(0)
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_MEM_TRAN;
+
+ dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
+ __func__, &src, &dest, len, flags);
+
+ if (unlikely(!len))
+ return NULL;
+
+ /*
+ * Check address alignment to select the greatest data width we can use.
+ * Some XDMAC implementations don't provide dword transfers; in that
+ * case, selecting dword behaves the same as selecting word transfers.
+ */
+ if (!((src_addr | dst_addr) & 7)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+ } else if (!((src_addr | dst_addr) & 3)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+ } else if (!((src_addr | dst_addr) & 1)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+ } else {
+ dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+ dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+ }
+
+ /* Prepare descriptors. */
+ while (remaining_size) {
+ struct at_xdmac_desc *desc = NULL;
+
+ dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
+
+ spin_lock_bh(&atchan->lock);
+ desc = at_xdmac_get_desc(atchan);
+ spin_unlock_bh(&atchan->lock);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ return NULL;
+ }
+
+ /* Update src and dest addresses. */
+ src_addr += xfer_size;
+ dst_addr += xfer_size;
+
+ if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
+ xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
+ else
+ xfer_size = remaining_size;
+
+ dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
+
+ /* Check remaining length and change data width if needed. */
+ if (!((src_addr | dst_addr | xfer_size) & 7)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+ } else if (!((src_addr | dst_addr | xfer_size) & 3)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+ } else if (!((src_addr | dst_addr | xfer_size) & 1)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+ } else if ((src_addr | dst_addr | xfer_size) & 1) {
+ dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+ dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+ }
+ chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+
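+ /* The microblock length is expressed in data units of the selected width. */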
+ ublen = xfer_size >> dwidth;
+ remaining_size -= xfer_size;
+
+ desc->lld.mbr_sa = src_addr;
+ desc->lld.mbr_da = dst_addr;
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
+ | AT_XDMAC_MBR_UBC_NDEN
+ | AT_XDMAC_MBR_UBC_NSEN
+ | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
+ | ublen;
+ desc->lld.mbr_cfg = chan_cc;
+
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
+ __func__, prev, prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ }
+
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = len;
+
+ return &first->tx_dma_desc;
+}
+
+static enum dma_status
+at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+ struct list_head *descs_list;
+ enum dma_status ret;
+ int residue;
+ u32 cur_nda, mask, value;
+ u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (!txstate)
+ return ret;
+
+ spin_lock_bh(&atchan->lock);
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+
+ /*
+ * If the transfer has not been started yet, there is no need to
+ * compute the residue; it is simply the transfer length.
+ */
+ if (!desc->active_xfer) {
+ dma_set_residue(txstate, desc->xfer_size);
+ spin_unlock_bh(&atchan->lock);
+ return ret;
+ }
+
+ residue = desc->xfer_size;
+ /*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized.
+ */
+ mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
+ value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
+ if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ /*
+ * Subtract the size of all microblocks already transferred, including
+ * the current one, then add back the size still to be transferred in
+ * the current microblock.
+ */
+ descs_list = &desc->descs_list;
+ list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+ residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
+ if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ break;
+ }
+ residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+
+ spin_unlock_bh(&atchan->lock);
+
+ dma_set_residue(txstate, residue);
+
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
+ __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+
+ return ret;
+}
+
+/* Call must be protected by lock. */
+static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
+ struct at_xdmac_desc *desc)
+{
+ dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+
+ /*
+ * Remove the transfer from the transfer list then move the transfer
+ * descriptors into the free descriptors list.
+ */
+ list_del(&desc->xfer_node);
+ list_splice_init(&desc->descs_list, &atchan->free_descs_list);
+}
+
+static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+
+ spin_lock_bh(&atchan->lock);
+
+ /*
+ * If the channel is enabled, do nothing; advance_work will be
+ * triggered again after the interrupt.
+ */
+ if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
+ desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer)
+ at_xdmac_start_xfer(atchan, desc);
+ }
+
+ spin_unlock_bh(&atchan->lock);
+}
+
+static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+ struct dma_async_tx_descriptor *txd;
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+ txd = &desc->tx_dma_desc;
+
+ if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+ txd->callback(txd->callback_param);
+}
+
+static void at_xdmac_tasklet(unsigned long data)
+{
+ struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
+ struct at_xdmac_desc *desc;
+ u32 error_mask;
+
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+ __func__, atchan->status);
+
+ error_mask = AT_XDMAC_CIS_RBEIS
+ | AT_XDMAC_CIS_WBEIS
+ | AT_XDMAC_CIS_ROIS;
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_handle_cyclic(atchan);
+ } else if ((atchan->status & AT_XDMAC_CIS_LIS)
+ || (atchan->status & error_mask)) {
+ struct dma_async_tx_descriptor *txd;
+
+ if (atchan->status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+ if (atchan->status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+ if (atchan->status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock_bh(&atchan->lock);
+ desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ BUG_ON(!desc->active_xfer);
+
+ txd = &desc->tx_dma_desc;
+
+ at_xdmac_remove_xfer(atchan, desc);
+ spin_unlock_bh(&atchan->lock);
+
+ if (!at_xdmac_chan_is_cyclic(atchan)) {
+ dma_cookie_complete(txd);
+ if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+ txd->callback(txd->callback_param);
+ }
+
+ dma_run_dependencies(txd);
+
+ at_xdmac_advance_work(atchan);
+ }
+}
+
+static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
+ struct at_xdmac_chan *atchan;
+ u32 imr, status, pending;
+ u32 chan_imr, chan_status;
+ int i, ret = IRQ_NONE;
+
+ do {
+ imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
+ pending = status & imr;
+
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
+ __func__, status, imr, pending);
+
+ if (!pending)
+ break;
+
+ /* We have to find which channel has generated the interrupt. */
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ if (!((1 << i) & pending))
+ continue;
+
+ atchan = &atxdmac->chan[i];
+ chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+ atchan->status = chan_status & chan_imr;
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: chan%d: imr=0x%x, status=0x%x\n",
+ __func__, i, chan_imr, chan_status);
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__,
+ at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
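+ /* Disable the channel on a bus error before deferring to the tasklet. */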
+ if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+
+ tasklet_schedule(&atchan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ } while (pending);
+
+ return ret;
+}
+
+static void at_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
+
+ if (!at_xdmac_chan_is_cyclic(atchan))
+ at_xdmac_advance_work(atchan);
+
+ return;
+}
+
+static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ int ret = 0;
+
+ dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+
+ spin_lock_bh(&atchan->lock);
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ break;
+
+ case DMA_RESUME:
+ if (!at_xdmac_chan_is_paused(atchan))
+ break;
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ cpu_relax();
+
+ /* Cancel all pending transfers. */
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ at_xdmac_remove_xfer(atchan, desc);
+
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ ret = at_xdmac_set_slave_config(chan,
+ (struct dma_slave_config *)arg);
+ break;
+
+ default:
+ dev_err(chan2dev(chan),
+ "unmanaged or unknown dma control cmd: %d\n", cmd);
+ ret = -ENXIO;
+ }
+
+ spin_unlock_bh(&atchan->lock);
+
+ return ret;
+}
+
+static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *desc;
+ int i;
+
+ spin_lock_bh(&atchan->lock);
+
+ if (at_xdmac_chan_is_enabled(atchan)) {
+ dev_err(chan2dev(chan),
+ "can't allocate channel resources (channel enabled)\n");
+ i = -EIO;
+ goto spin_unlock;
+ }
+
+ if (!list_empty(&atchan->free_descs_list)) {
+ dev_err(chan2dev(chan),
+ "can't allocate channel resources (channel not free from a previous use)\n");
+ i = -EIO;
+ goto spin_unlock;
+ }
+
+ for (i = 0; i < init_nr_desc_per_channel; i++) {
+ desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+ if (!desc) {
+ dev_warn(chan2dev(chan),
+ "only %d descriptors have been allocated\n", i);
+ break;
+ }
+ list_add_tail(&desc->desc_node, &atchan->free_descs_list);
+ }
+
+ dma_cookie_init(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+spin_unlock:
+ spin_unlock_bh(&atchan->lock);
+ return i;
+}
+
+static void at_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
+ struct at_xdmac_desc *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
+ list_del(&desc->desc_node);
+ dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
+ }
+
+ return;
+}
+
+#define AT_XDMAC_DMA_BUSWIDTHS\
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+
+ caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ return 0;
+}
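+
Illustration only, not part of the patch: a client can read these advertised capabilities back with dma_get_slave_caps() before configuring a transfer. A minimal sketch, assuming "chan" is a channel on this controller:

#include <linux/dmaengine.h>

static bool example_supports_4byte_dst(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* checks against the AT_XDMAC_DMA_BUSWIDTHS mask advertised above */
	return !!(caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}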
+
+#ifdef CONFIG_PM
+static int atmel_xdmac_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ /* Wait for transfer completion, except in cyclic case. */
+ if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
+ return -EAGAIN;
+ }
+ return 0;
+}
+#else
+# define atmel_xdmac_prepare NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int atmel_xdmac_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (!at_xdmac_chan_is_paused(atchan))
+ at_xdmac_control(chan, DMA_PAUSE, 0);
+ atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
+ atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
+ }
+ }
+ atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+
+ at_xdmac_off(atxdmac);
+ clk_disable_unprepare(atxdmac->clk);
+ return 0;
+}
+
+static int atmel_xdmac_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac_chan *atchan;
+ struct dma_chan *chan, *_chan;
+ int i;
+ u32 cfg;
+
+ clk_prepare_enable(atxdmac->clk);
+
+ /* Clear pending interrupts. */
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ atchan = &atxdmac->chan[i];
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
+ cpu_relax();
+ }
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ atchan = to_at_xdmac_chan(chan);
+ cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+ wmb();
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int at_xdmac_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct at_xdmac *atxdmac;
+ int irq, size, nr_channels, i, ret;
+ void __iomem *base;
+ u32 reg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /*
+ * Read the number of XDMAC channels. The read helper can't be used
+ * here because atxdmac is not allocated yet, and we need the channel
+ * count in order to size that allocation.
+ */
+ reg = readl_relaxed(base + AT_XDMAC_GTYPE);
+ nr_channels = AT_XDMAC_NB_CH(reg);
+ if (nr_channels > AT_XDMAC_MAX_CHAN) {
+ dev_err(&pdev->dev, "invalid number of channels (%u)\n",
+ nr_channels);
+ return -EINVAL;
+ }
+
+ size = sizeof(*atxdmac);
+ size += nr_channels * sizeof(struct at_xdmac_chan);
+ atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!atxdmac) {
+ dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
+ return -ENOMEM;
+ }
+
+ atxdmac->regs = base;
+ atxdmac->irq = irq;
+
+ atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
+ if (IS_ERR(atxdmac->clk)) {
+ dev_err(&pdev->dev, "can't get dma_clk\n");
+ return PTR_ERR(atxdmac->clk);
+ }
+
+ /* Do not use devres (devm_request_irq) to avoid races with the tasklet */
+ ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
+ if (ret) {
+ dev_err(&pdev->dev, "can't request irq\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(atxdmac->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't prepare or enable clock\n");
+ goto err_free_irq;
+ }
+
+ atxdmac->at_xdmac_desc_pool =
+ dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+ sizeof(struct at_xdmac_desc), 4, 0);
+ if (!atxdmac->at_xdmac_desc_pool) {
+ dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
+ dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
+ /*
+ * Without DMA_PRIVATE the driver cannot allocate more than one
+ * channel: the second allocation fails in private_candidate().
+ */
+ dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
+ atxdmac->dma.dev = &pdev->dev;
+ atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
+ atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
+ atxdmac->dma.device_tx_status = at_xdmac_tx_status;
+ atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
+ atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
+ atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
+ atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
+ atxdmac->dma.device_control = at_xdmac_control;
+ atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
+
+ /* Disable all chans and interrupts. */
+ at_xdmac_off(atxdmac);
+
+ /* Init channels. */
+ INIT_LIST_HEAD(&atxdmac->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
+
+ atchan->chan.device = &atxdmac->dma;
+ list_add_tail(&atchan->chan.device_node,
+ &atxdmac->dma.channels);
+
+ atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
+ atchan->mask = 1 << i;
+
+ spin_lock_init(&atchan->lock);
+ INIT_LIST_HEAD(&atchan->xfers_list);
+ INIT_LIST_HEAD(&atchan->free_descs_list);
+ tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
+ (unsigned long)atchan);
+
+ /* Clear pending interrupts. */
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
+ cpu_relax();
+ }
+ platform_set_drvdata(pdev, atxdmac);
+
+ ret = dma_async_device_register(&atxdmac->dma);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to register DMA engine device\n");
+ goto err_clk_disable;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ at_xdmac_xlate, atxdmac);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register of dma controller\n");
+ goto err_dma_unregister;
+ }
+
+ dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
+ nr_channels, atxdmac->regs);
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(&atxdmac->dma);
+err_clk_disable:
+ clk_disable_unprepare(atxdmac->clk);
+err_free_irq:
+ free_irq(atxdmac->irq, atxdmac->dma.dev);
+ return ret;
+}
+
+static int at_xdmac_remove(struct platform_device *pdev)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
+ int i;
+
+ at_xdmac_off(atxdmac);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&atxdmac->dma);
+ clk_disable_unprepare(atxdmac->clk);
+
+ synchronize_irq(atxdmac->irq);
+
+ free_irq(atxdmac->irq, atxdmac->dma.dev);
+
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
+
+ tasklet_kill(&atchan->tasklet);
+ at_xdmac_free_chan_resources(&atchan->chan);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
+ .prepare = atmel_xdmac_prepare,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
+};
+
+static const struct of_device_id atmel_xdmac_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d4-dma",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
+
+static struct platform_driver at_xdmac_driver = {
+ .probe = at_xdmac_probe,
+ .remove = at_xdmac_remove,
+ .driver = {
+ .name = "at_xdmac",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
+ .pm = &atmel_xdmac_dev_pm_ops,
+ }
+};
+
+static int __init at_xdmac_init(void)
+{
+ return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
+}
+subsys_initcall(at_xdmac_init);
+
+MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_LICENSE("GPL");
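
Usage illustration only, not part of the patch: a peripheral driver would exercise the callbacks registered in at_xdmac_probe() through the standard dmaengine flow. A minimal sketch; the channel name "tx", the FIFO address and the buffer below are assumptions:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_tx(struct device *dev, dma_addr_t buf,
			    size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct scatterlist sg;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);	/* DMA_SLAVE_CONFIG path above */

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* at_xdmac_issue_pending() */
	return 0;
}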
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 68007974961a..918b7b3f766f 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -525,8 +525,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
vchan_init(&c->vc, &d->ddev);
INIT_LIST_HEAD(&c->node);
- d->ddev.chancnt++;
-
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
c->irq_number = irq;
@@ -694,7 +692,6 @@ static struct platform_driver bcm2835_dma_driver = {
.remove = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
},
};
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index a8c2e2994d2e..fa378d88f6c8 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -495,7 +495,6 @@ static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.remove = mpc52xx_bcom_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_bcom_of_match,
},
};
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index a58eec3b2cad..b743adf56465 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -1,3 +1,4 @@
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -567,7 +568,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
reg |= GCR_TEARDOWN;
cppi_writel(reg, c->gcr_reg);
c->td_queued = 1;
- c->td_retry = 100;
+ c->td_retry = 500;
}
if (!c->td_seen || !c->td_desc_seen) {
@@ -603,12 +604,16 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
* descriptor before the TD we fetch it from enqueue, it has to be
* there waiting for us.
*/
- if (!c->td_seen && c->td_retry)
+ if (!c->td_seen && c->td_retry) {
+ udelay(1);
return -EAGAIN;
-
+ }
WARN_ON(!c->td_retry);
+
if (!c->td_desc_seen) {
desc_phys = cppi41_pop_desc(cdd, c->q_num);
+ if (!desc_phys)
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
WARN_ON(!desc_phys);
}
@@ -1088,7 +1093,6 @@ static struct platform_driver cpp41_dma_driver = {
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
- .owner = THIS_MODULE,
.pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index ae2ab14e64b3..bdeafeefa5f6 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -563,10 +563,9 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
dd->device_control = jz4740_dma_control;
dd->dev = &pdev->dev;
- dd->chancnt = JZ_DMA_NR_CHANS;
INIT_LIST_HEAD(&dd->channels);
- for (i = 0; i < dd->chancnt; i++) {
+ for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
chan = &dmadev->chan[i];
chan->id = i;
chan->vchan.desc_free = jz4740_dma_desc_free;
@@ -608,7 +607,6 @@ static struct platform_driver jz4740_dma_driver = {
.remove = jz4740_dma_remove,
.driver = {
.name = "jz4740-dma",
- .owner = THIS_MODULE,
},
};
module_platform_driver(jz4740_dma_driver);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24bfaf0b92ba..e057935e3023 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -330,8 +330,7 @@ static int __init dma_channel_table_init(void)
if (err) {
pr_err("initialization failure\n");
for_each_dma_cap_mask(cap, dma_cap_mask_all)
- if (channel_table[cap])
- free_percpu(channel_table[cap]);
+ free_percpu(channel_table[cap]);
}
return err;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 4cfaaa5a49be..b969206439b7 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1092,7 +1092,6 @@ static struct platform_driver edma_driver = {
.remove = edma_remove,
.driver = {
.name = "edma-dma-engine",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 3c5711d5fe97..e9ebb89e1711 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -118,17 +118,17 @@
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
struct fsl_edma_hw_tcd {
- u32 saddr;
- u16 soff;
- u16 attr;
- u32 nbytes;
- u32 slast;
- u32 daddr;
- u16 doff;
- u16 citer;
- u32 dlast_sga;
- u16 csr;
- u16 biter;
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
};
struct fsl_edma_sw_tcd {
@@ -175,18 +175,12 @@ struct fsl_edma_engine {
};
/*
- * R/W functions for big- or little-endian registers
- * the eDMA controller's endian is independent of the CPU core's endian.
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endianness is independent of the CPU core's.
+ * For the big-endian IP module, the byte offset of 8-bit and 16-bit
+ * registers must also be swapped relative to the little-endian layout.
*/
-static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
-{
- if (edma->big_endian)
- return ioread16be(addr);
- else
- return ioread16(addr);
-}
-
static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
@@ -197,13 +191,18 @@ static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
- iowrite8(val, addr);
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+ else
+ iowrite8(val, addr);
}
static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
+ /* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
- iowrite16be(val, addr);
+ iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
}
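
Worked illustration of the offset swap above (offsets assumed): on the big-endian IP a 32-bit register exposes its byte lanes in the opposite order, so an access aimed at little-endian byte offset N must land on byte offset 3 - N, which is exactly what XOR-ing the low address bits produces:

/*
 *   8-bit:  0x100 ^ 0x3 = 0x103,  0x101 ^ 0x3 = 0x102,
 *           0x102 ^ 0x3 = 0x101,  0x103 ^ 0x3 = 0x100
 *   16-bit: 0x100 ^ 0x2 = 0x102,  0x102 ^ 0x2 = 0x100
 *
 * i.e. byte N of the little-endian layout sits at byte (3 - N) of the
 * big-endian layout, and the two halfwords swap places.
 */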
@@ -254,13 +253,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+ slot = EDMAMUX_CHCFG_SOURCE(slot);
if (enable)
- edma_writeb(fsl_chan->edma,
- EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
- muxaddr + ch_off);
+ iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
else
- edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+ iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
@@ -286,9 +284,8 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc = to_fsl_edma_desc(vdesc);
for (i = 0; i < fsl_desc->n_tcds; i++)
- dma_pool_free(fsl_desc->echan->tcd_pool,
- fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
@@ -363,8 +360,8 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
- * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (!in_progress)
return len;
@@ -376,17 +373,15 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
- * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (dir == DMA_MEM_TO_DEV)
- dma_addr = edma_readl(fsl_chan->edma,
- &(edesc->tcd[i].vtcd->saddr));
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
- dma_addr = edma_readl(fsl_chan->edma,
- &(edesc->tcd[i].vtcd->daddr));
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
len -= size;
- if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+ if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
len += dma_addr + size - cur_addr;
break;
}
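
Worked example of the residue computation above (all values assumed): three TCDs of 0x100 bytes each in a DMA_MEM_TO_DEV transfer, with the engine's current source address read back inside the second TCD:

/*
 *   tcd[0].saddr = 0x10000000, tcd[1].saddr = 0x10000100,
 *   tcd[2].saddr = 0x10000200, nbytes * biter = 0x100 each,
 *   cur_addr     = 0x10000150
 *
 *   len  = 3 * 0x100                       = 0x300
 *   i=0: len -= 0x100                      -> 0x200  (cur_addr not in range)
 *   i=1: len -= 0x100                      -> 0x100
 *        cur_addr in [saddr, saddr + size) -> len += 0x10000200 - 0x10000150
 *                                          -> residue = 0x1b0
 *
 * which matches 0x300 total minus the 0x150 bytes already transferred.
 */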
@@ -424,55 +419,67 @@ static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
- u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
- u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
- u16 csr)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd)
{
+ struct fsl_edma_engine *edma = fsl_chan->edma;
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
/*
- * TCD parameters have been swapped in fill_tcd_params(),
- * so just write them to registers in the cpu endian here
+ * TCD parameters are stored in struct fsl_edma_hw_tcd in little-endian
+ * format. However, the TCD registers must be written in big- or
+ * little-endian order according to the eDMA engine's endianness.
*/
- writew(0, addr + EDMA_TCD_CSR(ch));
- writel(src, addr + EDMA_TCD_SADDR(ch));
- writel(dst, addr + EDMA_TCD_DADDR(ch));
- writew(attr, addr + EDMA_TCD_ATTR(ch));
- writew(soff, addr + EDMA_TCD_SOFF(ch));
- writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
- writel(slast, addr + EDMA_TCD_SLAST(ch));
- writew(citer, addr + EDMA_TCD_CITER(ch));
- writew(biter, addr + EDMA_TCD_BITER(ch));
- writew(doff, addr + EDMA_TCD_DOFF(ch));
- writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
- writew(csr, addr + EDMA_TCD_CSR(ch));
-}
-
-static void fill_tcd_params(struct fsl_edma_engine *edma,
- struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
+ edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
+ edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
+ edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
+ edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
+
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
+ edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
+ edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
+ edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
+
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
{
u16 csr = 0;
/*
- * eDMA hardware SGs require the TCD parameters stored in memory
- * the same endian as the eDMA module so that they can be loaded
- * automatically by the engine
+ * eDMA hardware SGs require the TCDs to be stored in little-endian
+ * format irrespective of the register endianness model, so the values
+ * are kept little-endian in memory and fsl_edma_set_tcd_regs() does
+ * the swap when programming the registers.
*/
- edma_writel(edma, src, &(tcd->saddr));
- edma_writel(edma, dst, &(tcd->daddr));
- edma_writew(edma, attr, &(tcd->attr));
- edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
- edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
- edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
- edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
- edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
- edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
- edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
+
+ tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
+ tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
+
+ tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
@@ -482,7 +489,7 @@ static void fill_tcd_params(struct fsl_edma_engine *edma,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
- edma_writew(edma, csr, &(tcd->csr));
+ tcd->csr = cpu_to_le16(csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -558,9 +565,9 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
doff = fsl_chan->fsc.addr_width;
}
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
- iter, iter, doff, last_sg, true, false, true);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_chan->fsc.attr, soff, nbytes, 0, iter,
+ iter, doff, last_sg, true, false, true);
dma_buf_next += period_len;
}
@@ -607,16 +614,16 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
- src_addr, dst_addr, fsl_chan->fsc.attr,
- soff, nbytes, 0, iter, iter, doff, last_sg,
- false, false, true);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
} else {
last_sg = 0;
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
- src_addr, dst_addr, fsl_chan->fsc.attr,
- soff, nbytes, 0, iter, iter, doff, last_sg,
- true, true, false);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
}
}
@@ -625,17 +632,13 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
- struct fsl_edma_hw_tcd *tcd;
struct virt_dma_desc *vdesc;
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
fsl_chan->edesc = to_fsl_edma_desc(vdesc);
- tcd = fsl_chan->edesc->tcd[0].vtcd;
- fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
- tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
- tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+ fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
}
@@ -963,7 +966,6 @@ MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
static struct platform_driver fsl_edma_driver = {
.driver = {
.name = "fsl-edma",
- .owner = THIS_MODULE,
.of_match_table = fsl_edma_dt_ids,
},
.probe = fsl_edma_probe,
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 994bcb2c6b92..38821cdf862b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1337,7 +1337,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
/* Add the channel to DMA device channel list */
list_add_tail(&chan->common.device_node, &fdev->common.channels);
- fdev->common.chancnt++;
dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
chan->irq != NO_IRQ ? chan->irq : fdev->irq);
@@ -1540,7 +1539,6 @@ static const struct of_device_id fsldma_of_ids[] = {
static struct platform_driver fsldma_of_driver = {
.driver = {
.name = "fsl-elo-dma",
- .owner = THIS_MODULE,
.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
.pm = &fsldma_pm_ops,
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 9d2c9e7374dc..10bbc0a675b0 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1236,7 +1236,6 @@ static int imxdma_remove(struct platform_device *pdev)
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
- .owner = THIS_MODULE,
.of_match_table = imx_dma_of_dev_id,
},
.id_table = imx_dma_devtype,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 88afc48c2ca7..d0df198f62e9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -729,6 +729,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
case IMX_DMATYPE_CSPI:
case IMX_DMATYPE_EXT:
case IMX_DMATYPE_SSI:
+ case IMX_DMATYPE_SAI:
per_2_emi = sdma->script_addrs->app_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_app_addr;
break;
@@ -1287,7 +1288,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
unsigned short *ram_code;
if (!fw) {
- dev_err(sdma->dev, "firmware not found\n");
+ dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
+ /* In this case we just use the ROM firmware. */
return;
}
@@ -1346,7 +1348,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
return ret;
}
-static int __init sdma_init(struct sdma_engine *sdma)
+static int sdma_init(struct sdma_engine *sdma)
{
int i, ret;
dma_addr_t ccb_phys;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_dma))
+ goto dma_unmap;
+
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto dma_unmap;
}
- dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
xor_val_result = 1;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
xor_val_result = 0;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
dma_unmap:
if (op == IOAT_OP_XOR) {
- dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dest_dma != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
} else if (op == IOAT_OP_XOR_VAL) {
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
}
free_resources:
dma->device_free_chan_resources(dma_chan);
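
A minimal sketch of the cleanup pattern these hunks introduce, not taken from the driver; dev, pages[] and n are hypothetical. Each slot is primed with DMA_ERROR_CODE so the unwind path can tell mapped entries from ones that were never reached:

#include <linux/dma-mapping.h>

static int example_map_all(struct device *dev, struct page **pages, int n,
			   dma_addr_t *addrs)
{
	int i;

	/* DMA_ERROR_CODE is arch-provided (x86 here) */
	for (i = 0; i < n; i++)
		addrs[i] = DMA_ERROR_CODE;

	for (i = 0; i < n; i++) {
		addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			goto err_unmap;
	}
	return 0;

err_unmap:
	for (i = 0; i < n; i++)
		if (addrs[i] != DMA_ERROR_CODE)
			dma_unmap_page(dev, addrs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	return -ENOMEM;
}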
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c56137bc3868..263d9f6a207e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1557,7 +1557,6 @@ static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = iop_adma_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "iop-adma",
},
};
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index bbf62927bd72..c2b017ad139d 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1783,7 +1783,6 @@ static int ipu_remove(struct platform_device *pdev)
static struct platform_driver ipu_platform_driver = {
.driver = {
.name = "ipu-core",
- .owner = THIS_MODULE,
},
.remove = ipu_remove,
};
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1f911aaf220..a1de14ab2c51 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -722,7 +722,6 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_issue_pending = k3_dma_issue_pending;
d->slave.device_control = k3_dma_control;
d->slave.copy_align = DMA_ALIGN;
- d->slave.chancnt = d->dma_requests;
/* init virtual channel */
d->chans = devm_kzalloc(&op->dev,
@@ -787,6 +786,7 @@ static int k3_dma_remove(struct platform_device *op)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend(struct device *dev)
{
struct k3_dma_dev *d = dev_get_drvdata(dev);
@@ -816,13 +816,13 @@ static int k3_dma_resume(struct device *dev)
k3_dma_enable_dma(d, true);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
static struct platform_driver k3_pdma_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &k3_dma_pmops,
.of_match_table = k3_pdma_dt_ids,
},
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a1a4db5721b8..8b8952f35e6c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1098,7 +1098,6 @@ static const struct platform_device_id mmp_pdma_id_table[] = {
static struct platform_driver mmp_pdma_driver = {
.driver = {
.name = "mmp-pdma",
- .owner = THIS_MODULE,
.of_match_table = mmp_pdma_dt_ids,
},
.id_table = mmp_pdma_id_table,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c6bd015b7165..bfb46957c3dc 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -703,7 +703,6 @@ static const struct platform_device_id mmp_tdma_id_table[] = {
static struct platform_driver mmp_tdma_driver = {
.driver = {
.name = "mmp-tdma",
- .owner = THIS_MODULE,
.of_match_table = mmp_tdma_dt_ids,
},
.id_table = mmp_tdma_id_table,
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 3258e484e4f6..53032bac06e0 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -677,7 +677,6 @@ static struct platform_driver moxart_driver = {
.remove = moxart_remove,
.driver = {
.name = "moxart-dma-engine",
- .owner = THIS_MODULE,
.of_match_table = moxart_dma_match,
},
};
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 881db2bcb48b..01bec4023de2 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -885,6 +885,7 @@ static int mpc_dma_probe(struct platform_device *op)
struct resource res;
ulong regs_start, regs_size;
int retval, i;
+ u8 chancnt;
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
@@ -956,10 +957,6 @@ static int mpc_dma_probe(struct platform_device *op)
dma = &mdma->dma;
dma->dev = dev;
- if (mdma->is_mpc8308)
- dma->chancnt = MPC8308_DMACHAN_MAX;
- else
- dma->chancnt = MPC512x_DMACHAN_MAX;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
@@ -972,7 +969,12 @@ static int mpc_dma_probe(struct platform_device *op)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma_cap_set(DMA_SLAVE, dma->cap_mask);
- for (i = 0; i < dma->chancnt; i++) {
+ if (mdma->is_mpc8308)
+ chancnt = MPC8308_DMACHAN_MAX;
+ else
+ chancnt = MPC512x_DMACHAN_MAX;
+
+ for (i = 0; i < chancnt; i++) {
mchan = &mdma->channels[i];
mchan->chan.device = dma;
@@ -1090,7 +1092,6 @@ static struct platform_driver mpc_dma_driver = {
.remove = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc_dma_match,
},
};
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a63837ca1410..d7ac558c2c1c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1269,7 +1269,6 @@ static struct platform_driver mv_xor_driver = {
.probe = mv_xor_probe,
.remove = mv_xor_remove,
.driver = {
- .owner = THIS_MODULE,
.name = MV_XOR_NAME,
.of_match_table = of_match_ptr(mv_xor_dt_ids),
},
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index bda20e6e1007..d7d61e1a01c3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1500,7 +1500,6 @@ static const struct dev_pm_ops nbpf_pm_ops = {
static struct platform_driver nbpf_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "dma-nbpf",
.of_match_table = nbpf_match,
.pm = &nbpf_pm_ops,
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bbea8243f9e8..c0016a68b446 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1074,8 +1074,6 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
vchan_init(&c->vc, &od->ddev);
INIT_LIST_HEAD(&c->node);
- od->ddev.chancnt++;
-
return 0;
}
@@ -1233,7 +1231,6 @@ static struct platform_driver omap_dma_driver = {
.remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(omap_dma_match),
},
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 9f9ca9fe5ce6..6e0e47d76b23 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -997,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
-const struct pci_device_id pch_dma_id_table[] = {
+static const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 19a99743cf52..bdf40b530032 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -265,6 +266,9 @@ static unsigned cmd_line;
#define NR_DEFAULT_DESC 16
+/* Delay for runtime PM autosuspend, ms */
+#define PL330_AUTOSUSPEND_DELAY 20
+
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
u32 periph_id;
@@ -1958,6 +1962,7 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
+ bool power_down = false;
spin_lock_irqsave(&pch->lock, flags);
@@ -1972,10 +1977,17 @@ static void pl330_tasklet(unsigned long data)
/* Try to submit a req imm. next to the last completed cookie */
fill_queue(pch);
- /* Make sure the PL330 Channel thread is active */
- spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
- spin_unlock(&pch->thread->dmac->lock);
+ if (list_empty(&pch->work_list)) {
+ spin_lock(&pch->thread->dmac->lock);
+ _stop(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ power_down = true;
+ } else {
+ /* Make sure the PL330 Channel thread is active */
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ }
while (!list_empty(&pch->completed_list)) {
dma_async_tx_callback callback;
@@ -1990,6 +2002,12 @@ static void pl330_tasklet(unsigned long data)
if (pch->cyclic) {
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
+ if (power_down) {
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ power_down = false;
+ }
} else {
desc->status = FREE;
list_move_tail(&desc->node, &pch->dmac->desc_pool);
@@ -2004,6 +2022,12 @@ static void pl330_tasklet(unsigned long data)
}
}
spin_unlock_irqrestore(&pch->lock, flags);
+
+ /* If work list empty, power down */
+ if (power_down) {
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ }
}
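
For context, a minimal sketch of the autosuspend pattern this tasklet change relies on (not from the driver; the helpers are hypothetical): the usage count is raised when work is queued and dropped with an autosuspend delay once the work list drains:

#include <linux/pm_runtime.h>

static void example_start_work(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* 0 -> 1: resume the DMAC */
	/* ... move descriptors onto the work list ... */
}

static void example_work_drained(struct device *dev)
{
	/* the work list just became empty */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
}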
bool pl330_filter(struct dma_chan *chan, void *param)
@@ -2073,6 +2097,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
switch (cmd) {
case DMA_TERMINATE_ALL:
+ pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
spin_lock(&pl330->lock);
@@ -2099,10 +2124,15 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
dma_cookie_complete(&desc->txd);
}
+ if (!list_empty(&pch->work_list))
+ pm_runtime_put(pl330->ddma.dev);
+
list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pl330->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
break;
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
@@ -2138,6 +2168,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
tasklet_kill(&pch->task);
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
pl330_release_channel(pch->thread);
@@ -2147,6 +2178,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
static enum dma_status
@@ -2162,6 +2195,15 @@ static void pl330_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
+ if (list_empty(&pch->work_list)) {
+ /*
+ * Warn if nothing is pending: an empty submitted_list can
+ * unbalance our pm_runtime usage counter, which is updated
+ * based on whether work_list is empty.
+ */
+ WARN_ON(list_empty(&pch->submitted_list));
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
+ }
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2594,6 +2636,46 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
return 0;
}
+/*
+ * Runtime PM callbacks are provided by the amba/bus.c driver.
+ *
+ * It is assumed here that IRQ-safe runtime PM is selected in probe and
+ * that the amba bus driver only disables/enables the clock in its
+ * runtime PM callbacks.
+ */
+static int __maybe_unused pl330_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+
+ pm_runtime_disable(dev);
+
+ if (!pm_runtime_status_suspended(dev)) {
+ /* amba did not disable the clock */
+ amba_pclk_disable(pcdev);
+ }
+ amba_pclk_unprepare(pcdev);
+
+ return 0;
+}
+
+static int __maybe_unused pl330_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ ret = amba_pclk_prepare(pcdev);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_status_suspended(dev))
+ ret = amba_pclk_enable(pcdev);
+
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2619,6 +2701,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
+ pd = &pl330->ddma;
+ pd->dev = &adev->dev;
+
pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
@@ -2655,7 +2740,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
- pd = &pl330->ddma;
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
@@ -2692,7 +2776,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
list_add_tail(&pch->chan.device_node, &pd->channels);
}
- pd->dev = &adev->dev;
if (pdat) {
pd->cap_mask = pdat->cap_mask;
} else {
@@ -2747,6 +2830,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
pcfg->num_peri, pcfg->num_events);
+ pm_runtime_irq_safe(&adev->dev);
+ pm_runtime_use_autosuspend(&adev->dev);
+ pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&adev->dev);
+ pm_runtime_put_autosuspend(&adev->dev);
+
return 0;
probe_err3:
/* Idle the DMAC */
@@ -2773,6 +2862,8 @@ static int pl330_remove(struct amba_device *adev)
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
+ pm_runtime_get_noresume(pl330->ddma.dev);
+
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
@@ -2811,6 +2902,7 @@ static struct amba_driver pl330_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "dma-pl330",
+ .pm = &pl330_pm,
},
.id_table = pl330_ids,
.probe = pl330_probe,
@@ -2819,6 +2911,6 @@ static struct amba_driver pl330_driver = {
module_amba_driver(pl330_driver);
-MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index ce7a8d7564ba..fa764a39cd36 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4579,7 +4579,6 @@ static struct platform_driver ppc440spe_adma_driver = {
.remove = ppc440spe_adma_remove,
.driver = {
.name = "PPC440SP(E)-ADMA",
- .owner = THIS_MODULE,
.of_match_table = ppc440spe_adma_of_match,
},
};
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 7a4bbb0f80a5..3122a99ec06b 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -79,35 +79,97 @@ struct bam_async_desc {
struct bam_desc_hw desc[0];
};
-#define BAM_CTRL 0x0000
-#define BAM_REVISION 0x0004
-#define BAM_SW_REVISION 0x0080
-#define BAM_NUM_PIPES 0x003C
-#define BAM_TIMER 0x0040
-#define BAM_TIMER_CTRL 0x0044
-#define BAM_DESC_CNT_TRSHLD 0x0008
-#define BAM_IRQ_SRCS 0x000C
-#define BAM_IRQ_SRCS_MSK 0x0010
-#define BAM_IRQ_SRCS_UNMASKED 0x0030
-#define BAM_IRQ_STTS 0x0014
-#define BAM_IRQ_CLR 0x0018
-#define BAM_IRQ_EN 0x001C
-#define BAM_CNFG_BITS 0x007C
-#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
-#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
-#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
-#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
-#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
-#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
-#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
-#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
-#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
-#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
-#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
-#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
+enum bam_reg {
+ BAM_CTRL,
+ BAM_REVISION,
+ BAM_NUM_PIPES,
+ BAM_DESC_CNT_TRSHLD,
+ BAM_IRQ_SRCS,
+ BAM_IRQ_SRCS_MSK,
+ BAM_IRQ_SRCS_UNMASKED,
+ BAM_IRQ_STTS,
+ BAM_IRQ_CLR,
+ BAM_IRQ_EN,
+ BAM_CNFG_BITS,
+ BAM_IRQ_SRCS_EE,
+ BAM_IRQ_SRCS_MSK_EE,
+ BAM_P_CTRL,
+ BAM_P_RST,
+ BAM_P_HALT,
+ BAM_P_IRQ_STTS,
+ BAM_P_IRQ_CLR,
+ BAM_P_IRQ_EN,
+ BAM_P_EVNT_DEST_ADDR,
+ BAM_P_EVNT_REG,
+ BAM_P_SW_OFSTS,
+ BAM_P_DATA_FIFO_ADDR,
+ BAM_P_DESC_FIFO_ADDR,
+ BAM_P_EVNT_GEN_TRSHLD,
+ BAM_P_FIFO_SIZES,
+};
+
+struct reg_offset_data {
+ u32 base_offset;
+ unsigned int pipe_mult, evnt_mult, ee_mult;
+};
+
+static const struct reg_offset_data bam_v1_3_reg_info[] = {
+ [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
+ [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
+};
+
+static const struct reg_offset_data bam_v1_4_reg_info[] = {
+ [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
+ [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
+};
/* BAM CTRL */
#define BAM_SW_RST BIT(0)
@@ -297,6 +359,8 @@ struct bam_device {
/* execution environment ID, from DT */
u32 ee;
+ const struct reg_offset_data *layout;
+
struct clk *bamclk;
int irq;
@@ -305,6 +369,23 @@ struct bam_device {
};
/**
+ * bam_addr - returns BAM register address
+ * @bdev: bam device
+ * @pipe: pipe instance (ignored when register doesn't have multiple instances)
+ * @reg: register enum
+ */
+static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
+ enum bam_reg reg)
+{
+ const struct reg_offset_data r = bdev->layout[reg];
+
+ return bdev->regs + r.base_offset +
+ r.pipe_mult * pipe +
+ r.evnt_mult * pipe +
+ r.ee_mult * bdev->ee;
+}
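+
Worked examples of the lookup above, using the offsets from the tables in this patch; pipe = 3 and ee = 1 are assumed values:

/*
 *   bam_v1_4: bam_addr(bdev, 3, BAM_P_IRQ_STTS)
 *               = regs + 0x1010 + 3 * 0x1000   = regs + 0x4010
 *             bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)
 *               = regs + 0x0804 + 1 * 0x80     = regs + 0x0884
 *
 *   bam_v1_3: bam_addr(bdev, 3, BAM_P_IRQ_STTS)
 *               = regs + 0x0010 + 3 * 0x80     = regs + 0x0190
 */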
+
+/**
* bam_reset_channel - Reset individual BAM DMA channel
* @bchan: bam channel
*
@@ -317,8 +398,8 @@ static void bam_reset_channel(struct bam_chan *bchan)
lockdep_assert_held(&bchan->vc.lock);
/* reset channel */
- writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
- writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+ writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
/* don't allow cpu to reorder BAM register accesses done after this */
wmb();
@@ -347,17 +428,18 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
* because we allocated 1 more descriptor (8 bytes) than we can use
*/
writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
- bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
- writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
- BAM_P_FIFO_SIZES(bchan->id));
+ bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
+ writel_relaxed(BAM_DESC_FIFO_SIZE,
+ bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
- writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+ writel_relaxed(P_DEFAULT_IRQS_EN,
+ bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
/* unmask the specific pipe and EE combo */
- val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
val |= BIT(bchan->id);
- writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
/* don't allow cpu to reorder the channel enable done below */
wmb();
@@ -367,7 +449,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
if (dir == DMA_DEV_TO_MEM)
val |= P_DIRECTION;
- writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+ writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
bchan->initialized = 1;
@@ -432,12 +514,12 @@ static void bam_free_chan(struct dma_chan *chan)
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
- val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
val &= ~BIT(bchan->id);
- writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
/* disable irq */
- writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
}
/**
@@ -583,14 +665,14 @@ static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_PAUSE:
spin_lock_irqsave(&bchan->vc.lock, flag);
- writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+ writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 1;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
break;
case DMA_RESUME:
spin_lock_irqsave(&bchan->vc.lock, flag);
- writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 0;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
break;
@@ -626,7 +708,7 @@ static u32 process_channel_irqs(struct bam_device *bdev)
unsigned long flags;
struct bam_async_desc *async_desc;
- srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+ srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
/* return early if no pipe/channel interrupts are present */
if (!(srcs & P_IRQ))
@@ -639,11 +721,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
continue;
/* clear pipe irq */
- pipe_stts = readl_relaxed(bdev->regs +
- BAM_P_IRQ_STTS(i));
+ pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
- writel_relaxed(pipe_stts, bdev->regs +
- BAM_P_IRQ_CLR(i));
+ writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
spin_lock_irqsave(&bchan->vc.lock, flags);
async_desc = bchan->curr_txd;
@@ -694,12 +774,12 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
tasklet_schedule(&bdev->task);
if (srcs & BAM_IRQ)
- clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+ clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
/* don't allow reorder of the various accesses to the BAM registers */
mb();
- writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+ writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
return IRQ_HANDLED;
}
@@ -763,7 +843,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
else
maxburst = bchan->slave.dst_maxburst;
- writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+ writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
bchan->reconfigure = 0;
}
@@ -830,7 +910,7 @@ static void bam_start_dma(struct bam_chan *bchan)
/* ensure descriptor writes and dma start not reordered */
wmb();
writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
- bdev->regs + BAM_P_EVNT_REG(bchan->id));
+ bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
}
/**
@@ -918,43 +998,44 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
val &= NUM_EES_MASK;
/* check that configured EE is within range */
if (bdev->ee >= val)
return -EINVAL;
- val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
bdev->num_channels = val & BAM_NUM_PIPES_MASK;
/* s/w reset bam */
/* after reset all pipes are disabled and idle */
- val = readl_relaxed(bdev->regs + BAM_CTRL);
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
val |= BAM_SW_RST;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
val &= ~BAM_SW_RST;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* make sure previous stores are visible before enabling BAM */
wmb();
/* enable bam */
val |= BAM_EN;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
- writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
- writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+ writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
/* enable irqs for errors */
writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
- bdev->regs + BAM_IRQ_EN);
+ bam_addr(bdev, 0, BAM_IRQ_EN));
/* unmask global bam interrupt */
- writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
return 0;
}
@@ -969,9 +1050,18 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
bchan->vc.desc_free = bam_dma_free_desc;
}
+static const struct of_device_id bam_of_match[] = {
+ { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
+ { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
+ const struct of_device_id *match;
struct resource *iores;
int ret, i;
@@ -981,6 +1071,14 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->dev = &pdev->dev;
+ match = of_match_node(bam_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "Unsupported BAM module\n");
+ return -ENODEV;
+ }
+
+ bdev->layout = match->data;
+
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(bdev->regs))
@@ -1084,7 +1182,7 @@ static int bam_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&bdev->common);
/* mask all interrupts for this execution environment */
- writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
devm_free_irq(bdev->dev, bdev->irq, bdev);
@@ -1104,18 +1202,11 @@ static int bam_dma_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bam_of_match[] = {
- { .compatible = "qcom,bam-v1.4.0", },
- {}
-};
-MODULE_DEVICE_TABLE(of, bam_of_match);
-
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
.remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
- .owner = THIS_MODULE,
.of_match_table = bam_of_match,
},
};
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7416572d1e40..6941a77521c3 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1402,7 +1402,6 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
static struct platform_driver s3c24xx_dma_driver = {
.driver = {
.name = "s3c24xx-dma",
- .owner = THIS_MODULE,
},
.id_table = s3c24xx_dma_driver_ids,
.probe = s3c24xx_dma_probe,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 4b0ef043729a..96bb62c39c41 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -829,7 +829,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
{
unsigned i;
- dmadev->chancnt = ARRAY_SIZE(chan_desc);
INIT_LIST_HEAD(&dmadev->channels);
dmadev->dev = dev;
dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
@@ -838,7 +837,7 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
dmadev->device_tx_status = sa11x0_dma_tx_status;
dmadev->device_issue_pending = sa11x0_dma_issue_pending;
- for (i = 0; i < dmadev->chancnt; i++) {
+ for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
struct sa11x0_dma_chan *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -1064,7 +1063,6 @@ static const struct dev_pm_ops sa11x0_dma_pm_ops = {
static struct platform_driver sa11x0_dma_driver = {
.driver = {
.name = "sa11x0-dma",
- .owner = THIS_MODULE,
.pm = &sa11x0_dma_pm_ops,
},
.probe = sa11x0_dma_probe,
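The chancnt removals in this file (and the matching ones in the sh/, sirf and sun6i drivers further down) lean on the dmaengine core: dma_async_device_register() walks dma_dev->channels, assigns chan_id to each entry and sets chancnt itself, so a driver only has to put every channel on that list. A rough sketch of the registration flow this series converges on, with a made-up "foo" driver as the example:

/* Illustrative only: register nr_chans channels and let the core count them. */
static int foo_dma_register(struct foo_dma_dev *fd, unsigned int nr_chans)
{
	unsigned int i;

	INIT_LIST_HEAD(&fd->ddev.channels);

	for (i = 0; i < nr_chans; i++) {
		struct foo_dma_chan *fc = &fd->chans[i];

		fc->chan.device = &fd->ddev;
		/* each channel is simply linked into the device list */
		list_add_tail(&fc->chan.device_node, &fd->ddev.channels);
	}

	/* the core derives ddev.chancnt from this list during registration */
	return dma_async_device_register(&fd->ddev);
}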
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index 80fd2aeb4870..d95bbdd721f4 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -253,7 +253,6 @@ static int audmapp_chan_probe(struct platform_device *pdev,
static void audmapp_chan_remove(struct audmapp_device *audev)
{
- struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -261,7 +260,6 @@ static void audmapp_chan_remove(struct audmapp_device *audev)
BUG_ON(!schan);
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
@@ -367,7 +365,6 @@ static struct platform_driver audmapp_driver = {
.probe = audmapp_probe,
.remove = audmapp_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rcar-audmapp-engine",
.of_match_table = audmapp_of_match,
},
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index b212d9471ab5..20a6f6f2a018 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -619,7 +619,6 @@ error:
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
- struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -628,7 +627,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static int hpb_dmae_remove(struct platform_device *pdev)
@@ -655,7 +653,6 @@ static struct platform_driver hpb_dmae_driver = {
.remove = hpb_dmae_remove,
.shutdown = hpb_dmae_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "hpb-dma-engine",
},
};
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 42d497416196..3a2adb131d46 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -391,6 +391,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
pm_runtime_put(schan->dev);
schan->pm_state = SHDMA_PM_ESTABLISHED;
+ } else if (schan->pm_state == SHDMA_PM_PENDING) {
+ shdma_chan_xfer_ld_queue(schan);
}
}
}
@@ -951,7 +953,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
/* Add the channel to DMA device channel list */
list_add_tail(&schan->dma_chan.device_node,
&sdev->dma_dev.channels);
- sdev->schan[sdev->dma_dev.chancnt++] = schan;
+ sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index b4ff9d3e56d1..f999f9b0d314 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static struct platform_driver shdma_of = {
.driver = {
- .owner = THIS_MODULE,
.name = "shdma-of",
.of_match_table = shdma_of_match,
},
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 58eb85770eba..aec8a84784a4 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -572,7 +572,6 @@ err_no_irq:
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
- struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -581,7 +580,6 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
@@ -925,7 +923,6 @@ static int sh_dmae_remove(struct platform_device *pdev)
static struct platform_driver sh_dmae_driver = {
.driver = {
- .owner = THIS_MODULE,
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
.of_match_table = sh_dmae_of_match,
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 3ce103909896..6da2eaa6c294 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -295,7 +295,6 @@ err_no_irq:
static void sudmac_chan_remove(struct sudmac_device *su_dev)
{
- struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -304,7 +303,6 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
@@ -411,7 +409,6 @@ static int sudmac_remove(struct platform_device *pdev)
static struct platform_driver sudmac_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = SUDMAC_DRV_NAME,
},
.probe = sudmac_probe,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index aac03ab10c54..3492a5f91d31 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -735,7 +735,6 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma = &sdma->dma;
dma->dev = dev;
- dma->chancnt = SIRFSOC_DMA_CHANNELS;
dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
@@ -752,7 +751,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
- for (i = 0; i < dma->chancnt; i++) {
+ for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
schan = &sdma->channels[i];
schan->chan.device = dma;
@@ -835,6 +834,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -916,6 +916,7 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
@@ -933,7 +934,6 @@ static struct platform_driver sirfsoc_dma_driver = {
.remove = sirfsoc_dma_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &sirfsoc_dma_pm_ops,
.of_match_table = sirfsoc_dma_match,
},
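The #ifdef CONFIG_PM_SLEEP guard added above is presumably there because the system sleep callbacks are only referenced through SET_SYSTEM_SLEEP_PM_OPS() in the elided part of sirfsoc_dma_pm_ops; that macro expands to nothing when CONFIG_PM_SLEEP is disabled, which would leave sirfsoc_dma_pm_suspend()/sirfsoc_dma_pm_resume() defined but unused and provoke a compiler warning. The generic pattern, with placeholder names:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* save controller state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore controller state */
	return 0;
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
	/* expands to nothing without CONFIG_PM_SLEEP, hence the #ifdef above */
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};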
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d9ca3e32d748..15d49461c0d2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3432,6 +3432,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
d40_err(base->dev, "Failed to allocate %d pages.\n",
base->lcla_pool.pages);
+ ret = -ENOMEM;
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
@@ -3749,7 +3750,6 @@ static const struct of_device_id d40_match[] = {
static struct platform_driver d40_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = D40_NAME,
.pm = &dma40_pm_ops,
.of_match_table = d40_match,
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 91292f5513ff..159f1736a16f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -26,24 +27,6 @@
#include "virt-dma.h"
/*
- * There's 16 physical channels that can work in parallel.
- *
- * However we have 30 different endpoints for our requests.
- *
- * Since the channels are able to handle only an unidirectional
- * transfer, we need to allocate more virtual channels so that
- * everyone can grab one channel.
- *
- * Some devices can't work in both direction (mostly because it
- * wouldn't make sense), so we have a bit fewer virtual channels than
- * 2 channels per endpoints.
- */
-
-#define NR_MAX_CHANNELS 16
-#define NR_MAX_REQUESTS 30
-#define NR_MAX_VCHANS 53
-
-/*
* Common registers
*/
#define DMA_IRQ_EN(x) ((x) * 0x04)
@@ -60,6 +43,12 @@
#define DMA_STAT 0x30
/*
+ * sun8i specific registers
+ */
+#define SUN8I_DMA_GATE 0x20
+#define SUN8I_DMA_GATE_ENABLE 0x4
+
+/*
* Channels specific registers
*/
#define DMA_CHAN_ENABLE 0x00
@@ -102,6 +91,19 @@
#define DRQ_SDRAM 1
/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun6i_dma_config {
+ u32 nr_max_channels;
+ u32 nr_max_requests;
+ u32 nr_max_vchans;
+};
+
+/*
* Hardware representation of the LLI
*
* The hardware will be fed the physical address of this structure,
@@ -159,6 +161,7 @@ struct sun6i_dma_dev {
struct dma_pool *pool;
struct sun6i_pchan *pchans;
struct sun6i_vchan *vchans;
+ const struct sun6i_dma_config *cfg;
};
static struct device *chan2dev(struct dma_chan *chan)
@@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
static void sun6i_dma_tasklet(unsigned long data)
{
struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+ const struct sun6i_dma_config *cfg = sdev->cfg;
struct sun6i_vchan *vchan;
struct sun6i_pchan *pchan;
unsigned int pchan_alloc = 0;
@@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_lock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
pchan = &sdev->pchans[pchan_idx];
if (pchan->vchan || list_empty(&sdev->pending))
@@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_unlock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
if (!(pchan_alloc & BIT(pchan_idx)))
continue;
@@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
int i, j, ret = IRQ_NONE;
u32 status;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
status = readl(sdev->base + DMA_IRQ_STAT(i));
if (!status)
continue;
@@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
writel(status, sdev->base + DMA_IRQ_STAT(i));
- for (j = 0; (j < 8) && status; j++) {
+ for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
if (status & DMA_IRQ_QUEUE) {
pchan = sdev->pchans + j;
vchan = pchan->vchan;
@@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
}
}
- status = status >> 4;
+ status = status >> DMA_IRQ_CHAN_WIDTH;
}
if (!atomic_read(&sdev->tasklet_shutdown))
@@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
u8 port = dma_spec->args[0];
- if (port > NR_MAX_REQUESTS)
+ if (port > sdev->cfg->nr_max_requests)
return NULL;
chan = dma_get_any_slave_channel(&sdev->slave);
@@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
int i;
- for (i = 0; i < NR_MAX_VCHANS; i++) {
+ for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
struct sun6i_vchan *vchan = &sdev->vchans[i];
list_del(&vchan->vc.chan.device_node);
@@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
}
}
+/*
+ * For the A31:
+ *
+ * There are 16 physical channels that can work in parallel.
+ *
+ * However, we have 30 different endpoints for our requests.
+ *
+ * Since each channel can only handle a unidirectional transfer, we
+ * need to allocate more virtual channels so that everyone can grab
+ * one.
+ *
+ * Some devices can't work in both directions (mostly because it
+ * wouldn't make sense), so we end up with somewhat fewer virtual
+ * channels than 2 per endpoint.
+ */
+
+static struct sun6i_dma_config sun6i_a31_dma_cfg = {
+ .nr_max_channels = 16,
+ .nr_max_requests = 30,
+ .nr_max_vchans = 53,
+};
+
+/*
+ * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
+ * and a total of 37 usable source and destination endpoints.
+ */
+
+static struct sun6i_dma_config sun8i_a23_dma_cfg = {
+ .nr_max_channels = 8,
+ .nr_max_requests = 24,
+ .nr_max_vchans = 37,
+};
+
+static struct of_device_id sun6i_dma_match[] = {
+ { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
+ { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+ { /* sentinel */ }
+};
+
static int sun6i_dma_probe(struct platform_device *pdev)
{
+ const struct of_device_id *device;
struct sun6i_dma_dev *sdc;
struct resource *res;
int ret, i;
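The sun6i conversion uses the same device-tree pattern as the qcom BAM change earlier in this series: per-SoC limits live in a small config struct, each of_device_id entry carries a pointer to one in .data, and probe() fetches it before any of the limits are needed. A condensed sketch of the lookup side, reusing the names from this hunk:

/* Sketch: pick the per-compatible configuration in probe(). */
static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sun6i_dma_config *cfg;

	match = of_match_device(sun6i_dma_match, &pdev->dev);
	if (!match)
		return -ENODEV;
	cfg = match->data;

	/* cfg->* now replaces the old compile-time NR_MAX_* constants */
	dev_info(&pdev->dev, "%u channels, %u requests, %u vchans\n",
		 cfg->nr_max_channels, cfg->nr_max_requests,
		 cfg->nr_max_vchans);
	return 0;
}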
@@ -866,6 +910,11 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc)
return -ENOMEM;
+ device = of_match_device(sun6i_dma_match, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+ sdc->cfg = device->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdc->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sdc->base))
@@ -912,31 +961,30 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
sdc->slave.device_control = sun6i_dma_control;
- sdc->slave.chancnt = NR_MAX_VCHANS;
sdc->slave.copy_align = 4;
sdc->slave.dev = &pdev->dev;
- sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+ sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
sizeof(struct sun6i_pchan), GFP_KERNEL);
if (!sdc->pchans)
return -ENOMEM;
- sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+ sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
sizeof(struct sun6i_vchan), GFP_KERNEL);
if (!sdc->vchans)
return -ENOMEM;
tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
- for (i = 0; i < NR_MAX_CHANNELS; i++) {
+ for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
struct sun6i_pchan *pchan = &sdc->pchans[i];
pchan->idx = i;
pchan->base = sdc->base + 0x100 + i * 0x40;
}
- for (i = 0; i < NR_MAX_VCHANS; i++) {
+ for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
struct sun6i_vchan *vchan = &sdc->vchans[i];
INIT_LIST_HEAD(&vchan->node);
@@ -976,6 +1024,15 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
+ /*
+ * The sun8i variant requires us to toggle a DMA gating register,
+ * as seen in Allwinner's SDK. This register is not documented
+ * in the A23 user manual.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun8i-a23-dma"))
+ writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+
return 0;
err_dma_unregister:
@@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id sun6i_dma_match[] = {
- { .compatible = "allwinner,sun6i-a31-dma" },
- { /* sentinel */ }
-};
-
static struct platform_driver sun6i_dma_driver = {
.probe = sun6i_dma_probe,
.remove = sun6i_dma_remove,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 1c867d0303db..d8450c3f35f0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1597,7 +1597,6 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
static struct platform_driver tegra_dmac_driver = {
.driver = {
.name = "tegra-apbdma",
- .owner = THIS_MODULE,
.pm = &tegra_dma_dev_pm_ops,
.of_match_table = tegra_dma_of_match,
},
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 4506a7b4f972..2407ccf1a64b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -783,7 +783,6 @@ static int td_remove(struct platform_device *pdev)
static struct platform_driver td_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = td_probe,
.remove = td_remove,
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 17686caf64d5..0659ec9c4488 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -76,7 +76,7 @@ static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
-#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
channel64_writel(dc, CHAR, 0);
channel64_writel(dc, __pad_CHAR, 0);
#else
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index f5a760598882..f6517b928bab 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -67,7 +67,7 @@ static inline bool txx9_dma_have_SMPCHN(void)
/* Hardware register definitions. */
struct txx9dmac_cregs {
-#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
TXX9_DMA_REG32(CHAR); /* Chain Address Register */
#else
u64 CHAR; /* Chain Address Register */
@@ -201,7 +201,7 @@ static inline bool is_dmac64(const struct txx9dmac_chan *dc)
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
/* Hardware descriptor definition. (for simple-chain) */
struct txx9dmac_hwdesc {
-#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
TXX9_DMA_REG32(CHAR);
#else
u64 CHAR;
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index a6e64767186e..4a3a8f3137b3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -942,6 +942,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
if (!xt->numf || !xt->sgl[0].size)
return NULL;
+ if (xt->frame_size != 1)
+ return NULL;
+
/* Allocate a transaction descriptor. */
desc = xilinx_vdma_alloc_tx_descriptor(chan);
if (!desc)
@@ -960,7 +963,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
hw = &segment->hw;
hw->vsize = xt->numf;
hw->hsize = xt->sgl[0].size;
- hw->stride = xt->sgl[0].icg <<
+ hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
hw->stride |= chan->config.frm_dly <<
XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
@@ -971,9 +974,11 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
hw->buf_addr = xt->src_start;
/* Link the previous next descriptor to current */
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ if (!list_empty(&desc->segments)) {
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+ }
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
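The stride fix follows from how struct dma_interleaved_template describes a frame: sgl[0].size is the number of bytes actually transferred per line and sgl[0].icg is the gap before the next line starts, so the memory stride the VDMA must be programmed with is their sum, not the gap alone. For a 1920-byte active line stored with a 2048-byte pitch, size = 1920 and icg = 128, giving stride = 2048. A hedged sketch of a client filling such a template; chan, buf_dma and the chosen numbers are assumptions for illustration:

/* Illustrative only: one frame of 720 lines, 1920 bytes each, stored with
 * a 2048-byte pitch. The template is only read while preparing the
 * descriptor, so it can be freed right afterwards. */
static struct dma_async_tx_descriptor *
example_prep_frame(struct dma_chan *chan, dma_addr_t buf_dma)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = buf_dma;
	xt->numf = 720;			/* vsize: lines per frame */
	xt->frame_size = 1;		/* the driver now rejects anything else */
	xt->sgl[0].size = 1920;		/* hsize: bytes per line */
	xt->sgl[0].icg = 128;		/* inter-line gap; stride = 1920 + 128 */

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return desc;
}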
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a12c8552f6a6..a9259b069dcd 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -245,7 +245,6 @@ static int cell_edac_remove(struct platform_device *pdev)
static struct platform_driver cell_edac_driver = {
.driver = {
.name = "cbe-mic",
- .owner = THIS_MODULE,
},
.probe = cell_edac_probe,
.remove = cell_edac_remove,
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index a6cd36100663..670d2829c547 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -372,7 +372,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
{
int err, chan;
- if (csrow->nr_channels >= EDAC_NR_CHANNELS)
+ if (csrow->nr_channels > EDAC_NR_CHANNELS)
return -ENODEV;
csrow->dev.type = &csrow_attr_type;
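The relaxed comparison above is an off-by-one fix: csrow->nr_channels is a channel count and EDAC_NR_CHANNELS is the largest count the sysfs code can represent, so a csrow using exactly EDAC_NR_CHANNELS channels is legal and only strictly larger counts should be rejected. A generic illustration of the count-versus-index distinction the old check mixed up:

/* Illustrative only. */
#define MAX_CHANNELS 8

static bool channel_count_ok(unsigned int nr_channels)
{
	return nr_channels <= MAX_CHANNELS;	/* a count may equal the limit */
}

static bool channel_index_ok(unsigned int chan)
{
	return chan < MAX_CHANNELS;		/* an index must stay below it */
}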
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7d3742edbaa2..ffb1a9a15ccd 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -715,7 +715,6 @@ static struct platform_driver mpc85xx_l2_err_driver = {
.remove = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
.of_match_table = mpc85xx_l2_err_of_match,
},
};
@@ -1215,7 +1214,6 @@ static struct platform_driver mpc85xx_mc_err_driver = {
.remove = mpc85xx_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
.of_match_table = mpc85xx_mc_err_of_match,
},
};
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 41593539cec4..1b64fd060821 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -204,7 +204,6 @@ static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
.driver = {
- .owner = THIS_MODULE,
.name = PPC4XX_EDAC_MODULE_NAME,
.of_match_table = ppc4xx_edac_match,
},
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e9bb1af67c8d..63aa6730e89e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -135,6 +135,7 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
#define TOLM 0x80
#define TOHM 0x84
+#define HASWELL_TOLM 0xd0
#define HASWELL_TOHM_0 0xd4
#define HASWELL_TOHM_1 0xd8
@@ -261,6 +262,7 @@ enum type {
SANDY_BRIDGE,
IVY_BRIDGE,
HASWELL,
+ BROADWELL,
};
struct sbridge_pvt;
@@ -445,7 +447,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
* - each SMI channel interfaces with a scalable memory buffer
* - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
*/
-#define HASWELL_DDRCRCLKCONTROLS 0xa10
+#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
#define HASWELL_HASYSDEFEATURE2 0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
@@ -497,12 +499,53 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
};
/*
+ * Broadwell support
+ *
+ * DE processor:
+ * - 1 IMC
+ * - 2 DDR3 channels, 2 DPC per channel
+ */
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
+
+static const struct pci_id_descr pci_dev_descr_broadwell[] = {
+ /* first item must be the HA */
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) },
+};
+
+static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+ {0,} /* 0 terminated list. */
+};
+
+/*
* pci_device_id table for which devices we are looking for
*/
static const struct pci_device_id sbridge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)},
{0,} /* 0 terminated list. */
};
@@ -706,8 +749,8 @@ static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
u32 reg;
- pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg);
- return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff;
+ pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
+ return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}
static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
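Two things change in haswell_get_tolm(): the offset (the value is read from 0xd0, HASWELL_TOLM, in the VT-d/misc device the function already uses, rather than the Sandy Bridge TOLM offset 0x80) and the low-bit fill. The field in bits 31:26 is treated as a 64 MB-granular value, so shifting it left by 26 gives a 64 MB-aligned address and OR-ing in 0x3ffffff (2^26 - 1) fills the whole granule, whereas the old 0x1ffffff only filled 25 bits and came up 32 MB short. A worked example of the arithmetic:

/* Example: the bits 31:26 field reads back as 0x20 = 32.
 *
 *   base = 32 << 26          = 0x80000000   (2 GB)
 *   new:  base | 0x3ffffff   = 0x83ffffff   (covers the full 64 MB granule)
 *   old:  base | 0x1ffffff   = 0x81ffffff   (stops 32 MB early)
 */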
@@ -767,12 +810,22 @@ static int check_if_ecc_is_active(const u8 bus, enum type type)
struct pci_dev *pdev = NULL;
u32 mcmtr, id;
- if (type == IVY_BRIDGE)
+ switch (type) {
+ case IVY_BRIDGE:
id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
- else if (type == HASWELL)
+ break;
+ case HASWELL:
id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
- else
+ break;
+ case SANDY_BRIDGE:
id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
+ break;
+ case BROADWELL:
+ id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
+ break;
+ default:
+ return -ENODEV;
+ }
pdev = get_pdev_same_bus(bus, id);
if (!pdev) {
@@ -800,7 +853,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
enum edac_type mode;
enum mem_type mtype;
- if (pvt->info.type == HASWELL)
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
else
pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
@@ -848,7 +901,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
else
edac_dbg(0, "Memory is unregistered\n");
- if (mtype == MEM_DDR4 || MEM_RDDR4)
+ if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
banks = 16;
else
banks = 8;
@@ -909,7 +962,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
u32 reg;
u64 limit, prv = 0;
u64 tmp_mb;
- u32 mb, kb;
+ u32 gb, mb;
u32 rir_way;
/*
@@ -919,15 +972,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pvt->tolm = pvt->info.get_tolm(pvt);
tmp_mb = (1 + pvt->tolm) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
+ gb, (mb*1000)/1024, (u64)pvt->tolm);
/* Address range is already 45:25 */
pvt->tohm = pvt->info.get_tohm(pvt);
tmp_mb = (1 + pvt->tohm) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
+ gb, (mb*1000)/1024, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
@@ -949,11 +1004,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;
tmp_mb = (limit + 1) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
n_sads,
get_dram_attr(reg),
- mb, kb,
+ gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
reg);
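The recurring mb/kb to gb/mb conversions in this file switch the debug output from decimal to binary gigabytes: tmp_mb is a size in MiB, so dividing by 1024 yields whole GiB and the remainder, scaled by 1000/1024, gives the fractional part in thousandths. A worked example for tmp_mb = 1536 (1.5 GiB):

/* old:  mb = div_u64_rem(1536, 1000, &kb)   ->  mb = 1, kb = 536
 *       printed as "1.536 GB"               (treats MiB as decimal MB)
 *
 * new:  gb = div_u64_rem(1536, 1024, &mb)   ->  gb = 1, mb = 512
 *       fractional part = (512 * 1000) / 1024 = 500
 *       printed as "1.500 GB"               (the expected 1.5 GiB)
 */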
@@ -984,9 +1039,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;
tmp_mb = (limit + 1) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
- n_tads, mb, kb,
+ n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)TAD_SOCK(reg),
(u32)TAD_CH(reg),
@@ -1009,10 +1064,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tad_ch_nilv_offset[j],
&reg);
tmp_mb = TAD_OFFSET(reg) >> 20;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
i, j,
- mb, kb,
+ gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
reg);
}
@@ -1034,10 +1089,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = pvt->info.rir_limit(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
- mb = div_u64_rem(tmp_mb, 1000, &kb);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
i, j,
- mb, kb,
+ gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
rir_way,
reg);
@@ -1048,10 +1103,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
&reg);
tmp_mb = RIR_OFFSET(reg) << 6;
- mb = div_u64_rem(tmp_mb, 1000, &kb);
+ gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
- mb, kb,
+ gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)RIR_RNK_TGT(reg),
reg);
@@ -1089,7 +1144,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 ch_way, sck_way, pkg, sad_ha = 0;
u32 tad_offset;
u32 rir_way;
- u32 mb, kb;
+ u32 mb, gb;
u64 ch_addr, offset, limit = 0, prv = 0;
@@ -1179,7 +1234,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*socket = sad_interleave[idx];
edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
idx, sad_way, *socket);
- } else if (pvt->info.type == HASWELL) {
+ } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
int bits, a7mode = A7MODE(dram_rule);
if (a7mode) {
@@ -1358,10 +1413,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
continue;
limit = pvt->info.rir_limit(reg);
- mb = div_u64_rem(limit >> 20, 1000, &kb);
+ gb = div_u64_rem(limit >> 20, 1024, &mb);
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
n_rir,
- mb, kb,
+ gb, (mb*1000)/1024,
limit,
1 << RIR_WAY(reg));
if (ch_addr <= limit)
@@ -1828,6 +1883,82 @@ enodev:
return -ENODEV;
}
+static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
+ struct sbridge_dev *sbridge_dev)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ int i;
+
+ /* there's only one device per system; not tied to any bus */
+ if (pvt->info.pci_vtd == NULL)
+ /* result will be checked later */
+ pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
+ NULL);
+
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ pdev = sbridge_dev->pdev[i];
+ if (!pdev)
+ continue;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
+ pvt->pci_sad0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
+ pvt->pci_sad1 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
+ pvt->pci_ha0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
+ pvt->pci_ta = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
+ pvt->pci_ras = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
+ pvt->pci_tad[0] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
+ pvt->pci_tad[1] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
+ pvt->pci_tad[2] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
+ pvt->pci_tad[3] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
+ pvt->pci_ddrio = pdev;
+ break;
+ default:
+ break;
+ }
+
+ edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
+ }
+
+ /* Check if everything was registered */
+ if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+ !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
+ goto enodev;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->pci_tad[i])
+ goto enodev;
+ }
+ return 0;
+
+enodev:
+ sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
+ return -ENODEV;
+}
+
/****************************************************************************
Error check routines
****************************************************************************/
@@ -2240,6 +2371,25 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
if (unlikely(rc < 0))
goto fail0;
break;
+ case BROADWELL:
+ /* rankcfgr isn't used */
+ pvt->info.get_tolm = haswell_get_tolm;
+ pvt->info.get_tohm = haswell_get_tohm;
+ pvt->info.dram_rule = ibridge_dram_rule;
+ pvt->info.get_memory_type = haswell_get_memory_type;
+ pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.rir_limit = haswell_rir_limit;
+ pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
+ pvt->info.interleave_list = ibridge_interleave_list;
+ pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
+ pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
+
+ /* Store pci devices at mci for faster access */
+ rc = broadwell_mci_bind_devs(mci, sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail0;
+ break;
}
/* Get dimm basic config and the memory layout */
@@ -2305,6 +2455,10 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
type = HASWELL;
break;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_broadwell_table);
+ type = BROADWELL;
+ break;
}
if (unlikely(rc < 0))
goto fail0;
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 578f915ee195..71381642ce2a 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -199,7 +199,6 @@ static int tile_edac_mc_remove(struct platform_device *pdev)
static struct platform_driver tile_edac_mc_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = tile_edac_mc_probe,
.remove = tile_edac_mc_remove,
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index d860229e4de1..5d7ab577fba9 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -182,7 +182,6 @@ static struct platform_driver adc_jack_driver = {
.remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index ba51588cc000..63f01c42aed4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1469,7 +1469,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
static struct platform_driver arizona_extcon_driver = {
.driver = {
.name = "arizona-extcon",
- .owner = THIS_MODULE,
},
.probe = arizona_extcon_probe,
.remove = arizona_extcon_remove,
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 4c2f2c543bb7..043dcd9946c9 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -29,6 +29,7 @@
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/extcon.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/of.h>
@@ -997,13 +998,16 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
return ERR_PTR(-ENODEV);
}
- edev = extcon_get_extcon_dev(node->name);
- if (!edev) {
- dev_err(dev, "unable to get extcon device : %s\n", node->name);
- return ERR_PTR(-ENODEV);
+ mutex_lock(&extcon_dev_list_lock);
+ list_for_each_entry(edev, &extcon_dev_list, entry) {
+ if (edev->dev.parent && edev->dev.parent->of_node == node) {
+ mutex_unlock(&extcon_dev_list_lock);
+ return edev;
+ }
}
+ mutex_unlock(&extcon_dev_list_lock);
- return edev;
+ return ERR_PTR(-EPROBE_DEFER);
}
#else
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
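With this change extcon_get_edev_by_phandle() resolves the phandle by matching the provider's parent of_node against the list of registered extcon devices, and when no match is found yet it returns -EPROBE_DEFER instead of -ENODEV, so a consumer that merely probed too early is retried once the provider registers. A hedged sketch of the consumer side; the driver and the comment about notifiers are illustrative:

/* Illustrative consumer: fetch the extcon device referenced by the first
 * "extcon" phandle and rely on probe deferral for ordering. */
static int foo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* -EPROBE_DEFER: probe retried later */

	/* ... register an extcon notifier against edev here ... */
	return 0;
}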
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 72f19a37fd01..7af33fc433cd 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -181,7 +181,6 @@ static struct platform_driver gpio_extcon_driver = {
.remove = gpio_extcon_remove,
.driver = {
.name = "extcon-gpio",
- .owner = THIS_MODULE,
.pm = &gpio_extcon_pm_ops,
},
};
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 7309743d0da1..c1bf0cf747b0 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,7 +1,7 @@
/*
* extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
*
- * Copyright (C) 2013,2014 Samsung Electrnoics
+ * Copyright (C) 2013,2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <k.kozlowski@samsung.com>
*
@@ -807,7 +807,6 @@ MODULE_DEVICE_TABLE(platform, max14577_muic_id);
static struct platform_driver max14577_muic_driver = {
.driver = {
.name = "max14577-muic",
- .owner = THIS_MODULE,
},
.probe = max14577_muic_probe,
.remove = max14577_muic_remove,
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 490e27475bac..740a14d35072 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -232,7 +232,7 @@ static const char *max77693_extcon_cable[] = {
[EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
[EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
[EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
- [EXTCON_CABLE_JIG_UART_ON] = "Dock-Car",
+ [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
[EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
[EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
[EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
@@ -532,9 +532,6 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
extcon_set_cable_state(info->edev, "Dock-Smart", attached);
extcon_set_cable_state(info->edev, "MHL", attached);
goto out;
- case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
- strcpy(dock_name, "Dock-Car");
- break;
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
strcpy(dock_name, "Dock-Desk");
break;
@@ -669,6 +666,11 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
strcpy(cable_name, "JIG-UART-OFF");
path = CONTROL1_SW_UART;
break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-ON");
+ path = CONTROL1_SW_UART;
+ break;
default:
dev_err(info->dev, "failed to detect %s jig cable\n",
attached ? "attached" : "detached");
@@ -708,13 +710,13 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info)
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
/* JIG */
ret = max77693_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
return ret;
break;
case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
- case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
/*
@@ -1301,7 +1303,6 @@ static int max77693_muic_remove(struct platform_device *pdev)
static struct platform_driver max77693_muic_driver = {
.driver = {
.name = DEV_NAME,
- .owner = THIS_MODULE,
},
.probe = max77693_muic_probe,
.remove = max77693_muic_remove,
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 75e501c98005..fc1678fa95c4 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -792,7 +792,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
static struct platform_driver max8997_muic_driver = {
.driver = {
.name = DEV_NAME,
- .owner = THIS_MODULE,
},
.probe = max8997_muic_probe,
.remove = max8997_muic_remove,
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 230e1220ce48..11c6757b6c40 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -291,7 +291,6 @@ static struct platform_driver palmas_usb_driver = {
.driver = {
.name = "palmas-usb",
.of_match_table = of_palmas_match_tbl,
- .owner = THIS_MODULE,
.pm = &palmas_pm_ops,
},
};
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 2c6d5e118ac1..f9e3aee6a211 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -115,6 +115,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
+ * An overlong string is silently truncated such that it and the
+ * zero byte fit into @size.
+ *
* Returns strlen(buf) or a negative error code.
*/
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
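The added sentence documents the truncation guarantee of fw_csr_string(); a typical, purely illustrative call site that depends on it, reading a unit's model name into a fixed-size buffer:

/* Illustrative only: the result is always NUL-terminated and truncated to
 * fit, so a fixed-size stack buffer is safe. */
static void example_log_model(struct fw_unit *unit)
{
	char model[32];

	if (fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model)) >= 0)
		dev_info(&unit->device, "model: %s\n", model);
}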
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a66a3217f1d9..aff9018d0658 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -689,8 +689,7 @@ static void ar_context_release(struct ar_context *ctx)
{
unsigned int i;
- if (ctx->buffer)
- vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
+ vunmap(ctx->buffer);
for (i = 0; i < AR_BUFFERS; i++)
if (ctx->pages[i]) {
@@ -1018,8 +1017,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
pages[i] = ctx->pages[i];
for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
pages[AR_BUFFERS + i] = ctx->pages[i];
- ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
- -1, PAGE_KERNEL);
+ ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
if (!ctx->buffer)
goto out_of_memory;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 7aef911fdc71..64ac8f8f5098 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -174,6 +174,7 @@ struct sbp2_target {
unsigned int mgt_orb_timeout;
unsigned int max_payload;
+ spinlock_t lock;
int dont_block; /* counter for each logical unit */
int blocked; /* ditto */
};
@@ -270,6 +271,7 @@ struct sbp2_orb {
dma_addr_t request_bus;
int rcode;
void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+ struct sbp2_logical_unit *lu;
struct list_head link;
};
@@ -321,7 +323,6 @@ struct sbp2_command_orb {
u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
- struct sbp2_logical_unit *lu;
struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
dma_addr_t page_table_bus;
@@ -444,7 +445,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
}
/* Lookup the orb corresponding to this status write. */
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(orb, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == orb->request_bus) {
@@ -453,7 +454,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
@@ -480,18 +481,18 @@ static void complete_transaction(struct fw_card *card, int rcode,
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
*/
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&orb->lu->tgt->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
orb->callback(orb, NULL);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
}
kref_put(&orb->kref, free_orb); /* transaction callback reference */
@@ -507,9 +508,10 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
orb_pointer.high = 0;
orb_pointer.low = cpu_to_be32(orb->request_bus);
- spin_lock_irqsave(&device->card->lock, flags);
+ orb->lu = lu;
+ spin_lock_irqsave(&lu->tgt->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irqrestore(&lu->tgt->lock, flags);
kref_get(&orb->kref); /* transaction callback reference */
kref_get(&orb->kref); /* orb callback reference */
@@ -524,13 +526,12 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
- unsigned long flags;
int retval = -ENOENT;
INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&device->card->lock, flags);
+ spin_lock_irq(&lu->tgt->lock);
list_splice_init(&lu->orb_list, &list);
- spin_unlock_irqrestore(&device->card->lock, flags);
+ spin_unlock_irq(&lu->tgt->lock);
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
@@ -687,16 +688,11 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
&d, 4, complete_agent_reset_write_no_wait, t);
}
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+static inline void sbp2_allow_block(struct sbp2_target *tgt)
{
- /*
- * We may access dont_block without taking card->lock here:
- * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
- * are currently serialized against each other.
- * And a wrong result in sbp2_conditionally_block()'s access of
- * dont_block is rather harmless, it simply misses its first chance.
- */
- --lu->tgt->dont_block;
+ spin_lock_irq(&tgt->lock);
+ --tgt->dont_block;
+ spin_unlock_irq(&tgt->lock);
}
/*
@@ -705,7 +701,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
* logical units have been finished (indicated by dont_block == 0).
* - lu->generation is stale.
*
- * Note, scsi_block_requests() must be called while holding card->lock,
+ * Note, scsi_block_requests() must be called while holding tgt->lock,
* otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
* unblock the target.
*/
@@ -717,20 +713,20 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irqsave(&tgt->lock, flags);
if (!tgt->dont_block && !lu->blocked &&
lu->generation != card->generation) {
lu->blocked = true;
if (++tgt->blocked == 1)
scsi_block_requests(shost);
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irqrestore(&tgt->lock, flags);
}
/*
* Unblocks lu->tgt as soon as all its logical units can be unblocked.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
@@ -739,15 +735,14 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
bool unblock = false;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
if (lu->blocked && lu->generation == card->generation) {
lu->blocked = false;
unblock = --tgt->blocked == 0;
}
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
if (unblock)
scsi_unblock_requests(shost);
@@ -756,19 +751,17 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
/*
* Prevents future blocking of tgt and unblocks it.
* Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section. On the other hand, running it inside
+ * tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
- struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&tgt->lock);
++tgt->dont_block;
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&tgt->lock);
scsi_unblock_requests(shost);
}
@@ -904,7 +897,7 @@ static void sbp2_login(struct work_struct *work)
/* No error during __scsi_add_device() */
lu->has_sdev = true;
scsi_device_put(sdev);
- sbp2_allow_block(lu);
+ sbp2_allow_block(tgt);
return;
@@ -1163,6 +1156,7 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
INIT_LIST_HEAD(&tgt->lu_list);
+ spin_lock_init(&tgt->lock);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
@@ -1359,12 +1353,12 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
- struct fw_device *device = target_parent_device(orb->lu->tgt);
+ struct fw_device *device = target_parent_device(base_orb->lu->tgt);
int result;
if (status != NULL) {
if (STATUS_GET_DEAD(*status))
- sbp2_agent_reset_no_wait(orb->lu);
+ sbp2_agent_reset_no_wait(base_orb->lu);
switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1390,7 +1384,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
* or when sending the write (less likely).
*/
result = DID_BUS_BUSY << 16;
- sbp2_conditionally_block(orb->lu);
+ sbp2_conditionally_block(base_orb->lu);
}
dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1487,7 +1481,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
kref_init(&orb->base.kref);
- orb->lu = lu;
orb->cmd = cmd;
orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
orb->request.misc = cpu_to_be32(
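The sbp2 hunks above move the ORB list and the block/unblock bookkeeping from the shared card->lock to a new per-target tgt->lock, and the call sites that appear to run only in process context (the login work item, ORB cancellation, unblocking) drop the flags save/restore in favour of plain spin_lock_irq(); the status-write and transaction-completion paths keep the irqsave form. A generic sketch of that split, not the driver's exact code:

/* Illustrative only: the two locking forms used with the per-target lock. */
static void example_splice_orbs(struct sbp2_target *tgt,
				struct list_head *orbs, struct list_head *tmp)
{
	/* workqueue/process context: interrupts are known to be enabled */
	spin_lock_irq(&tgt->lock);
	list_splice_init(orbs, tmp);
	spin_unlock_irq(&tgt->lock);
}

static void example_from_completion(struct sbp2_target *tgt)
{
	unsigned long flags;

	/* may run with interrupts already disabled: save and restore */
	spin_lock_irqsave(&tgt->lock, flags);
	/* ... match a status write against the pending ORB list ... */
	spin_unlock_irqrestore(&tgt->lock, flags);
}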
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 7160c43c59fc..829eec8959f2 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -578,7 +578,6 @@ static int dcdbas_remove(struct platform_device *dev)
static struct platform_driver dcdbas_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = dcdbas_probe,
.remove = dcdbas_remove,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 23dfd5f59b39..633ec216e185 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -112,6 +112,20 @@ config GPIO_MAX730X
comment "Memory mapped GPIO drivers:"
+config GPIO_74XX_MMIO
+ tristate "GPIO driver for 74xx-ICs with MMIO access"
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ help
+ Say yes here to support GPIO functionality for 74xx-compatible ICs
+ with MMIO access. Compatible models include:
+ 1 bit: 741G125 (Input), 741G74 (Output)
+ 2 bits: 742G125 (Input), 7474 (Output)
+ 4 bits: 74125 (Input), 74175 (Output)
+ 6 bits: 74365 (Input), 74174 (Output)
+ 8 bits: 74244 (Input), 74273 (Output)
+ 16 bits: 741624 (Input), 7416374 (Output)
+
config GPIO_CLPS711X
tristate "CLPS711X GPIO support"
depends on ARCH_CLPS711X || COMPILE_TEST
@@ -134,6 +148,8 @@ config GPIO_GENERIC_PLATFORM
config GPIO_DWAPB
tristate "Synopsys DesignWare APB GPIO driver"
+ depends on ARM
+ depends on OF_GPIO
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
help
@@ -333,6 +349,13 @@ config GPIO_TZ1090_PDC
help
Say yes here to support Toumaz Xenif TZ1090 PDC GPIOs.
+config GPIO_VF610
+ def_bool y
+ depends on ARCH_MXC && SOC_VF610
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support Vybrid vf610 GPIOs.
+
config GPIO_XGENE
bool "APM X-Gene GPIO controller support"
depends on ARM64 && OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e60677b8ccb4..81755f1305e6 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
+obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_TZ1090) += gpio-tz1090.o
obj-$(CONFIG_GPIO_TZ1090_PDC) += gpio-tz1090-pdc.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
new file mode 100644
index 000000000000..0763655cca6c
--- /dev/null
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -0,0 +1,170 @@
+/*
+ * 74xx MMIO GPIO driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/basic_mmio_gpio.h>
+#include <linux/platform_device.h>
+
+#define MMIO_74XX_DIR_IN (0 << 8)
+#define MMIO_74XX_DIR_OUT (1 << 8)
+#define MMIO_74XX_BIT_CNT(x) ((x) & 0xff)
+
+struct mmio_74xx_gpio_priv {
+ struct bgpio_chip bgc;
+ unsigned flags;
+};
+
+static const struct of_device_id mmio_74xx_gpio_ids[] = {
+ {
+ .compatible = "ti,741g125",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 1),
+ },
+ {
+ .compatible = "ti,742g125",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 2),
+ },
+ {
+ .compatible = "ti,74125",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 4),
+ },
+ {
+ .compatible = "ti,74365",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 6),
+ },
+ {
+ .compatible = "ti,74244",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 8),
+ },
+ {
+ .compatible = "ti,741624",
+ .data = (const void *)(MMIO_74XX_DIR_IN | 16),
+ },
+ {
+ .compatible = "ti,741g74",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 1),
+ },
+ {
+ .compatible = "ti,7474",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 2),
+ },
+ {
+ .compatible = "ti,74175",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 4),
+ },
+ {
+ .compatible = "ti,74174",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 6),
+ },
+ {
+ .compatible = "ti,74273",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 8),
+ },
+ {
+ .compatible = "ti,7416374",
+ .data = (const void *)(MMIO_74XX_DIR_OUT | 16),
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmio_74xx_gpio_ids);
+
+static inline struct mmio_74xx_gpio_priv *to_74xx_gpio(struct gpio_chip *gc)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return container_of(bgc, struct mmio_74xx_gpio_priv, bgc);
+}
+
+static int mmio_74xx_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ struct mmio_74xx_gpio_priv *priv = to_74xx_gpio(gc);
+
+ return (priv->flags & MMIO_74XX_DIR_OUT) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
+static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct mmio_74xx_gpio_priv *priv = to_74xx_gpio(gc);
+
+ return (priv->flags & MMIO_74XX_DIR_OUT) ? -ENOTSUPP : 0;
+}
+
+static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct mmio_74xx_gpio_priv *priv = to_74xx_gpio(gc);
+
+ if (priv->flags & MMIO_74XX_DIR_OUT) {
+ gc->set(gc, gpio, val);
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int mmio_74xx_gpio_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
+ struct mmio_74xx_gpio_priv *priv;
+ struct resource *res;
+ void __iomem *dat;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dat = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dat))
+ return PTR_ERR(dat);
+
+ priv->flags = (unsigned)of_id->data;
+
+ err = bgpio_init(&priv->bgc, &pdev->dev,
+ DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
+ dat, NULL, NULL, NULL, NULL, 0);
+ if (err)
+ return err;
+
+ priv->bgc.gc.direction_input = mmio_74xx_dir_in;
+ priv->bgc.gc.direction_output = mmio_74xx_dir_out;
+ priv->bgc.gc.get_direction = mmio_74xx_get_direction;
+ priv->bgc.gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
+ priv->bgc.gc.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, priv);
+
+ return gpiochip_add(&priv->bgc.gc);
+}
+
+static int mmio_74xx_gpio_remove(struct platform_device *pdev)
+{
+ struct mmio_74xx_gpio_priv *priv = platform_get_drvdata(pdev);
+
+ return bgpio_remove(&priv->bgc);
+}
+
+static struct platform_driver mmio_74xx_gpio_driver = {
+ .driver = {
+ .name = "74xx-mmio-gpio",
+ .of_match_table = mmio_74xx_gpio_ids,
+ },
+ .probe = mmio_74xx_gpio_probe,
+ .remove = mmio_74xx_gpio_remove,
+};
+module_platform_driver(mmio_74xx_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("74xx MMIO GPIO driver");
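As an aside, the match table in this new gpio-74xx-mmio driver packs everything it needs to know about a 74xx part into one unsigned value: the direction flag sits in bit 8 and the line count in the low byte, which is exactly what the MMIO_74XX_DIR_* masks and MMIO_74XX_BIT_CNT() above decode. A minimal standalone sketch of that encoding (the main() harness, printf output and the chosen "ti,7416374" example are illustration only, not driver code):

    #include <stdio.h>

    /* constants copied from gpio-74xx-mmio.c */
    #define MMIO_74XX_DIR_IN     (0 << 8)
    #define MMIO_74XX_DIR_OUT    (1 << 8)
    #define MMIO_74XX_BIT_CNT(x) ((x) & 0xff)

    int main(void)
    {
            /* e.g. the "ti,7416374" entry: 16 output lines */
            unsigned int flags = MMIO_74XX_DIR_OUT | 16;

            printf("direction=%s lines=%u\n",
                   (flags & MMIO_74XX_DIR_OUT) ? "out" : "in",
                   MMIO_74XX_BIT_CNT(flags));   /* direction=out lines=16 */
            return 0;
    }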
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index b08bd169e568..caff711ca5a9 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -177,7 +177,6 @@ static int adp5520_gpio_remove(struct platform_device *pdev)
static struct platform_driver adp5520_gpio_driver = {
.driver = {
.name = "adp5520-gpio",
- .owner = THIS_MODULE,
},
.probe = adp5520_gpio_probe,
.remove = adp5520_gpio_remove,
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 3c09f1a6872a..d3d2d1099f64 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -223,6 +223,7 @@ found:
if (err) {
printk(KERN_ERR "GPIO registering failed (%d)\n",
err);
+ ioport_unmap(gp.pm);
release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
goto out;
}
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index de0801e9767a..b164ce837b43 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -470,7 +470,7 @@ static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
+ if (gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
dev_err(kona_gpio->gpio_chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -483,7 +483,7 @@ static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- gpio_unlock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_unlock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
}
static struct irq_chip bcm_gpio_irq_chip = {
@@ -668,7 +668,6 @@ err_irq_domain:
static struct platform_driver bcm_kona_gpio_driver = {
.driver = {
.name = "bcm-kona-gpio",
- .owner = THIS_MODULE,
.of_match_table = bcm_kona_gpio_of_match,
},
.probe = bcm_kona_gpio_probe,
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index e1e861239e95..b6908f1ff1ab 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -87,7 +87,6 @@ MODULE_DEVICE_TABLE(of, clps711x_gpio_ids);
static struct platform_driver clps711x_gpio_driver = {
.driver = {
.name = "clps711x-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(clps711x_gpio_ids),
},
.probe = clps711x_gpio_probe,
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index bbfe7f508502..55d4803d71b0 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -379,7 +379,6 @@ static struct platform_driver crystalcove_gpio_driver = {
.remove = crystalcove_gpio_remove,
.driver = {
.name = "crystal_cove_gpio",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 668127fe90ef..7b0b198a563d 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -322,7 +322,8 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
goto done;
}
- if (!request_region(res->start, resource_size(res), pdev->name)) {
+ if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
dev_err(&pdev->dev, "can't request region\n");
goto done;
}
@@ -348,31 +349,24 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
/* finally, register with the generic GPIO API */
err = gpiochip_add(&cs5535_gpio_chip.chip);
if (err)
- goto release_region;
+ goto done;
return 0;
-release_region:
- release_region(res->start, resource_size(res));
done:
return err;
}
static int cs5535_gpio_remove(struct platform_device *pdev)
{
- struct resource *r;
-
gpiochip_remove(&cs5535_gpio_chip.chip);
- r = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(r->start, resource_size(r));
return 0;
}
static struct platform_driver cs5535_gpio_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = cs5535_gpio_probe,
.remove = cs5535_gpio_remove,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index c5bccd4dec96..389a4d2a4926 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -246,7 +246,6 @@ static struct platform_driver da9052_gpio_driver = {
.remove = da9052_gpio_remove,
.driver = {
.name = "da9052-gpio",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 9167c4331081..b8d757036887 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -183,7 +183,6 @@ static struct platform_driver da9055_gpio_driver = {
.remove = da9055_gpio_remove,
.driver = {
.name = "da9055-gpio",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 9f0682534e2f..c5e05c82d67c 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -234,11 +234,6 @@ static int davinci_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Invalid memory resource\n");
- return -EBUSY;
- }
-
gpio_base = devm_ioremap_resource(dev, res);
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
@@ -619,7 +614,6 @@ static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
.name = "davinci_gpio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index b43cd84b61f1..b4eb6a657d34 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -194,7 +194,7 @@ static int dwapb_irq_reqres(struct irq_data *d)
struct dwapb_gpio *gpio = igc->private;
struct bgpio_chip *bgc = &gpio->ports[0].bgc;
- if (gpio_lock_as_irq(&bgc->gc, irqd_to_hwirq(d))) {
+ if (gpiochip_lock_as_irq(&bgc->gc, irqd_to_hwirq(d))) {
dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
return -EINVAL;
@@ -208,7 +208,7 @@ static void dwapb_irq_relres(struct irq_data *d)
struct dwapb_gpio *gpio = igc->private;
struct bgpio_chip *bgc = &gpio->ports[0].bgc;
- gpio_unlock_as_irq(&bgc->gc, irqd_to_hwirq(d));
+ gpiochip_unlock_as_irq(&bgc->gc, irqd_to_hwirq(d));
}
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
@@ -703,7 +703,6 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
static struct platform_driver dwapb_gpio_driver = {
.driver = {
.name = "gpio-dwapb",
- .owner = THIS_MODULE,
.pm = &dwapb_gpio_pm_ops,
.of_match_table = of_match_ptr(dwapb_of_match),
},
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index fe49ec3cdb7d..3cfcfc620c8e 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -103,7 +103,7 @@ static int em_gio_irq_reqres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
+ if (gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
dev_err(p->gpio_chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
@@ -116,7 +116,7 @@ static void em_gio_irq_relres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
- gpio_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
+ gpiochip_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
}
@@ -330,12 +330,7 @@ static int em_gio_probe(struct platform_device *pdev)
goto err0;
}
- ret = of_alias_get_id(pdev->dev.of_node, "gpio");
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't get OF id\n");
- goto err0;
- }
- pdata->gpio_base = ret * 32; /* 32 GPIOs per instance */
+ pdata->gpio_base = -1;
}
gpio_chip = &p->gpio_chip;
@@ -428,7 +423,6 @@ static struct platform_driver em_gio_device_driver = {
.driver = {
.name = "em_gio",
.of_match_table = em_gio_dt_ids,
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index dcc2bb4074ef..45684f36ddb1 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -374,7 +374,6 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
static struct platform_driver ep93xx_gpio_driver = {
.driver = {
.name = "gpio-ep93xx",
- .owner = THIS_MODULE,
},
.probe = ep93xx_gpio_probe,
};
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index fd3202f968ff..1be291ac6319 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -417,7 +417,6 @@ err:
static struct platform_driver f7188x_gpio_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = f7188x_gpio_probe,
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 1237a73c3c91..aea5c2a53cc0 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -120,7 +120,6 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
static struct platform_driver gef_gpio_driver = {
.driver = {
.name = "gef-gpio",
- .owner = THIS_MODULE,
.of_match_table = gef_gpio_ids,
},
};
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 66ad3df9d9cf..09daaf2aeb56 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -441,6 +441,7 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
+ irq_domain_remove(priv->domain);
return err;
}
@@ -490,7 +491,6 @@ MODULE_DEVICE_TABLE(of, grgpio_match);
static struct platform_driver grgpio_driver = {
.driver = {
.name = "grgpio",
- .owner = THIS_MODULE,
.of_match_table = grgpio_match,
},
.probe = grgpio_probe,
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 3784e81e7762..7818cd1453ae 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -526,7 +526,6 @@ static int ichx_gpio_remove(struct platform_device *pdev)
static struct platform_driver ichx_gpio_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
.probe = ichx_gpio_probe,
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 0a5e9d3f308c..2ed0237a8baf 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -120,7 +120,6 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
static struct platform_driver iop3xx_gpio_driver = {
.driver = {
.name = "gpio-iop",
- .owner = THIS_MODULE,
},
.probe = iop3xx_gpio_probe,
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 29ffe22ad97a..3a1664335f5e 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -203,7 +203,6 @@ static int ttl_remove(struct platform_device *pdev)
static struct platform_driver ttl_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ttl_probe,
.remove = ttl_remove,
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index fd150adeebf9..443518f63f15 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -206,7 +206,6 @@ static int kempld_gpio_remove(struct platform_device *pdev)
static struct platform_driver kempld_gpio_driver = {
.driver = {
.name = "kempld-gpio",
- .owner = THIS_MODULE,
},
.probe = kempld_gpio_probe,
.remove = kempld_gpio_remove,
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 6bbdad805b78..cfc5b12b43ad 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -231,7 +231,6 @@ static struct platform_driver lp3943_gpio_driver = {
.remove = lp3943_gpio_remove,
.driver = {
.name = "lp3943-gpio",
- .owner = THIS_MODULE,
.of_match_table = lp3943_gpio_of_match,
},
};
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index b9b9799b368b..47e2dde63734 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -569,7 +569,6 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
.name = "lpc32xx-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lpc32xx_gpio_of_match),
},
.probe = lpc32xx_gpio_probe,
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index fa945ec9ccff..127c755b38dc 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -450,7 +450,6 @@ static struct platform_driver lp_gpio_driver = {
.remove = lp_gpio_remove,
.driver = {
.name = "lp_gpio",
- .owner = THIS_MODULE,
.pm = &lp_gpio_pm_ops,
.acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
},
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 8488e2fd307c..da9c316059bc 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -65,6 +65,7 @@ struct mcp23s08_ops {
struct mcp23s08 {
u8 addr;
+ bool irq_active_high;
u16 cache[11];
u16 irq_rise;
@@ -444,7 +445,7 @@ static int mcp23s08_irq_reqres(struct irq_data *data)
{
struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
- if (gpio_lock_as_irq(&mcp->chip, data->hwirq)) {
+ if (gpiochip_lock_as_irq(&mcp->chip, data->hwirq)) {
dev_err(mcp->chip.dev,
"unable to lock HW IRQ %lu for IRQ usage\n",
data->hwirq);
@@ -458,7 +459,7 @@ static void mcp23s08_irq_relres(struct irq_data *data)
{
struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
- gpio_unlock_as_irq(&mcp->chip, data->hwirq);
+ gpiochip_unlock_as_irq(&mcp->chip, data->hwirq);
}
static struct irq_chip mcp23s08_irq_chip = {
@@ -476,6 +477,7 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
{
struct gpio_chip *chip = &mcp->chip;
int err, irq, j;
+ unsigned long irqflags = IRQF_ONESHOT | IRQF_SHARED;
mutex_init(&mcp->irq_lock);
@@ -484,9 +486,13 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
if (!mcp->irq_domain)
return -ENODEV;
+ if (mcp->irq_active_high)
+ irqflags |= IRQF_TRIGGER_HIGH;
+ else
+ irqflags |= IRQF_TRIGGER_LOW;
+
err = devm_request_threaded_irq(chip->dev, mcp->irq, NULL, mcp23s08_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- dev_name(chip->dev), mcp);
+ irqflags, dev_name(chip->dev), mcp);
if (err != 0) {
dev_err(chip->dev, "unable to request IRQ#%d: %d\n",
mcp->irq, err);
@@ -514,8 +520,6 @@ static void mcp23s08_irq_teardown(struct mcp23s08 *mcp)
{
unsigned int irq, i;
- free_irq(mcp->irq, mcp);
-
for (i = 0; i < mcp->chip.ngpio; i++) {
irq = irq_find_mapping(mcp->irq_domain, i);
if (irq > 0)
@@ -590,6 +594,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->data = data;
mcp->addr = addr;
+ mcp->irq_active_high = false;
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
@@ -649,14 +654,25 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
mcp->irq_controller = pdata->irq_controller;
- if (mcp->irq && mcp->irq_controller && (type == MCP_TYPE_017))
- mirror = pdata->mirror;
+ if (mcp->irq && mcp->irq_controller) {
+ mcp->irq_active_high =
+ of_property_read_bool(mcp->chip.dev->of_node,
+ "microchip,irq-active-high");
- if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror) {
+ if (type == MCP_TYPE_017)
+ mirror = pdata->mirror;
+ }
+
+ if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror ||
+ mcp->irq_active_high) {
/* mcp23s17 has IOCON twice, make sure they are in sync */
status &= ~(IOCON_SEQOP | (IOCON_SEQOP << 8));
status |= IOCON_HAEN | (IOCON_HAEN << 8);
- status &= ~(IOCON_INTPOL | (IOCON_INTPOL << 8));
+ if (mcp->irq_active_high)
+ status |= IOCON_INTPOL | (IOCON_INTPOL << 8);
+ else
+ status &= ~(IOCON_INTPOL | (IOCON_INTPOL << 8));
+
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
@@ -936,11 +952,14 @@ static int mcp23s08_probe(struct spi_device *spi)
return -ENOMEM;
spi_set_drvdata(spi, data);
+ spi->irq = irq_of_parse_and_map(spi->dev.of_node, 0);
+
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
if (!(spi_present_mask & (1 << addr)))
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
+ data->mcp[addr]->irq = spi->irq;
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, pdata,
addr);
@@ -981,6 +1000,8 @@ static int mcp23s08_remove(struct spi_device *spi)
if (!data->mcp[addr])
continue;
+ if (spi->irq && data->mcp[addr]->irq_controller)
+ mcp23s08_irq_teardown(data->mcp[addr]);
gpiochip_remove(&data->mcp[addr]->chip);
}
kfree(data);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 2983dfbd0668..f228b1ce0ce0 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -145,7 +145,6 @@ static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
.driver = {
.name = "gpio-mm-ltq",
- .owner = THIS_MODULE,
.of_match_table = ltq_mm_match,
},
};
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index 4661e181be04..31e2551ed903 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -142,7 +142,6 @@ static const struct of_device_id moxart_gpio_match[] = {
static struct platform_driver moxart_gpio_driver = {
.driver = {
.name = "moxart-gpio",
- .owner = THIS_MODULE,
.of_match_table = moxart_gpio_match,
},
.probe = moxart_gpio_probe,
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 42647f26c9e0..8ce6c9510035 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -192,7 +192,6 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.driver = {
.name = "mpc5200-gpio-wkup",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
@@ -347,7 +346,6 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.driver = {
.name = "mpc5200-gpio",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index d7d6d72eba33..d1ff879e6ff2 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -105,6 +105,32 @@ static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
+static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ mpc8xxx_gc->data |= mpc8xxx_gpio2mask(i);
+ else
+ mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(i);
+ }
+ }
+
+ out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -344,6 +370,7 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
+ gc->set_multiple = mpc8xxx_gpio_set_multiple;
gc->to_irq = mpc8xxx_gpio_to_irq;
ret = of_mm_gpiochip_add(np, mm_gc);
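The new mpc8xxx set_multiple() callback follows the mask/bits contract that gpiolib's gpio_chip_set_multiple() helper (added further below) expects: one bit per line, mask selects which lines are touched, bits carries the values to write. A rough standalone sketch of that contract against a shadow word (the shadow variable and main() are made up for illustration; the driver itself updates mpc8xxx_gc->data under its spinlock and writes GPIO_DAT once):

    #include <stdio.h>

    /* apply mask/bits semantics to a software shadow of the data register */
    static void apply_set_multiple(unsigned long *shadow,
                                   unsigned long mask, unsigned long bits,
                                   unsigned int ngpio)
    {
            unsigned int i;

            for (i = 0; i < ngpio; i++) {
                    if (!(mask & (1UL << i)))
                            continue;            /* line not selected */
                    if (bits & (1UL << i))
                            *shadow |= 1UL << i;
                    else
                            *shadow &= ~(1UL << i);
            }
    }

    int main(void)
    {
            unsigned long data = 0x0f;           /* lines 0-3 currently high */

            /* drive line 1 low and line 4 high in a single call */
            apply_set_multiple(&data, 0x12, 0x10, 8);
            printf("0x%02lx\n", data);           /* prints 0x1d */
            return 0;
    }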
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 8f70ded82a2b..01acf0a8cdb1 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -321,7 +321,6 @@ err:
static struct platform_driver platform_msic_gpio_driver = {
.driver = {
.name = "msic_gpio",
- .owner = THIS_MODULE,
},
.probe = platform_msic_gpio_probe,
};
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index 73b73969d361..edf285e26667 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -686,7 +686,7 @@ static int gpio_msm_v1_probe(struct platform_device *pdev)
irq_set_chained_handler(irq1, msm_gpio_irq_handler);
irq_set_chained_handler(irq2, msm_gpio_irq_handler);
irq_set_irq_wake(irq1, 1);
- irq_set_irq_wake(irq2, 2);
+ irq_set_irq_wake(irq2, 1);
return 0;
}
@@ -701,7 +701,6 @@ MODULE_DEVICE_TABLE(platform, gpio_msm_v1_device_ids);
static struct platform_driver gpio_msm_v1_driver = {
.driver = {
.name = "gpio-msm-v1",
- .owner = THIS_MODULE,
},
.probe = gpio_msm_v1_probe,
.id_table = gpio_msm_v1_device_ids,
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 94f57670df9a..52ff18229fdc 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -450,7 +450,6 @@ static struct platform_driver msm_gpio_driver = {
.remove = msm_gpio_remove,
.driver = {
.name = "msmgpio",
- .owner = THIS_MODULE,
.of_match_table = msm_gpio_of_match,
},
};
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 418e38650363..7bc3e9b288f3 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -83,6 +83,14 @@ struct mvebu_gpio_chip {
int irqbase;
struct irq_domain *domain;
int soc_variant;
+
+ /* Used to preserve GPIO registers across suspend/resume */
+ u32 out_reg;
+ u32 io_conf_reg;
+ u32 blink_en_reg;
+ u32 in_pol_reg;
+ u32 edge_mask_regs[4];
+ u32 level_mask_regs[4];
};
/*
@@ -554,6 +562,93 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
+static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
+ int i;
+
+ mvchip->out_reg = readl(mvebu_gpioreg_out(mvchip));
+ mvchip->io_conf_reg = readl(mvebu_gpioreg_io_conf(mvchip));
+ mvchip->blink_en_reg = readl(mvebu_gpioreg_blink(mvchip));
+ mvchip->in_pol_reg = readl(mvebu_gpioreg_in_pol(mvchip));
+
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ mvchip->edge_mask_regs[0] =
+ readl(mvchip->membase + GPIO_EDGE_MASK_OFF);
+ mvchip->level_mask_regs[0] =
+ readl(mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ for (i = 0; i < 2; i++) {
+ mvchip->edge_mask_regs[i] =
+ readl(mvchip->membase +
+ GPIO_EDGE_MASK_MV78200_OFF(i));
+ mvchip->level_mask_regs[i] =
+ readl(mvchip->membase +
+ GPIO_LEVEL_MASK_MV78200_OFF(i));
+ }
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ for (i = 0; i < 4; i++) {
+ mvchip->edge_mask_regs[i] =
+ readl(mvchip->membase +
+ GPIO_EDGE_MASK_ARMADAXP_OFF(i));
+ mvchip->level_mask_regs[i] =
+ readl(mvchip->membase +
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static int mvebu_gpio_resume(struct platform_device *pdev)
+{
+ struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
+ int i;
+
+ writel(mvchip->out_reg, mvebu_gpioreg_out(mvchip));
+ writel(mvchip->io_conf_reg, mvebu_gpioreg_io_conf(mvchip));
+ writel(mvchip->blink_en_reg, mvebu_gpioreg_blink(mvchip));
+ writel(mvchip->in_pol_reg, mvebu_gpioreg_in_pol(mvchip));
+
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ writel(mvchip->edge_mask_regs[0],
+ mvchip->membase + GPIO_EDGE_MASK_OFF);
+ writel(mvchip->level_mask_regs[0],
+ mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ for (i = 0; i < 2; i++) {
+ writel(mvchip->edge_mask_regs[i],
+ mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(i));
+ writel(mvchip->level_mask_regs[i],
+ mvchip->membase +
+ GPIO_LEVEL_MASK_MV78200_OFF(i));
+ }
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ for (i = 0; i < 4; i++) {
+ writel(mvchip->edge_mask_regs[i],
+ mvchip->membase +
+ GPIO_EDGE_MASK_ARMADAXP_OFF(i));
+ writel(mvchip->level_mask_regs[i],
+ mvchip->membase +
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
static int mvebu_gpio_probe(struct platform_device *pdev)
{
struct mvebu_gpio_chip *mvchip;
@@ -577,6 +672,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip)
return -ENOMEM;
+ platform_set_drvdata(pdev, mvchip);
+
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
dev_err(&pdev->dev, "Missing ngpios OF property\n");
return -ENODEV;
@@ -731,9 +828,10 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
static struct platform_driver mvebu_gpio_driver = {
.driver = {
.name = "mvebu-gpio",
- .owner = THIS_MODULE,
.of_match_table = mvebu_gpio_of_match,
},
.probe = mvebu_gpio_probe,
+ .suspend = mvebu_gpio_suspend,
+ .resume = mvebu_gpio_resume,
};
module_platform_driver(mvebu_gpio_driver);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index f4e54a92e04a..9f7446a7ac64 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -496,7 +496,6 @@ out_bgio:
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
- .owner = THIS_MODULE,
.of_match_table = mxc_gpio_dt_ids,
},
.probe = mxc_gpio_probe,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 8ffdd7d2bade..84cbda6acdda 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -227,6 +227,18 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return irq_find_mapping(port->domain, offset);
}
+static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ struct mxs_gpio_port *port =
+ container_of(bgc, struct mxs_gpio_port, bgc);
+ u32 mask = 1 << offset;
+ u32 dir;
+
+ dir = readl(port->base + PINCTRL_DOE(port));
+ return !(dir & mask);
+}
+
static struct platform_device_id mxs_gpio_ids[] = {
{
.name = "imx23-gpio",
@@ -320,6 +332,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_irqdesc_free;
port->bgc.gc.to_irq = mxs_gpio_to_irq;
+ port->bgc.gc.get_direction = mxs_gpio_get_direction;
port->bgc.gc.base = port->id * 32;
err = gpiochip_add(&port->bgc.gc);
@@ -338,7 +351,6 @@ out_irqdesc_free:
static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
- .owner = THIS_MODULE,
.of_match_table = mxs_gpio_dt_ids,
},
.probe = mxs_gpio_probe,
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 5c5770c99c80..62ae251d4490 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -144,7 +144,6 @@ MODULE_DEVICE_TABLE(of, octeon_gpio_match);
static struct platform_driver octeon_gpio_driver = {
.driver = {
.name = "octeon_gpio",
- .owner = THIS_MODULE,
.of_match_table = octeon_gpio_match,
},
.probe = octeon_gpio_probe,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 3d6b445665ad..30646cfe0efa 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -800,7 +800,7 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
unsigned offset = GPIO_INDEX(bank, gpio);
spin_lock_irqsave(&bank->lock, flags);
- gpio_unlock_as_irq(&bank->chip, offset);
+ gpiochip_unlock_as_irq(&bank->chip, offset);
bank->irq_usage &= ~(BIT(offset));
omap_disable_gpio_module(bank, offset);
omap_reset_gpio(bank, gpio);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 84b49cfb81a8..04756130437f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -52,28 +52,34 @@ struct pl061_gpio {
void __iomem *base;
struct gpio_chip gc;
+ bool uses_pinctrl;
#ifdef CONFIG_PM
struct pl061_context_save_regs csave_regs;
#endif
};
-static int pl061_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int pl061_gpio_request(struct gpio_chip *gc, unsigned offset)
{
/*
* Map back to global GPIO space and request muxing, the direction
* parameter does not matter for this controller.
*/
- int gpio = chip->base + offset;
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+ int gpio = gc->base + offset;
- return pinctrl_request_gpio(gpio);
+ if (chip->uses_pinctrl)
+ return pinctrl_request_gpio(gpio);
+ return 0;
}
-static void pl061_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void pl061_gpio_free(struct gpio_chip *gc, unsigned offset)
{
- int gpio = chip->base + offset;
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+ int gpio = gc->base + offset;
- pinctrl_free_gpio(gpio);
+ if (chip->uses_pinctrl)
+ pinctrl_free_gpio(gpio);
}
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -263,6 +269,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(chip->base);
spin_lock_init(&chip->lock);
+ if (of_property_read_bool(dev->of_node, "gpio-ranges"))
+ chip->uses_pinctrl = true;
chip->gc.request = pl061_gpio_request;
chip->gc.free = pl061_gpio_free;
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 769233d2da6d..6eabf239676b 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -155,7 +155,6 @@ static int rc5t583_gpio_remove(struct platform_device *pdev)
static struct platform_driver rc5t583_gpio_driver = {
.driver = {
.name = "rc5t583-gpio",
- .owner = THIS_MODULE,
},
.probe = rc5t583_gpio_probe,
.remove = rc5t583_gpio_remove,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index bf6c09450fee..584484e3f1e3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -1,6 +1,7 @@
/*
* Renesas R-Car GPIO Support
*
+ * Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2013 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
@@ -291,22 +292,30 @@ struct gpio_rcar_info {
bool has_both_edge_trigger;
};
+static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
+ .has_both_edge_trigger = false,
+};
+
+static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
+ .has_both_edge_trigger = true,
+};
+
static const struct of_device_id gpio_rcar_of_table[] = {
{
.compatible = "renesas,gpio-r8a7790",
- .data = (void *)&(const struct gpio_rcar_info) {
- .has_both_edge_trigger = true,
- },
+ .data = &gpio_rcar_info_gen2,
}, {
.compatible = "renesas,gpio-r8a7791",
- .data = (void *)&(const struct gpio_rcar_info) {
- .has_both_edge_trigger = true,
- },
+ .data = &gpio_rcar_info_gen2,
+ }, {
+ .compatible = "renesas,gpio-r8a7793",
+ .data = &gpio_rcar_info_gen2,
+ }, {
+ .compatible = "renesas,gpio-r8a7794",
+ .data = &gpio_rcar_info_gen2,
}, {
.compatible = "renesas,gpio-rcar",
- .data = (void *)&(const struct gpio_rcar_info) {
- .has_both_edge_trigger = false,
- },
+ .data = &gpio_rcar_info_gen1,
}, {
/* Terminator */
},
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 99720c8bc8ed..0a0cf1307d2f 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -250,7 +250,6 @@ static int sch_gpio_remove(struct platform_device *pdev)
static struct platform_driver sch_gpio_driver = {
.driver = {
.name = "sch_gpio",
- .owner = THIS_MODULE,
},
.probe = sch_gpio_probe,
.remove = sch_gpio_remove,
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 353263c85d26..69ffca5b073b 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -191,7 +191,6 @@ MODULE_DEVICE_TABLE(of, spics_gpio_of_match);
static struct platform_driver spics_gpio_driver = {
.probe = spics_gpio_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "spear-spics-gpio",
.of_match_table = spics_gpio_of_match,
},
@@ -204,5 +203,5 @@ static int __init spics_gpio_init(void)
subsys_initcall(spics_gpio_init);
MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
-MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction");
+MODULE_DESCRIPTION("STMicroelectronics SPEAr SPI Chip Select Abstraction");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 68e3fcb1acea..18579ac65b2b 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -429,7 +429,6 @@ err_free_descs:
static struct platform_driver sta2x11_gpio_platform_driver = {
.driver = {
.name = "sta2x11-gpio",
- .owner = THIS_MODULE,
},
.probe = gsta_probe,
};
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 7e359b7cce1b..202361eb7279 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -199,21 +199,17 @@ static int xway_stp_hw_init(struct xway_stp *chip)
static int xway_stp_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *res;
const __be32 *shadow, *groups, *dsl, *phy;
struct xway_stp *chip;
struct clk *clk;
int ret = 0;
- if (!res) {
- dev_err(&pdev->dev, "failed to request STP resource\n");
- return -ENOENT;
- }
-
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->virt = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(chip->virt))
return PTR_ERR(chip->virt);
@@ -287,7 +283,6 @@ static struct platform_driver xway_stp_driver = {
.probe = xway_stp_probe,
.driver = {
.name = "gpio-stp-xway",
- .owner = THIS_MODULE,
.of_match_table = xway_stp_match,
},
};
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index e82fde4b6898..257e2989215c 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -253,7 +253,6 @@ static int syscon_gpio_remove(struct platform_device *pdev)
static struct platform_driver syscon_gpio_driver = {
.driver = {
.name = "gpio-syscon",
- .owner = THIS_MODULE,
.of_match_table = syscon_gpio_ids,
},
.probe = syscon_gpio_probe,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 9e615be8032c..62ab9f4b2cd3 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -195,18 +195,13 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (of_property_read_u32(dn, "abilis,ngpio", &ngpio))
return -EINVAL;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource defined.\n");
- return -EINVAL;
- }
-
tb10x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tb10x_gpio), GFP_KERNEL);
if (tb10x_gpio == NULL)
return -ENOMEM;
spin_lock_init(&tb10x_gpio->spinlock);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tb10x_gpio->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
@@ -316,7 +311,6 @@ static struct platform_driver tb10x_gpio_driver = {
.driver = {
.name = "tb10x-gpio",
.of_match_table = tb10x_gpio_dt_ids,
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 4e8fb8261a87..1741981d53c8 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -233,7 +233,7 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- ret = gpio_lock_as_irq(&tegra_gpio_chip, gpio);
+ ret = gpiochip_lock_as_irq(&tegra_gpio_chip, gpio);
if (ret) {
dev_err(dev, "unable to lock Tegra GPIO %d as IRQ\n", gpio);
return ret;
@@ -263,7 +263,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
int gpio = d->hwirq;
- gpio_unlock_as_irq(&tegra_gpio_chip, gpio);
+ gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
}
static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -528,7 +528,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
static struct platform_driver tegra_gpio_driver = {
.driver = {
.name = "tegra-gpio",
- .owner = THIS_MODULE,
.pm = &tegra_gpio_pm_ops,
.of_match_table = tegra_gpio_of_match,
},
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index a685a3cbbc81..e8f97e03c9bb 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -330,7 +330,6 @@ static int timbgpio_remove(struct platform_device *pdev)
static struct platform_driver timbgpio_platform_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = timbgpio_probe,
.remove = timbgpio_remove,
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 22052d84c63b..472fb5b8779f 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -124,7 +124,6 @@ static int tps65912_gpio_remove(struct platform_device *pdev)
static struct platform_driver tps65912_gpio_driver = {
.driver = {
.name = "tps65912-gpio",
- .owner = THIS_MODULE,
},
.probe = tps65912_gpio_probe,
.remove = tps65912_gpio_remove,
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index de18591ff11e..92fbabd82879 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -452,7 +452,6 @@ MODULE_DEVICE_TABLE(platform, ts5500_dio_ids);
static struct platform_driver ts5500_dio_driver = {
.driver = {
.name = "ts5500-dio",
- .owner = THIS_MODULE,
},
.probe = ts5500_dio_probe,
.remove = ts5500_dio_remove,
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 118828b3736f..9e1dbb9877c1 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -605,7 +605,6 @@ MODULE_ALIAS("platform:twl4030_gpio");
static struct platform_driver gpio_twl4030_driver = {
.driver = {
.name = "twl4030_gpio",
- .owner = THIS_MODULE,
.of_match_table = twl_gpio_match,
},
.probe = gpio_twl4030_probe,
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index f28e04b88aa9..c946e7eef3ee 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -121,7 +121,6 @@ MODULE_ALIAS("platform:twl6040-gpo");
static struct platform_driver gpo_twl6040_driver = {
.driver = {
.name = "twl6040-gpo",
- .owner = THIS_MODULE,
},
.probe = gpo_twl6040_probe,
.remove = gpo_twl6040_remove,
diff --git a/drivers/gpio/gpio-tz1090-pdc.c b/drivers/gpio/gpio-tz1090-pdc.c
index f512da299b3d..d7536226b847 100644
--- a/drivers/gpio/gpio-tz1090-pdc.c
+++ b/drivers/gpio/gpio-tz1090-pdc.c
@@ -230,7 +230,6 @@ static struct of_device_id tz1090_pdc_gpio_of_match[] = {
static struct platform_driver tz1090_pdc_gpio_driver = {
.driver = {
.name = "tz1090-pdc-gpio",
- .owner = THIS_MODULE,
.of_match_table = tz1090_pdc_gpio_of_match,
},
.probe = tz1090_pdc_gpio_probe,
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 5246a60eff6d..e3024bbba447 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -446,7 +446,7 @@ static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
bank->irq = irq_of_parse_and_map(np, 0);
/* The interrupt is optional (it may be used by another core on chip) */
- if (bank->irq < 0) {
+ if (!bank->irq) {
dev_info(dev, "IRQ not provided for bank %u, IRQs disabled\n",
info->index);
return 0;
@@ -593,7 +593,6 @@ static struct of_device_id tz1090_gpio_of_match[] = {
static struct platform_driver tz1090_gpio_driver = {
.driver = {
.name = "tz1090-gpio",
- .owner = THIS_MODULE,
.of_match_table = tz1090_gpio_of_match,
},
.probe = tz1090_gpio_probe,
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
new file mode 100644
index 000000000000..4ee4cee832ec
--- /dev/null
+++ b/drivers/gpio/gpio-vf610.c
@@ -0,0 +1,295 @@
+/*
+ * vf610 GPIO support through PORT and GPIO module
+ *
+ * Copyright (c) 2014 Toradex AG.
+ *
+ * Author: Stefan Agner <stefan@agner.ch>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#define VF610_GPIO_PER_PORT 32
+
+struct vf610_gpio_port {
+ struct gpio_chip gc;
+ void __iomem *base;
+ void __iomem *gpio_base;
+ u8 irqc[VF610_GPIO_PER_PORT];
+ int irq;
+};
+
+#define GPIO_PDOR 0x00
+#define GPIO_PSOR 0x04
+#define GPIO_PCOR 0x08
+#define GPIO_PTOR 0x0c
+#define GPIO_PDIR 0x10
+
+#define PORT_PCR(n) ((n) * 0x4)
+#define PORT_PCR_IRQC_OFFSET 16
+
+#define PORT_ISFR 0xa0
+#define PORT_DFER 0xc0
+#define PORT_DFCR 0xc4
+#define PORT_DFWR 0xc8
+
+#define PORT_INT_OFF 0x0
+#define PORT_INT_LOGIC_ZERO 0x8
+#define PORT_INT_RISING_EDGE 0x9
+#define PORT_INT_FALLING_EDGE 0xa
+#define PORT_INT_EITHER_EDGE 0xb
+#define PORT_INT_LOGIC_ONE 0xc
+
+static const struct of_device_id vf610_gpio_dt_ids[] = {
+ { .compatible = "fsl,vf610-gpio" },
+ { /* sentinel */ }
+};
+
+static inline void vf610_gpio_writel(u32 val, void __iomem *reg)
+{
+ writel_relaxed(val, reg);
+}
+
+static inline u32 vf610_gpio_readl(void __iomem *reg)
+{
+ return readl_relaxed(reg);
+}
+
+static int vf610_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void vf610_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct vf610_gpio_port *port =
+ container_of(gc, struct vf610_gpio_port, gc);
+
+ return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio));
+}
+
+static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct vf610_gpio_port *port =
+ container_of(gc, struct vf610_gpio_port, gc);
+ unsigned long mask = BIT(gpio);
+
+ if (val)
+ vf610_gpio_writel(mask, port->gpio_base + GPIO_PSOR);
+ else
+ vf610_gpio_writel(mask, port->gpio_base + GPIO_PCOR);
+}
+
+static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return pinctrl_gpio_direction_input(chip->base + gpio);
+}
+
+static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ vf610_gpio_set(chip, gpio, value);
+
+ return pinctrl_gpio_direction_output(chip->base + gpio);
+}
+
+static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ struct vf610_gpio_port *port = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int pin;
+ unsigned long irq_isfr;
+
+ chained_irq_enter(chip, desc);
+
+ irq_isfr = vf610_gpio_readl(port->base + PORT_ISFR);
+
+ for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
+ vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
+
+ generic_handle_irq(irq_find_mapping(port->gc.irqdomain, pin));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void vf610_gpio_irq_ack(struct irq_data *d)
+{
+ struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+ int gpio = d->hwirq;
+
+ vf610_gpio_writel(BIT(gpio), port->base + PORT_ISFR);
+}
+
+static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+ u8 irqc;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ irqc = PORT_INT_RISING_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irqc = PORT_INT_FALLING_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ irqc = PORT_INT_EITHER_EDGE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irqc = PORT_INT_LOGIC_ZERO;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irqc = PORT_INT_LOGIC_ONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ port->irqc[d->hwirq] = irqc;
+
+ return 0;
+}
+
+static void vf610_gpio_irq_mask(struct irq_data *d)
+{
+ struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+
+ vf610_gpio_writel(0, pcr_base);
+}
+
+static void vf610_gpio_irq_unmask(struct irq_data *d)
+{
+ struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+
+ vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
+ pcr_base);
+}
+
+static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
+{
+ struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d);
+
+ if (enable)
+ enable_irq_wake(port->irq);
+ else
+ disable_irq_wake(port->irq);
+
+ return 0;
+}
+
+static struct irq_chip vf610_gpio_irq_chip = {
+ .name = "gpio-vf610",
+ .irq_ack = vf610_gpio_irq_ack,
+ .irq_mask = vf610_gpio_irq_mask,
+ .irq_unmask = vf610_gpio_irq_unmask,
+ .irq_set_type = vf610_gpio_irq_set_type,
+ .irq_set_wake = vf610_gpio_irq_set_wake,
+};
+
+static int vf610_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct vf610_gpio_port *port;
+ struct resource *iores;
+ struct gpio_chip *gc;
+ int ret;
+
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ port->base = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ port->gpio_base = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(port->gpio_base))
+ return PTR_ERR(port->gpio_base);
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ gc = &port->gc;
+ gc->of_node = np;
+ gc->dev = dev;
+ gc->label = "vf610-gpio";
+ gc->ngpio = VF610_GPIO_PER_PORT;
+ gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
+
+ gc->request = vf610_gpio_request;
+ gc->free = vf610_gpio_free;
+ gc->direction_input = vf610_gpio_direction_input;
+ gc->get = vf610_gpio_get;
+ gc->direction_output = vf610_gpio_direction_output;
+ gc->set = vf610_gpio_set;
+
+ ret = gpiochip_add(gc);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the interrupt status register for all GPIOs */
+ vf610_gpio_writel(~0, port->base + PORT_ISFR);
+
+ ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add irqchip\n");
+ gpiochip_remove(gc);
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
+ vf610_gpio_irq_handler);
+
+ return 0;
+}
+
+static struct platform_driver vf610_gpio_driver = {
+ .driver = {
+ .name = "gpio-vf610",
+ .owner = THIS_MODULE,
+ .of_match_table = vf610_gpio_dt_ids,
+ },
+ .probe = vf610_gpio_probe,
+};
+
+static int __init gpio_vf610_init(void)
+{
+ return platform_driver_register(&vf610_gpio_driver);
+}
+device_initcall(gpio_vf610_init);
+
+MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
+MODULE_DESCRIPTION("Freescale VF610 GPIO");
+MODULE_LICENSE("GPL v2");
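One detail of the interrupt plumbing above: vf610_gpio_irq_set_type() only caches the PORT_INT_* trigger nibble in port->irqc[], and the value reaches hardware when vf610_gpio_irq_unmask() writes it to the pin's PCR register shifted by PORT_PCR_IRQC_OFFSET (masking writes 0, which disables the interrupt). A tiny sketch of the register value that write produces (constants copied from the driver; the helper and main() are illustrative only):

    #include <stdio.h>

    #define PORT_PCR_IRQC_OFFSET  16
    #define PORT_INT_RISING_EDGE  0x9

    /* value vf610_gpio_irq_unmask() would write into PORT_PCR(hwirq) */
    static unsigned int pcr_unmask_value(unsigned int irqc)
    {
            return irqc << PORT_PCR_IRQC_OFFSET;
    }

    int main(void)
    {
            /* a rising-edge trigger ends up as 0x00090000 in the PCR */
            printf("0x%08x\n", pcr_unmask_value(PORT_INT_RISING_EDGE));
            return 0;
    }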
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index dbf28fa03f67..c1caa459c02d 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -138,7 +138,7 @@ static void unmask_giuint_low(struct irq_data *d)
static unsigned int startup_giuint(struct irq_data *data)
{
- if (gpio_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ if (gpiochip_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
dev_err(vr41xx_gpio_chip.dev,
"unable to lock HW IRQ %lu for IRQ\n",
data->hwirq);
@@ -150,7 +150,7 @@ static unsigned int startup_giuint(struct irq_data *data)
static void shutdown_giuint(struct irq_data *data)
{
mask_giuint_low(data);
- gpio_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
+ gpiochip_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
}
static struct irq_chip giuint_low_irq_chip = {
@@ -591,7 +591,6 @@ static struct platform_driver giu_device_driver = {
.remove = giu_remove,
.driver = {
.name = "GIU",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 85971d4e23c1..9d21d2fcc327 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -306,7 +306,6 @@ static int vx855gpio_remove(struct platform_device *pdev)
static struct platform_driver vx855gpio_driver = {
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
},
.probe = vx855gpio_probe,
.remove = vx855gpio_remove,
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 7d489221dc1f..18a8182d4fec 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -229,7 +229,6 @@ MODULE_DEVICE_TABLE(of, xgene_gpio_of_match);
static struct platform_driver xgene_gpio_driver = {
.driver = {
.name = "xgene-gpio",
- .owner = THIS_MODULE,
.of_match_table = xgene_gpio_of_match,
.pm = XGENE_GPIO_PM_OPS,
},
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 7081304d6797..93ec95df67a3 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -157,7 +157,6 @@ static int xtensa_gpio_probe(struct platform_device *pdev)
static struct platform_driver xtensa_gpio_driver = {
.driver = {
.name = "xtensa-gpio",
- .owner = THIS_MODULE,
},
.probe = xtensa_gpio_probe,
};
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 54e54e4cc6c4..f769cd53f4e4 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -212,7 +212,6 @@ MODULE_DEVICE_TABLE(of, zevio_gpio_of_match);
static struct platform_driver zevio_gpio_driver = {
.driver = {
.name = "gpio-zevio",
- .owner = THIS_MODULE,
.of_match_table = zevio_gpio_of_match,
},
.probe = zevio_gpio_probe,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index ba98bb59a58f..c0929d938ced 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -11,12 +11,14 @@
*/
#include <linux/errno.h>
+#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/pinctrl/pinctrl.h>
#include "gpiolib.h"
@@ -55,6 +57,58 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
return ACPI_HANDLE(gc->dev) == data;
}
+#ifdef CONFIG_PINCTRL
+/**
+ * acpi_gpiochip_pin_to_gpio_offset() - translates ACPI GPIO to Linux GPIO
+ * @chip: GPIO chip
+ * @pin: ACPI GPIO pin number from GpioIo/GpioInt resource
+ *
+ * Function takes ACPI GpioIo/GpioInt pin number as a parameter and
+ * translates it to a corresponding offset suitable to be passed to a
+ * GPIO controller driver.
+ *
+ * Typically the returned offset is the same as @pin, but if the GPIO
+ * controller uses a pin controller and the mapping is not contiguous, the
+ * offset might be different.
+ */
+static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
+{
+ struct gpio_pin_range *pin_range;
+
+ /* If there are no ranges in this chip, use 1:1 mapping */
+ if (list_empty(&chip->pin_ranges))
+ return pin;
+
+ list_for_each_entry(pin_range, &chip->pin_ranges, node) {
+ const struct pinctrl_gpio_range *range = &pin_range->range;
+ int i;
+
+ if (range->pins) {
+ for (i = 0; i < range->npins; i++) {
+ if (range->pins[i] == pin)
+ return range->base + i - chip->base;
+ }
+ } else {
+ if (pin >= range->pin_base &&
+ pin < range->pin_base + range->npins) {
+ unsigned gpio_base;
+
+ gpio_base = range->base - chip->base;
+ return gpio_base + pin - range->pin_base;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+#else
+static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip,
+ int pin)
+{
+ return pin;
+}
+#endif
+
/**
* acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
* @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
@@ -69,6 +123,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
struct gpio_chip *chip;
acpi_handle handle;
acpi_status status;
+ int offset;
status = acpi_get_handle(NULL, path, &handle);
if (ACPI_FAILURE(status))
@@ -78,10 +133,11 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
if (!chip)
return ERR_PTR(-ENODEV);
- if (pin < 0 || pin > chip->ngpio)
- return ERR_PTR(-EINVAL);
+ offset = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ if (offset < 0)
+ return ERR_PTR(offset);
- return gpiochip_get_desc(chip, pin);
+ return gpiochip_get_desc(chip, offset);
}
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
@@ -153,7 +209,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
- ret = gpio_lock_as_irq(chip, pin);
+ ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->dev, "Failed to lock GPIO as interrupt\n");
goto fail_free_desc;
@@ -209,7 +265,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
fail_free_event:
kfree(event);
fail_unlock_irq:
- gpio_unlock_as_irq(chip, pin);
+ gpiochip_unlock_as_irq(chip, pin);
fail_free_desc:
gpiochip_free_own_desc(desc);
@@ -280,7 +336,7 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
desc = event->desc;
if (WARN_ON(IS_ERR(desc)))
continue;
- gpio_unlock_as_irq(chip, event->pin);
+ gpiochip_unlock_as_irq(chip, event->pin);
gpiochip_free_own_desc(desc);
list_del(&event->node);
kfree(event);
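For the common case where a GPIO chip's pin range carries no explicit pin list, the translation added in acpi_gpiochip_pin_to_gpio_offset() reduces to plain offset arithmetic: (range->base - chip->base) + (pin - range->pin_base). A standalone sketch of just that branch (the struct mirrors only the fields the calculation touches, and the base/pin numbers are made up for illustration):

    #include <stdio.h>

    /* illustrative subset of pinctrl_gpio_range */
    struct range { int base, pin_base, npins; };

    static int pin_to_offset(int chip_base, const struct range *r, int pin)
    {
            if (pin < r->pin_base || pin >= r->pin_base + r->npins)
                    return -1;      /* pin not covered by this range */
            /* same arithmetic as the no-pin-list branch in gpiolib-acpi.c */
            return (r->base - chip_base) + (pin - r->pin_base);
    }

    int main(void)
    {
            struct range r = { .base = 416, .pin_base = 32, .npins = 16 };

            /* ACPI pin 40 -> GPIO offset (416 - 416) + (40 - 32) = 8 */
            printf("%d\n", pin_to_offset(416, &r, 40));
            return 0;
    }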
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 078ae6c2df79..8b830996fe02 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -24,6 +24,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
desc = gpio_to_desc(gpio);
+ /* Compatibility: assume unavailable "valid" GPIOs will appear later */
+ if (!desc && gpio_is_valid(gpio))
+ return -EPROBE_DEFER;
+
err = gpiod_request(desc, label);
if (err)
return err;
@@ -62,7 +66,13 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
int gpio_request(unsigned gpio, const char *label)
{
- return gpiod_request(gpio_to_desc(gpio), label);
+ struct gpio_desc *desc = gpio_to_desc(gpio);
+
+ /* Compatibility: assume unavailable "valid" GPIOs will appear later */
+ if (!desc && gpio_is_valid(gpio))
+ return -EPROBE_DEFER;
+
+ return gpiod_request(desc, label);
}
EXPORT_SYMBOL_GPL(gpio_request);
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 5f2150b619a7..2ac1800b58bb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
+ struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -161,7 +161,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
desc->flags &= ~GPIO_TRIGGER_MASK;
if (!gpio_flags) {
- gpio_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
ret = 0;
goto free_id;
}
@@ -200,7 +200,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
if (ret < 0)
goto free_id;
- ret = gpio_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
if (ret < 0) {
gpiod_warn(desc, "failed to flag the GPIO for IRQ\n");
goto free_id;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 58659dbe702a..487afe6f22fc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -47,8 +47,6 @@
*/
DEFINE_SPINLOCK(gpio_lock);
-static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
-
#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
static DEFINE_MUTEX(gpio_lookup_lock);
@@ -65,10 +63,24 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
*/
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- if (WARN(!gpio_is_valid(gpio), "invalid GPIO %d\n", gpio))
- return NULL;
- else
- return &gpio_desc[gpio];
+ struct gpio_chip *chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ list_for_each_entry(chip, &gpio_chips, list) {
+ if (chip->base <= gpio && chip->base + chip->ngpio > gpio) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return &chip->desc[gpio - chip->base];
+ }
+ }
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ if (!gpio_is_valid(gpio))
+ WARN(1, "invalid GPIO %d\n", gpio);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(gpio_to_desc);
@@ -91,7 +103,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
*/
int desc_to_gpio(const struct gpio_desc *desc)
{
- return desc - &gpio_desc[0];
+ return desc->chip->base + (desc - &desc->chip->desc[0]);
}
EXPORT_SYMBOL_GPL(desc_to_gpio);
@@ -138,7 +150,7 @@ static int gpiochip_find_base(int ngpio)
*
* This function may sleep if gpiod_cansleep() is true.
*/
-int gpiod_get_direction(const struct gpio_desc *desc)
+int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
@@ -154,13 +166,11 @@ int gpiod_get_direction(const struct gpio_desc *desc)
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
- /* FLAG_IS_OUT is just a cache of the result of get_direction(),
- * so it does not affect constness per se */
- clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
+ clear_bit(FLAG_IS_OUT, &desc->flags);
}
if (status == 0) {
/* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
+ set_bit(FLAG_IS_OUT, &desc->flags);
}
return status;
}
@@ -206,7 +216,7 @@ static int gpiochip_add_to_list(struct gpio_chip *chip)
/**
* gpiochip_add() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
- * Context: potentially before irqs or kmalloc will work
+ * Context: potentially before irqs will work
*
* Returns a negative errno if the chip can't be registered, such as
* because the chip->base is invalid or already associated with a
@@ -226,12 +236,11 @@ int gpiochip_add(struct gpio_chip *chip)
int status = 0;
unsigned id;
int base = chip->base;
+ struct gpio_desc *descs;
- if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1))
- && base >= 0) {
- status = -EINVAL;
- goto fail;
- }
+ descs = kcalloc(chip->ngpio, sizeof(descs[0]), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
spin_lock_irqsave(&gpio_lock, flags);
@@ -247,10 +256,8 @@ int gpiochip_add(struct gpio_chip *chip)
status = gpiochip_add_to_list(chip);
if (status == 0) {
- chip->desc = &gpio_desc[chip->base];
-
for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &chip->desc[id];
+ struct gpio_desc *desc = &descs[id];
desc->chip = chip;
/* REVISIT: most hardware initializes GPIOs as
@@ -266,6 +273,8 @@ int gpiochip_add(struct gpio_chip *chip)
}
}
+ chip->desc = descs;
+
spin_unlock_irqrestore(&gpio_lock, flags);
#ifdef CONFIG_PINCTRL
@@ -291,6 +300,9 @@ int gpiochip_add(struct gpio_chip *chip)
unlock:
spin_unlock_irqrestore(&gpio_lock, flags);
fail:
+ kfree(descs);
+ chip->desc = NULL;
+
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -331,6 +343,9 @@ void gpiochip_remove(struct gpio_chip *chip)
list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
gpiochip_unexport(chip);
+
+ kfree(chip->desc);
+ chip->desc = NULL;
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -495,7 +510,7 @@ static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(chip, d->hwirq)) {
+ if (gpiochip_lock_as_irq(chip, d->hwirq)) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -508,7 +523,7 @@ static void gpiochip_irq_relres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- gpio_unlock_as_irq(chip, d->hwirq);
+ gpiochip_unlock_as_irq(chip, d->hwirq);
}
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -1254,6 +1269,88 @@ static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
chip->set(chip, gpio_chip_hwgpio(desc), value);
}
+/*
+ * Set multiple outputs on the same chip.
+ * Use the chip's set_multiple callback if available;
+ * otherwise set the outputs sequentially.
+ * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word;
+ * set bits select which outputs are to be changed
+ * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word;
+ * holds the values the outputs selected by @mask are to be set to
+ */
+static void gpio_chip_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ if (chip->set_multiple) {
+ chip->set_multiple(chip, mask, bits);
+ } else {
+ int i;
+ for (i = 0; i < chip->ngpio; i++) {
+ if (mask[BIT_WORD(i)] == 0) {
+ /* no more set bits in this mask word;
+ * skip ahead to the next word */
+ i = (BIT_WORD(i) + 1) * BITS_PER_LONG - 1;
+ continue;
+ }
+ /* set outputs if the corresponding mask bit is set */
+ if (__test_and_clear_bit(i, mask)) {
+ chip->set(chip, i, test_bit(i, bits));
+ }
+ }
+ }
+}
+
+static void gpiod_set_array_priv(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ int i = 0;
+
+ while (i < array_size) {
+ struct gpio_chip *chip = desc_array[i]->chip;
+ unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
+ unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
+ int count = 0;
+
+ if (!can_sleep) {
+ WARN_ON(chip->can_sleep);
+ }
+ memset(mask, 0, sizeof(mask));
+ do {
+ struct gpio_desc *desc = desc_array[i];
+ int hwgpio = gpio_chip_hwgpio(desc);
+ int value = value_array[i];
+
+ if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ trace_gpio_value(desc_to_gpio(desc), 0, value);
+ /*
+ * collect all normal outputs belonging to the same chip;
+ * open drain and open source outputs are set individually
+ */
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ _gpio_set_open_drain_value(desc, value);
+ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+ _gpio_set_open_source_value(desc, value);
+ } else {
+ __set_bit(hwgpio, mask);
+ if (value) {
+ __set_bit(hwgpio, bits);
+ } else {
+ __clear_bit(hwgpio, bits);
+ }
+ count++;
+ }
+ i++;
+ } while ((i < array_size) && (desc_array[i]->chip == chip));
+ /* push collected bits to outputs */
+ if (count != 0) {
+ gpio_chip_set_multiple(chip, mask, bits);
+ }
+ }
+}
+
/**
* gpiod_set_raw_value() - assign a gpio's raw value
* @desc: gpio whose value will be assigned
@@ -1299,6 +1396,48 @@ void gpiod_set_value(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_value);
/**
+ * gpiod_set_raw_array() - assign values to an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be assigned
+ * @value_array: array of values to assign
+ *
+ * Set the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status.
+ *
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
+ */
+void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return;
+ gpiod_set_array_priv(true, false, array_size, desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_raw_array);
+
+/**
+ * gpiod_set_array() - assign values to an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be assigned
+ * @value_array: array of values to assign
+ *
+ * Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account.
+ *
+ * This function should be called from contexts where we cannot sleep, and will
+ * complain if the GPIO chip functions potentially sleep.
+ */
+void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return;
+ gpiod_set_array_priv(false, false, array_size, desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_array);
+
+/**
* gpiod_cansleep() - report whether gpio value access may sleep
* @desc: gpio to check
*
@@ -1332,14 +1471,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_irq);
/**
- * gpio_lock_as_irq() - lock a GPIO to be used as IRQ
+ * gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ
* @chip: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to lock down
* a certain GPIO line to be used for IRQs.
*/
-int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
if (offset >= chip->ngpio)
return -EINVAL;
@@ -1354,24 +1493,24 @@ int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
return 0;
}
-EXPORT_SYMBOL_GPL(gpio_lock_as_irq);
+EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
/**
- * gpio_unlock_as_irq() - unlock a GPIO used as IRQ
+ * gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ
* @chip: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to indicate
* that a certain GPIO is no longer used exclusively for IRQ.
*/
-void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
if (offset >= chip->ngpio)
return;
clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
}
-EXPORT_SYMBOL_GPL(gpio_unlock_as_irq);
+EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
/**
* gpiod_get_raw_value_cansleep() - return a gpio's raw value
@@ -1458,6 +1597,50 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
/**
+ * gpiod_set_raw_array_cansleep() - assign values to an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be assigned
+ * @value_array: array of values to assign
+ *
+ * Set the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return;
+ gpiod_set_array_priv(true, true, array_size, desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_raw_array_cansleep);
+
+/**
+ * gpiod_set_array_cansleep() - assign values to an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be assigned
+ * @value_array: array of values to assign
+ *
+ * Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return;
+ gpiod_set_array_priv(false, true, array_size, desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_array_cansleep);
+
+/**
* gpiod_add_lookup_table() - register GPIO device consumers
* @table: table of consumers to register
*/
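
A hedged usage sketch of the new array setters added above; it assumes a driver already holds four descriptors (obtained elsewhere, e.g. via gpiod_get_index()) and wants to write a 4-bit selector value in a single call:

#include <linux/bitops.h>
#include <linux/gpio/consumer.h>

/* illustrative helper, not part of the patch */
static void foo_write_selector(struct gpio_desc **sel, unsigned int val)
{
        int values[4];
        unsigned int i;

        for (i = 0; i < 4; i++)
                values[i] = !!(val & BIT(i));

        /* logical values: ACTIVE_LOW descriptors are inverted internally */
        gpiod_set_array_cansleep(4, sel, values);
}
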
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e3b4b0f02b3d..c3413b6adb17 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -167,6 +167,8 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
+source "drivers/gpu/drm/rockchip/Kconfig"
+
source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
@@ -200,3 +202,7 @@ source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+
+source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+
+source "drivers/gpu/drm/imx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9292a761ea6d..66e40398b3d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_modeset_lock.o
+ drm_modeset_lock.o drm_atomic.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o drm_dp_mst_topology.o
+ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
@@ -62,6 +63,8 @@ obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
deleted file mode 100644
index b5b332722581..000000000000
--- a/drivers/gpu/drm/README.drm
+++ /dev/null
@@ -1,43 +0,0 @@
-************************************************************
-* For the very latest on DRI development, please see: *
-* http://dri.freedesktop.org/ *
-************************************************************
-
-The Direct Rendering Manager (drm) is a device-independent kernel-level
-device driver that provides support for the XFree86 Direct Rendering
-Infrastructure (DRI).
-
-The DRM supports the Direct Rendering Infrastructure (DRI) in four major
-ways:
-
- 1. The DRM provides synchronized access to the graphics hardware via
- the use of an optimized two-tiered lock.
-
- 2. The DRM enforces the DRI security policy for access to the graphics
- hardware by only allowing authenticated X11 clients access to
- restricted regions of memory.
-
- 3. The DRM provides a generic DMA engine, complete with multiple
- queues and the ability to detect the need for an OpenGL context
- switch.
-
- 4. The DRM is extensible via the use of small device-specific modules
- that rely extensively on the API exported by the DRM module.
-
-
-Documentation on the DRI is available from:
- http://dri.freedesktop.org/wiki/Documentation
- http://sourceforge.net/project/showfiles.php?group_id=387
- http://dri.sourceforge.net/doc/
-
-For specific information about kernel-level support, see:
-
- The Direct Rendering Manager, Kernel Support for the Direct Rendering
- Infrastructure
- http://dri.sourceforge.net/doc/drm_low_level.html
-
- Hardware Locking for the Direct Rendering Infrastructure
- http://dri.sourceforge.net/doc/hardware_locking_low_level.html
-
- A Security Analysis of the Direct Rendering Infrastructure
- http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
new file mode 100644
index 000000000000..8dfac37ff327
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -0,0 +1,9 @@
+#
+# Heterogeneous system architecture configuration
+#
+
+config HSA_AMD
+ tristate "HSA kernel driver for AMD GPU devices"
+ depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+ help
+ Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
new file mode 100644
index 000000000000..be6246de5091
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Heterogeneous System Architecture support for AMD GPU devices
+#
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+
+amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
+ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
+ kfd_process.o kfd_queue.o kfd_mqd_manager.o \
+ kfd_kernel_queue.o kfd_packet_manager.o \
+ kfd_process_queue_manager.o kfd_device_queue_manager.o \
+ kfd_interrupt.o
+
+obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
new file mode 100644
index 000000000000..607fc5ceadbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CIK_REGS_H
+#define CIK_REGS_H
+
+#define IH_VMID_0_LUT 0x3D40u
+
+#define BIF_DOORBELL_CNTL 0x530Cu
+
+#define SRBM_GFX_CNTL 0xE44
+#define PIPEID(x) ((x) << 0)
+#define MEID(x) ((x) << 2)
+#define VMID(x) ((x) << 4)
+#define QUEUEID(x) ((x) << 8)
+
+#define SQ_CONFIG 0x8C00
+
+#define SH_MEM_BASES 0x8C28
+/* if PTR32, these are the bases for scratch and lds */
+#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
+#define SHARED_BASE(x) ((x) << 16) /* LDS */
+#define SH_MEM_APE1_BASE 0x8C2C
+/* if PTR32, this is the base location of GPUVM */
+#define SH_MEM_APE1_LIMIT 0x8C30
+/* if PTR32, this is the upper limit of GPUVM */
+#define SH_MEM_CONFIG 0x8C34
+#define PTR32 (1 << 0)
+#define PRIVATE_ATC (1 << 1)
+#define ALIGNMENT_MODE(x) ((x) << 2)
+#define SH_MEM_ALIGNMENT_MODE_DWORD 0
+#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
+#define SH_MEM_ALIGNMENT_MODE_STRICT 2
+#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
+#define DEFAULT_MTYPE(x) ((x) << 4)
+#define APE1_MTYPE(x) ((x) << 7)
+
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define MTYPE_CACHED 0
+#define MTYPE_NONCACHED 3
+
+
+#define SH_STATIC_MEM_CONFIG 0x9604u
+
+#define TC_CFG_L1_LOAD_POLICY0 0xAC68
+#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
+#define TC_CFG_L1_STORE_POLICY 0xAC70
+#define TC_CFG_L2_LOAD_POLICY0 0xAC74
+#define TC_CFG_L2_LOAD_POLICY1 0xAC78
+#define TC_CFG_L2_STORE_POLICY0 0xAC7C
+#define TC_CFG_L2_STORE_POLICY1 0xAC80
+#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
+#define TC_CFG_L1_VOLATILE 0xAC88
+#define TC_CFG_L2_VOLATILE 0xAC8C
+
+#define CP_PQ_WPTR_POLL_CNTL 0xC20C
+#define WPTR_POLL_EN (1 << 31)
+
+#define CPC_INT_CNTL 0xC2D0
+#define CP_ME1_PIPE0_INT_CNTL 0xC214
+#define CP_ME1_PIPE1_INT_CNTL 0xC218
+#define CP_ME1_PIPE2_INT_CNTL 0xC21C
+#define CP_ME1_PIPE3_INT_CNTL 0xC220
+#define CP_ME2_PIPE0_INT_CNTL 0xC224
+#define CP_ME2_PIPE1_INT_CNTL 0xC228
+#define CP_ME2_PIPE2_INT_CNTL 0xC22C
+#define CP_ME2_PIPE3_INT_CNTL 0xC230
+#define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
+#define PRIV_REG_INT_ENABLE (1 << 23)
+#define TIME_STAMP_INT_ENABLE (1 << 26)
+#define GENERIC2_INT_ENABLE (1 << 29)
+#define GENERIC1_INT_ENABLE (1 << 30)
+#define GENERIC0_INT_ENABLE (1 << 31)
+#define CP_ME1_PIPE0_INT_STATUS 0xC214
+#define CP_ME1_PIPE1_INT_STATUS 0xC218
+#define CP_ME1_PIPE2_INT_STATUS 0xC21C
+#define CP_ME1_PIPE3_INT_STATUS 0xC220
+#define CP_ME2_PIPE0_INT_STATUS 0xC224
+#define CP_ME2_PIPE1_INT_STATUS 0xC228
+#define CP_ME2_PIPE2_INT_STATUS 0xC22C
+#define CP_ME2_PIPE3_INT_STATUS 0xC230
+#define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
+#define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
+#define PRIV_REG_INT_STATUS (1 << 23)
+#define TIME_STAMP_INT_STATUS (1 << 26)
+#define GENERIC2_INT_STATUS (1 << 29)
+#define GENERIC1_INT_STATUS (1 << 30)
+#define GENERIC0_INT_STATUS (1 << 31)
+
+#define CP_HPD_EOP_BASE_ADDR 0xC904
+#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
+#define CP_HPD_EOP_VMID 0xC90C
+#define CP_HPD_EOP_CONTROL 0xC910
+#define EOP_SIZE(x) ((x) << 0)
+#define EOP_SIZE_MASK (0x3f << 0)
+#define CP_MQD_BASE_ADDR 0xC914
+#define CP_MQD_BASE_ADDR_HI 0xC918
+#define CP_HQD_ACTIVE 0xC91C
+#define CP_HQD_VMID 0xC920
+
+#define CP_HQD_PERSISTENT_STATE 0xC924u
+#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
+#define PRELOAD_REQ (1 << 0)
+
+#define CP_HQD_PIPE_PRIORITY 0xC928u
+#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
+#define CP_HQD_QUANTUM 0xC930u
+#define QUANTUM_EN 1U
+#define QUANTUM_SCALE_1MS (1U << 4)
+#define QUANTUM_DURATION(x) ((x) << 8)
+
+#define CP_HQD_PQ_BASE 0xC934
+#define CP_HQD_PQ_BASE_HI 0xC938
+#define CP_HQD_PQ_RPTR 0xC93C
+#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
+#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
+#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
+#define DOORBELL_OFFSET(x) ((x) << 2)
+#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
+#define DOORBELL_SOURCE (1 << 28)
+#define DOORBELL_SCHD_HIT (1 << 29)
+#define DOORBELL_EN (1 << 30)
+#define DOORBELL_HIT (1 << 31)
+#define CP_HQD_PQ_WPTR 0xC954
+#define CP_HQD_PQ_CONTROL 0xC958
+#define QUEUE_SIZE(x) ((x) << 0)
+#define QUEUE_SIZE_MASK (0x3f << 0)
+#define RPTR_BLOCK_SIZE(x) ((x) << 8)
+#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
+#define MIN_AVAIL_SIZE(x) ((x) << 20)
+#define PQ_ATC_EN (1 << 23)
+#define PQ_VOLATILE (1 << 26)
+#define NO_UPDATE_RPTR (1 << 27)
+#define UNORD_DISPATCH (1 << 28)
+#define ROQ_PQ_IB_FLIP (1 << 29)
+#define PRIV_STATE (1 << 30)
+#define KMD_QUEUE (1 << 31)
+
+#define DEFAULT_RPTR_BLOCK_SIZE RPTR_BLOCK_SIZE(5)
+#define DEFAULT_MIN_AVAIL_SIZE MIN_AVAIL_SIZE(3)
+
+#define CP_HQD_IB_BASE_ADDR 0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
+#define CP_HQD_IB_RPTR 0xC964u
+#define CP_HQD_IB_CONTROL 0xC968u
+#define IB_ATC_EN (1U << 23)
+#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define DEQUEUE_REQUEST_DRAIN 1
+#define DEQUEUE_REQUEST_RESET 2
+#define DEQUEUE_INT (1U << 8)
+
+#define CP_HQD_SEMA_CMD 0xC97Cu
+#define CP_HQD_MSG_TYPE 0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
+#define CP_HQD_HQ_SCHEDULER0 0xC994u
+#define CP_HQD_HQ_SCHEDULER1 0xC998u
+
+
+#define CP_MQD_CONTROL 0xC99C
+#define MQD_VMID(x) ((x) << 0)
+#define MQD_VMID_MASK (0xf << 0)
+#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
+
+#define GRBM_GFX_INDEX 0x30800
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
+#define SQC_CACHES 0x30d20
+#define SQC_POLICY 0x8C38u
+#define SQC_VOLATILE 0x8C3Cu
+
+#define CP_PERFMON_CNTL 0x36020
+
+#define ATC_VMID0_PASID_MAPPING 0x339Cu
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
+#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL 0x3310u
+#define ATS_ACCESS_MODE_NEVER 0
+#define ATS_ACCESS_MODE_ALWAYS 1
+
+#define ATC_VM_APERTURE0_CNTL2 0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
+#define ATC_VM_APERTURE1_CNTL 0x3314u
+#define ATC_VM_APERTURE1_CNTL2 0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+
+#endif
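
As a reading aid, a hedged sketch of how the field macros above compose into a register value; the offset passed in is an arbitrary example, not a real doorbell assignment:

#include <linux/types.h>
#include "cik_regs.h"

/* illustration only: build a CP_HQD_PQ_DOORBELL_CONTROL value */
static uint32_t example_doorbell_control(uint32_t doorbell_off)
{
        uint32_t reg = 0;

        reg |= DOORBELL_OFFSET(doorbell_off) & DOORBELL_OFFSET_MASK;
        reg |= DOORBELL_EN; /* enable the doorbell for this queue */

        return reg;
}
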
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
new file mode 100644
index 000000000000..4f7b275f2f7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+static long kfd_ioctl(struct file *, unsigned int, unsigned long);
+static int kfd_open(struct inode *, struct file *);
+static int kfd_mmap(struct file *, struct vm_area_struct *);
+
+static const char kfd_dev_name[] = "kfd";
+
+static const struct file_operations kfd_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = kfd_ioctl,
+ .compat_ioctl = kfd_ioctl,
+ .open = kfd_open,
+ .mmap = kfd_mmap,
+};
+
+static int kfd_char_dev_major = -1;
+static struct class *kfd_class;
+struct device *kfd_device;
+
+int kfd_chardev_init(void)
+{
+ int err = 0;
+
+ kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
+ err = kfd_char_dev_major;
+ if (err < 0)
+ goto err_register_chrdev;
+
+ kfd_class = class_create(THIS_MODULE, kfd_dev_name);
+ err = PTR_ERR(kfd_class);
+ if (IS_ERR(kfd_class))
+ goto err_class_create;
+
+ kfd_device = device_create(kfd_class, NULL,
+ MKDEV(kfd_char_dev_major, 0),
+ NULL, kfd_dev_name);
+ err = PTR_ERR(kfd_device);
+ if (IS_ERR(kfd_device))
+ goto err_device_create;
+
+ return 0;
+
+err_device_create:
+ class_destroy(kfd_class);
+err_class_create:
+ unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+err_register_chrdev:
+ return err;
+}
+
+void kfd_chardev_exit(void)
+{
+ device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
+ class_destroy(kfd_class);
+ unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+}
+
+struct device *kfd_chardev(void)
+{
+ return kfd_device;
+}
+
+
+static int kfd_open(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process;
+ bool is_32bit_user_mode;
+
+ if (iminor(inode) != 0)
+ return -ENODEV;
+
+ is_32bit_user_mode = is_compat_task();
+
+ if (is_32bit_user_mode) {
+ dev_warn(kfd_device,
+ "Process %d (32-bit) failed to open /dev/kfd\n"
+ "32-bit processes are not supported by amdkfd\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ process = kfd_create_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ process->is_32bit_user_mode = is_32bit_user_mode;
+
+ dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
+ process->pasid, process->is_32bit_user_mode);
+
+ kfd_init_apertures(process);
+
+ return 0;
+}
+
+static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void __user *arg)
+{
+ struct kfd_ioctl_get_version_args args;
+ int err = 0;
+
+ args.major_version = KFD_IOCTL_MAJOR_VERSION;
+ args.minor_version = KFD_IOCTL_MINOR_VERSION;
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int set_queue_properties_from_user(struct queue_properties *q_properties,
+ struct kfd_ioctl_create_queue_args *args)
+{
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ return -EINVAL;
+ }
+
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ return -EINVAL;
+ }
+
+ if ((args->ring_base_address) &&
+ (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->ring_base_address,
+ sizeof(uint64_t)))) {
+ pr_err("kfd: can't access ring base address\n");
+ return -EFAULT;
+ }
+
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
+ pr_err("kfd: ring size must be a power of 2 or 0\n");
+ return -EINVAL;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->read_pointer_address,
+ sizeof(uint32_t))) {
+ pr_err("kfd: can't access read pointer\n");
+ return -EFAULT;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (const void __user *) args->write_pointer_address,
+ sizeof(uint32_t))) {
+ pr_err("kfd: can't access write pointer\n");
+ return -EFAULT;
+ }
+
+ q_properties->is_interop = false;
+ q_properties->queue_percent = args->queue_percentage;
+ q_properties->priority = args->queue_priority;
+ q_properties->queue_address = args->ring_base_address;
+ q_properties->queue_size = args->ring_size;
+ q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
+ q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+ if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
+ args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+ q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+ else
+ return -ENOTSUPP;
+
+ if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+ q_properties->format = KFD_QUEUE_FORMAT_AQL;
+ else
+ q_properties->format = KFD_QUEUE_FORMAT_PM4;
+
+ pr_debug("Queue Percentage (%d, %d)\n",
+ q_properties->queue_percent, args->queue_percentage);
+
+ pr_debug("Queue Priority (%d, %d)\n",
+ q_properties->priority, args->queue_priority);
+
+ pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+ q_properties->queue_address, args->ring_base_address);
+
+ pr_debug("Queue Size (0x%llX, %u)\n",
+ q_properties->queue_size, args->ring_size);
+
+ pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
+ (uint64_t) q_properties->read_ptr,
+ (uint64_t) q_properties->write_ptr);
+
+ pr_debug("Queue Format (%d)\n", q_properties->format);
+
+ return 0;
+}
+
+static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void __user *arg)
+{
+ struct kfd_ioctl_create_queue_args args;
+ struct kfd_dev *dev;
+ int err = 0;
+ unsigned int queue_id;
+ struct kfd_process_device *pdd;
+ struct queue_properties q_properties;
+
+ memset(&q_properties, 0, sizeof(struct queue_properties));
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ pr_debug("kfd: creating queue ioctl\n");
+
+ err = set_queue_properties_from_user(&q_properties, &args);
+ if (err)
+ return err;
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto err_bind_process;
+ }
+
+ pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+ p->pasid,
+ dev->id);
+
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
+ KFD_QUEUE_TYPE_COMPUTE, &queue_id);
+ if (err != 0)
+ goto err_create_queue;
+
+ args.queue_id = queue_id;
+
+ /* Return gpu_id as doorbell offset for mmap usage */
+ args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
+
+ if (copy_to_user(arg, &args, sizeof(args))) {
+ err = -EFAULT;
+ goto err_copy_args_out;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+
+ pr_debug("ring buffer address == 0x%016llX\n",
+ args.ring_base_address);
+
+ pr_debug("read ptr address == 0x%016llX\n",
+ args.read_pointer_address);
+
+ pr_debug("write ptr address == 0x%016llX\n",
+ args.write_pointer_address);
+
+ return 0;
+
+err_copy_args_out:
+ pqm_destroy_queue(&p->pqm, queue_id);
+err_create_queue:
+err_bind_process:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
+ void __user *arg)
+{
+ int retval;
+ struct kfd_ioctl_destroy_queue_args args;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ pr_debug("kfd: destroying queue id %d for PASID %d\n",
+ args.queue_id,
+ p->pasid);
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+
+ mutex_unlock(&p->mutex);
+ return retval;
+}
+
+static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
+ void __user *arg)
+{
+ int retval;
+ struct kfd_ioctl_update_queue_args args;
+ struct queue_properties properties;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ return -EINVAL;
+ }
+
+ if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ return -EINVAL;
+ }
+
+ if ((args.ring_base_address) &&
+ (!access_ok(VERIFY_WRITE,
+ (const void __user *) args.ring_base_address,
+ sizeof(uint64_t)))) {
+ pr_err("kfd: can't access ring base address\n");
+ return -EFAULT;
+ }
+
+ if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ pr_err("kfd: ring size must be a power of 2 or 0\n");
+ return -EINVAL;
+ }
+
+ properties.queue_address = args.ring_base_address;
+ properties.queue_size = args.ring_size;
+ properties.queue_percent = args.queue_percentage;
+ properties.priority = args.queue_priority;
+
+ pr_debug("kfd: updating queue id %d for PASID %d\n",
+ args.queue_id, p->pasid);
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ return retval;
+}
+
+static long kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_dev *dev;
+ int err = 0;
+ struct kfd_process_device *pdd;
+ enum cache_policy default_policy, alternate_policy;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ return -EINVAL;
+ }
+
+ if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ return -EINVAL;
+ }
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto out;
+ }
+
+ default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ ? cache_policy_coherent : cache_policy_noncoherent;
+
+ alternate_policy =
+ (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ ? cache_policy_coherent : cache_policy_noncoherent;
+
+ if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+ &pdd->qpd,
+ default_policy,
+ alternate_policy,
+ (void __user *)args.alternate_aperture_base,
+ args.alternate_aperture_size))
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&p->mutex);
+
+ return err;
+}
+
+static long kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_dev *dev;
+ struct timespec time;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ dev = kfd_device_by_id(args.gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ /* Reading GPU clock counter from KGD */
+ args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+
+ /* No access to rdtsc. Using raw monotonic time */
+ getrawmonotonic(&time);
+ args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+ get_monotonic_boottime(&time);
+ args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+
+ /* Since the counter is in nanoseconds, we report a 1GHz frequency */
+ args.system_clock_freq = 1000000000;
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+
+static int kfd_ioctl_get_process_apertures(struct file *filp,
+ struct kfd_process *p, void __user *arg)
+{
+ struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_process_device_apertures *pAperture;
+ struct kfd_process_device *pdd;
+
+ dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ args.num_of_nodes = 0;
+
+ mutex_lock(&p->mutex);
+
+ /* if the process-device list isn't empty */
+ if (kfd_has_process_device_data(p)) {
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+ do {
+ pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture->gpu_id = pdd->dev->id;
+ pAperture->lds_base = pdd->lds_base;
+ pAperture->lds_limit = pdd->lds_limit;
+ pAperture->gpuvm_base = pdd->gpuvm_base;
+ pAperture->gpuvm_limit = pdd->gpuvm_limit;
+ pAperture->scratch_base = pdd->scratch_base;
+ pAperture->scratch_limit = pdd->scratch_limit;
+
+ dev_dbg(kfd_device,
+ "node id %u\n", args.num_of_nodes);
+ dev_dbg(kfd_device,
+ "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device,
+ "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device,
+ "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device,
+ "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device,
+ "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device,
+ "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+
+ args.num_of_nodes++;
+ } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
+ (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ }
+
+ mutex_unlock(&p->mutex);
+
+ if (copy_to_user(arg, &args, sizeof(args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct kfd_process *process;
+ long err = -EINVAL;
+
+ dev_dbg(kfd_device,
+ "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
+ cmd, _IOC_NR(cmd), arg);
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ switch (cmd) {
+ case KFD_IOC_GET_VERSION:
+ err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
+ break;
+ case KFD_IOC_CREATE_QUEUE:
+ err = kfd_ioctl_create_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_DESTROY_QUEUE:
+ err = kfd_ioctl_destroy_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_SET_MEMORY_POLICY:
+ err = kfd_ioctl_set_memory_policy(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_GET_CLOCK_COUNTERS:
+ err = kfd_ioctl_get_clock_counters(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_GET_PROCESS_APERTURES:
+ err = kfd_ioctl_get_process_apertures(filep, process,
+ (void __user *)arg);
+ break;
+
+ case KFD_IOC_UPDATE_QUEUE:
+ err = kfd_ioctl_update_queue(filep, process,
+ (void __user *)arg);
+ break;
+
+ default:
+ dev_err(kfd_device,
+ "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
+ cmd, arg);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0)
+ dev_err(kfd_device,
+ "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
+ err, cmd, _IOC_NR(cmd));
+
+ return err;
+}
+
+static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct kfd_process *process;
+
+ process = kfd_get_process(current);
+ if (IS_ERR(process))
+ return PTR_ERR(process);
+
+ return kfd_doorbell_mmap(process, vma);
+}
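
For orientation, a hedged userspace sketch of the simplest path through this ioctl handler, the version query; the command macro and argument struct come from the uapi/linux/kfd_ioctl.h header included above, and error handling is kept minimal:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
        struct kfd_ioctl_get_version_args args = {0};
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0)
                return 1;

        /* kfd_ioctl_get_version() copies major/minor back to userspace */
        if (ioctl(fd, KFD_IOC_GET_VERSION, &args) == 0)
                printf("kfd ioctl version %u.%u\n",
                       args.major_version, args.minor_version);

        close(fd);
        return 0;
}
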
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
new file mode 100644
index 000000000000..a374fa3d3ee6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_CRAT_H_INCLUDED
+#define KFD_CRAT_H_INCLUDED
+
+#include <linux/types.h>
+
+#pragma pack(1)
+
+/*
+ * 4CC signature values for the CRAT and CDIT ACPI tables
+ */
+
+#define CRAT_SIGNATURE "CRAT"
+#define CDIT_SIGNATURE "CDIT"
+
+/*
+ * Component Resource Association Table (CRAT)
+ */
+
+#define CRAT_OEMID_LENGTH 6
+#define CRAT_OEMTABLEID_LENGTH 8
+#define CRAT_RESERVED_LENGTH 6
+
+#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+
+struct crat_header {
+ uint32_t signature;
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t oem_id[CRAT_OEMID_LENGTH];
+ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
+ uint32_t creator_id;
+ uint32_t creator_revision;
+ uint32_t total_entries;
+ uint16_t num_domains;
+ uint8_t reserved[CRAT_RESERVED_LENGTH];
+};
+
+/*
+ * The header structure is immediately followed by total_entries of the
+ * data definitions
+ */
+
+/*
+ * The currently defined subtype entries in the CRAT
+ */
+#define CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY 0
+#define CRAT_SUBTYPE_MEMORY_AFFINITY 1
+#define CRAT_SUBTYPE_CACHE_AFFINITY 2
+#define CRAT_SUBTYPE_TLB_AFFINITY 3
+#define CRAT_SUBTYPE_CCOMPUTE_AFFINITY 4
+#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
+#define CRAT_SUBTYPE_MAX 6
+
+#define CRAT_SIBLINGMAP_SIZE 32
+
+/*
+ * ComputeUnit Affinity structure and definitions
+ */
+#define CRAT_CU_FLAGS_ENABLED 0x00000001
+#define CRAT_CU_FLAGS_HOT_PLUGGABLE 0x00000002
+#define CRAT_CU_FLAGS_CPU_PRESENT 0x00000004
+#define CRAT_CU_FLAGS_GPU_PRESENT 0x00000008
+#define CRAT_CU_FLAGS_IOMMU_PRESENT 0x00000010
+#define CRAT_CU_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_COMPUTEUNIT_RESERVED_LENGTH 4
+
+struct crat_subtype_computeunit {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t proximity_domain;
+ uint32_t processor_id_low;
+ uint16_t num_cpu_cores;
+ uint16_t num_simd_cores;
+ uint16_t max_waves_simd;
+ uint16_t io_count;
+ uint16_t hsa_capability;
+ uint16_t lds_size_in_kb;
+ uint8_t wave_front_size;
+ uint8_t num_banks;
+ uint16_t micro_engine_id;
+ uint8_t num_arrays;
+ uint8_t num_cu_per_array;
+ uint8_t num_simd_per_cu;
+ uint8_t max_slots_scatch_cu;
+ uint8_t reserved2[CRAT_COMPUTEUNIT_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Memory Affinity structure and definitions
+ */
+#define CRAT_MEM_FLAGS_ENABLED 0x00000001
+#define CRAT_MEM_FLAGS_HOT_PLUGGABLE 0x00000002
+#define CRAT_MEM_FLAGS_NON_VOLATILE 0x00000004
+#define CRAT_MEM_FLAGS_RESERVED 0xfffffff8
+
+#define CRAT_MEMORY_RESERVED_LENGTH 8
+
+struct crat_subtype_memory {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t promixity_domain;
+ uint32_t base_addr_low;
+ uint32_t base_addr_high;
+ uint32_t length_low;
+ uint32_t length_high;
+ uint32_t width;
+ uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH];
+};
+
+/*
+ * HSA Cache Affinity structure and definitions
+ */
+#define CRAT_CACHE_FLAGS_ENABLED 0x00000001
+#define CRAT_CACHE_FLAGS_DATA_CACHE 0x00000002
+#define CRAT_CACHE_FLAGS_INST_CACHE 0x00000004
+#define CRAT_CACHE_FLAGS_CPU_CACHE 0x00000008
+#define CRAT_CACHE_FLAGS_SIMD_CACHE 0x00000010
+#define CRAT_CACHE_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_CACHE_RESERVED_LENGTH 8
+
+struct crat_subtype_cache {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t cache_size;
+ uint8_t cache_level;
+ uint8_t lines_per_tag;
+ uint16_t cache_line_size;
+ uint8_t associativity;
+ uint8_t cache_properties;
+ uint16_t cache_latency;
+ uint8_t reserved2[CRAT_CACHE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA TLB Affinity structure and definitions
+ */
+#define CRAT_TLB_FLAGS_ENABLED 0x00000001
+#define CRAT_TLB_FLAGS_DATA_TLB 0x00000002
+#define CRAT_TLB_FLAGS_INST_TLB 0x00000004
+#define CRAT_TLB_FLAGS_CPU_TLB 0x00000008
+#define CRAT_TLB_FLAGS_SIMD_TLB 0x00000010
+#define CRAT_TLB_FLAGS_RESERVED 0xffffffe0
+
+#define CRAT_TLB_RESERVED_LENGTH 4
+
+struct crat_subtype_tlb {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t tlb_level;
+ uint8_t data_tlb_associativity_2mb;
+ uint8_t data_tlb_size_2mb;
+ uint8_t instruction_tlb_associativity_2mb;
+ uint8_t instruction_tlb_size_2mb;
+ uint8_t data_tlb_associativity_4k;
+ uint8_t data_tlb_size_4k;
+ uint8_t instruction_tlb_associativity_4k;
+ uint8_t instruction_tlb_size_4k;
+ uint8_t data_tlb_associativity_1gb;
+ uint8_t data_tlb_size_1gb;
+ uint8_t instruction_tlb_associativity_1gb;
+ uint8_t instruction_tlb_size_1gb;
+ uint8_t reserved2[CRAT_TLB_RESERVED_LENGTH];
+};
+
+/*
+ * HSA CCompute/APU Affinity structure and definitions
+ */
+#define CRAT_CCOMPUTE_FLAGS_ENABLED 0x00000001
+#define CRAT_CCOMPUTE_FLAGS_RESERVED 0xfffffffe
+
+#define CRAT_CCOMPUTE_RESERVED_LENGTH 16
+
+struct crat_subtype_ccompute {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t processor_id_low;
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ uint32_t apu_size;
+ uint8_t reserved2[CRAT_CCOMPUTE_RESERVED_LENGTH];
+};
+
+/*
+ * HSA IO Link Affinity structure and definitions
+ */
+#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001
+#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002
+#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc
+
+/*
+ * IO interface types
+ */
+#define CRAT_IOLINK_TYPE_UNDEFINED 0
+#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
+#define CRAT_IOLINK_TYPE_PCIEXPRESS 2
+#define CRAT_IOLINK_TYPE_OTHER 3
+#define CRAT_IOLINK_TYPE_MAX 255
+
+#define CRAT_IOLINK_RESERVED_LENGTH 24
+
+struct crat_subtype_iolink {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+ uint32_t proximity_domain_from;
+ uint32_t proximity_domain_to;
+ uint8_t io_interface_type;
+ uint8_t version_major;
+ uint16_t version_minor;
+ uint32_t minimum_latency;
+ uint32_t maximum_latency;
+ uint32_t minimum_bandwidth_mbs;
+ uint32_t maximum_bandwidth_mbs;
+ uint32_t recommended_transfer_size;
+ uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH];
+};
+
+/*
+ * HSA generic sub-type header
+ */
+
+#define CRAT_SUBTYPE_FLAGS_ENABLED 0x00000001
+
+struct crat_subtype_generic {
+ uint8_t type;
+ uint8_t length;
+ uint16_t reserved;
+ uint32_t flags;
+};
+
+/*
+ * Component Locality Distance Information Table (CDIT)
+ */
+#define CDIT_OEMID_LENGTH 6
+#define CDIT_OEMTABLEID_LENGTH 8
+
+struct cdit_header {
+ uint32_t signature;
+ uint32_t length;
+ uint8_t revision;
+ uint8_t checksum;
+ uint8_t oem_id[CDIT_OEMID_LENGTH];
+ uint8_t oem_table_id[CDIT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
+ uint32_t creator_id;
+ uint32_t creator_revision;
+ uint32_t total_entries;
+ uint16_t num_domains;
+ uint8_t entry[1];
+};
+
+#pragma pack()
+
+#endif /* KFD_CRAT_H_INCLUDED */
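
A hedged sketch of how a parser might walk the variable-length subtype entries that follow a crat_header, using the generic subtype layout defined above; the buffer is assumed to already hold a complete, length-validated table:

#include <linux/types.h>
#include "kfd_crat.h"

/* illustration: visit each enabled compute-unit entry after a CRAT header */
static void example_walk_crat(const struct crat_header *crat)
{
        const uint8_t *p = (const uint8_t *)(crat + 1);
        uint32_t i;

        for (i = 0; i < crat->total_entries; i++) {
                const struct crat_subtype_generic *sub =
                        (const struct crat_subtype_generic *)p;

                if ((sub->flags & CRAT_SUBTYPE_FLAGS_ENABLED) &&
                    sub->type == CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY) {
                        /* a real parser would cast to crat_subtype_computeunit here */
                }

                p += sub->length; /* entries are variable length */
        }
}
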
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
new file mode 100644
index 000000000000..43884ebd4303
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/amd-iommu.h>
+#include <linux/bsearch.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+
+#define MQD_SIZE_ALIGNED 768
+
+static const struct kfd_device_info kaveri_device_info = {
+ .max_pasid_bits = 16,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .mqd_size_aligned = MQD_SIZE_ALIGNED
+};
+
+struct kfd_deviceid {
+ unsigned short did;
+ const struct kfd_device_info *device_info;
+};
+
+/* Please keep this sorted by increasing device id. */
+static const struct kfd_deviceid supported_devices[] = {
+ { 0x1304, &kaveri_device_info }, /* Kaveri */
+ { 0x1305, &kaveri_device_info }, /* Kaveri */
+ { 0x1306, &kaveri_device_info }, /* Kaveri */
+ { 0x1307, &kaveri_device_info }, /* Kaveri */
+ { 0x1309, &kaveri_device_info }, /* Kaveri */
+ { 0x130A, &kaveri_device_info }, /* Kaveri */
+ { 0x130B, &kaveri_device_info }, /* Kaveri */
+ { 0x130C, &kaveri_device_info }, /* Kaveri */
+ { 0x130D, &kaveri_device_info }, /* Kaveri */
+ { 0x130E, &kaveri_device_info }, /* Kaveri */
+ { 0x130F, &kaveri_device_info }, /* Kaveri */
+ { 0x1310, &kaveri_device_info }, /* Kaveri */
+ { 0x1311, &kaveri_device_info }, /* Kaveri */
+ { 0x1312, &kaveri_device_info }, /* Kaveri */
+ { 0x1313, &kaveri_device_info }, /* Kaveri */
+ { 0x1315, &kaveri_device_info }, /* Kaveri */
+ { 0x1316, &kaveri_device_info }, /* Kaveri */
+ { 0x1317, &kaveri_device_info }, /* Kaveri */
+ { 0x1318, &kaveri_device_info }, /* Kaveri */
+ { 0x131B, &kaveri_device_info }, /* Kaveri */
+ { 0x131C, &kaveri_device_info }, /* Kaveri */
+ { 0x131D, &kaveri_device_info }, /* Kaveri */
+};
+
+static const struct kfd_device_info *lookup_device_info(unsigned short did)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+ if (supported_devices[i].did == did) {
+ BUG_ON(supported_devices[i].device_info == NULL);
+ return supported_devices[i].device_info;
+ }
+ }
+
+ return NULL;
+}
+
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+{
+ struct kfd_dev *kfd;
+
+ const struct kfd_device_info *device_info =
+ lookup_device_info(pdev->device);
+
+ if (!device_info)
+ return NULL;
+
+ kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
+ if (!kfd)
+ return NULL;
+
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+ kfd->init_complete = false;
+
+ return kfd;
+}
+
+static bool device_iommu_pasid_init(struct kfd_dev *kfd)
+{
+ const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+ struct amd_iommu_device_info iommu_info;
+ unsigned int pasid_limit;
+ int err;
+
+ err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+ if (err < 0) {
+ dev_err(kfd_device,
+ "error getting iommu info. is the iommu enabled?\n");
+ return false;
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+ dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+ return false;
+ }
+
+ pasid_limit = min_t(unsigned int,
+ (unsigned int)1 << kfd->device_info->max_pasid_bits,
+ iommu_info.max_pasids);
+ /*
+ * the last pasid is used for kernel queue doorbells;
+ * in the future the last pasid might be used for a kernel thread.
+ */
+ pasid_limit = min_t(unsigned int,
+ pasid_limit,
+ kfd->doorbell_process_limit - 1);
+
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err < 0) {
+ dev_err(kfd_device, "error initializing iommu device\n");
+ return false;
+ }
+
+ if (!kfd_set_pasid_limit(pasid_limit)) {
+ dev_err(kfd_device, "error setting pasid limit\n");
+ amd_iommu_free_device(kfd->pdev);
+ return false;
+ }
+
+ return true;
+}
+
+static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
+{
+ struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
+
+ if (dev)
+ kfd_unbind_process_from_device(dev, pasid);
+}
+
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ unsigned int size;
+
+ kfd->shared_resources = *gpu_resources;
+
+ /* calculate max size of mqds needed for queues */
+ size = max_num_of_processes *
+ max_num_of_queues_per_process *
+ kfd->device_info->mqd_size_aligned;
+
+ /* add another 512KB for all other allocations on gart */
+ size += 512 * 1024;
+
+ if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+ dev_err(kfd_device,
+ "Error initializing sa manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto out;
+ }
+
+ kfd_doorbell_init(kfd);
+
+ if (kfd_topology_add_device(kfd) != 0) {
+ dev_err(kfd_device,
+ "Error adding device (%x:%x) to topology\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto kfd_topology_add_device_error;
+ }
+
+ if (kfd_interrupt_init(kfd)) {
+ dev_err(kfd_device,
+ "Error initializing interrupts for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto kfd_interrupt_error;
+ }
+
+ if (!device_iommu_pasid_init(kfd)) {
+ dev_err(kfd_device,
+ "Error initializing iommuv2 for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto device_iommu_pasid_error;
+ }
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+
+ kfd->dqm = device_queue_manager_init(kfd);
+ if (!kfd->dqm) {
+ dev_err(kfd_device,
+ "Error initializing queue manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto device_queue_manager_error;
+ }
+
+ if (kfd->dqm->start(kfd->dqm) != 0) {
+ dev_err(kfd_device,
+ "Error starting queuen manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto dqm_start_error;
+ }
+
+ kfd->init_complete = true;
+ dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+ kfd->pdev->device);
+
+ pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+ sched_policy);
+
+ goto out;
+
+dqm_start_error:
+ device_queue_manager_uninit(kfd->dqm);
+device_queue_manager_error:
+ amd_iommu_free_device(kfd->pdev);
+device_iommu_pasid_error:
+ kfd_interrupt_exit(kfd);
+kfd_interrupt_error:
+ kfd_topology_remove_device(kfd);
+kfd_topology_add_device_error:
+ kfd2kgd->fini_sa_manager(kfd->kgd);
+ dev_err(kfd_device,
+ "device (%x:%x) NOT added due to errors\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+out:
+ return kfd->init_complete;
+}
+
+void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+ if (kfd->init_complete) {
+ device_queue_manager_uninit(kfd->dqm);
+ amd_iommu_free_device(kfd->pdev);
+ kfd_interrupt_exit(kfd);
+ kfd_topology_remove_device(kfd);
+ }
+
+ kfree(kfd);
+}
+
+void kgd2kfd_suspend(struct kfd_dev *kfd)
+{
+ BUG_ON(kfd == NULL);
+
+ if (kfd->init_complete) {
+ kfd->dqm->stop(kfd->dqm);
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
+ }
+}
+
+int kgd2kfd_resume(struct kfd_dev *kfd)
+{
+ unsigned int pasid_limit;
+ int err;
+
+ BUG_ON(kfd == NULL);
+
+ pasid_limit = kfd_get_pasid_limit();
+
+ if (kfd->init_complete) {
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err < 0)
+ return -ENXIO;
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ kfd->dqm->start(kfd->dqm);
+ }
+
+ return 0;
+}
+
+/* This is called directly from KGD at ISR. */
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+ if (kfd->init_complete) {
+ spin_lock(&kfd->interrupt_lock);
+
+ if (kfd->interrupts_active
+ && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ schedule_work(&kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
+ }
+}
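
Since the device table above must stay sorted and <linux/bsearch.h> is already included, a binary-search lookup is a natural fit; the following is a hedged alternative sketch only (the patch itself uses a linear scan, which is also fine for a table this small):

#include <linux/bsearch.h>
#include <linux/kernel.h>

/* comparison helper for bsearch over the sorted supported_devices[] table */
static int kfd_devid_cmp(const void *key, const void *elem)
{
        unsigned short did = *(const unsigned short *)key;
        const struct kfd_deviceid *entry = elem;

        return (int)did - (int)entry->did;
}

static const struct kfd_device_info *lookup_device_info_bsearch(unsigned short did)
{
        const struct kfd_deviceid *found;

        found = bsearch(&did, supported_devices, ARRAY_SIZE(supported_devices),
                        sizeof(supported_devices[0]), kfd_devid_cmp);

        return found ? found->device_info : NULL;
}
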
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
new file mode 100644
index 000000000000..924e90c072e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -0,0 +1,1062 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "kfd_kernel_queue.h"
+#include "../../radeon/cik_reg.h"
+
+/* Size of the per-pipe EOP queue */
+#define CIK_HPD_EOP_BYTES_LOG2 11
+#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+
+static bool is_mem_initialized;
+
+static int init_memory(struct device_queue_manager *dqm);
+static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
+ unsigned int pasid, unsigned int vmid);
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+
+
+static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.compute_pipe_count;
+}
+
+static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+ return dqm->dev->shared_resources.first_compute_pipe;
+}
+
+static inline unsigned int get_pipes_num_cpsch(void)
+{
+ return PIPE_PER_ME_CP_SCHEDULING;
+}
+
+static inline unsigned int
+get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+{
+ uint32_t nybble;
+
+ nybble = (pdd->lds_base >> 60) & 0x0E;
+
+ return nybble;
+
+}
+
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+{
+ unsigned int shared_base;
+
+ shared_base = (pdd->lds_base >> 16) & 0xFF;
+
+ return shared_base;
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
+static void init_process_memory(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ BUG_ON(!dqm || !qpd);
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+ DEFAULT_MTYPE(MTYPE_NONCACHED) |
+ APE1_MTYPE(MTYPE_NONCACHED);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ if (qpd->pqm->process->is_32bit_user_mode) {
+ temp = get_sh_mem_bases_32(pdd);
+ qpd->sh_mem_bases = SHARED_BASE(temp);
+ qpd->sh_mem_config |= PTR32;
+ } else {
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ }
+
+ pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+}
+
+static void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+ qpd->sh_mem_config,
+ qpd->sh_mem_ape1_base,
+ qpd->sh_mem_ape1_limit,
+ qpd->sh_mem_bases);
+}
+
+static int allocate_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int bit, allocated_vmid;
+
+ if (dqm->vmid_bitmap == 0)
+ return -ENOMEM;
+
+ bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
+ clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+
+ /* Kaveri KFD VMIDs start from VMID 8 */
+ allocated_vmid = bit + KFD_VMID_START_OFFSET;
+ pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+ qpd->vmid = allocated_vmid;
+ q->properties.vmid = allocated_vmid;
+
+ set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
+ program_sh_mem_settings(dqm, qpd);
+
+ return 0;
+}
+
+static void deallocate_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+
+ set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+ qpd->vmid = 0;
+ q->properties.vmid = 0;
+}
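
For reference, a minimal user-space sketch of the bitmap-based VMID allocation performed by allocate_vmid()/deallocate_vmid() above (not part of the patch): __builtin_ctz() stands in for find_first_bit() and plain masking for clear_bit()/set_bit(); CIK_VMID_NUM and KFD_VMID_START_OFFSET match the values defined later in kfd_device_queue_manager.h.

#include <stdio.h>

#define CIK_VMID_NUM		8
#define KFD_VMID_START_OFFSET	8

static unsigned int vmid_bitmap = (1 << CIK_VMID_NUM) - 1;

/* Returns the allocated VMID (8..15) or -1 when all VMIDs are in use. */
static int alloc_vmid(void)
{
	int bit;

	if (vmid_bitmap == 0)
		return -1;

	bit = __builtin_ctz(vmid_bitmap);	/* stand-in for find_first_bit() */
	vmid_bitmap &= ~(1u << bit);		/* stand-in for clear_bit() */
	return bit + KFD_VMID_START_OFFSET;
}

static void free_vmid(int vmid)
{
	vmid_bitmap |= 1u << (vmid - KFD_VMID_START_OFFSET);	/* set_bit() */
}

int main(void)
{
	int a = alloc_vmid(), b = alloc_vmid();

	printf("allocated VMIDs %d and %d\n", a, b);	/* 8 and 9 */
	free_vmid(a);
	free_vmid(b);
	return 0;
}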
+
+static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd,
+ int *allocated_vmid)
+{
+ int retval;
+
+ BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
+
+ pr_debug("kfd: In func %s\n", __func__);
+ print_queue(q);
+
+ mutex_lock(&dqm->lock);
+
+ if (list_empty(&qpd->queues_list)) {
+ retval = allocate_vmid(dqm, qpd, q);
+ if (retval != 0) {
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+ }
+ *allocated_vmid = qpd->vmid;
+ q->properties.vmid = qpd->vmid;
+
+ retval = create_compute_queue_nocpsch(dqm, q, qpd);
+
+ if (retval != 0) {
+ if (list_empty(&qpd->queues_list)) {
+ deallocate_vmid(dqm, qpd, q);
+ *allocated_vmid = 0;
+ }
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+
+ list_add(&q->list, &qpd->queues_list);
+ dqm->queue_count++;
+
+ mutex_unlock(&dqm->lock);
+ return 0;
+}
+
+static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
+{
+ bool set;
+ int pipe, bit;
+
+ set = false;
+
+ for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
+ pipe = (pipe + 1) % get_pipes_num(dqm)) {
+ if (dqm->allocated_queues[pipe] != 0) {
+ bit = find_first_bit(
+ (unsigned long *)&dqm->allocated_queues[pipe],
+ QUEUES_PER_PIPE);
+
+ clear_bit(bit,
+ (unsigned long *)&dqm->allocated_queues[pipe]);
+ q->pipe = pipe;
+ q->queue = bit;
+ set = true;
+ break;
+ }
+ }
+
+ if (set == false)
+ return -EBUSY;
+
+ pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
+ __func__, q->pipe, q->queue);
+ /* horizontal hqd allocation */
+ dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
+
+ return 0;
+}
+
+static inline void deallocate_hqd(struct device_queue_manager *dqm,
+ struct queue *q)
+{
+ set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
+}
+
+static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !qpd);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL)
+ return -ENOMEM;
+
+ retval = allocate_hqd(dqm, q);
+ if (retval != 0)
+ return retval;
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !q->mqd || !qpd);
+
+ retval = 0;
+
+ pr_debug("kfd: In Func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ q->pipe, q->queue);
+
+ if (retval != 0)
+ goto out;
+
+ deallocate_hqd(dqm, q);
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+ if (list_empty(&qpd->queues_list))
+ deallocate_vmid(dqm, qpd, q);
+ dqm->queue_count--;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !q->mqd);
+
+ mutex_lock(&dqm->lock);
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ mutex_unlock(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if (q->properties.is_active == true)
+ dqm->queue_count++;
+ else
+ dqm->queue_count--;
+
+ if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+ retval = execute_queues_cpsch(dqm, false);
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static struct mqd_manager *get_mqd_manager_nocpsch(
+ struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
+{
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+
+ mqd = dqm->mqds[type];
+ if (!mqd) {
+ mqd = mqd_manager_init(type, dqm->dev);
+ if (mqd == NULL)
+ pr_err("kfd: mqd manager is NULL");
+ dqm->mqds[type] = mqd;
+ }
+
+ return mqd;
+}
+
+static int register_process_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct device_process_node *n;
+
+ BUG_ON(!dqm || !qpd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->qpd = qpd;
+
+ mutex_lock(&dqm->lock);
+ list_add(&n->list, &dqm->queues);
+
+ init_process_memory(dqm, qpd);
+ dqm->processes_count++;
+
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+}
+
+static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct device_process_node *cur, *next;
+
+ BUG_ON(!dqm || !qpd);
+
+ BUG_ON(!list_empty(&qpd->queues_list));
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = 0;
+ mutex_lock(&dqm->lock);
+
+ list_for_each_entry_safe(cur, next, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ goto out;
+ }
+ }
+ /* qpd not found in dqm list */
+ retval = 1;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int
+set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
+ unsigned int vmid)
+{
+ uint32_t pasid_mapping;
+
+ pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID_PASID_MAPPING_VALID;
+ return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+ vmid);
+}
+
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+ /* In 64-bit mode, we can only control the top 3 bits of the LDS,
+ * scratch and GPUVM apertures.
+ * The hardware fills in the remaining 59 bits according to the
+ * following pattern:
+ * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
+ * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
+ * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
+ *
+ * (where X/Y is the configurable nybble with the low-bit 0)
+ *
+ * LDS and scratch will have the same top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+ * GPUVM can have a different top nybble programmed in the
+ * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+ * We don't bother to support different top nybbles
+ * for LDS/Scratch and GPUVM.
+ */
+
+ BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ top_address_nybble == 0);
+
+ return PRIVATE_BASE(top_address_nybble << 12) |
+ SHARED_BASE(top_address_nybble << 12);
+}
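
As a standalone illustration of the SH_MEM_BASES arithmetic above (not part of the patch), the sketch below folds the top nybble of a 64-bit LDS base into the register value. The PRIVATE_BASE/SHARED_BASE field positions (bits [15:0] and [31:16]) are assumed here; the driver takes the real macros from cik_regs.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions; the driver uses the macros from cik_regs.h. */
#define PRIVATE_BASE(x)	((x) << 0)
#define SHARED_BASE(x)	((x) << 16)

static uint32_t sh_mem_bases_64bit(uint64_t lds_base)
{
	/* Top nybble of the aperture, low bit forced to 0 (see above). */
	uint32_t nybble = (lds_base >> 60) & 0x0E;

	return PRIVATE_BASE(nybble << 12) | SHARED_BASE(nybble << 12);
}

int main(void)
{
	/* e.g. MAKE_LDS_APP_BASE(1) from kfd_flat_memory.c: nybble == 2 */
	uint64_t lds_base = 0x2000000000000000ULL;

	/* Prints SH_MEM_BASES = 0x20002000 */
	printf("SH_MEM_BASES = 0x%08X\n", sh_mem_bases_64bit(lds_base));
	return 0;
}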
+
+static int init_memory(struct device_queue_manager *dqm)
+{
+ int i, retval;
+
+ for (i = 8; i < 16; i++)
+ set_pasid_vmid_mapping(dqm, 0, i);
+
+ retval = kfd2kgd->init_memory(dqm->dev->kgd);
+ if (retval == 0)
+ is_mem_initialized = true;
+ return retval;
+}
+
+
+static int init_pipelines(struct device_queue_manager *dqm,
+ unsigned int pipes_num, unsigned int first_pipe)
+{
+ void *hpdptr;
+ struct mqd_manager *mqd;
+ unsigned int i, err, inx;
+ uint64_t pipe_hpd_addr;
+
+ BUG_ON(!dqm || !dqm->dev);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ /*
+ * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
+ * The driver never accesses this memory after zeroing it.
+ * It doesn't even have to be saved/restored on suspend/resume
+ * because it contains no data when there are no active queues.
+ */
+
+ err = kfd2kgd->allocate_mem(dqm->dev->kgd,
+ CIK_HPD_EOP_BYTES * pipes_num,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &dqm->pipeline_mem);
+
+ if (err) {
+ pr_err("kfd: error allocate vidmem num pipes: %d\n",
+ pipes_num);
+ return -ENOMEM;
+ }
+
+ hpdptr = dqm->pipeline_mem->cpu_ptr;
+ dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
+
+ memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
+ if (mqd == NULL) {
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->pipeline_mem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pipes_num; i++) {
+ inx = i + first_pipe;
+ pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
+ pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
+ /* = log2(bytes/4)-1 */
+ kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
+ }
+
+ return 0;
+}
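
A quick sanity check of the EOP size encoding above (illustrative only): with CIK_HPD_EOP_BYTES_LOG2 == 11, the value handed to init_pipeline() is 8, which is indeed log2(bytes/4) - 1.

#include <stdio.h>

#define CIK_HPD_EOP_BYTES_LOG2	11
#define CIK_HPD_EOP_BYTES	(1U << CIK_HPD_EOP_BYTES_LOG2)

int main(void)
{
	unsigned int encoded = CIK_HPD_EOP_BYTES_LOG2 - 3;

	/* 2048 bytes -> 512 dwords -> log2(512) - 1 == 8 */
	printf("EOP size field %u (EOP queue %u bytes)\n",
	       encoded, CIK_HPD_EOP_BYTES);
	return 0;
}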
+
+
+static int init_scheduler(struct device_queue_manager *dqm)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In %s\n", __func__);
+
+ retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ if (retval != 0)
+ return retval;
+
+ retval = init_memory(dqm);
+
+ return retval;
+}
+
+static int initialize_nocpsch(struct device_queue_manager *dqm)
+{
+ int i;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s num of pipes: %d\n",
+ __func__, get_pipes_num(dqm));
+
+ mutex_init(&dqm->lock);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!dqm->allocated_queues) {
+ mutex_destroy(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < get_pipes_num(dqm); i++)
+ dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
+
+ dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+
+ init_scheduler(dqm);
+ return 0;
+}
+
+static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+{
+ int i;
+
+ BUG_ON(!dqm);
+
+ BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+
+ kfree(dqm->allocated_queues);
+ for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
+ kfree(dqm->mqds[i]);
+ mutex_destroy(&dqm->lock);
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->pipeline_mem);
+}
+
+static int start_nocpsch(struct device_queue_manager *dqm)
+{
+ return 0;
+}
+
+static int stop_nocpsch(struct device_queue_manager *dqm)
+{
+ return 0;
+}
+
+/*
+ * Device Queue Manager implementation for cp scheduler
+ */
+
+static int set_sched_resources(struct device_queue_manager *dqm)
+{
+ struct scheduling_resources res;
+ unsigned int queue_num, queue_mask;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
+ queue_mask = (1 << queue_num) - 1;
+ res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
+ res.vmid_mask <<= KFD_VMID_START_OFFSET;
+ res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
+ res.gws_mask = res.oac_mask = res.gds_heap_base =
+ res.gds_heap_size = 0;
+
+ pr_debug("kfd: scheduling resources:\n"
+ " vmid mask: 0x%8X\n"
+ " queue mask: 0x%8llX\n",
+ res.vmid_mask, res.queue_mask);
+
+ return pm_send_set_resources(&dqm->packets, &res);
+}
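
The following standalone sketch (not part of the patch) reproduces the mask arithmetic above with the constants from kfd_device_queue_manager.h; first_compute_pipe is a made-up sample value.

#include <stdint.h>
#include <stdio.h>

#define QUEUES_PER_PIPE			8
#define PIPE_PER_ME_CP_SCHEDULING	3
#define CIK_VMID_NUM			8
#define KFD_VMID_START_OFFSET		8
#define VMID_PER_DEVICE			CIK_VMID_NUM

int main(void)
{
	unsigned int first_pipe = 1;	/* hypothetical first_compute_pipe */
	unsigned int queue_num = PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE;
	uint64_t queue_mask = (1ULL << queue_num) - 1;
	uint32_t vmid_mask = ((1 << VMID_PER_DEVICE) - 1) << KFD_VMID_START_OFFSET;

	queue_mask <<= first_pipe * QUEUES_PER_PIPE;

	/* vmid mask 0xFF00, queue mask 0xFFFFFF00 for these sample values */
	printf("vmid mask:  0x%8X\n", vmid_mask);
	printf("queue mask: 0x%8llX\n", (unsigned long long)queue_mask);
	return 0;
}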
+
+static int initialize_cpsch(struct device_queue_manager *dqm)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ pr_debug("kfd: In func %s num of pipes: %d\n",
+ __func__, get_pipes_num_cpsch());
+
+ mutex_init(&dqm->lock);
+ INIT_LIST_HEAD(&dqm->queues);
+ dqm->queue_count = dqm->processes_count = 0;
+ dqm->active_runlist = false;
+ retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
+ if (retval != 0)
+ goto fail_init_pipelines;
+
+ return 0;
+
+fail_init_pipelines:
+ mutex_destroy(&dqm->lock);
+ return retval;
+}
+
+static int start_cpsch(struct device_queue_manager *dqm)
+{
+ struct device_process_node *node;
+ int retval;
+
+ BUG_ON(!dqm);
+
+ retval = 0;
+
+ retval = pm_init(&dqm->packets, dqm);
+ if (retval != 0)
+ goto fail_packet_manager_init;
+
+ retval = set_sched_resources(dqm);
+ if (retval != 0)
+ goto fail_set_sched_resources;
+
+ pr_debug("kfd: allocating fence memory\n");
+
+ /* allocate fence memory on the gart */
+ retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
+ sizeof(*dqm->fence_addr),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &dqm->fence_mem);
+
+ if (retval != 0)
+ goto fail_allocate_vidmem;
+
+ dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+ dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+ list_for_each_entry(node, &dqm->queues, list)
+ if (node->qpd->pqm->process && dqm->dev)
+ kfd_bind_process_to_device(dqm->dev,
+ node->qpd->pqm->process);
+
+ execute_queues_cpsch(dqm, true);
+
+ return 0;
+fail_allocate_vidmem:
+fail_set_sched_resources:
+ pm_uninit(&dqm->packets);
+fail_packet_manager_init:
+ return retval;
+}
+
+static int stop_cpsch(struct device_queue_manager *dqm)
+{
+ struct device_process_node *node;
+ struct kfd_process_device *pdd;
+
+ BUG_ON(!dqm);
+
+ destroy_queues_cpsch(dqm, true);
+
+ list_for_each_entry(node, &dqm->queues, list) {
+ pdd = qpd_to_pdd(node->qpd);
+ pdd->bound = false;
+ }
+ kfd2kgd->free_mem(dqm->dev->kgd,
+ (struct kgd_mem *) dqm->fence_mem);
+ pm_uninit(&dqm->packets);
+
+ return 0;
+}
+
+static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+{
+ BUG_ON(!dqm || !kq || !qpd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ list_add(&kq->list, &qpd->priv_queue_list);
+ dqm->queue_count++;
+ qpd->is_debug = true;
+ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+}
+
+static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd)
+{
+ BUG_ON(!dqm || !kq);
+
+ pr_debug("kfd: In %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+ destroy_queues_cpsch(dqm, false);
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+}
+
+static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd, int *allocate_vmid)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !q || !qpd);
+
+ retval = 0;
+
+ if (allocate_vmid)
+ *allocate_vmid = 0;
+
+ mutex_lock(&dqm->lock);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (mqd == NULL) {
+ mutex_unlock(&dqm->lock);
+ return -ENOMEM;
+ }
+
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0)
+ goto out;
+
+ list_add(&q->list, &qpd->queues_list);
+ if (q->properties.is_active) {
+ dqm->queue_count++;
+ retval = execute_queues_cpsch(dqm, false);
+ }
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+ unsigned long timeout)
+{
+ BUG_ON(!fence_addr);
+ timeout += jiffies;
+
+ while (*fence_addr != fence_value) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("kfd: qcm fence wait loop timeout expired\n");
+ return -ETIME;
+ }
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ retval = 0;
+
+ if (lock)
+ mutex_lock(&dqm->lock);
+ if (dqm->active_runlist == false)
+ goto out;
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
+ if (retval != 0)
+ goto out;
+
+ *dqm->fence_addr = KFD_FENCE_INIT;
+ pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
+ KFD_FENCE_COMPLETED);
+ /* wait for the preemption to complete, up to the default timeout */
+ fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
+ pm_release_ib(&dqm->packets);
+ dqm->active_runlist = false;
+
+out:
+ if (lock)
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+{
+ int retval;
+
+ BUG_ON(!dqm);
+
+ if (lock)
+ mutex_lock(&dqm->lock);
+
+ retval = destroy_queues_cpsch(dqm, false);
+ if (retval != 0) {
+ pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ goto out;
+ }
+
+ if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
+ retval = 0;
+ goto out;
+ }
+
+ if (dqm->active_runlist) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ if (retval != 0) {
+ pr_err("kfd: failed to execute runlist");
+ goto out;
+ }
+ dqm->active_runlist = true;
+
+out:
+ if (lock)
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dqm || !qpd || !q);
+
+ retval = 0;
+
+ /* remove queue from list to prevent rescheduling after preemption */
+ mutex_lock(&dqm->lock);
+
+ mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+
+ list_del(&q->list);
+ dqm->queue_count--;
+
+ execute_queues_cpsch(dqm, false);
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
+static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&dqm->lock);
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base)
+ goto out;
+
+ if ((base & APE1_FIXED_BITS_MASK) != 0)
+ goto out;
+
+ if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+ goto out;
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_NONCACHED :
+ MTYPE_CACHED;
+
+ qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
+ | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
+ | DEFAULT_MTYPE(default_mtype)
+ | APE1_MTYPE(ape1_mtype);
+
+ if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
+ program_sh_mem_settings(dqm, qpd);
+
+ pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+ qpd->sh_mem_config, qpd->sh_mem_ape1_base,
+ qpd->sh_mem_ape1_limit);
+
+ mutex_unlock(&dqm->lock);
+ return true;
+
+out:
+ mutex_unlock(&dqm->lock);
+ return false;
+}
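
For clarity, here is a hedged, standalone sketch of the APE1 validation and encoding performed above for a non-zero aperture size; the sample base address and size are invented, and the constants are the ones defined just before set_cache_memory_policy().

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define APE1_FIXED_BITS_MASK	0xFFFF80000000FFFFULL
#define APE1_LIMIT_ALIGNMENT	0xFFFF

/* Returns true and fills the 32-bit register fields if base/size are valid. */
static bool encode_ape1(uint64_t base, uint64_t size,
			uint32_t *reg_base, uint32_t *reg_limit)
{
	uint64_t limit = base + size - 1;

	if (size == 0 || limit <= base)
		return false;
	if ((base & APE1_FIXED_BITS_MASK) != 0)
		return false;
	if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
		return false;

	*reg_base = (uint32_t)(base >> 16);	/* SH_MEM_APE1_BASE */
	*reg_limit = (uint32_t)(limit >> 16);	/* SH_MEM_APE1_LIMIT */
	return true;
}

int main(void)
{
	uint32_t b, l;

	/* Hypothetical 64KB-aligned, 1MB user-mode aperture at 4GB. */
	if (encode_ape1(0x0000000100000000ULL, 0x100000ULL, &b, &l))
		printf("APE1 base reg 0x%08X limit reg 0x%08X\n", b, l);
	return 0;
}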
+
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+{
+ struct device_queue_manager *dqm;
+
+ BUG_ON(!dev);
+
+ dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+ if (!dqm)
+ return NULL;
+
+ dqm->dev = dev;
+ switch (sched_policy) {
+ case KFD_SCHED_POLICY_HWS:
+ case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
+ /* initialize dqm for cp scheduling */
+ dqm->create_queue = create_queue_cpsch;
+ dqm->initialize = initialize_cpsch;
+ dqm->start = start_cpsch;
+ dqm->stop = stop_cpsch;
+ dqm->destroy_queue = destroy_queue_cpsch;
+ dqm->update_queue = update_queue;
+ dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->register_process = register_process_nocpsch;
+ dqm->unregister_process = unregister_process_nocpsch;
+ dqm->uninitialize = uninitialize_nocpsch;
+ dqm->create_kernel_queue = create_kernel_queue_cpsch;
+ dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
+ dqm->set_cache_memory_policy = set_cache_memory_policy;
+ break;
+ case KFD_SCHED_POLICY_NO_HWS:
+ /* initialize dqm for no cp scheduling */
+ dqm->start = start_nocpsch;
+ dqm->stop = stop_nocpsch;
+ dqm->create_queue = create_queue_nocpsch;
+ dqm->destroy_queue = destroy_queue_nocpsch;
+ dqm->update_queue = update_queue;
+ dqm->get_mqd_manager = get_mqd_manager_nocpsch;
+ dqm->register_process = register_process_nocpsch;
+ dqm->unregister_process = unregister_process_nocpsch;
+ dqm->initialize = initialize_nocpsch;
+ dqm->uninitialize = uninitialize_nocpsch;
+ dqm->set_cache_memory_policy = set_cache_memory_policy;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (dqm->initialize(dqm) != 0) {
+ kfree(dqm);
+ return NULL;
+ }
+
+ return dqm;
+}
+
+void device_queue_manager_uninit(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+
+ dqm->uninitialize(dqm);
+ kfree(dqm);
+}
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
new file mode 100644
index 000000000000..c3f189e8ae35
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
+#define KFD_DEVICE_QUEUE_MANAGER_H_
+
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
+#define QUEUES_PER_PIPE (8)
+#define PIPE_PER_ME_CP_SCHEDULING (3)
+#define CIK_VMID_NUM (8)
+#define KFD_VMID_START_OFFSET (8)
+#define VMID_PER_DEVICE CIK_VMID_NUM
+#define KFD_DQM_FIRST_PIPE (0)
+
+struct device_process_node {
+ struct qcm_process_device *qpd;
+ struct list_head list;
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * @create_queue: Queue creation routine.
+ *
+ * @destroy_queue: Queue destruction routine.
+ *
+ * @update_queue: Queue update routine.
+ *
+ * @get_mqd_manager: Returns the mqd manager according to the mqd type.
+ *
+ * @execute_queues: Dispatches the queues list to the H/W.
+ *
+ * @register_process: This routine associates a specific process with a device.
+ *
+ * @unregister_process: Destroys the association between a process and a device.
+ *
+ * @initialize: Initializes the pipelines and memory module for that device.
+ *
+ * @start: Initializes the resources/modules the device needs for queue
+ * execution. This function is called on device initialization and after the
+ * system wakes up from suspend.
+ *
+ * @stop: This routine stops execution of all the active queues running on the
+ * H/W; it is called on system suspend.
+ *
+ * @uninitialize: Destroys all the device queue manager resources allocated in
+ * initialize routine.
+ *
+ * @create_kernel_queue: Creates a kernel queue. Used for the debug queue.
+ *
+ * @destroy_kernel_queue: Destroys a kernel queue. Used for the debug queue.
+ *
+ * @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for the
+ * memory apertures.
+ *
+ * This struct is a base class for the kfd queue scheduler at the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queue scheduler
+ * that configures the H/W.
+ */
+
+struct device_queue_manager {
+ int (*create_queue)(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd,
+ int *allocate_vmid);
+ int (*destroy_queue)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q);
+ int (*update_queue)(struct device_queue_manager *dqm,
+ struct queue *q);
+
+ struct mqd_manager * (*get_mqd_manager)
+ (struct device_queue_manager *dqm,
+ enum KFD_MQD_TYPE type);
+
+ int (*register_process)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*unregister_process)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*initialize)(struct device_queue_manager *dqm);
+ int (*start)(struct device_queue_manager *dqm);
+ int (*stop)(struct device_queue_manager *dqm);
+ void (*uninitialize)(struct device_queue_manager *dqm);
+ int (*create_kernel_queue)(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd);
+ void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
+ struct kernel_queue *kq,
+ struct qcm_process_device *qpd);
+ bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+
+
+ struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+ struct kfd_dev *dev;
+ struct mutex lock;
+ struct list_head queues;
+ unsigned int processes_count;
+ unsigned int queue_count;
+ unsigned int next_pipe_to_allocate;
+ unsigned int *allocated_queues;
+ unsigned int vmid_bitmap;
+ uint64_t pipelines_addr;
+ struct kfd_mem_obj *pipeline_mem;
+ uint64_t fence_gpu_addr;
+ unsigned int *fence_addr;
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+};
+
+
+
+#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
new file mode 100644
index 000000000000..b5791a5c7c06
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * This extension supports kernel-level doorbell management for
+ * the kernel queues.
+ * The last doorbell page is reserved for kernel queues, which
+ * ensures that no user process can get access to the kernel
+ * doorbell page.
+ */
+static DEFINE_MUTEX(doorbell_mutex);
+static unsigned long doorbell_available_index[
+ DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
+
+#define KERNEL_DOORBELL_PASID 1
+#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
+
+/*
+ * Each device exposes a doorbell aperture, a PCI MMIO aperture that
+ * receives 32-bit writes that are passed to queues as wptr values.
+ * The doorbells are intended to be written by applications as part
+ * of queueing work on user-mode queues.
+ * We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
+ * We map the doorbell address space into user-mode when a process creates
+ * its first queue on each device.
+ * Although the mapping is done by KFD, it is equivalent to an mmap of
+ * the /dev/kfd with the particular device encoded in the mmap offset.
+ * There will be other uses for mmap of /dev/kfd, so only a range of
+ * offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
+ */
+
+/* # of doorbell bytes allocated for each process. */
+static inline size_t doorbell_process_allocation(void)
+{
+ return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES *
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ PAGE_SIZE);
+}
+
+/* Doorbell calculations for device init. */
+void kfd_doorbell_init(struct kfd_dev *kfd)
+{
+ size_t doorbell_start_offset;
+ size_t doorbell_aperture_size;
+ size_t doorbell_process_limit;
+
+ /*
+ * We start with calculations in bytes because the input data might
+ * only be byte-aligned.
+ * Only after we have done the rounding can we assume any alignment.
+ */
+
+ doorbell_start_offset =
+ roundup(kfd->shared_resources.doorbell_start_offset,
+ doorbell_process_allocation());
+
+ doorbell_aperture_size =
+ rounddown(kfd->shared_resources.doorbell_aperture_size,
+ doorbell_process_allocation());
+
+ if (doorbell_aperture_size > doorbell_start_offset)
+ doorbell_process_limit =
+ (doorbell_aperture_size - doorbell_start_offset) /
+ doorbell_process_allocation();
+ else
+ doorbell_process_limit = 0;
+
+ kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
+ doorbell_start_offset;
+
+ kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+ kfd->doorbell_process_limit = doorbell_process_limit - 1;
+
+ kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
+ doorbell_process_allocation());
+
+ BUG_ON(!kfd->doorbell_kernel_ptr);
+
+ pr_debug("kfd: doorbell initialization:\n");
+ pr_debug("kfd: doorbell base == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_base);
+
+ pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+ kfd->doorbell_id_offset);
+
+ pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+ doorbell_process_limit);
+
+ pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_base);
+
+ pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+ kfd->shared_resources.doorbell_aperture_size);
+
+ pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+ (uintptr_t)kfd->doorbell_kernel_ptr);
+}
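
To make the rounding above concrete, here is a standalone sketch of the same arithmetic (not part of the patch). It assumes 4-byte doorbells, 1024 queues per process (the KFD_MAX_NUM_OF_QUEUES_PER_PROCESS value is an assumption here) and 4KB pages, so each process gets exactly one doorbell page; the aperture size and start offset are invented.

#include <stddef.h>
#include <stdio.h>

#define DOORBELL_BYTES		4	/* KFD_SIZE_OF_DOORBELL_IN_BYTES */
#define QUEUES_PER_PROCESS	1024	/* assumed KFD_MAX_NUM_OF_QUEUES_PER_PROCESS */
#define PAGE_SZ			4096

static size_t roundup_to(size_t x, size_t a)   { return ((x + a - 1) / a) * a; }
static size_t rounddown_to(size_t x, size_t a) { return (x / a) * a; }

static size_t process_allocation(void)
{
	return roundup_to(DOORBELL_BYTES * QUEUES_PER_PROCESS, PAGE_SZ);
}

int main(void)
{
	/* Hypothetical values reported by KGD in shared_resources. */
	size_t start_offset = 0x1800;		/* first doorbell free for KFD */
	size_t aperture_size = 0x800000;	/* 8MB doorbell BAR */

	size_t start = roundup_to(start_offset, process_allocation());
	size_t size = rounddown_to(aperture_size, process_allocation());
	size_t limit = size > start ? (size - start) / process_allocation() : 0;

	/* start offset 0x2000, 2046 processes can get doorbell pages */
	printf("start offset 0x%zx, %zu processes can get doorbell pages\n",
	       start, limit);
	return 0;
}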
+
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
+{
+ phys_addr_t address;
+ struct kfd_dev *dev;
+
+ /*
+ * For simplicity we only allow mapping of the entire doorbell
+ * allocation of a single device & process.
+ */
+ if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
+ return -EINVAL;
+
+ /* Find kfd device according to gpu id */
+ dev = kfd_device_by_id(vma->vm_pgoff);
+ if (dev == NULL)
+ return -EINVAL;
+
+ /* Find if pdd exists for combination of process and gpu id */
+ if (!kfd_get_process_device_data(dev, process, 0))
+ return -EINVAL;
+
+ /* Calculate physical address of doorbell */
+ address = kfd_get_process_doorbells(dev, process);
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+ (unsigned long long) vma->vm_start, address, vma->vm_flags,
+ doorbell_process_allocation());
+
+
+ return io_remap_pfn_range(vma,
+ vma->vm_start,
+ address >> PAGE_SHIFT,
+ doorbell_process_allocation(),
+ vma->vm_page_prot);
+}
+
+
+/* get kernel iomem pointer for a doorbell */
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off)
+{
+ u32 inx;
+
+ BUG_ON(!kfd || !doorbell_off);
+
+ mutex_lock(&doorbell_mutex);
+ inx = find_first_zero_bit(doorbell_available_index,
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+
+ __set_bit(inx, doorbell_available_index);
+ mutex_unlock(&doorbell_mutex);
+
+ if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+ return NULL;
+
+ /*
+ * Calculate the kernel doorbell offset using the "fake" kernel
+ * PASID that is allocated for kernel queues only
+ */
+ *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
+ sizeof(u32)) + inx;
+
+ pr_debug("kfd: get kernel queue doorbell\n"
+ " doorbell offset == 0x%08d\n"
+ " kernel address == 0x%08lX\n",
+ *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
+
+ return kfd->doorbell_kernel_ptr + inx;
+}
+
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
+{
+ unsigned int inx;
+
+ BUG_ON(!kfd || !db_addr);
+
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+
+ mutex_lock(&doorbell_mutex);
+ __clear_bit(inx, doorbell_available_index);
+ mutex_unlock(&doorbell_mutex);
+}
+
+inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
+{
+ if (db) {
+ writel(value, db);
+ pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+ }
+}
+
+/*
+ * queue_ids are in the range [0, MAX_PROCESS_QUEUES) and are mapped 1:1
+ * to doorbells within the process's doorbell page
+ */
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int queue_id)
+{
+ /*
+ * doorbell_id_offset accounts for doorbells taken by KGD.
+ * pasid * doorbell_process_allocation/sizeof(u32) adjusts
+ * to the process's doorbells
+ */
+ return kfd->doorbell_id_offset +
+ process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+ queue_id;
+}
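
A standalone sketch of the doorbell index computation above (illustrative only); the doorbell_id_offset, PASID and queue_id values are invented, and the per-process doorbell count assumes one 4KB page of 4-byte doorbells.

#include <stdio.h>

/* Assumed sizes, as in the sketch after kfd_doorbell_init() above. */
#define DOORBELLS_PER_PROCESS	(4096 / 4)	/* one 4KB page of u32 doorbells */

static unsigned int queue_id_to_doorbell(unsigned int id_offset,
					 unsigned int pasid,
					 unsigned int queue_id)
{
	return id_offset + pasid * DOORBELLS_PER_PROCESS + queue_id;
}

int main(void)
{
	/* e.g. KGD keeps the first 1024 doorbells, process PASID 3, queue 5 */
	printf("doorbell index %u\n", queue_id_to_doorbell(1024, 3, 5));
	return 0;
}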
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
+{
+ uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
+ kfd->shared_resources.doorbell_start_offset) /
+ doorbell_process_allocation() + 1;
+
+ return num_of_elems;
+
+}
+
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process)
+{
+ return dev->doorbell_base +
+ process->pasid * doorbell_process_allocation();
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
new file mode 100644
index 000000000000..66df4da01c29
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <uapi/linux/kfd_ioctl.h>
+#include <linux/time.h>
+#include "kfd_priv.h"
+#include <linux/mm.h>
+#include <uapi/asm-generic/mman-common.h>
+#include <asm/processor.h>
+
+/*
+ * The primary memory I/O features being added for revisions of gfxip
+ * beyond 7.0 (Kaveri) are:
+ *
+ * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
+ *
+ * “Flat” shader memory access – These are new shader vector memory
+ * operations that do not reference a T#/V# so a “pointer” is what is
+ * sourced from the vector gprs for direct access to memory.
+ * This pointer space has the Shared(LDS) and Private(Scratch) memory
+ * mapped into this pointer space as apertures.
+ * The hardware then determines how to direct the memory request
+ * based on what apertures the request falls in.
+ *
+ * Unaligned support and alignment check
+ *
+ *
+ * System Unified Address - SUA
+ *
+ * The standard usage for GPU virtual addresses are that they are mapped by
+ * a set of page tables we call GPUVM and these page tables are managed by
+ * a combination of vidMM/driver software components. The current virtual
+ * address (VA) range for GPUVM is 40b.
+ *
+ * As of gfxip7.1 and beyond we’re adding the ability for compute memory
+ * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
+ * the same page tables used by host x86 processors and that are managed by
+ * the operating system. This is via a technique and hardware called ATC/IOMMU.
+ * The GPU has the capability of accessing both the GPUVM and ATC address
+ * spaces for a given VMID (process) simultaneously and we call this feature
+ * system unified address (SUA).
+ *
+ * There are three fundamental address modes of operation for a given VMID
+ * (process) on the GPU:
+ *
+ * HSA64 – 64b pointers and the default address space is ATC
+ * HSA32 – 32b pointers and the default address space is ATC
+ * GPUVM – 64b pointers and the default address space is GPUVM (driver
+ * model mode)
+ *
+ *
+ * HSA64 - ATC/IOMMU 64b
+ *
+ * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
+ * by the CPU so an AMD CPU can only access the high area
+ * (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
+ * so the actual VA carried to translation is 48b. There is a “hole” in
+ * the middle of the 64b VA space.
+ *
+ * The GPU not only has access to all of the CPU accessible address space via
+ * ATC/IOMMU, but it also has access to the GPUVM address space. The “system
+ * unified address” feature (SUA) is the mapping of GPUVM and ATC address
+ * spaces into a unified pointer space. The method we take for 64b mode is
+ * to map the full 40b GPUVM address space into the hole of the 64b address
+ * space.
+
+ * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
+ * direct requests to be translated via GPUVM page tables instead of the
+ * IOMMU path.
+ *
+ *
+ * 64b to 49b Address conversion
+ *
+ * Note that there are still significant portions of unused regions (holes)
+ * in the 64b address space even for the GPU. There are several places in
+ * the pipeline (sw and hw), we wish to compress the 64b virtual address
+ * to a 49b address. This 49b address is constituted of an “ATC” bit
+ * plus a 48b virtual address. This 49b address is what is passed to the
+ * translation hardware. ATC==0 means the 48b address is a GPUVM address
+ * (max of 2^40 – 1) intended to be translated via GPUVM page tables.
+ * ATC==1 means the 48b address is intended to be translated via IOMMU
+ * page tables.
+ *
+ * A 64b pointer is compared to the apertures that are defined (Base/Limit), in
+ * this case the GPUVM aperture is defined and if a pointer falls in this
+ * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
+ * as part of the 64b to 49b conversion.
+ *
+ * Where this 64b to 49b conversion is done is a function of the usage.
+ * Most GPU memory access is via memory objects where the driver builds
+ * a descriptor which consists of a base address and a memory access by
+ * the GPU usually consists of some kind of an offset or Cartesian coordinate
+ * that references this memory descriptor. This is the case for shader
+ * instructions that reference the T# or V# constants, or for specified
+ * locations of assets (ex. the shader program location). In these cases
+ * the driver is what handles the 64b to 49b conversion and the base
+ * address in the descriptor (ex. V# or T# or shader program location)
+ * is defined as a 48b address w/ an ATC bit. For this usage a given
+ * memory object cannot straddle multiple apertures in the 64b address
+ * space. For example a shader program cannot jump in/out between ATC
+ * and GPUVM space.
+ *
+ * In some cases we wish to pass a 64b pointer to the GPU hardware and
+ * the GPU hw does the 64b to 49b conversion before passing memory
+ * requests to the cache/memory system. This is the case for the
+ * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
+ * in scalar and vector GPRs respectively.
+ *
+ * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
+ * hardware sends a 48b address along w/ an ATC bit, to the memory controller
+ * on the memory request interfaces.
+ *
+ * <client>_MC_rdreq_atc // read request ATC bit
+ *
+ * 0 : <client>_MC_rdreq_addr is a GPUVM VA
+ *
+ * 1 : <client>_MC_rdreq_addr is a ATC VA
+ *
+ *
+ * “Spare” aperture (APE1)
+ *
+ * We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
+ * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
+ * config tables for setting cache policies. The “spare” (APE1) aperture is
+ * motivated by getting a different Mtype from the default.
+ * The default aperture isn’t an actual base/limit aperture; it is just the
+ * address space that doesn’t hit any defined base/limit apertures.
+ * To complete the picture of the gfxip7.x SUA apertures: the APE1 aperture
+ * can be placed either below or above the hole (it cannot be in the hole).
+ *
+ *
+ * General Aperture definitions and rules
+ *
+ * An aperture register definition consists of a Base, Limit, Mtype, and
+ * usually an ATC bit indicating which translation tables that aperture uses.
+ * In all cases (for SUA and DUA apertures discussed later), aperture base
+ * and limit definitions are 64KB aligned.
+ *
+ * <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
+ *
+ * <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
+ *
+ * The base and limit are considered inclusive to an aperture so being
+ * inside an aperture means (address >= Base) AND (address <= Limit).
+ *
+ * In no case is a payload that straddles multiple apertures expected to work.
+ * For example a load_dword_x4 that starts in one aperture and ends in another,
+ * does not work. For the vector FLAT_* ops we have detection capability in
+ * the shader for reporting a “memory violation” back to the
+ * SQ block for use in traps.
+ * A memory violation results when an op falls into the hole,
+ * or a payload straddles multiple apertures. The S_LOAD instruction
+ * does not have this detection.
+ *
+ * Apertures cannot overlap.
+ *
+ *
+ *
+ * HSA32 - ATC/IOMMU 32b
+ *
+ * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
+ * instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
+ * will not fit so there is only partial visibility to the GPUVM
+ * space (defined by the aperture) for S_LOAD and FLAT_* ops.
+ * There is no spare (APE1) aperture for HSA32 mode.
+ *
+ *
+ * GPUVM 64b mode (driver model)
+ *
+ * This mode is related to HSA64 in that the difference really is that
+ * the default aperture is GPUVM (ATC==0) and not ATC space.
+ * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
+ * SUA GPUVM mode, but does not support HSA32/HSA64.
+ *
+ *
+ * Device Unified Address - DUA
+ *
+ * Device unified address (DUA) is the name of the feature that maps the
+ * Shared(LDS) memory and Private(Scratch) memory into the overall address
+ * space for use by the new FLAT_* vector memory ops. The Shared and
+ * Private memories are mapped as apertures into the address space,
+ * and the hardware detects when a FLAT_* memory request is to be redirected
+ * to the LDS or Scratch memory when it falls into one of these apertures.
+ * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
+ * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
+ * the Shared/Private apertures are always placed in a limited selection of
+ * options in the hole of the 64b address space. For HSA32 mode, the
+ * Shared/Private apertures can be placed anywhere in the 32b space
+ * except at 0.
+ *
+ *
+ * HSA64 Apertures for FLAT_* vector ops
+ *
+ * For HSA64 SUA mode, the Shared and Private apertures are always placed
+ * in the hole w/ a limited selection of possible locations. The requests
+ * that fall in the private aperture are expanded as a function of the
+ * work-item id (tid) and redirected to the location of the
+ * “hidden private memory”. The hidden private can be placed in either GPUVM
+ * or ATC space. The addresses that fall in the shared aperture are
+ * re-directed to the on-chip LDS memory hardware.
+ *
+ *
+ * HSA32 Apertures for FLAT_* vector ops
+ *
+ * In HSA32 mode, the Private and Shared apertures can be placed anywhere
+ * in the 32b space except at 0 (Private or Shared Base at zero disables
+ * the apertures). If the base address of the apertures is non-zero
+ * (i.e. the apertures exist), the size is always 64KB.
+ *
+ *
+ * GPUVM Apertures for FLAT_* vector ops
+ *
+ * In GPUVM mode, the Shared/Private apertures are specified identically
+ * to HSA64 mode where they are always in the hole at a limited selection
+ * of locations.
+ *
+ *
+ * Aperture Definitions for SUA and DUA
+ *
+ * The interpretation of the aperture register definitions for a given
+ * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
+ * GPUVM64 discussed in previous sections. The mode is first decoded, and
+ * then the remaining register decode is a function of the mode.
+ *
+ *
+ * SUA Mode Decode
+ *
+ * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
+ * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
+ * the SH_MEM_CONFIG:PTR32 bits.
+ *
+ * COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode
+ *
+ * 1 0 HSA64
+ *
+ * 1 1 HSA32
+ *
+ * 0 X GPUVM64
+ *
+ * In general the hardware will ignore the PTR32 bit and treat
+ * as “0” whenever DATA_ATC = “0”, but sw should set PTR32=0
+ * when DATA_ATC=0.
+ *
+ * The DATA_ATC bit is only set for compute dispatches.
+ * All “Draw” dispatches are hardcoded to GPUVM64 mode
+ * for FLAT_* / S_LOAD operations.
+ */
+
+#define MAKE_GPUVM_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
+
+#define MAKE_GPUVM_APP_LIMIT(base) \
+ (((uint64_t)(base) & \
+ 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
+
+#define MAKE_SCRATCH_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x100000000L)
+
+#define MAKE_SCRATCH_APP_LIMIT(base) \
+ (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+
+#define MAKE_LDS_APP_BASE(gpu_num) \
+ (((uint64_t)(gpu_num) << 61) + 0x0)
+#define MAKE_LDS_APP_LIMIT(base) \
+ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
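
The macros above can be checked in isolation; this standalone program (not part of the patch) copies them verbatim and prints the apertures the first GPU (id + 1 == 1) would get, matching the LDS/Scratch/GPUVM layout described in the compute_sh_mem_bases_64bit() comment in kfd_device_queue_manager.c.

#include <stdint.h>
#include <stdio.h>

#define MAKE_GPUVM_APP_BASE(gpu_num) \
	(((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
#define MAKE_GPUVM_APP_LIMIT(base) \
	(((uint64_t)(base) & 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
#define MAKE_SCRATCH_APP_BASE(gpu_num) \
	(((uint64_t)(gpu_num) << 61) + 0x100000000L)
#define MAKE_SCRATCH_APP_LIMIT(base) \
	(((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
#define MAKE_LDS_APP_BASE(gpu_num) \
	(((uint64_t)(gpu_num) << 61) + 0x0)
#define MAKE_LDS_APP_LIMIT(base) \
	(((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)

int main(void)
{
	uint64_t lds = MAKE_LDS_APP_BASE(1);
	uint64_t scratch = MAKE_SCRATCH_APP_BASE(1);
	uint64_t gpuvm = MAKE_GPUVM_APP_BASE(1);

	printf("LDS     %016llX - %016llX\n", (unsigned long long)lds,
	       (unsigned long long)MAKE_LDS_APP_LIMIT(lds));
	printf("Scratch %016llX - %016llX\n", (unsigned long long)scratch,
	       (unsigned long long)MAKE_SCRATCH_APP_LIMIT(scratch));
	printf("GPUVM   %016llX - %016llX\n", (unsigned long long)gpuvm,
	       (unsigned long long)MAKE_GPUVM_APP_LIMIT(gpuvm));
	return 0;
}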
+
+int kfd_init_apertures(struct kfd_process *process)
+{
+ uint8_t id = 0;
+ struct kfd_dev *dev;
+ struct kfd_process_device *pdd;
+
+ mutex_lock(&process->mutex);
+
+ /* Iterate over all devices */
+ while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
+ id < NUM_OF_SUPPORTED_GPUS) {
+
+ pdd = kfd_get_process_device_data(dev, process, 1);
+
+ /*
+ * For a 64-bit process the apertures are statically reserved in
+ * the x86_64 non-canonical process address space.
+ * amdkfd doesn't currently support apertures for 32-bit processes.
+ */
+ if (process->is_32bit_user_mode) {
+ pdd->lds_base = pdd->lds_limit = 0;
+ pdd->gpuvm_base = pdd->gpuvm_limit = 0;
+ pdd->scratch_base = pdd->scratch_limit = 0;
+ } else {
+ /*
+ * node id can't be 0 - the three MSBs of the
+ * aperture shouldn't be 0
+ */
+ pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
+
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
+
+ pdd->gpuvm_limit =
+ MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
+
+ pdd->scratch_limit =
+ MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+
+ dev_dbg(kfd_device, "node id %u\n", id);
+ dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
+
+ id++;
+ }
+
+ mutex_unlock(&process->mutex);
+
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
new file mode 100644
index 000000000000..5b999095a1f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * KFD Interrupts.
+ *
+ * AMD GPUs deliver interrupts by pushing an interrupt description onto the
+ * interrupt ring and then sending an interrupt. KGD receives the interrupt
+ * in ISR and sends us a pointer to each new entry on the interrupt ring.
+ *
+ * We generally can't process interrupt-signaled events from ISR, so we call
+ * out to each interrupt client module (currently only the scheduler) to ask if
+ * each interrupt is interesting. If they return true, then it requires further
+ * processing so we copy it to an internal interrupt ring and call each
+ * interrupt client again from a work-queue.
+ *
+ * There's no acknowledgment for the interrupts we use. The hardware simply
+ * queues a new interrupt each time without waiting.
+ *
+ * The fixed-size internal queue means that it's possible for us to lose
+ * interrupts because we have no back-pressure to the hardware.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_INTERRUPT_RING_SIZE 256
+
+static void interrupt_wq(struct work_struct *);
+
+int kfd_interrupt_init(struct kfd_dev *kfd)
+{
+ void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+ kfd->device_info->ih_ring_entry_size,
+ GFP_KERNEL);
+ if (!interrupt_ring)
+ return -ENOMEM;
+
+ kfd->interrupt_ring = interrupt_ring;
+ kfd->interrupt_ring_size =
+ KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
+ atomic_set(&kfd->interrupt_ring_wptr, 0);
+ atomic_set(&kfd->interrupt_ring_rptr, 0);
+
+ spin_lock_init(&kfd->interrupt_lock);
+
+ INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+
+ kfd->interrupts_active = true;
+
+ /*
+ * After this function returns, the interrupt will be enabled. This
+ * barrier ensures that an interrupt handler running on a different
+ * processor sees all of the writes above.
+ */
+ smp_wmb();
+
+ return 0;
+}
+
+void kfd_interrupt_exit(struct kfd_dev *kfd)
+{
+ /*
+ * Stop the interrupt handler from writing to the ring and scheduling
+ * workqueue items. The spinlock ensures that any interrupt running
+ * after we have unlocked sees interrupts_active = false.
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
+ kfd->interrupts_active = false;
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+
+ /*
+ * Flush_scheduled_work ensures that there are no outstanding
+ * work-queue items that will access interrupt_ring. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_scheduled_work();
+
+ kfree(kfd->interrupt_ring);
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with dequeue_ih_ring_entry.
+ */
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+
+ if ((rptr - wptr) % kfd->interrupt_ring_size ==
+ kfd->device_info->ih_ring_entry_size) {
+ /* This is very bad; the system is likely to hang. */
+ dev_err_ratelimited(kfd_chardev(),
+ "Interrupt ring overflow, dropping interrupt.\n");
+ return false;
+ }
+
+ memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
+
+ wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+ smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
+ atomic_set(&kfd->interrupt_ring_wptr, wptr);
+
+ return true;
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with enqueue_ih_ring_entry.
+ */
+static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+{
+ /*
+ * Assume that work queues have an implicit barrier, i.e. anything that
+ * happened in the ISR before it queued work is visible.
+ */
+
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+
+ if (rptr == wptr)
+ return false;
+
+ memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
+ kfd->device_info->ih_ring_entry_size);
+
+ rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+
+ /*
+ * Ensure the rptr write update is not visible until
+ * memcpy has finished reading.
+ */
+ smp_mb();
+ atomic_set(&kfd->interrupt_ring_rptr, rptr);
+
+ return true;
+}
+
+static void interrupt_wq(struct work_struct *work)
+{
+ struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ interrupt_work);
+
+ uint32_t ih_ring_entry[DIV_ROUND_UP(
+ dev->device_info->ih_ring_entry_size,
+ sizeof(uint32_t))];
+
+ while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+ ;
+}
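
For reference, the rptr/wptr arithmetic used by enqueue_ih_ring_entry() and dequeue_ih_ring_entry() can be exercised on its own. The sketch below is a minimal userspace analogue with illustrative names (struct ring, ring_enqueue, ring_dequeue are not the driver's) and a fixed 4-byte entry; it keeps one slot unused so a full ring is distinguishable from an empty one, which is exactly why the overflow check above compares the pointer difference against a single entry size.

/* Minimal userspace analogue of the interrupt ring (illustrative names,
 * fixed 4-byte entries). One slot is left unused so "full" and "empty"
 * are distinguishable, mirroring the overflow check in the driver. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_BYTES  64   /* power of two, like the driver's ring */
#define ENTRY_BYTES 4

struct ring {
    uint8_t buf[RING_BYTES];
    unsigned int rptr;   /* byte offset, advanced only by the consumer */
    unsigned int wptr;   /* byte offset, advanced only by the producer */
};

static bool ring_enqueue(struct ring *r, const void *entry)
{
    if ((r->rptr - r->wptr) % RING_BYTES == ENTRY_BYTES)
        return false;                     /* full: wptr one slot behind rptr */
    memcpy(r->buf + r->wptr, entry, ENTRY_BYTES);
    r->wptr = (r->wptr + ENTRY_BYTES) % RING_BYTES;
    return true;
}

static bool ring_dequeue(struct ring *r, void *entry)
{
    if (r->rptr == r->wptr)
        return false;                     /* empty */
    memcpy(entry, r->buf + r->rptr, ENTRY_BYTES);
    r->rptr = (r->rptr + ENTRY_BYTES) % RING_BYTES;
    return true;
}

int main(void)
{
    struct ring r = { .rptr = 0, .wptr = 0 };
    uint32_t in = 0xcafe, out = 0;

    ring_enqueue(&r, &in);
    ring_dequeue(&r, &out);
    printf("0x%x\n", (unsigned int)out);  /* prints 0xcafe */
    return 0;
}

The driver additionally pairs smp_wmb() on the producer side with smp_mb() on the consumer side, since the ISR and the work queue run on different CPUs.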
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
new file mode 100644
index 000000000000..935071410724
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
+
+static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size)
+{
+ struct queue_properties prop;
+ int retval;
+ union PM4_MES_TYPE_3_HEADER nop;
+
+ BUG_ON(!kq || !dev);
+ BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+
+ pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+ __func__, type, queue_size);
+
+ nop.opcode = IT_NOP;
+ nop.type = PM4_TYPE_3;
+ nop.u32all |= PM4_COUNT_ZERO;
+
+ kq->dev = dev;
+ kq->nop_packet = nop.u32all;
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+ kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_CIK_HIQ);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (kq->mqd == NULL)
+ return false;
+
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+
+ if (prop.doorbell_ptr == NULL)
+ goto err_get_kernel_doorbell;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ queue_size,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->pq);
+
+ if (retval != 0)
+ goto err_pq_allocate_vidmem;
+
+ kq->pq_kernel_addr = kq->pq->cpu_ptr;
+ kq->pq_gpu_addr = kq->pq->gpu_addr;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(*kq->rptr_kernel),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->rptr_mem);
+
+ if (retval != 0)
+ goto err_rptr_allocate_vidmem;
+
+ kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
+ kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(*kq->wptr_kernel),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->wptr_mem);
+
+ if (retval != 0)
+ goto err_wptr_allocate_vidmem;
+
+ kq->wptr_kernel = kq->wptr_mem->cpu_ptr;
+ kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr;
+
+ memset(kq->pq_kernel_addr, 0, queue_size);
+ memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
+ memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
+
+ prop.queue_size = queue_size;
+ prop.is_interop = false;
+ prop.priority = 1;
+ prop.queue_percent = 100;
+ prop.type = type;
+ prop.vmid = 0;
+ prop.queue_address = kq->pq_gpu_addr;
+ prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
+ prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
+
+ if (init_queue(&kq->queue, prop) != 0)
+ goto err_init_queue;
+
+ kq->queue->device = dev;
+ kq->queue->process = kfd_get_process(current);
+
+ retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ &kq->queue->mqd_mem_obj,
+ &kq->queue->gart_mqd_addr,
+ &kq->queue->properties);
+ if (retval != 0)
+ goto err_init_mqd;
+
+ /* assign HIQ to HQD */
+ if (type == KFD_QUEUE_TYPE_HIQ) {
+ pr_debug("assigning hiq to hqd\n");
+ kq->queue->pipe = KFD_CIK_HIQ_PIPE;
+ kq->queue->queue = KFD_CIK_HIQ_QUEUE;
+ kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
+ kq->queue->queue, NULL);
+ } else {
+ /* allocate fence for DIQ */
+
+ retval = kfd2kgd->allocate_mem(dev->kgd,
+ sizeof(uint32_t),
+ 32,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &kq->fence_mem_obj);
+
+ if (retval != 0)
+ goto err_alloc_fence;
+
+ kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr;
+ kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr;
+ }
+
+ print_queue(kq->queue);
+
+ return true;
+err_alloc_fence:
+err_init_mqd:
+ uninit_queue(kq->queue);
+err_init_queue:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+err_wptr_allocate_vidmem:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+err_rptr_allocate_vidmem:
+ kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+err_pq_allocate_vidmem:
+ pr_err("kfd: error init pq\n");
+ kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
+err_get_kernel_doorbell:
+ pr_err("kfd: error init doorbell");
+ return false;
+
+}
+
+static void uninitialize(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ kq->mqd->destroy_mqd(kq->mqd,
+ NULL,
+ false,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ kq->queue->pipe,
+ kq->queue->queue);
+
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+ kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+ kfd_release_kernel_doorbell(kq->dev,
+ kq->queue->properties.doorbell_ptr);
+ uninit_queue(kq->queue);
+}
+
+static int acquire_packet_buffer(struct kernel_queue *kq,
+ size_t packet_size_in_dwords, unsigned int **buffer_ptr)
+{
+ size_t available_size;
+ size_t queue_size_dwords;
+ uint32_t wptr, rptr;
+ unsigned int *queue_address;
+
+ BUG_ON(!kq || !buffer_ptr);
+
+ rptr = *kq->rptr_kernel;
+ wptr = *kq->wptr_kernel;
+ queue_address = (unsigned int *)kq->pq_kernel_addr;
+ queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
+
+ pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+ __func__, rptr, wptr, queue_address);
+
+ available_size = (rptr - 1 - wptr + queue_size_dwords) %
+ queue_size_dwords;
+
+ if (packet_size_in_dwords >= queue_size_dwords ||
+ packet_size_in_dwords >= available_size) {
+ /*
+ * make sure calling functions know
+ * acquire_packet_buffer() failed
+ */
+ *buffer_ptr = NULL;
+ return -ENOMEM;
+ }
+
+ if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+ while (wptr > 0) {
+ queue_address[wptr] = kq->nop_packet;
+ wptr = (wptr + 1) % queue_size_dwords;
+ }
+ }
+
+ *buffer_ptr = &queue_address[wptr];
+ kq->pending_wptr = wptr + packet_size_in_dwords;
+
+ return 0;
+}
+
+static void submit_packet(struct kernel_queue *kq)
+{
+#ifdef DEBUG
+ int i;
+#endif
+
+ BUG_ON(!kq);
+
+#ifdef DEBUG
+ for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
+ pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
+ if (i % 15 == 0)
+ pr_debug("\n");
+ }
+ pr_debug("\n");
+#endif
+
+ *kq->wptr_kernel = kq->pending_wptr;
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+}
+
+static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
+{
+ unsigned long org_timeout_ms;
+
+ BUG_ON(!kq);
+
+ org_timeout_ms = timeout_ms;
+ timeout_ms += jiffies * 1000 / HZ;
+ while (*kq->wptr_kernel != *kq->rptr_kernel) {
+ if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
+ pr_err("kfd: kernel_queue %s timeout expired %lu\n",
+ __func__, org_timeout_ms);
+ pr_err("kfd: wptr: %d rptr: %d\n",
+ *kq->wptr_kernel, *kq->rptr_kernel);
+ return -ETIME;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static void rollback_packet(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+ kq->pending_wptr = *kq->queue->properties.write_ptr;
+}
+
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ enum kfd_queue_type type)
+{
+ struct kernel_queue *kq;
+
+ BUG_ON(!dev);
+
+ kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+ if (!kq)
+ return NULL;
+
+ kq->initialize = initialize;
+ kq->uninitialize = uninitialize;
+ kq->acquire_packet_buffer = acquire_packet_buffer;
+ kq->submit_packet = submit_packet;
+ kq->sync_with_hw = sync_with_hw;
+ kq->rollback_packet = rollback_packet;
+
+ if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+ pr_err("kfd: failed to init kernel queue\n");
+ kfree(kq);
+ return NULL;
+ }
+ return kq;
+}
+
+void kernel_queue_uninit(struct kernel_queue *kq)
+{
+ BUG_ON(!kq);
+
+ kq->uninitialize(kq);
+ kfree(kq);
+}
+
+static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+{
+ struct kernel_queue *kq;
+ uint32_t *buffer, i;
+ int retval;
+
+ BUG_ON(!dev);
+
+ pr_debug("kfd: starting kernel queue test\n");
+
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
+ BUG_ON(!kq);
+
+ retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+ BUG_ON(retval != 0);
+ for (i = 0; i < 5; i++)
+ buffer[i] = kq->nop_packet;
+ kq->submit_packet(kq);
+ kq->sync_with_hw(kq, 1000);
+
+ pr_debug("kfd: ending kernel queue test\n");
+}
+
+
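
The free-space computation in acquire_packet_buffer() is the standard circular-buffer formula with one dword held back, and the wptr loop above pads the tail of the buffer with NOP packets whenever a packet would otherwise wrap. A small standalone sketch of the formula, with illustrative names and power-of-two sizes assumed (as in the driver):

/* Circular free-space formula from acquire_packet_buffer(), in isolation.
 * Names are illustrative; sizes are powers of two as in the driver, which
 * keeps the unsigned wraparound harmless. */
#include <stdio.h>

static unsigned int available_dwords(unsigned int rptr, unsigned int wptr,
                                     unsigned int queue_size_dwords)
{
    /* One dword is held back so a full queue is not mistaken for empty. */
    return (rptr - 1 - wptr + queue_size_dwords) % queue_size_dwords;
}

int main(void)
{
    /* 256-dword queue, consumer at 10, producer at 250: 15 dwords free. */
    printf("%u\n", available_dwords(10, 250, 256));
    /* Empty queue (rptr == wptr): size - 1 = 255 dwords usable. */
    printf("%u\n", available_dwords(0, 0, 256));
    return 0;
}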
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
new file mode 100644
index 000000000000..dcd2bdb68d44
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_KERNEL_QUEUE_H_
+#define KFD_KERNEL_QUEUE_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+struct kernel_queue {
+ /* interface */
+ bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
+ enum kfd_queue_type type, unsigned int queue_size);
+ void (*uninitialize)(struct kernel_queue *kq);
+ int (*acquire_packet_buffer)(struct kernel_queue *kq,
+ size_t packet_size_in_dwords,
+ unsigned int **buffer_ptr);
+
+ void (*submit_packet)(struct kernel_queue *kq);
+ int (*sync_with_hw)(struct kernel_queue *kq,
+ unsigned long timeout_ms);
+ void (*rollback_packet)(struct kernel_queue *kq);
+
+ /* data */
+ struct kfd_dev *dev;
+ struct mqd_manager *mqd;
+ struct queue *queue;
+ uint32_t pending_wptr;
+ unsigned int nop_packet;
+
+ struct kfd_mem_obj *rptr_mem;
+ uint32_t *rptr_kernel;
+ uint64_t rptr_gpu_addr;
+ struct kfd_mem_obj *wptr_mem;
+ uint32_t *wptr_kernel;
+ uint64_t wptr_gpu_addr;
+ struct kfd_mem_obj *pq;
+ uint64_t pq_gpu_addr;
+ uint32_t *pq_kernel_addr;
+
+ struct kfd_mem_obj *fence_mem_obj;
+ uint64_t fence_gpu_addr;
+ void *fence_kernel_address;
+
+ struct list_head list;
+};
+
+#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
new file mode 100644
index 000000000000..95d5af138e6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
+
+#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
+#define KFD_DRIVER_DATE "20141113"
+#define KFD_DRIVER_MAJOR 0
+#define KFD_DRIVER_MINOR 7
+#define KFD_DRIVER_PATCHLEVEL 0
+
+const struct kfd2kgd_calls *kfd2kgd;
+static const struct kgd2kfd_calls kgd2kfd = {
+ .exit = kgd2kfd_exit,
+ .probe = kgd2kfd_probe,
+ .device_init = kgd2kfd_device_init,
+ .device_exit = kgd2kfd_device_exit,
+ .interrupt = kgd2kfd_interrupt,
+ .suspend = kgd2kfd_suspend,
+ .resume = kgd2kfd_resume,
+};
+
+int sched_policy = KFD_SCHED_POLICY_HWS;
+module_param(sched_policy, int, 0444);
+MODULE_PARM_DESC(sched_policy,
+ "Kernel cmdline parameter that defines the amdkfd scheduling policy");
+
+int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
+module_param(max_num_of_processes, int, 0444);
+MODULE_PARM_DESC(max_num_of_processes,
+ "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
+
+int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
+module_param(max_num_of_queues_per_process, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_process,
+ "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+
+bool kgd2kfd_init(unsigned interface_version,
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f)
+{
+ /*
+ * Only one interface version is supported,
+ * no kfd/kgd version skew allowed.
+ */
+ if (interface_version != KFD_INTERFACE_VERSION)
+ return false;
+
+ /* Protection against multiple amd kgd loads */
+ if (kfd2kgd)
+ return true;
+
+ kfd2kgd = f2g;
+ *g2f = &kgd2kfd;
+
+ return true;
+}
+EXPORT_SYMBOL(kgd2kfd_init);
+
+void kgd2kfd_exit(void)
+{
+}
+
+static int __init kfd_module_init(void)
+{
+ int err;
+
+ kfd2kgd = NULL;
+
+ /* Verify module parameters */
+ if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
+ (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
+ pr_err("kfd: sched_policy has invalid value\n");
+ return -1;
+ }
+
+ /* Verify module parameters */
+ if ((max_num_of_processes < 0) ||
+ (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
+ pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
+ return -1;
+ }
+
+ if ((max_num_of_queues_per_process < 0) ||
+ (max_num_of_queues_per_process >
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
+ pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ return -1;
+ }
+
+ err = kfd_pasid_init();
+ if (err < 0)
+ goto err_pasid;
+
+ err = kfd_chardev_init();
+ if (err < 0)
+ goto err_ioctl;
+
+ err = kfd_topology_init();
+ if (err < 0)
+ goto err_topology;
+
+ kfd_process_create_wq();
+
+ dev_info(kfd_device, "Initialized module\n");
+
+ return 0;
+
+err_topology:
+ kfd_chardev_exit();
+err_ioctl:
+ kfd_pasid_exit();
+err_pasid:
+ return err;
+}
+
+static void __exit kfd_module_exit(void)
+{
+ kfd_process_destroy_wq();
+ kfd_topology_shutdown();
+ kfd_chardev_exit();
+ kfd_pasid_exit();
+ dev_info(kfd_device, "Removed module\n");
+}
+
+module_init(kfd_module_init);
+module_exit(kfd_module_exit);
+
+MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(KFD_DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "."
+ __stringify(KFD_DRIVER_MINOR) "."
+ __stringify(KFD_DRIVER_PATCHLEVEL));
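
kfd_module_init() validates the three module parameters before initializing any subsystem and unwinds in reverse order on failure. A minimal, hypothetical module skeleton showing the same validate-then-initialize pattern (the parameter name and bounds are invented for illustration):

/* Hypothetical skeleton of the validate-then-initialize pattern; the
 * parameter name and bounds are invented for illustration. */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static int example_limit = 32;
module_param(example_limit, int, 0444);
MODULE_PARM_DESC(example_limit, "Illustrative bounded module parameter");

static int __init example_init(void)
{
    /* Reject out-of-range values before touching any other state. */
    if (example_limit < 0 || example_limit > 512) {
        pr_err("example: example_limit out of range\n");
        return -EINVAL;
    }
    return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Returning -EINVAL rather than a bare -1 is the more conventional choice here, since the init return value propagates back to modprobe as the error it sees.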
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
new file mode 100644
index 000000000000..adc31474e786
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "cik_regs.h"
+#include "../../radeon/cik_reg.h"
+
+inline void busy_wait(unsigned long ms)
+{
+ while (time_before(jiffies, ms))
+ cpu_relax();
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+ sizeof(struct cik_mqd),
+ 256,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ /*
+ * Make sure to use the last queue state saved on the mqd when the cp
+ * reassigns the queue, so that when the queue is switched on/off (e.g.
+ * over-subscription or quantum timeout) the context stays consistent.
+ */
+ m->cp_hqd_persistent_state =
+ DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
+ /* Although WinKFD writes this, I suspect it should not be necessary */
+ m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+ * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ BUG_ON(!mm || !mqd);
+ kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+
+}
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
+
+ /*
+ * The queue size field holds log base 2 of the queue size in dwords,
+ * minus 1; ffs() is 1-based, so subtract 1 twice.
+ */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->cp_hqd_iq_rptr = AQL_ENABLE;
+ m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ }
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ pipe_id, queue_id);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+
+ return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ pipe_id, queue_id);
+
+}
+
+/*
+ * HIQ MQD implementation.
+ * The HIQ queue in Kaveri uses the same MQD structure as all the user mode
+ * queues, but with different initial values.
+ */
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ uint64_t addr;
+ struct cik_mqd *m;
+ int retval;
+
+ BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ retval = kfd2kgd->allocate_mem(mm->dev->kgd,
+ sizeof(struct cik_mqd),
+ 256,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+
+ memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+
+ m->header = 0xC0310800;
+ m->compute_pipelinestat_enable = 1;
+ m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+ m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+ PRELOAD_REQ;
+ m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+ QUANTUM_DURATION(10);
+
+ m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+ m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+ m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+
+ /*
+ * Pipe Priority
+ * Identifies the pipe relative priority when this queue is connected
+ * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+ * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+ * 0 = CS_LOW (typically below GFX)
+ * 1 = CS_MEDIUM (typically between HP3D and GFX)
+ * 2 = CS_HIGH (typically above HP3D)
+ */
+ m->cp_hqd_pipe_priority = 1;
+ m->cp_hqd_queue_priority = 15;
+
+ *mqd = m;
+ if (gart_addr)
+ *gart_addr = addr;
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+
+ BUG_ON(!mm || !q || !mqd);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ m = get_mqd(mqd);
+ m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
+ DEFAULT_MIN_AVAIL_SIZE |
+ PRIV_STATE |
+ KMD_QUEUE;
+
+ /*
+ * The queue size field holds log base 2 of the queue size in
+ * dwords, minus 1; ffs() is 1-based, so subtract 1 twice.
+ */
+ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
+ - 1 - 1;
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
+ DOORBELL_OFFSET(q->doorbell_off);
+
+ m->cp_hqd_vmid = q->vmid;
+
+ m->cp_hqd_active = 0;
+ q->is_active = false;
+ if (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0) {
+ m->cp_hqd_active = 1;
+ q->is_active = true;
+ }
+
+ return 0;
+}
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ struct mqd_manager *mqd;
+
+ BUG_ON(!dev);
+ BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ if (!mqd)
+ return NULL;
+
+ mqd->dev = dev;
+
+ switch (type) {
+ case KFD_MQD_TYPE_CIK_CP:
+ case KFD_MQD_TYPE_CIK_COMPUTE:
+ mqd->init_mqd = init_mqd;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ case KFD_MQD_TYPE_CIK_HIQ:
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ break;
+ default:
+ kfree(mqd);
+ return NULL;
+ }
+
+ return mqd;
+}
+
+/* SDMA queues should be implemented here when the CP supports them */
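
Both update_mqd() and update_mqd_hiq() encode the ring size into CP_HQD_PQ_CONTROL as log2 of the size in dwords, minus one, using ffs(). A standalone sketch of that encoding, assuming (as the driver does) that the queue size in dwords is a power of two; the function name is illustrative:

/* Queue-size encoding written into CP_HQD_PQ_CONTROL: log2 of the size in
 * dwords, minus one. Assumes a power-of-two dword count, as the driver does;
 * the function name is illustrative. */
#include <stdio.h>
#include <strings.h>   /* ffs() */

static unsigned int pq_size_field(unsigned int queue_size_bytes)
{
    unsigned int dwords = queue_size_bytes / sizeof(unsigned int);

    /* ffs() is 1-based, so for 2^n it returns n + 1: subtract one for the
     * log2 value and one more for the hardware's "minus one" encoding. */
    return ffs(dwords) - 1 - 1;
}

int main(void)
{
    printf("%u\n", pq_size_field(4096));    /* 1024 dwords  -> 9  */
    printf("%u\n", pq_size_field(65536));   /* 16384 dwords -> 13 */
    return 0;
}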
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
new file mode 100644
index 000000000000..213a71e0b6c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_MQD_MANAGER_H_
+#define KFD_MQD_MANAGER_H_
+
+#include "kfd_priv.h"
+
+/**
+ * struct mqd_manager
+ *
+ * @init_mqd: Allocates the mqd buffer on local gpu memory and initializes it.
+ *
+ * @load_mqd: Loads the mqd to a concrete hqd slot. Used only in no cp
+ * scheduling mode.
+ *
+ * @update_mqd: Handles an update call for the MQD.
+ *
+ * @destroy_mqd: Destroys the HQD slot and thereby preempts the relevant queue.
+ * Used only for no cp scheduling.
+ *
+ * @uninit_mqd: Releases the mqd buffer from local gpu memory.
+ *
+ * @is_occupied: Checks if the relevant HQD slot is occupied.
+ *
+ * @mqd_mutex: Mqd manager mutex.
+ *
+ * @dev: The kfd device structure coupled with this module.
+ *
+ * MQD stands for Memory Queue Descriptor, which represents the current queue
+ * state in memory and initializes the HQD (Hardware Queue Descriptor) state.
+ * This structure is in effect a base class for the different MQD structures
+ * of the various ASICs that should be supported in the future.
+ * The base class also contains all the MQD-specific operations.
+ * Another important point is that each queue has an MQD that keeps its state
+ * (or context) across each preemption or reassignment.
+ * Basically there is one instance of the mqd manager class per MQD type per
+ * ASIC. Currently the kfd driver supports only Kaveri, so there is an
+ * instance per KFD_MQD_TYPE for each device.
+ *
+ */
+
+struct mqd_manager {
+ int (*init_mqd)(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q);
+
+ int (*load_mqd)(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr);
+
+ int (*update_mqd)(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q);
+
+ int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+
+ void (*uninit_mqd)(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj);
+
+ bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id);
+
+ struct mutex mqd_mutex;
+ struct kfd_dev *dev;
+};
+
+#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
new file mode 100644
index 000000000000..5ce9233d2004
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_opcodes.h"
+
+static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
+ unsigned int buffer_size_bytes)
+{
+ unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
+
+ BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+ *wptr = temp;
+}
+
+static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
+{
+ union PM4_MES_TYPE_3_HEADER header;
+
+ header.u32all = 0;
+ header.opcode = opcode;
+ header.count = packet_size/sizeof(uint32_t) - 2;
+ header.type = PM4_TYPE_3;
+
+ return header.u32all;
+}
+
+static void pm_calc_rlib_size(struct packet_manager *pm,
+ unsigned int *rlib_size,
+ bool *over_subscription)
+{
+ unsigned int process_count, queue_count;
+
+ BUG_ON(!pm || !rlib_size || !over_subscription);
+
+ process_count = pm->dqm->processes_count;
+ queue_count = pm->dqm->queue_count;
+
+ /* check if there is over subscription */
+ *over_subscription = false;
+ if ((process_count > 1) ||
+ queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
+ *over_subscription = true;
+ pr_debug("kfd: over subscribed runlist\n");
+ }
+
+ /* calculate run list ib allocation size */
+ *rlib_size = process_count * sizeof(struct pm4_map_process) +
+ queue_count * sizeof(struct pm4_map_queues);
+
+ /*
+ * Increase the allocation size in case we need a chained runlist
+ * when over-subscribed.
+ */
+ if (*over_subscription)
+ *rlib_size += sizeof(struct pm4_runlist);
+
+ pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+}
+
+static int pm_allocate_runlist_ib(struct packet_manager *pm,
+ unsigned int **rl_buffer,
+ uint64_t *rl_gpu_buffer,
+ unsigned int *rl_buffer_size,
+ bool *is_over_subscription)
+{
+ int retval;
+
+ BUG_ON(!pm);
+ BUG_ON(pm->allocated == true);
+ BUG_ON(is_over_subscription == NULL);
+
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+
+ retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
+ *rl_buffer_size,
+ PAGE_SIZE,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
+ (struct kgd_mem **) &pm->ib_buffer_obj);
+
+ if (retval != 0) {
+ pr_err("kfd: failed to allocate runlist IB\n");
+ return retval;
+ }
+
+ *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
+ *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
+
+ memset(*rl_buffer, 0, *rl_buffer_size);
+ pm->allocated = true;
+ return retval;
+}
+
+static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
+ uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+ struct pm4_runlist *packet;
+
+ BUG_ON(!pm || !buffer || !ib);
+
+ packet = (struct pm4_runlist *)buffer;
+
+ memset(buffer, 0, sizeof(struct pm4_runlist));
+ packet->header.u32all = build_pm4_header(IT_RUN_LIST,
+ sizeof(struct pm4_runlist));
+
+ packet->bitfields4.ib_size = ib_size_in_dwords;
+ packet->bitfields4.chain = chain ? 1 : 0;
+ packet->bitfields4.offload_polling = 0;
+ packet->bitfields4.valid = 1;
+ packet->ordinal2 = lower_32_bits(ib);
+ packet->bitfields3.ib_base_hi = upper_32_bits(ib);
+
+ return 0;
+}
+
+static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
+ struct qcm_process_device *qpd)
+{
+ struct pm4_map_process *packet;
+ struct queue *cur;
+ uint32_t num_queues;
+
+ BUG_ON(!pm || !buffer || !qpd);
+
+ packet = (struct pm4_map_process *)buffer;
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ memset(buffer, 0, sizeof(struct pm4_map_process));
+
+ packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
+ sizeof(struct pm4_map_process));
+ packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+ packet->bitfields2.process_quantum = 1;
+ packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields3.page_table_base = qpd->page_table_base;
+ packet->bitfields10.gds_size = qpd->gds_size;
+ packet->bitfields10.num_gws = qpd->num_gws;
+ packet->bitfields10.num_oac = qpd->num_oac;
+ num_queues = 0;
+ list_for_each_entry(cur, &qpd->queues_list, list)
+ num_queues++;
+ packet->bitfields10.num_queues = num_queues;
+
+ packet->sh_mem_config = qpd->sh_mem_config;
+ packet->sh_mem_bases = qpd->sh_mem_bases;
+ packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+ packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+
+ packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+ packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+ return 0;
+}
+
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
+ struct queue *q)
+{
+ struct pm4_map_queues *packet;
+
+ BUG_ON(!pm || !buffer || !q);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ packet = (struct pm4_map_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_map_queues));
+
+ packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_map_queues));
+ packet->bitfields2.alloc_format =
+ alloc_format__mes_map_queues__one_per_pipe;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
+
+ packet->bitfields2.vidmem = (q->properties.is_interop) ?
+ vidmem__mes_map_queues__uses_video_memory :
+ vidmem__mes_map_queues__uses_no_video_memory;
+
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__compute;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_map_queues__sdma0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
+ q->properties.doorbell_off;
+
+ packet->mes_map_queues_ordinals[0].mqd_addr_lo =
+ lower_32_bits(q->gart_mqd_addr);
+
+ packet->mes_map_queues_ordinals[0].mqd_addr_hi =
+ upper_32_bits(q->gart_mqd_addr);
+
+ packet->mes_map_queues_ordinals[0].wptr_addr_lo =
+ lower_32_bits((uint64_t)q->properties.write_ptr);
+
+ packet->mes_map_queues_ordinals[0].wptr_addr_hi =
+ upper_32_bits((uint64_t)q->properties.write_ptr);
+
+ return 0;
+}
+
+static int pm_create_runlist_ib(struct packet_manager *pm,
+ struct list_head *queues,
+ uint64_t *rl_gpu_addr,
+ size_t *rl_size_bytes)
+{
+ unsigned int alloc_size_bytes;
+ unsigned int *rl_buffer, rl_wptr, i;
+ int retval, processes_mapped;
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+ struct kernel_queue *kq;
+ bool is_over_subscription;
+
+ BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
+
+ rl_wptr = retval = processes_mapped = 0;
+
+ retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
+ &alloc_size_bytes, &is_over_subscription);
+ if (retval != 0)
+ return retval;
+
+ *rl_size_bytes = alloc_size_bytes;
+
+ pr_debug("kfd: In func %s\n", __func__);
+ pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+ pm->dqm->processes_count, pm->dqm->queue_count);
+
+ /* build the run list ib packet */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ /* build map process packet */
+ if (processes_mapped >= pm->dqm->processes_count) {
+ pr_debug("kfd: not enough space left in runlist IB\n");
+ pm_release_ib(pm);
+ return -ENOMEM;
+ }
+ retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
+ if (retval != 0)
+ return retval;
+ processes_mapped++;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+ alloc_size_bytes);
+
+ list_for_each_entry(kq, &qpd->priv_queue_list, list) {
+ if (kq->queue->properties.is_active != true)
+ continue;
+ retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
+ kq->queue);
+ if (retval != 0)
+ return retval;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
+ }
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.is_active != true)
+ continue;
+ retval = pm_create_map_queue(pm,
+ &rl_buffer[rl_wptr], q);
+ if (retval != 0)
+ return retval;
+ inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
+ }
+ }
+
+ pr_debug("kfd: finished map process and queues to runlist\n");
+
+ if (is_over_subscription)
+ pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
+ alloc_size_bytes / sizeof(uint32_t), true);
+
+ for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
+ pr_debug("0x%2X ", rl_buffer[i]);
+ pr_debug("\n");
+
+ return 0;
+}
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm);
+
+ pm->dqm = dqm;
+ mutex_init(&pm->lock);
+ pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
+ if (pm->priv_queue == NULL) {
+ mutex_destroy(&pm->lock);
+ return -ENOMEM;
+ }
+ pm->allocated = false;
+
+ return 0;
+}
+
+void pm_uninit(struct packet_manager *pm)
+{
+ BUG_ON(!pm);
+
+ mutex_destroy(&pm->lock);
+ kernel_queue_uninit(pm->priv_queue);
+}
+
+int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res)
+{
+ struct pm4_set_resources *packet;
+
+ BUG_ON(!pm || !res);
+
+ pr_debug("kfd: In func %s\n", __func__);
+
+ mutex_lock(&pm->lock);
+ pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ sizeof(*packet) / sizeof(uint32_t),
+ (unsigned int **)&packet);
+ if (packet == NULL) {
+ mutex_unlock(&pm->lock);
+ pr_err("kfd: failed to allocate buffer on kernel queue\n");
+ return -ENOMEM;
+ }
+
+ memset(packet, 0, sizeof(struct pm4_set_resources));
+ packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_set_resources));
+
+ packet->bitfields2.queue_type =
+ queue_type__mes_set_resources__hsa_interface_queue_hiq;
+ packet->bitfields2.vmid_mask = res->vmid_mask;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+ packet->bitfields7.oac_mask = res->oac_mask;
+ packet->bitfields8.gds_heap_base = res->gds_heap_base;
+ packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+
+ return 0;
+}
+
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
+{
+ uint64_t rl_gpu_ib_addr;
+ uint32_t *rl_buffer;
+ size_t rl_ib_size, packet_size_dwords;
+ int retval;
+
+ BUG_ON(!pm || !dqm_queues);
+
+ retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
+ &rl_ib_size);
+ if (retval != 0)
+ goto fail_create_runlist_ib;
+
+ pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+
+ packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+ mutex_lock(&pm->lock);
+
+ retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+ packet_size_dwords, &rl_buffer);
+ if (retval != 0)
+ goto fail_acquire_packet_buffer;
+
+ retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
+ rl_ib_size / sizeof(uint32_t), false);
+ if (retval != 0)
+ goto fail_create_runlist;
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+
+ return retval;
+
+fail_create_runlist:
+ pm->priv_queue->rollback_packet(pm->priv_queue);
+fail_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+fail_create_runlist_ib:
+ if (pm->allocated == true)
+ pm_release_ib(pm);
+ return retval;
+}
+
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ uint32_t fence_value)
+{
+ int retval;
+ struct pm4_query_status *packet;
+
+ BUG_ON(!pm || !fence_address);
+
+ mutex_lock(&pm->lock);
+ retval = pm->priv_queue->acquire_packet_buffer(
+ pm->priv_queue,
+ sizeof(struct pm4_query_status) / sizeof(uint32_t),
+ (unsigned int **)&packet);
+ if (retval != 0)
+ goto fail_acquire_packet_buffer;
+
+ packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_query_status));
+
+ packet->bitfields2.context_id = 0;
+ packet->bitfields2.interrupt_sel =
+ interrupt_sel__mes_query_status__completion_status;
+ packet->bitfields2.command =
+ command__mes_query_status__fence_only_after_write_ack;
+
+ packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+ packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+ packet->data_hi = upper_32_bits((uint64_t)fence_value);
+ packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+ mutex_unlock(&pm->lock);
+
+ return 0;
+
+fail_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ enum kfd_preempt_type_filter mode,
+ uint32_t filter_param, bool reset,
+ unsigned int sdma_engine)
+{
+ int retval;
+ uint32_t *buffer;
+ struct pm4_unmap_queues *packet;
+
+ BUG_ON(!pm);
+
+ mutex_lock(&pm->lock);
+ retval = pm->priv_queue->acquire_packet_buffer(
+ pm->priv_queue,
+ sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+ &buffer);
+ if (retval != 0)
+ goto err_acquire_packet_buffer;
+
+ packet = (struct pm4_unmap_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_unmap_queues));
+
+ packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
+ sizeof(struct pm4_unmap_queues));
+ switch (type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__compute;
+ break;
+ case KFD_QUEUE_TYPE_SDMA:
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (reset)
+ packet->bitfields2.action =
+ action__mes_unmap_queues__reset_queues;
+ else
+ packet->bitfields2.action =
+ action__mes_unmap_queues__preempt_queues;
+
+ switch (mode) {
+ case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+ packet->bitfields2.num_queues = 1;
+ packet->bitfields3b.doorbell_offset0 = filter_param;
+ break;
+ case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+ packet->bitfields3a.pasid = filter_param;
+ break;
+ case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ pm->priv_queue->submit_packet(pm->priv_queue);
+ pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+
+ mutex_unlock(&pm->lock);
+ return 0;
+
+err_acquire_packet_buffer:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
+
+void pm_release_ib(struct packet_manager *pm)
+{
+ BUG_ON(!pm);
+
+ mutex_lock(&pm->lock);
+ if (pm->allocated) {
+ kfd2kgd->free_mem(pm->dqm->dev->kgd,
+ (struct kgd_mem *) pm->ib_buffer_obj);
+ pm->allocated = false;
+ }
+ mutex_unlock(&pm->lock);
+}
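
build_pm4_header() fills the PM4 type-3 header whose bitfield layout is defined in kfd_pm4_headers.h: opcode in bits 15:8, a count field equal to the packet length in dwords minus two in bits 29:16, and type 3 in bits 31:30. A standalone sketch that packs the same layout with explicit shifts (little-endian bitfield order assumed, matching the platforms the driver targets; the function name is illustrative):

/* Explicit-shift version of the PM4 type-3 header packed by
 * build_pm4_header(); bit positions follow the PM4_MES_TYPE_3_HEADER
 * union (little-endian bitfield order assumed). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pm4_type3_header(uint8_t opcode, size_t packet_size_bytes)
{
    uint32_t count = (uint32_t)(packet_size_bytes / sizeof(uint32_t)) - 2;

    return (3u << 30)                |  /* type   : bits 31:30, always 3   */
           ((count & 0x3fffu) << 16) |  /* count  : bits 29:16, dwords - 2 */
           ((uint32_t)opcode << 8);     /* opcode : bits 15:8; 7:0 reserved */
}

int main(void)
{
    /* A 6-dword packet encodes a count of 4; opcode 0x10 is arbitrary. */
    printf("0x%08x\n", pm4_type3_header(0x10, 6 * sizeof(uint32_t)));
    return 0;
}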
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
new file mode 100644
index 000000000000..71699ad97d74
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "kfd_priv.h"
+
+static unsigned long *pasid_bitmap;
+static unsigned int pasid_limit;
+static DEFINE_MUTEX(pasid_mutex);
+
+int kfd_pasid_init(void)
+{
+ pasid_limit = max_num_of_processes;
+
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+ if (!pasid_bitmap)
+ return -ENOMEM;
+
+ set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
+
+ return 0;
+}
+
+void kfd_pasid_exit(void)
+{
+ kfree(pasid_bitmap);
+}
+
+bool kfd_set_pasid_limit(unsigned int new_limit)
+{
+ if (new_limit < pasid_limit) {
+ bool ok;
+
+ mutex_lock(&pasid_mutex);
+
+ /* ensure that no pasids >= new_limit are in-use */
+ ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
+ pasid_limit);
+ if (ok)
+ pasid_limit = new_limit;
+
+ mutex_unlock(&pasid_mutex);
+
+ return ok;
+ }
+
+ return true;
+}
+
+inline unsigned int kfd_get_pasid_limit(void)
+{
+ return pasid_limit;
+}
+
+unsigned int kfd_pasid_alloc(void)
+{
+ unsigned int found;
+
+ mutex_lock(&pasid_mutex);
+
+ found = find_first_zero_bit(pasid_bitmap, pasid_limit);
+ if (found == pasid_limit)
+ found = 0;
+ else
+ set_bit(found, pasid_bitmap);
+
+ mutex_unlock(&pasid_mutex);
+
+ return found;
+}
+
+void kfd_pasid_free(unsigned int pasid)
+{
+ BUG_ON(pasid == 0 || pasid >= pasid_limit);
+ clear_bit(pasid, pasid_bitmap);
+}
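
kfd_pasid_alloc() treats the PASID space as a bitmap: bit 0 is pre-set so PASID 0 stays reserved, allocation takes the first clear bit, and a return value of 0 doubles as the out-of-PASIDs signal. A minimal userspace sketch of the same scheme (a 64-entry bitmap and these names are illustrative; the driver uses the kernel bitmap helpers under a mutex instead):

/* Userspace sketch of the bitmap ID allocator: bit 0 pre-set so ID 0
 * stays reserved, first clear bit wins, and 0 doubles as "none left". */
#include <stdio.h>

#define ID_LIMIT 64
static unsigned long long id_bitmap = 1ULL;   /* bit 0: reserved */

static unsigned int id_alloc(void)
{
    for (unsigned int id = 1; id < ID_LIMIT; id++) {
        if (!(id_bitmap & (1ULL << id))) {
            id_bitmap |= 1ULL << id;
            return id;
        }
    }
    return 0;                                 /* exhausted */
}

static void id_free(unsigned int id)
{
    if (id == 0 || id >= ID_LIMIT)
        return;
    id_bitmap &= ~(1ULL << id);
}

int main(void)
{
    unsigned int a = id_alloc(), b = id_alloc();

    printf("%u %u\n", a, b);     /* 1 2 */
    id_free(a);
    printf("%u\n", id_alloc());  /* 1 is reused */
    return 0;
}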
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
new file mode 100644
index 000000000000..071ad5724bd2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_PM4_HEADERS_H_
+#define KFD_PM4_HEADERS_H_
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+ struct {
+ uint32_t reserved1:8; /* < reserved */
+ uint32_t opcode:8; /* < IT opcode */
+ uint32_t count:14; /* < number of DWORDs - 1
+ * in the information body.
+ */
+ uint32_t type:2; /* < packet identifier.
+ * It should be 3 for type 3 packets
+ */
+ };
+ uint32_t u32all;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/* --------------------MES_SET_RESOURCES-------------------- */
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum set_resources_queue_type_enum {
+ queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+ queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+ queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+struct pm4_set_resources {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t vmid_mask:16;
+ uint32_t unmap_latency:8;
+ uint32_t reserved1:5;
+ enum set_resources_queue_type_enum queue_type:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ uint32_t queue_mask_lo;
+ uint32_t queue_mask_hi;
+ uint32_t gws_mask_lo;
+ uint32_t gws_mask_hi;
+
+ union {
+ struct {
+ uint32_t oac_mask:16;
+ uint32_t reserved2:16;
+ } bitfields7;
+ uint32_t ordinal7;
+ };
+
+ union {
+ struct {
+ uint32_t gds_heap_base:6;
+ uint32_t reserved3:5;
+ uint32_t gds_heap_size:6;
+ uint32_t reserved4:15;
+ } bitfields8;
+ uint32_t ordinal8;
+ };
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST-------------------- */
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_runlist {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:2;
+ uint32_t ib_base_lo:30;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t ib_base_hi:16;
+ uint32_t reserved2:16;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t ib_size:20;
+ uint32_t chain:1;
+ uint32_t offload_polling:1;
+ uint32_t reserved3:1;
+ uint32_t valid:1;
+ uint32_t reserved4:8;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS-------------------- */
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_map_process {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t page_table_base:28;
+ uint32_t reserved3:4;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_mem_config;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
+
+ union {
+ struct {
+ uint32_t num_gws:6;
+ uint32_t reserved4:2;
+ uint32_t num_oac:4;
+ uint32_t reserved5:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields10;
+ uint32_t ordinal10;
+ };
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_DEFINED
+#define PM4_MES_MAP_QUEUES_DEFINED
+enum map_queues_queue_sel_enum {
+ queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
+ queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
+ queue_sel__mes_map_queues__enable_process_queues = 2
+};
+
+enum map_queues_vidmem_enum {
+ vidmem__mes_map_queues__uses_no_video_memory = 0,
+ vidmem__mes_map_queues__uses_video_memory = 1
+};
+
+enum map_queues_alloc_format_enum {
+ alloc_format__mes_map_queues__one_per_pipe = 0,
+ alloc_format__mes_map_queues__all_on_one_pipe = 1
+};
+
+enum map_queues_engine_sel_enum {
+ engine_sel__mes_map_queues__compute = 0,
+ engine_sel__mes_map_queues__sdma0 = 2,
+ engine_sel__mes_map_queues__sdma1 = 3
+};
+
+struct pm4_map_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t reserved1:4;
+ enum map_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:2;
+ uint32_t vmid:4;
+ uint32_t reserved3:4;
+ enum map_queues_vidmem_enum vidmem:2;
+ uint32_t reserved4:6;
+ enum map_queues_alloc_format_enum alloc_format:2;
+ enum map_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ struct {
+ union {
+ struct {
+ uint32_t reserved5:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved6:3;
+ uint32_t queue:6;
+ } bitfields3;
+ uint32_t ordinal3;
+ };
+
+ uint32_t mqd_addr_lo;
+ uint32_t mqd_addr_hi;
+ uint32_t wptr_addr_lo;
+ uint32_t wptr_addr_hi;
+
+ } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
+
+};
+#endif
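As the trailing comment notes, a MAP_QUEUES packet carries 1..N of the per-queue ordinal groups, so its size grows with the number of queues being mapped. A small sketch of that size calculation, assuming num_queues >= 1 (the helper name is illustrative):

static inline size_t map_queues_packet_size(unsigned int num_queues)
{
	struct pm4_map_queues *pkt = NULL;

	/* one group is embedded in the struct; each extra queue adds one */
	return sizeof(*pkt) +
	       (num_queues - 1) * sizeof(pkt->mes_map_queues_ordinals[0]);
}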
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum query_status_interrupt_sel_enum {
+ interrupt_sel__mes_query_status__completion_status = 0,
+ interrupt_sel__mes_query_status__process_status = 1,
+ interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum query_status_command_enum {
+ command__mes_query_status__interrupt_only = 0,
+ command__mes_query_status__fence_only_immediate = 1,
+ command__mes_query_status__fence_only_after_write_ack = 2,
+ command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum query_status_engine_sel_enum {
+ engine_sel__mes_query_status__compute = 0,
+ engine_sel__mes_query_status__sdma0_queue = 2,
+ engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_query_status {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ uint32_t context_id:28;
+ enum query_status_interrupt_sel_enum interrupt_sel:2;
+ enum query_status_command_enum command:2;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved1:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved2:2;
+ uint32_t doorbell_offset:21;
+ uint32_t reserved3:3;
+ enum query_status_engine_sel_enum engine_sel:3;
+ uint32_t reserved4:3;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t data_lo;
+ uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum unmap_queues_action_enum {
+ action__mes_unmap_queues__preempt_queues = 0,
+ action__mes_unmap_queues__reset_queues = 1,
+ action__mes_unmap_queues__disable_process_queues = 2
+};
+
+enum unmap_queues_queue_sel_enum {
+ queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+ queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+ queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
+};
+
+enum unmap_queues_engine_sel_enum {
+ engine_sel__mes_unmap_queues__compute = 0,
+ engine_sel__mes_unmap_queues__sdma0 = 2,
+ engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_unmap_queues {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
+ };
+
+ union {
+ struct {
+ enum unmap_queues_action_enum action:2;
+ uint32_t reserved1:2;
+ enum unmap_queues_queue_sel_enum queue_sel:2;
+ uint32_t reserved2:20;
+ enum unmap_queues_engine_sel_enum engine_sel:3;
+ uint32_t num_queues:3;
+ } bitfields2;
+ uint32_t ordinal2;
+ };
+
+ union {
+ struct {
+ uint32_t pasid:16;
+ uint32_t reserved3:16;
+ } bitfields3a;
+ struct {
+ uint32_t reserved4:2;
+ uint32_t doorbell_offset0:21;
+ uint32_t reserved5:9;
+ } bitfields3b;
+ uint32_t ordinal3;
+ };
+
+ union {
+ struct {
+ uint32_t reserved6:2;
+ uint32_t doorbell_offset1:21;
+ uint32_t reserved7:9;
+ } bitfields4;
+ uint32_t ordinal4;
+ };
+
+ union {
+ struct {
+ uint32_t reserved8:2;
+ uint32_t doorbell_offset2:21;
+ uint32_t reserved9:9;
+ } bitfields5;
+ uint32_t ordinal5;
+ };
+
+ union {
+ struct {
+ uint32_t reserved10:2;
+ uint32_t doorbell_offset3:21;
+ uint32_t reserved11:9;
+ } bitfields6;
+ uint32_t ordinal6;
+ };
+
+};
+#endif
+
+enum {
+ CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
+#endif /* KFD_PM4_HEADERS_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
new file mode 100644
index 000000000000..b72fa3b8c2d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef KFD_PM4_OPCODES_H
+#define KFD_PM4_OPCODES_H
+
+enum it_opcode_type {
+ IT_NOP = 0x10,
+ IT_SET_BASE = 0x11,
+ IT_CLEAR_STATE = 0x12,
+ IT_INDEX_BUFFER_SIZE = 0x13,
+ IT_DISPATCH_DIRECT = 0x15,
+ IT_DISPATCH_INDIRECT = 0x16,
+ IT_ATOMIC_GDS = 0x1D,
+ IT_OCCLUSION_QUERY = 0x1F,
+ IT_SET_PREDICATION = 0x20,
+ IT_REG_RMW = 0x21,
+ IT_COND_EXEC = 0x22,
+ IT_PRED_EXEC = 0x23,
+ IT_DRAW_INDIRECT = 0x24,
+ IT_DRAW_INDEX_INDIRECT = 0x25,
+ IT_INDEX_BASE = 0x26,
+ IT_DRAW_INDEX_2 = 0x27,
+ IT_CONTEXT_CONTROL = 0x28,
+ IT_INDEX_TYPE = 0x2A,
+ IT_DRAW_INDIRECT_MULTI = 0x2C,
+ IT_DRAW_INDEX_AUTO = 0x2D,
+ IT_NUM_INSTANCES = 0x2F,
+ IT_DRAW_INDEX_MULTI_AUTO = 0x30,
+ IT_INDIRECT_BUFFER_CNST = 0x33,
+ IT_STRMOUT_BUFFER_UPDATE = 0x34,
+ IT_DRAW_INDEX_OFFSET_2 = 0x35,
+ IT_DRAW_PREAMBLE = 0x36,
+ IT_WRITE_DATA = 0x37,
+ IT_DRAW_INDEX_INDIRECT_MULTI = 0x38,
+ IT_MEM_SEMAPHORE = 0x39,
+ IT_COPY_DW = 0x3B,
+ IT_WAIT_REG_MEM = 0x3C,
+ IT_INDIRECT_BUFFER = 0x3F,
+ IT_COPY_DATA = 0x40,
+ IT_PFP_SYNC_ME = 0x42,
+ IT_SURFACE_SYNC = 0x43,
+ IT_COND_WRITE = 0x45,
+ IT_EVENT_WRITE = 0x46,
+ IT_EVENT_WRITE_EOP = 0x47,
+ IT_EVENT_WRITE_EOS = 0x48,
+ IT_RELEASE_MEM = 0x49,
+ IT_PREAMBLE_CNTL = 0x4A,
+ IT_DMA_DATA = 0x50,
+ IT_ACQUIRE_MEM = 0x58,
+ IT_REWIND = 0x59,
+ IT_LOAD_UCONFIG_REG = 0x5E,
+ IT_LOAD_SH_REG = 0x5F,
+ IT_LOAD_CONFIG_REG = 0x60,
+ IT_LOAD_CONTEXT_REG = 0x61,
+ IT_SET_CONFIG_REG = 0x68,
+ IT_SET_CONTEXT_REG = 0x69,
+ IT_SET_CONTEXT_REG_INDIRECT = 0x73,
+ IT_SET_SH_REG = 0x76,
+ IT_SET_SH_REG_OFFSET = 0x77,
+ IT_SET_QUEUE_REG = 0x78,
+ IT_SET_UCONFIG_REG = 0x79,
+ IT_SCRATCH_RAM_WRITE = 0x7D,
+ IT_SCRATCH_RAM_READ = 0x7E,
+ IT_LOAD_CONST_RAM = 0x80,
+ IT_WRITE_CONST_RAM = 0x81,
+ IT_DUMP_CONST_RAM = 0x83,
+ IT_INCREMENT_CE_COUNTER = 0x84,
+ IT_INCREMENT_DE_COUNTER = 0x85,
+ IT_WAIT_ON_CE_COUNTER = 0x86,
+ IT_WAIT_ON_DE_COUNTER_DIFF = 0x88,
+ IT_SWITCH_BUFFER = 0x8B,
+ IT_SET_RESOURCES = 0xA0,
+ IT_MAP_PROCESS = 0xA1,
+ IT_MAP_QUEUES = 0xA2,
+ IT_UNMAP_QUEUES = 0xA3,
+ IT_QUERY_STATUS = 0xA4,
+ IT_RUN_LIST = 0xA5,
+};
+
+#define PM4_TYPE_0 0
+#define PM4_TYPE_2 2
+#define PM4_TYPE_3 3
+
+#endif /* KFD_PM4_OPCODES_H */
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
new file mode 100644
index 000000000000..f9fb81e3bb09
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_PRIV_H_INCLUDED
+#define KFD_PRIV_H_INCLUDED
+
+#include <linux/hashtable.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/kfd_ioctl.h>
+#include <kgd_kfd_interface.h>
+
+#define KFD_SYSFS_FILE_MODE 0444
+
+/*
+ * When working with the cp scheduler we should assign the HIQ, manually or
+ * via the radeon driver, to a fixed hqd slot. These are the fixed HIQ hqd
+ * slot definitions for Kaveri. On Kaveri only the first ME's queues
+ * participate in cp scheduling; with that in mind we place the HIQ slot in
+ * the second ME.
+ */
+#define KFD_CIK_HIQ_PIPE 4
+#define KFD_CIK_HIQ_QUEUE 0
+
+/* GPU ID hash width in bits */
+#define KFD_GPU_ID_HASH_WIDTH 16
+
+/* Macro for allocating structures */
+#define kfd_alloc_struct(ptr_to_struct) \
+ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
+
+/* Kernel module parameter to specify maximum number of supported processes */
+extern int max_num_of_processes;
+
+#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
+#define KFD_MAX_NUM_OF_PROCESSES 512
+
+/*
+ * Kernel module parameter to specify maximum number of supported queues
+ * per process
+ */
+extern int max_num_of_queues_per_process;
+
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/* Kernel module parameter to specify the scheduling policy */
+extern int sched_policy;
+
+/**
+ * enum kfd_sched_policy
+ *
+ * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
+ * scheduling. In this scheduling mode the firmware code schedules the user
+ * mode queues and the kernel queues such as HIQ and DIQ.
+ * The HIQ is a special queue that dispatches the configuration and the list
+ * of currently running user mode queues to the cp.
+ * The DIQ is a debugging queue that dispatches debugging commands to the
+ * firmware.
+ * In this scheduling mode over-subscription of user mode queues is
+ * enabled.
+ *
+ * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
+ * over-subscription feature disabled.
+ *
+ * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
+ * the command processor registers and the queues "manually". This mode is
+ * used *ONLY* for debugging purposes.
+ *
+ */
+enum kfd_sched_policy {
+ KFD_SCHED_POLICY_HWS = 0,
+ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
+ KFD_SCHED_POLICY_NO_HWS
+};
+
+enum cache_policy {
+ cache_policy_coherent,
+ cache_policy_noncoherent
+};
+
+struct kfd_device_info {
+ unsigned int max_pasid_bits;
+ size_t ih_ring_entry_size;
+ uint16_t mqd_size_aligned;
+};
+
+struct kfd_dev {
+ struct kgd_dev *kgd;
+
+ const struct kfd_device_info *device_info;
+ struct pci_dev *pdev;
+
+ unsigned int id; /* topology stub index */
+
+ phys_addr_t doorbell_base; /* Start of actual doorbells used by
+ * KFD. It is aligned for mapping
+ * into user mode
+ */
+ size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
+ * to HW doorbell, GFX reserved some
+ * at the start)
+ */
+ size_t doorbell_process_limit; /* Number of processes we have doorbell
+ * space for.
+ */
+ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer to the doorbell
+ * page used by kernel queues
+ */
+
+ struct kgd2kfd_shared_resources shared_resources;
+
+ void *interrupt_ring;
+ size_t interrupt_ring_size;
+ atomic_t interrupt_ring_rptr;
+ atomic_t interrupt_ring_wptr;
+ struct work_struct interrupt_work;
+ spinlock_t interrupt_lock;
+
+ /* QCM Device instance */
+ struct device_queue_manager *dqm;
+
+ bool init_complete;
+ /*
+ * Interrupts of interest to KFD are copied
+ * from the HW ring into a SW ring.
+ */
+ bool interrupts_active;
+};
+
+/* KGD2KFD callbacks */
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+
+extern const struct kfd2kgd_calls *kfd2kgd;
+
+struct kfd_mem_obj {
+ void *bo;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+};
+
+enum kfd_mempool {
+ KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
+ KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
+ KFD_MEMPOOL_FRAMEBUFFER = 3,
+};
+
+/* Character device interface */
+int kfd_chardev_init(void);
+void kfd_chardev_exit(void);
+struct device *kfd_chardev(void);
+
+/**
+ * enum kfd_preempt_type_filter
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * running queues list.
+ *
+ * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
+ * specific process.
+ *
+ */
+enum kfd_preempt_type_filter {
+ KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
+ KFD_PREEMPT_TYPE_FILTER_BY_PASID
+};
+
+enum kfd_preempt_type {
+ KFD_PREEMPT_TYPE_WAVEFRONT,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET
+};
+
+/**
+ * enum kfd_queue_type
+ *
+ * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
+ *
+ * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
+ *
+ * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
+ */
+enum kfd_queue_type {
+ KFD_QUEUE_TYPE_COMPUTE,
+ KFD_QUEUE_TYPE_SDMA,
+ KFD_QUEUE_TYPE_HIQ,
+ KFD_QUEUE_TYPE_DIQ
+};
+
+enum kfd_queue_format {
+ KFD_QUEUE_FORMAT_PM4,
+ KFD_QUEUE_FORMAT_AQL
+};
+
+/**
+ * struct queue_properties
+ *
+ * @type: The queue type.
+ *
+ * @queue_id: Queue identifier.
+ *
+ * @queue_address: Queue ring buffer address.
+ *
+ * @queue_size: Queue ring buffer size.
+ *
+ * @priority: Defines the queue priority relative to other queues in the
+ * process.
+ * This is just an indication and HW scheduling may override the priority as
+ * necessary while keeping the relative prioritization.
+ * The priority granularity is from 0 to f, where f is the highest priority.
+ * Currently all queues are initialized with the highest priority.
+ *
+ * @queue_percent: This field is partially implemented; currently a value of
+ * zero in this field means the queue is not active.
+ *
+ * @read_ptr: User space address which holds the number of dwords the cp
+ * has read from the ring buffer. This field is updated automatically by the H/W.
+ *
+ * @write_ptr: Defines the number of dwords written to the ring buffer.
+ *
+ * @doorbell_ptr: Used to notify the H/W of a new packet written to the queue
+ * ring buffer. Its value should mirror write_ptr, and user space should
+ * update it after updating write_ptr.
+ *
+ * @doorbell_off: The doorbell offset in the doorbell pci-bar.
+ *
+ * @is_interop: Defines if this is an interop queue. An interop queue can
+ * access both graphics and compute resources.
+ *
+ * @is_active: Defines if the queue is active or not.
+ *
+ * @vmid: When cp scheduling is not used, this field defines the vmid of the
+ * queue.
+ *
+ * This structure represents the queue properties for each queue, no matter if
+ * it's a user mode or a kernel mode queue.
+ *
+ */
+struct queue_properties {
+ enum kfd_queue_type type;
+ enum kfd_queue_format format;
+ unsigned int queue_id;
+ uint64_t queue_address;
+ uint64_t queue_size;
+ uint32_t priority;
+ uint32_t queue_percent;
+ uint32_t *read_ptr;
+ uint32_t *write_ptr;
+ uint32_t __iomem *doorbell_ptr;
+ uint32_t doorbell_off;
+ bool is_interop;
+ bool is_active;
+ /* Not relevant for user mode queues in cp scheduling */
+ unsigned int vmid;
+};
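A minimal illustrative sketch, not from the patch itself, of filling queue_properties for a user mode compute queue, following the field descriptions above; the helper name and the choice of the PM4 format are assumptions here.

static void init_compute_queue_properties(struct queue_properties *q,
					  uint64_t ring_addr,
					  uint64_t ring_size_bytes)
{
	memset(q, 0, sizeof(*q));

	q->type = KFD_QUEUE_TYPE_COMPUTE;
	q->format = KFD_QUEUE_FORMAT_PM4;
	q->queue_address = ring_addr;
	q->queue_size = ring_size_bytes;
	q->priority = 0xf;	/* highest priority, per the comment above */
	q->queue_percent = 100;	/* non-zero, so the queue counts as active */
	q->is_active = true;
	/* read_ptr, write_ptr and doorbell_ptr come from user space */
}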
+
+/**
+ * struct queue
+ *
+ * @list: Queue linked list.
+ *
+ * @mqd: The queue MQD.
+ *
+ * @mqd_mem_obj: The MQD local gpu memory object.
+ *
+ * @gart_mqd_addr: The MQD gart mc address.
+ *
+ * @properties: The queue properties.
+ *
+ * @mec: Used only in no cp scheduling mode and identifies the micro engine id
+ * that the queue should be executed on.
+ *
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ *
+ * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
+ *
+ * @process: The kfd process that created this queue.
+ *
+ * @device: The kfd device that created this queue.
+ *
+ * This structure represents user mode compute queues.
+ * It contains all the necessary data to handle such queues.
+ *
+ */
+
+struct queue {
+ struct list_head list;
+ void *mqd;
+ struct kfd_mem_obj *mqd_mem_obj;
+ uint64_t gart_mqd_addr;
+ struct queue_properties properties;
+
+ uint32_t mec;
+ uint32_t pipe;
+ uint32_t queue;
+
+ struct kfd_process *process;
+ struct kfd_dev *device;
+};
+
+/*
+ * Please read the kfd_mqd_manager.h description.
+ */
+enum KFD_MQD_TYPE {
+ KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
+ KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
+ KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
+ KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
+ KFD_MQD_TYPE_MAX
+};
+
+struct scheduling_resources {
+ unsigned int vmid_mask;
+ enum kfd_queue_type type;
+ uint64_t queue_mask;
+ uint64_t gws_mask;
+ uint32_t oac_mask;
+ uint32_t gds_heap_base;
+ uint32_t gds_heap_size;
+};
+
+struct process_queue_manager {
+ /* data */
+ struct kfd_process *process;
+ unsigned int num_concurrent_processes;
+ struct list_head queues;
+ unsigned long *queue_slot_bitmap;
+};
+
+struct qcm_process_device {
+ /* The Device Queue Manager that owns this data */
+ struct device_queue_manager *dqm;
+ struct process_queue_manager *pqm;
+ /* Device Queue Manager lock */
+ struct mutex *lock;
+ /* Queues list */
+ struct list_head queues_list;
+ struct list_head priv_queue_list;
+
+ unsigned int queue_count;
+ unsigned int vmid;
+ bool is_debug;
+ /*
+ * All the memory management data should be here too
+ */
+ uint64_t gds_context_area;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t page_table_base;
+ uint32_t gds_size;
+ uint32_t num_gws;
+ uint32_t num_oac;
+};
+
+/* Data that is per-process-per device. */
+struct kfd_process_device {
+ /*
+ * List of all per-device data for a process.
+ * Starts from kfd_process.per_device_data.
+ */
+ struct list_head per_device_list;
+
+ /* The device that owns this data. */
+ struct kfd_dev *dev;
+
+
+ /* per-process-per device QCM data structure */
+ struct qcm_process_device qpd;
+
+ /* Apertures */
+ uint64_t lds_base;
+ uint64_t lds_limit;
+ uint64_t gpuvm_base;
+ uint64_t gpuvm_limit;
+ uint64_t scratch_base;
+ uint64_t scratch_limit;
+
+ /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
+ bool bound;
+};
+
+#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
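The macro above recovers the kfd_process_device that embeds a given qcm_process_device via container_of(); a small illustrative use (the helper name is made up here):

static struct kfd_dev *qpd_owner_device(struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	return pdd->dev;
}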
+
+/* Process data */
+struct kfd_process {
+ /*
+ * kfd_process are stored in an mm_struct*->kfd_process*
+ * hash table (kfd_processes in kfd_process.c)
+ */
+ struct hlist_node kfd_processes;
+
+ struct mm_struct *mm;
+
+ struct mutex mutex;
+
+ /*
+ * In any process, the thread that started main() is the lead
+ * thread and outlives the rest.
+ * It is here because amd_iommu_bind_pasid wants a task_struct.
+ */
+ struct task_struct *lead_thread;
+
+ /* We want to receive a notification when the mm_struct is destroyed */
+ struct mmu_notifier mmu_notifier;
+
+ /* Used for delayed freeing of the kfd_process structure */
+ struct rcu_head rcu;
+
+ unsigned int pasid;
+
+ /*
+ * List of kfd_process_device structures,
+ * one for each device the process is using.
+ */
+ struct list_head per_device_data;
+
+ struct process_queue_manager pqm;
+
+ /* The process's queues. */
+ size_t queue_array_size;
+
+ /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
+ struct kfd_queue **queues;
+
+ unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
+ /* Is the user space process 32 bit? */
+ bool is_32bit_user_mode;
+};
+
+void kfd_process_create_wq(void);
+void kfd_process_destroy_wq(void);
+struct kfd_process *kfd_create_process(const struct task_struct *);
+struct kfd_process *kfd_get_process(const struct task_struct *);
+
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+ struct kfd_process *p);
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p,
+ int create_pdd);
+
+/* Process device data iterator */
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+ struct kfd_process_device *pdd);
+bool kfd_has_process_device_data(struct kfd_process *p);
+
+/* PASIDs */
+int kfd_pasid_init(void);
+void kfd_pasid_exit(void);
+bool kfd_set_pasid_limit(unsigned int new_limit);
+unsigned int kfd_get_pasid_limit(void);
+unsigned int kfd_pasid_alloc(void);
+void kfd_pasid_free(unsigned int pasid);
+
+/* Doorbells */
+void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
+u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ unsigned int *doorbell_off);
+void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
+u32 read_kernel_doorbell(u32 __iomem *db);
+void write_kernel_doorbell(u32 __iomem *db, u32 value);
+unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+ struct kfd_process *process,
+ unsigned int queue_id);
+
+extern struct device *kfd_device;
+
+/* Topology */
+int kfd_topology_init(void);
+void kfd_topology_shutdown(void);
+int kfd_topology_add_device(struct kfd_dev *gpu);
+int kfd_topology_remove_device(struct kfd_dev *gpu);
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
+
+/* Interrupts */
+int kfd_interrupt_init(struct kfd_dev *dev);
+void kfd_interrupt_exit(struct kfd_dev *dev);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
+
+/* Power Management */
+void kgd2kfd_suspend(struct kfd_dev *kfd);
+int kgd2kfd_resume(struct kfd_dev *kfd);
+
+/* amdkfd Apertures */
+int kfd_init_apertures(struct kfd_process *process);
+
+/* Queue Context Management */
+inline uint32_t lower_32(uint64_t x);
+inline uint32_t upper_32(uint64_t x);
+
+int init_queue(struct queue **q, struct queue_properties properties);
+void uninit_queue(struct queue *q);
+void print_queue_properties(struct queue_properties *q);
+void print_queue(struct queue *q);
+
+struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
+struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
+void device_queue_manager_uninit(struct device_queue_manager *dqm);
+struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+ enum kfd_queue_type type);
+void kernel_queue_uninit(struct kernel_queue *kq);
+
+/* Process Queue Manager */
+struct process_queue_node {
+ struct queue *q;
+ struct kernel_queue *kq;
+ struct list_head process_queue_list;
+};
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
+void pqm_uninit(struct process_queue_manager *pqm);
+int pqm_create_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev,
+ struct file *f,
+ struct queue_properties *properties,
+ unsigned int flags,
+ enum kfd_queue_type type,
+ unsigned int *qid);
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
+
+/* Packet Manager */
+
+#define KFD_HIQ_TIMEOUT (500)
+
+#define KFD_FENCE_COMPLETED (100)
+#define KFD_FENCE_INIT (10)
+#define KFD_UNMAP_LATENCY (150)
+
+struct packet_manager {
+ struct device_queue_manager *dqm;
+ struct kernel_queue *priv_queue;
+ struct mutex lock;
+ bool allocated;
+ struct kfd_mem_obj *ib_buffer_obj;
+};
+
+int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
+void pm_uninit(struct packet_manager *pm);
+int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+ uint32_t fence_value);
+
+int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ enum kfd_preempt_type_filter mode,
+ uint32_t filter_param, bool reset,
+ unsigned int sdma_engine);
+
+void pm_release_ib(struct packet_manager *pm);
+
+uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process);
+
+#endif
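Illustrative sketch only, not from the patch itself: the call order suggested by the packet manager API declared above, under the assumption that a device queue manager initializes the packet manager once, sends the scheduling resources and then submits a runlist. The function name and error handling are made up here.

static int start_hw_scheduler_sketch(struct packet_manager *pm,
				     struct device_queue_manager *dqm,
				     struct scheduling_resources *res,
				     struct list_head *dqm_queues)
{
	int retval;

	retval = pm_init(pm, dqm);
	if (retval != 0)
		return retval;

	retval = pm_send_set_resources(pm, res);
	if (retval != 0)
		goto err;

	retval = pm_send_runlist(pm, dqm_queues);
	if (retval != 0)
		goto err;

	return 0;

err:
	pm_uninit(pm);
	return retval;
}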
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
new file mode 100644
index 000000000000..b85eb0b830b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+struct mm_struct;
+
+#include "kfd_priv.h"
+
+/*
+ * Initial size for the array of queues.
+ * The allocated size is doubled each time
+ * it is exceeded up to MAX_PROCESS_QUEUES.
+ */
+#define INITIAL_QUEUE_ARRAY_SIZE 16
+
+/*
+ * List of struct kfd_process (field kfd_process).
+ * Unique/indexed by mm_struct*
+ */
+#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
+static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
+static DEFINE_MUTEX(kfd_processes_mutex);
+
+DEFINE_STATIC_SRCU(kfd_processes_srcu);
+
+static struct workqueue_struct *kfd_process_wq;
+
+struct kfd_process_release_work {
+ struct work_struct kfd_work;
+ struct kfd_process *p;
+};
+
+static struct kfd_process *find_process(const struct task_struct *thread);
+static struct kfd_process *create_process(const struct task_struct *thread);
+
+void kfd_process_create_wq(void)
+{
+ if (!kfd_process_wq)
+ kfd_process_wq = create_workqueue("kfd_process_wq");
+}
+
+void kfd_process_destroy_wq(void)
+{
+ if (kfd_process_wq) {
+ flush_workqueue(kfd_process_wq);
+ destroy_workqueue(kfd_process_wq);
+ kfd_process_wq = NULL;
+ }
+}
+
+struct kfd_process *kfd_create_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+
+ BUG_ON(!kfd_process_wq);
+
+ if (thread->mm == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+ if (thread->group_leader->mm != thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ /* Take mmap_sem because we call __mmu_notifier_register inside */
+ down_write(&thread->mm->mmap_sem);
+
+ /*
+ * Take the kfd processes mutex before starting process creation
+ * so there won't be a case where two threads of the same process
+ * create two kfd_process structures.
+ */
+ mutex_lock(&kfd_processes_mutex);
+
+ /* A prior open of /dev/kfd could have already created the process. */
+ process = find_process(thread);
+ if (process)
+ pr_debug("kfd: process already found\n");
+
+ if (!process)
+ process = create_process(thread);
+
+ mutex_unlock(&kfd_processes_mutex);
+
+ up_write(&thread->mm->mmap_sem);
+
+ return process;
+}
+
+struct kfd_process *kfd_get_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+
+ if (thread->mm == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* Only the pthreads threading model is supported. */
+ if (thread->group_leader->mm != thread->mm)
+ return ERR_PTR(-EINVAL);
+
+ process = find_process(thread);
+
+ return process;
+}
+
+static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
+{
+ struct kfd_process *process;
+
+ hash_for_each_possible_rcu(kfd_processes_table, process,
+ kfd_processes, (uintptr_t)mm)
+ if (process->mm == mm)
+ return process;
+
+ return NULL;
+}
+
+static struct kfd_process *find_process(const struct task_struct *thread)
+{
+ struct kfd_process *p;
+ int idx;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ p = find_process_by_mm(thread->mm);
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return p;
+}
+
+static void kfd_process_wq_release(struct work_struct *work)
+{
+ struct kfd_process_release_work *my_work;
+ struct kfd_process_device *pdd, *temp;
+ struct kfd_process *p;
+
+ my_work = (struct kfd_process_release_work *) work;
+
+ p = my_work->p;
+
+ mutex_lock(&p->mutex);
+
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+ list_del(&pdd->per_device_list);
+
+ kfree(pdd);
+ }
+
+ kfd_pasid_free(p->pasid);
+
+ mutex_unlock(&p->mutex);
+
+ mutex_destroy(&p->mutex);
+
+ kfree(p->queues);
+
+ kfree(p);
+
+ kfree((void *)work);
+}
+
+static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+{
+ struct kfd_process_release_work *work;
+ struct kfd_process *p;
+
+ BUG_ON(!kfd_process_wq);
+
+ p = container_of(rcu, struct kfd_process, rcu);
+ BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+
+ mmdrop(p->mm);
+
+ work = (struct kfd_process_release_work *)
+ kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+
+ if (work) {
+ INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
+ work->p = p;
+ queue_work(kfd_process_wq, (struct work_struct *) work);
+ }
+}
+
+static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct kfd_process *p;
+
+ /*
+ * The kfd_process structure cannot be freed here because the
+ * mmu_notifier srcu is read-locked
+ */
+ p = container_of(mn, struct kfd_process, mmu_notifier);
+ BUG_ON(p->mm != mm);
+
+ mutex_lock(&kfd_processes_mutex);
+ hash_del_rcu(&p->kfd_processes);
+ mutex_unlock(&kfd_processes_mutex);
+ synchronize_srcu(&kfd_processes_srcu);
+
+ mutex_lock(&p->mutex);
+
+ /* In case our notifier is called before IOMMU notifier */
+ pqm_uninit(&p->pqm);
+
+ mutex_unlock(&p->mutex);
+
+ /*
+ * Because we drop mm_count inside kfd_process_destroy_delayed
+ * and because the mmu_notifier_unregister function also drops
+ * mm_count, we need to take an extra count here.
+ */
+ atomic_inc(&p->mm->mm_count);
+ mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
+ mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
+}
+
+static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+ .release = kfd_process_notifier_release,
+};
+
+static struct kfd_process *create_process(const struct task_struct *thread)
+{
+ struct kfd_process *process;
+ int err = -ENOMEM;
+
+ process = kzalloc(sizeof(*process), GFP_KERNEL);
+
+ if (!process)
+ goto err_alloc_process;
+
+ process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
+ sizeof(process->queues[0]), GFP_KERNEL);
+ if (!process->queues)
+ goto err_alloc_queues;
+
+ process->pasid = kfd_pasid_alloc();
+ if (process->pasid == 0)
+ goto err_alloc_pasid;
+
+ mutex_init(&process->mutex);
+
+ process->mm = thread->mm;
+
+ /* register notifier */
+ process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
+ err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
+ if (err)
+ goto err_mmu_notifier;
+
+ hash_add_rcu(kfd_processes_table, &process->kfd_processes,
+ (uintptr_t)process->mm);
+
+ process->lead_thread = thread->group_leader;
+
+ process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
+
+ INIT_LIST_HEAD(&process->per_device_data);
+
+ err = pqm_init(&process->pqm, process);
+ if (err != 0)
+ goto err_process_pqm_init;
+
+ return process;
+
+err_process_pqm_init:
+ hash_del_rcu(&process->kfd_processes);
+ synchronize_rcu();
+ mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
+err_mmu_notifier:
+ kfd_pasid_free(process->pasid);
+err_alloc_pasid:
+ kfree(process->queues);
+err_alloc_queues:
+ kfree(process);
+err_alloc_process:
+ return ERR_PTR(err);
+}
+
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+ struct kfd_process *p,
+ int create_pdd)
+{
+ struct kfd_process_device *pdd = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ if (pdd->dev == dev)
+ return pdd;
+
+ if (create_pdd) {
+ pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
+ if (pdd != NULL) {
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ list_add(&pdd->per_device_list, &p->per_device_data);
+ }
+ }
+
+ return pdd;
+}
+
+/*
+ * Direct the IOMMU to bind the process (specifically the pasid->mm)
+ * to the device.
+ * Unbinding occurs when the process dies or the device is removed.
+ *
+ * Assumes that the process lock is held.
+ */
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+ struct kfd_process *p)
+{
+ struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
+ int err;
+
+ if (pdd == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (pdd->bound)
+ return pdd;
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ pdd->bound = true;
+
+ return pdd;
+}
+
+void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+{
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+ int idx, i;
+
+ BUG_ON(dev == NULL);
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
+ if (p->pasid == pasid)
+ break;
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ BUG_ON(p->pasid != pasid);
+
+ mutex_lock(&p->mutex);
+
+ pqm_uninit(&p->pqm);
+
+ pdd = kfd_get_process_device_data(dev, p, 0);
+
+ /*
+ * Just mark pdd as unbound, because we still need it to call
+ * amd_iommu_unbind_pasid() when the process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ if (pdd)
+ pdd->bound = false;
+
+ mutex_unlock(&p->mutex);
+}
+
+struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+{
+ return list_first_entry(&p->per_device_data,
+ struct kfd_process_device,
+ per_device_list);
+}
+
+struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+ struct kfd_process_device *pdd)
+{
+ if (list_is_last(&pdd->per_device_list, &p->per_device_data))
+ return NULL;
+ return list_next_entry(pdd, per_device_list);
+}
+
+bool kfd_has_process_device_data(struct kfd_process *p)
+{
+ return !(list_empty(&p->per_device_data));
+}
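Illustrative sketch only: walking a process's per-device data with the iterator helpers defined above. The emptiness check is needed because kfd_get_first_process_device_data() assumes a non-empty list, and the caller is assumed to hold p->mutex. The function name is made up here.

static void print_process_devices(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	if (!kfd_has_process_device_data(p))
		return;

	for (pdd = kfd_get_first_process_device_data(p); pdd != NULL;
	     pdd = kfd_get_next_process_device_data(p, pdd))
		pr_debug("kfd: pasid %d uses device %u\n",
			 p->pasid, pdd->dev->id);
}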
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
new file mode 100644
index 000000000000..47526780d736
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include "kfd_device_queue_manager.h"
+#include "kfd_priv.h"
+#include "kfd_kernel_queue.h"
+
+static inline struct process_queue_node *get_queue_by_qid(
+ struct process_queue_manager *pqm, unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (pqn->q && pqn->q->properties.queue_id == qid)
+ return pqn;
+ if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+ return pqn;
+ }
+
+ return NULL;
+}
+
+static int find_available_queue_slot(struct process_queue_manager *pqm,
+ unsigned int *qid)
+{
+ unsigned long found;
+
+ BUG_ON(!pqm || !qid);
+
+ pr_debug("kfd: in %s\n", __func__);
+
+ found = find_first_zero_bit(pqm->queue_slot_bitmap,
+ max_num_of_queues_per_process);
+
+ pr_debug("kfd: the new slot id %lu\n", found);
+
+ if (found >= max_num_of_queues_per_process) {
+ pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+ pqm->process->pasid);
+ return -ENOMEM;
+ }
+
+ set_bit(found, pqm->queue_slot_bitmap);
+ *qid = found;
+
+ return 0;
+}
+
+int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+{
+ BUG_ON(!pqm);
+
+ INIT_LIST_HEAD(&pqm->queues);
+ pqm->queue_slot_bitmap =
+ kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ BITS_PER_BYTE), GFP_KERNEL);
+ if (pqm->queue_slot_bitmap == NULL)
+ return -ENOMEM;
+ pqm->process = p;
+
+ return 0;
+}
+
+void pqm_uninit(struct process_queue_manager *pqm)
+{
+ int retval;
+ struct process_queue_node *pqn, *next;
+
+ BUG_ON(!pqm);
+
+ pr_debug("In func %s\n", __func__);
+
+ list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+ retval = pqm_destroy_queue(
+ pqm,
+ (pqn->q != NULL) ?
+ pqn->q->properties.queue_id :
+ pqn->kq->queue->properties.queue_id);
+
+ if (retval != 0) {
+ pr_err("kfd: failed to destroy queue\n");
+ return;
+ }
+ }
+ kfree(pqm->queue_slot_bitmap);
+ pqm->queue_slot_bitmap = NULL;
+}
+
+static int create_cp_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev, struct queue **q,
+ struct queue_properties *q_properties,
+ struct file *f, unsigned int qid)
+{
+ int retval;
+
+ retval = 0;
+
+ /* Doorbell initialized in user space */
+ q_properties->doorbell_ptr = NULL;
+
+ q_properties->doorbell_off =
+ kfd_queue_id_to_doorbell(dev, pqm->process, qid);
+
+ /* let DQM handle it */
+ q_properties->vmid = 0;
+ q_properties->queue_id = qid;
+ q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
+
+ retval = init_queue(q, *q_properties);
+ if (retval != 0)
+ goto err_init_queue;
+
+ (*q)->device = dev;
+ (*q)->process = pqm->process;
+
+ pr_debug("kfd: PQM After init queue");
+
+ return retval;
+
+err_init_queue:
+ return retval;
+}
+
+int pqm_create_queue(struct process_queue_manager *pqm,
+ struct kfd_dev *dev,
+ struct file *f,
+ struct queue_properties *properties,
+ unsigned int flags,
+ enum kfd_queue_type type,
+ unsigned int *qid)
+{
+ int retval;
+ struct kfd_process_device *pdd;
+ struct queue_properties q_properties;
+ struct queue *q;
+ struct process_queue_node *pqn;
+ struct kernel_queue *kq;
+
+ BUG_ON(!pqm || !dev || !properties || !qid);
+
+ memset(&q_properties, 0, sizeof(struct queue_properties));
+ memcpy(&q_properties, properties, sizeof(struct queue_properties));
+ q = NULL;
+ kq = NULL;
+
+ pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+ BUG_ON(!pdd);
+
+ retval = find_available_queue_slot(pqm, qid);
+ if (retval != 0)
+ return retval;
+
+ if (list_empty(&pqm->queues)) {
+ pdd->qpd.pqm = pqm;
+ dev->dqm->register_process(dev->dqm, &pdd->qpd);
+ }
+
+ pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+ if (!pqn) {
+ retval = -ENOMEM;
+ goto err_allocate_pqn;
+ }
+
+ switch (type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+ (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
+ pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ print_queue(q);
+ break;
+ case KFD_QUEUE_TYPE_DIQ:
+ kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
+ if (kq == NULL) {
+ retval = -ENOMEM;
+ goto err_create_queue;
+ }
+ kq->queue->properties.queue_id = *qid;
+ pqn->kq = kq;
+ pqn->q = NULL;
+ retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ if (retval != 0) {
+ pr_err("kfd: error dqm create queue\n");
+ goto err_create_queue;
+ }
+
+ pr_debug("kfd: PQM After DQM create queue\n");
+
+ list_add(&pqn->process_queue_list, &pqm->queues);
+
+ if (q) {
+ *properties = q->properties;
+ pr_debug("kfd: PQM done creating queue\n");
+ print_queue_properties(properties);
+ }
+
+ return retval;
+
+err_create_queue:
+ kfree(pqn);
+err_allocate_pqn:
+ clear_bit(*qid, pqm->queue_slot_bitmap);
+ return retval;
+}
+
+int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+{
+ struct process_queue_node *pqn;
+ struct kfd_process_device *pdd;
+ struct device_queue_manager *dqm;
+ struct kfd_dev *dev;
+ int retval;
+
+ dqm = NULL;
+
+ BUG_ON(!pqm);
+ retval = 0;
+
+ pr_debug("kfd: In Func %s\n", __func__);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (pqn == NULL) {
+ pr_err("kfd: queue id does not match any known queue\n");
+ return -EINVAL;
+ }
+
+ dev = NULL;
+ if (pqn->kq)
+ dev = pqn->kq->dev;
+ if (pqn->q)
+ dev = pqn->q->device;
+ BUG_ON(!dev);
+
+ pdd = kfd_get_process_device_data(dev, pqm->process, 1);
+ BUG_ON(!pdd);
+
+ if (pqn->kq) {
+ /* destroy kernel queue (DIQ) */
+ dqm = pqn->kq->dev->dqm;
+ dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+ kernel_queue_uninit(pqn->kq);
+ }
+
+ if (pqn->q) {
+ dqm = pqn->q->device->dqm;
+ retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval != 0)
+ return retval;
+
+ uninit_queue(pqn->q);
+ }
+
+ list_del(&pqn->process_queue_list);
+ kfree(pqn);
+ clear_bit(qid, pqm->queue_slot_bitmap);
+
+ if (list_empty(&pqm->queues))
+ dqm->unregister_process(dqm, &pdd->qpd);
+
+ return retval;
+}
+
+int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ BUG_ON(!pqn);
+
+ pqn->q->properties.queue_address = p->queue_address;
+ pqn->q->properties.queue_size = p->queue_size;
+ pqn->q->properties.queue_percent = p->queue_percent;
+ pqn->q->properties.priority = p->priority;
+
+ retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+ struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ BUG_ON(!pqm);
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (pqn && pqn->kq)
+ return pqn->kq;
+
+ return NULL;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
new file mode 100644
index 000000000000..9a0c90b0702e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "kfd_priv.h"
+
+void print_queue_properties(struct queue_properties *q)
+{
+ if (!q)
+ return;
+
+ pr_debug("Printing queue properties:\n");
+ pr_debug("Queue Type: %u\n", q->type);
+ pr_debug("Queue Size: %llu\n", q->queue_size);
+ pr_debug("Queue percent: %u\n", q->queue_percent);
+ pr_debug("Queue Address: 0x%llX\n", q->queue_address);
+ pr_debug("Queue Id: %u\n", q->queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->vmid);
+ pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
+ pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
+}
+
+void print_queue(struct queue *q)
+{
+ if (!q)
+ return;
+ pr_debug("Printing queue:\n");
+ pr_debug("Queue Type: %u\n", q->properties.type);
+ pr_debug("Queue Size: %llu\n", q->properties.queue_size);
+ pr_debug("Queue percent: %u\n", q->properties.queue_percent);
+ pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
+ pr_debug("Queue Id: %u\n", q->properties.queue_id);
+ pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
+ pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
+ pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
+ pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
+ pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
+ pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
+ pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
+ pr_debug("Queue Process Address: 0x%p\n", q->process);
+ pr_debug("Queue Device Address: 0x%p\n", q->device);
+}
+
+int init_queue(struct queue **q, struct queue_properties properties)
+{
+ struct queue *tmp;
+
+ BUG_ON(!q);
+
+ tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+
+ *q = tmp;
+ return 0;
+}
+
+void uninit_queue(struct queue *q)
+{
+ kfree(q);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
new file mode 100644
index 000000000000..5733e2859e8a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -0,0 +1,1235 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/hash.h>
+#include <linux/cpufreq.h>
+
+#include "kfd_priv.h"
+#include "kfd_crat.h"
+#include "kfd_topology.h"
+
+static struct list_head topology_device_list;
+static int topology_crat_parsed;
+static struct kfd_system_properties sys_props;
+
+static DECLARE_RWSEM(topology_lock);
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu_id == gpu_id) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
+{
+ struct acpi_table_header *crat_table;
+ acpi_status status;
+
+ if (!size)
+ return -EINVAL;
+
+ /*
+ * Fetch the CRAT table from ACPI
+ */
+ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+ if (status == AE_NOT_FOUND) {
+ pr_warn("CRAT table not found\n");
+ return -ENODATA;
+ } else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+
+ pr_err("CRAT table error: %s\n", err);
+ return -EINVAL;
+ }
+
+ if (*size >= crat_table->length && crat_image != NULL)
+ memcpy(crat_image, crat_table, crat_table->length);
+
+ *size = crat_table->length;
+
+ return 0;
+}
+
+static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ BUG_ON(!dev);
+ BUG_ON(!cu);
+
+ dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+ dev->node_props.cpu_core_id_base = cu->processor_id_low;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+ pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
+ cu->processor_id_low);
+}
+
+static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ BUG_ON(!dev);
+ BUG_ON(!cu);
+
+ dev->node_props.simd_id_base = cu->processor_id_low;
+ dev->node_props.simd_count = cu->num_simd_cores;
+ dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+ dev->node_props.max_waves_per_simd = cu->max_waves_simd;
+ dev->node_props.wave_front_size = cu->wave_front_size;
+ dev->node_props.mem_banks_count = cu->num_banks;
+ dev->node_props.array_count = cu->num_arrays;
+ dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
+ dev->node_props.simd_per_cu = cu->num_simd_per_cu;
+ dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
+ dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
+ pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
+ cu->processor_id_low);
+}
+
+/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
+static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
+{
+ struct kfd_topology_device *dev;
+ int i = 0;
+
+ BUG_ON(!cu);
+
+ pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+ cu->proximity_domain, cu->hsa_capability);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (cu->proximity_domain == i) {
+ if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
+ kfd_populated_cu_info_cpu(dev, cu);
+
+ if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
+ kfd_populated_cu_info_gpu(dev, cu);
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_mem is called when the topology mutex is
+ * already acquired
+ */
+static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
+{
+ struct kfd_mem_properties *props;
+ struct kfd_topology_device *dev;
+ int i = 0;
+
+ BUG_ON(!mem);
+
+ pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
+ mem->promixity_domain);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (mem->promixity_domain == i) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ if (dev->node_props.cpu_cores_count == 0)
+ props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
+ else
+ props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+
+ if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
+ props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
+ props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+
+ props->size_in_bytes =
+ ((uint64_t)mem->length_high << 32) +
+ mem->length_low;
+ props->width = mem->width;
+
+ dev->mem_bank_count++;
+ list_add_tail(&props->list, &dev->mem_props);
+
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_cache is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
+{
+ struct kfd_cache_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t id;
+
+ BUG_ON(!cache);
+
+ id = cache->processor_id_low;
+
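+ /*
+ * Cache entries are keyed by processor id rather than by proximity
+ * domain, so match against both the CPU core and SIMD id bases.
+ */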
+ pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (id == dev->node_props.cpu_core_id_base ||
+ id == dev->node_props.simd_id_base) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ props->processor_id_low = id;
+ props->cache_level = cache->cache_level;
+ props->cache_size = cache->cache_size;
+ props->cacheline_size = cache->cache_line_size;
+ props->cachelines_per_tag = cache->lines_per_tag;
+ props->cache_assoc = cache->associativity;
+ props->cache_latency = cache->cache_latency;
+
+ if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_DATA;
+ if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
+ if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_CPU;
+ if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_HSACU;
+
+ dev->cache_count++;
+ dev->node_props.caches_count++;
+ list_add_tail(&props->list, &dev->cache_props);
+
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * kfd_parse_subtype_iolink is called when the topology mutex
+ * is already acquired
+ */
+static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
+{
+ struct kfd_iolink_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t i = 0;
+ uint32_t id_from;
+ uint32_t id_to;
+
+ BUG_ON(!iolink);
+
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+ pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (id_from == i) {
+ props = kfd_alloc_struct(props);
+ if (props == NULL)
+ return -ENOMEM;
+
+ props->node_from = id_from;
+ props->node_to = id_to;
+ props->ver_maj = iolink->version_major;
+ props->ver_min = iolink->version_minor;
+
+ /*
+ * weight factor (derived from CDIR), currently always 1
+ */
+ props->weight = 1;
+
+ props->min_latency = iolink->minimum_latency;
+ props->max_latency = iolink->maximum_latency;
+ props->min_bandwidth = iolink->minimum_bandwidth_mbs;
+ props->max_bandwidth = iolink->maximum_bandwidth_mbs;
+ props->rec_transfer_size =
+ iolink->recommended_transfer_size;
+
+ dev->io_link_count++;
+ dev->node_props.io_links_count++;
+ list_add_tail(&props->list, &dev->io_link_props);
+
+ break;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
+{
+ struct crat_subtype_computeunit *cu;
+ struct crat_subtype_memory *mem;
+ struct crat_subtype_cache *cache;
+ struct crat_subtype_iolink *iolink;
+ int ret = 0;
+
+ BUG_ON(!sub_type_hdr);
+
+ switch (sub_type_hdr->type) {
+ case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ ret = kfd_parse_subtype_cu(cu);
+ break;
+ case CRAT_SUBTYPE_MEMORY_AFFINITY:
+ mem = (struct crat_subtype_memory *)sub_type_hdr;
+ ret = kfd_parse_subtype_mem(mem);
+ break;
+ case CRAT_SUBTYPE_CACHE_AFFINITY:
+ cache = (struct crat_subtype_cache *)sub_type_hdr;
+ ret = kfd_parse_subtype_cache(cache);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_info("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+ iolink = (struct crat_subtype_iolink *)sub_type_hdr;
+ ret = kfd_parse_subtype_iolink(iolink);
+ break;
+ default:
+ pr_warn("Unknown subtype (%d) in CRAT\n",
+ sub_type_hdr->type);
+ }
+
+ return ret;
+}
+
+static void kfd_release_topology_device(struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
+
+ BUG_ON(!dev);
+
+ list_del(&dev->list);
+
+ while (dev->mem_props.next != &dev->mem_props) {
+ mem = container_of(dev->mem_props.next,
+ struct kfd_mem_properties, list);
+ list_del(&mem->list);
+ kfree(mem);
+ }
+
+ while (dev->cache_props.next != &dev->cache_props) {
+ cache = container_of(dev->cache_props.next,
+ struct kfd_cache_properties, list);
+ list_del(&cache->list);
+ kfree(cache);
+ }
+
+ while (dev->io_link_props.next != &dev->io_link_props) {
+ iolink = container_of(dev->io_link_props.next,
+ struct kfd_iolink_properties, list);
+ list_del(&iolink->list);
+ kfree(iolink);
+ }
+
+ kfree(dev);
+
+ sys_props.num_devices--;
+}
+
+static void kfd_release_live_view(void)
+{
+ struct kfd_topology_device *dev;
+
+ while (topology_device_list.next != &topology_device_list) {
+ dev = container_of(topology_device_list.next,
+ struct kfd_topology_device, list);
+ kfd_release_topology_device(dev);
+ }
+
+ memset(&sys_props, 0, sizeof(sys_props));
+}
+
+static struct kfd_topology_device *kfd_create_topology_device(void)
+{
+ struct kfd_topology_device *dev;
+
+ dev = kfd_alloc_struct(dev);
+ if (dev == NULL) {
+ pr_err("No memory to allocate a topology device");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&dev->mem_props);
+ INIT_LIST_HEAD(&dev->cache_props);
+ INIT_LIST_HEAD(&dev->io_link_props);
+
+ list_add_tail(&dev->list, &topology_device_list);
+ sys_props.num_devices++;
+
+ return dev;
+}
+
+static int kfd_parse_crat_table(void *crat_image)
+{
+ struct kfd_topology_device *top_dev;
+ struct crat_subtype_generic *sub_type_hdr;
+ uint16_t node_id;
+ int ret;
+ struct crat_header *crat_table = (struct crat_header *)crat_image;
+ uint16_t num_nodes;
+ uint32_t image_len;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ num_nodes = crat_table->num_domains;
+ image_len = crat_table->length;
+
+ pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+
+ for (node_id = 0; node_id < num_nodes; node_id++) {
+ top_dev = kfd_create_topology_device();
+ if (!top_dev) {
+ kfd_release_live_view();
+ return -ENOMEM;
+ }
+ }
+
+ sys_props.platform_id =
+ (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
+ sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
+ sys_props.platform_rev = crat_table->revision;
+
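+ /*
+ * Walk the variable-length subtype entries that follow the CRAT
+ * header and hand each enabled one to its parser.
+ */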
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+ while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
+ ((char *)crat_image) + image_len) {
+ if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
+ ret = kfd_parse_subtype(sub_type_hdr);
+ if (ret != 0) {
+ kfd_release_live_view();
+ return ret;
+ }
+ }
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ }
+
+ sys_props.generation_count++;
+ topology_crat_parsed = 1;
+
+ return 0;
+}
+
+
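+/*
+ * Helpers for emitting "name value" lines into a sysfs page buffer.
+ * Each invocation re-prints the current buffer contents followed by the
+ * new entry, so successive calls append properties to the same buffer.
+ */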
+#define sysfs_show_gen_prop(buffer, fmt, ...) \
+ snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
+#define sysfs_show_32bit_prop(buffer, name, value) \
+ sysfs_show_gen_prop(buffer, "%s %u\n", name, value)
+#define sysfs_show_64bit_prop(buffer, name, value) \
+ sysfs_show_gen_prop(buffer, "%s %llu\n", name, value)
+#define sysfs_show_32bit_val(buffer, value) \
+ sysfs_show_gen_prop(buffer, "%u\n", value)
+#define sysfs_show_str_val(buffer, value) \
+ sysfs_show_gen_prop(buffer, "%s\n", value)
+
+static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ if (attr == &sys_props.attr_genid) {
+ ret = sysfs_show_32bit_val(buffer, sys_props.generation_count);
+ } else if (attr == &sys_props.attr_props) {
+ sysfs_show_64bit_prop(buffer, "platform_oem",
+ sys_props.platform_oem);
+ sysfs_show_64bit_prop(buffer, "platform_id",
+ sys_props.platform_id);
+ ret = sysfs_show_64bit_prop(buffer, "platform_rev",
+ sys_props.platform_rev);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops sysprops_ops = {
+ .show = sysprops_show,
+};
+
+static struct kobj_type sysprops_type = {
+ .sysfs_ops = &sysprops_ops,
+};
+
+static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_iolink_properties *iolink;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
+ sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
+ sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
+ sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from);
+ sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to);
+ sysfs_show_32bit_prop(buffer, "weight", iolink->weight);
+ sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency);
+ sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency);
+ sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth);
+ sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth);
+ sysfs_show_32bit_prop(buffer, "recommended_transfer_size",
+ iolink->rec_transfer_size);
+ ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags);
+
+ return ret;
+}
+
+static const struct sysfs_ops iolink_ops = {
+ .show = iolink_show,
+};
+
+static struct kobj_type iolink_type = {
+ .sysfs_ops = &iolink_ops,
+};
+
+static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_mem_properties *mem;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ mem = container_of(attr, struct kfd_mem_properties, attr);
+ sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
+ sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
+ sysfs_show_32bit_prop(buffer, "flags", mem->flags);
+ sysfs_show_32bit_prop(buffer, "width", mem->width);
+ ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max);
+
+ return ret;
+}
+
+static const struct sysfs_ops mem_ops = {
+ .show = mem_show,
+};
+
+static struct kobj_type mem_type = {
+ .sysfs_ops = &mem_ops,
+};
+
+static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ uint32_t i;
+ struct kfd_cache_properties *cache;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ cache = container_of(attr, struct kfd_cache_properties, attr);
+ sysfs_show_32bit_prop(buffer, "processor_id_low",
+ cache->processor_id_low);
+ sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
+ sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
+ sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
+ sysfs_show_32bit_prop(buffer, "cache_lines_per_tag",
+ cache->cachelines_per_tag);
+ sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
+ sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
+ sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
+ snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
+ for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
+ ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
+ buffer, cache->sibling_map[i],
+ (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
+ "\n" : ",");
+
+ return ret;
+}
+
+static const struct sysfs_ops cache_ops = {
+ .show = kfd_cache_show,
+};
+
+static struct kobj_type cache_type = {
+ .sysfs_ops = &cache_ops,
+};
+
+static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ ssize_t ret;
+ struct kfd_topology_device *dev;
+ char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+ uint32_t i;
+
+ /* Making sure that the buffer is an empty string */
+ buffer[0] = 0;
+
+ if (strcmp(attr->name, "gpu_id") == 0) {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_gpuid);
+ ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
+ } else if (strcmp(attr->name, "name") == 0) {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_name);
+ for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
+ public_name[i] =
+ (char)dev->node_props.marketing_name[i];
+ if (dev->node_props.marketing_name[i] == 0)
+ break;
+ }
+ public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
+ ret = sysfs_show_str_val(buffer, public_name);
+ } else {
+ dev = container_of(attr, struct kfd_topology_device,
+ attr_props);
+ sysfs_show_32bit_prop(buffer, "cpu_cores_count",
+ dev->node_props.cpu_cores_count);
+ sysfs_show_32bit_prop(buffer, "simd_count",
+ dev->node_props.simd_count);
+
+ if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
+ pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ dev->node_props.mem_banks_count,
+ dev->mem_bank_count);
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->mem_bank_count);
+ } else {
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
+ }
+
+ sysfs_show_32bit_prop(buffer, "caches_count",
+ dev->node_props.caches_count);
+ sysfs_show_32bit_prop(buffer, "io_links_count",
+ dev->node_props.io_links_count);
+ sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
+ dev->node_props.cpu_core_id_base);
+ sysfs_show_32bit_prop(buffer, "simd_id_base",
+ dev->node_props.simd_id_base);
+ sysfs_show_32bit_prop(buffer, "capability",
+ dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ dev->node_props.max_waves_per_simd);
+ sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+ dev->node_props.lds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
+ dev->node_props.gds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "wave_front_size",
+ dev->node_props.wave_front_size);
+ sysfs_show_32bit_prop(buffer, "array_count",
+ dev->node_props.array_count);
+ sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
+ dev->node_props.simd_arrays_per_engine);
+ sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
+ dev->node_props.cu_per_simd_array);
+ sysfs_show_32bit_prop(buffer, "simd_per_cu",
+ dev->node_props.simd_per_cu);
+ sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
+ dev->node_props.max_slots_scratch_cu);
+ sysfs_show_32bit_prop(buffer, "engine_id",
+ dev->node_props.engine_id);
+ sysfs_show_32bit_prop(buffer, "vendor_id",
+ dev->node_props.vendor_id);
+ sysfs_show_32bit_prop(buffer, "device_id",
+ dev->node_props.device_id);
+ sysfs_show_32bit_prop(buffer, "location_id",
+ dev->node_props.location_id);
+
+ if (dev->gpu) {
+ sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
+ kfd2kgd->get_max_engine_clock_in_mhz(
+ dev->gpu->kgd));
+ sysfs_show_64bit_prop(buffer, "local_mem_size",
+ kfd2kgd->get_vmem_size(dev->gpu->kgd));
+ }
+
+ ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+ cpufreq_quick_get_max(0)/1000);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops node_ops = {
+ .show = node_show,
+};
+
+static struct kobj_type node_type = {
+ .sysfs_ops = &node_ops,
+};
+
+static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
+{
+ sysfs_remove_file(kobj, attr);
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
+{
+ struct kfd_iolink_properties *iolink;
+ struct kfd_cache_properties *cache;
+ struct kfd_mem_properties *mem;
+
+ BUG_ON(!dev);
+
+ if (dev->kobj_iolink) {
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ if (iolink->kobj) {
+ kfd_remove_sysfs_file(iolink->kobj,
+ &iolink->attr);
+ iolink->kobj = NULL;
+ }
+ kobject_del(dev->kobj_iolink);
+ kobject_put(dev->kobj_iolink);
+ dev->kobj_iolink = NULL;
+ }
+
+ if (dev->kobj_cache) {
+ list_for_each_entry(cache, &dev->cache_props, list)
+ if (cache->kobj) {
+ kfd_remove_sysfs_file(cache->kobj,
+ &cache->attr);
+ cache->kobj = NULL;
+ }
+ kobject_del(dev->kobj_cache);
+ kobject_put(dev->kobj_cache);
+ dev->kobj_cache = NULL;
+ }
+
+ if (dev->kobj_mem) {
+ list_for_each_entry(mem, &dev->mem_props, list)
+ if (mem->kobj) {
+ kfd_remove_sysfs_file(mem->kobj, &mem->attr);
+ mem->kobj = NULL;
+ }
+ kobject_del(dev->kobj_mem);
+ kobject_put(dev->kobj_mem);
+ dev->kobj_mem = NULL;
+ }
+
+ if (dev->kobj_node) {
+ sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
+ sysfs_remove_file(dev->kobj_node, &dev->attr_name);
+ sysfs_remove_file(dev->kobj_node, &dev->attr_props);
+ kobject_del(dev->kobj_node);
+ kobject_put(dev->kobj_node);
+ dev->kobj_node = NULL;
+ }
+}
+
+static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ uint32_t id)
+{
+ struct kfd_iolink_properties *iolink;
+ struct kfd_cache_properties *cache;
+ struct kfd_mem_properties *mem;
+ int ret;
+ uint32_t i;
+
+ BUG_ON(!dev);
+
+ /*
+ * Creating the sysfs folders
+ */
+ BUG_ON(dev->kobj_node);
+ dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
+ if (!dev->kobj_node)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(dev->kobj_node, &node_type,
+ sys_props.kobj_nodes, "%d", id);
+ if (ret < 0)
+ return ret;
+
+ dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
+ if (!dev->kobj_mem)
+ return -ENOMEM;
+
+ dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
+ if (!dev->kobj_cache)
+ return -ENOMEM;
+
+ dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
+ if (!dev->kobj_iolink)
+ return -ENOMEM;
+
+ /*
+ * Creating sysfs files for node properties
+ */
+ dev->attr_gpuid.name = "gpu_id";
+ dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_gpuid);
+ dev->attr_name.name = "name";
+ dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_name);
+ dev->attr_props.name = "properties";
+ dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&dev->attr_props);
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
+ if (ret < 0)
+ return ret;
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
+ if (ret < 0)
+ return ret;
+ ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
+ if (ret < 0)
+ return ret;
+
+ i = 0;
+ list_for_each_entry(mem, &dev->mem_props, list) {
+ mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!mem->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(mem->kobj, &mem_type,
+ dev->kobj_mem, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ mem->attr.name = "properties";
+ mem->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&mem->attr);
+ ret = sysfs_create_file(mem->kobj, &mem->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ i = 0;
+ list_for_each_entry(cache, &dev->cache_props, list) {
+ cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!cache->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(cache->kobj, &cache_type,
+ dev->kobj_cache, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ cache->attr.name = "properties";
+ cache->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&cache->attr);
+ ret = sysfs_create_file(cache->kobj, &cache->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ i = 0;
+ list_for_each_entry(iolink, &dev->io_link_props, list) {
+ iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (!iolink->kobj)
+ return -ENOMEM;
+ ret = kobject_init_and_add(iolink->kobj, &iolink_type,
+ dev->kobj_iolink, "%d", i);
+ if (ret < 0)
+ return ret;
+
+ iolink->attr.name = "properties";
+ iolink->attr.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&iolink->attr);
+ ret = sysfs_create_file(iolink->kobj, &iolink->attr);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ return 0;
+}
+
+static int kfd_build_sysfs_node_tree(void)
+{
+ struct kfd_topology_device *dev;
+ int ret;
+ uint32_t i = 0;
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ ret = kfd_build_sysfs_node_entry(dev, i);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+
+ return 0;
+}
+
+static void kfd_remove_sysfs_node_tree(void)
+{
+ struct kfd_topology_device *dev;
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ kfd_remove_sysfs_node_entry(dev);
+}
+
+static int kfd_topology_update_sysfs(void)
+{
+ int ret;
+
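+ /*
+ * The topology and nodes kobjects are created once on the first call;
+ * the per-node entries are torn down and rebuilt on every update.
+ */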
+ pr_info("Creating topology SYSFS entries\n");
+ if (sys_props.kobj_topology == NULL) {
+ sys_props.kobj_topology =
+ kfd_alloc_struct(sys_props.kobj_topology);
+ if (!sys_props.kobj_topology)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(sys_props.kobj_topology,
+ &sysprops_type, &kfd_device->kobj,
+ "topology");
+ if (ret < 0)
+ return ret;
+
+ sys_props.kobj_nodes = kobject_create_and_add("nodes",
+ sys_props.kobj_topology);
+ if (!sys_props.kobj_nodes)
+ return -ENOMEM;
+
+ sys_props.attr_genid.name = "generation_id";
+ sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&sys_props.attr_genid);
+ ret = sysfs_create_file(sys_props.kobj_topology,
+ &sys_props.attr_genid);
+ if (ret < 0)
+ return ret;
+
+ sys_props.attr_props.name = "system_properties";
+ sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&sys_props.attr_props);
+ ret = sysfs_create_file(sys_props.kobj_topology,
+ &sys_props.attr_props);
+ if (ret < 0)
+ return ret;
+ }
+
+ kfd_remove_sysfs_node_tree();
+
+ return kfd_build_sysfs_node_tree();
+}
+
+static void kfd_topology_release_sysfs(void)
+{
+ kfd_remove_sysfs_node_tree();
+ if (sys_props.kobj_topology) {
+ sysfs_remove_file(sys_props.kobj_topology,
+ &sys_props.attr_genid);
+ sysfs_remove_file(sys_props.kobj_topology,
+ &sys_props.attr_props);
+ if (sys_props.kobj_nodes) {
+ kobject_del(sys_props.kobj_nodes);
+ kobject_put(sys_props.kobj_nodes);
+ sys_props.kobj_nodes = NULL;
+ }
+ kobject_del(sys_props.kobj_topology);
+ kobject_put(sys_props.kobj_topology);
+ sys_props.kobj_topology = NULL;
+ }
+}
+
+int kfd_topology_init(void)
+{
+ void *crat_image = NULL;
+ size_t image_size = 0;
+ int ret;
+
+ /*
+ * Initialize the head for the topology device list
+ */
+ INIT_LIST_HEAD(&topology_device_list);
+ init_rwsem(&topology_lock);
+ topology_crat_parsed = 0;
+
+ memset(&sys_props, 0, sizeof(sys_props));
+
+ /*
+ * Get the CRAT image from the ACPI
+ */
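+ /*
+ * The first call, with crat_image == NULL, only reports the table
+ * size; the table is copied into the allocation by the second call.
+ */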
+ ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+ if (ret == 0 && image_size > 0) {
+ pr_info("Found CRAT image with size=%zd\n", image_size);
+ crat_image = kmalloc(image_size, GFP_KERNEL);
+ if (!crat_image) {
+ ret = -ENOMEM;
+ pr_err("No memory for allocating CRAT image\n");
+ goto err;
+ }
+ ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
+
+ if (ret == 0) {
+ down_write(&topology_lock);
+ ret = kfd_parse_crat_table(crat_image);
+ if (ret == 0)
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+ } else {
+ pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+ kfree(crat_image);
+ } else if (ret == -ENODATA) {
+ ret = 0;
+ } else {
+ pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+
+err:
+ pr_info("Finished initializing topology ret=%d\n", ret);
+ return ret;
+}
+
+void kfd_topology_shutdown(void)
+{
+ kfd_topology_release_sysfs();
+ kfd_release_live_view();
+}
+
+static void kfd_debug_print_topology(void)
+{
+ struct kfd_topology_device *dev;
+ uint32_t i = 0;
+
+ pr_info("DEBUG PRINT OF TOPOLOGY:");
+ list_for_each_entry(dev, &topology_device_list, list) {
+ pr_info("Node: %d\n", i);
+ pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
+ pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
+ pr_info("\tSIMD count: %d", dev->node_props.simd_count);
+ i++;
+ }
+}
+
+static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
+{
+ uint32_t hashout;
+ uint32_t buf[7];
+ int i;
+
+ if (!gpu)
+ return 0;
+
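+ /*
+ * The gpu_id is a hash of the device's PCI identity and its VRAM
+ * size, so the same device always maps to the same id.
+ */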
+ buf[0] = gpu->pdev->devfn;
+ buf[1] = gpu->pdev->subsystem_vendor;
+ buf[2] = gpu->pdev->subsystem_device;
+ buf[3] = gpu->pdev->device;
+ buf[4] = gpu->pdev->bus->number;
+ buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
+ buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+
+ for (i = 0, hashout = 0; i < 7; i++)
+ hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
+
+ return hashout;
+}
+
+static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+{
+ struct kfd_topology_device *dev;
+ struct kfd_topology_device *out_dev = NULL;
+
+ BUG_ON(!gpu);
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+ dev->gpu = gpu;
+ out_dev = dev;
+ break;
+ }
+
+ return out_dev;
+}
+
+static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
+{
+ /*
+ * TODO: Generate an event for thunk about the arrival/removal
+ * of the GPU
+ */
+}
+
+int kfd_topology_add_device(struct kfd_dev *gpu)
+{
+ uint32_t gpu_id;
+ struct kfd_topology_device *dev;
+ int res;
+
+ BUG_ON(!gpu);
+
+ gpu_id = kfd_generate_gpu_id(gpu);
+
+ pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+
+ down_write(&topology_lock);
+ /*
+ * Try to assign the GPU to an existing topology device (generated
+ * from the CRAT table).
+ */
+ dev = kfd_assign_gpu(gpu);
+ if (!dev) {
+ pr_info("GPU was not found in the current topology. Extending.\n");
+ kfd_debug_print_topology();
+ dev = kfd_create_topology_device();
+ if (!dev) {
+ res = -ENOMEM;
+ goto err;
+ }
+ dev->gpu = gpu;
+
+ /*
+ * TODO: Make a call to retrieve topology information from the
+ * GPU vBIOS
+ */
+
+ /*
+ * Update the SYSFS tree, since we added another topology device
+ */
+ if (kfd_topology_update_sysfs() < 0)
+ kfd_topology_release_sysfs();
+
+ }
+
+ dev->gpu_id = gpu_id;
+ gpu->id = gpu_id;
+ dev->node_props.vendor_id = gpu->pdev->vendor;
+ dev->node_props.device_id = gpu->pdev->device;
+ dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
+ (gpu->pdev->devfn & 0xffffff);
+ /*
+ * TODO: Retrieve max engine clock values from KGD
+ */
+
+ res = 0;
+
+err:
+ up_write(&topology_lock);
+
+ if (res == 0)
+ kfd_notify_gpu_change(gpu_id, 1);
+
+ return res;
+}
+
+int kfd_topology_remove_device(struct kfd_dev *gpu)
+{
+ struct kfd_topology_device *dev;
+ uint32_t gpu_id;
+ int res = -ENODEV;
+
+ BUG_ON(!gpu);
+
+ down_write(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (dev->gpu == gpu) {
+ gpu_id = dev->gpu_id;
+ kfd_remove_sysfs_node_entry(dev);
+ kfd_release_topology_device(dev);
+ res = 0;
+ if (kfd_topology_update_sysfs() < 0)
+ kfd_topology_release_sysfs();
+ break;
+ }
+
+ up_write(&topology_lock);
+
+ if (res == 0)
+ kfd_notify_gpu_change(gpu_id, 0);
+
+ return res;
+}
+
+/*
+ * When idx is out of bounds, the function will return NULL
+ */
+struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
+{
+
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+ uint8_t device_idx = 0;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list) {
+ if (device_idx == idx) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ device_idx++;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
new file mode 100644
index 000000000000..989624b3cd14
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __KFD_TOPOLOGY_H__
+#define __KFD_TOPOLOGY_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+
+#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
+
+#define HSA_CAP_HOT_PLUGGABLE 0x00000001
+#define HSA_CAP_ATS_PRESENT 0x00000002
+#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
+#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
+#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
+#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
+#define HSA_CAP_VA_LIMIT 0x00000040
+#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
+#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
+#define HSA_CAP_RESERVED 0xfffff000
+
+struct kfd_node_properties {
+ uint32_t cpu_cores_count;
+ uint32_t simd_count;
+ uint32_t mem_banks_count;
+ uint32_t caches_count;
+ uint32_t io_links_count;
+ uint32_t cpu_core_id_base;
+ uint32_t simd_id_base;
+ uint32_t capability;
+ uint32_t max_waves_per_simd;
+ uint32_t lds_size_in_kb;
+ uint32_t gds_size_in_kb;
+ uint32_t wave_front_size;
+ uint32_t array_count;
+ uint32_t simd_arrays_per_engine;
+ uint32_t cu_per_simd_array;
+ uint32_t simd_per_cu;
+ uint32_t max_slots_scratch_cu;
+ uint32_t engine_id;
+ uint32_t vendor_id;
+ uint32_t device_id;
+ uint32_t location_id;
+ uint32_t max_engine_clk_fcompute;
+ uint32_t max_engine_clk_ccompute;
+ uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+};
+
+#define HSA_MEM_HEAP_TYPE_SYSTEM 0
+#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
+#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
+#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
+#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
+#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
+
+#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
+#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
+#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
+
+struct kfd_mem_properties {
+ struct list_head list;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags;
+ uint32_t width;
+ uint32_t mem_clk_max;
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+#define KFD_TOPOLOGY_CPU_SIBLINGS 256
+
+#define HSA_CACHE_TYPE_DATA 0x00000001
+#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
+#define HSA_CACHE_TYPE_CPU 0x00000004
+#define HSA_CACHE_TYPE_HSACU 0x00000008
+#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
+
+struct kfd_cache_properties {
+ struct list_head list;
+ uint32_t processor_id_low;
+ uint32_t cache_level;
+ uint32_t cache_size;
+ uint32_t cacheline_size;
+ uint32_t cachelines_per_tag;
+ uint32_t cache_assoc;
+ uint32_t cache_latency;
+ uint32_t cache_type;
+ uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+struct kfd_iolink_properties {
+ struct list_head list;
+ uint32_t iolink_type;
+ uint32_t ver_maj;
+ uint32_t ver_min;
+ uint32_t node_from;
+ uint32_t node_to;
+ uint32_t weight;
+ uint32_t min_latency;
+ uint32_t max_latency;
+ uint32_t min_bandwidth;
+ uint32_t max_bandwidth;
+ uint32_t rec_transfer_size;
+ uint32_t flags;
+ struct kobject *kobj;
+ struct attribute attr;
+};
+
+struct kfd_topology_device {
+ struct list_head list;
+ uint32_t gpu_id;
+ struct kfd_node_properties node_props;
+ uint32_t mem_bank_count;
+ struct list_head mem_props;
+ uint32_t cache_count;
+ struct list_head cache_props;
+ uint32_t io_link_count;
+ struct list_head io_link_props;
+ struct kfd_dev *gpu;
+ struct kobject *kobj_node;
+ struct kobject *kobj_mem;
+ struct kobject *kobj_cache;
+ struct kobject *kobj_iolink;
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+};
+
+struct kfd_system_properties {
+ uint32_t num_devices; /* Number of H-NUMA nodes */
+ uint32_t generation_count;
+ uint64_t platform_oem;
+ uint64_t platform_id;
+ uint64_t platform_rev;
+ struct kobject *kobj_topology;
+ struct kobject *kobj_nodes;
+ struct attribute attr_genid;
+ struct attribute attr_props;
+};
+
+
+
+#endif /* __KFD_TOPOLOGY_H__ */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
new file mode 100644
index 000000000000..9c729dd8dd50
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef KGD_KFD_INTERFACE_H_INCLUDED
+#define KGD_KFD_INTERFACE_H_INCLUDED
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#define KFD_INTERFACE_VERSION 1
+
+struct kfd_dev;
+struct kgd_dev;
+
+struct kgd_mem;
+
+enum kgd_memory_pool {
+ KGD_POOL_SYSTEM_CACHEABLE = 1,
+ KGD_POOL_SYSTEM_WRITECOMBINE = 2,
+ KGD_POOL_FRAMEBUFFER = 3,
+};
+
+struct kgd2kfd_shared_resources {
+ /* Bit n == 1 means VMID n is available for KFD. */
+ unsigned int compute_vmid_bitmap;
+
+ /* Compute pipes are counted starting from MEC0/pipe0 as 0. */
+ unsigned int first_compute_pipe;
+
+ /* Number of MEC pipes available for KFD. */
+ unsigned int compute_pipe_count;
+
+ /* Base address of doorbell aperture. */
+ phys_addr_t doorbell_physical_address;
+
+ /* Size in bytes of doorbell aperture. */
+ size_t doorbell_aperture_size;
+
+ /* Number of bytes at start of aperture reserved for KGD. */
+ size_t doorbell_start_offset;
+};
+
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers that the kgd driver
+ * uses to notify amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+ void (*exit)(void);
+ struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
+ bool (*device_init)(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+ void (*device_exit)(struct kfd_dev *kfd);
+ void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+ void (*suspend)(struct kfd_dev *kfd);
+ int (*resume)(struct kfd_dev *kfd);
+};
+
+/**
+ * struct kfd2kgd_calls
+ *
+ * @init_sa_manager: Initialize an instance of the sa manager, used by
+ * amdkfd for all system memory allocations that are mapped to the GART
+ * address space
+ *
+ * @fini_sa_manager: Releases all memory allocations for amdkfd that are
+ * handled by kgd sa manager
+ *
+ * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
+ * be used for mqds, hpds, kernel queue, fence and runlists
+ *
+ * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
+ *
+ * @get_vmem_size: Retrieves (physical) size of VRAM
+ *
+ * @get_gpu_clock_counter: Retrieves GPU clock counter
+ *
+ * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
+ *
+ * @program_sh_mem_settings: A function that should initialize the memory
+ * properties such as main aperture memory type (cached / non-cached) and
+ * secondary aperture base address, size and memory type.
+ * This function is used only for no cp scheduling mode.
+ *
+ * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
+ * scheduling mode. Only used for no cp scheduling mode.
+ *
+ * @init_memory: Initializes memory apertures to fixed base/limit address
+ * and non cached memory types.
+ *
+ * @init_pipeline: Initializes the compute pipelines.
+ *
+ * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
+ * scheduling mode.
+ *
+ * @hqd_is_occupies: Checks if a hqd slot is occupied.
+ *
+ * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
+ *
+ * This structure contains function pointers to services that the kgd driver
+ * provides to the amdkfd driver.
+ *
+ */
+struct kfd2kgd_calls {
+ /* Memory management. */
+ int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
+ void (*fini_sa_manager)(struct kgd_dev *kgd);
+ int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+ void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+ uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
+ uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
+
+ uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+
+ /* Register access functions */
+ void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+ int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+ int (*init_memory)(struct kgd_dev *kgd);
+ int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+ int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+
+ bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+ int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+};
+
+bool kgd2kfd_init(unsigned interface_version,
+ const struct kfd2kgd_calls *f2g,
+ const struct kgd2kfd_calls **g2f);
+
+#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e4a1490b42c2..e3a7a5078e5c 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 908e5316eac4..b01420c84864 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -486,7 +486,6 @@ static struct platform_driver armada_drm_platform_driver = {
.remove = armada_drm_remove,
.driver = {
.name = "armada-drm",
- .owner = THIS_MODULE,
},
.id_table = armada_drm_platform_ids,
};
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9dc0fd5c1ea4..b7ee2634e47c 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index fe95d31cd110..61dbf09dff5d 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -9,6 +9,17 @@
/* ---------------------------------------------------------------------- */
+static int bochsfb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct bochs_device *bochs =
+ container_of(fb_helper, struct bochs_device, fb.helper);
+ struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+
+ return ttm_fbdev_mmap(vma, &bo->bo);
+}
+
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -19,6 +30,7 @@ static struct fb_ops bochsfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_mmap = bochsfb_mmap,
};
static int bochsfb_create_object(struct bochs_device *bochs,
@@ -123,11 +135,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
-#if 0
- /* FIXME: get this right for mmap(/dev/fb0) */
- info->fix.smem_start = bochs_bo_mmap_offset(bo);
+ drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
+ info->fix.smem_start = 0;
info->fix.smem_len = size;
-#endif
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dbe619e6aab4..460389702d31 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -51,11 +51,10 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
{
struct bochs_device *bochs = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- unsigned long addr, size, mem, ioaddr, iosize;
+ unsigned long addr, size, mem, ioaddr, iosize, qext_size;
u16 id;
- if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
- (pdev->resource[2].flags & IORESOURCE_MEM)) {
+ if (pdev->resource[2].flags & IORESOURCE_MEM) {
/* mmio bar with vga and bochs registers present */
if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
DRM_ERROR("Cannot request mmio region\n");
@@ -116,6 +115,24 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
size / 1024, addr,
bochs->ioports ? "ioports" : "mmio",
ioaddr);
+
+ if (bochs->mmio && pdev->revision >= 2) {
+ qext_size = readl(bochs->mmio + 0x600);
+ if (qext_size < 4 || qext_size > iosize)
+ goto noext;
+ DRM_DEBUG("Found qemu ext regs, size %ld\n", qext_size);
+ if (qext_size >= 8) {
+#ifdef __BIG_ENDIAN
+ writel(0xbebebebe, bochs->mmio + 0x604);
+#else
+ writel(0x1e1e1e1e, bochs->mmio + 0x604);
+#endif
+ DRM_DEBUG(" qext endian: 0x%x\n",
+ readl(bochs->mmio + 0x604));
+ }
+ }
+
+noext:
return 0;
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6b7efcf363d6..85f0f8cf1fb8 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -6,6 +6,7 @@
*/
#include "bochs.h"
+#include <drm/drm_plane_helper.h>
static int defx = 1024;
static int defy = 768;
@@ -108,11 +109,32 @@ static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
{
}
+static int bochs_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct bochs_device *bochs =
+ container_of(crtc, struct bochs_device, crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ unsigned long irqflags;
+
+ crtc->primary->fb = fb;
+ bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
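+ /*
+ * The virtual hardware has no vblank interrupt, so the flip completes
+ * immediately and any pending event is signalled right away.
+ */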
+ if (event) {
+ spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
+ drm_send_vblank_event(bochs->dev, -1, event);
+ spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
+ }
+ return 0;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs bochs_crtc_funcs = {
.gamma_set = bochs_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
+ .page_flip = bochs_crtc_page_flip,
};
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index d44e69daa239..693a4565c4ff 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -210,6 +210,9 @@ int cirrus_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+ int bpp, int pitch);
+
/* cirrus_display.c */
int cirrus_modeset_init(struct cirrus_device *cdev);
void cirrus_modeset_fini(struct cirrus_device *cdev);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index d231b1c317af..502a89eb54b5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -139,6 +139,7 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
+ struct cirrus_device *cdev = dev->dev_private;
u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
@@ -146,8 +147,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
int ret = 0;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
- if (bpp > 24)
+ if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+ bpp, mode_cmd->pitches[0]))
return -EINVAL;
+
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = cirrus_gem_create(dev, size, true, &gobj);
if (ret)
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 99c1983f99d2..4c2d68e9102d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -49,14 +49,16 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
int ret;
u32 bpp, depth;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
- /* cirrus can't handle > 24bpp framebuffers at all */
- if (bpp > 24)
+
+ if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+ bpp, mode_cmd->pitches[0]))
return ERR_PTR(-EINVAL);
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
@@ -96,8 +98,7 @@ static int cirrus_vram_init(struct cirrus_device *cdev)
{
/* BAR 0 is VRAM */
cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
- /* We have 4MB of VRAM */
- cdev->mc.vram_size = 4 * 1024 * 1024;
+ cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
"cirrusdrmfb_vram")) {
@@ -179,17 +180,22 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
}
r = cirrus_mm_init(cdev);
- if (r)
+ if (r) {
dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+ goto out;
+ }
r = cirrus_modeset_init(cdev);
- if (r)
+ if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+ goto out;
+ }
dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+
+ return 0;
out:
- if (r)
- cirrus_driver_unload(dev);
+ cirrus_driver_unload(dev);
return r;
}
@@ -307,3 +313,21 @@ out_unlock:
return ret;
}
+
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+ int bpp, int pitch)
+{
+ const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
+ const int max_size = cdev->mc.vram_size;
+
+ if (bpp > 32)
+ return false;
+
+ if (pitch > max_pitch)
+ return false;
+
+ if (pitch * height > max_size)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c7c5a9d91fa0..99d4a74ffeaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -16,6 +16,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <video/cirrus.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
new file mode 100644
index 000000000000..ff5f034cc405
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+
+static void kfree_state(struct drm_atomic_state *state)
+{
+ kfree(state->connectors);
+ kfree(state->connector_states);
+ kfree(state->crtcs);
+ kfree(state->crtc_states);
+ kfree(state->planes);
+ kfree(state->plane_states);
+ kfree(state);
+}
+
+/**
+ * drm_atomic_state_alloc - allocate atomic state
+ * @dev: DRM device
+ *
+ * This allocates an empty atomic state to track updates.
+ */
+struct drm_atomic_state *
+drm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct drm_atomic_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
+
+ state->crtcs = kcalloc(dev->mode_config.num_crtc,
+ sizeof(*state->crtcs), GFP_KERNEL);
+ if (!state->crtcs)
+ goto fail;
+ state->crtc_states = kcalloc(dev->mode_config.num_crtc,
+ sizeof(*state->crtc_states), GFP_KERNEL);
+ if (!state->crtc_states)
+ goto fail;
+ state->planes = kcalloc(dev->mode_config.num_total_plane,
+ sizeof(*state->planes), GFP_KERNEL);
+ if (!state->planes)
+ goto fail;
+ state->plane_states = kcalloc(dev->mode_config.num_total_plane,
+ sizeof(*state->plane_states), GFP_KERNEL);
+ if (!state->plane_states)
+ goto fail;
+ state->connectors = kcalloc(state->num_connector,
+ sizeof(*state->connectors),
+ GFP_KERNEL);
+ if (!state->connectors)
+ goto fail;
+ state->connector_states = kcalloc(state->num_connector,
+ sizeof(*state->connector_states),
+ GFP_KERNEL);
+ if (!state->connector_states)
+ goto fail;
+
+ state->dev = dev;
+
+ DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+
+ return state;
+fail:
+ kfree_state(state);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_state_alloc);
+
+/**
+ * drm_atomic_state_clear - clear state object
+ * @state: atomic state
+ *
+ * When the w/w mutex algorithm detects a deadlock we need to back off and drop
+ * all locks. So someone else could sneak in and change the current modeset
+ * configuration. Which means that all the state assembled in @state is no
+ * longer an atomic update to the current state, but to some arbitrary earlier
+ * state. Which could break assumptions the driver's ->atomic_check likely
+ * relies on.
+ *
+ * Hence we must clear all cached state and completely start over, using this
+ * function.
+ */
+void drm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int i;
+
+ DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector *connector = state->connectors[i];
+
+ if (!connector)
+ continue;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ connector->funcs->atomic_destroy_state(connector,
+ state->connector_states[i]);
+ }
+
+ for (i = 0; i < config->num_crtc; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->funcs->atomic_destroy_state(crtc,
+ state->crtc_states[i]);
+ }
+
+ for (i = 0; i < config->num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->funcs->atomic_destroy_state(plane,
+ state->plane_states[i]);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_state_clear);
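+
+/*
+ * A rough sketch of the caller-side backoff pattern described above
+ * (assuming the caller has initialized state->acquire_ctx with
+ * drm_modeset_acquire_init()):
+ *
+ *	retry:
+ *		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ *		if (IS_ERR(crtc_state) && PTR_ERR(crtc_state) == -EDEADLK) {
+ *			drm_atomic_state_clear(state);
+ *			drm_modeset_backoff(state->acquire_ctx);
+ *			goto retry;
+ *		}
+ */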
+
+/**
+ * drm_atomic_state_free - free all memory for an atomic state
+ * @state: atomic state to deallocate
+ *
+ * This frees all memory associated with an atomic state, including all the
+ * per-object state for planes, crtcs and connectors.
+ */
+void drm_atomic_state_free(struct drm_atomic_state *state)
+{
+ drm_atomic_state_clear(state);
+
+ DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+
+ kfree_state(state);
+}
+EXPORT_SYMBOL(drm_atomic_state_free);
+
+/**
+ * drm_atomic_get_crtc_state - get crtc state
+ * @state: global atomic state object
+ * @crtc: crtc to get state object for
+ *
+ * This function returns the crtc state for the given crtc, allocating it if
+ * needed. It will also grab the relevant crtc lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_crtc_state *
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ int ret, index;
+ struct drm_crtc_state *crtc_state;
+
+ index = drm_crtc_index(crtc);
+
+ if (state->crtc_states[index])
+ return state->crtc_states[index];
+
+ ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+ if (!crtc_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->crtc_states[index] = crtc_state;
+ state->crtcs[index] = crtc;
+ crtc_state->state = state;
+
+ DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
+ crtc->base.id, crtc_state, state);
+
+ return crtc_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_crtc_state);
+
+/**
+ * drm_atomic_get_plane_state - get plane state
+ * @state: global atomic state object
+ * @plane: plane to get state object for
+ *
+ * This function returns the plane state for the given plane, allocating it if
+ * needed. It will also grab the relevant plane lock to make sure that the state
+ * is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_plane_state *
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ int ret, index;
+ struct drm_plane_state *plane_state;
+
+ index = drm_plane_index(plane);
+
+ if (state->plane_states[index])
+ return state->plane_states[index];
+
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ if (!plane_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->plane_states[index] = plane_state;
+ state->planes[index] = plane;
+ plane_state->state = state;
+
+ DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
+ plane->base.id, plane_state, state);
+
+ if (plane_state->crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+ }
+
+ return plane_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_plane_state);
+
+/**
+ * drm_atomic_get_connector_state - get connector state
+ * @state: global atomic state object
+ * @connector: connector to get state object for
+ *
+ * This function returns the connector state for the given connector,
+ * allocating it if needed. It will also grab the relevant connector lock to
+ * make sure that the state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_connector_state *
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int ret, index;
+ struct drm_mode_config *config = &connector->dev->mode_config;
+ struct drm_connector_state *connector_state;
+
+ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ index = drm_connector_index(connector);
+
+ /*
+ * Construction of atomic state updates can race with a connector
+ * hot-add which might overflow. In this case flip the table and just
+ * restart the entire ioctl - no one is fast enough to livelock a cpu
+ * with physical hotplug events anyway.
+ *
+ * Note that we only grab the indexes once we have the right lock to
+ * prevent hotplug/unplugging of connectors. So removal is no problem,
+ * at most the array is a bit too large.
+ */
+ if (index >= state->num_connector) {
+ DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+ return ERR_PTR(-EAGAIN);
+ }
+
+ if (state->connector_states[index])
+ return state->connector_states[index];
+
+ connector_state = connector->funcs->atomic_duplicate_state(connector);
+ if (!connector_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->connector_states[index] = connector_state;
+ state->connectors[index] = connector;
+ connector_state->state = state;
+
+ DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
+ connector->base.id, connector_state, state);
+
+ if (connector_state->crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ connector_state->crtc);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+ }
+
+ return connector_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_state);
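+
+/*
+ * A minimal usage sketch (assuming a hypothetical foo driver): driver code
+ * that needs additional object states in its atomic check simply pulls them
+ * in with the getters above and propagates the encoded error, including
+ * -EDEADLK, back to the caller.
+ *
+ *	static int foo_check_primary(struct drm_atomic_state *state,
+ *				     struct drm_crtc *crtc)
+ *	{
+ *		struct drm_crtc_state *crtc_state;
+ *		struct drm_plane_state *plane_state;
+ *
+ *		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ *		if (IS_ERR(crtc_state))
+ *			return PTR_ERR(crtc_state);
+ *
+ *		plane_state = drm_atomic_get_plane_state(state, crtc->primary);
+ *		if (IS_ERR(plane_state))
+ *			return PTR_ERR(plane_state);
+ *
+ *		return 0;
+ *	}
+ */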
+
+/**
+ * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * @state: the incoming atomic state
+ * @plane: the plane whose incoming state to update
+ * @crtc: crtc to use for the plane
+ *
+ * Changing the assigned crtc for a plane requires us to grab the lock and state
+ * for the new crtc, as needed. This function takes care of all these details
+ * besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+
+ if (WARN_ON(IS_ERR(plane_state)))
+ return PTR_ERR(plane_state);
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ plane_state->crtc);
+ if (WARN_ON(IS_ERR(crtc_state)))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+ }
+
+ plane_state->crtc = crtc;
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+ }
+
+ if (crtc)
+ DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
+ plane_state, crtc->base.id);
+ else
+ DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
+
+/**
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
+ * @plane_state: atomic state object for the plane
+ * @fb: fb to use for the plane
+ *
+ * Changing the assigned framebuffer for a plane requires us to grab a reference
+ * to the new fb and drop the reference to the old fb, if there is one. This
+ * function takes care of all these details besides updating the pointer in the
+ * state object itself.
+ */
+void
+drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb)
+{
+ if (plane_state->fb)
+ drm_framebuffer_unreference(plane_state->fb);
+ if (fb)
+ drm_framebuffer_reference(fb);
+ plane_state->fb = fb;
+
+ if (fb)
+ DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
+ fb->base.id, plane_state);
+ else
+ DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
+
+/**
+ * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * @conn_state: atomic state object for the connector
+ * @crtc: crtc to use for the connector
+ *
+ * Changing the assigned crtc for a connector requires us to grab the lock and
+ * state for the new crtc, as needed. This function takes care of all these
+ * details besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ conn_state->crtc = crtc;
+
+ if (crtc)
+ DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
+ conn_state, crtc->base.id);
+ else
+ DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+
+/**
+ * drm_atomic_add_affected_connectors - add connectors for crtc
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function walks the current configuration and adds all connectors
+ * currently using @crtc to the atomic configuration @state. Note that this
+ * function must acquire the connection mutex. This can potentially cause
+ * unneeded serialization if the update is just for the planes on one crtc. Hence
+ * drivers and helpers should only call this when really needed (e.g. when a
+ * full modeset needs to happen due to some change).
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
+ crtc->base.id, state);
+
+ /*
+ * Changed connectors are already in @state, so we only need to look at the
+ * current configuration.
+ */
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->crtc != crtc)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
+
+/**
+ * drm_atomic_connectors_for_crtc - count number of connected outputs
+ * @state: atomic state
+ * @crtc: DRM crtc
+ *
+ * This function counts all connectors which will be connected to @crtc
+ * according to @state. Useful to recompute the enable state for @crtc.
+ */
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ int i, num_connected_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector_state *conn_state;
+
+ conn_state = state->connector_states[i];
+
+ if (conn_state && conn_state->crtc == crtc)
+ num_connected_connectors++;
+ }
+
+ DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
+ state, num_connected_connectors, crtc->base.id);
+
+ return num_connected_connectors;
+}
+EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
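+
+/*
+ * For example (sketch only), recomputing the enable state of a crtc in an
+ * atomic update boils down to:
+ *
+ *	crtc_state->enable = drm_atomic_connectors_for_crtc(state, crtc) != 0;
+ */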
+
+/**
+ * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
+ * @state: atomic state
+ *
+ * This function should be used by legacy entry points which don't understand
+ * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
+ * the slowpath has completed.
+ */
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
+{
+ int ret;
+
+retry:
+ drm_modeset_backoff(state->acquire_ctx);
+
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ goto retry;
+ ret = drm_modeset_lock_all_crtcs(state->dev,
+ state->acquire_ctx);
+ if (ret)
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_legacy_backoff);
+
+/**
+ * drm_atomic_check_only - check whether a given config would work
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_check_only(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+
+ DRM_DEBUG_KMS("checking %p\n", state);
+
+ if (config->funcs->atomic_check)
+ return config->funcs->atomic_check(state->dev, state);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_check_only);
+
+/**
+ * drm_atomic_commit - commit configuration atomically
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_commit(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ int ret;
+
+ ret = drm_atomic_check_only(state);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("commiting %p\n", state);
+
+ return config->funcs->atomic_commit(state->dev, state, false);
+}
+EXPORT_SYMBOL(drm_atomic_commit);
+
+/**
+ * drm_atomic_async_commit - atomic&async configuration commit
+ * @state: atomic configuration to check
+ *
+ * Note that this function can return -EDEADLK if the driver needed to acquire
+ * more locks but encountered a deadlock. The caller must then do the usual w/w
+ * backoff dance and restart. All other errors are fatal.
+ *
+ * Also note that on successful execution ownership of @state is transferred
+ * from the caller of this function to the function itself. The caller must not
+ * free or in any other way access @state. If the function fails then the caller
+ * must clean up @state itself.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_async_commit(struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ int ret;
+
+ ret = drm_atomic_check_only(state);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
+
+ return config->funcs->atomic_commit(state->dev, state, true);
+}
+EXPORT_SYMBOL(drm_atomic_async_commit);
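+
+/*
+ * Callers are expected to handle -EDEADLK with the usual w/w backoff dance,
+ * roughly as follows (a sketch only; the legacy helpers added below in
+ * drm_atomic_helper.c use exactly this pattern):
+ *
+ *	retry:
+ *		ret = drm_atomic_commit(state);
+ *		if (ret == -EDEADLK) {
+ *			drm_atomic_state_clear(state);
+ *			drm_atomic_legacy_backoff(state);
+ *			goto retry;
+ *		}
+ *		if (ret)
+ *			drm_atomic_state_free(state);
+ */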
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
new file mode 100644
index 000000000000..4a78a773151c
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -0,0 +1,1966 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/fence.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides implementations of check and commit functions on
+ * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
+ * also provides convenience implementations for the atomic state handling
+ * callbacks for drivers which don't need to subclass the drm core structures to
+ * add their own additional internal state.
+ *
+ * This library also provides default implementations for the check callback in
+ * drm_atomic_helper_check and for the commit callback with
+ * drm_atomic_helper_commit. But the individual stages and callbacks are exposed
+ * to allow drivers to mix and match and e.g. use the plane helpers only
+ * together with a driver private modeset implementation.
+ *
+ * This library also provides implementations for all the legacy driver
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
+ * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the
+ * various functions to implement set_property callbacks. New drivers must not
+ * implement these functions themselves but must use the provided helpers.
+ */
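+
+/*
+ * As a rough sketch (driver names are hypothetical), wiring the default check
+ * and commit implementations into a driver looks like this:
+ *
+ *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *		.atomic_check = drm_atomic_helper_check,
+ *		.atomic_commit = drm_atomic_helper_commit,
+ *	};
+ */
+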
+static void
+drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state,
+ struct drm_plane *plane)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane->state->crtc) {
+ crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ crtc_state->planes_changed = true;
+ }
+
+ if (plane_state->crtc) {
+ crtc_state =
+ state->crtc_states[drm_crtc_index(plane_state->crtc)];
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ crtc_state->planes_changed = true;
+ }
+}
+
+static struct drm_crtc *
+get_current_crtc_for_encoder(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ return connector->state->crtc;
+ }
+
+ return NULL;
+}
+
+static int
+steal_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder,
+ struct drm_crtc *encoder_crtc)
+{
+ struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int ret;
+
+ /*
+ * We can only steal an encoder coming from a connector, which means we
+ * must already hold the connection_mutex.
+ */
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id);
+
+ crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ connector_state = drm_atomic_get_connector_state(state,
+ connector);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+ if (ret)
+ return ret;
+ connector_state->best_encoder = NULL;
+ }
+
+ return 0;
+}
+
+static int
+update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+{
+ struct drm_connector_helper_funcs *funcs;
+ struct drm_encoder *new_encoder;
+ struct drm_crtc *encoder_crtc;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_crtc_state *crtc_state;
+ int idx, ret;
+
+ connector = state->connectors[conn_idx];
+ connector_state = state->connector_states[conn_idx];
+
+ if (!connector)
+ return 0;
+
+ DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ if (connector->state->crtc != connector_state->crtc) {
+ if (connector->state->crtc) {
+ idx = drm_crtc_index(connector->state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+ }
+
+ if (connector_state->crtc) {
+ idx = drm_crtc_index(connector_state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+ }
+ }
+
+ if (!connector_state->crtc) {
+ DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+
+ connector_state->best_encoder = NULL;
+
+ return 0;
+ }
+
+ funcs = connector->helper_private;
+ new_encoder = funcs->best_encoder(connector);
+
+ if (!new_encoder) {
+ DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ return -EINVAL;
+ }
+
+ if (new_encoder == connector_state->best_encoder) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
+
+ return 0;
+ }
+
+ encoder_crtc = get_current_crtc_for_encoder(state->dev,
+ new_encoder);
+
+ if (encoder_crtc) {
+ ret = steal_encoder(state, new_encoder, encoder_crtc);
+ if (ret) {
+ DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ connector->name);
+ return ret;
+ }
+ }
+
+ connector_state->best_encoder = new_encoder;
+ idx = drm_crtc_index(connector_state->crtc);
+
+ crtc_state = state->crtc_states[idx];
+ crtc_state->mode_changed = true;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+ connector->base.id,
+ connector->name,
+ new_encoder->base.id,
+ new_encoder->name,
+ connector_state->crtc->base.id);
+
+ return 0;
+}
+
+static int
+mode_fixup(struct drm_atomic_state *state)
+{
+ int ncrtcs = state->dev->mode_config.num_crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ int i;
+ bool ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc_state || !crtc_state->mode_changed)
+ continue;
+
+ drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
+ }
+
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ conn_state = state->connector_states[i];
+
+ if (!conn_state)
+ continue;
+
+ WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
+
+ if (!conn_state->crtc || !conn_state->best_encoder)
+ continue;
+
+ crtc_state =
+ state->crtc_states[drm_crtc_index(conn_state->crtc)];
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call ->mode_fixup twice.
+ */
+ encoder = conn_state->best_encoder;
+ funcs = encoder->helper_private;
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+ ret = encoder->bridge->funcs->mode_fixup(
+ encoder->bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("Bridge fixup failed\n");
+ return -EINVAL;
+ }
+ }
+
+
+ ret = funcs->mode_fixup(encoder, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
+ encoder->base.id, encoder->name);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc_state = state->crtc_states[i];
+ crtc = state->crtcs[i];
+
+ if (!crtc_state || !crtc_state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+ ret = funcs->mode_fixup(crtc, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = state->crtcs[i];
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
+ crtc->base.id);
+ crtc_state->mode_changed = true;
+ }
+
+ if (crtc->state->enable != crtc_state->enable) {
+ DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
+ crtc->base.id);
+ crtc_state->mode_changed = true;
+ }
+ }
+
+ for (i = 0; i < state->num_connector; i++) {
+ /*
+ * This only sets crtc->mode_changed for routing changes,
+ * drivers must set crtc->mode_changed themselves when connector
+ * properties need to be updated.
+ */
+ ret = update_connector_routing(state, i);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * After all the routing has been prepared we need to add in any
+ * connector which is itself unchanged, but whose crtc changes its
+ * configuration. This must be done before calling mode_fixup in case a
+ * crtc only changed its mode but has the same set of connectors.
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ int num_connectors;
+
+ crtc = state->crtcs[i];
+ crtc_state = state->crtc_states[i];
+
+ if (!crtc || !crtc_state->mode_changed)
+ continue;
+
+ DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
+ crtc->base.id,
+ crtc_state->enable ? 'y' : 'n');
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret != 0)
+ return ret;
+
+ num_connectors = drm_atomic_connectors_for_crtc(state,
+ crtc);
+
+ if (crtc_state->enable != !!num_connectors) {
+ DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
+ crtc->base.id);
+
+ return -EINVAL;
+ }
+ }
+
+ return mode_fixup(state);
+}
+
+/**
+ * drm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Check the state object to see if the requested state is physically possible.
+ * Only crtcs and planes have check callbacks, so for any additional (global)
+ * checking that a driver needs it can simply wrap that around this function.
+ * Drivers without such needs can directly use this as their ->atomic_check()
+ * callback.
+ *
+ * RETURNS
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int i, ret = 0;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *plane_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ drm_atomic_helper_plane_changed(state, plane_state, plane);
+
+ if (!funcs || !funcs->atomic_check)
+ continue;
+
+ ret = funcs->atomic_check(plane, plane_state);
+ if (ret) {
+ DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
+ plane->base.id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_check)
+ continue;
+
+ ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+ if (ret) {
+ DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check);
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *connector;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ old_conn_state = old_state->connector_states[i];
+ connector = old_state->connectors[i];
+
+ /* Shut down everything that's in the changeset and currently
+ * still on. So we need to check the old, saved state. */
+ if (!old_conn_state || !old_conn_state->crtc)
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /* We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ funcs = encoder->helper_private;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the disable hooks twice.
+ */
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
+ /* Right function depends upon target state. */
+ if (connector->state->crtc)
+ funcs->prepare(encoder);
+ else if (funcs->disable)
+ funcs->disable(encoder);
+ else
+ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ /* Shut down everything that needs a full modeset. */
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ /* Right function depends upon target state. */
+ if (crtc->state->enable)
+ funcs->prepare(crtc);
+ else if (funcs->disable)
+ funcs->disable(crtc);
+ else
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void
+set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ /* clear out existing links */
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->encoder)
+ continue;
+
+ WARN_ON(!connector->encoder->crtc);
+
+ connector->encoder->crtc = NULL;
+ connector->encoder = NULL;
+ }
+
+ /* set new links */
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->crtc)
+ continue;
+
+ if (WARN_ON(!connector->state->best_encoder))
+ continue;
+
+ connector->encoder = connector->state->best_encoder;
+ connector->encoder->crtc = connector->state->crtc;
+ }
+
+ /* set legacy state in the crtc structure */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->mode = crtc->state->mode;
+ crtc->enabled = crtc->state->enable;
+ crtc->x = crtc->primary->state->src_x >> 16;
+ crtc->y = crtc->primary->state->src_y >> 16;
+ }
+}
+
+static void
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable)
+ funcs->mode_set_nofb(crtc);
+ }
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *mode, *adjusted_mode;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+ new_crtc_state = connector->state->crtc->state;
+ mode = &new_crtc_state->mode;
+ adjusted_mode = &new_crtc_state->adjusted_mode;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the mode_set hooks twice.
+ */
+ funcs->mode_set(encoder, mode, adjusted_mode);
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_set)
+ encoder->bridge->funcs->mode_set(encoder->bridge,
+ mode, adjusted_mode);
+ }
+}
+
+/**
+ * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function commits the modeset changes that need to be committed before
+ * updating planes. It shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ */
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ disable_outputs(dev, state);
+ set_routing_links(dev, state);
+ crtc_set_mode(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+
+/**
+ * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the modeset changes that need to be committed after
+ * updating planes: It enables all the outputs with the new configuration which
+ * had to be turned off for the update.
+ */
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc;
+
+ crtc = old_state->crtcs[i];
+
+ /* Need to filter out CRTCs where only planes change. */
+ if (!crtc || !crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable)
+ funcs->commit(crtc);
+ }
+
+ for (i = 0; i < old_state->num_connector; i++) {
+ struct drm_connector *connector;
+ struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ connector = old_state->connectors[i];
+
+ if (!connector || !connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call the enable hooks twice.
+ */
+ if (encoder->bridge)
+ encoder->bridge->funcs->pre_enable(encoder->bridge);
+
+ funcs->commit(encoder);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->enable(encoder->bridge);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+
+static void wait_for_fences(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane || !plane->state->fence)
+ continue;
+
+ WARN_ON(!plane->state->fb);
+
+ fence_wait(plane->state->fence, false);
+ fence_put(plane->state->fence);
+ plane->state->fence = NULL;
+ }
+}
+
+static bool framebuffer_changed(struct drm_device *dev,
+ struct drm_atomic_state *old_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int nplanes = old_state->dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ plane = old_state->planes[i];
+ old_plane_state = old_state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ if (plane->state->crtc != crtc &&
+ old_plane_state->crtc != crtc)
+ continue;
+
+ if (plane->state->fb != old_plane_state->fb)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for vblanks on all affected
+ * crtcs (ie. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
+ * framebuffers have actually changed to optimize for the legacy cursor and
+ * plane update use-case.
+ */
+void
+drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i, ret;
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = old_state->crtcs[i];
+ old_crtc_state = old_state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ /* No one cares about the old state, so abuse it for tracking
+ * and store whether we hold a vblank reference (and should do a
+ * vblank wait) in the ->enable boolean. */
+ old_crtc_state->enable = false;
+
+ if (!crtc->state->enable)
+ continue;
+
+ if (!framebuffer_changed(dev, old_state, crtc))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret != 0)
+ continue;
+
+ old_crtc_state->enable = true;
+ old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ crtc = old_state->crtcs[i];
+ old_crtc_state = old_state->crtc_states[i];
+
+ if (!crtc || !old_crtc_state->enable)
+ continue;
+
+ ret = wait_event_timeout(dev->vblank[i].queue,
+ old_crtc_state->last_vblank_count !=
+ drm_vblank_count(dev, i),
+ msecs_to_jiffies(50));
+
+ drm_crtc_vblank_put(crtc);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. For now this doesn't implement asynchronous commits.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+
+ if (async)
+ return -EBUSY;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: it must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ wait_for_fences(dev, state);
+
+ drm_atomic_helper_commit_pre_planes(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ drm_atomic_helper_commit_post_planes(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit);
+
+/**
+ * DOC: implementing async commit
+ *
+ * For now the atomic helpers don't support async commit directly. If there is
+ * real need it could be added though, using the dma-buf fence infrastructure
+ * for generic synchronization with outstanding rendering.
+ *
+ * For now drivers have to implement async commit themselves, with the following
+ * sequence being the recommended one:
+ *
+ * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
+ * which commit needs to call which can fail, so we want to run it first and
+ * synchronously.
+ *
+ * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * might be affected by the new state update. This can be done by either cancelling
+ * or flushing the work items, depending upon whether the driver can deal with
+ * cancelled updates. Note that it is important to ensure that the framebuffer
+ * cleanup is still done when cancelling.
+ *
+ * For sufficient parallelism it is recommended to have a work item per crtc
+ * (for updates which don't touch global state) and a global one. Then we only
+ * need to synchronize with the crtc work items for changed crtcs and the global
+ * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ *
+ * 3. The software state is updated synchronously with
+ * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
+ * locks means concurrent callers never see inconsistent state. And doing this
+ * while it's guaranteed that no relevant async worker runs means that async
+ * workers do not need to grab any locks. Actually they must not grab locks, for
+ * otherwise the work flushing will deadlock.
+ *
+ * 4. Schedule a work item to do all subsequent steps, using the split-out
+ * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
+ * then cleaning up the framebuffers after the old framebuffer is no longer
+ * being displayed.
+ */
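+
+/*
+ * A compressed sketch of the above sequence (everything prefixed foo_ is
+ * hypothetical, driver-specific code): step 1 is the prepare call, step 2 the
+ * flush of pending commits, step 3 the swap, and step 4 queues foo_commit_work:
+ *
+ *	static void foo_commit_work(struct work_struct *work)
+ *	{
+ *		struct foo_commit *c = container_of(work, struct foo_commit, work);
+ *
+ *		drm_atomic_helper_commit_pre_planes(c->dev, c->state);
+ *		drm_atomic_helper_commit_planes(c->dev, c->state);
+ *		drm_atomic_helper_commit_post_planes(c->dev, c->state);
+ *		drm_atomic_helper_wait_for_vblanks(c->dev, c->state);
+ *		drm_atomic_helper_cleanup_planes(c->dev, c->state);
+ *		drm_atomic_state_free(c->state);
+ *	}
+ *
+ *	static int foo_atomic_commit(struct drm_device *dev,
+ *				     struct drm_atomic_state *state, bool async)
+ *	{
+ *		int ret;
+ *
+ *		ret = drm_atomic_helper_prepare_planes(dev, state);
+ *		if (ret)
+ *			return ret;
+ *
+ *		foo_flush_pending_commits(dev, state);
+ *		drm_atomic_helper_swap_state(dev, state);
+ *		foo_queue_commit(dev, state);
+ *
+ *		return 0;
+ *	}
+ */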
+
+/**
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
+ * @dev: DRM device
+ * @state: atomic state object with new state structures
+ *
+ * This function prepares plane state, specifically framebuffers, for the new
+ * configuration. If any failure is encountered this function will call
+ * ->cleanup_fb on any already successfully prepared framebuffer.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ret, i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_framebuffer *fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ fb = state->plane_states[i]->fb;
+
+ if (fb && funcs->prepare_fb) {
+ ret = funcs->prepare_fb(plane, fb);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = state->planes[i];
+ struct drm_framebuffer *fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ fb = state->plane_states[i]->fb;
+
+ if (fb && funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, fb);
+
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+
+/**
+ * drm_atomic_helper_commit_planes - commit plane state
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function commits the new plane state using the plane and atomic helper
+ * functions for planes and crtcs. It assumes that the atomic state has already
+ * been pushed into the relevant object state pointers, since this step can no
+ * longer fail.
+ *
+ * It still requires the global state object @old_state to know which planes and
+ * crtcs need to be updated though.
+ */
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ int i;
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_begin)
+ continue;
+
+ funcs->atomic_begin(crtc);
+ }
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = old_state->planes[i];
+ struct drm_plane_state *old_plane_state;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ if (!funcs || !funcs->atomic_update)
+ continue;
+
+ old_plane_state = old_state->plane_states[i];
+
+ funcs->atomic_update(plane, old_plane_state);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_crtc *crtc = old_state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (!funcs || !funcs->atomic_flush)
+ continue;
+
+ funcs->atomic_flush(crtc);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
+
+/**
+ * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the old
+ * configuration. Hence the old configuration must be preserved in @old_state to
+ * be able to call this function.
+ *
+ * This function must also be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes().
+ */
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ int nplanes = dev->mode_config.num_total_plane;
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane_helper_funcs *funcs;
+ struct drm_plane *plane = old_state->planes[i];
+ struct drm_framebuffer *old_fb;
+
+ if (!plane)
+ continue;
+
+ funcs = plane->helper_private;
+
+ old_fb = old_state->plane_states[i]->fb;
+
+ if (old_fb && funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, old_fb);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
+
+/**
+ * drm_atomic_helper_swap_state - store atomic state into current sw state
+ * @dev: DRM device
+ * @state: atomic state
+ *
+ * This function stores the atomic state into the current state pointers in all
+ * driver objects. It should be called after all failing steps have been done
+ * and succeeded, but before the actual hardware state is committed.
+ *
+ * For cleanup and error recovery the current state for all changed objects will
+ * be swapped into @state.
+ *
+ * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
+ *
+ * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
+ *
+ * 2. Do any other steps that might fail.
+ *
+ * 3. Put the staged state into the current state pointers with this function.
+ *
+ * 4. Actually commit the hardware state.
+ *
+ * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
+ * contains the old state. Also do any other cleanup required with that state.
+ */
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_connector; i++) {
+ struct drm_connector *connector = state->connectors[i];
+
+ if (!connector)
+ continue;
+
+ connector->state->state = state;
+ swap(state->connector_states[i], connector->state);
+ connector->state->state = NULL;
+ }
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ crtc->state->state = state;
+ swap(state->crtc_states[i], crtc->state);
+ crtc->state->state = NULL;
+ }
+
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+
+ if (!plane)
+ continue;
+
+ plane->state->state = state;
+ swap(state->plane_states[i], plane->state);
+ plane->state->state = NULL;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_swap_state);
+
+/**
+ * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC for @plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_plane);
+
+/**
+ * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_atomic_helper_disable_plane(struct drm_plane *plane)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ /*
+ * FIXME: Without plane->crtc set we can't get at the implicit legacy
+ * acquire context. The real fix will be to wire the acquire ctx through
+ * everywhere we need it, but meanwhile prevent chaos by just skipping
+ * this noop. The critical case is the cursor ioctls which a) only grab
+ * crtc/cursor-plane locks (so we need the crtc to get at the right
+ * acquire context) and b) can try to disable the plane multiple times.
+ */
+ if (!plane->crtc)
+ return 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_h = 0;
+ plane_state->crtc_w = 0;
+ plane_state->src_x = 0;
+ plane_state->src_y = 0;
+ plane_state->src_h = 0;
+ plane_state->src_w = 0;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
+
+static int update_output_state(struct drm_atomic_state *state,
+ struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_connector_state *conn_state;
+ int ncrtcs = state->dev->mode_config.num_crtc;
+ int ret, i, j;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ /* First grab all affected connector/crtc states. */
+ for (i = 0; i < set->num_connectors; i++) {
+ conn_state = drm_atomic_get_connector_state(state,
+ set->connectors[i]);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+
+ if (!crtc)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /* Then recompute connector->crtc links and crtc enabling state. */
+ for (i = 0; i < state->num_connector; i++) {
+ struct drm_connector *connector;
+
+ connector = state->connectors[i];
+ conn_state = state->connector_states[i];
+
+ if (!connector)
+ continue;
+
+ if (conn_state->crtc == set->crtc) {
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ for (j = 0; j < set->num_connectors; j++) {
+ if (set->connectors[j] == connector) {
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ set->crtc);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc_state *crtc_state = state->crtc_states[i];
+
+ if (!crtc)
+ continue;
+
+ /* Don't update ->enable for the CRTC in the set_config request,
+ * since a mismatch would indicate a bug in the upper layers.
+ * The actual modeset code later on will catch any
+ * inconsistencies here. */
+ if (crtc == set->crtc)
+ continue;
+
+ crtc_state->enable =
+ drm_atomic_connectors_for_crtc(state, crtc);
+ }
+
+ return 0;
+}
+
+/**
+ * drm_atomic_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * Provides a default crtc set_config handler using the atomic driver interface.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *primary_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state)) {
+ ret = PTR_ERR(primary_state);
+ goto fail;
+ }
+
+ if (!set->mode) {
+ WARN_ON(set->fb);
+ WARN_ON(set->num_connectors);
+
+ crtc_state->enable = false;
+
+ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(primary_state, NULL);
+
+ goto commit;
+ }
+
+ WARN_ON(!set->fb);
+ WARN_ON(!set->num_connectors);
+
+ crtc_state->enable = true;
+ drm_mode_copy(&crtc_state->mode, set->mode);
+
+ ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(primary_state, set->fb);
+ primary_state->crtc_x = 0;
+ primary_state->crtc_y = 0;
+ primary_state->crtc_h = set->mode->vdisplay;
+ primary_state->crtc_w = set->mode->hdisplay;
+ primary_state->src_x = set->x << 16;
+ primary_state->src_y = set->y << 16;
+ primary_state->src_h = set->mode->vdisplay << 16;
+ primary_state->src_w = set->mode->hdisplay << 16;
+
+commit:
+ ret = update_output_state(state, set);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ crtc->primary->old_fb = crtc->primary->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
+
+/**
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
+ * @crtc: DRM crtc
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default crtc set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
+
+/**
+ * drm_atomic_helper_plane_set_property - helper for plane properties
+ * @plane: DRM plane
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default plane set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
+retry:
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = plane->funcs->atomic_set_property(plane, plane_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
+
+/**
+ * drm_atomic_helper_connector_set_property - helper for connector properties
+ * @connector: DRM connector
+ * @property: DRM property
+ * @val: value of property
+ *
+ * Provides a default connector set_property handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int
+drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(connector->dev);
+ if (!state)
+ return -ENOMEM;
+
+ /* ->set_property is always called with all locks held. */
+ state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
+retry:
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ ret = connector->funcs->atomic_set_property(connector, connector_state,
+ property, val);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
+
+/**
+ * drm_atomic_helper_page_flip - execute a legacy page flip
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ *
+ * Provides a default page flip implementation using the atomic driver interface.
+ *
+ * Note that, for now, so-called async page flips (i.e. updates which are not
+ * synchronized to vblank) are not supported, since the atomic interfaces have
+ * no provisions for this yet.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ return -EINVAL;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+ crtc_state->event = event;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
+ if (ret != 0)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ ret = drm_atomic_async_commit(state);
+ if (ret != 0)
+ goto fail;
+
+ /* TODO: ->page_flip is the only driver callback where the core
+ * doesn't update plane->fb. For now patch it up here. */
+ plane->fb = plane->state->fb;
+
+ /* Driver takes ownership of state on successful async commit. */
+ return 0;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+
+ return ret;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
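For illustration only, the page-flip and CRTC property helpers would typically be plugged straight into a driver's drm_crtc_funcs, alongside the default reset/duplicate/destroy hooks introduced below; the remaining callbacks are left out of this sketch:

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.page_flip		= drm_atomic_helper_page_flip,
	.set_property		= drm_atomic_helper_crtc_set_property,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	/* .set_config and friends omitted here */
};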
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available. This is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ */
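As a usage sketch under the assumption that the driver has no hardware state readout (foo_load is a hypothetical driver entry point), the reset is simply done once after all objects are registered, and typically repeated in the resume path:

static int foo_load(struct drm_device *dev)
{
	/* ... init planes, CRTCs and connectors with the default ->reset hooks ... */

	/* Puts every object into a well-defined "everything off" software
	 * state by calling the ->reset hook of each plane, CRTC and
	 * connector. */
	drm_mode_config_reset(dev);

	return 0;
}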
+
+/**
+ * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+ kfree(crtc->state);
+ crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
+
+ if (state) {
+ state->mode_changed = false;
+ state->planes_changed = false;
+ state->event = NULL;
+ }
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(plane->state);
+ plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
+
+ if (state && state->fb)
+ drm_framebuffer_reference(state->fb);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_reset - default ->reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+ kfree(connector->state);
+ connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e79c8d3700d8..5213da499d39 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -683,7 +683,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
- goto out;
+ return ret;
crtc->base.properties = &crtc->properties;
@@ -697,9 +697,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
- out:
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
@@ -723,6 +721,12 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
+
+ WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
+ if (crtc->state && crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -766,7 +770,6 @@ static void drm_mode_remove(struct drm_connector *connector,
/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
- * @mode: returned mode
*
* The kernel supports per-connector configuration of its consoles through
* use of the video= parameter. This function parses that option and
@@ -870,6 +873,8 @@ int drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
+ /* We should add connectors at the end to avoid upsetting the connector
+ * index too much. */
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
@@ -905,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
@@ -919,6 +929,13 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
+
+ WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+ if (connector->state && connector->funcs->atomic_destroy_state)
+ connector->funcs->atomic_destroy_state(connector,
+ connector->state);
+
+ memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -933,6 +950,9 @@ unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
+ struct drm_mode_config *config = &connector->dev->mode_config;
+
+ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
if (tmp == connector)
@@ -1057,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
+
+ memset(bridge, 0, sizeof(*bridge));
}
EXPORT_SYMBOL(drm_bridge_cleanup);
@@ -1123,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
- encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
+
+ memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -1153,11 +1176,11 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
{
int ret;
- drm_modeset_lock_all(dev);
-
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
- goto out;
+ return ret;
+
+ drm_modeset_lock_init(&plane->mutex);
plane->base.properties = &plane->properties;
plane->dev = dev;
@@ -1167,8 +1190,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
@@ -1185,10 +1207,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
dev->mode_config.plane_type_property,
plane->type);
- out:
- drm_modeset_unlock_all(dev);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
@@ -1246,6 +1265,12 @@ void drm_plane_cleanup(struct drm_plane *plane)
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
drm_modeset_unlock_all(dev);
+
+ WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+ if (plane->state && plane->funcs->atomic_destroy_state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1328,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
"PATH", 0);
dev->mode_config.path_property = dev_path;
+ dev->mode_config.tile_property = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+
return 0;
}
@@ -1388,12 +1418,13 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
* responsible for allocating a list of format names and passing them to
* this routine.
*/
-int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
char *modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
- int i;
+ unsigned int i;
if (dev->mode_config.tv_select_subconnector_property)
return 0;
@@ -1491,7 +1522,7 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
* connectors.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
{
@@ -1535,6 +1566,30 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+ return 0;
+
+ dev->mode_config.suggested_x_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+ dev->mode_config.suggested_y_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+ if (dev->mode_config.suggested_x_property == NULL ||
+ dev->mode_config.suggested_y_property == NULL)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
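A driver using these properties would then attach them to each connector and fill in offsets obtained e.g. from platform firmware; the snippet below is only an assumed usage pattern, with zero placeholder values:

	drm_mode_create_suggested_offset_properties(dev);
	/* per connector, during connector setup */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);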
+
static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
@@ -1651,7 +1706,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
* the caller.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
@@ -1694,7 +1749,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -1745,7 +1800,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_fbs = fb_count;
mutex_unlock(&file_priv->fbs_lock);
- drm_modeset_lock_all(dev);
+ /* mode_config.mutex protects the connector list against e.g. DP MST
+ * connector hot-adding. CRTC/Plane lists are invariant. */
+ mutex_lock(&dev->mode_config.mutex);
if (!drm_is_primary_client(file_priv)) {
mode_group = NULL;
@@ -1865,7 +1922,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
card_res->count_connectors, card_res->count_encoders);
out:
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -1880,26 +1937,22 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
- int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
-
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
+ if (!crtc)
+ return -ENOENT;
+ drm_modeset_lock_crtc(crtc, crtc->primary);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
@@ -1916,10 +1969,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
} else {
crtc_resp->mode_valid = 0;
}
+ drm_modeset_unlock_crtc(crtc);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
@@ -1935,6 +1987,15 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
return true;
}
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ if (connector->state)
+ return connector->state->best_encoder;
+ return connector->encoder;
+}
+
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
@@ -1946,13 +2007,14 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
int props_count = 0;
@@ -2008,8 +2070,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- if (connector->encoder)
- out_resp->encoder_id = connector->encoder->base.id;
+
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2079,6 +2143,33 @@ out:
return ret;
}
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ bool uses_atomic = false;
+
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->state)
+ continue;
+
+ uses_atomic = true;
+
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ return connector->state->crtc;
+ }
+
+ /* Don't return stale data (e.g. pending async disable). */
+ if (uses_atomic)
+ return NULL;
+
+ return encoder->crtc;
+}
+
/**
* drm_mode_getencoder - get encoder configuration
* @dev: drm device for the ioctl
@@ -2090,37 +2181,38 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_encoder *encoder;
- int ret = 0;
+ struct drm_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
- if (!encoder) {
- ret = -ENOENT;
- goto out;
- }
+ if (!encoder)
+ return -ENOENT;
- if (encoder->crtc)
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ crtc = drm_encoder_get_crtc(encoder);
+ if (crtc)
+ enc_resp->crtc_id = crtc->base.id;
+ else if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
enc_resp->possible_crtcs = encoder->possible_crtcs;
enc_resp->possible_clones = encoder->possible_clones;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/**
@@ -2134,7 +2226,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2143,13 +2235,12 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
- int copied = 0, ret = 0;
+ int copied = 0;
unsigned num_planes;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
config = &dev->mode_config;
if (file_priv->universal_planes)
@@ -2165,6 +2256,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
(plane_resp->count_planes >= num_planes)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+ /* Plane lists are invariant, no locking needed. */
list_for_each_entry(plane, &config->plane_list, head) {
/*
* Unless userspace set the 'universal planes'
@@ -2174,18 +2266,14 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
!file_priv->universal_planes)
continue;
- if (put_user(plane->base.id, plane_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ if (put_user(plane->base.id, plane_ptr + copied))
+ return -EFAULT;
copied++;
}
}
plane_resp->count_planes = num_planes;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/**
@@ -2199,7 +2287,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2207,18 +2295,15 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_mode_get_plane *plane_resp = data;
struct drm_plane *plane;
uint32_t __user *format_ptr;
- int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, plane_resp->plane_id);
- if (!plane) {
- ret = -ENOENT;
- goto out;
- }
+ if (!plane)
+ return -ENOENT;
+ drm_modeset_lock(&plane->mutex, NULL);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
@@ -2228,6 +2313,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
+ drm_modeset_unlock(&plane->mutex);
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
@@ -2243,15 +2329,12 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
}
plane_resp->count_format_types = plane->format_count;
-out:
- drm_modeset_unlock_all(dev);
- return ret;
+ return 0;
}
/*
@@ -2274,7 +2357,7 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
unsigned int fb_width, fb_height;
- int i;
+ unsigned int i;
/* No fb means shut it down */
if (!fb) {
@@ -2378,13 +2461,12 @@ static int setplane_internal(struct drm_plane *plane,
* valid crtc).
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
@@ -2407,14 +2489,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- obj = drm_mode_object_find(dev, plane_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
- plane = obj_to_plane(obj);
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
@@ -2424,14 +2504,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
return -ENOENT;
}
- obj = drm_mode_object_find(dev, plane_req->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
}
/*
@@ -2453,7 +2531,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* interface. The only thing it adds is correct refcounting dance.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
@@ -2546,7 +2624,7 @@ EXPORT_SYMBOL(drm_crtc_check_viewport);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2709,7 +2787,7 @@ out:
* userspace wants to make use of these capabilities.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
struct drm_mode_cursor2 *req,
@@ -2810,7 +2888,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
if (crtc->cursor) {
ret = drm_mode_cursor_universal(crtc, req, file_priv);
goto out;
@@ -2857,7 +2935,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2884,7 +2962,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2943,23 +3021,21 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request. This is the
- * original addfb ioclt which only supported RGB formats.
+ * original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
- /* Use new struct with format internally */
+ /* convert to new format and call new ioctl */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
@@ -2967,28 +3043,13 @@ int drm_mode_addfb(struct drm_device *dev,
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if ((config->min_width > r.width) || (r.width > config->max_width))
- return -EINVAL;
-
- if ((config->min_height > r.height) || (r.height > config->max_height))
- return -EINVAL;
+ ret = drm_mode_addfb2(dev, &r, file_priv);
+ if (ret)
+ return ret;
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
- return PTR_ERR(fb);
- }
+ or->fb_id = r.fb_id;
- mutex_lock(&file_priv->fbs_lock);
- or->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
- mutex_unlock(&file_priv->fbs_lock);
-
- return ret;
+ return 0;
}
static int format_check(const struct drm_mode_fb_cmd2 *r)
@@ -3080,7 +3141,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
num_planes = drm_format_num_planes(r->pixel_format);
if (r->width == 0 || r->width % hsub) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
@@ -3170,7 +3231,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3198,7 +3259,7 @@ int drm_mode_addfb2(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3252,7 +3313,7 @@ fail_lookup:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3313,7 +3374,7 @@ int drm_mode_getfb(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3393,7 +3454,7 @@ out_err1:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
{
@@ -3402,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv)
/*
* When the file gets released that means no one else can access the fb
- * list any more, so no need to grab fpriv->fbs_lock. And we need to to
+ * list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
@@ -3435,6 +3496,10 @@ void drm_fb_release(struct drm_file *priv)
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
+ * Note that the DRM core keeps a per-device list of properties and that, if
+ * drm_mode_config_cleanup() is called, it will destroy all properties created
+ * by the driver.
+ *
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
@@ -3462,7 +3527,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property->flags = flags;
property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_blob_list);
+ INIT_LIST_HEAD(&property->enum_list);
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
@@ -3611,7 +3676,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
- * Userspace is allowed to set any interger value in the (min, max) range
+ * Userspace is allowed to set any integer value in the (min, max) range
* inclusive.
*
* Returns:
@@ -3684,8 +3749,8 @@ int drm_property_add_enum(struct drm_property *property, int index,
(value > 63))
return -EINVAL;
- if (!list_empty(&property->enum_blob_list)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ if (!list_empty(&property->enum_list)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
if (prop_enum->value == value) {
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
@@ -3703,7 +3768,7 @@ int drm_property_add_enum(struct drm_property *property, int index,
prop_enum->value = value;
property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_blob_list);
+ list_add_tail(&prop_enum->head, &property->enum_list);
return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
@@ -3720,7 +3785,7 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
- list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
list_del(&prop_enum->head);
kfree(prop_enum);
}
@@ -3823,17 +3888,20 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
EXPORT_SYMBOL(drm_object_property_get_value);
/**
- * drm_mode_getproperty_ioctl - get the current value of a connector's property
+ * drm_mode_getproperty_ioctl - get the property metadata
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
- * This function retrieves the current value for an connectors's property.
+ * This function retrieves the metadata for a given property, like the different
+ * possible values for an enum property or the limits for a range property.
+ *
+ * Blob properties are special: their values are not returned here, userspace
+ * needs to use the dedicated get_blob ioctl to read a blob property's value.
*
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3841,16 +3909,12 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
- int blob_count = 0;
int value_count = 0;
int ret = 0, i;
int copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
- struct drm_property_blob *prop_blob;
- uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
- uint32_t __user *blob_length_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -3864,11 +3928,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+ list_for_each_entry(prop_enum, &property->enum_list, head)
enum_count++;
- } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
- list_for_each_entry(prop_blob, &property->enum_blob_list, head)
- blob_count++;
}
value_count = property->num_values;
@@ -3893,7 +3954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
ret = -EFAULT;
@@ -3911,35 +3972,24 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->count_enum_blobs = enum_count;
}
- if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- if (put_user(prop_blob->length, blob_length_ptr + copied)) {
- ret = -EFAULT;
- goto done;
- }
-
- copied++;
- }
- }
- out_resp->count_enum_blobs = blob_count;
- }
+ /*
+ * NOTE: The idea seems to have been to use this to read all the blob
+ * property values. But nothing ever added them to the corresponding
+ * list, userspace always used the special-purpose get_blob ioctl to
+ * read the value for a blob property. It also doesn't make a lot of
+ * sense to return values here when everything else is just metadata for
+ * the property itself.
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+ out_resp->count_enum_blobs = 0;
done:
drm_modeset_unlock_all(dev);
return ret;
}
-static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
- void *data)
+static struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+ const void *data)
{
struct drm_property_blob *blob;
int ret;
@@ -3985,7 +4035,7 @@ static void drm_property_destroy_blob(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4019,12 +4069,25 @@ done:
return ret;
}
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
int drm_mode_connector_set_path_property(struct drm_connector *connector,
- char *path)
+ const char *path)
{
struct drm_device *dev = connector->dev;
- int ret, size;
- size = strlen(path) + 1;
+ size_t size = strlen(path) + 1;
+ int ret;
connector->path_blob_ptr = drm_property_create_blob(connector->dev,
size, path);
@@ -4039,6 +4102,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ char tile[256];
+
+ if (connector->tile_blob_ptr)
+ drm_property_destroy_blob(dev, connector->tile_blob_ptr);
+
+ if (!connector->has_tile) {
+ connector->tile_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property, 0);
+ return ret;
+ }
+
+ snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+ connector->tile_group->id, connector->tile_is_single_monitor,
+ connector->num_h_tile, connector->num_v_tile,
+ connector->tile_h_loc, connector->tile_v_loc,
+ connector->tile_h_size, connector->tile_v_size);
+ size = strlen(tile) + 1;
+
+ connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
+ size, tile);
+ if (!connector->tile_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property,
+ connector->tile_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
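Since the blob is just the ':'-separated string built above, userspace can decode it with a plain sscanf(); the helper below is an illustrative sketch only, not part of any kernel or libdrm API:

/* userspace example */
#include <stdio.h>

struct tile_info {
	int group_id, is_single_monitor;
	int num_h_tile, num_v_tile;
	int h_loc, v_loc, h_size, v_size;
};

static int parse_tile_blob(const char *blob, struct tile_info *t)
{
	/* Mirrors the snprintf() format used by
	 * drm_mode_connector_set_tile_property(). */
	if (sscanf(blob, "%d:%d:%d:%d:%d:%d:%d:%d",
		   &t->group_id, &t->is_single_monitor,
		   &t->num_h_tile, &t->num_v_tile,
		   &t->h_loc, &t->v_loc, &t->h_size, &t->v_size) != 8)
		return -1;
	return 0;
}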
+
+/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
@@ -4047,13 +4156,14 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
* connector's edid property.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret, size;
+ size_t size;
+ int ret;
/* ignore requests to set edid when overridden */
if (connector->override_edid)
@@ -4143,7 +4253,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4226,7 +4336,7 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
/**
- * drm_mode_getproperty_ioctl - get the current value of a object's property
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
@@ -4238,7 +4348,7 @@ EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -4310,7 +4420,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -4382,7 +4492,7 @@ out:
* possible_clones and possible_crtcs bitmasks.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
@@ -4409,7 +4519,7 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
* fixed gamma table size.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
@@ -4438,7 +4548,7 @@ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4510,7 +4620,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4576,7 +4686,7 @@ out:
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4599,7 +4709,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->primary);
if (crtc->primary->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
@@ -4742,7 +4852,7 @@ EXPORT_SYMBOL(drm_mode_config_reset);
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4769,6 +4879,16 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
if (PAGE_ALIGN(size) == 0)
return -EINVAL;
+ /*
+ * handle, pitch and size are output parameters. Zero them out to
+ * prevent drivers from accidentally using uninitialized data. Since
+ * not all existing userspace is clearing these fields properly, we
+ * cannot reject the IOCTL when there is garbage in them.
+ */
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+
return dev->driver->dumb_create(file_priv, dev, args);
}
@@ -4784,7 +4904,7 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -4811,7 +4931,7 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
* Called by the user via ioctl.
*
* Returns:
- * Zero on success, errno on failure.
+ * Zero on success, negative errno on failure.
*/
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -5097,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
@@ -5184,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
@@ -5206,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
supported_rotations);
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique
+ * integer identifier. Tiled monitors using DisplayID v1.3 have
+ * a unique 8-byte handle, we store this in a tile group, so we
+ * have a common identifier for all tiles in a monitor group.
+ */
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * drop reference to tile group and free if 0.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8 bytes, unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8 bytes, unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
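A rough usage sketch for the tile group API above, e.g. from EDID/DisplayID parsing code: look up an existing group for the 8-byte topology id and create one if none exists yet. foo_get_tile_group is a hypothetical helper; the caller keeps the returned reference in connector->tile_group and drops it with drm_mode_put_tile_group() on cleanup, as drm_connector_cleanup() does earlier in this patch.

static struct drm_tile_group *foo_get_tile_group(struct drm_device *dev,
						 char topology[8])
{
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);

	return tg;	/* may be an ERR_PTR() on allocation failure */
}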
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6c65a0a28fbd..d552708409de 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,12 +34,35 @@
#include <linux/moduleparam.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+/**
+ * DOC: overview
+ *
+ * The CRTC modeset helper library provides a default set_config implementation
+ * in drm_crtc_helper_set_config(). Plus a few other convenience functions using
+ * the same callbacks which drivers can use to e.g. restore the modeset
+ * configuration on resume with drm_helper_resume_force_mode().
+ *
+ * The driver callbacks are mostly compatible with the atomic modeset helpers,
+ * except for the handling of the primary plane: Atomic helpers require that the
+ * primary plane is implemented as a real standalone plane and not directly tied
+ * to the CRTC state. For easier transition this library provides functions to
+ * implement the old semantics required by the CRTC helpers using the new plane
+ * and atomic helper callbacks.
+ *
+ * Drivers are strongly urged to convert to the atomic helpers (by way of first
+ * converting to the plane helpers). New drivers must not use these functions
+ * but need to implement the atomic interface instead, potentially using the
+ * atomic helpers for that.
+ */
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
@@ -888,3 +911,112 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+/**
+ * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @mode: DRM display mode which userspace requested
+ * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set callback
+ * required by the crtc helpers. Besides the atomic plane helper functions for
+ * the primary plane the driver must also provide the ->mode_set_nofb callback
+ * to set up the crtc.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ int ret;
+
+ if (crtc->funcs->atomic_duplicate_state)
+ crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
+ else if (crtc->state)
+ crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
+ GFP_KERNEL);
+ else
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ return -ENOMEM;
+
+ crtc_state->enable = true;
+ crtc_state->planes_changed = true;
+ crtc_state->mode_changed = true;
+ drm_mode_copy(&crtc_state->mode, mode);
+ drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
+
+ if (crtc_funcs->atomic_check) {
+ ret = crtc_funcs->atomic_check(crtc, crtc_state);
+ if (ret) {
+ kfree(crtc_state);
+
+ return ret;
+ }
+ }
+
+ swap(crtc->state, crtc_state);
+
+ crtc_funcs->mode_set_nofb(crtc);
+
+ if (crtc_state) {
+ if (crtc->funcs->atomic_destroy_state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+ else
+ kfree(crtc_state);
+ }
+
+ return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set);
+
+/**
+ * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
+ * @crtc: DRM CRTC
+ * @x: x offset of the CRTC scanout area on the underlying framebuffer
+ * @y: y offset of the CRTC scanout area on the underlying framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * This function implements a callback usable as the ->mode_set_base callback
+ * required by the crtc helpers. The driver must provide the atomic plane helper
+ * functions for the primary plane.
+ *
+ * This is a transitional helper useful for converting drivers to the atomic
+ * interfaces.
+ */
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = crtc;
+ drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_h = crtc->mode.vdisplay;
+ plane_state->crtc_w = crtc->mode.hdisplay;
+ plane_state->src_x = x << 16;
+ plane_state->src_y = y << 16;
+ plane_state->src_h = crtc->mode.vdisplay << 16;
+ plane_state->src_w = crtc->mode.hdisplay << 16;
+
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
+}
+EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
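Putting the two transitional helpers together, a driver mid-conversion might hook them up as sketched below; foo_crtc_mode_set_nofb and the omitted callbacks are hypothetical driver code:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_set	= drm_helper_crtc_mode_set,
	.mode_set_base	= drm_helper_crtc_mode_set_base,
	/* required by drm_helper_crtc_mode_set(): programs the CRTC timings
	 * without touching the primary plane */
	.mode_set_nofb	= foo_crtc_mode_set_nofb,
	/* .dpms/.prepare/.commit/.mode_fixup unchanged from the legacy code */
};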
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08e33b8b13a4..79968e39c8d0 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -39,198 +39,6 @@
* blocks, ...
*/
-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-static int
-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- ret = (*algo_data->aux_ch)(adapter, mode,
- write_byte, read_byte);
- return ret;
-}
-
-/*
- * I2C over AUX CH
- */
-
-/*
- * Send the address. If the I2C link is running, this 'restarts'
- * the connection with the new address, this is used for doing
- * a write followed by a read (as needed for DDC)
- */
-static int
-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_START;
- int ret;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- algo_data->address = address;
- algo_data->running = true;
- ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- return ret;
-}
-
-/*
- * Stop the I2C transaction. This closes out the link, sending
- * a bare address packet with the MOT bit turned off
- */
-static void
-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int mode = MODE_I2C_STOP;
-
- if (reading)
- mode |= MODE_I2C_READ;
- else
- mode |= MODE_I2C_WRITE;
- if (algo_data->running) {
- (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
- algo_data->running = false;
- }
-}
-
-/*
- * Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
- return ret;
-}
-
-/*
- * Read a single byte from the current I2C address, the
- * I2C link must be running or this returns -EIO
- */
-static int
-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
-{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- int ret;
-
- if (!algo_data->running)
- return -EIO;
-
- ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
- return ret;
-}
-
-static int
-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
-{
- int ret = 0;
- bool reading = false;
- int m;
- int b;
-
- for (m = 0; m < num; m++) {
- u16 len = msgs[m].len;
- u8 *buf = msgs[m].buf;
- reading = (msgs[m].flags & I2C_M_RD) != 0;
- ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
- if (ret < 0)
- break;
- if (reading) {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
- if (ret < 0)
- break;
- }
- } else {
- for (b = 0; b < len; b++) {
- ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
- if (ret < 0)
- break;
- }
- }
- if (ret < 0)
- break;
- }
- if (ret >= 0)
- ret = num;
- i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
- return ret;
-}
-
-static u32
-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm i2c_dp_aux_algo = {
- .master_xfer = i2c_algo_dp_aux_xfer,
- .functionality = i2c_algo_dp_aux_functionality,
-};
-
-static void
-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
-{
- (void) i2c_algo_dp_aux_address(adapter, 0, false);
- (void) i2c_algo_dp_aux_stop(adapter, false);
-}
-
-static int
-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
-{
- adapter->algo = &i2c_dp_aux_algo;
- adapter->retries = 3;
- i2c_dp_aux_reset_bus(adapter);
- return 0;
-}
-
-/**
- * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
- * @adapter: i2c adapter to register
- *
- * This registers an i2c adapter that uses dp aux channel as it's underlaying
- * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
- * and store it in the algo_data member of the @adapter argument. This will be
- * used by the i2c over dp aux algorithm to drive the hardware.
- *
- * RETURNS:
- * 0 on success, -ERRNO on failure.
- *
- * IMPORTANT:
- * This interface is deprecated, please switch to the new dp aux helpers and
- * drm_dp_aux_register().
- */
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
-{
- int error;
-
- error = i2c_dp_aux_prepare_bus(adapter);
- if (error)
- return error;
- error = i2c_add_adapter(adapter);
- return error;
-}
-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
-
/* Helpers for DP link training */
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
@@ -378,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
/*
* The specification doesn't give any recommendation on how often to
- * retry native transactions, so retry 7 times like for I2C-over-AUX
- * transactions.
+ * retry native transactions. We used to retry 7 times like for
+ * aux i2c transactions, but for real world devices this wasn't
+ * sufficient; bump to 32, which makes Dell 4k monitors happier.
*/
- for (retry = 0; retry < 7; retry++) {
+ for (retry = 0; retry < 32; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
@@ -654,10 +463,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("I2C nack\n");
+ aux->i2c_nack_count++;
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("I2C defer\n");
+ aux->i2c_defer_count++;
usleep_range(400, 500);
continue;
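
A minimal illustrative sketch of the bounded-retry policy the hunk above switches to; it is not part of the patch. native_aux_with_retries(), NATIVE_AUX_RETRIES and the choice of which error codes are treated as transient are assumptions made for the example; only aux->transfer() comes from the code above.

/* Sketch: retry a native AUX transfer up to 32 times, as above. */
#define NATIVE_AUX_RETRIES 32

static ssize_t native_aux_with_retries(struct drm_dp_aux *aux,
                                       struct drm_dp_aux_msg *msg)
{
        ssize_t err;
        int retry;

        for (retry = 0; retry < NATIVE_AUX_RETRIES; retry++) {
                err = aux->transfer(aux, msg);  /* driver-provided hook */
                if (err >= 0)
                        return err;             /* bytes transferred */
                if (err != -EBUSY && err != -ETIMEDOUT)
                        return err;             /* hard failure, give up */
        }
        return -EIO;                            /* retries exhausted */
}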
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 070f913d2dba..9a5b68717ec8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
+ struct drm_dp_mst_branch *mstb;
+
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
- drm_dp_put_mst_branch_device(port->mstb);
+ mstb = port->mstb;
port->mstb = NULL;
+ drm_dp_put_mst_branch_device(mstb);
break;
}
}
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
+
+ kfree(port->cached_edid);
if (port->connector)
(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1011,19 +1016,20 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
- char *proppath)
+ char *proppath,
+ size_t proppath_size)
{
int i;
char temp[8];
- snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = mstb->rad[i / 2] >> shift;
- snprintf(temp, 8, "-%d", port_num);
- strncat(proppath, temp, 255);
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, 8, "-%d", port->port_num);
- strncat(proppath, temp, 255);
+ snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ strlcat(proppath, temp, proppath_size);
}
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
@@ -1094,8 +1100,12 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath);
+ build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+
+ if (port->port_num >= 8) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ }
}
/* put reference to this port */
@@ -1798,17 +1808,27 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
+ int dp_link_count,
+ int *out)
{
switch (dp_link_bw) {
+ default:
+ DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
+ dp_link_bw, dp_link_count);
+ return false;
+
case DP_LINK_BW_1_62:
- return 3 * dp_link_count;
+ *out = 3 * dp_link_count;
+ break;
case DP_LINK_BW_2_7:
- return 5 * dp_link_count;
+ *out = 5 * dp_link_count;
+ break;
case DP_LINK_BW_5_4:
- return 10 * dp_link_count;
+ *out = 10 * dp_link_count;
+ break;
}
- BUG();
+ return true;
}
/**
@@ -1840,7 +1860,13 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
+ &mgr->pbn_div)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
mgr->total_pbn = 2560;
mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
mgr->avail_slots = mgr->total_slots;
@@ -2150,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
* This returns the current connection state for a port. It validates the
* port pointer still exists so the caller doesn't require a reference
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
enum drm_connector_status status = connector_status_disconnected;
@@ -2169,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
+ /* for logical ports - cache the EDID */
+ if (port->port_num >= 8 && !port->cached_edid) {
+ port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+ }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -2200,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (!port)
return NULL;
- edid = drm_get_edid(connector, &port->aux.ddc);
+ if (port->cached_edid)
+ edid = drm_edid_duplicate(port->cached_edid);
+ else
+ edid = drm_get_edid(connector, &port->aux.ddc);
+
+ drm_mode_connector_set_tile_property(connector);
drm_dp_put_port(port);
return edid;
}
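
A minimal sketch of the calling pattern the reworked drm_dp_get_vc_payload_bw() expects: a boolean result plus an out parameter instead of the old BUG() on malformed DPCD. sketch_setup_payload_bw() is a hypothetical wrapper; the DPCD indices and mask mirror the hunk above.

static int sketch_setup_payload_bw(struct drm_dp_mst_topology_mgr *mgr)
{
        int pbn_div;

        if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
                                      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
                                      &pbn_div))
                return -EINVAL;         /* bad DPCD: fail gracefully */

        mgr->pbn_div = pbn_div;
        return 0;
}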
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bc3da32d4585..4f41377b0b80 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -56,7 +56,7 @@ static struct idr drm_minors_idr;
struct class *drm_class;
static struct dentry *drm_debugfs_root;
-void drm_err(const char *func, const char *format, ...)
+void drm_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -66,7 +66,8 @@ void drm_err(const char *func, const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+ printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+ __builtin_return_address(0), &vaf);
va_end(args);
}
@@ -534,6 +535,8 @@ static void drm_fs_inode_free(struct inode *inode)
* The initial ref-count of the object is 1. Use drm_dev_ref() and
* drm_dev_unref() to take and drop further ref-counts.
*
+ * Note that for purely virtual devices @parent can be NULL.
+ *
* RETURNS:
* Pointer to new DRM device, or NULL if out of memory.
*/
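
A small sketch, assuming nothing beyond what the drm_err() hunk above shows, of the caller-identification trick now used there: printing the return address with %pf so callers no longer have to pass __func__ explicitly. sketch_report_error() is a hypothetical name.

#include <linux/printk.h>

static void sketch_report_error(const char *what)
{
        /* %pf resolves the address to the calling function's name */
        printk(KERN_ERR "[sketch:%pf] *ERROR* %s\n",
               __builtin_return_address(0), what);
}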
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3bf999134bcc..53bc7a628909 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_displayid.h>
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid);
+
+static int drm_edid_block_checksum(const u8 *raw_edid)
+{
+ int i;
+ u8 csum = 0;
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+
+ return csum;
+}
+
+static bool drm_edid_is_zero(const u8 *in_edid, int length)
+{
+ if (memchr_inv(in_edid, 0, length))
+ return false;
+
+ return true;
+}
+
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
- int i;
- u8 csum = 0;
+ u8 csum;
struct edid *edid = (struct edid *)raw_edid;
if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
}
}
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
+ csum = drm_edid_block_checksum(raw_edid);
if (csum) {
if (print_bad_edid) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
bad:
if (print_bad_edid) {
- printk(KERN_ERR "Raw EDID:\n");
- print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
+ printk(KERN_ERR "EDID block is all zeroes\n");
+ } else {
+ printk(KERN_ERR "Raw EDID:\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
+ }
}
return false;
}
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
* drm_do_probe_ddc_edid() - get EDID information via I2C
- * @adapter: I2C device adaptor
+ * @data: I2C device adapter
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
@@ -1125,9 +1149,9 @@ EXPORT_SYMBOL(drm_edid_is_valid);
* Return: 0 on success or -1 on failure.
*/
static int
-drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
- int block, int len)
+drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct i2c_adapter *adapter = data;
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
@@ -1176,16 +1200,26 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
return ret == xfers ? 0 : -1;
}
-static bool drm_edid_is_zero(u8 *in_edid, int length)
-{
- if (memchr_inv(in_edid, 0, length))
- return false;
-
- return true;
-}
-
-static u8 *
-drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+/**
+ * drm_do_get_edid - get EDID data using a custom EDID block read function
+ * @connector: connector we're probing
+ * @get_edid_block: EDID block read function
+ * @data: private data passed to the block read function
+ *
+ * When the I2C adapter connected to the DDC bus is hidden behind a device that
+ * exposes a different interface to read EDID blocks this function can be used
+ * to get EDID data using a custom block read function.
+ *
+ * Since the DDC bus is, in the general case, accessible by the kernel at the I2C
+ * level, drivers must make all reasonable efforts to expose it as an I2C
+ * adapter and use drm_get_edid() instead of abusing this function.
+ *
+ * Return: Pointer to valid EDID or NULL if we couldn't find any.
+ */
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
@@ -1196,7 +1230,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
@@ -1210,7 +1244,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
- return block;
+ return (struct edid *)block;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
@@ -1219,7 +1253,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter,
+ if (get_edid_block(data,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
@@ -1247,7 +1281,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block = new;
}
- return block;
+ return (struct edid *)block;
carp:
if (print_bad_edid) {
@@ -1260,6 +1294,7 @@ out:
kfree(block);
return NULL;
}
+EXPORT_SYMBOL_GPL(drm_do_get_edid);
/**
* drm_probe_ddc() - probe DDC presence
@@ -1289,11 +1324,14 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct edid *edid = NULL;
+ struct edid *edid;
- if (drm_probe_ddc(adapter))
- edid = (struct edid *)drm_do_get_edid(connector, adapter);
+ if (!drm_probe_ddc(adapter))
+ return NULL;
+ edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+ if (edid)
+ drm_get_displayid(connector, edid);
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2389,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2401,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == CEA_EXT)
+ if (edid_ext[0] == ext_id)
break;
}
@@ -2411,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, CEA_EXT);
+}
+
+static u8 *drm_find_displayid_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, DISPLAYID_EXT);
+}
+
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3128,9 +3176,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
eld[5] |= sad_count << 4;
- eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
- DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+ eld[DRM_ELD_BASELINE_ELD_LEN] =
+ DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
+ drm_eld_size(eld), sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
@@ -3868,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+
+static int drm_parse_display_id(struct drm_connector *connector,
+ u8 *displayid, int length,
+ bool is_edid_extension)
+{
+ /* if this is an EDID extension the first byte will be 0x70 */
+ int idx = 0;
+ struct displayid_hdr *base;
+ struct displayid_block *block;
+ u8 csum = 0;
+ int i;
+
+ if (is_edid_extension)
+ idx = 1;
+
+ base = (struct displayid_hdr *)&displayid[idx];
+
+ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ base->rev, base->bytes, base->prod_id, base->ext_count);
+
+ if (base->bytes + 5 > length - idx)
+ return -EINVAL;
+
+ for (i = idx; i <= base->bytes + 5; i++) {
+ csum += displayid[i];
+ }
+ if (csum) {
+ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+ return -EINVAL;
+ }
+
+ block = (struct displayid_block *)&displayid[idx + 4];
+ DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
+ block->tag, block->rev, block->num_bytes);
+
+ switch (block->tag) {
+ case DATA_BLOCK_TILED_DISPLAY: {
+ struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+
+ u16 w, h;
+ u8 tile_v_loc, tile_h_loc;
+ u8 num_v_tile, num_h_tile;
+ struct drm_tile_group *tg;
+
+ w = tile->tile_size[0] | tile->tile_size[1] << 8;
+ h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+ connector->has_tile = true;
+ if (tile->tile_cap & 0x80)
+ connector->tile_is_single_monitor = true;
+
+ connector->num_h_tile = num_h_tile + 1;
+ connector->num_v_tile = num_v_tile + 1;
+ connector->tile_h_loc = tile_h_loc;
+ connector->tile_v_loc = tile_v_loc;
+ connector->tile_h_size = w + 1;
+ connector->tile_v_size = h + 1;
+
+ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+ if (!tg) {
+ tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ }
+ if (!tg)
+ return -ENOMEM;
+
+ if (connector->tile_group != tg) {
+ /* if we haven't got a pointer,
+ take the reference, drop ref to old tile group */
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ }
+ connector->tile_group = tg;
+ } else
+ /* if same tile group, then release the ref we just took. */
+ drm_mode_put_tile_group(connector->dev, tg);
+ }
+ break;
+ default:
+ printk("unknown displayid tag %d\n", block->tag);
+ break;
+ }
+ return 0;
+}
+
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid)
+{
+ void *displayid = NULL;
+ int ret;
+ connector->has_tile = false;
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid) {
+ /* drop reference to any tile group we had */
+ goto out_drop_ref;
+ }
+
+ ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ if (ret < 0)
+ goto out_drop_ref;
+ if (!connector->has_tile)
+ goto out_drop_ref;
+ return;
+out_drop_ref:
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+ return;
+}
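
A minimal usage sketch of the newly exported drm_do_get_edid() entry point documented above. my_device, my_read_edid_block() and my_hw_read_edid() are hypothetical driver names; only the callback signature and the drm_do_get_edid() call itself come from the patch.

struct my_device {
        struct drm_connector connector;
        void *hw;       /* bridge-specific handle used to fetch EDID blocks */
};

static int my_read_edid_block(void *data, u8 *buf,
                              unsigned int block, size_t len)
{
        struct my_device *mydev = data;

        /* fetch 'len' bytes of EDID block 'block' through the bridge */
        return my_hw_read_edid(mydev->hw, block, buf, len) ? -EIO : 0;
}

static struct edid *my_get_edid(struct my_device *mydev)
{
        /* returns NULL if no valid base block could be read */
        return drm_do_get_edid(&mydev->connector, my_read_edid_block, mydev);
}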
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0a235fe61c9b..732cb6f8e653 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -254,8 +254,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
name, connector_name);
out:
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
return edid;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0c0c39bac23d..52ce26d6b4fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
+ bool do_delayed = false;
+
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
+
+ do_delayed = fb_helper->delayed_hotplug;
+ if (do_delayed)
+ fb_helper->delayed_hotplug = false;
drm_modeset_unlock_all(dev);
+
+ if (do_delayed)
+ drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (fb_helper->delayed_hotplug) {
- fb_helper->delayed_hotplug = false;
- drm_fb_helper_hotplug_event(fb_helper);
- }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
+ int x, y;
desired_mode = fb_helper->crtc_info[i].desired_mode;
-
+ x = fb_helper->crtc_info[i].x;
+ y = fb_helper->crtc_info[i].y;
if (desired_mode) {
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay;
- if (desired_mode->hdisplay > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay + x < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay + y;
+ if (desired_mode->hdisplay + x > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay + y;
crtc_count++;
}
}
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
return false;
}
+static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ int idx,
+ int h_idx, int v_idx)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+ int hoffset = 0, voffset = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+ if (!fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (!modes[i] && (h_idx || v_idx)) {
+ DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
+ fb_helper_conn->connector->base.id);
+ continue;
+ }
+ if (fb_helper_conn->connector->tile_h_loc < h_idx)
+ hoffset += modes[i]->hdisplay;
+
+ if (fb_helper_conn->connector->tile_v_loc < v_idx)
+ voffset += modes[i]->vdisplay;
+ }
+ offsets[idx].x = hoffset;
+ offsets[idx].y = voffset;
+ DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ return 0;
+}
+
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
-
+ uint64_t conn_configured = 0, mask;
+ int tile_pass = 0;
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
- if (enabled[i] == false)
+ if (conn_configured & (1 << i))
continue;
+ if (enabled[i] == false) {
+ conn_configured |= (1 << i);
+ continue;
+ }
+
+ /* first pass over all the untiled connectors */
+ if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (tile_pass == 1) {
+ if (fb_helper_conn->connector->tile_h_loc != 0 ||
+ fb_helper_conn->connector->tile_v_loc != 0)
+ continue;
+
+ } else {
+ if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
+ fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
+ /* if this tile_pass doesn't cover any of the tiles - keep going */
+ continue;
+
+ /* find the tile offsets for this pass - need
+ to find all tiles left and above */
+ drm_get_tile_offsets(fb_helper, modes, offsets,
+ i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
+ }
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
fb_helper_conn->connector->base.id);
/* got for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_helper_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+ fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
}
/* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ tile_pass++;
+ goto retry;
}
return true;
}
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
+ struct drm_fb_offset *offsets;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
+ offsets = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_offset), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
- if (!crtcs || !modes || !enabled) {
+ if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
if (!(fb_helper->funcs->initial_config &&
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ offsets,
enabled, width, height))) {
memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+ memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
- if (!drm_target_cloned(fb_helper,
- modes, enabled, width, height) &&
- !drm_target_preferred(fb_helper,
- modes, enabled, width, height))
+ if (!drm_target_cloned(fb_helper, modes, offsets,
+ enabled, width, height) &&
+ !drm_target_preferred(fb_helper, modes, offsets,
+ enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ struct drm_fb_offset *offset = &offsets[i];
modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, fb_crtc->mode_set.crtc->base.id);
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
fb_crtc->desired_mode = mode;
+ fb_crtc->x = offset->x;
+ fb_crtc->y = offset->y;
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
}
}
@@ -1570,7 +1655,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
modeset = &fb_helper->crtc_info[i].mode_set;
if (modeset->num_connectors == 0) {
BUG_ON(modeset->fb);
- BUG_ON(modeset->num_connectors);
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = NULL;
@@ -1579,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
out:
kfree(crtcs);
kfree(modes);
+ kfree(offsets);
kfree(enabled);
}
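
A simplified sketch of the offset computation drm_get_tile_offsets() performs above: a tile's x/y offset is the sum of the hdisplay/vdisplay of every tile strictly to its left/above. The sketch_tile structure is invented for the example and does not exist in the patch.

struct sketch_tile {
        int h_loc, v_loc;       /* position in the tile grid */
        int hdisplay, vdisplay; /* size of the mode driving that tile */
};

static void sketch_tile_offset(const struct sketch_tile *tiles, int n,
                               int h_idx, int v_idx, int *x, int *y)
{
        int i;

        *x = 0;
        *y = 0;
        for (i = 0; i < n; i++) {
                if (tiles[i].h_loc < h_idx)
                        *x += tiles[i].hdisplay;
                if (tiles[i].v_loc < v_idx)
                        *y += tiles[i].vdisplay;
        }
}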
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index f9c7fa3d0012..43d9b950ef9f 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -25,6 +25,44 @@
#include "drm_flip_work.h"
/**
+ * drm_flip_work_allocate_task - allocate a flip-work task
+ * @data: data associated to the task
+ * @flags: allocator flags
+ *
+ * Allocate a drm_flip_task object and attach private data to it.
+ */
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+{
+ struct drm_flip_task *task;
+
+ task = kzalloc(sizeof(*task), flags);
+ if (task)
+ task->data = data;
+
+ return task;
+}
+EXPORT_SYMBOL(drm_flip_work_allocate_task);
+
+/**
+ * drm_flip_work_queue_task - queue a specific task
+ * @work: the flip-work
+ * @task: the task to handle
+ *
+ * Queues task, that will later be run (passed back to drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_add_tail(&task->node, &work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+EXPORT_SYMBOL(drm_flip_work_queue_task);
+
+/**
* drm_flip_work_queue - queue work
* @work: the flip-work
* @val: the value to queue
@@ -34,10 +72,14 @@
*/
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
- if (kfifo_put(&work->fifo, val)) {
- atomic_inc(&work->pending);
+ struct drm_flip_task *task;
+
+ task = drm_flip_work_allocate_task(val,
+ drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
+ if (task) {
+ drm_flip_work_queue_task(work, task);
} else {
- DRM_ERROR("%s fifo full!\n", work->name);
+ DRM_ERROR("%s could not allocate task!\n", work->name);
work->func(work, val);
}
}
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq)
{
- uint32_t pending = atomic_read(&work->pending);
- atomic_add(pending, &work->count);
- atomic_sub(pending, &work->pending);
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->queued, &work->commited);
+ INIT_LIST_HEAD(&work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,47 +111,46 @@ EXPORT_SYMBOL(drm_flip_work_commit);
static void flip_worker(struct work_struct *w)
{
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
- uint32_t count = atomic_read(&work->count);
- void *val = NULL;
+ struct list_head tasks;
+ unsigned long flags;
+
+ while (1) {
+ struct drm_flip_task *task, *tmp;
+
+ INIT_LIST_HEAD(&tasks);
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->commited, &tasks);
+ INIT_LIST_HEAD(&work->commited);
+ spin_unlock_irqrestore(&work->lock, flags);
- atomic_sub(count, &work->count);
+ if (list_empty(&tasks))
+ break;
- while(count--)
- if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
- work->func(work, val);
+ list_for_each_entry_safe(task, tmp, &tasks, node) {
+ work->func(work, task->data);
+ kfree(task);
+ }
+ }
}
/**
* drm_flip_work_init - initialize flip-work
* @work: the flip-work to initialize
- * @size: the max queue depth
* @name: debug name
* @func: the callback work function
*
* Initializes/allocates resources for the flip-work
- *
- * RETURNS:
- * Zero on success, error code on failure.
*/
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func)
{
- int ret;
-
work->name = name;
- atomic_set(&work->count, 0);
- atomic_set(&work->pending, 0);
+ INIT_LIST_HEAD(&work->queued);
+ INIT_LIST_HEAD(&work->commited);
+ spin_lock_init(&work->lock);
work->func = func;
- ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("could not allocate %s fifo\n", name);
- return ret;
- }
-
INIT_WORK(&work->worker, flip_worker);
-
- return 0;
}
EXPORT_SYMBOL(drm_flip_work_init);
@@ -118,7 +162,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
*/
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
- WARN_ON(!kfifo_is_empty(&work->fifo));
- kfifo_free(&work->fifo);
+ WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
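
A minimal sketch of the reworked, list-based flip-work API above: drm_flip_work_init() no longer takes a queue depth or returns an error, and work can be queued from atomic context. unref_worker() and the workqueue argument are hypothetical driver names; drm_framebuffer_unreference() is assumed available in this era.

static void unref_worker(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_unreference(val);       /* runs from the workqueue */
}

static void sketch_setup(struct drm_flip_work *unref_work)
{
        /* no queue depth and no return value any more */
        drm_flip_work_init(unref_work, "fb unref", unref_worker);
}

static void sketch_flip_done(struct drm_flip_work *unref_work,
                             struct drm_framebuffer *old_fb,
                             struct workqueue_struct *wq)
{
        /* allocates a task with GFP_ATOMIC if we cannot sleep */
        drm_flip_work_queue(unref_work, old_fb);
        drm_flip_work_commit(unref_work, wq);
}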
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index ed7bc68f7e87..0b9514b6cd64 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -515,16 +515,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
size_t total;
ssize_t ret;
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
+ if ((filp->f_flags & O_NONBLOCK) == 0) {
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+ }
total = 0;
while (drm_dequeue_event(file_priv, total, count, &e)) {
if (copy_to_user(buffer + total,
e->event, e->event->length)) {
total = -EFAULT;
+ e->destroy(e);
break;
}
@@ -532,7 +535,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
e->destroy(e);
}
- return total;
+ return total ?: -EAGAIN;
}
EXPORT_SYMBOL(drm_read);
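
An illustrative userspace sketch, not from the patch, of what the O_NONBLOCK handling added to drm_read() above enables: poll() for DRM events, then drain them with non-blocking read()s until EAGAIN. The function name and buffer handling are assumptions for the example.

#include <errno.h>
#include <poll.h>
#include <unistd.h>

static void drain_drm_events(int drm_fd, char *buf, size_t len)
{
        struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

        if (poll(&pfd, 1, -1) <= 0)
                return;

        for (;;) {
                ssize_t n = read(drm_fd, buf, len);

                if (n > 0)
                        continue;       /* events consumed; keep draining */
                if (n < 0 && errno == EAGAIN)
                        break;          /* queue empty on an O_NONBLOCK fd */
                break;                  /* other error */
        }
}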
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f6ca51259fa3..16a164770713 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -188,7 +188,7 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
}
/**
- * drm_gem_object_free - release resources bound to userspace handles
+ * drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
* Called after the last handle to the object has been closed
@@ -309,7 +309,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
@@ -362,7 +362,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
}
/**
- * gem_handle_create - create a gem handle for an object
+ * drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pionter to return the created handle to the caller
@@ -371,10 +371,9 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
mutex_lock(&obj->dev->object_name_lock);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0316310e2cc4..e419eedf751d 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -29,18 +29,31 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
-/*
+/**
+ * DOC: cma helpers
+ *
+ * The Contiguous Memory Allocator reserves a pool of memory at early boot
+ * that is used to service requests for large blocks of contiguous memory.
+ *
+ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
+ * objects that are physically contiguous in memory. This is useful for
+ * display drivers that are unable to map scattered buffers via an IOMMU.
+ */
+
+/**
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
- * @drm: The drm device
- * @size: The GEM object size
+ * @drm: DRM device
+ * @size: size of the object to allocate
*
- * This function creates and initializes a GEM CMA object of the given size, but
- * doesn't allocate any memory to back the object.
+ * This function creates and initializes a GEM CMA object of the given size,
+ * but doesn't allocate any memory to back the object.
*
- * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
+__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
@@ -69,14 +82,21 @@ error:
return ERR_PTR(ret);
}
-/*
+/**
* drm_gem_cma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a CMA GEM object and allocates a contiguous chunk of
+ * memory as backing store. The backing memory has the writecombine attribute
+ * set.
*
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- unsigned int size)
+ size_t size)
{
struct drm_gem_cma_object *cma_obj;
int ret;
@@ -104,17 +124,26 @@ error:
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
-/*
- * drm_gem_cma_create_with_handle - allocate an object with the given
- * size and create a gem handle on it
+/**
+ * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ * return a GEM handle to it
+ * @file_priv: DRM file-private structure to register the handle for
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @handle: return location for the GEM handle
+ *
+ * This function creates a CMA GEM object, allocating a physically contiguous
+ * chunk of memory as backing store. The GEM object is then added to the list
+ * of objects associated with the given file and a handle to it is returned.
*
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
*/
-static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
- struct drm_file *file_priv,
- struct drm_device *drm, unsigned int size,
- unsigned int *handle)
+static struct drm_gem_cma_object *
+drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, size_t size,
+ uint32_t *handle)
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
@@ -145,16 +174,19 @@ err_handle_create:
return ERR_PTR(ret);
}
-/*
- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
- * function
+/**
+ * drm_gem_cma_free_object - free resources associated with a CMA GEM object
+ * @gem_obj: GEM object to free
+ *
+ * This function frees the backing memory of the CMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself.
+ * Drivers using the CMA helpers should set this as their DRM driver's
+ * ->gem_free_object() callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- drm_gem_free_mmap_offset(gem_obj);
-
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) {
@@ -170,18 +202,26 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
-/*
- * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
- * function
+/**
+ * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This aligns the pitch and size arguments to the minimum required. This is
+ * an internal helper that can be wrapped by a driver to account for hardware
+ * with more specific alignment requirements. It should not be used directly
+ * as the ->dumb_create() callback in a DRM driver.
*
- * This aligns the pitch and size arguments to the minimum required. wrap
- * this into your own function if you need bigger alignment.
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct drm_gem_cma_object *cma_obj;
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
if (args->pitch < min_pitch)
args->pitch = min_pitch;
@@ -189,18 +229,63 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
- args->size, &args->handle);
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
+ return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+
+/**
+ * drm_gem_cma_dumb_create - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their ->dumb_create() callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace and pass the IOCTL data along to the
+ * drm_gem_cma_dumb_create_internal() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ args->size = args->pitch * args->height;
+
+ cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ &args->handle);
return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-/*
- * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
- * function
+/**
+ * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
+ * object
+ * @file_priv: DRM file-private structure containing the GEM object
+ * @drm: DRM device
+ * @handle: GEM object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This function looks up an object by its handle and returns the fake mmap
+ * offset associated with it. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->dumb_map_offset() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, uint32_t handle, uint64_t *offset)
+ struct drm_device *drm, u32 handle,
+ u64 *offset)
{
struct drm_gem_object *gem_obj;
@@ -208,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
if (!gem_obj) {
- dev_err(drm->dev, "failed to lookup gem object\n");
+ dev_err(drm->dev, "failed to lookup GEM object\n");
mutex_unlock(&drm->struct_mutex);
return -EINVAL;
}
@@ -251,8 +336,20 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
return ret;
}
-/*
- * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+/**
+ * drm_gem_cma_mmap - memory-map a CMA GEM object
+ * @filp: file object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for CMA objects: In addition to the usual GEM VMA setup it
+ * immediately faults in the entire object instead of using on-demand
+ * faulting. Drivers which employ the CMA helpers should use this function
+ * as their ->mmap() handler in the DRM device file's file_operations
+ * structure.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
*/
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -272,7 +369,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
#ifdef CONFIG_DEBUG_FS
-void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+/**
+ * drm_gem_cma_describe - describe a CMA GEM object for debugfs
+ * @cma_obj: CMA GEM object
+ * @m: debugfs file handle
+ *
+ * This function can be used to dump a human-readable representation of the
+ * CMA GEM object into a synthetic file.
+ */
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
+ struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
struct drm_device *dev = obj->dev;
@@ -291,7 +397,18 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif
-/* low-level interface prime helpers */
+/**
+ * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * pages for a CMA GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API. Drivers using the CMA helpers should
+ * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -315,6 +432,23 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+/**
+ * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ * driver's scatter/gather table of pinned pages
+ * @dev: device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Imported buffers must be physically contiguous in memory
+ * (i.e. the scatter/gather table must contain a single entry). Drivers that
+ * use the CMA helpers should set this as their DRM driver's
+ * ->gem_prime_import_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -339,6 +473,18 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+/**
+ * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer imported via DRM PRIME into a userspace
+ * process's address space. Drivers that use the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_mmap() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -357,6 +503,20 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
+/**
+ * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * address space
+ * @obj: GEM object
+ *
+ * This function maps a buffer exported via DRM PRIME into the kernel's
+ * virtual address space. Since the CMA buffers are already mapped into the
+ * kernel virtual address space this simply returns the cached virtual
+ * address. Drivers using the CMA helpers should set this as their DRM
+ * driver's ->gem_prime_vmap() callback.
+ *
+ * Returns:
+ * The kernel virtual address of the CMA GEM object's backing store.
+ */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -365,6 +525,17 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+/**
+ * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
+ * address space
+ * @obj: GEM object
+ * @vaddr: kernel virtual address where the CMA GEM object was mapped
+ *
+ * This function removes a buffer exported via DRM PRIME from the kernel's
+ * virtual address space. This is a no-op because CMA buffers cannot be
+ * unmapped from kernel space. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_vunmap() callback.
+ */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* Nothing to do */
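
A minimal sketch, under the assumption that the struct drm_driver fields of this kernel era are used, of how a driver is expected to wire the now-documented CMA helpers into its driver structure, as the kernel-doc above suggests. sketch_fops and sketch_driver are hypothetical names.

static const struct file_operations sketch_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap           = drm_gem_cma_mmap,
};

static struct drm_driver sketch_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_PRIME,
        .gem_free_object        = drm_gem_cma_free_object,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
        .dumb_destroy           = drm_gem_dumb_destroy,
        .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
        .gem_prime_vmap         = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .fops                   = &sketch_fops,
};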
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 5ef03c216a27..f5a5f18efa5b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
- * If the vblank interrupt was already disbled update the count
+ * If the vblank interrupt was already disabled update the count
* and timestamp to maintain the appearance that the counter
* has been ticking all along until this time. This makes the
* count account for the entire time between drm_vblank_on() and
@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
- BUG_ON(atomic_read(&vblank->refcount) == 0);
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ return;
if (WARN_ON(crtc >= dev->num_crtcs))
return;
@@ -1190,7 +1191,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the legacy version of drm_crtc_vblank_on().
@@ -1237,7 +1238,7 @@ EXPORT_SYMBOL(drm_vblank_on);
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
- * drm_vblank_off() can be unbalanced and so can also be unconditionaly called
+ * drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the native kms version of drm_vblank_on().
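
A small sketch of the defensive pattern the drm_vblank_put() hunk above adopts: warn and bail out on an unbalanced reference count instead of BUG()ing the machine. sketch_put() is a hypothetical name.

static void sketch_put(atomic_t *refcount)
{
        /* an extra put is a driver bug, but not worth a kernel panic */
        if (WARN_ON(atomic_read(refcount) == 0))
                return;

        atomic_dec(refcount);
}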
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index eb6dfe52cab2..c0644bb865f2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -35,6 +35,16 @@
#include <video/mipi_display.h>
+/**
+ * DOC: dsi helpers
+ *
+ * These functions contain some common logic and helpers to deal with MIPI DSI
+ * peripherals.
+ *
+ * Helpers are provided for a number of standard MIPI DSI commands as well as a
+ * subset of the MIPI DCS command set.
+ */
+
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
@@ -57,6 +67,29 @@ static struct bus_type mipi_dsi_bus_type = {
.pm = &mipi_dsi_device_pm_ops,
};
+static int of_device_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
+ * device tree node
+ * @np: device tree node
+ *
+ * Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
+
+ return dev ? to_mipi_dsi_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
+
static void mipi_dsi_dev_release(struct device *dev)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
@@ -198,59 +231,351 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_detach);
+static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
+ struct mipi_dsi_msg *msg)
+{
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+ if (!ops || !ops->transfer)
+ return -ENOSYS;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
+ msg->flags |= MIPI_DSI_MSG_USE_LPM;
+
+ return ops->transfer(dsi->host, msg);
+}
+
/**
- * mipi_dsi_dcs_write - send DCS write command
- * @dsi: DSI device
- * @data: pointer to the command followed by parameters
- * @len: length of @data
+ * mipi_dsi_packet_format_is_short - check if a packet is of the short format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a short packet, false
+ * otherwise.
*/
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
- size_t len)
+bool mipi_dsi_packet_format_is_short(u8 type)
+{
+ switch (type) {
+ case MIPI_DSI_V_SYNC_START:
+ case MIPI_DSI_V_SYNC_END:
+ case MIPI_DSI_H_SYNC_START:
+ case MIPI_DSI_H_SYNC_END:
+ case MIPI_DSI_END_OF_TRANSMISSION:
+ case MIPI_DSI_COLOR_MODE_OFF:
+ case MIPI_DSI_COLOR_MODE_ON:
+ case MIPI_DSI_SHUTDOWN_PERIPHERAL:
+ case MIPI_DSI_TURN_ON_PERIPHERAL:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
+
+/**
+ * mipi_dsi_packet_format_is_long - check if a packet is of the long format
+ * @type: MIPI DSI data type of the packet
+ *
+ * Return: true if the packet for the given data type is a long packet, false
+ * otherwise.
+ */
+bool mipi_dsi_packet_format_is_long(u8 type)
+{
+ switch (type) {
+ case MIPI_DSI_NULL_PACKET:
+ case MIPI_DSI_BLANKING_PACKET:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_30:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_36:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_16:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_18:
+ case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
+ case MIPI_DSI_PACKED_PIXEL_STREAM_24:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
+
+/**
+ * mipi_dsi_create_packet - create a packet from a message according to the
+ * DSI protocol
+ * @packet: pointer to a DSI packet structure
+ * @msg: message to translate into a packet
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx = msg->tx_buf;
+
+ if (!packet || !msg)
+ return -EINVAL;
+
+ /* do some minimum sanity checking */
+ if (!mipi_dsi_packet_format_is_short(msg->type) &&
+ !mipi_dsi_packet_format_is_long(msg->type))
+ return -EINVAL;
+
+ if (msg->channel > 3)
+ return -EINVAL;
+
+ memset(packet, 0, sizeof(*packet));
+ packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+
+ /* TODO: compute ECC if hardware support is not available */
+
+ /*
+ * Long write packets contain the word count in header bytes 1 and 2.
+ * The payload follows the header and is word count bytes long.
+ *
+ * Short write packets encode up to two parameters in header bytes 1
+ * and 2.
+ */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ packet->header[1] = (msg->tx_len >> 0) & 0xff;
+ packet->header[2] = (msg->tx_len >> 8) & 0xff;
+
+ packet->payload_length = msg->tx_len;
+ packet->payload = tx;
+ } else {
+ packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
+ packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+ }
+
+ packet->size = sizeof(packet->header) + packet->payload_length;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_create_packet);
+
+/**
+ * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
+ * payload in a long packet transmitted from the peripheral back to the
+ * host processor
+ * @dsi: DSI peripheral device
+ * @value: the maximum size of the payload
+ *
+ * Return: 0 on success or a negative error code on failure.
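+ *
+ * A short usage sketch; the assumption that peripherals power up with a
+ * one-byte return limit follows the DSI specification's default and may not
+ * apply to every device:
+ *
+ *	mipi_dsi_set_maximum_return_packet_size(dsi, 4);
+ *	mipi_dsi_dcs_read(dsi, 0xa1, buf, 4);
+ *
+ * where 0xa1 stands in for a hypothetical vendor-specific read command.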
+ */
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value)
+{
+ u8 tx[2] = { value & 0xff, value >> 8 };
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = sizeof(tx),
+ .tx_buf = tx,
+ };
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+
+/**
+ * mipi_dsi_generic_write() - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @payload: buffer containing the payload
+ * @size: size of payload buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the payload length.
+ *
+ * Return: The number of bytes transmitted on success or a negative error code
+ * on failure.
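+ *
+ * A minimal sketch; the three byte sequence is a made-up vendor unlock code
+ * used purely for illustration:
+ *
+ *	static const u8 unlock[] = { 0xf0, 0x5a, 0x5a };
+ *
+ *	mipi_dsi_generic_write(dsi, unlock, sizeof(unlock));
+ *
+ * goes out as MIPI_DSI_GENERIC_LONG_WRITE, whereas a two byte buffer would
+ * be sent as MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM.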
+ */
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .tx_buf = payload,
+ .tx_len = size
+ };
+
+ switch (size) {
+ case 0:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
+ break;
+
+ case 1:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
+ break;
+
+ case 2:
+ msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
+ break;
+
+ default:
+ msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
+ break;
+ }
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_write);
+
+/**
+ * mipi_dsi_generic_read() - receive data using a generic read packet
+ * @dsi: DSI peripheral device
+ * @params: buffer containing the request parameters
+ * @num_params: number of request parameters
+ * @data: buffer in which to return the received data
+ * @size: size of receive buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the number of parameters passed in.
+ *
+ * Return: The number of bytes successfully read or a negative error code on
+ * failure.
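+ *
+ * An illustrative sketch only; 0xa1 is a hypothetical vendor register, not
+ * anything defined here:
+ *
+ *	u8 param = 0xa1;
+ *	u8 id[3];
+ *	ssize_t ret = mipi_dsi_generic_read(dsi, &param, 1, id, sizeof(id));
+ *
+ * issues a generic read request with one parameter. More than two request
+ * parameters are rejected with -EINVAL.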
+ */
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .tx_len = num_params,
+ .tx_buf = params,
+ .rx_len = size,
+ .rx_buf = data
+ };
+
+ switch (num_params) {
+ case 0:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+ break;
+
+ case 1:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+ break;
+
+ case 2:
+ msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_generic_read);
+
+/**
+ * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
+ * @dsi: DSI peripheral device
+ * @data: buffer containing data to be transmitted
+ * @len: size of transmission buffer
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
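+ *
+ * Note that the first byte of @data is the DCS command itself, for example
+ * (the parameter byte is arbitrary):
+ *
+ *	u8 buf[] = { MIPI_DCS_SET_TEAR_ON, 0x00 };
+ *
+ *	mipi_dsi_dcs_write_buffer(dsi, buf, sizeof(buf));
+ *
+ * which is transmitted as a two byte DCS short write.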
+ */
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len)
{
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
- if (!ops || !ops->transfer)
- return -ENOSYS;
-
switch (len) {
case 0:
return -EINVAL;
+
case 1:
msg.type = MIPI_DSI_DCS_SHORT_WRITE;
break;
+
case 2:
msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
break;
+
default:
msg.type = MIPI_DSI_DCS_LONG_WRITE;
break;
}
- if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
- msg.flags = MIPI_DSI_MSG_USE_LPM;
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
- return ops->transfer(dsi->host, &msg);
+/**
+ * mipi_dsi_dcs_write() - send DCS write command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer containing the command payload
+ * @len: command payload length
+ *
+ * This function will automatically choose the right data type depending on
+ * the command payload length.
+ *
+ * Return: The number of bytes successfully transmitted or a negative error
+ * code on failure.
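+ *
+ * A brief sketch; 0xb0 and its parameter bytes are hypothetical vendor
+ * values used only for illustration:
+ *
+ *	static const u8 magic[] = { 0x5a, 0x5a };
+ *
+ *	mipi_dsi_dcs_write(dsi, 0xb0, magic, sizeof(magic));
+ *
+ * is equivalent to calling mipi_dsi_dcs_write_buffer() with the command byte
+ * prepended to the payload.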
+ */
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len)
+{
+ ssize_t err;
+ size_t size;
+ u8 *tx;
+
+ if (len > 0) {
+ size = 1 + len;
+
+ tx = kmalloc(size, GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+
+ /* concatenate the DCS command byte and the payload */
+ tx[0] = cmd;
+ memcpy(&tx[1], data, len);
+ } else {
+ tx = &cmd;
+ size = 1;
+ }
+
+ err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
+
+ if (len > 0)
+ kfree(tx);
+
+ return err;
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
/**
- * mipi_dsi_dcs_read - send DCS read request command
- * @dsi: DSI device
- * @cmd: DCS read command
- * @data: pointer to read buffer
- * @len: length of @data
+ * mipi_dsi_dcs_read() - send DCS read request command
+ * @dsi: DSI peripheral device
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
*
- * Function returns number of read bytes or error code.
+ * Return: The number of bytes read or a negative error code on failure.
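+ *
+ * A short sketch, mirroring what the helpers below do internally:
+ *
+ *	u8 mode;
+ *	ssize_t ret = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, &mode,
+ *					sizeof(mode));
+ *
+ * Whether reads longer than one byte additionally require a prior call to
+ * mipi_dsi_set_maximum_return_packet_size() depends on the host and
+ * peripheral; that is an assumption about typical hardware, not something
+ * this function enforces.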
*/
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len)
{
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
@@ -260,15 +585,282 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
.rx_len = len
};
- if (!ops || !ops->transfer)
- return -ENOSYS;
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
- if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
- msg.flags = MIPI_DSI_MSG_USE_LPM;
+/**
+ * mipi_dsi_dcs_nop() - send DCS nop packet
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
+ if (err < 0)
+ return err;
- return ops->transfer(dsi->host, &msg);
+ return 0;
}
-EXPORT_SYMBOL(mipi_dsi_dcs_read);
+EXPORT_SYMBOL(mipi_dsi_dcs_nop);
+
+/**
+ * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
+
+/**
+ * mipi_dsi_dcs_get_power_mode() - query the display module's current power
+ * mode
+ * @dsi: DSI peripheral device
+ * @mode: return location for the current power mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
+ sizeof(*mode));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
+
+/**
+ * mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
+ * data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: return location for the pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
+ sizeof(*format));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
+
+/**
+ * mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
+ * display module except interface communication
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
+ * module
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
+
+/**
+ * mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
+ * display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
+
+/**
+ * mipi_dsi_dcs_set_display_on() - start displaying the image data on the
+ * display device
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
+
+/**
+ * mipi_dsi_dcs_set_column_address() - define the column extent of the frame
+ * memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first column of frame memory
+ * @end: last column of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end)
+{
+ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
+ sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
+
+/**
+ * mipi_dsi_dcs_set_page_address() - define the page extent of the frame
+ * memory accessed by the host processor
+ * @dsi: DSI peripheral device
+ * @start: first page of frame memory
+ * @end: last page of frame memory
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end)
+{
+ u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
+ sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
+
+/**
+ * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
+ * output signal on the TE signal line
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
+
+/**
+ * mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
+ * output signal on the TE signal line.
+ * @dsi: DSI peripheral device
+ * @mode: the Tearing Effect Output Line mode
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode)
+{
+ u8 value = mode;
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
+ sizeof(value));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
+
+/**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ * data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+ sizeof(format));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
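+
+/*
+ * Taken together, a panel driver's enable path might chain the DCS helpers
+ * above roughly as follows. This is only a sketch: the 0x77 pixel format
+ * value, the 1080x1920 extents, the delays and the tear mode enumerator
+ * (taken from the accompanying header change) describe a hypothetical panel
+ * and are not requirements of this API; error handling is omitted.
+ *
+ *	mipi_dsi_dcs_soft_reset(dsi);
+ *	msleep(20);
+ *	mipi_dsi_dcs_exit_sleep_mode(dsi);
+ *	msleep(120);
+ *	mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
+ *	mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
+ *	mipi_dsi_dcs_set_page_address(dsi, 0, 1920 - 1);
+ *	mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ *	mipi_dsi_dcs_set_display_on(dsi);
+ */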
static int mipi_dsi_drv_probe(struct device *dev)
{
@@ -295,12 +887,18 @@ static void mipi_dsi_drv_shutdown(struct device *dev)
}
/**
- * mipi_dsi_driver_register - register a driver for DSI devices
+ * mipi_dsi_driver_register_full() - register a driver for DSI devices
* @drv: DSI driver structure
+ * @owner: owner module
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
-int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
+ struct module *owner)
{
drv->driver.bus = &mipi_dsi_bus_type;
+ drv->driver.owner = owner;
+
if (drv->probe)
drv->driver.probe = mipi_dsi_drv_probe;
if (drv->remove)
@@ -310,11 +908,13 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL(mipi_dsi_driver_register);
+EXPORT_SYMBOL(mipi_dsi_driver_register_full);
/**
- * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * mipi_dsi_driver_unregister() - unregister a driver for DSI devices
* @drv: DSI driver structure
*/
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
{
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d1b7d2006529..6d8b941c8200 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -914,7 +914,7 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
*
* This function is a helper which can be used to validate modes against size
* limitations of the DRM device/connector. If a mode is too big its status
- * memeber is updated with the appropriate validation failure code. The list
+ * member is updated with the appropriate validation failure code. The list
* itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 474e4d12a2d8..51cc47d827d8 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -157,14 +157,20 @@ void drm_modeset_unlock_all(struct drm_device *dev)
EXPORT_SYMBOL(drm_modeset_unlock_all);
/**
- * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
- * @crtc: drm crtc
+ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
+ * @crtc: DRM CRTC
+ * @plane: DRM plane to be updated on @crtc
+ *
+ * This function locks the given crtc and plane (which should be either the
+ * primary or cursor plane) using a hidden acquire context. This is necessary so
+ * that drivers internally using the atomic interfaces can grab further locks
+ * with the lock acquire context.
*
- * This function locks the given crtc using a hidden acquire context. This is
- * necessary so that drivers internally using the atomic interfaces can grab
- * further locks with the lock acquire context.
+ * Note that @plane can be NULL, e.g. when the cursor support hasn't been
+ * converted to universal planes yet.
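+ *
+ * Callers pair this with drm_modeset_unlock_crtc(). As a sketch, assuming
+ * the cursor plane pointer provided by universal plane support:
+ *
+ *	drm_modeset_lock_crtc(crtc, crtc->cursor);
+ *	... touch the cursor plane state ...
+ *	drm_modeset_unlock_crtc(crtc);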
*/
-void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane)
{
struct drm_modeset_acquire_ctx *ctx;
int ret;
@@ -180,6 +186,18 @@ retry:
if (ret)
goto fail;
+ if (plane) {
+ ret = drm_modeset_lock(&plane->mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (plane->crtc) {
+ ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
+ if (ret)
+ goto fail;
+ }
+ }
+
WARN_ON(crtc->acquire_ctx);
/* now we hold the locks, so now that it is safe, stash the
@@ -437,15 +455,14 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
}
EXPORT_SYMBOL(drm_modeset_unlock);
-/* Temporary.. until we have sufficiently fine grained locking, there
- * are a couple scenarios where it is convenient to grab all crtc locks.
- * It is planned to remove this:
- */
+/* In some legacy codepaths it's convenient to just grab all the crtc and plane
+ * related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
int ret = 0;
list_for_each_entry(crtc, &config->crtc_list, head) {
@@ -454,6 +471,12 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
return ret;
}
+ list_for_each_entry(plane, &config->plane_list, head) {
+ ret = drm_modeset_lock(&plane->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 827ec1a3040b..18a1ac6ac22f 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -27,10 +27,38 @@
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#define SUBPIXEL_MASK 0xffff
+/**
+ * DOC: overview
+ *
+ * This helper library has two parts. The first part has support to implement
+ * primary plane support on top of the normal CRTC configuration interface.
+ * Since the legacy ->set_config interface ties the primary plane together with
+ * the CRTC state, this does not allow userspace to disable the primary plane
+ * itself. To avoid too much duplicated code, drivers can use
+ * drm_plane_helper_check_update(), which enforces the same restrictions the
+ * primary plane code has imposed so far. The default primary plane only
+ * exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
+ * framebuffer.
+ *
+ * Drivers are highly recommended to implement proper support for primary
+ * planes, and newly merged drivers must not rely upon these transitional
+ * helpers.
+ *
+ * The second part also implements transitional helpers which allow drivers to
+ * gradually switch to the atomic helper infrastructure for plane updates. Once
+ * that switch is complete drivers shouldn't use these any longer, instead using
+ * the proper legacy implementations for update and disable plane hooks provided
+ * by the atomic helpers.
+ *
+ * Again, drivers are strongly urged to switch to the new interfaces.
+ */
+
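+/*
+ * As a sketch of the transitional usage (a hypothetical driver, not part of
+ * this patch), the legacy plane hooks can simply be pointed at the helpers
+ * below:
+ *
+ *	static const struct drm_plane_funcs foo_plane_funcs = {
+ *		.update_plane	= drm_plane_helper_update,
+ *		.disable_plane	= drm_plane_helper_disable,
+ *		.destroy	= drm_plane_cleanup,
+ *	};
+ *
+ * together with a struct drm_plane_helper_funcs supplying the atomic_check,
+ * prepare_fb/cleanup_fb and atomic_update callbacks that
+ * drm_plane_helper_commit() invokes.
+ */
+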
/*
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
@@ -127,6 +155,11 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -ERANGE;
}
+ if (!fb) {
+ *visible = false;
+ return 0;
+ }
+
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
@@ -369,3 +402,171 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
+
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_plane_helper_funcs *plane_funcs;
+ struct drm_crtc *crtc[2];
+ struct drm_crtc_helper_funcs *crtc_funcs[2];
+ int i, ret = 0;
+
+ plane_funcs = plane->helper_private;
+
+ /* Since this is a transitional helper we can't assume that plane->state
+ * is always valid. Hence we need to use plane->crtc instead of
+ * plane->state->crtc as the old crtc. */
+ crtc[0] = plane->crtc;
+ crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
+
+ for (i = 0; i < 2; i++)
+ crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
+
+ if (plane_funcs->atomic_check) {
+ ret = plane_funcs->atomic_check(plane, plane_state);
+ if (ret)
+ goto out;
+ }
+
+ if (plane_funcs->prepare_fb && plane_state->fb) {
+ ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+ if (ret)
+ goto out;
+ }
+
+ /* Point of no return, commit sw state. */
+ swap(plane->state, plane_state);
+
+ for (i = 0; i < 2; i++) {
+ if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
+ crtc_funcs[i]->atomic_begin(crtc[i]);
+ }
+
+ plane_funcs->atomic_update(plane, plane_state);
+
+ for (i = 0; i < 2; i++) {
+ if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
+ crtc_funcs[i]->atomic_flush(crtc[i]);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (!crtc[i])
+ continue;
+
+ /* There's no other way to figure out whether the crtc is running. */
+ ret = drm_crtc_vblank_get(crtc[i]);
+ if (ret == 0) {
+ drm_crtc_wait_one_vblank(crtc[i]);
+ drm_crtc_vblank_put(crtc[i]);
+ }
+
+ ret = 0;
+ }
+
+ if (plane_funcs->cleanup_fb && old_fb)
+ plane_funcs->cleanup_fb(plane, old_fb);
+out:
+ if (plane_state) {
+ if (plane->funcs->atomic_destroy_state)
+ plane->funcs->atomic_destroy_state(plane, plane_state);
+ else
+ drm_atomic_helper_plane_destroy_state(plane, plane_state);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_plane_helper_update() - Helper for primary plane update
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ *
+ * Provides a default plane update handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_plane_state *plane_state;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = crtc;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_update);
+
+/**
+ * drm_plane_helper_disable() - Helper for primary plane disable
+ * @plane: plane to disable
+ *
+ * Provides a default plane disable handler using the atomic plane update
+ * functions. It is fully left to the driver to check plane constraints and
+ * handle corner-cases like a fully occluded or otherwise invisible plane.
+ *
+ * This is useful for piecewise transitioning of a driver to the atomic helpers.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+int drm_plane_helper_disable(struct drm_plane *plane)
+{
+ struct drm_plane_state *plane_state;
+
+ /* crtc helpers love to call disable hooks for already disabled
+ * hardware, so cope with that. */
+ if (!plane->crtc)
+ return 0;
+
+ if (plane->funcs->atomic_duplicate_state)
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ else if (plane->state)
+ plane_state = drm_atomic_helper_plane_duplicate_state(plane);
+ else
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state)
+ return -ENOMEM;
+
+ plane_state->crtc = NULL;
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+
+ return drm_plane_helper_commit(plane, plane_state, plane->fb);
+}
+EXPORT_SYMBOL(drm_plane_helper_disable);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 78ca30808422..7482b06cd08f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -328,7 +328,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
*/
/**
- * drm_gem_prime_export - helper library implemention of the export callback
+ * drm_gem_prime_export - helper library implementation of the export callback
* @dev: drm_device to export from
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC
@@ -483,7 +483,7 @@ out_unlock:
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
- * drm_gem_prime_import - helper library implemention of the import callback
+ * drm_gem_prime_import - helper library implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
@@ -669,7 +669,7 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
* the driver is responsible for mapping the pages into the
* importers address space for use with dma_buf itself.
*/
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg = NULL;
int ret;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6857e9ad6339..7483a47de8e4 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -118,7 +118,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
mode->status = MODE_UNVERIFIED;
if (connector->force) {
- if (connector->force == DRM_FORCE_ON)
+ if (connector->force == DRM_FORCE_ON ||
+ connector->force == DRM_FORCE_ON_DIGITAL)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 6adb1e5cfb08..34d46aa75416 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -30,12 +30,17 @@
#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
-#include "exynos_drm_drv.h"
#include "exynos_dp_core.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
+static inline struct exynos_dp_device *
+display_to_dp(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dp_device, display);
+}
+
struct bridge_init {
struct i2c_client *client;
struct device_node *node;
@@ -882,7 +887,7 @@ static void exynos_dp_hotplug(struct work_struct *work)
static void exynos_dp_commit(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
int ret;
/* Keep the panel disabled while we configure video */
@@ -1020,7 +1025,7 @@ static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
static int exynos_dp_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
struct drm_connector *connector = &dp->connector;
int ret;
@@ -1052,33 +1057,19 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
{
- if (dp->phy) {
+ if (dp->phy)
phy_power_on(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg |= dp->enable_mask;
- __raw_writel(reg, dp->phy_addr);
- }
}
static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
{
- if (dp->phy) {
+ if (dp->phy)
phy_power_off(dp->phy);
- } else if (dp->phy_addr) {
- u32 reg;
-
- reg = __raw_readl(dp->phy_addr);
- reg &= ~(dp->enable_mask);
- __raw_writel(reg, dp->phy_addr);
- }
}
static void exynos_dp_poweron(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@@ -1099,7 +1090,7 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1124,7 +1115,7 @@ static void exynos_dp_poweroff(struct exynos_drm_display *display)
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dp_device *dp = display->ctx;
+ struct exynos_dp_device *dp = display_to_dp(display);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1147,11 +1138,6 @@ static struct exynos_drm_display_ops exynos_dp_display_ops = {
.commit = exynos_dp_commit,
};
-static struct exynos_drm_display exynos_dp_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dp_display_ops,
-};
-
static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
{
struct device_node *dp_node = dev->of_node;
@@ -1210,44 +1196,6 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
return dp_video_config;
}
-static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
-{
- struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
- u32 phy_base;
- int ret = 0;
-
- dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
- if (!dp_phy_node) {
- dp->phy = devm_phy_get(dp->dev, "dp");
- return PTR_ERR_OR_ZERO(dp->phy);
- }
-
- if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
- dev_err(dp->dev, "failed to get reg for dptx-phy\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
- &dp->enable_mask)) {
- dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n");
- ret = -EINVAL;
- goto err;
- }
-
- dp->phy_addr = ioremap(phy_base, SZ_4);
- if (!dp->phy_addr) {
- dev_err(dp->dev, "failed to ioremap dp-phy\n");
- ret = -ENOMEM;
- goto err;
- }
-
-err:
- of_node_put(dp_phy_node);
-
- return ret;
-}
-
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
@@ -1263,10 +1211,10 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
{
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
int ret = 0;
@@ -1277,9 +1225,21 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dp->video_info))
return PTR_ERR(dp->video_info);
- ret = exynos_dp_dt_parse_phydata(dp);
- if (ret)
- return ret;
+ dp->phy = devm_phy_get(dp->dev, "dp");
+ if (IS_ERR(dp->phy)) {
+ dev_err(dp->dev, "no DP phy configured\n");
+ ret = PTR_ERR(dp->phy);
+ if (ret) {
+ /*
+ * phy itself is not enabled, so we can move forward
+ * by assigning NULL to the phy pointer.
+ */
+ if (ret == -ENOSYS || ret == -ENODEV)
+ dp->phy = NULL;
+ else
+ return ret;
+ }
+ }
if (!dp->panel) {
ret = exynos_dp_dt_parse_panel(dp);
@@ -1346,17 +1306,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->drm_dev = drm_dev;
- platform_set_drvdata(pdev, &exynos_dp_display);
-
- return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
+ return exynos_drm_create_enc_conn(drm_dev, &dp->display);
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
}
static const struct component_ops exynos_dp_ops = {
@@ -1371,16 +1329,20 @@ static int exynos_dp_probe(struct platform_device *pdev)
struct exynos_dp_device *dp;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dp_display.type);
- if (ret)
- return ret;
-
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp)
return -ENOMEM;
+ dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ dp->display.ops = &exynos_dp_display_ops;
+ platform_set_drvdata(pdev, dp);
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ dp->display.type);
+ if (ret)
+ return ret;
+
panel_node = of_parse_phandle(dev->of_node, "panel", 0);
if (panel_node) {
dp->panel = of_drm_find_panel(panel_node);
@@ -1389,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- exynos_dp_display.ctx = dp;
-
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
@@ -1410,19 +1370,17 @@ static int exynos_dp_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
return 0;
}
static int exynos_dp_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_drm_display *display = platform_get_drvdata(pdev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- exynos_dp_dpms(display, DRM_MODE_DPMS_ON);
+ exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a1aee6931bd7..164f171168e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -17,6 +17,8 @@
#include <drm/drm_dp_helper.h>
#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+
#define DP_TIMEOUT_LOOP_COUNT 100
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
@@ -145,6 +147,7 @@ struct link_train {
};
struct exynos_dp_device {
+ struct exynos_drm_display display;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
@@ -153,8 +156,6 @@ struct exynos_dp_device {
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
- void __iomem *phy_addr;
- unsigned int enable_mask;
struct video_info *video_info;
struct link_train link_train;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 690dcddab725..e353d353836f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,10 +15,7 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
-struct drm_device;
-struct drm_crtc;
-struct exynos_drm_manager;
-struct exynos_drm_overlay;
+#include "exynos_drm_drv.h"
int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3dc678ed9949..37678cf4425a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -22,6 +22,7 @@
#include "exynos_drm_drv.h"
struct exynos_dpi {
+ struct exynos_drm_display display;
struct device *dev;
struct device_node *panel_node;
@@ -35,6 +36,11 @@ struct exynos_dpi {
#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
+static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dpi, display);
+}
+
static enum drm_connector_status
exynos_dpi_detect(struct drm_connector *connector, bool force)
{
@@ -100,7 +106,7 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
static int exynos_dpi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dpi *ctx = display->ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
struct drm_connector *connector = &ctx->connector;
int ret;
@@ -141,7 +147,7 @@ static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dpi *ctx = display->ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -165,11 +171,6 @@ static struct exynos_drm_display_ops exynos_dpi_display_ops = {
.dpms = exynos_dpi_dpms
};
-static struct exynos_drm_display exynos_dpi_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dpi_display_ops,
-};
-
/* of_* functions will be removed after merge of of_graph patches */
static struct device_node *
of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
@@ -299,20 +300,21 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
struct exynos_dpi *ctx;
int ret;
- ret = exynos_drm_component_add(dev,
- EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dpi_display.type);
- if (ret)
- return ERR_PTR(ret);
-
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- goto err_del_component;
+ return ERR_PTR(-ENOMEM);
+ ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ ctx->display.ops = &exynos_dpi_display_ops;
ctx->dev = dev;
- exynos_dpi_display.ctx = ctx;
ctx->dpms_mode = DRM_MODE_DPMS_OFF;
+ ret = exynos_drm_component_add(dev,
+ EXYNOS_DEVICE_TYPE_CONNECTOR,
+ ctx->display.type);
+ if (ret)
+ return ERR_PTR(ret);
+
ret = exynos_dpi_parse_dt(ctx);
if (ret < 0) {
devm_kfree(dev, ctx);
@@ -328,7 +330,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
}
}
- return &exynos_dpi_display;
+ return &ctx->display;
err_del_component:
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
@@ -336,16 +338,16 @@ err_del_component:
return NULL;
}
-int exynos_dpi_remove(struct device *dev)
+int exynos_dpi_remove(struct exynos_drm_display *display)
{
- struct exynos_dpi *ctx = exynos_dpi_display.ctx;
+ struct exynos_dpi *ctx = display_to_dpi(display);
- exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
+ exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
if (ctx->panel)
drm_panel_detach(ctx->panel);
- exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e5c4c6c8c967..121470a83d1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -203,8 +203,6 @@ static int exynos_drm_resume(struct drm_device *dev)
}
drm_modeset_unlock_all(dev);
- drm_helper_resume_force_mode(dev);
-
return 0;
}
@@ -475,8 +473,6 @@ void exynos_drm_component_del(struct device *dev,
list_del(&cdev->list);
kfree(cdev);
}
-
- break;
}
mutex_unlock(&drm_component_lock);
@@ -556,187 +552,72 @@ static const struct component_master_ops exynos_drm_ops = {
.unbind = exynos_drm_unbind,
};
-static int exynos_drm_platform_probe(struct platform_device *pdev)
-{
- struct component_match *match;
- int ret;
-
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
-
+static struct platform_driver *const exynos_drm_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_FIMD
- ret = platform_driver_register(&fimd_driver);
- if (ret < 0)
- return ret;
+ &fimd_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_DP
- ret = platform_driver_register(&dp_driver);
- if (ret < 0)
- goto err_unregister_fimd_drv;
+ &dp_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_DSI
- ret = platform_driver_register(&dsi_driver);
- if (ret < 0)
- goto err_unregister_dp_drv;
+ &dsi_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_HDMI
- ret = platform_driver_register(&mixer_driver);
- if (ret < 0)
- goto err_unregister_dsi_drv;
- ret = platform_driver_register(&hdmi_driver);
- if (ret < 0)
- goto err_unregister_mixer_drv;
+ &mixer_driver,
+ &hdmi_driver,
#endif
+};
- match = exynos_drm_match_add(&pdev->dev);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err_unregister_hdmi_drv;
- }
-
- ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
- match);
- if (ret < 0)
- goto err_unregister_hdmi_drv;
-
+static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_G2D
- ret = platform_driver_register(&g2d_driver);
- if (ret < 0)
- goto err_del_component_master;
+ &g2d_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_FIMC
- ret = platform_driver_register(&fimc_driver);
- if (ret < 0)
- goto err_unregister_g2d_drv;
+ &fimc_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- ret = platform_driver_register(&rotator_driver);
- if (ret < 0)
- goto err_unregister_fimc_drv;
+ &rotator_driver,
#endif
-
#ifdef CONFIG_DRM_EXYNOS_GSC
- ret = platform_driver_register(&gsc_driver);
- if (ret < 0)
- goto err_unregister_rotator_drv;
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_IPP
- ret = platform_driver_register(&ipp_driver);
- if (ret < 0)
- goto err_unregister_gsc_drv;
-
- ret = exynos_platform_device_ipp_register();
- if (ret < 0)
- goto err_unregister_ipp_drv;
+ &gsc_driver,
#endif
-
- return ret;
-
#ifdef CONFIG_DRM_EXYNOS_IPP
-err_unregister_ipp_drv:
- platform_driver_unregister(&ipp_driver);
-err_unregister_gsc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
- platform_driver_unregister(&gsc_driver);
-err_unregister_rotator_drv:
+ &ipp_driver,
#endif
+};
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- platform_driver_unregister(&rotator_driver);
-err_unregister_fimc_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
- platform_driver_unregister(&fimc_driver);
-err_unregister_g2d_drv:
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
- platform_driver_unregister(&g2d_driver);
-err_del_component_master:
-#endif
- component_master_del(&pdev->dev, &exynos_drm_ops);
-
-err_unregister_hdmi_drv:
-#ifdef CONFIG_DRM_EXYNOS_HDMI
- platform_driver_unregister(&hdmi_driver);
-err_unregister_mixer_drv:
- platform_driver_unregister(&mixer_driver);
-err_unregister_dsi_drv:
-#endif
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+ struct component_match *match;
-#ifdef CONFIG_DRM_EXYNOS_DSI
- platform_driver_unregister(&dsi_driver);
-err_unregister_dp_drv:
-#endif
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
-#ifdef CONFIG_DRM_EXYNOS_DP
- platform_driver_unregister(&dp_driver);
-err_unregister_fimd_drv:
-#endif
+ match = exynos_drm_match_add(&pdev->dev);
+ if (IS_ERR(match))
+ return PTR_ERR(match);
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- platform_driver_unregister(&fimd_driver);
-#endif
- return ret;
+ return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
+ match);
}
static int exynos_drm_platform_remove(struct platform_device *pdev)
{
-#ifdef CONFIG_DRM_EXYNOS_IPP
- exynos_platform_device_ipp_unregister();
- platform_driver_unregister(&ipp_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_GSC
- platform_driver_unregister(&gsc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- platform_driver_unregister(&rotator_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMC
- platform_driver_unregister(&fimc_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_G2D
- platform_driver_unregister(&g2d_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_HDMI
- platform_driver_unregister(&mixer_driver);
- platform_driver_unregister(&hdmi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- platform_driver_unregister(&fimd_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DSI
- platform_driver_unregister(&dsi_driver);
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_DP
- platform_driver_unregister(&dp_driver);
-#endif
component_master_del(&pdev->dev, &exynos_drm_ops);
return 0;
}
+static const char * const strings[] = {
+ "samsung,exynos3",
+ "samsung,exynos4",
+ "samsung,exynos5",
+};
+
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
},
@@ -744,7 +625,25 @@ static struct platform_driver exynos_drm_platform_driver = {
static int exynos_drm_init(void)
{
- int ret;
+ bool is_exynos = false;
+ int ret, i, j;
+
+ /*
+ * Register device object only in case of Exynos SoC.
+ *
+ * The code below temporarily works around an infinite loop issue hit
+ * by the Exynos DRM driver when using a multi-platform kernel.
+ * It will be replaced with a more generic approach later.
+ */
+ for (i = 0; i < ARRAY_SIZE(strings); i++) {
+ if (of_machine_is_compatible(strings[i])) {
+ is_exynos = true;
+ break;
+ }
+ }
+
+ if (!is_exynos)
+ return -ENODEV;
/*
* Register device object only in case of Exynos SoC.
@@ -763,24 +662,50 @@ static int exynos_drm_init(void)
if (IS_ERR(exynos_drm_pdev))
return PTR_ERR(exynos_drm_pdev);
-#ifdef CONFIG_DRM_EXYNOS_VIDI
ret = exynos_drm_probe_vidi();
if (ret < 0)
goto err_unregister_pd;
+
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
+ ret = platform_driver_register(exynos_drm_kms_drivers[i]);
+ if (ret < 0)
+ goto err_unregister_kms_drivers;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(exynos_drm_non_kms_drivers); ++j) {
+ ret = platform_driver_register(exynos_drm_non_kms_drivers[j]);
+ if (ret < 0)
+ goto err_unregister_non_kms_drivers;
+ }
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = exynos_platform_device_ipp_register();
+ if (ret < 0)
+ goto err_unregister_non_kms_drivers;
#endif
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret)
- goto err_remove_vidi;
+ goto err_unregister_resources;
return 0;
-err_remove_vidi:
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+err_unregister_resources:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+#endif
+
+err_unregister_non_kms_drivers:
+ while (--j >= 0)
+ platform_driver_unregister(exynos_drm_non_kms_drivers[j]);
+
+err_unregister_kms_drivers:
+ while (--i >= 0)
+ platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
exynos_drm_remove_vidi();
err_unregister_pd:
-#endif
platform_device_unregister(exynos_drm_pdev);
return ret;
@@ -788,10 +713,22 @@ err_unregister_pd:
static void exynos_drm_exit(void)
{
+ int i;
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ exynos_platform_device_ipp_unregister();
+#endif
+
+ for (i = ARRAY_SIZE(exynos_drm_non_kms_drivers) - 1; i >= 0; --i)
+ platform_driver_unregister(exynos_drm_non_kms_drivers[i]);
+
+ for (i = ARRAY_SIZE(exynos_drm_kms_drivers) - 1; i >= 0; --i)
+ platform_driver_unregister(exynos_drm_kms_drivers[i]);
+
platform_driver_unregister(&exynos_drm_platform_driver);
-#ifdef CONFIG_DRM_EXYNOS_VIDI
+
exynos_drm_remove_vidi();
-#endif
+
platform_device_unregister(exynos_drm_pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d22e640f59a0..2e5063488c50 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -15,6 +15,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <drm/drmP.h>
#include <linux/module.h>
#define MAX_CRTC 3
@@ -22,24 +23,6 @@
#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
-#define _wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- } \
- ret__; \
-})
-
-#define wait_for(COND, MS) _wait_for(COND, MS)
-
-struct drm_device;
-struct exynos_drm_overlay;
-struct drm_connector;
-
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -83,10 +66,10 @@ enum exynos_drm_output_type {
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
* @zpos: order of overlay layer(z position).
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
* @index_color: if using color key feature then this value would be used
* as index color.
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
* @local_path: in case of lcd type, local path mode on or off.
* @transparency: transparency on or off.
* @activated: activated or not.
@@ -114,19 +97,20 @@ struct exynos_drm_overlay {
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
int zpos;
-
- bool default_win;
- bool color_key;
unsigned int index_color;
- bool local_path;
- bool transparency;
- bool activated;
+
+ bool default_win:1;
+ bool color_key:1;
+ bool local_path:1;
+ bool transparency:1;
+ bool activated:1;
};
/*
* Exynos DRM Display Structure.
* - this structure is common to analog tv, digital tv and lcd panel.
*
+ * @create_connector: initialize and register a new connector
* @remove: cleans up the display for removal
* @mode_fixup: fix mode data comparing to hw specific display mode.
* @mode_set: convert drm_display_mode to hw specific display mode and
@@ -168,7 +152,6 @@ struct exynos_drm_display {
struct drm_encoder *encoder;
struct drm_connector *connector;
struct exynos_drm_display_ops *ops;
- void *ctx;
};
/*
@@ -227,7 +210,6 @@ struct exynos_drm_manager {
struct drm_crtc *crtc;
int pipe;
struct exynos_drm_manager_ops *ops;
- void *ctx;
};
struct exynos_drm_g2d_private {
@@ -279,8 +261,6 @@ struct exynos_drm_private {
* @dev: pointer to device object for subdrv device driver.
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
- * @manager: subdrv has its own manager to control a hardware appropriately
- * and we can access a hardware drawing on this manager.
* @probe: this callback would be called by exynos drm driver after
* subdrv is registered to it.
* @remove: this callback is used to release resources created
@@ -312,45 +292,34 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-/*
- * this function registers exynos drm hdmi platform device. It ensures only one
- * instance of the device is created.
- */
-int exynos_platform_device_hdmi_register(void);
-
-/*
- * this function unregisters exynos drm hdmi platform device if it exists.
- */
-void exynos_platform_device_hdmi_unregister(void);
-
-/*
- * this function registers exynos drm ipp platform device.
- */
+#ifdef CONFIG_DRM_EXYNOS_IPP
int exynos_platform_device_ipp_register(void);
-
-/*
- * this function unregisters exynos drm ipp platform device if it exists.
- */
void exynos_platform_device_ipp_unregister(void);
+#else
+static inline int exynos_platform_device_ipp_register(void) { return 0; }
+static inline void exynos_platform_device_ipp_unregister(void) {}
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct device *dev);
+int exynos_dpi_remove(struct exynos_drm_display *display);
#else
static inline struct exynos_drm_display *
exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct device *dev) { return 0; }
+static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+{
+ return 0;
+}
#endif
-/*
- * this function registers exynos drm vidi platform device/driver.
- */
+#ifdef CONFIG_DRM_EXYNOS_VIDI
int exynos_drm_probe_vidi(void);
-
-/*
- * this function unregister exynos drm vidi platform device/driver.
- */
void exynos_drm_remove_vidi(void);
+#else
+static inline int exynos_drm_probe_vidi(void) { return 0; }
+static inline void exynos_drm_remove_vidi(void) {}
+#endif
/* This function creates a encoder and a connector, and initializes them. */
int exynos_drm_create_enc_conn(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index acf7e9e39dcd..05fe93dc57a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -268,9 +268,9 @@ struct exynos_dsi_driver_data {
};
struct exynos_dsi {
+ struct exynos_drm_display display;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct drm_encoder *encoder;
struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -304,6 +304,11 @@ struct exynos_dsi {
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct exynos_dsi, display);
+}
+
static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.plltmr_reg = 0x50,
.has_freqband = 1,
@@ -316,6 +321,11 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.has_clklane_stop = 1,
};
+static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
+ .plltmr_reg = 0x58,
+ .has_clklane_stop = 1,
+};
+
static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
.plltmr_reg = 0x58,
};
@@ -325,6 +335,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4_dsi_driver_data },
+ { .compatible = "samsung,exynos4415-mipi-dsi",
+ .data = &exynos4415_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
.data = &exynos5_dsi_driver_data },
{ }
@@ -1104,7 +1116,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
{
struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
- struct drm_encoder *encoder = dsi->encoder;
+ struct drm_encoder *encoder = dsi->display.encoder;
if (dsi->state & DSIM_STATE_ENABLED)
exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1143,6 +1155,7 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
{
int ret;
+ int te_gpio_irq;
dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
if (!gpio_is_valid(dsi->te_gpio)) {
@@ -1157,14 +1170,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
goto out;
}
- /*
- * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because panel
- * calls drm_panel_init() first then calls mipi_dsi_attach() in probe().
- * It means that te_gpio is invalid when exynos_dsi_enable_irq() is
- * called by drm_panel_init() before panel is attached.
- */
- ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
- exynos_dsi_te_irq_handler, NULL,
+ te_gpio_irq = gpio_to_irq(dsi->te_gpio);
+
+ irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
+ ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
IRQF_TRIGGER_RISING, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
@@ -1195,9 +1204,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->mode_flags = device->mode_flags;
dsi->panel_node = device->dev.of_node;
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
/*
* This is a temporary solution and should be made by more generic way.
*
@@ -1211,6 +1217,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
}
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
return 0;
}
@@ -1236,7 +1245,7 @@ static bool exynos_dsi_is_short_dsi_type(u8 type)
}
static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- struct mipi_dsi_msg *msg)
+ const struct mipi_dsi_msg *msg)
{
struct exynos_dsi *dsi = host_to_dsi(host);
struct exynos_dsi_transfer xfer;
@@ -1369,16 +1378,17 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ dsi->state |= DSIM_STATE_ENABLED;
+
ret = drm_panel_enable(dsi->panel);
if (ret < 0) {
+ dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
return ret;
}
- dsi->state |= DSIM_STATE_ENABLED;
-
return 0;
}
@@ -1397,7 +1407,7 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
if (dsi->panel) {
switch (mode) {
@@ -1474,7 +1484,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
{
struct exynos_dsi *dsi = connector_to_dsi(connector);
- return dsi->encoder;
+ return dsi->display.encoder;
}
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1486,12 +1496,10 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
static int exynos_dsi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct drm_connector *connector = &dsi->connector;
int ret;
- dsi->encoder = encoder;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(encoder->dev, connector,
@@ -1512,7 +1520,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
static void exynos_dsi_mode_set(struct exynos_drm_display *display,
struct drm_display_mode *mode)
{
- struct exynos_dsi *dsi = display->ctx;
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct videomode *vm = &dsi->vm;
vm->hactive = mode->hdisplay;
@@ -1531,10 +1539,6 @@ static struct exynos_drm_display_ops exynos_dsi_display_ops = {
.dpms = exynos_dsi_dpms
};
-static struct exynos_drm_display exynos_dsi_display = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &exynos_dsi_display_ops,
-};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
/* of_* functions will be removed after merge of of_graph patches */
@@ -1640,28 +1644,28 @@ end:
static int exynos_dsi_bind(struct device *dev, struct device *master,
void *data)
{
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = display_to_dsi(display);
struct drm_device *drm_dev = data;
- struct exynos_dsi *dsi;
int ret;
- ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display);
+ ret = exynos_drm_create_enc_conn(drm_dev, display);
if (ret) {
DRM_ERROR("Encoder create [%d] failed with %d\n",
- exynos_dsi_display.type, ret);
+ display->type, ret);
return ret;
}
- dsi = exynos_dsi_display.ctx;
-
return mipi_dsi_host_register(&dsi->dsi_host);
}
static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+ struct exynos_drm_display *display = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = display_to_dsi(display);
- exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+ exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@@ -1673,22 +1677,23 @@ static const struct component_ops exynos_dsi_component_ops = {
static int exynos_dsi_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
struct exynos_dsi *dsi;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- exynos_dsi_display.type);
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
+ dsi->display.ops = &exynos_dsi_display_ops;
+
+ ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ dsi->display.type);
if (ret)
return ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi) {
- dev_err(&pdev->dev, "failed to allocate dsi object.\n");
- ret = -ENOMEM;
- goto err_del_component;
- }
-
/* To be checked as invalid one */
dsi->te_gpio = -ENOENT;
@@ -1697,9 +1702,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dsi->transfer_list);
dsi->dsi_host.ops = &exynos_dsi_ops;
- dsi->dsi_host.dev = &pdev->dev;
+ dsi->dsi_host.dev = dev;
- dsi->dev = &pdev->dev;
+ dsi->dev = dev;
dsi->driver_data = exynos_dsi_get_driver_data(pdev);
ret = exynos_dsi_parse_dt(dsi);
@@ -1708,70 +1713,68 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
- ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(&pdev->dev, "failed to get regulators: %d\n", ret);
+ dev_info(dev, "failed to get regulators: %d\n", ret);
return -EPROBE_DEFER;
}
- dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+ dsi->pll_clk = devm_clk_get(dev, "pll_clk");
if (IS_ERR(dsi->pll_clk)) {
- dev_info(&pdev->dev, "failed to get dsi pll input clock\n");
+ dev_info(dev, "failed to get dsi pll input clock\n");
ret = PTR_ERR(dsi->pll_clk);
goto err_del_component;
}
- dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ dsi->bus_clk = devm_clk_get(dev, "bus_clk");
if (IS_ERR(dsi->bus_clk)) {
- dev_info(&pdev->dev, "failed to get dsi bus clock\n");
+ dev_info(dev, "failed to get dsi bus clock\n");
ret = PTR_ERR(dsi->bus_clk);
goto err_del_component;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ dsi->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->reg_base)) {
- dev_err(&pdev->dev, "failed to remap io region\n");
+ dev_err(dev, "failed to remap io region\n");
ret = PTR_ERR(dsi->reg_base);
goto err_del_component;
}
- dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+ dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {
- dev_info(&pdev->dev, "failed to get dsim phy\n");
+ dev_info(dev, "failed to get dsim phy\n");
ret = PTR_ERR(dsi->phy);
goto err_del_component;
}
dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+ dev_err(dev, "failed to request dsi irq resource\n");
ret = dsi->irq;
goto err_del_component;
}
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+ ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
exynos_dsi_irq, IRQF_ONESHOT,
- dev_name(&pdev->dev), dsi);
+ dev_name(dev), dsi);
if (ret) {
- dev_err(&pdev->dev, "failed to request dsi irq\n");
+ dev_err(dev, "failed to request dsi irq\n");
goto err_del_component;
}
- exynos_dsi_display.ctx = dsi;
-
- platform_set_drvdata(pdev, &exynos_dsi_display);
+ platform_set_drvdata(pdev, &dsi->display);
- ret = component_add(&pdev->dev, &exynos_dsi_component_ops);
+ ret = component_add(dev, &exynos_dsi_component_ops);
if (ret)
goto err_del_component;
return ret;
err_del_component:
- exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
return ret;
}
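Note (illustration, not part of the patch): the exynos_drm_dsi.c hunks above drop the file-scope exynos_dsi_display singleton. struct exynos_drm_display is now embedded in struct exynos_dsi and recovered through the display_to_dsi() container_of() helper, so each probed device carries its own state. A minimal stand-alone sketch of that embed-and-recover pattern, with simplified illustrative structs rather than the driver's real ones:

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel's container_of(): map a member pointer back
 * to the structure that embeds it */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct display {			/* stands in for struct exynos_drm_display */
	int type;
};

struct dsi_ctx {			/* stands in for struct exynos_dsi */
	struct display display;		/* embedded, no separate ->ctx pointer */
	int id;
};

static struct dsi_ctx *display_to_ctx(struct display *d)
{
	return container_of(d, struct dsi_ctx, display);
}

int main(void)
{
	struct dsi_ctx ctx = { .display = { .type = 1 }, .id = 42 };
	struct display *d = &ctx.display;	/* what the ops callbacks receive */

	/* recover the per-device context without any global variable */
	printf("id=%d\n", display_to_ctx(d)->id);
	return 0;
}

The same move is repeated below for the FIMD, vidi, HDMI and mixer contexts.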
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index b7a1620a7e79..26305d8dd93a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -14,8 +14,6 @@
#ifndef _EXYNOS_DRM_ENCODER_H_
#define _EXYNOS_DRM_ENCODER_H_
-struct exynos_drm_manager;
-
void exynos_drm_encoder_setup(struct drm_device *dev);
struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
struct exynos_drm_display *mgr,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 085b066a9993..e5810d13bf9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -84,8 +84,6 @@
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
-#define get_fimd_manager(mgr) platform_get_drvdata(to_platform_device(dev))
-
struct fimd_driver_data {
unsigned int timing_base;
unsigned int lcdblk_offset;
@@ -96,6 +94,7 @@ struct fimd_driver_data {
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
unsigned int has_vidoutcon:1;
+ unsigned int has_vtsel:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -118,6 +117,17 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
.lcdblk_vt_shift = 10,
.lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
+ .has_vtsel = 1,
+};
+
+static struct fimd_driver_data exynos4415_fimd_driver_data = {
+ .timing_base = 0x20000,
+ .lcdblk_offset = 0x210,
+ .lcdblk_vt_shift = 10,
+ .lcdblk_bypass_shift = 1,
+ .has_shadowcon = 1,
+ .has_vidoutcon = 1,
+ .has_vtsel = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -127,6 +137,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
.has_vidoutcon = 1,
+ .has_vtsel = 1,
};
struct fimd_win_data {
@@ -146,6 +157,7 @@ struct fimd_win_data {
};
struct fimd_context {
+ struct exynos_drm_manager manager;
struct device *dev;
struct drm_device *drm_dev;
struct clk *bus_clk;
@@ -173,6 +185,11 @@ struct fimd_context {
struct exynos_drm_display *display;
};
+static inline struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
+{
+ return container_of(mgr, struct fimd_context, manager);
+}
+
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
@@ -180,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos4415-fimd",
+ .data = &exynos4415_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{},
@@ -197,7 +216,7 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
if (ctx->suspended)
return;
@@ -214,9 +233,35 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
+static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+ bool enable)
+{
+ u32 val = readl(ctx->regs + WINCON(win));
+
+ if (enable)
+ val |= WINCONx_ENWIN;
+ else
+ val &= ~WINCONx_ENWIN;
+
+ writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+ bool enable)
+{
+ u32 val = readl(ctx->regs + SHADOWCON);
+
+ if (enable)
+ val |= SHADOWCON_CHx_ENABLE(win);
+ else
+ val &= ~SHADOWCON_CHx_ENABLE(win);
+
+ writel(val, ctx->regs + SHADOWCON);
+}
+
static void fimd_clear_channel(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
int win, ch_enabled = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -226,16 +271,12 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
- /* wincon */
- val &= ~WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
-
- /* unprotect windows */
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
+ fimd_enable_video_output(ctx, win, false);
+
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win,
+ false);
+
ch_enabled = 1;
}
}
@@ -253,7 +294,7 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
@@ -275,7 +316,7 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
@@ -315,14 +356,14 @@ static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
static void fimd_mode_set(struct exynos_drm_manager *mgr,
const struct drm_display_mode *in_mode)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
drm_mode_copy(&ctx->mode, in_mode);
}
static void fimd_commit(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct drm_display_mode *mode = &ctx->mode;
struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
@@ -343,7 +384,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
writel(0, timing_base + I80IFCONFBx(0));
/* set video type selection to I80 interface */
- if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ if (driver_data->has_vtsel && ctx->sysreg &&
+ regmap_update_bits(ctx->sysreg,
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
@@ -421,7 +463,7 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
u32 val;
if (ctx->suspended)
@@ -431,12 +473,19 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
val = readl(ctx->regs + VIDINTCON0);
val |= VIDINTCON0_INT_ENABLE;
- val |= VIDINTCON0_INT_FRAME;
- val &= ~VIDINTCON0_FRAMESEL0_MASK;
- val |= VIDINTCON0_FRAMESEL0_VSYNC;
- val &= ~VIDINTCON0_FRAMESEL1_MASK;
- val |= VIDINTCON0_FRAMESEL1_NONE;
+ if (ctx->i80_if) {
+ val |= VIDINTCON0_INT_I80IFDONE;
+ val |= VIDINTCON0_INT_SYSMAINCON;
+ val &= ~VIDINTCON0_INT_SYSSUBCON;
+ } else {
+ val |= VIDINTCON0_INT_FRAME;
+
+ val &= ~VIDINTCON0_FRAMESEL0_MASK;
+ val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ val &= ~VIDINTCON0_FRAMESEL1_MASK;
+ val |= VIDINTCON0_FRAMESEL1_NONE;
+ }
writel(val, ctx->regs + VIDINTCON0);
}
@@ -446,7 +495,7 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
u32 val;
if (ctx->suspended)
@@ -455,9 +504,15 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
- val &= ~VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_INT_ENABLE;
+ if (ctx->i80_if) {
+ val &= ~VIDINTCON0_INT_I80IFDONE;
+ val &= ~VIDINTCON0_INT_SYSMAINCON;
+ val &= ~VIDINTCON0_INT_SYSSUBCON;
+ } else
+ val &= ~VIDINTCON0_INT_FRAME;
+
writel(val, ctx->regs + VIDINTCON0);
}
}
@@ -465,7 +520,7 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win;
unsigned long offset;
@@ -623,7 +678,7 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
@@ -730,20 +785,14 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
if (win != 0)
fimd_win_set_colkey(ctx, win);
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val |= WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
+ fimd_enable_video_output(ctx, win, true);
+
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win, true);
/* Enable DMA channel and unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val |= SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
-
win_data->enabled = true;
if (ctx->i80_if)
@@ -752,10 +801,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int win = zpos;
- u32 val;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
@@ -774,18 +822,12 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
/* protect windows */
fimd_shadow_protect_win(ctx, win, true);
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val &= ~WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
+ fimd_enable_video_output(ctx, win, false);
- /* unprotect windows */
- if (ctx->driver_data->has_shadowcon) {
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_CHx_ENABLE(win);
- writel(val, ctx->regs + SHADOWCON);
- }
+ if (ctx->driver_data->has_shadowcon)
+ fimd_enable_shadow_channel_path(ctx, win, false);
+ /* unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
win_data->enabled = false;
@@ -793,7 +835,7 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void fimd_window_suspend(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -803,12 +845,11 @@ static void fimd_window_suspend(struct exynos_drm_manager *mgr)
if (win_data->enabled)
fimd_win_disable(mgr, i);
}
- fimd_wait_for_vblank(mgr);
}
static void fimd_window_resume(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -821,7 +862,7 @@ static void fimd_window_resume(struct exynos_drm_manager *mgr)
static void fimd_apply(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
struct fimd_win_data *win_data;
int i;
@@ -838,7 +879,7 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
static int fimd_poweron(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
int ret;
if (!ctx->suspended)
@@ -886,7 +927,7 @@ bus_clk_err:
static int fimd_poweroff(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
if (ctx->suspended)
return 0;
@@ -928,39 +969,41 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
static void fimd_trigger(struct device *dev)
{
- struct exynos_drm_manager *mgr = get_fimd_manager(dev);
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
u32 reg;
- atomic_set(&ctx->triggering, 1);
+ /*
+	 * Skip triggering if already in the triggering state, because multiple
+	 * trigger requests can cause a panel reset.
+ */
+ if (atomic_read(&ctx->triggering))
+ return;
- reg = readl(ctx->regs + VIDINTCON0);
- reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
- VIDINTCON0_INT_SYSMAINCON);
- writel(reg, ctx->regs + VIDINTCON0);
+ /* Enters triggering mode */
+ atomic_set(&ctx->triggering, 1);
reg = readl(timing_base + TRIGCON);
reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
writel(reg, timing_base + TRIGCON);
+
+ /*
+ * Exits triggering mode if vblank is not enabled yet, because when the
+	 * VIDINTCON0 register is not set, it cannot exit from triggering mode.
+ */
+ if (!test_bit(0, &ctx->irq_flags))
+ atomic_set(&ctx->triggering, 0);
}
static void fimd_te_handler(struct exynos_drm_manager *mgr)
{
- struct fimd_context *ctx = mgr->ctx;
+ struct fimd_context *ctx = mgr_to_fimd(mgr);
/* Checks the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
return;
- /*
- * Skips to trigger if in triggering state, because multiple triggering
- * requests can cause panel reset.
- */
- if (atomic_read(&ctx->triggering))
- return;
-
/*
* If there is a page flip request, triggers and handles the page flip
* event so that current fb can be updated into panel GRAM.
@@ -972,10 +1015,10 @@ static void fimd_te_handler(struct exynos_drm_manager *mgr)
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
-
- if (!atomic_read(&ctx->triggering))
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
}
+
+ if (test_bit(0, &ctx->irq_flags))
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
@@ -992,11 +1035,6 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
.te_handler = fimd_te_handler,
};
-static struct exynos_drm_manager fimd_manager = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .ops = &fimd_manager_ops,
-};
-
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -1013,16 +1051,10 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
goto out;
if (ctx->i80_if) {
- /* unset I80 frame done interrupt */
- val = readl(ctx->regs + VIDINTCON0);
- val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
- writel(val, ctx->regs + VIDINTCON0);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
- /* exit triggering mode */
+ /* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
-
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
} else {
drm_handle_vblank(ctx->drm_dev, ctx->pipe);
exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
@@ -1040,11 +1072,11 @@ out:
static int fimd_bind(struct device *dev, struct device *master, void *data)
{
- struct fimd_context *ctx = fimd_manager.ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- fimd_mgr_initialize(&fimd_manager, drm_dev);
- exynos_drm_crtc_create(&fimd_manager);
+ fimd_mgr_initialize(&ctx->manager, drm_dev);
+ exynos_drm_crtc_create(&ctx->manager);
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
@@ -1055,15 +1087,14 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
static void fimd_unbind(struct device *dev, struct device *master,
void *data)
{
- struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
- struct fimd_context *ctx = fimd_manager.ctx;
+ struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_dpms(mgr, DRM_MODE_DPMS_OFF);
+ fimd_dpms(&ctx->manager, DRM_MODE_DPMS_OFF);
if (ctx->display)
- exynos_dpi_remove(dev);
+ exynos_dpi_remove(ctx->display);
- fimd_mgr_remove(mgr);
+ fimd_mgr_remove(&ctx->manager);
}
static const struct component_ops fimd_component_ops = {
@@ -1079,21 +1110,20 @@ static int fimd_probe(struct platform_device *pdev)
struct resource *res;
int ret = -EINVAL;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- fimd_manager.type);
- if (ret)
- return ret;
-
- if (!dev->of_node) {
- ret = -ENODEV;
- goto err_del_component;
- }
+ if (!dev->of_node)
+ return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- ret = -ENOMEM;
- goto err_del_component;
- }
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_LCD;
+ ctx->manager.ops = &fimd_manager_ops;
+
+ ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
+ ctx->manager.type);
+ if (ret)
+ return ret;
ctx->dev = dev;
ctx->suspended = true;
@@ -1182,27 +1212,27 @@ static int fimd_probe(struct platform_device *pdev)
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
- platform_set_drvdata(pdev, &fimd_manager);
-
- fimd_manager.ctx = ctx;
+ platform_set_drvdata(pdev, ctx);
ctx->display = exynos_dpi_probe(dev);
- if (IS_ERR(ctx->display))
- return PTR_ERR(ctx->display);
+ if (IS_ERR(ctx->display)) {
+ ret = PTR_ERR(ctx->display);
+ goto err_del_component;
+ }
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
- ret = component_add(&pdev->dev, &fimd_component_ops);
+ ret = component_add(dev, &fimd_component_ops);
if (ret)
goto err_disable_pm_runtime;
return ret;
err_disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
err_del_component:
- exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
return ret;
}
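Note (illustration, not part of the patch): the FIMD hunks fold the repeated WINCON/SHADOWCON read-modify-write sequences into fimd_enable_video_output() and fimd_enable_shadow_channel_path(). A small self-contained sketch of that helper style, with a plain variable standing in for the memory-mapped register; the bit layout here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define WIN_ENABLE(w)	(1u << (w))	/* illustrative bit layout only */

static uint32_t fake_wincon;		/* stands in for an ioremapped register */

/* one helper instead of open-coded read/mask/write at every call site */
static void set_window_output(int win, int enable)
{
	uint32_t val = fake_wincon;	/* readl() in the driver */

	if (enable)
		val |= WIN_ENABLE(win);
	else
		val &= ~WIN_ENABLE(win);

	fake_wincon = val;		/* writel() in the driver */
}

int main(void)
{
	set_window_output(2, 1);
	set_window_output(0, 1);
	set_window_output(2, 0);
	printf("WINCON = 0x%08x\n", (unsigned)fake_wincon);	/* 0x00000001 */
	return 0;
}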
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 72376d41c512..35d25889b476 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -40,7 +40,6 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
#else
-struct dma_iommu_mapping;
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 00d74b18f7cb..d5ad17dfc24d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -426,18 +426,21 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
c_node->start_work = ipp_create_cmd_work();
if (IS_ERR(c_node->start_work)) {
DRM_ERROR("failed to create start work.\n");
+ ret = PTR_ERR(c_node->start_work);
goto err_remove_id;
}
c_node->stop_work = ipp_create_cmd_work();
if (IS_ERR(c_node->stop_work)) {
DRM_ERROR("failed to create stop work.\n");
+ ret = PTR_ERR(c_node->stop_work);
goto err_free_start;
}
c_node->event_work = ipp_create_event_work();
if (IS_ERR(c_node->event_work)) {
DRM_ERROR("failed to create event work.\n");
+ ret = PTR_ERR(c_node->event_work);
goto err_free_stop;
}
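Note (illustration, not part of the patch): the exynos_drm_ipp.c fix assigns ret from the failed allocation before jumping to the error label, so the encoded errno actually reaches the caller instead of a stale value. A simplified userspace model of the ERR_PTR()/IS_ERR()/PTR_ERR() convention it relies on; the helpers below imitate, and do not replace, the kernel macros:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified imitations of the kernel helpers */
#define MAX_ERRNO	4095
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

static void *create_work(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return malloc(16);
}

static int setup(int fail)
{
	int ret;
	void *work = create_work(fail);

	if (IS_ERR(work)) {
		ret = PTR_ERR(work);		/* the assignment the patch adds */
		goto err;
	}
	free(work);
	return 0;
err:
	return ret;
}

int main(void)
{
	printf("ok=%d, fail=%d\n", setup(0), setup(1));	/* 0, -12 */
	return 0;
}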
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 50faf913e574..45899fb63272 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/component.h>
#include <drm/exynos_drm.h>
@@ -28,7 +29,6 @@
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
-#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
@@ -47,11 +47,13 @@ struct vidi_win_data {
};
struct vidi_context {
+ struct exynos_drm_manager manager;
+ struct exynos_drm_display display;
+ struct platform_device *pdev;
struct drm_device *drm_dev;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
- struct exynos_drm_subdrv subdrv;
struct vidi_win_data win_data[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
@@ -66,6 +68,16 @@ struct vidi_context {
int pipe;
};
+static inline struct vidi_context *manager_to_vidi(struct exynos_drm_manager *m)
+{
+ return container_of(m, struct vidi_context, manager);
+}
+
+static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct vidi_context, display);
+}
+
static const char fake_edid_info[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
@@ -93,7 +105,7 @@ static const char fake_edid_info[] = {
static void vidi_apply(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
struct vidi_win_data *win_data;
int i;
@@ -110,7 +122,7 @@ static void vidi_apply(struct exynos_drm_manager *mgr)
static void vidi_commit(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return;
@@ -118,7 +130,7 @@ static void vidi_commit(struct exynos_drm_manager *mgr)
static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return -EPERM;
@@ -140,7 +152,7 @@ static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
if (ctx->suspended)
return;
@@ -152,7 +164,7 @@ static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win;
unsigned long offset;
@@ -204,7 +216,7 @@ static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win = zpos;
@@ -229,7 +241,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct vidi_win_data *win_data;
int win = zpos;
@@ -247,7 +259,7 @@ static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -271,7 +283,7 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
DRM_DEBUG_KMS("%d\n", mode);
@@ -297,7 +309,7 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = manager_to_vidi(mgr);
struct exynos_drm_private *priv = drm_dev->dev_private;
mgr->drm_dev = ctx->drm_dev = drm_dev;
@@ -316,11 +328,6 @@ static struct exynos_drm_manager_ops vidi_manager_ops = {
.win_disable = vidi_win_disable,
};
-static struct exynos_drm_manager vidi_manager = {
- .type = EXYNOS_DISPLAY_TYPE_VIDI,
- .ops = &vidi_manager_ops,
-};
-
static void vidi_fake_vblank_handler(struct work_struct *work)
{
struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -349,9 +356,8 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
static int vidi_show_connection(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct vidi_context *ctx = dev_get_drvdata(dev);
int rc;
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
mutex_lock(&ctx->lock);
@@ -366,8 +372,7 @@ static int vidi_store_connection(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = dev_get_drvdata(dev);
int ret;
ret = kstrtoint(buf, 0, &ctx->connected);
@@ -420,7 +425,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
display = exynos_drm_get_display(encoder);
if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
- ctx = display->ctx;
+ ctx = display_to_vidi(display);
break;
}
}
@@ -530,7 +535,7 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
static int vidi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct vidi_context *ctx = display->ctx;
+ struct vidi_context *ctx = display_to_vidi(display);
struct drm_connector *connector = &ctx->connector;
int ret;
@@ -556,27 +561,22 @@ static struct exynos_drm_display_ops vidi_display_ops = {
.create_connector = vidi_create_connector,
};
-static struct exynos_drm_display vidi_display = {
- .type = EXYNOS_DISPLAY_TYPE_VIDI,
- .ops = &vidi_display_ops,
-};
-
-static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+static int vidi_bind(struct device *dev, struct device *master, void *data)
{
- struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
struct drm_crtc *crtc = ctx->crtc;
int ret;
- vidi_mgr_initialize(mgr, drm_dev);
+ vidi_mgr_initialize(&ctx->manager, drm_dev);
- ret = exynos_drm_crtc_create(&vidi_manager);
+ ret = exynos_drm_crtc_create(&ctx->manager);
if (ret) {
DRM_ERROR("failed to create crtc.\n");
return ret;
}
- ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display);
+ ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
if (ret) {
crtc->funcs->destroy(crtc);
DRM_ERROR("failed to create encoder and connector.\n");
@@ -586,9 +586,18 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return 0;
}
+
+static void vidi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops vidi_component_ops = {
+ .bind = vidi_bind,
+ .unbind = vidi_unbind,
+};
+
static int vidi_probe(struct platform_device *pdev)
{
- struct exynos_drm_subdrv *subdrv;
struct vidi_context *ctx;
int ret;
@@ -596,40 +605,54 @@ static int vidi_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_VIDI;
+ ctx->manager.ops = &vidi_manager_ops;
+ ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
+ ctx->display.ops = &vidi_display_ops;
ctx->default_win = 0;
+ ctx->pdev = pdev;
- INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
-
- vidi_manager.ctx = ctx;
- vidi_display.ctx = ctx;
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
+ ctx->manager.type);
+ if (ret)
+ return ret;
- mutex_init(&ctx->lock);
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ ctx->display.type);
+ if (ret)
+ goto err_del_crtc_component;
- platform_set_drvdata(pdev, &vidi_manager);
+ INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
- subdrv = &ctx->subdrv;
- subdrv->dev = &pdev->dev;
- subdrv->probe = vidi_subdrv_probe;
+ mutex_init(&ctx->lock);
- ret = exynos_drm_subdrv_register(subdrv);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register drm vidi device\n");
- return ret;
- }
+ platform_set_drvdata(pdev, ctx);
ret = device_create_file(&pdev->dev, &dev_attr_connection);
if (ret < 0) {
- exynos_drm_subdrv_unregister(subdrv);
- DRM_INFO("failed to create connection sysfs.\n");
+ DRM_ERROR("failed to create connection sysfs.\n");
+ goto err_del_conn_component;
}
- return 0;
+ ret = component_add(&pdev->dev, &vidi_component_ops);
+ if (ret)
+ goto err_remove_file;
+
+ return ret;
+
+err_remove_file:
+ device_remove_file(&pdev->dev, &dev_attr_connection);
+err_del_conn_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+err_del_crtc_component:
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
+ return ret;
}
static int vidi_remove(struct platform_device *pdev)
{
- struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
- struct vidi_context *ctx = mgr->ctx;
+ struct vidi_context *ctx = platform_get_drvdata(pdev);
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
@@ -638,6 +661,10 @@ static int vidi_remove(struct platform_device *pdev)
return -EINVAL;
}
+ component_del(&pdev->dev, &vidi_component_ops);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
+ exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+
return 0;
}
@@ -668,12 +695,19 @@ int exynos_drm_probe_vidi(void)
return ret;
}
+static int exynos_drm_remove_vidi_device(struct device *dev, void *data)
+{
+ platform_device_unregister(to_platform_device(dev));
+
+ return 0;
+}
+
void exynos_drm_remove_vidi(void)
{
- struct vidi_context *ctx = vidi_manager.ctx;
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct platform_device *pdev = to_platform_device(subdrv->dev);
+ int ret = driver_for_each_device(&vidi_driver.driver, NULL, NULL,
+ exynos_drm_remove_vidi_device);
+ /* silence compiler warning */
+ (void)ret;
platform_driver_unregister(&vidi_driver);
- platform_device_unregister(pdev);
}
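Note (illustration, not part of the patch): vidi_probe() now registers two component entries, a sysfs attribute and the component ops, and unwinds them in reverse order when a later step fails. A generic sketch of that goto-based unwind ordering, with stubbed acquire/release steps standing in for the real registration calls:

#include <stdio.h>

/* stub steps; in the driver these are component/sysfs registrations */
static int step(const char *name, int fail)
{
	printf("acquire %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name) { printf("release %s\n", name); }

static int probe(int fail_at)
{
	int ret;

	ret = step("crtc component", fail_at == 1);
	if (ret)
		return ret;

	ret = step("connector component", fail_at == 2);
	if (ret)
		goto err_del_crtc;

	ret = step("sysfs file", fail_at == 3);
	if (ret)
		goto err_del_conn;

	ret = step("component ops", fail_at == 4);
	if (ret)
		goto err_remove_file;

	return 0;

	/* unwind in strict reverse order of acquisition */
err_remove_file:
	undo("sysfs file");
err_del_conn:
	undo("connector component");
err_del_crtc:
	undo("crtc component");
	return ret;
}

int main(void)
{
	return probe(4) ? 1 : 0;
}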
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 563a19e62eb2..5765a161abdd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -49,7 +49,6 @@
#include <linux/gpio.h>
#include <media/s5p_hdmi.h>
-#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
#define HOTPLUG_DEBOUNCE_MS 1100
@@ -182,6 +181,7 @@ struct hdmi_conf_regs {
};
struct hdmi_context {
+ struct exynos_drm_display display;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
@@ -213,6 +213,11 @@ struct hdmi_context {
enum hdmi_type type;
};
+static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+{
+ return container_of(d, struct hdmi_context, display);
+}
+
struct hdmiphy_config {
int pixel_clock;
u8 conf[32];
@@ -1123,7 +1128,7 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
static int hdmi_create_connector(struct exynos_drm_display *display,
struct drm_encoder *encoder)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_connector *connector = &hdata->connector;
int ret;
@@ -2000,7 +2005,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
static void hdmi_mode_set(struct exynos_drm_display *display,
struct drm_display_mode *mode)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_display_mode *m = mode;
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -2019,7 +2024,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
static void hdmi_commit(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
mutex_lock(&hdata->hdmi_mutex);
if (!hdata->powered) {
@@ -2033,7 +2038,7 @@ static void hdmi_commit(struct exynos_drm_display *display)
static void hdmi_poweron(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2064,7 +2069,7 @@ static void hdmi_poweron(struct exynos_drm_display *display)
static void hdmi_poweroff(struct exynos_drm_display *display)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct hdmi_resources *res = &hdata->res;
mutex_lock(&hdata->hdmi_mutex);
@@ -2099,7 +2104,7 @@ out:
static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
- struct hdmi_context *hdata = display->ctx;
+ struct hdmi_context *hdata = display_to_hdmi(display);
struct drm_encoder *encoder = hdata->encoder;
struct drm_crtc *crtc = encoder->crtc;
struct drm_crtc_helper_funcs *funcs = NULL;
@@ -2143,11 +2148,6 @@ static struct exynos_drm_display_ops hdmi_display_ops = {
.commit = hdmi_commit,
};
-static struct exynos_drm_display hdmi_display = {
- .type = EXYNOS_DISPLAY_TYPE_HDMI,
- .ops = &hdmi_display_ops,
-};
-
static void hdmi_hotplug_work_func(struct work_struct *work)
{
struct hdmi_context *hdata;
@@ -2302,12 +2302,11 @@ MODULE_DEVICE_TABLE (of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm_dev = data;
- struct hdmi_context *hdata;
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
- hdata = hdmi_display.ctx;
hdata->drm_dev = drm_dev;
- return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+ return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
}
static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2349,31 +2348,28 @@ static int hdmi_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
- hdmi_display.type);
- if (ret)
- return ret;
-
- if (!dev->of_node) {
- ret = -ENODEV;
- goto err_del_component;
- }
+ if (!dev->of_node)
+ return -ENODEV;
pdata = drm_hdmi_dt_parse_pdata(dev);
- if (!pdata) {
- ret = -EINVAL;
- goto err_del_component;
- }
+ if (!pdata)
+ return -EINVAL;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
- if (!hdata) {
- ret = -ENOMEM;
- goto err_del_component;
- }
+ if (!hdata)
+ return -ENOMEM;
+
+ hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
+ hdata->display.ops = &hdmi_display_ops;
+
+ ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+ hdata->display.type);
+ if (ret)
+ return ret;
mutex_init(&hdata->hdmi_mutex);
- platform_set_drvdata(pdev, &hdmi_display);
+ platform_set_drvdata(pdev, hdata);
match = of_match_node(hdmi_match_types, dev->of_node);
if (!match) {
@@ -2485,7 +2481,6 @@ out_get_phy_port:
}
pm_runtime_enable(dev);
- hdmi_display.ctx = hdata;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2510,7 +2505,7 @@ err_del_component:
static int hdmi_remove(struct platform_device *pdev)
{
- struct hdmi_context *hdata = hdmi_display.ctx;
+ struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a41c84ee3a2d..820b76234ef4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,8 +40,6 @@
#include "exynos_drm_iommu.h"
#include "exynos_mixer.h"
-#define get_mixer_manager(dev) platform_get_drvdata(to_platform_device(dev))
-
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
@@ -86,6 +84,7 @@ enum mixer_version_id {
};
struct mixer_context {
+ struct exynos_drm_manager manager;
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
@@ -104,6 +103,11 @@ struct mixer_context {
atomic_t wait_vsync_event;
};
+static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
+{
+ return container_of(mgr, struct mixer_context, manager);
+}
+
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
@@ -854,7 +858,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
struct drm_device *drm_dev)
{
int ret;
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct exynos_drm_private *priv;
priv = drm_dev->dev_private;
@@ -885,7 +889,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
if (is_drm_iommu_supported(mixer_ctx->drm_dev))
drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
@@ -893,7 +897,7 @@ static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
if (!mixer_ctx->powered) {
@@ -910,7 +914,7 @@ static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
/* disable vsync interrupt */
@@ -920,7 +924,7 @@ static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int win;
@@ -971,7 +975,7 @@ static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -993,7 +997,7 @@ static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &mixer_ctx->mixer_res;
int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
@@ -1021,7 +1025,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
- struct mixer_context *mixer_ctx = mgr->ctx;
+ struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1048,7 +1052,7 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
@@ -1062,7 +1066,7 @@ static void mixer_window_suspend(struct exynos_drm_manager *mgr)
static void mixer_window_resume(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct hdmi_win_data *win_data;
int i;
@@ -1077,7 +1081,7 @@ static void mixer_window_resume(struct exynos_drm_manager *mgr)
static void mixer_poweron(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1111,7 +1115,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
static void mixer_poweroff(struct exynos_drm_manager *mgr)
{
- struct mixer_context *ctx = mgr->ctx;
+ struct mixer_context *ctx = mgr_to_mixer(mgr);
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
@@ -1187,11 +1191,6 @@ static struct exynos_drm_manager_ops mixer_manager_ops = {
.win_disable = mixer_win_disable,
};
-static struct exynos_drm_manager mixer_manager = {
- .type = EXYNOS_DISPLAY_TYPE_HDMI,
- .ops = &mixer_manager_ops,
-};
-
static struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
@@ -1249,48 +1248,17 @@ MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct mixer_context *ctx;
- struct mixer_drv_data *drv;
int ret;
- dev_info(dev, "probe start\n");
-
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
- return -ENOMEM;
- }
-
- mutex_init(&ctx->mixer_mutex);
-
- if (dev->of_node) {
- const struct of_device_id *match;
- match = of_match_node(mixer_match_types, dev->of_node);
- drv = (struct mixer_drv_data *)match->data;
- } else {
- drv = (struct mixer_drv_data *)
- platform_get_device_id(pdev)->driver_data;
- }
-
- ctx->pdev = pdev;
- ctx->dev = dev;
- ctx->vp_enabled = drv->is_vp_enabled;
- ctx->has_sclk = drv->has_sclk;
- ctx->mxr_ver = drv->version;
- init_waitqueue_head(&ctx->wait_vsync_queue);
- atomic_set(&ctx->wait_vsync_event, 0);
-
- mixer_manager.ctx = ctx;
- ret = mixer_initialize(&mixer_manager, drm_dev);
+ ret = mixer_initialize(&ctx->manager, drm_dev);
if (ret)
return ret;
- platform_set_drvdata(pdev, &mixer_manager);
- ret = exynos_drm_crtc_create(&mixer_manager);
+ ret = exynos_drm_crtc_create(&ctx->manager);
if (ret) {
- mixer_mgr_remove(&mixer_manager);
+ mixer_mgr_remove(&ctx->manager);
return ret;
}
@@ -1301,11 +1269,9 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
- struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+ struct mixer_context *ctx = dev_get_drvdata(dev);
- dev_info(dev, "remove successful\n");
-
- mixer_mgr_remove(mgr);
+ mixer_mgr_remove(&ctx->manager);
pm_runtime_disable(dev);
}
@@ -1317,22 +1283,62 @@ static const struct component_ops mixer_component_ops = {
static int mixer_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct mixer_drv_data *drv;
+ struct mixer_context *ctx;
int ret;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ctx->mixer_mutex);
+
+ ctx->manager.type = EXYNOS_DISPLAY_TYPE_HDMI;
+ ctx->manager.ops = &mixer_manager_ops;
+
+ if (dev->of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(mixer_match_types, dev->of_node);
+ drv = (struct mixer_drv_data *)match->data;
+ } else {
+ drv = (struct mixer_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ ctx->pdev = pdev;
+ ctx->dev = dev;
+ ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->has_sclk = drv->has_sclk;
+ ctx->mxr_ver = drv->version;
+ init_waitqueue_head(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
+
+ platform_set_drvdata(pdev, ctx);
+
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
- mixer_manager.type);
+ ctx->manager.type);
if (ret)
return ret;
ret = component_add(&pdev->dev, &mixer_component_ops);
- if (ret)
+ if (ret) {
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
return ret;
}
static int mixer_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
component_del(&pdev->dev, &mixer_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index b15315576376..190e55f2f891 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -39,6 +39,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_lvds.o \
+ oaktrail_lvds_i2c.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9f158eab517a..0fafb8e2483a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -37,6 +37,201 @@
#include "gma_display.h"
#include <drm/drm_dp_helper.h>
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c is ongoing or whether
+ * the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+};
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+/*
+ * FIXME: This is the old dp aux helper; gma500 is the last driver that needs to
+ * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
+ */
+static int __deprecated
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
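Note (illustration, not part of the patch): the helper block added to cdv_intel_dp.c implements I2C over the DP AUX channel by splitting each i2c_msg into an address phase, a byte-by-byte payload transfer and a stop, all funneled through the single-byte aux_ch callback. The stand-alone sketch below simulates that message walk with a stub transfer routine; the message and flag types are simplified stand-ins, not the i2c core's:

#include <stdint.h>
#include <stdio.h>

#define MSG_RD 0x01			/* simplified stand-in for I2C_M_RD */

struct msg {				/* simplified stand-in for struct i2c_msg */
	uint16_t addr;
	uint16_t flags;
	uint16_t len;
	uint8_t *buf;
};

/* stub "AUX channel": pretend the sink returns 0xA0, 0xA1, ... on reads */
static int aux_byte(int reading, uint8_t wr, uint8_t *rd)
{
	static uint8_t offset;

	(void)wr;			/* a real backend would push wr out on AUX */
	if (reading)
		*rd = 0xA0 + offset++;
	return 0;			/* 0 = success, <0 = AUX error */
}

static int xfer(struct msg *msgs, int num)
{
	int m, b, ret = 0;

	for (m = 0; m < num; m++) {
		int reading = msgs[m].flags & MSG_RD;

		/* address phase (MODE_I2C_START with msgs[m].addr) goes here */
		for (b = 0; b < msgs[m].len && ret >= 0; b++)
			ret = reading ? aux_byte(1, 0, &msgs[m].buf[b])
				      : aux_byte(0, msgs[m].buf[b], NULL);
		if (ret < 0)
			break;
	}
	/* stop phase (MODE_I2C_STOP) goes here */
	return ret < 0 ? ret : num;
}

int main(void)
{
	uint8_t out[1] = { 0x00 }, in[4] = { 0 };
	struct msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,      .len = 1, .buf = out },
		{ .addr = 0x50, .flags = MSG_RD, .len = 4, .buf = in  },
	};
	int ret = xfer(msgs, 2);

	printf("ret=%d data=%02x %02x %02x %02x\n", ret,
	       in[0], in[1], in[2], in[3]);
	return ret < 0;
}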
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 87885d8c06e8..6b43ae3ffd73 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -25,6 +25,7 @@
*/
#include <linux/freezer.h>
+#include <video/mipi_display.h>
#include "mdfld_dsi_output.h"
#include "mdfld_dsi_pkg_sender.h"
@@ -32,20 +33,6 @@
#define MDFLD_DSI_READ_MAX_COUNT 5000
-enum data_type {
- DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03,
- DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13,
- DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23,
- DSI_DT_GENERIC_READ_0 = 0x04,
- DSI_DT_GENERIC_READ_1 = 0x14,
- DSI_DT_GENERIC_READ_2 = 0x24,
- DSI_DT_GENERIC_LONG_WRITE = 0x29,
- DSI_DT_DCS_SHORT_WRITE_0 = 0x05,
- DSI_DT_DCS_SHORT_WRITE_1 = 0x15,
- DSI_DT_DCS_READ = 0x06,
- DSI_DT_DCS_LONG_WRITE = 0x39,
-};
-
enum {
MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
};
@@ -321,9 +308,9 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 cmd;
switch (data_type) {
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
cmd = *data;
break;
default:
@@ -334,12 +321,12 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
/*wait for 120 milliseconds in case exit_sleep_mode just be sent*/
- if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
/*TODO: replace it with msleep later*/
mdelay(120);
}
- if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
/*TODO: replace it with msleep later*/
mdelay(120);
}
@@ -352,9 +339,9 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 cmd;
switch (data_type) {
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
cmd = *data;
break;
default:
@@ -362,15 +349,15 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
}
/*update panel status*/
- if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
/*TODO: replace it with msleep later*/
mdelay(120);
- } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
/*TODO: replace it with msleep later*/
mdelay(120);
- } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+ } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
/*TODO: replace it with msleep later*/
mdelay(5);
}
@@ -405,19 +392,19 @@ static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
}
switch (data_type) {
- case DSI_DT_GENERIC_SHORT_WRITE_0:
- case DSI_DT_GENERIC_SHORT_WRITE_1:
- case DSI_DT_GENERIC_SHORT_WRITE_2:
- case DSI_DT_GENERIC_READ_0:
- case DSI_DT_GENERIC_READ_1:
- case DSI_DT_GENERIC_READ_2:
- case DSI_DT_DCS_SHORT_WRITE_0:
- case DSI_DT_DCS_SHORT_WRITE_1:
- case DSI_DT_DCS_READ:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_READ:
ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
break;
- case DSI_DT_GENERIC_LONG_WRITE:
- case DSI_DT_DCS_LONG_WRITE:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_LONG_WRITE:
ret = send_long_pkg(sender, data_type, data, len, hs);
break;
}
@@ -440,7 +427,7 @@ int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
}
spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+ send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
spin_unlock_irqrestore(&sender->lock, flags);
return 0;
@@ -461,10 +448,10 @@ int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
data[0] = cmd;
if (param_num) {
- data_type = DSI_DT_DCS_SHORT_WRITE_1;
+ data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
data[1] = param;
} else {
- data_type = DSI_DT_DCS_SHORT_WRITE_0;
+ data_type = MIPI_DSI_DCS_SHORT_WRITE;
data[1] = 0;
}
@@ -489,17 +476,17 @@ int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
switch (param_num) {
case 0:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
data[0] = 0;
data[1] = 0;
break;
case 1:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
data[0] = param0;
data[1] = 0;
break;
case 2:
- data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+ data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
data[0] = param0;
data[1] = param1;
break;
@@ -523,7 +510,7 @@ int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
}
spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+ send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
spin_unlock_irqrestore(&sender->lock, flags);
return 0;
@@ -594,7 +581,7 @@ int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
return -EINVAL;
}
- return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+ return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
data, len, hs);
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
index 459cd7ea8b81..0478a21c15d5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -62,18 +62,6 @@ struct mdfld_dsi_pkg_sender {
u32 mipi_cmd_len_reg;
};
-/* DCS definitions */
-#define DCS_SOFT_RESET 0x01
-#define DCS_ENTER_SLEEP_MODE 0x10
-#define DCS_EXIT_SLEEP_MODE 0x11
-#define DCS_SET_DISPLAY_OFF 0x28
-#define DCS_SET_DISPLAY_ON 0x29
-#define DCS_SET_COLUMN_ADDRESS 0x2a
-#define DCS_SET_PAGE_ADDRESS 0x2b
-#define DCS_WRITE_MEM_START 0x2c
-#define DCS_SET_TEAR_OFF 0x34
-#define DCS_SET_TEAR_ON 0x35
-
extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
int pipe);
extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 0d39da6e8b7a..83bbc271bcfb 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -359,22 +359,26 @@ void oaktrail_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
+ edid = NULL;
mutex_lock(&dev->mode_config.mutex);
i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
- if (i2c_adap == NULL)
- dev_err(dev->dev, "No ddc adapter available!\n");
+ if (i2c_adap)
+ edid = drm_get_edid(connector, i2c_adap);
+ if (edid == NULL && dev_priv->lpc_gpio_base) {
+ oaktrail_lvds_i2c_init(encoder);
+ if (gma_encoder->ddc_bus != NULL) {
+ i2c_adap = &gma_encoder->ddc_bus->adapter;
+ edid = drm_get_edid(connector, i2c_adap);
+ }
+ }
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- if (i2c_adap) {
- edid = drm_get_edid(connector, i2c_adap);
- if (edid) {
- drm_mode_connector_update_edid_property(connector,
- edid);
- drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ kfree(edid);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -383,7 +387,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
goto out; /* FIXME: check for quirks */
}
}
- }
+ } else
+ dev_err(dev->dev, "No ddc adapter available!\n");
/*
* If we didn't get EDID, try getting panel timing
* from configuration data
@@ -411,8 +416,10 @@ failed_find:
mutex_unlock(&dev->mode_config.mutex);
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (gma_encoder->ddc_bus)
+ if (gma_encoder->ddc_bus) {
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ gma_encoder->ddc_bus = NULL;
+ }
/* failed_ddc: */
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
new file mode 100644
index 000000000000..f913a62eee5f
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2002-2010, Intel Corporation.
+ * Copyright (c) 2014 ATRON electronic GmbH
+ * Author: Jan Safrata <jan.nikitenko@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+
+/*
+ * LPC GPIO based I2C bus for LVDS of Atom E6xx
+ */
+
+/*-----------------------------------------------------------------------------
+ * LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are part
+ * of the Atom E6xx [D31:F0]
+ ----------------------------------------------------------------------------*/
+#define RGEN 0x20
+#define RGIO 0x24
+#define RGLVL 0x28
+#define RGTPE 0x2C
+#define RGTNE 0x30
+#define RGGPE 0x34
+#define RGSMI 0x38
+#define RGTS 0x3C
+
+/* The LVDS GPIO clock line is GPIOSUS[3]
+ * The LVDS GPIO data line is GPIOSUS[4]
+ */
+#define GPIO_CLOCK 0x08
+#define GPIO_DATA 0x10
+
+#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
+#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))
+
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val, tmp;
+
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ tmp = LPC_READ_REG(chan, RGLVL);
+ val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
+
+ return val;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val, tmp;
+
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ tmp = LPC_READ_REG(chan, RGLVL);
+ val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
+
+ return val;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val;
+
+ if (state_high) {
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ } else {
+ val = LPC_READ_REG(chan, RGIO);
+ val &= ~GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGIO, val);
+ val = LPC_READ_REG(chan, RGLVL);
+ val &= ~GPIO_CLOCK;
+ LPC_WRITE_REG(chan, RGLVL, val);
+ }
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ u32 val;
+
+ if (state_high) {
+ val = LPC_READ_REG(chan, RGIO);
+ val |= GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ } else {
+ val = LPC_READ_REG(chan, RGIO);
+ val &= ~GPIO_DATA;
+ LPC_WRITE_REG(chan, RGIO, val);
+ val = LPC_READ_REG(chan, RGLVL);
+ val &= ~GPIO_DATA;
+ LPC_WRITE_REG(chan, RGLVL, val);
+ }
+}
+
+void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ return;
+
+ chan->drm_dev = dev;
+ chan->reg = dev_priv->lpc_gpio_base;
+ strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
+ chan->adapter.owner = THIS_MODULE;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 100;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(50);
+
+ if (i2c_bit_add_bus(&chan->adapter)) {
+ kfree(chan);
+ return;
+ }
+
+ gma_encoder->ddc_bus = chan;
+}
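oaktrail_lvds_i2c_init() above is a textbook i2c-algo-bit registration: provide SCL/SDA set/get callbacks, point adapter.algo_data at the i2c_algo_bit_data, and hand the adapter to i2c_bit_add_bus(). A stripped-down, self-contained sketch of that pattern follows; the pin-state struct and all names are made up for illustration, and a real driver would touch hardware in the callbacks the way the LPC code above does.

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/module.h>

struct ex_pins {		/* stand-in for real GPIO/LPC register state */
	int scl;
	int sda;
};

static void ex_setscl(void *data, int state) { ((struct ex_pins *)data)->scl = state; }
static void ex_setsda(void *data, int state) { ((struct ex_pins *)data)->sda = state; }
static int ex_getscl(void *data) { return ((struct ex_pins *)data)->scl; }
static int ex_getsda(void *data) { return ((struct ex_pins *)data)->sda; }

static struct ex_pins ex_bus_pins = { .scl = 1, .sda = 1 };

static struct i2c_algo_bit_data ex_bit_algo = {
	.setsda	 = ex_setsda,
	.setscl	 = ex_setscl,
	.getsda	 = ex_getsda,
	.getscl	 = ex_getscl,
	.udelay	 = 100,			/* same 100 us delay as the LPC code above */
	.timeout = HZ / 5,
	.data	 = &ex_bus_pins,
};

static struct i2c_adapter ex_adapter = {
	.owner	   = THIS_MODULE,
	.algo_data = &ex_bit_algo,
	.name	   = "bit-bang sketch",
};

/* i2c_bit_add_bus(&ex_adapter) then exposes the bus; the LVDS code above
 * stores the resulting channel in gma_encoder->ddc_bus and reads the EDID
 * over it. */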
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6ec3a905fdd2..92e7e5795398 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -212,6 +212,8 @@ static int psb_driver_unload(struct drm_device *dev)
}
if (dev_priv->aux_pdev)
pci_dev_put(dev_priv->aux_pdev);
+ if (dev_priv->lpc_pdev)
+ pci_dev_put(dev_priv->lpc_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
@@ -280,6 +282,24 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG_KMS("Couldn't find aux pci device");
}
dev_priv->gmbus_reg = dev_priv->aux_reg;
+
+ dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+ if (dev_priv->lpc_pdev) {
+ pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ &dev_priv->lpc_gpio_base);
+ pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ (u32)dev_priv->lpc_gpio_base | (1L<<31));
+ pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+ &dev_priv->lpc_gpio_base);
+ dev_priv->lpc_gpio_base &= 0xffc0;
+ if (dev_priv->lpc_gpio_base)
+ DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n",
+ dev_priv->lpc_gpio_base);
+ else {
+ pci_dev_put(dev_priv->lpc_pdev);
+ dev_priv->lpc_pdev = NULL;
+ }
+ }
} else {
dev_priv->gmbus_reg = dev_priv->vdc_reg;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 55ebe2bd88dd..e38057b91865 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -83,6 +83,7 @@ enum {
#define PSB_PGETBL_CTL 0x2020
#define _PSB_PGETBL_ENABLED 0x00000001
#define PSB_SGX_2D_SLAVE_PORT 0x4000
+#define PSB_LPC_GBA 0x44
/* TODO: To get rid of */
#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
@@ -441,6 +442,7 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
+ struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
const struct psb_offset *regmap;
@@ -470,6 +472,7 @@ struct drm_psb_private {
uint8_t __iomem *sgx_reg;
uint8_t __iomem *vdc_reg;
uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
+ uint16_t lpc_gpio_base;
uint32_t gatt_free_offset;
/* Fencing / irq */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 87b50ba64ed4..b21a09451d1d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -21,6 +21,7 @@
#include <linux/i2c.h>
#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 336bd3aa1a06..860dd2177ca1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -223,6 +223,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
extern void oaktrail_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0be96fdb5e28..58529cea575d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1631,57 +1631,8 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void
-psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
- struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
- struct drm_device *dev = connector->dev;
-
- if (psb_intel_sdvo_connector->left)
- drm_property_destroy(dev, psb_intel_sdvo_connector->left);
- if (psb_intel_sdvo_connector->right)
- drm_property_destroy(dev, psb_intel_sdvo_connector->right);
- if (psb_intel_sdvo_connector->top)
- drm_property_destroy(dev, psb_intel_sdvo_connector->top);
- if (psb_intel_sdvo_connector->bottom)
- drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
- if (psb_intel_sdvo_connector->hpos)
- drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
- if (psb_intel_sdvo_connector->vpos)
- drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
- if (psb_intel_sdvo_connector->saturation)
- drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
- if (psb_intel_sdvo_connector->contrast)
- drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
- if (psb_intel_sdvo_connector->hue)
- drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
- if (psb_intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
- if (psb_intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
- if (psb_intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
- if (psb_intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
- if (psb_intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
- if (psb_intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
- if (psb_intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
- if (psb_intel_sdvo_connector->brightness)
- drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
-}
-
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
- struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-
- if (psb_intel_sdvo_connector->tv_format)
- drm_property_destroy(connector->dev,
- psb_intel_sdvo_connector->tv_format);
-
- psb_intel_sdvo_destroy_enhance_property(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db462a2..22c7ed63a001 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,6 +1,12 @@
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
+config DRM_I2C_ADV7511
+ tristate "ADV7511 encoder"
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 43aa33baebed..2c72eb584ab7 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,5 +1,7 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
new file mode 100644
index 000000000000..faf1c0c5ab2e
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -0,0 +1,1010 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "adv7511.h"
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+
+ struct regmap *regmap;
+ struct regmap *packet_memory_regmap;
+ enum drm_connector_status status;
+ int dpms_mode;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+};
+
+static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+/* ADI recommended values for proper operation. */
+static const struct reg_default adv7511_fixed_registers[] = {
+ { 0x98, 0x03 },
+ { 0x9a, 0xe0 },
+ { 0x9c, 0x30 },
+ { 0x9d, 0x61 },
+ { 0xa2, 0xa4 },
+ { 0xa3, 0xa4 },
+ { 0xe0, 0xd0 },
+ { 0xf9, 0x00 },
+ { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+ 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+ 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+ 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+ 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+ 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+ 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+ 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+ 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+ 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+ 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+ 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+ 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADV7511_REG_CHIP_REVISION:
+ case ADV7511_REG_SPDIF_FREQ:
+ case ADV7511_REG_CTS_AUTOMATIC1:
+ case ADV7511_REG_CTS_AUTOMATIC2:
+ case ADV7511_REG_VIC_DETECTED:
+ case ADV7511_REG_VIC_SEND:
+ case ADV7511_REG_AUX_VIC_DETECTED:
+ case ADV7511_REG_STATUS:
+ case ADV7511_REG_GC(1):
+ case ADV7511_REG_INT(0):
+ case ADV7511_REG_INT(1):
+ case ADV7511_REG_PLL_STATUS:
+ case ADV7511_REG_AN(0):
+ case ADV7511_REG_AN(1):
+ case ADV7511_REG_AN(2):
+ case ADV7511_REG_AN(3):
+ case ADV7511_REG_AN(4):
+ case ADV7511_REG_AN(5):
+ case ADV7511_REG_AN(6):
+ case ADV7511_REG_AN(7):
+ case ADV7511_REG_HDCP_STATUS:
+ case ADV7511_REG_BCAPS:
+ case ADV7511_REG_BKSV(0):
+ case ADV7511_REG_BKSV(1):
+ case ADV7511_REG_BKSV(2):
+ case ADV7511_REG_BKSV(3):
+ case ADV7511_REG_BKSV(4):
+ case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_BSTATUS(0):
+ case ADV7511_REG_BSTATUS(1):
+ case ADV7511_REG_CHIP_ID_HIGH:
+ case ADV7511_REG_CHIP_ID_LOW:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults_raw = adv7511_register_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+ .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+ const uint16_t *coeff,
+ unsigned int scaling_factor)
+{
+ unsigned int i;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+ if (enable) {
+ for (i = 0; i < 12; ++i) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_CSC_UPPER(i),
+ 0x1f, coeff[i] >> 8);
+ regmap_write(adv7511->regmap,
+ ADV7511_REG_CSC_LOWER(i),
+ coeff[i] & 0xff);
+ }
+ }
+
+ if (enable)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0xe0, 0x80 | (scaling_factor << 5));
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0x80, 0x00);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, 0);
+}
+
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0xff);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0xff);
+ }
+
+ return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0x00);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0x00);
+ }
+
+ return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+ 0x0734, 0x04ad, 0x0000, 0x1c1b,
+ 0x1ddc, 0x04ad, 0x1f24, 0x0135,
+ 0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+ struct drm_connector *connector,
+ bool rgb)
+{
+ struct adv7511_video_config config;
+ bool output_format_422, output_format_ycbcr;
+ unsigned int mode;
+ uint8_t infoframe[17];
+
+ if (adv7511->edid)
+ config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+ else
+ config.hdmi_mode = false;
+
+ hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+ config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ if (rgb) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ } else {
+ config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+ config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+ if ((connector->display_info.color_formats &
+ DRM_COLOR_FORMAT_YCRCB422) &&
+ config.hdmi_mode) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace =
+ HDMI_COLORSPACE_YUV422;
+ } else {
+ config.csc_enable = true;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ }
+ }
+
+ if (config.hdmi_mode) {
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+ switch (config.avi_infoframe.colorspace) {
+ case HDMI_COLORSPACE_YUV444:
+ output_format_422 = false;
+ output_format_ycbcr = true;
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ output_format_422 = true;
+ output_format_ycbcr = true;
+ break;
+ default:
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ break;
+ }
+ } else {
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ }
+
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+ adv7511_set_colormap(adv7511, config.csc_enable,
+ config.csc_coefficents,
+ config.csc_scaling_factor);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+ (output_format_422 << 7) | output_format_ycbcr);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+ ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+ hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+ sizeof(infoframe));
+
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ infoframe + 1, sizeof(infoframe) - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+ const struct adv7511_link_config *config)
+{
+ /*
+ * The input style values documented in the datasheet don't match the
+ * hardware register field values :-(
+ */
+ static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+ unsigned int clock_delay;
+ unsigned int color_depth;
+ unsigned int input_id;
+
+ clock_delay = (config->clock_delay + 1200) / 400;
+ color_depth = config->input_color_depth == 8 ? 3
+ : (config->input_color_depth == 10 ? 1 : 2);
+
+ /* TODO Support input ID 6 */
+ if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+ input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+ ? 5 : 0;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+ input_id = config->embedded_sync ? 8 : 7;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+ input_id = config->embedded_sync ? 4 : 3;
+ else
+ input_id = config->embedded_sync ? 2 : 1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+ input_id);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+ (color_depth << 4) |
+ (input_styles[config->input_style] << 2));
+ regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+ config->input_justification << 3);
+ regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+ config->sync_pulse << 2);
+
+ regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+ adv7511->embedded_sync = config->embedded_sync;
+ adv7511->hsync_polarity = config->hsync_polarity;
+ adv7511->vsync_polarity = config->vsync_polarity;
+ adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+ unsigned int irq0;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return false;
+
+ if (irq0 & ADV7511_INT0_HDP) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_HDP);
+ return true;
+ }
+
+ return false;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+
+ if (adv7511_hpd(adv7511))
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ wake_up_all(&adv7511->wq);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+ unsigned int irq)
+{
+ unsigned int irq0, irq1;
+ unsigned int pending;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return 0;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+ return 0;
+
+ pending = (irq1 << 8) | irq0;
+
+ return pending & irq;
+}
+
+static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+ int timeout)
+{
+ unsigned int pending;
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+ adv7511_is_interrupt_pending(adv7511, irq),
+ msecs_to_jiffies(timeout));
+ if (ret <= 0)
+ return 0;
+ pending = adv7511_is_interrupt_pending(adv7511, irq);
+ } else {
+ if (timeout < 25)
+ timeout = 25;
+ do {
+ pending = adv7511_is_interrupt_pending(adv7511, irq);
+ if (pending)
+ break;
+ msleep(25);
+ timeout -= 25;
+ } while (timeout >= 25);
+ }
+
+ return pending;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct adv7511 *adv7511 = data;
+ struct i2c_msg xfer[2];
+ uint8_t offset;
+ unsigned int i;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
+ if (adv7511->current_edid_segment != block / 2) {
+ unsigned int status;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if (status != 2) {
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+ ret = adv7511_wait_for_interrupt(adv7511,
+ ADV7511_INT0_EDID_READY |
+ ADV7511_INT1_DDC_ERROR, 200);
+
+ if (!(ret & ADV7511_INT0_EDID_READY))
+ return -EIO;
+ }
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+
+ /* Break the read into 64 byte chunks: more I2C controllers are
+ * likely to support 64 byte transfers than 256 byte transfers.
+ */
+
+ xfer[0].addr = adv7511->i2c_edid->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &offset;
+ xfer[1].addr = adv7511->i2c_edid->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 64;
+ xfer[1].buf = adv7511->edid_buf;
+
+ offset = 0;
+
+ for (i = 0; i < 4; ++i) {
+ ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+ ARRAY_SIZE(xfer));
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+
+ xfer[1].buf += 64;
+ offset += 64;
+ }
+
+ adv7511->current_edid_segment = block / 2;
+ }
+
+ if (block % 2 == 0)
+ memcpy(buf, adv7511->edid_buf, len);
+ else
+ memcpy(buf, adv7511->edid_buf + 128, len);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ struct edid *edid;
+ unsigned int count;
+
+ /* Reading the EDID only works if the device is powered */
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ adv7511->current_edid_segment = -1;
+ }
+
+ edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+
+ kfree(adv7511->edid);
+ adv7511->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+ return count;
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ /*
+ * Per spec it is allowed to pulse the HDP signal to indicate
+ * that the EDID information has changed. Some monitors do this
+ * when they wake up from standby or are enabled. When the HDP
+ * goes low the adv7511 is reset and the outputs are disabled
+ * which might cause the monitor to go to standby again. To
+ * avoid this we ignore the HDP pin for the first few seconds
+ * after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
+ /* Most of the registers are reset during power down or
+ * when HPD is low
+ */
+ regcache_sync(adv7511->regmap);
+ break;
+ default:
+ /* TODO: set up additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+ break;
+ }
+
+ adv7511->dpms_mode = mode;
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ enum drm_connector_status status;
+ unsigned int val;
+ bool hpd;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ return connector_status_disconnected;
+
+ if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ hpd = adv7511_hpd(adv7511);
+
+ /* The chip resets itself when the cable is disconnected, so if there
+ * is a pending HPD interrupt and the cable is connected, there was at
+ * least one transition from disconnected to connected and the chip
+ * has to be reinitialized. */
+ if (status == connector_status_connected && hpd &&
+ adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+ adv7511_get_modes(encoder, connector);
+ if (adv7511->status == connector_status_connected)
+ status = connector_status_disconnected;
+ } else {
+ /* Re-enable HDP sensing */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_BOTH);
+ }
+
+ adv7511->status = status;
+ return status;
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ return MODE_OK;
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ unsigned int low_refresh_rate;
+ unsigned int hsync_polarity = 0;
+ unsigned int vsync_polarity = 0;
+
+ if (adv7511->embedded_sync) {
+ unsigned int hsync_offset, hsync_len;
+ unsigned int vsync_offset, vsync_len;
+
+ hsync_offset = adj_mode->crtc_hsync_start -
+ adj_mode->crtc_hdisplay;
+ vsync_offset = adj_mode->crtc_vsync_start -
+ adj_mode->crtc_vdisplay;
+ hsync_len = adj_mode->crtc_hsync_end -
+ adj_mode->crtc_hsync_start;
+ vsync_len = adj_mode->crtc_vsync_end -
+ adj_mode->crtc_vsync_start;
+
+ /* The hardware vsync generator has an off-by-one bug */
+ vsync_offset += 1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+ ((hsync_offset >> 10) & 0x7) << 5);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+ (hsync_offset >> 2) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+ ((hsync_offset & 0x3) << 6) |
+ ((hsync_len >> 4) & 0x3f));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+ ((hsync_len & 0xf) << 4) |
+ ((vsync_offset >> 6) & 0xf));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+ ((vsync_offset & 0x3f) << 2) |
+ ((vsync_len >> 8) & 0x3));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+ vsync_len & 0xff);
+
+ hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+ } else {
+ enum adv7511_sync_polarity mode_hsync_polarity;
+ enum adv7511_sync_polarity mode_vsync_polarity;
+
+ /*
+ * If the input signal is always low or always high we want to
+ * invert or let it pass through depending on the polarity of the
+ * current mode.
+ */
+ if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adv7511->hsync_polarity != mode_hsync_polarity &&
+ adv7511->hsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ hsync_polarity = 1;
+
+ if (adv7511->vsync_polarity != mode_vsync_polarity &&
+ adv7511->vsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ vsync_polarity = 1;
+ }
+
+ if (mode->vrefresh <= 24000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+ else if (mode->vrefresh <= 25000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+ else if (mode->vrefresh <= 30000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+ else
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+ /*
+ * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+ * supposed to give better results.
+ */
+
+ adv7511->f_tmds = mode->clock;
+}
+
+static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+ .dpms = adv7511_encoder_dpms,
+ .mode_valid = adv7511_encoder_mode_valid,
+ .mode_set = adv7511_encoder_mode_set,
+ .detect = adv7511_encoder_detect,
+ .get_modes = adv7511_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+ struct adv7511_link_config *config)
+{
+ const char *str;
+ int ret;
+
+ memset(config, 0, sizeof(*config));
+
+ of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+ if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+ config->input_color_depth != 12)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-colorspace", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "rgb"))
+ config->input_colorspace = HDMI_COLORSPACE_RGB;
+ else if (!strcmp(str, "yuv422"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV422;
+ else if (!strcmp(str, "yuv444"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-clock", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "1x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_1X;
+ else if (!strcmp(str, "2x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_2X;
+ else if (!strcmp(str, "ddr"))
+ config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+ else
+ return -EINVAL;
+
+ if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+ config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+ ret = of_property_read_u32(np, "adi,input-style",
+ &config->input_style);
+ if (ret)
+ return ret;
+
+ if (config->input_style < 1 || config->input_style > 3)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-justification",
+ &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "left"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_LEFT;
+ else if (!strcmp(str, "evenly"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_EVENLY;
+ else if (!strcmp(str, "right"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_RIGHT;
+ else
+ return -EINVAL;
+
+ } else {
+ config->input_style = 1;
+ config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+ }
+
+ of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+ if (config->clock_delay < -1200 || config->clock_delay > 1600)
+ return -EINVAL;
+
+ config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+ /* Hardcode the sync pulse configurations for now. */
+ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+ config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+ config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+ return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct adv7511_link_config link_config;
+ struct adv7511 *adv7511;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+ if (!adv7511)
+ return -ENOMEM;
+
+ adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+ adv7511->status = connector_status_disconnected;
+
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (ret)
+ return ret;
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+ * inactive to wake up the encoder.
+ */
+ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(adv7511->gpio_pd))
+ return PTR_ERR(adv7511->gpio_pd);
+
+ if (adv7511->gpio_pd) {
+ mdelay(5);
+ gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+ }
+
+ adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+ if (IS_ERR(adv7511->regmap))
+ return PTR_ERR(adv7511->regmap);
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "Rev. %d\n", val);
+
+ ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+ packet_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ adv7511_packet_disable(adv7511, 0xffff);
+
+ adv7511->i2c_main = i2c;
+ adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+ if (!adv7511->i2c_edid)
+ return -ENOMEM;
+
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_i2c_unregister_device;
+ }
+
+ /* CEC is unused for now */
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
+
+ adv7511->current_edid_segment = -1;
+
+ i2c_set_clientdata(i2c, adv7511);
+
+ adv7511_set_link_config(adv7511, &link_config);
+
+ return 0;
+
+err_i2c_unregister_device:
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ kfree(adv7511->edid);
+
+ return 0;
+}
+
+static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ encoder->slave_priv = adv7511;
+ encoder->slave_funcs = &adv7511_encoder_funcs;
+
+ adv7511->encoder = &encoder->base;
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+ { "adv7511", 0 },
+ { "adv7511w", 0 },
+ { "adv7513", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", },
+ { .compatible = "adi,adv7511w", },
+ { .compatible = "adi,adv7513", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct drm_i2c_encoder_driver adv7511_driver = {
+ .i2c_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ },
+
+ .encoder_init = adv7511_encoder_init,
+};
+
+static int __init adv7511_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+ drm_i2c_encoder_unregister(&adv7511_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
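The driver only registers a drm_i2c_encoder_driver; it creates no DRM objects of its own, so a KMS driver picks the chip up through the encoder-slave infrastructure. The following is a rough sketch of that binding, assuming the drm_i2c_encoder_init() helper from drm_encoder_slave.h behaves as it does for other slave encoders of this era; the function name and the I2C address are illustrative, not code from this patch.

#include <drm/drm_encoder_slave.h>

static int example_attach_adv7511(struct drm_device *dev,
				  struct drm_encoder_slave *slave,
				  struct i2c_adapter *ddc_adapter)
{
	struct i2c_board_info info = {
		.type = "adv7511",	/* must match adv7511_i2c_ids */
		.addr = 0x39,		/* illustrative 7-bit main address */
	};

	/*
	 * drm_i2c_encoder_init() instantiates the i2c client, looks up the
	 * registered drm_i2c_encoder_driver and invokes its .encoder_init
	 * hook (adv7511_encoder_init() above), which installs
	 * adv7511_encoder_funcs as the slave_funcs used for dpms, detect,
	 * mode_set and get_modes.
	 */
	return drm_i2c_encoder_init(dev, slave, ddc_adapter, &info);
}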
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
new file mode 100644
index 000000000000..6599ed538426
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -0,0 +1,289 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+
+#define ADV7511_REG_CHIP_REVISION 0x00
+#define ADV7511_REG_N0 0x01
+#define ADV7511_REG_N1 0x02
+#define ADV7511_REG_N2 0x03
+#define ADV7511_REG_SPDIF_FREQ 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1 0x05
+#define ADV7511_REG_CTS_AUTOMATIC2 0x06
+#define ADV7511_REG_CTS_MANUAL0 0x07
+#define ADV7511_REG_CTS_MANUAL1 0x08
+#define ADV7511_REG_CTS_MANUAL2 0x09
+#define ADV7511_REG_AUDIO_SOURCE 0x0a
+#define ADV7511_REG_AUDIO_CONFIG 0x0b
+#define ADV7511_REG_I2S_CONFIG 0x0c
+#define ADV7511_REG_I2S_WIDTH 0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
+#define ADV7511_REG_AUDIO_CFG1 0x12
+#define ADV7511_REG_AUDIO_CFG2 0x13
+#define ADV7511_REG_AUDIO_CFG3 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
+#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION 0x3b
+#define ADV7511_REG_VIC_MANUAL 0x3c
+#define ADV7511_REG_VIC_SEND 0x3d
+#define ADV7511_REG_VIC_DETECTED 0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
+#define ADV7511_REG_PACKET_ENABLE0 0x40
+#define ADV7511_REG_POWER 0x41
+#define ADV7511_REG_STATUS 0x42
+#define ADV7511_REG_EDID_I2C_ADDR 0x43
+#define ADV7511_REG_PACKET_ENABLE1 0x44
+#define ADV7511_REG_PACKET_I2C_ADDR 0x45
+#define ADV7511_REG_DSD_ENABLE 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
+#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
+#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
+#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
+#define ADV7511_REG_INT(x) (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV 0x9d
+#define ADV7511_REG_PLL_STATUS 0x9e
+#define ADV7511_REG_HDMI_POWER 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
+#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS 0xb8
+#define ADV7511_REG_BCAPS 0xbe
+#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT 0xc4
+#define ADV7511_REG_DDC_STATUS 0xc8
+#define ADV7511_REG_EDID_READ_CTRL 0xc9
+#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
+#define ADV7511_REG_POWER2 0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV 0xde
+#define ADV7511_REG_ARC_CTRL 0xdf
+#define ADV7511_REG_CEC_I2C_ADDR 0xe1
+#define ADV7511_REG_CEC_CTRL 0xe2
+#define ADV7511_REG_CHIP_ID_HIGH 0xf5
+#define ADV7511_REG_CHIP_ID_LOW 0xf6
+
+#define ADV7511_CSC_ENABLE BIT(7)
+#define ADV7511_CSC_UPDATE_MODE BIT(5)
+
+#define ADV7511_INT0_HDP BIT(7)
+#define ADV7511_INT0_VSYNC BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
+#define ADV7511_INT0_EDID_READY BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR BIT(7)
+#define ADV7511_INT1_BKSV BIT(6)
+#define ADV7511_INT1_CEC_TX_READY BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK 0x2
+#define ADV7511_HDMI_CFG_MODE_DVI 0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
+
+#define ADV7511_AUDIO_SELECT_I2C 0x0
+#define ADV7511_AUDIO_SELECT_SPDIF 0x1
+#define ADV7511_AUDIO_SELECT_DSD 0x2
+#define ADV7511_AUDIO_SELECT_HBR 0x3
+#define ADV7511_AUDIO_SELECT_DST 0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16 0x2
+#define ADV7511_I2S_SAMPLE_LEN_20 0x3
+#define ADV7511_I2S_SAMPLE_LEN_18 0x4
+#define ADV7511_I2S_SAMPLE_LEN_22 0x5
+#define ADV7511_I2S_SAMPLE_LEN_19 0x8
+#define ADV7511_I2S_SAMPLE_LEN_23 0x9
+#define ADV7511_I2S_SAMPLE_LEN_24 0xb
+#define ADV7511_I2S_SAMPLE_LEN_17 0xc
+#define ADV7511_I2S_SAMPLE_LEN_21 0xd
+
+#define ADV7511_SAMPLE_FREQ_44100 0x0
+#define ADV7511_SAMPLE_FREQ_48000 0x2
+#define ADV7511_SAMPLE_FREQ_32000 0x3
+#define ADV7511_SAMPLE_FREQ_88200 0x8
+#define ADV7511_SAMPLE_FREQ_96000 0xa
+#define ADV7511_SAMPLE_FREQ_176400 0xc
+#define ADV7511_SAMPLE_FREQ_192000 0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
+#define ADV7511_STATUS_HPD BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
+#define ADV7511_PACKET_ENABLE_GM BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+
+#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40
+#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S 0
+#define ADV7511_AUDIO_SOURCE_SPDIF 1
+
+#define ADV7511_I2S_FORMAT_I2S 0
+#define ADV7511_I2S_FORMAT_RIGHT_J 1
+#define ADV7511_I2S_FORMAT_LEFT_J 2
+
+#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+
+enum adv7511_input_clock {
+ ADV7511_INPUT_CLOCK_1X,
+ ADV7511_INPUT_CLOCK_2X,
+ ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+ ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+ ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+ ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+ ADV7511_INPUT_SYNC_PULSE_DE = 0,
+ ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+ ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+ ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
+ * the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+ ADV7511_SYNC_POLARITY_PASSTHROUGH,
+ ADV7511_SYNC_POLARITY_LOW,
+ ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth: Number of bits per color component (8, 10 or 12)
+ * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock: The input video clock style (1x, 2x, DDR)
+ * @input_style: The input component arrangement variant
+ * @input_justification: Video input format bit justification
+ * @clock_delay: Clock delay for the input clock (in ps)
+ * @embedded_sync: Video input uses BT.656-style embedded sync
+ * @sync_pulse: Select the sync pulse
+ * @vsync_polarity: vsync input signal configuration
+ * @hsync_polarity: hsync input signal configuration
+ */
+struct adv7511_link_config {
+ unsigned int input_color_depth;
+ enum hdmi_colorspace input_colorspace;
+ enum adv7511_input_clock input_clock;
+ unsigned int input_style;
+ enum adv7511_input_justification input_justification;
+
+ int clock_delay;
+
+ bool embedded_sync;
+ enum adv7511_input_sync_pulse sync_pulse;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+ ADV7511_CSC_SCALING_1 = 0,
+ ADV7511_CSC_SCALING_2 = 1,
+ ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 hardware configuration
+ * @csc_enable: Whether to enable color space conversion
+ * @csc_scaling_factor: Color space conversion scaling factor
+ * @csc_coefficents: Color space conversion coefficients
+ * @hdmi_mode: Whether to use HDMI or DVI output mode
+ * @avi_infoframe: HDMI infoframe
+ */
+struct adv7511_video_config {
+ bool csc_enable;
+ enum adv7511_csc_scaling csc_scaling_factor;
+ const uint16_t *csc_coefficents;
+
+ bool hdmi_mode;
+ struct hdmi_avi_infoframe avi_infoframe;
+};
+
+#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c1dd485aeb6c..e4083e41a600 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
- intel_pm.o
+ intel_pm.o \
+ intel_runtime_pm.o
+
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -38,13 +40,18 @@ i915-y += i915_cmd_parser.o \
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
- intel_renderstate_gen8.o
+ intel_renderstate_gen8.o \
+ intel_renderstate_gen9.o
# modesetting core code
-i915-y += intel_bios.o \
+i915-y += intel_audio.o \
+ intel_bios.o \
intel_display.o \
+ intel_fifo_underrun.o \
+ intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
+ intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 593b657d3e59..22c992a78ac6 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -73,7 +73,7 @@
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
- * implementated via a per-ring length decoding vfunc.
+ * implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
+ /*
+ * MI_BATCH_BUFFER_START requires some special handling. It's not
+ * really a 'skip' action but it doesn't seem like it's worth adding
+ * a new action. See i915_parse_cmds().
+ */
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ REG64(MI_PREDICATE_SRC0),
+ REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
@@ -838,7 +845,7 @@ finish:
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module paramter.
+ * only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
@@ -847,12 +854,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
if (!ring->needs_cmd_parser)
return false;
- /*
- * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
- * disabled. That will cause all of the parser's PPGTT checks to
- * fail. For now, disable parsing when PPGTT is off.
- */
- if (USES_PPGTT(ring->dev))
+ if (!USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
+ }
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
@@ -958,7 +962,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
- * Return: non-zero if the parser finds violations or otherwise fails
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
@@ -1005,6 +1010,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
+ /*
+ * If the batch buffer contains a chained batch, return an
+ * error that tells the caller to abort and dispatch the
+ * workload as a non-secure batch.
+ */
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = -EACCES;
+ break;
+ }
+
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
@@ -1059,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
+ * 2. Allow access to the MI_PREDICATE_SRC0 and
+ * MI_PREDICATE_SRC1 registers.
*/
- return 1;
+ return 2;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b44817e08..779a275eb1fd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->has_global_gtt_mapping ? "g" : " ";
+ return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
struct intel_crtc *crtc;
int ret;
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, rcctl1;
+ u32 rpmodectl1, rcctl1, pw_status;
unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
+ pw_status = I915_READ(VLV_GTLC_PW_STATUS);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Render Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
- (I915_READ(VLV_GTLC_PW_STATUS) &
- VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Render RC6 residency since boot: %u\n",
I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
+static void i915_dump_lrc_obj(struct seq_file *m,
+ struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *ctx_obj)
+{
+ struct page *page;
+ uint32_t *reg_state;
+ int j;
+ unsigned long ggtt_offset = 0;
+
+ if (ctx_obj == NULL) {
+ seq_printf(m, "Context on %s with no gem object\n",
+ ring->name);
+ return;
+ }
+
+ seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+ intel_execlists_ctx_id(ctx_obj));
+
+ if (!i915_gem_obj_ggtt_bound(ctx_obj))
+ seq_puts(m, "\tNot bound in GGTT\n");
+ else
+ ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ if (i915_gem_object_get_pages(ctx_obj)) {
+ seq_puts(m, "\tFailed to get pages for context object\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ if (!WARN_ON(page == NULL)) {
+ reg_state = kmap_atomic(page);
+
+ for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+ seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ggtt_offset + 4096 + (j * 4),
+ reg_state[j], reg_state[j + 1],
+ reg_state[j + 2], reg_state[j + 3]);
+ }
+ kunmap_atomic(reg_state);
+ }
+
+ seq_putc(m, '\n');
+}
+
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (ring->default_context == ctx)
- continue;
-
- if (ctx_obj) {
- struct page *page = i915_gem_object_get_page(ctx_obj, 1);
- uint32_t *reg_state = kmap_atomic(page);
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
-
- seq_putc(m, '\n');
- }
+ if (ring->default_context != ctx)
+ i915_dump_lrc_obj(m, ring,
+ ctx->engine[i].state);
}
}
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
+
for_each_ring(ring, dev_priv, ring_id) {
struct intel_ctx_submit_request *head_req = NULL;
int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN3(dev) || IS_GEN4(dev)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
+ seq_printf(m, "DDC2 = 0x%08x\n",
+ I915_READ(DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- if (IS_GEN8(dev))
+ if (INTEL_INFO(dev)->gen >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
+
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ seq_puts(m, "L-shaped memory detected\n");
+
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
- seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
- pll->active, yesno(pll->on));
+ seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
+ pll->config.crtc_mask, pll->active, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->config.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
}
drm_modeset_unlock_all(dev);
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
- for (i = 0; i < dev_priv->num_wa_regs; ++i) {
- u32 addr, mask;
-
- addr = dev_priv->intel_wa_regs[i].addr;
- mask = dev_priv->intel_wa_regs[i].mask;
- dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
- if (dev_priv->intel_wa_regs[i].addr)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- dev_priv->intel_wa_regs[i].addr,
- dev_priv->intel_wa_regs[i].value,
- dev_priv->intel_wa_regs[i].mask);
+ seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
+ for (i = 0; i < dev_priv->workarounds.count; ++i) {
+ u32 addr, mask, value, read;
+ bool ok;
+
+ addr = dev_priv->workarounds.reg[i].addr;
+ mask = dev_priv->workarounds.reg[i].mask;
+ value = dev_priv->workarounds.reg[i].value;
+ read = I915_READ(addr);
+ ok = (value & mask) == (read & mask);
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
+ addr, value, mask, read, ok ? "OK" : "FAIL");
}
intel_runtime_pm_put(dev_priv);
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb;
+ struct skl_ddb_entry *entry;
+ enum pipe pipe;
+ int plane;
+
+ drm_modeset_lock_all(dev);
+
+ ddb = &dev_priv->wm.skl_hw.ddb;
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_pipe(dev_priv, pipe) {
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane(pipe, plane) {
+ entry = &ddb->plane[pipe][plane];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &ddb->cursor[pipe];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
break;
}
break;
+ default:
+ break;
}
}
drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
+ pipe));
u32 val = 0; /* shut up gcc */
int ret;
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (pipe_crc->source && source)
return -EINVAL;
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
if (IS_GEN2(dev))
ret = i8xx_pipe_crc_ctl_reg(&source, &val);
else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries)
return -ENOMEM;
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(crtc);
+
spin_lock_irq(&pipe_crc->lock);
pipe_crc->head = 0;
pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+
+ hsw_enable_ips(crtc);
}
return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
for (level = 0; level < num_levels; level++) {
unsigned int latency = wm[level];
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9
+ */
+ if (INTEL_INFO(dev)->gen >= 9)
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level],
- latency / 10, latency % 10);
+ level, wm[level], latency / 10, latency % 10);
}
drm_modeset_unlock_all(dev);
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
- wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
- wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+ wm_latency_show(m, latencies);
return 0;
}
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, uint16_t wm[5])
+ size_t len, loff_t *offp, uint16_t wm[8])
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- uint16_t new[5] = { 0 };
+ uint16_t new[8] = { 0 };
int num_levels = ilk_wm_max_level(dev) + 1;
int level;
int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
tmp[len] = '\0';
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
if (ret != num_levels)
return -EINVAL;
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint16_t *latencies;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = to_i915(dev)->wm.cur_latency;
- return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+ return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 318ade9bb5af..ecee3bcc8772 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,884 +50,6 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- __intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
-static inline u32
-intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
-{
- if (I915_NEED_GFX_HWS(dev_priv->dev))
- return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
- else
- return intel_read_status_page(LP_RING(dev_priv), reg);
-}
-
-#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_BREADCRUMB_INDEX 0x21
-
-void i915_update_dri1_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
-
- /*
- * The dri breadcrumb update races against the drm master disappearing.
- * Instead of trying to fix this (this is by far not the only ums issue)
- * just don't do the update in kms mode.
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-}
-
-static void i915_write_hws_pga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 addr;
-
- addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(dev)->gen >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
-}
-
-/**
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
-static void i915_free_hws(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- }
-
- if (ring->status_page.gfx_addr) {
- ring->status_page.gfx_addr = 0;
- iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
- }
-
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-void i915_kernel_lost_context(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
- struct intel_ringbuffer *ringbuf = ring->buffer;
-
- /*
- * We should never lose context on the ring with modesetting
- * as we don't expose it to userspace
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
- if (ringbuf->space < 0)
- ringbuf->space += ringbuf->size;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
-static int i915_dma_cleanup(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
- mutex_unlock(&dev->struct_mutex);
-
- /* Clear the HWS virtual address at teardown */
- if (I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
-
- return 0;
-}
-
-static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret;
-
- master_priv->sarea = drm_legacy_getsarea(dev);
- if (master_priv->sarea) {
- master_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
- } else {
- DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
- }
-
- if (init->ring_size != 0) {
- if (LP_RING(dev_priv)->buffer->obj != NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Client tried to initialize ringbuffer in "
- "GEM mode\n");
- return -EINVAL;
- }
-
- ret = intel_render_ring_init_dri(dev,
- init->ring_start,
- init->ring_size);
- if (ret) {
- i915_dma_cleanup(dev);
- return ret;
- }
- }
-
- dev_priv->dri1.cpp = init->cpp;
- dev_priv->dri1.back_offset = init->back_offset;
- dev_priv->dri1.front_offset = init->front_offset;
- dev_priv->dri1.current_page = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->pf_current_page = 0;
-
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->dri1.allow_batchbuffer = 1;
-
- return 0;
-}
-
-static int i915_dma_resume(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- if (ring->buffer->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- /* Program Hardware Status Page */
- if (!ring->status_page.page_addr) {
- DRM_ERROR("Can not find hardware status page\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("hw status page @ %p\n",
- ring->status_page.page_addr);
- if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(ring);
- else
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-
- return 0;
-}
-
-static int i915_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_init_t *init = data;
- int retcode = 0;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- switch (init->func) {
- case I915_INIT_DMA:
- retcode = i915_initialize(dev, init);
- break;
- case I915_CLEANUP_DMA:
- retcode = i915_dma_cleanup(dev);
- break;
- case I915_RESUME_DMA:
- retcode = i915_dma_resume(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction. It's important to get the size right as
- * that tells us where the next instruction to check is. Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int validate_cmd(int cmd)
-{
- switch (((cmd >> 29) & 0x7)) {
- case 0x0:
- switch ((cmd >> 23) & 0x3f) {
- case 0x0:
- return 1; /* MI_NOOP */
- case 0x4:
- return 1; /* MI_FLUSH */
- default:
- return 0; /* disallow everything else */
- }
- break;
- case 0x1:
- return 0; /* reserved */
- case 0x2:
- return (cmd & 0xff) + 2; /* 2d commands */
- case 0x3:
- if (((cmd >> 24) & 0x1f) <= 0x18)
- return 1;
-
- switch ((cmd >> 24) & 0x1f) {
- case 0x1c:
- return 1;
- case 0x1d:
- switch ((cmd >> 16) & 0xff) {
- case 0x3:
- return (cmd & 0x1f) + 2;
- case 0x4:
- return (cmd & 0xf) + 2;
- default:
- return (cmd & 0xffff) + 2;
- }
- case 0x1e:
- if (cmd & (1 << 23))
- return (cmd & 0xffff) + 1;
- else
- return 1;
- case 0x1f:
- if ((cmd & (1 << 23)) == 0) /* inline vertices */
- return (cmd & 0x1ffff) + 2;
- else if (cmd & (1 << 17)) /* indirect random */
- if ((cmd & 0xffff) == 0)
- return 0; /* unknown length, too hard */
- else
- return (((cmd & 0xffff) + 1) / 2) + 1;
- else
- return 2; /* indirect sequential */
- default:
- return 0;
- }
- default:
- return 0;
- }
-
- return 0;
-}
-
-static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i, ret;
-
- if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
- return -EINVAL;
-
- for (i = 0; i < dwords;) {
- int sz = validate_cmd(buffer[i]);
-
- if (sz == 0 || i + sz > dwords)
- return -EINVAL;
- i += sz;
- }
-
- ret = BEGIN_LP_RING((dwords+1)&~1);
- if (ret)
- return ret;
-
- for (i = 0; i < dwords; i++)
- OUT_RING(buffer[i]);
- if (dwords & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-int
-i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
- box->y2 <= 0 || box->x2 <= 0) {
- DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box->x1, box->y1, box->x2, box->y2);
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- } else {
- ret = BEGIN_LP_RING(6);
- if (ret)
- return ret;
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(DR1);
- OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
- OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
- OUT_RING(DR4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-static void i915_emit_breadcrumb(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static int i915_dispatch_cmdbuffer(struct drm_device *dev,
- drm_i915_cmdbuffer_t *cmd,
- struct drm_clip_rect *cliprects,
- void *cmdbuf)
-{
- int nbox = cmd->num_cliprects;
- int i = 0, count, ret;
-
- if (cmd->sz & 0x3) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- cmd->DR1, cmd->DR4);
- if (ret)
- return ret;
- }
-
- ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
- if (ret)
- return ret;
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_batchbuffer(struct drm_device *dev,
- drm_i915_batchbuffer_t *batch,
- struct drm_clip_rect *cliprects)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int nbox = batch->num_cliprects;
- int i, count, ret;
-
- if ((batch->start | batch->used) & 0x7) {
- DRM_ERROR("alignment");
- return -EINVAL;
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, &cliprects[i],
- batch->DR1, batch->DR4);
- if (ret)
- return ret;
- }
-
- if (!IS_I830(dev) && !IS_845G(dev)) {
- ret = BEGIN_LP_RING(2);
- if (ret)
- return ret;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
- OUT_RING(batch->start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- }
- } else {
- ret = BEGIN_LP_RING(4);
- if (ret)
- return ret;
-
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- }
- ADVANCE_LP_RING();
- }
-
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- if (BEGIN_LP_RING(2) == 0) {
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
- }
-
- i915_emit_breadcrumb(dev);
- return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv =
- dev->primary->master->driver_priv;
- int ret;
-
- if (!master_priv->sarea_priv)
- return -EINVAL;
-
- DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->dri1.current_page,
- master_priv->sarea_priv->pf_current_page);
-
- i915_kernel_lost_context(dev);
-
- ret = BEGIN_LP_RING(10);
- if (ret)
- return ret;
-
- OUT_RING(MI_FLUSH | MI_READ_FLUSH);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->dri1.current_page == 0) {
- OUT_RING(dev_priv->dri1.back_offset);
- dev_priv->dri1.current_page = 1;
- } else {
- OUT_RING(dev_priv->dri1.front_offset);
- dev_priv->dri1.current_page = 0;
- }
- OUT_RING(0);
-
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
- return 0;
-}
-
-static int i915_quiescent(struct drm_device *dev)
-{
- i915_kernel_lost_context(dev);
- return intel_ring_idle(LP_RING(dev->dev_private));
-}
-
-static int i915_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_quiescent(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static int i915_batchbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_batchbuffer_t *batch = data;
- int ret;
- struct drm_clip_rect *cliprects = NULL;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- if (!dev_priv->dri1.allow_batchbuffer) {
- DRM_ERROR("Batchbuffer ioctl disabled\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (batch->num_cliprects < 0)
- return -EINVAL;
-
- if (batch->num_cliprects) {
- cliprects = kcalloc(batch->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(cliprects, batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
- mutex_unlock(&dev->struct_mutex);
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_free:
- kfree(cliprects);
-
- return ret;
-}
-
-static int i915_cmdbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_cmdbuffer_t *cmdbuf = data;
- struct drm_clip_rect *cliprects = NULL;
- void *batch_data;
- int ret;
-
- DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- master_priv = dev->primary->master->driver_priv;
- sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (cmdbuf->num_cliprects < 0)
- return -EINVAL;
-
- batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
- if (batch_data == NULL)
- return -ENOMEM;
-
- ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_batch_free;
- }
-
- if (cmdbuf->num_cliprects) {
- cliprects = kcalloc(cmdbuf->num_cliprects,
- sizeof(*cliprects), GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto fail_batch_free;
- }
-
- ret = copy_from_user(cliprects, cmdbuf->cliprects,
- cmdbuf->num_cliprects *
- sizeof(struct drm_clip_rect));
- if (ret != 0) {
- ret = -EFAULT;
- goto fail_clip_free;
- }
- }
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- goto fail_clip_free;
- }
-
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-fail_clip_free:
- kfree(cliprects);
-fail_batch_free:
- kfree(batch_data);
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->dri1.counter;
-}
-
-static int i915_wait_irq(struct drm_device *dev, int irq_nr)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_engine_cs *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-static int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-static int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
-static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-static int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
-}
-
-static int i915_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- DRM_DEBUG_DRIVER("%s\n", __func__);
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_flip(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
drm_i915_getparam_t *param = data;
int value;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->pdev->irq ? 1 : 0;
- break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
- break;
case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ value = 1;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
- break;
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
return 0;
}
-static int i915_set_status_page(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t *hws = data;
- struct intel_engine_cs *ring;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (!I915_NEED_GFX_HWS(dev))
- return -EINVAL;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- WARN(1, "tried to set status page when mode setting active\n");
- return 0;
- }
-
- DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
-
- ring = LP_RING(dev_priv);
- ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
-
- dev_priv->dri1.gfx_hws_cpu_addr =
- ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
- if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
- i915_dma_cleanup(dev);
- ring->status_page.gfx_addr = 0;
- DRM_ERROR("can not ioremap virtual address for"
- " G33 hw status page\n");
- return -ENOMEM;
- }
-
- memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-
- DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- ring->status_page.gfx_addr);
- DRM_DEBUG_DRIVER("load hws at %p\n",
- ring->status_page.page_addr);
- return 0;
-}
-
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1275,12 +337,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
- i915_resume(dev);
+ i915_resume_legacy(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend(dev, pmm);
+ i915_suspend_legacy(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1338,14 +400,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_power_domains_init_hw(dev_priv);
- /*
- * We enable some interrupt sources in our postinstall hooks, so mark
- * interrupts as enabled _before_ actually enabling them to avoid
- * special cases in our ordering checks.
- */
- dev_priv->pm._irqs_disabled = false;
-
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
@@ -1370,7 +425,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/*
* Some ports require correctly set-up hpd registers for detection to
@@ -1405,30 +460,6 @@ out:
return ret;
}
-int i915_master_create(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv;
-
- master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
- if (!master_priv)
- return -ENOMEM;
-
- master->driver_priv = master_priv;
- return 0;
-}
-
-void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_i915_master_private *master_priv = master->driver_priv;
-
- if (!master_priv)
- return;
-
- kfree(master_priv);
-
- master->driver_priv = NULL;
-}
-
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
@@ -1534,7 +565,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -1614,7 +645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1742,7 +773,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}
- intel_irq_init(dev);
+ intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1784,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
- } else {
- /* Start out suspended in ums mode. */
- dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1800,12 +828,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
- intel_init_runtime_pm(dev_priv);
+ intel_runtime_pm_enable(dev_priv);
return 0;
out_power_well:
- intel_power_domains_remove(dev_priv);
+ intel_power_domains_fini(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1848,16 +876,10 @@ int i915_driver_unload(struct drm_device *dev)
return ret;
}
- intel_fini_runtime_pm(dev_priv);
+ intel_power_domains_fini(dev_priv);
intel_gpu_ips_teardown();
- /* The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_remove(dev_priv);
-
i915_teardown_sysfs(dev);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1868,8 +890,12 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_fbdev_fini(dev);
+
+ drm_vblank_cleanup(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
/*
@@ -1905,13 +931,8 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
-
- if (!I915_NEED_GFX_HWS(dev))
- i915_free_hws(dev);
}
- drm_vblank_cleanup(dev);
-
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1959,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
void i915_driver_lastclose(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* On gen6+ we refuse to init without kms enabled, but then the drm core
- * goes right around and calls lastclose. Check for this and don't clean
- * up anything. */
- if (!dev_priv)
- return;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
- return;
- }
-
- i915_gem_lastclose(dev);
-
- i915_dma_cleanup(dev);
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
}
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1999,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
const struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2025,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2318b4c7a8f8..f990ab4c3efb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_skylake_info = {
+ .is_preliminary = 1,
+ .is_skylake = 1,
+ .gen = 9, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .has_llc = 1,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
- INTEL_CHV_IDS(&intel_cherryview_info)
+ INTEL_CHV_IDS(&intel_cherryview_info), \
+ INTEL_SKL_IDS(&intel_skylake_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_ULT(dev));
+ WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
@@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_ULT(dev));
+ WARN_ON(!IS_HSW_ULT(dev));
+ } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_SPT;
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev));
} else
continue;
@@ -529,10 +551,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+ bool rpm_resume);
-static int i915_drm_freeze(struct drm_device *dev)
+static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
@@ -562,6 +584,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ intel_suspend_gt_powersave(dev);
+
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
@@ -573,16 +597,12 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_dp_mst_suspend(dev);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- intel_runtime_pm_disable_interrupts(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
- intel_suspend_gt_powersave(dev);
-
- intel_modeset_suspend_hw(dev);
+ intel_suspend_hw(dev);
}
i915_gem_suspend_gtt_mappings(dev);
@@ -608,7 +628,26 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_suspend_late(struct drm_device *drm_dev)
+{
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int ret;
+
+ ret = intel_suspend_complete(dev_priv);
+
+ if (ret) {
+ DRM_ERROR("Suspend complete failed: %d\n", ret);
+
+ return ret;
+ }
+
+ pci_disable_device(drm_dev->pdev);
+ pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -618,48 +657,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return -ENODEV;
}
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
+ if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
+ return -EINVAL;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_freeze(dev);
+ error = i915_drm_suspend(dev);
if (error)
return error;
- if (state.event == PM_EVENT_SUSPEND) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- }
-
- return 0;
+ return i915_drm_suspend_late(dev);
}
-static int i915_drm_thaw_early(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = intel_resume_prepare(dev_priv, false);
- if (ret)
- DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
- intel_uncore_early_sanitize(dev, true);
- intel_uncore_sanitize(dev);
- intel_power_domains_init_hw(dev_priv);
-
- return ret;
-}
-
-static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- restore_gtt_mappings) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -680,30 +696,29 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_restore_interrupts(dev);
+ /* We need working interrupts for modeset enabling ... */
+ intel_runtime_pm_enable_interrupts(dev_priv);
intel_modeset_init_hw(dev);
- {
- unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
 * bother with the tiny race here where we might lose hotplug
* notifications.
 */
- intel_hpd_init(dev);
+ intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -718,21 +733,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_notify_adapter(dev, PCI_D0);
- return 0;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- i915_check_and_clear_faults(dev);
+ drm_kms_helper_poll_enable(dev);
- return __i915_drm_thaw(dev, true);
+ return 0;
}
-static int i915_resume_early(struct drm_device *dev)
+static int i915_drm_resume_early(struct drm_device *dev)
{
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- return 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -748,33 +757,34 @@ static int i915_resume_early(struct drm_device *dev)
pci_set_master(dev->pdev);
- return i915_drm_thaw_early(dev);
+ if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, false);
+ if (ret)
+ DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
+
+ intel_uncore_early_sanitize(dev, true);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_sanitize(dev);
+ intel_power_domains_init_hw(dev_priv);
+
+ return ret;
}
-int i915_resume(struct drm_device *dev)
+int i915_resume_legacy(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /*
- * Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need to restore the GTT mappings since the BIOS might clear
- * all our scratch PTEs.
- */
- ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ ret = i915_drm_resume_early(dev);
if (ret)
return ret;
- drm_kms_helper_poll_enable(dev);
- return 0;
-}
-
-static int i915_resume_legacy(struct drm_device *dev)
-{
- i915_resume_early(dev);
- i915_resume(dev);
-
- return 0;
+ return i915_drm_resume(dev);
}
/**
@@ -820,6 +830,9 @@ int i915_reset(struct drm_device *dev)
}
}
+ if (i915_stop_ring_allow_warn(dev_priv))
+ pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -840,10 +853,7 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->ums.mm_suspended) {
- dev_priv->ums.mm_suspended = 0;
-
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
@@ -923,15 +933,13 @@ static int i915_pm_suspend(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
- int ret;
/*
 * We have a suspend ordering issue with the snd-hda driver also
@@ -945,16 +953,7 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = intel_suspend_complete(dev_priv);
-
- if (ret)
- DRM_ERROR("Suspend complete failed: %d\n", ret);
- else {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return ret;
+ return i915_drm_suspend_late(drm_dev);
}
static int i915_pm_resume_early(struct device *dev)
@@ -962,61 +961,21 @@ static int i915_pm_resume_early(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_resume_early(drm_dev);
-}
-
-static int i915_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- return i915_resume(drm_dev);
-}
-
-static int i915_pm_freeze(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
-
- if (!drm_dev || !drm_dev->dev_private) {
- dev_err(dev, "DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- return i915_drm_freeze(drm_dev);
-}
-
-static int i915_pm_freeze_late(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
-
- return intel_suspend_complete(dev_priv);
-}
-
-static int i915_pm_thaw_early(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_thaw_early(drm_dev);
+ return i915_drm_resume_early(drm_dev);
}
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_drm_thaw(drm_dev);
-}
-
-static int i915_pm_poweroff(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- return i915_drm_freeze(drm_dev);
+ return i915_drm_resume(drm_dev);
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1026,25 +985,6 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
return 0;
}
-static int snb_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
-
- if (rpm_resume)
- intel_init_pch_refclk(dev);
-
- return 0;
-}
-
-static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- hsw_disable_pc8(dev_priv);
-
- return 0;
-}
-
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1449,18 +1389,13 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
- /*
- * rps.work can't be rearmed here, since we get here only after making
- * sure the GPU is idle and the RPS freq is set to the minimum. See
- * intel_mark_idle().
- */
- cancel_work_sync(&dev_priv->rps.work);
- intel_runtime_pm_disable_interrupts(dev);
+ intel_suspend_gt_powersave(dev);
+ intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_runtime_pm_restore_interrupts(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
return ret;
}
@@ -1502,7 +1437,7 @@ static int intel_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
@@ -1512,7 +1447,13 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
- ret = intel_resume_prepare(dev_priv, true);
+ if (IS_GEN6(dev_priv))
+ intel_init_pch_refclk(dev);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_disable_pc8(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ ret = vlv_resume_prepare(dev_priv, true);
+
/*
 * No point in rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -1520,8 +1461,8 @@ static int intel_runtime_resume(struct device *device)
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
- intel_runtime_pm_restore_interrupts(dev);
- intel_reset_gt_powersave(dev);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_enable_gt_powersave(dev);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1550,41 +1491,41 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
return ret;
}
-/*
- * This function implements common functionality of runtime and system
- * resume sequence. Variable rpm_resume used for implementing different
- * code paths.
- */
-static int intel_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- struct drm_device *dev = dev_priv->dev;
- int ret;
-
- if (IS_GEN6(dev))
- ret = snb_resume_prepare(dev_priv, rpm_resume);
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- ret = hsw_resume_prepare(dev_priv, rpm_resume);
- else if (IS_VALLEYVIEW(dev))
- ret = vlv_resume_prepare(dev_priv, rpm_resume);
- else
- ret = 0;
-
- return ret;
-}
-
static const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
- .freeze = i915_pm_freeze,
- .freeze_late = i915_pm_freeze_late,
- .thaw_early = i915_pm_thaw_early,
- .thaw = i915_pm_thaw,
- .poweroff = i915_pm_poweroff,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_suspend,
+ .freeze_late = i915_pm_suspend_late,
+ .thaw_early = i915_pm_resume_early,
+ .thaw = i915_pm_resume,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_suspend_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
+
+ /* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
@@ -1626,12 +1567,10 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
- .suspend = i915_suspend,
+ .suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
- .master_create = i915_master_create,
- .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1645,7 +1584,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d187a1..63bcda5541ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,10 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20140905"
+#define DRIVER_DATE "20141121"
+
+#undef WARN_ON
+#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
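/*
 * With this override a failing check logs its own condition text, e.g.
 * WARN_ON(!IS_SKYLAKE(dev)) prints "WARN_ON(!IS_SKYLAKE(dev))" alongside the
 * backtrace instead of a bare WARNING line.
 */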
enum pipe {
INVALID_PIPE = -1,
@@ -76,6 +79,14 @@ enum transcoder {
};
#define transcoder_name(t) ((t) + 'A')
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES 3
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -202,10 +213,15 @@ enum intel_dpll_id {
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
+ /* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
+ /* skl */
+ DPLL_ID_SKL_DPLL1 = 0,
+ DPLL_ID_SKL_DPLL2 = 1,
+ DPLL_ID_SKL_DPLL3 = 2,
};
-#define I915_NUM_PLLS 2
+#define I915_NUM_PLLS 3
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -216,16 +232,33 @@ struct intel_dpll_hw_state {
/* hsw, bdw */
uint32_t wrpll;
+
+ /* skl */
+ /*
+ * DPLL_CTRL1 has 6 bits for each of these DPLLs. We store those in the
+ * lower part of ctrl1 and they get shifted into position when writing
+ * the register. This allows us to easily compare the state to share
+ * the DPLL.
+ */
+ uint32_t ctrl1;
+ /* HDMI only, 0 when used for DP */
+ uint32_t cfgcr1, cfgcr2;
+};
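/*
 * Sketch of the "shifted into position" note above (helper name invented;
 * the real programming lives in the SKL DDI/PLL code): each DPLL owns a
 * 6-bit field in DPLL_CTRL1, so state kept in the low bits of ctrl1 is
 * moved to its slot by shifting by 6 * the DPLL id.
 */
static inline u32 example_skl_ctrl1_bits(const struct intel_dpll_hw_state *hw,
					 unsigned int dpll_id)
{
	return hw->ctrl1 << (6 * dpll_id);
}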
+
+struct intel_shared_dpll_config {
+ unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+ struct intel_dpll_hw_state hw_state;
};
struct intel_shared_dpll {
- int refcount; /* count of number of CRTCs sharing this PLL */
+ struct intel_shared_dpll_config config;
+ struct intel_shared_dpll_config *new_config;
+
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
- struct intel_dpll_hw_state hw_state;
/* The mode_set hook is optional and should be used together with the
* intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
@@ -239,6 +272,11 @@ struct intel_shared_dpll {
struct intel_dpll_hw_state *hw_state);
};
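/*
 * Sketch, not part of this change: the old refcount of CRTCs sharing a PLL
 * can still be derived from the new crtc_mask when needed.
 */
static inline unsigned int example_shared_dpll_users(const struct intel_shared_dpll *pll)
{
	return hweight32(pll->config.crtc_mask);
}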
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@@ -267,7 +305,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_PATCHLEVEL 0
#define WATCH_LISTS 0
-#define WATCH_GTT 0
struct opregion_header;
struct opregion_acpi;
@@ -290,12 +327,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_local_map;
-
-struct drm_i915_master_private {
- struct drm_local_map *sarea;
- struct _drm_i915_sarea *sarea_priv;
-};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -426,6 +457,7 @@ struct drm_i915_error_state {
};
struct intel_connector;
+struct intel_encoder;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
@@ -452,7 +484,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct drm_crtc *crtc,
+ struct intel_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -468,15 +500,14 @@ struct drm_i915_display_funcs {
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
- int (*crtc_mode_set)(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *old_fb);
+ int (*crtc_compute_clock)(struct intel_crtc *crtc);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
- void (*write_eld)(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode);
+ void (*audio_codec_enable)(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode);
+ void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -494,7 +525,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
- int (*setup_backlight)(struct intel_connector *connector);
+ int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
@@ -533,6 +564,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
+ unsigned fw_blittercount;
struct timer_list force_wake_timer;
};
@@ -551,6 +583,7 @@ struct intel_uncore {
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
+ func(is_skylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
@@ -646,6 +679,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
+ int unpin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -663,6 +697,18 @@ struct i915_fbc {
bool false_color;
+ /* Tracks whether the HW is actually enabled, not whether the feature is
+ * possible. */
+ bool enabled;
+
+ /* On gen8 some rings cannot perform the fbc clean operation, so for now
+ * we do it in SW with mmio.
+ * This flag works in the opposite information direction of
+ * ring->fbc_dirty: it tells the software frontbuffer tracking code
+ * to perform the cache clean on the SW side.
+ */
+ bool need_sw_cache_clean;
+
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
@@ -704,6 +750,7 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
+ PCH_SPT, /* Sunrisepoint PCH */
PCH_NOP,
};
@@ -717,6 +764,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
+#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
struct intel_fbdev;
struct intel_fbc_work;
@@ -768,7 +816,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
- u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@@ -877,6 +924,7 @@ struct i915_suspend_saved_registers {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+ u16 saveGCDGMBUS;
};
struct vlv_s0ix_state {
@@ -947,8 +995,12 @@ struct intel_rps_ei {
};
struct intel_gen6_power_mgmt {
- /* work and pm_iir are protected by dev_priv->irq_lock */
+ /*
+ * work, interrupts_enabled and pm_iir are protected by
+ * dev_priv->irq_lock
+ */
struct work_struct work;
+ bool interrupts_enabled;
u32 pm_iir;
/* Frequencies are stored in potentially platform dependent multiples.
@@ -1071,31 +1123,6 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
-struct i915_dri1_state {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- uint32_t counter;
-};
-
-struct i915_ums_state {
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int mm_suspended;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -1357,6 +1384,49 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
+struct skl_ddb_entry {
+ uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+};
+
+static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+{
+ return entry->end - entry->start;
+}
+
+static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
+ const struct skl_ddb_entry *e2)
+{
+ if (e1->start == e2->start && e1->end == e2->end)
+ return true;
+
+ return false;
+}
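/*
 * Quick illustration (values invented) of the exclusive-end convention:
 * [0, 256) and [256, 512) are adjacent, non-overlapping 256-block ranges.
 */
static inline void example_skl_ddb_usage(void)
{
	struct skl_ddb_entry a = { .start = 0, .end = 256 };
	struct skl_ddb_entry b = { .start = 256, .end = 512 };

	WARN_ON(skl_ddb_entry_size(&a) != 256);
	WARN_ON(skl_ddb_entry_equal(&a, &b));	/* distinct ranges */
}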
+
+struct skl_ddb_allocation {
+ struct skl_ddb_entry pipe[I915_MAX_PIPES];
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry cursor[I915_MAX_PIPES];
+};
+
+struct skl_wm_values {
+ bool dirty[I915_MAX_PIPES];
+ struct skl_ddb_allocation ddb;
+ uint32_t wm_linetime[I915_MAX_PIPES];
+ uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
+ uint32_t cursor[I915_MAX_PIPES][8];
+ uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
+ uint32_t cursor_trans[I915_MAX_PIPES];
+};
+
+struct skl_wm_level {
+ bool plane_en[I915_MAX_PLANES];
+ bool cursor_en;
+ uint16_t plane_res_b[I915_MAX_PLANES];
+ uint8_t plane_res_l[I915_MAX_PLANES];
+ uint16_t cursor_res_b;
+ uint8_t cursor_res_l;
+};
+
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1369,7 +1439,7 @@ struct ilk_wm_values {
*
* Our driver uses the autosuspend delay feature, which means we'll only really
* suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1452,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool _irqs_disabled;
+ bool irqs_enabled;
};
enum intel_pipe_crc_source {
@@ -1426,6 +1496,20 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
+struct i915_wa_reg {
+ u32 addr;
+ u32 value;
+ /* bitmask representing WA bits */
+ u32 mask;
+};
+
+#define I915_MAX_WA_REGS 16
+
+struct i915_workarounds {
+ struct i915_wa_reg reg[I915_MAX_WA_REGS];
+ u32 count;
+};
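/*
 * Sketch, not part of this change: one way a table of (addr, value, mask)
 * records can be consumed is as masked register updates. The accessors are
 * hypothetical; the driver actually emits the list from its engine init
 * paths.
 */
static inline void example_replay_workarounds(const struct i915_workarounds *w,
					      u32 (*rd)(u32 addr),
					      void (*wr)(u32 addr, u32 val))
{
	u32 i;

	for (i = 0; i < w->count; i++) {
		const struct i915_wa_reg *wa = &w->reg[i];

		wr(wa->addr, (rd(wa->addr) & ~wa->mask) | (wa->value & wa->mask));
	}
}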
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1505,11 +1589,13 @@ struct drm_i915_private {
struct intel_opregion opregion;
struct intel_vbt_data vbt;
+ bool preserve_bios_swizzle;
+
/* overlay */
struct intel_overlay *overlay;
/* backlight registers and fields in struct intel_panel */
- spinlock_t backlight_lock;
+ struct mutex backlight_lock;
/* LVDS info */
bool no_aux_handshake;
@@ -1523,6 +1609,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
+ unsigned int hpll_freq;
/**
* wq - Driver workqueue for GEM.
@@ -1568,19 +1655,7 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- /*
- * workarounds are currently applied at different places and
- * changes are being done to consolidate them so exact count is
- * not clear at this point, use a max value for now.
- */
-#define I915_MAX_WA_REGS 16
- struct {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
- } intel_wa_regs[I915_MAX_WA_REGS];
- u32 num_wa_regs;
+ struct i915_workarounds workarounds;
/* Reclocking support */
bool render_reclock_avail;
@@ -1644,9 +1719,25 @@ struct drm_i915_private {
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
+ /*
+ * Raw watermark memory latency values for SKL, for all 8 levels,
+ * in 1us units.
+ */
+ uint16_t skl_latency[8];
+
+ /*
+ * The skl_wm_values structure is a bit too big for stack
+ * allocation, so we keep a staging struct here to store the
+ * intermediate results instead.
+ */
+ struct skl_wm_values skl_results;
/* current hardware state */
- struct ilk_wm_values hw;
+ union {
+ struct ilk_wm_values hw;
+ struct skl_wm_values skl_hw;
+ };
} wm;
struct i915_runtime_pm pm;
@@ -1667,12 +1758,6 @@ struct drm_i915_private {
uint32_t bios_vgacntr;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct i915_dri1_state dri1;
- /* Old ums support infrastructure, same warning applies. */
- struct i915_ums_state ums;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1830,8 +1915,6 @@ struct drm_i915_gem_object {
unsigned long gt_ro:1;
unsigned int cache_level:3;
- unsigned int has_aliasing_ppgtt_mapping:1;
- unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -1864,10 +1947,10 @@ struct drm_i915_gem_object {
unsigned long user_pin_count;
struct drm_file *pin_filp;
- /** for phy allocated objects */
- struct drm_dma_handle *phys_handle;
-
union {
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
@@ -2073,6 +2156,7 @@ struct drm_i915_cmd_table {
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,9 +2164,10 @@ struct drm_i915_cmd_table {
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
+ (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
@@ -2103,6 +2188,7 @@ struct drm_i915_cmd_table {
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
@@ -2115,13 +2201,11 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- to_i915(dev)->ellc_size)
+ __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
@@ -2154,13 +2238,15 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2168,8 +2254,11 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2189,8 +2278,8 @@ struct drm_i915_cmd_table {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern int i915_suspend(struct drm_device *dev, pm_message_t state);
-extern int i915_resume(struct drm_device *dev);
+extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_legacy(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -2227,8 +2316,6 @@ struct i915_params {
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
-void i915_update_dri1_breadcrumb(struct drm_device *dev);
-extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2242,9 +2329,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *box,
- int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2260,10 +2344,10 @@ __printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
-void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
- int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2283,10 +2367,19 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask);
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
/* i915_gem.c */
-int i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2333,10 +2426,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2379,7 +2468,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
@@ -2413,8 +2501,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
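/*
 * The helper documented above sits outside this hunk; the usual
 * wraparound-safe form of such a test is sketched here: the signed
 * difference stays non-negative as long as the two 32-bit seqnos are less
 * than 2^31 apart, even across a u32 wrap.
 */
static inline bool example_seqno_later(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}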
@@ -2486,6 +2575,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+ unsigned reset_counter,
+ bool interruptible,
+ s64 *timeout,
+ struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2755,7 +2849,6 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
-struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
@@ -2793,7 +2886,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2896,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
@@ -2842,8 +2934,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
@@ -2873,7 +2965,9 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
-#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+#define FORCEWAKE_BLITTER (1 << 2)
+#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
+ FORCEWAKE_BLITTER)
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
@@ -2939,6 +3033,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
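/*
 * Illustration for the helper above (assuming HZ == 1000, i.e. 1 ms per
 * jiffy): a 1.5 ms request floors to 1 jiffy and the +1 pads it to 2, so the
 * wait can never come out shorter than asked for, while the min_t() against
 * MAX_JIFFY_OFFSET caps oversized values.
 */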
+
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604d..4a9faea626db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
}
int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
- return -EINVAL;
-
- /* GEM with user mode setting was never supported on ilk and later. */
- if (INTEL_INFO(dev)->gen >= 5)
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
- args->gtt_end);
- dev_priv->gtt.mappable_end = args->gtt_end;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
- drm_dma_handle_t *phys = obj->phys_handle;
+ struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ char *vaddr = obj->phys_handle->vaddr;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i;
- if (!phys)
- return;
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
- if (obj->madv == I915_MADV_WILLNEED) {
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ page_cache_release(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ i915_gem_chipset_flush(obj->base.dev);
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_len(sg) = obj->base.size;
+
+ obj->pages = st;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
+
+ if (obj->dirty) {
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
- char *vaddr = phys->vaddr;
+ char *vaddr = obj->phys_handle->vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page = shmem_read_mapping_page(mapping, i);
- if (!IS_ERR(page)) {
- char *dst = kmap_atomic(page);
- memcpy(dst, vaddr, PAGE_SIZE);
- drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(dst);
-
- set_page_dirty(page);
+ struct page *page;
+ char *dst;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ continue;
+
+ dst = kmap_atomic(page);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
- }
+ page_cache_release(page);
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ obj->dirty = 0;
}
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- obj->phys_handle = NULL;
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+
+ obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+ drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .get_pages = i915_gem_object_get_pages_phys,
+ .put_pages = i915_gem_object_put_pages_phys,
+ .release = i915_gem_object_release_phys,
+};
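/*
 * Sketch, not part of this change: with obj->ops pointing at the table
 * above, the generic GEM paths dispatch through it, roughly:
 */
static inline int example_phys_get_pages(struct drm_i915_gem_object *obj)
{
	return obj->ops->get_pages(obj);	/* -> i915_gem_object_get_pages_phys */
}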
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma, *next;
+ int ret;
+
+ drm_gem_object_reference(&obj->base);
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ ret = i915_gem_object_put_pages(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ return ret;
}
int
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
- struct address_space *mapping;
- char *vaddr;
- int i;
+ int ret;
if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->base.filp == NULL)
return -EINVAL;
+ ret = drop_pages(obj);
+ if (ret)
+ return ret;
+
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;
- vaddr = phys->vaddr;
-#ifdef CONFIG_X86
- set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
- mapping = file_inode(obj->base.filp)->i_mapping;
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
-#ifdef CONFIG_X86
- set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
- drm_pci_free(obj->base.dev, phys);
- return PTR_ERR(page);
- }
-
- src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- kunmap_atomic(src);
-
- mark_page_accessed(page);
- page_cache_release(page);
-
- vaddr += PAGE_SIZE;
- }
-
obj->phys_handle = phys;
- return 0;
+ obj->ops = &i915_gem_phys_ops;
+
+ return i915_gem_object_get_pages(obj);
}
static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret;
+
+ /* We manually control the domain here and pretend that it
+ * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+ */
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return -EFAULT;
}
+ drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
return 0;
}
@@ -346,6 +401,7 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
+ bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
+ obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, true, &args->handle);
}
/**
@@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, false, &args->handle);
}
static inline int
@@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_handle) {
- ret = i915_gem_phys_pwrite(obj, args, file);
- goto out;
- }
-
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
@@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT || ret == -ENOSPC)
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ if (ret == -EFAULT || ret == -ENOSPC) {
+ if (obj->phys_handle)
+ ret = i915_gem_phys_pwrite(obj, args, file);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ }
out:
drm_gem_object_unreference(&obj->base);
@@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 * Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
@@ -1171,7 +1227,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
- timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
+ timeout_expire = timeout ?
+ jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
@@ -1247,6 +1304,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 tres = *timeout - (now - before);
*timeout = tres < 0 ? 0 : tres;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+ *timeout = 0;
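/*
 * For scale (assuming HZ == 250): jiffies_to_usecs(1) * 1000 is
 * 4,000 * 1,000 = 4,000,000 ns, i.e. one 4 ms tick of allowed slack.
 */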
}
return ret;
@@ -1262,6 +1329,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
+ unsigned reset_counter;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1343,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
if (ret)
return ret;
- return __wait_seqno(ring, seqno,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+ NULL, NULL);
}
static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
if (!obj->active)
return 0;
@@ -1319,7 +1386,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1421,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+ file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
- return i915_gem_object_wait_rendering__tail(obj, ring);
+ return i915_gem_object_wait_rendering__tail(obj);
}
/**
@@ -1466,6 +1534,16 @@ unlock:
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver-private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
@@ -1762,10 +1840,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-int
+static int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
+ uint32_t handle, bool dumb,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1860,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1806,6 +1891,15 @@ unlock:
return ret;
}
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1827,7 +1921,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
}
static inline int
@@ -1945,7 +2039,14 @@ unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
long target, unsigned flags)
{
- const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
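/*
 * The { NULL, 0 } row is the loop sentinel: the scan below walks the table
 * in order (unbound first, then bound) and stops at the NULL list pointer.
 */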
unsigned long count = 0;
/*
@@ -1967,48 +2068,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* dev->struct_mutex and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
- if (flags & I915_SHRINK_UNBOUND) {
+ for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&dev_priv->mm.unbound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- }
-
- if (flags & I915_SHRINK_BOUND) {
- struct list_head still_in_list;
+ if ((flags & phase->bit) == 0)
+ continue;
INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ while (count < target && !list_empty(phase->list)) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma, *v;
- obj = list_first_entry(&dev_priv->mm.bound_list,
+ obj = list_first_entry(phase->list,
typeof(*obj), global_list);
list_move_tail(&obj->global_list, &still_in_list);
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ if (flags & I915_SHRINK_PURGEABLE &&
+ !i915_gem_object_is_purgeable(obj))
continue;
drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
@@ -2017,7 +2100,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
drm_gem_object_unreference(&obj->base);
}
- list_splice(&still_in_list, &dev_priv->mm.bound_list);
+ list_splice(&still_in_list, phase->list);
}
return count;
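Illustrative sketch (not part of this patch): the shrinker rewrite above folds the two near-identical unbound/bound loops into one loop driven by a small phase table with a NULL sentinel. The same table-driven pattern, reduced to a self-contained example with made-up pool types, looks like this:

#include <stdio.h>

#define SHRINK_UNBOUND (1u << 0)
#define SHRINK_BOUND   (1u << 1)

/* Stand-in for the driver's unbound/bound object lists. */
struct pool { const char *name; unsigned long pages; };

static unsigned long shrink(struct pool *unbound, struct pool *bound,
			    unsigned long target, unsigned flags)
{
	/* One entry per phase; the NULL pool terminates the walk. */
	const struct { struct pool *pool; unsigned bit; } phases[] = {
		{ unbound, SHRINK_UNBOUND },
		{ bound,   SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	for (phase = phases; phase->pool; phase++) {
		if ((flags & phase->bit) == 0)
			continue;	/* this phase wasn't requested */
		while (count < target && phase->pool->pages) {
			phase->pool->pages--;	/* "reclaim" one page */
			count++;
		}
	}
	return count;
}

int main(void)
{
	struct pool unbound = { "unbound", 8 }, bound = { "bound", 8 };

	printf("reclaimed %lu pages\n",
	       shrink(&unbound, &bound, 12, SHRINK_UNBOUND | SHRINK_BOUND));
	return 0;
}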
@@ -2122,6 +2205,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ i915_gem_object_pin_pages(obj);
+
return 0;
err_pages:
@@ -2420,15 +2507,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
- if (!dev_priv->ums.mm_suspended) {
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(ring->dev);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
- }
+ cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_mark_busy(dev_priv->dev);
if (out_seqno)
*out_seqno = request->seqno;
@@ -2495,12 +2580,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
+ struct intel_context *ctx = request->ctx;
+
list_del(&request->list);
i915_gem_request_remove_from_client(request);
- if (request->ctx)
- i915_gem_context_unreference(request->ctx);
+ if (ctx) {
+ if (i915.enable_execlists) {
+ struct intel_engine_cs *ring = request->ring;
+ if (ctx != ring->default_context)
+ intel_lr_context_unpin(ring, ctx);
+ }
+ i915_gem_context_unreference(ctx);
+ }
kfree(request);
}
@@ -2555,6 +2648,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
}
/*
+ * Clear the execlists queue up before freeing the requests, as those
+ * are the ones that keep the context and ringbuffer backing objects
+ * pinned in place.
+ */
+ while (!list_empty(&ring->execlist_queue)) {
+ struct intel_ctx_submit_request *submit_req;
+
+ submit_req = list_first_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+ list_del(&submit_req->execlist_link);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(submit_req->ctx);
+ kfree(submit_req);
+ }
+
+ /*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
* retire_requests function does. This is important if object hold
@@ -2571,18 +2681,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
-
- submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
- execlist_link);
- list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
- }
-
/* These may not have been flushed before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
@@ -2719,6 +2817,15 @@ i915_gem_retire_requests(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
+ if (i915.enable_execlists) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ idle &= list_empty(&ring->execlist_queue);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ intel_execlists_retire_requests(ring);
+ }
}
if (idle)
@@ -2811,6 +2918,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 seqno = 0;
int ret = 0;
+ if (args->flags != 0)
+ return -EINVAL;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -2846,8 +2956,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
- return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
- file->driver_priv);
+ return __i915_wait_seqno(ring, seqno, reset_counter, true,
+ &args->timeout_ns, file->driver_priv);
out:
drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3276,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
obj->stride, obj->tiling_mode);
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -3384,46 +3495,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
return true;
}
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int err = 0;
-
- list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
- if (obj->gtt_space == NULL) {
- printk(KERN_ERR "object found on GTT list with no space reserved\n");
- err++;
- continue;
- }
-
- if (obj->cache_level != obj->gtt_space->color) {
- printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level,
- obj->gtt_space->color);
- err++;
- continue;
- }
-
- if (!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level)) {
- printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- i915_gem_obj_ggtt_offset(obj),
- i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
- obj->cache_level);
- err++;
- continue;
- }
- }
-
- WARN_ON(err);
-#endif
-}
-
/**
* Finds free space in the GTT aperture and binds the object there.
*/
@@ -3514,25 +3585,10 @@ search_free:
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
- if (i915_is_ggtt(vm)) {
- bool mappable, fenceable;
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + obj->base.size <=
- dev_priv->gtt.mappable_end);
-
- obj->map_and_fenceable = mappable && fenceable;
- }
-
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
- i915_gem_verify_gtt(dev);
return vma;
err_remove_node:
@@ -3560,7 +3616,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
- if (obj->stolen)
+ if (obj->stolen || obj->phys_handle)
return false;
/* If the GPU is snooping the contents of the CPU cache,
@@ -3739,7 +3795,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ vma->bound & GLOBAL_BIND);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3825,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
- i915_gem_verify_gtt(dev);
return 0;
}
@@ -4067,7 +4122,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4101,6 +4156,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
+ unsigned bound;
int ret;
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4165,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
+ if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+ return -EINVAL;
+
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4189,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
+ bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_gtt_size(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+ }
+
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
@@ -4193,7 +4276,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4332,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -4326,6 +4412,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -4353,6 +4440,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ if (obj->pages &&
+ obj->tiling_mode != I915_TILING_NONE &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (obj->madv == I915_MADV_WILLNEED)
+ i915_gem_object_unpin_pages(obj);
+ if (args->madv == I915_MADV_WILLNEED)
+ i915_gem_object_pin_pages(obj);
+ }
+
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
@@ -4495,8 +4591,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
- i915_gem_object_detach_phys(obj);
-
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@@ -4504,6 +4598,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
WARN_ON(obj->frontbuffer_bits);
+ if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+ obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
@@ -4576,9 +4675,6 @@ i915_gem_suspend(struct drm_device *dev)
int ret = 0;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->ums.mm_suspended)
- goto err;
-
ret = i915_gpu_idle(dev);
if (ret)
goto err;
@@ -4589,15 +4685,7 @@ i915_gem_suspend(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- i915_kernel_lost_context(dev);
i915_gem_stop_ringbuffers(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound ums.mm_suspended!
- */
- dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
- DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4888,9 +4976,6 @@ int i915_gem_init(struct drm_device *dev)
}
mutex_unlock(&dev->struct_mutex);
- /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- dev_priv->dri1.allow_batchbuffer = 1;
return ret;
}
@@ -4905,74 +4990,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
dev_priv->gt.cleanup_ring(ring);
}
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- if (i915_reset_in_progress(&dev_priv->gpu_error)) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- atomic_set(&dev_priv->gpu_error.reset_counter, 0);
- }
-
- mutex_lock(&dev->struct_mutex);
- dev_priv->ums.mm_suspended = 0;
-
- ret = i915_gem_init_hw(dev);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
- ret = drm_irq_install(dev, dev->pdev->irq);
- if (ret)
- goto cleanup_ringbuffer;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-
-cleanup_ringbuffer:
- i915_gem_cleanup_ringbuffer(dev);
- dev_priv->ums.mm_suspended = 1;
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- mutex_lock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
- int ret;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- ret = i915_gem_suspend(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-
static void
init_ring_lists(struct intel_engine_cs *ring)
{
@@ -5119,6 +5136,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
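Hedged sketch (not part of this patch): the body of i915_gem_track_fb() is outside this hunk, but going only by the kerneldoc above, the bookkeeping amounts to moving the given bits from @old to @new. Reduced to a self-contained example with a stand-in object type, and without the locking and sanity checks the real driver performs, that is:

#include <assert.h>

struct fb_obj { unsigned frontbuffer_bits; };

/* Move the given frontbuffer bits from old to new; either may be NULL. */
static void track_fb(struct fb_obj *old, struct fb_obj *new,
		     unsigned frontbuffer_bits)
{
	if (old)
		old->frontbuffer_bits &= ~frontbuffer_bits;
	if (new)
		new->frontbuffer_bits |= frontbuffer_bits;
}

int main(void)
{
	struct fb_obj a = { .frontbuffer_bits = 0x3 }, b = { 0 };

	track_fb(&a, &b, 0x1);	/* frontbuffer slot 0 moves from a to b */
	assert(a.frontbuffer_bits == 0x2);
	assert(b.frontbuffer_bits == 0x1);
	return 0;
}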
@@ -5302,7 +5328,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed;
+ unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
@@ -5319,7 +5345,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- freed = i915_gem_shrink_all(dev_priv);
+ freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
@@ -5350,14 +5376,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (unlock)
mutex_unlock(&dev->struct_mutex);
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed, pinned);
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
- *(unsigned long *)ptr += freed;
+ *(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8f1580..d17ff435f276 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,6 +88,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
+ trace_i915_context_free(ctx);
+
if (i915.enable_execlists)
intel_lr_context_free(ctx);
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
ctx->ppgtt = ppgtt;
}
+ trace_i915_context_create(ctx);
+
return ctx;
err_unpin:
@@ -522,6 +527,7 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
+ struct i915_vma *vma;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -548,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
from = ring->last_context;
if (to->ppgtt) {
+ trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
@@ -571,11 +578,10 @@ static int do_switch(struct intel_engine_cs *ring,
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
- &dev_priv->gtt.base);
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
- }
+ vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+ if (!(vma->bound & GLOBAL_BIND))
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+ GLOBAL_BIND);
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -629,7 +635,7 @@ done:
if (uninitialized) {
if (ring->init_context) {
- ret = ring->init_context(ring);
+ ret = ring->init_context(ring, to);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a0611bb576b..f06027ba3ee5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
+ WARN_ONCE(obj->base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
@@ -357,12 +360,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- struct i915_vma *vma =
- list_first_entry(&target_i915_obj->vma_list,
- typeof(*vma), vma_link);
- vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
- }
+ !(target_vma->bound & GLOBAL_BIND)))
+ target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -531,7 +531,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
flags = 0;
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_MAPPABLE;
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
+{
+ int ret;
+
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(ring->dev)->gen >= 4) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ } else {
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+ intel_ring_emit(ring, DR1);
+ intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+ intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+ intel_ring_emit(ring, DR4);
+ intel_ring_emit(ring, 0);
+ }
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
+ ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->ums.mm_suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1403,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
file->is_master);
- if (ret)
- goto err;
-
- /*
- * XXX: Actually do this when enabling batch copy...
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
- * from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically don't
- * want that set when the command parser is enabled.
- */
+ if (ret) {
+ if (ret != -EACCES)
+ goto err;
+ } else {
+ /*
+ * XXX: Actually do this when enabling batch copy...
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+ * from MI_BATCH_BUFFER_START commands issued in the
+ * dispatch_execbuffer implementations. We specifically don't
+ * want that set when the command parser is enabled.
+ */
+ }
}
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 728938f02341..171f6eafdeee 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,13 +35,26 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
- if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ bool has_aliasing_ppgtt;
+ bool has_full_ppgtt;
+
+ has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+ has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+ if (IS_GEN8(dev))
+ has_full_ppgtt = false; /* XXX why? */
+
+ /*
+ * We don't allow disabling PPGTT for gen9+ as it's a requirement for
+ * execlists, the sole mechanism available to submit work.
+ */
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0;
if (enable_ppgtt == 1)
return 1;
- if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+ if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
#ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +72,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
- return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+ return has_aliasing_ppgtt ? 1 : 0;
}
@@ -156,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
- /* Mark the page as writeable. Other platforms don't have a
- * setting for read-only/writable, so this matches that behavior.
- */
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
@@ -1092,7 +1102,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
@@ -1166,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
ppgtt->file_priv = fpriv;
+ trace_i915_ppgtt_create(&ppgtt->base);
+
return ppgtt;
}
@@ -1174,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
+ trace_i915_ppgtt_release(&ppgtt->base);
+
/* vmas should already be unbound */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1258,7 +1272,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg = I915_READ(RING_FAULT_REG(ring));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\\n"
+ "\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
@@ -1328,7 +1342,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
@@ -1525,7 +1539,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
- vma->obj->has_global_gtt_mapping = 1;
+ vma->bound = GLOBAL_BIND;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1544,7 +1558,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
BUG_ON(!i915_is_ggtt(vma->vm));
- vma->obj->has_global_gtt_mapping = 0;
+ vma->bound = 0;
intel_gtt_clear_range(first, size);
}
@@ -1572,24 +1586,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
- if (!obj->has_global_gtt_mapping ||
+ if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level, flags);
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
}
if (dev_priv->mm.aliasing_ppgtt &&
- (!obj->has_aliasing_ppgtt_mapping ||
+ (!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
cache_level, flags);
- vma->obj->has_aliasing_ppgtt_mapping = 1;
+ vma->bound |= LOCAL_BIND;
}
}
@@ -1599,21 +1613,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
- if (obj->has_global_gtt_mapping) {
+ if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
true);
- obj->has_global_gtt_mapping = 0;
+ vma->bound &= ~GLOBAL_BIND;
}
- if (obj->has_aliasing_ppgtt_mapping) {
+ if (vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
true);
- obj->has_aliasing_ppgtt_mapping = 0;
+ vma->bound &= ~LOCAL_BIND;
}
}
@@ -1650,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
}
}
-int i915_gem_setup_global_gtt(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
+static int i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
@@ -1691,7 +1705,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
}
dev_priv->gtt.base.start = start;
@@ -1764,7 +1778,6 @@ static int setup_scratch_page(struct drm_device *dev)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
- get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1802,6 @@ static void teardown_scratch_page(struct drm_device *dev)
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(page);
__free_page(page);
}
@@ -1859,6 +1871,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
return (gmch_ctrl - 0x17 + 9) << 22;
}
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+ gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+ gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+ if (gen9_gmch_ctl < 0xf0)
+ return gen9_gmch_ctl << 25; /* 32 MB units */
+ else
+ /* 4MB increments, starting with 0xf0 == 4MB */
+ return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
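Worked example (not part of this patch): gen9_get_stolen_size() above decodes the graphics-mode-select field two ways — values below 0xf0 count 32MB units, while values from 0xf0 upwards count 4MB increments starting at 4MB. A self-contained check of that arithmetic, with the register shift/mask already applied to the example values:

#include <assert.h>
#include <stddef.h>

/* Same decoding as gen9_get_stolen_size(), operating on an already
 * shifted/masked field value. */
static size_t gen9_stolen_bytes(unsigned gms)
{
	if (gms < 0xf0)
		return (size_t)gms << 25;		/* 32MB units */
	return (size_t)(gms - 0xf0 + 1) << 22;		/* 4MB units */
}

int main(void)
{
	assert(gen9_stolen_bytes(0x01) == 32u << 20);	/* 32MB */
	assert(gen9_stolen_bytes(0x02) == 64u << 20);	/* 64MB */
	assert(gen9_stolen_bytes(0xf0) ==  4u << 20);	/* first 4MB step */
	assert(gen9_stolen_bytes(0xf1) ==  8u << 20);	/* second 4MB step */
	return 0;
}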
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
@@ -1934,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
- * Note that the harware enforces snooping for all page
- * table accesses. The snoop bit is actually ignored for
- * PDEs.
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
@@ -1971,7 +2003,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+ gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ } else if (IS_CHERRYVIEW(dev)) {
*stolen = chv_get_stolen_size(snb_gmch_ctl);
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
@@ -2143,6 +2178,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->obj = obj;
switch (INTEL_INFO(vm->dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d5c14af51e99..beaf4bcfdac8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -123,6 +123,12 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ /** Flags and address space this VMA is bound to */
+#define GLOBAL_BIND (1<<0)
+#define LOCAL_BIND (1<<1)
+#define PTE_READ_ONLY (1<<2)
+ unsigned int bound : 4;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -155,8 +161,6 @@ struct i915_vma {
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
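Illustrative sketch (not part of this patch): this header change replaces the per-object has_global_gtt_mapping/has_aliasing_ppgtt_mapping booleans with a per-VMA 'bound' flag word, which the rest of the patch then tests and updates. The bookkeeping pattern in isolation, with a stand-in VMA type:

#include <assert.h>

#define GLOBAL_BIND (1u << 0)
#define LOCAL_BIND  (1u << 1)

struct vma_sketch { unsigned bound; };

/* Record/clear which address spaces currently hold a mapping for the VMA. */
static void bind(struct vma_sketch *vma, unsigned flags)
{
	vma->bound |= flags;
}

static void unbind(struct vma_sketch *vma, unsigned flags)
{
	vma->bound &= ~flags;
}

int main(void)
{
	struct vma_sketch vma = { 0 };

	bind(&vma, GLOBAL_BIND | LOCAL_BIND);
	unbind(&vma, LOCAL_BIND);
	assert(vma.bound == GLOBAL_BIND);	/* still globally bound */
	return 0;
}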
@@ -270,8 +274,6 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
-int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
- unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9a62d75aa57..98dcd94acba8 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -38,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return &gen7_null_state;
case 8:
return &gen8_null_state;
+ case 9:
+ return &gen9_null_state;
}
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 85fda6b803e4..a2045848bd1a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
"Graphics Stolen Memory");
- if (r == NULL) {
+ /*
+ * GEN3 firmware likes to smash pci bridges into the stolen
+ * range. Apparently this works.
+ */
+ if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
@@ -533,7 +537,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
}
- obj->has_global_gtt_mapping = 1;
+ vma->bound |= GLOBAL_BIND;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2b1eaa29ada4..4727a4e2c87c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
- uint32_t dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
- dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated with
- * identically sized dimms. We don't need to check the 3rd
- * channel because no cpu with gpu attached ships in that
- * configuration. Also, swizzling only makes sense for 2
- * channels anyway. */
- if (dimm_c0 == dimm_c1) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
+ if (dev_priv->preserve_bios_swizzle) {
+ if (I915_READ(DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
} else {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated
+ * with identically sized dimms. We don't need to check
+ * the 3rd channel because no cpu with gpu attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
}
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
@@ -167,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
break;
}
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (IS_GEN4(dev)) {
+ uint32_t ddc2 = I915_READ(DCC2);
+
+ if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ }
+
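Hedged sketch (not part of this patch): the QUIRK_PIN_SWIZZLED_PAGES handling added here is spread across get_pages, set_tiling, madvise and free, but the rule they jointly enforce appears to be: while the quirk is set, a tiled object that userspace still wants (WILLNEED) keeps an extra pin on its pages. That predicate, expressed with stand-in types:

#include <stdbool.h>

/* Stand-ins for the driver's object fields and dev_priv->quirks bit. */
enum tiling { TILING_NONE, TILING_X, TILING_Y };
enum madv   { MADV_WILLNEED, MADV_DONTNEED };

struct gem_obj_sketch {
	bool has_pages;
	enum tiling tiling_mode;
	enum madv madv;
};

/* True when the bit-17 swizzle quirk requires the object's pages to stay
 * pinned: pages exist, the object is tiled, and userspace still wants it. */
static bool needs_swizzled_page_pin(const struct gem_obj_sketch *obj,
				    bool quirk_pin_swizzled_pages)
{
	return quirk_pin_swizzled_pages &&
	       obj->has_pages &&
	       obj->tiling_mode != TILING_NONE &&
	       obj->madv == MADV_WILLNEED;
}

int main(void)
{
	struct gem_obj_sketch obj = { true, TILING_X, MADV_WILLNEED };

	return needs_swizzled_page_pin(&obj, true) ? 0 : 1;
}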
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
@@ -369,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
+ if (obj->pages &&
+ obj->madv == I915_MADV_WILLNEED &&
+ dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (args->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_unpin_pages(obj);
+ if (obj->tiling_mode == I915_TILING_NONE)
+ i915_gem_object_pin_pages(obj);
+ }
+
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
@@ -434,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2c87a797213f..cdaee6ce05f8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
- struct drm_i915_error_ring *ring)
+ struct drm_i915_error_state *error,
+ int ring_idx)
{
+ struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
if (!ring->valid)
return;
+ err_printf(m, "%s command stream:\n", ring_str(ring_idx));
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- err_printf(m, "%s command stream:\n", ring_str(i));
- i915_ring_error_state(m, dev, &error->ring[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+ i915_ring_error_state(m, dev, error, i);
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
@@ -565,6 +567,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
struct drm_i915_error_object *dst;
+ struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
@@ -585,16 +588,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
dst->gtt_offset = -1;
reloc_offset = dst->gtt_offset;
+ if (i915_is_ggtt(vm))
+ vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- i915_is_ggtt(vm) &&
- src->has_global_gtt_mapping &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ vma && (vma->bound & GLOBAL_BIND) &&
+ reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!src->has_global_gtt_mapping)
+ if (!(vma && vma->bound & GLOBAL_BIND))
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
@@ -765,6 +769,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
case 7:
case 6:
@@ -804,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (!error->semaphore_obj)
error->semaphore_obj =
- i915_error_object_create(dev_priv,
- dev_priv->semaphore_obj,
- &dev_priv->gtt.base);
+ i915_error_ggtt_object_create(dev_priv,
+ dev_priv->semaphore_obj);
for_each_ring(to, dev_priv, i) {
int idx;
@@ -923,6 +927,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
switch (INTEL_INFO(dev)->gen) {
+ case 9:
case 8:
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
@@ -1238,7 +1243,8 @@ static void i915_error_capture_msg(struct drm_device *dev,
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+ "GPU HANG: ecode %d:%d:0x%08x",
+ INTEL_INFO(dev)->gen, ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1326,13 +1332,12 @@ void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
}
@@ -1346,12 +1351,11 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ spin_lock_irq(&dev_priv->gpu_error.lock);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irq(&dev_priv->gpu_error.lock);
if (error)
kref_put(&error->ref, i915_error_state_free);
@@ -1389,6 +1393,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
+ case 9:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e26251..176de6322e4d 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
-#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
-#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b6e287..981834b0f9b6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling the
+ * interrupt handling. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- POSTING_READ(GEN8_##type##_IER(which)); \
+ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
+ POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
- I915_WRITE(type##IMR, (imr_val)); \
I915_WRITE(type##IER, (ier_val)); \
- POSTING_READ(type##IER); \
+ I915_WRITE(type##IMR, (imr_val)); \
+ POSTING_READ(type##IMR); \
} while (0)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+
/* For display hotplug interrupt */
-static void
+void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -146,7 +156,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
}
}
-static void
+void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0);
}
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
-{
- uint32_t new_val;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- new_val = dev_priv->pm_irq_mask;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
- POSTING_READ(GEN6_PMIMR);
- }
-}
-
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, mask);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
- snb_update_pm_irq(dev_priv, mask, 0);
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
-static bool ivb_can_enable_err_int(struct drm_device *dev)
+static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- enum pipe pipe;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->cpu_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
- * bdw_update_pm_irq - update GT interrupt 2
+ * snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
- *
- * Copied from the snb function, updated with relevant register offsets
*/
-static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
@@ -264,144 +231,87 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
- I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
- POSTING_READ(GEN8_GT_IMR(2));
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- bdw_update_pm_irq(dev_priv, mask, mask);
-}
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-{
- bdw_update_pm_irq(dev_priv, mask, 0);
+ snb_update_pm_irq(dev_priv, mask, mask);
}
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *crtc;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (crtc->pch_fifo_underrun_disabled)
- return false;
- }
-
- return true;
+ snb_update_pm_irq(dev_priv, mask, 0);
}
-void i9xx_check_fifo_underruns(struct drm_device *dev)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- for_each_intel_crtc(dev, crtc) {
- u32 reg = PIPESTAT(crtc->pipe);
- u32 pipestat;
-
- if (crtc->cpu_fifo_underrun_disabled)
- continue;
-
- pipestat = I915_READ(reg) & 0xffff0000;
- if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
- continue;
-
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
-
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
- }
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ __gen6_disable_pm_irq(dev_priv, mask);
}
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_reset_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
-
- assert_spin_locked(&dev_priv->irq_lock);
+ uint32_t reg = gen6_pm_iir(dev_priv);
- if (enable) {
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
- } else {
- if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
- }
+ spin_lock_irq(&dev_priv->irq_lock);
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ I915_WRITE(reg, dev_priv->pm_rps_events);
+ POSTING_READ(reg);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
- DE_PIPEB_FIFO_UNDERRUN;
- if (enable)
- ironlake_enable_display_irq(dev_priv, bit);
- else
- ironlake_disable_display_irq(dev_priv, bit);
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+ dev_priv->rps.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- if (!ivb_can_enable_err_int(dev))
- return;
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.interrupts_enabled = false;
+ spin_unlock_irq(&dev_priv->irq_lock);
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
- } else {
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ cancel_work_sync(&dev_priv->rps.work);
- if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
- }
- }
-}
+ spin_lock_irq(&dev_priv->irq_lock);
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
+ ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
- assert_spin_locked(&dev_priv->irq_lock);
+ __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
+ ~dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+ I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- if (enable)
- dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
- else
- dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ dev_priv->rps.pm_iir = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -410,9 +320,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
uint32_t sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
@@ -426,160 +336,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
}
-#define ibx_enable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), 0)
-
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
- if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
- else
- ibx_disable_display_interrupt(dev_priv, bit);
-}
-
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable, bool old)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
- if (!cpt_can_enable_serr_int(dev))
- return;
-
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- } else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
- if (old && I915_READ(SERR_INT) &
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- transcoder_name(pch_transcoder));
- }
- }
-}
-
-/**
- * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pipe: pipe
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable CPU fifo underruns for a specific
- * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
- * reporting for one pipe may also disable all the other CPU error interruts for
- * the other pipes, due to the fact that there's just one interrupt mask/enable
- * bit for all the pipes.
- *
- * Returns the previous state of underrun reporting.
- */
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool old;
-
- assert_spin_locked(&dev_priv->irq_lock);
-
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
- if (HAS_GMCH_DISPLAY(dev))
- i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev))
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
-
- return old;
-}
-
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
- bool ret;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return ret;
-}
-
-static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
-/**
- * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
- * @dev: drm device
- * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
- * @enable: true if we want to report FIFO underrun errors, false otherwise
- *
- * This function makes us disable or enable PCH fifo underruns for a specific
- * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
- * underrun reporting for one transcoder may also disable all the other PCH
- * error interruts for the other transcoders, due to the fact that there's just
- * one interrupt mask/enable bit for all the transcoders.
- *
- * Returns the previous state of underrun reporting.
- */
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
- enum transcoder pch_transcoder,
- bool enable)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- bool old;
-
- /*
- * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
- * has only one pch transcoder A that all pipes can use. To avoid racy
- * pch transcoder -> pipe lookups from interrupt code simply store the
- * underrun statistics in crtc A. Since we never expose this anywhere
- * nor use it outside of the fifo underrun code here using the "wrong"
- * crtc on LPT won't cause issues.
- */
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
-
- if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
- else
- cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- return old;
-}
-
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -589,6 +345,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +372,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +452,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
/**
@@ -1094,18 +851,17 @@ static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, dig_port_work);
- unsigned long irqflags;
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
int i, ret;
u32 old_bits = 0;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
long_port_mask = dev_priv->long_hpd_port_mask;
dev_priv->long_hpd_port_mask = 0;
short_port_mask = dev_priv->short_hpd_port_mask;
dev_priv->short_hpd_port_mask = 0;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
for (i = 0; i < I915_MAX_PORTS; i++) {
bool valid = false;
@@ -1130,9 +886,9 @@ static void i915_digport_work_func(struct work_struct *work)
}
if (old_bits) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hpd_event_bits |= old_bits;
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
schedule_work(&dev_priv->hotplug_work);
}
}
@@ -1151,7 +907,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
- unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
@@ -1159,7 +914,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
@@ -1193,7 +948,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
@@ -1260,11 +1015,7 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_notify_mmio_flip(ring);
-
wake_up_all(&ring->irq_queue);
- i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1400,14 +1151,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
+ /* Speed up work cancellation when disabling rps interrupts. */
+ if (!dev_priv->rps.interrupts_enabled) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (INTEL_INFO(dev_priv->dev)->gen >= 8)
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- else {
- /* Make sure not to corrupt PMIMR state used by ringbuffer */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- }
+ /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
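[Editor's note: illustration, not part of the patch.] The rps.interrupts_enabled check added above follows a common pattern for making work-item cancellation fast: the disable path clears a flag under the same lock the worker takes, so a worker that was already queued bails out immediately instead of touching hardware. A minimal sketch of that pattern with hypothetical names (my_dev, work_enabled, my_lock), not the driver's actual structures:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_dev {
	spinlock_t my_lock;
	bool work_enabled;		/* cleared by the disable path under my_lock */
	struct work_struct work;
	u32 pending_bits;
};

static void my_work_func(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, work);
	u32 bits;

	spin_lock_irq(&d->my_lock);
	if (!d->work_enabled) {
		/* Disable path already ran: cheap early exit, no HW access. */
		spin_unlock_irq(&d->my_lock);
		return;
	}
	bits = d->pending_bits;
	d->pending_bits = 0;
	spin_unlock_irq(&d->my_lock);

	/* ... act on bits outside the lock ... */
}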
@@ -1488,7 +1240,6 @@ static void ivybridge_parity_work(struct work_struct *work)
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
- unsigned long flags;
uint8_t slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1298,9 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -1601,28 +1352,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
- i915_handle_error(dev, false, "GT error interrupt 0x%08x",
- gt_iir);
- }
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
-static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- if ((pm_iir & dev_priv->pm_rps_events) == 0)
- return;
-
- spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
-}
-
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@@ -1684,7 +1420,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
+ gen6_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1898,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
- DRM_ERROR("spurious interrupt\n");
+ DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
@@ -1984,24 +1720,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ /* TODO: RPS on GEN9+ is not supported yet. */
+ if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+ "GEN9+: unexpected RPS IRQ\n"))
+ return;
+
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ if (dev_priv->rps.interrupts_enabled) {
+ dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
spin_unlock(&dev_priv->irq_lock);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
}
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ return;
+
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
- i915_handle_error(dev_priv->dev, false,
- "VEBOX CS error interrupt 0x%08x",
- pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
@@ -2031,9 +1773,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
* we need to be careful that we only handle what we want to
* handle.
*/
- mask = 0;
- if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
- mask |= PIPE_FIFO_UNDERRUN_STATUS;
+
+ /* fifo underruns are filtered in the underrun handler. */
+ mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
case PIPE_A:
@@ -2078,9 +1820,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2247,14 +1988,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
@@ -2267,12 +2004,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
- if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev))
@@ -2294,19 +2027,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
DRM_ERROR("PCH poison interrupt\n");
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
- false))
- DRM_ERROR("PCH transcoder A FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
- false))
- DRM_ERROR("PCH transcoder B FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
- false))
- DRM_ERROR("PCH transcoder C FIFO underrun\n");
+ intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2372,9 +2099,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
intel_check_page_flip(dev, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2524,6 +2249,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
+ u32 aux_mask = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev))
+ aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2556,7 +2286,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
- if (tmp & GEN8_AUX_CHANNEL_A)
+
+ if (tmp & aux_mask)
dp_aux_irq_handler(dev);
else
DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -2566,7 +2297,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
for_each_pipe(dev_priv, pipe) {
- uint32_t pipe_iir;
+ uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
@@ -2575,11 +2306,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ if (IS_GEN9(dev))
+ flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+ if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
@@ -2587,18 +2324,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
+
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ if (IS_GEN9(dev))
+ fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2697,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
* simulated reset via debugs, so get an RPM reference.
*/
intel_runtime_pm_get(dev_priv);
+
+ intel_prepare_reset(dev);
+
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
@@ -2705,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
*/
ret = i915_reset(dev);
- intel_display_handle_reset(dev);
+ intel_finish_reset(dev);
intel_runtime_pm_put(dev_priv);
@@ -3330,10 +3072,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
+
if (!i915.enable_hangcheck)
return;
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ /* Don't continually defer the hangcheck, but make sure it is active */
+ if (timer_pending(timer))
+ return;
+ mod_timer(timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
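[Editor's note: illustration, not part of the patch.] The hunk above changes the timer behaviour subtly: mod_timer() on a pending timer pushes its expiry further out, so calling it on every request would keep deferring the hangcheck indefinitely under a steady stream of work; checking timer_pending() first arms the timer only when it is not already ticking. A minimal sketch of the resulting arm-if-idle pattern, using a hypothetical watchdog helper:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical watchdog kick: arm-if-idle rather than re-arm on every call. */
static void watchdog_kick(struct timer_list *t, unsigned long period_jiffies)
{
	/*
	 * mod_timer() would move an already-pending expiry further out, so a
	 * busy caller could starve the callback indefinitely. Only arm the
	 * timer when it is not already pending.
	 */
	if (timer_pending(t))
		return;

	mod_timer(t, round_jiffies_up(jiffies + period_jiffies));
}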
@@ -3396,10 +3143,22 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ GEN5_IRQ_RESET(VLV_);
+}
+
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
@@ -3407,22 +3166,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
- /* and GT */
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIIR, I915_READ(GTIIR));
-
gen5_gt_irq_reset(dev);
- I915_WRITE(DPINVGTT, 0xff);
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_reset(dev_priv);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3444,8 +3192,8 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe)))
+ if (intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,21 +3205,19 @@ static void gen8_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
- unsigned long irqflags;
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3480,20 +3226,9 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- POSTING_READ(GEN8_PCU_IIR);
-
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3584,7 +3319,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
@@ -3623,9 +3357,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return 0;
@@ -3635,45 +3369,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
+ enum pipe pipe;
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask;
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
@@ -3681,14 +3421,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
- PIPE_GMBUS_INTERRUPT_STATUS);
- i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
+ i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
- I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
- I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
+
+ for_each_pipe(dev_priv, pipe)
+ I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
@@ -3701,7 +3442,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_install(dev_priv);
}
@@ -3714,34 +3455,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
- if (dev_priv->dev->irq_enabled)
+ if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_uninstall(dev_priv);
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
-
dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IER);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ POSTING_READ(VLV_IMR);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ vlv_display_irq_postinstall(dev_priv);
gen5_gt_irq_postinstall(dev);
@@ -3783,24 +3526,35 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
- uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
- GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
+ uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ uint32_t de_pipe_enables;
int pipe;
+ u32 aux_en = GEN8_AUX_CHANNEL_A;
+
+ if (IS_GEN9(dev_priv)) {
+ de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+ GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+ } else
+ de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+ GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+ de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
+
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_enabled(dev_priv,
+ if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3823,33 +3577,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
- u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
- unsigned long irqflags;
- int pipe;
- /*
- * Leave vblank interrupts masked initially. enable/disable will
- * toggle them based on usage.
- */
- dev_priv->irq_mask = ~enable_mask;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, enable_mask);
+ vlv_display_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
@@ -3869,41 +3598,39 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
+static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ valleyview_display_irqs_uninstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_display_irq_reset(dev_priv);
+
+ dev_priv->irq_mask = 0;
+}
+
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
- int pipe;
if (!dev_priv)
return;
I915_WRITE(VLV_MASTER_IER, 0);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_uninstall(dev_priv);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- dev_priv->irq_mask = 0;
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- POSTING_READ(VLV_IER);
+ vlv_display_irq_uninstall(dev_priv);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
if (!dev_priv)
return;
@@ -3911,44 +3638,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
-#define GEN8_IRQ_FINI_NDX(type, which) \
-do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
-} while (0)
-
-#define GEN8_IRQ_FINI(type) \
-do { \
- I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
- I915_WRITE(GEN8_##type##_IER, 0); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR); \
- I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
-} while (0)
-
- GEN8_IRQ_FINI_NDX(GT, 0);
- GEN8_IRQ_FINI_NDX(GT, 1);
- GEN8_IRQ_FINI_NDX(GT, 2);
- GEN8_IRQ_FINI_NDX(GT, 3);
-
- GEN8_IRQ_FINI(PCU);
-
-#undef GEN8_IRQ_FINI
-#undef GEN8_IRQ_FINI_NDX
-
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ gen8_gt_irq_reset(dev_priv);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
+ GEN5_IRQ_RESET(GEN8_PCU_);
- I915_WRITE(VLV_IMR, 0xffffffff);
- I915_WRITE(VLV_IER, 0x0);
- I915_WRITE(VLV_IIR, 0xffffffff);
- POSTING_READ(VLV_IIR);
+ vlv_display_irq_uninstall(dev_priv);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3976,7 +3670,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +3692,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4047,7 +3740,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
- unsigned long irqflags;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,11 +3755,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4079,13 +3769,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
- i915_update_dri1_breadcrumb(dev);
-
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
@@ -4101,9 +3789,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
iir = new_iir;
@@ -4149,7 +3837,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
- unsigned long irqflags;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,10 +3874,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
@@ -4234,7 +3921,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,11 +3936,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4266,7 +3950,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4297,9 +3981,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv,
+ pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4324,8 +4008,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = new_iir;
} while (iir & ~flip_mask);
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4372,7 +4054,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
- unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4074,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
/*
* Enable some error detection, note the instruction error mask
@@ -4462,7 +4143,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
- unsigned long irqflags;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,11 +4159,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false,
- "Command parser error, iir 0x%08x",
- iir);
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
@@ -4497,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
irq_received = true;
}
}
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock(&dev_priv->irq_lock);
if (!irq_received)
break;
@@ -4527,9 +4205,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4556,8 +4233,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = new_iir;
}
- i915_update_dri1_breadcrumb(dev);
-
return ret;
}
@@ -4584,19 +4259,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(IIR, I915_READ(IIR));
}
-static void intel_hpd_irq_reenable(struct work_struct *work)
+static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- unsigned long irqflags;
int i;
intel_runtime_pm_get(dev_priv);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
@@ -4620,14 +4294,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_irq_init(struct drm_device *dev)
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support including work items, timers
+ * and all the vtables. It does not set up the interrupt itself though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4317,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
@@ -4646,17 +4327,14 @@ void intel_irq_init(struct drm_device *dev)
i915_hangcheck_elapsed,
(unsigned long) dev);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
- intel_hpd_irq_reenable);
+ intel_hpd_irq_reenable_work);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- /* Haven't installed the IRQ handler yet */
- dev_priv->pm._irqs_disabled = true;
-
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
- } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} else {
@@ -4669,7 +4347,7 @@ void intel_irq_init(struct drm_device *dev)
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4355,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4363,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_GEN8(dev)) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4388,12 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev)->gen == 2) {
+ if (INTEL_INFO(dev_priv)->gen == 2) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev)->gen == 3) {
+ } else if (INTEL_INFO(dev_priv)->gen == 3) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4411,23 @@ void intel_irq_init(struct drm_device *dev)
}
}
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_install(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- unsigned long irqflags;
int i;
for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4445,72 @@ void intel_hpd_init(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
-/* Disable interrupts so we can allow runtime PM. */
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves the hotplug
+ * handling still disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /*
+ * We enable some interrupt sources in our postinstall hooks, so mark
+ * interrupts as enabled _before_ actually enabling them to avoid
+ * special cases in our ordering checks.
+ */
+ dev_priv->pm.irqs_enabled = true;
- dev->driver->irq_uninstall(dev);
- dev_priv->pm._irqs_disabled = true;
+ return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
-/* Restore interrupts so we can recover from runtime PM. */
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_irq_uninstall(dev_priv->dev);
+ intel_hpd_cancel_work(dev_priv);
+ dev_priv->pm.irqs_enabled = false;
+}
- dev_priv->pm._irqs_disabled = false;
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+ dev_priv->pm.irqs_enabled = true;
+ dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+ dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
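[Editor's note: illustration, not part of the patch.] Taken together, the kerneldoc added above describes a two-stage bring-up: intel_irq_init() prepares work items, timers and vtables, intel_irq_install() enables the hardware interrupt, and only then intel_hpd_init() turns on hotplug processing; the runtime-pm helpers toggle the same state later. A rough sketch of the intended call order during driver load (headers, error unwinding and the surrounding i915 code omitted, so treat this as a sketch of the ordering rather than the actual load path):

/* Illustrative ordering only; the real i915 load path has many more steps. */
static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_irq_init(dev_priv);		/* work items, timers, vtables */

	ret = intel_irq_install(dev_priv);	/* hardware interrupt on */
	if (ret)
		return ret;

	intel_hpd_init(dev_priv);		/* hotplug may now run concurrently */
	return 0;
}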
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430e..eefdc238f70b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,14 +26,25 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
-
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
-#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
-#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+#define _MASKED_FIELD(mask, value) ({ \
+ if (__builtin_constant_p(mask)) \
+ BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
+ if (__builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
+ if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
+ BUILD_BUG_ON_MSG((value) & ~(mask), \
+ "Incorrect value for mask"); \
+ (mask) << 16 | (value); })
+#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
+#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
+
+
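+/* [Editor's note: worked example, not part of the patch.] These registers
+ * carry a write-enable mask in their upper 16 bits: a write only affects bits
+ * whose mask bit is set, which is why _MASKED_FIELD() packs
+ * (mask << 16) | value and the BUILD_BUG_ON_MSG checks reject anything outside
+ * the low 16 bits. With an illustrative EXAMPLE_MODE_BIT of (1 << 3):
+ *
+ *   _MASKED_BIT_ENABLE(EXAMPLE_MODE_BIT)
+ *     == _MASKED_FIELD(0x0008, 0x0008) == 0x00080008
+ *     -> only bit 3 is written, and it is set to 1.
+ *
+ *   _MASKED_BIT_DISABLE(EXAMPLE_MODE_BIT)
+ *     == _MASKED_FIELD(0x0008, 0) == 0x00080000
+ *     -> only bit 3 is written, and it is cleared.
+ *
+ * All other bits of the register are left untouched by either write.
+ */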
/* PCI config space */
@@ -74,15 +85,17 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
/* Graphics reset regs */
-#define I965_GDRST 0xc0 /* PCI config register */
+#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
+#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
@@ -248,6 +261,16 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -314,6 +337,8 @@
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_PREDICATE_SRC0 (0x2400)
+#define MI_PREDICATE_SRC1 (0x2408)
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@@ -564,6 +589,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GPLLENABLE (1<<4)
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -672,7 +698,7 @@ enum punit_power_well {
* need to be accessed during AUX communication,
*
* Generally the common lane corresponds to the pipe and
- * the spline (PCS/TX) correponds to the port.
+ * the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
@@ -796,6 +822,8 @@ enum punit_power_well {
#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -836,12 +864,31 @@ enum punit_power_well {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
+#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
+#define DPIO_PCS_TX2MARGIN_000 (0<<13)
+#define DPIO_PCS_TX2MARGIN_101 (1<<13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
+#define DPIO_PCS_TX1MARGIN_000 (0<<10)
+#define DPIO_PCS_TX1MARGIN_101 (1<<10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+#define _VLV_PCS01_DW9_CH0 0x224
+#define _VLV_PCS23_DW9_CH0 0x424
+#define _VLV_PCS01_DW9_CH1 0x2624
+#define _VLV_PCS23_DW9_CH1 0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -853,8 +900,18 @@ enum punit_power_well {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
+#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+#define _VLV_PCS01_DW11_CH0 0x022c
+#define _VLV_PCS23_DW11_CH0 0x042c
+#define _VLV_PCS01_DW11_CH1 0x262c
+#define _VLV_PCS23_DW11_CH1 0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -1237,7 +1294,7 @@ enum punit_power_well {
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
-#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
@@ -1999,6 +2056,8 @@ enum punit_power_well {
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+#define DCC2 0x10204
+#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
@@ -2282,7 +2341,6 @@ enum punit_power_well {
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
-#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -2506,9 +2564,7 @@ enum punit_power_well {
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
-#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
-#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3701,7 @@ enum punit_power_well {
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
+#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
/*
* Computing GMCH M and N values for the Display Port link
@@ -4024,17 +4081,18 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_PRECISION_64 (1<<31)
-#define DDL_CURSOR_PRECISION_32 (0<<31)
+#define DDL_CURSOR_PRECISION_HIGH (1<<31)
+#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
-#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
+#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
-#define DDL_PLANE_PRECISION_64 (1<<7)
-#define DDL_PLANE_PRECISION_32 (0<<7)
+#define DDL_PLANE_PRECISION_HIGH (1<<7)
+#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DRAIN_LATENCY_MASK 0x7f
@@ -4071,6 +4129,41 @@ enum punit_power_well {
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
+/* Watermark register definitions for SKL */
+#define CUR_WM_A_0 0x70140
+#define CUR_WM_B_0 0x71140
+#define PLANE_WM_1_A_0 0x70240
+#define PLANE_WM_1_B_0 0x71240
+#define PLANE_WM_2_A_0 0x70340
+#define PLANE_WM_2_B_0 0x71340
+#define PLANE_WM_TRANS_1_A_0 0x70268
+#define PLANE_WM_TRANS_1_B_0 0x71268
+#define PLANE_WM_TRANS_2_A_0 0x70368
+#define PLANE_WM_TRANS_2_B_0 0x71368
+#define CUR_WM_TRANS_A_0 0x70168
+#define CUR_WM_TRANS_B_0 0x71168
+#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_LINES_SHIFT 14
+#define PLANE_WM_LINES_MASK 0x1f
+#define PLANE_WM_BLOCKS_MASK 0x3ff
+
+#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
+#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
+#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
+
+#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
+#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
+#define PLANE_WM_TRANS(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
+
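+/* [Editor's note: worked expansion, not part of the patch.] The SKL watermark
+ * macros are parameterized three ways (pipe, plane, level); the arithmetic
+ * below simply follows the _PIPE/_PLANE definitions above. For plane index 0
+ * (i.e. the PLANE_WM_1_* registers) on pipe B, watermark level 2:
+ *
+ *   _PLANE_WM_1(PIPE_B)       = 0x70240 + 1 * (0x71240 - 0x70240) = 0x71240
+ *   _PLANE_WM_BASE(PIPE_B, 0) = _PLANE_WM_1(PIPE_B)               = 0x71240
+ *   PLANE_WM(PIPE_B, 0, 2)    = 0x71240 + 4 * 2                   = 0x71248
+ */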
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
@@ -4177,6 +4270,7 @@ enum punit_power_well {
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
@@ -4240,9 +4334,11 @@ enum punit_power_well {
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ROTATE_180 (1<<15)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1<<15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
+#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -4263,6 +4359,24 @@ enum punit_power_well {
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
+/* CHV pipe B blender and primary plane */
+#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND_LEGACY (0<<30)
+#define CHV_BLEND_ANDROID (1<<30)
+#define CHV_BLEND_MPO (2<<30)
+#define CHV_BLEND_MASK (3<<30)
+#define _CHV_CANVAS_A 0x60a04
+#define _PRIMPOS_A 0x60a08
+#define _PRIMSIZE_A 0x60a0c
+#define _PRIMCNSTALPHA_A 0x60a10
+#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+
+#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
+#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
+#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
+#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
+#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
+
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
@@ -4464,6 +4578,7 @@ enum punit_power_well {
#define SP_FORMAT_RGBA1010102 (9<<26)
#define SP_FORMAT_RGBX8888 (0xe<<26)
#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
#define SP_SOURCE_KEY (1<<22)
#define SP_YUV_BYTE_ORDER_MASK (3<<16)
#define SP_YUV_ORDER_YUYV (0<<16)
@@ -4472,6 +4587,7 @@ enum punit_power_well {
#define SP_YUV_ORDER_VYUY (3<<16)
#define SP_ROTATE_180 (1<<15)
#define SP_TILED (1<<10)
+#define SP_MIRROR (1<<8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -4482,6 +4598,7 @@ enum punit_power_well {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define SP_CONST_ALPHA_ENABLE (1<<31)
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
@@ -4510,6 +4627,195 @@ enum punit_power_well {
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
+/*
+ * CHV pipe B sprite CSC
+ *
+ * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
+ * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
+ * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
+ */
+#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
+#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
+#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
+#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
+#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
+#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
+#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
+#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
+
+#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
+#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
+#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
+#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
+
+#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
+#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
+#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
+#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
+
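As a rough illustration of the field encodings above (s3.12 coefficients, so 1.0 == 1 << 12), an identity matrix could be loaded like this; the helper is hypothetical and it assumes SPCSC_C0() carries the even-numbered coefficient of each register pair:

static void chv_sprite_load_identity_csc(struct drm_i915_private *dev_priv,
					 int sprite)
{
	/* c0/c4/c8 = 1.0, all other coefficients and offsets = 0 */
	I915_WRITE(SPCSCC01(sprite), SPCSC_C1(0) | SPCSC_C0(1 << 12));
	I915_WRITE(SPCSCC23(sprite), SPCSC_C1(0) | SPCSC_C0(0));
	I915_WRITE(SPCSCC45(sprite), SPCSC_C1(0) | SPCSC_C0(1 << 12));
	I915_WRITE(SPCSCC67(sprite), SPCSC_C1(0) | SPCSC_C0(0));
	I915_WRITE(SPCSCC8(sprite), SPCSC_C0(1 << 12));
	I915_WRITE(SPCSCYGOFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));
	I915_WRITE(SPCSCCBOFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));
	I915_WRITE(SPCSCCROFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));
}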
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A 0x70180
+#define _PLANE_CTL_2_A 0x70280
+#define _PLANE_CTL_3_A 0x70380
+#define PLANE_CTL_ENABLE (1 << 31)
+#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_CTL_FORMAT_MASK (0xf << 24)
+#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
+#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
+#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_ORDER_BGRX (0 << 20)
+#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
+#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
+#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
+#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
+#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
+#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_CTL_TILED_MASK (0x7 << 10)
+#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
+#define PLANE_CTL_TILED_X ( 1 << 10)
+#define PLANE_CTL_TILED_Y ( 4 << 10)
+#define PLANE_CTL_TILED_YF ( 5 << 10)
+#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ROTATE_MASK 0x3
+#define PLANE_CTL_ROTATE_0 0x0
+#define PLANE_CTL_ROTATE_180 0x2
+#define _PLANE_STRIDE_1_A 0x70188
+#define _PLANE_STRIDE_2_A 0x70288
+#define _PLANE_STRIDE_3_A 0x70388
+#define _PLANE_POS_1_A 0x7018c
+#define _PLANE_POS_2_A 0x7028c
+#define _PLANE_POS_3_A 0x7038c
+#define _PLANE_SIZE_1_A 0x70190
+#define _PLANE_SIZE_2_A 0x70290
+#define _PLANE_SIZE_3_A 0x70390
+#define _PLANE_SURF_1_A 0x7019c
+#define _PLANE_SURF_2_A 0x7029c
+#define _PLANE_SURF_3_A 0x7039c
+#define _PLANE_OFFSET_1_A 0x701a4
+#define _PLANE_OFFSET_2_A 0x702a4
+#define _PLANE_OFFSET_3_A 0x703a4
+#define _PLANE_KEYVAL_1_A 0x70194
+#define _PLANE_KEYVAL_2_A 0x70294
+#define _PLANE_KEYMSK_1_A 0x70198
+#define _PLANE_KEYMSK_2_A 0x70298
+#define _PLANE_KEYMAX_1_A 0x701a0
+#define _PLANE_KEYMAX_2_A 0x702a0
+#define _PLANE_BUF_CFG_1_A 0x7027c
+#define _PLANE_BUF_CFG_2_A 0x7037c
+
+#define _PLANE_CTL_1_B 0x71180
+#define _PLANE_CTL_2_B 0x71280
+#define _PLANE_CTL_3_B 0x71380
+#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane) \
+ _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+
+#define _PLANE_STRIDE_1_B 0x71188
+#define _PLANE_STRIDE_2_B 0x71288
+#define _PLANE_STRIDE_3_B 0x71388
+#define _PLANE_STRIDE_1(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe) \
+ _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane) \
+ _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B 0x7118c
+#define _PLANE_POS_2_B 0x7128c
+#define _PLANE_POS_3_B 0x7138c
+#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane) \
+ _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B 0x71190
+#define _PLANE_SIZE_2_B 0x71290
+#define _PLANE_SIZE_3_B 0x71390
+#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane) \
+ _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B 0x7119c
+#define _PLANE_SURF_2_B 0x7129c
+#define _PLANE_SURF_3_B 0x7139c
+#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane) \
+ _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B 0x711a4
+#define _PLANE_OFFSET_2_B 0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane) \
+ _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B 0x71194
+#define _PLANE_KEYVAL_2_B 0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B 0x71198
+#define _PLANE_KEYMSK_2_B 0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B 0x711a0
+#define _PLANE_KEYMAX_2_B 0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane) \
+ _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
+#define _PLANE_BUF_CFG_1_B 0x7127c
+#define _PLANE_BUF_CFG_2_B 0x7137c
+#define _PLANE_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
+#define _PLANE_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+#define PLANE_BUF_CFG(pipe, plane) \
+ _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+
+/* SKL new cursor registers */
+#define _CUR_BUF_CFG_A 0x7017c
+#define _CUR_BUF_CFG_B 0x7117c
+#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
+
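To show how the new per-plane registers compose, here is an illustrative fragment (not from the patch) that would enable plane 1 with an X-tiled XRGB8888 framebuffer; pipe and surf_addr are hypothetical locals, and on these platforms the PLANE_SURF write is what typically latches the update:

	u32 plane_ctl = PLANE_CTL_ENABLE |
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ORDER_BGRX |
			PLANE_CTL_TILED_X |
			PLANE_CTL_ROTATE_0;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);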
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -4625,6 +4931,18 @@ enum punit_power_well {
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+#define _PSA_CTL 0x68180
+#define _PSB_CTL 0x68980
+#define PS_ENABLE (1<<31)
+#define _PSA_WIN_SZ 0x68174
+#define _PSB_WIN_SZ 0x68974
+#define _PSA_WIN_POS 0x68170
+#define _PSB_WIN_POS 0x68970
+
+#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
+#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
+#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
@@ -4746,16 +5064,32 @@ enum punit_power_well {
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
+#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
+#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
+#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
+#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
+#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
+#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
+#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p)))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_PIPE_CURSOR_FAULT | \
+ GEN9_PIPE_PLANE3_FAULT | \
+ GEN9_PIPE_PLANE2_FAULT | \
+ GEN9_PIPE_PLANE1_FAULT)
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define GEN9_AUX_CHANNEL_D (1 << 27)
+#define GEN9_AUX_CHANNEL_C (1 << 26)
+#define GEN9_AUX_CHANNEL_B (1 << 25)
#define GEN8_AUX_CHANNEL_A (1 << 0)
#define GEN8_DE_MISC_ISR 0x44460
@@ -4839,6 +5173,8 @@ enum punit_power_well {
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
+#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5540,6 +5876,12 @@ enum punit_power_well {
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MEDIA_GEN9 0xa270
+#define FORCEWAKE_RENDER_GEN9 0xa278
+#define FORCEWAKE_BLITTER_GEN9 0xa188
+#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
+#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
+#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
@@ -5711,9 +6053,17 @@ enum punit_power_well {
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define DISPLAY_IPS_CONTROL 0x19
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+#define GEN6_PCODE_DATA1 0x13812C
+
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
+#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
+#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
+#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
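A rough sketch of how the watermark code might unpack the first four latency levels returned by the GEN9_PCODE_READ_MEM_LATENCY mailbox command (sandybridge_pcode_read() is the driver's existing mailbox helper; the wm[] array is hypothetical):

	u32 val = 0;	/* data0 selects the first group of levels */

	if (sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val) == 0) {
		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
	}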
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
@@ -5751,6 +6101,9 @@ enum punit_power_well {
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN9_HALF_SLICE_CHICKEN5 0xe188
+#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
+
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -5766,57 +6119,58 @@ enum punit_power_well {
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
#define G4X_AUD_CNTL_ST 0x620B4
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define IBX_HDMIW_HDMIEDID_A 0xE2050
-#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- IBX_HDMIW_HDMIEDID_A, \
- IBX_HDMIW_HDMIEDID_B)
-#define IBX_AUD_CNTL_ST_A 0xE20B4
-#define IBX_AUD_CNTL_ST_B 0xE21B4
+ _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- IBX_AUD_CNTL_ST_A, \
- IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
-#define IBX_ELD_ADDRESS (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
+ _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
-#define IBX_ELD_VALIDB (1 << 0)
-#define IBX_CP_READYB (1 << 1)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
-#define CPT_HDMIW_HDMIEDID_A 0xE5050
-#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- CPT_HDMIW_HDMIEDID_A, \
- CPT_HDMIW_HDMIEDID_B)
-#define CPT_AUD_CNTL_ST_A 0xE50B4
-#define CPT_AUD_CNTL_ST_B 0xE51B4
+ _CPT_HDMIW_HDMIEDID_A, \
+ _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- CPT_AUD_CNTL_ST_A, \
- CPT_AUD_CNTL_ST_B)
+ _CPT_AUD_CNTL_ST_A, \
+ _CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
-#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
-#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
- VLV_HDMIW_HDMIEDID_A, \
- VLV_HDMIW_HDMIEDID_B)
-#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
-#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+ _VLV_HDMIW_HDMIEDID_A, \
+ _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
- VLV_AUD_CNTL_ST_A, \
- VLV_AUD_CNTL_ST_B)
+ _VLV_AUD_CNTL_ST_A, \
+ _VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
/* These are the 4 32-bit write offset registers for each stream
@@ -5825,28 +6179,28 @@ enum punit_power_well {
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
-#define IBX_AUD_CONFIG_A 0xe2000
-#define IBX_AUD_CONFIG_B 0xe2100
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
- IBX_AUD_CONFIG_A, \
- IBX_AUD_CONFIG_B)
-#define CPT_AUD_CONFIG_A 0xe5000
-#define CPT_AUD_CONFIG_B 0xe5100
+ _IBX_AUD_CONFIG_A, \
+ _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
- CPT_AUD_CONFIG_A, \
- CPT_AUD_CONFIG_B)
-#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
-#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+ _CPT_AUD_CONFIG_A, \
+ _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
- VLV_AUD_CONFIG_A, \
- VLV_AUD_CONFIG_B)
+ _VLV_AUD_CONFIG_A, \
+ _VLV_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -5862,52 +6216,44 @@ enum punit_power_well {
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
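For reference, a small sketch (not in the patch) of how a manually programmed audio N value would be split across the renamed upper/lower fields; aud_config and n are hypothetical locals:

	u32 tmp = I915_READ(aud_config);

	tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
	tmp |= ((n >> 12) << AUD_CONFIG_UPPER_N_SHIFT) & AUD_CONFIG_UPPER_N_MASK;
	tmp |= ((n & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT) & AUD_CONFIG_LOWER_N_MASK;
	tmp |= AUD_CONFIG_N_PROG_ENABLE;
	I915_WRITE(aud_config, tmp);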
/* HSW Audio */
-#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
-#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
-#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
- HSW_AUD_CONFIG_A, \
- HSW_AUD_CONFIG_B)
-
-#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
-#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
-#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_MISC_CTRL_A, \
- HSW_AUD_MISC_CTRL_B)
-
-#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
-#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
- HSW_AUD_DIP_ELD_CTRL_ST_A, \
- HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ _HSW_AUD_CONFIG_A, \
+ _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_MISC_CTRL_A, \
+ _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
-#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
-#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */
-#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
- HSW_AUD_DIG_CNVT_1, \
- HSW_AUD_DIG_CNVT_2)
-#define DIP_PORT_SEL_MASK 0x3
-
-#define HSW_AUD_EDID_DATA_A 0x65050
-#define HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
- HSW_AUD_EDID_DATA_A, \
- HSW_AUD_EDID_DATA_B)
-
-#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
-#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
-#define AUDIO_INACTIVE_C (1<<11)
-#define AUDIO_INACTIVE_B (1<<7)
-#define AUDIO_INACTIVE_A (1<<3)
-#define AUDIO_OUTPUT_ENABLE_A (1<<2)
-#define AUDIO_OUTPUT_ENABLE_B (1<<6)
-#define AUDIO_OUTPUT_ENABLE_C (1<<10)
-#define AUDIO_ELD_VALID_A (1<<0)
-#define AUDIO_ELD_VALID_B (1<<4)
-#define AUDIO_ELD_VALID_C (1<<8)
-#define AUDIO_CP_READY_A (1<<1)
-#define AUDIO_CP_READY_B (1<<5)
-#define AUDIO_CP_READY_C (1<<9)
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ _HSW_AUD_DIG_CNVT_1, \
+ _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ _HSW_AUD_EDID_DATA_A, \
+ _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
@@ -6125,6 +6471,83 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+/*
+ * SKL Clocks
+ */
+
+/* CDCLK_CTL */
+#define CDCLK_CTL 0x46000
+#define CDCLK_FREQ_SEL_MASK (3<<26)
+#define CDCLK_FREQ_450_432 (0<<26)
+#define CDCLK_FREQ_540 (1<<26)
+#define CDCLK_FREQ_337_308 (2<<26)
+#define CDCLK_FREQ_675_617 (3<<26)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+
+/* LCPLL_CTL */
+#define LCPLL1_CTL 0x46010
+#define LCPLL2_CTL 0x46014
+#define LCPLL_PLL_ENABLE (1<<31)
+
+/* DPLL control1 */
+#define DPLL_CTRL1 0x6C058
+#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
+#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
+#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
+#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
+#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
+#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CRTL1_LINK_RATE_2700 0
+#define DPLL_CRTL1_LINK_RATE_1350 1
+#define DPLL_CRTL1_LINK_RATE_810 2
+#define DPLL_CRTL1_LINK_RATE_1620 3
+#define DPLL_CRTL1_LINK_RATE_1080 4
+#define DPLL_CRTL1_LINK_RATE_2160 5
+
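A minimal sketch of how these fields combine when forcing a fixed DP link rate onto DPLL id (illustrative only; the real programming sequence lives in the DDI PLL selection code, and id is a hypothetical local):

	u32 val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CRTL1_LINK_RATE_MASK(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_HDMI_MODE(id));
	val |= DPLL_CTRL1_OVERRIDE(id) |
	       DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, id);
	I915_WRITE(DPLL_CTRL1, val);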
+/* DPLL control2 */
+#define DPLL_CTRL2 0x6C05C
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+
+/* DPLL Status */
+#define DPLL_STATUS 0x6C060
+#define DPLL_LOCK(id) (1<<((id)*8))
+
+/* DPLL cfg */
+#define DPLL1_CFGCR1 0x6C040
+#define DPLL2_CFGCR1 0x6C048
+#define DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
+#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
+
+#define DPLL1_CFGCR2 0x6C044
+#define DPLL2_CFGCR2 0x6C04C
+#define DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
+#define DPLL_CFGCR2_KDIV_MASK (3<<5)
+#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
+#define DPLL_CFGCR2_KDIV_5 (0<<5)
+#define DPLL_CFGCR2_KDIV_2 (1<<5)
+#define DPLL_CFGCR2_KDIV_3 (2<<5)
+#define DPLL_CFGCR2_KDIV_1 (3<<5)
+#define DPLL_CFGCR2_PDIV_MASK (7<<2)
+#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
+#define DPLL_CFGCR2_PDIV_1 (0<<2)
+#define DPLL_CFGCR2_PDIV_2 (1<<2)
+#define DPLL_CFGCR2_PDIV_3 (2<<2)
+#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
+
+#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
+#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
+
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c77a1f..26368822a33f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
i915_save_display_reg(dev);
/* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-
- dev_priv->regfile.saveBLC_HIST_CTL =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
- dev_priv->regfile.saveBLC_HIST_CTL_B =
- I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
- } else {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->regfile.saveLVDS = I915_READ(LVDS);
- }
-
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else {
+ } else if (!IS_VALLEYVIEW(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
+ /* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-
+ /* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
- } else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
- dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
- dev_priv->regfile.saveBLC_HIST_CTL);
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ } else if (!IS_VALLEYVIEW(dev)) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -328,6 +303,10 @@ int i915_save_state(struct drm_device *dev)
}
}
+ if (IS_GEN4(dev))
+ pci_read_config_word(dev->pdev, GCDGMBUS,
+ &dev_priv->regfile.saveGCDGMBUS);
+
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -356,6 +335,10 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
+
+ if (IS_GEN4(dev))
+ pci_write_config_word(dev->pdev, GCDGMBUS,
+ dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -368,6 +351,8 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(IER, dev_priv->regfile.saveIER);
I915_WRITE(IMR, dev_priv->regfile.saveIMR);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 503847f18fdd..4a5af695307e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
- &dev_attr_rc6p_residency_ms.attr,
- &dev_attr_rc6pp_residency_ms.attr,
NULL
};
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
+
+static struct attribute *rc6p_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6p_attrs
+};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
int ret;
#ifdef CONFIG_PM
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (HAS_RC6(dev)) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
+ if (HAS_RC6p(dev)) {
+ ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ &rc6p_attr_group);
+ if (ret)
+ DRM_ERROR("RC6p residency sysfs setup failed\n");
+ }
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f5aa0067755a..751d4ad14d62 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
TP_printk("new_freq=%u", __entry->freq)
);
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled, each process using drm will allocate at least one
+ * translation table. With these traces it is possible to keep track of the
+ * allocation and lifetime of the tables; this can be used during
+ * testing/debug to verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also
+ * printed by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
+DECLARE_EVENT_CLASS(i915_ppgtt,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->dev = vm->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm)
+);
+
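Usage note: DEFINE_EVENT() generates matching trace_<name>() helpers, so the ppgtt setup and teardown paths (call sites assumed here, not shown in this hunk) would emit the events roughly as:

	trace_i915_ppgtt_create(&ppgtt->base);	/* once the ppgtt is initialized */
	trace_i915_ppgtt_release(&ppgtt->base);	/* just before it is torn down */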
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
+DECLARE_EVENT_CLASS(i915_context,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct intel_context *, ctx)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+ TP_PROTO(struct intel_context *ctx),
+ TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint allows tracking of the mm switch, which is an important point
+ * in the lifetime of the vm in the legacy submission path. This tracepoint is
+ * called only if full ppgtt is enabled.
+ */
+TRACE_EVENT(switch_mm,
+ TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+ TP_ARGS(ring, to),
+
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(struct intel_context *, to)
+ __field(struct i915_address_space *, vm)
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring->id;
+ __entry->to = to;
+ __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+ __entry->dev = ring->dev->primary->index;
+ ),
+
+ TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 480da593e6c0..d10fe3e9c49f 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ }
+
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
}
return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ }
+
+ /* Panel fitter */
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
}
/* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 000000000000..2c7ed5cb29c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after link training has completed.
+ *
+ * The codec and controller sequences may run either in parallel or in series,
+ * but generally the ELDV/PD change in the codec sequence indicates to the
+ * audio driver that the controller sequence should start. Indeed, most of the
+ * co-operation between the graphics and audio drivers is handled via
+ * audio-related registers. (The notable exception is power management, not
+ * covered here.)
+ */
+
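To make the required ordering concrete, here is a sketch of how an encoder's modeset hooks would call into this file; the hook names are placeholders, and only intel_audio_codec_enable()/intel_audio_codec_disable() come from this patch:

static void example_encoder_enable(struct intel_encoder *encoder)
{
	/* ... transcoder and port enabled, link training completed ... */
	intel_audio_codec_enable(encoder);
}

static void example_encoder_disable(struct intel_encoder *encoder)
{
	intel_audio_codec_disable(encoder);
	/* ... port and transcoder are disabled only after this point ... */
}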
+static const struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (mode->clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+ i = 1;
+ }
+
+ DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int i;
+
+ tmp = I915_READ(reg_eldv);
+ tmp &= bits_eldv;
+
+ if (!tmp)
+ return false;
+
+ tmp = I915_READ(reg_elda);
+ tmp &= ~bits_elda;
+ I915_WRITE(reg_elda, tmp);
+
+ for (i = 0; i < drm_eld_size(eld) / 4; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ uint32_t eldv, tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec\n");
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ /* Invalidate ELD */
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+ tmp = I915_READ(G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+ len = (tmp >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+ len = min(drm_eld_size(eld) / 4, len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+ tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp;
+
+ DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+ /* Disable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ /* Invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ const uint8_t *eld = connector->eld;
+ uint32_t tmp;
+ int len, i;
+
+ DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+ pipe_name(pipe), drm_eld_size(eld));
+
+ /* Enable audio presence detect, invalidate ELD */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+ tmp &= ~AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ /* Reset ELD write address */
+ tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_ELD_VALID(pipe);
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t tmp, eldv;
+ int aud_config;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ /* Disable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ I915_WRITE(aud_config, tmp);
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+ enum port port = intel_dig_port->port;
+ enum pipe pipe = intel_crtc->pipe;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t tmp;
+ int len, i;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+ DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+ port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(connector->dev)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ if (WARN_ON(!port)) {
+ eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+ IBX_ELD_VALID(PORT_D);
+ } else {
+ eldv = IBX_ELD_VALID(port);
+ }
+
+ /* Invalidate ELD */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Reset ELD write address */
+ tmp = I915_READ(aud_cntl_st);
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ I915_WRITE(aud_cntl_st, tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+ /* ELD valid */
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= eldv;
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Enable timestamps */
+ tmp = I915_READ(aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(mode);
+ I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after link training has completed.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ connector->encoder->base.id,
+ connector->encoder->name);
+
+ /* ELD Conn_Type */
+ connector->eld[5] &= ~(3 << 2);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ connector->eld[5] |= (1 << 2);
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.audio_codec_enable)
+ dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.audio_codec_disable)
+ dev_priv->display.audio_codec_disable(encoder);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_G4X(dev)) {
+ dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+ dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+ dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 905999bee2ac..7603765c91fc 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,7 +46,7 @@ struct bdb_header {
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
-};
+} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
-};
+} __packed;
struct bdb_general_definitions {
/* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
-};
+} __packed;
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
-};
+} __packed;
/* MIPI Sequence Block definitions */
enum mipi_seq {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e6504e0f..a9af9a4866db 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(crt->adpa_reg);
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa204a3..e6b45cd150d3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
{ 0x00BEFFFF, 0x00140006 },
{ 0x80B2CFFF, 0x001B0002 },
{ 0x00FFFFFF, 0x000E000A },
- { 0x00D75FFF, 0x00180004 },
- { 0x80CB2FFF, 0x001B0002 },
+ { 0x00DB6FFF, 0x00160005 },
+ { 0x80C71FFF, 0x001A0002 },
{ 0x00F7DFFF, 0x00180004 },
{ 0x80D75FFF, 0x001B0002 },
};
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
};
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000a0 },
+ { 0x00004014, 0x00000098 },
+ { 0x00006012, 0x00000088 },
+ { 0x00008010, 0x00000080 },
+ { 0x00000018, 0x00000098 },
+ { 0x00004014, 0x00000088 },
+ { 0x00006012, 0x00000080 },
+ { 0x00000018, 0x00000088 },
+ { 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+ /* Idx NT mV T mV db */
+ { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
+ { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
+ { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
+ { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
+ { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
+ { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
+ { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
+ { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
+ { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
+ { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
+};
+
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROADWELL(dev)) {
+ if (IS_SKYLAKE(dev)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp = skl_ddi_translations_dp;
+ ddi_translations_edp = skl_ddi_translations_dp;
+ ddi_translations_hdmi = skl_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ hdmi_800mV_0dB = 7;
+ } else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
- ddi_translations = ddi_translations_fdi;
+ if (ddi_translations_fdi)
+ ddi_translations = ddi_translations_fdi;
+ else
+ ddi_translations = ddi_translations_dp;
break;
default:
BUG();
@@ -423,6 +459,27 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
+static struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc == crtc) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+ }
+
+ WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+ pipe_name(crtc->pipe));
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
@@ -613,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ uint32_t dpll)
+{
+ uint32_t cfgcr1_reg, cfgcr2_reg;
+ uint32_t cfgcr1_val, cfgcr2_val;
+ uint32_t p0, p1, p2, dco_freq;
+
+ cfgcr1_reg = GET_CFG_CR1_REG(dpll);
+ cfgcr2_reg = GET_CFG_CR2_REG(dpll);
+
+ cfgcr1_val = I915_READ(cfgcr1_reg);
+ cfgcr2_val = I915_READ(cfgcr2_reg);
+
+ p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+ p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+ if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+ dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+ 1000) / 0x8000;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ int link_clock = 0;
+ uint32_t dpll_ctl1, dpll;
+
+ dpll = pipe_config->ddi_pll_sel;
+
+ dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+ if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+ } else {
+ link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
+ link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+
+ switch (link_clock) {
+ case DPLL_CRTL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CRTL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ WARN(1, "Unsupported link rate\n");
+ break;
+ }
+ link_clock *= 2;
+ }
+
+ pipe_config->port_clock = link_clock;
+
+ if (pipe_config->has_dp_encoder)
+ pipe_config->adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -756,7 +918,7 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- intel_crtc->config.dpll_hw_state.wrpll = val;
+ intel_crtc->new_config->dpll_hw_state.wrpll = val;
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
@@ -765,12 +927,234 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return false;
}
- intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+ intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
+struct skl_wrpll_params {
+ uint32_t dco_fraction;
+ uint32_t dco_integer;
+ uint32_t qdiv_ratio;
+ uint32_t qdiv_mode;
+ uint32_t kdiv;
+ uint32_t pdiv;
+ uint32_t central_freq;
+};
+
+static void
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ uint32_t min_dco_deviation = 400;
+ uint32_t min_dco_index = 3;
+ uint32_t P0[4] = {1, 2, 3, 7};
+ uint32_t P2[4] = {1, 2, 3, 5};
+ bool found = false;
+ uint32_t candidate_p = 0;
+ uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
+ uint32_t candidate_p2[3] = {0};
+ uint32_t dco_central_freq_deviation[3];
+ uint32_t i, P1, k, dco_count;
+ bool retry_with_odd = false;
+ uint64_t dco_freq;
+
+ /* Determine P0, P1 or P2 */
+ for (dco_count = 0; dco_count < 3; dco_count++) {
+ found = false;
+ candidate_p =
+ div64_u64(dco_central_freq[dco_count], afe_clock);
+ if (retry_with_odd == false)
+ candidate_p = (candidate_p % 2 == 0 ?
+ candidate_p : candidate_p + 1);
+
+ for (P1 = 1; P1 < candidate_p; P1++) {
+ for (i = 0; i < 4; i++) {
+ if (!(P0[i] != 1 || P1 == 1))
+ continue;
+
+ for (k = 0; k < 4; k++) {
+ if (P1 != 1 && P2[k] != 2)
+ continue;
+
+ if (candidate_p == P0[i] * P1 * P2[k]) {
+ /* Found possible P0, P1, P2 */
+ found = true;
+ candidate_p0[dco_count] = P0[i];
+ candidate_p1[dco_count] = P1;
+ candidate_p2[dco_count] = P2[k];
+ goto found;
+ }
+
+ }
+ }
+ }
+
+found:
+ if (found) {
+ dco_central_freq_deviation[dco_count] =
+ div64_u64(10000 *
+ abs_diff((candidate_p * afe_clock),
+ dco_central_freq[dco_count]),
+ dco_central_freq[dco_count]);
+
+ if (dco_central_freq_deviation[dco_count] <
+ min_dco_deviation) {
+ min_dco_deviation =
+ dco_central_freq_deviation[dco_count];
+ min_dco_index = dco_count;
+ }
+ }
+
+ if (min_dco_index > 2 && dco_count == 2) {
+ retry_with_odd = true;
+ dco_count = 0;
+ }
+ }
+
+ if (min_dco_index > 2) {
+ WARN(1, "No valid values found for the given pixel clock\n");
+ } else {
+ wrpll_params->central_freq = dco_central_freq[min_dco_index];
+
+ switch (dco_central_freq[min_dco_index]) {
+ case 9600000000ULL:
+ wrpll_params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ wrpll_params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ wrpll_params->central_freq = 3;
+ }
+
+ switch (candidate_p0[min_dco_index]) {
+ case 1:
+ wrpll_params->pdiv = 0;
+ break;
+ case 2:
+ wrpll_params->pdiv = 1;
+ break;
+ case 3:
+ wrpll_params->pdiv = 2;
+ break;
+ case 7:
+ wrpll_params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (candidate_p2[min_dco_index]) {
+ case 5:
+ wrpll_params->kdiv = 0;
+ break;
+ case 2:
+ wrpll_params->kdiv = 1;
+ break;
+ case 3:
+ wrpll_params->kdiv = 2;
+ break;
+ case 1:
+ wrpll_params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+ wrpll_params->qdiv_mode =
+ (wrpll_params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = candidate_p0[min_dco_index] *
+ candidate_p1[min_dco_index] *
+ candidate_p2[min_dco_index] * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+ wrpll_params->dco_fraction =
+ div_u64(((div_u64(dco_freq, 24) -
+ wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+
+ }
+}
+
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_encoder *intel_encoder,
+ int clock)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ struct skl_wrpll_params wrpll_params = { 0, };
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+ break;
+ }
+
+ cfgcr1 = cfgcr2 = 0;
+ } else /* eDP */
+ return true;
+
+ intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
+ intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ return false;
+ }
+
+ /* shared DPLL id 0 is DPLL 1 */
+ intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+
+ return true;
+}
/*
* Tries to find a *shared* PLL for the CRTC and store it in
@@ -781,13 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- int clock = intel_crtc->config.port_clock;
-
- intel_put_shared_dpll(intel_crtc);
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_encoder *intel_encoder =
+ intel_ddi_get_crtc_new_encoder(intel_crtc);
+ int clock = intel_crtc->new_config->port_clock;
- return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ if (IS_SKYLAKE(dev))
+ return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+ else
+ return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -962,7 +1348,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
uint32_t tmp;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1394,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
int i;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1079,27 +1465,53 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
- pipe_name(crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
- intel_write_eld(encoder, &crtc->config.adjusted_mode);
- }
-
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ if (IS_SKYLAKE(dev)) {
+ uint32_t dpll = crtc->config.ddi_pll_sel;
+ uint32_t val;
+
+ /*
+ * DPLL0 is used for eDP and is the only "private" DPLL (as
+ * opposed to shared) on SKL
+ */
+ if (type == INTEL_OUTPUT_EDP) {
+ WARN_ON(dpll != SKL_DPLL0);
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+ DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+ }
+
+ /* DDI -> PLL mapping */
+ val = I915_READ(DPLL_CTRL2);
+
+ val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ I915_WRITE(DPLL_CTRL2, val);
+
+ } else {
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+ }
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1109,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
- if (port != PORT_A)
+ if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1123,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1151,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ if (IS_SKYLAKE(dev))
+ I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port)));
+ else
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1159,12 +1576,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
@@ -1180,18 +1595,16 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
- intel_edp_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp);
}
if (intel_crtc->config.has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_enable(intel_encoder);
}
}
@@ -1200,30 +1613,71 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
- /* We can't touch HSW_AUD_PIN_ELD_CP_VLD uncionditionally because this
- * register is part of the power well on Haswell. */
if (intel_crtc->config.has_audio) {
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
- (pipe * 4));
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_edp_psr_disable(intel_dp);
+ intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
+static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t linkrate;
+
+ if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+ WARN(1, "LCPLL1 not enabled\n");
+ return 24000; /* 24MHz is the cd freq with NSSC ref */
+ }
+
+ if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+ return 540000;
+
+ linkrate = (I915_READ(DPLL_CTRL1) &
+ DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+ if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+ linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+ /* vco 8640 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 432000;
+ case CDCLK_FREQ_337_308:
+ return 308570;
+ case CDCLK_FREQ_675_617:
+ return 617140;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ } else {
+ /* vco 8100 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 450000;
+ case CDCLK_FREQ_337_308:
+ return 337500;
+ case CDCLK_FREQ_675_617:
+ return 675000;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ }
+
+ /* error case, do as if DPLL0 isn't enabled */
+ return 24000;
+}
+
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1255,7 +1709,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_ULT(dev))
+ else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
@@ -1265,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ if (IS_SKYLAKE(dev))
+ return skl_get_cdclk_freq(dev_priv);
+
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
@@ -1275,7 +1732,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
@@ -1296,7 +1753,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
@@ -1326,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
}
}
+static const char * const skl_ddi_pll_names[] = {
+ "DPLL 1",
+ "DPLL 2",
+ "DPLL 3",
+};
+
+struct skl_dpll_regs {
+ u32 ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[3] = {
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL1_CFGCR1,
+ .cfgcr2 = DPLL1_CFGCR2,
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL1,
+ .cfgcr1 = DPLL2_CFGCR1,
+ .cfgcr2 = DPLL2_CFGCR2,
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL2,
+ .cfgcr1 = DPLL3_CFGCR1,
+ .cfgcr2 = DPLL3_CFGCR2,
+ },
+};
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
+ DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ val |= pll->config.hw_state.ctrl1 << (dpll * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+
+ I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ POSTING_READ(regs[pll->id].cfgcr1);
+ POSTING_READ(regs[pll->id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
+ DRM_ERROR("DPLL %d not locked\n", dpll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[pll->id].ctl);
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ unsigned int dpll;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
+ dpll = pll->id + 1;
+
+ val = I915_READ(regs[pll->id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ return false;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
+ hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ }
+
+ return true;
+}
+
+static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ dev_priv->num_shared_dpll = 3;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ skl_ddi_pll_get_hw_state;
+ }
+}
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
- hsw_shared_dplls_init(dev_priv);
-
- /* The LCPLL register should be turned on by the BIOS. For now let's
- * just check its state and print errors in case something is wrong.
- * Don't even try to turn it on.
- */
+ if (IS_SKYLAKE(dev))
+ skl_shared_dplls_init(dev_priv);
+ else
+ hsw_shared_dplls_init(dev_priv);
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ if (IS_SKYLAKE(dev)) {
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+ DRM_ERROR("LCPLL1 is disabled\n");
+ } else {
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
}
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1440,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
+ struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -1474,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
+ intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+ break;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1486,9 +2080,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
pipe_config->has_audio = true;
}
@@ -1512,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (INTEL_INFO(dev)->gen <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
+ else
+ skl_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cb5c95d5898..fb3e3d429191 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -96,8 +94,10 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
-static void vlv_prepare_pll(struct intel_crtc *crtc);
-static void chv_prepare_pll(struct intel_crtc *crtc);
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -408,25 +408,43 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(dev, crtc, encoder)
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->type == type)
return true;
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+/**
+ * Returns whether any output on the specified pipe will have the specified
+ * type after a staged modeset is complete, i.e., the same as
+ * intel_pipe_has_type() but looking at encoder->new_crtc instead of
+ * encoder->crtc.
+ */
+static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->new_crtc == crtc && encoder->type == type)
+ return true;
+
+ return false;
+}
+
+static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -444,20 +462,20 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -465,9 +483,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
@@ -475,7 +493,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -484,14 +502,14 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -578,15 +596,15 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -639,15 +657,15 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -698,11 +716,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
bool found;
@@ -710,7 +728,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -755,11 +773,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -812,11 +830,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
uint64_t m2;
int found = false;
@@ -889,60 +907,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return intel_crtc->config.cpu_transcoder;
}
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
- frame = I915_READ(frame_reg);
-
- if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
- WARN(1, "vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = PIPESTAT(pipe);
-
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
- g4x_wait_for_vblank(dev, pipe);
- return;
- }
-
- /* Clear existing vblank status. Note this will clear any other
- * sticky status fields as well.
- *
- * This races with i915_driver_irq_handler() with the result
- * that either function could miss a vblank event. Here it is not
- * fatal, as we will either wait upon the next vblank interrupt or
- * timeout. Generally speaking intel_wait_for_vblank() is only
- * called during modeset at which time the GPU should be idle and
- * should *not* be performing page flips and thus not waiting on
- * vblanks...
- * Currently, the result of us stealing a vblank from the irq
- * handler is that a single frame will be skipped during swapbuffers.
- */
- I915_WRITE(pipestat_reg,
- I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
- /* Wait for vblank interrupt bit to set */
- if (wait_for(I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS,
- 50))
- DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
- pipe_name(pipe));
-}
-
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1153,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
state_string(state), state_string(cur_state));
}
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int pp_reg;
@@ -1263,7 +1227,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
@@ -1332,7 +1296,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
int reg, sprite;
u32 val;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ for_each_sprite(pipe, sprite) {
+ val = I915_READ(PLANE_CTL(pipe, sprite));
+ WARN(val & PLANE_CTL_ENABLE,
+ "plane %d assertion failure, should be off on pipe %c but is still active\n",
+ sprite, pipe_name(pipe));
+ }
+ } else if (IS_VALLEYVIEW(dev)) {
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
@@ -1533,12 +1504,13 @@ static void intel_init_dpio(struct drm_device *dev)
}
}
-static void vlv_enable_pll(struct intel_crtc *crtc)
+static void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config.dpll_hw_state.dpll;
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1556,7 +1528,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
- I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
@@ -1571,7 +1543,8 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
udelay(150); /* wait for warmup */
}
-static void chv_enable_pll(struct intel_crtc *crtc)
+static void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1596,14 +1569,14 @@ static void chv_enable_pll(struct intel_crtc *crtc)
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
+ I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
/* not sure when this should be written */
- I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
mutex_unlock(&dev_priv->dpio_lock);
@@ -1616,7 +1589,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
for_each_intel_crtc(dev, crtc)
count += crtc->active &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
return count;
}
@@ -1695,7 +1668,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
intel_num_dvo_pipes(dev) == 1) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1806,7 +1779,7 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- WARN_ON(!pll->refcount);
+ WARN_ON(!pll->config.crtc_mask);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
@@ -1833,7 +1806,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
@@ -1865,7 +1838,7 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL))
return;
- if (WARN_ON(pll->refcount == 0))
+ if (WARN_ON(pll->config.crtc_mask == 0))
return;
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -1933,7 +1906,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2056,7 +2029,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -2221,11 +2194,13 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
}
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined)
{
+ struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 alignment;
int ret;
@@ -2233,7 +2208,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
switch (obj->tiling_mode) {
case I915_TILING_NONE:
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
@@ -2241,8 +2218,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
alignment = 64 * 1024;
break;
case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
+ if (INTEL_INFO(dev)->gen >= 9)
+ alignment = 256 * 1024;
+ else {
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ }
break;
case I915_TILING_Y:
WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2402,6 +2383,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
@@ -2433,6 +2415,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -2486,6 +2471,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
+ } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ I915_WRITE(PRIMSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PRIMPOS(plane), 0);
+ I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
switch (fb->pixel_format) {
@@ -2672,6 +2663,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride;
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_CTL(pipe, 0));
+ return;
+ }
+
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ plane_ctl |= PLANE_CTL_ORDER_RGBX;
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ default:
+ BUG();
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ /*
+ * The stride is either expressed as a multiple of 64 bytes chunks for
+ * linear buffers or in number of tiles for tiled buffers.
+ */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
+
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+ if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+ DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+ i915_gem_obj_ggtt_offset(obj),
+ x, y, fb->width, fb->height,
+ fb->pitches[0]);
+
+ I915_WRITE(PLANE_POS(pipe, 0), 0);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+ I915_WRITE(PLANE_SIZE(pipe, 0),
+ (intel_crtc->config.pipe_src_h - 1) << 16 |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+ I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,32 +2759,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
return 0;
}
-void intel_display_handle_reset(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- /*
- * Flips in the rings have been nuked by the reset,
- * so complete all pending flips so that user space
- * will get its events and not get stuck.
- *
- * Also update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * Need to make two loops over the crtcs so that we
- * don't try to grab a crtc mutex before the
- * pending_flip_queue really got woken up.
- */
-
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
@@ -2715,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2734,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
}
}
+void intel_prepare_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ return;
+
+ drm_modeset_lock_all(dev);
+
+ /*
+ * Disabling the crtcs gracefully seems nicer. Also the
+ * g33 docs say we should at least disable all the planes.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->active)
+ dev_priv->display.crtc_disable(&crtc->base);
+ }
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+ * Flips in the rings will be nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ */
+ intel_complete_page_flips(dev);
+
+ /* no reset support for gen2 */
+ if (IS_GEN2(dev))
+ return;
+
+ /* reset doesn't touch the display */
+ if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so update the base address of all primary
+ * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ */
+ intel_update_primary_planes(dev);
+ return;
+ }
+
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ intel_modeset_init_hw(dev);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_modeset_setup_hw_state(dev, true);
+
+ intel_hpd_init(dev_priv);
+
+ drm_modeset_unlock_all(dev);
+}
+
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
@@ -2762,20 +2902,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
return pending;
}
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_display_mode *adjusted_mode;
+
+ if (!i915.fastboot)
+ return;
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ *
+ * To fix this properly, we need to hoist the checks up into
+ * compute_mode_changes (or above), check the actual pfit state and
+ * whether the platform allows pfit disable with pipe active, and only
+ * then update the pipesrc and pfit state, even on the flip path.
+ */
+
+ adjusted_mode = &crtc->config.adjusted_mode;
+
+ I915_WRITE(PIPESRC(crtc->pipe),
+ ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+ (adjusted_mode->crtc_vdisplay - 1));
+ if (!crtc->config.pch_pfit.enabled &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+ }
+ crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+ crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
@@ -2785,7 +2963,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
@@ -2808,9 +2985,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret == 0)
- i915_gem_track_fb(old_obj, obj,
+ i915_gem_track_fb(old_obj, intel_fb_obj(fb),
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
@@ -2818,37 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
- *
- * To fix this properly, we need to hoist the checks up into
- * compute_mode_changes (or above), check the actual pfit state and
- * whether the platform allows pfit disable with pipe active, and only
- * then update the pipesrc and pfit state, even on the flip path.
- */
- if (i915.fastboot) {
- const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
-
- I915_WRITE(PIPESRC(intel_crtc->pipe),
- ((adjusted_mode->crtc_hdisplay - 1) << 16) |
- (adjusted_mode->crtc_vdisplay - 1));
- if (!intel_crtc->config.pch_pfit.enabled &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
- I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
- I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
- }
- intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
- intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
- }
-
dev_priv->display.update_primary_plane(crtc, fb, x, y);
if (intel_crtc->active)
@@ -3472,14 +3618,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0)) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
if (crtc->primary->fb) {
@@ -3704,9 +3849,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -3766,12 +3909,13 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
if (pll == NULL)
return;
- if (pll->refcount == 0) {
- WARN(1, "bad %s refcount\n", pll->name);
+ if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
+ WARN(1, "bad %s crtc mask\n", pll->name);
return;
}
- if (--pll->refcount == 0) {
+ pll->config.crtc_mask &= ~(1 << crtc->pipe);
+ if (pll->config.crtc_mask == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
@@ -3782,15 +3926,9 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ struct intel_shared_dpll *pll;
enum intel_dpll_id i;
- if (pll) {
- DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
- crtc->base.base.id, pll->name);
- intel_put_shared_dpll(crtc);
- }
-
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
@@ -3799,7 +3937,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
- WARN_ON(pll->refcount);
+ WARN_ON(pll->new_config->crtc_mask);
goto found;
}
@@ -3808,15 +3946,16 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->refcount == 0)
+ if (pll->new_config->crtc_mask == 0)
continue;
- if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
- sizeof(pll->hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
- crtc->base.base.id,
- pll->name, pll->refcount, pll->active);
-
+ if (memcmp(&crtc->new_config->dpll_hw_state,
+ &pll->new_config->hw_state,
+ sizeof(pll->new_config->hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
+ crtc->base.base.id, pll->name,
+ pll->new_config->crtc_mask,
+ pll->active);
goto found;
}
}
@@ -3824,7 +3963,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->refcount == 0) {
+ if (pll->new_config->crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
@@ -3834,18 +3973,86 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
return NULL;
found:
- if (pll->refcount == 0)
- pll->hw_state = crtc->config.dpll_hw_state;
+ if (pll->new_config->crtc_mask == 0)
+ pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
- crtc->config.shared_dpll = i;
+ crtc->new_config->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- pll->refcount++;
+ pll->new_config->crtc_mask |= 1 << crtc->pipe;
return pll;
}
+/**
+ * intel_shared_dpll_start_config - start a new PLL staged config
+ * @dev_priv: DRM device
+ * @clear_pipes: mask of pipes that will have their PLLs freed
+ *
+ * Starts a new PLL staged config, copying the current config but
+ * releasing the references of pipes specified in clear_pipes.
+ */
+static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
+ unsigned clear_pipes)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ pll->new_config = kmemdup(&pll->config, sizeof pll->config,
+ GFP_KERNEL);
+ if (!pll->new_config)
+ goto cleanup;
+
+ pll->new_config->crtc_mask &= ~clear_pipes;
+ }
+
+ return 0;
+
+cleanup:
+ while (--i >= 0) {
+ pll = &dev_priv->shared_dplls[i];
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ pll->config = *pll->new_config;
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
+static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
+{
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ WARN_ON(pll->new_config == &pll->config);
+
+ kfree(pll->new_config);
+ pll->new_config = NULL;
+ }
+}
+
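The three helpers above behave like a small transaction on the shared DPLL state: start_config duplicates each pll->config into pll->new_config with the released pipes cleared, PLL assignment then works against new_config, and either commit or abort disposes of the staged copies. A rough caller sketch, assuming the surrounding modeset code and a hypothetical recompute step (recompute_pll_state() does not exist in this patch):

/* Sketch of the intended start/commit/abort flow; recompute_pll_state()
 * is a hypothetical placeholder for the real PLL assignment code. */
static int stage_and_commit_plls(struct drm_i915_private *dev_priv,
				 unsigned pipe_mask)
{
	int ret;

	ret = intel_shared_dpll_start_config(dev_priv, pipe_mask);
	if (ret)
		return ret;			/* nothing staged, nothing to undo */

	ret = recompute_pll_state(dev_priv);	/* hypothetical compute step */
	if (ret) {
		intel_shared_dpll_abort_config(dev_priv);	/* drop staged copies */
		return ret;
	}

	intel_shared_dpll_commit(dev_priv);	/* staged copies become pll->config */
	return 0;
}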
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3860,6 +4067,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+static void skylake_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), PS_ENABLE);
+ I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+ I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ }
+}
+
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -3983,7 +4203,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -4038,10 +4258,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- assert_vblank_disabled(crtc);
-
- drm_vblank_on(dev, pipe);
-
intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4303,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
* consider this a flip to a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
- drm_vblank_off(dev, pipe);
-
- assert_vblank_disabled(crtc);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4123,8 +4335,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4160,6 +4372,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
}
@@ -4235,19 +4450,23 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ true);
dev_priv->display.fdi_link_train(crtc);
}
intel_ddi_enable_pipe_clock(intel_crtc);
- ironlake_pfit_enable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_enable(intel_crtc);
+ else
+ ironlake_pfit_enable(intel_crtc);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -4272,12 +4491,30 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_opregion_notify_encoder(encoder, true);
}
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
}
+static void skylake_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ /* To avoid upsetting the power well, only disable the pfit if it's in
+ * use. The hw state code will make sure we get this right. */
+ if (crtc->config.pch_pfit.enabled) {
+ I915_WRITE(PS_CTL(pipe), 0);
+ I915_WRITE(PS_WIN_POS(pipe), 0);
+ I915_WRITE(PS_WIN_SZ(pipe), 0);
+ }
+}
+
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4307,11 +4544,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
@@ -4368,13 +4608,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_crtc_disable_planes(crtc);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ false);
intel_disable_pipe(intel_crtc);
if (intel_crtc->config.dp_encoder_is_mst)
@@ -4382,7 +4626,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- ironlake_pfit_disable(intel_crtc);
+ if (IS_SKYLAKE(dev))
+ skylake_pfit_disable(intel_crtc);
+ else
+ ironlake_pfit_disable(intel_crtc);
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -4508,20 +4755,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
- bool enable)
-{
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4544,6 +4777,9 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_power_get(dev_priv, domain);
}
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -4575,7 +4811,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
dev_priv->vlv_cdclk_freq);
/*
@@ -4614,10 +4850,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->rps.hw_lock);
if (cdclk == 400000) {
- u32 divider, vco;
+ u32 divider;
- vco = valleyview_get_vco(dev_priv);
- divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
@@ -4696,8 +4931,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
- int vco = valleyview_get_vco(dev_priv);
- int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+ int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev_priv->dev))
@@ -4766,18 +5000,30 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ /*
+ * FIXME: We can end up here with all power domains off, yet
+ * with a CDCLK frequency other than the minimum. To account
+ * for this take the PIPE-A power domain, which covers the HW
+ * blocks needed for the following programming. This can be
+ * removed once it's guaranteed that we get here either with
+ * the minimum CDCLK set, or the required power domains
+ * enabled.
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
if (IS_CHERRYVIEW(dev))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
- }
- modeset_update_crtc_power_domains(dev);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ }
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4788,13 +5034,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+ is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc);
+ chv_prepare_pll(intel_crtc, &intel_crtc->config);
else
- vlv_prepare_pll(intel_crtc);
+ vlv_prepare_pll(intel_crtc, &intel_crtc->config);
}
if (intel_crtc->config.has_dp_encoder)
@@ -4802,11 +5048,18 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_set_pipe_timings(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ I915_WRITE(CHV_CANVAS(pipe), 0);
+ }
+
i9xx_set_pipeconf(intel_crtc);
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
@@ -4814,9 +5067,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
- chv_enable_pll(intel_crtc);
+ chv_enable_pll(intel_crtc, &intel_crtc->config);
else
- vlv_enable_pll(intel_crtc);
+ vlv_enable_pll(intel_crtc, &intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4833,10 +5086,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4851,6 +5107,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4872,7 +5129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
@@ -4890,6 +5147,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+ assert_vblank_disabled(crtc);
+ drm_crtc_vblank_on(crtc);
+
intel_crtc_enable_planes(crtc);
/*
@@ -4900,10 +5160,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev);
+ i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4939,7 +5199,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
* but leave the pipe running.
*/
if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
* Vblank time updates from the shadow to live plane control register
@@ -4953,9 +5213,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
-
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
@@ -4964,6 +5221,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
+ drm_crtc_vblank_off(crtc);
+ assert_vblank_disabled(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
intel_disable_pipe(intel_crtc);
i9xx_pfit_disable(intel_crtc);
@@ -4972,7 +5235,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
+ if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -4982,7 +5245,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
}
if (!IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_crtc->active = false;
intel_update_watermarks(crtc);
@@ -4996,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
- bool enabled)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return;
-
- switch (pipe) {
- case 0:
- master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- case 1:
- master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
- master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
- break;
- default:
- DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
- break;
- }
-}
-
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
@@ -5069,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
-
- intel_crtc_update_sarea(crtc, enable);
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5085,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
- intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
if (crtc->primary->fb) {
@@ -5324,11 +5554,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- struct drm_i915_private *dev_priv = dev->dev_private;
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
@@ -5355,7 +5585,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5377,13 +5607,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /*
- * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
- * old clock survives for now.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
- pipe_config->shared_dpll = crtc->config.shared_dpll;
-
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
@@ -5393,7 +5616,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
@@ -5401,6 +5623,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
if (IS_CHERRYVIEW(dev))
return 400000;
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
@@ -5411,7 +5636,7 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
- return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -5543,15 +5768,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5581,24 +5806,24 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
u32 fp, fp2 = 0;
if (IS_PINEVIEW(dev)) {
- fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+ fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
- crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
- crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
- crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
}
}
@@ -5687,7 +5912,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc)
&crtc->config.dp_m2_n2);
}
-static void vlv_update_pll(struct intel_crtc *crtc)
+static void vlv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
u32 dpll, dpll_md;
@@ -5702,14 +5928,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ pipe_config->dpll_hw_state.dpll = dpll;
- dpll_md = (crtc->config.pixel_multiplier - 1)
+ dpll_md = (pipe_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
-static void vlv_prepare_pll(struct intel_crtc *crtc)
+static void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5720,11 +5947,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpio_lock);
- bestn = crtc->config.dpll.n;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -5761,17 +5988,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
- if (crtc->config.port_clock == 162000 ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+ if (pipe_config->port_clock == 162000 ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+ if (crtc->config.has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5791,8 +6017,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -5800,19 +6026,21 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
-static void chv_update_pll(struct intel_crtc *crtc)
+static void chv_update_pll(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
- crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
- crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- crtc->config.dpll_hw_state.dpll_md =
- (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static void chv_prepare_pll(struct intel_crtc *crtc)
+static void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5823,18 +6051,18 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
- bestn = crtc->config.dpll.n;
- bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
- bestm1 = crtc->config.dpll.m1;
- bestm2 = crtc->config.dpll.m2 >> 22;
- bestp1 = crtc->config.dpll.p1;
- bestp2 = crtc->config.dpll.p2;
+ bestn = pipe_config->dpll.n;
+ bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2 >> 22;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
/*
* Enable Refclk and SSC
*/
I915_WRITE(dpll_reg,
- crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
mutex_lock(&dev_priv->dpio_lock);
@@ -5862,7 +6090,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
/* Loop filter */
- refclk = i9xx_get_refclk(&crtc->base, 0);
+ refclk = i9xx_get_refclk(crtc, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
@@ -5882,6 +6110,53 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc_config pipe_config = {
+ .pixel_multiplier = 1,
+ .dpll = *dpll,
+ };
+
+ if (IS_CHERRYVIEW(dev)) {
+ chv_update_pll(crtc, &pipe_config);
+ chv_prepare_pll(crtc, &pipe_config);
+ chv_enable_pll(crtc, &pipe_config);
+ } else {
+ vlv_update_pll(crtc, &pipe_config);
+ vlv_prepare_pll(crtc, &pipe_config);
+ vlv_enable_pll(crtc, &pipe_config);
+ }
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev: drm device
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. Counterpart of vlv_force_pll_on(), for
+ * cases where the PLL was force-enabled without @pipe itself being enabled.
+ */
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev))
+ chv_disable_pll(to_i915(dev), pipe);
+ else
+ vlv_disable_pll(to_i915(dev), pipe);
+}
+
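vlv_force_pll_on()/vlv_force_pll_off() exist so other paths can run a VLV/CHV DPLL without bringing up the pipe itself (the on-stack intel_crtc_config above carries only a pixel_multiplier of 1 plus the caller's dividers). A minimal usage sketch, with placeholder divider values that are not taken from any real mode:

/* Sketch only; the divider values below are placeholders, not a real mode. */
static void example_force_pll(struct drm_device *dev)
{
	struct dpll dividers = {
		.n = 1, .m1 = 2, .m2 = 100, .p1 = 2, .p2 = 10,
	};

	vlv_force_pll_on(dev, PIPE_A, &dividers);	/* PLL running, pipe still off */
	/* ... use whatever needed the clock here ... */
	vlv_force_pll_off(dev, PIPE_A);			/* tear the PLL back down */
}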
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
int num_connectors)
@@ -5890,29 +6165,29 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
- is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
- dpll |= (crtc->config.pixel_multiplier - 1)
+ dpll |= (crtc->new_config->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+ if (crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5940,21 +6215,21 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (crtc->config.sdvo_tv_clock)
+ if (crtc->new_config->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
- u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+ u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc->config.dpll_hw_state.dpll_md = dpll_md;
+ crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
}
}
@@ -5965,13 +6240,13 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
- struct dpll *clock = &crtc->config.dpll;
+ struct dpll *clock = &crtc->new_config->dpll;
i9xx_update_pll_dividers(crtc, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -5982,17 +6257,17 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- crtc->config.dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6016,7 +6291,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6174,7 +6449,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
- intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
+ intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6188,13 +6463,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
bool ok, has_reduced_clock = false;
@@ -6202,7 +6474,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct intel_encoder *encoder;
const intel_limit_t *limit;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != crtc)
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6210,6 +6485,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -6218,7 +6495,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_dsi)
return 0;
- if (!intel_crtc->config.clock_set) {
+ if (!crtc->new_config->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
/*
@@ -6229,7 +6506,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
- intel_crtc->config.port_clock,
+ crtc->new_config->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6250,23 +6527,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
if (IS_GEN2(dev)) {
- i8xx_update_pll(intel_crtc,
+ i8xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
- chv_update_pll(intel_crtc);
+ chv_update_pll(crtc, crtc->new_config);
} else if (IS_VALLEYVIEW(dev)) {
- vlv_update_pll(intel_crtc);
+ vlv_update_pll(crtc, crtc->new_config);
} else {
- i9xx_update_pll(intel_crtc,
+ i9xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
@@ -6432,8 +6709,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6538,6 +6815,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
+ default:
+ break;
}
}
@@ -6842,6 +7121,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
+ default:
+ break;
}
}
@@ -6870,11 +7151,16 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
int num_connectors = 0;
bool is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
+ default:
+ break;
}
num_connectors++;
}
@@ -7019,7 +7305,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
switch (intel_crtc->config.pipe_bpp) {
@@ -7054,18 +7340,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
- }
+ is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
refclk = ironlake_get_refclk(crtc);
@@ -7074,9 +7354,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, crtc,
- to_intel_crtc(crtc)->config.port_clock,
+ limit = intel_limit(intel_crtc, refclk);
+ ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ intel_crtc->new_config->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
@@ -7089,7 +7369,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, intel_crtc,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7126,7 +7406,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ continue;
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7135,6 +7418,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
break;
+ default:
+ break;
}
num_connectors++;
@@ -7147,10 +7432,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
- } else if (intel_crtc->config.sdvo_tv_clock)
+ } else if (intel_crtc->new_config->sdvo_tv_clock)
factor = 20;
- if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+ if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
*fp |= FP_CB_TUNE;
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7163,20 +7448,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (intel_crtc->config.has_dp_encoder)
+ if (intel_crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (intel_crtc->config.dpll.p2) {
+ switch (intel_crtc->new_config->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -7199,78 +7484,64 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
return dpll | DPLL_VCO_ENABLE;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int num_connectors = 0;
+ struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
- struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- }
-
- num_connectors++;
- }
+ is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
- ok = ironlake_compute_clocks(crtc, &clock,
+ ok = ironlake_compute_clocks(&crtc->base, &clock,
&has_reduced_clock, &reduced_clock);
- if (!ok && !intel_crtc->config.clock_set) {
+ if (!ok && !crtc->new_config->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
- if (!intel_crtc->config.clock_set) {
- intel_crtc->config.dpll.n = clock.n;
- intel_crtc->config.dpll.m1 = clock.m1;
- intel_crtc->config.dpll.m2 = clock.m2;
- intel_crtc->config.dpll.p1 = clock.p1;
- intel_crtc->config.dpll.p2 = clock.p2;
+ if (!crtc->new_config->clock_set) {
+ crtc->new_config->dpll.n = clock.n;
+ crtc->new_config->dpll.m1 = clock.m1;
+ crtc->new_config->dpll.m2 = clock.m2;
+ crtc->new_config->dpll.p1 = clock.p1;
+ crtc->new_config->dpll.p2 = clock.p2;
}
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (intel_crtc->config.has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+ if (crtc->new_config->has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
- dpll = ironlake_compute_dpll(intel_crtc,
+ dpll = ironlake_compute_dpll(crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
- intel_crtc->config.dpll_hw_state.dpll = dpll;
- intel_crtc->config.dpll_hw_state.fp0 = fp;
+ crtc->new_config->dpll_hw_state.dpll = dpll;
+ crtc->new_config->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
- intel_crtc->config.dpll_hw_state.fp1 = fp2;
+ crtc->new_config->dpll_hw_state.fp1 = fp2;
else
- intel_crtc->config.dpll_hw_state.fp1 = fp;
+ crtc->new_config->dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc);
+ pll = intel_get_shared_dpll(crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
return -EINVAL;
}
- } else
- intel_put_shared_dpll(intel_crtc);
+ }
if (is_lvds && has_reduced_clock && i915.powersave)
- intel_crtc->lowfreq_avail = true;
+ crtc->lowfreq_avail = true;
else
- intel_crtc->lowfreq_avail = false;
+ crtc->lowfreq_avail = false;
return 0;
}
@@ -7351,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
+static void skylake_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PS_CTL(crtc->pipe));
+
+ if (tmp & PS_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
+ pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+ }
+}
+
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7442,8 +7729,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ if (!intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7636,7 +7923,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
- unsigned long irqflags;
val = I915_READ(LCPLL_CTL);
@@ -7656,10 +7942,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7690,10 +7976,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
/* See the big comment above. */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irq(&dev_priv->uncore.lock);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
/*
@@ -7755,28 +8041,36 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void snb_modeset_global_resources(struct drm_device *dev)
+static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
{
- modeset_update_crtc_power_domains(dev);
-}
+ if (!intel_ddi_pll_select(crtc))
+ return -EINVAL;
-static void haswell_modeset_global_resources(struct drm_device *dev)
-{
- modeset_update_crtc_power_domains(dev);
+ crtc->lowfreq_avail = false;
+
+ return 0;
}
-static int haswell_crtc_mode_set(struct drm_crtc *crtc,
- int x, int y,
- struct drm_framebuffer *fb)
+static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!intel_ddi_pll_select(intel_crtc))
- return -EINVAL;
+ u32 temp;
- intel_crtc->lowfreq_avail = false;
+ temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
- return 0;
+ switch (pipe_config->ddi_pll_sel) {
+ case SKL_DPLL1:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ break;
+ case SKL_DPLL2:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ break;
+ case SKL_DPLL3:
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ break;
+ }
}
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -7808,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ if (IS_SKYLAKE(dev))
+ skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ else
+ haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7822,7 +8119,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ if (INTEL_INFO(dev)->gen < 9 &&
+ (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7841,7 +8139,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
@@ -7870,7 +8168,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_enabled(dev_priv,
+ if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
@@ -7883,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_enabled(dev_priv, pfit_domain))
- ironlake_get_pfit_config(crtc, pipe_config);
+ if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ if (IS_SKYLAKE(dev))
+ skylake_get_pfit_config(crtc, pipe_config);
+ else
+ ironlake_get_pfit_config(crtc, pipe_config);
+ }
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -7900,314 +8202,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static struct {
- int clock;
- u32 config;
-} hdmi_audio_clock[] = {
- { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
- { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
- { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
- { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
- { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
- { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
- { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
- { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
- { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
- { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-};
-
-/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
- if (mode->clock == hdmi_audio_clock[i].clock)
- break;
- }
-
- if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
- i = 1;
- }
-
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
-
- return hdmi_audio_clock[i].config;
-}
-
-static bool intel_eld_uptodate(struct drm_connector *connector,
- int reg_eldv, uint32_t bits_eldv,
- int reg_elda, uint32_t bits_elda,
- int reg_edid)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t i;
-
- i = I915_READ(reg_eldv);
- i &= bits_eldv;
-
- if (!eld[0])
- return !i;
-
- if (!i)
- return false;
-
- i = I915_READ(reg_elda);
- i &= ~bits_elda;
- I915_WRITE(reg_elda, i);
-
- for (i = 0; i < eld[2]; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
- return false;
-
- return true;
-}
-
-static void g4x_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t len;
- uint32_t i;
-
- i = I915_READ(G4X_AUD_VID_DID);
-
- if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
-
- if (intel_eld_uptodate(connector,
- G4X_AUD_CNTL_ST, eldv,
- G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
- G4X_HDMIW_HDMIEDID))
- return;
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i &= ~(eldv | G4X_ELD_ADDR);
- len = (i >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-
- if (!eld[0])
- return;
-
- len = min_t(uint8_t, eld[2], len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
-
- i = I915_READ(G4X_AUD_CNTL_ST);
- i |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, i);
-}
-
-static void haswell_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int pipe = to_intel_crtc(crtc)->pipe;
- int tmp;
-
- int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
- int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
- int aud_config = HSW_AUD_CFG(pipe);
- int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
-
- /* Audio output enable */
- DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
- tmp = I915_READ(aud_cntrl_st2);
- tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- POSTING_READ(aud_cntrl_st2);
-
- assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
-
- /* Set ELD valid state */
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
- tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
- I915_WRITE(aud_cntrl_st2, tmp);
- tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
-
- /* Enable HDMI mode */
- tmp = I915_READ(aud_config);
- DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
- /* clear N_programing_enable and N_value_index */
- tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
- I915_WRITE(aud_config, tmp);
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- eldv = AUDIO_ELD_VALID_A << (pipe * 4);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
- i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
- DRM_DEBUG_DRIVER("port num:%d\n", i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
-}
-
-static void ironlake_write_eld(struct drm_connector *connector,
- struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t i;
- int len;
- int hdmiw_hdmiedid;
- int aud_config;
- int aud_cntl_st;
- int aud_cntrl_st2;
- int pipe = to_intel_crtc(crtc)->pipe;
-
- if (HAS_PCH_IBX(connector->dev)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- aud_config = IBX_AUD_CFG(pipe);
- aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev)) {
- hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
- aud_config = VLV_AUD_CFG(pipe);
- aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
- aud_config = CPT_AUD_CFG(pipe);
- aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
-
- DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
- if (IS_VALLEYVIEW(connector->dev)) {
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
-
- intel_encoder = intel_attached_encoder(connector);
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- i = intel_dig_port->port;
- } else {
- i = I915_READ(aud_cntl_st);
- i = (i >> 29) & DIP_PORT_SEL_MASK;
- /* DIP_Port_Select, 0x1 = PortB */
- }
-
- if (!i) {
- DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
- /* operate blindly on all ports */
- eldv = IBX_ELD_VALIDB;
- eldv |= IBX_ELD_VALIDB << 4;
- eldv |= IBX_ELD_VALIDB << 8;
- } else {
- DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
- eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
- }
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
- } else {
- I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
- }
-
- if (intel_eld_uptodate(connector,
- aud_cntrl_st2, eldv,
- aud_cntl_st, IBX_ELD_ADDRESS,
- hdmiw_hdmiedid))
- return;
-
- i = I915_READ(aud_cntrl_st2);
- i &= ~eldv;
- I915_WRITE(aud_cntrl_st2, i);
-
- if (!eld[0])
- return;
-
- i = I915_READ(aud_cntl_st);
- i &= ~IBX_ELD_ADDRESS;
- I915_WRITE(aud_cntl_st, i);
-
- len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
- for (i = 0; i < len; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
-
- i = I915_READ(aud_cntrl_st2);
- i |= eldv;
- I915_WRITE(aud_cntrl_st2, i);
-}
-
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- connector = drm_select_eld(encoder, mode);
- if (!connector)
- return;
-
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
-
- if (dev_priv->display.write_eld)
- dev_priv->display.write_eld(connector, crtc, mode);
-}
-
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
@@ -8253,8 +8247,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_cntl = 0;
}
- if (intel_crtc->cursor_base != base)
+ if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
+ intel_crtc->cursor_base = base;
+ }
if (intel_crtc->cursor_size != size) {
I915_WRITE(CURSIZE, size);
@@ -8294,9 +8290,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
+ cntl |= CURSOR_ROTATE_180;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
@@ -8307,6 +8307,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
+
+ intel_crtc->cursor_base = base;
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8353,11 +8355,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev) &&
+ to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
+ base += (intel_crtc->cursor_height *
+ intel_crtc->cursor_width - 1) * 4;
+ }
+
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
- intel_crtc->cursor_base = base;
}
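On GMCH platforms the 180-degree cursor rotation above is achieved purely through the base address: CURBASE is pointed at the last pixel of the image and the hardware scans it out in reverse. With 4 bytes per pixel that offset is (width * height - 1) * 4, e.g. 16380 bytes for a 64x64 cursor. A small illustrative helper (assumes a 32bpp ARGB cursor, as the driver uses):

#include <stdint.h>

/* Byte offset of the last pixel of a w x h, 32bpp cursor image; added to
 * the object's base address when the plane is rotated by 180 degrees. */
static uint32_t rotate_180_base_offset(uint32_t w, uint32_t h)
{
	return (w * h - 1) * 4;
}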
static bool cursor_size_ok(struct drm_device *dev,
@@ -8397,22 +8405,15 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-/*
- * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
- *
- * Note that the object's reference will be consumed if the update fails. If
- * the update succeeds, the reference of the old object (if any) will be
- * consumed.
- */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned old_width, stride;
+ unsigned old_width;
uint32_t addr;
int ret;
@@ -8424,30 +8425,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
goto finish;
}
- /* Check for which cursor types we support */
- if (!cursor_size_ok(dev, width, height)) {
- DRM_DEBUG("Cursor dimension not supported\n");
- return -EINVAL;
- }
-
- stride = roundup_pow_of_two(width) * 4;
- if (obj->base.size < stride * height) {
- DRM_DEBUG_KMS("buffer is too small\n");
- ret = -ENOMEM;
- goto fail;
- }
-
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
- if (obj->tiling_mode) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
- ret = -EINVAL;
- goto fail_locked;
- }
-
/*
* Global gtt pte registers are special registers which actually
* forward writes to a chunk of system memory. Which means that
@@ -8514,17 +8496,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
- }
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+ }
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
-fail:
- drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -8559,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev,
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
return ERR_PTR(-ENOMEM);
}
@@ -8569,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev,
return &intel_fb->base;
err:
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
@@ -8702,6 +8682,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
@@ -8739,6 +8722,9 @@ retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
+ ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
+ if (ret)
+ goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
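Both load-detect paths now take the primary plane's lock in addition to the crtc lock, through the same acquire context, so contention surfaces as -EDEADLK and is handled by the existing retry label. A hedged sketch of that acquire-context pattern (error paths trimmed; not the driver's exact code):

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static int lock_crtc_and_primary(struct drm_crtc *crtc,
				 struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		return ret;	/* -EDEADLK: caller backs off and retries */

	return drm_modeset_lock(&crtc->primary->mutex, ctx);
}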
@@ -9021,35 +9007,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_device *dev,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_reg = DPLL(pipe);
- int dpll;
-
- if (!HAS_GMCH_DISPLAY(dev))
- return;
-
- if (!dev_priv->lvds_downclock_avail)
- return;
-
- dpll = I915_READ(dpll_reg);
- if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
- assert_panel_unlocked(dev_priv, pipe);
-
- dpll &= ~DISPLAY_RATE_SELECT_FPA1;
- I915_WRITE(dpll_reg, dpll);
- intel_wait_for_vblank(dev, pipe);
-
- dpll = I915_READ(dpll_reg);
- if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
- }
-}
-
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -9125,199 +9082,16 @@ out:
intel_runtime_pm_put(dev_priv);
}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
- unsigned frontbuffer_bits,
- struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
-
- if (!i915.powersave)
- return;
-
- for_each_pipe(dev_priv, pipe) {
- if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
- continue;
-
- intel_increase_pllclock(dev, pipe);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
- }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- if (ring) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits
- |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits
- &= ~obj->frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
- intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some outstanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* Delay flushing when rings are still busy.*/
- mutex_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
- intel_edp_psr_flush(dev, frontbuffer_bits);
-
- /*
- * FIXME: Unconditional fbc flushing here is a rather gross hack and
- * needs to be reworked into a proper frontbuffer tracking scheme like
- * psr employs.
- */
- if (IS_BROADWELL(dev))
- gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned frontbuffer_bits;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- frontbuffer_bits = obj->frontbuffer_bits;
-
- if (retire) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
- }
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits
- |= frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->fb_tracking.lock);
- /* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
-
- intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
- unsigned long flags;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (work) {
cancel_work_sync(&work->work);
@@ -9363,6 +9137,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
if (intel_crtc == NULL)
return;
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ */
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
@@ -9448,7 +9226,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
- /* NB: An MMIO update of the plane base pointer will also
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ *
+ * NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
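The locking rule followed throughout these conversions: keep spin_lock_irqsave() in paths that may run in interrupt context (flip completion, reset recovery), and use the cheaper spin_lock_irq() where the caller is known to be in process context with interrupts enabled. A minimal sketch with a stand-in lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(event_lock);

/* Process context only: interrupts are known to be enabled, so the
 * plain irq-disabling variant is sufficient. */
static void process_context_path(void)
{
	spin_lock_irq(&event_lock);
	/* ... touch data shared with the interrupt handler ... */
	spin_unlock_irq(&event_lock);
}

/* Reachable from interrupt handlers too, so the current interrupt
 * state must be saved and restored. */
static void any_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&event_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&event_lock, flags);
}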
@@ -9738,115 +9521,128 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ bool atomic_update;
+ u32 start_vbl_count;
u32 dspcntr;
u32 reg;
intel_mark_page_flip_active(intel_crtc);
+ atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
- if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSURF(intel_crtc->plane),
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
+
+ if (atomic_update)
+ intel_pipe_update_end(intel_crtc, start_vbl_count);
}
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+static void intel_mmio_flip_work_func(struct work_struct *work)
{
+ struct intel_crtc *intel_crtc =
+ container_of(work, struct intel_crtc, mmio_flip.work);
struct intel_engine_cs *ring;
- int ret;
+ uint32_t seqno;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ seqno = intel_crtc->mmio_flip.seqno;
+ ring = intel_crtc->mmio_flip.ring;
- if (!obj->last_write_seqno)
- return 0;
+ if (seqno)
+ WARN_ON(__i915_wait_seqno(ring, seqno,
+ intel_crtc->reset_counter,
+ false, NULL, NULL) != 0);
- ring = obj->ring;
-
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- obj->last_write_seqno))
- return 0;
-
- ret = i915_gem_check_olr(ring, obj->last_write_seqno);
- if (ret)
- return ret;
-
- if (WARN_ON(!ring->irq_get(ring)))
- return 0;
-
- return 1;
+ intel_do_mmio_flip(intel_crtc);
}
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_crtc *intel_crtc;
- unsigned long irq_flags;
- u32 seqno;
-
- seqno = ring->get_seqno(ring, false);
-
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- for_each_intel_crtc(ring->dev, intel_crtc) {
- struct intel_mmio_flip *mmio_flip;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- mmio_flip = &intel_crtc->mmio_flip;
- if (mmio_flip->seqno == 0)
- continue;
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring = obj->ring;
- if (ring->id != mmio_flip->ring_id)
- continue;
+ schedule_work(&intel_crtc->mmio_flip.work);
- if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
- intel_do_mmio_flip(intel_crtc);
- mmio_flip->seqno = 0;
- ring->irq_put(ring);
- }
- }
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ return 0;
}
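intel_queue_mmio_flip() now only records the seqno and ring and schedules a work item; the blocking wait happens in intel_mmio_flip_work_func(), which runs in process context, instead of being polled from the ring interrupt. A rough sketch of that deferral (the actual GPU wait is left as a comment, since it is driver-specific):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct mmio_flip {
	struct work_struct work;
	u32 seqno;		/* rendering to wait for before flipping */
};

static void mmio_flip_work_func(struct work_struct *work)
{
	struct mmio_flip *flip = container_of(work, struct mmio_flip, work);

	/* Process context: blocking is fine here. The patch above uses
	 * __i915_wait_seqno() on flip->seqno and then programs the new
	 * surface address via MMIO. */
	(void)flip;
}

static void queue_mmio_flip(struct mmio_flip *flip, u32 seqno)
{
	flip->seqno = seqno;
	schedule_work(&flip->work);	/* INIT_WORK() done once at crtc init */
}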
-static int intel_queue_mmio_flip(struct drm_device *dev,
+static int intel_gen9_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long irq_flags;
+ uint32_t plane = 0, stride;
int ret;
- if (WARN_ON(intel_crtc->mmio_flip.seqno))
- return -EBUSY;
+ switch(intel_crtc->pipe) {
+ case PIPE_A:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
+ break;
+ case PIPE_B:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
+ break;
+ case PIPE_C:
+ plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ return -ENODEV;
+ }
- ret = intel_postpone_flip(obj);
- if (ret < 0)
- return ret;
- if (ret == 0) {
- intel_do_mmio_flip(intel_crtc);
- return 0;
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ WARN_ONCE(1, "unknown tiling in flip command\n");
+ return -ENODEV;
}
- spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
- intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
- intel_crtc->mmio_flip.ring_id = obj->ring->id;
- spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit(ring, 0);
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
+ intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
+ intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+
+ intel_mark_page_flip_active(intel_crtc);
+ __intel_ring_advance(ring);
- /*
- * Double check to catch cases where irq fired before
- * mmio flip data was ready
- */
- intel_notify_mmio_flip(obj->ring);
return 0;
}
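The gen9 flip command encodes the pitch in surface-dependent units: 64-byte units for linear buffers (hence >> 6) and 512-byte units for X-tiled buffers (>> 9). For example, a linear 1920-wide XRGB framebuffer with a 7680-byte pitch is programmed as 7680 / 64 = 120. A tiny helper restating the switch above (illustrative name):

#include <linux/types.h>

static u32 skl_flip_stride(u32 pitch_bytes, bool x_tiled)
{
	/* 512-byte units for X-tiled surfaces, 64-byte units for linear. */
	return x_tiled ? pitch_bytes >> 9 : pitch_bytes >> 6;
}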
@@ -9905,18 +9701,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long flags;
+
+ WARN_ON(!in_irq());
if (crtc == NULL)
return;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock(&dev->event_lock);
if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock(&dev->event_lock);
}
static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9932,7 +9729,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
- unsigned long flags;
int ret;
/*
@@ -9973,7 +9769,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto free_work;
/* We borrow the event spin lock for protecting unpin_work */
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
@@ -9983,7 +9779,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
page_flip_completed(intel_crtc);
} else {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
kfree(work);
@@ -9991,7 +9787,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
}
}
intel_crtc->unpin_work = work;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
@@ -10029,7 +9825,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
if (ret)
goto cleanup_pending;
@@ -10078,9 +9874,9 @@ cleanup_pending:
mutex_unlock(&dev->struct_mutex);
cleanup:
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
free_work:
@@ -10091,9 +9887,9 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event) {
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
}
}
return ret;
@@ -10289,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dp_m2_n2.link_n,
pipe_config->dp_m2_n2.tu);
+ DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
+ pipe_config->has_audio,
+ pipe_config->has_infoframe);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10350,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
return true;
}
+static bool check_digital_port_conflicts(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ unsigned int used_ports = 0;
+
+ /*
+ * Walk the connector list instead of the encoder
+ * list to detect the problem on ddi platforms
+ * where there's just one encoder per digital port.
+ */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, base.head) {
+ struct intel_encoder *encoder = connector->new_encoder;
+
+ if (!encoder)
+ continue;
+
+ WARN_ON(!encoder->new_crtc);
+
+ switch (encoder->type) {
+ unsigned int port_mask;
+ case INTEL_OUTPUT_UNKNOWN:
+ if (WARN_ON(!HAS_DDI(dev)))
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+ /* the same port mustn't appear more than once */
+ if (used_ports & port_mask)
+ return false;
+
+ used_ports |= port_mask;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
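check_digital_port_conflicts() uses the standard one-bit-per-resource trick: each digital port maps to one bit in used_ports, and seeing a bit that is already set means two connectors are routed to the same port. The same idea in isolation (plain C, hypothetical helper):

#include <stdbool.h>

static bool ports_are_unique(const int *ports, int count)
{
	unsigned int used = 0;

	for (int i = 0; i < count; i++) {
		unsigned int mask = 1u << ports[i];

		if (used & mask)
			return false;	/* same port claimed twice */
		used |= mask;
	}
	return true;
}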
+
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -10366,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
+ if (!check_digital_port_conflicts(dev)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ return ERR_PTR(-EINVAL);
+ }
+
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
@@ -10571,10 +10418,13 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
+ intel_shared_dpll_commit(dev_priv);
+
for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
@@ -10754,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -10810,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10827,6 +10681,56 @@ intel_pipe_config_compare(struct drm_device *dev,
return true;
}
+static void check_wm_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation hw_ddb, *sw_ddb;
+ struct intel_crtc *intel_crtc;
+ int plane;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ return;
+
+ skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ sw_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct skl_ddb_entry *hw_entry, *sw_entry;
+ const enum pipe pipe = intel_crtc->pipe;
+
+ if (!intel_crtc->active)
+ continue;
+
+ /* planes */
+ for_each_plane(pipe, plane) {
+ hw_entry = &hw_ddb.plane[pipe][plane];
+ sw_entry = &sw_ddb->plane[pipe][plane];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+
+ /* cursor */
+ hw_entry = &hw_ddb.cursor[pipe];
+ sw_entry = &sw_ddb->cursor[pipe];
+
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
+ continue;
+
+ DRM_ERROR("mismatch in DDB state pipe %c cursor "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
+}
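check_wm_state() re-reads the DDB allocation from the hardware and compares it entry by entry against the software copy; an entry is just a block range. A sketch of the comparison, assuming a reduced entry type for illustration:

#include <stdbool.h>
#include <stdint.h>

struct ddb_entry {
	uint16_t start, end;	/* allocation in DDB blocks */
};

static bool ddb_entry_equal(const struct ddb_entry *a,
			    const struct ddb_entry *b)
{
	return a->start == b->start && a->end == b->end;
}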
+
static void
check_connector_state(struct drm_device *dev)
{
@@ -10993,9 +10897,9 @@ check_shared_dpll_state(struct drm_device *dev)
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
- WARN(pll->active > pll->refcount,
+ WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
- pll->active, pll->refcount);
+ pll->active, hweight32(pll->config.crtc_mask));
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
@@ -11013,11 +10917,11 @@ check_shared_dpll_state(struct drm_device *dev)
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
- WARN(pll->refcount != enabled_crtcs,
+ WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
- pll->refcount, enabled_crtcs);
+ hweight32(pll->config.crtc_mask), enabled_crtcs);
- WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+ WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
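With the separate refcount gone, the number of crtcs using a shared DPLL is derived from the crtc_mask on demand: hweight32() counts the set bits, so a mask of 0x5 (pipes A and C) gives 2. Minimal illustration:

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned int pll_user_count(u32 crtc_mask)
{
	/* One bit per crtc that uses the PLL, so users == set bits. */
	return hweight32(crtc_mask);
}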
@@ -11026,6 +10930,7 @@ check_shared_dpll_state(struct drm_device *dev)
void
intel_modeset_check_state(struct drm_device *dev)
{
+ check_wm_state(dev);
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
@@ -11076,50 +10981,67 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
- intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
}
+static struct intel_crtc_config *
+intel_modeset_compute_config(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb,
+ unsigned *modeset_pipes,
+ unsigned *prepare_pipes,
+ unsigned *disable_pipes)
+{
+ struct intel_crtc_config *pipe_config = NULL;
+
+ intel_modeset_affected_pipes(crtc, modeset_pipes,
+ prepare_pipes, disable_pipes);
+
+ if ((*modeset_pipes) == 0)
+ goto out;
+
+ /*
+ * Note this needs changes when we start tracking multiple modes
+ * and crtcs. At that point we'll need to compute the whole config
+ * (i.e. one pipe_config for each crtc) rather than just the one
+ * for this crtc.
+ */
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (IS_ERR(pipe_config)) {
+ goto out;
+ }
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
+
+out:
+ return pipe_config;
+}
+
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
- struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
- unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
- intel_modeset_affected_pipes(crtc, &modeset_pipes,
- &prepare_pipes, &disable_pipes);
-
*saved_mode = crtc->mode;
- /* Hack: Because we don't (yet) support global modeset on multiple
- * crtcs, we don't keep track of the new mode for more than one crtc.
- * Hence simply check whether any bit is set in modeset_pipes in all the
- * pieces of code that are not yet converted to deal with multiple crtcs
- * changing their mode at the same time. */
- if (modeset_pipes) {
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- pipe_config = NULL;
-
- goto out;
- }
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
+ if (modeset_pipes)
to_intel_crtc(crtc)->new_config = pipe_config;
- }
/*
* See if the config requires any additional preparation, e.g.
@@ -11135,6 +11057,22 @@ static int __intel_set_mode(struct drm_crtc *crtc,
prepare_pipes &= ~disable_pipes;
}
+ if (dev_priv->display.crtc_compute_clock) {
+ unsigned clear_pipes = modeset_pipes | disable_pipes;
+
+ ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
+ if (ret)
+ goto done;
+
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = dev_priv->display.crtc_compute_clock(intel_crtc);
+ if (ret) {
+ intel_shared_dpll_abort_config(dev_priv);
+ goto done;
+ }
+ }
+ }
+
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
@@ -11145,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
+ *
+ * Note we'll need to fix this up when we start tracking multiple
+ * pipes; here we assume a single modeset_pipe and only track the
+ * single crtc and mode.
*/
if (modeset_pipes) {
crtc->mode = *mode;
@@ -11166,8 +11108,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 * update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ modeset_update_crtc_power_domains(dev);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11178,9 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
@@ -11195,11 +11134,6 @@ static int __intel_set_mode(struct drm_crtc *crtc,
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
-
- ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
- x, y, fb);
- if (ret)
- goto done;
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11214,19 +11148,23 @@ done:
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
-out:
kfree(pipe_config);
kfree(saved_mode);
return ret;
}
-static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode_pipes(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config,
+ unsigned modeset_pipes,
+ unsigned prepare_pipes,
+ unsigned disable_pipes)
{
int ret;
- ret = __intel_set_mode(crtc, mode, x, y, fb);
+ ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
+ prepare_pipes, disable_pipes);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
@@ -11234,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
return ret;
}
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
+
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+
+ if (IS_ERR(pipe_config))
+ return PTR_ERR(pipe_config);
+
+ return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
+}
+
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11562,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
+ struct intel_crtc_config *pipe_config;
+ unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
BUG_ON(!set);
@@ -11607,9 +11567,38 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret)
goto fail;
+ pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
+ set->fb,
+ &modeset_pipes,
+ &prepare_pipes,
+ &disable_pipes);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto fail;
+ } else if (pipe_config) {
+ if (pipe_config->has_audio !=
+ to_intel_crtc(set->crtc)->config.has_audio)
+ config->mode_changed = true;
+
+ /*
+ * Note we have an issue here with infoframes: current code
+ * only updates them on the full mode set path per hw
+ * requirements. So here we should be checking for any
+ * required changes and forcing a mode set.
+ */
+ }
+
+ /* set_mode will free it in the mode_changed case */
+ if (!config->mode_changed)
+ kfree(pipe_config);
+
+ intel_update_pipe_size(to_intel_crtc(set->crtc));
+
if (config->mode_changed) {
- ret = intel_set_mode(set->crtc, set->mode,
- set->x, set->y, set->fb);
+ ret = intel_set_mode_pipes(set->crtc, set->mode,
+ set->x, set->y, set->fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
@@ -11679,7 +11668,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
{
uint32_t val;
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -11693,8 +11682,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+ I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
@@ -11703,7 +11692,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
@@ -11714,7 +11703,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -11813,161 +11802,195 @@ disable_unpin:
}
static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+
+ return drm_plane_helper_check_update(plane, crtc, fb,
+ src, dest, clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &state->visible);
+}
+
+static int
+intel_prepare_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
- struct intel_plane *intel_plane = to_intel_plane(plane);
- bool visible;
int ret;
- ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &visible);
+ intel_crtc_wait_for_pending_flips(crtc);
- if (ret)
- return ret;
+ if (intel_crtc_has_pending_flip(crtc)) {
+ DRM_ERROR("pipe is still busy with an old pageflip\n");
+ return -EBUSY;
+ }
- /*
- * If the CRTC isn't enabled, we're just pinning the framebuffer,
- * updating the fb pointer, and returning without touching the
- * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
- * turn on the display with all planes setup as desired.
- */
- if (!crtc->enabled) {
+ if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
-
- /*
- * If we already called setplane while the crtc was disabled,
- * we may have an fb pinned; unpin it.
- */
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
-
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- /* Pin and return without programming hardware */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ if (ret != 0) {
+ DRM_DEBUG_KMS("pin & fence failed\n");
+ return ret;
+ }
}
- intel_crtc_wait_for_pending_flips(crtc);
+ return 0;
+}
- /*
- * If clipping results in a non-visible primary plane, we'll disable
- * the primary plane. Note that this is a bit different than what
- * happens if userspace explicitly disables the plane by passing fb=0
- * because plane->fb still gets set and pinned.
- */
- if (!visible) {
- mutex_lock(&dev->struct_mutex);
+static void
+intel_commit_primary_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = plane->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_rect *src = &state->src;
+ crtc->primary->fb = fb;
+ crtc->x = src->x1 >> 16;
+ crtc->y = src->y1 >> 16;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
+ if (intel_crtc->active) {
/*
- * Try to pin the new fb first so that we can bail out if we
- * fail.
+ * FBC does not work on some platforms for rotated
+ * planes, so disable it when rotation is not 0 and
+ * update it when rotation is set back to 0.
+ *
+ * FIXME: This is redundant with the fbc update done in
+ * the primary plane enable function except that that
+ * one is done too late. We eventually need to unify
+ * this.
*/
- if (plane->fb != fb) {
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (intel_crtc->primary_enabled &&
+ INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+ dev_priv->fbc.plane == intel_crtc->plane &&
+ intel_plane->rotation != BIT(DRM_ROTATE_0)) {
+ intel_disable_fbc(dev);
}
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
- if (intel_crtc->primary_enabled)
- intel_disable_primary_hw_plane(plane, crtc);
+ if (state->visible) {
+ bool was_enabled = intel_crtc->primary_enabled;
+ /* FIXME: kill this fastboot hack */
+ intel_update_pipe_size(intel_crtc);
- if (plane->fb != fb)
- if (plane->fb)
- intel_unpin_fb_obj(old_obj);
+ intel_crtc->primary_enabled = true;
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
- } else {
- if (intel_crtc && intel_crtc->active &&
- intel_crtc->primary_enabled) {
/*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
*/
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.plane == intel_crtc->plane &&
- intel_plane->rotation != BIT(DRM_ROTATE_0)) {
- intel_disable_fbc(dev);
- }
+ if (IS_BROADWELL(dev) && !was_enabled)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } else {
+ /*
+ * If clipping results in a non-visible primary plane,
+ * we'll disable the primary plane. Note that this is
+ * a bit different than what happens if userspace
+ * explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ intel_disable_primary_hw_plane(plane, crtc);
}
- ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
- if (ret)
- return ret;
- if (!intel_crtc->primary_enabled)
- intel_enable_primary_hw_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
- intel_plane->obj = obj;
+ if (old_fb && old_fb != fb) {
+ if (intel_crtc->active)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(old_obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_primary_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_primary_plane(plane, &state);
return 0;
}
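The plane state carries source coordinates in 16.16 fixed point and destination coordinates in whole pixels, which is why the commit step shifts src->x1 and src->y1 right by 16 before storing them as crtc->x/y. A one-line conversion example:

#include <stdint.h>

/* 16.16 fixed point: upper 16 bits integer part, lower 16 bits fraction.
 * e.g. 3 << 16 (0x00030000) is pixel 3. */
static inline int fixed16_to_int(uint32_t v)
{
	return (int)(v >> 16);
}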
@@ -12046,51 +12069,92 @@ intel_cursor_plane_disable(struct drm_plane *plane)
}
static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_rect dest = {
- /* integer pixels */
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- const struct drm_rect clip = {
- /* integer pixels */
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- bool visible;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &state->src;
+ const struct drm_rect *clip = &state->clip;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_w, crtc_h;
+ unsigned stride;
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
+ src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- true, true, &visible);
+ true, true, &state->visible);
if (ret)
return ret;
- crtc->cursor_x = crtc_x;
- crtc->cursor_y = crtc_y;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!obj)
+ return 0;
+
+ /* Check for which cursor types we support */
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
+ if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
+ DRM_DEBUG("Cursor dimension not supported\n");
+ return -EINVAL;
+ }
+
+ stride = roundup_pow_of_two(crtc_w) * 4;
+ if (obj->base.size < stride * crtc_h) {
+ DRM_DEBUG_KMS("buffer is too small\n");
+ return -ENOMEM;
+ }
+
+ if (fb == crtc->cursor->fb)
+ return 0;
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ mutex_lock(&dev->struct_mutex);
+ if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
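The size validation that used to live in intel_crtc_cursor_set_obj() now happens here: the stride is the width rounded up to a power of two times 4 bytes per pixel, and the object must cover stride * height bytes, so a 64x64 cursor needs 256 * 64 = 16 KiB and a 256x256 cursor 1024 * 256 = 256 KiB. A compact restatement of that check (same kernel helpers as above):

#include <linux/log2.h>
#include <linux/types.h>

static bool cursor_obj_big_enough(size_t obj_size, u32 width, u32 height)
{
	size_t stride = roundup_pow_of_two(width) * 4;	/* 4 bytes per pixel */

	return obj_size >= stride * height;
}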
+
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ int crtc_w, crtc_h;
+
+ crtc->cursor_x = state->orig_dst.x1;
+ crtc->cursor_y = state->orig_dst.y1;
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
+ intel_plane->obj = obj;
+
if (fb != crtc->cursor->fb) {
+ crtc_w = drm_rect_width(&state->orig_dst);
+ crtc_h = drm_rect_height(&state->orig_dst);
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
- intel_crtc_update_cursor(crtc, visible);
+ intel_crtc_update_cursor(crtc, state->visible);
intel_frontbuffer_flip(crtc->dev,
INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12098,10 +12162,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane_state state;
+ int ret;
+
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_cursor_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ return intel_commit_cursor_plane(plane, &state);
+}
+
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
+ .set_property = intel_plane_set_property,
};
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -12117,12 +12224,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->rotation = BIT(DRM_ROTATE_0);
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (!dev->mode_config.rotation_property)
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_180));
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&cursor->base.base,
+ dev->mode_config.rotation_property,
+ cursor->rotation);
+ }
+
return &cursor->base;
}
@@ -12178,6 +12299,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -12198,7 +12321,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!encoder)
+ if (!encoder || WARN_ON(!encoder->crtc))
return INVALID_PIPE;
return to_intel_crtc(encoder->crtc)->pipe;
@@ -12286,7 +12409,10 @@ static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_ULT(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return false;
+
+ if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
return false;
if (IS_CHERRYVIEW(dev))
@@ -12430,7 +12556,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- intel_edp_psr_init(dev);
+ intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12634,16 +12760,22 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
+ if (INTEL_INFO(dev)->gen >= 9)
+ dev_priv->display.update_primary_plane =
+ skylake_update_primary_plane;
+ else
+ dev_priv->display.update_primary_plane =
+ ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
- dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock =
+ ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
@@ -12652,7 +12784,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12661,7 +12793,7 @@ static void intel_init_display(struct drm_device *dev)
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
- dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
@@ -12698,31 +12830,20 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- if (IS_G4X(dev)) {
- dev_priv->display.write_eld = g4x_write_eld;
- } else if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = haswell_write_eld;
- dev_priv->display.modeset_global_resources =
- haswell_modeset_global_resources;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
- dev_priv->display.write_eld = ironlake_write_eld;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -12749,6 +12870,9 @@ static void intel_init_display(struct drm_device *dev)
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
+ case 9:
+ dev_priv->display.queue_flip = intel_gen9_queue_flip;
+ break;
}
intel_panel_init_backlight_funcs(dev);
@@ -12953,11 +13077,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_enable_gt_powersave(dev);
}
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
- intel_suspend_hw(dev);
-}
-
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12983,6 +13102,7 @@ void intel_modeset_init(struct drm_device *dev)
return;
intel_init_display(dev);
+ intel_init_audio(dev);
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
@@ -13293,7 +13413,7 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
@@ -13337,18 +13457,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+ pll->on = pll->get_hw_state(dev_priv, pll,
+ &pll->config.hw_state);
pll->active = 0;
+ pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
pll->active++;
+ pll->config.crtc_mask |= 1 << crtc->pipe;
+ }
}
- pll->refcount = pll->active;
- DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
- pll->name, pll->refcount, pll->on);
+ DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->name, pll->config.crtc_mask, pll->on);
- if (pll->refcount)
+ if (pll->config.crtc_mask)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
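
The readout hunk above drops the scalar pll->refcount in favour of a per-pipe crtc_mask taken from hardware state. A small self-contained sketch (all names here are local to the example, not driver symbols) shows how such a mask both records which pipes use a PLL and still yields a user count when one is needed:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's per-PLL config state. */
struct dpll_config {
	uint32_t crtc_mask;	/* bit N set => pipe N uses this PLL */
};

static void dpll_readout(struct dpll_config *cfg, const int *active_pipes, int n)
{
	int i;

	cfg->crtc_mask = 0;
	for (i = 0; i < n; i++)
		cfg->crtc_mask |= 1u << active_pipes[i];
}

int main(void)
{
	struct dpll_config cfg;
	int pipes[] = { 0, 2 };	/* pipes A and C share this PLL */

	dpll_readout(&cfg, pipes, 2);
	/* __builtin_popcount() recovers the old "refcount" if ever needed. */
	printf("crtc_mask=0x%08x users=%d\n",
	       (unsigned)cfg.crtc_mask, __builtin_popcount(cfg.crtc_mask));
	return 0;
}
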
@@ -13438,7 +13561,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
pll->on = false;
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_GEN9(dev))
+ skl_wm_get_hw_state(dev);
+ else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
if (force_restore) {
@@ -13452,8 +13577,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13464,6 +13589,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
@@ -13471,6 +13597,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
@@ -13486,7 +13622,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
if (obj == NULL)
continue;
- if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(c->primary,
+ c->primary->fb,
+ NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -13494,6 +13632,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
}
}
mutex_unlock(&dev->struct_mutex);
+
+ intel_backlight_register(dev);
}
void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13509,14 +13649,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
+ intel_disable_gt_powersave(dev);
+
+ intel_backlight_unregister(dev);
+
/*
* Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning of rps, connectors, ...) would
+ * Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- drm_irq_uninstall(dev);
- intel_hpd_cancel_work(dev_priv);
- dev_priv->pm._irqs_disabled = true;
+ intel_irq_uninstall(dev_priv);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13530,8 +13672,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- intel_disable_gt_powersave(dev);
-
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
@@ -13671,8 +13811,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ __intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -13707,7 +13847,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_unlocked(dev_priv,
+ __intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
@@ -13791,9 +13931,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
for_each_intel_crtc(dev, crtc) {
struct intel_unpin_work *work;
- unsigned long irqflags;
- spin_lock_irqsave(&dev->event_lock, irqflags);
+ spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
@@ -13803,6 +13942,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
work->event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&dev->event_lock);
}
}
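
The final hunk for this file swaps spin_lock_irqsave() for spin_lock_irq() in intel_modeset_preclose(); the flags-saving variant is only required when the caller may already run with interrupts disabled, which is not the case on this process-context path. A minimal kernel-style sketch of the two locking patterns, for context only and not a drop-in:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(event_lock);

/* Caller might already have IRQs disabled: save and restore the flags. */
static void touch_from_unknown_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&event_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&event_lock, flags);
}

/* Caller is known to run with IRQs enabled (plain process context). */
static void touch_from_process_context(void)
{
	spin_lock_irq(&event_lock);
	/* ... critical section ... */
	spin_unlock_irq(&event_lock);
}
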
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4bcd91757321..5cecc20efa71 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -113,6 +113,9 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
+static void vlv_steal_power_sequencer(struct drm_device *dev,
+ enum pipe pipe);
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
@@ -224,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static uint32_t
-pack_aux(uint8_t *src, int src_bytes)
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
@@ -237,8 +239,7 @@ pack_aux(uint8_t *src, int src_bytes)
return v;
}
-static void
-unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
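
The renamed intel_dp_pack_aux()/intel_dp_unpack_aux() helpers in the hunk above pack AUX message bytes MSB-first into 32-bit data-register words and back. A standalone sketch of the same byte layout, with made-up data and helper names local to the example:

#include <stdint.h>
#include <stdio.h>

/* Pack up to 4 bytes MSB-first into one 32-bit AUX data word. */
static uint32_t pack_aux_word(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

static void unpack_aux_word(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
	uint8_t msg[3] = { 0x10, 0x20, 0x30 };	/* e.g. a 3-byte AUX header */
	uint8_t out[3];
	uint32_t w = pack_aux_word(msg, 3);	/* 0x10203000 */

	unpack_aux_word(w, out, 3);
	printf("word=0x%08x first byte back=0x%02x\n", (unsigned)w, out[0]);
	return 0;
}
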
@@ -283,12 +284,10 @@ intel_hrawclk(struct drm_device *dev)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out);
+ struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
@@ -322,6 +321,66 @@ static void pps_unlock(struct intel_dp *intel_dp)
intel_display_power_put(dev_priv, power_domain);
}
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ bool pll_enabled;
+ uint32_t DP;
+
+ if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ pipe_name(pipe), port_name(intel_dig_port->port)))
+ return;
+
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev))
+ DP |= DP_PIPE_SELECT_CHV(pipe);
+ else if (pipe == PIPE_B)
+ DP |= DP_PIPEB_SELECT;
+
+ pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+ * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled)
+ vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even the VDD force bit won't work.
+ */
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+
+ if (!pll_enabled)
+ vlv_force_pll_off(dev, pipe);
+}
+
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
@@ -330,10 +389,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
- struct edp_power_seq power_seq;
+ enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* We should never land here with regular DP ports */
+ WARN_ON(!is_edp(intel_dp));
+
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -359,18 +421,26 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* are two power sequencers and up to two eDP ports.
*/
if (WARN_ON(pipes == 0))
- return PIPE_A;
+ pipe = PIPE_A;
+ else
+ pipe = ffs(pipes) - 1;
- intel_dp->pps_pipe = ffs(pipes) - 1;
+ vlv_steal_power_sequencer(dev, pipe);
+ intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
return intel_dp->pps_pipe;
}
@@ -425,7 +495,6 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq power_seq;
enum port port = intel_dig_port->port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -453,9 +522,8 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -550,6 +618,10 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
@@ -560,6 +632,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (IS_VALLEYVIEW(dev) &&
+ intel_dp->pps_pipe == INVALID_PIPE)
+ return false;
+
return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
@@ -661,6 +737,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 100;
}
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (the hardware
+ * derives the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
@@ -691,9 +777,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t unused)
+{
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_1600us |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
+ const uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -760,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
@@ -814,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- unpack_aux(I915_READ(ch_data + i),
- recv + i, recv_bytes - i);
+ intel_dp_unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
ret = recv_bytes;
out:
@@ -925,7 +1027,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
BUG();
}
- if (!HAS_DDI(dev))
+ /*
+ * The AUX_CTL register is usually DP_CTL + 0x10.
+ *
+ * On Haswell and Broadwell though:
+ * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+ * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+ *
+ * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+ */
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
intel_dp->aux.name = name;
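
As the comment above explains, the AUX_CTL block normally sits at DP_CTL + 0x10, with Haswell/Broadwell as the exception. A hedged sketch of that selection (pick_aux_ctl_reg() and its arguments are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: pick the AUX_CTL register for a DP port.
 * 'ddi_aux_ctl' stands in for the dedicated HSW/BDW AUX registers that
 * the driver selects per port earlier in intel_dp_aux_init().
 */
static uint32_t pick_aux_ctl_reg(uint32_t output_reg, uint32_t ddi_aux_ctl,
				 bool is_hsw_or_bdw)
{
	return is_hsw_or_bdw ? ddi_aux_ctl : output_reg + 0x10;
}
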
@@ -963,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
}
static void
+skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ u32 ctrl1;
+
+ pipe_config->ddi_pll_sel = SKL_DPLL0;
+ pipe_config->dpll_hw_state.cfgcr1 = 0;
+ pipe_config->dpll_hw_state.cfgcr2 = 0;
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_2_7:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+ SKL_DPLL0);
+ break;
+ case DP_LINK_BW_5_4:
+ ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+ SKL_DPLL0);
+ break;
+ }
+ pipe_config->dpll_hw_state.ctrl1 = ctrl1;
+}
+
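
skl_edp_set_pll_config() above keys DPLL0's link rate off the raw DP link-bandwidth code. For reference, the standard code-to-rate relationship (rate = code * 27 MHz per lane, expressed in kHz as the DRM helpers do) looks like this; the helper below is a local stand-in for drm_dp_bw_code_to_link_rate():

#include <stdio.h>

#define DP_LINK_BW_1_62	0x06
#define DP_LINK_BW_2_7	0x0a
#define DP_LINK_BW_5_4	0x14

/* Same convention as drm_dp_bw_code_to_link_rate(): result in kHz. */
static int bw_code_to_link_rate_khz(unsigned char link_bw)
{
	return link_bw * 27000;
}

int main(void)
{
	printf("1.62 GHz: %d kHz\n", bw_code_to_link_rate_khz(DP_LINK_BW_1_62)); /* 162000 */
	printf("2.70 GHz: %d kHz\n", bw_code_to_link_rate_khz(DP_LINK_BW_2_7));  /* 270000 */
	printf("5.40 GHz: %d kHz\n", bw_code_to_link_rate_khz(DP_LINK_BW_5_4));  /* 540000 */
	return 0;
}
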
+static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
@@ -1139,7 +1277,9 @@ found:
&pipe_config->dp_m2_n2);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_SKYLAKE(dev) && is_edp(intel_dp))
+ skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1212,12 +1352,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
- if (crtc->config.has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ if (crtc->config.has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
/* Split out the IBX/CPU vs CPT settings */
@@ -1367,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return false;
+ cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
if (edp_have_panel_vdd(intel_dp))
@@ -1375,7 +1512,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- DRM_DEBUG_KMS("Turning eDP VDD on\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
+ port_name(intel_dig_port->port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -1394,7 +1532,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP was not running\n");
+ DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
+ port_name(intel_dig_port->port));
msleep(intel_dp->panel_power_up_delay);
}
@@ -1419,7 +1558,8 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
- WARN(!vdd, "eDP VDD already requested on\n");
+ WARN(!vdd, "eDP port %c VDD already requested on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -1440,7 +1580,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP VDD off\n");
+ DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
+ port_name(intel_dig_port->port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -1501,7 +1642,8 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
- WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+ WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+ port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@@ -1511,40 +1653,25 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
edp_panel_vdd_schedule_off(intel_dp);
}
-/*
- * Must be paired with intel_edp_panel_vdd_on().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- if (!is_edp(intel_dp))
- return;
-
- pps_lock(intel_dp);
- edp_panel_vdd_off(intel_dp, sync);
- pps_unlock(intel_dp);
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
+static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power on\n");
+ DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- pps_lock(intel_dp);
-
- if (edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP power already on\n");
- goto out;
- }
+ if (WARN(edp_have_panel_power(intel_dp),
+ "eDP port %c panel power already on\n",
+ port_name(dp_to_dig_port(intel_dp)->port)))
+ return;
wait_panel_power_cycle(intel_dp);
@@ -1572,12 +1699,20 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
}
+}
+
+void intel_edp_panel_on(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
- out:
+ pps_lock(intel_dp);
+ edp_panel_on(intel_dp);
pps_unlock(intel_dp);
}
-void intel_edp_panel_off(struct intel_dp *intel_dp)
+
+static void edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1587,14 +1722,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
u32 pp;
u32 pp_ctrl_reg;
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
if (!is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP power off\n");
-
- pps_lock(intel_dp);
+ DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
- WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+ WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
+ port_name(dp_to_dig_port(intel_dp)->port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1615,7 +1752,15 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
+}
+
+void intel_edp_panel_off(struct intel_dp *intel_dp)
+{
+ if (!is_edp(intel_dp))
+ return;
+ pps_lock(intel_dp);
+ edp_panel_off(intel_dp);
pps_unlock(intel_dp);
}
@@ -1819,7 +1964,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_dp->output_reg);
@@ -1951,368 +2096,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct intel_dp *intel_dp)
-{
- return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
-}
-
-static bool intel_edp_is_psr_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev))
- return false;
-
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-}
-
-static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
- struct edp_vsc_psr *vsc_psr)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
- u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
- uint32_t *data = (uint32_t *) vsc_psr;
- unsigned int i;
-
- /* As per BSPec (Pipe Video Data Island Packet), we need to disable
- the video DIP being updated before program video DIP data buffer
- registers for DIP being updated. */
- I915_WRITE(ctl_reg, 0);
- POSTING_READ(ctl_reg);
-
- for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
- if (i < sizeof(struct edp_vsc_psr))
- I915_WRITE(data_reg + i, *data++);
- else
- I915_WRITE(data_reg + i, 0);
- }
-
- I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
- POSTING_READ(ctl_reg);
-}
-
-static void intel_edp_psr_setup(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-}
-
-static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t aux_clock_divider;
- int precharge = 0x3;
- int msg_size = 5; /* Header(4) + Message(1) */
- bool only_standby = false;
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- /* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-
- /* Setup AUX registers */
- I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
- I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
- I915_WRITE(EDP_PSR_AUX_CTL(dev),
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-}
-
-static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t max_sleep_time = 0x1f;
- uint32_t idle_frames = 1;
- uint32_t val = 0x0;
- const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- bool only_standby = false;
-
- if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
- only_standby = true;
-
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
- val |= EDP_PSR_LINK_STANDBY;
- val |= EDP_PSR_TP2_TP3_TIME_0us;
- val |= EDP_PSR_TP1_TIME_0us;
- val |= EDP_PSR_SKIP_AUX_EXIT;
- val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
- } else
- val |= EDP_PSR_LINK_DISABLE;
-
- I915_WRITE(EDP_PSR_CTL(dev), val |
- (IS_BROADWELL(dev) ? 0 : link_entry_time) |
- max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
- idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
- EDP_PSR_ENABLE);
-}
-
-static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- dev_priv->psr.source_ok = false;
-
- if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
- DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
- }
-
- /* Below limitations aren't valid for Broadwell */
- if (IS_BROADWELL(dev))
- goto out;
-
- if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
- }
-
- if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
- }
-
- out:
- dev_priv->psr.source_ok = true;
- return true;
-}
-
-static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
-
- /* Enable PSR on the panel */
- intel_edp_psr_enable_sink(intel_dp);
-
- /* Enable PSR on the host */
- intel_edp_psr_enable_source(intel_dp);
-
- dev_priv->psr.active = true;
-}
-
-void intel_edp_psr_enable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- return;
- }
-
- mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR already in use\n");
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- dev_priv->psr.busy_frontbuffer_bits = 0;
-
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
-
- if (intel_edp_psr_match_conditions(intel_dp))
- dev_priv->psr.enabled = intel_dp;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_disable(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
-
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
- }
-
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
-
- cancel_delayed_work_sync(&dev_priv->psr.work);
-}
-
-static void intel_edp_psr_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
-
- mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
-
- if (!intel_dp)
- goto unlock;
-
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck. Since psr_flush first clears this and then reschedules we
- * won't ever miss a flush when bailing out here.
- */
- if (dev_priv->psr.busy_frontbuffer_bits)
- goto unlock;
-
- intel_edp_psr_do_enable(intel_dp);
-unlock:
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-static void intel_edp_psr_do_exit(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->psr.active) {
- u32 val = I915_READ(EDP_PSR_CTL(dev));
-
- WARN_ON(!(val & EDP_PSR_ENABLE));
-
- I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
- }
-
-}
-
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- intel_edp_psr_do_exit(dev);
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /*
- * On Haswell sprite plane updates don't result in a psr invalidating
- * signal in the hardware. Which means we need to manually fake this in
- * software for all flushes, not just when we've seen a preceding
- * invalidation through frontbuffer rendering.
- */
- if (IS_HASWELL(dev) &&
- (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
- intel_edp_psr_do_exit(dev);
-
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-void intel_edp_psr_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
- mutex_init(&dev_priv->psr.lock);
-}
-
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2467,14 +2258,23 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_dp->DP |= DP_PORT_EN;
-
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_1);
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
+
+ /*
+ * Magic for VLV/CHV. We _must_ first set up the register
+ * without actually enabling the port, and then do another
+ * write to enable the port. Otherwise link training will
+ * fail when the power sequencer is freshly used for this port.
+ */
+ intel_dp->DP |= DP_PORT_EN;
+
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -2482,19 +2282,38 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
+ pps_lock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_panel_power_sequencer(intel_dp);
+
intel_dp_enable_port(intel_dp);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_on(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, true);
+
+ edp_panel_vdd_on(intel_dp);
+ edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+
+ pps_unlock(intel_dp);
+
+ if (IS_VALLEYVIEW(dev))
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
+
+ if (crtc->config.has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void g4x_enable_dp(struct intel_encoder *encoder)
@@ -2526,6 +2345,32 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
}
}
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+ enum pipe pipe = intel_dp->pps_pipe;
+ int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+
+ edp_panel_vdd_off_sync(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV on the other hand doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
+ pipe_name(pipe), port_name(intel_dig_port->port));
+ I915_WRITE(pp_on_reg, 0);
+ POSTING_READ(pp_on_reg);
+
+ intel_dp->pps_pipe = INVALID_PIPE;
+}
+
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
@@ -2534,6 +2379,9 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
struct intel_dp *intel_dp;
@@ -2551,10 +2399,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- /* make sure vdd is off before we steal it */
- edp_panel_vdd_off_sync(intel_dp);
+ WARN(encoder->connectors_active,
+ "stealing pipe %c power sequencer from active eDP port %c\n",
+ pipe_name(pipe), port_name(port));
- intel_dp->pps_pipe = INVALID_PIPE;
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
}
}
@@ -2565,10 +2415,12 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct edp_power_seq power_seq;
lockdep_assert_held(&dev_priv->pps_mutex);
+ if (!is_edp(intel_dp))
+ return;
+
if (intel_dp->pps_pipe == crtc->pipe)
return;
@@ -2578,7 +2430,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
* we still have control of it.
*/
if (intel_dp->pps_pipe != INVALID_PIPE)
- edp_panel_vdd_off_sync(intel_dp);
+ vlv_detach_power_sequencer(intel_dp);
/*
* We may be stealing the power
@@ -2593,9 +2445,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2624,15 +2475,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2680,6 +2523,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2715,15 +2567,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- vlv_init_panel_power_sequencer(intel_dp);
- pps_unlock(intel_dp);
- }
-
intel_enable_dp(encoder);
-
- vlv_wait_port_ready(dev_priv, dport);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2843,7 +2687,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2859,7 +2705,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3095,12 +2952,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* Program swing deemph */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3341,7 +3212,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3605,7 +3476,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
- intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3763,8 +3633,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- intel_edp_panel_vdd_on(intel_dp);
-
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -3772,8 +3640,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
-
- intel_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -3787,7 +3653,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- intel_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3797,7 +3662,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
intel_dp->is_mst = false;
}
}
- intel_edp_panel_vdd_off(intel_dp, false);
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
@@ -3809,26 +3673,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
- u8 buf[1];
+ u8 buf;
+ int test_crc_count;
+ int attempts = 6;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
- if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+ if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- DP_TEST_SINK_START) < 0)
+ buf | DP_TEST_SINK_START) < 0)
+ return -EIO;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
+ test_crc_count = buf & DP_TEST_COUNT_MASK;
- /* Wait 2 vblanks to be sure we will have the correct CRC value */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ do {
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_TEST_SINK_MISC, &buf) < 0)
+ return -EIO;
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+
+ if (attempts == 0) {
+ DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
+ return -ETIMEDOUT;
+ }
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EIO;
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+ return -EIO;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+ buf & ~DP_TEST_SINK_START) < 0)
+ return -EIO;
+
return 0;
}
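
The reworked CRC path above no longer waits a fixed two vblanks; it polls DP_TEST_SINK_MISC until the sink's CRC frame count moves, giving up after six attempts. The general shape of that loop as a standalone sketch, with poll_count() and wait_one_frame() as placeholder callbacks:

#include <stdbool.h>

/* Placeholders: "read the sink's CRC frame counter" and "wait one vblank". */
extern int poll_count(void);
extern void wait_one_frame(void);

/* Returns true once the counter moves past 'baseline', false on timeout. */
static bool wait_for_new_crc(int baseline)
{
	int attempts = 6;

	do {
		if (poll_count() != baseline)
			return true;
		wait_one_frame();
	} while (--attempts);

	return false;
}
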
@@ -4456,9 +4342,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
- intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp;
+
+ if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ pps_lock(intel_dp);
+
+ /*
+ * Read out the current power sequencer assignment,
+ * in case the BIOS did something with it.
+ */
+ if (IS_VALLEYVIEW(encoder->dev))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ intel_edp_panel_vdd_sanitize(intel_dp);
+
+ pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4645,16 +4574,20 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *out)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec, final;
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div, pp;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4716,7 +4649,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
-#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
@@ -4726,7 +4659,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
assign_final(t11_t12);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
@@ -4740,21 +4673,18 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
- if (out)
- *out = final;
}
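
The assign_final()/get_delay() macros above take the larger of the register and VBT value per field, fall back to the spec limit when both are zero, and convert the stored 100 us units to milliseconds. A tiny worked example with made-up numbers, purely for illustration:

#include <stdio.h>

/* Delays are handled in the driver's 100 us units, reported in ms. */
static int pick_final(int cur, int vbt, int spec)
{
	int m = cur > vbt ? cur : vbt;

	return m == 0 ? spec : m;	/* same rule as assign_final() */
}

static int to_ms(int hundred_us)
{
	return (hundred_us + 9) / 10;	/* DIV_ROUND_UP(x, 10) */
}

int main(void)
{
	/* Made-up values: registers say 350 (35 ms), VBT says 400 (40 ms),
	 * spec fallback shown as 2100 (210 ms). */
	int t1_t3 = pick_final(350, 400, 2100);

	printf("panel_power_up_delay = %d ms\n", to_ms(t1_t3));	/* 40 */
	return 0;
}
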
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
- struct edp_power_seq *seq)
+ struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
+ const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4837,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
* hard to tell without seeing the user of this function of this code.
* Check locking and ordering once that lands.
*/
- if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
+ if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
@@ -4940,40 +4870,8 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return downclock_mode;
}
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
-{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp;
- enum intel_display_power_domain power_domain;
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- return;
-
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
- pps_lock(intel_dp);
-
- if (!edp_have_panel_vdd(intel_dp))
- goto out;
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
-
- edp_panel_vdd_schedule_off(intel_dp);
- out:
- pps_unlock(intel_dp);
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector,
- struct edp_power_seq *power_seq)
+ struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -4985,18 +4883,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
+ enum pipe pipe = INVALID_PIPE;
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
if (!is_edp(intel_dp))
return true;
- intel_edp_panel_vdd_sanitize(intel_encoder);
+ pps_lock(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
- intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
- intel_edp_panel_vdd_off(intel_dp, false);
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -5011,7 +4910,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
/* We now know it's not a ghost, init power sequence regs. */
pps_lock(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
pps_unlock(intel_dp);
mutex_lock(&dev->mode_config.mutex);
@@ -5053,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
+
+ /*
+ * Figure out the current pipe for the initial backlight setup.
+ * If the current pipe isn't valid, try the PPS pipe, and if that
+ * fails just assume pipe A.
+ */
+ if (IS_CHERRYVIEW(dev))
+ pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+ else
+ pipe = PORT_TO_PIPE(intel_dp->DP);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, pipe);
return true;
}
@@ -5072,13 +4990,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
- struct edp_power_seq power_seq = { 0 };
int type;
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (IS_VALLEYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5087,7 +5006,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
- intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ if (INTEL_INFO(dev)->gen >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5106,6 +5028,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
+ /* eDP only on port B and/or C on vlv/chv */
+ if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
+ return false;
+
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
@@ -5148,13 +5075,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev)) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ if (IS_VALLEYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_init_panel_power_sequencer(dev, intel_dp,
- &power_seq);
- }
+ else
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
pps_unlock(intel_dp);
}
@@ -5168,7 +5093,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
}
- if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
+ if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index d9a7a7865f66..7f8c6a66680a 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -278,20 +278,12 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_mst_port_dp_detect(struct drm_connector *connector)
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
-}
-
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status;
- status = intel_mst_port_dp_detect(connector);
- return status;
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
static int
@@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
#endif
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -422,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba715229a540..25fdbb16d4e0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -93,18 +94,20 @@
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
-#define INTEL_OUTPUT_UNUSED 0
-#define INTEL_OUTPUT_ANALOG 1
-#define INTEL_OUTPUT_DVO 2
-#define INTEL_OUTPUT_SDVO 3
-#define INTEL_OUTPUT_LVDS 4
-#define INTEL_OUTPUT_TVOUT 5
-#define INTEL_OUTPUT_HDMI 6
-#define INTEL_OUTPUT_DISPLAYPORT 7
-#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_DSI 9
-#define INTEL_OUTPUT_UNKNOWN 10
-#define INTEL_OUTPUT_DP_MST 11
+enum intel_output_type {
+ INTEL_OUTPUT_UNUSED = 0,
+ INTEL_OUTPUT_ANALOG = 1,
+ INTEL_OUTPUT_DVO = 2,
+ INTEL_OUTPUT_SDVO = 3,
+ INTEL_OUTPUT_LVDS = 4,
+ INTEL_OUTPUT_TVOUT = 5,
+ INTEL_OUTPUT_HDMI = 6,
+ INTEL_OUTPUT_DISPLAYPORT = 7,
+ INTEL_OUTPUT_EDP = 8,
+ INTEL_OUTPUT_DSI = 9,
+ INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DP_MST = 11,
+};
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -135,7 +138,7 @@ struct intel_encoder {
*/
struct intel_crtc *new_crtc;
- int type;
+ enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
@@ -240,6 +243,17 @@ typedef struct dpll {
int p;
} intel_clock_t;
+struct intel_plane_state {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src;
+ struct drm_rect dst;
+ struct drm_rect clip;
+ struct drm_rect orig_src;
+ struct drm_rect orig_dst;
+ bool visible;
+};
+
struct intel_plane_config {
bool tiled;
int size;
@@ -278,6 +292,9 @@ struct intel_crtc_config {
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
+ /* Are we sending infoframes on the attached port */
+ bool has_infoframe;
+
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
@@ -326,7 +343,10 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
- /* PORT_CLK_SEL for DDI ports. */
+ /*
+ * - PORT_CLK_SEL for DDI ports on HSW/BDW.
+ * - enum skl_dpll on SKL
+ */
uint32_t ddi_pll_sel;
/* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -387,7 +407,14 @@ struct intel_pipe_wm {
struct intel_mmio_flip {
u32 seqno;
- u32 ring_id;
+ struct intel_engine_cs *ring;
+ struct work_struct work;
+};
+
+struct skl_pipe_wm {
+ struct skl_wm_level wm[8];
+ struct skl_wm_level trans_wm;
+ uint32_t linetime;
};
struct intel_crtc {
@@ -437,6 +464,8 @@ struct intel_crtc {
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
+ /* SKL wm values currently in use */
+ struct skl_pipe_wm skl_active;
} wm;
int scanline_offset;
@@ -529,6 +558,7 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
struct intel_dp_mst_encoder;
@@ -578,6 +608,7 @@ struct intel_dp {
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
+ struct edp_power_seq pps_delays;
bool use_tps3;
bool can_mst; /* this port supports mst */
@@ -734,32 +765,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+/*
+ * Returns the number of planes for this pipe, i.e. the number of sprites + 1
+ * (the primary plane). The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+ return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
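
A small usage sketch for the helper above, iterating over the primary plane and the sprites of a crtc; the loop body is a placeholder and not part of this patch:

	/* Illustrative only. */
	int i;

	for (i = 0; i < intel_num_planes(crtc); i++) {
		/* program plane i of crtc->pipe; the cursor is handled separately */
	}
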
-/* i915_irq.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+/* intel_fifo_underrun.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder);
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
+
+/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_device *dev);
+void gen6_disable_rps_interrupts(struct drm_device *dev);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
- return !dev_priv->pm._irqs_disabled;
+ return dev_priv->pm.irqs_enabled;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
@@ -792,11 +838,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-/* intel_display.c */
-const char *intel_output_name(int output);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
+/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +848,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
- * intel_frontbuffer_flip - prepare frontbuffer flip
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
@@ -824,6 +866,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
}
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+
+
+/* intel_audio.c */
+void intel_init_audio(struct drm_device *dev);
+void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder);
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +898,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
-void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ drm_wait_one_vblank(dev, pipe);
+}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
@@ -854,8 +913,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
+int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
@@ -877,7 +936,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
+void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+
/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -889,13 +954,12 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_write_eld(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
-void intel_display_handle_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -908,7 +972,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -936,25 +999,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_edp_psr_enable(struct intel_dp *intel_dp);
-void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_psr_init(struct drm_device *dev);
-
-int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1044,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector);
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1054,6 +1110,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
+void intel_backlight_register(struct drm_device *dev);
+void intel_backlight_unregister(struct drm_device *dev);
+
+
+/* intel_psr.c */
+bool intel_psr_is_enabled(struct drm_device *dev);
+void intel_psr_enable(struct intel_dp *intel_dp);
+void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_psr_init(struct drm_device *dev);
+
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@@ -1072,17 +1163,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_remove(struct drm_i915_private *);
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,14 +1173,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
+void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
/* intel_sdvo.c */
@@ -1120,7 +1196,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+bool intel_pipe_update_start(struct intel_crtc *crtc,
+ uint32_t *start_vbl_count);
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5bd9e09ad3c5..0b184079de14 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
/* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 9b584f3fbb99..850cf7d6578c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,25 +119,25 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
goto out;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
- if (ret) {
- DRM_ERROR("failed to pin obj: %d\n", ret);
- goto out_unref;
- }
-
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
- goto out_unpin;
+ goto out_unref;
+ }
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin obj: %d\n", ret);
+ goto out_fb;
}
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
-out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+out_fb:
+ drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ uint64_t conn_configured = 0, mask;
+ int pass = 0;
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
return false;
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
-
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
+ if (conn_configured & (1 << i))
+ continue;
+
+ if (pass == 0 && !connector->has_tile)
+ continue;
+
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
+ conn_configured |= (1 << i);
continue;
}
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
+ conn_configured |= (1 << i);
continue;
}
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
- connector->name);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ pass++;
+ goto retry;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 000000000000..77af512d2d35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful to
+ * debug display issues, especially watermark settings.
+ *
+ * If an underrun is detected it is logged into dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the first
+ * occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no dedicated interrupt (even though the signalling bit is in the PIPESTAT
+ * pipe interrupt register). Also on some other platforms underrun interrupts
+ * are shared, which means that if we detect an underrun we need to disable
+ * underrun reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
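
Based on the description above, modeset code is expected to mute underrun reporting around operations where underruns are unavoidable and to re-arm it afterwards. The snippet below is a minimal sketch, not part of this patch: it only assumes the intel_set_cpu_fifo_underrun_reporting() prototype introduced here, and the disable sequence in the middle is a placeholder.

	/* Illustrative usage sketch -- the disable sequence is a placeholder. */
	static void example_crtc_disable(struct drm_i915_private *dev_priv,
					 struct intel_crtc *crtc)
	{
		/* Underruns are expected while the pipe is being torn down. */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

		/* ... disable planes, pipe and port here ... */

		/* Re-arm reporting once the pipe is in a stable state again. */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	}
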
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * i9xx_check_fifo_underruns - check for fifo underruns
+ * @dev_priv: i915 device instance
+ *
+ * This function checks for fifo underruns on GMCH platforms. This needs to be
+ * done manually on modeset to make sure that we catch all underruns since they
+ * do not generate an interrupt by themselves on these platforms.
+ */
+void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_crtc(dev_priv->dev, crtc) {
+ u32 reg = PIPESTAT(crtc->pipe);
+ u32 pipestat;
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ pipestat = I915_READ(reg) & 0xffff0000;
+ if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ continue;
+
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+
+ DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ }
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0xffff0000;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable) {
+ I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ POSTING_READ(reg);
+ } else {
+ if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ }
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ironlake_enable_display_irq(dev_priv, bit);
+ else
+ ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+ if (!ivb_can_enable_err_int(dev))
+ return;
+
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (old &&
+ I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (enable)
+ dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ else
+ dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ ibx_enable_display_interrupt(dev_priv, bit);
+ else
+ ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ } else {
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (old && I915_READ(SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool old;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ old = !intel_crtc->cpu_fifo_underrun_disabled;
+ intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (HAS_GMCH_DISPLAY(dev))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN7(dev))
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_GEN8(dev) || IS_GEN9(dev))
+ broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables them for all pipes due to shared interrupts. Actual reporting is
+ * still per-pipe though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+ enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return ret;
+}
+
+static bool
+__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return !intel_crtc->cpu_fifo_underrun_disabled;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function disables or enables PCH fifo underruns for a specific PCH
+ * transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO underrun
+ * reporting for one transcoder may also disable the PCH error interrupts for
+ * the other transcoders, because there is just one interrupt mask/enable bit
+ * for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool old;
+
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code, simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ old = !intel_crtc->pch_fifo_underrun_disabled;
+ intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev_priv->dev))
+ ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable);
+ else
+ cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ enable, old);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe that triggered the underrun
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled, and then disabling the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ /* GMCH can't disable fifo underruns, filter them. */
+ if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+ !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+ return;
+
+ if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+ DRM_ERROR("CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled, and then disabling the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder)
+{
+ if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+ false))
+ DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ transcoder_name(pch_transcoder));
+}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 000000000000..79f6d72179c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
+ * then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes, and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * At a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and needs to know when to stop
+ * caching and when it may restart. This is done by placing callbacks into the
+ * invalidate and flush functions: at invalidate time caching must be stopped
+ * and at flush time it can be restarted. Such features may also need to know
+ * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
+ * and flush on its own), which can be achieved by placing callbacks into the
+ * flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead a delayed work
+ * item acting as an idle timer should be started from the flush and flip
+ * functions and cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features, using these
+ * functions is deprecated and should be avoided.
+ */
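
A minimal sketch of the first, cache-like consumer pattern described above (the second, DRRS-style pattern would instead schedule and cancel a delayed work item from the same hooks). All example_* names and EXAMPLE_RELEVANT_BITS are placeholders, not part of this patch; in this series only intel_psr_invalidate()/intel_psr_flush() actually hook in this way.

	/* Illustrative consumer of the frontbuffer hooks; all names are placeholders. */
	static void example_cache_invalidate(struct drm_device *dev,
					     unsigned frontbuffer_bits)
	{
		/* Filter for the frontbuffer slots this feature cares about. */
		if (!(frontbuffer_bits & EXAMPLE_RELEVANT_BITS))
			return;

		/* Frontbuffer rendering is about to start: stop caching. */
		example_stop_caching(dev);
	}

	static void example_cache_flush(struct drm_device *dev,
					unsigned frontbuffer_bits)
	{
		if (!(frontbuffer_bits & EXAMPLE_RELEVANT_BITS))
			return;

		/* Rendering has completed and been flushed: caching may resume. */
		example_restart_caching(dev);
	}
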
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (!HAS_GMCH_DISPLAY(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+}
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+
+ if (!i915.powersave)
+ return;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+ continue;
+
+ intel_increase_pllclock(dev, pipe);
+ if (ring && intel_fbc_enabled(dev))
+ ring->fbc_dirty = true;
+ }
+}
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy.*/
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_psr_flush(dev, frontbuffer_bits);
+
+ /*
+ * FIXME: Unconditional fbc flushing here is a rather gross hack and
+ * needs to be reworked into a proper frontbuffer tracking scheme like
+ * psr employs.
+ */
+ if (dev_priv->fbc.need_sw_cache_clean) {
+ dev_priv->fbc.need_sw_cache_clean = false;
+ bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
+ }
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes covered by
+ * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
+ * completion is signalled with intel_frontbuffer_flip_complete. If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ /* Remove stale busy bits due to the old buffer. */
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
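
For the asynchronous flip case, the prepare/complete pair above is meant to bracket the flip. A rough sketch of the intended call order, assuming only the two functions added in this file; the flip submission itself is elided:

	/* Illustrative flip sequence; the flip submission itself is elided. */
	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

	/* ... queue the page flip to the hardware ... */

	/* Once the flip has been latched (e.g. from the flip-done interrupt),
	 * complete the tracking so any pending flush can run. */
	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
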
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29ec1535992d..3abc2000fce9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+
+ if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+ return val & VIDEO_DIP_ENABLE;
+
+ return false;
+}
+
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ return val & VIDEO_DIP_ENABLE;
+}
+
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 val = I915_READ(ctl_reg);
+
+ return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW);
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (crtc->config.has_audio) {
- WARN_ON(!crtc->config.has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
- hdmi_val |= SDVO_AUDIO_ENABLE;
- intel_write_eld(&encoder->base, adjusted_mode);
- }
-
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
@@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -732,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
+ if (intel_hdmi->infoframe_enabled(&encoder->base))
+ pipe_config->has_infoframe = true;
+
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
@@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+
+ if (intel_crtc->config.has_audio) {
+ WARN_ON(!intel_crtc->config.has_hdmi_sink);
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ intel_audio_codec_enable(encoder);
+ }
}
static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+ if (crtc->config.has_audio)
+ intel_audio_codec_disable(encoder);
+
temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A
@@ -922,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+ if (pipe_config->has_hdmi_sink)
+ pipe_config->has_infoframe = true;
+
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
@@ -1394,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
@@ -1405,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->dpio_lock);
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
for (i = 0; i < 4; i++) {
@@ -1499,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
+ intel_hdmi->set_infoframes(&encoder->base,
+ intel_crtc->config.has_hdmi_sink,
+ adjusted_mode);
+
intel_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport);
@@ -1593,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
+ intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
+ intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
+ intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
+ intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
+ intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bafd38b5703e..e588376227ea 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,11 +136,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -204,6 +203,9 @@ enum {
};
#define GEN8_CTX_ID_SHIFT 32
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
+
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
{
WARN_ON(i915.enable_ppgtt == -1);
+ if (INTEL_INFO(dev)->gen >= 9)
+ return 1;
+
if (enable_execlists == 0)
return 0;
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+ struct drm_i915_gem_object *ring_obj,
+ u32 tail)
{
struct page *page;
uint32_t *reg_state;
@@ -350,43 +368,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = tail;
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
kunmap_atomic(reg_state);
return 0;
}
-static int execlists_submit_context(struct intel_engine_cs *ring,
- struct intel_context *to0, u32 tail0,
- struct intel_context *to1, u32 tail1)
+static void execlists_submit_contexts(struct intel_engine_cs *ring,
+ struct intel_context *to0, u32 tail0,
+ struct intel_context *to1, u32 tail1)
{
- struct drm_i915_gem_object *ctx_obj0;
+ struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
+ struct intel_ringbuffer *ringbuf1 = NULL;
- ctx_obj0 = to0->engine[ring->id].state;
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
- execlists_ctx_write_tail(ctx_obj0, tail0);
+ execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
if (to1) {
+ ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+ WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
- execlists_ctx_write_tail(ctx_obj1, tail1);
+ execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
}
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
-
- return 0;
}
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
assert_spin_locked(&ring->execlist_lock);
@@ -403,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
- queue_work(dev_priv->wq, &req0->work);
+ list_add_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@@ -413,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
WARN_ON(req1 && req1->elsp_submitted);
- WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
- req1 ? req1->ctx : NULL,
- req1 ? req1->tail : 0));
+ execlists_submit_contexts(ring, req0->ctx, req0->tail,
+ req1 ? req1->ctx : NULL,
+ req1 ? req1->tail : 0);
req0->elsp_submitted++;
if (req1)
@@ -425,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ctx_submit_request *head_req;
assert_spin_locked(&ring->execlist_lock);
@@ -443,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
- queue_work(dev_priv->wq, &head_req->work);
+ list_add_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
@@ -512,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
((u32)ring->next_context_status_buffer & 0x07) << 8);
}
-static void execlists_free_request_task(struct work_struct *work)
-{
- struct intel_ctx_submit_request *req =
- container_of(work, struct intel_ctx_submit_request, work);
- struct drm_device *dev = req->ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_context_unreference(req->ctx);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(req);
-}
-
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
@@ -542,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
+
+ if (to != ring->default_context)
+ intel_lr_context_pin(ring, to);
+
req->ring = ring;
req->tail = tail;
- INIT_WORK(&req->work, execlists_free_request_task);
intel_runtime_pm_get(dev_priv);
@@ -563,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
- "More than 2 already-submitted reqs queued\n");
+ "More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- queue_work(dev_priv->wq, &tail_req->work);
+ list_add_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
@@ -733,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return 0;
}
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+ struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ unsigned long flags;
+ struct list_head retired_list;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (list_empty(&ring->execlist_retired_req_list))
+ return;
+
+ INIT_LIST_HEAD(&retired_list);
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+ struct intel_context *ctx = req->ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[ring->id].state;
+
+ if (ctx_obj && (ctx != ring->default_context))
+ intel_lr_context_unpin(ring, ctx);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(req->ctx);
+ list_del(&req->execlist_link);
+ kfree(req);
+ }
+}
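intel_execlists_retire_requests() above uses a detach-then-process pattern: the retired requests are moved off the engine's list in one shot under the execlist spinlock (list_replace_init()), and the sleeping cleanup (unpin, context unreference, kfree) then runs with only struct_mutex held. A rough standalone sketch of that idea using a plain singly linked list (toy types, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

/* Toy request node; the real code uses struct list_head links. */
struct toy_req {
	int id;
	struct toy_req *next;
};

/* Phase 1: atomically detach the whole pending list (in the driver this
 * happens under the execlist spinlock via list_replace_init()). */
static struct toy_req *detach_all(struct toy_req **head)
{
	struct toy_req *list = *head;
	*head = NULL;
	return list;
}

int main(void)
{
	struct toy_req *pending = NULL, *req, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		req = malloc(sizeof(*req));
		req->id = i;
		req->next = pending;
		pending = req;
	}

	/* Phase 2: walk and free the detached list with no spinlock held,
	 * mirroring the unpin/unreference/kfree loop above. */
	for (req = detach_all(&pending); req; req = tmp) {
		tmp = req->next;
		printf("retiring request %d\n", req->id);
		free(req);
	}
	return 0;
}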
+
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -793,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
execlists_context_queue(ring, ctx, ringbuf->tail);
}
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (ctx->engine[ring->id].unpin_count++ == 0) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj,
+ GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret)
+ goto reset_unpin_count;
+
+ ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ if (ret)
+ goto unpin_ctx_obj;
+ }
+
+ return ret;
+
+unpin_ctx_obj:
+ i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+ ctx->engine[ring->id].unpin_count = 0;
+
+ return ret;
+}
+
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+ if (ctx_obj) {
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (--ctx->engine[ring->id].unpin_count == 0) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
+ }
+}
+
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
+ int ret;
+
if (ring->outstanding_lazy_seqno)
return 0;
@@ -806,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
if (request == NULL)
return -ENOMEM;
+ if (ctx != ring->default_context) {
+ ret = intel_lr_context_pin(ring, ctx);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ }
+
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@@ -991,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
return 0;
}
+static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret, i;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
+
+ if (WARN_ON(w->count == 0))
+ return 0;
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
+ if (ret)
+ return ret;
+
+ intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+ intel_logical_ring_emit(ringbuf, w->reg[i].value);
+ }
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+ intel_logical_ring_advance(ringbuf);
+
+ ring->gpu_caches_dirty = true;
+ ret = logical_ring_flush_all_caches(ringbuf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
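The workaround emission above writes a single MI_LOAD_REGISTER_IMM packet carrying w->count (register, value) pairs plus a NOOP pad, which is why the ring is asked for w->count * 2 + 2 dwords. A self-contained sketch of that dword layout (placeholder header/NOOP values and made-up registers, not the real MI opcode encodings):

#include <stdint.h>
#include <stdio.h>

/* Toy register/value pair, standing in for the i915_workarounds entries
 * walked by the loop above. */
struct toy_wa {
	uint32_t addr;
	uint32_t value;
};

/*
 * Sketch of the dword stream built above: one header dword announcing
 * 'count' register writes, then count (addr, value) pairs, then a NOOP
 * pad -- which is exactly why intel_logical_ring_begin() reserves
 * w->count * 2 + 2 dwords. The header/NOOP values here are placeholders,
 * not the real MI opcode encodings. Returns the number of dwords written.
 */
static int toy_emit_workarounds(uint32_t *ring, const struct toy_wa *wa, int count)
{
	int i, n = 0;

	ring[n++] = (uint32_t)count;		/* placeholder LRI header */
	for (i = 0; i < count; i++) {
		ring[n++] = wa[i].addr;
		ring[n++] = wa[i].value;
	}
	ring[n++] = 0;				/* placeholder NOOP pad */
	return n;
}

int main(void)
{
	const struct toy_wa wa[] = {
		{ 0x7004, 0x00010001 },		/* made-up addresses/values */
		{ 0x7300, 0x00800080 },
	};
	uint32_t ring[2 * 2 + 2];
	int i, n = toy_emit_workarounds(ring, wa, 2);

	for (i = 0; i < n; i++)
		printf("dword %d: 0x%08x\n", i, (unsigned)ring[i]);
	return 0;
}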
+
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1034,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return ret;
+ return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1063,7 +1194,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1214,11 +1345,13 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = ring->dev->dev_private;
+
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
+ INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
ring->init = gen8_init_render_ring;
+ ring->init_context = intel_logical_ring_workarounds_emit;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ring_obj = ringbuf->obj;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ /* Ring buffer start address is not known until the buffer is pinned.
+ * It is written to the context image in execlists_update_context()
+ */
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
if (ctx_obj) {
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[i].ringbuf;
+ struct intel_engine_cs *ring = ringbuf->ring;
+
+ if (ctx == ring->default_context) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
}
}
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
- WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+ WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
switch (ring->id) {
case RCS:
- ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+ if (INTEL_INFO(ring->dev)->gen >= 9)
+ ret = GEN9_LR_CONTEXT_RENDER_SIZE;
+ else
+ ret = GEN8_LR_CONTEXT_RENDER_SIZE;
break;
case VCS:
case BCS:
@@ -1649,6 +1794,23 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *default_ctx_obj)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ /* The status page is offset 0 from the default context object
+ * in LRC mode. */
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
+ ring->status_page.page_addr =
+ kmap(sg_page(default_ctx_obj->pages->sgl));
+ ring->status_page.obj = default_ctx_obj;
+
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ (u32)ring->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+}
+
/**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context
* @ctx: LR context to create.
@@ -1660,11 +1822,12 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
* the creation is a deferred call: it's better to make sure first that we need to use
* a given ring with the context.
*
- * Return: non-zero on eror.
+ * Return: non-zero on error.
*/
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
+ const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
@@ -1684,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return ret;
}
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
- drm_gem_object_unreference(&ctx_obj->base);
- return ret;
+ if (is_global_default_ctx) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
+ ret);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+ }
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
- i915_gem_object_ggtt_unpin(ctx_obj);
- drm_gem_object_unreference(&ctx_obj->base);
ret = -ENOMEM;
- return ret;
+ goto error_unpin_ctx;
}
ringbuf->ring = ring;
@@ -1711,46 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
- /* TODO: For now we put this in the mappable region so that we can reuse
- * the existing ringbuffer code which ioremaps it. When we start
- * creating many contexts, this will no longer work and we must switch
- * to a kmapish interface.
- */
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_DEBUG_DRIVER(
+ "Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
- goto error;
+ goto error_free_rbuf;
+ }
+
+ if (is_global_default_ctx) {
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error_destroy_rbuf;
+ }
+ }
+
}
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
- if (ctx == ring->default_context) {
- /* The status page is offset 0 from the default context object
- * in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
- ring->status_page.page_addr =
- kmap(sg_page(ctx_obj->pages->sgl));
- if (ring->status_page.page_addr == NULL)
- return -ENOMEM;
- ring->status_page.obj = ctx_obj;
- }
+ if (ctx == ring->default_context)
+ lrc_setup_hardware_status_page(ring, ctx_obj);
if (ring->id == RCS && !ctx->rcs_initialized) {
+ if (ring->init_context) {
+ ret = ring->init_context(ring, ctx);
+ if (ret)
+ DRM_ERROR("ring init context: %d\n", ret);
+ }
+
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
- intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->rcs_initialized = true;
@@ -1759,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return 0;
error:
+ if (is_global_default_ctx)
+ intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+ intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
+error_unpin_ctx:
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
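The reworked error path above unwinds in layers: each failure jumps to the label matching how far setup got, and the pin/unpin steps are only undone when is_global_default_ctx says they were taken in the first place. A loose standalone sketch of that goto-unwind shape (names and allocations are invented; malloc/free merely stand in for the pin/unpin and alloc/destroy steps):

#include <stdio.h>
#include <stdlib.h>

static int toy_deferred_create(int is_global_default_ctx, int fail_at)
{
	void *ctx_obj = NULL, *ringbuf = NULL;
	int ret = 0;

	if (is_global_default_ctx) {
		ctx_obj = malloc(16);		/* stands in for the GGTT pin */
		if (!ctx_obj || fail_at == 1) {
			ret = -1;
			goto error_unpin_ctx;
		}
	}

	ringbuf = malloc(16);			/* stands in for the ringbuffer alloc */
	if (!ringbuf || fail_at == 2) {
		ret = -1;
		goto error_free_rbuf;		/* free(NULL) is a no-op if the alloc failed */
	}

	if (fail_at == 3) {			/* e.g. a later populate step failing */
		ret = -1;
		goto error_free_rbuf;
	}

	free(ringbuf);				/* success-path cleanup, for the sketch only */
	free(ctx_obj);
	return 0;

error_free_rbuf:
	free(ringbuf);
error_unpin_ctx:
	if (is_global_default_ctx)
		free(ctx_obj);			/* only undone if it was taken */
	return ret;
}

int main(void)
{
	printf("success: %d\n", toy_deferred_create(1, 0));
	printf("late failure unwinds: %d\n", toy_deferred_create(1, 3));
	return 0;
}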
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4bf28c5..14b216b9be7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
+void intel_lr_context_unpin(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
u32 tail;
struct list_head execlist_link;
- struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c0bbf2172446..14654d628ca4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_enabled(dev_priv, power_domain))
+ if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(lvds_encoder->reg);
@@ -1116,7 +1116,7 @@ out:
drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41b3be217493..4d63839bd9b4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
@@ -536,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 val;
- unsigned long flags;
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
- val = dev_priv->display.get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
+ if (panel->backlight.enabled) {
+ val = dev_priv->display.get_backlight(connector);
+ val = intel_panel_compute_brightness(connector, val);
+ }
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
@@ -603,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
@@ -626,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -643,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
- unsigned long flags;
+ /*
+ * INVALID_PIPE may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -678,7 +689,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void pch_disable_backlight(struct intel_connector *connector)
@@ -720,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
intel_panel_actually_set_backlight(connector, 0);
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -731,10 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
/*
@@ -748,14 +760,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return;
}
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
static void bdw_enable_backlight(struct intel_connector *connector)
@@ -779,8 +791,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- /* BDW always uses the pch pwm controls. */
- pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
POSTING_READ(BLC_PWM_PCH_CTL1);
@@ -909,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@@ -936,14 +952,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- unsigned long flags;
- if (!panel->backlight.present || pipe == INVALID_PIPE)
+ if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -961,7 +976,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_unlock(&dev_priv->backlight_lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1030,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
+ if (!panel->backlight.present)
+ return 0;
+
WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
@@ -1065,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = NULL;
return -ENODEV;
}
+
+ DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
+
return 0;
}
@@ -1119,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
return scale(min, 0, 255, 0, panel->backlight.max);
}
-static int bdw_setup_backlight(struct intel_connector *connector)
+static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1145,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int pch_setup_backlight(struct intel_connector *connector)
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i9xx_setup_backlight(struct intel_connector *connector)
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1204,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int i965_setup_backlight(struct intel_connector *connector)
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1234,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
return 0;
}
-static int vlv_setup_backlight(struct intel_connector *connector)
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
- enum pipe pipe;
+ enum pipe p;
u32 ctl, ctl2, val;
- for_each_pipe(dev_priv, pipe) {
- u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ for_each_pipe(dev_priv, p) {
+ u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+ I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+ ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = get_backlight_min_vbt(connector);
- val = _vlv_get_backlight(dev, PIPE_A);
+ val = _vlv_get_backlight(dev, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1273,13 +1298,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
return 0;
}
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
- unsigned long flags;
int ret;
if (!dev_priv->vbt.backlight.present) {
@@ -1292,9 +1316,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
}
/* set level and max in panel struct */
- spin_lock_irqsave(&dev_priv->backlight_lock, flags);
- ret = dev_priv->display.setup_backlight(intel_connector);
- spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+ mutex_lock(&dev_priv->backlight_lock);
+ ret = dev_priv->display.setup_backlight(intel_connector, pipe);
+ mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1302,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
return ret;
}
- intel_backlight_device_register(intel_connector);
-
panel->backlight.present = true;
- DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
- "sysfs interface %sregistered\n",
+ DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
panel->backlight.enabled ? "enabled" : "disabled",
- panel->backlight.level, panel->backlight.max,
- panel->backlight.device ? "" : "not ");
+ panel->backlight.level, panel->backlight.max);
return 0;
}
@@ -1321,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_panel *panel = &intel_connector->panel;
panel->backlight.present = false;
- intel_backlight_device_unregister(intel_connector);
}
/* Set up chip specific backlight functions */
@@ -1329,7 +1349,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -1384,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
+
+void intel_backlight_register(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_register(connector);
+}
+
+void intel_backlight_unregister(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ intel_backlight_device_unregister(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ad2fd605f76b..1f4b56e273c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-#include <linux/vgaarb.h>
-#include <drm/i915_powerwell.h>
-#include <linux/pm_runtime.h>
/**
* RC6 is a special power stage which allows the GPU to enter a very
@@ -66,11 +63,37 @@
* i915.i915_enable_fbc parameter
*/
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * WaDisableSDEUnitClockGating:skl
+ * This seems to be a pre-production w/a.
+ */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+ * This is a pre-production w/a.
+ */
+ I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+ I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+ ~GEN9_DG_MIRROR_FIX_ENABLE);
+
+ /* Wa4x4STCOptimizationDisable:skl */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
int i;
u32 fbc_ctl;
+ dev_priv->fbc.enabled = true;
+
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = false;
+
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
+ dev_priv->fbc.enabled = true;
+
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
+ return dev_priv->fbc.enabled;
}
-void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
+ if (!intel_fbc_enabled(dev))
+ return;
+
I915_WRITE(MSG_FBC_REND_STATE, value);
}
@@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int *prec_mult,
int *drain_latency)
{
+ struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
@@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
return false;
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
- *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
- DRAIN_LATENCY_PRECISION_32;
+ if (IS_CHERRYVIEW(dev))
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
+ DRAIN_LATENCY_PRECISION_16;
+ else
+ *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
+ DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
if (*drain_latency > DRAIN_LATENCY_MASK)
@@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
- DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
+ plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
+ DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
if (!intel_crtc_active(crtc)) {
@@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_PLANE_PRECISION_64 :
- DDL_PLANE_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_PLANE_PRECISION_HIGH :
+ DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
}
@@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_CURSOR_PRECISION_64 :
- DDL_CURSOR_PRECISION_32;
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_CURSOR_PRECISION_HIGH :
+ DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
@@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
int plane_prec;
int sprite_dl;
int prec_mult;
+ const int high_precision = IS_CHERRYVIEW(dev) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
- sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
+ sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
- plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
- DDL_SPRITE_PRECISION_64(sprite) :
- DDL_SPRITE_PRECISION_32(sprite);
+ plane_prec = (prec_mult == high_precision) ?
+ DDL_SPRITE_PRECISION_HIGH(sprite) :
+ DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
}
@@ -1915,6 +1960,14 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
+struct skl_pipe_wm_parameters {
+ bool active;
+ uint32_t pipe_htotal;
+ uint32_t pixel_rate; /* in KHz */
+ struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
+ struct intel_plane_wm_parameters cursor;
+};
+
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
@@ -2226,11 +2279,82 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_GEN9(dev)) {
+ uint32_t val;
+ int ret, i;
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN9_PCODE_READ_MEM_LATENCY,
+ &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK;
+
+ /*
+ * punit doesn't take into account the read latency so we need
+ * to add 2us to the various latency levels we retrieve from
+ * the punit.
+ * - W0 is a bit special in that it's the only level that
+ * can't be disabled if we want to have display working, so
+ * we always add 2us there.
+ * - For levels >=1, punit returns 0us latency when they are
+ * disabled, so we respect that and don't add 2us then
+ *
+ * Additionally, if a level n (n > 1) has a 0us latency, all
+ * levels m (m >= n) need to be disabled. We make sure to
+ * sanitize the values out of the punit to satisfy this
+ * requirement.
+ */
+ wm[0] += 2;
+ for (level = 1; level <= max_level; level++)
+ if (wm[level] != 0)
+ wm[level] += 2;
+ else {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ break;
+ }
+ } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2278,7 +2402,9 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_GEN9(dev))
+ return 7;
+ else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
@@ -2288,7 +2414,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
- const uint16_t wm[5])
+ const uint16_t wm[8])
{
int level, max_level = ilk_wm_max_level(dev);
@@ -2301,8 +2427,13 @@ static void intel_print_wm_latency(struct drm_device *dev,
continue;
}
- /* WM1+ latency values in 0.5us units */
- if (level > 0)
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (IS_GEN9(dev))
+ latency *= 10;
+ else if (level > 0)
latency *= 5;
DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
@@ -2370,6 +2501,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
snb_wm_latency_quirk(dev);
}
+static void skl_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+}
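For a feel of what the gen9 branch of intel_read_wm_latency() ends up storing (illustrative values, not measured ones): if the punit reported raw latencies {3, 4, 0, 6, 7, 7, 7, 7}, the code adds the 2us read latency to WM0 and to every non-zero level, then zeroes out everything past the first disabled level, leaving {5, 6, 0, 0, 0, 0, 0, 0}.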
+
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p)
{
@@ -2860,6 +2999,769 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
+/*
+ * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
+ * different active planes.
+ */
+
+#define SKL_DDB_SIZE 896 /* in blocks */
+
+static void
+skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+ struct drm_crtc *for_crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_entry *alloc /* out */)
+{
+ struct drm_crtc *crtc;
+ unsigned int pipe_size, ddb_size;
+ int nth_active_pipe;
+
+ if (!params->active) {
+ alloc->start = 0;
+ alloc->end = 0;
+ return;
+ }
+
+ ddb_size = SKL_DDB_SIZE;
+
+ ddb_size -= 4; /* 4 blocks for bypass path allocation */
+
+ nth_active_pipe = 0;
+ for_each_crtc(dev, crtc) {
+ if (!intel_crtc_active(crtc))
+ continue;
+
+ if (crtc == for_crtc)
+ break;
+
+ nth_active_pipe++;
+ }
+
+ pipe_size = ddb_size / config->num_pipes_active;
+ alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ alloc->end = alloc->start + pipe_size;
+}
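To put numbers on the split above (illustrative, not from the patch): with SKL_DDB_SIZE = 896 blocks and 4 blocks reserved for the bypass path, ddb_size is 892; with two active pipes, pipe_size is 892 / 2 = 446, so the first active pipe is handed blocks [0, 446) and the second [446, 892).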
+
+static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+{
+ if (config->num_pipes_active == 1)
+ return 32;
+
+ return 8;
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ entry->start = reg & 0x3ff;
+ entry->end = (reg >> 16) & 0x3ff;
+ if (entry->end)
+ entry->end += 1;
+}
+
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+ int plane;
+ u32 val;
+
+ for_each_pipe(dev_priv, pipe) {
+ for_each_plane(pipe, plane) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane));
+ skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
+ val);
+ }
+
+ val = I915_READ(CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
+ }
+}
+
+static unsigned int
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
+{
+ return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
+}
+
+/*
+ * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
+ * an 8192x4096@32bpp framebuffer:
+ * 3 * 4096 * 8192 * 4 < 2^32
+ */
+static unsigned int
+skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
+ const struct skl_pipe_wm_parameters *params)
+{
+ unsigned int total_data_rate = 0;
+ int plane;
+
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ total_data_rate += skl_plane_relative_data_rate(p);
+ }
+
+ return total_data_rate;
+}
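As a quick check of the bound claimed in the comment above: 3 * 8192 * 4096 * 4 = 402,653,184, comfortably below 2^32 = 4,294,967,296, so accumulating the per-plane data rates in an unsigned int cannot overflow.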
+
+static void
+skl_allocate_pipe_ddb(struct drm_crtc *crtc,
+ const struct intel_wm_config *config,
+ const struct skl_pipe_wm_parameters *params,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+ uint16_t alloc_size, start, cursor_blocks;
+ unsigned int total_data_rate;
+ int plane;
+
+ skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
+ alloc_size = skl_ddb_entry_size(alloc);
+ if (alloc_size == 0) {
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
+ return;
+ }
+
+ cursor_blocks = skl_cursor_allocation(config);
+ ddb->cursor[pipe].start = alloc->end - cursor_blocks;
+ ddb->cursor[pipe].end = alloc->end;
+
+ alloc_size -= cursor_blocks;
+ alloc->end -= cursor_blocks;
+
+ /*
+ * Each active plane gets a portion of the remaining space, in
+ * proportion to the amount of data it needs to fetch from memory.
+ *
+ * FIXME: we may not allocate every single block here.
+ */
+ total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
+
+ start = alloc->start;
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
+ const struct intel_plane_wm_parameters *p;
+ unsigned int data_rate;
+ uint16_t plane_blocks;
+
+ p = &params->plane[plane];
+ if (!p->enabled)
+ continue;
+
+ data_rate = skl_plane_relative_data_rate(p);
+
+ /*
+ * promote the expression to 64 bits to avoid overflowing; the
+ * result is < alloc_size since data_rate / total_data_rate < 1
+ */
+ plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
+ total_data_rate);
+
+ ddb->plane[pipe][plane].start = start;
+ ddb->plane[pipe][plane].end = start + plane_blocks;
+
+ start += plane_blocks;
+ }
+}
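The loop above hands each enabled plane alloc_size * data_rate / total_data_rate blocks, with the product promoted to 64 bits before the division. A small standalone sketch of that proportional split (the plane sizes and the 446-block pipe share are made-up illustrative numbers, not values from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* relative data rates: width * height * bytes_per_pixel */
	const unsigned int data_rate[] = {
		1920 * 1080 * 4,	/* primary plane */
		 640 *  480 * 4,	/* one sprite plane */
	};
	const uint16_t alloc_size = 446 - 8;	/* pipe share minus cursor blocks */
	unsigned int total = 0, i;
	uint16_t start = 0;

	for (i = 0; i < 2; i++)
		total += data_rate[i];

	for (i = 0; i < 2; i++) {
		/* promote to 64 bits, exactly as the div_u64() above does */
		uint16_t blocks = (uint16_t)(((uint64_t)alloc_size *
					      data_rate[i]) / total);

		printf("plane %u: blocks [%u, %u)\n", i,
		       (unsigned)start, (unsigned)(start + blocks));
		start += blocks;
	}
	return 0;
}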
+
+static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
+{
+ /* TODO: Take into account the scalers once we support them */
+ return config->adjusted_mode.crtc_clock;
+}
+
+/*
+ * The max latency should be 257 (the maximum the punit can encode is 255 and we add 2us
+ * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * should allow pixel_rate up to ~2 GHz which seems sufficient since max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+*/
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t wm_intermediate_val, ret;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
+ ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+
+ return ret;
+}
+
+static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+ uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
+
+ if (latency == 0)
+ return UINT_MAX;
+
+ plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+ wm_intermediate_val = latency * pixel_rate;
+ ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
+ plane_bytes_per_line;
+
+ return ret;
+}
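To see what the two methods above compute, take illustrative numbers (not from the patch): latency = 2 us, pixel_rate = 100000 (KHz), bytes_per_pixel = 4, horiz_pixels = 1920 and pipe_htotal = 2200. skl_wm_method1() gives DIV_ROUND_UP(2 * 100000 * 4, 1000) = 800 bytes fetched during the latency window, while skl_wm_method2() gives DIV_ROUND_UP(2 * 100000, 2200 * 1000) = 1 line, i.e. 1 * 1920 * 4 = 7680 bytes, and skl_compute_plane_wm() then picks between the two results based on the plane's DDB allocation.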
+
+static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
+ const struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ enum pipe pipe = intel_crtc->pipe;
+
+ if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
+ sizeof(new_ddb->plane[pipe])))
+ return true;
+
+ if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
+ sizeof(new_ddb->cursor[pipe])))
+ return true;
+
+ return false;
+}
+
+static void skl_compute_wm_global_parameters(struct drm_device *dev,
+ struct intel_wm_config *config)
+{
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ config->num_pipes_active += intel_crtc_active(crtc);
+
+ /* FIXME: I don't think we need those two global parameters on SKL */
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ config->sprites_enabled |= intel_plane->wm.enabled;
+ config->sprites_scaled |= intel_plane->wm.scaled;
+ }
+}
+
+static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *p)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_plane *plane;
+ int i = 1; /* Index for sprite planes start */
+
+ p->active = intel_crtc_active(crtc);
+ if (p->active) {
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+ p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
+
+ /*
+ * For now, assume primary and cursor planes are always enabled.
+ */
+ p->plane[0].enabled = true;
+ p->plane[0].bytes_per_pixel =
+ crtc->primary->fb->bits_per_pixel / 8;
+ p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
+ p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
+
+ p->cursor.enabled = true;
+ p->cursor.bytes_per_pixel = 4;
+ p->cursor.horiz_pixels = intel_crtc->cursor_width ?
+ intel_crtc->cursor_width : 64;
+ }
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (intel_plane->pipe == pipe)
+ p->plane[i++] = intel_plane->wm;
+ }
+}
+
+static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
+ struct intel_plane_wm_parameters *p_params,
+ uint16_t ddb_allocation,
+ uint32_t mem_value,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines /* out */)
+{
+ uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
+ uint32_t result_bytes;
+
+ if (mem_value == 0 || !p->active || !p_params->enabled)
+ return false;
+
+ method1 = skl_wm_method1(p->pixel_rate,
+ p_params->bytes_per_pixel,
+ mem_value);
+ method2 = skl_wm_method2(p->pixel_rate,
+ p->pipe_htotal,
+ p_params->horiz_pixels,
+ p_params->bytes_per_pixel,
+ mem_value);
+
+ plane_bytes_per_line = p_params->horiz_pixels *
+ p_params->bytes_per_pixel;
+
+ /* For now xtile and linear */
+ if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
+ result_bytes = min(method1, method2);
+ else
+ result_bytes = method1;
+
+ res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
+ res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
+
+ if (res_blocks > ddb_allocation || res_lines > 31)
+ return false;
+
+ *out_blocks = res_blocks;
+ *out_lines = res_lines;
+
+ return true;
+}
+
+static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *p,
+ enum pipe pipe,
+ int level,
+ int num_planes,
+ struct skl_wm_level *result)
+{
+ uint16_t latency = dev_priv->wm.skl_latency[level];
+ uint16_t ddb_blocks;
+ int i;
+
+ for (i = 0; i < num_planes; i++) {
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+
+ result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
+ ddb_blocks,
+ latency,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i]);
+ }
+
+ ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
+ result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
+ latency, &result->cursor_res_b,
+ &result->cursor_res_l);
+}
+
+static uint32_t
+skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
+{
+ if (!intel_crtc_active(crtc))
+ return 0;
+
+ return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+
+}
+
+static void skl_compute_transition_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_wm_level *trans_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i;
+
+ if (!params->active)
+ return;
+
+ /* Until we know more, just disable transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ trans_wm->plane_en[i] = false;
+ trans_wm->cursor_en = false;
+}
+
+static void skl_compute_pipe_wm(struct drm_crtc *crtc,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm_parameters *params,
+ struct skl_pipe_wm *pipe_wm)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int level, max_level = ilk_wm_max_level(dev);
+
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
+ level, intel_num_planes(intel_crtc),
+ &pipe_wm->wm[level]);
+ }
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
+
+ skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
+}
+
+static void skl_compute_wm_results(struct drm_device *dev,
+ struct skl_pipe_wm_parameters *p,
+ struct skl_pipe_wm *p_wm,
+ struct skl_wm_values *r,
+ struct intel_crtc *intel_crtc)
+{
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t temp;
+ int i;
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+
+ temp |= p_wm->wm[level].plane_res_l[i] <<
+ PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].plane_res_b[i];
+ if (p_wm->wm[level].plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane[pipe][i][level] = temp;
+ }
+
+ temp = 0;
+
+ temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->wm[level].cursor_res_b;
+
+ if (p_wm->wm[level].cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor[pipe][level] = temp;
+
+ }
+
+ /* transition WMs */
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = 0;
+ temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.plane_res_b[i];
+ if (p_wm->trans_wm.plane_en[i])
+ temp |= PLANE_WM_EN;
+
+ r->plane_trans[pipe][i] = temp;
+ }
+
+ temp = 0;
+ temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
+ temp |= p_wm->trans_wm.cursor_res_b;
+ if (p_wm->trans_wm.cursor_en)
+ temp |= PLANE_WM_EN;
+
+ r->cursor_trans[pipe] = temp;
+
+ r->wm_linetime[pipe] = p_wm->linetime;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
+ const struct skl_ddb_entry *entry)
+{
+ if (entry->end)
+ I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ else
+ I915_WRITE(reg, 0);
+}
+
+static void skl_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct skl_wm_values *new)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ int i, level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (!new->dirty[pipe])
+ continue;
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM(pipe, i, level),
+ new->plane[pipe][i][level]);
+ I915_WRITE(CUR_WM(pipe, level),
+ new->cursor[pipe][level]);
+ }
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ I915_WRITE(PLANE_WM_TRANS(pipe, i),
+ new->plane_trans[pipe][i]);
+ I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
+
+ for (i = 0; i < intel_num_planes(crtc); i++)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, i),
+ &new->ddb.plane[pipe][i]);
+
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+ &new->ddb.cursor[pipe]);
+ }
+}
+
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ * 1. re-allocate the pipe(s) with the allocation being reduced and not
+ * overlapping with a previously enabled pipe (another way to put it is:
+ * pipes with their new allocation strictly included in their old ones).
+ * 2. re-allocate the other pipes that get their allocation reduced
+ * 3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ * | B | C |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ * allocation
+ * | A | B | C |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
+ */
+
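+/*
+ * Re-write the plane and cursor surface base registers so the pipe picks up
+ * its new DDB allocation and watermarks at the next vblank.
+ */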
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int plane;
+
+ DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+
+ for_each_plane(pipe, plane) {
+ I915_WRITE(PLANE_SURF(pipe, plane),
+ I915_READ(PLANE_SURF(pipe, plane)));
+ }
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+}
+
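+/*
+ * True if the pipe's new DDB allocation fits strictly within its old one
+ * (and its size actually changed).
+ */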
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
+{
+ uint16_t old_size, new_size;
+
+ old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+ new_size = skl_ddb_entry_size(&new->pipe[pipe]);
+
+ return old_size != new_size &&
+ new->pipe[pipe].start >= old->pipe[pipe].start &&
+ new->pipe[pipe].end <= old->pipe[pipe].end;
+}
+
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+ struct skl_wm_values *new_values)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct skl_ddb_allocation *cur_ddb, *new_ddb;
+ bool reallocated[I915_MAX_PIPES] = {false, false, false};
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ new_ddb = &new_values->ddb;
+ cur_ddb = &dev_priv->wm.skl_hw.ddb;
+
+ /*
+ * First pass: flush the pipes whose new allocation is contained within
+ * their old space.
+ *
+ * We'll wait for the vblank on those pipes to ensure we can safely
+ * re-allocate the freed space without them still fetching from it.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 1);
+ intel_wait_for_vblank(dev, pipe);
+
+ reallocated[pipe] = true;
+ }
+
+
+ /*
+ * Second pass: flush the pipes whose allocation is being reduced but
+ * still overlaps with another pipe's previous allocation.
+ *
+ * Here as well we need to wait for the vblank to make sure the freed
+ * space is not used anymore.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ if (reallocated[pipe])
+ continue;
+
+ if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+ skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+ skl_wm_flush_pipe(dev_priv, pipe, 2);
+ intel_wait_for_vblank(dev, pipe);
+ }
+
+ reallocated[pipe] = true;
+ }
+
+ /*
+ * Third pass: flush the pipes that got more space allocated.
+ *
+ * We don't need to actively wait for the update here, next vblank
+ * will just get more DDB space with the correct WM values.
+ */
+ for_each_intel_crtc(dev, crtc) {
+ if (!crtc->active)
+ continue;
+
+ pipe = crtc->pipe;
+
+ /*
+ * At this point, only the pipes getting more space than before
+ * are left to re-allocate.
+ */
+ if (reallocated[pipe])
+ continue;
+
+ skl_wm_flush_pipe(dev_priv, pipe, 3);
+ }
+}
+
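+/*
+ * Compute the DDB allocation and watermarks for one pipe; returns true if the
+ * result differs from the currently active watermarks.
+ */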
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+ struct skl_pipe_wm_parameters *params,
+ struct intel_wm_config *config,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm /* out */)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ skl_compute_wm_pipe_parameters(crtc, params);
+ skl_allocate_pipe_ddb(crtc, config, params, ddb);
+ skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+
+ if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+ return false;
+
+ intel_crtc->wm.skl_active = *pipe_wm;
+ return true;
+}
+
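+/*
+ * Recompute the DDB allocation and watermarks of the other active pipes when
+ * the update for this crtc changed the global DDB split.
+ */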
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct intel_wm_config *config,
+ struct skl_wm_values *r)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+
+ /*
+ * If the WM update hasn't changed the allocation for this_crtc (the
+ * crtc we are currently computing the new WM values for), other
+ * enabled crtcs will keep the same allocation and we don't need to
+ * recompute anything for them.
+ */
+ if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
+ return;
+
+ /*
+ * Otherwise, because of this_crtc being freshly enabled/disabled, the
+ * other active pipes need new DDB allocation and WM values.
+ */
+ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_pipe_wm pipe_wm = {};
+ bool wm_changed;
+
+ if (this_crtc->pipe == intel_crtc->pipe)
+ continue;
+
+ if (!intel_crtc->active)
+ continue;
+
+ wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+ &params, config,
+ &r->ddb, &pipe_wm);
+
+ /*
+ * If we end up re-computing the other pipe WM values, it's
+ * because it was really needed, so we expect the WM values to
+ * be different.
+ */
+ WARN_ON(!wm_changed);
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+ r->dirty[intel_crtc->pipe] = true;
+ }
+}
+
+static void skl_update_wm(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_pipe_wm_parameters params = {};
+ struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_pipe_wm pipe_wm = {};
+ struct intel_wm_config config = {};
+
+ memset(results, 0, sizeof(*results));
+
+ skl_compute_wm_global_parameters(dev, &config);
+
+ if (!skl_update_pipe_wm(crtc, &params, &config,
+ &results->ddb, &pipe_wm))
+ return;
+
+ skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+ results->dirty[intel_crtc->pipe] = true;
+
+ skl_update_other_pipe_wm(dev, crtc, &config, results);
+ skl_write_wm_values(dev_priv, results);
+ skl_flush_wm_values(dev_priv, results);
+
+ /* store the new configuration */
+ dev_priv->wm.skl_hw = *results;
+}
+
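+/* Cache the sprite plane parameters and recompute the pipe's watermarks. */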
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
+
+ skl_update_wm(crtc);
+}
+
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2934,6 +3836,113 @@ ilk_update_sprite_wm(struct drm_plane *plane,
ilk_update_wm(crtc);
}
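+/*
+ * Decode one PLANE_WM/CUR_WM (or transition WM) register value back into the
+ * software watermark state.
+ */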
+static void skl_pipe_wm_active_state(uint32_t val,
+ struct skl_pipe_wm *active,
+ bool is_transwm,
+ bool is_cursor,
+ int i,
+ int level)
+{
+ bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+ if (!is_transwm) {
+ if (!is_cursor) {
+ active->wm[level].plane_en[i] = is_enabled;
+ active->wm[level].plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->wm[level].cursor_en = is_enabled;
+ active->wm[level].cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->wm[level].cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ } else {
+ if (!is_cursor) {
+ active->trans_wm.plane_en[i] = is_enabled;
+ active->trans_wm.plane_res_b[i] =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.plane_res_l[i] =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ } else {
+ active->trans_wm.cursor_en = is_enabled;
+ active->trans_wm.cursor_res_b =
+ val & PLANE_WM_BLOCKS_MASK;
+ active->trans_wm.cursor_res_l =
+ (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
+ }
+ }
+}
+
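+/*
+ * Read the watermark registers of one pipe back into skl_hw and, if the pipe
+ * is active, rebuild its software watermark state from them.
+ */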
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+ enum pipe pipe = intel_crtc->pipe;
+ int level, i, max_level;
+ uint32_t temp;
+
+ max_level = ilk_wm_max_level(dev);
+
+ hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane[pipe][i][level] =
+ I915_READ(PLANE_WM(pipe, i, level));
+ hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++)
+ hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+ hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
+
+ if (!intel_crtc_active(crtc))
+ return;
+
+ hw->dirty[pipe] = true;
+
+ active->linetime = hw->wm_linetime[pipe];
+
+ for (level = 0; level <= max_level; level++) {
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane[pipe][i][level];
+ skl_pipe_wm_active_state(temp, active, false,
+ false, i, level);
+ }
+ temp = hw->cursor[pipe][level];
+ skl_pipe_wm_active_state(temp, active, false, true, i, level);
+ }
+
+ for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+ temp = hw->plane_trans[pipe][i];
+ skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ }
+
+ temp = hw->cursor_trans[pipe];
+ skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+}
+
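+/*
+ * Read the current DDB allocation and per-pipe watermark state back from the
+ * hardware.
+ */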
+void skl_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+ struct drm_crtc *crtc;
+
+ skl_ddb_get_hw_state(dev_priv, ddb);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ skl_pipe_wm_get_hw_state(crtc);
+}
+
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3442,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
dev_priv->rps.min_freq_softlimit);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
- & GENFREQSTATUS) == 0, 5))
+ & GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
vlv_force_gfx_clock(dev_priv, false);
@@ -3495,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
- if (val != dev_priv->rps.cur_freq) {
- DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq,
- vlv_gpu_freq(dev_priv, val), val);
-
+ if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- }
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@@ -3510,43 +4513,11 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
- I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
- * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
- * gen8_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-}
-
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+ I915_WRITE(GEN6_RC_CONTROL, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
@@ -3555,11 +4526,6 @@ static void gen6_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
- if (IS_BROADWELL(dev))
- gen8_disable_rps_interrupts(dev);
- else
- gen6_disable_rps_interrupts(dev);
}
static void cherryview_disable_rps(struct drm_device *dev)
@@ -3567,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
-
- gen8_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@@ -3582,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
- gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -3594,10 +4556,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ if (HAS_RC6p(dev))
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+ else
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3614,7 +4581,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ if (HAS_RC6p(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
@@ -3642,54 +4609,92 @@ int intel_enable_rc6(const struct drm_device *dev)
return i915.enable_rc6;
}
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
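+/*
+ * Read the RP frequency capabilities from RP_STATE_CAP (and, on HSW/BDW, the
+ * pcode-reported efficient frequency) and initialize the software RPS limits.
+ */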
+static void gen6_init_rps_frequencies(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t rp_state_cap;
+ u32 ddcc_status = 0;
+ int ret;
- spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON(dev_priv->rps.pm_iir);
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
- /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+ dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
- /* XXX: only BYT has a special efficient freq */
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ ret = sandybridge_pcode_read(dev_priv,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status);
+ if (0 == ret)
+ dev_priv->rps.efficient_freq =
+ (ddcc_status >> 8) & 0xff;
+ }
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ if (dev_priv->rps.min_freq_softlimit == 0) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dev_priv->rps.min_freq_softlimit =
+ /* max(RPe, 450 MHz) */
+ max(dev_priv->rps.efficient_freq, (u8) 9);
+ else
+ dev_priv->rps.min_freq_softlimit =
+ dev_priv->rps.min_freq;
+ }
+}
+
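+/*
+ * GEN9 initially only brings up RC6 here; RPS frequency control is not yet
+ * wired up for it (see the GEN9 TODO comments around the RPS interrupt setup).
+ */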
+static void gen9_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ uint32_t rc6_mask = 0;
+ int unused;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds. */
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_ring(ring, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* 3a: Enable RC6 */
+ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+ "on" : "off");
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+
}
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- uint32_t rc6_mask = 0, rp_state_cap;
+ uint32_t rc6_mask = 0;
int unused;
/* 1a: Software RC state - RC0 */
@@ -3702,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3761,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
- gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-
- gen8_enable_rps_interrupts(dev);
+ dev_priv->rps.power = HIGH_POWER; /* force a reset */
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -3772,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- u32 rp_state_cap;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -3796,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-
- parse_rp_state_cap(dev_priv, rp_state_cap);
+ /* Initialize rps frequencies */
+ gen6_init_rps_frequencies(dev);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3861,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- gen6_enable_rps_interrupts(dev);
-
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
@@ -3915,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
- for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
+ for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
- int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
+ int diff = dev_priv->rps.max_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -4072,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
+
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
@@ -4103,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
goto out;
}
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
@@ -4121,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
+ DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
}
@@ -4157,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4199,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
- val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->dpio_lock);
+
switch ((val >> 2) & 0x7) {
case 0:
case 1:
@@ -4223,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4309,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
- DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
-
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
@@ -4340,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4354,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen8_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4420,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4434,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
- gen6_enable_rps_interrupts(dev);
-
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5194,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- cancel_work_sync(&dev_priv->rps.work);
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -5209,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(intel_irqs_enabled(dev_priv));
-
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
@@ -5219,12 +6230,15 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_CHERRYVIEW(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
+ gen9_disable_rps(dev);
+ else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
+
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -5239,10 +6253,19 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ /*
+ * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
+ * added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_reset_rps_interrupts(dev);
+
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
+ } else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
@@ -5251,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
+
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_enable_rps_interrupts(dev);
+
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
@@ -5481,7 +6508,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN6_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
ilk_init_lp_watermarks(dev);
@@ -5609,16 +6636,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* FIXME(BDW): Check all the w/a, some might only apply to
- * pre-production hw. */
-
-
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
-
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
-
-
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5689,7 +6706,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5786,7 +6803,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
@@ -5899,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableGunitClockGating:chv (pre-production hw) */
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
- GINT_DIS);
-
- /* WaDisableFfDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
-
- /* WaDisableDopClockGating:chv (pre-production hw) */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
- GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6036,1161 +7041,35 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
- for (i = 0; \
- i < (power_domains)->power_well_count && \
- ((power_well) = &(power_domains)->power_wells[i]); \
- i++) \
- if ((power_well)->domains & (domain_mask))
-
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
- for (i = (power_domains)->power_well_count - 1; \
- i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
- i--) \
- if ((power_well)->domains & (domain_mask))
-
-/**
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-}
-
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- bool is_enabled;
- int i;
-
- if (dev_priv->pm.suspended)
- return false;
-
- power_domains = &dev_priv->power_domains;
-
- is_enabled = true;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- if (power_well->always_on)
- continue;
-
- if (!power_well->hw_enabled) {
- is_enabled = false;
- break;
- }
- }
-
- return is_enabled;
-}
-
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- bool ret;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- ret = intel_display_power_enabled_unlocked(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-
- if (IS_BROADWELL(dev))
- gen8_irq_power_well_post_enable(dev_priv);
-}
-
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- bool is_enabled, enable_requested;
- uint32_t tmp;
-
- tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
- if (enable) {
- if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_ENABLE_REQUEST);
-
- if (!is_enabled) {
- DRM_DEBUG_KMS("Enabling power well\n");
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE_ENABLED), 20))
- DRM_ERROR("Timeout enabling power well\n");
- }
-
- hsw_power_well_post_enable(dev_priv);
- } else {
- if (enable_requested) {
- I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
- POSTING_READ(HSW_PWR_WELL_DRIVER);
- DRM_DEBUG_KMS("Requesting to disable the power well\n");
- }
- }
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
-
- /*
- * We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now.
- */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, false);
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
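+/* Select the FBC enable/disable/status callbacks for this platform. */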
+static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
- PUNIT_PWRGT_PWR_GATE(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int power_well_id = power_well->data;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(power_well_id);
- ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
- state != PUNIT_PWRGT_PWR_GATE(power_well_id));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
+ if (!HAS_FBC(dev_priv)) {
+ dev_priv->fbc.enabled = false;
return;
-
- intel_hpd_init(dev_priv->dev);
-
- i915_redisable_vga_power_on(dev_priv->dev);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- vlv_power_sequencer_reset(dev_priv);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
- } else {
- phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
- /* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
- PHY_COM_LANE_RESET_DEASSERT(phy));
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
+ if (INTEL_INFO(dev_priv)->gen >= 7) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = gen7_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
} else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
- ~PHY_COM_LANE_RESET_DEASSERT(phy));
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = power_well->data;
- bool enabled;
- u32 state, ctrl;
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = power_well->data;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
-
-#undef COND
-
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, true);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->data != PIPE_A &&
- power_well->data != PIPE_B &&
- power_well->data != PIPE_C);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- intel_runtime_pm_get(dev_priv);
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++) {
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
- }
-
- check_power_well_state(dev_priv, power_well);
+ /* This value was pulled out of someone's hat */
+ I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
- power_domains->domain_use_count[domain]++;
-
- mutex_unlock(&power_domains->lock);
-}
-
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
-
- WARN_ON(!power_domains->domain_use_count[domain]);
- power_domains->domain_use_count[domain]--;
-
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN_ON(!power_well->count);
-
- if (!--power_well->count && i915.disable_power_well) {
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
- power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
- }
-
- check_power_well_state(dev_priv, power_well);
- }
-
- mutex_unlock(&power_domains->lock);
-
- intel_runtime_pm_put(dev_priv);
-}
-
-static struct i915_power_domains *hsw_pwr;
-
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
- struct drm_i915_private *dev_priv;
-
- if (!hsw_pwr)
- return -ENODEV;
-
- dev_priv = container_of(hsw_pwr, struct drm_i915_private,
- power_domains);
-
- return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
-
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- HSW_ALWAYS_ON_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_always_on_power_well_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static struct i915_power_well hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static struct i915_power_well bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_cmn_power_well_ops,
- },
-};
-
-static struct i915_power_well chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
- .ops = &i9xx_always_on_power_well_ops,
- },
-#if 0
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
- {
- .name = "pipe-a",
- .domains = CHV_PIPE_A_POWER_DOMAINS,
- .data = PIPE_A,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-b",
- .domains = CHV_PIPE_B_POWER_DOMAINS,
- .data = PIPE_B,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-c",
- .domains = CHV_PIPE_C_POWER_DOMAINS,
- .data = PIPE_C,
- .ops = &chv_pipe_power_well_ops,
- },
-#endif
- {
- .name = "dpio-common-bc",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
- {
- .name = "dpio-common-d",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
- .ops = &chv_dpio_cmn_power_well_ops,
- },
-#if 0
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-tx-d-01",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
- },
- {
- .name = "dpio-tx-d-23",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
- },
-#endif
-};
-
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->data == power_well_id)
- return power_well;
- }
-
- return NULL;
-}
-
-#define set_power_wells(power_domains, __power_wells) ({ \
- (power_domains)->power_wells = (__power_wells); \
- (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
-})
-
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- mutex_init(&power_domains->lock);
-
- /*
- * The enabling order will be from lower to higher indexed wells,
- * the disabling order is reversed.
- */
- if (IS_HASWELL(dev_priv->dev)) {
- set_power_wells(power_domains, hsw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_BROADWELL(dev_priv->dev)) {
- set_power_wells(power_domains, bdw_power_wells);
- hsw_pwr = power_domains;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, chv_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv->dev)) {
- set_power_wells(power_domains, vlv_power_wells);
- } else {
- set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return 0;
-}
-
-void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-{
- hsw_pwr = NULL;
-}
-
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- power_well->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
- power_well);
- }
- mutex_unlock(&power_domains->lock);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
- struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
-
- /* nothing to do if common lane is already off */
- if (!cmn->ops->is_enabled(dev_priv, cmn))
- return;
-
- /* If the display might be already active skip this */
- if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
- /* cmnlane needs DPLL registers */
- disp2d->ops->enable(dev_priv, disp2d);
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- cmn->ops->disable(dev_priv, cmn);
-}
-
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- power_domains->initializing = true;
-
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(dev_priv);
- mutex_unlock(&power_domains->lock);
- }
-
- /* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev_priv, true);
- intel_power_domains_resume(dev_priv);
- power_domains->initializing = false;
-}
-
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_get(dev_priv);
-}
-
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_get_sync(device);
- WARN(dev_priv->pm.suspended, "Device still suspended.\n");
-}
-
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
- pm_runtime_get_noresume(device);
-}
-
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_mark_last_busy(device);
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- pm_runtime_set_active(device);
-
- /*
- * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!intel_enable_rc6(dev)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- return;
- }
-
- pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
- pm_runtime_mark_last_busy(device);
- pm_runtime_use_autosuspend(device);
-
- pm_runtime_put_autosuspend(device);
-}
-
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct device *device = &dev->pdev->dev;
-
- if (!HAS_RUNTIME_PM(dev))
- return;
-
- if (!intel_enable_rc6(dev))
- return;
-
- /* Make sure we're not suspended first. */
- pm_runtime_get_sync(device);
- pm_runtime_disable(device);
+ dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
@@ -7198,28 +7077,7 @@ void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FBC(dev)) {
- if (INTEL_INFO(dev)->gen >= 7) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = gen7_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (INTEL_INFO(dev)->gen >= 5) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
- /* This value was pulled out of someone's hat */
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
- }
+ intel_init_fbc(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))
@@ -7228,7 +7086,13 @@ void intel_init_pm(struct drm_device *dev)
i915_ironlake_get_mem_freq(dev);
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 9) {
+ skl_setup_wm_latency(dev);
+
+ dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+ dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
+ } else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7309,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
}
}
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7319,6 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
}
I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7333,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
return 0;
}
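For context, both pcode helpers implement the same mailbox handshake; a condensed sketch follows (the 500 ms wait bound and the error codes are illustrative, the register names are the ones already used in this hunk):

	/* Sketch of the GEN6+ pcode mailbox handshake assumed by read and write. */
	static int example_pcode_rw(struct drm_i915_private *dev_priv,
				    u32 mbox, u32 *val, bool write)
	{
		if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
			return -EAGAIN;		/* previous request still pending */

		I915_WRITE(GEN6_PCODE_DATA, write ? *val : 0);
		I915_WRITE(GEN6_PCODE_DATA1, 0);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

		/* pcode clears READY once it has consumed the request */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
			return -ETIMEDOUT;

		if (!write)
			*val = I915_READ(GEN6_PCODE_DATA);
		return 0;
	}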
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -7356,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
- int div;
-
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- div = 10;
- break;
- case 1066:
- div = 12;
- break;
- case 1333:
- div = 16;
- break;
+ switch (czclk_freq) {
+ case 200:
+ return 10;
+ case 267:
+ return 12;
+ case 320:
+ case 333:
+ return 16;
+ case 400:
+ return 20;
default:
return -1;
}
+}
+
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
+
+ div = vlv_gpu_freq_div(czclk_freq);
+ if (div < 0)
+ return div;
- return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
+ return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
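A quick check that the refactor preserves the old arithmetic, using illustrative numbers only:

	/*
	 * Illustrative numbers: mem_freq = 1333 -> czclk_freq = 333, divider = 16.
	 * old: DIV_ROUND_CLOSEST(1333 * (0xc8 + 6 - 0xbd), 4 * 16) = 22661 / 64 ~= 354 MHz
	 * new: DIV_ROUND_CLOSEST( 333 * (0xc8 + 6 - 0xbd),     16) =  5661 / 16 ~= 354 MHz
	 */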
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul;
+ int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
- /* 4 x czclk */
- switch (dev_priv->mem_freq) {
- case 800:
- mul = 10;
- break;
- case 1066:
- mul = 12;
- break;
- case 1333:
- mul = 16;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq);
+ if (mul < 0)
+ return mul;
- return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
+ return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, freq;
+ int div, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- div = 5;
- break;
- case 267:
- div = 6;
- break;
- case 320:
- case 333:
- case 400:
- div = 8;
- break;
- default:
- return -1;
- }
-
- freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+ div = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (div < 0)
+ return div;
- return freq;
+ return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, opcode;
+ int mul, czclk_freq = dev_priv->rps.cz_freq;
- switch (dev_priv->rps.cz_freq) {
- case 200:
- mul = 5;
- break;
- case 267:
- mul = 6;
- break;
- case 320:
- case 333:
- case 400:
- mul = 8;
- break;
- default:
- return -1;
- }
+ mul = vlv_gpu_freq_div(czclk_freq) / 2;
+ if (mul < 0)
+ return mul;
/* CHV needs even values */
- opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
-
- return opcode;
+ return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -7485,5 +7317,4 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 000000000000..716b8a961eea
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell the display controller supports Panel Self-Refresh on display
+ * panels which have a remote frame buffer (RFB) implemented according to the
+ * PSR spec in eDP 1.3. PSR allows the display to drop to lower standby states
+ * when the system is idle but the display is on, as it eliminates display
+ * refresh requests to DDR memory completely as long as the frame buffer for
+ * that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, which is why the i915 PSR support uses the
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ */
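A minimal sketch of how a frontbuffer-tracking caller is expected to use the two hooks around a CPU write; the wrapper function and the choice of INTEL_FRONTBUFFER_PRIMARY() are illustrative assumptions, only intel_psr_invalidate()/intel_psr_flush() come from this file:

	/* Hypothetical caller: bracket a CPU frontbuffer write with the PSR hooks. */
	static void example_frontbuffer_cpu_write(struct drm_device *dev, enum pipe pipe)
	{
		/* assumed frontbuffer bit for the primary plane of this pipe */
		unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

		/* rendering starts: force PSR exit and mark the bits busy */
		intel_psr_invalidate(dev, frontbuffer_bits);

		/* ... CPU writes to the frontbuffer happen here ... */

		/* writes flushed to memory: PSR may come back via the delayed work */
		intel_psr_flush(dev, frontbuffer_bits);
	}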
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+bool intel_psr_is_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+}
+
+static void intel_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+ /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+ the video DIP being updated before programming the video DIP data
+ buffer registers for the DIP being updated. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
+{
+ struct edp_vsc_psr psr_vsc;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider;
+ int precharge = 0x3;
+ bool only_standby = false;
+ static const uint8_t aux_msg[] = {
+ [0] = DP_AUX_NATIVE_WRITE << 4,
+ [1] = DP_SET_POWER >> 8,
+ [2] = DP_SET_POWER & 0xff,
+ [3] = 1 - 1,
+ [4] = DP_SET_POWER_D0,
+ };
+ int i;
+
+ BUILD_BUG_ON(sizeof(aux_msg) > 20);
+
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ for (i = 0; i < sizeof(aux_msg); i += 4)
+ I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+ I915_WRITE(EDP_PSR_AUX_CTL(dev),
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
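The AUX setup loop above assumes intel_dp_pack_aux() packs up to four message bytes into each 32-bit data register, most significant byte first; a sketch of that assumed packing, written as a standalone helper for illustration:

	/* Sketch of the assumed packing: up to 4 bytes per register, MSB first. */
	static uint32_t example_pack_aux(const uint8_t *src, int src_bytes)
	{
		uint32_t v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= ((uint32_t)src[i]) << ((3 - i) * 8);
		return v;
	}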
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+ const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
+
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
+ I915_WRITE(EDP_PSR_CTL(dev), val |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ dev_priv->psr.source_ok = false;
+
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ return false;
+ }
+
+ if (!i915.enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ return false;
+ }
+
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ return false;
+ }
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ return false;
+ }
+
+ out:
+ dev_priv->psr.source_ok = true;
+ return true;
+}
+
+static void intel_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ /* Enable/Re-enable PSR on the host */
+ intel_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
+}
+
+/**
+ * intel_psr_enable - Enable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function can only be called after the pipe is fully trained and enabled.
+ */
+void intel_psr_enable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ goto unlock;
+ }
+
+ if (!intel_psr_match_conditions(intel_dp))
+ goto unlock;
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ intel_psr_setup_vsc(intel_dp);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+ /* Enable PSR on the panel */
+ intel_psr_enable_sink(intel_dp);
+
+ dev_priv->psr.enabled = intel_dp;
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called before disabling pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
+}
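A minimal sketch of the ordering these two entry points require relative to the pipe; the encoder hooks below are placeholders, only the intel_psr_* calls come from this file:

	/* Hypothetical encoder hooks illustrating the required call ordering. */
	static void example_edp_enable(struct intel_dp *intel_dp)
	{
		/* ... port enabled, link trained, pipe running ... */
		intel_psr_enable(intel_dp);	/* only valid after that point */
	}

	static void example_edp_disable(struct intel_dp *intel_dp)
	{
		intel_psr_disable(intel_dp);	/* must run before the pipe goes down */
		/* ... then disable the port and the pipe ... */
	}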
+
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
+
+ /* We have to make sure PSR is ready for re-enable
+ * otherwise it stays disabled until the next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_psr_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_psr_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell sprite plane updates don't result in a PSR invalidating
+ * signal in the hardware, which means we need to manually fake this in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_psr_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @dev: DRM device
+ *
+ * This function is called only once at driver load to initialize basic
+ * PSR stuff.
+ */
+void intel_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 6c792d3a9c9c..5bd69852752c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -29,6 +29,7 @@
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 75ef1b5de45c..78011d73fa9f 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,16 +1,134 @@
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
- 0x00000048,
- 0x00000050,
- 0x00000060,
- 0x000003ec,
+ 0x00000798,
+ 0x000007a4,
+ 0x000007ac,
+ 0x000007bc,
-1,
};
static const u32 gen8_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x69040000,
- 0x61020001,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
0x00000000,
0x00000000,
0x79120000,
@@ -23,48 +141,435 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x79160000,
0x00000000,
- 0x6101000e,
- 0x00000001,
+ 0x78150009,
0x00000000,
- 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6101000e,
0x00000001, /* reloc */
0x00000000,
+ 0x00000000,
0x00000001, /* reloc */
0x00000000,
+ 0x00000001, /* reloc */
0x00000000,
+ 0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
- 0xfffff001,
0x00001001,
- 0xfffff001,
0x00001001,
- 0x78230000,
- 0x000006e0,
- 0x78210000,
- 0x00000700,
- 0x78300000,
- 0x08010040,
- 0x78330000,
- 0x08000000,
- 0x78310000,
- 0x08000000,
- 0x78320000,
- 0x08000000,
- 0x78240000,
- 0x00000641,
- 0x780e0000,
- 0x00000601,
+ 0x00000001,
+ 0x00001001,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
0x780d0000,
0x00000000,
- 0x78180000,
- 0x00000001,
- 0x78520003,
+ 0x79060000,
0x00000000,
+ 0x7907001f,
0x00000000,
0x00000000,
0x00000000,
- 0x78190009,
0x00000000,
0x00000000,
0x00000000,
@@ -75,7 +580,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781b0007,
0x00000000,
0x00000000,
0x00000000,
@@ -84,26 +588,22 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78270000,
0x00000000,
- 0x782c0000,
0x00000000,
- 0x781c0002,
0x00000000,
0x00000000,
0x00000000,
- 0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
- 0x78110008,
0x00000000,
0x00000000,
0x00000000,
@@ -113,12 +613,10 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x78290000,
0x00000000,
- 0x782e0000,
0x00000000,
- 0x781a0009,
0x00000000,
+ 0x790c000f,
0x00000000,
0x00000000,
0x00000000,
@@ -128,7 +626,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x781d0007,
0x00000000,
0x00000000,
0x00000000,
@@ -136,153 +633,153 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x780a0003,
0x00000000,
- 0x78280000,
0x00000000,
- 0x782d0000,
0x00000000,
- 0x78260000,
0x00000000,
- 0x782b0000,
+ 0x78080083,
+ 0x00004000,
0x00000000,
- 0x78150009,
0x00000000,
0x00000000,
+ 0x04004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x08004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x0c004000,
0x00000000,
0x00000000,
- 0x78100007,
0x00000000,
+ 0x10004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x14004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x18004000,
0x00000000,
- 0x781e0003,
0x00000000,
0x00000000,
+ 0x1c004000,
0x00000000,
0x00000000,
- 0x78120002,
0x00000000,
+ 0x20004000,
0x00000000,
0x00000000,
- 0x781f0002,
- 0x30400820,
0x00000000,
+ 0x24004000,
0x00000000,
- 0x78510009,
0x00000000,
0x00000000,
+ 0x28004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x2c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x30004000,
0x00000000,
0x00000000,
- 0x78500003,
- 0x00210000,
0x00000000,
+ 0x34004000,
0x00000000,
0x00000000,
- 0x78130002,
0x00000000,
+ 0x38004000,
0x00000000,
0x00000000,
- 0x782a0000,
- 0x00000480,
- 0x782f0000,
- 0x00000540,
- 0x78140000,
- 0x00000800,
- 0x78170009,
0x00000000,
+ 0x3c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x40004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x44004000,
0x00000000,
0x00000000,
0x00000000,
- 0x7820000a,
- 0x00000580,
+ 0x48004000,
0x00000000,
- 0x08080000,
0x00000000,
0x00000000,
- 0x1f000002,
- 0x00060000,
+ 0x4c004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x50004000,
0x00000000,
- 0x784d0000,
- 0x40000000,
- 0x784f0000,
- 0x80000100,
- 0x780f0000,
- 0x00000740,
- 0x78050006,
0x00000000,
0x00000000,
+ 0x54004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x58004000,
0x00000000,
0x00000000,
- 0x78070003,
0x00000000,
+ 0x5c004000,
0x00000000,
0x00000000,
0x00000000,
- 0x78060003,
+ 0x60004000,
0x00000000,
0x00000000,
0x00000000,
+ 0x64004000,
0x00000000,
- 0x78040001,
0x00000000,
- 0x00000001,
- 0x79000002,
- 0xffffffff,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x78080003,
- 0x00006000,
- 0x000005e0, /* reloc */
+ 0x70004000,
0x00000000,
0x00000000,
- 0x78090005,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
0x02000000,
0x22220000,
- 0x02f60000,
- 0x11230000,
- 0x02850004,
- 0x11230000,
- 0x784b0000,
- 0x0000000f,
- 0x78490001,
0x00000000,
0x00000000,
- 0x7b000005,
0x00000000,
- 0x00000003,
0x00000000,
- 0x00000001,
0x00000000,
0x00000000,
- 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
@@ -297,8 +794,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x000004c0, /* state start */
- 0x00000500,
0x00000000,
0x00000000,
0x00000000,
@@ -345,46 +840,65 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x680b0001,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x780e0000,
+ 0x00000dc1,
+ 0x78240000,
+ 0x00000e01,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
0x00000000,
+ 0x782e0000,
0x00000000,
+ 0x782f0000,
0x00000000,
- 0x00000092,
+ 0x780f0000,
0x00000000,
+ 0x78230000,
+ 0x00000e60,
+ 0x78210000,
+ 0x00000e80,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
0x00000000,
+ 0x00000001,
0x00000000,
0x00000000,
+ 0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
- 0x0060005a,
- 0x21403ae8,
- 0x3a0000c0,
- 0x008d0040,
- 0x0060005a,
- 0x21603ae8,
- 0x3a0000c0,
- 0x008d0080,
- 0x0060005a,
- 0x21803ae8,
- 0x3a0000d0,
- 0x008d0040,
- 0x0060005a,
- 0x21a03ae8,
- 0x3a0000d0,
- 0x008d0080,
- 0x02800031,
- 0x2e0022e8,
- 0x0e000140,
- 0x08840001,
- 0x05800031,
- 0x200022e0,
- 0x0e000e00,
- 0x90031000,
0x00000000,
0x00000000,
0x00000000,
@@ -410,38 +924,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
- 0x06200000,
- 0x00000002,
0x00000000,
0x00000000,
0x00000000,
@@ -449,8 +931,6 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0xf99a130c,
- 0x799a130c,
0x00000000,
0x00000000,
0x00000000,
@@ -466,9 +946,7 @@ static const u32 gen8_null_state_batch[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x3f800000,
0x00000000,
- 0x3f800000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644
index 000000000000..875075373807
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -0,0 +1,974 @@
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+ 0x000007a8,
+ 0x000007b4,
+ 0x000007bc,
+ 0x000007cc,
+ -1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x69040300,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61010011,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00001001,
+ 0x00001001,
+ 0x00000001,
+ 0x00001001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
+ 0x780d0000,
+ 0x00000000,
+ 0x79060000,
+ 0x00000000,
+ 0x7907001f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7902000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x790c000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780a0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78080083,
+ 0x00004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x04004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x08004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x14004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x18004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x20004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x24004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x28004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x2c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x34004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x38004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x40004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x44004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x48004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x50004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x58004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x5c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x60004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x64004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x70004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
+ 0x02000000,
+ 0x22220000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78550003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x680b0001,
+ 0x780e0000,
+ 0x00000e01,
+ 0x78240000,
+ 0x00000e41,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x782f0000,
+ 0x00000000,
+ 0x780f0000,
+ 0x00000000,
+ 0x78230000,
+ 0x00000ea0,
+ 0x78210000,
+ 0x00000ec0,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(9);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e419b589..9f445e9a75d1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
- if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
- i915_kernel_lost_context(ring->dev);
- else {
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
- }
+ ringbuf->head = I915_READ_HEAD(ring);
+ ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->space = intel_ring_space(ringbuf);
+ ringbuf->last_retired_head = -1;
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -665,76 +661,112 @@ err:
return ret;
}
-static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
- u32 addr, u32 value)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
{
+ int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
- return;
+ if (WARN_ON(w->count == 0))
+ return 0;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, addr);
- intel_ring_emit(ring, value);
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
- /* value is updated with the status of remaining bits of this
- * register when it is read from debugfs file
- */
- dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
- dev_priv->num_wa_regs++;
+ ret = intel_ring_begin(ring, (w->count * 2 + 2));
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ for (i = 0; i < w->count; i++) {
+ intel_ring_emit(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
+ }
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
- return;
+ ring->gpu_caches_dirty = true;
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
+
+ return 0;
}
+static int wa_add(struct drm_i915_private *dev_priv,
+ const u32 addr, const u32 mask, const u32 val)
+{
+ const u32 idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ }
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
- */
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
-
- /*
- * update the number of dwords required based on the
- * actual number of workarounds applied
- */
- ret = intel_ring_begin(ring, 18);
- if (ret)
- return ret;
-
/* WaDisablePartialInstShootdown:bdw */
- /* WaDisableThreadStallDopClockGating:bdw */
- /* FIXME: Unclear whether we really need this on production bdw. */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
- | STALL_DOP_GATING_DISABLE));
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
- /* WaDisableDopClockGating:bdw May not be needed for production */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ /* WaDisableDopClockGating:bdw */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
- intel_ring_emit_wa(ring, HDC_CHICKEN0,
- _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+ /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
- intel_ring_emit_wa(ring, CACHE_MODE_1,
- _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ WA_SET_BIT_MASKED(CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -744,52 +776,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- intel_ring_emit_wa(ring, GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
-
- intel_ring_advance(ring);
-
- DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
- dev_priv->num_wa_regs);
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
- int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * workarounds applied in this fn are part of register state context,
- * they need to be re-initialized followed by gpu reset, suspend/resume,
- * module reload.
+ /* WaDisablePartialInstShootdown:chv */
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+ STALL_DOP_GATING_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
*/
- dev_priv->num_wa_regs = 0;
- memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
+ /* WaForceEnableNonCoherent:chv */
+ /* WaHdcDisableFetchWhenMasked:chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED);
- ret = intel_ring_begin(ring, 12);
- if (ret)
- return ret;
+ return 0;
+}
- /* WaDisablePartialInstShootdown:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+int init_workarounds_ring(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* WaDisableThreadStallDopClockGating:chv */
- intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ WARN_ON(ring->id != RCS);
- /* WaDisableDopClockGating:chv (pre-production hw) */
- intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ dev_priv->workarounds.count = 0;
- /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
- intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+ if (IS_BROADWELL(dev))
+ return bdw_init_workarounds(ring);
- intel_ring_advance(ring);
+ if (IS_CHERRYVIEW(dev))
+ return chv_init_workarounds(ring);
return 0;
}
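The hunk above replaces on-the-spot MI_LOAD_REGISTER_IMM emission with a per-device table of (addr, mask, value) workaround entries that wa_add() fills and intel_ring_workarounds_emit() later replays in one batch. A minimal standalone C sketch of that record-then-replay pattern follows; the names (wa_table, wa_add, wa_emit) and the register addresses/bits are illustrative placeholders, not part of this patch or the i915 API.

/*
 * Record-then-replay sketch of the workaround table used above: entries
 * are collected into a fixed-size table first and emitted later in one go.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_WA_REGS 16

struct wa_reg { unsigned int addr, mask, value; };

struct wa_table {
	struct wa_reg reg[MAX_WA_REGS];
	int count;
};

/* Record one register workaround; fails once the table is full. */
static int wa_add(struct wa_table *w, unsigned int addr,
		  unsigned int mask, unsigned int value)
{
	if (w->count >= MAX_WA_REGS)
		return -ENOSPC;
	w->reg[w->count].addr = addr;
	w->reg[w->count].mask = mask;
	w->reg[w->count].value = value;
	w->count++;
	return 0;
}

/* Replay the whole table in one batch (stands in for the LRI emission). */
static void wa_emit(const struct wa_table *w)
{
	int i;

	printf("emitting %d workaround(s)\n", w->count);
	for (i = 0; i < w->count; i++)
		printf("  write 0x%08x <- 0x%08x (mask 0x%08x)\n",
		       w->reg[i].addr, w->reg[i].value, w->reg[i].mask);
}

int main(void)
{
	struct wa_table w = { .count = 0 };

	wa_add(&w, 0xe4f0, 0x1u << 10, 0x1u << 10);  /* made-up addr/bit */
	wa_add(&w, 0x7300, 0x1u << 2, 0);            /* made-up addr/bit */
	wa_emit(&w);
	return 0;
}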
@@ -812,7 +843,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
@@ -849,7 +880,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
- return ret;
+ return init_workarounds_ring(ring);
}
static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -1186,7 +1217,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1248,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1285,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (!intel_irqs_enabled(dev_priv))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1419,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return false;
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1462,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1482,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
- return;
-
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1497,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
- if (!dev->irq_enabled)
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1694,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
- if (!ringbuf->obj)
- return;
-
iounmap(ringbuf->virtual_start);
+ ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = ringbuf->obj;
+ int ret;
+
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret) {
+ i915_gem_object_ggtt_unpin(obj);
+ return ret;
+ }
+
+ ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ i915_gem_object_ggtt_unpin(obj);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
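The new intel_pin_and_map_ringbuffer_obj() above acquires resources in order (GGTT pin, GTT domain, WC mapping) and unpins again if a later step fails. A standalone sketch of that acquire-then-unwind pattern, with purely hypothetical helper names and an injected failure, is shown below.

/*
 * Acquire-then-unwind sketch: each step that can fail releases whatever
 * earlier steps acquired. The helpers and failure toggle are illustrative.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool fail_map;                 /* toggle to exercise the unwind path */

static int pin_object(void)          { printf("pinned\n"); return 0; }
static void unpin_object(void)       { printf("unpinned\n"); }
static int set_domain(void)          { printf("domain set\n"); return 0; }
static int map_object(void)
{
	if (fail_map)
		return -EINVAL;
	printf("mapped\n");
	return 0;
}

static int pin_and_map(void)
{
	int ret;

	ret = pin_object();
	if (ret)
		return ret;

	ret = set_domain();
	if (ret) {
		unpin_object();       /* undo step 1 */
		return ret;
	}

	ret = map_object();
	if (ret) {
		unpin_object();       /* undo step 1; mapping never happened */
		return ret;
	}
	return 0;
}

int main(void)
{
	fail_map = true;
	printf("pin_and_map() = %d\n", pin_and_map());
	return 0;
}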
@@ -1708,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int ret;
-
- if (ringbuf->obj)
- return 0;
obj = NULL;
if (!HAS_LLC(dev))
@@ -1726,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
- if (ret)
- goto err_unref;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto err_unpin;
-
- ringbuf->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
- ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
ringbuf->obj = obj;
- return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
-err_unref:
- drm_gem_object_unreference(&obj->base);
- return ret;
+ return 0;
}
static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1786,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
- goto error;
+ if (ringbuf->obj == NULL) {
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
+ }
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1818,15 +1860,19 @@ error:
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct drm_i915_private *dev_priv;
+ struct intel_ringbuffer *ringbuf;
if (!intel_ring_initialized(ring))
return;
+ dev_priv = to_i915(ring->dev);
+ ringbuf = ring->buffer;
+
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1912,13 +1958,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
break;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
- dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
msleep(1);
if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2229,6 +2268,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2259,8 +2299,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && !invalidate && flush)
- return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ if (!invalidate && flush) {
+ if (IS_GEN7(dev))
+ return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+ else if (IS_BROADWELL(dev))
+ dev_priv->fbc.need_sw_cache_clean = true;
+ }
return 0;
}
@@ -2293,10 +2337,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
dev_priv->semaphore_obj = obj;
}
}
- if (IS_CHERRYVIEW(dev))
- ring->init_context = chv_init_workarounds;
- else
- ring->init_context = bdw_init_workarounds;
+
+ ring->init_context = intel_ring_workarounds_emit;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2406,91 +2448,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- struct intel_ringbuffer *ringbuf = ring->buffer;
- int ret;
-
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
-
- ring->name = "render ring";
- ring->id = RCS;
- ring->mmio_base = RENDER_RING_BASE;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- /* non-kms not supported on gen6+ */
- ret = -ENODEV;
- goto err_ringbuf;
- }
-
- /* Note: gem is not supported on gen5/ilk without kms (the corresponding
- * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
- * the special gen5 functions. */
- ring->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
- else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
- } else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
- }
- ring->irq_enable_mask = I915_USER_INTERRUPT;
- ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
- else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
- ring->cleanup = render_ring_cleanup;
-
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
-
- ringbuf->size = size;
- ringbuf->effective_size = ringbuf->size;
- if (IS_I830(ring->dev) || IS_845G(ring->dev))
- ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
- ringbuf->virtual_start = ioremap_wc(start, size);
- if (ringbuf->virtual_start == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- ret = -ENOMEM;
- goto err_ringbuf;
- }
-
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = init_phys_status_page(ring);
- if (ret)
- goto err_vstart;
- }
-
- return 0;
-
-err_vstart:
- iounmap(ringbuf->virtual_start);
-err_ringbuf:
- kfree(ringbuf);
- ring->buffer = NULL;
- return ret;
-}
-
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 96479c89f4bd..fe426cff598b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -148,7 +148,8 @@ struct intel_engine_cs {
int (*init)(struct intel_engine_cs *ring);
- int (*init_context)(struct intel_engine_cs *ring);
+ int (*init_context)(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
@@ -235,6 +236,7 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
+ struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *ring);
+
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
ring->trace_irq_seqno = seqno;
}
-/* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
-
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 000000000000..f5a78d53e297
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/i915_powerwell.h>
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
+
+static struct i915_power_domains *hsw_pwr;
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+ for (i = 0; \
+ i < (power_domains)->power_well_count && \
+ ((power_well) = &(power_domains)->power_wells[i]); \
+ i++) \
+ if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+ for (i = (power_domains)->power_well_count - 1; \
+ i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+ i--) \
+ if ((power_well)->domains & (domain_mask))
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ bool is_enabled;
+ int i;
+
+ if (dev_priv->pm.suspended)
+ return false;
+
+ power_domains = &dev_priv->power_domains;
+
+ is_enabled = true;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ if (power_well->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This functions controls the state
+ * of this little hack. While the initial power domain state is enabled runtime
+ * pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ if (dev_priv->power_domains.init_power_on == enable)
+ return;
+
+ if (enable)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ else
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+ if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
+ gen8_irq_power_well_post_enable(dev_priv);
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE_ENABLED), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ hsw_power_well_post_enable(dev_priv);
+ }
+
+ } else {
+ if (enable_requested) {
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
+ }
+ }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+ /*
+ * We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now.
+ */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ enum punit_power_well power_well_id = power_well->data;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+ PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
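vlv_set_power_well() writes the requested state to the punit control register and then uses wait_for(COND, 100) to poll the status register until it reflects that state or a timeout expires. The standalone sketch below models that poll-with-timeout idiom; read_status(), the 1 ms back-off and the bit values are assumptions for illustration only.

/*
 * Poll-until-state-with-timeout sketch of the pattern used by
 * vlv_set_power_well()/chv_set_pipe_power_well() above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int fake_status;          /* stands in for the punit register */

static unsigned int read_status(void)     /* stands in for vlv_punit_read() */
{
	return fake_status;
}

/* Poll until (status & mask) == state, or give up after timeout_ms. */
static bool wait_for_state(unsigned int mask, unsigned int state,
			   unsigned int timeout_ms)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
	unsigned int elapsed;

	for (elapsed = 0; elapsed < timeout_ms; elapsed++) {
		if ((read_status() & mask) == state)
			return true;
		nanosleep(&ts, NULL);     /* back off ~1 ms between reads */
	}
	return (read_status() & mask) == state;
}

int main(void)
{
	fake_status = 0x2;                /* pretend the well powered up */
	if (!wait_for_state(0x3, 0x2, 100))
		fprintf(stderr, "timeout setting power well state\n");
	else
		printf("power well reached requested state\n");
	return 0;
}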
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int power_well_id = power_well->data;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(power_well_id);
+ ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+ state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV);
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ } else {
+ phy = DPIO_PHY1;
+ I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ }
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+ DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+ PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+ ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = power_well->data;
+ bool enabled;
+ u32 state, ctrl;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ WARN_ON(ctrl << 16 != state);
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = power_well->data;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ i915_redisable_vga_power_on(dev_priv->dev);
+ }
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PIPE_A &&
+ power_well->data != PIPE_B &&
+ power_well->data != PIPE_C);
+
+ if (power_well->data == PIPE_A) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+
+ if (power_well->data == PIPE_A)
+ vlv_power_sequencer_reset(dev_priv);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+ if (power_well->always_on || !i915.disable_power_well) {
+ if (!enabled)
+ goto mismatch;
+
+ return;
+ }
+
+ if (enabled != (power_well->count > 0))
+ goto mismatch;
+
+ return;
+
+mismatch:
+ WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+ power_well->name, power_well->always_on, enabled,
+ power_well->count, i915.disable_power_well);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ intel_runtime_pm_get(dev_priv);
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++) {
+ DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+ power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+
+ WARN_ON(!power_domains->domain_use_count[domain]);
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ WARN_ON(!power_well->count);
+
+ if (!--power_well->count && i915.disable_power_well) {
+ DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
+ power_well->ops->disable(dev_priv, power_well);
+ }
+
+ check_power_well_state(dev_priv, power_well);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_runtime_pm_put(dev_priv);
+}
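intel_display_power_get()/intel_display_power_put() keep a reference count per power well: the first reference enables a well, the last release disables it, and puts walk the wells in reverse order. The self-contained C model below illustrates that counting scheme under simplified assumptions (two wells, printf standing in for the ->enable/->disable hooks); none of the names are real driver symbols.

/*
 * Reference-counted power-well model: enable on 0 -> 1, disable on 1 -> 0,
 * releasing in reverse order as for_each_power_well_rev() does.
 */
#include <assert.h>
#include <stdio.h>

struct power_well {
	const char *name;
	unsigned int domains;   /* bitmask of domains this well serves */
	int count;              /* reference count */
};

static struct power_well wells[] = {
	{ "always-on", ~0u, 0 },
	{ "display",   0x6, 0 },
};

static void power_get(unsigned int domain_bit)
{
	size_t i;

	for (i = 0; i < sizeof(wells) / sizeof(wells[0]); i++) {
		if (!(wells[i].domains & domain_bit))
			continue;
		if (wells[i].count++ == 0)
			printf("enabling %s\n", wells[i].name);
	}
}

static void power_put(unsigned int domain_bit)
{
	size_t i;

	/* Disable in reverse order, mirroring for_each_power_well_rev(). */
	for (i = sizeof(wells) / sizeof(wells[0]); i-- > 0; ) {
		if (!(wells[i].domains & domain_bit))
			continue;
		assert(wells[i].count > 0);
		if (--wells[i].count == 0)
			printf("disabling %s\n", wells[i].name);
	}
}

int main(void)
{
	power_get(0x2);   /* first user powers the display well on */
	power_get(0x2);   /* second user only bumps the count */
	power_put(0x2);
	power_put(0x2);   /* last user powers it back off */
	return 0;
}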
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
+ HSW_ALWAYS_ON_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_always_on_power_well_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static struct i915_power_well chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+#if 0
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DISP2D,
+ .ops = &vlv_display_power_well_ops,
+ },
+#endif
+ {
+ .name = "pipe-a",
+ /*
+ * FIXME: pipe A power well seems to be the new disp2d well.
+ * At least all registers seem to be housed there. Figure
+ * out if this is a temporary situation in pre-production
+ * hardware or a permanent state of affairs.
+ */
+ .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+ .data = PIPE_A,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#if 0
+ {
+ .name = "pipe-b",
+ .domains = CHV_PIPE_B_POWER_DOMAINS,
+ .data = PIPE_B,
+ .ops = &chv_pipe_power_well_ops,
+ },
+ {
+ .name = "pipe-c",
+ .domains = CHV_PIPE_C_POWER_DOMAINS,
+ .data = PIPE_C,
+ .ops = &chv_pipe_power_well_ops,
+ },
+#endif
+ {
+ .name = "dpio-common-bc",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+ {
+ .name = "dpio-common-d",
+ /*
+ * XXX: cmnreset for one PHY seems to disturb the other.
+ * As a workaround keep both powered on at the same
+ * time for now.
+ */
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+#if 0
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ },
+ {
+ .name = "dpio-tx-d-01",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+ },
+ {
+ .name = "dpio-tx-d-23",
+ .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+ CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+ },
+#endif
+};
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
+#define set_power_wells(power_domains, __power_wells) ({ \
+ (power_domains)->power_wells = (__power_wells); \
+ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_init(&power_domains->lock);
+
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_HASWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, hsw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_BROADWELL(dev_priv->dev)) {
+ set_power_wells(power_domains, bdw_power_wells);
+ hsw_pwr = power_domains;
+ } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ set_power_wells(power_domains, vlv_power_wells);
+ } else {
+ set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return 0;
+}
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ if (!intel_enable_rc6(dev))
+ return;
+
+ /* Make sure we're not suspended first. */
+ pm_runtime_get_sync(device);
+ pm_runtime_disable(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_disable(dev_priv);
+
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_display_set_init_power(dev_priv, true);
+
+ hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* If the display might already be active, skip this */
+ if (cmn->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
+ /* For now, we need the power well to be always enabled. */
+ intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
+ power_domains->initializing = false;
+}
+
+/**
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a power domain reference for the auxiliary power domain
+ * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
+ * parents are powered up. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_aux_display_runtime_put() to release the reference again.
+ */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the auxiliary power domain reference obtained by
+ * intel_aux_display_runtime_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_put(dev_priv);
+}
+
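
A minimal sketch of the intended calling pattern for the pair above, assuming the usual i915 driver context; do_aux_transfer() is a hypothetical placeholder for the actual GMBUS/DP AUX transfer:

/* Bracket a GMBUS/DP AUX access with the aux power reference. */
static int example_aux_access(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_aux_display_runtime_get(dev_priv);
	ret = do_aux_transfer(dev_priv);	/* hypothetical */
	intel_aux_display_runtime_put(dev_priv);

	return ret;
}
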
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_get_sync(device);
+ WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on. Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code where this is used to grab runtime pm references for
+ * delayed setup down in work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+ pm_runtime_get_noresume(device);
+}
+
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_put_autosuspend(device);
+}
+
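
Any GT or GTT access is expected to sit inside a symmetric get/put pair as documented above; a minimal sketch, assuming the usual i915 driver context, with the hardware access itself as a hypothetical placeholder:

/* Keep the device powered while touching the GT; touch_gt_registers()
 * is a hypothetical placeholder for the real hardware access.
 */
static void example_gt_access(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);		/* device is guaranteed powered up */
	touch_gt_registers(dev_priv);		/* hypothetical */
	intel_runtime_pm_put(dev_priv);		/* may autosuspend once idle */
}
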
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does not currently enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+
+ if (!HAS_RUNTIME_PM(dev))
+ return;
+
+ pm_runtime_set_active(device);
+
+ /*
+ * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+ * requirement.
+ */
+ if (!intel_enable_rc6(dev)) {
+ DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+ return;
+ }
+
+ pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+ pm_runtime_mark_last_busy(device);
+ pm_runtime_use_autosuspend(device);
+
+ pm_runtime_put_autosuspend(device);
+}
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
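
Taken together, the three exports above form the interface the external audio driver is expected to use: request the power well, query CDCLK while holding it (as the comment requires), then release it. A minimal sketch of that sequence (the function name is just for illustration):

/* Audio-side usage sketch of the exported power well interface. */
static int example_audio_query_cdclk(int *cdclk_khz)
{
	int ret;

	ret = i915_request_power_well();
	if (ret)
		return ret;	/* -ENODEV when i915 isn't bound yet */

	*cdclk_khz = i915_get_cdclk_freq();

	return i915_release_power_well();
}
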
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9350edd6728d..6d7a277458b5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- struct drm_device *dev = connector->dev;
-
- if (intel_sdvo_connector->left)
- drm_property_destroy(dev, intel_sdvo_connector->left);
- if (intel_sdvo_connector->right)
- drm_property_destroy(dev, intel_sdvo_connector->right);
- if (intel_sdvo_connector->top)
- drm_property_destroy(dev, intel_sdvo_connector->top);
- if (intel_sdvo_connector->bottom)
- drm_property_destroy(dev, intel_sdvo_connector->bottom);
- if (intel_sdvo_connector->hpos)
- drm_property_destroy(dev, intel_sdvo_connector->hpos);
- if (intel_sdvo_connector->vpos)
- drm_property_destroy(dev, intel_sdvo_connector->vpos);
- if (intel_sdvo_connector->saturation)
- drm_property_destroy(dev, intel_sdvo_connector->saturation);
- if (intel_sdvo_connector->contrast)
- drm_property_destroy(dev, intel_sdvo_connector->contrast);
- if (intel_sdvo_connector->hue)
- drm_property_destroy(dev, intel_sdvo_connector->hue);
- if (intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, intel_sdvo_connector->sharpness);
- if (intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
- if (intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
- if (intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
- if (intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
- if (intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
- if (intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
- if (intel_sdvo_connector->brightness)
- drm_property_destroy(dev, intel_sdvo_connector->brightness);
-}
-
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->tv_format)
- drm_property_destroy(connector->dev,
- intel_sdvo_connector->tv_format);
-
- intel_sdvo_destroy_enhance_property(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 07a74ef589bd..7d9c340f7693 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static bool
+format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YVYU:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
/* paranoia */
@@ -46,7 +60,23 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
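
As a quick worked example of the conversion, assume a 1080p timing with crtc_clock = 148500 kHz and crtc_htotal = 2200: 100 us maps to DIV_ROUND_UP(100 * 148500, 1000 * 2200) = DIV_ROUND_UP(14850000, 2200000) = 7 scanlines.
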
-static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+ * @start_vbl_count: vblank counter return pointer used for error checking
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically with respect to vblank. If the next vblank happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays. The value written to @start_vbl_count should be
+ * supplied to intel_pipe_update_end() for error checking.
+ *
+ * Return: true if the call was successful
+ */
+bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -56,8 +86,6 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
- WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
-
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
@@ -112,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
return true;
}
-static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @crtc: the crtc of which the registers were updated
+ * @start_vbl_count: start vblank counter (used for error checking)
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank using the value of @start_vbl_count.
+ */
+void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
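
The two helpers are meant to bracket a burst of plane register writes so they land within one frame; a minimal sketch of the calling pattern, with the register writes themselves as a hypothetical placeholder:

/* Atomic-wrt-vblank update of a set of plane registers;
 * write_plane_registers() is a hypothetical placeholder.
 */
static void example_atomic_plane_update(struct intel_crtc *crtc)
{
	uint32_t start_vbl_count;
	bool atomic_update;

	atomic_update = intel_pipe_update_start(crtc, &start_vbl_count);

	write_plane_registers(crtc);	/* hypothetical */

	if (atomic_update)
		intel_pipe_update_end(crtc, start_vbl_count);
}
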
@@ -139,6 +176,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
}
static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+ u32 plane_ctl, stride;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+ plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+ plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+ plane_ctl &= ~PLANE_CTL_TILED_MASK;
+ plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+ plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
+
+ /* Trickle feed has to be enabled */
+ plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ break;
+ /*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+ case DRM_FORMAT_ABGR8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ORDER_RGBX |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_YUYV:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ stride = fb->pitches[0] >> 6;
+ break;
+ case I915_TILING_X:
+ plane_ctl |= PLANE_CTL_TILED_X;
+ stride = fb->pitches[0] >> 9;
+ break;
+ default:
+ BUG();
+ }
+ if (intel_plane->rotation == BIT(DRM_ROTATE_180))
+ plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ plane_ctl |= PLANE_CTL_ENABLE;
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+ pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+ I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ POSTING_READ(PLANE_SURF(pipe, plane));
+}
+
+static void
+skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane + 1;
+
+ I915_WRITE(PLANE_CTL(pipe, plane),
+ I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+
+ /* Activate double buffered register update */
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+}
+
+static int
+skl_update_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+ plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+ I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+
+ POSTING_READ(PLANE_CTL(pipe, plane));
+
+ return 0;
+}
+
+static void
+skl_get_colorkey(struct drm_plane *drm_plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = drm_plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ const int pipe = intel_plane->pipe;
+ const int plane = intel_plane->plane;
+ u32 plane_ctl;
+
+ key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+ key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+ key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+
+ plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+ switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+ case PLANE_CTL_KEY_ENABLE_DESTINATION:
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ break;
+ case PLANE_CTL_KEY_ENABLE_SOURCE:
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ break;
+ default:
+ key->flags = I915_SET_COLORKEY_NONE;
+ }
+}
+
+static void
+chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+{
+ struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+ int plane = intel_plane->plane;
+
+ /* RGB data always seems to bypass the CSC */
+ if (!format_is_yuv(format))
+ return;
+
+ /*
+ * BT.601 limited range YCbCr -> full range RGB
+ *
+ * |r| | 6537 4769 0| |cr |
+ * |g| = |-3330 4769 -1605| x |y-64|
+ * |b| | 0 4769 8263| |cb |
+ *
+ * Cb and Cr apparently come in as signed already, so no
+ * need for any offset. For Y we need to remove the offset.
+ */
+ I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
+
+ I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
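
The coefficients above appear to be the usual BT.601 limited-to-full-range expansion factors in 4.12 fixed point; that is an inference from the values, not something the code states, but the arithmetic works out:

/*
 * Assumed derivation, 4.12 fixed point (multiply by 2^12 = 4096):
 *
 *   Y scale:  (255/219)         * 4096 ~= 4769
 *   Cr -> R:  1.402 * (255/224) * 4096 ~= 6537
 *   Cr -> G: -0.714 * (255/224) * 4096 ~= -3330
 *   Cb -> G: -0.344 * (255/224) * 4096 ~= -1605
 *   Cb -> B:  1.772 * (255/224) * 4096 ~= 8263
 *
 * 255/219 and 255/224 are the limited->full range expansions for luma
 * and chroma; 1.402, -0.714, -0.344 and 1.772 are the BT.601 matrix terms.
 */
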
+static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -249,6 +506,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ chv_update_csc(intel_plane, fb->pixel_format);
+
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -257,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+ I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@@ -821,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
key->flags = I915_SET_COLORKEY_NONE;
}
-static bool
-format_is_yuv(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- return true;
- default:
- return false;
- }
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
@@ -845,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
}
static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+intel_check_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
- enum pipe pipe = intel_crtc->pipe;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct drm_i915_gem_object *old_obj = intel_plane->obj;
- int ret;
- bool primary_enabled;
- bool visible;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dst = &state->dst;
+ struct drm_rect *orig_src = &state->orig_src;
+ const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- struct drm_rect src = {
- /* sample coordinates in 16.16 fixed point */
- .x1 = src_x,
- .x2 = src_x + src_w,
- .y1 = src_y,
- .y2 = src_y + src_h,
- };
- struct drm_rect dst = {
- /* integer pixels */
- .x1 = crtc_x,
- .x2 = crtc_x + crtc_w,
- .y1 = crtc_y,
- .y2 = crtc_y + crtc_h,
- };
- const struct drm_rect clip = {
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
- };
- const struct {
- int crtc_x, crtc_y;
- unsigned int crtc_w, crtc_h;
- uint32_t src_x, src_y, src_w, src_h;
- } orig = {
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- };
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
- drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
- hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
- vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
- crtc_x = dst.x1;
- crtc_y = dst.y1;
- crtc_w = drm_rect_width(&dst);
- crtc_h = drm_rect_height(&dst);
+ crtc_x = dst->x1;
+ crtc_y = dst->y1;
+ crtc_w = drm_rect_width(dst);
+ crtc_h = drm_rect_height(dst);
- if (visible) {
+ if (state->visible) {
/* check again in case clipping clamped the results */
- hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return hscale;
}
- vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
- drm_rect_debug_print(&src, true);
- drm_rect_debug_print(&dst, false);
+ drm_rect_debug_print(src, true);
+ drm_rect_debug_print(dst, false);
return vscale;
}
/* Make the source viewport size an exact multiple of the scaling factors. */
- drm_rect_adjust_size(&src,
- drm_rect_width(&dst) * hscale - drm_rect_width(&src),
- drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+ drm_rect_adjust_size(src,
+ drm_rect_width(dst) * hscale - drm_rect_width(src),
+ drm_rect_height(dst) * vscale - drm_rect_height(src));
- drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
/* sanity check to make sure the src viewport wasn't enlarged */
- WARN_ON(src.x1 < (int) src_x ||
- src.y1 < (int) src_y ||
- src.x2 > (int) (src_x + src_w) ||
- src.y2 > (int) (src_y + src_h));
+ WARN_ON(src->x1 < (int) orig_src->x1 ||
+ src->y1 < (int) orig_src->y1 ||
+ src->x2 > (int) orig_src->x2 ||
+ src->y2 > (int) orig_src->y2);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
- src_x = src.x1 >> 16;
- src_w = drm_rect_width(&src) >> 16;
- src_y = src.y1 >> 16;
- src_h = drm_rect_height(&src) >> 16;
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
@@ -1000,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_w &= ~1;
if (crtc_w == 0)
- visible = false;
+ state->visible = false;
}
}
/* Check size restrictions when scaling */
- if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+ if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
- visible = false;
+ state->visible = false;
if (src_w < 3 || src_h < 3)
- visible = false;
+ state->visible = false;
- width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+ width_bytes = ((src_x * pixel_size) & 63) +
+ src_w * pixel_size;
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- dst.x1 = crtc_x;
- dst.x2 = crtc_x + crtc_w;
- dst.y1 = crtc_y;
- dst.y2 = crtc_y + crtc_h;
+ if (state->visible) {
+ src->x1 = src_x;
+ src->x2 = src_x + src_w;
+ src->y1 = src_y;
+ src->y2 = src_y + src_h;
+ }
- /*
- * If the sprite is completely covering the primary plane,
- * we can disable the primary and save power.
- */
- primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
- WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+ dst->x1 = crtc_x;
+ dst->x2 = crtc_x + crtc_w;
+ dst->y1 = crtc_y;
+ dst->y2 = crtc_y + crtc_h;
- mutex_lock(&dev->struct_mutex);
+ return 0;
+}
- /* Note that this will apply the VT-d workaround for scanouts,
- * which is more restrictive than required for sprites. (The
- * primary plane requires 256KiB alignment with 64 PTE padding,
- * the sprite planes only require 128KiB alignment and 32 PTE padding.
- */
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+static int
+intel_prepare_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int ret;
- i915_gem_track_fb(old_obj, obj,
- INTEL_FRONTBUFFER_SPRITE(pipe));
- mutex_unlock(&dev->struct_mutex);
+ if (old_obj != obj) {
+ mutex_lock(&dev->struct_mutex);
- if (ret)
- return ret;
+ /* Note that this will apply the VT-d workaround for scanouts,
+ * which is more restrictive than required for sprites. (The
+ * primary plane requires 256KiB alignment with 64 PTE padding,
+ * the sprite planes only require 128KiB alignment and 32 PTE
+ * padding.
+ */
+ ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
- intel_plane->crtc_x = orig.crtc_x;
- intel_plane->crtc_y = orig.crtc_y;
- intel_plane->crtc_w = orig.crtc_w;
- intel_plane->crtc_h = orig.crtc_h;
- intel_plane->src_x = orig.src_x;
- intel_plane->src_y = orig.src_y;
- intel_plane->src_w = orig.src_w;
- intel_plane->src_h = orig.src_h;
+static void
+intel_commit_sprite_plane(struct drm_plane *plane,
+ struct intel_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc *crtc = state->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_plane->obj;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y, src_w, src_h;
+ struct drm_rect *dst = &state->dst;
+ const struct drm_rect *clip = &state->clip;
+ bool primary_enabled;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+ WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
+
+ intel_plane->crtc_x = state->orig_dst.x1;
+ intel_plane->crtc_y = state->orig_dst.y1;
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+ intel_plane->src_x = state->orig_src.x1;
+ intel_plane->src_y = state->orig_src.y1;
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (intel_crtc->active) {
@@ -1076,12 +1339,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
- if (visible)
+ if (state->visible) {
+ crtc_x = state->dst.x1;
+ crtc_y = state->dst.y1;
+ crtc_w = drm_rect_width(&state->dst);
+ crtc_h = drm_rect_height(&state->dst);
+ src_x = state->src.x1;
+ src_y = state->src.y1;
+ src_w = drm_rect_width(&state->src);
+ src_h = drm_rect_height(&state->src);
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- else
+ } else {
intel_plane->disable_plane(plane, crtc);
+ }
+
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
@@ -1090,21 +1363,65 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* Unpin old obj after new one is active to avoid ugliness */
- if (old_obj) {
+ if (old_obj && old_obj != obj) {
+
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
- if (old_obj != obj && intel_crtc->active)
+ if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_plane_state state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int ret;
+ state.crtc = crtc;
+ state.fb = fb;
+
+ /* sample coordinates in 16.16 fixed point */
+ state.src.x1 = src_x;
+ state.src.x2 = src_x + src_w;
+ state.src.y1 = src_y;
+ state.src.y2 = src_y + src_h;
+
+ /* integer pixels */
+ state.dst.x1 = crtc_x;
+ state.dst.x2 = crtc_x + crtc_w;
+ state.dst.y1 = crtc_y;
+ state.dst.y2 = crtc_y + crtc_h;
+
+ state.clip.x1 = 0;
+ state.clip.y1 = 0;
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+ state.orig_src = state.src;
+ state.orig_dst = state.dst;
+
+ ret = intel_check_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ ret = intel_prepare_sprite_plane(plane, &state);
+ if (ret)
+ return ret;
+
+ intel_commit_sprite_plane(plane, &state);
return 0;
}
@@ -1305,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
@@ -1368,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
}
break;
-
+ case 9:
+ /*
+ * FIXME: Skylake planes can be scaled (with some restrictions),
+ * but this is for another time.
+ */
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->update_colorkey = skl_update_colorkey;
+ intel_plane->get_colorkey = skl_get_colorkey;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ break;
default:
kfree(intel_plane);
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341ca3ef9..6f5f59b880f5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b76163965..46de8d75b4bf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,23 +43,17 @@
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
+ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
- if (INTEL_INFO(dev_priv->dev)->gen < 8)
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
-
- /* WaRsForcewakeWaitTC0:vlv */
- if (!IS_CHERRYVIEW(dev_priv->dev))
- __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(0xffff));
+}
+
+static void
+__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_RENDER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Render to ack.\n");
+ }
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_MEDIA_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Media to ack.\n");
+ }
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ FORCEWAKE_ACK_BLITTER_GEN9) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
+ }
+}
+
+static void
+__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ /* Check for Render Engine */
+ if (FORCEWAKE_RENDER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Media Engine */
+ if (FORCEWAKE_MEDIA & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+ /* Check for Blitter Engine */
+ if (FORCEWAKE_BLITTER & fw_engine)
+ __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+}
+
+static void
+gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ if (dev_priv->uncore.fw_blittercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (FORCEWAKE_RENDER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ }
+
+ if (FORCEWAKE_MEDIA & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ }
+
+ if (FORCEWAKE_BLITTER & fw_engine) {
+ WARN_ON(dev_priv->uncore.fw_blittercount == 0);
+ if (--dev_priv->uncore.fw_blittercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_BLITTER);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv = (void *)arg;
@@ -337,6 +474,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
+ if (IS_GEN9(dev))
+ __gen9_gt_force_wake_mt_reset(dev_priv);
+
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
+ } else if (IS_GEN9(dev)) {
+ if (dev_priv->uncore.fw_rendercount)
+ fw |= FORCEWAKE_RENDER;
+
+ if (dev_priv->uncore.fw_mediacount)
+ fw |= FORCEWAKE_MEDIA;
+
+ if (dev_priv->uncore.fw_blittercount)
+ fw |= FORCEWAKE_BLITTER;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+static void __intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+{
+ __intel_uncore_early_sanitize(dev, restore_forcewake);
+ i915_check_and_clear_faults(dev);
+}
+
void intel_uncore_sanitize(struct drm_device *dev)
{
/* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
intel_runtime_pm_get(dev_priv);
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev))
+ return gen9_force_wake_get(dev_priv, fw_engine);
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
if (!dev_priv->uncore.funcs.force_wake_put)
return;
+ /* Redirect to Gen9 specific routine */
+ if (IS_GEN9(dev_priv->dev)) {
+ gen9_force_wake_put(dev_priv, fw_engine);
+ goto out;
+ }
+
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
+#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0xB00, 0x2000)
+
+#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x2700) || \
+ REG_RANGE((reg), 0x3000, 0x4000) || \
+ REG_RANGE((reg), 0x5200, 0x8000) || \
+ REG_RANGE((reg), 0x8140, 0x8160) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0x8C00, 0x8D00) || \
+ REG_RANGE((reg), 0xB000, 0xB480) || \
+ REG_RANGE((reg), 0xE000, 0xE900) || \
+ REG_RANGE((reg), 0x24400, 0x24800))
+
+#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8130, 0x8140) || \
+ REG_RANGE((reg), 0x8800, 0x8A00) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
+ REG_RANGE((reg), 0x9400, 0x9800)
+
+#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
+ ((reg) < 0x40000 &&\
+ !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
+ !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+
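
The range macros above drive the per-register forcewake decision used by the gen9 MMIO accessors added further down; a minimal sketch of that classification, mirroring the checks in the __gen9_read/__gen9_write macros (the helper name is just for illustration):

/* Map a register offset to the forcewake domain(s) it needs on gen9,
 * mirroring the range checks used by the gen9 MMIO accessors.
 */
static unsigned gen9_reg_forcewake_domains(u32 reg)
{
	if (reg >= 0x40000 || FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
		return 0;			/* no forcewake needed */
	if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
	return FORCEWAKE_BLITTER;		/* everything else below 0x40000 */
}
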
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_read(x) \
+static u##x \
+gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ REG_READ_HEADER(x); \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ } \
+ REG_READ_FOOTER; \
+}
+
+__gen9_read(8)
+__gen9_read(16)
+__gen9_read(32)
+__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
REG_WRITE_FOOTER; \
}
+static const u32 gen9_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_MEDIA_GEN9,
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+ if (reg == gen9_shadowed_regs[i])
+ return true;
+
+ return false;
+}
+
+#define __gen9_write(x) \
+static void \
+gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
+ bool trace) { \
+ REG_WRITE_HEADER; \
+ if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
+ is_gen9_shadowed(dev_priv, reg)) { \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ } else { \
+ unsigned fwengine = 0; \
+ if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } else { \
+ if (dev_priv->uncore.fw_blittercount == 0) \
+ fwengine = FORCEWAKE_BLITTER; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+ fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+ fwengine); \
+ } \
+ REG_WRITE_FOOTER; \
+}
+
+__gen9_write(8)
+__gen9_write(16)
+__gen9_write(32)
+__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
+#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
+ dev_priv->uncore.funcs.mmio_writew = x##_write16; \
+ dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+ dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(x) \
+do { \
+ dev_priv->uncore.funcs.mmio_readb = x##_read8; \
+ dev_priv->uncore.funcs.mmio_readw = x##_read16; \
+ dev_priv->uncore.funcs.mmio_readl = x##_read32; \
+ dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+} while (0)
+
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,9 +1151,12 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev, false);
+ __intel_uncore_early_sanitize(dev, false);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_GEN9(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
+ } else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
+ WARN_ON(1);
+ return;
+ case 9:
+ ASSIGN_WRITE_MMIO_VFUNCS(gen9);
+ ASSIGN_READ_MMIO_VFUNCS(gen9);
+ break;
+ case 8:
if (IS_CHERRYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = chv_write8;
- dev_priv->uncore.funcs.mmio_writew = chv_write16;
- dev_priv->uncore.funcs.mmio_writel = chv_write32;
- dev_priv->uncore.funcs.mmio_writeq = chv_write64;
- dev_priv->uncore.funcs.mmio_readb = chv_read8;
- dev_priv->uncore.funcs.mmio_readw = chv_read16;
- dev_priv->uncore.funcs.mmio_readl = chv_read32;
- dev_priv->uncore.funcs.mmio_readq = chv_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(chv);
+ ASSIGN_READ_MMIO_VFUNCS(chv);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen8);
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
- dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
- dev_priv->uncore.funcs.mmio_writew = hsw_write16;
- dev_priv->uncore.funcs.mmio_writel = hsw_write32;
- dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
- dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
- dev_priv->uncore.funcs.mmio_writew = gen6_write16;
- dev_priv->uncore.funcs.mmio_writel = gen6_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
if (IS_VALLEYVIEW(dev)) {
- dev_priv->uncore.funcs.mmio_readb = vlv_read8;
- dev_priv->uncore.funcs.mmio_readw = vlv_read16;
- dev_priv->uncore.funcs.mmio_readl = vlv_read32;
- dev_priv->uncore.funcs.mmio_readq = vlv_read64;
+ ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 5:
- dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
- dev_priv->uncore.funcs.mmio_writew = gen5_write16;
- dev_priv->uncore.funcs.mmio_writel = gen5_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
- dev_priv->uncore.funcs.mmio_readb = gen5_read8;
- dev_priv->uncore.funcs.mmio_readw = gen5_read16;
- dev_priv->uncore.funcs.mmio_readl = gen5_read32;
- dev_priv->uncore.funcs.mmio_readq = gen5_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+ ASSIGN_READ_MMIO_VFUNCS(gen5);
break;
case 4:
case 3:
case 2:
- dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
- dev_priv->uncore.funcs.mmio_writew = gen4_write16;
- dev_priv->uncore.funcs.mmio_writel = gen4_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
- dev_priv->uncore.funcs.mmio_readb = gen4_read8;
- dev_priv->uncore.funcs.mmio_readw = gen4_read16;
- dev_priv->uncore.funcs.mmio_readl = gen4_read32;
- dev_priv->uncore.funcs.mmio_readq = gen4_read64;
+ ASSIGN_WRITE_MMIO_VFUNCS(gen4);
+ ASSIGN_READ_MMIO_VFUNCS(gen4);
break;
}
+
+ i915_check_and_clear_faults(dev);
}
+#undef ASSIGN_WRITE_MMIO_VFUNCS
+#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1053,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
return 0;
}
-static int i965_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct drm_device *dev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i965_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev)
{
- int ret;
-
- /* FIXME: i965g/gm need a display save/restore for gpu reset. */
- return -ENODEV;
+ /* assert reset for at least 20 usec */
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(20);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- pci_write_config_byte(dev->pdev, I965_GDRST,
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
+ return wait_for(i915_reset_complete(dev), 500);
+}
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+static int g4x_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
- return 0;
+static int g33_do_reset(struct drm_device *dev)
+{
+ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(dev), 500);
}
static int g4x_do_reset(struct drm_device *dev)
@@ -1095,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1105,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST,
+ pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
@@ -1115,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+ pci_write_config_byte(dev->pdev, I915_GDRST, 0);
return 0;
}
@@ -1173,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
- else if (IS_GEN4(dev))
- return i965_do_reset(dev);
+ else if (IS_G33(dev))
+ return g33_do_reset(dev);
+ else if (INTEL_INFO(dev)->gen >= 3)
+ return i915_do_reset(dev);
else
return -ENODEV;
}
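
The hunk above swaps the stubbed-out i965 reset for a generic GDRST sequence: set the reset-enable bit in the config-space register, hold it for at least 20 us, clear it, then poll until the status bit drops, bounded by a timeout. A minimal userspace sketch of that assert-then-poll shape follows; the PCI accessors are stubbed and every name is invented, it only loosely mirrors the wait_for(..., 500) in the hunk.

#include <stdint.h>
#include <unistd.h>

#define SKETCH_RESET_ENABLE 0x01 /* assumed "start reset" bit */
#define SKETCH_RESET_STATUS 0x02 /* assumed "reset in progress" bit */

static uint8_t fake_gdrst;

/* Stub config-space accessors: this fake "hardware" drops the busy bit
 * as soon as reset is deasserted. */
static void gdrst_write(uint8_t v)
{
	fake_gdrst = v ? (uint8_t)(v | SKETCH_RESET_STATUS) : 0;
}

static uint8_t gdrst_read(void)
{
	return fake_gdrst;
}

/* Assert reset, hold it, deassert, then poll for completion.
 * Returns 0 on success, -1 on timeout. */
static int sketch_do_reset(void)
{
	int retries = 500;

	gdrst_write(SKETCH_RESET_ENABLE);	/* assert reset */
	usleep(20);				/* hold it >= 20 us */
	gdrst_write(0);				/* deassert */

	while (gdrst_read() & SKETCH_RESET_STATUS) {
		if (--retries == 0)
			return -1;
		usleep(1000);
	}
	return 0;
}

int main(void)
{
	return sketch_do_reset();
}
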
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 82fb758a29bc..ab31848e92cf 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -6,6 +6,7 @@ config DRM_IMX
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
@@ -40,11 +41,11 @@ config DRM_IMX_LDB
found on i.MX53 and i.MX6 processors.
config DRM_IMX_IPUV3
- tristate "DRM Support for i.MX IPUv3"
+ tristate
depends on DRM_IMX
depends on IMX_IPUV3_CORE
- help
- Choose this if you have a i.MX5 or i.MX6 processor.
+ default y if DRM_IMX=y
+ default m if DRM_IMX=m
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/gpu/drm/imx/Makefile
index 582c438d8cbd..582c438d8cbd 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9cb222e2996f..b250130debc8 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -24,13 +24,12 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "imx-drm.h"
#define MAX_CRTC 4
-struct imx_drm_crtc;
-
struct imx_drm_component {
struct device_node *of_node;
struct list_head list;
@@ -632,7 +631,8 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
continue;
}
- component_match_add(&pdev->dev, &match, compare_of, remote);
+ component_match_add(&pdev->dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
@@ -691,7 +691,6 @@ static struct platform_driver imx_drm_pdrv = {
.probe = imx_drm_platform_probe,
.remove = imx_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "imx-drm",
.pm = &imx_drm_pm_ops,
.of_match_table = imx_drm_dt_ids,
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 7453ae00c412..7453ae00c412 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/gpu/drm/imx/imx-hdmi.c
index aaec6b2cdf56..ddc53e039530 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/gpu/drm/imx/imx-hdmi.c
@@ -1754,7 +1754,6 @@ static struct platform_driver imx_hdmi_driver = {
.remove = imx_hdmi_platform_remove,
.driver = {
.name = "imx-hdmi",
- .owner = THIS_MODULE,
.of_match_table = imx_hdmi_dt_ids,
},
};
diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/gpu/drm/imx/imx-hdmi.h
index 39b677689db6..39b677689db6 100644
--- a/drivers/staging/imx-drm/imx-hdmi.h
+++ b/drivers/gpu/drm/imx/imx-hdmi.h
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 4662e00b456a..c60460043e24 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/module.h>
@@ -604,7 +599,6 @@ static struct platform_driver imx_ldb_driver = {
.driver = {
.of_match_table = imx_ldb_dt_ids,
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 42c651be6c20..a729f4f7074c 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/clk.h>
@@ -665,7 +660,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
if (ret < 0) {
- dev_err(dev, "failed to read configuration register: %d\n", ret);
+ dev_err(dev, "failed to read configuration register: %d\n",
+ ret);
return ret;
}
if (val != 0x00100000) {
@@ -724,7 +720,6 @@ static struct platform_driver imx_tve_driver = {
.driver = {
.of_match_table = imx_tve_dt_ids,
.name = "imx-tve",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 11e84a251773..ebee59cb96d8 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 944962b692bb..6987e16fe99b 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -64,6 +64,7 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
{
struct drm_gem_cma_object *cma_obj;
unsigned long eba;
+ int active;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
@@ -74,12 +75,17 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
&cma_obj->paddr, x, y);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
eba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
- ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+
+ if (ipu_plane->enabled) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ } else {
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+ }
/* cache offsets for subsequent pageflips */
ipu_plane->x = x;
@@ -137,6 +143,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
if (crtc_h < 2)
return -EINVAL;
+ /*
+ * since we cannot touch active IDMAC channels, we do not support
+ * resizing the enabled plane or changing its format
+ */
+ if (ipu_plane->enabled) {
+ if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
+ fb->pixel_format != ipu_plane->base.fb->pixel_format)
+ return -EINVAL;
+
+ return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
+ }
+
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
ret = ipu_dp_setup_channel(ipu_plane->dp,
@@ -148,14 +166,22 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
ret);
return ret;
}
- ipu_dp_set_global_alpha(ipu_plane->dp, 1, 0, 1);
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
ipu_dp_setup_channel(ipu_plane->dp,
ipu_drm_fourcc_to_colorspace(fb->pixel_format),
IPUV3_COLORSPACE_UNKNOWN);
ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
- break;
+ /* Enable local alpha on partial plane */
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+ break;
+ default:
+ break;
+ }
}
ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
@@ -181,11 +207,16 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
return ret;
}
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
if (ret < 0)
return ret;
+ ipu_plane->w = src_w;
+ ipu_plane->h = src_h;
+
return 0;
}
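
The ipu_plane_set_base() change above distinguishes an already-enabled plane from an idle one: while the IDMAC channel is scanning out, only the buffer slot it is not using gets reprogrammed and then selected for the next frame; when idle, both slots are written. A small model of that decision follows, with the IPU channel reduced to a plain struct; none of these names are the real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the IDMAC channel state touched above. */
struct sketch_channel {
	bool enabled;
	int active;		/* slot the hardware is scanning out (0 or 1) */
	uint32_t buf[2];	/* programmed buffer base addresses */
	int selected;		/* slot queued for the next frame */
};

static void sketch_set_base(struct sketch_channel *ch, uint32_t eba)
{
	if (ch->enabled) {
		/* Never rewrite the slot being scanned out: program the
		 * other one and queue it, as the hunk does with
		 * ipu_cpmem_set_buffer()/ipu_idmac_select_buffer(). */
		int inactive = !ch->active;

		ch->buf[inactive] = eba;
		ch->selected = inactive;
	} else {
		/* Channel idle: both slots can be written safely. */
		ch->buf[0] = eba;
		ch->buf[1] = eba;
	}
}

int main(void)
{
	struct sketch_channel ch = { .enabled = true, .active = 0 };

	sketch_set_base(&ch, 0x10000000);
	printf("flip queued on slot %d at 0x%08x\n",
	       ch.selected, ch.buf[ch.selected]);
	return 0;
}
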
diff --git a/drivers/staging/imx-drm/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index c0aae5bcb5d4..af125fb40ef5 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -26,6 +26,8 @@ struct ipu_plane {
int x;
int y;
+ int w;
+ int h;
bool enabled;
};
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 015a454b87e1..796c3c1c170a 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/component.h>
@@ -128,6 +123,10 @@ static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ drm_panel_prepare(imxpd->panel);
+ drm_panel_enable(imxpd->panel);
}
static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
@@ -138,6 +137,10 @@ static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ drm_panel_disable(imxpd->panel);
+ drm_panel_unprepare(imxpd->panel);
}
static struct drm_connector_funcs imx_pd_connector_funcs = {
@@ -284,7 +287,6 @@ static struct platform_driver imx_pd_driver = {
.driver = {
.of_match_table = imx_pd_dt_ids,
.name = "imx-parallel-display",
- .owner = THIS_MODULE,
},
};
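
The encoder commit/disable hooks above bracket the panel with prepare/enable on the way up and disable/unprepare on the way down, in mirrored order. A toy version of that ordering with stubbed panel hooks; all names here are placeholders standing in for the drm_panel calls in the hunk.

#include <stdio.h>

struct sketch_panel { const char *name; };

/* Placeholder panel hooks, not the drm_panel API. */
static void sketch_panel_prepare(struct sketch_panel *p)   { printf("%s: prepare\n", p->name); }
static void sketch_panel_enable(struct sketch_panel *p)    { printf("%s: enable\n", p->name); }
static void sketch_panel_disable(struct sketch_panel *p)   { printf("%s: disable\n", p->name); }
static void sketch_panel_unprepare(struct sketch_panel *p) { printf("%s: unprepare\n", p->name); }

static void sketch_encoder_commit(struct sketch_panel *panel)
{
	sketch_panel_prepare(panel);	/* power up before video */
	sketch_panel_enable(panel);	/* then light the panel */
}

static void sketch_encoder_disable(struct sketch_panel *panel)
{
	sketch_panel_disable(panel);	/* reverse order on the way down */
	sketch_panel_unprepare(panel);
}

int main(void)
{
	struct sketch_panel panel = { .name = "lvds0" };

	sketch_encoder_commit(&panel);
	sketch_encoder_disable(&panel);
	return 0;
}
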
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 83485ab81ce8..9872ba9abf1a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 9d907c526c94..5b2a1ff95d3d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,6 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
select SHMEM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 6283dcb96af5..143d988f8add 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
+ adreno/a4xx_gpu.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -24,12 +25,15 @@ msm-y := \
mdp/mdp4/mdp4_irq.o \
mdp/mdp4/mdp4_kms.o \
mdp/mdp4/mdp4_plane.o \
+ mdp/mdp5/mdp5_cfg.o \
+ mdp/mdp5/mdp5_ctl.o \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index a3104598c27f..22882cc0a573 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
- return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
}
#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
}
#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
@@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
}
#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
@@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
{
- return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
}
#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
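
The helpers above pack a float into a register field as unsigned fixed point, so the multiplier is 2^(number of fractional bits); the change from val * 8.0 to val * 16.0 therefore amounts to one extra fractional bit. A worked round-trip under that assumption, with an invented field layout:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_FIELD__MASK	0x0000ffff
#define SKETCH_FIELD__SHIFT	0
#define SKETCH_FRAC_BITS	4	/* matches the * 16.0 variants */

static inline uint32_t SKETCH_FIELD(float val)
{
	return (((uint32_t)(val * (1 << SKETCH_FRAC_BITS))) << SKETCH_FIELD__SHIFT)
		& SKETCH_FIELD__MASK;
}

static inline float SKETCH_FIELD_DECODE(uint32_t reg)
{
	uint32_t raw = (reg & SKETCH_FIELD__MASK) >> SKETCH_FIELD__SHIFT;

	return (float)raw / (1 << SKETCH_FRAC_BITS);
}

int main(void)
{
	/* 2.5 packs to 0x28 (40) with 4 fractional bits, and decodes back. */
	uint32_t reg = SKETCH_FIELD(2.5f);

	printf("packed=0x%08x decoded=%.2f\n", reg, SKETCH_FIELD_DECODE(reg));
	return 0;
}
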
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 82d015279b47..109e9a263daf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
VFMT_NORM_USHORT_16_16 = 29,
VFMT_NORM_USHORT_16_16_16 = 30,
VFMT_NORM_USHORT_16_16_16_16 = 31,
+ VFMT_UINT_32 = 32,
+ VFMT_UINT_32_32 = 33,
+ VFMT_UINT_32_32_32 = 34,
+ VFMT_UINT_32_32_32_32 = 35,
+ VFMT_INT_32 = 36,
+ VFMT_INT_32_32 = 37,
+ VFMT_INT_32_32_32 = 38,
+ VFMT_INT_32_32_32_32 = 39,
VFMT_UBYTE_8 = 40,
VFMT_UBYTE_8_8 = 41,
VFMT_UBYTE_8_8_8 = 42,
@@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
TFMT_NORM_USHORT_565 = 4,
TFMT_NORM_USHORT_5551 = 6,
TFMT_NORM_USHORT_4444 = 7,
+ TFMT_NORM_USHORT_Z16 = 9,
TFMT_NORM_UINT_X8Z24 = 10,
+ TFMT_FLOAT_Z32 = 11,
TFMT_NORM_UINT_NV12_UV_TILED = 17,
TFMT_NORM_UINT_NV12_Y_TILED = 19,
TFMT_NORM_UINT_NV12_UV = 21,
@@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
TFMT_NORM_UINT_I420_U = 26,
TFMT_NORM_UINT_I420_V = 27,
TFMT_NORM_UINT_2_10_10_10 = 41,
+ TFMT_FLOAT_9_9_9_E5 = 42,
+ TFMT_FLOAT_10_11_11 = 43,
TFMT_NORM_UINT_A8 = 44,
TFMT_NORM_UINT_L8_A8 = 47,
TFMT_NORM_UINT_8 = 48,
TFMT_NORM_UINT_8_8 = 49,
TFMT_NORM_UINT_8_8_8 = 50,
TFMT_NORM_UINT_8_8_8_8 = 51,
+ TFMT_NORM_SINT_8_8 = 53,
+ TFMT_NORM_SINT_8_8_8_8 = 55,
+ TFMT_UINT_8_8 = 57,
+ TFMT_UINT_8_8_8_8 = 59,
+ TFMT_SINT_8_8 = 61,
+ TFMT_SINT_8_8_8_8 = 63,
TFMT_FLOAT_16 = 64,
TFMT_FLOAT_16_16 = 65,
TFMT_FLOAT_16_16_16_16 = 67,
+ TFMT_UINT_16 = 68,
+ TFMT_UINT_16_16 = 69,
+ TFMT_UINT_16_16_16_16 = 71,
+ TFMT_SINT_16 = 72,
+ TFMT_SINT_16_16 = 73,
+ TFMT_SINT_16_16_16_16 = 75,
TFMT_FLOAT_32 = 84,
TFMT_FLOAT_32_32 = 85,
TFMT_FLOAT_32_32_32_32 = 87,
+ TFMT_UINT_32 = 88,
+ TFMT_UINT_32_32 = 89,
+ TFMT_UINT_32_32_32_32 = 91,
+ TFMT_SINT_32 = 92,
+ TFMT_SINT_32_32 = 93,
+ TFMT_SINT_32_32_32_32 = 95,
};
enum a3xx_tex_fetchsize {
@@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
};
enum a3xx_color_fmt {
+ RB_R5G6B5_UNORM = 0,
+ RB_R5G5B5A1_UNORM = 1,
+ RB_R4G4B4A4_UNORM = 3,
RB_R8G8B8_UNORM = 4,
RB_R8G8B8A8_UNORM = 8,
- RB_Z16_UNORM = 12,
+ RB_R8G8B8A8_UINT = 10,
+ RB_R8G8B8A8_SINT = 11,
+ RB_R8G8_UNORM = 12,
+ RB_R8_UINT = 14,
+ RB_R8_SINT = 15,
+ RB_R10G10B10A2_UNORM = 16,
RB_A8_UNORM = 20,
+ RB_R8_UNORM = 21,
RB_R16G16B16A16_FLOAT = 27,
+ RB_R11G11B10_FLOAT = 28,
+ RB_R16_SINT = 40,
+ RB_R16G16_SINT = 41,
+ RB_R16G16B16A16_SINT = 43,
+ RB_R16_UINT = 44,
+ RB_R16G16_UINT = 45,
+ RB_R16G16B16A16_UINT = 47,
RB_R32G32B32A32_FLOAT = 51,
-};
-
-enum a3xx_color_swap {
- WZYX = 0,
- WXYZ = 1,
- ZYXW = 2,
- XYZW = 3,
+ RB_R32_SINT = 52,
+ RB_R32G32_SINT = 53,
+ RB_R32G32B32A32_SINT = 55,
+ RB_R32_UINT = 56,
+ RB_R32G32_UINT = 57,
+ RB_R32G32B32A32_UINT = 59,
};
enum a3xx_sp_perfcounter_select {
@@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a3xx_intp_mode {
+ SMOOTH = 0,
+ FLAT = 1,
+};
+
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
@@ -536,6 +586,10 @@ enum a3xx_tex_type {
#define REG_A3XX_CP_MEQ_DATA 0x000001db
+#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
+
#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
#define REG_A3XX_CP_HW_FAULT 0x0000045c
@@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN 0x00000e1e
+
#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
@@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
}
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
@@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
{
- return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+ return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
@@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
{
- return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+ return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
{
- return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+ return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
}
#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
- return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+ return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
@@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
+#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
#define REG_A3XX_RB_DEPTH_INFO 0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
{
@@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
{
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
@@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
@@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
@@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
}
#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
@@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
}
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
@@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
{
- return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
{
- return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
}
#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
{
- return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
}
#define REG_A3XX_TEX_CONST_0 0x00000000
@@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
}
#define REG_A3XX_TEX_CONST_3 0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
#endif /* A3XX_XML */
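
The generated header describes every multi-bit field as a __MASK/__SHIFT pair plus an inline packer that masks its own bits, so a complete register value is just the OR of per-field packers (as in the new A3XX_VPC_VARYING_INTERP_MODE_Cn helpers above). A self-contained usage sketch of that pattern with invented names:

#include <stdint.h>
#include <stdio.h>

enum sketch_intp_mode { SKETCH_SMOOTH = 0, SKETCH_FLAT = 1 };

#define SKETCH_REG_C0__MASK	0x00000003
#define SKETCH_REG_C0__SHIFT	0
static inline uint32_t SKETCH_REG_C0(enum sketch_intp_mode val)
{
	return ((uint32_t)val << SKETCH_REG_C0__SHIFT) & SKETCH_REG_C0__MASK;
}

#define SKETCH_REG_C1__MASK	0x0000000c
#define SKETCH_REG_C1__SHIFT	2
static inline uint32_t SKETCH_REG_C1(enum sketch_intp_mode val)
{
	return ((uint32_t)val << SKETCH_REG_C1__SHIFT) & SKETCH_REG_C1__MASK;
}

int main(void)
{
	/* First varying flat-shaded, second smooth: 0x1 | 0x0 = 0x00000001. */
	uint32_t reg = SKETCH_REG_C0(SKETCH_FLAT) | SKETCH_REG_C1(SKETCH_SMOOTH);

	printf("interp mode word = 0x%08x\n", reg);
	return 0;
}
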
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 218c5b060398..b66c53bdc039 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+/* Register offset defines for A3XX */
+static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_A3XX_CP_PFP_UCODE_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_A3XX_CP_PFP_UCODE_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+ REG_A3XX_CP_PROTECT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_A3XX_RBBM_PERFCTR_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_A3XX_RBBM_INT_0_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_A3XX_RBBM_AHB_ERROR_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_A3XX_RBBM_INT_CLEAR_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
+ REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_A3XX_VSC_SIZE_ADDRESS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_A3XX_SP_VS_OBJ_START_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_A3XX_SP_FS_OBJ_START_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
+ REG_A3XX_RBBM_PM_OVERRIDE2),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
+ REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
+ REG_A3XX_SQ_GPR_MANAGEMENT),
+ REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+ REG_A3XX_SQ_INST_STORE_MANAGMENT),
+ REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_A3XX_RBBM_SW_RESET_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+ REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
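
The table just added maps generation-independent REG_ADRENO_* indices to A3XX register offsets, so common code can address registers without knowing the GPU generation. A hypothetical illustration of that indirection follows; the indices, offsets, and helper are made up and are not the adreno code itself.

#include <stdint.h>
#include <stdio.h>

enum sketch_index { SKETCH_REG_RB_RPTR, SKETCH_REG_RB_WPTR, SKETCH_REG_MAX };

/* Per-generation lookup table, analogous in spirit to the array above. */
static const unsigned int sketch_a3xx_offsets[SKETCH_REG_MAX] = {
	[SKETCH_REG_RB_RPTR] = 0x01c4,	/* made-up offsets */
	[SKETCH_REG_RB_WPTR] = 0x01c5,
};

static uint32_t sketch_reg_read(const unsigned int *offsets, enum sketch_index idx)
{
	/* A real driver would do an MMIO read here; we just echo the offset. */
	return offsets[idx];
}

int main(void)
{
	printf("RB_WPTR lives at 0x%04x on this sketch's \"a3xx\"\n",
	       sketch_reg_read(sketch_a3xx_offsets, SKETCH_REG_RB_WPTR));
	return 0;
}
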
static const struct adreno_gpu_funcs funcs = {
.base = {
@@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
+ adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 000000000000..5a24c416d2dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,2144 @@
+#ifndef A4XX_XML
+#define A4XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
+
+Copyright (C) 2013-2014 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a4xx_color_fmt {
+ RB4_A8_UNORM = 1,
+ RB4_R5G6R5_UNORM = 14,
+ RB4_Z16_UNORM = 15,
+ RB4_R8G8B8_UNORM = 25,
+ RB4_R8G8B8A8_UNORM = 26,
+};
+
+enum a4xx_tile_mode {
+ TILE4_LINEAR = 0,
+ TILE4_3 = 3,
+};
+
+enum a4xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
+enum a4xx_vtx_fmt {
+ VFMT4_FLOAT_32 = 1,
+ VFMT4_FLOAT_32_32 = 2,
+ VFMT4_FLOAT_32_32_32 = 3,
+ VFMT4_FLOAT_32_32_32_32 = 4,
+ VFMT4_FLOAT_16 = 5,
+ VFMT4_FLOAT_16_16 = 6,
+ VFMT4_FLOAT_16_16_16 = 7,
+ VFMT4_FLOAT_16_16_16_16 = 8,
+ VFMT4_FIXED_32 = 9,
+ VFMT4_FIXED_32_32 = 10,
+ VFMT4_FIXED_32_32_32 = 11,
+ VFMT4_FIXED_32_32_32_32 = 12,
+ VFMT4_SHORT_16 = 16,
+ VFMT4_SHORT_16_16 = 17,
+ VFMT4_SHORT_16_16_16 = 18,
+ VFMT4_SHORT_16_16_16_16 = 19,
+ VFMT4_USHORT_16 = 20,
+ VFMT4_USHORT_16_16 = 21,
+ VFMT4_USHORT_16_16_16 = 22,
+ VFMT4_USHORT_16_16_16_16 = 23,
+ VFMT4_NORM_SHORT_16 = 24,
+ VFMT4_NORM_SHORT_16_16 = 25,
+ VFMT4_NORM_SHORT_16_16_16 = 26,
+ VFMT4_NORM_SHORT_16_16_16_16 = 27,
+ VFMT4_NORM_USHORT_16 = 28,
+ VFMT4_NORM_USHORT_16_16 = 29,
+ VFMT4_NORM_USHORT_16_16_16 = 30,
+ VFMT4_NORM_USHORT_16_16_16_16 = 31,
+ VFMT4_UBYTE_8 = 40,
+ VFMT4_UBYTE_8_8 = 41,
+ VFMT4_UBYTE_8_8_8 = 42,
+ VFMT4_UBYTE_8_8_8_8 = 43,
+ VFMT4_NORM_UBYTE_8 = 44,
+ VFMT4_NORM_UBYTE_8_8 = 45,
+ VFMT4_NORM_UBYTE_8_8_8 = 46,
+ VFMT4_NORM_UBYTE_8_8_8_8 = 47,
+ VFMT4_BYTE_8 = 48,
+ VFMT4_BYTE_8_8 = 49,
+ VFMT4_BYTE_8_8_8 = 50,
+ VFMT4_BYTE_8_8_8_8 = 51,
+ VFMT4_NORM_BYTE_8 = 52,
+ VFMT4_NORM_BYTE_8_8 = 53,
+ VFMT4_NORM_BYTE_8_8_8 = 54,
+ VFMT4_NORM_BYTE_8_8_8_8 = 55,
+ VFMT4_UINT_10_10_10_2 = 60,
+ VFMT4_NORM_UINT_10_10_10_2 = 61,
+ VFMT4_INT_10_10_10_2 = 62,
+ VFMT4_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a4xx_tex_fmt {
+ TFMT4_NORM_USHORT_565 = 11,
+ TFMT4_NORM_USHORT_5551 = 10,
+ TFMT4_NORM_USHORT_4444 = 8,
+ TFMT4_NORM_UINT_X8Z24 = 71,
+ TFMT4_NORM_UINT_2_10_10_10 = 33,
+ TFMT4_NORM_UINT_A8 = 3,
+ TFMT4_NORM_UINT_L8_A8 = 13,
+ TFMT4_NORM_UINT_8 = 4,
+ TFMT4_NORM_UINT_8_8_8_8 = 28,
+ TFMT4_FLOAT_16 = 20,
+ TFMT4_FLOAT_16_16 = 40,
+ TFMT4_FLOAT_16_16_16_16 = 53,
+ TFMT4_FLOAT_32 = 43,
+ TFMT4_FLOAT_32_32 = 56,
+ TFMT4_FLOAT_32_32_32_32 = 63,
+};
+
+enum a4xx_depth_format {
+ DEPTH4_NONE = 0,
+ DEPTH4_16 = 1,
+ DEPTH4_24_8 = 2,
+};
+
+enum a4xx_tex_filter {
+ A4XX_TEX_NEAREST = 0,
+ A4XX_TEX_LINEAR = 1,
+};
+
+enum a4xx_tex_clamp {
+ A4XX_TEX_REPEAT = 0,
+ A4XX_TEX_CLAMP_TO_EDGE = 1,
+ A4XX_TEX_MIRROR_REPEAT = 2,
+ A4XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a4xx_tex_swiz {
+ A4XX_TEX_X = 0,
+ A4XX_TEX_Y = 1,
+ A4XX_TEX_Z = 2,
+ A4XX_TEX_W = 3,
+ A4XX_TEX_ZERO = 4,
+ A4XX_TEX_ONE = 5,
+};
+
+enum a4xx_tex_type {
+ A4XX_TEX_1D = 0,
+ A4XX_TEX_2D = 1,
+ A4XX_TEX_CUBE = 2,
+ A4XX_TEX_3D = 3,
+};
+
+#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
+#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
+static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
+{
+ return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+}
+#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A4XX_INT0_VFD_ERROR 0x00000040
+#define A4XX_INT0_CP_SW_INT 0x00000080
+#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A4XX_INT0_CP_HW_FAULT 0x00000800
+#define A4XX_INT0_CP_DMA 0x00001000
+#define A4XX_INT0_CP_IB2_INT 0x00002000
+#define A4XX_INT0_CP_IB1_INT 0x00004000
+#define A4XX_INT0_CP_RB_INT 0x00008000
+#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
+
+#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
+
+#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
+#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
+#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
+#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
+static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
+#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
+#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
+
+#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
+#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
+static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+
+#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
+}
+#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000
+
+static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
+#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
+static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
+#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001
+#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f
+#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
+#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
+#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001
+#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
+
+#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
+#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A4XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
+#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
+
+#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
+
+#define REG_A4XX_RB_DEPTH_INFO 0x00002103
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
+#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
+#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
+#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+
+#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
+#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
+#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
+#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
+static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
+}
+#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
+#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
+static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
+}
+
+#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f
+
+#define REG_A4XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
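The REG_A4XX_RBBM_CLOCK_CTL_TP(i0)-style helpers above compute the offset of the i-th instance of a banked register (0x0004 + i0 here, with the group accessor and its _REG variant resolving to the same address). A minimal sketch of programming such a bank, assuming a hypothetical write_reg(offset, value) MMIO helper and taking the instance count as a parameter rather than asserting it:

/* Illustrative only: write the same value to each TP clock-control
 * instance.  write_reg() stands in for whatever register-write helper
 * the surrounding driver provides; it is not defined by this header.
 */
extern void write_reg(uint32_t offset, uint32_t value);

static void set_tp_clock_ctl(unsigned int count, uint32_t value)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		write_reg(REG_A4XX_RBBM_CLOCK_CTL_TP(i), value);
}
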
+#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
+
+#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
+
+#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
+
+#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
+
+#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
+
+#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
+
+#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
+
+#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
+
+#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
+
+#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
+
+#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
+
+#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
+
+#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
+
+#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
+
+#define REG_A4XX_RBBM_AHB_CMD 0x00000025
+
+#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
+
+#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
+
+#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
+
+#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
+
+#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
+
+#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
+
+#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
+
+#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
+
+#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
+
+#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
+
+#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
+
+#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
+
+#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+
+#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
+
+#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
+
+#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
+
+#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
+
+#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
+
+#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
+
+#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
+
+#define REG_A4XX_RBBM_STATUS 0x00000191
+#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
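The RBBM_STATUS bits above are plain single-bit flags, so an idleness check reduces to masking the value read back from REG_A4XX_RBBM_STATUS. A minimal sketch, again assuming a hypothetical read_reg(offset) helper; GPU_BUSY is presumably the aggregate busy indication, with the remaining bits naming individual blocks:

/* Illustrative only: report whether the GPU looks idle. */
extern uint32_t read_reg(uint32_t offset);	/* hypothetical MMIO read */

static int a4xx_looks_idle(void)
{
	uint32_t status = read_reg(REG_A4XX_RBBM_STATUS);

	return !(status & A4XX_RBBM_STATUS_GPU_BUSY);
}
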
+#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+
+#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
+
+#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
+
+#define REG_A4XX_CP_RB_BASE 0x00000200
+
+#define REG_A4XX_CP_RB_CNTL 0x00000201
+
+#define REG_A4XX_CP_RB_WPTR 0x00000205
+
+#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
+
+#define REG_A4XX_CP_RB_RPTR 0x00000204
+
+#define REG_A4XX_CP_IB1_BASE 0x00000206
+
+#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
+
+#define REG_A4XX_CP_IB2_BASE 0x00000208
+
+#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
+
+#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
+
+#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
+
+#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
+
+#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
+
+#define REG_A4XX_CP_ROQ_DATA 0x0000021d
+
+#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
+
+#define REG_A4XX_CP_MEQ_DATA 0x0000021f
+
+#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
+
+#define REG_A4XX_CP_MERCIU_DATA 0x00000221
+
+#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
+
+#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
+
+#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
+
+#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
+
+#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
+
+#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
+
+#define REG_A4XX_CP_PREEMPT 0x0000022a
+
+#define REG_A4XX_CP_CNTL 0x0000022c
+
+#define REG_A4XX_CP_ME_CNTL 0x0000022d
+
+#define REG_A4XX_CP_DEBUG 0x0000022e
+
+#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
+
+#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
+
+#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
+
+static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
+
+#define REG_A4XX_CP_ST_BASE 0x000004c0
+
+#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
+
+#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
+
+#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
+
+#define REG_A4XX_CP_HW_FAULT 0x000004d8
+
+#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
+
+#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+
+#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
+
+static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+#define REG_A4XX_SP_VS_STATUS 0x00000ec0
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
+
+#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
+#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
+
+#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
+
+#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
+#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
+
+#define REG_A4XX_VPC_ATTR 0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE 0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE 0x02000000
+
+#define REG_A4XX_VPC_PACK 0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
+
+#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
+
+#define REG_A4XX_VFD_CONTROL_0 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3 0x00002203
+
+#define REG_A4XX_VFD_CONTROL_4 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
+
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xfffffff0
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 4
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+ return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+
+#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
+#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
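The GRAS_CL_VPORT_* helpers above pack a float by reinterpreting its IEEE-754 bit pattern via fui(), while GRAS_SU_POINT_MINMAX and GRAS_SU_POINT_SIZE multiply by 16.0 and truncate, i.e. they effectively store fixed-point values with four fractional bits. A sketch of what a bit-cast helper like fui() presumably amounts to (not the driver's own definition), plus a worked example of the fixed-point encoding:

/* Illustrative only: reinterpret a float's bits as a uint32_t. */
static inline uint32_t float_bits(float f)
{
	union { float f; uint32_t u; } cvt = { .f = f };
	return cvt.u;
}

/* Worked example of the 4-fractional-bit encoding used above:
 *   A4XX_GRAS_SU_POINT_SIZE(1.5f) == (int32_t)(1.5f * 16.0) == 24
 */
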
+#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE 0x000021c0
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+
+#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM 0x000021e5
+
+#define REG_A4XX_PC_HS_PARAM 0x000021e7
+
+#define REG_A4XX_VBIF_VERSION 0x00003000
+
+#define REG_A4XX_VBIF_CLKON 0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01 0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E05 0x00000e05
+
+#define REG_A4XX_UNKNOWN_0E42 0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
+
+#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3
+
+#define REG_A4XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A4XX_UNKNOWN_2001 0x00002001
+
+#define REG_A4XX_UNKNOWN_209B 0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF 0x000020ef
+
+#define REG_A4XX_UNKNOWN_20F0 0x000020f0
+
+#define REG_A4XX_UNKNOWN_20F1 0x000020f1
+
+#define REG_A4XX_UNKNOWN_20F2 0x000020f2
+
+#define REG_A4XX_UNKNOWN_20F3 0x000020f3
+
+#define REG_A4XX_UNKNOWN_20F4 0x000020f4
+
+#define REG_A4XX_UNKNOWN_20F5 0x000020f5
+
+#define REG_A4XX_UNKNOWN_20F6 0x000020f6
+
+#define REG_A4XX_UNKNOWN_20F7 0x000020f7
+
+#define REG_A4XX_UNKNOWN_2152 0x00002152
+
+#define REG_A4XX_UNKNOWN_2153 0x00002153
+
+#define REG_A4XX_UNKNOWN_2154 0x00002154
+
+#define REG_A4XX_UNKNOWN_2155 0x00002155
+
+#define REG_A4XX_UNKNOWN_2156 0x00002156
+
+#define REG_A4XX_UNKNOWN_2157 0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3 0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6 0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209 0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+
+#define REG_A4XX_UNKNOWN_2381 0x00002381
+
+#define REG_A4XX_UNKNOWN_23A0 0x000023a0
+
+#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+
+#define REG_A4XX_TEX_SAMP_1 0x00000001
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
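+/* Illustrative note: MIN_LOD/MAX_LOD appear to be packed as 12-bit fixed
+ * point with six fractional bits (hence the * 64.0), so e.g. a max LOD of
+ * 2.5 would be encoded as A4XX_TEX_SAMP_1_MAX_LOD(2.5) == (160 << 8),
+ * i.e. 0x0000a000 after masking.
+ */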
+
+#define REG_A4XX_TEX_CONST_0 0x00000000
+#define A4XX_TEX_CONST_0_TILED 0x00000001
+#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A4XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
+{
+ return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
+}
+#define A4XX_TEX_CONST_0_TYPE__MASK 0x60000000
+#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
+static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
+{
+ return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_1 0x00000001
+#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
+#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
+static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_2 0x00000002
+#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
+#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
+static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_3 0x00000003
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+}
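+/* Illustrative note: the helper above stores the layer size right-shifted by
+ * 12, i.e. presumably in 4 KiB units, so A4XX_TEX_CONST_3_LAYERSZ(0x8000)
+ * evaluates to 0x8.
+ */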
+
+#define REG_A4XX_TEX_CONST_4 0x00000004
+#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff
+#define A4XX_TEX_CONST_4_BASE__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_5 0x00000005
+
+#define REG_A4XX_TEX_CONST_6 0x00000006
+
+#define REG_A4XX_TEX_CONST_7 0x00000007
+
+
+#endif /* A4XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 000000000000..91221836c5ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,604 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "a4xx_gpu.h"
+#ifdef CONFIG_MSM_OCMEM
+# include <soc/qcom/ocmem.h>
+#endif
+
+#define A4XX_INT0_MASK \
+ (A4XX_INT0_RBBM_AHB_ERROR | \
+ A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A4XX_INT0_CP_T0_PACKET_IN_IB | \
+ A4XX_INT0_CP_OPCODE_ERROR | \
+ A4XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A4XX_INT0_CP_HW_FAULT | \
+ A4XX_INT0_CP_IB1_INT | \
+ A4XX_INT0_CP_IB2_INT | \
+ A4XX_INT0_CP_RB_INT | \
+ A4XX_INT0_CP_REG_PROTECT_FAULT | \
+ A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_UCHE_OOB_ACCESS)
+
+extern bool hang_debug;
+static void a4xx_dump(struct msm_gpu *gpu);
+
+/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @gpu: The msm_gpu device pointer
+ */
+static void a4xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
+
+ /* Disable L1 clocking in A420 due to CCU issues with it */
+ for (i = 0; i < 4; i++) {
+ if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00002020);
+ } else {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00022020);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
+}
+
+static void a4xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+ gpu->funcs->idle(gpu);
+}
+
+static int a4xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ if (adreno_is_a4xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection */
+ gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Enable power counters */
+ gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
+
+ /*
+ * Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 30) | 0xFFFF);
+
+ gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a4xx_gpu->ocmem_base >> 14));
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Disable L2 bypass to avoid UCHE out of bounds errors */
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+
+ gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
+ (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+
+ a4xx_enable_hwcg(gpu);
+
+ /*
+ * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
+ * due to timing issue with HLSQ_TP_CLK_EN
+ */
+ if (adreno_is_a420(adreno_gpu)) {
+ unsigned int val;
+ val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
+ val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+ val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
+ }
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
+
+ /* HLSQ registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
+
+ /* VPC registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
+
+ /* SMMU registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->pm4->data);
+ len = adreno_gpu->pm4->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->pfp->data);
+ len = adreno_gpu->pfp->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
+
+ a4xx_me_init(gpu);
+ return 0;
+}
+
+static void a4xx_recover(struct msm_gpu *gpu)
+{
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a4xx_dump(gpu);
+
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a4xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+ if (a4xx_gpu->ocmem_base)
+ ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
+#endif
+
+ kfree(a4xx_gpu);
+}
+
+static void a4xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ adreno_idle(gpu);
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+ A4XX_RBBM_STATUS_GPU_BUSY)))
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
+ DBG("%s: Int status %08x", gpu->name, status);
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a4xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* VMIDMT */
+ 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
+ 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
+ 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
+ 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
+ 0x1380, 0x1380,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* XPU */
+ 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
+ 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
+ 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
+ /* VBIF */
+ 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
+ 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
+ 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+ 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
+ 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
+ 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
+ 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
+ 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
+ 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
+ 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
+ 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
+ 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
+ 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
+ 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
+ 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
+ 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
+ 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
+ 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
+ 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
+ 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
+ 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
+ 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
+ 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
+ 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
+ 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
+ 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
+ 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
+ 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
+ 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
+ ~0 /* sentinel */
+};
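+/* Editorial note: the table above is presumably consumed as (start, end)
+ * register ranges terminated by the ~0 sentinel -- e.g. the first pair
+ * 0x0000, 0x0002 covers RBBM registers 0x0000 through 0x0002 inclusive --
+ * when adreno_dump()/adreno_show() walk adreno_gpu->registers.
+ */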
+
+#ifdef CONFIG_DEBUG_FS
+static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+/* Register offset defines for A4XX, in order of enum adreno_regs */
+static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_A4XX_CP_PFP_UCODE_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_A4XX_CP_PFP_UCODE_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+ REG_A4XX_CP_PROTECT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_A4XX_RBBM_PERFCTR_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+ REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_A4XX_RBBM_INT_0_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_A4XX_RBBM_AHB_ERROR_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+ REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+ REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_A4XX_VPC_DEBUG_RAM_SEL),
+ REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_A4XX_VPC_DEBUG_RAM_READ),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_A4XX_RBBM_INT_CLEAR_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_A4XX_VSC_SIZE_ADDRESS),
+ REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_A4XX_SP_VS_PVT_MEM_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_A4XX_SP_FS_PVT_MEM_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_A4XX_SP_VS_OBJ_START),
+ REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_A4XX_SP_FS_OBJ_START),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_A4XX_RBBM_SW_RESET_CMD),
+ REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+ REG_A4XX_UCHE_INVALIDATE0),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
+
+static void a4xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a4xx_recover,
+ .last_fence = adreno_last_fence,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .idle = a4xx_idle,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a4xx_show,
+#endif
+ },
+};
+
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+{
+ struct a4xx_gpu *a4xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a4xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
+ if (!a4xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a4xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a4xx_gpu->pdev = pdev;
+
+ gpu->perfcntrs = NULL;
+ gpu->num_perfcntrs = 0;
+
+ adreno_gpu->registers = a4xx_registers;
+ adreno_gpu->reg_offsets = a4xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ if (ret)
+ goto fail;
+
+ /* if needed, allocate gmem: */
+ if (adreno_is_a4xx(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+ /* TODO this is different/missing upstream: */
+ struct ocmem_buf *ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+ a4xx_gpu->ocmem_hdl = ocmem_hdl;
+ a4xx_gpu->ocmem_base = ocmem_hdl->addr;
+ adreno_gpu->gmem = ocmem_hdl->len;
+ DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+ a4xx_gpu->ocmem_base);
+#endif
+ }
+
+ if (!gpu->mmu) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ dev_err(dev->dev, "No memory protection without IOMMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a4xx_gpu)
+ a4xx_destroy(&a4xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 000000000000..01247204ac92
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A4XX_GPU_H__
+#define __A4XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a4xx.xml.h"
+
+struct a4xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ /* if OCMEM is used for GMEM: */
+ uint32_t ocmem_base;
+ void *ocmem_hdl;
+};
+#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+
+#endif /* __A4XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index cc341bc62b51..a4b33af9338d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
enum adreno_rb_depth_format {
DEPTHX_16 = 0,
DEPTHX_24_8 = 1,
+ DEPTHX_32 = 2,
};
enum adreno_rb_copy_control_mode {
@@ -132,6 +133,7 @@ enum a3xx_threadmode {
};
enum a3xx_instrbuffermode {
+ CACHE = 0,
BUFFER = 1,
};
@@ -140,6 +142,13 @@ enum a3xx_threadsize {
FOUR_QUADS = 1,
};
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7ab85af3a7db..be83dee83d08 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a330_pfp.fw",
.gmem = SZ_1M,
.init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 2, 0, ANY_ID),
+ .revn = 420,
+ .name = "A420",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
},
};
@@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
+MODULE_FIRMWARE("a420_pm4.fw");
+MODULE_FIRMWARE("a420_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6afa29167fee..aa873048308b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
}
/* Setup REG_CP_RB_CNTL: */
- gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
/* Setup ringbuffer address: */
- gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
- gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ rbmemptr(adreno_gpu, rptr));
/* Setup scratch/timestamp: */
- gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
+ rbmemptr(adreno_gpu, fence));
- gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
return 0;
}
@@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
- if (adreno_is_a3xx(adreno_gpu)) {
+ if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
@@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
/* ensure writes to ringbuffer have hit system memory: */
mb();
- gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
void adreno_idle(struct msm_gpu *gpu)
@@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+ RB_SIZE);
+ if (ret)
+ return ret;
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
- if (ret)
- return ret;
-
mmu = gpu->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 52f051579753..a0cc30977e67 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
@@ -25,6 +27,81 @@
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
+#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+/**
+ * adreno_regs: List of registers that are used across all
+ * 3D devices. Each device type has a different offset value for the same
+ * register, so an array of register offsets is declared for every device
+ * and indexed by the enumeration values defined in this enum.
+ */
+enum adreno_regs {
+ REG_ADRENO_CP_DEBUG,
+ REG_ADRENO_CP_ME_RAM_WADDR,
+ REG_ADRENO_CP_ME_RAM_DATA,
+ REG_ADRENO_CP_PFP_UCODE_DATA,
+ REG_ADRENO_CP_PFP_UCODE_ADDR,
+ REG_ADRENO_CP_WFI_PEND_CTR,
+ REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR,
+ REG_ADRENO_CP_RB_WPTR,
+ REG_ADRENO_CP_PROTECT_CTRL,
+ REG_ADRENO_CP_ME_CNTL,
+ REG_ADRENO_CP_RB_CNTL,
+ REG_ADRENO_CP_IB1_BASE,
+ REG_ADRENO_CP_IB1_BUFSZ,
+ REG_ADRENO_CP_IB2_BASE,
+ REG_ADRENO_CP_IB2_BUFSZ,
+ REG_ADRENO_CP_TIMESTAMP,
+ REG_ADRENO_CP_ME_RAM_RADDR,
+ REG_ADRENO_CP_ROQ_ADDR,
+ REG_ADRENO_CP_ROQ_DATA,
+ REG_ADRENO_CP_MERCIU_ADDR,
+ REG_ADRENO_CP_MERCIU_DATA,
+ REG_ADRENO_CP_MERCIU_DATA2,
+ REG_ADRENO_CP_MEQ_ADDR,
+ REG_ADRENO_CP_MEQ_DATA,
+ REG_ADRENO_CP_HW_FAULT,
+ REG_ADRENO_CP_PROTECT_STATUS,
+ REG_ADRENO_SCRATCH_ADDR,
+ REG_ADRENO_SCRATCH_UMSK,
+ REG_ADRENO_SCRATCH_REG2,
+ REG_ADRENO_RBBM_STATUS,
+ REG_ADRENO_RBBM_PERFCTR_CTL,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+ REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+ REG_ADRENO_RBBM_INT_0_MASK,
+ REG_ADRENO_RBBM_INT_0_STATUS,
+ REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+ REG_ADRENO_RBBM_PM_OVERRIDE2,
+ REG_ADRENO_RBBM_AHB_CMD,
+ REG_ADRENO_RBBM_INT_CLEAR_CMD,
+ REG_ADRENO_RBBM_SW_RESET_CMD,
+ REG_ADRENO_RBBM_CLOCK_CTL,
+ REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+ REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+ REG_ADRENO_VPC_DEBUG_RAM_SEL,
+ REG_ADRENO_VPC_DEBUG_RAM_READ,
+ REG_ADRENO_VSC_SIZE_ADDRESS,
+ REG_ADRENO_VFD_CONTROL_0,
+ REG_ADRENO_VFD_INDEX_MAX,
+ REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+ REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+ REG_ADRENO_SP_VS_OBJ_START_REG,
+ REG_ADRENO_SP_FS_OBJ_START_REG,
+ REG_ADRENO_PA_SC_AA_CONFIG,
+ REG_ADRENO_SQ_GPR_MANAGEMENT,
+ REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+ REG_ADRENO_TP0_CHICKEN,
+ REG_ADRENO_RBBM_RBBM_CTL,
+ REG_ADRENO_UCHE_INVALIDATE0,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+ REG_ADRENO_REGISTER_MAX,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -76,6 +153,13 @@ struct adreno_gpu {
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
uint32_t memptrs_iova;
+
+ /*
+ * Register offsets are different between some GPUs.
+ * GPU specific offsets will be exported by GPU specific
+ * code (a3xx_gpu.c) and stored in this common location.
+ */
+ const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
+static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn >= 400) && (gpu->revn < 500);
+}
+
+static inline int adreno_is_a420(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 420;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+/*
+ * adreno_reg_check() - Checks the validity of a register enum
+ * @gpu: Pointer to struct adreno_gpu
+ * @offset_name: The register enum that is checked
+ */
+static inline bool adreno_reg_check(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name)
+{
+ if (offset_name >= REG_ADRENO_REGISTER_MAX ||
+ !gpu->reg_offsets[offset_name]) {
+ BUG();
+ }
+ return true;
+}
+
+static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name)
+{
+ u32 reg = gpu->reg_offsets[offset_name];
+ u32 val = 0;
+ if (adreno_reg_check(gpu, offset_name))
+ val = gpu_read(&gpu->base, reg - 1);
+ return val;
+}
+
+static inline void adreno_gpu_write(struct adreno_gpu *gpu,
+ enum adreno_regs offset_name, u32 data)
+{
+ u32 reg = gpu->reg_offsets[offset_name];
+ if (adreno_reg_check(gpu, offset_name))
+ gpu_write(&gpu->base, reg - 1, data);
+}
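+/*
+ * Illustrative usage sketch (not part of this patch): a per-GPU table maps
+ * the generic enum onto real register offsets, and common code goes through
+ * the accessors above instead of raw offsets, e.g.:
+ *
+ *   static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ *       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
+ *   };
+ *   adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
+ *
+ * REG_ADRENO_DEFINE() stores the offset + 1 so that a zero entry means
+ * "not implemented on this GPU", which adreno_reg_check() traps with BUG();
+ * the accessors subtract 1 again before calling gpu_read()/gpu_write().
+ */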
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6ef43f66c30a..6a75cee94d81 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
CP_IM_STORE = 44,
CP_SET_DRAW_INIT_FLAGS = 75,
CP_SET_PROTECTED_MODE = 95,
+ CP_BOOTSTRAP_UCODE = 111,
CP_LOAD_STATE = 48,
CP_COND_INDIRECT_BUFFER_PFE = 58,
CP_COND_INDIRECT_BUFFER_PFD = 50,
@@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2 0x00000002
@@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
}
-#define REG_CP_DRAW_INDX_2 0x00000002
-#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_3 0x00000003
+#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+ return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
}
-#define REG_CP_DRAW_INDX_2 0x00000002
-#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_4 0x00000004
+#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+ return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
}
#define REG_CP_DRAW_INDX_2_0 0x00000000
@@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_2_2 0x00000002
@@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
-#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
}
#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
@@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
}
-#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
-#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
-static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
#define REG_CP_SET_DRAW_STATE_0 0x00000000
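
The renumbered CP_DRAW_INDX_* / CP_DRAW_INDX_OFFSET_* definitions above follow the usual rules-ng-ng pattern: each generated inline shifts a field value into place and masks it to the field width, and the per-dword helpers are OR'd together when packing command-stream dwords. The following standalone C sketch (not part of the patch; the build_* wrappers and field names are illustrative stand-ins for the generated CP_DRAW_INDX_OFFSET_* inlines) shows how such helpers are typically combined:

/* Standalone sketch: combining generated shift/mask field builders
 * when packing command-stream dwords.  Field layout copied from the
 * header above; the wrapper names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_INSTANCES__MASK  0xffff0000
#define NUM_INSTANCES__SHIFT 16
#define INDX_SIZE__MASK      0xffffffff
#define INDX_SIZE__SHIFT     0

static inline uint32_t build_num_instances(uint32_t val)
{
	return (val << NUM_INSTANCES__SHIFT) & NUM_INSTANCES__MASK;
}

static inline uint32_t build_indx_size(uint32_t val)
{
	return (val << INDX_SIZE__SHIFT) & INDX_SIZE__MASK;
}

int main(void)
{
	/* dword 0 carries the instance count in its upper half-word,
	 * a later dword carries the index buffer size in bytes:
	 */
	uint32_t dw0 = build_num_instances(4);
	uint32_t dw5 = build_indx_size(1024 * (uint32_t)sizeof(uint16_t));

	printf("dw0=0x%08x dw5=0x%08x\n", (unsigned)dw0, (unsigned)dw5);
	return 0;
}
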
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index e965898dfda6..448438b759b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index f2bdda957205..c102a7f074ac 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index e5b071ffd865..a900134bdf33 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9d00dcba6959..062c68725376 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_irq.h>
#include "hdmi.h"
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
-irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void hdmi_destroy(struct kref *kref)
+static void hdmi_destroy(struct hdmi *hdmi)
{
- struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
struct hdmi_phy *phy = hdmi->phy;
if (phy)
@@ -68,37 +68,24 @@ void hdmi_destroy(struct kref *kref)
platform_set_drvdata(hdmi->pdev, NULL);
}
-/* initialize connector */
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+/* construct hdmi at bind/probe time, grab all the resources. If
+ * we are to EPROBE_DEFER we want to do it here, rather than later
+ * at modeset_init() time
+ */
+static struct hdmi *hdmi_init(struct platform_device *pdev)
{
+ struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->hdmi_pdev;
- struct hdmi_platform_config *config;
int i, ret;
- if (!pdev) {
- dev_err(dev->dev, "no hdmi device\n");
- ret = -ENXIO;
- goto fail;
- }
-
- config = pdev->dev.platform_data;
-
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
ret = -ENOMEM;
goto fail;
}
- kref_init(&hdmi->refcount);
-
- hdmi->dev = dev;
hdmi->pdev = pdev;
hdmi->config = config;
- hdmi->encoder = encoder;
-
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
/* not sure about which phy maps to which msm.. probably I miss some */
if (config->phy_init)
@@ -108,7 +95,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
- dev_err(dev->dev, "failed to load phy: %d\n", ret);
+ dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
hdmi->phy = NULL;
goto fail;
}
@@ -127,7 +114,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -143,7 +130,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -158,7 +145,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -173,7 +160,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+ dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -184,11 +171,40 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
hdmi->i2c = hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+ dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
+ return hdmi;
+
+fail:
+ if (hdmi)
+ hdmi_destroy(hdmi);
+
+ return ERR_PTR(ret);
+}
+
+/* Second part of initialization, the drm/kms level modeset_init,
+ * constructs/initializes mode objects, etc, is called from master
+ * driver (not hdmi sub-device's probe/bind!)
+ *
+ * Any resource (regulator/clk/etc) which could be missing at boot
+ * should be handled in hdmi_init() so that failure happens from
+ * hdmi sub-device's probe.
+ */
+int hdmi_modeset_init(struct hdmi *hdmi,
+ struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = hdmi->pdev;
+ int ret;
+
+ hdmi->dev = dev;
+ hdmi->encoder = encoder;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
hdmi->bridge = hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
@@ -205,22 +221,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
goto fail;
}
- if (!config->shared_irq) {
- hdmi->irq = platform_get_irq(pdev, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
- goto fail;
- }
+ hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
- ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
- NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "hdmi_isr", hdmi);
- if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
- hdmi->irq, ret);
- goto fail;
- }
+ ret = devm_request_irq(&pdev->dev, hdmi->irq,
+ hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
}
encoder->bridge = hdmi->bridge;
@@ -230,19 +244,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
platform_set_drvdata(pdev, hdmi);
- return hdmi;
+ return 0;
fail:
- if (hdmi) {
- /* bridge/connector are normally destroyed by drm: */
- if (hdmi->bridge)
- hdmi->bridge->funcs->destroy(hdmi->bridge);
- if (hdmi->connector)
- hdmi->connector->funcs->destroy(hdmi->connector);
- hdmi_destroy(&hdmi->refcount);
+ /* bridge/connector are normally destroyed by drm: */
+ if (hdmi->bridge) {
+ hdmi->bridge->funcs->destroy(hdmi->bridge);
+ hdmi->bridge = NULL;
+ }
+ if (hdmi->connector) {
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi->connector = NULL;
}
- return ERR_PTR(ret);
+ return ret;
}
/*
@@ -251,13 +266,6 @@ fail:
#include <linux/of_gpio.h>
-static void set_hdmi_pdev(struct drm_device *dev,
- struct platform_device *pdev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- priv->hdmi_pdev = pdev;
-}
-
#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
@@ -278,7 +286,10 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config config = {};
+ struct hdmi *hdmi;
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
@@ -298,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
- config.shared_irq = true;
} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -369,14 +379,22 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
}
#endif
dev->platform_data = &config;
- set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ hdmi = hdmi_init(to_platform_device(dev));
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+ priv->hdmi = hdmi;
return 0;
}
static void hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- set_hdmi_pdev(dev_get_drvdata(master), NULL);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ if (priv->hdmi) {
+ hdmi_destroy(priv->hdmi);
+ priv->hdmi = NULL;
+ }
}
static const struct component_ops hdmi_ops = {
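
The hdmi.c changes above split construction in two: hdmi_init() runs at component bind/probe time and grabs every resource that could be missing at boot (so -EPROBE_DEFER propagates from the sub-device's probe), while hdmi_modeset_init() is called later by the master driver and only builds the bridge/connector mode objects. The sketch below is a minimal, standalone illustration of that two-stage pattern; the types, helper names, and error handling are stand-ins, not the kernel API or the actual driver code.

/* Standalone sketch of the two-stage init split: stage 1 (bind/probe
 * time) acquires resources and may defer, stage 2 (modeset time) only
 * constructs mode objects once everything is present.
 */
#include <stdio.h>
#include <stdlib.h>

#define EPROBE_DEFER 517	/* same numeric value as the kernel's */

struct hdmi_sketch {
	int have_clocks;
	int have_encoder;
};

/* stage 1: anything that can be missing at boot is handled here */
static struct hdmi_sketch *hdmi_init_sketch(int clocks_ready)
{
	struct hdmi_sketch *hdmi = calloc(1, sizeof(*hdmi));

	if (!hdmi)
		return NULL;
	if (!clocks_ready) {
		/* caller would turn this into -EPROBE_DEFER and rebind later */
		fprintf(stderr, "defer: clocks not ready (%d)\n", EPROBE_DEFER);
		free(hdmi);
		return NULL;
	}
	hdmi->have_clocks = 1;
	return hdmi;
}

/* stage 2: runs from the master driver's modeset_init() */
static int hdmi_modeset_init_sketch(struct hdmi_sketch *hdmi)
{
	if (!hdmi || !hdmi->have_clocks)
		return -1;
	hdmi->have_encoder = 1;	/* bridge/connector would be built here */
	return 0;
}

int main(void)
{
	struct hdmi_sketch *hdmi = hdmi_init_sketch(1);

	if (hdmi && hdmi_modeset_init_sketch(hdmi) == 0)
		printf("two-stage init complete\n");
	free(hdmi);
	return 0;
}
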
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b981995410b5..43e654f751b7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -38,8 +38,6 @@ struct hdmi_audio {
};
struct hdmi {
- struct kref refcount;
-
struct drm_device *dev;
struct platform_device *pdev;
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
int mux_lpm_gpio;
-
- /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
- bool shared_irq;
};
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
-void hdmi_destroy(struct kref *kref);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
return msm_readl(hdmi->mmio + reg);
}
-static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
-{
- kref_get(&hdmi->refcount);
- return hdmi;
-}
-
-static inline void hdmi_unreference(struct hdmi *hdmi)
-{
- kref_put(&hdmi->refcount, hdmi_destroy);
-}
-
/*
* The phy appears to be different, for example between 8960 and 8x60,
* so split the phy related functions out and load the correct one at
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 76fd0cfc6558..5b0844befbab 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f6cf745c249e..6902ad6da710 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -26,7 +26,6 @@ struct hdmi_bridge {
static void hdmi_bridge_destroy(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- hdmi_unreference(hdmi_bridge->hdmi);
drm_bridge_cleanup(bridge);
kfree(hdmi_bridge);
}
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
goto fail;
}
- hdmi_bridge->hdmi = hdmi_reference(hdmi);
+ hdmi_bridge->hdmi = hdmi;
bridge = &hdmi_bridge->base;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 4aca2a3c667c..fbebb0405d76 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- hdmi_unreference(hdmi_connector->hdmi);
-
kfree(hdmi_connector);
}
@@ -401,6 +399,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
.detect = hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -422,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
goto fail;
}
- hdmi_connector->hdmi = hdmi_reference(hdmi);
+ hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
connector = &hdmi_connector->base;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index f408b69486a8..eeed006eed13 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
#ifdef CONFIG_COMMON_CLK
phy_8960->pll_hw.init = &pll_init;
- phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+ phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
if (IS_ERR(phy_8960->pll)) {
ret = PTR_ERR(phy_8960->pll);
phy_8960->pll = NULL;
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d53c29327df9..29bd796797de 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 03c0bd9cd5b9..a4a7f8c7122a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 7d00f7fb5773..a7672e100d8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -25,8 +25,6 @@
struct mdp4_crtc {
struct drm_crtc base;
char name[8];
- struct drm_plane *plane;
- struct drm_plane *planes[8];
int id;
int ovlp;
enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we logically (from PoV of KMS API) hold a ref
- * to. Which we may not yet be scanning out (we may still
- * be scanning out previous in case of page_flip while waiting
- * for gpu rendering to complete:
- */
- struct drm_framebuffer *fb;
-
- /* the fb that we currently hold a scanout ref to: */
- struct drm_framebuffer *scanout_fb;
-
- /* for unref'ing framebuffers after scanout completes: */
- struct drm_flip_work unref_fb_work;
-
/* for unref'ing cursor bo's after scanout completes: */
struct drm_flip_work unref_cursor_work;
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
+ struct drm_plane *plane;
+ uint32_t flush = 0;
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
}
+
flush |= ovlp2flush(mdp4_crtc->ovlp);
DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
-
- /* grab reference to incoming scanout fb: */
- drm_framebuffer_reference(new_fb);
- mdp4_crtc->base.primary->fb = new_fb;
- mdp4_crtc->fb = new_fb;
-
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-}
-
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this. Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
- */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- /* flush updates, to make sure hw is updated to new scanout fb,
- * so that we can safely queue unref to current fb (ie. next
- * vblank we know hw is done w/ previous scanout_fb).
- */
- crtc_flush(crtc);
-
- if (mdp4_crtc->scanout_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
- mdp4_crtc->scanout_fb);
-
- mdp4_crtc->scanout_fb = fb;
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
-}
-
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
drm_send_vblank_event(dev, mdp4_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(cb, struct mdp4_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp4_crtc->base;
- struct drm_framebuffer *fb = crtc->primary->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(work, struct mdp4_crtc, unref_fb_work);
- struct drm_device *dev = mdp4_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void blend_setup(struct drm_crtc *crtc)
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+ [VG4] = 4,
+
+};
+
+/* setup mixer config, for which we need to consider all crtc's and
+ * the planes attached to them
+ *
+ * TODO may possibly need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- int i, ovlp = mdp4_crtc->ovlp;
+ struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+ struct drm_crtc *crtc;
uint32_t mixer_cfg = 0;
static const enum mdp_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
- /* statically (for now) map planes to mixer stage (z-order): */
- static const int idxs[] = {
- [VG1] = 1,
- [VG2] = 2,
- [RGB1] = 0,
- [RGB2] = 0,
- [RGB3] = 0,
- [VG3] = 3,
- [VG4] = 4,
- };
- bool alpha[4]= { false, false, false, false };
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane;
- /* Don't rely on value read back from hw, but instead use our
- * own shadowed value. Possibly disable/reenable looses the
- * previous value and goes back to power-on default?
- */
- mixer_cfg = mdp4_kms->mixer_cfg;
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+ pipe_id, stages[idx]);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ int i, ovlp = mdp4_crtc->ovlp;
+ bool alpha[4]= { false, false, false, false };
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- int idx = idxs[pipe_id];
- if (idx > 0) {
- const struct mdp_format *format =
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(plane->fb));
- alpha[idx-1] = format->alpha_enable;
- }
- mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
- pipe_id, stages[idx]);
+ alpha[idx-1] = format->alpha_enable;
}
}
- /* this shouldn't happen.. and seems to cause underflow: */
- WARN_ON(!mixer_cfg);
-
for (i = 0; i < 4; i++) {
uint32_t op;
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}
- mdp4_kms->mixer_cfg = mixer_cfg;
- mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+ setup_mixer(mdp4_kms);
}
-static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
- int ret, ovlp = mdp4_crtc->ovlp;
+ int ovlp = mdp4_crtc->ovlp;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
- mode = adjusted_mode;
+ mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
/* take data from pipe: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
- mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
- crtc->primary->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
MDP4_DMA_DST_SIZE_WIDTH(0) |
MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
- mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
- crtc->primary->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
-
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
-
- return 0;
}
static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_plane *plane = mdp4_crtc->plane;
- struct drm_display_mode *mode = &crtc->mode;
- int ret;
+ struct drm_device *dev = crtc->dev;
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
+ DBG("%s: check", mdp4_crtc->name);
- ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- return ret;
+ if (mdp4_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
}
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
+ // TODO anything else to check?
return 0;
}
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: begin", mdp4_crtc->name);
}
-static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
+ DBG("%s: flush", mdp4_crtc->name);
- obj = msm_framebuffer_bo(new_fb, 0);
+ WARN_ON(mdp4_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
- mdp4_crtc->event = event;
+ mdp4_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, new_fb);
-
- return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+ blend_setup(crtc);
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_FLIP);
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
- .page_flip = mdp4_crtc_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.set_property = mdp4_crtc_set_property,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.dpms = mdp4_crtc_dpms,
.mode_fixup = mdp4_crtc_mode_fixup,
- .mode_set = mdp4_crtc_mode_set,
+ .mode_set_nofb = mdp4_crtc_mode_set_nofb,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp4_crtc_prepare,
.commit = mdp4_crtc_commit,
- .mode_set_base = mdp4_crtc_mode_set_base,
.load_lut = mdp4_crtc_load_lut,
+ .atomic_check = mdp4_crtc_atomic_check,
+ .atomic_begin = mdp4_crtc_atomic_begin,
+ .atomic_flush = mdp4_crtc_atomic_flush,
};
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
}
if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
- DBG("cancel: %p", file);
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: cancel: %p", mdp4_crtc->name, file);
complete_flip(crtc, file);
}
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
-static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
- struct drm_plane *plane)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
-
- if (mdp4_crtc->planes[pipe_id] == plane)
- return;
-
- mdp4_crtc->planes[pipe_id] = plane;
- blend_setup(crtc);
- if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
- crtc_flush(crtc);
-}
-
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- set_attach(crtc, mdp4_plane_pipe(plane), plane);
-}
-
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- /* don't actually detatch our primary plane: */
- if (to_mdp4_crtc(crtc)->plane == plane)
- return;
- set_attach(crtc, mdp4_plane_pipe(plane), NULL);
-}
-
static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
- int ret;
mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
- if (!mdp4_crtc) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp4_crtc)
+ return ERR_PTR(-ENOMEM);
crtc = &mdp4_crtc->base;
- mdp4_crtc->plane = plane;
mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
spin_lock_init(&mdp4_crtc->cursor.lock);
- ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
- "unref fb", unref_fb_worker);
- if (ret)
- goto fail;
-
- ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+ drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
- INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+ plane->crtc = crtc;
- mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+ mdp4_plane_install_properties(plane, &crtc->base);
return crtc;
-
-fail:
- if (crtc)
- mdp4_crtc_destroy(crtc);
-
- return ERR_PTR(ret);
}
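
The mdp4_crtc.c conversion above drops the per-crtc planes[] bookkeeping and instead walks the planes attached to the crtc's atomic state (drm_atomic_crtc_for_each_plane) when computing flush bits and mixer stages. The standalone sketch below illustrates only the shape of the new crtc_flush() loop; the plain array and the bit assignment in pipe2flush_sketch() are stand-ins for the DRM iteration macro and the real flush register layout.

/* Standalone sketch of the new crtc_flush() logic: walk the planes
 * currently attached to the crtc and OR together their flush bits.
 */
#include <stdint.h>
#include <stdio.h>

enum pipe_sketch { VG1, VG2, RGB1, RGB2, NPIPES };

static uint32_t pipe2flush_sketch(enum pipe_sketch p)
{
	return 1u << p;	/* illustrative bit assignment */
}

int main(void)
{
	/* planes attached to this crtc in the current atomic state: */
	enum pipe_sketch attached[] = { RGB1, VG1 };
	uint32_t flush = 0;
	unsigned i;

	for (i = 0; i < sizeof(attached) / sizeof(attached[0]); i++)
		flush |= pipe2flush_sketch(attached[i]);

	printf("flush=0x%08x\n", (unsigned)flush);
	return 0;
}
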
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 79d804e61cc4..a62109e4ae0d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_panel *panel;
- struct hdmi *hdmi;
int ret;
/* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
priv->encoders[priv->num_encoders++] = encoder;
- hdmi = hdmi_init(dev, encoder);
- if (IS_ERR(hdmi)) {
- ret = PTR_ERR(hdmi);
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
+ if (priv->hdmi) {
+ /* Construct bridge/connector for HDMI: */
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
}
return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
+ /* NOTE: driver for this regulator still missing upstream.. use
+ * _get_exclusive() and ignore the error if it does not exist
+ * (and hope that the bootloader left it on for us)
+ */
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
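
The note added above explains why the vdd regulator error is swallowed: the regulator driver is not yet upstream, so the code requests it exclusively and, if that fails, carries on assuming the bootloader left it enabled. A minimal standalone sketch of that optional-resource fallback follows; get_regulator_sketch() is a stand-in, not the regulator API.

/* Standalone sketch of the "optional resource" fallback: if the
 * getter fails because no driver exists yet, treat the resource as
 * absent and keep going.
 */
#include <stdio.h>

struct regulator_sketch { const char *name; };

static struct regulator_sketch *get_regulator_sketch(const char *name)
{
	(void)name;
	return NULL;	/* pretend no driver is available upstream yet */
}

int main(void)
{
	struct regulator_sketch *vdd = get_regulator_sketch("vdd");

	if (!vdd)	/* treat the error as "not present" */
		printf("vdd regulator missing, assuming bootloader left it on\n");
	return 0;
}
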
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 9ff6e7ccfe90..cbd77bc626d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
int rev;
- /* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
- * crtcs/encoders is in one shared register, we need to update it
- * via read/modify/write. But to avoid getting confused by power-
- * on-default values after resume, use this shadow value instead:
- */
- uint32_t mixer_cfg;
-
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
void mdp4_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 310034688c15..4ddc28e1275b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 66f33dba1ebb..1e5ebe83647d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -31,47 +31,26 @@ struct mdp4_plane {
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
- struct msm_drm_private *priv = plane->dev->dev_private;
- return to_mdp4_kms(to_mdp_kms(priv->kms));
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
- mdp4_plane->enabled = true;
-
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- return mdp4_plane_mode_set(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
-}
+ uint32_t src_w, uint32_t src_h);
-static int mdp4_plane_disable(struct drm_plane *plane)
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- DBG("%s: disable", mdp4_plane->name);
- if (plane->crtc)
- mdp4_crtc_detach(plane->crtc, plane);
- return 0;
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
}
static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- mdp4_plane_disable(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
}
static const struct drm_plane_funcs mdp4_plane_funcs = {
- .update_plane = mdp4_plane_update,
- .disable_plane = mdp4_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp4_plane_destroy,
.set_property = mdp4_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-void mdp4_plane_set_scanout(struct drm_plane *plane,
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+ DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->id);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+ DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->id);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ int ret;
+
+ ret = mdp4_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+ .prepare_fb = mdp4_plane_prepare_fb,
+ .cleanup_fb = mdp4_plane_cleanup_fb,
+ .atomic_check = mdp4_plane_atomic_check,
+ .atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mdp4_pipe pipe = mdp4_plane->pipe;
- uint32_t iova;
+ uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
+
+ DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
+ iova, fb->pitches[0]);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
- msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
-int mdp4_plane_mode_set(struct drm_plane *plane,
+static int mdp4_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ if (!(crtc && fb)) {
+ DBG("%s: disabled!", mdp4_plane->name);
+ return 0;
+ }
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
- /* TODO detach from old crtc (if we had more than one) */
- mdp4_crtc_attach(crtc, plane);
-
return 0;
}
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
ARRAY_SIZE(mdp4_plane->formats));
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats,
- type);
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats, type);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
mdp4_plane_install_properties(plane, &plane->base);
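
With the plane converted to the atomic helpers above, the per-plane work is driven through the helper vtable: prepare_fb pins the framebuffer, atomic_check validates the new state, atomic_update programs the hardware, and cleanup_fb unpins once the old buffer is no longer scanned out. The sketch below is a self-contained illustration of that call order only; the ops table and fb type are stand-ins for the DRM structures, not the driver's.

/* Standalone sketch of the plane helper flow wired up above:
 * prepare_fb -> atomic_check -> atomic_update -> cleanup_fb.
 */
#include <stdio.h>

struct fb_sketch { unsigned id; };

struct plane_helper_sketch {
	int  (*prepare_fb)(struct fb_sketch *fb);
	int  (*atomic_check)(struct fb_sketch *fb);
	void (*atomic_update)(struct fb_sketch *fb);
	void (*cleanup_fb)(struct fb_sketch *fb);
};

static int  prep(struct fb_sketch *fb)   { printf("prepare FB[%u]\n", fb->id); return 0; }
static int  check(struct fb_sketch *fb)  { (void)fb; return 0; }
static void update(struct fb_sketch *fb) { printf("scan out FB[%u]\n", fb->id); }
static void clean(struct fb_sketch *fb)  { printf("cleanup FB[%u]\n", fb->id); }

static const struct plane_helper_sketch helpers = {
	.prepare_fb = prep, .atomic_check = check,
	.atomic_update = update, .cleanup_fb = clean,
};

int main(void)
{
	struct fb_sketch fb = { .id = 42 };

	if (helpers.prepare_fb(&fb) || helpers.atomic_check(&fb))
		return 1;
	helpers.atomic_update(&fb);
	helpers.cleanup_fb(&fb);	/* after the old fb is off the scanout */
	return 0;
}
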
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 67f4f896ba8c..e87ef5512cb0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000000..b0a44310cf2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+ int revision;
+ struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+ .name = "msm8x74",
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ },
+ .intf = {
+ .count = 4,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ },
+ .max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+ .name = "apq8084",
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
+ .reserved[CID_RGB0] = 2,
+ .reserved[CID_RGB1] = 2,
+ .reserved[CID_RGB2] = 2,
+ .reserved[CID_RGB3] = 2,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03200, 0x03600 },
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13500, 0x13700, 0x13900 },
+ },
+ .intf = {
+ .count = 5,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+ { .revision = 0, .config = { .hw = &msm8x74_config } },
+ { .revision = 2, .config = { .hw = &msm8x74_config } },
+ { .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+ kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp5_cfg_handler *cfg_handler;
+ struct mdp5_cfg_platform *pconfig;
+ int i, ret = 0;
+
+ cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ if (unlikely(!cfg_handler)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (major != 1) {
+ dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* the hw can only be accessed once the mdp5_cfg global pointer is initialized */
+ for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+ if (cfg_handlers[i].revision != minor)
+ continue;
+ mdp5_cfg = cfg_handlers[i].config.hw;
+
+ break;
+ }
+ if (unlikely(!mdp5_cfg)) {
+ dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ cfg_handler->revision = minor;
+ cfg_handler->config.hw = mdp5_cfg;
+
+ pconfig = mdp5_get_config(pdev);
+ memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+ DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+ return cfg_handler;
+
+fail:
+ if (cfg_handler)
+ mdp5_cfg_destroy(cfg_handler);
+
+ return NULL;
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+ static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#endif
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+ return &config;
+}
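
As an illustrative sketch (not part of the patch): a hypothetical caller named example_probe_cfg() showing how the handler above would be created and queried, assuming the usual msm DBG() helper is available.

static int example_probe_cfg(struct mdp5_kms *mdp5_kms,
		uint32_t major, uint32_t minor)
{
	struct mdp5_cfg_handler *cfg_handler;
	const struct mdp5_cfg_hw *hw_cfg;

	/* pick the per-revision hw config (returns NULL on failure here) */
	cfg_handler = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR_OR_NULL(cfg_handler))
		return -ENXIO;

	/* query the selected table, e.g. to size the CTL pool */
	hw_cfg = mdp5_cfg_get_hw_config(cfg_handler);
	DBG("%s: %d CTLs, first at 0x%05x", hw_cfg->name,
			hw_cfg->ctl.count, hw_cfg->ctl.base[0]);

	mdp5_cfg_destroy(cfg_handler);
	return 0;
}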
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000000..dba4d52cceeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL 8
+#define MAX_BASES 8
+#define MAX_SMP_BLOCKS 44
+#define MAX_CLIENTS 32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+ int count; \
+ uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t nb_stages; /* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+ int mmb_count; /* number of SMP MMBs */
+ int mmb_size; /* MMB: size in bytes */
+ mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+ int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+ char *name;
+
+ struct mdp5_smp_block smp;
+ struct mdp5_sub_block ctl;
+ struct mdp5_sub_block pipe_vig;
+ struct mdp5_sub_block pipe_rgb;
+ struct mdp5_sub_block pipe_dma;
+ struct mdp5_lm_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block intf;
+
+ uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+ struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+ const struct mdp5_cfg_hw *hw;
+ struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
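
For reference, a per-SoC table built from these structures mirrors the entries in mdp5_cfg.c; the trimmed example below uses an invented SoC name and invented counts purely for illustration.

static const struct mdp5_cfg_hw example_soc_config = {
	.name = "example-soc",		/* hypothetical SoC */
	.smp = {
		.mmb_count = 22,	/* shared memory blocks ... */
		.mmb_size = 4096,	/* ... of 4kB each */
	},
	.ctl = {
		.count = 2,
		.base = { 0x00600, 0x00700 },
	},
	.lm = {
		.count = 2,
		.base = { 0x03200, 0x03600 },
		.nb_stages = 5,		/* blend stages per layer mixer */
	},
	.max_clk = 200000000,
};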
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index ebe2e60f3ab1..0e9a2e3a82d7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -17,43 +18,35 @@
#include "mdp5_kms.h"
+#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
+#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
struct mdp5_crtc {
struct drm_crtc base;
char name[8];
- struct drm_plane *plane;
- struct drm_plane *planes[8];
int id;
bool enabled;
- /* which mixer/encoder we route output to: */
- int mixer;
+ /* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
+ int lm;
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
+
+ /* CTL used for this CRTC: */
+ struct mdp5_ctl *ctl;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we logically (from PoV of KMS API) hold a ref
- * to. Which we may not yet be scanning out (we may still
- * be scanning out previous in case of page_flip while waiting
- * for gpu rendering to complete:
- */
- struct drm_framebuffer *fb;
-
- /* the fb that we currently hold a scanout ref to: */
- struct drm_framebuffer *scanout_fb;
-
- /* for unref'ing framebuffers after scanout completes: */
- struct drm_flip_work unref_fb_work;
-
struct mdp_irq vblank;
struct mdp_irq err;
};
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int id = mdp5_crtc->id;
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
- struct drm_plane *plane = mdp5_crtc->planes[i];
- if (plane) {
- enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
- flush |= pipe2flush(pipe);
- }
- }
- flush |= mixer2flush(mdp5_crtc->id);
- flush |= MDP5_CTL_FLUSH_CTL;
-
- DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
- mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
-}
+#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp5_crtc->fb;
-
- /* grab reference to incoming scanout fb: */
- drm_framebuffer_reference(new_fb);
- mdp5_crtc->base.primary->fb = new_fb;
- mdp5_crtc->fb = new_fb;
- if (old_fb)
- drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+ DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+ mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this. Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
+/*
+ * Flush updates, to make sure the hw is updated to the new scanout fb,
+ * so that we can safely queue an unref of the current fb (ie. at the
+ * next vblank we know the hw is done with the previous scanout_fb).
*/
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+static void crtc_flush_all(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct drm_plane *plane;
+ uint32_t flush_mask = 0;
- /* flush updates, to make sure hw is updated to new scanout fb,
- * so that we can safely queue unref to current fb (ie. next
- * vblank we know hw is done w/ previous scanout_fb).
- */
- crtc_flush(crtc);
-
- if (mdp5_crtc->scanout_fb)
- drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
- mdp5_crtc->scanout_fb);
+ /* we could have already released CTL in the disable path: */
+ if (!mdp5_crtc->ctl)
+ return;
- mdp5_crtc->scanout_fb = fb;
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ flush_mask |= mdp5_plane_get_flush(plane);
+ }
+ flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+ flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
- unsigned long flags, i;
+ struct drm_plane *plane;
+ unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
*/
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp5_crtc->name, event);
drm_send_vblank_event(dev, mdp5_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
- struct drm_plane *plane = mdp5_crtc->planes[i];
- if (plane)
- mdp5_plane_complete_flip(plane);
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ mdp5_plane_complete_flip(plane);
}
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp5_crtc *mdp5_crtc =
- container_of(cb, struct mdp5_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp5_crtc->base;
- struct drm_framebuffer *fb = mdp5_crtc->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
- struct mdp5_crtc *mdp5_crtc =
- container_of(work, struct mdp5_crtc, unref_fb_work);
- struct drm_device *dev = mdp5_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
kfree(mdp5_crtc);
}
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
} else {
+ /* set STAGE_UNUSED for all layers */
+ mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
mdp5_disable(mdp5_kms);
}
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane exactly
+ * covers the CRTC resolution.
+ */
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int id = mdp5_crtc->id;
+ struct drm_plane *plane;
+ const struct mdp5_cfg_hw *hw_cfg;
+ uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+ unsigned long flags;
+#define blender(stage) ((stage) - STAGE_BASE)
- /*
- * Hard-coded setup for now until I figure out how the
- * layer-mixer works
- */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* LM[id]: */
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
- MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
- MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
- MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
- MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
-
- /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
- * we want to be setting CTL[m].LAYER[n]. Not sure what the
- * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
- * used when chaining up mixers for high resolution displays?
- */
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+ /* ctl could be released already when we are shutting down: */
+ if (!mdp5_crtc->ctl)
+ goto out;
- /* CTL[id]: */
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
- MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
- MDP5_CTL_LAYER_REG_BORDER_COLOR);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp_mixer_stage_id stage =
+ to_mdp5_plane_state(plane->state)->stage;
+
+ /*
+ * Note: This cannot happen with current implementation but
+ * we need to check this condition once z property is added
+ */
+ BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+ /* LM */
+ mdp5_write(mdp5_kms,
+ REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+ MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+ blender(stage)), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+ blender(stage)), 0x00);
+ /* CTL */
+ blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+ DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+ pipe2name(mdp5_plane_pipe(plane)), stage);
+ }
+
+ DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+ mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
-static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int ret;
+ unsigned long flags;
+ struct drm_display_mode *mode;
- mode = adjusted_mode;
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp5_crtc->name, ret);
- return ret;
- }
-
- mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
-
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
-
- return 0;
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s", mdp5_crtc->name);
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- crtc_flush(crtc);
+ crtc_flush_all(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp5_disable(get_kms(crtc));
}
-static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+struct plane_state {
+ struct drm_plane *plane;
+ struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+ return pa->state->zpos - pb->state->zpos;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- struct drm_plane *plane = mdp5_crtc->plane;
- struct drm_display_mode *mode = &crtc->mode;
- int ret;
-
- /* grab extra ref for update_scanout() */
- drm_framebuffer_reference(crtc->primary->fb);
-
- ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- drm_framebuffer_unreference(crtc->primary->fb);
- return ret;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct drm_device *dev = crtc->dev;
+ struct plane_state pstates[STAGE3 + 1];
+ int cnt = 0, i;
+
+ DBG("%s: check", mdp5_crtc->name);
+
+ if (mdp5_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
}
- update_fb(crtc, crtc->primary->fb);
- update_scanout(crtc, crtc->primary->fb);
+ /* request a free CTL, if none is already allocated for this CRTC */
+ if (state->enable && !mdp5_crtc->ctl) {
+ mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
+ if (WARN_ON(!mdp5_crtc->ctl))
+ return -EINVAL;
+ }
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct drm_plane_state *pstate;
+
+ if (cnt >= ARRAY_SIZE(pstates)) {
+ dev_err(dev->dev, "too many planes!\n");
+ return -EINVAL;
+ }
+
+ pstate = state->state->plane_states[drm_plane_index(plane)];
+
+ /* plane might not have changed, in which case take
+ * current state:
+ */
+ if (!pstate)
+ pstate = plane->state;
+
+ pstates[cnt].plane = plane;
+ pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+ cnt++;
+ }
+
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+ for (i = 0; i < cnt; i++) {
+ pstates[i].state->stage = STAGE_BASE + i;
+ DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
+ pipe2name(mdp5_plane_pipe(pstates[i].plane)),
+ pstates[i].state->stage);
+ }
return 0;
}
-static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ DBG("%s: begin", mdp5_crtc->name);
}
-static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
+ DBG("%s: flush", mdp5_crtc->name);
- obj = msm_framebuffer_bo(new_fb, 0);
+ WARN_ON(mdp5_crtc->event);
spin_lock_irqsave(&dev->event_lock, flags);
- mdp5_crtc->event = event;
+ mdp5_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, new_fb);
+ blend_setup(crtc);
+ crtc_flush_all(crtc);
+ request_pending(crtc, PENDING_FLIP);
- return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+ if (mdp5_crtc->ctl && !crtc->state->enable) {
+ mdp5_ctl_release(mdp5_crtc->ctl);
+ mdp5_crtc->ctl = NULL;
+ }
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
+ .set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
- .page_flip = mdp5_crtc_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.set_property = mdp5_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.dpms = mdp5_crtc_dpms,
.mode_fixup = mdp5_crtc_mode_fixup,
- .mode_set = mdp5_crtc_mode_set,
+ .mode_set_nofb = mdp5_crtc_mode_set_nofb,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = mdp5_crtc_prepare,
.commit = mdp5_crtc_commit,
- .mode_set_base = mdp5_crtc_mode_set_base,
.load_lut = mdp5_crtc_load_lut,
+ .atomic_check = mdp5_crtc_atomic_check,
+ .atomic_begin = mdp5_crtc_atomic_begin,
+ .atomic_flush = mdp5_crtc_atomic_flush,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
struct drm_crtc *crtc = &mdp5_crtc->base;
- struct msm_drm_private *priv = crtc->dev->dev_private;
unsigned pending;
mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
if (pending & PENDING_FLIP) {
complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
}
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
- struct drm_crtc *crtc = &mdp5_crtc->base;
+
DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
- crtc_flush(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- static const enum mdp5_intfnum intfnum[] = {
- INTF0, INTF1, INTF2, INTF3,
- };
+ uint32_t flush_mask = 0;
uint32_t intf_sel;
+ unsigned long flags;
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
if (!mdp5_kms)
return;
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
break;
}
- blend_setup(crtc);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+ mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+ flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+ flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
- MDP5_CTL_OP_MODE(MODE_NONE) |
- MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
-
- crtc_flush(crtc);
+ crtc_flush(crtc, flush_mask);
}
-static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
- struct drm_plane *plane)
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+ if (WARN_ON(!crtc))
+ return -EINVAL;
- if (mdp5_crtc->planes[pipe_id] == plane)
- return;
-
- mdp5_crtc->planes[pipe_id] = plane;
- blend_setup(crtc);
- if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
- crtc_flush(crtc);
-}
-
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- set_attach(crtc, mdp5_plane_pipe(plane), plane);
-}
-
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
- /* don't actually detatch our primary plane: */
- if (to_mdp5_crtc(crtc)->plane == plane)
- return;
- set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+ return mdp5_crtc->lm;
}
/* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
- int ret;
mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
- if (!mdp5_crtc) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp5_crtc)
+ return ERR_PTR(-ENOMEM);
crtc = &mdp5_crtc->base;
- mdp5_crtc->plane = plane;
mdp5_crtc->id = id;
+ mdp5_crtc->lm = GET_LM_ID(id);
+
+ spin_lock_init(&mdp5_crtc->lm_lock);
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
pipe2name(mdp5_plane_pipe(plane)), id);
- ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
- "unref fb", unref_fb_worker);
- if (ret)
- goto fail;
-
- INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+ plane->crtc = crtc;
- mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+ mdp5_plane_install_properties(plane, &crtc->base);
return crtc;
-
-fail:
- if (crtc)
- mdp5_crtc_destroy(crtc);
-
- return ERR_PTR(ret);
}
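
A small sketch of the zpos-to-stage mapping done in mdp5_crtc_atomic_check() above; example_stage_assignment() and the zpos values are hypothetical, only sort(), ARRAY_SIZE() and STAGE_BASE come from the code.

static int zpos_cmp(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void example_stage_assignment(void)
{
	int zpos[] = { 7, 0, 5 };	/* arbitrary userspace-assigned zpos */
	int i;

	/* same pattern as pstate_cmp(): sort by zpos, lowest first */
	sort(zpos, ARRAY_SIZE(zpos), sizeof(zpos[0]), zpos_cmp, NULL);

	/* consecutive stages are then handed out from STAGE_BASE up */
	for (i = 0; i < ARRAY_SIZE(zpos); i++)
		DBG("zpos %d -> stage %d", zpos[i], STAGE_BASE + i);
}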
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..dea4505ac963
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all CRTCs.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths
+ *
+ * In certain use cases (high-resolution dual pipe), a single CTL can be
+ * shared across multiple CRTCs.
+ *
+ * Because the number of CTLs can be less than the number of CRTCs,
+ * CTLs are dynamically allocated from a pool, only when a CRTC requests
+ * one (see mdp5_crtc_atomic_check()).
+ */
+
+struct mdp5_ctl {
+ struct mdp5_ctl_manager *ctlm;
+
+ u32 id;
+
+ /* whether this CTL has been allocated or not: */
+ bool busy;
+
+ /* memory output connection (@see mdp5_ctl_mode): */
+ u32 mode;
+
+ /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+ spinlock_t hw_lock;
+ u32 reg_offset;
+
+ /* flush mask used to commit CTL registers */
+ u32 flush_mask;
+
+ bool cursor_on;
+
+ struct drm_crtc *crtc;
+};
+
+struct mdp5_ctl_manager {
+ struct drm_device *dev;
+
+ /* number of CTL / Layer Mixers in this hw config: */
+ u32 nlm;
+ u32 nctl;
+
+ /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+ spinlock_t pool_lock;
+ struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+ struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+ mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
+ return mdp5_read(mdp5_kms, reg);
+}
+
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
+{
+ unsigned long flags;
+ static const enum mdp5_intfnum intfnum[] = {
+ INTF0, INTF1, INTF2, INTF3,
+ };
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
+ MDP5_CTL_OP_MODE(ctl->mode) |
+ MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 blend_cfg;
+ int lm;
+
+ lm = mdp5_crtc_get_lm(ctl->crtc);
+ if (unlikely(WARN_ON(lm < 0))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+ ctl->id, lm);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+
+ if (enable)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->cursor_on = enable;
+
+ return 0;
+}
+
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+{
+ unsigned long flags;
+
+ if (ctl->cursor_on)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+
+ if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
+ int lm = mdp5_crtc_get_lm(ctl->crtc);
+
+ if (unlikely(WARN_ON(lm < 0))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+ ctl->id, lm);
+ return -EINVAL;
+ }
+
+ /* for current targets, cursor bit is the same as LM bit */
+ flush_mask |= mdp_ctl_flush_mask_lm(lm);
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ return 0;
+}
+
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
+{
+ return ctl->flush_mask;
+}
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+
+ if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
+ ctl->id, ctl->busy);
+ return;
+ }
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ ctl->busy = false;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+
+ DBG("CTL %d released", ctl->id);
+}
+
+/*
+ * mdp5_ctlm_request() - CTL dynamic allocation
+ *
+ * Note: the current implementation assumes only one CRTC per CTL
+ *
+ * @return first free CTL
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+ struct drm_crtc *crtc)
+{
+ struct mdp5_ctl *ctl = NULL;
+ unsigned long flags;
+ int c;
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if (!ctl_mgr->ctls[c].busy)
+ break;
+
+ if (unlikely(c >= ctl_mgr->nctl)) {
+ dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ goto unlock;
+ }
+
+ ctl = &ctl_mgr->ctls[c];
+
+ ctl->crtc = crtc;
+ ctl->busy = true;
+ DBG("CTL %d allocated", ctl->id);
+
+unlock:
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ return ctl;
+}
+
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
+{
+ unsigned long flags;
+ int c;
+
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+}
+
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
+{
+ kfree(ctl_mgr);
+}
+
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+{
+ struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+ unsigned long flags;
+ int c, ret;
+
+ ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ if (!ctl_mgr) {
+ dev_err(dev->dev, "failed to allocate CTL manager\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
+ dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ ctl_cfg->count);
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ /* initialize the CTL manager: */
+ ctl_mgr->dev = dev;
+ ctl_mgr->nlm = hw_cfg->lm.count;
+ ctl_mgr->nctl = ctl_cfg->count;
+ spin_lock_init(&ctl_mgr->pool_lock);
+
+ /* initialize each CTL of the pool: */
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ if (WARN_ON(!ctl_cfg->base[c])) {
+ dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ctl->ctlm = ctl_mgr;
+ ctl->id = c;
+ ctl->mode = MODE_NONE;
+ ctl->reg_offset = ctl_cfg->base[c];
+ ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
+ ctl->busy = false;
+ spin_lock_init(&ctl->hw_lock);
+ }
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+ return ctl_mgr;
+
+fail:
+ if (ctl_mgr)
+ mdp5_ctlm_destroy(ctl_mgr);
+
+ return ERR_PTR(ret);
+}
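
An illustrative sketch of the CTL lifecycle exposed by this file, as a CRTC would typically use it; example_ctl_lifecycle() itself is hypothetical, the called functions are the ones defined above.

static void example_ctl_lifecycle(struct mdp5_ctl_manager *ctlm,
		struct drm_crtc *crtc, u32 lm, enum mdp5_intf intf)
{
	struct mdp5_ctl *ctl;
	u32 flush_mask;

	/* grab a free CTL from the pool (one per CRTC for now) */
	ctl = mdp5_ctlm_request(ctlm, crtc);
	if (WARN_ON(!ctl))
		return;

	mdp5_ctl_set_intf(ctl, intf);	/* route the data path to an interface */
	mdp5_ctl_blend(ctl, lm, 0);	/* all stages unused in this sketch */

	/* latch the double-buffered registers for the CTL and its LM */
	flush_mask = mdp5_ctl_get_flush(ctl) | mdp_ctl_flush_mask_lm(lm);
	mdp5_ctl_commit(ctl, flush_mask);

	/* return the CTL to the pool once the CRTC is torn down */
	mdp5_ctl_release(ctl);
}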
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..1018519b6af2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
+
+/* @blend_cfg: see LM blender config definition below */
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
+
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
+
+/*
+ * blend_cfg (LM blender config):
+ *
+ * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
+ * are blended according to their stage (z-order), through the @blend_cfg argument.
+ */
+static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ default: return 0;
+ }
+}
+
+/*
+ * flush_mask (CTL flush masks):
+ *
+ * The following functions allow each DRM entity to get and store
+ * its own flush mask.
+ * Once stored, these masks are accessed through each DRM entity's
+ * interface and used by the caller of mdp5_ctl_commit() to specify
+ * which block(s) need to be flushed through the @flush_mask parameter.
+ */
+
+#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
+
+static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ /* TODO: use id once multiple cursor support is present */
+ (void)cursor_id;
+
+ return MDP5_CTL_FLUSH_CURSOR_DUMMY;
+}
+
+static inline u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ default: return 0;
+ }
+}
+
+#endif /* __MDP5_CTL_H__ */
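
A worked example of the two encodings defined in this header; the particular pipe/stage combination is invented for illustration, the helpers and register masks are the ones above.

/* RGB0 as the base layer with VIG0 blended on top of it: */
static u32 example_blend_cfg(void)
{
	return mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
	       mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);
}

/* and the matching flush mask to commit that setup on layer mixer 'lm': */
static u32 example_flush_mask(struct mdp5_ctl *ctl, int lm)
{
	return mdp5_ctl_get_flush(ctl) |
	       mdp_ctl_flush_mask_lm(lm) |
	       mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
	       mdp_ctl_flush_mask_pipe(SSPP_VIG0);
}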
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..0254bfdeb92f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
struct drm_encoder base;
int intf;
enum mdp5_intf intf_id;
+ spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
struct mdp5_kms *mdp5_kms = get_kms(encoder);
int intf = mdp5_encoder->intf;
bool enabled = (mode == DRM_MODE_DPMS_ON);
+ unsigned long flags;
DBG("mode=%d", mode);
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
if (enabled) {
bs_set(mdp5_encoder, 1);
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
} else {
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled,
+ * otherwise we end up in a bad state if we re-enable
+ * before the disable latches, which results in some of
+ * the settings for the new modeset (like the new
+ * scanout buffer) not latching properly.
+ */
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+
bs_set(mdp5_encoder, 0);
}
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
uint32_t format;
+ unsigned long flags;
mode = adjusted_mode;
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
}
static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
mdp5_encoder->intf_id = intf_id;
encoder = &mdp5_encoder->base;
+ spin_lock_init(&mdp5_encoder->intf_lock);
+
drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index f2b985bc2adf..70ac81edd40f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
VERB("intr=%08x", intr);
- if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+ if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
+ intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+ }
- if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
- hdmi_irq(0, mdp5_kms->hdmi);
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+ generic_handle_irq(irq_find_mapping(
+ mdp5_kms->irqcontroller.domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
return IRQ_HANDLED;
}
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
+ * can register to get their irqs delivered
+ */
+
+#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
+ MDP5_HW_INTR_STATUS_INTR_DSI1 | \
+ MDP5_HW_INTR_STATUS_INTR_HDMI | \
+ MDP5_HW_INTR_STATUS_INTR_EDP)
+
+static void mdp5_hw_mask_irq(struct irq_data *irqd)
+{
+ struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void mdp5_hw_unmask_irq(struct irq_data *irqd)
+{
+ struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip mdp5_hw_irq_chip = {
+ .name = "mdp5",
+ .irq_mask = mdp5_hw_mask_irq,
+ .irq_unmask = mdp5_hw_unmask_irq,
+};
+
+static int mdp5_hw_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct mdp5_kms *mdp5_kms = d->host_data;
+
+ if (!(VALID_IRQS & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, mdp5_kms);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
+ .map = mdp5_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
+{
+ struct device *dev = mdp5_kms->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32,
+ &mdp5_hw_irqdomain_ops, mdp5_kms);
+ if (!d) {
+ dev_err(dev, "mdp5 irq domain add failed\n");
+ return -ENXIO;
+ }
+
+ mdp5_kms->irqcontroller.enabled_mask = 0;
+ mdp5_kms->irqcontroller.domain = d;
+
+ return 0;
+}
+
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
+{
+ if (mdp5_kms->irqcontroller.domain) {
+ irq_domain_remove(mdp5_kms->irqcontroller.domain);
+ mdp5_kms->irqcontroller.domain = NULL;
+ }
+}
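
An illustrative sketch of how a sub-block driver could hook into the irq domain registered above; example_request_mdp_irq(), the flags and the "mdp5-hdmi" name are hypothetical (and <linux/interrupt.h> is assumed), while the domain and the HDMI status bit come from the code.

static int example_request_mdp_irq(struct mdp5_kms *mdp5_kms,
		irq_handler_t handler, void *data)
{
	unsigned int irq;

	/* MDP5_HW_INTR_STATUS_INTR_HDMI is one of the VALID_IRQS bits */
	irq = irq_create_mapping(mdp5_kms->irqcontroller.domain,
			ffs(MDP5_HW_INTR_STATUS_INTR_HDMI) - 1);
	if (!irq)
		return -EINVAL;

	/* from here on it is a regular linux irq, dispatched by mdp5_irq() */
	return devm_request_irq(mdp5_kms->dev->dev, irq, handler,
			0, "mdp5-hdmi", data);
}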
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 31a2c6331a1d..a11f1b80c488 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
"mdp_0",
};
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-
-const struct mdp5_config *mdp5_cfg;
-
-static const struct mdp5_config msm8x74_config = {
- .name = "msm8x74",
- .ctl = {
- .count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
- },
- .pipe_vig = {
- .count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
- },
- .pipe_rgb = {
- .count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
- },
- .pipe_dma = {
- .count = 2,
- .base = { 0x02a00, 0x02e00 },
- },
- .lm = {
- .count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
- },
- .dspp = {
- .count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
- },
- .ad = {
- .count = 2,
- .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
- },
- .intf = {
- .count = 4,
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
- },
-};
-
-static const struct mdp5_config apq8084_config = {
- .name = "apq8084",
- .ctl = {
- .count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
- },
- .pipe_vig = {
- .count = 4,
- .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
- },
- .pipe_rgb = {
- .count = 4,
- .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
- },
- .pipe_dma = {
- .count = 2,
- .base = { 0x03200, 0x03600 },
- },
- .lm = {
- .count = 6,
- .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
- },
- .dspp = {
- .count = 4,
- .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
-
- },
- .ad = {
- .count = 3,
- .base = { 0x13500, 0x13700, 0x13900 },
- },
- .intf = {
- .count = 5,
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
- },
-};
-
-struct mdp5_config_entry {
- int revision;
- const struct mdp5_config *config;
-};
-
-static const struct mdp5_config_entry mdp5_configs[] = {
- { .revision = 0, .config = &msm8x74_config },
- { .revision = 2, .config = &msm8x74_config },
- { .revision = 3, .config = &apq8084_config },
-};
-
-static int mdp5_select_hw_cfg(struct msm_kms *kms)
-{
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_device *dev = mdp5_kms->dev;
- uint32_t version, major, minor;
- int i, ret = 0;
-
- mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
- mdp5_disable(mdp5_kms);
-
- major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
- minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
-
- DBG("found MDP5 version v%d.%d", major, minor);
-
- if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- mdp5_kms->rev = minor;
-
- /* only after mdp5_cfg global pointer's init can we access the hw */
- for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
- if (mdp5_configs[i].revision != minor)
- continue;
- mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
- break;
- }
- if (unlikely(!mdp5_kms->hw_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
- major, minor);
- ret = -ENXIO;
- goto out;
- }
-
- DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
-
- return 0;
-out:
- return ret;
-}
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
- int i;
+ unsigned long flags;
pm_runtime_get_sync(dev->dev);
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
* care.
*/
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
- for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+ mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev->dev);
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
+ mdp5_irq_domain_fini(mdp5_kms);
+
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
kfree(mdp5_kms);
}
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe crtcs[] = {
SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
+ static const enum mdp5_pipe pub_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
+ const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
- /* construct CRTCs: */
- for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* register our interrupt-controller for hdmi/eDP/dsi/etc
+ * to use for irqs routed through mdp:
+ */
+ ret = mdp5_irq_domain_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ /* construct CRTCs and their private planes: */
+ for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
- plane = mdp5_plane_init(dev, crtcs[i], true);
+ plane = mdp5_plane_init(dev, crtcs[i], true,
+ hw_cfg->pipe_rgb.base[i]);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
+ /* Construct public planes: */
+ for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
+ struct drm_plane *plane;
+
+ plane = mdp5_plane_init(dev, pub_planes[i], false,
+ hw_cfg->pipe_vig.base[i]);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ dev_err(dev->dev, "failed to construct %s plane: %d\n",
+ pipe2name(pub_planes[i]), ret);
+ goto fail;
+ }
+ }
+
/* Construct encoder for HDMI: */
encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
- mdp5_kms->hdmi = hdmi_init(dev, encoder);
- if (IS_ERR(mdp5_kms->hdmi)) {
- ret = PTR_ERR(mdp5_kms->hdmi);
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
+ if (priv->hdmi) {
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ goto fail;
+ }
}
return 0;
@@ -337,6 +244,21 @@ fail:
return ret;
}
+static void read_hw_revision(struct mdp5_kms *mdp5_kms,
+ uint32_t *major, uint32_t *minor)
+{
+ uint32_t version;
+
+ mdp5_enable(mdp5_kms);
+ version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ mdp5_disable(mdp5_kms);
+
+ *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+ DBG("MDP5 version v%d.%d", *major, *minor);
+}
+
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
- struct mdp5_platform_config *config = mdp5_get_config(pdev);
+ struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
+ uint32_t major, minor;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
+ spin_lock_init(&mdp5_kms->resource_lock);
+
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
kms = &mdp5_kms->base.base;
mdp5_kms->dev = dev;
- mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (ret)
goto fail;
- ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ /* we need to set a default rate before enabling. Set a safe
+ * rate first, then figure out hw revision, and then set a
+ * more optimal rate:
+ */
+ clk_set_rate(mdp5_kms->src_clk, 200000000);
+
+ read_hw_revision(mdp5_kms, &major, &minor);
- ret = mdp5_select_hw_cfg(kms);
- if (ret)
+ mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+ if (IS_ERR(mdp5_kms->cfg)) {
+ ret = PTR_ERR(mdp5_kms->cfg);
+ mdp5_kms->cfg = NULL;
goto fail;
+ }
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+ /* TODO: compute core clock rate at runtime */
+ clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ if (IS_ERR(mdp5_kms->smp)) {
+ ret = PTR_ERR(mdp5_kms->smp);
+ mdp5_kms->smp = NULL;
+ goto fail;
+ }
+
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+ if (IS_ERR(mdp5_kms->ctlm)) {
+ ret = PTR_ERR(mdp5_kms->ctlm);
+ mdp5_kms->ctlm = NULL;
+ goto fail;
+ }
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+ for (i = 0; i < config->hw->intf.count; i++)
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
- if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
+ if (config->platform.iommu) {
+ mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
mdp5_destroy(kms);
return ERR_PTR(ret);
}
-
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
-{
- static struct mdp5_platform_config config = {};
-#ifdef CONFIG_OF
- /* TODO */
-#endif
- config.iommu = iommu_domain_alloc(&platform_bus_type);
- /* TODO hard-coded in downstream mdss, but should it be? */
- config.max_clk = 200000000;
- /* TODO get from DT: */
- config.smp_blk_cnt = 22;
-
- return &config;
-}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 5bf340dd0f00..dd69c77c0d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
-/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
-#define MDP5_MAX_BASES 8
-struct mdp5_sub_block {
- int count;
- uint32_t base[MDP5_MAX_BASES];
-};
-struct mdp5_config {
- char *name;
- struct mdp5_sub_block ctl;
- struct mdp5_sub_block pipe_vig;
- struct mdp5_sub_block pipe_rgb;
- struct mdp5_sub_block pipe_dma;
- struct mdp5_sub_block lm;
- struct mdp5_sub_block dspp;
- struct mdp5_sub_block ad;
- struct mdp5_sub_block intf;
-};
-extern const struct mdp5_config *mdp5_cfg;
+#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
+#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
struct drm_device *dev;
- int rev;
- const struct mdp5_config *hw_cfg;
+ struct mdp5_cfg_handler *cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_mmu *mmu;
- /* for tracking smp allocation amongst pipes: */
- mdp5_smp_state_t smp_state;
- struct mdp5_client_smp_state smp_client_state[CID_MAX];
- int smp_blk_cnt;
+ struct mdp5_smp *smp;
+ struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
struct clk *lut_clk;
struct clk *vsync_clk;
- struct hdmi *hdmi;
+ /*
+ * lock to protect access to global resources: ie., following register:
+ * - REG_MDP5_DISP_INTF_SEL
+ */
+ spinlock_t resource_lock;
struct mdp_irq error_handler;
+
+ struct {
+ volatile unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
-/* platform config data (ie. from DT, or pdata) */
-struct mdp5_platform_config {
- struct iommu_domain *iommu;
- uint32_t max_clk;
- int smp_blk_cnt;
+struct mdp5_plane_state {
+ struct drm_plane_state base;
+
+ /* "virtual" zpos.. we calculate actual mixer-stage at runtime
+ * by sorting the attached planes by zpos and then assigning
+ * mixer stage lowest to highest. Private planes get default
+ * zpos of zero, and public planes a unique value that is
+ * greater than zero. This way, things work out if a naive
+ * userspace assigns planes to a crtc without setting zpos.
+ */
+ int zpos;
+
+ /* the actual mixer stage, calculated in crtc->atomic_check()
+ * NOTE: this should move to mdp5_crtc_state, when that exists
+ */
+ enum mdp_mixer_stage_id stage;
+
+ /* some additional transactional status to help us know in the
+ * apply path whether we need to update SMP allocation, and
+ * whether current update is still pending:
+ */
+ bool mode_changed : 1;
+ bool pending : 1;
};
+#define to_mdp5_plane_state(x) \
+ container_of(x, struct mdp5_plane_state, base)
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
return names[pipe];
}
-static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
- case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
- case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
- case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
- case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
- case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
- case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
- case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
- case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
- case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
- default: return 0;
- }
-}
-
static inline int pipe2nclients(enum mdp5_pipe pipe)
{
switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
-{
- WARN_ON(plane >= pipe2nclients(pipe));
- switch (pipe) {
- case SSPP_VIG0: return CID_VIG0_Y + plane;
- case SSPP_VIG1: return CID_VIG1_Y + plane;
- case SSPP_VIG2: return CID_VIG2_Y + plane;
- case SSPP_RGB0: return CID_RGB0;
- case SSPP_RGB1: return CID_RGB1;
- case SSPP_RGB2: return CID_RGB2;
- case SSPP_DMA0: return CID_DMA0_Y + plane;
- case SSPP_DMA1: return CID_DMA1_Y + plane;
- case SSPP_VIG3: return CID_VIG3_Y + plane;
- case SSPP_RGB3: return CID_RGB3;
- default: return CID_UNUSED;
- }
-}
-
-static inline uint32_t mixer2flush(int lm)
-{
- switch (lm) {
- case 0: return MDP5_CTL_FLUSH_LM0;
- case 1: return MDP5_CTL_FLUSH_LM1;
- case 2: return MDP5_CTL_FLUSH_LM2;
- default: return 0;
- }
-}
-
static inline uint32_t intf2err(int intf)
{
switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp5_irq(struct msm_kms *kms);
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
static inline
uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
-void mdp5_plane_set_scanout(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-int mdp5_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane);
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+int mdp5_crtc_get_lm(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
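The "virtual" zpos comment in mdp5_plane_state above describes sorting the attached planes by zpos and handing out mixer stages from lowest to highest. A minimal sketch of that idea, assuming a hypothetical pstates[] array gathered by the crtc and a STAGE_BASE starting stage (neither appears in this diff); it is illustrative only, not the driver's actual crtc code:

static void assign_stages_by_zpos(struct mdp5_plane_state **pstates, int cnt)
{
	int i, j;

	/* insertion sort by zpos, lowest first: */
	for (i = 1; i < cnt; i++) {
		struct mdp5_plane_state *p = pstates[i];

		for (j = i - 1; j >= 0 && pstates[j]->zpos > p->zpos; j--)
			pstates[j + 1] = pstates[j];
		pstates[j + 1] = p;
	}

	/* lowest zpos gets the lowest mixer stage: */
	for (i = 0; i < cnt; i++)
		pstates[i]->stage = STAGE_BASE + i;
}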
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412ad..26e5fdea6594 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -17,6 +18,7 @@
#include "mdp5_kms.h"
+#define MAX_PLANE 4
struct mdp5_plane {
struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
enum mdp5_pipe pipe;
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+ uint32_t reg_offset;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
uint32_t nformats;
uint32_t formats[32];
@@ -31,31 +38,24 @@ struct mdp5_plane {
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+static void set_scanout_locked(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static int mdp5_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static bool plane_enabled(struct drm_plane_state *state)
{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
- mdp5_plane->enabled = true;
-
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- return mdp5_plane_mode_set(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
+ return state->fb && state->crtc;
}
static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i;
DBG("%s: disable", mdp5_plane->name);
- /* update our SMP request to zero (release all our blks): */
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
-
- /* TODO detaching now will cause us not to get the last
- * vblank and mdp5_smp_commit().. so other planes will
- * still see smp blocks previously allocated to us as
- * in-use..
- */
- if (plane->crtc)
- mdp5_crtc_detach(plane->crtc, plane);
+ if (mdp5_kms) {
+ /* Release the memory we requested earlier from the SMP: */
+ mdp5_smp_release(mdp5_kms->smp, pipe);
+ }
return 0;
}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct msm_drm_private *priv = plane->dev->dev_private;
-
- if (priv->kms)
- mdp5_plane_disable(plane);
+ drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
return -EINVAL;
}
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ mdp5_state->zpos = 0;
+ } else {
+ mdp5_state->zpos = 1 + drm_plane_index(plane);
+ }
+
+ plane->state = &mdp5_state->base;
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
+
+	if (mdp5_state->base.fb)
+		drm_framebuffer_reference(mdp5_state->base.fb);
+
+ mdp5_state->mode_changed = false;
+ mdp5_state->pending = false;
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(to_mdp5_plane_state(state));
+}
+
static const struct drm_plane_funcs mdp5_plane_funcs = {
- .update_plane = mdp5_plane_update,
- .disable_plane = mdp5_plane_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.set_property = mdp5_plane_set_property,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
};
-void mdp5_plane_set_scanout(struct drm_plane *plane,
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
- uint32_t iova[4];
- int i;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
- msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
- }
- for (; i < 4; i++)
- iova[i] = 0;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
- MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
- MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
- MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
- MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
-
- plane->fb = fb;
+ DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->id);
}
-/* NOTE: looks like if horizontal decimation is used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width. Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
- uint32_t nplanes, uint32_t width)
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
- struct drm_device *dev = plane->dev;
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i, hsub, nlines, nblks, ret;
- hsub = drm_format_horz_chroma_subsampling(format);
+ DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->id);
+}
- /* different if BWC (compressed framebuffer?) enabled: */
- nlines = 2;
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *old_state = plane->state;
- for (i = 0, nblks = 0; i < nplanes; i++) {
- int n, fetch_stride, cpp;
+ DBG("%s: check (%d -> %d)", mdp5_plane->name,
+ plane_enabled(old_state), plane_enabled(state));
- cpp = drm_format_plane_cpp(format, i);
- fetch_stride = width * cpp / (i ? hsub : 1);
+ if (plane_enabled(state) && plane_enabled(old_state)) {
+ /* we cannot change SMP block configuration during scanout: */
+ bool full_modeset = false;
+ if (state->fb->pixel_format != old_state->fb->pixel_format) {
+ DBG("%s: pixel_format change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (state->src_w != old_state->src_w) {
+ DBG("%s: src_w change!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (to_mdp5_plane_state(old_state)->pending) {
+ DBG("%s: still pending!", mdp5_plane->name);
+ full_modeset = true;
+ }
+ if (full_modeset) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state->mode_changed = true;
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
+ } else {
+ to_mdp5_plane_state(state)->mode_changed = true;
+ }
- n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+ return 0;
+}
- /* for hw rev v1.00 */
- if (mdp5_kms->rev == 0)
- n = roundup_pow_of_two(n);
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ struct drm_plane_state *state = plane->state;
- DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
- ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
- if (ret) {
- dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
- n, ret);
- return ret;
- }
+ DBG("%s: update", mdp5_plane->name);
- nblks += n;
+ if (!plane_enabled(state)) {
+ to_mdp5_plane_state(state)->pending = true;
+ mdp5_plane_disable(plane);
+ } else if (to_mdp5_plane_state(state)->mode_changed) {
+ int ret;
+ to_mdp5_plane_state(state)->pending = true;
+ ret = mdp5_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+ set_scanout_locked(plane, state->fb);
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
}
-
- /* in success case, return total # of blocks allocated: */
- return nblks;
}
-static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+};
+
+static void set_scanout_locked(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t val;
- /* 1/4 of SMP pool that is being fetched */
- val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+			msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ plane->fb = fb;
}
-int mdp5_plane_mode_set(struct drm_plane *plane,
+static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
- int i, nblks;
+ unsigned long flags;
+ int ret;
nplanes = drm_format_num_planes(fb->pixel_format);
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /*
- * Calculate and request required # of smp blocks:
- */
- nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
- if (nblks < 0)
- return nblks;
+ /* Request some memory from the SMP: */
+ ret = mdp5_smp_request(mdp5_kms->smp,
+ mdp5_plane->pipe, fb->pixel_format, src_w);
+ if (ret)
+ return ret;
/*
* Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_configure(mdp5_kms->smp, pipe);
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
/* TODO calc phasey_step, vdecm */
}
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_OUT_XY_X(crtc_x) |
MDP5_PIPE_OUT_XY_Y(crtc_y));
- mdp5_plane_set_scanout(plane, fb);
-
format = to_mdp_format(msm_framebuffer_format(fb));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
- set_fifo_thresholds(plane, nblks);
+ set_scanout_locked(plane, fb);
- /* TODO detach from old crtc (if we had more than one) */
- mdp5_crtc_attach(crtc, plane);
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
- return 0;
+ return ret;
}
void mdp5_plane_complete_flip(struct drm_plane *plane)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
- int i;
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+ enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+ DBG("%s: complete flip", mdp5_plane->name);
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_commit(mdp5_kms->smp, pipe);
+
+ to_mdp5_plane_state(plane->state)->pending = false;
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
return mdp5_plane->pipe;
}
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ return mdp5_plane->flush_mask;
+}
+
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
- enum mdp5_pipe pipe, bool private_plane)
+ enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats));
+ mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+ mdp5_plane->reg_offset = reg_offset;
+ spin_lock_init(&mdp5_plane->pipe_lock);
+
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
mdp5_plane_install_properties(plane, &plane->base);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a6..bf551885e019 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -29,8 +30,11 @@
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
- * For each block, it can be either free, or pending/in-use by a
- * client. The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
* inuse and pending state of all clients..
*/
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ int blk_cnt;
+ int blk_size;
+
+ spinlock_t state_lock;
+ mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+ struct mdp5_client_smp_state client_state[CID_MAX];
+};
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
+ default: return CID_UNUSED;
+ }
+}
/* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
enum mdp5_client_id cid, int nblks)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ const struct mdp5_cfg_hw *hw_cfg;
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ int reserved;
unsigned long flags;
- spin_lock_irqsave(&smp_lock, flags);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ reserved = hw_cfg->smp.reserved[cid];
+
+ spin_lock_irqsave(&smp->state_lock, flags);
- avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ nblks -= reserved;
+ if (reserved)
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+
+ avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
+ dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
ret = -ENOSPC;
goto fail;
}
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ int blk = find_first_zero_bit(smp->state, cnt);
set_bit(blk, ps->pending);
- set_bit(blk, mdp5_kms->smp_state);
+ set_bit(blk, smp->state);
}
} else {
/* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
}
fail:
- spin_unlock_irqrestore(&smp_lock, flags);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ return 0;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines, nblks, ret;
+
+ nplanes = drm_format_num_planes(fmt);
+ hsub = drm_format_horz_chroma_subsampling(fmt);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(fmt, i);
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+
return 0;
}
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+{
+	int i;
+
+	for (i = 0; i < pipe2nclients(pipe); i++)
+ smp_request_block(smp, pipe2client(pipe, i), 0);
+ set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
- int cnt = mdp5_kms->smp_blk_cnt;
- uint32_t blk, val;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int cnt = smp->blk_cnt;
+ u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
}
/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
+ int i;
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
- update_smp_state(mdp5_kms, cid, &assigned);
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(smp, cid, &assigned);
+ }
}
/* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ /*
+		 * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(smp->state, smp->state, released, cnt);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
- unsigned long flags;
-
- spin_lock_irqsave(&smp_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
- released, cnt);
- spin_unlock_irqrestore(&smp_lock, flags);
-
- update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ update_smp_state(smp, CID_UNUSED, &released);
+ }
+
+ bitmap_copy(ps->inuse, ps->pending, cnt);
}
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ spin_lock_init(&smp->state_lock);
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -20,22 +21,26 @@
#include "msm_drv.h"
-#define MAX_SMP_BLOCKS 22
-#define SMP_BLK_SIZE 4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
-
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t pending;
};
struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions.
+ */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */
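As the prototype comment above says, mdp5_smp_init() hands back a handler that the rest of the mdp5_smp_*() calls take as their first argument. A minimal sketch of the request/configure/commit/release flow behind that API, with error handling and the vblank-driven timing of step #3 left out; the real users are the mdp5 plane and crtc code, this is only illustrative:

static int example_smp_flow(struct drm_device *dev,
		const struct mdp5_smp_block *smp_cfg,
		enum mdp5_pipe pipe, u32 fmt, u32 width)
{
	struct mdp5_smp *smp;
	int ret;

	smp = mdp5_smp_init(dev, smp_cfg);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* step #1: reserve blocks when plane scanout is set up: */
	ret = mdp5_smp_request(smp, pipe, fmt, width);
	if (ret)
		goto out;

	/* step #2: program the hw for union(pending, inuse): */
	mdp5_smp_configure(smp, pipe);

	/* step #3: after vblank, promote pending -> inuse: */
	mdp5_smp_commit(smp, pipe);

	/* on plane disable, give the blocks back: */
	mdp5_smp_release(smp, pipe);
out:
	mdp5_smp_destroy(smp);
	return ret;
}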
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 000000000000..f0de412e13dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+
+struct msm_commit {
+ struct drm_atomic_state *state;
+ uint32_t fence;
+ struct msm_fence_cb fence_cb;
+};
+
+static void fence_cb(struct msm_fence_cb *cb);
+
+static struct msm_commit *new_commit(struct drm_atomic_state *state)
+{
+ struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+
+ c->state = state;
+	/* TODO we might need a way to indicate that the cb should run on a
+ * different wq so wait_for_vblanks() doesn't block retiring
+ * bo's..
+ */
+ INIT_FENCE_CB(&c->fence_cb, fence_cb);
+
+ return c;
+}
+
+/* The (potentially) asynchronous part of the commit. At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+ struct drm_atomic_state *state = c->state;
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_pre_planes(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ drm_atomic_helper_commit_post_planes(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ kfree(c);
+}
+
+static void fence_cb(struct msm_fence_cb *cb)
+{
+ struct msm_commit *c =
+ container_of(cb, struct msm_commit, fence_cb);
+ complete_commit(c);
+}
+
+static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+ c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+
+/**
+ * msm_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. Asynchronous commits complete from the fence callback.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int msm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
+ struct msm_commit *c;
+ int nplanes = dev->mode_config.num_total_plane;
+ int i, ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ c = new_commit(state);
+
+ /*
+ * Figure out what fence to wait for:
+ */
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *new_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ if ((plane->state->fb != new_state->fb) && new_state->fb)
+ add_fb(c, new_state->fb);
+ }
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: it must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ if (async) {
+ msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ return 0;
+ }
+
+ ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
+ if (ret) {
+ WARN_ON(ret); // TODO unswap state back? or??
+ kfree(c);
+ return ret;
+ }
+
+ complete_commit(c);
+
+ return 0;
+}
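The async path above relies on the small fence-callback helper added to msm_drv.c: INIT_FENCE_CB() fills in the callback and msm_queue_fence_cb() either queues it for later or runs it right away if the fence has already passed. A stripped-down sketch of that pattern, where struct my_waiter, my_done() and wait_async() are made-up names for illustration (the real user is struct msm_commit above):

struct my_waiter {
	struct msm_fence_cb cb;
};

static void my_done(struct msm_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	/* runs from the driver workqueue once the fence has been reached */
	kfree(w);
}

static int wait_async(struct drm_device *dev, uint32_t fence)
{
	struct my_waiter *w = kzalloc(sizeof(*w), GFP_KERNEL);
	int ret;

	if (!w)
		return -ENOMEM;

	INIT_FENCE_CB(&w->cb, my_done);

	ret = msm_queue_fence_cb(dev, &w->cb, fence);
	if (ret)
		kfree(w);
	return ret;
}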
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b67ef5985125..c795217e1bfc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = msm_atomic_commit,
};
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
}
+ drm_mode_config_reset(dev);
+
#ifdef CONFIG_DRM_MSM_FBDEV
priv->fbdev = msm_fbdev_init(dev);
#endif
@@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&cb->work.entry)) {
+ ret = -EINVAL;
+ } else if (fence > priv->completed_fence) {
+ cb->fence = fence;
+ list_add_tail(&cb->work.entry, &priv->fence_cbs);
+ } else {
+ queue_work(priv->wq, &cb->work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
@@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,
@@ -987,7 +1012,6 @@ static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "msm",
.of_match_table = dt_match,
.pm = &msm_pm_ops,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 67f9d0a2332c..136303818436 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,15 +32,6 @@
#include <linux/types.h>
#include <asm/sizes.h>
-
-#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
-/* stubs we need for compile-test: */
-static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- return NULL;
-}
-#endif
-
#ifndef CONFIG_OF
#include <mach/board.h>
#include <mach/socinfo.h>
@@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
#endif
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
@@ -75,7 +69,12 @@ struct msm_drm_private {
struct msm_kms *kms;
/* subordinate devices, if present: */
- struct platform_device *hdmi_pdev, *gpu_pdev;
+ struct platform_device *gpu_pdev;
+
+ /* possibly this should be in the kms component, but it is
+ * shared by both mdp4 and mdp5..
+ */
+ struct hdmi *hdmi;
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
+int msm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -202,8 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
struct hdmi;
-struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
-irqreturn_t hdmi_irq(int irq, void *dev_id);
+int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+ struct drm_encoder *encoder);
void __init hdmi_register(void);
void __exit hdmi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 81bafdf19ab3..84dec161d836 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -24,7 +24,7 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[2];
+ struct drm_gem_object *planes[3];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
@@ -87,6 +87,44 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
}
#endif
+/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
+ * to prepare an fb for multiple different initiator 'id's.  But that
+ * should be fine, since only the scanout (mdpN) side of things needs
+ * this; the gpu doesn't care about fb's.
+ */
+int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ uint32_t iova;
+
+ for (i = 0; i < n; i++) {
+ ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ for (i = 0; i < n; i++)
+ msm_gem_put_iova(msm_fb->planes[i], id);
+}
+
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ if (!msm_fb->planes[plane])
+ return 0;
+ return msm_gem_iova(msm_fb->planes[plane], id);
+}
+
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@@ -166,6 +204,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
+ if (n > ARRAY_SIZE(msm_fb->planes)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
for (i = 0; i < n; i++) {
unsigned int width = mode_cmd->width / (i ? hsub : 1);
unsigned int height = mode_cmd->height / (i ? vsub : 1);
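The prepare/iova/cleanup trio above is meant to bracket scanout: prepare pins and maps the fb's bo's for the given mapper id, iova is then a cheap lookup while that pin is held, and cleanup drops it again. A small sketch of that pairing, assuming a single-plane fb; program_scanout_addr() is a placeholder, not a real driver function:

static void program_scanout_addr(uint32_t iova)
{
	/* placeholder: the real code writes the PIPE_SRCn_ADDR registers here */
}

static int example_scanout(struct drm_framebuffer *fb, int id)
{
	int ret;

	/* pin the backing bo's and map them for scanout: */
	ret = msm_framebuffer_prepare(fb, id);
	if (ret)
		return ret;

	/* valid only while the prepare above is still held: */
	program_scanout_addr(msm_framebuffer_iova(fb, id, 0));

	/* once the hw has stopped scanning out of this fb: */
	msm_framebuffer_cleanup(fb, id);

	return 0;
}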
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ab5bfd2d0ebf..94d55e526b4e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
uint32_t paddr;
int ret, size;
- sizes->surface_bpp = 32;
- sizes->surface_depth = 24;
-
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4b1b82adabde..4a6f0e49d5b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return ret;
}
+/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
return ret;
}
+/* get iova without taking a reference, used in places where you have
+ * already done a 'msm_gem_get_iova()'.
+ */
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_obj->domain[id].iova);
+ return msm_obj->domain[id].iova;
+}
+
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
// XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (is_active(msm_obj)) {
- cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ uint32_t fence = msm_gem_fence(msm_obj,
+ MSM_PREP_READ | MSM_PREP_WRITE);
+ return msm_queue_fence_cb(obj->dev, cb, fence);
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
int ret = 0;
if (is_active(msm_obj)) {
- uint32_t fence = 0;
+ uint32_t fence = msm_gem_fence(msm_obj, op);
- if (op & MSM_PREP_READ)
- fence = msm_obj->write_fence;
- if (op & MSM_PREP_WRITE)
- fence = max(fence, msm_obj->read_fence);
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = (uint32_t)mmap_offset(obj);
+ uint32_t offset = msm_obj->domain[id].iova;
mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
}
}
- drm_gem_free_mmap_offset(obj);
-
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
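The iova helpers in this file follow a get/peek/put pattern: msm_gem_get_iova() pins and takes a reference, msm_gem_iova() is the reference-free lookup for paths that already hold one, and msm_gem_put_iova() is the matching release. A short sketch of that pairing; use_iova() is a placeholder, not a driver function:

static void use_iova(uint32_t iova)
{
	/* placeholder for programming the address into hw */
}

static int example_iova_usage(struct drm_gem_object *obj, int id)
{
	uint32_t iova;
	int ret;

	ret = msm_gem_get_iova(obj, id, &iova);	/* takes a reference */
	if (ret)
		return ret;

	/* later, where a reference is known to be held already: */
	use_iova(msm_gem_iova(obj, id));

	msm_gem_put_iova(obj, id);		/* matching put */

	return 0;
}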
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index bfb052688f8e..8fbbd0594c46 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+ uint32_t op)
+{
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+
+ return fence;
+}
+
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ad772fe36115..dd7a7ab603e2 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* TODO msm_gem_vunmap() */
}
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ mutex_unlock(&obj->dev->struct_mutex);
+ if (ret < 0)
+ return ret;
+
+ return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 12c24c8abf7f..6461e3565afe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,17 +41,28 @@ nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/fan.o
nouveau-y += core/subdev/bios/gpio.o
nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/image.o
nouveau-y += core/subdev/bios/init.o
nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/npde.o
+nouveau-y += core/subdev/bios/pcir.o
nouveau-y += core/subdev/bios/perf.o
nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/pmu.o
nouveau-y += core/subdev/bios/ramcfg.o
nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/shadow.o
+nouveau-y += core/subdev/bios/shadowacpi.o
+nouveau-y += core/subdev/bios/shadowof.o
+nouveau-y += core/subdev/bios/shadowpci.o
+nouveau-y += core/subdev/bios/shadowramin.o
+nouveau-y += core/subdev/bios/shadowrom.o
nouveau-y += core/subdev/bios/timing.o
nouveau-y += core/subdev/bios/therm.o
nouveau-y += core/subdev/bios/vmap.o
nouveau-y += core/subdev/bios/volt.o
nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bios/M0203.o
nouveau-y += core/subdev/bios/M0205.o
nouveau-y += core/subdev/bios/M0209.o
nouveau-y += core/subdev/bios/P0260.o
@@ -86,6 +97,7 @@ nouveau-y += core/subdev/devinit/nva3.o
nouveau-y += core/subdev/devinit/nvaf.o
nouveau-y += core/subdev/devinit/nvc0.o
nouveau-y += core/subdev/devinit/gm107.o
+nouveau-y += core/subdev/devinit/gm204.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
@@ -129,6 +141,7 @@ nouveau-y += core/subdev/fb/ramgk20a.o
nouveau-y += core/subdev/fb/ramgm107.o
nouveau-y += core/subdev/fb/sddr2.o
nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr3.o
nouveau-y += core/subdev/fb/gddr5.o
nouveau-y += core/subdev/fuse/base.o
nouveau-y += core/subdev/fuse/g80.o
@@ -147,6 +160,7 @@ nouveau-y += core/subdev/i2c/bit.o
nouveau-y += core/subdev/i2c/pad.o
nouveau-y += core/subdev/i2c/padnv04.o
nouveau-y += core/subdev/i2c/padnv94.o
+nouveau-y += core/subdev/i2c/padgm204.o
nouveau-y += core/subdev/i2c/nv04.o
nouveau-y += core/subdev/i2c/nv4e.o
nouveau-y += core/subdev/i2c/nv50.o
@@ -154,6 +168,7 @@ nouveau-y += core/subdev/i2c/nv94.o
nouveau-y += core/subdev/i2c/nvd0.o
nouveau-y += core/subdev/i2c/gf117.o
nouveau-y += core/subdev/i2c/nve0.o
+nouveau-y += core/subdev/i2c/gm204.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
nouveau-y += core/subdev/ibus/gk20a.o
@@ -211,6 +226,7 @@ nouveau-y += core/subdev/vm/nvc0.o
nouveau-y += core/subdev/volt/base.o
nouveau-y += core/subdev/volt/gpio.o
nouveau-y += core/subdev/volt/nv40.o
+nouveau-y += core/subdev/volt/gk20a.o
nouveau-y += core/engine/falcon.o
nouveau-y += core/engine/xtensa.o
@@ -254,6 +270,7 @@ nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/nve0.o
nouveau-y += core/engine/disp/nvf0.o
nouveau-y += core/engine/disp/gm107.o
+nouveau-y += core/engine/disp/gm204.o
nouveau-y += core/engine/disp/dacnv50.o
nouveau-y += core/engine/disp/dport.o
nouveau-y += core/engine/disp/hdanva3.o
@@ -266,6 +283,7 @@ nouveau-y += core/engine/disp/piornv50.o
nouveau-y += core/engine/disp/sornv50.o
nouveau-y += core/engine/disp/sornv94.o
nouveau-y += core/engine/disp/sornvd0.o
+nouveau-y += core/engine/disp/sorgm204.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index a490b805d7e3..13f816cb08bd 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -222,116 +222,3 @@ nouveau_handle_put(struct nouveau_handle *handle)
if (handle)
nouveau_namedb_put(handle);
}
-
-int
-nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
- u16 _oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *engctx = NULL;
- struct nouveau_object *object = NULL;
- struct nouveau_object *engine;
- struct nouveau_oclass *oclass;
- struct nouveau_handle *handle;
- int ret;
-
- /* lookup parent object and ensure it *is* a parent */
- parent = nouveau_handle_ref(client, _parent);
- if (!parent) {
- nv_error(client, "parent 0x%08x not found\n", _parent);
- return -ENOENT;
- }
-
- if (!nv_iclass(parent, NV_PARENT_CLASS)) {
- nv_error(parent, "cannot have children\n");
- ret = -EINVAL;
- goto fail_class;
- }
-
- /* check that parent supports the requested subclass */
- ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
- if (ret) {
- nv_debug(parent, "illegal class 0x%04x\n", _oclass);
- goto fail_class;
- }
-
- /* make sure engine init has been completed *before* any objects
- * it controls are created - the constructors may depend on
- * state calculated at init (ie. default context construction)
- */
- if (engine) {
- ret = nouveau_object_inc(engine);
- if (ret)
- goto fail_class;
- }
-
- /* if engine requires it, create a context object to insert
- * between the parent and its children (eg. PGRAPH context)
- */
- if (engine && nv_engine(engine)->cclass) {
- ret = nouveau_object_ctor(parent, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
- if (ret)
- goto fail_engctx;
- } else {
- nouveau_object_ref(parent, &engctx);
- }
-
- /* finally, create new object and bind it to its handle */
- ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
- *pobject = object;
- if (ret)
- goto fail_ctor;
-
- ret = nouveau_object_inc(object);
- if (ret)
- goto fail_init;
-
- ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
- if (ret)
- goto fail_handle;
-
- ret = nouveau_handle_init(handle);
- if (ret)
- nouveau_handle_destroy(handle);
-
-fail_handle:
- nouveau_object_dec(object, false);
-fail_init:
- nouveau_object_ref(NULL, &object);
-fail_ctor:
- nouveau_object_ref(NULL, &engctx);
-fail_engctx:
- if (engine)
- nouveau_object_dec(engine, false);
-fail_class:
- nouveau_object_ref(NULL, &parent);
- return ret;
-}
-
-int
-nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *namedb = NULL;
- struct nouveau_handle *handle = NULL;
-
- parent = nouveau_handle_ref(client, _parent);
- if (!parent)
- return -ENOENT;
-
- namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
- if (namedb) {
- handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
- if (handle) {
- nouveau_namedb_put(handle);
- nouveau_handle_fini(handle, false);
- nouveau_handle_destroy(handle);
- }
- }
-
- nouveau_object_ref(NULL, &parent);
- return handle ? 0 : -EINVAL;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 0ef5a5713182..137e0b0faeae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,6 +29,7 @@
#include <nvif/unpack.h>
#include <nvif/class.h>
+#include <subdev/bios.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
@@ -138,7 +139,7 @@ nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
}
args->v0.chipset = device->chipset;
- args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+ args->v0.revision = device->chiprev;
if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
else args->v0.ram_size = args->v0.ram_user = 0;
if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
@@ -222,6 +223,7 @@ static const u64 disable_map[] = {
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
[NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
@@ -235,6 +237,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
[NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
+ [NVDEV_ENGINE_COPY2] = NV_DEVICE_V0_DISABLE_COPY1,
[NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
[NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
@@ -352,12 +355,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
+ device->chiprev = (boot0 & 0x000000ff);
switch (device->chipset & 0x1f0) {
case 0x010: {
if (0x461 & (1 << (device->chipset & 0xf)))
device->card_type = NV_10;
else
device->card_type = NV_11;
+ device->chiprev = 0x00;
break;
}
case 0x020: device->card_type = NV_20; break;
@@ -373,7 +378,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
- case 0x110: device->card_type = GM100; break;
+ case 0x110:
+ case 0x120: device->card_type = GM100; break;
default:
break;
}
@@ -427,6 +433,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
}
nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+ } else
+ if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
+ device->cname = "NULL";
+ device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
}
if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
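A minimal standalone C sketch of the chipset/revision decode that the base.c hunks above add to nouveau_devobj_ctor(): the chipset id comes from bits 20..28 of the PMC_BOOT_0 value and the new chiprev field from its low byte (cleared again for NV1x parts). The sample boot0 value below is hypothetical.

/* sketch only -- mirrors the decode shown in the diff, not driver code */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t boot0   = 0x124000a1;                 /* hypothetical PMC_BOOT_0 readout */
	uint32_t chipset = (boot0 & 0x1ff00000) >> 20; /* 0x124 would be a GM204 */
	uint32_t chiprev = boot0 & 0x000000ff;         /* silicon revision, low byte */

	if ((chipset & 0x1f0) == 0x010)                /* NV1x: revision not reported */
		chiprev = 0x00;

	printf("chipset 0x%03x rev 0x%02x\n", chipset, chiprev);
	return 0;
}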
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index 6295668e29a5..4e74a3376de8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -98,6 +98,49 @@ gm100_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
#endif
break;
+ case 0x124:
+ device->cname = "GM204";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
+#if 0
+ /* looks to be some non-trivial changes */
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ /* priv ring says no to 0x10eb14 writes */
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
+#endif
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &gm204_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &gm204_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &gm204_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+ break;
default:
nv_fatal(device, "unknown Maxwell chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index b1b2e484ecfa..674da1f095b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -179,6 +179,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
break;
case 0xf0:
device->cname = "GK110";
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 39890221b91c..16db08dfba6e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -28,7 +28,7 @@
#include <subdev/bios/init.h>
#include <subdev/i2c.h>
-#include <engine/disp.h>
+#include "nv50.h"
#include <nvif/class.h>
@@ -326,7 +326,7 @@ void
nouveau_dp_train(struct work_struct *w)
{
struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
- struct nouveau_disp *disp = nouveau_disp(outp);
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
const struct dp_rates *cfg = nouveau_dp_rates;
struct dp_state _dp = {
.outp = outp,
@@ -334,8 +334,11 @@ nouveau_dp_train(struct work_struct *w)
u32 datarate = 0;
int ret;
+ if (!outp->base.info.location && priv->sor.magic)
+ priv->sor.magic(&outp->base);
+
/* bring capabilities within encoder limits */
- if (nv_mclass(disp) < GF110_DISP)
+ if (nv_mclass(priv) < GF110_DISP)
outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index b3df3fe2dc09..e2ad0543fb31 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
gm107_disp_sclass[] = {
- { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ gm107_disp_sclass[] = {
};
static struct nouveau_oclass
-gm107_disp_base_oclass[] = {
- { GM107_DISP, &nvd0_disp_base_ofuncs },
+gm107_disp_main_oclass[] = {
+ { GM107_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -72,7 +72,7 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = gm107_disp_base_oclass;
+ nv_engine(priv)->sclass = gm107_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
new file mode 100644
index 000000000000..672ded79b2a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <nvif/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm204_disp_sclass[] = {
+ { GM204_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+ {}
+};
+
+static struct nouveau_oclass
+gm204_disp_main_oclass[] = {
+ { GM204_DISP, &nvd0_disp_main_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int heads = nv_rd32(parent, 0x022448);
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, heads,
+ "PDISP", "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = gm204_disp_main_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+ priv->sclass = gm204_disp_sclass;
+ priv->head.nr = heads;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.magic = gm204_sor_magic;
+ return 0;
+}
+
+struct nouveau_oclass *
+gm204_disp_outp_sclass[] = {
+ &gm204_sor_dp_impl.base.base,
+ NULL
+};
+
+struct nouveau_oclass *
+gm204_disp_oclass = &(struct nv50_disp_impl) {
+ .base.base.handle = NV_ENGINE(DISP, 0x07),
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+ .base.vblank = &nvd0_disp_vblank_func,
+ .base.outp = gm204_disp_outp_sclass,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
+ .mthd.ovly = &nve0_disp_ovly_mthd_chan,
+ .mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2df3a937037d..44a8290aaea5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -88,12 +88,14 @@ nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
+ nv_wr32(priv, 0x610020, 0x00000001 << index);
}
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+ nv_wr32(priv, 0x610020, 0x00000001 << index);
nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
@@ -374,7 +376,7 @@ nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
}
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_base = {
+nv50_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -387,7 +389,7 @@ nv50_disp_mast_mthd_base = {
};
static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_dac = {
+nv50_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
@@ -399,7 +401,7 @@ nv50_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_sor = {
+nv50_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -409,7 +411,7 @@ nv50_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_pior = {
+nv50_disp_core_mthd_pior = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -419,7 +421,7 @@ nv50_disp_mast_mthd_pior = {
};
static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_head = {
+nv50_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
@@ -466,21 +468,21 @@ nv50_disp_mast_mthd_head = {
};
static const struct nv50_disp_mthd_chan
-nv50_disp_mast_mthd_chan = {
+nv50_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv50_disp_mast_mthd_dac },
- { "SOR", 2, &nv50_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv50_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv50_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv50_disp_core_mthd_head },
{}
}
};
int
-nv50_disp_mast_ctor(struct nouveau_object *parent,
+nv50_disp_core_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -509,7 +511,7 @@ nv50_disp_mast_ctor(struct nouveau_object *parent,
}
static int
-nv50_disp_mast_init(struct nouveau_object *object)
+nv50_disp_core_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -546,7 +548,7 @@ nv50_disp_mast_init(struct nouveau_object *object)
}
static int
-nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_core_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -567,11 +569,11 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
}
struct nv50_disp_chan_impl
-nv50_disp_mast_ofuncs = {
- .base.ctor = nv50_disp_mast_ctor,
+nv50_disp_core_ofuncs = {
+ .base.ctor = nv50_disp_core_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nv50_disp_mast_init,
- .base.fini = nv50_disp_mast_fini,
+ .base.init = nv50_disp_core_init,
+ .base.fini = nv50_disp_core_fini,
.base.map = nv50_disp_chan_map,
.base.ntfy = nv50_disp_chan_ntfy,
.base.rd32 = nv50_disp_chan_rd32,
@@ -586,7 +588,7 @@ nv50_disp_mast_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_base = {
+nv50_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -611,7 +613,7 @@ nv50_disp_sync_mthd_base = {
};
const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_image = {
+nv50_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000000,
.data = {
@@ -625,18 +627,18 @@ nv50_disp_sync_mthd_image = {
};
static const struct nv50_disp_mthd_chan
-nv50_disp_sync_mthd_chan = {
+nv50_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv50_disp_sync_mthd_base },
- { "Image", 2, &nv50_disp_sync_mthd_image },
+ { "Global", 1, &nv50_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
int
-nv50_disp_sync_ctor(struct nouveau_object *parent,
+nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -669,8 +671,8 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
}
struct nv50_disp_chan_impl
-nv50_disp_sync_ofuncs = {
- .base.ctor = nv50_disp_sync_ctor,
+nv50_disp_base_ofuncs = {
+ .base.ctor = nv50_disp_base_ctor,
.base.dtor = nv50_disp_dmac_dtor,
.base.init = nv50_disp_dmac_init,
.base.fini = nv50_disp_dmac_fini,
@@ -942,7 +944,7 @@ nv50_disp_curs_ofuncs = {
******************************************************************************/
int
-nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
@@ -974,7 +976,7 @@ nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
}
int
-nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
void *data, u32 size)
{
const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
@@ -1098,7 +1100,7 @@ nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
}
int
-nv50_disp_base_ctor(struct nouveau_object *parent,
+nv50_disp_main_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -1118,7 +1120,7 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
}
void
-nv50_disp_base_dtor(struct nouveau_object *object)
+nv50_disp_main_dtor(struct nouveau_object *object)
{
struct nv50_disp_base *base = (void *)object;
nouveau_ramht_ref(NULL, &base->ramht);
@@ -1126,7 +1128,7 @@ nv50_disp_base_dtor(struct nouveau_object *object)
}
static int
-nv50_disp_base_init(struct nouveau_object *object)
+nv50_disp_main_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -1194,7 +1196,7 @@ nv50_disp_base_init(struct nouveau_object *object)
}
static int
-nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -1207,25 +1209,25 @@ nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
}
struct nouveau_ofuncs
-nv50_disp_base_ofuncs = {
- .ctor = nv50_disp_base_ctor,
- .dtor = nv50_disp_base_dtor,
- .init = nv50_disp_base_init,
- .fini = nv50_disp_base_fini,
- .mthd = nv50_disp_base_mthd,
+nv50_disp_main_ofuncs = {
+ .ctor = nv50_disp_main_ctor,
+ .dtor = nv50_disp_main_dtor,
+ .init = nv50_disp_main_init,
+ .fini = nv50_disp_main_fini,
+ .mthd = nv50_disp_main_mthd,
.ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
-nv50_disp_base_oclass[] = {
- { NV50_DISP, &nv50_disp_base_ofuncs },
+nv50_disp_main_oclass[] = {
+ { NV50_DISP, &nv50_disp_main_ofuncs },
{}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -1974,7 +1976,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->sclass = nv50_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -2007,9 +2009,9 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv50_disp_mast_mthd_chan,
- .mthd.base = &nv50_disp_sync_mthd_chan,
+ .mthd.core = &nv50_disp_core_mthd_chan,
+ .mthd.base = &nv50_disp_base_mthd_chan,
.mthd.ovly = &nv50_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 5279feefec06..7f08078ee925 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -42,6 +42,7 @@ struct nv50_disp_priv {
int (*hda_eld)(NV50_DISP_MTHD_V1);
int (*hdmi)(NV50_DISP_MTHD_V1);
u32 lvdsconf;
+ void (*magic)(struct nvkm_output *);
} sor;
struct {
int nr;
@@ -63,10 +64,10 @@ struct nv50_disp_impl {
} head;
};
-int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_main_mthd(struct nouveau_object *, u32, void *, u32);
-int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
int nv50_dac_power(NV50_DISP_MTHD_V1);
int nv50_dac_sense(NV50_DISP_MTHD_V1);
@@ -169,18 +170,18 @@ struct nv50_disp_mthd_chan {
} data[];
};
-extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
-int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
+int nv50_disp_core_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
-int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
@@ -194,12 +195,12 @@ extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nouveau_ofuncs nv50_disp_main_ofuncs;
+int nv50_disp_main_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
-void nv50_disp_base_dtor(struct nouveau_object *);
-extern struct nouveau_omthds nv50_disp_base_omthds[];
+void nv50_disp_main_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_main_omthds[];
extern struct nouveau_oclass nv50_disp_cclass;
void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
const struct nv50_disp_mthd_chan *);
@@ -207,31 +208,31 @@ void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
extern const struct nvkm_event_func nv50_disp_vblank_func;
-extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
-extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_base_mthd_chan;
extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
-extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv94_disp_core_mthd_chan;
-extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_core_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nvd0_disp_base_ofuncs;
extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nvd0_disp_base_mthd_chan;
extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_main_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
extern const struct nvkm_event_func nvd0_disp_vblank_func;
-extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_core_mthd_chan;
extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
@@ -242,6 +243,10 @@ int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
extern struct nouveau_oclass *nv94_disp_outp_sclass[];
extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+int nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
+void gm204_sor_magic(struct nvkm_output *outp);
+extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d36284715b2a..13eff5e4ee51 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -34,7 +34,7 @@
******************************************************************************/
const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_dac = {
+nv84_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
@@ -46,7 +46,7 @@ nv84_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_head = {
+nv84_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
@@ -98,15 +98,15 @@ nv84_disp_mast_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_mast_mthd_chan = {
+nv84_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv84_disp_mast_mthd_dac },
- { "SOR", 2, &nv50_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv84_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv84_disp_core_mthd_head },
{}
}
};
@@ -116,7 +116,7 @@ nv84_disp_mast_mthd_chan = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nv84_disp_sync_mthd_base = {
+nv84_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -146,12 +146,12 @@ nv84_disp_sync_mthd_base = {
};
const struct nv50_disp_mthd_chan
-nv84_disp_sync_mthd_chan = {
+nv84_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x000540,
.data = {
- { "Global", 1, &nv84_disp_sync_mthd_base },
- { "Image", 2, &nv50_disp_sync_mthd_image },
+ { "Global", 1, &nv84_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
@@ -204,8 +204,8 @@ nv84_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nv84_disp_sclass[] = {
- { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -213,8 +213,8 @@ nv84_disp_sclass[] = {
};
static struct nouveau_oclass
-nv84_disp_base_oclass[] = {
- { G82_DISP, &nv50_disp_base_ofuncs },
+nv84_disp_main_oclass[] = {
+ { G82_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -240,7 +240,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->sclass = nv84_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -268,9 +268,9 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv84_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a117064002b1..2bb7ac5cd0e6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -34,7 +34,7 @@
******************************************************************************/
const struct nv50_disp_mthd_list
-nv94_disp_mast_mthd_sor = {
+nv94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
@@ -44,15 +44,15 @@ nv94_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_chan
-nv94_disp_mast_mthd_chan = {
+nv94_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nv50_disp_mast_mthd_base },
- { "DAC", 3, &nv84_disp_mast_mthd_dac },
- { "SOR", 4, &nv94_disp_mast_mthd_sor },
- { "PIOR", 3, &nv50_disp_mast_mthd_pior },
- { "HEAD", 2, &nv84_disp_mast_mthd_head },
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv84_disp_core_mthd_dac },
+ { "SOR", 4, &nv94_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv84_disp_core_mthd_head },
{}
}
};
@@ -63,8 +63,8 @@ nv94_disp_mast_mthd_chan = {
static struct nouveau_oclass
nv94_disp_sclass[] = {
- { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -72,8 +72,8 @@ nv94_disp_sclass[] = {
};
static struct nouveau_oclass
-nv94_disp_base_oclass[] = {
- { GT206_DISP, &nv50_disp_base_ofuncs },
+nv94_disp_main_oclass[] = {
+ { GT206_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -99,7 +99,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->sclass = nv94_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -134,9 +134,9 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv94_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index c67e68aadd45..b32456c9494f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -80,8 +80,8 @@ nva0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nva0_disp_sclass[] = {
- { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -89,8 +89,8 @@ nva0_disp_sclass[] = {
};
static struct nouveau_oclass
-nva0_disp_base_oclass[] = {
- { GT200_DISP, &nv50_disp_base_ofuncs },
+nva0_disp_main_oclass[] = {
+ { GT200_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -116,7 +116,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->sclass = nva0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -144,9 +144,9 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
- .mthd.core = &nv84_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv84_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nva0_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 22969f355aae..951d79f9b781 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
nva3_disp_sclass[] = {
- { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
- { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+ { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nva3_disp_sclass[] = {
};
static struct nouveau_oclass
-nva3_disp_base_oclass[] = {
- { GT214_DISP, &nv50_disp_base_ofuncs },
+nva3_disp_main_oclass[] = {
+ { GT214_DISP, &nv50_disp_main_ofuncs },
{}
};
@@ -71,7 +71,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->sclass = nva3_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -100,9 +100,9 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
- .mthd.core = &nv94_disp_mast_mthd_chan,
- .mthd.base = &nv84_disp_sync_mthd_chan,
+ .mthd.core = &nv94_disp_core_mthd_chan,
+ .mthd.base = &nv84_disp_base_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
- .head.scanoutpos = nv50_disp_base_scanoutpos,
+ .head.scanoutpos = nv50_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 747e64bb9c06..181a2d57e356 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -51,12 +51,14 @@ nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
+ nv_wr32(priv, 0x61008c, 0x00000001 << index);
}
static void
nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+ nv_wr32(priv, 0x61008c, 0x00000001 << index);
nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
}
@@ -151,7 +153,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
******************************************************************************/
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_base = {
+nvd0_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -164,7 +166,7 @@ nvd0_disp_mast_mthd_base = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_dac = {
+nvd0_disp_core_mthd_dac = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -177,7 +179,7 @@ nvd0_disp_mast_mthd_dac = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_sor = {
+nvd0_disp_core_mthd_sor = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -190,7 +192,7 @@ nvd0_disp_mast_mthd_sor = {
};
const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_pior = {
+nvd0_disp_core_mthd_pior = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
@@ -203,7 +205,7 @@ nvd0_disp_mast_mthd_pior = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_head = {
+nvd0_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -277,21 +279,21 @@ nvd0_disp_mast_mthd_head = {
};
static const struct nv50_disp_mthd_chan
-nvd0_disp_mast_mthd_chan = {
+nvd0_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_mast_mthd_base },
- { "DAC", 3, &nvd0_disp_mast_mthd_dac },
- { "SOR", 8, &nvd0_disp_mast_mthd_sor },
- { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
- { "HEAD", 4, &nvd0_disp_mast_mthd_head },
+ { "Global", 1, &nvd0_disp_core_mthd_base },
+ { "DAC", 3, &nvd0_disp_core_mthd_dac },
+ { "SOR", 8, &nvd0_disp_core_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_core_mthd_pior },
+ { "HEAD", 4, &nvd0_disp_core_mthd_head },
{}
}
};
static int
-nvd0_disp_mast_init(struct nouveau_object *object)
+nvd0_disp_core_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -322,7 +324,7 @@ nvd0_disp_mast_init(struct nouveau_object *object)
}
static int
-nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_dmac *mast = (void *)object;
@@ -344,11 +346,11 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
}
struct nv50_disp_chan_impl
-nvd0_disp_mast_ofuncs = {
- .base.ctor = nv50_disp_mast_ctor,
+nvd0_disp_core_ofuncs = {
+ .base.ctor = nv50_disp_core_ctor,
.base.dtor = nv50_disp_dmac_dtor,
- .base.init = nvd0_disp_mast_init,
- .base.fini = nvd0_disp_mast_fini,
+ .base.init = nvd0_disp_core_init,
+ .base.fini = nvd0_disp_core_fini,
.base.ntfy = nv50_disp_chan_ntfy,
.base.map = nv50_disp_chan_map,
.base.rd32 = nv50_disp_chan_rd32,
@@ -363,7 +365,7 @@ nvd0_disp_mast_ofuncs = {
******************************************************************************/
static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_base = {
+nvd0_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
@@ -413,7 +415,7 @@ nvd0_disp_sync_mthd_base = {
};
static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_image = {
+nvd0_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000400,
.data = {
@@ -427,19 +429,19 @@ nvd0_disp_sync_mthd_image = {
};
const struct nv50_disp_mthd_chan
-nvd0_disp_sync_mthd_chan = {
+nvd0_disp_base_mthd_chan = {
.name = "Base",
.addr = 0x001000,
.data = {
- { "Global", 1, &nvd0_disp_sync_mthd_base },
- { "Image", 2, &nvd0_disp_sync_mthd_image },
+ { "Global", 1, &nvd0_disp_base_mthd_base },
+ { "Image", 2, &nvd0_disp_base_mthd_image },
{}
}
};
struct nv50_disp_chan_impl
-nvd0_disp_sync_ofuncs = {
- .base.ctor = nv50_disp_sync_ctor,
+nvd0_disp_base_ofuncs = {
+ .base.ctor = nv50_disp_base_ctor,
.base.dtor = nv50_disp_dmac_dtor,
.base.init = nvd0_disp_dmac_init,
.base.fini = nvd0_disp_dmac_fini,
@@ -624,7 +626,7 @@ nvd0_disp_curs_ofuncs = {
******************************************************************************/
int
-nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
@@ -656,7 +658,7 @@ nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
}
static int
-nvd0_disp_base_init(struct nouveau_object *object)
+nvd0_disp_main_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -725,7 +727,7 @@ nvd0_disp_base_init(struct nouveau_object *object)
}
static int
-nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_disp_priv *priv = (void *)object->engine;
struct nv50_disp_base *base = (void *)object;
@@ -737,25 +739,25 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
}
struct nouveau_ofuncs
-nvd0_disp_base_ofuncs = {
- .ctor = nv50_disp_base_ctor,
- .dtor = nv50_disp_base_dtor,
- .init = nvd0_disp_base_init,
- .fini = nvd0_disp_base_fini,
- .mthd = nv50_disp_base_mthd,
+nvd0_disp_main_ofuncs = {
+ .ctor = nv50_disp_main_ctor,
+ .dtor = nv50_disp_main_dtor,
+ .init = nvd0_disp_main_init,
+ .fini = nvd0_disp_main_fini,
+ .mthd = nv50_disp_main_mthd,
.ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
-nvd0_disp_base_oclass[] = {
- { GF110_DISP, &nvd0_disp_base_ofuncs },
+nvd0_disp_main_oclass[] = {
+ { GF110_DISP, &nvd0_disp_main_ofuncs },
{}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -1055,6 +1057,9 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
if (nvkm_output_dp_train(outp, pclk, true))
ERR("link not trained before attach\n");
+ } else {
+ if (priv->sor.magic)
+ priv->sor.magic(outp);
}
exec_clkcmp(priv, head, 0, pclk, &conf);
@@ -1063,10 +1068,18 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
data = 0x00000000;
} else {
- if (outp->info.type == DCB_OUTPUT_DP)
- nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+ switch (outp->info.type) {
+ case DCB_OUTPUT_TMDS:
+ nv_mask(priv, addr, 0x007c0000, 0x00280000);
+ break;
+ case DCB_OUTPUT_DP:
+ nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+ break;
+ default:
+ break;
+ }
}
nv_mask(priv, addr, 0x00000707, data);
@@ -1259,7 +1272,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->sclass = nvd0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -1292,9 +1305,9 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nvd0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nvd0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index db144b2cf06b..55debec7e68f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -34,7 +34,7 @@
******************************************************************************/
static const struct nv50_disp_mthd_list
-nve0_disp_mast_mthd_head = {
+nve0_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
@@ -113,15 +113,15 @@ nve0_disp_mast_mthd_head = {
};
const struct nv50_disp_mthd_chan
-nve0_disp_mast_mthd_chan = {
+nve0_disp_core_mthd_chan = {
.name = "Core",
.addr = 0x000000,
.data = {
- { "Global", 1, &nvd0_disp_mast_mthd_base },
- { "DAC", 3, &nvd0_disp_mast_mthd_dac },
- { "SOR", 8, &nvd0_disp_mast_mthd_sor },
- { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
- { "HEAD", 4, &nve0_disp_mast_mthd_head },
+ { "Global", 1, &nvd0_disp_core_mthd_base },
+ { "DAC", 3, &nvd0_disp_core_mthd_dac },
+ { "SOR", 8, &nvd0_disp_core_mthd_sor },
+ { "PIOR", 4, &nvd0_disp_core_mthd_pior },
+ { "HEAD", 4, &nve0_disp_core_mthd_head },
{}
}
};
@@ -200,8 +200,8 @@ nve0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nve0_disp_sclass[] = {
- { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -209,8 +209,8 @@ nve0_disp_sclass[] = {
};
static struct nouveau_oclass
-nve0_disp_base_oclass[] = {
- { GK104_DISP, &nvd0_disp_base_ofuncs },
+nve0_disp_main_oclass[] = {
+ { GK104_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -237,7 +237,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->sclass = nve0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -264,9 +264,9 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 402d7d67d806..3e7e2d28744c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -35,8 +35,8 @@
static struct nouveau_oclass
nvf0_disp_sclass[] = {
- { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
- { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nvf0_disp_sclass[] = {
};
static struct nouveau_oclass
-nvf0_disp_base_oclass[] = {
- { GK110_DISP, &nvd0_disp_base_ofuncs },
+nvf0_disp_main_oclass[] = {
+ { GK110_DISP, &nvd0_disp_main_ofuncs },
{}
};
@@ -72,7 +72,7 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+ nv_engine(priv)->sclass = nvf0_disp_main_oclass;
nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
},
.base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
- .mthd.core = &nve0_disp_mast_mthd_chan,
- .mthd.base = &nvd0_disp_sync_mthd_chan,
+ .mthd.core = &nve0_disp_core_mthd_chan,
+ .mthd.base = &nvd0_disp_base_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
- .head.scanoutpos = nvd0_disp_base_scanoutpos,
+ .head.scanoutpos = nvd0_disp_main_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index a5ff00a9cedc..bbd9b6fdc90f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -85,7 +85,10 @@ nvkm_output_create_(struct nouveau_object *parent,
dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
dcbE->bus, dcbE->heads);
- outp->port = i2c->find(i2c, outp->info.i2c_index);
+ if (outp->info.type != DCB_OUTPUT_DP)
+ outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
+ else
+ outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
outp->edid = outp->port;
data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
new file mode 100644
index 000000000000..0b4fad39e9a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+static inline u32
+gm204_sor_soff(struct nvkm_output_dp *outp)
+{
+ return (ffs(outp->base.info.or) - 1) * 0x800;
+}
+
+static inline u32
+gm204_sor_loff(struct nvkm_output_dp *outp)
+{
+ return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+}
+
+void
+gm204_sor_magic(struct nvkm_output *outp)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = outp->or * 0x100;
+ const u32 data = outp->or + 1;
+ if (outp->info.sorconf.link & 1)
+ nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+ if (outp->info.sorconf.link & 2)
+ nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+}
+
+static inline u32
+gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ return lane * 0x08;
+}
+
+static int
+gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = gm204_sor_soff(outp);
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static int
+gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ const u32 soff = gm204_sor_soff(outp);
+ const u32 loff = gm204_sor_loff(outp);
+ u32 mask = 0, i;
+
+ for (i = 0; i < nr; i++)
+ mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
+ nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
+ nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+static int
+gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+{
+ struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+ const u32 loff = gm204_sor_loff(outp);
+ u32 addr, data[4];
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+ outp->base.info.hashm,
+ &ver, &hdr, &cnt, &len, &info);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
+ &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+ data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+ data[2] = nv_rd32(priv, 0x61c130 + loff);
+ if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+ data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+ nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+ nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+ nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+ data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
+ nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+ return 0;
+}
+
+struct nvkm_output_dp_impl
+gm204_sor_dp_impl = {
+ .base.base.handle = DCB_OUTPUT_DP,
+ .base.base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_output_dp_ctor,
+ .dtor = _nvkm_output_dp_dtor,
+ .init = _nvkm_output_dp_init,
+ .fini = _nvkm_output_dp_fini,
+ },
+ .pattern = gm204_sor_dp_pattern,
+ .lnk_pwr = gm204_sor_dp_lnk_pwr,
+ .lnk_ctl = nvd0_sor_dp_lnk_ctl,
+ .drv_ctl = gm204_sor_dp_drv_ctl,
+};
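A hedged sketch of the DP lane-power mask built by gm204_sor_dp_lnk_pwr() in the new file above: gm204_sor_dp_lane_map() returns lane * 0x08, so shifting that map back down by 3 recovers the lane index, and the mask ends up with one bit per powered lane. The lane count used here is a made-up example.

/* sketch only -- illustrates the mask construction, not driver code */
#include <stdint.h>
#include <stdio.h>

static uint32_t lane_map(uint8_t lane)
{
	return lane * 0x08;			/* mirrors gm204_sor_dp_lane_map() */
}

int main(void)
{
	uint32_t mask = 0;
	int nr = 4;				/* hypothetical: power all four lanes */

	for (int i = 0; i < nr; i++)
		mask |= 1u << (lane_map(i) >> 3);

	printf("lane power mask: 0x%x\n", mask);	/* 0xf for four lanes */
	return 0;
}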
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 7b7bbc3e459e..fdab2939070c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -60,7 +60,7 @@ nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
return 0;
}
-static int
+int
nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
{
struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 3fc4f0b0eaca..19f5f6522962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -51,6 +51,7 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
case GK104_DISP_CORE_CHANNEL_DMA:
case GK110_DISP_CORE_CHANNEL_DMA:
case GM107_DISP_CORE_CHANNEL_DMA:
+ case GM204_DISP_CORE_CHANNEL_DMA:
case GF110_DISP_BASE_CHANNEL_DMA:
case GK104_DISP_BASE_CHANNEL_DMA:
case GK110_DISP_BASE_CHANNEL_DMA:
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index f8734eb74eaa..6a8db7c80bd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -792,7 +792,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
nouveau_engctx_put(engctx);
}
-static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
{ 0x00000004, "MEMACK_EXTRA" },
@@ -827,9 +827,10 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
};
static void
-nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
{
- u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
@@ -840,11 +841,12 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
if (stat & 0x00800000) {
if (!nve0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
}
if (show) {
nv_error(priv, "PBDMA%d:", unit);
- nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
pr_cont("\n");
nv_error(priv,
"PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
@@ -853,10 +855,37 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
subc, mthd, data);
}
- nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
+ { 0x00000001, "HCE_RE_ILLEGAL_OP" },
+ { 0x00000002, "HCE_RE_ALIGNB" },
+ { 0x00000004, "HCE_PRIV" },
+ { 0x00000008, "HCE_ILLEGAL_MTHD" },
+ { 0x00000010, "HCE_ILLEGAL_CLASS" },
+ {}
+};
+
+static void
+nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
+{
+ u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
+ u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+
+ if (stat) {
+ nv_error(priv, "PBDMA%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
+ pr_cont("\n");
+ nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
+ nv_rd32(priv, 0x040150 + (unit * 0x2000)),
+ nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+ }
+
+ nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+}
+
static void
nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
{
@@ -939,7 +968,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
u32 mask = nv_rd32(priv, 0x0025a0);
while (mask) {
u32 unit = __ffs(mask);
- nve0_fifo_intr_pbdma(priv, unit);
+ nve0_fifo_intr_pbdma_0(priv, unit);
+ nve0_fifo_intr_pbdma_1(priv, unit);
nv_wr32(priv, 0x0025a0, (1 << unit));
mask &= ~(1 << unit);
}
@@ -1022,6 +1052,12 @@ nve0_fifo_init(struct nouveau_object *object)
nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
+ /* PBDMA[n].HCE */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+ }
+
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(priv, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 30fd1dc64f93..17251e4b9e86 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1557,7 +1557,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
- return -EINVAL;
+ return -ENODEV;
priv->firmware = true;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 1d9d893929bb..2ec2e50d3676 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -16,6 +16,7 @@ enum nv_subdev_type {
* to during POST.
*/
NVDEV_SUBDEV_DEVINIT,
+ NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_GPIO,
NVDEV_SUBDEV_I2C,
NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
@@ -31,7 +32,6 @@ enum nv_subdev_type {
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
NVDEV_SUBDEV_LTC,
- NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_INSTMEM,
NVDEV_SUBDEV_VM,
NVDEV_SUBDEV_BAR,
@@ -92,6 +92,7 @@ struct nouveau_device {
GM100 = 0x110,
} card_type;
u32 chipset;
+ u8 chiprev;
u32 crystal;
struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
@@ -158,6 +159,12 @@ nv_device_is_pci(struct nouveau_device *device)
return device->pdev != NULL;
}
+static inline bool
+nv_device_is_cpu_coherent(struct nouveau_device *device)
+{
+ return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
+}
+
static inline struct device *
nv_device_base(struct nouveau_device *device)
{
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index ceb67d770875..d22a59138a9b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -23,11 +23,6 @@ void nouveau_handle_destroy(struct nouveau_handle *);
int nouveau_handle_init(struct nouveau_handle *);
int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
-int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **);
-int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
-
struct nouveau_object *
nouveau_handle_ref(struct nouveau_object *, u32 name);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index d7039482d6fd..2e2afa502c99 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -203,21 +203,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
return 0;
}
-#include <core/handle.h>
-
-static inline int
-nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- return nouveau_handle_new(client, parent, handle, oclass,
- data, size, pobject);
-}
-
-static inline int
-nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
-{
- return nouveau_handle_del(client, parent, handle);
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 7a64f347b385..fc307f1317ff 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -31,5 +31,6 @@ extern struct nouveau_oclass *nvd0_disp_oclass;
extern struct nouveau_oclass *nve0_disp_oclass;
extern struct nouveau_oclass *nvf0_disp_oclass;
extern struct nouveau_oclass *gm107_disp_oclass;
+extern struct nouveau_oclass *gm204_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
new file mode 100644
index 000000000000..1f84d3612dd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
@@ -0,0 +1,31 @@
+#ifndef __NVBIOS_M0203_H__
+#define __NVBIOS_M0203_H__
+
+struct nvbios_M0203T {
+#define M0203T_TYPE_RAMCFG 0x00
+ u8 type;
+ u16 pointer;
+};
+
+u32 nvbios_M0203Te(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_M0203Tp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_M0203T *);
+
+struct nvbios_M0203E {
+#define M0203E_TYPE_DDR2 0x0
+#define M0203E_TYPE_DDR3 0x1
+#define M0203E_TYPE_GDDR3 0x2
+#define M0203E_TYPE_GDDR5 0x3
+#define M0203E_TYPE_SKIP 0xf
+ u8 type;
+ u8 strap;
+ u8 group;
+};
+
+u32 nvbios_M0203Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_M0203Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *);
+u32 nvbios_M0203Em(struct nouveau_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 10b57a19a7de..c9bb112895af 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -4,11 +4,14 @@
struct nouveau_bios;
enum dcb_i2c_type {
- DCB_I2C_NV04_BIT = 0,
- DCB_I2C_NV4E_BIT = 4,
- DCB_I2C_NVIO_BIT = 5,
- DCB_I2C_NVIO_AUX = 6,
- DCB_I2C_UNUSED = 0xff
+ /* matches bios type field prior to ccb 4.1 */
+ DCB_I2C_NV04_BIT = 0x00,
+ DCB_I2C_NV4E_BIT = 0x04,
+ DCB_I2C_NVIO_BIT = 0x05,
+ DCB_I2C_NVIO_AUX = 0x06,
+ /* made up - mostly */
+ DCB_I2C_PMGR = 0x80,
+ DCB_I2C_UNUSED = 0xff
};
struct dcb_i2c_entry {
@@ -16,6 +19,7 @@ struct dcb_i2c_entry {
u8 drive;
u8 sense;
u8 share;
+ u8 auxch;
};
u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
new file mode 100644
index 000000000000..3348b4580843
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
@@ -0,0 +1,13 @@
+#ifndef __NVBIOS_IMAGE_H__
+#define __NVBIOS_IMAGE_H__
+
+struct nvbios_image {
+ u32 base;
+ u32 size;
+ u8 type;
+ bool last;
+};
+
+bool nvbios_image(struct nouveau_bios *, int, struct nvbios_image *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
new file mode 100644
index 000000000000..b18413d951e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
@@ -0,0 +1,12 @@
+#ifndef __NVBIOS_NPDE_H__
+#define __NVBIOS_NPDE_H__
+
+struct nvbios_npdeT {
+ u32 image_size;
+ bool last;
+};
+
+u32 nvbios_npdeTe(struct nouveau_bios *, u32);
+u32 nvbios_npdeTp(struct nouveau_bios *, u32, struct nvbios_npdeT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
new file mode 100644
index 000000000000..3d634a06dca1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
@@ -0,0 +1,18 @@
+#ifndef __NVBIOS_PCIR_H__
+#define __NVBIOS_PCIR_H__
+
+struct nvbios_pcirT {
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u32 image_size;
+ u16 image_rev;
+ u8 image_type;
+ bool last;
+};
+
+u32 nvbios_pcirTe(struct nouveau_bios *, u32, u8 *ver, u16 *hdr);
+u32 nvbios_pcirTp(struct nouveau_bios *, u32, u8 *ver, u16 *hdr,
+ struct nvbios_pcirT *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
new file mode 100644
index 000000000000..9de593deaea8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
@@ -0,0 +1,37 @@
+#ifndef __NVBIOS_PMU_H__
+#define __NVBIOS_PMU_H__
+
+struct nvbios_pmuT {
+};
+
+u32 nvbios_pmuTe(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_pmuTp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_pmuT *);
+
+struct nvbios_pmuE {
+ u8 type;
+ u32 data;
+};
+
+u32 nvbios_pmuEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u32 nvbios_pmuEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_pmuE *);
+
+struct nvbios_pmuR {
+ u32 boot_addr_pmu;
+ u32 boot_addr;
+ u32 boot_size;
+ u32 code_addr_pmu;
+ u32 code_addr;
+ u32 code_size;
+ u32 init_addr_pmu;
+
+ u32 data_addr_pmu;
+ u32 data_addr;
+ u32 data_size;
+ u32 args_addr_pmu;
+};
+
+bool nvbios_pmuRm(struct nouveau_bios *, u8 type, struct nvbios_pmuR *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index a685bbd04568..4a0e0ceb41ba 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -43,8 +43,9 @@ struct nvbios_ramcfg {
unsigned ramcfg_10_02_08:1;
unsigned ramcfg_10_02_10:1;
unsigned ramcfg_10_02_20:1;
- unsigned ramcfg_10_02_40:1;
+ unsigned ramcfg_10_DLLoff:1;
unsigned ramcfg_10_03_0f:4;
+ unsigned ramcfg_10_04_01:1;
unsigned ramcfg_10_05:8;
unsigned ramcfg_10_06:8;
unsigned ramcfg_10_07:8;
@@ -95,9 +96,29 @@ struct nvbios_ramcfg {
union {
struct {
unsigned timing_10_WR:8;
+ unsigned timing_10_WTR:8;
unsigned timing_10_CL:8;
+ unsigned timing_10_RC:8;
+ /*empty: 4 */
+ unsigned timing_10_RFC:8; /* Byte 5 */
+ /*empty: 6 */
+ unsigned timing_10_RAS:8; /* Byte 7 */
+ /*empty: 8 */
+ unsigned timing_10_RP:8; /* Byte 9 */
+ unsigned timing_10_RCDRD:8;
+ unsigned timing_10_RCDWR:8;
+ unsigned timing_10_RRD:8;
+ unsigned timing_10_13:8;
unsigned timing_10_ODT:3;
+ /* empty: 15 */
+ unsigned timing_10_16:8;
+ /* empty: 17 */
+ unsigned timing_10_18:8;
unsigned timing_10_CWL:8;
+ unsigned timing_10_20:8;
+ unsigned timing_10_21:8;
+ /* empty: 22, 23 */
+ unsigned timing_10_24:8;
};
struct {
unsigned timing_20_2e_03:2;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index e292271a84e4..e007a9d44683 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -30,5 +30,6 @@ extern struct nouveau_oclass *nva3_devinit_oclass;
extern struct nouveau_oclass *nvaf_devinit_oclass;
extern struct nouveau_oclass *nvc0_devinit_oclass;
extern struct nouveau_oclass *gm107_devinit_oclass;
+extern struct nouveau_oclass *gm204_devinit_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 1b937c2c25ae..d94ccacb40bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -8,6 +8,8 @@
#include <subdev/bios/i2c.h>
#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_AUX(n) (0x10 + (n))
+#define NV_I2C_EXT(n) (0x20 + (n))
#define NV_I2C_DEFAULT(n) (0x80 + (n))
#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
@@ -89,6 +91,7 @@ extern struct nouveau_oclass *nv94_i2c_oclass;
extern struct nouveau_oclass *nvd0_i2c_oclass;
extern struct nouveau_oclass *gf117_i2c_oclass;
extern struct nouveau_oclass *nve0_i2c_oclass;
+extern struct nouveau_oclass *gm204_i2c_oclass;
static inline int
nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index bf3d1f611333..f2427bf5aeed 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -48,6 +48,8 @@ void nouveau_memx_wait(struct nouveau_memx *,
u32 addr, u32 mask, u32 data, u32 nsec);
void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
void nouveau_memx_wait_vblank(struct nouveau_memx *);
+void nouveau_memx_train(struct nouveau_memx *);
+int nouveau_memx_train_result(struct nouveau_pwr *, u32 *, int);
void nouveau_memx_block(struct nouveau_memx *);
void nouveau_memx_unblock(struct nouveau_memx *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
index 820b62ffd75b..67db5e58880d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -52,6 +52,7 @@ int _nouveau_volt_init(struct nouveau_object *);
#define _nouveau_volt_fini _nouveau_subdev_fini
extern struct nouveau_oclass nv40_volt_oclass;
+extern struct nouveau_oclass gk20a_volt_oclass;
int nouveau_voltgpio_init(struct nouveau_volt *);
int nouveau_voltgpio_get(struct nouveau_volt *);
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index ccfa21d72ddc..bdd05ee7ec72 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -23,6 +23,7 @@
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
new file mode 100644
index 000000000000..28906b16d4e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
+
+u32
+nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_M;
+ u32 data = 0x00000000;
+
+ if (!bit_entry(bios, 'M', &bit_M)) {
+ if (bit_M.version == 2 && bit_M.length > 0x04)
+ data = nv_ro16(bios, bit_M.offset + 0x03);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x10:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_M0203T *info)
+{
+ u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->type = nv_ro08(bios, data + 0x04);
+ info->pointer = nv_ro16(bios, data + 0x05);
+ break;
+ default:
+ break;
+ }
+ return data;
+}
+
+u32
+nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len;
+ u32 data = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);
+ if (data && idx < cnt) {
+ data = data + *hdr + idx * len;
+ *hdr = len;
+ return data;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *info)
+{
+ u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ case 0x10:
+ info->type = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
+ info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
+ info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+ return data;
+ default:
+ break;
+ }
+ return 0x00000000;
+}
+
+u32
+nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
+ struct nvbios_M0203E *info)
+{
+ struct nvbios_M0203T M0203T;
+ u8 cnt, len, idx = 0xff;
+ u32 data;
+
+ if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
+ nv_warn(bios, "M0203T not found\n");
+ return 0x00000000;
+ }
+
+ while ((data = nvbios_M0203Ep(bios, ++idx, ver, hdr, info))) {
+ switch (M0203T.type) {
+ case M0203T_TYPE_RAMCFG:
+ if (info->strap != ramcfg)
+ continue;
+ return data;
+ default:
+ nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+ return 0x00000000;
+ }
+ }
+
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index d45704a2c2df..7df3a273553d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -31,6 +31,8 @@
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include "priv.h"
+
u8
nvbios_checksum(const u8 *data, int size)
{
@@ -56,362 +58,21 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
return 0;
}
-#if defined(__powerpc__)
-static void
-nouveau_bios_shadow_of(struct nouveau_bios *bios)
+int
+nvbios_extend(struct nouveau_bios *bios, u32 length)
{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- struct device_node *dn;
- const u32 *data;
- int size;
-
- dn = pci_device_to_OF_node(pdev);
- if (!dn) {
- nv_info(bios, "Unable to get the OF node\n");
- return;
- }
-
- data = of_get_property(dn, "NVDA,BMP", &size);
- if (data && size) {
- bios->size = size;
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data)
- memcpy(bios->data, data, size);
- }
-}
-#endif
-
-static void
-nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
-{
- struct nouveau_device *device = nv_device(bios);
- u64 addr = 0;
- u32 bar0 = 0;
- int i;
-
- if (device->card_type >= NV_50) {
- if (device->card_type >= NV_C0 && device->card_type < GM100) {
- if (nv_rd32(bios, 0x022500) & 0x00000001)
- return;
- } else
- if (device->card_type >= GM100) {
- if (nv_rd32(bios, 0x021c04) & 0x00000001)
- return;
- }
-
- addr = nv_rd32(bios, 0x619f04);
- if (!(addr & 0x00000008)) {
- nv_debug(bios, "... not enabled\n");
- return;
+ if (bios->size < length) {
+ u8 *prev = bios->data;
+ if (!(bios->data = kmalloc(length, GFP_KERNEL))) {
+ bios->data = prev;
+ return -ENOMEM;
}
- if ( (addr & 0x00000003) != 1) {
- nv_debug(bios, "... not in vram\n");
- return;
- }
-
- addr = (addr & 0xffffff00) << 8;
- if (!addr) {
- addr = (u64)nv_rd32(bios, 0x001700) << 16;
- addr += 0xf0000;
- }
-
- bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
- }
-
- /* bail if no rom signature */
- if (nv_rd08(bios, 0x700000) != 0x55 ||
- nv_rd08(bios, 0x700001) != 0xaa)
- goto out;
-
- bios->size = nv_rd08(bios, 0x700002) * 512;
- if (!bios->size)
- goto out;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data) {
- for (i = 0; i < bios->size; i++)
- nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
- }
-
-out:
- if (device->card_type >= NV_50)
- nv_wr32(bios, 0x001700, bar0);
-}
-
-static void
-nouveau_bios_shadow_prom(struct nouveau_bios *bios)
-{
- struct nouveau_device *device = nv_device(bios);
- u32 pcireg, access;
- u16 pcir;
- int i;
-
- /* there is no prom on nv4x IGP's */
- if (device->card_type == NV_40 && device->chipset >= 0x4c)
- return;
-
- /* enable access to rom */
- if (device->card_type >= NV_50)
- pcireg = 0x088050;
- else
- pcireg = 0x001850;
- access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
-
- /* WARNING: PROM accesses should always be 32-bits aligned. Other
- * accesses work on most chipset but do not on Kepler chipsets
- */
-
- /* bail if no rom signature, with a workaround for a PROM reading
- * issue on some chipsets. the first read after a period of
- * inactivity returns the wrong result, so retry the first header
- * byte a few times before giving up as a workaround
- */
- i = 16;
- do {
- u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
- if (data == 0xaa55)
- break;
- } while (i--);
-
- if (!i)
- goto out;
-
- /* read entire bios image to system memory */
- bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
- bios->size = bios->size * 512;
- if (!bios->size)
- goto out;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (!bios->data)
- goto out;
-
- for (i = 0; i < bios->size; i += 4)
- ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
-
- /* check the PCI record header */
- pcir = nv_ro16(bios, 0x0018);
- if (bios->data[pcir + 0] != 'P' ||
- bios->data[pcir + 1] != 'C' ||
- bios->data[pcir + 2] != 'I' ||
- bios->data[pcir + 3] != 'R') {
- bios->size = 0;
- kfree(bios->data);
- }
-
-out:
- /* disable access to rom */
- nv_wr32(bios, pcireg, access);
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev) {
- return false;
-}
-
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
- return -EINVAL;
-}
-#endif
-
-static void
-nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- int ret, cnt, i;
-
- if (!nouveau_acpi_rom_supported(pdev)) {
- bios->data = NULL;
- return;
- }
-
- bios->size = 0;
- bios->data = kmalloc(4096, GFP_KERNEL);
- if (bios->data) {
- if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
- bios->size = bios->data[2] * 512;
- kfree(bios->data);
+ memcpy(bios->data, prev, bios->size);
+ bios->size = length;
+ kfree(prev);
+ return 1;
}
-
- if (!bios->size)
- return;
-
- bios->data = kmalloc(bios->size, GFP_KERNEL);
- if (bios->data) {
- /* disobey the acpi spec - much faster on at least w530 ... */
- ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
- if (ret != bios->size ||
- nvbios_checksum(bios->data, bios->size)) {
- /* ... that didn't work, ok, i'll be good now */
- for (i = 0; i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
- }
- }
- }
-}
-
-static void
-nouveau_bios_shadow_pci(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- size_t size;
-
- if (!pci_enable_rom(pdev)) {
- void __iomem *rom = pci_map_rom(pdev, &size);
- if (rom && size) {
- bios->data = kmalloc(size, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, size);
- bios->size = size;
- }
- }
- if (rom)
- pci_unmap_rom(pdev, rom);
-
- pci_disable_rom(pdev);
- }
-}
-
-static void
-nouveau_bios_shadow_platform(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- size_t size;
-
- void __iomem *rom = pci_platform_rom(pdev, &size);
- if (rom && size) {
- bios->data = kmalloc(size, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, size);
- bios->size = size;
- }
- }
-}
-
-static int
-nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
-{
- if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
- bios->data[1] != 0xAA) {
- nv_info(bios, "... signature not found\n");
- return 0;
- }
-
- if (nvbios_checksum(bios->data,
- min_t(u32, bios->data[2] * 512, bios->size))) {
- nv_info(bios, "... checksum invalid\n");
- /* if a ro image is somewhat bad, it's probably all rubbish */
- return writeable ? 2 : 1;
- }
-
- nv_info(bios, "... appears to be valid\n");
- return 3;
-}
-
-struct methods {
- const char desc[16];
- void (*shadow)(struct nouveau_bios *);
- const bool rw;
- int score;
- u32 size;
- u8 *data;
-};
-
-static int
-nouveau_bios_shadow(struct nouveau_bios *bios)
-{
- struct methods shadow_methods[] = {
-#if defined(__powerpc__)
- { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
-#endif
- { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
- { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
- { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
- { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
- { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
- {}
- };
- struct methods *mthd, *best;
- const struct firmware *fw;
- const char *optarg;
- int optlen, ret;
- char *source;
-
- optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
- source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
- if (source) {
- /* try to match one of the built-in methods */
- mthd = shadow_methods;
- do {
- if (strcasecmp(source, mthd->desc))
- continue;
- nv_info(bios, "source: %s\n", mthd->desc);
-
- mthd->shadow(bios);
- mthd->score = nouveau_bios_score(bios, mthd->rw);
- if (mthd->score) {
- kfree(source);
- return 0;
- }
- } while ((++mthd)->shadow);
-
- /* attempt to load firmware image */
- ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
- if (ret == 0) {
- bios->size = fw->size;
- bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- release_firmware(fw);
-
- nv_info(bios, "image: %s\n", source);
- if (nouveau_bios_score(bios, 1)) {
- kfree(source);
- return 0;
- }
-
- kfree(bios->data);
- bios->data = NULL;
- }
-
- nv_error(bios, "source \'%s\' invalid\n", source);
- kfree(source);
- }
-
- mthd = shadow_methods;
- do {
- nv_info(bios, "checking %s for image...\n", mthd->desc);
- mthd->shadow(bios);
- mthd->score = nouveau_bios_score(bios, mthd->rw);
- mthd->size = bios->size;
- mthd->data = bios->data;
- bios->data = NULL;
- } while (mthd->score != 3 && (++mthd)->shadow);
-
- mthd = shadow_methods;
- best = mthd;
- do {
- if (mthd->score > best->score) {
- kfree(best->data);
- best = mthd;
- }
- } while ((++mthd)->shadow);
-
- if (best->score) {
- nv_info(bios, "using image from %s\n", best->desc);
- bios->size = best->size;
- bios->data = best->data;
- return 0;
- }
-
- nv_error(bios, "unable to locate usable image\n");
- return -EINVAL;
+ return 0;
}
static u8
@@ -472,7 +133,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- ret = nouveau_bios_shadow(bios);
+ ret = nvbios_shadow(bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index bd8d348385b3..96099aff8b41 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -42,7 +42,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
*ver = nv_ro08(bios, dcb);
- if (*ver >= 0x41) {
+ if (*ver >= 0x42) {
nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
return 0x0000;
} else
@@ -157,17 +157,20 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
break;
}
- switch (conf & 0x0f000000) {
- case 0x0f000000:
- outp->dpconf.link_nr = 4;
- break;
- case 0x03000000:
- outp->dpconf.link_nr = 2;
- break;
- case 0x01000000:
- default:
- outp->dpconf.link_nr = 1;
- break;
+ outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+ if (*ver < 0x41) {
+ switch (outp->dpconf.link_nr) {
+ case 0x0f:
+ outp->dpconf.link_nr = 4;
+ break;
+ case 0x03:
+ outp->dpconf.link_nr = 2;
+ break;
+ case 0x01:
+ default:
+ outp->dpconf.link_nr = 1;
+ break;
+ }
}
/* fall-through... */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
index 7f16e52d9bea..51f355599694 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -40,6 +40,7 @@ nvbios_disp_table(struct nouveau_bios *bios,
switch (*ver) {
case 0x20:
case 0x21:
+ case 0x22:
*hdr = nv_ro08(bios, data + 0x01);
*len = nv_ro08(bios, data + 0x02);
*cnt = nv_ro08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index f309dd657250..cef53f81f12b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -41,6 +41,7 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
case 0x21:
case 0x30:
case 0x40:
+ case 0x41:
*hdr = nv_ro08(bios, data + 0x01);
*len = nv_ro08(bios, data + 0x02);
*cnt = nv_ro08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
*cnt = nv_ro08(bios, outp + 0x04);
break;
case 0x40:
+ case 0x41:
*hdr = nv_ro08(bios, data + 0x04);
*cnt = 0;
*len = 0;
@@ -108,6 +110,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
info->script[4] = nv_ro16(bios, data + 0x10);
break;
case 0x40:
+ case 0x41:
info->flags = nv_ro08(bios, data + 0x04);
info->script[0] = nv_ro16(bios, data + 0x05);
info->script[1] = nv_ro16(bios, data + 0x07);
@@ -172,10 +175,11 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
break;
case 0x30:
case 0x40:
+ case 0x41:
info->pc = nv_ro08(bios, data + 0x00);
info->dc = nv_ro08(bios, data + 0x01);
info->pe = nv_ro08(bios, data + 0x02);
- info->tx_pu = nv_ro08(bios, data + 0x03);
+ info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
break;
default:
data = 0x0000;
@@ -194,6 +198,10 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u16 data;
if (*ver >= 0x30) {
+ /*XXX: there's a second set of these on at least 4.1, that
+ * i've witnessed nvidia using instead of the first
+ * on gm204. figure out what/why
+ */
const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
} else {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index b2a676e53580..49285d4f7ca5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -90,7 +90,7 @@ nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
u16 entry;
i = 0;
- while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
+ while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
extdev_parse_entry(bios, entry, func);
if (func->type == type)
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index cfb9288c6d28..282320ba9264 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -39,6 +39,11 @@ dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
i2c = nv_ro16(bios, dcb + 4);
}
+ if (i2c && *ver >= 0x42) {
+ nv_warn(bios, "ccb %02x not supported\n", *ver);
+ return 0x0000;
+ }
+
if (i2c && *ver >= 0x30) {
*ver = nv_ro08(bios, i2c + 0);
*hdr = nv_ro08(bios, i2c + 1);
@@ -70,14 +75,25 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u8 ver, len;
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
- info->type = nv_ro08(bios, ent + 3);
- info->share = DCB_I2C_UNUSED;
- if (ver < 0x30) {
- info->type &= 0x07;
+ if (ver >= 0x41) {
+ if (!(nv_ro32(bios, ent) & 0x80000000))
+ info->type = DCB_I2C_UNUSED;
+ else
+ info->type = DCB_I2C_PMGR;
+ } else
+ if (ver >= 0x30) {
+ info->type = nv_ro08(bios, ent + 0x03);
+ } else {
+ info->type = nv_ro08(bios, ent + 0x03) & 0x07;
if (info->type == 0x07)
info->type = DCB_I2C_UNUSED;
}
+ info->drive = DCB_I2C_UNUSED;
+ info->sense = DCB_I2C_UNUSED;
+ info->share = DCB_I2C_UNUSED;
+ info->auxch = DCB_I2C_UNUSED;
+
switch (info->type) {
case DCB_I2C_NV04_BIT:
info->drive = nv_ro08(bios, ent + 0);
@@ -87,12 +103,23 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
info->drive = nv_ro08(bios, ent + 1);
return 0;
case DCB_I2C_NVIO_BIT:
- case DCB_I2C_NVIO_AUX:
info->drive = nv_ro08(bios, ent + 0) & 0x0f;
- if (nv_ro08(bios, ent + 1) & 0x01) {
- info->share = nv_ro08(bios, ent + 1) >> 1;
- info->share &= 0x0f;
- }
+ if (nv_ro08(bios, ent + 1) & 0x01)
+ info->share = nv_ro08(bios, ent + 1) >> 1;
+ return 0;
+ case DCB_I2C_NVIO_AUX:
+ info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
+ if (nv_ro08(bios, ent + 1) & 0x01)
+ info->share = info->auxch;
+ return 0;
+ case DCB_I2C_PMGR:
+ info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+ if (info->drive == 0x1f)
+ info->drive = DCB_I2C_UNUSED;
+ info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+ if (info->auxch == 0x1f)
+ info->auxch = DCB_I2C_UNUSED;
+ info->share = info->auxch;
return 0;
case DCB_I2C_UNUSED:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
new file mode 100644
index 000000000000..373f9a564ac9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pcir.h>
+#include <subdev/bios/npde.h>
+
+static bool
+nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
+{
+ struct nvbios_pcirT pcir;
+ struct nvbios_npdeT npde;
+ u8 ver;
+ u16 hdr;
+ u32 data;
+
+ switch ((data = nv_ro16(bios, image->base + 0x00))) {
+ case 0xaa55:
+ case 0xbb77:
+ case 0x4e56: /* NV */
+ break;
+ default:
+ nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
+ image->base, data);
+ return false;
+ }
+
+ if (!(data = nvbios_pcirTp(bios, image->base, &ver, &hdr, &pcir)))
+ return false;
+ image->size = pcir.image_size;
+ image->type = pcir.image_type;
+ image->last = pcir.last;
+
+ if (image->type != 0x70) {
+ if (!(data = nvbios_npdeTp(bios, image->base, &npde)))
+ return true;
+ image->size = npde.image_size;
+ image->last = npde.last;
+ } else {
+ image->last = true;
+ }
+
+ return true;
+}
+
+bool
+nvbios_image(struct nouveau_bios *bios, int idx, struct nvbios_image *image)
+{
+ memset(image, 0x00, sizeof(*image));
+ do {
+ image->base += image->size;
+ if (image->last || !nvbios_imagen(bios, image))
+ return false;
+ } while(idx--);
+ return true;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 626380f9e4c0..c6579ef32cd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -255,6 +255,8 @@ init_i2c(struct nvbios_init *init, int index)
}
index = init->outp->i2c_index;
+ if (init->outp->type == DCB_OUTPUT_DP)
+ index += NV_I2C_AUX(0);
}
return i2c->find(i2c, index);
@@ -278,7 +280,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
return -ENODEV;
}
-static int
+static u8
init_rdauxr(struct nvbios_init *init, u32 addr)
{
struct nouveau_i2c_port *port = init_i2c(init, -2);
@@ -286,20 +288,24 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
if (port && init_exec(init)) {
int ret = nv_rdaux(port, addr, &data, 1);
- if (ret)
- return ret;
- return data;
+ if (ret == 0)
+ return data;
+ trace("auxch read failed with %d\n", ret);
}
- return -ENODEV;
+ return 0x00;
}
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
struct nouveau_i2c_port *port = init_i2c(init, -2);
- if (port && init_exec(init))
- return nv_wraux(port, addr, &data, 1);
+ if (port && init_exec(init)) {
+ int ret = nv_wraux(port, addr, &data, 1);
+ if (ret)
+ trace("auxch write failed with %d\n", ret);
+ return ret;
+ }
return -ENODEV;
}
@@ -838,6 +844,40 @@ init_io_or(struct nvbios_init *init)
}
/**
+ * INIT_ANDN_REG - opcode 0x47
+ *
+ */
+static void
+init_andn_reg(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u32 mask = nv_ro32(bios, init->offset + 5);
+
+ trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
+ init->offset += 9;
+
+ init_mask(init, reg, mask, 0);
+}
+
+/**
+ * INIT_OR_REG - opcode 0x48
+ *
+ */
+static void
+init_or_reg(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ u32 reg = nv_ro32(bios, init->offset + 1);
+ u32 mask = nv_ro32(bios, init->offset + 5);
+
+ trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
+ init->offset += 9;
+
+ init_mask(init, reg, 0, mask);
+}
+
+/**
* INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
*
*/
@@ -2068,6 +2108,8 @@ static struct nvbios_init_opcode {
[0x3a] = { init_dp_condition },
[0x3b] = { init_io_mask_or },
[0x3c] = { init_io_or },
+ [0x47] = { init_andn_reg },
+ [0x48] = { init_or_reg },
[0x49] = { init_idx_addr_latched },
[0x4a] = { init_io_restrict_pll2 },
[0x4b] = { init_pll2 },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
new file mode 100644
index 000000000000..d694716a166c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/npde.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
+{
+ struct nvbios_pcirT pcir;
+ u8 ver; u16 hdr;
+ u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
+ if (data = (data + hdr + 0x0f) & ~0x0f, data) {
+ switch (nv_ro32(bios, data + 0x00)) {
+ case 0x4544504e: /* NPDE */
+ break;
+ default:
+ nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
+ data, nv_ro32(bios, data + 0x00));
+ data = 0;
+ break;
+ }
+ }
+ return data;
+}
+
+u32
+nvbios_npdeTp(struct nouveau_bios *bios, u32 base, struct nvbios_npdeT *info)
+{
+ u32 data = nvbios_npdeTe(bios, base);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->image_size = nv_ro16(bios, data + 0x08) * 512;
+ info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
new file mode 100644
index 000000000000..91dae26bc50f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/pcir.h>
+
+u32
+nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
+{
+ u32 data = nv_ro16(bios, base + 0x18);
+ if (data) {
+ data += base;
+ switch (nv_ro32(bios, data + 0x00)) {
+ case 0x52494350: /* PCIR */
+ case 0x53494752: /* RGIS */
+ case 0x5344504e: /* NPDS */
+ *hdr = nv_ro16(bios, data + 0x0a);
+ *ver = nv_ro08(bios, data + 0x0c);
+ break;
+ default:
+ nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
+ data, nv_ro32(bios, data + 0x00));
+ data = 0;
+ break;
+ }
+ }
+ return data;
+}
+
+u32
+nvbios_pcirTp(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr,
+ struct nvbios_pcirT *info)
+{
+ u32 data = nvbios_pcirTe(bios, base, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ if (data) {
+ info->vendor_id = nv_ro16(bios, data + 0x04);
+ info->device_id = nv_ro16(bios, data + 0x06);
+ info->class_code[0] = nv_ro08(bios, data + 0x0d);
+ info->class_code[1] = nv_ro08(bios, data + 0x0e);
+ info->class_code[2] = nv_ro08(bios, data + 0x0f);
+ info->image_size = nv_ro16(bios, data + 0x10) * 512;
+ info->image_rev = nv_ro16(bios, data + 0x12);
+ info->image_type = nv_ro08(bios, data + 0x14);
+ info->last = nv_ro08(bios, data + 0x15) & 0x80;
+ }
+ return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
new file mode 100644
index 000000000000..66c56ba07d1b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+#include <subdev/bios/pmu.h>
+
+static u32
+weirdo_pointer(struct nouveau_bios *bios, u32 data)
+{
+ struct nvbios_image image;
+ int idx = 0;
+ if (nvbios_image(bios, idx++, &image)) {
+ data -= image.size;
+ while (nvbios_image(bios, idx++, &image)) {
+ if (image.type == 0xe0)
+ return image.base + data;
+ }
+ }
+ return 0;
+}
+
+u32
+nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct bit_entry bit_p;
+ u32 data = 0;
+
+ if (!bit_entry(bios, 'p', &bit_p)) {
+ if (bit_p.version == 2 && bit_p.length >= 4)
+ data = nv_ro32(bios, bit_p.offset + 0x00);
+ if ((data = weirdo_pointer(bios, data))) {
+ *ver = nv_ro08(bios, data + 0x00); /* maybe? */
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ }
+ }
+
+ return data;
+}
+
+u32
+nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_pmuT *info)
+{
+ u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ default:
+ break;
+ }
+ return data;
+}
+
+u32
+nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+ u8 cnt, len;
+ u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
+ if (data && idx < cnt) {
+ data = data + *hdr + (idx * len);
+ *hdr = len;
+ return data;
+ }
+ return 0;
+}
+
+u32
+nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+ struct nvbios_pmuE *info)
+{
+ u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
+ memset(info, 0x00, sizeof(*info));
+ switch (!!data * *ver) {
+ default:
+ info->type = nv_ro08(bios, data + 0x00);
+ info->data = nv_ro32(bios, data + 0x02);
+ break;
+ }
+ return data;
+}
+
+bool
+nvbios_pmuRm(struct nouveau_bios *bios, u8 type, struct nvbios_pmuR *info)
+{
+ struct nvbios_pmuE pmuE;
+ u8 ver, hdr, idx = 0;
+ u32 data;
+ memset(info, 0x00, sizeof(*info));
+ while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
+ if ( pmuE.type == type &&
+ (data = weirdo_pointer(bios, pmuE.data))) {
+ info->init_addr_pmu = nv_ro32(bios, data + 0x08);
+ info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+ info->boot_addr = data + 0x30;
+ info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
+ nv_ro32(bios, data + 0x18);
+ info->boot_size = nv_ro32(bios, data + 0x1c) -
+ nv_ro32(bios, data + 0x18);
+ info->code_addr = info->boot_addr + info->boot_size;
+ info->code_addr_pmu = info->boot_addr_pmu +
+ info->boot_size;
+ info->code_size = nv_ro32(bios, data + 0x20);
+ info->data_addr = data + 0x30 +
+ nv_ro32(bios, data + 0x24);
+ info->data_addr_pmu = nv_ro32(bios, data + 0x28);
+ info->data_size = nv_ro32(bios, data + 0x2c);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
new file mode 100644
index 000000000000..187d225bd1e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
@@ -0,0 +1,25 @@
+#ifndef __NVKM_BIOS_PRIV_H__
+#define __NVKM_BIOS_PRIV_H__
+
+#include <subdev/bios.h>
+
+struct nvbios_source {
+ const char *name;
+ void *(*init)(struct nouveau_bios *, const char *);
+ void (*fini)(void *);
+ u32 (*read)(void *, u32 offset, u32 length, struct nouveau_bios *);
+ bool rw;
+};
+
+int nvbios_extend(struct nouveau_bios *, u32 length);
+int nvbios_shadow(struct nouveau_bios *);
+
+extern const struct nvbios_source nvbios_rom;
+extern const struct nvbios_source nvbios_ramin;
+extern const struct nvbios_source nvbios_acpi_fast;
+extern const struct nvbios_source nvbios_acpi_slow;
+extern const struct nvbios_source nvbios_pcirom;
+extern const struct nvbios_source nvbios_platform;
+extern const struct nvbios_source nvbios_of;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
index 6c401f70ab99..1623c8dfe797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/ramcfg.h>
+#include <subdev/bios/M0203.h>
static u8
nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
@@ -54,12 +55,22 @@ nvbios_ramcfg_index(struct nouveau_subdev *subdev)
u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
+ struct nvbios_M0203E M0203E;
+ u8 ver, hdr;
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
xlat = nv_ro16(bios, bit_M.offset + 3);
- if (bit_M.version == 2 && bit_M.length >= 3)
+ if (bit_M.version == 2 && bit_M.length >= 3) {
+ /*XXX: is M ever shorter than this?
+ * if not - what is xlat used for now?
+ * also - sigh..
+ */
+ if (bit_M.length >= 7 &&
+ nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
+ return M0203E.group;
xlat = nv_ro16(bios, bit_M.offset + 1);
+ }
}
if (xlat)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 585e69331ccc..c5685228c322 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -162,8 +162,9 @@ nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
- p->ramcfg_10_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+ p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+ p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
new file mode 100644
index 000000000000..bb9e0018d936
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "priv.h"
+#include <core/option.h>
+#include <subdev/bios/image.h>
+
+struct shadow {
+ struct nouveau_oclass base;
+ u32 skip;
+ const struct nvbios_source *func;
+ void *data;
+ u32 size;
+ int score;
+};
+
+static bool
+shadow_fetch(struct nouveau_bios *bios, u32 upto)
+{
+ struct shadow *mthd = (void *)nv_object(bios)->oclass;
+ const u32 limit = (upto + 3) & ~3;
+ const u32 start = bios->size;
+ void *data = mthd->data;
+ if (nvbios_extend(bios, limit) > 0) {
+ u32 read = mthd->func->read(data, start, limit - start, bios);
+ bios->size = start + read;
+ }
+ return bios->size >= limit;
+}
+
+static u8
+shadow_rd08(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 1))
+ return bios->data[addr];
+ return 0x00;
+}
+
+static u16
+shadow_rd16(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 2))
+ return get_unaligned_le16(&bios->data[addr]);
+ return 0x0000;
+}
+
+static u32
+shadow_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_bios *bios = (void *)object;
+ if (shadow_fetch(bios, addr + 4))
+ return get_unaligned_le32(&bios->data[addr]);
+ return 0x00000000;
+}
+
+static struct nouveau_oclass
+shadow_class = {
+ .handle = NV_SUBDEV(VBIOS, 0x00),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .rd08 = shadow_rd08,
+ .rd16 = shadow_rd16,
+ .rd32 = shadow_rd32,
+ },
+};
+
+static int
+shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
+{
+ struct nvbios_image image;
+ int score = 1;
+
+ if (!nvbios_image(bios, idx, &image)) {
+ nv_debug(bios, "image %d invalid\n", idx);
+ return 0;
+ }
+ nv_debug(bios, "%08x: type %02x, %d bytes\n",
+ image.base, image.type, image.size);
+
+ if (!shadow_fetch(bios, image.size)) {
+ nv_debug(bios, "%08x: fetch failed\n", image.base);
+ return 0;
+ }
+
+ switch (image.type) {
+ case 0x00:
+ if (nvbios_checksum(&bios->data[image.base], image.size)) {
+ nv_debug(bios, "%08x: checksum failed\n", image.base);
+ if (mthd->func->rw)
+ score += 1;
+ score += 1;
+ } else {
+ score += 3;
+ }
+ break;
+ default:
+ score += 3;
+ break;
+ }
+
+ if (!image.last)
+ score += shadow_image(bios, idx + 1, mthd);
+ return score;
+}
+
+static int
+shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
+{
+ struct nouveau_oclass *oclass = nv_object(bios)->oclass;
+ int score;
+ nv_object(bios)->oclass = &mthd->base;
+ score = shadow_image(bios, 0, mthd);
+ nv_object(bios)->oclass = oclass;
+ return score;
+
+}
+
+static int
+shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
+{
+ const struct nvbios_source *func = mthd->func;
+ if (func->name) {
+ nv_debug(bios, "trying %s...\n", name ? name : func->name);
+ if (func->init) {
+ mthd->data = func->init(bios, name);
+ if (IS_ERR(mthd->data)) {
+ mthd->data = NULL;
+ return 0;
+ }
+ }
+ mthd->score = shadow_score(bios, mthd);
+ if (func->fini)
+ func->fini(mthd->data);
+ nv_debug(bios, "scored %d\n", mthd->score);
+ mthd->data = bios->data;
+ mthd->size = bios->size;
+ bios->data = NULL;
+ bios->size = 0;
+ }
+ return mthd->score;
+}
+
+static u32
+shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ const struct firmware *fw = data;
+ if (offset + length <= fw->size) {
+ memcpy(bios->data + offset, fw->data + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void *
+shadow_fw_init(struct nouveau_bios *bios, const char *name)
+{
+ struct device *dev = &nv_device(bios)->pdev->dev;
+ const struct firmware *fw;
+ int ret = request_firmware(&fw, name, dev);
+ if (ret)
+ return ERR_PTR(-ENOENT);
+ return (void *)fw;
+}
+
+static const struct nvbios_source
+shadow_fw = {
+ .name = "firmware",
+ .init = shadow_fw_init,
+ .fini = (void(*)(void *))release_firmware,
+ .read = shadow_fw_read,
+ .rw = false,
+};
+
+int
+nvbios_shadow(struct nouveau_bios *bios)
+{
+ struct shadow mthds[] = {
+ { shadow_class, 0, &nvbios_of },
+ { shadow_class, 0, &nvbios_ramin },
+ { shadow_class, 0, &nvbios_rom },
+ { shadow_class, 0, &nvbios_acpi_fast },
+ { shadow_class, 4, &nvbios_acpi_slow },
+ { shadow_class, 1, &nvbios_pcirom },
+ { shadow_class, 1, &nvbios_platform },
+ { shadow_class }
+ }, *mthd = mthds, *best = NULL;
+ const char *optarg;
+ char *source;
+ int optlen;
+
+ /* handle user-specified bios source */
+ optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+ source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
+ if (source) {
+ /* try to match one of the built-in methods */
+ for (mthd = mthds; mthd->func; mthd++) {
+ if (mthd->func->name &&
+ !strcasecmp(source, mthd->func->name)) {
+ best = mthd;
+ if (shadow_method(bios, mthd, NULL))
+ break;
+ }
+ }
+
+ /* otherwise, attempt to load as firmware */
+ if (!best && (best = mthd)) {
+ mthd->func = &shadow_fw;
+ shadow_method(bios, mthd, source);
+ mthd->func = NULL;
+ }
+
+ if (!best->score) {
+ nv_error(bios, "%s invalid\n", source);
+ kfree(source);
+ source = NULL;
+ }
+ }
+
+ /* scan all potential bios sources, looking for best image */
+ if (!best || !best->score) {
+ for (mthd = mthds, best = mthd; mthd->func; mthd++) {
+ if (!mthd->skip || best->score < mthd->skip) {
+ if (shadow_method(bios, mthd, NULL)) {
+ if (mthd->score > best->score)
+ best = mthd;
+ }
+ }
+ }
+ }
+
+ /* cleanup the ones we didn't use */
+ for (mthd = mthds; mthd->func; mthd++) {
+ if (mthd != best)
+ kfree(mthd->data);
+ }
+
+ if (!best->score) {
+ nv_fatal(bios, "unable to locate usable image\n");
+ return -EINVAL;
+ }
+
+ nv_info(bios, "using image from %s\n", best->func ?
+ best->func->name : source);
+ bios->data = best->data;
+ bios->size = best->size;
+ kfree(source);
+ return 0;
+}
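The selection logic in nvbios_shadow() above tries each BIOS source in turn, scores the image it produces, and keeps the highest-scoring result; sources with a non-zero skip value are only attempted while the best score so far is below that threshold. A minimal userspace sketch of the same pick-the-best-scoring-source pattern, with hypothetical source/probe types standing in for the nouveau structures:

    #include <stddef.h>

    /* Hypothetical stand-ins for the nouveau shadow method/source types. */
    struct source {
            const char *name;
            int skip;                   /* only try if best score < skip */
            int (*probe)(void);         /* returns image score, 0 on failure */
    };

    static const struct source *
    pick_best(const struct source *srcs, size_t n)
    {
            const struct source *best = NULL;
            int best_score = 0;
            size_t i;

            for (i = 0; i < n; i++) {
                    const struct source *s = &srcs[i];
                    int score;

                    /* honour the skip threshold, as nvbios_shadow() does */
                    if (s->skip && best_score >= s->skip)
                            continue;

                    score = s->probe();
                    if (score > best_score) {
                            best_score = score;
                            best = s;
                    }
            }
            return best;                /* NULL if nothing usable was found */
    }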
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
new file mode 100644
index 000000000000..bc130c12ec06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+#else
+static inline bool
+nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline int
+nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return -EINVAL;
+}
+#endif
+
+/* This version of the shadow function disobeys the ACPI spec and tries
+ * to fetch in chunks larger than 4KiB at a time. This is a LOT faster
+ * on some systems, such as the Lenovo W530.
+ */
+static u32
+acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 limit = (offset + length + 0xfff) & ~0xfff;
+ u32 start = offset & ~0x00000fff;
+ u32 fetch = limit - start;
+
+ if (nvbios_extend(bios, limit) > 0) {
+ int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+ if (ret == fetch)
+ return fetch;
+ }
+
+ return 0;
+}
+
+/* Other systems, such as the one in fdo#55948, will report success
+ * but only return 4KiB of data. The common bios fetching logic will
+ * detect an invalid image, and fall back to this version of the read
+ * function.
+ */
+static u32
+acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 limit = (offset + length + 0xfff) & ~0xfff;
+ u32 start = offset & ~0xfff;
+ u32 fetch = 0;
+
+ if (nvbios_extend(bios, limit) > 0) {
+ while (start + fetch < limit) {
+ int ret = nouveau_acpi_get_bios_chunk(bios->data,
+ start + fetch,
+ 0x1000);
+ if (ret != 0x1000)
+ break;
+ fetch += 0x1000;
+ }
+ }
+
+ return fetch;
+}
+
+static void *
+acpi_init(struct nouveau_bios *bios, const char *name)
+{
+ if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+ return ERR_PTR(-ENODEV);
+ return NULL;
+}
+
+const struct nvbios_source
+nvbios_acpi_fast = {
+ .name = "ACPI",
+ .init = acpi_init,
+ .read = acpi_read_fast,
+ .rw = false,
+};
+
+const struct nvbios_source
+nvbios_acpi_slow = {
+ .name = "ACPI",
+ .init = acpi_init,
+ .read = acpi_read_slow,
+ .rw = false,
+};
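The two read routines above differ only in how much they ask for per call: the fast path requests the whole aligned range at once, while the slow path stays within a 4KiB-per-call budget and stops at the first short read. A rough illustration of that fast-then-chunked idea, using a hypothetical read_chunk() callback in place of the real ACPI helper and ignoring the page alignment the driver performs:

    #include <stdint.h>

    /* Hypothetical backend: copies len bytes from source offset, may be short. */
    typedef uint32_t (*read_chunk_fn)(uint8_t *dst, uint32_t offset, uint32_t len);

    static uint32_t
    fetch_fast(uint8_t *buf, uint32_t offset, uint32_t len, read_chunk_fn read_chunk)
    {
            /* one big request; only useful if the backend actually honours it */
            return read_chunk(buf + offset, offset, len) == len ? len : 0;
    }

    static uint32_t
    fetch_slow(uint8_t *buf, uint32_t offset, uint32_t len, read_chunk_fn read_chunk)
    {
            uint32_t fetched = 0;

            /* 4KiB at a time, stopping at the first short chunk */
            while (fetched < len) {
                    uint32_t chunk = (len - fetched < 0x1000) ? len - fetched : 0x1000;

                    if (read_chunk(buf + offset + fetched, offset + fetched, chunk) != chunk)
                            break;
                    fetched += chunk;
            }
            return fetched;
    }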
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
new file mode 100644
index 000000000000..3abe487a6025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+#if defined(__powerpc__)
+struct priv {
+ const void __iomem *data;
+ int size;
+};
+
+static u32
+of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ struct priv *priv = data;
+ if (offset + length <= priv->size) {
+ memcpy_fromio(bios->data + offset, priv->data + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void *
+of_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct device_node *dn;
+ struct priv *priv;
+ if (!(dn = pci_device_to_OF_node(pdev)))
+ return ERR_PTR(-ENODEV);
+ if (!(priv = kzalloc(sizeof(*priv), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+ if ((priv->data = of_get_property(dn, "NVDA,BMP", &priv->size)))
+ return priv;
+ kfree(priv);
+ return ERR_PTR(-EINVAL);
+}
+
+const struct nvbios_source
+nvbios_of = {
+ .name = "OpenFirmware",
+ .init = of_init,
+ .fini = (void(*)(void *))kfree,
+ .read = of_read,
+ .rw = false,
+};
+#else
+const struct nvbios_source
+nvbios_of = {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
new file mode 100644
index 000000000000..1d0389c0abef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+ struct pci_dev *pdev;
+ void __iomem *rom;
+ size_t size;
+};
+
+static u32
+pcirom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ struct priv *priv = data;
+ if (offset + length <= priv->size) {
+ memcpy_fromio(bios->data + offset, priv->rom + offset, length);
+ return length;
+ }
+ return 0;
+}
+
+static void
+pcirom_fini(void *data)
+{
+ struct priv *priv = data;
+ pci_unmap_rom(priv->pdev, priv->rom);
+ pci_disable_rom(priv->pdev);
+ kfree(priv);
+}
+
+static void *
+pcirom_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct priv *priv = NULL;
+ int ret;
+
+ if (!(ret = pci_enable_rom(pdev))) {
+ if (ret = -ENOMEM,
+ (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ if (ret = -EFAULT,
+ (priv->rom = pci_map_rom(pdev, &priv->size))) {
+ priv->pdev = pdev;
+ return priv;
+ }
+ kfree(priv);
+ }
+ pci_disable_rom(pdev);
+ }
+
+ return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_pcirom = {
+ .name = "PCIROM",
+ .init = pcirom_init,
+ .fini = pcirom_fini,
+ .read = pcirom_read,
+ .rw = true,
+};
+
+static void *
+platform_init(struct nouveau_bios *bios, const char *name)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ struct priv *priv;
+ int ret = -ENOMEM;
+
+ if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ if (ret = -ENODEV,
+ (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ return priv;
+ kfree(priv);
+ }
+
+ return ERR_PTR(ret);
+}
+
+const struct nvbios_source
+nvbios_platform = {
+ .name = "PLATFORM",
+ .init = platform_init,
+ .fini = (void(*)(void *))kfree,
+ .read = pcirom_read,
+ .rw = true,
+};
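pcirom_init() above folds its error codes into the conditions with the comma operator, which keeps the success path on one indentation ladder but is easy to misread. Purely as a readability comparison, the same control flow can be sketched in the more common goto-unwind style; it relies on the same headers and helpers already used in this file:

    static void *
    pcirom_init_unwound(struct nouveau_bios *bios, const char *name)
    {
            struct pci_dev *pdev = nv_device(bios)->pdev;
            struct priv *priv;
            int ret;

            ret = pci_enable_rom(pdev);
            if (ret)
                    return ERR_PTR(ret);

            priv = kmalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv) {
                    ret = -ENOMEM;
                    goto err_rom;
            }

            priv->rom = pci_map_rom(pdev, &priv->size);
            if (!priv->rom) {
                    ret = -EFAULT;
                    goto err_free;
            }

            priv->pdev = pdev;
            return priv;

    err_free:
            kfree(priv);
    err_rom:
            pci_disable_rom(pdev);
            return ERR_PTR(ret);
    }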
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
new file mode 100644
index 000000000000..5e58bba0dd5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct priv {
+ struct nouveau_bios *bios;
+ u32 bar0;
+};
+
+static u32
+pramin_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 i;
+ if (offset + length <= 0x00100000) {
+ for (i = offset; i < offset + length; i += 4)
+ *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+ return length;
+ }
+ return 0;
+}
+
+static void
+pramin_fini(void *data)
+{
+ struct priv *priv = data;
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+}
+
+static void *
+pramin_init(struct nouveau_bios *bios, const char *name)
+{
+ struct priv *priv = NULL;
+ u64 addr = 0;
+
+ /* PRAMIN always potentially available prior to nv50 */
+ if (nv_device(bios)->card_type < NV_50)
+ return NULL;
+
+ /* we can't get the bios image pointer without PDISP */
+ if (nv_device(bios)->card_type >= GM100)
+ addr = nv_rd32(bios, 0x021c04);
+ else
+ if (nv_device(bios)->card_type >= NV_C0)
+ addr = nv_rd32(bios, 0x022500);
+ if (addr & 0x00000001) {
+ nv_debug(bios, "... display disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* check that the window is enabled and in vram, particularly
+ * important as we don't want to be touching vram on an
+ * uninitialised board
+ */
+ addr = nv_rd32(bios, 0x619f04);
+ if (!(addr & 0x00000008)) {
+ nv_debug(bios, "... not enabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+ if ((addr & 0x00000003) != 1) {
+ nv_debug(bios, "... not in vram\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* some alternate method inherited from xf86-video-nv... */
+ addr = (addr & 0xffffff00) << 8;
+ if (!addr) {
+ addr = (u64)nv_rd32(bios, 0x001700) << 16;
+ addr += 0xf0000;
+ }
+
+ /* modify bar0 PRAMIN window to cover the bios image */
+ if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ nv_error(bios, "... out of memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ priv->bios = bios;
+ priv->bar0 = nv_rd32(bios, 0x001700);
+ nv_wr32(bios, 0x001700, addr >> 16);
+ return priv;
+}
+
+const struct nvbios_source
+nvbios_ramin = {
+ .name = "PRAMIN",
+ .init = pramin_init,
+ .fini = pramin_fini,
+ .read = pramin_read,
+ .rw = true,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
new file mode 100644
index 000000000000..b7992bc3ffa5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+static u32
+prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
+{
+ u32 i;
+ if (offset + length <= 0x00100000) {
+ for (i = offset; i < offset + length; i += 4)
+ *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+ return length;
+ }
+ return 0;
+}
+
+static void
+prom_fini(void *data)
+{
+ struct nouveau_bios *bios = data;
+ if (nv_device(bios)->card_type < NV_50)
+ nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
+ else
+ nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+}
+
+static void *
+prom_init(struct nouveau_bios *bios, const char *name)
+{
+ if (nv_device(bios)->card_type < NV_50) {
+ if (nv_device(bios)->card_type == NV_40 &&
+ nv_device(bios)->chipset >= 0x4c)
+ return ERR_PTR(-ENODEV);
+ nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
+ } else {
+ nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
+ }
+ return bios;
+}
+
+const struct nvbios_source
+nvbios_rom = {
+ .name = "PROM",
+ .init = prom_init,
+ .fini = prom_fini,
+ .read = prom_read,
+ .rw = false,
+};
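pramin_read() and prom_read() above share the same shape: bounds-check against the 1MiB window, then copy the data one 32-bit register read at a time through a fixed MMIO aperture. A standalone sketch of that loop, with a hypothetical rd32() callback standing in for nv_rd32():

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t (*rd32_fn)(uint32_t addr);

    /* Copy [offset, offset+length) through a dword-wide aperture at 'aperture'. */
    static uint32_t
    aperture_read(uint8_t *dst, uint32_t aperture, uint32_t offset, uint32_t length,
                  rd32_fn rd32)
    {
            uint32_t i;

            if (offset + length > 0x00100000)       /* 1MiB window, as in the driver */
                    return 0;

            for (i = offset; i < offset + length; i += 4) {
                    uint32_t v = rd32(aperture + i);
                    /* native-order store, matching the driver's *(u32 *) cast */
                    memcpy(&dst[i], &v, sizeof(v));
            }
            return length;
    }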
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 46d955eb51eb..8521eca1ed9c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -93,10 +93,44 @@ nvbios_timingEp(struct nouveau_bios *bios, int idx,
p->timing_hdr = *hdr;
switch (!!data * *ver) {
case 0x10:
- p->timing_10_WR = nv_ro08(bios, data + 0x00);
- p->timing_10_CL = nv_ro08(bios, data + 0x02);
- p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
- p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+ p->timing_10_WR = nv_ro08(bios, data + 0x00);
+ p->timing_10_WTR = nv_ro08(bios, data + 0x01);
+ p->timing_10_CL = nv_ro08(bios, data + 0x02);
+ p->timing_10_RC = nv_ro08(bios, data + 0x03);
+ p->timing_10_RFC = nv_ro08(bios, data + 0x05);
+ p->timing_10_RAS = nv_ro08(bios, data + 0x07);
+ p->timing_10_RP = nv_ro08(bios, data + 0x09);
+ p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
+ p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
+ p->timing_10_RRD = nv_ro08(bios, data + 0x0c);
+ p->timing_10_13 = nv_ro08(bios, data + 0x0d);
+ p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
+
+ p->timing_10_24 = 0xff;
+ p->timing_10_21 = 0;
+ p->timing_10_20 = 0;
+ p->timing_10_CWL = 0;
+ p->timing_10_18 = 0;
+ p->timing_10_16 = 0;
+
+ switch (min_t(u8, *hdr, 25)) {
+ case 25:
+ p->timing_10_24 = nv_ro08(bios, data + 0x18);
+ case 24:
+ case 23:
+ case 22:
+ p->timing_10_21 = nv_ro08(bios, data + 0x15);
+ case 21:
+ p->timing_10_20 = nv_ro08(bios, data + 0x14);
+ case 20:
+ p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+ case 19:
+ p->timing_10_18 = nv_ro08(bios, data + 0x12);
+ case 18:
+ case 17:
+ p->timing_10_16 = nv_ro08(bios, data + 0x10);
+ }
+
break;
case 0x20:
p->timing[0] = nv_ro32(bios, data + 0x00);
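The new version-0x10 parsing above reads the optional trailing fields by switching on min_t(u8, *hdr, 25) and deliberately falling through, so a longer header picks up every field a shorter one would. The pattern is easier to see outside the VBIOS code; the field layout below is made up for illustration:

    #include <stdint.h>

    struct parsed {
            uint8_t f24, f21, f20, cwl, f18, f16;
    };

    /* Read optional trailing fields; each longer header implies the shorter ones. */
    static void
    parse_optional(struct parsed *p, const uint8_t *data, uint8_t hdr_len)
    {
            switch (hdr_len > 25 ? 25 : hdr_len) {
            case 25:
                    p->f24 = data[0x18];    /* only present with a 25-byte header */
                    /* fall through */
            case 24:
            case 23:
            case 22:
                    p->f21 = data[0x15];
                    /* fall through */
            case 21:
                    p->f20 = data[0x14];
                    /* fall through */
            case 20:
                    p->cwl = data[0x13];
                    /* fall through */
            case 19:
                    p->f18 = data[0x12];
                    /* fall through */
            case 18:
            case 17:
                    p->f16 = data[0x10];
            }
    }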
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
index 425a8d5e9129..fb4fad374bdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -109,7 +109,7 @@ struct gk20a_clk_pllg_params {
};
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
- .min_vco = 1000, .max_vco = 1700,
+ .min_vco = 1000, .max_vco = 2064,
.min_u = 12, .max_u = 38,
.min_m = 1, .max_m = 255,
.min_n = 8, .max_n = 255,
@@ -470,76 +470,91 @@ gk20a_pstates[] = {
{
.base = {
.domain[nv_clk_src_gpc] = 72000,
+ .voltage = 0,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 108000,
+ .voltage = 1,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 180000,
+ .voltage = 2,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 252000,
+ .voltage = 3,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 324000,
+ .voltage = 4,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 396000,
+ .voltage = 5,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 468000,
+ .voltage = 6,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 540000,
+ .voltage = 7,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 612000,
+ .voltage = 8,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 648000,
+ .voltage = 9,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 684000,
+ .voltage = 10,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 708000,
+ .voltage = 11,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 756000,
+ .voltage = 12,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 804000,
+ .voltage = 13,
},
},
{
.base = {
.domain[nv_clk_src_gpc] = 852000,
+ .voltage = 14,
},
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 094551d8ad9b..07ad01247675 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -510,7 +510,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
- false, &priv);
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 239acfe876c3..0e45cee82463 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,8 +24,6 @@
#include <core/option.h>
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
#include <subdev/vga.h>
#include "priv.h"
@@ -56,7 +54,7 @@ _nouveau_devinit_init(struct nouveau_object *object)
if (ret)
return ret;
- ret = nvbios_init(&devinit->base, devinit->post);
+ ret = impl->post(&devinit->base, devinit->post);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
index c69bc7f54e37..4ba43d6a1ec8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
@@ -24,7 +24,7 @@
#include "nv50.h"
-static u64
+u64
gm107_devinit_disable(struct nouveau_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
@@ -53,4 +53,5 @@ gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nvc0_devinit_pll_set,
.disable = gm107_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
new file mode 100644
index 000000000000..e44a86662a2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pmu.h>
+
+#include "nv50.h"
+
+static void
+pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ int i;
+
+ nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+ for (i = 0; i < len; i += 4) {
+ if ((i & 0xff) == 0)
+ nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
+ nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+ }
+
+ while (i & 0xff) {
+ nv_wr32(priv, 0x10a184, 0x00000000);
+ i += 4;
+ }
+}
+
+static void
+pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ int i;
+
+ nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+ for (i = 0; i < len; i += 4)
+ nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+}
+
+static u32
+pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+{
+ nv_wr32(priv, 0x10a1c0, argp);
+ nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
+ return nv_rd32(priv, 0x10a1c4);
+}
+
+static void
+pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+{
+ nv_wr32(priv, 0x10a104, init_addr);
+ nv_wr32(priv, 0x10a10c, 0x00000000);
+ nv_wr32(priv, 0x10a100, 0x00000002);
+}
+
+static int
+pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+ u32 *init_addr_pmu, u32 *args_addr_pmu)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pmuR pmu;
+
+ if (!nvbios_pmuRm(bios, type, &pmu)) {
+ nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+ return -EINVAL;
+ }
+
+ if (!post)
+ return 0;
+
+ pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+ pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+ pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+
+ if (init_addr_pmu) {
+ *init_addr_pmu = pmu.init_addr_pmu;
+ *args_addr_pmu = pmu.args_addr_pmu;
+ return 0;
+ }
+
+ return pmu_exec(priv, pmu.init_addr_pmu), 0;
+}
+
+static int
+gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
+{
+ struct nv50_devinit_priv *priv = (void *)nouveau_devinit(subdev);
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct bit_entry bit_I;
+ u32 init, args;
+ int ret;
+
+ if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
+ bit_I.length < 0x1c) {
+ nv_error(priv, "VBIOS PMU init data not found\n");
+ return -EINVAL;
+ }
+
+ /* reset PMU and load init table parser ucode */
+ if (post) {
+ nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
+ nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
+ nv_rd32(priv, 0x000200);
+ while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+ }
+ }
+
+ ret = pmu_load(priv, 0x04, post, &init, &args);
+ if (ret)
+ return ret;
+
+ /* upload first chunk of init data */
+ if (post) {
+ u32 pmu = pmu_args(priv, args + 0x08, 0x08);
+ u32 img = nv_ro16(bios, bit_I.offset + 0x14);
+ u32 len = nv_ro16(bios, bit_I.offset + 0x16);
+ pmu_data(priv, pmu, img, len);
+ }
+
+ /* upload second chunk of init data */
+ if (post) {
+ u32 pmu = pmu_args(priv, args + 0x08, 0x10);
+ u32 img = nv_ro16(bios, bit_I.offset + 0x18);
+ u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
+ pmu_data(priv, pmu, img, len);
+ }
+
+ /* execute init tables */
+ if (post) {
+ nv_wr32(priv, 0x10a040, 0x00005000);
+ pmu_exec(priv, init);
+ while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+ }
+ }
+
+ /* load and execute some other ucode image (bios therm?) */
+ return pmu_load(priv, 0x01, post, NULL, NULL);
+}
+
+struct nouveau_oclass *
+gm204_devinit_oclass = &(struct nouveau_devinit_impl) {
+ .base.handle = NV_SUBDEV(DEVINIT, 0x07),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_devinit_ctor,
+ .dtor = _nouveau_devinit_dtor,
+ .init = nv50_devinit_init,
+ .fini = _nouveau_devinit_fini,
+ },
+ .pll_set = nvc0_devinit_pll_set,
+ .disable = gm107_devinit_disable,
+ .post = gm204_devinit_post,
+}.base;
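pmu_code() above streams the ucode through an auto-incrementing data port, reloading the block register at each 256-byte boundary and zero-padding the final block. A sketch of that pad-to-block-boundary loop, with hypothetical write_block()/write_word() callbacks in place of the real MMIO accessors and the ucode passed as an array of 32-bit words:

    #include <stdint.h>

    /* Hypothetical MMIO stand-ins. */
    typedef void (*write_block_fn)(uint32_t block);  /* select a 256-byte block */
    typedef void (*write_word_fn)(uint32_t word);    /* auto-incrementing data port */

    static void
    upload_padded(const uint32_t *img, uint32_t base, uint32_t len,
                  write_block_fn write_block, write_word_fn write_word)
    {
            uint32_t i;

            for (i = 0; i < len; i += 4) {
                    if ((i & 0xff) == 0)
                            write_block((base + i) >> 8);
                    write_word(img[i / 4]);
            }

            /* zero-fill to the end of the last 256-byte block */
            while (i & 0xff) {
                    write_word(0x00000000);
                    i += 4;
            }
    }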
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 052ad690b468..65651c50f6ea 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -464,4 +464,5 @@ nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv04_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index 4a19c10e5178..a2007a3efc4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -136,4 +136,5 @@ nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv05_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 3b8d657da279..178b46f79b50 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -107,4 +107,5 @@ nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv10_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 526d0c6faacd..995dd97af3e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -34,4 +34,5 @@ nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
.fini = nv04_devinit_fini,
},
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 04bc9732644c..915089fb46f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -71,4 +71,5 @@ nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.meminit = nv20_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index b46c62a1d5d8..968334d1dca4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -26,6 +26,7 @@
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
+#include <subdev/ibus.h>
#include <subdev/vga.h>
#include "nv50.h"
@@ -91,6 +92,7 @@ int
nv50_devinit_init(struct nouveau_object *object)
{
struct nouveau_bios *bios = nouveau_bios(object);
+ struct nouveau_ibus *ibus = nouveau_ibus(object);
struct nv50_devinit_priv *priv = (void *)object;
struct nvbios_outp info;
struct dcb_output outp;
@@ -105,6 +107,13 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
+ /* some boards appear to require certain priv register timeouts
+ * to be bumped before running devinit scripts. not a clue why
+ * the vbios engineers didn't make the scripts just work...
+ */
+ if (priv->base.post && ibus)
+ nv_ofuncs(ibus)->init(nv_object(ibus));
+
ret = nouveau_devinit_init(&priv->base);
if (ret)
return ret;
@@ -160,4 +169,5 @@ nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv50_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
index 51d5076333ec..f412bb7f780e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -18,4 +18,6 @@ int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+u64 gm107_devinit_disable(struct nouveau_devinit *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
index 787422505d87..a7c80ded77cd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -60,4 +60,5 @@ nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv84_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
index 2b0e963fc6f0..a773253a17f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -59,4 +59,5 @@ nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nv50_devinit_pll_set,
.disable = nv98_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 006cf348bda7..b9cd9e53f760 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -142,4 +142,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
.pll_set = nva3_devinit_pll_set,
.disable = nva3_devinit_disable,
.mmio = nva3_devinit_mmio,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
index 4fc68d27eff3..3729846a8e5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -60,4 +60,5 @@ nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nva3_devinit_pll_set,
.disable = nvaf_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 30c765747eea..80bd7f5eda3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -115,4 +115,5 @@ nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
},
.pll_set = nvc0_devinit_pll_set,
.disable = nvc0_devinit_disable,
+ .post = nvbios_init,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index f0e8683ad840..cbcd51852472 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -3,6 +3,7 @@
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
#include <subdev/clock/pll.h>
#include <subdev/devinit.h>
@@ -12,6 +13,7 @@ struct nouveau_devinit_impl {
int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
u64 (*disable)(struct nouveau_devinit *);
u32 (*mmio)(struct nouveau_devinit *, u32);
+ int (*post)(struct nouveau_subdev *, bool);
};
#define nouveau_devinit_create(p,e,o,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f009d8a39d9d..c866148c440f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -23,37 +23,30 @@
*/
#include <subdev/bios.h>
-#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
#include "priv.h"
int
nouveau_fb_bios_memtype(struct nouveau_bios *bios)
{
- struct bit_entry M;
- u8 ramcfg;
-
- ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
- if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
- u16 table = nv_ro16(bios, M.offset + 3);
- u8 version = nv_ro08(bios, table + 0);
- u8 header = nv_ro08(bios, table + 1);
- u8 record = nv_ro08(bios, table + 2);
- u8 entries = nv_ro08(bios, table + 3);
- if (table && version == 0x10 && ramcfg < entries) {
- u16 entry = table + header + (ramcfg * record);
- switch (nv_ro08(bios, entry) & 0x0f) {
- case 0: return NV_MEM_TYPE_DDR2;
- case 1: return NV_MEM_TYPE_DDR3;
- case 2: return NV_MEM_TYPE_GDDR3;
- case 3: return NV_MEM_TYPE_GDDR5;
- default:
- break;
- }
-
+ const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+ struct nvbios_M0203E M0203E;
+ u8 ver, hdr;
+
+ if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
+ switch (M0203E.type) {
+ case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
+ case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
+ case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
+ case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+ default:
+ nv_warn(bios, "M0203E type %02x\n", M0203E.type);
+ return NV_MEM_TYPE_UNKNOWN;
}
}
+ nv_warn(bios, "M0203E not matched!\n");
return NV_MEM_TYPE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
new file mode 100644
index 000000000000..d85a25d027ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ * Roy Spliet <rspliet@eclipso.eu>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+ int id;
+ u8 enc;
+};
+
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+ while (xlat->id >= 0) {
+ if (xlat->id == id)
+ return xlat->enc;
+ xlat++;
+ }
+ return -EINVAL;
+}
+
+static const struct ramxlat
+ramgddr3_cl_lo[] = {
+ { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+ /* the below are mentioned in some, but not all, gddr3 docs */
+ { 12, 4 }, { 13, 5 }, { 14, 6 },
+ /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
+ /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
+ * { 15, 11 }, */
+ { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_cl_hi[] = {
+ { 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 },
+ { 16, 0 }, { 17, 1 },
+ { -1 }
+};
+
+static const struct ramxlat
+ramgddr3_wr_lo[] = {
+ { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
+ { 11, 0 },
+ /* the below are mentioned in some, but not all, gddr3 docs */
+ { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13, 2 },
+ { -1 }
+};
+
+int
+nouveau_gddr3_calc(struct nouveau_ram *ram)
+{
+ int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+
+ switch (ram->next->bios.timing_ver) {
+ case 0x10:
+ CWL = ram->next->bios.timing_10_CWL;
+ CL = ram->next->bios.timing_10_CL;
+ WR = ram->next->bios.timing_10_WR;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
+ ODT = ram->next->bios.timing_10_ODT;
+ break;
+ case 0x20:
+ CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+ CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
+ WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+ /* XXX: Get these values from the VBIOS instead */
+ DLL = !(ram->mr[1] & 0x1);
+ ODT = (ram->mr[1] & 0x004) >> 2 |
+ (ram->mr[1] & 0x040) >> 5 |
+ (ram->mr[1] & 0x200) >> 7;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ hi = ram->mr[2] & 0x1;
+ CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
+ WR = ramxlat(ramgddr3_wr_lo, WR);
+ if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0)
+ return -EINVAL;
+
+ ram->mr[0] &= ~0xf74;
+ ram->mr[0] |= (CWL & 0x07) << 9;
+ ram->mr[0] |= (CL & 0x07) << 4;
+ ram->mr[0] |= (CL & 0x08) >> 1;
+
+ ram->mr[1] &= ~0x3fc;
+ ram->mr[1] |= (ODT & 0x03) << 2;
+ ram->mr[1] |= (ODT & 0x03) << 8;
+ ram->mr[1] |= (WR & 0x03) << 4;
+ ram->mr[1] |= (WR & 0x04) << 5;
+ ram->mr[1] |= !DLL << 6;
+ return 0;
+}
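nouveau_gddr3_calc() above leans on the small ramxlat tables to turn timing values from the VBIOS into the encodings the mode registers expect, with -EINVAL for anything the tables do not cover. For instance, against the tables defined earlier in this file:

    /* CL 11 is listed in ramgddr3_cl_lo, CL 20 is not */
    int enc = ramxlat(ramgddr3_cl_lo, 11);  /* enc == 3 */
    int bad = ramxlat(ramgddr3_cl_lo, 20);  /* bad == -EINVAL */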
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 60322e906dd4..283863f7aa9b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -37,6 +37,7 @@ extern struct nouveau_oclass gm107_ram_oclass;
int nouveau_sddr2_calc(struct nouveau_ram *ram);
int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr3_calc(struct nouveau_ram *ram);
int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
#define nouveau_fb_create(p,e,c,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index d1fbbe4b00a2..0ac7256443bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -141,6 +141,20 @@ ramfuc_wait_vblank(struct ramfuc *ram)
}
static inline void
+ramfuc_train(struct ramfuc *ram)
+{
+ nouveau_memx_train(ram->memx);
+}
+
+static inline int
+ramfuc_train_result(struct nouveau_fb *pfb, u32 *result, u32 rsize)
+{
+ struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+
+ return nouveau_memx_train_result(ppwr, result, rsize);
+}
+
+static inline void
ramfuc_block(struct ramfuc *ram)
{
nouveau_memx_block(ram->memx);
@@ -162,6 +176,8 @@ ramfuc_unblock(struct ramfuc *ram)
#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
#define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
#define ram_wait_vblank(s) ramfuc_wait_vblank(&(s)->base)
+#define ram_train(s) ramfuc_train(&(s)->base)
+#define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l))
#define ram_block(s) ramfuc_block(&(s)->base)
#define ram_unblock(s) ramfuc_unblock(&(s)->base)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 3601deca0bd5..3b38a538845d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -20,86 +20,512 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
+ * Roy Spliet <rspliet@eclipso.eu>
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
+#include <subdev/bios/M0205.h>
#include <subdev/bios/timing.h>
#include <subdev/clock/nva3.h>
#include <subdev/clock/pll.h>
+#include <subdev/gpio.h>
+
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
#include <core/option.h>
#include "ramfuc.h"
#include "nv50.h"
+/* XXX: Remove when memx gains GPIO support */
+extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
+
struct nva3_ramfuc {
struct ramfuc base;
+ struct ramfuc_reg r_0x001610;
+ struct ramfuc_reg r_0x001700;
+ struct ramfuc_reg r_0x002504;
struct ramfuc_reg r_0x004000;
struct ramfuc_reg r_0x004004;
struct ramfuc_reg r_0x004018;
struct ramfuc_reg r_0x004128;
struct ramfuc_reg r_0x004168;
+ struct ramfuc_reg r_0x100080;
struct ramfuc_reg r_0x100200;
struct ramfuc_reg r_0x100210;
struct ramfuc_reg r_0x100220[9];
+ struct ramfuc_reg r_0x100264;
struct ramfuc_reg r_0x1002d0;
struct ramfuc_reg r_0x1002d4;
struct ramfuc_reg r_0x1002dc;
struct ramfuc_reg r_0x10053c;
struct ramfuc_reg r_0x1005a0;
struct ramfuc_reg r_0x1005a4;
+ struct ramfuc_reg r_0x100700;
struct ramfuc_reg r_0x100714;
struct ramfuc_reg r_0x100718;
struct ramfuc_reg r_0x10071c;
+ struct ramfuc_reg r_0x100720;
struct ramfuc_reg r_0x100760;
struct ramfuc_reg r_0x1007a0;
struct ramfuc_reg r_0x1007e0;
+ struct ramfuc_reg r_0x100da0;
struct ramfuc_reg r_0x10f804;
struct ramfuc_reg r_0x1110e0;
struct ramfuc_reg r_0x111100;
struct ramfuc_reg r_0x111104;
+ struct ramfuc_reg r_0x1111e0;
+ struct ramfuc_reg r_0x111400;
struct ramfuc_reg r_0x611200;
struct ramfuc_reg r_mr[4];
+ struct ramfuc_reg r_gpioFBVREF;
+};
+
+struct nva3_ltrain {
+ enum {
+ NVA3_TRAIN_UNKNOWN,
+ NVA3_TRAIN_UNSUPPORTED,
+ NVA3_TRAIN_ONCE,
+ NVA3_TRAIN_EXEC,
+ NVA3_TRAIN_DONE
+ } state;
+ u32 r_100720;
+ u32 r_1111e0;
+ u32 r_111400;
+ struct nouveau_mem *mem;
};
struct nva3_ram {
struct nouveau_ram base;
struct nva3_ramfuc fuc;
+ struct nva3_ltrain ltrain;
};
+void
+nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
+{
+ int i, lo, hi;
+ u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (lo = 0; lo < 0x40; lo++) {
+ if (!(vals[lo] & 0x80000000))
+ continue;
+ if (vals[lo] & (0x101 << i))
+ break;
+ }
+
+ if (lo == 0x40)
+ return;
+
+ for (hi = lo + 1; hi < 0x40; hi++) {
+ if (!(vals[lo] & 0x80000000))
+ continue;
+ if (!(vals[hi] & (0x101 << i))) {
+ hi--;
+ break;
+ }
+ }
+
+ median[i] = ((hi - lo) >> 1) + lo;
+ bins[(median[i] & 0xf0) >> 4]++;
+ median[i] += 0x30;
+ }
+
+ /* Find the best value for 0x1111e0 */
+ for (i = 0; i < 4; i++) {
+ if (bins[i] > qty) {
+ bin = i + 3;
+ qty = bins[i];
+ }
+ }
+
+ train->r_100720 = 0;
+ for (i = 0; i < 8; i++) {
+ median[i] = max(median[i], (u8) (bin << 4));
+ median[i] = min(median[i], (u8) ((bin << 4) | 0xf));
+
+ train->r_100720 |= ((median[i] & 0x0f) << (i << 2));
+ }
+
+ train->r_1111e0 = 0x02000000 | (bin * 0x101);
+ train->r_111400 = 0x0;
+}
+
+/*
+ * Link training for (at least) DDR3
+ */
+int
+nva3_link_train(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nouveau_clock *clk = nouveau_clock(pfb);
+ struct nva3_ltrain *train = &ram->ltrain;
+ struct nouveau_device *device = nv_device(pfb);
+ struct nva3_ramfuc *fuc = &ram->fuc;
+ u32 *result, r1700;
+ int ret, i;
+ struct nvbios_M0205T M0205T = { 0 };
+ u8 ver, hdr, cnt, len, snr, ssz;
+ unsigned int clk_current;
+ unsigned long flags;
+ unsigned long *f = &flags;
+
+ if (nouveau_boolopt(device->cfgopt, "NvMemExec", true) != true)
+ return -ENOSYS;
+
+ /* XXX: Multiple partitions? */
+ result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ train->state = NVA3_TRAIN_EXEC;
+
+ /* Clock speeds for training and back */
+ nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
+ if (M0205T.freq == 0)
+ return -ENOENT;
+
+ clk_current = clk->read(clk, nv_clk_src_mem);
+
+ ret = nva3_clock_pre(clk, f);
+ if (ret)
+ goto out;
+
+ /* First: clock up/down */
+ ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+ if (ret)
+ goto out;
+
+ /* Do this *after* calc, eliminates write in script */
+ nv_wr32(pfb, 0x111400, 0x00000000);
+ /* XXX: Magic writes that improve training reliability? */
+ nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
+ nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
+ nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
+ nv_wr32(pfb, 0x100c04, 0x00000400);
+
+ /* Now the training script */
+ r1700 = ram_rd32(fuc, 0x001700);
+
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+ ram_wr32(fuc, 0x611200, 0x3300);
+ ram_wait_vblank(fuc);
+ ram_wait(fuc, 0x611200, 0x00000003, 0x00000000, 500000);
+ ram_mask(fuc, 0x001610, 0x00000083, 0x00000003);
+ ram_mask(fuc, 0x100080, 0x00000020, 0x00000000);
+ ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+ ram_wr32(fuc, 0x001700, 0x00000000);
+
+ ram_train(fuc);
+
+ /* Reset */
+ ram_mask(fuc, 0x10f804, 0x80000000, 0x80000000);
+ ram_wr32(fuc, 0x10053c, 0x0);
+ ram_wr32(fuc, 0x100720, train->r_100720);
+ ram_wr32(fuc, 0x1111e0, train->r_1111e0);
+ ram_wr32(fuc, 0x111400, train->r_111400);
+ ram_nuke(fuc, 0x100080);
+ ram_mask(fuc, 0x100080, 0x00000020, 0x00000020);
+ ram_nsec(fuc, 1000);
+
+ ram_wr32(fuc, 0x001700, r1700);
+ ram_mask(fuc, 0x001610, 0x00000083, 0x00000080);
+ ram_wr32(fuc, 0x611200, 0x3330);
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+
+ ram_exec(fuc, true);
+
+ ram->base.calc(pfb, clk_current);
+ ram_exec(fuc, true);
+
+ /* Post-processing, avoids flicker */
+ nv_mask(pfb, 0x616308, 0x10, 0x10);
+ nv_mask(pfb, 0x616b08, 0x10, 0x10);
+
+ nva3_clock_post(clk, f);
+
+ ram_train_result(pfb, result, 64);
+ for (i = 0; i < 64; i++)
+ nv_debug(pfb, "Train: %08x", result[i]);
+ nva3_link_train_calc(result, train);
+
+ nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
+ train->r_1111e0, train->r_111400);
+
+ kfree(result);
+
+ train->state = NVA3_TRAIN_DONE;
+
+ return ret;
+
+out:
+ if (ret == -EBUSY)
+ f = NULL;
+
+ train->state = NVA3_TRAIN_UNSUPPORTED;
+
+ nva3_clock_post(clk, f);
+ return ret;
+}
+
+int
+nva3_link_train_init(struct nouveau_fb *pfb)
+{
+ static const u32 pattern[16] = {
+ 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+ 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+ 0x33333333, 0x55555555, 0x77777777, 0x66666666,
+ 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+ };
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nva3_ltrain *train = &ram->ltrain;
+ struct nouveau_mem *mem;
+ struct nvbios_M0205E M0205E;
+ u8 ver, hdr, cnt, len;
+ u32 r001700;
+ int ret, i = 0;
+
+ train->state = NVA3_TRAIN_UNSUPPORTED;
+
+ /* We support type "5"
+ * XXX: training pattern table appears to be unused for this routine */
+ if (!nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E))
+ return -ENOENT;
+
+ if (M0205E.type != 5)
+ return 0;
+
+ train->state = NVA3_TRAIN_ONCE;
+
+ ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+ if (ret)
+ return ret;
+
+ mem = ram->ltrain.mem;
+
+ nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
+ nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+ nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+ }
+
+ for (i = 0; i < 0x30; i++) {
+ nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+ nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+ }
+
+ /* And upload the pattern */
+ r001700 = nv_rd32(pfb, 0x1700);
+ nv_wr32(pfb, 0x1700, mem->offset >> 16);
+ for (i = 0; i < 16; i++)
+ nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+ for (i = 0; i < 16; i++)
+ nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
+ nv_wr32(pfb, 0x1700, r001700);
+
+ train->r_100720 = nv_rd32(pfb, 0x100720);
+ train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
+ train->r_111400 = nv_rd32(pfb, 0x111400);
+
+ return 0;
+}
+
+void
+nva3_link_train_fini(struct nouveau_fb *pfb)
+{
+ struct nva3_ram *ram = (void *)pfb->ram;
+
+ if (ram->ltrain.mem)
+ pfb->ram->put(pfb, &ram->ltrain.mem);
+}
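The heart of the training path is nva3_link_train_calc() above: for each byte lane it scans the 64 samples for the window of delays that pass, takes the midpoint, works out which 16-value bucket most midpoints land in, and clamps every lane into that bucket before packing the results into the training registers. A simplified, standalone sketch of that centring step, loosely modelled on the driver (windows are given directly instead of being scanned out of the samples, and the driver's +0x30 and bin+3 adjustments are omitted):

    #include <stdint.h>

    static void
    centre_lanes(const uint8_t lo[8], const uint8_t hi[8],
                 uint8_t centred[8], uint8_t *bucket)
    {
            uint8_t mid[8], bins[16] = { 0 };
            int i, best = 0;

            /* midpoint of each lane's passing window, histogram the high nibble */
            for (i = 0; i < 8; i++) {
                    mid[i] = lo[i] + ((hi[i] - lo[i]) >> 1);
                    bins[mid[i] >> 4]++;
            }

            /* most popular 16-value bucket wins */
            for (i = 0; i < 16; i++)
                    if (bins[i] > bins[best])
                            best = i;

            /* clamp every lane's midpoint into the winning bucket */
            for (i = 0; i < 8; i++) {
                    uint8_t v = mid[i];

                    if (v < (best << 4))
                            v = best << 4;
                    if (v > ((best << 4) | 0xf))
                            v = (best << 4) | 0xf;
                    centred[i] = v;
            }
            *bucket = best;
    }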
+
+/*
+ * RAM reclocking
+ */
+#define T(t) cfg->timing_10_##t
+static int
+nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
+{
+ struct nva3_ram *ram = (void *)pfb->ram;
+ struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+ int tUNK_base, tUNK_40_0, prevCL;
+ u32 cur2, cur3, cur7, cur8;
+
+ cur2 = nv_rd32(pfb, 0x100228);
+ cur3 = nv_rd32(pfb, 0x10022c);
+ cur7 = nv_rd32(pfb, 0x10023c);
+ cur8 = nv_rd32(pfb, 0x100240);
+
+
+ case NV_MEM_TYPE_DDR2:
+ T(CWL) = T(CL) - 1;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+ break;
+ }
+
+ prevCL = (cur3 & 0x000000ff) + 1;
+ tUNK_base = ((cur7 & 0x00ff0000) >> 16) - prevCL;
+
+ timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+ timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+ max_t(u8, T(18), 1) << 16 |
+ (T(WTR) + 1 + T(CWL)) << 8 |
+ (5 + T(CL) - T(CWL));
+ timing[2] = (T(CWL) - 1) << 24 |
+ (T(RRD) << 16) |
+ (T(RCDWR) << 8) |
+ T(RCDRD);
+ timing[3] = (cur3 & 0x00ff0000) |
+ (0x30 + T(CL)) << 24 |
+ (0xb + T(CL)) << 8 |
+ (T(CL) - 1);
+ timing[4] = T(20) << 24 |
+ T(21) << 16 |
+ T(13) << 8 |
+ T(13);
+ timing[5] = T(RFC) << 24 |
+ max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+ max_t(u8, (T(CWL) + 6), (T(CL) + 2)) << 8 |
+ T(RP);
+ timing[6] = (0x5a + T(CL)) << 16 |
+ max_t(u8, 1, (6 - T(CL) + T(CWL))) << 8 |
+ (0x50 + T(CL) - T(CWL));
+ timing[7] = (cur7 & 0xff000000) |
+ ((tUNK_base + T(CL)) << 16) |
+ 0x202;
+ timing[8] = cur8 & 0xffffff00;
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ case NV_MEM_TYPE_GDDR3:
+ tUNK_40_0 = prevCL - (cur8 & 0xff);
+ if (tUNK_40_0 > 0)
+ timing[8] |= T(CL);
+ break;
+ default:
+ break;
+ }
+
+ nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
+ timing[0], timing[1], timing[2], timing[3]);
+ nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
+ timing[4], timing[5], timing[6], timing[7]);
+ nv_debug(pfb, " 240: %08x\n", timing[8]);
+ return 0;
+}
+#undef T
+
+static void
+nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
+{
+ ram_mask(fuc, mr[0], 0x100, 0x100);
+ ram_nsec(fuc, 1000);
+ ram_mask(fuc, mr[0], 0x100, 0x000);
+ ram_nsec(fuc, 1000);
+}
+
+static void
+nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+ u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+ if (!(mr1_old & 0x1)) {
+ ram_wr32(fuc, 0x1002d4, 0x00000001);
+ ram_wr32(fuc, mr[1], mr[1]);
+ ram_nsec(fuc, 1000);
+ }
+}
+
+static void
+nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
+{
+ u32 mr1_old = ram_rd32(fuc, mr[1]);
+
+ if (!(mr1_old & 0x40)) {
+ ram_wr32(fuc, mr[1], mr[1]);
+ ram_nsec(fuc, 1000);
+ }
+}
+
+static void
+nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
+{
+ ram_wr32(fuc, 0x004004, mclk->pll);
+ ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
+ ram_mask(fuc, 0x004000, 0x00000010, 0x00000000);
+ ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+ ram_mask(fuc, 0x004000, 0x00000010, 0x00000010);
+}
+
+static void
+nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
+{
+ struct nouveau_gpio *gpio = nouveau_gpio(fuc->base.pfb);
+ struct dcb_gpio_func func;
+ u32 reg, sh, gpio_val;
+ int ret;
+
+ if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+ ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (ret)
+ return;
+
+ nv50_gpio_location(func.line, &reg, &sh);
+ gpio_val = ram_rd32(fuc, gpioFBVREF);
+ if (gpio_val & (8 << sh))
+ val = !val;
+
+ ram_mask(fuc, gpioFBVREF, (0x3 << sh), ((val | 0x2) << sh));
+ ram_nsec(fuc, 20000);
+ }
+}
+
static int
nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
{
struct nouveau_bios *bios = nouveau_bios(pfb);
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
+ struct nva3_ltrain *train = &ram->ltrain;
struct nva3_clock_info mclk;
struct nouveau_ram_data *next;
u8 ver, hdr, cnt, len, strap;
u32 data;
- u32 r004018, r100760, ctrl;
+ u32 r004018, r100760, r100da0, r111100, ctrl;
u32 unk714, unk718, unk71c;
int ret, i;
+ u32 timing[9];
+ bool pll2pll;
next = &ram->base.target;
next->freq = freq;
ram->base.next = next;
+ if (ram->ltrain.state == NVA3_TRAIN_ONCE)
+ nva3_link_train(pfb);
+
/* lookup memory config data relevant to the target frequency */
i = 0;
- while ((data = nvbios_rammapEp(bios, i++, &ver, &hdr, &cnt, &len,
- &next->bios))) {
- if (freq / 1000 >= next->bios.rammap_min &&
- freq / 1000 <= next->bios.rammap_max)
- break;
- }
-
- if (!data || ver != 0x10 || hdr < 0x0e) {
+ data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
+ &next->bios);
+ if (!data || ver != 0x10 || hdr < 0x05) {
nv_error(pfb, "invalid/missing rammap entry\n");
return -EINVAL;
}
@@ -113,7 +539,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
&ver, &hdr, &next->bios);
- if (!data || ver != 0x10 || hdr < 0x0e) {
+ if (!data || ver != 0x10 || hdr < 0x09) {
nv_error(pfb, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
@@ -123,7 +549,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len,
&next->bios);
- if (!data || ver != 0x10 || hdr < 0x19) {
+ if (!data || ver != 0x10 || hdr < 0x17) {
nv_error(pfb, "invalid/missing timing entry\n");
return -EINVAL;
}
@@ -135,53 +561,99 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
return ret;
}
+ nva3_ram_timing_calc(pfb, timing);
+
ret = ram_init(fuc, pfb);
if (ret)
return ret;
+ /* Determine ram-specific MR values */
+ ram->base.mr[0] = ram_rd32(fuc, mr[0]);
+ ram->base.mr[1] = ram_rd32(fuc, mr[1]);
+ ram->base.mr[2] = ram_rd32(fuc, mr[2]);
+
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ ret = nouveau_sddr2_calc(&ram->base);
+ break;
+ case NV_MEM_TYPE_DDR3:
+ ret = nouveau_sddr3_calc(&ram->base);
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ ret = nouveau_gddr3_calc(&ram->base);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
/* XXX: where the fuck does 750MHz come from? */
if (freq <= 750000) {
r004018 = 0x10000000;
r100760 = 0x22222222;
+ r100da0 = 0x00000010;
} else {
r004018 = 0x00000000;
r100760 = 0x00000000;
+ r100da0 = 0x00000000;
}
+ if (!next->bios.ramcfg_10_DLLoff)
+ r004018 |= 0x00004000;
+
+ /* pll2pll requires switching to a safe clock first */
ctrl = ram_rd32(fuc, 0x004000);
- if (ctrl & 0x00000008) {
- if (mclk.pll) {
- ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
- ram_wr32(fuc, 0x004004, mclk.pll);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
- ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
- ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
- ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
- }
- } else {
- u32 ssel = 0x00000101;
- if (mclk.clk)
- ssel |= mclk.clk;
- else
- ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
- ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
- }
+ pll2pll = (!(ctrl & 0x00000008)) && mclk.pll;
+ /* Pre, NVIDIA does this outside the script */
if (next->bios.ramcfg_10_02_10) {
ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
} else {
ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
}
+ /* Always disable this bit during reclock */
+ ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+
+ /* If switching from non-pll to pll, lock before disabling FB */
+ if (mclk.pll && !pll2pll) {
+ ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
+ nva3_ram_lock_pll(fuc, &mclk);
+ }
+
+ /* Start with disabling some CRTCs and PFIFO? */
+ ram_wait_vblank(fuc);
+ ram_wr32(fuc, 0x611200, 0x3300);
+ ram_mask(fuc, 0x002504, 0x1, 0x1);
+ ram_nsec(fuc, 10000);
+ ram_wait(fuc, 0x002504, 0x10, 0x10, 20000); /* XXX: or longer? */
+ ram_block(fuc);
+ ram_nsec(fuc, 2000);
+
+ if (!next->bios.ramcfg_10_02_10) {
+ if (ram->base.type == NV_MEM_TYPE_GDDR3)
+ ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
+ else
+ ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
+ }
+
+ /* If we're disabling the DLL, do it now */
+ switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ nouveau_sddr3_dll_disable(fuc, ram->base.mr);
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ nouveau_gddr3_dll_disable(fuc, ram->base.mr);
+ break;
+ }
- if (!next->bios.rammap_10_04_02)
- ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
- ram_wr32(fuc, 0x611200, 0x00003300);
- if (!next->bios.ramcfg_10_02_10)
- ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+ if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT)
+ nva3_ram_fbvref(fuc, 0);
+ /* Brace RAM for impact */
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
@@ -189,24 +661,38 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x1002dc, 0x00000001);
ram_nsec(fuc, 2000);
- ctrl = ram_rd32(fuc, 0x004000);
- if (!(ctrl & 0x00000008) && mclk.pll) {
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+ ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
+
+ /* Fiddle with clocks */
+ /* There are 4 scenarios:
+ * pll->pll: first switch to a 324MHz clock, set up new PLL, switch
+ * clk->pll: Set up new PLL, switch
+ * pll->clk: Set up clock, switch
+ * clk->clk: Overwrite ctrl and other bits, switch */
+
+ /* Switch to regular clock - 324MHz */
+ if (pll2pll) {
+ ram_mask(fuc, 0x004000, 0x00000004, 0x00000004);
+ ram_mask(fuc, 0x004168, 0x003f3141, 0x00083101);
+ ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
ram_wr32(fuc, 0x004018, 0x00001000);
- ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
- ram_wr32(fuc, 0x004004, mclk.pll);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
- udelay(64);
- ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
- udelay(20);
- } else
- if (!mclk.pll) {
- ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
- ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+ nva3_ram_lock_pll(fuc, &mclk);
+ }
+
+ if (mclk.pll) {
+ ram_mask(fuc, 0x004000, 0x00000105, 0x00000105);
+ ram_wr32(fuc, 0x004018, 0x00001000 | r004018);
+ ram_wr32(fuc, 0x100da0, r100da0);
+ } else {
+ ram_mask(fuc, 0x004168, 0x003f3141, mclk.clk | 0x00000101);
+ ram_mask(fuc, 0x004000, 0x00000108, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
- ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+ ram_wr32(fuc, 0x004018, 0x00009000 | r004018);
+ ram_wr32(fuc, 0x100da0, r100da0);
}
+ ram_nsec(fuc, 20000);
if (next->bios.rammap_10_04_08) {
ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
@@ -220,6 +706,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
0x80000000);
ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
} else {
+ if (train->state == NVA3_TRAIN_DONE) {
+ ram_wr32(fuc, 0x100080, 0x1020);
+ ram_mask(fuc, 0x111400, 0xffffffff, train->r_111400);
+ ram_mask(fuc, 0x1111e0, 0xffffffff, train->r_1111e0);
+ ram_mask(fuc, 0x100720, 0xffffffff, train->r_100720);
+ }
ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
ram_mask(fuc, 0x100760, 0x22222222, r100760);
@@ -227,65 +719,131 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
}
+ if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+ ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
+ }
+
+ /* Final switch */
if (mclk.pll) {
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
- ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+ ram_mask(fuc, 0x004000, 0x00000008, 0x00000000);
}
- /*XXX: LEAVE */
ram_wr32(fuc, 0x1002dc, 0x00000000);
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x100210, 0x80000000);
- ram_nsec(fuc, 1000);
- ram_nsec(fuc, 1000);
+ ram_nsec(fuc, 2000);
- ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
- ram_nsec(fuc, 1000);
- ram_nuke(fuc, mr[0]);
- ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
- ram_nsec(fuc, 1000);
+ /* Set RAM MR parameters and timings */
+ for (i = 2; i >= 0; i--) {
+ if (ram_rd32(fuc, mr[i]) != ram->base.mr[i]) {
+ ram_wr32(fuc, mr[i], ram->base.mr[i]);
+ ram_nsec(fuc, 1000);
+ }
+ }
- ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
- ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+ ram_wr32(fuc, 0x100220[3], timing[3]);
+ ram_wr32(fuc, 0x100220[1], timing[1]);
+ ram_wr32(fuc, 0x100220[6], timing[6]);
+ ram_wr32(fuc, 0x100220[7], timing[7]);
+ ram_wr32(fuc, 0x100220[2], timing[2]);
+ ram_wr32(fuc, 0x100220[4], timing[4]);
+ ram_wr32(fuc, 0x100220[5], timing[5]);
+ ram_wr32(fuc, 0x100220[0], timing[0]);
+ ram_wr32(fuc, 0x100220[8], timing[8]);
+ /* Misc */
ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
- unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
- unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
- unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+ /* XXX: A lot of "chipset"/"ram type" specific stuff...? */
+ unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000130;
+ unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+ unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+ r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
+
+ if (next->bios.ramcfg_10_02_04) {
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR3:
+ if (nv_device(pfb)->chipset != 0xa8)
+ r111100 |= 0x00000004;
+ /* no break */
+ case NV_MEM_TYPE_DDR2:
+ r111100 |= 0x08000000;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
+ r111100 |= 0x1a800000;
+ unk714 |= 0x00000010;
+ break;
+ case NV_MEM_TYPE_DDR3:
+ if (nv_device(pfb)->chipset == 0xa8) {
+ r111100 |= 0x08000000;
+ } else {
+ r111100 &= ~0x00000004;
+ r111100 |= 0x12800000;
+ }
+ unk714 |= 0x00000010;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ r111100 |= 0x30000000;
+ unk714 |= 0x00000020;
+ break;
+ default:
+ break;
+ }
+ }
+
+ unk714 |= (next->bios.ramcfg_10_04_01) << 8;
+
if (next->bios.ramcfg_10_02_20)
unk714 |= 0xf0000000;
- if (!next->bios.ramcfg_10_02_04)
- unk714 |= 0x00000010;
- ram_wr32(fuc, 0x100714, unk714);
-
+ if (next->bios.ramcfg_10_02_02)
+ unk718 |= 0x00000100;
if (next->bios.ramcfg_10_02_01)
unk71c |= 0x00000100;
- ram_wr32(fuc, 0x10071c, unk71c);
+ if (next->bios.timing_10_24 != 0xff) {
+ unk718 &= ~0xf0000000;
+ unk718 |= next->bios.timing_10_24 << 28;
+ }
+ if (next->bios.ramcfg_10_02_10)
+ r111100 &= ~0x04020000;
- if (next->bios.ramcfg_10_02_02)
- unk718 |= 0x00000100;
- ram_wr32(fuc, 0x100718, unk718);
+ ram_mask(fuc, 0x100714, 0xffffffff, unk714);
+ ram_mask(fuc, 0x10071c, 0xffffffff, unk71c);
+ ram_mask(fuc, 0x100718, 0xffffffff, unk718);
+ ram_mask(fuc, 0x111100, 0xffffffff, r111100);
- if (next->bios.ramcfg_10_02_10)
- ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+ if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT)
+ nva3_ram_fbvref(fuc, 1);
- ram_mask(fuc, mr[0], 0x100, 0x100);
- ram_nsec(fuc, 1000);
- ram_mask(fuc, mr[0], 0x100, 0x000);
- ram_nsec(fuc, 1000);
+ /* Reset DLL */
+ if (!next->bios.ramcfg_10_DLLoff)
+ nouveau_sddr2_dll_reset(fuc);
- ram_nsec(fuc, 2000);
- ram_nsec(fuc, 12000);
+ if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+ ram_nsec(fuc, 31000);
+ } else {
+ ram_nsec(fuc, 14000);
+ }
+
+ if (ram->base.type == NV_MEM_TYPE_DDR3) {
+ ram_wr32(fuc, 0x100264, 0x1);
+ ram_nsec(fuc, 2000);
+ }
- ram_wr32(fuc, 0x611200, 0x00003330);
+ ram_nuke(fuc, 0x100700);
+ ram_mask(fuc, 0x100700, 0x01000000, 0x01000000);
+ ram_mask(fuc, 0x100700, 0x01000000, 0x00000000);
+
+ /* Re-enable FB */
+ ram_unblock(fuc);
+ ram_wr32(fuc, 0x611200, 0x3330);
+
+ /* Post fiddlings */
if (next->bios.rammap_10_04_02)
ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
if (next->bios.ramcfg_10_02_10) {
@@ -313,7 +871,22 @@ nva3_ram_prog(struct nouveau_fb *pfb)
struct nouveau_device *device = nv_device(pfb);
struct nva3_ram *ram = (void *)pfb->ram;
struct nva3_ramfuc *fuc = &ram->fuc;
- ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true));
+ bool exec = nouveau_boolopt(device->cfgopt, "NvMemExec", true);
+
+ if (exec) {
+ nv_mask(pfb, 0x001534, 0x2, 0x2);
+
+ ram_exec(fuc, true);
+
+ /* Post-processing, avoids flicker */
+ nv_mask(pfb, 0x002504, 0x1, 0x0);
+ nv_mask(pfb, 0x001534, 0x2, 0x0);
+
+ nv_mask(pfb, 0x616308, 0x10, 0x10);
+ nv_mask(pfb, 0x616b08, 0x10, 0x10);
+ } else {
+ ram_exec(fuc, false);
+ }
return 0;
}
@@ -330,38 +903,24 @@ nva3_ram_init(struct nouveau_object *object)
{
struct nouveau_fb *pfb = (void *)object->parent;
struct nva3_ram *ram = (void *)object;
- int ret, i;
+ int ret;
ret = nouveau_ram_init(&ram->base);
if (ret)
return ret;
- /* prepare for ddr link training, and load training patterns */
- switch (ram->base.type) {
- case NV_MEM_TYPE_DDR3: {
- if (nv_device(pfb)->chipset == 0xa8) {
- static const u32 pattern[16] = {
- 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
- 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
- 0x33333333, 0x55555555, 0x77777777, 0x66666666,
- 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
- };
-
- nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
- nv_wr32(pfb, 0x1005a8, 0x0000ffff);
- nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
- for (i = 0; i < 0x30; i++) {
- nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
- nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
- nv_wr32(pfb, 0x10f900, pattern[i % 16]);
- nv_wr32(pfb, 0x10f920, pattern[i % 16]);
- }
- }
- }
- break;
- default:
- break;
- }
+ nva3_link_train_init(pfb);
+
+ return 0;
+}
+
+static int
+nva3_ram_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_fb *pfb = (void *)object->parent;
+
+ if (!suspend)
+ nva3_link_train_fini(pfb);
return 0;
}
@@ -371,8 +930,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
+ struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+ struct dcb_gpio_func func;
struct nva3_ram *ram;
int ret, i;
+ u32 reg, shift;
ret = nv50_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
@@ -380,7 +943,9 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (ram->base.type) {
+ case NV_MEM_TYPE_DDR2:
case NV_MEM_TYPE_DDR3:
+ case NV_MEM_TYPE_GDDR3:
ram->base.calc = nva3_ram_calc;
ram->base.prog = nva3_ram_prog;
ram->base.tidy = nva3_ram_tidy;
@@ -390,31 +955,41 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
+ ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
+ ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
+ ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+ ram->fuc.r_0x100080 = ramfuc_reg(0x100080);
ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
for (i = 0; i < 9; i++)
ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+ ram->fuc.r_0x100264 = ramfuc_reg(0x100264);
ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+ ram->fuc.r_0x100700 = ramfuc_reg(0x100700);
ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+ ram->fuc.r_0x100720 = ramfuc_reg(0x100720);
ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
+ ram->fuc.r_0x100da0 = ramfuc_stride(0x100da0, 4, ram->base.part_mask);
ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+ ram->fuc.r_0x1111e0 = ramfuc_reg(0x1111e0);
+ ram->fuc.r_0x111400 = ramfuc_reg(0x111400);
ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
if (ram->base.ranks > 1) {
@@ -429,6 +1004,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
}
+ ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ if (ret == 0) {
+ nv50_gpio_location(func.line, &reg, &shift);
+ ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
+ }
+
return 0;
}
@@ -438,6 +1019,6 @@ nva3_ram_oclass = {
.ctor = nva3_ram_ctor,
.dtor = _nouveau_ram_dtor,
.init = nva3_ram_init,
- .fini = _nouveau_ram_fini,
+ .fini = nva3_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
index bb1eb8f3e639..252575f3aa29 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
@@ -66,7 +66,7 @@ nouveau_sddr2_calc(struct nouveau_ram *ram)
case 0x10:
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_02_40;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
ODT = ram->next->bios.timing_10_ODT & 3;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
index 83949b11833a..a2dca4869e52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -80,7 +80,7 @@ nouveau_sddr3_calc(struct nouveau_ram *ram)
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
- DLL = !ram->next->bios.ramcfg_10_02_40;
+ DLL = !ram->next->bios.ramcfg_10_DLLoff;
ODT = ram->next->bios.timing_10_ODT;
break;
case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index 1864fa98e6b1..2e30d5a62d6e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -54,7 +54,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
}
}
-static int
+int
nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2b1bf545e488..0dc605db7ec8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -473,18 +473,56 @@ nouveau_i2c_extdev_sclass[] = {
nouveau_anx9805_sclass,
};
+static void
+nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
+ struct dcb_i2c_entry *info)
+{
+ const struct nouveau_i2c_impl *impl = (void *)nv_oclass(i2c);
+ struct nouveau_oclass *oclass;
+ struct nouveau_object *parent;
+ struct nouveau_object *object;
+ int ret, pad;
+
+ if (info->share != DCB_I2C_UNUSED) {
+ pad = info->share;
+ oclass = impl->pad_s;
+ } else {
+ if (type != DCB_I2C_NVIO_AUX)
+ pad = 0x100 + info->drive;
+ else
+ pad = 0x100 + info->auxch;
+ oclass = impl->pad_x;
+ }
+
+ ret = nouveau_object_ctor(NULL, nv_object(i2c), oclass, NULL, pad,
+ &parent);
+ if (ret < 0)
+ return;
+
+ oclass = impl->sclass;
+ do {
+ ret = -EINVAL;
+ if (oclass->handle == type) {
+ ret = nouveau_object_ctor(parent, nv_object(i2c),
+ oclass, info, index,
+ &object);
+ }
+ } while (ret && (++oclass)->handle);
+
+ nouveau_object_ref(NULL, &parent);
+}
+
int
nouveau_i2c_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int length, void **pobject)
{
- const struct nouveau_i2c_impl *impl = (void *)oclass;
struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_i2c *i2c;
struct nouveau_object *object;
struct dcb_i2c_entry info;
- int ret, i, j, index = -1, pad;
+ int ret, i, j, index = -1;
struct dcb_output outp;
u8 ver, hdr;
u32 data;
@@ -507,43 +545,40 @@ nouveau_i2c_create_(struct nouveau_object *parent,
INIT_LIST_HEAD(&i2c->ports);
while (!dcb_i2c_parse(bios, ++index, &info)) {
- if (info.type == DCB_I2C_UNUSED)
+ switch (info.type) {
+ case DCB_I2C_NV04_BIT:
+ case DCB_I2C_NV4E_BIT:
+ case DCB_I2C_NVIO_BIT:
+ nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+ info.type, &info);
+ break;
+ case DCB_I2C_NVIO_AUX:
+ nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+ info.type, &info);
+ break;
+ case DCB_I2C_PMGR:
+ if (info.drive != DCB_I2C_UNUSED) {
+ nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
+ DCB_I2C_NVIO_BIT,
+ &info);
+ }
+ if (info.auxch != DCB_I2C_UNUSED) {
+ nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
+ DCB_I2C_NVIO_AUX,
+ &info);
+ }
+ break;
+ case DCB_I2C_UNUSED:
+ default:
continue;
-
- if (info.share != DCB_I2C_UNUSED) {
- if (info.type == DCB_I2C_NVIO_AUX)
- pad = info.drive;
- else
- pad = info.share;
- oclass = impl->pad_s;
- } else {
- pad = 0x100 + info.drive;
- oclass = impl->pad_x;
}
-
- ret = nouveau_object_ctor(NULL, *pobject, oclass,
- NULL, pad, &parent);
- if (ret < 0)
- continue;
-
- oclass = impl->sclass;
- do {
- ret = -EINVAL;
- if (oclass->handle == info.type) {
- ret = nouveau_object_ctor(parent, *pobject,
- oclass, &info,
- index, &object);
- }
- } while (ret && (++oclass)->handle);
-
- nouveau_object_ref(NULL, &parent);
}
/* in addition to the busses specified in the i2c table, there
* may be ddc/aux channels hiding behind external tmds/dp/etc
* transmitters.
*/
- index = ((index + 0x0f) / 0x10) * 0x10;
+ index = NV_I2C_EXT(0);
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
if (!outp.location || !outp.extdev)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
new file mode 100644
index 000000000000..06a2b87ccbf1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+ nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+ const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+ const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+ const u32 urep = unksel ? 0x01000000 : 0x02000000;
+ u32 ctrl, timeout;
+
+ /* wait up to 1ms for any previous transaction to be done... */
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+ return -EBUSY;
+ }
+ } while (ctrl & 0x03010000);
+
+ /* set some magic, and wait up to 1ms for it to appear */
+ nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("magic wait 0x%08x\n", ctrl);
+ auxch_fini(aux, ch);
+ return -EBUSY;
+ }
+ } while ((ctrl & 0x03000000) != urep);
+
+ return 0;
+}
+
+int
+gm204_aux(struct nouveau_i2c_port *base, bool retry,
+ u8 type, u32 addr, u8 *data, u8 size)
+{
+ struct nouveau_i2c *aux = nouveau_i2c(base);
+ struct nv50_i2c_port *port = (void *)base;
+ u32 ctrl, stat, timeout, retries;
+ u32 xbuf[4] = {};
+ int ch = port->addr;
+ int ret, i;
+
+ AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+ ret = auxch_init(aux, ch);
+ if (ret)
+ goto out;
+
+ stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
+ if (!(stat & 0x10000000)) {
+ AUX_DBG("sink not detected\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!(type & 1)) {
+ memcpy(xbuf, data, size);
+ for (i = 0; i < 16; i += 4) {
+ AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+ nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
+ }
+ }
+
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ ctrl &= ~0x0001f0ff;
+ ctrl |= type << 12;
+ ctrl |= size - 1;
+ nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+ for (retries = 0; !ret && retries < 32; retries++) {
+ /* reset, and delay a while if this is a retry */
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
+ if (retries)
+ udelay(400);
+
+ /* transaction request, wait up to 1ms for it to complete */
+ nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
+
+ timeout = 1000;
+ do {
+ ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
+ udelay(1);
+ if (!timeout--) {
+ AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+ ret = -EIO;
+ goto out;
+ }
+ } while (ctrl & 0x00010000);
+ ret = 1;
+
+ /* read status, and check if transaction completed ok */
+ stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
+ if ((stat & 0x000f0000) == 0x00080000 ||
+ (stat & 0x000f0000) == 0x00020000)
+ ret = retry ? 0 : 1;
+ if ((stat & 0x00000100))
+ ret = -ETIMEDOUT;
+ if ((stat & 0x00000e00))
+ ret = -EIO;
+
+ AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+ }
+
+ if (type & 1) {
+ for (i = 0; i < 16; i += 4) {
+ xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
+ AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+ }
+ memcpy(data, xbuf, size);
+ }
+
+out:
+ auxch_fini(aux, ch);
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nouveau_i2c_func
+gm204_aux_func = {
+ .aux = gm204_aux,
+};
+
+int
+gm204_aux_port_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct dcb_i2c_entry *info = data;
+ struct nv50_i2c_port *port;
+ int ret;
+
+ ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+ &nouveau_i2c_aux_algo, &gm204_aux_func,
+ &port);
+ *pobject = nv_object(port);
+ if (ret)
+ return ret;
+
+ port->base.aux = info->auxch;
+ port->addr = info->auxch;
+ return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_sclass[] = {
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_i2c_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = nv50_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_aux_port_ctor,
+ .dtor = _nouveau_i2c_port_dtor,
+ .init = _nouveau_i2c_port_init,
+ .fini = _nouveau_i2c_port_fini,
+ },
+ },
+ {}
+};
+
+struct nouveau_oclass *
+gm204_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0x24),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+ .sclass = gm204_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &gm204_i2c_pad_oclass,
+ .aux = 8,
+ .aux_stat = nve0_aux_stat,
+ .aux_mask = nve0_aux_mask,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
index 5d2a77421c74..9ef965692fb1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -10,8 +10,6 @@ struct nv50_i2c_priv {
struct nv50_i2c_port {
struct nouveau_i2c_port base;
u32 addr;
- u32 ctrl;
- u32 data;
u32 state;
};
@@ -29,4 +27,8 @@ int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
void nv94_i2c_acquire(struct nouveau_i2c_port *);
void nv94_i2c_release(struct nouveau_i2c_port *);
+int nvd0_i2c_port_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index f59c3a255462..e383ee81f4d2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -214,10 +214,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
port->state = 7;
port->addr = nv50_i2c_addr[info->drive];
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->share * 0x50);
- port->data = 0x0000e001;
- }
return 0;
}
@@ -242,13 +238,8 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- port->base.aux = info->drive;
- port->addr = info->drive;
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->drive * 0x50);
- port->data = 0x00002002;
- }
-
+ port->base.aux = info->auxch;
+ port->addr = info->auxch;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index 364ddb1c5f03..fd99380502ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -48,7 +48,7 @@ nvd0_i2c_func = {
.sense_sda = nvd0_i2c_sense_sda,
};
-static int
+int
nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 index,
struct nouveau_object **pobject)
@@ -66,10 +66,6 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
port->state = 0x00000007;
port->addr = 0x00d014 + (info->drive * 0x20);
- if (info->share != DCB_I2C_UNUSED) {
- port->ctrl = 0x00e500 + (info->share * 0x50);
- port->data = 0x0000e001;
- }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
index cae77e1ad8dc..25fe5c2d110e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
@@ -24,7 +24,7 @@
#include "nv50.h"
-static void
+void
nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
u32 intr = nv_rd32(i2c, 0x00dc60);
@@ -38,7 +38,7 @@ nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
nv_wr32(i2c, 0x00dc60, intr);
}
-static void
+void
nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
{
u32 temp = nv_rd32(i2c, 0x00dc68), i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
new file mode 100644
index 000000000000..f0e6fbbaa8cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "pad.h"
+
+struct gm204_i2c_pad {
+ struct nvkm_i2c_pad base;
+ int addr;
+};
+
+static int
+gm204_i2c_pad_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct gm204_i2c_pad *pad = (void *)object;
+ nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
+ return nvkm_i2c_pad_fini(&pad->base, suspend);
+}
+
+static int
+gm204_i2c_pad_init(struct nouveau_object *object)
+{
+ struct nouveau_i2c *i2c = (void *)object->engine;
+ struct gm204_i2c_pad *pad = (void *)object;
+
+ switch (nv_oclass(pad->base.next)->handle) {
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
+ nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+ break;
+ case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+ default:
+ nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+ break;
+ }
+
+ nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
+ return nvkm_i2c_pad_init(&pad->base);
+}
+
+static int
+gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 index,
+ struct nouveau_object **pobject)
+{
+ struct gm204_i2c_pad *pad;
+ int ret;
+
+ ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
+ *pobject = nv_object(pad);
+ if (ret)
+ return ret;
+
+ pad->addr = index * 0x50;
+ return 0;
+}
+
+struct nouveau_oclass
+gm204_i2c_pad_oclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm204_i2c_pad_ctor,
+ .dtor = _nvkm_i2c_pad_dtor,
+ .init = gm204_i2c_pad_init,
+ .fini = gm204_i2c_pad_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
index 780090b6425a..4fe7ae3fde4e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
@@ -5,6 +5,7 @@
extern struct nouveau_oclass nv04_i2c_pad_oclass;
extern struct nouveau_oclass nv94_i2c_pad_oclass;
+extern struct nouveau_oclass gm204_i2c_pad_oclass;
#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
@@ -82,4 +83,7 @@ struct nouveau_i2c_impl {
void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+void nve0_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
+void nve0_aux_mask(struct nouveau_i2c *, u32, u32, u32);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
index e89789a53b80..ec03f9a4290b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -50,6 +50,7 @@ handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank)
+handler(TRAIN , 0x0000, 0x0000, #memx_func_train)
memx_func_tail:
.equ #memx_func_size #memx_func_next - #memx_func_head
@@ -63,6 +64,10 @@ memx_ts_end:
memx_data_head:
.skip 0x0800
memx_data_tail:
+
+memx_train_head:
+.skip 0x0100
+memx_train_tail:
#endif
/******************************************************************************
@@ -260,6 +265,101 @@ memx_func_delay:
// description
//
// $r15 - current (memx)
+// $r4 - packet length
+// $r3 - opcode description
+// $r0 - zero
+memx_func_train:
+#if NVKM_PPWR_CHIPSET == GT215
+// $r5 - outer loop counter
+// $r6 - inner loop counter
+// $r7 - entry counter (#memx_train_head + $r7)
+ movw $r5 0x3
+ movw $r7 0x0
+
+// Read random memory to wake up... things
+ imm32($r9, 0x700000)
+ nv_rd32($r8,$r9)
+ movw $r14 0x2710
+ call(nsec)
+
+ memx_func_train_loop_outer:
+ mulu $r8 $r5 0x101
+ sethi $r8 0x02000000
+ imm32($r9, 0x1111e0)
+ nv_wr32($r9, $r8)
+ push $r5
+
+ movw $r6 0x0
+ memx_func_train_loop_inner:
+ movw $r8 0x1111
+ mulu $r9 $r6 $r8
+ shl b32 $r8 $r9 0x10
+ or $r8 $r9
+ imm32($r9, 0x100720)
+ nv_wr32($r9, $r8)
+
+ imm32($r9, 0x100080)
+ nv_rd32($r8, $r9)
+ or $r8 $r8 0x20
+ nv_wr32($r9, $r8)
+
+ imm32($r9, 0x10053c)
+ imm32($r8, 0x80003002)
+ nv_wr32($r9, $r8)
+
+ imm32($r14, 0x100560)
+ imm32($r13, 0x80000000)
+ add b32 $r12 $r13 0
+ imm32($r11, 0x001e8480)
+ call(wait)
+
+ // $r5 - inner inner loop counter
+ // $r9 - result
+ movw $r5 0
+ imm32($r9, 0x8300ffff)
+ memx_func_train_loop_4x:
+ imm32($r10, 0x100080)
+ nv_rd32($r8, $r10)
+ imm32($r11, 0xffffffdf)
+ and $r8 $r11
+ nv_wr32($r10, $r8)
+
+ imm32($r10, 0x10053c)
+ imm32($r8, 0x80003002)
+ nv_wr32($r10, $r8)
+
+ imm32($r14, 0x100560)
+ imm32($r13, 0x80000000)
+ mov b32 $r12 $r13
+ imm32($r11, 0x00002710)
+ call(wait)
+
+ nv_rd32($r13, $r14)
+ and $r9 $r9 $r13
+
+ add b32 $r5 1
+ cmp b16 $r5 0x4
+ bra l #memx_func_train_loop_4x
+
+ add b32 $r10 $r7 #memx_train_head
+ st b32 D[$r10 + 0] $r9
+ add b32 $r6 1
+ add b32 $r7 4
+
+ cmp b16 $r6 0x10
+ bra l #memx_func_train_loop_inner
+
+ pop $r5
+ add b32 $r5 1
+ cmp b16 $r5 7
+ bra l #memx_func_train_loop_outer
+
+#endif
+ ret
+
+// description
+//
+// $r15 - current (memx)
// $r14 - sender process name
// $r13 - message (exec)
// $r12 - head of script
@@ -307,8 +407,19 @@ memx_exec:
// $r11 - data1
// $r0 - zero
memx_info:
+ cmp b16 $r12 0x1
+ bra e #memx_info_train
+
+ memx_info_data:
mov $r12 #memx_data_head
mov $r11 #memx_data_tail - #memx_data_head
+ bra #memx_info_send
+
+ memx_info_train:
+ mov $r12 #memx_train_head
+ mov $r11 #memx_train_tail - #memx_train_head
+
+ memx_info_send:
call(send)
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4d278a96b2bb..713e11e2953d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000061c,
- 0x0000060e,
+ 0x0000062d,
+ 0x0000061f,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000620,
- 0x0000061e,
+ 0x00000631,
+ 0x0000062f,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a24,
- 0x000008cb,
+ 0x00000a35,
+ 0x000008dc,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000a45,
- 0x00000a26,
+ 0x00000a56,
+ 0x00000a37,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000a50,
- 0x00000a4e,
+ 0x00000a61,
+ 0x00000a5f,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nv108_pwr_data[] = {
0x00010006,
0x00000000,
0x0000057b,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x000005c3,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00000400,
0x00000800,
0x00001000,
@@ -776,7 +845,7 @@ uint32_t nv108_pwr_data[] = {
0x00020000,
0x00040000,
0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00100000,
0x00200000,
0x00400000,
@@ -844,9 +913,6 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nv108_pwr_code[] = {
@@ -1215,10 +1281,10 @@ uint32_t nv108_pwr_code[] = {
0xf40464f0,
0x2c06f70b,
0xb50066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x0500: memx_func_leave */
0x66cf2c06,
- 0xef06b500,
+ 0xf206b500,
0xe4400406,
0x0006f607,
/* 0x0512: memx_func_leave_wait */
@@ -1270,370 +1336,374 @@ uint32_t nv108_pwr_code[] = {
0x9800f800,
0x10b6001e,
0x005d7e04,
-/* 0x05c3: memx_exec */
- 0xf900f800,
- 0xb2d0f9e0,
-/* 0x05cb: memx_exec_next */
- 0x98b2b2c1,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0x1ef412a6,
- 0xee0b98e5,
- 0xbbef0c98,
- 0xc44b02cb,
- 0x00bbcf07,
- 0xe0fcd0fc,
- 0x0002c27e,
-/* 0x0602: memx_info */
- 0xc04c00f8,
+/* 0x05c3: memx_func_train */
+ 0xf800f800,
+/* 0x05c5: memx_exec */
+ 0xf9e0f900,
+ 0xb2c1b2d0,
+/* 0x05cd: memx_exec_next */
+ 0x001398b2,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12a655f9,
+ 0x98e51ef4,
+ 0x0c98f10b,
+ 0x02cbbbf2,
+ 0xcf07c44b,
+ 0xd0fc00bb,
+ 0xc27ee0fc,
+ 0x00f80002,
+/* 0x0604: memx_info */
+ 0xf401c670,
+/* 0x060a: memx_info_data */
+ 0xcc4c0c0b,
0x08004b03,
- 0x0002c27e,
-/* 0x060e: memx_recv */
- 0xd6b000f8,
- 0xb20bf401,
- 0xf400d6b0,
- 0x00f8eb0b,
-/* 0x061c: memx_init */
-/* 0x061e: perf_recv */
- 0x00f800f8,
-/* 0x0620: perf_init */
-/* 0x0622: i2c_drive_scl */
- 0x36b000f8,
- 0x0d0bf400,
- 0xf607e040,
- 0x04bd0001,
-/* 0x0632: i2c_drive_scl_lo */
- 0xe44000f8,
- 0x0001f607,
- 0x00f804bd,
-/* 0x063c: i2c_drive_sda */
- 0xf40036b0,
- 0xe0400d0b,
- 0x0002f607,
- 0x00f804bd,
-/* 0x064c: i2c_drive_sda_lo */
- 0xf607e440,
- 0x04bd0002,
-/* 0x0656: i2c_sense_scl */
- 0x32f400f8,
- 0x07c44301,
- 0xfd0033cf,
- 0x0bf40431,
- 0x0131f406,
-/* 0x0668: i2c_sense_scl_done */
-/* 0x066a: i2c_sense_sda */
- 0x32f400f8,
- 0x07c44301,
- 0xfd0033cf,
- 0x0bf40432,
- 0x0131f406,
-/* 0x067c: i2c_sense_sda_done */
-/* 0x067e: i2c_raise_scl */
- 0x40f900f8,
- 0x03089844,
- 0x06227e01,
-/* 0x0689: i2c_raise_scl_wait */
- 0x03e84e00,
- 0x00005d7e,
- 0x0006567e,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x069d: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x06a1: i2c_start */
- 0x06567e00,
- 0x0d11f400,
- 0x00066a7e,
- 0xf40611f4,
-/* 0x06b2: i2c_start_rep */
- 0x00032e0e,
- 0x0006227e,
- 0x3c7e0103,
- 0x76bb0006,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb600067e,
- 0x11f40464,
-/* 0x06dd: i2c_start_send */
- 0x7e00031d,
- 0x4e00063c,
- 0x5d7e1388,
- 0x00030000,
- 0x0006227e,
- 0x7e13884e,
-/* 0x06f7: i2c_start_out */
- 0xf800005d,
-/* 0x06f9: i2c_stop */
- 0x7e000300,
- 0x03000622,
- 0x063c7e00,
- 0x03e84e00,
- 0x00005d7e,
- 0x227e0103,
- 0x884e0006,
- 0x005d7e13,
+/* 0x0613: memx_info_train */
+ 0x4c090ef4,
+ 0x004b0bcc,
+/* 0x0619: memx_info_send */
+ 0x02c27e01,
+/* 0x061f: memx_recv */
+ 0xb000f800,
+ 0x0bf401d6,
+ 0x00d6b0a3,
+ 0xf8dc0bf4,
+/* 0x062d: memx_init */
+/* 0x062f: perf_recv */
+ 0xf800f800,
+/* 0x0631: perf_init */
+/* 0x0633: i2c_drive_scl */
+ 0xb000f800,
+ 0x0bf40036,
+ 0x07e0400d,
+ 0xbd0001f6,
+/* 0x0643: i2c_drive_scl_lo */
+ 0x4000f804,
+ 0x01f607e4,
+ 0xf804bd00,
+/* 0x064d: i2c_drive_sda */
+ 0x0036b000,
+ 0x400d0bf4,
+ 0x02f607e0,
+ 0xf804bd00,
+/* 0x065d: i2c_drive_sda_lo */
+ 0x07e44000,
+ 0xbd0002f6,
+/* 0x0667: i2c_sense_scl */
+ 0xf400f804,
+ 0xc4430132,
+ 0x0033cf07,
+ 0xf40431fd,
+ 0x31f4060b,
+/* 0x0679: i2c_sense_scl_done */
+/* 0x067b: i2c_sense_sda */
+ 0xf400f801,
+ 0xc4430132,
+ 0x0033cf07,
+ 0xf40432fd,
+ 0x31f4060b,
+/* 0x068d: i2c_sense_sda_done */
+/* 0x068f: i2c_raise_scl */
+ 0xf900f801,
+ 0x08984440,
+ 0x337e0103,
+/* 0x069a: i2c_raise_scl_wait */
+ 0xe84e0006,
+ 0x005d7e03,
+ 0x06677e00,
+ 0x0901f400,
+ 0xf40142b6,
+/* 0x06ae: i2c_raise_scl_done */
+ 0x40fcef1b,
+/* 0x06b2: i2c_start */
+ 0x677e00f8,
+ 0x11f40006,
+ 0x067b7e0d,
+ 0x0611f400,
+/* 0x06c3: i2c_start_rep */
+ 0x032e0ef4,
+ 0x06337e00,
0x7e010300,
- 0x4e00063c,
- 0x5d7e1388,
- 0x00f80000,
-/* 0x0728: i2c_bitw */
- 0x00063c7e,
- 0x7e03e84e,
- 0xbb00005d,
+ 0xbb00064d,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x00067e7e,
+ 0x00068f7e,
0xf40464b6,
- 0x884e1711,
- 0x005d7e13,
- 0x7e000300,
- 0x4e000622,
- 0x5d7e1388,
-/* 0x0766: i2c_bitw_out */
- 0x00f80000,
-/* 0x0768: i2c_bitr */
- 0x3c7e0103,
+/* 0x06ee: i2c_start_send */
+ 0x00031d11,
+ 0x00064d7e,
+ 0x7e13884e,
+ 0x0300005d,
+ 0x06337e00,
+ 0x13884e00,
+ 0x00005d7e,
+/* 0x0708: i2c_start_out */
+/* 0x070a: i2c_stop */
+ 0x000300f8,
+ 0x0006337e,
+ 0x4d7e0003,
0xe84e0006,
0x005d7e03,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x7e7e50fc,
- 0x64b60006,
- 0x1a11f404,
- 0x00066a7e,
- 0x227e0003,
- 0x884e0006,
- 0x005d7e13,
- 0x013cf000,
-/* 0x07ab: i2c_bitr_done */
- 0xf80131f4,
-/* 0x07ad: i2c_get_byte */
- 0x04000500,
-/* 0x07b1: i2c_get_byte_next */
- 0x0154b608,
+ 0x7e010300,
+ 0x4e000633,
+ 0x5d7e1388,
+ 0x01030000,
+ 0x00064d7e,
+ 0x7e13884e,
+ 0xf800005d,
+/* 0x0739: i2c_bitw */
+ 0x064d7e00,
+ 0x03e84e00,
+ 0x00005d7e,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x07687e50,
+ 0x068f7e50,
0x0464b600,
- 0xfd2a11f4,
- 0x42b60553,
- 0xd81bf401,
- 0x76bb0103,
+ 0x4e1711f4,
+ 0x5d7e1388,
+ 0x00030000,
+ 0x0006337e,
+ 0x7e13884e,
+/* 0x0777: i2c_bitw_out */
+ 0xf800005d,
+/* 0x0779: i2c_bitr */
+ 0x7e010300,
+ 0x4e00064d,
+ 0x5d7e03e8,
+ 0x76bb0000,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000728,
-/* 0x07fa: i2c_get_byte_done */
- 0x00f80464,
-/* 0x07fc: i2c_put_byte */
-/* 0x07fe: i2c_put_byte_next */
- 0x42b60804,
- 0x3854ff01,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x07287e50,
- 0x0464b600,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+ 0xb600068f,
+ 0x11f40464,
+ 0x067b7e1a,
+ 0x7e000300,
+ 0x4e000633,
+ 0x5d7e1388,
+ 0x3cf00000,
+ 0x0131f401,
+/* 0x07bc: i2c_bitr_done */
+/* 0x07be: i2c_get_byte */
+ 0x000500f8,
+/* 0x07c2: i2c_get_byte_next */
+ 0x54b60804,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x797e50fc,
+ 0x64b60007,
+ 0x2a11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0xbb0103d8,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0007397e,
+/* 0x080b: i2c_get_byte_done */
+ 0xf80464b6,
+/* 0x080d: i2c_put_byte */
+/* 0x080f: i2c_put_byte_next */
+ 0xb6080400,
+ 0x54ff0142,
+ 0x0076bb38,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x687e50fc,
+ 0x397e50fc,
0x64b60007,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x0854: i2c_put_byte_done */
-/* 0x0856: i2c_addr */
- 0x76bb00f8,
+ 0x3411f404,
+ 0xf40046b0,
+ 0x76bbd81b,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60006a1,
+ 0xb6000779,
0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0x0076bb0f,
+ 0xf40136b0,
+ 0x32f4061b,
+/* 0x0865: i2c_put_byte_done */
+/* 0x0867: i2c_addr */
+ 0xbb00f801,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0007fc7e,
-/* 0x089b: i2c_addr_done */
- 0xf80464b6,
-/* 0x089d: i2c_acquire_addr */
- 0xf8cec700,
- 0xb705e4b6,
- 0xf8d014e0,
-/* 0x08a9: i2c_acquire */
- 0x089d7e00,
- 0x00047e00,
- 0x03d9f000,
- 0x00002e7e,
-/* 0x08ba: i2c_release */
- 0x9d7e00f8,
+ 0x0006b27e,
+ 0xf40464b6,
+ 0xc3e72911,
+ 0x34b6012e,
+ 0x0553fd01,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x080d7e50,
+ 0x0464b600,
+/* 0x08ac: i2c_addr_done */
+/* 0x08ae: i2c_acquire_addr */
+ 0xcec700f8,
+ 0x05e4b6f8,
+ 0xd014e0b7,
+/* 0x08ba: i2c_acquire */
+ 0xae7e00f8,
0x047e0008,
- 0xdaf00000,
+ 0xd9f00000,
0x002e7e03,
-/* 0x08cb: i2c_recv */
- 0xf400f800,
- 0xc1c70132,
- 0x0214b6f8,
- 0xf52816b0,
- 0xb801371f,
- 0x000be813,
- 0xb8003298,
- 0x000bc013,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+/* 0x08cb: i2c_release */
+ 0x7e00f800,
+ 0x7e0008ae,
+ 0xf0000004,
+ 0x2e7e03da,
+ 0x00f80000,
+/* 0x08dc: i2c_recv */
+ 0xc70132f4,
+ 0x14b6f8c1,
+ 0x2816b002,
+ 0x01371ff5,
+ 0x0cf413b8,
+ 0x00329800,
+ 0x0ccc13b8,
+ 0x00319800,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x08ba7e50,
+ 0x0464b600,
+ 0xd6b0d0fc,
+ 0xb01bf500,
+ 0xbb000500,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0008a97e,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b01bf5,
- 0x76bb0005,
+ 0x0008677e,
+ 0xf50464b6,
+ 0xc700cc11,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000856,
+ 0xb600080d,
0x11f50464,
- 0xc5c700cc,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xfc7e50fc,
- 0x64b60007,
- 0xa911f504,
- 0xbb010500,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0008567e,
- 0xf50464b6,
- 0xbb008711,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0007ad7e,
- 0xf40464b6,
- 0x5bcb6711,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xf97e50fc,
- 0x64b60006,
- 0xbd5bb204,
- 0x410ef474,
-/* 0x09d0: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x00053b1b,
- 0x0008567e,
- 0xc73211f4,
- 0xfc7ee0c5,
- 0x11f40007,
- 0x7e000528,
- 0xf4000856,
- 0xb5c71f11,
- 0x07fc7ee0,
- 0x1511f400,
- 0x0006f97e,
- 0xc5c774bd,
- 0x091bf408,
- 0xf40232f4,
-/* 0x0a0e: i2c_recv_not_wr08 */
-/* 0x0a0e: i2c_recv_done */
- 0xcec7030e,
- 0x08ba7ef8,
- 0xfce0fc00,
- 0x0912f4d0,
- 0xc27e7cb2,
-/* 0x0a22: i2c_recv_exit */
- 0x00f80002,
-/* 0x0a24: i2c_init */
-/* 0x0a26: test_recv */
- 0x584100f8,
- 0x0011cf04,
- 0x400110b6,
- 0x01f60458,
- 0xf104bd00,
- 0xf1d900e7,
- 0x7e134fe3,
- 0xf8000201,
-/* 0x0a45: test_init */
- 0x08004e00,
- 0x0002017e,
-/* 0x0a4e: idle_recv */
- 0x00f800f8,
-/* 0x0a50: idle */
- 0x410031f4,
- 0x11cf0454,
+ 0x010500a9,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x08677e50,
+ 0x0464b600,
+ 0x008711f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x07be7e50,
+ 0x0464b600,
+ 0xcb6711f4,
+ 0x76bbe05b,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600070a,
+ 0x5bb20464,
+ 0x0ef474bd,
+/* 0x09e1: i2c_recv_not_rd08 */
+ 0x01d6b041,
+ 0x053b1bf4,
+ 0x08677e00,
+ 0x3211f400,
+ 0x7ee0c5c7,
+ 0xf400080d,
+ 0x00052811,
+ 0x0008677e,
+ 0xc71f11f4,
+ 0x0d7ee0b5,
+ 0x11f40008,
+ 0x070a7e15,
+ 0xc774bd00,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0a1f: i2c_recv_not_wr08 */
+/* 0x0a1f: i2c_recv_done */
+ 0xc7030ef4,
+ 0xcb7ef8ce,
+ 0xe0fc0008,
+ 0x12f4d0fc,
+ 0x7e7cb209,
+/* 0x0a33: i2c_recv_exit */
+ 0xf80002c2,
+/* 0x0a35: i2c_init */
+/* 0x0a37: test_recv */
+ 0x4100f800,
+ 0x11cf0458,
0x0110b600,
- 0xf6045440,
+ 0xf6045840,
0x04bd0001,
-/* 0x0a64: idle_loop */
- 0x32f45801,
-/* 0x0a69: idle_proc */
-/* 0x0a69: idle_proc_exec */
- 0xb210f902,
- 0x02cb7e1e,
- 0xf410fc00,
- 0x31f40911,
- 0xf00ef402,
-/* 0x0a7c: idle_proc_next */
- 0xa65810b6,
- 0xe81bf41f,
- 0xf4e002f4,
- 0x0ef40028,
- 0x000000c6,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xd900e7f1,
+ 0x134fe3f1,
+ 0x0002017e,
+/* 0x0a56: test_init */
+ 0x004e00f8,
+ 0x02017e08,
+/* 0x0a5f: idle_recv */
+ 0xf800f800,
+/* 0x0a61: idle */
+ 0x0031f400,
+ 0xcf045441,
+ 0x10b60011,
+ 0x04544001,
+ 0xbd0001f6,
+/* 0x0a75: idle_loop */
+ 0xf4580104,
+/* 0x0a7a: idle_proc */
+/* 0x0a7a: idle_proc_exec */
+ 0x10f90232,
+ 0xcb7e1eb2,
+ 0x10fc0002,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0a8d: idle_proc_next */
+ 0x5810b6f0,
+ 0x1bf41fa6,
+ 0xe002f4e8,
+ 0xf40028f4,
+ 0x0000c60e,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 64e97baabc3c..d1f9b6cb66d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000006e0,
- 0x000006d2,
+ 0x00000842,
+ 0x00000834,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000006e4,
- 0x000006e2,
+ 0x00000846,
+ 0x00000844,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b14,
- 0x000009b7,
+ 0x00000c76,
+ 0x00000b19,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000b3d,
- 0x00000b16,
+ 0x00000c9f,
+ 0x00000c78,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000b49,
- 0x00000b47,
+ 0x00000cab,
+ 0x00000ca9,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nva3_pwr_data[] = {
0x00010006,
0x00000000,
0x000005f8,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x0000067e,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00001000,
0x00004000,
0x00010000,
@@ -776,7 +845,7 @@ uint32_t nva3_pwr_data[] = {
0x01000000,
0x04000000,
0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00002000,
0x00008000,
0x00020000,
@@ -787,7 +856,7 @@ uint32_t nva3_pwr_data[] = {
0x02000000,
0x08000000,
0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
0x0000e138,
0x0000e150,
0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nva3_pwr_code[] = {
@@ -1258,11 +1324,11 @@ uint32_t nva3_pwr_code[] = {
0x67f0f30b,
0x0664b62c,
0x800066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x05a8: memx_func_leave */
0xb62c67f0,
0x66cf0664,
- 0xef068000,
+ 0xf2068000,
0xf10467f0,
0xb607e407,
0x06d00604,
@@ -1323,408 +1389,479 @@ uint32_t nva3_pwr_code[] = {
0x9800f8a4,
0x10b6001e,
0x7f21f404,
-/* 0x067e: memx_exec */
- 0xe0f900f8,
- 0xc1b9d0f9,
- 0x02b2b902,
-/* 0x0688: memx_exec_next */
- 0xb6001398,
- 0x34e70410,
- 0x33e701f0,
- 0x32b601e0,
- 0x0c30f001,
- 0xf9de3598,
- 0x0612b855,
- 0x98e41ef4,
- 0x0c98ee0b,
- 0x02cbbbef,
- 0x07c4b7f1,
- 0xcf06b4b6,
- 0xd0fc00bb,
- 0x21f5e0fc,
+/* 0x067e: memx_func_train */
+ 0x57f100f8,
+ 0x77f10003,
+ 0x97f10000,
+ 0x93f00000,
+ 0x029eb970,
+ 0xb90421f4,
+ 0xe7f102d8,
+ 0x21f42710,
+/* 0x069d: memx_func_train_loop_outer */
+ 0x0158e07f,
+ 0x0083f101,
+ 0xe097f102,
+ 0x1193f011,
+ 0x80f990f9,
+ 0xe0fcd0fc,
+ 0xf93f21f4,
+ 0x0067f150,
+/* 0x06bd: memx_func_train_loop_inner */
+ 0x1187f100,
+ 0x9068ff11,
+ 0xfd109894,
+ 0x97f10589,
+ 0x93f00720,
+ 0xf990f910,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x008097f1,
+ 0xb91093f0,
+ 0x21f4029e,
+ 0x02d8b904,
+ 0xf92088c5,
+ 0xfc80f990,
+ 0xf4e0fcd0,
+ 0x97f13f21,
+ 0x93f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf990f980,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0x908000d3,
+ 0xb7f100dc,
+ 0xb3f08480,
+ 0xa421f41e,
+ 0x000057f1,
+ 0xffff97f1,
+ 0x830093f1,
+/* 0x073c: memx_func_train_loop_4x */
+ 0x0080a7f1,
+ 0xb910a3f0,
+ 0x21f402ae,
+ 0x02d8b904,
+ 0xffdfb7f1,
+ 0xffffb3f1,
+ 0xf9048bfd,
+ 0xfc80f9a0,
+ 0xf4e0fcd0,
+ 0xa7f13f21,
+ 0xa3f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf9a0f980,
+ 0xfcd0fc80,
+ 0x3f21f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0xb98000d3,
+ 0xb7f102dc,
+ 0xb3f02710,
+ 0xa421f400,
+ 0xf402eeb9,
+ 0xddb90421,
+ 0x949dff02,
+ 0x700150b6,
+ 0x1ef40456,
+ 0xcc7aa092,
+ 0x00a9800b,
+ 0xb60160b6,
+ 0x66700470,
+ 0x001ef510,
+ 0xb650fcff,
+ 0x56700150,
+ 0xd41ef507,
+/* 0x07cf: memx_exec */
+ 0xf900f8fe,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x07d9: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
+ 0xf8034221,
+/* 0x0815: memx_info */
+ 0x01c67000,
+/* 0x081b: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x0826: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x082e: memx_info_send */
+ 0x21f50100,
0x00f80342,
-/* 0x06c4: memx_info */
- 0x03c0c7f1,
- 0x0800b7f1,
- 0x034221f5,
-/* 0x06d2: memx_recv */
- 0xd6b000f8,
- 0xa90bf401,
- 0xf400d6b0,
- 0x00f8e90b,
-/* 0x06e0: memx_init */
-/* 0x06e2: perf_recv */
+/* 0x0834: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x0842: memx_init */
+ 0x00f800f8,
+/* 0x0844: perf_recv */
+/* 0x0846: perf_init */
0x00f800f8,
-/* 0x06e4: perf_init */
-/* 0x06e6: i2c_drive_scl */
+/* 0x0848: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x085c: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x086a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
- 0x04bd0001,
-/* 0x06fa: i2c_drive_scl_lo */
+ 0x04bd0002,
+/* 0x087e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0708: i2c_drive_sda */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
0x0002d006,
0x00f804bd,
-/* 0x071c: i2c_drive_sda_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0002,
-/* 0x072a: i2c_sense_scl */
- 0x32f400f8,
- 0xc437f101,
- 0x0634b607,
- 0xfd0033cf,
- 0x0bf40431,
- 0x0131f406,
-/* 0x0740: i2c_sense_scl_done */
-/* 0x0742: i2c_sense_sda */
- 0x32f400f8,
- 0xc437f101,
- 0x0634b607,
- 0xfd0033cf,
- 0x0bf40432,
- 0x0131f406,
-/* 0x0758: i2c_sense_sda_done */
-/* 0x075a: i2c_raise_scl */
- 0x40f900f8,
- 0x089847f1,
- 0xf50137f0,
-/* 0x0767: i2c_raise_scl_wait */
- 0xf106e621,
- 0xf403e8e7,
- 0x21f57f21,
- 0x01f4072a,
- 0x0142b609,
-/* 0x077b: i2c_raise_scl_done */
- 0xfcef1bf4,
-/* 0x077f: i2c_start */
- 0xf500f840,
- 0xf4072a21,
- 0x21f50d11,
- 0x11f40742,
- 0x300ef406,
-/* 0x0790: i2c_start_rep */
- 0xf50037f0,
- 0xf006e621,
- 0x21f50137,
- 0x76bb0708,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6075a21,
- 0x11f40464,
-/* 0x07bd: i2c_start_send */
- 0x0037f01f,
- 0x070821f5,
- 0x1388e7f1,
- 0xf07f21f4,
- 0x21f50037,
- 0xe7f106e6,
- 0x21f41388,
-/* 0x07d9: i2c_start_out */
-/* 0x07db: i2c_stop */
- 0xf000f87f,
- 0x21f50037,
- 0x37f006e6,
- 0x0821f500,
- 0xe8e7f107,
+/* 0x088c: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x08a2: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x08a4: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x08ba: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x08bc: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x4821f501,
+/* 0x08c9: i2c_raise_scl_wait */
+ 0xe8e7f108,
0x7f21f403,
- 0xf50137f0,
- 0xf106e621,
- 0xf41388e7,
- 0x37f07f21,
- 0x0821f501,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x080e: i2c_bitw */
- 0x21f500f8,
- 0xe7f10708,
- 0x21f403e8,
- 0x0076bb7f,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b6075a,
- 0x1811f404,
- 0x1388e7f1,
- 0xf07f21f4,
+ 0x088c21f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x08dd: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x08e1: i2c_start */
+ 0x8c21f500,
+ 0x0d11f408,
+ 0x08a421f5,
+ 0xf40611f4,
+/* 0x08f2: i2c_start_rep */
+ 0x37f0300e,
+ 0x4821f500,
+ 0x0137f008,
+ 0x086a21f5,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xbc21f550,
+ 0x0464b608,
+/* 0x091f: i2c_start_send */
+ 0xf01f11f4,
0x21f50037,
- 0xe7f106e6,
+ 0xe7f1086a,
0x21f41388,
-/* 0x084d: i2c_bitw_out */
-/* 0x084f: i2c_bitr */
- 0xf000f87f,
- 0x21f50137,
- 0xe7f10708,
- 0x21f403e8,
- 0x0076bb7f,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b6075a,
- 0x1b11f404,
- 0x074221f5,
+ 0x0037f07f,
+ 0x084821f5,
+ 0x1388e7f1,
+/* 0x093b: i2c_start_out */
+ 0xf87f21f4,
+/* 0x093d: i2c_stop */
+ 0x0037f000,
+ 0x084821f5,
0xf50037f0,
- 0xf106e621,
+ 0xf1086a21,
+ 0xf403e8e7,
+ 0x37f07f21,
+ 0x4821f501,
+ 0x88e7f108,
+ 0x7f21f413,
+ 0xf50137f0,
+ 0xf1086a21,
0xf41388e7,
- 0x3cf07f21,
- 0x0131f401,
-/* 0x0894: i2c_bitr_done */
-/* 0x0896: i2c_get_byte */
- 0x57f000f8,
- 0x0847f000,
-/* 0x089c: i2c_get_byte_next */
- 0xbb0154b6,
+ 0x00f87f21,
+/* 0x0970: i2c_bitw */
+ 0x086a21f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x084f21f5,
+ 0x08bc21f5,
0xf40464b6,
- 0x53fd2b11,
- 0x0142b605,
- 0xf0d81bf4,
- 0x76bb0137,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6080e21,
-/* 0x08e6: i2c_get_byte_done */
- 0x00f80464,
-/* 0x08e8: i2c_put_byte */
-/* 0x08eb: i2c_put_byte_next */
- 0xb60847f0,
- 0x54ff0142,
- 0x0076bb38,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x084821f5,
+ 0x1388e7f1,
+/* 0x09af: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x09b1: i2c_bitr */
+ 0x0137f000,
+ 0x086a21f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08bc21f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f008a4,
+ 0x4821f500,
+ 0x88e7f108,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x09f6: i2c_bitr_done */
+ 0x00f80131,
+/* 0x09f8: i2c_get_byte */
+ 0xf00057f0,
+/* 0x09fe: i2c_get_byte_next */
+ 0x54b60847,
+ 0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b6080e,
- 0x3411f404,
- 0xf40046b0,
- 0x76bbd81b,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6084f21,
- 0x11f40464,
- 0x0076bb0f,
- 0xf40136b0,
- 0x32f4061b,
-/* 0x0941: i2c_put_byte_done */
-/* 0x0943: i2c_addr */
- 0xbb00f801,
+ 0x64b609b1,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x7021f550,
+ 0x0464b609,
+/* 0x0a48: i2c_get_byte_done */
+/* 0x0a4a: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0a4d: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x077f21f5,
+ 0x097021f5,
0xf40464b6,
- 0xc3e72911,
- 0x34b6012e,
- 0x0553fd01,
+ 0x46b03411,
+ 0xd81bf400,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xe821f550,
- 0x0464b608,
-/* 0x0988: i2c_addr_done */
-/* 0x098a: i2c_acquire_addr */
- 0xcec700f8,
- 0x02e4b6f8,
- 0x0c10e0b7,
- 0xf800ee98,
-/* 0x0999: i2c_acquire */
- 0x8a21f500,
- 0x0421f409,
- 0xf403d9f0,
- 0x00f83f21,
-/* 0x09a8: i2c_release */
- 0x098a21f5,
- 0xf00421f4,
- 0x21f403da,
-/* 0x09b7: i2c_recv */
- 0xf400f83f,
- 0xc1c70132,
- 0x0214b6f8,
- 0xf52816b0,
- 0xa0013a1f,
- 0x980be813,
- 0x13a00032,
- 0x31980bc0,
- 0x0231f400,
- 0xe0f9d0f9,
- 0x67f1d0f9,
- 0x63f10000,
- 0x67921000,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b60999,
- 0xb0d0fc04,
- 0x1bf500d6,
- 0x57f000b3,
+ 0xb121f550,
+ 0x0464b609,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x0aa3: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x0aa5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60943,
- 0xd011f504,
- 0xe0c5c700,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xe821f550,
- 0x0464b608,
- 0x00ad11f5,
- 0xbb0157f0,
+ 0x64b608e1,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb60a4a21,
+/* 0x0aea: i2c_addr_done */
+ 0x00f80464,
+/* 0x0aec: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0afb: i2c_acquire */
+ 0xf500f800,
+ 0xf40aec21,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0b0a: i2c_release */
+ 0x21f500f8,
+ 0x21f40aec,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0b19: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x094321f5,
- 0xf50464b6,
- 0xbb008a11,
+ 0x0afb21f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x089621f5,
- 0xf40464b6,
- 0x5bcb6a11,
- 0x0076bbe0,
+ 0x0aa521f5,
+ 0xf50464b6,
+ 0xc700d011,
+ 0x76bbe0c5,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb60a4a21,
+ 0x11f50464,
+ 0x57f000ad,
+ 0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607db,
- 0x025bb904,
- 0x0ef474bd,
-/* 0x0abd: i2c_recv_not_rd08 */
- 0x01d6b043,
- 0xf03d1bf4,
- 0x21f50057,
- 0x11f40943,
- 0xe0c5c733,
- 0x08e821f5,
- 0xf02911f4,
- 0x21f50057,
- 0x11f40943,
- 0xe0b5c71f,
- 0x08e821f5,
- 0xf51511f4,
- 0xbd07db21,
- 0x08c5c774,
- 0xf4091bf4,
- 0x0ef40232,
-/* 0x0afd: i2c_recv_not_wr08 */
-/* 0x0afd: i2c_recv_done */
- 0xf8cec703,
- 0x09a821f5,
- 0xd0fce0fc,
- 0xb90a12f4,
- 0x21f5027c,
-/* 0x0b12: i2c_recv_exit */
- 0x00f80342,
-/* 0x0b14: i2c_init */
-/* 0x0b16: test_recv */
- 0x17f100f8,
- 0x14b605d8,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d807,
- 0x01d00604,
- 0xf104bd00,
- 0xf1d900e7,
- 0xf5134fe3,
- 0xf8026221,
-/* 0x0b3d: test_init */
- 0x00e7f100,
- 0x6221f508,
-/* 0x0b47: idle_recv */
- 0xf800f802,
-/* 0x0b49: idle */
- 0x0031f400,
- 0x05d417f1,
+ 0x64b60aa5,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609f8,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x093d21f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0c1f: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x0aa521f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40a4a,
+ 0x0057f029,
+ 0x0aa521f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40a4a,
+ 0x3d21f515,
+ 0xc774bd09,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0c5f: i2c_recv_not_wr08 */
+/* 0x0c5f: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0b0a,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x034221f5,
+/* 0x0c74: i2c_recv_exit */
+/* 0x0c76: i2c_init */
+ 0x00f800f8,
+/* 0x0c78: test_recv */
+ 0x05d817f1,
0xcf0614b6,
0x10b60011,
- 0xd407f101,
+ 0xd807f101,
0x0604b605,
0xbd0001d0,
-/* 0x0b65: idle_loop */
- 0x5817f004,
-/* 0x0b6b: idle_proc */
-/* 0x0b6b: idle_proc_exec */
- 0xf90232f4,
- 0x021eb910,
- 0x034b21f5,
- 0x11f410fc,
- 0x0231f409,
-/* 0x0b7f: idle_proc_next */
- 0xb6ef0ef4,
- 0x1fb85810,
- 0xe61bf406,
- 0xf4dd02f4,
- 0x0ef40028,
- 0x000000bb,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x6221f513,
+/* 0x0c9f: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8026221,
+/* 0x0ca9: idle_recv */
+/* 0x0cab: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0cc7: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0ccd: idle_proc */
+/* 0x0ccd: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc034b,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0ce1: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index ca30fa4011b5..90221d973f84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000074b,
- 0x0000073d,
+ 0x0000075e,
+ 0x00000750,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000074f,
- 0x0000074d,
+ 0x00000762,
+ 0x00000760,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b7f,
- 0x00000a22,
+ 0x00000b92,
+ 0x00000a35,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000ba8,
- 0x00000b81,
+ 0x00000bbb,
+ 0x00000b94,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000bb4,
- 0x00000bb2,
+ 0x00000bc7,
+ 0x00000bc5,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvc0_pwr_data[] = {
0x00010006,
0x00000000,
0x00000663,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x000006e9,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00001000,
0x00004000,
0x00010000,
@@ -776,7 +845,7 @@ uint32_t nvc0_pwr_data[] = {
0x01000000,
0x04000000,
0x10000000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00002000,
0x00008000,
0x00020000,
@@ -787,7 +856,7 @@ uint32_t nvc0_pwr_data[] = {
0x02000000,
0x08000000,
0x20000000,
-/* 0x0c10: i2c_ctrl */
+/* 0x0d1c: i2c_ctrl */
0x0000e138,
0x0000e150,
0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nvc0_pwr_code[] = {
@@ -1272,10 +1338,10 @@ uint32_t nvc0_pwr_code[] = {
0xcf0664b6,
0x06800066,
/* 0x05db: memx_func_leave */
- 0xf000f8ee,
+ 0xf000f8f1,
0x64b62c67,
0x0066cf06,
- 0xf0ef0680,
+ 0xf0f20680,
0x07f10467,
0x04b607e4,
0x0006d006,
@@ -1350,382 +1416,450 @@ uint32_t nvc0_pwr_code[] = {
0x1e9800f8,
0x0410b600,
0xf87f21f4,
-/* 0x06e9: memx_exec */
- 0xf9e0f900,
- 0x02c1b9d0,
-/* 0x06f3: memx_exec_next */
- 0x9802b2b9,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0xf40612b8,
- 0x0b98e41e,
- 0xef0c98ee,
- 0xf102cbbb,
- 0xb607c4b7,
- 0xbbcf06b4,
- 0xfcd0fc00,
- 0x4221f5e0,
-/* 0x072f: memx_info */
- 0xf100f803,
- 0xf103c0c7,
- 0xf50800b7,
+/* 0x06e9: memx_func_train */
+/* 0x06eb: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x06f5: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
0xf8034221,
-/* 0x073d: memx_recv */
- 0x01d6b000,
- 0xb0a90bf4,
- 0x0bf400d6,
-/* 0x074b: memx_init */
- 0xf800f8e9,
-/* 0x074d: perf_recv */
-/* 0x074f: perf_init */
- 0xf800f800,
-/* 0x0751: i2c_drive_scl */
- 0x0036b000,
- 0xf1110bf4,
- 0xb607e007,
- 0x01d00604,
- 0xf804bd00,
-/* 0x0765: i2c_drive_scl_lo */
- 0xe407f100,
- 0x0604b607,
- 0xbd0001d0,
-/* 0x0773: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0xe007f111,
- 0x0604b607,
- 0xbd0002d0,
-/* 0x0787: i2c_drive_sda_lo */
- 0xf100f804,
- 0xb607e407,
- 0x02d00604,
- 0xf804bd00,
-/* 0x0795: i2c_sense_scl */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x31fd0033,
- 0x060bf404,
-/* 0x07ab: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x07ad: i2c_sense_sda */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x32fd0033,
- 0x060bf404,
-/* 0x07c3: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x07c5: i2c_raise_scl */
- 0xf140f900,
- 0xf0089847,
- 0x21f50137,
-/* 0x07d2: i2c_raise_scl_wait */
- 0xe7f10751,
- 0x21f403e8,
- 0x9521f57f,
- 0x0901f407,
- 0xf40142b6,
-/* 0x07e6: i2c_raise_scl_done */
- 0x40fcef1b,
-/* 0x07ea: i2c_start */
- 0x21f500f8,
- 0x11f40795,
- 0xad21f50d,
- 0x0611f407,
-/* 0x07fb: i2c_start_rep */
- 0xf0300ef4,
- 0x21f50037,
- 0x37f00751,
- 0x7321f501,
- 0x0076bb07,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607c5,
- 0x1f11f404,
-/* 0x0828: i2c_start_send */
- 0xf50037f0,
- 0xf1077321,
- 0xf41388e7,
- 0x37f07f21,
- 0x5121f500,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x0844: i2c_start_out */
-/* 0x0846: i2c_stop */
- 0x37f000f8,
- 0x5121f500,
- 0x0037f007,
- 0x077321f5,
- 0x03e8e7f1,
- 0xf07f21f4,
- 0x21f50137,
- 0xe7f10751,
- 0x21f41388,
- 0x0137f07f,
- 0x077321f5,
- 0x1388e7f1,
- 0xf87f21f4,
-/* 0x0879: i2c_bitw */
- 0x7321f500,
+/* 0x0731: memx_info */
+ 0x01c67000,
+/* 0x0737: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x0742: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x074a: memx_info_send */
+ 0x21f50100,
+ 0x00f80342,
+/* 0x0750: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x075e: memx_init */
+ 0x00f800f8,
+/* 0x0760: perf_recv */
+/* 0x0762: perf_init */
+ 0x00f800f8,
+/* 0x0764: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0778: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0786: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x079a: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x07a8: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x07be: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x07c0: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x07d6: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x07d8: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x6421f501,
+/* 0x07e5: i2c_raise_scl_wait */
0xe8e7f107,
0x7f21f403,
+ 0x07a821f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x07f9: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x07fd: i2c_start */
+ 0xa821f500,
+ 0x0d11f407,
+ 0x07c021f5,
+ 0xf40611f4,
+/* 0x080e: i2c_start_rep */
+ 0x37f0300e,
+ 0x6421f500,
+ 0x0137f007,
+ 0x078621f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc521f550,
+ 0xd821f550,
0x0464b607,
- 0xf11811f4,
- 0xf41388e7,
+/* 0x083b: i2c_start_send */
+ 0xf01f11f4,
+ 0x21f50037,
+ 0xe7f10786,
+ 0x21f41388,
+ 0x0037f07f,
+ 0x076421f5,
+ 0x1388e7f1,
+/* 0x0857: i2c_start_out */
+ 0xf87f21f4,
+/* 0x0859: i2c_stop */
+ 0x0037f000,
+ 0x076421f5,
+ 0xf50037f0,
+ 0xf1078621,
+ 0xf403e8e7,
0x37f07f21,
- 0x5121f500,
+ 0x6421f501,
0x88e7f107,
0x7f21f413,
-/* 0x08b8: i2c_bitw_out */
-/* 0x08ba: i2c_bitr */
- 0x37f000f8,
- 0x7321f501,
- 0xe8e7f107,
- 0x7f21f403,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc521f550,
- 0x0464b607,
- 0xf51b11f4,
- 0xf007ad21,
- 0x21f50037,
- 0xe7f10751,
+ 0xf50137f0,
+ 0xf1078621,
+ 0xf41388e7,
+ 0x00f87f21,
+/* 0x088c: i2c_bitw */
+ 0x078621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d821f5,
+ 0xf40464b6,
+ 0xe7f11811,
0x21f41388,
- 0x013cf07f,
-/* 0x08ff: i2c_bitr_done */
- 0xf80131f4,
-/* 0x0901: i2c_get_byte */
- 0x0057f000,
-/* 0x0907: i2c_get_byte_next */
- 0xb60847f0,
- 0x76bb0154,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb608ba21,
- 0x11f40464,
- 0x0553fd2b,
- 0xf40142b6,
- 0x37f0d81b,
+ 0x0037f07f,
+ 0x076421f5,
+ 0x1388e7f1,
+/* 0x08cb: i2c_bitw_out */
+ 0xf87f21f4,
+/* 0x08cd: i2c_bitr */
+ 0x0137f000,
+ 0x078621f5,
+ 0x03e8e7f1,
+ 0xbb7f21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d821f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f007c0,
+ 0x6421f500,
+ 0x88e7f107,
+ 0x7f21f413,
+ 0xf4013cf0,
+/* 0x0912: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0914: i2c_get_byte */
+ 0xf00057f0,
+/* 0x091a: i2c_get_byte_next */
+ 0x54b60847,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60879,
-/* 0x0951: i2c_get_byte_done */
-/* 0x0953: i2c_put_byte */
- 0xf000f804,
-/* 0x0956: i2c_put_byte_next */
- 0x42b60847,
- 0x3854ff01,
+ 0x64b608cd,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7921f550,
+ 0x8c21f550,
0x0464b608,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+/* 0x0964: i2c_get_byte_done */
+/* 0x0966: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0969: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x088c21f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xcd21f550,
+ 0x0464b608,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x09bf: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x09c1: i2c_addr */
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608ba,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x09ac: i2c_put_byte_done */
-/* 0x09ae: i2c_addr */
- 0x76bb00f8,
+ 0x64b607fd,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb607ea21,
- 0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0xb6096621,
+/* 0x0a06: i2c_addr_done */
+ 0x00f80464,
+/* 0x0a08: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0a17: i2c_acquire */
+ 0xf500f800,
+ 0xf40a0821,
+ 0xd9f00421,
+ 0x3f21f403,
+/* 0x0a26: i2c_release */
+ 0x21f500f8,
+ 0x21f40a08,
+ 0x03daf004,
+ 0xf83f21f4,
+/* 0x0a35: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095321f5,
-/* 0x09f3: i2c_addr_done */
- 0xf80464b6,
-/* 0x09f5: i2c_acquire_addr */
- 0xf8cec700,
- 0xb702e4b6,
- 0x980c10e0,
- 0x00f800ee,
-/* 0x0a04: i2c_acquire */
- 0x09f521f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x0a13: i2c_release */
- 0xf500f83f,
- 0xf409f521,
- 0xdaf00421,
- 0x3f21f403,
-/* 0x0a22: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980be8,
- 0xc013a000,
- 0x0031980b,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x0421f550,
- 0x0464b60a,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xae21f550,
- 0x0464b609,
- 0x00d011f5,
- 0xbbe0c5c7,
+ 0x0a1721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095321f5,
+ 0x09c121f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb609ae21,
+ 0xb6096621,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6090121,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x4621f550,
- 0x0464b608,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0b28: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xae21f500,
- 0x3311f409,
- 0xf5e0c5c7,
- 0xf4095321,
- 0x57f02911,
- 0xae21f500,
- 0x1f11f409,
- 0xf5e0b5c7,
- 0xf4095321,
- 0x21f51511,
- 0x74bd0846,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0b68: i2c_recv_not_wr08 */
-/* 0x0b68: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc0a1321,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0x4221f502,
-/* 0x0b7d: i2c_recv_exit */
-/* 0x0b7f: i2c_init */
- 0xf800f803,
-/* 0x0b81: test_recv */
- 0xd817f100,
- 0x0614b605,
- 0xb60011cf,
- 0x07f10110,
- 0x04b605d8,
- 0x0001d006,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x21f5134f,
- 0x00f80262,
-/* 0x0ba8: test_init */
- 0x0800e7f1,
- 0x026221f5,
-/* 0x0bb2: idle_recv */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609c1,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60914,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x085921f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0b3b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x09c121f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40966,
+ 0x0057f029,
+ 0x09c121f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40966,
+ 0x5921f515,
+ 0xc774bd08,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0b7b: i2c_recv_not_wr08 */
+/* 0x0b7b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0a26,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x034221f5,
+/* 0x0b90: i2c_recv_exit */
+/* 0x0b92: i2c_init */
0x00f800f8,
-/* 0x0bb4: idle */
- 0xf10031f4,
- 0xb605d417,
- 0x11cf0614,
- 0x0110b600,
- 0x05d407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0bd0: idle_loop */
- 0xf45817f0,
-/* 0x0bd6: idle_proc */
-/* 0x0bd6: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc034b21,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0bea: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xbb0ef400,
+/* 0x0b94: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x6221f513,
+/* 0x0bbb: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8026221,
+/* 0x0bc5: idle_recv */
+/* 0x0bc7: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0be3: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0be9: idle_proc */
+/* 0x0be9: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc034b,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0bfd: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 12d86f72ad10..7e16aab44d85 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000678,
- 0x0000066a,
+ 0x0000068b,
+ 0x0000067d,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000067c,
- 0x0000067a,
+ 0x0000068f,
+ 0x0000068d,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a97,
- 0x0000093a,
+ 0x00000aaa,
+ 0x0000094d,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000aba,
- 0x00000a99,
+ 0x00000acd,
+ 0x00000aac,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ac6,
- 0x00000ac4,
+ 0x00000ad9,
+ 0x00000ad7,
0x00000000,
0x00000000,
0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvd0_pwr_data[] = {
0x00010006,
0x00000000,
0x000005d3,
-/* 0x03b8: memx_func_tail */
-/* 0x03b8: memx_ts_start */
+ 0x00000007,
0x00000000,
-/* 0x03bc: memx_ts_end */
+ 0x00000619,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
0x00000000,
-/* 0x03c0: memx_data_head */
+/* 0x03c8: memx_ts_end */
0x00000000,
+/* 0x03cc: memx_data_head */
0x00000000,
0x00000000,
0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
-/* 0x0bc0: memx_data_tail */
-/* 0x0bc0: i2c_scl_map */
+ 0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
0x00000400,
0x00000800,
0x00001000,
@@ -776,7 +845,7 @@ uint32_t nvd0_pwr_data[] = {
0x00020000,
0x00040000,
0x00080000,
-/* 0x0be8: i2c_sda_map */
+/* 0x0cf4: i2c_sda_map */
0x00100000,
0x00200000,
0x00400000,
@@ -844,9 +913,6 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
};
uint32_t nvd0_pwr_code[] = {
@@ -1236,11 +1302,11 @@ uint32_t nvd0_pwr_code[] = {
0x0bf40464,
0x2c67f0f6,
0x800066cf,
- 0x00f8ee06,
+ 0x00f8f106,
/* 0x0554: memx_func_leave */
0xcf2c67f0,
0x06800066,
- 0x0467f0ef,
+ 0x0467f0f2,
0x07e407f1,
0xbd0006d0,
/* 0x0569: memx_func_leave_wait */
@@ -1292,379 +1358,383 @@ uint32_t nvd0_pwr_code[] = {
0x1e9800f8,
0x0410b600,
0xf86721f4,
-/* 0x0619: memx_exec */
- 0xf9e0f900,
- 0x02c1b9d0,
-/* 0x0623: memx_exec_next */
- 0x9802b2b9,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0xf40612b8,
- 0x0b98e41e,
- 0xef0c98ee,
- 0xf102cbbb,
- 0xcf07c4b7,
- 0xd0fc00bb,
- 0x21f5e0fc,
- 0x00f802f1,
-/* 0x065c: memx_info */
- 0x03c0c7f1,
- 0x0800b7f1,
+/* 0x0619: memx_func_train */
+/* 0x061b: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x0625: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x00bbcf07,
+ 0xe0fcd0fc,
0x02f121f5,
-/* 0x066a: memx_recv */
- 0xd6b000f8,
- 0xac0bf401,
- 0xf400d6b0,
- 0x00f8e90b,
-/* 0x0678: memx_init */
-/* 0x067a: perf_recv */
- 0x00f800f8,
-/* 0x067c: perf_init */
-/* 0x067e: i2c_drive_scl */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0001d0,
-/* 0x068f: i2c_drive_scl_lo */
- 0xf100f804,
- 0xd007e407,
+/* 0x065e: memx_info */
+ 0xc67000f8,
+ 0x0e0bf401,
+/* 0x0664: memx_info_data */
+ 0x03ccc7f1,
+ 0x0800b7f1,
+/* 0x066f: memx_info_train */
+ 0xf10b0ef4,
+ 0xf10bccc7,
+/* 0x0677: memx_info_send */
+ 0xf50100b7,
+ 0xf802f121,
+/* 0x067d: memx_recv */
+ 0x01d6b000,
+ 0xb09b0bf4,
+ 0x0bf400d6,
+/* 0x068b: memx_init */
+ 0xf800f8d8,
+/* 0x068d: perf_recv */
+/* 0x068f: perf_init */
+ 0xf800f800,
+/* 0x0691: i2c_drive_scl */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0001,
-/* 0x069a: i2c_drive_sda */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0002d0,
-/* 0x06ab: i2c_drive_sda_lo */
- 0xf100f804,
- 0xd007e407,
+/* 0x06a2: i2c_drive_scl_lo */
+ 0x07f100f8,
+ 0x01d007e4,
+ 0xf804bd00,
+/* 0x06ad: i2c_drive_sda */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0002,
-/* 0x06b6: i2c_sense_scl */
+/* 0x06be: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x02d007e4,
+ 0xf804bd00,
+/* 0x06c9: i2c_sense_scl */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x06dc: i2c_sense_scl_done */
+/* 0x06de: i2c_sense_sda */
0x32f400f8,
0xc437f101,
0x0033cf07,
- 0xf40431fd,
+ 0xf40432fd,
0x31f4060b,
-/* 0x06c9: i2c_sense_scl_done */
-/* 0x06cb: i2c_sense_sda */
- 0xf400f801,
- 0x37f10132,
- 0x33cf07c4,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x06de: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x06e0: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x7e21f501,
-/* 0x06ed: i2c_raise_scl_wait */
- 0xe8e7f106,
- 0x6721f403,
- 0x06b621f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x0701: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x0705: i2c_start */
- 0xb621f500,
- 0x0d11f406,
- 0x06cb21f5,
- 0xf40611f4,
-/* 0x0716: i2c_start_rep */
- 0x37f0300e,
- 0x7e21f500,
- 0x0137f006,
- 0x069a21f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xe021f550,
- 0x0464b606,
-/* 0x0743: i2c_start_send */
- 0xf01f11f4,
- 0x21f50037,
- 0xe7f1069a,
- 0x21f41388,
- 0x0037f067,
- 0x067e21f5,
- 0x1388e7f1,
-/* 0x075f: i2c_start_out */
- 0xf86721f4,
-/* 0x0761: i2c_stop */
- 0x0037f000,
- 0x067e21f5,
- 0xf50037f0,
- 0xf1069a21,
- 0xf403e8e7,
- 0x37f06721,
- 0x7e21f501,
- 0x88e7f106,
- 0x6721f413,
- 0xf50137f0,
- 0xf1069a21,
- 0xf41388e7,
- 0x00f86721,
-/* 0x0794: i2c_bitw */
- 0x069a21f5,
+/* 0x06f1: i2c_sense_sda_done */
+/* 0x06f3: i2c_raise_scl */
+ 0xf900f801,
+ 0x9847f140,
+ 0x0137f008,
+ 0x069121f5,
+/* 0x0700: i2c_raise_scl_wait */
0x03e8e7f1,
- 0xbb6721f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x06e021f5,
- 0xf40464b6,
- 0xe7f11811,
- 0x21f41388,
- 0x0037f067,
- 0x067e21f5,
- 0x1388e7f1,
-/* 0x07d3: i2c_bitw_out */
- 0xf86721f4,
-/* 0x07d5: i2c_bitr */
- 0x0137f000,
- 0x069a21f5,
- 0x03e8e7f1,
- 0xbb6721f4,
+ 0xf56721f4,
+ 0xf406c921,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x0714: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x0718: i2c_start */
+ 0x06c921f5,
+ 0xf50d11f4,
+ 0xf406de21,
+ 0x0ef40611,
+/* 0x0729: i2c_start_rep */
+ 0x0037f030,
+ 0x069121f5,
+ 0xf50137f0,
+ 0xbb06ad21,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x06e021f5,
+ 0x06f321f5,
0xf40464b6,
- 0x21f51b11,
- 0x37f006cb,
- 0x7e21f500,
+/* 0x0756: i2c_start_send */
+ 0x37f01f11,
+ 0xad21f500,
0x88e7f106,
0x6721f413,
- 0xf4013cf0,
-/* 0x081a: i2c_bitr_done */
- 0x00f80131,
-/* 0x081c: i2c_get_byte */
- 0xf00057f0,
-/* 0x0822: i2c_get_byte_next */
- 0x54b60847,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607d5,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
+ 0xf50037f0,
+ 0xf1069121,
+ 0xf41388e7,
+/* 0x0772: i2c_start_out */
+ 0x00f86721,
+/* 0x0774: i2c_stop */
+ 0xf50037f0,
+ 0xf0069121,
+ 0x21f50037,
+ 0xe7f106ad,
+ 0x21f403e8,
+ 0x0137f067,
+ 0x069121f5,
+ 0x1388e7f1,
+ 0xf06721f4,
+ 0x21f50137,
+ 0xe7f106ad,
+ 0x21f41388,
+/* 0x07a7: i2c_bitw */
+ 0xf500f867,
+ 0xf106ad21,
+ 0xf403e8e7,
+ 0x76bb6721,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606f321,
+ 0x11f40464,
+ 0x88e7f118,
+ 0x6721f413,
+ 0xf50037f0,
+ 0xf1069121,
+ 0xf41388e7,
+/* 0x07e6: i2c_bitw_out */
+ 0x00f86721,
+/* 0x07e8: i2c_bitr */
+ 0xf50137f0,
+ 0xf106ad21,
+ 0xf403e8e7,
+ 0x76bb6721,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606f321,
+ 0x11f40464,
+ 0xde21f51b,
+ 0x0037f006,
+ 0x069121f5,
+ 0x1388e7f1,
+ 0xf06721f4,
+ 0x31f4013c,
+/* 0x082d: i2c_bitr_done */
+/* 0x082f: i2c_get_byte */
+ 0xf000f801,
+ 0x47f00057,
+/* 0x0835: i2c_get_byte_next */
+ 0x0154b608,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x9421f550,
+ 0xe821f550,
0x0464b607,
-/* 0x086c: i2c_get_byte_done */
-/* 0x086e: i2c_put_byte */
- 0x47f000f8,
-/* 0x0871: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+ 0xfd2b11f4,
+ 0x42b60553,
+ 0xd81bf401,
+ 0xbb0137f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07a721f5,
+/* 0x087f: i2c_get_byte_done */
+ 0xf80464b6,
+/* 0x0881: i2c_put_byte */
+ 0x0847f000,
+/* 0x0884: i2c_put_byte_next */
+ 0xff0142b6,
+ 0x76bb3854,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb607a721,
+ 0x11f40464,
+ 0x0046b034,
+ 0xbbd81bf4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x079421f5,
+ 0x07e821f5,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x76bb0f11,
+ 0x0136b000,
+ 0xf4061bf4,
+/* 0x08da: i2c_put_byte_done */
+ 0x00f80132,
+/* 0x08dc: i2c_addr */
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xd521f550,
+ 0x1821f550,
0x0464b607,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x08c7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x08c9: i2c_addr */
- 0x0076bb00,
+ 0xe72911f4,
+ 0xb6012ec3,
+ 0x53fd0134,
+ 0x0076bb05,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60705,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6086e21,
-/* 0x090e: i2c_addr_done */
- 0x00f80464,
-/* 0x0910: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x091c: i2c_acquire */
- 0x091021f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x092b: i2c_release */
- 0xf500f833,
- 0xf4091021,
- 0xdaf00421,
+ 0x64b60881,
+/* 0x0921: i2c_addr_done */
+/* 0x0923: i2c_acquire_addr */
+ 0xc700f804,
+ 0xe4b6f8ce,
+ 0x14e0b705,
+/* 0x092f: i2c_acquire */
+ 0xf500f8d0,
+ 0xf4092321,
+ 0xd9f00421,
0x3321f403,
-/* 0x093a: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980be8,
- 0xc013a000,
- 0x0031980b,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x1c21f550,
- 0x0464b609,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc921f550,
- 0x0464b608,
- 0x00d011f5,
- 0xbbe0c5c7,
+/* 0x093e: i2c_release */
+ 0x21f500f8,
+ 0x21f40923,
+ 0x03daf004,
+ 0xf83321f4,
+/* 0x094d: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x086e21f5,
+ 0x092f21f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08dc21f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb608c921,
+ 0xb6088121,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6081c21,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x6121f550,
- 0x0464b607,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0a40: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xc921f500,
- 0x3311f408,
- 0xf5e0c5c7,
- 0xf4086e21,
- 0x57f02911,
- 0xc921f500,
- 0x1f11f408,
- 0xf5e0b5c7,
- 0xf4086e21,
- 0x21f51511,
- 0x74bd0761,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0a80: i2c_recv_not_wr08 */
-/* 0x0a80: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc092b21,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0xf121f502,
-/* 0x0a95: i2c_recv_exit */
-/* 0x0a97: i2c_init */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b608dc,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b6082f,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x077421f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0a53: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x08dc21f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40881,
+ 0x0057f029,
+ 0x08dc21f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40881,
+ 0x7421f515,
+ 0xc774bd07,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0a93: i2c_recv_not_wr08 */
+/* 0x0a93: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc093e,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02f121f5,
+/* 0x0aa8: i2c_recv_exit */
+/* 0x0aaa: i2c_init */
+ 0x00f800f8,
+/* 0x0aac: test_recv */
+ 0x05d817f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d8,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0xf5134fe3,
+ 0xf8022321,
+/* 0x0acd: test_init */
+ 0x00e7f100,
+ 0x2321f508,
+/* 0x0ad7: idle_recv */
0xf800f802,
-/* 0x0a99: test_recv */
- 0xd817f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d807,
- 0x04bd0001,
- 0xd900e7f1,
- 0x134fe3f1,
- 0x022321f5,
-/* 0x0aba: test_init */
- 0xe7f100f8,
- 0x21f50800,
- 0x00f80223,
-/* 0x0ac4: idle_recv */
-/* 0x0ac6: idle */
- 0x31f400f8,
- 0xd417f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d407,
- 0x04bd0001,
-/* 0x0adc: idle_loop */
- 0xf45817f0,
-/* 0x0ae2: idle_proc */
-/* 0x0ae2: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc02fa21,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0af6: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xc10ef400,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+/* 0x0ad9: idle */
+ 0x0031f400,
+ 0x05d417f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d4,
+/* 0x0aef: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0af5: idle_proc */
+/* 0x0af5: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc02fa,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0b09: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00c10ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 522e3079f824..c8b06cb77e72 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -18,6 +18,10 @@
#define MEMX_MSG_INFO 0
#define MEMX_MSG_EXEC 1
+/* MEMX: info types */
+#define MEMX_INFO_DATA 0
+#define MEMX_INFO_TRAIN 1
+
/* MEMX: script opcode definitions */
#define MEMX_ENTER 1
#define MEMX_LEAVE 2
@@ -25,6 +29,7 @@
#define MEMX_WAIT 4
#define MEMX_DELAY 5
#define MEMX_VBLANK 6
+#define MEMX_TRAIN 7
/* I2C_: message identifiers */
#define I2C__MSG_RD08 0
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 65eaa2546cad..7a9299d7159f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -47,7 +47,8 @@ nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
u32 reply[2];
int ret;
- ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+ ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_DATA, 0);
if (ret)
return ret;
@@ -106,7 +107,7 @@ nouveau_memx_wait(struct nouveau_memx *memx,
{
nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
addr, mask, data, nsec);
- memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+ memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
memx_out(memx); /* fuc can't handle multiple */
}
@@ -152,6 +153,38 @@ nouveau_memx_wait_vblank(struct nouveau_memx *memx)
}
void
+nouveau_memx_train(struct nouveau_memx *memx)
+{
+ nv_debug(memx->ppwr, " MEM TRAIN\n");
+ memx_cmd(memx, MEMX_TRAIN, 0, NULL);
+}
+
+int
+nouveau_memx_train_result(struct nouveau_pwr *ppwr, u32 *res, int rsize)
+{
+ u32 reply[2], base, size, i;
+ int ret;
+
+ ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
+ MEMX_INFO_TRAIN, 0);
+ if (ret)
+ return ret;
+
+ base = reply[0];
+ size = reply[1] >> 2;
+ if (size > rsize)
+ return -ENOMEM;
+
+ /* read the packet */
+ nv_wr32(ppwr, 0x10a1c0, 0x02000000 | base);
+
+ for (i = 0; i < size; i++)
+ res[i] = nv_rd32(ppwr, 0x10a1c4);
+
+ return 0;
+}
+
+void
nouveau_memx_block(struct nouveau_memx *memx)
{
nv_debug(memx->ppwr, " HOST BLOCKED\n");
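
The memx additions above only make sense as a pair: nouveau_memx_train() queues the new MEMX_TRAIN opcode into a script, and nouveau_memx_train_result() later pulls whatever the PMU wrote into the memx_train_head..memx_train_tail window (0x100 bytes, i.e. 64 words, in the firmware data above) back to the host. A minimal caller sketch follows, assuming the usual nouveau_memx_init()/nouveau_memx_fini() pairing used elsewhere in the clock code; the wrapper function itself is illustrative and not part of this series:

/* Sketch only: relies on the memx helpers declared with the pwr subdev. */
static int
example_run_memory_training(struct nouveau_pwr *ppwr)
{
        struct nouveau_memx *memx;
        u32 result[64];                 /* matches the 0x100-byte training window */
        int ret;

        ret = nouveau_memx_init(ppwr, &memx);
        if (ret)
                return ret;

        nouveau_memx_train(memx);               /* queue MEMX_TRAIN */

        ret = nouveau_memx_fini(&memx, true);   /* run the script on the PMU */
        if (ret)
                return ret;

        /* copy the PMU's training results back through the data port */
        return nouveau_memx_train_result(ppwr, result, ARRAY_SIZE(result));
}
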
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
index 32794a999106..26ccd8df193f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -101,6 +101,41 @@ nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
return ret;
}
+static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
+ struct nouveau_volt *volt)
+{
+ struct nvbios_volt_entry ivid;
+ struct nvbios_volt info;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ int i;
+
+ data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+ if (data && info.vidmask && info.base && info.step) {
+ for (i = 0; i < info.vidmask + 1; i++) {
+ if (info.base >= info.min &&
+ info.base <= info.max) {
+ volt->vid[volt->vid_nr].uv = info.base;
+ volt->vid[volt->vid_nr].vid = i;
+ volt->vid_nr++;
+ }
+ info.base += info.step;
+ }
+ volt->vid_mask = info.vidmask;
+ } else if (data && info.vidmask) {
+ for (i = 0; i < cnt; i++) {
+ data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+ &ivid);
+ if (data) {
+ volt->vid[volt->vid_nr].uv = ivid.voltage;
+ volt->vid[volt->vid_nr].vid = ivid.vid;
+ volt->vid_nr++;
+ }
+ }
+ volt->vid_mask = info.vidmask;
+ }
+}
+
int
_nouveau_volt_init(struct nouveau_object *object)
{
@@ -136,10 +171,6 @@ nouveau_volt_create_(struct nouveau_object *parent,
{
struct nouveau_bios *bios = nouveau_bios(parent);
struct nouveau_volt *volt;
- struct nvbios_volt_entry ivid;
- struct nvbios_volt info;
- u8 ver, hdr, cnt, len;
- u16 data;
int ret, i;
ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
@@ -152,31 +183,9 @@ nouveau_volt_create_(struct nouveau_object *parent,
volt->set = nouveau_volt_set;
volt->set_id = nouveau_volt_set_id;
- data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
- for (i = 0; i < info.vidmask + 1; i++) {
- if (info.base >= info.min &&
- info.base <= info.max) {
- volt->vid[volt->vid_nr].uv = info.base;
- volt->vid[volt->vid_nr].vid = i;
- volt->vid_nr++;
- }
- info.base += info.step;
- }
- volt->vid_mask = info.vidmask;
- } else
- if (data && info.vidmask) {
- for (i = 0; i < cnt; i++) {
- data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
- &ivid);
- if (data) {
- volt->vid[volt->vid_nr].uv = ivid.voltage;
- volt->vid[volt->vid_nr].vid = ivid.vid;
- volt->vid_nr++;
- }
- }
- volt->vid_mask = info.vidmask;
- }
+ /* Assume that devices without a BIOS build the voltage table later */
+ if (bios)
+ nouveau_volt_parse_bios(bios, volt);
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
new file mode 100644
index 000000000000..717368ef31ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
+#include <subdev/volt.h>
+
+struct cvb_coef {
+ int c0;
+ int c1;
+ int c2;
+ int c3;
+ int c4;
+ int c5;
+};
+
+struct gk20a_volt_priv {
+ struct nouveau_volt base;
+ struct regulator *vdd;
+};
+
+const struct cvb_coef gk20a_cvb_coef[] = {
+ /* MHz, c0, c1, c2, c3, c4, c5 */
+ /* 72 */ { 1209886, -36468, 515, 417, -13123, 203},
+ /* 108 */ { 1130804, -27659, 296, 298, -10834, 221},
+ /* 180 */ { 1162871, -27110, 247, 238, -10681, 268},
+ /* 252 */ { 1220458, -28654, 247, 179, -10376, 298},
+ /* 324 */ { 1280953, -30204, 247, 119, -9766, 304},
+ /* 396 */ { 1344547, -31777, 247, 119, -8545, 292},
+ /* 468 */ { 1420168, -34227, 269, 60, -7172, 256},
+ /* 540 */ { 1490757, -35955, 274, 60, -5188, 197},
+ /* 612 */ { 1599112, -42583, 398, 0, -1831, 119},
+ /* 648 */ { 1366986, -16459, -274, 0, -3204, 72},
+ /* 684 */ { 1391884, -17078, -274, -60, -1526, 30},
+ /* 708 */ { 1415522, -17497, -274, -60, -458, 0},
+ /* 756 */ { 1464061, -18331, -274, -119, 1831, -72},
+ /* 804 */ { 1524225, -20064, -254, -119, 4272, -155},
+ /* 852 */ { 1608418, -21643, -269, 0, 763, -48},
+};
+
+/**
+ * cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
+ */
+static inline int
+gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
+ const struct cvb_coef *coef)
+{
+ int mv;
+
+ mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
+ mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale) + coef->c0;
+ return mv;
+}
+
+/**
+ * cvb_t_mv =
+ * ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
+ * ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
+ */
+static inline int
+gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
+ const struct cvb_coef *coef)
+{
+ int cvb_mv, mv;
+
+ cvb_mv = gk20a_volt_get_cvb_voltage(speedo, s_scale, coef);
+
+ mv = DIV_ROUND_CLOSEST(coef->c3 * speedo, s_scale) + coef->c4 +
+ DIV_ROUND_CLOSEST(coef->c5 * temp, t_scale);
+ mv = DIV_ROUND_CLOSEST(mv * temp, t_scale) + cvb_mv;
+ return mv;
+}
+
+static int
+gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
+{
+ int mv;
+
+ mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
+ mv = DIV_ROUND_UP(mv, 1000);
+
+ return mv * 1000;
+}
+
+static int
+gk20a_volt_vid_get(struct nouveau_volt *volt)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+ int i, uv;
+
+ uv = regulator_get_voltage(priv->vdd);
+
+ for (i = 0; i < volt->vid_nr; i++)
+ if (volt->vid[i].uv >= uv)
+ return i;
+
+ return -EINVAL;
+}
+
+static int
+gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+
+ nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
+ return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+}
+
+static int
+gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+ struct gk20a_volt_priv *priv = (void *)volt;
+ int prev_uv = regulator_get_voltage(priv->vdd);
+ int target_uv = volt->vid[id].uv;
+ int ret;
+
+ nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
+ prev_uv, target_uv, condition);
+ if (!condition ||
+ (condition < 0 && target_uv < prev_uv) ||
+ (condition > 0 && target_uv > prev_uv)) {
+ ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_volt_priv *priv;
+ struct nouveau_volt *volt;
+ struct nouveau_platform_device *plat;
+ int i, ret, uv;
+
+ ret = nouveau_volt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ volt = &priv->base;
+
+ plat = nv_device_to_platform(nv_device(parent));
+
+ uv = regulator_get_voltage(plat->gpu->vdd);
+ nv_info(priv, "The default voltage is %duV\n", uv);
+
+ priv->vdd = plat->gpu->vdd;
+ priv->base.vid_get = gk20a_volt_vid_get;
+ priv->base.vid_set = gk20a_volt_vid_set;
+ priv->base.set_id = gk20a_volt_set_id;
+
+ volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+ nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
+ for (i = 0; i < volt->vid_nr; i++) {
+ volt->vid[i].vid = i;
+ volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+ plat->gpu_speedo);
+ nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
+ volt->vid[i].uv);
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_volt_oclass = {
+ .handle = NV_SUBDEV(VOLT, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_volt_ctor,
+ .dtor = _nouveau_volt_dtor,
+ .init = _nouveau_volt_init,
+ .fini = _nouveau_volt_fini,
+ },
+};
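
The two block comments above (cvb_mv and cvb_t_mv) are the whole of the voltage model; everything else in gk20a_volt_calc_voltage() is integer rounding. As a standalone cross-check, the same arithmetic can be reproduced in user space. This is a minimal sketch: the speedo value of 1600 is an assumption (the real value comes from plat->gpu_speedo), the 396 MHz coefficient row is picked arbitrarily from the table above, and the kernel rounding helpers are re-implemented for positive divisors only:

/* Standalone sketch of the CVB evaluation used by gk20a_volt_calc_voltage(). */
#include <stdio.h>

struct cvb_coef { int c0, c1, c2, c3, c4, c5; };

/* Behaves like DIV_ROUND_CLOSEST() for positive divisors, negative dividends included. */
static int div_round_closest(int x, int d)
{
        return x >= 0 ? (x + d / 2) / d : (x - d / 2) / d;
}

/* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) */
static int cvb_voltage(int speedo, int s_scale, const struct cvb_coef *c)
{
        int mv = div_round_closest(c->c2 * speedo, s_scale);
        return div_round_closest((mv + c->c1) * speedo, s_scale) + c->c0;
}

/* cvb_t_mv adds ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale). */
static int cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
                         const struct cvb_coef *c)
{
        int cvb_mv = cvb_voltage(speedo, s_scale, c);
        int mv = div_round_closest(c->c3 * speedo, s_scale) + c->c4 +
                 div_round_closest(c->c5 * temp, t_scale);
        return div_round_closest(mv * temp, t_scale) + cvb_mv;
}

int main(void)
{
        /* 396 MHz row from the table above; speedo 1600 is an assumption. */
        const struct cvb_coef c = { 1344547, -31777, 247, 119, -8545, 292 };
        int uv = cvb_t_voltage(1600, -10, 100, 10, &c);

        /* gk20a_volt_calc_voltage() then rounds up to a whole millivolt. */
        uv = (uv + 999) / 1000 * 1000;
        printf("target voltage: %d uV\n", uv);
        return 0;
}

Running it prints the microvolt target before it would be handed to the regulator, which is handy for sanity-checking a new coefficient row.
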
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index fca6a1f9c20c..38402ade6835 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "nouveau_drm.h"
#include "nouveau_reg.h"
@@ -613,7 +614,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1129,7 +1130,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 1e9056a8df94..9f2498571d09 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -126,7 +126,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return -ERANGE;
}
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
@@ -373,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w < src_w || crtc_h < src_h)
return -ERANGE;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a24faa5e2a2a..d39a15000068 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -308,7 +308,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
- ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dae2c96deef8..7df6acc8bb34 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1258,7 +1258,7 @@ olddcb_table(struct drm_device *dev)
return NULL;
}
- if (dcb[0] >= 0x41) {
+ if (dcb[0] >= 0x42) {
NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
return NULL;
} else
@@ -1481,18 +1481,22 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->dpconf.link_bw = 540000;
break;
}
- switch ((conf & 0x0f000000) >> 24) {
- case 0xf:
- entry->dpconf.link_nr = 4;
- break;
- case 0x3:
- entry->dpconf.link_nr = 2;
- break;
- default:
- entry->dpconf.link_nr = 1;
- break;
+ entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
+ if (dcb->version < 0x41) {
+ switch (entry->dpconf.link_nr) {
+ case 0xf:
+ entry->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ entry->dpconf.link_nr = 2;
+ break;
+ default:
+ entry->dpconf.link_nr = 1;
+ break;
+ }
}
link = entry->dpconf.sor.link;
+ entry->i2c_index += NV_I2C_AUX(0);
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3d474ac03f88..21ec561edc99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -214,6 +214,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
+ if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
+ nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+
nvbo->page_shift = 12;
if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
@@ -291,8 +294,9 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
struct ttm_placement *pl = &nvbo->placement;
- uint32_t flags = TTM_PL_MASK_CACHING |
- (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+ uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
+ TTM_PL_MASK_CACHING) |
+ (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
set_placement_list(nvbo->placements, &pl->num_placement,
@@ -306,42 +310,75 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ bool force = false, evict = false;
int ret;
ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
- goto out;
+ return ret;
- if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
- NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
- ret = -EINVAL;
- goto out;
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ memtype == TTM_PL_FLAG_VRAM && contig) {
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ struct nouveau_mem *mem = bo->mem.mm_node;
+ if (!list_is_singular(&mem->regions))
+ evict = true;
+ }
+ nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+ force = true;
+ }
}
- if (nvbo->pin_refcnt++)
+ if (nvbo->pin_refcnt) {
+ if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ NV_ERROR(drm, "bo %p pinned elsewhere: "
+ "0x%08x vs 0x%08x\n", bo,
+ 1 << bo->mem.mem_type, memtype);
+ ret = -EBUSY;
+ }
+ nvbo->pin_refcnt++;
goto out;
+ }
+ if (evict) {
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (ret)
+ goto out;
+ }
+
+ nvbo->pin_refcnt++;
nouveau_bo_placement_set(nvbo, memtype, 0);
+ /* drop pin_refcnt temporarily, so we don't trip the assertion
+ * in nouveau_bo_move() that makes sure we're not trying to
+ * move a pinned buffer
+ */
+ nvbo->pin_refcnt--;
ret = nouveau_bo_validate(nvbo, false, false);
- if (ret == 0) {
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- drm->gem.vram_available -= bo->mem.size;
- break;
- case TTM_PL_TT:
- drm->gem.gart_available -= bo->mem.size;
- break;
- default:
- break;
- }
+ if (ret)
+ goto out;
+ nvbo->pin_refcnt++;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ drm->gem.vram_available -= bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ drm->gem.gart_available -= bo->mem.size;
+ break;
+ default:
+ break;
}
+
out:
+ if (force && ret)
+ nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
ttm_bo_unreserve(bo);
return ret;
}
@@ -392,7 +429,14 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ /*
+ * TTM buffers allocated using the DMA API already have a mapping; use
+ * it instead.
+ */
+ if (!nvbo->force_coherent)
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
@@ -400,10 +444,57 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
- if (nvbo)
+ if (!nvbo)
+ return;
+
+ /*
+ * TTM buffers allocated using the DMA API already have a coherent
+ * mapping that we used; there is no need to unmap it.
+ */
+ if (!nvbo->force_coherent)
ttm_bo_kunmap(&nvbo->kmap);
}
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_device(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+ return;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_cpu(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -415,15 +506,41 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
+ nouveau_bo_sync_for_device(nvbo);
+
return 0;
}
+static inline void *
+_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
+{
+ struct ttm_dma_tt *dma_tt;
+ u8 *m = mem;
+
+ index *= sz;
+
+ if (m) {
+ /* kmap'd address, return the corresponding offset */
+ m += index;
+ } else {
+ /* DMA-API mapping, lookup the right address */
+ dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ m = dma_tt->cpu_address[index / PAGE_SIZE];
+ m += index % PAGE_SIZE;
+ }
+
+ return m;
+}
+#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
+
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
return ioread16_native((void __force __iomem *)mem);
else
@@ -435,7 +552,9 @@ nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
else
@@ -447,7 +566,9 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
else
@@ -459,7 +580,9 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = &mem[index];
+
+ mem = nouveau_bo_mem_index(nvbo, index, mem);
+
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
else
@@ -1184,6 +1307,9 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
+ if (nvbo->pin_refcnt)
+ NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
+
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
@@ -1376,6 +1502,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = nv_device_base(device);
+ /*
+ * Objects matching this condition have been marked as force_coherent,
+ * so use the DMA API for them.
+ */
+ if (!nv_device_is_cpu_coherent(device) &&
+ ttm->caching_state == tt_uncached)
+ return ttm_dma_populate(ttm_dma, dev->dev);
+
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_populate(ttm);
@@ -1433,6 +1567,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = nv_device_base(device);
+ /*
+ * Objects matching this condition have been marked as force_coherent,
+ * so use the DMA API for them.
+ */
+ if (!nv_device_is_cpu_coherent(device) &&
+ ttm->caching_state == tt_uncached)
+ ttm_dma_unpopulate(ttm_dma, dev->dev);
+
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
ttm_agp_tt_unpopulate(ttm);
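For reference, a stand-alone C sketch of the offset lookup performed by _nouveau_bo_mem_index() above: with a linear kmap the byte offset is added directly, otherwise the offset is resolved through a per-page address table, since the DMA API provides one CPU pointer per page. The function name, the page-address array and the 4 KiB page size are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

static void *mem_index(void *kmap, void **page_addrs, size_t index, size_t elem_sz)
{
	size_t byte = index * elem_sz;

	if (kmap)	/* linear mapping available: plain pointer arithmetic */
		return (uint8_t *)kmap + byte;

	/* per-page table: pick the page, then the offset inside it */
	return (uint8_t *)page_addrs[byte / PAGE_SZ] + byte % PAGE_SZ;
}

int main(void)
{
	void *pages[2] = { malloc(PAGE_SZ), malloc(PAGE_SZ) };
	uint32_t *slot = mem_index(NULL, pages, 1025, sizeof(uint32_t));

	*slot = 0xdeadbeef;	/* u32 index 1025 = byte 4100, i.e. second page, offset 4 */
	printf("%p\n", (void *)slot);
	free(pages[0]);
	free(pages[1]);
	return 0;
}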
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 22d2c764d80b..072222efeeb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -13,6 +13,7 @@ struct nouveau_bo {
u32 valid_domains;
struct ttm_place placements[3];
struct ttm_place busy_placements[3];
+ bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
@@ -72,7 +73,7 @@ int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct reservation_object *robj,
struct nouveau_bo **);
-int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
+int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
@@ -84,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
+void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
+void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index fd3dbd59d73e..aff9099aae6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -102,14 +102,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->drm = drm;
/* allocate memory for dma push buffer */
- target = TTM_PL_FLAG_TT;
+ target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
if (nouveau_vram_pushbuf)
target = TTM_PL_FLAG_VRAM;
ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target);
+ ret = nouveau_bo_pin(chan->push.buffer, target, false);
if (ret == 0)
ret = nouveau_bo_map(chan->push.buffer);
}
@@ -285,7 +285,6 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_software_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
- bool save;
nvif_object_map(chan->object);
@@ -387,11 +386,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise synchronisation */
- save = cli->base.super;
- cli->base.super = true; /* hack until fencenv50 fixed */
- ret = nouveau_fence(chan->drm)->context_new(chan);
- cli->base.super = save;
- return ret;
+ return nouveau_fence(chan->drm)->context_new(chan);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a88e6927f571..5d93902a91ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -479,6 +479,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GM204_DISP,
GM107_DISP,
GK110_DISP,
GK104_DISP,
@@ -568,9 +569,10 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo) {
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
}
return 0;
@@ -591,15 +593,17 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (!nv_crtc->cursor.nvbo)
+ continue;
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
if (!ret)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
@@ -630,9 +634,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
- nv_crtc->cursor.set_offset(nv_crtc, offset);
+ if (!nv_crtc->cursor.nvbo)
+ continue;
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
}
@@ -710,7 +715,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENOMEM;
if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
if (ret)
goto fail_free;
}
@@ -871,6 +876,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
+ bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -886,6 +892,14 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(!(gem->dumb || gem->import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 62b97c4eef8d..65910e3aed0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -613,26 +613,6 @@ fail_display:
return ret;
}
-int nouveau_pmops_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
- return 0;
-
- ret = nouveau_do_suspend(drm_dev, false);
- if (ret)
- return ret;
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
@@ -667,7 +647,29 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
return 0;
}
-int nouveau_pmops_resume(struct device *dev)
+int
+nouveau_pmops_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
+ ret = nouveau_do_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int
+nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
@@ -687,20 +689,122 @@ int nouveau_pmops_resume(struct device *dev)
return nouveau_do_resume(drm_dev, false);
}
-static int nouveau_pmops_freeze(struct device *dev)
+static int
+nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_suspend(drm_dev, false);
}
-static int nouveau_pmops_thaw(struct device *dev)
+static int
+nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return nouveau_do_resume(drm_dev, false);
}
+static int
+nouveau_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ nv_debug_level(SILENT);
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ nouveau_switcheroo_optimus_dsm();
+ ret = nouveau_do_suspend(drm_dev, true);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ return ret;
+}
+
+static int
+nouveau_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nvif_device *device = &nouveau_drm(drm_dev)->device;
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = nouveau_do_resume(drm_dev, true);
+ drm_kms_helper_poll_enable(drm_dev);
+ /* do magic */
+ nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ nv_debug_level(NORMAL);
+ return ret;
+}
+
+static int
+nouveau_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_crtc *crtc;
+
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
+
+ /* if we have a hdmi audio device - make sure it has a driver loaded */
+ if (drm->hdmi_device) {
+ if (!drm->hdmi_device->driver) {
+ DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
@@ -907,104 +1011,6 @@ nouveau_drm_pci_table[] = {
{}
};
-static int nouveau_pmops_runtime_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- nv_debug_level(SILENT);
- drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
- nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev, true);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
- return ret;
-}
-
-static int nouveau_pmops_runtime_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->device;
- int ret;
-
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_set_master(pdev);
-
- ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
- /* do magic */
- nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
- nv_debug_level(NORMAL);
- return ret;
-}
-
-static int nouveau_pmops_runtime_idle(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* if we have a hdmi audio device - make sure it has a driver loaded */
- if (drm->hdmi_device) {
- if (!drm->hdmi_device->driver) {
- DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
- pm_runtime_mark_last_busy(dev);
- return -EBUSY;
- }
- }
-
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
- pm_runtime_mark_last_busy(dev);
- pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
-}
-
static void nouveau_display_options(void)
{
DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 593ef8a2a069..3ed12a8cfc91 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
@@ -498,6 +498,23 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
console_unlock();
}
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ if (state == FBINFO_STATE_RUNNING) {
+ schedule_work(&drm->fbcon->work);
+ return;
+ }
+ flush_work(&drm->fbcon->work);
+ console_lock();
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ nouveau_fbcon_accel_save_disable(dev);
+ console_unlock();
+ }
+}
+
int
nouveau_fbcon_init(struct drm_device *dev)
{
@@ -557,20 +574,3 @@ nouveau_fbcon_fini(struct drm_device *dev)
kfree(drm->fbcon);
drm->fbcon = NULL;
}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- if (state == FBINFO_STATE_RUNNING) {
- schedule_work(&drm->fbcon->work);
- return;
- }
- flush_work(&drm->fbcon->work);
- console_lock();
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 36951ee4b157..28d51a22a4bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,6 +444,9 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ WARN_ONCE(nvbo->gem.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
@@ -867,6 +870,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
else
ret = lret;
}
+ nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
return ret;
@@ -876,6 +880,17 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_gem_cpu_fini *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ nouveau_bo_sync_for_device(nvbo);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 246a824c16ca..b307bbedd4c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
#include "nouveau_drm.h"
@@ -128,6 +129,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
}
device->gpu = gpu;
+ device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 91f66504900e..58c28b5653d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -41,6 +41,8 @@ struct nouveau_platform_device {
struct nouveau_device device;
struct nouveau_platform_gpu *gpu;
+
+ int gpu_speedo;
};
#define nv_device_to_platform(d) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 228226ab27fc..dd32ad6db53d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 40b461c7d5c5..57860cfa1de5 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -131,7 +131,7 @@ nv17_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index eb8b36714fa1..490b90866baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
#include <nvif/class.h>
@@ -65,15 +66,29 @@ static int
nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_chan *chan)
{
+ const u32 handle = (oclass[0] << 16) | head;
+ u32 sclass[8];
+ int ret, i;
+
+ ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
+ WARN_ON(ret > ARRAY_SIZE(sclass));
+ if (ret < 0)
+ return ret;
+
while (oclass[0]) {
- int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
- oclass[0], data, size,
- &chan->user);
- if (oclass++, ret == 0) {
- nvif_object_map(&chan->user);
- return ret;
+ for (i = 0; i < ARRAY_SIZE(sclass); i++) {
+ if (sclass[i] == oclass[0]) {
+ ret = nvif_object_init(disp, NULL, handle,
+ oclass[0], data, size,
+ &chan->user);
+ if (ret == 0)
+ nvif_object_map(&chan->user);
+ return ret;
+ }
}
+ oclass++;
}
+
return -ENOSYS;
}
@@ -110,6 +125,7 @@ nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
struct nv50_curs {
struct nv50_pioc base;
+ struct nouveau_bo *image;
};
static int
@@ -265,6 +281,7 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
.pushbuf = 0xb0007d00,
};
static const u32 oclass[] = {
+ GM204_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
GK104_DISP_CORE_CHANNEL_DMA,
@@ -424,8 +441,21 @@ evo_kick(u32 *push, void *evoc)
mutex_unlock(&dmac->lock);
}
+#if 1
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
+#else
+#define evo_mthd(p,m,s) do { \
+ const u32 _m = (m), _s = (s); \
+ printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+ *((p)++) = ((_s << 18) | _m); \
+} while(0)
+#define evo_data(p,d) do { \
+ const u32 _d = (d); \
+ printk(KERN_ERR "\t%08x\n", _d); \
+ *((p)++) = _d; \
+} while(0)
+#endif
static bool
evo_sync_wait(void *data)
@@ -887,23 +917,24 @@ static void
nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
u32 *push = evo_wait(mast, 16);
if (push) {
if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
} else
if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_data(push, curs->image->bo.offset >> 8);
evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
evo_data(push, mast->base.vram.handle);
}
@@ -940,8 +971,9 @@ static void
nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
- if (show)
+ if (show && curs->image)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);
@@ -1041,7 +1073,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
evo_kick(push, mast);
}
- nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_crtc_cursor_show_hide(nv_crtc, true, true);
nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
}
@@ -1060,7 +1092,7 @@ nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nv50_head *head = nv50_head(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
if (ret == 0) {
if (head->image)
nouveau_bo_unpin(head->image);
@@ -1241,13 +1273,13 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_curs *curs = nv50_curs(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
+ struct drm_gem_object *gem = NULL;
+ struct nouveau_bo *nvbo = NULL;
+ int ret = 0;
- if (visible) {
+ if (handle) {
if (width != 64 || height != 64)
return -EINVAL;
@@ -1256,23 +1288,17 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
}
- if (visible != nv_crtc->cursor.visible) {
- nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
+ if (ret == 0) {
+ if (curs->image)
+ nouveau_bo_unpin(curs->image);
+ nouveau_bo_ref(nvbo, &curs->image);
}
+ drm_gem_object_unreference_unlocked(gem);
+ nv50_crtc_cursor_show_hide(nv_crtc, true, true);
return ret;
}
@@ -1327,10 +1353,10 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(NULL, &head->image);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- if (nv_crtc->cursor.nvbo)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ /*XXX: ditto */
+ if (head->curs.image)
+ nouveau_bo_unpin(head->curs.image);
+ nouveau_bo_ref(NULL, &head->curs.image);
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
@@ -1362,16 +1388,6 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
.page_flip = nouveau_crtc_page_flip,
};
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
static int
nv50_crtc_create(struct drm_device *dev, int index)
{
@@ -1390,8 +1406,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
head->base.color_vibrance = 50;
head->base.vibrant_hue = 0;
- head->base.cursor.set_offset = nv50_cursor_set_offset;
- head->base.cursor.set_pos = nv50_cursor_set_pos;
for (i = 0; i < 256; i++) {
head->base.lut.r[i] = i << 8;
head->base.lut.g[i] = i << 8;
@@ -1406,7 +1420,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(head->base.lut.nvbo);
if (ret)
@@ -1426,22 +1440,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (ret)
goto out;
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret) {
- ret = nouveau_bo_map(head->base.cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(head->base.lut.nvbo);
- }
- if (ret)
- nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
/* allocate page flip / sync resources */
ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
&head->sync);
@@ -1701,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
- nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+ nvif_mthd(disp->disp, 0, &args,
+ sizeof(args.base) + drm_eld_size(args.data));
}
static void
@@ -2373,11 +2372,6 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
u8 tile = nvbo->tile_mode;
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- NV_ERROR(drm, "framebuffer requires contiguous bo\n");
- return -EINVAL;
- }
-
if (drm->device.info.chipset >= 0xc0)
tile >>= 4; /* yep.. */
@@ -2491,7 +2485,7 @@ nv50_display_create(struct drm_device *dev)
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 22d242b37962..a82d9ea7c6fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -102,7 +102,7 @@ nv50_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index d6c6c87c3f07..cb5b88938d45 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -234,7 +234,7 @@ nv84_fence_create(struct nouveau_drm *drm)
ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
if (ret)
@@ -246,10 +246,10 @@ nv84_fence_create(struct nouveau_drm *drm)
if (ret == 0)
ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
- &priv->bo_gart);
+ TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
+ 0, NULL, NULL, &priv->bo_gart);
if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+ ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo_gart);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
index e5a27df0672b..4e308eacb27a 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -35,6 +35,7 @@
#define GK104_DISP 0x00009170
#define GK110_DISP 0x00009270
#define GM107_DISP 0x00009470
+#define GM204_DISP 0x00009570
#define NV50_DISP_CURSOR 0x0000507a
#define G82_DISP_CURSOR 0x0000827a
@@ -65,6 +66,7 @@
#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d
#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA 0x0000957d
#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e
@@ -131,6 +133,7 @@ struct nv_device_v0 {
#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY2 0x0000080000000000ULL
__u64 disable; /* disable particular subsystems */
__u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
};
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3c4df1fc26dc..3f7ac5bc8e03 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -62,6 +62,7 @@ nvif_drivers[] = {
#else
&nvif_driver_drm,
&nvif_driver_lib,
+ &nvif_driver_null,
#endif
NULL
};
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
index ac4bdb3ea506..8bd39e69229c 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -17,5 +17,6 @@ struct nvif_driver {
extern const struct nvif_driver nvif_driver_nvkm;
extern const struct nvif_driver nvif_driver_drm;
extern const struct nvif_driver nvif_driver_lib;
+extern const struct nvif_driver nvif_driver_null;
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2d28dc337cfb..b0566a1ca28f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -20,6 +20,7 @@
#include "omap_drv.h"
#include <drm/drm_mode.h>
+#include <drm/drm_plane_helper.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 862ba03c236c..8241ed9b353c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -718,7 +718,6 @@ static const struct dev_pm_ops omapdrm_pm_ops = {
static struct platform_driver pdev = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &omapdrm_pm_ops,
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index e4849413ee80..aeb91ed653c9 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -612,8 +612,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
union omap_gem_size gsize;
- /* in case someone tries to feed us a completely bogus stride: */
- args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->pitch = align_pitch(0, args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 891a4dc608af..ee8e2b3a117e 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -388,20 +388,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
- int ret;
DBG("%s: priv=%d", plane_names[id], private_plane);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
- goto fail;
+ return NULL;
- ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+ drm_flip_work_init(&omap_plane->unpin_work,
"unpin", unpin_worker);
- if (ret) {
- dev_err(dev->dev, "could not allocate unpin FIFO\n");
- goto fail;
- }
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -443,10 +438,4 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->info.zorder = id;
return plane;
-
-fail:
- if (plane)
- omap_plane_destroy(plane);
-
- return NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index bee9f72b3a93..024e98ef8e4d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -27,4 +27,17 @@ config DRM_PANEL_S6E8AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SHARP_LQ101R1SX01
+ tristate "Sharp LQ101R1SX01 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Sharp LQ101R1SX01
+ TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+ 24 bit RGB per pixel. It provides a dual MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
+ To compile this driver as a module, choose M here: the module
+ will be called panel-sharp-lq101r1sx01.
+
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 8b929212fad7..4b2a0430804b 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 42ac67b21e9f..08cf2c588c3d 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -145,7 +145,7 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
if (ctx->error < 0 || len == 0)
return;
- dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", len, data);
+ dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
ret = ld9040_spi_write_word(ctx, *data);
while (!ret && --len) {
@@ -154,8 +154,8 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
}
if (ret) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
- data);
+ dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+ (int)len, data);
ctx->error = ret;
}
@@ -336,17 +336,12 @@ static int ld9040_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset-gpios %ld\n",
PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
- ret = gpiod_direction_output(ctx->reset_gpio, 1);
- if (ret < 0) {
- dev_err(dev, "cannot configure reset-gpios %d\n", ret);
- return ret;
- }
spi->bits_per_word = 9;
ret = spi_setup(spi);
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index b5217fe37f02..144b2733e3d7 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -141,10 +141,10 @@ static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, data, len);
+ ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
- data);
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+ (int)len, data);
ctx->error = ret;
}
}
@@ -800,27 +800,15 @@ static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
}
static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
- int size)
+ u16 size)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- const struct mipi_dsi_host_ops *ops = dsi->host->ops;
- u8 buf[] = {size, 0};
- struct mipi_dsi_msg msg = {
- .channel = dsi->channel,
- .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
- .tx_len = sizeof(buf),
- .tx_buf = buf
- };
int ret;
if (ctx->error < 0)
return;
- if (!ops || !ops->transfer)
- ret = -EIO;
- else
- ret = ops->transfer(dsi->host, &msg);
-
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
if (ret < 0) {
dev_err(ctx->dev,
"error %d setting maximum return packet size to %d\n",
@@ -1019,17 +1007,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset-gpios %ld\n",
PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
- ret = gpiod_direction_output(ctx->reset_gpio, 1);
- if (ret < 0) {
- dev_err(dev, "cannot configure reset-gpios %d\n", ret);
- return ret;
- }
ctx->brightness = GAMMA_LEVEL_NUM - 1;
@@ -1069,7 +1052,6 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
.remove = s6e8aa0_remove,
.driver = {
.name = "panel_s6e8aa0",
- .owner = THIS_MODULE,
.of_match_table = s6e8aa0_of_match,
},
};
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
new file mode 100644
index 000000000000..9d81759d82fc
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include <linux/host1x.h>
+
+struct sharp_panel {
+ struct drm_panel base;
+ /* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
+ struct mipi_dsi_device *link1;
+ struct mipi_dsi_device *link2;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ bool prepared;
+ bool enabled;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_panel, base);
+}
+
+static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
+{
+ u8 payload[3] = { offset >> 8, offset & 0xff, value };
+ struct mipi_dsi_device *dsi = sharp->link1;
+ ssize_t err;
+
+ err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ if (err < 0) {
+ dev_err(&dsi->dev, "failed to write %02x to %04x: %zd\n",
+ value, offset, err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_nop(dsi);
+ if (err < 0) {
+ dev_err(&dsi->dev, "failed to send DCS nop: %zd\n", err);
+ return err;
+ }
+
+ usleep_range(10, 20);
+
+ return 0;
+}
+
+static __maybe_unused int sharp_panel_read(struct sharp_panel *sharp,
+ u16 offset, u8 *value)
+{
+ ssize_t err;
+
+ cpu_to_be16s(&offset);
+
+ err = mipi_dsi_generic_read(sharp->link1, &offset, sizeof(offset),
+ value, sizeof(*value));
+ if (err < 0)
+ dev_err(&sharp->link1->dev, "failed to read from %04x: %zd\n",
+ offset, err);
+
+ return err;
+}
+
+static int sharp_panel_disable(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+
+ if (!sharp->enabled)
+ return 0;
+
+ if (sharp->backlight) {
+ sharp->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(sharp->backlight);
+ }
+
+ sharp->enabled = false;
+
+ return 0;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ int err;
+
+ if (!sharp->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(sharp->link1);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(sharp->link1);
+ if (err < 0)
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+ msleep(120);
+
+ regulator_disable(sharp->supply);
+
+ sharp->prepared = false;
+
+ return 0;
+}
+
+static int sharp_setup_symmetrical_split(struct mipi_dsi_device *left,
+ struct mipi_dsi_device *right,
+ const struct drm_display_mode *mode)
+{
+ int err;
+
+ err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
+ if (err < 0) {
+ dev_err(&left->dev, "failed to set column address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
+ if (err < 0) {
+ dev_err(&left->dev, "failed to set page address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_column_address(right, mode->hdisplay / 2,
+ mode->hdisplay - 1);
+ if (err < 0) {
+ dev_err(&right->dev, "failed to set column address: %d\n", err);
+ return err;
+ }
+
+ err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
+ if (err < 0) {
+ dev_err(&right->dev, "failed to set page address: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ u8 format = MIPI_DCS_PIXEL_FMT_24BIT;
+ int err;
+
+ if (sharp->prepared)
+ return 0;
+
+ err = regulator_enable(sharp->supply);
+ if (err < 0)
+ return err;
+
+ usleep_range(10000, 20000);
+
+ err = mipi_dsi_dcs_soft_reset(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "soft reset failed: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /*
+ * The MIPI DCS specification mandates this delay only between the
+ * exit_sleep_mode and enter_sleep_mode commands, so it isn't strictly
+ * necessary here.
+ */
+ /*
+ msleep(120);
+ */
+
+ /* set left-right mode */
+ err = sharp_panel_write(sharp, 0x1000, 0x2a);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set left-right mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /* enable command mode */
+ err = sharp_panel_write(sharp, 0x1001, 0x01);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable command mode: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_pixel_format(sharp->link1, format);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set pixel format: %d\n", err);
+ goto poweroff;
+ }
+
+ /*
+ * TODO: The device supports both left-right and even-odd split
+ * configurations, but this driver currently supports only the left-
+ * right split. To support a different mode a mechanism needs to be
+ * put in place to communicate the configuration back to the DSI host
+ * controller.
+ */
+ err = sharp_setup_symmetrical_split(sharp->link1, sharp->link2,
+ sharp->mode);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_display_on(sharp->link1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ sharp->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(sharp->supply);
+ return err;
+}
+
+static int sharp_panel_enable(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+
+ if (sharp->enabled)
+ return 0;
+
+ if (sharp->backlight) {
+ sharp->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(sharp->backlight);
+ }
+
+ sharp->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 278000,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 128,
+ .hsync_end = 2560 + 128 + 64,
+ .htotal = 2560 + 128 + 64 + 64,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 4,
+ .vsync_end = 1600 + 4 + 8,
+ .vtotal = 1600 + 4 + 8 + 32,
+ .vrefresh = 60,
+};
+
+static int sharp_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 217;
+ panel->connector->display_info.height_mm = 136;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+ .disable = sharp_panel_disable,
+ .unprepare = sharp_panel_unprepare,
+ .prepare = sharp_panel_prepare,
+ .enable = sharp_panel_enable,
+ .get_modes = sharp_panel_get_modes,
+};
+
+static const struct of_device_id sharp_of_match[] = {
+ { .compatible = "sharp,lq101r1sx01", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static int sharp_panel_add(struct sharp_panel *sharp)
+{
+ struct device_node *np;
+ int err;
+
+ sharp->mode = &default_mode;
+
+ sharp->supply = devm_regulator_get(&sharp->link1->dev, "power");
+ if (IS_ERR(sharp->supply))
+ return PTR_ERR(sharp->supply);
+
+ np = of_parse_phandle(sharp->link1->dev.of_node, "backlight", 0);
+ if (np) {
+ sharp->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!sharp->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&sharp->base);
+ sharp->base.funcs = &sharp_panel_funcs;
+ sharp->base.dev = &sharp->link1->dev;
+
+ err = drm_panel_add(&sharp->base);
+ if (err < 0)
+ goto put_backlight;
+
+ return 0;
+
+put_backlight:
+ if (sharp->backlight)
+ put_device(&sharp->backlight->dev);
+
+ return err;
+}
+
+static void sharp_panel_del(struct sharp_panel *sharp)
+{
+ if (sharp->base.dev)
+ drm_panel_remove(&sharp->base);
+
+ if (sharp->backlight)
+ put_device(&sharp->backlight->dev);
+
+ if (sharp->link2)
+ put_device(&sharp->link2->dev);
+}
+
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_device *secondary = NULL;
+ struct sharp_panel *sharp;
+ struct device_node *np;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_LPM;
+
+ /* Find DSI-LINK1 */
+ np = of_parse_phandle(dsi->dev.of_node, "link2", 0);
+ if (np) {
+ secondary = of_find_mipi_dsi_device_by_node(np);
+ of_node_put(np);
+
+ if (!secondary)
+ return -EPROBE_DEFER;
+ }
+
+ /* register a panel for only the DSI-LINK1 interface */
+ if (secondary) {
+ sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
+ if (!sharp) {
+ put_device(&secondary->dev);
+ return -ENOMEM;
+ }
+
+ mipi_dsi_set_drvdata(dsi, sharp);
+
+ sharp->link2 = secondary;
+ sharp->link1 = dsi;
+
+ err = sharp_panel_add(sharp);
+ if (err < 0) {
+ put_device(&secondary->dev);
+ return err;
+ }
+ }
+
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ if (secondary)
+ sharp_panel_del(sharp);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ /* only detach from host for the DSI-LINK2 interface */
+ if (!sharp) {
+ mipi_dsi_detach(dsi);
+ return 0;
+ }
+
+ err = sharp_panel_disable(&sharp->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_detach(&sharp->base);
+ sharp_panel_del(sharp);
+
+ return 0;
+}
+
+static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+ /* nothing to do for DSI-LINK2 */
+ if (!sharp)
+ return;
+
+ sharp_panel_disable(&sharp->base);
+}
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+ .driver = {
+ .name = "panel-sharp-lq101r1sx01",
+ .of_match_table = sharp_of_match,
+ },
+ .probe = sharp_panel_probe,
+ .remove = sharp_panel_remove,
+ .shutdown = sharp_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Sharp LQ101R1SX01 panel driver");
+MODULE_LICENSE("GPL v2");
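For reference, a stand-alone C sketch of the symmetrical split configured by sharp_setup_symmetrical_split() above: DSI-LINK1 scans out the left half of the columns, DSI-LINK2 the right half, and both cover every row; the 2560x1600 figures come from default_mode.

#include <stdio.h>

int main(void)
{
	const unsigned hdisplay = 2560, vdisplay = 1600;

	/* column range handed to each link; the page (row) range is shared */
	printf("DSI-LINK1: columns 0..%u, pages 0..%u\n",
	       hdisplay / 2 - 1, vdisplay - 1);
	printf("DSI-LINK2: columns %u..%u, pages 0..%u\n",
	       hdisplay / 2, hdisplay - 1, vdisplay - 1);
	return 0;
}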
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 23de22f8c820..e95385bf8356 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -247,21 +247,14 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
dev_err(dev, "failed to request GPIO: %d\n", err);
return err;
}
- if (panel->enable_gpio) {
- err = gpiod_direction_output(panel->enable_gpio, 0);
- if (err < 0) {
- dev_err(dev, "failed to setup GPIO: %d\n", err);
- return err;
- }
- }
-
backlight = of_parse_phandle(dev->of_node, "backlight", 0);
if (backlight) {
panel->backlight = of_find_backlight_by_node(backlight);
@@ -376,6 +369,29 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
static const struct drm_display_mode auo_b133xtn01_mode = {
.clock = 69500,
.hdisplay = 1366,
@@ -415,6 +431,7 @@ static const struct drm_display_mode auo_b133htn01_mode = {
static const struct panel_desc auo_b133htn01 = {
.modes = &auo_b133htn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
@@ -536,22 +553,92 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
static const struct panel_desc foxlink_fl500wvr00_a0t = {
.modes = &foxlink_fl500wvr00_a0t_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 108,
.height = 65,
},
};
-static const struct drm_display_mode innolux_n116bge_mode = {
+static const struct drm_display_mode hannstar_hsd070pww1_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 1,
+ .hsync_end = 1280 + 1 + 158,
+ .htotal = 1280 + 1 + 158 + 1,
+ .vdisplay = 800,
+ .vsync_start = 800 + 1,
+ .vsync_end = 800 + 1 + 21,
+ .vtotal = 800 + 1 + 21 + 1,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc hannstar_hsd070pww1 = {
+ .modes = &hannstar_hsd070pww1_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 151,
+ .height = 94,
+ },
+};
+
+static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 85,
+ .hsync_end = 800 + 85 + 86,
+ .htotal = 800 + 85 + 86 + 85,
+ .vdisplay = 480,
+ .vsync_start = 480 + 16,
+ .vsync_end = 480 + 16 + 13,
+ .vtotal = 480 + 16 + 13 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc hitachi_tx23d38vm0caa = {
+ .modes = &hitachi_tx23d38vm0caa_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 195,
+ .height = 117,
+ },
+};
+
+static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 64,
+ .hsync_end = 1280 + 64 + 32,
+ .htotal = 1280 + 64 + 32 + 64,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 6,
+ .vtotal = 800 + 9 + 6 + 9,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121i1_l01 = {
+ .modes = &innolux_g121i1_l01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 76420,
.hdisplay = 1366,
- .hsync_start = 1366 + 64,
- .hsync_end = 1366 + 64 + 6,
- .htotal = 1366 + 64 + 6 + 64,
+ .hsync_start = 1366 + 136,
+ .hsync_end = 1366 + 136 + 30,
+ .htotal = 1366 + 136 + 30 + 60,
.vdisplay = 768,
.vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 4,
- .vtotal = 768 + 8 + 4 + 8,
+ .vsync_end = 768 + 8 + 12,
+ .vtotal = 768 + 8 + 12 + 12,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -643,6 +730,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
.compatible = "auo,b133htn01",
.data = &auo_b133htn01,
}, {
@@ -667,6 +757,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "hannstar,hsd070pww1",
+ .data = &hannstar_hsd070pww1,
+ }, {
+ .compatible = "hit,tx23d38vm0caa",
+ .data = &hitachi_tx23d38vm0caa,
+ }, {
+ .compatible = "innolux,g121i1-l01",
+ .data = &innolux_g121i1_l01,
+ }, {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,
}, {
@@ -708,7 +807,6 @@ static void panel_simple_platform_shutdown(struct platform_device *pdev)
static struct platform_driver panel_simple_platform_driver = {
.driver = {
.name = "panel-simple",
- .owner = THIS_MODULE,
.of_match_table = platform_of_match,
},
.probe = panel_simple_platform_probe,
@@ -741,6 +839,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.desc = {
.modes = &lg_ld070wx3_sl01_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 94,
.height = 151,
@@ -768,6 +867,7 @@ static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
.desc = {
.modes = &lg_lh500wx1_sd03_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 62,
.height = 110,
@@ -795,6 +895,7 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.desc = {
.modes = &panasonic_vvx10f004b00_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
@@ -864,7 +965,6 @@ static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
static struct mipi_dsi_driver panel_simple_dsi_driver = {
.driver = {
.name = "panel-simple-dsi",
- .owner = THIS_MODULE,
.of_match_table = dsi_of_match,
},
.probe = panel_simple_dsi_probe,
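For the panel-simple entries added above, the mode timings are written as running sums (display + front porch + sync pulse + back porch), and the pixel clock is expected to match htotal * vtotal * vrefresh to within rounding. The standalone check below is illustrative only and not part of the patch; it plugs in the new auo_b116xw03 numbers, where 70589 kHz / (1478 * 796) comes out at roughly 60 Hz, matching .vrefresh = 60.

/*
 * Quick consistency check of the timings added above.  Illustrative
 * standalone program, not part of the patch; the numbers are copied
 * from auo_b116xw03_mode.
 */
#include <stdio.h>

struct mode {
	unsigned clock_khz;
	unsigned hdisplay, hsync_start, hsync_end, htotal;
	unsigned vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct mode m = {
		.clock_khz = 70589,
		.hdisplay = 1366, .hsync_start = 1366 + 40,
		.hsync_end = 1366 + 40 + 40, .htotal = 1366 + 40 + 40 + 32,
		.vdisplay = 768, .vsync_start = 768 + 10,
		.vsync_end = 768 + 10 + 12, .vtotal = 768 + 10 + 12 + 6,
	};
	double refresh = (double)m.clock_khz * 1000.0 / (m.htotal * m.vtotal);

	/* expect roughly 60 Hz for .vrefresh = 60 */
	printf("htotal=%u vtotal=%u refresh=%.2f Hz\n",
	       m.htotal, m.vtotal, refresh);
	return 0;
}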
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0d1396266857..4a0a8b29b0a1 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -29,6 +29,7 @@
#include "qxl_drv.h"
#include "qxl_object.h"
#include "drm_crtc_helper.h"
+#include <drm/drm_plane_helper.h>
static bool qxl_head_enabled(struct qxl_head *head)
{
@@ -100,14 +101,37 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
return 0;
}
+static void qxl_update_offset_props(struct qxl_device *qdev)
+{
+ struct drm_device *dev = qdev->ddev;
+ struct drm_connector *connector;
+ struct qxl_output *output;
+ struct qxl_head *head;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ output = drm_connector_to_qxl_output(connector);
+
+ head = &qdev->client_monitors_config->heads[output->index];
+
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.suggested_x_property, head->x);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.suggested_y_property, head->y);
+ }
+}
+
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
+ struct drm_device *dev = qdev->ddev;
while (qxl_display_copy_rom_client_monitors_config(qdev)) {
qxl_io_log(qdev, "failed crc check for client_monitors_config,"
" retrying\n");
}
+ drm_modeset_lock_all(dev);
+ qxl_update_offset_props(qdev);
+ drm_modeset_unlock_all(dev);
if (!drm_helper_hpd_irq_event(qdev->ddev)) {
/* notify that the monitor configuration changed, to
adjust at the arbitrary resolution */
@@ -568,7 +592,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- struct qxl_mode *m = (void *)mode->private;
struct qxl_framebuffer *qfb;
struct qxl_bo *bo, *old_bo = NULL;
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
@@ -586,12 +609,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
}
qfb = to_qxl_framebuffer(crtc->primary->fb);
bo = gem_to_qxl_bo(qfb->obj);
- if (!m)
- /* and do we care? */
- DRM_DEBUG("%dx%d: not a native mode\n", x, y);
- else
- DRM_DEBUG("%dx%d: qxl id %d\n",
- mode->hdisplay, mode->vdisplay, m->id);
DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
x, y,
mode->hdisplay, mode->vdisplay,
@@ -951,6 +968,10 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
qdev->hotplug_mode_update_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
return 0;
}
@@ -1064,6 +1085,7 @@ int qxl_modeset_init(struct qxl_device *qdev)
qdev->ddev->mode_config.fb_base = qdev->vram_base;
+ drm_mode_create_suggested_offset_properties(qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);
for (i = 0 ; i < qxl_num_crtc; ++i) {
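qxl_update_offset_props() pushes the per-head x/y offsets from the client monitors config into the new immutable suggested-offset connector properties, under drm_modeset_lock_all(), so a desktop environment can pre-arrange its outputs before the hotplug event is delivered. The sketch below is a rough userspace consumer of those hints and is not part of the patch; it assumes libdrm is available and that the properties created by drm_mode_create_suggested_offset_properties() are named "suggested X" and "suggested Y" (build against libdrm).

/*
 * Userspace sketch (not part of the patch): list the suggested-offset
 * hints that qxl now attaches to each connector.  Assumes the property
 * names contain the word "suggested".
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	drmModeRes *res;
	int i;
	uint32_t j;

	if (fd < 0 || !(res = drmModeGetResources(fd)))
		return 1;

	for (i = 0; i < res->count_connectors; i++) {
		drmModeObjectProperties *props =
			drmModeObjectGetProperties(fd, res->connectors[i],
						   DRM_MODE_OBJECT_CONNECTOR);
		if (!props)
			continue;
		for (j = 0; j < props->count_props; j++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, props->props[j]);
			if (prop && strstr(prop->name, "suggested"))
				printf("connector %u: %s = %llu\n",
				       res->connectors[i], prop->name,
				       (unsigned long long)props->prop_values[j]);
			drmModeFreeProperty(prop);
		}
		drmModeFreeObjectProperties(props);
	}
	drmModeFreeResources(res);
	close(fd);
	return 0;
}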
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 446e71ca36cb..d9b25684ac98 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+ !no_intr, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 575e986f82a7..8fd2d9f58f77 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -905,7 +905,7 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- mask_size = depth->n * sizeof(u8);
+ mask_size = depth->n;
if (depth->mask) {
mask = memdup_user(depth->mask, mask_size);
if (IS_ERR(mask)) {
@@ -1010,7 +1010,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
}
if (depth->mask) {
- mask_size = depth->n * sizeof(u8);
+ mask_size = depth->n;
mask = memdup_user(depth->mask, mask_size);
if (IS_ERR(mask)) {
kfree(x);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index d01b87991422..12bc21219a0e 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o \
+ radeon_sync.o
# add async DMA block
radeon-y += \
@@ -104,6 +105,7 @@ radeon-y += \
radeon_vce.o \
vce_v1_0.o \
vce_v2_0.o \
+ radeon_kfd.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b25078..d59ec491dbb9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
+ radeon_cursor_reset(crtc);
/* update the hw version for dpm */
radeon_crtc->hw_mode = *adjusted_mode;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 11a55e9dad7f..f373a81ba3d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,15 +46,15 @@
static const struct ci_pt_defaults defaults_hawaii_xt =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
- { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
- { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
static const struct ci_pt_defaults defaults_hawaii_pro =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
- { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
- { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
static const struct ci_pt_defaults defaults_bonaire_xt =
@@ -184,6 +184,9 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter);
+
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
struct ci_power_info *pi = rdev->pm.dpm.priv;
@@ -249,7 +252,10 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
if (pi->caps_power_containment) {
pi->caps_cac = true;
- pi->enable_bapm_feature = true;
+ if (rdev->family == CHIP_HAWAII)
+ pi->enable_bapm_feature = false;
+ else
+ pi->enable_bapm_feature = true;
pi->enable_tdc_limit_feature = true;
pi->enable_pkg_pwr_tracking_feature = true;
}
@@ -352,6 +358,21 @@ static int ci_populate_dw8(struct radeon_device *rdev)
return 0;
}
+static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+ (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
+ rdev->pm.dpm.fan.fan_output_sensitivity =
+ rdev->pm.dpm.fan.default_fan_output_sensitivity;
+
+ pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+ cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
+
+ return 0;
+}
+
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -477,6 +498,9 @@ static int ci_populate_pm_base(struct radeon_device *rdev)
ret = ci_populate_dw8(rdev);
if (ret)
return ret;
+ ret = ci_populate_fuzzy_fan(rdev);
+ if (ret)
+ return ret;
ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
if (ret)
return ret;
@@ -690,6 +714,25 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
return ret;
}
+static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result = PPSMC_Result_OK;
+
+ if (pi->thermal_sclk_dpm_enabled) {
+ if (enable)
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+ else
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+ }
+
+ if (smc_result == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static int ci_power_control_set_level(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -700,13 +743,11 @@ static int ci_power_control_set_level(struct radeon_device *rdev)
int ret = 0;
bool adjust_polarity = false; /* ??? */
- if (pi->caps_power_containment &&
- (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+ if (pi->caps_power_containment) {
adjust_percent = adjust_polarity ?
rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
target_tdp = ((100 + adjust_percent) *
(s32)cac_tdp_table->configurable_tdp) / 100;
- target_tdp *= 256;
ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
}
@@ -814,7 +855,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
-static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
@@ -850,6 +891,350 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
return 0;
}
+static int ci_thermal_enable_alert(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ PPSMC_Result result;
+
+ if (enable) {
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = false;
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = true;
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ pi->t_min = tmp;
+ pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!pi->fan_table_start) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+ t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = radeon_get_xclk(rdev);
+
+ fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.TempSrc = (uint8_t)tmp;
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return 0;
+}
+
+static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result ret;
+
+ if (pi->caps_od_fuzzy_fan_control_support) {
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SetFanPwmMax,
+ rdev->pm.dpm.fan.default_max_fan_pwm);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ ret = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE);
+ if (ret != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if 0
+static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+ duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32_SMC(CG_FDO_CTRL0, tmp);
+
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+ return 0;
+}
+
+static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < rdev->pm.fan_min_rpm) ||
+ (speed > rdev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(rdev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32_SMC(CG_TACH_CTRL, tmp);
+
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(pi->t_min);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+ pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ci_fan_ctrl_start_smc_fan_control(rdev);
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void ci_thermal_initialize(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (rdev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+ WREG32_SMC(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32_SMC(CG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+ int ret;
+
+ ci_thermal_initialize(rdev);
+ ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = ci_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ret = ci_thermal_setup_fan_table(rdev);
+ if (ret)
+ return ret;
+ ci_thermal_start_smc_fan_control(rdev);
+ }
+
+ return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+ if (!rdev->pm.no_fan)
+ ci_fan_ctrl_set_default_mode(rdev);
+}
+
#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 *value)
@@ -1253,7 +1638,7 @@ static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
if (!pi->sclk_dpm_key_disabled) {
PPSMC_Result smc_result =
- ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
@@ -1267,7 +1652,7 @@ static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
if (!pi->mclk_dpm_key_disabled) {
PPSMC_Result smc_result =
- ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
@@ -2042,6 +2427,33 @@ static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}
+static void ci_register_patching_mc_arb(struct radeon_device *rdev,
+ const u32 engine_clock,
+ const u32 memory_clock,
+ u32 *dram_timimg2)
+{
+ bool patch;
+ u32 tmp, tmp2;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+ if (patch &&
+ ((rdev->pdev->device == 0x67B0) ||
+ (rdev->pdev->device == 0x67B1))) {
+ if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+ tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+ *dram_timimg2 &= ~0x00ff0000;
+ *dram_timimg2 |= tmp2 << 16;
+ } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+ tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+ *dram_timimg2 &= ~0x00ff0000;
+ *dram_timimg2 |= tmp2 << 16;
+ }
+ }
+}
+
+
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
u32 sclk,
u32 mclk,
@@ -2057,6 +2469,8 @@ static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+ ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
+
arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
arb_regs->McArbBurstTime = (u8)burst_time;
@@ -2351,10 +2765,10 @@ static int ci_calculate_mclk_params(struct radeon_device *rdev,
u32 tmp;
u32 reference_clock = rdev->clock.mpll.reference_freq;
- if (pi->mem_gddr5)
- freq_nom = memory_clock * 4;
+ if (mpll_param.qdr == 1)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
else
- freq_nom = memory_clock * 2;
+ freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
tmp = (freq_nom / reference_clock);
tmp = tmp * tmp;
@@ -2434,7 +2848,6 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
&memory_level->MinVddcPhases);
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
@@ -2767,7 +3180,6 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
graphic_level->CcPwrDynRm = 0;
graphic_level->CcPwrDynRm1 = 0;
- graphic_level->EnabledForActivity = 1;
graphic_level->EnabledForThrottle = 1;
graphic_level->UpH = 0;
graphic_level->DownH = 0;
@@ -2816,10 +3228,13 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
&pi->smc_state_table.GraphicsLevel[i]);
if (ret)
return ret;
+ if (i > 1)
+ pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
if (i == (dpm_table->sclk_table.count - 1))
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
+ pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -2863,6 +3278,16 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
return ret;
}
+ pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ if ((dpm_table->mclk_table.count >= 2) &&
+ ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
+ pi->smc_state_table.MemoryLevel[1].MinVddc =
+ pi->smc_state_table.MemoryLevel[0].MinVddc;
+ pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+ pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+ }
+
pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
@@ -2919,9 +3344,14 @@ static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
&pi->dpm_table.pcie_speed_table,
SMU7_MAX_LEVELS_LINK);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.min);
+ if (rdev->family == CHIP_BONAIRE)
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ else
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
pi->pcie_gen_performance.min,
pi->pcie_lane_performance.min);
@@ -2988,19 +3418,21 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
allowed_sclk_vddc_table->entries[i].clk)) {
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+ (i == 0) ? true : false;
pi->dpm_table.sclk_table.count++;
}
}
pi->dpm_table.mclk_table.count = 0;
for (i = 0; i < allowed_mclk_table->count; i++) {
- if ((i==0) ||
+ if ((i == 0) ||
(pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
allowed_mclk_table->entries[i].clk)) {
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+ (i == 0) ? true : false;
pi->dpm_table.mclk_table.count++;
}
}
@@ -3166,7 +3598,7 @@ static int ci_init_smc_table(struct radeon_device *rdev)
table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
table->PCIeGenInterval = 1;
if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
table->SVI2Enable = 1;
@@ -3320,6 +3752,8 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result result;
+ ci_apply_disp_minimum_voltage_request(rdev);
+
if (!pi->sclk_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3339,7 +3773,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
return -EINVAL;
}
}
-
+#if 0
if (!pi->pcie_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3349,9 +3783,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
return -EINVAL;
}
}
-
- ci_apply_disp_minimum_voltage_request(rdev);
-
+#endif
return 0;
}
@@ -3377,7 +3809,7 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else {
/* XXX check display min clock requirements */
- if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+ if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
@@ -3707,62 +4139,61 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- PPSMC_Result smc_result;
u32 tmp, levels, i;
int ret;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_sclk(rdev, levels);
+ ret = ci_dpm_force_state_pcie(rdev, level);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_mclk(rdev, levels);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_pcie(rdev, level);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
- CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
@@ -3816,21 +4247,17 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
}
}
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
if (!pi->pcie_dpm_key_disabled) {
- smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ PPSMC_Result smc_result;
+
+ smc_result = ci_send_msg_to_smc(rdev,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
+ ret = ci_upload_dpm_level_enable_mask(rdev);
+ if (ret)
+ return ret;
}
rdev->pm.dpm.forced_level = level;
@@ -4036,6 +4463,96 @@ static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
return 0;
}
+static int ci_register_patching_mc_seq(struct radeon_device *rdev,
+ struct ci_mc_reg_table *table)
+{
+ u8 i, k;
+ u32 tmp;
+ bool patch;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+ if (patch &&
+ ((rdev->pdev->device == 0x67B0) ||
+ (rdev->pdev->device == 0x67B1))) {
+ for (i = 0; i < table->last; i++) {
+ if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch(table->mc_reg_address[i].s1 >> 2) {
+ case MC_SEQ_MISC1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+ 0x00000007;
+ }
+ break;
+ case MC_SEQ_WR_CTL_D0:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case MC_SEQ_WR_CTL_D1:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+ 0x0000D0DD;
+ }
+ break;
+ case MC_SEQ_WR_CTL_2:
+ for (k = 0; k < table->num_entries; k++) {
+ if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+ (table->mc_reg_table_entry[k].mclk_max == 137500))
+ table->mc_reg_table_entry[k].mc_data[i] = 0;
+ }
+ break;
+ case MC_SEQ_CAS_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0140;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+ 0x000C0150;
+ }
+ break;
+ case MC_SEQ_MISC_TIMING:
+ for (k = 0; k < table->num_entries; k++) {
+ if (table->mc_reg_table_entry[k].mclk_max == 125000)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000030;
+ else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+ table->mc_reg_table_entry[k].mc_data[i] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+ 0x00000035;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+ tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
+ tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
+ }
+
+ return 0;
+}
+
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4079,6 +4596,10 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
ci_set_s0_mc_reg_index(ci_table);
+ ret = ci_register_patching_mc_seq(rdev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
ret = ci_set_mc_special_registers(rdev, ci_table);
if (ret)
goto init_mc_done;
@@ -4675,36 +5196,51 @@ int ci_dpm_enable(struct radeon_device *rdev)
return ret;
}
+ ret = ci_power_control_set_level(rdev);
+ if (ret) {
+ DRM_ERROR("ci_power_control_set_level failed\n");
+ return ret;
+ }
+
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+ return ret;
+ }
+
+ ci_thermal_start_thermal_controller(rdev);
+
ci_update_current_ps(rdev, boot_ps);
return 0;
}
-int ci_dpm_late_enable(struct radeon_device *rdev)
+static int ci_set_temperature_range(struct radeon_device *rdev)
{
int ret;
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-#if 0
- PPSMC_Result result;
-#endif
- ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("ci_set_thermal_temperature_range failed\n");
- return ret;
- }
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
-#if 0
- result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ ret = ci_thermal_enable_alert(rdev, false);
+ if (ret)
+ return ret;
+ ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = ci_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
- }
+ return ret;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
+
+ ret = ci_set_temperature_range(rdev);
+ if (ret)
+ return ret;
ci_dpm_powergate_uvd(rdev, true);
@@ -4721,6 +5257,8 @@ void ci_dpm_disable(struct radeon_device *rdev)
if (!ci_is_smc_running(rdev))
return;
+ ci_thermal_stop_thermal_controller(rdev);
+
if (pi->thermal_protection)
ci_enable_thermal_protection(rdev, false);
ci_enable_power_containment(rdev, false);
@@ -4729,12 +5267,13 @@ void ci_dpm_disable(struct radeon_device *rdev)
ci_enable_spread_spectrum(rdev, false);
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
ci_stop_dpm(rdev);
- ci_enable_ds_master_switch(rdev, true);
+ ci_enable_ds_master_switch(rdev, false);
ci_enable_ulv(rdev, false);
ci_clear_vc(rdev);
ci_reset_to_default(rdev);
ci_dpm_stop_smc(rdev);
ci_force_switch_to_arb_f0(rdev);
+ ci_enable_thermal_based_sclk_dpm(rdev, false);
ci_update_current_ps(rdev, boot_ps);
}
@@ -4804,11 +5343,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
-int ci_dpm_power_control_set_level(struct radeon_device *rdev)
-{
- return ci_power_control_set_level(rdev);
-}
-
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
ci_set_boot_state(rdev);
@@ -5068,6 +5602,8 @@ void ci_dpm_fini(struct radeon_device *rdev)
int ci_dpm_init(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ SMU7_Discrete_DpmTable *dpm_table;
+ struct radeon_gpio_rec gpio;
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
@@ -5137,6 +5673,7 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->sclk_dpm_key_disabled = 0;
pi->mclk_dpm_key_disabled = 0;
pi->pcie_dpm_key_disabled = 0;
+ pi->thermal_sclk_dpm_enabled = 0;
/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
if ((rdev->pdev->device == 0x6658) &&
@@ -5201,6 +5738,55 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->uvd_enabled = false;
+ dpm_table = &pi->smc_state_table;
+
+ gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->VRHotGpio = gpio.shift;
+ rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ } else {
+ dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+ }
+
+ gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
+ if (gpio.valid) {
+ dpm_table->AcDcGpio = gpio.shift;
+ rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ } else {
+ dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+ }
+
+ gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
+ if (gpio.valid) {
+ u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
+
+ switch (gpio.shift) {
+ case 0:
+ tmp &= ~GNB_SLOW_MODE_MASK;
+ tmp |= GNB_SLOW_MODE(1);
+ break;
+ case 1:
+ tmp &= ~GNB_SLOW_MODE_MASK;
+ tmp |= GNB_SLOW_MODE(2);
+ break;
+ case 2:
+ tmp |= GNB_SLOW;
+ break;
+ case 3:
+ tmp |= FORCE_NB_PS1;
+ break;
+ case 4:
+ tmp |= DPM_ENABLED;
+ break;
+ default:
+ DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+ break;
+ }
+ WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
+ }
+
pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
@@ -5262,6 +5848,8 @@ int ci_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ pi->fan_ctrl_is_in_default_mode = true;
+
return 0;
}
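ci_thermal_setup_fan_table() converts the ATOM fan profile into the SMC's units: fdo_min rescales the minimum PWM value (given in 0.01 %) onto the 0..duty100 hardware range, the two slopes encode PWM steps per degree between the low/medium/high trip points, and the trip temperatures are rounded to whole degrees from 0.01 degC. The standalone program below reproduces that arithmetic with invented example inputs (duty100 = 255, pwm_min = 30.00 %, and so on); real values come from the board's fan table, so treat the numbers purely as an illustration.

/*
 * Standalone arithmetic check for ci_thermal_setup_fan_table() above.
 * The inputs below (duty100, pwm_*, t_*) are invented example numbers
 * in the same units the driver uses (PWM in 0.01 %, temperature in
 * 0.01 degC); real values come from the ATOM fan table.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;				/* FMAX_DUTY100 from CG_FDO_CTRL1 */
	uint32_t pwm_min = 3000, pwm_med = 6000;	/* 30.00 %, 60.00 % */
	uint32_t t_min = 4500, t_med = 6500;		/* 45.00 C, 65.00 C */

	uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);
	uint32_t t_diff1 = t_med - t_min;
	uint32_t pwm_diff1 = pwm_med - pwm_min;
	uint16_t slope1 = (uint16_t)((50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100);
	uint16_t temp_min = (uint16_t)((50 + t_min) / 100);

	/* expected with these inputs: FdoMin=76 Slope1=61 TempMin=45 */
	printf("FdoMin=%u Slope1=%u TempMin=%u degC\n", fdo_min, slope1, temp_min);
	return 0;
}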
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 93bbed977ffb..84e3d3bcf9f3 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -33,6 +33,8 @@
#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
struct ci_pl {
u32 mclk;
u32 sclk;
@@ -237,6 +239,7 @@ struct ci_power_info {
u32 sclk_dpm_key_disabled;
u32 mclk_dpm_key_disabled;
u32 pcie_dpm_key_disabled;
+ u32 thermal_sclk_dpm_enabled;
struct ci_pcie_perf_range pcie_gen_performance;
struct ci_pcie_perf_range pcie_lane_performance;
struct ci_pcie_perf_range pcie_gen_powersaving;
@@ -264,6 +267,7 @@ struct ci_power_info {
bool caps_automatic_dc_transition;
bool caps_sclk_throttle_low_notification;
bool caps_dynamic_ac_timing;
+ bool caps_od_fuzzy_fan_control_support;
/* flags */
bool thermal_protection;
bool pcie_performance_request;
@@ -285,6 +289,10 @@ struct ci_power_info {
struct ci_ps current_ps;
struct radeon_ps requested_rps;
struct ci_ps requested_ps;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
};
#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index b630edc2fd0c..e78bcad7a43e 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -129,7 +129,7 @@ void ci_reset_smc(struct radeon_device *rdev)
int ci_program_jump_on_start(struct radeon_device *rdev)
{
- static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+ static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 89c01fa6dd8e..6dcde3798b45 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -32,6 +32,7 @@
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
+#include "radeon_kfd.h"
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -1563,6 +1564,8 @@ static const u32 godavari_golden_registers[] =
static void cik_init_golden_registers(struct radeon_device *rdev)
{
+ /* Some of the registers might be dependent on GRBM_GFX_INDEX */
+ mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
@@ -1637,6 +1640,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
default:
break;
}
+ mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -1806,7 +1810,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
- u32 running, blackout = 0;
+ u32 running, blackout = 0, tmp;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
@@ -1866,6 +1870,15 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
}
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
+ }
+
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
if (rdev->new_fw)
@@ -3419,6 +3432,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -3430,6 +3444,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3440,6 +3455,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
rdev->config.cik.backend_enable_mask = enabled_rbs;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3467,6 +3483,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3684,6 +3701,12 @@ static void cik_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+ mutex_lock(&rdev->grbm_idx_mutex);
+ /*
+ * making sure that the following register writes will be broadcasted
+ * to all the shaders
+ */
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3739,6 +3762,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
+ mutex_unlock(&rdev->grbm_idx_mutex);
udelay(50);
}
@@ -3970,31 +3994,27 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, control;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -4018,12 +4038,12 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
@@ -4046,6 +4066,7 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header, control = INDIRECT_BUFFER_VALID;
if (ib->is_const_ib) {
@@ -4074,8 +4095,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
}
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0);
+ control |= ib->length_dw | (vm_id << 24);
radeon_ring_write(ring, header);
radeon_ring_write(ring,
@@ -4675,12 +4695,11 @@ static int cik_mec_init(struct radeon_device *rdev)
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
+ * Nonetheless, we assign only 1 pipe because all other pipes will
+ * be handled by KFD
*/
- if (rdev->family == CHIP_KAVERI)
- rdev->mec.num_mec = 2;
- else
- rdev->mec.num_mec = 1;
- rdev->mec.num_pipe = 4;
+ rdev->mec.num_mec = 1;
+ rdev->mec.num_pipe = 1;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4822,28 +4841,24 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
- for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
- int me = (i < 4) ? 1 : 2;
- int pipe = (i < 4) ? i : (i - 4);
- eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
+ eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
- cik_srbm_select(rdev, me, pipe, 0, 0);
+ cik_srbm_select(rdev, 0, 0, 0, 0);
- /* write the EOP addr */
- WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
- WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+ /* write the EOP addr */
+ WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+ WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
- /* set the VMID assigned */
- WREG32(CP_HPD_EOP_VMID, 0);
+ /* set the VMID assigned */
+ WREG32(CP_HPD_EOP_VMID, 0);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(CP_HPD_EOP_CONTROL);
+ tmp &= ~EOP_SIZE_MASK;
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
+ WREG32(CP_HPD_EOP_CONTROL, tmp);
- /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(CP_HPD_EOP_CONTROL);
- tmp &= ~EOP_SIZE_MASK;
- tmp |= order_base_2(MEC_HPD_SIZE / 8);
- WREG32(CP_HPD_EOP_CONTROL, tmp);
- }
- cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
@@ -5897,8 +5912,13 @@ int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
*/
int cik_vm_init(struct radeon_device *rdev)
{
- /* number of VMs */
- rdev->vm_manager.nvm = 16;
+ /*
+ * number of VMs
+ * VMID 0 is reserved for System
+ * radeon graphics/compute will use VMIDs 1-7
+ * amdkfd will use VMIDs 8-15
+ */
+ rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(MC_VM_FB_OFFSET);
@@ -5958,26 +5978,23 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
* Update the page table base and flush the VM TLB
* using the CP (CIK).
*/
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
- int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
-
- if (vm == NULL)
- return;
+ int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
- if (vm->id < 8) {
+ if (vm_id < 8) {
radeon_ring_write(ring,
- (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
radeon_ring_write(ring,
- (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* update SH_MEM_* regs */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5985,7 +6002,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, VMID(vm->id));
+ radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6006,7 +6023,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
/* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, ridx);
+ cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6014,7 +6031,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* compute doesn't have PFP */
if (usepfp) {
@@ -6059,6 +6076,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
u32 i, j, k;
u32 mask;
+ mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -6070,6 +6088,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
@@ -6204,10 +6223,12 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
+ mutex_unlock(&rdev->grbm_idx_mutex);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
@@ -6274,11 +6295,13 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6314,17 +6337,20 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000001;
data &= 0xfffffffd;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6345,7 +6371,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data |= 0x00000002;
+ data |= 0x00000003;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -6368,11 +6394,13 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
+ mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
}
@@ -6801,10 +6829,12 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
u32 mask = 0, tmp, tmp1;
int i;
+ mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&rdev->grbm_idx_mutex);
tmp &= 0xffff0000;
@@ -7288,8 +7318,7 @@ static int cik_irq_init(struct radeon_device *rdev)
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
- u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
- u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
+ u32 cp_m1p0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -7323,13 +7352,6 @@ int cik_irq_set(struct radeon_device *rdev)
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
if (rdev->flags & RADEON_IS_IGP)
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
@@ -7351,33 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
- case 1:
- cp_m1p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- default:
- DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
- break;
- }
- } else if (ring->me == 2) {
- switch (ring->pipe) {
- case 0:
- cp_m2p0 |= TIME_STAMP_INT_ENABLE;
- break;
- case 1:
- cp_m2p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
@@ -7394,33 +7389,6 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
- case 1:
- cp_m1p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m1p2 |= TIME_STAMP_INT_ENABLE;
- break;
- default:
- DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
- break;
- }
- } else if (ring->me == 2) {
- switch (ring->pipe) {
- case 0:
- cp_m2p0 |= TIME_STAMP_INT_ENABLE;
- break;
- case 1:
- cp_m2p1 |= TIME_STAMP_INT_ENABLE;
- break;
- case 2:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
- case 3:
- cp_m2p2 |= TIME_STAMP_INT_ENABLE;
- break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
@@ -7509,13 +7477,6 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
- WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
- WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
- WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
- WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
- WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
- WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
- WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
@@ -7832,6 +7793,10 @@ restart_ih:
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
+
+ radeon_kfd_interrupt(rdev,
+ (const void *) &rdev->ih.ring[ring_index]);
+
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
@@ -8521,6 +8486,10 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_kfd_resume(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -8569,6 +8538,7 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ radeon_kfd_suspend(rdev);
radeon_pm_suspend(rdev);
dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
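
The cik.c hunks above repeatedly wrap the cik_select_se_sh() windows in the new rdev->grbm_idx_mutex. A minimal sketch of that locking pattern, consolidated in one place; every identifier comes from the hunks above, only the helper name is invented for illustration.

static u32 example_read_per_sh_reg(struct radeon_device *rdev,
                                   u32 se, u32 sh, u32 reg)
{
        u32 val;

        mutex_lock(&rdev->grbm_idx_mutex);      /* serialize GRBM index users */
        cik_select_se_sh(rdev, se, sh);         /* point GRBM at one SE/SH */
        val = RREG32(reg);                      /* read the banked register */
        cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); /* restore broadcast */
        mutex_unlock(&rdev->grbm_idx_mutex);

        return val;
}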
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index ca1bb6133580..79c45e8a536b 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,4 +147,140 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
+#define CP_HQD_IQ_RPTR 0xC970u
+#define AQL_ENABLE (1U << 0)
+
+#define IDLE (1 << 2)
+
+struct cik_mqd {
+ uint32_t header;
+ uint32_t compute_dispatch_initiator;
+ uint32_t compute_dim_x;
+ uint32_t compute_dim_y;
+ uint32_t compute_dim_z;
+ uint32_t compute_start_x;
+ uint32_t compute_start_y;
+ uint32_t compute_start_z;
+ uint32_t compute_num_thread_x;
+ uint32_t compute_num_thread_y;
+ uint32_t compute_num_thread_z;
+ uint32_t compute_pipelinestat_enable;
+ uint32_t compute_perfcount_enable;
+ uint32_t compute_pgm_lo;
+ uint32_t compute_pgm_hi;
+ uint32_t compute_tba_lo;
+ uint32_t compute_tba_hi;
+ uint32_t compute_tma_lo;
+ uint32_t compute_tma_hi;
+ uint32_t compute_pgm_rsrc1;
+ uint32_t compute_pgm_rsrc2;
+ uint32_t compute_vmid;
+ uint32_t compute_resource_limits;
+ uint32_t compute_static_thread_mgmt_se0;
+ uint32_t compute_static_thread_mgmt_se1;
+ uint32_t compute_tmpring_size;
+ uint32_t compute_static_thread_mgmt_se2;
+ uint32_t compute_static_thread_mgmt_se3;
+ uint32_t compute_restart_x;
+ uint32_t compute_restart_y;
+ uint32_t compute_restart_z;
+ uint32_t compute_thread_trace_enable;
+ uint32_t compute_misc_reserved;
+ uint32_t compute_user_data_0;
+ uint32_t compute_user_data_1;
+ uint32_t compute_user_data_2;
+ uint32_t compute_user_data_3;
+ uint32_t compute_user_data_4;
+ uint32_t compute_user_data_5;
+ uint32_t compute_user_data_6;
+ uint32_t compute_user_data_7;
+ uint32_t compute_user_data_8;
+ uint32_t compute_user_data_9;
+ uint32_t compute_user_data_10;
+ uint32_t compute_user_data_11;
+ uint32_t compute_user_data_12;
+ uint32_t compute_user_data_13;
+ uint32_t compute_user_data_14;
+ uint32_t compute_user_data_15;
+ uint32_t cp_compute_csinvoc_count_lo;
+ uint32_t cp_compute_csinvoc_count_hi;
+ uint32_t cp_mqd_base_addr_lo;
+ uint32_t cp_mqd_base_addr_hi;
+ uint32_t cp_hqd_active;
+ uint32_t cp_hqd_vmid;
+ uint32_t cp_hqd_persistent_state;
+ uint32_t cp_hqd_pipe_priority;
+ uint32_t cp_hqd_queue_priority;
+ uint32_t cp_hqd_quantum;
+ uint32_t cp_hqd_pq_base_lo;
+ uint32_t cp_hqd_pq_base_hi;
+ uint32_t cp_hqd_pq_rptr;
+ uint32_t cp_hqd_pq_rptr_report_addr_lo;
+ uint32_t cp_hqd_pq_rptr_report_addr_hi;
+ uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+ uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+ uint32_t cp_hqd_pq_doorbell_control;
+ uint32_t cp_hqd_pq_wptr;
+ uint32_t cp_hqd_pq_control;
+ uint32_t cp_hqd_ib_base_addr_lo;
+ uint32_t cp_hqd_ib_base_addr_hi;
+ uint32_t cp_hqd_ib_rptr;
+ uint32_t cp_hqd_ib_control;
+ uint32_t cp_hqd_iq_timer;
+ uint32_t cp_hqd_iq_rptr;
+ uint32_t cp_hqd_dequeue_request;
+ uint32_t cp_hqd_dma_offload;
+ uint32_t cp_hqd_sema_cmd;
+ uint32_t cp_hqd_msg_type;
+ uint32_t cp_hqd_atomic0_preop_lo;
+ uint32_t cp_hqd_atomic0_preop_hi;
+ uint32_t cp_hqd_atomic1_preop_lo;
+ uint32_t cp_hqd_atomic1_preop_hi;
+ uint32_t cp_hqd_hq_status0;
+ uint32_t cp_hqd_hq_control0;
+ uint32_t cp_mqd_control;
+ uint32_t cp_mqd_query_time_lo;
+ uint32_t cp_mqd_query_time_hi;
+ uint32_t cp_mqd_connect_start_time_lo;
+ uint32_t cp_mqd_connect_start_time_hi;
+ uint32_t cp_mqd_connect_end_time_lo;
+ uint32_t cp_mqd_connect_end_time_hi;
+ uint32_t cp_mqd_connect_end_wf_count;
+ uint32_t cp_mqd_connect_end_pq_rptr;
+ uint32_t cp_mqd_connect_end_pq_wptr;
+ uint32_t cp_mqd_connect_end_ib_rptr;
+ uint32_t reserved_96;
+ uint32_t reserved_97;
+ uint32_t reserved_98;
+ uint32_t reserved_99;
+ uint32_t iqtimer_pkt_header;
+ uint32_t iqtimer_pkt_dw0;
+ uint32_t iqtimer_pkt_dw1;
+ uint32_t iqtimer_pkt_dw2;
+ uint32_t iqtimer_pkt_dw3;
+ uint32_t iqtimer_pkt_dw4;
+ uint32_t iqtimer_pkt_dw5;
+ uint32_t iqtimer_pkt_dw6;
+ uint32_t reserved_108;
+ uint32_t reserved_109;
+ uint32_t reserved_110;
+ uint32_t reserved_111;
+ uint32_t queue_doorbell_id0;
+ uint32_t queue_doorbell_id1;
+ uint32_t queue_doorbell_id2;
+ uint32_t queue_doorbell_id3;
+ uint32_t queue_doorbell_id4;
+ uint32_t queue_doorbell_id5;
+ uint32_t queue_doorbell_id6;
+ uint32_t queue_doorbell_id7;
+ uint32_t queue_doorbell_id8;
+ uint32_t queue_doorbell_id9;
+ uint32_t queue_doorbell_id10;
+ uint32_t queue_doorbell_id11;
+ uint32_t queue_doorbell_id12;
+ uint32_t queue_doorbell_id13;
+ uint32_t queue_doorbell_id14;
+ uint32_t queue_doorbell_id15;
+};
+
#endif
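
The new struct cik_mqd mirrors the CP's per-queue MQD/HQD register file, one dword per field. A hypothetical sketch of filling one in before handing a queue to the CP; the helper name and the chosen field values are assumptions for illustration, not taken from this patch.

/* Hypothetical illustration of struct cik_mqd usage; not from this patch. */
static void example_fill_mqd(struct cik_mqd *m, uint64_t mqd_gpu_addr,
                             uint32_t vmid)
{
        memset(m, 0, sizeof(*m));                       /* start from a clean MQD */
        m->cp_mqd_base_addr_lo = lower_32_bits(mqd_gpu_addr);
        m->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
        m->cp_hqd_vmid = vmid;                          /* VM context the queue runs in */
        m->cp_hqd_active = 1;                           /* mark the queue as usable */
}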
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d748963af08b..dde5c7e29eb2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -134,7 +134,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+ u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 5;
@@ -541,31 +541,27 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -586,12 +582,12 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
@@ -904,25 +900,21 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
* Update the page table base and flush the VM TLB
* using sDMA (CIK).
*/
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm->id < 8) {
- radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ if (vm_id < 8) {
+ radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
- radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* update SH_MEM_* regs */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(vm->id));
+ radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SH_MEM_BASES >> 2);
@@ -945,11 +937,11 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
radeon_ring_write(ring, VMID(0));
/* flush HDP */
- cik_sdma_hdp_flush_ring_emit(rdev, ridx);
+ cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
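
The cik_copy_dma() conversion above is one of several users of the radeon_sync API that replaces the old on-stack radeon_semaphore bookkeeping (the struct and prototypes are added to radeon.h further down in this patch). A condensed sketch of the lifecycle, with locking and error handling elided; the function name is illustrative only.

/* Condensed radeon_sync lifecycle (see radeon.h below); error paths and
 * ring locking elided, function name invented for the sketch. */
static void example_sync_usage(struct radeon_device *rdev,
                               struct radeon_ring *ring,
                               struct reservation_object *resv,
                               struct radeon_fence *fence)
{
        struct radeon_sync sync;

        radeon_sync_create(&sync);                      /* lives on the stack, no allocation */
        radeon_sync_resv(rdev, &sync, resv, false);     /* collect fences from the reservation */
        radeon_sync_rings(rdev, &sync, ring->idx);      /* emit waits for the other rings */

        /* ... emit the actual commands and the fence here ... */

        radeon_sync_free(rdev, &sync, fence);           /* release semaphores after the fence */
}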
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 0c6e1b55d968..ba85986febea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -30,6 +30,8 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
+#define RADEON_NUM_OF_VMIDS 8
+
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
# define DIDT_CTRL_EN (1 << 0)
@@ -184,7 +186,10 @@
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
-
+#define CG_THERMAL_STATUS 0xC0300008
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
#define CG_THERMAL_INT 0xC030000C
#define CI_DIG_THERM_INTH(x) ((x) << 8)
#define CI_DIG_THERM_INTH_MASK 0x0000FF00
@@ -194,7 +199,10 @@
#define CI_DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
-
+#define CG_MULT_THERMAL_CTRL 0xC0300010
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
#define CG_MULT_THERMAL_STATUS 0xC0300014
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -203,6 +211,36 @@
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
+#define CG_FDO_CTRL0 0xC0300064
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0xC0300068
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0xC030006C
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+#define CG_TACH_CTRL 0xC0300070
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0xC0300074
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
#define CG_ECLK_CNTL 0xC05000AC
# define ECLK_DIVIDER_MASK 0x7f
# define ECLK_DIR_CNTL_EN (1 << 8)
@@ -1137,6 +1175,9 @@
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
#define DEFAULT_MTYPE(x) ((x) << 4)
#define APE1_MTYPE(x) ((x) << 7)
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+#define MTYPE_CACHED 0
+#define MTYPE_NONCACHED 3
#define SX_DEBUG_1 0x9060
@@ -1447,6 +1488,16 @@
#define CP_HQD_ACTIVE 0xC91C
#define CP_HQD_VMID 0xC920
+#define CP_HQD_PERSISTENT_STATE 0xC924u
+#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
+
+#define CP_HQD_PIPE_PRIORITY 0xC928u
+#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
+#define CP_HQD_QUANTUM 0xC930u
+#define QUANTUM_EN 1U
+#define QUANTUM_SCALE_1MS (1U << 4)
+#define QUANTUM_DURATION(x) ((x) << 8)
+
#define CP_HQD_PQ_BASE 0xC934
#define CP_HQD_PQ_BASE_HI 0xC938
#define CP_HQD_PQ_RPTR 0xC93C
@@ -1474,12 +1525,32 @@
#define PRIV_STATE (1 << 30)
#define KMD_QUEUE (1 << 31)
-#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define CP_HQD_IB_BASE_ADDR 0xC95Cu
+#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
+#define CP_HQD_IB_RPTR 0xC964u
+#define CP_HQD_IB_CONTROL 0xC968u
+#define IB_ATC_EN (1U << 23)
+#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+
+#define CP_HQD_DEQUEUE_REQUEST 0xC974
+#define DEQUEUE_REQUEST_DRAIN 1
+#define DEQUEUE_REQUEST_RESET 2
#define CP_MQD_CONTROL 0xC99C
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
+#define CP_HQD_SEMA_CMD 0xC97Cu
+#define CP_HQD_MSG_TYPE 0xC980u
+#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
+#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
+#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
+#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
+#define CP_HQD_HQ_SCHEDULER0 0xC994u
+#define CP_HQD_HQ_SCHEDULER1 0xC998u
+
+#define SH_STATIC_MEM_CONFIG 0x9604u
+
#define DB_RENDER_CONTROL 0x28000
#define PA_SC_RASTER_CONFIG 0x28350
@@ -2069,4 +2140,20 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+#define ATC_VMID0_PASID_MAPPING 0x339Cu
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
+#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
+
+#define ATC_VM_APERTURE0_CNTL 0x3310u
+#define ATS_ACCESS_MODE_NEVER 0
+#define ATS_ACCESS_MODE_ALWAYS 1
+
+#define ATC_VM_APERTURE0_CNTL2 0x3318u
+#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
+#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
+#define ATC_VM_APERTURE1_CNTL 0x3314u
+#define ATC_VM_APERTURE1_CNTL2 0x331Cu
+#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
+#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+
#endif
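
cikd.h gains the fan and tach control registers (CG_FDO_CTRL*, CG_TACH_*) used by the new fan control paths. A rough sketch of programming a static duty cycle with these masks; it assumes an SMC write accessor (WREG32_SMC) matching the RREG32_SMC reads used in cik.c above, and the helper name is invented.

/* Illustrative: set a static fan duty cycle using the new cikd.h
 * definitions.  WREG32_SMC is assumed to exist as the counterpart of the
 * RREG32_SMC accessor used earlier in this patch. */
static void example_set_fan_duty(struct radeon_device *rdev, u32 duty)
{
        u32 tmp;

        tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
        tmp |= FDO_STATIC_DUTY(duty);                   /* duty is 0..255 */
        WREG32_SMC(CG_FDO_CTRL0, tmp);

        tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
        tmp |= FDO_PWM_MODE(FDO_PWM_MODE_STATIC);       /* static mode, see r600_dpm.h below */
        WREG32_SMC(CG_FDO_CTRL2, tmp);
}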
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 5c8b358f9fba..924b1b7ab455 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -35,7 +35,7 @@
#define MIN(a,b) (((a)<(b))?(a):(b))
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
+ struct radeon_bo_list **cs_reloc);
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 last_reg;
u32 m, i, tmp, *ib;
int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int evergreen_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct evergreen_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
**/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 66bcfadeedd1..96535aa8659c 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -110,31 +110,27 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -153,12 +149,12 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 4a85bb644e24..b928c17bdeed 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -347,7 +347,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
@@ -356,7 +356,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
@@ -406,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
@@ -417,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
@@ -428,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
@@ -439,7 +439,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -453,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
@@ -467,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
@@ -480,7 +480,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
@@ -493,7 +493,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
@@ -509,7 +509,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
@@ -524,7 +524,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
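
The mkregtable.c hunks only correct the kerneldoc @member wording from list_struct to list_head. For reference, a minimal use of the documented iterator; the struct and field names here are invented for the example.

/* Minimal list_for_each_entry() use matching the corrected kerneldoc;
 * struct and field names are made up. */
struct item {
        int val;
        struct list_head node;          /* the "member" the macros refer to */
};

static int example_sum(struct list_head *head)
{
        struct item *pos;
        int sum = 0;

        list_for_each_entry(pos, head, node)
                sum += pos->val;
        return sum;
}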
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3faee58946dd..360de9f1f491 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1373,6 +1373,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
@@ -1395,15 +1396,14 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0));
+ radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
+ radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -2502,15 +2502,11 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
* Update the page table base and flush the VM TLB
* using the CP (cayman-si).
*/
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2518,7 +2514,7 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index f26f0a9fb522..50f88611ff60 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -123,6 +123,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
@@ -140,7 +141,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -446,16 +447,12 @@ void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
@@ -465,6 +462,6 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
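
With the per-ring VM ids (ib->vm->ids[ib->ring].id above) and the vm_flush callbacks now taking an explicit vm_id and page-directory address, the caller side changes as well; the radeon_ring_vm_flush() wrapper is updated accordingly in radeon.h further down in this patch. A sketch of a caller under the new interface; the wrapper function is illustrative and id/fence management is omitted.

/* Illustrative caller of the reworked flush interface; helper name is
 * invented, id selection and fence handling omitted. */
static void example_flush_vm_on_ring(struct radeon_device *rdev,
                                     struct radeon_ring *ring,
                                     struct radeon_vm *vm)
{
        struct radeon_vm_id *vm_id = &vm->ids[ring->idx];

        /* the macro resolves the asic hook through ring->idx and passes the
         * raw id and page directory address instead of the vm pointer */
        radeon_ring_vm_flush(rdev, ring, vm_id->id, vm_id->pd_gpu_addr);
}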
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 5670b8291285..7e5724a12f8b 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -56,6 +56,14 @@
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
#define PPSMC_Result_OK ((uint8_t)0x01)
#define PPSMC_Result_Failed ((uint8_t)0xFF)
@@ -79,6 +87,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
#define PPSMC_TDPClampingActive ((uint8_t)0x59)
#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
+#define PPSMC_StartFanControl ((uint8_t)0x5B)
+#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
@@ -106,6 +116,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
@@ -149,6 +160,10 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+
+#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
@@ -157,10 +172,11 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
-#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
+#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
+#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
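
ppsmc.h only adds message IDs; nothing in this hunk shows how they are issued. As a rough illustration, the fuzzy fan controller could be started as below, assuming the CI dpm code's ci_send_msg_to_smc_with_parameter() helper is available; that helper is not part of this patch.

/* Assumed helper: ci_send_msg_to_smc_with_parameter() from the CI dpm
 * code; this call is an illustration, not taken from this patch. */
static void example_start_fuzzy_fan_control(struct radeon_device *rdev)
{
        ci_send_msg_to_smc_with_parameter(rdev, PPSMC_StartFanControl,
                                          FAN_CONTROL_FUZZY);
}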
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 2d532996c697..4c2eec49dadc 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -96,6 +96,14 @@ typedef struct _ATOM_PPLIB_FANTABLE2
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+ ATOM_PPLIB_FANTABLE2 basicTable2;
+ UCHAR ucFanControlMode;
+ USHORT usFanPWMMax;
+ USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76f..74f06d540591 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
int r;
u32 tile_flags = 0;
u32 tmp;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 value;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
int idx)
{
unsigned c, i;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
unsigned idx;
volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
}
if (r)
return r;
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 732d4938aab7..c70e6d5bcd19 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034ce..064ad5569cca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
if (r) {
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56b02927cd3d..ef5d6066fa5b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2889,31 +2889,27 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, tmp;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2942,12 +2938,12 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c47537a1ddba..acc1f99c84d9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 m, i, tmp, *ib;
int r;
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ ib_chunk = parser.chunk_ib;
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
+ struct radeon_bo_list **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, p->nrelocs);
return -EINVAL;
}
- *cs_reloc = p->relocs_ptr[idx];
+ *cs_reloc = &p->relocs[idx];
p->dma_reloc_idx++;
return 0;
}
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index cf0df45d455e..d2dd29ab24fa 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -441,31 +441,27 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -484,12 +480,12 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b5c73df8e202..843b65f46ece 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -811,6 +811,7 @@ union power_info {
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan;
struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
};
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
@@ -900,6 +901,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
else
rdev->pm.dpm.fan.t_max = 10900;
rdev->pm.dpm.fan.cycle_delay = 100000;
+ if (fan_info->fan.ucFanTableFormat >= 3) {
+ rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+ rdev->pm.dpm.fan.default_max_fan_pwm =
+ le16_to_cpu(fan_info->fan3.usFanPWMMax);
+ rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+ rdev->pm.dpm.fan.fan_output_sensitivity =
+ le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+ }
rdev->pm.dpm.fan.ucode_fan_control = true;
}
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 46b9d2a03018..bd499d749bc9 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -96,6 +96,9 @@
#define R600_TEMP_RANGE_MIN (90 * 1000)
#define R600_TEMP_RANGE_MAX (120 * 1000)
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
enum r600_power_level {
R600_POWER_LEVEL_LOW = 0,
R600_POWER_LEVEL_MEDIUM = 1,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a9717b3fbf1b..54529b837afa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -150,9 +150,6 @@ extern int radeon_backlight;
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
-/* number of hw syncs before falling back on blocking */
-#define RADEON_NUM_SYNCS 4
-
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -363,14 +360,15 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct fence base;
- struct radeon_device *rdev;
- uint64_t seq;
+ struct radeon_device *rdev;
+ uint64_t seq;
/* RB, DMA, etc. */
- unsigned ring;
+ unsigned ring;
+ bool is_vm_update;
- wait_queue_t fence_wake;
+ wait_queue_t fence_wake;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -452,12 +450,22 @@ struct radeon_mman {
#endif
};
+struct radeon_bo_list {
+ struct radeon_bo *robj;
+ struct ttm_validate_buffer tv;
+ uint64_t gpu_offset;
+ unsigned prefered_domains;
+ unsigned allowed_domains;
+ uint32_t tiling_flags;
+};
+
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint32_t flags;
uint64_t addr;
+ struct radeon_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex */
@@ -474,7 +482,7 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
- struct ttm_place placements[3];
+ struct ttm_place placements[4];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -576,10 +584,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
* Semaphores.
*/
struct radeon_semaphore {
- struct radeon_sa_bo *sa_bo;
- signed waiters;
- uint64_t gpu_addr;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
+ uint64_t gpu_addr;
};
int radeon_semaphore_create(struct radeon_device *rdev,
@@ -588,20 +595,33 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence);
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- struct reservation_object *resv,
- bool shared);
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
/*
+ * Synchronization
+ */
+struct radeon_sync {
+ struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_fence *last_vm_update;
+};
+
+void radeon_sync_create(struct radeon_sync *sync);
+void radeon_sync_fence(struct radeon_sync *sync,
+ struct radeon_fence *fence);
+int radeon_sync_resv(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct reservation_object *resv,
+ bool shared);
+int radeon_sync_rings(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ int waiting_ring);
+void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
+ struct radeon_fence *fence);
+
+/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -701,6 +721,10 @@ struct radeon_doorbell {
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset);
/*
* IRQS.
@@ -814,7 +838,7 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_semaphore *semaphore;
+ struct radeon_sync sync;
};
struct radeon_ring {
@@ -891,33 +915,40 @@ struct radeon_vm_pt {
uint64_t addr;
};
+struct radeon_vm_id {
+ unsigned id;
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct radeon_fence *flushed_updates;
+ /* last use of vmid */
+ struct radeon_fence *last_id_use;
+};
+
struct radeon_vm {
- struct rb_root va;
- unsigned id;
+ struct mutex mutex;
+
+ struct rb_root va;
+
+ /* protecting invalidated and freed */
+ spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
+ struct list_head invalidated;
/* BOs freed, but not yet updated in the PT */
- struct list_head freed;
+ struct list_head freed;
/* contains the page directory */
- struct radeon_bo *page_directory;
- uint64_t pd_gpu_addr;
- unsigned max_pde_used;
+ struct radeon_bo *page_directory;
+ unsigned max_pde_used;
/* array of page tables, one for each page directory entry */
- struct radeon_vm_pt *page_tables;
+ struct radeon_vm_pt *page_tables;
- struct radeon_bo_va *ib_bo_va;
+ struct radeon_bo_va *ib_bo_va;
- struct mutex mutex;
- /* last fence for cs using this vm */
- struct radeon_fence *fence;
- /* last flush or NULL if we still need to flush */
- struct radeon_fence *last_flush;
- /* last use of vmid */
- struct radeon_fence *last_id_use;
+ /* for id and flush management per ring */
+ struct radeon_vm_id ids[RADEON_NUM_RINGS];
};
struct radeon_vm_manager {
@@ -1025,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
/*
* CS.
*/
-struct radeon_cs_reloc {
- struct drm_gem_object *gobj;
- struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
- uint64_t gpu_offset;
- unsigned prefered_domains;
- unsigned allowed_domains;
- uint32_t tiling_flags;
- uint32_t handle;
-};
-
struct radeon_cs_chunk {
- uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void __user *user_ptr;
@@ -1055,16 +1074,15 @@ struct radeon_cs_parser {
unsigned idx;
/* relocations */
unsigned nrelocs;
- struct radeon_cs_reloc *relocs;
- struct radeon_cs_reloc **relocs_ptr;
- struct radeon_cs_reloc *vm_bos;
+ struct radeon_bo_list *relocs;
+ struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
- int chunk_ib_idx;
- int chunk_relocs_idx;
- int chunk_flags_idx;
- int chunk_const_ib_idx;
+ struct radeon_cs_chunk *chunk_ib;
+ struct radeon_cs_chunk *chunk_relocs;
+ struct radeon_cs_chunk *chunk_flags;
+ struct radeon_cs_chunk *chunk_const_ib;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
@@ -1078,7 +1096,7 @@ struct radeon_cs_parser {
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ibc = p->chunk_ib;
if (ibc->kdata)
return ibc->kdata[idx];
@@ -1490,6 +1508,10 @@ struct radeon_dpm_fan {
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
bool ucode_fan_control;
};
@@ -1623,6 +1645,11 @@ struct radeon_pm {
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
struct radeon_dpm dpm;
@@ -1785,7 +1812,8 @@ struct radeon_asic_ring {
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
- void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+ void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
/* testing functions */
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -2388,6 +2416,8 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
+ /* GRBM index mutex. Protects concurrent access to the GRBM index */
+ struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
@@ -2400,6 +2430,10 @@ struct radeon_device {
u64 vram_pin_size;
u64 gart_pin_size;
+ /* amdkfd interface */
+ struct kfd_dev *kfd;
+ struct radeon_sa_manager kfd_bo;
+
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
};
@@ -2831,7 +2865,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
@@ -2940,14 +2974,14 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
- int ring);
+ int ring, struct radeon_fence *fence);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
@@ -3054,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d8ace5b28a5b..2a45d548d5ec 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -599,7 +599,8 @@ int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -624,7 +625,8 @@ void cayman_dma_vm_set_pages(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
@@ -699,7 +701,8 @@ int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
@@ -721,7 +724,8 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
@@ -793,7 +797,8 @@ int cik_irq_set(struct radeon_device *rdev);
int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
-void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
@@ -811,7 +816,8 @@ void cik_sdma_vm_set_pages(struct radeon_device *rdev,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index df69b92ba164..dbc94f300297 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -196,8 +196,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
-static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -221,6 +221,7 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+ gpio.shift = pin->ucGpioPinBitShift;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
@@ -801,7 +802,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
hpd_record =
(ATOM_HPD_INT_RECORD *)
record;
- gpio = radeon_lookup_gpio(rdev,
+ gpio = radeon_atombios_lookup_gpio(rdev,
hpd_record->ucHPDIntGPIOID);
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
hpd.plugged_state = hpd_record->ucPlugged_PinState;
@@ -2128,7 +2129,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2164,7 +2165,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2200,7 +2201,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
+ radeon_atombios_lookup_gpio(rdev,
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2248,6 +2249,14 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
+ if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+ rdev->pm.no_fan = true;
+ rdev->pm.fan_pulses_per_revolution =
+ controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ if (rdev->pm.fan_pulses_per_revolution) {
+ rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
+ rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+ }
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6f377de099f9..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
- unsigned i, j;
- bool duplicate, need_mmap_lock = false;
+ unsigned i;
+ bool need_mmap_lock = false;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- chunk = &p->chunks[p->chunk_relocs_idx];
+ chunk = p->chunk_relocs;
p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
- if (p->relocs_ptr == NULL) {
- return -ENOMEM;
- }
- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
+ struct drm_gem_object *gobj;
unsigned priority;
- duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < i; j++) {
- if (r->handle == p->relocs[j].handle) {
- p->relocs_ptr[i] = &p->relocs[j];
- duplicate = true;
- break;
- }
- }
- if (duplicate) {
- p->relocs[i].handle = 0;
- continue;
- }
-
- p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
- r->handle);
- if (p->relocs[i].gobj == NULL) {
+ gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+ if (gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
return -ENOENT;
}
- p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].robj = gem_to_radeon_bo(gobj);
/* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
- p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -251,15 +232,15 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
int r;
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
resv = reloc->robj->tbo.resv;
- r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
- reloc->tv.shared);
+ r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+ reloc->tv.shared);
if (r)
return r;
}
@@ -282,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
- p->ib.semaphore = NULL;
p->const_ib.sa_bo = NULL;
- p->const_ib.semaphore = NULL;
- p->chunk_ib_idx = -1;
- p->chunk_relocs_idx = -1;
- p->chunk_flags_idx = -1;
- p->chunk_const_ib_idx = -1;
+ p->chunk_ib = NULL;
+ p->chunk_relocs = NULL;
+ p->chunk_flags = NULL;
+ p->chunk_const_ib = NULL;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -315,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].chunk_id = user_chunk.chunk_id;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
- p->chunk_relocs_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs = &p->chunks[i];
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
- p->chunk_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib = &p->chunks[i];
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
- p->chunk_const_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+ p->chunk_const_ib = &p->chunks[i];
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->chunk_flags_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags = &p->chunks[i];
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
@@ -341,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
continue;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
continue;
}
@@ -357,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->cs_flags = p->chunks[i].kdata[0];
if (p->chunks[i].length_dw > 1)
ring = p->chunks[i].kdata[1];
@@ -398,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
- struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
- struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -440,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj)
- drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ struct radeon_bo *bo = parser->relocs[i].robj;
+ if (bo == NULL)
+ continue;
+
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
kfree(parser->relocs);
- kfree(parser->relocs_ptr);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
@@ -461,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
{
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -521,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
- /* ignore duplicates */
- if (p->relocs_ptr[i] != &p->relocs[i])
- continue;
-
bo = p->relocs[i].robj;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
@@ -535,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
+
+ radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
}
return radeon_vm_clear_invalids(rdev, vm);
@@ -547,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_vm *vm = &fpriv->vm;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
@@ -579,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to sync rings: %i\n", r);
goto out;
}
- radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
+ (parser->chunk_const_ib != NULL)) {
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -609,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
struct radeon_vm *vm = NULL;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -617,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
vm = &fpriv->vm;
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
- ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ (parser->chunk_const_ib != NULL)) {
+ ib_chunk = parser->chunk_const_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
@@ -637,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
return -EFAULT;
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
vm, ib_chunk->length_dw * 4);
@@ -735,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_device *rdev = p->rdev;
uint32_t header;
@@ -829,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
* GPU offset using the provided start.
**/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm)
{
struct radeon_cs_chunk *relocs_chunk;
@@ -837,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
unsigned idx;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
@@ -868,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
(u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
- *cs_reloc = p->relocs_ptr[(idx / 4)];
+ *cs_reloc = &p->relocs[(idx / 4)];
return 0;
}
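The reworked radeon_cs_parser_relocs() above drops the separate relocs_ptr deduplication array and lets radeon_cs_packet_next_reloc() index p->relocs[idx / 4] directly, which relies on every entry in the RELOCS chunk being the 4-dword struct drm_radeon_cs_reloc from the uapi header (the FIXME above notes the same assumption). A minimal userspace sketch of that chunk layout, with hypothetical GEM handles:

/* Illustrative userspace sketch, not part of the patch: how the 4-dword
 * relocation entries that radeon_cs_parser_relocs() walks are laid out. */
#include <stdint.h>
#include <drm/radeon_drm.h>

static void fill_reloc_chunk(struct drm_radeon_cs_chunk *chunk,
			     struct drm_radeon_cs_reloc *relocs,
			     const uint32_t *handles, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		relocs[i].handle = handles[i];		/* hypothetical GEM handle */
		relocs[i].read_domains = RADEON_GEM_DOMAIN_VRAM;
		relocs[i].write_domain = 0;
		relocs[i].flags = 0;			/* low bits carry the 0-15 priority */
	}

	chunk->chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunk->length_dw = count * 4;	/* 4 dwords per entry, hence the kernel's "idx / 4" */
	chunk->chunk_data = (uintptr_t)relocs;
}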
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 9630e8d95fb4..45e54060ee97 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -117,106 +117,7 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
}
}
-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
- uint64_t gpu_addr)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
-
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(gpu_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
- }
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- gpu_addr & 0xffffffff);
- } else {
- radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
- }
-}
-
-int radeon_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct radeon_bo *robj;
- uint64_t gpu_addr;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- radeon_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > radeon_crtc->max_cursor_width) ||
- (height > radeon_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
- return -ENOENT;
- }
-
- robj = gem_to_radeon_bo(obj);
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
- radeon_crtc->cursor_width = width;
- radeon_crtc->cursor_height = height;
-
- radeon_lock_cursor(crtc, true);
- radeon_set_cursor(crtc, obj, gpu_addr);
- radeon_show_cursor(crtc);
- radeon_lock_cursor(crtc, false);
-
-unpin:
- if (radeon_crtc->cursor_bo) {
- robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
- ret = radeon_bo_reserve(robj, false);
- if (likely(ret == 0)) {
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
- }
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
- }
-
- radeon_crtc->cursor_bo = obj;
- return 0;
-fail:
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
-int radeon_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
+static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
@@ -281,7 +182,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
}
}
- radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -308,7 +208,173 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
}
+
+ radeon_crtc->cursor_x = x;
+ radeon_crtc->cursor_y = y;
+
+ return 0;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ int ret;
+
+ radeon_lock_cursor(crtc, true);
+ ret = radeon_cursor_move_locked(crtc, x, y);
radeon_lock_cursor(crtc, false);
+ return ret;
+}
+
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
+ uint64_t gpu_addr;
+ int ret;
+
+ ret = radeon_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &gpu_addr);
+ radeon_bo_unreserve(robj);
+ if (ret)
+ goto fail;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ }
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else {
+ radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+ }
+
return 0;
+
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ radeon_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > radeon_crtc->max_cursor_width) ||
+ (height > radeon_crtc->max_cursor_height)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+
+ radeon_lock_cursor(crtc, true);
+
+ if (hot_x != radeon_crtc->cursor_hot_x ||
+ hot_y != radeon_crtc->cursor_hot_y) {
+ int x, y;
+
+ x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+ y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+ radeon_cursor_move_locked(crtc, x, y);
+
+ radeon_crtc->cursor_hot_x = hot_x;
+ radeon_crtc->cursor_hot_y = hot_y;
+ }
+
+ ret = radeon_set_cursor(crtc, obj);
+
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+ ret);
+ else
+ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+
+unpin:
+ if (radeon_crtc->cursor_bo) {
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ ret = radeon_bo_reserve(robj, false);
+ if (likely(ret == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ if (radeon_crtc->cursor_bo != obj)
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ }
+
+ radeon_crtc->cursor_bo = obj;
+ return 0;
+}
+
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int ret;
+
+ if (radeon_crtc->cursor_bo) {
+ radeon_lock_cursor(crtc, true);
+
+ radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+ radeon_crtc->cursor_y);
+
+ ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not showing "
+ "cursor\n", ret);
+ else
+ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+ }
}
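radeon_crtc_cursor_set2() above treats radeon_crtc->cursor_x/y as the scanout position of the cursor's top-left corner, so when userspace hands in an image with a different hotspot the corner has to move for the hotspot to stay on the same screen pixel. A standalone sketch of that adjustment, with made-up coordinates:

/* Illustrative sketch (numbers are made up): the hotspot re-adjustment done
 * by radeon_crtc_cursor_set2().  The on-screen point under the hotspot is
 * cursor_x + hot_x; keeping it fixed gives the same formula as the driver. */
#include <stdio.h>

int main(void)
{
	int cursor_x = 100, cursor_y = 50;	/* current top-left position */
	int old_hot_x = 0, old_hot_y = 0;
	int new_hot_x = 8, new_hot_y = 8;	/* hotspot of the new cursor image */

	/* same formula as the driver: x = cursor_x + cursor_hot_x - hot_x */
	int x = cursor_x + old_hot_x - new_hot_x;
	int y = cursor_y + old_hot_y - new_hot_y;

	printf("top-left moves from (%d,%d) to (%d,%d); hotspot stays at (%d,%d)\n",
	       cursor_x, cursor_y, x, y,
	       cursor_x + old_hot_x, cursor_y + old_hot_y);
	return 0;
}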
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 995a8b1770dd..0ec65168f331 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -377,6 +377,37 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
__clear_bit(doorbell, rdev->doorbell.used);
}
+/**
+ * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
+ * set up KFD
+ *
+ * @rdev: radeon_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for radeon.
+ *
+ * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
+ * takes doorbells required for its own rings and reports the setup to KFD.
+ * Radeon reserved doorbells are at the start of the doorbell aperture.
+ */
+void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /* The first num_doorbells are used by radeon.
+ * KFD takes whatever's left in the aperture. */
+ if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = rdev->doorbell.base;
+ *aperture_size = rdev->doorbell.size;
+ *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
@@ -1273,6 +1304,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
+ mutex_init(&rdev->grbm_idx_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
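radeon_doorbell_get_kfd_info() above hands KFD everything in the doorbell aperture past the doorbells radeon reserves for its own rings. A standalone sketch of the split, using made-up aperture numbers:

/* Illustrative sketch (sizes are made up): the doorbell split reported by
 * radeon_doorbell_get_kfd_info().  radeon keeps the first num_doorbells
 * 32-bit doorbells; KFD gets everything after start_offset. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	uint64_t aperture_base = 0xf0000000ULL;	/* hypothetical BAR address */
	size_t aperture_size = 8 * 1024 * 1024;	/* hypothetical 8 MiB aperture */
	unsigned num_doorbells = 1024;		/* doorbells reserved by radeon */

	size_t start_offset = num_doorbells * sizeof(uint32_t);

	if (aperture_size > start_offset)
		printf("KFD doorbells: base 0x%llx + 0x%zx, %zu bytes usable\n",
		       (unsigned long long)aperture_base, start_offset,
		       aperture_size - start_offset);
	else
		printf("aperture too small, KFD gets nothing\n");
	return 0;
}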
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 00ead8c2758a..102116902a07 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>
#include <linux/gcd.h>
@@ -634,7 +635,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
return ret;
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
- .cursor_set = radeon_crtc_cursor_set,
+ .cursor_set2 = radeon_crtc_cursor_set2,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
.set_config = radeon_crtc_set_config,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcffa30ee2db..4f50fb0e3d93 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -41,6 +41,8 @@
#include <drm/drm_gem.h>
#include "drm_crtc_helper.h"
+#include "radeon_kfd.h"
+
/*
* KMS wrapper.
* - 2.0.0 - initial interface
@@ -654,12 +656,15 @@ static int __init radeon_init(void)
#endif
}
+ radeon_kfd_init();
+
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
+ radeon_kfd_fini();
drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d573..29b9220ec399 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,10 +48,40 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ /* XXX: with universal plane support fbdev will automatically disable
+ * all non-primary planes (including the cursor)
+ */
+ if (ret == 0) {
+ struct drm_fb_helper *fb_helper = info->par;
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ }
+ }
+
+ return ret;
+}
+
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = radeon_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 995167025282..d13d1b5a859f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,6 +140,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->rdev = rdev;
(*fence)->seq = seq;
(*fence)->ring = ring;
+ (*fence)->is_vm_update = false;
fence_init(&(*fence)->base, &radeon_fence_ops,
&rdev->fence_queue.lock, rdev->fence_context + ring, seq);
radeon_fence_ring_emit(rdev, ring, *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c194497aa586..fe48f229043e 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,9 +394,10 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
+static int radeon_mode_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, bool dumb,
+ uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -405,6 +406,14 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
+ "Illegal dumb map of GPU buffer.\n");
+
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -415,12 +424,20 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
return 0;
}
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ return radeon_mode_mmap(filp, dev, handle, true, offset_p);
+}
+
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+ return radeon_mode_mmap(filp, dev, args->handle, false,
+ &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -518,6 +535,68 @@ out:
return r;
}
+/**
+ * radeon_gem_va_update_vm - update the bo_va in its VM
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
+{
+ struct ttm_validate_buffer tv, *entry;
+ struct radeon_bo_list *vm_bos;
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ unsigned domain;
+ int r;
+
+ INIT_LIST_HEAD(&list);
+
+ tv.bo = &bo_va->bo->tbo;
+ tv.shared = true;
+ list_add(&tv.head, &list);
+
+ vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
+ if (!vm_bos)
+ return;
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r)
+ goto error_free;
+
+ list_for_each_entry(entry, &list, head) {
+ domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+ if (domain == RADEON_GEM_DOMAIN_CPU)
+ goto error_unreserve;
+ }
+
+ mutex_lock(&bo_va->vm->mutex);
+ r = radeon_vm_clear_freed(rdev, bo_va->vm);
+ if (r)
+ goto error_unlock;
+
+ if (bo_va->it.start)
+ r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+ mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+ ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+ drm_free_large(vm_bos);
+
+ if (r)
+ DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -601,6 +680,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
+ radeon_bo_unreserve(rbo);
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -611,12 +691,13 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
+ if (!r)
+ radeon_gem_va_update_vm(rdev, bo_va);
args->operation = RADEON_VA_RESULT_OK;
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- radeon_bo_unreserve(rbo);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -682,6 +763,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
+ gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 3f39fcca4d07..c39ce1f05703 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -64,10 +64,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
return r;
}
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
+ radeon_sync_create(&ib->sync);
ib->ring = ring;
ib->fence = NULL;
@@ -96,7 +93,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sync_free(rdev, &ib->sync, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
@@ -145,11 +142,11 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
+ radeon_sync_fence(&ib->sync, vm_id_fence);
}
/* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
@@ -157,11 +154,12 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
+ radeon_vm_flush(rdev, ib->vm, ib->ring,
+ ib->sync.last_vm_update);
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ radeon_sync_free(rdev, &const_ib->sync, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
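The hunks above replace the per-IB semaphore with the new radeon_sync object: it is created with the IB, collects the fences the IB must wait for, emits the waits on the target ring, and is released together with the IB's own fence. A minimal sketch of that lifecycle, assuming a hypothetical sync_example() helper (the real callers are radeon_ib_get(), radeon_ib_schedule() and radeon_ib_free() above):

/* Minimal sketch, not from the patch: the radeon_sync lifecycle that
 * replaces the per-IB semaphore.  sync_example() is a hypothetical helper
 * shown only to illustrate the call order. */
#include "radeon.h"

static int sync_example(struct radeon_device *rdev, struct radeon_ib *ib,
			struct reservation_object *resv,
			struct radeon_fence *extra_fence)
{
	int r;

	radeon_sync_create(&ib->sync);		/* was radeon_semaphore_create() */

	/* collect everything the IB has to wait for */
	radeon_sync_fence(&ib->sync, extra_fence);
	r = radeon_sync_resv(rdev, &ib->sync, resv, true /* shared */);
	if (r)
		return r;

	/* emit waits on ib->ring for the collected fences */
	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
	if (r)
		return r;

	/* release the sync object once the IB's own fence exists */
	radeon_sync_free(rdev, &ib->sync, ib->fence);
	return 0;
}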
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
new file mode 100644
index 000000000000..065d02068ec3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "cikd.h"
+#include "cik_reg.h"
+#include "radeon_kfd.h"
+
+#define CIK_PIPE_PER_MEC (4)
+
+struct kgd_mem {
+ struct radeon_sa_bo *sa_bo;
+ uint64_t gpu_addr;
+ void *ptr;
+};
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
+static void fini_sa_manager(struct kgd_dev *kgd);
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem);
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd);
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid);
+
+static int kgd_init_memory(struct kgd_dev *kgd);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr);
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr);
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+ .init_sa_manager = init_sa_manager,
+ .fini_sa_manager = fini_sa_manager,
+ .allocate_mem = allocate_mem,
+ .free_mem = free_mem,
+ .get_vmem_size = get_vmem_size,
+ .get_gpu_clock_counter = get_gpu_clock_counter,
+ .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_memory = kgd_init_memory,
+ .init_pipeline = kgd_init_pipeline,
+ .hqd_load = kgd_hqd_load,
+ .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_destroy = kgd_hqd_destroy,
+};
+
+static const struct kgd2kfd_calls *kgd2kfd;
+
+bool radeon_kfd_init(void)
+{
+ bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
+ const struct kgd2kfd_calls**);
+
+ kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+ if (kgd2kfd_init_p == NULL)
+ return false;
+
+ if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ symbol_put(kgd2kfd_init);
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+}
+
+void radeon_kfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void radeon_kfd_device_probe(struct radeon_device *rdev)
+{
+ if (kgd2kfd)
+ rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
+}
+
+void radeon_kfd_device_init(struct radeon_device *rdev)
+{
+ if (rdev->kfd) {
+ struct kgd2kfd_shared_resources gpu_resources = {
+ .compute_vmid_bitmap = 0xFF00,
+
+ .first_compute_pipe = 1,
+ .compute_pipe_count = 8 - 1,
+ };
+
+ radeon_doorbell_get_kfd_info(rdev,
+ &gpu_resources.doorbell_physical_address,
+ &gpu_resources.doorbell_aperture_size,
+ &gpu_resources.doorbell_start_offset);
+
+ kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+ }
+}
+
+void radeon_kfd_device_fini(struct radeon_device *rdev)
+{
+ if (rdev->kfd) {
+ kgd2kfd->device_exit(rdev->kfd);
+ rdev->kfd = NULL;
+ }
+}
+
+void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
+{
+ if (rdev->kfd)
+ kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void radeon_kfd_suspend(struct radeon_device *rdev)
+{
+ if (rdev->kfd)
+ kgd2kfd->suspend(rdev->kfd);
+}
+
+int radeon_kfd_resume(struct radeon_device *rdev)
+{
+ int r = 0;
+
+ if (rdev->kfd)
+ r = kgd2kfd->resume(rdev->kfd);
+
+ return r;
+}
+
+static u32 pool_to_domain(enum kgd_memory_pool p)
+{
+ switch (p) {
+ case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
+ default: return RADEON_GEM_DOMAIN_GTT;
+ }
+}
+
+static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+ int r;
+
+ BUG_ON(kgd == NULL);
+
+ r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
+ size,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+
+ if (r)
+ return r;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
+ if (r)
+ radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+
+ return r;
+}
+
+static void fini_sa_manager(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
+}
+
+static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
+ enum kgd_memory_pool pool, struct kgd_mem **mem)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+ u32 domain;
+ int r;
+
+ BUG_ON(kgd == NULL);
+
+ domain = pool_to_domain(pool);
+ if (domain != RADEON_GEM_DOMAIN_GTT) {
+ dev_err(rdev->dev,
+ "Only allowed to allocate gart memory for kfd\n");
+ return -EINVAL;
+ }
+
+ *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if ((*mem) == NULL)
+ return -ENOMEM;
+
+ r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
+ alignment);
+ if (r) {
+ dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
+ return r;
+ }
+
+ (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
+ (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
+
+ return 0;
+}
+
+static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
+ kfree(mem);
+}
+
+static uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ BUG_ON(kgd == NULL);
+
+ return rdev->mc.real_vram_size;
+}
+
+static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ return rdev->asic->get_gpu_clock_counter(rdev);
+}
+
+static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = (struct radeon_device *)kgd;
+
+ /* The sclk is in quanta of 10kHz */
+ return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
+
+static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
+{
+ return (struct radeon_device *)kgd;
+}
+
+static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ writel(value, (void __iomem *)(rdev->rmmio + offset));
+}
+
+static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ return readl((void __iomem *)(rdev->rmmio + offset));
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+ uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+ mutex_lock(&rdev->srbm_mutex);
+ write_register(kgd, SRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+ struct radeon_device *rdev = get_radeon_device(kgd);
+
+ write_register(kgd, SRBM_GFX_CNTL, 0);
+ mutex_unlock(&rdev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+ unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases)
+{
+ lock_srbm(kgd, 0, 0, 0, vmid);
+
+ write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+ write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
+ write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+ write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+ unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+ unsigned int vmid)
+{
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
+ * because a mapping is in progress or because a mapping finished and
+ * the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 :
+ (uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
+
+ write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
+ pasid_mapping);
+
+ while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
+ (1U << vmid)))
+ cpu_relax();
+ write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+ return 0;
+}
+
+static int kgd_init_memory(struct kgd_dev *kgd)
+{
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
+ */
+ int i;
+ uint32_t sh_mem_bases = PRIVATE_BASE(0x6000) | SHARED_BASE(0x6000);
+
+ for (i = 8; i < 16; i++) {
+ uint32_t sh_mem_config;
+
+ lock_srbm(kgd, 0, 0, 0, i);
+
+ sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
+
+ write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
+
+ write_register(kgd, SH_MEM_BASES, sh_mem_bases);
+
+ /* Scratch aperture is not supported for now. */
+ write_register(kgd, SH_STATIC_MEM_CONFIG, 0);
+
+ /* APE1 disabled for now. */
+ write_register(kgd, SH_MEM_APE1_BASE, 1);
+ write_register(kgd, SH_MEM_APE1_LIMIT, 0);
+
+ unlock_srbm(kgd);
+ }
+
+ return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+ uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+ uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+ lock_srbm(kgd, mec, pipe, 0, 0);
+ write_register(kgd, CP_HPD_EOP_BASE_ADDR,
+ lower_32_bits(hpd_gpu_addr >> 8));
+ write_register(kgd, CP_HPD_EOP_BASE_ADDR_HI,
+ upper_32_bits(hpd_gpu_addr >> 8));
+ write_register(kgd, CP_HPD_EOP_VMID, 0);
+ write_register(kgd, CP_HPD_EOP_CONTROL, hpd_size);
+ unlock_srbm(kgd);
+
+ return 0;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+ return (struct cik_mqd *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr)
+{
+ uint32_t wptr_shadow, is_wptr_shadow_valid;
+ struct cik_mqd *m;
+
+ m = get_mqd(mqd);
+
+ is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+ write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+ write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
+
+ write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+ write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+ write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+ write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+ write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+ write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+ write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+ write_register(kgd, CP_HQD_PERSISTENT_STATE,
+ m->cp_hqd_persistent_state);
+ write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+ write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+ write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
+ m->cp_hqd_atomic0_preop_lo);
+
+ write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
+ m->cp_hqd_atomic0_preop_hi);
+
+ write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
+ m->cp_hqd_atomic1_preop_lo);
+
+ write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
+ m->cp_hqd_atomic1_preop_hi);
+
+ write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
+ m->cp_hqd_pq_rptr_report_addr_lo);
+
+ write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ m->cp_hqd_pq_rptr_report_addr_hi);
+
+ write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+ write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
+ m->cp_hqd_pq_wptr_poll_addr_lo);
+
+ write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ m->cp_hqd_pq_wptr_poll_addr_hi);
+
+ write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
+ m->cp_hqd_pq_doorbell_control);
+
+ write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
+
+ write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+ write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+ write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+ write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+ if (is_wptr_shadow_valid)
+ write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
+
+ write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
+ release_queue(kgd);
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ act = read_register(kgd, CP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
+ high == read_register(kgd, CP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(kgd);
+ return retval;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t temp;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+ write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
+
+ while (true) {
+ temp = read_register(kgd, CP_HQD_ACTIVE);
+ if (temp & 0x1)
+ break;
+ if (timeout == 0) {
+ pr_err("kfd: cp queue preemption timed out\n");
+ return -ETIME;
+ }
+ msleep(20);
+ timeout -= 20;
+ }
+
+ release_queue(kgd);
+ return 0;
+}
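acquire_queue() and kgd_init_pipeline() above map a KFD pipe_id onto a hardware MEC/pipe pair; because radeon itself keeps MEC1 pipe 0, radeon_kfd_device_init() reports first_compute_pipe = 1 and KFD's pipe_ids are shifted by one. A standalone sketch of that mapping, assuming CIK_PIPE_PER_MEC = 4 as defined above:

/* Illustrative sketch, not part of the patch: the pipe_id to MEC/pipe
 * mapping used by acquire_queue() and kgd_init_pipeline(). */
#include <stdio.h>

#define CIK_PIPE_PER_MEC (4)

int main(void)
{
	unsigned pipe_id;

	for (pipe_id = 0; pipe_id < 7; pipe_id++) {
		/* same arithmetic as the driver (note its pre-increment of pipe_id) */
		unsigned hw = pipe_id + 1;
		unsigned mec = (hw / CIK_PIPE_PER_MEC) + 1;
		unsigned pipe = hw % CIK_PIPE_PER_MEC;

		printf("kfd pipe_id %u -> MEC %u, pipe %u\n", pipe_id, mec, pipe);
	}
	return 0;
}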
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
new file mode 100644
index 000000000000..f90e161ca507
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * radeon_kfd.h defines the private interface between the
+ * AMD kernel graphics drivers and the AMD KFD.
+ */
+
+#ifndef RADEON_KFD_H_INCLUDED
+#define RADEON_KFD_H_INCLUDED
+
+#include <linux/types.h>
+#include "../amd/include/kgd_kfd_interface.h"
+
+struct radeon_device;
+
+bool radeon_kfd_init(void);
+void radeon_kfd_fini(void);
+
+void radeon_kfd_suspend(struct radeon_device *rdev);
+int radeon_kfd_resume(struct radeon_device *rdev);
+void radeon_kfd_interrupt(struct radeon_device *rdev,
+ const void *ih_ring_entry);
+void radeon_kfd_device_probe(struct radeon_device *rdev);
+void radeon_kfd_device_init(struct radeon_device *rdev);
+void radeon_kfd_device_fini(struct radeon_device *rdev);
+
+#endif /* RADEON_KFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 03586763ee86..3cf9c1fa6475 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -34,6 +34,8 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include "radeon_kfd.h"
+
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
@@ -63,6 +65,8 @@ int radeon_driver_unload_kms(struct drm_device *dev)
pm_runtime_get_sync(dev->dev);
+ radeon_kfd_device_fini(rdev);
+
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
@@ -142,6 +146,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
+ radeon_kfd_device_probe(rdev);
+ radeon_kfd_device_init(rdev);
+
if (radeon_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -621,8 +628,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
-
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, vm);
kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cafb1ccf2ec3..678b4386540d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
}
}
+ radeon_cursor_reset(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 04db2fdd8692..390db897f322 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -321,6 +321,10 @@ struct radeon_crtc {
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
+ int cursor_x;
+ int cursor_y;
+ int cursor_hot_x;
+ int cursor_hot_y;
int cursor_width;
int cursor_height;
int max_cursor_width;
@@ -462,6 +466,7 @@ struct radeon_gpio_rec {
u8 id;
u32 reg;
u32 mask;
+ u32 shift;
};
struct radeon_hpd {
@@ -748,6 +753,8 @@ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
+extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
+ u8 id);
extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
@@ -802,13 +809,16 @@ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic);
-extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height);
+extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4c0d786d5c7a..7d68223eb469 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -99,22 +99,39 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
- if (domain & RADEON_GEM_DOMAIN_VRAM)
+ if (domain & RADEON_GEM_DOMAIN_VRAM) {
+ /* Try placing BOs which don't need CPU access outside of the
+ * CPU accessible part of VRAM
+ */
+ if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+ rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+ rbo->placements[c].fpfn =
+ rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ }
+
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
+ }
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_TT;
}
@@ -122,30 +139,35 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_SYSTEM;
}
}
- if (!c)
+ if (!c) {
+ rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
TTM_PL_FLAG_SYSTEM;
+ }
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
- rbo->placements[i].fpfn = 0;
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
+ (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ !rbo->placements[i].fpfn)
rbo->placements[i].lpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
@@ -157,9 +179,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
* improve fragmentation quality.
* 512kb was measured as the most optimal number.
*/
- if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
- rbo->tbo.mem.size > 512 * 1024) {
+ if (rbo->tbo.mem.size > 512 * 1024) {
for (i = 0; i < c; i++) {
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
}
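The fpfn/lpfn fields are page frame numbers, so the CPU-visible VRAM boundary used above is simply visible_vram_size >> PAGE_SHIFT. A tiny worked sketch with assumed sizes (256 MiB visible window, 4 KiB pages; both values hypothetical):

/* Sketch with assumed sizes only. */
static unsigned example_visible_vram_boundary(void)
{
	const u64 visible_vram_size = 256ULL << 20;	/* hypothetical 256 MiB */
	const unsigned page_shift = 12;			/* 4 KiB pages */

	/* 65536: first page frame past the CPU-visible window.  Used as
	 * .fpfn it pushes a BO above the window, used as .lpfn it caps
	 * the BO below it. */
	return visible_vram_size >> page_shift;
}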
@@ -489,25 +509,29 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
- struct radeon_cs_reloc *lobj;
- struct radeon_bo *bo;
+ struct radeon_bo_list *lobj;
+ struct list_head duplicates;
int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
- r = ttm_eu_reserve_buffers(ticket, head, true);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
list_for_each_entry(lobj, head, tv.head) {
- bo = lobj->robj;
+ struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ WARN_ONCE(bo->gem_base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
@@ -546,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
}
+
+ list_for_each_entry(lobj, &duplicates, tv.head) {
+ lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+ lobj->tiling_flags = lobj->robj->tiling_flags;
+ }
+
return 0;
}
@@ -750,8 +780,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct radeon_device *rdev;
struct radeon_bo *rbo;
- unsigned long offset, size;
- int r;
+ unsigned long offset, size, lpfn;
+ int i, r;
if (!radeon_ttm_bo_is_radeon_bo(bo))
return 0;
@@ -768,7 +798,13 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ for (i = 0; i < rbo->placement.num_placement; i++) {
+ /* Force into visible VRAM */
+ if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+ rbo->placements[i].lpfn = lpfn;
+ }
r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -799,3 +835,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
ttm_bo_unreserve(&bo->tbo);
return r;
}
+
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared)
+{
+ struct reservation_object *resv = bo->tbo.resv;
+
+ if (shared)
+ reservation_object_add_shared_fence(resv, &fence->base);
+ else
+ reservation_object_add_excl_fence(resv, &fence->base);
+}
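A minimal usage sketch for the new helper (the example_ wrapper is illustrative): the BO must be reserved by the caller, and shared selects between a shared and an exclusive fence slot, mirroring how the VM code later in this patch fences page tables.

/* Sketch only: attach @fence to @bo, shared or exclusive. */
static int example_fence_bo(struct radeon_bo *bo,
			    struct radeon_fence *fence, bool shared)
{
	int r = radeon_bo_reserve(bo, false);

	if (r)
		return r;
	radeon_bo_fence(bo, fence, shared);
	radeon_bo_unreserve(bo);
	return 0;
}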
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1b8ec7917154..3b0b377f76cb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared);
/*
* sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 6deb08f045b7..e6ad54cdfa62 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,15 +34,14 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- uint64_t *cpu_addr;
- int i, r;
+ int r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
- 8 * RADEON_NUM_SYNCS, 8);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -51,12 +50,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
- for (i = 0; i < RADEON_NUM_SYNCS; ++i)
- cpu_addr[i] = 0;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- (*semaphore)->sync_to[i] = NULL;
+ *((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
@@ -95,146 +89,6 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
return false;
}
-/**
- * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
- *
- * @semaphore: semaphore object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence using this semaphore object
- */
-void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = semaphore->sync_to[fence->ring];
- semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
- * radeon_semaphore_sync_to - use the semaphore to sync to a reservation object
- *
- * @sema: semaphore object to add fence from reservation object to
- * @resv: reservation object with embedded fence
- * @shared: true if we should onyl sync to the exclusive fence
- *
- * Sync to the fence using this semaphore object
- */
-int radeon_semaphore_sync_resv(struct radeon_device *rdev,
- struct radeon_semaphore *sema,
- struct reservation_object *resv,
- bool shared)
-{
- struct reservation_object_list *flist;
- struct fence *f;
- struct radeon_fence *fence;
- unsigned i;
- int r = 0;
-
- /* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
- fence = f ? to_radeon_fence(f) : NULL;
- if (fence && fence->rdev == rdev)
- radeon_semaphore_sync_fence(sema, fence);
- else if (f)
- r = fence_wait(f, true);
-
- flist = reservation_object_get_list(resv);
- if (shared || !flist || r)
- return r;
-
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
- fence = to_radeon_fence(f);
- if (fence && fence->rdev == rdev)
- radeon_semaphore_sync_fence(sema, fence);
- else
- r = fence_wait(f, true);
-
- if (r)
- break;
- }
- return r;
-}
-
-/**
- * radeon_semaphore_sync_rings - sync ring to all registered fences
- *
- * @rdev: radeon_device pointer
- * @semaphore: semaphore object to use for sync
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int radeon_semaphore_sync_rings(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
- int ring)
-{
- unsigned count = 0;
- int i, r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = semaphore->sync_to[i];
-
- /* check if we really need to sync */
- if (!radeon_fence_need_sync(fence, ring))
- continue;
-
- /* prevent GPU deadlocks */
- if (!rdev->ring[i].ready) {
- dev_err(rdev->dev, "Syncing to a disabled ring!");
- return -EINVAL;
- }
-
- if (++count > RADEON_NUM_SYNCS) {
- /* not enough room, wait manually */
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- /* allocate enough space for sync command */
- r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
- if (r) {
- return r;
- }
-
- /* emit the signal semaphore */
- if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
- /* signaling wasn't successful wait manually */
- radeon_ring_undo(&rdev->ring[i]);
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- /* we assume caller has already allocated space on waiters ring */
- if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
- /* waiting wasn't successful wait manually */
- radeon_ring_undo(&rdev->ring[i]);
- r = radeon_fence_wait(fence, false);
- if (r)
- return r;
- continue;
- }
-
- radeon_ring_commit(rdev, &rdev->ring[i], false);
- radeon_fence_note_sync(fence, ring);
-
- semaphore->gpu_addr += 8;
- }
-
- return 0;
-}
-
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
new file mode 100644
index 000000000000..02ac8a1de4ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/**
+ * radeon_sync_create - zero init sync object
+ *
+ * @sync: sync object to initialize
+ *
+ * Just clear the sync object for now.
+ */
+void radeon_sync_create(struct radeon_sync *sync)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ sync->semaphores[i] = NULL;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ sync->sync_to[i] = NULL;
+
+ sync->last_vm_update = NULL;
+}
+
+/**
+ * radeon_sync_fence - use the semaphore to sync to a fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using the semaphore objects
+ */
+void radeon_sync_fence(struct radeon_sync *sync,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = sync->sync_to[fence->ring];
+ sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
+
+ if (fence->is_vm_update) {
+ other = sync->last_vm_update;
+ sync->last_vm_update = radeon_fence_later(fence, other);
+ }
+}
+
+/**
+ * radeon_sync_resv - use the semaphores to sync to a reservation object
+ *
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fence
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to the fence using the semaphore objects
+ */
+int radeon_sync_resv(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct reservation_object *resv,
+ bool shared)
+{
+ struct reservation_object_list *flist;
+ struct fence *f;
+ struct radeon_fence *fence;
+ unsigned i;
+ int r = 0;
+
+ /* always sync to the exclusive fence */
+ f = reservation_object_get_excl(resv);
+ fence = f ? to_radeon_fence(f) : NULL;
+ if (fence && fence->rdev == rdev)
+ radeon_sync_fence(sync, fence);
+ else if (f)
+ r = fence_wait(f, true);
+
+ flist = reservation_object_get_list(resv);
+ if (shared || !flist || r)
+ return r;
+
+ for (i = 0; i < flist->shared_count; ++i) {
+ f = rcu_dereference_protected(flist->shared[i],
+ reservation_object_held(resv));
+ fence = to_radeon_fence(f);
+ if (fence && fence->rdev == rdev)
+ radeon_sync_fence(sync, fence);
+ else
+ r = fence_wait(f, true);
+
+ if (r)
+ break;
+ }
+ return r;
+}
+
+/**
+ * radeon_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
+int radeon_sync_rings(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ int ring)
+{
+ unsigned count = 0;
+ int i, r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = sync->sync_to[i];
+ struct radeon_semaphore *semaphore;
+
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
+
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
+
+ if (count >= RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r)
+ return r;
+
+ sync->semaphores[count++] = semaphore;
+
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r)
+ return r;
+
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */

+ radeon_ring_undo(&rdev->ring[i]);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+
+ /* we assume caller has already allocated space on waiters ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i], false);
+ radeon_fence_note_sync(fence, ring);
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_sync_free - free the sync object
+ *
+ * @rdev: radeon_device pointer
+ * @sync: sync object to use
+ * @fence: fence to use for the free
+ *
+ * Free the sync object by freeing all semaphores in it.
+ */
+void radeon_sync_free(struct radeon_device *rdev,
+ struct radeon_sync *sync,
+ struct radeon_fence *fence)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
+}
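Taken together, these helpers replace the old per-semaphore sync calls with a small object the caller keeps on the stack; the DMA copy functions later in this patch follow exactly this shape. A condensed sketch (ring locking and command/fence emission elided; the example_ name is illustrative):

/* Sketch of the radeon_sync lifecycle, modelled on rv770_copy_dma()
 * below; ring locking and command/fence emission are elided. */
static int example_sync_usage(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct reservation_object *resv,
			      struct radeon_fence *fence)
{
	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);
	r = radeon_sync_resv(rdev, &sync, resv, false);	/* collect fences */
	if (!r)
		r = radeon_sync_rings(rdev, &sync, ring->idx); /* emit waits */
	if (r) {
		radeon_sync_free(rdev, &sync, NULL);
		return r;
	}
	/* ... emit the actual commands and a fence on @ring ... */
	radeon_sync_free(rdev, &sync, fence);	/* semaphores freed against @fence */
	return 0;
}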
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9db74a96ef61..ce075cb08cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
TP_fast_assign(
__entry->ring = p->ring;
- __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->dw = p->chunk_ib->length_dw;
__entry->fences = radeon_fence_count_emitted(
p->rdev, p->ring);
),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8624979afb65..d02aa1d0f588 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -196,9 +196,32 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
- else
+ else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
+ bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ int i;
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM
+ */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
+ RADEON_GEM_DOMAIN_GTT);
+ rbo->placement.num_busy_placement = 0;
+ for (i = 0; i < rbo->placement.num_placement; i++) {
+ if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
+ } else {
+ rbo->placement.busy_placement =
+ &rbo->placements[i];
+ rbo->placement.num_busy_placement = 1;
+ }
+ }
+ } else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 11b662469253..c10b2aec6450 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
unsigned idx, cmd, offset;
uint64_t start, end;
int r;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, data0);
idx = radeon_get_ib_value(p, data1);
if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
[0x00000003] = 2048,
};
- if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ if (p->chunk_ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+ p->chunk_ib->length_dw);
return -EINVAL;
}
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
if (!has_msg_cmd) {
DRM_ERROR("UVD-IBs need a msg command!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 9e85757d5599..976fe432f4e2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
uint64_t start, end, offset;
unsigned idx;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, lo);
idx = radeon_get_ib_value(p, hi);
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
uint32_t *size = &tmp;
int i, r;
- while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+ while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index dfde266529e2..cde48c42b30a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
*/
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head)
{
- struct radeon_cs_reloc *list;
+ struct radeon_bo_list *list;
unsigned i, idx;
list = drm_malloc_ab(vm->max_pde_used + 2,
- sizeof(struct radeon_cs_reloc));
+ sizeof(struct radeon_bo_list));
if (!list)
return NULL;
/* add the vm page table to the list */
- list[0].gobj = NULL;
list[0].robj = vm->page_directory;
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = false;
+ list[0].tv.shared = true;
list[0].tiling_flags = 0;
- list[0].handle = 0;
list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue;
- list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = false;
+ list[idx].tv.shared = true;
list[idx].tiling_flags = 0;
- list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -182,15 +178,18 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ struct radeon_vm_id *vm_id = &vm->ids[ring];
+
unsigned choices[2] = {};
unsigned i;
/* check if the id is still valid */
- if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
+ if (vm_id->id && vm_id->last_id_use &&
+ vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
return NULL;
/* we definitely need to flush */
- radeon_fence_unref(&vm->last_flush);
+ vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
@@ -198,8 +197,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
if (fence == NULL) {
/* found a free one */
- vm->id = i;
- trace_radeon_vm_grab_id(vm->id, ring);
+ vm_id->id = i;
+ trace_radeon_vm_grab_id(i, ring);
return NULL;
}
@@ -211,8 +210,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
- vm->id = choices[i];
- trace_radeon_vm_grab_id(vm->id, ring);
+ vm_id->id = choices[i];
+ trace_radeon_vm_grab_id(choices[i], ring);
return rdev->vm_manager.active[choices[i]];
}
}
@@ -228,6 +227,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @vm: vm we want to flush
* @ring: ring to use for flush
+ * @updates: last vm update that is waited for
*
* Flush the vm (cayman+).
*
@@ -235,15 +235,21 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
*/
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
- int ring)
+ int ring, struct radeon_fence *updates)
{
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+ struct radeon_vm_id *vm_id = &vm->ids[ring];
+
+ if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
+ radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
+
+ trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
+ radeon_fence_unref(&vm_id->flushed_updates);
+ vm_id->flushed_updates = radeon_fence_ref(updates);
+ vm_id->pd_gpu_addr = pd_addr;
+ radeon_ring_vm_flush(rdev, &rdev->ring[ring],
+ vm_id->id, vm_id->pd_gpu_addr);
- /* if we can't remember our last VM flush then flush now! */
- if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
- trace_radeon_vm_flush(pd_addr, ring, vm->id);
- vm->pd_gpu_addr = pd_addr;
- radeon_ring_vm_flush(rdev, ring, vm);
}
}
@@ -263,18 +269,13 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence)
{
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(fence);
-
- radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
- rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+ unsigned vm_id = vm->ids[fence->ring].id;
- radeon_fence_unref(&vm->last_id_use);
- vm->last_id_use = radeon_fence_ref(fence);
+ radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
+ rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
- /* we just flushed the VM, remember that */
- if (!vm->last_flush)
- vm->last_flush = radeon_fence_ref(fence);
+ radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
+ vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}
/**
@@ -387,35 +388,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
int r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
- tv.shared = false;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head, true);
- if (r)
+ r = radeon_bo_reserve(bo, false);
+ if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto error;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto error_unreserve;
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
- goto error;
+ goto error_unreserve;
ib.length_dw = 0;
@@ -425,15 +416,16 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
- goto error;
+ goto error_free;
- ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
- radeon_ib_free(rdev, &ib);
+ ib.fence->is_vm_update = true;
+ radeon_bo_fence(bo, ib.fence, false);
- return 0;
+error_free:
+ radeon_ib_free(rdev, &ib);
-error:
- ttm_eu_backoff_reservation(&ticket, &head);
+error_unreserve:
+ radeon_bo_unreserve(bo);
return r;
}
@@ -449,7 +441,7 @@ error:
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
- * Object has to be reserved!
+ * Object has to be reserved and gets unreserved by this function!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
@@ -495,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp->vm = vm;
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
+ spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -575,7 +569,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_unlock(&vm->mutex);
- return radeon_bo_reserve(bo_va->bo, false);
+ return 0;
}
/**
@@ -699,17 +693,15 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
- radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
- radeon_fence_unref(&vm->last_flush);
+ ib.fence->is_vm_update = true;
+ radeon_bo_fence(pd, ib.fence, false);
}
radeon_ib_free(rdev, &ib);
@@ -808,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*
* Global and local mutex must be locked!
*/
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_ib *ib,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
{
uint64_t mask = RADEON_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
@@ -825,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
+ int r;
- radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+ r = reservation_object_reserve_shared(pt->tbo.resv);
+ if (r)
+ return r;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -860,6 +856,33 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
last_pte + 8 * count,
last_dst, flags);
}
+
+ return 0;
+}
+
+/**
+ * radeon_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ uint64_t start, uint64_t end,
+ struct radeon_fence *fence)
+{
+ unsigned i;
+
+ start >>= radeon_vm_block_size;
+ end >>= radeon_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+ radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}
/**
@@ -892,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return -EINVAL;
}
+ spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
+ spin_unlock(&vm->status_lock);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -961,23 +986,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
- bo_va->it.last + 1, addr,
- radeon_vm_page_flags(bo_va->flags));
+ if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
+ }
+
+ r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);
- radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
+ ib.fence->is_vm_update = true;
+ radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
+ radeon_fence_unref(&bo_va->last_pt_update);
+ bo_va->last_pt_update = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
- radeon_fence_unref(&vm->last_flush);
return 0;
}
@@ -996,16 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->freed)) {
+ bo_va = list_first_entry(&vm->freed,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1024,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
+
return 0;
}
@@ -1054,14 +1108,17 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
if (bo_va->addr) {
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
+ spin_unlock(&vm->status_lock);
mutex_unlock(&vm->mutex);
}
@@ -1082,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->addr) {
- mutex_lock(&bo_va->vm->mutex);
+ spin_lock(&bo_va->vm->status_lock);
list_del(&bo_va->vm_status);
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- mutex_unlock(&bo_va->vm->mutex);
+ spin_unlock(&bo_va->vm->status_lock);
}
}
}
@@ -1103,15 +1160,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
RADEON_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size;
- int r;
+ int i, r;
- vm->id = 0;
vm->ib_bo_va = NULL;
- vm->fence = NULL;
- vm->last_flush = NULL;
- vm->last_id_use = NULL;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ vm->ids[i].id = 0;
+ vm->ids[i].flushed_updates = NULL;
+ vm->ids[i].last_id_use = NULL;
+ }
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
@@ -1165,11 +1224,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
radeon_bo_unref(&bo_va->bo);
+ radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1179,9 +1240,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_bo_unref(&vm->page_directory);
- radeon_fence_unref(&vm->fence);
- radeon_fence_unref(&vm->last_flush);
- radeon_fence_unref(&vm->last_id_use);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ radeon_fence_unref(&vm->ids[i].flushed_updates);
+ radeon_fence_unref(&vm->ids[i].last_id_use);
+ }
mutex_destroy(&vm->mutex);
}
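With one radeon_vm_id per ring, the flush decision now compares the current page directory address and the newest flushed update instead of a cached fence. A hedged sketch of the expected caller pattern, assuming the IB scheduler forwards the sync object's last_vm_update (the call site itself is not part of these hunks):

/* Assumed caller pattern, not shown in this excerpt. */
static void example_flush_after_sync(struct radeon_device *rdev,
				     struct radeon_ib *ib)
{
	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring,
				ib->sync.last_vm_update);
}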
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 7f34bad2e724..acff6e09cc40 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -44,31 +44,27 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
@@ -87,12 +83,12 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
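The ring reservation of num_loops * 5 + 8 dwords scales with the copy size: each DMA packet moves at most 0xFFFF dwords and takes 5 dwords to encode, plus a fixed overhead for the sync and fence commands. A worked example with assumed numbers:

/* Illustrative only: a 1 MiB copy (256 GPU pages of 4 KiB). */
static unsigned example_rv770_dma_ring_dwords(void)
{
	unsigned size_in_dw = (256U << 12) / 4;			/* 262144 dwords */
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);	/* 5 */

	return num_loops * 5 + 8;				/* 33 dwords reserved */
}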
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7d5083dc4acb..60df444bd075 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3365,6 +3365,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header;
if (ib->is_const_ib) {
@@ -3400,14 +3401,13 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
- radeon_ring_write(ring, ib->length_dw |
- (ib->vm ? (ib->vm->id << 24) : 0));
+ radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+ radeon_ring_write(ring, vm_id);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
@@ -5023,27 +5023,23 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
block, mc_id);
}
-void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- if (vm->id < 8) {
+ if (vm_id < 8) {
radeon_ring_write(ring,
- (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
} else {
radeon_ring_write(ring,
- (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5059,7 +5055,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index b58f12b762d7..f5cc777e1c5f 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -185,20 +185,17 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
}
}
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
+void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm->id < 8) {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ if (vm_id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
} else {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+ radeon_ring_write(ring, pd_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
@@ -208,7 +205,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
+ radeon_ring_write(ring, 1 << vm_id);
}
/**
@@ -229,31 +226,27 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct reservation_object *resv)
{
- struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;
+ struct radeon_sync sync;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
int i, num_loops;
int r = 0;
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
- }
+ radeon_sync_create(&sync);
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(rdev, sem, resv, false);
- radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_sync_resv(rdev, &sync, resv, false);
+ radeon_sync_rings(rdev, &sync, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -272,12 +265,12 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_semaphore_free(rdev, &sem, NULL);
+ radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
- radeon_semaphore_free(rdev, &sem, fence);
+ radeon_sync_free(rdev, &sync, fence);
return fence;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 676e6c2ba90a..32e354b8b0ab 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3398,6 +3398,15 @@ static int si_process_firmware_header(struct radeon_device *rdev)
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->fan_table_start = tmp;
+
+ ret = si_read_smc_sram_dword(rdev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
&tmp, si_pi->sram_end);
if (ret)
@@ -5817,8 +5826,33 @@ void si_dpm_setup_asic(struct radeon_device *rdev)
si_enable_acpi_power_management(rdev);
}
-static int si_set_thermal_temperature_range(struct radeon_device *rdev,
- int min_temp, int max_temp)
+static int si_thermal_enable_alert(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+ if (enable) {
+ PPSMC_Result result;
+
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = false;
+ result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32(CG_THERMAL_INT, thermal_int);
+ rdev->irq.dpm_thermal = true;
+ }
+
+ return 0;
+}
+
+static int si_thermal_set_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
@@ -5842,6 +5876,309 @@ static int si_set_thermal_temperature_range(struct radeon_device *rdev,
return 0;
}
+static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ u32 tmp;
+
+ if (si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ si_pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ si_pi->t_min = tmp;
+ si_pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct radeon_device *rdev)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!si_pi->fan_table_start) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
+ t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.slope1 = cpu_to_be16(slope1);
+ fan_table.slope2 = cpu_to_be16(slope2);
+
+ fan_table.fdo_min = cpu_to_be16(fdo_min);
+
+ fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
+
+ fan_table.hys_up = cpu_to_be16(1);
+
+ fan_table.hys_slope = cpu_to_be16(1);
+
+ fan_table.temp_resp_lim = cpu_to_be16(5);
+
+ reference_clock = radeon_get_xclk(rdev);
+
+ fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+
+ fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.temp_src = (uint8_t)tmp;
+
+ ret = si_copy_bytes_to_smc(rdev,
+ si_pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ si_pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return 0;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
+{
+ PPSMC_Result ret;
+
+ ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32(CG_FDO_CTRL0, tmp);
+
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+
+ return 0;
+}
+
+static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = radeon_get_xclk(rdev);
+
+ if (rdev->pm.no_fan)
+ return -ENOENT;
+
+ if (rdev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < rdev->pm.fan_min_rpm) ||
+ (speed > rdev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (rdev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32(CG_TACH_CTRL, tmp);
+
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
+{
+ struct si_power_info *si_pi = si_get_pi(rdev);
+ u32 tmp;
+
+ if (!si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(si_pi->t_min);
+ WREG32(CG_FDO_CTRL2, tmp);
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ si_fan_ctrl_start_smc_fan_control(rdev);
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void si_thermal_initialize(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (rdev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+ tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
+ WREG32(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
+{
+ int ret;
+
+ si_thermal_initialize(rdev);
+ ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
+ if (rdev->pm.dpm.fan.ucode_fan_control) {
+ ret = si_halt_smc(rdev);
+ if (ret)
+ return ret;
+ ret = si_thermal_setup_fan_table(rdev);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(rdev);
+ if (ret)
+ return ret;
+ si_thermal_start_smc_fan_control(rdev);
+ }
+
+ return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
+{
+ if (!rdev->pm.no_fan) {
+ si_fan_ctrl_set_default_mode(rdev);
+ si_fan_ctrl_stop_smc_fan_control(rdev);
+ }
+}
+
int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -5954,31 +6291,39 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(rdev);
+
ni_update_current_ps(rdev, boot_ps);
return 0;
}
-int si_dpm_late_enable(struct radeon_device *rdev)
+static int si_set_temperature_range(struct radeon_device *rdev)
{
int ret;
- if (rdev->irq.installed &&
- r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- PPSMC_Result result;
+ ret = si_thermal_enable_alert(rdev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(rdev, true);
+ if (ret)
+ return ret;
- ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
- result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+ return ret;
+}
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- }
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+ int ret;
- return 0;
+ ret = si_set_temperature_range(rdev);
+ if (ret)
+ return ret;
+
+ return ret;
}
void si_dpm_disable(struct radeon_device *rdev)
@@ -5988,6 +6333,7 @@ void si_dpm_disable(struct radeon_device *rdev)
if (!si_is_smc_running(rdev))
return;
+ si_thermal_stop_thermal_controller(rdev);
si_disable_ulv(rdev);
si_clear_vc(rdev);
if (pi->thermal_protection)
@@ -6526,6 +6872,9 @@ int si_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ rdev->pm.dpm.fan.ucode_fan_control = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 8b5c06a0832d..d16bb1b5f10f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -182,6 +182,7 @@ struct si_power_info {
u32 dte_table_start;
u32 spll_table_start;
u32 papm_cfg_table_start;
+ u32 fan_table_start;
/* CAC stuff */
const struct si_cac_config_reg *cac_weights;
const struct si_cac_config_reg *lcac_config;
@@ -197,6 +198,10 @@ struct si_power_info {
/* SVI2 */
u8 svd_gpio_id;
u8 svc_gpio_id;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 73dbc79c959d..e5bb92f16775 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -135,7 +135,7 @@ void si_reset_smc(struct radeon_device *rdev)
int si_program_jump_on_start(struct radeon_device *rdev)
{
- static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+ static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 6635da9ec986..4069be89e585 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -180,7 +180,10 @@
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
-
+#define CG_THERMAL_STATUS 0x704
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
#define CG_THERMAL_INT 0x708
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
@@ -191,6 +194,10 @@
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
+#define CG_MULT_THERMAL_CTRL 0x710
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -199,6 +206,37 @@
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
+#define CG_FDO_CTRL0 0x754
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0x758
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0x75C
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+
+#define CG_TACH_CTRL 0x770
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0x774
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
#define GENERAL_PWRMGT 0x780
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 623a0b1e2d9d..3c779838d9ab 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -245,6 +245,31 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
+struct PP_SIslands_FanTable
+{
+ uint8_t fdo_mode;
+ uint8_t padding;
+ int16_t temp_min;
+ int16_t temp_med;
+ int16_t temp_max;
+ int16_t slope1;
+ int16_t slope2;
+ int16_t fdo_min;
+ int16_t hys_up;
+ int16_t hys_down;
+ int16_t hys_slope;
+ int16_t temp_resp_lim;
+ int16_t temp_curr;
+ int16_t slope_curr;
+ int16_t pwm_curr;
+ uint32_t refresh_period;
+ int16_t fdo_max;
+ uint8_t temp_src;
+ int8_t padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
index 82f70c90a9ee..0b0b404ff091 100644
--- a/drivers/gpu/drm/radeon/smu7_discrete.h
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -431,6 +431,31 @@ struct SMU7_Discrete_MCRegisters
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+struct SMU7_Discrete_FanTable
+{
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
+
+
struct SMU7_Discrete_PmFuses {
// dw0-dw1
uint8_t BapmVddCVidHiSidd[8];
@@ -462,7 +487,10 @@ struct SMU7_Discrete_PmFuses {
uint8_t BapmVddCVidHiSidd2[8];
// dw11-dw12
- uint32_t Reserved6[2];
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t CalcMeasPowerBlend;
// dw13-dw16
uint8_t GnbLPML[16];
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c96f6089f8bf..2324a526de65 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -11,10 +11,17 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+config DRM_RCAR_HDMI
+ bool "R-Car DU HDMI Encoder Support"
+ depends on DRM_RCAR_DU
+ depends on OF
+ help
+ Enable support for external HDMI encoders.
+
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
help
- Enable support the R-Car Display Unit embedded LVDS encoders
- (currently only on R8A7790).
+ Enable support for the R-Car Display Unit embedded LVDS encoders
+ (currently only on R8A7790 and R8A7791).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 12b8d4477835..05de1c4097af 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,6 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_plane.o \
rcar_du_vgacon.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
+ rcar_du_hdmienc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 148b50589181..23cc910951f4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
@@ -585,7 +586,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
if (irq < 0) {
dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
- return ret;
+ return irq;
}
ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index e97ae502dec5..984e6083699f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
-#include <linux/platform_data/rcar-du.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -41,6 +40,15 @@ struct rcar_du_crtc {
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+ RCAR_DU_OUTPUT_DPAD1,
+ RCAR_DU_OUTPUT_LVDS0,
+ RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_TCON,
+ RCAR_DU_OUTPUT_MAX,
+};
+
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index d212efa6a495..7bfa09cf18d5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -146,12 +146,11 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
struct device_node *np = pdev->dev.of_node;
- struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
struct rcar_du_device *rcdu;
struct resource *mem;
int ret;
- if (pdata == NULL && np == NULL) {
+ if (np == NULL) {
dev_err(dev->dev, "no platform data\n");
return -ENODEV;
}
@@ -163,7 +162,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
}
rcdu->dev = &pdev->dev;
- rcdu->pdata = pdata;
rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
: (void *)platform_get_device_id(pdev)->driver_data;
rcdu->ddev = dev;
@@ -330,7 +328,6 @@ static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
.remove = rcar_du_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rcar-du",
.pm = &rcar_du_pm_ops,
.of_match_table = rcar_du_of_table,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 8e494633c3b3..0a724669f02d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
-#include <linux/platform_data/rcar-du.h>
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -67,7 +66,6 @@ struct rcar_du_device_info {
struct rcar_du_device {
struct device *dev;
- const struct rcar_du_platform_data *pdata;
const struct rcar_du_device_info *info;
void __iomem *mmio;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 7c0ec95915ef..34a122a39664 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,6 +19,8 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
#include "rcar_du_lvdsenc.h"
@@ -33,7 +35,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
{
struct rcar_du_connector *rcon = to_rcar_connector(connector);
- return &rcon->encoder->encoder;
+ return rcar_encoder_to_drm_encoder(rcon->encoder);
}
/* -----------------------------------------------------------------------------
@@ -142,10 +144,11 @@ static const struct drm_encoder_funcs encoder_funcs = {
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
- const struct rcar_du_encoder_data *data,
- struct device_node *np)
+ struct device_node *enc_node,
+ struct device_node *con_node)
{
struct rcar_du_encoder *renc;
+ struct drm_encoder *encoder;
unsigned int encoder_type;
int ret;
@@ -154,6 +157,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOMEM;
renc->output = output;
+ encoder = rcar_encoder_to_drm_encoder(renc);
switch (output) {
case RCAR_DU_OUTPUT_LVDS0:
@@ -175,6 +179,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
case RCAR_DU_ENCODER_LVDS:
encoder_type = DRM_MODE_ENCODER_LVDS;
break;
+ case RCAR_DU_ENCODER_HDMI:
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
case RCAR_DU_ENCODER_NONE:
default:
/* No external encoder, use the internal encoder type. */
@@ -182,23 +189,35 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
}
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- encoder_type);
- if (ret < 0)
- return ret;
+ if (type == RCAR_DU_ENCODER_HDMI) {
+ if (renc->lvds) {
+ dev_err(rcdu->dev,
+ "Chaining LVDS and HDMI encoders not supported\n");
+ return -EINVAL;
+ }
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+ ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ encoder_type);
+ if (ret < 0)
+ return ret;
- switch (encoder_type) {
- case DRM_MODE_ENCODER_LVDS: {
- const struct rcar_du_panel_data *pdata =
- data ? &data->connector.lvds.panel : NULL;
- return rcar_du_lvds_connector_init(rcdu, renc, pdata, np);
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
}
+ switch (encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ return rcar_du_lvds_connector_init(rcdu, renc, con_node);
+
case DRM_MODE_ENCODER_DAC:
return rcar_du_vga_connector_init(rcdu, renc);
+ case DRM_MODE_ENCODER_TMDS:
+ return rcar_du_hdmi_connector_init(rcdu, renc);
+
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index bd624135ef1f..719b6f2a031c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -14,21 +14,32 @@
#ifndef __RCAR_DU_ENCODER_H__
#define __RCAR_DU_ENCODER_H__
-#include <linux/platform_data/rcar-du.h>
-
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder_slave.h>
struct rcar_du_device;
+struct rcar_du_hdmienc;
struct rcar_du_lvdsenc;
+enum rcar_du_encoder_type {
+ RCAR_DU_ENCODER_UNUSED = 0,
+ RCAR_DU_ENCODER_NONE,
+ RCAR_DU_ENCODER_VGA,
+ RCAR_DU_ENCODER_LVDS,
+ RCAR_DU_ENCODER_HDMI,
+};
+
struct rcar_du_encoder {
- struct drm_encoder encoder;
+ struct drm_encoder_slave slave;
enum rcar_du_output output;
+ struct rcar_du_hdmienc *hdmi;
struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, encoder)
+ container_of(e, struct rcar_du_encoder, slave.base)
+
+#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
struct rcar_du_connector {
struct drm_connector connector;
@@ -44,7 +55,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector);
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
- const struct rcar_du_encoder_data *data,
- struct device_node *np);
+ struct device_node *enc_node,
+ struct device_node *con_node);
#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
new file mode 100644
index 000000000000..4d7d4dd46d26
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -0,0 +1,121 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmicon.h"
+#include "rcar_du_kms.h"
+
+#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
+
+static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->get_modes == NULL)
+ return 0;
+
+ return sfuncs->get_modes(encoder, connector);
+}
+
+static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_valid == NULL)
+ return MODE_OK;
+
+ return sfuncs->mode_valid(encoder, mode);
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = rcar_du_hdmi_connector_get_modes,
+ .mode_valid = rcar_du_hdmi_connector_mode_valid,
+ .best_encoder = rcar_du_connector_best_encoder,
+};
+
+static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct rcar_du_connector *con = to_rcar_connector(connector);
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->detect == NULL)
+ return connector_status_unknown;
+
+ return sfuncs->detect(encoder, connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = rcar_du_hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = rcar_du_hdmi_connector_destroy,
+};
+
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
+{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+ struct rcar_du_connector *rcon;
+ struct drm_connector *connector;
+ int ret;
+
+ rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+ if (rcon == NULL)
+ return -ENOMEM;
+
+ connector = &rcon->connector;
+ connector->display_info.width_mm = 0;
+ connector->display_info.height_mm = 0;
+
+ ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(connector, &connector_helper_funcs);
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ return ret;
+
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_object_property_set_value(&connector->base,
+ rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
+
+ connector->encoder = encoder;
+ rcon->encoder = renc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
new file mode 100644
index 000000000000..87daa949227f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
@@ -0,0 +1,31 @@
+/*
+ * R-Car Display Unit HDMI Connector
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMICON_H__
+#define __RCAR_DU_HDMICON_H__
+
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc);
+#else
+static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
new file mode 100644
index 000000000000..359bc999a9c8
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -0,0 +1,151 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_hdmienc.h"
+
+struct rcar_du_hdmienc {
+ struct rcar_du_encoder *renc;
+ struct device *dev;
+ int dpms;
+};
+
+#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
+#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
+
+static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (hdmienc->dpms == mode)
+ return;
+
+ if (sfuncs->dpms)
+ sfuncs->dpms(encoder, mode);
+
+ hdmienc->dpms = mode;
+}
+
+static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_fixup == NULL)
+ return true;
+
+ return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
+{
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
+{
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+ struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+
+ if (sfuncs->mode_set)
+ sfuncs->mode_set(encoder, mode, adjusted_mode);
+
+ rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = rcar_du_hdmienc_dpms,
+ .mode_fixup = rcar_du_hdmienc_mode_fixup,
+ .prepare = rcar_du_hdmienc_mode_prepare,
+ .commit = rcar_du_hdmienc_mode_commit,
+ .mode_set = rcar_du_hdmienc_mode_set,
+};
+
+static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
+{
+ struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
+
+ rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ drm_encoder_cleanup(encoder);
+ put_device(hdmienc->dev);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = rcar_du_hdmienc_cleanup,
+};
+
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc, struct device_node *np)
+{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
+ struct drm_i2c_encoder_driver *driver;
+ struct i2c_client *i2c_slave;
+ struct rcar_du_hdmienc *hdmienc;
+ int ret;
+
+ hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
+ if (hdmienc == NULL)
+ return -ENOMEM;
+
+ /* Locate the slave I2C device and driver. */
+ i2c_slave = of_find_i2c_device_by_node(np);
+ if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
+ return -EPROBE_DEFER;
+
+ hdmienc->dev = &i2c_slave->dev;
+
+ if (hdmienc->dev->driver == NULL) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ /* Initialize the slave encoder. */
+ driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
+ ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
+ if (ret < 0)
+ goto error;
+
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret < 0)
+ goto error;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ renc->hdmi = hdmienc;
+ hdmienc->renc = renc;
+
+ return 0;
+
+error:
+ put_device(hdmienc->dev);
+ return ret;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
new file mode 100644
index 000000000000..2ff0128ac8e1
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
@@ -0,0 +1,35 @@
+/*
+ * R-Car Display Unit HDMI Encoder
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_HDMIENC_H__
+#define __RCAR_DU_HDMIENC_H__
+
+#include <linux/module.h>
+
+struct device_node;
+struct rcar_du_device;
+struct rcar_du_encoder;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
+int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc, struct device_node *np);
+#else
+static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 6c24ad7d03ef..0c5ee616b5a3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -126,9 +126,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
else
align = 16 * args->bpp / 8;
- args->pitch = roundup(max(args->pitch, min_pitch), align);
+ args->pitch = roundup(min_pitch, align);
- return drm_gem_cma_dumb_create(file, dev, args);
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
@@ -190,49 +190,16 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.output_poll_changed = rcar_du_output_poll_changed,
};
-static int rcar_du_encoders_init_pdata(struct rcar_du_device *rcdu)
-{
- unsigned int num_encoders = 0;
- unsigned int i;
- int ret;
-
- for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
- const struct rcar_du_encoder_data *pdata =
- &rcdu->pdata->encoders[i];
- const struct rcar_du_output_routing *route =
- &rcdu->info->routes[pdata->output];
-
- if (pdata->type == RCAR_DU_ENCODER_UNUSED)
- continue;
-
- if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
- route->possible_crtcs == 0) {
- dev_warn(rcdu->dev,
- "encoder %u references unexisting output %u, skipping\n",
- i, pdata->output);
- continue;
- }
-
- ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
- pdata, NULL);
- if (ret < 0)
- return ret;
-
- num_encoders++;
- }
-
- return num_encoders;
-}
-
-static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
- enum rcar_du_output output,
- struct of_endpoint *ep)
+static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
+ enum rcar_du_output output,
+ struct of_endpoint *ep)
{
static const struct {
const char *compatible;
enum rcar_du_encoder_type type;
} encoders[] = {
{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
+ { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
};
@@ -323,14 +290,14 @@ static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
connector = entity;
}
- ret = rcar_du_encoder_init(rcdu, enc_type, output, NULL, connector);
+ ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
of_node_put(encoder);
of_node_put(connector);
return ret < 0 ? ret : 1;
}
-static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
+static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
struct device_node *prev = NULL;
@@ -377,7 +344,7 @@ static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
}
/* Process the output pipeline. */
- ret = rcar_du_encoders_init_dt_one(rcdu, output, &ep);
+ ret = rcar_du_encoders_init_one(rcdu, output, &ep);
if (ret < 0) {
of_node_put(ep_node);
return ret;
@@ -442,11 +409,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
- if (rcdu->pdata)
- ret = rcar_du_encoders_init_pdata(rcdu);
- else
- ret = rcar_du_encoders_init_dt(rcdu);
-
+ ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 115eed20db12..6d9811c052c4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -27,7 +27,11 @@
struct rcar_du_lvds_connector {
struct rcar_du_connector connector;
- struct rcar_du_panel_data panel;
+ struct {
+ unsigned int width_mm; /* Panel width in mm */
+ unsigned int height_mm; /* Panel height in mm */
+ struct videomode mode;
+ } panel;
};
#define to_rcar_lvds_connector(c) \
@@ -78,31 +82,26 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel,
/* TODO const */ struct device_node *np)
{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_lvds_connector *lvdscon;
struct drm_connector *connector;
+ struct display_timing timing;
int ret;
lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
if (lvdscon == NULL)
return -ENOMEM;
- if (panel) {
- lvdscon->panel = *panel;
- } else {
- struct display_timing timing;
-
- ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
- return ret;
+ ret = of_get_display_timing(np, "panel-timing", &timing);
+ if (ret < 0)
+ return ret;
- videomode_from_timing(&timing, &lvdscon->panel.mode);
+ videomode_from_timing(&timing, &lvdscon->panel.mode);
- of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
- of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
- }
+ of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
+ of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
connector = &lvdscon->connector.connector;
connector->display_info.width_mm = lvdscon->panel.width_mm;
@@ -122,11 +121,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
- ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
- connector->encoder = &renc->encoder;
+ connector->encoder = encoder;
lvdscon->connector.encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d11424d537f9..d4881ee0be7e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -16,11 +16,9 @@
struct rcar_du_device;
struct rcar_du_encoder;
-struct rcar_du_panel_data;
int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel,
struct device_node *np);
#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 3303a55cec79..f65aabda0796 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_data/rcar-du.h>
struct rcar_drm_crtc;
struct rcar_du_lvdsenc;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 564a723ede03..752747a5e920 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -52,6 +52,7 @@ static const struct drm_connector_funcs connector_funcs = {
int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc)
{
+ struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
struct rcar_du_connector *rcon;
struct drm_connector *connector;
int ret;
@@ -78,11 +79,11 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
drm_object_property_set_value(&connector->base,
rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
- ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
- connector->encoder = &renc->encoder;
+ connector->encoder = encoder;
rcon->encoder = renc;
return 0;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644
index 000000000000..ca9f085efa92
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -0,0 +1,17 @@
+config DRM_ROCKCHIP
+ tristate "DRM Support for Rockchip"
+ depends on DRM && ROCKCHIP_IOMMU
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_PANEL
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have a Rockchip SoC.
+ This driver provides kernel mode setting and buffer
+ management to userspace. This driver does not provide
+ 2D or 3D acceleration; acceleration is performed by other
+ IP found on the SoC.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644
index 000000000000..2cb0672f57ed
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
+ rockchip_drm_gem.o
+
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644
index 000000000000..a798c7c71f91
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
+#include "rockchip_drm_gem.h"
+
+#define DRIVER_NAME "rockchip"
+#define DRIVER_DESC "RockChip Soc DRM"
+#define DRIVER_DATE "20140818"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+/*
+ * Attach a (component) device to the shared DRM DMA mapping owned by the
+ * master DRM device. This is used by the VOPs to map GEM buffers to a
+ * common DMA mapping.
+ */
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+ int ret;
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ return arm_iommu_attach_device(dev, mapping);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
+
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
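As a rough usage sketch: the attach/detach pair above is meant to be called from a VOP component's bind/unbind callbacks. The vop_bind()/vop_unbind() functions below are hypothetical stand-ins; only rockchip_drm_dma_attach_device()/rockchip_drm_dma_detach_device() come from this patch.

/* Hypothetical VOP component callbacks (sketch, not part of the patch). */
static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;
	int ret;

	/* Join the master's shared IOMMU mapping so GEM buffers are usable. */
	ret = rockchip_drm_dma_attach_device(drm_dev, dev);
	if (ret)
		return ret;

	/* ... CRTC and plane registration would follow here ... */
	return 0;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;

	/* ... CRTC and plane teardown would go here ... */
	rockchip_drm_dma_detach_device(drm_dev, dev);
}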
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe > ROCKCHIP_MAX_CRTC)
+ return -EINVAL;
+
+ priv->crtc_funcs[pipe] = crtc_funcs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
+
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe > ROCKCHIP_MAX_CRTC)
+ return;
+
+ priv->crtc_funcs[pipe] = NULL;
+}
+EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
+
+static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
+ int pipe)
+{
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (i++ == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->enable_vblank)
+ return priv->crtc_funcs[pipe]->enable_vblank(crtc);
+
+ return 0;
+}
+
+static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->enable_vblank)
+ priv->crtc_funcs[pipe]->disable_vblank(crtc);
+}
+
+static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+{
+ struct rockchip_drm_private *private;
+ struct dma_iommu_mapping *mapping;
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ drm_dev->dev_private = private;
+
+ drm_mode_config_init(drm_dev);
+
+ rockchip_drm_mode_config_init(drm_dev);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_config_cleanup;
+ }
+
+ /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
+ SZ_2G);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err_config_cleanup;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_release_mapping;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret)
+ goto err_release_mapping;
+
+ /* Try to bind all sub drivers. */
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ goto err_detach_device;
+
+ /* Initialize KMS polling to handle hotplug detection (HPD). */
+ drm_kms_helper_poll_init(drm_dev);
+
+ /*
+ * Enable DRM IRQ mode: with irq_enabled = true the vblank
+ * feature can be used.
+ */
+ drm_dev->irq_enabled = true;
+
+ ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
+ if (ret)
+ goto err_kms_helper_poll_fini;
+
+ /*
+ * With vblank_disable_allowed = true, the vblank interrupt will be
+ * disabled by the DRM timer once the current process gives up
+ * ownership of the vblank event (i.e. after drm_vblank_put() is called).
+ */
+ drm_dev->vblank_disable_allowed = true;
+
+ ret = rockchip_drm_fbdev_init(drm_dev);
+ if (ret)
+ goto err_vblank_cleanup;
+
+ return 0;
+err_vblank_cleanup:
+ drm_vblank_cleanup(drm_dev);
+err_kms_helper_poll_fini:
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+err_detach_device:
+ arm_iommu_detach_device(dev);
+err_release_mapping:
+ arm_iommu_release_mapping(dev->archdata.mapping);
+err_config_cleanup:
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+ return ret;
+}
+
+static int rockchip_drm_unload(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ rockchip_drm_fbdev_fini(drm_dev);
+ drm_vblank_cleanup(drm_dev);
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+
+ return 0;
+}
+
+void rockchip_drm_lastclose(struct drm_device *dev)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
+}
+
+static const struct file_operations rockchip_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = rockchip_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+const struct vm_operations_struct rockchip_drm_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_driver rockchip_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = rockchip_drm_load,
+ .unload = rockchip_drm_unload,
+ .lastclose = rockchip_drm_lastclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = rockchip_drm_crtc_enable_vblank,
+ .disable_vblank = rockchip_drm_crtc_disable_vblank,
+ .gem_vm_ops = &rockchip_drm_vm_ops,
+ .gem_free_object = rockchip_gem_free_object,
+ .dumb_create = rockchip_gem_dumb_create,
+ .dumb_map_offset = rockchip_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+ .gem_prime_vmap = rockchip_gem_prime_vmap,
+ .gem_prime_vunmap = rockchip_gem_prime_vunmap,
+ .gem_prime_mmap = rockchip_gem_mmap_buf,
+ .fops = &rockchip_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+static int rockchip_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+ enum drm_connector_status status;
+ bool changed = false;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int desired_mode = connector->dpms;
+
+ /*
+ * At suspend time the DPMS state was saved in connector->dpms and
+ * the old value restored, so at this point the connector's DPMS
+ * status must be DRM_MODE_DPMS_OFF.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ /*
+ * If the connector has been disconnected during suspend,
+ * disconnect it from the encoder and leave it off. We'll notify
+ * userspace at the end.
+ */
+ if (desired_mode == DRM_MODE_DPMS_ON) {
+ status = connector->funcs->detect(connector, true);
+ if (status == connector_status_disconnected) {
+ connector->encoder = NULL;
+ connector->status = status;
+ changed = true;
+ continue;
+ }
+ }
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, desired_mode);
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rockchip_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
+ rockchip_drm_sys_resume)
+};
+
+/*
+ * rockchip_drm_encoder_get_mux_id - look up the CRTC mux id for an encoder
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ */
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder)
+{
+ struct device_node *ep = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct of_endpoint endpoint;
+ struct device_node *port;
+ int ret;
+
+ if (!node || !crtc)
+ return -EINVAL;
+
+ do {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == crtc->port) {
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ return ret ?: endpoint.id;
+ }
+ } while (ep);
+
+ return -EINVAL;
+}
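A minimal sketch of a caller, assuming encoder_np is the encoder's own device tree node; hypothetical_encoder_mux_setup() and the omitted register write are hypothetical, only rockchip_drm_encoder_get_mux_id() comes from this patch.

/* Sketch: resolve which VOP (CRTC) feeds this encoder via the OF graph. */
static void hypothetical_encoder_mux_setup(struct device_node *encoder_np,
					   struct drm_encoder *encoder)
{
	int mux = rockchip_drm_encoder_get_mux_id(encoder_np, encoder);

	if (mux < 0)
		return;	/* encoder is not wired to the currently used CRTC */

	/* mux is the endpoint id; a SoC-specific mux register write follows. */
}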
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static void rockchip_add_endpoints(struct device *dev,
+ struct component_match **match,
+ struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int rockchip_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_free;
+
+ dev_set_drvdata(dev, drm);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void rockchip_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+ drm_dev_unref(drm);
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops rockchip_drm_ops = {
+ .bind = rockchip_drm_bind,
+ .unbind = rockchip_drm_unbind,
+};
+
+static int rockchip_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct component_match *match = NULL;
+ struct device_node *np = dev->of_node;
+ struct device_node *port;
+ int i;
+
+ if (!np)
+ return -ENODEV;
+ /*
+ * Bind the crtc ports first, so that
+ * drm_of_find_possible_crtcs called from encoder .bind callbacks
+ * works as expected.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ component_match_add(dev, &match, compare_of, port->parent);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ if (!match) {
+ dev_err(dev, "No available vop found for display-subsystem.\n");
+ return -ENODEV;
+ }
+ /*
+ * For each bound crtc, bind the encoders attached to its
+ * remote endpoint.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ rockchip_add_endpoints(dev, &match, port);
+ of_node_put(port);
+ }
+
+ return component_master_add_with_match(dev, &rockchip_drm_ops, match);
+}
+
+static int rockchip_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &rockchip_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_drm_dt_ids[] = {
+ { .compatible = "rockchip,display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+
+static struct platform_driver rockchip_drm_platform_driver = {
+ .probe = rockchip_drm_platform_probe,
+ .remove = rockchip_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rockchip-drm",
+ .of_match_table = rockchip_drm_dt_ids,
+ .pm = &rockchip_drm_pm_ops,
+ },
+};
+
+module_platform_driver(rockchip_drm_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644
index 000000000000..dc4e5f03ac79
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_DRV_H
+#define _ROCKCHIP_DRM_DRV_H
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/module.h>
+#include <linux/component.h>
+
+#define ROCKCHIP_MAX_FB_BUFFER 3
+#define ROCKCHIP_MAX_CONNECTOR 2
+#define ROCKCHIP_MAX_CRTC 2
+
+struct drm_device;
+struct drm_connector;
+
+/*
+ * Rockchip drm private crtc funcs.
+ * @enable_vblank: enable crtc vblank irq.
+ * @disable_vblank: disable crtc vblank irq.
+ */
+struct rockchip_crtc_funcs {
+ int (*enable_vblank)(struct drm_crtc *crtc);
+ void (*disable_vblank)(struct drm_crtc *crtc);
+};
+
+/*
+ * Rockchip drm private structure.
+ *
+ * @fbdev_helper: fbdev emulation helper state.
+ * @fbdev_bo: GEM object backing the fbdev framebuffer.
+ * @crtc_funcs: per-pipe CRTC vblank callbacks, used to map a "pipe" index
+ *	to its drm_crtc operations.
+ */
+struct rockchip_drm_private {
+ struct drm_fb_helper fbdev_helper;
+ struct drm_gem_object *fbdev_bo;
+ const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
+};
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe);
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder);
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
+ int out_mode);
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev);
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev);
+
+#endif /* _ROCKCHIP_DRM_DRV_H_ */
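A hedged sketch of how a CRTC implementation would plug into this interface; vop_enable_vblank(), vop_disable_vblank() and hypothetical_vop_crtc_register() are illustrative names, only the rockchip_crtc_funcs type and the register/unregister helpers come from this header.

static int vop_enable_vblank(struct drm_crtc *crtc);	/* hypothetical */
static void vop_disable_vblank(struct drm_crtc *crtc);	/* hypothetical */

static const struct rockchip_crtc_funcs hypothetical_vop_crtc_funcs = {
	.enable_vblank = vop_enable_vblank,
	.disable_vblank = vop_disable_vblank,
};

static int hypothetical_vop_crtc_register(struct drm_device *drm_dev, int pipe)
{
	/* Hook this pipe's vblank callbacks into the shared DRM driver;
	 * rockchip_unregister_crtc_funcs(drm_dev, pipe) undoes this on teardown. */
	return rockchip_register_crtc_funcs(drm_dev, &hypothetical_vop_crtc_funcs,
					    pipe);
}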
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644
index 000000000000..77d52893d40f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
+
+struct rockchip_drm_fb {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
+};
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
+
+ if (plane >= ROCKCHIP_MAX_FB_BUFFER)
+ return NULL;
+
+ return rk_fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
+
+static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+ struct drm_gem_object *obj;
+ int i;
+
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
+ obj = rockchip_fb->obj[i];
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(rockchip_fb);
+}
+
+static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+
+ return drm_gem_handle_create(file_priv,
+ rockchip_fb->obj[0], handle);
+}
+
+static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+ .destroy = rockchip_drm_fb_destroy,
+ .create_handle = rockchip_drm_fb_create_handle,
+};
+
+static struct rockchip_drm_fb *
+rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ int ret;
+ int i;
+
+ rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
+ if (!rockchip_fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ rockchip_fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
+ ret);
+ kfree(rockchip_fb);
+ return ERR_PTR(ret);
+ }
+
+ return rockchip_fb;
+}
+
+static struct drm_framebuffer *
+rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int num_planes;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
+ ROCKCHIP_MAX_FB_BUFFER);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i] +
+ width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = obj;
+ }
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(rockchip_fb)) {
+ ret = PTR_ERR(rockchip_fb);
+ goto err_gem_object_unreference;
+ }
+
+ return &rockchip_fb->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(objs[i]);
+ return ERR_PTR(ret);
+}
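For reference, a worked instance of the min_size check above; the 1920x1080 XRGB8888 numbers are illustrative, not taken from the patch.

/* Single-plane XRGB8888 (cpp = 4), width = 1920, height = 1080,
 * pitch = 1920 * 4 = 7680, offset = 0. */
unsigned int example_min_size = (1080 - 1) * 7680 + 0 + 1920 * 4;
/* = 8294400 bytes, i.e. exactly 1920 * 1080 * 4; a smaller GEM object
 * would be rejected with -EINVAL. */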
+
+static void rockchip_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = &private->fbdev_helper;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
+
+static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
+ .fb_create = rockchip_user_fb_create,
+ .output_poll_changed = rockchip_drm_output_poll_changed,
+};
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(rockchip_fb))
+ return NULL;
+
+ return &rockchip_fb->fb;
+}
+
+void rockchip_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * Set the default maximum width and height (4096x4096); these values
+ * are used to check the framebuffer size limit in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644
index 000000000000..09574d48226f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FB_H
+#define _ROCKCHIP_DRM_FB_H
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
+
+void rockchip_drm_mode_config_init(struct drm_device *dev);
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644
index 000000000000..a5d889a8716b
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+
+#define PREFERRED_BPP 32
+#define to_drm_private(x) \
+ container_of(x, struct rockchip_drm_private, fbdev_helper)
+
+static int rockchip_fbdev_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct rockchip_drm_private *private = to_drm_private(helper);
+
+ return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
+}
+
+static struct fb_ops rockchip_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_mmap = rockchip_fbdev_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
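+/*
+ * .fb_probe callback: allocate a GEM buffer large enough for the requested
+ * surface, wrap it in a DRM framebuffer and fill in the fbdev fb_info so
+ * legacy fbdev userspace can use it.
+ */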
+static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct rockchip_drm_private *private = to_drm_private(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct rockchip_gem_object *rk_obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ rk_obj = rockchip_gem_create_object(dev, size);
+ if (IS_ERR(rk_obj))
+ return -ENOMEM;
+
+ private->fbdev_bo = &rk_obj->base;
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_rockchip_gem_free_object;
+ }
+
+ helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
+ private->fbdev_bo);
+ if (IS_ERR(helper->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(helper->fb);
+ goto err_framebuffer_release;
+ }
+
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &rockchip_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_framebuffer_unref;
+ }
+
+ fb = helper->fb;
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ dev->mode_config.fb_base = 0;
+ fbi->screen_base = rk_obj->kvaddr + offset;
+ fbi->screen_size = rk_obj->base.size;
+ fbi->fix.smem_len = rk_obj->base.size;
+
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+ fb->width, fb->height, fb->depth, rk_obj->kvaddr,
+ offset, size);
+ return 0;
+
+err_drm_framebuffer_unref:
+ drm_framebuffer_unreference(helper->fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_rockchip_gem_free_object:
+ rockchip_gem_free_object(&rk_obj->base);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
+ .fb_probe = rockchip_drm_fbdev_create,
+};
+
+int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+ unsigned int num_crtc;
+ int ret;
+
+ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ return -EINVAL;
+
+ num_crtc = dev->mode_config.num_crtc;
+
+ helper = &private->fbdev_helper;
+
+ drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
+ ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+ return ret;
+}
+
+void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+
+ helper = &private->fbdev_helper;
+
+ if (helper->fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = helper->fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
+ ret);
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (helper->fb)
+ drm_framebuffer_unreference(helper->fb);
+
+ drm_fb_helper_fini(helper);
+}
diff --git a/drivers/staging/android/binder.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
index eb0834656dfe..50432e9b5b37 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -1,10 +1,6 @@
/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -14,17 +10,12 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
-#define BINDER_IPC_32BIT 1
-#endif
-
-#include "uapi/binder.h"
+#ifndef _ROCKCHIP_DRM_FBDEV_H
+#define _ROCKCHIP_DRM_FBDEV_H
-#endif /* _LINUX_BINDER_H */
+int rockchip_drm_fbdev_init(struct drm_device *dev);
+void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644
index 000000000000..bc98a227dc76
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_vma_manager.h>
+
+#include <linux/dma-attrs.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
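+/*
+ * Allocate a contiguous, write-combined buffer through the DMA API. The
+ * kernel virtual address is kept around for fbdev and vmap use.
+ */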
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ init_dma_attrs(&rk_obj->dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+
+ /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+ rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
+ &rk_obj->dma_addr, GFP_KERNEL,
+ &rk_obj->dma_attrs);
+ if (IS_ERR(rk_obj->kvaddr)) {
+ int ret = PTR_ERR(rk_obj->kvaddr);
+
+ DRM_ERROR("failed to allocate %#x byte dma buffer, %d",
+ obj->size, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
+ &rk_obj->dma_attrs);
+}
+
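+/* Map the buffer into the given userspace vma with the same DMA attributes. */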
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ unsigned long vm_size;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > obj->size)
+ return -EINVAL;
+
+ return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, &rk_obj->dma_attrs);
+}
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+ int ret;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to find vma node.\n");
+ return -EINVAL;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = rockchip_gem_mmap_buf(obj, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
+ if (!rk_obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &rk_obj->base;
+
+ drm_gem_private_object_init(drm, obj, size);
+
+ ret = rockchip_gem_alloc_buf(rk_obj);
+ if (ret)
+ goto err_free_rk_obj;
+
+ return rk_obj;
+
+err_free_rk_obj:
+ kfree(rk_obj);
+ return ERR_PTR(ret);
+}
+
+/*
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void rockchip_gem_free_object(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj;
+
+ drm_gem_free_mmap_offset(obj);
+
+ rk_obj = to_rockchip_obj(obj);
+
+ rockchip_gem_free_buf(rk_obj);
+
+ kfree(rk_obj);
+}
+
+/*
+ * rockchip_gem_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct rockchip_gem_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct rockchip_gem_object *
+rockchip_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ rk_obj = rockchip_gem_create_object(drm, size);
+ if (IS_ERR(rk_obj))
+ return ERR_CAST(rk_obj);
+
+ obj = &rk_obj->base;
+
+ /*
+ * Allocate an id in the idr table where the object is registered;
+ * the handle returned to userspace refers to this id.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return rk_obj;
+
+err_handle_create:
+ rockchip_gem_free_object(obj);
+
+ return ERR_PTR(ret);
+}
+
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*
+ * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
+ */
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct rockchip_gem_object *rk_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ /*
+ * Align the pitch to 64 bytes, since the Mali GPU requires it.
+ */
+ min_pitch = ALIGN(min_pitch, 64);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
+ &args->handle);
+
+ return PTR_ERR_OR_ZERO(rk_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
+ rk_obj->dma_addr, obj->size,
+ &rk_obj->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to allocate sgt, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+ return rk_obj->kvaddr;
+}
+
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644
index 000000000000..67bcebe90003
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_GEM_H
+#define _ROCKCHIP_DRM_GEM_H
+
+#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
+
+struct rockchip_gem_object {
+ struct drm_gem_object base;
+ unsigned int flags;
+
+ void *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+};
+
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* mmap a gem object to userspace. */
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+
+void rockchip_gem_free_object(struct drm_gem_object *obj);
+
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644
index 000000000000..e7ca25b3fb38
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_vop.h"
+
+#define VOP_REG(off, _mask, s) \
+ {.offset = off, \
+ .mask = _mask, \
+ .shift = s,}
+
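+/*
+ * Register bitfields are updated through a shadow copy (regsbak) so that a
+ * read-modify-write never needs to read back the hardware register. The
+ * RELAXED variant skips the memory barrier and is used for bulk window
+ * programming; the NORMAL variant is used for control registers.
+ */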
+#define __REG_SET_RELAXED(x, off, mask, shift, v) \
+ vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_NORMAL(x, off, mask, shift, v) \
+ vop_mask_write(x, off, (mask) << shift, (v) << shift)
+
+#define REG_SET(x, base, reg, v, mode) \
+ __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+
+#define VOP_WIN_SET(x, win, name, v) \
+ REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_CTRL_SET(x, name, v) \
+ REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
+
+#define VOP_WIN_GET(x, win, name) \
+ vop_read_reg(x, win->base, &win->phy->name)
+
+#define VOP_WIN_GET_YRGBADDR(vop, win) \
+ vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
+
+#define to_vop(x) container_of(x, struct vop, crtc)
+#define to_vop_win(x) container_of(x, struct vop_win, base)
+
+struct vop_win_state {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+ dma_addr_t yrgb_mst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vop_win {
+ struct drm_plane base;
+ const struct vop_win_data *data;
+ struct vop *vop;
+
+ struct list_head pending;
+ struct vop_win_state *active;
+};
+
+struct vop {
+ struct drm_crtc crtc;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ unsigned int dpms;
+
+ int connector_type;
+ int connector_out_mode;
+
+ /* mutex protecting the vsync work state below */
+ struct mutex vsync_mutex;
+ bool vsync_work_pending;
+
+ const struct vop_data *data;
+
+ uint32_t *regsbak;
+ void __iomem *regs;
+
+ /* size of the mapped vop register region */
+ uint32_t len;
+
+ /* only one context at a time is allowed to configure the registers */
+ spinlock_t reg_lock;
+ /* protects the vop interrupt registers */
+ spinlock_t irq_lock;
+
+ unsigned int irq;
+
+ /* vop AHB clk */
+ struct clk *hclk;
+ /* vop dclk */
+ struct clk *dclk;
+ /* vop shared memory clock (aclk) */
+ struct clk *aclk;
+
+ /* vop dclk reset */
+ struct reset_control *dclk_rst;
+
+ int pipe;
+
+ struct vop_win win[];
+};
+
+enum vop_data_format {
+ VOP_FMT_ARGB8888 = 0,
+ VOP_FMT_RGB888,
+ VOP_FMT_RGB565,
+ VOP_FMT_YUV420SP = 4,
+ VOP_FMT_YUV422SP,
+ VOP_FMT_YUV444SP,
+};
+
+struct vop_reg_data {
+ uint32_t offset;
+ uint32_t value;
+};
+
+struct vop_reg {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+};
+
+struct vop_ctrl {
+ struct vop_reg standby;
+ struct vop_reg data_blank;
+ struct vop_reg gate_en;
+ struct vop_reg mmu_en;
+ struct vop_reg rgb_en;
+ struct vop_reg edp_en;
+ struct vop_reg hdmi_en;
+ struct vop_reg mipi_en;
+ struct vop_reg out_mode;
+ struct vop_reg dither_down;
+ struct vop_reg dither_up;
+ struct vop_reg pin_pol;
+
+ struct vop_reg htotal_pw;
+ struct vop_reg hact_st_end;
+ struct vop_reg vtotal_pw;
+ struct vop_reg vact_st_end;
+ struct vop_reg hpost_st_end;
+ struct vop_reg vpost_st_end;
+};
+
+struct vop_win_phy {
+ const uint32_t *data_formats;
+ uint32_t nformats;
+
+ struct vop_reg enable;
+ struct vop_reg format;
+ struct vop_reg act_info;
+ struct vop_reg dsp_info;
+ struct vop_reg dsp_st;
+ struct vop_reg yrgb_mst;
+ struct vop_reg uv_mst;
+ struct vop_reg yrgb_vir;
+ struct vop_reg uv_vir;
+
+ struct vop_reg dst_alpha_ctl;
+ struct vop_reg src_alpha_ctl;
+};
+
+struct vop_win_data {
+ uint32_t base;
+ const struct vop_win_phy *phy;
+ enum drm_plane_type type;
+};
+
+struct vop_data {
+ const struct vop_reg_data *init_table;
+ unsigned int table_size;
+ const struct vop_ctrl *ctrl;
+ const struct vop_win_data *win;
+ unsigned int win_size;
+};
+
+static const uint32_t formats_01[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+};
+
+static const uint32_t formats_234[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+};
+
+static const struct vop_win_phy win01_data = {
+ .data_formats = formats_01,
+ .nformats = ARRAY_SIZE(formats_01),
+ .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy win23_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
+ .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy cursor_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
+ .format = VOP_REG(HWC_CTRL0, 0x7, 1),
+ .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
+};
+
+static const struct vop_ctrl ctrl_data = {
+ .standby = VOP_REG(SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
+ .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
+ .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
+ .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
+ .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
+ .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
+ .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+ .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+};
+
+static const struct vop_reg_data vop_init_reg_table[] = {
+ {SYS_CTRL, 0x00c00000},
+ {DSP_CTRL0, 0x00000000},
+ {WIN0_CTRL0, 0x00000080},
+ {WIN1_CTRL0, 0x00000080},
+};
+
+/*
+ * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
+ * special support to get alpha blending working. For now, just use overlay
+ * window 1 for the drm cursor.
+ */
+static const struct vop_win_data rk3288_vop_win_data[] = {
+ { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+};
+
+static const struct vop_data rk3288_vop = {
+ .init_table = vop_init_reg_table,
+ .table_size = ARRAY_SIZE(vop_init_reg_table),
+ .ctrl = &ctrl_data,
+ .win = rk3288_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3288_vop_win_data),
+};
+
+static const struct of_device_id vop_driver_dt_match[] = {
+ { .compatible = "rockchip,rk3288-vop",
+ .data = &rk3288_vop },
+ {},
+};
+
+static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
+{
+ writel(v, vop->regs + offset);
+ vop->regsbak[offset >> 2] = v;
+}
+
+static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
+{
+ return readl(vop->regs + offset);
+}
+
+static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
+ const struct vop_reg *reg)
+{
+ return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
+}
+
+static inline void vop_cfg_done(struct vop *vop)
+{
+ writel(0x01, vop->regs + REG_CFG_DONE);
+}
+
+static inline void vop_mask_write(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
+
+static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel_relaxed(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
+
+static enum vop_data_format vop_convert_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return VOP_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return VOP_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return VOP_FMT_RGB565;
+ case DRM_FORMAT_NV12:
+ return VOP_FMT_YUV420SP;
+ case DRM_FORMAT_NV16:
+ return VOP_FMT_YUV422SP;
+ case DRM_FORMAT_NV24:
+ return VOP_FMT_YUV444SP;
+ default:
+ DRM_ERROR("unsupport format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool is_alpha_support(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void vop_enable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
+ return;
+ }
+
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ goto err_disable_hclk;
+ }
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+ goto err_disable_dclk;
+ }
+
+ /*
+ * The slave iommu shares power, irq and clock with the vop. It was
+ * associated automatically with this master device via common driver
+ * code. Now that the clock is enabled, attach it to the shared drm
+ * mapping.
+ */
+ ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
+ if (ret) {
+ dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 0);
+
+ spin_unlock(&vop->reg_lock);
+
+ enable_irq(vop->irq);
+
+ drm_vblank_on(vop->drm_dev, vop->pipe);
+
+ return;
+
+err_disable_aclk:
+ clk_disable(vop->aclk);
+err_disable_dclk:
+ clk_disable(vop->dclk);
+err_disable_hclk:
+ clk_disable(vop->hclk);
+}
+
+static void vop_disable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+
+ drm_vblank_off(crtc->dev, vop->pipe);
+
+ disable_irq(vop->irq);
+
+ /*
+ * TODO: Since standby doesn't take effect until the next vblank,
+ * when we turn off dclk below, the vop is probably still active.
+ */
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 1);
+
+ spin_unlock(&vop->reg_lock);
+ /*
+ * Disable dclk to stop the frame scan, so that the iommu can be
+ * detached safely.
+ */
+ clk_disable(vop->dclk);
+
+ rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
+{
+ struct vop_win_state *last;
+ struct vop_win_state *active = vop_win->active;
+
+ if (list_empty(&vop_win->pending))
+ return active ? active->fb : NULL;
+
+ last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
+ return last ? last->fb : NULL;
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static int vop_win_queue_fb(struct vop_win *vop_win,
+ struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->fb = fb;
+ state->yrgb_mst = yrgb_mst;
+ state->event = event;
+
+ list_add_tail(&state->head, &vop_win->pending);
+
+ return 0;
+}
+
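+/*
+ * Program a window for the given plane update. If the framebuffer differs
+ * from the last queued one, take a vblank reference and queue the update so
+ * the old framebuffer can be released once the hardware has latched the new
+ * buffer address at the next frame start.
+ */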
+static int vop_update_plane_event(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop = to_vop(crtc);
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ unsigned long offset;
+ unsigned int actual_w;
+ unsigned int actual_h;
+ unsigned int dsp_stx;
+ unsigned int dsp_sty;
+ unsigned int y_vir_stride;
+ dma_addr_t yrgb_mst;
+ enum vop_data_format format;
+ uint32_t val;
+ bool is_alpha;
+ bool visible;
+ int ret;
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, false, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ return 0;
+
+ is_alpha = is_alpha_support(fb->pixel_format);
+ format = vop_convert_format(fb->pixel_format);
+ if (format < 0)
+ return format;
+
+ obj = rockchip_fb_get_gem_obj(fb, 0);
+ if (!obj) {
+ DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
+ return -EINVAL;
+ }
+
+ rk_obj = to_rockchip_obj(obj);
+
+ actual_w = (src.x2 - src.x1) >> 16;
+ actual_h = (src.y2 - src.y1) >> 16;
+ crtc_x = max(0, crtc_x);
+ crtc_y = max(0, crtc_y);
+
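+ /*
+ * The display start registers take coordinates relative to the sync
+ * start, so offset the crtc position by sync + back porch
+ * (total - sync_start).
+ */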
+ dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
+ dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+
+ offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+ offset += (src.y1 >> 16) * fb->pitches[0];
+ yrgb_mst = rk_obj->dma_addr + offset;
+
+ y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+
+ /*
+ * If this plane update changes the plane's framebuffer (or, more
+ * precisely, if this update has a different framebuffer than the
+ * last update), enqueue it so we can track when it completes.
+ *
+ * Only once we know that this update has completed can we
+ * unreference any previous framebuffers.
+ */
+ mutex_lock(&vop->vsync_mutex);
+ if (fb != vop_win_last_pending_fb(vop_win)) {
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ }
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_WIN_SET(vop, win, format, format);
+ VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
+ VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+ val = (actual_h - 1) << 16;
+ val |= (actual_w - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, act_info, val);
+ VOP_WIN_SET(vop, win, dsp_info, val);
+ val = (dsp_sty - 1) << 16;
+ val |= (dsp_stx - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, dsp_st, val);
+
+ if (is_alpha) {
+ VOP_WIN_SET(vop, win, dst_alpha_ctl,
+ DST_FACTOR_M0(ALPHA_SRC_INVERSE));
+ val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+ SRC_ALPHA_M0(ALPHA_STRAIGHT) |
+ SRC_BLEND_M0(ALPHA_PER_PIX) |
+ SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
+ SRC_FACTOR_M0(ALPHA_ONE);
+ VOP_WIN_SET(vop, win, src_alpha_ctl, val);
+ } else {
+ VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
+ }
+
+ VOP_WIN_SET(vop, win, enable, 1);
+
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
+ crtc_h, src_x, src_y, src_w, src_h,
+ NULL);
+}
+
+static int vop_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *event)
+{
+ unsigned int crtc_w, crtc_h;
+
+ crtc_w = crtc->primary->fb->width - crtc->x;
+ crtc_h = crtc->primary->fb->height - crtc->y;
+
+ return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, crtc_w, crtc_h, crtc->x << 16,
+ crtc->y << 16, crtc_w << 16,
+ crtc_h << 16, event);
+}
+
+static int vop_disable_plane(struct drm_plane *plane)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop;
+ int ret;
+
+ if (!plane->crtc)
+ return 0;
+
+ vop = to_vop(plane->crtc);
+
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&vop->vsync_mutex);
+
+ ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static void vop_plane_destroy(struct drm_plane *plane)
+{
+ vop_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs vop_plane_funcs = {
+ .update_plane = vop_update_plane,
+ .disable_plane = vop_disable_plane,
+ .destroy = vop_plane_destroy,
+};
+
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
+ int connector_type,
+ int out_mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ vop->connector_type = connector_type;
+ vop->connector_out_mode = out_mode;
+
+ return 0;
+}
+
+static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ return 0;
+}
+
+static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return;
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static const struct rockchip_crtc_funcs private_crtc_funcs = {
+ .enable_vblank = vop_crtc_enable_vblank,
+ .disable_vblank = vop_crtc_disable_vblank,
+};
+
+static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (vop->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vop_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vop_disable(crtc);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ vop->dpms = mode;
+}
+
+static void vop_crtc_prepare(struct drm_crtc *crtc)
+{
+ vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
+ return false;
+
+ return true;
+}
+
+static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ int ret;
+
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = vop_update_primary_plane(crtc, NULL);
+ if (ret < 0) {
+ DRM_ERROR("fail to update plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
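+/*
+ * Program the display timings: route the output to the selected connector,
+ * set the sync polarities and the horizontal/vertical timing windows, then
+ * reset and restart dclk at the new pixel clock rate.
+ */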
+static int vop_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct vop *vop = to_vop(crtc);
+ u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ u16 hdisplay = adjusted_mode->hdisplay;
+ u16 htotal = adjusted_mode->htotal;
+ u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ u16 hact_end = hact_st + hdisplay;
+ u16 vdisplay = adjusted_mode->vdisplay;
+ u16 vtotal = adjusted_mode->vtotal;
+ u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ u16 vact_end = vact_st + vdisplay;
+ int ret;
+ uint32_t val;
+
+ /*
+ * Disable dclk to stop the frame scan, so that the mode can be
+ * configured and the iommu enabled safely.
+ */
+ clk_disable(vop->dclk);
+
+ switch (vop->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ VOP_CTRL_SET(vop, rgb_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ VOP_CTRL_SET(vop, edp_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_CTRL_SET(vop, hdmi_en, 1);
+ break;
+ default:
+ DRM_ERROR("unsupport connector_type[%d]\n",
+ vop->connector_type);
+ return -EINVAL;
+ };
+ VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
+
+ val = 0x8;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+ VOP_CTRL_SET(vop, pin_pol, val);
+
+ VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+ val = hact_st << 16;
+ val |= hact_end;
+ VOP_CTRL_SET(vop, hact_st_end, val);
+ VOP_CTRL_SET(vop, hpost_st_end, val);
+
+ VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
+ val = vact_st << 16;
+ val |= vact_end;
+ VOP_CTRL_SET(vop, vact_st_end, val);
+ VOP_CTRL_SET(vop, vpost_st_end, val);
+
+ ret = vop_crtc_mode_set_base(crtc, x, y, fb);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset dclk so that the whole mode configuration takes effect and
+ * the clock starts on a clean frame.
+ */
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vop_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
+ .dpms = vop_crtc_dpms,
+ .prepare = vop_crtc_prepare,
+ .mode_fixup = vop_crtc_mode_fixup,
+ .mode_set = vop_crtc_mode_set,
+ .mode_set_base = vop_crtc_mode_set_base,
+ .commit = vop_crtc_commit,
+};
+
+static int vop_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct vop *vop = to_vop(crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ int ret;
+
+ /* when the page flip is requested, crtc's dpms should be on */
+ if (vop->dpms > DRM_MODE_DPMS_ON) {
+ DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+ return 0;
+ }
+
+ crtc->primary->fb = fb;
+
+ ret = vop_update_primary_plane(crtc, event);
+ if (ret)
+ crtc->primary->fb = old_fb;
+
+ return ret;
+}
+
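+/*
+ * Complete a queued window state: deliver the pending vblank event (if any),
+ * drop the state from the pending list and release the vblank reference
+ * taken when the update was queued.
+ */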
+static void vop_win_state_complete(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ struct vop *vop = vop_win->vop;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ if (state->event) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, -1, state->event);
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ list_del(&state->head);
+ drm_vblank_put(crtc->dev, vop->pipe);
+}
+
+static void vop_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+}
+
+static const struct drm_crtc_funcs vop_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = vop_crtc_page_flip,
+ .destroy = vop_crtc_destroy,
+};
+
+static bool vop_win_state_is_active(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ bool active = false;
+
+ if (state->fb) {
+ dma_addr_t yrgb_mst;
+
+ /* check yrgb_mst to tell if pending_fb is now front */
+ yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+
+ active = (yrgb_mst == state->yrgb_mst);
+ } else {
+ bool enabled;
+
+ /* if enable bit is clear, plane is now disabled */
+ enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
+
+ active = (enabled == 0);
+ }
+
+ return active;
+}
+
+static void vop_win_state_destroy(struct vop_win_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ kfree(state);
+}
+
+static void vop_win_update_state(struct vop_win *vop_win)
+{
+ struct vop_win_state *state, *n, *new_active = NULL;
+
+ /* Check if any pending states are now active */
+ list_for_each_entry(state, &vop_win->pending, head)
+ if (vop_win_state_is_active(vop_win, state)) {
+ new_active = state;
+ break;
+ }
+
+ if (!new_active)
+ return;
+
+ /*
+ * Destroy any 'skipped' pending states - states that were queued
+ * before the newly active state.
+ */
+ list_for_each_entry_safe(state, n, &vop_win->pending, head) {
+ if (state == new_active)
+ break;
+ vop_win_state_complete(vop_win, state);
+ vop_win_state_destroy(state);
+ }
+
+ vop_win_state_complete(vop_win, new_active);
+
+ if (vop_win->active)
+ vop_win_state_destroy(vop_win->active);
+ vop_win->active = new_active;
+}
+
+static bool vop_win_has_pending_state(struct vop_win *vop_win)
+{
+ return !list_empty(&vop_win->pending);
+}
+
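+/*
+ * Threaded half of the frame-start interrupt: walk every window's pending
+ * list and complete the updates that the hardware has latched, re-arming the
+ * work if any window still has updates pending.
+ */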
+static irqreturn_t vop_isr_thread(int irq, void *data)
+{
+ struct vop *vop = data;
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ mutex_lock(&vop->vsync_mutex);
+
+ if (!vop->vsync_work_pending)
+ goto done;
+
+ vop->vsync_work_pending = false;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+
+ vop_win_update_state(vop_win);
+ if (vop_win_has_pending_state(vop_win))
+ vop->vsync_work_pending = true;
+ }
+
+done:
+ mutex_unlock(&vop->vsync_mutex);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vop_isr(int irq, void *data)
+{
+ struct vop *vop = data;
+ uint32_t intr0_reg, active_irqs;
+ unsigned long flags;
+
+ /*
+ * INTR_CTRL0 register has interrupt status, enable and clear bits, we
+ * must hold irq_lock to avoid a race with enable/disable_vblank().
+ */
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ intr0_reg = vop_readl(vop, INTR_CTRL0);
+ active_irqs = intr0_reg & INTR_MASK;
+ /* Clear all active interrupt sources */
+ if (active_irqs)
+ vop_writel(vop, INTR_CTRL0,
+ intr0_reg | (active_irqs << INTR_CLR_SHIFT));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ /* This is expected for vop iommu irqs, since the irq is shared */
+ if (!active_irqs)
+ return IRQ_NONE;
+
+ /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
+ if (!(active_irqs & FS_INTR)) {
+ DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+ return IRQ_NONE;
+ }
+
+ drm_handle_vblank(vop->drm_dev, vop->pipe);
+
+ return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static int vop_create_crtc(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ struct device *dev = vop->dev;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct device_node *port;
+ int ret;
+ int i;
+
+ /*
+ * Create drm_plane for primary and cursor planes first, since we need
+ * to pass them to drm_crtc_init_with_planes, which sets the
+ * "possible_crtcs" to the newly initialized crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+
+ if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
+ win_data->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ 0, &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_cleanup_planes;
+ }
+
+ plane = &vop_win->base;
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ primary = plane;
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor = plane;
+ }
+
+ ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &vop_crtc_funcs);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+
+ /*
+ * Create drm_planes for overlay windows with possible_crtcs restricted
+ * to the newly created crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+ unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+
+ if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ possible_crtcs,
+ &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize overlay plane\n");
+ goto err_cleanup_crtc;
+ }
+ }
+
+ port = of_get_child_by_name(dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %s\n",
+ dev->of_node->full_name);
+ ret = -ENOENT;
+ goto err_cleanup_crtc;
+ }
+
+ crtc->port = port;
+ vop->pipe = drm_crtc_index(crtc);
+ rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
+
+ return 0;
+
+err_cleanup_crtc:
+ drm_crtc_cleanup(crtc);
+err_cleanup_planes:
+ list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ drm_plane_cleanup(plane);
+ return ret;
+}
+
+static void vop_destroy_crtc(struct vop *vop)
+{
+ struct drm_crtc *crtc = &vop->crtc;
+
+ rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+}
+
+static int vop_initial(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ const struct vop_reg_data *init_table = vop_data->init_table;
+ struct reset_control *ahb_rst;
+ int i, ret;
+
+ vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
+ if (IS_ERR(vop->hclk)) {
+ dev_err(vop->dev, "failed to get hclk source\n");
+ return PTR_ERR(vop->hclk);
+ }
+ vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
+ if (IS_ERR(vop->aclk)) {
+ dev_err(vop->dev, "failed to get aclk source\n");
+ return PTR_ERR(vop->aclk);
+ }
+ vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
+ if (IS_ERR(vop->dclk)) {
+ dev_err(vop->dev, "failed to get dclk source\n");
+ return PTR_ERR(vop->dclk);
+ }
+
+ ret = clk_prepare(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare hclk\n");
+ return ret;
+ }
+
+ ret = clk_prepare(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare dclk\n");
+ goto err_unprepare_hclk;
+ }
+
+ ret = clk_prepare(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_dclk;
+ }
+
+ /*
+ * Enable hclk so that the vop registers can be configured.
+ */
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_aclk;
+ }
+ /*
+ * Perform an AHB (hclk) reset to bring all vop registers back to
+ * their default state.
+ */
+ ahb_rst = devm_reset_control_get(vop->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(vop->dev, "failed to get ahb reset\n");
+ ret = PTR_ERR(ahb_rst);
+ goto err_disable_hclk;
+ }
+ reset_control_assert(ahb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(ahb_rst);
+
+ memcpy(vop->regsbak, vop->regs, vop->len);
+
+ for (i = 0; i < vop_data->table_size; i++)
+ vop_writel(vop, init_table[i].offset, init_table[i].value);
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ const struct vop_win_data *win = &vop_data->win[i];
+
+ VOP_WIN_SET(vop, win, enable, 0);
+ }
+
+ vop_cfg_done(vop);
+
+ /*
+ * Perform a dclk reset so that all of the configuration takes effect.
+ */
+ vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
+ if (IS_ERR(vop->dclk_rst)) {
+ dev_err(vop->dev, "failed to get dclk reset\n");
+ ret = PTR_ERR(vop->dclk_rst);
+ goto err_unprepare_aclk;
+ }
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_disable(vop->hclk);
+
+ vop->dpms = DRM_MODE_DPMS_OFF;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+err_unprepare_aclk:
+ clk_unprepare(vop->aclk);
+err_unprepare_dclk:
+ clk_unprepare(vop->dclk);
+err_unprepare_hclk:
+ clk_unprepare(vop->hclk);
+ return ret;
+}
+
+/*
+ * Initialize the vop->win array elements.
+ */
+static void vop_win_init(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = &vop_data->win[i];
+
+ vop_win->data = win_data;
+ vop_win->vop = vop;
+ INIT_LIST_HEAD(&vop_win->pending);
+ }
+}
+
+static int vop_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ const struct vop_data *vop_data;
+ struct drm_device *drm_dev = data;
+ struct vop *vop;
+ struct resource *res;
+ size_t alloc_size;
+ int ret;
+
+ of_id = of_match_device(vop_driver_dt_match, dev);
+ vop_data = of_id->data;
+ if (!vop_data)
+ return -ENODEV;
+
+ /* Allocate vop struct and its vop_win array */
+ alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
+ vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!vop)
+ return -ENOMEM;
+
+ vop->dev = dev;
+ vop->data = vop_data;
+ vop->drm_dev = drm_dev;
+ dev_set_drvdata(dev, vop);
+
+ vop_win_init(vop);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vop->len = resource_size(res);
+ vop->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->regs))
+ return PTR_ERR(vop->regs);
+
+ vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
+ if (!vop->regsbak)
+ return -ENOMEM;
+
+ ret = vop_initial(vop);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "cannot find irq for vop\n");
+ return ret;
+ }
+ vop->irq = (unsigned int) ret;
+
+ spin_lock_init(&vop->reg_lock);
+ spin_lock_init(&vop->irq_lock);
+
+ mutex_init(&vop->vsync_mutex);
+
+ ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
+ IRQF_SHARED, dev_name(dev), vop);
+ if (ret)
+ return ret;
+
+ /* IRQ is initially disabled; it gets enabled in vop_enable() */
+ disable_irq(vop->irq);
+
+ ret = vop_create_crtc(vop);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+}
+
+static void vop_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct vop *vop = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ vop_destroy_crtc(vop);
+}
+
+static const struct component_ops vop_component_ops = {
+ .bind = vop_bind,
+ .unbind = vop_unbind,
+};
+
+static int vop_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "can't find vop devices\n");
+ return -ENODEV;
+ }
+
+ return component_add(dev, &vop_component_ops);
+}
+
+static int vop_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vop_component_ops);
+
+ return 0;
+}
+
+struct platform_driver vop_platform_driver = {
+ .probe = vop_probe,
+ .remove = vop_remove,
+ .driver = {
+ .name = "rockchip-vop",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(vop_driver_dt_match),
+ },
+};
+
+module_platform_driver(vop_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644
index 000000000000..63e9b3a084c5
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_VOP_H
+#define _ROCKCHIP_DRM_VOP_H
+
+/* register definition */
+#define REG_CFG_DONE 0x0000
+#define VERSION_INFO 0x0004
+#define SYS_CTRL 0x0008
+#define SYS_CTRL1 0x000c
+#define DSP_CTRL0 0x0010
+#define DSP_CTRL1 0x0014
+#define DSP_BG 0x0018
+#define MCU_CTRL 0x001c
+#define INTR_CTRL0 0x0020
+#define INTR_CTRL1 0x0024
+#define WIN0_CTRL0 0x0030
+#define WIN0_CTRL1 0x0034
+#define WIN0_COLOR_KEY 0x0038
+#define WIN0_VIR 0x003c
+#define WIN0_YRGB_MST 0x0040
+#define WIN0_CBR_MST 0x0044
+#define WIN0_ACT_INFO 0x0048
+#define WIN0_DSP_INFO 0x004c
+#define WIN0_DSP_ST 0x0050
+#define WIN0_SCL_FACTOR_YRGB 0x0054
+#define WIN0_SCL_FACTOR_CBR 0x0058
+#define WIN0_SCL_OFFSET 0x005c
+#define WIN0_SRC_ALPHA_CTRL 0x0060
+#define WIN0_DST_ALPHA_CTRL 0x0064
+#define WIN0_FADING_CTRL 0x0068
+/* win1 register */
+#define WIN1_CTRL0 0x0070
+#define WIN1_CTRL1 0x0074
+#define WIN1_COLOR_KEY 0x0078
+#define WIN1_VIR 0x007c
+#define WIN1_YRGB_MST 0x0080
+#define WIN1_CBR_MST 0x0084
+#define WIN1_ACT_INFO 0x0088
+#define WIN1_DSP_INFO 0x008c
+#define WIN1_DSP_ST 0x0090
+#define WIN1_SCL_FACTOR_YRGB 0x0094
+#define WIN1_SCL_FACTOR_CBR 0x0098
+#define WIN1_SCL_OFFSET 0x009c
+#define WIN1_SRC_ALPHA_CTRL 0x00a0
+#define WIN1_DST_ALPHA_CTRL 0x00a4
+#define WIN1_FADING_CTRL 0x00a8
+/* win2 register */
+#define WIN2_CTRL0 0x00b0
+#define WIN2_CTRL1 0x00b4
+#define WIN2_VIR0_1 0x00b8
+#define WIN2_VIR2_3 0x00bc
+#define WIN2_MST0 0x00c0
+#define WIN2_DSP_INFO0 0x00c4
+#define WIN2_DSP_ST0 0x00c8
+#define WIN2_COLOR_KEY 0x00cc
+#define WIN2_MST1 0x00d0
+#define WIN2_DSP_INFO1 0x00d4
+#define WIN2_DSP_ST1 0x00d8
+#define WIN2_SRC_ALPHA_CTRL 0x00dc
+#define WIN2_MST2 0x00e0
+#define WIN2_DSP_INFO2 0x00e4
+#define WIN2_DSP_ST2 0x00e8
+#define WIN2_DST_ALPHA_CTRL 0x00ec
+#define WIN2_MST3 0x00f0
+#define WIN2_DSP_INFO3 0x00f4
+#define WIN2_DSP_ST3 0x00f8
+#define WIN2_FADING_CTRL 0x00fc
+/* win3 register */
+#define WIN3_CTRL0 0x0100
+#define WIN3_CTRL1 0x0104
+#define WIN3_VIR0_1 0x0108
+#define WIN3_VIR2_3 0x010c
+#define WIN3_MST0 0x0110
+#define WIN3_DSP_INFO0 0x0114
+#define WIN3_DSP_ST0 0x0118
+#define WIN3_COLOR_KEY 0x011c
+#define WIN3_MST1 0x0120
+#define WIN3_DSP_INFO1 0x0124
+#define WIN3_DSP_ST1 0x0128
+#define WIN3_SRC_ALPHA_CTRL 0x012c
+#define WIN3_MST2 0x0130
+#define WIN3_DSP_INFO2 0x0134
+#define WIN3_DSP_ST2 0x0138
+#define WIN3_DST_ALPHA_CTRL 0x013c
+#define WIN3_MST3 0x0140
+#define WIN3_DSP_INFO3 0x0144
+#define WIN3_DSP_ST3 0x0148
+#define WIN3_FADING_CTRL 0x014c
+/* hwc register */
+#define HWC_CTRL0 0x0150
+#define HWC_CTRL1 0x0154
+#define HWC_MST 0x0158
+#define HWC_DSP_ST 0x015c
+#define HWC_SRC_ALPHA_CTRL 0x0160
+#define HWC_DST_ALPHA_CTRL 0x0164
+#define HWC_FADING_CTRL 0x0168
+/* post process register */
+#define POST_DSP_HACT_INFO 0x0170
+#define POST_DSP_VACT_INFO 0x0174
+#define POST_SCL_FACTOR_YRGB 0x0178
+#define POST_SCL_CTRL 0x0180
+#define POST_DSP_VACT_INFO_F1 0x0184
+#define DSP_HTOTAL_HS_END 0x0188
+#define DSP_HACT_ST_END 0x018c
+#define DSP_VTOTAL_VS_END 0x0190
+#define DSP_VACT_ST_END 0x0194
+#define DSP_VS_ST_END_F1 0x0198
+#define DSP_VACT_ST_END_F1 0x019c
+/* register definition end */
+
+/* interrupt define */
+#define DSP_HOLD_VALID_INTR (1 << 0)
+#define FS_INTR (1 << 1)
+#define LINE_FLAG_INTR (1 << 2)
+#define BUS_ERROR_INTR (1 << 3)
+
+#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \
+ LINE_FLAG_INTR | BUS_ERROR_INTR)
+
+#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4)
+#define FS_INTR_EN(x) ((x) << 5)
+#define LINE_FLAG_INTR_EN(x) ((x) << 6)
+#define BUS_ERROR_INTR_EN(x) ((x) << 7)
+#define DSP_HOLD_VALID_INTR_MASK (1 << 4)
+#define FS_INTR_MASK (1 << 5)
+#define LINE_FLAG_INTR_MASK (1 << 6)
+#define BUS_ERROR_INTR_MASK (1 << 7)
+
+#define INTR_CLR_SHIFT 8
+#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0))
+#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1))
+#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2))
+#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3))
+
+#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12)
+#define DSP_LINE_NUM_MASK (0x1fff << 12)
+
+/* src alpha ctrl define */
+#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24)
+#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16)
+#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6)
+#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5)
+#define SRC_BLEND_M0(x) (((x) & 0x3) << 3)
+#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2)
+#define SRC_COLOR_M0(x) (((x) & 0x1) << 1)
+#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0)
+/* dst alpha ctrl define */
+#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
+
+/*
+ * display output interfaces supported by the rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+/* for using a special output interface */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+enum alpha_mode {
+ ALPHA_STRAIGHT,
+ ALPHA_INVERSE,
+};
+
+enum global_blend_mode {
+ ALPHA_GLOBAL,
+ ALPHA_PER_PIX,
+ ALPHA_PER_PIX_GLOBAL,
+};
+
+enum alpha_cal_mode {
+ ALPHA_SATURATION,
+ ALPHA_NO_SATURATION,
+};
+
+enum color_mode {
+ ALPHA_SRC_PRE_MUL,
+ ALPHA_SRC_NO_PRE_MUL,
+};
+
+enum factor_mode {
+ ALPHA_ZERO,
+ ALPHA_ONE,
+ ALPHA_SRC,
+ ALPHA_SRC_INVERSE,
+ ALPHA_SRC_GLOBAL,
+};
+
+#endif /* _ROCKCHIP_DRM_VOP_H */
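To make the interrupt layout above concrete: raw status sits in bits [3:0], the enable (unmask) bits in [7:4] and the clear bits in [11:8]. The standalone program below re-declares a subset of the macros and composes the words a driver would write; which register each word is written to, and whether clearing is write-1-to-clear, are assumptions here — only the bit positions come from this header.

    #include <stdint.h>
    #include <stdio.h>

    /* subset of the definitions above */
    #define FS_INTR			(1u << 1)
    #define LINE_FLAG_INTR		(1u << 2)
    #define FS_INTR_EN(x)		((x) << 5)
    #define LINE_FLAG_INTR_EN(x)	((x) << 6)
    #define INTR_CLR_SHIFT		8
    #define FS_INTR_CLR		(1u << (INTR_CLR_SHIFT + 1))

    int main(void)
    {
    	uint32_t enable = FS_INTR_EN(1) | LINE_FLAG_INTR_EN(1); /* unmask two sources */
    	uint32_t status = FS_INTR;		/* pretend a frame-start fired */
    	uint32_t ack = FS_INTR_CLR;		/* assumed write-1-to-clear word */

    	if (status & FS_INTR)
    		printf("enable=0x%03x ack=0x%03x\n",
    		       (unsigned)enable, (unsigned)ack);
    	return 0;
    }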
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0ddce4d046d9..859ccb658601 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include <video/sh_mobile_meram.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index e62cbde81e50..666321de7b99 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -348,7 +348,6 @@ static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove = shmob_drm_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "shmob-drm",
.pm = &shmob_drm_pm_ops,
},
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index ae8850f3e63b..d6d6b705b8c1 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,6 +5,7 @@ config DRM_STI
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select FW_LOADER_USER_HELPER_FALLBACK
help
Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index 04ac2ceef27f..6ba9d27c1b90 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -3,6 +3,7 @@ sticompositor-y := \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
+ sti_cursor.o \
sti_compositor.o \
sti_drm_crtc.o \
sti_drm_plane.o
@@ -18,4 +19,5 @@ obj-$(CONFIG_DRM_STI) = \
sti_hda.o \
sti_tvout.o \
sticompositor.o \
- sti_drm_drv.o \ No newline at end of file
+ sti_hqvdp.o \
+ sti_drm_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 390d93e9a06c..43215d3020fb 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -24,14 +24,16 @@
* stiH407 compositor properties
*/
struct sti_compositor_data stih407_compositor_data = {
- .nb_subdev = 6,
+ .nb_subdev = 8,
.subdev_desc = {
+ {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
{STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
- {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
+ {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
};
@@ -67,11 +69,11 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
break;
case STI_GPD_SUBDEV:
case STI_VID_SUBDEV:
+ case STI_CURSOR_SUBDEV:
compo->layer[layer_id++] =
sti_layer_create(compo->dev, desc[i].id,
compo->regs + desc[i].offset);
break;
- /* case STI_CURSOR_SUBDEV : TODO */
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
@@ -102,33 +104,35 @@ static int sti_compositor_bind(struct device *dev, struct device *master,
enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
- if (compo->mixer[crtc])
+ if (crtc < compo->nb_mixers)
plane_type = DRM_PLANE_TYPE_PRIMARY;
switch (type) {
case STI_CUR:
cursor = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1,
- DRM_PLANE_TYPE_CURSOR);
+ 1, DRM_PLANE_TYPE_CURSOR);
break;
case STI_GDP:
case STI_VID:
primary = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1, plane_type);
+ (1 << compo->nb_mixers) - 1,
+ plane_type);
plane++;
break;
case STI_BCK:
+ case STI_VDP:
break;
}
/* The first planes are reserved for primary planes*/
- if (compo->mixer[crtc]) {
+ if (crtc < compo->nb_mixers && primary) {
sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
primary, cursor);
crtc++;
cursor = NULL;
+ primary = NULL;
}
}
}
@@ -267,7 +271,6 @@ static int sti_compositor_remove(struct platform_device *pdev)
static struct platform_driver sti_compositor_driver = {
.driver = {
.name = "sti-compositor",
- .owner = THIS_MODULE,
.of_match_table = compositor_of_match,
},
.probe = sti_compositor_probe,
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 3ea19db72e0f..019eb44c62cc 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -64,7 +64,6 @@ struct sti_compositor_data {
* @layer: array of layers
* @nb_mixers: number of mixers for this compositor
* @nb_layers: number of layers (GDP,VID,...) for this compositor
- * @enable: true if compositor is enable else false
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@@ -83,7 +82,6 @@ struct sti_compositor {
struct sti_layer *layer[STI_MAX_LAYER];
int nb_mixers;
int nb_layers;
- bool enable;
struct notifier_block vtg_vblank_nb;
};
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
new file mode 100644
index 000000000000..010eaee60bf7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Vincent Abriou <vincent.abriou@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <drm/drmP.h>
+
+#include "sti_cursor.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define CUR_CTL 0x00
+#define CUR_VPO 0x0C
+#define CUR_PML 0x14
+#define CUR_PMP 0x18
+#define CUR_SIZE 0x1C
+#define CUR_CML 0x20
+#define CUR_AWS 0x28
+#define CUR_AWE 0x2C
+
+#define CUR_CTL_CLUT_UPDATE BIT(1)
+
+#define STI_CURS_MIN_SIZE 1
+#define STI_CURS_MAX_SIZE 128
+
+/*
+ * pixmap dma buffer structure
+ *
+ * @paddr: physical address
+ * @size: buffer size
+ * @base: virtual address
+ */
+struct dma_pixmap {
+ dma_addr_t paddr;
+ size_t size;
+ void *base;
+};
+
+/**
+ * STI Cursor structure
+ *
+ * @layer: layer structure
+ * @width: cursor width
+ * @height: cursor height
+ * @clut: color look up table
+ * @clut_paddr: color look up table physical address
+ * @pixmap: pixmap dma buffer (clut8-format cursor)
+ */
+struct sti_cursor {
+ struct sti_layer layer;
+ unsigned int width;
+ unsigned int height;
+ unsigned short *clut;
+ dma_addr_t clut_paddr;
+ struct dma_pixmap pixmap;
+};
+
+static const uint32_t cursor_supported_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
+
+static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
+{
+ return cursor_supported_formats;
+}
+
+static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(cursor_supported_formats);
+}
+
+static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ u32 *src = layer->vaddr;
+ u8 *dst = cursor->pixmap.base;
+ unsigned int i, j;
+ u32 a, r, g, b;
+
+ for (i = 0; i < cursor->height; i++) {
+ for (j = 0; j < cursor->width; j++) {
+ /* Keep only the 2 most significant bits of each component */
+ a = (*src >> 30) & 3;
+ r = (*src >> 22) & 3;
+ g = (*src >> 14) & 3;
+ b = (*src >> 6) & 3;
+ *dst = a << 6 | r << 4 | g << 2 | b;
+ src++;
+ dst++;
+ }
+ }
+}
+
+static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 y, x;
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ if (layer->src_w < STI_CURS_MIN_SIZE ||
+ layer->src_h < STI_CURS_MIN_SIZE ||
+ layer->src_w > STI_CURS_MAX_SIZE ||
+ layer->src_h > STI_CURS_MAX_SIZE) {
+ DRM_ERROR("Invalid cursor size (%dx%d)\n",
+ layer->src_w, layer->src_h);
+ return -EINVAL;
+ }
+
+ /* If the cursor size has changed, re-allocate the pixmap */
+ if (!cursor->pixmap.base ||
+ (cursor->width != layer->src_w) ||
+ (cursor->height != layer->src_h)) {
+ cursor->width = layer->src_w;
+ cursor->height = layer->src_h;
+
+ if (cursor->pixmap.base)
+ dma_free_writecombine(layer->dev,
+ cursor->pixmap.size,
+ cursor->pixmap.base,
+ cursor->pixmap.paddr);
+
+ cursor->pixmap.size = cursor->width * cursor->height;
+
+ cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!cursor->pixmap.base) {
+ DRM_ERROR("Failed to allocate memory for pixmap\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Convert ARGB8888 to CLUT8 */
+ sti_cursor_argb8888_to_clut8(layer);
+
+ /* AWS and AWE depend on the mode */
+ y = sti_vtg_get_line_number(*mode, 0);
+ x = sti_vtg_get_pixel_number(*mode, 0);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWS);
+ y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWE);
+
+ if (first_prepare) {
+ /* Set and fetch CLUT */
+ writel(cursor->clut_paddr, layer->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+ }
+
+ return 0;
+}
+
+static int sti_cursor_commit_layer(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 ydo, xdo;
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* Set memory location, size, and position */
+ writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
+ writel(cursor->width, layer->regs + CUR_PMP);
+ writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+ xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+
+ return 0;
+}
+
+static int sti_cursor_disable_layer(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_cursor_init(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ unsigned short *base = cursor->clut;
+ unsigned int a, r, g, b;
+
+ /* Assign CLUT values, ARGB444 format */
+ for (a = 0; a < 4; a++)
+ for (r = 0; r < 4; r++)
+ for (g = 0; g < 4; g++)
+ for (b = 0; b < 4; b++)
+ *base++ = (a * 5) << 12 |
+ (r * 5) << 8 |
+ (g * 5) << 4 |
+ (b * 5);
+}
+
+static const struct sti_layer_funcs cursor_ops = {
+ .get_formats = sti_cursor_get_formats,
+ .get_nb_formats = sti_cursor_get_nb_formats,
+ .init = sti_cursor_init,
+ .prepare = sti_cursor_prepare_layer,
+ .commit = sti_cursor_commit_layer,
+ .disable = sti_cursor_disable_layer,
+};
+
+struct sti_layer *sti_cursor_create(struct device *dev)
+{
+ struct sti_cursor *cursor;
+
+ cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
+ if (!cursor) {
+ DRM_ERROR("Failed to allocate memory for cursor\n");
+ return NULL;
+ }
+
+ /* Allocate clut buffer */
+ cursor->clut = dma_alloc_writecombine(dev,
+ 0x100 * sizeof(unsigned short),
+ &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!cursor->clut) {
+ DRM_ERROR("Failed to allocate memory for cursor clut\n");
+ devm_kfree(dev, cursor);
+ return NULL;
+ }
+
+ cursor->layer.ops = &cursor_ops;
+
+ return (struct sti_layer *)cursor;
+}
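Two pieces of bit twiddling drive the cursor path above: each ARGB8888 pixel is reduced to a CLUT8 index holding 2 bits per component, and the 256-entry palette expands those 2-bit values back to ARGB4444 through the *5 factor (0, 1, 2, 3 map to 0, 5, 10, 15). The snippet below is a standalone restatement of exactly that math with one sample pixel; it is illustrative, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* one ARGB8888 pixel -> one CLUT8 index, 2 bits per component (AARRGGBB) */
    static uint8_t argb8888_to_clut8(uint32_t px)
    {
    	uint8_t a = (px >> 30) & 3, r = (px >> 22) & 3;
    	uint8_t g = (px >> 14) & 3, b = (px >> 6) & 3;

    	return a << 6 | r << 4 | g << 2 | b;
    }

    /* palette entry for a given 2-bit a/r/g/b, as built in sti_cursor_init() */
    static uint16_t clut_entry_argb4444(unsigned a, unsigned r, unsigned g, unsigned b)
    {
    	return (a * 5) << 12 | (r * 5) << 8 | (g * 5) << 4 | (b * 5);
    }

    int main(void)
    {
    	uint32_t px = 0xFFFF0000;	/* opaque red */
    	uint8_t idx = argb8888_to_clut8(px);	/* yields index 0xF0 = (3,3,0,0) */

    	printf("pixel 0x%08x -> index 0x%02x -> ARGB4444 0x%04x\n",
    	       (unsigned)px, (unsigned)idx,
    	       (unsigned)clut_entry_argb4444(3, 3, 0, 0));
    	return 0;
    }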
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
new file mode 100644
index 000000000000..3c9827404f27
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CURSOR_H_
+#define _STI_CURSOR_H_
+
+struct sti_layer *sti_cursor_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index d2ae0c0e13be..4c651c200f20 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_drm_drv.h"
@@ -27,7 +28,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- compo->enable = true;
+ mixer->enabled = true;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@@ -37,6 +38,8 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
if (clk_prepare_enable(compo->clk_compo_aux))
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
+
+ sti_mixer_clear_all_layers(mixer);
}
static void sti_drm_crtc_commit(struct drm_crtc *crtc)
@@ -61,6 +64,8 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc)
/* Enable layer on mixer */
if (sti_mixer_set_layer_status(mixer, layer, true))
DRM_ERROR("Can not enable layer at mixer\n");
+
+ drm_crtc_vblank_on(crtc);
}
static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -143,7 +148,8 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
w = crtc->primary->fb->width - x;
h = crtc->primary->fb->height - y;
- return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ return sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h, x, y, w, h);
}
@@ -170,7 +176,8 @@ static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
w = crtc->primary->fb->width - crtc->x;
h = crtc->primary->fb->height - crtc->y;
- ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ ret = sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h,
crtc->x, crtc->y, w, h);
if (ret) {
@@ -195,7 +202,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(dev);
struct sti_layer *layer;
- if (!compo->enable)
+ if (!mixer->enabled)
return;
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
@@ -221,7 +228,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
/* Then disable layer itself */
sti_layer_disable(layer);
- drm_vblank_off(crtc->dev, mixer->id);
+ drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
if (mixer->id == STI_MIXER_MAIN) {
@@ -232,7 +239,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
- compo->enable = false;
+ mixer->enabled = false;
}
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
@@ -363,7 +370,6 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
struct sti_drm_private *priv = dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
- unsigned long flags;
DRM_DEBUG_DRIVER("\n");
@@ -372,13 +378,10 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* free the resources of the pending requests */
- spin_lock_irqsave(&dev->event_lock, flags);
if (compo->mixer[crtc]->pending_event) {
drm_vblank_put(dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
}
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
@@ -398,6 +401,7 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
+EXPORT_SYMBOL(sti_drm_crtc_is_main);
int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 223d93c3a05d..5239fa121726 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -67,8 +67,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
sti_drm_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
- if (ret)
+ if (ret) {
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ kfree(private);
return ret;
+ }
drm_helper_disable_unused_functions(dev);
@@ -184,7 +188,6 @@ static struct platform_driver sti_drm_master_driver = {
.probe = sti_drm_master_probe,
.remove = sti_drm_master_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME "__master",
},
};
@@ -228,7 +231,6 @@ static struct platform_driver sti_drm_platform_driver = {
.probe = sti_drm_platform_probe,
.remove = sti_drm_platform_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.of_match_table = sti_drm_dt_ids,
},
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index f4118d4cac22..bb6a29339e10 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -45,7 +45,8 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* src_x are in 16.16 format. */
- res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ res = sti_layer_prepare(layer, crtc, fb,
+ &crtc->mode, mixer->id,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x >> 16, src_y >> 16,
src_w >> 16, src_h >> 16);
@@ -193,3 +194,4 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
return &layer->plane;
}
+EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 4e30b74559f5..32448d1d1e8f 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -73,7 +73,9 @@ struct sti_gdp_node {
struct sti_gdp_node_list {
struct sti_gdp_node *top_field;
+ dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field;
+ dma_addr_t btm_field_paddr;
};
/**
@@ -81,6 +83,8 @@ struct sti_gdp_node_list {
*
* @layer: layer structure
* @clk_pix: pixel clock for the current gdp
+ * @clk_main_parent: gdp parent clock if main path used
+ * @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
@@ -88,6 +92,8 @@ struct sti_gdp_node_list {
struct sti_gdp {
struct sti_layer layer;
struct clk *clk_pix;
+ struct clk *clk_main_parent;
+ struct clk *clk_aux_parent;
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
@@ -168,7 +174,6 @@ static int sti_gdp_get_alpharange(int format)
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -176,11 +181,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn != gdp->node_list[i].btm_field) &&
- (virt_nvn != gdp->node_list[i].top_field))
+ if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
+ (hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
/* in hazardious cases restart with the first node */
@@ -204,7 +207,6 @@ static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -212,11 +214,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn == gdp->node_list[i].btm_field) ||
- (virt_nvn == gdp->node_list[i].top_field))
+ if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
+ (hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
end:
@@ -292,8 +292,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
- top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
- btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
+ top_field->gam_gdp_nvn = list->btm_field_paddr;
+ btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -311,6 +311,17 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Set and enable gdp clock */
if (gdp->clk_pix) {
+ struct clk *clkp;
+ /* Depending on the mixer used, the gdp pixel clock
+ * should have a different parent clock. */
+ if (layer->mixer_id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
@@ -349,8 +360,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer);
- u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
- u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ u32 dma_updated_top = updated_list->top_field_paddr;
+ u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
@@ -461,16 +472,16 @@ static void sti_gdp_init(struct sti_layer *layer)
{
struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node;
- dma_addr_t dma;
+ dma_addr_t dma_addr;
void *base;
unsigned int i, size;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-
base = dma_alloc_writecombine(layer->dev,
- size, &dma, GFP_KERNEL | GFP_DMA);
+ size, &dma_addr, GFP_KERNEL | GFP_DMA);
+
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
return;
@@ -478,21 +489,26 @@ static void sti_gdp_init(struct sti_layer *layer)
memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].top_field = base;
+ gdp->node_list[i].top_field_paddr = dma_addr;
+
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].btm_field = base;
+ gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) {
@@ -520,6 +536,14 @@ static void sti_gdp_init(struct sti_layer *layer)
gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
+
+ gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+ if (IS_ERR(gdp->clk_main_parent))
+ DRM_ERROR("Cannot get main_parent clock\n");
+
+ gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+ if (IS_ERR(gdp->clk_aux_parent))
+ DRM_ERROR("Cannot get aux_parent clock\n");
}
}
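All of the sti_gdp.c hunks above implement one idea: the DMA address of each node is recorded when the page is allocated, instead of being recomputed with virt_to_dma()/dma_to_virt(), which is not reliable for dma_alloc_writecombine() memory (the CPU side is a remapped area, not the linear kernel map). Below is a hedged, self-contained restatement of that bookkeeping; the example_* names and node size are invented for illustration.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    struct example_node {
    	u32 word[32];			/* stand-in for struct sti_gdp_node */
    };

    struct example_node_list {
    	struct example_node *top_field;
    	dma_addr_t top_field_paddr;
    	struct example_node *btm_field;
    	dma_addr_t btm_field_paddr;
    };

    static int example_alloc_node_lists(struct device *dev,
    				    struct example_node_list *list, int nb)
    {
    	size_t size = 2 * nb * sizeof(struct example_node);
    	dma_addr_t dma;
    	void *base;
    	int i;

    	base = dma_alloc_writecombine(dev, size, &dma, GFP_KERNEL | GFP_DMA);
    	if (!base)
    		return -ENOMEM;

    	for (i = 0; i < nb; i++) {
    		list[i].top_field = base;		/* CPU view */
    		list[i].top_field_paddr = dma;		/* what the NVN register gets */
    		base += sizeof(struct example_node);
    		dma += sizeof(struct example_node);

    		list[i].btm_field = base;
    		list[i].btm_field_paddr = dma;
    		base += sizeof(struct example_node);
    		dma += sizeof(struct example_node);
    	}
    	return 0;
    }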
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index b22968c08d1f..d032e024b0b8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -130,8 +130,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
/* Hot plug/unplug IRQ */
if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
- /* read gpio to get the status */
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
if (hdmi->drm_dev)
drm_helper_hpd_irq_event(hdmi->drm_dev);
}
@@ -273,31 +272,32 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
/* Infoframe header */
- val = buffer[0x0];
- val |= buffer[0x1] << 8;
- val |= buffer[0x2] << 16;
+ val = buffer[0];
+ val |= buffer[1] << 8;
+ val |= buffer[2] << 16;
hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
/* Infoframe packet bytes */
- val = frame[0x0];
- val |= frame[0x1] << 8;
- val |= frame[0x2] << 16;
- val |= frame[0x3] << 24;
+ val = buffer[3];
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x4];
- val |= frame[0x5] << 8;
- val |= frame[0x6] << 16;
- val |= frame[0x7] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x8];
- val |= frame[0x9] << 8;
- val |= frame[0xA] << 16;
- val |= frame[0xB] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
- val = frame[0xC];
+ val = *(frame++);
+ val |= *(frame) << 8;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
/* Enable transmission slot for AVI infoframe
@@ -480,17 +480,15 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct i2c_adapter *i2c_adap;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
struct edid *edid;
int count;
DRM_DEBUG_DRIVER("\n");
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- goto fail;
-
- edid = drm_get_edid(connector, i2c_adap);
+ edid = drm_get_edid(connector, hdmi->ddc_adapt);
if (!edid)
goto fail;
@@ -603,29 +601,38 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
- struct i2c_adapter *i2c_adap;
+ struct device_node *ddc;
int err;
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- return -EPROBE_DEFER;
+ ddc = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (ddc) {
+ hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+ if (!hdmi->ddc_adapt) {
+ err = -EPROBE_DEFER;
+ of_node_put(ddc);
+ return err;
+ }
+
+ of_node_put(ddc);
+ }
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
encoder = sti_hdmi_find_encoder(drm_dev);
if (!encoder)
- return -ENOMEM;
+ goto err_adapt;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
- return -ENOMEM;
+ goto err_adapt;
+
connector->hdmi = hdmi;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -ENOMEM;
+ goto err_adapt;
bridge->driver_private = hdmi;
drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
@@ -662,6 +669,8 @@ err_sysfs:
err_connector:
drm_bridge_cleanup(bridge);
drm_connector_cleanup(drm_connector);
+err_adapt:
+ put_device(&hdmi->ddc_adapt->dev);
return -EINVAL;
}
@@ -757,13 +766,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->clk_audio);
}
- hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
- if (hdmi->hpd_gpio < 0) {
- DRM_ERROR("Failed to get hdmi hpd-gpio\n");
- return -EIO;
- }
-
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
init_waitqueue_head(&hdmi->wait_event);
@@ -788,6 +791,11 @@ static int sti_hdmi_probe(struct platform_device *pdev)
static int sti_hdmi_remove(struct platform_device *pdev)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
+
+ if (hdmi->ddc_adapt)
+ put_device(&hdmi->ddc_adapt->dev);
+
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
}
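The infoframe rework above stops hard-coding frame[0x0..0xC] indices and simply streams the remaining bytes into HDMI_SW_DI_N_PKT_WORD0..3, four bytes per 32-bit word, low byte first. A standalone illustration of that word packing follows; the sample bytes are made up for the example, not a real AVI payload.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* pack a byte stream into 32-bit words, low byte first */
    static void pack_words(const uint8_t *bytes, size_t len, uint32_t *words,
    		       size_t nb_words)
    {
    	size_t i;

    	memset(words, 0, nb_words * sizeof(*words));
    	for (i = 0; i < len && i / 4 < nb_words; i++)
    		words[i / 4] |= (uint32_t)bytes[i] << (8 * (i % 4));
    }

    int main(void)
    {
    	uint8_t payload[14] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
    	uint32_t words[4];

    	pack_words(payload, sizeof(payload), words, 4);
    	printf("WORD0=0x%08x WORD1=0x%08x\n",
    	       (unsigned)words[0], (unsigned)words[1]); /* 0x44332211 0x00000055 */
    	return 0;
    }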
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 61bec6557ceb..3d22390e1f3b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -14,6 +14,9 @@
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
+#define HDMI_STA_HOT_PLUG_SHIFT 4
+#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT)
+
struct sti_hdmi;
struct hdmi_phy_ops {
@@ -37,7 +40,6 @@ struct hdmi_phy_ops {
* @irq_status: interrupt status register
* @phy_ops: phy start/stop operations
* @enabled: true if hdmi is enabled else false
- * @hpd_gpio: hdmi hot plug detect gpio number
* @hpd: hot plug detect status
* @wait_event: wait event
* @event_received: wait event status
@@ -57,11 +59,11 @@ struct sti_hdmi {
u32 irq_status;
struct hdmi_phy_ops *phy_ops;
bool enabled;
- int hpd_gpio;
bool hpd;
wait_queue_head_t wait_event;
bool event_received;
struct reset_control *reset;
+ struct i2c_adapter *ddc_adapt;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
new file mode 100644
index 000000000000..f3db05dab0ab
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_drm_plane.h"
+#include "sti_hqvdp.h"
+#include "sti_hqvdp_lut.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Firmware name */
+#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
+
+/* Regs address */
+#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
+#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
+#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
+#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
+#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
+#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
+#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
+#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
+#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
+#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
+#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
+#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
+#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
+#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
+#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
+#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
+#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
+#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
+#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
+#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
+#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
+#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
+#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
+#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
+#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
+#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
+#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
+#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
+#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
+#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
+
+/* Plugs config */
+#define PLUG_CONTROL_ENABLE 0x00000001
+#define PLUG_PAGE_SIZE_256 0x00000002
+#define PLUG_MIN_OPC_8 0x00000003
+#define PLUG_MAX_OPC_64 0x00000006
+#define PLUG_MAX_CHK_2X 0x00000001
+#define PLUG_MAX_MSG_1X 0x00000000
+#define PLUG_MIN_SPACE_1 0x00000000
+
+/* SW reset CTRL */
+#define SW_RESET_CTRL_FULL BIT(0)
+#define SW_RESET_CTRL_CORE BIT(1)
+
+/* Startup ctrl 1 */
+#define STARTUP_CTRL1_RST_DONE BIT(0)
+#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
+
+/* Startup ctrl 2 */
+#define STARTUP_CTRL2_FETCH_EN BIT(1)
+
+/* Info xP70 */
+#define INFO_XP70_FW_READY BIT(15)
+#define INFO_XP70_FW_PROCESSING BIT(14)
+#define INFO_XP70_FW_INITQUEUES BIT(13)
+
+/* SOFT_VSYNC */
+#define SOFT_VSYNC_HW 0x00000000
+#define SOFT_VSYNC_SW_CMD 0x00000001
+#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
+
+/* Reset & boot poll config */
+#define POLL_MAX_ATTEMPT 50
+#define POLL_DELAY_MS 20
+
+#define SCALE_FACTOR 8192
+#define SCALE_MAX_FOR_LEG_LUT_F 4096
+#define SCALE_MAX_FOR_LEG_LUT_E 4915
+#define SCALE_MAX_FOR_LEG_LUT_D 6654
+#define SCALE_MAX_FOR_LEG_LUT_C 8192
+
+enum sti_hvsrc_orient {
+ HVSRC_HORI,
+ HVSRC_VERT
+};
+
+/* Command structures */
+struct sti_hqvdp_top {
+ u32 config;
+ u32 mem_format;
+ u32 current_luma;
+ u32 current_enh_luma;
+ u32 current_right_luma;
+ u32 current_enh_right_luma;
+ u32 current_chroma;
+ u32 current_enh_chroma;
+ u32 current_right_chroma;
+ u32 current_enh_right_chroma;
+ u32 output_luma;
+ u32 output_chroma;
+ u32 luma_src_pitch;
+ u32 luma_enh_src_pitch;
+ u32 luma_right_src_pitch;
+ u32 luma_enh_right_src_pitch;
+ u32 chroma_src_pitch;
+ u32 chroma_enh_src_pitch;
+ u32 chroma_right_src_pitch;
+ u32 chroma_enh_right_src_pitch;
+ u32 luma_processed_pitch;
+ u32 chroma_processed_pitch;
+ u32 input_frame_size;
+ u32 input_viewport_ori;
+ u32 input_viewport_ori_right;
+ u32 input_viewport_size;
+ u32 left_view_border_width;
+ u32 right_view_border_width;
+ u32 left_view_3d_offset_width;
+ u32 right_view_3d_offset_width;
+ u32 side_stripe_color;
+ u32 crc_reset_ctrl;
+};
+
+/* Configs for interlaced : no IT, no pass thru, 3 fields */
+#define TOP_CONFIG_INTER_BTM 0x00000000
+#define TOP_CONFIG_INTER_TOP 0x00000002
+
+/* Config for progressive : no IT, no pass thru, 3 fields */
+#define TOP_CONFIG_PROGRESSIVE 0x00000001
+
+/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
+#define TOP_MEM_FORMAT_DFLT 0x00018060
+
+/* Min/Max size */
+#define MAX_WIDTH 0x1FFF
+#define MAX_HEIGHT 0x0FFF
+#define MIN_WIDTH 0x0030
+#define MIN_HEIGHT 0x0010
+
+struct sti_hqvdp_vc1re {
+ u32 ctrl_prv_csdi;
+ u32 ctrl_cur_csdi;
+ u32 ctrl_nxt_csdi;
+ u32 ctrl_cur_fmd;
+ u32 ctrl_nxt_fmd;
+};
+
+struct sti_hqvdp_fmd {
+ u32 config;
+ u32 viewport_ori;
+ u32 viewport_size;
+ u32 next_next_luma;
+ u32 next_next_right_luma;
+ u32 next_next_next_luma;
+ u32 next_next_next_right_luma;
+ u32 threshold_scd;
+ u32 threshold_rfd;
+ u32 threshold_move;
+ u32 threshold_cfd;
+};
+
+struct sti_hqvdp_csdi {
+ u32 config;
+ u32 config2;
+ u32 dcdi_config;
+ u32 prev_luma;
+ u32 prev_enh_luma;
+ u32 prev_right_luma;
+ u32 prev_enh_right_luma;
+ u32 next_luma;
+ u32 next_enh_luma;
+ u32 next_right_luma;
+ u32 next_enh_right_luma;
+ u32 prev_chroma;
+ u32 prev_enh_chroma;
+ u32 prev_right_chroma;
+ u32 prev_enh_right_chroma;
+ u32 next_chroma;
+ u32 next_enh_chroma;
+ u32 next_right_chroma;
+ u32 next_enh_right_chroma;
+ u32 prev_motion;
+ u32 prev_right_motion;
+ u32 cur_motion;
+ u32 cur_right_motion;
+ u32 next_motion;
+ u32 next_right_motion;
+};
+
+/* Config for progressive: bypass */
+#define CSDI_CONFIG_PROG 0x00000000
+/* Config for directional deinterlacing without motion */
+#define CSDI_CONFIG_INTER_DIR 0x00000016
+/* Additional configs for fader, blender, motion,... deinterlace algorithms */
+#define CSDI_CONFIG2_DFLT 0x000001B3
+#define CSDI_DCDI_CONFIG_DFLT 0x00203803
+
+struct sti_hqvdp_hvsrc {
+ u32 hor_panoramic_ctrl;
+ u32 output_picture_size;
+ u32 init_horizontal;
+ u32 init_vertical;
+ u32 param_ctrl;
+ u32 yh_coef[NB_COEF];
+ u32 ch_coef[NB_COEF];
+ u32 yv_coef[NB_COEF];
+ u32 cv_coef[NB_COEF];
+ u32 hori_shift;
+ u32 vert_shift;
+};
+
+/* Default ParamCtrl: all controls enabled */
+#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
+
+struct sti_hqvdp_iqi {
+ u32 config;
+ u32 demo_wind_size;
+ u32 pk_config;
+ u32 coeff0_coeff1;
+ u32 coeff2_coeff3;
+ u32 coeff4;
+ u32 pk_lut;
+ u32 pk_gain;
+ u32 pk_coring_level;
+ u32 cti_config;
+ u32 le_config;
+ u32 le_lut[64];
+ u32 con_bri;
+ u32 sat_gain;
+ u32 pxf_conf;
+ u32 default_color;
+};
+
+/* Default Config : IQI bypassed */
+#define IQI_CONFIG_DFLT 0x00000001
+/* Default Contrast & Brightness gain = 256 */
+#define IQI_CON_BRI_DFLT 0x00000100
+/* Default Saturation gain = 256 */
+#define IQI_SAT_GAIN_DFLT 0x00000100
+/* Default PxfConf : P2I bypassed */
+#define IQI_PXF_CONF_DFLT 0x00000001
+
+struct sti_hqvdp_top_status {
+ u32 processing_time;
+ u32 input_y_crc;
+ u32 input_uv_crc;
+};
+
+struct sti_hqvdp_fmd_status {
+ u32 fmd_repeat_move_status;
+ u32 fmd_scene_count_status;
+ u32 cfd_sum;
+ u32 field_sum;
+ u32 next_y_fmd_crc;
+ u32 next_next_y_fmd_crc;
+ u32 next_next_next_y_fmd_crc;
+};
+
+struct sti_hqvdp_csdi_status {
+ u32 prev_y_csdi_crc;
+ u32 cur_y_csdi_crc;
+ u32 next_y_csdi_crc;
+ u32 prev_uv_csdi_crc;
+ u32 cur_uv_csdi_crc;
+ u32 next_uv_csdi_crc;
+ u32 y_csdi_crc;
+ u32 uv_csdi_crc;
+ u32 uv_cup_crc;
+ u32 mot_csdi_crc;
+ u32 mot_cur_csdi_crc;
+ u32 mot_prev_csdi_crc;
+};
+
+struct sti_hqvdp_hvsrc_status {
+ u32 y_hvsrc_crc;
+ u32 u_hvsrc_crc;
+ u32 v_hvsrc_crc;
+};
+
+struct sti_hqvdp_iqi_status {
+ u32 pxf_it_status;
+ u32 y_iqi_crc;
+ u32 u_iqi_crc;
+ u32 v_iqi_crc;
+};
+
+/* Main commands. We use 2 commands: one being processed by the firmware and
+ * one ready to be fetched upon the next Vsync */
+#define NB_VDP_CMD 2
+
+struct sti_hqvdp_cmd {
+ struct sti_hqvdp_top top;
+ struct sti_hqvdp_vc1re vc1re;
+ struct sti_hqvdp_fmd fmd;
+ struct sti_hqvdp_csdi csdi;
+ struct sti_hqvdp_hvsrc hvsrc;
+ struct sti_hqvdp_iqi iqi;
+ struct sti_hqvdp_top_status top_status;
+ struct sti_hqvdp_fmd_status fmd_status;
+ struct sti_hqvdp_csdi_status csdi_status;
+ struct sti_hqvdp_hvsrc_status hvsrc_status;
+ struct sti_hqvdp_iqi_status iqi_status;
+};
+
+/*
+ * STI HQVDP structure
+ *
+ * @dev: driver device
+ * @drm_dev: the drm device
+ * @regs: registers
+ * @layer: layer structure for the hqvdp itself
+ * @vid_plane: VID plug used as link with compositor IP
+ * @clk: IP clock
+ * @clk_pix_main: pix main clock
+ * @reset: reset control
+ * @vtg_nb: notifier to handle VTG Vsync
+ * @btm_field_pending: is there any bottom field (interlaced frame) to display
+ * @curr_field_count: number of field updates
+ * @last_field_count: number of field updates since last fps measure
+ * @hqvdp_cmd: buffer of commands
+ * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
+ * @vtg: vtg for main data path
+ */
+struct sti_hqvdp {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct sti_layer layer;
+ struct drm_plane *vid_plane;
+ struct clk *clk;
+ struct clk *clk_pix_main;
+ struct reset_control *reset;
+ struct notifier_block vtg_nb;
+ bool btm_field_pending;
+ unsigned int curr_field_count;
+ unsigned int last_field_count;
+ void *hqvdp_cmd;
+ dma_addr_t hqvdp_cmd_paddr;
+ struct sti_vtg *vtg;
+};
+
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+
+static const uint32_t hqvdp_supported_formats[] = {
+ DRM_FORMAT_NV12,
+};
+
+static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
+{
+ return hqvdp_supported_formats;
+}
+
+static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(hqvdp_supported_formats);
+}
+
+/**
+ * sti_hqvdp_get_free_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
+ *
+ * RETURNS:
+ * the offset of the command to be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd, next_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+ next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if ((cmd != curr_cmd) && (cmd != next_cmd))
+ return i * sizeof(struct sti_hqvdp_cmd);
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
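+
+/*
+ * Note: the driver keeps NB_VDP_CMD (i.e. 2) commands in one DMA buffer.
+ * While the firmware executes the command pointed to by HQVDP_MBX_CURRENT_CMD
+ * and has the one in HQVDP_MBX_NEXT_CMD queued, the helper above returns the
+ * byte offset of a slot matching neither register, i.e. the one the CPU may
+ * safely rewrite.
+ */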
+
+/**
+ * sti_hqvdp_get_curr_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the hqvdp_cmd that is being used by the FW.
+ *
+ * RETURNS:
+ * the offset of the command to be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ unsigned int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if (cmd == curr_cmd)
+ return i * sizeof(struct sti_hqvdp_cmd);
+
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
+
+/**
+ * sti_hqvdp_update_hvsrc
+ * @orient: horizontal or vertical
+ * @scale: scaling/zoom factor
+ * @hvsrc: the structure containing the LUT coef
+ *
+ * Update the Y and C Lut coef, as well as the shift param
+ *
+ * RETURNS:
+ * None.
+ */
+static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
+ struct sti_hqvdp_hvsrc *hvsrc)
+{
+ const int *coef_c, *coef_y;
+ int shift_c, shift_y;
+
+ /* Get the appropriate coef tables */
+ if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
+ coef_y = coef_lut_f_y_legacy;
+ coef_c = coef_lut_f_c_legacy;
+ shift_y = SHIFT_LUT_F_Y_LEGACY;
+ shift_c = SHIFT_LUT_F_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
+ coef_y = coef_lut_e_y_legacy;
+ coef_c = coef_lut_e_c_legacy;
+ shift_y = SHIFT_LUT_E_Y_LEGACY;
+ shift_c = SHIFT_LUT_E_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
+ coef_y = coef_lut_d_y_legacy;
+ coef_c = coef_lut_d_c_legacy;
+ shift_y = SHIFT_LUT_D_Y_LEGACY;
+ shift_c = SHIFT_LUT_D_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_lut_c_y_legacy;
+ coef_c = coef_lut_c_c_legacy;
+ shift_y = SHIFT_LUT_C_Y_LEGACY;
+ shift_c = SHIFT_LUT_C_C_LEGACY;
+ } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_c = coef_lut_b;
+ shift_y = shift_c = SHIFT_LUT_B;
+ } else {
+ coef_y = coef_c = coef_lut_a_legacy;
+ shift_y = shift_c = SHIFT_LUT_A_LEGACY;
+ }
+
+ if (orient == HVSRC_HORI) {
+ hvsrc->hori_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
+ memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
+ } else {
+ hvsrc->vert_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
+ memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
+ }
+}
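+
+/*
+ * Callers pass scale = SCALE_FACTOR * output / input, so values below
+ * SCALE_FACTOR (8192) mean downscaling.  The buckets above therefore go from
+ * LUT F (output smaller than half the input) up to LUT C, use LUT B at
+ * exactly 1:1 and LUT A for any upscale.
+ */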
+
+/**
+ * sti_hqvdp_check_hw_scaling
+ * @layer: hqvdp layer
+ *
+ * Check if the HW is able to perform the scaling request
+ * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
+ * Zy = OutputHeight / InputHeight
+ * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
+ * Tx : Total video mode horizontal resolution
+ * IPClock : HQVDP IP clock (Mhz)
+ * MaxNbCycles: max(InputWidth, OutputWidth)
+ * Cp: Video mode pixel clock (Mhz)
+ *
+ * RETURNS:
+ * True if the HW can scale.
+ */
+static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ unsigned long lfw;
+ unsigned int inv_zy;
+
+ lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+ lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+
+ inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+
+ return (inv_zy <= lfw) ? true : false;
+}
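+
+/*
+ * Worked example with illustrative numbers (not datasheet values): for a
+ * 1920x1080p60 mode (htotal = 2200, pixel clock = 148.5 MHz), a 400 MHz IP
+ * clock and max(src_w, dst_w) = 1920, LFW = (2200 * 400) / (1920 * 148.5),
+ * so FLOOR(LFW) = 3.  Downscaling 1080 to 540 lines gives CEIL(1/Zy) = 2,
+ * which is within the limit; shrinking below a third of the input height
+ * would be rejected.
+ */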
+
+/**
+ * sti_hqvdp_prepare_layer
+ * @layer: hqvdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Prepares a command for the firmware
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* prepare and commit VID plane */
+ hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
+ layer->crtc, layer->fb,
+ layer->dst_x, layer->dst_y,
+ layer->dst_w, layer->dst_h,
+ layer->src_x, layer->src_y,
+ layer->src_w, layer->src_h);
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+ cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+ if (!sti_hqvdp_check_hw_scaling(layer)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return -EINVAL;
+ }
+
+ /* Static parameters, defaulting to progressive mode */
+ cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+ cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+ cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+ cmd->csdi.config = CSDI_CONFIG_PROG;
+
+ /* VC1RE and FMD bypassed: keep everything set to 0;
+ * IQI/P2I bypassed as well */
+ cmd->iqi.config = IQI_CONFIG_DFLT;
+ cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+ cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+ cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+ /* Buffer planes address */
+ cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
+ cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
+
+ /* Pitches */
+ cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
+ layer->pitches[0];
+ cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
+ layer->pitches[1];
+
+ /* Input / output size
+ * Align to upper even value */
+ layer->dst_w = ALIGN(layer->dst_w, 2);
+ layer->dst_h = ALIGN(layer->dst_h, 2);
+
+ if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
+ (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
+ (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
+ (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ layer->src_w, layer->src_h,
+ layer->dst_w, layer->dst_h);
+ return -EINVAL;
+ }
+ cmd->top.input_viewport_size = cmd->top.input_frame_size =
+ layer->src_h << 16 | layer->src_w;
+ cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
+ cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
+
+ /* Handle interlaced */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
+ /* Top field to display */
+ cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+ /* Update pitches and vert size */
+ cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
+ layer->src_w;
+ cmd->top.luma_processed_pitch *= 2;
+ cmd->top.luma_src_pitch *= 2;
+ cmd->top.chroma_processed_pitch *= 2;
+ cmd->top.chroma_src_pitch *= 2;
+
+ /* Enable directional deinterlacing processing */
+ cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+ cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+ cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+ }
+
+ /* Update hvsrc lut coef */
+ scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
+ sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+ scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
+ sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+ if (first_prepare) {
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return -ENXIO;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+
+ writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+
+ /* Interlaced : get ready to display the bottom field at next Vsync */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
+ hqvdp->btm_field_pending = true;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+ return 0;
+}
+
+static int sti_hqvdp_disable_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Unregister VTG Vsync callback */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* Set next cmd to NULL */
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+
+ /* VTG can stop now */
+ clk_disable_unprepare(hqvdp->clk_pix_main);
+
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("XP70 could not revert to idle\n");
+ return -ENXIO;
+ }
+
+ /* disable VID plane */
+ hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
+
+ return 0;
+}
+
+/**
+ * sti_hqvdp_vtg_cb
+ * @nb: notifier block
+ * @evt: event message
+ * @data: private data
+ *
+ * Handle VTG Vsync event, display pending bottom field
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+{
+ struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
+ int btm_cmd_offset, top_cmd_offset;
+ struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
+
+ if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_DEBUG_DRIVER("Unknown event\n");
+ return 0;
+ }
+
+ if (hqvdp->btm_field_pending) {
+ /* Create the btm field command from the current one */
+ btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+ if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) {
+ DRM_ERROR("Cannot get cmds, skip btm field\n");
+ return -EBUSY;
+ }
+
+ btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
+ top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset;
+
+ memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
+
+ btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
+ btm_cmd->top.current_luma +=
+ btm_cmd->top.luma_src_pitch / 2;
+ btm_cmd->top.current_chroma +=
+ btm_cmd->top.chroma_src_pitch / 2;
+
+ /* Post the command to mailbox */
+ writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+ hqvdp->btm_field_pending = false;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr);
+ }
+
+ return 0;
+}
+
+static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+{
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ if (layer->desc == id)
+ return plane;
+ }
+
+ return NULL;
+}
+
+static void sti_hqvd_init(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int size;
+
+ /* find the plane matching VID 0 */
+ hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
+ if (!hqvdp->vid_plane) {
+ DRM_ERROR("Cannot find Main video layer\n");
+ return;
+ }
+
+ hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
+
+ /* Allocate memory for the VDP commands */
+ size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
+ hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
+ &hqvdp->hqvdp_cmd_paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!hqvdp->hqvdp_cmd) {
+ DRM_ERROR("Failed to allocate memory for VDP cmd\n");
+ return;
+ }
+
+ memset(hqvdp->hqvdp_cmd, 0, size);
+}
+
+static const struct sti_layer_funcs hqvdp_ops = {
+ .get_formats = sti_hqvdp_get_formats,
+ .get_nb_formats = sti_hqvdp_get_nb_formats,
+ .init = sti_hqvd_init,
+ .prepare = sti_hqvdp_prepare_layer,
+ .commit = sti_hqvdp_commit_layer,
+ .disable = sti_hqvdp_disable_layer,
+};
+
+struct sti_layer *sti_hqvdp_create(struct device *dev)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+
+ hqvdp->layer.ops = &hqvdp_ops;
+
+ return &hqvdp->layer;
+}
+EXPORT_SYMBOL(sti_hqvdp_create);
+
+static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
+{
+ /* Configure Plugs (same for RD & WR) */
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
+
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
+}
+
+/**
+ * sti_hqvdp_start_xp70
+ * @firmware: firmware found
+ * @ctxt: hqvdp structure
+ *
+ * Run the xP70 initialization sequence
+ */
+static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
+{
+ struct sti_hqvdp *hqvdp = ctxt;
+ u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
+ u8 *data;
+ int i;
+ struct fw_header {
+ int rd_size;
+ int wr_size;
+ int pmem_size;
+ int dmem_size;
+ } *header;
+
+ DRM_DEBUG_DRIVER("\n");
+ /* Check firmware parts */
+ if (!firmware) {
+ DRM_ERROR("Firmware not available\n");
+ return;
+ }
+
+ header = (struct fw_header *) firmware->data;
+ if (firmware->size < sizeof(*header)) {
+ DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
+ goto out;
+ }
+ if ((sizeof(*header) + header->rd_size + header->wr_size +
+ header->pmem_size + header->dmem_size) != firmware->size) {
+ DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
+ sizeof(*header), header->rd_size, header->wr_size,
+ header->pmem_size, header->dmem_size,
+ firmware->size);
+ goto out;
+ }
+
+ data = (u8 *) firmware->data;
+ data += sizeof(*header);
+ fw_rd_plug = (void *) data;
+ data += header->rd_size;
+ fw_wr_plug = (void *) data;
+ data += header->wr_size;
+ fw_pmem = (void *) data;
+ data += header->pmem_size;
+ fw_dmem = (void *) data;
+
+ /* Enable clock */
+ if (clk_prepare_enable(hqvdp->clk))
+ DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
+
+ /* Reset */
+ writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
+
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+ & STARTUP_CTRL1_RST_DONE)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not reset\n");
+ goto out;
+ }
+
+ /* Init Read & Write plugs */
+ for (i = 0; i < header->rd_size / 4; i++)
+ writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
+ for (i = 0; i < header->wr_size / 4; i++)
+ writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
+
+ sti_hqvdp_init_plugs(hqvdp);
+
+ /* Authorize Idle Mode */
+ writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
+
+ /* Prevent VTG interruption during the boot */
+ writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ /* Download PMEM & DMEM */
+ for (i = 0; i < header->pmem_size / 4; i++)
+ writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
+ for (i = 0; i < header->dmem_size / 4; i++)
+ writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
+
+ /* Enable fetch */
+ writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
+
+ /* Wait end of boot */
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not boot\n");
+ goto out;
+ }
+
+ /* Launch Vsync */
+ writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+
+ DRM_INFO("HQVDP XP70 started\n");
+out:
+ release_firmware(firmware);
+}
+
+int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct sti_layer *layer;
+ int err;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp->drm_dev = drm_dev;
+
+ /* Request the firmware; sti_hqvdp_start_xp70() will boot the xP70 once it is loaded */
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ HQVDP_FMW_NAME, hqvdp->dev,
+ GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70);
+ if (err) {
+ DRM_ERROR("Can't get HQVDP firmware\n");
+ return err;
+ }
+
+ layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
+ if (!layer) {
+ DRM_ERROR("Can't create HQVDP plane\n");
+ return -ENOMEM;
+ }
+
+ sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
+
+ return 0;
+}
+
+static void sti_hqvdp_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hqvdp_ops = {
+ .bind = sti_hqvdp_bind,
+ .unbind = sti_hqvdp_unbind,
+};
+
+static int sti_hqvdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vtg_np;
+ struct sti_hqvdp *hqvdp;
+ struct resource *res;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
+ if (!hqvdp) {
+ DRM_ERROR("Failed to allocate HQVDP context\n");
+ return -ENOMEM;
+ }
+
+ hqvdp->dev = dev;
+
+ /* Get Memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (hqvdp->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ hqvdp->clk = devm_clk_get(dev, "hqvdp");
+ hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Cannot get clocks\n");
+ return -ENXIO;
+ }
+
+ /* Get reset resources */
+ hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
+ if (!IS_ERR(hqvdp->reset))
+ reset_control_deassert(hqvdp->reset);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ hqvdp->vtg = of_vtg_find(vtg_np);
+
+ platform_set_drvdata(pdev, hqvdp);
+
+ return component_add(&pdev->dev, &sti_hqvdp_ops);
+}
+
+static int sti_hqvdp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hqvdp_ops);
+ return 0;
+}
+
+static struct of_device_id hqvdp_of_match[] = {
+ { .compatible = "st,stih407-hqvdp", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hqvdp_of_match);
+
+struct platform_driver sti_hqvdp_driver = {
+ .driver = {
+ .name = "sti-hqvdp",
+ .owner = THIS_MODULE,
+ .of_match_table = hqvdp_of_match,
+ },
+ .probe = sti_hqvdp_probe,
+ .remove = sti_hqvdp_remove,
+};
+
+module_platform_driver(sti_hqvdp_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
new file mode 100644
index 000000000000..cd5ecd0a6dea
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_H_
+#define _STI_HQVDP_H_
+
+struct sti_layer *sti_hqvdp_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
new file mode 100644
index 000000000000..619af7f4384e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_LUT_H_
+#define _STI_HQVDP_LUT_H_
+
+#define NB_COEF 128
+
+#define SHIFT_LUT_A_LEGACY 8
+#define SHIFT_LUT_B 8
+#define SHIFT_LUT_C_Y_LEGACY 8
+#define SHIFT_LUT_C_C_LEGACY 8
+#define SHIFT_LUT_D_Y_LEGACY 8
+#define SHIFT_LUT_D_C_LEGACY 8
+#define SHIFT_LUT_E_Y_LEGACY 8
+#define SHIFT_LUT_E_C_LEGACY 8
+#define SHIFT_LUT_F_Y_LEGACY 8
+#define SHIFT_LUT_F_C_LEGACY 8
+
+static const u32 coef_lut_a_legacy[NB_COEF] = {
+ 0x0000ffff, 0x00010000, 0x000100ff, 0x00000000,
+ 0x00000000, 0x00050000, 0xfffc00ff, 0x00000000,
+ 0x00000000, 0x00090000, 0xfff900fe, 0x00000000,
+ 0x00000000, 0x0010ffff, 0xfff600fb, 0x00000000,
+ 0x00000000, 0x0017fffe, 0xfff400f7, 0x00000000,
+ 0x00000000, 0x001ffffd, 0xfff200f2, 0x00000000,
+ 0x00000000, 0x0027fffc, 0xfff100ec, 0x00000000,
+ 0x00000000, 0x0030fffb, 0xfff000e5, 0x00000000,
+ 0x00000000, 0x003afffa, 0xffee00de, 0x00000000,
+ 0x00000000, 0x0044fff9, 0xffed00d6, 0x00000000,
+ 0x00000000, 0x004efff8, 0xffed00cd, 0x00000000,
+ 0x00000000, 0x0059fff6, 0xffed00c4, 0x00000000,
+ 0x00000000, 0x0064fff5, 0xffed00ba, 0x00000000,
+ 0x00000000, 0x006ffff3, 0xffee00b0, 0x00000000,
+ 0x00000000, 0x007afff2, 0xffee00a6, 0x00000000,
+ 0x00000000, 0x0085fff1, 0xffef009b, 0x00000000,
+ 0x00000000, 0x0090fff0, 0xfff00090, 0x00000000,
+ 0x00000000, 0x009bffef, 0xfff10085, 0x00000000,
+ 0x00000000, 0x00a6ffee, 0xfff2007a, 0x00000000,
+ 0x00000000, 0x00b0ffee, 0xfff3006f, 0x00000000,
+ 0x00000000, 0x00baffed, 0xfff50064, 0x00000000,
+ 0x00000000, 0x00c4ffed, 0xfff60059, 0x00000000,
+ 0x00000000, 0x00cdffed, 0xfff8004e, 0x00000000,
+ 0x00000000, 0x00d6ffed, 0xfff90044, 0x00000000,
+ 0x00000000, 0x00deffee, 0xfffa003a, 0x00000000,
+ 0x00000000, 0x00e5fff0, 0xfffb0030, 0x00000000,
+ 0x00000000, 0x00ecfff1, 0xfffc0027, 0x00000000,
+ 0x00000000, 0x00f2fff2, 0xfffd001f, 0x00000000,
+ 0x00000000, 0x00f7fff4, 0xfffe0017, 0x00000000,
+ 0x00000000, 0x00fbfff6, 0xffff0010, 0x00000000,
+ 0x00000000, 0x00fefff9, 0x00000009, 0x00000000,
+ 0x00000000, 0x00fffffc, 0x00000005, 0x00000000
+};
+
+static const u32 coef_lut_b[NB_COEF] = {
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000
+};
+
+static const u32 coef_lut_c_y_legacy[NB_COEF] = {
+ 0x00060004, 0x0038ffe1, 0x003800be, 0x0006ffe1,
+ 0x00050005, 0x0042ffe1, 0x003800b3, 0x0007ffe1,
+ 0x00040006, 0x0046ffe1, 0x003300b2, 0x0008ffe2,
+ 0x00030007, 0x004cffe1, 0x002e00b1, 0x0008ffe2,
+ 0x00020006, 0x0051ffe2, 0x002900b0, 0x0009ffe3,
+ 0x00010008, 0x0056ffe2, 0x002400ae, 0x0009ffe4,
+ 0xffff0008, 0x005cffe3, 0x001f00ad, 0x000affe4,
+ 0xfffe0008, 0x0062ffe4, 0x001a00ab, 0x000affe5,
+ 0xfffd000a, 0x0066ffe5, 0x001500a8, 0x000bffe6,
+ 0xfffc0009, 0x006bffe7, 0x001100a5, 0x000bffe8,
+ 0xfffa000a, 0x0070ffe8, 0x000d00a3, 0x000bffe9,
+ 0xfff9000b, 0x0076ffea, 0x0008009f, 0x000bffea,
+ 0xfff7000b, 0x007affec, 0x0005009b, 0x000cffec,
+ 0xfff6000b, 0x007effef, 0x00010098, 0x000cffed,
+ 0xfff4000b, 0x0084fff1, 0xfffd0095, 0x000cffee,
+ 0xfff3000b, 0x0088fff4, 0xfffa0090, 0x000cfff0,
+ 0xfff1000b, 0x008dfff7, 0xfff7008d, 0x000bfff1,
+ 0xfff0000c, 0x0090fffa, 0xfff40088, 0x000bfff3,
+ 0xffee000c, 0x0095fffd, 0xfff10084, 0x000bfff4,
+ 0xffed000c, 0x00980001, 0xffef007e, 0x000bfff6,
+ 0xffec000c, 0x009b0005, 0xffec007a, 0x000bfff7,
+ 0xffea000b, 0x009f0008, 0xffea0076, 0x000bfff9,
+ 0xffe9000b, 0x00a3000d, 0xffe80070, 0x000afffa,
+ 0xffe8000b, 0x00a50011, 0xffe7006b, 0x0009fffc,
+ 0xffe6000b, 0x00a80015, 0xffe50066, 0x000afffd,
+ 0xffe5000a, 0x00ab001a, 0xffe40062, 0x0008fffe,
+ 0xffe4000a, 0x00ad001f, 0xffe3005c, 0x0008ffff,
+ 0xffe40009, 0x00ae0024, 0xffe20056, 0x00080001,
+ 0xffe30009, 0x00b00029, 0xffe20051, 0x00060002,
+ 0xffe20008, 0x00b1002e, 0xffe1004c, 0x00070003,
+ 0xffe20008, 0x00b20033, 0xffe10046, 0x00060004,
+ 0xffe10007, 0x00b30038, 0xffe10042, 0x00050005
+};
+
+static const u32 coef_lut_c_c_legacy[NB_COEF] = {
+ 0x0001fff3, 0x003afffb, 0x003a00a1, 0x0001fffb,
+ 0x0001fff5, 0x0041fffb, 0x0038009a, 0x0001fffb,
+ 0x0001fff5, 0x0046fffb, 0x00340099, 0x0001fffb,
+ 0x0001fff7, 0x0049fffb, 0x00300098, 0x0001fffb,
+ 0x0001fff9, 0x004cfffb, 0x002d0096, 0x0001fffb,
+ 0x0001fffa, 0x004ffffc, 0x00290095, 0x0001fffb,
+ 0x0001fff9, 0x0054fffd, 0x00250093, 0x0001fffc,
+ 0x0001fffa, 0x0058fffd, 0x00220092, 0x0000fffc,
+ 0x0001fffb, 0x005bfffe, 0x001f0090, 0x0000fffc,
+ 0x0001fffd, 0x005effff, 0x001c008c, 0x0000fffd,
+ 0x0001fffd, 0x00620000, 0x0019008a, 0x0000fffd,
+ 0x0001fffe, 0x00660001, 0x00160088, 0xfffffffd,
+ 0x0000fffe, 0x006a0003, 0x00130085, 0xfffffffe,
+ 0x0000fffe, 0x006e0004, 0x00100083, 0xfffffffe,
+ 0x0000fffe, 0x00710006, 0x000e007f, 0xffffffff,
+ 0x0000fffe, 0x00750008, 0x000c007c, 0xfffeffff,
+ 0xfffffffe, 0x0079000a, 0x000a0079, 0xfffeffff,
+ 0xfffffffe, 0x007c000c, 0x00080075, 0xfffe0000,
+ 0xffffffff, 0x007f000e, 0x00060071, 0xfffe0000,
+ 0xfffeffff, 0x00830010, 0x0004006e, 0xfffe0000,
+ 0xfffeffff, 0x00850013, 0x0003006a, 0xfffe0000,
+ 0xfffdffff, 0x00880016, 0x00010066, 0xfffe0001,
+ 0xfffd0000, 0x008a0019, 0x00000062, 0xfffd0001,
+ 0xfffd0000, 0x008c001c, 0xffff005e, 0xfffd0001,
+ 0xfffc0000, 0x0090001f, 0xfffe005b, 0xfffb0001,
+ 0xfffc0000, 0x00920022, 0xfffd0058, 0xfffa0001,
+ 0xfffc0001, 0x00930025, 0xfffd0054, 0xfff90001,
+ 0xfffb0001, 0x00950029, 0xfffc004f, 0xfffa0001,
+ 0xfffb0001, 0x0096002d, 0xfffb004c, 0xfff90001,
+ 0xfffb0001, 0x00980030, 0xfffb0049, 0xfff70001,
+ 0xfffb0001, 0x00990034, 0xfffb0046, 0xfff50001,
+ 0xfffb0001, 0x009a0038, 0xfffb0041, 0xfff50001
+};
+
+static const u32 coef_lut_d_y_legacy[NB_COEF] = {
+ 0xfff80009, 0x0046ffec, 0x004600a3, 0xfff8ffec,
+ 0xfff70009, 0x004effed, 0x0044009d, 0xfff9ffeb,
+ 0xfff6000a, 0x0052ffee, 0x003f009d, 0xfffaffea,
+ 0xfff50009, 0x0057ffef, 0x003b009d, 0xfffbffe9,
+ 0xfff50008, 0x005bfff0, 0x0037009c, 0xfffcffe9,
+ 0xfff40008, 0x005ffff2, 0x0033009b, 0xfffcffe9,
+ 0xfff30007, 0x0064fff3, 0x002f009b, 0xfffdffe8,
+ 0xfff20007, 0x0068fff5, 0x002b0099, 0xfffeffe8,
+ 0xfff10008, 0x006bfff7, 0x00270097, 0xffffffe8,
+ 0xfff00007, 0x006ffff9, 0x00230097, 0xffffffe8,
+ 0xffef0006, 0x0073fffb, 0x00200095, 0x0000ffe8,
+ 0xffee0005, 0x0077fffe, 0x001c0093, 0x0000ffe9,
+ 0xffee0005, 0x007a0000, 0x00180091, 0x0001ffe9,
+ 0xffed0005, 0x007d0003, 0x0015008e, 0x0002ffe9,
+ 0xffec0005, 0x00800006, 0x0012008b, 0x0002ffea,
+ 0xffeb0004, 0x00840008, 0x000e008a, 0x0003ffea,
+ 0xffeb0003, 0x0087000b, 0x000b0087, 0x0003ffeb,
+ 0xffea0003, 0x008a000e, 0x00080084, 0x0004ffeb,
+ 0xffea0002, 0x008b0012, 0x00060080, 0x0005ffec,
+ 0xffe90002, 0x008e0015, 0x0003007d, 0x0005ffed,
+ 0xffe90001, 0x00910018, 0x0000007a, 0x0005ffee,
+ 0xffe90000, 0x0093001c, 0xfffe0077, 0x0005ffee,
+ 0xffe80000, 0x00950020, 0xfffb0073, 0x0006ffef,
+ 0xffe8ffff, 0x00970023, 0xfff9006f, 0x0007fff0,
+ 0xffe8ffff, 0x00970027, 0xfff7006b, 0x0008fff1,
+ 0xffe8fffe, 0x0099002b, 0xfff50068, 0x0007fff2,
+ 0xffe8fffd, 0x009b002f, 0xfff30064, 0x0007fff3,
+ 0xffe9fffc, 0x009b0033, 0xfff2005f, 0x0008fff4,
+ 0xffe9fffc, 0x009c0037, 0xfff0005b, 0x0008fff5,
+ 0xffe9fffb, 0x009d003b, 0xffef0057, 0x0009fff5,
+ 0xffeafffa, 0x009d003f, 0xffee0052, 0x000afff6,
+ 0xffebfff9, 0x009d0044, 0xffed004e, 0x0009fff7
+};
+
+static const u32 coef_lut_d_c_legacy[NB_COEF] = {
+ 0xfffeffff, 0x003fffff, 0x003f0089, 0xfffeffff,
+ 0xfffe0000, 0x00460000, 0x0042007d, 0xfffffffe,
+ 0xfffe0000, 0x00490001, 0x003f007d, 0xfffffffd,
+ 0xfffd0001, 0x004b0002, 0x003c007d, 0x0000fffc,
+ 0xfffd0001, 0x004e0003, 0x0039007c, 0x0000fffc,
+ 0xfffc0001, 0x00510005, 0x0036007c, 0x0000fffb,
+ 0xfffc0001, 0x00540006, 0x0033007b, 0x0001fffa,
+ 0xfffc0003, 0x00550008, 0x00310078, 0x0001fffa,
+ 0xfffb0003, 0x00580009, 0x002e0078, 0x0001fffa,
+ 0xfffb0002, 0x005b000b, 0x002b0077, 0x0002fff9,
+ 0xfffa0003, 0x005e000d, 0x00280075, 0x0002fff9,
+ 0xfffa0002, 0x0060000f, 0x00260074, 0x0002fff9,
+ 0xfffa0004, 0x00610011, 0x00230072, 0x0002fff9,
+ 0xfffa0004, 0x00640013, 0x00200070, 0x0002fff9,
+ 0xfff90004, 0x00660015, 0x001e006e, 0x0003fff9,
+ 0xfff90004, 0x00680017, 0x001c006c, 0x0003fff9,
+ 0xfff90003, 0x006b0019, 0x0019006b, 0x0003fff9,
+ 0xfff90003, 0x006c001c, 0x00170068, 0x0004fff9,
+ 0xfff90003, 0x006e001e, 0x00150066, 0x0004fff9,
+ 0xfff90002, 0x00700020, 0x00130064, 0x0004fffa,
+ 0xfff90002, 0x00720023, 0x00110061, 0x0004fffa,
+ 0xfff90002, 0x00740026, 0x000f0060, 0x0002fffa,
+ 0xfff90002, 0x00750028, 0x000d005e, 0x0003fffa,
+ 0xfff90002, 0x0077002b, 0x000b005b, 0x0002fffb,
+ 0xfffa0001, 0x0078002e, 0x00090058, 0x0003fffb,
+ 0xfffa0001, 0x00780031, 0x00080055, 0x0003fffc,
+ 0xfffa0001, 0x007b0033, 0x00060054, 0x0001fffc,
+ 0xfffb0000, 0x007c0036, 0x00050051, 0x0001fffc,
+ 0xfffc0000, 0x007c0039, 0x0003004e, 0x0001fffd,
+ 0xfffc0000, 0x007d003c, 0x0002004b, 0x0001fffd,
+ 0xfffdffff, 0x007d003f, 0x00010049, 0x0000fffe,
+ 0xfffeffff, 0x007d0042, 0x00000046, 0x0000fffe
+};
+
+static const u32 coef_lut_e_y_legacy[NB_COEF] = {
+ 0xfff10001, 0x00490004, 0x00490083, 0xfff10004,
+ 0xfff10000, 0x00500006, 0x004b007b, 0xfff10002,
+ 0xfff10000, 0x00530007, 0x0048007b, 0xfff10001,
+ 0xfff10000, 0x00550009, 0x0046007a, 0xfff10000,
+ 0xfff1fffe, 0x0058000b, 0x0043007b, 0xfff2fffe,
+ 0xfff1ffff, 0x005a000d, 0x0040007a, 0xfff2fffd,
+ 0xfff1fffd, 0x005d000f, 0x003e007a, 0xfff2fffc,
+ 0xfff1fffd, 0x005f0011, 0x003b0079, 0xfff3fffb,
+ 0xfff1fffc, 0x00610013, 0x00390079, 0xfff3fffa,
+ 0xfff1fffb, 0x00640015, 0x00360079, 0xfff3fff9,
+ 0xfff1fffa, 0x00660017, 0x00340078, 0xfff4fff8,
+ 0xfff1fffb, 0x00680019, 0x00310077, 0xfff4fff7,
+ 0xfff2fff9, 0x006a001b, 0x002f0076, 0xfff5fff6,
+ 0xfff2fff9, 0x006c001e, 0x002c0075, 0xfff5fff5,
+ 0xfff2fff9, 0x006d0020, 0x002a0073, 0xfff6fff5,
+ 0xfff3fff7, 0x00700022, 0x00270073, 0xfff6fff4,
+ 0xfff3fff7, 0x00710025, 0x00250071, 0xfff7fff3,
+ 0xfff4fff6, 0x00730027, 0x00220070, 0xfff7fff3,
+ 0xfff5fff6, 0x0073002a, 0x0020006d, 0xfff9fff2,
+ 0xfff5fff5, 0x0075002c, 0x001e006c, 0xfff9fff2,
+ 0xfff6fff5, 0x0076002f, 0x001b006a, 0xfff9fff2,
+ 0xfff7fff4, 0x00770031, 0x00190068, 0xfffbfff1,
+ 0xfff8fff4, 0x00780034, 0x00170066, 0xfffafff1,
+ 0xfff9fff3, 0x00790036, 0x00150064, 0xfffbfff1,
+ 0xfffafff3, 0x00790039, 0x00130061, 0xfffcfff1,
+ 0xfffbfff3, 0x0079003b, 0x0011005f, 0xfffdfff1,
+ 0xfffcfff2, 0x007a003e, 0x000f005d, 0xfffdfff1,
+ 0xfffdfff2, 0x007a0040, 0x000d005a, 0xfffffff1,
+ 0xfffefff2, 0x007b0043, 0x000b0058, 0xfffefff1,
+ 0x0000fff1, 0x007a0046, 0x00090055, 0x0000fff1,
+ 0x0001fff1, 0x007b0048, 0x00070053, 0x0000fff1,
+ 0x0002fff1, 0x007b004b, 0x00060050, 0x0000fff1
+};
+
+static const u32 coef_lut_e_c_legacy[NB_COEF] = {
+ 0xfffa0001, 0x003f0010, 0x003f006d, 0xfffa0010,
+ 0xfffb0002, 0x00440011, 0x00440062, 0xfffa000e,
+ 0xfffb0001, 0x00460013, 0x00420062, 0xfffa000d,
+ 0xfffb0000, 0x00480014, 0x00410062, 0xfffa000c,
+ 0xfffb0001, 0x00490015, 0x003f0061, 0xfffb000b,
+ 0xfffb0000, 0x004b0017, 0x003d0061, 0xfffb000a,
+ 0xfffb0000, 0x004d0018, 0x003b0062, 0xfffb0008,
+ 0xfffcffff, 0x004f001a, 0x00390061, 0xfffb0007,
+ 0xfffc0000, 0x004f001c, 0x00380060, 0xfffb0006,
+ 0xfffcffff, 0x0052001d, 0x00360060, 0xfffb0005,
+ 0xfffdfffe, 0x0053001f, 0x00340060, 0xfffb0004,
+ 0xfffdfffe, 0x00540021, 0x0032005e, 0xfffc0004,
+ 0xfffeffff, 0x00550022, 0x0030005d, 0xfffc0003,
+ 0xfffeffff, 0x00560024, 0x002f005c, 0xfffc0002,
+ 0xfffffffd, 0x00580026, 0x002d005c, 0xfffc0001,
+ 0xfffffffd, 0x005a0027, 0x002b005c, 0xfffc0000,
+ 0x0000fffd, 0x005a0029, 0x0029005a, 0xfffd0000,
+ 0x0000fffc, 0x005c002b, 0x0027005a, 0xfffdffff,
+ 0x0001fffc, 0x005c002d, 0x00260058, 0xfffdffff,
+ 0x0002fffc, 0x005c002f, 0x00240056, 0xfffffffe,
+ 0x0003fffc, 0x005d0030, 0x00220055, 0xfffffffe,
+ 0x0004fffc, 0x005e0032, 0x00210054, 0xfffefffd,
+ 0x0004fffb, 0x00600034, 0x001f0053, 0xfffefffd,
+ 0x0005fffb, 0x00600036, 0x001d0052, 0xfffffffc,
+ 0x0006fffb, 0x00600038, 0x001c004f, 0x0000fffc,
+ 0x0007fffb, 0x00610039, 0x001a004f, 0xfffffffc,
+ 0x0008fffb, 0x0062003b, 0x0018004d, 0x0000fffb,
+ 0x000afffb, 0x0061003d, 0x0017004b, 0x0000fffb,
+ 0x000bfffb, 0x0061003f, 0x00150049, 0x0001fffb,
+ 0x000cfffa, 0x00620041, 0x00140048, 0x0000fffb,
+ 0x000dfffa, 0x00620042, 0x00130046, 0x0001fffb,
+ 0x000efffa, 0x00620044, 0x00110044, 0x0002fffb
+};
+
+static const u32 coef_lut_f_y_legacy[NB_COEF] = {
+ 0xfff6fff0, 0x00490012, 0x0049006e, 0xfff60012,
+ 0xfff7fff1, 0x004e0013, 0x00490068, 0xfff60010,
+ 0xfff7fff2, 0x004f0015, 0x00470067, 0xfff6000f,
+ 0xfff7fff5, 0x004f0017, 0x00450065, 0xfff6000e,
+ 0xfff8fff5, 0x00500018, 0x00440065, 0xfff6000c,
+ 0xfff8fff6, 0x0051001a, 0x00420064, 0xfff6000b,
+ 0xfff8fff6, 0x0052001c, 0x00400064, 0xfff6000a,
+ 0xfff9fff6, 0x0054001d, 0x003e0064, 0xfff60008,
+ 0xfff9fff8, 0x0054001f, 0x003c0063, 0xfff60007,
+ 0xfffafff8, 0x00550021, 0x003a0062, 0xfff60006,
+ 0xfffbfff7, 0x00560022, 0x00390062, 0xfff60005,
+ 0xfffbfff8, 0x00570024, 0x00370061, 0xfff60004,
+ 0xfffcfff8, 0x00580026, 0x00350060, 0xfff60003,
+ 0xfffdfff8, 0x00590028, 0x0033005f, 0xfff60002,
+ 0xfffdfff7, 0x005b002a, 0x0031005f, 0xfff60001,
+ 0xfffefff7, 0x005c002c, 0x002f005e, 0xfff60000,
+ 0xfffffff6, 0x005e002d, 0x002d005e, 0xfff6ffff,
+ 0x0000fff6, 0x005e002f, 0x002c005c, 0xfff7fffe,
+ 0x0001fff6, 0x005f0031, 0x002a005b, 0xfff7fffd,
+ 0x0002fff6, 0x005f0033, 0x00280059, 0xfff8fffd,
+ 0x0003fff6, 0x00600035, 0x00260058, 0xfff8fffc,
+ 0x0004fff6, 0x00610037, 0x00240057, 0xfff8fffb,
+ 0x0005fff6, 0x00620039, 0x00220056, 0xfff7fffb,
+ 0x0006fff6, 0x0062003a, 0x00210055, 0xfff8fffa,
+ 0x0007fff6, 0x0063003c, 0x001f0054, 0xfff8fff9,
+ 0x0008fff6, 0x0064003e, 0x001d0054, 0xfff6fff9,
+ 0x000afff6, 0x00640040, 0x001c0052, 0xfff6fff8,
+ 0x000bfff6, 0x00640042, 0x001a0051, 0xfff6fff8,
+ 0x000cfff6, 0x00650044, 0x00180050, 0xfff5fff8,
+ 0x000efff6, 0x00650045, 0x0017004f, 0xfff5fff7,
+ 0x000ffff6, 0x00670047, 0x0015004f, 0xfff2fff7,
+ 0x0010fff6, 0x00680049, 0x0013004e, 0xfff1fff7
+};
+
+static const u32 coef_lut_f_c_legacy[NB_COEF] = {
+ 0x0000fffb, 0x003a001a, 0x003a005d, 0x0000001a,
+ 0x0001fffb, 0x003f001b, 0x00400051, 0x00000019,
+ 0x0001fffc, 0x0040001c, 0x003f0051, 0x00000017,
+ 0x0002fffb, 0x0042001d, 0x003e0051, 0xffff0016,
+ 0x0002fffb, 0x0043001e, 0x003d0051, 0xffff0015,
+ 0x0003fffc, 0x00430020, 0x003b0050, 0xffff0014,
+ 0x0003fffb, 0x00450021, 0x003a0051, 0xfffe0013,
+ 0x0004fffc, 0x00450022, 0x00390050, 0xfffe0012,
+ 0x0005fffc, 0x00460023, 0x0038004f, 0xfffe0011,
+ 0x0005fffb, 0x00480025, 0x00360050, 0xfffd0010,
+ 0x0006fffc, 0x00480026, 0x0035004f, 0xfffd000f,
+ 0x0006fffc, 0x00490027, 0x0034004f, 0xfffd000e,
+ 0x0007fffd, 0x00490028, 0x0033004e, 0xfffd000d,
+ 0x0008fffc, 0x004a002a, 0x0031004d, 0xfffd000d,
+ 0x0009fffd, 0x004a002b, 0x0030004d, 0xfffc000c,
+ 0x0009fffc, 0x004c002c, 0x002f004d, 0xfffc000b,
+ 0x000afffc, 0x004c002e, 0x002e004c, 0xfffc000a,
+ 0x000bfffc, 0x004d002f, 0x002c004c, 0xfffc0009,
+ 0x000cfffc, 0x004d0030, 0x002b004a, 0xfffd0009,
+ 0x000dfffd, 0x004d0031, 0x002a004a, 0xfffc0008,
+ 0x000dfffd, 0x004e0033, 0x00280049, 0xfffd0007,
+ 0x000efffd, 0x004f0034, 0x00270049, 0xfffc0006,
+ 0x000ffffd, 0x004f0035, 0x00260048, 0xfffc0006,
+ 0x0010fffd, 0x00500036, 0x00250048, 0xfffb0005,
+ 0x0011fffe, 0x004f0038, 0x00230046, 0xfffc0005,
+ 0x0012fffe, 0x00500039, 0x00220045, 0xfffc0004,
+ 0x0013fffe, 0x0051003a, 0x00210045, 0xfffb0003,
+ 0x0014ffff, 0x0050003b, 0x00200043, 0xfffc0003,
+ 0x0015ffff, 0x0051003d, 0x001e0043, 0xfffb0002,
+ 0x0016ffff, 0x0051003e, 0x001d0042, 0xfffb0002,
+ 0x00170000, 0x0051003f, 0x001c0040, 0xfffc0001,
+ 0x00190000, 0x00510040, 0x001b003f, 0xfffb0001
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
index 06a587c4f1bb..899104f9d4bc 100644
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -11,7 +11,9 @@
#include <drm/drm_fb_cma_helper.h>
#include "sti_compositor.h"
+#include "sti_cursor.h"
#include "sti_gdp.h"
+#include "sti_hqvdp.h"
#include "sti_layer.h"
#include "sti_vid.h"
@@ -32,10 +34,13 @@ const char *sti_layer_to_str(struct sti_layer *layer)
return "VID1";
case STI_CURSOR:
return "CURSOR";
+ case STI_HQVDP_0:
+ return "HQVDP0";
default:
return "<UNKNOWN LAYER>";
}
}
+EXPORT_SYMBOL(sti_layer_to_str);
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr)
@@ -50,6 +55,12 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
case STI_VID:
layer = sti_vid_create(dev);
break;
+ case STI_CUR:
+ layer = sti_cursor_create(dev);
+ break;
+ case STI_VDP:
+ layer = sti_hqvdp_create(dev);
+ break;
}
if (!layer) {
@@ -67,8 +78,11 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
return layer;
}
+EXPORT_SYMBOL(sti_layer_create);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode, int mixer_id,
int dest_x, int dest_y, int dest_w, int dest_h,
int src_x, int src_y, int src_w, int src_h)
@@ -88,6 +102,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
return 1;
}
+ layer->crtc = crtc;
layer->fb = fb;
layer->mode = mode;
layer->mixer_id = mixer_id;
@@ -100,6 +115,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
layer->src_w = src_w;
layer->src_h = src_h;
layer->format = fb->pixel_format;
+ layer->vaddr = cma_obj->vaddr;
layer->paddr = cma_obj->paddr;
for (i = 0; i < 4; i++) {
layer->pitches[i] = fb->pitches[i];
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
index 198c3774cc12..ceff497f557e 100644
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -22,7 +22,8 @@ enum sti_layer_type {
STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
- STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
+ STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
};
enum sti_layer_id_of_type {
@@ -39,6 +40,7 @@ enum sti_layer_desc {
STI_GDP_3 = STI_GDP | STI_ID_3,
STI_VID_0 = STI_VID | STI_ID_0,
STI_VID_1 = STI_VID | STI_ID_1,
+ STI_HQVDP_0 = STI_VDP | STI_ID_0,
STI_CURSOR = STI_CUR,
STI_BACK = STI_BCK
};
@@ -67,6 +69,7 @@ struct sti_layer_funcs {
*
* @plane: drm plane it is bound to (if any)
* @fb: drm fb it is bound to
+ * @crtc: crtc it is bound to
* @mode: display mode
* @desc: layer type & id
* @device: driver device
@@ -82,11 +85,13 @@ struct sti_layer_funcs {
* @format: format
* @pitches: pitch of 'planes' (eg: Y, U, V)
* @offsets: offset of 'planes'
+ * @vaddr: virtual address of the input buffer
* @paddr: physical address of the input buffer
*/
struct sti_layer {
struct drm_plane plane;
struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
struct drm_display_mode *mode;
enum sti_layer_desc desc;
struct device *dev;
@@ -102,12 +107,15 @@ struct sti_layer {
uint32_t format;
unsigned int pitches[4];
unsigned int offsets[4];
+ void *vaddr;
dma_addr_t paddr;
};
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode,
int mixer_id,
int dest_x, int dest_y,
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 79f369db9fb6..13a4b84deab6 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -45,6 +45,7 @@ static const u32 mixerColorSpaceMatIdentity[] = {
#define GAM_CTL_GDP1_MASK BIT(4)
#define GAM_CTL_GDP2_MASK BIT(5)
#define GAM_CTL_GDP3_MASK BIT(6)
+#define GAM_CTL_CURSOR_MASK BIT(9)
const char *sti_mixer_to_str(struct sti_mixer *mixer)
{
@@ -122,11 +123,15 @@ int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
layer_id = GAM_DEPTH_GDP3_ID;
break;
case STI_VID_0:
+ case STI_HQVDP_0:
layer_id = GAM_DEPTH_VID0_ID;
break;
case STI_VID_1:
layer_id = GAM_DEPTH_VID1_ID;
break;
+ case STI_CURSOR:
+ /* no need to set depth for cursor */
+ return 0;
default:
DRM_ERROR("Unknown layer %d\n", layer->desc);
return 1;
@@ -185,9 +190,12 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
case STI_VID_0:
+ case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
case STI_VID_1:
return GAM_CTL_VID1_MASK;
+ case STI_CURSOR:
+ return GAM_CTL_CURSOR_MASK;
default:
return 0;
}
@@ -215,6 +223,15 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 874372102e52..b97282182908 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -23,6 +23,7 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
+ * @enabled: true if the mixer is active
*/
struct sti_mixer {
struct device *dev;
@@ -30,6 +31,7 @@ struct sti_mixer {
int id;
struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
+ bool enabled;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
@@ -39,6 +41,7 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
struct sti_layer *layer, bool status);
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index b8afe490356a..cb924aa2b321 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,6 +16,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include "sti_drm_crtc.h"
+
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
#define TVO_CSC_MAIN_M1 0x004
@@ -96,7 +98,7 @@
#define TVO_SYNC_HD_DCS_SHIFT 8
-#define ENCODER_MAIN_CRTC_MASK BIT(0)
+#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
@@ -149,14 +151,15 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
* Set the color channel order of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @cr_r:
* @y_g:
* @cb_b:
*/
-static void tvout_vip_set_color_order(struct sti_tvout *tvout,
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
@@ -165,52 +168,58 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout,
val |= y_g << TVO_VIP_REORDER_G_SHIFT;
val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the clipping mode of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @range: clipping range
*/
-static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range)
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
val |= range << TVO_VIP_CLIP_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the rounded value of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @rnd: rounded val per component
*/
-static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd)
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
val |= rnd << TVO_VIP_RND_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the VIP input
*
* @tvout: tvout structure
+ * @reg: register to set
+ * @main_path: main or auxiliary path
+ * @sel_input_logic_inverted: true if the input selection logic is inverted
* @sel_input: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
+ int reg,
bool main_path,
bool sel_input_logic_inverted,
enum sti_tvout_video_out_type video_out)
{
u32 sel_input;
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
if (main_path)
sel_input = TVO_VIP_SEL_INPUT_MAIN;
@@ -232,22 +241,24 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
val &= ~TVO_VIP_SEL_INPUT_MASK;
val |= sel_input;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the input video signed or unsigned
*
* @tvout: tvout structure
+ * @reg: register to set
* @in_vid_signed: used video input format
*/
-static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt)
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
+ int reg, u32 in_vid_fmt)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~TVO_IN_FMT_SIGNED;
val |= in_vid_fmt;
- tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT);
+ tvout_write(tvout, val, reg);
}
/**
@@ -261,6 +272,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
dev_dbg(tvout->dev, "%s\n", __func__);
@@ -268,33 +280,36 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
DRM_DEBUG_DRIVER("main vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* set color channel order */
- tvout_vip_set_color_order(tvout,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
/* set round mode (rounded to 8-bit per component) */
- tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT,
- TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
+ TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* input selection */
- tvout_vip_set_sel_input(tvout, main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
}
@@ -309,48 +324,47 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
+ int val;
dev_dbg(tvout->dev, "%s\n", __func__);
- if (!main_path) {
- DRM_ERROR("HD Analog on aux not implemented\n");
- return;
+ if (main_path) {
+ val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
+ } else {
+ val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
- DRM_DEBUG_DRIVER("main vip for HDF\n");
-
/* set color channel order */
- tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF,
- TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+ /* set clipping mode (EAV/SAV clipping) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
/* set round mode (rounded to 10-bit per component) */
- tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout,
+ tvo_in_vid_format, TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* Input selection */
- tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF,
- main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
sel_input_logic_inverted,
STI_TVOUT_VIDEO_OUT_YUV);
- /* select the input sync for HD analog = VTG set 3
- * and HD DCS = VTG set 2 */
- tvout_write(tvout,
- (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
- | TVO_SYNC_MAIN_VTG_SET_3,
- TVO_HD_SYNC_SEL);
-
/* power up HD DAC */
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
@@ -392,7 +406,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hda_start(tvout, true);
+ tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -429,7 +443,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
@@ -444,7 +458,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hdmi_start(tvout, true);
+ tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -478,7 +492,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 740d6e347a62..9564f2568e2c 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -51,10 +51,19 @@
#define VTG_TOP_V_HD_3 0x010C
#define VTG_BOT_V_HD_3 0x0110
+#define VTG_H_HD_4 0x0120
+#define VTG_TOP_V_VD_4 0x0124
+#define VTG_BOT_V_VD_4 0x0128
+#define VTG_TOP_V_HD_4 0x012c
+#define VTG_BOT_V_HD_4 0x0130
+
#define VTG_IRQ_BOTTOM BIT(0)
#define VTG_IRQ_TOP BIT(1)
#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+/* Delay introduced by the HDMI in nb of pixels */
+#define HDMI_DELAY (6)
+
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
@@ -133,10 +142,10 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_VID_TFS);
writel(tmp, vtg->regs + VTG_VID_BFS);
- /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ /* prepare VTG set 1 for HDMI */
+ tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
+ tmp |= HDMI_DELAY;
writel(tmp, vtg->regs + VTG_H_HD_1);
- writel(tmp, vtg->regs + VTG_H_HD_2);
tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
tmp |= 1;
@@ -146,6 +155,11 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(0, vtg->regs + VTG_BOT_V_HD_1);
/* prepare VTG set 2 for HD DCS */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
writel(0, vtg->regs + VTG_TOP_V_HD_2);
@@ -166,6 +180,17 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+ /* Prepare VTG set 4 for DVO */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_4);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
+ writel(0, vtg->regs + VTG_TOP_V_HD_4);
+ writel(0, vtg->regs + VTG_BOT_V_HD_4);
+
/* mode */
writel(type, vtg->regs + VTG_MODE);
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 354ddb29231f..74d9d621453d 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,6 +1,7 @@
config DRM_TEGRA
tristate "NVIDIA Tegra DRM"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
depends on DRM
depends on RESET_CONTROLLER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 054a79f143ae..3367960286a6 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -9,17 +9,23 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include <linux/reset.h>
+#include <soc/tegra/pmc.h>
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
+#include <drm/drm_plane_helper.h>
+
struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
unsigned int pitch_align;
+ bool has_powergate;
};
struct tegra_plane {
@@ -32,6 +38,26 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
return container_of(plane, struct tegra_plane, base);
}
+static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index)
+{
+ u32 value = WIN_A_ACT_REQ << index;
+
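+ /*
+ * In DC_CMD_STATE_CONTROL the *_UPDATE bits sit 8 bits above the
+ * corresponding *_ACT_REQ bits, so writing value << 8 first latches
+ * the new window state before the activation request is issued.
+ */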
+ tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_cursor_commit(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_dc_commit(struct tegra_dc *dc)
+{
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
{
/* assume no swapping of fetched data */
@@ -303,17 +329,260 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
}
- tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+ tegra_dc_window_commit(dc, index);
+
+ return 0;
+}
+
+static int tegra_window_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ if (!plane->crtc)
+ return 0;
+
+ value = WINDOW_A_SELECT << p->index;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_window_commit(dc, p->index);
+
+ return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(p);
+}
+
+static const u32 tegra_primary_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+static int tegra_primary_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_dc_window window;
+ int err;
+
+ memset(&window, 0, sizeof(window));
+ window.src.x = src_x >> 16;
+ window.src.y = src_y >> 16;
+ window.src.w = src_w >> 16;
+ window.src.h = src_h >> 16;
+ window.dst.x = crtc_x;
+ window.dst.y = crtc_y;
+ window.dst.w = crtc_w;
+ window.dst.h = crtc_h;
+ window.format = tegra_dc_format(fb->pixel_format, &window.swap);
+ window.bits_per_pixel = fb->bits_per_pixel;
+ window.bottom_up = tegra_fb_is_bottom_up(fb);
+
+ err = tegra_fb_get_tiling(fb, &window.tiling);
+ if (err < 0)
+ return err;
+
+ window.base[0] = bo->paddr + fb->offsets[0];
+ window.stride[0] = fb->pitches[0];
+
+ err = tegra_dc_setup_window(dc, p->index, &window);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_primary_plane_destroy(struct drm_plane *plane)
+{
+ tegra_window_plane_disable(plane);
+ tegra_plane_destroy(plane);
+}
+
+static const struct drm_plane_funcs tegra_primary_plane_funcs = {
+ .update_plane = tegra_primary_plane_update,
+ .disable_plane = tegra_window_plane_disable,
+ .destroy = tegra_primary_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
+ formats = tegra_primary_plane_formats;
+
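+ /* the plane can only be bound to the CRTC driven by this DC, hence
+ * possible_crtcs = 1 << pipe */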
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_primary_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_PRIMARY);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static const u32 tegra_cursor_plane_formats[] = {
+ DRM_FORMAT_RGBA8888,
+};
+
+static int tegra_cursor_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value = CURSOR_CLIP_DISPLAY;
+
+ /* scaling not supported for cursor */
+ if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h))
+ return -EINVAL;
+
+ /* only square cursors supported */
+ if (src_w != src_h)
+ return -EINVAL;
+
+ switch (crtc_w) {
+ case 32:
+ value |= CURSOR_SIZE_32x32;
+ break;
+
+ case 64:
+ value |= CURSOR_SIZE_64x64;
+ break;
+
+ case 128:
+ value |= CURSOR_SIZE_128x128;
+ break;
+
+ case 256:
+ value |= CURSOR_SIZE_256x256;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
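+ /* program the cursor base address: only address bits [31:10] are
+ * written, so the cursor buffer is assumed to be 1 KiB aligned */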
+ value |= (bo->paddr >> 10) & 0x3fffff;
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ value = (bo->paddr >> 32) & 0x3;
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+ /* enable cursor and set blend mode */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+ value &= ~CURSOR_DST_BLEND_MASK;
+ value &= ~CURSOR_SRC_BLEND_MASK;
+ value |= CURSOR_MODE_NORMAL;
+ value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+ value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+ value |= CURSOR_ALPHA;
+ tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+
+ /* position the cursor */
+ value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff);
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+
+ /* apply changes */
+ tegra_dc_cursor_commit(dc);
+ tegra_dc_commit(dc);
+
+ return 0;
+}
+
+static int tegra_cursor_plane_disable(struct drm_plane *plane)
+{
+ struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+ u32 value;
+
+ if (!plane->crtc)
+ return 0;
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_cursor_commit(dc);
+ tegra_dc_commit(dc);
return 0;
}
-static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x,
- int crtc_y, unsigned int crtc_w,
- unsigned int crtc_h, uint32_t src_x,
- uint32_t src_y, uint32_t src_w, uint32_t src_h)
+static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
+ .update_plane = tegra_cursor_plane_update,
+ .disable_plane = tegra_cursor_plane_disable,
+ .destroy = tegra_plane_destroy,
+};
+
+static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+ formats = tegra_cursor_plane_formats;
+
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_cursor_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_CURSOR);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static int tegra_overlay_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -359,44 +628,19 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
return tegra_dc_setup_window(dc, p->index, &window);
}
-static int tegra_plane_disable(struct drm_plane *plane)
+static void tegra_overlay_plane_destroy(struct drm_plane *plane)
{
- struct tegra_dc *dc = to_tegra_dc(plane->crtc);
- struct tegra_plane *p = to_tegra_plane(plane);
- unsigned long value;
-
- if (!plane->crtc)
- return 0;
-
- value = WINDOW_A_SELECT << p->index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
- value &= ~WIN_ENABLE;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
-static void tegra_plane_destroy(struct drm_plane *plane)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
-
- tegra_plane_disable(plane);
- drm_plane_cleanup(plane);
- kfree(p);
+ tegra_window_plane_disable(plane);
+ tegra_plane_destroy(plane);
}
-static const struct drm_plane_funcs tegra_plane_funcs = {
- .update_plane = tegra_plane_update,
- .disable_plane = tegra_plane_disable,
- .destroy = tegra_plane_destroy,
+static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
+ .update_plane = tegra_overlay_plane_update,
+ .disable_plane = tegra_window_plane_disable,
+ .destroy = tegra_overlay_plane_destroy,
};
-static const uint32_t plane_formats[] = {
+static const uint32_t tegra_overlay_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
@@ -406,27 +650,44 @@ static const uint32_t plane_formats[] = {
DRM_FORMAT_YUV422,
};
-static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int index)
{
- unsigned int i;
- int err = 0;
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
- for (i = 0; i < 2; i++) {
- struct tegra_plane *plane;
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return -ENOMEM;
+ plane->index = index;
- plane->index = 1 + i;
+ num_formats = ARRAY_SIZE(tegra_overlay_plane_formats);
+ formats = tegra_overlay_plane_formats;
- err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
- &tegra_plane_funcs, plane_formats,
- ARRAY_SIZE(plane_formats), false);
- if (err < 0) {
- kfree(plane);
- return err;
- }
+ err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
+ &tegra_overlay_plane_funcs, formats,
+ num_formats, DRM_PLANE_TYPE_OVERLAY);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ return &plane->base;
+}
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct drm_plane *plane;
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
}
return 0;
@@ -513,10 +774,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
- value = GENERAL_UPDATE | WIN_A_UPDATE;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+ tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
return 0;
@@ -548,109 +807,6 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
spin_unlock_irqrestore(&dc->lock, flags);
}
-static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
- uint32_t handle, uint32_t width,
- uint32_t height, int32_t hot_x, int32_t hot_y)
-{
- unsigned long value = CURSOR_CLIP_DISPLAY;
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_gem_object *gem;
- struct tegra_bo *bo = NULL;
-
- if (!dc->soc->supports_cursor)
- return -ENXIO;
-
- if (width != height)
- return -EINVAL;
-
- switch (width) {
- case 32:
- value |= CURSOR_SIZE_32x32;
- break;
-
- case 64:
- value |= CURSOR_SIZE_64x64;
- break;
-
- case 128:
- value |= CURSOR_SIZE_128x128;
-
- case 256:
- value |= CURSOR_SIZE_256x256;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (handle) {
- gem = drm_gem_object_lookup(crtc->dev, file, handle);
- if (!gem)
- return -ENOENT;
-
- bo = to_tegra_bo(gem);
- }
-
- if (bo) {
- unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
-#endif
-
- tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
-#endif
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= CURSOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
- value &= ~CURSOR_DST_BLEND_MASK;
- value &= ~CURSOR_SRC_BLEND_MASK;
- value |= CURSOR_MODE_NORMAL;
- value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
- value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
- value |= CURSOR_ALPHA;
- tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
- } else {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~CURSOR_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
- }
-
- tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
-static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value;
-
- if (!dc->soc->supports_cursor)
- return -ENXIO;
-
- value = ((y & 0x3fff) << 16) | (x & 0x3fff);
- tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
- tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- /* XXX: only required on generations earlier than Tegra124? */
- tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
- return 0;
-}
-
static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
{
struct drm_device *drm = dc->base.dev;
@@ -727,8 +883,6 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs tegra_crtc_funcs = {
- .cursor_set2 = tegra_dc_cursor_set2,
- .cursor_move = tegra_dc_cursor_move,
.page_flip = tegra_dc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = tegra_dc_destroy,
@@ -736,12 +890,13 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
if (plane->crtc == crtc) {
- tegra_plane_disable(plane);
+ tegra_window_plane_disable(plane);
plane->crtc = NULL;
if (plane->fb) {
@@ -752,6 +907,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
}
drm_crtc_vblank_off(crtc);
+ tegra_dc_commit(dc);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -934,15 +1090,9 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value;
-
- value = GENERAL_UPDATE | WIN_A_UPDATE;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
-
- value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
- tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
drm_crtc_vblank_on(crtc);
+ tegra_dc_commit(dc);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -996,7 +1146,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
struct tegra_dc *dc = node->info_ent->data;
#define DUMP_REG(name) \
- seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
+ seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
tegra_dc_readl(dc, name))
DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
@@ -1284,9 +1434,40 @@ static int tegra_dc_init(struct host1x_client *client)
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
int err;
- drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ if (tegra->domain) {
+ err = iommu_attach_device(tegra->domain, dc->dev);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to attach to domain: %d\n",
+ err);
+ return err;
+ }
+
+ dc->domain = tegra->domain;
+ }
+
+ primary = tegra_dc_primary_plane_create(drm, dc);
+ if (IS_ERR(primary)) {
+ err = PTR_ERR(primary);
+ goto cleanup;
+ }
+
+ if (dc->soc->supports_cursor) {
+ cursor = tegra_dc_cursor_plane_create(drm, dc);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
+ }
+
+ err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
+ &tegra_crtc_funcs);
+ if (err < 0)
+ goto cleanup;
+
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1300,12 +1481,12 @@ static int tegra_dc_init(struct host1x_client *client)
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
- return err;
+ goto cleanup;
}
err = tegra_dc_add_planes(drm, dc);
if (err < 0)
- return err;
+ goto cleanup;
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_dc_debugfs_init(dc, drm->primary);
@@ -1318,10 +1499,24 @@ static int tegra_dc_init(struct host1x_client *client)
if (err < 0) {
dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
err);
- return err;
+ goto cleanup;
}
return 0;
+
+cleanup:
+ if (cursor)
+ drm_plane_cleanup(cursor);
+
+ if (primary)
+ drm_plane_cleanup(primary);
+
+ if (tegra->domain) {
+ iommu_detach_device(tegra->domain, dc->dev);
+ dc->domain = NULL;
+ }
+
+ return err;
}
static int tegra_dc_exit(struct host1x_client *client)
@@ -1343,6 +1538,11 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
+ if (dc->domain) {
+ iommu_detach_device(dc->domain, dc->dev);
+ dc->domain = NULL;
+ }
+
return 0;
}
@@ -1356,6 +1556,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 8,
+ .has_powergate = false,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1363,6 +1564,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 8,
+ .has_powergate = false,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1370,6 +1572,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.supports_cursor = false,
.supports_block_linear = false,
.pitch_align = 64,
+ .has_powergate = true,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -1377,6 +1580,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_cursor = true,
.supports_block_linear = true,
.pitch_align = 64,
+ .has_powergate = true,
};
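
The has_powergate flags above extend the per-SoC capability structures that the OF match table in the next hunk binds to compatible strings. A minimal sketch of how a probe routine picks up the matching tegra_dc_soc_info (the actual probe code is outside this hunk, so treat this as illustrative):

	static int example_dc_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		struct tegra_dc *dc;

		match = of_match_device(tegra_dc_of_match, &pdev->dev);
		if (!match)
			return -ENODEV;

		dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
		if (!dc)
			return -ENOMEM;

		dc->soc = match->data;	/* e.g. dc->soc->has_powergate selects the power-up path */

		return 0;
	}
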
static const struct of_device_id tegra_dc_of_match[] = {
@@ -1384,6 +1588,9 @@ static const struct of_device_id tegra_dc_of_match[] = {
.compatible = "nvidia,tegra124-dc",
.data = &tegra124_dc_soc_info,
}, {
+ .compatible = "nvidia,tegra114-dc",
+ .data = &tegra114_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra30-dc",
.data = &tegra30_dc_soc_info,
}, {
@@ -1466,9 +1673,34 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
- err = clk_prepare_enable(dc->clk);
- if (err < 0)
- return err;
+ if (dc->soc->has_powergate) {
+ if (dc->pipe == 0)
+ dc->powergate = TEGRA_POWERGATE_DIS;
+ else
+ dc->powergate = TEGRA_POWERGATE_DISB;
+
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to power partition: %d\n",
+ err);
+ return err;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n",
+ err);
+ return err;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to deassert reset: %d\n",
+ err);
+ return err;
+ }
+ }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dc->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1522,6 +1754,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
}
reset_control_assert(dc->rst);
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 59736bb810cd..e549afeece1f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,6 +8,7 @@
*/
#include <linux/host1x.h>
+#include <linux/iommu.h>
#include "drm.h"
#include "gem.h"
@@ -33,6 +34,17 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (!tegra)
return -ENOMEM;
+ if (iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+		if (!tegra->domain) {
+			err = -ENOMEM;
+ goto free;
+ }
+
+ DRM_DEBUG("IOMMU context initialized\n");
+ drm_mm_init(&tegra->mm, 0, SZ_2G);
+ }
+
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
drm->dev_private = tegra;
@@ -42,13 +54,13 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = tegra_drm_fb_prepare(drm);
if (err < 0)
- return err;
+ goto config;
drm_kms_helper_poll_init(drm);
err = host1x_device_init(device);
if (err < 0)
- return err;
+ goto fbdev;
/*
* We don't use the drm_irq_install() helpers provided by the DRM
@@ -59,18 +71,37 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
- return err;
+ goto device;
err = tegra_drm_fb_init(drm);
if (err < 0)
- return err;
+ goto vblank;
return 0;
+
+vblank:
+ drm_vblank_cleanup(drm);
+device:
+ host1x_device_exit(device);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+
+ if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ drm_mm_takedown(&tegra->mm);
+ }
+free:
+ kfree(tegra);
+ return err;
}
static int tegra_drm_unload(struct drm_device *drm)
{
struct host1x_device *device = to_host1x_device(drm->dev);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
drm_kms_helper_poll_fini(drm);
@@ -82,6 +113,13 @@ static int tegra_drm_unload(struct drm_device *drm)
if (err < 0)
return err;
+ if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ drm_mm_takedown(&tegra->mm);
+ }
+
+ kfree(tegra);
+
return 0;
}
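
tegra_drm_load() above unwinds partial initialization instead of returning early: every failure jumps to a label that tears down what was already set up, in reverse order of acquisition. A minimal, generic sketch of the idiom (step_a()/step_b()/step_c() and their undo counterparts are hypothetical):

	static int example_load(void *ctx)
	{
		int err;

		err = step_a(ctx);
		if (err < 0)
			return err;

		err = step_b(ctx);
		if (err < 0)
			goto undo_a;

		err = step_c(ctx);
		if (err < 0)
			goto undo_b;

		return 0;

	undo_b:
		/* undo in reverse order of setup */
		undo_step_b(ctx);
	undo_a:
		undo_step_a(ctx);
		return err;
	}
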
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index e89c70fa82d5..3a3b2e7b5b3f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -39,6 +39,9 @@ struct tegra_fbdev {
struct tegra_drm {
struct drm_device *drm;
+ struct iommu_domain *domain;
+ struct drm_mm mm;
+
struct mutex clients_lock;
struct list_head clients;
@@ -101,6 +104,7 @@ struct tegra_dc {
spinlock_t lock;
struct drm_crtc base;
+ int powergate;
int pipe;
struct clk *clk;
@@ -120,6 +124,8 @@ struct tegra_dc {
struct drm_pending_vblank_event *event;
const struct tegra_dc_soc_info *soc;
+
+ struct iommu_domain *domain;
};
static inline struct tegra_dc *
@@ -133,16 +139,15 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
}
-static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
- unsigned long reg)
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+ unsigned long offset)
{
- writel(value, dc->regs + (reg << 2));
+ writel(value, dc->regs + (offset << 2));
}
-static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
- unsigned long reg)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
{
- return readl(dc->regs + (reg << 2));
+ return readl(dc->regs + (offset << 2));
}
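
The reworked accessors return and accept u32 values but still address the register aperture in 32-bit words: the offset parameter is a word index, and the << 2 converts it to a byte offset. A small sketch, with a made-up word index:

	static u32 example_read(struct tegra_dc *dc)
	{
		unsigned long offset = 0x402;	/* hypothetical word index */

		/* readl() is issued at dc->regs + (0x402 << 2) == dc->regs + 0x1008 */
		return tegra_dc_readl(dc, offset);
	}
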
struct tegra_dc_window {
@@ -287,6 +292,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling);
int tegra_drm_fb_prepare(struct drm_device *drm);
+void tegra_drm_fb_free(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f7874458926a..33f67fd601c6 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -11,6 +11,7 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -26,9 +27,6 @@
#include "dsi.h"
#include "mipi-phy.h"
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
struct tegra_dsi {
struct host1x_client client;
struct tegra_output output;
@@ -54,6 +52,13 @@ struct tegra_dsi {
struct regulator *vdd;
bool enabled;
+
+ unsigned int video_fifo_depth;
+ unsigned int host_fifo_depth;
+
+ /* for ganged-mode support */
+ struct tegra_dsi *master;
+ struct tegra_dsi *slave;
};
static inline struct tegra_dsi *
@@ -318,6 +323,21 @@ static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
};
+static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
+ [ 0] = 0,
+ [ 1] = 0,
+ [ 2] = 0,
+ [ 3] = 0,
+ [ 4] = 0,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
+ [ 7] = 0,
+ [ 8] = 0,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
+ [11] = 0,
+};
+
static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
{
struct mipi_dphy_timing timing;
@@ -329,7 +349,7 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
if (rate < 0)
return rate;
- period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+ period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2);
err = mipi_dphy_timing_get_default(&timing, period);
if (err < 0)
@@ -369,6 +389,9 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
DSI_TIMING_FIELD(timing.tago, period, 1);
tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+ if (dsi->slave)
+ return tegra_dsi_set_phy_timing(dsi->slave);
+
return 0;
}
@@ -426,26 +449,59 @@ static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
return 0;
}
-static int tegra_output_dsi_enable(struct tegra_output *output)
+static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
+ unsigned int size)
+{
+ u32 value;
+
+ tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
+
+ value = DSI_GANGED_MODE_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_enable(dsi->slave);
+}
+
+static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
+{
+ if (dsi->master)
+ return dsi->master->lanes + dsi->lanes;
+
+ if (dsi->slave)
+ return dsi->lanes + dsi->slave->lanes;
+
+ return dsi->lanes;
+}
+
+static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+ const struct drm_display_mode *mode)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
- struct drm_display_mode *mode = &dc->base.mode;
unsigned int hact, hsw, hbp, hfp, i, mul, div;
- struct tegra_dsi *dsi = to_dsi(output);
enum tegra_dsi_format format;
- unsigned long value;
const u32 *pkt_seq;
+ u32 value;
int err;
- if (dsi->enabled)
- return 0;
-
if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
pkt_seq = pkt_seq_video_non_burst_sync_pulses;
- } else {
+ } else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
pkt_seq = pkt_seq_video_non_burst_sync_events;
+ } else {
+ DRM_DEBUG_KMS("Command mode\n");
+ pkt_seq = pkt_seq_command_mode;
}
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
@@ -456,61 +512,136 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
if (err < 0)
return err;
- err = clk_enable(dsi->clk);
- if (err < 0)
- return err;
-
- reset_control_deassert(dsi->rst);
-
value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
DSI_CONTROL_LANES(dsi->lanes - 1) |
- DSI_CONTROL_SOURCE(dc->pipe);
+ DSI_CONTROL_SOURCE(pipe);
tegra_dsi_writel(dsi, value, DSI_CONTROL);
- tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+ tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
- value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
- DSI_HOST_CONTROL_ECC;
+ value = DSI_HOST_CONTROL_HS;
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
+
if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
value |= DSI_CONTROL_HS_CLK_CTRL;
+
value &= ~DSI_CONTROL_TX_TRIG(3);
- value &= ~DSI_CONTROL_DCS_ENABLE;
+
+ /* enable DCS commands for command mode */
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO)
+ value &= ~DSI_CONTROL_DCS_ENABLE;
+ else
+ value |= DSI_CONTROL_DCS_ENABLE;
+
value |= DSI_CONTROL_VIDEO_ENABLE;
value &= ~DSI_CONTROL_HOST_ENABLE;
tegra_dsi_writel(dsi, value, DSI_CONTROL);
- err = tegra_dsi_set_phy_timing(dsi);
- if (err < 0)
- return err;
-
for (i = 0; i < NUM_PKT_SEQ; i++)
tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
- /* horizontal active pixels */
- hact = mode->hdisplay * mul / div;
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+ /* horizontal active pixels */
+ hact = mode->hdisplay * mul / div;
- /* horizontal sync width */
- hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
- hsw -= 10;
+ /* horizontal sync width */
+ hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+ hsw -= 10;
- /* horizontal back porch */
- hbp = (mode->htotal - mode->hsync_end) * mul / div;
- hbp -= 14;
+ /* horizontal back porch */
+ hbp = (mode->htotal - mode->hsync_end) * mul / div;
+ hbp -= 14;
- /* horizontal front porch */
- hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
- hfp -= 8;
+ /* horizontal front porch */
+ hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+ hfp -= 8;
- tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
- tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
- tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
- tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+ tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
- /* set SOL delay */
- tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+ /* set SOL delay (for non-burst mode only) */
+ tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+ /* TODO: implement ganged mode */
+ } else {
+ u16 bytes;
+
+ if (dsi->master || dsi->slave) {
+ /*
+ * For ganged mode, assume symmetric left-right mode.
+ */
+ bytes = 1 + (mode->hdisplay / 2) * mul / div;
+ } else {
+ /* 1 byte (DCS command) + pixel data */
+ bytes = 1 + mode->hdisplay * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
+
+ value = MIPI_DCS_WRITE_MEMORY_START << 8 |
+ MIPI_DCS_WRITE_MEMORY_CONTINUE;
+ tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+
+ /* set SOL delay */
+ if (dsi->master || dsi->slave) {
+ unsigned int lanes = tegra_dsi_get_lanes(dsi);
+ unsigned long delay, bclk, bclk_ganged;
+
+ /* SOL to valid, valid to FIFO and FIFO write delay */
+ delay = 4 + 4 + 2;
+ delay = DIV_ROUND_UP(delay * mul, div * lanes);
+ /* FIFO read delay */
+ delay = delay + 6;
+
+ bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+ bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+ value = bclk - bclk_ganged + delay + 20;
+ } else {
+ /* TODO: revisit for non-ganged mode */
+ value = 8 * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+ }
+
+ if (dsi->slave) {
+ err = tegra_dsi_configure(dsi->slave, pipe, mode);
+ if (err < 0)
+ return err;
+
+ /*
+ * TODO: Support modes other than symmetrical left-right
+ * split.
+ */
+ tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
+ tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
+ mode->hdisplay / 2);
+ }
+
+ return 0;
+}
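
For command (host-driven) mode the line length and SOL delay are derived rather than taken from the sync timings. A worked example with made-up numbers, mirroring the arithmetic above: a 2560-pixel-wide RGB888 mode (mul = 3, div = 1) split symmetrically over two controllers sends 1 + (2560 / 2) * 3 = 3841 bytes per line on each link; with htotal = 2720 and 2 x 4 lanes the SOL delay works out as follows:

	/* Illustrative only -- made-up mode numbers, same formula as above. */
	static u32 example_ganged_sol_delay(void)
	{
		unsigned int mul = 3, div = 1, lanes = 8;	/* RGB888, 2 x 4 lanes */
		unsigned int htotal = 2720;
		unsigned long delay, bclk, bclk_ganged;

		delay = DIV_ROUND_UP((4 + 4 + 2) * mul, div * lanes) + 6;	/* 4 + 6 = 10 */
		bclk = DIV_ROUND_UP(htotal * mul, div * lanes);			/* 1020 */
		bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);		/* 510 */

		return bclk - bclk_ganged + delay + 20;				/* 540 */
	}
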
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ const struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_dsi *dsi = to_dsi(output);
+ u32 value;
+ int err;
+
+ if (dsi->enabled)
+ return 0;
+
+ err = tegra_dsi_configure(dsi, dc->pipe, mode);
+ if (err < 0)
+ return err;
/* enable display controller */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
@@ -531,28 +662,79 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
/* enable DSI controller */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
- value |= DSI_POWER_CONTROL_ENABLE;
- tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+ tegra_dsi_enable(dsi);
dsi->enabled = true;
return 0;
}
+static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & DSI_STATUS_IDLE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+ value &= ~DSI_CONTROL_VIDEO_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_video_disable(dsi->slave);
+}
+
+static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
+{
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ if (dsi->slave) {
+ tegra_dsi_ganged_disable(dsi->slave);
+ tegra_dsi_ganged_disable(dsi);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_disable(dsi->slave);
+
+ usleep_range(5000, 10000);
+}
+
static int tegra_output_dsi_disable(struct tegra_output *output)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct tegra_dsi *dsi = to_dsi(output);
unsigned long value;
+ int err;
if (!dsi->enabled)
return 0;
- /* disable DSI controller */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
- value &= ~DSI_POWER_CONTROL_ENABLE;
- tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+ tegra_dsi_video_disable(dsi);
/*
* The following accesses registers of the display controller, so make
@@ -576,39 +758,68 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
- clk_disable(dsi->clk);
+ err = tegra_dsi_wait_idle(dsi, 100);
+ if (err < 0)
+ dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+ tegra_dsi_disable(dsi);
dsi->enabled = false;
return 0;
}
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+ unsigned int vrefresh)
+{
+ unsigned int timeout;
+ u32 value;
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ if (dsi->slave)
+ tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
static int tegra_output_dsi_setup_clock(struct tegra_output *output,
struct clk *clk, unsigned long pclk,
unsigned int *divp)
{
struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
struct drm_display_mode *mode = &dc->base.mode;
- unsigned int timeout, mul, div, vrefresh;
struct tegra_dsi *dsi = to_dsi(output);
- unsigned long bclk, plld, value;
+ unsigned int mul, div, vrefresh, lanes;
+ unsigned long bclk, plld;
int err;
+ lanes = tegra_dsi_get_lanes(dsi);
+
err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
if (err < 0)
return err;
- DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
+ DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes);
vrefresh = drm_mode_vrefresh(mode);
DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
/* compute byte clock */
- bclk = (pclk * mul) / (div * dsi->lanes);
+ bclk = (pclk * mul) / (div * lanes);
/*
* Compute bit clock and round up to the next MHz.
*/
- plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
+ plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
/*
* We divide the frequency by two here, but we make up for that by
@@ -640,25 +851,17 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
* not working properly otherwise. Perhaps the PLLs cannot generate
* frequencies sufficiently high.
*/
- *divp = ((8 * mul) / (div * dsi->lanes)) - 2;
+ *divp = ((8 * mul) / (div * lanes)) - 2;
/*
* XXX: Move the below somewhere else so that we don't need to have
* access to the vrefresh in this function?
*/
+ tegra_dsi_set_timeout(dsi, bclk, vrefresh);
- /* one frame high-speed transmission timeout */
- timeout = (bclk / vrefresh) / 512;
- value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
-
- /* 2 ms peripheral timeout for panel */
- timeout = 2 * bclk / 512 * 1000;
- value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
- tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
-
- value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
- tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+ err = tegra_dsi_set_phy_timing(dsi);
+ if (err < 0)
+ return err;
return 0;
}
@@ -695,7 +898,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
{
- unsigned long value;
+ u32 value;
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
@@ -720,14 +923,17 @@ static int tegra_dsi_init(struct host1x_client *client)
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
- dsi->output.type = TEGRA_OUTPUT_DSI;
- dsi->output.dev = client->dev;
- dsi->output.ops = &dsi_ops;
-
- err = tegra_output_init(drm, &dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output setup failed: %d\n", err);
- return err;
+ /* Gangsters must not register their own outputs. */
+ if (!dsi->master) {
+ dsi->output.type = TEGRA_OUTPUT_DSI;
+ dsi->output.dev = client->dev;
+ dsi->output.ops = &dsi_ops;
+
+ err = tegra_output_init(drm, &dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
@@ -736,12 +942,6 @@ static int tegra_dsi_init(struct host1x_client *client)
dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
}
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- return err;
- }
-
return 0;
}
@@ -756,16 +956,20 @@ static int tegra_dsi_exit(struct host1x_client *client)
dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
}
- err = tegra_output_disable(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output failed to disable: %d\n", err);
- return err;
- }
-
- err = tegra_output_exit(&dsi->output);
- if (err < 0) {
- dev_err(client->dev, "output cleanup failed: %d\n", err);
- return err;
+ if (!dsi->master) {
+ err = tegra_output_disable(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n",
+ err);
+ return err;
+ }
+
+ err = tegra_output_exit(&dsi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n",
+ err);
+ return err;
+ }
}
return 0;
@@ -792,20 +996,324 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
return 0;
}
+static const char * const error_report[16] = {
+ "SoT Error",
+ "SoT Sync Error",
+ "EoT Sync Error",
+ "Escape Mode Entry Command Error",
+ "Low-Power Transmit Sync Error",
+ "Peripheral Timeout Error",
+ "False Control Error",
+ "Contention Detected",
+ "ECC Error, single-bit",
+ "ECC Error, multi-bit",
+ "Checksum Error",
+ "DSI Data Type Not Recognized",
+ "DSI VC ID Invalid",
+ "Invalid Transmission Length",
+ "Reserved",
+ "DSI Protocol Violation",
+};
+
+static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
+ const struct mipi_dsi_msg *msg,
+ size_t count)
+{
+ u8 *rx = msg->rx_buf;
+ unsigned int i, j, k;
+ size_t size = 0;
+ u16 errors;
+ u32 value;
+
+ /* read and parse packet header */
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ switch (value & 0x3f) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ errors = (value >> 8) & 0xffff;
+ dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
+ errors);
+ for (i = 0; i < ARRAY_SIZE(error_report); i++)
+ if (errors & BIT(i))
+ dev_dbg(dsi->dev, " %2u: %s\n", i,
+ error_report[i]);
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ size = 1;
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ rx[1] = (value >> 16) & 0xff;
+ size = 2;
+ break;
+
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n",
+ value & 0x3f);
+ return -EPROTO;
+ }
+
+ size = min(size, msg->rx_len);
+
+ if (msg->rx_buf && size > 0) {
+ for (i = 0, j = 0; i < count - 1; i++, j += 4) {
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
+ rx[j + k] = (value >> (k << 3)) & 0xff;
+ }
+ }
+
+ return size;
+}
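
tegra_dsi_read_response() first decodes the header word read back from DSI_RD_DATA: the low six bits give the response type, and for long read responses bits 8-23 carry the payload byte count. A sketch with a made-up header word (0x0000051c would be a DCS long read response carrying five payload bytes):

	static size_t example_parse_rx_header(u32 value)
	{
		/* e.g. value == 0x0000051c: type 0x1c, word count 0x0005 */
		if ((value & 0x3f) != MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)
			return 0;

		return ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);	/* 5 */
	}
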
+
+static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if ((value & DSI_TRIGGER_HOST) == 0)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
+ return -ETIMEDOUT;
+}
+
+static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
+ unsigned long timeout)
+{
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
+ u8 count = value & 0x1f;
+
+ if (count > 0)
+ return count;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("peripheral returned no data\n");
+ return -ETIMEDOUT;
+}
+
+static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
+ const void *buffer, size_t size)
+{
+ const u8 *buf = buffer;
+ size_t i, j;
+ u32 value;
+
+ for (j = 0; j < size; j += 4) {
+ value = 0;
+
+ for (i = 0; i < 4 && j + i < size; i++)
+ value |= buf[j + i] << (i << 3);
+
+ tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+ }
+}
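
tegra_dsi_writesl() packs the payload into the write FIFO four bytes at a time, least-significant byte first, so a 5-byte payload { 0x2c, 0x11, 0x22, 0x33, 0x44 } becomes the words 0x3322112c and 0x00000044. A sketch of the per-word packing:

	static u32 example_pack_word(const u8 *buf, size_t remaining)
	{
		u32 value = 0;
		size_t i;

		for (i = 0; i < 4 && i < remaining; i++)
			value |= buf[i] << (i << 3);

		return value;	/* { 0x2c, 0x11, 0x22, 0x33 } -> 0x3322112c */
	}
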
+
+static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct mipi_dsi_packet packet;
+ const u8 *header;
+ size_t count;
+ ssize_t err;
+ u32 value;
+
+ err = mipi_dsi_create_packet(&packet, msg);
+ if (err < 0)
+ return err;
+
+ header = packet.header;
+
+ /* maximum FIFO depth is 1920 words */
+ if (packet.size > dsi->video_fifo_depth * 4)
+ return -ENOSPC;
+
+ /* reset underflow/overflow flags */
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
+ value = DSI_HOST_CONTROL_FIFO_RESET;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ usleep_range(10, 20);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(5000, 10000);
+
+ value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
+ DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
+
+ if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
+ value |= DSI_HOST_CONTROL_HS;
+
+ /*
+ * The host FIFO has a maximum of 64 words, so larger transmissions
+ * need to use the video FIFO.
+ */
+ if (packet.size > dsi->host_fifo_depth * 4)
+ value |= DSI_HOST_CONTROL_FIFO_SEL;
+
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ /*
+ * For reads and messages with explicitly requested ACK, generate a
+ * BTA sequence after the transmission of the packet.
+ */
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+ value |= DSI_HOST_CONTROL_PKT_BTA;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ }
+
+ value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ /* write packet header, ECC is generated by hardware */
+ value = header[2] << 16 | header[1] << 8 | header[0];
+ tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+
+ /* write payload (if any) */
+ if (packet.payload_length > 0)
+ tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
+ packet.payload_length);
+
+ err = tegra_dsi_transmit(dsi, 250);
+ if (err < 0)
+ return err;
+
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ err = tegra_dsi_wait_for_response(dsi, 250);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+ switch (value) {
+ case 0x84:
+ /*
+ dev_dbg(dsi->dev, "ACK\n");
+ */
+ break;
+
+ case 0x87:
+ /*
+ dev_dbg(dsi->dev, "ESCAPE\n");
+ */
+ break;
+
+ default:
+ dev_err(dsi->dev, "unknown status: %08x\n", value);
+ break;
+ }
+
+ if (count > 1) {
+ err = tegra_dsi_read_response(dsi, msg, count);
+ if (err < 0)
+ dev_err(dsi->dev,
+ "failed to parse response: %zd\n",
+ err);
+ else {
+ /*
+ * For read commands, return the number of
+ * bytes returned by the peripheral.
+ */
+ count = err;
+ }
+ }
+ } else {
+ /*
+ * For write commands, we have transmitted the 4-byte header
+ * plus the variable-length payload.
+ */
+ count = 4 + packet.payload_length;
+ }
+
+ return count;
+}
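
The new .transfer callback is what a DSI peripheral driver ultimately reaches when it sends a command; normally that happens through the helpers in drm_mipi_dsi.c rather than by calling the op directly. A hedged sketch of a raw DCS short write ending up in tegra_dsi_host_transfer() (example_dcs_exit_sleep() is hypothetical):

	static ssize_t example_dcs_exit_sleep(struct mipi_dsi_device *device)
	{
		u8 payload[] = { MIPI_DCS_EXIT_SLEEP_MODE };
		struct mipi_dsi_msg msg = {
			.channel = device->channel,
			.type = MIPI_DSI_DCS_SHORT_WRITE,
			.tx_buf = payload,
			.tx_len = sizeof(payload),
		};

		/* dispatched to tegra_dsi_host_transfer() via the host ops */
		return device->host->ops->transfer(device->host, &msg);
	}
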
+
+static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ /* make sure both DSI controllers share the same PLL */
+ parent = clk_get_parent(dsi->slave->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct tegra_dsi *dsi = host_to_tegra(host);
- struct tegra_output *output = &dsi->output;
dsi->flags = device->mode_flags;
dsi->format = device->format;
dsi->lanes = device->lanes;
- output->panel = of_drm_find_panel(device->dev.of_node);
- if (output->panel) {
- if (output->connector.dev)
+ if (dsi->slave) {
+ int err;
+
+ dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
+ dev_name(&device->dev));
+
+ err = tegra_dsi_ganged_setup(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /*
+ * Slaves don't have a panel associated with them, so they provide
+ * merely the second channel.
+ */
+ if (!dsi->master) {
+ struct tegra_output *output = &dsi->output;
+
+ output->panel = of_drm_find_panel(device->dev.of_node);
+ if (output->panel && output->connector.dev) {
+ drm_panel_attach(output->panel, &output->connector);
drm_helper_hpd_irq_event(output->connector.dev);
+ }
}
return 0;
@@ -818,10 +1326,10 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
struct tegra_output *output = &dsi->output;
if (output->panel && &device->dev == output->panel->dev) {
+ output->panel = NULL;
+
if (output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
-
- output->panel = NULL;
}
return 0;
@@ -830,8 +1338,29 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
.attach = tegra_dsi_host_attach,
.detach = tegra_dsi_host_detach,
+ .transfer = tegra_dsi_host_transfer,
};
+static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+ if (np) {
+ struct platform_device *gangster = of_find_device_by_node(np);
+
+ dsi->slave = platform_get_drvdata(gangster);
+ of_node_put(np);
+
+ if (!dsi->slave)
+ return -EPROBE_DEFER;
+
+ dsi->slave->master = dsi;
+ }
+
+ return 0;
+}
+
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
@@ -843,11 +1372,19 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return -ENOMEM;
dsi->output.dev = dsi->dev = &pdev->dev;
+ dsi->video_fifo_depth = 1920;
+ dsi->host_fifo_depth = 64;
+
+ err = tegra_dsi_ganged_probe(dsi);
+ if (err < 0)
+ return err;
err = tegra_output_probe(&dsi->output);
if (err < 0)
return err;
+ dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
+
/*
* Assume these values by default. When a DSI peripheral driver
* attaches to the DSI host, the parameters will be taken from
@@ -861,68 +1398,83 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->rst))
return PTR_ERR(dsi->rst);
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n",
+ err);
+ return err;
+ }
+
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
- return PTR_ERR(dsi->clk);
+ err = PTR_ERR(dsi->clk);
+ goto reset;
}
err = clk_prepare_enable(dsi->clk);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable DSI clock\n");
- return err;
+ goto reset;
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
dev_err(&pdev->dev, "cannot get low-power clock\n");
- return PTR_ERR(dsi->clk_lp);
+ err = PTR_ERR(dsi->clk_lp);
+ goto disable_clk;
}
err = clk_prepare_enable(dsi->clk_lp);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable low-power clock\n");
- return err;
+ goto disable_clk;
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
dev_err(&pdev->dev, "cannot get parent clock\n");
- return PTR_ERR(dsi->clk_parent);
- }
-
- err = clk_prepare_enable(dsi->clk_parent);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable parent clock\n");
- return err;
+ err = PTR_ERR(dsi->clk_parent);
+ goto disable_clk_lp;
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
dev_err(&pdev->dev, "cannot get VDD supply\n");
- return PTR_ERR(dsi->vdd);
+ err = PTR_ERR(dsi->vdd);
+ goto disable_clk_lp;
}
err = regulator_enable(dsi->vdd);
if (err < 0) {
dev_err(&pdev->dev, "cannot enable VDD supply\n");
- return err;
+ goto disable_clk_lp;
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto disable_vdd;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto disable_vdd;
+ }
dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto disable_vdd;
+ }
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ goto mipi_free;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -930,7 +1482,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
err = mipi_dsi_host_register(&dsi->host);
if (err < 0) {
dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
- return err;
+ goto mipi_free;
}
INIT_LIST_HEAD(&dsi->client.list);
@@ -941,12 +1493,26 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto unregister;
}
platform_set_drvdata(pdev, dsi);
return 0;
+
+unregister:
+ mipi_dsi_host_unregister(&dsi->host);
+mipi_free:
+ tegra_mipi_free(dsi->mipi);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+reset:
+ reset_control_assert(dsi->rst);
+ return err;
}
static int tegra_dsi_remove(struct platform_device *pdev)
@@ -965,7 +1531,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
tegra_mipi_free(dsi->mipi);
regulator_disable(dsi->vdd);
- clk_disable_unprepare(dsi->clk_parent);
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
reset_control_assert(dsi->rst);
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 5ce610d08d77..bad1006a5150 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -21,9 +21,16 @@
#define DSI_INT_STATUS 0x0d
#define DSI_INT_MASK 0x0e
#define DSI_HOST_CONTROL 0x0f
+#define DSI_HOST_CONTROL_FIFO_RESET (1 << 21)
+#define DSI_HOST_CONTROL_CRC_RESET (1 << 20)
+#define DSI_HOST_CONTROL_TX_TRIG_SOL (0 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_FIFO (1 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_HOST (2 << 12)
#define DSI_HOST_CONTROL_RAW (1 << 6)
#define DSI_HOST_CONTROL_HS (1 << 5)
-#define DSI_HOST_CONTROL_BTA (1 << 2)
+#define DSI_HOST_CONTROL_FIFO_SEL (1 << 4)
+#define DSI_HOST_CONTROL_IMM_BTA (1 << 3)
+#define DSI_HOST_CONTROL_PKT_BTA (1 << 2)
#define DSI_HOST_CONTROL_CS (1 << 1)
#define DSI_HOST_CONTROL_ECC (1 << 0)
#define DSI_CONTROL 0x10
@@ -39,9 +46,13 @@
#define DSI_SOL_DELAY 0x11
#define DSI_MAX_THRESHOLD 0x12
#define DSI_TRIGGER 0x13
+#define DSI_TRIGGER_HOST (1 << 1)
+#define DSI_TRIGGER_VIDEO (1 << 0)
#define DSI_TX_CRC 0x14
#define DSI_STATUS 0x15
#define DSI_STATUS_IDLE (1 << 10)
+#define DSI_STATUS_UNDERFLOW (1 << 9)
+#define DSI_STATUS_OVERFLOW (1 << 8)
#define DSI_INIT_SEQ_CONTROL 0x1a
#define DSI_INIT_SEQ_DATA_0 0x1b
#define DSI_INIT_SEQ_DATA_1 0x1c
@@ -104,6 +115,7 @@
#define DSI_PAD_CONTROL_3 0x51
#define DSI_PAD_CONTROL_4 0x52
#define DSI_GANGED_MODE_CONTROL 0x53
+#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
#define DSI_GANGED_MODE_START 0x54
#define DSI_GANGED_MODE_SIZE 0x55
#define DSI_RAW_DATA_BYTE_COUNT 0x56
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 3513d12d5aa1..e9c715d89261 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -65,8 +65,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
for (i = 0; i < fb->num_planes; i++) {
struct tegra_bo *bo = fb->planes[i];
- if (bo)
+ if (bo) {
+ if (bo->pages && bo->vaddr)
+ vunmap(bo->vaddr);
+
drm_gem_object_unreference_unlocked(&bo->gem);
+ }
}
drm_framebuffer_cleanup(framebuffer);
@@ -223,14 +227,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info = framebuffer_alloc(0, drm->dev);
if (!info) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
- tegra_bo_free_object(&bo->gem);
+ drm_gem_object_unreference_unlocked(&bo->gem);
return -ENOMEM;
}
fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
if (IS_ERR(fbdev->fb)) {
- dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
err = PTR_ERR(fbdev->fb);
+ dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+ err);
+ drm_gem_object_unreference_unlocked(&bo->gem);
goto release;
}
@@ -254,6 +260,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
+ if (bo->pages) {
+ bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->vaddr) {
+ dev_err(drm->dev, "failed to vmap() framebuffer\n");
+ err = -ENOMEM;
+ goto destroy;
+ }
+ }
+
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
@@ -289,6 +305,11 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
return fbdev;
}
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+ kfree(fbdev);
+}
+
static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
unsigned int preferred_bpp,
unsigned int num_crtc,
@@ -299,19 +320,21 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
if (err < 0) {
- dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+ dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
+ err);
return err;
}
err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
if (err < 0) {
- dev_err(drm->dev, "failed to add connectors\n");
+ dev_err(drm->dev, "failed to add connectors: %d\n", err);
goto fini;
}
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
- dev_err(drm->dev, "failed to set initial configuration\n");
+ dev_err(drm->dev, "failed to set initial configuration: %d\n",
+ err);
goto fini;
}
@@ -322,7 +345,7 @@ fini:
return err;
}
-static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
struct fb_info *info = fbdev->base.fbdev;
@@ -341,11 +364,11 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
if (fbdev->fb) {
drm_framebuffer_unregister_private(&fbdev->fb->base);
- tegra_fb_destroy(&fbdev->fb->base);
+ drm_framebuffer_remove(&fbdev->fb->base);
}
drm_fb_helper_fini(&fbdev->base);
- kfree(fbdev);
+ tegra_fbdev_free(fbdev);
}
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
@@ -393,6 +416,15 @@ int tegra_drm_fb_prepare(struct drm_device *drm)
return 0;
}
+void tegra_drm_fb_free(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_free(tegra->fbdev);
+#endif
+}
+
int tegra_drm_fb_init(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
@@ -413,6 +445,6 @@ void tegra_drm_fb_exit(struct drm_device *drm)
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
- tegra_fbdev_free(tegra->fbdev);
+ tegra_fbdev_exit(tegra->fbdev);
#endif
}
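
The fbdev console needs a kernel virtual mapping of the framebuffer even when the BO is backed by individual shmem pages, which is what the vmap() added to tegra_fbdev_probe() provides. A sketch of the CPU-mapping decision for both BO flavours (assumes <linux/vmalloc.h> for vmap()):

	static void *example_bo_vmap(struct tegra_bo *bo)
	{
		if (!bo->pages)		/* contiguous BOs already carry a vaddr */
			return bo->vaddr;

		return vmap(bo->pages, bo->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
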
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8ae..da32086cbeaf 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -14,6 +14,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/iommu.h>
#include <drm/tegra_drm.h>
#include "drm.h"
@@ -91,13 +92,90 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.kunmap = tegra_bo_kunmap,
};
-static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+/*
+ * A generic iommu_map_sg() function is being reviewed and will hopefully be
+ * merged soon. At that point this function can be dropped in favour of the
+ * one provided by the IOMMU API.
+ */
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot)
{
- dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ struct scatterlist *s;
+ size_t offset = 0;
+ unsigned int i;
+ int err;
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ size_t length = s->offset + s->length;
+
+ err = iommu_map(domain, iova + offset, phys, length, prot);
+ if (err < 0) {
+ iommu_unmap(domain, iova, offset);
+ return err;
+ }
+
+ offset += length;
+ }
+
+ return offset;
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
- unsigned long flags)
+static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ ssize_t err;
+
+ if (bo->mm)
+ return -EBUSY;
+
+ bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
+ if (!bo->mm)
+ return -ENOMEM;
+
+ err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
+ PAGE_SIZE, 0, 0, 0);
+ if (err < 0) {
+ dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
+ err);
+ goto free;
+ }
+
+ bo->paddr = bo->mm->start;
+
+ err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->sgt->nents, prot);
+ if (err < 0) {
+ dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
+ goto remove;
+ }
+
+ bo->size = err;
+
+ return 0;
+
+remove:
+ drm_mm_remove_node(bo->mm);
+free:
+ kfree(bo->mm);
+ return err;
+}
+
+static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ if (!bo->mm)
+ return 0;
+
+ iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ drm_mm_remove_node(bo->mm);
+ kfree(bo->mm);
+
+ return 0;
+}
+
+static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
+ size_t size)
{
struct tegra_bo *bo;
int err;
@@ -109,22 +187,96 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
host1x_bo_init(&bo->base, &tegra_bo_ops);
size = round_up(size, PAGE_SIZE);
- bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!bo->vaddr) {
- dev_err(drm->dev, "failed to allocate buffer with size %u\n",
- size);
- err = -ENOMEM;
- goto err_dma;
- }
-
err = drm_gem_object_init(drm, &bo->gem, size);
- if (err)
- goto err_init;
+ if (err < 0)
+ goto free;
err = drm_gem_create_mmap_offset(&bo->gem);
- if (err)
- goto err_mmap;
+ if (err < 0)
+ goto release;
+
+ return bo;
+
+release:
+ drm_gem_object_release(&bo->gem);
+free:
+ kfree(bo);
+ return ERR_PTR(err);
+}
+
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
+{
+ if (bo->pages) {
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+ } else if (bo->vaddr) {
+ dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
+ bo->paddr);
+ }
+}
+
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
+ size_t size)
+{
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages))
+ return PTR_ERR(bo->pages);
+
+ bo->num_pages = size >> PAGE_SHIFT;
+
+ bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(bo->sgt)) {
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(bo->sgt);
+ }
+
+ return 0;
+}
+
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
+ size_t size)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ if (tegra->domain) {
+ err = tegra_bo_get_pages(drm, bo, size);
+ if (err < 0)
+ return err;
+
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0) {
+ tegra_bo_free(drm, bo);
+ return err;
+ }
+ } else {
+ bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!bo->vaddr) {
+ dev_err(drm->dev,
+ "failed to allocate buffer of size %zu\n",
+ size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+ unsigned long flags)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_bo_alloc_object(drm, size);
+ if (IS_ERR(bo))
+ return bo;
+
+ err = tegra_bo_alloc(drm, bo, size);
+ if (err < 0)
+ goto release;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
@@ -134,69 +286,52 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
return bo;
-err_mmap:
+release:
drm_gem_object_release(&bo->gem);
-err_init:
- tegra_bo_destroy(drm, bo);
-err_dma:
kfree(bo);
-
return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
struct drm_device *drm,
- unsigned int size,
+ size_t size,
unsigned long flags,
- unsigned int *handle)
+ u32 *handle)
{
struct tegra_bo *bo;
- int ret;
+ int err;
bo = tegra_bo_create(drm, size, flags);
if (IS_ERR(bo))
return bo;
- ret = drm_gem_handle_create(file, &bo->gem, handle);
- if (ret)
- goto err;
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+ if (err) {
+ tegra_bo_free_object(&bo->gem);
+ return ERR_PTR(err);
+ }
drm_gem_object_unreference_unlocked(&bo->gem);
return bo;
-
-err:
- tegra_bo_free_object(&bo->gem);
- return ERR_PTR(ret);
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
struct dma_buf *buf)
{
+ struct tegra_drm *tegra = drm->dev_private;
struct dma_buf_attachment *attach;
struct tegra_bo *bo;
- ssize_t size;
int err;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- host1x_bo_init(&bo->base, &tegra_bo_ops);
- size = round_up(buf->size, PAGE_SIZE);
-
- err = drm_gem_object_init(drm, &bo->gem, size);
- if (err < 0)
- goto free;
-
- err = drm_gem_create_mmap_offset(&bo->gem);
- if (err < 0)
- goto release;
+ bo = tegra_bo_alloc_object(drm, buf->size);
+ if (IS_ERR(bo))
+ return bo;
attach = dma_buf_attach(buf, drm->dev);
if (IS_ERR(attach)) {
err = PTR_ERR(attach);
- goto free_mmap;
+ goto free;
}
get_dma_buf(buf);
@@ -212,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- if (bo->sgt->nents > 1) {
- err = -EINVAL;
- goto detach;
+ if (tegra->domain) {
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0)
+ goto detach;
+ } else {
+ if (bo->sgt->nents > 1) {
+ err = -EINVAL;
+ goto detach;
+ }
+
+ bo->paddr = sg_dma_address(bo->sgt->sgl);
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
bo->gem.import_attach = attach;
return bo;
@@ -228,47 +370,41 @@ detach:
dma_buf_detach(buf, attach);
dma_buf_put(buf);
-free_mmap:
- drm_gem_free_mmap_offset(&bo->gem);
-release:
- drm_gem_object_release(&bo->gem);
free:
+ drm_gem_object_release(&bo->gem);
kfree(bo);
-
return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
+ struct tegra_drm *tegra = gem->dev->dev_private;
struct tegra_bo *bo = to_tegra_bo(gem);
+ if (tegra->domain)
+ tegra_bo_iommu_unmap(tegra, bo);
+
if (gem->import_attach) {
dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
- tegra_bo_destroy(gem->dev, bo);
+ tegra_bo_free(gem->dev, bo);
}
- drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
-
kfree(bo);
}
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
- min_pitch = round_up(min_pitch, tegra->pitch_align);
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
-
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ args->pitch = round_up(min_pitch, tegra->pitch_align);
+ args->size = args->pitch * args->height;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
@@ -279,7 +415,7 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
}
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- uint32_t handle, uint64_t *offset)
+ u32 handle, u64 *offset)
{
struct drm_gem_object *gem;
struct tegra_bo *bo;
@@ -304,7 +440,38 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
return 0;
}
+static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *gem = vma->vm_private_data;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct page *page;
+ pgoff_t offset;
+ int err;
+
+ if (!bo->pages)
+ return VM_FAULT_SIGBUS;
+
+ offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+ page = bo->pages[offset];
+
+ err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ switch (err) {
+ case -EAGAIN:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ return VM_FAULT_NOPAGE;
+
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ }
+
+ return VM_FAULT_SIGBUS;
+}
+
const struct vm_operations_struct tegra_bo_vm_ops = {
+ .fault = tegra_bo_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
@@ -322,12 +489,30 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
gem = vma->vm_private_data;
bo = to_tegra_bo(gem);
- ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
- if (ret)
- drm_gem_vm_close(vma);
+ if (!bo->pages) {
+ unsigned long vm_pgoff = vma->vm_pgoff;
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
+ bo->paddr, gem->size);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ return ret;
+ }
+
+ vma->vm_pgoff = vm_pgoff;
+ } else {
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
- return ret;
+ vma->vm_page_prot = pgprot_writecombine(prot);
+ }
+
+ return 0;
}
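
From userspace the two mmap paths above are invisible: a dumb buffer is always mapped through the fake offset produced by drm_gem_create_mmap_offset(), and tegra_drm_mmap() picks either the contiguous (dma_mmap_writecombine) or the page-backed (fault-driven) path. A userspace sketch, assuming libdrm for drmIoctl():

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>
	#include <drm/drm.h>

	static void *example_map_dumb(int fd, uint32_t handle, size_t size)
	{
		struct drm_mode_map_dumb arg = { .handle = handle };

		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg))
			return MAP_FAILED;

		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, arg.offset);
	}
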
static struct sg_table *
@@ -342,21 +527,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return NULL;
- if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
- kfree(sgt);
- return NULL;
- }
+ if (bo->pages) {
+ struct scatterlist *sg;
+ unsigned int i;
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
+ if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
+ goto free;
+
+ for_each_sg(sgt->sgl, sg, bo->num_pages, i)
+ sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
+
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+ } else {
+ if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ goto free;
+
+ sg_dma_address(sgt->sgl) = bo->paddr;
+ sg_dma_len(sgt->sgl) = gem->size;
+ }
return sgt;
+
+free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ if (bo->pages)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
sg_free_table(sgt);
kfree(sgt);
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 6538b56780c2..6c5f12ac0087 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -38,6 +38,12 @@ struct tegra_bo {
dma_addr_t paddr;
void *vaddr;
+ struct drm_mm_node *mm;
+ unsigned long num_pages;
+ struct page **pages;
+ /* size of IOMMU mapping */
+ size_t size;
+
struct tegra_bo_tiling tiling;
};
@@ -46,18 +52,18 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
return container_of(gem, struct tegra_bo, gem);
}
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
unsigned long flags);
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
struct drm_device *drm,
- unsigned int size,
+ size_t size,
unsigned long flags,
- unsigned int *handle);
+ u32 *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- uint32_t handle, uint64_t *offset);
+ u32 handle, u64 *offset);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 0c67d7eebc94..6a5c7b81fbc5 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -157,22 +157,18 @@ static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
static void tegra_encoder_prepare(struct drm_encoder *encoder)
{
+ tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void tegra_encoder_commit(struct drm_encoder *encoder)
{
+ tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static void tegra_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted)
{
- struct tegra_output *output = encoder_to_output(encoder);
- int err;
-
- err = tegra_output_enable(output);
- if (err < 0)
- dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -187,7 +183,8 @@ static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
- drm_helper_hpd_irq_event(output->connector.dev);
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
return IRQ_HANDLED;
}
@@ -259,6 +256,13 @@ int tegra_output_probe(struct tegra_output *output)
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ /*
+ * Disable the interrupt until the connector has been
+ * initialized to avoid a race in the hotplug interrupt
+ * handler.
+ */
+ disable_irq(output->hpd_irq);
}
return 0;
@@ -324,10 +328,27 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
output->encoder.possible_crtcs = 0x3;
+ /*
+ * The connector is now registered and ready to receive hotplug events
+ * so the hotplug interrupt can be enabled.
+ */
+ if (gpio_is_valid(output->hpd_gpio))
+ enable_irq(output->hpd_irq);
+
return 0;
}
int tegra_output_exit(struct tegra_output *output)
{
+ /*
+ * The connector is going away, so the interrupt must be disabled to
+ * prevent the hotplug interrupt handler from potentially crashing.
+ */
+ if (gpio_is_valid(output->hpd_gpio))
+ disable_irq(output->hpd_irq);
+
+ if (output->panel)
+ drm_panel_detach(output->panel);
+
return 0;
}
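The disable_irq()/enable_irq() calls added above simply keep the hotplug handler from touching a connector that does not exist yet, or no longer exists. The ordering can be sketched as follows; struct my_output, the handler body and the registration steps are hypothetical, only the IRQ bracketing matters:

#include <linux/interrupt.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct my_output {
	int hpd_irq;
	struct drm_connector *connector;	/* NULL until init */
};

static irqreturn_t my_hpd_handler(int irq, void *data)
{
	struct my_output *out = data;

	/* Only runs while the connector is valid: the IRQ is kept
	 * disabled outside that window. */
	if (out->connector)
		drm_helper_hpd_irq_event(out->connector->dev);

	return IRQ_HANDLED;
}

static int my_output_probe(struct my_output *out)
{
	int err = request_threaded_irq(out->hpd_irq, NULL, my_hpd_handler,
				       IRQF_ONESHOT, "hpd", out);
	if (err < 0)
		return err;

	/* Don't let the handler run before the connector exists. */
	disable_irq(out->hpd_irq);
	return 0;
}

static void my_output_init(struct my_output *out)
{
	/* ... register out->connector with DRM ... */
	enable_irq(out->hpd_irq);
}

static void my_output_exit(struct my_output *out)
{
	/* The connector is about to go away: quiesce the handler first. */
	disable_irq(out->hpd_irq);
	/* ... unregister out->connector ... */
}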
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d642d4a02134..c73588483be0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -16,6 +16,7 @@
*/
#include "drm_flip_work.h"
+#include <drm/drm_plane_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -664,12 +665,8 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+ drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
- if (ret) {
- dev_err(dev->dev, "could not allocate unref FIFO\n");
- goto fail;
- }
ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
if (ret < 0)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 79a34cbd29f5..095fca91525c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -58,8 +58,7 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fbdev_cma_hotplug_event(priv->fbdev);
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -645,7 +644,6 @@ static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
.remove = tilcdc_pdev_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "tilcdc",
.pm = &tilcdc_pm_ops,
.of_match_table = tilcdc_of_match,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 964387fc5c8f..aa0bd054d3e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn;
int ret;
@@ -67,15 +68,16 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
aflags = DRM_MM_CREATE_TOP;
+ }
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
place->fpfn, lpfn,
- DRM_MM_SEARCH_BEST,
- aflags);
+ sflags, aflags);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8ce508e76208..3820ae97a030 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr)
+ struct list_head *list, bool intr,
+ struct list_head *dups)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
__ttm_bo_unreserve(bo);
ret = -EBUSY;
+
+ } else if (ret == -EALREADY && dups) {
+ struct ttm_validate_buffer *safe = entry;
+ entry = list_prev_entry(entry, head);
+ list_del(&safe->head);
+ list_add(&safe->head, dups);
+ continue;
}
if (!ret) {
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 09874d695188..025c429050c0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
- gfp_t gfp)
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct page *p;
struct page **pages_to_free;
@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -374,7 +379,8 @@ restart:
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -383,8 +389,6 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * We need to pass sc->gfp_mask to ttm_page_pool_free().
- *
* This code is crying out for a shrinker per pool....
*/
static unsigned long
@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free,
- sc->gfp_mask);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_page_pool_free(pool, nr_free, true);
freed += nr_free - shrink_pages;
}
mutex_unlock(&lock);
@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages, GFP_KERNEL);
+ ttm_page_pool_free(pool, npages, false);
}
/*
@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
+ /* OK to use static buffer since global mutex is no longer used. */
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
- GFP_KERNEL);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
kobject_put(&_manager->kobj);
_manager = NULL;
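The use_static flag introduced above replaces the GFP-flag plumbing: the shrinker must not allocate memory while it is reclaiming, so callers that already hold the allocator's global mutex borrow a static scratch array instead of calling kmalloc(). The idea in isolation, with hypothetical names:

#include <linux/slab.h>
#include <linux/mutex.h>

#define SCRATCH_ENTRIES 64

struct my_pool;

static DEFINE_MUTEX(pool_mutex);

/* Free up to nr entries from the pool. Callers that already hold
 * pool_mutex (the shrinker, the final teardown) pass use_static and
 * reuse the static scratch array, so nothing is allocated on the
 * reclaim path. */
static unsigned int pool_free(struct my_pool *pool, unsigned int nr,
			      bool use_static)
{
	static void *static_buf[SCRATCH_ENTRIES];
	void **scratch;

	if (use_static)
		scratch = static_buf;	/* serialized by pool_mutex */
	else
		scratch = kmalloc(SCRATCH_ENTRIES * sizeof(void *),
				  GFP_KERNEL);
	if (!scratch)
		return 0;

	/* ... gather up to nr entries into scratch and release them ... */

	if (scratch != static_buf)
		kfree(scratch);
	return nr;
}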
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index c96db433f8af..01e1d27eb078 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
**/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
- gfp_t gfp)
+ bool use_static)
{
+ static struct page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ if (use_static)
+ pages_to_free = static_buf;
+ else
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -502,7 +507,8 @@ restart:
if (freed_pages)
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
- kfree(pages_to_free);
+ if (pages_to_free != static_buf)
+ kfree(pages_to_free);
return nr_free;
}
@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+ /* OK to use static buffer since global mutex is held. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+ ttm_dma_page_pool_free(pool, npages, false);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
- *
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
- sc->gfp_mask);
+ /* OK to use static buffer since global mutex is held. */
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 05c7481bfd40..195bcac0b6c8 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644
index 000000000000..ac8a66b4dfc2
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -0,0 +1,276 @@
+/*
+ * udl_dmabuf.c
+ *
+ * Copyright (c) 2014 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ bool is_mapped;
+};
+
+static int udl_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+ attach->dmabuf->size);
+
+ udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
+ if (!udl_attach)
+ return -ENOMEM;
+
+ udl_attach->dir = DMA_NONE;
+ attach->priv = udl_attach;
+
+ return 0;
+}
+
+static void udl_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+ struct sg_table *sgt;
+
+ if (!udl_attach)
+ return;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+ attach->dmabuf->size);
+
+ sgt = &udl_attach->sgt;
+
+ if (udl_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ udl_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(udl_attach);
+ attach->priv = NULL;
+}
+
+static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+ struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
+ struct drm_device *dev = obj->base.dev;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt = NULL;
+ unsigned int i;
+ int page_count;
+ int nents, ret;
+
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
+ attach->dmabuf->size, dir);
+
+ /* just return current sgt if already requested. */
+ if (udl_attach->dir == dir && udl_attach->is_mapped)
+ return &udl_attach->sgt;
+
+ if (!obj->pages) {
+ ret = udl_gem_get_pages(obj);
+ if (ret) {
+ DRM_ERROR("failed to map pages.\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ page_count = obj->base.size / PAGE_SIZE;
+ obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
+ if (IS_ERR(obj->sg)) {
+ DRM_ERROR("failed to allocate sgt.\n");
+ return ERR_CAST(obj->sg);
+ }
+
+ sgt = &udl_attach->sgt;
+
+ ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ rd = obj->sg->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+ }
+
+ udl_attach->is_mapped = true;
+ udl_attach->dir = dir;
+ attach->priv = udl_attach;
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* Nothing to do. */
+ DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
+ attach->dmabuf->size, dir);
+}
+
+static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ /* TODO */
+
+ return -EINVAL;
+}
+
+static struct dma_buf_ops udl_dmabuf_ops = {
+ .attach = udl_attach_dma_buf,
+ .detach = udl_detach_dma_buf,
+ .map_dma_buf = udl_map_dma_buf,
+ .unmap_dma_buf = udl_unmap_dma_buf,
+ .kmap = udl_dmabuf_kmap,
+ .kmap_atomic = udl_dmabuf_kmap_atomic,
+ .kunmap = udl_dmabuf_kunmap,
+ .kunmap_atomic = udl_dmabuf_kunmap_atomic,
+ .mmap = udl_dmabuf_mmap,
+ .release = drm_gem_dmabuf_release,
+};
+
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+}
+
+static int udl_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct udl_gem_object **obj_p)
+{
+ struct udl_gem_object *obj;
+ int npages;
+
+ npages = size / PAGE_SIZE;
+
+ *obj_p = NULL;
+ obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->sg = sg;
+ obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (obj->pages == NULL) {
+ DRM_ERROR("obj pages is NULL %d\n", npages);
+ return -ENOMEM;
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+ *obj_p = obj;
+ return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct udl_gem_object *uobj;
+ int ret;
+
+ /* need to attach */
+ get_device(dev->dev);
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+ if (ret)
+ goto fail_unmap;
+
+ uobj->base.import_attach = attach;
+ uobj->flags = UDL_BO_WC;
+
+ return &uobj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+ put_device(dev->dev);
+ return ERR_PTR(ret);
+}
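The new import path follows the standard PRIME sequence: attach to the foreign dma-buf, take a reference, map the attachment to obtain an sg_table, wrap it in a driver GEM object, and unwind in reverse order on failure. The shape of it, with a hypothetical my_wrap_sg() standing in for the driver-specific object creation:

#include <drm/drmP.h>
#include <linux/dma-buf.h>

/* Hypothetical helper that turns the sg_table into a driver GEM object. */
static struct drm_gem_object *my_wrap_sg(struct drm_device *dev, size_t size,
					 struct sg_table *sg);

static struct drm_gem_object *my_prime_import(struct drm_device *dev,
					      struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct drm_gem_object *obj;
	int ret;

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(buf);	/* the import keeps its own reference */

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_detach;
	}

	obj = my_wrap_sg(dev, buf->size, sg);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_unmap;
	}

	obj->import_attach = attach;
	return obj;

err_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
err_detach:
	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
	return ERR_PTR(ret);
}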
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8607e9e513db..d5728ec85254 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -51,7 +51,9 @@ static struct drm_driver driver = {
.dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = udl_gem_prime_export,
.gem_prime_import = udl_gem_prime_import,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index c7490a2489a7..80adbac82bde 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -25,6 +25,9 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
+#define UDL_BO_CACHEABLE (1 << 0)
+#define UDL_BO_WC (1 << 1)
+
struct udl_device;
struct urb_node {
@@ -69,6 +72,7 @@ struct udl_gem_object {
struct page **pages;
void *vmapping;
struct sg_table *sg;
+ unsigned int flags;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -120,9 +124,13 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
+int udl_gem_get_pages(struct udl_gem_object *obj);
+void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8044f5fb7c49..2a0a784ab6ee 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ obj->flags = UDL_BO_CACHEABLE;
return obj;
}
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
return 0;
}
+static void update_vm_cache_attr(struct udl_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+ /* non-cacheable as default. */
+ if (obj->flags & UDL_BO_CACHEABLE) {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ } else if (obj->flags & UDL_BO_WC) {
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else {
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ }
+}
+
int udl_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+ update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+
return ret;
}
@@ -107,7 +127,7 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-static int udl_gem_get_pages(struct udl_gem_object *obj)
+int udl_gem_get_pages(struct udl_gem_object *obj)
{
struct page **pages;
@@ -123,7 +143,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj)
return 0;
}
-static void udl_gem_put_pages(struct udl_gem_object *obj)
+void udl_gem_put_pages(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
drm_free_large(obj->pages);
@@ -164,8 +184,7 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
return;
}
- if (obj->vmapping)
- vunmap(obj->vmapping);
+ vunmap(obj->vmapping);
udl_gem_put_pages(obj);
}
@@ -220,73 +239,3 @@ unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-
-static int udl_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct udl_gem_object **obj_p)
-{
- struct udl_gem_object *obj;
- int npages;
-
- npages = size / PAGE_SIZE;
-
- *obj_p = NULL;
- obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
- if (!obj)
- return -ENOMEM;
-
- obj->sg = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (obj->pages == NULL) {
- DRM_ERROR("obj pages is NULL %d\n", npages);
- return -ENOMEM;
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
- *obj_p = obj;
- return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct udl_gem_object *uobj;
- int ret;
-
- /* need to attach */
- get_device(dev->dev);
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach)) {
- put_device(dev->dev);
- return ERR_CAST(attach);
- }
-
- get_dma_buf(dma_buf);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
- if (ret) {
- goto fail_unmap;
- }
-
- uobj->base.import_attach = attach;
-
- return &uobj->base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- dma_buf_put(dma_buf);
- put_device(dev->dev);
- return ERR_PTR(ret);
-}
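The cache attribute chosen in update_vm_cache_attr() is purely a function of how the buffer will be used: cacheable for shmem-backed objects the CPU renders into, write-combined for imported scanout buffers. Reduced to the decision itself (flag names hypothetical):

#include <linux/mm.h>

#define MY_BO_CACHEABLE	(1 << 0)
#define MY_BO_WC	(1 << 1)

/* Translate per-object flags into the mmap page protection. */
static void my_set_vm_cache_attr(unsigned int flags,
				 struct vm_area_struct *vma)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	if (flags & MY_BO_CACHEABLE)
		vma->vm_page_prot = prot;			/* CPU rendering */
	else if (flags & MY_BO_WC)
		vma->vm_page_prot = pgprot_writecombine(prot);	/* scanout/import */
	else
		vma->vm_page_prot = pgprot_noncached(prot);	/* fallback */
}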
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index dc145d320b25..1701f1dfb23f 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -14,6 +14,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
#include "udl_drv.h"
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25f3c250fd98..7b5d22110f25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -889,8 +889,7 @@ static int vmw_driver_unload(struct drm_device *dev)
if (dev_priv->ctx.res_ht_initialized)
drm_ht_remove(&dev_priv->ctx.res_ht);
- if (dev_priv->ctx.cmd_bounce)
- vfree(dev_priv->ctx.cmd_bounce);
+ vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
@@ -1063,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
vmaster = vmw_master_check(dev, file_priv, flags);
if (unlikely(IS_ERR(vmaster))) {
- DRM_INFO("IOCTL ERROR %d\n", nr);
- return PTR_ERR(vmaster);
+ ret = PTR_ERR(vmaster);
+
+ if (ret != -ERESTARTSYS)
+ DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+ nr, ret);
+ return ret;
}
ret = ioctl_func(filp, cmd, arg);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 596cd6dafd33..33176d05db35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+ ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+ true, NULL);
if (unlikely(ret != 0))
goto out_err;
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+ ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+ false, NULL);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd7803..b7594cb758af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
-
fence_free(&fence->base);
-
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->fence_size);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
struct vmw_fence_obj **p_fence)
{
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct vmw_fence_obj *fence;
int ret;
- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL)) {
- ret = -ENOMEM;
- goto out_no_object;
- }
+ if (unlikely(fence == NULL))
+ return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
out_err_init:
kfree(fence);
-out_no_object:
- ttm_mem_global_free(mem_glob, fman->fence_size);
return ret;
}
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
if (ret != 0)
goto out_no_queue;
+ return 0;
+
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
- else
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
-
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ arg->flags,
+ arg->user_data,
+ true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 941a7bc0b791..3725b521d931 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
return ret;
}
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y + du->hotspot_y);
drm_modeset_unlock_all(dev_priv->dev);
- drm_modeset_lock_crtc(crtc);
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 15e185ae4c99..5c289f748ab4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
#define vmw_crtc_to_ldu(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 026de7cea0f6..210ef15b1d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b295463a60b3..7dc591d04d9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
#define vmw_crtc_to_sou(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 8719fb3cccc9..6a4584a43aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->mem.start;
- cmd->body.offsetInBytes = 0;
+ cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 3995255b16c7..5a8c8d55317a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -97,7 +97,7 @@ fail:
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
u32 pos = pb->pos;
- u32 *p = (u32 *)((u32)pb->mapped + pos);
+ u32 *p = (u32 *)((void *)pb->mapped + pos);
WARN_ON(pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 313c4b784348..470087af8fe5 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -42,7 +42,7 @@ struct host1x_job;
*/
struct push_buffer {
- u32 *mapped; /* mapped pushbuffer memory */
+ void *mapped; /* mapped pushbuffer memory */
dma_addr_t phys; /* physical address of pushbuffer */
u32 fence; /* index we've written */
u32 pos; /* index to write to */
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 6b09b71940c2..305ea8f3382d 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -26,11 +26,11 @@
#include "../debug.h"
/*
- * Put the restart at the end of pushbuffer memor
+ * Put the restart at the end of pushbuffer memory
*/
static void push_buffer_init(struct push_buffer *pb)
{
- *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+ *(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
}
/*
@@ -51,11 +51,11 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
/* NOP all the PB slots */
while (nr_slots--) {
- u32 *p = (u32 *)((u32)pb->mapped + getptr);
+ u32 *p = (u32 *)(pb->mapped + getptr);
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
- dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__,
- (u64)pb->phys + getptr);
+ dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
+ &pb->phys, getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
wmb();
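The switch from a u32 * mapping to void * matters on 64-bit: casting the pointer to u32 for offset arithmetic truncates it, whereas byte arithmetic on the void * mapping keeps the full address. In isolation (a sketch, pb_mapped being the hypothetical CPU mapping of the push buffer):

#include <linux/types.h>

static void push_words(void *pb_mapped, u32 pos, u32 op1, u32 op2)
{
	/* void * arithmetic is byte-granular in kernel C, so no truncating
	 * (u32) cast of the pointer is needed. */
	u32 *p = (u32 *)(pb_mapped + pos);

	*p++ = op1;
	*p = op2;
}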
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 4608257ab656..946c332c3906 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -32,6 +32,7 @@
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
u32 offset, u32 words)
{
+ struct device *dev = cdma_to_channel(cdma)->dev;
void *mem = NULL;
if (host1x_debug_trace_cmdbuf)
@@ -44,11 +45,14 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
* of how much you can output to ftrace at once.
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
- trace_host1x_cdma_push_gather(
- dev_name(cdma_to_channel(cdma)->dev),
- (u32)bo, min(words - i, TRACE_MAX_LENGTH),
- offset + i * sizeof(u32), mem);
+ u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+ offset += i * sizeof(u32);
+
+ trace_host1x_cdma_push_gather(dev_name(dev), bo,
+ num_words, offset,
+ mem);
}
+
host1x_bo_munmap(bo, mem);
}
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f72c873eff81..791de9351eeb 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
continue;
}
- host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
- (u64)g->base, g->offset, g->words);
+ host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
+ &g->base, g->offset, g->words);
show_gather(o, g->base + g->offset, g->words, cdma,
g->base, mapped);
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 33a697d6dcef..8b3c15df0660 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -23,7 +23,7 @@ struct host1x_job_gather {
u32 words;
dma_addr_t base;
struct host1x_bo *bo;
- int offset;
+ u32 offset;
bool handled;
};
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 9882ea122024..fbc6ee6ca337 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -49,35 +49,47 @@
#define MIPI_CAL_CONFIG_DSIC 0x10
#define MIPI_CAL_CONFIG_DSID 0x11
+#define MIPI_CAL_CONFIG_DSIAB_CLK 0x19
+#define MIPI_CAL_CONFIG_DSICD_CLK 0x1a
+#define MIPI_CAL_CONFIG_CSIAB_CLK 0x1b
+#define MIPI_CAL_CONFIG_CSICD_CLK 0x1c
+#define MIPI_CAL_CONFIG_CSIE_CLK 0x1d
+
+/* for data and clock lanes */
#define MIPI_CAL_CONFIG_SELECT (1 << 21)
+
+/* for data lanes */
#define MIPI_CAL_CONFIG_HSPDOS(x) (((x) & 0x1f) << 16)
#define MIPI_CAL_CONFIG_HSPUOS(x) (((x) & 0x1f) << 8)
#define MIPI_CAL_CONFIG_TERMOS(x) (((x) & 0x1f) << 0)
+/* for clock lanes */
+#define MIPI_CAL_CONFIG_HSCLKPDOSD(x) (((x) & 0x1f) << 8)
+#define MIPI_CAL_CONFIG_HSCLKPUOSD(x) (((x) & 0x1f) << 0)
+
#define MIPI_CAL_BIAS_PAD_CFG0 0x16
#define MIPI_CAL_BIAS_PAD_PDVCLAMP (1 << 1)
#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
#define MIPI_CAL_BIAS_PAD_CFG1 0x17
+#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
#define MIPI_CAL_BIAS_PAD_CFG2 0x18
#define MIPI_CAL_BIAS_PAD_PDVREG (1 << 1)
-static const struct module {
- unsigned long reg;
-} modules[] = {
- { .reg = MIPI_CAL_CONFIG_CSIA },
- { .reg = MIPI_CAL_CONFIG_CSIB },
- { .reg = MIPI_CAL_CONFIG_CSIC },
- { .reg = MIPI_CAL_CONFIG_CSID },
- { .reg = MIPI_CAL_CONFIG_CSIE },
- { .reg = MIPI_CAL_CONFIG_DSIA },
- { .reg = MIPI_CAL_CONFIG_DSIB },
- { .reg = MIPI_CAL_CONFIG_DSIC },
- { .reg = MIPI_CAL_CONFIG_DSID },
+struct tegra_mipi_pad {
+ unsigned long data;
+ unsigned long clk;
+};
+
+struct tegra_mipi_soc {
+ bool has_clk_lane;
+ const struct tegra_mipi_pad *pads;
+ unsigned int num_pads;
};
struct tegra_mipi {
+ const struct tegra_mipi_soc *soc;
void __iomem *regs;
struct mutex lock;
struct clk *clk;
@@ -90,16 +102,16 @@ struct tegra_mipi_device {
unsigned long pads;
};
-static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
- unsigned long reg)
+static inline u32 tegra_mipi_readl(struct tegra_mipi *mipi,
+ unsigned long offset)
{
- return readl(mipi->regs + (reg << 2));
+ return readl(mipi->regs + (offset << 2));
}
-static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
- unsigned long value, unsigned long reg)
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
+ unsigned long offset)
{
- writel(value, mipi->regs + (reg << 2));
+ writel(value, mipi->regs + (offset << 2));
}
struct tegra_mipi_device *tegra_mipi_request(struct device *device)
@@ -117,36 +129,35 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
- of_node_put(args.np);
err = -ENOMEM;
goto out;
}
dev->pdev = of_find_device_by_node(args.np);
if (!dev->pdev) {
- of_node_put(args.np);
err = -ENODEV;
goto free;
}
- of_node_put(args.np);
-
dev->mipi = platform_get_drvdata(dev->pdev);
if (!dev->mipi) {
err = -EPROBE_DEFER;
- goto pdev_put;
+ goto put;
}
+ of_node_put(args.np);
+
dev->pads = args.args[0];
dev->device = device;
return dev;
-pdev_put:
+put:
platform_device_put(dev->pdev);
free:
kfree(dev);
out:
+ of_node_put(args.np);
return ERR_PTR(err);
}
EXPORT_SYMBOL(tegra_mipi_request);
@@ -161,7 +172,7 @@ EXPORT_SYMBOL(tegra_mipi_free);
static int tegra_mipi_wait(struct tegra_mipi *mipi)
{
unsigned long timeout = jiffies + msecs_to_jiffies(250);
- unsigned long value;
+ u32 value;
while (time_before(jiffies, timeout)) {
value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
@@ -177,8 +188,9 @@ static int tegra_mipi_wait(struct tegra_mipi *mipi)
int tegra_mipi_calibrate(struct tegra_mipi_device *device)
{
- unsigned long value;
+ const struct tegra_mipi_soc *soc = device->mipi->soc;
unsigned int i;
+ u32 value;
int err;
err = clk_enable(device->mipi->clk);
@@ -192,23 +204,35 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+ tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
+ MIPI_CAL_BIAS_PAD_CFG1);
+
value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
- for (i = 0; i < ARRAY_SIZE(modules); i++) {
- if (device->pads & BIT(i))
- value = MIPI_CAL_CONFIG_SELECT |
- MIPI_CAL_CONFIG_HSPDOS(0) |
- MIPI_CAL_CONFIG_HSPUOS(4) |
- MIPI_CAL_CONFIG_TERMOS(5);
- else
- value = 0;
+ for (i = 0; i < soc->num_pads; i++) {
+ u32 clk = 0, data = 0;
+
+ if (device->pads & BIT(i)) {
+ data = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSPDOS(0) |
+ MIPI_CAL_CONFIG_HSPUOS(4) |
+ MIPI_CAL_CONFIG_TERMOS(5);
+ clk = MIPI_CAL_CONFIG_SELECT |
+ MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
+ MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+ }
- tegra_mipi_writel(device->mipi, value, modules[i].reg);
+ tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
+
+ if (soc->has_clk_lane)
+ tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
}
- tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+ value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+ value |= MIPI_CAL_CTRL_START;
+ tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
err = tegra_mipi_wait(device->mipi);
@@ -219,16 +243,63 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
}
EXPORT_SYMBOL(tegra_mipi_calibrate);
+static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA },
+ { .data = MIPI_CAL_CONFIG_CSIB },
+ { .data = MIPI_CAL_CONFIG_CSIC },
+ { .data = MIPI_CAL_CONFIG_CSID },
+ { .data = MIPI_CAL_CONFIG_CSIE },
+ { .data = MIPI_CAL_CONFIG_DSIA },
+ { .data = MIPI_CAL_CONFIG_DSIB },
+ { .data = MIPI_CAL_CONFIG_DSIC },
+ { .data = MIPI_CAL_CONFIG_DSID },
+};
+
+static const struct tegra_mipi_soc tegra114_mipi_soc = {
+ .has_clk_lane = false,
+ .pads = tegra114_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+};
+
+static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
+ { .data = MIPI_CAL_CONFIG_CSIA, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+ { .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+ { .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+ { .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+};
+
+static const struct tegra_mipi_soc tegra124_mipi_soc = {
+ .has_clk_lane = true,
+ .pads = tegra124_mipi_pads,
+ .num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+};
+
+static struct of_device_id tegra_mipi_of_match[] = {
+ { .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
+ { .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+ { },
+};
+
static int tegra_mipi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct tegra_mipi *mipi;
struct resource *res;
int err;
+ match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ mipi->soc = match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mipi->regs))
@@ -260,11 +331,6 @@ static int tegra_mipi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_mipi_of_match[] = {
- { .compatible = "nvidia,tegra114-mipi", },
- { },
-};
-
struct platform_driver tegra_mipi_driver = {
.driver = {
.name = "tegra-mipi",
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6f56471bd2a..752cdd2da89a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -227,83 +227,83 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
{
switch (mbus_code) {
- case V4L2_MBUS_FMT_BGR565_2X8_BE:
- case V4L2_MBUS_FMT_BGR565_2X8_LE:
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
cfg->mipi_dt = MIPI_DT_RGB565;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE:
- case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
cfg->mipi_dt = MIPI_DT_RGB444;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
cfg->mipi_dt = MIPI_DT_RGB555;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGBRG8_1X8:
- case V4L2_MBUS_FMT_SGRBG8_1X8:
- case V4L2_MBUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW8;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
- case V4L2_MBUS_FMT_SGBRG10_1X10:
- case V4L2_MBUS_FMT_SGRBG10_1X10:
- case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_10;
break;
- case V4L2_MBUS_FMT_SBGGR12_1X12:
- case V4L2_MBUS_FMT_SGBRG12_1X12:
- case V4L2_MBUS_FMT_SGRBG12_1X12:
- case V4L2_MBUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW12;
cfg->data_width = IPU_CSI_DATA_WIDTH_12;
break;
- case V4L2_MBUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_JPEG_1X8:
/* TODO */
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
cfg->mipi_dt = MIPI_DT_RAW8;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index f42df4dd58d2..230b6f887cd8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -371,6 +371,7 @@ config HID_LOGITECH_DJ
tristate "Logitech Unifying receivers full support"
depends on HIDRAW
depends on HID_LOGITECH
+ select HID_LOGITECH_HIDPP
---help---
Say Y if you want support for Logitech Unifying receivers and devices.
Unifying receivers are capable of pairing up to 6 Logitech compliant
@@ -378,6 +379,17 @@ config HID_LOGITECH_DJ
generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
+config HID_LOGITECH_HIDPP
+ tristate "Logitech HID++ devices support"
+ depends on HID_LOGITECH
+ ---help---
+ Support for Logitech devices relying on the HID++ Logitech specification
+
+ Say Y if you want support for Logitech devices relying on the HID++
+ specification. Such devices are the various Logitech Touchpads (T650,
+ T651, TK820), some mice (Zone Touch mouse), or even keyboards (Solar
+ Keyboard).
+
config LOGITECH_FF
bool "Logitech force feedback support"
depends on HID_LOGITECH
@@ -613,6 +625,13 @@ config HID_PICOLCD_CIR
---help---
Provide access to PicoLCD's CIR interface via remote control (LIRC).
+config HID_PLANTRONICS
+ tristate "Plantronics USB HID Driver"
+ default !EXPERT
+ depends on HID
+ ---help---
+ Provides HID support for Plantronics telephony devices.
+
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
depends on HID
@@ -629,7 +648,7 @@ config HID_ROCCAT
support for its special functionalities.
config HID_SAITEK
- tristate "Saitek non-fully HID-compliant devices"
+ tristate "Saitek (Mad Catz) non-fully HID-compliant devices"
depends on HID
---help---
Support for Saitek devices that are not fully compliant with the
@@ -637,6 +656,7 @@ config HID_SAITEK
Supported devices:
- PS1000 Dual Analog Pad
+ - R.A.T.9 Gaming Mouse
- R.A.T.7 Gaming Mouse
- M.M.O.7 Gaming Mouse
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e2850d8af9ca..debd15b44b59 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
+obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
@@ -94,6 +95,7 @@ ifdef CONFIG_DEBUG_FS
hid-picolcd-y += hid-picolcd_debugfs.o
endif
+obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3402033fa52a..c3d0ac1a0988 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -702,6 +702,11 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
type == HID_COLLECTION_PHYSICAL)
hid->group = HID_GROUP_SENSOR_HUB;
+
+ if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 &&
+ hid->group == HID_GROUP_MULTITOUCH)
+ hid->group = HID_GROUP_GENERIC;
}
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
@@ -780,22 +785,19 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_MULTITOUCH_WIN_8;
/*
- * Vendor specific handlings
- */
- if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) &&
- (hid->group == HID_GROUP_GENERIC) &&
- /* only bind to the mouse interface of composite USB devices */
- (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
- /* hid-rmi should take care of them, not hid-generic */
- hid->group = HID_GROUP_RMI;
-
- /*
* Vendor specific handlings
*/
switch (hid->vendor) {
case USB_VENDOR_ID_WACOM:
hid->group = HID_GROUP_WACOM;
break;
+ case USB_VENDOR_ID_SYNAPTICS:
+ if ((hid->group == HID_GROUP_GENERIC) &&
+ (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
+ /* hid-rmi should only bind to the mouse interface of
+ * composite USB devices */
+ hid->group = HID_GROUP_RMI;
+ break;
}
vfree(parser);
@@ -1280,12 +1282,6 @@ void hid_output_report(struct hid_report *report, __u8 *data)
}
EXPORT_SYMBOL_GPL(hid_output_report);
-static int hid_report_len(struct hid_report *report)
-{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
-}
-
/*
* Allocator for buffer that is going to be passed to hid_output_report()
*/
@@ -1822,6 +1818,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1862,6 +1859,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -1887,6 +1885,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
#if IS_ENABLED(CONFIG_HID_ROCCAT)
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
@@ -1910,10 +1909,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
@@ -2539,7 +2540,8 @@ int hid_add_device(struct hid_device *hdev)
* Scan generic devices for group information
*/
if (hid_ignore_special_drivers ||
- !hid_match_id(hdev, hid_have_special_driver)) {
+ (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver))) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7c863738e419..7460f3402298 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -300,6 +300,7 @@
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
#define USB_VENDOR_ID_ELECOM 0x056e
@@ -578,6 +579,7 @@
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -620,6 +622,7 @@
#define USB_VENDOR_ID_MADCATZ 0x0738
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -649,6 +652,7 @@
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
+#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -716,6 +720,8 @@
#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+#define USB_VENDOR_ID_PLANTRONICS 0x047f
+
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
#define USB_DEVICE_ID_PANABOARD_UBT880 0x104d
@@ -813,6 +819,9 @@
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+#define USB_VENDOR_ID_SMK 0x0609
+#define USB_DEVICE_ID_SMK_PS3_BDREMOTE 0x0306
+
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
@@ -931,6 +940,9 @@
#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
+#define USB_VENDOR_ID_VTL 0x0306
+#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
+
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 725f22ca47fc..e0a0f06ac5ef 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -872,7 +872,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
- default: goto ignore;
+ default: map_key_clear(KEY_UNKNOWN);
}
break;
@@ -1215,7 +1215,7 @@ static void hidinput_led_worker(struct work_struct *work)
return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT);
/* fall back to generic raw-output-report */
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ len = hid_report_len(report);
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
return;
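hid_report_len(), now used instead of the open-coded expression, is just the report's size in bits rounded up to bytes, plus one byte when a report ID is present. The same arithmetic spelled out (sketch):

#include <linux/hid.h>

static unsigned int my_report_len(struct hid_report *report)
{
	/* Equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0). */
	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}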
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index bf227f7679af..4c55f4d95798 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -62,7 +62,6 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
/* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR ||
(usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0x00f1: /* Fn-F4: Mic mute */
map_key_clear(KEY_MICMUTE);
@@ -85,13 +84,13 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
case 0x00f8: /* Fn-F11: View open applications (3 boxes) */
map_key_clear(KEY_SCALE);
return 1;
- case 0x00fa: /* Fn-Esc: Fn-lock toggle */
- map_key_clear(KEY_FN_ESC);
- return 1;
- case 0x00fb: /* Fn-F12: Open My computer (6 boxes) USB-only */
+ case 0x00f9: /* Fn-F12: Open My computer (6 boxes) USB-only */
/* NB: This mapping is invented in raw_event below */
map_key_clear(KEY_FILE);
return 1;
+ case 0x00fa: /* Fn-Esc: Fn-lock toggle */
+ map_key_clear(KEY_FN_ESC);
+ return 1;
}
}
@@ -207,8 +206,8 @@ static int lenovo_raw_event(struct hid_device *hdev,
&& data[0] == 0x15
&& data[1] == 0x94
&& data[2] == 0x01)) {
- data[1] = 0x0;
- data[2] = 0x4;
+ data[1] = 0x00;
+ data[2] = 0x01;
}
return 0;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 71f569292cab..c917ab61aafa 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -26,9 +26,104 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/kfifo.h>
#include <asm/unaligned.h>
#include "hid-ids.h"
-#include "hid-logitech-dj.h"
+
+#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_RECEIVER_INDEX 0
+#define DJ_DEVICE_INDEX_MIN 1
+#define DJ_DEVICE_INDEX_MAX 6
+
+#define DJREPORT_SHORT_LENGTH 15
+#define DJREPORT_LONG_LENGTH 32
+
+#define REPORT_ID_DJ_SHORT 0x20
+#define REPORT_ID_DJ_LONG 0x21
+
+#define REPORT_ID_HIDPP_SHORT 0x10
+#define REPORT_ID_HIDPP_LONG 0x11
+
+#define HIDPP_REPORT_SHORT_LENGTH 7
+#define HIDPP_REPORT_LONG_LENGTH 20
+
+#define HIDPP_RECEIVER_INDEX 0xff
+
+#define REPORT_TYPE_RFREPORT_FIRST 0x01
+#define REPORT_TYPE_RFREPORT_LAST 0x1F
+
+/* Command Switch to DJ mode */
+#define REPORT_TYPE_CMD_SWITCH 0x80
+#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
+#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
+#define TIMEOUT_NO_KEEPALIVE 0x00
+
+/* Command to Get the list of Paired devices */
+#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
+
+/* Device Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
+#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
+#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
+#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
+#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
+
+/* Device Un-Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
+
+
+/* Connection Status Notification */
+#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
+#define CONNECTION_STATUS_PARAM_STATUS 0x00
+#define STATUS_LINKLOSS 0x01
+
+/* Error Notification */
+#define REPORT_TYPE_NOTIF_ERROR 0x7F
+#define NOTIF_ERROR_PARAM_ETYPE 0x00
+#define ETYPE_KEEPALIVE_TIMEOUT 0x01
+
+/* supported DJ HID && RF report types */
+#define REPORT_TYPE_KEYBOARD 0x01
+#define REPORT_TYPE_MOUSE 0x02
+#define REPORT_TYPE_CONSUMER_CONTROL 0x03
+#define REPORT_TYPE_SYSTEM_CONTROL 0x04
+#define REPORT_TYPE_MEDIA_CENTER 0x08
+#define REPORT_TYPE_LEDS 0x0E
+
+/* RF Report types bitfield */
+#define STD_KEYBOARD 0x00000002
+#define STD_MOUSE 0x00000004
+#define MULTIMEDIA 0x00000008
+#define POWER_KEYS 0x00000010
+#define MEDIA_CENTER 0x00000100
+#define KBD_LEDS 0x00004000
+
+struct dj_report {
+ u8 report_id;
+ u8 device_index;
+ u8 report_type;
+ u8 report_params[DJREPORT_SHORT_LENGTH - 3];
+};
+
+struct dj_receiver_dev {
+ struct hid_device *hdev;
+ struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
+ DJ_DEVICE_INDEX_MIN];
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
+ bool querying_devices;
+};
+
+struct dj_device {
+ struct hid_device *hdev;
+ struct dj_receiver_dev *dj_receiver_dev;
+ u32 reports_supported;
+ u8 device_index;
+};
/* Keyboard descriptor (1) */
static const char kbd_descriptor[] = {
@@ -156,6 +251,57 @@ static const char media_descriptor[] = {
0xc0, /* EndCollection */
}; /* */
+/* HIDPP descriptor */
+static const char hidpp_descriptor[] = {
+ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
+ 0x09, 0x01, /* Usage (Vendor Usage 1) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x10, /* Report ID (16) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x06, /* Report Count (6) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x09, 0x01, /* Usage (Vendor Usage 1) */
+ 0x81, 0x00, /* Input (Data,Arr,Abs) */
+ 0x09, 0x01, /* Usage (Vendor Usage 1) */
+ 0x91, 0x00, /* Output (Data,Arr,Abs) */
+ 0xc0, /* End Collection */
+ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
+ 0x09, 0x02, /* Usage (Vendor Usage 2) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x11, /* Report ID (17) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x13, /* Report Count (19) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x09, 0x02, /* Usage (Vendor Usage 2) */
+ 0x81, 0x00, /* Input (Data,Arr,Abs) */
+ 0x09, 0x02, /* Usage (Vendor Usage 2) */
+ 0x91, 0x00, /* Output (Data,Arr,Abs) */
+ 0xc0, /* End Collection */
+ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
+ 0x09, 0x04, /* Usage (Vendor Usage 0x04) */
+ 0xa1, 0x01, /* Collection (Application) */
+ 0x85, 0x20, /* Report ID (32) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x0e, /* Report Count (14) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x09, 0x41, /* Usage (Vendor Usage 0x41) */
+ 0x81, 0x00, /* Input (Data,Arr,Abs) */
+ 0x09, 0x41, /* Usage (Vendor Usage 0x41) */
+ 0x91, 0x00, /* Output (Data,Arr,Abs) */
+ 0x85, 0x21, /* Report ID (33) */
+ 0x95, 0x1f, /* Report Count (31) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xff, 0x00, /* Logical Maximum (255) */
+ 0x09, 0x42, /* Usage (Vendor Usage 0x42) */
+ 0x81, 0x00, /* Input (Data,Arr,Abs) */
+ 0x09, 0x42, /* Usage (Vendor Usage 0x42) */
+ 0x91, 0x00, /* Output (Data,Arr,Abs) */
+ 0xc0, /* End Collection */
+};
+
/* Maximum size of all defined hid reports in bytes (including report id) */
#define MAX_REPORT_SIZE 8
@@ -165,7 +311,8 @@ static const char media_descriptor[] = {
sizeof(mse_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
- sizeof(media_descriptor))
+ sizeof(media_descriptor) + \
+ sizeof(hidpp_descriptor))
/* Number of possible hid report types that can be created by this driver.
*
@@ -256,11 +403,15 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev->dev.parent = &djrcv_hdev->dev;
dj_hiddev->bus = BUS_USB;
dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct);
+ dj_hiddev->product =
+ (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]
+ << 8) |
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%02x%02x",
- dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB],
- dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]);
+ "Logitech Unifying Device. Wireless PID:%04x",
+ dj_hiddev->product);
+
+ dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
@@ -422,6 +573,13 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
}
}
+static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data,
+ int size)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1))
+ dbg_hid("hid_input_report error\n");
+}
static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
@@ -472,7 +630,9 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
+ struct hid_device *hdev = djrcv_dev->hdev;
struct dj_report *dj_report;
+ u8 *buf;
int retval;
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
@@ -484,7 +644,6 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
- kfree(dj_report);
/*
* Ugly sleep to work around a USB 3.0 bug when the receiver is still
@@ -493,6 +652,30 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
*/
msleep(50);
+ /*
+ * Magical bits to set up hidpp notifications when the dj devices
+ * are connected/disconnected.
+ *
+ * We can reuse dj_report because HIDPP_REPORT_SHORT_LENGTH is smaller
+ * than DJREPORT_SHORT_LENGTH.
+ */
+ buf = (u8 *)dj_report;
+
+ memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH);
+
+ buf[0] = REPORT_ID_HIDPP_SHORT;
+ buf[1] = 0xFF;
+ buf[2] = 0x80;
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x09;
+ buf[6] = 0x00;
+
+ hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
+ HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(dj_report);
return retval;
}
@@ -509,6 +692,9 @@ static void logi_dj_ll_close(struct hid_device *hid)
dbg_hid("%s:%s\n", __func__, hid->phys);
}
+static u8 unifying_name_query[] = {0x10, 0xff, 0x83, 0xb5, 0x40, 0x00, 0x00};
+static u8 unifying_name_answer[] = {0x11, 0xff, 0x83, 0xb5};
+
static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
size_t count, unsigned char report_type,
@@ -519,6 +705,22 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
u8 *out_buf;
int ret;
+ if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
+ (buf[0] == REPORT_ID_HIDPP_LONG)) {
+ if (count < 2)
+ return -EINVAL;
+
+ /* special case where we should not overwrite
+ * the device_index */
+ if (count == 7 && !memcmp(buf, unifying_name_query,
+ sizeof(unifying_name_query)))
+ buf[4] |= djdev->device_index - 1;
+ else
+ buf[1] = djdev->device_index;
+ return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf,
+ count, report_type, reqtype);
+ }
+
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
@@ -597,6 +799,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
__func__, djdev->reports_supported);
}
+ rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor));
+
retval = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
@@ -624,8 +828,7 @@ static struct hid_ll_driver logi_dj_ll_driver = {
.raw_request = logi_dj_ll_raw_request,
};
-
-static int logi_dj_raw_event(struct hid_device *hdev,
+static int logi_dj_dj_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
@@ -633,36 +836,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
struct dj_report *dj_report = (struct dj_report *) data;
unsigned long flags;
- dbg_hid("%s, size:%d\n", __func__, size);
-
- /* Here we receive all data coming from iface 2, there are 4 cases:
- *
- * 1) Data should continue its normal processing i.e. data does not
- * come from the DJ collection, in which case we do nothing and
- * return 0, so hid-core can continue normal processing (will forward
- * to associated hidraw device)
+ /*
+ * Here we receive all data coming from iface 2, there are 3 cases:
*
- * 2) Data is from DJ collection, and is intended for this driver i. e.
- * data contains arrival, departure, etc notifications, in which case
- * we queue them for delayed processing by the work queue. We return 1
- * to hid-core as no further processing is required from it.
+ * 1) Data is intended for this driver, i.e. data contains arrival,
+ * departure, etc notifications, in which case we queue them for delayed
+ * processing by the work queue. We return 1 to hid-core as no further
+ * processing is required from it.
*
- * 3) Data is from DJ collection, and informs a connection change,
- * if the change means rf link loss, then we must send a null report
- * to the upper layer to discard potentially pressed keys that may be
- * repeated forever by the input layer. Return 1 to hid-core as no
- * further processing is required.
+ * 2) Data signals a connection change; if the change means RF link
+ * loss, we must send a null report to the upper layer to discard
+ * potentially pressed keys that may be repeated forever by the input
+ * layer. Return 1 to hid-core as no further processing is required.
*
- * 4) Data is from DJ collection and is an actual input event from
- * a paired DJ device in which case we forward it to the correct hid
- * device (via hid_input_report() ) and return 1 so hid-core does not do
- * anything else with it.
+ * 3) Data is an actual input event from a paired DJ device, in which
+ * case we forward it to the correct hid device (via hid_input_report())
+ * and return 1 so hid-core does not do anything else with it.
*/
- /* case 1) */
- if (data[0] != REPORT_ID_DJ_SHORT)
- return false;
-
if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
(dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
/*
@@ -707,6 +898,80 @@ out:
return true;
}
+static int logi_dj_hidpp_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_report *dj_report = (struct dj_report *) data;
+ unsigned long flags;
+ u8 device_index = dj_report->device_index;
+
+ if (device_index == HIDPP_RECEIVER_INDEX) {
+ /* special case where the device wants to know its unifying
+ * name */
+ if (size == HIDPP_REPORT_LONG_LENGTH &&
+ !memcmp(data, unifying_name_answer,
+ sizeof(unifying_name_answer)) &&
+ ((data[4] & 0xF0) == 0x40))
+ device_index = (data[4] & 0x0F) + 1;
+ else
+ return false;
+ }
+
+ /*
+ * Data is from the HID++ collection; in this case, we forward the
+ * data to the corresponding child dj device and return 0 to hid-core
+ * so the data also goes to the hidraw device of the receiver. This
+ * allows a user space application to implement the full HID++ routing
+ * via the receiver.
+ */
+
+ if ((device_index < DJ_DEVICE_INDEX_MIN) ||
+ (device_index > DJ_DEVICE_INDEX_MAX)) {
+ /*
+ * Device index is wrong, bail out.
+ * This driver can safely ignore the receiver notifications,
+ * so ignore those reports too.
+ */
+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return false;
+ }
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+ if (!djrcv_dev->paired_dj_devices[device_index])
+ /* received an event for an unknown device, bail out */
+ goto out;
+
+ logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index],
+ data, size);
+
+out:
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ return false;
+}
+
+static int logi_dj_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ dbg_hid("%s, size:%d\n", __func__, size);
+
+ switch (data[0]) {
+ case REPORT_ID_DJ_SHORT:
+ return logi_dj_dj_event(hdev, report, data, size);
+ case REPORT_ID_HIDPP_SHORT:
+ /* intentional fallthrough */
+ case REPORT_ID_HIDPP_LONG:
+ return logi_dj_hidpp_event(hdev, report, data, size);
+ }
+
+ return false;
+}
+
static int logi_dj_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -714,9 +979,6 @@ static int logi_dj_probe(struct hid_device *hdev,
struct dj_receiver_dev *djrcv_dev;
int retval;
- if (is_dj_device((struct dj_device *)hdev->driver_data))
- return -ENODEV;
-
dbg_hid("%s called for ifnum %d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -869,22 +1131,6 @@ static void logi_dj_remove(struct hid_device *hdev)
hid_set_drvdata(hdev, NULL);
}
-static int logi_djdevice_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
-{
- int ret;
- struct dj_device *dj_dev = hdev->driver_data;
-
- if (!is_dj_device(dj_dev))
- return -ENODEV;
-
- ret = hid_parse(hdev);
- if (!ret)
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
-
- return ret;
-}
-
static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
@@ -906,51 +1152,8 @@ static struct hid_driver logi_djreceiver_driver = {
#endif
};
+module_hid_driver(logi_djreceiver_driver);
-static const struct hid_device_id logi_dj_devices[] = {
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
- {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
- {}
-};
-
-static struct hid_driver logi_djdevice_driver = {
- .name = "logitech-djdevice",
- .id_table = logi_dj_devices,
- .probe = logi_djdevice_probe,
-};
-
-
-static int __init logi_dj_init(void)
-{
- int retval;
-
- dbg_hid("Logitech-DJ:%s\n", __func__);
-
- retval = hid_register_driver(&logi_djreceiver_driver);
- if (retval)
- return retval;
-
- retval = hid_register_driver(&logi_djdevice_driver);
- if (retval)
- hid_unregister_driver(&logi_djreceiver_driver);
-
- return retval;
-
-}
-
-static void __exit logi_dj_exit(void)
-{
- dbg_hid("Logitech-DJ:%s\n", __func__);
-
- hid_unregister_driver(&logi_djdevice_driver);
- hid_unregister_driver(&logi_djreceiver_driver);
-
-}
-
-module_init(logi_dj_init);
-module_exit(logi_dj_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logitech");
MODULE_AUTHOR("Nestor Lopez Casado");
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
deleted file mode 100644
index daeb0aa4bee9..000000000000
--- a/drivers/hid/hid-logitech-dj.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef __HID_LOGITECH_DJ_H
-#define __HID_LOGITECH_DJ_H
-
-/*
- * HID driver for Logitech Unifying receivers
- *
- * Copyright (c) 2011 Logitech
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/kfifo.h>
-
-#define DJ_MAX_PAIRED_DEVICES 6
-#define DJ_MAX_NUMBER_NOTIFICATIONS 8
-#define DJ_RECEIVER_INDEX 0
-#define DJ_DEVICE_INDEX_MIN 1
-#define DJ_DEVICE_INDEX_MAX 6
-
-#define DJREPORT_SHORT_LENGTH 15
-#define DJREPORT_LONG_LENGTH 32
-
-#define REPORT_ID_DJ_SHORT 0x20
-#define REPORT_ID_DJ_LONG 0x21
-
-#define REPORT_TYPE_RFREPORT_FIRST 0x01
-#define REPORT_TYPE_RFREPORT_LAST 0x1F
-
-/* Command Switch to DJ mode */
-#define REPORT_TYPE_CMD_SWITCH 0x80
-#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
-#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
-#define TIMEOUT_NO_KEEPALIVE 0x00
-
-/* Command to Get the list of Paired devices */
-#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
-
-/* Device Paired Notification */
-#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
-#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
-#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
-#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
-#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
-#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
-#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
-
-/* Device Un-Paired Notification */
-#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
-
-
-/* Connection Status Notification */
-#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
-#define CONNECTION_STATUS_PARAM_STATUS 0x00
-#define STATUS_LINKLOSS 0x01
-
-/* Error Notification */
-#define REPORT_TYPE_NOTIF_ERROR 0x7F
-#define NOTIF_ERROR_PARAM_ETYPE 0x00
-#define ETYPE_KEEPALIVE_TIMEOUT 0x01
-
-/* supported DJ HID && RF report types */
-#define REPORT_TYPE_KEYBOARD 0x01
-#define REPORT_TYPE_MOUSE 0x02
-#define REPORT_TYPE_CONSUMER_CONTROL 0x03
-#define REPORT_TYPE_SYSTEM_CONTROL 0x04
-#define REPORT_TYPE_MEDIA_CENTER 0x08
-#define REPORT_TYPE_LEDS 0x0E
-
-/* RF Report types bitfield */
-#define STD_KEYBOARD 0x00000002
-#define STD_MOUSE 0x00000004
-#define MULTIMEDIA 0x00000008
-#define POWER_KEYS 0x00000010
-#define MEDIA_CENTER 0x00000100
-#define KBD_LEDS 0x00004000
-
-struct dj_report {
- u8 report_id;
- u8 device_index;
- u8 report_type;
- u8 report_params[DJREPORT_SHORT_LENGTH - 3];
-};
-
-struct dj_receiver_dev {
- struct hid_device *hdev;
- struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
- DJ_DEVICE_INDEX_MIN];
- struct work_struct work;
- struct kfifo notif_fifo;
- spinlock_t lock;
- bool querying_devices;
-};
-
-struct dj_device {
- struct hid_device *hdev;
- struct dj_receiver_dev *dj_receiver_dev;
- u32 reports_supported;
- u8 device_index;
-};
-
-/**
- * is_dj_device - know if the given dj_device is not the receiver.
- * @dj_dev: the dj device to test
- *
- * This macro tests if a struct dj_device pointer is a device created
- * by the bus enumarator.
- */
-#define is_dj_device(dj_dev) \
- (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
-
-#endif
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
new file mode 100644
index 000000000000..2f420c0b6609
--- /dev/null
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -0,0 +1,1241 @@
+/*
+ * HIDPP protocol for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech (c)
+ * Copyright (c) 2012-2013 Google (c)
+ * Copyright (c) 2013-2014 Red Hat Inc.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/kfifo.h>
+#include <linux/input/mt.h>
+#include <asm/unaligned.h>
+#include "hid-ids.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
+
+#define REPORT_ID_HIDPP_SHORT 0x10
+#define REPORT_ID_HIDPP_LONG 0x11
+
+#define HIDPP_REPORT_SHORT_LENGTH 7
+#define HIDPP_REPORT_LONG_LENGTH 20
+
+#define HIDPP_QUIRK_CLASS_WTP BIT(0)
+
+/* bits 1..20 are reserved for classes */
+#define HIDPP_QUIRK_DELAYED_INIT BIT(21)
+#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
+#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
+
+/*
+ * There are two hidpp protocols in use: the first version, hidpp10, is known
+ * as the register access protocol or RAP; the second version, hidpp20, is
+ * known as the feature access protocol or FAP.
+ *
+ * Most older devices (including the Unifying usb receiver) use the RAP protocol
+ * whereas most newer devices use the FAP protocol. Both protocols are
+ * compatible with the underlying transport, which could be USB, Unifying, or
+ * Bluetooth. The message lengths are defined by the HID vendor-specific report
+ * descriptor for the HIDPP_SHORT report type (total message length 7 bytes) and
+ * the HIDPP_LONG report type (total message length 20 bytes)
+ *
+ * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG
+ * messages. The Unifying receiver itself responds to RAP messages (device index
+ * is 0xFF for the receiver), and all messages (short or long) with a device
+ * index between 1 and 6 are passed untouched to the corresponding paired
+ * Unifying device.
+ *
+ * The paired device can be RAP or FAP, it will receive the message untouched
+ * from the Unifying receiver.
+ */
+
+struct fap {
+ u8 feature_index;
+ u8 funcindex_clientid;
+ u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
+};
+
+struct rap {
+ u8 sub_id;
+ u8 reg_address;
+ u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
+};
+
+struct hidpp_report {
+ u8 report_id;
+ u8 device_index;
+ union {
+ struct fap fap;
+ struct rap rap;
+ u8 rawbytes[sizeof(struct fap)];
+ };
+} __packed;
+
+struct hidpp_device {
+ struct hid_device *hid_dev;
+ struct mutex send_mutex;
+ void *send_receive_buf;
+ wait_queue_head_t wait;
+ bool answer_available;
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ void *private_data;
+
+ struct work_struct work;
+ struct kfifo delayed_work_fifo;
+ atomic_t connected;
+ struct input_dev *delayed_input;
+
+ unsigned long quirks;
+};
+
+
+#define HIDPP_ERROR 0x8f
+#define HIDPP_ERROR_SUCCESS 0x00
+#define HIDPP_ERROR_INVALID_SUBID 0x01
+#define HIDPP_ERROR_INVALID_ADRESS 0x02
+#define HIDPP_ERROR_INVALID_VALUE 0x03
+#define HIDPP_ERROR_CONNECT_FAIL 0x04
+#define HIDPP_ERROR_TOO_MANY_DEVICES 0x05
+#define HIDPP_ERROR_ALREADY_EXISTS 0x06
+#define HIDPP_ERROR_BUSY 0x07
+#define HIDPP_ERROR_UNKNOWN_DEVICE 0x08
+#define HIDPP_ERROR_RESOURCE_ERROR 0x09
+#define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a
+#define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b
+#define HIDPP_ERROR_WRONG_PIN_CODE 0x0c
+
+static void hidpp_connect_event(struct hidpp_device *hidpp_dev);
+
+static int __hidpp_send_report(struct hid_device *hdev,
+ struct hidpp_report *hidpp_report)
+{
+ int fields_count, ret;
+
+ switch (hidpp_report->report_id) {
+ case REPORT_ID_HIDPP_SHORT:
+ fields_count = HIDPP_REPORT_SHORT_LENGTH;
+ break;
+ case REPORT_ID_HIDPP_LONG:
+ fields_count = HIDPP_REPORT_LONG_LENGTH;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /*
+ * set the device_index as the receiver; it will be overwritten by
+ * hid_hw_request if needed
+ */
+ hidpp_report->device_index = 0xff;
+
+ ret = hid_hw_raw_request(hdev, hidpp_report->report_id,
+ (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+
+ return ret == fields_count ? 0 : -1;
+}
+
+/**
+ * hidpp_send_message_sync() returns 0 in case of success, and something else
+ * in case of a failure.
+ * - If 'something else' is positive, that means that an error has been raised
+ * by the protocol itself.
+ * - If 'something else' is negative, that means that we had a classic error
+ * (-ENOMEM, -EPIPE, etc...)
+ */
+static int hidpp_send_message_sync(struct hidpp_device *hidpp,
+ struct hidpp_report *message,
+ struct hidpp_report *response)
+{
+ int ret;
+
+ mutex_lock(&hidpp->send_mutex);
+
+ hidpp->send_receive_buf = response;
+ hidpp->answer_available = false;
+
+ /*
+ * So that we can later validate the answer when it arrives
+ * in hidpp_raw_event
+ */
+ *response = *message;
+
+ ret = __hidpp_send_report(hidpp->hid_dev, message);
+
+ if (ret) {
+ dbg_hid("__hidpp_send_report returned err: %d\n", ret);
+ memset(response, 0, sizeof(struct hidpp_report));
+ goto exit;
+ }
+
+ if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
+ 5*HZ)) {
+ dbg_hid("%s:timeout waiting for response\n", __func__);
+ memset(response, 0, sizeof(struct hidpp_report));
+ ret = -ETIMEDOUT;
+ }
+
+ if (response->report_id == REPORT_ID_HIDPP_SHORT &&
+ response->fap.feature_index == HIDPP_ERROR) {
+ ret = response->fap.params[1];
+ dbg_hid("__hidpp_send_report got hidpp error %02X\n", ret);
+ goto exit;
+ }
+
+exit:
+ mutex_unlock(&hidpp->send_mutex);
+ return ret;
+
+}
+
+static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
+ u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count,
+ struct hidpp_report *response)
+{
+ struct hidpp_report *message;
+ int ret;
+
+ if (param_count > sizeof(message->fap.params))
+ return -EINVAL;
+
+ message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
+ if (!message)
+ return -ENOMEM;
+ message->report_id = REPORT_ID_HIDPP_LONG;
+ message->fap.feature_index = feat_index;
+ message->fap.funcindex_clientid = funcindex_clientid;
+ memcpy(&message->fap.params, params, param_count);
+
+ ret = hidpp_send_message_sync(hidpp, message, response);
+ kfree(message);
+ return ret;
+}
+
+static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
+ u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count,
+ struct hidpp_report *response)
+{
+ struct hidpp_report *message;
+ int ret;
+
+ if ((report_id != REPORT_ID_HIDPP_SHORT) &&
+ (report_id != REPORT_ID_HIDPP_LONG))
+ return -EINVAL;
+
+ if (param_count > sizeof(message->rap.params))
+ return -EINVAL;
+
+ message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
+ if (!message)
+ return -ENOMEM;
+ message->report_id = report_id;
+ message->rap.sub_id = sub_id;
+ message->rap.reg_address = reg_address;
+ memcpy(&message->rap.params, params, param_count);
+
+ ret = hidpp_send_message_sync(hidpp_dev, message, response);
+ kfree(message);
+ return ret;
+}
+
+static void delayed_work_cb(struct work_struct *work)
+{
+ struct hidpp_device *hidpp = container_of(work, struct hidpp_device,
+ work);
+ hidpp_connect_event(hidpp);
+}
+
+static inline bool hidpp_match_answer(struct hidpp_report *question,
+ struct hidpp_report *answer)
+{
+ return (answer->fap.feature_index == question->fap.feature_index) &&
+ (answer->fap.funcindex_clientid == question->fap.funcindex_clientid);
+}
+
+static inline bool hidpp_match_error(struct hidpp_report *question,
+ struct hidpp_report *answer)
+{
+ return (answer->fap.feature_index == HIDPP_ERROR) &&
+ (answer->fap.funcindex_clientid == question->fap.feature_index) &&
+ (answer->fap.params[0] == question->fap.funcindex_clientid);
+}
+
+static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+{
+ return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
+ (report->rap.sub_id == 0x41);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HIDP++ 1.0 commands */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_SET_REGISTER 0x80
+#define HIDPP_GET_REGISTER 0x81
+#define HIDPP_SET_LONG_REGISTER 0x82
+#define HIDPP_GET_LONG_REGISTER 0x83
+
+#define HIDPP_REG_PAIRING_INFORMATION 0xB5
+#define DEVICE_NAME 0x40
+
+static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
+{
+ struct hidpp_report response;
+ int ret;
+ /* hid-logitech-dj is in charge of setting the right device index */
+ u8 params[1] = { DEVICE_NAME };
+ char *name;
+ int len;
+
+ ret = hidpp_send_rap_command_sync(hidpp_dev,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION,
+ params, 1, &response);
+ if (ret)
+ return NULL;
+
+ len = response.rap.params[1];
+
+ if (2 + len > sizeof(response.rap.params))
+ return NULL;
+
+ name = kzalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ memcpy(name, &response.rap.params[2], len);
+ return name;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x0000: Root */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_ROOT 0x0000
+#define HIDPP_PAGE_ROOT_IDX 0x00
+
+#define CMD_ROOT_GET_FEATURE 0x01
+#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11
+
+static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
+ u8 *feature_index, u8 *feature_type)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 params[2] = { feature >> 8, feature & 0x00FF };
+
+ ret = hidpp_send_fap_command_sync(hidpp,
+ HIDPP_PAGE_ROOT_IDX,
+ CMD_ROOT_GET_FEATURE,
+ params, 2, &response);
+ if (ret)
+ return ret;
+
+ *feature_index = response.fap.params[0];
+ *feature_type = response.fap.params[1];
+
+ return ret;
+}
+
+static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
+{
+ struct hidpp_report response;
+ int ret;
+
+ ret = hidpp_send_fap_command_sync(hidpp,
+ HIDPP_PAGE_ROOT_IDX,
+ CMD_ROOT_GET_PROTOCOL_VERSION,
+ NULL, 0, &response);
+
+ if (ret == HIDPP_ERROR_INVALID_SUBID) {
+ hidpp->protocol_major = 1;
+ hidpp->protocol_minor = 0;
+ return 0;
+ }
+
+ /* the device might not be connected */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ hidpp->protocol_major = response.fap.params[0];
+ hidpp->protocol_minor = response.fap.params[1];
+
+ return ret;
+}
+
+static bool hidpp_is_connected(struct hidpp_device *hidpp)
+{
+ int ret;
+
+ ret = hidpp_root_get_protocol_version(hidpp);
+ if (!ret)
+ hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ return ret == 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x0005: GetDeviceNameType */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005
+
+#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01
+#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11
+#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21
+
+static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp,
+ u8 feature_index, u8 *nameLength)
+{
+ struct hidpp_report response;
+ int ret;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_GET_DEVICE_NAME_TYPE_GET_COUNT, NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ *nameLength = response.fap.params[0];
+
+ return ret;
+}
+
+static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
+ u8 feature_index, u8 char_index, char *device_name, int len_buf)
+{
+ struct hidpp_report response;
+ int ret, i;
+ int count;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME, &char_index, 1,
+ &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ if (response.report_id == REPORT_ID_HIDPP_LONG)
+ count = HIDPP_REPORT_LONG_LENGTH - 4;
+ else
+ count = HIDPP_REPORT_SHORT_LENGTH - 4;
+
+ if (len_buf < count)
+ count = len_buf;
+
+ for (i = 0; i < count; i++)
+ device_name[i] = response.fap.params[i];
+
+ return count;
+}
+
+static char *hidpp_get_device_name(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ u8 feature_index;
+ u8 __name_length;
+ char *name;
+ unsigned index = 0;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE,
+ &feature_index, &feature_type);
+ if (ret)
+ return NULL;
+
+ ret = hidpp_devicenametype_get_count(hidpp, feature_index,
+ &__name_length);
+ if (ret)
+ return NULL;
+
+ name = kzalloc(__name_length + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+
+ while (index < __name_length) {
+ ret = hidpp_devicenametype_get_device_name(hidpp,
+ feature_index, index, name + index,
+ __name_length - index);
+ if (ret <= 0) {
+ kfree(name);
+ return NULL;
+ }
+ index += ret;
+ }
+
+ return name;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x6100: TouchPadRawXY */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100
+
+#define CMD_TOUCHPAD_GET_RAW_INFO 0x01
+#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21
+
+#define EVENT_TOUCHPAD_RAW_XY 0x00
+
+#define TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT 0x01
+#define TOUCHPAD_RAW_XY_ORIGIN_UPPER_LEFT 0x03
+
+struct hidpp_touchpad_raw_info {
+ u16 x_size;
+ u16 y_size;
+ u8 z_range;
+ u8 area_range;
+ u8 timestamp_unit;
+ u8 maxcontacts;
+ u8 origin;
+ u16 res;
+};
+
+struct hidpp_touchpad_raw_xy_finger {
+ u8 contact_type;
+ u8 contact_status;
+ u16 x;
+ u16 y;
+ u8 z;
+ u8 area;
+ u8 finger_id;
+};
+
+struct hidpp_touchpad_raw_xy {
+ u16 timestamp;
+ struct hidpp_touchpad_raw_xy_finger fingers[2];
+ u8 spurious_flag;
+ u8 end_of_frame;
+ u8 finger_count;
+ u8 button;
+};
+
+static int hidpp_touchpad_get_raw_info(struct hidpp_device *hidpp,
+ u8 feature_index, struct hidpp_touchpad_raw_info *raw_info)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_TOUCHPAD_GET_RAW_INFO, NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ raw_info->x_size = get_unaligned_be16(&params[0]);
+ raw_info->y_size = get_unaligned_be16(&params[2]);
+ raw_info->z_range = params[4];
+ raw_info->area_range = params[5];
+ raw_info->maxcontacts = params[7];
+ raw_info->origin = params[8];
+ /* res is given in units per inch */
+ raw_info->res = get_unaligned_be16(&params[13]) * 2 / 51;
+
+ return ret;
+}
+
+static int hidpp_touchpad_set_raw_report_state(struct hidpp_device *hidpp_dev,
+ u8 feature_index, bool send_raw_reports,
+ bool sensor_enhanced_settings)
+{
+ struct hidpp_report response;
+
+ /*
+ * Params:
+ * bit 0 - enable raw
+ * bit 1 - 16bit Z, no area
+ * bit 2 - enhanced sensitivity
+ * bit 3 - width, height (4 bits each) instead of area
+ * bit 4 - send raw + gestures (degrades smoothness)
+ * remaining bits - reserved
+ */
+ u8 params = send_raw_reports | (sensor_enhanced_settings << 2);
+
+ return hidpp_send_fap_command_sync(hidpp_dev, feature_index,
+ CMD_TOUCHPAD_SET_RAW_REPORT_STATE, &params, 1, &response);
+}
+
+static void hidpp_touchpad_touch_event(u8 *data,
+ struct hidpp_touchpad_raw_xy_finger *finger)
+{
+ u8 x_m = data[0] << 2;
+ u8 y_m = data[2] << 2;
+
+ finger->x = x_m << 6 | data[1];
+ finger->y = y_m << 6 | data[3];
+
+ finger->contact_type = data[0] >> 6;
+ finger->contact_status = data[2] >> 6;
+
+ finger->z = data[4];
+ finger->area = data[5];
+ finger->finger_id = data[6] >> 4;
+}
+
+static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
+ u8 *data, struct hidpp_touchpad_raw_xy *raw_xy)
+{
+ memset(raw_xy, 0, sizeof(struct hidpp_touchpad_raw_xy));
+ raw_xy->end_of_frame = data[8] & 0x01;
+ raw_xy->spurious_flag = (data[8] >> 1) & 0x01;
+ raw_xy->finger_count = data[15] & 0x0f;
+ raw_xy->button = (data[8] >> 2) & 0x01;
+
+ if (raw_xy->finger_count) {
+ hidpp_touchpad_touch_event(&data[2], &raw_xy->fingers[0]);
+ hidpp_touchpad_touch_event(&data[9], &raw_xy->fingers[1]);
+ }
+}
+
+/* ************************************************************************** */
+/* */
+/* Device Support */
+/* */
+/* ************************************************************************** */
+
+/* -------------------------------------------------------------------------- */
+/* Touchpad HID++ devices */
+/* -------------------------------------------------------------------------- */
+
+#define WTP_MANUAL_RESOLUTION 39
+
+struct wtp_data {
+ struct input_dev *input;
+ u16 x_size, y_size;
+ u8 finger_count;
+ u8 mt_feature_index;
+ u8 button_feature_index;
+ u8 maxcontacts;
+ bool flip_y;
+ unsigned int resolution;
+};
+
+static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
+ (field->application == HID_GD_KEYBOARD))
+ return 0;
+
+ return -1;
+}
+
+static void wtp_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input_dev, bool origin_is_hid_core)
+{
+ struct wtp_data *wd = hidpp->private_data;
+
+ if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
+ /* this is the generic hid-input call */
+ return;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __clear_bit(EV_REL, input_dev->evbit);
+ __clear_bit(EV_LED, input_dev->evbit);
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, wd->x_size, 0, 0);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X, wd->resolution);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, wd->y_size, 0, 0);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y, wd->resolution);
+
+ /* Max pressure is not given by the devices, pick one */
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 50, 0, 0);
+
+ input_set_capability(input_dev, EV_KEY, BTN_LEFT);
+
+ if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS)
+ input_set_capability(input_dev, EV_KEY, BTN_RIGHT);
+ else
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
+ input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER |
+ INPUT_MT_DROP_UNUSED);
+
+ wd->input = input_dev;
+}
+
+static void wtp_touch_event(struct wtp_data *wd,
+ struct hidpp_touchpad_raw_xy_finger *touch_report)
+{
+ int slot;
+
+ if (!touch_report->finger_id || touch_report->contact_type)
+ /* no actual data */
+ return;
+
+ slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id);
+
+ input_mt_slot(wd->input, slot);
+ input_mt_report_slot_state(wd->input, MT_TOOL_FINGER,
+ touch_report->contact_status);
+ if (touch_report->contact_status) {
+ input_event(wd->input, EV_ABS, ABS_MT_POSITION_X,
+ touch_report->x);
+ input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y,
+ wd->flip_y ? wd->y_size - touch_report->y :
+ touch_report->y);
+ input_event(wd->input, EV_ABS, ABS_MT_PRESSURE,
+ touch_report->area);
+ }
+}
+
+static void wtp_send_raw_xy_event(struct hidpp_device *hidpp,
+ struct hidpp_touchpad_raw_xy *raw)
+{
+ struct wtp_data *wd = hidpp->private_data;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ wtp_touch_event(wd, &(raw->fingers[i]));
+
+ if (raw->end_of_frame &&
+ !(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS))
+ input_event(wd->input, EV_KEY, BTN_LEFT, raw->button);
+
+ if (raw->end_of_frame || raw->finger_count <= 2) {
+ input_mt_sync_frame(wd->input);
+ input_sync(wd->input);
+ }
+}
+
+static int wtp_mouse_raw_xy_event(struct hidpp_device *hidpp, u8 *data)
+{
+ struct wtp_data *wd = hidpp->private_data;
+ u8 c1_area = ((data[7] & 0xf) * (data[7] & 0xf) +
+ (data[7] >> 4) * (data[7] >> 4)) / 2;
+ u8 c2_area = ((data[13] & 0xf) * (data[13] & 0xf) +
+ (data[13] >> 4) * (data[13] >> 4)) / 2;
+ struct hidpp_touchpad_raw_xy raw = {
+ .timestamp = data[1],
+ .fingers = {
+ {
+ .contact_type = 0,
+ .contact_status = !!data[7],
+ .x = get_unaligned_le16(&data[3]),
+ .y = get_unaligned_le16(&data[5]),
+ .z = c1_area,
+ .area = c1_area,
+ .finger_id = data[2],
+ }, {
+ .contact_type = 0,
+ .contact_status = !!data[13],
+ .x = get_unaligned_le16(&data[9]),
+ .y = get_unaligned_le16(&data[11]),
+ .z = c2_area,
+ .area = c2_area,
+ .finger_id = data[8],
+ }
+ },
+ .finger_count = wd->maxcontacts,
+ .spurious_flag = 0,
+ .end_of_frame = (data[0] >> 7) == 0,
+ .button = data[0] & 0x01,
+ };
+
+ wtp_send_raw_xy_event(hidpp, &raw);
+
+ return 1;
+}
+
+static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct wtp_data *wd = hidpp->private_data;
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ struct hidpp_touchpad_raw_xy raw;
+
+ if (!wd || !wd->input)
+ return 1;
+
+ switch (data[0]) {
+ case 0x02:
+ if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
+ input_event(wd->input, EV_KEY, BTN_LEFT,
+ !!(data[1] & 0x01));
+ input_event(wd->input, EV_KEY, BTN_RIGHT,
+ !!(data[1] & 0x02));
+ input_sync(wd->input);
+ } else {
+ if (size < 21)
+ return 1;
+ return wtp_mouse_raw_xy_event(hidpp, &data[7]);
+ }
+ case REPORT_ID_HIDPP_LONG:
+ if ((report->fap.feature_index != wd->mt_feature_index) ||
+ (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
+ return 1;
+ hidpp_touchpad_raw_xy_event(hidpp, data + 4, &raw);
+
+ wtp_send_raw_xy_event(hidpp, &raw);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int wtp_get_config(struct hidpp_device *hidpp)
+{
+ struct wtp_data *wd = hidpp->private_data;
+ struct hidpp_touchpad_raw_info raw_info = {0};
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY,
+ &wd->mt_feature_index, &feature_type);
+ if (ret)
+ /* means that the device is not powered up */
+ return ret;
+
+ ret = hidpp_touchpad_get_raw_info(hidpp, wd->mt_feature_index,
+ &raw_info);
+ if (ret)
+ return ret;
+
+ wd->x_size = raw_info.x_size;
+ wd->y_size = raw_info.y_size;
+ wd->maxcontacts = raw_info.maxcontacts;
+ wd->flip_y = raw_info.origin == TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT;
+ wd->resolution = raw_info.res;
+ if (!wd->resolution)
+ wd->resolution = WTP_MANUAL_RESOLUTION;
+
+ return 0;
+}
+
+static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct wtp_data *wd;
+
+ wd = devm_kzalloc(&hdev->dev, sizeof(struct wtp_data),
+ GFP_KERNEL);
+ if (!wd)
+ return -ENOMEM;
+
+ hidpp->private_data = wd;
+
+ return 0;
+};
+
+static void wtp_connect(struct hid_device *hdev, bool connected)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct wtp_data *wd = hidpp->private_data;
+ int ret;
+
+ if (!connected)
+ return;
+
+ if (!wd->x_size) {
+ ret = wtp_get_config(hidpp);
+ if (ret) {
+ hid_err(hdev, "Can not get wtp config: %d\n", ret);
+ return;
+ }
+ }
+
+ hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index,
+ true, true);
+}
+
+/* -------------------------------------------------------------------------- */
+/* Generic HID++ devices */
+/* -------------------------------------------------------------------------- */
+
+static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
+ return wtp_input_mapping(hdev, hi, field, usage, bit, max);
+
+ return 0;
+}
+
+static void hidpp_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input, bool origin_is_hid_core)
+{
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
+ wtp_populate_input(hidpp, input, origin_is_hid_core);
+}
+
+static void hidpp_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct input_dev *input = hidinput->input;
+
+ hidpp_populate_input(hidpp, input, true);
+}
+
+static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
+ int size)
+{
+ struct hidpp_report *question = hidpp->send_receive_buf;
+ struct hidpp_report *answer = hidpp->send_receive_buf;
+ struct hidpp_report *report = (struct hidpp_report *)data;
+
+ /*
+ * If the mutex is locked then we have a pending answer from a
+ * previously sent command
+ */
+ if (unlikely(mutex_is_locked(&hidpp->send_mutex))) {
+ /*
+ * Check for a correct hidpp20 answer or the corresponding
+ * error
+ */
+ if (hidpp_match_answer(question, report) ||
+ hidpp_match_error(question, report)) {
+ *answer = *report;
+ hidpp->answer_available = true;
+ wake_up(&hidpp->wait);
+ /*
+ * This was an answer to a command that this driver sent.
+ * We return 1 to hid-core to avoid forwarding the
+ * command upstream, as it has already been handled by the driver.
+ */
+
+ return 1;
+ }
+ }
+
+ if (unlikely(hidpp_report_is_connect_event(report))) {
+ atomic_set(&hidpp->connected,
+ !(report->rap.params[0] & (1 << 6)));
+ if ((hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) &&
+ (schedule_work(&hidpp->work) == 0))
+ dbg_hid("%s: connect event already queued\n", __func__);
+ return 1;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
+ return wtp_raw_event(hidpp->hid_dev, data, size);
+
+ return 0;
+}
+
+static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ switch (data[0]) {
+ case REPORT_ID_HIDPP_LONG:
+ if (size != HIDPP_REPORT_LONG_LENGTH) {
+ hid_err(hdev, "received hid++ report of bad size (%d)",
+ size);
+ return 1;
+ }
+ return hidpp_raw_hidpp_event(hidpp, data, size);
+ case REPORT_ID_HIDPP_SHORT:
+ if (size != HIDPP_REPORT_SHORT_LENGTH) {
+ hid_err(hdev, "received hid++ report of bad size (%d)",
+ size);
+ return 1;
+ }
+ return hidpp_raw_hidpp_event(hidpp, data, size);
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
+ return wtp_raw_event(hdev, data, size);
+
+ return 0;
+}
+
+static void hidpp_overwrite_name(struct hid_device *hdev, bool use_unifying)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ char *name;
+
+ if (use_unifying)
+ /*
+ * the device is connected through a Unifying receiver, and
+ * might not be connected yet.
+ * Ask the receiver for its name.
+ */
+ name = hidpp_get_unifying_name(hidpp);
+ else
+ name = hidpp_get_device_name(hidpp);
+
+ if (!name)
+ hid_err(hdev, "unable to retrieve the name of the device");
+ else
+ snprintf(hdev->name, sizeof(hdev->name), "%s", name);
+
+ kfree(name);
+}
+
+static int hidpp_input_open(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ return hid_hw_open(hid);
+}
+
+static void hidpp_input_close(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ hid_hw_close(hid);
+}
+
+static struct input_dev *hidpp_allocate_input(struct hid_device *hdev)
+{
+ struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev);
+
+ if (!input_dev)
+ return NULL;
+
+ input_set_drvdata(input_dev, hdev);
+ input_dev->open = hidpp_input_open;
+ input_dev->close = hidpp_input_close;
+
+ input_dev->name = hdev->name;
+ input_dev->phys = hdev->phys;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_dev->dev.parent = &hdev->dev;
+
+ return input_dev;
+}
+
+static void hidpp_connect_event(struct hidpp_device *hidpp)
+{
+ struct hid_device *hdev = hidpp->hid_dev;
+ int ret = 0;
+ bool connected = atomic_read(&hidpp->connected);
+ struct input_dev *input;
+ char *name, *devm_name;
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
+ wtp_connect(hdev, connected);
+
+ if (!connected || hidpp->delayed_input)
+ return;
+
+ if (!hidpp->protocol_major) {
+ ret = !hidpp_is_connected(hidpp);
+ if (ret) {
+ hid_err(hdev, "Can not get the protocol version.\n");
+ return;
+ }
+ }
+
+ /* the device is already connected; we can ask for its name and
+ * protocol */
+ hid_info(hdev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+
+ input = hidpp_allocate_input(hdev);
+ if (!input) {
+ hid_err(hdev, "cannot allocate new input device: %d\n", ret);
+ return;
+ }
+
+ name = hidpp_get_device_name(hidpp);
+ if (!name) {
+ hid_err(hdev, "unable to retrieve the name of the device");
+ } else {
+ devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
+ if (devm_name)
+ input->name = devm_name;
+ kfree(name);
+ }
+
+ hidpp_populate_input(hidpp, input, false);
+
+ ret = input_register_device(input);
+ if (ret)
+ input_free_device(input);
+
+ hidpp->delayed_input = input;
+}
+
+static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct hidpp_device *hidpp;
+ int ret;
+ bool connected;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(struct hidpp_device),
+ GFP_KERNEL);
+ if (!hidpp)
+ return -ENOMEM;
+
+ hidpp->hid_dev = hdev;
+ hid_set_drvdata(hdev, hidpp);
+
+ hidpp->quirks = id->driver_data;
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
+ ret = wtp_allocate(hdev, id);
+ if (ret)
+ goto wtp_allocate_fail;
+ }
+
+ INIT_WORK(&hidpp->work, delayed_work_cb);
+ mutex_init(&hidpp->send_mutex);
+ init_waitqueue_head(&hidpp->wait);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "%s:parse failed\n", __func__);
+ goto hid_parse_fail;
+ }
+
+ /* Allow incoming packets */
+ hid_device_io_start(hdev);
+
+ connected = hidpp_is_connected(hidpp);
+ if (id->group != HID_GROUP_LOGITECH_DJ_DEVICE) {
+ if (!connected) {
+ hid_err(hdev, "Device not connected");
+ hid_device_io_stop(hdev);
+ goto hid_parse_fail;
+ }
+
+ hid_info(hdev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ }
+
+ hidpp_overwrite_name(hdev, id->group == HID_GROUP_LOGITECH_DJ_DEVICE);
+ atomic_set(&hidpp->connected, connected);
+
+ if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ ret = wtp_get_config(hidpp);
+ if (ret)
+ goto hid_parse_fail;
+ }
+
+ /* Block incoming packets */
+ hid_device_io_stop(hdev);
+
+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
+
+ /* Re-enable hidinput for multi-input devices */
+ if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
+ connect_mask |= HID_CONNECT_HIDINPUT;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) {
+ /* Allow incoming packets */
+ hid_device_io_start(hdev);
+
+ hidpp_connect_event(hidpp);
+ }
+
+ return ret;
+
+hid_hw_start_fail:
+hid_parse_fail:
+ cancel_work_sync(&hidpp->work);
+ mutex_destroy(&hidpp->send_mutex);
+wtp_allocate_fail:
+ hid_set_drvdata(hdev, NULL);
+ return ret;
+}
+
+static void hidpp_remove(struct hid_device *hdev)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ cancel_work_sync(&hidpp->work);
+ mutex_destroy(&hidpp->send_mutex);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id hidpp_devices[] = {
+ { /* wireless touchpad */
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4011),
+ .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
+ HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
+ { /* wireless touchpad T650 */
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4101),
+ .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
+ { /* wireless touchpad T651 */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_T651),
+ .driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Keyboard TK820 */
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, 0x4102),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
+ HIDPP_QUIRK_CLASS_WTP },
+
+ { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
+ USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, hidpp_devices);
+
+static struct hid_driver hidpp_driver = {
+ .name = "logitech-hidpp-device",
+ .id_table = hidpp_devices,
+ .probe = hidpp_probe,
+ .remove = hidpp_remove,
+ .raw_event = hidpp_raw_event,
+ .input_configured = hidpp_input_configured,
+ .input_mapping = hidpp_input_mapping,
+};
+
+module_hid_driver(hidpp_driver);
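Every synchronous command in the new driver goes through hidpp_send_message_sync(), whose return convention is documented above: positive values are errors raised by the protocol, negative values are classic transport errors. A sketch of a typical HID++ 2.0 (FAP) query built on the static helpers defined in this file, following the error-handling pattern of hidpp_devicenametype_get_count(); the function name is illustrative:

/* Sketch: a typical FAP query using the helpers defined above. */
static int example_get_protocol(struct hidpp_device *hidpp)
{
        struct hidpp_report response;
        int ret;

        ret = hidpp_send_fap_command_sync(hidpp, HIDPP_PAGE_ROOT_IDX,
                                          CMD_ROOT_GET_PROTOCOL_VERSION,
                                          NULL, 0, &response);
        if (ret > 0)            /* error reported by the protocol itself */
                return -EPROTO;
        if (ret)                /* transport error (-ENOMEM, -EPIPE, ...) */
                return ret;

        /* params[0]/params[1] carry the protocol major/minor version */
        return (response.fap.params[0] << 8) | response.fap.params[1];
}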
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 8ba17a946f2a..cacda43f6a6f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -274,6 +274,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ .driver_data = MS_HIDINPUT },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 51e25b9407f2..f65e78b46999 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -67,6 +67,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
#define MT_QUIRK_HOVERING (1 << 11)
#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
+#define MT_QUIRK_FORCE_GET_FEATURE (1 << 13)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -150,6 +151,7 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_FLATFROG 0x0107
#define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
+#define MT_CLS_VTL 0x0110
#define MT_DEFAULT_MAXCONTACT 10
#define MT_MAX_MAXCONTACT 250
@@ -255,6 +257,11 @@ static struct mt_class mt_classes[] = {
.sn_move = 2048,
.maxcontacts = 40,
},
+ { .name = MT_CLS_VTL,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_FORCE_GET_FEATURE,
+ },
{ }
};
@@ -809,6 +816,9 @@ static void mt_set_input_mode(struct hid_device *hdev)
struct mt_device *td = hid_get_drvdata(hdev);
struct hid_report *r;
struct hid_report_enum *re;
+ struct mt_class *cls = &td->mtclass;
+ char *buf;
+ int report_len;
if (td->inputmode < 0)
return;
@@ -816,6 +826,18 @@ static void mt_set_input_mode(struct hid_device *hdev)
re = &(hdev->report_enum[HID_FEATURE_REPORT]);
r = re->report_id_hash[td->inputmode];
if (r) {
+ if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
+ report_len = hid_report_len(r);
+ buf = hid_alloc_report_buf(r, GFP_KERNEL);
+ if (!buf) {
+ hid_err(hdev, "failed to allocate buffer for report\n");
+ return;
+ }
+ hid_hw_raw_request(hdev, r->id, buf, report_len,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ kfree(buf);
+ }
r->field[0]->value[td->inputmode_index] = td->inputmode_value;
hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
}
@@ -1281,6 +1303,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
+ /* VTL panels */
+ { .driver_data = MT_CLS_VTL,
+ MT_USB_DEVICE(USB_VENDOR_ID_VTL,
+ USB_DEVICE_ID_VTL_MULTITOUCH_FF3F) },
+
/* Wistron panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
new file mode 100644
index 000000000000..2180e0789b76
--- /dev/null
+++ b/drivers/hid/hid-plantronics.c
@@ -0,0 +1,55 @@
+/*
+ * Plantronics USB HID Driver
+ *
+ * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
+ * Copyright (c) 2014 Terry Junge <terry.junge@plantronics.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include "hid-ids.h"
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+static int plantronics_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (field->application == HID_CP_CONSUMERCONTROL
+ && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n",
+ usage->hid, field->application);
+ return 0;
+ }
+
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
+ usage->hid, field->application);
+
+ return -1;
+}
+
+static const struct hid_device_id plantronics_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, plantronics_devices);
+
+static struct hid_driver plantronics_driver = {
+ .name = "plantronics",
+ .id_table = plantronics_devices,
+ .input_mapping = plantronics_input_mapping,
+};
+module_hid_driver(plantronics_driver);
+
+MODULE_AUTHOR("JD Cole <jd.cole@plantronics.com>");
+MODULE_AUTHOR("Terry Junge <terry.junge@plantronics.com>");
+MODULE_DESCRIPTION("Plantronics USB HID Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 3cccff73b9b9..b51200fe2f33 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -584,11 +584,15 @@ static int rmi_populate_f11(struct hid_device *hdev)
bool has_query10 = false;
bool has_query11;
bool has_query12;
+ bool has_query27;
+ bool has_query28;
+ bool has_query36 = false;
bool has_physical_props;
bool has_gestures;
bool has_rel;
+ bool has_data40 = false;
unsigned x_size, y_size;
- u16 query12_offset;
+ u16 query_offset;
if (!data->f11.query_base_addr) {
hid_err(hdev, "No 2D sensor found, giving up.\n");
@@ -604,6 +608,8 @@ static int rmi_populate_f11(struct hid_device *hdev)
has_query9 = !!(buf[0] & BIT(3));
has_query11 = !!(buf[0] & BIT(4));
has_query12 = !!(buf[0] & BIT(5));
+ has_query27 = !!(buf[0] & BIT(6));
+ has_query28 = !!(buf[0] & BIT(7));
/* query 1 to get the max number of fingers */
ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
@@ -626,43 +632,43 @@ static int rmi_populate_f11(struct hid_device *hdev)
has_rel = !!(buf[0] & BIT(3));
has_gestures = !!(buf[0] & BIT(5));
+ /*
+ * At least 4 queries are guaranteed to be present in F11;
+ * add 1 for query 5, which is present since absolute events
+ * are reported, and 1 for query 12.
+ */
+ query_offset = 6;
+
+ if (has_rel)
+ ++query_offset; /* query 6 is present */
+
if (has_gestures) {
/* query 8 to find out if query 10 exists */
- ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf);
+ ret = rmi_read(hdev,
+ data->f11.query_base_addr + query_offset + 1, buf);
if (ret) {
hid_err(hdev, "can not read gesture information: %d.\n",
ret);
return ret;
}
has_query10 = !!(buf[0] & BIT(2));
- }
- /*
- * At least 4 queries are guaranteed to be present in F11
- * +1 for query 5 which is present since absolute events are
- * reported and +1 for query 12.
- */
- query12_offset = 6;
-
- if (has_rel)
- ++query12_offset; /* query 6 is present */
-
- if (has_gestures)
- query12_offset += 2; /* query 7 and 8 are present */
+ query_offset += 2; /* query 7 and 8 are present */
+ }
if (has_query9)
- ++query12_offset;
+ ++query_offset;
if (has_query10)
- ++query12_offset;
+ ++query_offset;
if (has_query11)
- ++query12_offset;
+ ++query_offset;
/* query 12 to know if the physical properties are reported */
if (has_query12) {
ret = rmi_read(hdev, data->f11.query_base_addr
- + query12_offset, buf);
+ + query_offset, buf);
if (ret) {
hid_err(hdev, "can not get query 12: %d.\n", ret);
return ret;
@@ -670,9 +676,10 @@ static int rmi_populate_f11(struct hid_device *hdev)
has_physical_props = !!(buf[0] & BIT(5));
if (has_physical_props) {
+ query_offset += 1;
ret = rmi_read_block(hdev,
data->f11.query_base_addr
- + query12_offset + 1, buf, 4);
+ + query_offset, buf, 4);
if (ret) {
hid_err(hdev, "can not read query 15-18: %d.\n",
ret);
@@ -687,9 +694,45 @@ static int rmi_populate_f11(struct hid_device *hdev)
hid_info(hdev, "%s: size in mm: %d x %d\n",
__func__, data->x_size_mm, data->y_size_mm);
+
+ /*
+ * queries 15 - 18 contain the size of the sensor
+ * and queries 19 - 26 contain the bezel dimensions
+ */
+ query_offset += 12;
+ }
+ }
+
+ if (has_query27)
+ ++query_offset;
+
+ if (has_query28) {
+ ret = rmi_read(hdev, data->f11.query_base_addr
+ + query_offset, buf);
+ if (ret) {
+ hid_err(hdev, "can not get query 28: %d.\n", ret);
+ return ret;
+ }
+
+ has_query36 = !!(buf[0] & BIT(6));
+ }
+
+ if (has_query36) {
+ query_offset += 2;
+ ret = rmi_read(hdev, data->f11.query_base_addr
+ + query_offset, buf);
+ if (ret) {
+ hid_err(hdev, "can not get query 36: %d.\n", ret);
+ return ret;
}
+
+ has_data40 = !!(buf[0] & BIT(5));
}
+
+ if (has_data40)
+ data->f11.report_size += data->max_fingers * 2;
+
/*
* retrieve the ctrl registers
* the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
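
The hunks above replace the fixed query12_offset with a running query_offset that advances as each optional F11 query register is found. The arithmetic that locates query 12 boils down to the small userspace sketch below; the function name and the example flag values are mine, not the driver's.

#include <stdio.h>
#include <stdbool.h>

/*
 * Offset of F11 query 12 from the query base, following the driver's
 * bookkeeping: 6 registers are always counted (4 guaranteed queries,
 * query 5 and query 12), plus one per optional register in between.
 */
static unsigned int f11_query12_offset(bool has_rel, bool has_gestures,
				       bool has_query9, bool has_query10,
				       bool has_query11)
{
	unsigned int off = 6;

	off += has_rel;			/* query 6 */
	off += has_gestures ? 2 : 0;	/* queries 7 and 8 */
	off += has_query9;
	off += has_query10;
	off += has_query11;
	return off;
}

int main(void)
{
	/* Example: relative data and gestures present, queries 9-11 absent. */
	printf("query 12 lives at base + %u\n",
	       f11_query12_offset(true, true, false, false, false));
	return 0;
}
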
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 6101816a7ddd..c29265055ac1 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -46,6 +46,7 @@ static void kone_profile_activated(struct kone_device *kone, uint new_profile)
static void kone_profile_report(struct kone_device *kone, uint new_profile)
{
struct kone_roccat_report roccat_report;
+
roccat_report.event = kone_mouse_event_switch_profile;
roccat_report.value = new_profile;
roccat_report.key = 0;
@@ -163,6 +164,7 @@ static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
int retval;
+
retval = kone_send(usb_dev, kone_command_settings,
settings, sizeof(struct kone_settings));
if (retval)
@@ -387,7 +389,7 @@ static struct bin_attribute bin_attr_profile##number = { \
.read = kone_sysfs_read_profilex, \
.write = kone_sysfs_write_profilex, \
.private = &profile_numbers[number-1], \
-};
+}
PROFILE_ATTR(1);
PROFILE_ATTR(2);
PROFILE_ATTR(3);
@@ -456,6 +458,7 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev,
static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
unsigned char value;
+
value = number;
return kone_send(usb_dev, kone_command_calibrate, &value, 1);
}
@@ -697,10 +700,8 @@ static int kone_init_specials(struct hid_device *hdev)
== USB_INTERFACE_PROTOCOL_MOUSE) {
kone = kzalloc(sizeof(*kone), GFP_KERNEL);
- if (!kone) {
- hid_err(hdev, "can't alloc device descriptor\n");
+ if (!kone)
return -ENOMEM;
- }
hid_set_drvdata(hdev, kone);
retval = kone_init_kone_device_struct(usb_dev, kone);
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 69cca1476a0c..5632c54eadf0 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -7,7 +7,7 @@
* (This module is based on "hid-ortek".)
* Copyright (c) 2012 Andreas Hübner
*
- * R.A.T.7, M.M.O.7 (USB gaming mice):
+ * R.A.T.7, R.A.T.9, M.M.O.7 (USB gaming mice):
* Fixes the mode button which cycles through three constantly pressed
* buttons. All three press events are mapped to one button and the
* missing release event is generated immediately.
@@ -179,6 +179,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_FIX_PS1000 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index bc4269e559f1..31e9d2561106 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -798,6 +798,12 @@ union sixaxis_output_report_01 {
__u8 buf[36];
};
+#define DS4_REPORT_0x02_SIZE 37
+#define DS4_REPORT_0x05_SIZE 32
+#define DS4_REPORT_0x11_SIZE 78
+#define DS4_REPORT_0x81_SIZE 7
+#define SIXAXIS_REPORT_0xF2_SIZE 18
+
static spinlock_t sony_dev_list_lock;
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -811,6 +817,7 @@ struct sony_sc {
struct work_struct state_worker;
struct power_supply battery;
int device_id;
+ __u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
__u8 left;
@@ -1142,9 +1149,20 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
static int sixaxis_set_operational_bt(struct hid_device *hdev)
{
- unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
- return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+ __u8 *buf;
+ int ret;
+
+ buf = kmemdup(report, sizeof(report), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(buf);
+
+ return ret;
}
/*
@@ -1153,10 +1171,19 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
*/
static int dualshock4_set_operational_bt(struct hid_device *hdev)
{
- __u8 buf[37] = { 0 };
+ __u8 *buf;
+ int ret;
+
+ buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- return hid_hw_raw_request(hdev, 0x02, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+
+ kfree(buf);
+
+ return ret;
}
static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS])
@@ -1471,9 +1498,7 @@ error_leds:
static void sixaxis_state_worker(struct work_struct *work)
{
- struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
- int n;
- union sixaxis_output_report_01 report = {
+ static const union sixaxis_output_report_01 default_report = {
.buf = {
0x01,
0x00, 0xff, 0x00, 0xff, 0x00,
@@ -1485,20 +1510,27 @@ static void sixaxis_state_worker(struct work_struct *work)
0x00, 0x00, 0x00, 0x00, 0x00
}
};
+ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+ struct sixaxis_output_report *report =
+ (struct sixaxis_output_report *)sc->output_report_dmabuf;
+ int n;
+
+ /* Initialize the report with default values */
+ memcpy(report, &default_report, sizeof(struct sixaxis_output_report));
#ifdef CONFIG_SONY_FF
- report.data.rumble.right_motor_on = sc->right ? 1 : 0;
- report.data.rumble.left_motor_force = sc->left;
+ report->rumble.right_motor_on = sc->right ? 1 : 0;
+ report->rumble.left_motor_force = sc->left;
#endif
- report.data.leds_bitmap |= sc->led_state[0] << 1;
- report.data.leds_bitmap |= sc->led_state[1] << 2;
- report.data.leds_bitmap |= sc->led_state[2] << 3;
- report.data.leds_bitmap |= sc->led_state[3] << 4;
+ report->leds_bitmap |= sc->led_state[0] << 1;
+ report->leds_bitmap |= sc->led_state[1] << 2;
+ report->leds_bitmap |= sc->led_state[2] << 3;
+ report->leds_bitmap |= sc->led_state[3] << 4;
/* Set flag for all leds off, required for 3rd party INTEC controller */
- if ((report.data.leds_bitmap & 0x1E) == 0)
- report.data.leds_bitmap |= 0x20;
+ if ((report->leds_bitmap & 0x1E) == 0)
+ report->leds_bitmap |= 0x20;
/*
* The LEDs in the report are indexed in reverse order to their
@@ -1511,28 +1543,30 @@ static void sixaxis_state_worker(struct work_struct *work)
*/
for (n = 0; n < 4; n++) {
if (sc->led_delay_on[n] || sc->led_delay_off[n]) {
- report.data.led[3 - n].duty_off = sc->led_delay_off[n];
- report.data.led[3 - n].duty_on = sc->led_delay_on[n];
+ report->led[3 - n].duty_off = sc->led_delay_off[n];
+ report->led[3 - n].duty_on = sc->led_delay_on[n];
}
}
- hid_hw_raw_request(sc->hdev, report.data.report_id, report.buf,
- sizeof(report), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+ sizeof(struct sixaxis_output_report),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
static void dualshock4_state_worker(struct work_struct *work)
{
struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
struct hid_device *hdev = sc->hdev;
+ __u8 *buf = sc->output_report_dmabuf;
int offset;
- __u8 buf[78] = { 0 };
-
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+ memset(buf, 0, DS4_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
+ memset(buf, 0, DS4_REPORT_0x11_SIZE);
buf[0] = 0x11;
buf[1] = 0xB0;
buf[3] = 0x0F;
@@ -1560,12 +1594,33 @@ static void dualshock4_state_worker(struct work_struct *work)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, 32);
+ hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
else
- hid_hw_raw_request(hdev, 0x11, buf, 78,
+ hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
+static int sony_allocate_output_report(struct sony_sc *sc)
+{
+ if (sc->quirks & SIXAXIS_CONTROLLER)
+ sc->output_report_dmabuf =
+ kmalloc(sizeof(union sixaxis_output_report_01),
+ GFP_KERNEL);
+ else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
+ sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ GFP_KERNEL);
+ else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
+ sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ GFP_KERNEL);
+ else
+ return 0;
+
+ if (!sc->output_report_dmabuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
#ifdef CONFIG_SONY_FF
static int sony_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
@@ -1754,6 +1809,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
static int sony_check_add(struct sony_sc *sc)
{
+ __u8 *buf = NULL;
int n, ret;
if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -1769,36 +1825,44 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- __u8 buf[7];
+ buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/*
* The MAC address of a DS4 controller connected via USB can be
* retrieved with feature report 0x81. The address begins at
* offset 1.
*/
- ret = hid_hw_raw_request(sc->hdev, 0x81, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
+ DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
- if (ret != 7) {
+ if (ret != DS4_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
- return ret < 0 ? ret : -EINVAL;
+ ret = ret < 0 ? ret : -EINVAL;
+ goto out_free;
}
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
} else if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
- __u8 buf[18];
+ buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/*
* The MAC address of a Sixaxis controller connected via USB can
* be retrieved with feature report 0xf2. The address begins at
* offset 4.
*/
- ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ ret = hid_hw_raw_request(sc->hdev, 0xf2, buf,
+ SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
- if (ret != 18) {
+ if (ret != SIXAXIS_REPORT_0xF2_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n");
- return ret < 0 ? ret : -EINVAL;
+ ret = ret < 0 ? ret : -EINVAL;
+ goto out_free;
}
/*
@@ -1811,7 +1875,13 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
- return sony_check_add_dev_list(sc);
+ ret = sony_check_add_dev_list(sc);
+
+out_free:
+
+ kfree(buf);
+
+ return ret;
}
static int sony_set_device_id(struct sony_sc *sc)
@@ -1895,6 +1965,12 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
+ ret = sony_allocate_output_report(sc);
+ if (ret < 0) {
+ hid_err(hdev, "failed to allocate the output report buffer\n");
+ goto err_stop;
+ }
+
ret = sony_set_device_id(sc);
if (ret < 0) {
hid_err(hdev, "failed to allocate the device id\n");
@@ -1984,6 +2060,7 @@ err_stop:
if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
sony_cancel_work_sync(sc);
+ kfree(sc->output_report_dmabuf);
sony_remove_dev_list(sc);
sony_release_device_id(sc);
hid_hw_stop(hdev);
@@ -2004,6 +2081,8 @@ static void sony_remove(struct hid_device *hdev)
sony_cancel_work_sync(sc);
+ kfree(sc->output_report_dmabuf);
+
sony_remove_dev_list(sc);
sony_release_device_id(sc);
@@ -2034,6 +2113,9 @@ static const struct hid_device_id sony_devices[] = {
/* Logitech Harmony Adapter for PS3 */
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
.driver_data = PS3REMOTE },
+ /* SMK-Link PS3 BD Remote Control */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE),
+ .driver_data = PS3REMOTE },
/* Sony Dualshock 4 controllers for PS4 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_USB },
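
For the Sixaxis path, the state worker now fills a preallocated buffer, but the LED packing itself is unchanged: each LED occupies one bit of leds_bitmap, and 0x20 marks "all off" (the flag the comment says third-party INTEC controllers need). A minimal standalone rendering of that packing follows; the helper name is illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Pack four LED on/off states into the Sixaxis leds_bitmap byte:
 * LED n is bit n+1, and 0x20 flags "all LEDs off". */
static uint8_t sixaxis_leds_bitmap(const uint8_t led_state[4])
{
	uint8_t bitmap = 0;
	int n;

	for (n = 0; n < 4; n++)
		bitmap |= (led_state[n] ? 1 : 0) << (n + 1);

	if ((bitmap & 0x1E) == 0)	/* no LED requested */
		bitmap |= 0x20;

	return bitmap;
}

int main(void)
{
	uint8_t only_led1[4] = { 1, 0, 0, 0 };
	uint8_t all_off[4]   = { 0, 0, 0, 0 };

	printf("LED1 only: 0x%02x\n", sixaxis_leds_bitmap(only_led1)); /* 0x02 */
	printf("all off:   0x%02x\n", sixaxis_leds_bitmap(all_off));   /* 0x20 */
	return 0;
}
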
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f09e70cafaf1..d32037cbf9db 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -137,6 +137,7 @@ struct i2c_hid {
* descriptor. */
unsigned int bufsize; /* i2c buffer size */
char *inbuf; /* Input buffer */
+ char *rawbuf; /* Raw Input buffer */
char *cmdbuf; /* Command buffer */
char *argsbuf; /* Command arguments buffer */
@@ -369,7 +370,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
int ret, ret_size;
- int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+ int size = ihid->bufsize;
ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
if (ret != size) {
@@ -437,7 +438,7 @@ static void i2c_hid_init_report(struct hid_report *report, u8 *buffer,
report->id, buffer, size))
return;
- i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, ihid->inbuf);
+ i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, buffer);
ret_size = buffer[0] | (buffer[1] << 8);
@@ -504,9 +505,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
static void i2c_hid_free_buffers(struct i2c_hid *ihid)
{
kfree(ihid->inbuf);
+ kfree(ihid->rawbuf);
kfree(ihid->argsbuf);
kfree(ihid->cmdbuf);
ihid->inbuf = NULL;
+ ihid->rawbuf = NULL;
ihid->cmdbuf = NULL;
ihid->argsbuf = NULL;
ihid->bufsize = 0;
@@ -522,10 +525,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
report_size; /* report */
ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+ ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
- if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+ if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
i2c_hid_free_buffers(ihid);
return -ENOMEM;
}
@@ -552,12 +556,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
ret = i2c_hid_get_report(client,
report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
- report_number, ihid->inbuf, ask_count);
+ report_number, ihid->rawbuf, ask_count);
if (ret < 0)
return ret;
- ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
+ ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
if (ret_count <= 2)
return 0;
@@ -566,7 +570,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
/* The query buffer contains the size, dropping it in the reply */
count = min(count, ret_count - 2);
- memcpy(buf, ihid->inbuf + 2, count);
+ memcpy(buf, ihid->rawbuf + 2, count);
return count;
}
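
i2c_hid_get_raw_report() now parses out of the new rawbuf, but the framing it decodes is the same: an I2C-HID report begins with a 2-byte little-endian length that includes the length field itself, so at most ret_count - 2 payload bytes starting at offset 2 are handed back. A userspace sketch of that framing, with a fabricated example buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Extract the payload of an I2C-HID report: the first two bytes are a
 * little-endian length that includes the length field itself. Returns
 * the number of payload bytes copied into out. */
static size_t i2c_hid_payload(const uint8_t *raw, uint8_t *out, size_t want)
{
	size_t ret_count = raw[0] | (raw[1] << 8);

	if (ret_count <= 2)		/* nothing beyond the prefix */
		return 0;

	if (want > ret_count - 2)
		want = ret_count - 2;

	memcpy(out, raw + 2, want);
	return want;
}

int main(void)
{
	/* 5-byte report: length 0x0005, then report ID 0x01 and two data bytes */
	uint8_t raw[] = { 0x05, 0x00, 0x01, 0xaa, 0xbb };
	uint8_t out[8];
	size_t n = i2c_hid_payload(raw, out, sizeof(out));

	printf("payload: %zu bytes, first byte 0x%02x\n", n, out[0]);
	return 0;
}
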
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ca6849a0121e..bfbe1bedda7f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -278,18 +278,20 @@ static void hid_irq_in(struct urb *urb)
usbhid->retry_delay = 0;
if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
break;
- hid_input_report(urb->context, HID_INPUT_REPORT,
- urb->transfer_buffer,
- urb->actual_length, 1);
- /*
- * autosuspend refused while keys are pressed
- * because most keyboards don't wake up when
- * a key is released
- */
- if (hid_check_keys_pressed(hid))
- set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
- else
- clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+ if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
+ hid_input_report(urb->context, HID_INPUT_REPORT,
+ urb->transfer_buffer,
+ urb->actual_length, 1);
+ /*
+ * autosuspend refused while keys are pressed
+ * because most keyboards don't wake up when
+ * a key is released
+ */
+ if (hid_check_keys_pressed(hid))
+ set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+ else
+ clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+ }
break;
case -EPIPE: /* stall */
usbhid_mark_busy(usbhid);
@@ -338,8 +340,7 @@ static int hid_submit_out(struct hid_device *hid)
report = usbhid->out[usbhid->outtail].report;
raw_report = usbhid->out[usbhid->outtail].raw_report;
- usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
- 1 + (report->id > 0);
+ usbhid->urbout->transfer_buffer_length = hid_report_len(report);
usbhid->urbout->dev = hid_to_usb_dev(hid);
if (raw_report) {
memcpy(usbhid->outbuf, raw_report,
@@ -688,6 +689,7 @@ int usbhid_open(struct hid_device *hid)
goto done;
}
usbhid->intf->needs_remote_wakeup = 1;
+ set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
res = hid_start_in(hid);
if (res) {
if (res != -ENOSPC) {
@@ -701,6 +703,15 @@ int usbhid_open(struct hid_device *hid)
}
}
usb_autopm_put_interface(usbhid->intf);
+
+ /*
+ * In case events are generated while nobody was listening,
+ * some are released when the device is re-opened.
+ * Wait 50 msec for the queue to empty before allowing events
+ * to go through hid.
+ */
+ msleep(50);
+ clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
}
done:
mutex_unlock(&hid_open_mut);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 552671ee7c5d..dc89be90b35e 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -73,11 +73,13 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index f633c24ce28b..807922b49aa4 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -52,6 +52,7 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_STARTED 8
#define HID_KEYS_PRESSED 10
#define HID_NO_BANDWIDTH 11
+#define HID_RESUME_RUNNING 12
/*
* USB-specific HID struct, to be pointed to
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 0cc53440543a..7db432809e9e 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -140,7 +140,7 @@ extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
void wacom_setup_device_quirks(struct wacom_features *features);
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
+int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8593047bb726..654202941d30 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -13,6 +13,7 @@
#include "wacom_wac.h"
#include "wacom.h"
+#include <linux/input/mt.h>
#define WAC_MSG_RETRIES 5
@@ -70,22 +71,15 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
- int retval;
-
- mutex_lock(&wacom->lock);
- retval = hid_hw_open(wacom->hdev);
- mutex_unlock(&wacom->lock);
- return retval;
+ return hid_hw_open(wacom->hdev);
}
static void wacom_close(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
- mutex_lock(&wacom->lock);
hid_hw_close(wacom->hdev);
- mutex_unlock(&wacom->lock);
}
/*
@@ -192,9 +186,15 @@ static void wacom_usage_mapping(struct hid_device *hdev,
if (!pen && !finger)
return;
- if (finger && !features->touch_max)
- /* touch device at least supports one touch point */
- features->touch_max = 1;
+ /*
+ * Bamboo models do not support HID_DG_CONTACTMAX, and the
+ * descriptor of the pen-only Bamboo still contains touch usages.
+ */
+ if (features->type != BAMBOO_PT) {
+ /* ISDv4 touch devices support at least one touch point */
+ if (finger && !features->touch_max)
+ features->touch_max = 1;
+ }
switch (usage->hid) {
case HID_GD_X:
@@ -230,6 +230,21 @@ static void wacom_usage_mapping(struct hid_device *hdev,
wacom_wac_usage_mapping(hdev, field, usage);
}
+static void wacom_post_parse_hid(struct hid_device *hdev,
+ struct wacom_features *features)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ if (features->type == HID_GENERIC) {
+ /* Any last-minute generic device setup */
+ if (features->touch_max > 1) {
+ input_mt_init_slots(wacom_wac->input, wacom_wac->features.touch_max,
+ INPUT_MT_DIRECT);
+ }
+ }
+}
+
static void wacom_parse_hid(struct hid_device *hdev,
struct wacom_features *features)
{
@@ -264,6 +279,8 @@ static void wacom_parse_hid(struct hid_device *hdev,
wacom_usage_mapping(hdev, hreport->field[i],
hreport->field[i]->usage + j);
}
+
+ wacom_post_parse_hid(hdev, features);
}
static int wacom_hid_set_device_mode(struct hid_device *hdev)
@@ -1129,7 +1146,7 @@ static void wacom_clean_inputs(struct wacom *wacom)
input_free_device(wacom->wacom_wac.input);
}
if (wacom->wacom_wac.pad_input) {
- if (wacom->wacom_wac.input_registered)
+ if (wacom->wacom_wac.pad_registered)
input_unregister_device(wacom->wacom_wac.pad_input);
else
input_free_device(wacom->wacom_wac.pad_input);
@@ -1151,13 +1168,13 @@ static int wacom_register_inputs(struct wacom *wacom)
if (!input_dev || !pad_input_dev)
return -EINVAL;
- error = wacom_setup_input_capabilities(input_dev, wacom_wac);
- if (error)
- return error;
-
- error = input_register_device(input_dev);
- if (error)
- return error;
+ error = wacom_setup_pentouch_input_capabilities(input_dev, wacom_wac);
+ if (!error) {
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
+ wacom_wac->input_registered = true;
+ }
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
if (error) {
@@ -1169,22 +1186,23 @@ static int wacom_register_inputs(struct wacom *wacom)
error = input_register_device(pad_input_dev);
if (error)
goto fail_register_pad_input;
+ wacom_wac->pad_registered = true;
error = wacom_initialize_leds(wacom);
if (error)
goto fail_leds;
}
- wacom_wac->input_registered = true;
-
return 0;
fail_leds:
input_unregister_device(pad_input_dev);
pad_input_dev = NULL;
+ wacom_wac->pad_registered = false;
fail_register_pad_input:
input_unregister_device(input_dev);
wacom_wac->input = NULL;
+ wacom_wac->input_registered = false;
return error;
}
@@ -1321,12 +1339,6 @@ static void wacom_calculate_res(struct wacom_features *features)
features->unitExpo);
}
-static int wacom_hid_report_len(struct hid_report *report)
-{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
-}
-
static size_t wacom_compute_pktlen(struct hid_device *hdev)
{
struct hid_report_enum *report_enum;
@@ -1336,7 +1348,7 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev)
report_enum = hdev->report_enum + HID_INPUT_REPORT;
list_for_each_entry(report, &report_enum->report_list, list) {
- size_t report_size = wacom_hid_report_len(report);
+ size_t report_size = hid_report_len(report);
if (report_size > size)
size = report_size;
}
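
Both this file and usbhid above now call the shared hid_report_len() helper instead of open-coding the size computation; the removed comment spells out the formula. A standalone rendering with a worked example (the local helper name is mine, and the formula assumes at least one payload bit, as in the driver):

#include <stdio.h>

/* Bytes needed on the wire for a HID report of `bits` payload bits,
 * plus one byte for the report ID when it is non-zero. Equivalent to
 * DIV_ROUND_UP(bits, 8) + !!(id > 0), for bits >= 1. */
static unsigned int report_len(unsigned int bits, unsigned int id)
{
	return ((bits - 1) >> 3) + 1 + (id > 0);
}

int main(void)
{
	/* e.g. a 70-bit report with report ID 2: 9 payload bytes + 1 = 10 */
	printf("%u\n", report_len(70, 2));
	return 0;
}
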
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 586b2405b0d4..ac7447c7b82e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -25,6 +25,10 @@
#define WACOM_INTUOS_RES 100
#define WACOM_INTUOS3_RES 200
+/* Newer Cintiq and DTU have an offset between tablet and screen areas */
+#define WACOM_DTU_OFFSET 200
+#define WACOM_CINTIQ_OFFSET 400
+
/*
* Scale factor relating reported contact size to logical contact area.
* 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo
@@ -600,8 +604,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
}
input_report_abs(input, ABS_PRESSURE, t);
input_report_abs(input, ABS_TILT_X,
- ((data[7] << 1) & 0x7e) | (data[8] >> 7));
- input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+ input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
input_report_key(input, BTN_STYLUS, data[1] & 2);
input_report_key(input, BTN_STYLUS2, data[1] & 4);
input_report_key(input, BTN_TOUCH, t > 10);
@@ -612,8 +616,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
input_report_abs(input, ABS_WHEEL,
(data[6] << 2) | ((data[7] >> 6) & 3));
input_report_abs(input, ABS_TILT_X,
- ((data[7] << 1) & 0x7e) | (data[8] >> 7));
- input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+ input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
}
}
@@ -915,8 +919,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, BTN_EXTRA, data[6] & 0x10);
input_report_abs(input, ABS_TILT_X,
- ((data[7] << 1) & 0x7e) | (data[8] >> 7));
- input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+ input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
} else {
/* 2D mouse packet */
input_report_key(input, BTN_LEFT, data[8] & 0x04);
@@ -1377,11 +1381,12 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct input_dev *input = wacom_wac->input;
+ struct wacom_features *features = &wacom_wac->features;
unsigned touch_max = wacom_wac->features.touch_max;
switch (usage->hid) {
case HID_GD_X:
+ features->last_slot_field = usage->hid;
if (touch_max == 1)
wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
else
@@ -1389,6 +1394,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
ABS_MT_POSITION_X, 4);
break;
case HID_GD_Y:
+ features->last_slot_field = usage->hid;
if (touch_max == 1)
wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
else
@@ -1396,19 +1402,48 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
ABS_MT_POSITION_Y, 4);
break;
case HID_DG_CONTACTID:
- input_mt_init_slots(input, wacom_wac->features.touch_max,
- INPUT_MT_DIRECT);
+ features->last_slot_field = usage->hid;
break;
case HID_DG_INRANGE:
+ features->last_slot_field = usage->hid;
break;
case HID_DG_INVERT:
+ features->last_slot_field = usage->hid;
break;
case HID_DG_TIPSWITCH:
+ features->last_slot_field = usage->hid;
wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
break;
}
}
+static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ struct input_dev *input)
+{
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+ bool mt = wacom_wac->features.touch_max > 1;
+ bool prox = hid_data->tipswitch &&
+ !wacom_wac->shared->stylus_in_proximity;
+
+ if (mt) {
+ int slot;
+
+ slot = input_mt_get_slot_by_key(input, hid_data->id);
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
+ }
+ else {
+ input_report_key(input, BTN_TOUCH, prox);
+ }
+
+ if (prox) {
+ input_report_abs(input, mt ? ABS_MT_POSITION_X : ABS_X,
+ hid_data->x);
+ input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y,
+ hid_data->y);
+ }
+}
+
static int wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
@@ -1431,36 +1466,35 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
}
+ if (usage->usage_index + 1 == field->report_count) {
+ if (usage->hid == wacom_wac->features.last_slot_field)
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->input);
+ }
+
return 0;
}
-static void wacom_wac_finger_mt_report(struct wacom_wac *wacom_wac,
- struct input_dev *input, bool touch)
+static int wacom_wac_finger_count_touches(struct hid_device *hdev)
{
- int slot;
- struct hid_data *hid_data = &wacom_wac->hid_data;
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->input;
+ unsigned touch_max = wacom_wac->features.touch_max;
+ int count = 0;
+ int i;
- slot = input_mt_get_slot_by_key(input, hid_data->id);
+ if (touch_max == 1)
+ return wacom_wac->hid_data.tipswitch &&
+ !wacom_wac->shared->stylus_in_proximity;
- input_mt_slot(input, slot);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
- if (touch) {
- input_report_abs(input, ABS_MT_POSITION_X, hid_data->x);
- input_report_abs(input, ABS_MT_POSITION_Y, hid_data->y);
+ for (i = 0; i < input->mt->num_slots; i++) {
+ struct input_mt_slot *ps = &input->mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+ if (id >= 0)
+ count++;
}
- input_mt_sync_frame(input);
-}
-static void wacom_wac_finger_single_touch_report(struct wacom_wac *wacom_wac,
- struct input_dev *input, bool touch)
-{
- struct hid_data *hid_data = &wacom_wac->hid_data;
-
- if (touch) {
- input_report_abs(input, ABS_X, hid_data->x);
- input_report_abs(input, ABS_Y, hid_data->y);
- }
- input_report_key(input, BTN_TOUCH, touch);
+ return count;
}
static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -1469,24 +1503,23 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->input;
- bool touch = wacom_wac->hid_data.tipswitch &&
- !wacom_wac->shared->stylus_in_proximity;
unsigned touch_max = wacom_wac->features.touch_max;
if (touch_max > 1)
- wacom_wac_finger_mt_report(wacom_wac, input, touch);
- else
- wacom_wac_finger_single_touch_report(wacom_wac, input, touch);
+ input_mt_sync_frame(input);
+
input_sync(input);
/* keep touch state for pen event */
- wacom_wac->shared->touch_down = touch;
+ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(hdev);
}
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
- ((f)->physical == HID_DG_STYLUS))
+ ((f)->physical == HID_DG_STYLUS) || \
+ ((f)->application == HID_DG_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
- ((f)->physical == HID_DG_FINGER))
+ ((f)->physical == HID_DG_FINGER) || \
+ ((f)->application == HID_DG_TOUCHSCREEN))
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
@@ -1681,7 +1714,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
return 0;
if (data[0] == WACOM_REPORT_USB) {
- if (features->type == INTUOSHT && features->touch_max) {
+ if (features->type == INTUOSHT &&
+ wacom->shared->touch_input &&
+ features->touch_max) {
input_report_switch(wacom->shared->touch_input,
SW_MUTE_DEVICE, data[8] & 0x40);
input_sync(wacom->shared->touch_input);
@@ -1774,7 +1809,8 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
int pid, battery, ps_connected;
if ((wacom->shared->type == INTUOSHT) &&
- wacom->shared->touch_max) {
+ wacom->shared->touch_input &&
+ wacom->shared->touch_max) {
input_report_switch(wacom->shared->touch_input,
SW_MUTE_DEVICE, data[5] & 0x40);
input_sync(wacom->shared->touch_input);
@@ -1838,6 +1874,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
case DTUS:
+ case DTUSX:
sync = wacom_dtus_irq(wacom_wac);
break;
@@ -1926,8 +1963,10 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
input_set_abs_params(input_dev, ABS_DISTANCE,
0, wacom_wac->features.distance_max, 0, 0);
input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, 0, 0);
+ input_abs_set_res(input_dev, ABS_TILT_X, 57);
+ input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, 0, 0);
+ input_abs_set_res(input_dev, ABS_TILT_Y, 57);
}
static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
@@ -1947,6 +1986,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
__set_bit(BTN_TOOL_LENS, input_dev->keybit);
input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_RZ, 287);
input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
@@ -2029,7 +2069,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
}
}
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
+int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
@@ -2047,9 +2087,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
switch (features->type) {
case WACOM_MO:
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
- /* fall through */
-
case WACOM_G4:
/* fall through */
@@ -2092,6 +2129,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case WACOM_24HD:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
/* fall through */
@@ -2106,6 +2144,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case WACOM_BEE:
case CINTIQ:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
@@ -2114,6 +2153,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case WACOM_13HD:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
break;
@@ -2122,6 +2162,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS3L:
case INTUOS3S:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
/* fall through */
case INTUOS:
@@ -2144,6 +2185,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
wacom_setup_intuos(wacom_wac);
} else if (features->device_type == BTN_TOOL_FINGER) {
@@ -2162,6 +2204,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS4L:
case INTUOS4S:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
wacom_setup_intuos(wacom_wac);
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
@@ -2196,6 +2239,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case DTUS:
+ case DTUSX:
case PL:
case DTU:
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -2246,6 +2290,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__clear_bit(ABS_X, input_dev->absbit);
__clear_bit(ABS_Y, input_dev->absbit);
__clear_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* PAD is set up by wacom_setup_pad_input_capabilities later */
+ return 1;
}
} else if (features->device_type == BTN_TOOL_PEN) {
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
@@ -2261,6 +2308,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case CINTIQ_HYBRID:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_abs_set_res(input_dev, ABS_Z, 287);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
@@ -2303,9 +2351,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case WACOM_G4:
__set_bit(BTN_BACK, input_dev->keybit);
- __set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
- __set_bit(BTN_RIGHT, input_dev->keybit);
input_set_capability(input_dev, EV_REL, REL_WHEEL);
break;
@@ -2402,7 +2448,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOSPS:
/* touch interface does not have the pad device */
if (features->device_type != BTN_TOOL_PEN)
- return 1;
+ return -ENODEV;
for (i = 0; i < 7; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
@@ -2446,8 +2492,10 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
case INTUOSHT:
case BAMBOO_PT:
/* pad device is on the touch interface */
- if (features->device_type != BTN_TOOL_FINGER)
- return 1;
+ if ((features->device_type != BTN_TOOL_FINGER) ||
+ /* Bamboo Pen-only tablets do not have a pad */
+ ((features->type == BAMBOO_PT) && !features->touch_max))
+ return -ENODEV;
__clear_bit(ABS_MISC, input_dev->absbit);
@@ -2460,7 +2508,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
default:
/* no pad supported */
- return 1;
+ return -ENODEV;
}
return 0;
}
@@ -2664,11 +2712,13 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104280, 65400, 2047, 63,
- WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104280, 65400, 2047, 63, /* Pen */
- WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
@@ -2684,8 +2734,9 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59352, 33648, 1023, 63,
- WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xC7 =
{ "Wacom DTU1931", 37832, 30305, 511, 0,
PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2697,28 +2748,38 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 22096, 13960, 511, 0,
- DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom DTU1031", 21896, 13760, 511, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x32F =
+ { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
{ "Wacom DTK2241", 95640, 54060, 2047, 63,
- DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
{ "Wacom DTH2242", 95640, 54060, 2047, 63,
- DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 87000, 65400, 2047, 63,
- WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95640, 54060, 2047, 63,
- WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95640, 54060, 2047, 63,
- WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -2863,21 +2924,27 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59352, 33648, 2047, 63,
- CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59352, 33648, 2047, 63,
- CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x323 =
+ { "Wacom Intuos P M", 21600, 13500, 1023, 31,
+ INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3022,10 +3089,13 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x314) },
{ USB_DEVICE_WACOM(0x315) },
{ USB_DEVICE_WACOM(0x317) },
+ { USB_DEVICE_WACOM(0x323) },
+ { USB_DEVICE_WACOM(0x32F) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
{ USB_DEVICE_WACOM(0x5002) },
+ { USB_DEVICE_LENOVO(0x6004) },
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ }
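
The tilt hunks earlier in this file decode the same two packet bytes as before but subtract 64 so that 0 means vertical, matching the new -64..63 axis range; the resolution of 57 is roughly 180/pi, which I read as one count per degree expressed in units per radian, though the diff does not say so. A standalone decode with a fabricated packet:

#include <stdio.h>
#include <stdint.h>

/* Decode the Intuos tilt fields from packet bytes data[7] and data[8]
 * and recenter them so that 0 is vertical (range -64..63). */
static void decode_tilt(const uint8_t *data, int *tilt_x, int *tilt_y)
{
	*tilt_x = (((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64;
	*tilt_y = (data[8] & 0x7f) - 64;
}

int main(void)
{
	/* Fabricated packet bytes: raw tilt X = 64, raw tilt Y = 80 */
	uint8_t data[9] = { 0 };
	int tx, ty;

	data[7] = 0x20;		/* contributes bits 6..1 of raw tilt X */
	data[8] = 0x50;		/* bit 7 -> tilt X LSB, bits 6..0 -> raw tilt Y */
	decode_tilt(data, &tx, &ty);
	printf("tilt: x=%d y=%d\n", tx, ty);	/* x=0 y=16 */
	return 0;
}
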
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 0f0b85ec1322..bfad815cda8a 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -80,6 +80,7 @@ enum {
PL,
DTU,
DTUS,
+ DTUSX,
INTUOS,
INTUOS3S,
INTUOS3,
@@ -144,6 +145,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ int last_slot_field;
};
struct wacom_shared {
@@ -183,6 +185,7 @@ struct wacom_wac {
struct input_dev *input;
struct input_dev *pad_input;
bool input_registered;
+ bool pad_registered;
int pid;
int battery_capacity;
int num_contacts_left;
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index 363b780dacea..f0c21458962c 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -29,7 +29,7 @@
#include <linux/of_gpio.h>
#include <linux/hsi/ssi_protocol.h>
-static unsigned int pm;
+static unsigned int pm = 1;
module_param(pm, int, 0400);
MODULE_PARM_DESC(pm,
"Enable power management (0=disabled, 1=userland based [default])");
@@ -164,9 +164,9 @@ static int nokia_modem_probe(struct device *dev)
dev_set_drvdata(dev, modem);
irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq);
- return irq;
+ return -EINVAL;
}
modem->nokia_modem_rst_ind_irq = irq;
pflags = irq_get_trigger_type(irq);
@@ -174,7 +174,7 @@ static int nokia_modem_probe(struct device *dev)
tasklet_init(&modem->nokia_modem_rst_ind_tasklet,
do_nokia_modem_rst_ind_tasklet, (unsigned long)modem);
err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr,
- IRQF_DISABLED | pflags, "modem_rst_ind", modem);
+ pflags, "modem_rst_ind", modem);
if (err < 0) {
dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n",
irq, pflags);
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
index 4d5b682fc6af..089c6c3feb3e 100644
--- a/drivers/hsi/controllers/omap_ssi.c
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -610,7 +610,6 @@ static struct platform_driver ssi_pdriver = {
.remove = __exit_p(ssi_remove),
.driver = {
.name = "omap_ssi",
- .owner = THIS_MODULE,
.pm = DEV_PM_OPS,
.of_match_table = omap_ssi_of_match,
},
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index d836cfe50513..1f8652b3de06 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1118,8 +1118,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
dev_dbg(&pd->dev, "init ssi port...\n");
if (!try_module_get(ssi->owner)) {
- dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n",
- err);
+ dev_err(&pd->dev, "could not increment parent module refcount\n");
return -ENODEV;
}
@@ -1385,7 +1384,6 @@ static struct platform_driver ssi_port_pdriver = {
.remove = __exit_p(ssi_port_remove),
.driver = {
.name = "omap_ssi_port",
- .owner = THIS_MODULE,
.of_match_table = omap_ssi_port_of_match,
.pm = DEV_PM_OPS,
},
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index a2d1a9612c86..2c59f030546b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -216,9 +216,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
unsigned long flags;
struct vmbus_channel *primary_channel;
struct vmbus_channel_relid_released msg;
+ struct device *dev;
+
+ if (channel->device_obj) {
+ dev = get_device(&channel->device_obj->device);
+ if (dev) {
+ vmbus_device_unregister(channel->device_obj);
+ put_device(dev);
+ }
+ }
- if (channel->device_obj)
- vmbus_device_unregister(channel->device_obj);
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = channel->offermsg.child_relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
@@ -517,6 +524,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
/* Just return here, no channel found */
return;
+ channel->rescind = true;
+
/* work is initialized for vmbus_process_rescind_offer() from
* vmbus_process_offer() where the channel got created */
queue_work(channel->controlwq, &channel->work);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5e90c5d771a7..b958ded8ac7e 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1087,10 +1087,12 @@ static void balloon_up(struct work_struct *dummy)
struct dm_balloon_response *bl_resp;
int alloc_unit;
int ret;
- bool alloc_error = false;
+ bool alloc_error;
bool done = false;
int i;
+ /* The host balloons pages in 2M granularity. */
+ WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
/*
* We will attempt 2M allocations. However, if we fail to
@@ -1107,16 +1109,18 @@ static void balloon_up(struct work_struct *dummy)
num_pages -= num_ballooned;
+ alloc_error = false;
num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
bl_resp, alloc_unit,
&alloc_error);
- if ((alloc_error) && (alloc_unit != 1)) {
+ if (alloc_unit != 1 && num_ballooned == 0) {
alloc_unit = 1;
continue;
}
- if ((alloc_error) || (num_ballooned == num_pages)) {
+ if ((alloc_unit == 1 && alloc_error) ||
+ (num_ballooned == num_pages)) {
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
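
The reworked balloon_up() loop keeps attempting 2M allocations and only falls back to single 4K pages once a 2M round balloons nothing; it stops when a single-page round hits an allocation error or the request is fully satisfied. That policy, pulled out into a runnable sketch (the function name and the example numbers are mine):

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the loop's exit/fallback policy. Returns the next alloc_unit,
 * or 0 when the balloon-up pass should stop. */
static unsigned int next_alloc_unit(unsigned int alloc_unit, bool alloc_error,
				    unsigned int ballooned, unsigned int remaining)
{
	if (alloc_unit != 1 && ballooned == 0)
		return 1;			/* fall back to 4K pages */
	if ((alloc_unit == 1 && alloc_error) || ballooned == remaining)
		return 0;			/* done (or give up) */
	return alloc_unit;			/* keep going at the same size */
}

int main(void)
{
	/* A 2M round (512 pages) that ballooned nothing falls back to single pages. */
	printf("%u\n", next_alloc_unit(512, true, 0, 2048));	/* 1 */
	/* A single-page round that hit an allocation error stops. */
	printf("%u\n", next_alloc_unit(1, true, 100, 2048));	/* 0 */
	return 0;
}
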
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 521c14625b3a..beb8105c0e7b 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -350,6 +350,7 @@ kvp_send_key(struct work_struct *dummy)
__u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool;
__u32 val32;
__u64 val64;
+ int rc;
msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
if (!msg)
@@ -446,7 +447,13 @@ kvp_send_key(struct work_struct *dummy)
}
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ if (rc) {
+ pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&kvp_work))
+ kvp_respond_to_host(message, HV_E_FAIL);
+ }
+
kfree(msg);
return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 34f14fddb666..9d5e0d1efdb5 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -28,7 +28,7 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
-
+#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
/*
* Global state maintained for transaction that is being processed.
@@ -55,12 +55,24 @@ static const char vss_name[] = "vss_kernel_module";
static __u8 *recv_buffer;
static void vss_send_op(struct work_struct *dummy);
+static void vss_timeout_func(struct work_struct *dummy);
+
+static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_send_op_work, vss_send_op);
/*
* Callback when data is received from user mode.
*/
+static void vss_timeout_func(struct work_struct *dummy)
+{
+ /*
+ * The userspace component did not reply within the timeout.
+ */
+ pr_warn("VSS: timeout waiting for daemon to reply\n");
+ vss_respond_to_host(HV_E_FAIL);
+}
+
static void
vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
@@ -76,13 +88,15 @@ vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
return;
}
- vss_respond_to_host(vss_msg->error);
+ if (cancel_delayed_work_sync(&vss_timeout_work))
+ vss_respond_to_host(vss_msg->error);
}
static void vss_send_op(struct work_struct *dummy)
{
int op = vss_transaction.msg->vss_hdr.operation;
+ int rc;
struct cn_msg *msg;
struct hv_vss_msg *vss_msg;
@@ -98,7 +112,12 @@ static void vss_send_op(struct work_struct *dummy)
vss_msg->vss_hdr.operation = op;
msg->len = sizeof(struct hv_vss_msg);
- cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ if (rc) {
+ pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
+ if (cancel_delayed_work_sync(&vss_timeout_work))
+ vss_respond_to_host(HV_E_FAIL);
+ }
kfree(msg);
return;
@@ -223,6 +242,8 @@ void hv_vss_onchannelcallback(void *context)
case VSS_OP_FREEZE:
case VSS_OP_THAW:
schedule_work(&vss_send_op_work);
+ schedule_delayed_work(&vss_timeout_work,
+ VSS_USERSPACE_TIMEOUT);
return;
case VSS_OP_HOT_BACKUP:
@@ -277,5 +298,6 @@ hv_vss_init(struct hv_util_service *srv)
void hv_vss_deinit(void)
{
cn_del_callback(&vss_id);
+ cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_send_op_work);
}
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 9c8a6bab8228..7a09c1615aa9 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1547,7 +1547,6 @@ static SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
static struct platform_driver abituguru_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = ABIT_UGURU_NAME,
.pm = ABIT_UGURU_PM,
},
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 4ae74aa8cdc1..3d2a4ae92d1e 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1167,7 +1167,6 @@ static SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
static struct platform_driver abituguru3_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = ABIT_UGURU3_NAME,
.pm = ABIT_UGURU3_PM
},
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 769fe20ec938..13875968c844 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -474,7 +474,6 @@ static const struct of_device_id abx500_temp_match[] = {
static struct platform_driver abx500_temp_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "abx500-temp",
.of_match_table = of_match_ptr(abx500_temp_match),
},
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 3288f13d2d87..0af63da6b603 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -676,7 +676,6 @@ static struct platform_driver applesmc_driver = {
.probe = applesmc_probe,
.driver = {
.name = "applesmc",
- .owner = THIS_MODULE,
.pm = &applesmc_pm_ops,
},
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d76f0b70c6e0..5b7fec824f10 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -596,7 +596,6 @@ static int coretemp_remove(struct platform_device *pdev)
static struct platform_driver coretemp_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = coretemp_probe,
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 692b3f34d88c..c9832bfacfe5 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -282,7 +282,6 @@ static struct platform_driver da9052_hwmon_driver = {
.probe = da9052_hwmon_probe,
.driver = {
.name = "da9052-hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 9916a3fb4bb9..f6e159cabe23 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -286,7 +286,6 @@ static struct platform_driver da9055_hwmon_driver = {
.probe = da9055_hwmon_probe,
.driver = {
.name = "da9055-hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index bea0a344fab5..8763c4a8280c 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2732,7 +2732,6 @@ static int dme1737_isa_remove(struct platform_device *pdev)
static struct platform_driver dme1737_isa_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "dme1737",
},
.probe = dme1737_isa_probe,
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 9e57b77ecd34..facd05cda26d 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1503,7 +1503,6 @@ static int f71805f_remove(struct platform_device *pdev)
static struct platform_driver f71805f_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = f71805f_probe,
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 03d8592810bf..2e5c6f46e442 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -369,7 +369,6 @@ static int f71882fg_remove(struct platform_device *pdev);
static struct platform_driver f71882fg_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = f71882fg_probe,
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 6c0080a3b902..6b3d1972cef7 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -581,7 +581,6 @@ static int i5k_amb_remove(struct platform_device *pdev)
static struct platform_driver i5k_amb_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = i5k_amb_probe,
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 7c2c7be182f2..febe8175d36c 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -313,7 +313,6 @@ static struct platform_driver ibmpowernv_driver = {
.probe = ibmpowernv_probe,
.id_table = opal_sensor_driver_ids,
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
};
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 980175628563..17ae2eb26ce2 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -177,7 +177,6 @@ MODULE_DEVICE_TABLE(of, iio_hwmon_of_match);
static struct platform_driver __refdata iio_hwmon_driver = {
.driver = {
.name = "iio_hwmon",
- .owner = THIS_MODULE,
.of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a327fd3402a7..409116c52cc5 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -498,7 +498,6 @@ static void it87_init_device(struct platform_device *pdev);
static struct platform_driver it87_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = it87_probe,
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 7488e36809c8..df9b3447f2a8 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -172,7 +172,6 @@ static struct platform_driver jz4740_hwmon_driver = {
.remove = jz4740_hwmon_remove,
.driver = {
.name = "jz4740-hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 6753fd940c76..fe41d5ae7cb2 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -177,6 +177,10 @@ static struct attribute *lm75_attrs[] = {
};
ATTRIBUTE_GROUPS(lm75);
+static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
+ .get_temp = lm75_read_temp,
+};
+
/*-----------------------------------------------------------------------*/
/* device probe and removal */
@@ -296,10 +300,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(data->hwmon_dev))
return PTR_ERR(data->hwmon_dev);
- data->tz = thermal_zone_of_sensor_register(data->hwmon_dev,
- 0,
+ data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
data->hwmon_dev,
- lm75_read_temp, NULL);
+ &lm75_of_thermal_ops);
if (IS_ERR(data->tz))
data->tz = NULL;
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 759661c7d480..539efe4ad991 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -836,7 +836,6 @@ static int lm78_isa_probe(struct platform_device *pdev)
static struct platform_driver lm78_isa_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "lm78",
},
.probe = lm78_isa_probe,
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 82128ad79a91..cb0dcfda958c 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -334,7 +334,6 @@ MODULE_DEVICE_TABLE(platform, max197_device_ids);
static struct platform_driver max197_driver = {
.driver = {
.name = "max197",
- .owner = THIS_MODULE,
},
.probe = max197_probe,
.remove = max197_remove,
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index ae00e60d856c..0c02f40eb0c1 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -267,7 +267,6 @@ MODULE_DEVICE_TABLE(platform, mc13783_adc_idtable);
static struct platform_driver mc13783_adc_driver = {
.remove = mc13783_adc_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
},
.id_table = mc13783_adc_idtable,
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index afc6b58eaa62..c29a4c3c6b9e 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -219,7 +219,6 @@ static struct platform_driver menf21bmc_hwmon = {
.probe = menf21bmc_hwmon_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 7710f4694ba1..f3830db02d46 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1282,7 +1282,6 @@ static const struct dev_pm_ops nct6683_dev_pm_ops = {
static struct platform_driver nct6683_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
.pm = NCT6683_DEV_PM_OPS,
},
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index dc0df57200cd..1be41177b620 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -4080,7 +4080,6 @@ static const struct dev_pm_ops nct6775_dev_pm_ops = {
static struct platform_driver nct6775_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
.pm = NCT6775_DEV_PM_OPS,
},
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 4ff89b2482e4..112e4d45e4a0 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -486,6 +486,10 @@ static const struct attribute_group ntc_attr_group = {
.attrs = ntc_attributes,
};
+static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
+ .get_temp = ntc_read_temp,
+};
+
static int ntc_thermistor_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -579,7 +583,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
pdev_id->name);
data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
- ntc_read_temp, NULL);
+ &ntc_of_thermal_ops);
if (IS_ERR(data->tz)) {
dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
data->tz = NULL;
@@ -609,7 +613,6 @@ static int ntc_thermistor_remove(struct platform_device *pdev)
static struct platform_driver ntc_thermistor_driver = {
.driver = {
.name = "ntc-thermistor",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ntc_match),
},
.probe = ntc_thermistor_probe,
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 145f674c1d87..d50fbf93a737 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -244,7 +244,6 @@ static struct pc87360_data *pc87360_update_device(struct device *dev);
static struct platform_driver pc87360_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "pc87360",
},
.probe = pc87360_probe,
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 9e4684e747ea..cb9fdd37bd0d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1153,7 +1153,6 @@ static int pc87427_remove(struct platform_device *pdev)
static struct platform_driver pc87427_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = pc87427_probe,
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 0674c13bbd4b..0c4710d35d16 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -378,7 +378,6 @@ static int s3c_hwmon_remove(struct platform_device *dev)
static struct platform_driver s3c_hwmon_driver = {
.driver = {
.name = "s3c-hwmon",
- .owner = THIS_MODULE,
},
.probe = s3c_hwmon_probe,
.remove = s3c_hwmon_remove,
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 0cc99fd83e8e..19f85c0da270 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -591,7 +591,6 @@ error:
static struct platform_driver sch5627_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = sch5627_probe,
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 547b5c952eff..131a2815dbda 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,7 +521,6 @@ error:
static struct platform_driver sch5636_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = sch5636_probe,
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 97cd45a8432c..d4f0935daaa1 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -1087,7 +1087,6 @@ MODULE_DEVICE_TABLE(platform, sht15_device_ids);
static struct platform_driver sht15_driver = {
.driver = {
.name = "sht15",
- .owner = THIS_MODULE,
},
.probe = sht15_probe,
.remove = sht15_remove,
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index bf1d7893d51c..45a028fb8851 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -215,7 +215,6 @@ static void sis5595_init_device(struct sis5595_data *data);
static struct platform_driver sis5595_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "sis5595",
},
.probe = sis5595_probe,
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 221f0931bf1c..6bd200756560 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -219,7 +219,6 @@ static int smsc47b397_probe(struct platform_device *pdev);
static struct platform_driver smsc47b397_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = smsc47b397_probe,
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index d7485659acc5..5d323186d2c1 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -849,7 +849,6 @@ static int __exit smsc47m1_remove(struct platform_device *pdev)
static struct platform_driver smsc47m1_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.remove = __exit_p(smsc47m1_remove),
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 51719956cc03..ba9f478f64ee 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -158,6 +158,10 @@ ATTRIBUTE_GROUPS(tmp102);
#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
+ .get_temp = tmp102_read_temp,
+};
+
static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -215,7 +219,7 @@ static int tmp102_probe(struct i2c_client *client,
}
tmp102->hwmon_dev = hwmon_dev;
tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- tmp102_read_temp, NULL);
+ &tmp102_of_thermal_ops);
if (IS_ERR(tmp102->tz))
tmp102->tz = NULL;
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 9a0e2b8e8b94..b5caf7fdb487 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -107,7 +107,6 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
.probe = twl4030_madc_hwmon_probe,
.driver = {
.name = "twl4030_madc_hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 7d4658636064..f2816c7c918f 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -314,7 +314,6 @@ MODULE_DEVICE_TABLE(of, env_match);
static struct platform_driver env_driver = {
.driver = {
.name = "ultra45_env",
- .owner = THIS_MODULE,
.of_match_table = env_match,
},
.probe = env_probe,
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index c53619086f33..cf1848b8fb32 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -247,7 +247,6 @@ static struct platform_driver vexpress_hwmon_driver = {
.probe = vexpress_hwmon_probe,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.of_match_table = vexpress_hwmon_of_match,
},
};
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 8df43c51de2c..ac91c07e3f90 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -205,7 +205,6 @@ static int via_cputemp_remove(struct platform_device *pdev)
static struct platform_driver via_cputemp_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = via_cputemp_probe,
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index babd732b4e18..40dd93c8f9f4 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -670,7 +670,6 @@ static const struct attribute_group via686a_group = {
static struct platform_driver via686a_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "via686a",
},
.probe = via686a_probe,
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3ea57c3504e2..3a6bfa51cb94 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -1233,7 +1233,6 @@ static int vt1211_remove(struct platform_device *pdev)
static struct platform_driver vt1211_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = vt1211_probe,
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index b3babe3326fb..cb69a8c2ed5b 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -759,7 +759,6 @@ static const struct attribute_group vt8231_group = {
static struct platform_driver vt8231_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "vt8231",
},
.probe = vt8231_probe,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index f0ab61db7a0d..b10353b31806 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -2705,7 +2705,6 @@ static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
static struct platform_driver w83627ehf_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
.pm = W83627EHF_DEV_PM_OPS,
},
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 2f55973a8c4c..721295b9a051 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -474,7 +474,6 @@ static const struct dev_pm_ops w83627hf_dev_pm_ops = {
static struct platform_driver w83627hf_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
.pm = W83627HF_DEV_PM_OPS,
},
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 84911616d8c0..54848fdd181e 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1839,7 +1839,6 @@ w83781d_isa_remove(struct platform_device *pdev)
static struct platform_driver w83781d_isa_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "w83781d",
},
.probe = w83781d_isa_probe,
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 3e6a3195cd11..a16cce72e4e2 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -154,7 +154,6 @@ static struct platform_driver wm831x_hwmon_driver = {
.probe = wm831x_hwmon_probe,
.driver = {
.name = "wm831x-hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 90e3d918e597..31af438ffa88 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -93,7 +93,6 @@ static struct platform_driver wm8350_hwmon_driver = {
.probe = wm8350_hwmon_probe,
.driver = {
.name = "wm8350-hwmon",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index c1e2cd4d85fe..47a275c6ece1 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -179,7 +179,6 @@ static struct platform_driver omap_hwspinlock_driver = {
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 401c33bcdb45..e93eabbd660f 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -175,7 +175,6 @@ static struct platform_driver u8500_hsem_driver = {
.remove = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index b51a402752c4..8c9e619f3026 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -110,6 +110,16 @@ config I2C_STUB
If you don't know what to do here, definitely say N.
+config I2C_SLAVE
+ bool "I2C slave support"
+
+if I2C_SLAVE
+
+config I2C_SLAVE_EEPROM
+ tristate "I2C eeprom slave driver"
+
+endif
+
config I2C_DEBUG_CORE
bool "I2C Core debugging messages"
help
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 1722f50f2473..45095b3d16a9 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
+obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
CFLAGS_i2c-core.o := -Wno-deprecated-declarations
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index b4d135cc2f39..c1351d9fb35b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -123,6 +123,7 @@ config I2C_I801
Wildcat Point-LP (PCH)
BayTrail (SOC)
Sunrise Point-H (PCH)
+ Sunrise Point-LP (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -523,6 +524,16 @@ config I2C_IBM_IIC
This driver can also be built as a module. If so, the module
will be called i2c-ibm_iic.
+config I2C_IMG
+ tristate "Imagination Technologies I2C SCB Controller"
+ depends on MIPS || METAG || COMPILE_TEST
+ help
+ Say Y here if you want to use the IMG I2C SCB controller,
+ available on the TZ1090 and other IMG SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-img-scb.
+
config I2C_IMX
tristate "IMX I2C interface"
depends on ARCH_MXC
@@ -553,6 +564,13 @@ config I2C_KEMPLD
This driver can also be built as a module. If so, the module
will be called i2c-kempld.
+config I2C_MESON
+ tristate "Amlogic Meson I2C controller"
+ depends on ARCH_MESON
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on the Amlogic Meson family of SoCs.
+
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
depends on PPC
@@ -702,7 +720,7 @@ config I2C_RIIC
config I2C_RK3X
tristate "Rockchip RK3xxx I2C adapter"
- depends on OF
+ depends on OF && COMMON_CLK
help
Say Y here to include support for the I2C adapter in Rockchip RK3xxx
SoCs.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index cdac7f15eab5..5e6c8223719e 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -50,9 +50,11 @@ obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
+obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
+obj-$(CONFIG_I2C_MESON) += i2c-meson.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index e05a672db3e5..636fd2efad88 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -31,10 +31,13 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
+#define AUTOSUSPEND_TIMEOUT 2000
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -72,7 +75,6 @@ struct at91_twi_pdata {
unsigned clk_max_div;
unsigned clk_offset;
bool has_unre_flag;
- bool has_dma_support;
struct at_dma_slave dma_slave;
};
@@ -481,6 +483,10 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ goto out;
+
/*
* The hardware can handle at most two messages concatenated by a
* repeated start via its internal address feature.
@@ -488,18 +494,21 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
if (num > 2) {
dev_err(dev->dev,
"cannot handle more than two concatenated messages.\n");
- return 0;
+ ret = 0;
+ goto out;
} else if (num == 2) {
int internal_address = 0;
int i;
if (msg->flags & I2C_M_RD) {
dev_err(dev->dev, "first transfer must be write.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (msg->len > 3) {
dev_err(dev->dev, "first message size must be <= 3.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* 1st msg is put into the internal address, start with 2nd */
@@ -523,7 +532,12 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
ret = at91_do_twi_transfer(dev);
- return (ret < 0) ? ret : num;
+ ret = (ret < 0) ? ret : num;
+out:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return ret;
}
static u32 at91_twi_func(struct i2c_adapter *adapter)
@@ -541,35 +555,30 @@ static struct at91_twi_pdata at91rm9200_config = {
.clk_max_div = 5,
.clk_offset = 3,
.has_unre_flag = true,
- .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9261_config = {
.clk_max_div = 5,
.clk_offset = 4,
.has_unre_flag = false,
- .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9260_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
- .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
- .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
- .has_dma_support = false,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -598,7 +607,6 @@ static struct at91_twi_pdata at91sam9x5_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
- .has_dma_support = true,
};
static const struct of_device_id atmel_twi_dt_ids[] = {
@@ -627,30 +635,11 @@ static const struct of_device_id atmel_twi_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
-static bool filter(struct dma_chan *chan, void *pdata)
-{
- struct at91_twi_pdata *sl_pdata = pdata;
- struct at_dma_slave *sl;
-
- if (!sl_pdata)
- return false;
-
- sl = &sl_pdata->dma_slave;
- if (sl && (sl->dma_dev == chan->device->dev)) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
int ret = 0;
- struct at91_twi_pdata *pdata = dev->pdata;
struct dma_slave_config slave_config;
struct at91_twi_dma *dma = &dev->dma;
- dma_cap_mask_t mask;
memset(&slave_config, 0, sizeof(slave_config));
slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
@@ -661,22 +650,17 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dma->chan_tx = dma_request_slave_channel_compat(mask, filter, pdata,
- dev->dev, "tx");
- if (!dma->chan_tx) {
- dev_err(dev->dev, "can't get a DMA channel for tx\n");
- ret = -EBUSY;
+ dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ dma->chan_tx = NULL;
goto error;
}
- dma->chan_rx = dma_request_slave_channel_compat(mask, filter, pdata,
- dev->dev, "rx");
- if (!dma->chan_rx) {
- dev_err(dev->dev, "can't get a DMA channel for rx\n");
- ret = -EBUSY;
+ dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ dma->chan_rx = NULL;
goto error;
}
@@ -697,6 +681,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
sg_init_table(&dma->sg, 1);
dma->buf_mapped = false;
dma->xfer_in_progress = false;
+ dev->use_dma = true;
dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
@@ -704,7 +689,8 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
return ret;
error:
- dev_info(dev->dev, "can't use DMA\n");
+ if (ret != -EPROBE_DEFER)
+ dev_info(dev->dev, "can't use DMA, error %d\n", ret);
if (dma->chan_rx)
dma_release_channel(dma->chan_rx);
if (dma->chan_tx)
@@ -772,9 +758,10 @@ static int at91_twi_probe(struct platform_device *pdev)
}
clk_prepare_enable(dev->clk);
- if (dev->pdata->has_dma_support) {
- if (at91_twi_configure_dma(dev, phy_addr) == 0)
- dev->use_dma = true;
+ if (dev->dev->of_node) {
+ rc = at91_twi_configure_dma(dev, phy_addr);
+ if (rc == -EPROBE_DEFER)
+ return rc;
}
rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
@@ -795,11 +782,20 @@ static int at91_twi_probe(struct platform_device *pdev)
dev->adapter.timeout = AT91_I2C_TIMEOUT;
dev->adapter.dev.of_node = pdev->dev.of_node;
+ pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_enable(dev->dev);
+
rc = i2c_add_numbered_adapter(&dev->adapter);
if (rc) {
dev_err(dev->dev, "Adapter %s registration failed\n",
dev->adapter.name);
clk_disable_unprepare(dev->clk);
+
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
return rc;
}
@@ -814,6 +810,9 @@ static int at91_twi_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
clk_disable_unprepare(dev->clk);
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
return 0;
}
@@ -823,7 +822,9 @@ static int at91_twi_runtime_suspend(struct device *dev)
{
struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
- clk_disable(twi_dev->clk);
+ clk_disable_unprepare(twi_dev->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
return 0;
}
@@ -832,10 +833,38 @@ static int at91_twi_runtime_resume(struct device *dev)
{
struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
- return clk_enable(twi_dev->clk);
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(twi_dev->clk);
+}
+
+static int at91_twi_suspend_noirq(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev))
+ at91_twi_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int at91_twi_resume_noirq(struct device *dev)
+{
+ int ret;
+
+ if (!pm_runtime_status_suspended(dev)) {
+ ret = at91_twi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ return 0;
}
static const struct dev_pm_ops at91_twi_pm = {
+ .suspend_noirq = at91_twi_suspend_noirq,
+ .resume_noirq = at91_twi_resume_noirq,
.runtime_suspend = at91_twi_runtime_suspend,
.runtime_resume = at91_twi_runtime_resume,
};
@@ -851,7 +880,6 @@ static struct platform_driver at91_twi_driver = {
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_twi_dt_ids),
.pm = at91_twi_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 6f8c0756e350..a6aae84e5706 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -413,7 +413,6 @@ static const struct dev_pm_ops i2c_au1550_pmops = {
static struct platform_driver au1xpsc_smbus_driver = {
.driver = {
.name = "au1xpsc_smbus",
- .owner = THIS_MODULE,
.pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 18a74a6751a9..2c9d9b1c8e64 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -895,7 +895,6 @@ MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match);
static struct platform_driver bcm_kona_i2c_driver = {
.driver = {
.name = "bcm-kona-i2c",
- .owner = THIS_MODULE,
.of_match_table = bcm_kona_i2c_of_match,
},
.probe = bcm_kona_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 4b8ecd0b3661..5d6feb937b9d 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -313,7 +313,6 @@ static struct platform_driver bcm2835_i2c_driver = {
.remove = bcm2835_i2c_remove,
.driver = {
.name = "i2c-bcm2835",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 067c1615e968..af162b4c7a6d 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -717,7 +717,6 @@ static struct platform_driver i2c_bfin_twi_driver = {
.remove = i2c_bfin_twi_remove,
.driver = {
.name = "i2c-bfin-twi",
- .owner = THIS_MODULE,
.pm = I2C_BFIN_TWI_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index c604f4c3ac0d..626f74ecd4be 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -901,7 +901,6 @@ MODULE_DEVICE_TABLE(of, cdns_i2c_of_match);
static struct platform_driver cdns_i2c_drv = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = cdns_i2c_of_match,
.pm = &cdns_i2c_dev_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index bdf040fd8675..b4f91e48948a 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -287,7 +287,6 @@ static struct platform_driver cbus_i2c_driver = {
.probe = cbus_i2c_probe,
.remove = cbus_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "i2c-cbus-gpio",
.of_match_table = of_match_ptr(i2c_cbus_dt_ids),
},
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 5bdbc71698d0..2d466538b2e2 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -716,7 +716,6 @@ static struct platform_driver cpm_i2c_driver = {
.remove = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
- .owner = THIS_MODULE,
.of_match_table = cpm_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 01f0cd87a4a5..6dc7ff5d3d9a 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -368,8 +368,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
flag |= DAVINCI_I2C_MDR_STP;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
- r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- dev->adapter.timeout);
+ r = wait_for_completion_timeout(&dev->cmd_complete, dev->adapter.timeout);
if (r == 0) {
dev_err(dev->dev, "controller timed out\n");
davinci_i2c_recover_bus(dev);
@@ -380,7 +379,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
if (dev->buf_len) {
/* This should be 0 if all bytes were transferred
* or dev->cmd_err denotes an error.
- * A signal may have aborted the transfer.
*/
if (r >= 0) {
dev_err(dev->dev, "abnormal termination buf_len=%i\n",
@@ -634,13 +632,17 @@ static int davinci_i2c_probe(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev;
struct i2c_adapter *adap;
- struct resource *mem, *irq;
- int r;
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no irq resource?\n");
- return -ENODEV;
+ struct resource *mem;
+ int r, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ if (!irq)
+ irq = -ENXIO;
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "can't get irq resource ret=%d\n", irq);
+ return irq;
}
dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_i2c_dev),
@@ -655,7 +657,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
init_completion(&dev->xfr_complete);
#endif
dev->dev = &pdev->dev;
- dev->irq = irq->start;
+ dev->irq = irq;
dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
@@ -787,7 +789,6 @@ static struct platform_driver davinci_i2c_driver = {
.remove = davinci_i2c_remove,
.driver = {
.name = "i2c_davinci",
- .owner = THIS_MODULE,
.pm = davinci_i2c_pm_ops,
.of_match_table = davinci_i2c_of_match,
},
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 373dd4d47765..2b463c313e4e 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -327,7 +327,6 @@ static struct platform_driver dw_i2c_driver = {
.remove = dw_i2c_remove,
.driver = {
.name = "i2c_designware",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
.pm = &dw_i2c_dev_pm_ops,
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index 10b8323b08d4..8eff62738877 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -473,7 +473,6 @@ static struct platform_driver efm32_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = efm32_i2c_dt_ids,
},
};
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 81e6263cd7da..b29c7500461a 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -457,7 +457,7 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
goto stop;
} else if (int_status & HSI2C_INT_TIMEOUT) {
dev_dbg(i2c->dev, "Accessing device timed out\n");
- i2c->state = -EAGAIN;
+ i2c->state = -ETIMEDOUT;
goto stop;
}
} else if (int_status & HSI2C_INT_I2C) {
@@ -476,7 +476,7 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
goto stop;
} else if (trans_status & HSI2C_TIMEOUT_AUTO) {
dev_dbg(i2c->dev, "Accessing device timed out\n");
- i2c->state = -EAGAIN;
+ i2c->state = -ETIMEDOUT;
goto stop;
} else if (trans_status & HSI2C_TRANS_DONE) {
i2c->trans_done = 1;
@@ -861,7 +861,6 @@ static struct platform_driver exynos5_i2c_driver = {
.probe = exynos5_i2c_probe,
.remove = exynos5_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "exynos5-hsi2c",
.pm = &exynos5_i2c_dev_pm_ops,
.of_match_table = exynos5_i2c_match,
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 933f1e453e41..34cfc0ebdcb9 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -260,7 +260,6 @@ MODULE_DEVICE_TABLE(of, i2c_gpio_dt_ids);
static struct platform_driver i2c_gpio_driver = {
.driver = {
.name = "i2c-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(i2c_gpio_dt_ids),
},
.probe = i2c_gpio_probe,
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 512fcfabc18e..56dc69e7349f 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -456,7 +456,6 @@ static int highlander_i2c_remove(struct platform_device *pdev)
static struct platform_driver highlander_i2c_driver = {
.driver = {
.name = "i2c-highlander",
- .owner = THIS_MODULE,
},
.probe = highlander_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6ab4f1cb21f3..8fafb254e42a 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -2,7 +2,7 @@
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>,
Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
<mdsxyz123@yahoo.com>
- Copyright (C) 2007 - 2012 Jean Delvare <jdelvare@suse.de>
+ Copyright (C) 2007 - 2014 Jean Delvare <jdelvare@suse.de>
Copyright (C) 2010 Intel Corporation,
David Woodhouse <dwmw2@infradead.org>
@@ -59,6 +59,7 @@
* Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
* BayTrail (SOC) 0x0f12 32 hard yes yes yes
* Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
+ * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -109,12 +110,16 @@
/* PCI Address Constants */
#define SMBBAR 4
+#define SMBPCICTL 0x004
#define SMBPCISTS 0x006
#define SMBHSTCFG 0x040
/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS 0x08
+/* Control bits for SMBPCICTL */
+#define SMBPCICTL_INTDIS 0x0400
+
/* Host configuration bits for SMBHSTCFG */
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
@@ -182,6 +187,7 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23
struct i801_mux_config {
char *gpio_chip;
@@ -371,6 +377,7 @@ static int i801_transaction(struct i801_priv *priv, int xact)
{
int status;
int result;
+ const struct i2c_adapter *adap = &priv->adapter;
result = i801_check_pre(priv);
if (result < 0)
@@ -379,7 +386,14 @@ static int i801_transaction(struct i801_priv *priv, int xact)
if (priv->features & FEATURE_IRQ) {
outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
SMBHSTCNT(priv));
- wait_event(priv->waitq, (status = priv->status));
+ result = wait_event_timeout(priv->waitq,
+ (status = priv->status),
+ adap->timeout);
+ if (!result) {
+ status = -ETIMEDOUT;
+ dev_warn(&priv->pci_dev->dev,
+ "Timeout waiting for interrupt!\n");
+ }
priv->status = 0;
return i801_check_post(priv, status);
}
@@ -493,9 +507,6 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
return IRQ_NONE;
status = inb_p(SMBHSTSTS(priv));
- if (status != 0x42)
- dev_dbg(&priv->pci_dev->dev, "irq: status = %02x\n", status);
-
if (status & SMBHSTSTS_BYTE_DONE)
i801_isr_byte_done(priv);
@@ -527,6 +538,7 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
int smbcmd;
int status;
int result;
+ const struct i2c_adapter *adap = &priv->adapter;
result = i801_check_pre(priv);
if (result < 0)
@@ -555,7 +567,14 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
priv->data = &data->block[1];
outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
- wait_event(priv->waitq, (status = priv->status));
+ result = wait_event_timeout(priv->waitq,
+ (status = priv->status),
+ adap->timeout);
+ if (!result) {
+ status = -ETIMEDOUT;
+ dev_warn(&priv->pci_dev->dev,
+ "Timeout waiting for interrupt!\n");
+ }
priv->status = 0;
return i801_check_post(priv, status);
}
@@ -829,6 +848,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
{ 0, }
};
@@ -1212,6 +1232,25 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ /* Default timeout in interrupt mode: 200 ms */
+ priv->adapter.timeout = HZ / 5;
+
+ if (priv->features & FEATURE_IRQ) {
+ u16 pcictl, pcists;
+
+ /* Complain if an interrupt is already pending */
+ pci_read_config_word(priv->pci_dev, SMBPCISTS, &pcists);
+ if (pcists & SMBPCISTS_INTS)
+ dev_warn(&dev->dev, "An interrupt is pending!\n");
+
+ /* Check if interrupts have been disabled */
+ pci_read_config_word(priv->pci_dev, SMBPCICTL, &pcictl);
+ if (pcictl & SMBPCICTL_INTDIS) {
+ dev_info(&dev->dev, "Interrupts are disabled\n");
+ priv->features &= ~FEATURE_IRQ;
+ }
+ }
+
if (priv->features & FEATURE_IRQ) {
init_waitqueue_head(&priv->waitq);
@@ -1220,10 +1259,11 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err) {
dev_err(&dev->dev, "Failed to allocate irq %d: %d\n",
dev->irq, err);
- goto exit_release;
+ priv->features &= ~FEATURE_IRQ;
}
- dev_info(&dev->dev, "SMBus using PCI Interrupt\n");
}
+ dev_info(&dev->dev, "SMBus using %s\n",
+ priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
/* set up the sysfs linkage to our parent device */
priv->adapter.dev.parent = &dev->dev;
@@ -1250,7 +1290,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
exit_free_irq:
if (priv->features & FEATURE_IRQ)
free_irq(dev->irq, priv);
-exit_release:
pci_release_region(dev, SMBBAR);
exit:
kfree(priv);
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 274312c96b12..722f839cfa3c 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -802,7 +802,6 @@ static const struct of_device_id ibm_iic_match[] = {
static struct platform_driver ibm_iic_driver = {
.driver = {
.name = "ibm-iic",
- .owner = THIS_MODULE,
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
new file mode 100644
index 000000000000..0fcc1694c607
--- /dev/null
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -0,0 +1,1412 @@
+/*
+ * I2C adapter for the IMG Serial Control Bus (SCB) IP block.
+ *
+ * Copyright (C) 2009, 2010, 2012, 2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * There are three ways that this I2C controller can be driven:
+ *
+ * - Raw control of the SDA and SCK signals.
+ *
+ * This corresponds to MODE_RAW, which takes control of the signals
+ * directly for a certain number of clock cycles (the INT_TIMING
+ * interrupt can be used for timing).
+ *
+ * - Atomic commands. A low level I2C symbol (such as generate
+ * start/stop/ack/nack bit, generate byte, receive byte, and receive
+ * ACK) is given to the hardware, with detection of completion by bits
+ * in the LINESTAT register.
+ *
+ * This mode of operation is used by MODE_ATOMIC, which uses an I2C
+ * state machine in the interrupt handler to compose/react to I2C
+ * transactions using atomic mode commands, and also by MODE_SEQUENCE,
+ * which emits a simple fixed sequence of atomic mode commands.
+ *
+ * Due to software control, the use of atomic commands usually results
+ * in suboptimal use of the bus, with gaps between the I2C symbols while
+ * the driver decides what to do next.
+ *
+ * - Automatic mode. A bus address and a read/write direction are
+ * specified, and the hardware takes care of the I2C state machine,
+ * using a FIFO to send/receive bytes of data to an I2C slave. The
+ * driver just has to keep the FIFO drained or filled in response to the
+ * appropriate FIFO interrupts.
+ *
+ * This corresponds to MODE_AUTOMATIC, which manages the FIFOs and deals
+ * with control of repeated start bits between I2C messages.
+ *
+ * Use of automatic mode and the FIFO can make much more efficient use
+ * of the bus compared to individual atomic commands, with potentially
+ * no wasted time between I2C symbols or I2C messages.
+ *
+ * In most cases MODE_AUTOMATIC is used, however if any of the messages in
+ * a transaction are zero byte writes (e.g. used by i2cdetect for probing
+ * the bus), MODE_ATOMIC must be used since automatic mode is normally
+ * started by the writing of data into the FIFO.
+ *
+ * The other modes are used in specific circumstances where MODE_ATOMIC and
+ * MODE_AUTOMATIC aren't appropriate. MODE_RAW is used to implement a bus
+ * recovery routine. MODE_SEQUENCE is used to reset the bus and make sure
+ * it is in a sane state.
+ *
+ * Notice that the driver implements a timer-based timeout mechanism.
+ * The reason for this mechanism is to reduce the number of interrupts
+ * received in automatic mode.
+ *
+ * The driver would get a slave event and transaction done interrupts for
+ * each atomic mode command that gets completed. However, these events are
+ * not needed in automatic mode, because those atomic mode commands are
+ * managed automatically by the hardware.
+ *
+ * In practice, normal I2C transactions will be complete well before you
+ * get the timer interrupt, as the timer is re-scheduled during FIFO
+ * maintenance and disabled after the transaction is complete.
+ *
+ * In this way normal automatic mode operation isn't impacted by
+ * unnecessary interrupts, but the exceptional abort condition can still be
+ * detected (with a slight delay).
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+/* Register offsets */
+
+#define SCB_STATUS_REG 0x00
+#define SCB_OVERRIDE_REG 0x04
+#define SCB_READ_ADDR_REG 0x08
+#define SCB_READ_COUNT_REG 0x0c
+#define SCB_WRITE_ADDR_REG 0x10
+#define SCB_READ_DATA_REG 0x14
+#define SCB_WRITE_DATA_REG 0x18
+#define SCB_FIFO_STATUS_REG 0x1c
+#define SCB_CONTROL_SOFT_RESET 0x1f
+#define SCB_CLK_SET_REG 0x3c
+#define SCB_INT_STATUS_REG 0x40
+#define SCB_INT_CLEAR_REG 0x44
+#define SCB_INT_MASK_REG 0x48
+#define SCB_CONTROL_REG 0x4c
+#define SCB_TIME_TPL_REG 0x50
+#define SCB_TIME_TPH_REG 0x54
+#define SCB_TIME_TP2S_REG 0x58
+#define SCB_TIME_TBI_REG 0x60
+#define SCB_TIME_TSL_REG 0x64
+#define SCB_TIME_TDL_REG 0x68
+#define SCB_TIME_TSDL_REG 0x6c
+#define SCB_TIME_TSDH_REG 0x70
+#define SCB_READ_XADDR_REG 0x74
+#define SCB_WRITE_XADDR_REG 0x78
+#define SCB_WRITE_COUNT_REG 0x7c
+#define SCB_CORE_REV_REG 0x80
+#define SCB_TIME_TCKH_REG 0x84
+#define SCB_TIME_TCKL_REG 0x88
+#define SCB_FIFO_FLUSH_REG 0x8c
+#define SCB_READ_FIFO_REG 0x94
+#define SCB_CLEAR_REG 0x98
+
+/* SCB_CONTROL_REG bits */
+
+#define SCB_CONTROL_CLK_ENABLE 0x1e0
+#define SCB_CONTROL_TRANSACTION_HALT 0x200
+
+#define FIFO_READ_FULL BIT(0)
+#define FIFO_READ_EMPTY BIT(1)
+#define FIFO_WRITE_FULL BIT(2)
+#define FIFO_WRITE_EMPTY BIT(3)
+
+/* SCB_CLK_SET_REG bits */
+#define SCB_FILT_DISABLE BIT(31)
+#define SCB_FILT_BYPASS BIT(30)
+#define SCB_FILT_INC_MASK 0x7f
+#define SCB_FILT_INC_SHIFT 16
+#define SCB_INC_MASK 0x7f
+#define SCB_INC_SHIFT 8
+
+/* SCB_INT_*_REG bits */
+
+#define INT_BUS_INACTIVE BIT(0)
+#define INT_UNEXPECTED_START BIT(1)
+#define INT_SCLK_LOW_TIMEOUT BIT(2)
+#define INT_SDAT_LOW_TIMEOUT BIT(3)
+#define INT_WRITE_ACK_ERR BIT(4)
+#define INT_ADDR_ACK_ERR BIT(5)
+#define INT_FIFO_FULL BIT(9)
+#define INT_FIFO_FILLING BIT(10)
+#define INT_FIFO_EMPTY BIT(11)
+#define INT_FIFO_EMPTYING BIT(12)
+#define INT_TRANSACTION_DONE BIT(15)
+#define INT_SLAVE_EVENT BIT(16)
+#define INT_TIMING BIT(18)
+
+#define INT_FIFO_FULL_FILLING (INT_FIFO_FULL | INT_FIFO_FILLING)
+#define INT_FIFO_EMPTY_EMPTYING (INT_FIFO_EMPTY | INT_FIFO_EMPTYING)
+
+/* Level interrupts need clearing after handling instead of before */
+#define INT_LEVEL 0x01e00
+
+/* Don't allow any interrupts while the clock may be off */
+#define INT_ENABLE_MASK_INACTIVE 0x00000
+
+/* Interrupt masks for the different driver modes */
+
+#define INT_ENABLE_MASK_RAW INT_TIMING
+
+#define INT_ENABLE_MASK_ATOMIC (INT_TRANSACTION_DONE | \
+ INT_SLAVE_EVENT | \
+ INT_ADDR_ACK_ERR | \
+ INT_WRITE_ACK_ERR)
+
+#define INT_ENABLE_MASK_AUTOMATIC (INT_SCLK_LOW_TIMEOUT | \
+ INT_ADDR_ACK_ERR | \
+ INT_WRITE_ACK_ERR | \
+ INT_FIFO_FULL | \
+ INT_FIFO_FILLING | \
+ INT_FIFO_EMPTY | \
+ INT_FIFO_EMPTYING)
+
+#define INT_ENABLE_MASK_WAITSTOP (INT_SLAVE_EVENT | \
+ INT_ADDR_ACK_ERR | \
+ INT_WRITE_ACK_ERR)
+
+/* SCB_STATUS_REG fields */
+
+#define LINESTAT_SCLK_LINE_STATUS BIT(0)
+#define LINESTAT_SCLK_EN BIT(1)
+#define LINESTAT_SDAT_LINE_STATUS BIT(2)
+#define LINESTAT_SDAT_EN BIT(3)
+#define LINESTAT_DET_START_STATUS BIT(4)
+#define LINESTAT_DET_STOP_STATUS BIT(5)
+#define LINESTAT_DET_ACK_STATUS BIT(6)
+#define LINESTAT_DET_NACK_STATUS BIT(7)
+#define LINESTAT_BUS_IDLE BIT(8)
+#define LINESTAT_T_DONE_STATUS BIT(9)
+#define LINESTAT_SCLK_OUT_STATUS BIT(10)
+#define LINESTAT_SDAT_OUT_STATUS BIT(11)
+#define LINESTAT_GEN_LINE_MASK_STATUS BIT(12)
+#define LINESTAT_START_BIT_DET BIT(13)
+#define LINESTAT_STOP_BIT_DET BIT(14)
+#define LINESTAT_ACK_DET BIT(15)
+#define LINESTAT_NACK_DET BIT(16)
+#define LINESTAT_INPUT_HELD_V BIT(17)
+#define LINESTAT_ABORT_DET BIT(18)
+#define LINESTAT_ACK_OR_NACK_DET (LINESTAT_ACK_DET | LINESTAT_NACK_DET)
+#define LINESTAT_INPUT_DATA 0xff000000
+#define LINESTAT_INPUT_DATA_SHIFT 24
+
+#define LINESTAT_CLEAR_SHIFT 13
+#define LINESTAT_LATCHED (0x3f << LINESTAT_CLEAR_SHIFT)
+
+/* SCB_OVERRIDE_REG fields */
+
+#define OVERRIDE_SCLK_OVR BIT(0)
+#define OVERRIDE_SCLKEN_OVR BIT(1)
+#define OVERRIDE_SDAT_OVR BIT(2)
+#define OVERRIDE_SDATEN_OVR BIT(3)
+#define OVERRIDE_MASTER BIT(9)
+#define OVERRIDE_LINE_OVR_EN BIT(10)
+#define OVERRIDE_DIRECT BIT(11)
+#define OVERRIDE_CMD_SHIFT 4
+#define OVERRIDE_CMD_MASK 0x1f
+#define OVERRIDE_DATA_SHIFT 24
+
+#define OVERRIDE_SCLK_DOWN (OVERRIDE_LINE_OVR_EN | \
+ OVERRIDE_SCLKEN_OVR)
+#define OVERRIDE_SCLK_UP (OVERRIDE_LINE_OVR_EN | \
+ OVERRIDE_SCLKEN_OVR | \
+ OVERRIDE_SCLK_OVR)
+#define OVERRIDE_SDAT_DOWN (OVERRIDE_LINE_OVR_EN | \
+ OVERRIDE_SDATEN_OVR)
+#define OVERRIDE_SDAT_UP (OVERRIDE_LINE_OVR_EN | \
+ OVERRIDE_SDATEN_OVR | \
+ OVERRIDE_SDAT_OVR)
+
+/* OVERRIDE_CMD values */
+
+#define CMD_PAUSE 0x00
+#define CMD_GEN_DATA 0x01
+#define CMD_GEN_START 0x02
+#define CMD_GEN_STOP 0x03
+#define CMD_GEN_ACK 0x04
+#define CMD_GEN_NACK 0x05
+#define CMD_RET_DATA 0x08
+#define CMD_RET_ACK 0x09
+
+/* Fixed timing values */
+
+#define TIMEOUT_TBI 0x0
+#define TIMEOUT_TSL 0xffff
+#define TIMEOUT_TDL 0x0
+
+/* Transaction timeout */
+
+#define IMG_I2C_TIMEOUT (msecs_to_jiffies(1000))
+
+/*
+ * Worst incs are 1 (inaccurate) and 16*256 (irregular).
+ * So a sensible inc is the logarithmic mean: 64 (2^6), which is
+ * in the middle of the valid range (0-127).
+ */
+#define SCB_OPT_INC 64
+
+/* Setup the clock enable filtering for 25 ns */
+#define SCB_FILT_GLITCH 25
+
+/*
+ * Bits to return from interrupt handler functions for different modes.
+ * This delays completion until we've finished with the registers, so that the
+ * function waiting for completion can safely disable the clock to save power.
+ */
+#define ISR_COMPLETE_M BIT(31)
+#define ISR_FATAL_M BIT(30)
+#define ISR_WAITSTOP BIT(29)
+#define ISR_STATUS_M 0x0000ffff /* contains +ve errno */
+#define ISR_COMPLETE(err) (ISR_COMPLETE_M | (ISR_STATUS_M & (err)))
+#define ISR_FATAL(err) (ISR_COMPLETE(err) | ISR_FATAL_M)
+
+#define REL_SOC_IP_SCB_2_2_1 0x00020201
+
+enum img_i2c_mode {
+ MODE_INACTIVE,
+ MODE_RAW,
+ MODE_ATOMIC,
+ MODE_AUTOMATIC,
+ MODE_SEQUENCE,
+ MODE_FATAL,
+ MODE_WAITSTOP,
+ MODE_SUSPEND,
+};
+
+/* Timing parameters for i2c modes (in ns) */
+struct img_i2c_timings {
+ const char *name;
+ unsigned int max_bitrate;
+ unsigned int tckh, tckl, tsdh, tsdl;
+ unsigned int tp2s, tpl, tph;
+};
+
+/* The timings array must be ordered from slower to faster */
+static struct img_i2c_timings timings[] = {
+ /* Standard mode */
+ {
+ .name = "standard",
+ .max_bitrate = 100000,
+ .tckh = 4000,
+ .tckl = 4700,
+ .tsdh = 4700,
+ .tsdl = 8700,
+ .tp2s = 4700,
+ .tpl = 4700,
+ .tph = 4000,
+ },
+ /* Fast mode */
+ {
+ .name = "fast",
+ .max_bitrate = 400000,
+ .tckh = 600,
+ .tckl = 1300,
+ .tsdh = 600,
+ .tsdl = 1200,
+ .tp2s = 1300,
+ .tpl = 600,
+ .tph = 600,
+ },
+};
+
+/* Reset dance */
+static u8 img_i2c_reset_seq[] = { CMD_GEN_START,
+ CMD_GEN_DATA, 0xff,
+ CMD_RET_ACK,
+ CMD_GEN_START,
+ CMD_GEN_STOP,
+ 0 };
+/* Just issue a stop (after an abort condition) */
+static u8 img_i2c_stop_seq[] = { CMD_GEN_STOP,
+ 0 };
+
+/* We're interested in different interrupts depending on the mode */
+static unsigned int img_i2c_int_enable_by_mode[] = {
+ [MODE_INACTIVE] = INT_ENABLE_MASK_INACTIVE,
+ [MODE_RAW] = INT_ENABLE_MASK_RAW,
+ [MODE_ATOMIC] = INT_ENABLE_MASK_ATOMIC,
+ [MODE_AUTOMATIC] = INT_ENABLE_MASK_AUTOMATIC,
+ [MODE_SEQUENCE] = INT_ENABLE_MASK_ATOMIC,
+ [MODE_FATAL] = 0,
+ [MODE_WAITSTOP] = INT_ENABLE_MASK_WAITSTOP,
+ [MODE_SUSPEND] = 0,
+};
+
+/* Atomic command names */
+static const char * const img_i2c_atomic_cmd_names[] = {
+ [CMD_PAUSE] = "PAUSE",
+ [CMD_GEN_DATA] = "GEN_DATA",
+ [CMD_GEN_START] = "GEN_START",
+ [CMD_GEN_STOP] = "GEN_STOP",
+ [CMD_GEN_ACK] = "GEN_ACK",
+ [CMD_GEN_NACK] = "GEN_NACK",
+ [CMD_RET_DATA] = "RET_DATA",
+ [CMD_RET_ACK] = "RET_ACK",
+};
+
+struct img_i2c {
+ struct i2c_adapter adap;
+
+ void __iomem *base;
+
+ /*
+ * The scb core clock is used to get the input frequency, and is disabled
+ * after every set of transactions to save some power.
+ */
+ struct clk *scb_clk, *sys_clk;
+ unsigned int bitrate;
+ bool need_wr_rd_fence;
+
+ /* state */
+ struct completion msg_complete;
+ spinlock_t lock; /* lock before doing anything with the state */
+ struct i2c_msg msg;
+
+ /* After the last transaction, wait for a stop bit */
+ bool last_msg;
+ int msg_status;
+
+ enum img_i2c_mode mode;
+ u32 int_enable; /* depends on mode */
+ u32 line_status; /* line status over command */
+
+ /*
+ * To avoid slave event interrupts in automatic mode, use a timer to
+ * poll the abort condition if we don't get an interrupt for too long.
+ */
+ struct timer_list check_timer;
+ bool t_halt;
+
+ /* atomic mode state */
+ bool at_t_done;
+ bool at_slave_event;
+ int at_cur_cmd;
+ u8 at_cur_data;
+
+ /* Sequence: either reset or stop. See img_i2c_sequence. */
+ u8 *seq;
+
+ /* raw mode */
+ unsigned int raw_timeout;
+};
+
+static void img_i2c_writel(struct img_i2c *i2c, u32 offset, u32 value)
+{
+ writel(value, i2c->base + offset);
+}
+
+static u32 img_i2c_readl(struct img_i2c *i2c, u32 offset)
+{
+ return readl(i2c->base + offset);
+}
+
+/*
+ * The code to read from the master read fifo, and write to the master
+ * write fifo, checks a bit in an SCB register before every byte to
+ * ensure that the fifo is not full (write fifo) or empty (read fifo).
+ * Due to clock domain crossing inside the SCB block the updated value
+ * of this bit is only visible after 2 cycles.
+ *
+ * The img_i2c_wr_rd_fence() function does 2 dummy writes (to the read-only
+ * revision register), and it's called after reading from or writing to the
+ * fifos to ensure that subsequent reads of the fifo status bits do not read
+ * stale values.
+ */
+static void img_i2c_wr_rd_fence(struct img_i2c *i2c)
+{
+ if (i2c->need_wr_rd_fence) {
+ img_i2c_writel(i2c, SCB_CORE_REV_REG, 0);
+ img_i2c_writel(i2c, SCB_CORE_REV_REG, 0);
+ }
+}
+
+static void img_i2c_switch_mode(struct img_i2c *i2c, enum img_i2c_mode mode)
+{
+ i2c->mode = mode;
+ i2c->int_enable = img_i2c_int_enable_by_mode[mode];
+ i2c->line_status = 0;
+}
+
+static void img_i2c_raw_op(struct img_i2c *i2c)
+{
+ i2c->raw_timeout = 0;
+ img_i2c_writel(i2c, SCB_OVERRIDE_REG,
+ OVERRIDE_SCLKEN_OVR |
+ OVERRIDE_SDATEN_OVR |
+ OVERRIDE_MASTER |
+ OVERRIDE_LINE_OVR_EN |
+ OVERRIDE_DIRECT |
+ ((i2c->at_cur_cmd & OVERRIDE_CMD_MASK) << OVERRIDE_CMD_SHIFT) |
+ (i2c->at_cur_data << OVERRIDE_DATA_SHIFT));
+}
+
+static const char *img_i2c_atomic_op_name(unsigned int cmd)
+{
+ if (unlikely(cmd >= ARRAY_SIZE(img_i2c_atomic_cmd_names)))
+ return "UNKNOWN";
+ return img_i2c_atomic_cmd_names[cmd];
+}
+
+/* Send a single atomic mode command to the hardware */
+static void img_i2c_atomic_op(struct img_i2c *i2c, int cmd, u8 data)
+{
+ i2c->at_cur_cmd = cmd;
+ i2c->at_cur_data = data;
+
+ /* work around lack of data setup time when generating data */
+ if (cmd == CMD_GEN_DATA && i2c->mode == MODE_ATOMIC) {
+ u32 line_status = img_i2c_readl(i2c, SCB_STATUS_REG);
+
+ if (line_status & LINESTAT_SDAT_LINE_STATUS && !(data & 0x80)) {
+ /* hold the data line down for a moment */
+ img_i2c_switch_mode(i2c, MODE_RAW);
+ img_i2c_raw_op(i2c);
+ return;
+ }
+ }
+
+ dev_dbg(i2c->adap.dev.parent,
+ "atomic cmd=%s (%d) data=%#x\n",
+ img_i2c_atomic_op_name(cmd), cmd, data);
+ i2c->at_t_done = (cmd == CMD_RET_DATA || cmd == CMD_RET_ACK);
+ i2c->at_slave_event = false;
+ i2c->line_status = 0;
+
+ img_i2c_writel(i2c, SCB_OVERRIDE_REG,
+ ((cmd & OVERRIDE_CMD_MASK) << OVERRIDE_CMD_SHIFT) |
+ OVERRIDE_MASTER |
+ OVERRIDE_DIRECT |
+ (data << OVERRIDE_DATA_SHIFT));
+}
+
+/* Start a transaction in atomic mode */
+static void img_i2c_atomic_start(struct img_i2c *i2c)
+{
+ img_i2c_switch_mode(i2c, MODE_ATOMIC);
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+ img_i2c_atomic_op(i2c, CMD_GEN_START, 0x00);
+}
+
+static void img_i2c_soft_reset(struct img_i2c *i2c)
+{
+ i2c->t_halt = false;
+ img_i2c_writel(i2c, SCB_CONTROL_REG, 0);
+ img_i2c_writel(i2c, SCB_CONTROL_REG,
+ SCB_CONTROL_CLK_ENABLE | SCB_CONTROL_SOFT_RESET);
+}
+
+/* enable or release transaction halt for control of repeated starts */
+static void img_i2c_transaction_halt(struct img_i2c *i2c, bool t_halt)
+{
+ u32 val;
+
+ if (i2c->t_halt == t_halt)
+ return;
+ i2c->t_halt = t_halt;
+ val = img_i2c_readl(i2c, SCB_CONTROL_REG);
+ if (t_halt)
+ val |= SCB_CONTROL_TRANSACTION_HALT;
+ else
+ val &= ~SCB_CONTROL_TRANSACTION_HALT;
+ img_i2c_writel(i2c, SCB_CONTROL_REG, val);
+}
+
+/* Drain data from the FIFO into the buffer (automatic mode) */
+static void img_i2c_read_fifo(struct img_i2c *i2c)
+{
+ while (i2c->msg.len) {
+ u32 fifo_status;
+ u8 data;
+
+ fifo_status = img_i2c_readl(i2c, SCB_FIFO_STATUS_REG);
+ if (fifo_status & FIFO_READ_EMPTY)
+ break;
+
+ data = img_i2c_readl(i2c, SCB_READ_DATA_REG);
+ *i2c->msg.buf = data;
+
+ img_i2c_writel(i2c, SCB_READ_FIFO_REG, 0xff);
+ img_i2c_wr_rd_fence(i2c);
+ i2c->msg.len--;
+ i2c->msg.buf++;
+ }
+}
+
+/* Fill the FIFO with data from the buffer (automatic mode) */
+static void img_i2c_write_fifo(struct img_i2c *i2c)
+{
+ while (i2c->msg.len) {
+ u32 fifo_status;
+
+ fifo_status = img_i2c_readl(i2c, SCB_FIFO_STATUS_REG);
+ if (fifo_status & FIFO_WRITE_FULL)
+ break;
+
+ img_i2c_writel(i2c, SCB_WRITE_DATA_REG, *i2c->msg.buf);
+ img_i2c_wr_rd_fence(i2c);
+ i2c->msg.len--;
+ i2c->msg.buf++;
+ }
+
+ /* Disable fifo emptying interrupt if nothing more to write */
+ if (!i2c->msg.len)
+ i2c->int_enable &= ~INT_FIFO_EMPTYING;
+}
+
+/* Start a read transaction in automatic mode */
+static void img_i2c_read(struct img_i2c *i2c)
+{
+ img_i2c_switch_mode(i2c, MODE_AUTOMATIC);
+ if (!i2c->last_msg)
+ i2c->int_enable |= INT_SLAVE_EVENT;
+
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+ img_i2c_writel(i2c, SCB_READ_ADDR_REG, i2c->msg.addr);
+ img_i2c_writel(i2c, SCB_READ_COUNT_REG, i2c->msg.len);
+
+ img_i2c_transaction_halt(i2c, false);
+ mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1));
+}
+
+/* Start a write transaction in automatic mode */
+static void img_i2c_write(struct img_i2c *i2c)
+{
+ img_i2c_switch_mode(i2c, MODE_AUTOMATIC);
+ if (!i2c->last_msg)
+ i2c->int_enable |= INT_SLAVE_EVENT;
+
+ img_i2c_writel(i2c, SCB_WRITE_ADDR_REG, i2c->msg.addr);
+ img_i2c_writel(i2c, SCB_WRITE_COUNT_REG, i2c->msg.len);
+
+ img_i2c_transaction_halt(i2c, false);
+ mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1));
+ img_i2c_write_fifo(i2c);
+
+ /* img_i2c_write_fifo() may modify int_enable */
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+}
+
+/*
+ * Indicate that the transaction is complete. This is called from the
+ * ISR to wake up the waiting thread, after which the ISR must not
+ * access any more SCB registers.
+ */
+static void img_i2c_complete_transaction(struct img_i2c *i2c, int status)
+{
+ img_i2c_switch_mode(i2c, MODE_INACTIVE);
+ if (status) {
+ i2c->msg_status = status;
+ img_i2c_transaction_halt(i2c, false);
+ }
+ complete(&i2c->msg_complete);
+}
+
+static unsigned int img_i2c_raw_atomic_delay_handler(struct img_i2c *i2c,
+ u32 int_status, u32 line_status)
+{
+ /* Stay in raw mode for this, so we don't just loop infinitely */
+ img_i2c_atomic_op(i2c, i2c->at_cur_cmd, i2c->at_cur_data);
+ img_i2c_switch_mode(i2c, MODE_ATOMIC);
+ return 0;
+}
+
+static unsigned int img_i2c_raw(struct img_i2c *i2c, u32 int_status,
+ u32 line_status)
+{
+ if (int_status & INT_TIMING) {
+ if (i2c->raw_timeout == 0)
+ return img_i2c_raw_atomic_delay_handler(i2c,
+ int_status, line_status);
+ --i2c->raw_timeout;
+ }
+ return 0;
+}
+
+static unsigned int img_i2c_sequence(struct img_i2c *i2c, u32 int_status)
+{
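+ /*
+ * Line status bits that show the previous command has taken effect,
+ * indexed by that command; the sequence only advances once one of
+ * them (or an abort) has been latched.
+ */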
+ static const unsigned int continue_bits[] = {
+ [CMD_GEN_START] = LINESTAT_START_BIT_DET,
+ [CMD_GEN_DATA] = LINESTAT_INPUT_HELD_V,
+ [CMD_RET_ACK] = LINESTAT_ACK_DET | LINESTAT_NACK_DET,
+ [CMD_RET_DATA] = LINESTAT_INPUT_HELD_V,
+ [CMD_GEN_STOP] = LINESTAT_STOP_BIT_DET,
+ };
+ int next_cmd = -1;
+ u8 next_data = 0x00;
+
+ if (int_status & INT_SLAVE_EVENT)
+ i2c->at_slave_event = true;
+ if (int_status & INT_TRANSACTION_DONE)
+ i2c->at_t_done = true;
+
+ if (!i2c->at_slave_event || !i2c->at_t_done)
+ return 0;
+
+ /* wait if no continue bits are set */
+ if (i2c->at_cur_cmd >= 0 &&
+ i2c->at_cur_cmd < ARRAY_SIZE(continue_bits)) {
+ unsigned int cont_bits = continue_bits[i2c->at_cur_cmd];
+
+ if (cont_bits) {
+ cont_bits |= LINESTAT_ABORT_DET;
+ if (!(i2c->line_status & cont_bits))
+ return 0;
+ }
+ }
+
+ /* follow the sequence of commands in i2c->seq */
+ next_cmd = *i2c->seq;
+ /* stop on a nil */
+ if (!next_cmd) {
+ img_i2c_writel(i2c, SCB_OVERRIDE_REG, 0);
+ return ISR_COMPLETE(0);
+ }
+ /* when generating data, the next byte is the data */
+ if (next_cmd == CMD_GEN_DATA) {
+ ++i2c->seq;
+ next_data = *i2c->seq;
+ }
+ ++i2c->seq;
+ img_i2c_atomic_op(i2c, next_cmd, next_data);
+
+ return 0;
+}
+
+static void img_i2c_reset_start(struct img_i2c *i2c)
+{
+ /* Initiate the magic dance */
+ img_i2c_switch_mode(i2c, MODE_SEQUENCE);
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+ i2c->seq = img_i2c_reset_seq;
+ i2c->at_slave_event = true;
+ i2c->at_t_done = true;
+ i2c->at_cur_cmd = -1;
+
+ /* img_i2c_reset_seq isn't empty so the following won't fail */
+ img_i2c_sequence(i2c, 0);
+}
+
+static void img_i2c_stop_start(struct img_i2c *i2c)
+{
+ /* Initiate a stop bit sequence */
+ img_i2c_switch_mode(i2c, MODE_SEQUENCE);
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+ i2c->seq = img_i2c_stop_seq;
+ i2c->at_slave_event = true;
+ i2c->at_t_done = true;
+ i2c->at_cur_cmd = -1;
+
+ /* img_i2c_stop_seq isn't empty so the following won't fail */
+ img_i2c_sequence(i2c, 0);
+}
+
+static unsigned int img_i2c_atomic(struct img_i2c *i2c,
+ u32 int_status,
+ u32 line_status)
+{
+ int next_cmd = -1;
+ u8 next_data = 0x00;
+
+ if (int_status & INT_SLAVE_EVENT)
+ i2c->at_slave_event = true;
+ if (int_status & INT_TRANSACTION_DONE)
+ i2c->at_t_done = true;
+
+ if (!i2c->at_slave_event || !i2c->at_t_done)
+ goto next_atomic_cmd;
+ if (i2c->line_status & LINESTAT_ABORT_DET) {
+ dev_dbg(i2c->adap.dev.parent, "abort condition detected\n");
+ next_cmd = CMD_GEN_STOP;
+ i2c->msg_status = -EIO;
+ goto next_atomic_cmd;
+ }
+
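+ /*
+ * Atomic mode walks the transfer one bus primitive at a time:
+ * GEN_START, then GEN_DATA with the address byte and RET_ACK,
+ * followed by GEN_DATA/RET_ACK pairs for writes or RET_DATA with
+ * GEN_ACK/GEN_NACK for reads, and finally GEN_STOP.
+ */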
+ /* i2c->at_cur_cmd may have completed */
+ switch (i2c->at_cur_cmd) {
+ case CMD_GEN_START:
+ next_cmd = CMD_GEN_DATA;
+ next_data = (i2c->msg.addr << 1);
+ if (i2c->msg.flags & I2C_M_RD)
+ next_data |= 0x1;
+ break;
+ case CMD_GEN_DATA:
+ if (i2c->line_status & LINESTAT_INPUT_HELD_V)
+ next_cmd = CMD_RET_ACK;
+ break;
+ case CMD_RET_ACK:
+ if (i2c->line_status & LINESTAT_ACK_DET) {
+ if (i2c->msg.len == 0) {
+ next_cmd = CMD_GEN_STOP;
+ } else if (i2c->msg.flags & I2C_M_RD) {
+ next_cmd = CMD_RET_DATA;
+ } else {
+ next_cmd = CMD_GEN_DATA;
+ next_data = *i2c->msg.buf;
+ --i2c->msg.len;
+ ++i2c->msg.buf;
+ }
+ } else if (i2c->line_status & LINESTAT_NACK_DET) {
+ i2c->msg_status = -EIO;
+ next_cmd = CMD_GEN_STOP;
+ }
+ break;
+ case CMD_RET_DATA:
+ if (i2c->line_status & LINESTAT_INPUT_HELD_V) {
+ *i2c->msg.buf = (i2c->line_status &
+ LINESTAT_INPUT_DATA)
+ >> LINESTAT_INPUT_DATA_SHIFT;
+ --i2c->msg.len;
+ ++i2c->msg.buf;
+ if (i2c->msg.len)
+ next_cmd = CMD_GEN_ACK;
+ else
+ next_cmd = CMD_GEN_NACK;
+ }
+ break;
+ case CMD_GEN_ACK:
+ if (i2c->line_status & LINESTAT_ACK_DET) {
+ next_cmd = CMD_RET_DATA;
+ } else {
+ i2c->msg_status = -EIO;
+ next_cmd = CMD_GEN_STOP;
+ }
+ break;
+ case CMD_GEN_NACK:
+ next_cmd = CMD_GEN_STOP;
+ break;
+ case CMD_GEN_STOP:
+ img_i2c_writel(i2c, SCB_OVERRIDE_REG, 0);
+ return ISR_COMPLETE(0);
+ default:
+ dev_err(i2c->adap.dev.parent, "bad atomic command %d\n",
+ i2c->at_cur_cmd);
+ i2c->msg_status = -EIO;
+ next_cmd = CMD_GEN_STOP;
+ break;
+ }
+
+next_atomic_cmd:
+ if (next_cmd != -1) {
+ /* don't actually stop unless we're the last transaction */
+ if (next_cmd == CMD_GEN_STOP && !i2c->msg_status &&
+ !i2c->last_msg)
+ return ISR_COMPLETE(0);
+ img_i2c_atomic_op(i2c, next_cmd, next_data);
+ }
+ return 0;
+}
+
+/*
+ * Timer function to check if something has gone wrong in automatic mode (so we
+ * don't have to handle so many interrupts just to catch an exception).
+ */
+static void img_i2c_check_timer(unsigned long arg)
+{
+ struct img_i2c *i2c = (struct img_i2c *)arg;
+ unsigned long flags;
+ unsigned int line_status;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ line_status = img_i2c_readl(i2c, SCB_STATUS_REG);
+
+ /* check for an abort condition */
+ if (line_status & LINESTAT_ABORT_DET) {
+ dev_dbg(i2c->adap.dev.parent,
+ "abort condition detected by check timer\n");
+ /* enable slave event interrupt mask to trigger irq */
+ img_i2c_writel(i2c, SCB_INT_MASK_REG,
+ i2c->int_enable | INT_SLAVE_EVENT);
+ }
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+}
+
+static unsigned int img_i2c_auto(struct img_i2c *i2c,
+ unsigned int int_status,
+ unsigned int line_status)
+{
+ if (int_status & (INT_WRITE_ACK_ERR | INT_ADDR_ACK_ERR))
+ return ISR_COMPLETE(EIO);
+
+ if (line_status & LINESTAT_ABORT_DET) {
+ dev_dbg(i2c->adap.dev.parent, "abort condition detected\n");
+ /* empty the read fifo */
+ if ((i2c->msg.flags & I2C_M_RD) &&
+ (int_status & INT_FIFO_FULL_FILLING))
+ img_i2c_read_fifo(i2c);
+ /* use atomic mode and try to force a stop bit */
+ i2c->msg_status = -EIO;
+ img_i2c_stop_start(i2c);
+ return 0;
+ }
+
+ /* Enable transaction halt on start bit */
+ if (!i2c->last_msg && i2c->line_status & LINESTAT_START_BIT_DET) {
+ img_i2c_transaction_halt(i2c, true);
+ /* we're no longer interested in the slave event */
+ i2c->int_enable &= ~INT_SLAVE_EVENT;
+ }
+
+ mod_timer(&i2c->check_timer, jiffies + msecs_to_jiffies(1));
+
+ if (i2c->msg.flags & I2C_M_RD) {
+ if (int_status & INT_FIFO_FULL_FILLING) {
+ img_i2c_read_fifo(i2c);
+ if (i2c->msg.len == 0)
+ return ISR_WAITSTOP;
+ }
+ } else {
+ if (int_status & INT_FIFO_EMPTY_EMPTYING) {
+ /*
+ * The write fifo being empty indicates that we're on the
+ * last byte, so it's safe to start a new write
+ * transaction without losing any bytes from the
+ * previous one.
+ * See section 2.3.7, Repeated Start Transactions.
+ */
+ if ((int_status & INT_FIFO_EMPTY) &&
+ i2c->msg.len == 0)
+ return ISR_WAITSTOP;
+ img_i2c_write_fifo(i2c);
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t img_i2c_isr(int irq, void *dev_id)
+{
+ struct img_i2c *i2c = (struct img_i2c *)dev_id;
+ u32 int_status, line_status;
+ /* We handle transaction completion AFTER accessing registers */
+ unsigned int hret;
+
+ /* Read interrupt status register. */
+ int_status = img_i2c_readl(i2c, SCB_INT_STATUS_REG);
+ /* Clear detected interrupts. */
+ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, int_status);
+
+ /*
+ * Read line status and clear it until it actually is clear. We have
+ * to be careful not to lose any line status bits that get latched.
+ */
+ line_status = img_i2c_readl(i2c, SCB_STATUS_REG);
+ if (line_status & LINESTAT_LATCHED) {
+ img_i2c_writel(i2c, SCB_CLEAR_REG,
+ (line_status & LINESTAT_LATCHED)
+ >> LINESTAT_CLEAR_SHIFT);
+ img_i2c_wr_rd_fence(i2c);
+ }
+
+ spin_lock(&i2c->lock);
+
+ /* Keep track of line status bits received */
+ i2c->line_status &= ~LINESTAT_INPUT_DATA;
+ i2c->line_status |= line_status;
+
+ /*
+ * Certain interrupts indicate that sclk low timeout is not
+ * a problem. If any of these are set, just continue.
+ */
+ if ((int_status & INT_SCLK_LOW_TIMEOUT) &&
+ !(int_status & (INT_SLAVE_EVENT |
+ INT_FIFO_EMPTY |
+ INT_FIFO_FULL))) {
+ dev_crit(i2c->adap.dev.parent,
+ "fatal: clock low timeout occurred %s addr 0x%02x\n",
+ (i2c->msg.flags & I2C_M_RD) ? "reading" : "writing",
+ i2c->msg.addr);
+ hret = ISR_FATAL(EIO);
+ goto out;
+ }
+
+ if (i2c->mode == MODE_ATOMIC)
+ hret = img_i2c_atomic(i2c, int_status, line_status);
+ else if (i2c->mode == MODE_AUTOMATIC)
+ hret = img_i2c_auto(i2c, int_status, line_status);
+ else if (i2c->mode == MODE_SEQUENCE)
+ hret = img_i2c_sequence(i2c, int_status);
+ else if (i2c->mode == MODE_WAITSTOP && (int_status & INT_SLAVE_EVENT) &&
+ (line_status & LINESTAT_STOP_BIT_DET))
+ hret = ISR_COMPLETE(0);
+ else if (i2c->mode == MODE_RAW)
+ hret = img_i2c_raw(i2c, int_status, line_status);
+ else
+ hret = 0;
+
+ /* Clear detected level interrupts. */
+ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, int_status & INT_LEVEL);
+
+out:
+ if (hret & ISR_WAITSTOP) {
+ /*
+ * Only wait for stop on last message.
+ * Also we may already have detected the stop bit.
+ */
+ if (!i2c->last_msg || i2c->line_status & LINESTAT_STOP_BIT_DET)
+ hret = ISR_COMPLETE(0);
+ else
+ img_i2c_switch_mode(i2c, MODE_WAITSTOP);
+ }
+
+ /* now we've finished using regs, handle transaction completion */
+ if (hret & ISR_COMPLETE_M) {
+ int status = -(hret & ISR_STATUS_M);
+
+ img_i2c_complete_transaction(i2c, status);
+ if (hret & ISR_FATAL_M)
+ img_i2c_switch_mode(i2c, MODE_FATAL);
+ }
+
+ /* Enable interrupts (int_enable may be altered by changing mode) */
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+
+ spin_unlock(&i2c->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* Force a bus reset sequence and wait for it to complete */
+static int img_i2c_reset_bus(struct img_i2c *i2c)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ reinit_completion(&i2c->msg_complete);
+ img_i2c_reset_start(i2c);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ ret = wait_for_completion_timeout(&i2c->msg_complete, IMG_I2C_TIMEOUT);
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct img_i2c *i2c = i2c_get_adapdata(adap);
+ bool atomic = false;
+ int i, ret;
+
+ if (i2c->mode == MODE_SUSPEND) {
+ WARN(1, "refusing to service transaction in suspended state\n");
+ return -EIO;
+ }
+
+ if (i2c->mode == MODE_FATAL)
+ return -EIO;
+
+ for (i = 0; i < num; i++) {
+ if (likely(msgs[i].len))
+ continue;
+ /*
+ * 0 byte reads are not possible because the slave could try
+ * and pull the data line low, preventing a stop bit.
+ */
+ if (unlikely(msgs[i].flags & I2C_M_RD))
+ return -EIO;
+ /*
+ * 0 byte writes are possible and used for probing, but we
+ * cannot do them in automatic mode, so use atomic mode
+ * instead.
+ */
+ atomic = true;
+ }
+
+ ret = clk_prepare_enable(i2c->scb_clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg = &msgs[i];
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ /*
+ * Make a copy of the message struct. We mustn't modify the
+ * original or we'll confuse drivers and i2c-dev.
+ */
+ i2c->msg = *msg;
+ i2c->msg_status = 0;
+
+ /*
+ * After the last message we must have waited for a stop bit.
+ * Not waiting can cause problems when the clock is disabled
+ * before the stop bit is sent, and the Linux I2C interface
+ * requires that separate transfers are not joined with a
+ * repeated start.
+ */
+ i2c->last_msg = (i == num - 1);
+ reinit_completion(&i2c->msg_complete);
+
+ if (atomic)
+ img_i2c_atomic_start(i2c);
+ else if (msg->flags & I2C_M_RD)
+ img_i2c_read(i2c);
+ else
+ img_i2c_write(i2c);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ ret = wait_for_completion_timeout(&i2c->msg_complete,
+ IMG_I2C_TIMEOUT);
+ del_timer_sync(&i2c->check_timer);
+
+ if (ret == 0) {
+ dev_err(adap->dev.parent, "i2c transfer timed out\n");
+ i2c->msg_status = -ETIMEDOUT;
+ break;
+ }
+
+ if (i2c->msg_status)
+ break;
+ }
+
+ clk_disable_unprepare(i2c->scb_clk);
+
+ return i2c->msg_status ? i2c->msg_status : num;
+}
+
+static u32 img_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm img_i2c_algo = {
+ .master_xfer = img_i2c_xfer,
+ .functionality = img_i2c_func,
+};
+
+static int img_i2c_init(struct img_i2c *i2c)
+{
+ unsigned int clk_khz, bitrate_khz, clk_period, tckh, tckl, tsdh;
+ unsigned int i, ret, data, prescale, inc, int_bitrate, filt;
+ struct img_i2c_timings timing;
+ u32 rev;
+
+ ret = clk_prepare_enable(i2c->scb_clk);
+ if (ret)
+ return ret;
+
+ rev = img_i2c_readl(i2c, SCB_CORE_REV_REG);
+ if ((rev & 0x00ffffff) < 0x00020200) {
+ dev_info(i2c->adap.dev.parent,
+ "Unknown hardware revision (%d.%d.%d.%d)\n",
+ (rev >> 24) & 0xff, (rev >> 16) & 0xff,
+ (rev >> 8) & 0xff, rev & 0xff);
+ clk_disable_unprepare(i2c->scb_clk);
+ return -EINVAL;
+ }
+
+ if (rev == REL_SOC_IP_SCB_2_2_1) {
+ i2c->need_wr_rd_fence = true;
+ dev_info(i2c->adap.dev.parent, "fence quirk enabled");
+ }
+
+ bitrate_khz = i2c->bitrate / 1000;
+ clk_khz = clk_get_rate(i2c->scb_clk) / 1000;
+
+ /* Determine what mode we're in from the bitrate */
+ timing = timings[0];
+ for (i = 0; i < ARRAY_SIZE(timings); i++) {
+ if (i2c->bitrate <= timings[i].max_bitrate) {
+ timing = timings[i];
+ break;
+ }
+ }
+
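+ /*
+ * The prescaled core clock is scaled by inc/256 to give the fx16
+ * clock (16 pulses per bit), i.e.
+ * inc = 256 * 16 * bitrate / (clk / prescale)
+ * SCB_OPT_INC is the preferred value of inc, and is what the
+ * prescale calculation below aims for.
+ */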
+ /* Find the prescale that would give us that inc (approx delay = 0) */
+ prescale = SCB_OPT_INC * clk_khz / (256 * 16 * bitrate_khz);
+ prescale = clamp_t(unsigned int, prescale, 1, 8);
+ clk_khz /= prescale;
+
+ /* Setup the clock increment value */
+ inc = (256 * 16 * bitrate_khz) / clk_khz;
+
+ /*
+ * The clock generation logic allows glitches on the bus to be
+ * filtered out; the filter can remove bus glitches shorter than
+ * 50ns. With a core clock below 20 MHz the filter is disabled,
+ * and in the 20-40 MHz range the filter clock is used undivided.
+ */
+ if (clk_khz < 20000) {
+ filt = SCB_FILT_DISABLE;
+ } else if (clk_khz < 40000) {
+ filt = SCB_FILT_BYPASS;
+ } else {
+ /* Calculate filter clock */
+ filt = (64000 / ((clk_khz / 1000) * SCB_FILT_GLITCH));
+
+ /* Round the filter value up if there is a remainder */
+ if (64000 % ((clk_khz / 1000) * SCB_FILT_GLITCH))
+ filt++;
+
+ if (filt > SCB_FILT_INC_MASK)
+ filt = SCB_FILT_INC_MASK;
+
+ filt = (filt & SCB_FILT_INC_MASK) << SCB_FILT_INC_SHIFT;
+ }
+ data = filt | ((inc & SCB_INC_MASK) << SCB_INC_SHIFT) | (prescale - 1);
+ img_i2c_writel(i2c, SCB_CLK_SET_REG, data);
+
+ /* Obtain the clock period of the fx16 clock in ns */
+ clk_period = (256 * 1000000) / (clk_khz * inc);
+
+ /* Calculate the bitrate in terms of internal clock pulses */
+ int_bitrate = 1000000 / (bitrate_khz * clk_period);
+ if ((1000000 % (bitrate_khz * clk_period)) >=
+ ((bitrate_khz * clk_period) / 2))
+ int_bitrate++;
+
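+ /*
+ * The TCKH/TCKL registers take values in fx16 clock periods, minus
+ * one; TCKL gets whatever is left of the bit time once TCKH has
+ * been accounted for.
+ */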
+ /* Setup TCKH value */
+ tckh = timing.tckh / clk_period;
+ if (timing.tckh % clk_period)
+ tckh++;
+
+ if (tckh > 0)
+ data = tckh - 1;
+ else
+ data = 0;
+
+ img_i2c_writel(i2c, SCB_TIME_TCKH_REG, data);
+
+ /* Setup TCKL value */
+ tckl = int_bitrate - tckh;
+
+ if (tckl > 0)
+ data = tckl - 1;
+ else
+ data = 0;
+
+ img_i2c_writel(i2c, SCB_TIME_TCKL_REG, data);
+
+ /* Setup TSDH value */
+ tsdh = timing.tsdh / clk_period;
+ if (timing.tsdh % clk_period)
+ tsdh++;
+
+ if (tsdh > 1)
+ data = tsdh - 1;
+ else
+ data = 0x01;
+ img_i2c_writel(i2c, SCB_TIME_TSDH_REG, data);
+
+ /* This value is used later */
+ tsdh = data;
+
+ /* Setup TPL value */
+ data = timing.tpl / clk_period;
+ if (data > 0)
+ --data;
+ img_i2c_writel(i2c, SCB_TIME_TPL_REG, data);
+
+ /* Setup TPH value */
+ data = timing.tph / clk_period;
+ if (data > 0)
+ --data;
+ img_i2c_writel(i2c, SCB_TIME_TPH_REG, data);
+
+ /* Setup TSDL value to TPL + TSDH + 2 */
+ img_i2c_writel(i2c, SCB_TIME_TSDL_REG, data + tsdh + 2);
+
+ /* Setup TP2S value */
+ data = timing.tp2s / clk_period;
+ if (data > 0)
+ --data;
+ img_i2c_writel(i2c, SCB_TIME_TP2S_REG, data);
+
+ img_i2c_writel(i2c, SCB_TIME_TBI_REG, TIMEOUT_TBI);
+ img_i2c_writel(i2c, SCB_TIME_TSL_REG, TIMEOUT_TSL);
+ img_i2c_writel(i2c, SCB_TIME_TDL_REG, TIMEOUT_TDL);
+
+ /* Take module out of soft reset and enable clocks */
+ img_i2c_soft_reset(i2c);
+
+ /* Disable all interrupts */
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, 0);
+
+ /* Clear all interrupts */
+ img_i2c_writel(i2c, SCB_INT_CLEAR_REG, ~0);
+
+ /* Clear the scb_line_status events */
+ img_i2c_writel(i2c, SCB_CLEAR_REG, ~0);
+
+ /* Enable interrupts */
+ img_i2c_writel(i2c, SCB_INT_MASK_REG, i2c->int_enable);
+
+ /* Perform a synchronous sequence to reset the bus */
+ ret = img_i2c_reset_bus(i2c);
+
+ clk_disable_unprepare(i2c->scb_clk);
+
+ return ret;
+}
+
+static int img_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct img_i2c *i2c;
+ struct resource *res;
+ int irq, ret;
+ u32 val;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct img_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ return irq;
+ }
+
+ i2c->sys_clk = devm_clk_get(&pdev->dev, "sys");
+ if (IS_ERR(i2c->sys_clk)) {
+ dev_err(&pdev->dev, "can't get system clock\n");
+ return PTR_ERR(i2c->sys_clk);
+ }
+
+ i2c->scb_clk = devm_clk_get(&pdev->dev, "scb");
+ if (IS_ERR(i2c->scb_clk)) {
+ dev_err(&pdev->dev, "can't get core clock\n");
+ return PTR_ERR(i2c->scb_clk);
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, img_i2c_isr, 0,
+ pdev->name, i2c);
+ if (ret) {
+ dev_err(&pdev->dev, "can't request irq %d\n", irq);
+ return ret;
+ }
+
+ /* Set up the exception check timer */
+ init_timer(&i2c->check_timer);
+ i2c->check_timer.function = img_i2c_check_timer;
+ i2c->check_timer.data = (unsigned long)i2c;
+
+ i2c->bitrate = timings[0].max_bitrate;
+ if (!of_property_read_u32(node, "clock-frequency", &val))
+ i2c->bitrate = val;
+
+ i2c_set_adapdata(&i2c->adap, i2c);
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = node;
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &img_i2c_algo;
+ i2c->adap.retries = 5;
+ i2c->adap.nr = pdev->id;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name), "IMG SCB I2C");
+
+ img_i2c_switch_mode(i2c, MODE_INACTIVE);
+ spin_lock_init(&i2c->lock);
+ init_completion(&i2c->msg_complete);
+
+ platform_set_drvdata(pdev, i2c);
+
+ ret = clk_prepare_enable(i2c->sys_clk);
+ if (ret)
+ return ret;
+
+ ret = img_i2c_init(i2c);
+ if (ret)
+ goto disable_clk;
+
+ ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add adapter\n");
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(i2c->sys_clk);
+ return ret;
+}
+
+static int img_i2c_remove(struct platform_device *dev)
+{
+ struct img_i2c *i2c = platform_get_drvdata(dev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_disable_unprepare(i2c->sys_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_i2c_suspend(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+
+ img_i2c_switch_mode(i2c, MODE_SUSPEND);
+
+ clk_disable_unprepare(i2c->sys_clk);
+
+ return 0;
+}
+
+static int img_i2c_resume(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2c->sys_clk);
+ if (ret)
+ return ret;
+
+ img_i2c_init(i2c);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(img_i2c_pm, img_i2c_suspend, img_i2c_resume);
+
+static const struct of_device_id img_scb_i2c_match[] = {
+ { .compatible = "img,scb-i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, img_scb_i2c_match);
+
+static struct platform_driver img_scb_i2c_driver = {
+ .driver = {
+ .name = "img-i2c-scb",
+ .of_match_table = img_scb_i2c_match,
+ .pm = &img_i2c_pm,
+ },
+ .probe = img_i2c_probe,
+ .remove = img_i2c_remove,
+};
+module_platform_driver(img_scb_i2c_driver);
+
+MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_DESCRIPTION("IMG host I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e9fb7cf78612..7f3a9fe9bf4e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -32,22 +32,27 @@
/** Includes *******************************************************************
*******************************************************************************/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/platform_data/i2c-imx.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
/** Defines ********************************************************************
*******************************************************************************/
@@ -58,6 +63,15 @@
/* Default value */
#define IMX_I2C_BIT_RATE 100000 /* 100kHz */
+/*
+ * Enable DMA if the transfer size is bigger than this threshold.
+ * The hardware requires it to be bigger than 4 bytes; 16 is used
+ * here as a reasonable default, though it may not be optimal for
+ * every workload.
+ */
+#define DMA_THRESHOLD 16
+#define DMA_TIMEOUT 1000
+
/* IMX I2C registers:
* the I2C register offset is different between SoCs,
* to provide support for all these chips, split the
@@ -83,6 +97,7 @@
#define I2SR_IBB 0x20
#define I2SR_IAAS 0x40
#define I2SR_ICF 0x80
+#define I2CR_DMAEN 0x02
#define I2CR_RSTA 0x04
#define I2CR_TXAK 0x08
#define I2CR_MTX 0x10
@@ -169,6 +184,17 @@ struct imx_i2c_hwdata {
unsigned i2cr_ien_opcode;
};
+struct imx_i2c_dma {
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_using;
+ struct completion cmd_complete;
+ dma_addr_t dma_buf;
+ unsigned int dma_len;
+ enum dma_transfer_direction dma_transfer_dir;
+ enum dma_data_direction dma_data_dir;
+};
+
struct imx_i2c_struct {
struct i2c_adapter adapter;
struct clk *clk;
@@ -181,6 +207,8 @@ struct imx_i2c_struct {
unsigned int cur_clk;
unsigned int bitrate;
const struct imx_i2c_hwdata *hwdata;
+
+ struct imx_i2c_dma *dma;
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
@@ -251,6 +279,138 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
}
+/* Functions for DMA support */
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dma_addr_t phy_addr)
+{
+ struct imx_i2c_dma *dma;
+ struct dma_slave_config dma_sconfig;
+ struct device *dev = &i2c_imx->adapter.dev;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return;
+
+ dma->chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!dma->chan_tx) {
+ dev_dbg(dev, "can't request DMA tx channel\n");
+ ret = -ENODEV;
+ goto fail_al;
+ }
+
+ dma_sconfig.dst_addr = phy_addr +
+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift);
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.dst_maxburst = 1;
+ dma_sconfig.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig);
+ if (ret < 0) {
+ dev_dbg(dev, "can't configure tx channel\n");
+ goto fail_tx;
+ }
+
+ dma->chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!dma->chan_rx) {
+ dev_dbg(dev, "can't request DMA rx channel\n");
+ ret = -ENODEV;
+ goto fail_tx;
+ }
+
+ dma_sconfig.src_addr = phy_addr +
+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift);
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_sconfig.src_maxburst = 1;
+ dma_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig);
+ if (ret < 0) {
+ dev_dbg(dev, "can't configure rx channel\n");
+ goto fail_rx;
+ }
+
+ i2c_imx->dma = dma;
+ init_completion(&dma->cmd_complete);
+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+ return;
+
+fail_rx:
+ dma_release_channel(dma->chan_rx);
+fail_tx:
+ dma_release_channel(dma->chan_tx);
+fail_al:
+ devm_kfree(dev, dma);
+ dev_info(dev, "can't use DMA\n");
+}
+
+static void i2c_imx_dma_callback(void *arg)
+{
+ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg;
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+
+ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf,
+ dma->dma_len, dma->dma_data_dir);
+ complete(&dma->cmd_complete);
+}
+
+static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs)
+{
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+ struct dma_async_tx_descriptor *txdesc;
+ struct device *dev = &i2c_imx->adapter.dev;
+ struct device *chan_dev = dma->chan_using->device->dev;
+
+ dma->dma_buf = dma_map_single(chan_dev, msgs->buf,
+ dma->dma_len, dma->dma_data_dir);
+ if (dma_mapping_error(chan_dev, dma->dma_buf)) {
+ dev_err(dev, "DMA mapping failed\n");
+ goto err_map;
+ }
+
+ txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf,
+ dma->dma_len, dma->dma_transfer_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_err(dev, "Not able to get desc for DMA xfer\n");
+ goto err_desc;
+ }
+
+ txdesc->callback = i2c_imx_dma_callback;
+ txdesc->callback_param = i2c_imx;
+ if (dma_submit_error(dmaengine_submit(txdesc))) {
+ dev_err(dev, "DMA submit failed\n");
+ goto err_submit;
+ }
+
+ dma_async_issue_pending(dma->chan_using);
+ return 0;
+
+err_submit:
+err_desc:
+ dma_unmap_single(chan_dev, dma->dma_buf,
+ dma->dma_len, dma->dma_data_dir);
+err_map:
+ return -EINVAL;
+}
+
+static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
+{
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+
+ dma->dma_buf = 0;
+ dma->dma_len = 0;
+
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+
+ dma_release_channel(dma->chan_rx);
+ dma->chan_rx = NULL;
+
+ dma->chan_using = NULL;
+}
+
/** Functions for IMX I2C adapter driver ***************************************
*******************************************************************************/
@@ -382,6 +542,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
i2c_imx->stopped = 0;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
+ temp &= ~I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
return result;
}
@@ -395,6 +556,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
+ if (i2c_imx->dma)
+ temp &= ~I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
if (is_imx1_i2c(i2c_imx)) {
@@ -435,6 +598,155 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs)
+{
+ int result;
+ unsigned int temp = 0;
+ unsigned long orig_jiffies = jiffies;
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+ struct device *dev = &i2c_imx->adapter.dev;
+
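+ /*
+ * DMA moves msgs->len - 1 bytes; the slave address and the final
+ * data byte are still written by the CPU further down.
+ */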
+ dma->chan_using = dma->chan_tx;
+ dma->dma_transfer_dir = DMA_MEM_TO_DEV;
+ dma->dma_data_dir = DMA_TO_DEVICE;
+ dma->dma_len = msgs->len - 1;
+ result = i2c_imx_dma_xfer(i2c_imx, msgs);
+ if (result)
+ return result;
+
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp |= I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ /*
+ * Write slave address.
+ * The first byte must be transmitted by the CPU.
+ */
+ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
+ reinit_completion(&i2c_imx->dma->cmd_complete);
+ result = wait_for_completion_timeout(
+ &i2c_imx->dma->cmd_complete,
+ msecs_to_jiffies(DMA_TIMEOUT));
+ if (result <= 0) {
+ dmaengine_terminate_all(dma->chan_using);
+ return result ?: -ETIMEDOUT;
+ }
+
+ /* Wait for the transfer to complete. */
+ while (1) {
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
+ if (temp & I2SR_ICF)
+ break;
+ if (time_after(jiffies, orig_jiffies +
+ msecs_to_jiffies(DMA_TIMEOUT))) {
+ dev_dbg(dev, "<%s> Timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ /* The last data byte must be transferred by the CPU. */
+ imx_i2c_write_reg(msgs->buf[msgs->len-1],
+ i2c_imx, IMX_I2C_I2DR);
+ result = i2c_imx_trx_complete(i2c_imx);
+ if (result)
+ return result;
+
+ return i2c_imx_acked(i2c_imx);
+}
+
+static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs, bool is_lastmsg)
+{
+ int result;
+ unsigned int temp;
+ unsigned long orig_jiffies = jiffies;
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+ struct device *dev = &i2c_imx->adapter.dev;
+
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp |= I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ dma->chan_using = dma->chan_rx;
+ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
+ dma->dma_data_dir = DMA_FROM_DEVICE;
+ /* The last two data bytes must be transferred by the CPU. */
+ dma->dma_len = msgs->len - 2;
+ result = i2c_imx_dma_xfer(i2c_imx, msgs);
+ if (result)
+ return result;
+
+ reinit_completion(&i2c_imx->dma->cmd_complete);
+ result = wait_for_completion_timeout(
+ &i2c_imx->dma->cmd_complete,
+ msecs_to_jiffies(DMA_TIMEOUT));
+ if (result <= 0) {
+ dmaengine_terminate_all(dma->chan_using);
+ return result ?: -ETIMEDOUT;
+ }
+
+ /* Wait for the transfer to complete. */
+ while (1) {
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
+ if (temp & I2SR_ICF)
+ break;
+ if (time_after(jiffies, orig_jiffies +
+ msecs_to_jiffies(DMA_TIMEOUT))) {
+ dev_dbg(dev, "<%s> Timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ /* read n-1 byte data */
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp |= I2CR_TXAK;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ /* read n byte data */
+ result = i2c_imx_trx_complete(i2c_imx);
+ if (result)
+ return result;
+
+ if (is_lastmsg) {
+ /*
+ * A STOP must be generated before reading I2DR to prevent
+ * the controller from generating another clock cycle.
+ */
+ dev_dbg(dev, "<%s> clear MSTA\n", __func__);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx->stopped = 1;
+ } else {
+ /*
+ * For an I2C master-receiver repeated-restart operation like:
+ * read -> repeated start (MSTA) -> read/write
+ * MTX must be set before reading the last byte of the first
+ * read operation, otherwise that read costs one extra clock
+ * cycle.
+ */
+ temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp |= I2CR_MTX;
+ writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ }
+ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ return 0;
+}
+
static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
{
int i, result;
@@ -504,6 +816,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
+ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
+
/* read data */
for (i = 0; i < msgs->len; i++) {
u8 len = 0;
@@ -618,8 +933,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
#endif
if (msgs[i].flags & I2C_M_RD)
result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg);
- else
- result = i2c_imx_write(i2c_imx, &msgs[i]);
+ else {
+ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
+ result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
+ else
+ result = i2c_imx_write(i2c_imx, &msgs[i]);
+ }
if (result)
goto fail0;
}
@@ -654,6 +973,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
int irq, ret;
+ dma_addr_t phy_addr;
dev_dbg(&pdev->dev, "<%s>\n", __func__);
@@ -668,8 +988,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct),
- GFP_KERNEL);
+ phy_addr = (dma_addr_t)res->start;
+ i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL);
if (!i2c_imx)
return -ENOMEM;
@@ -743,6 +1063,9 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.name);
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+ /* Init DMA config if supported */
+ i2c_imx_dma_request(i2c_imx, phy_addr);
+
return 0; /* Return OK */
clk_disable:
@@ -758,6 +1081,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
i2c_del_adapter(&i2c_imx->adapter);
+ if (i2c_imx->dma)
+ i2c_imx_dma_free(i2c_imx);
+
/* setup chip registers to defaults */
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
@@ -772,7 +1098,6 @@ static struct platform_driver i2c_imx_driver = {
.remove = i2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = i2c_imx_dt_ids,
},
.id_table = imx_i2c_devtype,
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 3d16c2f60a5e..72d6161cf77c 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -516,7 +516,6 @@ static struct platform_driver iop3xx_i2c_driver = {
.probe = iop3xx_i2c_probe,
.remove = iop3xx_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "IOP3xx-I2C",
},
};
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 113293d275f6..c2f25f19d76f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -309,7 +309,6 @@ static int smbus_sch_remove(struct platform_device *pdev)
static struct platform_driver smbus_sch_driver = {
.driver = {
.name = "isch_smbus",
- .owner = THIS_MODULE,
},
.probe = smbus_sch_probe,
.remove = smbus_sch_remove,
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index af8f65fb1c05..25993d2e64bf 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -394,7 +394,6 @@ static int kempld_i2c_resume(struct platform_device *pdev)
static struct platform_driver kempld_i2c_driver = {
.driver = {
.name = "kempld-i2c",
- .owner = THIS_MODULE,
},
.probe = kempld_i2c_probe,
.remove = kempld_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
new file mode 100644
index 000000000000..5e176adca8e8
--- /dev/null
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -0,0 +1,492 @@
+/*
+ * I2C bus driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* Meson I2C register map */
+#define REG_CTRL 0x00
+#define REG_SLAVE_ADDR 0x04
+#define REG_TOK_LIST0 0x08
+#define REG_TOK_LIST1 0x0c
+#define REG_TOK_WDATA0 0x10
+#define REG_TOK_WDATA1 0x14
+#define REG_TOK_RDATA0 0x18
+#define REG_TOK_RDATA1 0x1c
+
+/* Control register fields */
+#define REG_CTRL_START BIT(0)
+#define REG_CTRL_ACK_IGNORE BIT(1)
+#define REG_CTRL_STATUS BIT(2)
+#define REG_CTRL_ERROR BIT(3)
+#define REG_CTRL_CLKDIV_SHIFT 12
+#define REG_CTRL_CLKDIV_MASK ((BIT(10) - 1) << REG_CTRL_CLKDIV_SHIFT)
+
+#define I2C_TIMEOUT_MS 500
+#define DEFAULT_FREQ 100000
+
+enum {
+ TOKEN_END = 0,
+ TOKEN_START,
+ TOKEN_SLAVE_ADDR_WRITE,
+ TOKEN_SLAVE_ADDR_READ,
+ TOKEN_DATA,
+ TOKEN_DATA_LAST,
+ TOKEN_STOP,
+};
+
+enum {
+ STATE_IDLE,
+ STATE_READ,
+ STATE_WRITE,
+ STATE_STOP,
+};
+
+/**
+ * struct meson_i2c - Meson I2C device private data
+ *
+ * @adap: I2C adapter instance
+ * @dev: Pointer to device structure
+ * @regs: Base address of the device memory mapped registers
+ * @clk: Pointer to clock structure
+ * @irq: IRQ number
+ * @msg: Pointer to the current I2C message
+ * @state: Current state in the driver state machine
+ * @last: Flag set for the last message in the transfer
+ * @count: Number of bytes to be sent/received in current transfer
+ * @pos: Current position in the send/receive buffer
+ * @error: Flag set when an error is received
+ * @lock: To avoid race conditions between irq handler and xfer code
+ * @done: Completion used to wait for transfer termination
+ * @frequency: Operating frequency of I2C bus clock
+ * @tokens: Sequence of tokens to be written to the device
+ * @num_tokens: Number of tokens
+ */
+struct meson_i2c {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+
+ struct i2c_msg *msg;
+ int state;
+ bool last;
+ int count;
+ int pos;
+ int error;
+
+ spinlock_t lock;
+ struct completion done;
+ unsigned int frequency;
+ u32 tokens[2];
+ int num_tokens;
+};
+
+static void meson_i2c_set_mask(struct meson_i2c *i2c, int reg, u32 mask,
+ u32 val)
+{
+ u32 data;
+
+ data = readl(i2c->regs + reg);
+ data &= ~mask;
+ data |= val & mask;
+ writel(data, i2c->regs + reg);
+}
+
+static void meson_i2c_reset_tokens(struct meson_i2c *i2c)
+{
+ i2c->tokens[0] = 0;
+ i2c->tokens[1] = 0;
+ i2c->num_tokens = 0;
+}
+
+static void meson_i2c_add_token(struct meson_i2c *i2c, int token)
+{
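+ /* Tokens are 4 bits wide: the first 8 fill tokens[0], the rest go into tokens[1] */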
+ if (i2c->num_tokens < 8)
+ i2c->tokens[0] |= (token & 0xf) << (i2c->num_tokens * 4);
+ else
+ i2c->tokens[1] |= (token & 0xf) << ((i2c->num_tokens % 8) * 4);
+
+ i2c->num_tokens++;
+}
+
+static void meson_i2c_write_tokens(struct meson_i2c *i2c)
+{
+ writel(i2c->tokens[0], i2c->regs + REG_TOK_LIST0);
+ writel(i2c->tokens[1], i2c->regs + REG_TOK_LIST1);
+}
+
+static void meson_i2c_set_clk_div(struct meson_i2c *i2c)
+{
+ unsigned long clk_rate = clk_get_rate(i2c->clk);
+ unsigned int div;
+
+ div = DIV_ROUND_UP(clk_rate, i2c->frequency * 4);
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK,
+ div << REG_CTRL_CLKDIV_SHIFT);
+
+ dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", __func__,
+ clk_rate, i2c->frequency, div);
+}
+
+static void meson_i2c_get_data(struct meson_i2c *i2c, char *buf, int len)
+{
+ u32 rdata0, rdata1;
+ int i;
+
+ rdata0 = readl(i2c->regs + REG_TOK_RDATA0);
+ rdata1 = readl(i2c->regs + REG_TOK_RDATA1);
+
+ dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
+ rdata0, rdata1, len);
+
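+ /* Received bytes are packed LSB-first: bytes 0-3 in RDATA0, bytes 4-7 in RDATA1 */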
+ for (i = 0; i < min_t(int, 4, len); i++)
+ *buf++ = (rdata0 >> i * 8) & 0xff;
+
+ for (i = 4; i < min_t(int, 8, len); i++)
+ *buf++ = (rdata1 >> (i - 4) * 8) & 0xff;
+}
+
+static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
+{
+ u32 wdata0 = 0, wdata1 = 0;
+ int i;
+
+ for (i = 0; i < min_t(int, 4, len); i++)
+ wdata0 |= *buf++ << (i * 8);
+
+ for (i = 4; i < min_t(int, 8, len); i++)
+ wdata1 |= *buf++ << ((i - 4) * 8);
+
+ writel(wdata0, i2c->regs + REG_TOK_WDATA0);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
+
+ dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
+ wdata0, wdata1, len);
+}
+
+static void meson_i2c_prepare_xfer(struct meson_i2c *i2c)
+{
+ bool write = !(i2c->msg->flags & I2C_M_RD);
+ int i;
+
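+ /*
+ * The token engine handles at most 8 data bytes per run; the final
+ * byte of a read that completes the message is queued as
+ * TOKEN_DATA_LAST instead of TOKEN_DATA.
+ */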
+ i2c->count = min_t(int, i2c->msg->len - i2c->pos, 8);
+
+ for (i = 0; i < i2c->count - 1; i++)
+ meson_i2c_add_token(i2c, TOKEN_DATA);
+
+ if (i2c->count) {
+ if (write || i2c->pos + i2c->count < i2c->msg->len)
+ meson_i2c_add_token(i2c, TOKEN_DATA);
+ else
+ meson_i2c_add_token(i2c, TOKEN_DATA_LAST);
+ }
+
+ if (write)
+ meson_i2c_put_data(i2c, i2c->msg->buf + i2c->pos, i2c->count);
+}
+
+static void meson_i2c_stop(struct meson_i2c *i2c)
+{
+ dev_dbg(i2c->dev, "%s: last %d\n", __func__, i2c->last);
+
+ if (i2c->last) {
+ i2c->state = STATE_STOP;
+ meson_i2c_add_token(i2c, TOKEN_STOP);
+ } else {
+ i2c->state = STATE_IDLE;
+ complete_all(&i2c->done);
+ }
+}
+
+static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
+{
+ struct meson_i2c *i2c = dev_id;
+ unsigned int ctrl;
+
+ spin_lock(&i2c->lock);
+
+ meson_i2c_reset_tokens(i2c);
+ ctrl = readl(i2c->regs + REG_CTRL);
+
+ dev_dbg(i2c->dev, "irq: state %d, pos %d, count %d, ctrl %08x\n",
+ i2c->state, i2c->pos, i2c->count, ctrl);
+
+ if (ctrl & REG_CTRL_ERROR && i2c->state != STATE_IDLE) {
+ /*
+ * The bit is set when the IGNORE_NAK bit is cleared
+ * and the device didn't respond. In this case, the
+ * I2C controller automatically generates a STOP
+ * condition.
+ */
+ dev_dbg(i2c->dev, "error bit set\n");
+ i2c->error = -ENXIO;
+ i2c->state = STATE_IDLE;
+ complete_all(&i2c->done);
+ goto out;
+ }
+
+ switch (i2c->state) {
+ case STATE_READ:
+ if (i2c->count > 0) {
+ meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos,
+ i2c->count);
+ i2c->pos += i2c->count;
+ }
+
+ if (i2c->pos >= i2c->msg->len) {
+ meson_i2c_stop(i2c);
+ break;
+ }
+
+ meson_i2c_prepare_xfer(i2c);
+ break;
+ case STATE_WRITE:
+ i2c->pos += i2c->count;
+
+ if (i2c->pos >= i2c->msg->len) {
+ meson_i2c_stop(i2c);
+ break;
+ }
+
+ meson_i2c_prepare_xfer(i2c);
+ break;
+ case STATE_STOP:
+ i2c->state = STATE_IDLE;
+ complete_all(&i2c->done);
+ break;
+ case STATE_IDLE:
+ break;
+ }
+
+out:
+ if (i2c->state != STATE_IDLE) {
+ /* Restart the processing */
+ meson_i2c_write_tokens(i2c);
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START,
+ REG_CTRL_START);
+ }
+
+ spin_unlock(&i2c->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
+{
+ int token;
+
+ token = (msg->flags & I2C_M_RD) ? TOKEN_SLAVE_ADDR_READ :
+ TOKEN_SLAVE_ADDR_WRITE;
+
+ writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR);
+ meson_i2c_add_token(i2c, TOKEN_START);
+ meson_i2c_add_token(i2c, token);
+}
+
+static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
+ int last)
+{
+ unsigned long time_left, flags;
+ int ret = 0;
+
+ i2c->msg = msg;
+ i2c->last = last;
+ i2c->pos = 0;
+ i2c->count = 0;
+ i2c->error = 0;
+
+ meson_i2c_reset_tokens(i2c);
+
+ flags = (msg->flags & I2C_M_IGNORE_NAK) ? REG_CTRL_ACK_IGNORE : 0;
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_ACK_IGNORE, flags);
+
+ if (!(msg->flags & I2C_M_NOSTART))
+ meson_i2c_do_start(i2c, msg);
+
+ i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE;
+ meson_i2c_prepare_xfer(i2c);
+ meson_i2c_write_tokens(i2c);
+ reinit_completion(&i2c->done);
+
+ /* Start the transfer */
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START);
+
+ time_left = msecs_to_jiffies(I2C_TIMEOUT_MS);
+ time_left = wait_for_completion_timeout(&i2c->done, time_left);
+
+ /*
+ * Protect access to i2c struct and registers from interrupt
+ * handlers triggered by a transfer terminated after the
+ * timeout period
+ */
+ spin_lock_irqsave(&i2c->lock, flags);
+
+ /* Abort any active operation */
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
+
+ if (!time_left) {
+ i2c->state = STATE_IDLE;
+ ret = -ETIMEDOUT;
+ }
+
+ if (i2c->error)
+ ret = i2c->error;
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+ return ret;
+}
+
+static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct meson_i2c *i2c = adap->algo_data;
+ int i, ret = 0, count = 0;
+
+ clk_enable(i2c->clk);
+ meson_i2c_set_clk_div(i2c);
+
+ for (i = 0; i < num; i++) {
+ ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1);
+ if (ret)
+ break;
+ count++;
+ }
+
+ clk_disable(i2c->clk);
+
+ return ret ? ret : count;
+}
+
+static u32 meson_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm meson_i2c_algorithm = {
+ .master_xfer = meson_i2c_xfer,
+ .functionality = meson_i2c_func,
+};
+
+static int meson_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct meson_i2c *i2c;
+ struct resource *mem;
+ int ret = 0;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(struct meson_i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &i2c->frequency))
+ i2c->frequency = DEFAULT_FREQ;
+
+ i2c->dev = &pdev->dev;
+ platform_set_drvdata(pdev, i2c);
+
+ spin_lock_init(&i2c->lock);
+ init_completion(&i2c->done);
+
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "can't get device clock\n");
+ return PTR_ERR(i2c->clk);
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(i2c->regs))
+ return PTR_ERR(i2c->regs);
+
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0) {
+ dev_err(&pdev->dev, "can't find IRQ\n");
+ return i2c->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2c->irq, meson_i2c_irq,
+ 0, dev_name(&pdev->dev), i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't request IRQ\n");
+ return ret;
+ }
+
+ ret = clk_prepare(i2c->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't prepare clock\n");
+ return ret;
+ }
+
+ strlcpy(i2c->adap.name, "Meson I2C adapter",
+ sizeof(i2c->adap.name));
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &meson_i2c_algorithm;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = np;
+ i2c->adap.algo_data = i2c;
+
+ /*
+ * A transfer is triggered when START bit changes from 0 to 1.
+ * Ensure that the bit is set to 0 after probe
+ */
+ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't register adapter\n");
+ clk_unprepare(i2c->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int meson_i2c_remove(struct platform_device *pdev)
+{
+ struct meson_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
+ return 0;
+}
+
+static const struct of_device_id meson_i2c_match[] = {
+ { .compatible = "amlogic,meson6-i2c" },
+ { },
+};
+
+static struct platform_driver meson_i2c_driver = {
+ .probe = meson_i2c_probe,
+ .remove = meson_i2c_remove,
+ .driver = {
+ .name = "meson-i2c",
+ .of_match_table = meson_i2c_match,
+ },
+};
+
+module_platform_driver(meson_i2c_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson I2C Bus driver");
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 0edf630b099a..c74cc2be613b 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -124,7 +124,7 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
{
unsigned long orig_jiffies = jiffies;
- u32 x;
+ u32 cmd_err;
int result = 0;
if (!i2c->irq) {
@@ -133,11 +133,11 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
if (time_after(jiffies, orig_jiffies + timeout)) {
dev_dbg(i2c->dev, "timeout\n");
writeccr(i2c, 0);
- result = -EIO;
+ result = -ETIMEDOUT;
break;
}
}
- x = readb(i2c->base + MPC_I2C_SR);
+ cmd_err = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
} else {
/* Interrupt mode */
@@ -150,28 +150,28 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
result = -ETIMEDOUT;
}
- x = i2c->interrupt;
+ cmd_err = i2c->interrupt;
i2c->interrupt = 0;
}
if (result < 0)
return result;
- if (!(x & CSR_MCF)) {
+ if (!(cmd_err & CSR_MCF)) {
dev_dbg(i2c->dev, "unfinished\n");
return -EIO;
}
- if (x & CSR_MAL) {
+ if (cmd_err & CSR_MAL) {
dev_dbg(i2c->dev, "MAL\n");
- return -EIO;
+ return -EAGAIN;
}
- if (writing && (x & CSR_RXAK)) {
+ if (writing && (cmd_err & CSR_RXAK)) {
dev_dbg(i2c->dev, "No RXAK\n");
/* generate stop */
writeccr(i2c, CCR_MEN);
- return -EIO;
+ return -ENXIO;
}
return 0;
}
@@ -813,7 +813,6 @@ static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
.remove = fsl_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
.pm = MPC_I2C_PM_OPS,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 2f64273d3f2b..373f6d4e4080 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -925,7 +925,6 @@ static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
.remove = mv64xxx_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
.of_match_table = mv64xxx_i2c_of_match_table,
},
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 07e1be6f8992..ff8b12c8d25f 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -811,7 +811,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
struct resource *res;
int err, irq;
- i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
@@ -893,7 +893,6 @@ static int mxs_i2c_remove(struct platform_device *pdev)
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mxs_i2c_dt_ids,
},
.probe = mxs_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 2a4fe0b7cfb7..7249b5b1e5d0 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -480,7 +480,6 @@ static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
.remove = ocores_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
.pm = OCORES_I2C_PM,
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 81042b08a947..6e75e016bffc 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -623,7 +623,6 @@ static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
.remove = octeon_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = octeon_i2c_match,
},
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 0e650a0d0ad0..0e894193accf 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -54,6 +54,9 @@
/* timeout for pm runtime autosuspend */
#define OMAP_I2C_PM_TIMEOUT 1000 /* ms */
+/* timeout for making decision on bus free status */
+#define OMAP_I2C_BUS_FREE_TIMEOUT (msecs_to_jiffies(10))
+
/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */
enum {
OMAP_I2C_REV_REG = 0,
@@ -98,7 +101,7 @@ enum {
#define OMAP_I2C_STAT_ROVR (1 << 11) /* Receive overrun */
#define OMAP_I2C_STAT_XUDF (1 << 10) /* Transmit underflow */
#define OMAP_I2C_STAT_AAS (1 << 9) /* Address as slave */
-#define OMAP_I2C_STAT_AD0 (1 << 8) /* Address zero */
+#define OMAP_I2C_STAT_BF (1 << 8) /* Bus Free */
#define OMAP_I2C_STAT_XRDY (1 << 4) /* Transmit data ready */
#define OMAP_I2C_STAT_RRDY (1 << 3) /* Receive data ready */
#define OMAP_I2C_STAT_ARDY (1 << 2) /* Register access ready */
@@ -146,16 +149,20 @@ enum {
#define OMAP_I2C_SCLH_HSSCLH 8
/* I2C System Test Register (OMAP_I2C_SYSTEST): */
-#ifdef DEBUG
#define OMAP_I2C_SYSTEST_ST_EN (1 << 15) /* System test enable */
#define OMAP_I2C_SYSTEST_FREE (1 << 14) /* Free running mode */
#define OMAP_I2C_SYSTEST_TMODE_MASK (3 << 12) /* Test mode select */
#define OMAP_I2C_SYSTEST_TMODE_SHIFT (12) /* Test mode select */
+/* Functional mode */
+#define OMAP_I2C_SYSTEST_SCL_I_FUNC (1 << 8) /* SCL line input value */
+#define OMAP_I2C_SYSTEST_SCL_O_FUNC (1 << 7) /* SCL line output value */
+#define OMAP_I2C_SYSTEST_SDA_I_FUNC (1 << 6) /* SDA line input value */
+#define OMAP_I2C_SYSTEST_SDA_O_FUNC (1 << 5) /* SDA line output value */
+/* SDA/SCL IO mode */
#define OMAP_I2C_SYSTEST_SCL_I (1 << 3) /* SCL line sense in */
#define OMAP_I2C_SYSTEST_SCL_O (1 << 2) /* SCL line drive out */
#define OMAP_I2C_SYSTEST_SDA_I (1 << 1) /* SDA line sense in */
#define OMAP_I2C_SYSTEST_SDA_O (1 << 0) /* SDA line drive out */
-#endif
/* OCP_SYSSTATUS bit definitions */
#define SYSS_RESETDONE_MASK (1 << 0)
@@ -202,6 +209,9 @@ struct omap_i2c_dev {
*/
u32 rev;
unsigned b_hw:1; /* bad h/w fixes */
+ unsigned bb_valid:1; /* true when BB-bit reflects
+ * the I2C bus state
+ */
unsigned receiver:1; /* true when we're in receiver mode */
u16 iestate; /* Saved interrupt register */
u16 pscstate;
@@ -290,6 +300,12 @@ static void __omap_i2c_init(struct omap_i2c_dev *dev)
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
/*
+ * NOTE: right after setting CON_EN, STAT_BB could be 0 while the
+ * bus is busy. It will be changed to 1 on the next IP FCLK clock.
+ * udelay(1) will be enough to fix that.
+ */
+
+ /*
* Don't write to this register if the IE state is 0 as it can
* cause deadlock.
*/
@@ -328,7 +344,12 @@ static int omap_i2c_reset(struct omap_i2c_dev *dev)
/* SYSC register is cleared by the reset; rewrite it */
omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+ if (dev->rev > OMAP_I2C_REV_ON_3430_3530) {
+ /* Schedule I2C-bus monitoring on the next transfer */
+ dev->bb_valid = 0;
+ }
}
+
return 0;
}
@@ -441,6 +462,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->scllstate = scll;
dev->sclhstate = sclh;
+ if (dev->rev <= OMAP_I2C_REV_ON_3430_3530) {
+ /* Not implemented */
+ dev->bb_valid = 1;
+ }
+
__omap_i2c_init(dev);
return 0;
@@ -465,6 +491,91 @@ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *dev)
return 0;
}
+/*
+ * Wait until the BB-bit reflects the I2C bus state
+ *
+ * In a multimaster environment, after IP software reset, BB-bit value doesn't
+ * correspond to the current bus state. It may happen that the BB-bit will be
+ * 0 while the bus is busy due to another I2C master's activity.
+ * Here are BB-bit values after reset:
+ * SDA SCL BB NOTES
+ * 0 0 0 1, 2
+ * 1 0 0 1, 2
+ * 0 1 1
+ * 1 1 0 3
+ * Later, if the IP detects SDA=0 and SCL=1 (ACK) or SDA 1->0 while SCL=1
+ * (START) combinations on the bus, it sets the BB-bit to 1.
+ * If the IP detects a SDA 0->1 while SCL=1 (STOP) combination on the bus,
+ * it sets the BB-bit to 0 and BF to 1.
+ * The BB and BF bits correctly track the bus state while the IP is suspended.
+ * The BB bit becomes valid on the next FCLK clock after the CON_EN bit is set.
+ *
+ * NOTES:
+ * 1. Any transfer started when BB=0 and the bus is busy won't be
+ * completed by the IP and results in a controller timeout.
+ * 2. Any transfer started when BB=0 and SCL=0 results in the IP
+ * starting to drive SDA low. In that case the IP corrupts data
+ * on the bus.
+ * 3. Any transfer started in the middle of another master's transfer
+ * leads to unpredictable behaviour and data corruption.
+ */
+static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *dev)
+{
+ unsigned long bus_free_timeout = 0;
+ unsigned long timeout;
+ int bus_free = 0;
+ u16 stat, systest;
+
+ if (dev->bb_valid)
+ return 0;
+
+ timeout = jiffies + OMAP_I2C_TIMEOUT;
+ while (1) {
+ stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ /*
+ * We will see BB or BF event in a case IP had detected any
+ * activity on the I2C bus. Now IP correctly tracks the bus
+ * state. BB-bit value is valid.
+ */
+ if (stat & (OMAP_I2C_STAT_BB | OMAP_I2C_STAT_BF))
+ break;
+
+ /*
+ * Otherwise, we must look at the signals on the bus to make
+ * the right decision.
+ */
+ systest = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+ if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
+ (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC)) {
+ if (!bus_free) {
+ bus_free_timeout = jiffies +
+ OMAP_I2C_BUS_FREE_TIMEOUT;
+ bus_free = 1;
+ }
+
+ /*
+ * The SDA and SCL lines were high for 10 ms without any bus
+ * activity detected. The bus is free, so consider the
+ * BB-bit value valid.
+ */
+ if (time_after(jiffies, bus_free_timeout))
+ break;
+ } else {
+ bus_free = 0;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+
+ msleep(1);
+ }
+
+ dev->bb_valid = 1;
+ return 0;
+}
+
static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
{
u16 buf;
@@ -557,7 +668,11 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
if (!dev->b_hw && stop)
w |= OMAP_I2C_CON_STP;
-
+ /*
+ * NOTE: The STAT_BB bit could become 1 here if another master occupies
+ * the bus. The IP successfully completes the transfer once the bus is
+ * free again (BB reset to 0).
+ */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
/*
@@ -600,13 +715,15 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
return 0;
/* We have an error */
- if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
- OMAP_I2C_STAT_XUDF)) {
+ if (dev->cmd_err & (OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) {
omap_i2c_reset(dev);
__omap_i2c_init(dev);
return -EIO;
}
+ if (dev->cmd_err & OMAP_I2C_STAT_AL)
+ return -EAGAIN;
+
if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
return 0;
@@ -635,6 +752,10 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (r < 0)
goto out;
+ r = omap_i2c_wait_for_bb_valid(dev);
+ if (r < 0)
+ goto out;
+
r = omap_i2c_wait_for_bb(dev);
if (r < 0)
goto out;
@@ -1332,7 +1453,6 @@ static struct platform_driver omap_i2c_driver = {
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
- .owner = THIS_MODULE,
.pm = OMAP_I2C_PM_OPS,
.of_match_table = of_match_ptr(omap_i2c_of_match),
},
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index d1f625f923c7..1bcdd10b68b9 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -183,7 +183,6 @@ static int i2c_parport_remove(struct platform_device *pdev)
static struct platform_driver i2c_parport_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = i2c_parport_probe,
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 845f12598e79..6336f02ec566 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -280,7 +280,6 @@ static struct platform_driver i2c_pca_pf_driver = {
.remove = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 177834e2d841..44f03eed00dd 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -624,7 +624,6 @@ static struct platform_driver pmcmsptwi_driver = {
.remove = pmcmsptwi_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index dc7ff829ad78..e814a36d9b78 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -751,7 +751,6 @@ MODULE_DEVICE_TABLE(of, i2c_pnx_of_match);
static struct platform_driver i2c_pnx_driver = {
.driver = {
.name = "pnx-i2c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(i2c_pnx_of_match),
.pm = PNX_I2C_PM,
},
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index c83fc3ccdd2b..82b6f02544da 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -270,7 +270,6 @@ static struct platform_driver puv3_i2c_driver = {
.remove = puv3_i2c_remove,
.driver = {
.name = "PKUnity-v3-I2C",
- .owner = THIS_MODULE,
.pm = PUV3_I2C_PM,
}
};
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index be671f7a0e06..d9c0d6a17ad6 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -885,7 +885,9 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr)
return; /* ignore */
}
- if (isr & ISR_BED) {
+ if ((isr & ISR_BED) &&
+ (!((i2c->msg->flags & I2C_M_IGNORE_NAK) &&
+ (isr & ISR_ACKNAK)))) {
int ret = BUS_ERROR;
/*
@@ -919,12 +921,14 @@ static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr)
icr |= ICR_ALDIE | ICR_TB;
/*
- * If this is the last byte of the last message, send
- * a STOP.
+ * If this is the last byte of the last message or last byte
+ * of any message with I2C_M_STOP (e.g. SCCB), send a STOP.
*/
- if (i2c->msg_ptr == i2c->msg->len &&
- i2c->msg_idx == i2c->msg_num - 1)
- icr |= ICR_STOP;
+ if ((i2c->msg_ptr == i2c->msg->len) &&
+ ((i2c->msg->flags & I2C_M_STOP) ||
+ (i2c->msg_idx == i2c->msg_num - 1)))
+ icr |= ICR_STOP;
+
} else if (i2c->msg_idx < i2c->msg_num - 1) {
/*
* Next segment of the message.
@@ -1071,7 +1075,8 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART;
}
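With the adapter now advertising I2C_FUNC_PROTOCOL_MANGLING and I2C_FUNC_NOSTART, clients may use flags such as I2C_M_STOP and I2C_M_IGNORE_NAK, which the ISR changes above honour. A minimal sketch of an SCCB-style read (the 0x21 address and the reg/val variables are illustrative, not taken from the patch):

	u8 reg = 0x0a, val;
	struct i2c_msg msgs[] = {
		{ .addr = 0x21, .flags = I2C_M_STOP, .len = 1, .buf = &reg },
		{ .addr = 0x21, .flags = I2C_M_RD,   .len = 1, .buf = &val },
	};

	/* ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)); */

The I2C_M_STOP flag on the first message forces a STOP after the register address, which SCCB devices require before the read phase.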
static const struct i2c_algorithm i2c_pxa_algorithm = {
@@ -1328,7 +1333,6 @@ static struct platform_driver i2c_pxa_driver = {
.remove = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
- .owner = THIS_MODULE,
.pm = I2C_PXA_DEV_PM_OPS,
.of_match_table = i2c_pxa_dt_ids,
},
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 092d89bd3224..4dad23bdffbe 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -764,7 +764,6 @@ static struct platform_driver qup_i2c_driver = {
.remove = qup_i2c_remove,
.driver = {
.name = "i2c_qup",
- .owner = THIS_MODULE,
.pm = &qup_i2c_qup_pm_ops,
.of_match_table = qup_i2c_dt_match,
},
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d826e82dd997..71a6e07eb7ab 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -48,6 +48,12 @@
#define ICMAR 0x20 /* master address */
#define ICRXTX 0x24 /* data port */
+/* ICSCR */
+#define SDBS (1 << 3) /* slave data buffer select */
+#define SIE (1 << 2) /* slave interface enable */
+#define GCAE (1 << 1) /* general call address enable */
+#define FNA (1 << 0) /* forced non acknowledgment */
+
/* ICMCR */
#define MDBS (1 << 7) /* non-fifo mode switch */
#define FSCL (1 << 6) /* override SCL pin */
@@ -58,6 +64,15 @@
#define FSB (1 << 1) /* force stop bit */
#define ESG (1 << 0) /* en startbit gen */
+/* ICSSR (also for ICSIER) */
+#define GCAR (1 << 6) /* general call received */
+#define STM (1 << 5) /* slave transmit mode */
+#define SSR (1 << 4) /* stop received */
+#define SDE (1 << 3) /* slave data empty */
+#define SDT (1 << 2) /* slave data transmitted */
+#define SDR (1 << 1) /* slave data received */
+#define SAR (1 << 0) /* slave addr received */
+
/* ICMSR (also for ICMIE) */
#define MNR (1 << 6) /* nack received */
#define MAL (1 << 5) /* arbitration lost */
@@ -103,6 +118,7 @@ struct rcar_i2c_priv {
u32 icccr;
u32 flags;
enum rcar_i2c_type devtype;
+ struct i2c_client *slave;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -126,15 +142,6 @@ static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg)
static void rcar_i2c_init(struct rcar_i2c_priv *priv)
{
- /*
- * reset slave mode.
- * slave mode is not used on this driver
- */
- rcar_i2c_write(priv, ICSIER, 0);
- rcar_i2c_write(priv, ICSAR, 0);
- rcar_i2c_write(priv, ICSCR, 0);
- rcar_i2c_write(priv, ICSSR, 0);
-
/* reset master mode */
rcar_i2c_write(priv, ICMIER, 0);
rcar_i2c_write(priv, ICMCR, 0);
@@ -360,6 +367,63 @@ static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
return 0;
}
+static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+{
+ u32 ssr_raw, ssr_filtered;
+ u8 value;
+
+ ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
+ ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
+
+ if (!ssr_filtered)
+ return false;
+
+ /* address detected */
+ if (ssr_filtered & SAR) {
+ /* read or write request */
+ if (ssr_raw & STM) {
+ i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_START, &value);
+ rcar_i2c_write(priv, ICRXTX, value);
+ rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
+ } else {
+ i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_START, &value);
+ rcar_i2c_read(priv, ICRXTX); /* dummy read */
+ rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
+ }
+
+ rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
+ }
+
+ /* master sent stop */
+ if (ssr_filtered & SSR) {
+ i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
+ rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
+ }
+
+ /* master wants to write to us */
+ if (ssr_filtered & SDR) {
+ int ret;
+
+ value = rcar_i2c_read(priv, ICRXTX);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_END, &value);
+ /* Send NACK in case of error */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ i2c_slave_event(priv->slave, I2C_SLAVE_REQ_WRITE_START, &value);
+ rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
+ }
+
+ /* master wants to read from us */
+ if (ssr_filtered & SDE) {
+ i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_END, &value);
+ i2c_slave_event(priv->slave, I2C_SLAVE_REQ_READ_START, &value);
+ rcar_i2c_write(priv, ICRXTX, value);
+ rcar_i2c_write(priv, ICSSR, ~SDE & 0xff);
+ }
+
+ return true;
+}
+
static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
{
struct rcar_i2c_priv *priv = ptr;
@@ -369,6 +433,9 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
/*-------------- spin lock -----------------*/
spin_lock(&priv->lock);
+ if (rcar_i2c_slave_irq(priv))
+ goto exit;
+
msr = rcar_i2c_read(priv, ICMSR);
/* Only handle interrupts that are currently enabled */
@@ -499,6 +566,43 @@ out:
return ret;
}
+static int rcar_reg_slave(struct i2c_client *slave)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+
+ if (priv->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ pm_runtime_forbid(rcar_i2c_priv_to_dev(priv));
+
+ priv->slave = slave;
+ rcar_i2c_write(priv, ICSAR, slave->addr);
+ rcar_i2c_write(priv, ICSSR, 0);
+ rcar_i2c_write(priv, ICSIER, SAR | SSR);
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS);
+
+ return 0;
+}
+
+static int rcar_unreg_slave(struct i2c_client *slave)
+{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+
+ WARN_ON(!priv->slave);
+
+ rcar_i2c_write(priv, ICSIER, 0);
+ rcar_i2c_write(priv, ICSCR, 0);
+
+ priv->slave = NULL;
+
+ pm_runtime_allow(rcar_i2c_priv_to_dev(priv));
+
+ return 0;
+}
+
static u32 rcar_i2c_func(struct i2c_adapter *adap)
{
/* This HW can't do SMBUS_QUICK and NOSTART */
@@ -508,6 +612,8 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
static const struct i2c_algorithm rcar_i2c_algo = {
.master_xfer = rcar_i2c_master_xfer,
.functionality = rcar_i2c_func,
+ .reg_slave = rcar_reg_slave,
+ .unreg_slave = rcar_unreg_slave,
};
static const struct of_device_id rcar_i2c_dt_ids[] = {
@@ -620,7 +726,6 @@ MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table);
static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
- .owner = THIS_MODULE,
.of_match_table = rcar_i2c_dt_ids,
},
.probe = rcar_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index af3b3d032a9f..d7e3af671543 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -414,7 +414,6 @@ static struct platform_driver riic_i2c_driver = {
.remove = riic_i2c_remove,
.driver = {
.name = "i2c-riic",
- .owner = THIS_MODULE,
.of_match_table = riic_i2c_dt_ids,
},
};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index f486d0eac4d0..92462843db66 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/math64.h>
/* Register Map */
@@ -97,6 +98,7 @@ struct rk3x_i2c {
/* Hardware resources */
void __iomem *regs;
struct clk *clk;
+ struct notifier_block clk_rate_nb;
/* Settings */
unsigned int scl_frequency;
@@ -428,18 +430,231 @@ out:
return IRQ_HANDLED;
}
-static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
+/**
+ * Calculate divider values for desired SCL frequency
+ *
+ * @clk_rate: I2C input clock rate
+ * @scl_rate: Desired SCL rate
+ * @div_low: Divider output for low
+ * @div_high: Divider output for high
+ *
+ * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that
+ * case a best-effort divider value is still returned in *div_low and
+ * *div_high. If the target rate is too high, it is clamped to the fastest
+ * supported rate.
+ */
+static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+ unsigned long *div_low, unsigned long *div_high)
{
- unsigned long i2c_rate = clk_get_rate(i2c->clk);
- unsigned int div;
+ unsigned long min_low_ns, min_high_ns;
+ unsigned long max_data_hold_ns;
+ unsigned long data_hold_buffer_ns;
+ unsigned long max_low_ns, min_total_ns;
+
+ unsigned long clk_rate_khz, scl_rate_khz;
+
+ unsigned long min_low_div, min_high_div;
+ unsigned long max_low_div;
+
+ unsigned long min_div_for_hold, min_total_div;
+ unsigned long extra_div, extra_low_div, ideal_low_div;
+
+ int ret = 0;
+
+ /* Only support standard-mode and fast-mode */
+ if (WARN_ON(scl_rate > 400000))
+ scl_rate = 400000;
+
+ /* prevent scl_rate_khz from becoming 0 */
+ if (WARN_ON(scl_rate < 1000))
+ scl_rate = 1000;
+
+ /*
+ * min_low_ns: The minimum number of ns we need to hold low
+ * to meet i2c spec
+ * min_high_ns: The minimum number of ns we need to hold high
+ * to meet i2c spec
+ * max_low_ns: The maximum number of ns we can hold low
+ * to meet i2c spec
+ *
+ * Note: max_low_ns should be (max data hold time * 2 - buffer)
+ * This is because the i2c host on Rockchip holds the data line
+ * for half the low time.
+ */
+ if (scl_rate <= 100000) {
+ min_low_ns = 4700;
+ min_high_ns = 4000;
+ max_data_hold_ns = 3450;
+ data_hold_buffer_ns = 50;
+ } else {
+ min_low_ns = 1300;
+ min_high_ns = 600;
+ max_data_hold_ns = 900;
+ data_hold_buffer_ns = 50;
+ }
+ max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+ min_total_ns = min_low_ns + min_high_ns;
+
+ /* Adjust to avoid overflow */
+ clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
+ scl_rate_khz = scl_rate / 1000;
+
+ /*
+ * We need the total div to be >= this number
+ * so we don't clock too fast.
+ */
+ min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8);
+
+ /* These are the min dividers needed for min hold times. */
+ min_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000);
+ min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000);
+ min_div_for_hold = (min_low_div + min_high_div);
+
+ /*
+ * This is the maximum divider so we don't go over the max.
+ * We don't round up here (we round down) since this is a max.
+ */
+ max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
+
+ if (min_low_div > max_low_div) {
+ WARN_ONCE(true,
+ "Conflicting, min_low_div %lu, max_low_div %lu\n",
+ min_low_div, max_low_div);
+ max_low_div = min_low_div;
+ }
+
+ if (min_div_for_hold > min_total_div) {
+ /*
+ * Time needed to meet hold requirements is important.
+ * Just use that.
+ */
+ *div_low = min_low_div;
+ *div_high = min_high_div;
+ } else {
+ /*
+ * We've got to distribute some time among the low and high
+ * so we don't run too fast.
+ */
+ extra_div = min_total_div - min_div_for_hold;
+
+ /*
+ * We'll try to split things up perfectly evenly,
+ * biasing slightly towards having a higher div
+ * for low (spend more time low).
+ */
+ ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
+ scl_rate_khz * 8 * min_total_ns);
+
+ /* Don't allow it to go over the max */
+ if (ideal_low_div > max_low_div)
+ ideal_low_div = max_low_div;
+
+ /*
+ * Handle when the ideal low div is going to take up
+ * more than we have.
+ */
+ if (ideal_low_div > min_low_div + extra_div)
+ ideal_low_div = min_low_div + extra_div;
- /* set DIV = DIVH = DIVL
- * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1))
- * = (clk rate) / (16 * (DIV + 1))
+ /* Give low the "ideal" and give high whatever extra is left */
+ extra_low_div = ideal_low_div - min_low_div;
+ *div_low = ideal_low_div;
+ *div_high = min_high_div + (extra_div - extra_low_div);
+ }
+
+ /*
+ * Adjust to the fact that the hardware has an implicit "+1".
+ * NOTE: Above calculations always produce div_low > 0 and div_high > 0.
*/
- div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
+ *div_low = *div_low - 1;
+ *div_high = *div_high - 1;
+
+ /* Maximum divider supported by hw is 0xffff */
+ if (*div_low > 0xffff) {
+ *div_low = 0xffff;
+ ret = -EINVAL;
+ }
+
+ if (*div_high > 0xffff) {
+ *div_high = 0xffff;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
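As an illustrative walk-through of the calculation above (the numbers are chosen for this example, not taken from the patch): with clk_rate = 100 MHz and scl_rate = 400 kHz, the fast-mode constants give min_low_div = DIV_ROUND_UP(100000 * 1300, 8000000) = 17, min_high_div = DIV_ROUND_UP(100000 * 600, 8000000) = 8 and min_total_div = DIV_ROUND_UP(100000, 3200) = 32, so extra_div = 32 - 25 = 7. The ideal low divider, DIV_ROUND_UP(100000 * 1300, 400 * 8 * 1900) = 22, exceeds max_low_div = 21 and is clamped, leaving div_low = 21 and div_high = 8 + (7 - 4) = 11 before the "-1" adjustment. The resulting SCL runs at 100 MHz / (8 * 32) ≈ 390.6 kHz with a 1.68 µs low and 0.88 µs high phase, meeting both the 1.3 µs / 0.6 µs minimums and the 400 kHz ceiling.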
+
+static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
+{
+ unsigned long div_low, div_high;
+ u64 t_low_ns, t_high_ns;
+ int ret;
+
+ ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
+ &div_high);
+
+ WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
+
+ clk_enable(i2c->clk);
+ i2c_writel(i2c, (div_high << 16) | (div_low & 0xffff), REG_CLKDIV);
+ clk_disable(i2c->clk);
- i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
+ t_low_ns = div_u64(((u64)div_low + 1) * 8 * 1000000000, clk_rate);
+ t_high_ns = div_u64(((u64)div_high + 1) * 8 * 1000000000, clk_rate);
+ dev_dbg(i2c->dev,
+ "CLK %lukhz, Req %uns, Act low %lluns high %lluns\n",
+ clk_rate / 1000,
+ 1000000000 / i2c->scl_frequency,
+ t_low_ns, t_high_ns);
+}
+
+/**
+ * rk3x_i2c_clk_notifier_cb - Clock rate change callback
+ * @nb: Pointer to notifier block
+ * @event: Notification reason
+ * @data: Pointer to notification data object
+ *
+ * The callback checks whether a valid bus frequency can be generated after the
+ * change. If so, the change is acknowledged, otherwise the change is aborted.
+ * New dividers are written to the HW in the pre- or post change notification
+ * depending on the scaling direction.
+ *
+ * Code adapted from i2c-cadence.c.
+ *
+ * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
+ * to acknowledge the change, NOTIFY_DONE if the notification is
+ * considered irrelevant.
+ */
+static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
+ event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct rk3x_i2c *i2c = container_of(nb, struct rk3x_i2c, clk_rate_nb);
+ unsigned long div_low, div_high;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
+ &div_low, &div_high) != 0) {
+ return NOTIFY_STOP;
+ }
+
+ /* scale up */
+ if (ndata->new_rate > ndata->old_rate)
+ rk3x_i2c_adapt_div(i2c, ndata->new_rate);
+
+ return NOTIFY_OK;
+ case POST_RATE_CHANGE:
+ /* scale down */
+ if (ndata->new_rate < ndata->old_rate)
+ rk3x_i2c_adapt_div(i2c, ndata->new_rate);
+ return NOTIFY_OK;
+ case ABORT_RATE_CHANGE:
+ /* scale up */
+ if (ndata->new_rate > ndata->old_rate)
+ rk3x_i2c_adapt_div(i2c, ndata->old_rate);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
}
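For example (an illustration, not from the patch): when the input clock is about to rise from 74 MHz to 148 MHz, the larger dividers for 148 MHz are programmed in PRE_RATE_CHANGE, so SCL never exceeds the requested rate while the parent clock ramps; on a downscale the smaller dividers are written only in POST_RATE_CHANGE for the same reason, and an ABORT_RATE_CHANGE after an attempted upscale restores the dividers matching the old rate.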
/**
@@ -536,9 +751,6 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
clk_enable(i2c->clk);
- /* The clock rate might have changed, so setup the divider again */
- rk3x_i2c_set_scl_rate(i2c, i2c->scl_frequency);
-
i2c->is_last_msg = false;
/*
@@ -624,6 +836,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
int bus_nr;
u32 value;
int irq;
+ unsigned long clk_rate;
i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL);
if (!i2c)
@@ -724,16 +937,28 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
return ret;
}
+ i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb;
+ ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Unable to register clock notifier\n");
+ goto err_clk;
+ }
+
+ clk_rate = clk_get_rate(i2c->clk);
+ rk3x_i2c_adapt_div(i2c, clk_rate);
+
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register adapter\n");
- goto err_clk;
+ goto err_clk_notifier;
}
dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
return 0;
+err_clk_notifier:
+ clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
err_clk:
clk_unprepare(i2c->clk);
return ret;
@@ -744,6 +969,8 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
+
+ clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
clk_unprepare(i2c->clk);
return 0;
@@ -753,7 +980,6 @@ static struct platform_driver rk3x_i2c_driver = {
.probe = rk3x_i2c_probe,
.remove = rk3x_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rk3x-i2c",
.of_match_table = rk3x_i2c_match,
},
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 65244774bfa3..bff20a589621 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,6 +35,8 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/irq.h>
@@ -87,6 +89,9 @@
/* Max time to wait for bus to become idle after a xfer (in us) */
#define S3C2410_IDLE_TIMEOUT 5000
+/* Exynos5 Sysreg offset */
+#define EXYNOS5_SYS_I2C_CFG 0x0234
+
/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
@@ -123,6 +128,8 @@ struct s3c24xx_i2c {
#if defined(CONFIG_ARM_S3C24XX_CPUFREQ)
struct notifier_block freq_transition;
#endif
+ struct regmap *sysreg;
+ unsigned int sys_i2c_cfg;
};
static struct platform_device_id s3c24xx_driver_ids[] = {
@@ -1071,6 +1078,7 @@ static void
s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
{
struct s3c2410_platform_i2c *pdata = i2c->pdata;
+ int id;
if (!np)
return;
@@ -1080,6 +1088,21 @@ s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
of_property_read_u32(np, "samsung,i2c-slave-addr", &pdata->slave_addr);
of_property_read_u32(np, "samsung,i2c-max-bus-freq",
(u32 *)&pdata->frequency);
+ /*
+ * Exynos5's legacy i2c controller and the new high-speed i2c
+ * controller have muxed interrupt sources. By default the
+ * interrupts for the 4-channel HS-I2C controller are enabled.
+ * If nodes for the first four channels of the legacy i2c
+ * controller are available, re-configure the interrupts via the
+ * system register.
+ */
+ id = of_alias_get_id(np, "i2c");
+ i2c->sysreg = syscon_regmap_lookup_by_phandle(np,
+ "samsung,sysreg-phandle");
+ if (IS_ERR(i2c->sysreg))
+ return;
+
+ regmap_update_bits(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, BIT(id), 0);
}
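For example, a node aliased as i2c2 clears BIT(2) in EXYNOS5_SYS_I2C_CFG, which (per the comment above) routes that muxed interrupt line away from the HS-I2C controller and back to the legacy controller; the suspend/resume hooks later in this patch save and restore the same register so the routing survives a suspend cycle.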
#else
static void
@@ -1260,6 +1283,9 @@ static int s3c24xx_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ if (!IS_ERR(i2c->sysreg))
+ regmap_read(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, &i2c->sys_i2c_cfg);
+
return 0;
}
@@ -1268,6 +1294,9 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ if (!IS_ERR(i2c->sysreg))
+ regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
+
clk_prepare_enable(i2c->clk);
s3c24xx_i2c_init(i2c);
clk_disable_unprepare(i2c->clk);
@@ -1301,7 +1330,6 @@ static struct platform_driver s3c24xx_i2c_driver = {
.remove = s3c24xx_i2c_remove,
.id_table = s3c24xx_driver_ids,
.driver = {
- .owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
.of_match_table = of_match_ptr(s3c24xx_i2c_match),
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index d76f3d9737ec..24968384b401 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -552,7 +552,6 @@ static int sh7760_i2c_remove(struct platform_device *pdev)
static struct platform_driver sh7760_i2c_drv = {
.driver = {
.name = SH7760_I2C_DEVNAME,
- .owner = THIS_MODULE,
},
.probe = sh7760_i2c_probe,
.remove = sh7760_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 4855188747c9..d7efaf44868b 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -1,6 +1,8 @@
/*
* SuperH Mobile I2C Controller
*
+ * Copyright (C) 2014 Wolfram Sang <wsa@sang-engineering.com>
+ *
* Copyright (C) 2008 Magnus Damm
*
* Portions of the code based on out-of-tree driver i2c-sh7343.c
@@ -16,20 +18,22 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c/i2c-sh_mobile.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
-#include <linux/i2c/i2c-sh_mobile.h>
/* Transmit operation: */
/* */
@@ -110,6 +114,7 @@ enum sh_mobile_i2c_op {
OP_TX_FIRST,
OP_TX,
OP_TX_STOP,
+ OP_TX_STOP_DATA,
OP_TX_TO_RX,
OP_RX,
OP_RX_STOP,
@@ -134,6 +139,11 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ struct scatterlist sg;
+ enum dma_data_direction dma_direction;
};
struct sh_mobile_dt_config {
@@ -171,6 +181,8 @@ struct sh_mobile_dt_config {
#define ICIC_ICCLB8 0x80
#define ICIC_ICCHB8 0x40
+#define ICIC_TDMAE 0x20
+#define ICIC_RDMAE 0x10
#define ICIC_ALE 0x08
#define ICIC_TACKE 0x04
#define ICIC_WAITE 0x02
@@ -277,6 +289,7 @@ static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
else
pd->icic &= ~ICIC_ICCHB8;
+ dev_dbg(pd->dev, "timing values: L/H=0x%x/0x%x\n", pd->iccl, pd->icch);
return 0;
}
@@ -332,8 +345,10 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
case OP_TX: /* write data */
iic_wr(pd, ICDR, data);
break;
- case OP_TX_STOP: /* write data and issue a stop afterwards */
+ case OP_TX_STOP_DATA: /* write data and issue a stop afterwards */
iic_wr(pd, ICDR, data);
+ /* fallthrough */
+ case OP_TX_STOP: /* issue a stop */
iic_wr(pd, ICCR, pd->send_stop ? ICCR_ICE | ICCR_TRS
: ICCR_ICE | ICCR_TRS | ICCR_BBSY);
break;
@@ -389,13 +404,17 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
{
unsigned char data;
- if (pd->pos == pd->msg->len)
+ if (pd->pos == pd->msg->len) {
+ /* Send stop if we haven't yet (DMA case) */
+ if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ i2c_op(pd, OP_TX_STOP, 0);
return 1;
+ }
sh_mobile_i2c_get_data(pd, &data);
if (sh_mobile_i2c_is_last_byte(pd))
- i2c_op(pd, OP_TX_STOP, data);
+ i2c_op(pd, OP_TX_STOP_DATA, data);
else if (sh_mobile_i2c_is_first_byte(pd))
i2c_op(pd, OP_TX_FIRST, data);
else
@@ -447,10 +466,9 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
{
- struct platform_device *dev = dev_id;
- struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
+ struct sh_mobile_i2c_data *pd = dev_id;
unsigned char sr;
- int wakeup;
+ int wakeup = 0;
sr = iic_rd(pd, ICSR);
pd->sr |= sr; /* remember state */
@@ -459,15 +477,21 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
(pd->msg->flags & I2C_M_RD) ? "read" : "write",
pd->pos, pd->msg->len);
- if (sr & (ICSR_AL | ICSR_TACK)) {
+ /* Kick off TxDMA after preface was done */
+ if (pd->dma_direction == DMA_TO_DEVICE && pd->pos == 0)
+ iic_set_clr(pd, ICIC, ICIC_TDMAE, 0);
+ else if (sr & (ICSR_AL | ICSR_TACK))
/* don't interrupt transaction - continue to issue stop */
iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK));
- wakeup = 0;
- } else if (pd->msg->flags & I2C_M_RD)
+ else if (pd->msg->flags & I2C_M_RD)
wakeup = sh_mobile_i2c_isr_rx(pd);
else
wakeup = sh_mobile_i2c_isr_tx(pd);
+ /* Kick off RxDMA after preface was done */
+ if (pd->dma_direction == DMA_FROM_DEVICE && pd->pos == 1)
+ iic_set_clr(pd, ICIC, ICIC_RDMAE, 0);
+
if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */
iic_wr(pd, ICSR, sr & ~ICSR_WAIT);
@@ -482,6 +506,84 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd)
+{
+ struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE
+ ? pd->dma_rx : pd->dma_tx;
+
+ dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg),
+ pd->msg->len, pd->dma_direction);
+
+ pd->dma_direction = DMA_NONE;
+}
+
+static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd)
+{
+ if (pd->dma_direction == DMA_NONE)
+ return;
+ else if (pd->dma_direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_all(pd->dma_rx);
+ else if (pd->dma_direction == DMA_TO_DEVICE)
+ dmaengine_terminate_all(pd->dma_tx);
+
+ sh_mobile_i2c_dma_unmap(pd);
+}
+
+static void sh_mobile_i2c_dma_callback(void *data)
+{
+ struct sh_mobile_i2c_data *pd = data;
+
+ sh_mobile_i2c_dma_unmap(pd);
+ pd->pos = pd->msg->len;
+
+ iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
+}
+
+static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
+{
+ bool read = pd->msg->flags & I2C_M_RD;
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct dma_chan *chan = read ? pd->dma_rx : pd->dma_tx;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie;
+
+ if (!chan)
+ return;
+
+ dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
+ if (dma_mapping_error(pd->dev, dma_addr)) {
+ dev_dbg(pd->dev, "dma map failed, using PIO\n");
+ return;
+ }
+
+ sg_dma_len(&pd->sg) = pd->msg->len;
+ sg_dma_address(&pd->sg) = dma_addr;
+
+ pd->dma_direction = dir;
+
+ txdesc = dmaengine_prep_slave_sg(chan, &pd->sg, 1,
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n");
+ sh_mobile_i2c_cleanup_dma(pd);
+ return;
+ }
+
+ txdesc->callback = sh_mobile_i2c_dma_callback;
+ txdesc->callback_param = pd;
+
+ cookie = dmaengine_submit(txdesc);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(pd->dev, "submitting dma failed, using PIO\n");
+ sh_mobile_i2c_cleanup_dma(pd);
+ return;
+ }
+
+ dma_async_issue_pending(chan);
+}
+
static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
bool do_init)
{
@@ -506,6 +608,9 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
pd->pos = -1;
pd->sr = 0;
+ if (pd->msg->len > 8)
+ sh_mobile_i2c_xfer_dma(pd);
+
/* Enable all interrupts to begin with */
iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
return 0;
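As a worked example of the threshold above (the 8-byte cut-off is taken straight from the code; the rationale is an assumption): a 16-byte transfer is handed to sh_mobile_i2c_xfer_dma() and completes via the DMA callback, while a 2-byte register access stays on interrupt-driven PIO, where the cost of mapping, descriptor setup and completion handling would outweigh the transfer itself. If any DMA setup step fails, the dev_dbg paths above fall back to PIO transparently.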
@@ -589,6 +694,9 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
5 * HZ);
if (!k) {
dev_err(pd->dev, "Transfer request timed out\n");
+ if (pd->dma_direction != DMA_NONE)
+ sh_mobile_i2c_cleanup_dma(pd);
+
err = -ETIMEDOUT;
break;
}
@@ -622,22 +730,77 @@ static const struct sh_mobile_dt_config default_dt_config = {
.clks_per_count = 1,
};
-static const struct sh_mobile_dt_config rcar_gen2_dt_config = {
+static const struct sh_mobile_dt_config fast_clock_dt_config = {
.clks_per_count = 2,
};
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
- { .compatible = "renesas,iic-r8a7790", .data = &rcar_gen2_dt_config },
- { .compatible = "renesas,iic-r8a7791", .data = &rcar_gen2_dt_config },
- { .compatible = "renesas,iic-r8a7792", .data = &rcar_gen2_dt_config },
- { .compatible = "renesas,iic-r8a7793", .data = &rcar_gen2_dt_config },
- { .compatible = "renesas,iic-r8a7794", .data = &rcar_gen2_dt_config },
+ { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
-static int sh_mobile_i2c_hook_irqs(struct platform_device *dev)
+static int sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir,
+ dma_addr_t port_addr, struct dma_chan **chan_ptr)
+{
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
+ int ret;
+
+ *chan_ptr = NULL;
+
+ chan = dma_request_slave_channel_reason(dev, chan_name);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
+ return ret;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
+ dma_release_channel(chan);
+ return ret;
+ }
+
+ *chan_ptr = chan;
+
+ dev_dbg(dev, "got DMA channel for %s\n", chan_name);
+ return 0;
+}
+
+static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
+{
+ if (pd->dma_tx) {
+ dma_release_channel(pd->dma_tx);
+ pd->dma_tx = NULL;
+ }
+
+ if (pd->dma_rx) {
+ dma_release_channel(pd->dma_rx);
+ pd->dma_rx = NULL;
+ }
+}
+
+static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd)
{
struct resource *res;
resource_size_t n;
@@ -646,7 +809,7 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev)
while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
for (n = res->start; n <= res->end; n++) {
ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr,
- 0, dev_name(&dev->dev), dev);
+ 0, dev_name(&dev->dev), pd);
if (ret) {
dev_err(&dev->dev, "cannot request IRQ %pa\n", &n);
return ret;
@@ -677,7 +840,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
return PTR_ERR(pd->clk);
}
- ret = sh_mobile_i2c_hook_irqs(dev);
+ ret = sh_mobile_i2c_hook_irqs(dev, pd);
if (ret)
return ret;
@@ -723,6 +886,21 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
if (ret)
return ret;
+ /* Init DMA */
+ sg_init_table(&pd->sg, 1);
+ pd->dma_direction = DMA_NONE;
+ ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
+ res->start + ICDR, &pd->dma_rx);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
+ res->start + ICDR, &pd->dma_tx);
+ if (ret == -EPROBE_DEFER) {
+ sh_mobile_i2c_release_dma(pd);
+ return ret;
+ }
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
@@ -754,13 +932,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret < 0) {
+ sh_mobile_i2c_release_dma(pd);
dev_err(&dev->dev, "cannot add numbered adapter\n");
return ret;
}
- dev_info(&dev->dev,
- "I2C adapter %d with bus speed %lu Hz (L/H=0x%x/0x%x)\n",
- adap->nr, pd->bus_speed, pd->iccl, pd->icch);
+ dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz, DMA=%c\n",
+ adap->nr, pd->bus_speed, (pd->dma_rx || pd->dma_tx) ? 'y' : 'n');
return 0;
}
@@ -770,6 +948,7 @@ static int sh_mobile_i2c_remove(struct platform_device *dev)
struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
i2c_del_adapter(&pd->adap);
+ sh_mobile_i2c_release_dma(pd);
pm_runtime_disable(&dev->dev);
return 0;
}
@@ -794,7 +973,6 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
- .owner = THIS_MODULE,
.pm = &sh_mobile_i2c_dev_pm_ops,
.of_match_table = sh_mobile_i2c_dt_ids,
},
@@ -806,16 +984,15 @@ static int __init sh_mobile_i2c_adap_init(void)
{
return platform_driver_register(&sh_mobile_i2c_driver);
}
+subsys_initcall(sh_mobile_i2c_adap_init);
static void __exit sh_mobile_i2c_adap_exit(void)
{
platform_driver_unregister(&sh_mobile_i2c_driver);
}
-
-subsys_initcall(sh_mobile_i2c_adap_init);
module_exit(sh_mobile_i2c_adap_exit);
MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
-MODULE_AUTHOR("Magnus Damm");
+MODULE_AUTHOR("Magnus Damm and Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-sh_mobile");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 15ac8395dcd3..b4685bb9b5d7 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -154,7 +154,6 @@ static int simtec_i2c_remove(struct platform_device *dev)
static struct platform_driver simtec_i2c_driver = {
.driver = {
.name = "simtec-i2c",
- .owner = THIS_MODULE,
},
.probe = simtec_i2c_probe,
.remove = simtec_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index b1336d5f0531..1092d4eeeb54 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -451,7 +451,6 @@ MODULE_DEVICE_TABLE(of, sirfsoc_i2c_of_match);
static struct platform_driver i2c_sirfsoc_driver = {
.driver = {
.name = "sirfsoc_i2c",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &i2c_sirfsoc_pm_ops,
#endif
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 2e4eccd6599a..88057fad9dfe 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -863,7 +863,6 @@ MODULE_DEVICE_TABLE(of, st_i2c_match);
static struct platform_driver st_i2c_driver = {
.driver = {
.name = "st-i2c",
- .owner = THIS_MODULE,
.of_match_table = st_i2c_match,
.pm = ST_I2C_PM,
},
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 6a44f37798c8..4885da9e9298 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -981,7 +981,6 @@ static const struct of_device_id stu300_dt_match[] = {
static struct platform_driver stu300_i2c_driver = {
.driver = {
.name = NAME,
- .owner = THIS_MODULE,
.pm = STU300_I2C_PM,
.of_match_table = stu300_dt_match,
},
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 4d75d4759709..7668e2e9b8fd 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -332,7 +332,6 @@ static struct platform_driver p2wi_driver = {
.probe = p2wi_probe,
.remove = p2wi_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "i2c-sunxi-p2wi",
.of_match_table = p2wi_of_match_table,
},
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index efba1ebe16ba..28b87e683503 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -893,7 +893,6 @@ static struct platform_driver tegra_i2c_driver = {
.remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
- .owner = THIS_MODULE,
.of_match_table = tegra_i2c_of_match,
.pm = TEGRA_I2C_PM,
},
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 6bb3a89a440f..240637f01d11 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -138,7 +138,6 @@ static struct platform_driver i2c_versatile_driver = {
.remove = i2c_versatile_remove,
.driver = {
.name = "versatile-i2c",
- .owner = THIS_MODULE,
.of_match_table = i2c_versatile_match,
},
};
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index f80a38c2072c..82ea34925489 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -462,7 +462,6 @@ static struct platform_driver wmt_i2c_driver = {
.remove = wmt_i2c_remove,
.driver = {
.name = "wmt-i2c",
- .owner = THIS_MODULE,
.of_match_table = wmt_i2c_dt_ids,
},
};
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index cc65ea0b818f..e8400042b358 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,6 +46,11 @@ enum xilinx_i2c_state {
STATE_START
};
+enum xiic_endian {
+ LITTLE,
+ BIG
+};
+
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
* @base: Memory base of the HW registers
@@ -70,6 +75,7 @@ struct xiic_i2c {
enum xilinx_i2c_state state;
struct i2c_msg *rx_msg;
int rx_pos;
+ enum xiic_endian endianness;
};
@@ -170,29 +176,58 @@ struct xiic_i2c {
static void xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
+/*
+ * For the register read and write functions, little-endian and big-endian
+ * versions are necessary. Endianness is detected during the probe function.
+ * Only the least significant byte [doublet] of the register is ever
+ * accessed. This requires an offset of 3 [2] from the base address for
+ * big-endian systems.
+ */
+
static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
{
- iowrite8(value, i2c->base + reg);
+ if (i2c->endianness == LITTLE)
+ iowrite8(value, i2c->base + reg);
+ else
+ iowrite8(value, i2c->base + reg + 3);
}
static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
{
- return ioread8(i2c->base + reg);
+ u8 ret;
+
+ if (i2c->endianness == LITTLE)
+ ret = ioread8(i2c->base + reg);
+ else
+ ret = ioread8(i2c->base + reg + 3);
+ return ret;
}
static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
{
- iowrite16(value, i2c->base + reg);
+ if (i2c->endianness == LITTLE)
+ iowrite16(value, i2c->base + reg);
+ else
+ iowrite16be(value, i2c->base + reg + 2);
}
static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
{
- iowrite32(value, i2c->base + reg);
+ if (i2c->endianness == LITTLE)
+ iowrite32(value, i2c->base + reg);
+ else
+ iowrite32be(value, i2c->base + reg);
}
static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
- return ioread32(i2c->base + reg);
+ u32 ret;
+
+ if (i2c->endianness == LITTLE)
+ ret = ioread32(i2c->base + reg);
+ else
+ ret = ioread32be(i2c->base + reg);
+ return ret;
}
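As a concrete illustration of the comment above (the 0x100 offset is arbitrary): on a big-endian bus an 8-bit register nominally at 0x100 is accessed at 0x103, and a 16-bit register at 0x102 via iowrite16be, so in both cases the least significant byte lanes of the 32-bit word are hit; 32-bit accesses keep the base offset and only need the byte-swapping ioread32be/iowrite32be variants.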
static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
@@ -692,6 +727,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
u8 i;
+ u32 sr;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -724,6 +760,18 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return ret;
}
+ /*
+ * Detect endianness
+ * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
+ * set, assume that the endianness was wrong and swap.
+ */
+ i2c->endianness = LITTLE;
+ xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
+ /* Reset is cleared in xiic_reinit */
+ sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
+ if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
+ i2c->endianness = BIG;
+
xiic_reinit(i2c);
/* add i2c adapter to i2c tree */
@@ -767,7 +815,6 @@ static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = xiic_i2c_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(xiic_of_match),
},
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 17f7352eca6b..8b36bcfd952d 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -263,7 +263,6 @@ static struct platform_driver xlr_i2c_driver = {
.remove = xlr_i2c_remove,
.driver = {
.name = "xlr-i2cbus",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 5153354b1a6b..0a7e410b6195 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -544,7 +544,6 @@ static int scx200_remove(struct platform_device *pdev)
static struct platform_driver scx200_pci_driver = {
.driver = {
.name = "cs5535-smb",
- .owner = THIS_MODULE,
},
.probe = scx200_probe,
.remove = scx200_remove,
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 68aeb8eedae0..39d25a8cb1ad 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -24,6 +24,7 @@
(c) 2013 Wolfram Sang <wsa@the-dreams.de>
I2C ACPI code Copyright (C) 2014 Intel Corp
Author: Lan Tianyu <tianyu.lan@intel.com>
+ I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com>
*/
#include <linux/module.h>
@@ -49,6 +50,7 @@
#include <linux/acpi.h>
#include <linux/jump_label.h>
#include <asm/uaccess.h>
+#include <linux/err.h>
#include "i2c-core.h"
@@ -260,7 +262,7 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command,
struct acpi_resource *ares;
u32 accessor_type = function >> 16;
u8 action = function & ACPI_IO_MASK;
- acpi_status ret = AE_OK;
+ acpi_status ret;
int status;
ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
@@ -627,6 +629,17 @@ static int i2c_device_probe(struct device *dev)
if (!client)
return 0;
+ if (!client->irq && dev->of_node) {
+ int irq = of_irq_get(dev->of_node, 0);
+
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (irq < 0)
+ irq = 0;
+
+ client->irq = irq;
+ }
+
driver = to_i2c_driver(dev->driver);
if (!driver->probe || !driver->id_table)
return -ENODEV;
@@ -1369,63 +1382,67 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
/* OF support code */
#if IS_ENABLED(CONFIG_OF)
-static void of_i2c_register_devices(struct i2c_adapter *adap)
+static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
+ struct device_node *node)
{
- void *result;
- struct device_node *node;
+ struct i2c_client *result;
+ struct i2c_board_info info = {};
+ struct dev_archdata dev_ad = {};
+ const __be32 *addr;
+ int len;
- /* Only register child devices if the adapter has a node pointer set */
- if (!adap->dev.of_node)
- return;
+ dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
- dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+ if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
+ node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
- for_each_available_child_of_node(adap->dev.of_node, node) {
- struct i2c_board_info info = {};
- struct dev_archdata dev_ad = {};
- const __be32 *addr;
- int len;
+ addr = of_get_property(node, "reg", &len);
+ if (!addr || (len < sizeof(int))) {
+ dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
+ node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
- dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
+ info.addr = be32_to_cpup(addr);
+ if (info.addr > (1 << 10) - 1) {
+ dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
+ info.addr, node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
- if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
- dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
- node->full_name);
- continue;
- }
+ info.of_node = of_node_get(node);
+ info.archdata = &dev_ad;
- addr = of_get_property(node, "reg", &len);
- if (!addr || (len < sizeof(int))) {
- dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
- node->full_name);
- continue;
- }
+ if (of_get_property(node, "wakeup-source", NULL))
+ info.flags |= I2C_CLIENT_WAKE;
- info.addr = be32_to_cpup(addr);
- if (info.addr > (1 << 10) - 1) {
- dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
- continue;
- }
+ request_module("%s%s", I2C_MODULE_PREFIX, info.type);
- info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
- info.archdata = &dev_ad;
+ result = i2c_new_device(adap, &info);
+ if (result == NULL) {
+ dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
+ node->full_name);
+ of_node_put(node);
+ return ERR_PTR(-EINVAL);
+ }
+ return result;
+}
- if (of_get_property(node, "wakeup-source", NULL))
- info.flags |= I2C_CLIENT_WAKE;
+static void of_i2c_register_devices(struct i2c_adapter *adap)
+{
+ struct device_node *node;
- request_module("%s%s", I2C_MODULE_PREFIX, info.type);
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adap->dev.of_node)
+ return;
- result = i2c_new_device(adap, &info);
- if (result == NULL) {
- dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
- node->full_name);
- of_node_put(node);
- irq_dispose_mapping(info.irq);
- continue;
- }
- }
+ dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+
+ for_each_available_child_of_node(adap->dev.of_node, node)
+ of_i2c_register_device(adap, node);
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -1945,6 +1962,52 @@ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
}
EXPORT_SYMBOL(i2c_clients_command);
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ adap = of_find_i2c_adapter_by_node(rd->dn->parent);
+ if (adap == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ client = of_i2c_register_device(adap, rd->dn);
+ put_device(&adap->dev);
+
+ if (IS_ERR(client)) {
+ pr_err("%s: failed to create for '%s'\n",
+ __func__, rd->dn->full_name);
+ return notifier_from_errno(PTR_ERR(client));
+ }
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* find our device by node */
+ client = of_find_i2c_device_by_node(rd->dn);
+ if (client == NULL)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ i2c_unregister_device(client);
+
+ /* and put the reference of the find */
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+static struct notifier_block i2c_of_notifier = {
+ .notifier_call = of_i2c_notify,
+};
+#else
+extern struct notifier_block i2c_of_notifier;
+#endif /* CONFIG_OF_DYNAMIC */
+
static int __init i2c_init(void)
{
int retval;
@@ -1962,6 +2025,10 @@ static int __init i2c_init(void)
retval = i2c_add_driver(&dummy_driver);
if (retval)
goto class_err;
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier));
+
return 0;
class_err:
@@ -1975,6 +2042,8 @@ bus_err:
static void __exit i2c_exit(void)
{
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
#ifdef CONFIG_I2C_COMPAT
class_compat_unregister(i2c_adapter_compat_class);
@@ -2903,6 +2972,54 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
+{
+ int ret;
+
+ if (!client || !slave_cb)
+ return -EINVAL;
+
+ if (!(client->flags & I2C_CLIENT_TEN)) {
+ /* Enforce stricter address checking */
+ ret = i2c_check_addr_validity(client->addr);
+ if (ret)
+ return ret;
+ }
+
+ if (!client->adapter->algo->reg_slave)
+ return -EOPNOTSUPP;
+
+ client->slave_cb = slave_cb;
+
+ i2c_lock_adapter(client->adapter);
+ ret = client->adapter->algo->reg_slave(client);
+ i2c_unlock_adapter(client->adapter);
+
+ if (ret)
+ client->slave_cb = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_slave_register);
+
+int i2c_slave_unregister(struct i2c_client *client)
+{
+ int ret;
+
+ if (!client->adapter->algo->unreg_slave)
+ return -EOPNOTSUPP;
+
+ i2c_lock_adapter(client->adapter);
+ ret = client->adapter->algo->unreg_slave(client);
+ i2c_unlock_adapter(client->adapter);
+
+ if (ret == 0)
+ client->slave_cb = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+
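A minimal backend sketch for the new slave API (my_slave_cb, my_slave_probe and the 0xa5 payload are illustrative assumptions; the real in-tree user is the EEPROM simulator added below). The adapter must provide reg_slave/unreg_slave, as i2c-rcar now does:

static int my_slave_cb(struct i2c_client *client,
		       enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_WRITE_END:	/* remote master wrote a byte to us */
		/* consume *val */
		break;
	case I2C_SLAVE_REQ_READ_START:	/* remote master wants a byte from us */
		*val = 0xa5;
		break;
	default:
		break;
	}
	return 0;
}

static int my_slave_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	return i2c_slave_register(client, my_slave_cb);
}

static int my_slave_remove(struct i2c_client *client)
{
	return i2c_slave_unregister(client);
}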
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 2d0847b6be62..593f7ca9adc7 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -110,6 +110,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
void *, u32))
{
struct i2c_mux_priv *priv;
+ char symlink_name[20];
int ret;
priv = kzalloc(sizeof(struct i2c_mux_priv), GFP_KERNEL);
@@ -183,6 +184,12 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
return NULL;
}
+ WARN(sysfs_create_link(&priv->adap.dev.kobj, &mux_dev->kobj, "mux_device"),
+ "can't create symlink to mux device\n");
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan_id);
+ WARN(sysfs_create_link(&mux_dev->kobj, &priv->adap.dev.kobj, symlink_name),
+ "can't create symlink for channel %u\n", chan_id);
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
@@ -193,7 +200,12 @@ EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
void i2c_del_mux_adapter(struct i2c_adapter *adap)
{
struct i2c_mux_priv *priv = adap->algo_data;
+ char symlink_name[20];
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
+ sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
+ sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
kfree(priv);
}
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
new file mode 100644
index 000000000000..6631400b5f02
--- /dev/null
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -0,0 +1,170 @@
+/*
+ * I2C slave mode EEPROM simulator
+ *
+ * Copyright (C) 2014 by Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2014 by Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ *
+ * Because most IP blocks can only detect one I2C slave address anyhow, this
+ * driver does not support simulating EEPROM types which take more than one
+ * address. It is prepared to simulate bigger EEPROMs with an internal 16 bit
+ * pointer, yet implementation is deferred until the need actually arises.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+
+struct eeprom_data {
+ struct bin_attribute bin;
+ bool first_write;
+ spinlock_t buffer_lock;
+ u8 buffer_idx;
+ u8 buffer[];
+};
+
+static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct eeprom_data *eeprom = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_REQ_WRITE_END:
+ if (eeprom->first_write) {
+ eeprom->buffer_idx = *val;
+ eeprom->first_write = false;
+ } else {
+ spin_lock(&eeprom->buffer_lock);
+ eeprom->buffer[eeprom->buffer_idx++] = *val;
+ spin_unlock(&eeprom->buffer_lock);
+ }
+ break;
+
+ case I2C_SLAVE_REQ_READ_START:
+ spin_lock(&eeprom->buffer_lock);
+ *val = eeprom->buffer[eeprom->buffer_idx];
+ spin_unlock(&eeprom->buffer_lock);
+ break;
+
+ case I2C_SLAVE_REQ_READ_END:
+ eeprom->buffer_idx++;
+ break;
+
+ case I2C_SLAVE_STOP:
+ eeprom->first_write = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ struct eeprom_data *eeprom;
+ unsigned long flags;
+
+ if (off + count >= attr->size)
+ return -EFBIG;
+
+ eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+ spin_lock_irqsave(&eeprom->buffer_lock, flags);
+ memcpy(buf, &eeprom->buffer[off], count);
+ spin_unlock_irqrestore(&eeprom->buffer_lock, flags);
+
+ return count;
+}
+
+static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ struct eeprom_data *eeprom;
+ unsigned long flags;
+
+ if (off + count >= attr->size)
+ return -EFBIG;
+
+ eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+ spin_lock_irqsave(&eeprom->buffer_lock, flags);
+ memcpy(&eeprom->buffer[off], buf, count);
+ spin_unlock_irqrestore(&eeprom->buffer_lock, flags);
+
+ return count;
+}
+
+static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct eeprom_data *eeprom;
+ int ret;
+ unsigned size = id->driver_data;
+
+ eeprom = devm_kzalloc(&client->dev, sizeof(struct eeprom_data) + size, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ eeprom->first_write = true;
+ spin_lock_init(&eeprom->buffer_lock);
+ i2c_set_clientdata(client, eeprom);
+
+ sysfs_bin_attr_init(&eeprom->bin);
+ eeprom->bin.attr.name = "slave-eeprom";
+ eeprom->bin.attr.mode = S_IRUSR | S_IWUSR;
+ eeprom->bin.read = i2c_slave_eeprom_bin_read;
+ eeprom->bin.write = i2c_slave_eeprom_bin_write;
+ eeprom->bin.size = size;
+
+ ret = sysfs_create_bin_file(&client->dev.kobj, &eeprom->bin);
+ if (ret)
+ return ret;
+
+ ret = i2c_slave_register(client, i2c_slave_eeprom_slave_cb);
+ if (ret) {
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin);
+ return ret;
+ }
+
+ return 0;
+};
+
+static int i2c_slave_eeprom_remove(struct i2c_client *client)
+{
+ struct eeprom_data *eeprom = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_slave_eeprom_id[] = {
+ { "slave-24c02", 2048 / 8 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id);
+
+static struct i2c_driver i2c_slave_eeprom_driver = {
+ .driver = {
+ .name = "i2c-slave-eeprom",
+ .owner = THIS_MODULE,
+ },
+ .probe = i2c_slave_eeprom_probe,
+ .remove = i2c_slave_eeprom_remove,
+ .id_table = i2c_slave_eeprom_id,
+};
+module_i2c_driver(i2c_slave_eeprom_driver);
+
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_DESCRIPTION("I2C slave mode EEPROM simulator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 69afffa8f427..5cf1b60b69e2 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -235,7 +235,6 @@ static struct platform_driver i2c_arbitrator_driver = {
.probe = i2c_arbitrator_probe,
.remove = i2c_arbitrator_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "i2c-arb-gpio-challenge",
.of_match_table = i2c_arbitrator_of_match,
},
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index d8989c823f50..f5798eb4076b 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -280,7 +280,6 @@ static struct platform_driver i2c_mux_gpio_driver = {
.probe = i2c_mux_gpio_probe,
.remove = i2c_mux_gpio_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "i2c-mux-gpio",
.of_match_table = i2c_mux_gpio_of_match,
},
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 4ff0ef3e07a6..b48378c4b40d 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -264,7 +264,6 @@ MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
static struct platform_driver i2c_mux_pinctrl_driver = {
.driver = {
.name = "i2c-mux-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
},
.probe = i2c_mux_pinctrl_probe,
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 07ea58084068..4d181a918d72 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -586,7 +586,6 @@ static int au_ide_remove(struct platform_device *dev)
static struct platform_driver au1200_ide_driver = {
.driver = {
.name = "au1200-ide",
- .owner = THIS_MODULE,
},
.probe = au_ide_probe,
.remove = au_ide_remove,
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 97a2f9dc75d8..901e6ebfeb96 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -179,7 +179,6 @@ static struct platform_driver amiga_gayle_ide_driver = {
.remove = __exit_p(amiga_gayle_ide_remove),
.driver = {
.name = "amiga-gayle-ide",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 3d42043fec51..8c6363cdd208 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -487,7 +487,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
* it. It should be fixed as of version 1.9, but to be on the safe side
* we'll leave the limitation below for the 2.2.x tree.
*/
- if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) {
+ if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) {
drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
/* This value will be visible in the /proc/ide/hdx/settings */
drive->pc_delay = IDEFLOPPY_PC_DELAY;
@@ -498,7 +498,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
* Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
* nasty clicking noises without it, so please don't remove this.
*/
- if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
+ if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) {
blk_queue_max_hw_sectors(drive->queue, 64);
drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
/* IOMEGA Clik! drives do not support lock/unlock commands */
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 2ce6268a2734..e29b02ca9e91 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -101,8 +101,7 @@ void ide_device_put(ide_drive_t *drive)
struct device *host_dev = drive->hwif->host->dev[0];
struct module *module = host_dev ? host_dev->driver->owner : NULL;
- if (module)
- module_put(module);
+ module_put(module);
#endif
put_device(&drive->gendev);
}
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index d48de6de503e..2b43be3bca0a 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -125,7 +125,6 @@ static int plat_ide_remove(struct platform_device *pdev)
static struct platform_driver platform_ide_driver = {
.driver = {
.name = "pata_platform",
- .owner = THIS_MODULE,
},
.probe = plat_ide_probe,
.remove = plat_ide_remove,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index ba20d18c0373..8012e43bf8f6 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -386,7 +386,6 @@ MODULE_ALIAS("platform:palm_bk3710");
static struct platform_driver platform_bk_driver = {
.driver = {
.name = "palm_bk3710",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 68edd4f58a28..40a3f55b08dd 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -198,7 +198,6 @@ static int __exit tx4938ide_remove(struct platform_device *pdev)
static struct platform_driver tx4938ide_driver = {
.driver = {
.name = "tx4938ide",
- .owner = THIS_MODULE,
},
.remove = __exit_p(tx4938ide_remove),
};
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 4ecdee5eca83..67d4a7d4acc8 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -618,7 +618,6 @@ static int tx4939ide_resume(struct platform_device *dev)
static struct platform_driver tx4939ide_driver = {
.driver = {
.name = MODNAME,
- .owner = THIS_MODULE,
},
.remove = __exit_p(tx4939ide_remove),
.resume = tx4939ide_resume,
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index c3877630b2e4..fa9646034305 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -33,8 +33,7 @@ static const struct st_sensors_platform_data default_accel_pdata = {
.drdy_int_pin = 1,
};
-int st_accel_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_accel_common_probe(struct iio_dev *indio_dev);
void st_accel_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 087864854c61..53f32629283a 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -161,7 +161,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_accel_sensors[] = {
+static const struct st_sensor_settings st_accel_sensors_settings[] = {
{
.wai = ST_ACCEL_1_WAI_EXP,
.sensors_supported = {
@@ -457,8 +457,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#define ST_ACCEL_TRIGGER_OPS NULL
#endif
-int st_accel_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *plat_data)
+int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
int irq = adata->get_irq_data_ready(indio_dev);
@@ -470,24 +469,25 @@ int st_accel_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_accel_sensors), st_accel_sensors);
+ ARRAY_SIZE(st_accel_sensors_settings),
+ st_accel_sensors_settings);
if (err < 0)
return err;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
- adata->multiread_bit = adata->sensor->multi_read_bit;
- indio_dev->channels = adata->sensor->ch;
+ adata->multiread_bit = adata->sensor_settings->multi_read_bit;
+ indio_dev->channels = adata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
adata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &adata->sensor->fs.fs_avl[0];
- adata->odr = adata->sensor->odr.odr_avl[0].hz;
+ &adata->sensor_settings->fs.fs_avl[0];
+ adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
- if (!plat_data)
- plat_data =
+ if (!adata->dev->platform_data)
+ adata->dev->platform_data =
(struct st_sensors_platform_data *)&default_accel_pdata;
- err = st_sensors_init_sensor(indio_dev, plat_data);
+ err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
if (err < 0)
return err;
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 7164aeff3ab1..c7246bdd30b9 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -79,12 +79,11 @@ static int st_accel_i2c_probe(struct i2c_client *client,
return -ENOMEM;
adata = iio_priv(indio_dev);
- adata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_accel_of_match);
st_sensors_i2c_configure(indio_dev, client, adata);
- err = st_accel_common_probe(indio_dev, client->dev.platform_data);
+ err = st_accel_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 195639646e34..12ec29389e4b 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -29,11 +29,10 @@ static int st_accel_spi_probe(struct spi_device *spi)
return -ENOMEM;
adata = iio_priv(indio_dev);
- adata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, adata);
- err = st_accel_common_probe(indio_dev, spi->dev.platform_data);
+ err = st_accel_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index bc4e787096e8..0f79e4725763 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -214,6 +214,20 @@ config NAU7802
To compile this driver as a module, choose M here: the
module will be called nau7802.
+config QCOM_SPMI_IADC
+ tristate "Qualcomm SPMI PMIC current ADC"
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+ This is the IIO current ADC driver for the Qualcomm QPNP IADC chip.
+
+ The driver supports single-mode operation to read from one of two
+ channels (external or internal). The hardware has additional
+ channels used internally for gain and offset calibration.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-spmi-iadc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index f30093f5b67a..701fdb7c96aa 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
+obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 43620fd4c66a..3a2dbb3b4926 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -39,6 +39,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* S3C/EXYNOS4412/5250 ADC_V1 registers definitions */
#define ADC_V1_CON(x) ((x) + 0x00)
@@ -90,11 +92,14 @@
#define EXYNOS_ADC_TIMEOUT (msecs_to_jiffies(100))
+#define EXYNOS_ADCV1_PHY_OFFSET 0x0718
+#define EXYNOS_ADCV2_PHY_OFFSET 0x0720
+
struct exynos_adc {
struct exynos_adc_data *data;
struct device *dev;
void __iomem *regs;
- void __iomem *enable_reg;
+ struct regmap *pmu_map;
struct clk *clk;
struct clk *sclk;
unsigned int irq;
@@ -110,6 +115,7 @@ struct exynos_adc_data {
int num_channels;
bool needs_sclk;
bool needs_adc_phy;
+ int phy_offset;
u32 mask;
void (*init_hw)(struct exynos_adc *info);
@@ -183,7 +189,7 @@ static void exynos_adc_v1_init_hw(struct exynos_adc *info)
u32 con1;
if (info->data->needs_adc_phy)
- writel(1, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
/* set default prescaler values and Enable prescaler */
con1 = ADC_V1_CON_PRSCLV(49) | ADC_V1_CON_PRSCEN;
@@ -198,7 +204,7 @@ static void exynos_adc_v1_exit_hw(struct exynos_adc *info)
u32 con;
if (info->data->needs_adc_phy)
- writel(0, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 0);
con = readl(ADC_V1_CON(info->regs));
con |= ADC_V1_CON_STANDBY;
@@ -225,6 +231,7 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
.num_channels = MAX_ADC_V1_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
.init_hw = exynos_adc_v1_init_hw,
.exit_hw = exynos_adc_v1_exit_hw,
@@ -314,7 +321,7 @@ static void exynos_adc_v2_init_hw(struct exynos_adc *info)
u32 con1, con2;
if (info->data->needs_adc_phy)
- writel(1, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
con1 = ADC_V2_CON1_SOFT_RESET;
writel(con1, ADC_V2_CON1(info->regs));
@@ -332,7 +339,7 @@ static void exynos_adc_v2_exit_hw(struct exynos_adc *info)
u32 con;
if (info->data->needs_adc_phy)
- writel(0, info->enable_reg);
+ regmap_write(info->pmu_map, info->data->phy_offset, 0);
con = readl(ADC_V2_CON1(info->regs));
con &= ~ADC_CON_EN_START;
@@ -362,6 +369,7 @@ static const struct exynos_adc_data exynos_adc_v2_data = {
.num_channels = MAX_ADC_V2_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV2_PHY_OFFSET,
.init_hw = exynos_adc_v2_init_hw,
.exit_hw = exynos_adc_v2_exit_hw,
@@ -374,6 +382,7 @@ static const struct exynos_adc_data exynos3250_adc_data = {
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
.needs_sclk = true,
.needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
.init_hw = exynos_adc_v2_init_hw,
.exit_hw = exynos_adc_v2_exit_hw,
@@ -381,6 +390,35 @@ static const struct exynos_adc_data exynos3250_adc_data = {
.start_conv = exynos_adc_v2_start_conv,
};
+static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
+{
+ u32 con1, con2;
+
+ if (info->data->needs_adc_phy)
+ regmap_write(info->pmu_map, info->data->phy_offset, 1);
+
+ con1 = ADC_V2_CON1_SOFT_RESET;
+ writel(con1, ADC_V2_CON1(info->regs));
+
+ con2 = readl(ADC_V2_CON2(info->regs));
+ con2 &= ~ADC_V2_CON2_C_TIME(7);
+ con2 |= ADC_V2_CON2_C_TIME(0);
+ writel(con2, ADC_V2_CON2(info->regs));
+
+ /* Enable interrupts */
+ writel(1, ADC_V2_INT_EN(info->regs));
+}
+
+static const struct exynos_adc_data exynos7_adc_data = {
+ .num_channels = MAX_ADC_V1_CHANNELS,
+ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
+
+ .init_hw = exynos_adc_exynos7_init_hw,
+ .exit_hw = exynos_adc_v2_exit_hw,
+ .clear_irq = exynos_adc_v2_clear_irq,
+ .start_conv = exynos_adc_v2_start_conv,
+};
+
static const struct of_device_id exynos_adc_match[] = {
{
.compatible = "samsung,s3c2410-adc",
@@ -406,6 +444,9 @@ static const struct of_device_id exynos_adc_match[] = {
}, {
.compatible = "samsung,exynos3250-adc",
.data = &exynos3250_adc_data,
+ }, {
+ .compatible = "samsung,exynos7-adc",
+ .data = &exynos7_adc_data,
},
{},
};
@@ -558,10 +599,13 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->needs_adc_phy) {
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- info->enable_reg = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(info->enable_reg))
- return PTR_ERR(info->enable_reg);
+ info->pmu_map = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(info->pmu_map)) {
+ dev_err(&pdev->dev, "syscon regmap lookup failed.\n");
+ return PTR_ERR(info->pmu_map);
+ }
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 28a086e48776..efbfd12a4bfd 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -1,9 +1,30 @@
/*
* Copyright (C) 2013 Oskar Andero <oskar.andero@gmail.com>
+ * Copyright (C) 2014 Rose Technology
+ * Allan Bendorff Jensen <abj@rosetechnology.dk>
+ * Soren Andersen <san@rosetechnology.dk>
+ *
+ * Driver for the following ADC chips from Microchip Technology:
+ * 10-bit converters
+ * MCP3001
+ * MCP3002
+ * MCP3004
+ * MCP3008
+ * ------------
+ * 12-bit converters
+ * MCP3201
+ * MCP3202
+ * MCP3204
+ * MCP3208
+ * ------------
*
- * Driver for Microchip Technology's MCP3204 and MCP3208 ADC chips.
* Datasheet can be found here:
- * http://ww1.microchip.com/downloads/en/devicedoc/21298c.pdf
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21294E.pdf mcp3002
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21295d.pdf mcp3004/08
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21290D.pdf mcp3201
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,19 +32,29 @@
*/
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
-#define MCP_SINGLE_ENDED (1 << 3)
-#define MCP_START_BIT (1 << 4)
-
enum {
+ mcp3001,
+ mcp3002,
+ mcp3004,
+ mcp3008,
+ mcp3201,
+ mcp3202,
mcp3204,
mcp3208,
};
+struct mcp320x_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int resolution;
+};
+
struct mcp320x {
struct spi_device *spi;
struct spi_message msg;
@@ -34,19 +65,69 @@ struct mcp320x {
struct regulator *reg;
struct mutex lock;
+ const struct mcp320x_chip_info *chip_info;
};
-static int mcp320x_adc_conversion(struct mcp320x *adc, u8 msg)
+static int mcp320x_channel_to_tx_data(int device_index,
+ const unsigned int channel, bool differential)
+{
+ int start_bit = 1;
+
+ switch (device_index) {
+ case mcp3001:
+ case mcp3201:
+ return 0;
+ case mcp3002:
+ case mcp3202:
+ return ((start_bit << 4) | (!differential << 3) |
+ (channel << 2));
+ case mcp3004:
+ case mcp3204:
+ case mcp3008:
+ case mcp3208:
+ return ((start_bit << 6) | (!differential << 5) |
+ (channel << 2));
+ default:
+ return -EINVAL;
+ }
+}
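As a worked example of the transmit word assembled above (the chip and channel choice is only illustrative), a single-ended conversion on channel 3 of an MCP3208 gives:

/*
 * For an MCP3208, single-ended read of channel 3:
 *   start_bit << 6       = 0x40
 *   !differential << 5   = 0x20   (single-ended)
 *   channel << 2         = 0x0c
 * so the first byte clocked out is 0x6c.
 */
u8 tx = (1 << 6) | (1 << 5) | (3 << 2);	/* == 0x6c */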
+
+static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
+ bool differential, int device_index)
{
int ret;
- adc->tx_buf = msg;
- ret = spi_sync(adc->spi, &adc->msg);
- if (ret < 0)
- return ret;
+ adc->rx_buf[0] = 0;
+ adc->rx_buf[1] = 0;
+ adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
+ channel, differential);
+
+ if (device_index != mcp3001 && device_index != mcp3201) {
+ ret = spi_sync(adc->spi, &adc->msg);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = spi_read(adc->spi, &adc->rx_buf, sizeof(adc->rx_buf));
+ if (ret < 0)
+ return ret;
+ }
- return ((adc->rx_buf[0] & 0x3f) << 6) |
- (adc->rx_buf[1] >> 2);
+ switch (device_index) {
+ case mcp3001:
+ return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ case mcp3002:
+ case mcp3004:
+ case mcp3008:
+ return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ case mcp3201:
+ return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ case mcp3202:
+ case mcp3204:
+ case mcp3208:
+ return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ default:
+ return -EINVAL;
+ }
}
static int mcp320x_read_raw(struct iio_dev *indio_dev,
@@ -55,18 +136,17 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
{
struct mcp320x *adc = iio_priv(indio_dev);
int ret = -EINVAL;
+ int device_index = 0;
mutex_lock(&adc->lock);
+ device_index = spi_get_device_id(adc->spi)->driver_data;
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (channel->differential)
- ret = mcp320x_adc_conversion(adc,
- MCP_START_BIT | channel->address);
- else
- ret = mcp320x_adc_conversion(adc,
- MCP_START_BIT | MCP_SINGLE_ENDED |
- channel->address);
+ ret = mcp320x_adc_conversion(adc, channel->address,
+ channel->differential, device_index);
+
if (ret < 0)
goto out;
@@ -75,18 +155,15 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SCALE:
- /* Digital output code = (4096 * Vin) / Vref */
ret = regulator_get_voltage(adc->reg);
if (ret < 0)
goto out;
+ /* convert regulator output voltage to mV */
*val = ret / 1000;
- *val2 = 12;
+ *val2 = adc->chip_info->resolution;
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
-
- default:
- break;
}
out:
@@ -117,6 +194,16 @@ out:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
+static const struct iio_chan_spec mcp3201_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
+static const struct iio_chan_spec mcp3202_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL(0),
+ MCP320X_VOLTAGE_CHANNEL(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
static const struct iio_chan_spec mcp3204_channels[] = {
MCP320X_VOLTAGE_CHANNEL(0),
MCP320X_VOLTAGE_CHANNEL(1),
@@ -146,19 +233,46 @@ static const struct iio_info mcp320x_info = {
.driver_module = THIS_MODULE,
};
-struct mcp3208_chip_info {
- const struct iio_chan_spec *channels;
- unsigned int num_channels;
-};
-
-static const struct mcp3208_chip_info mcp3208_chip_infos[] = {
+static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
+ [mcp3001] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 10
+ },
+ [mcp3002] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 10
+ },
+ [mcp3004] = {
+ .channels = mcp3204_channels,
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 10
+ },
+ [mcp3008] = {
+ .channels = mcp3208_channels,
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 10
+ },
+ [mcp3201] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 12
+ },
+ [mcp3202] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 12
+ },
[mcp3204] = {
.channels = mcp3204_channels,
- .num_channels = ARRAY_SIZE(mcp3204_channels)
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 12
},
[mcp3208] = {
.channels = mcp3208_channels,
- .num_channels = ARRAY_SIZE(mcp3208_channels)
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 12
},
};
@@ -166,7 +280,7 @@ static int mcp320x_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct mcp320x *adc;
- const struct mcp3208_chip_info *chip_info;
+ const struct mcp320x_chip_info *chip_info;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
@@ -181,7 +295,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
- chip_info = &mcp3208_chip_infos[spi_get_device_id(spi)->driver_data];
+ chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
@@ -226,7 +340,45 @@ static int mcp320x_remove(struct spi_device *spi)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id mcp320x_dt_ids[] = {
+ {
+ .compatible = "mcp3001",
+ .data = &mcp320x_chip_infos[mcp3001],
+ }, {
+ .compatible = "mcp3002",
+ .data = &mcp320x_chip_infos[mcp3002],
+ }, {
+ .compatible = "mcp3004",
+ .data = &mcp320x_chip_infos[mcp3004],
+ }, {
+ .compatible = "mcp3008",
+ .data = &mcp320x_chip_infos[mcp3008],
+ }, {
+ .compatible = "mcp3201",
+ .data = &mcp320x_chip_infos[mcp3201],
+ }, {
+ .compatible = "mcp3202",
+ .data = &mcp320x_chip_infos[mcp3202],
+ }, {
+ .compatible = "mcp3204",
+ .data = &mcp320x_chip_infos[mcp3204],
+ }, {
+ .compatible = "mcp3208",
+ .data = &mcp320x_chip_infos[mcp3208],
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
+#endif
+
static const struct spi_device_id mcp320x_id[] = {
+ { "mcp3001", mcp3001 },
+ { "mcp3002", mcp3002 },
+ { "mcp3004", mcp3004 },
+ { "mcp3008", mcp3008 },
+ { "mcp3201", mcp3201 },
+ { "mcp3202", mcp3202 },
{ "mcp3204", mcp3204 },
{ "mcp3208", mcp3208 },
{ }
@@ -245,5 +397,5 @@ static struct spi_driver mcp320x_driver = {
module_spi_driver(mcp320x_driver);
MODULE_AUTHOR("Oskar Andero <oskar.andero@gmail.com>");
-MODULE_DESCRIPTION("Microchip Technology MCP3204/08");
+MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
new file mode 100644
index 000000000000..b9666f2f5e51
--- /dev/null
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* IADC register and bit definition */
+#define IADC_REVISION2 0x1
+#define IADC_REVISION2_SUPPORTED_IADC 1
+
+#define IADC_PERPH_TYPE 0x4
+#define IADC_PERPH_TYPE_ADC 8
+
+#define IADC_PERPH_SUBTYPE 0x5
+#define IADC_PERPH_SUBTYPE_IADC 3
+
+#define IADC_STATUS1 0x8
+#define IADC_STATUS1_OP_MODE 4
+#define IADC_STATUS1_REQ_STS BIT(1)
+#define IADC_STATUS1_EOC BIT(0)
+#define IADC_STATUS1_REQ_STS_EOC_MASK 0x3
+
+#define IADC_MODE_CTL 0x40
+#define IADC_OP_MODE_SHIFT 3
+#define IADC_OP_MODE_NORMAL 0
+#define IADC_TRIM_EN BIT(0)
+
+#define IADC_EN_CTL1 0x46
+#define IADC_EN_CTL1_SET BIT(7)
+
+#define IADC_CH_SEL_CTL 0x48
+
+#define IADC_DIG_PARAM 0x50
+#define IADC_DIG_DEC_RATIO_SEL_SHIFT 2
+
+#define IADC_HW_SETTLE_DELAY 0x51
+
+#define IADC_CONV_REQ 0x52
+#define IADC_CONV_REQ_SET BIT(7)
+
+#define IADC_FAST_AVG_CTL 0x5a
+#define IADC_FAST_AVG_EN 0x5b
+#define IADC_FAST_AVG_EN_SET BIT(7)
+
+#define IADC_PERH_RESET_CTL3 0xda
+#define IADC_FOLLOW_WARM_RB BIT(2)
+
+#define IADC_DATA 0x60 /* 16 bits */
+
+#define IADC_SEC_ACCESS 0xd0
+#define IADC_SEC_ACCESS_DATA 0xa5
+
+#define IADC_NOMINAL_RSENSE 0xf4
+#define IADC_NOMINAL_RSENSE_SIGN_MASK BIT(7)
+
+#define IADC_REF_GAIN_MICRO_VOLTS 17857
+
+#define IADC_INT_RSENSE_DEVIATION 15625 /* nano Ohms per bit */
+
+#define IADC_INT_RSENSE_IDEAL_VALUE 10000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_VALUE 7800 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_GF 9000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_SMIC 9700 /* micro Ohms */
+
+#define IADC_CONV_TIME_MIN_US 2000
+#define IADC_CONV_TIME_MAX_US 2100
+
+#define IADC_DEF_PRESCALING 0 /* 1:1 */
+#define IADC_DEF_DECIMATION 0 /* 512 */
+#define IADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define IADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+
+/* IADC channel list */
+#define IADC_INT_RSENSE 0
+#define IADC_EXT_RSENSE 1
+#define IADC_GAIN_17P857MV 3
+#define IADC_EXT_OFFSET_CSP_CSN 5
+#define IADC_INT_OFFSET_CSP2_CSN2 6
+
+/**
+ * struct iadc_chip - IADC Current ADC device structure.
+ * @regmap: regmap for register read/write.
+ * @dev: This device pointer.
+ * @base: base offset for the ADC peripheral.
+ * @rsense: Values of the internal and external sense resistors in micro Ohms.
+ * @poll_eoc: Poll for end of conversion instead of waiting for IRQ.
+ * @offset: Raw offset values for the internal and external channels.
+ * @gain: Raw gain of the channels.
+ * @lock: ADC lock for access to the peripheral.
+ * @complete: ADC notification after end of conversion interrupt is received.
+ */
+struct iadc_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ u16 base;
+ bool poll_eoc;
+ u32 rsense[2];
+ u16 offset[2];
+ u16 gain;
+ struct mutex lock;
+ struct completion complete;
+};
+
+static int iadc_read(struct iadc_chip *iadc, u16 offset, u8 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(iadc->regmap, iadc->base + offset, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val;
+ return 0;
+}
+
+static int iadc_write(struct iadc_chip *iadc, u16 offset, u8 data)
+{
+ return regmap_write(iadc->regmap, iadc->base + offset, data);
+}
+
+static int iadc_reset(struct iadc_chip *iadc)
+{
+ u8 data;
+ int ret;
+
+ ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_read(iadc, IADC_PERH_RESET_CTL3, &data);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+ if (ret < 0)
+ return ret;
+
+ data |= IADC_FOLLOW_WARM_RB;
+
+ return iadc_write(iadc, IADC_PERH_RESET_CTL3, data);
+}
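The reset path relies on the PMIC's secure-register handshake: each access to a protected register such as IADC_PERH_RESET_CTL3 is preceded by writing IADC_SEC_ACCESS_DATA (0xa5) to IADC_SEC_ACCESS. A small helper capturing the write half of that pattern could look like the following sketch (iadc_sec_write() is an illustration, not part of the submitted driver):

/*
 * Hypothetical helper: write a secured IADC register. Mirrors the
 * IADC_SEC_ACCESS / IADC_SEC_ACCESS_DATA sequence used in iadc_reset().
 */
static int iadc_sec_write(struct iadc_chip *iadc, u16 offset, u8 data)
{
	int ret;

	ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
	if (ret < 0)
		return ret;

	return iadc_write(iadc, offset, data);
}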
+
+static int iadc_set_state(struct iadc_chip *iadc, bool state)
+{
+ return iadc_write(iadc, IADC_EN_CTL1, state ? IADC_EN_CTL1_SET : 0);
+}
+
+static void iadc_status_show(struct iadc_chip *iadc)
+{
+ u8 mode, sta1, chan, dig, en, req;
+ int ret;
+
+ ret = iadc_read(iadc, IADC_MODE_CTL, &mode);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_DIG_PARAM, &dig);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_CH_SEL_CTL, &chan);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_CONV_REQ, &req);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+ if (ret < 0)
+ return;
+
+ ret = iadc_read(iadc, IADC_EN_CTL1, &en);
+ if (ret < 0)
+ return;
+
+ dev_err(iadc->dev,
+ "mode:%02x en:%02x chan:%02x dig:%02x req:%02x sta1:%02x\n",
+ mode, en, chan, dig, req, sta1);
+}
+
+static int iadc_configure(struct iadc_chip *iadc, int channel)
+{
+ u8 decim, mode;
+ int ret;
+
+ /* Mode selection */
+ mode = (IADC_OP_MODE_NORMAL << IADC_OP_MODE_SHIFT) | IADC_TRIM_EN;
+ ret = iadc_write(iadc, IADC_MODE_CTL, mode);
+ if (ret < 0)
+ return ret;
+
+ /* Channel selection */
+ ret = iadc_write(iadc, IADC_CH_SEL_CTL, channel);
+ if (ret < 0)
+ return ret;
+
+ /* Digital parameter setup */
+ decim = IADC_DEF_DECIMATION << IADC_DIG_DEC_RATIO_SEL_SHIFT;
+ ret = iadc_write(iadc, IADC_DIG_PARAM, decim);
+ if (ret < 0)
+ return ret;
+
+ /* HW settle time delay */
+ ret = iadc_write(iadc, IADC_HW_SETTLE_DELAY, IADC_DEF_HW_SETTLE_TIME);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_write(iadc, IADC_FAST_AVG_CTL, IADC_DEF_AVG_SAMPLES);
+ if (ret < 0)
+ return ret;
+
+ if (IADC_DEF_AVG_SAMPLES)
+ ret = iadc_write(iadc, IADC_FAST_AVG_EN, IADC_FAST_AVG_EN_SET);
+ else
+ ret = iadc_write(iadc, IADC_FAST_AVG_EN, 0);
+
+ if (ret < 0)
+ return ret;
+
+ if (!iadc->poll_eoc)
+ reinit_completion(&iadc->complete);
+
+ ret = iadc_set_state(iadc, true);
+ if (ret < 0)
+ return ret;
+
+ /* Request conversion */
+ return iadc_write(iadc, IADC_CONV_REQ, IADC_CONV_REQ_SET);
+}
+
+static int iadc_poll_wait_eoc(struct iadc_chip *iadc, unsigned int interval_us)
+{
+ unsigned int count, retry;
+ int ret;
+ u8 sta1;
+
+ retry = interval_us / IADC_CONV_TIME_MIN_US;
+
+ for (count = 0; count < retry; count++) {
+ ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+ if (ret < 0)
+ return ret;
+
+ sta1 &= IADC_STATUS1_REQ_STS_EOC_MASK;
+ if (sta1 == IADC_STATUS1_EOC)
+ return 0;
+
+ usleep_range(IADC_CONV_TIME_MIN_US, IADC_CONV_TIME_MAX_US);
+ }
+
+ iadc_status_show(iadc);
+
+ return -ETIMEDOUT;
+}
+
+static int iadc_read_result(struct iadc_chip *iadc, u16 *data)
+{
+ return regmap_bulk_read(iadc->regmap, iadc->base + IADC_DATA, data, 2);
+}
+
+static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
+{
+ unsigned int wait;
+ int ret;
+
+ ret = iadc_configure(iadc, chan);
+ if (ret < 0)
+ goto exit;
+
+ wait = BIT(IADC_DEF_AVG_SAMPLES) * IADC_CONV_TIME_MIN_US * 2;
+
+ if (iadc->poll_eoc) {
+ ret = iadc_poll_wait_eoc(iadc, wait);
+ } else {
+ ret = wait_for_completion_timeout(&iadc->complete, wait);
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ /* double check conversion status */
+ ret = iadc_poll_wait_eoc(iadc, IADC_CONV_TIME_MIN_US);
+ }
+
+ if (!ret)
+ ret = iadc_read_result(iadc, data);
+exit:
+ iadc_set_state(iadc, false);
+ if (ret < 0)
+ dev_err(iadc->dev, "conversion failed\n");
+
+ return ret;
+}
+
+static int iadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iadc_chip *iadc = iio_priv(indio_dev);
+ s32 isense_ua, vsense_uv;
+ u16 adc_raw, vsense_raw;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&iadc->lock);
+ ret = iadc_do_conversion(iadc, chan->channel, &adc_raw);
+ mutex_unlock(&iadc->lock);
+ if (ret < 0)
+ return ret;
+
+ vsense_raw = adc_raw - iadc->offset[chan->channel];
+
+ vsense_uv = vsense_raw * IADC_REF_GAIN_MICRO_VOLTS;
+ vsense_uv /= (s32)iadc->gain - iadc->offset[chan->channel];
+
+ isense_ua = vsense_uv / iadc->rsense[chan->channel];
+
+ dev_dbg(iadc->dev, "off %d gain %d adc %d %duV I %duA\n",
+ iadc->offset[chan->channel], iadc->gain,
+ adc_raw, vsense_uv, isense_ua);
+
+ *val = isense_ua;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info iadc_info = {
+ .read_raw = iadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t iadc_isr(int irq, void *dev_id)
+{
+ struct iadc_chip *iadc = dev_id;
+
+ complete(&iadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int iadc_update_offset(struct iadc_chip *iadc)
+{
+ int ret;
+
+ ret = iadc_do_conversion(iadc, IADC_GAIN_17P857MV, &iadc->gain);
+ if (ret < 0)
+ return ret;
+
+ ret = iadc_do_conversion(iadc, IADC_INT_OFFSET_CSP2_CSN2,
+ &iadc->offset[IADC_INT_RSENSE]);
+ if (ret < 0)
+ return ret;
+
+ if (iadc->gain == iadc->offset[IADC_INT_RSENSE]) {
+ dev_err(iadc->dev, "error: internal offset == gain %d\n",
+ iadc->gain);
+ return -EINVAL;
+ }
+
+ ret = iadc_do_conversion(iadc, IADC_EXT_OFFSET_CSP_CSN,
+ &iadc->offset[IADC_EXT_RSENSE]);
+ if (ret < 0)
+ return ret;
+
+ if (iadc->gain == iadc->offset[IADC_EXT_RSENSE]) {
+ dev_err(iadc->dev, "error: external offset == gain %d\n",
+ iadc->gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iadc_version_check(struct iadc_chip *iadc)
+{
+ u8 val;
+ int ret;
+
+ ret = iadc_read(iadc, IADC_PERPH_TYPE, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_PERPH_TYPE_ADC) {
+ dev_err(iadc->dev, "%d is not ADC\n", val);
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_PERPH_SUBTYPE, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_PERPH_SUBTYPE_IADC) {
+ dev_err(iadc->dev, "%d is not IADC\n", val);
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_REVISION2, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val < IADC_REVISION2_SUPPORTED_IADC) {
+ dev_err(iadc->dev, "revision %d not supported\n", val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iadc_rsense_read(struct iadc_chip *iadc, struct device_node *node)
+{
+ int ret, sign, int_sense;
+ u8 deviation;
+
+ ret = of_property_read_u32(node, "qcom,external-resistor-micro-ohms",
+ &iadc->rsense[IADC_EXT_RSENSE]);
+ if (ret < 0)
+ iadc->rsense[IADC_EXT_RSENSE] = IADC_INT_RSENSE_IDEAL_VALUE;
+
+ if (!iadc->rsense[IADC_EXT_RSENSE]) {
+ dev_err(iadc->dev, "external resistor can't be zero Ohms");
+ return -EINVAL;
+ }
+
+ ret = iadc_read(iadc, IADC_NOMINAL_RSENSE, &deviation);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The stored deviation value is an offset from 10 milli Ohms; bit 7 is
+ * the sign, and the remaining bits have an LSB of 15625 nano Ohms.
+ */
+ sign = (deviation & IADC_NOMINAL_RSENSE_SIGN_MASK) ? -1 : 1;
+
+ deviation &= ~IADC_NOMINAL_RSENSE_SIGN_MASK;
+
+ /* Scale it to nano Ohms */
+ int_sense = IADC_INT_RSENSE_IDEAL_VALUE * 1000;
+ int_sense += sign * deviation * IADC_INT_RSENSE_DEVIATION;
+ int_sense /= 1000; /* micro Ohms */
+
+ iadc->rsense[IADC_INT_RSENSE] = int_sense;
+ return 0;
+}
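A quick worked example of the decoding above, with a made-up calibration byte of 0x85:

/* 0x85 is a hypothetical raw IADC_NOMINAL_RSENSE value. */
u8 deviation = 0x85;
int sign = (deviation & IADC_NOMINAL_RSENSE_SIGN_MASK) ? -1 : 1;	/* -1 */
int int_sense;

deviation &= ~IADC_NOMINAL_RSENSE_SIGN_MASK;				/* 5 */
int_sense = IADC_INT_RSENSE_IDEAL_VALUE * 1000;				/* 10000000 nano Ohms */
int_sense += sign * deviation * IADC_INT_RSENSE_DEVIATION;		/*  9921875 nano Ohms */
int_sense /= 1000;							/*     9921 micro Ohms */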
+
+static const struct iio_chan_spec iadc_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "INTERNAL_RSENSE",
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "EXTERNAL_RSENSE",
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+};
+
+static int iadc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct iadc_chip *iadc;
+ int ret, irq_eoc;
+ u32 res;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*iadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iadc = iio_priv(indio_dev);
+ iadc->dev = dev;
+
+ iadc->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!iadc->regmap)
+ return -ENODEV;
+
+ init_completion(&iadc->complete);
+ mutex_init(&iadc->lock);
+
+ ret = of_property_read_u32(node, "reg", &res);
+ if (ret < 0)
+ return -ENODEV;
+
+ iadc->base = res;
+
+ ret = iadc_version_check(iadc);
+ if (ret < 0)
+ return -ENODEV;
+
+ ret = iadc_rsense_read(iadc, node);
+ if (ret < 0)
+ return -ENODEV;
+
+ dev_dbg(iadc->dev, "sense resistors %d and %d micro Ohm\n",
+ iadc->rsense[IADC_INT_RSENSE],
+ iadc->rsense[IADC_EXT_RSENSE]);
+
+ irq_eoc = platform_get_irq(pdev, 0);
+ if (irq_eoc == -EPROBE_DEFER)
+ return irq_eoc;
+
+ if (irq_eoc < 0)
+ iadc->poll_eoc = true;
+
+ ret = iadc_reset(iadc);
+ if (ret < 0) {
+ dev_err(dev, "reset failed\n");
+ return ret;
+ }
+
+ if (!iadc->poll_eoc) {
+ ret = devm_request_irq(dev, irq_eoc, iadc_isr, 0,
+ "spmi-iadc", iadc);
+ if (!ret)
+ enable_irq_wake(irq_eoc);
+ else
+ return ret;
+ } else {
+ device_init_wakeup(iadc->dev, 1);
+ }
+
+ ret = iadc_update_offset(iadc);
+ if (ret < 0) {
+ dev_err(dev, "failed offset calibration\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = node;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &iadc_info;
+ indio_dev->channels = iadc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iadc_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id iadc_match_table[] = {
+ { .compatible = "qcom,spmi-iadc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, iadc_match_table);
+
+static struct platform_driver iadc_driver = {
+ .driver = {
+ .name = "qcom-spmi-iadc",
+ .of_match_table = iadc_match_table,
+ },
+ .probe = iadc_probe,
+};
+
+module_platform_driver(iadc_driver);
+
+MODULE_ALIAS("platform:qcom-spmi-iadc");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC current ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index e074a0b03f28..8d4e019ea4ca 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -18,13 +18,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#define SARADC_DATA 0x00
-#define SARADC_DATA_MASK 0x3ff
#define SARADC_STAS 0x04
#define SARADC_STAS_BUSY BIT(0)
@@ -38,15 +38,22 @@
#define SARADC_DLY_PU_SOC 0x0c
#define SARADC_DLY_PU_SOC_MASK 0x3f
-#define SARADC_BITS 10
#define SARADC_TIMEOUT msecs_to_jiffies(100)
+struct rockchip_saradc_data {
+ int num_bits;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ unsigned long clk_rate;
+};
+
struct rockchip_saradc {
void __iomem *regs;
struct clk *pclk;
struct clk *clk;
struct completion completion;
struct regulator *vref;
+ const struct rockchip_saradc_data *data;
u16 last_val;
};
@@ -90,7 +97,7 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
}
*val = ret / 1000;
- *val2 = SARADC_BITS;
+ *val2 = info->data->num_bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
@@ -103,7 +110,7 @@ static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
/* Read value */
info->last_val = readl_relaxed(info->regs + SARADC_DATA);
- info->last_val &= SARADC_DATA_MASK;
+ info->last_val &= GENMASK(info->data->num_bits - 1, 0);
/* Clear irq & power down adc */
writel_relaxed(0, info->regs + SARADC_CTRL);
@@ -133,12 +140,44 @@ static const struct iio_chan_spec rockchip_saradc_iio_channels[] = {
ADC_CHANNEL(2, "adc2"),
};
+static const struct rockchip_saradc_data saradc_data = {
+ .num_bits = 10,
+ .channels = rockchip_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels),
+ .clk_rate = 1000000,
+};
+
+static const struct iio_chan_spec rockchip_rk3066_tsadc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+};
+
+static const struct rockchip_saradc_data rk3066_tsadc_data = {
+ .num_bits = 12,
+ .channels = rockchip_rk3066_tsadc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3066_tsadc_iio_channels),
+ .clk_rate = 50000,
+};
+
+static const struct of_device_id rockchip_saradc_match[] = {
+ {
+ .compatible = "rockchip,saradc",
+ .data = &saradc_data,
+ }, {
+ .compatible = "rockchip,rk3066-tsadc",
+ .data = &rk3066_tsadc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
+
static int rockchip_saradc_probe(struct platform_device *pdev)
{
struct rockchip_saradc *info = NULL;
struct device_node *np = pdev->dev.of_node;
struct iio_dev *indio_dev = NULL;
struct resource *mem;
+ const struct of_device_id *match;
int ret;
int irq;
@@ -152,6 +191,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
}
info = iio_priv(indio_dev);
+ match = of_match_device(rockchip_saradc_match, &pdev->dev);
+ info->data = match->data;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
info->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(info->regs))
@@ -192,10 +234,10 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
}
/*
- * Use a default of 1MHz for the converter clock.
+ * Use a default value for the converter clock.
* This may become user-configurable in the future.
*/
- ret = clk_set_rate(info->clk, 1000000);
+ ret = clk_set_rate(info->clk, info->data->clk_rate);
if (ret < 0) {
dev_err(&pdev->dev, "failed to set adc clk rate, %d\n", ret);
return ret;
@@ -227,8 +269,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
indio_dev->info = &rockchip_saradc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = rockchip_saradc_iio_channels;
- indio_dev->num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels);
+ indio_dev->channels = info->data->channels;
+ indio_dev->num_channels = info->data->num_channels;
ret = iio_device_register(indio_dev);
if (ret)
@@ -296,18 +338,11 @@ static int rockchip_saradc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rockchip_saradc_pm_ops,
rockchip_saradc_suspend, rockchip_saradc_resume);
-static const struct of_device_id rockchip_saradc_match[] = {
- { .compatible = "rockchip,saradc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
-
static struct platform_driver rockchip_saradc_driver = {
.probe = rockchip_saradc_probe,
.remove = rockchip_saradc_remove,
.driver = {
.name = "rockchip-saradc",
- .owner = THIS_MODULE,
.of_match_table = rockchip_saradc_match,
.pm = &rockchip_saradc_pm_ops,
},
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 4a10ae97dbf2..8ec353c01d98 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -91,7 +91,7 @@
#define VF610_ADC_CAL 0x80
/* Other field define */
-#define VF610_ADC_ADCHC(x) ((x) & 0xF)
+#define VF610_ADC_ADCHC(x) ((x) & 0x1F)
#define VF610_ADC_AIEN (0x1 << 7)
#define VF610_ADC_CONV_DISABLE 0x1F
#define VF610_ADC_HS_COCO0 0x1
@@ -153,6 +153,12 @@ struct vf610_adc {
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
}
+#define VF610_ADC_TEMPERATURE_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+}
+
static const struct iio_chan_spec vf610_adc_iio_channels[] = {
VF610_ADC_CHAN(0, IIO_VOLTAGE),
VF610_ADC_CHAN(1, IIO_VOLTAGE),
@@ -170,6 +176,7 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
VF610_ADC_CHAN(13, IIO_VOLTAGE),
VF610_ADC_CHAN(14, IIO_VOLTAGE),
VF610_ADC_CHAN(15, IIO_VOLTAGE),
+ VF610_ADC_TEMPERATURE_CHAN(26, IIO_TEMP),
/* sentinel */
};
@@ -451,6 +458,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
mutex_lock(&indio_dev->mlock);
reinit_completion(&info->completion);
@@ -468,7 +476,23 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
return ret;
}
- *val = info->value;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->value;
+ break;
+ case IIO_TEMP:
+ /*
+ * Calculate in degrees Celsius times 1000,
+ * using a sensor slope of 1.84 mV/°C and
+ * 696 mV at 25°C.
+ */
+ *val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+ break;
+ default:
+ mutex_unlock(&indio_dev->mlock);
+ return -EINVAL;
+ }
+
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
@@ -569,9 +593,9 @@ static int vf610_adc_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- return -EINVAL;
+ return irq;
}
ret = devm_request_irq(info->dev, irq,
@@ -586,8 +610,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
PTR_ERR(info->clk));
- ret = PTR_ERR(info->clk);
- return ret;
+ return PTR_ERR(info->clk);
}
info->vref = devm_regulator_get(&pdev->dev, "vref");
@@ -681,17 +704,19 @@ static int vf610_adc_resume(struct device *dev)
ret = clk_prepare_enable(info->clk);
if (ret)
- return ret;
+ goto disable_reg;
vf610_adc_hw_init(info);
return 0;
+
+disable_reg:
+ regulator_disable(info->vref);
+ return ret;
}
#endif
-static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops,
- vf610_adc_suspend,
- vf610_adc_resume);
+static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend, vf610_adc_resume);
static struct platform_driver vf610_adc_driver = {
.probe = vf610_adc_probe,
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 24cfe4e044f9..edd13d2b4121 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -44,18 +44,18 @@ st_sensors_write_data_with_mask_error:
return err;
}
-static int st_sensors_match_odr(struct st_sensors *sensor,
+static int st_sensors_match_odr(struct st_sensor_settings *sensor_settings,
unsigned int odr, struct st_sensor_odr_avl *odr_out)
{
int i, ret = -EINVAL;
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
- if (sensor->odr.odr_avl[i].hz == 0)
+ if (sensor_settings->odr.odr_avl[i].hz == 0)
goto st_sensors_match_odr_error;
- if (sensor->odr.odr_avl[i].hz == odr) {
- odr_out->hz = sensor->odr.odr_avl[i].hz;
- odr_out->value = sensor->odr.odr_avl[i].value;
+ if (sensor_settings->odr.odr_avl[i].hz == odr) {
+ odr_out->hz = sensor_settings->odr.odr_avl[i].hz;
+ odr_out->value = sensor_settings->odr.odr_avl[i].value;
ret = 0;
break;
}
@@ -71,23 +71,26 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = st_sensors_match_odr(sdata->sensor, odr, &odr_out);
+ err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
goto st_sensors_match_odr_error;
- if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
- (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
if (sdata->enabled == true) {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->odr.addr,
- sdata->sensor->odr.mask,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
odr_out.value);
} else {
err = 0;
}
} else {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->odr.addr, sdata->sensor->odr.mask,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
odr_out.value);
}
if (err >= 0)
@@ -98,16 +101,16 @@ st_sensors_match_odr_error:
}
EXPORT_SYMBOL(st_sensors_set_odr);
-static int st_sensors_match_fs(struct st_sensors *sensor,
+static int st_sensors_match_fs(struct st_sensor_settings *sensor_settings,
unsigned int fs, int *index_fs_avl)
{
int i, ret = -EINVAL;
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if (sensor->fs.fs_avl[i].num == 0)
+ if (sensor_settings->fs.fs_avl[i].num == 0)
goto st_sensors_match_odr_error;
- if (sensor->fs.fs_avl[i].num == fs) {
+ if (sensor_settings->fs.fs_avl[i].num == fs) {
*index_fs_avl = i;
ret = 0;
break;
@@ -118,25 +121,24 @@ st_sensors_match_odr_error:
return ret;
}
-static int st_sensors_set_fullscale(struct iio_dev *indio_dev,
- unsigned int fs)
+static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
{
int err, i = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = st_sensors_match_fs(sdata->sensor, fs, &i);
+ err = st_sensors_match_fs(sdata->sensor_settings, fs, &i);
if (err < 0)
goto st_accel_set_fullscale_error;
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->fs.addr,
- sdata->sensor->fs.mask,
- sdata->sensor->fs.fs_avl[i].value);
+ sdata->sensor_settings->fs.addr,
+ sdata->sensor_settings->fs.mask,
+ sdata->sensor_settings->fs.fs_avl[i].value);
if (err < 0)
goto st_accel_set_fullscale_error;
sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &sdata->sensor->fs.fs_avl[i];
+ &sdata->sensor_settings->fs.fs_avl[i];
return err;
st_accel_set_fullscale_error:
@@ -153,10 +155,12 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
struct st_sensor_data *sdata = iio_priv(indio_dev);
if (enable) {
- tmp_value = sdata->sensor->pw.value_on;
- if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
- (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
- err = st_sensors_match_odr(sdata->sensor,
+ tmp_value = sdata->sensor_settings->pw.value_on;
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
+ err = st_sensors_match_odr(sdata->sensor_settings,
sdata->odr, &odr_out);
if (err < 0)
goto set_enable_error;
@@ -164,8 +168,8 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
found = true;
}
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->pw.addr,
- sdata->sensor->pw.mask, tmp_value);
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask, tmp_value);
if (err < 0)
goto set_enable_error;
@@ -175,9 +179,9 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
sdata->odr = odr_out.hz;
} else {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->pw.addr,
- sdata->sensor->pw.mask,
- sdata->sensor->pw.value_off);
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask,
+ sdata->sensor_settings->pw.value_off);
if (err < 0)
goto set_enable_error;
@@ -194,8 +198,9 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
struct st_sensor_data *sdata = iio_priv(indio_dev);
return st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->enable_axis.addr,
- sdata->sensor->enable_axis.mask, axis_enable);
+ sdata->sensor_settings->enable_axis.addr,
+ sdata->sensor_settings->enable_axis.mask,
+ axis_enable);
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
@@ -236,13 +241,13 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
EXPORT_SYMBOL(st_sensors_power_disable);
static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+ struct st_sensors_platform_data *pdata)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
switch (pdata->drdy_int_pin) {
case 1:
- if (sdata->sensor->drdy_irq.mask_int1 == 0) {
+ if (sdata->sensor_settings->drdy_irq.mask_int1 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT1 not available.\n");
return -EINVAL;
@@ -250,7 +255,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
sdata->drdy_int_pin = 1;
break;
case 2:
- if (sdata->sensor->drdy_irq.mask_int2 == 0) {
+ if (sdata->sensor_settings->drdy_irq.mask_int2 == 0) {
dev_err(&indio_dev->dev,
"DRDY on INT2 not available.\n");
return -EINVAL;
@@ -318,7 +323,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (sdata->current_fullscale) {
err = st_sensors_set_fullscale(indio_dev,
- sdata->current_fullscale->num);
+ sdata->current_fullscale->num);
if (err < 0)
return err;
} else
@@ -330,7 +335,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
/* set BDU */
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->bdu.addr, sdata->sensor->bdu.mask, true);
+ sdata->sensor_settings->bdu.addr,
+ sdata->sensor_settings->bdu.mask, true);
if (err < 0)
return err;
@@ -346,26 +352,28 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor->drdy_irq.addr)
+ if (!sdata->sensor_settings->drdy_irq.addr)
return 0;
/* Enable/Disable the interrupt generator 1. */
- if (sdata->sensor->drdy_irq.ig1.en_addr > 0) {
+ if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->drdy_irq.ig1.en_addr,
- sdata->sensor->drdy_irq.ig1.en_mask, (int)enable);
+ sdata->sensor_settings->drdy_irq.ig1.en_addr,
+ sdata->sensor_settings->drdy_irq.ig1.en_mask,
+ (int)enable);
if (err < 0)
goto st_accel_set_dataready_irq_error;
}
if (sdata->drdy_int_pin == 1)
- drdy_mask = sdata->sensor->drdy_irq.mask_int1;
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int1;
else
- drdy_mask = sdata->sensor->drdy_irq.mask_int2;
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->drdy_irq.addr, drdy_mask, (int)enable);
+ sdata->sensor_settings->drdy_irq.addr,
+ drdy_mask, (int)enable);
st_accel_set_dataready_irq_error:
return err;
@@ -378,8 +386,8 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
struct st_sensor_data *sdata = iio_priv(indio_dev);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if ((sdata->sensor->fs.fs_avl[i].gain == scale) &&
- (sdata->sensor->fs.fs_avl[i].gain != 0)) {
+ if ((sdata->sensor_settings->fs.fs_avl[i].gain == scale) &&
+ (sdata->sensor_settings->fs.fs_avl[i].gain != 0)) {
err = 0;
break;
}
@@ -388,7 +396,7 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
goto st_sensors_match_scale_error;
err = st_sensors_set_fullscale(indio_dev,
- sdata->sensor->fs.fs_avl[i].num);
+ sdata->sensor_settings->fs.fs_avl[i].num);
st_sensors_match_scale_error:
return err;
@@ -439,7 +447,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
if (err < 0)
goto out;
- msleep((sdata->sensor->bootime * 1000) / sdata->odr);
+ msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
if (err < 0)
goto out;
@@ -456,7 +464,8 @@ out:
EXPORT_SYMBOL(st_sensors_read_info_raw);
int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensors *sensors)
+ int num_sensors_list,
+ const struct st_sensor_settings *sensor_settings)
{
u8 wai;
int i, n, err;
@@ -470,23 +479,24 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
}
for (i = 0; i < num_sensors_list; i++) {
- if (sensors[i].wai == wai)
+ if (sensor_settings[i].wai == wai)
break;
}
if (i == num_sensors_list)
goto device_not_supported;
- for (n = 0; n < ARRAY_SIZE(sensors[i].sensors_supported); n++) {
+ for (n = 0; n < ARRAY_SIZE(sensor_settings[i].sensors_supported); n++) {
if (strcmp(indio_dev->name,
- &sensors[i].sensors_supported[n][0]) == 0)
+ &sensor_settings[i].sensors_supported[n][0]) == 0)
break;
}
- if (n == ARRAY_SIZE(sensors[i].sensors_supported)) {
+ if (n == ARRAY_SIZE(sensor_settings[i].sensors_supported)) {
dev_err(&indio_dev->dev, "device name and WhoAmI mismatch.\n");
goto sensor_name_mismatch;
}
- sdata->sensor = (struct st_sensors *)&sensors[i];
+ sdata->sensor_settings =
+ (struct st_sensor_settings *)&sensor_settings[i];
return i;
@@ -508,11 +518,11 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
- if (sdata->sensor->odr.odr_avl[i].hz == 0)
+ if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- sdata->sensor->odr.odr_avl[i].hz);
+ sdata->sensor_settings->odr.odr_avl[i].hz);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
@@ -530,11 +540,11 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
mutex_lock(&indio_dev->mlock);
for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
- if (sdata->sensor->fs.fs_avl[i].num == 0)
+ if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor->fs.fs_avl[i].gain);
+ sdata->sensor_settings->fs.fs_avl[i].gain);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index bb6f3085f57b..98cfee296d46 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -72,6 +72,7 @@ void st_sensors_i2c_configure(struct iio_dev *indio_dev,
indio_dev->dev.parent = &client->dev;
indio_dev->name = client->name;
+ sdata->dev = &client->dev;
sdata->tf = &st_sensors_tf_i2c;
sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 251baf6abc25..78a6a1ab3ece 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -111,6 +111,7 @@ void st_sensors_spi_configure(struct iio_dev *indio_dev,
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi->modalias;
+ sdata->dev = &spi->dev;
sdata->tf = &st_sensors_tf_spi;
sdata->get_irq_data_ready = st_sensors_spi_get_irq;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index c197360c450b..5353d6328c54 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -30,8 +30,7 @@ static const struct st_sensors_platform_data gyro_pdata = {
.drdy_int_pin = 2,
};
-int st_gyro_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
void st_gyro_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index f156fc6c5c6c..f07a2336f7dc 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -103,7 +103,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_gyro_sensors[] = {
+static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
.wai = ST_GYRO_1_WAI_EXP,
.sensors_supported = {
@@ -309,8 +309,7 @@ static const struct iio_trigger_ops st_gyro_trigger_ops = {
#define ST_GYRO_TRIGGER_OPS NULL
#endif
-int st_gyro_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
int irq = gdata->get_irq_data_ready(indio_dev);
@@ -322,20 +321,22 @@ int st_gyro_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors);
+ ARRAY_SIZE(st_gyro_sensors_settings),
+ st_gyro_sensors_settings);
if (err < 0)
return err;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
- gdata->multiread_bit = gdata->sensor->multi_read_bit;
- indio_dev->channels = gdata->sensor->ch;
+ gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
+ indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &gdata->sensor->fs.fs_avl[0];
- gdata->odr = gdata->sensor->odr.odr_avl[0].hz;
+ &gdata->sensor_settings->fs.fs_avl[0];
+ gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev, pdata);
+ err = st_sensors_init_sensor(indio_dev,
+ (struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
return err;
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 8fa0ad2ef4ef..64480b16c689 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -67,13 +67,11 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
return -ENOMEM;
gdata = iio_priv(indio_dev);
- gdata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_gyro_of_match);
st_sensors_i2c_configure(indio_dev, client, gdata);
- err = st_gyro_common_probe(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ err = st_gyro_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index b4ad3be26687..e59bead6bc3c 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -29,12 +29,10 @@ static int st_gyro_spi_probe(struct spi_device *spi)
return -ENOMEM;
gdata = iio_priv(indio_dev);
- gdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, gdata);
- err = st_gyro_common_probe(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ err = st_gyro_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index e116bd8dd0e4..4813b793b9f7 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -22,4 +22,14 @@ config SI7005
To compile this driver as a module, choose M here: the module
will be called si7005.
+config SI7020
+ tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
+ depends on I2C
+ help
+ Say yes here to build support for the Silicon Labs Si7013/20/21
+ Relative Humidity and Temperature Sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7020.
+
endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index e3f3d942e646..86e2d26e9f4d 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_SI7005) += si7005.o
+obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
new file mode 100644
index 000000000000..b54164677b89
--- /dev/null
+++ b/drivers/iio/humidity/si7020.c
@@ -0,0 +1,161 @@
+/*
+ * si7020.c - Silicon Labs Si7013/20/21 Relative Humidity and Temp Sensors
+ * Copyright (c) 2013,2014 Uplogix, Inc.
+ * David Barksdale <dbarksdale@uplogix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors
+ * are i2c devices which have an identical programming interface for
+ * measuring relative humidity and temperature. The Si7013 has an additional
+ * temperature input which this driver does not support.
+ *
+ * Data Sheets:
+ * Si7013: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7013.pdf
+ * Si7020: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7020.pdf
+ * Si7021: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7021.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Measure Relative Humidity, Hold Master Mode */
+#define SI7020CMD_RH_HOLD 0xE5
+/* Measure Temperature, Hold Master Mode */
+#define SI7020CMD_TEMP_HOLD 0xE3
+/* Software Reset */
+#define SI7020CMD_RESET 0xFE
+
+static int si7020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct i2c_client *client = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(client,
+ chan->type == IIO_TEMP ?
+ SI7020CMD_TEMP_HOLD :
+ SI7020CMD_RH_HOLD);
+ if (ret < 0)
+ return ret;
+ *val = ret >> 2;
+ if (chan->type == IIO_HUMIDITYRELATIVE)
+ *val &= GENMASK(11, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP)
+ *val = 175720; /* = 175.72 * 1000 */
+ else
+ *val = 125 * 1000;
+ *val2 = 65536 >> 2;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * Since iio_convert_raw_to_processed_unlocked assumes the offset
+ * is an integer, we have to round these values, losing a little
+ * accuracy: relative humidity will read 0.0032959% too high and
+ * temperature 0.00277344 degrees too high. This is acceptable
+ * because it is within the accuracy of the sensor.
+ */
+ if (chan->type == IIO_TEMP)
+ *val = -4368; /* = -46.85 * (65536 >> 2) / 175.72 */
+ else
+ *val = -786; /* = -6 * (65536 >> 2) / 125 */
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec si7020_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7020_info = {
+ .read_raw = si7020_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int si7020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct i2c_client **data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ /* Reset device, loads default settings. */
+ ret = i2c_smbus_write_byte(client, SI7020CMD_RESET);
+ if (ret < 0)
+ return ret;
+ /* Wait the maximum power-up time after software reset. */
+ msleep(15);
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ *data = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7020_info;
+ indio_dev->channels = si7020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7020_channels);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7020_id[] = {
+ { "si7020", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7020_id);
+
+static struct i2c_driver si7020_driver = {
+ .driver.name = "si7020",
+ .probe = si7020_probe,
+ .id_table = si7020_id,
+};
+
+module_i2c_driver(si7020_driver);
+MODULE_DESCRIPTION("Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors");
+MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index f0846108d006..866fe904cba2 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -100,6 +100,28 @@ static int iio_dev_node_match(struct device *dev, void *data)
return dev->of_node == data && dev->type == &iio_device_type;
}
+/**
+ * __of_iio_simple_xlate - translate iiospec to the IIO channel index
+ * @indio_dev: pointer to the iio_dev structure
+ * @iiospec: IIO specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for the common case of
+ * 1:1 mapped channels in IIO chips. It performs only one sanity check:
+ * that the IIO index is less than num_channels (as specified in the
+ * iio_dev).
+ */
+static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ if (!iiospec->args_count)
+ return 0;
+
+ if (iiospec->args[0] >= indio_dev->num_channels)
+ return -EINVAL;
+
+ return iiospec->args[0];
+}
+
static int __of_iio_channel_get(struct iio_channel *channel,
struct device_node *np, int index)
{
@@ -122,18 +144,19 @@ static int __of_iio_channel_get(struct iio_channel *channel,
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
- index = iiospec.args_count ? iiospec.args[0] : 0;
- if (index >= indio_dev->num_channels) {
- err = -EINVAL;
+ if (indio_dev->info->of_xlate)
+ index = indio_dev->info->of_xlate(indio_dev, &iiospec);
+ else
+ index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ if (index < 0)
goto err_put;
- }
channel->channel = &indio_dev->channels[index];
return 0;
err_put:
iio_device_put(indio_dev);
- return err;
+ return index;
}
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
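With the of_xlate hook above in place, a provider driver whose consumer cells do not map 1:1 onto channel array indices can supply its own translation. A hedged sketch (hypothetical driver names; only the shape of the .of_xlate callback is taken from this patch):

/* Hypothetical provider callback: translate the consumer's cell value by
 * channel address instead of by array position. */
static int foo_adc_of_xlate(struct iio_dev *indio_dev,
			    const struct of_phandle_args *iiospec)
{
	int i;

	if (!iiospec->args_count)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].address == iiospec->args[0])
			return i;

	return -EINVAL;
}

static const struct iio_info foo_adc_info = {
	.driver_module = THIS_MODULE,
	.read_raw = foo_adc_read_raw,	/* assumed elsewhere in the driver */
	.of_xlate = foo_adc_of_xlate,
};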
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 694e33e0fb72..7e81d00ef0c3 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -18,8 +18,7 @@
#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
-int st_magn_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_magn_common_probe(struct iio_dev *indio_dev);
void st_magn_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 68cae86dbd29..8ade473f99fe 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -149,7 +149,7 @@ static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3)
};
-static const struct st_sensors st_magn_sensors[] = {
+static const struct st_sensor_settings st_magn_sensors_settings[] = {
{
.wai = ST_MAGN_1_WAI_EXP,
.sensors_supported = {
@@ -361,8 +361,7 @@ static const struct iio_info magn_info = {
.write_raw = &st_magn_write_raw,
};
-int st_magn_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata)
+int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
int irq = mdata->get_irq_data_ready(indio_dev);
@@ -374,20 +373,21 @@ int st_magn_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_magn_sensors), st_magn_sensors);
+ ARRAY_SIZE(st_magn_sensors_settings),
+ st_magn_sensors_settings);
if (err < 0)
return err;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
- mdata->multiread_bit = mdata->sensor->multi_read_bit;
- indio_dev->channels = mdata->sensor->ch;
+ mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
+ indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &mdata->sensor->fs.fs_avl[0];
- mdata->odr = mdata->sensor->odr.odr_avl[0].hz;
+ &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev, pdata);
+ err = st_sensors_init_sensor(indio_dev, NULL);
if (err < 0)
return err;
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 689250058442..92e5c15452a3 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -51,12 +51,11 @@ static int st_magn_i2c_probe(struct i2c_client *client,
return -ENOMEM;
mdata = iio_priv(indio_dev);
- mdata->dev = &client->dev;
st_sensors_of_i2c_probe(client, st_magn_of_match);
st_sensors_i2c_configure(indio_dev, client, mdata);
- err = st_magn_common_probe(indio_dev, NULL);
+ err = st_magn_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index a6143ea51dfc..7adacf160146 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -29,11 +29,10 @@ static int st_magn_spi_probe(struct spi_device *spi)
return -ENOMEM;
mdata = iio_priv(indio_dev);
- mdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, mdata);
- err = st_magn_common_probe(indio_dev, NULL);
+ err = st_magn_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15afbc919521..a3be53792072 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,17 @@
menu "Pressure sensors"
+config BMP280
+ tristate "Bosch Sensortec BMP280 pressure sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the Bosch Sensortec BMP280
+ pressure and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmp280.
+
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
select IIO_BUFFER
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 90a37e85cf21..88011f2ae00e 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL3115) += mpl3115.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
new file mode 100644
index 000000000000..75038dacfff1
--- /dev/null
+++ b/drivers/iio/pressure/bmp280.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Driver for Bosch Sensortec BMP280 digital pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "bmp280: " fmt
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define BMP280_REG_TEMP_XLSB 0xFC
+#define BMP280_REG_TEMP_LSB 0xFB
+#define BMP280_REG_TEMP_MSB 0xFA
+#define BMP280_REG_PRESS_XLSB 0xF9
+#define BMP280_REG_PRESS_LSB 0xF8
+#define BMP280_REG_PRESS_MSB 0xF7
+
+#define BMP280_REG_CONFIG 0xF5
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_STATUS 0xF3
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP280_REG_COMP_TEMP_START 0x88
+#define BMP280_COMP_TEMP_REG_COUNT 6
+
+#define BMP280_REG_COMP_PRESS_START 0x8E
+#define BMP280_COMP_PRESS_REG_COUNT 18
+
+#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF 0
+#define BMP280_FILTER_2X BIT(2)
+#define BMP280_FILTER_4X BIT(3)
+#define BMP280_FILTER_8X (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X BIT(4)
+
+#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP 0
+#define BMP280_OSRS_TEMP_1X BIT(5)
+#define BMP280_OSRS_TEMP_2X BIT(6)
+#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_8X BIT(7)
+#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5))
+
+#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP 0
+#define BMP280_OSRS_PRESS_1X BIT(2)
+#define BMP280_OSRS_PRESS_2X BIT(3)
+#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_8X BIT(4)
+#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2))
+
+#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP 0
+#define BMP280_MODE_FORCED BIT(0)
+#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+
+#define BMP280_CHIP_ID 0x58
+#define BMP280_SOFT_RESET_VAL 0xB6
+
+struct bmp280_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * calculation.
+ */
+ s32 t_fine;
+};
+
+/* Compensation parameters. */
+struct bmp280_comp_temp {
+ u16 dig_t1;
+ s16 dig_t2, dig_t3;
+};
+
+struct bmp280_comp_press {
+ u16 dig_p1;
+ s16 dig_p2, dig_p3, dig_p4, dig_p5, dig_p6, dig_p7, dig_p8, dig_p9;
+};
+
+static const struct iio_chan_spec bmp280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP280_REG_TEMP_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+ .volatile_reg = bmp280_is_volatile_reg,
+};
+
+static int bmp280_read_compensation_temp(struct bmp280_data *data,
+ struct bmp280_comp_temp *comp)
+{
+ int ret;
+ __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+ buf, BMP280_COMP_TEMP_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ comp->dig_t1 = (u16) le16_to_cpu(buf[0]);
+ comp->dig_t2 = (s16) le16_to_cpu(buf[1]);
+ comp->dig_t3 = (s16) le16_to_cpu(buf[2]);
+
+ return 0;
+}
+
+static int bmp280_read_compensation_press(struct bmp280_data *data,
+ struct bmp280_comp_press *comp)
+{
+ int ret;
+ __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+ buf, BMP280_COMP_PRESS_REG_COUNT);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read pressure calibration parameters\n");
+ return ret;
+ }
+
+ comp->dig_p1 = (u16) le16_to_cpu(buf[0]);
+ comp->dig_p2 = (s16) le16_to_cpu(buf[1]);
+ comp->dig_p3 = (s16) le16_to_cpu(buf[2]);
+ comp->dig_p4 = (s16) le16_to_cpu(buf[3]);
+ comp->dig_p5 = (s16) le16_to_cpu(buf[4]);
+ comp->dig_p6 = (s16) le16_to_cpu(buf[5]);
+ comp->dig_p7 = (s16) le16_to_cpu(buf[6]);
+ comp->dig_p8 = (s16) le16_to_cpu(buf[7]);
+ comp->dig_p9 = (s16) le16_to_cpu(buf[8]);
+
+ return 0;
+}
+
+/*
+ * Returns temperature in DegC, with a resolution of 0.01 DegC. An output
+ * value of "5123" equals 51.23 DegC. t_fine carries the fine-resolution
+ * temperature and is kept in the driver data for the subsequent pressure
+ * compensation.
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static s32 bmp280_compensate_temp(struct bmp280_data *data,
+ struct bmp280_comp_temp *comp,
+ s32 adc_temp)
+{
+ s32 var1, var2, t;
+
+ var1 = (((adc_temp >> 3) - ((s32) comp->dig_t1 << 1)) *
+ ((s32) comp->dig_t2)) >> 11;
+ var2 = (((((adc_temp >> 4) - ((s32) comp->dig_t1)) *
+ ((adc_temp >> 4) - ((s32) comp->dig_t1))) >> 12) *
+ ((s32) comp->dig_t3)) >> 14;
+
+ data->t_fine = var1 + var2;
+ t = (data->t_fine * 5 + 128) >> 8;
+
+ return t;
+}
+
+/*
+ * Returns pressure in Pa as unsigned 32 bit integer in Q24.8 format (24
+ * integer bits and 8 fractional bits). Output value of "24674867"
+ * represents 24674867/256 = 96386.2 Pa = 963.862 hPa
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_press(struct bmp280_data *data,
+ struct bmp280_comp_press *comp,
+ s32 adc_press)
+{
+ s64 var1, var2, p;
+
+ var1 = ((s64) data->t_fine) - 128000;
+ var2 = var1 * var1 * (s64) comp->dig_p6;
+ var2 = var2 + ((var1 * (s64) comp->dig_p5) << 17);
+ var2 = var2 + (((s64) comp->dig_p4) << 35);
+ var1 = ((var1 * var1 * (s64) comp->dig_p3) >> 8) +
+ ((var1 * (s64) comp->dig_p2) << 12);
+ var1 = (((((s64) 1) << 47) + var1)) * ((s64) comp->dig_p1) >> 33;
+
+ if (var1 == 0)
+ return 0;
+
+ p = ((((s64) 1048576 - adc_press) << 31) - var2) * 3125;
+ p = div64_s64(p, var1);
+ var1 = (((s64) comp->dig_p9) * (p >> 13) * (p >> 13)) >> 25;
+ var2 = (((s64) comp->dig_p8) * p) >> 19;
+ p = ((p + var1 + var2) >> 8) + (((s64) comp->dig_p7) << 4);
+
+ return (u32) p;
+}
+
+static int bmp280_read_temp(struct bmp280_data *data,
+ int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_temp, comp_temp;
+ struct bmp280_comp_temp comp;
+
+ ret = bmp280_read_compensation_temp(data, &comp);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = be32_to_cpu(tmp) >> 12;
+ comp_temp = bmp280_compensate_temp(data, &comp, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp280_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_press;
+ u32 comp_press;
+ struct bmp280_comp_press comp;
+
+ ret = bmp280_read_compensation_press(data, &comp);
+ if (ret < 0)
+ return ret;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp280_read_temp(data, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = be32_to_cpu(tmp) >> 12;
+ comp_press = bmp280_compensate_press(data, &comp, adc_press);
+
+ *val = comp_press;
+ *val2 = 256000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ ret = bmp280_read_press(data, val, val2);
+ break;
+ case IIO_TEMP:
+ ret = bmp280_read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_info bmp280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &bmp280_read_raw,
+};
+
+static int bmp280_chip_init(struct bmp280_data *data)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ BMP280_OSRS_TEMP_MASK |
+ BMP280_OSRS_PRESS_MASK |
+ BMP280_MODE_MASK,
+ BMP280_OSRS_TEMP_2X |
+ BMP280_OSRS_PRESS_16X |
+ BMP280_MODE_NORMAL);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to write config register\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CONFIG,
+ BMP280_FILTER_MASK,
+ BMP280_FILTER_4X);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to write config register\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bmp280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bmp280_data *data;
+ unsigned int chip_id;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+ data = iio_priv(indio_dev);
+ mutex_init(&data->lock);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = bmp280_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
+ indio_dev->info = &bmp280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+ if (ret < 0)
+ return ret;
+ if (chip_id != BMP280_CHIP_ID) {
+ dev_err(&client->dev, "bad chip id. expected %x got %x\n",
+ BMP280_CHIP_ID, chip_id);
+ return -EINVAL;
+ }
+
+ ret = bmp280_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_match[] = {
+ {"BMP0280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+
+static const struct i2c_device_id bmp280_id[] = {
+ {"bmp280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_id);
+
+static struct i2c_driver bmp280_driver = {
+ .driver = {
+ .name = "bmp280",
+ .acpi_match_table = ACPI_PTR(bmp280_acpi_match),
+ },
+ .probe = bmp280_probe,
+ .id_table = bmp280_id,
+};
+module_i2c_driver(bmp280_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 242943c0c4e4..f5f41490060b 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -26,8 +26,7 @@ static const struct st_sensors_platform_data default_press_pdata = {
.drdy_int_pin = 1,
};
-int st_press_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *pdata);
+int st_press_common_probe(struct iio_dev *indio_dev);
void st_press_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index b37b1c9ac932..2ff53f222352 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -38,10 +38,10 @@ static int st_press_buffer_preenable(struct iio_dev *indio_dev)
static int st_press_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
- pdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (pdata->buffer_data == NULL) {
+ press_data->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (press_data->buffer_data == NULL) {
err = -ENOMEM;
goto allocate_memory_error;
}
@@ -53,7 +53,7 @@ static int st_press_buffer_postenable(struct iio_dev *indio_dev)
return err;
st_press_buffer_postenable_error:
- kfree(pdata->buffer_data);
+ kfree(press_data->buffer_data);
allocate_memory_error:
return err;
}
@@ -61,7 +61,7 @@ allocate_memory_error:
static int st_press_buffer_predisable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
err = iio_triggered_buffer_predisable(indio_dev);
if (err < 0)
@@ -70,7 +70,7 @@ static int st_press_buffer_predisable(struct iio_dev *indio_dev)
err = st_sensors_set_enable(indio_dev, false);
st_press_buffer_predisable_error:
- kfree(pdata->buffer_data);
+ kfree(press_data->buffer_data);
return err;
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 473d914ef470..97baf40d424b 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -175,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1)
};
-static const struct st_sensors st_press_sensors[] = {
+static const struct st_sensor_settings st_press_sensors_settings[] = {
{
.wai = ST_PRESS_LPS331AP_WAI_EXP,
.sensors_supported = {
@@ -333,7 +333,7 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
int err;
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -347,10 +347,10 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
switch (ch->type) {
case IIO_PRESSURE:
- *val2 = pdata->current_fullscale->gain;
+ *val2 = press_data->current_fullscale->gain;
break;
case IIO_TEMP:
- *val2 = pdata->current_fullscale->gain2;
+ *val2 = press_data->current_fullscale->gain2;
break;
default:
err = -EINVAL;
@@ -371,7 +371,7 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = pdata->odr;
+ *val = press_data->odr;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -409,11 +409,10 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
#define ST_PRESS_TRIGGER_OPS NULL
#endif
-int st_press_common_probe(struct iio_dev *indio_dev,
- struct st_sensors_platform_data *plat_data)
+int st_press_common_probe(struct iio_dev *indio_dev)
{
- struct st_sensor_data *pdata = iio_priv(indio_dev);
- int irq = pdata->get_irq_data_ready(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
+ int irq = press_data->get_irq_data_ready(indio_dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -422,28 +421,30 @@ int st_press_common_probe(struct iio_dev *indio_dev,
st_sensors_power_enable(indio_dev);
err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_press_sensors),
- st_press_sensors);
+ ARRAY_SIZE(st_press_sensors_settings),
+ st_press_sensors_settings);
if (err < 0)
return err;
- pdata->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
- pdata->multiread_bit = pdata->sensor->multi_read_bit;
- indio_dev->channels = pdata->sensor->ch;
- indio_dev->num_channels = pdata->sensor->num_ch;
+ press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+ press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
+ indio_dev->channels = press_data->sensor_settings->ch;
+ indio_dev->num_channels = press_data->sensor_settings->num_ch;
- if (pdata->sensor->fs.addr != 0)
- pdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &pdata->sensor->fs.fs_avl[0];
+ if (press_data->sensor_settings->fs.addr != 0)
+ press_data->current_fullscale =
+ (struct st_sensor_fullscale_avl *)
+ &press_data->sensor_settings->fs.fs_avl[0];
- pdata->odr = pdata->sensor->odr.odr_avl[0].hz;
+ press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!plat_data && pdata->sensor->drdy_irq.addr)
- plat_data =
+ if (!press_data->dev->platform_data &&
+ press_data->sensor_settings->drdy_irq.addr)
+ press_data->dev->platform_data =
(struct st_sensors_platform_data *)&default_press_pdata;
- err = st_sensors_init_sensor(indio_dev, plat_data);
+ err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
if (err < 0)
return err;
@@ -479,12 +480,12 @@ EXPORT_SYMBOL(st_press_common_probe);
void st_press_common_remove(struct iio_dev *indio_dev)
{
- struct st_sensor_data *pdata = iio_priv(indio_dev);
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (pdata->get_irq_data_ready(indio_dev) > 0)
+ if (press_data->get_irq_data_ready(indio_dev) > 0)
st_sensors_deallocate_trigger(indio_dev);
st_press_deallocate_ring(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index acaf165260bb..137788bba4a3 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -43,20 +43,19 @@ static int st_press_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
- struct st_sensor_data *pdata;
+ struct st_sensor_data *press_data;
int err;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*pdata));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data));
if (!indio_dev)
return -ENOMEM;
- pdata = iio_priv(indio_dev);
- pdata->dev = &client->dev;
+ press_data = iio_priv(indio_dev);
st_sensors_of_i2c_probe(client, st_press_of_match);
- st_sensors_i2c_configure(indio_dev, client, pdata);
+ st_sensors_i2c_configure(indio_dev, client, press_data);
- err = st_press_common_probe(indio_dev, client->dev.platform_data);
+ err = st_press_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index f45d430ec529..1ffa6d4d349c 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -21,19 +21,18 @@
static int st_press_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
- struct st_sensor_data *pdata;
+ struct st_sensor_data *press_data;
int err;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*pdata));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*press_data));
if (indio_dev == NULL)
return -ENOMEM;
- pdata = iio_priv(indio_dev);
- pdata->dev = &spi->dev;
+ press_data = iio_priv(indio_dev);
- st_sensors_spi_configure(indio_dev, spi, pdata);
+ st_sensors_spi_configure(indio_dev, spi, press_data);
- err = st_press_common_probe(indio_dev, spi->dev.platform_data);
+ err = st_press_common_probe(indio_dev);
if (err < 0)
return err;
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 8349cc0fdf66..466aa4314667 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -95,7 +95,7 @@ static int as3935_read(struct as3935_state *st, unsigned int reg, int *val)
*val = ret;
return 0;
-};
+}
static int as3935_write(struct as3935_state *st,
unsigned int reg,
@@ -107,7 +107,7 @@ static int as3935_write(struct as3935_state *st,
buf[1] = val;
return spi_write(st->spi, buf, 2);
-};
+}
static ssize_t as3935_sensor_sensitivity_show(struct device *dev,
struct device_attribute *attr,
@@ -122,7 +122,7 @@ static ssize_t as3935_sensor_sensitivity_show(struct device *dev,
val = (val & AS3935_AFE_MASK) >> 1;
return sprintf(buf, "%d\n", val);
-};
+}
static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
struct device_attribute *attr,
@@ -142,7 +142,7 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
as3935_write(st, AS3935_AFE_GAIN, val << 1);
return len;
-};
+}
static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
@@ -214,7 +214,7 @@ err_read:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
-};
+}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
.owner = THIS_MODULE,
@@ -238,7 +238,7 @@ static void as3935_event_work(struct work_struct *work)
dev_warn(&st->spi->dev, "noise level is too high");
break;
}
-};
+}
static irqreturn_t as3935_interrupt_handler(int irq, void *private)
{
@@ -417,7 +417,7 @@ unregister_trigger:
iio_trigger_unregister(st->trig);
return ret;
-};
+}
static int as3935_remove(struct spi_device *spi)
{
@@ -429,7 +429,7 @@ static int as3935_remove(struct spi_device *spi)
iio_trigger_unregister(st->trig);
return 0;
-};
+}
static const struct spi_device_id as3935_id[] = {
{"as3935", 0},
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 77089399359b..b899531498eb 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -38,6 +38,17 @@ config INFINIBAND_USER_MEM
depends on INFINIBAND_USER_ACCESS != n
default y
+config INFINIBAND_ON_DEMAND_PAGING
+ bool "InfiniBand on-demand paging support"
+ depends on INFINIBAND_USER_MEM
+ select MMU_NOTIFIER
+ default y
+ ---help---
+ On demand paging support for the InfiniBand subsystem.
+ Together with driver support, this allows registration of
+ memory regions without pinning their pages; the pages are
+ fetched on demand instead.
+
config INFINIBAND_ADDR_TRANS
bool
depends on INFINIBAND
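For context, with on-demand paging enabled (and a provider driver plus userspace verbs library that support it), an application opts a memory region into ODP via an access flag rather than having it pinned up front. A hedged userspace sketch; the IBV_ACCESS_ON_DEMAND name is assumed to be the libibverbs counterpart of the kernel's IB_ACCESS_ON_DEMAND and is not defined by this patch:

#include <infiniband/verbs.h>

/* Register an MR whose pages are faulted in on demand instead of being
 * pinned at registration time (assumes ODP support end to end). */
static struct ibv_mr *reg_odp_mr(struct ibv_pd *pd, void *buf, size_t len)
{
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE |
			  IBV_ACCESS_REMOTE_WRITE |
			  IBV_ACCESS_ON_DEMAND);
}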
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index ffd0af6734af..acf736764445 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8172d37f9add..f80da50d84a5 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -176,8 +176,8 @@ static void set_timeout(unsigned long time)
unsigned long delay;
delay = time - jiffies;
- if ((long)delay <= 0)
- delay = 1;
+ if ((long)delay < 0)
+ delay = 0;
mod_delayed_work(addr_wq, &work, delay);
}
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index d2360a8ef0b2..fa17b552ff78 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -525,17 +525,22 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
if (status)
process_join_error(group, status);
else {
+ int mgids_changed, is_mgid0;
ib_find_pkey(group->port->dev->device, group->port->port_num,
be16_to_cpu(rec->pkey), &pkey_index);
spin_lock_irq(&group->port->lock);
- group->rec = *rec;
if (group->state == MCAST_BUSY &&
group->pkey_index == MCAST_INVALID_PKEY_INDEX)
group->pkey_index = pkey_index;
- if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
+ mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
+ sizeof(group->rec.mgid));
+ group->rec = *rec;
+ if (mgids_changed) {
rb_erase(&group->node, &group->port->table);
- mcast_insert(group->port, group, 1);
+ is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
+ sizeof(mgid0));
+ mcast_insert(group->port, group, is_mgid0);
}
spin_unlock_irq(&group->port->lock);
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index df0c4f605a21..aec7a6aa2951 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,6 +39,7 @@
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>
+#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -69,6 +70,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
/**
* ib_umem_get - Pin and DMA map userspace memory.
+ *
+ * If the access flags indicate ODP memory, avoid pinning. Instead, store
+ * the current mm for future page-fault handling in conjunction with MMU
+ * notifiers.
+ *
* @context: userspace context to pin memory for
* @addr: userspace virtual address to start at
* @size: length of region to pin
@@ -103,17 +108,30 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->context = context;
umem->length = size;
- umem->offset = addr & ~PAGE_MASK;
+ umem->address = addr;
umem->page_size = PAGE_SIZE;
umem->pid = get_task_pid(current, PIDTYPE_PID);
/*
- * We ask for writable memory if any access flags other than
- * "remote read" are set. "Local write" and "remote write"
+ * We ask for writable memory if any of the following
+ * access flags are set. "Local write" and "remote write"
* obviously require write access. "Remote atomic" can do
* things like fetch and add, which will modify memory, and
* "MW bind" can change permissions by binding a window.
*/
- umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
+ umem->writable = !!(access &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+
+ if (access & IB_ACCESS_ON_DEMAND) {
+ ret = ib_umem_odp_get(context, umem);
+ if (ret) {
+ kfree(umem);
+ return ERR_PTR(ret);
+ }
+ return umem;
+ }
+
+ umem->odp_data = NULL;
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
@@ -132,7 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!vma_list)
umem->hugetlb = 0;
- npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
+ npages = ib_umem_num_pages(umem);
down_write(&current->mm->mmap_sem);
@@ -235,6 +253,11 @@ void ib_umem_release(struct ib_umem *umem)
struct task_struct *task;
unsigned long diff;
+ if (umem->odp_data) {
+ ib_umem_odp_release(umem);
+ return;
+ }
+
__ib_umem_release(umem->context->device, umem, 1);
task = get_pid_task(umem->pid, PIDTYPE_PID);
@@ -246,7 +269,7 @@ void ib_umem_release(struct ib_umem *umem)
if (!mm)
goto out;
- diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+ diff = ib_umem_num_pages(umem);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -283,6 +306,9 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
+ if (umem->odp_data)
+ return ib_umem_num_pages(umem);
+
shift = ilog2(umem->page_size);
n = 0;
@@ -292,3 +318,37 @@ int ib_umem_page_count(struct ib_umem *umem)
return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
+
+/*
+ * Copy from the given ib_umem's pages to the given buffer.
+ *
+ * dst - destination buffer
+ * umem - the umem to copy from
+ * offset - offset within the umem to start copying from
+ * length - number of bytes to copy
+ *
+ * Returns 0 on success, or an error code.
+ */
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length)
+{
+ size_t end = offset + length;
+ int ret;
+
+ if (offset > umem->length || length > umem->length - offset) {
+ pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
+ offset, umem->length, end);
+ return -EINVAL;
+ }
+
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ offset + ib_umem_offset(umem));
+
+ if (ret < 0)
+ return ret;
+ else if (ret != length)
+ return -EINVAL;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_copy_from);
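A minimal sketch of how a kernel consumer might use the new helper (hypothetical caller, not part of this patch):

/* Snapshot the first 'len' bytes of a registered region into a local
 * buffer; ib_umem_copy_from() returns 0 on success or a negative errno. */
static int snapshot_umem_header(struct ib_umem *umem, void *hdr, size_t len)
{
	return ib_umem_copy_from(hdr, umem, 0, len);
}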
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
new file mode 100644
index 000000000000..6095872549e7
--- /dev/null
+++ b/drivers/infiniband/core/umem_odp.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+static void ib_umem_notifier_start_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ int notifiers_count = item->odp_data->notifiers_count++;
+
+ if (notifiers_count == 0)
+ /* Initialize the completion object for waiting on
+ * notifiers. Since notifier_count is zero, no one
+ * should be waiting right now. */
+ reinit_completion(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+static void ib_umem_notifier_end_account(struct ib_umem *item)
+{
+ mutex_lock(&item->odp_data->umem_mutex);
+
+ /* Only update private counters for this umem if it has them.
+ * Otherwise skip it. All page faults will be delayed for this umem. */
+ if (item->odp_data->mn_counters_active) {
+ /*
+ * Incrementing this sequence number notifies the QP page-fault
+ * handler that the page about to be mapped into the SPTE may
+ * have been freed.
+ */
+ ++item->odp_data->notifiers_seq;
+ if (--item->odp_data->notifiers_count == 0)
+ complete_all(&item->odp_data->notifier_completion);
+ }
+ mutex_unlock(&item->odp_data->umem_mutex);
+}
+
+/* Account for a new mmu notifier in an ib_ucontext. */
+static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+{
+ atomic_inc(&context->notifier_count);
+}
+
+/* Account for a terminating mmu notifier in an ib_ucontext.
+ *
+ * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
+ * the function takes the semaphore itself. */
+static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+{
+ int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
+
+ if (zero_notifiers &&
+ !list_empty(&context->no_private_counters)) {
+ /* No currently running mmu notifiers. Now is the chance to
+ * add private accounting to all previously added umems. */
+ struct ib_umem_odp *odp_data, *next;
+
+ /* Prevent concurrent mmu notifiers from working on the
+ * no_private_counters list. */
+ down_write(&context->umem_rwsem);
+
+ /* Read the notifier_count again, with the umem_rwsem
+ * semaphore taken for write. */
+ if (!atomic_read(&context->notifier_count)) {
+ list_for_each_entry_safe(odp_data, next,
+ &context->no_private_counters,
+ no_private_counters) {
+ mutex_lock(&odp_data->umem_mutex);
+ odp_data->mn_counters_active = true;
+ list_del(&odp_data->no_private_counters);
+ complete_all(&odp_data->notifier_completion);
+ mutex_unlock(&odp_data->umem_mutex);
+ }
+ }
+
+ up_write(&context->umem_rwsem);
+ }
+}
+
+static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie) {
+ /*
+ * Increase the number of notifiers running, to
+ * prevent any further fault handling on this MR.
+ */
+ ib_umem_notifier_start_account(item);
+ item->odp_data->dying = 1;
+ /* Make sure the fact that the umem is dying is visible before we
+ * release all pending page faults. */
+ smp_wmb();
+ complete_all(&item->odp_data->notifier_completion);
+ item->context->invalidate_range(item, ib_umem_start(item),
+ ib_umem_end(item));
+ return 0;
+}
+
+static void ib_umem_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
+ ULLONG_MAX,
+ ib_umem_notifier_release_trampoline,
+ NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
+ address + PAGE_SIZE,
+ invalidate_page_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_start_account(item);
+ item->context->invalidate_range(item, start, end);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ ib_ucontext_notifier_start_account(context);
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_start_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+}
+
+static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+ u64 end, void *cookie)
+{
+ ib_umem_notifier_end_account(item);
+ return 0;
+}
+
+static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+
+ if (!context->invalidate_range)
+ return;
+
+ down_read(&context->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ end,
+ invalidate_range_end_trampoline, NULL);
+ up_read(&context->umem_rwsem);
+ ib_ucontext_notifier_end_account(context);
+}
+
+static struct mmu_notifier_ops ib_umem_notifiers = {
+ .release = ib_umem_notifier_release,
+ .invalidate_page = ib_umem_notifier_invalidate_page,
+ .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
+ .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
+};
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
+{
+ int ret_val;
+ struct pid *our_pid;
+ struct mm_struct *mm = get_task_mm(current);
+
+ if (!mm)
+ return -EINVAL;
+
+ /* Prevent creating ODP MRs in child processes */
+ rcu_read_lock();
+ our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+ put_pid(our_pid);
+ if (context->tgid != our_pid) {
+ ret_val = -EINVAL;
+ goto out_mm;
+ }
+
+ umem->hugetlb = 0;
+ umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
+ if (!umem->odp_data) {
+ ret_val = -ENOMEM;
+ goto out_mm;
+ }
+ umem->odp_data->umem = umem;
+
+ mutex_init(&umem->odp_data->umem_mutex);
+
+ init_completion(&umem->odp_data->notifier_completion);
+
+ umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->page_list));
+ if (!umem->odp_data->page_list) {
+ ret_val = -ENOMEM;
+ goto out_odp_data;
+ }
+
+ umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
+ sizeof(*umem->odp_data->dma_list));
+ if (!umem->odp_data->dma_list) {
+ ret_val = -ENOMEM;
+ goto out_page_list;
+ }
+
+ /*
+ * When using MMU notifiers, we will get a
+ * notification before the "current" task (and MM) is
+ * destroyed. We use the umem_rwsem semaphore to synchronize.
+ */
+ down_write(&context->umem_rwsem);
+ context->odp_mrs_count++;
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ if (likely(!atomic_read(&context->notifier_count)))
+ umem->odp_data->mn_counters_active = true;
+ else
+ list_add(&umem->odp_data->no_private_counters,
+ &context->no_private_counters);
+ downgrade_write(&context->umem_rwsem);
+
+ if (context->odp_mrs_count == 1) {
+ /*
+ * Note that at this point, no MMU notifier is running
+ * for this context!
+ */
+ atomic_set(&context->notifier_count, 0);
+ INIT_HLIST_NODE(&context->mn.hlist);
+ context->mn.ops = &ib_umem_notifiers;
+ /*
+ * Lockdep detects a false positive for mmap_sem vs.
+ * umem_rwsem because it does not model downgrade_write correctly.
+ */
+ lockdep_off();
+ ret_val = mmu_notifier_register(&context->mn, mm);
+ lockdep_on();
+ if (ret_val) {
+ pr_err("Failed to register mmu_notifier %d\n", ret_val);
+ ret_val = -EBUSY;
+ goto out_mutex;
+ }
+ }
+
+ up_read(&context->umem_rwsem);
+
+ /*
+ * Note that doing an mmput can cause a notifier for the relevant mm.
+ * If the notifier is called while we hold the umem_rwsem, this will
+ * cause a deadlock. Therefore, we release the reference only after we
+ * have released the semaphore.
+ */
+ mmput(mm);
+ return 0;
+
+out_mutex:
+ up_read(&context->umem_rwsem);
+ vfree(umem->odp_data->dma_list);
+out_page_list:
+ vfree(umem->odp_data->page_list);
+out_odp_data:
+ kfree(umem->odp_data);
+out_mm:
+ mmput(mm);
+ return ret_val;
+}
+
+void ib_umem_odp_release(struct ib_umem *umem)
+{
+ struct ib_ucontext *context = umem->context;
+
+ /*
+ * Ensure that no more pages are mapped in the umem.
+ *
+ * It is the driver's responsibility to ensure, before calling us,
+ * that the hardware will not attempt to access the MR any more.
+ */
+ ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+
+ down_write(&context->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem->odp_data->interval_tree,
+ &context->umem_tree);
+ context->odp_mrs_count--;
+ if (!umem->odp_data->mn_counters_active) {
+ list_del(&umem->odp_data->no_private_counters);
+ complete_all(&umem->odp_data->notifier_completion);
+ }
+
+ /*
+ * Downgrade the lock to a read lock. This ensures that the notifiers
+ * (which take the semaphore for reading) will be able to finish, and we
+ * will eventually be able to obtain the MMU notifiers' SRCU. Note
+ * that since we are doing it atomically, no other user could register
+ * and unregister while we do the check.
+ */
+ downgrade_write(&context->umem_rwsem);
+ if (!context->odp_mrs_count) {
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(context->tgid,
+ PIDTYPE_PID);
+ if (owning_process == NULL)
+ /*
+ * The process is already dead; the notifiers were
+ * already removed.
+ */
+ goto out;
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL)
+ /*
+ * The process's mm is already dead; the notifiers were
+ * already removed.
+ */
+ goto out_put_task;
+ mmu_notifier_unregister(&context->mn, owning_mm);
+
+ mmput(owning_mm);
+
+out_put_task:
+ put_task_struct(owning_process);
+ }
+out:
+ up_read(&context->umem_rwsem);
+
+ vfree(umem->odp_data->dma_list);
+ vfree(umem->odp_data->page_list);
+ kfree(umem->odp_data);
+ kfree(umem);
+}
+
+/*
+ * Map for DMA and insert a single page into the on-demand paging page tables.
+ *
+ * @umem: the umem to insert the page to.
+ * @page_index: index in the umem to add the page to.
+ * @page: the page struct to map and add.
+ * @access_mask: access permissions needed for this page.
+ * @current_seq: sequence number for synchronization with invalidations.
+ * the sequence number is taken from
+ * umem->odp_data->notifiers_seq.
+ *
+ * The function returns -EFAULT if the DMA mapping operation fails. It returns
+ * -EAGAIN if a concurrent invalidation prevents us from updating the page.
+ *
+ * The page is released via put_page even if the operation failed. For
+ * on-demand pinning, the page is released whenever it isn't stored in the
+ * umem.
+ */
+static int ib_umem_odp_map_dma_single_page(
+ struct ib_umem *umem,
+ int page_index,
+ u64 base_virt_addr,
+ struct page *page,
+ u64 access_mask,
+ unsigned long current_seq)
+{
+ struct ib_device *dev = umem->context->device;
+ dma_addr_t dma_addr;
+ int stored_page = 0;
+ int remove_existing_mapping = 0;
+ int ret = 0;
+
+ mutex_lock(&umem->odp_data->umem_mutex);
+ /*
+ * Note: we avoid writing if seq is different from the initial seq, to
+ * handle the case of a racing notifier. This check also allows us to bail
+ * early if we have a notifier running in parallel with us.
+ */
+ if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (!(umem->odp_data->dma_list[page_index])) {
+ dma_addr = ib_dma_map_page(dev,
+ page,
+ 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(dev, dma_addr)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
+ umem->odp_data->page_list[page_index] = page;
+ stored_page = 1;
+ } else if (umem->odp_data->page_list[page_index] == page) {
+ umem->odp_data->dma_list[page_index] |= access_mask;
+ } else {
+ pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem->odp_data->page_list[page_index], page);
+ /* Better remove the mapping now, to prevent any further
+ * damage. */
+ remove_existing_mapping = 1;
+ }
+
+out:
+ mutex_unlock(&umem->odp_data->umem_mutex);
+
+ /* On Demand Paging - avoid pinning the page */
+ if (umem->context->invalidate_range || !stored_page)
+ put_page(page);
+
+ if (remove_existing_mapping && umem->context->invalidate_range) {
+ invalidate_page_trampoline(
+ umem,
+ base_virt_addr + (page_index * PAGE_SIZE),
+ base_virt_addr + ((page_index+1)*PAGE_SIZE),
+ NULL);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+/**
+ * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
+ *
+ * Pins the range of pages passed in the argument, and maps them to
+ * DMA addresses. The DMA addresses of the mapped pages are updated in
+ * umem->odp_data->dma_list.
+ *
+ * Returns the number of pages mapped on success, or a negative error code
+ * on failure.
+ * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
+ * the function from completing its task.
+ *
+ * @umem: the umem to map and pin
+ * @user_virt: the address from which we need to map.
+ * @bcnt: the minimal number of bytes to pin and map. The mapping might be
+ * bigger due to alignment, and may also be smaller in case of an error
+ * pinning or mapping a page. The actual number of pages mapped is
+ * reported in the return value.
+ * @access_mask: bit mask of the requested access permissions for the given
+ * range.
+ * @current_seq: the MMU notifier sequence value for synchronization with
+ * invalidations. The sequence number is read from
+ * umem->odp_data->notifiers_seq before calling this function.
+ */
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
+ u64 access_mask, unsigned long current_seq)
+{
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+ struct page **local_page_list = NULL;
+ u64 off;
+ int j, k, ret = 0, start_idx, npages = 0;
+ u64 base_virt_addr;
+
+ if (access_mask == 0)
+ return -EINVAL;
+
+ if (user_virt < ib_umem_start(umem) ||
+ user_virt + bcnt > ib_umem_end(umem))
+ return -EFAULT;
+
+ local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
+ if (!local_page_list)
+ return -ENOMEM;
+
+ off = user_virt & (~PAGE_MASK);
+ user_virt = user_virt & PAGE_MASK;
+ base_virt_addr = user_virt;
+ bcnt += off; /* Charge for the first page offset as well. */
+
+ owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
+ if (owning_process == NULL) {
+ ret = -EINVAL;
+ goto out_no_task;
+ }
+
+ owning_mm = get_task_mm(owning_process);
+ if (owning_mm == NULL) {
+ ret = -EINVAL;
+ goto out_put_task;
+ }
+
+ start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
+ k = start_idx;
+
+ while (bcnt > 0) {
+ const size_t gup_num_pages =
+ min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
+ PAGE_SIZE / sizeof(struct page *));
+
+ down_read(&owning_mm->mmap_sem);
+ /*
+ * Note: this might result in redundant page gets. We could
+ * avoid this by checking that dma_list is 0 before calling
+ * get_user_pages. However, this makes the code much more
+ * complex (and doesn't gain us much performance in most use
+ * cases).
+ */
+ npages = get_user_pages(owning_process, owning_mm, user_virt,
+ gup_num_pages,
+ access_mask & ODP_WRITE_ALLOWED_BIT, 0,
+ local_page_list, NULL);
+ up_read(&owning_mm->mmap_sem);
+
+ if (npages < 0)
+ break;
+
+ bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
+ user_virt += npages << PAGE_SHIFT;
+ for (j = 0; j < npages; ++j) {
+ ret = ib_umem_odp_map_dma_single_page(
+ umem, k, base_virt_addr, local_page_list[j],
+ access_mask, current_seq);
+ if (ret < 0)
+ break;
+ k++;
+ }
+
+ if (ret < 0) {
+ /* Release left over pages when handling errors. */
+ for (++j; j < npages; ++j)
+ put_page(local_page_list[j]);
+ break;
+ }
+ }
+
+ if (ret >= 0) {
+ if (npages < 0 && k == start_idx)
+ ret = npages;
+ else
+ ret = k - start_idx;
+ }
+
+ mmput(owning_mm);
+out_put_task:
+ put_task_struct(owning_process);
+out_no_task:
+ free_page((unsigned long)local_page_list);
+ return ret;
+}
+EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+ u64 bound)
+{
+ int idx;
+ u64 addr;
+ struct ib_device *dev = umem->context->device;
+
+ virt = max_t(u64, virt, ib_umem_start(umem));
+ bound = min_t(u64, bound, ib_umem_end(umem));
+ /* Note that during the run of this function, the
+ * notifiers_count of the MR is > 0, preventing any racing
+ * faults from completing. We might be racing with other
+ * invalidations, so we must make sure we free each page only
+ * once. */
+ for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ mutex_lock(&umem->odp_data->umem_mutex);
+ if (umem->odp_data->page_list[idx]) {
+ struct page *page = umem->odp_data->page_list[idx];
+ struct page *head_page = compound_head(page);
+ dma_addr_t dma = umem->odp_data->dma_list[idx];
+ dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
+
+ WARN_ON(!dma_addr);
+
+ ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma & ODP_WRITE_ALLOWED_BIT)
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
+ /* on demand pinning support */
+ if (!umem->context->invalidate_range)
+ put_page(page);
+ umem->odp_data->page_list[idx] = NULL;
+ umem->odp_data->dma_list[idx] = 0;
+ }
+ mutex_unlock(&umem->odp_data->umem_mutex);
+ }
+}
+EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
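
As an illustration of the -EAGAIN contract of ib_umem_odp_map_dma_pages() above, a minimal driver-side fault handler might look like the sketch below. This is not part of the patch: the name my_odp_fault() is hypothetical, and the tight retry loop is a simplification; a real driver would more likely back off and let the hardware re-issue the fault.

static int my_odp_fault(struct ib_umem *umem, u64 io_virt, size_t bcnt,
			u64 access_mask)
{
	unsigned long current_seq;
	int npages;

	do {
		/* Sample the notifier sequence before mapping; the core code
		 * compares against it under umem_mutex via
		 * ib_umem_mmu_notifier_retry(). */
		current_seq = ACCESS_ONCE(umem->odp_data->notifiers_seq);
		/* Make sure the sequence is sampled before the pages are
		 * pinned and mapped. */
		smp_rmb();
		npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
						   access_mask, current_seq);
		/* -EAGAIN means an invalidation raced with the mapping;
		 * retrying in place is a simplification of what a real
		 * driver would do. */
	} while (npages == -EAGAIN);

	return npages;	/* >= 0: number of pages mapped, < 0: hard error */
}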
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
new file mode 100644
index 000000000000..727d788448f5
--- /dev/null
+++ b/drivers/infiniband/core/umem_rbtree.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <rdma/ib_umem_odp.h>
+
+/*
+ * The ib_umem list keeps track of memory regions for which the HW
+ * device requested to receive notifications when the related memory
+ * mapping changes.
+ *
+ * ib_umem_lock protects the list.
+ */
+
+static inline u64 node_start(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_start(umem_odp->umem);
+}
+
+/* Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval, while the
+ * function ib_umem_end returns the first address which is not contained
+ * in the umem.
+ */
+static inline u64 node_last(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_end(umem_odp->umem) - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
+ node_start, node_last, , rbt_ib_umem)
+
+/* @last is not a part of the interval. See comment for function
+ * node_last.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root,
+ u64 start, u64 last,
+ umem_call_back cb,
+ void *cookie)
+{
+ int ret_val = 0;
+ struct umem_odp_node *node;
+ struct ib_umem_odp *umem;
+
+ if (unlikely(start == last))
+ return ret_val;
+
+ for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
+ node = rbt_ib_umem_iter_next(node, start, last - 1)) {
+ umem = container_of(node, struct ib_umem_odp, interval_tree);
+ ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ }
+
+ return ret_val;
+}
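
For illustration, callers pass a half-open [start, last) range to rbt_ib_umem_for_each_in_range() while the tree stores closed intervals, which is why the lookup above uses last - 1. A minimal traversal under the context's umem_rwsem might look like the sketch below; count_overlaps_cb() and count_overlapping_umems() are hypothetical helpers (assuming CONFIG_INFINIBAND_ON_DEMAND_PAGING so the ucontext fields exist), not part of the patch.

/* Callback invoked once per umem overlapping the queried range. */
static int count_overlaps_cb(struct ib_umem *item, u64 start, u64 end,
			     void *cookie)
{
	(*(int *)cookie)++;
	return 0;
}

/* Count the ODP umems overlapping [addr, addr + length). */
static int count_overlapping_umems(struct ib_ucontext *context, u64 addr,
				   u64 length)
{
	int n = 0;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, addr, addr + length,
				      count_overlaps_cb, &n);
	up_read(&context->umem_rwsem);
	return n;
}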
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a5..b716b0815644 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ba2a86aab6a..532d8eba8b02 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
@@ -288,6 +289,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
struct ib_uverbs_get_context_resp resp;
struct ib_udata udata;
struct ib_device *ibdev = file->device->ib_dev;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_device_attr dev_attr;
+#endif
struct ib_ucontext *ucontext;
struct file *filp;
int ret;
@@ -325,8 +329,25 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->ah_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
INIT_LIST_HEAD(&ucontext->rule_list);
+ rcu_read_lock();
+ ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
ucontext->closing = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ ucontext->umem_tree = RB_ROOT;
+ init_rwsem(&ucontext->umem_rwsem);
+ ucontext->odp_mrs_count = 0;
+ INIT_LIST_HEAD(&ucontext->no_private_counters);
+
+ ret = ib_query_device(ibdev, &dev_attr);
+ if (ret)
+ goto err_free;
+ if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+ ucontext->invalidate_range = NULL;
+
+#endif
+
resp.num_comp_vectors = file->device->num_comp_vectors;
ret = get_unused_fd_flags(O_CLOEXEC);
@@ -371,6 +392,7 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
+ put_pid(ucontext->tgid);
ibdev->dealloc_ucontext(ucontext);
err:
@@ -378,6 +400,52 @@ err:
return ret;
}
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+ struct ib_uverbs_query_device_resp *resp,
+ struct ib_device_attr *attr)
+{
+ resp->fw_ver = attr->fw_ver;
+ resp->node_guid = file->device->ib_dev->node_guid;
+ resp->sys_image_guid = attr->sys_image_guid;
+ resp->max_mr_size = attr->max_mr_size;
+ resp->page_size_cap = attr->page_size_cap;
+ resp->vendor_id = attr->vendor_id;
+ resp->vendor_part_id = attr->vendor_part_id;
+ resp->hw_ver = attr->hw_ver;
+ resp->max_qp = attr->max_qp;
+ resp->max_qp_wr = attr->max_qp_wr;
+ resp->device_cap_flags = attr->device_cap_flags;
+ resp->max_sge = attr->max_sge;
+ resp->max_sge_rd = attr->max_sge_rd;
+ resp->max_cq = attr->max_cq;
+ resp->max_cqe = attr->max_cqe;
+ resp->max_mr = attr->max_mr;
+ resp->max_pd = attr->max_pd;
+ resp->max_qp_rd_atom = attr->max_qp_rd_atom;
+ resp->max_ee_rd_atom = attr->max_ee_rd_atom;
+ resp->max_res_rd_atom = attr->max_res_rd_atom;
+ resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
+ resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
+ resp->atomic_cap = attr->atomic_cap;
+ resp->max_ee = attr->max_ee;
+ resp->max_rdd = attr->max_rdd;
+ resp->max_mw = attr->max_mw;
+ resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
+ resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
+ resp->max_mcast_grp = attr->max_mcast_grp;
+ resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
+ resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+ resp->max_ah = attr->max_ah;
+ resp->max_fmr = attr->max_fmr;
+ resp->max_map_per_fmr = attr->max_map_per_fmr;
+ resp->max_srq = attr->max_srq;
+ resp->max_srq_wr = attr->max_srq_wr;
+ resp->max_srq_sge = attr->max_srq_sge;
+ resp->max_pkeys = attr->max_pkeys;
+ resp->local_ca_ack_delay = attr->local_ca_ack_delay;
+ resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+}
+
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@@ -398,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;
memset(&resp, 0, sizeof resp);
-
- resp.fw_ver = attr.fw_ver;
- resp.node_guid = file->device->ib_dev->node_guid;
- resp.sys_image_guid = attr.sys_image_guid;
- resp.max_mr_size = attr.max_mr_size;
- resp.page_size_cap = attr.page_size_cap;
- resp.vendor_id = attr.vendor_id;
- resp.vendor_part_id = attr.vendor_part_id;
- resp.hw_ver = attr.hw_ver;
- resp.max_qp = attr.max_qp;
- resp.max_qp_wr = attr.max_qp_wr;
- resp.device_cap_flags = attr.device_cap_flags;
- resp.max_sge = attr.max_sge;
- resp.max_sge_rd = attr.max_sge_rd;
- resp.max_cq = attr.max_cq;
- resp.max_cqe = attr.max_cqe;
- resp.max_mr = attr.max_mr;
- resp.max_pd = attr.max_pd;
- resp.max_qp_rd_atom = attr.max_qp_rd_atom;
- resp.max_ee_rd_atom = attr.max_ee_rd_atom;
- resp.max_res_rd_atom = attr.max_res_rd_atom;
- resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
- resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
- resp.atomic_cap = attr.atomic_cap;
- resp.max_ee = attr.max_ee;
- resp.max_rdd = attr.max_rdd;
- resp.max_mw = attr.max_mw;
- resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
- resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
- resp.max_mcast_grp = attr.max_mcast_grp;
- resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
- resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
- resp.max_ah = attr.max_ah;
- resp.max_fmr = attr.max_fmr;
- resp.max_map_per_fmr = attr.max_map_per_fmr;
- resp.max_srq = attr.max_srq;
- resp.max_srq_wr = attr.max_srq_wr;
- resp.max_srq_sge = attr.max_srq_sge;
- resp.max_pkeys = attr.max_pkeys;
- resp.local_ca_ack_delay = attr.local_ca_ack_delay;
- resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
+ copy_query_dev_fields(file, &resp, &attr);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@@ -947,6 +975,18 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
goto err_free;
}
+ if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
+ struct ib_device_attr attr;
+
+ ret = ib_query_device(pd->device, &attr);
+ if (ret || !(attr.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+
mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags, &udata);
if (IS_ERR(mr)) {
@@ -3253,3 +3293,52 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device cmd;
+ struct ib_device_attr attr;
+ struct ib_device *device;
+ int err;
+
+ device = file->device->ib_dev;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+ if (err)
+ return err;
+
+ if (cmd.reserved)
+ return -EINVAL;
+
+ err = device->query_device(device, &attr);
+ if (err)
+ return err;
+
+ memset(&resp, 0, sizeof(resp));
+ copy_query_dev_fields(file, &resp.base, &attr);
+ resp.comp_mask = 0;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
+ resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+ resp.odp_caps.per_transport_caps.rc_odp_caps =
+ attr.odp_caps.per_transport_caps.rc_odp_caps;
+ resp.odp_caps.per_transport_caps.uc_odp_caps =
+ attr.odp_caps.per_transport_caps.uc_odp_caps;
+ resp.odp_caps.per_transport_caps.ud_odp_caps =
+ attr.odp_caps.per_transport_caps.ud_odp_caps;
+ resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
+ }
+#endif
+
+ err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
+ if (err)
+ return err;
+
+ return 0;
+}
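
For context, the extended query-device verb and the IB_ACCESS_ON_DEMAND check above are what let userspace discover ODP support before registering an unpinned MR. The sketch below uses the libibverbs counterparts (ibv_query_device_ex(), IBV_ODP_SUPPORT, IBV_ACCESS_ON_DEMAND); these userspace names are assumptions about the consuming library and are not defined by this patch.

#include <infiniband/verbs.h>

static struct ibv_mr *reg_odp_mr(struct ibv_context *ctx, struct ibv_pd *pd,
				 void *addr, size_t len)
{
	struct ibv_device_attr_ex attr;

	/* Skip registration if the device does not advertise ODP. */
	if (ibv_query_device_ex(ctx, NULL, &attr) ||
	    !(attr.odp_caps.general_caps & IBV_ODP_SUPPORT))
		return NULL;

	/* With IBV_ACCESS_ON_DEMAND the pages are not pinned up front;
	 * faults are resolved through the kernel paths added here. */
	return ibv_reg_mr(pd, addr, len,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_ON_DEMAND);
}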
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 71ab83fde472..e6c23b9eab33 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,8 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
- [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow
+ [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
+ [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -296,6 +297,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ put_pid(context->tgid);
+
return context->device->dealloc_ucontext(context);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c2b89cc5dbca..f93eb8da7b5a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -879,7 +879,8 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
- qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
+ if (!(*qp_attr_mask & IB_QP_VID))
+ qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
} else {
ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 2d5cbf4363e4..bdf3507810cb 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -476,7 +476,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
c2mr->umem->page_size,
i,
length,
- c2mr->umem->offset,
+ ib_umem_offset(c2mr->umem),
&kva,
c2_convert_access(acc),
c2mr);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..9edc200b311d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,13 +472,13 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
skb = get_skb(skb, flowclen, GFP_KERNEL);
flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
- flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS(8));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
- 16)) | FW_WR_FLOWID(ep->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
- flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
(ep->com.dev->rdev.lldi.pf));
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->local_ip = la->sin_addr.s_addr;
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
- t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
- t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
* due to the limit in the number of bits in the RCV_BUFSIZ field,
* then add the overage in to the credits returned.
*/
- if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
- credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+ if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
+ credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+ req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
F_RX_DACK_CHANGE |
V_RX_DACK_MODE(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
@@ -1640,7 +1640,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
__state_set(&ep->com, MPA_REQ_RCVD);
/* drive upcall */
- mutex_lock(&ep->parent_ep->com.mutex);
+ mutex_lock_nested(&ep->parent_ep->com.mutex,
+ SINGLE_DEPTH_NESTING);
if (ep->parent_ep->com.state != DEAD) {
if (connect_request_upcall(ep))
abort_connection(ep, skb, GFP_KERNEL);
@@ -1751,7 +1752,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
@@ -1762,10 +1763,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
- htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
- V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+ htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
+ FW_OFLD_CONNECTION_WR_ASTID_V(atid));
req->tcb.cplrxdataack_cplpassacceptrpl =
- htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+ htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
@@ -1778,34 +1779,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win));
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
- RX_CHANNEL(0) |
+ RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
- req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
- req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+ req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
+ req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2179,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos >> 2) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack && req->tcpopt.sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2212,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
@@ -3126,6 +3127,8 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
if (err)
pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
err, ep->stid,
@@ -3159,6 +3162,8 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
&ep->com.wr_wait,
0, 0, __func__);
+ else if (err > 0)
+ err = net_xmit_errno(err);
}
if (err)
pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
@@ -3537,9 +3542,9 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
req->le.lport = lport;
req->le.pport = rport;
@@ -3548,16 +3553,16 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req->tcb.rcv_nxt = htonl(rcv_isn + 1);
req->tcb.rcv_adv = htons(window);
req->tcb.t_state_to_astid =
- htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
- V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
- V_FW_OFLD_CONNECTION_WR_ASTID(
+ htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
+ FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
+ FW_OFLD_CONNECTION_WR_ASTID_V(
GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
* to send us the wr response.
*/
- req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+ req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
/*
* We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3570,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TCB picks up the correct value. If this was 0
* TP will ignore any value > 0 for MSS index.
*/
- req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 0f773e78e080..e9fd3a029296 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -51,9 +51,9 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
@@ -121,9 +121,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 72f1f052e88c..eb5df4e62703 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -670,7 +670,7 @@ static int ep_open(struct inode *inode, struct file *file)
idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
spin_unlock_irq(&epd->devp->lock);
- epd->bufsize = count * 160;
+ epd->bufsize = count * 240;
epd->buf = vmalloc(epd->bufsize);
if (!epd->buf) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index ec7a2988a703..cb43c2299ac0 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -50,6 +50,13 @@ static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
+{
+ return (is_t4(dev->rdev.lldi.adapter_type) ||
+ is_t5(dev->rdev.lldi.adapter_type)) &&
+ length >= 8*1024*1024*1024ULL;
+}
+
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data, int wait)
{
@@ -74,18 +81,18 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
memset(req, 0, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
- (wait ? FW_WR_COMPL(1) : 0));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+ (wait ? FW_WR_COMPL_F : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
- req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
+ req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
- sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@@ -107,12 +114,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
- __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
- cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
else
- cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
+ cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -135,23 +142,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
- FW_WR_COMPL(1));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_COMPL_F);
req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
} else
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
- FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cmd;
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
sc = (struct ulptx_idata *)(req + 1);
- sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
to_dp = (u8 *)(sc + 1);
@@ -369,9 +376,11 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
int ret;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
- FW_RI_STAG_NSMR, mhp->attr.perms,
+ FW_RI_STAG_NSMR, mhp->attr.len ?
+ mhp->attr.perms : 0,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
- mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.va_fbo, mhp->attr.len ?
+ mhp->attr.len : -1, shift - 12,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
return ret;
@@ -536,6 +545,11 @@ int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
return ret;
}
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ return -EINVAL;
+ }
+
ret = reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret)
@@ -596,6 +610,12 @@ struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
if (ret)
goto err;
+ if (mr_exceeds_hw_limits(rhp, total_size)) {
+ kfree(page_list);
+ ret = -EINVAL;
+ goto err;
+ }
+
ret = alloc_pbl(mhp, npages);
if (ret) {
kfree(page_list);
@@ -699,6 +719,10 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
php = to_c4iw_pd(pd);
rhp = php->rhp;
+
+ if (mr_exceeds_hw_limits(rhp, length))
+ return ERR_PTR(-EINVAL);
+
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 72e3b69d1b76..66bd6a2ad83b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -408,10 +408,10 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
PDBG("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+ FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 41cd6882b648..bb85d479e66e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -271,9 +271,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(2) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
@@ -1082,10 +1082,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
- wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+ wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(qhp->ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(qhp->ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
@@ -1204,11 +1204,11 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
- FW_WR_OP(FW_RI_INIT_WR) |
- FW_WR_COMPL(1));
+ FW_WR_OP_V(FW_RI_INIT_WR) |
+ FW_WR_COMPL_F);
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
@@ -1273,11 +1273,11 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
- FW_WR_OP(FW_RI_INIT_WR) |
- FW_WR_COMPL(1));
+ FW_WR_OP_V(FW_RI_INIT_WR) |
+ FW_WR_COMPL_F);
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(qhp->ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(qhp->ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
@@ -1538,9 +1538,9 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- wake_up(&qhp->wait);
BUG_ON(!ep);
flush_qp(qhp);
+ wake_up(&qhp->wait);
out:
mutex_unlock(&qhp->mutex);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 3488e8c9fcb4..f914b30999f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -399,7 +399,7 @@ reg_user_mr_fallback:
pginfo.num_kpages = num_kpages;
pginfo.num_hwpages = num_hwpages;
pginfo.u.usr.region = e_mr->umem;
- pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
+ pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 5e61e9bff697..c7278f6a8217 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -214,7 +214,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->mr.max_segs = n;
mr->umem = umem;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1066eec854a9..a3b70f6c4035 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -233,7 +233,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
if (err)
goto err_dbmap;
- cq->mcq.comp = mlx4_ib_cq_comp;
+ if (context)
+ cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
+ else
+ cq->mcq.comp = mlx4_ib_cq_comp;
cq->mcq.event = mlx4_ib_cq_event;
if (context)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8b72cf392b34..57ecc5b204f3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1975,8 +1975,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
dev->caps.num_ports > dev->caps.comp_pool)
return;
- eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
- dev->caps.num_ports);
+ eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
/* Init eq table */
added_eqs = 0;
@@ -2228,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
- &ibdev->steer_qpn_base);
+ &ibdev->steer_qpn_base, 0);
if (err)
goto err_counter;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 8f9325cfc85d..c36ccbd9a644 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -223,7 +223,6 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
if (flags & IB_MR_REREG_TRANS) {
int shift;
- int err;
int n;
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9c5150c3cb31..cf000b7ad64f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -802,16 +802,21 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
}
} else {
- /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
- * BlueFlame setup flow wrongly causes VLAN insertion. */
+ /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
+ * otherwise, the WQE BlueFlame setup flow wrongly causes
+ * VLAN insertion. */
if (init_attr->qp_type == IB_QPT_RAW_PACKET)
- err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
+ (init_attr->cap.max_send_wr ?
+ MLX4_RESERVE_ETH_BF_QP : 0) |
+ (init_attr->cap.max_recv_wr ?
+ MLX4_RESERVE_A0_QP : 0));
else
if (qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1,
- &qpn);
+ &qpn, 0);
if (err)
goto err_proxy;
}
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 4ea0135af484..27a70159e2ea 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 10cfce5119a9..c463e7bba5f4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -805,14 +805,14 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
}
- mlx5_vfree(cqb);
+ kvfree(cqb);
return &cq->ibcq;
err_cmd:
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
err_cqb:
- mlx5_vfree(cqb);
+ kvfree(cqb);
if (context)
destroy_cq_user(cq, context);
else
@@ -1159,11 +1159,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
}
mutex_unlock(&cq->resize_mutex);
- mlx5_vfree(in);
+ kvfree(in);
return 0;
ex_alloc:
- mlx5_vfree(in);
+ kvfree(in);
ex_resize:
if (udata)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1ba6c42e4df8..8a87404e9c76 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -244,6 +244,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+#endif
+
out:
kfree(in_mad);
kfree(out_mad);
@@ -568,6 +574,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_count;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
+#endif
+
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -858,7 +868,7 @@ static ssize_t show_reg_pages(struct device *device,
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
+ return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -1321,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+ dev->ib_dev.uverbs_ex_cmd_mask =
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -1366,6 +1378,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
+ mlx5_ib_internal_query_odp_caps(dev);
+
if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -1379,16 +1393,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_eqs;
mutex_init(&dev->cap_mask_mutex);
- spin_lock_init(&dev->mr_lock);
err = create_dev_resources(&dev->devr);
if (err)
goto err_eqs;
- err = ib_register_device(&dev->ib_dev, NULL);
+ err = mlx5_ib_odp_init_one(dev);
if (err)
goto err_rsrc;
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_odp;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
@@ -1410,6 +1427,9 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_odp:
+ mlx5_ib_odp_remove_one(dev);
+
err_rsrc:
destroy_dev_resources(&dev->devr);
@@ -1425,8 +1445,10 @@ err_dealloc:
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_ib_dev *dev = context;
+
ib_unregister_device(&dev->ib_dev);
destroy_umrc_res(dev);
+ mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
free_comp_eqs(dev);
ib_dealloc_device(&dev->ib_dev);
@@ -1440,15 +1462,30 @@ static struct mlx5_interface mlx5_ib_interface = {
static int __init mlx5_ib_init(void)
{
+ int err;
+
if (deprecated_prof_sel != 2)
pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
- return mlx5_register_interface(&mlx5_ib_interface);
+ err = mlx5_ib_odp_init();
+ if (err)
+ return err;
+
+ err = mlx5_register_interface(&mlx5_ib_interface);
+ if (err)
+ goto clean_odp;
+
+ return err;
+
+clean_odp:
+ mlx5_ib_odp_cleanup();
+ return err;
}
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index dae07eae9507..b56e4c5593ee 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
/* @umem: umem object to scan
@@ -57,6 +58,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int entry;
unsigned long page_shift = ilog2(umem->page_size);
+ /* With ODP we must always match OS page size. */
+ if (umem->odp_data) {
+ *count = ib_umem_page_count(umem);
+ *shift = PAGE_SHIFT;
+ *ncont = *count;
+ if (order)
+ *order = ilog2(roundup_pow_of_two(*count));
+
+ return;
+ }
+
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, sizeof(tmp));
@@ -108,8 +120,36 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
*count = i;
}
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr)
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+ u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+ if (umem_dma & ODP_READ_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_READ;
+ if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_WRITE;
+
+ return mtt_entry;
+}
+#endif
+
+/*
+ * Populate the given array with bus addresses from the umem.
+ *
+ * dev - mlx5_ib device
+ * umem - umem to use to fill the pages
+ * page_shift - determines the page size used in the resulting array
+ * offset - offset into the umem to start from,
+ * only implemented for ODP umems
+ * num_pages - total number of pages to fill
+ * pas - bus addresses array to fill
+ * access_flags - access flags to set on all present pages.
+ *                use enum mlx5_ib_mtt_access_flags for this.
+ */
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags)
{
unsigned long umem_page_shift = ilog2(umem->page_size);
int shift = page_shift - umem_page_shift;
@@ -120,6 +160,21 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int len;
struct scatterlist *sg;
int entry;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ const bool odp = umem->odp_data != NULL;
+
+ if (odp) {
+ WARN_ON(shift != 0);
+ WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
+
+ for (i = 0; i < num_pages; ++i) {
+ dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+
+ pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ }
+ return;
+ }
+#endif
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -128,8 +183,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
cur = base + (k << umem_page_shift);
- if (umr)
- cur |= 3;
+ cur |= access_flags;
pas[i >> shift] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
@@ -142,6 +196,13 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
}
}
+void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int access_flags)
+{
+ return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
+ ib_umem_num_pages(umem), pas,
+ access_flags);
+}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
u64 page_size;
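
The umem_dma_to_mtt() helper added above folds the per-page permission bits that the ODP umem keeps in the low bits of each stored DMA address into the MTT access bits written to the device. A minimal user-space sketch of that translation, assuming the ODP bit layout (read-allowed in bit 0, write-allowed in bit 1, the remaining bits carrying the bus address); the MLX5_IB_MTT_* values are the ones introduced in mlx5_ib.h by this patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed ODP per-page bits (not shown in this patch): the low two bits
 * of the stored DMA address carry the access permissions. */
#define ODP_READ_ALLOWED_BIT   (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT  (1ULL << 1)
#define ODP_DMA_ADDR_MASK      (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

/* MTT access bits as defined in mlx5_ib.h in this patch. */
#define MLX5_IB_MTT_READ   (1 << 0)
#define MLX5_IB_MTT_WRITE  (1 << 1)

static uint64_t umem_dma_to_mtt(uint64_t umem_dma)
{
	/* Strip the permission bits, then re-encode them as MTT flags. */
	uint64_t mtt = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt |= MLX5_IB_MTT_WRITE;
	return mtt;
}

int main(void)
{
	/* A read-only page at bus address 0x12340000. */
	uint64_t entry = 0x12340000ULL | ODP_READ_ALLOWED_BIT;

	printf("mtt entry: 0x%llx\n",
	       (unsigned long long)umem_dma_to_mtt(entry));
	return 0;
}
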
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 386780f0d1e1..83f22fe297c8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -111,6 +111,8 @@ struct mlx5_ib_pd {
*/
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
@@ -147,6 +149,29 @@ enum {
MLX5_QP_EMPTY
};
+/*
+ * Connect-IB can trigger up to four concurrent pagefaults
+ * per QP.
+ */
+enum mlx5_ib_pagefault_context {
+ MLX5_IB_PAGEFAULT_RESPONDER_READ,
+ MLX5_IB_PAGEFAULT_REQUESTOR_READ,
+ MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
+ MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
+ MLX5_IB_PAGEFAULT_CONTEXTS
+};
+
+static inline enum mlx5_ib_pagefault_context
+ mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
+{
+ return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
+}
+
+struct mlx5_ib_pfault {
+ struct work_struct work;
+ struct mlx5_pagefault mpfault;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
struct mlx5_core_qp mqp;
@@ -192,6 +217,21 @@ struct mlx5_ib_qp {
/* Store signature errors */
bool signature_en;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * A flag that is true for QPs that are in a state that doesn't
+ * allow page faults, and shouldn't schedule any more faults.
+ */
+ int disable_page_faults;
+ /*
+ * The disable_page_faults_lock protects a QP's disable_page_faults
+ * field, allowing for a thread to atomically check whether the QP
+ * allows page faults, and if so schedule a page fault.
+ */
+ spinlock_t disable_page_faults_lock;
+ struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
+#endif
};
struct mlx5_ib_cq_buf {
@@ -206,6 +246,19 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
};
+struct mlx5_umr_wr {
+ union {
+ u64 virt_addr;
+ u64 offset;
+ } target;
+ struct ib_pd *pd;
+ unsigned int page_shift;
+ unsigned int npages;
+ u32 length;
+ int access_flags;
+ u32 mkey;
+};
+
struct mlx5_shared_mr_info {
int mr_id;
struct ib_umem *umem;
@@ -253,6 +306,13 @@ struct mlx5_ib_xrcd {
u32 xrcdn;
};
+enum mlx5_ib_mtt_access_flags {
+ MLX5_IB_MTT_READ = (1 << 0),
+ MLX5_IB_MTT_WRITE = (1 << 1),
+};
+
+#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_core_mr mmr;
@@ -261,12 +321,11 @@ struct mlx5_ib_mr {
struct list_head list;
int order;
int umred;
- __be64 *pas;
- dma_addr_t dma;
int npages;
struct mlx5_ib_dev *dev;
struct mlx5_create_mkey_mbox_out out;
struct mlx5_core_sig_ctx *sig;
+ int live;
};
struct mlx5_ib_fast_reg_page_list {
@@ -372,11 +431,18 @@ struct mlx5_ib_dev {
struct umr_common umrc;
/* sync used page count stats
*/
- spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
int fill_delay;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct ib_odp_caps odp_caps;
+ /*
+ * Sleepable RCU that prevents destruction of MRs while they are still
+ * being used by a page fault handler.
+ */
+ struct srcu_struct mr_srcu;
+#endif
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -490,6 +556,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int vector, struct ib_ucontext *context,
struct ib_udata *udata);
@@ -502,6 +570,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
+ int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
@@ -533,8 +603,11 @@ int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order);
+void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset, size_t num_pages,
+ __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- int page_shift, __be64 *pas, int umr);
+ int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
@@ -544,6 +617,38 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+extern struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault);
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
+int __init mlx5_ib_odp_init(void);
+void mlx5_ib_odp_cleanup(void);
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end);
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
+static inline int mlx5_ib_odp_init(void) { return 0; }
+static inline void mlx5_ib_odp_cleanup(void) {}
+static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
+static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -561,4 +666,7 @@ static inline u8 convert_access(int acc)
MLX5_PERM_LOCAL_READ;
}
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
#endif /* MLX5_IB_H */
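
mlx5_ib_get_pagefault_context() above works because the REQUESTOR and WRITE flag bits line up with the ordering of enum mlx5_ib_pagefault_context, so the masked flags value is directly usable as an index into qp->pagefaults[]. A small user-space sketch of that mapping, assuming MLX5_PFAULT_REQUESTOR is bit 0 and MLX5_PFAULT_WRITE is bit 1 of the pagefault flags (those values come from the mlx5 core headers and are not shown in this patch):

#include <stdio.h>

/* Assumed flag bits from the mlx5 core driver headers. */
#define MLX5_PFAULT_REQUESTOR  (1 << 0)
#define MLX5_PFAULT_WRITE      (1 << 1)

enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,   /* 0: !requestor, !write */
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,   /* 1:  requestor, !write */
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,  /* 2: !requestor,  write */
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,  /* 3:  requestor,  write */
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static enum mlx5_ib_pagefault_context get_pagefault_context(unsigned int flags)
{
	/* The masked flag bits are, by construction, the enum value. */
	return flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}

int main(void)
{
	unsigned int flags = MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE;

	printf("context index = %d (of %d)\n",
	       get_pagefault_context(flags), MLX5_IB_PAGEFAULT_CONTEXTS);
	return 0;
}
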
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ee7cb46e059..32a28bd50b20 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -37,21 +37,34 @@
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
enum {
MAX_PENDING_REG_MR = 8,
};
-enum {
- MLX5_UMR_ALIGN = 2048
-};
+#define MLX5_UMR_ALIGN 2048
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static __be64 mlx5_ib_update_mtt_emergency_buffer[
+ MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
+ __aligned(MLX5_UMR_ALIGN);
+static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
+#endif
+
+static int clean_mr(struct mlx5_ib_mr *mr);
-static __be64 *mr_align(__be64 *ptr, int align)
+static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- unsigned long mask = align - 1;
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
- return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* Wait until all page fault handlers using the mr complete. */
+ synchronize_srcu(&dev->mr_srcu);
+#endif
+
+ return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -146,7 +159,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mr->order = ent->order;
mr->umred = 1;
mr->dev = dev;
- in->seg.status = 1 << 6;
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
@@ -159,6 +172,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
sizeof(*in), reg_mr_callback,
mr, &mr->out);
if (err) {
+ spin_lock_irq(&ent->lock);
+ ent->pending--;
+ spin_unlock_irq(&ent->lock);
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
kfree(mr);
break;
@@ -188,7 +204,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -479,7 +495,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -665,7 +681,7 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
static int use_umr(int order)
{
- return order <= 17;
+ return order <= MLX5_MAX_UMR_SHIFT;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
@@ -675,6 +691,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_mr *mr = dev->umrc.mr;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
sg->addr = dma;
sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -689,21 +706,24 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
wr->num_sge = 0;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.page_list_len = n;
- wr->wr.fast_reg.page_shift = page_shift;
- wr->wr.fast_reg.rkey = key;
- wr->wr.fast_reg.iova_start = virt_addr;
- wr->wr.fast_reg.length = len;
- wr->wr.fast_reg.access_flags = access_flags;
- wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
+
+ umrwr->npages = n;
+ umrwr->page_shift = page_shift;
+ umrwr->mkey = key;
+ umrwr->target.virt_addr = virt_addr;
+ umrwr->length = len;
+ umrwr->access_flags = access_flags;
+ umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
struct ib_send_wr *wr, u32 key)
{
- wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
+ wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
wr->opcode = MLX5_IB_WR_UMR;
- wr->wr.fast_reg.rkey = key;
+ umrwr->mkey = key;
}
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
@@ -739,7 +759,10 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
- int size = sizeof(u64) * npages;
+ int size;
+ __be64 *mr_pas;
+ __be64 *pas;
+ dma_addr_t dma;
int err = 0;
int i;
@@ -758,25 +781,31 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr->pas) {
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the pas array, we allocate
+ * a little more. */
+ size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+ mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!mr_pas) {
err = -ENOMEM;
goto free_mr;
}
- mlx5_ib_populate_pas(dev, umem, page_shift,
- mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+ pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the actual pages. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
- mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, mr->dma)) {
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
err = -ENOMEM;
goto free_pas;
}
memset(&wr, 0, sizeof(wr));
wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
+ prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
+ virt_addr, len, access_flags);
mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
@@ -796,12 +825,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->mmr.size = len;
mr->mmr.pd = to_mpd(pd)->pdn;
+ mr->live = 1;
+
unmap_dma:
up(&umrc->sem);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
free_pas:
- kfree(mr->pas);
+ kfree(mr_pas);
free_mr:
if (err) {
@@ -812,6 +843,128 @@ free_mr:
return mr;
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
+ int zap)
+{
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct umr_common *umrc = &dev->umrc;
+ struct mlx5_ib_umr_context umr_context;
+ struct ib_umem *umem = mr->umem;
+ int size;
+ __be64 *pas;
+ dma_addr_t dma;
+ struct ib_send_wr wr, *bad;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+ struct ib_sge sg;
+ int err = 0;
+ const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
+ const int page_index_mask = page_index_alignment - 1;
+ size_t pages_mapped = 0;
+ size_t pages_to_map = 0;
+ size_t pages_iter = 0;
+ int use_emergency_buf = 0;
+
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+ * so we need to align the offset and length accordingly */
+ if (start_page_index & page_index_mask) {
+ npages += start_page_index & page_index_mask;
+ start_page_index &= ~page_index_mask;
+ }
+
+ pages_to_map = ALIGN(npages, page_index_alignment);
+
+ if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
+ return -EINVAL;
+
+ size = sizeof(u64) * pages_to_map;
+ size = min_t(int, PAGE_SIZE, size);
+ /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
+ * code, when we are called from an invalidation. The pas buffer must
+ * be 2k-aligned for Connect-IB. */
+ pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
+ if (!pas) {
+ mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
+ pas = mlx5_ib_update_mtt_emergency_buffer;
+ size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
+ use_emergency_buf = 1;
+ mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+ memset(pas, 0, size);
+ }
+ pages_iter = size / sizeof(u64);
+ dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
+ err = -ENOMEM;
+ goto free_pas;
+ }
+
+ for (pages_mapped = 0;
+ pages_mapped < pages_to_map && !err;
+ pages_mapped += pages_iter, start_page_index += pages_iter) {
+ dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
+
+ npages = min_t(size_t,
+ pages_iter,
+ ib_umem_num_pages(umem) - start_page_index);
+
+ if (!zap) {
+ __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
+ start_page_index, npages, pas,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages brought from the
+ * umem. */
+ memset(pas + npages, 0, size - npages * sizeof(u64));
+ }
+
+ dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+
+ memset(&wr, 0, sizeof(wr));
+ wr.wr_id = (u64)(unsigned long)&umr_context;
+
+ sg.addr = dma;
+ sg.length = ALIGN(npages * sizeof(u64),
+ MLX5_UMR_MTT_ALIGNMENT);
+ sg.lkey = dev->umrc.mr->lkey;
+
+ wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+ MLX5_IB_SEND_UMR_UPDATE_MTT;
+ wr.sg_list = &sg;
+ wr.num_sge = 1;
+ wr.opcode = MLX5_IB_WR_UMR;
+ umrwr->npages = sg.length / sizeof(u64);
+ umrwr->page_shift = PAGE_SHIFT;
+ umrwr->mkey = mr->mmr.key;
+ umrwr->target.offset = start_page_index;
+
+ mlx5_ib_init_umr_context(&umr_context);
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &wr, &bad);
+ if (err) {
+ mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_err(dev, "UMR completion failed, code %d\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+ up(&umrc->sem);
+ }
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+
+free_pas:
+ if (!use_emergency_buf)
+ free_page((unsigned long)pas);
+ else
+ mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+
+ return err;
+}
+#endif
+
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
u64 length, struct ib_umem *umem,
int npages, int page_shift,
@@ -822,6 +975,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
struct mlx5_ib_mr *mr;
int inlen;
int err;
+ bool pg_cap = !!(dev->mdev->caps.gen.flags &
+ MLX5_DEV_CAP_FLAG_ON_DMND_PG);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -833,8 +988,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
err = -ENOMEM;
goto err_1;
}
- mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);
+ mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ * in the page list submitted with the command. */
+ in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
in->seg.flags = convert_access(access_flags) |
MLX5_ACCESS_MODE_MTT;
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -853,14 +1012,15 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
goto err_2;
}
mr->umem = umem;
- mlx5_vfree(in);
+ mr->live = 1;
+ kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
return mr;
err_2:
- mlx5_vfree(in);
+ kvfree(in);
err_1:
kfree(mr);
@@ -907,6 +1067,10 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
+ } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ goto error;
}
if (!mr)
@@ -922,16 +1086,51 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
mr->npages = npages;
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages += npages;
- spin_unlock(&dev->mr_lock);
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.rkey = mr->mmr.key;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem->odp_data) {
+ /*
+ * This barrier prevents the compiler from moving the
+ * setting of umem->odp_data->private to point to our
+ * MR, before reg_umr has finished, to ensure that the MR
+ * initialization has finished before starting to
+ * handle invalidations.
+ */
+ smp_wmb();
+ mr->umem->odp_data->private = mr;
+ /*
+ * Make sure we will see the new
+ * umem->odp_data->private value in the invalidation
+ * routines, before we can get page faults on the
+ * MR. Page faults can happen once we put the MR in
+ * the tree, below this line. Without the barrier,
+ * there can be a fault handling and an invalidation
+ * before umem->odp_data->private == mr is visible to
+ * the invalidation handler.
+ */
+ smp_wmb();
+ }
+#endif
+
return &mr->ibmr;
error:
+ /*
+ * Destroy the umem *before* destroying the MR, to ensure we
+ * will not have any in-flight notifiers when destroying the
+ * MR.
+ *
+ * As the MR is completely invalid to begin with, and this
+ * error path is only taken if we can't push the mr entry into
+ * the pagefault tree, this is safe.
+ */
+
ib_umem_release(umem);
+ /* Kill the MR, and return an error code. */
+ clean_mr(mr);
return ERR_PTR(err);
}
@@ -968,17 +1167,14 @@ error:
return err;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int clean_mr(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
- struct ib_umem *umem = mr->umem;
- int npages = mr->npages;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int umred = mr->umred;
int err;
if (!umred) {
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -993,15 +1189,47 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
free_cached_mr(dev, mr);
}
- if (umem) {
+ if (!umred)
+ kfree(mr);
+
+ return 0;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int npages = mr->npages;
+ struct ib_umem *umem = mr->umem;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (umem && umem->odp_data) {
+ /* Prevent new page faults from succeeding */
+ mr->live = 0;
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&dev->mr_srcu);
+ /* Destroy all page mappings */
+ mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ ib_umem_end(umem));
+ /*
+ * We kill the umem before the MR for ODP,
+ * so that there will not be any invalidations in
+ * flight, looking at the *mr struct.
+ */
ib_umem_release(umem);
- spin_lock(&dev->mr_lock);
- dev->mdev->priv.reg_pages -= npages;
- spin_unlock(&dev->mr_lock);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+
+ /* Avoid double-freeing the umem. */
+ umem = NULL;
}
+#endif
- if (!umred)
- kfree(mr);
+ clean_mr(mr);
+
+ if (umem) {
+ ib_umem_release(umem);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ }
return 0;
}
@@ -1025,7 +1253,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
@@ -1110,7 +1338,7 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
kfree(mr->sig);
}
- err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -1140,7 +1368,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
goto err_free;
}
- in->seg.status = 1 << 6; /* free */
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
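
mlx5_ib_update_mtt() above widens the requested window to whole UMR chunks before posting: the start index is aligned down to a multiple of MLX5_UMR_MTT_ALIGNMENT / sizeof(u64) MTT entries and the page count is rounded up to the same granularity. A stand-alone sketch of just that arithmetic, assuming MLX5_UMR_MTT_ALIGNMENT is 0x40 bytes (the value lives in the mlx5 core headers and is not shown in this patch):

#include <stdint.h>
#include <stdio.h>

/* Assumed value from the mlx5 core headers; the patch only uses the macro. */
#define MLX5_UMR_MTT_ALIGNMENT 0x40

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	const uint64_t entries_per_block = MLX5_UMR_MTT_ALIGNMENT / sizeof(uint64_t);
	const uint64_t mask = entries_per_block - 1;
	uint64_t start_page_index = 13;  /* arbitrary, not block aligned */
	uint64_t npages = 5;
	uint64_t pages_to_map;

	/* Same adjustment as mlx5_ib_update_mtt(): move the start back to a
	 * block boundary and grow the count by the amount we moved. */
	if (start_page_index & mask) {
		npages += start_page_index & mask;
		start_page_index &= ~mask;
	}
	pages_to_map = ALIGN_UP(npages, entries_per_block);

	printf("start=%llu npages=%llu pages_to_map=%llu\n",
	       (unsigned long long)start_page_index,
	       (unsigned long long)npages,
	       (unsigned long long)pages_to_map);
	return 0;
}

With 64-byte alignment and 8-byte MTT entries this prints start=8, npages=10, pages_to_map=16, i.e. both ends of the window land on 8-entry block boundaries.
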
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
new file mode 100644
index 000000000000..a2c541c4809a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
+#include "mlx5_ib.h"
+
+#define MAX_PREFETCH_LEN (4*1024*1024U)
+
+/* Timeout in ms to wait for an active mmu notifier to complete when handling
+ * a pagefault. */
+#define MMU_NOTIFIER_TIMEOUT 1000
+
+struct workqueue_struct *mlx5_ib_page_fault_wq;
+
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+ unsigned long end)
+{
+ struct mlx5_ib_mr *mr;
+ const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+ u64 idx = 0, blk_start_idx = 0;
+ int in_block = 0;
+ u64 addr;
+
+ if (!umem || !umem->odp_data) {
+ pr_err("invalidation called on NULL umem or non-ODP umem\n");
+ return;
+ }
+
+ mr = umem->odp_data->private;
+
+ if (!mr || !mr->ibmr.pd)
+ return;
+
+ start = max_t(u64, ib_umem_start(umem), start);
+ end = min_t(u64, ib_umem_end(umem), end);
+
+ /*
+ * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
+ * while we are doing the invalidation, no page fault will attempt to
+ * overwrite the same MTTs. Concurrent invalidations might race us,
+ * but they will write 0s as well, so no difference in the end result.
+ */
+
+ for (addr = start; addr < end; addr += (u64)umem->page_size) {
+ idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
+ /*
+ * Strive to write the MTTs in chunks, but avoid overwriting
+ * non-existing MTTs. The heuristic here can be improved to
+ * estimate the cost of another UMR vs. the cost of a bigger
+ * UMR.
+ */
+ if (umem->odp_data->dma_list[idx] &
+ (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (!in_block) {
+ blk_start_idx = idx;
+ in_block = 1;
+ }
+ } else {
+ u64 umr_offset = idx & umr_block_mask;
+
+ if (in_block && umr_offset == 0) {
+ mlx5_ib_update_mtt(mr, blk_start_idx,
+ idx - blk_start_idx, 1);
+ in_block = 0;
+ }
+ }
+ }
+ if (in_block)
+ mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
+ 1);
+
+ /*
+ * We are now sure that the device will not access the
+ * memory. We can safely unmap it, and mark it as dirty if
+ * needed.
+ */
+
+ ib_umem_odp_unmap_dma_pages(umem, start, end);
+}
+
+#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \
+ if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \
+ ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \
+} while (0)
+
+int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+{
+ int err;
+ struct mlx5_odp_caps hw_caps;
+ struct ib_odp_caps *caps = &dev->odp_caps;
+
+ memset(caps, 0, sizeof(*caps));
+
+ if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+ return 0;
+
+ err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
+ if (err)
+ goto out;
+
+ caps->general_caps = IB_ODP_SUPPORT;
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ SEND);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ RECV);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ WRITE);
+ COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
+ READ);
+
+out:
+ return err;
+}
+
+static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
+ u32 key)
+{
+ u32 base_key = mlx5_base_mkey(key);
+ struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
+ struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+
+ if (!mmr || mmr->key != key || !mr->live)
+ return NULL;
+
+ return container_of(mmr, struct mlx5_ib_mr, mmr);
+}
+
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ int error) {
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
+ pfault->mpfault.flags,
+ error);
+ if (ret)
+ pr_err("Failed to resolve the page fault on QP 0x%x\n",
+ qp->mqp.qpn);
+}
+
+/*
+ * Handle a single data segment in a page-fault WQE.
+ *
+ * Returns number of pages retrieved on success. The caller will continue to
+ * the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ * page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ * abort the page fault handling and possibly move the QP to an error state.
+ * On other errors the QP should also be closed with an error.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault,
+ u32 key, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped)
+{
+ struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
+ int srcu_key;
+ unsigned int current_seq;
+ u64 start_idx;
+ int npages = 0, ret = 0;
+ struct mlx5_ib_mr *mr;
+ u64 access_mask = ODP_READ_ALLOWED_BIT;
+
+ srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
+ mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
+ /*
+ * If we didn't find the MR, it means the MR was closed while we were
+ * handling the ODP event. In this case we return -EFAULT so that the
+ * QP will be closed.
+ */
+ if (!mr || !mr->ibmr.pd) {
+ pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
+ key);
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+ if (!mr->umem->odp_data) {
+ pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped +=
+ (bcnt - pfault->mpfault.bytes_committed);
+ goto srcu_unlock;
+ }
+ if (mr->ibmr.pd != qp->ibqp.pd) {
+ pr_err("Page-fault with different PDs for QP and MR.\n");
+ ret = -EFAULT;
+ goto srcu_unlock;
+ }
+
+ current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
+ /*
+ * Ensure the sequence number is valid for some time before we call
+ * gup.
+ */
+ smp_rmb();
+
+ /*
+ * Avoid branches - this code will perform correctly
+ * in all iterations (in iteration 2 and above,
+ * bytes_committed == 0).
+ */
+ io_virt += pfault->mpfault.bytes_committed;
+ bcnt -= pfault->mpfault.bytes_committed;
+
+ start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+
+ if (mr->umem->writable)
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+ npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
+ access_mask, current_seq);
+ if (npages < 0) {
+ ret = npages;
+ goto srcu_unlock;
+ }
+
+ if (npages > 0) {
+ mutex_lock(&mr->umem->odp_data->umem_mutex);
+ if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ /*
+ * No need to check whether the MTTs really belong to
+ * this MR, since ib_umem_odp_map_dma_pages already
+ * checks this.
+ */
+ ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+ } else {
+ ret = -EAGAIN;
+ }
+ mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_err("Failed to update mkey page tables\n");
+ goto srcu_unlock;
+ }
+
+ if (bytes_mapped) {
+ u32 new_mappings = npages * PAGE_SIZE -
+ (io_virt - round_down(io_virt, PAGE_SIZE));
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
+ }
+ }
+
+srcu_unlock:
+ if (ret == -EAGAIN) {
+ if (!mr->umem->odp_data->dying) {
+ struct ib_umem_odp *odp_data = mr->umem->odp_data;
+ unsigned long timeout =
+ msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(
+ &odp_data->notifier_completion,
+ timeout)) {
+ pr_warn("timeout waiting for mmu notifier completion\n");
+ }
+ } else {
+ /* The MR is being killed, kill the QP as well. */
+ ret = -EFAULT;
+ }
+ }
+ srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
+ pfault->mpfault.bytes_committed = 0;
+ return ret ? ret : npages;
+}
+
+/**
+ * Parse a series of data segments for page fault handling.
+ *
+ * @qp the QP on which the fault occurred.
+ * @pfault contains page fault information.
+ * @wqe points at the first data segment in the WQE.
+ * @wqe_end points after the end of the WQE.
+ * @bytes_mapped receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ *
+ * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+ * negative error code.
+ */
+static int pagefault_data_segments(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault, void *wqe,
+ void *wqe_end, u32 *bytes_mapped,
+ u32 *total_wqe_bytes, int receive_queue)
+{
+ int ret = 0, npages = 0;
+ u64 io_virt;
+ u32 key;
+ u32 byte_count;
+ size_t bcnt;
+ int inline_segment;
+
+ /* Skip SRQ next-WQE segment. */
+ if (receive_queue && qp->ibqp.srq)
+ wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ if (bytes_mapped)
+ *bytes_mapped = 0;
+ if (total_wqe_bytes)
+ *total_wqe_bytes = 0;
+
+ while (wqe < wqe_end) {
+ struct mlx5_wqe_data_seg *dseg = wqe;
+
+ io_virt = be64_to_cpu(dseg->addr);
+ key = be32_to_cpu(dseg->lkey);
+ byte_count = be32_to_cpu(dseg->byte_count);
+ inline_segment = !!(byte_count & MLX5_INLINE_SEG);
+ bcnt = byte_count & ~MLX5_INLINE_SEG;
+
+ if (inline_segment) {
+ bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
+ wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
+ 16);
+ } else {
+ wqe += sizeof(*dseg);
+ }
+
+ /* receive WQE end of sg list. */
+ if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ io_virt == 0)
+ break;
+
+ if (!inline_segment && total_wqe_bytes) {
+ *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ }
+
+ /* A zero length data segment designates a length of 2GB. */
+ if (bcnt == 0)
+ bcnt = 1U << 31;
+
+ if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
+ pfault->mpfault.bytes_committed -=
+ min_t(size_t, bcnt,
+ pfault->mpfault.bytes_committed);
+ continue;
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
+ bcnt, bytes_mapped);
+ if (ret < 0)
+ break;
+ npages += ret;
+ }
+
+ return ret < 0 ? ret : npages;
+}
+
+/*
+ * Parse initiator WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and sets wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_initiator_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ unsigned ds, opcode;
+#if defined(DEBUG)
+ u32 ctrl_wqe_index, ctrl_qpn;
+#endif
+
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
+ mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
+ ds, wqe_length);
+ return -EFAULT;
+ }
+
+ if (ds == 0) {
+ mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
+ wqe_index, qp->mqp.qpn);
+ return -EFAULT;
+ }
+
+#if defined(DEBUG)
+ ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
+ MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
+ if (wqe_index != ctrl_wqe_index) {
+ mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_wqe_index);
+ return -EFAULT;
+ }
+
+ ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
+ MLX5_WQE_CTRL_QPN_SHIFT;
+ if (qp->mqp.qpn != ctrl_qpn) {
+ mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
+ wqe_index, qp->mqp.qpn,
+ ctrl_qpn);
+ return -EFAULT;
+ }
+#endif /* DEBUG */
+
+ *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
+ *wqe += sizeof(*ctrl);
+
+ opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_OPCODE_MASK;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ case MLX5_OPCODE_SEND_INVAL:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ break;
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_WRITE))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ case MLX5_OPCODE_RDMA_READ:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_READ))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ case IB_QPT_UD:
+ switch (opcode) {
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_IMM:
+ if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
+ IB_ODP_SUPPORT_SEND))
+ goto invalid_transport_or_opcode;
+ *wqe += sizeof(struct mlx5_wqe_datagram_seg);
+ break;
+ default:
+ goto invalid_transport_or_opcode;
+ }
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
+ qp->ibqp.qp_type, opcode);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse responder WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and sets wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_responder_pfault_handler(
+ struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+ void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
+ if (qp->ibqp.srq) {
+ mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+ return -EFAULT;
+ }
+
+ if (qp->wq_sig) {
+ mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
+ return -EFAULT;
+ }
+
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+ return -EFAULT;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+ IB_ODP_SUPPORT_RECV))
+ goto invalid_transport_or_opcode;
+ break;
+ default:
+invalid_transport_or_opcode:
+ mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EFAULT;
+ }
+
+ *wqe_end = *wqe + wqe_size;
+
+ return 0;
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+ int ret;
+ void *wqe, *wqe_end;
+ u32 bytes_mapped, total_wqe_bytes;
+ char *buffer = NULL;
+ int resume_with_error = 0;
+ u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+ int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer) {
+ mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
+ PAGE_SIZE);
+ if (ret < 0) {
+ mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
+ -ret, wqe_index, qp->mqp.qpn);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ wqe = buffer;
+ if (requestor)
+ ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ else
+ ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+ &wqe_end, ret);
+ if (ret < 0) {
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ if (wqe >= wqe_end) {
+ mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+ ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !requestor);
+ if (ret == -EAGAIN) {
+ goto resolve_page_fault;
+ } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
+ mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
+ -ret);
+ resume_with_error = 1;
+ goto resolve_page_fault;
+ }
+
+resolve_page_fault:
+ mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
+ qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+
+ free_page((unsigned long)buffer);
+}
+
+static int pages_in_range(u64 address, u32 length)
+{
+ return (ALIGN(address + length, PAGE_SIZE) -
+ (address & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ struct mlx5_pagefault *mpfault = &pfault->mpfault;
+ u64 address;
+ u32 length;
+ u32 prefetch_len = mpfault->bytes_committed;
+ int prefetch_activated = 0;
+ u32 rkey = mpfault->rdma.r_key;
+ int ret;
+
+ /* The RDMA responder handler handles the page fault in two parts.
+ * First it brings the necessary pages for the current packet
+ * (and uses the pfault context), and then (after resuming the QP)
+ * prefetches more pages. The second operation cannot use the pfault
+ * context and therefore uses the dummy_pfault context allocated on
+ * the stack */
+ struct mlx5_ib_pfault dummy_pfault = {};
+
+ dummy_pfault.mpfault.bytes_committed = 0;
+
+ mpfault->rdma.rdma_va += mpfault->bytes_committed;
+ mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
+ mpfault->rdma.rdma_op_len);
+ mpfault->bytes_committed = 0;
+
+ address = mpfault->rdma.rdma_va;
+ length = mpfault->rdma.rdma_op_len;
+
+ /* For some operations, the hardware cannot tell the exact message
+ * length, and in those cases it reports zero. Use prefetch
+ * logic. */
+ if (length == 0) {
+ prefetch_activated = 1;
+ length = mpfault->rdma.packet_size;
+ prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
+ }
+
+ ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
+ NULL);
+ if (ret == -EAGAIN) {
+ /* We're racing with an invalidation, don't prefetch */
+ prefetch_activated = 0;
+ } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ return;
+ }
+
+ mlx5_ib_page_fault_resume(qp, pfault, 0);
+
+ /* At this point, there might be a new pagefault already arriving in
+ * the eq; switch to the dummy pagefault for the rest of the
+ * processing. We're still OK with the objects being alive as the
+ * work-queue is being fenced. */
+
+ if (prefetch_activated) {
+ ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
+ address,
+ prefetch_len,
+ NULL);
+ if (ret < 0) {
+ pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, prefetch_activated,
+ qp->ibqp.qp_num, address, prefetch_len);
+ }
+ }
+}
+
+void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_pfault *pfault)
+{
+ u8 event_subtype = pfault->mpfault.event_subtype;
+
+ switch (event_subtype) {
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+ break;
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+ break;
+ default:
+ pr_warn("Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(qp, pfault, 1);
+ break;
+ }
+}
+
+static void mlx5_ib_qp_pfault_action(struct work_struct *work)
+{
+ struct mlx5_ib_pfault *pfault = container_of(work,
+ struct mlx5_ib_pfault,
+ work);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(&pfault->mpfault);
+ struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
+ pagefaults[context]);
+ mlx5_ib_mr_pfault_handler(qp, pfault);
+}
+
+void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 1;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+
+ /*
+ * Note that at this point, we are guaranteed that no more
+ * work queue elements will be posted to the work queue with
+ * the QP we are closing.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+}
+
+void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
+ qp->disable_page_faults = 0;
+ spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
+}
+
+static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
+ struct mlx5_pagefault *pfault)
+{
+ /*
+ * Note that we will only get one fault event per QP per context
+ * (responder/initiator, read/write), until we resolve the page fault
+ * with the mlx5_ib_page_fault_resume command. Since this function is
+ * called from within the work element, there is no risk of missing
+ * events.
+ */
+ struct mlx5_ib_qp *mibqp = to_mibqp(qp);
+ enum mlx5_ib_pagefault_context context =
+ mlx5_ib_get_pagefault_context(pfault);
+ struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
+
+ qp_pfault->mpfault = *pfault;
+
+ /* No need to stop interrupts here since we are in an interrupt */
+ spin_lock(&mibqp->disable_page_faults_lock);
+ if (!mibqp->disable_page_faults)
+ queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
+ spin_unlock(&mibqp->disable_page_faults_lock);
+}
+
+void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
+{
+ int i;
+
+ qp->disable_page_faults = 1;
+ spin_lock_init(&qp->disable_page_faults_lock);
+
+ qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+
+ for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
+ INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
+}
+
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
+{
+ int ret;
+
+ ret = init_srcu_struct(&ibdev->mr_srcu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
+{
+ cleanup_srcu_struct(&ibdev->mr_srcu);
+}
+
+int __init mlx5_ib_odp_init(void)
+{
+ mlx5_ib_page_fault_wq =
+ create_singlethread_workqueue("mlx5_ib_page_faults");
+ if (!mlx5_ib_page_fault_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5_ib_odp_cleanup(void)
+{
+ destroy_workqueue(mlx5_ib_page_fault_wq);
+}
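
The invalidation walk in mlx5_ib_invalidate_range() above batches contiguous present pages into runs and only flushes a run when it reaches an absent page on a UMR block boundary, so each mlx5_ib_update_mtt() call zaps an aligned, reasonably sized chunk. The run-detection logic in isolation, as a user-space sketch over a toy presence map (the 8-entry block again assumes a 0x40-byte MLX5_UMR_MTT_ALIGNMENT, which is not shown in this patch):

#include <stdio.h>

#define UMR_BLOCK_ENTRIES 8    /* assumed: 0x40-byte blocks / 8-byte MTTs */
#define NPAGES 24

/* Stand-in for mlx5_ib_update_mtt(mr, start, count, 1). */
static void zap_mtts(int start, int count)
{
	printf("zap MTTs [%d, %d)\n", start, start + count);
}

int main(void)
{
	/* Toy presence map: 1 means the MTT is currently mapped. */
	int present[NPAGES] = {
		1, 1, 1, 0, 0, 0, 0, 0,   /* block 0 */
		0, 0, 1, 1, 1, 1, 1, 1,   /* block 1 */
		1, 1, 0, 0, 0, 0, 0, 0,   /* block 2 */
	};
	const int block_mask = UMR_BLOCK_ENTRIES - 1;
	int in_block = 0, blk_start = 0, idx;

	for (idx = 0; idx < NPAGES; idx++) {
		if (present[idx]) {
			if (!in_block) {
				blk_start = idx;
				in_block = 1;
			}
		} else if (in_block && (idx & block_mask) == 0) {
			/* Flush only on a block boundary, exactly like the
			 * invalidation loop, to keep each UMR aligned. */
			zap_mtts(blk_start, idx - blk_start);
			in_block = 0;
		}
	}
	if (in_block)
		zap_mtts(blk_start, NPAGES - blk_start);
	return 0;
}

Running this prints two zaps, [0, 8) and [10, 24): absent entries inside a run are rewritten as zeros anyway, which is the trade-off the comment about the heuristic refers to.
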
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e261a53f9a02..be0cd358b080 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -70,15 +70,6 @@ static const u32 mlx5_ib_opcode[] = {
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
-struct umr_wr {
- u64 virt_addr;
- struct ib_pd *pd;
- unsigned int page_shift;
- unsigned int npages;
- u32 length;
- int access_flags;
- u32 mkey;
-};
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -110,6 +101,77 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ * otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ * wqe_index is in units of MLX5_SEND_WQE_BB.
+ * For receive work queue, it is the number of work queue
+ * element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or an error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+ void *buffer, u32 length)
+{
+ struct ib_device *ibdev = qp->ibqp.device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+ size_t offset;
+ size_t wq_end;
+ struct ib_umem *umem = qp->umem;
+ u32 first_copy_length;
+ int wqe_length;
+ int ret;
+
+ if (wq->wqe_cnt == 0) {
+ mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+ qp->ibqp.qp_type);
+ return -EINVAL;
+ }
+
+ offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+ wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+ if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (offset > umem->length ||
+ (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+ return -EINVAL;
+
+ first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+ ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+ if (ret)
+ return ret;
+
+ if (send) {
+ struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+ int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+ } else {
+ wqe_length = 1 << wq->wqe_shift;
+ }
+
+ if (wqe_length <= first_copy_length)
+ return first_copy_length;
+
+ ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+ wqe_length - first_copy_length);
+ if (ret)
+ return ret;
+
+ return wqe_length;
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
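
A wqe_index near the end of the queue can address a WQE whose tail lies past wq_end, in which case mlx5_ib_read_user_wqe() above copies the first stretch up to wq_end and then continues from wq->offset. A user-space sketch of just that wrap-around copy over a toy ring; the ctrl-segment parsing that determines the real send-WQE length is left out, and all sizes here are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy work queue: WQE_CNT entries of (1 << WQE_SHIFT) bytes each,
 * starting at WQ_OFFSET inside the QP buffer. */
#define WQ_OFFSET   0
#define WQE_SHIFT   4
#define WQE_CNT     8
#define WQ_BYTES    (WQE_CNT << WQE_SHIFT)

static uint8_t qp_buf[WQ_BYTES];

/* Copy one WQE out of the ring, wrapping back to the start of the queue
 * when the WQE runs past wq_end, like mlx5_ib_read_user_wqe() does. */
static int read_wqe(int wqe_index, int wqe_length, uint8_t *out)
{
	size_t offset = WQ_OFFSET + ((wqe_index % WQE_CNT) << WQE_SHIFT);
	size_t wq_end = WQ_OFFSET + WQ_BYTES;
	size_t first = wq_end - offset;

	if ((size_t)wqe_length <= first)
		first = wqe_length;

	memcpy(out, qp_buf + offset, first);
	if ((size_t)wqe_length > first)        /* wrapped WQE */
		memcpy(out + first, qp_buf + WQ_OFFSET, wqe_length - first);
	return wqe_length;
}

int main(void)
{
	uint8_t wqe[32];
	int i;

	for (i = 0; i < WQ_BYTES; i++)
		qp_buf[i] = (uint8_t)i;

	/* A 32-byte WQE starting at the last 16-byte slot wraps around. */
	read_wqe(7, 32, wqe);
	printf("first byte 0x%02x, byte after wrap 0x%02x\n", wqe[0], wqe[16]);
	return 0;
}
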
@@ -647,7 +709,7 @@ err_unmap:
mlx5_ib_db_unmap_user(context, &qp->db);
err_free:
- mlx5_vfree(*in);
+ kvfree(*in);
err_umem:
if (qp->umem)
@@ -761,7 +823,7 @@ err_wrid:
kfree(qp->rq.wrid);
err_free:
- mlx5_vfree(*in);
+ kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &qp->buf);
@@ -814,6 +876,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int inlen = sizeof(*in);
int err;
+ mlx5_ib_odp_create_qp(qp);
+
gen = &dev->mdev->caps.gen;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
@@ -971,7 +1035,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_create;
}
- mlx5_vfree(in);
+ kvfree(in);
/* Hardware wants QPN written in big-endian order (after
* shifting) for send doorbell. Precompute this value to save
* a little bit when posting sends.
@@ -988,7 +1052,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
@@ -1011,9 +1075,14 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
}
} else {
spin_lock_irq(&send_cq->lock);
+ __acquire(&recv_cq->lock);
}
} else if (recv_cq) {
spin_lock_irq(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
}
}
@@ -1033,10 +1102,15 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
spin_unlock_irq(&recv_cq->lock);
}
} else {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
}
} else if (recv_cq) {
+ __release(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock);
+ } else {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
}
}
@@ -1088,11 +1162,13 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return;
- if (qp->state != IB_QPS_RESET)
+ if (qp->state != IB_QPS_RESET) {
+ mlx5_ib_qp_disable_pagefaults(qp);
if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
qp->mqp.qpn);
+ }
get_cqs(qp, &send_cq, &recv_cq);
@@ -1640,6 +1716,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (mlx5_st < 0)
goto out;
+ /* If moving to a reset or error state, we must disable page faults on
+ * this QP and flush all current page faults. Otherwise a stale page
+ * fault may attempt to work on this QP after it is reset and moved
+ * again to RTS, and may cause the driver and the device to get out of
+ * sync. */
+ if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
+ (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+ mlx5_ib_qp_disable_pagefaults(qp);
+
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
in->optparam = cpu_to_be32(optpar);
@@ -1649,6 +1734,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (err)
goto out;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ mlx5_ib_qp_enable_pagefaults(qp);
+
qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
@@ -1838,37 +1926,70 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->mkey_mask = frwr_mkey_mask();
}
+static __be64 get_umr_reg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_PD |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_unreg_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_mtt_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
{
- struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
- u64 mask;
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
memset(umr, 0, sizeof(*umr));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
+ else
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
- umr->flags = 1 << 5; /* fail if not free */
umr->klm_octowords = get_klm_octo(umrwr->npages);
- mask = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_PD |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
- MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
+ umr->mkey_mask = get_umr_update_mtt_mask();
+ umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ } else {
+ umr->mkey_mask = get_umr_reg_mr_mask();
+ }
} else {
- umr->flags = 2 << 5; /* fail if free */
- mask = MLX5_MKEY_MASK_FREE;
- umr->mkey_mask = cpu_to_be64(mask);
+ umr->mkey_mask = get_umr_unreg_mr_mask();
}
if (!wr->num_sge)
- umr->flags |= (1 << 7); /* inline */
+ umr->flags |= MLX5_UMR_INLINE;
}
static u8 get_umr_flags(int acc)
@@ -1885,7 +2006,7 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
{
memset(seg, 0, sizeof(*seg));
if (li) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
@@ -1902,19 +2023,23 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
+ struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+
memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
- seg->status = 1 << 6;
+ seg->status = MLX5_MKEY_STATUS_FREE;
return;
}
- seg->flags = convert_access(wr->wr.fast_reg.access_flags);
- seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
- seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
- seg->len = cpu_to_be64(wr->wr.fast_reg.length);
- seg->log2_page_size = wr->wr.fast_reg.page_shift;
+ seg->flags = convert_access(umrwr->access_flags);
+ if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
+ }
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(wr->wr.fast_reg.rkey));
+ mlx5_mkey_variant(umrwr->mkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
@@ -2411,7 +2536,7 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, int *idx,
+ struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq)
{
int err = 0;
@@ -2737,6 +2862,8 @@ out:
if (bf->need_lock)
spin_lock(&bf->lock);
+ else
+ __acquire(&bf->lock);
/* TBD enable WC */
if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
@@ -2753,6 +2880,8 @@ out:
bf->offset ^= bf->buf_size;
if (bf->need_lock)
spin_unlock(&bf->lock);
+ else
+ __release(&bf->lock);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -2913,6 +3042,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_state;
int err = 0;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /*
+ * Wait for any outstanding page faults, in case the user frees memory
+ * based upon this query's result.
+ */
+ flush_workqueue(mlx5_ib_page_fault_wq);
+#endif
+
mutex_lock(&qp->mutex);
outb = kzalloc(sizeof(*outb), GFP_KERNEL);
if (!outb) {
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 97cc1baaa8e3..41fec66217dd 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -141,7 +141,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return 0;
err_in:
- mlx5_vfree(*in);
+ kvfree(*in);
err_umem:
ib_umem_release(srq->umem);
@@ -209,7 +209,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return 0;
err_in:
- mlx5_vfree(*in);
+ kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@@ -306,7 +306,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
- mlx5_vfree(in);
+ kvfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index fef067c959fc..c0d0296e7a00 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2341,9 +2341,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
" offset = %u, page size = %u.\n",
(unsigned long int)start, (unsigned long int)virt, (u32)length,
- region->offset, region->page_size);
+ ib_umem_offset(region), region->page_size);
- skip_pages = ((u32)region->offset) >> 12;
+ skip_pages = ((u32)ib_umem_offset(region)) >> 12;
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
ib_umem_release(region);
@@ -2408,7 +2408,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
region_length -= skip_pages << 12;
for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
skip_pages = 0;
- if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
+ if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
goto enough_pages;
if ((page_count&0x01FF) == 0) {
if (page_count >= 1024 * 512) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index ac02ce4e8040..f3cc8c9e65ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -96,7 +96,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
union ib_gid sgid;
- u8 zmac[ETH_ALEN];
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
@@ -118,9 +117,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
goto av_conf_err;
}
- memset(&zmac, 0, ETH_ALEN);
- if (pd->uctx &&
- memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+ if (pd->uctx) {
status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
attr->dmac, &attr->vlan_id);
if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 4c68305ee781..fb8d8c4dfbb9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -805,7 +805,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto umem_err;
mr->hwmr.pbe_size = mr->umem->page_size;
- mr->hwmr.fbo = mr->umem->offset;
+ mr->hwmr.fbo = ib_umem_offset(mr->umem);
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -1410,6 +1410,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
mutex_unlock(&dev->dev_lock);
if (status)
goto mbx_err;
+ if (qp->qp_type == IB_QPT_UD)
+ qp_attr->qkey = params.qkey;
qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->path_mtu =
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 9bbb55347cc1..a77fb4fb14e4 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -258,7 +258,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
- mr->mr.offset = umem->offset;
+ mr->mr.offset = ib_umem_offset(umem);
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d7562beb5423..8ba80a6d3a46 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -98,9 +98,15 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join operation is in flight and mcast->mc is unreliable
+ * When clear and mcast->mc is IS_ERR_OR_NULL, the join has not been
+ * started yet or needs to be restarted
+ * When clear and mcast->mc is a valid pointer, the join was successful
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
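
The reworked IPOIB_MCAST_FLAG_BUSY comment above defines a small state machine over the BUSY bit and mcast->mc. The helper below is hypothetical, not part of the patch, and assumes the driver's ipoib.h context; it only spells that state machine out.

/*
 * Hypothetical helper: decode the join state described by the
 * IPOIB_MCAST_FLAG_BUSY comment above.
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include "ipoib.h"

static const char *ipoib_mcast_join_state(struct ipoib_mcast *mcast)
{
	if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		return "join in flight, mcast->mc unreliable";
	if (IS_ERR_OR_NULL(mcast->mc))
		return "join not started yet, or needs a restart";
	return "joined, mcast->mc is a valid ib_sa_multicast pointer";
}
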
@@ -317,6 +323,7 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
+ struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -477,10 +484,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev, int flush);
+int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -492,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 933efcea0d03..56959adb6c7d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(ipoib_workqueue, &priv->cm.start_task);
+ queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(ipoib_workqueue, &priv->cm.skb_task);
+ queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 72626c348174..fe65abb5150c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev, int flush)
+int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, flush);
+ ipoib_ib_dev_stop(dev);
return -1;
}
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev, int flush)
+int ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev, int flush)
+int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -880,8 +880,7 @@ timeout:
/* Wait for all AHs to be reaped */
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
begin = jiffies;
@@ -918,7 +917,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1040,12 +1039,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_ib_dev_open(dev, 0) != 0)
+ ipoib_ib_dev_stop(dev);
+ if (ipoib_ib_dev_open(dev) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1097,7 +1096,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 58b5aa3b6f2d..6bad17d4d588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_stop(dev);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 0);
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(ipoib_workqueue, &priv->restart_task);
+ queue_work(priv->wq, &priv->restart_task);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1262,15 +1262,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out_neigh_hash_cleanup;
+ goto out;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1285,16 +1283,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
+ /*
+ * Must be after ipoib_ib_dev_init so we can allocate a per-device
+ * wq there and use it here
+ */
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out_dev_uninit;
+
return 0;
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
-out_neigh_hash_cleanup:
- ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1317,6 +1323,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
+ /*
+ * Must be before ipoib_ib_dev_cleanup or we delete an in-use
+ * workqueue
+ */
+ ipoib_neigh_hash_uninit(dev);
+
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1324,8 +1336,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
-
- ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1636,7 +1646,7 @@ register_failed:
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1707,7 +1717,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1748,8 +1758,13 @@ static int __init ipoib_init_module(void)
* unregister_netdev() and linkwatch_event take the rtnl lock,
* so flush_scheduled_work() can deadlock during device
* removal.
+ *
+ * In addition, bringing one device up and another down at the
+ * same time can deadlock a single workqueue, so we have this
+ * global fallback workqueue, but we also attempt to open a
+ * per-device workqueue each time we bring an interface up.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ffb83b5f7e80..bc50dd0d0e4d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -190,12 +190,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
-
- if (!ipoib_cm_admin_enabled(dev)) {
- rtnl_lock();
- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
- rtnl_unlock();
- }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -277,16 +271,27 @@ ipoib_mcast_sendonly_join_complete(int status,
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
+ /*
+ * We have to take the mutex to force mcast_sendonly_join to
+ * return from ib_sa_multicast_join and set mcast->mc to a
+ * valid value. Otherwise we would be racing with ourselves: we might
+ * fail here and clear mcast->mc, yet still get a valid return from
+ * ib_sa_multicast_join afterwards, resulting in mismatched joins and
+ * leaves and a deadlock
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
if (status == -ENETRESET)
- return 0;
+ goto out;
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (status) {
if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
+ ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
+ "join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
/* Flush out any queued packets */
@@ -296,11 +301,15 @@ ipoib_mcast_sendonly_join_complete(int status,
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
- &mcast->flags);
}
+out:
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ mutex_unlock(&mcast_mutex);
return status;
}
@@ -318,12 +327,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
int ret = 0;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
+ ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
+ "multicast joins\n");
return -ENODEV;
}
- if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
+ "sendonly join\n");
return -EBUSY;
}
@@ -331,6 +342,9 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
+ mutex_lock(&mcast_mutex);
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -343,12 +357,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
if (IS_ERR(mcast->mc)) {
ret = PTR_ERR(mcast->mc);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
- ret);
+ complete(&mcast->done);
+ ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
+ "failed (ret = %d)\n", ret);
} else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
- mcast->mcmember.mgid.raw);
+ ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
+ "sendonly join\n", mcast->mcmember.mgid.raw);
}
+ mutex_unlock(&mcast_mutex);
return ret;
}
@@ -359,18 +375,29 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
carrier_on_task);
struct ib_port_attr attr;
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed.
- */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- rtnl_lock();
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_ADMIN_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
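
The rtnl_trylock() loop above avoids a deadlock: ipoib_stop() flushes priv->wq while holding the rtnl lock, so a work item running on that workqueue must never block unconditionally on rtnl_lock(); instead it retries and bails out once ADMIN_UP is cleared. The same pattern, reduced to a runnable userspace illustration with pthread stand-ins and hypothetical names (build with cc -pthread):

/*
 * Userspace illustration of "trylock or bail": the worker gives up as soon
 * as it sees the shutdown condition instead of waiting for a lock whose
 * holder may be waiting for this very worker to finish.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ rtnl lock */
static volatile bool admin_up = true;                        /* ~ ADMIN_UP  */

static void carrier_on_worker(void)
{
	while (pthread_mutex_trylock(&big_lock) != 0) {
		if (!admin_up)		/* being torn down: safely bail out */
			return;
		usleep(20 * 1000);	/* ~ msleep(20) */
	}
	puts("got the lock, doing the carrier-on work");
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	carrier_on_worker();
	return 0;
}
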
@@ -385,60 +412,63 @@ static int ipoib_mcast_join_complete(int status,
ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
mcast->mcmember.mgid.raw, status);
+ /*
+ * We have to take the mutex to force mcast_join to
+ * return from ib_sa_multicast_join and set mcast->mc to a
+ * valid value. Otherwise we would be racing with ourselves: we might
+ * fail here and clear mcast->mc, yet still get a valid return from
+ * ib_sa_multicast_join afterwards, resulting in mismatched joins and
+ * leaves and a deadlock
+ */
+ mutex_lock(&mcast_mutex);
+
/* We trap for port events ourselves. */
- if (status == -ENETRESET) {
- status = 0;
+ if (status == -ENETRESET)
goto out;
- }
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
/*
- * Defer carrier on work to ipoib_workqueue to avoid a
+ * Defer carrier on work to priv->wq to avoid a
* deadlock on rtnl_lock here.
*/
if (mcast == priv->broadcast)
- queue_work(ipoib_workqueue, &priv->carrier_on_task);
-
- status = 0;
- goto out;
- }
-
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
- } else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ } else {
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+ mcast->mcmember.mgid.raw, status);
+ }
}
- }
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-
- mutex_lock(&mcast_mutex);
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ }
+out:
spin_lock_irq(&priv->lock);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (status)
+ mcast->mc = NULL;
+ complete(&mcast->done);
+ if (status == -ENETRESET)
+ status = 0;
+ if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);
-out:
- complete(&mcast->done);
+
return status;
}
@@ -487,10 +517,9 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ mutex_lock(&mcast_mutex);
init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
@@ -504,13 +533,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task,
+ queue_delayed_work(priv->wq, &priv->mcast_task,
mcast->backoff * HZ);
- mutex_unlock(&mcast_mutex);
}
+ mutex_unlock(&mcast_mutex);
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -547,8 +574,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
ipoib_warn(priv, "failed to allocate broadcast group\n");
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, HZ);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ HZ);
mutex_unlock(&mcast_mutex);
return;
}
@@ -563,7 +590,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
ipoib_mcast_join(dev, priv->broadcast, 0);
return;
}
@@ -571,23 +599,33 @@ void ipoib_mcast_join_task(struct work_struct *work)
while (1) {
struct ipoib_mcast *mcast = NULL;
+ /*
+ * Need the mutex so our flags are consistent, need the
+ * priv->lock so we don't race with list removals in either
+ * mcast_dev_flush or mcast_restart_task
+ */
+ mutex_lock(&mcast_mutex);
spin_lock_irq(&priv->lock);
list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
/* Found the next unjoined group */
break;
}
}
spin_unlock_irq(&priv->lock);
+ mutex_unlock(&mcast_mutex);
if (&mcast->list == &priv->multicast_list) {
/* All done */
break;
}
- ipoib_mcast_join(dev, mcast, 1);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_mcast_sendonly_join(mcast);
+ else
+ ipoib_mcast_join(dev, mcast, 1);
return;
}
@@ -604,13 +642,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -621,8 +659,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
cancel_delayed_work(&priv->mcast_task);
mutex_unlock(&mcast_mutex);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -633,6 +670,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -685,6 +725,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
+ if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
if (!mcast->ah) {
@@ -698,8 +740,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
- else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
/*
* If lookup completes between here and out:, don't
@@ -759,9 +799,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -794,8 +837,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- ipoib_mcast_stop_thread(dev, 0);
-
local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -880,14 +921,38 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /* We have to cancel outside of the spinlock */
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ /*
+ * We have to cancel outside of the spinlock, but we have to
+ * take the rtnl lock or else we race with the removal of
+ * entries from the remove list in mcast_dev_flush as part
+ * of ipoib_stop(). We detect the drop of the ADMIN_UP flag
+ * to signal that we have hit this particular race, and we
+ * return since we know we don't need to do anything else
+ * anyway.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
-
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- ipoib_mcast_start_thread(dev);
+ /*
+ * Restart our join task if needed
+ */
+ ipoib_mcast_start_thread(dev);
+ rtnl_unlock();
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c56d5d44c53b..b72a753eb41d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -145,10 +145,20 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
+ /*
+ * The various IPoIB tasks assume they will never race against
+ * themselves, so always use a single-threaded workqueue
+ */
+ priv->wq = create_singlethread_workqueue("ipoib_wq");
+ if (!priv->wq) {
+ printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
+ return -ENODEV;
+ }
+
priv->pd = ib_alloc_pd(priv->ca);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
- return -ENODEV;
+ goto out_free_wq;
}
priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -242,6 +252,10 @@ out_free_mr:
out_free_pd:
ib_dealloc_pd(priv->pd);
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
return -ENODEV;
}
@@ -270,6 +284,12 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
+
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
}
void ipoib_event(struct ib_event_handler *handler,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 20ca6a619476..6a594aac2290 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,7 +97,7 @@ module_param_named(pi_enable, iser_pi_enable, bool, 0644);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
module_param_named(pi_guard, iser_pi_guard, int, 0644);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:IP_CSUM)");
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
static struct workqueue_struct *release_wq;
struct iser_global ig;
@@ -164,18 +164,42 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
-int iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+/**
+ * iser_initialize_task_headers() - Initialize task headers
+ * @task: iscsi task
+ * @tx_desc: iser tx descriptor
+ *
+ * Notes:
+ * This routine may race with the iser teardown flow during SCSI
+ * error-handling TMFs. So for TMFs we must acquire the state mutex
+ * to avoid dereferencing an IB device that may have already been
+ * terminated.
+ */
+int
+iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
- struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
+ const bool mgmt_task = !task->sc && !in_interrupt();
+ int ret = 0;
+
+ if (unlikely(mgmt_task))
+ mutex_lock(&iser_conn->state_mutex);
+
+ if (unlikely(iser_conn->state != ISER_CONN_UP)) {
+ ret = -ENODEV;
+ goto out;
+ }
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
- if (ib_dma_mapping_error(device->ib_device, dma_addr))
- return -ENOMEM;
+ if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
+ ret = -ENOMEM;
+ goto out;
+ }
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
@@ -183,7 +207,11 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->iser_conn = iser_conn;
- return 0;
+out:
+ if (unlikely(mgmt_task))
+ mutex_unlock(&iser_conn->state_mutex);
+
+ return ret;
}
/**
@@ -199,9 +227,14 @@ static int
iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ int ret;
- if (iser_initialize_task_headers(task, &iser_task->desc))
- return -ENOMEM;
+ ret = iser_initialize_task_headers(task, &iser_task->desc);
+ if (ret) {
+ iser_err("Failed to init task %p, err = %d\n",
+ iser_task, ret);
+ return ret;
+ }
/* mgmt task */
if (!task->sc)
@@ -508,8 +541,8 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
- iscsi_conn_stop(cls_conn, flag);
iser_conn_terminate(iser_conn);
+ iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
@@ -541,12 +574,13 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION |
- SHOST_DIX_TYPE3_PROTECTION : 0);
+ return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
+ SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
+ SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
}
/**
@@ -569,6 +603,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
@@ -586,26 +621,41 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
if (ep) {
iser_conn = ep->dd_data;
+ max_cmds = iser_conn->max_cmds;
+
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state != ISER_CONN_UP) {
+ iser_err("iser conn %p already started teardown\n",
+ iser_conn);
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+
ib_conn = &iser_conn->ib_conn;
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
- if (iser_pi_guard)
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
- else
- scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
}
- }
- if (iscsi_host_add(shost, ep ?
- ib_conn->device->ib_device->dma_device : NULL))
- goto free_host;
+ if (iscsi_host_add(shost,
+ ib_conn->device->ib_device->dma_device)) {
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ max_cmds = ISER_DEF_XMIT_CMDS_MAX;
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+ }
- if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+ if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
- cmds_max, ISER_DEF_XMIT_CMDS_MAX);
- cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+ cmds_max, max_cmds);
+ cmds_max = max_cmds;
}
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cd4174ca9a76..5ce26817e7e1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,34 +69,31 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.4.8"
+#define DRV_VER "1.5"
#define iser_dbg(fmt, arg...) \
do { \
- if (iser_debug_level > 2) \
+ if (unlikely(iser_debug_level > 2)) \
printk(KERN_DEBUG PFX "%s: " fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
- if (iser_debug_level > 0) \
+ if (unlikely(iser_debug_level > 0)) \
pr_warn(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define iser_info(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (unlikely(iser_debug_level > 1)) \
pr_info(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
-#define iser_err(fmt, arg...) \
- do { \
- printk(KERN_ERR PFX "%s: " fmt, \
- __func__ , ## arg); \
- } while (0)
+#define iser_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
#define SHIFT_4K 12
#define SIZE_4K (1ULL << SHIFT_4K)
@@ -144,6 +141,11 @@
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
+#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \
+ - ISER_MAX_TX_MISC_PDUS \
+ - ISER_MAX_RX_MISC_PDUS) / \
+ (1 + ISER_INFLIGHT_DATAOUTS))
+
#define ISER_WC_BATCH_COUNT 16
#define ISER_SIGNAL_CMD_COUNT 32
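
ISER_GET_MAX_XMIT_CMDS() above derives how many SCSI commands a connection may keep outstanding from the QP's send-WR budget: the misc TX/RX PDUs get their reservation first, and each remaining command is budgeted one send WR plus up to ISER_INFLIGHT_DATAOUTS data-outs. The standalone check below uses placeholder constant values for illustration only; the real values are defined elsewhere in iscsi_iser.h.

/*
 * Worked example of the send-WR budget arithmetic; the three constants are
 * placeholders, not the driver's real values.
 */
#include <stdio.h>

#define MAX_TX_MISC_PDUS	6	/* placeholder */
#define MAX_RX_MISC_PDUS	4	/* placeholder */
#define INFLIGHT_DATAOUTS	8	/* placeholder */

/* same shape as ISER_GET_MAX_XMIT_CMDS(send_wr) */
#define GET_MAX_XMIT_CMDS(send_wr) \
	(((send_wr) - MAX_TX_MISC_PDUS - MAX_RX_MISC_PDUS) / \
	 (1 + INFLIGHT_DATAOUTS))

int main(void)
{
	/* a QP allowed 16384 send WRs vs. one capped at 4096 by the HCA */
	printf("%d\n", GET_MAX_XMIT_CMDS(16384));	/* prints 1819 */
	printf("%d\n", GET_MAX_XMIT_CMDS(4096));	/* prints 454 */
	return 0;
}
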
@@ -247,7 +249,6 @@ struct iscsi_endpoint;
* @va: MR start address (buffer va)
* @len: MR length
* @mem_h: pointer to registration context (FMR/Fastreg)
- * @is_mr: indicates weather we registered the buffer
*/
struct iser_mem_reg {
u32 lkey;
@@ -255,7 +256,6 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
- int is_mr;
};
/**
@@ -323,8 +323,6 @@ struct iser_rx_desc {
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
-#define ISER_MAX_CQ 4
-
struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;
@@ -375,7 +373,7 @@ struct iser_device {
struct list_head ig_list;
int refcount;
int comps_used;
- struct iser_comp comps[ISER_MAX_CQ];
+ struct iser_comp *comps;
int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
@@ -432,6 +430,7 @@ struct fast_reg_descriptor {
* @cma_id: rdma_cm connection manager handle
* @qp: Connection Queue-pair
* @post_recv_buf_count: post receive counter
+ * @sig_count: send work request signal count
* @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @comp: iser completion context
@@ -452,6 +451,7 @@ struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
int post_recv_buf_count;
+ u8 sig_count;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_comp *comp;
@@ -482,6 +482,7 @@ struct ib_conn {
* to max number of post recvs
* @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
* @min_posted_rx: (qp_max_recv_dtos >> 2)
+ * @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
* @release_work: deferred work for release job
* @state_mutex: protects iser connection state
@@ -507,6 +508,7 @@ struct iser_conn {
unsigned qp_max_recv_dtos;
unsigned qp_max_recv_dtos_mask;
unsigned min_posted_rx;
+ u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
struct mutex state_mutex;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5a489ea63732..3821633f1065 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -369,7 +369,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
return 0;
}
-static inline bool iser_signal_comp(int sig_count)
+static inline bool iser_signal_comp(u8 sig_count)
{
return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
@@ -388,7 +388,7 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- static unsigned sig_count;
+ u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -435,7 +435,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(++sig_count));
+ iser_signal_comp(sig_count));
if (!err)
return 0;
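
The hunk above replaces a function-static signal counter shared by every connection with the per-connection ib_conn.sig_count added earlier, so each QP requests a signalled completion once every ISER_SIGNAL_CMD_COUNT sends regardless of other connections' traffic. Because 256 is a multiple of ISER_SIGNAL_CMD_COUNT (32), the u8 wrap-around never skips or doubles a signal; a small standalone check:

/*
 * Standalone check of the signalling cadence with a wrapping 8-bit counter.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SIGNAL_CMD_COUNT 32	/* mirrors ISER_SIGNAL_CMD_COUNT above */

static bool signal_comp(uint8_t sig_count)
{
	return (sig_count % SIGNAL_CMD_COUNT) == 0;
}

int main(void)
{
	uint8_t sig_count = 0;
	int signalled = 0;

	for (int i = 0; i < 1024; i++)
		if (signal_comp(++sig_count))
			signalled++;
	printf("signalled %d of 1024 sends\n", signalled);	/* prints 32 */
	return 0;
}
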
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 6c5ce357fba6..abce9339333f 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -73,7 +73,6 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg the buffer which is used for RDMA */
- int i;
char *p, *from;
sgl = (struct scatterlist *)data->buf;
@@ -409,7 +408,6 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
@@ -440,13 +438,13 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
return 0;
}
-static inline void
+static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
- domain->sig.dif.pi_interval = sc->device->sector_size;
- domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
+ domain->sig.dif.pi_interval = scsi_prot_interval(sc);
+ domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
@@ -454,8 +452,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
- if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
- scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
+ if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
};
@@ -473,26 +470,16 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
- /*
- * At the moment we use this modparam to tell what is
- * the memory bg_type, in the future we will take it
- * from sc.
- */
- sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
- IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
default:
iser_err("Unsupported PI operation %d\n",
@@ -503,26 +490,28 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static int
+static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
- switch (scsi_get_prot_type(sc)) {
- case SCSI_PROT_DIF_TYPE0:
- break;
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
- *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
- break;
- case SCSI_PROT_DIF_TYPE3:
- *mask = ISER_CHECK_GUARD;
- break;
- default:
- iser_err("Unsupported protection type %d\n",
- scsi_get_prot_type(sc));
- return -EINVAL;
- }
+ *mask = 0;
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
+ *mask |= ISER_CHECK_REFTAG;
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
+ *mask |= ISER_CHECK_GUARD;
+}
- return 0;
+static void
+iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
}
static int
@@ -536,26 +525,17 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct ib_send_wr *bad_wr, *wr = NULL;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
if (ret)
goto err;
- ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
- if (ret)
- goto err;
+ iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
@@ -585,12 +565,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
sig_sge->lkey = pi_ctx->sig_mr->lkey;
sig_sge->addr = 0;
- sig_sge->length = data_sge->length + prot_sge->length;
- if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
- scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
- sig_sge->length += (data_sge->length /
- iser_task->sc->device->sector_size) * 8;
- }
+ sig_sge->length = scsi_transfer_length(iser_task->sc);
iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
sig_sge->addr, sig_sge->length,
@@ -613,7 +588,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
- u8 key;
int ret, offset, size, plen;
/* if there a single dma entry, dma mr suffices */
@@ -645,14 +619,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
if (!(desc->reg_indicators & ind)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ iser_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -770,15 +738,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
regd_buf->reg.va = sig_sge.addr;
regd_buf->reg.len = sig_sge.length;
- regd_buf->reg.is_mr = 1;
} else {
- if (desc) {
+ if (desc)
regd_buf->reg.rkey = desc->data_mr->rkey;
- regd_buf->reg.is_mr = 1;
- } else {
+ else
regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.is_mr = 0;
- }
regd_buf->reg.lkey = data_sge.lkey;
regd_buf->reg.va = data_sge.addr;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 67225bb82bb5..695a2704bd43 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -76,7 +76,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device_attr *dev_attr = &device->dev_attr;
- int ret, i;
+ int ret, i, max_cqe;
ret = ib_query_device(device->ib_device, dev_attr);
if (ret) {
@@ -104,11 +104,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
return -1;
}
- device->comps_used = min(ISER_MAX_CQ,
+ device->comps_used = min_t(int, num_online_cpus(),
device->ib_device->num_comp_vectors);
- iser_info("using %d CQs, device %s supports %d vectors\n",
+
+ device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
+ GFP_KERNEL);
+ if (!device->comps)
+ goto comps_err;
+
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+
+ iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors);
+ device->ib_device->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
@@ -122,7 +130,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
iser_cq_callback,
iser_cq_event_callback,
(void *)comp,
- ISER_MAX_CQ_LEN, i);
+ max_cqe, i);
if (IS_ERR(comp->cq)) {
comp->cq = NULL;
goto cq_err;
@@ -162,6 +170,8 @@ cq_err:
}
ib_dealloc_pd(device->pd);
pd_err:
+ kfree(device->comps);
+comps_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
@@ -187,6 +197,9 @@ static void iser_free_device_ib_res(struct iser_device *device)
(void)ib_dereg_mr(device->mr);
(void)ib_dealloc_pd(device->pd);
+ kfree(device->comps);
+ device->comps = NULL;
+
device->mr = NULL;
device->pd = NULL;
}
@@ -425,7 +438,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
+ struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
+ ib_conn);
struct iser_device *device;
+ struct ib_device_attr *dev_attr;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
int index, min_index = 0;
@@ -433,6 +449,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ dev_attr = &device->dev_attr;
memset(&init_attr, 0, sizeof init_attr);
@@ -460,8 +477,20 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
if (ib_conn->pi_support) {
init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
} else {
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
+ } else {
+ init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
+ iser_conn->max_cmds =
+ ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
+ iser_dbg("device %s supports max_send_wr %d\n",
+ device->ib_device->name, dev_attr->max_qp_wr);
+ }
}
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
@@ -475,7 +504,11 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
return ret;
out_err:
+ mutex_lock(&ig.connlist_mutex);
+ ib_conn->comp->active_qps--;
+ mutex_unlock(&ig.connlist_mutex);
iser_err("unable to alloc mem or create resource, err %d\n", ret);
+
return ret;
}
@@ -610,9 +643,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&iser_conn->state_mutex);
- if (iser_conn->state != ISER_CONN_DOWN)
+ if (iser_conn->state != ISER_CONN_DOWN) {
iser_warn("iser conn %p state %d, expected state down.\n",
iser_conn, iser_conn->state);
+ iser_conn->state = ISER_CONN_DOWN;
+ }
/*
* In case we never got to bind stage, we still need to
* release IB resources (which is safe to call more than once).
@@ -662,8 +697,10 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
/* post an indication that all flush errors were consumed */
err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
- if (err)
+ if (err) {
iser_err("conn %p failed to post beacon", ib_conn);
+ return 1;
+ }
wait_for_completion(&ib_conn->flush_comp);
}
@@ -846,20 +883,21 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
- iser_disconnected_handler(cma_id);
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ iser_cleanup_handler(cma_id, false);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* we *must* destroy the device as we cannot rely
* on iscsid to be around to initiate error handling.
- * also implicitly destroy the cma_id.
+ * also, if we are not in state DOWN, implicitly destroy
+ * the cma_id.
*/
iser_cleanup_handler(cma_id, true);
- iser_conn->ib_conn.cma_id = NULL;
- ret = 1;
- break;
- case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- iser_cleanup_handler(cma_id, false);
+ if (iser_conn->state != ISER_CONN_DOWN) {
+ iser_conn->ib_conn.cma_id = NULL;
+ ret = 1;
+ }
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -981,7 +1019,6 @@ int iser_reg_page_vec(struct ib_conn *ib_conn,
mem_reg->rkey = mem->fmr->rkey;
mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
- mem_reg->is_mr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
@@ -1008,7 +1045,7 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
int ret;
- if (!reg->is_mr)
+ if (!reg->mem_h)
return;
iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
@@ -1028,11 +1065,10 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct fast_reg_descriptor *desc = reg->mem_h;
- if (!reg->is_mr)
+ if (!desc)
return;
reg->mem_h = NULL;
- reg->is_mr = 0;
spin_lock_bh(&ib_conn->lock);
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_bh(&ib_conn->lock);
@@ -1049,7 +1085,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)iser_conn->login_resp_buf;
+ rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
@@ -1073,7 +1109,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -1110,7 +1146,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
@@ -1160,6 +1196,7 @@ static void
iser_handle_comp_error(struct ib_conn *ib_conn,
struct ib_wc *wc)
{
+ void *wr_id = (void *)(uintptr_t)wc->wr_id;
struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
ib_conn);
@@ -1168,8 +1205,8 @@ iser_handle_comp_error(struct ib_conn *ib_conn,
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
- if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
- struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;
+ if (is_iser_tx_desc(iser_conn, wr_id)) {
+ struct iser_tx_desc *desc = wr_id;
if (desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
@@ -1193,14 +1230,14 @@ static void iser_handle_wc(struct ib_wc *wc)
struct iser_rx_desc *rx_desc;
ib_conn = wc->qp->qp_context;
- if (wc->status == IB_WC_SUCCESS) {
+ if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)wc->wr_id;
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
iser_rcv_completion(rx_desc, wc->byte_len,
ib_conn);
} else
if (wc->opcode == IB_WC_SEND) {
- tx_desc = (struct iser_tx_desc *)wc->wr_id;
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
iser_snd_completion(tx_desc, ib_conn);
} else {
iser_err("Unknown wc opcode %d\n", wc->opcode);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5461924c9f10..db3c8c851af1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2929,7 +2929,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
return -ENOMEM;
sep_opt = options;
- while ((p = strsep(&sep_opt, ",")) != NULL) {
+ while ((p = strsep(&sep_opt, ",\n")) != NULL) {
if (!*p)
continue;
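The one-character change above lets strsep() treat a newline as a delimiter too, so a trailing '\n' (as appended by echo when the target string is written from userspace) no longer ends up glued to the last option value. Illustrative sketch, not taken from the driver:

#include <linux/string.h>

static void parse_example(void)
{
        char buf[] = "id_ext=0x1,ioc_guid=0x2\n";
        char *opts = buf, *p;

        while ((p = strsep(&opts, ",\n")) != NULL) {
                if (!*p)
                        continue;
                /* p is "id_ext=0x1", then "ioc_guid=0x2"; the newline is dropped */
        }
}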
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index e29c04e2aff4..e853a2134680 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
*/
static void gameport_init_port(struct gameport *gameport)
{
- static atomic_t gameport_no = ATOMIC_INIT(0);
+ static atomic_t gameport_no = ATOMIC_INIT(-1);
__module_get(THIS_MODULE);
mutex_init(&gameport->drv_mutex);
device_initialize(&gameport->dev);
dev_set_name(&gameport->dev, "gameport%lu",
- (unsigned long)atomic_inc_return(&gameport_no) - 1);
+ (unsigned long)atomic_inc_return(&gameport_no));
gameport->dev.bus = &gameport_bus;
gameport->dev.release = gameport_release_port;
if (gameport->parent)
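The gameport hunk above (and the input core and xpad hunks that follow) converge on the same numbering idiom: seed the counter with -1 so atomic_inc_return() hands out 0, 1, 2, ... directly, instead of post-correcting the returned value. Schematic version with made-up names:

static atomic_t example_no = ATOMIC_INIT(-1);

/* First caller gets 0, the second gets 1, and so on. */
static unsigned long example_next_id(void)
{
        return (unsigned long)atomic_inc_return(&example_no);
}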
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 0f175f55782b..04217c2e345c 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
*/
struct input_dev *input_allocate_device(void)
{
- static atomic_t input_no = ATOMIC_INIT(0);
+ static atomic_t input_no = ATOMIC_INIT(-1);
struct input_dev *dev;
dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
INIT_LIST_HEAD(&dev->node);
dev_set_name(&dev->dev, "input%lu",
- (unsigned long) atomic_inc_return(&input_no) - 1);
+ (unsigned long)atomic_inc_return(&input_no));
__module_get(THIS_MODULE);
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fc55f0d15b70..3aa2f3f3da5b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -886,8 +886,8 @@ static void xpad_led_set(struct led_classdev *led_cdev,
static int xpad_led_probe(struct usb_xpad *xpad)
{
- static atomic_t led_seq = ATOMIC_INIT(0);
- long led_no;
+ static atomic_t led_seq = ATOMIC_INIT(-1);
+ unsigned long led_no;
struct xpad_led *led;
struct led_classdev *led_cdev;
int error;
@@ -899,9 +899,9 @@ static int xpad_led_probe(struct usb_xpad *xpad)
if (!led)
return -ENOMEM;
- led_no = (long)atomic_inc_return(&led_seq) - 1;
+ led_no = atomic_inc_return(&led_seq);
- snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
led->xpad = xpad;
led_cdev = &led->led_cdev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a3958c63d7d5..96ee26c555e0 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -665,14 +665,14 @@ config KEYBOARD_CROS_EC
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
-config KEYBOARD_CAP1106
- tristate "Microchip CAP1106 touch sensor"
+config KEYBOARD_CAP11XX
+ tristate "Microchip CAP11XX based touch sensors"
depends on OF && I2C
select REGMAP_I2C
help
- Say Y here to enable the CAP1106 touch sensor driver.
+ Say Y here to enable the CAP11XX touch sensor driver.
To compile this driver as a module, choose M here: the
- module will be called cap1106.
+ module will be called cap11xx.
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 0a3345634d79..febafa527eb6 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CAP1106) += cap1106.o
+obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 7f4a8b58efc1..db1004dad108 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -184,7 +184,6 @@ static int adp5520_keys_remove(struct platform_device *pdev)
static struct platform_driver adp5520_keys_driver = {
.driver = {
.name = "adp5520-keys",
- .owner = THIS_MODULE,
},
.probe = adp5520_keys_probe,
.remove = adp5520_keys_remove,
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 096d6067ae1f..e04a3b4e55d6 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -45,6 +45,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
+#ifdef CONFIG_HW_CONSOLE
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
@@ -144,6 +145,32 @@ static unsigned char amikbd_keycode[0x78] __initdata = {
[103] = KEY_RIGHTMETA
};
+static void __init amikbd_init_console_keymaps(void)
+{
+ /* We can spare 512 bytes on the stack for temp_map in the init path. */
+ unsigned short temp_map[NR_KEYS];
+ int i, j;
+
+ for (i = 0; i < MAX_NR_KEYMAPS; i++) {
+ if (!key_maps[i])
+ continue;
+ memset(temp_map, 0, sizeof(temp_map));
+ for (j = 0; j < 0x78; j++) {
+ if (!amikbd_keycode[j])
+ continue;
+ temp_map[j] = key_maps[i][amikbd_keycode[j]];
+ }
+ for (j = 0; j < NR_KEYS; j++) {
+ if (!temp_map[j])
+ temp_map[j] = 0xf200;
+ }
+ memcpy(key_maps[i], temp_map, sizeof(temp_map));
+ }
+}
+#else /* !CONFIG_HW_CONSOLE */
+static inline void amikbd_init_console_keymaps(void) {}
+#endif /* !CONFIG_HW_CONSOLE */
+
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
[1] = KERN_WARNING "amikbd: keyboard lost sync\n",
@@ -186,7 +213,7 @@ static irqreturn_t amikbd_interrupt(int irq, void *data)
static int __init amikbd_probe(struct platform_device *pdev)
{
struct input_dev *dev;
- int i, j, err;
+ int i, err;
dev = input_allocate_device();
if (!dev) {
@@ -207,22 +234,8 @@ static int __init amikbd_probe(struct platform_device *pdev)
for (i = 0; i < 0x78; i++)
set_bit(i, dev->keybit);
- for (i = 0; i < MAX_NR_KEYMAPS; i++) {
- static u_short temp_map[NR_KEYS] __initdata;
- if (!key_maps[i])
- continue;
- memset(temp_map, 0, sizeof(temp_map));
- for (j = 0; j < 0x78; j++) {
- if (!amikbd_keycode[j])
- continue;
- temp_map[j] = key_maps[i][amikbd_keycode[j]];
- }
- for (j = 0; j < NR_KEYS; j++) {
- if (!temp_map[j])
- temp_map[j] = 0xf200;
- }
- memcpy(key_maps[i], temp_map, sizeof(temp_map));
- }
+ amikbd_init_console_keymaps();
+
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
dev);
@@ -255,7 +268,6 @@ static struct platform_driver amikbd_driver = {
.remove = __exit_p(amikbd_remove),
.driver = {
.name = "amiga-keyboard",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6f5d79569136..e27a25892db4 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -456,8 +456,9 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
keycode = atkbd->keycode[code];
- if (keycode != ATKBD_KEY_NULL)
- input_event(dev, EV_MSC, MSC_SCAN, code);
+ if (!(atkbd->release && test_bit(code, atkbd->force_release_mask)))
+ if (keycode != ATKBD_KEY_NULL)
+ input_event(dev, EV_MSC, MSC_SCAN, code);
switch (keycode) {
case ATKBD_KEY_NULL:
@@ -511,6 +512,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
input_sync(dev);
if (value && test_bit(code, atkbd->force_release_mask)) {
+ input_event(dev, EV_MSC, MSC_SCAN, code);
input_report_key(dev, keycode, 0);
input_sync(dev);
}
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index e6d46c5994d7..81b07dddae86 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -385,7 +385,6 @@ static int bfin_kpad_resume(struct platform_device *pdev)
static struct platform_driver bfin_kpad_device_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = bfin_kpad_probe,
.remove = bfin_kpad_remove,
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
deleted file mode 100644
index d70b65a14ced..000000000000
--- a/drivers/input/keyboard/cap1106.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Input driver for Microchip CAP1106, 6 channel capacitive touch sensor
- *
- * http://www.microchip.com/wwwproducts/Devices.aspx?product=CAP1106
- *
- * (c) 2014 Daniel Mack <linux@zonque.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#include <linux/i2c.h>
-#include <linux/gpio/consumer.h>
-
-#define CAP1106_REG_MAIN_CONTROL 0x00
-#define CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT (6)
-#define CAP1106_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
-#define CAP1106_REG_MAIN_CONTROL_DLSEEP BIT(4)
-#define CAP1106_REG_GENERAL_STATUS 0x02
-#define CAP1106_REG_SENSOR_INPUT 0x03
-#define CAP1106_REG_NOISE_FLAG_STATUS 0x0a
-#define CAP1106_REG_SENOR_DELTA(X) (0x10 + (X))
-#define CAP1106_REG_SENSITIVITY_CONTROL 0x1f
-#define CAP1106_REG_CONFIG 0x20
-#define CAP1106_REG_SENSOR_ENABLE 0x21
-#define CAP1106_REG_SENSOR_CONFIG 0x22
-#define CAP1106_REG_SENSOR_CONFIG2 0x23
-#define CAP1106_REG_SAMPLING_CONFIG 0x24
-#define CAP1106_REG_CALIBRATION 0x26
-#define CAP1106_REG_INT_ENABLE 0x27
-#define CAP1106_REG_REPEAT_RATE 0x28
-#define CAP1106_REG_MT_CONFIG 0x2a
-#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
-#define CAP1106_REG_MT_PATTERN 0x2d
-#define CAP1106_REG_RECALIB_CONFIG 0x2f
-#define CAP1106_REG_SENSOR_THRESH(X) (0x30 + (X))
-#define CAP1106_REG_SENSOR_NOISE_THRESH 0x38
-#define CAP1106_REG_STANDBY_CHANNEL 0x40
-#define CAP1106_REG_STANDBY_CONFIG 0x41
-#define CAP1106_REG_STANDBY_SENSITIVITY 0x42
-#define CAP1106_REG_STANDBY_THRESH 0x43
-#define CAP1106_REG_CONFIG2 0x44
-#define CAP1106_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
-#define CAP1106_REG_SENSOR_CALIB (0xb1 + (X))
-#define CAP1106_REG_SENSOR_CALIB_LSB1 0xb9
-#define CAP1106_REG_SENSOR_CALIB_LSB2 0xba
-#define CAP1106_REG_PRODUCT_ID 0xfd
-#define CAP1106_REG_MANUFACTURER_ID 0xfe
-#define CAP1106_REG_REVISION 0xff
-
-#define CAP1106_NUM_CHN 6
-#define CAP1106_PRODUCT_ID 0x55
-#define CAP1106_MANUFACTURER_ID 0x5d
-
-struct cap1106_priv {
- struct regmap *regmap;
- struct input_dev *idev;
-
- /* config */
- unsigned short keycodes[CAP1106_NUM_CHN];
-};
-
-static const struct reg_default cap1106_reg_defaults[] = {
- { CAP1106_REG_MAIN_CONTROL, 0x00 },
- { CAP1106_REG_GENERAL_STATUS, 0x00 },
- { CAP1106_REG_SENSOR_INPUT, 0x00 },
- { CAP1106_REG_NOISE_FLAG_STATUS, 0x00 },
- { CAP1106_REG_SENSITIVITY_CONTROL, 0x2f },
- { CAP1106_REG_CONFIG, 0x20 },
- { CAP1106_REG_SENSOR_ENABLE, 0x3f },
- { CAP1106_REG_SENSOR_CONFIG, 0xa4 },
- { CAP1106_REG_SENSOR_CONFIG2, 0x07 },
- { CAP1106_REG_SAMPLING_CONFIG, 0x39 },
- { CAP1106_REG_CALIBRATION, 0x00 },
- { CAP1106_REG_INT_ENABLE, 0x3f },
- { CAP1106_REG_REPEAT_RATE, 0x3f },
- { CAP1106_REG_MT_CONFIG, 0x80 },
- { CAP1106_REG_MT_PATTERN_CONFIG, 0x00 },
- { CAP1106_REG_MT_PATTERN, 0x3f },
- { CAP1106_REG_RECALIB_CONFIG, 0x8a },
- { CAP1106_REG_SENSOR_THRESH(0), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(1), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(2), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(3), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(4), 0x40 },
- { CAP1106_REG_SENSOR_THRESH(5), 0x40 },
- { CAP1106_REG_SENSOR_NOISE_THRESH, 0x01 },
- { CAP1106_REG_STANDBY_CHANNEL, 0x00 },
- { CAP1106_REG_STANDBY_CONFIG, 0x39 },
- { CAP1106_REG_STANDBY_SENSITIVITY, 0x02 },
- { CAP1106_REG_STANDBY_THRESH, 0x40 },
- { CAP1106_REG_CONFIG2, 0x40 },
- { CAP1106_REG_SENSOR_CALIB_LSB1, 0x00 },
- { CAP1106_REG_SENSOR_CALIB_LSB2, 0x00 },
-};
-
-static bool cap1106_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case CAP1106_REG_MAIN_CONTROL:
- case CAP1106_REG_SENSOR_INPUT:
- case CAP1106_REG_SENOR_DELTA(0):
- case CAP1106_REG_SENOR_DELTA(1):
- case CAP1106_REG_SENOR_DELTA(2):
- case CAP1106_REG_SENOR_DELTA(3):
- case CAP1106_REG_SENOR_DELTA(4):
- case CAP1106_REG_SENOR_DELTA(5):
- case CAP1106_REG_PRODUCT_ID:
- case CAP1106_REG_MANUFACTURER_ID:
- case CAP1106_REG_REVISION:
- return true;
- }
-
- return false;
-}
-
-static const struct regmap_config cap1106_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = CAP1106_REG_REVISION,
- .reg_defaults = cap1106_reg_defaults,
-
- .num_reg_defaults = ARRAY_SIZE(cap1106_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
- .volatile_reg = cap1106_volatile_reg,
-};
-
-static irqreturn_t cap1106_thread_func(int irq_num, void *data)
-{
- struct cap1106_priv *priv = data;
- unsigned int status;
- int ret, i;
-
- /*
- * Deassert interrupt. This needs to be done before reading the status
- * registers, which will not carry valid values otherwise.
- */
- ret = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, 1, 0);
- if (ret < 0)
- goto out;
-
- ret = regmap_read(priv->regmap, CAP1106_REG_SENSOR_INPUT, &status);
- if (ret < 0)
- goto out;
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- input_report_key(priv->idev, priv->keycodes[i],
- status & (1 << i));
-
- input_sync(priv->idev);
-
-out:
- return IRQ_HANDLED;
-}
-
-static int cap1106_set_sleep(struct cap1106_priv *priv, bool sleep)
-{
- return regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_DLSEEP,
- sleep ? CAP1106_REG_MAIN_CONTROL_DLSEEP : 0);
-}
-
-static int cap1106_input_open(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- return cap1106_set_sleep(priv, false);
-}
-
-static void cap1106_input_close(struct input_dev *idev)
-{
- struct cap1106_priv *priv = input_get_drvdata(idev);
-
- cap1106_set_sleep(priv, true);
-}
-
-static int cap1106_i2c_probe(struct i2c_client *i2c_client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &i2c_client->dev;
- struct cap1106_priv *priv;
- struct device_node *node;
- int i, error, irq, gain = 0;
- unsigned int val, rev;
- u32 gain32, keycodes[CAP1106_NUM_CHN];
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->regmap = devm_regmap_init_i2c(i2c_client, &cap1106_regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
- error = regmap_read(priv->regmap, CAP1106_REG_PRODUCT_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_PRODUCT_ID) {
- dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_PRODUCT_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_MANUFACTURER_ID, &val);
- if (error)
- return error;
-
- if (val != CAP1106_MANUFACTURER_ID) {
- dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
- val, CAP1106_MANUFACTURER_ID);
- return -ENODEV;
- }
-
- error = regmap_read(priv->regmap, CAP1106_REG_REVISION, &rev);
- if (error < 0)
- return error;
-
- dev_info(dev, "CAP1106 detected, revision 0x%02x\n", rev);
- i2c_set_clientdata(i2c_client, priv);
- node = dev->of_node;
-
- if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
- if (is_power_of_2(gain32) && gain32 <= 8)
- gain = ilog2(gain32);
- else
- dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
- }
-
- BUILD_BUG_ON(ARRAY_SIZE(keycodes) != ARRAY_SIZE(priv->keycodes));
-
- /* Provide some useful defaults */
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- keycodes[i] = KEY_A + i;
-
- of_property_read_u32_array(node, "linux,keycodes",
- keycodes, ARRAY_SIZE(keycodes));
-
- for (i = 0; i < ARRAY_SIZE(keycodes); i++)
- priv->keycodes[i] = keycodes[i];
-
- error = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
- CAP1106_REG_MAIN_CONTROL_GAIN_MASK,
- gain << CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT);
- if (error)
- return error;
-
- /* Disable autorepeat. The Linux input system has its own handling. */
- error = regmap_write(priv->regmap, CAP1106_REG_REPEAT_RATE, 0);
- if (error)
- return error;
-
- priv->idev = devm_input_allocate_device(dev);
- if (!priv->idev)
- return -ENOMEM;
-
- priv->idev->name = "CAP1106 capacitive touch sensor";
- priv->idev->id.bustype = BUS_I2C;
- priv->idev->evbit[0] = BIT_MASK(EV_KEY);
-
- if (of_property_read_bool(node, "autorepeat"))
- __set_bit(EV_REP, priv->idev->evbit);
-
- for (i = 0; i < CAP1106_NUM_CHN; i++)
- __set_bit(priv->keycodes[i], priv->idev->keybit);
-
- __clear_bit(KEY_RESERVED, priv->idev->keybit);
-
- priv->idev->keycode = priv->keycodes;
- priv->idev->keycodesize = sizeof(priv->keycodes[0]);
- priv->idev->keycodemax = ARRAY_SIZE(priv->keycodes);
-
- priv->idev->id.vendor = CAP1106_MANUFACTURER_ID;
- priv->idev->id.product = CAP1106_PRODUCT_ID;
- priv->idev->id.version = rev;
-
- priv->idev->open = cap1106_input_open;
- priv->idev->close = cap1106_input_close;
-
- input_set_drvdata(priv->idev, priv);
-
- /*
- * Put the device in deep sleep mode for now.
- * ->open() will bring it back once the it is actually needed.
- */
- cap1106_set_sleep(priv, true);
-
- error = input_register_device(priv->idev);
- if (error)
- return error;
-
- irq = irq_of_parse_and_map(node, 0);
- if (!irq) {
- dev_err(dev, "Unable to parse or map IRQ\n");
- return -ENXIO;
- }
-
- error = devm_request_threaded_irq(dev, irq, NULL, cap1106_thread_func,
- IRQF_ONESHOT, dev_name(dev), priv);
- if (error)
- return error;
-
- return 0;
-}
-
-static const struct of_device_id cap1106_dt_ids[] = {
- { .compatible = "microchip,cap1106", },
- {}
-};
-MODULE_DEVICE_TABLE(of, cap1106_dt_ids);
-
-static const struct i2c_device_id cap1106_i2c_ids[] = {
- { "cap1106", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, cap1106_i2c_ids);
-
-static struct i2c_driver cap1106_i2c_driver = {
- .driver = {
- .name = "cap1106",
- .owner = THIS_MODULE,
- .of_match_table = cap1106_dt_ids,
- },
- .id_table = cap1106_i2c_ids,
- .probe = cap1106_i2c_probe,
-};
-
-module_i2c_driver(cap1106_i2c_driver);
-
-MODULE_ALIAS("platform:cap1106");
-MODULE_DESCRIPTION("Microchip CAP1106 driver");
-MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
new file mode 100644
index 000000000000..4f59f0bab28f
--- /dev/null
+++ b/drivers/input/keyboard/cap11xx.c
@@ -0,0 +1,376 @@
+/*
+ * Input driver for Microchip CAP11xx based capacitive touch sensors
+ *
+ * (c) 2014 Daniel Mack <linux@zonque.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+
+#define CAP11XX_REG_MAIN_CONTROL 0x00
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT (6)
+#define CAP11XX_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
+#define CAP11XX_REG_MAIN_CONTROL_DLSEEP BIT(4)
+#define CAP11XX_REG_GENERAL_STATUS 0x02
+#define CAP11XX_REG_SENSOR_INPUT 0x03
+#define CAP11XX_REG_NOISE_FLAG_STATUS 0x0a
+#define CAP11XX_REG_SENOR_DELTA(X) (0x10 + (X))
+#define CAP11XX_REG_SENSITIVITY_CONTROL 0x1f
+#define CAP11XX_REG_CONFIG 0x20
+#define CAP11XX_REG_SENSOR_ENABLE 0x21
+#define CAP11XX_REG_SENSOR_CONFIG 0x22
+#define CAP11XX_REG_SENSOR_CONFIG2 0x23
+#define CAP11XX_REG_SAMPLING_CONFIG 0x24
+#define CAP11XX_REG_CALIBRATION 0x26
+#define CAP11XX_REG_INT_ENABLE 0x27
+#define CAP11XX_REG_REPEAT_RATE 0x28
+#define CAP11XX_REG_MT_CONFIG 0x2a
+#define CAP11XX_REG_MT_PATTERN_CONFIG 0x2b
+#define CAP11XX_REG_MT_PATTERN 0x2d
+#define CAP11XX_REG_RECALIB_CONFIG 0x2f
+#define CAP11XX_REG_SENSOR_THRESH(X) (0x30 + (X))
+#define CAP11XX_REG_SENSOR_NOISE_THRESH 0x38
+#define CAP11XX_REG_STANDBY_CHANNEL 0x40
+#define CAP11XX_REG_STANDBY_CONFIG 0x41
+#define CAP11XX_REG_STANDBY_SENSITIVITY 0x42
+#define CAP11XX_REG_STANDBY_THRESH 0x43
+#define CAP11XX_REG_CONFIG2 0x44
+#define CAP11XX_REG_CONFIG2_ALT_POL BIT(6)
+#define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
+#define CAP11XX_REG_SENSOR_CALIB(X) (0xb1 + (X))
+#define CAP11XX_REG_SENSOR_CALIB_LSB1 0xb9
+#define CAP11XX_REG_SENSOR_CALIB_LSB2 0xba
+#define CAP11XX_REG_PRODUCT_ID 0xfd
+#define CAP11XX_REG_MANUFACTURER_ID 0xfe
+#define CAP11XX_REG_REVISION 0xff
+
+#define CAP11XX_MANUFACTURER_ID 0x5d
+
+struct cap11xx_priv {
+ struct regmap *regmap;
+ struct input_dev *idev;
+
+ /* config */
+ u32 keycodes[];
+};
+
+struct cap11xx_hw_model {
+ u8 product_id;
+ unsigned int num_channels;
+};
+
+enum {
+ CAP1106,
+ CAP1126,
+ CAP1188,
+};
+
+static const struct cap11xx_hw_model cap11xx_devices[] = {
+ [CAP1106] = { .product_id = 0x55, .num_channels = 6 },
+ [CAP1126] = { .product_id = 0x53, .num_channels = 6 },
+ [CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+};
+
+static const struct reg_default cap11xx_reg_defaults[] = {
+ { CAP11XX_REG_MAIN_CONTROL, 0x00 },
+ { CAP11XX_REG_GENERAL_STATUS, 0x00 },
+ { CAP11XX_REG_SENSOR_INPUT, 0x00 },
+ { CAP11XX_REG_NOISE_FLAG_STATUS, 0x00 },
+ { CAP11XX_REG_SENSITIVITY_CONTROL, 0x2f },
+ { CAP11XX_REG_CONFIG, 0x20 },
+ { CAP11XX_REG_SENSOR_ENABLE, 0x3f },
+ { CAP11XX_REG_SENSOR_CONFIG, 0xa4 },
+ { CAP11XX_REG_SENSOR_CONFIG2, 0x07 },
+ { CAP11XX_REG_SAMPLING_CONFIG, 0x39 },
+ { CAP11XX_REG_CALIBRATION, 0x00 },
+ { CAP11XX_REG_INT_ENABLE, 0x3f },
+ { CAP11XX_REG_REPEAT_RATE, 0x3f },
+ { CAP11XX_REG_MT_CONFIG, 0x80 },
+ { CAP11XX_REG_MT_PATTERN_CONFIG, 0x00 },
+ { CAP11XX_REG_MT_PATTERN, 0x3f },
+ { CAP11XX_REG_RECALIB_CONFIG, 0x8a },
+ { CAP11XX_REG_SENSOR_THRESH(0), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(1), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(2), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(3), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(4), 0x40 },
+ { CAP11XX_REG_SENSOR_THRESH(5), 0x40 },
+ { CAP11XX_REG_SENSOR_NOISE_THRESH, 0x01 },
+ { CAP11XX_REG_STANDBY_CHANNEL, 0x00 },
+ { CAP11XX_REG_STANDBY_CONFIG, 0x39 },
+ { CAP11XX_REG_STANDBY_SENSITIVITY, 0x02 },
+ { CAP11XX_REG_STANDBY_THRESH, 0x40 },
+ { CAP11XX_REG_CONFIG2, 0x40 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB1, 0x00 },
+ { CAP11XX_REG_SENSOR_CALIB_LSB2, 0x00 },
+};
+
+static bool cap11xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CAP11XX_REG_MAIN_CONTROL:
+ case CAP11XX_REG_SENSOR_INPUT:
+ case CAP11XX_REG_SENOR_DELTA(0):
+ case CAP11XX_REG_SENOR_DELTA(1):
+ case CAP11XX_REG_SENOR_DELTA(2):
+ case CAP11XX_REG_SENOR_DELTA(3):
+ case CAP11XX_REG_SENOR_DELTA(4):
+ case CAP11XX_REG_SENOR_DELTA(5):
+ case CAP11XX_REG_PRODUCT_ID:
+ case CAP11XX_REG_MANUFACTURER_ID:
+ case CAP11XX_REG_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config cap11xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CAP11XX_REG_REVISION,
+ .reg_defaults = cap11xx_reg_defaults,
+
+ .num_reg_defaults = ARRAY_SIZE(cap11xx_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = cap11xx_volatile_reg,
+};
+
+static irqreturn_t cap11xx_thread_func(int irq_num, void *data)
+{
+ struct cap11xx_priv *priv = data;
+ unsigned int status;
+ int ret, i;
+
+ /*
+ * Deassert interrupt. This needs to be done before reading the status
+ * registers, which will not carry valid values otherwise.
+ */
+ ret = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL, 1, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(priv->regmap, CAP11XX_REG_SENSOR_INPUT, &status);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < priv->idev->keycodemax; i++)
+ input_report_key(priv->idev, priv->keycodes[i],
+ status & (1 << i));
+
+ input_sync(priv->idev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
+{
+ return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_DLSEEP,
+ sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
+}
+
+static int cap11xx_input_open(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ return cap11xx_set_sleep(priv, false);
+}
+
+static void cap11xx_input_close(struct input_dev *idev)
+{
+ struct cap11xx_priv *priv = input_get_drvdata(idev);
+
+ cap11xx_set_sleep(priv, true);
+}
+
+static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c_client->dev;
+ struct cap11xx_priv *priv;
+ struct device_node *node;
+ const struct cap11xx_hw_model *cap;
+ int i, error, irq, gain = 0;
+ unsigned int val, rev;
+ u32 gain32;
+
+ if (id->driver_data >= ARRAY_SIZE(cap11xx_devices)) {
+ dev_err(dev, "Invalid device ID %lu\n", id->driver_data);
+ return -EINVAL;
+ }
+
+ cap = &cap11xx_devices[id->driver_data];
+ if (!cap || !cap->num_channels) {
+ dev_err(dev, "Invalid device configuration\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev,
+ sizeof(*priv) +
+ cap->num_channels * sizeof(priv->keycodes[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c_client, &cap11xx_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_PRODUCT_ID, &val);
+ if (error)
+ return error;
+
+ if (val != cap->product_id) {
+ dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
+ val, cap->product_id);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_MANUFACTURER_ID, &val);
+ if (error)
+ return error;
+
+ if (val != CAP11XX_MANUFACTURER_ID) {
+ dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
+ val, CAP11XX_MANUFACTURER_ID);
+ return -ENXIO;
+ }
+
+ error = regmap_read(priv->regmap, CAP11XX_REG_REVISION, &rev);
+ if (error < 0)
+ return error;
+
+ dev_info(dev, "CAP11XX detected, revision 0x%02x\n", rev);
+ i2c_set_clientdata(i2c_client, priv);
+ node = dev->of_node;
+
+ if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
+ if (is_power_of_2(gain32) && gain32 <= 8)
+ gain = ilog2(gain32);
+ else
+ dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
+ }
+
+ if (of_property_read_bool(node, "microchip,irq-active-high")) {
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_CONFIG2,
+ CAP11XX_REG_CONFIG2_ALT_POL, 0);
+ if (error)
+ return error;
+ }
+
+ /* Provide some useful defaults */
+ for (i = 0; i < cap->num_channels; i++)
+ priv->keycodes[i] = KEY_A + i;
+
+ of_property_read_u32_array(node, "linux,keycodes",
+ priv->keycodes, cap->num_channels);
+
+ error = regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+ gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+
+ /* Disable autorepeat. The Linux input system has its own handling. */
+ error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+ if (error)
+ return error;
+
+ priv->idev = devm_input_allocate_device(dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "CAP11XX capacitive touch sensor";
+ priv->idev->id.bustype = BUS_I2C;
+ priv->idev->evbit[0] = BIT_MASK(EV_KEY);
+
+ if (of_property_read_bool(node, "autorepeat"))
+ __set_bit(EV_REP, priv->idev->evbit);
+
+ for (i = 0; i < cap->num_channels; i++)
+ __set_bit(priv->keycodes[i], priv->idev->keybit);
+
+ __clear_bit(KEY_RESERVED, priv->idev->keybit);
+
+ priv->idev->keycode = priv->keycodes;
+ priv->idev->keycodesize = sizeof(priv->keycodes[0]);
+ priv->idev->keycodemax = cap->num_channels;
+
+ priv->idev->id.vendor = CAP11XX_MANUFACTURER_ID;
+ priv->idev->id.product = cap->product_id;
+ priv->idev->id.version = rev;
+
+ priv->idev->open = cap11xx_input_open;
+ priv->idev->close = cap11xx_input_close;
+
+ input_set_drvdata(priv->idev, priv);
+
+ /*
+ * Put the device in deep sleep mode for now.
+ * ->open() will bring it back once it is actually needed.
+ */
+ cap11xx_set_sleep(priv, true);
+
+ error = input_register_device(priv->idev);
+ if (error)
+ return error;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to parse or map IRQ\n");
+ return -ENXIO;
+ }
+
+ error = devm_request_threaded_irq(dev, irq, NULL, cap11xx_thread_func,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id cap11xx_dt_ids[] = {
+ { .compatible = "microchip,cap1106", },
+ { .compatible = "microchip,cap1126", },
+ { .compatible = "microchip,cap1188", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cap11xx_dt_ids);
+
+static const struct i2c_device_id cap11xx_i2c_ids[] = {
+ { "cap1106", CAP1106 },
+ { "cap1126", CAP1126 },
+ { "cap1188", CAP1188 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cap11xx_i2c_ids);
+
+static struct i2c_driver cap11xx_i2c_driver = {
+ .driver = {
+ .name = "cap11xx",
+ .owner = THIS_MODULE,
+ .of_match_table = cap11xx_dt_ids,
+ },
+ .id_table = cap11xx_i2c_ids,
+ .probe = cap11xx_i2c_probe,
+};
+
+module_i2c_driver(cap11xx_i2c_driver);
+
+MODULE_ALIAS("platform:cap11xx");
+MODULE_DESCRIPTION("Microchip CAP11XX driver");
+MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index 552b65c6e6b0..27ef29f8fe6a 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -194,7 +194,6 @@ MODULE_DEVICE_TABLE(of, clps711x_keypad_of_match);
static struct platform_driver clps711x_keypad_driver = {
.driver = {
.name = "clps711x-keypad",
- .owner = THIS_MODULE,
.of_match_table = clps711x_keypad_of_match,
},
.probe = clps711x_keypad_probe,
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index 1559dc1cf951..f363d1d2907a 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -322,7 +322,6 @@ static int davinci_ks_remove(struct platform_device *pdev)
static struct platform_driver davinci_ks_driver = {
.driver = {
.name = "davinci_keyscan",
- .owner = THIS_MODULE,
},
.remove = davinci_ks_remove,
};
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index e59876212b8c..f77b295e0123 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -373,7 +373,6 @@ static int ep93xx_keypad_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_keypad_driver = {
.driver = {
.name = "ep93xx-keypad",
- .owner = THIS_MODULE,
.pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 69e854763370..907e4e278fce 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -181,7 +181,6 @@ static int events_probe(struct platform_device *pdev)
static struct platform_driver events_driver = {
.probe = events_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "goldfish_events",
},
};
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8c98e97f8e41..d4dd78a7d56b 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
@@ -617,27 +618,31 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio;
+ int gpio = -1;
enum of_gpio_flags flags;
- if (!of_find_property(pp, "gpios", NULL)) {
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios\n");
- continue;
- }
+ button = &pdata->buttons[i++];
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
+ if (!of_find_property(pp, "gpios", NULL)) {
+ button->irq = irq_of_parse_and_map(pp, 0);
+ if (button->irq == 0) {
+ i--;
+ pdata->nbuttons--;
+ dev_warn(dev, "Found button without gpios or irqs\n");
+ continue;
+ }
+ } else {
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ return ERR_PTR(error);
+ }
}
- button = &pdata->buttons[i++];
-
button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
@@ -835,7 +840,6 @@ static struct platform_driver gpio_keys_device_driver = {
.remove = gpio_keys_remove,
.driver = {
.name = "gpio-keys",
- .owner = THIS_MODULE,
.pm = &gpio_keys_pm_ops,
.of_match_table = of_match_ptr(gpio_keys_of_match),
}
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c9c1c8ca7267..90df4df58b07 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -297,7 +297,6 @@ static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = gpio_keys_polled_of_match,
},
};
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 20a99c368d16..e53f232eda0e 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -580,7 +580,6 @@ static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
static struct platform_driver imx_keypad_driver = {
.driver = {
.name = "imx-keypad",
- .owner = THIS_MODULE,
.pm = &imx_kbd_pm_ops,
.of_match_table = of_match_ptr(imx_keypad_of_match),
},
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 0ba4428da24a..80c81278ad2c 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -237,7 +237,6 @@ static int jornada680kbd_probe(struct platform_device *pdev)
static struct platform_driver jornada680kbd_driver = {
.driver = {
.name = "jornada680_kbd",
- .owner = THIS_MODULE,
},
.probe = jornada680kbd_probe,
};
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index cd729d485e98..421d9c55b0e8 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -167,7 +167,6 @@ MODULE_ALIAS("platform:jornada720_kbd");
static struct platform_driver jornada720_kbd_driver = {
.driver = {
.name = "jornada720_kbd",
- .owner = THIS_MODULE,
},
.probe = jornada720_kbd_probe,
.remove = jornada720_kbd_remove,
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index cb32e2b506b7..21bea52d4365 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -616,6 +616,8 @@ static ssize_t lm8323_set_disable(struct device *dev,
unsigned int i;
ret = kstrtouint(buf, 10, &i);
+ if (ret)
+ return ret;
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 8b1b01361ec6..265d641c40e2 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -66,7 +66,6 @@
struct lpc32xx_kscan_drv {
struct input_dev *input;
struct clk *clk;
- struct resource *iores;
void __iomem *kscan_base;
unsigned int irq;
@@ -188,32 +187,27 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
return -EINVAL;
}
- kscandat = kzalloc(sizeof(struct lpc32xx_kscan_drv), GFP_KERNEL);
- if (!kscandat) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ kscandat = devm_kzalloc(&pdev->dev, sizeof(*kscandat),
+ GFP_KERNEL);
+ if (!kscandat)
return -ENOMEM;
- }
error = lpc32xx_parse_dt(&pdev->dev, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to parse device tree\n");
- goto err_free_mem;
+ return error;
}
keymap_size = sizeof(kscandat->keymap[0]) *
(kscandat->matrix_sz << kscandat->row_shift);
- kscandat->keymap = kzalloc(keymap_size, GFP_KERNEL);
- if (!kscandat->keymap) {
- dev_err(&pdev->dev, "could not allocate memory for keymap\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kscandat->keymap = devm_kzalloc(&pdev->dev, keymap_size, GFP_KERNEL);
+ if (!kscandat->keymap)
+ return -ENOMEM;
- kscandat->input = input = input_allocate_device();
+ kscandat->input = input = devm_input_allocate_device(&pdev->dev);
if (!input) {
dev_err(&pdev->dev, "failed to allocate input device\n");
- error = -ENOMEM;
- goto err_free_keymap;
+ return -ENOMEM;
}
/* Setup key input */
@@ -234,39 +228,26 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
kscandat->keymap, kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_input;
+ return error;
}
input_set_drvdata(kscandat->input, kscandat);
- kscandat->iores = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kscandat->iores) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_input;
- }
-
- kscandat->kscan_base = ioremap(kscandat->iores->start,
- resource_size(kscandat->iores));
- if (!kscandat->kscan_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -EBUSY;
- goto err_release_memregion;
- }
+ kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kscandat->kscan_base))
+ return PTR_ERR(kscandat->kscan_base);
/* Get the key scanner clock */
- kscandat->clk = clk_get(&pdev->dev, NULL);
+ kscandat->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kscandat->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
- error = PTR_ERR(kscandat->clk);
- goto err_unmap;
+ return PTR_ERR(kscandat->clk);
}
/* Configure the key scanner */
error = clk_prepare_enable(kscandat->clk);
if (error)
- goto err_clk_put;
+ return error;
writel(kscandat->deb_clks, LPC32XX_KS_DEB(kscandat->kscan_base));
writel(kscandat->scan_delay, LPC32XX_KS_SCAN_CTL(kscandat->kscan_base));
@@ -277,52 +258,20 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
writel(1, LPC32XX_KS_IRQ(kscandat->kscan_base));
clk_disable_unprepare(kscandat->clk);
- error = request_irq(irq, lpc32xx_kscan_irq, 0, pdev->name, kscandat);
+ error = devm_request_irq(&pdev->dev, irq, lpc32xx_kscan_irq, 0,
+ pdev->name, kscandat);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_clk_put;
+ return error;
}
error = input_register_device(kscandat->input);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ return error;
}
platform_set_drvdata(pdev, kscandat);
- return 0;
-
-err_free_irq:
- free_irq(irq, kscandat);
-err_clk_put:
- clk_put(kscandat->clk);
-err_unmap:
- iounmap(kscandat->kscan_base);
-err_release_memregion:
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
-err_free_input:
- input_free_device(kscandat->input);
-err_free_keymap:
- kfree(kscandat->keymap);
-err_free_mem:
- kfree(kscandat);
-
- return error;
-}
-
-static int lpc32xx_kscan_remove(struct platform_device *pdev)
-{
- struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
-
- free_irq(platform_get_irq(pdev, 0), kscandat);
- clk_put(kscandat->clk);
- iounmap(kscandat->kscan_base);
- release_mem_region(kscandat->iores->start,
- resource_size(kscandat->iores));
- input_unregister_device(kscandat->input);
- kfree(kscandat->keymap);
- kfree(kscandat);
return 0;
}
@@ -378,10 +327,8 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &lpc32xx_kscan_pm_ops,
.of_match_table = lpc32xx_kscan_match,
}
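The lpc32xx conversion above (mirrored by the mpr121 and pxa27x hunks further down) is the standard move to managed resources: each kzalloc/ioremap/clk_get/request_irq becomes its devm_* counterpart, the hand-rolled error-unwind labels disappear, and the remove() callback can be dropped entirely. A condensed sketch of the resulting probe shape, with placeholder names rather than the driver's own:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

struct example_drv {
        void __iomem *base;
};

static irqreturn_t example_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct example_drv *drv;
        int irq, error;

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        drv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(drv->base))
                return PTR_ERR(drv->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* Released automatically on probe failure or device unbind. */
        error = devm_request_irq(&pdev->dev, irq, example_irq, 0,
                                 pdev->name, drv);
        if (error)
                return error;

        platform_set_drvdata(pdev, drv);
        return 0;
}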
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e651fa692afe..b370a59cb759 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -565,7 +565,6 @@ static struct platform_driver matrix_keypad_driver = {
.remove = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
- .owner = THIS_MODULE,
.pm = &matrix_keypad_pm_ops,
.of_match_table = of_match_ptr(matrix_keypad_dt_match),
},
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 009c82256e89..3aa2ec45bcab 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -214,13 +214,14 @@ static int mpr_touchkey_probe(struct i2c_client *client,
return -EINVAL;
}
- mpr121 = kzalloc(sizeof(struct mpr121_touchkey), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!mpr121 || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ mpr121 = devm_kzalloc(&client->dev, sizeof(*mpr121),
+ GFP_KERNEL);
+ if (!mpr121)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev)
+ return -ENOMEM;
mpr121->client = client;
mpr121->input_dev = input_dev;
@@ -243,44 +244,26 @@ static int mpr_touchkey_probe(struct i2c_client *client,
error = mpr121_phys_init(pdata, mpr121, client);
if (error) {
dev_err(&client->dev, "Failed to init register\n");
- goto err_free_mem;
+ return error;
}
- error = request_threaded_irq(client->irq, NULL,
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
mpr_touchkey_interrupt,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, mpr121);
device_init_wakeup(&client->dev, pdata->wakeup);
return 0;
-
-err_free_irq:
- free_irq(client->irq, mpr121);
-err_free_mem:
- input_free_device(input_dev);
- kfree(mpr121);
- return error;
-}
-
-static int mpr_touchkey_remove(struct i2c_client *client)
-{
- struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
-
- free_irq(client->irq, mpr121);
- input_unregister_device(mpr121->input_dev);
- kfree(mpr121);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -327,7 +310,6 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 63332e2f8628..c7d5b1666fc3 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -425,7 +425,6 @@ static SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops,
static struct platform_driver ske_keypad_driver = {
.driver = {
.name = "nmk-ske-keypad",
- .owner = THIS_MODULE,
.pm = &ske_keypad_dev_pm_ops,
},
.remove = ske_keypad_remove,
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index b31064981e96..7abfd34eb87e 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -268,7 +268,6 @@ MODULE_DEVICE_TABLE(of, nspire_keypad_dt_match);
static struct platform_driver nspire_keypad_driver = {
.driver = {
.name = "nspire-keypad",
- .owner = THIS_MODULE,
.of_match_table = nspire_keypad_dt_match,
},
.probe = nspire_keypad_probe,
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index b1acc9852eb7..7502e46165fa 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -383,7 +383,6 @@ static struct platform_driver omap_kp_driver = {
.resume = omap_kp_resume,
.driver = {
.name = "omap-keypad",
- .owner = THIS_MODULE,
},
};
module_platform_driver(omap_kp_driver);
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 024b7bdffe5b..b052afec9a11 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -460,7 +460,6 @@ static struct platform_driver omap4_keypad_driver = {
.remove = omap4_keypad_remove,
.driver = {
.name = "omap4-keypad",
- .owner = THIS_MODULE,
.pm = &omap4_keypad_pm_ops,
.of_match_table = omap_keypad_dt_match,
},
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 80c6b0ef3fc8..32580afecc26 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -687,7 +687,6 @@ static struct platform_driver pmic8xxx_kp_driver = {
.probe = pmic8xxx_kp_probe,
.driver = {
.name = "pm8xxx-keypad",
- .owner = THIS_MODULE,
.pm = &pm8xxx_kp_pm_ops,
.of_match_table = pm8xxx_match_table,
},
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a15063bea700..a90d6bdc499e 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -741,37 +741,27 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
return -ENXIO;
}
- keypad = kzalloc(sizeof(struct pxa27x_keypad), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- error = -ENOMEM;
- goto failed_free;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad),
+ GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
keypad->pdata = pdata;
keypad->input_dev = input_dev;
keypad->irq = irq;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto failed_free;
- }
-
- keypad->mmio_base = ioremap(res->start, resource_size(res));
- if (keypad->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto failed_free_mem;
- }
+ keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(keypad->mmio_base))
+ return PTR_ERR(keypad->mmio_base);
- keypad->clk = clk_get(&pdev->dev, NULL);
+ keypad->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clock\n");
- error = PTR_ERR(keypad->clk);
- goto failed_free_io;
+ return PTR_ERR(keypad->clk);
}
input_dev->name = pdev->name;
@@ -802,7 +792,7 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
}
if (error) {
dev_err(&pdev->dev, "failed to build keycode\n");
- goto failed_put_clk;
+ return error;
}
keypad->row_shift = get_count_order(pdata->matrix_key_cols);
@@ -812,61 +802,26 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] |= BIT_MASK(EV_REL);
}
- error = request_irq(irq, pxa27x_keypad_irq_handler, 0,
- pdev->name, keypad);
+ error = devm_request_irq(&pdev->dev, irq, pxa27x_keypad_irq_handler,
+ 0, pdev->name, keypad);
if (error) {
dev_err(&pdev->dev, "failed to request IRQ\n");
- goto failed_put_clk;
+ return error;
}
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto failed_free_irq;
+ return error;
}
platform_set_drvdata(pdev, keypad);
device_init_wakeup(&pdev->dev, 1);
return 0;
-
-failed_free_irq:
- free_irq(irq, keypad);
-failed_put_clk:
- clk_put(keypad->clk);
-failed_free_io:
- iounmap(keypad->mmio_base);
-failed_free_mem:
- release_mem_region(res->start, resource_size(res));
-failed_free:
- input_free_device(input_dev);
- kfree(keypad);
- return error;
}
-static int pxa27x_keypad_remove(struct platform_device *pdev)
-{
- struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(keypad->irq, keypad);
- clk_put(keypad->clk);
-
- input_unregister_device(keypad->input_dev);
- iounmap(keypad->mmio_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:pxa27x-keypad");
-
#ifdef CONFIG_OF
static const struct of_device_id pxa27x_keypad_dt_match[] = {
{ .compatible = "marvell,pxa27x-keypad" },
@@ -877,11 +832,9 @@ MODULE_DEVICE_TABLE(of, pxa27x_keypad_dt_match);
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.of_match_table = of_match_ptr(pxa27x_keypad_dt_match),
- .owner = THIS_MODULE,
.pm = &pxa27x_keypad_pm_ops,
},
};
@@ -889,3 +842,5 @@ module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-keypad");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 374ca0246c8f..1cf5211fddaa 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -189,7 +189,6 @@ static int pxa930_rotary_remove(struct platform_device *pdev)
static struct platform_driver pxa930_rotary_driver = {
.driver = {
.name = "pxa930-rotary",
- .owner = THIS_MODULE,
},
.probe = pxa930_rotary_probe,
.remove = pxa930_rotary_remove,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index c994e3bbd776..6b9fdf6cf8e8 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -602,7 +602,6 @@ static struct platform_driver samsung_keypad_driver = {
.remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(samsung_keypad_dt_match),
.pm = &samsung_keypad_pm_ops,
},
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 258af10e5811..f42a543db043 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -385,7 +385,6 @@ static struct platform_driver spear_kbd_driver = {
.remove = spear_kbd_remove,
.driver = {
.name = "keyboard",
- .owner = THIS_MODULE,
.pm = &spear_kbd_pm_ops,
.of_match_table = of_match_ptr(spear_kbd_id_table),
},
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index ad7abae69078..8ff612d160b0 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -505,7 +505,6 @@ static SIMPLE_DEV_PM_OPS(tc3589x_keypad_dev_pm_ops,
static struct platform_driver tc3589x_keypad_driver = {
.driver = {
.name = "tc3589x-keypad",
- .owner = THIS_MODULE,
.pm = &tc3589x_keypad_dev_pm_ops,
},
.probe = tc3589x_keypad_probe,
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 9757a58bc897..f97c73bd14f8 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -822,7 +822,6 @@ static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
.driver = {
.name = "tegra-kbc",
- .owner = THIS_MODULE,
.pm = &tegra_kbc_pm_ops,
.of_match_table = tegra_kbc_of_match,
},
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index c5a11700a1bf..bbcccd67247d 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -459,7 +459,6 @@ static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
.driver = {
.name = "twl4030_keypad",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl4030_keypad_dt_match_table),
},
};
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index e8b9d94daae7..a1ff69c53102 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -258,7 +258,6 @@ static struct platform_driver w90p910_keypad_driver = {
.remove = w90p910_keypad_remove,
.driver = {
.name = "nuc900-kpi",
- .owner = THIS_MODULE,
},
};
module_platform_driver(w90p910_keypad_driver);
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index ee43e5b7c881..cf9908f1e5d5 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -153,7 +153,6 @@ static int pm80x_onkey_remove(struct platform_device *pdev)
static struct platform_driver pm80x_onkey_driver = {
.driver = {
.name = "88pm80x-onkey",
- .owner = THIS_MODULE,
.pm = &pm80x_onkey_pm_ops,
},
.probe = pm80x_onkey_probe,
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 220ce0fa15d9..cc87443aa2ee 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -112,8 +112,7 @@ static int pm860x_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm860x_onkey_suspend(struct device *dev)
+static int __maybe_unused pm860x_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -122,7 +121,7 @@ static int pm860x_onkey_suspend(struct device *dev)
chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY;
return 0;
}
-static int pm860x_onkey_resume(struct device *dev)
+static int __maybe_unused pm860x_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -131,14 +130,12 @@ static int pm860x_onkey_resume(struct device *dev)
chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops, pm860x_onkey_suspend, pm860x_onkey_resume);
static struct platform_driver pm860x_onkey_driver = {
.driver = {
.name = "88pm860x-onkey",
- .owner = THIS_MODULE,
.pm = &pm860x_onkey_pm_ops,
},
.probe = pm860x_onkey_probe,
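The 88pm860x-onkey hunk above shows the other conversion repeated throughout this series: the #ifdef CONFIG_PM_SLEEP guards around the suspend/resume handlers are dropped and the handlers are marked __maybe_unused instead; SIMPLE_DEV_PM_OPS() only references them when CONFIG_PM_SLEEP is enabled, and the attribute silences the unused-function warning otherwise. A hedged sketch of the pattern with placeholder "bar" names:

/* Sketch of the __maybe_unused PM pattern; all "bar" names are placeholders. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int bar_probe(struct platform_device *pdev)
{
	return 0;
}

static int __maybe_unused bar_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int __maybe_unused bar_resume(struct device *dev)
{
	/* bring the hardware back up here */
	return 0;
}

/*
 * With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS() leaves the callbacks
 * unreferenced; __maybe_unused keeps the compiler from warning about them,
 * so no #ifdef around the functions is needed.
 */
static SIMPLE_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume);

static struct platform_driver bar_driver = {
	.probe = bar_probe,
	.driver = {
		.name = "bar",
		.pm = &bar_pm_ops,
	},
};
module_platform_driver(bar_driver);

MODULE_LICENSE("GPL");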
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 95ef7dd6442d..1f7e15ca5fbe 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -123,7 +123,6 @@ static const struct of_device_id ab8500_ponkey_match[] = {
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e0f522516ef5..189bdc8e91a5 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -13,17 +13,15 @@
#include <linux/pm.h>
#include "ad714x.h"
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_i2c_suspend(struct device *dev)
+static int __maybe_unused ad714x_i2c_suspend(struct device *dev)
{
return ad714x_disable(i2c_get_clientdata(to_i2c_client(dev)));
}
-static int ad714x_i2c_resume(struct device *dev)
+static int __maybe_unused ad714x_i2c_resume(struct device *dev)
{
return ad714x_enable(i2c_get_clientdata(to_i2c_client(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 3a90b710e309..a79e50b58bf5 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -16,17 +16,15 @@
#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
#define AD714x_SPI_READ BIT(10)
-#ifdef CONFIG_PM_SLEEP
-static int ad714x_spi_suspend(struct device *dev)
+static int __maybe_unused ad714x_spi_suspend(struct device *dev)
{
return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}
-static int ad714x_spi_resume(struct device *dev)
+static int __maybe_unused ad714x_spi_resume(struct device *dev)
{
return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
-#endif
static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index 416f47ddcc90..470bfd6f0830 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -105,8 +105,7 @@ static int adxl34x_i2c_remove(struct i2c_client *client)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_i2c_suspend(struct device *dev)
+static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -116,7 +115,7 @@ static int adxl34x_i2c_suspend(struct device *dev)
return 0;
}
-static int adxl34x_i2c_resume(struct device *dev)
+static int __maybe_unused adxl34x_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -125,7 +124,6 @@ static int adxl34x_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_i2c_pm, adxl34x_i2c_suspend,
adxl34x_i2c_resume);
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 76dc0679d3b1..da6e76b58dab 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -94,8 +94,7 @@ static int adxl34x_spi_remove(struct spi_device *spi)
return adxl34x_remove(ac);
}
-#ifdef CONFIG_PM_SLEEP
-static int adxl34x_spi_suspend(struct device *dev)
+static int __maybe_unused adxl34x_spi_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -105,7 +104,7 @@ static int adxl34x_spi_suspend(struct device *dev)
return 0;
}
-static int adxl34x_spi_resume(struct device *dev)
+static int __maybe_unused adxl34x_spi_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct adxl34x *ac = spi_get_drvdata(spi);
@@ -114,7 +113,6 @@ static int adxl34x_spi_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
adxl34x_spi_resume);
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index ef2e281b0a43..4dbbed74c9e4 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -225,7 +225,6 @@ static struct platform_driver arizona_haptics_driver = {
.remove = arizona_haptics_remove,
.driver = {
.name = "arizona-haptics",
- .owner = THIS_MODULE,
},
};
module_platform_driver(arizona_haptics_driver);
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index e69d9bcb37e1..3f4351579372 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -256,7 +256,6 @@ static struct platform_driver bfin_rotary_device_driver = {
.remove = bfin_rotary_remove,
.driver = {
.name = "bfin-rotary",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &bfin_rotary_pm_ops,
#endif
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 3e11510ff82d..fbe72afc9347 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -157,7 +157,6 @@ static struct platform_driver cobalt_buttons_driver = {
.remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
- .owner = THIS_MODULE,
},
};
module_platform_driver(cobalt_buttons_driver);
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 184c8f21ab59..266e07fdc182 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -149,7 +149,6 @@ static struct platform_driver da9052_onkey_driver = {
.remove = da9052_onkey_remove,
.driver = {
.name = "da9052-onkey",
- .owner = THIS_MODULE,
},
};
module_platform_driver(da9052_onkey_driver);
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index 4765799fef74..3251a9693f45 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -157,7 +157,6 @@ static struct platform_driver da9055_onkey_driver = {
.remove = da9055_onkey_remove,
.driver = {
.name = "da9055-onkey",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 0eba94f581df..b6b7bd4e5462 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -263,7 +263,6 @@ static struct platform_driver dm355evm_keys_driver = {
.probe = dm355evm_keys_probe,
.remove = dm355evm_keys_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "dm355evm_keys",
},
};
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index cab87f5ce6d3..a364e109ca7c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -639,8 +639,7 @@ static int drv260x_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv260x_suspend(struct device *dev)
+static int __maybe_unused drv260x_suspend(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -672,7 +671,7 @@ out:
return ret;
}
-static int drv260x_resume(struct device *dev)
+static int __maybe_unused drv260x_resume(struct device *dev)
{
struct drv260x_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -702,7 +701,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv260x_pm_ops, drv260x_suspend, drv260x_resume);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index 0f437581cc04..a021744e608c 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -406,8 +406,7 @@ static int drv2667_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int drv2667_suspend(struct device *dev)
+static int __maybe_unused drv2667_suspend(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -436,7 +435,7 @@ out:
return ret;
}
-static int drv2667_resume(struct device *dev)
+static int __maybe_unused drv2667_resume(struct device *dev)
{
struct drv2667_data *haptics = dev_get_drvdata(dev);
int ret = 0;
@@ -464,7 +463,6 @@ out:
mutex_unlock(&haptics->input_dev->mutex);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume);
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index de21e317da32..0ac176d66a6f 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -225,8 +225,7 @@ static int gp2a_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gp2a_suspend(struct device *dev)
+static int __maybe_unused gp2a_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -244,7 +243,7 @@ static int gp2a_suspend(struct device *dev)
return retval;
}
-static int gp2a_resume(struct device *dev)
+static int __maybe_unused gp2a_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gp2a_data *dt = i2c_get_clientdata(client);
@@ -261,7 +260,6 @@ static int gp2a_resume(struct device *dev)
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
diff --git a/drivers/input/misc/gpio-beeper.c b/drivers/input/misc/gpio-beeper.c
index 8886af63eae3..4817c5f0c3e4 100644
--- a/drivers/input/misc/gpio-beeper.c
+++ b/drivers/input/misc/gpio-beeper.c
@@ -112,7 +112,6 @@ MODULE_DEVICE_TABLE(of, gpio_beeper_of_match);
static struct platform_driver gpio_beeper_platform_driver = {
.driver = {
.name = BEEPER_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpio_beeper_of_match),
},
.probe = gpio_beeper_probe,
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 1a81d9115226..f103b99d1852 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -199,7 +199,6 @@ static struct platform_driver gpio_tilt_polled_driver = {
.remove = gpio_tilt_polled_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/misc/ideapad_slidebar.c b/drivers/input/misc/ideapad_slidebar.c
index edfd6239f131..b0acb878d1cf 100644
--- a/drivers/input/misc/ideapad_slidebar.c
+++ b/drivers/input/misc/ideapad_slidebar.c
@@ -272,7 +272,6 @@ static int ideapad_remove(struct platform_device *pdev)
static struct platform_driver slidebar_drv = {
.driver = {
.name = "ideapad_slidebar",
- .owner = THIS_MODULE,
},
.remove = ideapad_remove,
};
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index afed8e2b2f94..ac1fa5f44580 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
{
- static atomic_t device_no = ATOMIC_INIT(0);
+ static atomic_t device_no = ATOMIC_INIT(-1);
const struct ims_pcu_device_info *info;
int error;
@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
}
/* Device appears to be operable, complete initialization */
- pcu->device_no = atomic_inc_return(&device_no) - 1;
+ pcu->device_no = atomic_inc_return(&device_no);
/*
* PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
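The ims-pcu hunk above changes how device numbers are handed out: seeding the counter with -1 lets the driver use the value returned by atomic_inc_return() directly (0, 1, 2, ...) instead of post-correcting with "- 1". A tiny sketch of the idiom, with a hypothetical counter name:

/* Sketch of the device-numbering idiom; foo_instance_no is a placeholder. */
#include <linux/atomic.h>

static atomic_t foo_instance_no = ATOMIC_INIT(-1);

static int foo_next_instance(void)
{
	/*
	 * Starting from -1, the first atomic_inc_return() yields 0, so the
	 * return value is usable as-is, with no trailing "- 1".
	 */
	return atomic_inc_return(&foo_instance_no);
}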
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index ed8e5e8449d3..1fe149f3def2 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -168,7 +168,6 @@ static void ixp4xx_spkr_shutdown(struct platform_device *dev)
static struct platform_driver ixp4xx_spkr_platform_driver = {
.driver = {
.name = "ixp4xx-beeper",
- .owner = THIS_MODULE,
},
.probe = ixp4xx_spkr_probe,
.remove = ixp4xx_spkr_remove,
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index d708478bc5b5..6e29349da537 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -615,8 +615,7 @@ static int kxtj9_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int kxtj9_suspend(struct device *dev)
+static int __maybe_unused kxtj9_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -631,7 +630,7 @@ static int kxtj9_suspend(struct device *dev)
return 0;
}
-static int kxtj9_resume(struct device *dev)
+static int __maybe_unused kxtj9_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -646,7 +645,6 @@ static int kxtj9_resume(struct device *dev)
mutex_unlock(&input_dev->mutex);
return retval;
}
-#endif
static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c
index def21dc84522..312d63623038 100644
--- a/drivers/input/misc/m68kspkr.c
+++ b/drivers/input/misc/m68kspkr.c
@@ -100,7 +100,6 @@ static void m68kspkr_shutdown(struct platform_device *dev)
static struct platform_driver m68kspkr_platform_driver = {
.driver = {
.name = "m68kspkr",
- .owner = THIS_MODULE,
},
.probe = m68kspkr_probe,
.remove = m68kspkr_remove,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index ef6a9d650d69..39e930c10ebb 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -309,8 +309,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max77693_haptic_suspend(struct device *dev)
+static int __maybe_unused max77693_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -323,7 +322,7 @@ static int max77693_haptic_suspend(struct device *dev)
return 0;
}
-static int max77693_haptic_resume(struct device *dev)
+static int __maybe_unused max77693_haptic_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max77693_haptic *haptic = platform_get_drvdata(pdev);
@@ -335,7 +334,6 @@ static int max77693_haptic_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
max77693_haptic_suspend, max77693_haptic_resume);
@@ -343,7 +341,6 @@ static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
static struct platform_driver max77693_haptic_driver = {
.driver = {
.name = "max77693-haptic",
- .owner = THIS_MODULE,
.pm = &max77693_haptic_pm_ops,
},
.probe = max77693_haptic_probe,
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 3809618e6a5d..7c49b8d23894 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -133,8 +133,7 @@ static int max8925_onkey_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8925_onkey_suspend(struct device *dev)
+static int __maybe_unused max8925_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -148,7 +147,7 @@ static int max8925_onkey_suspend(struct device *dev)
return 0;
}
-static int max8925_onkey_resume(struct device *dev)
+static int __maybe_unused max8925_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,14 +160,12 @@ static int max8925_onkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8925_onkey_pm_ops, max8925_onkey_suspend, max8925_onkey_resume);
static struct platform_driver max8925_onkey_driver = {
.driver = {
.name = "max8925-onkey",
- .owner = THIS_MODULE,
.pm = &max8925_onkey_pm_ops,
},
.probe = max8925_onkey_probe,
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index a363ebbd9cc0..d0f687281339 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -378,8 +378,7 @@ static int max8997_haptic_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int max8997_haptic_suspend(struct device *dev)
+static int __maybe_unused max8997_haptic_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -388,7 +387,6 @@ static int max8997_haptic_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(max8997_haptic_pm_ops, max8997_haptic_suspend, NULL);
@@ -401,7 +399,6 @@ MODULE_DEVICE_TABLE(i2c, max8997_haptic_id);
static struct platform_driver max8997_haptic_driver = {
.driver = {
.name = "max8997-haptic",
- .owner = THIS_MODULE,
.pm = &max8997_haptic_pm_ops,
},
.probe = max8997_haptic_probe,
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 0df6e8d8bd03..afdf8ef25ee8 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -258,7 +258,6 @@ static struct platform_driver mc13783_pwrbutton_driver = {
.remove = mc13783_pwrbutton_remove,
.driver = {
.name = "mc13783-pwrbutton",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index f505ac3a8d87..1f9b5ee92746 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -260,7 +260,6 @@ static int palmas_pwron_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
/**
* palmas_pwron_suspend() - suspend handler
* @dev: power button device
@@ -269,7 +268,7 @@ static int palmas_pwron_remove(struct platform_device *pdev)
*
* Return: 0
*/
-static int palmas_pwron_suspend(struct device *dev)
+static int __maybe_unused palmas_pwron_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -290,7 +289,7 @@ static int palmas_pwron_suspend(struct device *dev)
*
* Return: 0
*/
-static int palmas_pwron_resume(struct device *dev)
+static int __maybe_unused palmas_pwron_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct palmas_pwron *pwron = platform_get_drvdata(pdev);
@@ -300,7 +299,6 @@ static int palmas_pwron_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(palmas_pwron_pm,
palmas_pwron_suspend, palmas_pwron_resume);
@@ -319,7 +317,6 @@ static struct platform_driver palmas_pwron_driver = {
.remove = palmas_pwron_remove,
.driver = {
.name = "palmas_pwrbutton",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_palmas_pwr_match),
.pm = &palmas_pwron_pm,
},
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index cd230365166e..3b81daf67726 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -121,7 +121,6 @@ static struct platform_driver pcap_keys_device_driver = {
.remove = pcap_keys_remove,
.driver = {
.name = "pcap-keys",
- .owner = THIS_MODULE,
}
};
module_platform_driver(pcap_keys_device_driver);
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 674a2cfc3c0e..72b1fc3ab910 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -125,7 +125,6 @@ static const struct dev_pm_ops pcspkr_pm_ops = {
static struct platform_driver pcspkr_platform_driver = {
.driver = {
.name = "pcspkr",
- .owner = THIS_MODULE,
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 6a915ba31bba..5113877153d7 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -199,8 +199,7 @@ static int pm8xxx_vib_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pm8xxx_vib_suspend(struct device *dev)
+static int __maybe_unused pm8xxx_vib_suspend(struct device *dev)
{
struct pm8xxx_vib *vib = dev_get_drvdata(dev);
@@ -209,7 +208,6 @@ static int pm8xxx_vib_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
@@ -224,7 +222,6 @@ static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
.driver = {
.name = "pm8xxx-vib",
- .owner = THIS_MODULE,
.pm = &pm8xxx_vib_pm_ops,
.of_match_table = pm8xxx_vib_id_table,
},
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index c91e3d33aea9..c4ca20e63221 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -53,8 +53,7 @@ static irqreturn_t pwrkey_release_irq(int irq, void *_pwr)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
-static int pmic8xxx_pwrkey_suspend(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_suspend(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -64,7 +63,7 @@ static int pmic8xxx_pwrkey_suspend(struct device *dev)
return 0;
}
-static int pmic8xxx_pwrkey_resume(struct device *dev)
+static int __maybe_unused pmic8xxx_pwrkey_resume(struct device *dev)
{
struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -73,7 +72,6 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
@@ -195,7 +193,6 @@ static struct platform_driver pmic8xxx_pwrkey_driver = {
.remove = pmic8xxx_pwrkey_remove,
.driver = {
.name = "pm8xxx-pwrkey",
- .owner = THIS_MODULE,
.pm = &pm8xxx_pwr_key_pm_ops,
.of_match_table = pm8xxx_pwr_key_id_table,
},
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 8ef288e7c971..a28ee70ff158 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -144,8 +144,7 @@ static int pwm_beeper_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_beeper_suspend(struct device *dev)
+static int __maybe_unused pwm_beeper_suspend(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -155,7 +154,7 @@ static int pwm_beeper_suspend(struct device *dev)
return 0;
}
-static int pwm_beeper_resume(struct device *dev)
+static int __maybe_unused pwm_beeper_resume(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
@@ -170,6 +169,7 @@ static int pwm_beeper_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
+#ifdef CONFIG_PM_SLEEP
#define PWM_BEEPER_PM_OPS (&pwm_beeper_pm_ops)
#else
#define PWM_BEEPER_PM_OPS NULL
@@ -187,7 +187,6 @@ static struct platform_driver pwm_beeper_driver = {
.remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
- .owner = THIS_MODULE,
.pm = PWM_BEEPER_PM_OPS,
.of_match_table = of_match_ptr(pwm_beeper_match),
},
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index 83fff38b86b3..e956e81cd4e6 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -96,7 +96,6 @@ static struct platform_driver rb532_button_driver = {
.remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(rb532_button_driver);
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
index 4bff1aa9b0db..0c8ac60e2639 100644
--- a/drivers/input/misc/retu-pwrbutton.c
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -85,7 +85,6 @@ static struct platform_driver retu_pwrbutton_driver = {
.remove = retu_pwrbutton_remove,
.driver = {
.name = "retu-pwrbutton",
- .owner = THIS_MODULE,
},
};
module_platform_driver(retu_pwrbutton_driver);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 93558a1c7f70..f27f81ee84ed 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -325,7 +325,6 @@ static struct platform_driver rotary_encoder_driver = {
.remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rotary_encoder_of_match),
}
};
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index f10474937a64..7bbe79d89f5c 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -157,7 +157,6 @@ static struct platform_driver sgi_buttons_driver = {
.remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
- .owner = THIS_MODULE,
},
};
module_platform_driver(sgi_buttons_driver);
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index fed5102e1802..9d5b89befe6f 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -179,8 +179,7 @@ static int sirfsoc_pwrc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_pwrc_resume(struct device *dev)
+static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
{
struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
struct input_dev *input = pwrcdrv->input;
@@ -196,7 +195,6 @@ static int sirfsoc_pwrc_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
@@ -205,7 +203,6 @@ static struct platform_driver sirfsoc_pwrc_driver = {
.remove = sirfsoc_pwrc_remove,
.driver = {
.name = "sirfsoc-pwrc",
- .owner = THIS_MODULE,
.pm = &sirfsoc_pwrc_pm_ops,
.of_match_table = sirfsoc_pwrc_of_match,
}
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e097f1ab427f..79cc0f79896f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -214,7 +214,6 @@ static struct platform_driver soc_button_driver = {
.remove = soc_button_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(soc_button_acpi_match),
},
};
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 179ff1cd6f6b..54116e544c96 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -257,7 +257,6 @@ static const struct of_device_id bbc_beep_match[] = {
static struct platform_driver bbc_beep_driver = {
.driver = {
.name = "bbcbeep",
- .owner = THIS_MODULE,
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
@@ -337,7 +336,6 @@ static const struct of_device_id grover_beep_match[] = {
static struct platform_driver grover_beep_driver = {
.driver = {
.name = "groverbeep",
- .owner = THIS_MODULE,
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 8400a1a34d87..e98cc81a84c6 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -102,7 +102,6 @@ static struct platform_driver twl4030_pwrbutton_driver = {
.probe = twl4030_pwrbutton_probe,
.driver = {
.name = "twl4030_pwrbutton",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl4030_pwrbutton_dt_match_table),
},
};
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 960ef2a70910..fc17b9592f54 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -157,8 +157,7 @@ static void twl4030_vibra_close(struct input_dev *input)
}
/*** Module ***/
-#ifdef CONFIG_PM_SLEEP
-static int twl4030_vibra_suspend(struct device *dev)
+static int __maybe_unused twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -169,12 +168,11 @@ static int twl4030_vibra_suspend(struct device *dev)
return 0;
}
-static int twl4030_vibra_resume(struct device *dev)
+static int __maybe_unused twl4030_vibra_resume(struct device *dev)
{
vibra_disable_leds();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
@@ -253,7 +251,6 @@ static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
.driver = {
.name = "twl4030-vibra",
- .owner = THIS_MODULE,
.pm = &twl4030_vibra_pm_ops,
},
};
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 6d26eecc278c..0e0d094df2e6 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -236,8 +236,7 @@ static void twl6040_vibra_close(struct input_dev *input)
mutex_unlock(&info->mutex);
}
-#ifdef CONFIG_PM_SLEEP
-static int twl6040_vibra_suspend(struct device *dev)
+static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -251,7 +250,6 @@ static int twl6040_vibra_suspend(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
@@ -388,7 +386,6 @@ static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
.driver = {
.name = "twl6040-vibra",
- .owner = THIS_MODULE,
.pm = &twl6040_vibra_pm_ops,
},
};
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 7b7add5061a5..e25f87ba19f6 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -1347,7 +1347,6 @@ static const struct dev_pm_ops wistron_pm_ops = {
static struct platform_driver wistron_driver = {
.driver = {
.name = "wistron-bios",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &wistron_pm_ops,
#endif
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 173b6dcca0da..59d4f7bcb4a3 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -138,7 +138,6 @@ static struct platform_driver wm831x_on_driver = {
.remove = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
- .owner = THIS_MODULE,
},
};
module_platform_driver(wm831x_on_driver);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 366fc7ad5eb6..d8b46b0f2dbe 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -215,6 +215,36 @@ config MOUSE_CYAPA
To compile this driver as a module, choose M here: the module will be
called cyapa.
+config MOUSE_ELAN_I2C
+ tristate "ELAN I2C Touchpad support"
+ depends on I2C
+ help
+ This driver adds support for Elan I2C/SMbus Trackpads.
+
+ Say Y here if you have an ELAN I2C/SMbus Touchpad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called elan_i2c.
+
+config MOUSE_ELAN_I2C_I2C
+ bool "Enable I2C support"
+ depends on MOUSE_ELAN_I2C
+ default y
+ help
+ Say Y here if the Elan Touchpad in your system is connected to
+ a standard I2C controller.
+
+ If unsure, say Y.
+
+config MOUSE_ELAN_I2C_SMBUS
+ bool "Enable SMbus support"
+ depends on MOUSE_ELAN_I2C
+ help
+ Say Y here if the Elan Touchpad in your system is connected to
+ an SMbus adapter.
+
+ If unsure, say Y.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index dda507f8b3a2..560003dcac37 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
+obj-$(CONFIG_MOUSE_ELAN_I2C) += elan_i2c.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -34,3 +35,7 @@ psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
+
+elan_i2c-objs := elan_i2c_core.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_I2C) += elan_i2c_i2c.o
+elan_i2c-$(CONFIG_MOUSE_ELAN_I2C_SMBUS) += elan_i2c_smbus.o
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index 62ec52b2e347..a7fd8f22ba56 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -141,7 +141,6 @@ static struct platform_driver amimouse_driver = {
.remove = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index b409c3d7d4fb..1bece8cad46f 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
* Daniel Kurtz <djkurtz@chromium.org>
* Benson Leung <bleung@chromium.org>
*
- * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
* Copyright (C) 2011-2012 Google, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -206,7 +206,6 @@ struct cyapa {
struct i2c_client *client;
struct input_dev *input;
char phys[32]; /* device physical location */
- int irq;
bool irq_wake; /* irq wake is enabled */
bool smbus;
@@ -422,8 +421,8 @@ static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
*/
static int cyapa_get_state(struct cyapa *cyapa)
{
- int ret;
u8 status[BL_STATUS_SIZE];
+ int error;
cyapa->state = CYAPA_STATE_NO_DEVICE;
@@ -433,18 +432,18 @@ static int cyapa_get_state(struct cyapa *cyapa)
* If the device is in operation mode, this will be the DATA regs.
*
*/
- ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
- status);
+ error = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+ status);
/*
* On smbus systems in OP mode, the i2c_reg_read will fail with
* -ETIMEDOUT. In this case, try again using the smbus equivalent
* command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
*/
- if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
- ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+ if (cyapa->smbus && (error == -ETIMEDOUT || error == -ENXIO))
+ error = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
- if (ret != BL_STATUS_SIZE)
+ if (error != BL_STATUS_SIZE)
goto error;
if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
@@ -454,7 +453,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
cyapa->state = CYAPA_STATE_OP;
break;
default:
- ret = -EAGAIN;
+ error = -EAGAIN;
goto error;
}
} else {
@@ -468,7 +467,7 @@ static int cyapa_get_state(struct cyapa *cyapa)
return 0;
error:
- return (ret < 0) ? ret : -EAGAIN;
+ return (error < 0) ? error : -EAGAIN;
}
/*
@@ -487,31 +486,31 @@ error:
*/
static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
{
- int ret;
+ int error;
int tries = timeout / 100;
- ret = cyapa_get_state(cyapa);
- while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+ error = cyapa_get_state(cyapa);
+ while ((error || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
msleep(100);
- ret = cyapa_get_state(cyapa);
+ error = cyapa_get_state(cyapa);
}
- return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+ return (error == -EAGAIN || error == -ETIMEDOUT) ? -ETIMEDOUT : error;
}
static int cyapa_bl_deactivate(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
- bl_deactivate);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (error)
+ return error;
/* wait for bootloader to switch to idle state; should take < 100ms */
msleep(100);
- ret = cyapa_poll_state(cyapa, 500);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 500);
+ if (error)
+ return error;
if (cyapa->state != CYAPA_STATE_BL_IDLE)
return -EAGAIN;
return 0;
@@ -532,11 +531,11 @@ static int cyapa_bl_deactivate(struct cyapa *cyapa)
*/
static int cyapa_bl_exit(struct cyapa *cyapa)
{
- int ret;
+ int error;
- ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
- if (ret < 0)
- return ret;
+ error = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (error)
+ return error;
/*
* Wait for bootloader to exit, and operation mode to start.
@@ -548,9 +547,9 @@ static int cyapa_bl_exit(struct cyapa *cyapa)
* updated to new firmware, it must first calibrate its sensors, which
* can take up to an additional 2 seconds.
*/
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error < 0)
+ return error;
if (cyapa->state != CYAPA_STATE_OP)
return -EAGAIN;
@@ -577,10 +576,13 @@ static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
power = ret & ~PWR_MODE_MASK;
power |= power_mode & PWR_MODE_MASK;
ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
power_mode, ret);
- return ret;
+ return ret;
+ }
+
+ return 0;
}
static int cyapa_get_query_data(struct cyapa *cyapa)
@@ -637,28 +639,28 @@ static int cyapa_check_is_operational(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
static const char unique_str[] = "CYTRA";
- int ret;
+ int error;
- ret = cyapa_poll_state(cyapa, 2000);
- if (ret < 0)
- return ret;
+ error = cyapa_poll_state(cyapa, 2000);
+ if (error)
+ return error;
switch (cyapa->state) {
case CYAPA_STATE_BL_ACTIVE:
- ret = cyapa_bl_deactivate(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_deactivate(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_BL_IDLE:
- ret = cyapa_bl_exit(cyapa);
- if (ret)
- return ret;
+ error = cyapa_bl_exit(cyapa);
+ if (error)
+ return error;
/* Fallthrough state */
case CYAPA_STATE_OP:
- ret = cyapa_get_query_data(cyapa);
- if (ret < 0)
- return ret;
+ error = cyapa_get_query_data(cyapa);
+ if (error)
+ return error;
/* only support firmware protocol gen3 */
if (cyapa->gen != CYAPA_GEN3) {
@@ -753,18 +755,42 @@ static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
return ret;
}
+static int cyapa_open(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+ struct i2c_client *client = cyapa->client;
+ int error;
+
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (error) {
+ dev_err(&client->dev, "set active power failed: %d\n", error);
+ return error;
+ }
+
+ enable_irq(client->irq);
+ return 0;
+}
+
+static void cyapa_close(struct input_dev *input)
+{
+ struct cyapa *cyapa = input_get_drvdata(input);
+
+ disable_irq(cyapa->client->irq);
+ cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+}
+
static int cyapa_create_input_dev(struct cyapa *cyapa)
{
struct device *dev = &cyapa->client->dev;
- int ret;
struct input_dev *input;
+ int error;
if (!cyapa->physical_size_x || !cyapa->physical_size_y)
return -EINVAL;
- input = cyapa->input = input_allocate_device();
+ input = devm_input_allocate_device(dev);
if (!input) {
- dev_err(dev, "allocate memory for input device failed\n");
+ dev_err(dev, "failed to allocate memory for input device.\n");
return -ENOMEM;
}
@@ -772,14 +798,17 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
input->phys = cyapa->phys;
input->id.bustype = BUS_I2C;
input->id.version = 1;
- input->id.product = 0; /* means any product in eventcomm. */
+ input->id.product = 0; /* Means any product in eventcomm. */
input->dev.parent = &cyapa->client->dev;
+ input->open = cyapa_open;
+ input->close = cyapa_close;
+
input_set_drvdata(input, cyapa);
__set_bit(EV_ABS, input->evbit);
- /* finger position */
+ /* Finger position */
input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
@@ -801,35 +830,25 @@ static int cyapa_create_input_dev(struct cyapa *cyapa)
if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- /* handle pointer emulation and unused slots in core */
- ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
- INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
- if (ret) {
- dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
- goto err_free_device;
+ /* Handle pointer emulation and unused slots in core */
+ error = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
}
- /* Register the device in input subsystem */
- ret = input_register_device(input);
- if (ret) {
- dev_err(dev, "input device register failed, %d\n", ret);
- goto err_free_device;
- }
+ cyapa->input = input;
return 0;
-
-err_free_device:
- input_free_device(input);
- cyapa->input = NULL;
- return ret;
}
static int cyapa_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
- int ret;
- u8 adapter_func;
- struct cyapa *cyapa;
struct device *dev = &client->dev;
+ struct cyapa *cyapa;
+ u8 adapter_func;
+ int error;
adapter_func = cyapa_check_adapter_functionality(client);
if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
@@ -837,11 +856,9 @@ static int cyapa_probe(struct i2c_client *client,
return -EIO;
}
- cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
- if (!cyapa) {
- dev_err(dev, "allocate memory for cyapa failed\n");
+ cyapa = devm_kzalloc(dev, sizeof(struct cyapa), GFP_KERNEL);
+ if (!cyapa)
return -ENOMEM;
- }
cyapa->gen = CYAPA_GEN3;
cyapa->client = client;
@@ -852,67 +869,61 @@ static int cyapa_probe(struct i2c_client *client,
/* i2c isn't supported, use smbus */
if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
cyapa->smbus = true;
+
cyapa->state = CYAPA_STATE_NO_DEVICE;
- ret = cyapa_check_is_operational(cyapa);
- if (ret) {
- dev_err(dev, "device not operational, %d\n", ret);
- goto err_mem_free;
- }
- ret = cyapa_create_input_dev(cyapa);
- if (ret) {
- dev_err(dev, "create input_dev instance failed, %d\n", ret);
- goto err_mem_free;
+ error = cyapa_check_is_operational(cyapa);
+ if (error) {
+ dev_err(dev, "device not operational, %d\n", error);
+ return error;
}
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret) {
- dev_err(dev, "set active power failed, %d\n", ret);
- goto err_unregister_device;
+ /* Power down the device until we need it */
+ error = cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ if (error) {
+ dev_err(dev, "failed to quiesce the device: %d\n", error);
+ return error;
}
- cyapa->irq = client->irq;
- ret = request_threaded_irq(cyapa->irq,
- NULL,
- cyapa_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "cyapa",
- cyapa);
- if (ret) {
- dev_err(dev, "IRQ request failed: %d\n, ", ret);
- goto err_unregister_device;
+ error = cyapa_create_input_dev(cyapa);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cyapa_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyapa", cyapa);
+ if (error) {
+ dev_err(dev, "failed to request threaded irq: %d\n", error);
+ return error;
}
- return 0;
+ /* Disable IRQ until the device is opened */
+ disable_irq(client->irq);
-err_unregister_device:
- input_unregister_device(cyapa->input);
-err_mem_free:
- kfree(cyapa);
-
- return ret;
-}
-
-static int cyapa_remove(struct i2c_client *client)
-{
- struct cyapa *cyapa = i2c_get_clientdata(client);
-
- free_irq(cyapa->irq, cyapa);
- input_unregister_device(cyapa->input);
- cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
- kfree(cyapa);
+ /* Register the device in input subsystem */
+ error = input_register_device(cyapa->input);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d\n", error);
+ return error;
+ }
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cyapa_suspend(struct device *dev)
+static int __maybe_unused cyapa_suspend(struct device *dev)
{
- int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
u8 power_mode;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ int error;
- disable_irq(cyapa->irq);
+ error = mutex_lock_interruptible(&input->mutex);
+ if (error)
+ return error;
+
+ disable_irq(client->irq);
/*
* Set trackpad device to idle mode if wakeup is allowed,
@@ -920,31 +931,44 @@ static int cyapa_suspend(struct device *dev)
*/
power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
: PWR_MODE_OFF;
- ret = cyapa_set_power_mode(cyapa, power_mode);
- if (ret < 0)
- dev_err(dev, "set power mode failed, %d\n", ret);
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_err(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
if (device_may_wakeup(dev))
- cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+ cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+
+ mutex_unlock(&input->mutex);
+
return 0;
}
-static int cyapa_resume(struct device *dev)
+static int __maybe_unused cyapa_resume(struct device *dev)
{
- int ret;
- struct cyapa *cyapa = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ struct input_dev *input = cyapa->input;
+ u8 power_mode;
+ int error;
+
+ mutex_lock(&input->mutex);
if (device_may_wakeup(dev) && cyapa->irq_wake)
- disable_irq_wake(cyapa->irq);
+ disable_irq_wake(client->irq);
+
+ power_mode = input->users ? PWR_MODE_FULL_ACTIVE : PWR_MODE_OFF;
+ error = cyapa_set_power_mode(cyapa, power_mode);
+ if (error)
+ dev_warn(dev, "resume: set power mode to %d failed: %d\n",
+ power_mode, error);
+
+ enable_irq(client->irq);
- ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
- if (ret)
- dev_warn(dev, "resume active power failed, %d\n", ret);
+ mutex_unlock(&input->mutex);
- enable_irq(cyapa->irq);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
@@ -962,7 +986,6 @@ static struct i2c_driver cyapa_driver = {
},
.probe = cyapa_probe,
- .remove = cyapa_remove,
.id_table = cyapa_id_table,
};
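The cyapa rework above does more than the devm conversion: probe now powers the pad down and leaves the IRQ disabled, and the input core's open()/close() callbacks power it up and gate the interrupt only while userspace has the device open. A sketch of that open/close shape, with hypothetical "tp" names standing in for the driver's own structures:

/*
 * Sketch of the input open/close power + IRQ gating pattern adopted by
 * cyapa above; "tp" and tp_set_power() are placeholder names.
 */
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

struct tp_data {
	struct i2c_client *client;
	struct input_dev *input;
};

static int tp_set_power(struct tp_data *tp, bool on)
{
	/* program the device's power-mode register here */
	return 0;
}

static int tp_open(struct input_dev *input)
{
	struct tp_data *tp = input_get_drvdata(input);
	int error;

	error = tp_set_power(tp, true);
	if (error)
		return error;

	/* probe left the IRQ disabled; enable it only while users exist */
	enable_irq(tp->client->irq);
	return 0;
}

static void tp_close(struct input_dev *input)
{
	struct tp_data *tp = input_get_drvdata(input);

	disable_irq(tp->client->irq);
	tp_set_power(tp, false);
}

Probe would then set input->open and input->close to these callbacks, request the interrupt with devm_request_threaded_irq(), and immediately disable_irq() it until the first open, matching the hunk above.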
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
new file mode 100644
index 000000000000..2e838626205f
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c.h
@@ -0,0 +1,86 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#ifndef _ELAN_I2C_H
+#define _ELAN_I2C_H
+
+#include <linux/types.h>
+
+#define ETP_ENABLE_ABS 0x0001
+#define ETP_ENABLE_CALIBRATE 0x0002
+#define ETP_DISABLE_CALIBRATE 0x0000
+#define ETP_DISABLE_POWER 0x0001
+
+/* IAP Firmware handling */
+#define ETP_FW_NAME "elan_i2c.bin"
+#define ETP_IAP_START_ADDR 0x0083
+#define ETP_FW_IAP_PAGE_ERR (1 << 5)
+#define ETP_FW_IAP_INTF_ERR (1 << 4)
+#define ETP_FW_PAGE_SIZE 64
+#define ETP_FW_PAGE_COUNT 768
+#define ETP_FW_SIZE (ETP_FW_PAGE_SIZE * ETP_FW_PAGE_COUNT)
+
+struct i2c_client;
+struct completion;
+
+enum tp_mode {
+ IAP_MODE = 1,
+ MAIN_MODE
+};
+
+struct elan_transport_ops {
+ int (*initialize)(struct i2c_client *client);
+ int (*sleep_control)(struct i2c_client *, bool sleep);
+ int (*power_control)(struct i2c_client *, bool enable);
+ int (*set_mode)(struct i2c_client *client, u8 mode);
+
+ int (*calibrate)(struct i2c_client *client);
+ int (*calibrate_result)(struct i2c_client *client, u8 *val);
+
+ int (*get_baseline_data)(struct i2c_client *client,
+ bool max_baseline, u8 *value);
+
+ int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
+ int (*get_sm_version)(struct i2c_client *client, u8 *version);
+ int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
+ int (*get_product_id)(struct i2c_client *client, u8 *id);
+
+ int (*get_max)(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y);
+ int (*get_resolution)(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y);
+ int (*get_num_traces)(struct i2c_client *client,
+ unsigned int *x_tracenum,
+ unsigned int *y_tracenum);
+
+ int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
+ int (*iap_reset)(struct i2c_client *client);
+
+ int (*prepare_fw_update)(struct i2c_client *client);
+ int (*write_fw_block)(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx);
+ int (*finish_fw_update)(struct i2c_client *client,
+ struct completion *reset_done);
+
+ int (*get_report)(struct i2c_client *client, u8 *report);
+};
+
+extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops;
+
+#endif /* _ELAN_I2C_H */
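elan_i2c.h declares a transport-ops vtable with two implementations (elan_i2c_ops and elan_smbus_ops) so the core driver can talk to the pad over either raw I2C or SMBus. The selection logic below is only an illustrative guess at how a probe might pick one based on adapter functionality; the real elan_i2c_core.c, and its dependence on the MOUSE_ELAN_I2C_I2C/SMBUS options, may differ:

/*
 * Illustrative sketch only: choosing between the two transport ops
 * tables declared in elan_i2c.h based on what the adapter supports.
 */
#include <linux/i2c.h>
#include "elan_i2c.h"

static const struct elan_transport_ops *
elan_pick_transport(struct i2c_client *client)
{
	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return &elan_i2c_ops;

	if (i2c_check_functionality(client->adapter,
				    I2C_FUNC_SMBUS_BYTE_DATA |
				    I2C_FUNC_SMBUS_BLOCK_DATA |
				    I2C_FUNC_SMBUS_I2C_BLOCK))
		return &elan_smbus_ops;

	return NULL;	/* adapter cannot talk to the pad */
}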
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
new file mode 100644
index 000000000000..0cb2be48d537
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -0,0 +1,1137 @@
+/*
+ * Elan I2C/SMBus Touchpad driver
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+#define DRIVER_NAME "elan_i2c"
+#define ELAN_DRIVER_VERSION "1.5.5"
+#define ETP_PRESSURE_OFFSET 25
+#define ETP_MAX_PRESSURE 255
+#define ETP_FWIDTH_REDUCE 90
+#define ETP_FINGER_WIDTH 15
+#define ETP_RETRY_COUNT 3
+
+#define ETP_MAX_FINGERS 5
+#define ETP_FINGER_DATA_LEN 5
+#define ETP_REPORT_ID 0x5D
+#define ETP_REPORT_ID_OFFSET 2
+#define ETP_TOUCH_INFO_OFFSET 3
+#define ETP_FINGER_DATA_OFFSET 4
+#define ETP_MAX_REPORT_LEN 34
+
+/* The main device structure */
+struct elan_tp_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct regulator *vcc;
+
+ const struct elan_transport_ops *ops;
+
+ /* for fw update */
+ struct completion fw_completion;
+ bool in_fw_update;
+
+ struct mutex sysfs_mutex;
+
+ unsigned int max_x;
+ unsigned int max_y;
+ unsigned int width_x;
+ unsigned int width_y;
+ unsigned int x_res;
+ unsigned int y_res;
+
+ u8 product_id;
+ u8 fw_version;
+ u8 sm_version;
+ u8 iap_version;
+ u16 fw_checksum;
+
+ u8 mode;
+
+ bool irq_wake;
+
+ u8 min_baseline;
+ u8 max_baseline;
+ bool baseline_ready;
+};
+
+static int elan_enable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ do {
+ error = data->ops->power_control(data->client, true);
+ if (error >= 0)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_disable_power(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->power_control(data->client, false);
+ if (!error) {
+ error = regulator_disable(data->vcc);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Failed to disable regulator: %d\n",
+ error);
+ /* Attempt to power the chip back up */
+ data->ops->power_control(data->client, true);
+ break;
+ }
+
+ return 0;
+ }
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_sleep(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->sleep_control(data->client, true);
+ if (!error)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int __elan_initialize(struct elan_tp_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+
+ error = data->ops->initialize(client);
+ if (error) {
+ dev_err(&client->dev, "device initialize failed: %d\n", error);
+ return error;
+ }
+
+ data->mode |= ETP_ENABLE_ABS;
+ error = data->ops->set_mode(client, data->mode);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to switch to absolute mode: %d\n", error);
+ return error;
+ }
+
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_initialize(struct elan_tp_data *data)
+{
+ int repeat = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = __elan_initialize(data);
+ if (!error)
+ return 0;
+
+ msleep(30);
+ } while (--repeat > 0);
+
+ return error;
+}
+
+static int elan_query_device_info(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, false, &data->fw_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(data->client, false,
+ &data->fw_checksum);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->sm_version);
+ if (error)
+ return error;
+
+ error = data->ops->get_version(data->client, true, &data->iap_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static unsigned int elan_convert_resolution(u8 val)
+{
+ /*
+ * (value from firmware) * 10 + 790 = dpi
+ *
+ * We also have to convert dpi to dots/mm (*10/254 to avoid floating
+ * point).
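+	 *
+	 * For example, a raw value of 5 corresponds to 840 dpi, i.e. about
+	 * 33 dots/mm.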
+ */
+
+ return ((int)(char)val * 10 + 790) * 10 / 254;
+}
+
+static int elan_query_device_parameters(struct elan_tp_data *data)
+{
+ unsigned int x_traces, y_traces;
+ u8 hw_x_res, hw_y_res;
+ int error;
+
+ error = data->ops->get_max(data->client, &data->max_x, &data->max_y);
+ if (error)
+ return error;
+
+ error = data->ops->get_num_traces(data->client, &x_traces, &y_traces);
+ if (error)
+ return error;
+
+ data->width_x = data->max_x / x_traces;
+ data->width_y = data->max_y / y_traces;
+
+ error = data->ops->get_resolution(data->client, &hw_x_res, &hw_y_res);
+ if (error)
+ return error;
+
+ data->x_res = elan_convert_resolution(hw_x_res);
+ data->y_res = elan_convert_resolution(hw_y_res);
+
+ return 0;
+}
+
+/*
+ **********************************************************
+ * IAP firmware updater related routines
+ **********************************************************
+ */
+static int elan_write_fw_block(struct elan_tp_data *data,
+ const u8 *page, u16 checksum, int idx)
+{
+ int retry = ETP_RETRY_COUNT;
+ int error;
+
+ do {
+ error = data->ops->write_fw_block(data->client,
+ page, checksum, idx);
+ if (!error)
+ return 0;
+
+ dev_dbg(&data->client->dev,
+ "IAP retrying page %d (error: %d)\n", idx, error);
+ } while (--retry > 0);
+
+ return error;
+}
+
+static int __elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int i, j;
+ int error;
+ u16 iap_start_addr;
+ u16 boot_page_count;
+ u16 sw_checksum = 0, fw_checksum = 0;
+
+ error = data->ops->prepare_fw_update(client);
+ if (error)
+ return error;
+
+ iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
+
+ boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+ for (i = boot_page_count; i < ETP_FW_PAGE_COUNT; i++) {
+ u16 checksum = 0;
+ const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+
+ for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+ checksum += ((page[j + 1] << 8) | page[j]);
+
+ error = elan_write_fw_block(data, page, checksum, i);
+ if (error) {
+ dev_err(dev, "write page %d fail: %d\n", i, error);
+ return error;
+ }
+
+ sw_checksum += checksum;
+ }
+
+	/* Wait for WDT reset and power-on reset to complete */
+ msleep(600);
+
+ error = data->ops->finish_fw_update(client, &data->fw_completion);
+ if (error)
+ return error;
+
+ error = data->ops->get_checksum(client, true, &fw_checksum);
+ if (error)
+ return error;
+
+ if (sw_checksum != fw_checksum) {
+ dev_err(dev, "checksum diff sw=[%04X], fw=[%04X]\n",
+ sw_checksum, fw_checksum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_update_firmware(struct elan_tp_data *data,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = data->client;
+ int retval;
+
+ dev_dbg(&client->dev, "Starting firmware update....\n");
+
+ disable_irq(client->irq);
+ data->in_fw_update = true;
+
+ retval = __elan_update_firmware(data, fw);
+ if (retval) {
+ dev_err(&client->dev, "firmware update failed: %d\n", retval);
+ data->ops->iap_reset(client);
+ } else {
+ /* Reinitialize TP after fw is updated */
+ elan_initialize(data);
+ elan_query_device_info(data);
+ }
+
+ data->in_fw_update = false;
+ enable_irq(client->irq);
+
+ return retval;
+}
+
+/*
+ *******************************************************************
+ * SYSFS attributes
+ *******************************************************************
+ */
+static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "0x%04x\n", data->fw_checksum);
+}
+
+static ssize_t elan_sysfs_read_product_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->product_id);
+}
+
+static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->fw_version);
+}
+
+static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->sm_version);
+}
+
+static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.0\n", data->iap_version);
+}
+
+static ssize_t elan_sysfs_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, ETP_FW_NAME, dev);
+ if (error) {
+ dev_err(dev, "cannot load firmware %s: %d\n",
+ ETP_FW_NAME, error);
+ return error;
+ }
+
+ /* Firmware must be exactly PAGE_NUM * PAGE_SIZE bytes */
+ if (fw->size != ETP_FW_SIZE) {
+ dev_err(dev, "invalid firmware size = %zu, expected %d.\n",
+ fw->size, ETP_FW_SIZE);
+ error = -EBADF;
+ goto out_release_fw;
+ }
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ goto out_release_fw;
+
+ error = elan_update_firmware(data, fw);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+out_release_fw:
+ release_firmware(fw);
+ return error ?: count;
+}
+
+static ssize_t calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int tries = 20;
+ int retval;
+ int error;
+ u8 val[3];
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(client, data->mode);
+ if (retval) {
+ dev_err(dev, "failed to enable calibration mode: %d\n",
+ retval);
+ goto out;
+ }
+
+ retval = data->ops->calibrate(client);
+ if (retval) {
+ dev_err(dev, "failed to start calibration: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ val[0] = 0xff;
+ do {
+ /* Wait 250ms before checking if calibration has completed. */
+ msleep(250);
+
+ retval = data->ops->calibrate_result(client, val);
+ if (retval)
+ dev_err(dev, "failed to check calibration result: %d\n",
+ retval);
+ else if (val[0] == 0)
+ break; /* calibration done */
+
+ } while (--tries);
+
+ if (tries == 0) {
+ dev_err(dev, "failed to calibrate. Timeout.\n");
+ retval = -ETIMEDOUT;
+ }
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "failed to disable calibration mode: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t elan_sysfs_read_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ enum tp_mode mode;
+
+ error = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = data->ops->iap_get_mode(data->client, &mode);
+
+ mutex_unlock(&data->sysfs_mutex);
+
+ if (error)
+ return error;
+
+ return sprintf(buf, "%d\n", (int)mode);
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
+static DEVICE_ATTR(firmware_version, S_IRUGO, elan_sysfs_read_fw_ver, NULL);
+static DEVICE_ATTR(sample_version, S_IRUGO, elan_sysfs_read_sm_ver, NULL);
+static DEVICE_ATTR(iap_version, S_IRUGO, elan_sysfs_read_iap_ver, NULL);
+static DEVICE_ATTR(fw_checksum, S_IRUGO, elan_sysfs_read_fw_checksum, NULL);
+static DEVICE_ATTR(mode, S_IRUGO, elan_sysfs_read_mode, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, elan_sysfs_update_fw);
+
+static DEVICE_ATTR_WO(calibrate);
+
+static struct attribute *elan_sysfs_entries[] = {
+ &dev_attr_product_id.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_sample_version.attr,
+ &dev_attr_iap_version.attr,
+ &dev_attr_fw_checksum.attr,
+ &dev_attr_calibrate.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_sysfs_group = {
+ .attrs = elan_sysfs_entries,
+};
+
+static ssize_t acquire_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ disable_irq(client->irq);
+
+ data->baseline_ready = false;
+
+ data->mode |= ETP_ENABLE_CALIBRATE;
+ retval = data->ops->set_mode(data->client, data->mode);
+ if (retval) {
+ dev_err(dev, "Failed to enable calibration mode to get baseline: %d\n",
+ retval);
+ goto out;
+ }
+
+ msleep(250);
+
+ retval = data->ops->get_baseline_data(data->client, true,
+ &data->max_baseline);
+ if (retval) {
+		dev_err(dev, "Failed to read max baseline from device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ retval = data->ops->get_baseline_data(data->client, false,
+ &data->min_baseline);
+ if (retval) {
+		dev_err(dev, "Failed to read min baseline from device: %d\n",
+ retval);
+ goto out_disable_calibrate;
+ }
+
+ data->baseline_ready = true;
+
+out_disable_calibrate:
+ data->mode &= ~ETP_ENABLE_CALIBRATE;
+ error = data->ops->set_mode(data->client, data->mode);
+ if (error) {
+ dev_err(dev, "Failed to disable calibration mode after acquiring baseline: %d\n",
+ error);
+ if (!retval)
+ retval = error;
+ }
+out:
+ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return retval ?: count;
+}
+
+static ssize_t min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->min_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+static ssize_t max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int retval;
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+ return retval;
+
+ if (!data->baseline_ready) {
+ retval = -ENODATA;
+ goto out;
+ }
+
+ retval = snprintf(buf, PAGE_SIZE, "%d", data->max_baseline);
+
+out:
+ mutex_unlock(&data->sysfs_mutex);
+ return retval;
+}
+
+static DEVICE_ATTR_WO(acquire);
+static DEVICE_ATTR_RO(min);
+static DEVICE_ATTR_RO(max);
+
+static struct attribute *elan_baseline_sysfs_entries[] = {
+ &dev_attr_acquire.attr,
+ &dev_attr_min.attr,
+ &dev_attr_max.attr,
+ NULL,
+};
+
+static const struct attribute_group elan_baseline_sysfs_group = {
+ .name = "baseline",
+ .attrs = elan_baseline_sysfs_entries,
+};
+
+static const struct attribute_group *elan_sysfs_groups[] = {
+ &elan_sysfs_group,
+ &elan_baseline_sysfs_group,
+ NULL
+};
+
+/*
+ ******************************************************************
+ * Elan isr functions
+ ******************************************************************
+ */
+static void elan_report_contact(struct elan_tp_data *data,
+ int contact_num, bool contact_valid,
+ u8 *finger_data)
+{
+ struct input_dev *input = data->input;
+ unsigned int pos_x, pos_y;
+ unsigned int pressure, mk_x, mk_y;
+ unsigned int area_x, area_y, major, minor, new_pressure;
+
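+	/*
+	 * Each finger's five data bytes pack a 12-bit X and Y position,
+	 * a 4-bit trace count in each direction (mk_x/mk_y) and an 8-bit
+	 * pressure value; Y is reported inverted (max_y - pos_y).
+	 */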
+ if (contact_valid) {
+ pos_x = ((finger_data[0] & 0xf0) << 4) |
+ finger_data[1];
+ pos_y = ((finger_data[0] & 0x0f) << 8) |
+ finger_data[2];
+ mk_x = (finger_data[3] & 0x0f);
+ mk_y = (finger_data[3] >> 4);
+ pressure = finger_data[4];
+
+ if (pos_x > data->max_x || pos_y > data->max_y) {
+ dev_dbg(input->dev.parent,
+				"[%d] x=%d y=%d over max (%d, %d)\n",
+ contact_num, pos_x, pos_y,
+ data->max_x, data->max_y);
+ return;
+ }
+
+ /*
+ * To avoid treating large finger as palm, let's reduce the
+ * width x and y per trace.
+ */
+ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+ area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+ major = max(area_x, area_y);
+ minor = min(area_x, area_y);
+
+ new_pressure = pressure + ETP_PRESSURE_OFFSET;
+ if (new_pressure > ETP_MAX_PRESSURE)
+ new_pressure = ETP_MAX_PRESSURE;
+
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, pos_x);
+ input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
+ input_report_abs(input, ABS_MT_PRESSURE, new_pressure);
+ input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+ } else {
+ input_mt_slot(input, contact_num);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ }
+}
+
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+{
+ struct input_dev *input = data->input;
+ u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
+ int i;
+ u8 tp_info = packet[ETP_TOUCH_INFO_OFFSET];
+ bool contact_valid;
+
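+	/*
+	 * Bits 3-7 of the touch info byte flag which of the five contacts
+	 * are valid and bit 0 carries the button state; finger data is
+	 * packed, so only valid contacts advance the data pointer.
+	 */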
+ for (i = 0; i < ETP_MAX_FINGERS; i++) {
+ contact_valid = tp_info & (1U << (3 + i));
+ elan_report_contact(data, i, contact_valid, finger_data);
+
+ if (contact_valid)
+ finger_data += ETP_FINGER_DATA_LEN;
+ }
+
+ input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_mt_report_pointer_emulation(input, true);
+ input_sync(input);
+}
+
+static irqreturn_t elan_isr(int irq, void *dev_id)
+{
+ struct elan_tp_data *data = dev_id;
+ struct device *dev = &data->client->dev;
+ int error;
+ u8 report[ETP_MAX_REPORT_LEN];
+
+ /*
+	 * When the device is connected to the I2C bus and all IAP page writes
+	 * have completed, it raises an interrupt and the driver must read back
+	 * 0000 to confirm that IAP has finished.
+ */
+ if (data->in_fw_update) {
+ complete(&data->fw_completion);
+ goto out;
+ }
+
+ error = data->ops->get_report(data->client, report);
+ if (error)
+ goto out;
+
+ if (report[ETP_REPORT_ID_OFFSET] != ETP_REPORT_ID)
+ dev_err(dev, "invalid report id data (%x)\n",
+ report[ETP_REPORT_ID_OFFSET]);
+ else
+ elan_report_absolute(data, report);
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ ******************************************************************
+ * Elan initialization functions
+ ******************************************************************
+ */
+static int elan_setup_input_device(struct elan_tp_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input;
+ unsigned int max_width = max(data->width_x, data->width_y);
+ unsigned int min_width = min(data->width_x, data->width_y);
+ int error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Elan Touchpad";
+ input->id.bustype = BUS_I2C;
+ input_set_drvdata(input, data);
+
+ error = input_mt_init_slots(input, ETP_MAX_FINGERS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_LEFT, input->keybit);
+
+ /* Set up ST parameters */
+ input_set_abs_params(input, ABS_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_X, data->x_res);
+ input_abs_set_res(input, ABS_Y, data->y_res);
+ input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+
+ /* And MT parameters */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, data->max_y, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, data->x_res);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0,
+ ETP_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
+ ETP_FINGER_WIDTH * max_width, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
+ ETP_FINGER_WIDTH * min_width, 0, 0);
+
+ data->input = input;
+
+ return 0;
+}
+
+static void elan_disable_regulator(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ regulator_disable(data->vcc);
+}
+
+static void elan_remove_sysfs_groups(void *_data)
+{
+ struct elan_tp_data *data = _data;
+
+ sysfs_remove_groups(&data->client->dev.kobj, elan_sysfs_groups);
+}
+
+static int elan_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ const struct elan_transport_ops *transport_ops;
+ struct device *dev = &client->dev;
+ struct elan_tp_data *data;
+ unsigned long irqflags;
+ int error;
+
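+	/*
+	 * Prefer the plain I2C transport when the adapter supports raw I2C
+	 * transfers; otherwise fall back to the SMBus transport, which
+	 * requires SMBus byte, block and I2C-block data transfers.
+	 */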
+ if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_I2C) &&
+ i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ transport_ops = &elan_i2c_ops;
+ } else if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) &&
+ i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ transport_ops = &elan_smbus_ops;
+ } else {
+ dev_err(dev, "not a supported I2C/SMBus adapter\n");
+ return -EIO;
+ }
+
+ data = devm_kzalloc(&client->dev, sizeof(struct elan_tp_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ data->ops = transport_ops;
+ data->client = client;
+ init_completion(&data->fw_completion);
+ mutex_init(&data->sysfs_mutex);
+
+ data->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ error = PTR_ERR(data->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vcc' regulator: %d\n",
+ error);
+ return error;
+ }
+
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_disable_regulator, data);
+ if (error) {
+ regulator_disable(data->vcc);
+ dev_err(&client->dev,
+ "Failed to add disable regulator action: %d\n",
+ error);
+ return error;
+ }
+
+ /* Initialize the touchpad. */
+ error = elan_initialize(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_info(data);
+ if (error)
+ return error;
+
+ error = elan_query_device_parameters(data);
+ if (error)
+ return error;
+
+ dev_dbg(&client->dev,
+ "Elan Touchpad Information:\n"
+ " Module product ID: 0x%04x\n"
+ " Firmware Version: 0x%04x\n"
+ " Sample Version: 0x%04x\n"
+ " IAP Version: 0x%04x\n"
+ " Max ABS X,Y: %d,%d\n"
+ " Width X,Y: %d,%d\n"
+ " Resolution X,Y: %d,%d (dots/mm)\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version,
+ data->max_x, data->max_y,
+ data->width_x, data->width_y,
+ data->x_res, data->y_res);
+
+ /* Set up input device properties based on queried parameters. */
+ error = elan_setup_input_device(data);
+ if (error)
+ return error;
+
+ /*
+ * Systems using device tree should set up interrupt via DTS,
+ * the rest will use the default falling edge interrupts.
+ */
+ irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, elan_isr,
+ irqflags | IRQF_ONESHOT,
+ client->name, data);
+ if (error) {
+ dev_err(&client->dev, "cannot register irq=%d\n", client->irq);
+ return error;
+ }
+
+ error = sysfs_create_groups(&client->dev.kobj, elan_sysfs_groups);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elan_remove_sysfs_groups, data);
+ if (error) {
+ elan_remove_sysfs_groups(data);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(data->input);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up wakeup via DTS,
+	 * the rest will configure the device as a wakeup source by default.
+ */
+ if (!client->dev.of_node)
+ device_init_wakeup(&client->dev, true);
+
+ return 0;
+}
+
+static int __maybe_unused elan_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ /*
+ * We are taking the mutex to make sure sysfs operations are
+ * complete before we attempt to bring the device into low[er]
+ * power mode.
+ */
+ ret = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ disable_irq(client->irq);
+
+ if (device_may_wakeup(dev)) {
+ ret = elan_sleep(data);
+ /* Enable wake from IRQ */
+ data->irq_wake = (enable_irq_wake(client->irq) == 0);
+ } else {
+ ret = elan_disable_power(data);
+ }
+
+ mutex_unlock(&data->sysfs_mutex);
+ return ret;
+}
+
+static int __maybe_unused elan_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elan_tp_data *data = i2c_get_clientdata(client);
+ int error;
+
+ if (device_may_wakeup(dev) && data->irq_wake) {
+ disable_irq_wake(client->irq);
+ data->irq_wake = false;
+ }
+
+ error = elan_enable_power(data);
+ if (error)
+		dev_err(dev, "failed to power up device on resume: %d\n", error);
+
+ error = elan_initialize(data);
+ if (error)
+		dev_err(dev, "failed to initialize device on resume: %d\n", error);
+
+ enable_irq(data->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elan_pm_ops, elan_suspend, elan_resume);
+
+static const struct i2c_device_id elan_id[] = {
+ { DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, elan_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id elan_of_match[] = {
+ { .compatible = "elan,ekth3000" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, elan_of_match);
+#endif
+
+static struct i2c_driver elan_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &elan_pm_ops,
+ .acpi_match_table = ACPI_PTR(elan_acpi_id),
+ .of_match_table = of_match_ptr(elan_of_match),
+ },
+ .probe = elan_probe,
+ .id_table = elan_id,
+};
+
+module_i2c_driver(elan_driver);
+
+MODULE_AUTHOR("Duson Lin <dusonlin@emc.com.tw>");
+MODULE_DESCRIPTION("Elan I2C/SMBus Touchpad driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ELAN_DRIVER_VERSION);
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
new file mode 100644
index 000000000000..97d4937fc244
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -0,0 +1,611 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - I2C interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/unaligned.h>
+
+#include "elan_i2c.h"
+
+/* Elan i2c commands */
+#define ETP_I2C_RESET 0x0100
+#define ETP_I2C_WAKE_UP 0x0800
+#define ETP_I2C_SLEEP 0x0801
+#define ETP_I2C_DESC_CMD 0x0001
+#define ETP_I2C_REPORT_DESC_CMD 0x0002
+#define ETP_I2C_STAND_CMD 0x0005
+#define ETP_I2C_UNIQUEID_CMD 0x0101
+#define ETP_I2C_FW_VERSION_CMD 0x0102
+#define ETP_I2C_SM_VERSION_CMD 0x0103
+#define ETP_I2C_XY_TRACENUM_CMD 0x0105
+#define ETP_I2C_MAX_X_AXIS_CMD 0x0106
+#define ETP_I2C_MAX_Y_AXIS_CMD 0x0107
+#define ETP_I2C_RESOLUTION_CMD 0x0108
+#define ETP_I2C_IAP_VERSION_CMD 0x0110
+#define ETP_I2C_SET_CMD 0x0300
+#define ETP_I2C_POWER_CMD 0x0307
+#define ETP_I2C_FW_CHECKSUM_CMD 0x030F
+#define ETP_I2C_IAP_CTRL_CMD 0x0310
+#define ETP_I2C_IAP_CMD 0x0311
+#define ETP_I2C_IAP_RESET_CMD 0x0314
+#define ETP_I2C_IAP_CHECKSUM_CMD 0x0315
+#define ETP_I2C_CALIBRATE_CMD 0x0316
+#define ETP_I2C_MAX_BASELINE_CMD 0x0317
+#define ETP_I2C_MIN_BASELINE_CMD 0x0318
+
+#define ETP_I2C_REPORT_LEN 34
+#define ETP_I2C_DESC_LENGTH 30
+#define ETP_I2C_REPORT_DESC_LENGTH 158
+#define ETP_I2C_INF_LENGTH 2
+#define ETP_I2C_IAP_PASSWORD 0x1EA5
+#define ETP_I2C_IAP_RESET 0xF0F0
+#define ETP_I2C_MAIN_MODE_ON (1 << 9)
+#define ETP_I2C_IAP_REG_L 0x01
+#define ETP_I2C_IAP_REG_H 0x06
+
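+/*
+ * Register reads are performed as a write of the 16-bit little-endian
+ * register address followed by a repeated-start read of the requested
+ * number of bytes.
+ */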
+static int elan_i2c_read_block(struct i2c_client *client,
+ u16 reg, u8 *val, u16 len)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = (client->flags & I2C_M_TEN) | I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_read_cmd(struct i2c_client *client, u16 reg, u8 *val)
+{
+ int retval;
+
+ retval = elan_i2c_read_block(client, reg, val, ETP_I2C_INF_LENGTH);
+ if (retval < 0) {
+		dev_err(&client->dev, "reading cmd (0x%04x) failed: %d\n",
+			reg, retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_cmd(struct i2c_client *client, u16 reg, u16 cmd)
+{
+ __le16 buf[] = {
+ cpu_to_le16(reg),
+ cpu_to_le16(cmd),
+ };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = client->flags & I2C_M_TEN,
+ .len = sizeof(buf),
+ .buf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
+}
+
+static int elan_i2c_initialize(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u8 val[256];
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ }
+
+ /* Wait for the device to reset */
+ msleep(100);
+
+ /* get reset acknowledgement 0000 */
+ error = i2c_master_recv(client, val, ETP_I2C_INF_LENGTH);
+ if (error < 0) {
+ dev_err(dev, "failed to read reset response: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_DESC_CMD,
+ val, ETP_I2C_DESC_LENGTH);
+ if (error) {
+ dev_err(dev, "cannot get device descriptor: %d\n", error);
+ return error;
+ }
+
+ error = elan_i2c_read_block(client, ETP_I2C_REPORT_DESC_CMD,
+ val, ETP_I2C_REPORT_DESC_LENGTH);
+ if (error) {
+		dev_err(dev, "fetching report descriptor failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_sleep_control(struct i2c_client *client, bool sleep)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD,
+ sleep ? ETP_I2C_SLEEP : ETP_I2C_WAKE_UP);
+}
+
+static int elan_i2c_power_control(struct i2c_client *client, bool enable)
+{
+ u8 val[2];
+ u16 reg;
+ int error;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_POWER_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read current power state: %d\n",
+ error);
+ return error;
+ }
+
+ reg = le16_to_cpup((__le16 *)val);
+ if (enable)
+ reg &= ~ETP_DISABLE_POWER;
+ else
+ reg |= ETP_DISABLE_POWER;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_POWER_CMD, reg);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write current power state: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_mode(struct i2c_client *client, u8 mode)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_SET_CMD, mode);
+}
+
+static int elan_i2c_calibrate(struct i2c_client *client)
+{
+ return elan_i2c_write_cmd(client, ETP_I2C_CALIBRATE_CMD, 1);
+}
+
+static int elan_i2c_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ return elan_i2c_read_block(client, ETP_I2C_CALIBRATE_CMD, val, 1);
+}
+
+static int elan_i2c_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ max_baseline ? ETP_I2C_MAX_BASELINE_CMD :
+ ETP_I2C_MIN_BASELINE_CMD,
+ val);
+ if (error)
+ return error;
+
+ *value = le16_to_cpup((__le16 *)val);
+
+ return 0;
+}
+
+static int elan_i2c_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_VERSION_CMD :
+ ETP_I2C_FW_VERSION_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_SM_VERSION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_UNIQUEID_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[0];
+ return 0;
+}
+
+static int elan_i2c_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client,
+ iap ? ETP_I2C_IAP_CHECKSUM_CMD :
+ ETP_I2C_FW_CHECKSUM_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = le16_to_cpup((__le16 *)val);
+ return 0;
+}
+
+static int elan_i2c_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_X_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get X dimension: %d\n", error);
+ return error;
+ }
+
+ *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get Y dimension: %d\n", error);
+ return error;
+ }
+
+ *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+
+ return 0;
+}
+
+static int elan_i2c_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_RESOLUTION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[0];
+ *hw_res_y = val[1];
+
+ return 0;
+}
+
+static int elan_i2c_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_XY_TRACENUM_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[0] - 1;
+ *y_traces = val[1] - 1;
+
+ return 0;
+}
+
+static int elan_i2c_iap_get_mode(struct i2c_client *client, enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read iap control register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = le16_to_cpup((__le16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_I2C_MAIN_MODE_ON) ? MAIN_MODE : IAP_MODE;
+
+ return 0;
+}
+
+static int elan_i2c_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_RESET_CMD,
+ ETP_I2C_IAP_RESET);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_set_flash_key(struct i2c_client *client)
+{
+ int error;
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_IAP_CMD,
+ ETP_I2C_IAP_PASSWORD);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u16 password;
+
+ /* Get FW in which mode (IAP_MODE/MAIN_MODE) */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == IAP_MODE) {
+ /* Reset IC */
+ error = elan_i2c_iap_reset(client);
+ if (error)
+ return error;
+
+ msleep(30);
+ }
+
+	/* Set flash key */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(mode == MAIN_MODE ? 100 : 30);
+
+ /* Check if we are in IAP mode or not */
+ error = elan_i2c_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+ dev_err(dev, "wrong mode: %d\n", mode);
+ return -EIO;
+ }
+
+ /* Set flash key again */
+ error = elan_i2c_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Wait for F/W IAP initialization */
+ msleep(30);
+
+ /* read back to check we actually enabled successfully. */
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CMD, val);
+ if (error) {
+ dev_err(dev, "cannot read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = le16_to_cpup((__le16 *)val);
+ if (password != ETP_I2C_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password: 0x%X\n", password);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ u8 page_store[ETP_FW_PAGE_SIZE + 4];
+ u8 val[3];
+ u16 result;
+ int ret, error;
+
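+	/*
+	 * A page is written as the 16-bit IAP register address followed by
+	 * the page data and a little-endian 16-bit checksum of the page.
+	 */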
+ page_store[0] = ETP_I2C_IAP_REG_L;
+ page_store[1] = ETP_I2C_IAP_REG_H;
+ memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+	/* store the checksum in the last two bytes */
+ put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+
+ ret = i2c_master_send(client, page_store, sizeof(page_store));
+ if (ret != sizeof(page_store)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(dev, "Failed to write page %d: %d\n", idx, error);
+ return error;
+ }
+
+ /* Wait for F/W to update one page ROM data. */
+ msleep(20);
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
+ if (error) {
+ dev_err(dev, "Failed to read IAP write result: %d\n", error);
+ return error;
+ }
+
+ result = le16_to_cpup((__le16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_finish_fw_update(struct i2c_client *client,
+ struct completion *completion)
+{
+ struct device *dev = &client->dev;
+ long ret;
+ int error;
+ int len;
+ u8 buffer[ETP_I2C_INF_LENGTH];
+
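+	/*
+	 * The reset below raises an interrupt once IAP has finished, so
+	 * temporarily re-enable the IRQ (it is kept disabled for the whole
+	 * update) and let elan_isr() signal fw_completion.
+	 */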
+ reinit_completion(completion);
+ enable_irq(client->irq);
+
+ error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
+ if (!error)
+ ret = wait_for_completion_interruptible_timeout(completion,
+ msecs_to_jiffies(300));
+ disable_irq(client->irq);
+
+ if (error) {
+ dev_err(dev, "device reset failed: %d\n", error);
+ return error;
+ } else if (ret == 0) {
+ dev_err(dev, "timeout waiting for device reset\n");
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ error = ret;
+ dev_err(dev, "error waiting for device reset: %d\n", error);
+ return error;
+ }
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
+ if (len != ETP_I2C_INF_LENGTH) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read INT signal: %d (%d)\n",
+ error, len);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
+ len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_I2C_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_I2C_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+const struct elan_transport_ops elan_i2c_ops = {
+ .initialize = elan_i2c_initialize,
+ .sleep_control = elan_i2c_sleep_control,
+ .power_control = elan_i2c_power_control,
+ .set_mode = elan_i2c_set_mode,
+
+ .calibrate = elan_i2c_calibrate,
+ .calibrate_result = elan_i2c_calibrate_result,
+
+ .get_baseline_data = elan_i2c_get_baseline_data,
+
+ .get_version = elan_i2c_get_version,
+ .get_sm_version = elan_i2c_get_sm_version,
+ .get_product_id = elan_i2c_get_product_id,
+ .get_checksum = elan_i2c_get_checksum,
+
+ .get_max = elan_i2c_get_max,
+ .get_resolution = elan_i2c_get_resolution,
+ .get_num_traces = elan_i2c_get_num_traces,
+
+ .iap_get_mode = elan_i2c_iap_get_mode,
+ .iap_reset = elan_i2c_iap_reset,
+
+ .prepare_fw_update = elan_i2c_prepare_fw_update,
+ .write_fw_block = elan_i2c_write_fw_block,
+ .finish_fw_update = elan_i2c_finish_fw_update,
+
+ .get_report = elan_i2c_get_report,
+};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
new file mode 100644
index 000000000000..359bf8583d54
--- /dev/null
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -0,0 +1,514 @@
+/*
+ * Elan I2C/SMBus Touchpad driver - SMBus interface
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Version: 1.5.5
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "elan_i2c.h"
+
+/* Elan SMbus commands */
+#define ETP_SMBUS_IAP_CMD 0x00
+#define ETP_SMBUS_ENABLE_TP 0x20
+#define ETP_SMBUS_SLEEP_CMD 0x21
+#define ETP_SMBUS_IAP_PASSWORD_WRITE 0x29
+#define ETP_SMBUS_IAP_PASSWORD_READ 0x80
+#define ETP_SMBUS_WRITE_FW_BLOCK 0x2A
+#define ETP_SMBUS_IAP_RESET_CMD 0x2B
+#define ETP_SMBUS_RANGE_CMD 0xA0
+#define ETP_SMBUS_FW_VERSION_CMD 0xA1
+#define ETP_SMBUS_XY_TRACENUM_CMD 0xA2
+#define ETP_SMBUS_SM_VERSION_CMD 0xA3
+#define ETP_SMBUS_UNIQUEID_CMD 0xA3
+#define ETP_SMBUS_RESOLUTION_CMD 0xA4
+#define ETP_SMBUS_HELLOPACKET_CMD 0xA7
+#define ETP_SMBUS_PACKET_QUERY 0xA8
+#define ETP_SMBUS_IAP_VERSION_CMD 0xAC
+#define ETP_SMBUS_IAP_CTRL_CMD 0xAD
+#define ETP_SMBUS_IAP_CHECKSUM_CMD 0xAE
+#define ETP_SMBUS_FW_CHECKSUM_CMD 0xAF
+#define ETP_SMBUS_MAX_BASELINE_CMD 0xC3
+#define ETP_SMBUS_MIN_BASELINE_CMD 0xC4
+#define ETP_SMBUS_CALIBRATE_QUERY 0xC5
+
+#define ETP_SMBUS_REPORT_LEN 32
+#define ETP_SMBUS_REPORT_OFFSET 2
+#define ETP_SMBUS_HELLOPACKET_LEN 5
+#define ETP_SMBUS_IAP_PASSWORD 0x1234
+#define ETP_SMBUS_IAP_MODE_ON (1 << 6)
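+
+/*
+ * Unlike the I2C transport, multi-byte values returned by these SMBus
+ * commands are big-endian, hence the be16_to_cpup() conversions below.
+ */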
+
+static int elan_smbus_initialize(struct i2c_client *client)
+{
+ u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
+ u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ int len, error;
+
+ /* Get hello packet */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_HELLOPACKET_CMD, values);
+ if (len != ETP_SMBUS_HELLOPACKET_LEN) {
+		dev_err(&client->dev, "wrong hello packet length: %d\n", len);
+ error = len < 0 ? len : -EIO;
+ return error;
+ }
+
+ /* compare hello packet */
+ if (memcmp(values, check, ETP_SMBUS_HELLOPACKET_LEN)) {
+		dev_err(&client->dev, "hello packet mismatch [%*ph]\n",
+ ETP_SMBUS_HELLOPACKET_LEN, values);
+ return -ENXIO;
+ }
+
+ /* enable tp */
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_ENABLE_TP);
+ if (error) {
+ dev_err(&client->dev, "failed to enable touchpad: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_mode(struct i2c_client *client, u8 mode)
+{
+ u8 cmd[4] = { 0x00, 0x07, 0x00, mode };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_sleep_control(struct i2c_client *client, bool sleep)
+{
+ if (sleep)
+ return i2c_smbus_write_byte(client, ETP_SMBUS_SLEEP_CMD);
+ else
+ return 0; /* XXX should we send ETP_SMBUS_ENABLE_TP here? */
+}
+
+static int elan_smbus_power_control(struct i2c_client *client, bool enable)
+{
+ return 0; /* A no-op */
+}
+
+static int elan_smbus_calibrate(struct i2c_client *client)
+{
+ u8 cmd[4] = { 0x00, 0x08, 0x00, 0x01 };
+
+ return i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+}
+
+static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_CALIBRATE_QUERY, val);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int elan_smbus_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ max_baseline ?
+ ETP_SMBUS_MAX_BASELINE_CMD :
+ ETP_SMBUS_MIN_BASELINE_CMD,
+ val);
+ if (error < 0)
+ return error;
+
+ *value = be16_to_cpup((__be16 *)val);
+
+ return 0;
+}
+
+static int elan_smbus_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_IAP_VERSION_CMD :
+ ETP_SMBUS_FW_VERSION_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s version: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *version = val[2];
+ return 0;
+}
+
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *version)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_SM_VERSION_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ return error;
+ }
+
+ *version = val[0]; /* XXX Why 0 and not 2 as in IAP/FW versions? */
+ return 0;
+}
+
+static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_UNIQUEID_CMD, val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get product ID: %d\n", error);
+ return error;
+ }
+
+ *id = val[1];
+ return 0;
+}
+
+static int elan_smbus_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+{
+ int error;
+ u8 val[3];
+
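+	/*
+	 * XXX: the command selection here is inverted relative to
+	 * elan_i2c_get_checksum() - double-check which register the
+	 * firmware actually expects for the IAP checksum.
+	 */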
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
+ ETP_SMBUS_IAP_CHECKSUM_CMD,
+ val);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to get %s checksum: %d\n",
+ iap ? "IAP" : "FW", error);
+ return error;
+ }
+
+ *csum = be16_to_cpup((__be16 *)val);
+ return 0;
+}
+
+static int elan_smbus_get_max(struct i2c_client *client,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get dimensions: %d\n", error);
+ return error;
+ }
+
+ *max_x = (0x0f & val[0]) << 8 | val[1];
+ *max_y = (0xf0 & val[0]) << 4 | val[2];
+
+ return 0;
+}
+
+static int elan_smbus_get_resolution(struct i2c_client *client,
+ u8 *hw_res_x, u8 *hw_res_y)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_RESOLUTION_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get resolution: %d\n", error);
+ return error;
+ }
+
+ *hw_res_x = val[1] & 0x0F;
+ *hw_res_y = (val[1] & 0xF0) >> 4;
+
+ return 0;
+}
+
+static int elan_smbus_get_num_traces(struct i2c_client *client,
+ unsigned int *x_traces,
+ unsigned int *y_traces)
+{
+ int error;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_XY_TRACENUM_CMD, val);
+	if (error < 0) {
+ dev_err(&client->dev, "failed to get trace info: %d\n", error);
+ return error;
+ }
+
+ *x_traces = val[1] - 1;
+ *y_traces = val[2] - 1;
+
+ return 0;
+}
+
+static int elan_smbus_iap_get_mode(struct i2c_client *client,
+ enum tp_mode *mode)
+{
+ int error;
+ u16 constant;
+ u8 val[3];
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+		dev_err(&client->dev, "failed to read iap control register: %d\n",
+ error);
+ return error;
+ }
+
+ constant = be16_to_cpup((__be16 *)val);
+ dev_dbg(&client->dev, "iap control reg: 0x%04x.\n", constant);
+
+ *mode = (constant & ETP_SMBUS_IAP_MODE_ON) ? IAP_MODE : MAIN_MODE;
+
+ return 0;
+}
+
+static int elan_smbus_iap_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte(client, ETP_SMBUS_IAP_RESET_CMD);
+ if (error) {
+ dev_err(&client->dev, "cannot reset IC: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_set_flash_key(struct i2c_client *client)
+{
+ int error;
+ u8 cmd[4] = { 0x00, 0x0B, 0x00, 0x5A };
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(&client->dev, "cannot set flash key: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int len;
+ int error;
+ enum tp_mode mode;
+ u8 val[3];
+ u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
+ u16 password;
+
+ /* Get FW in which mode (IAP_MODE/MAIN_MODE) */
+ error = elan_smbus_iap_get_mode(client, &mode);
+ if (error)
+ return error;
+
+ if (mode == MAIN_MODE) {
+
+ /* set flash key */
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* write iap password */
+ if (i2c_smbus_write_byte(client,
+ ETP_SMBUS_IAP_PASSWORD_WRITE) < 0) {
+ dev_err(dev, "cannot write iap password\n");
+ return -EIO;
+ }
+
+ error = i2c_smbus_write_block_data(client, ETP_SMBUS_IAP_CMD,
+ sizeof(cmd), cmd);
+ if (error) {
+ dev_err(dev, "failed to write iap password: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Read back password to make sure we enabled flash
+ * successfully.
+ */
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_PASSWORD_READ,
+ val);
+ if (len < sizeof(u16)) {
+ error = len < 0 ? len : -EIO;
+ dev_err(dev, "failed to read iap password: %d\n",
+ error);
+ return error;
+ }
+
+ password = be16_to_cpup((__be16 *)val);
+ if (password != ETP_SMBUS_IAP_PASSWORD) {
+ dev_err(dev, "wrong iap password = 0x%X\n", password);
+ return -EIO;
+ }
+
+ /* Wait 30ms for MAIN_MODE change to IAP_MODE */
+ msleep(30);
+ }
+
+ error = elan_smbus_set_flash_key(client);
+ if (error)
+ return error;
+
+ /* Reset IC */
+ error = elan_smbus_iap_reset(client);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int elan_smbus_write_fw_block(struct i2c_client *client,
+ const u8 *page, u16 checksum, int idx)
+{
+ struct device *dev = &client->dev;
+ int error;
+ u16 result;
+ u8 val[3];
+
+ /*
+	 * The SMBus protocol limits block transfers to 32 bytes at a
+	 * time, so each firmware page must be written in two
+	 * transfers.
+ */
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 1, error);
+ return error;
+ }
+
+ error = i2c_smbus_write_block_data(client,
+ ETP_SMBUS_WRITE_FW_BLOCK,
+ ETP_FW_PAGE_SIZE / 2,
+ page + ETP_FW_PAGE_SIZE / 2);
+ if (error) {
+ dev_err(dev, "Failed to write page %d (part %d): %d\n",
+ idx, 2, error);
+ return error;
+ }
+
+ /* Wait for F/W to update one page ROM data. */
+ usleep_range(8000, 10000);
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+ dev_err(dev, "Failed to read IAP write result: %d\n",
+ error);
+ return error;
+ }
+
+ result = be16_to_cpup((__be16 *)val);
+ if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
+ dev_err(dev, "IAP reports failed write: %04hx\n",
+ result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+{
+ int len;
+
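+	/*
+	 * SMBus reports are 32 bytes and are stored at offset 2 in the
+	 * buffer so that the report ID and finger data end up at the same
+	 * offsets the core expects for the 34-byte I2C report.
+	 */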
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_PACKET_QUERY,
+ &report[ETP_SMBUS_REPORT_OFFSET]);
+ if (len < 0) {
+ dev_err(&client->dev, "failed to read report data: %d\n", len);
+ return len;
+ }
+
+ if (len != ETP_SMBUS_REPORT_LEN) {
+ dev_err(&client->dev,
+ "wrong report length (%d vs %d expected)\n",
+ len, ETP_SMBUS_REPORT_LEN);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int elan_smbus_finish_fw_update(struct i2c_client *client,
+ struct completion *fw_completion)
+{
+	/* Unlike the I2C transport, no special handling is needed here */
+ return 0;
+}
+
+const struct elan_transport_ops elan_smbus_ops = {
+ .initialize = elan_smbus_initialize,
+ .sleep_control = elan_smbus_sleep_control,
+ .power_control = elan_smbus_power_control,
+ .set_mode = elan_smbus_set_mode,
+
+ .calibrate = elan_smbus_calibrate,
+ .calibrate_result = elan_smbus_calibrate_result,
+
+ .get_baseline_data = elan_smbus_get_baseline_data,
+
+ .get_version = elan_smbus_get_version,
+ .get_sm_version = elan_smbus_get_sm_version,
+ .get_product_id = elan_smbus_get_product_id,
+ .get_checksum = elan_smbus_get_checksum,
+
+ .get_max = elan_smbus_get_max,
+ .get_resolution = elan_smbus_get_resolution,
+ .get_num_traces = elan_smbus_get_num_traces,
+
+ .iap_get_mode = elan_smbus_iap_get_mode,
+ .iap_reset = elan_smbus_iap_reset,
+
+ .prepare_fw_update = elan_smbus_prepare_fw_update,
+ .write_fw_block = elan_smbus_write_fw_block,
+ .finish_fw_update = elan_smbus_finish_fw_update,
+
+ .get_report = elan_smbus_get_report,
+};
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 8c7d94200bdb..ced07391304b 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -171,7 +171,6 @@ static struct platform_driver gpio_mouse_device_driver = {
.remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
- .owner = THIS_MODULE,
}
};
module_platform_driver(gpio_mouse_device_driver);
diff --git a/drivers/input/mouse/lifebook.h b/drivers/input/mouse/lifebook.h
index 4c4326c6f504..0baf02a70a99 100644
--- a/drivers/input/mouse/lifebook.h
+++ b/drivers/input/mouse/lifebook.h
@@ -16,14 +16,14 @@ void lifebook_module_init(void);
int lifebook_detect(struct psmouse *psmouse, bool set_properties);
int lifebook_init(struct psmouse *psmouse);
#else
-inline void lifebook_module_init(void)
+static inline void lifebook_module_init(void)
{
}
-inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
+static inline int lifebook_detect(struct psmouse *psmouse, bool set_properties)
{
return -ENOSYS;
}
-inline int lifebook_init(struct psmouse *psmouse)
+static inline int lifebook_init(struct psmouse *psmouse)
{
return -ENOSYS;
}
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index 1ccc88af1f0b..d6e8f58a1de3 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -318,8 +318,7 @@ static int navpoint_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int navpoint_suspend(struct device *dev)
+static int __maybe_unused navpoint_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -333,7 +332,7 @@ static int navpoint_suspend(struct device *dev)
return 0;
}
-static int navpoint_resume(struct device *dev)
+static int __maybe_unused navpoint_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
@@ -346,7 +345,6 @@ static int navpoint_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
@@ -355,7 +353,6 @@ static struct platform_driver navpoint_driver = {
.remove = navpoint_remove,
.driver = {
.name = "navpoint",
- .owner = THIS_MODULE,
.pm = &navpoint_pm_ops,
},
};
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ad822608f6ee..878f18498f3b 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -614,8 +614,7 @@ static int synaptics_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int synaptics_i2c_suspend(struct device *dev)
+static int __maybe_unused synaptics_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -628,7 +627,7 @@ static int synaptics_i2c_suspend(struct device *dev)
return 0;
}
-static int synaptics_i2c_resume(struct device *dev)
+static int __maybe_unused synaptics_i2c_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
@@ -643,7 +642,6 @@ static int synaptics_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(synaptics_i2c_pm, synaptics_i2c_suspend,
synaptics_i2c_resume);
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index 58781c8a8aec..131d7826dc6b 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -24,9 +24,7 @@
struct ps2if {
struct serio *io;
- struct resource *iomem_res;
void __iomem *base;
- unsigned irq;
};
/*
@@ -83,16 +81,34 @@ static void altera_ps2_close(struct serio *io)
static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
+ struct resource *res;
struct serio *serio;
int error, irq;
- ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
- serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!ps2if || !serio) {
- error = -ENOMEM;
- goto err_free_mem;
+ ps2if = devm_kzalloc(&pdev->dev, sizeof(struct ps2if), GFP_KERNEL);
+ if (!ps2if)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ps2if->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ps2if->base))
+ return PTR_ERR(ps2if->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ error = devm_request_irq(&pdev->dev, irq, altera_ps2_rxint, 0,
+ pdev->name, ps2if);
+ if (error) {
+ dev_err(&pdev->dev, "could not request IRQ %d\n", irq);
+ return error;
}
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
serio->id.type = SERIO_8042;
serio->write = altera_ps2_write;
serio->open = altera_ps2_open;
@@ -103,56 +119,12 @@ static int altera_ps2_probe(struct platform_device *pdev)
serio->dev.parent = &pdev->dev;
ps2if->io = serio;
- ps2if->iomem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ps2if->iomem_res == NULL) {
- error = -ENOENT;
- goto err_free_mem;
- }
-
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- error = -ENXIO;
- goto err_free_mem;
- }
- ps2if->irq = irq;
-
- if (!request_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res), pdev->name)) {
- error = -EBUSY;
- goto err_free_mem;
- }
-
- ps2if->base = ioremap(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- if (!ps2if->base) {
- error = -ENOMEM;
- goto err_free_res;
- }
-
- error = request_irq(ps2if->irq, altera_ps2_rxint, 0, pdev->name, ps2if);
- if (error) {
- dev_err(&pdev->dev, "could not allocate IRQ %d: %d\n",
- ps2if->irq, error);
- goto err_unmap;
- }
-
- dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, ps2if->irq);
+ dev_info(&pdev->dev, "base %p, irq %d\n", ps2if->base, irq);
serio_register_port(ps2if->io);
platform_set_drvdata(pdev, ps2if);
return 0;
-
- err_unmap:
- iounmap(ps2if->base);
- err_free_res:
- release_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- err_free_mem:
- kfree(ps2if);
- kfree(serio);
- return error;
}
/*
@@ -163,11 +135,6 @@ static int altera_ps2_remove(struct platform_device *pdev)
struct ps2if *ps2if = platform_get_drvdata(pdev);
serio_unregister_port(ps2if->io);
- free_irq(ps2if->irq, ps2if);
- iounmap(ps2if->base);
- release_mem_region(ps2if->iomem_res->start,
- resource_size(ps2if->iomem_res));
- kfree(ps2if);
return 0;
}
@@ -189,7 +156,6 @@ static struct platform_driver altera_ps2_driver = {
.remove = altera_ps2_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(altera_ps2_match),
},
};
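The altera_ps2 conversion above switches to device-managed (devm_*) resources so that the probe error paths and remove() no longer free anything by hand. A minimal sketch of that probe pattern, using a hypothetical "bar" device and interrupt handler rather than anything from this patch:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>

struct bar_priv {
	void __iomem *base;
};

static irqreturn_t bar_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int bar_probe(struct platform_device *pdev)
{
	struct bar_priv *priv;
	struct resource *res;
	int irq, error;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* request_mem_region() + ioremap(), undone automatically on detach */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* released automatically as well, so remove() stays trivial */
	error = devm_request_irq(&pdev->dev, irq, bar_irq, 0,
				 dev_name(&pdev->dev), priv);
	if (error)
		return error;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static struct platform_driver bar_driver = {
	.probe = bar_probe,
	.driver = {
		.name = "bar",
	},
};
module_platform_driver(bar_driver);

MODULE_LICENSE("GPL");

Everything acquired through devm_* is released when probe fails or the device is unbound, which is what lets the patch delete the err_unmap/err_free_res labels and most of altera_ps2_remove().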
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 98be824544a5..45d4e08ca4f8 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -214,7 +214,6 @@ MODULE_DEVICE_TABLE(of, apbps2_of_match);
static struct platform_driver apbps2_of_driver = {
.driver = {
.name = "grlib-apbps2",
- .owner = THIS_MODULE,
.of_match_table = apbps2_of_match,
},
.probe = apbps2_of_probe,
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
index 8024a6d7fccb..99e57a418753 100644
--- a/drivers/input/serio/arc_ps2.c
+++ b/drivers/input/serio/arc_ps2.c
@@ -266,7 +266,6 @@ MODULE_DEVICE_TABLE(of, arc_ps2_match);
static struct platform_driver arc_ps2_driver = {
.driver = {
.name = "arc_ps2",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(arc_ps2_match),
},
.probe = arc_ps2_probe,
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index 3290b287ac4b..2e4ff5bac754 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -352,7 +352,6 @@ static struct platform_driver psif_driver = {
.remove = __exit_p(psif_remove),
.driver = {
.name = "atmel_psif",
- .owner = THIS_MODULE,
.pm = &psif_pm_ops,
},
};
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index cfe549d4eaa5..9c54c43c9749 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -209,7 +209,6 @@ static int ct82c710_remove(struct platform_device *dev)
static struct platform_driver ct82c710_driver = {
.driver = {
.name = "ct82c710",
- .owner = THIS_MODULE,
},
.probe = ct82c710_probe,
.remove = ct82c710_remove,
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 93cb7912703c..afcd1c1a05b2 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -99,7 +99,6 @@ MODULE_DEVICE_TABLE(of, sparc_i8042_match);
static struct platform_driver sparc_i8042_driver = {
.driver = {
.name = "i8042",
- .owner = THIS_MODULE,
.of_match_table = sparc_i8042_match,
},
.probe = sparc_i8042_probe,
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index faeeb1372462..c66d1b53843e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -579,6 +579,16 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
},
},
{
+ /*
+ * Intel NUC D54250WYK - does not have i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ },
+ },
+ {
/* MSI Wind U-100 */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "U-100"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index f5a98af3b325..924e4bf357fb 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1463,7 +1463,6 @@ static int i8042_remove(struct platform_device *dev)
static struct platform_driver i8042_driver = {
.driver = {
.name = "i8042",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &i8042_pm_ops,
#endif
diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c
index bc85e1cc66d8..e365c5f4cbc9 100644
--- a/drivers/input/serio/maceps2.c
+++ b/drivers/input/serio/maceps2.c
@@ -162,7 +162,6 @@ static int maceps2_remove(struct platform_device *dev)
static struct platform_driver maceps2_driver = {
.driver = {
.name = "maceps2",
- .owner = THIS_MODULE,
},
.probe = maceps2_probe,
.remove = maceps2_remove,
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index d906f3ebc8c8..8e9a4209fcad 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -273,7 +273,6 @@ static struct platform_driver olpc_apsp_driver = {
.remove = olpc_apsp_remove,
.driver = {
.name = "olpc-apsp",
- .owner = THIS_MODULE,
.of_match_table = olpc_apsp_dt_ids,
},
};
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 594256c38554..5a9d521510bf 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -186,7 +186,6 @@ static int q40kbd_remove(struct platform_device *pdev)
static struct platform_driver q40kbd_driver = {
.driver = {
.name = "q40kbd",
- .owner = THIS_MODULE,
},
.remove = q40kbd_remove,
};
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index e462e7791bb8..8cf964736902 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -168,7 +168,6 @@ static struct platform_driver rpckbd_driver = {
.remove = rpckbd_remove,
.driver = {
.name = "kart",
- .owner = THIS_MODULE,
},
};
module_platform_driver(rpckbd_driver);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index d399b8b0f000..a05a5179da32 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
*/
static void serio_init_port(struct serio *serio)
{
- static atomic_t serio_no = ATOMIC_INIT(0);
+ static atomic_t serio_no = ATOMIC_INIT(-1);
__module_get(THIS_MODULE);
@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
mutex_init(&serio->drv_mutex);
device_initialize(&serio->dev);
dev_set_name(&serio->dev, "serio%lu",
- (unsigned long)atomic_inc_return(&serio_no) - 1);
+ (unsigned long)atomic_inc_return(&serio_no));
serio->dev.bus = &serio_bus;
serio->dev.release = serio_release_port;
serio->dev.groups = serio_device_attr_groups;
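The serio hunk here and the serio_raw.c hunk just below change the static counters from ATOMIC_INIT(0) plus a "- 1" at the use site to ATOMIC_INIT(-1) with a plain atomic_inc_return(). A small sketch of why the numbering is unchanged (hypothetical "example" names, not from the patch):

#include <linux/atomic.h>
#include <linux/kernel.h>

/*
 * Before: counter starts at 0, atomic_inc_return() yields 1, 2, 3, ...
 *         and every caller subtracts 1 to get 0-based names.
 * After:  counter starts at -1, atomic_inc_return() yields 0, 1, 2, ...
 *         directly, so the "- 1" (and the risk of forgetting it) goes away.
 */
static atomic_t example_no = ATOMIC_INIT(-1);

static void example_set_name(char *buf, size_t len)
{
	snprintf(buf, len, "example%lu",
		 (unsigned long)atomic_inc_return(&example_no));
}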
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index c9a02fe57576..71ef5d65a0c6 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
{
- static atomic_t serio_raw_no = ATOMIC_INIT(0);
+ static atomic_t serio_raw_no = ATOMIC_INIT(-1);
struct serio_raw *serio_raw;
int err;
@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
}
snprintf(serio_raw->name, sizeof(serio_raw->name),
- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
+ "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
kref_init(&serio_raw->kref);
INIT_LIST_HEAD(&serio_raw->client_list);
init_waitqueue_head(&serio_raw->wait);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index e6cf52ebad87..5223cbf94262 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -363,7 +363,6 @@ MODULE_DEVICE_TABLE(of, xps2_of_match);
static struct platform_driver xps2_of_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = xps2_of_match,
},
.probe = xps2_of_probe,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 0d4a9fad4a78..251ff2aa0633 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -292,7 +292,6 @@ static int pm860x_touch_probe(struct platform_device *pdev)
static struct platform_driver pm860x_touch_driver = {
.driver = {
.name = "88pm860x-touch",
- .owner = THIS_MODULE,
},
.probe = pm860x_touch_probe,
};
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index e1d8003d01f8..58917525126e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -295,6 +295,19 @@ config TOUCHSCREEN_FUJITSU
To compile this driver as a module, choose M here: the
module will be called fujitsu-ts.
+config TOUCHSCREEN_GOODIX
+ tristate "Goodix I2C touchscreen"
+ depends on I2C && ACPI
+ help
+ Say Y here if you have the Goodix touchscreen (such as one
+ installed in Onda v975w tablets) connected to your
+ system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goodix.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
@@ -334,6 +347,18 @@ config TOUCHSCREEN_GUNZE
To compile this driver as a module, choose M here: the
module will be called gunze.
+config TOUCHSCREEN_ELAN
+ tristate "Elan eKTH I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have an Elan eKTH I2C touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called elants_i2c.
+
config TOUCHSCREEN_ELO
tristate "Elo serial touchscreens"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 090e61cc9171..0242fea2102a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -31,9 +31,11 @@ obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ELAN) += elants_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
+obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 523865daa1d3..da4e5bb5e045 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -820,8 +820,7 @@ static int ad7877_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ad7877_suspend(struct device *dev)
+static int __maybe_unused ad7877_suspend(struct device *dev)
{
struct ad7877 *ts = dev_get_drvdata(dev);
@@ -830,7 +829,7 @@ static int ad7877_suspend(struct device *dev)
return 0;
}
-static int ad7877_resume(struct device *dev)
+static int __maybe_unused ad7877_resume(struct device *dev)
{
struct ad7877 *ts = dev_get_drvdata(dev);
@@ -838,7 +837,6 @@ static int ad7877_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 1eb9d3c20886..fec66ad80513 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -284,8 +284,7 @@ static void ad7879_close(struct input_dev* input)
__ad7879_disable(ts);
}
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_suspend(struct device *dev)
+static int __maybe_unused ad7879_suspend(struct device *dev)
{
struct ad7879 *ts = dev_get_drvdata(dev);
@@ -301,7 +300,7 @@ static int ad7879_suspend(struct device *dev)
return 0;
}
-static int ad7879_resume(struct device *dev)
+static int __maybe_unused ad7879_resume(struct device *dev)
{
struct ad7879 *ts = dev_get_drvdata(dev);
@@ -316,7 +315,6 @@ static int ad7879_resume(struct device *dev)
return 0;
}
-#endif
SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
EXPORT_SYMBOL(ad7879_pm_ops);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e57ba52bf484..e4eb8a6c658f 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -883,8 +883,7 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
-static int ads7846_suspend(struct device *dev)
+static int __maybe_unused ads7846_suspend(struct device *dev)
{
struct ads7846 *ts = dev_get_drvdata(dev);
@@ -906,7 +905,7 @@ static int ads7846_suspend(struct device *dev)
return 0;
}
-static int ads7846_resume(struct device *dev)
+static int __maybe_unused ads7846_resume(struct device *dev)
{
struct ads7846 *ts = dev_get_drvdata(dev);
@@ -927,7 +926,6 @@ static int ads7846_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 279c0e42b8a7..7ec0421c0dd8 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -425,7 +425,6 @@ static struct platform_driver atmel_wm97xx_driver = {
.remove = __exit_p(atmel_wm97xx_remove),
.driver = {
.name = "wm97xx-touch",
- .owner = THIS_MODULE,
.pm = &atmel_wm97xx_pm_ops,
},
};
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index aaacf8bfa61f..bb070206223c 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2244,8 +2244,7 @@ static int mxt_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mxt_suspend(struct device *dev)
+static int __maybe_unused mxt_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mxt_data *data = i2c_get_clientdata(client);
@@ -2261,7 +2260,7 @@ static int mxt_suspend(struct device *dev)
return 0;
}
-static int mxt_resume(struct device *dev)
+static int __maybe_unused mxt_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mxt_data *data = i2c_get_clientdata(client);
@@ -2276,7 +2275,6 @@ static int mxt_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 7f3c94787787..40e02dd5b2f9 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -417,8 +417,7 @@ static void auo_pixcir_input_close(struct input_dev *dev)
return;
}
-#ifdef CONFIG_PM_SLEEP
-static int auo_pixcir_suspend(struct device *dev)
+static int __maybe_unused auo_pixcir_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
@@ -450,7 +449,7 @@ unlock:
return ret;
}
-static int auo_pixcir_resume(struct device *dev)
+static int __maybe_unused auo_pixcir_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
@@ -479,7 +478,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops,
auo_pixcir_suspend, auo_pixcir_resume);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 5bf1aeeea825..f2119ee0e21b 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -291,8 +291,7 @@ err_free_mem:
return err;
}
-#ifdef CONFIG_PM_SLEEP
-static int cy8ctmg110_suspend(struct device *dev)
+static int __maybe_unused cy8ctmg110_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -306,7 +305,7 @@ static int cy8ctmg110_suspend(struct device *dev)
return 0;
}
-static int cy8ctmg110_resume(struct device *dev)
+static int __maybe_unused cy8ctmg110_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -319,7 +318,6 @@ static int cy8ctmg110_resume(struct device *dev)
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index eee656f77a2e..5b74e8b84e79 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -472,8 +472,7 @@ static int cyttsp_disable(struct cyttsp *ts)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cyttsp_suspend(struct device *dev)
+static int __maybe_unused cyttsp_suspend(struct device *dev)
{
struct cyttsp *ts = dev_get_drvdata(dev);
int retval = 0;
@@ -491,7 +490,7 @@ static int cyttsp_suspend(struct device *dev)
return retval;
}
-static int cyttsp_resume(struct device *dev)
+static int __maybe_unused cyttsp_resume(struct device *dev)
{
struct cyttsp *ts = dev_get_drvdata(dev);
@@ -507,8 +506,6 @@ static int cyttsp_resume(struct device *dev)
return 0;
}
-#endif
-
SIMPLE_DEV_PM_OPS(cyttsp_pm_ops, cyttsp_suspend, cyttsp_resume);
EXPORT_SYMBOL_GPL(cyttsp_pm_ops);
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index cf6f4b31db4d..8264822dc4b9 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -357,7 +357,6 @@ static int da9034_touch_probe(struct platform_device *pdev)
static struct platform_driver da9034_touch_driver = {
.driver = {
.name = "da9034-touch",
- .owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
};
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
index ab64d58c3ac0..5a013bb7bcad 100644
--- a/drivers/input/touchscreen/da9052_tsi.c
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -337,7 +337,6 @@ static struct platform_driver da9052_tsi_driver = {
.remove = da9052_ts_remove,
.driver = {
.name = "da9052-tsi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index ee3434f1e949..3793fcc7e5db 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1092,8 +1092,7 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int edt_ft5x06_ts_suspend(struct device *dev)
+static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1103,7 +1102,7 @@ static int edt_ft5x06_ts_suspend(struct device *dev)
return 0;
}
-static int edt_ft5x06_ts_resume(struct device *dev)
+static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1112,7 +1111,6 @@ static int edt_ft5x06_ts_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index b1884ddd7a84..09be6ced7151 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -264,8 +264,7 @@ static int eeti_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int eeti_ts_suspend(struct device *dev)
+static int __maybe_unused eeti_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -284,7 +283,7 @@ static int eeti_ts_suspend(struct device *dev)
return 0;
}
-static int eeti_ts_resume(struct device *dev)
+static int __maybe_unused eeti_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -302,7 +301,6 @@ static int eeti_ts_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index c8057847d71d..4c56299284ef 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -239,8 +239,7 @@ static const struct i2c_device_id egalax_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, egalax_ts_id);
-#ifdef CONFIG_PM_SLEEP
-static int egalax_ts_suspend(struct device *dev)
+static int __maybe_unused egalax_ts_suspend(struct device *dev)
{
static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
@@ -252,13 +251,12 @@ static int egalax_ts_suspend(struct device *dev)
return ret > 0 ? 0 : ret;
}
-static int egalax_ts_resume(struct device *dev)
+static int __maybe_unused egalax_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return egalax_wake_up_device(client);
}
-#endif
static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
new file mode 100644
index 000000000000..a510f7ef9b66
--- /dev/null
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -0,0 +1,1271 @@
+/*
+ * Elan Microelectronics touch panels with I2C interface
+ *
+ * Copyright (C) 2014 Elan Microelectronics Corporation.
+ * Scott Liu <scott.liu@emc.com.tw>
+ *
+ * This code is partly based on hid-multitouch.c:
+ *
+ * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2010-2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2010-2012 Ecole Nationale de l'Aviation Civile, France
+ *
+ *
+ * This code is partly based on i2c-hid.c:
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ */
+
+/*
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/async.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/buffer_head.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/input/mt.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <asm/unaligned.h>
+
+/* Device, Driver information */
+#define DEVICE_NAME "elants_i2c"
+#define DRV_VERSION "1.0.9"
+
+/* Convert from rows or columns into resolution */
+#define ELAN_TS_RESOLUTION(n, m) (((n) - 1) * (m))
+
+/* FW header data */
+#define HEADER_SIZE 4
+#define FW_HDR_TYPE 0
+#define FW_HDR_COUNT 1
+#define FW_HDR_LENGTH 2
+
+/* Buffer mode Queue Header information */
+#define QUEUE_HEADER_SINGLE 0x62
+#define QUEUE_HEADER_NORMAL 0x63
+#define QUEUE_HEADER_WAIT 0x64
+
+/* Command header definition */
+#define CMD_HEADER_WRITE 0x54
+#define CMD_HEADER_READ 0x53
+#define CMD_HEADER_6B_READ 0x5B
+#define CMD_HEADER_RESP 0x52
+#define CMD_HEADER_6B_RESP 0x9B
+#define CMD_HEADER_HELLO 0x55
+#define CMD_HEADER_REK 0x66
+
+/* FW position data */
+#define PACKET_SIZE 55
+#define MAX_CONTACT_NUM 10
+#define FW_POS_HEADER 0
+#define FW_POS_STATE 1
+#define FW_POS_TOTAL 2
+#define FW_POS_XY 3
+#define FW_POS_CHECKSUM 34
+#define FW_POS_WIDTH 35
+#define FW_POS_PRESSURE 45
+
+#define HEADER_REPORT_10_FINGER 0x62
+
+/* Header (4 bytes) plus 3 fill 10-finger packets */
+#define MAX_PACKET_SIZE 169
+
+#define BOOT_TIME_DELAY_MS 50
+
+/* FW read command, 0x53 0x?? 0x0, 0x01 */
+#define E_ELAN_INFO_FW_VER 0x00
+#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_TEST_VER 0xE0
+#define E_ELAN_INFO_FW_ID 0xF0
+#define E_INFO_OSR 0xD6
+#define E_INFO_PHY_SCAN 0xD7
+#define E_INFO_PHY_DRIVER 0xD8
+
+#define MAX_RETRIES 3
+#define MAX_FW_UPDATE_RETRIES 30
+
+#define ELAN_FW_PAGESIZE 132
+#define ELAN_FW_FILENAME "elants_i2c.bin"
+
+/* calibration timeout definition */
+#define ELAN_CALI_TIMEOUT_MSEC 10000
+
+enum elants_state {
+ ELAN_STATE_NORMAL,
+ ELAN_WAIT_QUEUE_HEADER,
+ ELAN_WAIT_RECALIBRATION,
+};
+
+enum elants_iap_mode {
+ ELAN_IAP_OPERATIONAL,
+ ELAN_IAP_RECOVERY,
+};
+
+/* struct elants_data - represents state of Elan touchscreen device */
+struct elants_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ u16 fw_version;
+ u8 test_version;
+ u8 solution_version;
+ u8 bc_version;
+ u8 iap_version;
+ u16 hw_version;
+ unsigned int x_res; /* resolution in units/mm */
+ unsigned int y_res;
+ unsigned int x_max;
+ unsigned int y_max;
+
+ enum elants_state state;
+ enum elants_iap_mode iap_mode;
+
+ /* Guards against concurrent access to the device via sysfs */
+ struct mutex sysfs_mutex;
+
+ u8 cmd_resp[HEADER_SIZE];
+ struct completion cmd_done;
+
+ u8 buf[MAX_PACKET_SIZE];
+
+ bool wake_irq_enabled;
+};
+
+static int elants_i2c_send(struct i2c_client *client,
+ const void *data, size_t size)
+{
+ int ret;
+
+ ret = i2c_master_send(client, data, size);
+ if (ret == size)
+ return 0;
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "%s failed (%*ph): %d\n",
+ __func__, (int)size, data, ret);
+
+ return ret;
+}
+
+static int elants_i2c_read(struct i2c_client *client, void *data, size_t size)
+{
+ int ret;
+
+ ret = i2c_master_recv(client, data, size);
+ if (ret == size)
+ return 0;
+
+ if (ret >= 0)
+ ret = -EIO;
+
+ dev_err(&client->dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int elants_i2c_execute_command(struct i2c_client *client,
+ const u8 *cmd, size_t cmd_size,
+ u8 *resp, size_t resp_size)
+{
+ struct i2c_msg msgs[2];
+ int ret;
+ u8 expected_response;
+
+ switch (cmd[0]) {
+ case CMD_HEADER_READ:
+ expected_response = CMD_HEADER_RESP;
+ break;
+
+ case CMD_HEADER_6B_READ:
+ expected_response = CMD_HEADER_6B_RESP;
+ break;
+
+ default:
+ dev_err(&client->dev, "%s: invalid command %*ph\n",
+ __func__, (int)cmd_size, cmd);
+ return -EINVAL;
+ }
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags & I2C_M_TEN;
+ msgs[0].len = cmd_size;
+ msgs[0].buf = (u8 *)cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags & I2C_M_TEN;
+ msgs[1].flags |= I2C_M_RD;
+ msgs[1].len = resp_size;
+ msgs[1].buf = resp;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response)
+ return -EIO;
+
+ return 0;
+}
+
+static int elants_i2c_calibrate(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret, error;
+ static const u8 w_flashkey[] = { 0x54, 0xC0, 0xE1, 0x5A };
+ static const u8 rek[] = { 0x54, 0x29, 0x00, 0x01 };
+ static const u8 rek_resp[] = { CMD_HEADER_REK, 0x66, 0x66, 0x66 };
+
+ disable_irq(client->irq);
+
+ ts->state = ELAN_WAIT_RECALIBRATION;
+ reinit_completion(&ts->cmd_done);
+
+ elants_i2c_send(client, w_flashkey, sizeof(w_flashkey));
+ elants_i2c_send(client, rek, sizeof(rek));
+
+ enable_irq(client->irq);
+
+ ret = wait_for_completion_interruptible_timeout(&ts->cmd_done,
+ msecs_to_jiffies(ELAN_CALI_TIMEOUT_MSEC));
+
+ ts->state = ELAN_STATE_NORMAL;
+
+ if (ret <= 0) {
+ error = ret < 0 ? ret : -ETIMEDOUT;
+ dev_err(&client->dev,
+ "error while waiting for calibration to complete: %d\n",
+ error);
+ return error;
+ }
+
+ if (memcmp(rek_resp, ts->cmd_resp, sizeof(rek_resp))) {
+ dev_err(&client->dev,
+ "unexpected calibration response: %*ph\n",
+ (int)sizeof(ts->cmd_resp), ts->cmd_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int elants_i2c_sw_reset(struct i2c_client *client)
+{
+ const u8 soft_rst_cmd[] = { 0x77, 0x77, 0x77, 0x77 };
+ int error;
+
+ error = elants_i2c_send(client, soft_rst_cmd,
+ sizeof(soft_rst_cmd));
+ if (error) {
+ dev_err(&client->dev, "software reset failed: %d\n", error);
+ return error;
+ }
+
+ /*
+ * We should wait at least 10 msec (but no more than 40) before
+ * sending fastboot or IAP command to the device.
+ */
+ msleep(30);
+
+ return 0;
+}
+
+static u16 elants_i2c_parse_version(u8 *buf)
+{
+ return get_unaligned_be32(buf) >> 4;
+}
+
+static int elants_i2c_query_fw_id(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ ts->hw_version = elants_i2c_parse_version(resp);
+ if (ts->hw_version != 0xffff)
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev,
+ "Failed to read fw id or fw id is invalid\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_fw_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ ts->fw_version = elants_i2c_parse_version(resp);
+ if (ts->fw_version != 0x0000 &&
+ ts->fw_version != 0xffff)
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev,
+ "Failed to read fw version or fw version is invalid\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_test_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ u16 version;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (!error) {
+ version = elants_i2c_parse_version(resp);
+ ts->test_version = version >> 8;
+ ts->solution_version = version & 0xff;
+
+ return 0;
+ }
+
+ dev_dbg(&client->dev,
+ "read test version error rc=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ }
+
+ dev_err(&client->dev, "Failed to read test version\n");
+
+ return -EINVAL;
+}
+
+static int elants_i2c_query_bc_version(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_BC_VER, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+ u16 version;
+ int error;
+
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev,
+ "read BC version error=%d, buf=%*phC\n",
+ error, (int)sizeof(resp), resp);
+ return error;
+ }
+
+ version = elants_i2c_parse_version(resp);
+ ts->bc_version = version >> 8;
+ ts->iap_version = version & 0xff;
+
+ return 0;
+}
+
+static int elants_i2c_query_ts_info(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error;
+ u8 resp[17];
+ u16 phy_x, phy_y, rows, cols, osr;
+ const u8 get_resolution_cmd[] = {
+ CMD_HEADER_6B_READ, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ const u8 get_osr_cmd[] = {
+ CMD_HEADER_READ, E_INFO_OSR, 0x00, 0x01
+ };
+ const u8 get_physical_scan_cmd[] = {
+ CMD_HEADER_READ, E_INFO_PHY_SCAN, 0x00, 0x01
+ };
+ const u8 get_physical_drive_cmd[] = {
+ CMD_HEADER_READ, E_INFO_PHY_DRIVER, 0x00, 0x01
+ };
+
+ /* Get trace number */
+ error = elants_i2c_execute_command(client,
+ get_resolution_cmd,
+ sizeof(get_resolution_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get resolution command failed: %d\n",
+ error);
+ return error;
+ }
+
+ rows = resp[2] + resp[6] + resp[10];
+ cols = resp[3] + resp[7] + resp[11];
+
+ /* Process mm_to_pixel information */
+ error = elants_i2c_execute_command(client,
+ get_osr_cmd, sizeof(get_osr_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get osr command failed: %d\n",
+ error);
+ return error;
+ }
+
+ osr = resp[3];
+
+ error = elants_i2c_execute_command(client,
+ get_physical_scan_cmd,
+ sizeof(get_physical_scan_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get physical scan command failed: %d\n",
+ error);
+ return error;
+ }
+
+ phy_x = get_unaligned_be16(&resp[2]);
+
+ error = elants_i2c_execute_command(client,
+ get_physical_drive_cmd,
+ sizeof(get_physical_drive_cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "get physical drive command failed: %d\n",
+ error);
+ return error;
+ }
+
+ phy_y = get_unaligned_be16(&resp[2]);
+
+ dev_dbg(&client->dev, "phy_x=%d, phy_y=%d\n", phy_x, phy_y);
+
+ if (rows == 0 || cols == 0 || osr == 0) {
+ dev_warn(&client->dev,
+ "invalid trace number data: %d, %d, %d\n",
+ rows, cols, osr);
+ } else {
+ /* translate trace number to TS resolution */
+ ts->x_max = ELAN_TS_RESOLUTION(rows, osr);
+ ts->x_res = DIV_ROUND_CLOSEST(ts->x_max, phy_x);
+ ts->y_max = ELAN_TS_RESOLUTION(cols, osr);
+ ts->y_res = DIV_ROUND_CLOSEST(ts->y_max, phy_y);
+ }
+
+ return 0;
+}
+
+static int elants_i2c_fastboot(struct i2c_client *client)
+{
+ const u8 boot_cmd[] = { 0x4D, 0x61, 0x69, 0x6E };
+ int error;
+
+ error = elants_i2c_send(client, boot_cmd, sizeof(boot_cmd));
+ if (error) {
+ dev_err(&client->dev, "boot failed: %d\n", error);
+ return error;
+ }
+
+ dev_dbg(&client->dev, "boot success -- 0x%x\n", client->addr);
+ return 0;
+}
+
+static int elants_i2c_initialize(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+ const u8 hello_packet[] = { 0x55, 0x55, 0x55, 0x55 };
+ const u8 recov_packet[] = { 0x55, 0x55, 0x80, 0x80 };
+ u8 buf[HEADER_SIZE];
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_sw_reset(client);
+ if (error) {
+ /* Continue initializing if it's the last try */
+ if (retry_cnt < MAX_RETRIES - 1)
+ continue;
+ }
+
+ error = elants_i2c_fastboot(client);
+ if (error) {
+ /* Continue initializing if it's the last try */
+ if (retry_cnt < MAX_RETRIES - 1)
+ continue;
+ }
+
+ /* Wait for Hello packet */
+ msleep(BOOT_TIME_DELAY_MS);
+
+ error = elants_i2c_read(client, buf, sizeof(buf));
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read 'hello' packet: %d\n", error);
+ } else if (!memcmp(buf, hello_packet, sizeof(hello_packet))) {
+ ts->iap_mode = ELAN_IAP_OPERATIONAL;
+ break;
+ } else if (!memcmp(buf, recov_packet, sizeof(recov_packet))) {
+ /*
+ * Setting error code will mark device
+ * in recovery mode below.
+ */
+ error = -EIO;
+ break;
+ } else {
+ error = -EINVAL;
+ dev_err(&client->dev,
+ "invalid 'hello' packet: %*ph\n",
+ (int)sizeof(buf), buf);
+ }
+ }
+
+ if (!error)
+ error = elants_i2c_query_fw_id(ts);
+ if (!error)
+ error = elants_i2c_query_fw_version(ts);
+
+ if (error) {
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ } else {
+ elants_i2c_query_test_version(ts);
+ elants_i2c_query_bc_version(ts);
+ elants_i2c_query_ts_info(ts);
+ }
+
+ return 0;
+}
+
+/*
+ * Firmware update interface.
+ */
+
+static int elants_i2c_fw_write_page(struct i2c_client *client,
+ const void *page)
+{
+ const u8 ack_ok[] = { 0xaa, 0xaa };
+ u8 buf[2];
+ int retry;
+ int error;
+
+ for (retry = 0; retry < MAX_FW_UPDATE_RETRIES; retry++) {
+ error = elants_i2c_send(client, page, ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev,
+ "IAP Write Page failed: %d\n", error);
+ continue;
+ }
+
+ error = elants_i2c_read(client, buf, 2);
+ if (error) {
+ dev_err(&client->dev,
+ "IAP Ack read failed: %d\n", error);
+ return error;
+ }
+
+ if (!memcmp(buf, ack_ok, sizeof(ack_ok)))
+ return 0;
+
+ error = -EIO;
+ dev_err(&client->dev,
+ "IAP Get Ack Error [%02x:%02x]\n",
+ buf[0], buf[1]);
+ }
+
+ return error;
+}
+
+static int elants_i2c_do_update_firmware(struct i2c_client *client,
+ const struct firmware *fw,
+ bool force)
+{
+ const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
+ const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
+ const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+ u8 buf[HEADER_SIZE];
+ u16 send_id;
+ int page, n_fw_pages;
+ int error;
+
+ /* Recovery mode detection! */
+ if (force) {
+ dev_dbg(&client->dev, "Recovery mode procedure\n");
+ error = elants_i2c_send(client, enter_iap2, sizeof(enter_iap2));
+ } else {
+ /* Start IAP Procedure */
+ dev_dbg(&client->dev, "Normal IAP procedure\n");
+ elants_i2c_sw_reset(client);
+
+ error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
+ }
+
+ if (error) {
+ dev_err(&client->dev, "failed to enter IAP mode: %d\n", error);
+ return error;
+ }
+
+ msleep(20);
+
+ /* check IAP state */
+ error = elants_i2c_read(client, buf, 4);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read IAP acknowledgement: %d\n",
+ error);
+ return error;
+ }
+
+ if (memcmp(buf, iap_ack, sizeof(iap_ack))) {
+ dev_err(&client->dev,
+ "failed to enter IAP: %*ph (expected %*ph)\n",
+ (int)sizeof(buf), buf, (int)sizeof(iap_ack), iap_ack);
+ return -EIO;
+ }
+
+ dev_info(&client->dev, "successfully entered IAP mode");
+
+ send_id = client->addr;
+ error = elants_i2c_send(client, &send_id, 1);
+ if (error) {
+ dev_err(&client->dev, "sending dummy byte failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* Clear the last page of Master */
+ error = elants_i2c_send(client, fw->data, ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev, "clearing of the last page failed: %d\n",
+ error);
+ return error;
+ }
+
+ error = elants_i2c_read(client, buf, 2);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read ACK for clearing the last page: %d\n",
+ error);
+ return error;
+ }
+
+ n_fw_pages = fw->size / ELAN_FW_PAGESIZE;
+ dev_dbg(&client->dev, "IAP Pages = %d\n", n_fw_pages);
+
+ for (page = 0; page < n_fw_pages; page++) {
+ error = elants_i2c_fw_write_page(client,
+ fw->data + page * ELAN_FW_PAGESIZE);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to write FW page %d: %d\n",
+ page, error);
+ return error;
+ }
+ }
+
+ /* Old IAP firmware needs to wait 200 ms for the WDT; the rest is for hello packets */
+ msleep(300);
+
+ dev_info(&client->dev, "firmware update completed\n");
+ return 0;
+}
+
+static int elants_i2c_fw_update(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, ELAN_FW_FILENAME, &client->dev);
+ if (error) {
+ dev_err(&client->dev, "failed to request firmware %s: %d\n",
+ ELAN_FW_FILENAME, error);
+ return error;
+ }
+
+ if (fw->size % ELAN_FW_PAGESIZE) {
+ dev_err(&client->dev, "invalid firmware length: %zu\n",
+ fw->size);
+ error = -EINVAL;
+ goto out;
+ }
+
+ disable_irq(client->irq);
+
+ error = elants_i2c_do_update_firmware(client, fw,
+ ts->iap_mode == ELAN_IAP_RECOVERY);
+ if (error) {
+ dev_err(&client->dev, "firmware update failed: %d\n", error);
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ goto out_enable_irq;
+ }
+
+ error = elants_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize device after firmware update: %d\n",
+ error);
+ ts->iap_mode = ELAN_IAP_RECOVERY;
+ goto out_enable_irq;
+ }
+
+ ts->iap_mode = ELAN_IAP_OPERATIONAL;
+
+out_enable_irq:
+ ts->state = ELAN_STATE_NORMAL;
+ enable_irq(client->irq);
+ msleep(100);
+
+ if (!error)
+ elants_i2c_calibrate(ts);
+out:
+ release_firmware(fw);
+ return error;
+}
+
+/*
+ * Event reporting.
+ */
+
+static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
+{
+ struct input_dev *input = ts->input;
+ unsigned int n_fingers;
+ u16 finger_state;
+ int i;
+
+ n_fingers = buf[FW_POS_STATE + 1] & 0x0f;
+ finger_state = ((buf[FW_POS_STATE + 1] & 0x30) << 4) |
+ buf[FW_POS_STATE];
+
+ dev_dbg(&ts->client->dev,
+ "n_fingers: %u, state: %04x\n", n_fingers, finger_state);
+
+ for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
+ if (finger_state & 1) {
+ unsigned int x, y, p, w;
+ u8 *pos;
+
+ pos = &buf[FW_POS_XY + i * 3];
+ x = (((u16)pos[0] & 0xf0) << 4) | pos[1];
+ y = (((u16)pos[0] & 0x0f) << 8) | pos[2];
+ p = buf[FW_POS_PRESSURE + i];
+ w = buf[FW_POS_WIDTH + i];
+
+ dev_dbg(&ts->client->dev, "i=%d x=%d y=%d p=%d w=%d\n",
+ i, x, y, p, w);
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w);
+
+ n_fingers--;
+ }
+
+ finger_state >>= 1;
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static u8 elants_i2c_calculate_checksum(u8 *buf)
+{
+ u8 checksum = 0;
+ u8 i;
+
+ for (i = 0; i < FW_POS_CHECKSUM; i++)
+ checksum += buf[i];
+
+ return checksum;
+}
+
+static void elants_i2c_event(struct elants_data *ts, u8 *buf)
+{
+ u8 checksum = elants_i2c_calculate_checksum(buf);
+
+ if (unlikely(buf[FW_POS_CHECKSUM] != checksum))
+ dev_warn(&ts->client->dev,
+ "%s: invalid checksum for packet %02x: %02x vs. %02x\n",
+ __func__, buf[FW_POS_HEADER],
+ checksum, buf[FW_POS_CHECKSUM]);
+ else if (unlikely(buf[FW_POS_HEADER] != HEADER_REPORT_10_FINGER))
+ dev_warn(&ts->client->dev,
+ "%s: unknown packet type: %02x\n",
+ __func__, buf[FW_POS_HEADER]);
+ else
+ elants_i2c_mt_event(ts, buf);
+}
+
+static irqreturn_t elants_i2c_irq(int irq, void *_dev)
+{
+ const u8 wait_packet[] = { 0x64, 0x64, 0x64, 0x64 };
+ struct elants_data *ts = _dev;
+ struct i2c_client *client = ts->client;
+ int report_count, report_len;
+ int i;
+ int len;
+
+ len = i2c_master_recv(client, ts->buf, sizeof(ts->buf));
+ if (len < 0) {
+ dev_err(&client->dev, "%s: failed to read data: %d\n",
+ __func__, len);
+ goto out;
+ }
+
+ dev_dbg(&client->dev, "%s: packet %*ph\n",
+ __func__, HEADER_SIZE, ts->buf);
+
+ switch (ts->state) {
+ case ELAN_WAIT_RECALIBRATION:
+ if (ts->buf[FW_HDR_TYPE] == CMD_HEADER_REK) {
+ memcpy(ts->cmd_resp, ts->buf, sizeof(ts->cmd_resp));
+ complete(&ts->cmd_done);
+ ts->state = ELAN_STATE_NORMAL;
+ }
+ break;
+
+ case ELAN_WAIT_QUEUE_HEADER:
+ if (ts->buf[FW_HDR_TYPE] != QUEUE_HEADER_NORMAL)
+ break;
+
+ ts->state = ELAN_STATE_NORMAL;
+ /* fall through */
+
+ case ELAN_STATE_NORMAL:
+
+ switch (ts->buf[FW_HDR_TYPE]) {
+ case CMD_HEADER_HELLO:
+ case CMD_HEADER_RESP:
+ case CMD_HEADER_REK:
+ break;
+
+ case QUEUE_HEADER_WAIT:
+ if (memcmp(ts->buf, wait_packet, sizeof(wait_packet))) {
+ dev_err(&client->dev,
+ "invalid wait packet %*ph\n",
+ HEADER_SIZE, ts->buf);
+ } else {
+ ts->state = ELAN_WAIT_QUEUE_HEADER;
+ udelay(30);
+ }
+ break;
+
+ case QUEUE_HEADER_SINGLE:
+ elants_i2c_event(ts, &ts->buf[HEADER_SIZE]);
+ break;
+
+ case QUEUE_HEADER_NORMAL:
+ report_count = ts->buf[FW_HDR_COUNT];
+ if (report_count > 3) {
+ dev_err(&client->dev,
+ "too large report count: %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+
+ report_len = ts->buf[FW_HDR_LENGTH] / report_count;
+ if (report_len != PACKET_SIZE) {
+ dev_err(&client->dev,
+ "mismatching report length: %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+
+ for (i = 0; i < report_count; i++) {
+ u8 *buf = ts->buf + HEADER_SIZE +
+ i * PACKET_SIZE;
+ elants_i2c_event(ts, buf);
+ }
+ break;
+
+ default:
+ dev_err(&client->dev, "unknown packet %*ph\n",
+ HEADER_SIZE, ts->buf);
+ break;
+ }
+ break;
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ * sysfs interface
+ */
+static ssize_t calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = elants_i2c_calibrate(ts);
+
+ mutex_unlock(&ts->sysfs_mutex);
+ return error ?: count;
+}
+
+static ssize_t write_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = elants_i2c_fw_update(ts);
+ dev_dbg(dev, "firmware update result: %d\n", error);
+
+ mutex_unlock(&ts->sysfs_mutex);
+ return error ?: count;
+}
+
+static ssize_t show_iap_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%s\n",
+ ts->iap_mode == ELAN_IAP_OPERATIONAL ?
+ "Normal" : "Recovery");
+}
+
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, calibrate_store);
+static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
+
+struct elants_version_attribute {
+ struct device_attribute dattr;
+ size_t field_offset;
+ size_t field_size;
+};
+
+#define __ELANTS_FIELD_SIZE(_field) \
+ sizeof(((struct elants_data *)NULL)->_field)
+#define __ELANTS_VERIFY_SIZE(_field) \
+ (BUILD_BUG_ON_ZERO(__ELANTS_FIELD_SIZE(_field) > 2) + \
+ __ELANTS_FIELD_SIZE(_field))
+#define ELANTS_VERSION_ATTR(_field) \
+ struct elants_version_attribute elants_ver_attr_##_field = { \
+ .dattr = __ATTR(_field, S_IRUGO, \
+ elants_version_attribute_show, NULL), \
+ .field_offset = offsetof(struct elants_data, _field), \
+ .field_size = __ELANTS_VERIFY_SIZE(_field), \
+ }
+
+static ssize_t elants_version_attribute_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ struct elants_version_attribute *attr =
+ container_of(dattr, struct elants_version_attribute, dattr);
+ u8 *field = (u8 *)((char *)ts + attr->field_offset);
+ unsigned int fmt_size;
+ unsigned int val;
+
+ if (attr->field_size == 1) {
+ val = *field;
+ fmt_size = 2; /* 2 HEX digits */
+ } else {
+ val = *(u16 *)field;
+ fmt_size = 4; /* 4 HEX digits */
+ }
+
+ return sprintf(buf, "%0*x\n", fmt_size, val);
+}
+
+static ELANTS_VERSION_ATTR(fw_version);
+static ELANTS_VERSION_ATTR(hw_version);
+static ELANTS_VERSION_ATTR(test_version);
+static ELANTS_VERSION_ATTR(solution_version);
+static ELANTS_VERSION_ATTR(bc_version);
+static ELANTS_VERSION_ATTR(iap_version);
+
+static struct attribute *elants_attributes[] = {
+ &dev_attr_calibrate.attr,
+ &dev_attr_update_fw.attr,
+ &dev_attr_iap_mode.attr,
+
+ &elants_ver_attr_fw_version.dattr.attr,
+ &elants_ver_attr_hw_version.dattr.attr,
+ &elants_ver_attr_test_version.dattr.attr,
+ &elants_ver_attr_solution_version.dattr.attr,
+ &elants_ver_attr_bc_version.dattr.attr,
+ &elants_ver_attr_iap_version.dattr.attr,
+ NULL
+};
+
+static struct attribute_group elants_attribute_group = {
+ .attrs = elants_attributes,
+};
+
+static void elants_i2c_remove_sysfs_group(void *_data)
+{
+ struct elants_data *ts = _data;
+
+ sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
+}
+
+static int elants_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ union i2c_smbus_data dummy;
+ struct elants_data *ts;
+ unsigned long irqflags;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "%s: i2c check functionality error\n", DEVICE_NAME);
+ return -ENXIO;
+ }
+
+ /* Make sure there is something at this address */
+ if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+ dev_err(&client->dev, "nothing at this address\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ mutex_init(&ts->sysfs_mutex);
+ init_completion(&ts->cmd_done);
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ error = elants_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev, "failed to initialize: %d\n", error);
+ return error;
+ }
+
+ ts->input = devm_input_allocate_device(&client->dev);
+ if (!ts->input) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input->name = "Elan Touchscreen";
+ ts->input->id.bustype = BUS_I2C;
+
+ __set_bit(BTN_TOUCH, ts->input->keybit);
+ __set_bit(EV_ABS, ts->input->evbit);
+ __set_bit(EV_KEY, ts->input->evbit);
+
+ /* Single touch input params setup */
+ input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
+ input_abs_set_res(ts->input, ABS_X, ts->x_res);
+ input_abs_set_res(ts->input, ABS_Y, ts->y_res);
+
+ /* Multitouch input params setup */
+ error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
+
+ input_set_drvdata(ts->input, ts);
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to register input device: %d\n", error);
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up interrupt via DTS,
+ * the rest will use the default falling edge interrupts.
+ */
+ irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, elants_i2c_irq,
+ irqflags | IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ return error;
+ }
+
+ /*
+ * Systems using device tree should set up wakeup via DTS,
+ * the rest will configure device as wakeup source by default.
+ */
+ if (!client->dev.of_node)
+ device_init_wakeup(&client->dev, true);
+
+ error = sysfs_create_group(&client->dev.kobj, &elants_attribute_group);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ elants_i2c_remove_sysfs_group, ts);
+ if (error) {
+ elants_i2c_remove_sysfs_group(ts);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused elants_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ const u8 set_sleep_cmd[] = { 0x54, 0x50, 0x00, 0x01 };
+ int retry_cnt;
+ int error;
+
+ /* Command not supported in IAP recovery mode */
+ if (ts->iap_mode != ELAN_IAP_OPERATIONAL)
+ return -EBUSY;
+
+ disable_irq(client->irq);
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_sleep_cmd,
+ sizeof(set_sleep_cmd));
+ if (!error)
+ break;
+
+ dev_err(&client->dev, "suspend command failed: %d\n", error);
+ }
+
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+
+ return 0;
+}
+
+static int __maybe_unused elants_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct elants_data *ts = i2c_get_clientdata(client);
+ const u8 set_active_cmd[] = { 0x54, 0x58, 0x00, 0x01 };
+ int retry_cnt;
+ int error;
+
+ if (device_may_wakeup(dev) && ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+
+ for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ error = elants_i2c_send(client, set_active_cmd,
+ sizeof(set_active_cmd));
+ if (!error)
+ break;
+
+ dev_err(&client->dev, "resume command failed: %d\n", error);
+ }
+
+ ts->state = ELAN_STATE_NORMAL;
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elants_i2c_pm_ops,
+ elants_i2c_suspend, elants_i2c_resume);
+
+static const struct i2c_device_id elants_i2c_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, elants_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id elants_acpi_id[] = {
+ { "ELAN0001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id elants_of_match[] = {
+ { .compatible = "elan,ekth3500" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, elants_of_match);
+#endif
+
+static struct i2c_driver elants_i2c_driver = {
+ .probe = elants_i2c_probe,
+ .id_table = elants_i2c_id,
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &elants_i2c_pm_ops,
+ .acpi_match_table = ACPI_PTR(elants_acpi_id),
+ .of_match_table = of_match_ptr(elants_of_match),
+ },
+};
+module_i2c_driver(elants_i2c_driver);
+
+MODULE_AUTHOR("Scott Liu <scott.liu@emc.com.tw>");
+MODULE_DESCRIPTION("Elan I2c Touchscreen driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
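The new elants_i2c driver exposes calibrate, update_fw and iap_mode attributes in sysfs; calibrate_store() ignores the written value and simply kicks off a recalibration. As a hedged usage sketch (userspace C; the sysfs path is an assumption and depends on the bus number and ACPI/OF name the device enumerates under):

#include <stdio.h>

int main(void)
{
	/* hypothetical path for an ACPI-enumerated ELAN0001 device */
	const char *path =
		"/sys/bus/i2c/devices/i2c-ELAN0001:00/calibrate";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* any write starts a calibration cycle */
	fclose(f);
	return 0;
}

Reading iap_mode from the same directory reports "Normal" or "Recovery", which indicates whether update_fw will take the recovery path.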
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
new file mode 100644
index 000000000000..ca196689f025
--- /dev/null
+++ b/drivers/input/touchscreen/goodix.c
@@ -0,0 +1,395 @@
+/*
+ * Driver for Goodix Touchscreens
+ *
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This code is based on gt9xx.c authored by andrew@goodix.com:
+ *
+ * 2010 - 2012 Goodix Technology.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+struct goodix_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int abs_x_max;
+ int abs_y_max;
+ unsigned int max_touch_num;
+ unsigned int int_trigger_type;
+};
+
+#define GOODIX_MAX_HEIGHT 4096
+#define GOODIX_MAX_WIDTH 4096
+#define GOODIX_INT_TRIGGER 1
+#define GOODIX_CONTACT_SIZE 8
+#define GOODIX_MAX_CONTACTS 10
+
+#define GOODIX_CONFIG_MAX_LENGTH 240
+
+/* Register defines */
+#define GOODIX_READ_COOR_ADDR 0x814E
+#define GOODIX_REG_CONFIG_DATA 0x8047
+#define GOODIX_REG_VERSION 0x8140
+
+#define RESOLUTION_LOC 1
+#define TRIGGER_LOC 6
+
+static const unsigned long goodix_irq_flags[] = {
+ IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_LEVEL_LOW,
+ IRQ_TYPE_LEVEL_HIGH,
+};
+
+/**
+ * goodix_i2c_read - read data from a register of the i2c slave device.
+ *
+ * @client: i2c device.
+ * @reg: the register to read from.
+ * @buf: raw data buffer to read into.
+ * @len: length of the buffer to read
+ */
+static int goodix_i2c_read(struct i2c_client *client,
+ u16 reg, u8 *buf, int len)
+{
+ struct i2c_msg msgs[2];
+ u16 wbuf = cpu_to_be16(reg);
+ int ret;
+
+ msgs[0].flags = 0;
+ msgs[0].addr = client->addr;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *) &wbuf;
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = client->addr;
+ msgs[1].len = len;
+ msgs[1].buf = buf;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ return ret < 0 ? ret : (ret != ARRAY_SIZE(msgs) ? -EIO : 0);
+}
+
+static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
+{
+ int touch_num;
+ int error;
+
+ error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
+ GOODIX_CONTACT_SIZE + 1);
+ if (error) {
+ dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
+ return error;
+ }
+
+ touch_num = data[0] & 0x0f;
+ if (touch_num > GOODIX_MAX_CONTACTS)
+ return -EPROTO;
+
+ if (touch_num > 1) {
+ data += 1 + GOODIX_CONTACT_SIZE;
+ error = goodix_i2c_read(ts->client,
+ GOODIX_READ_COOR_ADDR +
+ 1 + GOODIX_CONTACT_SIZE,
+ data,
+ GOODIX_CONTACT_SIZE * (touch_num - 1));
+ if (error)
+ return error;
+ }
+
+ return touch_num;
+}
+
+static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+{
+ int id = coor_data[0] & 0x0F;
+ int input_x = get_unaligned_le16(&coor_data[1]);
+ int input_y = get_unaligned_le16(&coor_data[3]);
+ int input_w = get_unaligned_le16(&coor_data[5]);
+
+ input_mt_slot(ts->input_dev, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, input_y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
+/**
+ * goodix_process_events - Process incoming events
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Called when the IRQ is triggered. Read the current device state, and push
+ * the input events to user space.
+ */
+static void goodix_process_events(struct goodix_ts_data *ts)
+{
+ u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ int touch_num;
+ int i;
+
+ touch_num = goodix_ts_read_input_report(ts, point_data);
+ if (touch_num < 0)
+ return;
+
+ for (i = 0; i < touch_num; i++)
+ goodix_ts_report_touch(ts,
+ &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+
+ input_mt_sync_frame(ts->input_dev);
+ input_sync(ts->input_dev);
+}
+
+/**
+ * goodix_ts_irq_handler - The IRQ handler
+ *
+ * @irq: interrupt number.
+ * @dev_id: private data pointer.
+ */
+static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
+{
+ static const u8 end_cmd[] = {
+ GOODIX_READ_COOR_ADDR >> 8,
+ GOODIX_READ_COOR_ADDR & 0xff,
+ 0
+ };
+ struct goodix_ts_data *ts = dev_id;
+
+ goodix_process_events(ts);
+
+ if (i2c_master_send(ts->client, end_cmd, sizeof(end_cmd)) < 0)
+ dev_err(&ts->client->dev, "I2C write end_cmd error\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * goodix_read_config - Read the embedded configuration of the panel
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Must be called during probe
+ */
+static void goodix_read_config(struct goodix_ts_data *ts)
+{
+ u8 config[GOODIX_CONFIG_MAX_LENGTH];
+ int error;
+
+ error = goodix_i2c_read(ts->client, GOODIX_REG_CONFIG_DATA,
+ config,
+ GOODIX_CONFIG_MAX_LENGTH);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Error reading config (%d), using defaults\n",
+ error);
+ ts->abs_x_max = GOODIX_MAX_WIDTH;
+ ts->abs_y_max = GOODIX_MAX_HEIGHT;
+ ts->int_trigger_type = GOODIX_INT_TRIGGER;
+ return;
+ }
+
+ ts->abs_x_max = get_unaligned_le16(&config[RESOLUTION_LOC]);
+ ts->abs_y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]);
+ ts->int_trigger_type = (config[TRIGGER_LOC]) & 0x03;
+ if (!ts->abs_x_max || !ts->abs_y_max) {
+ dev_err(&ts->client->dev,
+ "Invalid config, using defaults\n");
+ ts->abs_x_max = GOODIX_MAX_WIDTH;
+ ts->abs_y_max = GOODIX_MAX_HEIGHT;
+ }
+}
+
+
+/**
+ * goodix_read_version - Read goodix touchscreen version
+ *
+ * @client: the i2c client
+ * @version: output buffer containing the version on success
+ */
+static int goodix_read_version(struct i2c_client *client, u16 *version)
+{
+ int error;
+ u8 buf[6];
+
+ error = goodix_i2c_read(client, GOODIX_REG_VERSION, buf, sizeof(buf));
+ if (error) {
+ dev_err(&client->dev, "read version failed: %d\n", error);
+ return error;
+ }
+
+ if (version)
+ *version = get_unaligned_le16(&buf[4]);
+
+ dev_info(&client->dev, "IC VERSION: %6ph\n", buf);
+
+ return 0;
+}
+
+/**
+ * goodix_i2c_test - I2C test function to check if the device answers.
+ *
+ * @client: the i2c client
+ */
+static int goodix_i2c_test(struct i2c_client *client)
+{
+ int retry = 0;
+ int error;
+ u8 test;
+
+ while (retry++ < 2) {
+ error = goodix_i2c_read(client, GOODIX_REG_CONFIG_DATA,
+ &test, 1);
+ if (!error)
+ return 0;
+
+ dev_err(&client->dev, "i2c test failed attempt %d: %d\n",
+ retry, error);
+ msleep(20);
+ }
+
+ return error;
+}
+
+/**
+ * goodix_request_input_dev - Allocate, populate and register the input device
+ *
+ * @ts: our goodix_ts_data pointer
+ *
+ * Must be called during probe
+ */
+static int goodix_request_input_dev(struct goodix_ts_data *ts)
+{
+ int error;
+
+ ts->input_dev = devm_input_allocate_device(&ts->client->dev);
+ if (!ts->input_dev) {
+ dev_err(&ts->client->dev, "Failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->evbit[0] = BIT_MASK(EV_SYN) |
+ BIT_MASK(EV_KEY) |
+ BIT_MASK(EV_ABS);
+
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, 0,
+ ts->abs_x_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, 0,
+ ts->abs_y_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ input_mt_init_slots(ts->input_dev, GOODIX_MAX_CONTACTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+
+ ts->input_dev->name = "Goodix Capacitive TouchScreen";
+ ts->input_dev->phys = "input/ts";
+ ts->input_dev->id.bustype = BUS_I2C;
+ ts->input_dev->id.vendor = 0x0416;
+ ts->input_dev->id.product = 0x1001;
+ ts->input_dev->id.version = 10427;
+
+ error = input_register_device(ts->input_dev);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int goodix_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct goodix_ts_data *ts;
+ unsigned long irq_flags;
+ int error;
+ u16 version_info;
+
+ dev_dbg(&client->dev, "I2C Address: 0x%02x\n", client->addr);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "I2C check functionality failed.\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ error = goodix_i2c_test(client);
+ if (error) {
+ dev_err(&client->dev, "I2C communication failure: %d\n", error);
+ return error;
+ }
+
+ error = goodix_read_version(client, &version_info);
+ if (error) {
+ dev_err(&client->dev, "Read version failed.\n");
+ return error;
+ }
+
+ goodix_read_config(ts);
+
+ error = goodix_request_input_dev(ts);
+ if (error)
+ return error;
+
+ irq_flags = goodix_irq_flags[ts->int_trigger_type] | IRQF_ONESHOT;
+ error = devm_request_threaded_irq(&ts->client->dev, client->irq,
+ NULL, goodix_ts_irq_handler,
+ irq_flags, client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "request IRQ failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id goodix_ts_id[] = {
+ { "GDIX1001:00", 0 },
+ { }
+};
+
+static const struct acpi_device_id goodix_acpi_match[] = {
+ { "GDIX1001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+
+static struct i2c_driver goodix_ts_driver = {
+ .probe = goodix_ts_probe,
+ .id_table = goodix_ts_id,
+ .driver = {
+ .name = "Goodix-TS",
+ .owner = THIS_MODULE,
+ .acpi_match_table = goodix_acpi_match,
+ },
+};
+module_i2c_driver(goodix_ts_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("Goodix touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 2a5089139818..da6dc819c846 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -311,8 +311,7 @@ static int ili210x_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ili210x_i2c_suspend(struct device *dev)
+static int __maybe_unused ili210x_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -322,7 +321,7 @@ static int ili210x_i2c_suspend(struct device *dev)
return 0;
}
-static int ili210x_i2c_resume(struct device *dev)
+static int __maybe_unused ili210x_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -331,7 +330,6 @@ static int ili210x_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ili210x_i2c_pm,
ili210x_i2c_suspend, ili210x_i2c_resume);
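The same conversion recurs throughout these touchscreen hunks: the #ifdef CONFIG_PM_SLEEP guards are dropped and the suspend/resume handlers are marked __maybe_unused, since SIMPLE_DEV_PM_OPS()/SET_SYSTEM_SLEEP_PM_OPS() only reference them when CONFIG_PM_SLEEP is enabled. A minimal sketch of the resulting pattern, with a hypothetical "foo" driver standing in for the real ones:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/*
 * With CONFIG_PM_SLEEP disabled, SET_SYSTEM_SLEEP_PM_OPS() expands to nothing,
 * the callbacks are never referenced, and __maybe_unused keeps the compiler
 * from warning about them, so no preprocessor guards are needed. The ops are
 * wired up from the driver via .driver.pm = &foo_pm_ops.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);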
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index c38ca4a7e386..b4f0725a1c3d 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -644,7 +644,6 @@ static int mrstouch_probe(struct platform_device *pdev)
static struct platform_driver mrstouch_driver = {
.driver = {
.name = "pmic_touch",
- .owner = THIS_MODULE,
},
.probe = mrstouch_probe,
};
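The other recurring cleanup is dropping .owner = THIS_MODULE from platform drivers, as in the mrstouch hunk above; platform_driver_register() is a macro that hands THIS_MODULE to the driver core, which fills in driver.owner itself, so the per-driver assignment is redundant. A minimal sketch with a hypothetical "foo" driver; the stated rationale is an editorial reading of the cleanup, not spelled out in the patch:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		/* no .owner: set by the core when the driver is registered */
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");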
diff --git a/drivers/input/touchscreen/ipaq-micro-ts.c b/drivers/input/touchscreen/ipaq-micro-ts.c
index 62c8976e616f..33c134820ef9 100644
--- a/drivers/input/touchscreen/ipaq-micro-ts.c
+++ b/drivers/input/touchscreen/ipaq-micro-ts.c
@@ -122,8 +122,7 @@ static int micro_ts_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int micro_ts_suspend(struct device *dev)
+static int __maybe_unused micro_ts_suspend(struct device *dev)
{
struct touchscreen_data *ts = dev_get_drvdata(dev);
@@ -132,7 +131,7 @@ static int micro_ts_suspend(struct device *dev)
return 0;
}
-static int micro_ts_resume(struct device *dev)
+static int __maybe_unused micro_ts_resume(struct device *dev)
{
struct touchscreen_data *ts = dev_get_drvdata(dev);
struct input_dev *input = ts->input;
@@ -146,7 +145,6 @@ static int micro_ts_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops micro_ts_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(micro_ts_suspend, micro_ts_resume)
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 651ec71a5c68..ea3b6a5b83e6 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -148,7 +148,6 @@ static struct platform_driver jornada720_ts_driver = {
.probe = jornada720_ts_probe,
.driver = {
.name = "jornada_ts",
- .owner = THIS_MODULE,
},
};
module_platform_driver(jornada720_ts_driver);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index bb47d3442a35..24d704cd9f88 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -396,7 +396,6 @@ static struct platform_driver lpc32xx_ts_driver = {
.remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
- .owner = THIS_MODULE,
.pm = LPC32XX_TS_PM_OPS,
.of_match_table = of_match_ptr(lpc32xx_tsc_of_match),
},
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index d6f099c47f84..913e25a994b4 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -243,7 +243,6 @@ static int mc13783_ts_remove(struct platform_device *pdev)
static struct platform_driver mc13783_ts_driver = {
.remove = mc13783_ts_remove,
.driver = {
- .owner = THIS_MODULE,
.name = MC13783_TS_NAME,
},
};
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 372bbf7658fe..67c0d31613d8 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -515,8 +515,7 @@ static int mms114_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mms114_suspend(struct device *dev)
+static int __maybe_unused mms114_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms114_data *data = i2c_get_clientdata(client);
@@ -540,7 +539,7 @@ static int mms114_suspend(struct device *dev)
return 0;
}
-static int mms114_resume(struct device *dev)
+static int __maybe_unused mms114_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mms114_data *data = i2c_get_clientdata(client);
@@ -559,7 +558,6 @@ static int mms114_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mms114_pm_ops, mms114_suspend, mms114_resume);
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index cff2376817e5..23a354a392ae 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -247,7 +247,6 @@ static struct platform_driver pcap_ts_driver = {
.remove = pcap_ts_remove,
.driver = {
.name = "pcap-ts",
- .owner = THIS_MODULE,
.pm = PCAP_TS_PM_OPS,
},
};
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index fc49c75317d1..4fb5537fdd42 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -347,8 +347,7 @@ static void pixcir_input_close(struct input_dev *dev)
pixcir_stop(ts);
}
-#ifdef CONFIG_PM_SLEEP
-static int pixcir_i2c_ts_suspend(struct device *dev)
+static int __maybe_unused pixcir_i2c_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
@@ -377,7 +376,7 @@ unlock:
return ret;
}
-static int pixcir_i2c_ts_resume(struct device *dev)
+static int __maybe_unused pixcir_i2c_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pixcir_i2c_ts_data *ts = i2c_get_clientdata(client);
@@ -405,7 +404,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 5a69ded9b53c..bdfa27dc097b 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -422,7 +422,6 @@ MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
static struct platform_driver s3c_ts_driver = {
.driver = {
.name = "samsung-ts",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &s3c_ts_pmops,
#endif
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 3c0f57efe7b1..697e26e52d54 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -243,8 +243,7 @@ static int st1232_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int st1232_ts_suspend(struct device *dev)
+static int __maybe_unused st1232_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -259,7 +258,7 @@ static int st1232_ts_suspend(struct device *dev)
return 0;
}
-static int st1232_ts_resume(struct device *dev)
+static int __maybe_unused st1232_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -274,8 +273,6 @@ static int st1232_ts_resume(struct device *dev)
return 0;
}
-#endif
-
static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
st1232_ts_suspend, st1232_ts_resume);
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 42ce31afa259..2d5ff86b343f 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -384,7 +384,6 @@ static int stmpe_ts_remove(struct platform_device *pdev)
static struct platform_driver stmpe_ts_driver = {
.driver = {
.name = STMPE_TS_NAME,
- .owner = THIS_MODULE,
},
.probe = stmpe_input_probe,
.remove = stmpe_ts_remove,
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 2ba826024954..28a06749ae42 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -324,7 +324,6 @@ MODULE_DEVICE_TABLE(of, sun4i_ts_of_match);
static struct platform_driver sun4i_ts_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "sun4i-ts",
.of_match_table = of_match_ptr(sun4i_ts_of_match),
},
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 2ce649520fe0..004f1346a957 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -518,7 +518,6 @@ static struct platform_driver ti_tsc_driver = {
.remove = titsc_remove,
.driver = {
.name = "TI-am335x-tsc",
- .owner = THIS_MODULE,
.pm = TITSC_PM_OPS,
.of_match_table = ti_tsc_dt_ids,
},
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 94cde2cb1491..4ffd829d1990 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -314,7 +314,6 @@ static int tps6507x_ts_remove(struct platform_device *pdev)
static struct platform_driver tps6507x_ts_driver = {
.driver = {
.name = "tps6507x-ts",
- .owner = THIS_MODULE,
},
.probe = tps6507x_ts_probe,
.remove = tps6507x_ts_remove,
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 52380b68ebdf..72657c579430 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -773,8 +773,7 @@ static int tsc2005_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tsc2005_suspend(struct device *dev)
+static int __maybe_unused tsc2005_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -791,7 +790,7 @@ static int tsc2005_suspend(struct device *dev)
return 0;
}
-static int tsc2005_resume(struct device *dev)
+static int __maybe_unused tsc2005_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -807,7 +806,6 @@ static int tsc2005_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(tsc2005_pm_ops, tsc2005_suspend, tsc2005_resume);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index b46c55cd1bbb..c1e23cfc6155 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -406,8 +406,7 @@ static int ucb1400_ts_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ucb1400_ts_suspend(struct device *dev)
+static int __maybe_unused ucb1400_ts_suspend(struct device *dev)
{
struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
@@ -421,7 +420,7 @@ static int ucb1400_ts_suspend(struct device *dev)
return 0;
}
-static int ucb1400_ts_resume(struct device *dev)
+static int __maybe_unused ucb1400_ts_resume(struct device *dev)
{
struct ucb1400_ts *ucb = dev_get_platdata(dev);
struct input_dev *idev = ucb->ts_idev;
@@ -434,7 +433,6 @@ static int ucb1400_ts_resume(struct device *dev)
mutex_unlock(&idev->mutex);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
ucb1400_ts_suspend, ucb1400_ts_resume);
@@ -444,7 +442,6 @@ static struct platform_driver ucb1400_ts_driver = {
.remove = ucb1400_ts_remove,
.driver = {
.name = "ucb1400_ts",
- .owner = THIS_MODULE,
.pm = &ucb1400_ts_pm_ops,
},
};
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 003d0c3b5d08..da6004e97753 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -326,7 +326,6 @@ static struct platform_driver w90x900ts_driver = {
.remove = w90x900ts_remove,
.driver = {
.name = "nuc900-ts",
- .owner = THIS_MODULE,
},
};
module_platform_driver(w90x900ts_driver);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 7ccaa1b12b05..32f8ac003936 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -242,8 +242,7 @@ static int wacom_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int wacom_i2c_suspend(struct device *dev)
+static int __maybe_unused wacom_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -252,7 +251,7 @@ static int wacom_i2c_suspend(struct device *dev)
return 0;
}
-static int wacom_i2c_resume(struct device *dev)
+static int __maybe_unused wacom_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -260,7 +259,6 @@ static int wacom_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 1b953a066b2c..1db0a1410929 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -392,7 +392,6 @@ static int wm831x_ts_remove(struct platform_device *pdev)
static struct platform_driver wm831x_ts_driver = {
.driver = {
.name = "wm831x-touch",
- .owner = THIS_MODULE,
},
.probe = wm831x_ts_probe,
.remove = wm831x_ts_remove,
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 8ba48f5eff7b..19880c7385e3 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -602,8 +602,7 @@ static void zforce_input_close(struct input_dev *dev)
return;
}
-#ifdef CONFIG_PM_SLEEP
-static int zforce_suspend(struct device *dev)
+static int __maybe_unused zforce_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
@@ -648,7 +647,7 @@ unlock:
return ret;
}
-static int zforce_resume(struct device *dev)
+static int __maybe_unused zforce_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
@@ -685,7 +684,6 @@ unlock:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6dbfbc209491..325188eef1c1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -15,7 +15,7 @@ if IOMMU_SUPPORT
config OF_IOMMU
def_bool y
- depends on OF
+ depends on OF && IOMMU_API
config FSL_PAMU
bool "Freescale IOMMU support"
@@ -144,13 +144,26 @@ config OMAP_IOMMU
select IOMMU_API
config OMAP_IOMMU_DEBUG
- tristate "Export OMAP IOMMU internals in DebugFS"
- depends on OMAP_IOMMU && DEBUG_FS
- help
- Select this to see extensive information about
- the internal state of OMAP IOMMU in debugfs.
+ bool "Export OMAP IOMMU internals in DebugFS"
+ depends on OMAP_IOMMU && DEBUG_FS
+ ---help---
+ Select this to see extensive information about
+ the internal state of OMAP IOMMU in debugfs.
+
+ Say N unless you know you need this.
- Say N unless you know you need this.
+config ROCKCHIP_IOMMU
+ bool "Rockchip IOMMU Support"
+ depends on ARM
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMUs found on Rockchip rk32xx SoCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+ Say Y here if you are using a Rockchip SoC that includes an IOMMU
+ device.
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
@@ -174,7 +187,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS
+ depends on ARCH_EXYNOS && ARM
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
-obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2d84c9edf3b8..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return (irq_remapping_enabled == 1);
+ case IOMMU_CAP_NOEXEC:
+ return false;
}
return false;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90d734bbf467..90f70d0e1141 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -92,13 +92,6 @@ static spinlock_t state_lock;
static struct workqueue_struct *iommu_wq;
-/*
- * Empty page table - Used between
- * mmu_notifier_invalidate_range_start and
- * mmu_notifier_invalidate_range_end
- */
-static u64 *empty_page_table;
-
static void free_pasid_states(struct device_state *dev_state);
static u16 device_id(struct pci_dev *pdev)
@@ -279,10 +272,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state(struct pasid_state *pasid_state)
{
- if (atomic_dec_and_test(&pasid_state->count)) {
- put_device_state(pasid_state->device_state);
+ if (atomic_dec_and_test(&pasid_state->count))
wake_up(&pasid_state->wq);
- }
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +282,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
- if (atomic_dec_and_test(&pasid_state->count))
- put_device_state(pasid_state->device_state);
- else
+ if (!atomic_dec_and_test(&pasid_state->count))
schedule();
finish_wait(&pasid_state->wq, &wait);
@@ -418,46 +407,21 @@ static void mn_invalidate_page(struct mmu_notifier *mn,
__mn_flush_page(mn, address);
}
-static void mn_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct pasid_state *pasid_state;
- struct device_state *dev_state;
- unsigned long flags;
-
- pasid_state = mn_to_state(mn);
- dev_state = pasid_state->device_state;
-
- spin_lock_irqsave(&pasid_state->lock, flags);
- if (pasid_state->mmu_notifier_count == 0) {
- amd_iommu_domain_set_gcr3(dev_state->domain,
- pasid_state->pasid,
- __pa(empty_page_table));
- }
- pasid_state->mmu_notifier_count += 1;
- spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static void mn_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- unsigned long flags;
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- spin_lock_irqsave(&pasid_state->lock, flags);
- pasid_state->mmu_notifier_count -= 1;
- if (pasid_state->mmu_notifier_count == 0) {
- amd_iommu_domain_set_gcr3(dev_state->domain,
- pasid_state->pasid,
- __pa(pasid_state->mm->pgd));
- }
- spin_unlock_irqrestore(&pasid_state->lock, flags);
+ if ((start ^ (end - 1)) < PAGE_SIZE)
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+ start);
+ else
+ amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -482,8 +446,7 @@ static struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.invalidate_page = mn_invalidate_page,
- .invalidate_range_start = mn_invalidate_range_start,
- .invalidate_range_end = mn_invalidate_range_end,
+ .invalidate_range = mn_invalidate_range,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
@@ -513,45 +476,67 @@ static void finish_pri_tag(struct device_state *dev_state,
spin_unlock_irqrestore(&pasid_state->lock, flags);
}
+static void handle_fault_error(struct fault *fault)
+{
+ int status;
+
+ if (!fault->dev_state->inv_ppr_cb) {
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ return;
+ }
+
+ status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+ fault->pasid,
+ fault->address,
+ fault->flags);
+ switch (status) {
+ case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+ set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_INVALID:
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_FAIL:
+ set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+ break;
+ default:
+ BUG();
+ }
+}
+
static void do_fault(struct work_struct *work)
{
struct fault *fault = container_of(work, struct fault, work);
- int npages, write;
- struct page *page;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ u64 address;
+ int ret, write;
write = !!(fault->flags & PPR_FAULT_WRITE);
- down_read(&fault->state->mm->mmap_sem);
- npages = get_user_pages(NULL, fault->state->mm,
- fault->address, 1, write, 0, &page, NULL);
- up_read(&fault->state->mm->mmap_sem);
-
- if (npages == 1) {
- put_page(page);
- } else if (fault->dev_state->inv_ppr_cb) {
- int status;
-
- status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
- fault->pasid,
- fault->address,
- fault->flags);
- switch (status) {
- case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
- set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
- break;
- case AMD_IOMMU_INV_PRI_RSP_INVALID:
- set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
- break;
- case AMD_IOMMU_INV_PRI_RSP_FAIL:
- set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
- break;
- default:
- BUG();
- }
- } else {
- set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ mm = fault->state->mm;
+ address = fault->address;
+
+ down_read(&mm->mmap_sem);
+ vma = find_extend_vma(mm, address);
+ if (!vma || address < vma->vm_start) {
+ /* failed to get a vma in the right range */
+ up_read(&mm->mmap_sem);
+ handle_fault_error(fault);
+ goto out;
+ }
+
+ ret = handle_mm_fault(mm, vma, address, write);
+ if (ret & VM_FAULT_ERROR) {
+ /* failed to service fault */
+ up_read(&mm->mmap_sem);
+ handle_fault_error(fault);
+ goto out;
}
+ up_read(&mm->mmap_sem);
+
+out:
finish_pri_tag(fault->dev_state, fault->state, fault->tag);
put_pasid_state(fault->state);
@@ -954,18 +939,10 @@ static int __init amd_iommu_v2_init(void)
if (iommu_wq == NULL)
goto out;
- ret = -ENOMEM;
- empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
- if (empty_page_table == NULL)
- goto out_destroy_wq;
-
amd_iommu_register_ppr_notifier(&ppr_nb);
return 0;
-out_destroy_wq:
- destroy_workqueue(iommu_wq);
-
out:
return ret;
}
@@ -999,8 +976,6 @@ static void __exit amd_iommu_v2_exit(void)
}
destroy_workqueue(iommu_wq);
-
- free_page((unsigned long)empty_page_table);
}
module_init(amd_iommu_v2_init);
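mn_invalidate_range() above chooses between a single-page flush and a full TLB flush with (start ^ (end - 1)) < PAGE_SIZE, which is true exactly when the first and last byte of the range agree in every bit above the page offset, i.e. lie in the same page. A standalone sketch of that check; PAGE_SIZE and the helper name are assumptions for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages for this sketch */

/* True when [start, end) fits inside a single page. */
static bool range_in_one_page(unsigned long start, unsigned long end)
{
	return (start ^ (end - 1)) < PAGE_SIZE;
}

int main(void)
{
	assert(range_in_one_page(0x1000, 0x1800));	/* inside one page */
	assert(range_in_one_page(0x1000, 0x2000));	/* ends exactly on the boundary */
	assert(!range_in_one_page(0x1800, 0x2800));	/* crosses a page boundary */
	printf("single-page check behaves as expected\n");
	return 0;
}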
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e393ae01b5d2..6cd47b75286f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+};
+
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
spinlock_t lock;
};
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+ * support for nested translation. That means we end up with the
+ * following table:
+ *
+ * Requested Supported Actual
+ * S1 N S1
+ * S1 S1+S2 S1
+ * S1 S2 S2
+ * S1 S1 S1
+ * N N N
+ * N S1+S2 S2
+ * N S2 S2
+ * N S1 S1
+ *
+ * Note that you can't actually request stage-2 mappings.
+ */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+ start = smmu->num_s2_context_banks;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
/*
* We will likely want to change this if/when KVM gets
* involved.
*/
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else {
+ case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
}
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int prot, int stage)
{
pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+ pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
pteval |= ARM_SMMU_PTE_MEMATTR_NC;
}
+ if (prot & IOMMU_NOEXEC)
+ pteval |= ARM_SMMU_PTE_XN;
+
/* If no access, create a faulting entry to avoid TLB fills */
- if (prot & IOMMU_EXEC)
- pteval &= ~ARM_SMMU_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
+ case IOMMU_CAP_NOEXEC:
+ return true;
default:
return false;
}
@@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu)
+ return -EPERM;
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
static const struct iommu_ops arm_smmu_ops = {
- .capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
- .attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
- .map = arm_smmu_map,
- .unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .capable = arm_smmu_capable,
+ .domain_init = arm_smmu_domain_init,
+ .domain_destroy = arm_smmu_domain_destroy,
+ .attach_dev = arm_smmu_attach_dev,
+ .detach_dev = arm_smmu_detach_dev,
+ .map = arm_smmu_map,
+ .unmap = arm_smmu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .add_device = arm_smmu_add_device,
+ .remove_device = arm_smmu_remove_device,
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .pgsize_bitmap = (SECTION_SIZE |
+ ARM_SMMU_PTE_CONT_SIZE |
+ PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2063,7 +2136,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
static struct platform_driver arm_smmu_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
@@ -2073,8 +2145,20 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
+ struct device_node *np;
int ret;
+ /*
+ * Play nice with systems that don't have an ARM SMMU by checking that
+ * an ARM SMMU exists in the system before proceeding with the driver
+ * and IOMMU bus operation registration.
+ */
+ np = of_find_matching_node(NULL, arm_smmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
ret = platform_driver_register(&arm_smmu_driver);
if (ret)
return ret;
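The table in the comment added to arm_smmu_init_domain_context() is realised by two feature checks that clamp the requested stage to what the hardware supports, falling back to stage 2 when stage 1 is absent and to stage 1 when stage 2 is absent. A standalone sketch of that resolution step; the enum and function names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

enum smmu_stage { STAGE_S1, STAGE_S2, STAGE_NESTED };

/* Clamp the requested stage to what the SMMU actually implements. */
static enum smmu_stage resolve_stage(enum smmu_stage requested,
				     bool has_s1, bool has_s2)
{
	enum smmu_stage stage = requested;

	if (!has_s1)	/* no stage-1 support: fall back to stage 2 */
		stage = STAGE_S2;
	if (!has_s2)	/* no stage-2 support: fall back to stage 1 */
		stage = STAGE_S1;

	return stage;
}

int main(void)
{
	/* Stage 1 requested on a stage-2-only SMMU resolves to stage 2. */
	printf("%d\n", resolve_stage(STAGE_S1, false, true));
	/* Nested requested on an SMMU with both stages stays nested. */
	printf("%d\n", resolve_stage(STAGE_NESTED, true, true));
	return 0;
}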
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@
#include "irq_remapping.h"
+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+struct dmar_res_callback {
+ dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
+ void *arg[ACPI_DMAR_TYPE_RESERVED];
+ bool ignore_unhandled;
+ bool print_entry;
+};
+
/*
* Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
+static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
* add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN,
};
+static struct dmar_drhd_unit *
+dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+ if (dmaru->segment == drhd->segment &&
+ dmaru->reg_base_addr == drhd->address)
+ return dmaru;
+
+ return NULL;
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
*/
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
+static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *)header;
- dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+ dmaru = dmar_find_dmaru(drhd);
+ if (dmaru)
+ goto out;
+
+ dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
- dmaru->hdr = header;
+ /*
+ * If the header was allocated from the slab by the ACPI _DSM method, we
+ * need to copy its contents because the memory buffer will be freed on
+ * return.
+ */
+ dmaru->hdr = (void *)(dmaru + 1);
+ memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret;
}
dmar_register_drhd_unit(dmaru);
+
+out:
+ if (arg)
+ (*(int *)arg)++;
+
return 0;
}
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}
-static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+ void *arg)
{
struct acpi_dmar_andd *andd = (void *)header;
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
}
#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
+#else
+#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0);
}
+static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
+ size_t len, struct dmar_res_callback *cb)
+{
+ int ret = 0;
+ struct acpi_dmar_header *iter, *next;
+ struct acpi_dmar_header *end = ((void *)start) + len;
+
+ for (iter = start; iter < end && ret == 0; iter = next) {
+ next = (void *)iter + iter->length;
+ if (iter->length == 0) {
+ /* Avoid looping forever on bad ACPI tables */
+ pr_debug(FW_BUG "Invalid 0-length structure\n");
+ break;
+ } else if (next > end) {
+ /* Avoid reading past the table end */
+ pr_warn(FW_BUG "record extends past table end\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (cb->print_entry)
+ dmar_table_print_dmar_entry(iter);
+
+ if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
+ /* continue for forward compatibility */
+ pr_debug("Unknown DMAR structure type %d\n",
+ iter->type);
+ } else if (cb->cb[iter->type]) {
+ ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ } else if (!cb->ignore_unhandled) {
+ pr_warn("No handler for DMAR structure type %d\n",
+ iter->type);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
+ struct dmar_res_callback *cb)
+{
+ return dmar_walk_remapping_entries((void *)(dmar + 1),
+ dmar->header.length - sizeof(*dmar), cb);
+}
+
/**
* parse_dmar_table - parses the DMA reporting table
*/
@@ -510,9 +593,18 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
int ret = 0;
int drhd_count = 0;
+ struct dmar_res_callback cb = {
+ .print_entry = true,
+ .ignore_unhandled = true,
+ .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
+ .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
+ .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
+ .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ };
/*
* Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
}
pr_info("Host address width %d\n", dmar->width + 1);
-
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- ret = -EINVAL;
- break;
- }
-
- dmar_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd_count++;
- ret = dmar_parse_one_drhd(entry_header);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- ret = dmar_parse_one_rmrr(entry_header);
- break;
- case ACPI_DMAR_TYPE_ROOT_ATS:
- ret = dmar_parse_one_atsr(entry_header);
- break;
- case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
- ret = dmar_parse_one_rhsa(entry_header);
-#endif
- break;
- case ACPI_DMAR_TYPE_NAMESPACE:
- ret = dmar_parse_one_andd(entry_header);
- break;
- default:
- pr_warn("Unknown DMAR structure type %d\n",
- entry_header->type);
- ret = 0; /* for forward compatibility */
- break;
- }
- if (ret)
- break;
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- if (drhd_count == 0)
+ ret = dmar_walk_dmar_table(dmar, &cb);
+ if (ret == 0 && drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
return ret;
}
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-static int __init check_zero_address(void)
+static int __ref
+dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd;
+ void __iomem *addr;
+ u64 cap, ecap;
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- return 0;
- }
+ drhd = (void *)entry;
+ if (!drhd->address) {
+ warn_invalid_dmar(0, "");
+ return -EINVAL;
+ }
- if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
+ if (arg)
+ addr = ioremap(drhd->address, VTD_PAGE_SIZE);
+ else
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (!addr) {
+ pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+ return -EINVAL;
+ }
- drhd = (void *)entry_header;
- if (!drhd->address) {
- warn_invalid_dmar(0, "");
- goto failed;
- }
+ cap = dmar_readq(addr + DMAR_CAP_REG);
+ ecap = dmar_readq(addr + DMAR_ECAP_REG);
- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr ) {
- printk("IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->address,
- " returns all ones");
- goto failed;
- }
- }
+ if (arg)
+ iounmap(addr);
+ else
+ early_iounmap(addr, VTD_PAGE_SIZE);
- entry_header = ((void *)entry_header + entry_header->length);
+ if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+ warn_invalid_dmar(drhd->address, " returns all ones");
+ return -EINVAL;
}
- return 1;
-failed:
return 0;
}
int __init detect_intel_iommu(void)
{
int ret;
+ struct dmar_res_callback validate_drhd_cb = {
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
+ .ignore_unhandled = true,
+ };
down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
- ret = check_zero_address();
- {
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
- iommu_detected = 1;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- }
+ ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }
#ifdef CONFIG_X86
- if (ret)
- x86_init.iommu.iommu_init = intel_iommu_init;
+ if (ret)
+ x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- }
+
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ out:
return err;
}
+static int dmar_alloc_seq_id(struct intel_iommu *iommu)
+{
+ iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
+ DMAR_UNITS_SUPPORTED);
+ if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
+ iommu->seq_id = -1;
+ } else {
+ set_bit(iommu->seq_id, dmar_seq_ids);
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
+ }
+
+ return iommu->seq_id;
+}
+
+static void dmar_free_seq_id(struct intel_iommu *iommu)
+{
+ if (iommu->seq_id >= 0) {
+ clear_bit(iommu->seq_id, dmar_seq_ids);
+ iommu->seq_id = -1;
+ }
+}
+
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- iommu->seq_id = iommu_allocated++;
- sprintf (iommu->name, "dmar%d", iommu->seq_id);
+ if (dmar_alloc_seq_id(iommu) < 0) {
+ pr_err("IOMMU: failed to allocate seq_id\n");
+ err = -ENOSPC;
+ goto error;
+ }
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name);
- goto error;
+ goto error_free_seq_id;
}
err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
- err_unmap:
+err_unmap:
unmap_iommu(iommu);
- error:
+error_free_seq_id:
+ dmar_free_seq_id(iommu);
+error:
kfree(iommu);
return err;
}
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ dmar_free_seq_id(iommu);
kfree(iommu);
}
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1;
}
+/* Check whether DMAR units are in use */
+static inline bool dmar_in_use(void)
+{
+ return irq_remapping_enabled || intel_iommu_enabled;
+}
+
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;
- /* DMAR units are in use */
- if (irq_remapping_enabled || intel_iommu_enabled)
+ if (dmar_in_use())
return 0;
if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
+
+/*
+ * DMAR Hotplug Support
+ * For more details, please refer to Intel(R) Virtualization Technology
+ * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
+ * "Remapping Hardware Unit Hot Plug".
+ */
+static u8 dmar_hp_uuid[] = {
+ /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
+ /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
+};
+
+/*
+ * Currently there's only one revision and BIOS will not check the revision id,
+ * so use 0 for safety.
+ */
+#define DMAR_DSM_REV_ID 0
+#define DMAR_DSM_FUNC_DRHD 1
+#define DMAR_DSM_FUNC_ATSR 2
+#define DMAR_DSM_FUNC_RHSA 3
+
+static inline bool dmar_detect_dsm(acpi_handle handle, int func)
+{
+ return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+}
+
+static int dmar_walk_dsm_resource(acpi_handle handle, int func,
+ dmar_res_handler_t handler, void *arg)
+{
+ int ret = -ENODEV;
+ union acpi_object *obj;
+ struct acpi_dmar_header *start;
+ struct dmar_res_callback callback;
+ static int res_type[] = {
+ [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
+ [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
+ [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ };
+
+ if (!dmar_detect_dsm(handle, func))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+ func, NULL, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -ENODEV;
+
+ memset(&callback, 0, sizeof(callback));
+ callback.cb[res_type[func]] = handler;
+ callback.arg[res_type[func]] = arg;
+ start = (struct acpi_dmar_header *)obj->buffer.pointer;
+ ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int ret;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return -ENODEV;
+
+ ret = dmar_ir_hotplug(dmaru, true);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, true);
+
+ return ret;
+}
+
+static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int i, ret;
+ struct device *dev;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return 0;
+
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, false);
+
+ return ret;
+}
+
+static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (dmaru) {
+ list_del_rcu(&dmaru->list);
+ synchronize_rcu();
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+static int dmar_hotplug_insert(acpi_handle handle)
+{
+ int ret;
+ int drhd_count = 0;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_validate_one_drhd, (void *)1);
+ if (ret)
+ goto out;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_parse_one_drhd, (void *)&drhd_count);
+ if (ret == 0 && drhd_count == 0) {
+ pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
+ goto out;
+ } else if (ret) {
+ goto release_drhd;
+ }
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
+ &dmar_parse_one_rhsa, NULL);
+ if (ret)
+ goto release_drhd;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_parse_one_atsr, NULL);
+ if (ret)
+ goto release_atsr;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ if (!ret)
+ return 0;
+
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+release_atsr:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL);
+release_drhd:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL);
+out:
+ return ret;
+}
+
+static int dmar_hotplug_remove(acpi_handle handle)
+{
+ int ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_check_one_atsr, NULL);
+ if (ret)
+ return ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+ if (ret == 0) {
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL));
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL));
+ } else {
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ }
+
+ return ret;
+}
+
+static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
+ void *context, void **retval)
+{
+ acpi_handle *phdl = retval;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ *phdl = handle;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+static int dmar_device_hotplug(acpi_handle handle, bool insert)
+{
+ int ret;
+ acpi_handle tmp = NULL;
+ acpi_status status;
+
+ if (!dmar_in_use())
+ return 0;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ tmp = handle;
+ } else {
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ dmar_get_dsm_handle,
+ NULL, NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failed to locate _DSM method.\n");
+ return -ENXIO;
+ }
+ }
+ if (tmp == NULL)
+ return 0;
+
+ down_write(&dmar_global_lock);
+ if (insert)
+ ret = dmar_hotplug_insert(tmp);
+ else
+ ret = dmar_hotplug_remove(tmp);
+ up_write(&dmar_global_lock);
+
+ return ret;
+}
+
+int dmar_device_add(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, true);
+}
+
+int dmar_device_remove(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, false);
+}
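dmar_walk_remapping_entries() above replaces the open-coded switch statements with a generic walk over the variable-length DMAR records, dispatching each record through a per-type callback from struct dmar_res_callback. A reduced standalone sketch of that pattern; the record layout, type numbers and names here are made up for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rec_header { uint16_t type; uint16_t length; };	/* length in bytes */

typedef int (*rec_handler_t)(const struct rec_header *rec, void *arg);

#define REC_TYPE_MAX 4

struct rec_callback {
	rec_handler_t cb[REC_TYPE_MAX];
	void *arg[REC_TYPE_MAX];
};

/* Walk a buffer of self-sized records, dispatching each by type. */
static int walk_records(const void *buf, size_t len,
			const struct rec_callback *cb)
{
	const uint8_t *p = buf, *end = p + len;

	while (p < end) {
		const struct rec_header *rec = (const void *)p;

		if (rec->length == 0 || p + rec->length > end)
			return -1;	/* malformed table: stop walking */
		if (rec->type < REC_TYPE_MAX && cb->cb[rec->type]) {
			int ret = cb->cb[rec->type](rec, cb->arg[rec->type]);

			if (ret)
				return ret;
		}
		p += rec->length;
	}
	return 0;
}

static int count_type1(const struct rec_header *rec, void *arg)
{
	(*(int *)arg)++;
	return 0;
}

int main(void)
{
	/* Two 4-byte records: one of type 1, one of type 2. */
	const uint16_t table[] = { 1, 4, 2, 4 };
	int count = 0;
	struct rec_callback cb = { .cb[1] = count_type1, .arg[1] = &count };

	walk_records(table, sizeof(table), &cb);
	printf("type-1 records seen: %d\n", count);
	return 0;
}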
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 28372b85d8da..7ce52737c7a1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -684,7 +684,6 @@ static const struct of_device_id sysmmu_of_match[] __initconst = {
static struct platform_driver exynos_sysmmu_driver __refdata = {
.probe = exynos_sysmmu_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "exynos-sysmmu",
.of_match_table = sysmmu_of_match,
}
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 2b6ce9387af1..80ac68d884c5 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -1227,7 +1227,6 @@ static const struct of_device_id fsl_of_pamu_ids[] = {
static struct platform_driver fsl_of_pamu_driver = {
.driver = {
.name = "fsl-of-pamu",
- .owner = THIS_MODULE,
},
.probe = fsl_pamu_probe,
};
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 02cd26a17fe0..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
+ root->val &= ~VTD_PAGE_MASK;
root->val |= value & VTD_PAGE_MASK;
}
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
+ context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
/* si_domain contains mulitple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define IOMMU_UNITS_SUPPORTED 64
-#endif
-
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+ DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root)
+ if (!root) {
+ pr_err("IOMMU: allocating root entry for %s failed\n",
+ iommu->name);
return -ENOMEM;
+ }
__iommu_flush_cache(iommu, root, ROOT_SIZE);
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
+}
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ kfree(iommu->domains);
+ kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
+ }
g_iommus[iommu->seq_id] = NULL;
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
- unsigned long sg_res;
+ unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (sg)
- sg_res = 0;
- else {
- sg_res = nr_pages + 1;
+ if (!sg) {
+ sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+ /*
+ * Start from the sane iommu hardware state.
+ * If the queued invalidation is already initialized by us
+ * (for example, while enabling interrupt-remapping) then
+ * we got the things already rolling from a sane state.
+ */
+ if (!iommu->qi) {
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ pr_info("IOMMU: %s using Register based invalidation\n",
+ iommu->name);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+ }
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read
* only
*/
- if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++;
continue;
}
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- IOMMU_UNITS_SUPPORTED);
+ DMAR_UNITS_SUPPORTED);
}
+ /* Preallocate enough resources for IOMMU hot-addition */
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+ g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later.
*/
ret = iommu_alloc_root_entry(iommu);
- if (ret) {
- printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ if (ret)
goto free_iommu;
- }
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}
- /*
- * Start from the sane iommu hardware state.
- */
- for_each_active_iommu(iommu, drhd) {
- /*
- * If the queued invalidation is already initialized by us
- * (for example, while enabling interrupt-remapping) then
- * we got the things already rolling from a sane state.
- */
- if (iommu->qi)
- continue;
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
-
- for_each_active_iommu(iommu, drhd) {
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued Invalidate not enabled, use Register Based
- * Invalidate
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- }
- }
+ for_each_active_iommu(iommu, drhd)
+ intel_iommu_init_qi(iommu);
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
return 0;
free_iommu:
- for_each_active_iommu(iommu, drhd)
+ for_each_active_iommu(iommu, drhd) {
+ disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
+ }
kfree(deferred_flush);
free_g_iommus:
kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0;
}
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+ struct dmar_atsr_unit *atsru;
+ struct acpi_dmar_atsr *tmp;
+
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+ if (atsr->segment != tmp->segment)
+ continue;
+ if (atsr->header.length != tmp->header.length)
+ continue;
+ if (memcmp(atsr, tmp, atsr->header.length) == 0)
+ return atsru;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+ return 0;
+
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru)
+ return 0;
+
+ atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;
- atsru->hdr = hdr;
+ /*
+ * If memory is allocated from slab by ACPI _DSM method, we need to
+ * copy the memory content because the memory buffer will be freed
+ * on return.
+ */
+ atsru->hdr = (void *)(atsru + 1);
+ memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru);
}
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru) {
+ list_del_rcu(&atsru->list);
+ synchronize_rcu();
+ intel_iommu_free_atsr(atsru);
+ }
+
+ return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ int i;
+ struct device *dev;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (!atsru)
+ return 0;
+
+ if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+ int sp, ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (g_iommus[iommu->seq_id])
+ return 0;
+
+ if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+ pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ if (!ecap_sc_support(iommu->ecap) &&
+ domain_update_iommu_snooping(iommu)) {
+ pr_warn("IOMMU: %s doesn't support snooping.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ sp = domain_update_iommu_superpage(iommu) - 1;
+ if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+ pr_warn("IOMMU: %s doesn't support large page.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+
+ g_iommus[iommu->seq_id] = iommu;
+ ret = iommu_init_domains(iommu);
+ if (ret == 0)
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret)
+ goto out;
+
+ if (dmaru->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+ }
+
+ intel_iommu_init_qi(iommu);
+ iommu_flush_write_buffer(iommu);
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto disable_iommu;
+
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ iommu_enable_translation(iommu);
+
+ if (si_domain) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret < 0 || si_domain->id != ret)
+ goto disable_iommu;
+ domain_attach_iommu(si_domain, iommu);
+ }
+
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+
+disable_iommu:
+ disable_dmar_iommu(iommu);
+out:
+ free_dmar_iommu(iommu);
+ return ret;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!intel_iommu_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+
+ if (insert) {
+ ret = intel_iommu_add(dmaru);
+ } else {
+ disable_dmar_iommu(iommu);
+ free_dmar_iommu(iommu);
+ }
+
+ return ret;
+}
+
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
/*
* Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
- if (ir_hpet[i].id == hpet_id)
+ if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
return ir_hpet[i].iommu;
return NULL;
}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
int i;
for (i = 0; i < MAX_IO_APICS; i++)
- if (ir_ioapic[i].id == apic)
+ if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
return NULL;
}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
- if (ir_ioapic[i].id == apic) {
+ if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
- if (ir_hpet[i].id == id) {
+ if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
- ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_ATOMIC);
+ if (iommu->ir_table)
+ return 0;
- if (!iommu->ir_table)
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ if (!ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table);
- return -ENOMEM;
+ goto out_free_table;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
- __free_pages(pages, INTR_REMAP_PAGE_ORDER);
- kfree(ir_table);
- return -ENOMEM;
+ goto out_free_pages;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
-
- iommu_set_irq_remapping(iommu, mode);
+ iommu->ir_table = ir_table;
return 0;
+
+out_free_pages:
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+out_free_table:
+ kfree(ir_table);
+ return -ENOMEM;
+}
+
+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+ if (iommu && iommu->ir_table) {
+ free_pages((unsigned long)iommu->ir_table->base,
+ INTR_REMAP_PAGE_ORDER);
+ kfree(iommu->ir_table->bitmap);
+ kfree(iommu->ir_table);
+ iommu->ir_table = NULL;
+ }
}
/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap))
continue;
- if (intel_setup_irq_remapping(iommu, eim))
+ if (intel_setup_irq_remapping(iommu))
goto error;
+ iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
- /*
- * handle error condition gracefully here!
- */
+ for_each_iommu(iommu, drhd)
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }
if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
return -1;
}
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS);
path++;
}
- ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_hpet[ir_hpet_num].iommu = iommu;
- ir_hpet[ir_hpet_num].id = scope->enumeration_id;
- ir_hpet_num++;
+
+ for (count = 0; count < MAX_HPET_TBS; count++) {
+ if (ir_hpet[count].iommu == iommu &&
+ ir_hpet[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_hpet[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max HPET blocks\n");
+ return -ENOSPC;
+ }
+
+ ir_hpet[free].iommu = iommu;
+ ir_hpet[free].id = scope->enumeration_id;
+ ir_hpet[free].bus = bus;
+ ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+ pr_info("HPET id %d under DRHD base 0x%Lx\n",
+ scope->enumeration_id, drhd->address);
+
+ return 0;
}
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++;
}
- ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ for (count = 0; count < MAX_IO_APICS; count++) {
+ if (ir_ioapic[count].iommu == iommu &&
+ ir_ioapic[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_ioapic[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max IO APICS\n");
+ return -ENOSPC;
+ }
+
+ ir_ioapic[free].bus = bus;
+ ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+ ir_ioapic[free].iommu = iommu;
+ ir_ioapic[free].id = scope->enumeration_id;
+ pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
+ scope->enumeration_id, drhd->address, iommu->seq_id);
+
+ return 0;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
+ int ret = 0;
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header;
-
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;
- while (start < end) {
+ while (start < end && ret == 0) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- if (ir_ioapic_num == MAX_IO_APICS) {
- printk(KERN_WARNING "Exceeded Max IO APICS\n");
- return -1;
- }
-
- printk(KERN_INFO "IOAPIC id %d under DRHD base "
- " 0x%Lx IOMMU %d\n", scope->enumeration_id,
- drhd->address, iommu->seq_id);
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+ ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+ else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+ ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+ start += scope->length;
+ }
- ir_parse_one_ioapic_scope(scope, iommu);
- } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
- if (ir_hpet_num == MAX_HPET_TBS) {
- printk(KERN_WARNING "Exceeded Max HPET blocks\n");
- return -1;
- }
+ return ret;
+}
- printk(KERN_INFO "HPET id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+ int i;
- ir_parse_one_hpet_scope(scope, iommu);
- }
- start += scope->length;
- }
+ for (i = 0; i < MAX_HPET_TBS; i++)
+ if (ir_hpet[i].iommu == iommu)
+ ir_hpet[i].iommu = NULL;
- return 0;
+ for (i = 0; i < MAX_IO_APICS; i++)
+ if (ir_ioapic[i].iommu == iommu)
+ ir_ioapic[i].iommu = NULL;
}
/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi,
};
+
+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+ int ret;
+ int eim = x2apic_enabled();
+
+ if (eim && !ecap_eim_support(iommu->ecap)) {
+ pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+ iommu->reg_phys, iommu->ecap);
+ return -ENODEV;
+ }
+
+ if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+ pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+ iommu->reg_phys);
+ return -ENODEV;
+ }
+
+ /* TODO: check all IOAPICs are covered by IOMMU */
+
+ /* Setup Interrupt-remapping now. */
+ ret = intel_setup_irq_remapping(iommu);
+ if (ret) {
+ pr_err("DRHD %Lx: failed to allocate resource\n",
+ iommu->reg_phys);
+ ir_remove_ioapic_hpet_scope(iommu);
+ return ret;
+ }
+
+ if (!iommu->qi) {
+ /* Clear previous faults. */
+ dmar_fault(-1, iommu);
+ iommu_disable_irq_remapping(iommu);
+ dmar_disable_qi(iommu);
+ }
+
+ /* Enable queued invalidation */
+ ret = dmar_enable_qi(iommu);
+ if (!ret) {
+ iommu_set_irq_remapping(iommu, eim);
+ } else {
+ pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+ iommu->reg_phys, iommu->ecap, ret);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+
+ return ret;
+}
+
+int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!irq_remapping_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+
+ if (insert) {
+ if (!iommu->ir_table)
+ ret = dmar_ir_add(dmaru, iommu);
+ } else {
+ if (iommu->ir_table) {
+ if (!bitmap_empty(iommu->ir_table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES)) {
+ ret = -EBUSY;
+ } else {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 02e4313e937c..f7718d73e984 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data)
const struct iommu_ops *ops = cb->ops;
if (!ops->add_device)
- return -ENODEV;
+ return 0;
WARN_ON(dev->iommu_group);
@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
{
struct scatterlist *s;
size_t mapped = 0;
- unsigned int i;
+ unsigned int i, min_pagesz;
int ret;
- for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = page_to_phys(sg_page(s));
+ if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ return 0;
- /* We are mapping on page boundarys, so offset must be 0 */
- if (s->offset)
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ /*
+ * We are mapping on IOMMU page boundaries, so offset within
+ * the page must be 0. However, the IOMMU may support pages
+ * smaller than PAGE_SIZE, so s->offset may still represent
+ * an offset of that boundary within the CPU page.
+ */
+ if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e509c58eee92..68dfb0fd5ee9 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
- return irq;
+ return ret;
}
ipmmu_device_reset(mmu);
@@ -1222,7 +1222,6 @@ static int ipmmu_remove(struct platform_device *pdev)
static struct platform_driver ipmmu_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ipmmu-vmsa",
},
.probe = ipmmu_probe,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1c7b78ecf3e3..e1b05379ca0e 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -73,8 +73,7 @@ fail:
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
}
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 61def7cb5263..b6d01f97e537 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
+ struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base;
int ret, irq, par;
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
- if (iommu_clk)
- clk_disable(iommu_clk);
+ clk_disable(iommu_clk);
clk_disable(iommu_pclk);
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
- struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
+ struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret;
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3);
}
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e550ccb7634e..af1dc6a1c0a1 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -18,9 +18,14 @@
*/
#include <linux/export.h>
+#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/slab.h>
+
+static const struct of_device_id __iommu_of_table_sentinel
+ __used __section(__iommu_of_table_end);
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
@@ -89,3 +94,87 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
+
+struct of_iommu_node {
+ struct list_head list;
+ struct device_node *np;
+ struct iommu_ops *ops;
+};
+static LIST_HEAD(of_iommu_list);
+static DEFINE_SPINLOCK(of_iommu_lock);
+
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+{
+ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+
+ if (WARN_ON(!iommu))
+ return;
+
+ INIT_LIST_HEAD(&iommu->list);
+ iommu->np = np;
+ iommu->ops = ops;
+ spin_lock(&of_iommu_lock);
+ list_add_tail(&iommu->list, &of_iommu_list);
+ spin_unlock(&of_iommu_lock);
+}
+
+struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+{
+ struct of_iommu_node *node;
+ struct iommu_ops *ops = NULL;
+
+ spin_lock(&of_iommu_lock);
+ list_for_each_entry(node, &of_iommu_list, list)
+ if (node->np == np) {
+ ops = node->ops;
+ break;
+ }
+ spin_unlock(&of_iommu_lock);
+ return ops;
+}
+
+struct iommu_ops *of_iommu_configure(struct device *dev)
+{
+ struct of_phandle_args iommu_spec;
+ struct device_node *np;
+ struct iommu_ops *ops = NULL;
+ int idx = 0;
+
+ /*
+ * We don't currently walk up the tree looking for a parent IOMMU.
+ * See the `Notes:' section of
+ * Documentation/devicetree/bindings/iommu/iommu.txt
+ */
+ while (!of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", idx,
+ &iommu_spec)) {
+ np = iommu_spec.np;
+ ops = of_iommu_get_ops(np);
+
+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ goto err_put_node;
+
+ of_node_put(np);
+ idx++;
+ }
+
+ return ops;
+
+err_put_node:
+ of_node_put(np);
+ return NULL;
+}
+
+void __init of_iommu_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match, *matches = &__iommu_of_table;
+
+ for_each_matching_node_and_match(np, matches, &match) {
+ const of_iommu_init_fn init_fn = match->data;
+
+ if (init_fn(np))
+ pr_err("Failed to initialise IOMMU %s\n",
+ of_node_full_name(np));
+ }
+}
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 531658d17333..f3d20a2039d2 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,45 +10,35 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/platform_device.h>
#include <linux/debugfs.h>
-#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h"
#include "omap-iommu.h"
-#define MAXCOLUMN 100 /* for short messages */
-
static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root;
-static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
{
- u32 ver = omap_iommu_arch_version();
- char buf[MAXCOLUMN], *p = buf;
-
- p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ return !obj->domain;
}
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes, rest;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes;
}
-static ssize_t debug_write_pagetable(struct file *file,
- const char __user *userbuf, size_t count, loff_t *ppos)
+static void dump_ioptable(struct seq_file *s)
{
- struct iotlb_entry e;
- struct cr_regs cr;
- int err;
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char buf[MAXCOLUMN], *p = buf;
-
- count = min(count, sizeof(buf));
-
- mutex_lock(&iommu_debug_lock);
- if (copy_from_user(p, userbuf, count)) {
- mutex_unlock(&iommu_debug_lock);
- return -EFAULT;
- }
-
- sscanf(p, "%x %x", &cr.cam, &cr.ram);
- if (!cr.cam || !cr.ram) {
- mutex_unlock(&iommu_debug_lock);
- return -EINVAL;
- }
-
- omap_iotlb_cr_to_e(&cr, &e);
- err = omap_iopgtable_store_entry(obj, &e);
- if (err)
- dev_err(obj->dev, "%s: fail to store cr\n", __func__);
-
- mutex_unlock(&iommu_debug_lock);
- return count;
-}
-
-#define dump_ioptable_entry_one(lv, da, val) \
- ({ \
- int __err = 0; \
- ssize_t bytes; \
- const int maxcol = 22; \
- const char *str = "%d: %08x %08x\n"; \
- bytes = snprintf(p, maxcol, str, lv, da, val); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- __err = -ENOMEM; \
- __err; \
- })
-
-static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- int i;
- u32 *iopgd;
- char *p = buf;
+ int i, j;
+ u32 da;
+ u32 *iopgd, *iopte;
+ struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
- int j, err;
- u32 *iopte;
- u32 da;
-
if (!*iopgd)
continue;
if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT;
-
- err = dump_ioptable_entry_one(1, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
continue;
}
iopte = iopte_offset(iopgd, 0);
-
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte)
continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
- err = dump_ioptable_entry_one(2, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
}
}
-out:
- spin_unlock(&obj->page_table_lock);
- return p - buf;
+ spin_unlock(&obj->page_table_lock);
}
-static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int debug_read_pagetable(struct seq_file *s, void *data)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char *p, *buf;
- size_t bytes;
+ struct omap_iommu *obj = s->private;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
- p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
- p += sprintf(p, "-----------------------------------------\n");
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
mutex_lock(&iommu_debug_lock);
- bytes = PAGE_SIZE - (p - buf);
- p += dump_ioptable(obj, p, bytes);
-
- bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
+ seq_puts(s, "--------------------------\n");
+ dump_ioptable(s);
mutex_unlock(&iommu_debug_lock);
- free_page((unsigned long)buf);
- return bytes;
+ return 0;
}
-#define DEBUG_FOPS(name) \
- static const struct file_operations debug_##name##_fops = { \
- .open = simple_open, \
- .read = debug_read_##name, \
- .write = debug_write_##name, \
- .llseek = generic_file_llseek, \
- };
+#define DEBUG_SEQ_FOPS_RO(name) \
+ static int debug_open_##name(struct inode *inode, struct file *file) \
+ { \
+ return single_open(file, debug_read_##name, inode->i_private); \
+ } \
+ \
+ static const struct file_operations debug_##name##_fops = { \
+ .open = debug_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
#define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \
};
-DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
-DEBUG_FOPS(pagetable);
+DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
{ \
struct dentry *dent; \
- dent = debugfs_create_file(#attr, mode, parent, \
- dev, &debug_##attr##_fops); \
+ dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
+ obj, &debug_##attr##_fops); \
if (!dent) \
- return -ENOMEM; \
+ goto err; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-static int iommu_debug_register(struct device *dev, void *data)
+void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_iommu *obj = platform_get_drvdata(pdev);
- struct omap_iommu_arch_data *arch_data;
- struct dentry *d, *parent;
-
- if (!obj || !obj->dev)
- return -EINVAL;
-
- arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
- if (!arch_data)
- return -ENOMEM;
-
- arch_data->iommu_dev = obj;
+ struct dentry *d;
- dev->archdata.iommu = arch_data;
+ if (!iommu_debug_root)
+ return;
- d = debugfs_create_dir(obj->name, iommu_debug_root);
- if (!d)
- goto nomem;
- parent = d;
+ obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
+ if (!obj->debug_dir)
+ return;
- d = debugfs_create_u8("nr_tlb_entries", 400, parent,
+ d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries);
if (!d)
- goto nomem;
+ return;
- DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb);
- DEBUG_ADD_FILE(pagetable);
+ DEBUG_ADD_FILE_RO(pagetable);
- return 0;
+ return;
-nomem:
- kfree(arch_data);
- return -ENOMEM;
+err:
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int iommu_debug_unregister(struct device *dev, void *data)
+void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{
- if (!dev->archdata.iommu)
- return 0;
-
- kfree(dev->archdata.iommu);
+ if (!obj->debug_dir)
+ return;
- dev->archdata.iommu = NULL;
-
- return 0;
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int __init iommu_debug_init(void)
+void __init omap_iommu_debugfs_init(void)
{
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("iommu", NULL);
- if (!d)
- return -ENOMEM;
- iommu_debug_root = d;
-
- err = omap_foreach_iommu_device(d, iommu_debug_register);
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- debugfs_remove_recursive(iommu_debug_root);
- return err;
+ iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
+ if (!iommu_debug_root)
+ pr_err("can't create debugfs dir\n");
}
-module_init(iommu_debug_init)
-static void __exit iommu_debugfs_exit(void)
+void __exit omap_iommu_debugfs_exit(void)
{
- debugfs_remove_recursive(iommu_debug_root);
- omap_foreach_iommu_device(NULL, iommu_debug_unregister);
+ debugfs_remove(iommu_debug_root);
}
-module_exit(iommu_debugfs_exit)
-
-MODULE_DESCRIPTION("omap iommu: debugfs interface");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 18003c044454..bbb7dcef02d3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -76,53 +76,23 @@ struct iotlb_lock {
short vict;
};
-/* accommodate the difference between omap1 and omap2/3 */
-static const struct iommu_functions *arch_iommu;
-
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
- * omap_install_iommu_arch - Install archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * There are several kind of iommu algorithm(tlb, pagetable) among
- * omap series. This interface installs such an iommu algorighm.
- **/
-int omap_install_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu)
- return -EBUSY;
-
- arch_iommu = ops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
-
-/**
- * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * This interface uninstalls the iommu algorighm installed previously.
- **/
-void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu != ops)
- pr_err("%s: not your arch\n", __func__);
-
- arch_iommu = NULL;
-}
-EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
-
-/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
void omap_iommu_save_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->save_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
void omap_iommu_restore_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->restore_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
-/**
- * omap_iommu_arch_version - Return running iommu arch version
- **/
-u32 omap_iommu_arch_version(void)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
- return arch_iommu->version;
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ if (on)
+ iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
+ else
+ iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
+
+ l &= ~MMU_CNTL_MASK;
+ if (on)
+ l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+ else
+ l |= (MMU_CNTL_MMU_EN);
+
+ iommu_write_reg(obj, l, MMU_CNTL);
+}
+
+static int omap2_iommu_enable(struct omap_iommu *obj)
+{
+ u32 l, pa;
+
+ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ return -EINVAL;
+
+ pa = virt_to_phys(obj->iopgd);
+ if (!IS_ALIGNED(pa, SZ_16K))
+ return -EINVAL;
+
+ l = iommu_read_reg(obj, MMU_REVISION);
+ dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+ (l >> 4) & 0xf, l & 0xf);
+
+ iommu_write_reg(obj, pa, MMU_TTB);
+
+ if (obj->has_bus_err_back)
+ iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
+ __iommu_set_twl(obj, true);
+
+ return 0;
+}
+
+static void omap2_iommu_disable(struct omap_iommu *obj)
+{
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ l &= ~MMU_CNTL_MASK;
+ iommu_write_reg(obj, l, MMU_CNTL);
+
+ dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
-EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
-
- if (!arch_iommu)
- return -ENODEV;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
pm_runtime_get_sync(obj->dev);
- err = arch_iommu->enable(obj);
+ err = omap2_iommu_enable(obj);
return err;
}
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
static void iommu_disable(struct omap_iommu *obj)
{
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- arch_iommu->disable(obj);
+ omap2_iommu_disable(obj);
pm_runtime_put_sync(obj->dev);
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
-void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- BUG_ON(!cr || !e);
-
- arch_iommu->cr_to_e(cr, e);
-}
-EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
-
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
if (!cr)
return -EINVAL;
- return arch_iommu->cr_valid(cr);
-}
-
-static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- if (!e)
- return NULL;
-
- return arch_iommu->alloc_cr(obj, e);
+ return cr->cam & MMU_CAM_V;
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
- return arch_iommu->cr_to_virt(cr);
+ u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+ u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+ return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
- return arch_iommu->get_pte_attr(e);
+ u32 attr;
+
+ attr = e->mixed << 5;
+ attr |= e->endian;
+ attr |= e->elsz >> 3;
+ attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+ (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
+ return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
- return arch_iommu->fault_isr(obj, da);
+ u32 status, fault_addr;
+
+ status = iommu_read_reg(obj, MMU_IRQSTATUS);
+ status &= MMU_IRQ_MASK;
+ if (!status) {
+ *da = 0;
+ return 0;
+ }
+
+ fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
+ *da = fault_addr;
+
+ iommu_write_reg(obj, status, MMU_IRQSTATUS);
+
+ return status;
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_read_cr(obj, cr);
+ cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+ cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_load_cr(obj, cr);
+ iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+ iommu_write_reg(obj, cr->ram, MMU_RAM);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
iommu_write_reg(obj, 1, MMU_LD_TLB);
}
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf)
-{
- BUG_ON(!cr || !buf);
-
- return arch_iommu->dump_cr(obj, cr, buf);
-}
-
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
return cr;
}
+#ifdef PREFETCH_IOTLB
+static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
+ struct iotlb_entry *e)
+{
+ struct cr_regs *cr;
+
+ if (!e)
+ return NULL;
+
+ if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+ dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+ e->da);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return ERR_PTR(-ENOMEM);
+
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
+ cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+ return cr;
+}
+
/**
* load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
-#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
+
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
pm_runtime_get_sync(obj->dev);
- bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+ bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
pm_runtime_put_sync(obj->dev);
return bytes;
}
-EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
/**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj: target iommu
+ * @cr: contents of cam and ram register
+ * @buf: output buffer
+ **/
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ char *buf)
+{
+ char *p = buf;
+
+ /* FIXME: Need more detail analysis of cam/ram */
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+ return p - buf;
+}
+
+/**
* omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu
* @buf: output buffer
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
return p - buf;
}
-EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
-
-int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
-{
- return driver_for_each_device(&omap_iommu_driver.driver,
- NULL, data, fn);
-}
-EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
-#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+#endif /* CONFIG_OMAP_IOMMU_DEBUG */
/*
* H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+static int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err;
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
prefetch_iotlb_entry(obj, e);
return err;
}
-EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/**
* iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
+ struct omap_iommu_domain *omap_domain = domain->priv;
- if (!obj->refcount)
+ if (!omap_domain->iommu_dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_lock(&obj->iommu_lock);
- /* an iommu device can only be attached once */
- if (++obj->refcount > 1) {
- dev_err(dev, "%s: already attached!\n", obj->name);
- err = -EBUSY;
- goto err_enable;
- }
-
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
return obj;
err_enable:
- obj->refcount--;
spin_unlock(&obj->iommu_lock);
return ERR_PTR(err);
}
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock);
- if (--obj->refcount == 0)
- iommu_disable(obj);
-
+ iommu_disable(obj);
obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
+ omap_iommu_debugfs_add(obj);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
}
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj);
+ omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e->da = da;
e->pa = pa;
e->valid = MMU_CAM_V;
- /* FIXME: add OMAP1 support */
e->pgsz = pgsz;
e->endian = MMU_RAM_ENDIAN_LITTLE;
e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL;
+ oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ omap_iommu_debugfs_init();
+
return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
kmem_cache_destroy(iopte_cachep);
platform_driver_unregister(&omap_iommu_driver);
+
+ omap_iommu_debugfs_exit();
}
module_exit(omap_iommu_exit);
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 4f1b68c08c15..d736630df3c8 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -10,9 +10,8 @@
* published by the Free Software Foundation.
*/
-#if defined(CONFIG_ARCH_OMAP1)
-#error "iommu for this processor not implemented yet"
-#endif
+#ifndef _OMAP_IOMMU_H
+#define _OMAP_IOMMU_H
struct iotlb_entry {
u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name;
void __iomem *regbase;
struct device *dev;
- void *isr_priv;
struct iommu_domain *domain;
+ struct dentry *debug_dir;
- unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */
/*
@@ -67,34 +65,6 @@ struct cr_regs {
};
};
-/* architecture specific functions */
-struct iommu_functions {
- unsigned long version;
-
- int (*enable)(struct omap_iommu *obj);
- void (*disable)(struct omap_iommu *obj);
- void (*set_twl)(struct omap_iommu *obj, bool on);
- u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
-
- void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
- void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
-
- struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
- struct iotlb_entry *e);
- int (*cr_valid)(struct cr_regs *cr);
- u32 (*cr_to_virt)(struct cr_regs *cr);
- void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
- ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf);
-
- u32 (*get_pte_attr)(struct iotlb_entry *e);
-
- void (*save_ctx)(struct omap_iommu *obj);
- void (*restore_ctx)(struct omap_iommu *obj);
- ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
-};
-
-#ifdef CONFIG_IOMMU_API
/**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev;
}
-#endif
/*
* MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* MMU Register bit definitions
*/
+/* IRQSTATUS & IRQENABLE */
+#define MMU_IRQ_MULTIHITFAULT (1 << 4)
+#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
+#define MMU_IRQ_EMUMISS (1 << 2)
+#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
+#define MMU_IRQ_TLBMISS (1 << 0)
+
+#define __MMU_IRQ_FAULT \
+ (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
+#define MMU_IRQ_MASK \
+ (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
+#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
+#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
+
+/* MMU_CNTL */
+#define MMU_CNTL_SHIFT 1
+#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
+#define MMU_CNTL_EML_TLB (1 << 3)
+#define MMU_CNTL_TWL_EN (1 << 2)
+#define MMU_CNTL_MMU_EN (1 << 1)
+
+/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0)
+/* RAM */
#define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
+#define get_cam_va_mask(pgsz) \
+ (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
+ ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
+ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
+ ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
+
/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
-extern u32 omap_iommu_arch_version(void);
-
-extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-
-extern int
-omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
-
-extern void omap_iommu_save_ctx(struct device *dev);
-extern void omap_iommu_restore_ctx(struct device *dev);
-
-extern int omap_foreach_iommu_device(void *data,
- int (*fn)(struct device *, void *));
-
-extern int omap_install_iommu_arch(const struct iommu_functions *ops);
-extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
-
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+void omap_iommu_debugfs_init(void);
+void omap_iommu_debugfs_exit(void);
+
+void omap_iommu_debugfs_add(struct omap_iommu *obj);
+void omap_iommu_debugfs_remove(struct omap_iommu *obj);
+#else
+static inline void omap_iommu_debugfs_init(void) { }
+static inline void omap_iommu_debugfs_exit(void) { }
+
+static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
+static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
+#endif
+
/*
* register accessors
*/
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
__raw_writel(val, obj->regbase + offs);
}
+
+#endif /* _OMAP_IOMMU_H */
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
deleted file mode 100644
index 5e1ea3b0bf16..000000000000
--- a/drivers/iommu/omap-iommu2.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * omap iommu: omap2/3 architecture specific functions
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
- * Paul Mundt and Toshihiro Kobayashi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/omap-iommu.h>
-#include <linux/slab.h>
-#include <linux/stringify.h>
-#include <linux/platform_data/iommu-omap.h>
-
-#include "omap-iommu.h"
-
-/*
- * omap2 architecture specific register bit definitions
- */
-#define IOMMU_ARCH_VERSION 0x00000011
-
-/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
-#define MMU_IRQ_TLBMISS (1 << 0)
-
-#define __MMU_IRQ_FAULT \
- (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
-#define MMU_IRQ_MASK \
- (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
-#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
-#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
-
-/* MMU_CNTL */
-#define MMU_CNTL_SHIFT 1
-#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB (1 << 3)
-#define MMU_CNTL_TWL_EN (1 << 2)
-#define MMU_CNTL_MMU_EN (1 << 1)
-
-#define get_cam_va_mask(pgsz) \
- (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
- ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
- ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
- ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
-
-/* IOMMU errors */
-#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
-#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
-#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
-#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
-#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
-
-static void __iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- if (on)
- iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
- else
- iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
-
- l &= ~MMU_CNTL_MASK;
- if (on)
- l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
- else
- l |= (MMU_CNTL_MMU_EN);
-
- iommu_write_reg(obj, l, MMU_CNTL);
-}
-
-
-static int omap2_iommu_enable(struct omap_iommu *obj)
-{
- u32 l, pa;
-
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
- return -EINVAL;
-
- pa = virt_to_phys(obj->iopgd);
- if (!IS_ALIGNED(pa, SZ_16K))
- return -EINVAL;
-
- l = iommu_read_reg(obj, MMU_REVISION);
- dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
- (l >> 4) & 0xf, l & 0xf);
-
- iommu_write_reg(obj, pa, MMU_TTB);
-
- if (obj->has_bus_err_back)
- iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
-
- __iommu_set_twl(obj, true);
-
- return 0;
-}
-
-static void omap2_iommu_disable(struct omap_iommu *obj)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- l &= ~MMU_CNTL_MASK;
- iommu_write_reg(obj, l, MMU_CNTL);
-
- dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
-}
-
-static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- __iommu_set_twl(obj, false);
-}
-
-static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
-{
- u32 stat, da;
- u32 errs = 0;
-
- stat = iommu_read_reg(obj, MMU_IRQSTATUS);
- stat &= MMU_IRQ_MASK;
- if (!stat) {
- *ra = 0;
- return 0;
- }
-
- da = iommu_read_reg(obj, MMU_FAULT_AD);
- *ra = da;
-
- if (stat & MMU_IRQ_TLBMISS)
- errs |= OMAP_IOMMU_ERR_TLB_MISS;
- if (stat & MMU_IRQ_TRANSLATIONFAULT)
- errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
- if (stat & MMU_IRQ_EMUMISS)
- errs |= OMAP_IOMMU_ERR_EMU_MISS;
- if (stat & MMU_IRQ_TABLEWALKFAULT)
- errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
- if (stat & MMU_IRQ_MULTIHITFAULT)
- errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
- iommu_write_reg(obj, stat, MMU_IRQSTATUS);
-
- return errs;
-}
-
-static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
- cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
-}
-
-static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
- iommu_write_reg(obj, cr->ram, MMU_RAM);
-}
-
-static u32 omap2_cr_to_virt(struct cr_regs *cr)
-{
- u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
- u32 mask = get_cam_va_mask(cr->cam & page_size);
-
- return cr->cam & mask;
-}
-
-static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- struct cr_regs *cr;
-
- if (e->da & ~(get_cam_va_mask(e->pgsz))) {
- dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
- e->da);
- return ERR_PTR(-EINVAL);
- }
-
- cr = kmalloc(sizeof(*cr), GFP_KERNEL);
- if (!cr)
- return ERR_PTR(-ENOMEM);
-
- cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
- cr->ram = e->pa | e->endian | e->elsz | e->mixed;
-
- return cr;
-}
-
-static inline int omap2_cr_valid(struct cr_regs *cr)
-{
- return cr->cam & MMU_CAM_V;
-}
-
-static u32 omap2_get_pte_attr(struct iotlb_entry *e)
-{
- u32 attr;
-
- attr = e->mixed << 5;
- attr |= e->endian;
- attr |= e->elsz >> 3;
- attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
- (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
- return attr;
-}
-
-static ssize_t
-omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
-{
- char *p = buf;
-
- /* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
-
- return p - buf;
-}
-
-#define pr_reg(name) \
- do { \
- ssize_t bytes; \
- const char *str = "%20s: %08x\n"; \
- const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
- iommu_read_reg(obj, MMU_##name)); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- goto out; \
- } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- char *p = buf;
-
- pr_reg(REVISION);
- pr_reg(IRQSTATUS);
- pr_reg(IRQENABLE);
- pr_reg(WALKING_ST);
- pr_reg(CNTL);
- pr_reg(FAULT_AD);
- pr_reg(TTB);
- pr_reg(LOCK);
- pr_reg(LD_TLB);
- pr_reg(CAM);
- pr_reg(RAM);
- pr_reg(GFLUSH);
- pr_reg(FLUSH_ENTRY);
- pr_reg(READ_CAM);
- pr_reg(READ_RAM);
- pr_reg(EMU_FAULT_AD);
-out:
- return p - buf;
-}
-
-static void omap2_iommu_save_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- p[i] = iommu_read_reg(obj, i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- iommu_write_reg(obj, p[i], i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- e->da = cr->cam & MMU_CAM_VATAG_MASK;
- e->pa = cr->ram & MMU_RAM_PADDR_MASK;
- e->valid = cr->cam & MMU_CAM_V;
- e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
- e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
- e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
- e->mixed = cr->ram & MMU_RAM_MIXED;
-}
-
-static const struct iommu_functions omap2_iommu_ops = {
- .version = IOMMU_ARCH_VERSION,
-
- .enable = omap2_iommu_enable,
- .disable = omap2_iommu_disable,
- .set_twl = omap2_iommu_set_twl,
- .fault_isr = omap2_iommu_fault_isr,
-
- .tlb_read_cr = omap2_tlb_read_cr,
- .tlb_load_cr = omap2_tlb_load_cr,
-
- .cr_to_e = omap2_cr_to_e,
- .cr_to_virt = omap2_cr_to_virt,
- .alloc_cr = omap2_alloc_cr,
- .cr_valid = omap2_cr_valid,
- .dump_cr = omap2_dump_cr,
-
- .get_pte_attr = omap2_get_pte_attr,
-
- .save_ctx = omap2_iommu_save_ctx,
- .restore_ctx = omap2_iommu_restore_ctx,
- .dump_ctx = omap2_iommu_dump_ctx,
-};
-
-static int __init omap2_iommu_init(void)
-{
- return omap_install_iommu_arch(&omap2_iommu_ops);
-}
-module_init(omap2_iommu_init);
-
-static void __exit omap2_iommu_exit(void)
-{
- omap_uninstall_iommu_arch(&omap2_iommu_ops);
-}
-module_exit(omap2_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/** MMU register offsets */
+#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
+#define RK_MMU_STATUS 0x04
+#define RK_MMU_COMMAND 0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
+#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING 0x24
+
+#define DTE_ADDR_DUMMY 0xCAFEBABE
+#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define RK_MMU_STATUS_IDLE BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */
+#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
+#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
+
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+ /*
+ * Support mapping any size that fits in one page table:
+ * 4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
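
The bitmap above is just one bit per supported power-of-two mapping size. A standalone plain-C sketch, written here only for illustration and not part of the patch, that rebuilds the constant from the 4 KiB to 4 MiB range:

#include <assert.h>
#include <stdint.h>

/* One bit per power-of-two size, from 4 KiB (order 12) up to 4 MiB (order 22). */
static uint32_t build_pgsize_bitmap(void)
{
	uint32_t bm = 0;
	unsigned int order;

	for (order = 12; order <= 22; order++)
		bm |= UINT32_C(1) << order;

	return bm;
}

int main(void)
{
	assert(build_pgsize_bitmap() == 0x007ff000);
	return 0;
}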
+
+#define IOMMU_REG_POLL_COUNT_FAST 1000
+
+struct rk_iommu_domain {
+ struct list_head iommus;
+ u32 *dt; /* page directory table */
+ spinlock_t iommus_lock; /* lock for iommus list */
+ spinlock_t dt_lock; /* lock for modifying page directory table */
+};
+
+struct rk_iommu {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct list_head node; /* entry in rk_iommu_domain.iommus */
+ struct iommu_domain *domain; /* domain to which iommu is attached */
+};
+
+static inline void rk_table_flush(u32 *va, unsigned int count)
+{
+ phys_addr_t pa_start = virt_to_phys(va);
+ phys_addr_t pa_end = virt_to_phys(va + count);
+ size_t size = pa_end - pa_start;
+
+ __cpuc_flush_dcache_area(va, size);
+ outer_flush_range(pa_start, pa_end);
+}
+
+/**
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define rk_wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(50, 100); \
+ } \
+ ret__; \
+})
+
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level is the 1024 Page Tables (PT).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
+ * Each iommu device has a MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ * DT
+ * MMU_DTE_ADDR -> +-----+
+ * | |
+ * +-----+ PT
+ * | DTE | -> +-----+
+ * +-----+ | | Memory
+ * | | +-----+ Page
+ * | | | PTE | -> +-----+
+ * +-----+ +-----+ | |
+ * | | | |
+ * | | | |
+ * +-----+ | |
+ * | |
+ * | |
+ * +-----+
+ */
+
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:12 - PT address (PTs always start on a 4 KB boundary)
+ * 11: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
+#define RK_DTE_PT_VALID BIT(0)
+
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
+static inline bool rk_dte_is_pt_valid(u32 dte)
+{
+ return dte & RK_DTE_PT_VALID;
+}
+
+static u32 rk_mk_dte(u32 *pt)
+{
+ phys_addr_t pt_phys = virt_to_phys(pt);
+ return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * | Page address |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ * 31:12 - Page address (Pages always start on a 4 KB boundary)
+ * 11: 9 - Reserved
+ * 8: 1 - Flags
+ * 8 - Read allocate - allocate cache space on read misses
+ * 7 - Read cache - enable cache & prefetch of data
+ * 6 - Write buffer - enable delaying writes on their way to memory
+ * 5 - Write allocate - allocate cache space on write misses
+ * 4 - Write cache - different writes can be merged together
+ * 3 - Override cache attributes
+ * if 1, bits 4-8 control cache attributes
+ * if 0, the system bus defaults are used
+ * 2 - Writable
+ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
+#define RK_PTE_PAGE_WRITABLE BIT(2)
+#define RK_PTE_PAGE_READABLE BIT(1)
+#define RK_PTE_PAGE_VALID BIT(0)
+
+static inline phys_addr_t rk_pte_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
+}
+
+static inline bool rk_pte_is_page_valid(u32 pte)
+{
+ return pte & RK_PTE_PAGE_VALID;
+}
+
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ page &= RK_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | RK_PTE_PAGE_VALID;
+}
+
+static u32 rk_mk_pte_invalid(u32 pte)
+{
+ return pte & ~RK_PTE_PAGE_VALID;
+}
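
To make the PTE layout above concrete, here is a standalone plain-C re-creation of the packing done by rk_mk_pte(); the constants mirror the defines above and the sample physical address is made up:

#include <assert.h>
#include <stdint.h>

#define PTE_ADDR_MASK	0xfffff000u
#define PTE_WRITABLE	(1u << 2)
#define PTE_READABLE	(1u << 1)
#define PTE_VALID	(1u << 0)

static uint32_t mk_pte(uint32_t page_phys, int readable, int writable)
{
	uint32_t pte = page_phys & PTE_ADDR_MASK;

	if (readable)
		pte |= PTE_READABLE;
	if (writable)
		pte |= PTE_WRITABLE;

	return pte | PTE_VALID;
}

int main(void)
{
	/* A read-write mapping of the (made-up) 4 KiB page at 0x12345000. */
	assert(mk_pte(0x12345000u, 1, 1) == 0x12345007u);
	return 0;
}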
+
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ * 31 22.21 12.11 0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ * 31:22 - DTE index - index of DTE in DT
+ * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
+ * 11: 0 - Page offset - offset into page @ PTE.page_address
+ */
+#define RK_IOVA_DTE_MASK 0xffc00000
+#define RK_IOVA_DTE_SHIFT 22
+#define RK_IOVA_PTE_MASK 0x003ff000
+#define RK_IOVA_PTE_SHIFT 12
+#define RK_IOVA_PAGE_MASK 0x00000fff
+#define RK_IOVA_PAGE_SHIFT 0
+
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
+
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
+
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
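
A worked decomposition of one made-up iova, as a standalone plain-C check of the masks and shifts above:

#include <assert.h>
#include <stdint.h>

#define IOVA_DTE_MASK	0xffc00000u
#define IOVA_DTE_SHIFT	22
#define IOVA_PTE_MASK	0x003ff000u
#define IOVA_PTE_SHIFT	12
#define IOVA_PAGE_MASK	0x00000fffu

int main(void)
{
	uint32_t iova = 0x12345678;	/* arbitrary example address */

	assert(((iova & IOVA_DTE_MASK) >> IOVA_DTE_SHIFT) == 0x048);	/* DTE 72 */
	assert(((iova & IOVA_PTE_MASK) >> IOVA_PTE_SHIFT) == 0x345);	/* PTE 837 */
	assert((iova & IOVA_PAGE_MASK) == 0x678);	/* byte 1656 within the page */
	return 0;
}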
+
+static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+ writel(command, iommu->base + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+ size_t size)
+{
+ dma_addr_t iova_end = iova + size;
+ /*
+ * TODO(djkurtz): Figure out when it is more efficient to shootdown the
+ * entire iotlb rather than iterating over individual iovas.
+ */
+ for (; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+}
+
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+}
+
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED;
+}
+
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ /* Stall can only be enabled if paging is enabled */
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+ ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
+
+ ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
+
+ ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
+
+ ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
+{
+ int ret;
+ u32 dte_addr;
+
+ /*
+ * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+ * and verifying that the upper 5 nybbles are read back.
+ */
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+ dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
+
+ rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
+
+ ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret)
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+
+ return ret;
+}
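
For reference, the value the dummy-write check above expects to read back is simply DTE_ADDR_DUMMY with its low 12 bits masked off; a trivial standalone check:

#include <assert.h>

int main(void)
{
	/* Only bits 31:12 of the dummy pattern survive RK_DTE_PT_ADDRESS_MASK. */
	assert((0xCAFEBABEu & 0xfffff000u) == 0xCAFEB000u);
	return 0;
}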
+
+static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+{
+ u32 dte_index, pte_index, page_offset;
+ u32 mmu_dte_addr;
+ phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+ u32 *dte_addr;
+ u32 dte;
+ phys_addr_t pte_addr_phys = 0;
+ u32 *pte_addr = NULL;
+ u32 pte = 0;
+ phys_addr_t page_addr_phys = 0;
+ u32 page_flags = 0;
+
+ dte_index = rk_iova_dte_index(iova);
+ pte_index = rk_iova_pte_index(iova);
+ page_offset = rk_iova_page_offset(iova);
+
+ mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+ dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ dte_addr = phys_to_virt(dte_addr_phys);
+ dte = *dte_addr;
+
+ if (!rk_dte_is_pt_valid(dte))
+ goto print_it;
+
+ pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr = phys_to_virt(pte_addr_phys);
+ pte = *pte_addr;
+
+ if (!rk_pte_is_page_valid(pte))
+ goto print_it;
+
+ page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
+
+print_it:
+ dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+ &iova, dte_index, pte_index, page_offset);
+ dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ &mmu_dte_addr_phys, &dte_addr_phys, dte,
+ rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
+}
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+ struct rk_iommu *iommu = dev_id;
+ u32 status;
+ u32 int_status;
+ dma_addr_t iova;
+
+ int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ return IRQ_NONE;
+
+ iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
+
+ status = rk_iommu_read(iommu, RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+ log_iova(iommu, iova);
+
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
+
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
+
+ rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+
+ return IRQ_HANDLED;
+}
+
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ phys_addr_t pt_phys, phys = 0;
+ u32 dte, pte;
+ u32 *page_table;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ if (!rk_dte_is_pt_valid(dte))
+ goto out;
+
+ pt_phys = rk_dte_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[rk_iova_pte_index(iova)];
+ if (!rk_pte_is_page_valid(pte))
+ goto out;
+
+ phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return phys;
+}
+
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* shootdown these iova from all iommus using this domain */
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_zap_lines(iommu, iova, size);
+ }
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova)
+{
+ u32 *page_table, *dte_addr;
+ u32 dte;
+ phys_addr_t pt_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte = *dte_addr;
+ if (rk_dte_is_pt_valid(dte))
+ goto done;
+
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ dte = rk_mk_dte(page_table);
+ *dte_addr = dte;
+
+ rk_table_flush(page_table, NUM_PT_ENTRIES);
+ rk_table_flush(dte_addr, 1);
+
+ /*
+ * Zap the first iova of the newly allocated page table so the iommu
+ * evicts any stale copy of this dte it may have cached in its iotlb.
+ */
+ rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+
+done:
+ pt_phys = rk_dte_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+}
+
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+ u32 *pte_addr, dma_addr_t iova, size_t size)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+ if (!rk_pte_is_page_valid(pte))
+ break;
+
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return pte_count * SPAGE_SIZE;
+}
+
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+ dma_addr_t iova, phys_addr_t paddr, size_t size,
+ int prot)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+ phys_addr_t page_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+
+ if (rk_pte_is_page_valid(pte))
+ goto unwind;
+
+ pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+
+ paddr += SPAGE_SIZE;
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return 0;
+unwind:
+ /* Unmap the range of iovas that we just mapped */
+ rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+
+ iova += pte_count * SPAGE_SIZE;
+ page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+
+ return -EADDRINUSE;
+}
+
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ u32 *page_table, *pte_addr;
+ int ret;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So we always have 4096 <= size <= 4194304.
+ * Since iommu_map() guarantees that both iova and size will be
+ * aligned, we will always only be mapping from a single dte here.
+ */
+ page_table = rk_dte_get_page_table(rk_domain, iova);
+ if (IS_ERR(page_table)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return PTR_ERR(page_table);
+ }
+
+ pte_addr = &page_table[rk_iova_pte_index(iova)];
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return ret;
+}
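
For context, these callbacks are only ever reached through the generic IOMMU API. A rough sketch of how a hypothetical master driver of this era could exercise them; the device pointer, iova and physical address are made up and error handling is minimal:

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int example_map_one_page(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* ends up in rk_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* Map one 4 KiB page read-write; addresses are made up for illustration. */
	ret = iommu_map(domain, 0x10000000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, 0x10000000, SZ_4K);

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}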
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+ size_t size)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ phys_addr_t pt_phys;
+ u32 dte;
+ u32 *pte_addr;
+ size_t unmap_size;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So we always have 4096 <= size <= 4194304.
+ * Since iommu_unmap() guarantees that both iova and size will be
+ * aligned, we will always only be unmapping from a single dte here.
+ */
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ /* Just return 0 if iova is unmapped */
+ if (!rk_dte_is_pt_valid(dte)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return 0;
+ }
+
+ pt_phys = rk_dte_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ /* Shootdown iotlb entries for iova range that was just unmapped */
+ rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+ return unmap_size;
+}
+
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
+{
+ struct iommu_group *group;
+ struct device *iommu_dev;
+ struct rk_iommu *rk_iommu;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+ iommu_dev = iommu_group_get_iommudata(group);
+ rk_iommu = dev_get_drvdata(iommu_dev);
+ iommu_group_put(group);
+
+ return rk_iommu;
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+ phys_addr_t dte_addr;
+
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ return ret;
+
+ ret = rk_iommu_force_reset(iommu);
+ if (ret)
+ return ret;
+
+ iommu->domain = domain;
+
+ ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (ret)
+ return ret;
+
+ dte_addr = virt_to_phys(rk_domain->dt);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+ ret = rk_iommu_enable_paging(iommu);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ dev_info(dev, "Attached to iommu domain\n");
+
+ rk_iommu_disable_stall(iommu);
+
+ return 0;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+
+ /* Allow 'virtual devices' (e.g., drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ /* Ignore error while disabling, just keep going */
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ rk_iommu_disable_stall(iommu);
+
+ devm_free_irq(dev, iommu->irq, iommu);
+
+ iommu->domain = NULL;
+
+ dev_info(dev, "Detached from iommu domain\n");
+}
+
+static int rk_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain;
+
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ return -ENOMEM;
+
+ /*
+ * rk32xx iommus use a 2 level pagetable.
+ * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
+ * Allocate one 4 KiB page for each table.
+ */
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ if (!rk_domain->dt)
+ goto err_dt;
+
+ rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+
+ spin_lock_init(&rk_domain->iommus_lock);
+ spin_lock_init(&rk_domain->dt_lock);
+ INIT_LIST_HEAD(&rk_domain->iommus);
+
+ domain->priv = rk_domain;
+
+ return 0;
+err_dt:
+ kfree(rk_domain);
+ return -ENOMEM;
+}
+
+static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ int i;
+
+ WARN_ON(!list_empty(&rk_domain->iommus));
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ u32 dte = rk_domain->dt[i];
+ if (rk_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ u32 *page_table = phys_to_virt(pt_phys);
+ free_page((unsigned long)page_table);
+ }
+ }
+
+ free_page((unsigned long)rk_domain->dt);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+ return (ret > 0);
+}
+
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct platform_device *pd;
+ int ret;
+ struct of_phandle_args args;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+ &args);
+ if (ret) {
+ dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+ np->full_name, ret);
+ return ret;
+ }
+ if (args.args_count != 0) {
+ dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+ args.np->full_name, args.args_count);
+ return -EINVAL;
+ }
+
+ pd = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!pd) {
+ dev_err(dev, "iommu %s not found\n", args.np->full_name);
+ return -EPROBE_DEFER;
+ }
+
+ /* TODO(djkurtz): handle multiple slave iommus for a single master */
+ iommu_group_set_iommudata(group, &pd->dev, NULL);
+
+ return 0;
+}
+
+static int rk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return -ENODEV;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ if (ret)
+ goto err_put_group;
+
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_remove_device;
+
+ iommu_group_put(group);
+
+ return 0;
+
+err_remove_device:
+ iommu_group_remove_device(dev);
+err_put_group:
+ iommu_group_put(group);
+ return ret;
+}
+
+static void rk_iommu_remove_device(struct device *dev)
+{
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return;
+
+ iommu_group_remove_device(dev);
+}
+
+static const struct iommu_ops rk_iommu_ops = {
+ .domain_init = rk_iommu_domain_init,
+ .domain_destroy = rk_iommu_domain_destroy,
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .add_device = rk_iommu_add_device,
+ .remove_device = rk_iommu_remove_device,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_iommu *iommu;
+ struct resource *res;
+
+ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->base))
+ return PTR_ERR(iommu->base);
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int rk_iommu_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rk_iommu_dt_ids[] = {
+ { .compatible = "rockchip,iommu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
+#endif
+
+static struct platform_driver rk_iommu_driver = {
+ .probe = rk_iommu_probe,
+ .remove = rk_iommu_remove,
+ .driver = {
+ .name = "rk_iommu",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+ },
+};
+
+static int __init rk_iommu_init(void)
+{
+ int ret;
+
+ ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&rk_iommu_driver);
+}
+static void __exit rk_iommu_exit(void)
+{
+ platform_driver_unregister(&rk_iommu_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index bd97adecb1fd..951651a9746b 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -118,7 +118,6 @@ static int ipmmu_probe(struct platform_device *pdev)
static struct platform_driver ipmmu_driver = {
.probe = ipmmu_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "ipmmu",
},
};
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a6d76abf2c06..f722a0c466cf 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -425,7 +425,6 @@ static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
.of_match_table = tegra_gart_of_match,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9efe5f10f97b..cc79d2a5a8c2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -5,8 +5,15 @@ config IRQCHIP
config ARM_GIC
bool
select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
+config ARM_GIC_V2M
+ bool
+ depends on ARM_GIC
+ depends on PCI && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+
config GIC_NON_BANKED
bool
@@ -14,6 +21,11 @@ config ARM_GIC_V3
bool
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
+ select IRQ_DOMAIN_HIERARCHY
+
+config ARM_GIC_V3_ITS
+ bool
+ select PCI_MSI_IRQ_DOMAIN
config ARM_NVIC
bool
@@ -130,3 +142,7 @@ config KEYSTONE_IRQ
help
Support for Texas Instruments Keystone 2 IRQ controller IP which
is part of the Keystone 2 IPC mechanism
+
+config MIPS_GIC
+ bool
+ select MIPS_CM
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index f0909d05eae3..9516a324be6d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
@@ -38,3 +40,5 @@ obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
+obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o
+obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
new file mode 100644
index 000000000000..fdf706555d72
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -0,0 +1,333 @@
+/*
+ * ARM GIC v2m MSI(-X) support
+ * Support for Message Signaled Interrupts for systems that
+ * implement ARM Generic Interrupt Controller: GICv2m.
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
+ * Brandon Anderson <brandon.anderson@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "GICv2m: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/*
+ * MSI_TYPER:
+ * [31:26] Reserved
+ * [25:16] lowest SPI assigned to MSI
+ * [15:10] Reserved
+ * [9:0]   Number of SPIs assigned to MSI
+ */
+#define V2M_MSI_TYPER 0x008
+#define V2M_MSI_TYPER_BASE_SHIFT 16
+#define V2M_MSI_TYPER_BASE_MASK 0x3FF
+#define V2M_MSI_TYPER_NUM_MASK 0x3FF
+#define V2M_MSI_SETSPI_NS 0x040
+#define V2M_MIN_SPI 32
+#define V2M_MAX_SPI 1019
+
+#define V2M_MSI_TYPER_BASE_SPI(x) \
+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
+
+#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
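
A worked example of the two extraction macros above, as a standalone plain-C check; the register value is made up:

#include <assert.h>
#include <stdint.h>

#define TYPER_BASE_SHIFT	16
#define TYPER_BASE_MASK		0x3FFu
#define TYPER_NUM_MASK		0x3FFu

int main(void)
{
	uint32_t typer = 0x00400020;	/* made-up MSI_TYPER value */

	assert(((typer >> TYPER_BASE_SHIFT) & TYPER_BASE_MASK) == 64);	/* first SPI */
	assert((typer & TYPER_NUM_MASK) == 32);				/* 32 SPIs for MSI */
	return 0;
}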
+
+struct v2m_data {
+ spinlock_t msi_cnt_lock;
+ struct msi_controller mchip;
+ struct resource res; /* GICv2m resource */
+ void __iomem *base; /* GICv2m virt address */
+ u32 spi_start; /* The SPI number at which MSIs start */
+ u32 nr_spis; /* The number of SPIs for MSIs */
+ unsigned long *bm; /* MSI vector bitmap */
+ struct irq_domain *domain;
+};
+
+static void gicv2m_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void gicv2m_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip gicv2m_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = gicv2m_mask_msi_irq,
+ .irq_unmask = gicv2m_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+static struct msi_domain_info gicv2m_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &gicv2m_msi_irq_chip,
+};
+
+static int gicv2m_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ int ret;
+
+ ret = irq_chip_set_affinity_parent(irq_data, mask, force);
+ if (ret == IRQ_SET_MASK_OK)
+ ret = IRQ_SET_MASK_OK_DONE;
+
+ return ret;
+}
+
+static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
+
+ msg->address_hi = (u32) (addr >> 32);
+ msg->address_lo = (u32) (addr);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip gicv2m_irq_chip = {
+ .name = "GICv2m",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = gicv2m_set_affinity,
+ .irq_compose_msi_msg = gicv2m_compose_msi_msg,
+};
+
+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct of_phandle_args args;
+ struct irq_data *d;
+ int err;
+
+ args.np = domain->parent->of_node;
+ args.args_count = 3;
+ args.args[0] = 0;
+ args.args[1] = hwirq - 32;
+ args.args[2] = IRQ_TYPE_EDGE_RISING;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+ if (err)
+ return err;
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return 0;
+}
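
The three cells filled in above mirror the standard GIC devicetree interrupt specifier: cell 0 selects the SPI space, cell 1 is the interrupt number with the 32 SGI/PPI IDs subtracted, and cell 2 is the trigger type. A standalone plain-C illustration with a made-up SPI number:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t spi_hwirq = 96;		/* made-up SPI in GIC numbering */
	uint32_t cells[3] = {
		0,				/* 0 selects the SPI space */
		spi_hwirq - 32,			/* SPIs start at hwirq 32 */
		1,				/* IRQ_TYPE_EDGE_RISING */
	};

	assert(cells[0] == 0 && cells[1] == 64 && cells[2] == 1);
	return 0;
}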
+
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
+{
+ int pos;
+
+ pos = hwirq - v2m->spi_start;
+ if (pos < 0 || pos >= v2m->nr_spis) {
+ pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
+ return;
+ }
+
+ spin_lock(&v2m->msi_cnt_lock);
+ __clear_bit(pos, v2m->bm);
+ spin_unlock(&v2m->msi_cnt_lock);
+}
+
+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct v2m_data *v2m = domain->host_data;
+ int hwirq, offset, err = 0;
+
+ spin_lock(&v2m->msi_cnt_lock);
+ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
+ if (offset < v2m->nr_spis)
+ __set_bit(offset, v2m->bm);
+ else
+ err = -ENOSPC;
+ spin_unlock(&v2m->msi_cnt_lock);
+
+ if (err)
+ return err;
+
+ hwirq = v2m->spi_start + offset;
+
+ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
+ if (err) {
+ gicv2m_unalloc_msi(v2m, hwirq);
+ return err;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &gicv2m_irq_chip, v2m);
+
+ return 0;
+}
+
+static void gicv2m_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(nr_irqs != 1);
+ gicv2m_unalloc_msi(v2m, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops gicv2m_domain_ops = {
+ .alloc = gicv2m_irq_domain_alloc,
+ .free = gicv2m_irq_domain_free,
+};
+
+static bool is_msi_spi_valid(u32 base, u32 num)
+{
+ if (base < V2M_MIN_SPI) {
+ pr_err("Invalid MSI base SPI (base:%u)\n", base);
+ return false;
+ }
+
+ if ((num == 0) || (base + num > V2M_MAX_SPI)) {
+ pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
+ num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
+ return false;
+ }
+
+ return true;
+}
+
+static int __init gicv2m_init_one(struct device_node *node,
+ struct irq_domain *parent)
+{
+ int ret;
+ struct v2m_data *v2m;
+
+ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
+ if (!v2m) {
+ pr_err("Failed to allocate struct v2m_data.\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, 0, &v2m->res);
+ if (ret) {
+ pr_err("Failed to allocate v2m resource.\n");
+ goto err_free_v2m;
+ }
+
+ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
+ if (!v2m->base) {
+ pr_err("Failed to map GICv2m resource\n");
+ ret = -ENOMEM;
+ goto err_free_v2m;
+ }
+
+ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
+ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
+ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ v2m->spi_start, v2m->nr_spis);
+ } else {
+ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+
+ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
+ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
+ }
+
+ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
+ GFP_KERNEL);
+ if (!v2m->bm) {
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
+ if (!v2m->domain) {
+ pr_err("Failed to create GICv2m domain\n");
+ ret = -ENOMEM;
+ goto err_free_bm;
+ }
+
+ v2m->domain->parent = parent;
+ v2m->mchip.of_node = node;
+ v2m->mchip.domain = pci_msi_create_irq_domain(node,
+ &gicv2m_msi_domain_info,
+ v2m->domain);
+ if (!v2m->mchip.domain) {
+ pr_err("Failed to create MSI domain\n");
+ ret = -ENOMEM;
+ goto err_free_domains;
+ }
+
+ spin_lock_init(&v2m->msi_cnt_lock);
+
+ ret = of_pci_msi_chip_add(&v2m->mchip);
+ if (ret) {
+ pr_err("Failed to add msi_chip.\n");
+ goto err_free_domains;
+ }
+
+ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
+ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
+ v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
+
+ return 0;
+
+err_free_domains:
+ if (v2m->mchip.domain)
+ irq_domain_remove(v2m->mchip.domain);
+ if (v2m->domain)
+ irq_domain_remove(v2m->domain);
+err_free_bm:
+ kfree(v2m->bm);
+err_iounmap:
+ iounmap(v2m->base);
+err_free_v2m:
+ kfree(v2m);
+ return ret;
+}
+
+static struct of_device_id gicv2m_device_id[] = {
+ { .compatible = "arm,gic-v2m-frame", },
+ {},
+};
+
+int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
+{
+ int ret = 0;
+ struct device_node *child;
+
+ for (child = of_find_matching_node(node, gicv2m_device_id); child;
+ child = of_find_matching_node(child, gicv2m_device_id)) {
+ if (!of_find_property(child, "msi-controller", NULL))
+ continue;
+
+ ret = gicv2m_init_one(child, parent);
+ if (ret) {
+ of_node_put(node);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
new file mode 100644
index 000000000000..86e4684adeb1
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
+
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+
+/*
+ * Collection structure - just an ID, and a redistributor address to
+ * ping. We use one per CPU as a bag of interrupts assigned to this
+ * CPU.
+ */
+struct its_collection {
+ u64 target_address;
+ u16 col_id;
+};
+
+/*
+ * The ITS structure - contains most of the infrastructure, with the
+ * msi_controller, the command queue, the collections, and the list of
+ * devices writing to it.
+ */
+struct its_node {
+ raw_spinlock_t lock;
+ struct list_head entry;
+ struct msi_controller msi_chip;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long phys_base;
+ struct its_cmd_block *cmd_base;
+ struct its_cmd_block *cmd_write;
+ void *tables[GITS_BASER_NR_REGS];
+ struct its_collection *collections;
+ struct list_head its_device_list;
+ u64 flags;
+ u32 ite_size;
+};
+
+#define ITS_ITT_ALIGN SZ_256
+
+/*
+ * The ITS view of a device - belongs to an ITS, a collection, owns an
+ * interrupt translation table, and a list of interrupts.
+ */
+struct its_device {
+ struct list_head entry;
+ struct its_node *its;
+ struct its_collection *collection;
+ void *itt;
+ unsigned long *lpi_map;
+ irq_hw_number_t lpi_base;
+ int nr_lpis;
+ u32 nr_ites;
+ u32 device_id;
+};
+
+static LIST_HEAD(its_nodes);
+static DEFINE_SPINLOCK(its_lock);
+static struct device_node *gic_root_node;
+static struct rdists *gic_rdists;
+
+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+
+/*
+ * ITS command descriptors - parameters to be encoded in a command
+ * block.
+ */
+struct its_cmd_desc {
+ union {
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_inv_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_int_cmd;
+
+ struct {
+ struct its_device *dev;
+ int valid;
+ } its_mapd_cmd;
+
+ struct {
+ struct its_collection *col;
+ int valid;
+ } its_mapc_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 phys_id;
+ u32 event_id;
+ } its_mapvi_cmd;
+
+ struct {
+ struct its_device *dev;
+ struct its_collection *col;
+ u32 id;
+ } its_movi_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
+ } its_discard_cmd;
+
+ struct {
+ struct its_collection *col;
+ } its_invall_cmd;
+ };
+};
+
+/*
+ * The ITS command block, which is what the ITS actually parses.
+ */
+struct its_cmd_block {
+ u64 raw_cmd[4];
+};
+
+#define ITS_CMD_QUEUE_SZ SZ_64K
+#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
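
A quick standalone check of the queue geometry implied by the two defines above: four 64-bit words per command gives 32-byte commands, so a 64 KiB queue holds 2048 of them:

#include <assert.h>
#include <stdint.h>

struct cmd_block {			/* mirrors struct its_cmd_block */
	uint64_t raw_cmd[4];
};

int main(void)
{
	assert(sizeof(struct cmd_block) == 32);
	assert((64 * 1024) / sizeof(struct cmd_block) == 2048);
	return 0;
}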
+
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
+ struct its_cmd_desc *);
+
+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
+{
+ cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] |= cmd_nr;
+}
+
+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
+{
+ cmd->raw_cmd[0] &= ~(0xffffUL << 32);
+ cmd->raw_cmd[0] |= ((u64)devid) << 32;
+}
+
+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
+{
+ cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] |= id;
+}
+
+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
+{
+ cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
+}
+
+static void its_encode_size(struct its_cmd_block *cmd, u8 size)
+{
+ cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] |= size & 0x1f;
+}
+
+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
+{
+ cmd->raw_cmd[2] &= ~0xffffffffffffUL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+}
+
+static void its_encode_valid(struct its_cmd_block *cmd, int valid)
+{
+ cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
+}
+
+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
+{
+ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+}
+
+static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
+{
+ cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] |= col;
+}
+
+static inline void its_fixup_cmd(struct its_cmd_block *cmd)
+{
+ /* Let's fixup BE commands */
+ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+}
+
+static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ unsigned long itt_addr;
+ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
+
+ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
+ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
+
+ its_encode_cmd(cmd, GITS_CMD_MAPD);
+ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
+ its_encode_size(cmd, size - 1);
+ its_encode_itt(cmd, itt_addr);
+ its_encode_valid(cmd, desc->its_mapd_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapd_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MAPC);
+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
+ its_encode_valid(cmd, desc->its_mapc_cmd.valid);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapc_cmd.col;
+}
+
+static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MAPVI);
+ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
+ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
+ its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_mapvi_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_MOVI);
+ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_movi_cmd.id);
+ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_movi_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_DISCARD);
+ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_discard_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_inv_cmd.dev->collection;
+}
+
+static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_INVALL);
+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+
+ its_fixup_cmd(cmd);
+
+ return NULL;
+}
+
+static u64 its_cmd_ptr_to_offset(struct its_node *its,
+ struct its_cmd_block *ptr)
+{
+ return (ptr - its->cmd_base) * sizeof(*ptr);
+}
+
+static int its_queue_full(struct its_node *its)
+{
+ int widx;
+ int ridx;
+
+ widx = its->cmd_write - its->cmd_base;
+ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
+
+ /* This is incredibly unlikely to happen, unless the ITS locks up. */
+ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
+ return 1;
+
+ return 0;
+}
+
+static struct its_cmd_block *its_allocate_entry(struct its_node *its)
+{
+ struct its_cmd_block *cmd;
+ u32 count = 1000000; /* 1s! */
+
+ while (its_queue_full(its)) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue not draining\n");
+ return NULL;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+
+ cmd = its->cmd_write++;
+
+ /* Handle queue wrapping */
+ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
+ its->cmd_write = its->cmd_base;
+
+ return cmd;
+}
+
+static struct its_cmd_block *its_post_commands(struct its_node *its)
+{
+ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
+
+ writel_relaxed(wr, its->base + GITS_CWRITER);
+
+ return its->cmd_write;
+}
+
+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+{
+ /*
+ * Make sure the commands written to memory are observable by
+ * the ITS.
+ */
+ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
+ __flush_dcache_area(cmd, sizeof(*cmd));
+ else
+ dsb(ishst);
+}
+
+static void its_wait_for_range_completion(struct its_node *its,
+ struct its_cmd_block *from,
+ struct its_cmd_block *to)
+{
+ u64 rd_idx, from_idx, to_idx;
+ u32 count = 1000000; /* 1s! */
+
+ from_idx = its_cmd_ptr_to_offset(its, from);
+ to_idx = its_cmd_ptr_to_offset(its, to);
+
+ while (1) {
+ rd_idx = readl_relaxed(its->base + GITS_CREADR);
+ if (rd_idx >= to_idx || rd_idx < from_idx)
+ break;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("ITS queue timeout\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+}
+
+static void its_send_single_command(struct its_node *its,
+ its_cmd_builder_t builder,
+ struct its_cmd_desc *desc)
+{
+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
+ struct its_collection *sync_col;
+
+ raw_spin_lock(&its->lock);
+
+ cmd = its_allocate_entry(its);
+ if (!cmd) { /* We're soooooo screwed... */
+ pr_err_ratelimited("ITS can't allocate, dropping command\n");
+ raw_spin_unlock(&its->lock);
+ return;
+ }
+ sync_col = builder(cmd, desc);
+ its_flush_cmd(its, cmd);
+
+ if (sync_col) {
+ sync_cmd = its_allocate_entry(its);
+ if (!sync_cmd) {
+ pr_err_ratelimited("ITS can't SYNC, skipping\n");
+ goto post;
+ }
+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+ its_encode_target(sync_cmd, sync_col->target_address);
+ its_fixup_cmd(sync_cmd);
+ its_flush_cmd(its, sync_cmd);
+ }
+
+post:
+ next_cmd = its_post_commands(its);
+ raw_spin_unlock(&its->lock);
+
+ its_wait_for_range_completion(its, cmd, next_cmd);
+}
+
+static void its_send_inv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_command(dev->its, its_build_inv_cmd, &desc);
+}
+
+static void its_send_mapd(struct its_device *dev, int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapd_cmd.dev = dev;
+ desc.its_mapd_cmd.valid = !!valid;
+
+ its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
+}
+
+static void its_send_mapc(struct its_node *its, struct its_collection *col,
+ int valid)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapc_cmd.col = col;
+ desc.its_mapc_cmd.valid = !!valid;
+
+ its_send_single_command(its, its_build_mapc_cmd, &desc);
+}
+
+static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_mapvi_cmd.dev = dev;
+ desc.its_mapvi_cmd.phys_id = irq_id;
+ desc.its_mapvi_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
+}
+
+static void its_send_movi(struct its_device *dev,
+ struct its_collection *col, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_movi_cmd.dev = dev;
+ desc.its_movi_cmd.col = col;
+ desc.its_movi_cmd.id = id;
+
+ its_send_single_command(dev->its, its_build_movi_cmd, &desc);
+}
+
+static void its_send_discard(struct its_device *dev, u32 id)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_discard_cmd.dev = dev;
+ desc.its_discard_cmd.event_id = id;
+
+ its_send_single_command(dev->its, its_build_discard_cmd, &desc);
+}
+
+static void its_send_invall(struct its_node *its, struct its_collection *col)
+{
+ struct its_cmd_desc desc;
+
+ desc.its_invall_cmd.col = col;
+
+ its_send_single_command(its, its_build_invall_cmd, &desc);
+}
+
+/*
+ * irqchip functions - these mostly assume MSI.
+ */
+
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->lpi_base;
+}
+
+static void lpi_set_config(struct irq_data *d, bool enable)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = d->hwirq;
+ u32 id = its_get_event_id(d);
+ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+
+ if (enable)
+ *cfg |= LPI_PROP_ENABLED;
+ else
+ *cfg &= ~LPI_PROP_ENABLED;
+
+ /*
+ * Make the above write visible to the redistributors.
+ * And yes, we're flushing exactly: One. Single. Byte.
+ * Humpf...
+ */
+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
+ __flush_dcache_area(cfg, sizeof(*cfg));
+ else
+ dsb(ishst);
+ its_send_inv(its_dev, id);
+}
+
+static void its_mask_irq(struct irq_data *d)
+{
+ lpi_set_config(d, false);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+ lpi_set_config(d, true);
+}
+
+static void its_eoi_irq(struct irq_data *d)
+{
+ gic_write_eoir(d->hwirq);
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_collection *target_col;
+ u32 id = its_get_event_id(d);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ target_col = &its_dev->its->collections[cpu];
+ its_send_movi(its_dev, target_col, id);
+ its_dev->collection = target_col;
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+ u64 addr;
+
+ its = its_dev->its;
+ addr = its->phys_base + GITS_TRANSLATER;
+
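+ /*
+ * The doorbell is the GITS_TRANSLATER register of this ITS; the MSI
+ * payload is the per-device event ID, which the ITS translates into
+ * an LPI.
+ */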
+ msg->address_lo = addr & ((1UL << 32) - 1);
+ msg->address_hi = addr >> 32;
+ msg->data = its_get_event_id(d);
+}
+
+static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+ .irq_eoi = its_eoi_irq,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+};
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-MSI",
+ .irq_unmask = its_unmask_msi_irq,
+ .irq_mask = its_mask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+/*
+ * How we allocate LPIs:
+ *
+ * The GIC has id_bits bits for interrupt identifiers. From there, we
+ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
+ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
+ * bits to the right.
+ *
+ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ */
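+/*
+ * For example, with id_bits = 16: (1UL << 16) = 65536 IDs, minus the
+ * 8192 reserved ones, leaves 57344 LPIs, i.e. 57344 >> 5 = 1792 chunks
+ * of 32 LPIs each.
+ */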
+#define IRQS_PER_CHUNK_SHIFT 5
+#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
+
+static unsigned long *lpi_bitmap;
+static u32 lpi_chunks;
+static DEFINE_SPINLOCK(lpi_lock);
+
+static int its_lpi_to_chunk(int lpi)
+{
+ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+}
+
+static int its_chunk_to_lpi(int chunk)
+{
+ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+}
+
+static int its_lpi_init(u32 id_bits)
+{
+ lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+
+ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
+ GFP_KERNEL);
+ if (!lpi_bitmap) {
+ lpi_chunks = 0;
+ return -ENOMEM;
+ }
+
+ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
+ return 0;
+}
+
+static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int chunk_id;
+ int nr_chunks;
+ int i;
+
+ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+
+ spin_lock(&lpi_lock);
+
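+ /*
+ * Look for a contiguous run of free chunks; if no run of the
+ * requested size exists, shrink the request one chunk at a time
+ * and retry.
+ */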
+ do {
+ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
+ 0, nr_chunks, 0);
+ if (chunk_id < lpi_chunks)
+ break;
+
+ nr_chunks--;
+ } while (nr_chunks > 0);
+
+ if (!nr_chunks)
+ goto out;
+
+ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
+ GFP_ATOMIC);
+ if (!bitmap)
+ goto out;
+
+ for (i = 0; i < nr_chunks; i++)
+ set_bit(chunk_id + i, lpi_bitmap);
+
+ *base = its_chunk_to_lpi(chunk_id);
+ *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+
+out:
+ spin_unlock(&lpi_lock);
+
+ return bitmap;
+}
+
+static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+{
+ int lpi;
+
+ spin_lock(&lpi_lock);
+
+ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
+ int chunk = its_lpi_to_chunk(lpi);
+ BUG_ON(chunk > lpi_chunks);
+ if (test_bit(chunk, lpi_bitmap)) {
+ clear_bit(chunk, lpi_bitmap);
+ } else {
+ pr_err("Bad LPI chunk %d\n", chunk);
+ }
+ }
+
+ spin_unlock(&lpi_lock);
+
+ kfree(bitmap);
+}
+
+/*
+ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_PROPBASE_SZ SZ_64K
+#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K)
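+/*
+ * That is 64K / 8 = 8kB of pending bits for the LPIs themselves, plus
+ * 8192 / 8 = 1kB for interrupt IDs 0-8191.
+ */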
+
+/*
+ * This is how many bits of ID we need, including the 8192 non-LPI ones.
+ */
+#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K)
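+/*
+ * With a 64kB property table this evaluates to ilog2(72K) = 16, i.e. a
+ * 65536-entry ID space of which IDs 8192-65535 are LPIs.
+ */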
+
+#define LPI_PROP_DEFAULT_PRIO 0xa0
+
+static int __init its_alloc_lpi_tables(void)
+{
+ phys_addr_t paddr;
+
+ gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
+ get_order(LPI_PROPBASE_SZ));
+ if (!gic_rdists->prop_page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ paddr = page_to_phys(gic_rdists->prop_page);
+ pr_info("GIC: using LPI property table @%pa\n", &paddr);
+
+ /* Priority 0xa0, Group-1, disabled */
+ memset(page_address(gic_rdists->prop_page),
+ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+ LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+
+ return 0;
+}
+
+static const char *its_base_type_string[] = {
+ [GITS_BASER_TYPE_DEVICE] = "Devices",
+ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
+ [GITS_BASER_TYPE_CPU] = "Physical CPUs",
+ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
+ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
+ [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
+ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
+};
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i]) {
+ free_page((unsigned long)its->tables[i]);
+ its->tables[i] = NULL;
+ }
+ }
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ int err;
+ int i;
+ int psz = PAGE_SIZE;
+ u64 shr = GITS_BASER_InnerShareable;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
+ u64 type = GITS_BASER_TYPE(val);
+ u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+ u64 tmp;
+ void *base;
+
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ /* We're lazy and only allocate a single page for now */
+ base = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!base) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ its->tables[i] = base;
+
+retry_baser:
+ val = (virt_to_phys(base) |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ GITS_BASER_WaWb |
+ shr |
+ GITS_BASER_VALID);
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ val |= (PAGE_SIZE / psz) - 1;
+
+ writeq_relaxed(val, its->base + GITS_BASER + i * 8);
+ tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is
+ * likely the only thing this ITS supports.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ goto retry_baser;
+ }
+
+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+ /*
+ * Page size didn't stick. Let's try a smaller
+ * size and retry. If we reach 4K, then
+ * something is horribly wrong...
+ */
+ switch (psz) {
+ case SZ_16K:
+ psz = SZ_4K;
+ goto retry_baser;
+ case SZ_64K:
+ psz = SZ_16K;
+ goto retry_baser;
+ }
+ }
+
+ if (val != tmp) {
+ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
+ its->msi_chip.of_node->full_name, i,
+ (unsigned long) val, (unsigned long) tmp);
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
+ (int)(PAGE_SIZE / entry_size),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+ }
+
+ return 0;
+
+out_free:
+ its_free_tables(its);
+
+ return err;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
+ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
+ GFP_KERNEL);
+ if (!its->collections)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void its_cpu_init_lpis(void)
+{
+ void __iomem *rbase = gic_data_rdist_rd_base();
+ struct page *pend_page;
+ u64 val, tmp;
+
+ /* If we didn't allocate the pending table yet, do it now */
+ pend_page = gic_data_rdist()->pend_page;
+ if (!pend_page) {
+ phys_addr_t paddr;
+ /*
+ * The pending pages have to be at least 64kB aligned,
+ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+ */
+ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
+ get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n",
+ smp_processor_id());
+ return;
+ }
+
+ /* Make sure the GIC will observe the zero-ed page */
+ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+
+ paddr = page_to_phys(pend_page);
+ pr_info("CPU%d: using LPI pending table @%pa\n",
+ smp_processor_id(), &paddr);
+ gic_data_rdist()->pend_page = pend_page;
+ }
+
+ /* Disable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val &= ~GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ /*
+ * Make sure any change to the table is observable by the GIC.
+ */
+ dsb(sy);
+
+ /* set PROPBASE */
+ val = (page_to_phys(gic_rdists->prop_page) |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_WaWb |
+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+ writeq_relaxed(val, rbase + GICR_PROPBASER);
+ tmp = readq_relaxed(rbase + GICR_PROPBASER);
+
+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+ pr_info_once("GIC: using cache flushing for LPI property table\n");
+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+ }
+
+ /* set PENDBASE */
+ val = (page_to_phys(pend_page) |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_WaWb);
+
+ writeq_relaxed(val, rbase + GICR_PENDBASER);
+
+ /* Enable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+}
+
+static void its_cpu_init_collection(void)
+{
+ struct its_node *its;
+ int cpu;
+
+ spin_lock(&its_lock);
+ cpu = smp_processor_id();
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 target;
+
+ /*
+ * We now have to bind each collection to its target
+ * redistributor.
+ */
+ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ /*
+ * This ITS wants the physical address of the
+ * redistributor.
+ */
+ target = gic_data_rdist()->phys_base;
+ } else {
+ /*
+ * This ITS wants a linear CPU number.
+ */
+ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = GICR_TYPER_CPU_NUMBER(target);
+ }
+
+ /* Perform collection mapping */
+ its->collections[cpu].target_address = target;
+ its->collections[cpu].col_id = cpu;
+
+ its_send_mapc(its, &its->collections[cpu], 1);
+ its_send_invall(its, &its->collections[cpu]);
+ }
+
+ spin_unlock(&its_lock);
+}
+
+static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
+{
+ struct its_device *its_dev = NULL, *tmp;
+
+ raw_spin_lock(&its->lock);
+
+ list_for_each_entry(tmp, &its->its_device_list, entry) {
+ if (tmp->device_id == dev_id) {
+ its_dev = tmp;
+ break;
+ }
+ }
+
+ raw_spin_unlock(&its->lock);
+
+ return its_dev;
+}
+
+static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ int nvecs)
+{
+ struct its_device *dev;
+ unsigned long *lpi_map;
+ void *itt;
+ int lpi_base;
+ int nr_lpis;
+ int nr_ites;
+ int cpu;
+ int sz;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ /*
+ * At least one bit of EventID is being used, hence a minimum
+ * of two entries. No, the architecture doesn't let you
+ * express an ITT with a single entry.
+ */
+ nr_ites = max(2, roundup_pow_of_two(nvecs));
+ sz = nr_ites * its->ite_size;
+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
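+ /*
+ * The extra ITS_ITT_ALIGN - 1 bytes presumably leave room to round
+ * the ITT base address up to the required alignment, as kmalloc()
+ * only guarantees its own minimum alignment.
+ */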
+ itt = kmalloc(sz, GFP_KERNEL);
+ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+
+ if (!dev || !itt || !lpi_map) {
+ kfree(dev);
+ kfree(itt);
+ kfree(lpi_map);
+ return NULL;
+ }
+
+ dev->its = its;
+ dev->itt = itt;
+ dev->nr_ites = nr_ites;
+ dev->lpi_map = lpi_map;
+ dev->lpi_base = lpi_base;
+ dev->nr_lpis = nr_lpis;
+ dev->device_id = dev_id;
+ INIT_LIST_HEAD(&dev->entry);
+
+ raw_spin_lock(&its->lock);
+ list_add(&dev->entry, &its->its_device_list);
+ raw_spin_unlock(&its->lock);
+
+ /* Bind the device to the first possible CPU */
+ cpu = cpumask_first(cpu_online_mask);
+ dev->collection = &its->collections[cpu];
+
+ /* Map device to its ITT */
+ its_send_mapd(dev, 1);
+
+ return dev;
+}
+
+static void its_free_device(struct its_device *its_dev)
+{
+ raw_spin_lock(&its_dev->its->lock);
+ list_del(&its_dev->entry);
+ raw_spin_unlock(&its_dev->its->lock);
+ kfree(its_dev->itt);
+ kfree(its_dev);
+}
+
+static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+{
+ int idx;
+
+ idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
+ if (idx == dev->nr_lpis)
+ return -ENOSPC;
+
+ *hwirq = dev->lpi_base + idx;
+ set_bit(idx, dev->lpi_map);
+
+ return 0;
+}
+
+static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev;
+ struct its_node *its;
+ u32 dev_id;
+ struct its_device *its_dev;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ pdev = to_pci_dev(dev);
+ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ its = domain->parent->host_data;
+
+ its_dev = its_find_device(its, dev_id);
+ if (WARN_ON(its_dev))
+ return -EINVAL;
+
+ its_dev = its_create_device(its, dev_id, nvec);
+ if (!its_dev)
+ return -ENOMEM;
+
+ dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+
+ info->scratchpad[0].ptr = its_dev;
+ info->scratchpad[1].ptr = dev;
+ return 0;
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+ .msi_prepare = its_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &its_pci_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static int its_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct of_phandle_args args;
+
+ args.np = domain->parent->of_node;
+ args.args_count = 3;
+ args.args[0] = GIC_IRQ_TYPE_LPI;
+ args.args[1] = hwirq;
+ args.args[2] = IRQ_TYPE_EDGE_RISING;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
+}
+
+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ msi_alloc_info_t *info = args;
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+ irq_hw_number_t hwirq;
+ int err;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_alloc_device_irq(its_dev, &hwirq);
+ if (err)
+ return err;
+
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq, &its_irq_chip, its_dev);
+ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
+ (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+ }
+
+ return 0;
+}
+
+static void its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapvi(its_dev, d->hwirq, event);
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ /* Stop the delivery of interrupts */
+ its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ u32 event = its_get_event_id(data);
+
+ /* Mark interrupt index as unused */
+ clear_bit(event, its_dev->lpi_map);
+
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
+
+ /* If all interrupts have been freed, start mopping the floor */
+ if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
+ its_lpi_free(its_dev->lpi_map,
+ its_dev->lpi_base,
+ its_dev->nr_lpis);
+
+ /* Unmap device/itt */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+ .alloc = its_irq_domain_alloc,
+ .free = its_irq_domain_free,
+ .activate = its_irq_domain_activate,
+ .deactivate = its_irq_domain_deactivate,
+};
+
+static int its_probe(struct device_node *node, struct irq_domain *parent)
+{
+ struct resource res;
+ struct its_node *its;
+ void __iomem *its_base;
+ u32 val;
+ u64 baser, tmp;
+ int err;
+
+ err = of_address_to_resource(node, 0, &res);
+ if (err) {
+ pr_warn("%s: no regs?\n", node->full_name);
+ return -ENXIO;
+ }
+
+ its_base = ioremap(res.start, resource_size(&res));
+ if (!its_base) {
+ pr_warn("%s: unable to map registers\n", node->full_name);
+ return -ENOMEM;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("%s: no ITS detected, giving up\n", node->full_name);
+ err = -ENODEV;
+ goto out_unmap;
+ }
+
+ pr_info("ITS: %s\n", node->full_name);
+
+ its = kzalloc(sizeof(*its), GFP_KERNEL);
+ if (!its) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ raw_spin_lock_init(&its->lock);
+ INIT_LIST_HEAD(&its->entry);
+ INIT_LIST_HEAD(&its->its_device_list);
+ its->base = its_base;
+ its->phys_base = res.start;
+ its->msi_chip.of_node = node;
+ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+
+ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
+ if (!its->cmd_base) {
+ err = -ENOMEM;
+ goto out_free_its;
+ }
+ its->cmd_write = its->cmd_base;
+
+ err = its_alloc_tables(its);
+ if (err)
+ goto out_free_cmd;
+
+ err = its_alloc_collections(its);
+ if (err)
+ goto out_free_tables;
+
+ baser = (virt_to_phys(its->cmd_base) |
+ GITS_CBASER_WaWb |
+ GITS_CBASER_InnerShareable |
+ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
+ GITS_CBASER_VALID);
+
+ writeq_relaxed(baser, its->base + GITS_CBASER);
+ tmp = readq_relaxed(its->base + GITS_CBASER);
+ writeq_relaxed(0, its->base + GITS_CWRITER);
+ writel_relaxed(1, its->base + GITS_CTLR);
+
+ if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
+ pr_info("ITS: using cache flushing for cmd queue\n");
+ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
+ }
+
+ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
+ its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
+ if (!its->domain) {
+ err = -ENOMEM;
+ goto out_free_tables;
+ }
+
+ its->domain->parent = parent;
+
+ its->msi_chip.domain = pci_msi_create_irq_domain(node,
+ &its_pci_msi_domain_info,
+ its->domain);
+ if (!its->msi_chip.domain) {
+ err = -ENOMEM;
+ goto out_free_domains;
+ }
+
+ err = of_pci_msi_chip_add(&its->msi_chip);
+ if (err)
+ goto out_free_domains;
+ }
+
+ spin_lock(&its_lock);
+ list_add(&its->entry, &its_nodes);
+ spin_unlock(&its_lock);
+
+ return 0;
+
+out_free_domains:
+ if (its->msi_chip.domain)
+ irq_domain_remove(its->msi_chip.domain);
+ if (its->domain)
+ irq_domain_remove(its->domain);
+out_free_tables:
+ its_free_tables(its);
+out_free_cmd:
+ kfree(its->cmd_base);
+out_free_its:
+ kfree(its);
+out_unmap:
+ iounmap(its_base);
+ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
+ return err;
+}
+
+static bool gic_rdists_supports_plpis(void)
+{
+ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+}
+
+int its_cpu_init(void)
+{
+ if (!gic_rdists_supports_plpis()) {
+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+ return -ENXIO;
+ }
+
+ if (!list_empty(&its_nodes)) {
+ its_cpu_init_lpis();
+ its_cpu_init_collection();
+ }
+
+ return 0;
+}
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *parent_domain)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ its_probe(np, parent_domain);
+ }
+
+ if (list_empty(&its_nodes)) {
+ pr_warn("ITS: No ITS available, not enabling LPIs\n");
+ return -ENXIO;
+ }
+
+ gic_rdists = rdists;
+ gic_root_node = node;
+
+ its_alloc_lpi_tables();
+ its_lpi_init(rdists->id_bits);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index aa17ae805a70..1a146ccee701 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -34,20 +34,25 @@
#include "irq-gic-common.h"
#include "irqchip.h"
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+};
+
struct gic_chip_data {
void __iomem *dist_base;
- void __iomem **redist_base;
- void __iomem * __percpu *rdist;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
struct irq_domain *domain;
u64 redist_stride;
- u32 redist_regions;
+ u32 nr_redist_regions;
unsigned int irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;
-#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
-#define gic_data_rdist_rd_base() (*gic_data_rdist())
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
/* Our default, arbitrary priority value. Linux only uses one anyway. */
@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
if (d->hwirq <= 1023) /* SPI -> dist_base */
return gic_data.dist_base;
- if (d->hwirq >= 8192)
- BUG(); /* LPI Detected!!! */
-
return NULL;
}
@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
do {
irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020)) {
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
- WARN_ONCE(true, "Unexpected SPI received!\n");
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_write_eoir(irqnr);
}
continue;
@@ -333,8 +335,8 @@ static int gic_populate_rdist(void)
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
- for (i = 0; i < gic_data.redist_regions; i++) {
- void __iomem *ptr = gic_data.redist_base[i];
+ for (i = 0; i < gic_data.nr_redist_regions; i++) {
+ void __iomem *ptr = gic_data.redist_regions[i].redist_base;
u32 reg;
reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -347,10 +349,13 @@ static int gic_populate_rdist(void)
do {
typer = readq_relaxed(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
+ u64 offset = ptr - gic_data.redist_regions[i].redist_base;
gic_data_rdist_rd_base() = ptr;
- pr_info("CPU%d: found redistributor %llx @%p\n",
+ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
+ pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
smp_processor_id(),
- (unsigned long long)mpidr, ptr);
+ (unsigned long long)mpidr,
+ i, &gic_data_rdist()->phys_base);
return 0;
}
@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void)
gic_write_grpen1(1);
}
+static int gic_dist_supports_lpis(void)
+{
+ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
+}
+
static void gic_cpu_init(void)
{
void __iomem *rbase;
@@ -399,6 +409,10 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+ /* Give LPIs a spin */
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ its_cpu_init();
+
/* initialise system registers */
gic_cpu_sys_reg_init();
}
@@ -585,26 +599,43 @@ static struct irq_chip gic_chip = {
.irq_set_affinity = gic_set_affinity,
};
+#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
/* SGIs are private to the core kernel */
if (hw < 16)
return -EPERM;
+ /* Nothing here */
+ if (hw >= gic_data.irq_nr && hw < 8192)
+ return -EPERM;
+ /* Off limits */
+ if (hw >= GIC_ID_NR)
+ return -EPERM;
+
/* PPIs */
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
}
/* SPIs */
if (hw >= 32 && hw < gic_data.irq_nr) {
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_fasteoi_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- irq_set_chip_data(irq, d->host_data);
+ /* LPIs */
+ if (hw >= 8192 && hw < GIC_ID_NR) {
+ if (!gic_dist_supports_lpis())
+ return -EPERM;
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
return 0;
}
@@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
case 1: /* PPI */
*out_hwirq = intspec[1] + 16;
break;
+ case GIC_IRQ_TYPE_LPI: /* LPI */
+ *out_hwirq = intspec[1];
+ break;
default:
return -EINVAL;
}
@@ -633,17 +667,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
return 0;
}
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct of_phandle_args *irq_data = arg;
+
+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ gic_irq_domain_map(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
static const struct irq_domain_ops gic_irq_domain_ops = {
- .map = gic_irq_domain_map,
.xlate = gic_irq_domain_xlate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
};
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
- void __iomem **redist_base;
+ struct redist_region *rdist_regs;
u64 redist_stride;
- u32 redist_regions;
+ u32 nr_redist_regions;
+ u32 typer;
u32 reg;
int gic_irqs;
int err;
@@ -664,54 +731,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
goto out_unmap_dist;
}
- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
- redist_regions = 1;
+ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
+ nr_redist_regions = 1;
- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
- if (!redist_base) {
+ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
+ if (!rdist_regs) {
err = -ENOMEM;
goto out_unmap_dist;
}
- for (i = 0; i < redist_regions; i++) {
- redist_base[i] = of_iomap(node, 1 + i);
- if (!redist_base[i]) {
+ for (i = 0; i < nr_redist_regions; i++) {
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(node, 1 + i, &res);
+ rdist_regs[i].redist_base = of_iomap(node, 1 + i);
+ if (ret || !rdist_regs[i].redist_base) {
pr_err("%s: couldn't map region %d\n",
node->full_name, i);
err = -ENODEV;
goto out_unmap_rdist;
}
+ rdist_regs[i].phys_base = res.start;
}
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
gic_data.dist_base = dist_base;
- gic_data.redist_base = redist_base;
- gic_data.redist_regions = redist_regions;
+ gic_data.redist_regions = rdist_regs;
+ gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
- gic_irqs = (gic_irqs + 1) * 32;
+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
gic_data.irq_nr = gic_irqs;
gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
&gic_data);
- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
goto out_free;
}
set_handle_irq(gic_handle_irq);
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ its_init(node, &gic_data.rdists, gic_data.domain);
+
gic_smp_init();
gic_dist_init();
gic_cpu_init();
@@ -722,12 +798,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
out_free:
if (gic_data.domain)
irq_domain_remove(gic_data.domain);
- free_percpu(gic_data.rdist);
+ free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
- for (i = 0; i < redist_regions; i++)
- if (redist_base[i])
- iounmap(redist_base[i]);
- kfree(redist_base);
+ for (i = 0; i < nr_redist_regions; i++)
+ if (rdist_regs[i].redist_base)
+ iounmap(rdist_regs[i].redist_base);
+ kfree(rdist_regs);
out_unmap_dist:
iounmap(dist_base);
return err;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7f9be0785c6a..d617ee5a3d8a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -788,17 +788,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
if (hw < 32) {
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_fasteoi_irq);
+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
gic_routable_irq_domain_ops->map(d, irq, hw);
}
- irq_set_chip_data(irq, d->host_data);
return 0;
}
@@ -858,6 +857,31 @@ static struct notifier_block gic_cpu_notifier = {
};
#endif
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct of_phandle_args *irq_data = arg;
+
+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
+ irq_data->args_count, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ gic_irq_domain_map(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
+ .xlate = gic_irq_domain_xlate,
+ .alloc = gic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.unmap = gic_irq_domain_unmap,
@@ -948,18 +972,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_cpu_map[i] = 0xff;
/*
- * For primary GICs, skip over SGIs.
- * For secondary GICs, skip over PPIs, too.
- */
- if (gic_nr == 0 && (irq_start & 31) > 0) {
- hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- } else {
- hwirq_base = 32;
- }
-
- /*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
@@ -969,10 +981,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ if (node) { /* DT case */
+ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
+
+ if (!of_property_read_u32(node, "arm,routable-irqs",
+ &nr_routable_irqs)) {
+ ops = &gic_irq_domain_ops;
+ gic_irqs = nr_routable_irqs;
+ }
+
+ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
+ } else { /* Non-DT case */
+ /*
+ * For primary GICs, skip over SGIs.
+ * For secondary GICs, skip over PPIs, too.
+ */
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
+ }
+
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
- if (of_property_read_u32(node, "arm,routable-irqs",
- &nr_routable_irqs)) {
irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
numa_node_id());
if (IS_ERR_VALUE(irq_base)) {
@@ -983,10 +1016,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
hwirq_base, &gic_irq_domain_ops, gic);
- } else {
- gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
- &gic_irq_domain_ops,
- gic);
}
if (WARN_ON(!gic->domain))
@@ -1037,6 +1066,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
irq = irq_of_parse_and_map(node, 0);
gic_cascade_irq(gic_cnt, irq);
}
+
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_of_init(node, gic_data[gic_cnt].domain);
+
gic_cnt++;
return 0;
}
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 608abf9c9283..78e8b3ce5252 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -218,7 +218,6 @@ static struct platform_driver keystone_irq_device_driver = {
.remove = keystone_irq_remove,
.driver = {
.name = "keystone_irq",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(keystone_irq_dt_ids),
}
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
new file mode 100644
index 000000000000..2b0468e3df6a
--- /dev/null
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -0,0 +1,789 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bitmap.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/of_address.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips-cm.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+#include "irqchip.h"
+
+unsigned int gic_present;
+
+struct gic_pcpu_mask {
+ DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
+};
+
+static void __iomem *gic_base;
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static DEFINE_SPINLOCK(gic_lock);
+static struct irq_domain *gic_irq_domain;
+static int gic_shared_intrs;
+static int gic_vpes;
+static unsigned int gic_cpu_pin;
+static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+static void __gic_irq_dispatch(void);
+
+static inline unsigned int gic_read(unsigned int reg)
+{
+ return __raw_readl(gic_base + reg);
+}
+
+static inline void gic_write(unsigned int reg, unsigned int val)
+{
+ __raw_writel(val, gic_base + reg);
+}
+
+static inline void gic_update_bits(unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ unsigned int regval;
+
+ regval = gic_read(reg);
+ regval &= ~mask;
+ regval |= val;
+ gic_write(reg, regval);
+}
+
+static inline void gic_reset_mask(unsigned int intr)
+{
+ gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
+ 1 << GIC_INTR_BIT(intr));
+}
+
+static inline void gic_set_mask(unsigned int intr)
+{
+ gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
+ 1 << GIC_INTR_BIT(intr));
+}
+
+static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
+{
+ gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
+ GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
+ pol << GIC_INTR_BIT(intr));
+}
+
+static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
+{
+ gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
+ GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
+ trig << GIC_INTR_BIT(intr));
+}
+
+static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
+{
+ gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
+ 1 << GIC_INTR_BIT(intr),
+ dual << GIC_INTR_BIT(intr));
+}
+
+static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
+{
+ gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
+ GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
+}
+
+static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
+{
+ gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
+ GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
+}
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+cycle_t gic_read_count(void)
+{
+ unsigned int hi, hi2, lo;
+
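+ /*
+ * The 64-bit counter is read as two 32-bit halves; re-reading the
+ * high word catches a carry from the low word between the reads.
+ */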
+ do {
+ hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+ lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
+ hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+
+unsigned int gic_get_count_width(void)
+{
+ unsigned int bits, config;
+
+ config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
+ bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+ GIC_SH_CONFIG_COUNTBITS_SHF);
+
+ return bits;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+}
+
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+
+ local_irq_restore(flags);
+}
+
+cycle_t gic_read_compare(void)
+{
+ unsigned int hi, lo;
+
+ hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+ lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
+static bool gic_local_irq_is_routable(int intr)
+{
+ u32 vpe_ctl;
+
+ /* All local interrupts are routable in EIC mode. */
+ if (cpu_has_veic)
+ return true;
+
+ vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
+ case GIC_LOCAL_INT_PERFCTR:
+ return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
+ case GIC_LOCAL_INT_FDC:
+ return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
+ case GIC_LOCAL_INT_SWINT0:
+ case GIC_LOCAL_INT_SWINT1:
+ return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
+ default:
+ return true;
+ }
+}
+
+unsigned int gic_get_timer_pending(void)
+{
+ unsigned int vpe_pending;
+
+ vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+ return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
+}
+
+static void gic_bind_eic_interrupt(int irq, int set)
+{
+ /* Convert irq vector # to hw int # */
+ irq -= GIC_PIN_TO_VEC_OFFSET;
+
+ /* Set irq to use shadow set */
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
+ GIC_VPE_EIC_SS(irq), set);
+}
+
+void gic_send_ipi(unsigned int intr)
+{
+ gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
+}
+
+int gic_get_c0_compare_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
+}
+
+int gic_get_c0_perfcount_int(void)
+{
+ if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
+ /* Is the performance counter shared with the timer? */
+ if (cp0_perfcount_irq < 0)
+ return -1;
+ return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ }
+ return irq_create_mapping(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
+}
+
+static unsigned int gic_get_int(void)
+{
+ unsigned int i;
+ unsigned long *pcpu_mask;
+ unsigned long pending_reg, intrmask_reg;
+ DECLARE_BITMAP(pending, GIC_MAX_INTRS);
+ DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
+
+ /* Get per-cpu bitmaps */
+ pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+ pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
+ intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);
+
+ for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
+ pending[i] = gic_read(pending_reg);
+ intrmask[i] = gic_read(intrmask_reg);
+ pending_reg += 0x4;
+ intrmask_reg += 0x4;
+ }
+
+ bitmap_and(pending, pending, intrmask, gic_shared_intrs);
+ bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
+
+ return find_first_bit(pending, gic_shared_intrs);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
+}
+
+static void gic_ack_irq(struct irq_data *d)
+{
+ unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+ gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned long flags;
+ bool is_edge;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ gic_set_polarity(irq, GIC_POL_NEG);
+ gic_set_trigger(irq, GIC_TRIG_EDGE);
+ gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
+ is_edge = true;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ gic_set_polarity(irq, GIC_POL_POS);
+ gic_set_trigger(irq, GIC_TRIG_EDGE);
+ gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
+ is_edge = true;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ /* polarity is irrelevant in this case */
+ gic_set_trigger(irq, GIC_TRIG_EDGE);
+ gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
+ is_edge = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ gic_set_polarity(irq, GIC_POL_NEG);
+ gic_set_trigger(irq, GIC_TRIG_LEVEL);
+ gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
+ is_edge = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ default:
+ gic_set_polarity(irq, GIC_POL_POS);
+ gic_set_trigger(irq, GIC_TRIG_LEVEL);
+ gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
+ is_edge = false;
+ break;
+ }
+
+ if (is_edge) {
+ __irq_set_chip_handler_name_locked(d->irq,
+ &gic_edge_irq_controller,
+ handle_edge_irq, NULL);
+ } else {
+ __irq_set_chip_handler_name_locked(d->irq,
+ &gic_level_irq_controller,
+ handle_level_irq, NULL);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ cpumask_t tmp = CPU_MASK_NONE;
+ unsigned long flags;
+ int i;
+
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ if (cpus_empty(tmp))
+ return -EINVAL;
+
+ /* Assumption: cpumask refers to a single CPU */
+ spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+ gic_map_to_vpe(irq, first_cpu(tmp));
+
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+ cpumask_copy(d->affinity, cpumask);
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+#endif
+
+static struct irq_chip gic_level_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_set_type = gic_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+};
+
+static struct irq_chip gic_edge_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_ack = gic_ack_irq,
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_set_type = gic_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+};
+
+static unsigned int gic_get_local_int(void)
+{
+ unsigned long pending, masked;
+
+ pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+ masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
+
+ bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
+
+ return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
+}
+
+static void gic_mask_local_irq(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
+}
+
+static void gic_unmask_local_irq(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
+}
+
+static struct irq_chip gic_local_irq_controller = {
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq,
+ .irq_unmask = gic_unmask_local_irq,
+};
+
+static void gic_mask_local_irq_all_vpes(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for (i = 0; i < gic_vpes; i++) {
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for (i = 0; i < gic_vpes; i++) {
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+}
+
+static struct irq_chip gic_all_vpes_local_irq_controller = {
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+};
+
+static void __gic_irq_dispatch(void)
+{
+ unsigned int intr, virq;
+
+ while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
+ virq = irq_linear_revmap(gic_irq_domain,
+ GIC_LOCAL_TO_HWIRQ(intr));
+ do_IRQ(virq);
+ }
+
+ while ((intr = gic_get_int()) != gic_shared_intrs) {
+ virq = irq_linear_revmap(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
+ do_IRQ(virq);
+ }
+}
+
+static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ __gic_irq_dispatch();
+}
+
+#ifdef CONFIG_MIPS_GIC_IPI
+static int gic_resched_int_base;
+static int gic_call_int_base;
+
+unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
+{
+ return gic_resched_int_base + cpu;
+}
+
+unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
+{
+ return gic_call_int_base + cpu;
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI call"
+};
+
+static __init void gic_ipi_init_one(unsigned int intr, int cpu,
+ struct irqaction *action)
+{
+ int virq = irq_create_mapping(gic_irq_domain,
+ GIC_SHARED_TO_HWIRQ(intr));
+ int i;
+
+ gic_map_to_vpe(intr, cpu);
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(intr, pcpu_masks[i].pcpu_mask);
+ set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+
+ irq_set_handler(virq, handle_percpu_irq);
+ setup_irq(virq, action);
+}
+
+static __init void gic_ipi_init(void)
+{
+ int i;
+
+ /* Use the last 2 * nr_cpu_ids interrupts as IPIs */
+ gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
+ gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
+ gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
+ }
+}
+#else
+static inline void gic_ipi_init(void)
+{
+}
+#endif
+
+static void __init gic_basic_init(void)
+{
+ unsigned int i;
+
+ board_bind_eic_interrupt = &gic_bind_eic_interrupt;
+
+ /* Setup defaults */
+ for (i = 0; i < gic_shared_intrs; i++) {
+ gic_set_polarity(i, GIC_POL_POS);
+ gic_set_trigger(i, GIC_TRIG_LEVEL);
+ gic_reset_mask(i);
+ }
+
+ for (i = 0; i < gic_vpes; i++) {
+ unsigned int j;
+
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
+ if (!gic_local_irq_is_routable(j))
+ continue;
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
+ }
+ }
+}
+
+static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ int intr = GIC_HWIRQ_TO_LOCAL(hw);
+ int ret = 0;
+ int i;
+ unsigned long flags;
+
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+ /*
+ * HACK: These are all really percpu interrupts, but the rest
+ * of the MIPS kernel code does not use the percpu IRQ API for
+ * the CP0 timer and performance counter interrupts.
+ */
+ if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
+ irq_set_chip_and_handler(virq,
+ &gic_local_irq_controller,
+ handle_percpu_devid_irq);
+ irq_set_percpu_devid(virq);
+ } else {
+ irq_set_chip_and_handler(virq,
+ &gic_all_vpes_local_irq_controller,
+ handle_percpu_irq);
+ }
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for (i = 0; i < gic_vpes; i++) {
+ u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
+
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+ switch (intr) {
+ case GIC_LOCAL_INT_WD:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
+ break;
+ case GIC_LOCAL_INT_COMPARE:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
+ break;
+ case GIC_LOCAL_INT_TIMER:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
+ break;
+ case GIC_LOCAL_INT_PERFCTR:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
+ break;
+ case GIC_LOCAL_INT_SWINT0:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
+ break;
+ case GIC_LOCAL_INT_SWINT1:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
+ break;
+ case GIC_LOCAL_INT_FDC:
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
+ break;
+ default:
+ pr_err("Invalid local IRQ %d\n", intr);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return ret;
+}
+
+static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ int intr = GIC_HWIRQ_TO_SHARED(hw);
+ unsigned long flags;
+
+ irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+ handle_level_irq);
+
+ spin_lock_irqsave(&gic_lock, flags);
+ gic_map_to_pin(intr, gic_cpu_pin);
+ /* Map to VPE 0 by default */
+ gic_map_to_vpe(intr, 0);
+ set_bit(intr, pcpu_masks[0].pcpu_mask);
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+}
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
+ return gic_local_irq_domain_map(d, virq, hw);
+ return gic_shared_irq_domain_map(d, virq, hw);
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ if (intspec[0] == GIC_SHARED)
+ *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
+ else if (intspec[0] == GIC_LOCAL)
+ *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
+ else
+ return -EINVAL;
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
+};
+
+static void __init __gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size,
+ unsigned int cpu_vec, unsigned int irqbase,
+ struct device_node *node)
+{
+ unsigned int gicconfig;
+
+ gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
+
+ gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
+ gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+ GIC_SH_CONFIG_NUMINTRS_SHF;
+ gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
+
+ gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+ GIC_SH_CONFIG_NUMVPES_SHF;
+ gic_vpes = gic_vpes + 1;
+
+ if (cpu_has_veic) {
+ /* Always use vector 1 in EIC mode */
+ gic_cpu_pin = 0;
+ set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
+ __gic_irq_dispatch);
+ } else {
+ gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
+ gic_irq_dispatch);
+ }
+
+ gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
+ gic_shared_intrs, irqbase,
+ &gic_irq_domain_ops, NULL);
+ if (!gic_irq_domain)
+ panic("Failed to add GIC IRQ domain");
+
+ gic_basic_init();
+
+ gic_ipi_init();
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size,
+ unsigned int cpu_vec, unsigned int irqbase)
+{
+ __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
+}
+
+static int __init gic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ unsigned int cpu_vec, i = 0, reserved = 0;
+ phys_addr_t gic_base;
+ size_t gic_len;
+
+ /* Find the first available CPU vector. */
+ while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
+ i++, &cpu_vec))
+ reserved |= BIT(cpu_vec);
+ for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
+ if (!(reserved & BIT(cpu_vec)))
+ break;
+ }
+ if (cpu_vec == 8) {
+ pr_err("No CPU vectors available for GIC\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ /*
+ * Probe the CM for the GIC base address if not specified
+ * in the device-tree.
+ */
+ if (mips_cm_present()) {
+ gic_base = read_gcr_gic_base() &
+ ~CM_GCR_GIC_BASE_GICEN_MSK;
+ gic_len = 0x20000;
+ } else {
+ pr_err("Failed to get GIC memory range\n");
+ return -ENODEV;
+ }
+ } else {
+ gic_base = res.start;
+ gic_len = resource_size(&res);
+ }
+
+ if (mips_cm_present())
+ write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ gic_present = true;
+
+ __gic_init(gic_base, gic_len, cpu_vec, 0, node);
+
+ return 0;
+}
+IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 000000000000..7e342df6a62f
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Joe.C <yingjoe.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "irqchip.h"
+
+#define MT6577_SYS_INTPOL_NUM (224)
+
+struct mtk_sysirq_chip_data {
+ spinlock_t lock;
+ void __iomem *intpol_base;
+};
+
+static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+{
+ irq_hw_number_t hwirq = data->hwirq;
+ struct mtk_sysirq_chip_data *chip_data = data->chip_data;
+ u32 offset, reg_index, value;
+ unsigned long flags;
+ int ret;
+
+ offset = hwirq & 0x1f;
+ reg_index = hwirq >> 5;
+
+ spin_lock_irqsave(&chip_data->lock, flags);
+ value = readl_relaxed(chip_data->intpol_base + reg_index * 4);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+ if (type == IRQ_TYPE_LEVEL_LOW)
+ type = IRQ_TYPE_LEVEL_HIGH;
+ else
+ type = IRQ_TYPE_EDGE_RISING;
+ value |= (1 << offset);
+ } else {
+ value &= ~(1 << offset);
+ }
+ writel(value, chip_data->intpol_base + reg_index * 4);
+
+ data = data->parent_data;
+ ret = data->chip->irq_set_type(data, type);
+ spin_unlock_irqrestore(&chip_data->lock, flags);
+ return ret;
+}
+
+static struct irq_chip mtk_sysirq_chip = {
+ .name = "MT_SYSIRQ",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = mtk_sysirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int mtk_sysirq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ /* sysirq doesn't support PPI */
+ if (intspec[0])
+ return -EINVAL;
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct of_phandle_args *irq_data = arg;
+ struct of_phandle_args gic_data = *irq_data;
+
+ if (irq_data->args_count != 3)
+ return -EINVAL;
+
+ /* sysirq doesn't support PPI */
+ if (irq_data->args[0])
+ return -EINVAL;
+
+ hwirq = irq_data->args[1];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mtk_sysirq_chip,
+ domain->host_data);
+
+ gic_data.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
+}
+
+static struct irq_domain_ops sysirq_domain_ops = {
+ .xlate = mtk_sysirq_domain_xlate,
+ .alloc = mtk_sysirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init mtk_sysirq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct mtk_sysirq_chip_data *chip_data;
+ int ret = 0;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mtk_sysirq: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return -ENOMEM;
+
+ chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
+ if (!chip_data->intpol_base) {
+ pr_err("mtk_sysirq: unable to map sysirq register\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ domain = irq_domain_add_hierarchy(domain_parent, 0,
+ MT6577_SYS_INTPOL_NUM, node,
+ &sysirq_domain_ops, chip_data);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ spin_lock_init(&chip_data->lock);
+
+ return 0;
+
+out_unmap:
+ iounmap(chip_data->intpol_base);
+out_free:
+ kfree(chip_data);
+ return ret;
+}
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
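The interesting part of mtk_sysirq_set_type() above is the INTPOL bookkeeping: each hardware interrupt owns one bit in an array of 32-bit polarity registers, and active-low/falling types are inverted to active-high/rising before being handed to the parent GIC. The stand-alone sketch below mirrors that arithmetic; the IRQ type constants match the usual kernel values, but everything else (array, helper names) is invented for illustration.

/* Stand-alone sketch (not kernel code) of the polarity handling in
 * mtk_sysirq_set_type().  Register layout is as implied by the driver;
 * the helper and array names are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define IRQ_TYPE_EDGE_RISING	0x1
#define IRQ_TYPE_EDGE_FALLING	0x2
#define IRQ_TYPE_LEVEL_HIGH	0x4
#define IRQ_TYPE_LEVEL_LOW	0x8

static uint32_t intpol[224 / 32];	/* MT6577_SYS_INTPOL_NUM bits */

static unsigned int set_type(unsigned long hwirq, unsigned int type)
{
	unsigned int offset = hwirq & 0x1f;
	unsigned int reg_index = hwirq >> 5;

	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
		intpol[reg_index] |= 1u << offset;	/* invert polarity */
		type = (type == IRQ_TYPE_LEVEL_LOW) ?
			IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING;
	} else {
		intpol[reg_index] &= ~(1u << offset);
	}
	return type;	/* type to pass on to the parent GIC */
}

int main(void)
{
	unsigned int parent = set_type(70, IRQ_TYPE_LEVEL_LOW);

	printf("reg %lu bit %lu -> 0x%08x, parent type 0x%x\n",
	       70UL >> 5, 70UL & 0x1f,
	       (unsigned int)intpol[70 >> 5], parent);
	return 0;
}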
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 542e850f4946..078cac5e2d08 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -562,7 +562,6 @@ static struct platform_driver intc_irqpin_device_driver = {
.driver = {
.name = "renesas_intc_irqpin",
.of_match_table = intc_irqpin_dt_ids,
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 8777065012a5..384e6ed61d7c 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -281,7 +281,6 @@ static struct platform_driver irqc_device_driver = {
.driver = {
.name = "renesas_irqc",
.of_match_table = irqc_dt_ids,
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 36c1b37cea0a..9846d82eb097 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -201,7 +201,7 @@ static unsigned char *cpars[] =
#define structTRcpyovl(x, y, l) memmove(y, x, l)
/*-------------------------------------------------------*/
-static unsigned command_2_index(unsigned c, unsigned sc)
+static unsigned command_2_index(u8 c, u8 sc)
{
if (c & 0x80)
c = 0x9 + (c & 0x0f);
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index dde5e09e6267..83f62b8d82b5 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -20,7 +20,7 @@ if ISDN_DRV_GIGASET
config GIGASET_CAPI
bool "Gigaset CAPI support"
depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
- default ISDN_I4L='n'
+ default 'y'
help
Build the Gigaset driver as a CAPI 2.0 driver interfacing with
the Kernel CAPI subsystem. To use it with the old ISDN4Linux
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index eb63a0f7a02a..166537e2dfca 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -751,9 +751,6 @@ void gigaset_stop(struct cardstate *cs);
/* Tell common.c that the driver is being unloaded. */
int gigaset_shutdown(struct cardstate *cs);
-/* Tell common.c that an skb has been sent. */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
-
/* Append event to the queue.
* Returns NULL on failure or a pointer to the event on success.
* ptr must be kmalloc()ed (and not be freed by the caller).
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index a8e652dac54d..5f306e2eece5 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -293,7 +293,7 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
}
static int write_modem(struct cardstate *cs);
-static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb);
+static int send_cb(struct cardstate *cs);
/* Write tasklet handler: Continue sending current skb, or send command, or
@@ -303,8 +303,6 @@ static void gigaset_modem_fill(unsigned long data)
{
struct cardstate *cs = (struct cardstate *) data;
struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
- struct cmdbuf_t *cb;
- int again;
gig_dbg(DEBUG_OUTPUT, "modem_fill");
@@ -313,36 +311,32 @@ static void gigaset_modem_fill(unsigned long data)
return;
}
- do {
- again = 0;
- if (!bcs->tx_skb) { /* no skb is being sent */
- cb = cs->cmdbuf;
- if (cb) { /* commands to send? */
- gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
- if (send_cb(cs, cb) < 0) {
- gig_dbg(DEBUG_OUTPUT,
- "modem_fill: send_cb failed");
- again = 1; /* no callback will be
- called! */
- }
- } else { /* skbs to send? */
- bcs->tx_skb = skb_dequeue(&bcs->squeue);
- if (bcs->tx_skb)
- gig_dbg(DEBUG_INTR,
- "Dequeued skb (Adr: %lx)!",
- (unsigned long) bcs->tx_skb);
- }
- }
-
- if (bcs->tx_skb) {
- gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
- if (write_modem(cs) < 0) {
+again:
+ if (!bcs->tx_skb) { /* no skb is being sent */
+ if (cs->cmdbuf) { /* commands to send? */
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
+ if (send_cb(cs) < 0) {
gig_dbg(DEBUG_OUTPUT,
- "modem_fill: write_modem failed");
- again = 1; /* no callback will be called! */
+ "modem_fill: send_cb failed");
+ goto again; /* no callback will be called! */
}
+ return;
}
- } while (again);
+
+ /* skbs to send? */
+ bcs->tx_skb = skb_dequeue(&bcs->squeue);
+ if (!bcs->tx_skb)
+ return;
+
+ gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!",
+ (unsigned long) bcs->tx_skb);
+ }
+
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
+ if (write_modem(cs) < 0) {
+ gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed");
+ goto again; /* no callback will be called! */
+ }
}
/*
@@ -429,9 +423,9 @@ static void gigaset_write_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&cs->lock, flags);
}
-static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
+static int send_cb(struct cardstate *cs)
{
- struct cmdbuf_t *tcb;
+ struct cmdbuf_t *cb = cs->cmdbuf;
unsigned long flags;
int count;
int status = -ENOENT;
@@ -439,26 +433,27 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
do {
if (!cb->len) {
- tcb = cb;
-
spin_lock_irqsave(&cs->cmdlock, flags);
cs->cmdbytes -= cs->curlen;
gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
cs->curlen, cs->cmdbytes);
- cs->cmdbuf = cb = cb->next;
- if (cb) {
- cb->prev = NULL;
- cs->curlen = cb->len;
+ cs->cmdbuf = cb->next;
+ if (cs->cmdbuf) {
+ cs->cmdbuf->prev = NULL;
+ cs->curlen = cs->cmdbuf->len;
} else {
cs->lastcmdbuf = NULL;
cs->curlen = 0;
}
spin_unlock_irqrestore(&cs->cmdlock, flags);
- if (tcb->wake_tasklet)
- tasklet_schedule(tcb->wake_tasklet);
- kfree(tcb);
+ if (cb->wake_tasklet)
+ tasklet_schedule(cb->wake_tasklet);
+ kfree(cb);
+
+ cb = cs->cmdbuf;
}
+
if (cb) {
count = min(cb->len, ucs->bulk_out_size);
gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
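The reworked send_cb() above always operates on the head of cs->cmdbuf instead of a caller-supplied buffer: once the head is fully written it is unlinked, its wake tasklet scheduled, and the buffer freed before the next one is picked up. The following stand-alone sketch shows the same head-pop pattern with a deliberately simplified command-buffer type; the struct, fields and helper names are invented for illustration.

/* Simplified, stand-alone illustration (not kernel code) of the cmdbuf
 * head-pop done in the reworked send_cb().
 */
#include <stdio.h>
#include <stdlib.h>

struct cmdbuf {
	int len;			/* bytes still to send */
	struct cmdbuf *next, *prev;
	void (*wake)(void);		/* completion hook, may be NULL */
};

static struct cmdbuf *cmd_head;

static void pop_finished_head(void)
{
	struct cmdbuf *cb = cmd_head;

	if (!cb || cb->len)
		return;			/* head not fully sent yet */

	cmd_head = cb->next;
	if (cmd_head)
		cmd_head->prev = NULL;

	if (cb->wake)
		cb->wake();
	free(cb);
}

static void done(void) { puts("command completed"); }

int main(void)
{
	struct cmdbuf *a = calloc(1, sizeof(*a));
	struct cmdbuf *b = calloc(1, sizeof(*b));

	a->next = b;
	b->prev = a;
	b->len = 5;
	a->wake = done;
	cmd_head = a;			/* head already fully sent (len == 0) */

	pop_finished_head();
	printf("new head has %d bytes left\n", cmd_head->len);
	free(cmd_head);
	return 0;
}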
diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c
index 838531b6a60e..14dada42874e 100644
--- a/drivers/isdn/hisax/hfc_2bs0.c
+++ b/drivers/isdn/hisax/hfc_2bs0.c
@@ -31,7 +31,7 @@ WaitForBusy(struct IsdnCardState *cs)
to--;
}
if (!to) {
- printk(KERN_WARNING "HiSax: waitforBusy timeout\n");
+ printk(KERN_WARNING "HiSax: %s timeout\n", __func__);
return (0);
} else
return (to);
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index fa1fefd711cd..b1fad81f0722 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1159,7 +1159,8 @@ hfcsx_l2l1(struct PStack *st, int pr, void *arg)
case (PH_PULL | INDICATION):
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
- printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
+ printk(KERN_WARNING "%s: this shouldn't happen\n",
+ __func__);
} else {
// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->tx_skb = skb;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 849a80752685..678bd5224bc3 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -927,9 +927,8 @@ start_int_fifo(usb_fifo *fifo)
fifo->active = 1; /* must be marked active */
errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
if (errcode) {
- printk(KERN_ERR
- "HFC-S USB: submit URB error(start_int_info): status:%i\n",
- errcode);
+ printk(KERN_ERR "HFC-S USB: submit URB error(%s): status:%i\n",
+ __func__, errcode);
fifo->active = 0;
fifo->skbuff = NULL;
}
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 5faa5de24305..9cc26b40a437 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -580,7 +580,7 @@ bch_fill_fifo(struct BCState *bcs)
if (cs->debug & L1_DEB_HSCX_FIFO) {
char *t = bcs->blog;
- t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
+ t += sprintf(t, "%s() B-%d cnt %d", __func__, hscx, count);
QuickHex(t, ptr, count);
debugl1(cs, "%s", bcs->blog);
}
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index 800095781bfb..a560842c0e48 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -867,7 +867,7 @@ l1_msg(struct IsdnCardState *cs, int pr, void *arg) {
break;
default:
if (cs->debug)
- debugl1(cs, "l1msg %04X unhandled", pr);
+ debugl1(cs, "%s %04X unhandled", __func__, pr);
break;
}
st = st->next;
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index 45b03840f716..c754706f83cd 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -153,7 +153,7 @@ void
newl3state(struct l3_process *pc, int state)
{
if (pc->debug & L3_DEB_STATE)
- l3_debug(pc->st, "newstate cr %d %d --> %d",
+ l3_debug(pc->st, "%s cr %d %d --> %d", __func__,
pc->callref & 0x7F,
pc->state, state);
pc->state = state;
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 00aad10507d8..93bae94314a6 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -501,7 +501,7 @@ static char *hycapi_procinfo(struct capi_ctr *ctrl)
{
hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_proc_info\n");
+ printk(KERN_NOTICE "%s\n", __func__);
#endif
if (!cinfo)
return "";
diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c
index a601c8472220..9b033be11a5f 100644
--- a/drivers/isdn/mISDN/l1oip_codec.c
+++ b/drivers/isdn/mISDN/l1oip_codec.c
@@ -312,10 +312,8 @@ l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result)
void
l1oip_4bit_free(void)
{
- if (table_dec)
- vfree(table_dec);
- if (table_com)
- vfree(table_com);
+ vfree(table_dec);
+ vfree(table_com);
table_com = NULL;
table_dec = NULL;
}
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 9f454d76cc06..67c21876c35f 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1334,7 +1334,7 @@ init_card(struct l1oip *hc, int pri, int bundle)
if (id[l1oip_cnt] == 0) {
printk(KERN_WARNING "Warning: No 'id' value given or "
"0, this is highly unsecure. Please use 32 "
- "bit randmom number 0x...\n");
+ "bit random number 0x...\n");
}
hc->id = id[l1oip_cnt];
if (debug & DEBUG_L1OIP_INIT)
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 1be82284cf9d..84b35925ee4d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -163,7 +163,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
mISDN_sock_cmsg(sk, msg, skb);
@@ -203,7 +203,7 @@ mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
goto done;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
goto done;
}
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 42ecfef80132..46e1240ae074 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -85,7 +85,6 @@ pcbit_l2_write(struct pcbit_dev *dev, ulong msg, ushort refnum,
}
if ((frame = kmalloc(sizeof(struct frame_buf),
GFP_ATOMIC)) == NULL) {
- printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n");
dev_kfree_skb(skb);
return -1;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a210338cfeb1..a6c3d2f153f3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -250,6 +250,17 @@ config LEDS_LP8788
help
This option enables support for the Keyboard LEDs on the LP8788 PMIC.
+config LEDS_LP8860
+ tristate "LED support for the TI LP8860 4 channel LED driver"
+ depends on LEDS_CLASS && I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI LP8860 4 channel
+ LED driver, which controls the display cluster LEDs over the
+ I2C bus.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2b164741465..1c65a191d907 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
+obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 7440c58b8e6f..dbeebac38d31 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -40,17 +40,27 @@ static ssize_t brightness_store(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
ret = kstrtoul(buf, 10, &state);
if (ret)
- return ret;
+ goto unlock;
if (state == LED_OFF)
led_trigger_remove(led_cdev);
- __led_set_brightness(led_cdev, state);
+ led_set_brightness(led_cdev, state);
- return size;
+ ret = size;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
}
static DEVICE_ATTR_RW(brightness);
@@ -99,7 +109,7 @@ static void led_timer_function(unsigned long data)
unsigned long delay;
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return;
}
@@ -122,7 +132,7 @@ static void led_timer_function(unsigned long data)
delay = led_cdev->blink_delay_off;
}
- __led_set_brightness(led_cdev, brightness);
+ led_set_brightness_async(led_cdev, brightness);
/* Return in next iteration if led is in one-shot mode and we are in
* the final blink state so that the led is toggled each delay_on +
@@ -148,7 +158,7 @@ static void set_brightness_delayed(struct work_struct *ws)
led_stop_software_blink(led_cdev);
- __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+ led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
}
/**
@@ -214,6 +224,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
#ifdef CONFIG_LEDS_TRIGGERS
init_rwsem(&led_cdev->trigger_lock);
#endif
+ mutex_init(&led_cdev->led_access);
/* add to the list of leds */
down_write(&leds_list_lock);
list_add_tail(&led_cdev->node, &leds_list);
@@ -222,6 +233,8 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
if (!led_cdev->max_brightness)
led_cdev->max_brightness = LED_FULL;
+ led_cdev->flags |= SET_BRIGHTNESS_ASYNC;
+
led_update_brightness(led_cdev);
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
@@ -267,6 +280,8 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
+
+ mutex_destroy(&led_cdev->led_access);
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index aaa8eba9099f..9886dace5ad2 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -42,13 +42,13 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
/* never on - just set to off */
if (!delay_on) {
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return;
}
/* never off - just set to brightness */
if (!delay_off) {
- __led_set_brightness(led_cdev, led_cdev->blink_brightness);
+ led_set_brightness_async(led_cdev, led_cdev->blink_brightness);
return;
}
@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink);
void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
+ int ret = 0;
+
/* delay brightness setting if need to stop soft-blink timer */
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
led_cdev->delayed_set_value = brightness;
@@ -124,7 +126,17 @@ void led_set_brightness(struct led_classdev *led_cdev,
return;
}
- __led_set_brightness(led_cdev, brightness);
+ if (led_cdev->flags & SET_BRIGHTNESS_ASYNC) {
+ led_set_brightness_async(led_cdev, brightness);
+ return;
+ } else if (led_cdev->flags & SET_BRIGHTNESS_SYNC)
+ ret = led_set_brightness_sync(led_cdev, brightness);
+ else
+ ret = -EINVAL;
+
+ if (ret < 0)
+ dev_dbg(led_cdev->dev, "Setting LED brightness failed (%d)\n",
+ ret);
}
EXPORT_SYMBOL(led_set_brightness);
@@ -143,3 +155,21 @@ int led_update_brightness(struct led_classdev *led_cdev)
return ret;
}
EXPORT_SYMBOL(led_update_brightness);
+
+/* Caller must ensure led_cdev->led_access held */
+void led_sysfs_disable(struct led_classdev *led_cdev)
+{
+ lockdep_assert_held(&led_cdev->led_access);
+
+ led_cdev->flags |= LED_SYSFS_DISABLE;
+}
+EXPORT_SYMBOL_GPL(led_sysfs_disable);
+
+/* Caller must ensure led_cdev->led_access held */
+void led_sysfs_enable(struct led_classdev *led_cdev)
+{
+ lockdep_assert_held(&led_cdev->led_access);
+
+ led_cdev->flags &= ~LED_SYSFS_DISABLE;
+}
+EXPORT_SYMBOL_GPL(led_sysfs_enable);
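led_sysfs_disable()/led_sysfs_enable() added above only flip the LED_SYSFS_DISABLE flag; as the lockdep assertions document, callers are expected to hold the new led_access mutex, the same lock the brightness and trigger sysfs stores now take before checking led_sysfs_is_disabled(). A hypothetical in-kernel user might look like the sketch below; this is written against the interfaces introduced in this series and is not a tested driver.

/* Hypothetical in-kernel user of the new sysfs-disable interface (sketch
 * only, not a real driver).
 */
#include <linux/leds.h>
#include <linux/mutex.h>

static void example_take_led(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	/* Block brightness/trigger writes from sysfs while we drive the LED */
	led_sysfs_disable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}

static void example_release_led(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_enable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}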
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index c3734f10fdd5..e8b1120f486d 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -37,6 +37,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
size_t len;
+ int ret = count;
+
+ mutex_lock(&led_cdev->led_access);
+
+ if (led_sysfs_is_disabled(led_cdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
trigger_name[sizeof(trigger_name) - 1] = '\0';
strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
@@ -47,7 +55,7 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
if (!strcmp(trigger_name, "none")) {
led_trigger_remove(led_cdev);
- return count;
+ goto unlock;
}
down_read(&triggers_list_lock);
@@ -58,12 +66,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- return count;
+ goto unlock;
}
}
up_read(&triggers_list_lock);
- return -EINVAL;
+unlock:
+ mutex_unlock(&led_cdev->led_access);
+ return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_store);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index c2def5551ce1..1497a09166d6 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -237,7 +237,6 @@ static int pm860x_led_remove(struct platform_device *pdev)
static struct platform_driver pm860x_led_driver = {
.driver = {
.name = "88pm860x-led",
- .owner = THIS_MODULE,
},
.probe = pm860x_led_probe,
.remove = pm860x_led_remove,
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 5036d7b4f82e..07e66cae32d3 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -201,7 +201,6 @@ static int adp5520_led_remove(struct platform_device *pdev)
static struct platform_driver adp5520_led_driver = {
.driver = {
.name = "adp5520-led",
- .owner = THIS_MODULE,
},
.probe = adp5520_led_probe,
.remove = adp5520_led_remove,
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 70c74a7f0dfe..1b71eac639f0 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -168,7 +168,6 @@ static struct platform_driver asic3_led_driver = {
.remove = asic3_led_remove,
.driver = {
.name = "leds-asic3",
- .owner = THIS_MODULE,
.pm = &asic3_led_pm_ops,
},
};
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index f58a354428e3..0f9ed1ea0e89 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -168,7 +168,6 @@ static struct platform_driver clevo_mail_led_driver = {
.remove = clevo_mail_led_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 910339d86edf..d97522080491 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -76,7 +76,6 @@ static struct platform_driver cobalt_qube_led_driver = {
.remove = cobalt_qube_led_remove,
.driver = {
.name = "cobalt-qube-leds",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 001088b31373..06dbe18a2065 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -124,7 +124,6 @@ static struct platform_driver cobalt_raq_led_driver = {
.remove = cobalt_raq_led_remove,
.driver = {
.name = "cobalt-raq-leds",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 54b8b5216b8b..952ba96e5b38 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -144,7 +144,6 @@ static int da903x_led_remove(struct platform_device *pdev)
static struct platform_driver da903x_led_driver = {
.driver = {
.name = "da903x-led",
- .owner = THIS_MODULE,
},
.probe = da903x_led_probe,
.remove = da903x_led_remove,
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index e4da1f460ac5..28291b6acc8e 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -199,7 +199,6 @@ static int da9052_led_remove(struct platform_device *pdev)
static struct platform_driver da9052_led_driver = {
.driver = {
.name = "da9052-leds",
- .owner = THIS_MODULE,
},
.probe = da9052_led_probe,
.remove = da9052_led_remove,
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 5fb4440127d9..7ea1ea42c2d2 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -291,7 +291,6 @@ static struct platform_driver gpio_led_driver = {
.remove = gpio_led_remove,
.driver = {
.name = "leds-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_gpio_leds_match,
},
};
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index d61a98896c71..0b84c0113126 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -83,7 +83,6 @@ static struct platform_driver hp6xxled_driver = {
.remove = hp6xxled_remove,
.driver = {
.name = "hp6xx-led",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index cbf61a40137d..6e2e02035dd7 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -766,7 +766,6 @@ static void lm3533_led_shutdown(struct platform_device *pdev)
static struct platform_driver lm3533_led_driver = {
.driver = {
.name = "lm3533-leds",
- .owner = THIS_MODULE,
},
.probe = lm3533_led_probe,
.remove = lm3533_led_remove,
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 7c2cb384e7ae..3409f03c1fa8 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -183,7 +183,6 @@ static struct platform_driver lp8788_led_driver = {
.remove = lp8788_led_remove,
.driver = {
.name = LP8788_DEV_KEYLED,
- .owner = THIS_MODULE,
},
};
module_platform_driver(lp8788_led_driver);
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
new file mode 100644
index 000000000000..840e93f3ab3e
--- /dev/null
+++ b/drivers/leds/leds-lp8860.c
@@ -0,0 +1,491 @@
+/*
+ * TI LP8860 4-Channel LED Driver
+ *
+ * Copyright (C) 2014 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+
+#define LP8860_DISP_CL1_BRT_MSB 0x00
+#define LP8860_DISP_CL1_BRT_LSB 0x01
+#define LP8860_DISP_CL1_CURR_MSB 0x02
+#define LP8860_DISP_CL1_CURR_LSB 0x03
+#define LP8860_CL2_BRT_MSB 0x04
+#define LP8860_CL2_BRT_LSB 0x05
+#define LP8860_CL2_CURRENT 0x06
+#define LP8860_CL3_BRT_MSB 0x07
+#define LP8860_CL3_BRT_LSB 0x08
+#define LP8860_CL3_CURRENT 0x09
+#define LP8860_CL4_BRT_MSB 0x0a
+#define LP8860_CL4_BRT_LSB 0x0b
+#define LP8860_CL4_CURRENT 0x0c
+#define LP8860_CONFIG 0x0d
+#define LP8860_STATUS 0x0e
+#define LP8860_FAULT 0x0f
+#define LP8860_LED_FAULT 0x10
+#define LP8860_FAULT_CLEAR 0x11
+#define LP8860_ID 0x12
+#define LP8860_TEMP_MSB 0x13
+#define LP8860_TEMP_LSB 0x14
+#define LP8860_DISP_LED_CURR_MSB 0x15
+#define LP8860_DISP_LED_CURR_LSB 0x16
+#define LP8860_DISP_LED_PWM_MSB 0x17
+#define LP8860_DISP_LED_PWM_LSB 0x18
+#define LP8860_EEPROM_CNTRL 0x19
+#define LP8860_EEPROM_UNLOCK 0x1a
+
+#define LP8860_EEPROM_REG_0 0x60
+#define LP8860_EEPROM_REG_1 0x61
+#define LP8860_EEPROM_REG_2 0x62
+#define LP8860_EEPROM_REG_3 0x63
+#define LP8860_EEPROM_REG_4 0x64
+#define LP8860_EEPROM_REG_5 0x65
+#define LP8860_EEPROM_REG_6 0x66
+#define LP8860_EEPROM_REG_7 0x67
+#define LP8860_EEPROM_REG_8 0x68
+#define LP8860_EEPROM_REG_9 0x69
+#define LP8860_EEPROM_REG_10 0x6a
+#define LP8860_EEPROM_REG_11 0x6b
+#define LP8860_EEPROM_REG_12 0x6c
+#define LP8860_EEPROM_REG_13 0x6d
+#define LP8860_EEPROM_REG_14 0x6e
+#define LP8860_EEPROM_REG_15 0x6f
+#define LP8860_EEPROM_REG_16 0x70
+#define LP8860_EEPROM_REG_17 0x71
+#define LP8860_EEPROM_REG_18 0x72
+#define LP8860_EEPROM_REG_19 0x73
+#define LP8860_EEPROM_REG_20 0x74
+#define LP8860_EEPROM_REG_21 0x75
+#define LP8860_EEPROM_REG_22 0x76
+#define LP8860_EEPROM_REG_23 0x77
+#define LP8860_EEPROM_REG_24 0x78
+
+#define LP8860_LOCK_EEPROM 0x00
+#define LP8860_UNLOCK_EEPROM 0x01
+#define LP8860_PROGRAM_EEPROM 0x02
+#define LP8860_EEPROM_CODE_1 0x08
+#define LP8860_EEPROM_CODE_2 0xba
+#define LP8860_EEPROM_CODE_3 0xef
+
+#define LP8860_CLEAR_FAULTS 0x01
+
+#define LP8860_DISP_LED_NAME "display_cluster"
+
+/**
+ * struct lp8860_led - LP8860 LED driver data
+ * @lock: Lock for reading/writing the device
+ * @work: Work item used to offload the brightness register writes
+ * @client: Pointer to the I2C client
+ * @led_dev: LED class device pointer
+ * @regmap: Device's register map
+ * @eeprom_regmap: EEPROM register map
+ * @enable_gpio: VDDIO/EN GPIO to enable the communication interface
+ * @regulator: LED supply regulator pointer
+ * @brightness: Current requested brightness value
+ * @label: LED label
+ */
+struct lp8860_led {
+ struct mutex lock;
+ struct work_struct work;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct regmap *eeprom_regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ enum led_brightness brightness;
+ const char *label;
+};
+
+struct lp8860_eeprom_reg {
+ uint8_t reg;
+ uint8_t value;
+};
+
+static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
+ { LP8860_EEPROM_REG_0, 0xed },
+ { LP8860_EEPROM_REG_1, 0xdf },
+ { LP8860_EEPROM_REG_2, 0xdc },
+ { LP8860_EEPROM_REG_3, 0xf0 },
+ { LP8860_EEPROM_REG_4, 0xdf },
+ { LP8860_EEPROM_REG_5, 0xe5 },
+ { LP8860_EEPROM_REG_6, 0xf2 },
+ { LP8860_EEPROM_REG_7, 0x77 },
+ { LP8860_EEPROM_REG_8, 0x77 },
+ { LP8860_EEPROM_REG_9, 0x71 },
+ { LP8860_EEPROM_REG_10, 0x3f },
+ { LP8860_EEPROM_REG_11, 0xb7 },
+ { LP8860_EEPROM_REG_12, 0x17 },
+ { LP8860_EEPROM_REG_13, 0xef },
+ { LP8860_EEPROM_REG_14, 0xb0 },
+ { LP8860_EEPROM_REG_15, 0x87 },
+ { LP8860_EEPROM_REG_16, 0xce },
+ { LP8860_EEPROM_REG_17, 0x72 },
+ { LP8860_EEPROM_REG_18, 0xe5 },
+ { LP8860_EEPROM_REG_19, 0xdf },
+ { LP8860_EEPROM_REG_20, 0x35 },
+ { LP8860_EEPROM_REG_21, 0x06 },
+ { LP8860_EEPROM_REG_22, 0xdc },
+ { LP8860_EEPROM_REG_23, 0x88 },
+ { LP8860_EEPROM_REG_24, 0x3E },
+};
+
+static int lp8860_unlock_eeprom(struct lp8860_led *led, int lock)
+{
+ int ret;
+
+ mutex_lock(&led->lock);
+
+ if (lock == LP8860_UNLOCK_EEPROM) {
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_1);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_2);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_EEPROM_CODE_3);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ goto out;
+ }
+ } else {
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_UNLOCK,
+ LP8860_LOCK_EEPROM);
+ }
+
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static int lp8860_fault_check(struct lp8860_led *led)
+{
+ int ret, fault;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LP8860_LED_FAULT, &read_buf);
+ if (ret)
+ goto out;
+
+ fault = read_buf;
+
+ ret = regmap_read(led->regmap, LP8860_FAULT, &read_buf);
+ if (ret)
+ goto out;
+
+ fault |= read_buf;
+
+ /* Attempt to clear any faults */
+ if (fault)
+ ret = regmap_write(led->regmap, LP8860_FAULT_CLEAR,
+ LP8860_CLEAR_FAULTS);
+out:
+ return ret;
+}
+
+static void lp8860_led_brightness_work(struct work_struct *work)
+{
+ struct lp8860_led *led = container_of(work, struct lp8860_led, work);
+ int ret;
+ int disp_brightness = led->brightness * 255;
+
+ mutex_lock(&led->lock);
+
+ ret = lp8860_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_MSB,
+ (disp_brightness & 0xff00) >> 8);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write CL1 MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_LSB,
+ disp_brightness & 0xff);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write CL1 LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+}
+
+static void lp8860_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lp8860_led *led =
+ container_of(led_cdev, struct lp8860_led, led_dev);
+
+ led->brightness = brt_val;
+ schedule_work(&led->work);
+}
+
+static int lp8860_init(struct lp8860_led *led)
+{
+ unsigned int read_buf;
+ int ret, i, reg_count;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lp8860_fault_check(led);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(led->regmap, LP8860_STATUS, &read_buf);
+ if (ret)
+ goto out;
+
+ ret = lp8860_unlock_eeprom(led, LP8860_UNLOCK_EEPROM);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed unlocking EEPROM\n");
+ goto out;
+ }
+
+ reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
+ for (i = 0; i < reg_count; i++) {
+ ret = regmap_write(led->eeprom_regmap,
+ lp8860_eeprom_disp_regs[i].reg,
+ lp8860_eeprom_disp_regs[i].value);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed writing EEPROM\n");
+ goto out;
+ }
+ }
+
+ ret = lp8860_unlock_eeprom(led, LP8860_LOCK_EEPROM);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap,
+ LP8860_EEPROM_CNTRL,
+ LP8860_PROGRAM_EEPROM);
+ if (ret)
+ dev_err(&led->client->dev, "Failed programming EEPROM\n");
+out:
+ if (ret)
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+ return ret;
+}
+
+static struct reg_default lp8860_reg_defs[] = {
+ { LP8860_DISP_CL1_BRT_MSB, 0x00},
+ { LP8860_DISP_CL1_BRT_LSB, 0x00},
+ { LP8860_DISP_CL1_CURR_MSB, 0x00},
+ { LP8860_DISP_CL1_CURR_LSB, 0x00},
+ { LP8860_CL2_BRT_MSB, 0x00},
+ { LP8860_CL2_BRT_LSB, 0x00},
+ { LP8860_CL2_CURRENT, 0x00},
+ { LP8860_CL3_BRT_MSB, 0x00},
+ { LP8860_CL3_BRT_LSB, 0x00},
+ { LP8860_CL3_CURRENT, 0x00},
+ { LP8860_CL4_BRT_MSB, 0x00},
+ { LP8860_CL4_BRT_LSB, 0x00},
+ { LP8860_CL4_CURRENT, 0x00},
+ { LP8860_CONFIG, 0x00},
+ { LP8860_FAULT_CLEAR, 0x00},
+ { LP8860_EEPROM_CNTRL, 0x80},
+ { LP8860_EEPROM_UNLOCK, 0x00},
+};
+
+static const struct regmap_config lp8860_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP8860_EEPROM_UNLOCK,
+ .reg_defaults = lp8860_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+static struct reg_default lp8860_eeprom_defs[] = {
+ { LP8860_EEPROM_REG_0, 0x00 },
+ { LP8860_EEPROM_REG_1, 0x00 },
+ { LP8860_EEPROM_REG_2, 0x00 },
+ { LP8860_EEPROM_REG_3, 0x00 },
+ { LP8860_EEPROM_REG_4, 0x00 },
+ { LP8860_EEPROM_REG_5, 0x00 },
+ { LP8860_EEPROM_REG_6, 0x00 },
+ { LP8860_EEPROM_REG_7, 0x00 },
+ { LP8860_EEPROM_REG_8, 0x00 },
+ { LP8860_EEPROM_REG_9, 0x00 },
+ { LP8860_EEPROM_REG_10, 0x00 },
+ { LP8860_EEPROM_REG_11, 0x00 },
+ { LP8860_EEPROM_REG_12, 0x00 },
+ { LP8860_EEPROM_REG_13, 0x00 },
+ { LP8860_EEPROM_REG_14, 0x00 },
+ { LP8860_EEPROM_REG_15, 0x00 },
+ { LP8860_EEPROM_REG_16, 0x00 },
+ { LP8860_EEPROM_REG_17, 0x00 },
+ { LP8860_EEPROM_REG_18, 0x00 },
+ { LP8860_EEPROM_REG_19, 0x00 },
+ { LP8860_EEPROM_REG_20, 0x00 },
+ { LP8860_EEPROM_REG_21, 0x00 },
+ { LP8860_EEPROM_REG_22, 0x00 },
+ { LP8860_EEPROM_REG_23, 0x00 },
+ { LP8860_EEPROM_REG_24, 0x00 },
+};
+
+static const struct regmap_config lp8860_eeprom_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP8860_EEPROM_REG_24,
+ .reg_defaults = lp8860_eeprom_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lp8860_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lp8860_led *led;
+ struct device_node *np = client->dev.of_node;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->label = LP8860_DISP_LED_NAME;
+
+ if (client->dev.of_node) {
+ ret = of_property_read_string(np, "label", &led->label);
+ if (ret) {
+ dev_err(&client->dev, "Missing label in dt\n");
+ return -EINVAL;
+ }
+ }
+
+ led->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+ if (IS_ERR(led->enable_gpio))
+ led->enable_gpio = NULL;
+ else
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.max_brightness = LED_FULL;
+ led->led_dev.brightness_set = lp8860_brightness_set;
+
+ mutex_init(&led->lock);
+ INIT_WORK(&led->work, lp8860_led_brightness_work);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ led->eeprom_regmap = devm_regmap_init_i2c(client, &lp8860_eeprom_regmap_config);
+ if (IS_ERR(led->eeprom_regmap)) {
+ ret = PTR_ERR(led->eeprom_regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lp8860_init(led);
+ if (ret)
+ return ret;
+
+ ret = led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lp8860_remove(struct i2c_client *client)
+{
+ struct lp8860_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ led_classdev_unregister(&led->led_dev);
+ cancel_work_sync(&led->work);
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id lp8860_id[] = {
+ { "lp8860", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp8860_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_lp8860_leds_match[] = {
+ { .compatible = "ti,lp8860", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
+#endif
+
+static struct i2c_driver lp8860_driver = {
+ .driver = {
+ .name = "lp8860",
+ .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ },
+ .probe = lp8860_probe,
+ .remove = lp8860_remove,
+ .id_table = lp8860_id,
+};
+module_i2c_driver(lp8860_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL");
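For reference, the brightness path in lp8860_led_brightness_work() widens the 0-255 LED-class value into a 16-bit register value and splits it into the CL1 MSB/LSB pair. The stand-alone sketch below reproduces just that arithmetic; it is illustration only, not driver code.

/* Stand-alone sketch (not kernel code) of the brightness scaling and
 * MSB/LSB split used by lp8860_led_brightness_work().
 */
#include <stdio.h>

int main(void)
{
	unsigned int brightness = 128;			/* LED-class value, 0..255 */
	unsigned int disp = brightness * 255;		/* 0..65025, 16-bit range */
	unsigned int msb = (disp & 0xff00) >> 8;	/* LP8860_DISP_CL1_BRT_MSB */
	unsigned int lsb = disp & 0xff;			/* LP8860_DISP_CL1_BRT_LSB */

	printf("brightness %u -> 0x%04x (MSB 0x%02x, LSB 0x%02x)\n",
	       brightness, disp, msb, lsb);
	return 0;
}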
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 059f5b1f3553..9f41124765cc 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -184,7 +184,6 @@ static struct platform_driver lt3593_led_driver = {
.remove = lt3593_led_remove,
.driver = {
.name = "leds-lt3593",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 607bc2755aba..c592aa5662bb 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -303,7 +303,6 @@ static int max8997_led_remove(struct platform_device *pdev)
static struct platform_driver max8997_led_driver = {
.driver = {
.name = "max8997-led",
- .owner = THIS_MODULE,
},
.probe = max8997_led_probe,
.remove = max8997_led_remove,
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index f1db88e25138..85c3714e1b5a 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -325,7 +325,6 @@ MODULE_DEVICE_TABLE(platform, mc13xxx_led_id_table);
static struct platform_driver mc13xxx_led_driver = {
.driver = {
.name = "mc13xxx-led",
- .owner = THIS_MODULE,
},
.remove = mc13xxx_led_remove,
.id_table = mc13xxx_led_id_table,
diff --git a/drivers/leds/leds-menf21bmc.c b/drivers/leds/leds-menf21bmc.c
index 89dd57769e3b..4b9eea815b1a 100644
--- a/drivers/leds/leds-menf21bmc.c
+++ b/drivers/leds/leds-menf21bmc.c
@@ -119,7 +119,6 @@ static struct platform_driver menf21bmc_led = {
.remove = menf21bmc_led_remove,
.driver = {
.name = "menf21bmc_led",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 27d06c528246..ec3a2e8adcae 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -53,7 +53,6 @@ static struct platform_driver net48xx_led_driver = {
.remove = net48xx_led_remove,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 64fde485dcaa..26515c27ea8c 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -404,7 +404,6 @@ static struct platform_driver netxbig_led_driver = {
.remove = netxbig_led_remove,
.driver = {
.name = "leds-netxbig",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 231993d1fe21..1fd6adbb43b7 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -380,7 +380,6 @@ static struct platform_driver ns2_led_driver = {
.remove = ns2_led_remove,
.driver = {
.name = "leds-ns2",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_ns2_leds_match),
},
};
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index c9d906098466..39870de20a26 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -158,7 +158,6 @@ static struct platform_driver ot200_led_driver = {
.remove = ot200_led_remove,
.driver = {
.name = "leds-ot200",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index d672bb4480f6..f668500a2157 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -232,7 +232,6 @@ static struct platform_driver led_pwm_driver = {
.remove = led_pwm_remove,
.driver = {
.name = "leds_pwm",
- .owner = THIS_MODULE,
.of_match_table = of_pwm_leds_match,
},
};
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index 2e746d257b02..fcd1215b64a2 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -53,7 +53,6 @@ static struct platform_driver rb532_led_driver = {
.remove = rb532_led_remove,
.driver = {
.name = "rb532-led",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 358430db6e66..ffc21397a675 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -153,24 +153,21 @@ static int regulator_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- vcc = regulator_get_exclusive(&pdev->dev, "vled");
+ vcc = devm_regulator_get_exclusive(&pdev->dev, "vled");
if (IS_ERR(vcc)) {
dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
return PTR_ERR(vcc);
}
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
- if (led == NULL) {
- ret = -ENOMEM;
- goto err_vcc;
- }
+ if (led == NULL)
+ return -ENOMEM;
led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
if (pdata->brightness > led->cdev.max_brightness) {
dev_err(&pdev->dev, "Invalid default brightness %d\n",
pdata->brightness);
- ret = -EINVAL;
- goto err_vcc;
+ return -EINVAL;
}
led->value = pdata->brightness;
@@ -191,7 +188,7 @@ static int regulator_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0) {
cancel_work_sync(&led->work);
- goto err_vcc;
+ return ret;
}
/* to expose the default value to userspace */
@@ -201,10 +198,6 @@ static int regulator_led_probe(struct platform_device *pdev)
regulator_led_set_value(led);
return 0;
-
-err_vcc:
- regulator_put(vcc);
- return ret;
}
static int regulator_led_remove(struct platform_device *pdev)
@@ -214,14 +207,12 @@ static int regulator_led_remove(struct platform_device *pdev)
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
regulator_led_disable(led);
- regulator_put(led->vcc);
return 0;
}
static struct platform_driver regulator_led_driver = {
.driver = {
.name = "leds-regulator",
- .owner = THIS_MODULE,
},
.probe = regulator_led_probe,
.remove = regulator_led_remove,
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 785eb53a87fc..83641a7b299a 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -116,7 +116,6 @@ static struct platform_driver s3c24xx_led_driver = {
.remove = s3c24xx_led_remove,
.driver = {
.name = "s3c24xx_led",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 0b8cc4a021a6..c2553c54f2cf 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -223,7 +223,6 @@ static struct platform_driver sunfire_clockboard_led_driver = {
.remove = sunfire_led_generic_remove,
.driver = {
.name = "sunfire-clockboard-leds",
- .owner = THIS_MODULE,
},
};
@@ -232,7 +231,6 @@ static struct platform_driver sunfire_fhc_led_driver = {
.remove = sunfire_led_generic_remove,
.driver = {
.name = "sunfire-fhc-leds",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index 3afec79c43f4..6896e2d9ba58 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -18,10 +18,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
- *
- * This driver provides system reboot functionality for APM X-Gene SoC.
- * For system shutdown, this is board specify. If a board designer
- * implements GPIO shutdown, use the gpio-poweroff.c driver.
*/
#include <linux/io.h>
#include <linux/of_device.h>
@@ -70,39 +66,13 @@ static void syscon_led_set(struct led_classdev *led_cdev,
dev_err(sled->cdev.dev, "error updating LED status\n");
}
-static const struct of_device_id syscon_match[] = {
- { .compatible = "syscon", },
- {},
-};
-
-static int __init syscon_leds_init(void)
+static int __init syscon_leds_spawn(struct device_node *np,
+ struct device *dev,
+ struct regmap *map)
{
- const struct of_device_id *devid;
- struct device_node *np;
struct device_node *child;
- struct regmap *map;
- struct platform_device *pdev;
- struct device *dev;
int ret;
- np = of_find_matching_node_and_match(NULL, syscon_match,
- &devid);
- if (!np)
- return -ENODEV;
-
- map = syscon_node_to_regmap(np);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- /*
- * If the map is there, the device should be there, we allocate
- * memory on the syscon device's behalf here.
- */
- pdev = of_find_device_by_node(np);
- if (!pdev)
- return -ENODEV;
- dev = &pdev->dev;
-
for_each_available_child_of_node(np, child) {
struct syscon_led *sled;
const char *state;
@@ -150,7 +120,6 @@ static int __init syscon_leds_init(void)
if (ret < 0)
return ret;
}
-
}
sled->cdev.brightness_set = syscon_led_set;
@@ -160,7 +129,39 @@ static int __init syscon_leds_init(void)
dev_info(dev, "registered LED %s\n", sled->cdev.name);
}
+ return 0;
+}
+
+static int __init syscon_leds_init(void)
+{
+ struct device_node *np;
+
+ for_each_of_allnodes(np) {
+ struct platform_device *pdev;
+ struct regmap *map;
+ int ret;
+
+ if (!of_device_is_compatible(np, "syscon"))
+ continue;
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ pr_err("error getting regmap for syscon LEDs\n");
+ continue;
+ }
+
+ /*
+ * If the map is there, the device should be there, we allocate
+ * memory on the syscon device's behalf here.
+ */
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
+ ret = syscon_leds_spawn(np, &pdev->dev, map);
+ if (ret)
+ dev_err(&pdev->dev, "could not spawn syscon LEDs\n");
+ }
- return 0;
+ return 0;
}
device_initcall(syscon_leds_init);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 1b71e0701002..56027ef7c7e8 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -312,7 +312,6 @@ static int wm831x_status_remove(struct platform_device *pdev)
static struct platform_driver wm831x_status_driver = {
.driver = {
.name = "wm831x-status",
- .owner = THIS_MODULE,
},
.probe = wm831x_status_probe,
.remove = wm831x_status_remove,
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 4133ffe29015..0d121835673f 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -272,7 +272,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
static struct platform_driver wm8350_led_driver = {
.driver = {
.name = "wm8350-led",
- .owner = THIS_MODULE,
},
.probe = wm8350_led_probe,
.remove = wm8350_led_remove,
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index b358cc05eff5..1ba3defdd460 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -111,7 +111,6 @@ static struct platform_driver wrap_led_driver = {
.remove = wrap_led_remove,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 4c50365344a9..2348dbda5269 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -17,16 +17,28 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
-static inline void __led_set_brightness(struct led_classdev *led_cdev,
+static inline void led_set_brightness_async(struct led_classdev *led_cdev,
enum led_brightness value)
{
- if (value > led_cdev->max_brightness)
- value = led_cdev->max_brightness;
- led_cdev->brightness = value;
+ led_cdev->brightness = min(value, led_cdev->max_brightness);
+
if (!(led_cdev->flags & LED_SUSPENDED))
led_cdev->brightness_set(led_cdev, value);
}
+static inline int led_set_brightness_sync(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ int ret = 0;
+
+ led_cdev->brightness = min(value, led_cdev->max_brightness);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ ret = led_cdev->brightness_set_sync(led_cdev,
+ led_cdev->brightness);
+ return ret;
+}
+
static inline int led_get_brightness(struct led_classdev *led_cdev)
{
return led_cdev->brightness;
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 47e55aa9eefa..59eca17d9661 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -51,9 +51,9 @@ static int fb_notifier_callback(struct notifier_block *p,
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
- __led_set_brightness(led, LED_OFF);
+ led_set_brightness_async(led, LED_OFF);
} else {
- __led_set_brightness(led, n->brightness);
+ led_set_brightness_async(led, n->brightness);
}
n->old_status = new_status;
@@ -89,9 +89,9 @@ static ssize_t bl_trig_invert_store(struct device *dev,
/* After inverting, we need to update the LED. */
if ((n->old_status == BLANK) ^ n->invert)
- __led_set_brightness(led, LED_OFF);
+ led_set_brightness_async(led, LED_OFF);
else
- __led_set_brightness(led, n->brightness);
+ led_set_brightness_async(led, n->brightness);
return num;
}
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index 81a91be8e18d..6f38f883aaf1 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -19,7 +19,7 @@
static void defon_trig_activate(struct led_classdev *led_cdev)
{
- __led_set_brightness(led_cdev, led_cdev->max_brightness);
+ led_set_brightness_async(led_cdev, led_cdev->max_brightness);
}
static struct led_trigger defon_led_trigger = {
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index c86c41826476..4cc7040746c6 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -54,12 +54,12 @@ static void gpio_trig_work(struct work_struct *work)
if (tmp) {
if (gpio_data->desired_brightness)
- __led_set_brightness(gpio_data->led,
+ led_set_brightness_async(gpio_data->led,
gpio_data->desired_brightness);
else
- __led_set_brightness(gpio_data->led, LED_FULL);
+ led_set_brightness_async(gpio_data->led, LED_FULL);
} else {
- __led_set_brightness(gpio_data->led, LED_OFF);
+ led_set_brightness_async(gpio_data->led, LED_OFF);
}
}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 5c8464a33172..fea6871d2609 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -74,7 +74,7 @@ static void led_heartbeat_function(unsigned long data)
break;
}
- __led_set_brightness(led_cdev, brightness);
+ led_set_brightness_async(led_cdev, brightness);
mod_timer(&heartbeat_data->timer, jiffies + delay);
}
diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index cb4c7466692a..fbd02cdc3ad7 100644
--- a/drivers/leds/trigger/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -63,9 +63,9 @@ static ssize_t led_invert_store(struct device *dev,
oneshot_data->invert = !!state;
if (oneshot_data->invert)
- __led_set_brightness(led_cdev, LED_FULL);
+ led_set_brightness_async(led_cdev, LED_FULL);
else
- __led_set_brightness(led_cdev, LED_OFF);
+ led_set_brightness_async(led_cdev, LED_OFF);
return size;
}
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index e5abc00bb00c..3c34de404d18 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -41,7 +41,7 @@ static void transient_timer_function(unsigned long data)
struct transient_trig_data *transient_data = led_cdev->trigger_data;
transient_data->activate = 0;
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev, transient_data->restore_state);
}
static ssize_t transient_activate_show(struct device *dev,
@@ -72,7 +72,8 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 0 && transient_data->activate == 1) {
del_timer(&transient_data->timer);
transient_data->activate = state;
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev,
+ transient_data->restore_state);
return size;
}
@@ -80,7 +81,7 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 1 && transient_data->activate == 0 &&
transient_data->duration != 0) {
transient_data->activate = state;
- __led_set_brightness(led_cdev, transient_data->state);
+ led_set_brightness_async(led_cdev, transient_data->state);
transient_data->restore_state =
(transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
mod_timer(&transient_data->timer,
@@ -203,7 +204,8 @@ static void transient_trig_deactivate(struct led_classdev *led_cdev)
if (led_cdev->activated) {
del_timer_sync(&transient_data->timer);
- __led_set_brightness(led_cdev, transient_data->restore_state);
+ led_set_brightness_async(led_cdev,
+ transient_data->restore_state);
device_remove_file(led_cdev->dev, &dev_attr_activate);
device_remove_file(led_cdev->dev, &dev_attr_duration);
device_remove_file(led_cdev->dev, &dev_attr_state);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index d0a1d8a45c81..89088d6538fd 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -94,7 +94,7 @@ static unsigned desc_size(const struct lguest_device_desc *desc)
}
/* This gets the device's feature bits. */
-static u32 lg_get_features(struct virtio_device *vdev)
+static u64 lg_get_features(struct virtio_device *vdev)
{
unsigned int i;
u32 features = 0;
@@ -126,7 +126,7 @@ static void status_notify(struct virtio_device *vdev)
* sorted out, this routine is called so we can tell the Host which features we
* understand and accept.
*/
-static void lg_finalize_features(struct virtio_device *vdev)
+static int lg_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
@@ -136,20 +136,25 @@ static void lg_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
/*
- * The vdev->feature array is a Linux bitmask: this isn't the same as a
- * the simple array of bits used by lguest devices for features. So we
- * do this slow, manual conversion which is completely general.
+ * Since lguest is currently x86-only, we're little-endian. That
+ * means we could just memcpy. But it's not time critical, and in
+ * case someone copies this code, we do it the slow, obvious way.
*/
memset(out_features, 0, desc->feature_len);
bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
- if (test_bit(i, vdev->features))
+ if (__virtio_test_bit(vdev, i))
out_features[i / 8] |= (1 << (i % 8));
}
/* Tell Host we've finished with this device's feature negotiation */
status_notify(vdev);
+
+ return 0;
}
/* Once they've found a field, getting a copy of it is easy. */
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4eab93aa570b..10ae69bcbbd2 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -667,7 +667,6 @@ static struct platform_driver smu_of_platform_driver =
{
.driver = {
.name = "smu",
- .owner = THIS_MODULE,
.of_match_table = smu_platform_match,
},
.probe = smu_platform_probe,
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 97cfc5ac9fd0..7ed92582d2cf 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2243,7 +2243,6 @@ static struct platform_driver fcu_of_platform_driver =
{
.driver = {
.name = "temperature",
- .owner = THIS_MODULE,
.of_match_table = fcu_match,
},
.probe = fcu_of_probe,
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3b4a157714b1..109dcaa15934 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -463,7 +463,6 @@ static const struct of_device_id therm_of_match[] = {{
static struct platform_driver therm_of_driver = {
.driver = {
.name = "temperature",
- .owner = THIS_MODULE,
.of_match_table = therm_of_match,
},
.probe = therm_of_probe,
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 2a5e1b15b1d2..93faf298a3c5 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -770,7 +770,6 @@ static struct platform_driver wf_smu_driver = {
.remove = wf_smu_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index a8ac66cd3b13..81fdf40c5b82 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -699,7 +699,6 @@ static struct platform_driver wf_smu_driver = {
.remove = wf_smu_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 9fd9c6717e0c..c04fed9eb15d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -33,4 +33,16 @@ config OMAP_MBOX_KFIFO_SIZE
Specify the default size of mailbox's kfifo buffers (bytes).
This can also be changed at runtime (via the mbox_kfifo_size
module parameter).
+
+config PCC
+ bool "Platform Communication Channel Driver"
+ depends on ACPI
+ help
+ ACPI 5.0+ spec defines a generic mode of communication
+ between the OS and a platform such as the BMC. This medium
+ (PCC) is typically used by CPPC (ACPI CPU Performance management),
+ RAS (ACPI reliability protocol) and MPST (ACPI Memory power
+ states). Select this driver if your platform implements the
+ PCC clients mentioned above.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 94ed7cefb14d..dd412c22208b 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_MAILBOX) += mailbox.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+
+obj-$(CONFIG_PCC) += pcc.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index afcb430508ec..59aad4d5da53 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -21,13 +21,13 @@
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
-#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
-#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
-#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */
+#include "mailbox.h"
static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);
+static void poll_txdone(unsigned long data);
+
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
@@ -60,7 +60,7 @@ static void msg_submit(struct mbox_chan *chan)
unsigned count, idx;
unsigned long flags;
void *data;
- int err;
+ int err = -EBUSY;
spin_lock_irqsave(&chan->lock, flags);
@@ -76,6 +76,8 @@ static void msg_submit(struct mbox_chan *chan)
data = chan->msg_data[idx];
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
/* Try to submit a message to the MBOX controller */
err = chan->mbox->ops->send_data(chan, data);
if (!err) {
@@ -84,6 +86,9 @@ static void msg_submit(struct mbox_chan *chan)
}
exit:
spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (!err && chan->txdone_method == TXDONE_BY_POLL)
+ poll_txdone((unsigned long)chan->mbox);
}
static void tx_tick(struct mbox_chan *chan, int r)
@@ -117,10 +122,11 @@ static void poll_txdone(unsigned long data)
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
- resched = true;
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
+ else
+ resched = true;
}
}
@@ -252,9 +258,6 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
msg_submit(chan);
- if (chan->txdone_method == TXDONE_BY_POLL)
- poll_txdone((unsigned long)chan->mbox);
-
if (chan->cl->tx_block && chan->active_req) {
unsigned long wait;
int ret;
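
For orientation, here is a minimal client-side sketch of the reworked framework exercised by the mailbox.c hunks above. It is an illustrative fragment, not code from this series: the device, payload and timeout are assumptions, while the mbox_client fields (tx_prepare, tx_block, rx_callback) and mbox_request_channel()/mbox_send_message()/mbox_free_channel() are the framework API these hunks touch.

/* Hypothetical mailbox client showing the tx_prepare hook and a blocking TX. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
	dev_info(cl->dev, "received reply %p\n", msg);
}

static void demo_tx_prepare(struct mbox_client *cl, void *msg)
{
	/* Called from msg_submit() just before ->send_data(); last chance
	 * to finalise the payload (e.g. fill a shared-memory region). */
	dev_dbg(cl->dev, "about to transmit %p\n", msg);
}

static int demo_send(struct device *dev, void *payload)
{
	struct mbox_client cl = {
		.dev         = dev,
		.rx_callback = demo_rx_callback,
		.tx_prepare  = demo_tx_prepare,
		.tx_block    = true,	/* sleep until the controller reports TX done */
		.tx_tout     = 500,	/* ms */
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);	/* first "mboxes" entry of dev */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, payload);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}

Note that for TXDONE_BY_POLL controllers msg_submit() now kicks poll_txdone() itself, which is why the explicit call was dropped from mbox_send_message() in the hunk above.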
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
new file mode 100644
index 000000000000..456ba68513bb
--- /dev/null
+++ b/drivers/mailbox/mailbox.h
@@ -0,0 +1,14 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_H
+#define __MAILBOX_H
+
+#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
+
+#endif /* __MAILBOX_H */
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index bcc7ee129276..0f332c178b07 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -29,13 +29,14 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
-#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/mailbox-omap.h>
#include <linux/omap-mailbox.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
#define MAILBOX_REVISION 0x000
#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
@@ -80,7 +81,6 @@ struct omap_mbox_queue {
spinlock_t lock;
struct kfifo fifo;
struct work_struct work;
- struct tasklet_struct tasklet;
struct omap_mbox *mbox;
bool full;
};
@@ -92,6 +92,7 @@ struct omap_mbox_device {
u32 num_users;
u32 num_fifos;
struct omap_mbox **mboxes;
+ struct mbox_controller controller;
struct list_head elem;
};
@@ -110,15 +111,14 @@ struct omap_mbox_fifo_info {
struct omap_mbox {
const char *name;
int irq;
- struct omap_mbox_queue *txq, *rxq;
+ struct omap_mbox_queue *rxq;
struct device *dev;
struct omap_mbox_device *parent;
struct omap_mbox_fifo tx_fifo;
struct omap_mbox_fifo rx_fifo;
u32 ctx[OMAP4_MBOX_NR_REGS];
u32 intr_type;
- int use_count;
- struct blocking_notifier_head notifier;
+ struct mbox_chan *chan;
};
/* global variables for the mailbox devices */
@@ -129,6 +129,14 @@ static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct omap_mbox *)chan->con_priv;
+}
+
static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
@@ -194,41 +202,14 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
return (int)(enable & status & bit);
}
-/*
- * message sender
- */
-int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
-{
- struct omap_mbox_queue *mq = mbox->txq;
- int ret = 0, len;
-
- spin_lock_bh(&mq->lock);
-
- if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
- mbox_fifo_write(mbox, msg);
- goto out;
- }
-
- len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
- WARN_ON(len != sizeof(msg));
-
- tasklet_schedule(&mbox->txq->tasklet);
-
-out:
- spin_unlock_bh(&mq->lock);
- return ret;
-}
-EXPORT_SYMBOL(omap_mbox_msg_send);
-
-void omap_mbox_save_ctx(struct omap_mbox *mbox)
+void omap_mbox_save_ctx(struct mbox_chan *chan)
{
int i;
int nr_regs;
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
if (mbox->intr_type)
nr_regs = OMAP4_MBOX_NR_REGS;
@@ -243,10 +224,14 @@ void omap_mbox_save_ctx(struct omap_mbox *mbox)
}
EXPORT_SYMBOL(omap_mbox_save_ctx);
-void omap_mbox_restore_ctx(struct omap_mbox *mbox)
+void omap_mbox_restore_ctx(struct mbox_chan *chan)
{
int i;
int nr_regs;
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
if (mbox->intr_type)
nr_regs = OMAP4_MBOX_NR_REGS;
@@ -254,14 +239,13 @@ void omap_mbox_restore_ctx(struct omap_mbox *mbox)
nr_regs = MBOX_NR_REGS;
for (i = 0; i < nr_regs; i++) {
mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
-
dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
i, mbox->ctx[i]);
}
}
EXPORT_SYMBOL(omap_mbox_restore_ctx);
-void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
u32 l;
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
@@ -273,9 +257,8 @@ void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
l |= bit;
mbox_write_reg(mbox->parent, l, irqenable);
}
-EXPORT_SYMBOL(omap_mbox_enable_irq);
-void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
&mbox->tx_fifo : &mbox->rx_fifo;
@@ -291,28 +274,28 @@ void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
mbox_write_reg(mbox->parent, bit, irqdisable);
}
-EXPORT_SYMBOL(omap_mbox_disable_irq);
-static void mbox_tx_tasklet(unsigned long tx_data)
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
{
- struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
- struct omap_mbox_queue *mq = mbox->txq;
- mbox_msg_t msg;
- int ret;
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
- while (kfifo_len(&mq->fifo)) {
- if (mbox_fifo_full(mbox)) {
- omap_mbox_enable_irq(mbox, IRQ_TX);
- break;
- }
+ if (WARN_ON(!mbox))
+ return;
- ret = kfifo_out(&mq->fifo, (unsigned char *)&msg,
- sizeof(msg));
- WARN_ON(ret != sizeof(msg));
+ _omap_mbox_enable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_enable_irq);
- mbox_fifo_write(mbox, msg);
- }
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_disable_irq(mbox, irq);
}
+EXPORT_SYMBOL(omap_mbox_disable_irq);
/*
* Message receiver(workqueue)
@@ -328,12 +311,11 @@ static void mbox_rx_work(struct work_struct *work)
len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
WARN_ON(len != sizeof(msg));
- blocking_notifier_call_chain(&mq->mbox->notifier, len,
- (void *)msg);
+ mbox_chan_received_data(mq->mbox->chan, (void *)msg);
spin_lock_irq(&mq->lock);
if (mq->full) {
mq->full = false;
- omap_mbox_enable_irq(mq->mbox, IRQ_RX);
+ _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
}
spin_unlock_irq(&mq->lock);
}
@@ -344,9 +326,9 @@ static void mbox_rx_work(struct work_struct *work)
*/
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
- omap_mbox_disable_irq(mbox, IRQ_TX);
+ _omap_mbox_disable_irq(mbox, IRQ_TX);
ack_mbox_irq(mbox, IRQ_TX);
- tasklet_schedule(&mbox->txq->tasklet);
+ mbox_chan_txdone(mbox->chan, 0);
}
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
@@ -357,7 +339,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
while (!mbox_fifo_empty(mbox)) {
if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
- omap_mbox_disable_irq(mbox, IRQ_RX);
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
mq->full = true;
goto nomem;
}
@@ -388,11 +370,13 @@ static irqreturn_t mbox_interrupt(int irq, void *p)
}
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
- void (*work) (struct work_struct *),
- void (*tasklet)(unsigned long))
+ void (*work)(struct work_struct *))
{
struct omap_mbox_queue *mq;
+ if (!work)
+ return NULL;
+
mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
if (!mq)
return NULL;
@@ -402,12 +386,9 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
goto error;
- if (work)
- INIT_WORK(&mq->work, work);
-
- if (tasklet)
- tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
+ INIT_WORK(&mq->work, work);
return mq;
+
error:
kfree(mq);
return NULL;
@@ -423,71 +404,35 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
struct omap_mbox_queue *mq;
- struct omap_mbox_device *mdev = mbox->parent;
- mutex_lock(&mdev->cfg_lock);
- ret = pm_runtime_get_sync(mdev->dev);
- if (unlikely(ret < 0))
- goto fail_startup;
-
- if (!mbox->use_count++) {
- mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
- if (!mq) {
- ret = -ENOMEM;
- goto fail_alloc_txq;
- }
- mbox->txq = mq;
+ mq = mbox_queue_alloc(mbox, mbox_rx_work);
+ if (!mq)
+ return -ENOMEM;
+ mbox->rxq = mq;
+ mq->mbox = mbox;
+
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n", ret);
+ goto fail_request_irq;
+ }
- mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
- if (!mq) {
- ret = -ENOMEM;
- goto fail_alloc_rxq;
- }
- mbox->rxq = mq;
- mq->mbox = mbox;
- ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
- mbox->name, mbox);
- if (unlikely(ret)) {
- pr_err("failed to register mailbox interrupt:%d\n",
- ret);
- goto fail_request_irq;
- }
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
- omap_mbox_enable_irq(mbox, IRQ_RX);
- }
- mutex_unlock(&mdev->cfg_lock);
return 0;
fail_request_irq:
mbox_queue_free(mbox->rxq);
-fail_alloc_rxq:
- mbox_queue_free(mbox->txq);
-fail_alloc_txq:
- pm_runtime_put_sync(mdev->dev);
- mbox->use_count--;
-fail_startup:
- mutex_unlock(&mdev->cfg_lock);
return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
- struct omap_mbox_device *mdev = mbox->parent;
-
- mutex_lock(&mdev->cfg_lock);
-
- if (!--mbox->use_count) {
- omap_mbox_disable_irq(mbox, IRQ_RX);
- free_irq(mbox->irq, mbox);
- tasklet_kill(&mbox->txq->tasklet);
- flush_work(&mbox->rxq->work);
- mbox_queue_free(mbox->txq);
- mbox_queue_free(mbox->rxq);
- }
-
- pm_runtime_put_sync(mdev->dev);
-
- mutex_unlock(&mdev->cfg_lock);
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ free_irq(mbox->irq, mbox);
+ flush_work(&mbox->rxq->work);
+ mbox_queue_free(mbox->rxq);
}
static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
@@ -509,42 +454,55 @@ static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
return mbox;
}
-struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name)
{
+ struct device *dev = cl->dev;
struct omap_mbox *mbox = NULL;
struct omap_mbox_device *mdev;
+ struct mbox_chan *chan;
+ unsigned long flags;
int ret;
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (dev->of_node) {
+ pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
mutex_lock(&omap_mbox_devices_lock);
list_for_each_entry(mdev, &omap_mbox_devices, elem) {
- mbox = omap_mbox_device_find(mdev, name);
+ mbox = omap_mbox_device_find(mdev, chan_name);
if (mbox)
break;
}
mutex_unlock(&omap_mbox_devices_lock);
- if (!mbox)
+ if (!mbox || !mbox->chan)
return ERR_PTR(-ENOENT);
- if (nb)
- blocking_notifier_chain_register(&mbox->notifier, nb);
+ chan = mbox->chan;
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+ spin_unlock_irqrestore(&chan->lock, flags);
- ret = omap_mbox_startup(mbox);
+ ret = chan->mbox->ops->startup(chan);
if (ret) {
- blocking_notifier_chain_unregister(&mbox->notifier, nb);
- return ERR_PTR(-ENODEV);
+ pr_err("Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
}
- return mbox;
-}
-EXPORT_SYMBOL(omap_mbox_get);
-
-void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&mbox->notifier, nb);
- omap_mbox_fini(mbox);
+ return chan;
}
-EXPORT_SYMBOL(omap_mbox_put);
+EXPORT_SYMBOL(omap_mbox_request_channel);
static struct class omap_mbox_class = { .name = "mbox", };
@@ -560,25 +518,25 @@ static int omap_mbox_register(struct omap_mbox_device *mdev)
mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++) {
struct omap_mbox *mbox = mboxes[i];
- mbox->dev = device_create(&omap_mbox_class,
- mdev->dev, 0, mbox, "%s", mbox->name);
+ mbox->dev = device_create(&omap_mbox_class, mdev->dev,
+ 0, mbox, "%s", mbox->name);
if (IS_ERR(mbox->dev)) {
ret = PTR_ERR(mbox->dev);
goto err_out;
}
-
- BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
}
mutex_lock(&omap_mbox_devices_lock);
list_add(&mdev->elem, &omap_mbox_devices);
mutex_unlock(&omap_mbox_devices_lock);
- return 0;
+ ret = mbox_controller_register(&mdev->controller);
err_out:
- while (i--)
- device_unregister(mboxes[i]->dev);
+ if (ret) {
+ while (i--)
+ device_unregister(mboxes[i]->dev);
+ }
return ret;
}
@@ -594,12 +552,64 @@ static int omap_mbox_unregister(struct omap_mbox_device *mdev)
list_del(&mdev->elem);
mutex_unlock(&omap_mbox_devices_lock);
+ mbox_controller_unregister(&mdev->controller);
+
mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
return 0;
}
+static int omap_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+ int ret = 0;
+
+ mutex_lock(&mdev->cfg_lock);
+ pm_runtime_get_sync(mdev->dev);
+ ret = omap_mbox_startup(mbox);
+ if (ret)
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+ return ret;
+}
+
+static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
+ omap_mbox_fini(mbox);
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+}
+
+static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ int ret = -EBUSY;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox_fifo_full(mbox)) {
+ mbox_fifo_write(mbox, (mbox_msg_t)data);
+ ret = 0;
+ }
+
+ /* always enable the interrupt */
+ _omap_mbox_enable_irq(mbox, IRQ_TX);
+ return ret;
+}
+
+static struct mbox_chan_ops omap_mbox_chan_ops = {
+ .startup = omap_mbox_chan_startup,
+ .send_data = omap_mbox_chan_send_data,
+ .shutdown = omap_mbox_chan_shutdown,
+};
+
static const struct of_device_id omap_mailbox_of_match[] = {
{
.compatible = "ti,omap2-mailbox",
@@ -619,10 +629,35 @@ static const struct of_device_id omap_mailbox_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
+static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *sp)
+{
+ phandle phandle = sp->args[0];
+ struct device_node *node;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox *mbox;
+
+ mdev = container_of(controller, struct omap_mbox_device, controller);
+ if (WARN_ON(!mdev))
+ return NULL;
+
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find node phandle 0x%x\n",
+ __func__, phandle);
+ return NULL;
+ }
+
+ mbox = omap_mbox_device_find(mdev, node->name);
+ of_node_put(node);
+ return mbox ? mbox->chan : NULL;
+}
+
static int omap_mbox_probe(struct platform_device *pdev)
{
struct resource *mem;
int ret;
+ struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
struct omap_mbox_dev_info *info = NULL;
@@ -727,6 +762,11 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!list)
return -ENOMEM;
+ chnls = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*chnls),
+ GFP_KERNEL);
+ if (!chnls)
+ return -ENOMEM;
+
mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
GFP_KERNEL);
if (!mboxblk)
@@ -758,6 +798,8 @@ static int omap_mbox_probe(struct platform_device *pdev)
mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
if (mbox->irq < 0)
return mbox->irq;
+ mbox->chan = &chnls[i];
+ chnls[i].con_priv = mbox;
list[i] = mbox++;
}
@@ -766,6 +808,14 @@ static int omap_mbox_probe(struct platform_device *pdev)
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
mdev->mboxes = list;
+
+ /* OMAP does not have a Tx-Done IRQ, but rather a Tx-Ready IRQ */
+ mdev->controller.txdone_irq = true;
+ mdev->controller.dev = mdev->dev;
+ mdev->controller.ops = &omap_mbox_chan_ops;
+ mdev->controller.chans = chnls;
+ mdev->controller.num_chans = info_count;
+ mdev->controller.of_xlate = omap_mbox_of_xlate;
ret = omap_mbox_register(mdev);
if (ret)
return ret;
@@ -814,7 +864,6 @@ static struct platform_driver omap_mbox_driver = {
.remove = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(omap_mailbox_of_match),
},
};
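
For the legacy users this conversion targets, a rough, hypothetical sketch of the non-DT path is shown below; the channel name and message value are invented, but omap_mbox_request_channel(), the mbox_client fields and mbox_send_message() carry the signatures introduced or kept by the hunks above.

/* Hypothetical non-DT OMAP client using the compatibility shim above. */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/omap-mailbox.h>

static void omap_demo_rx(struct mbox_client *cl, void *msg)
{
	pr_info("omap mbox rx: 0x%lx\n", (unsigned long)msg);
}

static int omap_demo_open(struct device *dev)
{
	static struct mbox_client cl;
	struct mbox_chan *chan;
	int ret;

	cl.dev = dev;			/* must not carry an of_node, see above */
	cl.rx_callback = omap_demo_rx;
	cl.tx_block = false;		/* Tx-ready IRQ signals txdone */

	chan = omap_mbox_request_channel(&cl, "wkup_m3");	/* made-up name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, (void *)0xcafef00d);

	/* Channel stays open for RX; freed elsewhere with mbox_free_channel(). */
	return ret < 0 ? ret : 0;
}

DT users instead go through the generic mbox_request_channel(); the of_xlate hook added above resolves the phandle in the client's "mboxes" property to the matching sub-mailbox by node name.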
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
new file mode 100644
index 000000000000..6dbf6fcbdfaf
--- /dev/null
+++ b/drivers/mailbox/pcc.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * PCC (Platform Communication Channel) is defined in the ACPI 5.0+
+ * specification. It is a mailbox like mechanism to allow clients
+ * such as CPPC (Collaborative Processor Performance Control), RAS
+ * (Reliability, Availability and Serviceability) and MPST (Memory
+ * Node Power State Table) to talk to the platform (e.g. BMC) through
+ * shared memory regions as defined in the PCC table entries. The PCC
+ * specification supports a Doorbell mechanism for the PCC clients
+ * to notify the platform about new data. This Doorbell information
+ * is also specified in each PCC table entry. See pcc_send_data()
+ * and pcc_tx_done() for basic mode of operation.
+ *
+ * For more details about PCC, please see the ACPI specification from
+ * http://www.uefi.org/ACPIv5.1 Section 14.
+ *
+ * This file implements PCC as a Mailbox controller and allows for PCC
+ * clients to be implemented as its Mailbox Client Channels.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#include "mailbox.h"
+
+#define MAX_PCC_SUBSPACES 256
+#define PCCS_SS_SIG_MAGIC 0x50434300
+#define PCC_CMD_COMPLETE 0x1
+
+static struct mbox_chan *pcc_mbox_channels;
+
+static struct mbox_controller pcc_mbox_ctrl = {};
+/**
+ * get_pcc_channel - Given a PCC subspace idx, get
+ * the respective mbox_channel.
+ * @id: PCC subspace index.
+ *
+ * Return: ERR_PTR(errno) if error, else pointer
+ * to mbox channel.
+ */
+static struct mbox_chan *get_pcc_channel(int id)
+{
+ struct mbox_chan *pcc_chan;
+
+ if (id < 0 || id > pcc_mbox_ctrl.num_chans)
+ return ERR_PTR(-ENOENT);
+
+ pcc_chan = (struct mbox_chan *)
+ ((unsigned long) pcc_mbox_channels +
+ (id * sizeof(*pcc_chan)));
+
+ return pcc_chan;
+}
+
+/**
+ * get_subspace_id - Given a Mailbox channel, find out the
+ * PCC subspace id.
+ * @chan: Pointer to Mailbox Channel from which we want
+ * the index.
+ * Return: Errno if not found, else positive index number.
+ */
+static int get_subspace_id(struct mbox_chan *chan)
+{
+ unsigned int id = chan - pcc_mbox_channels;
+
+ if (id < 0 || id > pcc_mbox_ctrl.num_chans)
+ return -ENOENT;
+
+ return id;
+}
+
+/**
+ * pcc_mbox_request_channel - PCC clients call this function to
+ * request a pointer to their PCC subspace, from which they
+ * can get the details of communicating with the remote.
+ * @cl: Pointer to Mailbox client, so we know where to bind the
+ * Channel.
+ * @subspace_id: The PCC Subspace index as parsed in the PCC client
+ * ACPI package. This is used to lookup the array of PCC
+ * subspaces as parsed by the PCC Mailbox controller.
+ *
+ * Return: Pointer to the Mailbox Channel if successful or
+ * ERR_PTR.
+ */
+struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
+ int subspace_id)
+{
+ struct device *dev = pcc_mbox_ctrl.dev;
+ struct mbox_chan *chan;
+ unsigned long flags;
+
+ /*
+ * Each PCC Subspace is a Mailbox Channel.
+ * The PCC Clients get their PCC Subspace ID
+ * from their own tables and pass it here.
+ * This returns a pointer to the PCC subspace
+ * for the Client to operate on.
+ */
+ chan = get_pcc_channel(subspace_id);
+
+ if (!chan || chan->cl) {
+ dev_err(dev, "%s: PCC mailbox not free\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method |= TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
+
+/**
+ * pcc_mbox_free_channel - Clients call this to free their Channel.
+ *
+ * @chan: Pointer to the mailbox channel as returned by
+ * pcc_mbox_request_channel()
+ */
+void pcc_mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
+/**
+ * pcc_tx_done - Callback from Mailbox controller code to
+ * check if PCC message transmission completed.
+ * @chan: Pointer to Mailbox channel on which previous
+ * transmission occurred.
+ *
+ * Return: TRUE if succeeded.
+ */
+static bool pcc_tx_done(struct mbox_chan *chan)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
+ struct acpi_pcct_shared_memory *generic_comm_base =
+ (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
+ u16 cmd_delay = pcct_ss->latency;
+ unsigned int retries = 0;
+
+ /* Try a few times while waiting for platform to consume */
+ while (!(readw_relaxed(&generic_comm_base->status)
+ & PCC_CMD_COMPLETE)) {
+
+ if (retries++ < 5)
+ udelay(cmd_delay);
+ else {
+ /*
+ * If the remote is dead, this will cause the Mbox
+ * controller to time out after the mbox client's
+ * tx_tout msecs.
+ */
+ pr_err("PCC platform did not respond.\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * pcc_send_data - Called from Mailbox Controller code to finally
+ * transmit data over channel.
+ * @chan: Pointer to Mailbox channel over which to send data.
+ * @data: Actual data to be written over channel.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
+ struct acpi_pcct_shared_memory *generic_comm_base =
+ (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
+ struct acpi_generic_address doorbell;
+ u64 doorbell_preserve;
+ u64 doorbell_val;
+ u64 doorbell_write;
+ u16 cmd = *(u16 *) data;
+ int ss_idx;
+
+ ss_idx = get_subspace_id(chan);
+
+ if (ss_idx < 0) {
+ pr_err("Invalid Subspace ID from PCC client\n");
+ return -EINVAL;
+ }
+
+ doorbell = pcct_ss->doorbell_register;
+ doorbell_preserve = pcct_ss->preserve_mask;
+ doorbell_write = pcct_ss->write_mask;
+
+ /* Write to the shared comm region. */
+ writew(cmd, &generic_comm_base->command);
+
+ /* Write Subspace MAGIC value so platform can identify destination. */
+ writel((PCCS_SS_SIG_MAGIC | ss_idx), &generic_comm_base->signature);
+
+ /* Flip CMD COMPLETE bit */
+ writew(0, &generic_comm_base->status);
+
+ /* Sync notification from OSPM to Platform. */
+ acpi_read(&doorbell_val, &doorbell);
+ acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+ &doorbell);
+
+ return 0;
+}
+
+static struct mbox_chan_ops pcc_chan_ops = {
+ .send_data = pcc_send_data,
+ .last_tx_done = pcc_tx_done,
+};
+
+/**
+ * parse_pcc_subspace - Parse the PCC table and verify PCC subspace
+ * entries. There should be one entry per PCC client.
+ * @header: Pointer to the ACPI subtable header under the PCCT.
+ * @end: End of subtable entry.
+ *
+ * Return: 0 for Success, else errno.
+ *
+ * This gets called for each entry in the PCC table.
+ */
+static int parse_pcc_subspace(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) {
+ pcct_ss = (struct acpi_pcct_hw_reduced *) header;
+
+ if (pcct_ss->header.type !=
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) {
+ pr_err("Incorrect PCC Subspace type detected\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int __init acpi_pcc_probe(void)
+{
+ acpi_size pcct_tbl_header_size;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ int count, i;
+ acpi_status status = AE_OK;
+
+ /* Search for PCCT */
+ status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
+ &pcct_tbl,
+ &pcct_tbl_header_size);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl) {
+ pr_warn("PCCT header not found.\n");
+ return -ENODEV;
+ }
+
+ count = acpi_table_parse_entries(ACPI_SIG_PCCT,
+ sizeof(struct acpi_table_pcct),
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE,
+ parse_pcc_subspace, MAX_PCC_SUBSPACES);
+
+ if (count <= 0) {
+ pr_err("Error parsing PCC subspaces from PCCT\n");
+ return -EINVAL;
+ }
+
+ pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) *
+ count, GFP_KERNEL);
+
+ if (!pcc_mbox_channels) {
+ pr_err("Could not allocate space for PCC mbox channels\n");
+ return -ENOMEM;
+ }
+
+ /* Point to the first PCC subspace entry */
+ pcct_entry = (struct acpi_subtable_header *) (
+ (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
+
+ for (i = 0; i < count; i++) {
+ pcc_mbox_channels[i].con_priv = pcct_entry;
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
+ }
+
+ pcc_mbox_ctrl.num_chans = count;
+
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+
+ return 0;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ pcc_mbox_ctrl.chans = pcc_mbox_channels;
+ pcc_mbox_ctrl.ops = &pcc_chan_ops;
+ pcc_mbox_ctrl.txdone_poll = true;
+ pcc_mbox_ctrl.txpoll_period = 10;
+ pcc_mbox_ctrl.dev = &pdev->dev;
+
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ ret = mbox_controller_register(&pcc_mbox_ctrl);
+
+ if (ret) {
+ pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct platform_driver pcc_mbox_driver = {
+ .probe = pcc_mbox_probe,
+ .driver = {
+ .name = "PCCT",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pcc_init(void)
+{
+ int ret;
+ struct platform_device *pcc_pdev;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* Check if PCC support is available. */
+ ret = acpi_pcc_probe();
+
+ if (ret) {
+ pr_err("ACPI PCC probe failed.\n");
+ return -ENODEV;
+ }
+
+ pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
+ pcc_mbox_probe, NULL, 0, NULL, 0);
+
+ if (!pcc_pdev) {
+ pr_err("Err creating PCC platform bundle\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+device_initcall(pcc_init);
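
To make the PCC flow concrete, a minimal, hypothetical client sketch follows; the subspace index, command value and extern declarations are assumptions (this series adds no public header for these entry points), but pcc_mbox_request_channel(), mbox_send_message() and pcc_mbox_free_channel() are the interfaces the new driver exports.

/* Hypothetical PCC client (CPPC/RAS/MPST-style) issuing one command. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>

/* Exported by drivers/mailbox/pcc.c above; declared here for illustration. */
extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
						  int subspace_id);
extern void pcc_mbox_free_channel(struct mbox_chan *chan);

static int pcc_demo_send_cmd(struct device *dev, u16 cmd)
{
	struct mbox_client cl = {
		.dev      = dev,
		.tx_block = true,	/* controller polls pcc_tx_done() */
		.tx_tout  = 100,	/* ms */
	};
	struct mbox_chan *chan;
	int ret;

	/* Subspace 0 is assumed; real clients parse the index from their
	 * own ACPI tables and pass it here. */
	chan = pcc_mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* pcc_send_data() reads a u16 command word from the payload and
	 * rings the doorbell described in the PCC subspace entry. */
	ret = mbox_send_message(chan, &cmd);

	pcc_mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}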
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 62e6e98186b5..ab43faddb447 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -601,13 +601,8 @@ static void request_endio(struct bio *bio, int error)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- int cpu, rw = bio_data_dir(s->orig_bio);
- unsigned long duration = jiffies - s->start_time;
-
- cpu = part_stat_lock();
- part_round_stats(cpu, &s->d->disk->part0);
- part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
- part_stat_unlock();
+ generic_end_io_acct(bio_data_dir(s->orig_bio),
+ &s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
bio_endio(s->orig_bio, s->iop.error);
@@ -959,12 +954,9 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
struct search *s;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- int cpu, rw = bio_data_dir(bio);
+ int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &d->disk->part0, ios[rw]);
- part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
bio->bi_bdev = dc->bdev;
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1074,12 +1066,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
struct search *s;
struct closure *cl;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
- int cpu, rw = bio_data_dir(bio);
+ int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &d->disk->part0, ios[rw]);
- part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
s = search_alloc(bio, d);
cl = &s->cl;
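
The bcache, dm and md hunks in this series all apply the same substitution; a condensed, hypothetical sketch of the resulting pattern in a bio-based driver is shown below (the demo_io structure is illustrative, while the generic_start_io_acct()/generic_end_io_acct() calls mirror the signatures used above).

/* Sketch of the generic I/O accounting pattern these conversions adopt. */
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

struct demo_io {
	struct gendisk *disk;		/* illustrative per-request state */
	unsigned long start_time;
};

static void demo_start_io(struct demo_io *io, struct bio *bio)
{
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;
	/* Replaces the open-coded part_stat_lock()/part_stat_inc()/
	 * part_stat_add()/part_stat_unlock() sequence. */
	generic_start_io_acct(rw, bio_sectors(bio), &io->disk->part0);
}

static void demo_end_io(struct demo_io *io, struct bio *bio)
{
	/* Replaces part_round_stats() + part_stat_add(ticks[rw], duration). */
	generic_end_io_acct(bio_data_dir(bio), &io->disk->part0,
			    io->start_time);
}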
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8f37ed215b19..4c06585bf165 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -605,13 +605,10 @@ static void end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
- int pending, cpu;
+ int pending;
int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_round_stats(cpu, &dm_disk(md)->part0);
- part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
- part_stat_unlock();
+ generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
@@ -1651,16 +1648,12 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
- int cpu;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
- part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9233c71138f1..709755fb6d7b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -247,7 +247,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
- int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
@@ -284,10 +283,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
- part_stat_unlock();
+ generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
@@ -2695,7 +2691,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
- if (mddev->sync_thread)
+ if (mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
@@ -3272,6 +3269,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
*/
if (mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
return -EBUSY;
@@ -4026,6 +4024,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -5044,6 +5043,7 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -5104,19 +5104,22 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
- if (mddev->sync_thread) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- }
+
mddev_unlock(mddev);
- wait_event(resync_wait, mddev->sync_thread == NULL);
+ wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+ &mddev->recovery));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
@@ -5162,20 +5165,24 @@ static int do_md_stop(struct mddev *mddev, int mode,
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
- if (mddev->sync_thread) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
- }
+
mddev_unlock(mddev);
- wait_event(resync_wait, mddev->sync_thread == NULL);
+ wait_event(resync_wait, (mddev->sync_thread == NULL &&
+ !test_bit(MD_RECOVERY_RUNNING,
+ &mddev->recovery)));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sysfs_active ||
mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
@@ -5950,7 +5957,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
- if (mddev->sync_thread)
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->sync_thread)
return -EBUSY;
if (mddev->ro)
return -EROFS;
@@ -5981,7 +5989,9 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
- if (mddev->sync_thread || mddev->reshape_position != MaxSector)
+ if (mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->reshape_position != MaxSector)
return -EBUSY;
rdev_for_each(rdev, mddev) {
@@ -6969,7 +6979,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
int mask;
if (md_unloading)
- return POLLIN|POLLRDNORM|POLLERR|POLLPRI;;
+ return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
@@ -7593,6 +7603,7 @@ static void md_start_sync(struct work_struct *ws)
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
@@ -7761,6 +7772,7 @@ void md_check_recovery(struct mddev *mddev)
not_running:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
@@ -7779,7 +7791,6 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
- wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
@@ -7807,6 +7818,7 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c66e5997fc8..c1b0d52bfcb0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2917,8 +2917,11 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
(!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
!test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
- (sh->raid_conf->level == 6 && s->failed && s->to_write &&
- s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
+ ((sh->raid_conf->level == 6 ||
+ sh->sector >= sh->raid_conf->mddev->recovery_cp)
+ && s->failed && s->to_write &&
+ (s->to_write - s->non_overwrite <
+ sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) &&
(!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 3c89fcbc621e..49cd30870e0d 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -160,7 +160,6 @@ source "drivers/media/usb/Kconfig"
source "drivers/media/pci/Kconfig"
source "drivers/media/platform/Kconfig"
source "drivers/media/mmc/Kconfig"
-source "drivers/media/parport/Kconfig"
source "drivers/media/radio/Kconfig"
comment "Supported FireWire (IEEE 1394) Adapters"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 620f275a45c9..e608bbce0c35 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -28,6 +28,6 @@ obj-y += rc/
# Finally, merge the drivers that require the core
#
-obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ parport/
+obj-y += common/ platform/ pci/ usb/ mmc/ firewire/
obj-$(CONFIG_VIDEO_DEV) += radio/
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index be763150b8aa..c07b9db51b05 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -931,6 +931,35 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
}
}
+/* Check for correctness of the ctrl's value based on the data from
+ struct v4l2_queryctrl and the available menu items. Note that
+ menu_items may be NULL, in which case it is ignored. */
+static int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
+ const char * const *menu_items)
+{
+ if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
+ return -EINVAL;
+ if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ return -EBUSY;
+ if (qctrl->type == V4L2_CTRL_TYPE_STRING)
+ return 0;
+ if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
+ qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return 0;
+ if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
+ return -ERANGE;
+ if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
+ if (menu_items[ctrl->value] == NULL ||
+ menu_items[ctrl->value][0] == '\0')
+ return -EINVAL;
+ }
+ if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
+ (ctrl->value & ~qctrl->maximum))
+ return -ERANGE;
+ return 0;
+}
+
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy,
struct v4l2_ext_controls *ctrls, unsigned int cmd)
{
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 4418119cf707..1ff9f5323bc3 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -71,7 +71,7 @@ static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev,
if (saa7146_read(dev, MC2) & 2)
break;
if (err) {
- pr_err("%s: %s timed out while waiting for registers getting programmed\n",
+ pr_debug("%s: %s timed out while waiting for registers getting programmed\n",
dev->name, __func__);
return -ETIMEDOUT;
}
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 273043ea8f47..35d0e887bd65 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -107,8 +107,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
void sms_ir_exit(struct smscore_device_t *coredev)
{
- if (coredev->ir.dev)
- rc_unregister_device(coredev->ir.dev);
+ rc_unregister_device(coredev->ir.dev);
sms_log("");
}
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index c7dace671a9d..47da0378cad8 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -286,9 +286,17 @@ static const struct {
{ TUNER_ABSENT, "Xceive XC5200C"},
{ TUNER_ABSENT, "NXP 18273"},
{ TUNER_ABSENT, "Montage M88TS2022"},
- /* 180-189 */
+ /* 180-188 */
{ TUNER_ABSENT, "NXP 18272M"},
{ TUNER_ABSENT, "NXP 18272S"},
+
+ { TUNER_ABSENT, "Mirics MSi003"},
+ { TUNER_ABSENT, "MaxLinear MxL256"},
+ { TUNER_ABSENT, "SiLabs Si2158"},
+ { TUNER_ABSENT, "SiLabs Si2178"},
+ { TUNER_ABSENT, "SiLabs Si2157"},
+ { TUNER_ABSENT, "SiLabs Si2177"},
+ { TUNER_ABSENT, "ITE IT9137FN"},
};
/* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are
@@ -351,6 +359,16 @@ static const struct {
{ TVEEPROM_AUDPROC_INTERNAL, "CX23887" },
{ TVEEPROM_AUDPROC_INTERNAL, "SAA7164" },
{ TVEEPROM_AUDPROC_INTERNAL, "AU8522" },
+ /* 45-49 */
+ { TVEEPROM_AUDPROC_INTERNAL, "AVF4910B" },
+ { TVEEPROM_AUDPROC_INTERNAL, "SAA7231" },
+ { TVEEPROM_AUDPROC_INTERNAL, "CX23102" },
+ { TVEEPROM_AUDPROC_INTERNAL, "SAA7163" },
+ { TVEEPROM_AUDPROC_OTHER, "AK4113" },
+ /* 50-52 */
+ { TVEEPROM_AUDPROC_OTHER, "CS5340" },
+ { TVEEPROM_AUDPROC_OTHER, "CS8416" },
+ { TVEEPROM_AUDPROC_OTHER, "CX20810" },
};
/* This list is supplied by Hauppauge. Thanks! */
@@ -371,8 +389,12 @@ static const char *decoderIC[] = {
"CX25843", "CX23418", "NEC61153", "CX23885", "CX23888",
/* 35-39 */
"SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A",
- /* 40-42 */
- "SAA7164", "CX23885B", "AU8522"
+ /* 40-44 */
+ "SAA7164", "CX23885B", "AU8522", "ADV7401", "AVF4910B",
+ /* 45-49 */
+ "SAA7231", "CX23102", "SAA7163", "ADV7441A", "ADV7181C",
+ /* 50-53 */
+ "CX25836", "TDA9955", "TDA19977", "ADV7842"
};
static int hasRadioTuner(int tunerType)
@@ -548,10 +570,10 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
tvee->serial_number =
eeprom_data[i+5] +
(eeprom_data[i+6] << 8) +
- (eeprom_data[i+7] << 16);
+ (eeprom_data[i+7] << 16)+
+ (eeprom_data[i+8] << 24);
- if ((eeprom_data[i + 8] & 0xf0) &&
- (tvee->serial_number < 0xffffff)) {
+ if (eeprom_data[i + 8] == 0xf0) {
tvee->MAC_address[0] = 0x00;
tvee->MAC_address[1] = 0x0D;
tvee->MAC_address[2] = 0xFE;
@@ -696,7 +718,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
}
}
- tveeprom_info("Hauppauge model %d, rev %s, serial# %d\n",
+ tveeprom_info("Hauppauge model %d, rev %s, serial# %u\n",
tvee->model, tvee->rev_str, tvee->serial_number);
if (tvee->has_MAC_address == 1)
tveeprom_info("MAC address is %pM\n", tvee->MAC_address);
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index e07a84e7bc56..80ab8d0ff6e0 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -356,6 +356,7 @@
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_MYGICA_T230 0xc688
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 059e6117f22b..e4041f074909 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -379,7 +379,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
/* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */
if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
- priv->ts_count, ts[0], ts[1] & TS_TEI >> 7, ts[3] & 0xC0 >> 6);
+ priv->ts_count, ts[0],
+ (ts[1] & TS_TEI) >> 7,
+ (ts[3] & TS_SC) >> 6);
/* Drop partly decoded SNDU, reset state, resync on PUSI. */
if (priv->ule_skb) {
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 5a134547e325..6c75418222e2 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -648,12 +648,15 @@ config DVB_MB86A20S
A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator.
Say Y when you want to support this frontend.
+comment "ISDB-S (satellite) & ISDB-T (terrestrial) frontends"
+ depends on DVB_CORE
+
config DVB_TC90522
tristate "Toshiba TC90522"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- A Toshiba TC90522 2xISDB-T + 2xISDB-S demodulator.
+ Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
Say Y when you want to support this frontend.
comment "Digital terrestrial only tuners/PLL"
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 63a89c1c59ff..82ce47bdf5dc 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -291,6 +291,12 @@ static int af9033_init(struct dvb_frontend *fe)
if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
+ if (i == ARRAY_SIZE(clock_adc_lut)) {
+ dev_err(&dev->client->dev,
+ "Couldn't find ADC config for clock=%d\n",
+ dev->cfg.clock);
+ goto err;
+ }
adc_cw = af9033_div(dev, clock_adc_lut[i].adc, 1000000ul, 19ul);
buf[0] = (adc_cw >> 0) & 0xff;
@@ -580,7 +586,15 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
break;
}
}
- ret = af9033_wr_regs(dev, 0x800001,
+ if (i == ARRAY_SIZE(coeff_lut)) {
+ dev_err(&dev->client->dev,
+ "Couldn't find LUT config for clock=%d\n",
+ dev->cfg.clock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = af9033_wr_regs(dev, 0x800001,
coeff_lut[i].val, sizeof(coeff_lut[i].val));
}
@@ -592,6 +606,13 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
+ if (i == ARRAY_SIZE(clock_adc_lut)) {
+ dev_err(&dev->client->dev,
+ "Couldn't find ADC clock for clock=%d\n",
+ dev->cfg.clock);
+ ret = -EINVAL;
+ goto err;
+ }
adc_freq = clock_adc_lut[i].adc;
/* get used IF frequency */
@@ -849,29 +870,97 @@ static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct af9033_dev *dev = fe->demodulator_priv;
struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret;
+ u8 u8tmp;
/* use DVBv5 CNR */
- if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
- *snr = div_s64(c->cnr.stat[0].svalue, 100); /* 1000x => 10x */
- else
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) {
+ /* Return 0.1 dB for AF9030 and 0-0xffff for IT9130. */
+ if (dev->is_af9035) {
+ /* 1000x => 10x (0.1 dB) */
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ } else {
+ /* 1000x => 1x (1 dB) */
+ *snr = div_s64(c->cnr.stat[0].svalue, 1000);
+
+ /* read current modulation */
+ ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
+ if (ret)
+ goto err;
+
+ /* scale value to 0x0000-0xffff */
+ switch ((u8tmp >> 0) & 3) {
+ case 0:
+ *snr = *snr * 0xffff / 23;
+ break;
+ case 1:
+ *snr = *snr * 0xffff / 26;
+ break;
+ case 2:
+ *snr = *snr * 0xffff / 32;
+ break;
+ default:
+ goto err;
+ }
+ }
+ } else {
*snr = 0;
+ }
return 0;
+
+err:
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+
+ return ret;
}
static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct af9033_dev *dev = fe->demodulator_priv;
- int ret;
- u8 strength2;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret, tmp, power_real;
+ u8 u8tmp, gain_offset, buf[7];
- /* read signal strength of 0-100 scale */
- ret = af9033_rd_reg(dev, 0x800048, &strength2);
- if (ret < 0)
- goto err;
+ if (dev->is_af9035) {
+ /* read signal strength of 0-100 scale */
+ ret = af9033_rd_reg(dev, 0x800048, &u8tmp);
+ if (ret < 0)
+ goto err;
+
+ /* scale value to 0x0000-0xffff */
+ *strength = u8tmp * 0xffff / 100;
+ } else {
+ ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_rd_regs(dev, 0x80f900, buf, 7);
+ if (ret < 0)
+ goto err;
+
+ if (c->frequency <= 300000000)
+ gain_offset = 7; /* VHF */
+ else
+ gain_offset = 4; /* UHF */
+
+ power_real = (u8tmp - 100 - gain_offset) -
+ power_reference[((buf[3] >> 0) & 3)][((buf[6] >> 0) & 7)];
+
+ if (power_real < -15)
+ tmp = 0;
+ else if ((power_real >= -15) && (power_real < 0))
+ tmp = (2 * (power_real + 15)) / 3;
+ else if ((power_real >= 0) && (power_real < 20))
+ tmp = 4 * power_real + 10;
+ else if ((power_real >= 20) && (power_real < 35))
+ tmp = (2 * (power_real - 20)) / 3 + 90;
+ else
+ tmp = 100;
- /* scale value to 0x0000-0xffff */
- *strength = strength2 * 0xffff / 100;
+ /* scale value to 0x0000-0xffff */
+ *strength = tmp * 0xffff / 100;
+ }
return 0;
@@ -1011,6 +1100,33 @@ static void af9033_stat_work(struct work_struct *work)
snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
+ /* read superframe number */
+ ret = af9033_rd_reg(dev, 0x80f78b, &u8tmp);
+ if (ret)
+ goto err;
+
+ if (u8tmp)
+ snr_val /= u8tmp;
+
+ /* read current transmission mode */
+ ret = af9033_rd_reg(dev, 0x80f900, &u8tmp);
+ if (ret)
+ goto err;
+
+ switch ((u8tmp >> 0) & 3) {
+ case 0:
+ snr_val *= 4;
+ break;
+ case 1:
+ snr_val *= 1;
+ break;
+ case 2:
+ snr_val *= 2;
+ break;
+ default:
+ goto err_schedule_delayed_work;
+ }
+
/* read current modulation */
ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
if (ret)
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index c12c92cb5855..8e23275148ed 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -181,7 +181,10 @@ static const struct val_snr qam64_snr_lut[] = {
{ 0x05570d, 26 },
{ 0x059feb, 27 },
{ 0x05bf38, 28 },
- { 0xffffff, 29 },
+ { 0x05f78f, 29 },
+ { 0x0612c3, 30 },
+ { 0x0626be, 31 },
+ { 0xffffff, 32 },
};
static const struct reg_val ofsm_init[] = {
@@ -2051,4 +2054,10 @@ static const struct reg_val tuner_init_it9135_62[] = {
{ 0x80fd8b, 0x00 },
};
+/* NorDig power reference table */
+static const int power_reference[][5] = {
+ {-93, -91, -90, -89, -88}, /* QPSK 1/2 ~ 7/8 */
+ {-87, -85, -84, -83, -82}, /* 16QAM 1/2 ~ 7/8 */
+ {-82, -80, -78, -77, -76}, /* 64QAM 1/2 ~ 7/8 */
+};
#endif /* AF9033_PRIV_H */
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index a68974f6d708..5d06c99b0e97 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -29,6 +29,7 @@
#include "au8522_priv.h"
static int debug;
+static int zv_mode = 1; /* default to on */
#define dprintk(arg...)\
do { if (debug)\
@@ -469,6 +470,87 @@ static struct {
{ 0x8526, 0x01 },
};
+static struct {
+ u16 reg;
+ u16 data;
+} QAM256_mod_tab_zv_mode[] = {
+ { 0x80a3, 0x09 },
+ { 0x80a4, 0x00 },
+ { 0x8081, 0xc4 },
+ { 0x80a5, 0x40 },
+ { 0x80b5, 0xfb },
+ { 0x80b6, 0x8e },
+ { 0x80b7, 0x39 },
+ { 0x80aa, 0x77 },
+ { 0x80ad, 0x77 },
+ { 0x80a6, 0x67 },
+ { 0x8262, 0x20 },
+ { 0x821c, 0x30 },
+ { 0x80b8, 0x3e },
+ { 0x80b9, 0xf0 },
+ { 0x80ba, 0x01 },
+ { 0x80bb, 0x18 },
+ { 0x80bc, 0x50 },
+ { 0x80bd, 0x00 },
+ { 0x80be, 0xea },
+ { 0x80bf, 0xef },
+ { 0x80c0, 0xfc },
+ { 0x80c1, 0xbd },
+ { 0x80c2, 0x1f },
+ { 0x80c3, 0xfc },
+ { 0x80c4, 0xdd },
+ { 0x80c5, 0xaf },
+ { 0x80c6, 0x00 },
+ { 0x80c7, 0x38 },
+ { 0x80c8, 0x30 },
+ { 0x80c9, 0x05 },
+ { 0x80ca, 0x4a },
+ { 0x80cb, 0xd0 },
+ { 0x80cc, 0x01 },
+ { 0x80cd, 0xd9 },
+ { 0x80ce, 0x6f },
+ { 0x80cf, 0xf9 },
+ { 0x80d0, 0x70 },
+ { 0x80d1, 0xdf },
+ { 0x80d2, 0xf7 },
+ { 0x80d3, 0xc2 },
+ { 0x80d4, 0xdf },
+ { 0x80d5, 0x02 },
+ { 0x80d6, 0x9a },
+ { 0x80d7, 0xd0 },
+ { 0x8250, 0x0d },
+ { 0x8251, 0xcd },
+ { 0x8252, 0xe0 },
+ { 0x8253, 0x05 },
+ { 0x8254, 0xa7 },
+ { 0x8255, 0xff },
+ { 0x8256, 0xed },
+ { 0x8257, 0x5b },
+ { 0x8258, 0xae },
+ { 0x8259, 0xe6 },
+ { 0x825a, 0x3d },
+ { 0x825b, 0x0f },
+ { 0x825c, 0x0d },
+ { 0x825d, 0xea },
+ { 0x825e, 0xf2 },
+ { 0x825f, 0x51 },
+ { 0x8260, 0xf5 },
+ { 0x8261, 0x06 },
+ { 0x821a, 0x01 },
+ { 0x8546, 0x40 },
+ { 0x8210, 0x26 },
+ { 0x8211, 0xf6 },
+ { 0x8212, 0x84 },
+ { 0x8213, 0x02 },
+ { 0x8502, 0x01 },
+ { 0x8121, 0x04 },
+ { 0x8122, 0x04 },
+ { 0x852e, 0x10 },
+ { 0x80a4, 0xca },
+ { 0x80a7, 0x40 },
+ { 0x8526, 0x01 },
+};
+
static int au8522_enable_modulation(struct dvb_frontend *fe,
fe_modulation_t m)
{
@@ -495,12 +577,23 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
au8522_set_if(fe, state->config->qam_if);
break;
case QAM_256:
- dprintk("%s() QAM 256\n", __func__);
- for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
- au8522_writereg(state,
- QAM256_mod_tab[i].reg,
- QAM256_mod_tab[i].data);
- au8522_set_if(fe, state->config->qam_if);
+ if (zv_mode) {
+ dprintk("%s() QAM 256 (zv_mode)\n", __func__);
+ for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab_zv_mode); i++)
+ au8522_writereg(state,
+ QAM256_mod_tab_zv_mode[i].reg,
+ QAM256_mod_tab_zv_mode[i].data);
+ au8522_set_if(fe, state->config->qam_if);
+ msleep(100);
+ au8522_writereg(state, 0x821a, 0x00);
+ } else {
+ dprintk("%s() QAM 256\n", __func__);
+ for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
+ au8522_writereg(state,
+ QAM256_mod_tab[i].reg,
+ QAM256_mod_tab[i].data);
+ au8522_set_if(fe, state->config->qam_if);
+ }
break;
default:
dprintk("%s() Invalid modulation\n", __func__);
@@ -537,7 +630,12 @@ static int au8522_set_frontend(struct dvb_frontend *fe)
return ret;
/* Allow the tuner to settle */
- msleep(100);
+ if (zv_mode) {
+ dprintk("%s() increase tuner settling time for zv_mode\n",
+ __func__);
+ msleep(250);
+ } else
+ msleep(100);
au8522_enable_modulation(fe, c->modulation);
@@ -823,6 +921,11 @@ static struct dvb_frontend_ops au8522_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+module_param(zv_mode, int, 0644);
+MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatibility mode (default:on).\n"
+ "\t\ton - modified AU8522 QAM256 initialization.\n"
+ "\t\tProvides faster lock when using ZeeVee modulator based sources");
+
MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 3d399d9a6343..86563260d0f2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -169,6 +169,9 @@ static int cx22700_set_tps(struct cx22700_state *state,
cx22700_writereg (state, 0x04, val);
+ if (p->code_rate_HP - FEC_1_2 >= sizeof(fec_tab) ||
+ p->code_rate_LP - FEC_1_2 >= sizeof(fec_tab))
+ return -EINVAL;
val = fec_tab[p->code_rate_HP - FEC_1_2] << 3;
val |= fec_tab[p->code_rate_LP - FEC_1_2];
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 95b981cd7115..7b510f2ae20f 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -177,47 +177,45 @@ static int cx24110_set_inversion (struct cx24110_state* state, fe_spectral_inver
return 0;
}
-static int cx24110_set_fec (struct cx24110_state* state, fe_code_rate_t fec)
+static int cx24110_set_fec(struct cx24110_state* state, fe_code_rate_t fec)
{
-/* fixme (low): error handling */
-
- static const int rate[]={-1,1,2,3,5,7,-1};
- static const int g1[]={-1,0x01,0x02,0x05,0x15,0x45,-1};
- static const int g2[]={-1,0x01,0x03,0x06,0x1a,0x7a,-1};
+ static const int rate[FEC_AUTO] = {-1, 1, 2, 3, 5, 7, -1};
+ static const int g1[FEC_AUTO] = {-1, 0x01, 0x02, 0x05, 0x15, 0x45, -1};
+ static const int g2[FEC_AUTO] = {-1, 0x01, 0x03, 0x06, 0x1a, 0x7a, -1};
/* Well, the AutoAcq engine of the cx24106 and 24110 automatically
searches all enabled viterbi rates, and can handle non-standard
rates as well. */
- if (fec>FEC_AUTO)
- fec=FEC_AUTO;
+ if (fec > FEC_AUTO)
+ fec = FEC_AUTO;
- if (fec==FEC_AUTO) { /* (re-)establish AutoAcq behaviour */
- cx24110_writereg(state,0x37,cx24110_readreg(state,0x37)&0xdf);
+ if (fec == FEC_AUTO) { /* (re-)establish AutoAcq behaviour */
+ cx24110_writereg(state, 0x37, cx24110_readreg(state, 0x37) & 0xdf);
/* clear AcqVitDis bit */
- cx24110_writereg(state,0x18,0xae);
+ cx24110_writereg(state, 0x18, 0xae);
/* allow all DVB standard code rates */
- cx24110_writereg(state,0x05,(cx24110_readreg(state,0x05)&0xf0)|0x3);
+ cx24110_writereg(state, 0x05, (cx24110_readreg(state, 0x05) & 0xf0) | 0x3);
/* set nominal Viterbi rate 3/4 */
- cx24110_writereg(state,0x22,(cx24110_readreg(state,0x22)&0xf0)|0x3);
+ cx24110_writereg(state, 0x22, (cx24110_readreg(state, 0x22) & 0xf0) | 0x3);
/* set current Viterbi rate 3/4 */
- cx24110_writereg(state,0x1a,0x05); cx24110_writereg(state,0x1b,0x06);
+ cx24110_writereg(state, 0x1a, 0x05);
+ cx24110_writereg(state, 0x1b, 0x06);
/* set the puncture registers for code rate 3/4 */
return 0;
} else {
- cx24110_writereg(state,0x37,cx24110_readreg(state,0x37)|0x20);
+ cx24110_writereg(state, 0x37, cx24110_readreg(state, 0x37) | 0x20);
/* set AcqVitDis bit */
- if(rate[fec]>0) {
- cx24110_writereg(state,0x05,(cx24110_readreg(state,0x05)&0xf0)|rate[fec]);
- /* set nominal Viterbi rate */
- cx24110_writereg(state,0x22,(cx24110_readreg(state,0x22)&0xf0)|rate[fec]);
- /* set current Viterbi rate */
- cx24110_writereg(state,0x1a,g1[fec]);
- cx24110_writereg(state,0x1b,g2[fec]);
- /* not sure if this is the right way: I always used AutoAcq mode */
- } else
- return -EOPNOTSUPP;
-/* fixme (low): which is the correct return code? */
+ if (rate[fec] < 0)
+ return -EINVAL;
+
+ cx24110_writereg(state, 0x05, (cx24110_readreg(state, 0x05) & 0xf0) | rate[fec]);
+ /* set nominal Viterbi rate */
+ cx24110_writereg(state, 0x22, (cx24110_readreg(state, 0x22) & 0xf0) | rate[fec]);
+ /* set current Viterbi rate */
+ cx24110_writereg(state, 0x1a, g1[fec]);
+ cx24110_writereg(state, 0x1b, g2[fec]);
+ /* not sure if this is the right way: I always used AutoAcq mode */
}
return 0;
}
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index a6c3c9e2e897..acb965ce0358 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -459,7 +459,7 @@ static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
if (state->priv->skip_fw_load)
return 0;
- /* check if firmware if already running */
+ /* check if firmware is already running */
if (cx24117_readreg(state, 0xeb) != 0xa) {
/* Load firmware */
/* request the firmware, this will block until loaded */
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 589134e95175..c505d696f92d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -1780,7 +1780,7 @@ static u32 interpolate_value(u32 value, struct linear_segments *segments,
}
/* FIXME: may require changes - this one was borrowed from dib8000 */
-static u32 dib7000p_get_time_us(struct dvb_frontend *demod, int layer)
+static u32 dib7000p_get_time_us(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *c = &demod->dtv_property_cache;
u64 time_us, tmp64;
@@ -1881,7 +1881,6 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
{
struct dib7000p_state *state = demod->demodulator_priv;
struct dtv_frontend_properties *c = &demod->dtv_property_cache;
- int i;
int show_per_stats = 0;
u32 time_us = 0, val, snr;
u64 blocks, ucb;
@@ -1935,7 +1934,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
/* Estimate the number of packets based on bitrate */
if (!time_us)
- time_us = dib7000p_get_time_us(demod, -1);
+ time_us = dib7000p_get_time_us(demod);
if (time_us) {
blocks = 1250000ULL * 1000000ULL;
@@ -1949,7 +1948,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
/* Get post-BER measures */
if (time_after(jiffies, state->ber_jiffies_stats)) {
- time_us = dib7000p_get_time_us(demod, -1);
+ time_us = dib7000p_get_time_us(demod);
state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
dprintk("Next all layers stats available in %u us.", time_us);
@@ -1969,7 +1968,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
c->block_error.stat[0].scale = FE_SCALE_COUNTER;
c->block_error.stat[0].uvalue += val;
- time_us = dib7000p_get_time_us(demod, i);
+ time_us = dib7000p_get_time_us(demod);
if (time_us) {
blocks = 1250000ULL * 1000000ULL;
do_div(blocks, time_us * 8 * 204);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 5ec221ffdfca..2bfa7a435974 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12255,8 +12255,7 @@ static void drx39xxj_release(struct dvb_frontend *fe)
kfree(demod->my_ext_attr);
kfree(demod->my_common_attr);
kfree(demod->my_i2c_dev_addr);
- if (demod->firmware)
- release_firmware(demod->firmware);
+ release_firmware(demod->firmware);
kfree(demod);
kfree(state);
}
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 672195147d01..d46cf5f7cd2e 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -166,9 +166,9 @@ static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
-#define dprintk(level, fmt, arg...) do { \
-if (debug >= level) \
- pr_debug(fmt, ##arg); \
+#define dprintk(level, fmt, arg...) do { \
+if (debug >= level) \
+ printk(KERN_DEBUG KBUILD_MODNAME ": %s " fmt, __func__, ##arg); \
} while (0)
@@ -6310,8 +6310,7 @@ static void drxk_release(struct dvb_frontend *fe)
struct drxk_state *state = fe->demodulator_priv;
dprintk(1, "\n");
- if (state->fw)
- release_firmware(state->fw);
+ release_firmware(state->fw);
kfree(state);
}
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 81657e94c5a4..ba4ee0b48834 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1,5 +1,5 @@
/*
- * Montage M88DS3103 demodulator driver
+ * Montage M88DS3103/M88RS6000 demodulator driver
*
* Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
*
@@ -162,7 +162,7 @@ static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
- if (tab_len > 83) {
+ if (tab_len > 86) {
ret = -EINVAL;
goto err;
}
@@ -245,9 +245,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, len;
const struct m88ds3103_reg_val *init;
- u8 u8tmp, u8tmp1, u8tmp2;
- u8 buf[2];
- u16 u16tmp, divide_ratio;
+ u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */
+ u8 buf[3];
+ u16 u16tmp, divide_ratio = 0;
u32 tuner_frequency, target_mclk;
s32 s32tmp;
@@ -262,6 +262,22 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
}
+ /* reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
+ /* Disable demod clock path */
+ if (priv->chip_id == M88RS6000_CHIP_ID) {
+ ret = m88ds3103_wr_reg(priv, 0x06, 0xe0);
+ if (ret)
+ goto err;
+ }
+
/* program tuner */
if (fe->ops.tuner_ops.set_params) {
ret = fe->ops.tuner_ops.set_params(fe);
@@ -282,49 +298,44 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
tuner_frequency = c->frequency;
}
- /* reset */
- ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
- if (ret)
- goto err;
-
- ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
- if (ret)
- goto err;
-
- ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
- if (ret)
- goto err;
+ /* select M88RS6000 demod main mclk and ts mclk from tuner die. */
+ if (priv->chip_id == M88RS6000_CHIP_ID) {
+ if (c->symbol_rate > 45010000)
+ priv->mclk_khz = 110250;
+ else
+ priv->mclk_khz = 96000;
- ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
- if (ret)
- goto err;
+ if (c->delivery_system == SYS_DVBS)
+ target_mclk = 96000;
+ else
+ target_mclk = 144000;
- switch (c->delivery_system) {
- case SYS_DVBS:
- len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
- init = m88ds3103_dvbs_init_reg_vals;
- target_mclk = 96000;
- break;
- case SYS_DVBS2:
- len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
- init = m88ds3103_dvbs2_init_reg_vals;
+ /* Enable demod clock path */
+ ret = m88ds3103_wr_reg(priv, 0x06, 0x00);
+ if (ret)
+ goto err;
+ usleep_range(10000, 20000);
+ } else {
+ /* set M88DS3103 mclk and ts mclk. */
+ priv->mclk_khz = 96000;
switch (priv->cfg->ts_mode) {
case M88DS3103_TS_SERIAL:
case M88DS3103_TS_SERIAL_D7:
- if (c->symbol_rate < 18000000)
- target_mclk = 96000;
- else
- target_mclk = 144000;
+ target_mclk = priv->cfg->ts_clk;
break;
case M88DS3103_TS_PARALLEL:
case M88DS3103_TS_CI:
- if (c->symbol_rate < 18000000)
+ if (c->delivery_system == SYS_DVBS)
target_mclk = 96000;
- else if (c->symbol_rate < 28000000)
- target_mclk = 144000;
- else
- target_mclk = 192000;
+ else {
+ if (c->symbol_rate < 18000000)
+ target_mclk = 96000;
+ else if (c->symbol_rate < 28000000)
+ target_mclk = 144000;
+ else
+ target_mclk = 192000;
+ }
break;
default:
dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
@@ -332,6 +343,55 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
ret = -EINVAL;
goto err;
}
+
+ switch (target_mclk) {
+ case 96000:
+ u8tmp1 = 0x02; /* 0b10 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 144000:
+ u8tmp1 = 0x00; /* 0b00 */
+ u8tmp2 = 0x01; /* 0b01 */
+ break;
+ case 192000:
+ u8tmp1 = 0x03; /* 0b11 */
+ u8tmp2 = 0x00; /* 0b00 */
+ break;
+ }
+ ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
+ if (ret)
+ goto err;
+ ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
+ if (ret)
+ goto err;
+ }
+
+ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ if (priv->chip_id == M88RS6000_CHIP_ID) {
+ len = ARRAY_SIZE(m88rs6000_dvbs_init_reg_vals);
+ init = m88rs6000_dvbs_init_reg_vals;
+ } else {
+ len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
+ init = m88ds3103_dvbs_init_reg_vals;
+ }
+ break;
+ case SYS_DVBS2:
+ if (priv->chip_id == M88RS6000_CHIP_ID) {
+ len = ARRAY_SIZE(m88rs6000_dvbs2_init_reg_vals);
+ init = m88rs6000_dvbs2_init_reg_vals;
+ } else {
+ len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
+ init = m88ds3103_dvbs2_init_reg_vals;
+ }
break;
default:
dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
@@ -347,7 +407,30 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
}
- u8tmp1 = 0; /* silence compiler warning */
+ if (priv->chip_id == M88RS6000_CHIP_ID) {
+ if ((c->delivery_system == SYS_DVBS2)
+ && ((c->symbol_rate / 1000) <= 5000)) {
+ ret = m88ds3103_wr_reg(priv, 0xc0, 0x04);
+ if (ret)
+ goto err;
+ buf[0] = 0x09;
+ buf[1] = 0x22;
+ buf[2] = 0x88;
+ ret = m88ds3103_wr_regs(priv, 0x8a, buf, 3);
+ if (ret)
+ goto err;
+ }
+ ret = m88ds3103_wr_reg_mask(priv, 0x9d, 0x08, 0x08);
+ if (ret)
+ goto err;
+ ret = m88ds3103_wr_reg(priv, 0xf1, 0x01);
+ if (ret)
+ goto err;
+ ret = m88ds3103_wr_reg_mask(priv, 0x30, 0x80, 0x80);
+ if (ret)
+ goto err;
+ }
+
switch (priv->cfg->ts_mode) {
case M88DS3103_TS_SERIAL:
u8tmp1 = 0x00;
@@ -383,16 +466,15 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
if (ret)
goto err;
- }
-
- if (priv->cfg->ts_clk) {
- divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
- u8tmp1 = divide_ratio / 2;
- u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
- } else {
- divide_ratio = 0;
u8tmp1 = 0;
u8tmp2 = 0;
+ break;
+ default:
+ if (priv->cfg->ts_clk) {
+ divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
+ u8tmp1 = divide_ratio / 2;
+ u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
+ }
}
dev_dbg(&priv->i2c->dev,
@@ -420,29 +502,6 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- switch (target_mclk) {
- case 96000:
- u8tmp1 = 0x02; /* 0b10 */
- u8tmp2 = 0x01; /* 0b01 */
- break;
- case 144000:
- u8tmp1 = 0x00; /* 0b00 */
- u8tmp2 = 0x01; /* 0b01 */
- break;
- case 192000:
- u8tmp1 = 0x03; /* 0b11 */
- u8tmp2 = 0x00; /* 0b00 */
- break;
- }
-
- ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
- if (ret)
- goto err;
-
- ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
- if (ret)
- goto err;
-
if (c->symbol_rate <= 3000000)
u8tmp = 0x20;
else if (c->symbol_rate <= 10000000)
@@ -466,7 +525,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2);
+ u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, priv->mclk_khz / 2);
buf[0] = (u16tmp >> 0) & 0xff;
buf[1] = (u16tmp >> 8) & 0xff;
ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
@@ -489,7 +548,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
(tuner_frequency - c->frequency));
s32tmp = 0x10000 * (tuner_frequency - c->frequency);
- s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ);
+ s32tmp = DIV_ROUND_CLOSEST(s32tmp, priv->mclk_khz);
if (s32tmp < 0)
s32tmp += 0x10000;
@@ -520,7 +579,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret, len, remaining;
const struct firmware *fw = NULL;
- u8 *fw_file = M88DS3103_FIRMWARE;
+ u8 *fw_file;
u8 u8tmp;
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
@@ -541,15 +600,6 @@ static int m88ds3103_init(struct dvb_frontend *fe)
if (ret)
goto err;
- /* reset */
- ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
- if (ret)
- goto err;
-
- ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
- if (ret)
- goto err;
-
/* firmware status */
ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
if (ret)
@@ -560,10 +610,23 @@ static int m88ds3103_init(struct dvb_frontend *fe)
if (u8tmp)
goto skip_fw_download;
+ /* global reset, global diseqc reset, global fec reset */
+ ret = m88ds3103_wr_reg(priv, 0x07, 0xe0);
+ if (ret)
+ goto err;
+
+ ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
+ if (ret)
+ goto err;
+
/* cold state - try to download firmware */
dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
KBUILD_MODNAME, m88ds3103_ops.info.name);
+ if (priv->chip_id == M88RS6000_CHIP_ID)
+ fw_file = M88RS6000_FIRMWARE;
+ else
+ fw_file = M88DS3103_FIRMWARE;
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
if (ret) {
@@ -577,7 +640,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
if (ret)
- goto err;
+ goto error_fw_release;
for (remaining = fw->size; remaining > 0;
remaining -= (priv->cfg->i2c_wr_max - 1)) {
@@ -591,13 +654,13 @@ static int m88ds3103_init(struct dvb_frontend *fe)
dev_err(&priv->i2c->dev,
"%s: firmware download failed=%d\n",
KBUILD_MODNAME, ret);
- goto err;
+ goto error_fw_release;
}
}
ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
if (ret)
- goto err;
+ goto error_fw_release;
release_firmware(fw);
fw = NULL;
@@ -623,10 +686,10 @@ skip_fw_download:
priv->warm = true;
return 0;
-err:
- if (fw)
- release_firmware(fw);
+error_fw_release:
+ release_firmware(fw);
+err:
dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -635,13 +698,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
{
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret;
+ u8 u8tmp;
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
priv->delivery_system = SYS_UNDEFINED;
/* TS Hi-Z */
- ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01);
+ if (priv->chip_id == M88RS6000_CHIP_ID)
+ u8tmp = 0x29;
+ else
+ u8tmp = 0x27;
+ ret = m88ds3103_wr_reg_mask(priv, u8tmp, 0x00, 0x01);
if (ret)
goto err;
@@ -830,7 +898,7 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
goto err;
c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
- M88DS3103_MCLK_KHZ * 1000 / 0x10000;
+ priv->mclk_khz * 1000 / 0x10000;
return 0;
err:
@@ -1310,18 +1378,22 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
priv->i2c = i2c;
mutex_init(&priv->i2c_mutex);
- ret = m88ds3103_rd_reg(priv, 0x01, &chip_id);
+ /* 0x00: chip id[6:0], 0x01: chip ver[7:0], 0x02: chip ver[15:8] */
+ ret = m88ds3103_rd_reg(priv, 0x00, &chip_id);
if (ret)
goto err;
- dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+ chip_id >>= 1;
+ dev_info(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
switch (chip_id) {
- case 0xd0:
+ case M88RS6000_CHIP_ID:
+ case M88DS3103_CHIP_ID:
break;
default:
goto err;
}
+ priv->chip_id = chip_id;
switch (priv->cfg->clock_out) {
case M88DS3103_CLOCK_OUT_DISABLED:
@@ -1337,6 +1409,11 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
goto err;
}
+ /* 0x29 register is defined differently for m88rs6000. */
+ /* set internal tuner address to 0x21 */
+ if (chip_id == M88RS6000_CHIP_ID)
+ u8tmp = 0x00;
+
ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
if (ret)
goto err;
@@ -1364,6 +1441,9 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
/* create dvb_frontend */
memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
+ if (priv->chip_id == M88RS6000_CHIP_ID)
+ strncpy(priv->fe.ops.info.name,
+ "Montage M88RS6000", sizeof(priv->fe.ops.info.name));
priv->fe.demodulator_priv = priv;
return &priv->fe;
@@ -1423,3 +1503,4 @@ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(M88DS3103_FIRMWARE);
+MODULE_FIRMWARE(M88RS6000_FIRMWARE);
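
On the M88DS3103 path above, the serial TS modes now take target_mclk directly from cfg->ts_clk, while the parallel/CI case still derives a divider from target_mclk/ts_clk and programs it as two halves (one rounded down, one rounded up) so that the halves sum to the full ratio. A hedged sketch of that split, not part of the patch and with hypothetical names:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative sketch only. */
static void ts_clk_divider(u32 target_mclk_khz, u32 ts_clk_khz,
			   u8 *div_a, u8 *div_b)
{
	u16 ratio = DIV_ROUND_UP(target_mclk_khz, ts_clk_khz);

	*div_a = ratio / 2;			/* first half, rounded down */
	*div_b = DIV_ROUND_UP(ratio, 2);	/* second half, rounded up */
}

/* e.g. target_mclk = 144000 kHz, ts_clk = 16000 kHz: ratio 9 -> 4 + 5 */
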
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index 9169fdd143cf..a2c0958111f8 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -25,7 +25,10 @@
#include <linux/math64.h>
#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
+#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw"
#define M88DS3103_MCLK_KHZ 96000
+#define M88RS6000_CHIP_ID 0x74
+#define M88DS3103_CHIP_ID 0x70
struct m88ds3103_priv {
struct i2c_adapter *i2c;
@@ -38,6 +41,10 @@ struct m88ds3103_priv {
u32 ber;
bool warm; /* FW running */
struct i2c_adapter *i2c_adapter;
+ /* chip id is auto-detected and used to select per-chip configuration */
+ u8 chip_id;
+ /* main mclk is calculated for M88RS6000 dynamically */
+ u32 mclk_khz;
};
struct m88ds3103_reg_val {
@@ -214,4 +221,178 @@ static const struct m88ds3103_reg_val m88ds3103_dvbs2_init_reg_vals[] = {
{0xb8, 0x00},
};
+static const struct m88ds3103_reg_val m88rs6000_dvbs_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x03},
+ {0x0c, 0x02},
+ {0x20, 0x00},
+ {0x21, 0x54},
+ {0x25, 0x82},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x31, 0x40},
+ {0x32, 0x32},
+ {0x33, 0x35},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x63, 0x0f},
+ {0x64, 0x30},
+ {0x65, 0x40},
+ {0x68, 0x26},
+ {0x69, 0x4c},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x7c, 0x00},
+ {0xae, 0x82},
+ {0x80, 0x64},
+ {0x81, 0x66},
+ {0x82, 0x44},
+ {0x85, 0x04},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0xa0, 0x44},
+ {0xbe, 0x00},
+ {0xc0, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xe0, 0xf8},
+ {0xe6, 0x8b},
+ {0xd0, 0x40},
+ {0xf8, 0x20},
+ {0xfa, 0x0f},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+ {0x29, 0x11},
+};
+
+static const struct m88ds3103_reg_val m88rs6000_dvbs2_init_reg_vals[] = {
+ {0x23, 0x07},
+ {0x08, 0x07},
+ {0x0c, 0x02},
+ {0x20, 0x00},
+ {0x21, 0x54},
+ {0x25, 0x82},
+ {0x27, 0x31},
+ {0x30, 0x08},
+ {0x32, 0x32},
+ {0x33, 0x35},
+ {0x35, 0xff},
+ {0x3a, 0x00},
+ {0x37, 0x10},
+ {0x38, 0x10},
+ {0x39, 0x02},
+ {0x42, 0x60},
+ {0x4a, 0x80},
+ {0x4b, 0x04},
+ {0x4d, 0x91},
+ {0x5d, 0xc8},
+ {0x50, 0x36},
+ {0x51, 0x36},
+ {0x52, 0x36},
+ {0x53, 0x36},
+ {0x63, 0x0f},
+ {0x64, 0x10},
+ {0x65, 0x20},
+ {0x68, 0x46},
+ {0x69, 0xcd},
+ {0x70, 0x20},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x40},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x60},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x80},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0xa0},
+ {0x71, 0x70},
+ {0x72, 0x04},
+ {0x73, 0x00},
+ {0x70, 0x1f},
+ {0x76, 0x38},
+ {0x77, 0xa6},
+ {0x78, 0x0c},
+ {0x79, 0x80},
+ {0x7f, 0x14},
+ {0x85, 0x08},
+ {0xcd, 0xf4},
+ {0x90, 0x33},
+ {0x86, 0x00},
+ {0x87, 0x0f},
+ {0x89, 0x00},
+ {0x8b, 0x44},
+ {0x8c, 0x66},
+ {0x9d, 0xc1},
+ {0x8a, 0x10},
+ {0xad, 0x40},
+ {0xa0, 0x44},
+ {0xbe, 0x00},
+ {0xc0, 0x08},
+ {0xc1, 0x10},
+ {0xc2, 0x08},
+ {0xc3, 0x10},
+ {0xc4, 0x08},
+ {0xc5, 0xf0},
+ {0xc6, 0xff},
+ {0xc7, 0x00},
+ {0xc8, 0x1a},
+ {0xc9, 0x80},
+ {0xca, 0x23},
+ {0xcb, 0x24},
+ {0xcc, 0xf4},
+ {0xce, 0x74},
+ {0x00, 0x00},
+ {0xbd, 0x01},
+ {0xb8, 0x00},
+ {0x29, 0x01},
+};
#endif
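
With the two chip id constants above, attach() now reads register 0x00, drops bit 0 and compares the remaining seven bits, accepting both the DS3103 and the RS6000 die. A hedged detection sketch, not part of the patch (constants as defined in this header):

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative sketch only; reg00 is the raw value read from register 0x00. */
static int identify_chip(u8 reg00)
{
	u8 chip_id = reg00 >> 1;	/* chip id lives in bits [7:1] */

	switch (chip_id) {
	case M88DS3103_CHIP_ID:		/* 0x70 */
	case M88RS6000_CHIP_ID:		/* 0x74 */
		return chip_id;
	default:
		return -ENODEV;		/* unsupported silicon */
	}
}
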
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
new file mode 100644
index 000000000000..da4558bce60f
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -0,0 +1,38 @@
+/*
+ * Panasonic MN88472 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MN88472_H
+#define MN88472_H
+
+#include <linux/dvb/frontend.h>
+
+struct mn88472_config {
+ /*
+ * Max number of bytes the given I2C adapter can write at once.
+ * Default: none
+ */
+ u16 i2c_wr_max;
+
+
+ /* Everything after that is returned by the driver. */
+
+ /*
+ * DVB frontend.
+ */
+ struct dvb_frontend **fe;
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/mn88473.h b/drivers/media/dvb-frontends/mn88473.h
new file mode 100644
index 000000000000..a373ec93cbe0
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88473.h
@@ -0,0 +1,38 @@
+/*
+ * Panasonic MN88473 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MN88473_H
+#define MN88473_H
+
+#include <linux/dvb/frontend.h>
+
+struct mn88473_config {
+ /*
+ * Max number of bytes the given I2C adapter can write at once.
+ * Default: none
+ */
+ u16 i2c_wr_max;
+
+
+ /* Everything after that is returned by the driver. */
+
+ /*
+ * DVB frontend.
+ */
+ struct dvb_frontend **fe;
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index eb737cf29a36..9026e1aee163 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -258,13 +258,11 @@ static int rtl2832_rd_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
return rtl2832_rd(priv, reg, val, len);
}
-#if 0 /* currently not used */
/* write single register */
static int rtl2832_wr_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 val)
{
return rtl2832_wr_regs(priv, reg, page, &val, 1);
}
-#endif
/* read single register */
static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
@@ -599,6 +597,11 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
+ /* PIP mode related */
+ ret = rtl2832_wr_regs(priv, 0x92, 1, "\x00\x0f\xff", 3);
+ if (ret)
+ goto err;
+
/* If the frontend has get_if_frequency(), use it */
if (fe->ops.tuner_ops.get_if_frequency) {
u32 if_freq;
@@ -661,7 +664,6 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
-
/* soft reset */
ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
if (ret)
@@ -1020,6 +1022,58 @@ static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
return 0;
}
+int rtl2832_enable_external_ts_if(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ int ret;
+
+ dev_dbg(&priv->i2c->dev, "%s: setting PIP mode\n", __func__);
+
+ ret = rtl2832_wr_regs(priv, 0x0c, 1, "\x5f\xff", 2);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_demod_reg(priv, DVBT_PIP_ON, 0x1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_reg(priv, 0xbc, 0, 0x18);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_reg(priv, 0x22, 0, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_reg(priv, 0x26, 0, 0x1f);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_reg(priv, 0x27, 0, 0xff);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_regs(priv, 0x92, 1, "\x7f\xf7\xff", 3);
+ if (ret)
+ goto err;
+
+ /* soft reset */
+ ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+
+}
+EXPORT_SYMBOL(rtl2832_enable_external_ts_if);
+
struct i2c_adapter *rtl2832_get_i2c_adapter(struct dvb_frontend *fe)
{
struct rtl2832_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index cb3b6b0775b8..5254c1dfc8de 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -64,6 +64,10 @@ extern struct i2c_adapter *rtl2832_get_private_i2c_adapter(
struct dvb_frontend *fe
);
+extern int rtl2832_enable_external_ts_if(
+ struct dvb_frontend *fe
+);
+
#else
static inline struct dvb_frontend *rtl2832_attach(
@@ -89,6 +93,13 @@ static inline struct i2c_adapter *rtl2832_get_private_i2c_adapter(
return NULL;
}
+static inline int rtl2832_enable_external_ts_if(
+ struct dvb_frontend *fe
+)
+{
+ return -ENODEV;
+}
+
#endif
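
The new rtl2832_enable_external_ts_if() export is intended for bridge drivers that feed a second (slave) demodulator through the RTL2832 TS interface; when the driver is disabled in Kconfig, the inline stub above returns -ENODEV. A hedged caller sketch, not part of the patch, with the surrounding bridge code assumed:

#include "rtl2832.h"

/* Illustrative sketch only; rtl2832_fe is the frontend returned by
 * rtl2832_attach() in a (hypothetical) bridge driver. */
static int bridge_enable_slave_ts(struct dvb_frontend *rtl2832_fe)
{
	int ret;

	ret = rtl2832_enable_external_ts_if(rtl2832_fe);
	if (ret)
		return ret;	/* includes the -ENODEV stub case */

	/* ...attach and program the slave demodulator here... */
	return 0;
}
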
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 7bf98cf6bbe1..2896b47c29d8 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1013,6 +1013,10 @@ static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
if (s->d->props->power_ctrl)
s->d->props->power_ctrl(s->d, 1);
+ /* enable ADC */
+ if (s->d->props->frontend_ctrl)
+ s->d->props->frontend_ctrl(s->fe, 1);
+
set_bit(POWER_ON, &s->flags);
ret = rtl2832_sdr_set_tuner(s);
@@ -1064,6 +1068,10 @@ static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
clear_bit(POWER_ON, &s->flags);
+ /* disable ADC */
+ if (s->d->props->frontend_ctrl)
+ s->d->props->frontend_ctrl(s->fe, 0);
+
if (s->d->props->power_ctrl)
s->d->props->power_ctrl(s->d, 0);
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 1cd93be281ed..ce9ab442b4b6 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -308,14 +308,16 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x18", 6);
+ memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x08", 6);
+ cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
cmd.wlen = 6;
cmd.rlen = 4;
ret = si2168_cmd_execute(s, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x15", 6);
+ memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x05", 6);
+ cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
cmd.wlen = 6;
cmd.rlen = 4;
ret = si2168_cmd_execute(s, &cmd);
@@ -453,27 +455,45 @@ static int si2168_init(struct dvb_frontend *fe)
dev_err(&s->client->dev,
"firmware file '%s' not found\n",
fw_file);
- goto err;
+ goto error_fw_release;
}
}
dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
fw_file);
- for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
- len = remaining;
- if (len > i2c_wr_max)
- len = i2c_wr_max;
-
- memcpy(cmd.args, &fw->data[fw->size - remaining], len);
- cmd.wlen = len;
- cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret) {
- dev_err(&s->client->dev,
- "firmware download failed=%d\n",
- ret);
- goto err;
+ if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
+ /* firmware is in the new format */
+ for (remaining = fw->size; remaining > 0; remaining -= 17) {
+ len = fw->data[fw->size - remaining];
+ memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+ cmd.wlen = len;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret) {
+ dev_err(&s->client->dev,
+ "firmware download failed=%d\n",
+ ret);
+ goto error_fw_release;
+ }
+ }
+ } else {
+ /* firmware is in the old format */
+ for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
+ len = remaining;
+ if (len > i2c_wr_max)
+ len = i2c_wr_max;
+
+ memcpy(cmd.args, &fw->data[fw->size - remaining], len);
+ cmd.wlen = len;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret) {
+ dev_err(&s->client->dev,
+ "firmware download failed=%d\n",
+ ret);
+ goto error_fw_release;
+ }
}
}
@@ -487,6 +507,17 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ /* query firmware version */
+ memcpy(cmd.args, "\x11", 1);
+ cmd.wlen = 1;
+ cmd.rlen = 10;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+
+ dev_dbg(&s->client->dev, "firmware version: %c.%c.%d\n",
+ cmd.args[6], cmd.args[7], cmd.args[8]);
+
/* set ts mode */
memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
cmd.args[4] |= s->ts_mode;
@@ -498,17 +529,16 @@ static int si2168_init(struct dvb_frontend *fe)
s->fw_loaded = true;
-warm:
dev_info(&s->client->dev, "found a '%s' in warm state\n",
si2168_ops.info.name);
-
+warm:
s->active = true;
return 0;
-err:
- if (fw)
- release_firmware(fw);
+error_fw_release:
+ release_firmware(fw);
+err:
dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -670,6 +700,7 @@ static int si2168_probe(struct i2c_client *client,
*config->i2c_adapter = s->adapter;
*config->fe = &s->fe;
s->ts_mode = config->ts_mode;
+ s->ts_clock_inv = config->ts_clock_inv;
s->fw_loaded = false;
i2c_set_clientdata(client, s);
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index e086d6719451..87bc12146667 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -37,6 +37,10 @@ struct si2168_config {
/* TS mode */
u8 ts_mode;
+
+ /* TS clock inverted */
+ bool ts_clock_inv;
+
};
#define SI2168_TS_PARALLEL 0x06
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index e13983ed4be1..60bc3349b6c3 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -38,6 +38,7 @@ struct si2168 {
bool active;
bool fw_loaded;
u8 ts_mode;
+ bool ts_clock_inv;
};
/* firmware command struct */
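
In the new container format detected in si2168_init() above, the firmware is a sequence of 17-byte records: one length byte (at most 16) followed by the command payload, so fw->size is a multiple of 17. A hedged record-walking sketch, not part of the patch and with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative sketch only; send() stands in for si2168_cmd_execute(). */
static int fw_for_each_record(const u8 *data, size_t size,
			      int (*send)(const u8 *cmd, u8 len))
{
	size_t off;
	int ret;

	if (!size || size % 17 || data[0] <= 5)
		return -EINVAL;		/* not the 17-byte record format */

	for (off = 0; off < size; off += 17) {
		u8 len = data[off];	/* payload length, max 16 */

		if (len > 16)
			return -EINVAL;

		ret = send(&data[off + 1], len);
		if (ret)
			return ret;
	}
	return 0;
}
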
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 15bf4318cb74..cc1ef966f99f 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -92,6 +92,9 @@ static int sp2_write_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
return -EIO;
}
+ dev_dbg(&s->client->dev, "addr=0x%04x, reg = 0x%02x, data = %*ph\n",
+ client->addr, reg, len, buf);
+
return 0;
}
@@ -103,9 +106,6 @@ static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
int mem, ret;
int (*ci_op_cam)(void*, u8, int, u8, int*) = s->ci_control;
- dev_dbg(&s->client->dev, "slot=%d, acs=0x%02x, addr=0x%04x, data = 0x%02x",
- slot, acs, addr, data);
-
if (slot != 0)
return -EINVAL;
@@ -140,13 +140,16 @@ static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
if (ret)
return ret;
- if (read) {
- dev_dbg(&s->client->dev, "cam read, addr=0x%04x, data = 0x%04x",
- addr, mem);
+ dev_dbg(&s->client->dev, "%s: slot=%d, addr=0x%04x, %s, data=%x",
+ (read) ? "read" : "write", slot, addr,
+ (acs == SP2_CI_ATTR_ACS) ? "attr" : "io",
+ (read) ? mem : data);
+
+ if (read)
return mem;
- } else {
+ else
return 0;
- }
+
}
int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
@@ -407,7 +410,7 @@ err:
static int sp2_remove(struct i2c_client *client)
{
- struct si2157 *s = i2c_get_clientdata(client);
+ struct sp2 *s = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 07cd5ea7a038..19646fbb061d 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -705,7 +705,7 @@ static int stb0899_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_ma
struct stb0899_state *state = fe->demodulator_priv;
u8 reg, i;
- if (cmd->msg_len > 8)
+ if (cmd->msg_len > sizeof(cmd->msg))
return -EINVAL;
/* enable FIFO precharge */
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 23e872f84742..0b2a934f53e5 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -2146,7 +2146,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
u32 reg;
s32 car_step, steps, cur_step, dir, freq, timeout_lock;
- int lock = 0;
+ int lock;
if (state->srate >= 10000000)
timeout_lock = timeout_dmd / 3;
@@ -2154,98 +2154,96 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
timeout_lock = timeout_dmd / 2;
lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
- if (!lock) {
- if (state->srate >= 10000000) {
- if (stv090x_chk_tmg(state)) {
- if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
- goto err;
- if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
- goto err;
- lock = stv090x_get_dmdlock(state, timeout_dmd);
- } else {
- lock = 0;
- }
- } else {
- if (state->srate <= 4000000)
- car_step = 1000;
- else if (state->srate <= 7000000)
- car_step = 2000;
- else if (state->srate <= 10000000)
- car_step = 3000;
- else
- car_step = 5000;
-
- steps = (state->search_range / 1000) / car_step;
- steps /= 2;
- steps = 2 * (steps + 1);
- if (steps < 0)
- steps = 2;
- else if (steps > 12)
- steps = 12;
-
- cur_step = 1;
- dir = 1;
-
- if (!lock) {
- freq = state->frequency;
- state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
- while ((cur_step <= steps) && (!lock)) {
- if (dir > 0)
- freq += cur_step * car_step;
- else
- freq -= cur_step * car_step;
-
- /* Setup tuner */
- if (stv090x_i2c_gate_ctrl(state, 1) < 0)
- goto err;
+ if (lock)
+ return lock;
- if (state->config->tuner_set_frequency) {
- if (state->config->tuner_set_frequency(fe, freq) < 0)
- goto err_gateoff;
- }
+ if (state->srate >= 10000000) {
+ if (stv090x_chk_tmg(state)) {
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
+ goto err;
+ return stv090x_get_dmdlock(state, timeout_dmd);
+ }
+ return 0;
+ }
- if (state->config->tuner_set_bandwidth) {
- if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
- goto err_gateoff;
- }
+ if (state->srate <= 4000000)
+ car_step = 1000;
+ else if (state->srate <= 7000000)
+ car_step = 2000;
+ else if (state->srate <= 10000000)
+ car_step = 3000;
+ else
+ car_step = 5000;
- if (stv090x_i2c_gate_ctrl(state, 0) < 0)
- goto err;
+ steps = (state->search_range / 1000) / car_step;
+ steps /= 2;
+ steps = 2 * (steps + 1);
+ if (steps < 0)
+ steps = 2;
+ else if (steps > 12)
+ steps = 12;
- msleep(50);
+ cur_step = 1;
+ dir = 1;
- if (stv090x_i2c_gate_ctrl(state, 1) < 0)
- goto err;
+ freq = state->frequency;
+ state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
+ while ((cur_step <= steps) && (!lock)) {
+ if (dir > 0)
+ freq += cur_step * car_step;
+ else
+ freq -= cur_step * car_step;
- if (state->config->tuner_get_status) {
- if (state->config->tuner_get_status(fe, &reg) < 0)
- goto err_gateoff;
- }
+ /* Setup tuner */
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
- if (reg)
- dprintk(FE_DEBUG, 1, "Tuner phase locked");
- else
- dprintk(FE_DEBUG, 1, "Tuner unlocked");
+ if (state->config->tuner_set_frequency) {
+ if (state->config->tuner_set_frequency(fe, freq) < 0)
+ goto err_gateoff;
+ }
- if (stv090x_i2c_gate_ctrl(state, 0) < 0)
- goto err;
+ if (state->config->tuner_set_bandwidth) {
+ if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
+ goto err_gateoff;
+ }
- STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
- if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
- goto err;
- if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
- goto err;
- if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
- goto err;
- if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
- goto err;
- lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
- dir *= -1;
- cur_step++;
- }
- }
+ msleep(50);
+
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_get_status) {
+ if (state->config->tuner_get_status(fe, &reg) < 0)
+ goto err_gateoff;
}
+
+ if (reg)
+ dprintk(FE_DEBUG, 1, "Tuner phase locked");
+ else
+ dprintk(FE_DEBUG, 1, "Tuner unlocked");
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
+ STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
+ if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
+ goto err;
+ if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
+ goto err;
+ lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
+
+ dir *= -1;
+ cur_step++;
}
return lock;
@@ -2663,13 +2661,9 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
return STV090x_RANGEOK;
else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
return STV090x_RANGEOK;
- else
- return STV090x_OUTOFRANGE; /* Out of Range */
} else {
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
return STV090x_RANGEOK;
- else
- return STV090x_OUTOFRANGE;
}
return STV090x_OUTOFRANGE;
@@ -2789,6 +2783,12 @@ static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_mod
aclc = car_loop[i].crl_pilots_off_30;
}
} else { /* 16APSK and 32APSK */
+ /*
+ * This should never happen in practice, except if
+ * something is really wrong with the car_loop table.
+ */
+ if (i >= 11)
+ i = 10;
if (state->srate <= 3000000)
aclc = car_loop_apsk_low[i].crl_pilots_on_2;
else if (state->srate <= 7000000)
@@ -3470,7 +3470,20 @@ static enum dvbfe_search stv090x_search(struct dvb_frontend *fe)
if (props->frequency == 0)
return DVBFE_ALGO_SEARCH_INVALID;
- state->delsys = props->delivery_system;
+ switch (props->delivery_system) {
+ case SYS_DSS:
+ state->delsys = STV090x_DSS;
+ break;
+ case SYS_DVBS:
+ state->delsys = STV090x_DVBS1;
+ break;
+ case SYS_DVBS2:
+ state->delsys = STV090x_DVBS2;
+ break;
+ default:
+ return DVBFE_ALGO_SEARCH_INVALID;
+ }
+
state->frequency = props->frequency;
state->srate = props->symbol_rate;
state->search_mode = STV090x_SEARCH_AUTO;
@@ -4859,8 +4872,8 @@ err:
return -1;
}
-int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
- u8 xor_value)
+static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
+ u8 value, u8 xor_value)
{
struct stv090x_state *state = fe->demodulator_priv;
u8 reg = 0;
@@ -4871,7 +4884,6 @@ int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
}
-EXPORT_SYMBOL(stv090x_set_gpio);
static struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
@@ -4908,7 +4920,7 @@ static struct dvb_frontend_ops stv090x_ops = {
};
-struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
+struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
struct i2c_adapter *i2c,
enum stv090x_demodulator demod)
{
@@ -4969,6 +4981,8 @@ struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
if (config->diseqc_envelope_mode)
stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
+ config->set_gpio = stv090x_set_gpio;
+
dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
state->device == STV0900 ? "STV0900" : "STV0903",
demod,
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 0bd6adcfee8a..742eeda99000 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -89,29 +89,29 @@ struct stv090x_config {
bool diseqc_envelope_mode;
- int (*tuner_init) (struct dvb_frontend *fe);
- int (*tuner_sleep) (struct dvb_frontend *fe);
- int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
- int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
- int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
- int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth);
- int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth);
- int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain);
- int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
- int (*tuner_set_refclk) (struct dvb_frontend *fe, u32 refclk);
- int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
- void (*tuner_i2c_lock) (struct dvb_frontend *fe, int lock);
+ int (*tuner_init)(struct dvb_frontend *fe);
+ int (*tuner_sleep)(struct dvb_frontend *fe);
+ int (*tuner_set_mode)(struct dvb_frontend *fe, enum tuner_mode mode);
+ int (*tuner_set_frequency)(struct dvb_frontend *fe, u32 frequency);
+ int (*tuner_get_frequency)(struct dvb_frontend *fe, u32 *frequency);
+ int (*tuner_set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
+ int (*tuner_get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
+ int (*tuner_set_bbgain)(struct dvb_frontend *fe, u32 gain);
+ int (*tuner_get_bbgain)(struct dvb_frontend *fe, u32 *gain);
+ int (*tuner_set_refclk)(struct dvb_frontend *fe, u32 refclk);
+ int (*tuner_get_status)(struct dvb_frontend *fe, u32 *status);
+ void (*tuner_i2c_lock)(struct dvb_frontend *fe, int lock);
+
+ /* dir = 0 -> output, dir = 1 -> input/open-drain */
+ int (*set_gpio)(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
+ u8 xor_value);
};
#if IS_ENABLED(CONFIG_DVB_STV090x)
-extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
- struct i2c_adapter *i2c,
- enum stv090x_demodulator demod);
-
-/* dir = 0 -> output, dir = 1 -> input/open-drain */
-extern int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
- u8 dir, u8 value, u8 xor_value);
+struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
+ struct i2c_adapter *i2c,
+ enum stv090x_demodulator demod);
#else
@@ -123,12 +123,6 @@ static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *c
return NULL;
}
-static inline int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
- u8 opd, u8 value, u8 xor_value)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
#endif /* CONFIG_DVB_STV090x */
#endif /* __STV090x_H */
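
Since stv090x_set_gpio() is no longer exported, card drivers now reach the GPIO hook through the set_gpio pointer that stv090x_attach() stores in their stv090x_config. A hedged caller sketch, not part of the patch; the GPIO number and polarity are assumptions:

#include <linux/errno.h>
#include "stv090x.h"

/* Illustrative sketch only. */
static int card_set_gpio2(struct stv090x_config *cfg,
			  struct dvb_frontend *fe, u8 value)
{
	if (!cfg->set_gpio)
		return -ENODEV;		/* demod not attached yet */

	/* gpio 2 as output (dir = 0), no xor of the value */
	return cfg->set_gpio(fe, 2, 0, value, 0);
}
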
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index e5ebdbfe8c19..e63f582378bf 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -253,6 +253,5 @@ int fdtv_ca_register(struct firedtv *fdtv)
void fdtv_ca_release(struct firedtv *fdtv)
{
- if (fdtv->cadev)
- dvb_unregister_device(fdtv->cadev);
+ dvb_unregister_device(fdtv->cadev);
}
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index c2ba085e0d20..346a85be6de2 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -96,7 +96,7 @@ struct firedtv {
enum model_type type;
char subunit;
- char isochannel;
+ s8 isochannel;
struct fdtv_ir_context *ir_context;
fe_sec_voltage_t voltage;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index f40b4cf6107a..205d71364343 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -284,15 +284,6 @@ config VIDEO_SAA711X
To compile this driver as a module, choose M here: the
module will be called saa7115.
-config VIDEO_SAA7191
- tristate "Philips SAA7191 video decoder"
- depends on VIDEO_V4L2 && I2C
- ---help---
- Support for the Philips SAA7191 video decoder.
-
- To compile this driver as a module, choose M here: the
- module will be called saa7191.
-
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 01ae9328e582..9858900168bf 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
-obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 04bb29720aaf..40a1a95c7ce9 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -63,9 +63,9 @@ static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd)
static char *inputs[] = { "pass_through", "play_back" };
-static enum v4l2_mbus_pixelcode adv7170_codes[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_UYVY8_1X16,
+static u32 adv7170_codes[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
};
/* ----------------------------------------------------------------------- */
@@ -263,7 +263,7 @@ static int adv7170_s_routing(struct v4l2_subdev *sd,
}
static int adv7170_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(adv7170_codes))
return -EINVAL;
@@ -278,9 +278,9 @@ static int adv7170_g_fmt(struct v4l2_subdev *sd,
u8 val = adv7170_read(sd, 0x7);
if ((val & 0x40) == (1 << 6))
- mf->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ mf->code = MEDIA_BUS_FMT_UYVY8_1X16;
else
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
mf->width = 0;
@@ -297,11 +297,11 @@ static int adv7170_s_fmt(struct v4l2_subdev *sd,
int ret;
switch (mf->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
val &= ~0x40;
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
val |= 0x40;
break;
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index b88f3b3d5ed9..d220af579a64 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -60,9 +60,9 @@ static inline struct adv7175 *to_adv7175(struct v4l2_subdev *sd)
static char *inputs[] = { "pass_through", "play_back", "color_bar" };
-static enum v4l2_mbus_pixelcode adv7175_codes[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_UYVY8_1X16,
+static u32 adv7175_codes[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
};
/* ----------------------------------------------------------------------- */
@@ -301,7 +301,7 @@ static int adv7175_s_routing(struct v4l2_subdev *sd,
}
static int adv7175_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(adv7175_codes))
return -EINVAL;
@@ -316,9 +316,9 @@ static int adv7175_g_fmt(struct v4l2_subdev *sd,
u8 val = adv7175_read(sd, 0x7);
if ((val & 0x40) == (1 << 6))
- mf->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ mf->code = MEDIA_BUS_FMT_UYVY8_1X16;
else
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
mf->width = 0;
@@ -335,11 +335,11 @@ static int adv7175_s_fmt(struct v4l2_subdev *sd,
int ret;
switch (mf->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
val &= ~0x40;
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
val |= 0x40;
break;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 821178dcb08e..bffe6eb528a3 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -422,12 +422,12 @@ static void adv7180_exit_controls(struct adv7180_state *state)
}
static int adv7180_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index > 0)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ *code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
@@ -437,7 +437,7 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
{
struct adv7180_state *state = to_state(sd);
- fmt->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
fmt->field = V4L2_FIELD_INTERLACED;
fmt->width = 720;
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index df461b07b2f7..28940cc3a766 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -421,12 +421,12 @@ static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int adv7183_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index > 0)
return -EINVAL;
- *code = V4L2_MBUS_FMT_UYVY8_2X8;
+ *code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
@@ -435,7 +435,7 @@ static int adv7183_try_mbus_fmt(struct v4l2_subdev *sd,
{
struct adv7183 *decoder = to_adv7183(sd);
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
if (decoder->std & V4L2_STD_525_60) {
fmt->field = V4L2_FIELD_SEQ_TB;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f98acf4aafd4..81736aaf0f31 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -26,6 +26,7 @@
#include <linux/videodev2.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
+#include <linux/hdmi.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
@@ -96,6 +97,10 @@ struct adv7511_state {
bool have_monitor;
/* timings from s_dv_timings */
struct v4l2_dv_timings dv_timings;
+ u32 fmt_code;
+ u32 colorspace;
+ u32 ycbcr_enc;
+ u32 quantization;
/* controls */
struct v4l2_ctrl *hdmi_mode_ctrl;
struct v4l2_ctrl *hotplug_ctrl;
@@ -779,26 +784,234 @@ static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
if (edid->pad != 0)
return -EINVAL;
- if ((edid->blocks == 0) || (edid->blocks > 256))
- return -EINVAL;
- if (!state->edid.segments) {
- v4l2_dbg(1, debug, sd, "EDID segment 0 not found\n");
- return -ENODATA;
+
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = state->edid.segments * 2;
+ return 0;
}
+
+ if (state->edid.segments == 0)
+ return -ENODATA;
+
if (edid->start_block >= state->edid.segments * 2)
- return -E2BIG;
- if ((edid->blocks + edid->start_block) >= state->edid.segments * 2)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > state->edid.segments * 2)
edid->blocks = state->edid.segments * 2 - edid->start_block;
memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
128 * edid->blocks);
+
+ return 0;
+}
+
+static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad != 0)
+ return -EINVAL;
+
+ switch (code->index) {
+ case 0:
+ code->code = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case 1:
+ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ break;
+ case 2:
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void adv7511_fill_format(struct adv7511_state *state,
+ struct v4l2_mbus_framefmt *format)
+{
+ memset(format, 0, sizeof(*format));
+
+ format->width = state->dv_timings.bt.width;
+ format->height = state->dv_timings.bt.height;
+ format->field = V4L2_FIELD_NONE;
+}
+
+static int adv7511_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ adv7511_fill_format(state, &format->format);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ format->format.code = fmt->code;
+ format->format.colorspace = fmt->colorspace;
+ format->format.ycbcr_enc = fmt->ycbcr_enc;
+ format->format.quantization = fmt->quantization;
+ } else {
+ format->format.code = state->fmt_code;
+ format->format.colorspace = state->colorspace;
+ format->format.ycbcr_enc = state->ycbcr_enc;
+ format->format.quantization = state->quantization;
+ }
+
+ return 0;
+}
+
+static int adv7511_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ /*
+ * Bitfield namings come from the CEA-861-F standard, table 8 "Auxiliary
+ * Video Information (AVI) InfoFrame Format"
+ *
+ * c = Colorimetry
+ * ec = Extended Colorimetry
+ * y = RGB or YCbCr
+ * q = RGB Quantization Range
+ * yq = YCC Quantization Range
+ */
+ u8 c = HDMI_COLORIMETRY_NONE;
+ u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ u8 y = HDMI_COLORSPACE_RGB;
+ u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+
+ if (format->pad != 0)
+ return -EINVAL;
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511_fill_format(state, &format->format);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(fh, format->pad);
+ fmt->code = format->format.code;
+ fmt->colorspace = format->format.colorspace;
+ fmt->ycbcr_enc = format->format.ycbcr_enc;
+ fmt->quantization = format->format.quantization;
+ return 0;
+ }
+
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
+ y = HDMI_COLORSPACE_YUV422;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
+ y = HDMI_COLORSPACE_YUV422;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ default:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
+ break;
+ }
+ state->fmt_code = format->format.code;
+ state->colorspace = format->format.colorspace;
+ state->ycbcr_enc = format->format.ycbcr_enc;
+ state->quantization = format->format.quantization;
+
+ switch (format->format.colorspace) {
+ case V4L2_COLORSPACE_ADOBERGB:
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = y ? HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 :
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB;
+ break;
+ case V4L2_COLORSPACE_SMPTE170M:
+ c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ }
+ break;
+ case V4L2_COLORSPACE_REC709:
+ c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+ }
+ break;
+ case V4L2_COLORSPACE_SRGB:
+ c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
+ ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
+ case V4L2_COLORSPACE_BT2020:
+ c = HDMI_COLORIMETRY_EXTENDED;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
+ ec = 5; /* Not yet available in hdmi.h */
+ else
+ ec = 6; /* Not yet available in hdmi.h */
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * CEA-861-F says that for RGB formats the YCC range must match the
+ * RGB range, although sources should ignore the YCC range.
+ *
+ * The RGB quantization range shouldn't be non-zero if the EDID doesn't
+ * have the Q bit set in the Video Capabilities Data Block, however this
+ * isn't checked at the moment. The assumption is that the application
+ * knows the EDID and can detect this.
+ *
+ * The same is true for the YCC quantization range: non-standard YCC
+ * quantization ranges should only be sent if the EDID has the YQ bit
+ * set in the Video Capabilities Data Block.
+ */
+ switch (format->format.quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
+ break;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ break;
+ }
+
+ adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
+ adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
+ adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
+ adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2));
+ adv7511_wr_and_or(sd, 0x59, 0x0f, yq << 4);
+ adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
+
return 0;
}
static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
.get_edid = adv7511_get_edid,
+ .enum_mbus_code = adv7511_enum_mbus_code,
+ .get_fmt = adv7511_get_fmt,
+ .set_fmt = adv7511_set_fmt,
.enum_dv_timings = adv7511_enum_dv_timings,
.dv_timings_cap = adv7511_dv_timings_cap,
};
@@ -1116,6 +1329,8 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
return -ENODEV;
}
memcpy(&state->pdata, pdata, sizeof(state->pdata));
+ state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+ state->colorspace = V4L2_COLORSPACE_SRGB;
sd = &state->sd;
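With enum_mbus_code, get_fmt and set_fmt now wired into adv7511_pad_ops, the encoder's input bus format can be negotiated through its subdev device node. A hedged userspace sketch of that call (the subdev path is a placeholder and error handling is minimal; it assumes the subdev API is enabled and the pad format ioctls behave as in the hunk above):

/* Illustrative userspace usage, not part of the patch. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media-bus-format.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

static int set_adv7511_input_format(const char *subdev_path)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	int fd = open(subdev_path, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	/* Width/height are taken from the configured DV timings by the
	 * driver; only code, colorspace and quantization are set here. */
	fmt.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
	fmt.format.colorspace = V4L2_COLORSPACE_REC709;
	fmt.format.quantization = V4L2_QUANTIZATION_LIM_RANGE;

	ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
	close(fd);
	return ret;
}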
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 47795ff71688..e43dd2e2a38a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -88,7 +88,7 @@ struct adv7604_reg_seq {
};
struct adv7604_format_info {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u8 op_ch_sel;
bool rgb_out;
bool swap_cb_cr;
@@ -749,77 +749,77 @@ static void adv7604_write_reg_seq(struct v4l2_subdev *sd,
*/
static const struct adv7604_format_info adv7604_formats[] = {
- { V4L2_MBUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
+ { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV10_2X10, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV10_2X10, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_YVYU10_2X10, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU10_2X10, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
+ { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
+ { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_UYVY10_1X20, ADV7604_OP_CH_SEL_RBG, false, false,
+ { MEDIA_BUS_FMT_UYVY10_1X20, ADV7604_OP_CH_SEL_RBG, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_VYUY10_1X20, ADV7604_OP_CH_SEL_RBG, false, true,
+ { MEDIA_BUS_FMT_VYUY10_1X20, ADV7604_OP_CH_SEL_RBG, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_YUYV10_1X20, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV10_1X20, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_YVYU10_1X20, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU10_1X20, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
- { V4L2_MBUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
+ { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
+ { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
};
static const struct adv7604_format_info adv7611_formats[] = {
- { V4L2_MBUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
+ { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
+ { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
+ { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
- { V4L2_MBUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
+ { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
+ { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
+ { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
- { V4L2_MBUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
+ { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
};
static const struct adv7604_format_info *
-adv7604_format_info(struct adv7604_state *state, enum v4l2_mbus_pixelcode code)
+adv7604_format_info(struct adv7604_state *state, u32 code)
{
unsigned int i;
@@ -1917,7 +1917,7 @@ static int adv7604_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
info = adv7604_format_info(state, format->format.code);
if (info == NULL)
- info = adv7604_format_info(state, V4L2_MBUS_FMT_YUYV8_2X8);
+ info = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
adv7604_fill_format(state, &format->format);
format->format.code = info->code;
@@ -1997,19 +1997,7 @@ static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
struct adv7604_state *state = to_state(sd);
u8 *data = NULL;
- if (edid->pad > ADV7604_PAD_HDMI_PORT_D)
- return -EINVAL;
- if (edid->blocks == 0)
- return -EINVAL;
- if (edid->blocks > 2)
- return -EINVAL;
- if (edid->start_block > 1)
- return -EINVAL;
- if (edid->start_block == 1)
- edid->blocks = 1;
-
- if (edid->blocks > state->edid.blocks)
- edid->blocks = state->edid.blocks;
+ memset(edid->reserved, 0, sizeof(edid->reserved));
switch (edid->pad) {
case ADV7604_PAD_HDMI_PORT_A:
@@ -2021,14 +2009,24 @@ static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
break;
default:
return -EINVAL;
- break;
}
- if (!data)
+
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = data ? state->edid.blocks : 0;
+ return 0;
+ }
+
+ if (data == NULL)
return -ENODATA;
- memcpy(edid->edid,
- data + edid->start_block * 128,
- edid->blocks * 128);
+ if (edid->start_block >= state->edid.blocks)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > state->edid.blocks)
+ edid->blocks = state->edid.blocks - edid->start_block;
+
+ memcpy(edid->edid, data + edid->start_block * 128, edid->blocks * 128);
+
return 0;
}
@@ -2068,6 +2066,8 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
int err;
int i;
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
if (edid->pad > ADV7604_PAD_HDMI_PORT_D)
return -EINVAL;
if (edid->start_block != 0)
@@ -2164,7 +2164,6 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return -EIO;
}
-
/* enable hotplug after 100 ms */
queue_delayed_work(state->work_queues,
&state->delayed_work_enable_hotplug, HZ / 10);
@@ -2807,7 +2806,7 @@ static int adv7604_probe(struct i2c_client *client,
}
state->timings = cea640x480;
- state->format = adv7604_format_info(state, V4L2_MBUS_FMT_YUYV8_2X8);
+ state->format = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
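The reworked get_edid handlers in the adv7511 and adv7604 hunks (and the adv7842 change just below) share one convention: a request with start_block and blocks both zero only reports how many blocks are stored, and an over-long read is clipped to what is available instead of being rejected. A minimal userspace sketch of that two-step query (path and pad are placeholders, error handling is reduced to the essentials):

/* Illustrative userspace usage, not part of the patch. */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

static int read_edid(const char *subdev_path, unsigned int pad)
{
	struct v4l2_edid edid;
	int fd = open(subdev_path, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	/* Step 1: start_block == blocks == 0 reports the block count. */
	memset(&edid, 0, sizeof(edid));
	edid.pad = pad;
	ret = ioctl(fd, VIDIOC_SUBDEV_G_EDID, &edid);

	if (ret == 0 && edid.blocks) {
		/* Step 2: read it all; the driver clips blocks if the
		 * request runs past the stored EDID. */
		edid.start_block = 0;
		edid.edid = malloc(edid.blocks * 128);
		ret = edid.edid ? ioctl(fd, VIDIOC_SUBDEV_G_EDID, &edid) : -1;
		free(edid.edid);
	}

	close(fd);
	return ret;
}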
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 48b628bc6714..75d26dfd0939 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1877,12 +1877,12 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
}
static int adv7842_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
/* Good enough for now */
- *code = V4L2_MBUS_FMT_FIXED;
+ *code = MEDIA_BUS_FMT_FIXED;
return 0;
}
@@ -1893,7 +1893,7 @@ static int adv7842_g_mbus_fmt(struct v4l2_subdev *sd,
fmt->width = state->timings.bt.width;
fmt->height = state->timings.bt.height;
- fmt->code = V4L2_MBUS_FMT_FIXED;
+ fmt->code = MEDIA_BUS_FMT_FIXED;
fmt->field = V4L2_FIELD_NONE;
if (state->mode == ADV7842_MODE_SDP) {
@@ -2028,16 +2028,7 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
struct adv7842_state *state = to_state(sd);
u8 *data = NULL;
- if (edid->pad > ADV7842_EDID_PORT_VGA)
- return -EINVAL;
- if (edid->blocks == 0)
- return -EINVAL;
- if (edid->blocks > 2)
- return -EINVAL;
- if (edid->start_block > 1)
- return -EINVAL;
- if (edid->start_block == 1)
- edid->blocks = 1;
+ memset(edid->reserved, 0, sizeof(edid->reserved));
switch (edid->pad) {
case ADV7842_EDID_PORT_A:
@@ -2052,12 +2043,23 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
default:
return -EINVAL;
}
+
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = data ? 2 : 0;
+ return 0;
+ }
+
if (!data)
return -ENODATA;
- memcpy(edid->edid,
- data + edid->start_block * 128,
- edid->blocks * 128);
+ if (edid->start_block >= 2)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > 2)
+ edid->blocks = 2 - edid->start_block;
+
+ memcpy(edid->edid, data + edid->start_block * 128, edid->blocks * 128);
+
return 0;
}
@@ -2066,12 +2068,16 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
struct adv7842_state *state = to_state(sd);
int err = 0;
+ memset(e->reserved, 0, sizeof(e->reserved));
+
if (e->pad > ADV7842_EDID_PORT_VGA)
return -EINVAL;
if (e->start_block != 0)
return -EINVAL;
- if (e->blocks > 2)
+ if (e->blocks > 2) {
+ e->blocks = 2;
return -E2BIG;
+ }
/* todo, per edid */
state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index c14e66756b98..69aeaf397624 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -102,7 +102,7 @@ static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 0, 720, 2,
&mf->height, 0, ak881x->lines, 1, 0);
mf->field = V4L2_FIELD_INTERLACED;
- mf->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
@@ -112,19 +112,19 @@ static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
if (mf->field != V4L2_FIELD_INTERLACED ||
- mf->code != V4L2_MBUS_FMT_YUYV8_2X8)
+ mf->code != MEDIA_BUS_FMT_YUYV8_2X8)
return -EINVAL;
return ak881x_try_g_mbus_fmt(sd, mf);
}
static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ *code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e453a3ffe7d1..573e08826b9b 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -879,7 +879,7 @@ void cx25840_std_setup(struct i2c_client *client)
/* Sets horizontal blanking delay and active lines */
cx25840_write(client, 0x470, hblank);
cx25840_write(client, 0x471,
- 0xff & (((hblank >> 8) & 0x3) | (hactive << 4)));
+ (((hblank >> 8) & 0x3) | (hactive << 4)) & 0xff);
cx25840_write(client, 0x472, hactive >> 4);
/* Sets burst gate delay */
@@ -888,13 +888,13 @@ void cx25840_std_setup(struct i2c_client *client)
/* Sets vertical blanking delay and active duration */
cx25840_write(client, 0x474, vblank);
cx25840_write(client, 0x475,
- 0xff & (((vblank >> 8) & 0x3) | (vactive << 4)));
+ (((vblank >> 8) & 0x3) | (vactive << 4)) & 0xff);
cx25840_write(client, 0x476, vactive >> 4);
cx25840_write(client, 0x477, vblank656);
/* Sets src decimation rate */
- cx25840_write(client, 0x478, 0xff & src_decimation);
- cx25840_write(client, 0x479, 0xff & (src_decimation >> 8));
+ cx25840_write(client, 0x478, src_decimation & 0xff);
+ cx25840_write(client, 0x479, (src_decimation >> 8) & 0xff);
/* Sets Luma and UV Low pass filters */
cx25840_write(client, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
@@ -904,8 +904,8 @@ void cx25840_std_setup(struct i2c_client *client)
/* Sets SC Step*/
cx25840_write(client, 0x47c, sc);
- cx25840_write(client, 0x47d, 0xff & sc >> 8);
- cx25840_write(client, 0x47e, 0xff & sc >> 16);
+ cx25840_write(client, 0x47d, (sc >> 8) & 0xff);
+ cx25840_write(client, 0x47e, (sc >> 16) & 0xff);
/* Sets VBI parameters */
if (std & V4L2_STD_625_50) {
@@ -1373,7 +1373,7 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
int is_50Hz = !(state->std & V4L2_STD_525_60);
- if (fmt->code != V4L2_MBUS_FMT_FIXED)
+ if (fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
fmt->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/i2c/cx25840/cx25840-firmware.c b/drivers/media/i2c/cx25840/cx25840-firmware.c
index b3169f94ece8..9bbb31adc29d 100644
--- a/drivers/media/i2c/cx25840/cx25840-firmware.c
+++ b/drivers/media/i2c/cx25840/cx25840-firmware.c
@@ -113,7 +113,7 @@ int cx25840_loadfw(struct i2c_client *client)
const u8 *ptr;
const char *fwname = get_fw_name(client);
int size, retval;
- int MAX_BUF_SIZE = FWSEND;
+ int max_buf_size = FWSEND;
u32 gpio_oe = 0, gpio_da = 0;
if (is_cx2388x(state)) {
@@ -122,10 +122,9 @@ int cx25840_loadfw(struct i2c_client *client)
gpio_da = cx25840_read(client, 0x164);
}
- if (is_cx231xx(state) && MAX_BUF_SIZE > 16) {
- v4l_err(client, " Firmware download size changed to 16 bytes max length\n");
- MAX_BUF_SIZE = 16; /* cx231xx cannot accept more than 16 bytes at a time */
- }
+ /* cx231xx cannot accept more than 16 bytes at a time */
+ if (is_cx231xx(state) && max_buf_size > 16)
+ max_buf_size = 16;
if (request_firmware(&fw, fwname, FWDEV(client)) != 0) {
v4l_err(client, "unable to open firmware %s\n", fwname);
@@ -140,7 +139,7 @@ int cx25840_loadfw(struct i2c_client *client)
size = fw->size;
ptr = fw->data;
while (size > 0) {
- int len = min(MAX_BUF_SIZE - 2, size);
+ int len = min(max_buf_size - 2, size);
memcpy(buffer + 2, ptr, len);
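The download loop above streams the firmware in writes of at most max_buf_size bytes, keeping the first two bytes of the buffer for the destination register address. A hedged sketch of that chunking pattern (the surrounding error handling and the actual address bytes used by cx25840_loadfw() are omitted):

	while (size > 0) {
		int len = min(max_buf_size - 2, size);

		/* buffer[0]/buffer[1] carry the target register address,
		 * the payload follows; placeholder handling here. */
		memcpy(buffer + 2, ptr, len);
		if (i2c_master_send(client, buffer, len + 2) != len + 2)
			return -EIO;

		ptr += len;
		size -= len;
	}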
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 8311f1a9a38e..175a76114953 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -464,8 +464,7 @@ static int ir_remove(struct i2c_client *client)
cancel_delayed_work_sync(&ir->work);
/* unregister device */
- if (ir->rc)
- rc_unregister_device(ir->rc);
+ rc_unregister_device(ir->rc);
/* free memory */
return 0;
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 8d870b7b43ff..2820f7c38cba 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -57,14 +57,14 @@ static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
[M5MOLS_RESTYPE_MONITOR] = {
.width = 1920,
.height = 1080,
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_JPEG,
},
[M5MOLS_RESTYPE_CAPTURE] = {
.width = 1920,
.height = 1080,
- .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_JPEG,
},
@@ -479,7 +479,7 @@ static int m5mols_get_version(struct v4l2_subdev *sd)
* __find_restype - Lookup M-5MOLS resolution type according to pixel code
* @code: pixel code
*/
-static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
+static enum m5mols_restype __find_restype(u32 code)
{
enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 2cace7313a22..d7307862c2c5 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -192,12 +192,12 @@ static int ml86v7667_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int ml86v7667_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index > 0)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ *code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
@@ -207,7 +207,7 @@ static int ml86v7667_mbus_fmt(struct v4l2_subdev *sd,
{
struct ml86v7667_priv *priv = to_ml86v7667(sd);
- fmt->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
/* The top field is always transferred first by the chip */
fmt->field = V4L2_FIELD_INTERLACED_TB;
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 85ec3bacdf1c..45b3fca188ca 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -323,7 +323,7 @@ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index != 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_Y8_1X8;
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
return 0;
}
@@ -331,7 +331,7 @@ static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index != 0 || fse->code != V4L2_MBUS_FMT_Y8_1X8)
+ if (fse->index != 0 || fse->code != MEDIA_BUS_FMT_Y8_1X8)
return -EINVAL;
fse->min_width = MT9M032_COLUMN_SIZE_DEF;
@@ -759,7 +759,7 @@ static int mt9m032_probe(struct i2c_client *client,
sensor->format.width = sensor->crop.width;
sensor->format.height = sensor->crop.height;
- sensor->format.code = V4L2_MBUS_FMT_Y8_1X8;
+ sensor->format.code = MEDIA_BUS_FMT_Y8_1X8;
sensor->format.field = V4L2_FIELD_NONE;
sensor->format.colorspace = V4L2_COLORSPACE_SRGB;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index e18797ff7faf..edb76bd33d16 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -950,9 +950,9 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
format = v4l2_subdev_get_try_format(fh, 0);
if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- format->code = V4L2_MBUS_FMT_Y12_1X12;
+ format->code = MEDIA_BUS_FMT_Y12_1X12;
else
- format->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
format->width = MT9P031_WINDOW_WIDTH_DEF;
format->height = MT9P031_WINDOW_HEIGHT_DEF;
@@ -1120,9 +1120,9 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->crop.top = MT9P031_ROW_START_DEF;
if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12;
+ mt9p031->format.code = MEDIA_BUS_FMT_Y12_1X12;
else
- mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ mt9p031->format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF;
mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 422e068f5f1b..d9e9889b579f 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -333,7 +333,7 @@ static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index > 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
return 0;
}
@@ -341,7 +341,7 @@ static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+ if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
return -EINVAL;
fse->min_width = (MT9T001_WINDOW_WIDTH_DEF + 1) / fse->index;
@@ -792,7 +792,7 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
format = v4l2_subdev_get_try_format(fh, 0);
- format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
format->field = V4L2_FIELD_NONE;
@@ -917,7 +917,7 @@ static int mt9t001_probe(struct i2c_client *client,
mt9t001->crop.width = MT9T001_WINDOW_WIDTH_DEF + 1;
mt9t001->crop.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
- mt9t001->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ mt9t001->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
mt9t001->format.width = MT9T001_WINDOW_WIDTH_DEF + 1;
mt9t001->format.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
mt9t001->format.field = V4L2_FIELD_NONE;
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 47e475319a24..a10f7f8f0558 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -325,18 +325,18 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
}
static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index > 0)
return -EINVAL;
- *code = V4L2_MBUS_FMT_SGRBG8_1X8;
+ *code = MEDIA_BUS_FMT_SGRBG8_1X8;
return 0;
}
static int mt9v011_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
- if (fmt->code != V4L2_MBUS_FMT_SGRBG8_1X8)
+ if (fmt->code != MEDIA_BUS_FMT_SGRBG8_1X8)
return -EINVAL;
v4l_bound_align_image(&fmt->width, 48, 639, 1,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index d044bce312e0..93687c1e4097 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -454,7 +454,7 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index > 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
return 0;
}
@@ -462,7 +462,7 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 3 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10)
+ if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
return -EINVAL;
fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
@@ -814,9 +814,9 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
format = v4l2_subdev_get_try_format(fh, 0);
if (mt9v032->model->color)
- format->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
else
- format->code = V4L2_MBUS_FMT_Y10_1X10;
+ format->code = MEDIA_BUS_FMT_Y10_1X10;
format->width = MT9V032_WINDOW_WIDTH_DEF;
format->height = MT9V032_WINDOW_HEIGHT_DEF;
@@ -966,9 +966,9 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF;
if (mt9v032->model->color)
- mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ mt9v032->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
else
- mt9v032->format.code = V4L2_MBUS_FMT_Y10_1X10;
+ mt9v032->format.code = MEDIA_BUS_FMT_Y10_1X10;
mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF;
mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF;
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 7eae48766e2b..00c7b26f4823 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(debug, "Enable module debug trace. Set to 1 to enable.");
#define REG_TERM 0xFFFF
struct noon010_format {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
u16 ispctl1_reg;
};
@@ -175,23 +175,23 @@ static const struct noon010_frmsize noon010_sizes[] = {
/* Supported pixel formats. */
static const struct noon010_format noon010_formats[] = {
{
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x03,
}, {
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x02,
}, {
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0,
}, {
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x01,
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x40,
},
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index dd3db2458a4f..957927f7a353 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -632,31 +632,31 @@ static int ov7670_detect(struct v4l2_subdev *sd)
* The magic matrix numbers come from OmniVision.
*/
static struct ov7670_format_struct {
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
enum v4l2_colorspace colorspace;
struct regval_list *regs;
int cmatrix[CMATRIX_LEN];
} ov7670_formats[] = {
{
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.regs = ov7670_fmt_yuv422,
.cmatrix = { 128, -128, 0, -34, -94, 128 },
},
{
- .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb444,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
},
{
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb565,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
},
{
- .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_raw,
.cmatrix = { 0, 0, 0, 0, 0, 0 },
@@ -772,7 +772,7 @@ static void ov7675_get_framerate(struct v4l2_subdev *sd,
pll_factor = PLL_FACTOR;
clkrc++;
- if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8)
clkrc = (clkrc >> 1);
tpf->numerator = 1;
@@ -810,7 +810,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
} else {
clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) /
(4 * tpf->denominator);
- if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8)
clkrc = (clkrc << 1);
clkrc--;
}
@@ -900,7 +900,7 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
static int ov7670_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= N_OV7670_FMTS)
return -EINVAL;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 4da90c621f7e..2246bd5436ad 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -384,17 +384,17 @@ static const struct ov965x_framesize ov965x_framesizes[] = {
};
struct ov965x_pixfmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u32 colorspace;
/* REG_TSLB value, only bits [3:2] may be set. */
u8 tslb_reg;
};
static const struct ov965x_pixfmt ov965x_formats[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00},
- { V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04},
- { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c},
- { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08},
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00},
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04},
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c},
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08},
};
/*
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9656b6723dc6..13aed59f0f5d 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -27,8 +27,8 @@
#define DRIVER_NAME "S5C73M3"
-#define S5C73M3_ISP_FMT V4L2_MBUS_FMT_VYUY8_2X8
-#define S5C73M3_JPEG_FMT V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8
+#define S5C73M3_ISP_FMT MEDIA_BUS_FMT_VYUY8_2X8
+#define S5C73M3_JPEG_FMT MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8
/* Subdevs pad index definitions */
enum s5c73m3_pads {
@@ -402,7 +402,7 @@ struct s5c73m3 {
const struct s5c73m3_frame_size *sensor_pix_size[2];
const struct s5c73m3_frame_size *oif_pix_size[2];
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
const struct s5c73m3_interval *fiv;
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 1fcc76fd1bbf..d1c50c9d43ae 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -151,7 +151,7 @@ static const struct s5k4ecgx_frmsize s5k4ecgx_prev_sizes[] = {
#define S5K4ECGX_NUM_PREV ARRAY_SIZE(s5k4ecgx_prev_sizes)
struct s5k4ecgx_pixfmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u32 colorspace;
/* REG_TC_PCFG_Format register value */
u16 reg_p_format;
@@ -159,7 +159,7 @@ struct s5k4ecgx_pixfmt {
/* By default value, output from sensor will be YUV422 0-255 */
static const struct s5k4ecgx_pixfmt s5k4ecgx_formats[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
};
static const char * const s5k4ecgx_supply_names[] = {
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 0e461a6fd065..60a74d8d38d5 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -248,7 +248,7 @@ enum s5k5baf_gpio_id {
#define NUM_ISP_PADS 2
struct s5k5baf_pixfmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u32 colorspace;
/* REG_P_FMT(x) register value */
u16 reg_p_fmt;
@@ -331,10 +331,10 @@ struct s5k5baf {
};
static const struct s5k5baf_pixfmt s5k5baf_formats[] = {
- { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 },
/* range 16-240 */
- { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 },
- { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
};
static struct v4l2_rect s5k5baf_cis_rect = {
@@ -1206,7 +1206,7 @@ static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
if (code->pad == PAD_CIS) {
if (code->index > 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_FIXED;
+ code->code = MEDIA_BUS_FMT_FIXED;
return 0;
}
@@ -1227,7 +1227,7 @@ static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
return -EINVAL;
if (fse->pad == PAD_CIS) {
- fse->code = V4L2_MBUS_FMT_FIXED;
+ fse->code = MEDIA_BUS_FMT_FIXED;
fse->min_width = S5K5BAF_CIS_WIDTH;
fse->max_width = S5K5BAF_CIS_WIDTH;
fse->min_height = S5K5BAF_CIS_HEIGHT;
@@ -1252,7 +1252,7 @@ static void s5k5baf_try_cis_format(struct v4l2_mbus_framefmt *mf)
{
mf->width = S5K5BAF_CIS_WIDTH;
mf->height = S5K5BAF_CIS_HEIGHT;
- mf->code = V4L2_MBUS_FMT_FIXED;
+ mf->code = MEDIA_BUS_FMT_FIXED;
mf->colorspace = V4L2_COLORSPACE_JPEG;
mf->field = V4L2_FIELD_NONE;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index c11a40850ed1..91b841a1b850 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -80,7 +80,7 @@ static inline struct s5k6a3 *sd_to_s5k6a3(struct v4l2_subdev *sd)
static const struct v4l2_mbus_framefmt s5k6a3_formats[] = {
{
- .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
.colorspace = V4L2_COLORSPACE_SRGB,
.field = V4L2_FIELD_NONE,
}
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 629a5cdadd3a..2851581e0061 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -191,7 +191,7 @@ struct s5k6aa_regval {
};
struct s5k6aa_pixfmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u32 colorspace;
/* REG_P_FMT(x) register value */
u16 reg_p_fmt;
@@ -285,10 +285,10 @@ static struct s5k6aa_regval s5k6aa_analog_config[] = {
/* TODO: Add RGB888 and Bayer format */
static const struct s5k6aa_pixfmt s5k6aa_formats[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
/* range 16-240 */
- { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_REC709, 6 },
- { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_REC709, 6 },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
};
static const struct s5k6aa_interval s5k6aa_intervals[] = {
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 4024ea6f1371..f14c0e6435a3 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -562,7 +562,7 @@ static int saa6752hs_g_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefm
h->video_format = SAA6752HS_VF_D1;
f->width = v4l2_format_table[h->video_format].fmt.pix.width;
f->height = v4l2_format_table[h->video_format].fmt.pix.height;
- f->code = V4L2_MBUS_FMT_FIXED;
+ f->code = MEDIA_BUS_FMT_FIXED;
f->field = V4L2_FIELD_INTERLACED;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
@@ -572,7 +572,7 @@ static int saa6752hs_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_frame
{
int dist_352, dist_480, dist_720;
- f->code = V4L2_MBUS_FMT_FIXED;
+ f->code = MEDIA_BUS_FMT_FIXED;
dist_352 = abs(f->width - 352);
dist_480 = abs(f->width - 480);
@@ -599,7 +599,7 @@ static int saa6752hs_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefm
{
struct saa6752hs_state *h = to_state(sd);
- if (f->code != V4L2_MBUS_FMT_FIXED)
+ if (f->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
/*
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 35a44648150e..7147c8b68fac 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1172,7 +1172,7 @@ static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
static int saa711x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
- if (fmt->code != V4L2_MBUS_FMT_FIXED)
+ if (fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 6922a9f9a5cd..0d0f9a917cd3 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -998,7 +998,7 @@ static int saa717x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
v4l2_dbg(1, debug, sd, "decoder set size\n");
- if (fmt->code != V4L2_MBUS_FMT_FIXED)
+ if (fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
/* FIXME need better bounds checking here */
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index 2335529b195c..e40d9027df3d 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -65,26 +65,89 @@ static int bounds_check(struct device *dev, uint32_t val,
static void print_pll(struct device *dev, struct smiapp_pll *pll)
{
- dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
- dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
- if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
- dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
- dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
+ dev_dbg(dev, "pre_pll_clk_div\t%u\n", pll->pre_pll_clk_div);
+ dev_dbg(dev, "pll_multiplier \t%u\n", pll->pll_multiplier);
+ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+ dev_dbg(dev, "op_sys_clk_div \t%u\n", pll->op.sys_clk_div);
+ dev_dbg(dev, "op_pix_clk_div \t%u\n", pll->op.pix_clk_div);
}
- dev_dbg(dev, "vt_sys_clk_div \t%d\n", pll->vt_sys_clk_div);
- dev_dbg(dev, "vt_pix_clk_div \t%d\n", pll->vt_pix_clk_div);
-
- dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
- dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
- dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
- if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
- dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
- pll->op_sys_clk_freq_hz);
- dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
- pll->op_pix_clk_freq_hz);
+ dev_dbg(dev, "vt_sys_clk_div \t%u\n", pll->vt.sys_clk_div);
+ dev_dbg(dev, "vt_pix_clk_div \t%u\n", pll->vt.pix_clk_div);
+
+ dev_dbg(dev, "ext_clk_freq_hz \t%u\n", pll->ext_clk_freq_hz);
+ dev_dbg(dev, "pll_ip_clk_freq_hz \t%u\n", pll->pll_ip_clk_freq_hz);
+ dev_dbg(dev, "pll_op_clk_freq_hz \t%u\n", pll->pll_op_clk_freq_hz);
+ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+ dev_dbg(dev, "op_sys_clk_freq_hz \t%u\n",
+ pll->op.sys_clk_freq_hz);
+ dev_dbg(dev, "op_pix_clk_freq_hz \t%u\n",
+ pll->op.pix_clk_freq_hz);
}
- dev_dbg(dev, "vt_sys_clk_freq_hz \t%d\n", pll->vt_sys_clk_freq_hz);
- dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
+ dev_dbg(dev, "vt_sys_clk_freq_hz \t%u\n", pll->vt.sys_clk_freq_hz);
+ dev_dbg(dev, "vt_pix_clk_freq_hz \t%u\n", pll->vt.pix_clk_freq_hz);
+}
+
+static int check_all_bounds(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ const struct smiapp_pll_branch_limits *op_limits,
+ struct smiapp_pll *pll,
+ struct smiapp_pll_branch *op_pll)
+{
+ int rval;
+
+ rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz,
+ limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_multiplier,
+ limits->min_pll_multiplier, limits->max_pll_multiplier,
+ "pll_multiplier");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_op_clk_freq_hz,
+ limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
+ "pll_op_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, op_pll->sys_clk_div,
+ op_limits->min_sys_clk_div, op_limits->max_sys_clk_div,
+ "op_sys_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, op_pll->sys_clk_freq_hz,
+ op_limits->min_sys_clk_freq_hz,
+ op_limits->max_sys_clk_freq_hz,
+ "op_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, op_pll->pix_clk_freq_hz,
+ op_limits->min_pix_clk_freq_hz,
+ op_limits->max_pix_clk_freq_hz,
+ "op_pix_clk_freq_hz");
+
+ /*
+ * If there are no OP clocks, the VT clocks are contained in
+ * the OP clock struct.
+ */
+ if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)
+ return rval;
+
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt.sys_clk_freq_hz,
+ limits->vt.min_sys_clk_freq_hz,
+ limits->vt.max_sys_clk_freq_hz,
+ "vt_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt.pix_clk_freq_hz,
+ limits->vt.min_pix_clk_freq_hz,
+ limits->vt.max_pix_clk_freq_hz,
+ "vt_pix_clk_freq_hz");
+
+ return rval;
}
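check_all_bounds() funnels every derived divisor and frequency through the pre-existing bounds_check() helper, whose body is outside this hunk. The shape assumed here, reconstructed from its call sites (an approximation, not the actual smiapp-pll.c definition):

static int bounds_check(struct device *dev, uint32_t val,
			uint32_t min, uint32_t max, const char *str)
{
	if (val >= min && val <= max)
		return 0;

	dev_dbg(dev, "%s out of bounds: %u (%u--%u)\n", str, val, min, max);
	return -EINVAL;
}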
/*
@@ -98,10 +161,11 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
*
* @return Zero on success, error code on error.
*/
-static int __smiapp_pll_calculate(struct device *dev,
- const struct smiapp_pll_limits *limits,
- struct smiapp_pll *pll, uint32_t mul,
- uint32_t div, uint32_t lane_op_clock_ratio)
+static int __smiapp_pll_calculate(
+ struct device *dev, const struct smiapp_pll_limits *limits,
+ const struct smiapp_pll_branch_limits *op_limits,
+ struct smiapp_pll *pll, struct smiapp_pll_branch *op_pll, uint32_t mul,
+ uint32_t div, uint32_t lane_op_clock_ratio)
{
uint32_t sys_div;
uint32_t best_pix_div = INT_MAX >> 1;
@@ -117,17 +181,16 @@ static int __smiapp_pll_calculate(struct device *dev,
uint32_t min_vt_div, max_vt_div, vt_div;
uint32_t min_sys_div, max_sys_div;
unsigned int i;
- int rval;
/*
* Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
* too high.
*/
- dev_dbg(dev, "pre_pll_clk_div %d\n", pll->pre_pll_clk_div);
+ dev_dbg(dev, "pre_pll_clk_div %u\n", pll->pre_pll_clk_div);
/* Don't go above max pll multiplier. */
more_mul_max = limits->max_pll_multiplier / mul;
- dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %d\n",
+ dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %u\n",
more_mul_max);
/* Don't go above max pll op frequency. */
more_mul_max =
@@ -135,30 +198,30 @@ static int __smiapp_pll_calculate(struct device *dev,
more_mul_max,
limits->max_pll_op_freq_hz
/ (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
- dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %d\n",
+ dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %u\n",
more_mul_max);
/* Don't go above the division capability of op sys clock divider. */
more_mul_max = min(more_mul_max,
- limits->op.max_sys_clk_div * pll->pre_pll_clk_div
+ op_limits->max_sys_clk_div * pll->pre_pll_clk_div
/ div);
- dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
+ dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %u\n",
more_mul_max);
/* Ensure we won't go above min_pll_multiplier. */
more_mul_max = min(more_mul_max,
DIV_ROUND_UP(limits->max_pll_multiplier, mul));
- dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %d\n",
+ dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %u\n",
more_mul_max);
/* Ensure we won't go below min_pll_op_freq_hz. */
more_mul_min = DIV_ROUND_UP(limits->min_pll_op_freq_hz,
pll->ext_clk_freq_hz / pll->pre_pll_clk_div
* mul);
- dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %d\n",
+ dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %u\n",
more_mul_min);
/* Ensure we won't go below min_pll_multiplier. */
more_mul_min = max(more_mul_min,
DIV_ROUND_UP(limits->min_pll_multiplier, mul));
- dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %d\n",
+ dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %u\n",
more_mul_min);
if (more_mul_min > more_mul_max) {
@@ -168,23 +231,23 @@ static int __smiapp_pll_calculate(struct device *dev,
}
more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
- dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
- more_mul_factor = lcm(more_mul_factor, limits->op.min_sys_clk_div);
+ dev_dbg(dev, "more_mul_factor: %u\n", more_mul_factor);
+ more_mul_factor = lcm(more_mul_factor, op_limits->min_sys_clk_div);
dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
more_mul_factor);
i = roundup(more_mul_min, more_mul_factor);
if (!is_one_or_even(i))
i <<= 1;
- dev_dbg(dev, "final more_mul: %d\n", i);
+ dev_dbg(dev, "final more_mul: %u\n", i);
if (i > more_mul_max) {
- dev_dbg(dev, "final more_mul is bad, max %d\n", more_mul_max);
+ dev_dbg(dev, "final more_mul is bad, max %u\n", more_mul_max);
return -EINVAL;
}
pll->pll_multiplier = mul * i;
- pll->op_sys_clk_div = div * i / pll->pre_pll_clk_div;
- dev_dbg(dev, "op_sys_clk_div: %d\n", pll->op_sys_clk_div);
+ op_pll->sys_clk_div = div * i / pll->pre_pll_clk_div;
+ dev_dbg(dev, "op_sys_clk_div: %u\n", op_pll->sys_clk_div);
pll->pll_ip_clk_freq_hz = pll->ext_clk_freq_hz
/ pll->pre_pll_clk_div;
@@ -193,14 +256,19 @@ static int __smiapp_pll_calculate(struct device *dev,
* pll->pll_multiplier;
/* Derive pll_op_clk_freq_hz. */
- pll->op_sys_clk_freq_hz =
- pll->pll_op_clk_freq_hz / pll->op_sys_clk_div;
+ op_pll->sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / op_pll->sys_clk_div;
+
+ op_pll->pix_clk_div = pll->bits_per_pixel;
+ dev_dbg(dev, "op_pix_clk_div: %u\n", op_pll->pix_clk_div);
- pll->op_pix_clk_div = pll->bits_per_pixel;
- dev_dbg(dev, "op_pix_clk_div: %d\n", pll->op_pix_clk_div);
+ op_pll->pix_clk_freq_hz =
+ op_pll->sys_clk_freq_hz / op_pll->pix_clk_div;
- pll->op_pix_clk_freq_hz =
- pll->op_sys_clk_freq_hz / pll->op_pix_clk_div;
+ if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ /* No OP clocks --- VT clocks are used instead. */
+ goto out_skip_vt_calc;
+ }
/*
* Some sensors perform analogue binning and some do this
@@ -214,7 +282,7 @@ static int __smiapp_pll_calculate(struct device *dev,
vt_op_binning_div = pll->binning_horizontal;
else
vt_op_binning_div = 1;
- dev_dbg(dev, "vt_op_binning_div: %d\n", vt_op_binning_div);
+ dev_dbg(dev, "vt_op_binning_div: %u\n", vt_op_binning_div);
/*
* Profile 2 supports vt_pix_clk_div E [4, 10]
@@ -227,30 +295,30 @@ static int __smiapp_pll_calculate(struct device *dev,
*
* Find absolute limits for the factor of vt divider.
*/
- dev_dbg(dev, "scale_m: %d\n", pll->scale_m);
- min_vt_div = DIV_ROUND_UP(pll->op_pix_clk_div * pll->op_sys_clk_div
+ dev_dbg(dev, "scale_m: %u\n", pll->scale_m);
+ min_vt_div = DIV_ROUND_UP(op_pll->pix_clk_div * op_pll->sys_clk_div
* pll->scale_n,
lane_op_clock_ratio * vt_op_binning_div
* pll->scale_m);
/* Find smallest and biggest allowed vt divisor. */
- dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
+ dev_dbg(dev, "min_vt_div: %u\n", min_vt_div);
min_vt_div = max(min_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
limits->vt.max_pix_clk_freq_hz));
- dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
+ dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %u\n",
min_vt_div);
min_vt_div = max_t(uint32_t, min_vt_div,
limits->vt.min_pix_clk_div
* limits->vt.min_sys_clk_div);
- dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
+ dev_dbg(dev, "min_vt_div: min_vt_clk_div: %u\n", min_vt_div);
max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div;
- dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
+ dev_dbg(dev, "max_vt_div: %u\n", max_vt_div);
max_vt_div = min(max_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
limits->vt.min_pix_clk_freq_hz));
- dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
+ dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %u\n",
max_vt_div);
/*
@@ -258,28 +326,28 @@ static int __smiapp_pll_calculate(struct device *dev,
* with all values of pix_clk_div.
*/
min_sys_div = limits->vt.min_sys_clk_div;
- dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
+ dev_dbg(dev, "min_sys_div: %u\n", min_sys_div);
min_sys_div = max(min_sys_div,
DIV_ROUND_UP(min_vt_div,
limits->vt.max_pix_clk_div));
- dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
+ dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %u\n", min_sys_div);
min_sys_div = max(min_sys_div,
pll->pll_op_clk_freq_hz
/ limits->vt.max_sys_clk_freq_hz);
- dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
+ dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %u\n", min_sys_div);
min_sys_div = clk_div_even_up(min_sys_div);
- dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
+ dev_dbg(dev, "min_sys_div: one or even: %u\n", min_sys_div);
max_sys_div = limits->vt.max_sys_clk_div;
- dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
+ dev_dbg(dev, "max_sys_div: %u\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(max_vt_div,
limits->vt.min_pix_clk_div));
- dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %u\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
limits->vt.min_pix_clk_freq_hz));
- dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %u\n", max_sys_div);
/*
* Find pix_div such that a legal pix_div * sys_div results
@@ -296,7 +364,7 @@ static int __smiapp_pll_calculate(struct device *dev,
if (pix_div < limits->vt.min_pix_clk_div
|| pix_div > limits->vt.max_pix_clk_div) {
dev_dbg(dev,
- "pix_div %d too small or too big (%d--%d)\n",
+ "pix_div %u too small or too big (%u--%u)\n",
pix_div,
limits->vt.min_pix_clk_div,
limits->vt.max_pix_clk_div);
@@ -312,73 +380,28 @@ static int __smiapp_pll_calculate(struct device *dev,
break;
}
- pll->vt_sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div);
- pll->vt_pix_clk_div = best_pix_div;
+ pll->vt.sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div);
+ pll->vt.pix_clk_div = best_pix_div;
- pll->vt_sys_clk_freq_hz =
- pll->pll_op_clk_freq_hz / pll->vt_sys_clk_div;
- pll->vt_pix_clk_freq_hz =
- pll->vt_sys_clk_freq_hz / pll->vt_pix_clk_div;
+ pll->vt.sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / pll->vt.sys_clk_div;
+ pll->vt.pix_clk_freq_hz =
+ pll->vt.sys_clk_freq_hz / pll->vt.pix_clk_div;
+out_skip_vt_calc:
pll->pixel_rate_csi =
- pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
+ op_pll->pix_clk_freq_hz * lane_op_clock_ratio;
+ pll->pixel_rate_pixel_array = pll->vt.pix_clk_freq_hz;
- rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
- limits->min_pll_ip_freq_hz,
- limits->max_pll_ip_freq_hz,
- "pll_ip_clk_freq_hz");
- if (!rval)
- rval = bounds_check(
- dev, pll->pll_multiplier,
- limits->min_pll_multiplier, limits->max_pll_multiplier,
- "pll_multiplier");
- if (!rval)
- rval = bounds_check(
- dev, pll->pll_op_clk_freq_hz,
- limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
- "pll_op_clk_freq_hz");
- if (!rval)
- rval = bounds_check(
- dev, pll->op_sys_clk_div,
- limits->op.min_sys_clk_div, limits->op.max_sys_clk_div,
- "op_sys_clk_div");
- if (!rval)
- rval = bounds_check(
- dev, pll->op_pix_clk_div,
- limits->op.min_pix_clk_div, limits->op.max_pix_clk_div,
- "op_pix_clk_div");
- if (!rval)
- rval = bounds_check(
- dev, pll->op_sys_clk_freq_hz,
- limits->op.min_sys_clk_freq_hz,
- limits->op.max_sys_clk_freq_hz,
- "op_sys_clk_freq_hz");
- if (!rval)
- rval = bounds_check(
- dev, pll->op_pix_clk_freq_hz,
- limits->op.min_pix_clk_freq_hz,
- limits->op.max_pix_clk_freq_hz,
- "op_pix_clk_freq_hz");
- if (!rval)
- rval = bounds_check(
- dev, pll->vt_sys_clk_freq_hz,
- limits->vt.min_sys_clk_freq_hz,
- limits->vt.max_sys_clk_freq_hz,
- "vt_sys_clk_freq_hz");
- if (!rval)
- rval = bounds_check(
- dev, pll->vt_pix_clk_freq_hz,
- limits->vt.min_pix_clk_freq_hz,
- limits->vt.max_pix_clk_freq_hz,
- "vt_pix_clk_freq_hz");
-
- return rval;
+ return check_all_bounds(dev, limits, op_limits, pll, op_pll);
}
int smiapp_pll_calculate(struct device *dev,
const struct smiapp_pll_limits *limits,
struct smiapp_pll *pll)
{
+ const struct smiapp_pll_branch_limits *op_limits = &limits->op;
+ struct smiapp_pll_branch *op_pll = &pll->op;
uint16_t min_pre_pll_clk_div;
uint16_t max_pre_pll_clk_div;
uint32_t lane_op_clock_ratio;
@@ -386,13 +409,23 @@ int smiapp_pll_calculate(struct device *dev,
unsigned int i;
int rval = -EINVAL;
+ if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ /*
+ * If there's no OP PLL at all, use the VT values
+ * instead. The OP values are ignored for the rest of
+ * the PLL calculation.
+ */
+ op_limits = &limits->vt;
+ op_pll = &pll->vt;
+ }
+
if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
lane_op_clock_ratio = pll->csi2.lanes;
else
lane_op_clock_ratio = 1;
- dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+ dev_dbg(dev, "lane_op_clock_ratio: %u\n", lane_op_clock_ratio);
- dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ dev_dbg(dev, "binning: %ux%u\n", pll->binning_horizontal,
pll->binning_vertical);
switch (pll->bus_type) {
@@ -411,7 +444,7 @@ int smiapp_pll_calculate(struct device *dev,
}
/* Figure out limits for pre-pll divider based on extclk */
- dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ dev_dbg(dev, "min / max pre_pll_clk_div: %u / %u\n",
limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
max_pre_pll_clk_div =
min_t(uint16_t, limits->max_pre_pll_clk_div,
@@ -422,26 +455,27 @@ int smiapp_pll_calculate(struct device *dev,
clk_div_even_up(
DIV_ROUND_UP(pll->ext_clk_freq_hz,
limits->max_pll_ip_freq_hz)));
- dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %u / %u\n",
min_pre_pll_clk_div, max_pre_pll_clk_div);
i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
mul = div_u64(pll->pll_op_clk_freq_hz, i);
div = pll->ext_clk_freq_hz / i;
- dev_dbg(dev, "mul %d / div %d\n", mul, div);
+ dev_dbg(dev, "mul %u / div %u\n", mul, div);
min_pre_pll_clk_div =
max_t(uint16_t, min_pre_pll_clk_div,
clk_div_even_up(
DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
limits->max_pll_op_freq_hz)));
- dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %u / %u\n",
min_pre_pll_clk_div, max_pre_pll_clk_div);
for (pll->pre_pll_clk_div = min_pre_pll_clk_div;
pll->pre_pll_clk_div <= max_pre_pll_clk_div;
pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) {
- rval = __smiapp_pll_calculate(dev, limits, pll, mul, div,
+ rval = __smiapp_pll_calculate(dev, limits, op_limits, pll,
+ op_pll, mul, div,
lane_op_clock_ratio);
if (rval)
continue;
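The pre-PLL divisor loop just above walks the candidate values as 1, 2, 4, 6, ...: the step expression 2 - (div & 1) adds one after an odd value and two after an even one, matching the "one or even" divider rule the debug messages refer to. A minimal standalone sketch of that stepping, with made-up range limits:

#include <stdio.h>

int main(void)
{
	unsigned int min_div = 1, max_div = 12;	/* example limits, not taken from any sensor */
	unsigned int div;

	/* "One or even": step 1 after an odd value, step 2 after an even one. */
	for (div = min_div; div <= max_div; div += 2 - (div & 1))
		printf("trying pre_pll_clk_div %u\n", div);

	return 0;
}

With these limits the loop prints 1, 2, 4, 6, 8, 10 and 12, i.e. every legal divisor is tried exactly once.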
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index 5ce2b61da3c5..e8f035a50c76 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -35,6 +35,13 @@
#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+struct smiapp_pll_branch {
+ uint16_t sys_clk_div;
+ uint16_t pix_clk_div;
+ uint32_t sys_clk_freq_hz;
+ uint32_t pix_clk_freq_hz;
+};
+
struct smiapp_pll {
/* input values */
uint8_t bus_type;
@@ -53,24 +60,18 @@ struct smiapp_pll {
uint8_t scale_n;
uint8_t bits_per_pixel;
uint32_t link_freq;
+ uint32_t ext_clk_freq_hz;
/* output values */
uint16_t pre_pll_clk_div;
uint16_t pll_multiplier;
- uint16_t op_sys_clk_div;
- uint16_t op_pix_clk_div;
- uint16_t vt_sys_clk_div;
- uint16_t vt_pix_clk_div;
-
- uint32_t ext_clk_freq_hz;
uint32_t pll_ip_clk_freq_hz;
uint32_t pll_op_clk_freq_hz;
- uint32_t op_sys_clk_freq_hz;
- uint32_t op_pix_clk_freq_hz;
- uint32_t vt_sys_clk_freq_hz;
- uint32_t vt_pix_clk_freq_hz;
+ struct smiapp_pll_branch vt;
+ struct smiapp_pll_branch op;
uint32_t pixel_rate_csi;
+ uint32_t pixel_rate_pixel_array;
};
struct smiapp_pll_branch_limits {
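The header change above folds the previously separate vt_*/op_* output fields into two instances of struct smiapp_pll_branch, which is what lets the calculation code alias its OP pointer to the VT branch when SMIAPP_PLL_FLAG_NO_OP_CLOCKS is set. A minimal sketch of that aliasing outside the driver; the struct layout and flag bit follow the hunks above, while main() and the example frequency are purely illustrative:

#include <stdint.h>
#include <stdio.h>

struct pll_branch {			/* mirrors struct smiapp_pll_branch above */
	uint16_t sys_clk_div;
	uint16_t pix_clk_div;
	uint32_t sys_clk_freq_hz;
	uint32_t pix_clk_freq_hz;
};

#define PLL_FLAG_NO_OP_CLOCKS	(1 << 1)	/* same bit as SMIAPP_PLL_FLAG_NO_OP_CLOCKS */

struct pll {
	unsigned int flags;
	struct pll_branch vt;
	struct pll_branch op;
};

int main(void)
{
	struct pll pll = { .flags = PLL_FLAG_NO_OP_CLOCKS };
	struct pll_branch *op_pll = &pll.op;

	/* No separate OP clock tree: alias the OP branch to the VT branch. */
	if (pll.flags & PLL_FLAG_NO_OP_CLOCKS)
		op_pll = &pll.vt;

	/* A write through the OP pointer now lands in the VT branch. */
	op_pll->pix_clk_freq_hz = 96000000;	/* example value */
	printf("vt.pix_clk_freq_hz = %u Hz\n", (unsigned)pll.vt.pix_clk_freq_hz);
	return 0;
}

Every later write through op_pll then updates the VT branch, so the shared calculation path needs no further special-casing.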
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index b10aaeda2bb4..0df5070e73c7 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -205,12 +205,12 @@ static int smiapp_pll_configure(struct smiapp_sensor *sensor)
int rval;
rval = smiapp_write(
- sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt_pix_clk_div);
+ sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt.pix_clk_div);
if (rval < 0)
return rval;
rval = smiapp_write(
- sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt_sys_clk_div);
+ sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt.sys_clk_div);
if (rval < 0)
return rval;
@@ -227,20 +227,21 @@ static int smiapp_pll_configure(struct smiapp_sensor *sensor)
/* Lane op clock ratio does not apply here. */
rval = smiapp_write(
sensor, SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS,
- DIV_ROUND_UP(pll->op_sys_clk_freq_hz, 1000000 / 256 / 256));
+ DIV_ROUND_UP(pll->op.sys_clk_freq_hz, 1000000 / 256 / 256));
if (rval < 0 || sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
return rval;
rval = smiapp_write(
- sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op_pix_clk_div);
+ sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op.pix_clk_div);
if (rval < 0)
return rval;
return smiapp_write(
- sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op_sys_clk_div);
+ sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op.sys_clk_div);
}
-static int smiapp_pll_update(struct smiapp_sensor *sensor)
+static int smiapp_pll_try(struct smiapp_sensor *sensor,
+ struct smiapp_pll *pll)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
struct smiapp_pll_limits lim = {
@@ -274,19 +275,15 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
.min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
.min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
};
+
+ return smiapp_pll_calculate(&client->dev, &lim, pll);
+}
+
+static int smiapp_pll_update(struct smiapp_sensor *sensor)
+{
struct smiapp_pll *pll = &sensor->pll;
int rval;
- if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
- /*
- * Fill in operational clock divisors limits from the
- * video timing ones. On profile 0 sensors the
- * requirements regarding them are essentially the
- * same as on VT ones.
- */
- lim.op = lim.vt;
- }
-
pll->binning_horizontal = sensor->binning_horizontal;
pll->binning_vertical = sensor->binning_vertical;
pll->link_freq =
@@ -294,12 +291,12 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
pll->scale_m = sensor->scale_m;
pll->bits_per_pixel = sensor->csi_format->compressed;
- rval = smiapp_pll_calculate(&client->dev, &lim, pll);
+ rval = smiapp_pll_try(sensor, pll);
if (rval < 0)
return rval;
__v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_parray,
- pll->vt_pix_clk_freq_hz);
+ pll->pixel_rate_pixel_array);
__v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi, pll->pixel_rate_csi);
return 0;
@@ -333,22 +330,22 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
* orders must be defined.
*/
static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
- { V4L2_MBUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
- { V4L2_MBUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
- { V4L2_MBUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
- { V4L2_MBUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, },
- { V4L2_MBUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, },
- { V4L2_MBUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, },
- { V4L2_MBUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, },
- { V4L2_MBUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, },
- { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, },
- { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, },
- { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, },
- { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, },
- { V4L2_MBUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, },
- { V4L2_MBUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, },
- { V4L2_MBUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, },
- { V4L2_MBUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
};
const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
@@ -526,6 +523,8 @@ static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
static int smiapp_init_controls(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
+ sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
unsigned int max, i;
int rval;
@@ -608,8 +607,8 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
sensor->link_freq = v4l2_ctrl_new_int_menu(
&sensor->src->ctrl_handler, &smiapp_ctrl_ops,
- V4L2_CID_LINK_FREQ, max, 0,
- sensor->platform_data->op_sys_clock);
+ V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
+ __ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
sensor->pixel_rate_csi = v4l2_ctrl_new_std(
&sensor->src->ctrl_handler, &smiapp_ctrl_ops,
@@ -745,6 +744,7 @@ static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct smiapp_pll *pll = &sensor->pll;
unsigned int type, n;
unsigned int i, pixel_order;
int rval;
@@ -808,14 +808,57 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
dev_dbg(&client->dev, "jolly good! %d\n", j);
sensor->default_mbus_frame_fmts |= 1 << j;
- if (!sensor->csi_format
- || f->width > sensor->csi_format->width
- || (f->width == sensor->csi_format->width
- && f->compressed
- > sensor->csi_format->compressed)) {
- sensor->csi_format = f;
- sensor->internal_csi_format = f;
- }
+ }
+ }
+
+ /* Figure out which BPP values can be used with which formats. */
+ pll->binning_horizontal = 1;
+ pll->binning_vertical = 1;
+ pll->scale_m = sensor->scale_m;
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ const struct smiapp_csi_data_format *f =
+ &smiapp_csi_data_formats[i];
+ unsigned long *valid_link_freqs =
+ &sensor->valid_link_freqs[
+ f->compressed - SMIAPP_COMPRESSED_BASE];
+ unsigned int j;
+
+ BUG_ON(f->compressed < SMIAPP_COMPRESSED_BASE);
+ BUG_ON(f->compressed > SMIAPP_COMPRESSED_MAX);
+
+ if (!(sensor->default_mbus_frame_fmts & 1 << i))
+ continue;
+
+ pll->bits_per_pixel = f->compressed;
+
+ for (j = 0; sensor->platform_data->op_sys_clock[j]; j++) {
+ pll->link_freq = sensor->platform_data->op_sys_clock[j];
+
+ rval = smiapp_pll_try(sensor, pll);
+ dev_dbg(&client->dev, "link freq %u Hz, bpp %u %s\n",
+ pll->link_freq, pll->bits_per_pixel,
+ rval ? "not ok" : "ok");
+ if (rval)
+ continue;
+
+ set_bit(j, valid_link_freqs);
+ }
+
+ if (!*valid_link_freqs) {
+ dev_info(&client->dev,
+ "no valid link frequencies for %u bpp\n",
+ f->compressed);
+ sensor->default_mbus_frame_fmts &= ~BIT(i);
+ continue;
+ }
+
+ if (!sensor->csi_format
+ || f->width > sensor->csi_format->width
+ || (f->width == sensor->csi_format->width
+ && f->compressed > sensor->csi_format->compressed)) {
+ sensor->csi_format = f;
+ sensor->internal_csi_format = f;
}
}
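The probing loop above builds, for each supported compressed bit depth, a bitmask of which entries in the platform's op_sys_clock table produce a workable PLL configuration; formats whose mask stays empty are dropped from default_mbus_frame_fmts. A reduced sketch of the same bookkeeping using plain C bit operations in place of the kernel's set_bit(); the try_pll() stub and the frequency table are placeholders, not real sensor data:

#include <stdint.h>
#include <stdio.h>

#define COMPRESSED_BASE	8
#define COMPRESSED_MAX	12
#define NR_OF_COMPRESSED (COMPRESSED_MAX - COMPRESSED_BASE + 1)

/* Placeholder: pretend only the two lower link frequencies work, and only at 8 bpp. */
static int try_pll(uint32_t link_freq, unsigned int bpp)
{
	return (bpp == 8 && link_freq <= 210000000) ? 0 : -1;
}

int main(void)
{
	static const uint32_t op_sys_clock[] = { 105000000, 210000000, 420000000, 0 };
	unsigned long valid_link_freqs[NR_OF_COMPRESSED] = { 0 };
	unsigned int bpp, j;

	for (bpp = COMPRESSED_BASE; bpp <= COMPRESSED_MAX; bpp++) {
		unsigned long *mask = &valid_link_freqs[bpp - COMPRESSED_BASE];

		for (j = 0; op_sys_clock[j]; j++)
			if (!try_pll(op_sys_clock[j], bpp))
				*mask |= 1UL << j;	/* stands in for set_bit(j, mask) */

		printf("%u bpp: mask 0x%lx%s\n", bpp, *mask,
		       *mask ? "" : " (no valid link frequencies)");
	}
	return 0;
}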
@@ -904,7 +947,7 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
dev_dbg(&client->dev, "hblank\t\t%d\n", sensor->hblank->val);
dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
- sensor->pll.vt_pix_clk_freq_hz /
+ sensor->pll.pixel_rate_pixel_array /
((sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ sensor->hblank->val) *
(sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
@@ -1687,51 +1730,77 @@ static const struct smiapp_csi_data_format
return csi_format;
}
-static int smiapp_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_format *fmt)
+static int smiapp_set_format_source(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
- struct v4l2_rect *crops[SMIAPP_PADS];
+ const struct smiapp_csi_data_format *csi_format,
+ *old_csi_format = sensor->csi_format;
+ unsigned long *valid_link_freqs;
+ u32 code = fmt->format.code;
+ unsigned int i;
+ int rval;
- mutex_lock(&sensor->mutex);
+ rval = __smiapp_get_format(subdev, fh, fmt);
+ if (rval)
+ return rval;
/*
* Media bus code is changeable on src subdev's source pad. On
* other source pads we just get format here.
*/
- if (fmt->pad == ssd->source_pad) {
- u32 code = fmt->format.code;
- int rval = __smiapp_get_format(subdev, fh, fmt);
- bool range_changed = false;
- unsigned int i;
+ if (subdev != &sensor->src->sd)
+ return 0;
- if (!rval && subdev == &sensor->src->sd) {
- const struct smiapp_csi_data_format *csi_format =
- smiapp_validate_csi_data_format(sensor, code);
+ csi_format = smiapp_validate_csi_data_format(sensor, code);
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (csi_format->width !=
- sensor->csi_format->width)
- range_changed = true;
+ fmt->format.code = csi_format->code;
- sensor->csi_format = csi_format;
- }
-
- fmt->format.code = csi_format->code;
- }
+ if (fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return 0;
- mutex_unlock(&sensor->mutex);
- if (rval || !range_changed)
- return rval;
+ sensor->csi_format = csi_format;
+ if (csi_format->width != old_csi_format->width)
for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
- v4l2_ctrl_modify_range(
- sensor->test_data[i],
- 0, (1 << sensor->csi_format->width) - 1, 1, 0);
+ __v4l2_ctrl_modify_range(
+ sensor->test_data[i], 0,
+ (1 << csi_format->width) - 1, 1, 0);
+ if (csi_format->compressed == old_csi_format->compressed)
return 0;
+
+ valid_link_freqs =
+ &sensor->valid_link_freqs[sensor->csi_format->compressed
+ - SMIAPP_COMPRESSED_BASE];
+
+ __v4l2_ctrl_modify_range(
+ sensor->link_freq, 0,
+ __fls(*valid_link_freqs), ~*valid_link_freqs,
+ __ffs(*valid_link_freqs));
+
+ return smiapp_pll_update(sensor);
+}
+
+static int smiapp_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *crops[SMIAPP_PADS];
+
+ mutex_lock(&sensor->mutex);
+
+ if (fmt->pad == ssd->source_pad) {
+ int rval;
+
+ rval = smiapp_set_format_source(subdev, fh, fmt);
+
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
}
/* Sink pad. Width and height are changeable here. */
@@ -2023,6 +2092,11 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
== SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
return 0;
return -EINVAL;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ if (ssd == sensor->pixel_array
+ && sel->pad == SMIAPP_PA_PAD_SRC)
+ return 0;
+ return -EINVAL;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
if (sel->pad == ssd->source_pad)
@@ -2121,7 +2195,9 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
if (ssd == sensor->pixel_array) {
+ sel->r.left = sel->r.top = 0;
sel->r.width =
sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
sel->r.height =
@@ -2482,12 +2558,6 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
goto out_power_off;
}
- rval = smiapp_get_mbus_formats(sensor);
- if (rval) {
- rval = -ENODEV;
- goto out_power_off;
- }
-
if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
u32 val;
@@ -2569,6 +2639,22 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ /* prepare PLL configuration input values */
+ pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ pll->csi2.lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+ pll->flags = smiapp_call_quirk(sensor, pll_flags);
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_nvm_release;
+ }
+
for (i = 0; i < SMIAPP_SUBDEVS; i++) {
struct {
struct smiapp_subdev *ssd;
@@ -2666,18 +2752,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (rval < 0)
goto out_nvm_release;
- /* prepare PLL configuration input values */
- pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
- pll->csi2.lanes = sensor->platform_data->lanes;
- pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
- pll->flags = smiapp_call_quirk(sensor, pll_flags);
-
- /* Profile 0 sensors have no separate OP clock branch. */
- if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
-
+ mutex_lock(&sensor->mutex);
rval = smiapp_update_mode(sensor);
+ mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
goto out_nvm_release;
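The per-BPP masks built during probing are then consumed when the LINK_FREQ control is created and again whenever the active CSI format's bit depth changes, as in smiapp_set_format_source() above: the lowest set bit becomes the default menu index, the highest set bit the maximum, and the complement of the mask is handed over as the menu skip mask. A small sketch of that derivation with GCC/Clang builtins standing in for the kernel's __ffs()/__fls(); the mask value is an arbitrary example and is assumed non-empty, since empty masks were already filtered out during probing:

#include <stdio.h>

int main(void)
{
	unsigned long valid_link_freqs = 0x5;	/* example: op_sys_clock entries 0 and 2 work */
	unsigned int min_idx, max_idx;
	unsigned long skip_mask;

	min_idx = __builtin_ctzl(valid_link_freqs);			/* like __ffs() */
	max_idx = sizeof(valid_link_freqs) * 8 - 1
		  - __builtin_clzl(valid_link_freqs);			/* like __fls() */
	skip_mask = ~valid_link_freqs;					/* menu entries to hide */

	printf("menu range 0..%u, default %u, skip mask 0x%lx\n",
	       max_idx, min_idx, skip_mask);
	return 0;
}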
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 874b49ffd88f..f88f8ec344d3 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -156,6 +156,11 @@ struct smiapp_csi_data_format {
#define SMIAPP_PAD_SRC 1
#define SMIAPP_PADS 2
+#define SMIAPP_COMPRESSED_BASE 8
+#define SMIAPP_COMPRESSED_MAX 12
+#define SMIAPP_NR_OF_COMPRESSED (SMIAPP_COMPRESSED_MAX - \
+ SMIAPP_COMPRESSED_BASE + 1)
+
struct smiapp_binning_subtype {
u8 horizontal:4;
u8 vertical:4;
@@ -232,6 +237,9 @@ struct smiapp_sensor {
struct smiapp_pll pll;
+ /* Is a default format supported for a given BPP? */
+ unsigned long valid_link_freqs[SMIAPP_NR_OF_COMPRESSED];
+
/* Pixel array controls */
struct v4l2_ctrl *analog_gain;
struct v4l2_ctrl *exposure;
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index 5b915936c3f3..ec89cfa927a2 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -71,7 +71,7 @@
/* IMX074 has only one fixed colorspace per pixelcode */
struct imx074_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
@@ -82,7 +82,7 @@ struct imx074 {
};
static const struct imx074_datafmt imx074_colour_fmts[] = {
- {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
};
static struct imx074 *to_imx074(const struct i2c_client *client)
@@ -91,7 +91,7 @@ static struct imx074 *to_imx074(const struct i2c_client *client)
}
/* Find a data format by a pixel code in an array */
-static const struct imx074_datafmt *imx074_find_datafmt(enum v4l2_mbus_pixelcode code)
+static const struct imx074_datafmt *imx074_find_datafmt(u32 code)
{
int i;
@@ -236,7 +236,7 @@ static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
}
static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if ((unsigned int)index >= ARRAY_SIZE(imx074_colour_fmts))
return -EINVAL;
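The imx074 hunks above, like the soc_camera conversions in the files that follow, are mechanical: the media bus code becomes a plain u32 carrying a MEDIA_BUS_FMT_* value instead of the old enum v4l2_mbus_pixelcode, and the per-driver format tables are still searched by that code. A reduced sketch of the shared lookup pattern; the table contents, colorspace strings and the two stand-in code values are illustrative only, not the real definitions from the media bus format headers, and the fallback-to-first-entry behaviour here is a simplification (the real drivers differ in how they handle a miss):

#include <stdint.h>
#include <stdio.h>

/* Stand-in values only; the real MEDIA_BUS_FMT_* constants live in the UAPI headers. */
#define FMT_SBGGR8_1X8	1u
#define FMT_UYVY8_2X8	2u

struct datafmt {
	uint32_t code;			/* media bus code, now a plain u32 */
	const char *colorspace;
};

static const struct datafmt colour_fmts[] = {
	{ FMT_SBGGR8_1X8, "SRGB" },
	{ FMT_UYVY8_2X8,  "JPEG" },
};

/* Search the table by code; fall back to the first entry in this sketch. */
static const struct datafmt *find_datafmt(uint32_t code)
{
	unsigned int i;

	for (i = 0; i < sizeof(colour_fmts) / sizeof(colour_fmts[0]); i++)
		if (colour_fmts[i].code == code)
			return &colour_fmts[i];

	return &colour_fmts[0];
}

int main(void)
{
	const struct datafmt *fmt = find_datafmt(FMT_UYVY8_2X8);

	printf("code %u maps to colorspace %s\n", (unsigned)fmt->code, fmt->colorspace);
	return 0;
}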
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index dbd8c142d6ef..2e9a53502551 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -53,13 +53,13 @@
/* MT9M001 has only one fixed colorspace per pixelcode */
struct mt9m001_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
/* Find a data format by a pixel code in an array */
static const struct mt9m001_datafmt *mt9m001_find_datafmt(
- enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt,
+ u32 code, const struct mt9m001_datafmt *fmt,
int n)
{
int i;
@@ -75,14 +75,14 @@ static const struct mt9m001_datafmt mt9m001_colour_fmts[] = {
* Order important: first natively supported,
* second supported with a GPIO extender
*/
- {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
};
static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = {
/* Order important - see above */
- {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
};
struct mt9m001 {
@@ -563,7 +563,7 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
};
static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index b51e8562e775..5992ea93257a 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -182,23 +182,23 @@ static struct mt9m111_context context_b = {
/* MT9M111 has only one fixed colorspace per pixelcode */
struct mt9m111_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
- {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
struct mt9m111 {
@@ -218,7 +218,7 @@ struct mt9m111 {
/* Find a data format by a pixel code */
static const struct mt9m111_datafmt *mt9m111_find_datafmt(struct mt9m111 *mt9m111,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
int i;
for (i = 0; i < ARRAY_SIZE(mt9m111_colour_fmts); i++)
@@ -331,7 +331,7 @@ static int mt9m111_setup_rect_ctx(struct mt9m111 *mt9m111,
}
static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rect,
- int width, int height, enum v4l2_mbus_pixelcode code)
+ int width, int height, u32 code)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
@@ -345,7 +345,7 @@ static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rec
if (!ret)
ret = reg_write(WINDOW_HEIGHT, rect->height);
- if (code != V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
+ if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* IFP in use, down-scaling possible */
if (!ret)
ret = mt9m111_setup_rect_ctx(mt9m111, &context_b,
@@ -393,8 +393,8 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
- mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
+ if (mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
+ mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
rect.width = ALIGN(rect.width, 2);
rect.height = ALIGN(rect.height, 2);
@@ -462,7 +462,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
}
static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
@@ -474,46 +474,46 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
int ret;
switch (code) {
- case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
MT9M111_OUTFMT_RGB;
break;
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB;
break;
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 |
MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
break;
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
break;
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
break;
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
break;
- case V4L2_MBUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
- case V4L2_MBUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
data_outfmt2 = 0;
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
@@ -542,8 +542,8 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
fmt = mt9m111_find_datafmt(mt9m111, mf->code);
- bayer = fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
- fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
+ bayer = fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
+ fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE;
/*
* With Bayer format enforce even side lengths, but let the user play
@@ -554,7 +554,7 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
rect->height = ALIGN(rect->height, 2);
}
- if (fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
+ if (fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* IFP bypass mode, no scaling */
mf->width = rect->width;
mf->height = rect->height;
@@ -840,7 +840,7 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
};
static int mt9m111_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(mt9m111_colour_fmts))
return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index f8358c4071a9..35d9c8d25589 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -345,7 +345,7 @@ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
mf->width = mt9t031->rect.width / mt9t031->xskip;
mf->height = mt9t031->rect.height / mt9t031->yskip;
- mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
mf->colorspace = V4L2_COLORSPACE_SRGB;
mf->field = V4L2_FIELD_NONE;
@@ -367,7 +367,7 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd,
xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH);
yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT);
- mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
mf->colorspace = V4L2_COLORSPACE_SRGB;
/* mt9t031_set_params() doesn't change width and height */
@@ -385,7 +385,7 @@ static int mt9t031_try_fmt(struct v4l2_subdev *sd,
&mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
&mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
- mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
mf->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
@@ -673,12 +673,12 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
};
static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ *code = MEDIA_BUS_FMT_SBGGR10_1X10;
return 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 996d7b4007a5..64f08365e6b2 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -77,7 +77,7 @@
struct
************************************************************************/
struct mt9t112_format {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
u16 fmt;
u16 order;
@@ -103,32 +103,32 @@ struct mt9t112_priv {
static const struct mt9t112_format mt9t112_cfmts[] = {
{
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.fmt = 1,
.order = 0,
}, {
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.fmt = 1,
.order = 1,
}, {
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.fmt = 1,
.order = 2,
}, {
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.fmt = 1,
.order = 3,
}, {
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 8,
.order = 2,
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.fmt = 4,
.order = 2,
@@ -840,7 +840,7 @@ static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
static int mt9t112_set_params(struct mt9t112_priv *priv,
const struct v4l2_rect *rect,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
int i;
@@ -953,7 +953,7 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
break;
if (i == priv->num_formats) {
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_JPEG;
} else {
mf->colorspace = mt9t112_cfmts[i].colorspace;
@@ -967,7 +967,7 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
}
static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
@@ -1101,7 +1101,7 @@ static int mt9t112_probe(struct i2c_client *client,
/* Cannot fail: using the default supported pixel code */
if (!ret)
- mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
+ mt9t112_set_params(priv, &rect, MEDIA_BUS_FMT_UYVY8_2X8);
else
v4l2_clk_put(priv->clk);
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 99022c8d76eb..a246d4d64b8b 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -85,13 +85,13 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
/* MT9V022 has only one fixed colorspace per pixelcode */
struct mt9v022_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
/* Find a data format by a pixel code in an array */
static const struct mt9v022_datafmt *mt9v022_find_datafmt(
- enum v4l2_mbus_pixelcode code, const struct mt9v022_datafmt *fmt,
+ u32 code, const struct mt9v022_datafmt *fmt,
int n)
{
int i;
@@ -107,14 +107,14 @@ static const struct mt9v022_datafmt mt9v022_colour_fmts[] = {
* Order important: first natively supported,
* second supported with a GPIO extender
*/
- {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
};
static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = {
/* Order important - see above */
- {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
};
/* only registers with different addresses on different mt9v02x sensors */
@@ -410,13 +410,13 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
* .try_mbus_fmt(), datawidth is from our supported format list
*/
switch (mf->code) {
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
if (mt9v022->model != MT9V022IX7ATM)
return -EINVAL;
break;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
if (mt9v022->model != MT9V022IX7ATC)
return -EINVAL;
break;
@@ -443,8 +443,8 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
const struct mt9v022_datafmt *fmt;
- int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_1X10;
+ int align = mf->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_1X10;
v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH,
MT9V022_MAX_WIDTH, align,
@@ -759,7 +759,7 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
};
static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 6c6b1c3b45e3..6f2dd9093d94 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -302,7 +302,7 @@ struct ov2640_win_size {
struct ov2640_priv {
struct v4l2_subdev subdev;
struct v4l2_ctrl_handler hdl;
- enum v4l2_mbus_pixelcode cfmt_code;
+ u32 cfmt_code;
struct v4l2_clk *clk;
const struct ov2640_win_size *win;
};
@@ -623,11 +623,11 @@ static const struct regval_list ov2640_rgb565_le_regs[] = {
ENDMARKER,
};
-static enum v4l2_mbus_pixelcode ov2640_codes[] = {
- V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_RGB565_2X8_BE,
- V4L2_MBUS_FMT_RGB565_2X8_LE,
+static u32 ov2640_codes[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_RGB565_2X8_BE,
+ MEDIA_BUS_FMT_RGB565_2X8_LE,
};
/*
@@ -785,7 +785,7 @@ static const struct ov2640_win_size *ov2640_select_win(u32 *width, u32 *height)
}
static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
struct ov2640_priv *priv = to_ov2640(client);
const struct regval_list *selected_cfmt_regs;
@@ -797,20 +797,20 @@ static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
/* select format */
priv->cfmt_code = 0;
switch (code) {
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
selected_cfmt_regs = ov2640_rgb565_be_regs;
break;
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
selected_cfmt_regs = ov2640_rgb565_le_regs;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
selected_cfmt_regs = ov2640_yuyv_regs;
break;
default:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
selected_cfmt_regs = ov2640_uyvy_regs;
}
@@ -869,7 +869,7 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
if (!priv->win) {
u32 width = W_SVGA, height = H_SVGA;
priv->win = ov2640_select_win(&width, &height);
- priv->cfmt_code = V4L2_MBUS_FMT_UYVY8_2X8;
+ priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
}
mf->width = priv->win->width;
@@ -877,13 +877,13 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
mf->code = priv->cfmt_code;
switch (mf->code) {
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
mf->field = V4L2_FIELD_NONE;
@@ -899,14 +899,14 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -926,14 +926,14 @@ static int ov2640_try_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -941,7 +941,7 @@ static int ov2640_try_fmt(struct v4l2_subdev *sd,
}
static int ov2640_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov2640_codes))
return -EINVAL;
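One idiom worth spelling out from the ov2640 hunks above (it recurs in ov6650 and ov9640 below) is the switch whose default label sits directly before a case label: an unrecognised media bus code is first rewritten to the preferred format and then falls through into that format's normal handling. A tiny standalone illustration; the constants and colorspace strings are arbitrary:

#include <stdio.h>

#define FMT_UYVY8_2X8	1	/* arbitrary stand-in values */
#define FMT_RGB565	2

static const char *normalise(unsigned int *code)
{
	switch (*code) {
	case FMT_RGB565:
		return "SRGB";
	default:
		/* Unknown code: force the preferred format... */
		*code = FMT_UYVY8_2X8;
		/* ...and fall through into its handling. */
	case FMT_UYVY8_2X8:
		return "JPEG";
	}
}

int main(void)
{
	unsigned int code = 1234;	/* unsupported on purpose */
	const char *cs = normalise(&code);

	printf("code %u -> %s\n", code, cs);
	return 0;
}

Here the unsupported input comes back normalised to FMT_UYVY8_2X8 with the JPEG colorspace, mirroring how these drivers coerce unknown codes to their default format.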
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index d2daa6a8f272..93ae031bdafb 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -602,7 +602,7 @@ static struct regval_list ov5642_default_regs_finalise[] = {
};
struct ov5642_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
@@ -618,7 +618,7 @@ struct ov5642 {
};
static const struct ov5642_datafmt ov5642_colour_fmts[] = {
- {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
};
static struct ov5642 *to_ov5642(const struct i2c_client *client)
@@ -628,7 +628,7 @@ static struct ov5642 *to_ov5642(const struct i2c_client *client)
/* Find a data format by a pixel code in an array */
static const struct ov5642_datafmt
- *ov5642_find_datafmt(enum v4l2_mbus_pixelcode code)
+ *ov5642_find_datafmt(u32 code)
{
int i;
@@ -840,7 +840,7 @@ static int ov5642_g_fmt(struct v4l2_subdev *sd,
}
static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov5642_colour_fmts))
return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index ab01598ec83f..f4eef2fa6f6f 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -202,18 +202,18 @@ struct ov6650 {
unsigned long pclk_limit; /* from host */
unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_parm */
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
-static enum v4l2_mbus_pixelcode ov6650_codes[] = {
- V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_YVYU8_2X8,
- V4L2_MBUS_FMT_VYUY8_2X8,
- V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_Y8_1X8,
+static u32 ov6650_codes[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_Y8_1X8,
};
/* read a register */
@@ -555,29 +555,29 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
.height = mf->height << half_scale,
},
};
- enum v4l2_mbus_pixelcode code = mf->code;
+ u32 code = mf->code;
unsigned long mclk, pclk;
u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
int ret;
/* select color matrix configuration for given color encoding */
switch (code) {
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
coma_set |= COMA_BW;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
coma_set |= COMA_WORD_SWAP;
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
COMA_BYTE_SWAP;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
if (half_scale) {
coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
@@ -587,7 +587,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
}
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
if (half_scale) {
coma_mask |= COMA_RGB | COMA_BW;
@@ -597,7 +597,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
coma_set |= COMA_BYTE_SWAP;
}
break;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
coma_set |= COMA_RAW_RGB | COMA_RGB;
@@ -608,8 +608,8 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
}
priv->code = code;
- if (code == V4L2_MBUS_FMT_Y8_1X8 ||
- code == V4L2_MBUS_FMT_SBGGR8_1X8) {
+ if (code == MEDIA_BUS_FMT_Y8_1X8 ||
+ code == MEDIA_BUS_FMT_SBGGR8_1X8) {
coml_mask = COML_ONE_CHANNEL;
coml_set = 0;
priv->pclk_max = 4000000;
@@ -619,7 +619,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
priv->pclk_max = 8000000;
}
- if (code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
priv->colorspace = V4L2_COLORSPACE_SRGB;
else if (code != 0)
priv->colorspace = V4L2_COLORSPACE_JPEG;
@@ -697,18 +697,18 @@ static int ov6650_try_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
- case V4L2_MBUS_FMT_Y10_1X10:
- mf->code = V4L2_MBUS_FMT_Y8_1X8;
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_YVYU8_2X8:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_VYUY8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ mf->code = MEDIA_BUS_FMT_Y8_1X8;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
- mf->code = V4L2_MBUS_FMT_SBGGR8_1X8;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
+ mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
@@ -717,7 +717,7 @@ static int ov6650_try_fmt(struct v4l2_subdev *sd,
}
static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov6650_codes))
return -EINVAL;
@@ -1013,7 +1013,7 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.width = W_CIF;
priv->rect.height = H_CIF;
priv->half_scale = false;
- priv->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
priv->clk = v4l2_clk_get(&client->dev, "mclk");
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 970a04e1e56e..8daac88b33fe 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -376,7 +376,7 @@
*/
struct ov772x_color_format {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
u8 dsp3;
u8 dsp4;
@@ -408,7 +408,7 @@ struct ov772x_priv {
*/
static const struct ov772x_color_format ov772x_cfmts[] = {
{
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -416,7 +416,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = OFMT_YUV,
},
{
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.dsp3 = UV_ON,
.dsp4 = DSP_OFMT_YUV,
@@ -424,7 +424,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = OFMT_YUV,
},
{
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -432,7 +432,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = OFMT_YUV,
},
{
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -440,7 +440,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = FMT_RGB555 | OFMT_RGB,
},
{
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
.colorspace = V4L2_COLORSPACE_SRGB,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -448,7 +448,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = FMT_RGB555 | OFMT_RGB,
},
{
- .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -456,7 +456,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
.com7 = FMT_RGB565 | OFMT_RGB,
},
{
- .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.colorspace = V4L2_COLORSPACE_SRGB,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_YUV,
@@ -468,7 +468,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
* regardless of the COM7 value. We can thus only support 10-bit
* Bayer until someone figures it out.
*/
- .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
.colorspace = V4L2_COLORSPACE_SRGB,
.dsp3 = 0x0,
.dsp4 = DSP_OFMT_RAW10,
@@ -990,7 +990,7 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
};
static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov772x_cfmts))
return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index bc74224503e7..aa93d2e88572 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -159,10 +159,10 @@ static const struct ov9640_reg ov9640_regs_rgb[] = {
{ OV9640_MTXS, 0x65 },
};
-static enum v4l2_mbus_pixelcode ov9640_codes[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
- V4L2_MBUS_FMT_RGB565_2X8_LE,
+static u32 ov9640_codes[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_RGB565_2X8_LE,
};
/* read a register */
@@ -351,22 +351,22 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
}
/* Prepare necessary register changes depending on color encoding */
-static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
+static void ov9640_alter_regs(u32 code,
struct ov9640_reg_alt *alt)
{
switch (code) {
default:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
alt->com12 = OV9640_COM12_YUV_AVG;
alt->com13 = OV9640_COM13_Y_DELAY_EN |
OV9640_COM13_YUV_DLY(0x01);
break;
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
alt->com7 = OV9640_COM7_RGB;
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_555;
break;
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
alt->com7 = OV9640_COM7_RGB;
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_565;
@@ -376,7 +376,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
/* Setup registers according to resolution and color encoding */
static int ov9640_write_regs(struct i2c_client *client, u32 width,
- enum v4l2_mbus_pixelcode code, struct ov9640_reg_alt *alts)
+ u32 code, struct ov9640_reg_alt *alts)
{
const struct ov9640_reg *ov9640_regs, *matrix_regs;
int ov9640_regs_len, matrix_regs_len;
@@ -419,7 +419,7 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width,
}
/* select color matrix configuration for given color encoding */
- if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (code == MEDIA_BUS_FMT_UYVY8_2X8) {
matrix_regs = ov9640_regs_yuv;
matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv);
} else {
@@ -487,7 +487,7 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_reg_alt alts = {0};
enum v4l2_colorspace cspace;
- enum v4l2_mbus_pixelcode code = mf->code;
+ u32 code = mf->code;
int ret;
ov9640_res_roundup(&mf->width, &mf->height);
@@ -500,13 +500,13 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
return ret;
switch (code) {
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
cspace = V4L2_COLORSPACE_SRGB;
break;
default:
- code = V4L2_MBUS_FMT_UYVY8_2X8;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ code = MEDIA_BUS_FMT_UYVY8_2X8;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
cspace = V4L2_COLORSPACE_JPEG;
}
@@ -527,13 +527,13 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -541,7 +541,7 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd,
}
static int ov9640_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov9640_codes))
return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index ee9eb635d540..841dc55457cf 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -392,8 +392,8 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_ISP_CTRL19, 0x02 },
};
-static enum v4l2_mbus_pixelcode ov9740_codes[] = {
- V4L2_MBUS_FMT_YUYV8_2X8,
+static u32 ov9740_codes[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
};
/* read a register */
@@ -674,13 +674,13 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9740_priv *priv = to_ov9740(sd);
enum v4l2_colorspace cspace;
- enum v4l2_mbus_pixelcode code = mf->code;
+ u32 code = mf->code;
int ret;
ov9740_res_roundup(&mf->width, &mf->height);
switch (code) {
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
cspace = V4L2_COLORSPACE_SRGB;
break;
default:
@@ -710,14 +710,14 @@ static int ov9740_try_fmt(struct v4l2_subdev *sd,
ov9740_res_roundup(&mf->width, &mf->height);
mf->field = V4L2_FIELD_NONE;
- mf->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
mf->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
}
static int ov9740_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(ov9740_codes))
return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index 7e6d97847874..1752428c43c5 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -111,13 +111,13 @@
/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
struct rj54n1_datafmt {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
};
/* Find a data format by a pixel code in an array */
static const struct rj54n1_datafmt *rj54n1_find_datafmt(
- enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt,
+ u32 code, const struct rj54n1_datafmt *fmt,
int n)
{
int i;
@@ -129,15 +129,15 @@ static const struct rj54n1_datafmt *rj54n1_find_datafmt(
}
static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
- {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
- {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
- {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
};
struct rj54n1_clock_div {
@@ -486,7 +486,7 @@ static int reg_write_multiple(struct i2c_client *client,
}
static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(rj54n1_colour_fmts))
return -EINVAL;
@@ -965,11 +965,11 @@ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
- int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE;
+ int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
__func__, mf->code, mf->width, mf->height);
@@ -1025,55 +1025,55 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
/* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
switch (mf->code) {
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
ret = reg_write(client, RJ54N1_OUT_SEL, 0);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
ret = reg_write(client, RJ54N1_OUT_SEL, 0);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
break;
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
break;
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
break;
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
ret = reg_write(client, RJ54N1_OUT_SEL, 4);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
if (!ret)
ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
break;
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
ret = reg_write(client, RJ54N1_OUT_SEL, 4);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
if (!ret)
ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
break;
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
ret = reg_write(client, RJ54N1_OUT_SEL, 4);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
if (!ret)
ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
break;
- case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
ret = reg_write(client, RJ54N1_OUT_SEL, 4);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
if (!ret)
ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
ret = reg_write(client, RJ54N1_OUT_SEL, 5);
break;
default:
@@ -1083,7 +1083,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
/* Special case: a raw mode with 10 bits of data per clock tick */
if (!ret)
ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
- (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2);
+ (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);
if (ret < 0)
return ret;
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 416402eb4f82..9b853215d146 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -705,7 +705,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
mf->width = priv->scale->width;
mf->height = priv->scale->height;
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_JPEG;
mf->field = V4L2_FIELD_INTERLACED_BT;
@@ -724,7 +724,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
/*
* check color format
*/
- if (mf->code != V4L2_MBUS_FMT_UYVY8_2X8)
+ if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8)
return -EINVAL;
mf->colorspace = V4L2_COLORSPACE_JPEG;
@@ -751,7 +751,7 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd,
return -EINVAL;
}
- mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_JPEG;
/*
@@ -822,12 +822,12 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
};
static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_UYVY8_2X8;
+ *code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 118f8ee88465..10c735c3a082 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -165,7 +165,7 @@ struct sr030pc30_info {
};
struct sr030pc30_format {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
enum v4l2_colorspace colorspace;
u16 ispctl1_reg;
};
@@ -201,23 +201,23 @@ static const struct sr030pc30_frmsize sr030pc30_sizes[] = {
/* supported pixel formats */
static const struct sr030pc30_format sr030pc30_formats[] = {
{
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x03,
}, {
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x02,
}, {
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0,
}, {
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x01,
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x40,
},
@@ -472,7 +472,7 @@ static int sr030pc30_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (!code || index >= ARRAY_SIZE(sr030pc30_formats))
return -EINVAL;
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index b9dabc9f4050..204204259ac6 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -756,12 +756,12 @@ static int tvp514x_s_ctrl(struct v4l2_ctrl *ctrl)
*/
static int
tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV10_2X10;
+ *code = MEDIA_BUS_FMT_YUYV10_2X10;
return 0;
}
@@ -784,7 +784,7 @@ tvp514x_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
/* Calculate height and width based on current standard */
current_std = decoder->current_std;
- f->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ f->code = MEDIA_BUS_FMT_YUYV8_2X8;
f->width = decoder->std_list[current_std].width;
f->height = decoder->std_list[current_std].height;
f->field = V4L2_FIELD_INTERLACED;
@@ -942,7 +942,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
if (index != 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ code->code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
@@ -967,7 +967,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
return 0;
}
- format->format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format->format.width = tvp514x_std_list[decoder->current_std].width;
format->format.height = tvp514x_std_list[decoder->current_std].height;
format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -991,7 +991,7 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
struct tvp514x_decoder *decoder = to_decoder(sd);
if (fmt->format.field != V4L2_FIELD_INTERLACED ||
- fmt->format.code != V4L2_MBUS_FMT_YUYV8_2X8 ||
+ fmt->format.code != MEDIA_BUS_FMT_YUYV8_2X8 ||
fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M ||
fmt->format.width != tvp514x_std_list[decoder->current_std].width ||
fmt->format.height != tvp514x_std_list[decoder->current_std].height)
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 193e7d6c29c8..68cdab9c0903 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -818,12 +818,12 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
}
static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_UYVY8_2X8;
+ *code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
@@ -840,7 +840,7 @@ static int tvp5150_mbus_fmt(struct v4l2_subdev *sd,
f->width = decoder->rect.width;
f->height = decoder->rect.height;
- f->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ f->code = MEDIA_BUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_SEQ_TB;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 51bac762638b..fe4870e22cfe 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -626,7 +626,7 @@ static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f
f->width = bt->width;
f->height = bt->height;
- f->code = V4L2_MBUS_FMT_YUYV10_1X20;
+ f->code = MEDIA_BUS_FMT_YUYV10_1X20;
f->field = device->current_timings->scanmode;
f->colorspace = device->current_timings->color_space;
@@ -756,12 +756,12 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
*/
static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
/* Check requested format index is within range */
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV10_1X20;
+ *code = MEDIA_BUS_FMT_YUYV10_1X20;
return 0;
}
@@ -859,7 +859,7 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (code->index != 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_YUYV10_1X20;
+ code->code = MEDIA_BUS_FMT_YUYV10_1X20;
return 0;
}
@@ -878,7 +878,7 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
{
struct tvp7002 *tvp7002 = to_tvp7002(sd);
- fmt->format.code = V4L2_MBUS_FMT_YUYV10_1X20;
+ fmt->format.code = MEDIA_BUS_FMT_YUYV10_1X20;
fmt->format.width = tvp7002->current_timings->timings.bt.width;
fmt->format.height = tvp7002->current_timings->timings.bt.height;
fmt->format.field = tvp7002->current_timings->scanmode;
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 373f2df52492..00e7f043977e 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -45,19 +45,19 @@ struct vs6624 {
};
static const struct vs6624_format {
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
enum v4l2_colorspace colorspace;
} vs6624_formats[] = {
{
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
},
{
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
},
{
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.colorspace = V4L2_COLORSPACE_SRGB,
},
};
@@ -65,7 +65,7 @@ static const struct vs6624_format {
static struct v4l2_mbus_framefmt vs6624_default_fmt = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_JPEG,
};
@@ -558,7 +558,7 @@ static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int vs6624_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
if (index >= ARRAY_SIZE(vs6624_formats))
return -EINVAL;
@@ -605,15 +605,15 @@ static int vs6624_s_mbus_fmt(struct v4l2_subdev *sd,
/* set image format */
switch (fmt->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
vs6624_write(sd, VS6624_YUV_SETUP, 0x1);
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
vs6624_write(sd, VS6624_YUV_SETUP, 0x3);
break;
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
vs6624_write(sd, VS6624_IMG_FMT0, 0x4);
vs6624_write(sd, VS6624_RGB_SETUP, 0x0);
break;
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 37c334edc7e8..4d8e01c7b1b2 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -279,8 +279,14 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
continue;
ret = entity->ops->link_validate(link);
- if (ret < 0 && ret != -ENOIOCTLCMD)
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(entity->parent->dev,
+ "link validation failed for \"%s\":%u -> \"%s\":%u, error %d\n",
+ entity->name, link->source->index,
+ link->sink->entity->name,
+ link->sink->index, ret);
goto error;
+ }
}
/* Either no links or validated links are fine. */
@@ -288,6 +294,11 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
if (!bitmap_full(active, entity->num_pads)) {
ret = -EPIPE;
+ dev_dbg(entity->parent->dev,
+ "\"%s\":%u must be connected by an enabled link\n",
+ entity->name,
+ (unsigned)find_first_zero_bit(
+ active, entity->num_pads));
goto error;
}
}
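Note: the media-entity.c hunk above only adds dev_dbg() output when pipeline validation fails. As a hedged illustration (assumed usage, not taken from this series), a subdev-based driver that wants those messages to fire on format mismatches would typically wire the generic helper into its entity operations:

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

/* Sketch only: let the core compare pad formats on each enabled link. */
static const struct media_entity_operations demo_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};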
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index f8cec8e8cf82..218144a99016 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -46,6 +46,7 @@ source "drivers/media/pci/pt3/Kconfig"
source "drivers/media/pci/mantis/Kconfig"
source "drivers/media/pci/ngene/Kconfig"
source "drivers/media/pci/ddbridge/Kconfig"
+source "drivers/media/pci/smipcie/Kconfig"
endif
endif #MEDIA_PCI_SUPPORT
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index a12926e4b51f..0baf0d2967ee 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -11,7 +11,8 @@ obj-y += ttpci/ \
mantis/ \
ngene/ \
ddbridge/ \
- saa7146/
+ saa7146/ \
+ smipcie/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d8ec583c154c..41055606b969 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -3870,10 +3870,10 @@ static void osprey_eeprom(struct bttv *btv, const u8 ee[256])
} else {
unsigned short type;
- for (i = 4*16; i < 8*16; i += 16) {
- u16 checksum = ip_compute_csum(ee + i, 16);
+ for (i = 4 * 16; i < 8 * 16; i += 16) {
+ u16 checksum = (__force u16)ip_compute_csum(ee + i, 16);
- if ((checksum&0xff) + (checksum>>8) == 0xff)
+ if ((checksum & 0xff) + (checksum >> 8) == 0xff)
break;
}
if (i >= 8*16)
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 82cc47d2e3fa..4d3f05a19af3 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -84,7 +84,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
continue;
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
if (bpl <= sg_dma_len(sg)-offset) {
/* fits into current chunk */
@@ -100,13 +100,13 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
todo -= (sg_dma_len(sg)-offset);
offset = 0;
- sg++;
+ sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++)=cpu_to_le32(BT848_RISC_WRITE|
sg_dma_len(sg));
*(rp++)=cpu_to_le32(sg_dma_address(sg));
todo -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
*(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL|
todo);
@@ -187,15 +187,15 @@ bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc,
/* go to next sg entry if needed */
while (yoffset && yoffset >= sg_dma_len(ysg)) {
yoffset -= sg_dma_len(ysg);
- ysg++;
+ ysg = sg_next(ysg);
}
while (uoffset && uoffset >= sg_dma_len(usg)) {
uoffset -= sg_dma_len(usg);
- usg++;
+ usg = sg_next(usg);
}
while (voffset && voffset >= sg_dma_len(vsg)) {
voffset -= sg_dma_len(vsg);
- vsg++;
+ vsg = sg_next(vsg);
}
/* calculate max number of bytes we can write */
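Note: the bttv-risc.c hunks replace raw pointer increments with sg_next(), which is required once scatterlist tables may be chained across pages. A minimal sketch of the safe walking pattern (demo_* name hypothetical):

#include <linux/scatterlist.h>

/* Sketch only: sum the DMA lengths of a (possibly chained) sg table. */
static unsigned int demo_sgt_dma_len(struct sg_table *sgt)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		total += sg_dma_len(sg);	/* for_each_sg() uses sg_next() internally */

	return total;
}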
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 2d3afe0431a9..5a55630d09db 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -490,8 +490,8 @@ void cx18_av_std_setup(struct cx18 *cx)
/* Sets horizontal blanking delay and active lines */
cx18_av_write(cx, 0x470, hblank);
- cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) |
- (hactive << 4)));
+ cx18_av_write(cx, 0x471,
+ (((hblank >> 8) & 0x3) | (hactive << 4)) & 0xff);
cx18_av_write(cx, 0x472, hactive >> 4);
/* Sets burst gate delay */
@@ -499,14 +499,14 @@ void cx18_av_std_setup(struct cx18 *cx)
/* Sets vertical blanking delay and active duration */
cx18_av_write(cx, 0x474, vblank);
- cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) |
- (vactive << 4)));
+ cx18_av_write(cx, 0x475,
+ (((vblank >> 8) & 0x3) | (vactive << 4)) & 0xff);
cx18_av_write(cx, 0x476, vactive >> 4);
cx18_av_write(cx, 0x477, vblank656);
/* Sets src decimation rate */
- cx18_av_write(cx, 0x478, 0xff & src_decimation);
- cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8));
+ cx18_av_write(cx, 0x478, src_decimation & 0xff);
+ cx18_av_write(cx, 0x479, (src_decimation >> 8) & 0xff);
/* Sets Luma and UV Low pass filters */
cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
@@ -516,8 +516,8 @@ void cx18_av_std_setup(struct cx18 *cx)
/* Sets SC Step*/
cx18_av_write(cx, 0x47c, sc);
- cx18_av_write(cx, 0x47d, 0xff & sc >> 8);
- cx18_av_write(cx, 0x47e, 0xff & sc >> 16);
+ cx18_av_write(cx, 0x47d, (sc >> 8) & 0xff);
+ cx18_av_write(cx, 0x47e, (sc >> 16) & 0xff);
if (std & V4L2_STD_625_50) {
state->slicer_line_delay = 1;
@@ -952,7 +952,7 @@ static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
int is_50Hz = !(state->std & V4L2_STD_525_60);
- if (fmt->code != V4L2_MBUS_FMT_FIXED)
+ if (fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
fmt->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index add7391ecaba..f6b921f3b0ac 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -57,7 +57,8 @@
/* V4L2 capability aliases */
#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | \
- V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE)
+ V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE | \
+ V4L2_CAP_SLICED_VBI_CAPTURE)
struct cx18_card_video_input {
u8 video_type; /* video input type */
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index 282a3d29fdaa..4aeb7c6b8ce1 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -98,7 +98,7 @@ static int cx18_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
/* fix videodecoder resolution */
fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
fmt.height = cxhdl->height;
- fmt.code = V4L2_MBUS_FMT_FIXED;
+ fmt.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &fmt);
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 57f4688ea55b..207d6e82403b 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -290,7 +290,7 @@ struct cx18_options {
* list_entry_is_past_end - check if a previous loop cursor is off list end
* @pos: the type * previously used as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Check if the entry's list_head is the head of the list, thus it's not a
* real entry but was the loop cursor that walked past the end
@@ -379,6 +379,7 @@ struct cx18_stream {
const char *name; /* name of the stream */
int type; /* stream type */
u32 handle; /* task handle */
+ u32 v4l2_dev_caps; /* device capabilities */
unsigned int mdl_base_idx;
u32 id;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 6f2b59042b73..b8e4b68a9196 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -294,7 +294,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
mbus_fmt.width = cx->cxhdl.width = w;
mbus_fmt.height = cx->cxhdl.height = h;
- mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
+ mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &mbus_fmt);
return cx18_g_fmt_vid_cap(file, fh, fmt);
}
@@ -393,15 +393,16 @@ static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
struct cx18_open_id *id = fh2id(fh);
+ struct cx18_stream *s = video_drvdata(file);
struct cx18 *cx = id->cx;
strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCI:%s", pci_name(cx->pci_dev));
- vcap->capabilities = cx->v4l2_cap; /* capabilities */
- if (id->type == CX18_ENC_STREAM_TYPE_YUV)
- vcap->capabilities |= V4L2_CAP_STREAMING;
+ vcap->capabilities = cx->v4l2_cap; /* capabilities */
+ vcap->device_caps = s->v4l2_dev_caps; /* device capabilities */
+ vcap->capabilities |= V4L2_CAP_DEVICE_CAPS;
return 0;
}
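Note: the cx18-ioctl.c hunk above switches querycap to per-node device capabilities. A minimal sketch of that reporting pattern, assuming a hypothetical demo_stream type that records its own caps (simplified: capabilities would normally be the union over all nodes of the device):

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

struct demo_stream {
	u32 dev_caps;		/* caps of this particular video node */
};

static int demo_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct demo_stream *s = video_drvdata(file);

	strlcpy(cap->driver, "demo", sizeof(cap->driver));
	strlcpy(cap->card, "demo card", sizeof(cap->card));
	strlcpy(cap->bus_info, "PCI:0000:00:00.0", sizeof(cap->bus_info));
	cap->device_caps = s->dev_caps;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}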
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index f3541b5156ce..369445fcf3e5 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -58,11 +58,14 @@ static struct {
int vfl_type;
int num_offset;
int dma;
+ u32 caps;
} cx18_stream_info[] = {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
VFL_TYPE_GRABBER, 0,
PCI_DMA_FROMDEVICE,
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
@@ -73,11 +76,15 @@ static struct {
"encoder YUV",
VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE,
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
PCI_DMA_FROMDEVICE,
+ V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
@@ -93,6 +100,7 @@ static struct {
"encoder radio",
VFL_TYPE_RADIO, 0,
PCI_DMA_NONE,
+ V4L2_CAP_RADIO | V4L2_CAP_TUNER
},
};
@@ -260,6 +268,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s->handle = CX18_INVALID_TASK_HANDLE;
s->dma = cx18_stream_info[type].dma;
+ s->v4l2_dev_caps = cx18_stream_info[type].caps;
s->buffers = cx->stream_buffers[type];
s->buf_size = cx->stream_buf_size[type];
INIT_LIST_HEAD(&s->buf_pool);
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 3948db386fb5..e4901a503c73 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1148,6 +1148,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
dev->ts1.ts_packet_count = mpeglines;
*num_planes = 1;
sizes[0] = mpeglinesize * mpeglines;
+ alloc_ctxs[0] = dev->alloc_ctx;
*num_buffers = mpegbufs;
return 0;
}
@@ -1166,11 +1167,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
cx23885_free_buffer(dev, buf);
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 88c257d1161b..db99ca2613ba 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -680,6 +680,32 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_DVBSKY_T980C] = {
+ .name = "DVBSky T980C",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_DVBSKY_S950C] = {
+ .name = "DVBSky S950C",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_TT_CT2_4500_CI] = {
+ .name = "Technotrend TT-budget CT2-4500 CI",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_DVBSKY_S950] = {
+ .name = "DVBSky S950",
+ .portb = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_DVBSKY_S952] = {
+ .name = "DVBSky S952",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_DVBSKY_T982] = {
+ .name = "DVBSky T982",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -939,6 +965,30 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x4254,
.subdevice = 0x9580,
.card = CX23885_BOARD_DVBSKY_T9580,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x980c,
+ .card = CX23885_BOARD_DVBSKY_T980C,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x950c,
+ .card = CX23885_BOARD_DVBSKY_S950C,
+ }, {
+ .subvendor = 0x13c2,
+ .subdevice = 0x3013,
+ .card = CX23885_BOARD_TT_CT2_4500_CI,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x0950,
+ .card = CX23885_BOARD_DVBSKY_S950,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x0952,
+ .card = CX23885_BOARD_DVBSKY_S952,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x0982,
+ .card = CX23885_BOARD_DVBSKY_T982,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1534,6 +1584,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
mdelay(60);
break;
case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
/* enable GPIO3-18 pins */
cx_write(MC417_CTL, 0x00000037);
cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1);
@@ -1541,6 +1593,45 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
mdelay(100);
cx23885_gpio_set(dev, GPIO_2 | GPIO_11);
break;
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ /*
+ * GPIO-0 INTA from CiMax, input
+ * GPIO-1 reset CiMax, output, high active
+ * GPIO-2 reset demod, output, low active
+ * GPIO-3 to GPIO-10 data/addr for CAM
+ * GPIO-11 ~CS0 to CiMax1
+ * GPIO-12 ~CS1 to CiMax2
+ * GPIO-13 ADL0 load LSB addr
+ * GPIO-14 ADL1 load MSB addr
+ * GPIO-15 ~RDY from CiMax
+ * GPIO-17 ~RD to CiMax
+ * GPIO-18 ~WR to CiMax
+ */
+
+ cx_set(GP0_IO, 0x00060002); /* GPIO 1/2 as output */
+ cx_clear(GP0_IO, 0x00010004); /* GPIO 0 as input */
+ mdelay(100); /* reset delay */
+ cx_set(GP0_IO, 0x00060004); /* GPIO as out, reset high */
+ cx_clear(GP0_IO, 0x00010002);
+ cx_write(MC417_CTL, 0x00000037); /* enable GPIO3-18 pins */
+
+ /* GPIO-15 IN as ~ACK, rest as OUT */
+ cx_write(MC417_OEN, 0x00001000);
+
+ /* ~RD, ~WR high; ADL0, ADL1 low; ~CS0, ~CS1 high */
+ cx_write(MC417_RWD, 0x0000c300);
+
+ /* enable irq */
+ cx_write(GPIO_ISM, 0x00000000); /* INTERRUPTS active low */
+ break;
+ case CX23885_BOARD_DVBSKY_S950:
+ cx23885_gpio_enable(dev, GPIO_2, 1);
+ cx23885_gpio_clear(dev, GPIO_2);
+ msleep(100);
+ cx23885_gpio_set(dev, GPIO_2);
+ break;
}
}
@@ -1621,6 +1712,13 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1667,6 +1765,13 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
@@ -1714,6 +1819,13 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
@@ -1817,6 +1929,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_TEVII_S471:
case CX23885_BOARD_DVBWORLD_2005:
case CX23885_BOARD_PROF_8000:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
@@ -1865,6 +1981,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T982:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
@@ -1872,6 +1989,14 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_DVBSKY_S952:
+ ts1->gen_ctrl_val = 0x5; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0xe; /* Serial bus */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -1935,6 +2060,12 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 3bd386c371f7..1d9d0f86ca8c 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1453,17 +1453,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
struct cx23885_dev *dev = port->dev;
int size = port->ts_packet_size * port->ts_packet_count;
struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
- int rc;
dprintk(1, "%s: %p\n", __func__, buf);
if (vb2_plane_size(&buf->vb, 0) < size)
return -EINVAL;
vb2_set_plane_payload(&buf->vb, 0, size);
- rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!rc)
- return -EIO;
-
cx23885_risc_databuffer(dev->pci, &buf->risc,
sgt->sgl,
port->ts_packet_size, port->ts_packet_count, 0);
@@ -1997,9 +1992,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
if (!pci_dma_supported(pci_dev, 0xffffffff)) {
printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
err = -EIO;
- goto fail_irq;
+ goto fail_context;
}
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_context;
+ }
err = request_irq(pci_dev->irq, cx23885_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
@@ -2028,6 +2028,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
return 0;
fail_irq:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+fail_context:
cx23885_dev_unregister(dev);
fail_ctrl:
v4l2_ctrl_handler_free(hdl);
@@ -2053,6 +2055,7 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx23885_dev_unregister(dev);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
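Note: the cx23885-core.c hunks hand DMA mapping over to videobuf2-dma-sg by allocating a per-device allocation context, which is why the explicit dma_map_sg()/dma_unmap_sg() calls disappear from buf_prepare/buffer_finish. A hedged sketch of the queue_setup side (demo_* names hypothetical, signature as used in this kernel series):

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

struct demo_dev {
	void *alloc_ctx;	/* from vb2_dma_sg_init_ctx(&pci_dev->dev) */
	unsigned int buf_size;
};

/* Sketch only: hand the dma-sg context back so vb2 does the mapping. */
static int demo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			    unsigned int *num_buffers, unsigned int *num_planes,
			    unsigned int sizes[], void *alloc_ctxs[])
{
	struct demo_dev *dev = vb2_get_drv_priv(q);

	*num_planes = 1;
	sizes[0] = dev->buf_size;
	alloc_ctxs[0] = dev->alloc_ctx;
	return 0;
}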
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4cb90317ff45..c47d18270cfc 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -71,6 +71,7 @@
#include "si2165.h"
#include "si2168.h"
#include "si2157.h"
+#include "sp2.h"
#include "m88ds3103.h"
#include "m88ts2022.h"
@@ -101,6 +102,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
port->ts_packet_count = 32;
*num_planes = 1;
sizes[0] = port->ts_packet_size * port->ts_packet_count;
+ alloc_ctxs[0] = port->dev->alloc_ctx;
*num_buffers = 32;
return 0;
}
@@ -121,11 +123,8 @@ static void buffer_finish(struct vb2_buffer *vb)
struct cx23885_dev *dev = port->dev;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
cx23885_free_buffer(dev, buf);
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
static void buffer_queue(struct vb2_buffer *vb)
@@ -616,6 +615,103 @@ static int dvbsky_t9580_set_voltage(struct dvb_frontend *fe,
return 0;
}
+static int dvbsky_s952_portc_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx23885_tsport *port = fe->dvb->priv;
+ struct cx23885_dev *dev = port->dev;
+
+ cx23885_gpio_enable(dev, GPIO_12 | GPIO_13, 1);
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ cx23885_gpio_set(dev, GPIO_13);
+ cx23885_gpio_clear(dev, GPIO_12);
+ break;
+ case SEC_VOLTAGE_18:
+ cx23885_gpio_set(dev, GPIO_13);
+ cx23885_gpio_set(dev, GPIO_12);
+ break;
+ case SEC_VOLTAGE_OFF:
+ cx23885_gpio_clear(dev, GPIO_13);
+ cx23885_gpio_clear(dev, GPIO_12);
+ break;
+ }
+ /* call the frontend set_voltage function */
+ return port->fe_set_voltage(fe, voltage);
+}
+
+static int cx23885_sp2_ci_ctrl(void *priv, u8 read, int addr,
+ u8 data, int *mem)
+{
+ /* MC417 */
+ #define SP2_DATA 0x000000ff
+ #define SP2_WR 0x00008000
+ #define SP2_RD 0x00004000
+ #define SP2_ACK 0x00001000
+ #define SP2_ADHI 0x00000800
+ #define SP2_ADLO 0x00000400
+ #define SP2_CS1 0x00000200
+ #define SP2_CS0 0x00000100
+ #define SP2_EN_ALL 0x00001000
+ #define SP2_CTRL_OFF (SP2_CS1 | SP2_CS0 | SP2_WR | SP2_RD)
+
+ struct cx23885_tsport *port = priv;
+ struct cx23885_dev *dev = port->dev;
+ int ret;
+ int tmp = 0;
+ unsigned long timeout;
+
+ mutex_lock(&dev->gpio_lock);
+
+ /* write addr */
+ cx_write(MC417_OEN, SP2_EN_ALL);
+ cx_write(MC417_RWD, SP2_CTRL_OFF |
+ SP2_ADLO | (0xff & addr));
+ cx_clear(MC417_RWD, SP2_ADLO);
+ cx_write(MC417_RWD, SP2_CTRL_OFF |
+ SP2_ADHI | (0xff & (addr >> 8)));
+ cx_clear(MC417_RWD, SP2_ADHI);
+
+ if (read)
+ /* data in */
+ cx_write(MC417_OEN, SP2_EN_ALL | SP2_DATA);
+ else
+ /* data out */
+ cx_write(MC417_RWD, SP2_CTRL_OFF | data);
+
+ /* chip select 0 */
+ cx_clear(MC417_RWD, SP2_CS0);
+
+ /* read/write */
+ cx_clear(MC417_RWD, (read) ? SP2_RD : SP2_WR);
+
+ /* wait for a maximum of 1 msec */
+ timeout = jiffies + msecs_to_jiffies(1);
+ while (!time_after(jiffies, timeout)) {
+ tmp = cx_read(MC417_RWD);
+ if ((tmp & SP2_ACK) == 0)
+ break;
+ usleep_range(50, 100);
+ }
+
+ cx_set(MC417_RWD, SP2_CTRL_OFF);
+ *mem = tmp & 0xff;
+
+ mutex_unlock(&dev->gpio_lock);
+
+ if (!read) {
+ if (*mem < 0) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
static int cx23885_dvb_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -793,6 +889,32 @@ static const struct m88ds3103_config dvbsky_t9580_m88ds3103_config = {
.agc = 0x99,
};
+static const struct m88ds3103_config dvbsky_s950c_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_CI,
+ .ts_clk = 10000,
+ .ts_clk_pol = 1,
+ .lnb_en_pol = 1,
+ .lnb_hv_pol = 0,
+ .agc = 0x99,
+};
+
+static const struct m88ds3103_config dvbsky_s952_portc_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_SERIAL,
+ .ts_clk = 96000,
+ .ts_clk_pol = 0,
+ .lnb_en_pol = 1,
+ .lnb_hv_pol = 0,
+ .agc = 0x99,
+};
+
static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -944,11 +1066,13 @@ static int dvb_register(struct cx23885_tsport *port)
struct vb2_dvb_frontend *fe0, *fe1 = NULL;
struct si2168_config si2168_config;
struct si2157_config si2157_config;
+ struct sp2_config sp2_config;
struct m88ts2022_config m88ts2022_config;
struct i2c_board_info info;
struct i2c_adapter *adapter;
- struct i2c_client *client_demod;
- struct i2c_client *client_tuner;
+ struct i2c_client *client_demod = NULL, *client_tuner = NULL, *client_ci = NULL;
+ const struct m88ds3103_config *p_m88ds3103_config = NULL;
+ int (*p_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage) = NULL;
int mfe_shared = 0; /* bus not shared by default */
int ret;
@@ -973,11 +1097,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&hauppauge_generic_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(mt2131_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &hauppauge_generic_tunerconfig, 0);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(mt2131_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_generic_tunerconfig, 0);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
@@ -985,11 +1109,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
&hauppauge_lgdt3305_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[1].i2c_adap,
- &hauppauge_hvr127x_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_hvr127x_config);
if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1275)
cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
@@ -999,11 +1123,12 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[1].i2c_adap,
- &hauppauge_tda18271_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
tda18271_attach(&dev->ts1.analog_fe,
0x60, &dev->i2c_bus[1].i2c_adap,
@@ -1018,14 +1143,15 @@ static int dvb_register(struct cx23885_tsport *port)
dvb_attach(s5h1409_attach,
&hauppauge_ezqam_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(tda829x_attach, fe0->dvb.frontend,
- &dev->i2c_bus[1].i2c_adap, 0x42,
- &tda829x_no_probe);
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[1].i2c_adap,
- &hauppauge_tda18271_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[1].i2c_adap, 0x42,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
break;
case 0:
default:
@@ -1033,11 +1159,11 @@ static int dvb_register(struct cx23885_tsport *port)
dvb_attach(s5h1409_attach,
&hauppauge_generic_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL)
- dvb_attach(mt2131_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &hauppauge_generic_tunerconfig, 0);
- break;
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(mt2131_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_generic_tunerconfig, 0);
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
@@ -1045,32 +1171,33 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&hauppauge_hvr1800lp_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(mt2131_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &hauppauge_generic_tunerconfig, 0);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(mt2131_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_generic_tunerconfig, 0);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_express,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap, 0x61,
- TUNER_LG_TDVS_H06XF);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap, 0x61,
+ TUNER_LG_TDVS_H06XF);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
i2c_bus = &dev->i2c_bus[1];
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&hauppauge_hvr1500q_config,
&dev->i2c_bus[0].i2c_adap);
- if (fe0->dvb.frontend != NULL)
- dvb_attach(xc5000_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &hauppauge_hvr1500q_tunerconfig);
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_hvr1500q_tunerconfig);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500:
i2c_bus = &dev->i2c_bus[1];
@@ -1101,14 +1228,14 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(tda10048_attach,
&hauppauge_hvr1200_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(tda829x_attach, fe0->dvb.frontend,
- &dev->i2c_bus[1].i2c_adap, 0x42,
- &tda829x_no_probe);
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[1].i2c_adap,
- &hauppauge_hvr1200_tuner_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[1].i2c_adap, 0x42,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_hvr1200_tuner_config);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1210:
i2c_bus = &dev->i2c_bus[0];
@@ -1367,12 +1494,10 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
&mygica_x8506_lgs8gl5_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(xc5000_attach,
- fe0->dvb.frontend,
- &i2c_bus2->i2c_adap,
- &mygica_x8506_xc5000_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &i2c_bus2->i2c_adap, &mygica_x8506_xc5000_config);
cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_MYGICA_X8507:
@@ -1381,12 +1506,12 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
&mygica_x8507_mb86a20s_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(xc5000_attach,
- fe0->dvb.frontend,
- &i2c_bus2->i2c_adap,
- &mygica_x8507_xc5000_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &i2c_bus2->i2c_adap,
+ &mygica_x8507_xc5000_config);
cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
@@ -1395,12 +1520,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
&magicpro_prohdtve2_lgs8g75_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(xc5000_attach,
- fe0->dvb.frontend,
- &i2c_bus2->i2c_adap,
- &magicpro_prohdtve2_xc5000_config);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &i2c_bus2->i2c_adap,
+ &magicpro_prohdtve2_xc5000_config);
cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
@@ -1408,10 +1532,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL)
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[0].i2c_adap,
- &hauppauge_tda18271_config);
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[0].i2c_adap,
+ &hauppauge_tda18271_config);
tda18271_attach(&dev->ts1.analog_fe,
0x60, &dev->i2c_bus[1].i2c_adap,
@@ -1423,10 +1548,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL)
- dvb_attach(tda18271_attach, fe0->dvb.frontend,
- 0x60, &dev->i2c_bus[0].i2c_adap,
- &hauppauge_tda18271_config);
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[0].i2c_adap,
+ &hauppauge_tda18271_config);
break;
case CX23885_BOARD_MYGICA_X8558PRO:
switch (port->nr) {
@@ -1436,12 +1562,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(atbm8830_attach,
&mygica_x8558pro_atbm8830_cfg1,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(max2165_attach,
- fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &mygic_x8558pro_max2165_cfg1);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(max2165_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &mygic_x8558pro_max2165_cfg1);
break;
/* port C */
case 2:
@@ -1449,13 +1574,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(atbm8830_attach,
&mygica_x8558pro_atbm8830_cfg2,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(max2165_attach,
- fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &mygic_x8558pro_max2165_cfg2);
- }
- break;
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(max2165_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &mygic_x8558pro_max2165_cfg2);
}
break;
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
@@ -1467,15 +1590,15 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(stv0367ter_attach,
&netup_stv0367_config[port->nr - 1],
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (NULL == dvb_attach(xc5000_attach,
- fe0->dvb.frontend,
+ if (fe0->dvb.frontend == NULL)
+ break;
+ if (NULL == dvb_attach(xc5000_attach, fe0->dvb.frontend,
&i2c_bus->i2c_adap,
&netup_xc5000_config[port->nr - 1]))
- goto frontend_detach;
- /* load xc5000 firmware */
- fe0->dvb.frontend->ops.tuner_ops.init(fe0->dvb.frontend);
- }
+ goto frontend_detach;
+ /* load xc5000 firmware */
+ fe0->dvb.frontend->ops.tuner_ops.init(fe0->dvb.frontend);
+
/* MFE frontend 2 */
fe1 = vb2_dvb_get_frontend(&port->frontends, 2);
if (fe1 == NULL)
@@ -1484,14 +1607,15 @@ static int dvb_register(struct cx23885_tsport *port)
fe1->dvb.frontend = dvb_attach(stv0367cab_attach,
&netup_stv0367_config[port->nr - 1],
&i2c_bus->i2c_adap);
- if (fe1->dvb.frontend != NULL) {
- fe1->dvb.frontend->id = 1;
- if (NULL == dvb_attach(xc5000_attach,
- fe1->dvb.frontend,
- &i2c_bus->i2c_adap,
- &netup_xc5000_config[port->nr - 1]))
- goto frontend_detach;
- }
+ if (fe1->dvb.frontend == NULL)
+ break;
+
+ fe1->dvb.frontend->id = 1;
+ if (NULL == dvb_attach(xc5000_attach,
+ fe1->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &netup_xc5000_config[port->nr - 1]))
+ goto frontend_detach;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
i2c_bus = &dev->i2c_bus[0];
@@ -1503,26 +1627,26 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(drxk_attach,
&terratec_drxk_config[0],
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(mt2063_attach,
- fe0->dvb.frontend,
- &terratec_mt2063_config[0],
- &i2c_bus2->i2c_adap))
- goto frontend_detach;
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ if (!dvb_attach(mt2063_attach,
+ fe0->dvb.frontend,
+ &terratec_mt2063_config[0],
+ &i2c_bus2->i2c_adap))
+ goto frontend_detach;
break;
/* port c */
case 2:
fe0->dvb.frontend = dvb_attach(drxk_attach,
&terratec_drxk_config[1],
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(mt2063_attach,
- fe0->dvb.frontend,
- &terratec_mt2063_config[1],
- &i2c_bus2->i2c_adap))
- goto frontend_detach;
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ if (!dvb_attach(mt2063_attach,
+ fe0->dvb.frontend,
+ &terratec_mt2063_config[1],
+ &i2c_bus2->i2c_adap))
+ goto frontend_detach;
break;
}
break;
@@ -1532,10 +1656,10 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- dvb_attach(ts2020_attach, fe0->dvb.frontend,
- &tevii_ts2020_config, &i2c_bus->i2c_adap);
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &i2c_bus->i2c_adap);
break;
case CX23885_BOARD_PROF_8000:
i2c_bus = &dev->i2c_bus[0];
@@ -1544,15 +1668,15 @@ static int dvb_register(struct cx23885_tsport *port)
&prof_8000_stv090x_config,
&i2c_bus->i2c_adap,
STV090x_DEMODULATOR_0);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(stb6100_attach,
- fe0->dvb.frontend,
- &prof_8000_stb6100_config,
- &i2c_bus->i2c_adap))
- goto frontend_detach;
+ if (fe0->dvb.frontend == NULL)
+ break;
+ if (!dvb_attach(stb6100_attach,
+ fe0->dvb.frontend,
+ &prof_8000_stb6100_config,
+ &i2c_bus->i2c_adap))
+ goto frontend_detach;
- fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage;
- }
+ fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage;
break;
case CX23885_BOARD_HAUPPAUGE_HVR4400:
i2c_bus = &dev->i2c_bus[0];
@@ -1563,30 +1687,31 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(tda10071_attach,
&hauppauge_tda10071_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- if (!dvb_attach(a8293_attach, fe0->dvb.frontend,
- &i2c_bus->i2c_adap,
- &hauppauge_a8293_config))
- goto frontend_detach;
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ if (!dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config))
+ goto frontend_detach;
break;
/* port c */
case 2:
fe0->dvb.frontend = dvb_attach(si2165_attach,
&hauppauge_hvr4400_si2165_config,
&i2c_bus->i2c_adap);
- if (fe0->dvb.frontend != NULL) {
- fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
- if (!dvb_attach(tda18271_attach,
- fe0->dvb.frontend,
- 0x60, &i2c_bus2->i2c_adap,
- &hauppauge_hvr4400_tuner_config))
- goto frontend_detach;
- }
+ if (fe0->dvb.frontend == NULL)
+ break;
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
+ if (!dvb_attach(tda18271_attach,
+ fe0->dvb.frontend,
+ 0x60, &i2c_bus2->i2c_adap,
+ &hauppauge_hvr4400_tuner_config))
+ goto frontend_detach;
break;
}
break;
case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_S950:
i2c_bus = &dev->i2c_bus[0];
i2c_bus2 = &dev->i2c_bus[1];
switch (port->nr) {
@@ -1680,6 +1805,201 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ i2c_bus = &dev->i2c_bus[1];
+ i2c_bus2 = &dev->i2c_bus[0];
+
+ /* attach frontend */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module(info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+ case CX23885_BOARD_DVBSKY_S950C:
+ i2c_bus = &dev->i2c_bus[1];
+ i2c_bus2 = &dev->i2c_bus[0];
+
+ /* attach frontend */
+ fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
+ &dvbsky_s950c_m88ds3103_config,
+ &i2c_bus->i2c_adap, &adapter);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach tuner */
+ memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
+ m88ts2022_config.fe = fe0->dvb.frontend;
+ m88ts2022_config.clock = 27000000;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module(info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ goto frontend_detach;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ fe0->dvb.frontend->ops.read_signal_strength =
+ fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
+
+ port->i2c_client_tuner = client_tuner;
+ break;
+ case CX23885_BOARD_DVBSKY_S952:
+ switch (port->nr) {
+ /* port b */
+ case 1:
+ i2c_bus = &dev->i2c_bus[1];
+ p_m88ds3103_config = &dvbsky_t9580_m88ds3103_config;
+ p_set_voltage = dvbsky_t9580_set_voltage;
+ break;
+ /* port c */
+ case 2:
+ i2c_bus = &dev->i2c_bus[0];
+ p_m88ds3103_config = &dvbsky_s952_portc_m88ds3103_config;
+ p_set_voltage = dvbsky_s952_portc_set_voltage;
+ break;
+ }
+
+ /* attach frontend */
+ fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
+ p_m88ds3103_config,
+ &i2c_bus->i2c_adap, &adapter);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach tuner */
+ memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
+ m88ts2022_config.fe = fe0->dvb.frontend;
+ m88ts2022_config.clock = 27000000;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module(info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ goto frontend_detach;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ fe0->dvb.frontend->ops.read_signal_strength =
+ fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
+
+ /*
+ * for setting the voltage we need to set GPIOs on
+ * the card.
+ */
+ port->fe_set_voltage =
+ fe0->dvb.frontend->ops.set_voltage;
+ fe0->dvb.frontend->ops.set_voltage = p_set_voltage;
+
+ port->i2c_client_tuner = client_tuner;
+ break;
+ case CX23885_BOARD_DVBSKY_T982:
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ switch (port->nr) {
+ /* port b */
+ case 1:
+ i2c_bus = &dev->i2c_bus[1];
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ break;
+ /* port c */
+ case 2:
+ i2c_bus = &dev->i2c_bus[0];
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ break;
+ }
+
+ /* attach frontend */
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module(info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
@@ -1754,7 +2074,10 @@ static int dvb_register(struct cx23885_tsport *port)
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
break;
}
- case CX23885_BOARD_DVBSKY_T9580: {
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982: {
u8 eeprom[256]; /* 24C02 i2c eeprom */
if (port->nr > 2)
@@ -1764,12 +2087,67 @@ static int dvb_register(struct cx23885_tsport *port)
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
sizeof(eeprom));
- printk(KERN_INFO "DVBSky T9580 port %d MAC address: %pM\n",
- port->nr, eeprom + 0xc0 + (port->nr-1) * 8);
+ printk(KERN_INFO "%s port %d MAC address: %pM\n",
+ cx23885_boards[dev->board].name, port->nr,
+ eeprom + 0xc0 + (port->nr-1) * 8);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
(port->nr-1) * 8, 6);
break;
}
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_TT_CT2_4500_CI: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ /* attach CI */
+ memset(&sp2_config, 0, sizeof(sp2_config));
+ sp2_config.dvb_adap = &port->frontends.adapter;
+ sp2_config.priv = port;
+ sp2_config.ci_control = cx23885_sp2_ci_ctrl;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
+ info.addr = 0x40;
+ info.platform_data = &sp2_config;
+ request_module(info.type);
+ client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
+ if (client_ci == NULL ||
+ client_ci->dev.driver == NULL) {
+ if (client_tuner) {
+ module_put(client_tuner->dev.driver->owner);
+ i2c_unregister_device(client_tuner);
+ }
+ if (client_demod) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ }
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_ci->dev.driver->owner)) {
+ i2c_unregister_device(client_ci);
+ if (client_tuner) {
+ module_put(client_tuner->dev.driver->owner);
+ i2c_unregister_device(client_tuner);
+ }
+ if (client_demod) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ }
+ goto frontend_detach;
+ }
+ port->i2c_client_ci = client_ci;
+
+ if (port->nr != 1)
+ break;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
+ sizeof(eeprom));
+ printk(KERN_INFO "%s MAC address: %pM\n",
+ cx23885_boards[dev->board].name, eeprom + 0xc0);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
+ break;
+ }
}
return ret;
@@ -1810,7 +2188,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
fe0 = vb2_dvb_get_frontend(&port->frontends, i);
if (!fe0)
- err = -EINVAL;
+ return -EINVAL;
dprintk(1, "%s\n", __func__);
dprintk(1, " ->probed by Card=%d Name=%s, PCI %02x:%02x\n",
@@ -1853,6 +2231,13 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
struct vb2_dvb_frontend *fe0;
struct i2c_client *client;
+ /* remove I2C client for CI */
+ client = port->i2c_client_ci;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
/* remove I2C client for tuner */
client = port->i2c_client_tuner;
if (client) {
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 9d37fe661691..088799c3b49b 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -87,6 +87,13 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
/*
* The only boards we handle right now. However other boards
* using the CX2388x integrated IR controller should be similar
@@ -139,6 +146,13 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
/*
* The IR controller on this board only returns pulse widths.
* Any other mode setting will fail to set up the device.
@@ -305,6 +319,23 @@ int cx23885_input_init(struct cx23885_dev *dev)
/* A guess at the remote */
rc_map = RC_MAP_TBS_NEC;
break;
+ case CX23885_BOARD_DVBSKY_T9580:
+ case CX23885_BOARD_DVBSKY_T980C:
+ case CX23885_BOARD_DVBSKY_S950C:
+ case CX23885_BOARD_DVBSKY_S950:
+ case CX23885_BOARD_DVBSKY_S952:
+ case CX23885_BOARD_DVBSKY_T982:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ rc_map = RC_MAP_DVBSKY;
+ break;
+ case CX23885_BOARD_TT_CT2_4500_CI:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_BIT_ALL;
+ rc_map = RC_MAP_TT_1500;
+ break;
default:
return -ENODEV;
}
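For all of these boards the remote is serviced by the CX2388x's integrated IR controller in raw mode (RC_DRIVER_IR_RAW with RC_BIT_ALL), and only the keymap name differs: RC_MAP_DVBSKY for the DVBSky models, RC_MAP_TT_1500 for the TechnoTrend CT2-4500 CI. The map itself is provided by a separate rc-keymaps module (presumably rc-dvbsky.c for the new name). A minimal sketch of what such a table looks like; the scancode/keycode pairs below are illustrative, not the real DVBSky map:

	/* Sketch only: shape of an RC keymap table; real values live in the keymap module. */
	static struct rc_map_table dvbsky[] = {
		{ 0x0000, KEY_0 },
		{ 0x0001, KEY_1 },
		{ 0x000c, KEY_POWER },
	};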
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index a7c6ef8f3ea3..d362d3838c84 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -132,6 +132,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
lines = VBI_NTSC_LINE_COUNT;
*num_planes = 1;
sizes[0] = lines * VBI_LINE_LENGTH * 2;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -142,7 +143,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
struct cx23885_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
unsigned lines = VBI_PAL_LINE_COUNT;
- int ret;
if (dev->tvnorm & V4L2_STD_525_60)
lines = VBI_NTSC_LINE_COUNT;
@@ -151,10 +151,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
- ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!ret)
- return -EIO;
-
cx23885_risc_vbibuffer(dev->pci, &buf->risc,
sgt->sgl,
0, VBI_LINE_LENGTH * lines,
@@ -165,14 +161,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
/*
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 682a4f95df6b..5e93c682a3f5 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -323,6 +323,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*num_planes = 1;
sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -334,7 +335,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
u32 line0_offset, line1_offset;
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int field_tff;
- int ret;
buf->bpl = (dev->width * dev->fmt->depth) >> 3;
@@ -342,10 +342,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, dev->height * buf->bpl);
- ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
- if (!ret)
- return -EIO;
-
switch (dev->field) {
case V4L2_FIELD_TOP:
cx23885_risc_buffer(dev->pci, &buf->risc,
@@ -413,14 +409,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
- struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
-
- dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
/*
@@ -608,7 +600,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dev->field = f->fmt.pix.field;
dprintk(2, "%s() width=%d height=%d field=%d\n", __func__,
dev->width, dev->height, dev->field);
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
/* s_mbus_fmt overwrites f->fmt.pix.field, restore it */
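Both queue_setup() changes above hand vb2 the per-device allocation context (alloc_ctxs[0] = dev->alloc_ctx), so the videobuf2-dma-sg allocator now owns the scatter-gather mapping; that is why the explicit dma_map_sg()/dma_unmap_sg() calls disappear from buffer_prepare() and buffer_finish(). The context itself is not created in these hunks. A minimal sketch of how such a context is typically set up and torn down elsewhere in the probe/remove path (assumed, not shown in this excerpt):

	/* Sketch only: per-device vb2-dma-sg context, created once at probe time. */
	dev->alloc_ctx = vb2_dma_sg_init_ctx(&dev->pci->dev);
	if (IS_ERR(dev->alloc_ctx))
		return PTR_ERR(dev->alloc_ctx);

	/* ...and released again on driver teardown. */
	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);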
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 6c35e6115969..f55cd12da0fd 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -93,6 +93,12 @@
#define CX23885_BOARD_HAUPPAUGE_IMPACTVCBE 43
#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2 44
#define CX23885_BOARD_DVBSKY_T9580 45
+#define CX23885_BOARD_DVBSKY_T980C 46
+#define CX23885_BOARD_DVBSKY_S950C 47
+#define CX23885_BOARD_TT_CT2_4500_CI 48
+#define CX23885_BOARD_DVBSKY_S950 49
+#define CX23885_BOARD_DVBSKY_S952 50
+#define CX23885_BOARD_DVBSKY_T982 51
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -296,6 +302,7 @@ struct cx23885_tsport {
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+ struct i2c_client *i2c_client_ci;
int (*set_frontend)(struct dvb_frontend *fe);
int (*fe_set_voltage)(struct dvb_frontend *fe,
@@ -418,6 +425,7 @@ struct cx23885_dev {
struct vb2_queue vb2_vidq;
struct cx23885_dmaqueue vbiq;
struct vb2_queue vb2_vbiq;
+ void *alloc_ctx;
spinlock_t slock;
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index e81173c41e5a..389fffd2f36f 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -996,7 +996,7 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
@@ -1014,14 +1014,14 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
- sg++;
+ sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
@@ -1101,7 +1101,7 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
if (lpi && line > 0 && !(line % lpi))
@@ -1125,14 +1125,14 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
- sg++;
+ sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
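The sg++ to sg_next() conversions above matter once buffers come from an allocator that may build chained scatterlists (as videobuf2-dma-sg can): with chaining, consecutive entries are not guaranteed to be adjacent in memory, so plain pointer arithmetic can step onto a chain link or off the end of a chunk, whereas sg_next() follows the chain correctly. A small sketch of the safe iteration idiom:

	/* Sketch only: walk a (possibly chained) scatterlist with the helper macro. */
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		addr = sg_dma_address(sg);
		pr_debug("sg entry %d: dma %pad, len %u\n", i, &addr, sg_dma_len(sg));
	}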
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index a63a9ad163b2..14b813d634a8 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -2,8 +2,7 @@ config VIDEO_CX88
tristate "Conexant 2388x (bt878 successor) support"
depends on VIDEO_DEV && PCI && I2C && RC_CORE
select I2C_ALGOBIT
- select VIDEO_BTCX
- select VIDEOBUF_DMA_SG
+ select VIDEOBUF2_DMA_SG
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_WM8775 if MEDIA_SUBDRV_AUTOSELECT
@@ -45,7 +44,7 @@ config VIDEO_CX88_BLACKBIRD
config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
- select VIDEOBUF_DVB
+ select VIDEOBUF2_DVB
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index 8619c1becee2..d3679c3ee248 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
ccflags-y += -Idrivers/media/i2c
-ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index a72579a9f67f..7f8dc60028d5 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -61,8 +61,11 @@
struct cx88_audio_buffer {
unsigned int bpl;
- struct btcx_riscmem risc;
- struct videobuf_dmabuf dma;
+ struct cx88_riscmem risc;
+ void *vaddr;
+ struct scatterlist *sglist;
+ int sglen;
+ int nr_pages;
};
struct cx88_audio_dev {
@@ -84,8 +87,6 @@ struct cx88_audio_dev {
unsigned int period_size;
unsigned int num_periods;
- struct videobuf_dmabuf *dma_risc;
-
struct cx88_audio_buffer *buf;
struct snd_pcm_substream *substream;
@@ -290,19 +291,97 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
+static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
+{
+ struct cx88_audio_buffer *buf = chip->buf;
+ struct page *pg;
+ int i;
+
+ buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
+ if (NULL == buf->vaddr) {
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ return -ENOMEM;
+ }
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
+ (unsigned long)buf->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
+ buf->nr_pages = nr_pages;
+
+ buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
+ if (NULL == buf->sglist)
+ goto vzalloc_err;
+
+ sg_init_table(buf->sglist, buf->nr_pages);
+ for (i = 0; i < buf->nr_pages; i++) {
+ pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
+ if (NULL == pg)
+ goto vmalloc_to_page_err;
+ sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return 0;
+
+vmalloc_to_page_err:
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+vzalloc_err:
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return -ENOMEM;
+}
+
+static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
+{
+ struct cx88_audio_buffer *buf = dev->buf;
+
+ buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
+ buf->nr_pages, PCI_DMA_FROMDEVICE);
+
+ if (0 == buf->sglen) {
+ pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
+{
+ struct cx88_audio_buffer *buf = dev->buf;
+
+ if (!buf->sglen)
+ return 0;
+
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ buf->sglen = 0;
+ return 0;
+}
+
+static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
+{
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return 0;
+}
+
static int dsp_buffer_free(snd_cx88_card_t *chip)
{
+ struct cx88_riscmem *risc = &chip->buf->risc;
+
BUG_ON(!chip->dma_size);
dprintk(2,"Freeing buffer\n");
- videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
- videobuf_dma_free(chip->dma_risc);
- btcx_riscmem_free(chip->pci,&chip->buf->risc);
+ cx88_alsa_dma_unmap(chip);
+ cx88_alsa_dma_free(chip->buf);
+ if (risc->cpu)
+ pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
kfree(chip->buf);
- chip->dma_risc = NULL;
- chip->dma_size = 0;
+ chip->buf = NULL;
return 0;
}
@@ -387,7 +466,6 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
struct snd_pcm_hw_params * hw_params)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
- struct videobuf_dmabuf *dma;
struct cx88_audio_buffer *buf;
int ret;
@@ -408,20 +486,19 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
if (NULL == buf)
return -ENOMEM;
+ chip->buf = buf;
buf->bpl = chip->period_size;
- dma = &buf->dma;
- videobuf_dma_init(dma);
- ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
+ ret = cx88_alsa_dma_init(chip,
(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
- ret = videobuf_dma_map(&chip->pci->dev, dma);
+ ret = cx88_alsa_dma_map(chip);
if (ret < 0)
goto error;
- ret = cx88_risc_databuffer(chip->pci, &buf->risc, dma->sglist,
+ ret = cx88_risc_databuffer(chip->pci, &buf->risc, buf->sglist,
chip->period_size, chip->num_periods, 1);
if (ret < 0)
goto error;
@@ -430,10 +507,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- chip->buf = buf;
- chip->dma_risc = dma;
-
- substream->runtime->dma_area = chip->dma_risc->vaddr;
+ substream->runtime->dma_area = chip->buf->vaddr;
substream->runtime->dma_bytes = chip->dma_size;
substream->runtime->dma_addr = 0;
return 0;
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 150bb76e7839..d3c79d964f2c 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -45,10 +45,6 @@ MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
-static unsigned int mpegbufs = 32;
-module_param(mpegbufs,int,0644);
-MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32");
-
static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
@@ -326,13 +322,13 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
memory_read(dev->core, dev->mailbox - 4, &value);
if (value != 0x12345678) {
dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
- return -1;
+ return -EIO;
}
memory_read(dev->core, dev->mailbox, &flag);
if (flag) {
dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag);
- return -1;
+ return -EIO;
}
flag |= 1; /* tell 'em we're working on it */
@@ -352,14 +348,14 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
memory_write(dev->core, dev->mailbox, flag);
/* wait for firmware to handle the API command */
- timeout = jiffies + msecs_to_jiffies(10);
+ timeout = jiffies + msecs_to_jiffies(1000);
for (;;) {
memory_read(dev->core, dev->mailbox, &flag);
if (0 != (flag & 4))
break;
if (time_after(jiffies,timeout)) {
- dprintk(0, "ERROR: API Mailbox timeout\n");
- return -1;
+ dprintk(0, "ERROR: API Mailbox timeout %x\n", command);
+ return -EIO;
}
udelay(10);
}
@@ -420,7 +416,7 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
}
}
dprintk(0, "Mailbox signature values not found!\n");
- return -1;
+ return -EIO;
}
static int blackbird_load_firmware(struct cx8802_dev *dev)
@@ -432,7 +428,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
int i, retval = 0;
u32 value = 0;
u32 checksum = 0;
- u32 *dataptr;
+ __le32 *dataptr;
retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
@@ -449,29 +445,28 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
if (retval != 0) {
- dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n",
+ pr_err("Hotplug firmware request failed (%s).\n",
CX2341X_FIRM_ENC_FILENAME);
- dprintk(0, "Please fix your hotplug setup, the board will "
- "not work without firmware loaded!\n");
- return -1;
+ pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
+ return -EIO;
}
if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
- dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
+ pr_err("Firmware size mismatch (have %zd, expected %d)\n",
firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
- return -1;
+ return -EINVAL;
}
if (0 != memcmp(firmware->data, magic, 8)) {
- dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
+ pr_err("Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
- return -1;
+ return -EINVAL;
}
/* transfer to the chip */
dprintk(1,"Loading firmware ...\n");
- dataptr = (u32*)firmware->data;
+ dataptr = (__le32 *)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
value = le32_to_cpu(*dataptr);
checksum += ~value;
@@ -484,12 +479,11 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
memory_read(dev->core, i, &value);
checksum -= ~value;
}
+ release_firmware(firmware);
if (checksum) {
- dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n");
- release_firmware(firmware);
- return -1;
+ pr_err("Firmware load might have failed (checksum mismatch).\n");
+ return -EIO;
}
- release_firmware(firmware);
dprintk(0, "Firmware upload successful.\n");
retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
@@ -521,12 +515,14 @@ DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | M
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
+ struct cx88_core *core = dev->core;
+
/* assign frame size */
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
- dev->height, dev->width);
+ core->height, core->width);
- dev->cxhdl.width = dev->width;
- dev->cxhdl.height = dev->height;
+ dev->cxhdl.width = core->width;
+ dev->cxhdl.height = core->height;
cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
cx2341x_handler_setup(&dev->cxhdl);
}
@@ -540,9 +536,6 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
dprintk(1,"Initialize codec\n");
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
-
- dev->mpeg_active = 0;
-
/* ping was not successful, reset and upload firmware */
cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */
cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */
@@ -589,9 +582,8 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
return 0;
}
-static int blackbird_start_codec(struct file *file, void *priv)
+static int blackbird_start_codec(struct cx8802_dev *dev)
{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx88_core *core = dev->core;
/* start capturing to the host interface */
u32 reg;
@@ -627,7 +619,6 @@ static int blackbird_start_codec(struct file *file, void *priv)
BLACKBIRD_RAW_BITS_NONE
);
- dev->mpeg_active = 1;
return 0;
}
@@ -641,51 +632,137 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
cx2341x_handler_set_busy(&dev->cxhdl, 0);
- dev->mpeg_active = 0;
return 0;
}
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx8802_fh *fh = q->priv_data;
-
- fh->dev->ts_packet_size = 188 * 4; /* was: 512 */
- fh->dev->ts_packet_count = mpegbufs; /* was: 100 */
+ struct cx8802_dev *dev = q->drv_priv;
- *size = fh->dev->ts_packet_size * fh->dev->ts_packet_count;
- *count = fh->dev->ts_packet_count;
+ *num_planes = 1;
+ dev->ts_packet_size = 188 * 4;
+ dev->ts_packet_count = 32;
+ sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
-static int
-bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx8802_fh *fh = q->priv_data;
- return cx8802_buf_prepare(q, fh->dev, (struct cx88_buffer*)vb, field);
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+
+ return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
+}
+
+static void buffer_finish(struct vb2_buffer *vb)
+{
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_riscmem *risc = &buf->risc;
+
+ if (risc->cpu)
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ memset(risc, 0, sizeof(*risc));
}
-static void
-bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx8802_fh *fh = q->priv_data;
- cx8802_buf_queue(fh->dev, (struct cx88_buffer*)vb);
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+
+ cx8802_buf_queue(dev, buf);
}
-static void
-bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- cx88_free_buffer(q, (struct cx88_buffer*)vb);
+ struct cx8802_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->mpegq;
+ struct cx8802_driver *drv;
+ struct cx88_buffer *buf;
+ unsigned long flags;
+ int err;
+
+ /* Make sure we can acquire the hardware */
+ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
+ if (!drv) {
+ dprintk(1, "%s: blackbird driver is not loaded\n", __func__);
+ err = -ENODEV;
+ goto fail;
+ }
+
+ err = drv->request_acquire(drv);
+ if (err != 0) {
+ dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__, err);
+ goto fail;
+ }
+
+ if (blackbird_initialize_codec(dev) < 0) {
+ drv->request_release(drv);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ err = blackbird_start_codec(dev);
+ if (err == 0) {
+ buf = list_entry(dmaq->active.next, struct cx88_buffer, list);
+ cx8802_start_dma(dev, dmaq, buf);
+ return 0;
+ }
+
+fail:
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return err;
}
-static struct videobuf_queue_ops blackbird_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct cx8802_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->mpegq;
+ struct cx8802_driver *drv = NULL;
+ unsigned long flags;
+
+ cx8802_cancel_buffers(dev);
+ blackbird_stop_codec(dev);
+
+ /* Make sure we release the hardware */
+ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
+ WARN_ON(!drv);
+ if (drv)
+ drv->request_release(drv);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static struct vb2_ops blackbird_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
};
/* ------------------------------------------------------------------ */
@@ -693,8 +770,8 @@ static struct videobuf_queue_ops blackbird_qops = {
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
- struct cx88_core *core = dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
strcpy(cap->driver, "cx88_blackbird");
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
@@ -714,131 +791,111 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
return 0;
}
-static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
+ f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.width = dev->width;
- f->fmt.pix.height = dev->height;
- f->fmt.pix.field = fh->mpegq.field;
- dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
- dev->width, dev->height, fh->mpegq.field );
+ f->fmt.pix.width = core->width;
+ f->fmt.pix.height = core->height;
+ f->fmt.pix.field = core->field;
return 0;
}
-static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
+ unsigned maxw, maxh;
+ enum v4l2_field field;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
+ f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
- dev->width, dev->height, fh->mpegq.field );
+
+ maxw = norm_maxw(core->tvnorm);
+ maxh = norm_maxh(core->tvnorm);
+
+ field = f->fmt.pix.field;
+
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_SEQ_TB:
+ break;
+ default:
+ field = (f->fmt.pix.height > maxh / 2)
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_BOTTOM;
+ break;
+ }
+ if (V4L2_FIELD_HAS_T_OR_B(field))
+ maxh /= 2;
+
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
+ f->fmt.pix.field = field;
return 0;
}
-static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
+ struct cx8802_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- dev->width = f->fmt.pix.width;
- dev->height = f->fmt.pix.height;
- fh->mpegq.field = f->fmt.pix.field;
+ if (vb2_is_busy(&dev->vb2_mpegq))
+ return -EBUSY;
+ if (core->v4ldev && (vb2_is_busy(&core->v4ldev->vb2_vidq) ||
+ vb2_is_busy(&core->v4ldev->vb2_vbiq)))
+ return -EBUSY;
+ vidioc_try_fmt_vid_cap(file, priv, f);
+ core->width = f->fmt.pix.width;
+ core->height = f->fmt.pix.height;
+ core->field = f->fmt.pix.field;
cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
f->fmt.pix.height, f->fmt.pix.width);
- dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
- f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field );
return 0;
}
-static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
-{
- struct cx8802_fh *fh = priv;
- return (videobuf_reqbufs(&fh->mpegq, p));
-}
-
-static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct cx8802_fh *fh = priv;
- return (videobuf_querybuf(&fh->mpegq, p));
-}
-
-static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct cx8802_fh *fh = priv;
- return (videobuf_qbuf(&fh->mpegq, p));
-}
-
-static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct cx8802_fh *fh = priv;
- return (videobuf_dqbuf(&fh->mpegq, p,
- file->f_flags & O_NONBLOCK));
-}
-
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
-
- if (!dev->mpeg_active)
- blackbird_start_codec(file, fh);
- return videobuf_streamon(&fh->mpegq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
-
- if (dev->mpeg_active)
- blackbird_stop_codec(dev);
- return videobuf_streamoff(&fh->mpegq);
-}
-
static int vidioc_s_frequency (struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx8802_fh *fh = priv;
- struct cx8802_dev *dev = fh->dev;
- struct cx88_core *core = dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
+ bool streaming;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
- if (dev->mpeg_active)
+ streaming = vb2_start_streaming_called(&dev->vb2_mpegq);
+ if (streaming)
blackbird_stop_codec(dev);
cx88_set_freq (core,f);
blackbird_initialize_codec(dev);
- cx88_set_scale(dev->core, dev->width, dev->height,
- fh->mpegq.field);
+ cx88_set_scale(core, core->width, core->height,
+ core->field);
+ if (streaming)
+ blackbird_start_codec(dev);
return 0;
}
static int vidioc_log_status (struct file *file, void *priv)
{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
- struct cx88_core *core = dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
char name[32 + 2];
snprintf(name, sizeof(name), "%s/2", core->name);
@@ -850,15 +907,16 @@ static int vidioc_log_status (struct file *file, void *priv)
static int vidioc_enum_input (struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
return cx88_enum_input (core,i);
}
static int vidioc_g_frequency (struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx8802_fh *fh = priv;
- struct cx88_core *core = fh->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
@@ -873,7 +931,8 @@ static int vidioc_g_frequency (struct file *file, void *priv,
static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
*i = core->input;
return 0;
@@ -881,24 +940,24 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
if (0 == INPUT(i).type)
return -EINVAL;
- mutex_lock(&core->lock);
cx88_newstation(core);
cx88_video_mux(core,i);
- mutex_unlock(&core->lock);
return 0;
}
static int vidioc_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
u32 reg;
if (unlikely(UNSET == core->board.tuner_type))
@@ -920,7 +979,8 @@ static int vidioc_g_tuner (struct file *file, void *priv,
static int vidioc_s_tuner (struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (UNSET == core->board.tuner_type)
return -EINVAL;
@@ -933,7 +993,8 @@ static int vidioc_s_tuner (struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+ struct cx8802_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
*tvnorm = core->tvnorm;
return 0;
@@ -941,155 +1002,20 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
-
- mutex_lock(&core->lock);
- cx88_set_tvnorm(core, id);
- mutex_unlock(&core->lock);
- return 0;
-}
-
-/* FIXME: cx88_ioctl_hook not implemented */
-
-static int mpeg_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
struct cx8802_dev *dev = video_drvdata(file);
- struct cx8802_fh *fh;
- struct cx8802_driver *drv = NULL;
- int err;
-
- dprintk( 1, "%s\n", __func__);
-
- mutex_lock(&dev->core->lock);
-
- /* Make sure we can acquire the hardware */
- drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
- if (!drv) {
- dprintk(1, "%s: blackbird driver is not loaded\n", __func__);
- mutex_unlock(&dev->core->lock);
- return -ENODEV;
- }
-
- err = drv->request_acquire(drv);
- if (err != 0) {
- dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
- mutex_unlock(&dev->core->lock);
- return err;
- }
-
- if (!dev->core->mpeg_users && blackbird_initialize_codec(dev) < 0) {
- drv->request_release(drv);
- mutex_unlock(&dev->core->lock);
- return -EINVAL;
- }
- dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh),GFP_KERNEL);
- if (NULL == fh) {
- drv->request_release(drv);
- mutex_unlock(&dev->core->lock);
- return -ENOMEM;
- }
- v4l2_fh_init(&fh->fh, vdev);
- file->private_data = fh;
- fh->dev = dev;
-
- videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx88_buffer),
- fh, NULL);
-
- /* FIXME: locking against other video device */
- cx88_set_scale(dev->core, dev->width, dev->height,
- fh->mpegq.field);
-
- dev->core->mpeg_users++;
- mutex_unlock(&dev->core->lock);
- v4l2_fh_add(&fh->fh);
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx8802_fh *fh = file->private_data;
- struct cx8802_dev *dev = fh->dev;
- struct cx8802_driver *drv = NULL;
-
- mutex_lock(&dev->core->lock);
-
- if (dev->mpeg_active && dev->core->mpeg_users == 1)
- blackbird_stop_codec(dev);
-
- cx8802_cancel_buffers(fh->dev);
- /* stop mpeg capture */
- videobuf_stop(&fh->mpegq);
-
- videobuf_mmap_free(&fh->mpegq);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- file->private_data = NULL;
- kfree(fh);
-
- /* Make sure we release the hardware */
- drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
- WARN_ON(!drv);
- if (drv)
- drv->request_release(drv);
-
- dev->core->mpeg_users--;
-
- mutex_unlock(&dev->core->lock);
-
- return 0;
-}
-
-static ssize_t
-mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
-{
- struct cx8802_fh *fh = file->private_data;
- struct cx8802_dev *dev = fh->dev;
-
- if (!dev->mpeg_active)
- blackbird_start_codec(file, fh);
-
- return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static unsigned int
-mpeg_poll(struct file *file, struct poll_table_struct *wait)
-{
- unsigned long req_events = poll_requested_events(wait);
- struct cx8802_fh *fh = file->private_data;
- struct cx8802_dev *dev = fh->dev;
-
- if (!dev->mpeg_active && (req_events & (POLLIN | POLLRDNORM)))
- blackbird_start_codec(file, fh);
-
- return v4l2_ctrl_poll(file, wait) | videobuf_poll_stream(file, &fh->mpegq, wait);
-}
-
-static int
-mpeg_mmap(struct file *file, struct vm_area_struct * vma)
-{
- struct cx8802_fh *fh = file->private_data;
+ struct cx88_core *core = dev->core;
- return videobuf_mmap_mapper(&fh->mpegq, vma);
+ return cx88_set_tvnorm(core, id);
}
static const struct v4l2_file_operations mpeg_fops =
{
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1099,12 +1025,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_log_status = vidioc_log_status,
.vidioc_enum_input = vidioc_enum_input,
@@ -1189,11 +1115,12 @@ static int blackbird_register_video(struct cx8802_dev *dev)
{
int err;
- dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
- &cx8802_mpeg_template,"mpeg");
+ dev->mpeg_dev = cx88_vdev_init(dev->core, dev->pci,
+ &cx8802_mpeg_template, "mpeg");
dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl;
video_set_drvdata(dev->mpeg_dev, dev);
- err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
+ dev->mpeg_dev->queue = &dev->vb2_mpegq;
+ err = video_register_device(dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s/2: can't register mpeg device\n",
dev->core->name);
@@ -1210,6 +1137,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = core->dvbdev;
+ struct vb2_queue *q;
int err;
dprintk( 1, "%s\n", __func__);
@@ -1223,15 +1151,9 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD))
goto fail_core;
- dev->width = 720;
- if (core->tvnorm & V4L2_STD_525_60) {
- dev->height = 480;
- } else {
- dev->height = 576;
- }
dev->cxhdl.port = CX2341X_PORT_STREAMING;
- dev->cxhdl.width = dev->width;
- dev->cxhdl.height = dev->height;
+ dev->cxhdl.width = core->width;
+ dev->cxhdl.height = core->height;
dev->cxhdl.func = blackbird_mbox_func;
dev->cxhdl.priv = dev;
err = cx2341x_handler_init(&dev->cxhdl, 36);
@@ -1250,13 +1172,30 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
// init_controls(core);
cx88_set_tvnorm(core,core->tvnorm);
cx88_video_mux(core,0);
- cx2341x_handler_set_50hz(&dev->cxhdl, dev->height == 576);
+ cx2341x_handler_set_50hz(&dev->cxhdl, core->height == 576);
cx2341x_handler_setup(&dev->cxhdl);
+
+ q = &dev->vb2_mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx88_buffer);
+ q->ops = &blackbird_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &core->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_core;
+
blackbird_register_video(dev);
return 0;
- fail_core:
+fail_core:
return err;
}
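Note the vb2 contract the new callbacks follow: start_streaming() is only invoked once min_buffers_needed (2) buffers are queued, and if it fails every already-queued buffer must be handed back with VB2_BUF_STATE_QUEUED, while stop_streaming() returns them with VB2_BUF_STATE_ERROR. That is why the same flush loop appears twice above; a sketch of how it could be factored into a helper (hypothetical function, not part of this patch):

/* Sketch only: hypothetical helper mirroring the two flush loops above. */
static void cx8802_return_all_buffers(struct cx8802_dev *dev,
				      enum vb2_buffer_state state)
{
	struct cx88_dmaqueue *dmaq = &dev->mpegq;
	struct cx88_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		buf = list_entry(dmaq->active.next, struct cx88_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, state);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}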
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 851754bf1291..8f2556ec3971 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -347,7 +347,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_IODATA_GVVCP3PCI] = {
.name = "IODATA GV-VCP3/PCI",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -436,7 +436,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_KWORLD_DVB_T] = {
.name = "KWorld/VStream XPert DVB-T",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -455,7 +455,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1] = {
.name = "DViCO FusionHDTV DVB-T1",
- .tuner_type = TUNER_ABSENT, /* No analog tuner */
+ .tuner_type = UNSET, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -542,7 +542,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_HAUPPAUGE_DVB_T1] = {
.name = "Hauppauge Nova-T DVB-T",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -554,7 +554,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_CONEXANT_DVB_T1] = {
.name = "Conexant DVB-T reference design",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -579,7 +579,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS] = {
.name = "DViCO FusionHDTV DVB-T Plus",
- .tuner_type = TUNER_ABSENT, /* No analog tuner */
+ .tuner_type = UNSET, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -596,7 +596,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_DNTV_LIVE_DVB_T] = {
.name = "digitalnow DNTV Live! DVB-T",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -787,7 +787,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_ADSTECH_DVB_T_PCI] = {
.name = "ADS Tech Instant TV DVB-T PCI",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -806,7 +806,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1] = {
.name = "TerraTec Cinergy 1400 DVB-T",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
@@ -924,7 +924,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_WINFAST_DTV1000] = {
.name = "WinFast DTV1000-T",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -972,7 +972,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = {
.name = "Hauppauge Nova-S-Plus DVB-S",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -998,7 +998,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = {
.name = "Hauppauge Nova-SE2 DVB-S",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1010,7 +1010,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_KWORLD_DVBS_100] = {
.name = "KWorld DVB-S 100",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1102,7 +1102,7 @@ static const struct cx88_board cx88_boards[] = {
/* DTT 7579 Conexant CX22702-19 Conexant CX2388x */
/* Manenti Marco <marco_manenti@colman.it> */
.name = "KWorld/VStream XPert DVB-T with cx22702",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1121,7 +1121,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = {
.name = "DViCO FusionHDTV DVB-T Dual Digital",
- .tuner_type = TUNER_ABSENT, /* No analog tuner */
+ .tuner_type = UNSET, /* No analog tuner */
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1356,7 +1356,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_GENIATECH_DVBS] = {
.name = "Geniatech DVB-S",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1494,7 +1494,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_SAMSUNG_SMT_7020] = {
.name = "Samsung SMT 7020 DVB-S",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1506,7 +1506,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_ADSTECH_PTV_390] = {
.name = "ADS Tech Instant Video PCI",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1553,7 +1553,7 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO] = {
.name = "DViCO FusionHDTV 5 PCI nano",
/* xc3008 tuner, digital only for now */
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2069,7 +2069,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_TBS_8920] = {
.name = "TBS 8920 DVB-S/S2",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2304,7 +2304,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_TWINHAN_VP1027_DVBS] = {
.name = "Twinhan VP-1027 DVB-S",
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = UNSET,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2921,33 +2921,33 @@ static const struct {
int fm;
const char *name;
} gdi_tuner[] = {
- [ 0x01 ] = { .id = TUNER_ABSENT,
+ [ 0x01 ] = { .id = UNSET,
.name = "NTSC_M" },
- [ 0x02 ] = { .id = TUNER_ABSENT,
+ [ 0x02 ] = { .id = UNSET,
.name = "PAL_B" },
- [ 0x03 ] = { .id = TUNER_ABSENT,
+ [ 0x03 ] = { .id = UNSET,
.name = "PAL_I" },
- [ 0x04 ] = { .id = TUNER_ABSENT,
+ [ 0x04 ] = { .id = UNSET,
.name = "PAL_D" },
- [ 0x05 ] = { .id = TUNER_ABSENT,
+ [ 0x05 ] = { .id = UNSET,
.name = "SECAM" },
- [ 0x10 ] = { .id = TUNER_ABSENT,
+ [ 0x10 ] = { .id = UNSET,
.fm = 1,
.name = "TEMIC_4049" },
[ 0x11 ] = { .id = TUNER_TEMIC_4136FY5,
.name = "TEMIC_4136" },
- [ 0x12 ] = { .id = TUNER_ABSENT,
+ [ 0x12 ] = { .id = UNSET,
.name = "TEMIC_4146" },
[ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME,
.fm = 1,
.name = "PHILIPS_FQ1216_MK3" },
- [ 0x21 ] = { .id = TUNER_ABSENT, .fm = 1,
+ [ 0x21 ] = { .id = UNSET, .fm = 1,
.name = "PHILIPS_FQ1236_MK3" },
- [ 0x22 ] = { .id = TUNER_ABSENT,
+ [ 0x22 ] = { .id = UNSET,
.name = "PHILIPS_FI1236_MK3" },
- [ 0x23 ] = { .id = TUNER_ABSENT,
+ [ 0x23 ] = { .id = UNSET,
.name = "PHILIPS_FI1216_MK3" },
};
@@ -3564,7 +3564,7 @@ static void cx88_card_setup(struct cx88_core *core)
mode_mask &= ~T_RADIO;
}
- if (core->board.tuner_type != TUNER_ABSENT) {
+ if (core->board.tuner_type != UNSET) {
tun_setup.mode_mask = mode_mask;
tun_setup.type = core->board.tuner_type;
tun_setup.addr = core->board.tuner_addr;
@@ -3691,6 +3691,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->nr = nr;
sprintf(core->name, "cx88[%d]", core->nr);
+ core->tvnorm = V4L2_STD_NTSC_M;
+ core->width = 320;
+ core->height = 240;
+ core->field = V4L2_FIELD_INTERLACED;
+
strcpy(core->v4l2_dev.name, core->name);
if (v4l2_device_register(NULL, &core->v4l2_dev)) {
kfree(core);
@@ -3772,7 +3777,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
cx88_i2c_init(core, pci);
/* load tuner module, if needed */
- if (TUNER_ABSENT != core->board.tuner_type) {
+ if (UNSET != core->board.tuner_type) {
/* Ignore 0x6b and 0x6f on cx88 boards.
* FusionHDTV5 RT Gold has an ir receiver at 0x6b
* and an RTC at 0x6f which can get corrupted if probed. */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 71630238027b..dee177ed5fe9 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -76,11 +76,16 @@ static DEFINE_MUTEX(devlist);
static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
- unsigned int lines, unsigned int lpi)
+ unsigned int lines, unsigned int lpi, bool jump)
{
struct scatterlist *sg;
unsigned int line,todo,sol;
+ if (jump) {
+ (*rp++) = cpu_to_le32(RISC_JUMP);
+ (*rp++) = 0;
+ }
+
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
@@ -90,7 +95,7 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
if (lpi && line>0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
@@ -109,13 +114,13 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
todo -= (sg_dma_len(sg)-offset);
offset = 0;
- sg++;
+ sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++)=cpu_to_le32(RISC_WRITE|
sg_dma_len(sg));
*(rp++)=cpu_to_le32(sg_dma_address(sg));
todo -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
*(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
*(rp++)=cpu_to_le32(sg_dma_address(sg));
@@ -127,14 +132,13 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
return rp;
}
-int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines)
{
u32 instructions,fields;
__le32 *rp;
- int rc;
fields = 0;
if (UNSET != top_offset)
@@ -147,18 +151,21 @@ int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
- instructions += 2;
- if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0)
- return rc;
+ instructions += 4;
+ risc->size = instructions * 8;
+ risc->dma = 0;
+ risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
+ if (NULL == risc->cpu)
+ return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
if (UNSET != top_offset)
rp = cx88_risc_field(rp, sglist, top_offset, 0,
- bpl, padding, lines, 0);
+ bpl, padding, lines, 0, true);
if (UNSET != bottom_offset)
rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
- bpl, padding, lines, 0);
+ bpl, padding, lines, 0, top_offset == UNSET);
/* save pointer to jmp instruction address */
risc->jmp = rp;
@@ -166,26 +173,28 @@ int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
return 0;
}
-int cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
- int rc;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Here
there is no padding and no sync. First DMA region may be smaller
than PAGE_SIZE */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
- instructions += 1;
- if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0)
- return rc;
+ instructions += 3;
+ risc->size = instructions * 8;
+ risc->dma = 0;
+ risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
+ if (NULL == risc->cpu)
+ return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
- rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi);
+ rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, !lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
@@ -193,39 +202,6 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
return 0;
}
-int cx88_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value)
-{
- __le32 *rp;
- int rc;
-
- if ((rc = btcx_riscmem_alloc(pci, risc, 4*16)) < 0)
- return rc;
-
- /* write risc instructions */
- rp = risc->cpu;
- *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2 | RISC_IMM);
- *(rp++) = cpu_to_le32(reg);
- *(rp++) = cpu_to_le32(value);
- *(rp++) = cpu_to_le32(mask);
- *(rp++) = cpu_to_le32(RISC_JUMP);
- *(rp++) = cpu_to_le32(risc->dma);
- return 0;
-}
-
-void
-cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
-{
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
-
- BUG_ON(in_interrupt());
- videobuf_waiton(q, &buf->vb, 0, 0);
- videobuf_dma_unmap(q->dev, dma);
- videobuf_dma_free(dma);
- btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
/* ------------------------------------------------------------------ */
/* our SRAM memory layout */
@@ -539,33 +515,12 @@ void cx88_wakeup(struct cx88_core *core,
struct cx88_dmaqueue *q, u32 count)
{
struct cx88_buffer *buf;
- int bc;
-
- for (bc = 0;; bc++) {
- if (list_empty(&q->active))
- break;
- buf = list_entry(q->active.next,
- struct cx88_buffer, vb.queue);
- /* count comes from the hw and is is 16bit wide --
- * this trick handles wrap-arounds correctly for
- * up to 32767 buffers in flight... */
- if ((s16) (count - buf->count) < 0)
- break;
- v4l2_get_timestamp(&buf->vb.ts);
- dprintk(2,"[%p/%d] wakeup reg=%d buf=%d\n",buf,buf->vb.i,
- count, buf->count);
- buf->vb.state = VIDEOBUF_DONE;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
- }
- if (list_empty(&q->active)) {
- del_timer(&q->timeout);
- } else {
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- }
- if (bc != 1)
- dprintk(2, "%s: %d buffers handled (should be 1)\n",
- __func__, bc);
+
+ buf = list_entry(q->active.next,
+ struct cx88_buffer, list);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
void cx88_shutdown(struct cx88_core *core)
@@ -909,6 +864,13 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
u32 bdelay,agcdelay,htotal;
u32 cxiformat, cxoformat;
+ if (norm == core->tvnorm)
+ return 0;
+ if (core->v4ldev && (vb2_is_busy(&core->v4ldev->vb2_vidq) ||
+ vb2_is_busy(&core->v4ldev->vb2_vbiq)))
+ return -EBUSY;
+ if (core->dvbdev && vb2_is_busy(&core->dvbdev->vb2_mpegq))
+ return -EBUSY;
core->tvnorm = norm;
fsc8 = norm_fsc8(norm);
adc_clock = xtal;
@@ -1043,6 +1005,7 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
vfd->v4l2_dev = &core->v4l2_dev;
vfd->dev_parent = &pci->dev;
vfd->release = video_device_release;
+ vfd->lock = &core->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
core->name, type, core->board.name);
return vfd;
@@ -1114,8 +1077,6 @@ EXPORT_SYMBOL(cx88_shutdown);
EXPORT_SYMBOL(cx88_risc_buffer);
EXPORT_SYMBOL(cx88_risc_databuffer);
-EXPORT_SYMBOL(cx88_risc_stopper);
-EXPORT_SYMBOL(cx88_free_buffer);
EXPORT_SYMBOL(cx88_sram_channels);
EXPORT_SYMBOL(cx88_sram_channel_setup);
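The cx88-core.c hunks above drop the btcx_riscmem helpers and allocate the RISC program directly with pci_zalloc_consistent(), freeing it again from the new buf_finish callbacks. A minimal sketch of that allocate/free pattern follows; the _sketch struct only mirrors the fields the patch relies on (size, cpu, jmp, dma) and is not the driver's real type.

/*
 * Sketch only: the allocate/free pattern behind the new cx88_riscmem
 * handling above.
 */
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>

struct cx88_riscmem_sketch {
	unsigned int size;
	__le32 *cpu;
	__le32 *jmp;
	dma_addr_t dma;
};

static int risc_alloc_sketch(struct pci_dev *pci,
			     struct cx88_riscmem_sketch *risc,
			     unsigned int instructions)
{
	/* every RISC instruction is two 32-bit words, i.e. 8 bytes */
	risc->size = instructions * 8;
	risc->dma = 0;
	risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
	return risc->cpu ? 0 : -ENOMEM;
}

static void risc_free_sketch(struct pci_dev *pci,
			     struct cx88_riscmem_sketch *risc)
{
	/* mirrors the buf_finish() callbacks added by this patch */
	if (risc->cpu)
		pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
	memset(risc, 0, sizeof(*risc));
}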
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 053ed1ba1d85..5780e2f013b4 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -82,43 +82,87 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* ------------------------------------------------------------------ */
-static int dvb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx8802_dev *dev = q->priv_data;
+ struct cx8802_dev *dev = q->drv_priv;
+ *num_planes = 1;
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = dvb_buf_tscnt;
-
- *size = dev->ts_packet_size * dev->ts_packet_count;
- *count = dvb_buf_tscnt;
+ sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
+ alloc_ctxs[0] = dev->alloc_ctx;
+ *num_buffers = dvb_buf_tscnt;
return 0;
}
-static int dvb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+
+ return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
+}
+
+static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx8802_dev *dev = q->priv_data;
- return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb,field);
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_riscmem *risc = &buf->risc;
+
+ if (risc->cpu)
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ memset(risc, 0, sizeof(*risc));
}
-static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx8802_dev *dev = q->priv_data;
- cx8802_buf_queue(dev, (struct cx88_buffer*)vb);
+ struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+
+ cx8802_buf_queue(dev, buf);
}
-static void dvb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- cx88_free_buffer(q, (struct cx88_buffer*)vb);
+ struct cx8802_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->mpegq;
+ struct cx88_buffer *buf;
+
+ buf = list_entry(dmaq->active.next, struct cx88_buffer, list);
+ cx8802_start_dma(dev, dmaq, buf);
+ return 0;
}
-static const struct videobuf_queue_ops dvb_qops = {
- .buf_setup = dvb_buf_setup,
- .buf_prepare = dvb_buf_prepare,
- .buf_queue = dvb_buf_queue,
- .buf_release = dvb_buf_release,
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct cx8802_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->mpegq;
+ unsigned long flags;
+
+ cx8802_cancel_buffers(dev);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static struct vb2_ops dvb_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
};
/* ------------------------------------------------------------------ */
@@ -130,7 +174,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
int ret = 0;
int fe_id;
- fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe);
+ fe_id = vb2_dvb_find_frontend(&dev->frontends, fe);
if (!fe_id) {
printk(KERN_ERR "%s() No frontend found\n", __func__);
return -EINVAL;
@@ -154,8 +198,8 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
{
- struct videobuf_dvb_frontends *f;
- struct videobuf_dvb_frontend *fe;
+ struct vb2_dvb_frontends *f;
+ struct vb2_dvb_frontend *fe;
if (!core->dvbdev)
return;
@@ -166,9 +210,9 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
return;
if (f->gate <= 1) /* undefined or fe0 */
- fe = videobuf_dvb_get_frontend(f, 1);
+ fe = vb2_dvb_get_frontend(f, 1);
else
- fe = videobuf_dvb_get_frontend(f, f->gate);
+ fe = vb2_dvb_get_frontend(f, f->gate);
if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
@@ -565,7 +609,7 @@ static const struct xc5000_config dvico_fusionhdtv7_tuner_config = {
static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
{
struct dvb_frontend *fe;
- struct videobuf_dvb_frontend *fe0 = NULL;
+ struct vb2_dvb_frontend *fe0 = NULL;
struct xc2028_ctrl ctl;
struct xc2028_config cfg = {
.i2c_adap = &dev->core->i2c_adap,
@@ -574,7 +618,7 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
};
/* Get the first frontend */
- fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
return -EINVAL;
@@ -611,10 +655,10 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
{
struct dvb_frontend *fe;
- struct videobuf_dvb_frontend *fe0 = NULL;
+ struct vb2_dvb_frontend *fe0 = NULL;
/* Get the first frontend */
- fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
return -EINVAL;
@@ -745,7 +789,7 @@ static const struct stv0288_config tevii_tuner_earda_config = {
static int cx8802_alloc_frontends(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
- struct videobuf_dvb_frontend *fe = NULL;
+ struct vb2_dvb_frontend *fe = NULL;
int i;
mutex_init(&dev->frontends.lock);
@@ -757,10 +801,10 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
core->board.num_frontends);
for (i = 1; i <= core->board.num_frontends; i++) {
- fe = videobuf_dvb_alloc_frontend(&dev->frontends, i);
+ fe = vb2_dvb_alloc_frontend(&dev->frontends, i);
if (!fe) {
printk(KERN_ERR "%s() failed to alloc\n", __func__);
- videobuf_dvb_dealloc_frontends(&dev->frontends);
+ vb2_dvb_dealloc_frontends(&dev->frontends);
return -ENOMEM;
}
}
@@ -958,7 +1002,7 @@ static const struct stv0299_config samsung_stv0299_config = {
static int dvb_register(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
- struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
+ struct vb2_dvb_frontend *fe0, *fe1 = NULL;
int mfe_shared = 0; /* bus not shared by default */
int res = -EINVAL;
@@ -968,7 +1012,7 @@ static int dvb_register(struct cx8802_dev *dev)
}
/* Get the first frontend */
- fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
goto frontend_detach;
@@ -1046,7 +1090,7 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
}
/* MFE frontend 2 */
- fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
+ fe1 = vb2_dvb_get_frontend(&dev->frontends, 2);
if (!fe1)
goto frontend_detach;
/* DVB-T init */
@@ -1415,7 +1459,7 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
}
/* MFE frontend 2 */
- fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
+ fe1 = vb2_dvb_get_frontend(&dev->frontends, 2);
if (!fe1)
goto frontend_detach;
/* DVB-T Init */
@@ -1594,7 +1638,7 @@ static int dvb_register(struct cx8802_dev *dev)
call_all(core, core, s_power, 0);
/* register everything */
- res = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+ res = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
&dev->pci->dev, adapter_nr, mfe_shared);
if (res)
goto frontend_detach;
@@ -1602,7 +1646,7 @@ static int dvb_register(struct cx8802_dev *dev)
frontend_detach:
core->gate_ctrl = NULL;
- videobuf_dvb_dealloc_frontends(&dev->frontends);
+ vb2_dvb_dealloc_frontends(&dev->frontends);
return res;
}
@@ -1697,7 +1741,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
int err;
- struct videobuf_dvb_frontend *fe;
+ struct vb2_dvb_frontend *fe;
int i;
dprintk( 1, "%s\n", __func__);
@@ -1726,19 +1770,31 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
err = -ENODEV;
for (i = 1; i <= core->board.num_frontends; i++) {
- fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i);
+ struct vb2_queue *q;
+
+ fe = vb2_dvb_get_frontend(&core->dvbdev->frontends, i);
if (fe == NULL) {
printk(KERN_ERR "%s() failed to get frontend(%d)\n",
__func__, i);
goto fail_probe;
}
- videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_TOP,
- sizeof(struct cx88_buffer),
- dev, NULL);
- /* init struct videobuf_dvb */
+ q = &fe->dvb.dvbq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx88_buffer);
+ q->ops = &dvb_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &core->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_probe;
+
+ /* init struct vb2_dvb */
fe->dvb.name = dev->core->name;
}
@@ -1749,7 +1805,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
core->name, err);
return err;
fail_probe:
- videobuf_dvb_dealloc_frontends(&core->dvbdev->frontends);
+ vb2_dvb_dealloc_frontends(&core->dvbdev->frontends);
fail_core:
return err;
}
@@ -1761,7 +1817,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
dprintk( 1, "%s\n", __func__);
- videobuf_dvb_unregister_bus(&dev->frontends);
+ vb2_dvb_unregister_bus(&dev->frontends);
vp3054_i2c_remove(dev);
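The cx8802_dvb_probe() hunk above replaces videobuf_queue_sg_init() with an open-coded vb2_queue setup. A condensed sketch of that wiring, assuming the driver's cx88.h for struct cx88_buffer; the ops table and lock are taken as parameters here instead of from cx8802_dev/cx88_core.

/* Sketch of the vb2 queue wiring open-coded in cx8802_dvb_probe() above. */
#include "cx88.h"
#include <media/videobuf2-dma-sg.h>

static int dvb_vb2_queue_init_sketch(struct vb2_queue *q, void *drv_priv,
				     const struct vb2_ops *ops,
				     struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
	q->gfp_flags = GFP_DMA32;
	q->min_buffers_needed = 2;	/* start_streaming needs two queued buffers */
	q->drv_priv = drv_priv;
	q->buf_struct_size = sizeof(struct cx88_buffer);
	q->ops = ops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;

	return vb2_queue_init(q);
}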
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 74b7b8614c23..1c1f69e6b0b9 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -86,21 +86,21 @@ static LIST_HEAD(cx8802_devlist);
static DEFINE_MUTEX(cx8802_mutex);
/* ------------------------------------------------------------------ */
-static int cx8802_start_dma(struct cx8802_dev *dev,
+int cx8802_start_dma(struct cx8802_dev *dev,
struct cx88_dmaqueue *q,
struct cx88_buffer *buf)
{
struct cx88_core *core = dev->core;
dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
- buf->vb.width, buf->vb.height, buf->vb.field);
+ core->width, core->height, core->field);
/* setup fifo + format */
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
dev->ts_packet_size, buf->risc.dma);
/* write TS length to chip */
- cx_write(MO_TS_LNGTH, buf->vb.width);
+ cx_write(MO_TS_LNGTH, dev->ts_packet_size);
/* FIXME: this needs a review.
* also: move to cx88-blackbird + cx88-dvb source files? */
@@ -210,83 +210,40 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
dprintk( 1, "cx8802_restart_queue\n" );
if (list_empty(&q->active))
- {
- struct cx88_buffer *prev;
- prev = NULL;
-
- dprintk(1, "cx8802_restart_queue: queue is empty\n" );
-
- for (;;) {
- if (list_empty(&q->queued))
- return 0;
- buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
- if (NULL == prev) {
- list_move_tail(&buf->vb.queue, &q->active);
- cx8802_start_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(1,"[%p/%d] restart_queue - first active\n",
- buf,buf->vb.i);
-
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk(1,"[%p/%d] restart_queue - move to active\n",
- buf,buf->vb.i);
- } else {
- return 0;
- }
- prev = buf;
- }
return 0;
- }
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
cx8802_start_dma(dev, q, buf);
- list_for_each_entry(buf, &q->active, vb.queue)
+ list_for_each_entry(buf, &q->active, list)
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
/* ------------------------------------------------------------------ */
-int cx8802_buf_prepare(struct videobuf_queue *q, struct cx8802_dev *dev,
- struct cx88_buffer *buf, enum v4l2_field field)
+int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
+ struct cx88_buffer *buf)
{
int size = dev->ts_packet_size * dev->ts_packet_count;
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
+ struct cx88_riscmem *risc = &buf->risc;
int rc;
- dprintk(1, "%s: %p\n", __func__, buf);
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
+ if (vb2_plane_size(&buf->vb, 0) < size)
return -EINVAL;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- buf->vb.width = dev->ts_packet_size;
- buf->vb.height = dev->ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field /*V4L2_FIELD_TOP*/;
-
- if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL)))
- goto fail;
- cx88_risc_databuffer(dev->pci, &buf->risc,
- dma->sglist,
- buf->vb.width, buf->vb.height, 0);
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
+ dev->ts_packet_size, dev->ts_packet_count, 0);
+ if (rc) {
+ if (risc->cpu)
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ memset(risc, 0, sizeof(*risc));
+ return rc;
}
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
-
- fail:
- cx88_free_buffer(q,buf);
- return rc;
}
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
@@ -295,35 +252,33 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
struct cx88_dmaqueue *cx88q = &dev->mpegq;
dprintk( 1, "cx8802_buf_queue\n" );
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
+ /* add jump to start */
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
if (list_empty(&cx88q->active)) {
dprintk( 1, "queue is empty - first active\n" );
- list_add_tail(&buf->vb.queue,&cx88q->active);
- cx8802_start_dma(dev, cx88q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
+ list_add_tail(&buf->list, &cx88q->active);
buf->count = cx88q->count++;
- mod_timer(&cx88q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(1,"[%p/%d] %s - first active\n",
- buf, buf->vb.i, __func__);
+ buf, buf->vb.v4l2_buf.index, __func__);
} else {
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
dprintk( 1, "queue is not empty - append to active\n" );
- prev = list_entry(cx88q->active.prev, struct cx88_buffer, vb.queue);
- list_add_tail(&buf->vb.queue,&cx88q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
+ prev = list_entry(cx88q->active.prev, struct cx88_buffer, list);
+ list_add_tail(&buf->list, &cx88q->active);
buf->count = cx88q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk( 1, "[%p/%d] %s - append to active\n",
- buf, buf->vb.i, __func__);
+ buf, buf->vb.v4l2_buf.index, __func__);
}
}
/* ----------------------------------------------------------- */
-static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int restart)
+static void do_cancel_buffers(struct cx8802_dev *dev)
{
struct cx88_dmaqueue *q = &dev->mpegq;
struct cx88_buffer *buf;
@@ -331,41 +286,18 @@ static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int re
spin_lock_irqsave(&dev->slock,flags);
while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- dprintk(1,"[%p/%d] %s - dma=0x%08lx\n",
- buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
- }
- if (restart)
- {
- dprintk(1, "restarting queue\n" );
- cx8802_restart_queue(dev,q);
+ buf = list_entry(q->active.next, struct cx88_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock,flags);
}
void cx8802_cancel_buffers(struct cx8802_dev *dev)
{
- struct cx88_dmaqueue *q = &dev->mpegq;
-
dprintk( 1, "cx8802_cancel_buffers" );
- del_timer_sync(&q->timeout);
- cx8802_stop_dma(dev);
- do_cancel_buffers(dev,"cancel",0);
-}
-
-static void cx8802_timeout(unsigned long data)
-{
- struct cx8802_dev *dev = (struct cx8802_dev*)data;
-
- dprintk(1, "%s\n",__func__);
-
- if (debug)
- cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
cx8802_stop_dma(dev);
- do_cancel_buffers(dev,"timeout",1);
+ do_cancel_buffers(dev);
}
static const char * cx88_mpeg_irqs[32] = {
@@ -411,19 +343,11 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
spin_unlock(&dev->slock);
}
- /* risc2 y */
- if (status & 0x10) {
- spin_lock(&dev->slock);
- cx8802_restart_queue(dev,&dev->mpegq);
- spin_unlock(&dev->slock);
- }
-
/* other general errors */
if (status & 0x1f0100) {
dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 );
spin_lock(&dev->slock);
cx8802_stop_dma(dev);
- cx8802_restart_queue(dev,&dev->mpegq);
spin_unlock(&dev->slock);
}
}
@@ -490,12 +414,6 @@ static int cx8802_init_common(struct cx8802_dev *dev)
/* init dma queue */
INIT_LIST_HEAD(&dev->mpegq.active);
- INIT_LIST_HEAD(&dev->mpegq.queued);
- dev->mpegq.timeout.function = cx8802_timeout;
- dev->mpegq.timeout.data = (unsigned long)dev;
- init_timer(&dev->mpegq.timeout);
- cx88_risc_stopper(dev->pci,&dev->mpegq.stopper,
- MO_TS_DMACNTRL,0x11,0x00);
/* get irq */
err = request_irq(dev->pci->irq, cx8802_irq,
@@ -520,9 +438,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
/* unregister stuff */
free_irq(dev->pci->irq, dev);
-
- /* free memory */
- btcx_riscmem_free(dev->pci,&dev->mpegq.stopper);
}
/* ----------------------------------------------------------- */
@@ -539,7 +454,6 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
dprintk( 2, "suspend\n" );
printk("%s: suspend mpeg\n", core->name);
cx8802_stop_dma(dev);
- del_timer(&dev->mpegq.timeout);
}
spin_unlock_irqrestore(&dev->slock, flags);
@@ -815,6 +729,11 @@ static int cx8802_probe(struct pci_dev *pci_dev,
if (NULL == dev)
goto fail_core;
dev->pci = pci_dev;
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_core;
+ }
dev->core = core;
/* Maintain a reference so cx88-video can query the 8802 device. */
@@ -834,6 +753,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
return 0;
fail_free:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
fail_core:
core->dvbdev = NULL;
@@ -880,6 +800,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
/* common */
cx8802_fini_common(dev);
cx88_core_put(dev->core,dev->pci);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
@@ -907,6 +828,7 @@ module_pci_driver(cx8802_pci_driver);
EXPORT_SYMBOL(cx8802_buf_prepare);
EXPORT_SYMBOL(cx8802_buf_queue);
EXPORT_SYMBOL(cx8802_cancel_buffers);
+EXPORT_SYMBOL(cx8802_start_dma);
EXPORT_SYMBOL(cx8802_register_driver);
EXPORT_SYMBOL(cx8802_unregister_driver);
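With the stopper program and queue timeout gone, each buffer's RISC program now starts with a placeholder jump (cpu[0..1]) and ends with a jump (jmp[0..1]) back to its own start, so a lone buffer loops on itself; an appended buffer additionally raises RISC_IRQ1 and is linked in by patching the previous buffer's trailing jump. A sketch of that chaining, assuming cx88.h for the types and RISC_* constants and omitting the DMA start done for the first buffer.

/*
 * Sketch of the buffer chaining used by cx8802_buf_queue() (and its
 * vbi/video counterparts) above.
 */
#include "cx88.h"

static void chain_buffer_sketch(struct cx88_dmaqueue *q,
				struct cx88_buffer *buf)
{
	struct cx88_buffer *prev;

	/* point the leading placeholder jump at the real program start */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
	/* trailing jump loops back to the same spot: a lone buffer re-runs */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);

	if (list_empty(&q->active)) {
		list_add_tail(&buf->list, &q->active);
	} else {
		/* appended buffers raise an irq when the chain reaches them */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx88_buffer, list);
		list_add_tail(&buf->list, &q->active);
		/* redirect the previous buffer's loop into the new one */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
	}
	buf->count = q->count++;
}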
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index f8f8389c0362..32eb7fdb875e 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -6,10 +6,6 @@
#include "cx88.h"
-static unsigned int vbibufs = 4;
-module_param(vbibufs,int,0644);
-MODULE_PARM_DESC(vbibufs,"number of vbi buffers, range 2-32");
-
static unsigned int vbi_debug;
module_param(vbi_debug,int,0644);
MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
@@ -22,26 +18,27 @@ MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
int cx8800_vbi_fmt (struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8800_fh *fh = priv;
- struct cx8800_dev *dev = fh->dev;
+ struct cx8800_dev *dev = video_drvdata(file);
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
f->fmt.vbi.offset = 244;
- f->fmt.vbi.count[0] = VBI_LINE_COUNT;
- f->fmt.vbi.count[1] = VBI_LINE_COUNT;
if (dev->core->tvnorm & V4L2_STD_525_60) {
/* ntsc */
f->fmt.vbi.sampling_rate = 28636363;
f->fmt.vbi.start[0] = 10;
f->fmt.vbi.start[1] = 273;
+ f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
+ f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
} else if (dev->core->tvnorm & V4L2_STD_625_50) {
/* pal */
f->fmt.vbi.sampling_rate = 35468950;
- f->fmt.vbi.start[0] = 7 -1;
- f->fmt.vbi.start[1] = 319 -1;
+ f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
+ f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
+ f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
+ f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
}
return 0;
}
@@ -54,7 +51,7 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
/* setup fifo + format */
cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
- buf->vb.width, buf->risc.dma);
+ VBI_LINE_LENGTH, buf->risc.dma);
cx_write(MO_VBOS_CONTROL, ( (1 << 18) | // comb filter delay fixup
(1 << 15) | // enable vbi capture
@@ -78,7 +75,7 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
return 0;
}
-int cx8800_stop_vbi_dma(struct cx8800_dev *dev)
+void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
struct cx88_core *core = dev->core;
@@ -91,7 +88,6 @@ int cx8800_stop_vbi_dma(struct cx8800_dev *dev)
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
cx_clear(MO_VID_INTMSK, 0x0f0088);
- return 0;
}
int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
@@ -102,144 +98,137 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
if (list_empty(&q->active))
return 0;
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
cx8800_start_vbi_dma(dev, q, buf);
- list_for_each_entry(buf, &q->active, vb.queue)
+ list_for_each_entry(buf, &q->active, list)
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
-void cx8800_vbi_timeout(unsigned long data)
-{
- struct cx8800_dev *dev = (struct cx8800_dev*)data;
- struct cx88_core *core = dev->core;
- struct cx88_dmaqueue *q = &dev->vbiq;
- struct cx88_buffer *buf;
- unsigned long flags;
-
- cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH24]);
-
- cx_clear(MO_VID_DMACNTRL, 0x88);
- cx_clear(VID_CAPTURE_CONTROL, 0x18);
-
- spin_lock_irqsave(&dev->slock,flags);
- while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", dev->core->name,
- buf, buf->vb.i, (unsigned long)buf->risc.dma);
- }
- cx8800_restart_vbi_queue(dev,q);
- spin_unlock_irqrestore(&dev->slock,flags);
-}
-
/* ------------------------------------------------------------------ */
-static int
-vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- *size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2;
- if (0 == *count)
- *count = vbibufs;
- if (*count < 2)
- *count = 2;
- if (*count > 32)
- *count = 32;
+ struct cx8800_dev *dev = q->drv_priv;
+
+ *num_planes = 1;
+ if (dev->core->tvnorm & V4L2_STD_525_60)
+ sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
+ else
+ sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
-static int
-vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx8800_fh *fh = q->priv_data;
- struct cx8800_dev *dev = fh->dev;
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+ unsigned int lines;
unsigned int size;
- int rc;
- size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
+ if (dev->core->tvnorm & V4L2_STD_525_60)
+ lines = VBI_LINE_NTSC_COUNT;
+ else
+ lines = VBI_LINE_PAL_COUNT;
+ size = lines * VBI_LINE_LENGTH * 2;
+ if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
+ vb2_set_plane_payload(vb, 0, size);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
- buf->vb.width = VBI_LINE_LENGTH;
- buf->vb.height = VBI_LINE_COUNT;
- buf->vb.size = size;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL)))
- goto fail;
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- 0, buf->vb.width * buf->vb.height,
- buf->vb.width, 0,
- buf->vb.height);
- }
- buf->vb.state = VIDEOBUF_PREPARED;
+ cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
+ 0, VBI_LINE_LENGTH * lines,
+ VBI_LINE_LENGTH, 0,
+ lines);
return 0;
+}
- fail:
- cx88_free_buffer(q,buf);
- return rc;
+static void buffer_finish(struct vb2_buffer *vb)
+{
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_riscmem *risc = &buf->risc;
+
+ if (risc->cpu)
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ memset(risc, 0, sizeof(*risc));
}
-static void
-vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
struct cx88_buffer *prev;
- struct cx8800_fh *fh = vq->priv_data;
- struct cx8800_dev *dev = fh->dev;
struct cx88_dmaqueue *q = &dev->vbiq;
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
+ /* add jump to start */
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue,&q->active);
+ list_add_tail(&buf->list, &q->active);
cx8800_start_vbi_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(2,"[%p/%d] vbi_queue - first active\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
} else {
- prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
+ prev = list_entry(q->active.prev, struct cx88_buffer, list);
+ list_add_tail(&buf->list, &q->active);
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2,"[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
}
}
-static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
+ struct cx8800_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->vbiq;
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
- cx88_free_buffer(q,buf);
+ cx8800_start_vbi_dma(dev, dmaq, buf);
+ return 0;
}
-const struct videobuf_queue_ops cx8800_vbi_qops = {
- .buf_setup = vbi_setup,
- .buf_prepare = vbi_prepare,
- .buf_queue = vbi_queue,
- .buf_release = vbi_release,
-};
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct cx8800_dev *dev = q->drv_priv;
+ struct cx88_core *core = dev->core;
+ struct cx88_dmaqueue *dmaq = &dev->vbiq;
+ unsigned long flags;
-/* ------------------------------------------------------------------ */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+ cx_clear(MO_VID_DMACNTRL, 0x11);
+ cx_clear(VID_CAPTURE_CONTROL, 0x06);
+ cx8800_stop_vbi_dma(dev);
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+const struct vb2_ops cx8800_vbi_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
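The cx88-vbi.c hunks above size buffers per TV norm instead of the old fixed VBI_LINE_COUNT. A short sketch of that calculation, assuming the VBI_LINE_* macros from the driver's cx88.h as updated by this patch; the factor of two covers both fields.

/* Sketch of the norm-dependent VBI sizing used by queue_setup() and
 * buffer_prepare() above. */
#include "cx88.h"

static unsigned int vbi_buf_size_sketch(struct cx88_core *core)
{
	unsigned int lines = (core->tvnorm & V4L2_STD_525_60) ?
			     VBI_LINE_NTSC_COUNT : VBI_LINE_PAL_COUNT;

	return lines * VBI_LINE_LENGTH * 2;
}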
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index ce27e6d4f16e..860c98fc72c7 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -70,10 +70,6 @@ static unsigned int irq_debug;
module_param(irq_debug,int,0644);
MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
-static unsigned int vid_limit = 16;
-module_param(vid_limit,int,0644);
-MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
-
#define dprintk(level,fmt, arg...) if (video_debug >= level) \
printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
@@ -297,56 +293,6 @@ enum {
CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls),
};
-/* ------------------------------------------------------------------- */
-/* resource management */
-
-static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bit)
-{
- struct cx88_core *core = dev->core;
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
-
- /* is it free? */
- mutex_lock(&core->lock);
- if (dev->resources & bit) {
- /* no, someone else uses it */
- mutex_unlock(&core->lock);
- return 0;
- }
- /* it's free, grab it */
- fh->resources |= bit;
- dev->resources |= bit;
- dprintk(1,"res: get %d\n",bit);
- mutex_unlock(&core->lock);
- return 1;
-}
-
-static
-int res_check(struct cx8800_fh *fh, unsigned int bit)
-{
- return (fh->resources & bit);
-}
-
-static
-int res_locked(struct cx8800_dev *dev, unsigned int bit)
-{
- return (dev->resources & bit);
-}
-
-static
-void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits)
-{
- struct cx88_core *core = dev->core;
- BUG_ON((fh->resources & bits) != bits);
-
- mutex_lock(&core->lock);
- fh->resources &= ~bits;
- dev->resources &= ~bits;
- dprintk(1,"res: put %d\n",bits);
- mutex_unlock(&core->lock);
-}
-
/* ------------------------------------------------------------------ */
int cx88_video_mux(struct cx88_core *core, unsigned int input)
@@ -419,8 +365,8 @@ static int start_video_dma(struct cx8800_dev *dev,
/* setup fifo + format */
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
buf->bpl, buf->risc.dma);
- cx88_set_scale(core, buf->vb.width, buf->vb.height, buf->vb.field);
- cx_write(MO_COLOR_CTRL, buf->fmt->cxformat | ColorFormatGamma);
+ cx88_set_scale(core, core->width, core->height, core->field);
+ cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma);
/* reset counter */
cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
@@ -470,433 +416,203 @@ static int restart_video_queue(struct cx8800_dev *dev,
struct cx88_dmaqueue *q)
{
struct cx88_core *core = dev->core;
- struct cx88_buffer *buf, *prev;
+ struct cx88_buffer *buf;
if (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx88_buffer, list);
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
start_video_dma(dev, q, buf);
- list_for_each_entry(buf, &q->active, vb.queue)
+ list_for_each_entry(buf, &q->active, list)
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- return 0;
- }
-
- prev = NULL;
- for (;;) {
- if (list_empty(&q->queued))
- return 0;
- buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
- if (NULL == prev) {
- list_move_tail(&buf->vb.queue, &q->active);
- start_video_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(2,"[%p/%d] restart_queue - first active\n",
- buf,buf->vb.i);
-
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk(2,"[%p/%d] restart_queue - move to active\n",
- buf,buf->vb.i);
- } else {
- return 0;
- }
- prev = buf;
}
+ return 0;
}
/* ------------------------------------------------------------------ */
-static int
-buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx8800_fh *fh = q->priv_data;
- struct cx8800_dev *dev = fh->dev;
-
- *size = dev->fmt->depth * dev->width * dev->height >> 3;
- if (0 == *count)
- *count = 32;
- if (*size * *count > vid_limit * 1024 * 1024)
- *count = (vid_limit * 1024 * 1024) / *size;
+ struct cx8800_dev *dev = q->drv_priv;
+ struct cx88_core *core = dev->core;
+
+ *num_planes = 1;
+ sizes[0] = (dev->fmt->depth * core->width * core->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
-static int
-buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx8800_fh *fh = q->priv_data;
- struct cx8800_dev *dev = fh->dev;
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_core *core = dev->core;
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
- int rc, init_buffer = 0;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
- BUG_ON(NULL == dev->fmt);
- if (dev->width < 48 || dev->width > norm_maxw(core->tvnorm) ||
- dev->height < 32 || dev->height > norm_maxh(core->tvnorm))
- return -EINVAL;
- buf->vb.size = (dev->width * dev->height * dev->fmt->depth) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- if (buf->fmt != dev->fmt ||
- buf->vb.width != dev->width ||
- buf->vb.height != dev->height ||
- buf->vb.field != field) {
- buf->fmt = dev->fmt;
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
- init_buffer = 1;
- }
+ buf->bpl = core->width * dev->fmt->depth >> 3;
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- init_buffer = 1;
- if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL)))
- goto fail;
- }
+ if (vb2_plane_size(vb, 0) < core->height * buf->bpl)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, core->height * buf->bpl);
- if (init_buffer) {
- buf->bpl = buf->vb.width * buf->fmt->depth >> 3;
- switch (buf->vb.field) {
- case V4L2_FIELD_TOP:
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, 0, UNSET,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_BOTTOM:
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, UNSET, 0,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_INTERLACED:
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, 0, buf->bpl,
- buf->bpl, buf->bpl,
- buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_TB:
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- 0, buf->bpl * (buf->vb.height >> 1),
- buf->bpl, 0,
- buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_BT:
- cx88_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- buf->bpl * (buf->vb.height >> 1), 0,
- buf->bpl, 0,
- buf->vb.height >> 1);
- break;
- default:
- BUG();
- }
+ switch (core->field) {
+ case V4L2_FIELD_TOP:
+ cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, core->height);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, core->height);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (core->height >> 1),
+ buf->bpl, 0,
+ core->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (core->height >> 1), 0,
+ buf->bpl, 0,
+ core->height >> 1);
+ break;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, buf->bpl,
+ buf->bpl, buf->bpl,
+ core->height >> 1);
+ break;
}
dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.i,
- dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
+ buf, buf->vb.v4l2_buf.index,
+ core->width, core->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
-
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
+}
- fail:
- cx88_free_buffer(q,buf);
- return rc;
+static void buffer_finish(struct vb2_buffer *vb)
+{
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
+ struct cx88_riscmem *risc = &buf->risc;
+
+ if (risc->cpu)
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+ memset(risc, 0, sizeof(*risc));
}
-static void
-buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
+ struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
struct cx88_buffer *prev;
- struct cx8800_fh *fh = vq->priv_data;
- struct cx8800_dev *dev = fh->dev;
struct cx88_core *core = dev->core;
struct cx88_dmaqueue *q = &dev->vidq;
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
-
- if (!list_empty(&q->queued)) {
- list_add_tail(&buf->vb.queue,&q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2,"[%p/%d] buffer_queue - append to queued\n",
- buf, buf->vb.i);
+ /* add jump to start */
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
- } else if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue,&q->active);
- start_video_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
+ if (list_empty(&q->active)) {
+ list_add_tail(&buf->list, &q->active);
buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(2,"[%p/%d] buffer_queue - first active\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
} else {
- prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue);
- if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_add_tail(&buf->vb.queue,&q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- dprintk(2,"[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.i);
-
- } else {
- list_add_tail(&buf->vb.queue,&q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2,"[%p/%d] buffer_queue - first queued\n",
- buf, buf->vb.i);
- }
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
+ prev = list_entry(q->active.prev, struct cx88_buffer, list);
+ list_add_tail(&buf->list, &q->active);
+ buf->count = q->count++;
+ prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
+ buf, buf->vb.v4l2_buf.index);
}
}
-static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
+ struct cx8800_dev *dev = q->drv_priv;
+ struct cx88_dmaqueue *dmaq = &dev->vidq;
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
- cx88_free_buffer(q,buf);
+ start_video_dma(dev, dmaq, buf);
+ return 0;
}
-static const struct videobuf_queue_ops cx8800_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
-};
-
-/* ------------------------------------------------------------------ */
-
-
-/* ------------------------------------------------------------------ */
-
-static struct videobuf_queue *get_queue(struct file *file)
+static void stop_streaming(struct vb2_queue *q)
{
- struct video_device *vdev = video_devdata(file);
- struct cx8800_fh *fh = file->private_data;
+ struct cx8800_dev *dev = q->drv_priv;
+ struct cx88_core *core = dev->core;
+ struct cx88_dmaqueue *dmaq = &dev->vidq;
+ unsigned long flags;
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- return &fh->vidq;
- case VFL_TYPE_VBI:
- return &fh->vbiq;
- default:
- BUG();
+ cx_clear(MO_VID_DMACNTRL, 0x11);
+ cx_clear(VID_CAPTURE_CONTROL, 0x06);
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx88_buffer *buf = list_entry(dmaq->active.next,
+ struct cx88_buffer, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-static int get_resource(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
+static struct vb2_ops cx8800_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- return RESOURCE_VIDEO;
- case VFL_TYPE_VBI:
- return RESOURCE_VBI;
- default:
- BUG();
- }
-}
+/* ------------------------------------------------------------------ */
-static int video_open(struct file *file)
+static int radio_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core = dev->core;
- struct cx8800_fh *fh;
- enum v4l2_buf_type type = 0;
- int radio = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- }
-
- dprintk(1, "open dev=%s radio=%d type=%s\n",
- video_device_node_name(vdev), radio, v4l2_type_names[type]);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh),GFP_KERNEL);
- if (unlikely(!fh))
- return -ENOMEM;
+ int ret = v4l2_fh_open(file);
- v4l2_fh_init(&fh->fh, vdev);
- file->private_data = fh;
- fh->dev = dev;
+ if (ret)
+ return ret;
- mutex_lock(&core->lock);
-
- videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx88_buffer),
- fh, NULL);
- videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct cx88_buffer),
- fh, NULL);
-
- if (vdev->vfl_type == VFL_TYPE_RADIO) {
- dprintk(1,"video_open: setting radio device\n");
- cx_write(MO_GP3_IO, core->board.radio.gpio3);
- cx_write(MO_GP0_IO, core->board.radio.gpio0);
- cx_write(MO_GP1_IO, core->board.radio.gpio1);
- cx_write(MO_GP2_IO, core->board.radio.gpio2);
- if (core->board.radio.audioroute) {
- if (core->sd_wm8775) {
- call_all(core, audio, s_routing,
+ cx_write(MO_GP3_IO, core->board.radio.gpio3);
+ cx_write(MO_GP0_IO, core->board.radio.gpio0);
+ cx_write(MO_GP1_IO, core->board.radio.gpio1);
+ cx_write(MO_GP2_IO, core->board.radio.gpio2);
+ if (core->board.radio.audioroute) {
+ if (core->sd_wm8775) {
+ call_all(core, audio, s_routing,
core->board.radio.audioroute, 0, 0);
- }
- /* "I2S ADC mode" */
- core->tvaudio = WW_I2SADC;
- cx88_set_tvaudio(core);
- } else {
- /* FM Mode */
- core->tvaudio = WW_FM;
- cx88_set_tvaudio(core);
- cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1);
}
- call_all(core, tuner, s_radio);
- }
-
- core->users++;
- mutex_unlock(&core->lock);
- v4l2_fh_add(&fh->fh);
-
- return 0;
-}
-
-static ssize_t
-video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx8800_fh *fh = file->private_data;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- if (res_locked(fh->dev,RESOURCE_VIDEO))
- return -EBUSY;
- return videobuf_read_one(&fh->vidq, data, count, ppos,
- file->f_flags & O_NONBLOCK);
- case VFL_TYPE_VBI:
- if (!res_get(fh->dev,fh,RESOURCE_VBI))
- return -EBUSY;
- return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
- file->f_flags & O_NONBLOCK);
- default:
- BUG();
- }
-}
-
-static unsigned int
-video_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx8800_fh *fh = file->private_data;
- struct cx88_buffer *buf;
- unsigned int rc = v4l2_ctrl_poll(file, wait);
-
- if (vdev->vfl_type == VFL_TYPE_VBI) {
- if (!res_get(fh->dev,fh,RESOURCE_VBI))
- return rc | POLLERR;
- return rc | videobuf_poll_stream(file, &fh->vbiq, wait);
- }
- mutex_lock(&fh->vidq.vb_lock);
- if (res_check(fh,RESOURCE_VIDEO)) {
- /* streaming capture */
- if (list_empty(&fh->vidq.stream))
- goto done;
- buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream);
+ /* "I2S ADC mode" */
+ core->tvaudio = WW_I2SADC;
+ cx88_set_tvaudio(core);
} else {
- /* read() capture */
- buf = (struct cx88_buffer*)fh->vidq.read_buf;
- if (NULL == buf)
- goto done;
- }
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- rc |= POLLIN|POLLRDNORM;
-done:
- mutex_unlock(&fh->vidq.vb_lock);
- return rc;
-}
-
-static int video_release(struct file *file)
-{
- struct cx8800_fh *fh = file->private_data;
- struct cx8800_dev *dev = fh->dev;
-
- /* turn off overlay */
- if (res_check(fh, RESOURCE_OVERLAY)) {
- /* FIXME */
- res_free(dev,fh,RESOURCE_OVERLAY);
- }
-
- /* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO)) {
- videobuf_queue_cancel(&fh->vidq);
- res_free(dev,fh,RESOURCE_VIDEO);
- }
- if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq,fh->vidq.read_buf);
- kfree(fh->vidq.read_buf);
+ /* FM Mode */
+ core->tvaudio = WW_FM;
+ cx88_set_tvaudio(core);
+ cx88_set_stereo(core, V4L2_TUNER_MODE_STEREO, 1);
}
-
- /* stop vbi capture */
- if (res_check(fh, RESOURCE_VBI)) {
- videobuf_stop(&fh->vbiq);
- res_free(dev,fh,RESOURCE_VBI);
- }
-
- videobuf_mmap_free(&fh->vidq);
- videobuf_mmap_free(&fh->vbiq);
-
- mutex_lock(&dev->core->lock);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- file->private_data = NULL;
- kfree(fh);
-
- dev->core->users--;
- if (!dev->core->users)
- call_all(dev->core, core, s_power, 0);
- mutex_unlock(&dev->core->lock);
-
+ call_all(core, tuner, s_radio);
return 0;
}
-static int
-video_mmap(struct file *file, struct vm_area_struct * vma)
-{
- return videobuf_mmap_mapper(get_queue(file), vma);
-}
-
/* ------------------------------------------------------------------ */
/* VIDEO CTRL IOCTLS */
@@ -999,12 +715,12 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8800_fh *fh = priv;
- struct cx8800_dev *dev = fh->dev;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
- f->fmt.pix.width = dev->width;
- f->fmt.pix.height = dev->height;
- f->fmt.pix.field = fh->vidq.field;
+ f->fmt.pix.width = core->width;
+ f->fmt.pix.height = core->height;
+ f->fmt.pix.field = core->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -1017,7 +733,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
const struct cx8800_fmt *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1026,30 +743,30 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
if (NULL == fmt)
return -EINVAL;
- field = f->fmt.pix.field;
- maxw = norm_maxw(core->tvnorm);
- maxh = norm_maxh(core->tvnorm);
+ maxw = norm_maxw(core->tvnorm);
+ maxh = norm_maxh(core->tvnorm);
- if (V4L2_FIELD_ANY == field) {
- field = (f->fmt.pix.height > maxh/2)
- ? V4L2_FIELD_INTERLACED
- : V4L2_FIELD_BOTTOM;
- }
+ field = f->fmt.pix.field;
switch (field) {
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
- maxh = maxh / 2;
- break;
case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_SEQ_TB:
break;
default:
- return -EINVAL;
+ field = (f->fmt.pix.height > maxh / 2)
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_BOTTOM;
+ break;
}
+ if (V4L2_FIELD_HAS_T_OR_B(field))
+ maxh /= 2;
- f->fmt.pix.field = field;
v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
&f->fmt.pix.height, 32, maxh, 0, 0);
+ f->fmt.pix.field = field;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
@@ -1061,16 +778,20 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx8800_fh *fh = priv;
- struct cx8800_dev *dev = fh->dev;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
int err = vidioc_try_fmt_vid_cap (file,priv,f);
if (0 != err)
return err;
- dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- dev->width = f->fmt.pix.width;
- dev->height = f->fmt.pix.height;
- fh->vidq.field = f->fmt.pix.field;
+ if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq))
+ return -EBUSY;
+ if (core->dvbdev && vb2_is_busy(&core->dvbdev->vb2_mpegq))
+ return -EBUSY;
+ dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ core->width = f->fmt.pix.width;
+ core->height = f->fmt.pix.height;
+ core->field = f->fmt.pix.field;
return 0;
}
@@ -1104,8 +825,8 @@ EXPORT_SYMBOL(cx88_querycap);
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev;
- struct cx88_core *core = dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
strcpy(cap->driver, "cx8800");
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
@@ -1125,64 +846,10 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
-{
- return videobuf_reqbufs(get_queue(file), p);
-}
-
-static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- return videobuf_querybuf(get_queue(file), p);
-}
-
-static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- return videobuf_qbuf(get_queue(file), p);
-}
-
-static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
-{
- return videobuf_dqbuf(get_queue(file), p,
- file->f_flags & O_NONBLOCK);
-}
-
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx8800_fh *fh = priv;
- struct cx8800_dev *dev = fh->dev;
-
- if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
- return -EINVAL;
-
- if (unlikely(!res_get(dev, fh, get_resource(file))))
- return -EBUSY;
- return videobuf_streamon(get_queue(file));
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx8800_fh *fh = priv;
- struct cx8800_dev *dev = fh->dev;
- int err, res;
-
- if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
- return -EINVAL;
-
- res = get_resource(file);
- err = videobuf_streamoff(get_queue(file));
- if (err < 0)
- return err;
- res_free(dev,fh,res);
- return 0;
-}
-
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
*tvnorm = core->tvnorm;
return 0;
@@ -1190,13 +857,10 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
-
- mutex_lock(&core->lock);
- cx88_set_tvnorm(core, tvnorms);
- mutex_unlock(&core->lock);
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
- return 0;
+ return cx88_set_tvnorm(core, tvnorms);
}
/* only one input in this sample driver */
@@ -1233,13 +897,15 @@ EXPORT_SYMBOL(cx88_enum_input);
static int vidioc_enum_input (struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
return cx88_enum_input (core,i);
}
static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
*i = core->input;
return 0;
@@ -1247,24 +913,24 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (i >= 4)
return -EINVAL;
if (0 == INPUT(i).type)
return -EINVAL;
- mutex_lock(&core->lock);
cx88_newstation(core);
cx88_video_mux(core,i);
- mutex_unlock(&core->lock);
return 0;
}
static int vidioc_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
u32 reg;
if (unlikely(UNSET == core->board.tuner_type))
@@ -1286,7 +952,8 @@ static int vidioc_g_tuner (struct file *file, void *priv,
static int vidioc_s_tuner (struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (UNSET == core->board.tuner_type)
return -EINVAL;
@@ -1300,8 +967,8 @@ static int vidioc_s_tuner (struct file *file, void *priv,
static int vidioc_g_frequency (struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx8800_fh *fh = priv;
- struct cx88_core *core = fh->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
@@ -1325,7 +992,6 @@ int cx88_set_freq (struct cx88_core *core,
if (unlikely(f->tuner != 0))
return -EINVAL;
- mutex_lock(&core->lock);
cx88_newstation(core);
call_all(core, tuner, s_frequency, f);
call_all(core, tuner, g_frequency, &new_freq);
@@ -1335,8 +1001,6 @@ int cx88_set_freq (struct cx88_core *core,
msleep (10);
cx88_set_tvaudio(core);
- mutex_unlock(&core->lock);
-
return 0;
}
EXPORT_SYMBOL(cx88_set_freq);
@@ -1344,8 +1008,8 @@ EXPORT_SYMBOL(cx88_set_freq);
static int vidioc_s_frequency (struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx8800_fh *fh = priv;
- struct cx88_core *core = fh->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
return cx88_set_freq(core, f);
}
@@ -1354,7 +1018,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
static int vidioc_g_register (struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
/* cx2388x has a 24-bit register space */
reg->val = cx_read(reg->reg & 0xfffffc);
@@ -1365,7 +1030,8 @@ static int vidioc_g_register (struct file *file, void *fh,
static int vidioc_s_register (struct file *file, void *fh,
const struct v4l2_dbg_register *reg)
{
- struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
cx_write(reg->reg & 0xfffffc, reg->val);
return 0;
@@ -1379,7 +1045,8 @@ static int vidioc_s_register (struct file *file, void *fh,
static int radio_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (unlikely(t->index > 0))
return -EINVAL;
@@ -1393,7 +1060,8 @@ static int radio_g_tuner (struct file *file, void *priv,
static int radio_s_tuner (struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+ struct cx8800_dev *dev = video_drvdata(file);
+ struct cx88_core *core = dev->core;
if (0 != t->index)
return -EINVAL;
@@ -1404,32 +1072,6 @@ static int radio_s_tuner (struct file *file, void *priv,
/* ----------------------------------------------------------- */
-static void cx8800_vid_timeout(unsigned long data)
-{
- struct cx8800_dev *dev = (struct cx8800_dev*)data;
- struct cx88_core *core = dev->core;
- struct cx88_dmaqueue *q = &dev->vidq;
- struct cx88_buffer *buf;
- unsigned long flags;
-
- cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
-
- cx_clear(MO_VID_DMACNTRL, 0x11);
- cx_clear(VID_CAPTURE_CONTROL, 0x06);
-
- spin_lock_irqsave(&dev->slock,flags);
- while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", core->name,
- buf, buf->vb.i, (unsigned long)buf->risc.dma);
- }
- restart_video_queue(dev,q);
- spin_unlock_irqrestore(&dev->slock,flags);
-}
-
static const char *cx88_vid_irqs[32] = {
"y_risci1", "u_risci1", "v_risci1", "vbi_risc1",
"y_risci2", "u_risci2", "v_risci2", "vbi_risc2",
@@ -1476,22 +1118,6 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
cx88_wakeup(core, &dev->vbiq, count);
spin_unlock(&dev->slock);
}
-
- /* risc2 y */
- if (status & 0x10) {
- dprintk(2,"stopper video\n");
- spin_lock(&dev->slock);
- restart_video_queue(dev,&dev->vidq);
- spin_unlock(&dev->slock);
- }
-
- /* risc2 vbi */
- if (status & 0x80) {
- dprintk(2,"stopper vbi\n");
- spin_lock(&dev->slock);
- cx8800_restart_vbi_queue(dev,&dev->vbiq);
- spin_unlock(&dev->slock);
- }
}
static irqreturn_t cx8800_irq(int irq, void *dev_id)
@@ -1530,11 +1156,11 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
static const struct v4l2_file_operations video_fops =
{
.owner = THIS_MODULE,
- .open = video_open,
- .release = video_release,
- .read = video_read,
- .poll = video_poll,
- .mmap = video_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1544,17 +1170,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1579,17 +1205,17 @@ static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
.vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt,
.vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt,
.vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1610,9 +1236,9 @@ static const struct video_device cx8800_vbi_template = {
static const struct v4l2_file_operations radio_fops =
{
.owner = THIS_MODULE,
- .open = video_open,
+ .open = radio_open,
.poll = v4l2_ctrl_poll,
- .release = video_release,
+ .release = v4l2_fh_release,
.unlocked_ioctl = video_ioctl2,
};
@@ -1676,6 +1302,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
{
struct cx8800_dev *dev;
struct cx88_core *core;
+ struct vb2_queue *q;
int err;
int i;
@@ -1710,28 +1337,21 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
err = -EIO;
goto fail_core;
}
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_core;
+ }
+
/* initialize driver struct */
spin_lock_init(&dev->slock);
- core->tvnorm = V4L2_STD_NTSC_M;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vidq.queued);
- dev->vidq.timeout.function = cx8800_vid_timeout;
- dev->vidq.timeout.data = (unsigned long)dev;
- init_timer(&dev->vidq.timeout);
- cx88_risc_stopper(dev->pci,&dev->vidq.stopper,
- MO_VID_DMACNTRL,0x11,0x00);
/* init vbi dma queues */
INIT_LIST_HEAD(&dev->vbiq.active);
- INIT_LIST_HEAD(&dev->vbiq.queued);
- dev->vbiq.timeout.function = cx8800_vbi_timeout;
- dev->vbiq.timeout.data = (unsigned long)dev;
- init_timer(&dev->vbiq.timeout);
- cx88_risc_stopper(dev->pci,&dev->vbiq.stopper,
- MO_VID_DMACNTRL,0x88,0x00);
/* get irq */
err = request_irq(pci_dev->irq, cx8800_irq,
@@ -1820,9 +1440,10 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* Sets device info at pci_dev */
pci_set_drvdata(pci_dev, dev);
- dev->width = 320;
- dev->height = 240;
- dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+
+ /* Maintain a reference so cx88-blackbird can query the 8800 device. */
+ core->v4ldev = dev;
/* initial device configuration */
mutex_lock(&core->lock);
@@ -1831,11 +1452,44 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
+ q = &dev->vb2_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx88_buffer);
+ q->ops = &cx8800_video_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &core->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_unreg;
+
+ q = &dev->vb2_vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx88_buffer);
+ q->ops = &cx8800_vbi_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &core->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_unreg;
+
/* register v4l devices */
dev->video_dev = cx88_vdev_init(core,dev->pci,
&cx8800_video_template,"video");
video_set_drvdata(dev->video_dev, dev);
dev->video_dev->ctrl_handler = &core->video_hdl;
+ dev->video_dev->queue = &dev->vb2_vidq;
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
@@ -1848,6 +1502,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
video_set_drvdata(dev->vbi_dev, dev);
+ dev->vbi_dev->queue = &dev->vb2_vbiq;
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
@@ -1875,7 +1530,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
}
/* start tvaudio thread */
- if (core->board.tuner_type != TUNER_ABSENT) {
+ if (core->board.tuner_type != UNSET) {
core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
if (IS_ERR(core->kthread)) {
err = PTR_ERR(core->kthread);
@@ -1892,6 +1547,8 @@ fail_unreg:
free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
fail_core:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+ core->v4ldev = NULL;
cx88_core_put(core,dev->pci);
fail_free:
kfree(dev);
@@ -1913,16 +1570,18 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
cx88_ir_stop(core);
cx88_shutdown(core); /* FIXME */
- pci_disable_device(pci_dev);
/* unregister stuff */
free_irq(pci_dev->irq, dev);
cx8800_unregister_video(dev);
+ pci_disable_device(pci_dev);
+
+ core->v4ldev = NULL;
/* free memory */
- btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
cx88_core_put(core,dev->pci);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
@@ -1938,12 +1597,10 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
if (!list_empty(&dev->vidq.active)) {
printk("%s/0: suspend video\n", core->name);
stop_video_dma(dev);
- del_timer(&dev->vidq.timeout);
}
if (!list_empty(&dev->vbiq.active)) {
printk("%s/0: suspend vbi\n", core->name);
cx8800_stop_vbi_dma(dev);
- del_timer(&dev->vbiq.timeout);
}
spin_unlock_irqrestore(&dev->slock, flags);
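
The cx88-video.c hunks above replace the driver's hand-rolled videobuf handlers with the stock videobuf2 helpers and two vb2_queue instances. Purely as a point of reference, a minimal sketch of the same queue setup for a hypothetical capture driver (mydev, my_buffer and my_qops are illustrative names, not part of this patch) could look like:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

/* Illustrative per-buffer and per-device state for the sketch. */
struct my_buffer {
	struct vb2_buffer vb;		/* must be first, as in cx88_buffer */
	struct list_head list;
};

struct mydev {
	struct mutex lock;
	struct vb2_queue vb2_vidq;
};

/* Would supply queue_setup/buf_prepare/buf_queue/start_streaming/... */
static const struct vb2_ops my_qops;

static int my_init_vidq(struct mydev *dev)
{
	struct vb2_queue *q = &dev->vb2_vidq;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
	q->gfp_flags = GFP_DMA32;		/* 32-bit DMA capable buffers */
	q->min_buffers_needed = 2;		/* start streaming with 2 queued buffers */
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_qops;
	q->mem_ops = &vb2_dma_sg_memops;	/* scatter-gather DMA allocator */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;			/* serializes ioctls and queue ops */

	return vb2_queue_init(q);
}

Once such a queue is assigned to video_dev->queue, the vb2_ioctl_reqbufs/qbuf/streamon and vb2_fop_read/poll/mmap helpers can service the ioctl and file operations directly, which is what allows the driver's own reqbufs/qbuf/streamon wrappers and open/read/poll/mmap paths to be deleted in the hunks above.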
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 28893a6b249e..7748ca9abb09 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -29,19 +29,18 @@
#include <media/v4l2-fh.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
-#include <media/videobuf-dma-sg.h>
+#include <media/videobuf2-dma-sg.h>
#include <media/cx2341x.h>
-#include <media/videobuf-dvb.h>
+#include <media/videobuf2-dvb.h>
#include <media/ir-kbd-i2c.h>
#include <media/wm8775.h>
-#include "btcx-risc.h"
#include "cx88-reg.h"
#include "tuner-xc2028.h"
#include <linux/mutex.h>
-#define CX88_VERSION "0.0.9"
+#define CX88_VERSION "1.0.0"
#define UNSET (-1U)
@@ -62,7 +61,8 @@
#define FORMAT_FLAGS_PACKED 0x01
#define FORMAT_FLAGS_PLANAR 0x02
-#define VBI_LINE_COUNT 17
+#define VBI_LINE_PAL_COUNT 18
+#define VBI_LINE_NTSC_COUNT 12
#define VBI_LINE_LENGTH 2048
#define AUD_RDS_LINES 4
@@ -95,13 +95,13 @@ enum cx8802_board_access {
static inline unsigned int norm_maxw(v4l2_std_id norm)
{
- return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768;
+ return 720;
}
static inline unsigned int norm_maxh(v4l2_std_id norm)
{
- return (norm & V4L2_STD_625_50) ? 576 : 480;
+ return (norm & V4L2_STD_525_60) ? 480 : 576;
}
/* ----------------------------------------------------------- */
@@ -311,26 +311,33 @@ enum cx88_tvaudio {
#define BUFFER_TIMEOUT msecs_to_jiffies(2000)
+struct cx88_riscmem {
+ unsigned int size;
+ __le32 *cpu;
+ __le32 *jmp;
+ dma_addr_t dma;
+};
+
/* buffer for one video frame */
struct cx88_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head list;
/* cx88 specific */
unsigned int bpl;
- struct btcx_riscmem risc;
- const struct cx8800_fmt *fmt;
+ struct cx88_riscmem risc;
u32 count;
};
struct cx88_dmaqueue {
struct list_head active;
- struct list_head queued;
- struct timer_list timeout;
- struct btcx_riscmem stopper;
u32 count;
};
+struct cx8800_dev;
+struct cx8802_dev;
+
struct cx88_core {
struct list_head devlist;
atomic_t refcount;
@@ -376,6 +383,8 @@ struct cx88_core {
/* state info */
struct task_struct *kthread;
v4l2_std_id tvnorm;
+ unsigned width, height;
+ unsigned field;
enum cx88_tvaudio tvaudio;
u32 audiomode_manual;
u32 audiomode_current;
@@ -395,11 +404,14 @@ struct cx88_core {
struct mutex lock;
/* various v4l controls */
u32 freq;
- int users;
- int mpeg_users;
- /* cx88-video needs to access cx8802 for hybrid tuner pll access. */
+ /*
+ * cx88-video needs to access cx8802 for hybrid tuner pll access and
+ * for vb2_is_busy() checks.
+ */
struct cx8802_dev *dvbdev;
+ /* cx88-blackbird needs to access cx8800 for vb2_is_busy() checks */
+ struct cx8800_dev *v4ldev;
enum cx88_board_type active_type_id;
int active_ref;
int active_fe_id;
@@ -453,24 +465,9 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
val; \
})
-struct cx8800_dev;
-struct cx8802_dev;
-
/* ----------------------------------------------------------- */
/* function 0: video stuff */
-struct cx8800_fh {
- struct v4l2_fh fh;
- struct cx8800_dev *dev;
- unsigned int resources;
-
- /* video capture */
- struct videobuf_queue vidq;
-
- /* vbi capture */
- struct videobuf_queue vbiq;
-};
-
struct cx8800_suspend_state {
int disabled;
};
@@ -488,13 +485,15 @@ struct cx8800_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
+ void *alloc_ctx;
const struct cx8800_fmt *fmt;
- unsigned int width, height;
/* capture queues */
struct cx88_dmaqueue vidq;
+ struct vb2_queue vb2_vidq;
struct cx88_dmaqueue vbiq;
+ struct vb2_queue vb2_vbiq;
/* various v4l controls */
@@ -510,12 +509,6 @@ struct cx8800_dev {
/* ----------------------------------------------------------- */
/* function 2: mpeg stuff */
-struct cx8802_fh {
- struct v4l2_fh fh;
- struct cx8802_dev *dev;
- struct videobuf_queue mpegq;
-};
-
struct cx8802_suspend_state {
int disabled;
};
@@ -556,9 +549,11 @@ struct cx8802_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
+ void *alloc_ctx;
/* dma queues */
struct cx88_dmaqueue mpegq;
+ struct vb2_queue vb2_mpegq;
u32 ts_packet_size;
u32 ts_packet_count;
@@ -570,9 +565,6 @@ struct cx8802_dev {
#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD)
struct video_device *mpeg_dev;
u32 mailbox;
- int width;
- int height;
- unsigned char mpeg_active; /* nonzero if mpeg encoder is active */
/* mpeg params */
struct cx2341x_handler cxhdl;
@@ -580,7 +572,7 @@ struct cx8802_dev {
#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
/* for dvb only */
- struct videobuf_dvb_frontends frontends;
+ struct vb2_dvb_frontends frontends;
#endif
#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
@@ -634,22 +626,17 @@ extern void cx88_shutdown(struct cx88_core *core);
extern int cx88_reset(struct cx88_core *core);
extern int
-cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines);
extern int
-cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
struct scatterlist *sglist, unsigned int bpl,
unsigned int lines, unsigned int lpi);
-extern int
-cx88_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value);
-extern void
-cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf);
extern void cx88_risc_disasm(struct cx88_core *core,
- struct btcx_riscmem *risc);
+ struct cx88_riscmem *risc);
extern int cx88_sram_channel_setup(struct cx88_core *core,
const struct sram_channel *ch,
unsigned int bpl, u32 risc);
@@ -664,7 +651,7 @@ extern struct video_device *cx88_vdev_init(struct cx88_core *core,
struct pci_dev *pci,
const struct video_device *template_,
const char *type);
-extern struct cx88_core* cx88_core_get(struct pci_dev *pci);
+extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
extern void cx88_core_put(struct cx88_core *core,
struct pci_dev *pci);
@@ -684,12 +671,10 @@ int cx8800_start_vbi_dma(struct cx8800_dev *dev,
struct cx88_dmaqueue *q,
struct cx88_buffer *buf);
*/
-int cx8800_stop_vbi_dma(struct cx8800_dev *dev);
-int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
- struct cx88_dmaqueue *q);
-void cx8800_vbi_timeout(unsigned long data);
+void cx8800_stop_vbi_dma(struct cx8800_dev *dev);
+int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
-extern const struct videobuf_queue_ops cx8800_vbi_qops;
+extern const struct vb2_ops cx8800_vbi_qops;
/* ----------------------------------------------------------- */
/* cx88-i2c.c */
@@ -739,14 +724,17 @@ extern void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
-int cx8802_buf_prepare(struct videobuf_queue *q,struct cx8802_dev *dev,
- struct cx88_buffer *buf, enum v4l2_field field);
+int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
+ struct cx88_buffer *buf);
void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf);
void cx8802_cancel_buffers(struct cx8802_dev *dev);
+int cx8802_start_dma(struct cx8802_dev *dev,
+ struct cx88_dmaqueue *q,
+ struct cx88_buffer *buf);
/* ----------------------------------------------------------- */
/* cx88-video.c*/
-int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i);
+int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i);
int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
void cx88_querycap(struct file *file, struct cx88_core *core,
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index c82e855a0814..9e3492e20766 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1118,8 +1118,7 @@ static void ddb_ports_detach(struct ddb *dev)
dvb_input_detach(port->input[1]);
break;
case DDB_PORT_CI:
- if (port->output->dev)
- dvb_unregister_device(port->output->dev);
+ dvb_unregister_device(port->output->dev);
if (port->en) {
ddb_input_stop(port->input[0]);
ddb_output_stop(port->output);
diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c
index 2b0ab26e11e8..ccf548c255f1 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.c
+++ b/drivers/media/pci/ivtv/ivtv-controls.c
@@ -69,7 +69,7 @@ static int ivtv_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
/* fix videodecoder resolution */
fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
fmt.height = cxhdl->height;
- fmt.code = V4L2_MBUS_FMT_FIXED;
+ fmt.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &fmt);
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 3e0cb77d5930..4d8ee18c3feb 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -595,7 +595,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
fmt->fmt.pix.width /= 2;
mbus_fmt.width = fmt->fmt.pix.width;
mbus_fmt.height = h;
- mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
+ mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt);
return ivtv_g_fmt_vid_cap(file, fh, fmt);
}
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 7338cb2d0a38..bee2329e0b2e 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -76,7 +76,7 @@ void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32
int i;
struct scatterlist *sg;
- for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) {
+ for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) {
dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index aeae54708811..9d9f90cb7740 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1031,9 +1031,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strcpy(cap->card, "meye");
sprintf(cap->bus_info, "PCI:%s", pci_name(meye.mchip_dev));
- cap->version = (MEYE_DRIVER_MAJORVERSION << 8) +
- MEYE_DRIVER_MINORVERSION;
-
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index db887b0c37b1..acc35b42e53c 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -109,9 +109,6 @@ struct pt1_adapter {
int sleep;
};
-#define pt1_printk(level, pt1, format, arg...) \
- dev_printk(level, &(pt1)->pdev->dev, format, ##arg)
-
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
writel(data, pt1->regs + reg * 4);
@@ -154,7 +151,7 @@ static int pt1_sync(struct pt1 *pt1)
return 0;
pt1_write_reg(pt1, 0, 0x00000008);
}
- pt1_printk(KERN_ERR, pt1, "could not sync\n");
+ dev_err(&pt1->pdev->dev, "could not sync\n");
return -EIO;
}
@@ -179,7 +176,7 @@ static int pt1_unlock(struct pt1 *pt1)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
- pt1_printk(KERN_ERR, pt1, "could not unlock\n");
+ dev_err(&pt1->pdev->dev, "could not unlock\n");
return -EIO;
}
@@ -193,7 +190,7 @@ static int pt1_reset_pci(struct pt1 *pt1)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
- pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
+ dev_err(&pt1->pdev->dev, "could not reset PCI\n");
return -EIO;
}
@@ -207,7 +204,7 @@ static int pt1_reset_ram(struct pt1 *pt1)
return 0;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
- pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
+ dev_err(&pt1->pdev->dev, "could not reset RAM\n");
return -EIO;
}
@@ -224,7 +221,7 @@ static int pt1_do_enable_ram(struct pt1 *pt1)
}
schedule_timeout_uninterruptible((HZ + 999) / 1000);
}
- pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
+ dev_err(&pt1->pdev->dev, "could not enable RAM\n");
return -EIO;
}
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 1fdeac11501a..7a37e8fe2ee2 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -255,7 +255,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
pt3_i2c_reset(pt3);
ret = pt3_init_all_demods(pt3);
if (ret < 0) {
- dev_warn(&pt3->pdev->dev, "Failed to init demod chips.");
+ dev_warn(&pt3->pdev->dev, "Failed to init demod chips\n");
return ret;
}
@@ -271,7 +271,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
init0_ter, ARRAY_SIZE(init0_ter));
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
- "demod[%d] faild in init sequence0.", i);
+ "demod[%d] failed in init sequence0\n", i);
return ret;
}
ret = fe->ops.init(fe);
@@ -282,7 +282,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
usleep_range(2000, 4000);
ret = pt3_set_tuner_power(pt3, true, false);
if (ret < 0) {
- dev_warn(&pt3->pdev->dev, "Failed to control tuner module.");
+ dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
return ret;
}
@@ -297,7 +297,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
cfg_ter, ARRAY_SIZE(cfg_ter));
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
- "demod[%d] faild in init sequence1.", i);
+ "demod[%d] failed in init sequence1\n", i);
return ret;
}
}
@@ -311,19 +311,19 @@ static int pt3_fe_init(struct pt3_board *pt3)
ret = fe->ops.tuner_ops.init(fe);
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
- "Failed to init SAT-tuner[%d].", i);
+ "Failed to init SAT-tuner[%d]\n", i);
return ret;
}
}
ret = pt3_init_all_mxl301rf(pt3);
if (ret < 0) {
- dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners.");
+ dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners\n");
return ret;
}
ret = pt3_set_tuner_power(pt3, true, true);
if (ret < 0) {
- dev_warn(&pt3->pdev->dev, "Failed to control tuner module.");
+ dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
return ret;
}
@@ -344,7 +344,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
}
if (ret < 0) {
dev_warn(&pt3->pdev->dev,
- "Failed in initial tuning of tuner[%d].", i);
+ "Failed in initial tuning of tuner[%d]\n", i);
return ret;
}
}
@@ -366,7 +366,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
fe->ops.set_lna = &pt3_set_lna;
}
if (i < PT3_NUM_FE) {
- dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby.", i);
+ dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby\n", i);
return ret;
}
return 0;
@@ -453,8 +453,8 @@ static int pt3_fetch_thread(void *data)
pt3_init_dmabuf(adap);
adap->num_discard = PT3_INITIAL_BUF_DROPS;
- dev_dbg(adap->dvb_adap.device,
- "PT3: [%s] started.\n", adap->thread->comm);
+ dev_dbg(adap->dvb_adap.device, "PT3: [%s] started\n",
+ adap->thread->comm);
set_freezable();
while (!kthread_freezable_should_stop(&was_frozen)) {
if (was_frozen)
@@ -468,8 +468,8 @@ static int pt3_fetch_thread(void *data)
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
- dev_dbg(adap->dvb_adap.device,
- "PT3: [%s] exited.\n", adap->thread->comm);
+ dev_dbg(adap->dvb_adap.device, "PT3: [%s] exited\n",
+ adap->thread->comm);
adap->thread = NULL;
return 0;
}
@@ -485,8 +485,8 @@ static int pt3_start_streaming(struct pt3_adapter *adap)
int ret = PTR_ERR(thread);
dev_warn(adap->dvb_adap.device,
- "PT3 (adap:%d, dmx:%d): failed to start kthread.\n",
- adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
+ "PT3 (adap:%d, dmx:%d): failed to start kthread\n",
+ adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
return ret;
}
adap->thread = thread;
@@ -501,8 +501,8 @@ static int pt3_stop_streaming(struct pt3_adapter *adap)
ret = pt3_stop_dma(adap);
if (ret)
dev_warn(adap->dvb_adap.device,
- "PT3: failed to stop streaming of adap:%d/FE:%d\n",
- adap->dvb_adap.num, adap->fe->id);
+ "PT3: failed to stop streaming of adap:%d/FE:%d\n",
+ adap->dvb_adap.num, adap->fe->id);
/* kill the fetching thread */
ret = kthread_stop(adap->thread);
@@ -522,8 +522,8 @@ static int pt3_start_feed(struct dvb_demux_feed *feed)
return 0;
if (adap->num_feeds != 1) {
dev_warn(adap->dvb_adap.device,
- "%s: unmatched start/stop_feed in adap:%i/dmx:%i.\n",
- __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
+ "%s: unmatched start/stop_feed in adap:%i/dmx:%i\n",
+ __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
adap->num_feeds = 1;
}
@@ -553,10 +553,9 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
struct dvb_adapter *da;
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
- if (!adap) {
- dev_err(&pt3->pdev->dev, "failed to alloc mem for adapter.\n");
+ if (!adap)
return -ENOMEM;
- }
+
pt3->adaps[index] = adap;
adap->adap_idx = index;
@@ -565,7 +564,7 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
THIS_MODULE, &pt3->pdev->dev, adapter_nr);
if (ret < 0) {
dev_err(&pt3->pdev->dev,
- "failed to register adapter dev.\n");
+ "failed to register adapter dev\n");
goto err_mem;
}
da = &adap->dvb_adap;
@@ -581,7 +580,7 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
adap->demux.stop_feed = pt3_stop_feed;
ret = dvb_dmx_init(&adap->demux);
if (ret < 0) {
- dev_err(&pt3->pdev->dev, "failed to init dmx dev.\n");
+ dev_err(&pt3->pdev->dev, "failed to init dmx dev\n");
goto err_adap;
}
@@ -589,13 +588,13 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
adap->dmxdev.demux = &adap->demux.dmx;
ret = dvb_dmxdev_init(&adap->dmxdev, da);
if (ret < 0) {
- dev_err(&pt3->pdev->dev, "failed to init dmxdev.\n");
+ dev_err(&pt3->pdev->dev, "failed to init dmxdev\n");
goto err_demux;
}
ret = pt3_alloc_dmabuf(adap);
if (ret) {
- dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers.\n");
+ dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers\n");
goto err_dmabuf;
}
@@ -695,7 +694,7 @@ static int pt3_resume(struct device *dev)
dvb_frontend_resume(adap->fe);
ret = pt3_alloc_dmabuf(adap);
if (ret) {
- dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs.\n");
+ dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs\n");
continue;
}
if (adap->num_feeds > 0)
@@ -753,15 +752,14 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret == 0)
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
else {
- dev_err(&pdev->dev, "Failed to set DMA mask.\n");
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
goto err_release_regions;
}
- dev_info(&pdev->dev, "Use 32bit DMA.\n");
+ dev_info(&pdev->dev, "Use 32bit DMA\n");
}
pt3 = kzalloc(sizeof(*pt3), GFP_KERNEL);
if (!pt3) {
- dev_err(&pdev->dev, "Failed to alloc mem for this dev.\n");
ret = -ENOMEM;
goto err_release_regions;
}
@@ -771,15 +769,15 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt3->regs[0] = pci_ioremap_bar(pdev, 0);
pt3->regs[1] = pci_ioremap_bar(pdev, 2);
if (pt3->regs[0] == NULL || pt3->regs[1] == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap.\n");
+ dev_err(&pdev->dev, "Failed to ioremap\n");
ret = -ENOMEM;
goto err_kfree;
}
ver = ioread32(pt3->regs[0] + REG_VERSION);
if ((ver >> 16) != 0x0301) {
- dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported",
- ver >> 24, (ver & 0x00ff0000) >> 16);
+ dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported\n",
+ ver >> 24, (ver & 0x00ff0000) >> 16);
ret = -ENODEV;
goto err_iounmap;
}
@@ -788,7 +786,6 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt3->i2c_buf = kmalloc(sizeof(*pt3->i2c_buf), GFP_KERNEL);
if (pt3->i2c_buf == NULL) {
- dev_err(&pdev->dev, "Failed to alloc mem for i2c.\n");
ret = -ENOMEM;
goto err_iounmap;
}
@@ -801,7 +798,7 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i2c_set_adapdata(i2c, pt3);
ret = i2c_add_adapter(i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add i2c adapter.\n");
+ dev_err(&pdev->dev, "Failed to add i2c adapter\n");
goto err_i2cbuf;
}
@@ -815,20 +812,20 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
if (i < PT3_NUM_FE) {
- dev_err(&pdev->dev, "Failed to create FE%d.\n", i);
+ dev_err(&pdev->dev, "Failed to create FE%d\n", i);
goto err_cleanup_adapters;
}
ret = pt3_fe_init(pt3);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to init frontends.\n");
+ dev_err(&pdev->dev, "Failed to init frontends\n");
i = PT3_NUM_FE - 1;
goto err_cleanup_adapters;
}
dev_info(&pdev->dev,
- "successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x).\n",
- ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff);
+ "successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x)\n",
+ ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff);
return 0;
err_cleanup_adapters:
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 40569894c1c9..ac3cd74e824e 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -173,9 +173,7 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
dev->dmasound.bufsize, dev->dmasound.blocks);
spin_unlock(&dev->slock);
- snd_pcm_stream_lock(dev->dmasound.substream);
- snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(dev->dmasound.substream);
+ snd_pcm_stop_xrun(dev->dmasound.substream);
return;
}
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 236ed725f933..a349e964e0bc 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1001,13 +1001,18 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
saa7134_board_init1(dev);
saa7134_hwinit1(dev);
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail3;
+ }
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
- goto fail3;
+ goto fail4;
}
/* wait a bit, register i2c bus */
@@ -1065,7 +1070,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- goto fail4;
+ goto fail5;
}
printk(KERN_INFO "%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
@@ -1078,7 +1083,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto fail5;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
@@ -1089,7 +1094,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto fail5;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
@@ -1103,10 +1108,12 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
request_submodules(dev);
return 0;
- fail4:
+ fail5:
saa7134_unregister_video(dev);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
+ fail4:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail3:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
@@ -1173,6 +1180,7 @@ static void saa7134_finidev(struct pci_dev *pci_dev)
/* release resources */
free_irq(pci_dev->irq, dev);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
iounmap(dev->lmmio);
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e4ea85fd1b23..594dc3ad4750 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -96,7 +96,6 @@ static struct vb2_ops saa7134_empress_qops = {
.queue_setup = saa7134_ts_queue_setup,
.buf_init = saa7134_ts_buffer_init,
.buf_prepare = saa7134_ts_buffer_prepare,
- .buf_finish = saa7134_ts_buffer_finish,
.buf_queue = saa7134_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -140,7 +139,7 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
saa_call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
@@ -157,7 +156,7 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
saa_call_all(dev, video, try_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index bd25323bd947..2709b83d57b1 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -94,7 +94,6 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
unsigned int lines, llength, size;
- int ret;
dprintk("buffer_prepare [%p]\n", buf);
@@ -108,25 +107,11 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
vb2_set_plane_payload(vb2, 0, size);
vb2->v4l2_buf.field = dev->field;
- ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
- if (!ret)
- return -EIO;
return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
saa7134_buffer_startpage(buf));
}
EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
-void saa7134_ts_buffer_finish(struct vb2_buffer *vb2)
-{
- struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
- dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-EXPORT_SYMBOL_GPL(saa7134_ts_buffer_finish);
-
int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
@@ -142,6 +127,7 @@ int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*nbuffers = 3;
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
EXPORT_SYMBOL_GPL(saa7134_ts_queue_setup);
@@ -187,7 +173,6 @@ struct vb2_ops saa7134_ts_qops = {
.queue_setup = saa7134_ts_queue_setup,
.buf_init = saa7134_ts_buffer_init,
.buf_prepare = saa7134_ts_buffer_prepare,
- .buf_finish = saa7134_ts_buffer_finish,
.buf_queue = saa7134_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 4f0b1012e4f3..5306e549e526 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -120,7 +120,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
unsigned int size;
- int ret;
if (dma->sgl->offset) {
pr_err("The buffer is not page-aligned\n");
@@ -132,9 +131,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
vb2_set_plane_payload(vb2, 0, size);
- ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
- if (!ret)
- return -EIO;
return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
saa7134_buffer_startpage(buf));
}
@@ -156,6 +152,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -169,21 +166,10 @@ static int buffer_init(struct vb2_buffer *vb2)
return 0;
}
-static void buffer_finish(struct vb2_buffer *vb2)
-{
- struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
- dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-
struct vb2_ops saa7134_vbi_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
- .buf_finish = buffer_finish,
.buf_queue = saa7134_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index fc4a427cb51f..701b52f34689 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -883,7 +883,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
unsigned int size;
- int ret;
if (dma->sgl->offset) {
pr_err("The buffer is not page-aligned\n");
@@ -896,23 +895,10 @@ static int buffer_prepare(struct vb2_buffer *vb2)
vb2_set_plane_payload(vb2, 0, size);
vb2->v4l2_buf.field = dev->field;
- ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
- if (!ret)
- return -EIO;
return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
saa7134_buffer_startpage(buf));
}
-static void buffer_finish(struct vb2_buffer *vb2)
-{
- struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
- struct saa7134_dev *dev = dmaq->dev;
- struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
- struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
- dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-
static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
@@ -932,6 +918,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -1004,7 +991,6 @@ static struct vb2_ops vb2_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
- .buf_finish = buffer_finish,
.buf_queue = saa7134_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 1a82dd07205b..8bf0553b8d2f 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -588,6 +588,7 @@ struct saa7134_dev {
/* video+ts+vbi capture */
+ void *alloc_ctx;
struct saa7134_dmaqueue video_q;
struct vb2_queue video_vbq;
struct saa7134_dmaqueue vbi_q;
@@ -814,7 +815,6 @@ void saa7134_video_fini(struct saa7134_dev *dev);
int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
-void saa7134_ts_buffer_finish(struct vb2_buffer *vb2);
int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[]);
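
The saa7134 hunks above drop the open-coded dma_map_sg()/dma_unmap_sg() calls and the buf_finish hooks: once an allocation context has been created with vb2_dma_sg_init_ctx(&pci_dev->dev) and handed back from queue_setup, the vb2 dma-sg backend maps and unmaps the scatterlist itself. A sketch of a single-plane queue_setup against the vb2 API of this kernel generation (mydev and its fields are illustrative, not part of the patch):

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

struct mydev {
	void *alloc_ctx;	/* from vb2_dma_sg_init_ctx(&pci_dev->dev) */
	unsigned int buf_size;	/* driver-computed plane size in bytes */
};

/* Hand the shared dma-sg allocation context back to vb2. */
static int my_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			  unsigned int *nbuffers, unsigned int *nplanes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	struct mydev *dev = q->drv_priv;

	*nplanes = 1;
	sizes[0] = dev->buf_size;
	alloc_ctxs[0] = dev->alloc_ctx;
	return 0;
}

buf_prepare then only has to check sizes and build the page table, and the matching vb2_dma_sg_cleanup_ctx() in the remove and error paths releases the context, as the saa7134-core.c changes above do.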
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 66696fa8341d..9bd1f73f82da 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -130,9 +130,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
goto fail2;
/* init the buffers to a known pattern, easier during debugging */
- memset_io(buf->cpu, 0xff, buf->pci_size);
+ memset(buf->cpu, 0xff, buf->pci_size);
buf->crc = crc32(0, buf->cpu, buf->actual_size);
- memset_io(buf->pt_cpu, 0xff, buf->pt_size);
+ memset(buf->pt_cpu, 0xff, buf->pt_size);
dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n",
__func__, buf, params->numpagetables);
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index 5f6f3094c44e..6c73f5b155f6 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -33,12 +33,12 @@ int saa7164_bus_setup(struct saa7164_dev *dev)
b->Type = TYPE_BUS_PCIe;
b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE;
- b->m_pdwSetRing = (u8 *)(dev->bmmio +
+ b->m_pdwSetRing = (u8 __iomem *)(dev->bmmio +
((u32)dev->busdesc.CommandRing));
b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
- b->m_pdwGetRing = (u8 *)(dev->bmmio +
+ b->m_pdwGetRing = (u8 __iomem *)(dev->bmmio +
((u32)dev->busdesc.ResponseRing));
b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
@@ -138,6 +138,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp;
u32 new_swp, space_rem;
int ret = SAA_ERR_BAD_PARAMETER;
+ u16 size;
if (!msg) {
printk(KERN_ERR "%s() !msg\n", __func__);
@@ -148,10 +149,6 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
saa7164_bus_verify(dev);
- msg->size = cpu_to_le16(msg->size);
- msg->command = cpu_to_le32(msg->command);
- msg->controlselector = cpu_to_le16(msg->controlselector);
-
if (msg->size > dev->bus.m_wMaxReqSize) {
printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n",
__func__);
@@ -169,8 +166,8 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
bytes_to_write = sizeof(*msg) + msg->size;
free_write_space = 0;
timeout = SAA_BUS_TIMEOUT;
- curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
- curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos));
+ curr_srp = saa7164_readl(bus->m_dwSetReadPos);
+ curr_swp = saa7164_readl(bus->m_dwSetWritePos);
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
@@ -203,7 +200,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
mdelay(1);
/* Check the space usage again */
- curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
+ curr_srp = saa7164_readl(bus->m_dwSetReadPos);
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
@@ -223,6 +220,16 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
dprintk(DBGLVL_BUS, "%s() bus->m_dwSizeSetRing = %x\n", __func__,
bus->m_dwSizeSetRing);
+ /*
+ * Make a copy of msg->size before it is converted to le16 since it is
+ * used in the code below.
+ */
+ size = msg->size;
+ /* Convert to le16/le32 */
+ msg->size = (__force u16)cpu_to_le16(msg->size);
+ msg->command = (__force u32)cpu_to_le32(msg->command);
+ msg->controlselector = (__force u16)cpu_to_le16(msg->controlselector);
+
/* Mental Note: line 462 tmmhComResBusPCIe.cpp */
/* Check if we're going to wrap again */
@@ -243,28 +250,28 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
dprintk(DBGLVL_BUS, "%s() tr4\n", __func__);
/* Split the msg into pieces as the ring wraps */
- memcpy(bus->m_pdwSetRing + curr_swp, msg, space_rem);
- memcpy(bus->m_pdwSetRing, (u8 *)msg + space_rem,
+ memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, space_rem);
+ memcpy_toio(bus->m_pdwSetRing, (u8 *)msg + space_rem,
sizeof(*msg) - space_rem);
- memcpy(bus->m_pdwSetRing + sizeof(*msg) - space_rem,
- buf, msg->size);
+ memcpy_toio(bus->m_pdwSetRing + sizeof(*msg) - space_rem,
+ buf, size);
} else if (space_rem == sizeof(*msg)) {
dprintk(DBGLVL_BUS, "%s() tr5\n", __func__);
/* Additional data at the beginning of the ring */
- memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
- memcpy(bus->m_pdwSetRing, buf, msg->size);
+ memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
+ memcpy_toio(bus->m_pdwSetRing, buf, size);
} else {
/* Additional data wraps around the ring */
- memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
- if (msg->size > 0) {
- memcpy(bus->m_pdwSetRing + curr_swp +
+ memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
+ if (size > 0) {
+ memcpy_toio(bus->m_pdwSetRing + curr_swp +
sizeof(*msg), buf, space_rem -
sizeof(*msg));
- memcpy(bus->m_pdwSetRing, (u8 *)buf +
+ memcpy_toio(bus->m_pdwSetRing, (u8 *)buf +
space_rem - sizeof(*msg),
bytes_to_write - space_rem);
}
@@ -276,15 +283,20 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
dprintk(DBGLVL_BUS, "%s() tr6\n", __func__);
/* The ring buffer doesn't wrap, two simple copies */
- memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
- memcpy(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf,
- msg->size);
+ memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
+ memcpy_toio(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf,
+ size);
}
dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp);
/* Update the bus write position */
- saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp));
+ saa7164_writel(bus->m_dwSetWritePos, new_swp);
+
+ /* Convert back to cpu after writing the msg to the ringbuffer. */
+ msg->size = le16_to_cpu((__force __le16)msg->size);
+ msg->command = le32_to_cpu((__force __le32)msg->command);
+ msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
ret = SAA_OK;
out:
@@ -336,8 +348,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
/* Peek the bus to see if a msg exists, if it's not what we're expecting
* then return cleanly else read the message from the bus.
*/
- curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos));
- curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos));
+ curr_gwp = saa7164_readl(bus->m_dwGetWritePos);
+ curr_grp = saa7164_readl(bus->m_dwGetReadPos);
if (curr_gwp == curr_grp) {
ret = SAA_ERR_EMPTY;
@@ -369,14 +381,18 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
new_grp -= bus->m_dwSizeGetRing;
space_rem = bus->m_dwSizeGetRing - curr_grp;
- memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem);
- memcpy((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing,
+ memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem);
+ memcpy_fromio((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing,
bytes_to_read - space_rem);
} else {
/* No wrapping */
- memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read);
+ memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read);
}
+ /* Convert from little endian to CPU */
+ msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+ msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+ msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
/* No need to update the read positions, because this was a peek */
/* If the caller specifically want to peek, return */
@@ -427,24 +443,24 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
if (space_rem < sizeof(*msg)) {
/* msg wraps around the ring */
- memcpy(msg, bus->m_pdwGetRing + curr_grp, space_rem);
- memcpy((u8 *)msg + space_rem, bus->m_pdwGetRing,
+ memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+ memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
sizeof(*msg) - space_rem);
if (buf)
- memcpy(buf, bus->m_pdwGetRing + sizeof(*msg) -
+ memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
space_rem, buf_size);
} else if (space_rem == sizeof(*msg)) {
- memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
- memcpy(buf, bus->m_pdwGetRing, buf_size);
+ memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
} else {
/* Additional data wraps around the ring */
- memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf) {
- memcpy(buf, bus->m_pdwGetRing + curr_grp +
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
sizeof(*msg), space_rem - sizeof(*msg));
- memcpy(buf + space_rem - sizeof(*msg),
+ memcpy_fromio(buf + space_rem - sizeof(*msg),
bus->m_pdwGetRing, bytes_to_read -
space_rem);
}
@@ -453,19 +469,20 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
} else {
/* No wrapping */
- memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
- memcpy(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
buf_size);
}
+ /* Convert from little endian to CPU */
+ msg->size = le16_to_cpu((__force __le16)msg->size);
+ msg->command = le32_to_cpu((__force __le32)msg->command);
+ msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
/* Update the read positions, adjusting the ring */
- saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp));
+ saa7164_writel(bus->m_dwGetReadPos, new_grp);
peekout:
- msg->size = le16_to_cpu(msg->size);
- msg->command = le32_to_cpu(msg->command);
- msg->controlselector = le16_to_cpu(msg->controlselector);
ret = SAA_OK;
out:
mutex_unlock(&bus->lock);
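
In the saa7164 bus code above, the command and response rings live behind a PCI BAR, so plain memcpy() and pointer dereferences are replaced with memcpy_toio()/memcpy_fromio()/readb(), and the header fields are byte-swapped only for the copy into the ring and restored afterwards. A simplified, non-wrapping sketch of that write-side convention (my_msg_hdr and my_ring_write are illustrative stand-ins, not part of the driver):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Minimal stand-in for the driver's tmComResInfo header fields used here. */
struct my_msg_hdr {
	u16 size;
	u32 command;
	u16 controlselector;
};

/* Convert the header to little-endian only around the copy into the
 * __iomem ring, keeping a CPU-order copy of the size for the payload
 * copy, then restore CPU order so the caller can keep using the header. */
static void my_ring_write(u8 __iomem *ring, u32 pos,
			  struct my_msg_hdr *msg, const void *payload)
{
	u16 size = msg->size;	/* CPU-order length used below */

	msg->size = (__force u16)cpu_to_le16(msg->size);
	msg->command = (__force u32)cpu_to_le32(msg->command);
	msg->controlselector = (__force u16)cpu_to_le16(msg->controlselector);

	memcpy_toio(ring + pos, msg, sizeof(*msg));
	memcpy_toio(ring + pos + sizeof(*msg), payload, size);

	msg->size = le16_to_cpu((__force __le16)msg->size);
	msg->command = le32_to_cpu((__force __le32)msg->command);
	msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
}

The read side mirrors this with memcpy_fromio() followed by le16_to_cpu()/le32_to_cpu(); handling of ring wrap-around, as done in saa7164_bus_set()/saa7164_bus_get() above, is omitted here for brevity.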
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index cc1be8a7a451..4b0bec3766ed 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -119,7 +119,7 @@ static void saa7164_ts_verifier(struct saa7164_buffer *buf)
u32 i;
u8 cc, a;
u16 pid;
- u8 __iomem *bufcpu = (u8 *)buf->cpu;
+ u8 *bufcpu = (u8 *)buf->cpu;
port->sync_errors = 0;
port->v_cc_errors = 0;
@@ -260,7 +260,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
struct saa7164_user_buffer *ubuf = NULL;
struct list_head *c, *n;
int i = 0;
- u8 __iomem *p;
+ u8 *p;
mutex_lock(&port->dmaqueue_lock);
list_for_each_safe(c, n, &port->dmaqueue.list) {
@@ -318,8 +318,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
if (buf->actual_size <= ubuf->actual_size) {
- memcpy_fromio(ubuf->data, buf->cpu,
- ubuf->actual_size);
+ memcpy(ubuf->data, buf->cpu, ubuf->actual_size);
if (crc_checking) {
/* Throw a new checksum on the read buffer */
@@ -346,7 +345,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
* with known bad data. We check for this data at a later point
* in time. */
saa7164_buffer_zero_offsets(port, bufnr);
- memset_io(buf->cpu, 0xff, buf->pci_size);
+ memset(buf->cpu, 0xff, buf->pci_size);
if (crc_checking) {
/* Throw yet aanother new checksum on the dma buffer */
buf->crc = crc32(0, buf->cpu, buf->actual_size);
@@ -1096,7 +1095,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
if (c == 0)
seq_printf(m, " %04x:", i);
- seq_printf(m, " %02x", *(b->m_pdwSetRing + i));
+ seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
if (++c == 16) {
seq_printf(m, "\n");
@@ -1111,7 +1110,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
if (c == 0)
seq_printf(m, " %04x:", i);
- seq_printf(m, " %02x", *(b->m_pdwGetRing + i));
+ seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
if (++c == 16) {
seq_printf(m, "\n");
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 86763203d61d..add06ab5124d 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -72,7 +72,7 @@ static int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
/* TODO: move dlflags into dev-> and change to write/readl/b */
/* TODO: Excessive levels of debug */
static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
- u32 dlflags, u8 *dst, u32 dstsize)
+ u32 dlflags, u8 __iomem *dst, u32 dstsize)
{
u32 reg, timeout, offset;
u8 *srcbuf = NULL;
@@ -136,7 +136,7 @@ static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
srcsize -= dstsize, offset += dstsize) {
dprintk(DBGLVL_FW, "%s() memcpy %d\n", __func__, dstsize);
- memcpy(dst, srcbuf + offset, dstsize);
+ memcpy_toio(dst, srcbuf + offset, dstsize);
/* Flag the data as ready */
saa7164_writel(drflag, 1);
@@ -154,7 +154,7 @@ static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
dprintk(DBGLVL_FW, "%s() memcpy(l) %d\n", __func__, dstsize);
/* Write last block to the device */
- memcpy(dst, srcbuf+offset, srcsize);
+ memcpy_toio(dst, srcbuf+offset, srcsize);
/* Flag the data as ready */
saa7164_writel(drflag, 1);
diff --git a/drivers/media/pci/saa7164/saa7164-types.h b/drivers/media/pci/saa7164/saa7164-types.h
index 1d2140a3eb38..f48ba978f835 100644
--- a/drivers/media/pci/saa7164/saa7164-types.h
+++ b/drivers/media/pci/saa7164/saa7164-types.h
@@ -78,9 +78,9 @@ enum tmBusType {
struct tmComResBusInfo {
enum tmBusType Type;
u16 m_wMaxReqSize;
- u8 *m_pdwSetRing;
+ u8 __iomem *m_pdwSetRing;
u32 m_dwSizeSetRing;
- u8 *m_pdwGetRing;
+ u8 __iomem *m_pdwGetRing;
u32 m_dwSizeGetRing;
u32 m_dwSetWritePos;
u32 m_dwSetReadPos;
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 8b29e8990301..cd1a07ce27cb 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -313,13 +313,13 @@ struct saa7164_buffer {
/* A block of page align PCI memory */
u32 pci_size; /* PCI allocation size in bytes */
- u64 __iomem *cpu; /* Virtual address */
+ u64 *cpu; /* Virtual address */
dma_addr_t dma; /* Physical address */
u32 crc; /* Checksum for the entire buffer data */
/* A page table that splits the block into a number of entries */
u32 pt_size; /* PCI allocation size in bytes */
- u64 __iomem *pt_cpu; /* Virtual address */
+ u64 *pt_cpu; /* Virtual address */
dma_addr_t pt_dma; /* Physical address */
/* Encoder fops */
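
Taken together, the saa7164-types.h and saa7164.h hunks move the __iomem annotation to where it belongs: the command rings live in BAR space mapped with ioremap() and must use the MMIO accessors, while the pci_alloc_consistent() buffers are ordinary kernel memory and can be touched with plain memcpy()/memset(). A rough illustration of the split, with made-up names and assuming <linux/io.h>:

struct demo_dev {
	u64 *dma_buf;			/* coherent DMA buffer: plain pointer */
	u8 __iomem *cmd_ring;		/* mapped BAR region: needs accessors */
};

static void demo_touch(struct demo_dev *d)
{
	memset(d->dma_buf, 0xff, 4096);		/* ordinary memory ops are fine */
	writeb(0x00, d->cmd_ring);		/* registers use readX()/writeX() */
	memcpy_toio(d->cmd_ring + 4, d->dma_buf, 16);
}
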
diff --git a/drivers/media/pci/smipcie/Kconfig b/drivers/media/pci/smipcie/Kconfig
new file mode 100644
index 000000000000..c8de53f5ea28
--- /dev/null
+++ b/drivers/media/pci/smipcie/Kconfig
@@ -0,0 +1,17 @@
+config DVB_SMIPCIE
+ tristate "SMI PCIe DVBSky cards"
+ depends on DVB_CORE && PCI && I2C
+ select I2C_ALGOBIT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for cards with SMI PCIe bridge:
+ - DVBSky S950 V3
+ - DVBSky S952 V3
+ - DVBSky T9580 V3
+
+ Say Y or M if you own such a device and want to use it.
+ If unsure say N.
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
new file mode 100644
index 000000000000..be55481a6e95
--- /dev/null
+++ b/drivers/media/pci/smipcie/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_DVB_SMIPCIE) += smipcie.o
+
+ccflags-y += -Idrivers/media/tuners
+ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
+
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie.c
new file mode 100644
index 000000000000..f773350e67b9
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie.c
@@ -0,0 +1,1099 @@
+/*
+ * SMI PCIe driver for DVBSky cards.
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "smipcie.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
+#include "m88rs6000t.h"
+#include "si2168.h"
+#include "si2157.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+static int smi_hw_init(struct smi_dev *dev)
+{
+ u32 port_mux, port_ctrl, int_stat;
+
+ /* set port mux.*/
+ port_mux = smi_read(MUX_MODE_CTRL);
+ port_mux &= ~(rbPaMSMask);
+ port_mux |= rbPaMSDtvNoGpio;
+ port_mux &= ~(rbPbMSMask);
+ port_mux |= rbPbMSDtvNoGpio;
+ port_mux &= ~(0x0f0000);
+ port_mux |= 0x50000;
+ smi_write(MUX_MODE_CTRL, port_mux);
+
+ /* set DTV register.*/
+ /* Port A */
+ port_ctrl = smi_read(VIDEO_CTRL_STATUS_A);
+ port_ctrl &= ~0x01;
+ smi_write(VIDEO_CTRL_STATUS_A, port_ctrl);
+ port_ctrl = smi_read(MPEG2_CTRL_A);
+ port_ctrl &= ~0x40;
+ port_ctrl |= 0x80;
+ smi_write(MPEG2_CTRL_A, port_ctrl);
+ /* Port B */
+ port_ctrl = smi_read(VIDEO_CTRL_STATUS_B);
+ port_ctrl &= ~0x01;
+ smi_write(VIDEO_CTRL_STATUS_B, port_ctrl);
+ port_ctrl = smi_read(MPEG2_CTRL_B);
+ port_ctrl &= ~0x40;
+ port_ctrl |= 0x80;
+ smi_write(MPEG2_CTRL_B, port_ctrl);
+
+ /* disable and clear interrupt.*/
+ smi_write(MSI_INT_ENA_CLR, ALL_INT);
+ int_stat = smi_read(MSI_INT_STATUS);
+ smi_write(MSI_INT_STATUS_CLR, int_stat);
+
+ /* reset demod.*/
+ smi_clear(PERIPHERAL_CTRL, 0x0303);
+ msleep(50);
+ smi_set(PERIPHERAL_CTRL, 0x0101);
+ return 0;
+}
+
+/* i2c bit bus.*/
+static void smi_i2c_cfg(struct smi_dev *dev, u32 sw_ctl)
+{
+ u32 dwCtrl;
+
+ dwCtrl = smi_read(sw_ctl);
+ dwCtrl &= ~0x18; /* disable output.*/
+ dwCtrl |= 0x21; /* reset and software mode.*/
+ dwCtrl &= ~0xff00;
+ dwCtrl |= 0x6400;
+ smi_write(sw_ctl, dwCtrl);
+ msleep(20);
+ dwCtrl = smi_read(sw_ctl);
+ dwCtrl &= ~0x20;
+ smi_write(sw_ctl, dwCtrl);
+}
+
+static void smi_i2c_setsda(struct smi_dev *dev, int state, u32 sw_ctl)
+{
+ if (state) {
+ /* set as input.*/
+ smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
+ } else {
+ smi_clear(sw_ctl, SW_I2C_MSK_DAT_OUT);
+ /* set as output.*/
+ smi_set(sw_ctl, SW_I2C_MSK_DAT_EN);
+ }
+}
+
+static void smi_i2c_setscl(void *data, int state, u32 sw_ctl)
+{
+ struct smi_dev *dev = data;
+
+ if (state) {
+ /* set as input.*/
+ smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
+ } else {
+ smi_clear(sw_ctl, SW_I2C_MSK_CLK_OUT);
+ /* set as output.*/
+ smi_set(sw_ctl, SW_I2C_MSK_CLK_EN);
+ }
+}
+
+static int smi_i2c_getsda(void *data, u32 sw_ctl)
+{
+ struct smi_dev *dev = data;
+ /* set as input.*/
+ smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
+ udelay(1);
+ return (smi_read(sw_ctl) & SW_I2C_MSK_DAT_IN) ? 1 : 0;
+}
+
+static int smi_i2c_getscl(void *data, u32 sw_ctl)
+{
+ struct smi_dev *dev = data;
+ /* set as input.*/
+ smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
+ udelay(1);
+ return (smi_read(sw_ctl) & SW_I2C_MSK_CLK_IN) ? 1 : 0;
+}
+/* i2c 0.*/
+static void smi_i2c0_setsda(void *data, int state)
+{
+ struct smi_dev *dev = data;
+
+ smi_i2c_setsda(dev, state, I2C_A_SW_CTL);
+}
+
+static void smi_i2c0_setscl(void *data, int state)
+{
+ struct smi_dev *dev = data;
+
+ smi_i2c_setscl(dev, state, I2C_A_SW_CTL);
+}
+
+static int smi_i2c0_getsda(void *data)
+{
+ struct smi_dev *dev = data;
+
+ return smi_i2c_getsda(dev, I2C_A_SW_CTL);
+}
+
+static int smi_i2c0_getscl(void *data)
+{
+ struct smi_dev *dev = data;
+
+ return smi_i2c_getscl(dev, I2C_A_SW_CTL);
+}
+/* i2c 1.*/
+static void smi_i2c1_setsda(void *data, int state)
+{
+ struct smi_dev *dev = data;
+
+ smi_i2c_setsda(dev, state, I2C_B_SW_CTL);
+}
+
+static void smi_i2c1_setscl(void *data, int state)
+{
+ struct smi_dev *dev = data;
+
+ smi_i2c_setscl(dev, state, I2C_B_SW_CTL);
+}
+
+static int smi_i2c1_getsda(void *data)
+{
+ struct smi_dev *dev = data;
+
+ return smi_i2c_getsda(dev, I2C_B_SW_CTL);
+}
+
+static int smi_i2c1_getscl(void *data)
+{
+ struct smi_dev *dev = data;
+
+ return smi_i2c_getscl(dev, I2C_B_SW_CTL);
+}
+
+static int smi_i2c_init(struct smi_dev *dev)
+{
+ int ret;
+
+ /* i2c bus 0 */
+ smi_i2c_cfg(dev, I2C_A_SW_CTL);
+ i2c_set_adapdata(&dev->i2c_bus[0], dev);
+ strcpy(dev->i2c_bus[0].name, "SMI-I2C0");
+ dev->i2c_bus[0].owner = THIS_MODULE;
+ dev->i2c_bus[0].dev.parent = &dev->pci_dev->dev;
+ dev->i2c_bus[0].algo_data = &dev->i2c_bit[0];
+ dev->i2c_bit[0].data = dev;
+ dev->i2c_bit[0].setsda = smi_i2c0_setsda;
+ dev->i2c_bit[0].setscl = smi_i2c0_setscl;
+ dev->i2c_bit[0].getsda = smi_i2c0_getsda;
+ dev->i2c_bit[0].getscl = smi_i2c0_getscl;
+ dev->i2c_bit[0].udelay = 12;
+ dev->i2c_bit[0].timeout = 10;
+ /* Raise SCL and SDA */
+ smi_i2c0_setsda(dev, 1);
+ smi_i2c0_setscl(dev, 1);
+
+ ret = i2c_bit_add_bus(&dev->i2c_bus[0]);
+ if (ret < 0)
+ return ret;
+
+ /* i2c bus 1 */
+ smi_i2c_cfg(dev, I2C_B_SW_CTL);
+ i2c_set_adapdata(&dev->i2c_bus[1], dev);
+ strcpy(dev->i2c_bus[1].name, "SMI-I2C1");
+ dev->i2c_bus[1].owner = THIS_MODULE;
+ dev->i2c_bus[1].dev.parent = &dev->pci_dev->dev;
+ dev->i2c_bus[1].algo_data = &dev->i2c_bit[1];
+ dev->i2c_bit[1].data = dev;
+ dev->i2c_bit[1].setsda = smi_i2c1_setsda;
+ dev->i2c_bit[1].setscl = smi_i2c1_setscl;
+ dev->i2c_bit[1].getsda = smi_i2c1_getsda;
+ dev->i2c_bit[1].getscl = smi_i2c1_getscl;
+ dev->i2c_bit[1].udelay = 12;
+ dev->i2c_bit[1].timeout = 10;
+ /* Raise SCL and SDA */
+ smi_i2c1_setsda(dev, 1);
+ smi_i2c1_setscl(dev, 1);
+
+ ret = i2c_bit_add_bus(&dev->i2c_bus[1]);
+ if (ret < 0)
+ i2c_del_adapter(&dev->i2c_bus[0]);
+
+ return ret;
+}
+
+static void smi_i2c_exit(struct smi_dev *dev)
+{
+ i2c_del_adapter(&dev->i2c_bus[0]);
+ i2c_del_adapter(&dev->i2c_bus[1]);
+}
+
+static int smi_read_eeprom(struct i2c_adapter *i2c, u16 reg, u8 *data, u16 size)
+{
+ int ret;
+ u8 b0[2] = { (reg >> 8) & 0xff, reg & 0xff };
+
+ struct i2c_msg msg[] = {
+ { .addr = 0x50, .flags = 0,
+ .buf = b0, .len = 2 },
+ { .addr = 0x50, .flags = I2C_M_RD,
+ .buf = data, .len = size }
+ };
+
+ ret = i2c_transfer(i2c, msg, 2);
+
+ if (ret != 2) {
+ dev_err(&i2c->dev, "%s: reg=0x%x (error=%d)\n",
+ __func__, reg, ret);
+ return ret;
+ }
+ return ret;
+}
+
+/* ts port interrupt operations */
+static void smi_port_disableInterrupt(struct smi_port *port)
+{
+ struct smi_dev *dev = port->dev;
+
+ smi_write(MSI_INT_ENA_CLR,
+ (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+static void smi_port_enableInterrupt(struct smi_port *port)
+{
+ struct smi_dev *dev = port->dev;
+
+ smi_write(MSI_INT_ENA_SET,
+ (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+static void smi_port_clearInterrupt(struct smi_port *port)
+{
+ struct smi_dev *dev = port->dev;
+
+ smi_write(MSI_INT_STATUS_CLR,
+ (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
+}
+
+/* tasklet handler: DMA data to dmx.*/
+static void smi_dma_xfer(unsigned long data)
+{
+ struct smi_port *port = (struct smi_port *) data;
+ struct smi_dev *dev = port->dev;
+ u32 intr_status, finishedData, dmaManagement;
+ u8 dmaChan0State, dmaChan1State;
+
+ intr_status = port->_int_status;
+ dmaManagement = smi_read(port->DMA_MANAGEMENT);
+ dmaChan0State = (u8)((dmaManagement & 0x00000030) >> 4);
+ dmaChan1State = (u8)((dmaManagement & 0x00300000) >> 20);
+
+ /* CH-0 DMA interrupt.*/
+ if ((intr_status & port->_dmaInterruptCH0) && (dmaChan0State == 0x01)) {
+ dev_dbg(&dev->pci_dev->dev,
+ "Port[%d]-DMA CH0 engine completed successfully\n",
+ port->idx);
+ finishedData = smi_read(port->DMA_CHAN0_TRANS_STATE);
+ finishedData &= 0x003FFFFF;
+ /* bits [21:0] of the channel's TRANS_STATE register give the
+ * total dma transfer length; a value of zero means the transfer
+ * length equals 0x400000 (4MB) */
+ if (finishedData == 0)
+ finishedData = 0x00400000;
+ if (finishedData != SMI_TS_DMA_BUF_SIZE) {
+ dev_dbg(&dev->pci_dev->dev,
+ "DMA CH0 engine completed with a mismatched length, finished data=%d\n",
+ finishedData);
+ }
+ dvb_dmx_swfilter_packets(&port->demux,
+ port->cpu_addr[0], (finishedData / 188));
+ /*dvb_dmx_swfilter(&port->demux,
+ port->cpu_addr[0], finishedData);*/
+ }
+ /* CH-1 DMA interrupt.*/
+ if ((intr_status & port->_dmaInterruptCH1) && (dmaChan1State == 0x01)) {
+ dev_dbg(&dev->pci_dev->dev,
+ "Port[%d]-DMA CH1 engine completed successfully\n",
+ port->idx);
+ finishedData = smi_read(port->DMA_CHAN1_TRANS_STATE);
+ finishedData &= 0x003FFFFF;
+ /* bits [21:0] of the channel's TRANS_STATE register give the
+ * total dma transfer length; a value of zero means the transfer
+ * length equals 0x400000 (4MB) */
+ if (finishedData == 0)
+ finishedData = 0x00400000;
+ if (finishedData != SMI_TS_DMA_BUF_SIZE) {
+ dev_dbg(&dev->pci_dev->dev,
+ "DMA CH1 engine completed with a mismatched length, finished data=%d\n",
+ finishedData);
+ }
+ dvb_dmx_swfilter_packets(&port->demux,
+ port->cpu_addr[1], (finishedData / 188));
+ /*dvb_dmx_swfilter(&port->demux,
+ port->cpu_addr[1], finishedData);*/
+ }
+ /* restart DMA.*/
+ if (intr_status & port->_dmaInterruptCH0)
+ dmaManagement |= 0x00000002;
+ if (intr_status & port->_dmaInterruptCH1)
+ dmaManagement |= 0x00020000;
+ smi_write(port->DMA_MANAGEMENT, dmaManagement);
+ /* Re-enable interrupts */
+ smi_port_enableInterrupt(port);
+}
+
+static void smi_port_dma_free(struct smi_port *port)
+{
+ if (port->cpu_addr[0]) {
+ pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
+ port->cpu_addr[0], port->dma_addr[0]);
+ port->cpu_addr[0] = NULL;
+ }
+ if (port->cpu_addr[1]) {
+ pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
+ port->cpu_addr[1], port->dma_addr[1]);
+ port->cpu_addr[1] = NULL;
+ }
+}
+
+static int smi_port_init(struct smi_port *port, int dmaChanUsed)
+{
+ dev_dbg(&port->dev->pci_dev->dev,
+ "%s, port %d, dmaused %d\n", __func__, port->idx, dmaChanUsed);
+ port->enable = 0;
+ if (port->idx == 0) {
+ /* Port A */
+ port->_dmaInterruptCH0 = dmaChanUsed & 0x01;
+ port->_dmaInterruptCH1 = dmaChanUsed & 0x02;
+
+ port->DMA_CHAN0_ADDR_LOW = DMA_PORTA_CHAN0_ADDR_LOW;
+ port->DMA_CHAN0_ADDR_HI = DMA_PORTA_CHAN0_ADDR_HI;
+ port->DMA_CHAN0_TRANS_STATE = DMA_PORTA_CHAN0_TRANS_STATE;
+ port->DMA_CHAN0_CONTROL = DMA_PORTA_CHAN0_CONTROL;
+ port->DMA_CHAN1_ADDR_LOW = DMA_PORTA_CHAN1_ADDR_LOW;
+ port->DMA_CHAN1_ADDR_HI = DMA_PORTA_CHAN1_ADDR_HI;
+ port->DMA_CHAN1_TRANS_STATE = DMA_PORTA_CHAN1_TRANS_STATE;
+ port->DMA_CHAN1_CONTROL = DMA_PORTA_CHAN1_CONTROL;
+ port->DMA_MANAGEMENT = DMA_PORTA_MANAGEMENT;
+ } else {
+ /* Port B */
+ port->_dmaInterruptCH0 = (dmaChanUsed << 2) & 0x04;
+ port->_dmaInterruptCH1 = (dmaChanUsed << 2) & 0x08;
+
+ port->DMA_CHAN0_ADDR_LOW = DMA_PORTB_CHAN0_ADDR_LOW;
+ port->DMA_CHAN0_ADDR_HI = DMA_PORTB_CHAN0_ADDR_HI;
+ port->DMA_CHAN0_TRANS_STATE = DMA_PORTB_CHAN0_TRANS_STATE;
+ port->DMA_CHAN0_CONTROL = DMA_PORTB_CHAN0_CONTROL;
+ port->DMA_CHAN1_ADDR_LOW = DMA_PORTB_CHAN1_ADDR_LOW;
+ port->DMA_CHAN1_ADDR_HI = DMA_PORTB_CHAN1_ADDR_HI;
+ port->DMA_CHAN1_TRANS_STATE = DMA_PORTB_CHAN1_TRANS_STATE;
+ port->DMA_CHAN1_CONTROL = DMA_PORTB_CHAN1_CONTROL;
+ port->DMA_MANAGEMENT = DMA_PORTB_MANAGEMENT;
+ }
+
+ if (port->_dmaInterruptCH0) {
+ port->cpu_addr[0] = pci_alloc_consistent(port->dev->pci_dev,
+ SMI_TS_DMA_BUF_SIZE,
+ &port->dma_addr[0]);
+ if (!port->cpu_addr[0]) {
+ dev_err(&port->dev->pci_dev->dev,
+ "Port[%d] DMA CH0 memory allocation failed!\n",
+ port->idx);
+ goto err;
+ }
+ }
+
+ if (port->_dmaInterruptCH1) {
+ port->cpu_addr[1] = pci_alloc_consistent(port->dev->pci_dev,
+ SMI_TS_DMA_BUF_SIZE,
+ &port->dma_addr[1]);
+ if (!port->cpu_addr[1]) {
+ dev_err(&port->dev->pci_dev->dev,
+ "Port[%d] DMA CH1 memory allocation failed!\n",
+ port->idx);
+ goto err;
+ }
+ }
+
+ smi_port_disableInterrupt(port);
+ tasklet_init(&port->tasklet, smi_dma_xfer, (unsigned long)port);
+ tasklet_disable(&port->tasklet);
+ port->enable = 1;
+ return 0;
+err:
+ smi_port_dma_free(port);
+ return -ENOMEM;
+}
+
+static void smi_port_exit(struct smi_port *port)
+{
+ smi_port_disableInterrupt(port);
+ tasklet_kill(&port->tasklet);
+ smi_port_dma_free(port);
+ port->enable = 0;
+}
+
+static void smi_port_irq(struct smi_port *port, u32 int_status)
+{
+ u32 port_req_irq = port->_dmaInterruptCH0 | port->_dmaInterruptCH1;
+
+ if (int_status & port_req_irq) {
+ smi_port_disableInterrupt(port);
+ port->_int_status = int_status;
+ smi_port_clearInterrupt(port);
+ tasklet_schedule(&port->tasklet);
+ }
+}
+
+static irqreturn_t smi_irq_handler(int irq, void *dev_id)
+{
+ struct smi_dev *dev = dev_id;
+ struct smi_port *port0 = &dev->ts_port[0];
+ struct smi_port *port1 = &dev->ts_port[1];
+
+ u32 intr_status = smi_read(MSI_INT_STATUS);
+
+ /* ts0 interrupt.*/
+ if (dev->info->ts_0)
+ smi_port_irq(port0, intr_status);
+
+ /* ts1 interrupt.*/
+ if (dev->info->ts_1)
+ smi_port_irq(port1, intr_status);
+
+ return IRQ_HANDLED;
+}
+
+static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
+ struct i2c_board_info *info)
+{
+ struct i2c_client *client;
+
+ request_module(info->type);
+ client = i2c_new_device(adapter, info);
+ if (client == NULL || client->dev.driver == NULL)
+ goto err_add_i2c_client;
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ goto err_add_i2c_client;
+ }
+ return client;
+
+err_add_i2c_client:
+ client = NULL;
+ return client;
+}
+
+static void smi_del_i2c_client(struct i2c_client *client)
+{
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+}
+
+static const struct m88ds3103_config smi_dvbsky_m88ds3103_cfg = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_PARALLEL,
+ .ts_clk = 16000,
+ .ts_clk_pol = 1,
+ .agc = 0x99,
+ .lnb_hv_pol = 0,
+ .lnb_en_pol = 1,
+};
+
+static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
+{
+ int ret = 0;
+ struct smi_dev *dev = port->dev;
+ struct i2c_adapter *i2c;
+ /* tuner I2C module */
+ struct i2c_adapter *tuner_i2c_adapter;
+ struct i2c_client *tuner_client;
+ struct i2c_board_info tuner_info;
+ struct m88ts2022_config m88ts2022_config = {
+ .clock = 27000000,
+ };
+ memset(&tuner_info, 0, sizeof(struct i2c_board_info));
+ i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+ /* attach demod */
+ port->fe = dvb_attach(m88ds3103_attach,
+ &smi_dvbsky_m88ds3103_cfg, i2c, &tuner_i2c_adapter);
+ if (!port->fe) {
+ ret = -ENODEV;
+ return ret;
+ }
+ /* attach tuner */
+ m88ts2022_config.fe = port->fe;
+ strlcpy(tuner_info.type, "m88ts2022", I2C_NAME_SIZE);
+ tuner_info.addr = 0x60;
+ tuner_info.platform_data = &m88ts2022_config;
+ tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
+ if (!tuner_client) {
+ ret = -ENODEV;
+ goto err_tuner_i2c_device;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ port->fe->ops.read_signal_strength =
+ port->fe->ops.tuner_ops.get_rf_strength;
+
+ port->i2c_client_tuner = tuner_client;
+ return ret;
+
+err_tuner_i2c_device:
+ dvb_frontend_detach(port->fe);
+ return ret;
+}
+
+static const struct m88ds3103_config smi_dvbsky_m88rs6000_cfg = {
+ .i2c_addr = 0x69,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .ts_mode = M88DS3103_TS_PARALLEL,
+ .ts_clk = 16000,
+ .ts_clk_pol = 1,
+ .agc = 0x99,
+ .lnb_hv_pol = 0,
+ .lnb_en_pol = 1,
+};
+
+static int smi_dvbsky_m88rs6000_fe_attach(struct smi_port *port)
+{
+ int ret = 0;
+ struct smi_dev *dev = port->dev;
+ struct i2c_adapter *i2c;
+ /* tuner I2C module */
+ struct i2c_adapter *tuner_i2c_adapter;
+ struct i2c_client *tuner_client;
+ struct i2c_board_info tuner_info;
+ struct m88rs6000t_config m88rs6000t_config;
+
+ memset(&tuner_info, 0, sizeof(struct i2c_board_info));
+ i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+ /* attach demod */
+ port->fe = dvb_attach(m88ds3103_attach,
+ &smi_dvbsky_m88rs6000_cfg, i2c, &tuner_i2c_adapter);
+ if (!port->fe) {
+ ret = -ENODEV;
+ return ret;
+ }
+ /* attach tuner */
+ m88rs6000t_config.fe = port->fe;
+ strlcpy(tuner_info.type, "m88rs6000t", I2C_NAME_SIZE);
+ tuner_info.addr = 0x21;
+ tuner_info.platform_data = &m88rs6000t_config;
+ tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
+ if (!tuner_client) {
+ ret = -ENODEV;
+ goto err_tuner_i2c_device;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ port->fe->ops.read_signal_strength =
+ port->fe->ops.tuner_ops.get_rf_strength;
+
+ port->i2c_client_tuner = tuner_client;
+ return ret;
+
+err_tuner_i2c_device:
+ dvb_frontend_detach(port->fe);
+ return ret;
+}
+
+static int smi_dvbsky_sit2_fe_attach(struct smi_port *port)
+{
+ int ret = 0;
+ struct smi_dev *dev = port->dev;
+ struct i2c_adapter *i2c;
+ struct i2c_adapter *tuner_i2c_adapter;
+ struct i2c_client *client_tuner, *client_demod;
+ struct i2c_board_info client_info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+
+ /* select i2c bus */
+ i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
+
+ /* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &tuner_i2c_adapter;
+ si2168_config.fe = &port->fe;
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+
+ memset(&client_info, 0, sizeof(struct i2c_board_info));
+ strlcpy(client_info.type, "si2168", I2C_NAME_SIZE);
+ client_info.addr = 0x64;
+ client_info.platform_data = &si2168_config;
+
+ client_demod = smi_add_i2c_client(i2c, &client_info);
+ if (!client_demod) {
+ ret = -ENODEV;
+ return ret;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = port->fe;
+
+ memset(&client_info, 0, sizeof(struct i2c_board_info));
+ strlcpy(client_info.type, "si2157", I2C_NAME_SIZE);
+ client_info.addr = 0x60;
+ client_info.platform_data = &si2157_config;
+
+ client_tuner = smi_add_i2c_client(tuner_i2c_adapter, &client_info);
+ if (!client_tuner) {
+ smi_del_i2c_client(port->i2c_client_demod);
+ port->i2c_client_demod = NULL;
+ ret = -ENODEV;
+ return ret;
+ }
+ port->i2c_client_tuner = client_tuner;
+ return ret;
+}
+
+static int smi_fe_init(struct smi_port *port)
+{
+ int ret = 0;
+ struct smi_dev *dev = port->dev;
+ struct dvb_adapter *adap = &port->dvb_adapter;
+ u8 mac_ee[16];
+
+ dev_dbg(&port->dev->pci_dev->dev,
+ "%s: port %d, fe_type = %d\n",
+ __func__, port->idx, port->fe_type);
+ switch (port->fe_type) {
+ case DVBSKY_FE_M88DS3103:
+ ret = smi_dvbsky_m88ds3103_fe_attach(port);
+ break;
+ case DVBSKY_FE_M88RS6000:
+ ret = smi_dvbsky_m88rs6000_fe_attach(port);
+ break;
+ case DVBSKY_FE_SIT2:
+ ret = smi_dvbsky_sit2_fe_attach(port);
+ break;
+ }
+ if (ret < 0)
+ return ret;
+
+ /* register dvb frontend */
+ ret = dvb_register_frontend(adap, port->fe);
+ if (ret < 0) {
+ if (port->i2c_client_tuner)
+ smi_del_i2c_client(port->i2c_client_tuner);
+ if (port->i2c_client_demod)
+ smi_del_i2c_client(port->i2c_client_demod);
+ dvb_frontend_detach(port->fe);
+ return ret;
+ }
+ /* init MAC.*/
+ ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
+ dev_info(&port->dev->pci_dev->dev,
+ "DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
+ memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
+ return ret;
+}
+
+static void smi_fe_exit(struct smi_port *port)
+{
+ dvb_unregister_frontend(port->fe);
+ /* remove I2C demod and tuner */
+ if (port->i2c_client_tuner)
+ smi_del_i2c_client(port->i2c_client_tuner);
+ if (port->i2c_client_demod)
+ smi_del_i2c_client(port->i2c_client_demod);
+ dvb_frontend_detach(port->fe);
+}
+
+static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv)
+{
+ dvbdemux->priv = priv;
+
+ dvbdemux->filternum = 256;
+ dvbdemux->feednum = 256;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->write_to_decoder = NULL;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+ return dvb_dmx_init(dvbdemux);
+}
+
+static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter)
+{
+ int ret;
+
+ dmxdev->filternum = 256;
+ dmxdev->demux = &dvbdemux->dmx;
+ dmxdev->capabilities = 0;
+ ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+ if (ret < 0)
+ return ret;
+
+ hw_frontend->source = DMX_FRONTEND_0;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+ mem_frontend->source = DMX_MEMORY_FE;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+ return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
+
+static u32 smi_config_DMA(struct smi_port *port)
+{
+ struct smi_dev *dev = port->dev;
+ u32 totalLength = 0, dmaMemPtrLow, dmaMemPtrHi, dmaCtlReg;
+ u8 chanLatencyTimer = 0, dmaChanEnable = 1, dmaTransStart = 1;
+ u32 dmaManagement = 0, tlpTransUnit = DMA_TRANS_UNIT_188;
+ u8 tlpTc = 0, tlpTd = 1, tlpEp = 0, tlpAttr = 0;
+ u64 mem;
+
+ dmaManagement = smi_read(port->DMA_MANAGEMENT);
+ /* Setup Channel-0 */
+ if (port->_dmaInterruptCH0) {
+ totalLength = SMI_TS_DMA_BUF_SIZE;
+ mem = port->dma_addr[0];
+ dmaMemPtrLow = mem & 0xffffffff;
+ dmaMemPtrHi = mem >> 32;
+ dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
+ | (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
+ dmaManagement |= dmaChanEnable | (dmaTransStart << 1)
+ | (chanLatencyTimer << 8);
+ /* write DMA register, start DMA engine */
+ smi_write(port->DMA_CHAN0_ADDR_LOW, dmaMemPtrLow);
+ smi_write(port->DMA_CHAN0_ADDR_HI, dmaMemPtrHi);
+ smi_write(port->DMA_CHAN0_CONTROL, dmaCtlReg);
+ }
+ /* Setup Channel-1 */
+ if (port->_dmaInterruptCH1) {
+ totalLength = SMI_TS_DMA_BUF_SIZE;
+ mem = port->dma_addr[1];
+ dmaMemPtrLow = mem & 0xffffffff;
+ dmaMemPtrHi = mem >> 32;
+ dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
+ | (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
+ dmaManagement |= (dmaChanEnable << 16) | (dmaTransStart << 17)
+ | (chanLatencyTimer << 24);
+ /* write DMA register, start DMA engine */
+ smi_write(port->DMA_CHAN1_ADDR_LOW, dmaMemPtrLow);
+ smi_write(port->DMA_CHAN1_ADDR_HI, dmaMemPtrHi);
+ smi_write(port->DMA_CHAN1_CONTROL, dmaCtlReg);
+ }
+ return dmaManagement;
+}
+
+static int smi_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct smi_port *port = dvbdmx->priv;
+ struct smi_dev *dev = port->dev;
+ u32 dmaManagement;
+
+ if (port->users++ == 0) {
+ dmaManagement = smi_config_DMA(port);
+ smi_port_clearInterrupt(port);
+ smi_port_enableInterrupt(port);
+ smi_write(port->DMA_MANAGEMENT, dmaManagement);
+ tasklet_enable(&port->tasklet);
+ }
+ return port->users;
+}
+
+static int smi_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct smi_port *port = dvbdmx->priv;
+ struct smi_dev *dev = port->dev;
+
+ if (--port->users)
+ return port->users;
+
+ tasklet_disable(&port->tasklet);
+ smi_port_disableInterrupt(port);
+ smi_clear(port->DMA_MANAGEMENT, 0x30003);
+ return 0;
+}
+
+static int smi_dvb_init(struct smi_port *port)
+{
+ int ret;
+ struct dvb_adapter *adap = &port->dvb_adapter;
+ struct dvb_demux *dvbdemux = &port->demux;
+
+ dev_dbg(&port->dev->pci_dev->dev,
+ "%s, port %d\n", __func__, port->idx);
+
+ ret = dvb_register_adapter(adap, "SMI_DVB", THIS_MODULE,
+ &port->dev->pci_dev->dev,
+ adapter_nr);
+ if (ret < 0) {
+ dev_err(&port->dev->pci_dev->dev, "Failed to register DVB adapter.\n");
+ return ret;
+ }
+ ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
+ smi_start_feed,
+ smi_stop_feed, port);
+ if (ret < 0)
+ goto err_del_dvb_register_adapter;
+
+ ret = my_dvb_dmxdev_ts_card_init(&port->dmxdev, &port->demux,
+ &port->hw_frontend,
+ &port->mem_frontend, adap);
+ if (ret < 0)
+ goto err_del_dvb_dmx;
+
+ ret = dvb_net_init(adap, &port->dvbnet, port->dmxdev.demux);
+ if (ret < 0)
+ goto err_del_dvb_dmxdev;
+ return 0;
+err_del_dvb_dmxdev:
+ dvbdemux->dmx.close(&dvbdemux->dmx);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
+ dvb_dmxdev_release(&port->dmxdev);
+err_del_dvb_dmx:
+ dvb_dmx_release(&port->demux);
+err_del_dvb_register_adapter:
+ dvb_unregister_adapter(&port->dvb_adapter);
+ return ret;
+}
+
+static void smi_dvb_exit(struct smi_port *port)
+{
+ struct dvb_demux *dvbdemux = &port->demux;
+
+ dvb_net_release(&port->dvbnet);
+
+ dvbdemux->dmx.close(&dvbdemux->dmx);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
+ dvb_dmxdev_release(&port->dmxdev);
+ dvb_dmx_release(&port->demux);
+
+ dvb_unregister_adapter(&port->dvb_adapter);
+}
+
+static int smi_port_attach(struct smi_dev *dev,
+ struct smi_port *port, int index)
+{
+ int ret, dmachs;
+
+ port->dev = dev;
+ port->idx = index;
+ port->fe_type = (index == 0) ? dev->info->fe_0 : dev->info->fe_1;
+ dmachs = (index == 0) ? dev->info->ts_0 : dev->info->ts_1;
+ /* port init.*/
+ ret = smi_port_init(port, dmachs);
+ if (ret < 0)
+ return ret;
+ /* dvb init.*/
+ ret = smi_dvb_init(port);
+ if (ret < 0)
+ goto err_del_port_init;
+ /* fe init.*/
+ ret = smi_fe_init(port);
+ if (ret < 0)
+ goto err_del_dvb_init;
+ return 0;
+err_del_dvb_init:
+ smi_dvb_exit(port);
+err_del_port_init:
+ smi_port_exit(port);
+ return ret;
+}
+
+static void smi_port_detach(struct smi_port *port)
+{
+ smi_fe_exit(port);
+ smi_dvb_exit(port);
+ smi_port_exit(port);
+}
+
+static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct smi_dev *dev;
+ int ret = -ENOMEM;
+
+ if (pci_enable_device(pdev) < 0)
+ return -ENODEV;
+
+ dev = kzalloc(sizeof(struct smi_dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_pci_disable_device;
+ }
+
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dev->info = (struct smi_cfg_info *) id->driver_data;
+ dev_info(&dev->pci_dev->dev,
+ "card detected: %s\n", dev->info->name);
+
+ dev->nr = dev->info->type;
+ dev->lmmio = ioremap(pci_resource_start(dev->pci_dev, 0),
+ pci_resource_len(dev->pci_dev, 0));
+ if (!dev->lmmio) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ /* should we set to 32bit DMA? */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ goto err_pci_iounmap;
+
+ pci_set_master(pdev);
+
+ ret = smi_hw_init(dev);
+ if (ret < 0)
+ goto err_pci_iounmap;
+
+ ret = smi_i2c_init(dev);
+ if (ret < 0)
+ goto err_pci_iounmap;
+
+ if (dev->info->ts_0) {
+ ret = smi_port_attach(dev, &dev->ts_port[0], 0);
+ if (ret < 0)
+ goto err_del_i2c_adaptor;
+ }
+
+ if (dev->info->ts_1) {
+ ret = smi_port_attach(dev, &dev->ts_port[1], 1);
+ if (ret < 0)
+ goto err_del_port0_attach;
+ }
+
+#ifdef CONFIG_PCI_MSI /* TODO: MSI interrupt handling? */
+ if (pci_msi_enabled())
+ ret = pci_enable_msi(dev->pci_dev);
+ if (ret)
+ dev_info(&dev->pci_dev->dev, "MSI not available.\n");
+#endif
+
+ ret = request_irq(dev->pci_dev->irq, smi_irq_handler,
+ IRQF_SHARED, "SMI_PCIE", dev);
+ if (ret < 0)
+ goto err_del_port1_attach;
+
+ return 0;
+
+err_del_port1_attach:
+ if (dev->info->ts_1)
+ smi_port_detach(&dev->ts_port[1]);
+err_del_port0_attach:
+ if (dev->info->ts_0)
+ smi_port_detach(&dev->ts_port[0]);
+err_del_i2c_adaptor:
+ smi_i2c_exit(dev);
+err_pci_iounmap:
+ iounmap(dev->lmmio);
+err_kfree:
+ pci_set_drvdata(pdev, NULL);
+ kfree(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void smi_remove(struct pci_dev *pdev)
+{
+ struct smi_dev *dev = pci_get_drvdata(pdev);
+
+ smi_write(MSI_INT_ENA_CLR, ALL_INT);
+ free_irq(dev->pci_dev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+ pci_disable_msi(dev->pci_dev);
+#endif
+ if (dev->info->ts_1)
+ smi_port_detach(&dev->ts_port[1]);
+ if (dev->info->ts_0)
+ smi_port_detach(&dev->ts_port[0]);
+
+ smi_i2c_exit(dev);
+ iounmap(dev->lmmio);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ kfree(dev);
+}
+
+/* DVBSky cards */
+static struct smi_cfg_info dvbsky_s950_cfg = {
+ .type = SMI_DVBSKY_S950,
+ .name = "DVBSky S950 V3",
+ .ts_0 = SMI_TS_NULL,
+ .ts_1 = SMI_TS_DMA_BOTH,
+ .fe_0 = DVBSKY_FE_NULL,
+ .fe_1 = DVBSKY_FE_M88DS3103,
+};
+
+static struct smi_cfg_info dvbsky_s952_cfg = {
+ .type = SMI_DVBSKY_S952,
+ .name = "DVBSky S952 V3",
+ .ts_0 = SMI_TS_DMA_BOTH,
+ .ts_1 = SMI_TS_DMA_BOTH,
+ .fe_0 = DVBSKY_FE_M88RS6000,
+ .fe_1 = DVBSKY_FE_M88RS6000,
+};
+
+static struct smi_cfg_info dvbsky_t9580_cfg = {
+ .type = SMI_DVBSKY_T9580,
+ .name = "DVBSky T9580 V3",
+ .ts_0 = SMI_TS_DMA_BOTH,
+ .ts_1 = SMI_TS_DMA_BOTH,
+ .fe_0 = DVBSKY_FE_SIT2,
+ .fe_1 = DVBSKY_FE_M88DS3103,
+};
+
+/* PCI IDs */
+#define SMI_ID(_subvend, _subdev, _driverdata) { \
+ .vendor = SMI_VID, .device = SMI_PID, \
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long)&_driverdata }
+
+static const struct pci_device_id smi_id_table[] = {
+ SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
+ SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
+ SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, smi_id_table);
+
+static struct pci_driver smipcie_driver = {
+ .name = "SMI PCIe driver",
+ .id_table = smi_id_table,
+ .probe = smi_probe,
+ .remove = smi_remove,
+};
+
+module_pci_driver(smipcie_driver);
+
+MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("SMI PCIe driver");
+MODULE_LICENSE("GPL");
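
One detail worth calling out in smi_dma_xfer() above: the completed byte count is taken from bits [21:0] of the channel's TRANS_STATE register, and a reading of zero means the engine transferred the full 0x400000-byte (4 MB) window. Condensed into a hypothetical helper:

#define DEMO_DMA_LEN_MASK	0x003FFFFF	/* TRANS_STATE bits [21:0] */
#define DEMO_DMA_FULL_WRAP	0x00400000	/* zero count == full 4 MB */

static u32 demo_finished_bytes(u32 trans_state)
{
	u32 len = trans_state & DEMO_DMA_LEN_MASK;

	return len ? len : DEMO_DMA_FULL_WRAP;
}

The result is then handed to the demux in whole 188-byte TS packets, i.e. dvb_dmx_swfilter_packets(&port->demux, port->cpu_addr[0], demo_finished_bytes(state) / 188).
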
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
new file mode 100644
index 000000000000..10cdf20f4839
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -0,0 +1,299 @@
+/*
+ * SMI PCIe driver for DVBSky cards.
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SMI_PCIE_H_
+#define _SMI_PCIE_H_
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <media/rc-core.h>
+
+#include "demux.h"
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+#include "dvbdev.h"
+
+/* -------- Register Base -------- */
+#define MSI_CONTROL_REG_BASE 0x0800
+#define SYSTEM_CONTROL_REG_BASE 0x0880
+#define PCIE_EP_DEBUG_REG_BASE 0x08C0
+#define IR_CONTROL_REG_BASE 0x0900
+#define I2C_A_CONTROL_REG_BASE 0x0940
+#define I2C_B_CONTROL_REG_BASE 0x0980
+#define ATV_PORTA_CONTROL_REG_BASE 0x09C0
+#define DTV_PORTA_CONTROL_REG_BASE 0x0A00
+#define AES_PORTA_CONTROL_REG_BASE 0x0A80
+#define DMA_PORTA_CONTROL_REG_BASE 0x0AC0
+#define ATV_PORTB_CONTROL_REG_BASE 0x0B00
+#define DTV_PORTB_CONTROL_REG_BASE 0x0B40
+#define AES_PORTB_CONTROL_REG_BASE 0x0BC0
+#define DMA_PORTB_CONTROL_REG_BASE 0x0C00
+#define UART_A_REGISTER_BASE 0x0C40
+#define UART_B_REGISTER_BASE 0x0C80
+#define GPS_CONTROL_REG_BASE 0x0CC0
+#define DMA_PORTC_CONTROL_REG_BASE 0x0D00
+#define DMA_PORTD_CONTROL_REG_BASE 0x0D00
+#define AES_RANDOM_DATA_BASE 0x0D80
+#define AES_KEY_IN_BASE 0x0D90
+#define RANDOM_DATA_LIB_BASE 0x0E00
+#define IR_DATA_BUFFER_BASE 0x0F00
+#define PORTA_TS_BUFFER_BASE 0x1000
+#define PORTA_I2S_BUFFER_BASE 0x1400
+#define PORTB_TS_BUFFER_BASE 0x1800
+#define PORTB_I2S_BUFFER_BASE 0x1C00
+
+/* -------- MSI control and state register -------- */
+#define MSI_DELAY_TIMER (MSI_CONTROL_REG_BASE + 0x00)
+#define MSI_INT_STATUS (MSI_CONTROL_REG_BASE + 0x08)
+#define MSI_INT_STATUS_CLR (MSI_CONTROL_REG_BASE + 0x0C)
+#define MSI_INT_STATUS_SET (MSI_CONTROL_REG_BASE + 0x10)
+#define MSI_INT_ENA (MSI_CONTROL_REG_BASE + 0x14)
+#define MSI_INT_ENA_CLR (MSI_CONTROL_REG_BASE + 0x18)
+#define MSI_INT_ENA_SET (MSI_CONTROL_REG_BASE + 0x1C)
+#define MSI_SOFT_RESET (MSI_CONTROL_REG_BASE + 0x20)
+#define MSI_CFG_SRC0 (MSI_CONTROL_REG_BASE + 0x24)
+
+/* -------- Hybrid Controller System Control register -------- */
+#define MUX_MODE_CTRL (SYSTEM_CONTROL_REG_BASE + 0x00)
+ #define rbPaMSMask 0x07
+ #define rbPaMSDtvNoGpio 0x00 /*[2:0], DTV Simple mode */
+ #define rbPaMSDtv4bitGpio 0x01 /*[2:0], DTV TS2 Serial mode*/
+ #define rbPaMSDtv7bitGpio 0x02 /*[2:0], DTV TS0 Serial mode*/
+ #define rbPaMS8bitGpio 0x03 /*[2:0], GPIO mode selected;(8bit GPIO)*/
+ #define rbPaMSAtv 0x04 /*[2:0], 3'b1xx: ATV mode select*/
+ #define rbPbMSMask 0x38
+ #define rbPbMSDtvNoGpio 0x00 /*[5:3], DTV Simple mode */
+ #define rbPbMSDtv4bitGpio 0x08 /*[5:3], DTV TS2 Serial mode*/
+ #define rbPbMSDtv7bitGpio 0x10 /*[5:3], DTV TS0 Serial mode*/
+ #define rbPbMS8bitGpio 0x18 /*[5:3], GPIO mode selected;(8bit GPIO)*/
+ #define rbPbMSAtv 0x20 /*[5:3], 3'b1xx: ATV mode select*/
+ #define rbPaAESEN 0x40 /*[6], port A AES enable bit*/
+ #define rbPbAESEN 0x80 /*[7], port B AES enable bit*/
+
+#define INTERNAL_RST (SYSTEM_CONTROL_REG_BASE + 0x04)
+#define PERIPHERAL_CTRL (SYSTEM_CONTROL_REG_BASE + 0x08)
+#define GPIO_0to7_CTRL (SYSTEM_CONTROL_REG_BASE + 0x0C)
+#define GPIO_8to15_CTRL (SYSTEM_CONTROL_REG_BASE + 0x10)
+#define GPIO_16to24_CTRL (SYSTEM_CONTROL_REG_BASE + 0x14)
+#define GPIO_INT_SRC_CFG (SYSTEM_CONTROL_REG_BASE + 0x18)
+#define SYS_BUF_STATUS (SYSTEM_CONTROL_REG_BASE + 0x1C)
+#define PCIE_IP_REG_ACS (SYSTEM_CONTROL_REG_BASE + 0x20)
+#define PCIE_IP_REG_ACS_ADDR (SYSTEM_CONTROL_REG_BASE + 0x24)
+#define PCIE_IP_REG_ACS_DATA (SYSTEM_CONTROL_REG_BASE + 0x28)
+
+/* -------- IR Control register -------- */
+#define IR_Init_Reg (IR_CONTROL_REG_BASE + 0x00)
+#define IR_Idle_Cnt_Low (IR_CONTROL_REG_BASE + 0x04)
+#define IR_Idle_Cnt_High (IR_CONTROL_REG_BASE + 0x05)
+#define IR_Unit_Cnt_Low (IR_CONTROL_REG_BASE + 0x06)
+#define IR_Unit_Cnt_High (IR_CONTROL_REG_BASE + 0x07)
+#define IR_Data_Cnt (IR_CONTROL_REG_BASE + 0x08)
+#define rbIRen 0x80
+#define rbIRhighidle 0x10
+#define rbIRlowidle 0x00
+#define rbIRVld 0x04
+
+/* -------- I2C A control and state register -------- */
+#define I2C_A_CTL_STATUS (I2C_A_CONTROL_REG_BASE + 0x00)
+#define I2C_A_ADDR (I2C_A_CONTROL_REG_BASE + 0x04)
+#define I2C_A_SW_CTL (I2C_A_CONTROL_REG_BASE + 0x08)
+#define I2C_A_TIME_OUT_CNT (I2C_A_CONTROL_REG_BASE + 0x0C)
+#define I2C_A_FIFO_STATUS (I2C_A_CONTROL_REG_BASE + 0x10)
+#define I2C_A_FS_EN (I2C_A_CONTROL_REG_BASE + 0x14)
+#define I2C_A_FIFO_DATA (I2C_A_CONTROL_REG_BASE + 0x20)
+
+/* -------- I2C B control and state register -------- */
+#define I2C_B_CTL_STATUS (I2C_B_CONTROL_REG_BASE + 0x00)
+#define I2C_B_ADDR (I2C_B_CONTROL_REG_BASE + 0x04)
+#define I2C_B_SW_CTL (I2C_B_CONTROL_REG_BASE + 0x08)
+#define I2C_B_TIME_OUT_CNT (I2C_B_CONTROL_REG_BASE + 0x0C)
+#define I2C_B_FIFO_STATUS (I2C_B_CONTROL_REG_BASE + 0x10)
+#define I2C_B_FS_EN (I2C_B_CONTROL_REG_BASE + 0x14)
+#define I2C_B_FIFO_DATA (I2C_B_CONTROL_REG_BASE + 0x20)
+
+#define VIDEO_CTRL_STATUS_A (ATV_PORTA_CONTROL_REG_BASE + 0x04)
+
+/* -------- Digital TV control register, Port A -------- */
+#define MPEG2_CTRL_A (DTV_PORTA_CONTROL_REG_BASE + 0x00)
+#define SERIAL_IN_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x4C)
+#define VLD_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x60)
+#define ERR_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x64)
+#define BRD_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x68)
+
+/* -------- DMA Control Register, Port A -------- */
+#define DMA_PORTA_CHAN0_ADDR_LOW (DMA_PORTA_CONTROL_REG_BASE + 0x00)
+#define DMA_PORTA_CHAN0_ADDR_HI (DMA_PORTA_CONTROL_REG_BASE + 0x04)
+#define DMA_PORTA_CHAN0_TRANS_STATE (DMA_PORTA_CONTROL_REG_BASE + 0x08)
+#define DMA_PORTA_CHAN0_CONTROL (DMA_PORTA_CONTROL_REG_BASE + 0x0C)
+#define DMA_PORTA_CHAN1_ADDR_LOW (DMA_PORTA_CONTROL_REG_BASE + 0x10)
+#define DMA_PORTA_CHAN1_ADDR_HI (DMA_PORTA_CONTROL_REG_BASE + 0x14)
+#define DMA_PORTA_CHAN1_TRANS_STATE (DMA_PORTA_CONTROL_REG_BASE + 0x18)
+#define DMA_PORTA_CHAN1_CONTROL (DMA_PORTA_CONTROL_REG_BASE + 0x1C)
+#define DMA_PORTA_MANAGEMENT (DMA_PORTA_CONTROL_REG_BASE + 0x20)
+#define VIDEO_CTRL_STATUS_B (ATV_PORTB_CONTROL_REG_BASE + 0x04)
+
+/* -------- Digital TV control register, Port B -------- */
+#define MPEG2_CTRL_B (DTV_PORTB_CONTROL_REG_BASE + 0x00)
+#define SERIAL_IN_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x4C)
+#define VLD_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x60)
+#define ERR_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x64)
+#define BRD_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x68)
+
+/* -------- AES control register, Port B -------- */
+#define AES_CTRL_B (AES_PORTB_CONTROL_REG_BASE + 0x00)
+#define AES_KEY_BASE_B (AES_PORTB_CONTROL_REG_BASE + 0x04)
+
+/* -------- DMA Control Register, Port B -------- */
+#define DMA_PORTB_CHAN0_ADDR_LOW (DMA_PORTB_CONTROL_REG_BASE + 0x00)
+#define DMA_PORTB_CHAN0_ADDR_HI (DMA_PORTB_CONTROL_REG_BASE + 0x04)
+#define DMA_PORTB_CHAN0_TRANS_STATE (DMA_PORTB_CONTROL_REG_BASE + 0x08)
+#define DMA_PORTB_CHAN0_CONTROL (DMA_PORTB_CONTROL_REG_BASE + 0x0C)
+#define DMA_PORTB_CHAN1_ADDR_LOW (DMA_PORTB_CONTROL_REG_BASE + 0x10)
+#define DMA_PORTB_CHAN1_ADDR_HI (DMA_PORTB_CONTROL_REG_BASE + 0x14)
+#define DMA_PORTB_CHAN1_TRANS_STATE (DMA_PORTB_CONTROL_REG_BASE + 0x18)
+#define DMA_PORTB_CHAN1_CONTROL (DMA_PORTB_CONTROL_REG_BASE + 0x1C)
+#define DMA_PORTB_MANAGEMENT (DMA_PORTB_CONTROL_REG_BASE + 0x20)
+
+#define DMA_TRANS_UNIT_188 (0x00000007)
+
+/* -------- Macro define of 24 interrupt resource --------*/
+#define DMA_A_CHAN0_DONE_INT (0x00000001)
+#define DMA_A_CHAN1_DONE_INT (0x00000002)
+#define DMA_B_CHAN0_DONE_INT (0x00000004)
+#define DMA_B_CHAN1_DONE_INT (0x00000008)
+#define DMA_C_CHAN0_DONE_INT (0x00000010)
+#define DMA_C_CHAN1_DONE_INT (0x00000020)
+#define DMA_D_CHAN0_DONE_INT (0x00000040)
+#define DMA_D_CHAN1_DONE_INT (0x00000080)
+#define DATA_BUF_OVERFLOW_INT (0x00000100)
+#define UART_0_X_INT (0x00000200)
+#define UART_1_X_INT (0x00000400)
+#define IR_X_INT (0x00000800)
+#define GPIO_0_INT (0x00001000)
+#define GPIO_1_INT (0x00002000)
+#define GPIO_2_INT (0x00004000)
+#define GPIO_3_INT (0x00008000)
+#define ALL_INT (0x0000FFFF)
+
+/* software I2C bit mask */
+#define SW_I2C_MSK_MODE 0x01
+#define SW_I2C_MSK_CLK_OUT 0x02
+#define SW_I2C_MSK_DAT_OUT 0x04
+#define SW_I2C_MSK_CLK_EN 0x08
+#define SW_I2C_MSK_DAT_EN 0x10
+#define SW_I2C_MSK_DAT_IN 0x40
+#define SW_I2C_MSK_CLK_IN 0x80
+
+#define SMI_VID 0x1ADE
+#define SMI_PID 0x3038
+#define SMI_TS_DMA_BUF_SIZE (1024 * 188)
+
+struct smi_cfg_info {
+#define SMI_DVBSKY_S952 0
+#define SMI_DVBSKY_S950 1
+#define SMI_DVBSKY_T9580 2
+#define SMI_DVBSKY_T982 3
+ int type;
+ char *name;
+#define SMI_TS_NULL 0
+#define SMI_TS_DMA_SINGLE 1
+#define SMI_TS_DMA_BOTH 3
+/* SMI_TS_NULL: not used;
+ * SMI_TS_DMA_SINGLE: use DMA 0 only;
+ * SMI_TS_DMA_BOTH: use DMA 0 and 1. */
+ int ts_0;
+ int ts_1;
+#define DVBSKY_FE_NULL 0
+#define DVBSKY_FE_M88RS6000 1
+#define DVBSKY_FE_M88DS3103 2
+#define DVBSKY_FE_SIT2 3
+ int fe_0;
+ int fe_1;
+};
+
+struct smi_port {
+ struct smi_dev *dev;
+ int idx;
+ int enable;
+ int fe_type;
+ /* regs */
+ u32 DMA_CHAN0_ADDR_LOW;
+ u32 DMA_CHAN0_ADDR_HI;
+ u32 DMA_CHAN0_TRANS_STATE;
+ u32 DMA_CHAN0_CONTROL;
+ u32 DMA_CHAN1_ADDR_LOW;
+ u32 DMA_CHAN1_ADDR_HI;
+ u32 DMA_CHAN1_TRANS_STATE;
+ u32 DMA_CHAN1_CONTROL;
+ u32 DMA_MANAGEMENT;
+ /* dma */
+ dma_addr_t dma_addr[2];
+ u8 *cpu_addr[2];
+ u32 _dmaInterruptCH0;
+ u32 _dmaInterruptCH1;
+ u32 _int_status;
+ struct tasklet_struct tasklet;
+ /* dvb */
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ struct dmxdev dmxdev;
+ struct dvb_adapter dvb_adapter;
+ struct dvb_demux demux;
+ struct dvb_net dvbnet;
+ int users;
+ struct dvb_frontend *fe;
+ /* frontend i2c module */
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+};
+
+struct smi_dev {
+ int nr;
+ struct smi_cfg_info *info;
+
+ /* pcie */
+ struct pci_dev *pci_dev;
+ u32 __iomem *lmmio;
+
+ /* ts port */
+ struct smi_port ts_port[2];
+
+ /* i2c */
+ struct i2c_adapter i2c_bus[2];
+ struct i2c_algo_bit_data i2c_bit[2];
+};
+
+#define smi_read(reg) readl(dev->lmmio + ((reg)>>2))
+#define smi_write(reg, value) writel((value), dev->lmmio + ((reg)>>2))
+
+#define smi_andor(reg, mask, value) \
+ writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
+ ((value) & (mask)), dev->lmmio+((reg)>>2))
+
+#define smi_set(reg, bit) smi_andor((reg), (bit), (bit))
+#define smi_clear(reg, bit) smi_andor((reg), (bit), 0)
+
+#endif /* #ifndef _SMI_PCIE_H_ */
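
The accessor macros at the bottom of smipcie.h shift the register offset right by two because dev->lmmio is declared as u32 __iomem *, so pointer arithmetic is already in 32-bit words; smi_set()/smi_clear() are read-modify-write wrappers on top of that. A short usage sketch, assuming a struct smi_dev *dev in scope as the macros require (the helper name is invented, the sequence mirrors smi_hw_init() above):

static void demo_quiesce_interrupts(struct smi_dev *dev)
{
	u32 pending;

	smi_write(MSI_INT_ENA_CLR, ALL_INT);	/* mask every interrupt source */
	pending = smi_read(MSI_INT_STATUS);	/* latch whatever is pending   */
	smi_write(MSI_INT_STATUS_CLR, pending);	/* and acknowledge it          */
	smi_clear(PERIPHERAL_CTRL, 0x0303);	/* hold the demods in reset    */
}
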
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 28023f9f1dc7..6e933d383fa2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -463,7 +463,6 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
struct solo_dev *solo_dev = solo_enc->solo_dev;
struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_size;
- int ret;
vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
@@ -473,22 +472,10 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
- /* may discard all previous data in vbuf->sgl */
- if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
- DMA_FROM_DEVICE))
- return -ENOMEM;
- ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+ return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
SOLO_JPEG_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
- DMA_FROM_DEVICE);
-
- /* add the header only after dma_unmap_sg() */
- sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
- solo_enc->jpeg_header, solo_enc->jpeg_len);
-
- return ret;
}
static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
@@ -498,7 +485,6 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
int frame_off, frame_size;
int skip = 0;
- int ret;
if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
return -EIO;
@@ -521,21 +507,9 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
- /* may discard all previous data in vbuf->sgl */
- if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
- DMA_FROM_DEVICE))
- return -ENOMEM;
- ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
+ return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
SOLO_MP4E_EXT_ADDR(solo_dev),
SOLO_MP4E_EXT_SIZE(solo_dev));
- dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
- DMA_FROM_DEVICE);
-
- /* add the header only after dma_unmap_sg() */
- if (!vop_type(vh))
- sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
- solo_enc->vop, solo_enc->vop_len);
- return ret;
}
static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
@@ -703,9 +677,7 @@ static int solo_ring_thread(void *data)
if (timeout == -ERESTARTSYS || kthread_should_stop())
break;
- solo_irq_off(solo_dev, SOLO_IRQ_ENCODER);
solo_handle_ring(solo_dev);
- solo_irq_on(solo_dev, SOLO_IRQ_ENCODER);
try_to_freeze();
}
@@ -720,7 +692,10 @@ static int solo_enc_queue_setup(struct vb2_queue *q,
unsigned int *num_planes, unsigned int sizes[],
void *alloc_ctxs[])
{
+ struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
+
sizes[0] = FRAME_BUF_SIZE;
+ alloc_ctxs[0] = solo_enc->alloc_ctx;
*num_planes = 1;
if (*num_buffers < MIN_VID_BUFFERS)
@@ -770,26 +745,51 @@ static void solo_ring_stop(struct solo_dev *solo_dev)
static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
- int ret;
- ret = solo_enc_on(solo_enc);
- if (ret)
- return ret;
- return solo_ring_start(solo_enc->solo_dev);
+ return solo_enc_on(solo_enc);
}
static void solo_enc_stop_streaming(struct vb2_queue *q)
{
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
+ unsigned long flags;
+ spin_lock_irqsave(&solo_enc->av_lock, flags);
solo_enc_off(solo_enc);
- INIT_LIST_HEAD(&solo_enc->vidq_active);
- solo_ring_stop(solo_enc->solo_dev);
+ while (!list_empty(&solo_enc->vidq_active)) {
+ struct solo_vb2_buf *buf = list_entry(
+ solo_enc->vidq_active.next,
+ struct solo_vb2_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&solo_enc->av_lock, flags);
+}
+
+static void solo_enc_buf_finish(struct vb2_buffer *vb)
+{
+ struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+
+ switch (solo_enc->fmt) {
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_H264:
+ if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->vop, solo_enc->vop_len);
+ break;
+ default: /* V4L2_PIX_FMT_MJPEG */
+ sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+ solo_enc->jpeg_header, solo_enc->jpeg_len);
+ break;
+ }
}
static struct vb2_ops solo_enc_video_qops = {
.queue_setup = solo_enc_queue_setup,
.buf_queue = solo_enc_buf_queue,
+ .buf_finish = solo_enc_buf_finish,
.start_streaming = solo_enc_start_streaming,
.stop_streaming = solo_enc_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
@@ -1263,6 +1263,11 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
return ERR_PTR(-ENOMEM);
hdl = &solo_enc->hdl;
+ solo_enc->alloc_ctx = vb2_dma_sg_init_ctx(&solo_dev->pdev->dev);
+ if (IS_ERR(solo_enc->alloc_ctx)) {
+ ret = PTR_ERR(solo_enc->alloc_ctx);
+ goto hdl_free;
+ }
v4l2_ctrl_handler_init(hdl, 10);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
@@ -1366,6 +1371,7 @@ pci_free:
solo_enc->desc_items, solo_enc->desc_dma);
hdl_free:
v4l2_ctrl_handler_free(hdl);
+ vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
return ERR_PTR(ret);
}
@@ -1375,8 +1381,12 @@ static void solo_enc_free(struct solo_enc_dev *solo_enc)
if (solo_enc == NULL)
return;
+ pci_free_consistent(solo_enc->solo_dev->pdev,
+ sizeof(struct solo_p2m_desc) * solo_enc->desc_nelts,
+ solo_enc->desc_items, solo_enc->desc_dma);
video_unregister_device(solo_enc->vfd);
v4l2_ctrl_handler_free(&solo_enc->hdl);
+ vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
}
@@ -1419,13 +1429,15 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
solo_dev->v4l2_enc[0]->vfd->num,
solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num);
- return 0;
+ return solo_ring_start(solo_dev);
}
void solo_enc_v4l2_exit(struct solo_dev *solo_dev)
{
int i;
+ solo_ring_stop(solo_dev);
+
for (i = 0; i < solo_dev->nr_chans; i++)
solo_enc_free(solo_dev->v4l2_enc[i]);
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 72017b7f0a75..bd8edfa319b8 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -180,6 +180,7 @@ struct solo_enc_dev {
u32 sequence;
struct vb2_queue vidq;
struct list_head vidq_active;
+ void *alloc_ctx;
int desc_count;
int desc_nelts;
struct solo_p2m_desc *desc_items;
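
The solo6x10 changes above (and the tw68 changes below) follow the same pattern: let videobuf2-dma-sg own the scatter-gather mapping instead of calling dma_map_sg()/dma_unmap_sg() by hand. The driver creates one allocation context per device, hands it out in queue_setup(), and defers any CPU writes into the buffer (the JPEG/VOP headers) to buf_finish(), which runs after the core has unmapped the buffer. A compressed, hypothetical sketch of that lifecycle, assuming the vb2_dma_sg_init_ctx()/vb2_dma_sg_cleanup_ctx() API of this kernel generation:

/* probe: create the per-device context once */
demo->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
if (IS_ERR(demo->alloc_ctx))
	return PTR_ERR(demo->alloc_ctx);

/* .queue_setup: the core maps/unmaps buffers using this context */
static int demo_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			    unsigned int *num_buffers, unsigned int *num_planes,
			    unsigned int sizes[], void *alloc_ctxs[])
{
	struct demo_dev *demo = vb2_get_drv_priv(q);

	sizes[0] = DEMO_FRAME_SIZE;
	alloc_ctxs[0] = demo->alloc_ctx;
	*num_planes = 1;
	return 0;
}

/* .buf_finish: the buffer is already unmapped here, so writing the
 * software-generated header through the CPU mapping is safe */
static void demo_buf_finish(struct vb2_buffer *vb)
{
	struct demo_dev *demo = vb2_get_drv_priv(vb->vb2_queue);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

	sg_copy_from_buffer(sgt->sgl, sgt->nents, demo->header, demo->header_len);
}

/* remove: tear the context down after the queues are gone */
vb2_dma_sg_cleanup_ctx(demo->alloc_ctx);
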
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 2cb35c23d2ac..a4d8867e1d7b 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -490,7 +490,7 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
if(detected == 0)
printk("budget-patch not detected or saa7146 in non-default state.\n"
- "try enabling ressetting of 7146 with MASK_31 in MC1 register\n");
+ "try enabling resetting of 7146 with MASK_31 in MC1 register\n");
else
printk("BUDGET-PATCH DETECTED.\n");
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 63f0b64057cb..c135165a8b26 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -304,13 +304,19 @@ static int tw68_initdev(struct pci_dev *pci_dev,
/* Then do any initialisation wanted before interrupts are on */
tw68_hw_init1(dev);
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail3;
+ }
+
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
- goto fail3;
+ goto fail4;
}
/*
@@ -324,7 +330,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_err("%s: can't register video device\n",
dev->name);
- goto fail4;
+ goto fail5;
}
tw_setl(TW68_INTMASK, dev->pci_irqmask);
@@ -333,8 +339,10 @@ static int tw68_initdev(struct pci_dev *pci_dev,
return 0;
-fail4:
+fail5:
video_unregister_device(&dev->vdev);
+fail4:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail3:
iounmap(dev->lmmio);
fail2:
@@ -358,6 +366,7 @@ static void tw68_finidev(struct pci_dev *pci_dev)
/* unregister */
video_unregister_device(&dev->vdev);
v4l2_ctrl_handler_free(&dev->hdl);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
/* release resources */
iounmap(dev->lmmio);
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 5c94ac7c88d9..8355e55b4e8e 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -384,6 +384,7 @@ static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
unsigned tot_bufs = q->num_buffers + *num_buffers;
sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
/*
* We allow create_bufs, but only if the sizeimage is the same as the
* current sizeimage. The tw68_buffer_count calculation becomes quite
@@ -461,17 +462,12 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
unsigned size, bpl;
- int rc;
size = (dev->width * dev->height * dev->fmt->depth) >> 3;
if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
vb2_set_plane_payload(vb, 0, size);
- rc = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
- if (!rc)
- return -EIO;
-
bpl = (dev->width * dev->fmt->depth) >> 3;
switch (dev->field) {
case V4L2_FIELD_TOP:
@@ -505,11 +501,8 @@ static void tw68_buf_finish(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct tw68_dev *dev = vb2_get_drv_priv(vq);
- struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
- dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-
pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
}
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 2c8abe26b13b..7a7501bd165f 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -181,6 +181,7 @@ struct tw68_dev {
unsigned field;
struct vb2_queue vidq;
struct list_head active;
+ void *alloc_ctx;
/* various v4l controls */
const struct tw68_tvnorm *tvnorm; /* video */
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 099d5fbebb7c..2b25d31c46f6 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1528,8 +1528,9 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(zr->pci_dev));
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
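
The zoran querycap change (and the bfin_capture one further down) is the usual conversion to per-node capability reporting: device_caps describes what this particular video node can do, and capabilities becomes the union for the whole device with V4L2_CAP_DEVICE_CAPS set. In generic sketch form, with invented driver and card names:

static int demo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "demo", sizeof(cap->driver));
	strlcpy(cap->card, "Demo capture board", sizeof(cap->card));
	/* capabilities of this specific device node */
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	/* union over all nodes of the device, plus the flag itself */
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
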
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 1412c3edd5a2..765bffb49a72 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -65,14 +65,6 @@ config VIDEO_TIMBERDALE
---help---
Add support for the Video In peripheral of the timberdale FPGA.
-config VIDEO_VINO
- tristate "SGI Vino Video For Linux"
- depends on I2C && SGI_IP22 && VIDEO_V4L2
- select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y here to build in support for the Vino video input system found
- on SGI Indy machines.
-
config VIDEO_M32R_AR
tristate "AR devices"
depends on VIDEO_V4L2
@@ -267,8 +259,8 @@ if V4L_TEST_DRIVERS
source "drivers/media/platform/vivid/Kconfig"
-config VIDEO_MEM2MEM_TESTDEV
- tristate "Virtual test device for mem2mem framework"
+config VIDEO_VIM2M
+ tristate "Virtual Memory-to-Memory Driver"
depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 579046bc276f..a49936b8ce8a 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,9 +2,6 @@
# Makefile for the video capture/playback device drivers.
#
-obj-$(CONFIG_VIDEO_VINO) += indycam.o
-obj-$(CONFIG_VIDEO_VINO) += vino.o
-
obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
@@ -17,7 +14,7 @@ obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
obj-$(CONFIG_VIDEO_VIVID) += vivid/
-obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
+obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 9b5daa65841c..8f6698668ecf 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -49,7 +49,7 @@
struct bcap_format {
char *desc;
u32 pixelformat;
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
int bpp; /* bits per pixel */
int dlen; /* data length for ppi in bits */
};
@@ -116,35 +116,35 @@ static const struct bcap_format bcap_formats[] = {
{
.desc = "YCbCr 4:2:2 Interleaved UYVY",
.pixelformat = V4L2_PIX_FMT_UYVY,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.bpp = 16,
.dlen = 8,
},
{
.desc = "YCbCr 4:2:2 Interleaved YUYV",
.pixelformat = V4L2_PIX_FMT_YUYV,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 16,
.dlen = 8,
},
{
.desc = "YCbCr 4:2:2 Interleaved UYVY",
.pixelformat = V4L2_PIX_FMT_UYVY,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
.bpp = 16,
.dlen = 16,
},
{
.desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.bpp = 16,
.dlen = 8,
},
{
.desc = "RGB 444",
.pixelformat = V4L2_PIX_FMT_RGB444,
- .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 16,
.dlen = 8,
},
@@ -161,7 +161,7 @@ static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
{
- enum v4l2_mbus_pixelcode code;
+ u32 code;
struct bcap_format *sf;
unsigned int num_formats = 0;
int i, j;
@@ -349,18 +349,6 @@ static void bcap_buffer_cleanup(struct vb2_buffer *vb)
spin_unlock_irqrestore(&bcap_dev->lock, flags);
}
-static void bcap_lock(struct vb2_queue *vq)
-{
- struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
- mutex_lock(&bcap_dev->mutex);
-}
-
-static void bcap_unlock(struct vb2_queue *vq)
-{
- struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
- mutex_unlock(&bcap_dev->mutex);
-}
-
static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
@@ -457,8 +445,8 @@ static struct vb2_ops bcap_video_qops = {
.buf_prepare = bcap_buffer_prepare,
.buf_cleanup = bcap_buffer_cleanup,
.buf_queue = bcap_buffer_queue,
- .wait_prepare = bcap_unlock,
- .wait_finish = bcap_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = bcap_start_streaming,
.stop_streaming = bcap_stop_streaming,
};
@@ -841,7 +829,8 @@ static int bcap_querycap(struct file *file, void *priv,
{
struct bcap_device *bcap_dev = video_drvdata(file);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
strlcpy(cap->bus_info, "Blackfin Platform", sizeof(cap->bus_info));
strlcpy(cap->card, bcap_dev->cfg->card_name, sizeof(cap->card));
@@ -995,6 +984,7 @@ static int bcap_probe(struct platform_device *pdev)
q->ops = &bcap_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &bcap_dev->mutex;
ret = vb2_queue_init(q);
if (ret)
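
Dropping the hand-rolled bcap_lock()/bcap_unlock() pair works because videobuf2 ships stock wait_prepare/wait_finish helpers that operate on the queue's lock field; once q->lock points at the driver mutex, the core can drop and re-take it around blocking waits. Conceptually the helpers amount to roughly the following (a sketch, not the verbatim implementation):

	void vb2_ops_wait_prepare(struct vb2_queue *vq)
	{
		mutex_unlock(vq->lock);	/* release the driver lock before sleeping */
	}

	void vb2_ops_wait_finish(struct vb2_queue *vq)
	{
		mutex_lock(vq->lock);	/* re-acquire it once the wait is over */
	}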
@@ -1130,7 +1120,6 @@ static int bcap_remove(struct platform_device *pdev)
static struct platform_driver bcap_driver = {
.driver = {
.name = CAPTURE_DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = bcap_probe,
.remove = bcap_remove,
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 3543291e6273..25ce15561695 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,3 +1,3 @@
-coda-objs := coda-common.o coda-bit.o coda-h264.o
+coda-objs := coda-common.o coda-bit.o coda-h264.o coda-jpeg.o
obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 9b8ea8bbeb4e..b4029ae293d3 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -217,11 +217,34 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
void coda_fill_bitstream(struct coda_ctx *ctx)
{
struct vb2_buffer *src_buf;
- struct coda_timestamp *ts;
+ struct coda_buffer_meta *meta;
+ u32 start;
while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ /*
+ * Only queue a single JPEG into the bitstream buffer, except
+ * to increase payload over 512 bytes or if in hold state.
+ */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ (coda_get_bitstream_payload(ctx) >= 512) && !ctx->hold)
+ break;
+
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ /* Drop frames that do not start/end with SOI/EOI markers */

+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ !coda_jpeg_check_buffer(ctx, src_buf)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "dropping invalid JPEG frame\n");
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ continue;
+ }
+
+ /* Buffer start position */
+ start = ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask;
+
if (coda_bitstream_try_queue(ctx, src_buf)) {
/*
* Source buffer is queued in the bitstream ringbuffer;
@@ -229,12 +252,16 @@ void coda_fill_bitstream(struct coda_ctx *ctx)
*/
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- ts = kmalloc(sizeof(*ts), GFP_KERNEL);
- if (ts) {
- ts->sequence = src_buf->v4l2_buf.sequence;
- ts->timecode = src_buf->v4l2_buf.timecode;
- ts->timestamp = src_buf->v4l2_buf.timestamp;
- list_add_tail(&ts->list, &ctx->timestamp_list);
+ meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (meta) {
+ meta->sequence = src_buf->v4l2_buf.sequence;
+ meta->timecode = src_buf->v4l2_buf.timecode;
+ meta->timestamp = src_buf->v4l2_buf.timestamp;
+ meta->start = start;
+ meta->end = ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask;
+ list_add_tail(&meta->list,
+ &ctx->buffer_meta_list);
}
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
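
The coda_timestamp list becomes a more general per-buffer metadata list: besides sequence, timecode and timestamp, each entry now records where the copied buffer starts and ends inside the bitstream ring (the kfifo write index masked to the ring size), which the JPEG decode path later uses to recognise the last queued frame. A sketch of the structure these hunks imply (field types are inferred from their use; the real definition lives in coda.h, which is not part of this excerpt):

	struct coda_buffer_meta {
		struct list_head	list;
		u32			sequence;
		struct v4l2_timecode	timecode;
		struct timeval		timestamp;
		u32			start;	/* ring offset where the buffer begins */
		u32			end;	/* ring offset just past its last byte */
	};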
@@ -691,6 +718,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
struct vb2_buffer *buf;
int gamma, ret, value;
u32 dst_fourcc;
+ u32 stride;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -710,6 +738,14 @@ static int coda_start_encoding(struct coda_ctx *ctx)
return -EFAULT;
}
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
+ if (!ctx->params.jpeg_qmat_tab[0])
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+ if (!ctx->params.jpeg_qmat_tab[1])
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ }
+
mutex_lock(&dev->coda_mutex);
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
@@ -729,10 +765,10 @@ static int coda_start_encoding(struct coda_ctx *ctx)
break;
}
- value = coda_read(dev, CODA_REG_BIT_FRAME_MEM_CTRL);
- value &= ~(1 << 2 | 0x7 << 9);
- ctx->frame_mem_ctrl = value;
- coda_write(dev, value, CODA_REG_BIT_FRAME_MEM_CTRL);
+ ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+ if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
+ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
if (dev->devtype->product == CODA_DX6) {
/* Configure the coda */
@@ -741,6 +777,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
}
/* Could set rotation here if needed */
+ value = 0;
switch (dev->devtype->product) {
case CODA_DX6:
value = (q_data_src->width & CODADX6_PICWIDTH_MASK)
@@ -764,6 +801,8 @@ static int coda_start_encoding(struct coda_ctx *ctx)
<< CODA_PICHEIGHT_OFFSET;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG)
+ ctx->params.framerate = 0;
coda_write(dev, ctx->params.framerate,
CODA_CMD_ENC_SEQ_SRC_F_RATE);
@@ -797,6 +836,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
+ case V4L2_PIX_FMT_JPEG:
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_PARA);
+ coda_write(dev, ctx->params.jpeg_restart_interval,
+ CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_EN);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET);
+
+ coda_jpeg_write_tables(ctx);
+ break;
default:
v4l2_err(v4l2_dev,
"dst format (0x%08x) invalid.\n", dst_fourcc);
@@ -804,28 +853,36 @@ static int coda_start_encoding(struct coda_ctx *ctx)
goto out;
}
- switch (ctx->params.slice_mode) {
- case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
- value = 0;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
- value = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK)
- << CODA_SLICING_SIZE_OFFSET;
- value |= (1 & CODA_SLICING_UNIT_MASK)
- << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
- value = (ctx->params.slice_max_bits & CODA_SLICING_SIZE_MASK)
- << CODA_SLICING_SIZE_OFFSET;
- value |= (0 & CODA_SLICING_UNIT_MASK)
- << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
+ /*
+ * slice mode and GOP size registers are used for thumb size/offset
+ * in JPEG mode
+ */
+ if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
+ switch (ctx->params.slice_mode) {
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+ value = 0;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
+ value = (ctx->params.slice_max_mb &
+ CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (1 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
+ value = (ctx->params.slice_max_bits &
+ CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (0 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
+ value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
}
- coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
- value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
- coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
if (ctx->params.bitrate) {
/* Rate control enabled */
@@ -916,19 +973,24 @@ static int coda_start_encoding(struct coda_ctx *ctx)
goto out;
}
- if (dev->devtype->product == CODA_960)
- ctx->num_internal_frames = 4;
- else
- ctx->num_internal_frames = 2;
- ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
- if (ret < 0) {
- v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
- goto out;
+ if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
+ if (dev->devtype->product == CODA_960)
+ ctx->num_internal_frames = 4;
+ else
+ ctx->num_internal_frames = 2;
+ ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
+ goto out;
+ }
+ stride = q_data_src->bytesperline;
+ } else {
+ ctx->num_internal_frames = 0;
+ stride = 0;
}
-
coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
- coda_write(dev, q_data_src->bytesperline,
- CODA_CMD_SET_FRAME_BUF_STRIDE);
+ coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
if (dev->devtype->product == CODA_7541) {
coda_write(dev, q_data_src->bytesperline,
CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
@@ -1036,9 +1098,10 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
struct coda_dev *dev = ctx->dev;
int force_ipicture;
int quant_param = 0;
- u32 picture_y, picture_cb, picture_cr;
u32 pic_stream_buffer_addr, pic_stream_buffer_size;
+ u32 rot_mode = 0;
u32 dst_fourcc;
+ u32 reg;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1076,7 +1139,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2];
- pic_stream_buffer_size = CODA_MAX_FRAME_SIZE -
+ pic_stream_buffer_size = q_data_dst->sizeimage -
ctx->vpu_header_size[0] -
ctx->vpu_header_size[1] -
ctx->vpu_header_size[2];
@@ -1090,7 +1153,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
} else {
pic_stream_buffer_addr =
vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- pic_stream_buffer_size = CODA_MAX_FRAME_SIZE;
+ pic_stream_buffer_size = q_data_dst->sizeimage;
}
if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
@@ -1102,6 +1165,9 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
case V4L2_PIX_FMT_MPEG4:
quant_param = ctx->params.mpeg4_intra_qp;
break;
+ case V4L2_PIX_FMT_JPEG:
+ quant_param = 30;
+ break;
default:
v4l2_warn(&ctx->dev->v4l2_dev,
"cannot set intra qp, fmt not supported\n");
@@ -1124,42 +1190,22 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
}
/* submit */
- coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
- CODA_CMD_ENC_PIC_ROT_MODE);
+ if (ctx->params.rot_mode)
+ rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
+ coda_write(dev, rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
-
- picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- switch (q_data_src->fourcc) {
- case V4L2_PIX_FMT_YVU420:
- /* Switch Cb and Cr for YVU420 format */
- picture_cr = picture_y + q_data_src->bytesperline *
- q_data_src->height;
- picture_cb = picture_cr + q_data_src->bytesperline / 2 *
- q_data_src->height / 2;
- break;
- case V4L2_PIX_FMT_YUV420:
- default:
- picture_cb = picture_y + q_data_src->bytesperline *
- q_data_src->height;
- picture_cr = picture_cb + q_data_src->bytesperline / 2 *
- q_data_src->height / 2;
- break;
- }
-
if (dev->devtype->product == CODA_960) {
coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
- coda_write(dev, picture_y, CODA9_CMD_ENC_PIC_SRC_ADDR_Y);
- coda_write(dev, picture_cb, CODA9_CMD_ENC_PIC_SRC_ADDR_CB);
- coda_write(dev, picture_cr, CODA9_CMD_ENC_PIC_SRC_ADDR_CR);
+ reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
} else {
- coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y);
- coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
- coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
+ reg = CODA_CMD_ENC_PIC_SRC_ADDR_Y;
}
+ coda_write_base(ctx, q_data_src, src_buf, reg);
+
coda_write(dev, force_ipicture << 1 & 0x2,
CODA_CMD_ENC_PIC_OPTION);
@@ -1293,7 +1339,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
u32 bitstream_buf, bitstream_size;
struct coda_dev *dev = ctx->dev;
int width, height;
- u32 src_fourcc;
+ u32 src_fourcc, dst_fourcc;
u32 val;
int ret;
@@ -1303,6 +1349,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
bitstream_buf = ctx->bitstream.paddr;
bitstream_size = ctx->bitstream.size;
src_fourcc = q_data_src->fourcc;
+ dst_fourcc = q_data_dst->fourcc;
/* Allocate per-instance buffers */
ret = coda_alloc_context_buffers(ctx, q_data_src);
@@ -1314,6 +1361,11 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
/* Update coda bitstream read and write pointers from kfifo */
coda_kfifo_sync_to_device_full(ctx);
+ ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
+ if (dst_fourcc == V4L2_PIX_FMT_NV12)
+ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
+ coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
+
ctx->display_idx = -1;
ctx->frm_dis_flg = 0;
coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
@@ -1327,6 +1379,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
if ((dev->devtype->product == CODA_7541) ||
(dev->devtype->product == CODA_960))
val |= CODA_REORDER_ENABLE;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
+ val |= CODA_NO_INT_ENABLE;
coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
ctx->params.codec_mode = ctx->codec->mode;
@@ -1442,13 +1496,23 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
}
if (dev->devtype->product == CODA_960) {
- coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+ int cbb_size, crb_size;
+ coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+ /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
+
+ if (dst_fourcc == V4L2_PIX_FMT_NV12) {
+ cbb_size = 0;
+ crb_size = 16;
+ } else {
+ cbb_size = 8;
+ crb_size = 8;
+ }
coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
- 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
- 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
+ cbb_size << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
+ crb_size << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
CODA9_CMD_SET_FRAME_CACHE_CONFIG);
}
@@ -1501,20 +1565,11 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct vb2_buffer *dst_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
- u32 stridey, height;
- u32 picture_y, picture_cb, picture_cr;
+ u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (ctx->params.rot_mode & CODA_ROT_90) {
- stridey = q_data_dst->height;
- height = q_data_dst->width;
- } else {
- stridey = q_data_dst->width;
- height = q_data_dst->height;
- }
-
/* Try to copy source buffer contents into the bitstream ringbuffer */
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx);
@@ -1545,17 +1600,6 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
if (dev->devtype->product == CODA_960)
coda_set_gdi_regs(ctx);
- /* Set rotator output */
- picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
- /* Switch Cr and Cb for YVU420 format */
- picture_cr = picture_y + stridey * height;
- picture_cb = picture_cr + stridey / 2 * height / 2;
- } else {
- picture_cb = picture_y + stridey * height;
- picture_cr = picture_cb + stridey / 2 * height / 2;
- }
-
if (dev->devtype->product == CODA_960) {
/*
* The CODA960 seems to have an internal list of buffers with
@@ -1565,16 +1609,16 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
*/
coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
CODA9_CMD_DEC_PIC_ROT_INDEX);
- coda_write(dev, picture_y, CODA9_CMD_DEC_PIC_ROT_ADDR_Y);
- coda_write(dev, picture_cb, CODA9_CMD_DEC_PIC_ROT_ADDR_CB);
- coda_write(dev, picture_cr, CODA9_CMD_DEC_PIC_ROT_ADDR_CR);
- coda_write(dev, stridey, CODA9_CMD_DEC_PIC_ROT_STRIDE);
+
+ reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
} else {
- coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
- coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
- coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
- coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
+ reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
+ reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
}
+ coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
+ coda_write(dev, q_data_dst->bytesperline, reg_stride);
+
coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
CODA_CMD_DEC_PIC_ROT_MODE);
@@ -1599,6 +1643,26 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
+ struct coda_buffer_meta *meta;
+
+ /* If this is the last buffer in the bitstream, add padding */
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ if (meta->end == (ctx->bitstream_fifo.kfifo.in &
+ ctx->bitstream_fifo.kfifo.mask)) {
+ static unsigned char buf[512];
+ unsigned int pad;
+
+ /* Pad to multiple of 256 and then add 256 more */
+ pad = ((0 - meta->end) & 0xff) + 256;
+
+ memset(buf, 0xff, sizeof(buf));
+
+ kfifo_in(&ctx->bitstream_fifo, buf, pad);
+ }
+ }
+
coda_kfifo_sync_to_device_full(ctx);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
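
The padding computation above is easiest to check with numbers. Suppose the last JPEG ends at ring offset meta->end == 0x1234: (0 - 0x1234) & 0xff is 0xcc (204 bytes), which brings the write position up to 0x1300, the next multiple of 256; the additional 256 bytes then give the decoder a full block of 0xff filler after the image, for a total pad of 460 bytes ending at 0x1400. (The offset is made up for illustration; only the arithmetic is taken from the hunk above.)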
@@ -1612,7 +1676,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct coda_q_data *q_data_src;
struct coda_q_data *q_data_dst;
struct vb2_buffer *dst_buf;
- struct coda_timestamp *ts;
+ struct coda_buffer_meta *meta;
+ unsigned long payload;
int width, height;
int decoded_idx;
int display_idx;
@@ -1739,23 +1804,23 @@ static void coda_finish_decode(struct coda_ctx *ctx)
val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
val -= ctx->sequence_offset;
mutex_lock(&ctx->bitstream_mutex);
- if (!list_empty(&ctx->timestamp_list)) {
- ts = list_first_entry(&ctx->timestamp_list,
- struct coda_timestamp, list);
- list_del(&ts->list);
- if (val != (ts->sequence & 0xffff)) {
+ if (!list_empty(&ctx->buffer_meta_list)) {
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ list_del(&meta->list);
+ if (val != (meta->sequence & 0xffff)) {
v4l2_err(&dev->v4l2_dev,
"sequence number mismatch (%d(%d) != %d)\n",
val, ctx->sequence_offset,
- ts->sequence);
+ meta->sequence);
}
- ctx->frame_timestamps[decoded_idx] = *ts;
- kfree(ts);
+ ctx->frame_metas[decoded_idx] = *meta;
+ kfree(meta);
} else {
v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
- memset(&ctx->frame_timestamps[decoded_idx], 0,
- sizeof(struct coda_timestamp));
- ctx->frame_timestamps[decoded_idx].sequence = val;
+ memset(&ctx->frame_metas[decoded_idx], 0,
+ sizeof(struct coda_buffer_meta));
+ ctx->frame_metas[decoded_idx].sequence = val;
}
mutex_unlock(&ctx->bitstream_mutex);
@@ -1794,11 +1859,22 @@ static void coda_finish_decode(struct coda_ctx *ctx)
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
- ts = &ctx->frame_timestamps[ctx->display_idx];
- dst_buf->v4l2_buf.timecode = ts->timecode;
- dst_buf->v4l2_buf.timestamp = ts->timestamp;
-
- vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
+ meta = &ctx->frame_metas[ctx->display_idx];
+ dst_buf->v4l2_buf.timecode = meta->timecode;
+ dst_buf->v4l2_buf.timestamp = meta->timestamp;
+
+ switch (q_data_dst->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
+ default:
+ payload = width * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ payload = width * height * 2;
+ break;
+ }
+ vb2_set_plane_payload(dst_buf, 0, payload);
v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[display_idx] ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 5f0cd5cafea2..39330a70f752 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -43,6 +43,7 @@
#define CODA_NAME "coda"
#define CODADX6_MAX_INSTANCES 4
+#define CODA_MAX_FORMATS 4
#define CODA_PARA_BUF_SIZE (10 * 1024)
#define CODA_ISRAM_SIZE (2048 * 2)
@@ -82,6 +83,34 @@ unsigned int coda_read(struct coda_dev *dev, u32 reg)
return data;
}
+void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
+ struct vb2_buffer *buf, unsigned int reg_y)
+{
+ u32 base_y = vb2_dma_contig_plane_dma_addr(buf, 0);
+ u32 base_cb, base_cr;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YVU420:
+ /* Switch Cb and Cr for YVU420 format */
+ base_cr = base_y + q_data->bytesperline * q_data->height;
+ base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ default:
+ base_cb = base_y + q_data->bytesperline * q_data->height;
+ base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ base_cb = base_y + q_data->bytesperline * q_data->height;
+ base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
+ }
+
+ coda_write(ctx->dev, base_y, reg_y);
+ coda_write(ctx->dev, base_cb, reg_y + 4);
+ coda_write(ctx->dev, base_cr, reg_y + 8);
+}
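
For the planar layouts handled by coda_write_base(), the chroma base addresses follow directly from the luma plane size. As an illustration (numbers are examples only): a 176x144 YUV420 buffer with bytesperline = 176 gives base_cb = base_y + 176 * 144 = base_y + 25344 and base_cr = base_cb + 25344 / 4 = base_cb + 6336, while YUV422P keeps full-height chroma planes so the second offset becomes 25344 / 2 = 12672; YVU420 uses the same arithmetic and only swaps which register receives Cb and which Cr.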
+
/*
* Array of all formats supported by any version of Coda:
*/
@@ -95,6 +124,14 @@ static const struct coda_fmt coda_formats[] = {
.fourcc = V4L2_PIX_FMT_YVU420,
},
{
+ .name = "YUV 4:2:0 Partial interleaved Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ },
+ {
+ .name = "YUV 4:2:2 Planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ },
+ {
.name = "H264 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H264,
},
@@ -102,6 +139,10 @@ static const struct coda_fmt coda_formats[] = {
.name = "MPEG4 Encoded Stream",
.fourcc = V4L2_PIX_FMT_MPEG4,
},
+ {
+ .name = "JPEG Encoded Images",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ },
};
#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
@@ -122,8 +163,10 @@ static const struct coda_codec codadx6_codecs[] = {
static const struct coda_codec coda7_codecs[] = {
CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
+ CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
};
static const struct coda_codec coda9_codecs[] = {
@@ -133,17 +176,115 @@ static const struct coda_codec coda9_codecs[] = {
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
};
+struct coda_video_device {
+ const char *name;
+ enum coda_inst_type type;
+ const struct coda_context_ops *ops;
+ u32 src_formats[CODA_MAX_FORMATS];
+ u32 dst_formats[CODA_MAX_FORMATS];
+};
+
+static const struct coda_video_device coda_bit_encoder = {
+ .name = "coda-encoder",
+ .type = CODA_INST_ENCODER,
+ .ops = &coda_bit_encode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_MPEG4,
+ },
+};
+
+static const struct coda_video_device coda_bit_jpeg_encoder = {
+ .name = "coda-jpeg-encoder",
+ .type = CODA_INST_ENCODER,
+ .ops = &coda_bit_encode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV422P,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+};
+
+static const struct coda_video_device coda_bit_decoder = {
+ .name = "coda-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda_bit_decode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_MPEG4,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ },
+};
+
+static const struct coda_video_device coda_bit_jpeg_decoder = {
+ .name = "coda-jpeg-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda_bit_decode_ops,
+ .src_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV422P,
+ },
+};
+
+static const struct coda_video_device *codadx6_video_devices[] = {
+ &coda_bit_encoder,
+};
+
+static const struct coda_video_device *coda7_video_devices[] = {
+ &coda_bit_jpeg_encoder,
+ &coda_bit_jpeg_decoder,
+ &coda_bit_encoder,
+ &coda_bit_decoder,
+};
+
+static const struct coda_video_device *coda9_video_devices[] = {
+ &coda_bit_encoder,
+ &coda_bit_decoder,
+};
+
static bool coda_format_is_yuv(u32 fourcc)
{
switch (fourcc) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YUV422P:
return true;
default:
return false;
}
}
+static const char *coda_format_name(u32 fourcc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coda_formats); i++) {
+ if (coda_formats[i].fourcc == fourcc)
+ return coda_formats[i].name;
+ }
+
+ return NULL;
+}
+
/*
* Normalize all supported YUV 4:2:0 formats to the value used in the codec
* tables.
@@ -202,6 +343,17 @@ static void coda_get_max_dimensions(struct coda_dev *dev,
*max_h = h;
}
+const struct coda_video_device *to_coda_video_device(struct video_device *vdev)
+{
+ struct coda_dev *dev = video_get_drvdata(vdev);
+ unsigned int i = vdev - dev->vfd;
+
+ if (i >= dev->devtype->num_vdevs)
+ return NULL;
+
+ return dev->devtype->vdevs[i];
+}
+
const char *coda_product_name(int product)
{
static char buf[9];
@@ -240,58 +392,28 @@ static int coda_querycap(struct file *file, void *priv,
static int coda_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- const struct coda_codec *codecs = ctx->dev->devtype->codecs;
- const struct coda_fmt *formats = coda_formats;
- const struct coda_fmt *fmt;
- int num_codecs = ctx->dev->devtype->num_codecs;
- int num_formats = ARRAY_SIZE(coda_formats);
- int i, k, num = 0;
- bool yuv;
-
- if (ctx->inst_type == CODA_INST_ENCODER)
- yuv = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ struct video_device *vdev = video_devdata(file);
+ const struct coda_video_device *cvd = to_coda_video_device(vdev);
+ const u32 *formats;
+ const char *name;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ formats = cvd->src_formats;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ formats = cvd->dst_formats;
else
- yuv = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE);
-
- for (i = 0; i < num_formats; i++) {
- /* Skip either raw or compressed formats */
- if (yuv != coda_format_is_yuv(formats[i].fourcc))
- continue;
- /* All uncompressed formats are always supported */
- if (yuv) {
- if (num == f->index)
- break;
- ++num;
- continue;
- }
- /* Compressed formats may be supported, check the codec list */
- for (k = 0; k < num_codecs; k++) {
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- formats[i].fourcc == codecs[k].dst_fourcc)
- break;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- formats[i].fourcc == codecs[k].src_fourcc)
- break;
- }
- if (k < num_codecs) {
- if (num == f->index)
- break;
- ++num;
- }
- }
+ return -EINVAL;
- if (i < num_formats) {
- fmt = &formats[i];
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
- if (!yuv)
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
- return 0;
- }
+ if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
+ return -EINVAL;
+
+ name = coda_format_name(formats[f->index]);
+ strlcpy(f->description, name, sizeof(f->description));
+ f->pixelformat = formats[f->index];
+ if (!coda_format_is_yuv(formats[f->index]))
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
- /* Format not found */
- return -EINVAL;
+ return 0;
}
static int coda_g_fmt(struct file *file, void *priv,
@@ -311,7 +433,37 @@ static int coda_g_fmt(struct file *file, void *priv,
f->fmt.pix.bytesperline = q_data->bytesperline;
f->fmt.pix.sizeimage = q_data->sizeimage;
- f->fmt.pix.colorspace = ctx->colorspace;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
+static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ const u32 *formats;
+ int i;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ formats = ctx->cvd->src_formats;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ formats = ctx->cvd->dst_formats;
+ else
+ return -EINVAL;
+
+ for (i = 0; i < CODA_MAX_FORMATS; i++) {
+ if (formats[i] == f->fmt.pix.pixelformat) {
+ f->fmt.pix.pixelformat = formats[i];
+ return 0;
+ }
+ }
+
+ /* Fall back to currently set pixelformat */
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix.pixelformat = q_data->fourcc;
return 0;
}
@@ -320,7 +472,6 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
struct v4l2_format *f)
{
struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data;
unsigned int max_w, max_h;
enum v4l2_field field;
@@ -342,30 +493,35 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
switch (f->fmt.pix.pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_H264:
- case V4L2_PIX_FMT_MPEG4:
- case V4L2_PIX_FMT_JPEG:
- break;
- default:
- q_data = get_q_data(ctx, f->type);
- if (!q_data)
- return -EINVAL;
- f->fmt.pix.pixelformat = q_data->fourcc;
- }
-
- switch (f->fmt.pix.pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- /* Frame stride must be multiple of 8, but 16 for h.264 */
+ case V4L2_PIX_FMT_NV12:
+ /*
+ * Frame stride must be at least multiple of 8,
+ * but multiple of 16 for h.264 or JPEG 4:2:x
+ */
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
break;
+ case V4L2_PIX_FMT_YUV422P:
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 2;
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+ /* fallthrough */
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
- case V4L2_PIX_FMT_JPEG:
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+ /*
+ * This is a rough estimate for sensible compressed buffer
+ * sizes (between 1 and 16 bits per pixel). This could be
+ * improved by better format specific worst case estimates.
+ */
+ f->fmt.pix.sizeimage = round_up(clamp(f->fmt.pix.sizeimage,
+ f->fmt.pix.width * f->fmt.pix.height / 8,
+ f->fmt.pix.width * f->fmt.pix.height * 2),
+ PAGE_SIZE);
break;
default:
BUG();
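
As a concrete illustration of the compressed-size estimate (the resolution is only an example): for a 1920x1088 stream, width * height / 8 = 261120 and width * height * 2 = 4177920, so a zero or too-small requested sizeimage is raised to 261120 and then rounded up to 262144 (the next multiple of PAGE_SIZE on a 4 KiB-page system), while anything above 4177920 is clamped down before the rounding step.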
@@ -378,34 +534,35 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
- const struct coda_codec *codec = NULL;
+ const struct coda_q_data *q_data_src;
+ const struct coda_codec *codec;
struct vb2_queue *src_vq;
int ret;
+ ret = coda_try_pixelformat(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
/*
- * If the source format is already fixed, try to find a codec that
- * converts to the given destination format
+ * If the source format is already fixed, only allow the same output
+ * resolution
*/
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (vb2_is_streaming(src_vq)) {
- struct coda_q_data *q_data_src;
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
- f->fmt.pix.pixelformat);
- if (!codec)
- return -EINVAL;
-
f->fmt.pix.width = q_data_src->width;
f->fmt.pix.height = q_data_src->height;
- } else {
- /* Otherwise determine codec by encoded format, if possible */
- codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
- f->fmt.pix.pixelformat);
}
f->fmt.pix.colorspace = ctx->colorspace;
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec)
+ return -EINVAL;
+
ret = coda_try_fmt(ctx, codec, f);
if (ret < 0)
return ret;
@@ -426,21 +583,24 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
- const struct coda_codec *codec = NULL;
+ struct coda_dev *dev = ctx->dev;
+ const struct coda_q_data *q_data_dst;
+ const struct coda_codec *codec;
+ int ret;
- /* Determine codec by encoded format, returns NULL if raw or invalid */
- if (ctx->inst_type == CODA_INST_DECODER) {
- codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
- V4L2_PIX_FMT_YUV420);
- if (!codec)
- codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_H264,
- V4L2_PIX_FMT_YUV420);
- if (!codec)
- return -EINVAL;
+ ret = coda_try_pixelformat(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ if (!f->fmt.pix.colorspace) {
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
}
- if (!f->fmt.pix.colorspace)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc);
return coda_try_fmt(ctx, codec, f);
}
@@ -781,6 +941,7 @@ static int coda_job_ready(void *m2m_priv)
if (ctx->hold ||
((ctx->inst_type == CODA_INST_DECODER) &&
+ !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
(coda_get_bitstream_payload(ctx) < 512) &&
!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
@@ -862,25 +1023,17 @@ static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
static void set_default_params(struct coda_ctx *ctx)
{
- u32 src_fourcc, dst_fourcc;
- int max_w;
- int max_h;
+ unsigned int max_w, max_h, size;
- if (ctx->inst_type == CODA_INST_ENCODER) {
- src_fourcc = V4L2_PIX_FMT_YUV420;
- dst_fourcc = V4L2_PIX_FMT_H264;
- } else {
- src_fourcc = V4L2_PIX_FMT_H264;
- dst_fourcc = V4L2_PIX_FMT_YUV420;
- }
- ctx->codec = coda_find_codec(ctx->dev, src_fourcc, dst_fourcc);
- max_w = ctx->codec->max_w;
- max_h = ctx->codec->max_h;
+ ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
+ ctx->cvd->dst_formats[0]);
+ max_w = min(ctx->codec->max_w, 1920U);
+ max_h = min(ctx->codec->max_h, 1088U);
+ size = max_w * max_h * 3 / 2;
ctx->params.codec_mode = ctx->codec->mode;
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->params.framerate = 30;
- ctx->aborting = 0;
/* Default formats for output and input queues */
ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
@@ -891,14 +1044,14 @@ static void set_default_params(struct coda_ctx *ctx)
ctx->q_data[V4L2_M2M_DST].height = max_h;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = (max_w * max_h * 3) / 2;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
- ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = round_up(size, PAGE_SIZE);
} else {
ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = CODA_MAX_FRAME_SIZE;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = round_up(size, PAGE_SIZE);
ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_DST].sizeimage = (max_w * max_h * 3) / 2;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = size;
}
ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
@@ -964,7 +1117,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
* In the decoder case, immediately try to copy the buffer into the
* bitstream ringbuffer and mark it as ready to be dequeued.
*/
- if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
+ if (ctx->inst_type == CODA_INST_DECODER &&
vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
* For backwards compatibility, queuing an empty buffer marks
@@ -1027,12 +1180,13 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
struct vb2_buffer *buf;
- u32 dst_fourcc;
int ret = 0;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
+ if (q_data_src->fourcc == V4L2_PIX_FMT_H264 ||
+ (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
+ ctx->dev->devtype->product == CODA_7541)) {
/* copy the buffers that were queued before streamon */
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx);
@@ -1063,13 +1217,12 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (!(ctx->streamon_out & ctx->streamon_cap))
return 0;
- /* Allow decoder device_run with no new buffers queued */
+ /* Allow BIT decoder device_run with no new buffers queued */
if (ctx->inst_type == CODA_INST_DECODER)
v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- dst_fourcc = q_data_dst->fourcc;
ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
q_data_dst->fourcc);
@@ -1079,6 +1232,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
goto err;
}
+ if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
+ ctx->params.gop_size = 1;
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
ret = ctx->ops->start_streaming(ctx);
if (ctx->inst_type == CODA_INST_DECODER) {
if (ret == -EAGAIN)
@@ -1093,10 +1250,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
err:
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
- v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
} else {
while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
- v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
}
return ret;
}
@@ -1131,19 +1288,20 @@ static void coda_stop_streaming(struct vb2_queue *q)
}
if (!ctx->streamon_out && !ctx->streamon_cap) {
- struct coda_timestamp *ts;
+ struct coda_buffer_meta *meta;
mutex_lock(&ctx->bitstream_mutex);
- while (!list_empty(&ctx->timestamp_list)) {
- ts = list_first_entry(&ctx->timestamp_list,
- struct coda_timestamp, list);
- list_del(&ts->list);
- kfree(ts);
+ while (!list_empty(&ctx->buffer_meta_list)) {
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ list_del(&meta->list);
+ kfree(meta);
}
mutex_unlock(&ctx->bitstream_mutex);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
ctx->runcounter = 0;
+ ctx->aborting = 0;
}
}
@@ -1226,6 +1384,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
ctx->params.intra_refresh = ctrl->val;
break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ coda_set_jpeg_compression_quality(ctx, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_RESTART_INTERVAL:
+ ctx->params.jpeg_restart_interval = ctrl->val;
+ break;
default:
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"Invalid control, id=%d, val=%d\n",
@@ -1240,14 +1404,8 @@ static const struct v4l2_ctrl_ops coda_ctrl_ops = {
.s_ctrl = coda_s_ctrl,
};
-static int coda_ctrls_setup(struct coda_ctx *ctx)
+static void coda_encode_ctrls(struct coda_ctx *ctx)
{
- v4l2_ctrl_handler_init(&ctx->ctrls, 9);
-
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -1291,6 +1449,30 @@ static int coda_ctrls_setup(struct coda_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
1920 * 1088 / 256, 1, 0);
+}
+
+static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 5, 100, 1, 50);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
+}
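
With these controls registered, userspace can tune the JPEG encoder through the regular control interface. A hedged sketch of setting the quality from an application (the helper name and value are placeholders; only the ioctl and control id are standard):

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int set_jpeg_quality(int fd, int quality)
	{
		struct v4l2_control ctrl = {
			.id	= V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.value	= quality,	/* 5..100, matching the range above */
		};

		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}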
+
+static int coda_ctrls_setup(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrls, 2);
+
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG)
+ coda_jpeg_encode_ctrls(ctx);
+ else
+ coda_encode_ctrls(ctx);
+ }
if (ctx->ctrls.error) {
v4l2_err(&ctx->dev->v4l2_dev,
@@ -1364,10 +1546,14 @@ static int coda_next_free_instance(struct coda_dev *dev)
return idx;
}
-static int coda_open(struct file *file, enum coda_inst_type inst_type,
- const struct coda_context_ops *ctx_ops)
+/*
+ * File operations
+ */
+
+static int coda_open(struct file *file)
{
- struct coda_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ struct coda_dev *dev = video_get_drvdata(vdev);
struct coda_ctx *ctx = NULL;
char *name;
int ret;
@@ -1388,8 +1574,9 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
kfree(name);
- ctx->inst_type = inst_type;
- ctx->ops = ctx_ops;
+ ctx->cvd = to_coda_video_device(vdev);
+ ctx->inst_type = ctx->cvd->type;
+ ctx->ops = ctx->cvd->ops;
init_completion(&ctx->completion);
INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
@@ -1399,8 +1586,10 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
ctx->dev = dev;
ctx->idx = idx;
switch (dev->devtype->product) {
- case CODA_7541:
case CODA_960:
+ ctx->frame_mem_ctrl = 1 << 12;
+ /* fallthrough */
+ case CODA_7541:
ctx->reg_idx = 0;
break;
default:
@@ -1441,16 +1630,17 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
ctx->fh.ctrl_handler = &ctx->ctrls;
- ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE,
- "parabuf");
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
+ CODA_PARA_BUF_SIZE, "parabuf");
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
goto err_dma_alloc;
}
ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
- ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
- ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
+ ctx->bitstream.vaddr = dma_alloc_writecombine(
+ &dev->plat_dev->dev, ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&dev->v4l2_dev,
"failed to allocate bitstream ringbuffer");
@@ -1461,7 +1651,7 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
ctx->bitstream.vaddr, ctx->bitstream.size);
mutex_init(&ctx->bitstream_mutex);
mutex_init(&ctx->buffer_mutex);
- INIT_LIST_HEAD(&ctx->timestamp_list);
+ INIT_LIST_HEAD(&ctx->buffer_meta_list);
coda_lock(ctx);
list_add(&ctx->list, &dev->instances);
@@ -1495,16 +1685,6 @@ err_coda_max:
return ret;
}
-static int coda_encoder_open(struct file *file)
-{
- return coda_open(file, CODA_INST_ENCODER, &coda_bit_encode_ops);
-}
-
-static int coda_decoder_open(struct file *file)
-{
- return coda_open(file, CODA_INST_DECODER, &coda_bit_decode_ops);
-}
-
static int coda_release(struct file *file)
{
struct coda_dev *dev = video_drvdata(file);
@@ -1515,6 +1695,9 @@ static int coda_release(struct file *file)
debugfs_remove_recursive(ctx->debugfs_entry);
+ if (ctx->inst_type == CODA_INST_DECODER)
+ coda_bit_stream_end_flag(ctx);
+
/* If this instance is running, call .job_abort and wait for it to end */
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
@@ -1528,8 +1711,10 @@ static int coda_release(struct file *file)
list_del(&ctx->list);
coda_unlock(ctx);
- dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ if (ctx->bitstream.vaddr) {
+ dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ }
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
@@ -1548,18 +1733,9 @@ static int coda_release(struct file *file)
return 0;
}
-static const struct v4l2_file_operations coda_encoder_fops = {
- .owner = THIS_MODULE,
- .open = coda_encoder_open,
- .release = coda_release,
- .poll = v4l2_m2m_fop_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = v4l2_m2m_fop_mmap,
-};
-
-static const struct v4l2_file_operations coda_decoder_fops = {
+static const struct v4l2_file_operations coda_fops = {
.owner = THIS_MODULE,
- .open = coda_decoder_open,
+ .open = coda_open,
.release = coda_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
@@ -1664,8 +1840,16 @@ err_clk_per:
return ret;
}
-static int coda_register_device(struct coda_dev *dev, struct video_device *vfd)
+static int coda_register_device(struct coda_dev *dev, int i)
{
+ struct video_device *vfd = &dev->vfd[i];
+
+ if (i > ARRAY_SIZE(dev->vfd))
+ return -EINVAL;
+
+ snprintf(vfd->name, sizeof(vfd->name), dev->devtype->vdevs[i]->name);
+ vfd->fops = &coda_fops;
+ vfd->ioctl_ops = &coda_ioctl_ops;
vfd->release = video_device_release_empty,
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
@@ -1684,7 +1868,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
{
struct coda_dev *dev = context;
struct platform_device *pdev = dev->plat_dev;
- int ret;
+ int i, ret;
if (!fw) {
v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
@@ -1725,33 +1909,25 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
goto rel_ctx;
}
- dev->vfd[0].fops = &coda_encoder_fops,
- dev->vfd[0].ioctl_ops = &coda_ioctl_ops;
- snprintf(dev->vfd[0].name, sizeof(dev->vfd[0].name), "coda-encoder");
- ret = coda_register_device(dev, &dev->vfd[0]);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to register encoder video device\n");
- goto rel_m2m;
- }
-
- dev->vfd[1].fops = &coda_decoder_fops,
- dev->vfd[1].ioctl_ops = &coda_ioctl_ops;
- snprintf(dev->vfd[1].name, sizeof(dev->vfd[1].name), "coda-decoder");
- ret = coda_register_device(dev, &dev->vfd[1]);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to register decoder video device\n");
- goto rel_m2m;
+ for (i = 0; i < dev->devtype->num_vdevs; i++) {
+ ret = coda_register_device(dev, i);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to register %s video device: %d\n",
+ dev->devtype->vdevs[i]->name, ret);
+ goto rel_vfd;
+ }
}
v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n",
- dev->vfd[0].num, dev->vfd[1].num);
+ dev->vfd[0].num, dev->vfd[i - 1].num);
pm_runtime_put_sync(&pdev->dev);
return;
-rel_m2m:
+rel_vfd:
+ while (--i >= 0)
+ video_unregister_device(&dev->vfd[i]);
v4l2_m2m_release(dev->m2m_dev);
rel_ctx:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
@@ -1783,6 +1959,8 @@ static const struct coda_devtype coda_devdata[] = {
.product = CODA_DX6,
.codecs = codadx6_codecs,
.num_codecs = ARRAY_SIZE(codadx6_codecs),
+ .vdevs = codadx6_video_devices,
+ .num_vdevs = ARRAY_SIZE(codadx6_video_devices),
.workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
.iram_size = 0xb000,
},
@@ -1791,6 +1969,8 @@ static const struct coda_devtype coda_devdata[] = {
.product = CODA_7541,
.codecs = coda7_codecs,
.num_codecs = ARRAY_SIZE(coda7_codecs),
+ .vdevs = coda7_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda7_video_devices),
.workbuf_size = 128 * 1024,
.tempbuf_size = 304 * 1024,
.iram_size = 0x14000,
@@ -1800,6 +1980,8 @@ static const struct coda_devtype coda_devdata[] = {
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
+ .vdevs = coda9_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda9_video_devices),
.workbuf_size = 80 * 1024,
.tempbuf_size = 204 * 1024,
.iram_size = 0x21000,
@@ -1809,6 +1991,8 @@ static const struct coda_devtype coda_devdata[] = {
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
+ .vdevs = coda9_video_devices,
+ .num_vdevs = ARRAY_SIZE(coda9_video_devices),
.workbuf_size = 80 * 1024,
.tempbuf_size = 204 * 1024,
.iram_size = 0x20000,
@@ -1846,10 +2030,18 @@ static int coda_probe(struct platform_device *pdev)
int ret, irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Not enough memory for %s\n",
- CODA_NAME);
+ if (!dev)
return -ENOMEM;
+
+ pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+
+ if (of_id) {
+ dev->devtype = of_id->data;
+ } else if (pdev_id) {
+ dev->devtype = &coda_devdata[pdev_id->driver_data];
+ } else {
+ ret = -EINVAL;
+ goto err_v4l2_register;
}
spin_lock_init(&dev->irqlock);
@@ -1919,17 +2111,6 @@ static int coda_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
mutex_init(&dev->coda_mutex);
- pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
-
- if (of_id) {
- dev->devtype = of_id->data;
- } else if (pdev_id) {
- dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- v4l2_device_unregister(&dev->v4l2_dev);
- return -EINVAL;
- }
-
dev->debugfs_root = debugfs_create_dir("coda", NULL);
if (!dev->debugfs_root)
dev_warn(&pdev->dev, "failed to create debugfs root\n");
@@ -1941,8 +2122,7 @@ static int coda_probe(struct platform_device *pdev)
dev->debugfs_root);
if (ret < 0) {
dev_err(&pdev->dev, "failed to allocate work buffer\n");
- v4l2_device_unregister(&dev->v4l2_dev);
- return ret;
+ goto err_v4l2_register;
}
}
@@ -1952,8 +2132,7 @@ static int coda_probe(struct platform_device *pdev)
dev->debugfs_root);
if (ret < 0) {
dev_err(&pdev->dev, "failed to allocate temp buffer\n");
- v4l2_device_unregister(&dev->v4l2_dev);
- return ret;
+ goto err_v4l2_register;
}
}
@@ -1973,7 +2152,8 @@ static int coda_probe(struct platform_device *pdev)
dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
if (!dev->workqueue) {
dev_err(&pdev->dev, "unable to alloc workqueue\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_v4l2_register;
}
platform_set_drvdata(pdev, dev);
@@ -1988,14 +2168,21 @@ static int coda_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return coda_firmware_request(dev);
+
+err_v4l2_register:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
}
static int coda_remove(struct platform_device *pdev)
{
struct coda_dev *dev = platform_get_drvdata(pdev);
+ int i;
- video_unregister_device(&dev->vfd[0]);
- video_unregister_device(&dev->vfd[1]);
+ for (i = 0; i < ARRAY_SIZE(dev->vfd); i++) {
+ if (video_get_drvdata(&dev->vfd[i]))
+ video_unregister_device(&dev->vfd[i]);
+ }
if (dev->m2m_dev)
v4l2_m2m_release(dev->m2m_dev);
pm_runtime_disable(&pdev->dev);
@@ -2038,7 +2225,6 @@ static struct platform_driver coda_driver = {
.remove = coda_remove,
.driver = {
.name = CODA_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(coda_dt_ids),
.pm = &coda_pm_ops,
},
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
new file mode 100644
index 000000000000..8fa3e353f9e2
--- /dev/null
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -0,0 +1,238 @@
+/*
+ * Coda multi-standard codec IP - JPEG support functions
+ *
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/swab.h>
+
+#include "coda.h"
+
+#define SOI_MARKER 0xffd8
+#define EOI_MARKER 0xffd9
+
+/*
+ * Typical Huffman tables for 8-bit precision luminance and
+ * chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
+ */
+
+static const unsigned char luma_dc_bits[16] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char luma_dc_value[12] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char chroma_dc_bits[16] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const unsigned char chroma_dc_value[12] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char luma_ac_bits[16] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
+};
+
+static const unsigned char luma_ac_value[162 + 2] = {
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa, /* padded to 32-bit */
+};
+
+static const unsigned char chroma_ac_bits[16] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+};
+
+static const unsigned char chroma_ac_value[162 + 2] = {
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa, /* padded to 32-bit */
+};
+
+/*
+ * Quantization tables for luminance and chrominance components in
+ * zig-zag scan order from the Freescale i.MX VPU libraries
+ */
+
+static unsigned char luma_q[64] = {
+ 0x06, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x05,
+ 0x05, 0x06, 0x09, 0x06, 0x05, 0x06, 0x09, 0x0b,
+ 0x08, 0x06, 0x06, 0x08, 0x0b, 0x0c, 0x0a, 0x0a,
+ 0x0b, 0x0a, 0x0a, 0x0c, 0x10, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x10, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+};
+
+static unsigned char chroma_q[64] = {
+ 0x07, 0x07, 0x07, 0x0d, 0x0c, 0x0d, 0x18, 0x10,
+ 0x10, 0x18, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+};
+
+struct coda_memcpy_desc {
+ int offset;
+ const void *src;
+ size_t len;
+};
+
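+/*
+ * Copy one table into the parameter buffer. Byte-swapping each 32-bit
+ * half and storing the halves in swapped order amounts to a byte swap
+ * of each 64-bit word, presumably to match the word order the codec
+ * uses for its parameter memory.
+ */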
+static void coda_memcpy_parabuf(void *parabuf,
+ const struct coda_memcpy_desc *desc)
+{
+ u32 *dst = parabuf + desc->offset;
+ const u32 *src = desc->src;
+ int len = desc->len / 4;
+ int i;
+
+ for (i = 0; i < len; i += 2) {
+ dst[i + 1] = swab32(src[i]);
+ dst[i] = swab32(src[i + 1]);
+ }
+}
+
+int coda_jpeg_write_tables(struct coda_ctx *ctx)
+{
+ int i;
+ static const struct coda_memcpy_desc huff[8] = {
+ { 0, luma_dc_bits, sizeof(luma_dc_bits) },
+ { 16, luma_dc_value, sizeof(luma_dc_value) },
+ { 32, luma_ac_bits, sizeof(luma_ac_bits) },
+ { 48, luma_ac_value, sizeof(luma_ac_value) },
+ { 216, chroma_dc_bits, sizeof(chroma_dc_bits) },
+ { 232, chroma_dc_value, sizeof(chroma_dc_value) },
+ { 248, chroma_ac_bits, sizeof(chroma_ac_bits) },
+ { 264, chroma_ac_value, sizeof(chroma_ac_value) },
+ };
+ struct coda_memcpy_desc qmat[3] = {
+ { 512, ctx->params.jpeg_qmat_tab[0], 64 },
+ { 576, ctx->params.jpeg_qmat_tab[1], 64 },
+ { 640, ctx->params.jpeg_qmat_tab[1], 64 },
+ };
+
+ /* Write the Huffman tables to parameter memory */
+ for (i = 0; i < ARRAY_SIZE(huff); i++)
+ coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i);
+
+ /* Write Q-matrix to parameter memory */
+ for (i = 0; i < ARRAY_SIZE(qmat); i++)
+ coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i);
+
+ return 0;
+}
+
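+/* A complete JPEG payload starts with an SOI marker and ends with an EOI marker */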
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
+{
+ void *vaddr = vb2_plane_vaddr(vb, 0);
+ u16 soi = be16_to_cpup((__be16 *)vaddr);
+ u16 eoi = be16_to_cpup((__be16 *)(vaddr +
+ vb2_get_plane_payload(vb, 0) - 2));
+
+ return soi == SOI_MARKER && eoi == EOI_MARKER;
+}
+
+/*
+ * Scale quantization table using nonlinear scaling factor
+ * u8 qtab[64], scale [0,1000] (as produced by coda_set_jpeg_compression_quality)
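+ * (scale 100 leaves the table unchanged, scale 50 halves each step size)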
+ */
+static void coda_scale_quant_table(u8 *q_tab, int scale)
+{
+ unsigned int temp;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ temp = DIV_ROUND_CLOSEST((unsigned int)q_tab[i] * scale, 100);
+ if (temp <= 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+ q_tab[i] = (unsigned char)temp;
+ }
+}
+
+void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
+{
+ unsigned int scale;
+
+ ctx->params.jpeg_quality = quality;
+
+ /* Clip quality setting to [5,100] interval */
+ if (quality > 100)
+ quality = 100;
+ if (quality < 5)
+ quality = 5;
+
+ /*
+ * Non-linear scaling factor:
+ * [5,50] -> [1000..100], [51,100] -> [98..0]
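+ * e.g. quality 75 yields scale 50, i.e. half the reference step sizes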
+ */
+ if (quality < 50)
+ scale = 5000 / quality;
+ else
+ scale = 200 - 2 * quality;
+
+ if (ctx->params.jpeg_qmat_tab[0]) {
+ memcpy(ctx->params.jpeg_qmat_tab[0], luma_q, 64);
+ coda_scale_quant_table(ctx->params.jpeg_qmat_tab[0], scale);
+ }
+ if (ctx->params.jpeg_qmat_tab[1]) {
+ memcpy(ctx->params.jpeg_qmat_tab[1], chroma_q, 64);
+ coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
+ }
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index bbc18c0dacd9..5dd47e5f97c1 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -45,11 +45,15 @@ enum coda_product {
CODA_960 = 0xf020,
};
+struct coda_video_device;
+
struct coda_devtype {
char *firmware;
enum coda_product product;
const struct coda_codec *codecs;
unsigned int num_codecs;
+ const struct coda_video_device **vdevs;
+ unsigned int num_vdevs;
size_t workbuf_size;
size_t tempbuf_size;
size_t iram_size;
@@ -65,7 +69,7 @@ struct coda_aux_buf {
struct coda_dev {
struct v4l2_device v4l2_dev;
- struct video_device vfd[2];
+ struct video_device vfd[5];
struct platform_device *plat_dev;
const struct coda_devtype *devtype;
@@ -114,6 +118,9 @@ struct coda_params {
u8 mpeg4_inter_qp;
u8 gop_size;
int intra_refresh;
+ u8 jpeg_quality;
+ u8 jpeg_restart_interval;
+ u8 *jpeg_qmat_tab[3];
int codec_mode;
int codec_mode_aux;
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
@@ -123,11 +130,13 @@ struct coda_params {
u32 slice_max_mb;
};
-struct coda_timestamp {
+struct coda_buffer_meta {
struct list_head list;
u32 sequence;
struct v4l2_timecode timecode;
struct timeval timestamp;
+ u32 start;
+ u32 end;
};
/* Per-queue, driver-specific private data */
@@ -183,6 +192,7 @@ struct coda_ctx {
struct work_struct pic_run_work;
struct work_struct seq_end_work;
struct completion completion;
+ const struct coda_video_device *cvd;
const struct coda_context_ops *ops;
int aborting;
int initialized;
@@ -212,9 +222,9 @@ struct coda_ctx {
struct coda_aux_buf slicebuf;
struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
u32 frame_types[CODA_MAX_FRAMEBUFFERS];
- struct coda_timestamp frame_timestamps[CODA_MAX_FRAMEBUFFERS];
+ struct coda_buffer_meta frame_metas[CODA_MAX_FRAMEBUFFERS];
u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
- struct list_head timestamp_list;
+ struct list_head buffer_meta_list;
struct coda_aux_buf workbuf;
int num_internal_frames;
int idx;
@@ -232,6 +242,8 @@ extern int coda_debug;
void coda_write(struct coda_dev *dev, u32 data, u32 reg);
unsigned int coda_read(struct coda_dev *dev, u32 reg);
+void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
+ struct vb2_buffer *buf, unsigned int reg_y);
int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
size_t size, const char *name, struct dentry *parent);
@@ -281,6 +293,10 @@ void coda_bit_stream_end_flag(struct coda_ctx *ctx);
int coda_h264_padding(int size, char *p);
+bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_jpeg_write_tables(struct coda_ctx *ctx);
+void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
+
extern const struct coda_context_ops coda_bit_encode_ops;
extern const struct coda_context_ops coda_bit_decode_ops;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index c791275e307b..8e015b8aa8fa 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -147,6 +147,7 @@
#define CODA_CMD_DEC_SEQ_BB_START 0x180
#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184
#define CODA_CMD_DEC_SEQ_OPTION 0x188
+#define CODA_NO_INT_ENABLE (1 << 10)
#define CODA_REORDER_ENABLE (1 << 1)
#define CODADX6_QP_REPORT (1 << 0)
#define CODA7_MP4_DEBLK_ENABLE (1 << 0)
@@ -332,6 +333,12 @@
#define CODA9_CMD_ENC_SEQ_ME_OPTION 0x1d8
#define CODA_RET_ENC_SEQ_SUCCESS 0x1c0
+#define CODA_CMD_ENC_SEQ_JPG_PARA 0x198
+#define CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL 0x19C
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_EN 0x1a0
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE 0x1a4
+#define CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET 0x1a8
+
/* Encoder Picture Run */
#define CODA9_CMD_ENC_PIC_SRC_INDEX 0x180
#define CODA9_CMD_ENC_PIC_SRC_STRIDE 0x184
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 3f44deb5b7a7..c90b9a4f0c24 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -1030,7 +1030,6 @@ static int dm355_ccdc_remove(struct platform_device *pdev)
static struct platform_driver dm355_ccdc_driver = {
.driver = {
.name = "dm355_ccdc",
- .owner = THIS_MODULE,
},
.remove = dm355_ccdc_remove,
.probe = dm355_ccdc_probe,
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 62a0ebb01056..ffbefdff6b5e 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -1029,7 +1029,6 @@ static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
static struct platform_driver dm644x_ccdc_driver = {
.driver = {
.name = "dm644x_ccdc",
- .owner = THIS_MODULE,
.pm = &dm644x_ccdc_pm_ops,
},
.remove = dm644x_ccdc_remove,
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 3332cca632e5..99faea2e84c6 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1134,7 +1134,6 @@ static int isif_remove(struct platform_device *pdev)
static struct platform_driver isif_driver = {
.driver = {
.name = "isif",
- .owner = THIS_MODULE,
},
.remove = isif_remove,
.probe = isif_probe,
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 33b9660b7f77..9a6c2cc38acb 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -227,7 +227,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
vpbe_current_encoder_info(vpbe_dev);
struct vpbe_config *cfg = vpbe_dev->cfg;
struct venc_platform_data *venc_device = vpbe_dev->venc_device;
- enum v4l2_mbus_pixelcode if_params;
+ u32 if_params;
int enc_out_index;
int sd_index;
int ret = 0;
@@ -341,7 +341,7 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
if (!(cfg->outputs[out_index].output.capabilities &
V4L2_OUT_CAP_DV_TIMINGS))
- return -EINVAL;
+ return -ENODATA;
for (i = 0; i < output->num_modes; i++) {
if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS &&
@@ -384,6 +384,13 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
struct v4l2_dv_timings *dv_timings)
{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_DV_TIMINGS))
+ return -ENODATA;
+
if (vpbe_dev->current_timings.timings_type &
VPBE_ENC_DV_TIMINGS) {
*dv_timings = vpbe_dev->current_timings.dv_timings;
@@ -409,7 +416,7 @@ static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
int i;
if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
- return -EINVAL;
+ return -ENODATA;
for (i = 0; i < output->num_modes; i++) {
if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) {
@@ -440,7 +447,7 @@ static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
if (!(cfg->outputs[out_index].output.capabilities &
V4L2_OUT_CAP_STD))
- return -EINVAL;
+ return -ENODATA;
ret = vpbe_get_std_info(vpbe_dev, std_id);
if (ret)
@@ -473,6 +480,11 @@ static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
{
struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+
+ if (!(cfg->outputs[out_index].output.capabilities & V4L2_OUT_CAP_STD))
+ return -ENODATA;
if (cur_timings->timings_type & VPBE_ENC_STD) {
*std_id = cur_timings->std_id;
@@ -613,6 +625,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
}
if (clk_prepare_enable(vpbe_dev->dac_clk)) {
ret = -ENODEV;
+ clk_put(vpbe_dev->dac_clk);
goto fail_mutex_unlock;
}
}
@@ -863,7 +876,6 @@ static int vpbe_remove(struct platform_device *device)
static struct platform_driver vpbe_driver = {
.driver = {
.name = "vpbe_controller",
- .owner = THIS_MODULE,
},
.probe = vpbe_probe,
.remove = vpbe_remove,
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 73496d953ba0..c4ab46f5bd92 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -152,8 +152,8 @@ static irqreturn_t venc_isr(int irq, void *arg)
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
layer = disp_dev->dev[i];
- /* If streaming is started in this layer */
- if (!layer->started)
+
+ if (!vb2_start_streaming_called(&layer->buffer_queue))
continue;
if (layer->layer_first_int) {
@@ -207,31 +207,23 @@ static irqreturn_t venc_isr(int irq, void *arg)
*/
static int vpbe_buffer_prepare(struct vb2_buffer *vb)
{
- struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_queue *q = vb->vb2_queue;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = vb2_get_drv_priv(q);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
unsigned long addr;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"vpbe_buffer_prepare\n");
- if (vb->state != VB2_BUF_STATE_ACTIVE &&
- vb->state != VB2_BUF_STATE_PREPARED) {
- vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
- if (vb2_plane_vaddr(vb, 0) &&
- vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- if (q->streaming) {
- if (!IS_ALIGNED(addr, 8)) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "buffer_prepare:offset is \
- not aligned to 32 bytes\n");
- return -EINVAL;
- }
- }
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (!IS_ALIGNED(addr, 8)) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "buffer_prepare:offset is not aligned to 32 bytes\n");
+ return -EINVAL;
}
return 0;
}
@@ -247,18 +239,20 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = vb2_get_drv_priv(vq);
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+ if (fmt && fmt->fmt.pix.sizeimage < layer->pix_fmt.sizeimage)
+ return -EINVAL;
+
/* Store number of buffers allocated in numbuffer member */
- if (*nbuffers < VPBE_DEFAULT_NUM_BUFS)
- *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+ if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers;
*nplanes = 1;
- sizes[0] = layer->pix_fmt.sizeimage;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : layer->pix_fmt.sizeimage;
alloc_ctxs[0] = layer->alloc_ctx;
return 0;
@@ -271,12 +265,11 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
static void vpbe_buffer_queue(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpbe_disp_buffer *buf = container_of(vb,
struct vpbe_disp_buffer, vb);
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_display *disp = fh->disp_dev;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_display *disp = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
unsigned long flags;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -288,61 +281,14 @@ static void vpbe_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
}
-/*
- * vpbe_buf_cleanup()
- * This function is called from the vb2 layer to free memory allocated to
- * the buffers
- */
-static void vpbe_buf_cleanup(struct vb2_buffer *vb)
-{
- /* Get the file handle object and layer object */
- struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- struct vpbe_disp_buffer *buf = container_of(vb,
- struct vpbe_disp_buffer, vb);
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buf_cleanup\n");
-
- spin_lock_irqsave(&layer->irqlock, flags);
- if (vb->state == VB2_BUF_STATE_ACTIVE)
- list_del_init(&buf->list);
- spin_unlock_irqrestore(&layer->irqlock, flags);
-}
-
-static void vpbe_wait_prepare(struct vb2_queue *vq)
-{
- struct vpbe_fh *fh = vb2_get_drv_priv(vq);
- struct vpbe_layer *layer = fh->layer;
-
- mutex_unlock(&layer->opslock);
-}
-
-static void vpbe_wait_finish(struct vb2_queue *vq)
-{
- struct vpbe_fh *fh = vb2_get_drv_priv(vq);
- struct vpbe_layer *layer = fh->layer;
-
- mutex_lock(&layer->opslock);
-}
-
-static int vpbe_buffer_init(struct vb2_buffer *vb)
-{
- struct vpbe_disp_buffer *buf = container_of(vb,
- struct vpbe_disp_buffer, vb);
-
- INIT_LIST_HEAD(&buf->list);
- return 0;
-}
-
static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct vpbe_fh *fh = vb2_get_drv_priv(vq);
- struct vpbe_layer *layer = fh->layer;
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
int ret;
+ osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
+
/* Get the next frame from the buffer queue */
layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
struct vpbe_disp_buffer, list);
@@ -354,7 +300,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
layer->field_id = 0;
/* Set parameters in OSD and VENC */
- ret = vpbe_set_osd_display_params(fh->disp_dev, layer);
+ ret = vpbe_set_osd_display_params(layer->disp_dev, layer);
if (ret < 0) {
struct vpbe_disp_buffer *buf, *tmp;
@@ -371,7 +317,6 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
* if request format is yuv420 semiplanar, need to
* enable both video windows
*/
- layer->started = 1;
layer->layer_first_int = 1;
return ret;
@@ -379,14 +324,16 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
static void vpbe_stop_streaming(struct vb2_queue *vq)
{
- struct vpbe_fh *fh = vb2_get_drv_priv(vq);
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_display *disp = fh->disp_dev;
+ struct vpbe_layer *layer = vb2_get_drv_priv(vq);
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
+ struct vpbe_display *disp = layer->disp_dev;
unsigned long flags;
if (!vb2_is_streaming(vq))
return;
+ osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
+
/* release all active buffers */
spin_lock_irqsave(&disp->dma_queue_lock, flags);
if (layer->cur_frm == layer->next_frm) {
@@ -411,13 +358,11 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
static struct vb2_ops video_qops = {
.queue_setup = vpbe_buffer_queue_setup,
- .wait_prepare = vpbe_wait_prepare,
- .wait_finish = vpbe_wait_finish,
- .buf_init = vpbe_buffer_init,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.buf_prepare = vpbe_buffer_prepare,
.start_streaming = vpbe_start_streaming,
.stop_streaming = vpbe_stop_streaming,
- .buf_cleanup = vpbe_buf_cleanup,
.buf_queue = vpbe_buffer_queue,
};
@@ -691,10 +636,9 @@ static int vpbe_try_format(struct vpbe_display *disp_dev,
static int vpbe_display_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- cap->version = VPBE_DISPLAY_VERSION_CODE;
cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
snprintf(cap->driver, sizeof(cap->driver), "%s",
@@ -709,9 +653,8 @@ static int vpbe_display_querycap(struct file *file, void *priv,
static int vpbe_display_s_crop(struct file *file, void *priv,
const struct v4l2_crop *crop)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
struct osd_layer_config *cfg = &layer->layer_info.config;
struct osd_state *osd_device = disp_dev->osd_device;
@@ -778,11 +721,10 @@ static int vpbe_display_s_crop(struct file *file, void *priv,
static int vpbe_display_g_crop(struct file *file, void *priv,
struct v4l2_crop *crop)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
+ struct vpbe_layer *layer = video_drvdata(file);
struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- struct osd_state *osd_device = fh->disp_dev->osd_device;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = layer->disp_dev->osd_device;
struct v4l2_rect *rect = &crop->c;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -806,8 +748,8 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
static int vpbe_display_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cropcap)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
@@ -824,9 +766,8 @@ static int vpbe_display_cropcap(struct file *file, void *priv,
static int vpbe_display_g_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_G_FMT, layer id = %d\n",
@@ -846,9 +787,8 @@ static int vpbe_display_g_fmt(struct file *file, void *priv,
static int vpbe_display_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
unsigned int index = 0;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -878,9 +818,8 @@ static int vpbe_display_enum_fmt(struct file *file, void *priv,
static int vpbe_display_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
struct osd_layer_config *cfg = &layer->layer_info.config;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -891,11 +830,9 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
"VIDIOC_S_FMT, layer id = %d\n",
layer->device_id);
- /* If streaming is started, return error */
- if (layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- }
+
if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
return -EINVAL;
@@ -967,9 +904,9 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
static int vpbe_display_try_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_display *disp_dev = fh->disp_dev;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
@@ -993,18 +930,15 @@ static int vpbe_display_try_fmt(struct file *file, void *priv,
static int vpbe_display_s_std(struct file *file, void *priv,
v4l2_std_id std_id)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
- /* If streaming is started, return error */
- if (layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- }
+
if (NULL != vpbe_dev->ops.s_std) {
ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
if (ret) {
@@ -1028,8 +962,8 @@ static int vpbe_display_s_std(struct file *file, void *priv,
static int vpbe_display_g_std(struct file *file, void *priv,
v4l2_std_id *std_id)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
@@ -1051,8 +985,8 @@ static int vpbe_display_g_std(struct file *file, void *priv,
static int vpbe_display_enum_output(struct file *file, void *priv,
struct v4l2_output *output)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
@@ -1079,17 +1013,15 @@ static int vpbe_display_enum_output(struct file *file, void *priv,
static int vpbe_display_s_output(struct file *file, void *priv,
unsigned int i)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
- /* If streaming is started, return error */
- if (layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+
+ if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- }
+
if (NULL == vpbe_dev->ops.set_output)
return -EINVAL;
@@ -1110,8 +1042,8 @@ static int vpbe_display_s_output(struct file *file, void *priv,
static int vpbe_display_g_output(struct file *file, void *priv,
unsigned int *i)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
/* Get the standard from the current encoder */
@@ -1130,8 +1062,8 @@ static int
vpbe_display_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
@@ -1160,19 +1092,14 @@ static int
vpbe_display_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
-
- /* If streaming is started, return error */
- if (layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- }
/* Set the given standard in the encoder */
if (!vpbe_dev->ops.s_dv_timings)
@@ -1198,8 +1125,8 @@ static int
vpbe_display_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *dv_timings)
{
- struct vpbe_fh *fh = priv;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
@@ -1215,259 +1142,6 @@ vpbe_display_g_dv_timings(struct file *file, void *priv,
return 0;
}
-static int vpbe_display_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- struct osd_state *osd_device = fh->disp_dev->osd_device;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_STREAMOFF,layer id = %d\n",
- layer->device_id);
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- /* If io is allowed for this file handle, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
- return -EACCES;
- }
-
- /* If streaming is not started, return error */
- if (!layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
- " id = %d\n", layer->device_id);
- return -EINVAL;
- }
-
- osd_device->ops.disable_layer(osd_device,
- layer->layer_info.id);
- layer->started = 0;
- ret = vb2_streamoff(&layer->buffer_queue, buf_type);
-
- return ret;
-}
-
-static int vpbe_display_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_display *disp_dev = fh->disp_dev;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- struct osd_state *osd_device = disp_dev->osd_device;
- int ret;
-
- osd_device->ops.disable_layer(osd_device,
- layer->layer_info.id);
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
- layer->device_id);
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- /* If file handle is not allowed IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
- return -EACCES;
- }
- /* If Streaming is already started, return error */
- if (layer->started) {
- v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
- return -EBUSY;
- }
-
- /*
- * Call vb2_streamon to start streaming
- * in videobuf
- */
- ret = vb2_streamon(&layer->buffer_queue, buf_type);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "error in vb2_streamon\n");
- return ret;
- }
- return ret;
-}
-
-static int vpbe_display_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_DQBUF, layer id = %d\n",
- layer->device_id);
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
- /* If this file handle is not allowed to do IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
- return -EACCES;
- }
- if (file->f_flags & O_NONBLOCK)
- /* Call videobuf_dqbuf for non blocking mode */
- ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
- else
- /* Call videobuf_dqbuf for blocking mode */
- ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
-
- return ret;
-}
-
-static int vpbe_display_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_QBUF, layer id = %d\n",
- layer->device_id);
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- /* If this file handle is not allowed to do IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
- return -EACCES;
- }
-
- return vb2_qbuf(&layer->buffer_queue, p);
-}
-
-static int vpbe_display_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_QUERYBUF, layer id = %d\n",
- layer->device_id);
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
- /* Call vb2_querybuf to get information */
- return vb2_querybuf(&layer->buffer_queue, buf);
-}
-
-static int vpbe_display_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- struct vb2_queue *q;
- int ret;
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- /* If io users of the layer is not zero, return error */
- if (0 != layer->io_usrs) {
- v4l2_err(&vpbe_dev->v4l2_dev, "not IO user\n");
- return -EBUSY;
- }
- /* Initialize videobuf queue as per the buffer type */
- layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
- if (IS_ERR(layer->alloc_ctx)) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
- return PTR_ERR(layer->alloc_ctx);
- }
- q = &layer->buffer_queue;
- memset(q, 0, sizeof(*q));
- q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
- q->drv_priv = fh;
- q->ops = &video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
-
- ret = vb2_queue_init(q);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
- return ret;
- }
- /* Set io allowed member of file handle to TRUE */
- fh->io_allowed = 1;
- /* Increment io usrs member of layer object to 1 */
- layer->io_usrs = 1;
- /* Store type of memory requested in layer object */
- layer->memory = req_buf->memory;
- /* Initialize buffer queue */
- INIT_LIST_HEAD(&layer->dma_queue);
- /* Allocate buffers */
- return vb2_reqbufs(q, req_buf);
-}
-
-/*
- * vpbe_display_mmap()
- * It is used to map kernel space buffers into user spaces
- */
-static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
-{
- /* Get the layer object and file handle object */
- struct vpbe_fh *fh = filep->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
-
- if (mutex_lock_interruptible(&layer->opslock))
- return -ERESTARTSYS;
- ret = vb2_mmap(&layer->buffer_queue, vma);
- mutex_unlock(&layer->opslock);
- return ret;
-}
-
-/* vpbe_display_poll(): It is used for select/poll system call
- */
-static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
-{
- struct vpbe_fh *fh = filep->private_data;
- struct vpbe_layer *layer = fh->layer;
- struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- unsigned int err = 0;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
- if (layer->started) {
- mutex_lock(&layer->opslock);
- err = vb2_poll(&layer->buffer_queue, filep, wait);
- mutex_unlock(&layer->opslock);
- }
- return err;
-}
-
/*
* vpbe_display_open()
* It creates object of file handle structure and stores it in private_data
@@ -1475,30 +1149,22 @@ static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
*/
static int vpbe_display_open(struct file *file)
{
- struct vpbe_fh *fh = NULL;
struct vpbe_layer *layer = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
struct vpbe_display *disp_dev = layer->disp_dev;
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
struct osd_state *osd_device = disp_dev->osd_device;
int err;
- /* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
- if (fh == NULL) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "unable to allocate memory for file handle object\n");
- return -ENOMEM;
+ /* creating context for file descriptor */
+ err = v4l2_fh_open(file);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "v4l2_fh_open failed\n");
+ return err;
}
- v4l2_fh_init(&fh->fh, vdev);
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe display open plane = %d\n",
- layer->device_id);
- /* store pointer to fh in private_data member of filep */
- file->private_data = fh;
- fh->layer = layer;
- fh->disp_dev = disp_dev;
+ /* nothing more to do if the layer is already initialized */
+ if (!v4l2_fh_is_singular_file(file))
+ return err;
if (!layer->usrs) {
if (mutex_lock_interruptible(&layer->opslock))
@@ -1511,15 +1177,12 @@ static int vpbe_display_open(struct file *file)
/* Couldn't get layer */
v4l2_err(&vpbe_dev->v4l2_dev,
"Display Manager failed to allocate layer\n");
- kfree(fh);
+ v4l2_fh_release(file);
return -EINVAL;
}
}
/* Increment layer usrs counter */
layer->usrs++;
- /* Set io_allowed member to false */
- fh->io_allowed = 0;
- v4l2_fh_add(&fh->fh);
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"vpbe display device opened successfully\n");
return 0;
@@ -1532,30 +1195,18 @@ static int vpbe_display_open(struct file *file)
*/
static int vpbe_display_release(struct file *file)
{
- /* Get the layer object and file handle object */
- struct vpbe_fh *fh = file->private_data;
- struct vpbe_layer *layer = fh->layer;
+ struct vpbe_layer *layer = video_drvdata(file);
struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_display *disp_dev = layer->disp_dev;
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
struct osd_state *osd_device = disp_dev->osd_device;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
mutex_lock(&layer->opslock);
- /* if this instance is doing IO */
- if (fh->io_allowed) {
- /* Reset io_usrs member of layer object */
- layer->io_usrs = 0;
-
- osd_device->ops.disable_layer(osd_device,
- layer->layer_info.id);
- layer->started = 0;
- /* Free buffers allocated */
- vb2_queue_release(&layer->buffer_queue);
- vb2_dma_contig_cleanup_ctx(&layer->buffer_queue);
- }
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
/* Decrement layer usrs counter */
layer->usrs--;
/* If this file handle has initialize encoder device, reset it */
@@ -1575,14 +1226,9 @@ static int vpbe_display_release(struct file *file)
layer->layer_info.id);
}
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- file->private_data = NULL;
+ _vb2_fop_release(file, NULL);
mutex_unlock(&layer->opslock);
- /* Free memory allocated to file handle object */
- kfree(fh);
-
disp_dev->cbcr_ofst = 0;
return 0;
@@ -1595,20 +1241,27 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
.vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
.vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
.vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
- .vidioc_reqbufs = vpbe_display_reqbufs,
- .vidioc_querybuf = vpbe_display_querybuf,
- .vidioc_qbuf = vpbe_display_qbuf,
- .vidioc_dqbuf = vpbe_display_dqbuf,
- .vidioc_streamon = vpbe_display_streamon,
- .vidioc_streamoff = vpbe_display_streamoff,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
.vidioc_cropcap = vpbe_display_cropcap,
.vidioc_g_crop = vpbe_display_g_crop,
.vidioc_s_crop = vpbe_display_s_crop,
+
.vidioc_s_std = vpbe_display_s_std,
.vidioc_g_std = vpbe_display_g_std,
+
.vidioc_enum_output = vpbe_display_enum_output,
.vidioc_s_output = vpbe_display_s_output,
.vidioc_g_output = vpbe_display_g_output,
+
.vidioc_s_dv_timings = vpbe_display_s_dv_timings,
.vidioc_g_dv_timings = vpbe_display_g_dv_timings,
.vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
@@ -1619,8 +1272,8 @@ static struct v4l2_file_operations vpbe_fops = {
.open = vpbe_display_open,
.release = vpbe_display_release,
.unlocked_ioctl = video_ioctl2,
- .mmap = vpbe_display_mmap,
- .poll = vpbe_display_poll
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
};
static int vpbe_device_get(struct device *dev, void *data)
@@ -1700,6 +1353,7 @@ static int register_device(struct vpbe_layer *vpbe_display_layer,
(int)vpbe_display_layer,
(int)&vpbe_display_layer->video_dev);
+ vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
err = video_register_device(&vpbe_display_layer->video_dev,
VFL_TYPE_GRABBER,
-1);
@@ -1724,9 +1378,10 @@ static int register_device(struct vpbe_layer *vpbe_display_layer,
*/
static int vpbe_display_probe(struct platform_device *pdev)
{
- struct vpbe_layer *vpbe_display_layer;
struct vpbe_display *disp_dev;
+ struct v4l2_device *v4l2_dev;
struct resource *res = NULL;
+ struct vb2_queue *q;
int k;
int i;
int err;
@@ -1748,13 +1403,14 @@ static int vpbe_display_probe(struct platform_device *pdev)
vpbe_device_get);
if (err < 0)
return err;
+
+ v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
/* Initialize the vpbe display controller */
if (NULL != disp_dev->vpbe_dev->ops.initialize) {
err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
disp_dev->vpbe_dev);
if (err) {
- v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
- "Error initing vpbe\n");
+ v4l2_err(v4l2_dev, "Error initing vpbe\n");
err = -ENOMEM;
goto probe_out;
}
@@ -1769,8 +1425,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
- "Unable to get VENC interrupt resource\n");
+ v4l2_err(v4l2_dev, "Unable to get VENC interrupt resource\n");
err = -ENODEV;
goto probe_out;
}
@@ -1779,30 +1434,57 @@ static int vpbe_display_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
VPBE_DISPLAY_DRIVER, disp_dev);
if (err) {
- v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
- "Unable to request interrupt\n");
+ v4l2_err(v4l2_dev, "VPBE IRQ request failed\n");
goto probe_out;
}
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* initialize vb2 queue */
+ q = &disp_dev->dev[i]->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = disp_dev->dev[i];
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &disp_dev->dev[i]->opslock;
+ err = vb2_queue_init(q);
+ if (err) {
+ v4l2_err(v4l2_dev, "vb2_queue_init() failed\n");
+ goto probe_out;
+ }
+
+ disp_dev->dev[i]->alloc_ctx =
+ vb2_dma_contig_init_ctx(disp_dev->vpbe_dev->pdev);
+ if (IS_ERR(disp_dev->dev[i]->alloc_ctx)) {
+ v4l2_err(v4l2_dev, "Failed to get the context\n");
+ err = PTR_ERR(disp_dev->dev[i]->alloc_ctx);
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue);
+
if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
err = -ENODEV;
goto probe_out;
}
}
- printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
+ v4l2_dbg(1, debug, v4l2_dev,
+ "Successfully completed the probing of vpbe v4l2 device\n");
+
return 0;
probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
- /* Get the pointer to the layer object */
- vpbe_display_layer = disp_dev->dev[k];
/* Unregister video device */
- if (vpbe_display_layer) {
- video_unregister_device(
- &vpbe_display_layer->video_dev);
- kfree(disp_dev->dev[k]);
+ if (disp_dev->dev[k] != NULL) {
+ vb2_dma_contig_cleanup_ctx(disp_dev->dev[k]->alloc_ctx);
+ video_unregister_device(&disp_dev->dev[k]->video_dev);
+ kfree(disp_dev->dev[k]);
}
}
return err;
@@ -1828,6 +1510,7 @@ static int vpbe_display_remove(struct platform_device *pdev)
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
/* Get the pointer to the layer object */
vpbe_display_layer = disp_dev->dev[i];
+ vb2_dma_contig_cleanup_ctx(vpbe_display_layer->alloc_ctx);
/* Unregister video device */
video_unregister_device(&vpbe_display_layer->video_dev);
@@ -1843,7 +1526,6 @@ static int vpbe_display_remove(struct platform_device *pdev)
static struct platform_driver vpbe_display_driver = {
.driver = {
.name = VPBE_DISPLAY_DRIVER,
- .owner = THIS_MODULE,
.bus = &platform_bus_type,
},
.probe = vpbe_display_probe,
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index d053c2669c1f..7d96a4b13b32 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -1585,7 +1585,6 @@ static struct platform_driver osd_driver = {
.remove = osd_remove,
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
},
.id_table = vpbe_osd_devtype
};
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 14a023a75d2d..36ed1466b290 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -687,7 +687,6 @@ static struct platform_driver venc_driver = {
.remove = venc_remove,
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
},
.id_table = vpbe_venc_devtype
};
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index de55f47a77db..b41bf7e822c8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -414,13 +414,13 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
/* assume V4L2_PIX_FMT_UYVY as default */
pix->pixelformat = V4L2_PIX_FMT_UYVY;
v4l2_fill_mbus_format(&mbus_fmt, pix,
- V4L2_MBUS_FMT_YUYV10_2X10);
+ MEDIA_BUS_FMT_YUYV10_2X10);
} else {
pix->field = V4L2_FIELD_NONE;
/* assume V4L2_PIX_FMT_SBGGR8 */
pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
v4l2_fill_mbus_format(&mbus_fmt, pix,
- V4L2_MBUS_FMT_SBGGR8_1X8);
+ MEDIA_BUS_FMT_SBGGR8_1X8);
}
/* if sub device supports g_mbus_fmt, override the defaults */
@@ -930,8 +930,8 @@ static int vpfe_querycap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
- cap->version = VPFE_CAPTURE_VERSION_CODE;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
@@ -2031,7 +2031,6 @@ static const struct dev_pm_ops vpfe_dev_pm_ops = {
static struct platform_driver vpfe_driver = {
.driver = {
.name = CAPTURE_DRV_NAME,
- .owner = THIS_MODULE,
.pm = &vpfe_dev_pm_ops,
},
.probe = vpfe_probe,
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 3dad5bd7fe0a..0380cf2e5775 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -467,7 +467,6 @@ static const struct dev_pm_ops vpif_pm = {
static struct platform_driver vpif_driver = {
.driver = {
.name = "vpif",
- .owner = THIS_MODULE,
.pm = vpif_pm_ops,
},
.remove = vpif_remove,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 3ccb26ff43c8..fa0a51521772 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -311,6 +311,8 @@ static struct vb2_ops video_qops = {
.start_streaming = vpif_start_streaming,
.stop_streaming = vpif_stop_streaming,
.buf_queue = vpif_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/**
@@ -1656,7 +1658,6 @@ static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = VPIF_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &vpif_pm_ops,
},
.probe = vpif_probe,
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 8d6ced56253c..839c24de1fd8 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1459,7 +1459,6 @@ static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = VPIF_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &vpif_pm_ops,
},
.probe = vpif_probe,
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 31120b4a4a33..fce86f17dffc 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -504,7 +504,6 @@ static const struct dev_pm_ops vpss_pm_ops = {
static struct platform_driver vpss_driver = {
.driver = {
.name = "vpss",
- .owner = THIS_MODULE,
.pm = &vpss_pm_ops,
},
.remove = vpss_remove,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index b4c9f1d08968..fd2891c886a3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -54,7 +54,7 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 1,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.name = "YUV 4:2:2 packed, CbYCrY",
.pixelformat = V4L2_PIX_FMT_UYVY,
@@ -64,7 +64,7 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 1,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
}, {
.name = "YUV 4:2:2 packed, CrYCbY",
.pixelformat = V4L2_PIX_FMT_VYUY,
@@ -74,7 +74,7 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 1,
- .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
}, {
.name = "YUV 4:2:2 packed, YCrYCb",
.pixelformat = V4L2_PIX_FMT_YVYU,
@@ -84,7 +84,7 @@ static const struct gsc_fmt gsc_formats[] = {
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 1,
- .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
}, {
.name = "YUV 4:4:4 planar, YCbYCr",
.pixelformat = V4L2_PIX_FMT_YUV32,
@@ -319,21 +319,22 @@ int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f)
return 0;
}
-static u32 get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index)
+static int get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index, u32 *ret_addr)
{
if (frm->addr.y == addr) {
*index = 0;
- return frm->addr.y;
+ *ret_addr = frm->addr.y;
} else if (frm->addr.cb == addr) {
*index = 1;
- return frm->addr.cb;
+ *ret_addr = frm->addr.cb;
} else if (frm->addr.cr == addr) {
*index = 2;
- return frm->addr.cr;
+ *ret_addr = frm->addr.cr;
} else {
pr_err("Plane address is wrong");
return -EINVAL;
}
+ return 0;
}
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
@@ -352,9 +353,11 @@ void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
u32 t_min, t_max;
t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
- low_addr = get_plane_info(frm, t_min, &low_plane);
+ if (get_plane_info(frm, t_min, &low_plane, &low_addr))
+ return;
t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
- high_addr = get_plane_info(frm, t_max, &high_plane);
+ if (get_plane_info(frm, t_max, &high_plane, &high_addr))
+ return;
mid_plane = 3 - (low_plane + high_plane);
if (mid_plane == 0)
@@ -1253,7 +1256,6 @@ static struct platform_driver gsc_driver = {
.id_table = gsc_driver_ids,
.driver = {
.name = GSC_MODULE_NAME,
- .owner = THIS_MODULE,
.pm = &gsc_pm_ops,
.of_match_table = exynos_gsc_match,
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index ef0a6564cef9..0abdb17fb19c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -117,7 +117,7 @@ enum gsc_yuv_fmt {
* @flags: flags indicating which operation mode format applies to
*/
struct gsc_fmt {
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
char *name;
u32 pixelformat;
u32 color;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 3d2babd5067a..8a2fd8c33d42 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -749,7 +749,7 @@ static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
return -EINVAL;
strncpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
- if (fmt->fourcc == V4L2_MBUS_FMT_JPEG_1X8)
+ if (fmt->fourcc == MEDIA_BUS_FMT_JPEG_1X8)
f->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index f5d85520caf3..1101c41ac117 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -81,7 +81,7 @@ static struct fimc_fmt fimc_formats[] = {
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
}, {
.name = "YUV 4:4:4",
- .mbus_code = V4L2_MBUS_FMT_YUV10_1X30,
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
.flags = FMT_FLAGS_WRITEBACK,
}, {
.name = "YUV 4:2:2 packed, YCbYCr",
@@ -90,7 +90,7 @@ static struct fimc_fmt fimc_formats[] = {
.color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, CbYCrY",
@@ -99,7 +99,7 @@ static struct fimc_fmt fimc_formats[] = {
.color = FIMC_FMT_CBYCRY422,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, CrYCbY",
@@ -108,7 +108,7 @@ static struct fimc_fmt fimc_formats[] = {
.color = FIMC_FMT_CRYCBY422,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, YCrYCb",
@@ -117,7 +117,7 @@ static struct fimc_fmt fimc_formats[] = {
.color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
@@ -190,7 +190,7 @@ static struct fimc_fmt fimc_formats[] = {
.depth = { 8 },
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
.flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
}, {
.name = "S5C73MX interleaved UYVY/JPEG",
@@ -200,7 +200,7 @@ static struct fimc_fmt fimc_formats[] = {
.memplanes = 2,
.colplanes = 1,
.mdataplanes = 0x2, /* plane 1 holds frame meta data */
- .mbus_code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
+ .mbus_code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
.flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
},
};
@@ -1294,7 +1294,6 @@ static struct platform_driver fimc_driver = {
.driver = {
.of_match_table = fimc_of_match,
.name = FIMC_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &fimc_pm_ops,
}
};
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 6c75c6ced1f7..7328f0845065 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -579,8 +579,8 @@ static inline bool fimc_jpeg_fourcc(u32 pixelformat)
static inline bool fimc_user_defined_mbus_fmt(u32 code)
{
- return (code == V4L2_MBUS_FMT_JPEG_1X8 ||
- code == V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8);
+ return (code == MEDIA_BUS_FMT_JPEG_1X8 ||
+ code == MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8);
}
/* Return the alpha component bit mask */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index d83033170789..7521aa59b064 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -133,7 +133,6 @@ static struct platform_driver fimc_is_i2c_driver = {
.driver = {
.of_match_table = fimc_is_i2c_of_match,
.name = "fimc-isp-i2c",
- .owner = THIS_MODULE,
.pm = &fimc_is_i2c_pm_ops,
}
};
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 22162b2567da..49658ca39e51 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -428,8 +428,7 @@ static void fimc_is_load_firmware(const struct firmware *fw, void *context)
* needed around for copying to the IS working memory every
* time before the Cortex-A5 is restarted.
*/
- if (is->fw.f_w)
- release_firmware(is->fw.f_w);
+ release_firmware(is->fw.f_w);
is->fw.f_w = fw;
done:
mutex_unlock(&is->lock);
@@ -814,9 +813,9 @@ static int fimc_is_probe(struct platform_device *pdev)
return -ENOMEM;
is->irq = irq_of_parse_and_map(dev->of_node, 0);
- if (is->irq < 0) {
+ if (!is->irq) {
dev_err(dev, "no irq found\n");
- return is->irq;
+ return -EINVAL;
}
ret = fimc_is_get_clocks(is);
@@ -937,8 +936,7 @@ static int fimc_is_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
fimc_is_put_clocks(is);
fimc_is_debugfs_remove(is);
- if (is->fw.f_w)
- release_firmware(is->fw.f_w);
+ release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
return 0;
@@ -962,7 +960,6 @@ static struct platform_driver fimc_is_driver = {
.driver = {
.of_match_table = fimc_is_of_match,
.name = FIMC_IS_DRV_NAME,
- .owner = THIS_MODULE,
.pm = &fimc_is_pm_ops,
}
};
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index be62d6b9ac48..60c744915549 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -41,21 +41,21 @@ static const struct fimc_fmt fimc_isp_formats[FIMC_ISP_NUM_FORMATS] = {
.depth = { 8 },
.color = FIMC_FMT_RAW8,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
}, {
.name = "RAW10 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG10,
.depth = { 10 },
.color = FIMC_FMT_RAW10,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
}, {
.name = "RAW12 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG12,
.depth = { 12 },
.color = FIMC_FMT_RAW12,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
},
};
@@ -149,7 +149,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
mf->colorspace = V4L2_COLORSPACE_JPEG;
- mf->code = V4L2_MBUS_FMT_YUV10_1X30;
+ mf->code = MEDIA_BUS_FMT_YUV10_1X30;
}
}
@@ -175,7 +175,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
FIMC_ISP_SINK_WIDTH_MAX, 0,
&mf->height, FIMC_ISP_SINK_HEIGHT_MIN,
FIMC_ISP_SINK_HEIGHT_MAX, 0, 0);
- mf->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
} else {
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
format = v4l2_subdev_get_try_format(fh,
@@ -188,7 +188,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
mf->height = format->height - FIMC_ISP_CAC_MARGIN_HEIGHT;
if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
- mf->code = V4L2_MBUS_FMT_YUV10_1X30;
+ mf->code = MEDIA_BUS_FMT_YUV10_1X30;
mf->colorspace = V4L2_COLORSPACE_JPEG;
} else {
mf->code = format->code;
@@ -680,11 +680,11 @@ static void __isp_subdev_set_default_format(struct fimc_isp *isp)
FIMC_ISP_CAC_MARGIN_WIDTH;
isp->sink_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT +
FIMC_ISP_CAC_MARGIN_HEIGHT;
- isp->sink_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ isp->sink_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
isp->src_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
isp->src_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
- isp->src_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ isp->src_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
__is_set_frame_size(is, &isp->src_fmt);
}
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
index bc3ec7d25a32..0477716a20db 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -112,24 +112,24 @@ void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
}
static const u32 src_pixfmt_map[8][3] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+ { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
FLITE_REG_CIGCTRL_YUV422_1P },
- { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+ { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
FLITE_REG_CIGCTRL_YUV422_1P },
- { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+ { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
FLITE_REG_CIGCTRL_YUV422_1P },
- { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+ { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
FLITE_REG_CIGCTRL_YUV422_1P },
- { V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
- { V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
- { V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
- { V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+ { MEDIA_BUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
};
/* Set camera input pixel format and resolution */
void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
{
- enum v4l2_mbus_pixelcode pixelcode = f->fmt->mbus_code;
+ u32 pixelcode = f->fmt->mbus_code;
int i = ARRAY_SIZE(src_pixfmt_map);
u32 cfg;
@@ -232,10 +232,10 @@ static void flite_hw_set_pack12(struct fimc_lite *dev, int on)
static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
{
static const u32 pixcode[4][2] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
- { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
- { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
- { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+ { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+ { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+ { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+ { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
};
u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
int i = ARRAY_SIZE(pixcode);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 6c1eb308f7b5..2510f189e242 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -48,7 +48,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
.name = "YUV 4:2:2 packed, CbYCrY",
@@ -57,7 +57,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_CBYCRY422,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
.name = "YUV 4:2:2 packed, CrYCbY",
@@ -66,7 +66,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_CRYCBY422,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
.name = "YUV 4:2:2 packed, YCrYCb",
@@ -75,7 +75,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
.name = "RAW8 (GRBG)",
@@ -84,7 +84,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 8 },
.color = FIMC_FMT_RAW8,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.flags = FMT_FLAGS_RAW_BAYER,
}, {
.name = "RAW10 (GRBG)",
@@ -93,7 +93,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_RAW10,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.flags = FMT_FLAGS_RAW_BAYER,
}, {
.name = "RAW12 (GRBG)",
@@ -102,7 +102,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.depth = { 16 },
.color = FIMC_FMT_RAW12,
.memplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
.flags = FMT_FLAGS_RAW_BAYER,
},
};
@@ -1720,7 +1720,6 @@ static struct platform_driver fimc_lite_driver = {
.driver = {
.of_match_table = flite_of_match,
.name = FIMC_LITE_DRV_NAME,
- .owner = THIS_MODULE,
.pm = &fimc_lite_pm_ops,
}
};
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c
index 2d77fd8f440a..df0cbcb69b6b 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-reg.c
@@ -592,10 +592,10 @@ struct mbus_pixfmt_desc {
};
static const struct mbus_pixfmt_desc pix_desc[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
- { V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
- { V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
- { V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { MEDIA_BUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
};
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
@@ -689,11 +689,11 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
/* TODO: add remaining supported formats. */
switch (vid_cap->ci_fmt.code) {
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
break;
- case V4L2_MBUS_FMT_JPEG_1X8:
- case V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8:
tmp = FIMC_REG_CSIIMGFMT_USER(1);
cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
break;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 54c49d5e7690..f315ef946cd4 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1469,7 +1469,6 @@ static struct platform_driver fimc_md_driver = {
.driver = {
.of_match_table = of_match_ptr(fimc_md_of_match),
.name = "s5p-fimc-md",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index be5d6fc895cb..2504aa89a6f4 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -238,34 +238,34 @@ struct csis_state {
*/
struct csis_pix_format {
unsigned int pix_width_alignment;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
u32 fmt_reg;
u8 data_alignment;
};
static const struct csis_pix_format s5pcsis_formats[] = {
{
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
.data_alignment = 32,
}, {
- .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
.data_alignment = 32,
}, {
- .code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8,
+ .code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
.data_alignment = 32,
}, {
- .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_RAW8,
.data_alignment = 24,
}, {
- .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
.fmt_reg = S5PCSIS_CFG_FMT_RAW10,
.data_alignment = 24,
}, {
- .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
.fmt_reg = S5PCSIS_CFG_FMT_RAW12,
.data_alignment = 24,
}
@@ -1041,7 +1041,6 @@ static struct platform_driver s5pcsis_driver = {
.driver = {
.of_match_table = s5pcsis_of_match,
.name = CSIS_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &s5pcsis_pm_ops,
},
};
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index d5dc198502ef..bbf428104871 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -604,10 +604,11 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strcpy(cap->driver, "viu");
strcpy(cap->card, "viu");
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1680,7 +1681,6 @@ static struct platform_driver viu_of_platform_driver = {
#endif
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc512x_viu_of_match,
},
};
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index d36c507a0ba2..b70c1aecca37 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -1101,7 +1101,6 @@ static struct platform_driver deinterlace_pdrv = {
.remove = deinterlace_remove,
.driver = {
.name = MEM2MEM_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(deinterlace_pdrv);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 7a86c77bffa0..193373ff268d 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -106,61 +106,61 @@ static struct mcam_format_struct {
__u32 pixelformat;
int bpp; /* Bytes per pixel */
bool planar;
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
} mcam_formats[] = {
{
.desc = "YUYV 4:2:2",
.pixelformat = V4L2_PIX_FMT_YUYV,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
.desc = "UYVY 4:2:2",
.pixelformat = V4L2_PIX_FMT_UYVY,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
.desc = "YUV 4:2:2 PLANAR",
.pixelformat = V4L2_PIX_FMT_YUV422P,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = true,
},
{
.desc = "YUV 4:2:0 PLANAR",
.pixelformat = V4L2_PIX_FMT_YUV420,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = true,
},
{
.desc = "YVU 4:2:0 PLANAR",
.pixelformat = V4L2_PIX_FMT_YVU420,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = true,
},
{
.desc = "RGB 444",
.pixelformat = V4L2_PIX_FMT_RGB444,
- .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 2,
.planar = false,
},
{
.desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.bpp = 2,
.planar = false,
},
{
.desc = "Raw RGB Bayer",
.pixelformat = V4L2_PIX_FMT_SBGGR8,
- .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.bpp = 1,
.planar = false,
},
@@ -190,8 +190,7 @@ static const struct v4l2_pix_format mcam_def_pix_format = {
.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
};
-static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
- V4L2_MBUS_FMT_YUYV8_2X8;
+static const u32 mcam_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
/*
@@ -1080,6 +1079,8 @@ static int mcam_vb_queue_setup(struct vb2_queue *vq,
*nbufs = minbufs;
if (cam->buffer_mode == B_DMA_contig)
alloc_ctxs[0] = cam->vb_alloc_ctx;
+ else if (cam->buffer_mode == B_DMA_sg)
+ alloc_ctxs[0] = cam->vb_alloc_ctx_sg;
return 0;
}
@@ -1101,26 +1102,6 @@ static void mcam_vb_buf_queue(struct vb2_buffer *vb)
mcam_read_setup(cam);
}
-
-/*
- * vb2 uses these to release the mutex when waiting in dqbuf. I'm
- * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
- * to be called with the mutex held), but better safe than sorry.
- */
-static void mcam_vb_wait_prepare(struct vb2_queue *vq)
-{
- struct mcam_camera *cam = vb2_get_drv_priv(vq);
-
- mutex_unlock(&cam->s_mutex);
-}
-
-static void mcam_vb_wait_finish(struct vb2_queue *vq)
-{
- struct mcam_camera *cam = vb2_get_drv_priv(vq);
-
- mutex_lock(&cam->s_mutex);
-}
-
/*
* These need to be called with the mutex held from vb2
*/
@@ -1190,8 +1171,8 @@ static const struct vb2_ops mcam_vb2_ops = {
.buf_queue = mcam_vb_buf_queue,
.start_streaming = mcam_vb_start_streaming,
.stop_streaming = mcam_vb_stop_streaming,
- .wait_prepare = mcam_vb_wait_prepare,
- .wait_finish = mcam_vb_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
@@ -1219,17 +1200,12 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
- struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
struct mcam_dma_desc *desc = mvb->dma_desc;
struct scatterlist *sg;
int i;
- mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
- sg_table->nents, DMA_FROM_DEVICE);
- if (mvb->dma_desc_nent <= 0)
- return -EIO; /* Not sure what's right here */
- for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
desc->dma_addr = sg_dma_address(sg);
desc->segment_len = sg_dma_len(sg);
desc++;
@@ -1237,16 +1213,6 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
-{
- struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
- struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
-
- if (sg_table)
- dma_unmap_sg(cam->dev, sg_table->sgl,
- sg_table->nents, DMA_FROM_DEVICE);
-}
-
static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
{
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
@@ -1263,12 +1229,11 @@ static const struct vb2_ops mcam_vb2_sg_ops = {
.buf_init = mcam_vb_sg_buf_init,
.buf_prepare = mcam_vb_sg_buf_prepare,
.buf_queue = mcam_vb_buf_queue,
- .buf_finish = mcam_vb_sg_buf_finish,
.buf_cleanup = mcam_vb_sg_buf_cleanup,
.start_streaming = mcam_vb_start_streaming,
.stop_streaming = mcam_vb_stop_streaming,
- .wait_prepare = mcam_vb_wait_prepare,
- .wait_finish = mcam_vb_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
#endif /* MCAM_MODE_DMA_SG */
@@ -1280,6 +1245,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
memset(vq, 0, sizeof(*vq));
vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vq->drv_priv = cam;
+ vq->lock = &cam->s_mutex;
INIT_LIST_HEAD(&cam->buffers);
switch (cam->buffer_mode) {
case B_DMA_contig:
@@ -1287,10 +1253,12 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
- cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_contig;
cam->frame_complete = mcam_dma_contig_done;
+ cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+ if (IS_ERR(cam->vb_alloc_ctx))
+ return PTR_ERR(cam->vb_alloc_ctx);
#endif
break;
case B_DMA_sg:
@@ -1301,6 +1269,9 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_sg;
cam->frame_complete = mcam_dma_sg_done;
+ cam->vb_alloc_ctx_sg = vb2_dma_sg_init_ctx(cam->dev);
+ if (IS_ERR(cam->vb_alloc_ctx_sg))
+ return PTR_ERR(cam->vb_alloc_ctx_sg);
#endif
break;
case B_vmalloc:
@@ -1326,6 +1297,10 @@ static void mcam_cleanup_vb2(struct mcam_camera *cam)
if (cam->buffer_mode == B_DMA_contig)
vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
#endif
+#ifdef MCAM_MODE_DMA_SG
+ if (cam->buffer_mode == B_DMA_sg)
+ vb2_dma_sg_cleanup_ctx(cam->vb_alloc_ctx_sg);
+#endif
}
@@ -1414,9 +1389,9 @@ static int mcam_vidioc_querycap(struct file *file, void *priv,
{
strcpy(cap->driver, "marvell_ccic");
strcpy(cap->card, "marvell_ccic");
- cap->version = 1;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index e0e628cb98f9..aa0c6eac254a 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -176,6 +176,7 @@ struct mcam_camera {
/* DMA buffers - DMA modes */
struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
struct vb2_alloc_ctx *vb_alloc_ctx;
+ struct vb2_alloc_ctx *vb_alloc_ctx_sg;
/* Mode-specific ops, set at open time */
void (*dma_setup)(struct mcam_camera *cam);
@@ -183,7 +184,7 @@ struct mcam_camera {
/* Current operating parameters */
struct v4l2_pix_format pix_format;
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
/* Locks */
struct mutex s_mutex; /* Access to this structure */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 054507f16734..0ed9b3adfcdf 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -512,7 +512,6 @@ static struct platform_driver mmpcam_driver = {
#endif
.driver = {
.name = "mmp-camera",
- .owner = THIS_MODULE
}
};
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4971ff21f82b..87314b743f55 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -402,13 +402,8 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1010,7 +1005,6 @@ static struct platform_driver emmaprp_pdrv = {
.remove = emmaprp_remove,
.driver = {
.name = MEM2MEM_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 05de442d24e4..dc2aaab54aef 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -3,7 +3,8 @@ config VIDEO_OMAP2_VOUT_VRFB
config VIDEO_OMAP2_VOUT
tristate "OMAP2/OMAP3 V4L2-Display driver"
- depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && HAS_MMU)
+ depends on MMU
+ depends on ARCH_OMAP2 || ARCH_OMAP3
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 64ab6fb06b9c..ba2d8f973d58 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -198,7 +198,7 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
* omap_vout_uservirt_to_phys: This inline function is used to convert user
* space virtual address to physical address.
*/
-static u32 omap_vout_uservirt_to_phys(u32 virtp)
+static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
{
unsigned long physp = 0;
struct vm_area_struct *vma;
@@ -418,10 +418,10 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
}
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
- "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
+ "%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
"out_height=%d rotation_type=%d screen_width=%d\n",
- __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height,
+ __func__, ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
info.color_mode, info.rotation, info.mirror, info.pos_x,
info.pos_y, info.out_width, info.out_height, info.rotation_type,
info.screen_width);
@@ -794,7 +794,7 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
vout->queued_buf_addr[vb->i] = (u8 *)
omap_vout_uservirt_to_phys(vb->baddr);
} else {
- u32 addr, dma_addr;
+ unsigned long addr, dma_addr;
unsigned long size;
addr = (unsigned long) vout->buf_virt_addr[vb->i];
@@ -1054,8 +1054,9 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
cap->bus_info[0] = '\0';
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 72265e58ca60..51c2129bdcc6 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2380,7 +2380,6 @@ static struct platform_driver omap3isp_driver = {
.remove = isp_remove,
.id_table = omap3isp_id_table,
.driver = {
- .owner = THIS_MODULE,
.name = "omap3isp",
.pm = &omap3isp_pm_ops,
},
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 81a9dc053d58..587489a072d5 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -36,23 +36,23 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ccdc_fmts[] = {
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_Y12_1X12,
- V4L2_MBUS_FMT_SGRBG8_1X8,
- V4L2_MBUS_FMT_SRGGB8_1X8,
- V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SGBRG8_1X8,
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGRBG12_1X12,
- V4L2_MBUS_FMT_SRGGB12_1X12,
- V4L2_MBUS_FMT_SBGGR12_1X12,
- V4L2_MBUS_FMT_SGBRG12_1X12,
- V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
};
/*
@@ -266,10 +266,10 @@ static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
__ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
- if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) &&
- (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) &&
- (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) &&
- (format->code != V4L2_MBUS_FMT_SGBRG10_1X10))
+ if ((format->code != MEDIA_BUS_FMT_SGRBG10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SRGGB10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SBGGR10_1X10) &&
+ (format->code != MEDIA_BUS_FMT_SGBRG10_1X10))
return -EINVAL;
if (enable)
@@ -971,8 +971,8 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
format = &ccdc->formats[CCDC_PAD_SINK];
- if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
/* According to the OMAP3 TRM the input mode only affects SYNC
* mode, enabling BT.656 mode should take precedence. However,
* in practice setting the input mode to YCbCr data on 8 bits
@@ -1020,7 +1020,7 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
/* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
* hardware seems to ignore it in all other input modes.
*/
- if (format->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ if (format->code == MEDIA_BUS_FMT_UYVY8_2X8)
isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
ISPCCDC_CFG_Y8POS);
else
@@ -1168,9 +1168,9 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
if (ccdc->bt656)
bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
- else if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
+ else if (fmt_info->code == MEDIA_BUS_FMT_YUYV8_2X8)
bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
- else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ else if (fmt_info->code == MEDIA_BUS_FMT_UYVY8_2X8)
bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
else
bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
@@ -1199,16 +1199,16 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
/* Mosaic filter */
switch (format->code) {
- case V4L2_MBUS_FMT_SRGGB10_1X10:
- case V4L2_MBUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
ccdc_pattern = ccdc_srggb_pattern;
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
- case V4L2_MBUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
ccdc_pattern = ccdc_sbggr_pattern;
break;
- case V4L2_MBUS_FMT_SGBRG10_1X10:
- case V4L2_MBUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
ccdc_pattern = ccdc_sgbrg_pattern;
break;
default:
@@ -1267,7 +1267,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
/* The CCDC outputs data in UYVY order by default. Swap bytes to get
* YUYV.
*/
- if (format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ if (format->code == MEDIA_BUS_FMT_YUYV8_1X16)
isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
ISPCCDC_CFG_BSWD);
else
@@ -1967,7 +1967,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
enum v4l2_subdev_format_whence which)
{
const struct isp_format_info *info;
- enum v4l2_mbus_pixelcode pixelcode;
+ u32 pixelcode;
unsigned int width = fmt->width;
unsigned int height = fmt->height;
struct v4l2_rect *crop;
@@ -1983,7 +1983,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(ccdc_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
/* Clamp the input size. */
fmt->width = clamp_t(u32, width, 32, 4096);
@@ -2007,19 +2007,19 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
* configured to pack bytes in BT.656, hiding the inaccuracy.
* In all cases bytes can be swapped.
*/
- if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
/* Use the user requested format if YUV. */
- if (pixelcode == V4L2_MBUS_FMT_YUYV8_2X8 ||
- pixelcode == V4L2_MBUS_FMT_UYVY8_2X8 ||
- pixelcode == V4L2_MBUS_FMT_YUYV8_1X16 ||
- pixelcode == V4L2_MBUS_FMT_UYVY8_1X16)
+ if (pixelcode == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ pixelcode == MEDIA_BUS_FMT_UYVY8_2X8 ||
+ pixelcode == MEDIA_BUS_FMT_YUYV8_1X16 ||
+ pixelcode == MEDIA_BUS_FMT_UYVY8_1X16)
fmt->code = pixelcode;
- if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8)
- fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
- else if (fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
- fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8)
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ else if (fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
}
/* Hardcode the output size to the crop rectangle size. */
@@ -2047,8 +2047,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
fmt->code = info->truncated;
/* YUV formats are not supported by the video port. */
- if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- fmt->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
fmt->code = 0;
/* The number of lines that can be clocked out from the video
@@ -2083,7 +2083,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
* to keep the Bayer pattern.
*/
info = omap3isp_video_format_info(sink->code);
- if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
crop->left &= ~1;
crop->top &= ~1;
}
@@ -2103,7 +2103,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
sink->height - crop->top);
/* Odd width/height values don't make sense for Bayer formats. */
- if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
crop->width &= ~1;
crop->height &= ~1;
}
@@ -2135,13 +2135,13 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
format = __ccdc_get_format(ccdc, fh, code->pad,
V4L2_SUBDEV_FORMAT_TRY);
- if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
/* In YUV mode the CCDC can swap bytes. */
if (code->index == 0)
- code->code = V4L2_MBUS_FMT_YUYV8_1X16;
+ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
else if (code->index == 1)
- code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
else
return -EINVAL;
} else {
@@ -2383,9 +2383,7 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
* return true if the combination is possible
* return false otherwise
*/
-static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in,
- enum v4l2_mbus_pixelcode out,
- unsigned int additional_shift)
+static bool ccdc_is_shiftable(u32 in, u32 out, unsigned int additional_shift)
{
const struct isp_format_info *in_info, *out_info;
@@ -2452,7 +2450,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = CCDC_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
ccdc_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index 9cb49b3c04bd..f4aedb37e41e 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -289,10 +289,10 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
u32 val, format;
switch (config->format) {
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
break;
- case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
default:
format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */
break;
@@ -438,7 +438,7 @@ static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
u32 val, hwords;
if (sink_pixcode != source_pixcode &&
- sink_pixcode == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
+ sink_pixcode == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
dpcm_decompress = 1;
ccp2_pwr_cfg(ccp2);
@@ -604,8 +604,8 @@ void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
*/
static const unsigned int ccp2_fmts[] = {
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
};
/*
@@ -643,8 +643,8 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
switch (pad) {
case CCP2_PAD_SINK:
- if (fmt->code != V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8)
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ if (fmt->code != MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (ccp2->input == CCP2_INPUT_SENSOR) {
fmt->width = clamp_t(u32, fmt->width,
@@ -671,7 +671,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
*/
format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
break;
}
@@ -808,7 +808,7 @@ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = CCP2_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
ccp2_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6530b255f103..09c686d96ae8 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -78,15 +78,15 @@ static void csi2_recv_config(struct isp_device *isp,
}
static const unsigned int csi2_input_fmts[] = {
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
- V4L2_MBUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
};
/* To set the format on the CSI2 requires a mapping function that takes
@@ -171,19 +171,19 @@ static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
int fmtidx, destidx, is_3630;
switch (fmt->code) {
- case V4L2_MBUS_FMT_SGRBG10_1X10:
- case V4L2_MBUS_FMT_SRGGB10_1X10:
- case V4L2_MBUS_FMT_SBGGR10_1X10:
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
fmtidx = 0;
break;
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
fmtidx = 1;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
fmtidx = 2;
break;
default:
@@ -843,7 +843,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
- enum v4l2_mbus_pixelcode pixelcode;
+ u32 pixelcode;
struct v4l2_mbus_framefmt *format;
const struct isp_format_info *info;
unsigned int i;
@@ -858,7 +858,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(csi2_input_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1029,7 +1029,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = CSI2_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
csi2_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 605f57ef0a49..dd9eed45d853 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -964,18 +964,16 @@ static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
* @prev: pointer to previewer private structure
* @pixelcode: pixel code
*/
-static void
-preview_config_ycpos(struct isp_prev_device *prev,
- enum v4l2_mbus_pixelcode pixelcode)
+static void preview_config_ycpos(struct isp_prev_device *prev, u32 pixelcode)
{
struct isp_device *isp = to_isp_device(prev);
enum preview_ycpos_mode mode;
switch (pixelcode) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
mode = YCPOS_CrYCbY;
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
mode = YCPOS_YCrYCb;
break;
default:
@@ -1028,16 +1026,16 @@ static void preview_config_input_format(struct isp_prev_device *prev,
ISPPRV_PCR_WIDTH);
switch (info->flavor) {
- case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
prev->params.cfa_order = 0;
break;
- case V4L2_MBUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
prev->params.cfa_order = 1;
break;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
prev->params.cfa_order = 2;
break;
- case V4L2_MBUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
prev->params.cfa_order = 3;
break;
default:
@@ -1078,8 +1076,8 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
- format->code != V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != MEDIA_BUS_FMT_Y8_1X8 &&
+ format->code != MEDIA_BUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
@@ -1709,21 +1707,21 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_SGRBG8_1X8,
- V4L2_MBUS_FMT_SRGGB8_1X8,
- V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SGBRG8_1X8,
- V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
};
static const unsigned int preview_output_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
};
/*
@@ -1742,7 +1740,7 @@ static void preview_try_format(struct isp_prev_device *prev,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
- enum v4l2_mbus_pixelcode pixelcode;
+ u32 pixelcode;
struct v4l2_rect *crop;
unsigned int i;
@@ -1774,7 +1772,7 @@ static void preview_try_format(struct isp_prev_device *prev,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(preview_input_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
break;
case PREV_PAD_SOURCE:
@@ -1782,13 +1780,13 @@ static void preview_try_format(struct isp_prev_device *prev,
*fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which);
switch (pixelcode) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
fmt->code = pixelcode;
break;
default:
- fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
break;
}
@@ -1843,8 +1841,8 @@ static void preview_try_crop(struct isp_prev_device *prev,
* and no columns in other modes. Increase the margins based on the sink
* format.
*/
- if (sink->code != V4L2_MBUS_FMT_Y8_1X8 &&
- sink->code != V4L2_MBUS_FMT_Y10_1X10) {
+ if (sink->code != MEDIA_BUS_FMT_Y8_1X8 &&
+ sink->code != MEDIA_BUS_FMT_Y10_1X10) {
left += 2;
right -= 2;
top += 2;
@@ -2092,7 +2090,7 @@ static int preview_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = PREV_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
preview_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 05d1ace57451..2b9bc4839876 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -198,17 +198,16 @@ static void resizer_set_bilinear(struct isp_res_device *res,
* @res: Device context.
* @pixelcode: pixel code.
*/
-static void resizer_set_ycpos(struct isp_res_device *res,
- enum v4l2_mbus_pixelcode pixelcode)
+static void resizer_set_ycpos(struct isp_res_device *res, u32 pixelcode)
{
struct isp_device *isp = to_isp_device(res);
switch (pixelcode) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
ISPRSZ_CNT_YCPOS);
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
ISPRSZ_CNT_YCPOS);
break;
@@ -1348,8 +1347,8 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
/* resizer pixel formats */
static const unsigned int resizer_formats[] = {
- V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
};
static unsigned int resizer_max_in_width(struct isp_res_device *res)
@@ -1385,9 +1384,9 @@ static void resizer_try_format(struct isp_res_device *res,
switch (pad) {
case RESZ_PAD_SINK:
- if (fmt->code != V4L2_MBUS_FMT_YUYV8_1X16 &&
- fmt->code != V4L2_MBUS_FMT_UYVY8_1X16)
- fmt->code = V4L2_MBUS_FMT_YUYV8_1X16;
+ if (fmt->code != MEDIA_BUS_FMT_YUYV8_1X16 &&
+ fmt->code != MEDIA_BUS_FMT_UYVY8_1X16)
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
resizer_max_in_width(res));
@@ -1571,7 +1570,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESZ_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_YUYV8_1X16;
+ format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
format.format.width = 4096;
format.format.height = 4096;
resizer_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index bc38c88c7bd9..b463fe172d16 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -39,74 +39,74 @@
* corresponding in-memory formats to the table below!!!
*/
static struct isp_format_info formats[] = {
- { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_GREY, 8, 1, },
- { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_Y10, 10, 2, },
- { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_Y12, 12, 2, },
- { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR8, 8, 1, },
- { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
- V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG8, 8, 1, },
- { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
- V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG8, 8, 1, },
- { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
- V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB8, 8, 1, },
- { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
- V4L2_MBUS_FMT_SBGGR10_1X10, 0,
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10, 0,
V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
- { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGBRG10_1X10, 0,
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_1X10, 0,
V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
- { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10, 0,
V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
- { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
- V4L2_MBUS_FMT_SRGGB10_1X10, 0,
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10, 0,
V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
- { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR10, 10, 2, },
- { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG10, 10, 2, },
- { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG10, 10, 2, },
- { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB10, 10, 2, },
- { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR12, 12, 2, },
- { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG12, 12, 2, },
- { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG12, 12, 2, },
- { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB12, 12, 2, },
- { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_UYVY8_1X16, 0,
+ { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16, 0,
V4L2_PIX_FMT_UYVY, 16, 2, },
- { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16, 0,
+ { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16, 0,
V4L2_PIX_FMT_YUYV, 16, 2, },
- { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8, 0,
+ { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8, 0,
V4L2_PIX_FMT_UYVY, 8, 2, },
- { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_YUYV8_2X8, 0,
+ { MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8, 0,
V4L2_PIX_FMT_YUYV, 8, 2, },
/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
* module and avoid NULL pointer dereferences.
@@ -114,8 +114,7 @@ static struct isp_format_info formats[] = {
{ 0, }
};
-const struct isp_format_info *
-omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
+const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
unsigned int i;
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 0b7efedc3da9..4071dd7060ea 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -44,10 +44,10 @@ struct v4l2_pix_format;
* @bpp: Bytes per pixel (when stored in memory)
*/
struct isp_format_info {
- enum v4l2_mbus_pixelcode code;
- enum v4l2_mbus_pixelcode truncated;
- enum v4l2_mbus_pixelcode uncompressed;
- enum v4l2_mbus_pixelcode flavor;
+ u32 code;
+ u32 truncated;
+ u32 uncompressed;
+ u32 flavor;
u32 pixelformat;
unsigned int width;
unsigned int bpp;
@@ -206,6 +206,6 @@ void omap3isp_video_resume(struct isp_video *video, int continuous);
struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
const struct isp_format_info *
-omap3isp_video_format_info(enum v4l2_mbus_pixelcode code);
+omap3isp_video_format_info(u32 code);
#endif /* OMAP3_ISP_VIDEO_H */
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 4f81b4c9d113..aa40c8269ab8 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1218,11 +1218,11 @@ void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
}
/* Media bus pixel formats supported at the camif input */
-static const enum v4l2_mbus_pixelcode camif_mbus_formats[] = {
- V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_YVYU8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_VYUY8_2X8,
+static const u32 camif_mbus_formats[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
};
/*
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index b38574702fe9..2d5bd3ac7f81 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -256,8 +256,7 @@ static void camif_unregister_sensor(struct camif_dev *camif)
v4l2_device_unregister_subdev(sd);
camif->sensor.sd = NULL;
i2c_unregister_device(client);
- if (adapter)
- i2c_put_adapter(adapter);
+ i2c_put_adapter(adapter);
}
static int camif_create_media_links(struct camif_dev *camif)
@@ -652,7 +651,6 @@ static struct platform_driver s3c_camif_driver = {
.id_table = s3c_camif_driver_ids,
.driver = {
.name = S3C_CAMIF_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &s3c_camif_pm_ops,
}
};
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 6e0c9988a191..812fb3a7c4e3 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -96,10 +96,10 @@ void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
}
static const u32 src_pixfmt_map[8][2] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
- { V4L2_MBUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
- { V4L2_MBUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
- { V4L2_MBUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
+ { MEDIA_BUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
+ { MEDIA_BUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
+ { MEDIA_BUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
+ { MEDIA_BUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
};
/* Set camera input pixel format and resolution */
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index d79e214ce8ce..47ba8fbb0426 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -297,14 +297,8 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -812,7 +806,6 @@ static struct platform_driver g2d_pdrv = {
.id_table = g2d_driver_ids,
.driver = {
.name = G2D_NAME,
- .owner = THIS_MODULE,
.of_match_table = exynos_g2d_match,
},
};
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index fe2727413f3a..12f7452edce3 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1001,13 +1001,8 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
sizeof(cap->card));
}
cap->bus_info[0] = 0;
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -2768,7 +2763,6 @@ static struct platform_driver s5p_jpeg_driver = {
.remove = s5p_jpeg_remove,
.driver = {
.of_match_table = of_match_ptr(samsung_jpeg_match),
- .owner = THIS_MODULE,
.name = S5P_JPEG_M2M_NAME,
.pm = &s5p_jpeg_pm_ops,
},
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 51cb2dd0e13a..83e01f3466e9 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -71,6 +71,7 @@
#define S5P_FIMV_R2H_CMD_ENC_BUFFER_FUL_RET_V6 16
#define S5P_FIMV_R2H_CMD_ERR_RET_V6 32
+#define S5P_FIMV_MFC_BUS_RESET_CTRL 0x7110
#define S5P_FIMV_FW_VERSION_V6 0xf000
#define S5P_FIMV_INSTANCE_ID_V6 0xf008
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 363fd8c0a699..fbfdf03b9054 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -159,6 +159,10 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
}
clear_bit(0, &dev->hw_lock);
spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* De-init MFC */
+ s5p_mfc_deinit_hw(dev);
+
/* Double check if there is at least one instance running.
* If no instance is in memory then no firmware should be present */
if (dev->num_inst > 0) {
@@ -220,11 +224,14 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
size_t dec_y_addr;
unsigned int frame_type;
- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+ /* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
+ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
+ return;
+ dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
/* Copy timestamp / timecode from decoded src to dst and set
- appropriate flags */
+ appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -250,6 +257,11 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
dst_buf->b->v4l2_buf.flags |=
V4L2_BUF_FLAG_BFRAME;
break;
+ default:
+ /* Don't know how to handle
+ S5P_FIMV_DECODE_FRAME_OTHER_FRAME. */
+ mfc_debug(2, "Unexpected frame type: %d\n",
+ frame_type);
}
break;
}
@@ -334,8 +346,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
ctx->state = MFCINST_RES_CHANGE_INIT;
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
return;
@@ -407,8 +418,7 @@ leave_handle_frame:
clear_work_bit(ctx);
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
/* if suspending, wake up device and do not try_run again*/
if (test_bit(0, &dev->enter_suspend))
@@ -455,8 +465,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
break;
}
}
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
s5p_mfc_clock_off();
wake_up_dev(dev, reason, err);
@@ -510,8 +519,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
}
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
clear_work_bit(ctx);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
wake_up_ctx(ctx, reason, err);
@@ -549,16 +557,14 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
} else {
ctx->dpb_flush_flag = 0;
}
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
wake_up(&ctx->queue);
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
@@ -635,8 +641,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
mfc_err("post_frame_start() failed\n");
s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
@@ -815,7 +820,7 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOENT;
goto err_queue_init;
}
- q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
if (ret) {
@@ -837,7 +842,7 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOENT;
goto err_queue_init;
}
- q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
ret = vb2_queue_init(q);
if (ret) {
@@ -1284,11 +1289,17 @@ static int s5p_mfc_suspend(struct device *dev)
m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
if (ret == 0) {
mfc_err("Waiting for hardware to finish timed out\n");
+ clear_bit(0, &m_dev->enter_suspend);
return -EIO;
}
}
- return s5p_mfc_sleep(m_dev);
+ ret = s5p_mfc_sleep(m_dev);
+ if (ret) {
+ clear_bit(0, &m_dev->enter_suspend);
+ clear_bit(0, &m_dev->hw_lock);
+ }
+ return ret;
}
static int s5p_mfc_resume(struct device *dev)
@@ -1505,7 +1516,6 @@ static struct platform_driver s5p_mfc_driver = {
.id_table = mfc_driver_ids,
.driver = {
.name = S5P_MFC_NAME,
- .owner = THIS_MODULE,
.pm = &s5p_mfc_pm_ops,
.of_match_table = exynos_mfc_match,
},
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 3e41ca1293ed..15f7663dd9f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -237,8 +237,6 @@ struct s5p_mfc_variant {
/**
* struct s5p_mfc_priv_buf - represents internal used buffer
- * @alloc: allocation-specific context for each buffer
- * (videobuf2 allocator)
* @ofs: offset of each buffer, will be used for MFC
* @virt: kernel virtual address, only valid when the
* buffer accessed by driver
@@ -246,7 +244,6 @@ struct s5p_mfc_variant {
* @size: size of the buffer
*/
struct s5p_mfc_priv_buf {
- void *alloc;
unsigned long ofs;
void *virt;
dma_addr_t dma;
@@ -340,6 +337,7 @@ struct s5p_mfc_dev {
struct s5p_mfc_hw_cmds *mfc_cmds;
const struct s5p_mfc_regs *mfc_regs;
enum s5p_mfc_fw_ver fw_ver;
+ bool risc_on; /* indicates if RISC is on or off */
};
/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 0c885a8a0e9f..40d8a03a141d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -129,6 +129,25 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
return 0;
}
+static int s5p_mfc_bus_reset(struct s5p_mfc_dev *dev)
+{
+ unsigned int status;
+ unsigned long timeout;
+
+ /* Reset */
+ mfc_write(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* Check bus status */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while resetting MFC.\n");
+ return -EIO;
+ }
+ status = mfc_read(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
+ } while ((status & 0x2) == 0);
+ return 0;
+}
+
/* Reset the device */
int s5p_mfc_reset(struct s5p_mfc_dev *dev)
{
@@ -139,12 +158,6 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
mfc_debug_enter();
if (IS_MFCV6_PLUS(dev)) {
- /* Reset IP */
- /* except RISC, reset */
- mfc_write(dev, 0xFEE, S5P_FIMV_MFC_RESET_V6);
- /* reset release */
- mfc_write(dev, 0x0, S5P_FIMV_MFC_RESET_V6);
-
/* Zero Initialization of MFC registers */
mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD_V6);
@@ -153,8 +166,17 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT_V6; i++)
mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4));
- /* Reset */
- mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6);
+ /* check bus reset control before reset */
+ if (dev->risc_on)
+ if (s5p_mfc_bus_reset(dev))
+ return -EIO;
+ /* Reset:
+ * RISC_ON is cleared during power_on and wake_up.
+ * V6 also needs RISC_ON cleared during reset.
+ */
+ if ((!dev->risc_on) || (!IS_MFCV7_PLUS(dev)))
+ mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6);
+
mfc_write(dev, 0x1FFF, S5P_FIMV_MFC_RESET_V6);
mfc_write(dev, 0, S5P_FIMV_MFC_RESET_V6);
} else {
@@ -226,6 +248,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
s5p_mfc_clock_on();
+ dev->risc_on = 0;
ret = s5p_mfc_reset(dev);
if (ret) {
mfc_err("Failed to reset MFC - timeout\n");
@@ -238,8 +261,10 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
s5p_mfc_clear_cmds(dev);
/* 3. Release reset signal to the RISC */
s5p_mfc_clean_dev_int_flags(dev);
- if (IS_MFCV6_PLUS(dev))
+ if (IS_MFCV6_PLUS(dev)) {
+ dev->risc_on = 1;
mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+ }
else
mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
mfc_debug(2, "Will now wait for completion of firmware transfer\n");
@@ -328,6 +353,58 @@ int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
return ret;
}
+static int s5p_mfc_v8_wait_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ /* Release reset signal to the RISC */
+ dev->risc_on = 1;
+ mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
+ mfc_err("Failed to reset MFCV8\n");
+ return -EIO;
+ }
+ mfc_debug(2, "Write command to wakeup MFCV8\n");
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFCV8 - timeout\n");
+ return ret;
+ }
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
+ mfc_err("Failed to wakeup MFC\n");
+ return -EIO;
+ }
+ return ret;
+}
+
+static int s5p_mfc_wait_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ /* Send MFC wakeup command */
+ ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+
+ /* Release reset signal to the RISC */
+ if (IS_MFCV6_PLUS(dev)) {
+ dev->risc_on = 1;
+ mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+ } else {
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+ }
+
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
+ mfc_err("Failed to wakeup MFC\n");
+ return -EIO;
+ }
+ return ret;
+}
+
int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
{
int ret;
@@ -336,9 +413,11 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
/* 0. MFC reset */
mfc_debug(2, "MFC reset..\n");
s5p_mfc_clock_on();
+ dev->risc_on = 0;
ret = s5p_mfc_reset(dev);
if (ret) {
mfc_err("Failed to reset MFC - timeout\n");
+ s5p_mfc_clock_off();
return ret;
}
mfc_debug(2, "Done MFC reset..\n");
@@ -347,23 +426,16 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
/* 2. Initialize registers of channel I/F */
s5p_mfc_clear_cmds(dev);
s5p_mfc_clean_dev_int_flags(dev);
- /* 3. Initialize firmware */
- ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
- if (ret) {
- mfc_err("Failed to send command to MFC - timeout\n");
- return ret;
- }
- /* 4. Release reset signal to the RISC */
- if (IS_MFCV6_PLUS(dev))
- mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
+ /* 3. Send MFC wakeup command and wait for completion */
+ if (IS_MFCV8(dev))
+ ret = s5p_mfc_v8_wait_wakeup(dev);
else
- mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
- mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
- if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
- mfc_err("Failed to load firmware\n");
- return -EIO;
- }
+ ret = s5p_mfc_wait_wakeup(dev);
+
s5p_mfc_clock_off();
+ if (ret)
+ return ret;
+
dev->int_cond = 0;
if (dev->int_err != 0 || dev->int_type !=
S5P_MFC_R2H_CMD_WAKEUP_RET) {
@@ -396,7 +468,6 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
}
set_work_bit_irqsave(ctx);
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
if (s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
@@ -422,7 +493,6 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
{
ctx->state = MFCINST_RETURN_INST;
set_work_bit_irqsave(ctx);
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
/* Wait until instance is returned or timeout occurred */
if (s5p_mfc_wait_for_done_ctx(ctx,
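The s5p_mfc_bus_reset() helper added above writes the reset bit and then polls S5P_FIMV_MFC_BUS_RESET_CTRL until bit 1 comes up, giving up once a jiffies-based deadline passes. Below is a minimal standalone sketch of the same poll-until-ready-or-timeout shape, in plain userspace C; read_status() is a made-up stub standing in for mfc_read(), and the snippet is illustrative only, not part of the patch.

#include <stdio.h>
#include <time.h>

/* Stand-in for mfc_read(dev, S5P_FIMV_MFC_BUS_RESET_CTRL); purely illustrative. */
static unsigned int read_status(void)
{
	static int calls;
	return ++calls > 3 ? 0x2u : 0x0u;	/* pretend bit 1 comes up on the 4th read */
}

/* Poll until bit 1 of the status word is set, or give up after ~1 second. */
static int wait_for_bus_idle(void)
{
	time_t deadline = time(NULL) + 1;

	while (!(read_status() & 0x2)) {
		if (time(NULL) > deadline)
			return -1;		/* timed out, like the driver's -EIO path */
	}
	return 0;
}

int main(void)
{
	printf("bus idle: %s\n", wait_for_bus_idle() ? "timeout" : "yes");
	return 0;
}

The point of the bounded loop is that a hung bus never stalls the reset path indefinitely; the caller sees an error and can recover instead of spinning forever.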
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index a98fe023deaf..c6c3452ccca1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -269,15 +269,13 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
/*
* This is only a mem-to-mem video device. The capture and output
* device capability flags are left only for backward compatibility
* and are scheduled for removal.
*/
- cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -334,7 +332,6 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
MFCINST_RES_CHANGE_END)) {
/* If the MFC is parsing the header,
* so wait until it is finished */
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET,
0);
}
@@ -740,12 +737,12 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
ctx->state < MFCINST_ABORT) {
ctrl->val = ctx->pb_count;
break;
- } else if (ctx->state != MFCINST_INIT) {
+ } else if (ctx->state != MFCINST_INIT &&
+ ctx->state != MFCINST_RES_CHANGE_END) {
v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
return -EINVAL;
}
/* Should wait for the header to be parsed */
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
if (ctx->state >= MFCINST_HEAD_PARSED &&
@@ -1057,7 +1054,6 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) {
ctx->state = MFCINST_FLUSH;
set_work_bit_irqsave(ctx);
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
if (s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index a904a1c7bb21..bd64f1dcbdb5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -32,7 +32,7 @@
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
-#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12MT
+#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12M
#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264
static struct s5p_mfc_fmt formats[] = {
@@ -67,8 +67,7 @@ static struct s5p_mfc_fmt formats[] = {
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
.num_planes = 2,
- .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT |
- MFC_V8_BIT,
+ .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT,
},
{
.name = "H264 Encoded Stream",
@@ -690,6 +689,16 @@ static struct mfc_control controls[] = {
.step = 1,
.default_value = 0,
},
+ {
+ .id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum number of output bufs",
+ .minimum = 1,
+ .maximum = 32,
+ .step = 1,
+ .default_value = 1,
+ .is_volatile = 1,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(controls)
@@ -938,15 +947,13 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
/*
* This is only a mem-to-mem video device. The capture and output
* device capability flags are left only for backward compatibility
* and are scheduled for removal.
*/
- cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1137,6 +1144,11 @@ static int vidioc_reqbufs(struct file *file, void *priv,
(reqbufs->memory != V4L2_MEMORY_USERPTR))
return -EINVAL;
if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (reqbufs->count == 0) {
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ ctx->capture_state = QUEUE_FREE;
+ return ret;
+ }
if (ctx->capture_state != QUEUE_FREE) {
mfc_err("invalid capture state: %d\n",
ctx->capture_state);
@@ -1158,6 +1170,14 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -ENOMEM;
}
} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers,
+ ctx);
+ ctx->output_state = QUEUE_FREE;
+ return ret;
+ }
if (ctx->output_state != QUEUE_FREE) {
mfc_err("invalid output state: %d\n",
ctx->output_state);
@@ -1624,8 +1644,39 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
+static int s5p_mfc_enc_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ break;
+ } else if (ctx->state != MFCINST_INIT) {
+ v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
+ return -EINVAL;
+ }
+ /* Should wait for the header to be produced */
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->pb_count;
+ } else {
+ v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+}
+
static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
.s_ctrl = s5p_mfc_enc_s_ctrl,
+ .g_volatile_ctrl = s5p_mfc_enc_g_v_ctrl,
};
static int vidioc_s_parm(struct file *file, void *priv,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 7cf07963187d..0c4fcf2dfd09 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1178,7 +1178,6 @@ static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE);
}
@@ -1192,7 +1191,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
last_frame = MFC_DEC_LAST_FRAME;
s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_decode_one_frame_v5(ctx, last_frame);
return 0;
}
@@ -1212,7 +1210,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
last_frame = MFC_DEC_LAST_FRAME;
mfc_debug(2, "Setting ctx->state to FINISHING\n");
@@ -1273,7 +1270,6 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
mfc_debug(2, "encoding buffer with index=%d state=%d\n",
src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
s5p_mfc_encode_one_frame_v5(ctx);
@@ -1297,7 +1293,6 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_init_decode_v5(ctx);
}
@@ -1317,7 +1312,6 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_init_encode_v5(ctx);
}
@@ -1352,7 +1346,6 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
0, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
if (ret) {
mfc_err("Failed to alloc frame mem\n");
@@ -1396,6 +1389,8 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
* Now obtaining frames from MFC buffer
*/
s5p_mfc_clock_on();
+ s5p_mfc_clean_ctx_int_flags(ctx);
+
if (ctx->type == MFCINST_DECODER) {
s5p_mfc_set_dec_desc_buffer(ctx);
switch (ctx->state) {
@@ -1406,12 +1401,10 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
break;
case MFCINST_INIT:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
ctx);
break;
case MFCINST_RETURN_INST:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
ctx);
break;
@@ -1444,12 +1437,10 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
ret = s5p_mfc_run_enc_frame(ctx);
break;
case MFCINST_INIT:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
ctx);
break;
case MFCINST_RETURN_INST:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
ctx);
break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 8798b14bacce..9aea179943ce 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1394,7 +1394,6 @@ static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
if (flush) {
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
writel(ctx->inst_no, mfc_regs->instance_id);
s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
@@ -1532,27 +1531,10 @@ static inline int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf *temp_vb;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->irqlock, flags);
-
- /* Frames are being decoded */
- if (list_empty(&ctx->src_queue)) {
- mfc_debug(2, "No src buffers.\n");
- spin_unlock_irqrestore(&dev->irqlock, flags);
- return;
- }
- /* Get the next source buffer */
- temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
- temp_vb->flags |= MFC_BUF_FLAG_USED;
- s5p_mfc_set_dec_stream_buffer_v6(ctx,
- vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, 0);
- spin_unlock_irqrestore(&dev->irqlock, flags);
+ s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
- s5p_mfc_decode_one_frame_v6(ctx, 1);
+ s5p_mfc_decode_one_frame_v6(ctx, MFC_DEC_LAST_FRAME);
}
static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
@@ -1588,7 +1570,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
last_frame = 1;
mfc_debug(2, "Setting ctx->state to FINISHING\n");
@@ -1645,7 +1626,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_encode_one_frame_v6(ctx);
return 0;
@@ -1667,7 +1647,6 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_init_decode_v6(ctx);
}
@@ -1687,7 +1666,6 @@ static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_init_encode_v6(ctx);
}
@@ -1707,7 +1685,6 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
}
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_set_dec_frame_buffer_v6(ctx);
if (ret) {
mfc_err("Failed to alloc frame mem.\n");
@@ -1722,7 +1699,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
int ret;
dev->curr_ctx = ctx->num;
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
if (ret) {
mfc_err("Failed to alloc frame mem.\n");
@@ -1771,6 +1747,8 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
* Now obtaining frames from MFC buffer */
s5p_mfc_clock_on();
+ s5p_mfc_clean_ctx_int_flags(ctx);
+
if (ctx->type == MFCINST_DECODER) {
switch (ctx->state) {
case MFCINST_FINISHING:
@@ -1780,12 +1758,10 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
ret = s5p_mfc_run_dec_frame(ctx);
break;
case MFCINST_INIT:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
ctx);
break;
case MFCINST_RETURN_INST:
- s5p_mfc_clean_ctx_int_flags(ctx);
ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
ctx);
break;
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 37c8bd694c5f..0e74aabf5f9a 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -660,7 +660,7 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
memset(fmt, 0, sizeof(*fmt));
fmt->width = t->hact.end - t->hact.beg;
fmt->height = t->vact[0].end - t->vact[0].beg;
- fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
+ fmt->code = MEDIA_BUS_FMT_FIXED; /* means RGB888 */
fmt->colorspace = V4L2_COLORSPACE_SRGB;
if (t->interlaced) {
fmt->field = V4L2_FIELD_INTERLACED;
@@ -1046,7 +1046,6 @@ static struct platform_driver hdmi_driver __refdata = {
.id_table = hdmi_driver_types,
.driver = {
.name = "s5p-hdmi",
- .owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
}
};
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index bc08b5f28e44..2a9501d7e7c8 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -487,7 +487,6 @@ static struct platform_driver mxr_driver __refdata = {
.remove = mxr_remove,
.driver = {
.name = MXR_DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &mxr_pm_ops,
}
};
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 72cf892dd008..3621af91d460 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -170,7 +170,7 @@ static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
/* all modes are 720 pixels wide */
fmt->width = 720;
fmt->height = sdev->fmt->height;
- fmt->code = V4L2_MBUS_FMT_FIXED;
+ fmt->code = MEDIA_BUS_FMT_FIXED;
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_JPEG;
return 0;
@@ -482,7 +482,6 @@ static struct platform_driver sdo_driver __refdata = {
.remove = sdo_remove,
.driver = {
.name = "s5p-sdo",
- .owner = THIS_MODULE,
.pm = &sdo_pm_ops,
}
};
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index be3b3bc71a0f..aaa1f6f25a29 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1235,7 +1235,6 @@ static struct platform_driver __refdata sh_veu_pdrv = {
.remove = sh_veu_remove,
.driver = {
.name = "sh_veu",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index e5f1d4c14f2c..261f1195b49f 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -396,7 +396,8 @@ static int sh_vou_querycap(struct file *file, void *priv,
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -680,7 +681,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
struct sh_vou_geometry geo;
struct v4l2_mbus_framefmt mbfmt = {
/* Revisit: is this the correct code? */
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.field = V4L2_FIELD_INTERLACED,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
};
@@ -733,7 +734,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
/* Sanity checks */
if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
(unsigned)mbfmt.height > img_height_max ||
- mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8)
+ mbfmt.code != MEDIA_BUS_FMT_YUYV8_2X8)
return -EIO;
if (mbfmt.width != geo.output.width ||
@@ -943,7 +944,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
struct sh_vou_geometry geo;
struct v4l2_mbus_framefmt mbfmt = {
/* Revisit: is this the correct code? */
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.field = V4L2_FIELD_INTERLACED,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
};
@@ -994,7 +995,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
/* Sanity checks */
if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
(unsigned)mbfmt.height > img_height_max ||
- mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8)
+ mbfmt.code != MEDIA_BUS_FMT_YUYV8_2X8)
return -EIO;
geo.output.width = mbfmt.width;
@@ -1449,7 +1450,6 @@ static struct platform_driver __refdata sh_vou = {
.remove = sh_vou_remove,
.driver = {
.name = "sh-vou",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index c5291b001057..8efe40337608 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -105,25 +105,25 @@ static u32 isi_readl(struct atmel_isi *isi, u32 reg)
}
static int configure_geometry(struct atmel_isi *isi, u32 width,
- u32 height, enum v4l2_mbus_pixelcode code)
+ u32 height, u32 code)
{
u32 cfg2, cr;
switch (code) {
/* YUV, including grey */
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
cr = ISI_CFG2_GRAYSCALE;
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_3;
break;
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_2;
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
cr = ISI_CFG2_YCC_SWAP_MODE_1;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
cr = ISI_CFG2_YCC_SWAP_DEFAULT;
break;
/* RGB, TODO */
@@ -645,7 +645,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int formats = 0, ret;
/* sensor format */
- enum v4l2_mbus_pixelcode code;
+ u32 code;
/* soc camera host format */
const struct soc_mbus_pixelfmt *fmt;
@@ -670,10 +670,10 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
}
switch (code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_VYUY8_2X8:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
formats++;
if (xlate) {
xlate->host_fmt = &isi_camera_formats[0];
@@ -1068,7 +1068,6 @@ static struct platform_driver atmel_isi_driver = {
.remove = atmel_isi_remove,
.driver = {
.name = "atmel_isi",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_isi_of_match),
},
};
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 2347612a4cc1..ce72bd26a6ac 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -211,7 +211,7 @@ struct emma_prp_resize {
/* prp configuration for a client-host fmt pair */
struct mx2_fmt_cfg {
- enum v4l2_mbus_pixelcode in_fmt;
+ u32 in_fmt;
u32 out_fmt;
struct mx2_prp_cfg cfg;
};
@@ -309,7 +309,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
}
},
{
- .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .in_fmt = MEDIA_BUS_FMT_UYVY8_2X8,
.out_fmt = V4L2_PIX_FMT_YUYV,
.cfg = {
.channel = 1,
@@ -323,7 +323,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
}
},
{
- .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8,
+ .in_fmt = MEDIA_BUS_FMT_YUYV8_2X8,
.out_fmt = V4L2_PIX_FMT_YUYV,
.cfg = {
.channel = 1,
@@ -337,7 +337,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
}
},
{
- .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8,
+ .in_fmt = MEDIA_BUS_FMT_YUYV8_2X8,
.out_fmt = V4L2_PIX_FMT_YUV420,
.cfg = {
.channel = 2,
@@ -351,7 +351,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
}
},
{
- .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .in_fmt = MEDIA_BUS_FMT_UYVY8_2X8,
.out_fmt = V4L2_PIX_FMT_YUV420,
.cfg = {
.channel = 2,
@@ -366,9 +366,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
},
};
-static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
- enum v4l2_mbus_pixelcode in_fmt,
- u32 out_fmt)
+static struct mx2_fmt_cfg *mx27_emma_prp_get_format(u32 in_fmt, u32 out_fmt)
{
int i;
@@ -945,7 +943,7 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_mbus_pixelfmt *fmt;
struct device *dev = icd->parent;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
int ret, formats = 0;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -959,8 +957,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
return 0;
}
- if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ code == MEDIA_BUS_FMT_UYVY8_2X8) {
formats++;
if (xlate) {
/*
@@ -968,7 +966,7 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
* soc_mediabus.c
*/
xlate->host_fmt =
- soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_1_5X8);
+ soc_mbus_get_fmtdesc(MEDIA_BUS_FMT_YUYV8_1_5X8);
xlate->code = code;
dev_dbg(dev, "Providing host format %s for sensor code %d\n",
xlate->host_fmt->name, code);
@@ -976,11 +974,11 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
}
}
- if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (code == MEDIA_BUS_FMT_UYVY8_2X8) {
formats++;
if (xlate) {
xlate->host_fmt =
- soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
+ soc_mbus_get_fmtdesc(MEDIA_BUS_FMT_YUYV8_2X8);
xlate->code = code;
dev_dbg(dev, "Providing host format %s for sensor code %d\n",
xlate->host_fmt->name, code);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 7696a873510d..a60c3bb0e4cc 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -656,7 +656,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->parent;
int formats = 0, ret;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -677,7 +677,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
return 0;
switch (code) {
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
formats++;
if (xlate) {
xlate->host_fmt = &mx3_camera_formats[0];
@@ -687,7 +687,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
mx3_camera_formats[0].name, code);
}
break;
- case V4L2_MBUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_Y10_1X10:
formats++;
if (xlate) {
xlate->host_fmt = &mx3_camera_formats[1];
@@ -1253,7 +1253,6 @@ static int mx3_camera_remove(struct platform_device *pdev)
static struct platform_driver mx3_camera_driver = {
.driver = {
.name = MX3_CAM_DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = mx3_camera_probe,
.remove = mx3_camera_remove,
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 74ce8b6b79fa..e6b93281f246 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -140,7 +140,7 @@
/* buffer for one video frame */
struct omap1_cam_buf {
struct videobuf_buffer vb;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
int inwork;
struct scatterlist *sgbuf;
int sgcount;
@@ -980,7 +980,7 @@ static void omap1_cam_clock_stop(struct soc_camera_host *ici)
/* Duplicate standard formats based on host capability of byte swapping */
static const struct soc_mbus_lookup omap1_cam_formats[] = {
{
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YUYV,
.name = "YUYV",
@@ -990,7 +990,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YVYU,
.name = "YVYU",
@@ -1000,7 +1000,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_UYVY,
.name = "UYVY",
@@ -1010,7 +1010,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_VYUY,
.name = "VYUY",
@@ -1020,7 +1020,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB555,
.name = "RGB555",
@@ -1030,7 +1030,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB555X,
.name = "RGB555X",
@@ -1040,7 +1040,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB565,
.name = "RGB565",
@@ -1050,7 +1050,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB565X,
.name = "RGB565X",
@@ -1068,7 +1068,7 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->parent;
int formats = 0, ret;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1088,14 +1088,14 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
return 0;
switch (code) {
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_YVYU8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_VYUY8_2X8:
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
formats++;
if (xlate) {
xlate->host_fmt = soc_mbus_find_fmtdesc(code,
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 66178fc9f9eb..951226af0eba 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -187,7 +187,7 @@ struct pxa_cam_dma {
struct pxa_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
/* our descriptor lists for Y, U and V channels */
struct pxa_cam_dma dmas[3];
int inwork;
@@ -1253,7 +1253,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct device *dev = icd->parent;
int formats = 0, ret;
struct pxa_cam *cam;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1283,7 +1283,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
}
switch (code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
formats++;
if (xlate) {
xlate->host_fmt = &pxa_camera_formats[0];
@@ -1292,11 +1292,11 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
dev_dbg(dev, "Providing format %s using code %d\n",
pxa_camera_formats[0].name, code);
}
- case V4L2_MBUS_FMT_VYUY8_2X8:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_YVYU8_2X8:
- case V4L2_MBUS_FMT_RGB565_2X8_LE:
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
if (xlate)
dev_dbg(dev, "Providing format %s packed\n",
fmt->name);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 20defcb8b31b..0c1f55648106 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -64,6 +64,30 @@
#define VNDMR_REG 0x58 /* Video n Data Mode Register */
#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
+#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
+#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
+#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
+#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
+#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
+#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
+#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
+#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
+#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
+#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
+#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
+#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
+#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
+#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
+#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
+#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
+#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
+#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
+#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
+#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
+#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
+#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
+#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
/* Register bit fields for R-Car VIN */
/* Video n Main Control Register bits */
@@ -106,6 +130,7 @@
#define VNDMR2_VPS (1 << 30)
#define VNDMR2_HPS (1 << 29)
#define VNDMR2_FTEV (1 << 17)
+#define VNDMR2_VLV(n) ((n & 0xf) << 12)
#define VIN_MAX_WIDTH 2048
#define VIN_MAX_HEIGHT 2048
@@ -117,6 +142,324 @@ enum chip_id {
RCAR_E1,
};
+struct vin_coeff {
+ unsigned short xs_value;
+ u32 coeff_set[24];
+};
+
+static const struct vin_coeff vin_coeff_set[] = {
+ { 0x0000, {
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000 },
+ },
+ { 0x1000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x000003f8, 0x00000403, 0x3de0d9f0,
+ 0x001fffed, 0x00000804, 0x3cc1f9c3,
+ 0x001003de, 0x00000c01, 0x3cb34d7f,
+ 0x002003d2, 0x00000c00, 0x3d24a92d,
+ 0x00200bca, 0x00000bff, 0x3df600d2,
+ 0x002013cc, 0x000007ff, 0x3ed70c7e,
+ 0x00100fde, 0x00000000, 0x3f87c036 },
+ },
+ { 0x1200, {
+ 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
+ 0x002003e7, 0x001ffffa, 0x000185bc,
+ 0x002007dc, 0x000003ff, 0x3e52859c,
+ 0x00200bd4, 0x00000002, 0x3d53996b,
+ 0x00100fd0, 0x00000403, 0x3d04ad2d,
+ 0x00000bd5, 0x00000403, 0x3d35ace7,
+ 0x3ff003e4, 0x00000801, 0x3dc674a1,
+ 0x3fffe800, 0x00000800, 0x3e76f461 },
+ },
+ { 0x1400, {
+ 0x00100be3, 0x00100be3, 0x04d1359a,
+ 0x00000fdb, 0x002003ed, 0x0211fd93,
+ 0x00000fd6, 0x002003f4, 0x0002d97b,
+ 0x000007d6, 0x002ffffb, 0x3e93b956,
+ 0x3ff003da, 0x001003ff, 0x3db49926,
+ 0x3fffefe9, 0x00100001, 0x3d655cee,
+ 0x3fffd400, 0x00000003, 0x3d65f4b6,
+ 0x000fb421, 0x00000402, 0x3dc6547e },
+ },
+ { 0x1600, {
+ 0x00000bdd, 0x00000bdd, 0x06519578,
+ 0x3ff007da, 0x00000be3, 0x03c24973,
+ 0x3ff003d9, 0x00000be9, 0x01b30d5f,
+ 0x3ffff7df, 0x001003f1, 0x0003c542,
+ 0x000fdfec, 0x001003f7, 0x3ec4711d,
+ 0x000fc400, 0x002ffffd, 0x3df504f1,
+ 0x001fa81a, 0x002ffc00, 0x3d957cc2,
+ 0x002f8c3c, 0x00100000, 0x3db5c891 },
+ },
+ { 0x1800, {
+ 0x3ff003dc, 0x3ff003dc, 0x0791e558,
+ 0x000ff7dd, 0x3ff007de, 0x05328554,
+ 0x000fe7e3, 0x3ff00be2, 0x03232546,
+ 0x000fd7ee, 0x000007e9, 0x0143bd30,
+ 0x001fb800, 0x000007ee, 0x00044511,
+ 0x002fa015, 0x000007f4, 0x3ef4bcee,
+ 0x002f8832, 0x001003f9, 0x3e4514c7,
+ 0x001f7853, 0x001003fd, 0x3de54c9f },
+ },
+ { 0x1a00, {
+ 0x000fefe0, 0x000fefe0, 0x08721d3c,
+ 0x001fdbe7, 0x000ffbde, 0x0652a139,
+ 0x001fcbf0, 0x000003df, 0x0463292e,
+ 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
+ 0x002f9c12, 0x3ff00be7, 0x01241905,
+ 0x001f8c29, 0x000007ed, 0x3fe470eb,
+ 0x000f7c46, 0x000007f2, 0x3f04b8ca,
+ 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
+ },
+ { 0x1c00, {
+ 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
+ 0x002fbff3, 0x001fe3e4, 0x0712ad23,
+ 0x002fa800, 0x000ff3e0, 0x05631d1b,
+ 0x001f9810, 0x000ffbe1, 0x03b3890d,
+ 0x000f8c23, 0x000003e3, 0x0233e8fa,
+ 0x3fef843b, 0x000003e7, 0x00f430e4,
+ 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
+ 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
+ },
+ { 0x1e00, {
+ 0x001fbbf4, 0x001fbbf4, 0x09425112,
+ 0x001fa800, 0x002fc7ed, 0x0792b110,
+ 0x000f980e, 0x001fdbe6, 0x0613110a,
+ 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
+ 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
+ 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
+ 0x3f5f9c61, 0x000003e6, 0x00e428c5,
+ 0x3f1fb07b, 0x000003eb, 0x3fe440af },
+ },
+ { 0x2000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
+ 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
+ 0x3faf902d, 0x001fd3e8, 0x055348f1,
+ 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
+ 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
+ 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
+ 0x3ecfd880, 0x000fffe6, 0x00c404ac },
+ },
+ { 0x2200, {
+ 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
+ 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
+ 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
+ 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
+ 0x3f2fac49, 0x001fcfea, 0x04a364d9,
+ 0x3effc05c, 0x001fdbe7, 0x038394ca,
+ 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
+ 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
+ },
+ { 0x2400, {
+ 0x3f9fa014, 0x3f9fa014, 0x098260e6,
+ 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
+ 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
+ 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
+ 0x3eefc850, 0x000fbbf2, 0x050340d0,
+ 0x3ecfe062, 0x000fcbec, 0x041364c2,
+ 0x3ea00073, 0x001fd3ea, 0x03037cb5,
+ 0x3e902086, 0x001fdfe8, 0x022388a5 },
+ },
+ { 0x2600, {
+ 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
+ 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
+ 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
+ 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
+ 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
+ 0x3eb00066, 0x3fffbbf3, 0x047334bb,
+ 0x3ea01c77, 0x000fc7ee, 0x039348ae,
+ 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
+ },
+ { 0x2800, {
+ 0x3f2fb426, 0x3f2fb426, 0x094250ce,
+ 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
+ 0x3eefd040, 0x3f7fa811, 0x0782acc9,
+ 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
+ 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
+ 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
+ 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
+ 0x3ec06884, 0x000fbff2, 0x03031c9e },
+ },
+ { 0x2a00, {
+ 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
+ 0x3eefd439, 0x3f2fb822, 0x08526cc2,
+ 0x3edfe845, 0x3f4fb018, 0x078294bf,
+ 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
+ 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
+ 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
+ 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
+ 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
+ },
+ { 0x2c00, {
+ 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
+ 0x3edfec3d, 0x3f0fc828, 0x082258b9,
+ 0x3ed00049, 0x3f1fc01e, 0x077278b6,
+ 0x3ed01455, 0x3f3fb815, 0x06c294b2,
+ 0x3ed03460, 0x3f5fb40d, 0x0602acac,
+ 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
+ 0x3f107476, 0x3f9fb400, 0x0472c89d,
+ 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
+ },
+ { 0x2e00, {
+ 0x3eefec37, 0x3eefec37, 0x088220b0,
+ 0x3ee00041, 0x3effdc2d, 0x07f244ae,
+ 0x3ee0144c, 0x3f0fd023, 0x07625cad,
+ 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
+ 0x3f004861, 0x3f3fbc13, 0x060288a6,
+ 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
+ 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
+ 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
+ },
+ { 0x3000, {
+ 0x3ef0003a, 0x3ef0003a, 0x084210a6,
+ 0x3ef01045, 0x3effec32, 0x07b228a7,
+ 0x3f00284e, 0x3f0fdc29, 0x073244a4,
+ 0x3f104058, 0x3f0fd420, 0x06a258a2,
+ 0x3f305c62, 0x3f2fc818, 0x0612689d,
+ 0x3f508069, 0x3f3fc011, 0x05728496,
+ 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
+ 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
+ },
+ { 0x3200, {
+ 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
+ 0x3f102447, 0x3f000035, 0x0782149d,
+ 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
+ 0x3f405458, 0x3f0fe424, 0x06924099,
+ 0x3f607061, 0x3f1fd41d, 0x06024c97,
+ 0x3f909068, 0x3f2fcc16, 0x05726490,
+ 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
+ 0x0000d077, 0x3f4fc409, 0x04627484 },
+ },
+ { 0x3400, {
+ 0x3f202040, 0x3f202040, 0x07a1e898,
+ 0x3f303449, 0x3f100c38, 0x0741fc98,
+ 0x3f504c50, 0x3f10002f, 0x06e21495,
+ 0x3f706459, 0x3f1ff028, 0x06722492,
+ 0x3fa08060, 0x3f1fe421, 0x05f2348f,
+ 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
+ 0x0000bc6e, 0x3f2fd014, 0x04f25086,
+ 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
+ },
+ { 0x3600, {
+ 0x3f403042, 0x3f403042, 0x0761d890,
+ 0x3f504848, 0x3f301c3b, 0x0701f090,
+ 0x3f805c50, 0x3f200c33, 0x06a2008f,
+ 0x3fa07458, 0x3f10002b, 0x06520c8d,
+ 0x3fd0905e, 0x3f1ff424, 0x05e22089,
+ 0x0000ac65, 0x3f1fe81d, 0x05823483,
+ 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
+ 0x0080e871, 0x3f2fd412, 0x0482407c },
+ },
+ { 0x3800, {
+ 0x3f604043, 0x3f604043, 0x0721c88a,
+ 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
+ 0x3fb06851, 0x3f301c35, 0x0681e889,
+ 0x3fd08456, 0x3f30082f, 0x0611fc88,
+ 0x00009c5d, 0x3f200027, 0x05d20884,
+ 0x0030b863, 0x3f2ff421, 0x05621880,
+ 0x0070d468, 0x3f2fe81b, 0x0502247c,
+ 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
+ },
+ { 0x3a00, {
+ 0x3f904c44, 0x3f904c44, 0x06e1b884,
+ 0x3fb0604a, 0x3f70383e, 0x0691c885,
+ 0x3fe07451, 0x3f502c36, 0x0661d483,
+ 0x00009055, 0x3f401831, 0x0601ec81,
+ 0x0030a85b, 0x3f300c2a, 0x05b1f480,
+ 0x0070c061, 0x3f300024, 0x0562047a,
+ 0x00b0d867, 0x3f3ff41e, 0x05020c77,
+ 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
+ },
+ { 0x3c00, {
+ 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
+ 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
+ 0x0000844f, 0x3f703838, 0x0631cc7d,
+ 0x00309855, 0x3f602433, 0x05d1d47e,
+ 0x0060b459, 0x3f50142e, 0x0581e47b,
+ 0x00a0c85f, 0x3f400828, 0x0531f078,
+ 0x00e0e064, 0x3f300021, 0x0501fc73,
+ 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
+ },
+ { 0x3e00, {
+ 0x3fe06444, 0x3fe06444, 0x0681a07a,
+ 0x00007849, 0x3fc0503f, 0x0641b07a,
+ 0x0020904d, 0x3fa0403a, 0x05f1c07a,
+ 0x0060a453, 0x3f803034, 0x05c1c878,
+ 0x0090b858, 0x3f70202f, 0x0571d477,
+ 0x00d0d05d, 0x3f501829, 0x0531e073,
+ 0x0110e462, 0x3f500825, 0x04e1e471,
+ 0x01510065, 0x3f40001f, 0x04a1f06d },
+ },
+ { 0x4000, {
+ 0x00007044, 0x00007044, 0x06519476,
+ 0x00208448, 0x3fe05c3f, 0x0621a476,
+ 0x0050984d, 0x3fc04c3a, 0x05e1b075,
+ 0x0080ac52, 0x3fa03c35, 0x05a1b875,
+ 0x00c0c056, 0x3f803030, 0x0561c473,
+ 0x0100d45b, 0x3f70202b, 0x0521d46f,
+ 0x0140e860, 0x3f601427, 0x04d1d46e,
+ 0x01810064, 0x3f500822, 0x0491dc6b },
+ },
+ { 0x5000, {
+ 0x0110a442, 0x0110a442, 0x0551545e,
+ 0x0140b045, 0x00e0983f, 0x0531585f,
+ 0x0160c047, 0x00c08c3c, 0x0511645e,
+ 0x0190cc4a, 0x00908039, 0x04f1685f,
+ 0x01c0dc4c, 0x00707436, 0x04d1705e,
+ 0x0200e850, 0x00506833, 0x04b1785b,
+ 0x0230f453, 0x00305c30, 0x0491805a,
+ 0x02710056, 0x0010542d, 0x04718059 },
+ },
+ { 0x6000, {
+ 0x01c0bc40, 0x01c0bc40, 0x04c13052,
+ 0x01e0c841, 0x01a0b43d, 0x04c13851,
+ 0x0210cc44, 0x0180a83c, 0x04a13453,
+ 0x0230d845, 0x0160a03a, 0x04913c52,
+ 0x0260e047, 0x01409838, 0x04714052,
+ 0x0280ec49, 0x01208c37, 0x04514c50,
+ 0x02b0f44b, 0x01008435, 0x04414c50,
+ 0x02d1004c, 0x00e07c33, 0x0431544f },
+ },
+ { 0x7000, {
+ 0x0230c83e, 0x0230c83e, 0x04711c4c,
+ 0x0250d03f, 0x0210c43c, 0x0471204b,
+ 0x0270d840, 0x0200b83c, 0x0451244b,
+ 0x0290dc42, 0x01e0b43a, 0x0441244c,
+ 0x02b0e443, 0x01c0b038, 0x0441284b,
+ 0x02d0ec44, 0x01b0a438, 0x0421304a,
+ 0x02f0f445, 0x0190a036, 0x04213449,
+ 0x0310f847, 0x01709c34, 0x04213848 },
+ },
+ { 0x8000, {
+ 0x0280d03d, 0x0280d03d, 0x04310c48,
+ 0x02a0d43e, 0x0270c83c, 0x04311047,
+ 0x02b0dc3e, 0x0250c83a, 0x04311447,
+ 0x02d0e040, 0x0240c03a, 0x04211446,
+ 0x02e0e840, 0x0220bc39, 0x04111847,
+ 0x0300e842, 0x0210b438, 0x04012445,
+ 0x0310f043, 0x0200b037, 0x04012045,
+ 0x0330f444, 0x01e0ac36, 0x03f12445 },
+ },
+ { 0xefff, {
+ 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
+ 0x0340e03a, 0x0330e039, 0x03c0f03e,
+ 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
+ 0x0350e43a, 0x0320dc38, 0x03c0f43e,
+ 0x0360e43b, 0x0320d839, 0x03b0f03e,
+ 0x0360e83b, 0x0310d838, 0x03c0fc3b,
+ 0x0370e83b, 0x0310d439, 0x03a0f83d,
+ 0x0370e83c, 0x0300d438, 0x03b0fc3c },
+ }
+};
+
enum rcar_vin_state {
STOPPED = 0,
RUNNING,
@@ -161,6 +504,9 @@ struct rcar_vin_cam {
/* Client output, as seen by the VIN */
unsigned int width;
unsigned int height;
+ /* User window from S_FMT */
+ unsigned int out_width;
+ unsigned int out_height;
/*
* User window from S_CROP / G_CROP, produced by client cropping and
* scaling, VIN scaling and VIN cropping, mapped back onto the client
@@ -272,16 +618,16 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
/* input interface */
switch (icd->current_fmt->code) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
break;
- case V4L2_MBUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
@@ -332,7 +678,7 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
vnmc |= VNMC_BPS;
/* progressive or interlaced mode */
- interrupts = progressive ? VNIE_FIE | VNIE_EFE : VNIE_EFE;
+ interrupts = progressive ? VNIE_FIE : VNIE_EFE;
/* ack interrupts */
iowrite32(interrupts, priv->base + VNINTS_REG);
@@ -667,6 +1013,60 @@ static void rcar_vin_clock_stop(struct soc_camera_host *ici)
/* VIN does not have "mclk" */
}
+static void set_coeff(struct rcar_vin_priv *priv, unsigned short xs)
+{
+ int i;
+ const struct vin_coeff *p_prev_set = NULL;
+ const struct vin_coeff *p_set = NULL;
+
+ /* Look for suitable coefficient values */
+ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
+ p_prev_set = p_set;
+ p_set = &vin_coeff_set[i];
+
+ if (xs < p_set->xs_value)
+ break;
+ }
+
+ /* Use previous value if its XS value is closer */
+ if (p_prev_set && p_set &&
+ xs - p_prev_set->xs_value < p_set->xs_value - xs)
+ p_set = p_prev_set;
+
+ /* Set coefficient registers */
+ iowrite32(p_set->coeff_set[0], priv->base + VNC1A_REG);
+ iowrite32(p_set->coeff_set[1], priv->base + VNC1B_REG);
+ iowrite32(p_set->coeff_set[2], priv->base + VNC1C_REG);
+
+ iowrite32(p_set->coeff_set[3], priv->base + VNC2A_REG);
+ iowrite32(p_set->coeff_set[4], priv->base + VNC2B_REG);
+ iowrite32(p_set->coeff_set[5], priv->base + VNC2C_REG);
+
+ iowrite32(p_set->coeff_set[6], priv->base + VNC3A_REG);
+ iowrite32(p_set->coeff_set[7], priv->base + VNC3B_REG);
+ iowrite32(p_set->coeff_set[8], priv->base + VNC3C_REG);
+
+ iowrite32(p_set->coeff_set[9], priv->base + VNC4A_REG);
+ iowrite32(p_set->coeff_set[10], priv->base + VNC4B_REG);
+ iowrite32(p_set->coeff_set[11], priv->base + VNC4C_REG);
+
+ iowrite32(p_set->coeff_set[12], priv->base + VNC5A_REG);
+ iowrite32(p_set->coeff_set[13], priv->base + VNC5B_REG);
+ iowrite32(p_set->coeff_set[14], priv->base + VNC5C_REG);
+
+ iowrite32(p_set->coeff_set[15], priv->base + VNC6A_REG);
+ iowrite32(p_set->coeff_set[16], priv->base + VNC6B_REG);
+ iowrite32(p_set->coeff_set[17], priv->base + VNC6C_REG);
+
+ iowrite32(p_set->coeff_set[18], priv->base + VNC7A_REG);
+ iowrite32(p_set->coeff_set[19], priv->base + VNC7B_REG);
+ iowrite32(p_set->coeff_set[20], priv->base + VNC7C_REG);
+
+ iowrite32(p_set->coeff_set[21], priv->base + VNC8A_REG);
+ iowrite32(p_set->coeff_set[22], priv->base + VNC8B_REG);
+ iowrite32(p_set->coeff_set[23], priv->base + VNC8C_REG);
+}
+
/* rect is guaranteed to not exceed the scaled camera rectangle */
static int rcar_vin_set_rect(struct soc_camera_device *icd)
{
@@ -676,6 +1076,7 @@ static int rcar_vin_set_rect(struct soc_camera_device *icd)
unsigned int left_offset, top_offset;
unsigned char dsize = 0;
struct v4l2_rect *cam_subrect = &cam->subrect;
+ u32 value;
dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);
@@ -695,40 +1096,64 @@ static int rcar_vin_set_rect(struct soc_camera_device *icd)
/* Set Start/End Pixel/Line Pre-Clip */
iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
- iowrite32((left_offset + cam->width - 1) << dsize,
+ iowrite32((left_offset + cam_subrect->width - 1) << dsize,
priv->base + VNEPPRC_REG);
switch (priv->field) {
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
- iowrite32((top_offset + cam->height) / 2 - 1,
+ iowrite32((top_offset + cam_subrect->height) / 2 - 1,
priv->base + VNELPRC_REG);
break;
default:
iowrite32(top_offset, priv->base + VNSLPRC_REG);
- iowrite32(top_offset + cam->height - 1,
+ iowrite32(top_offset + cam_subrect->height - 1,
priv->base + VNELPRC_REG);
break;
}
+ /* Set scaling coefficient */
+ value = 0;
+ if (cam_subrect->height != cam->out_height)
+ value = (4096 * cam_subrect->height) / cam->out_height;
+ dev_dbg(icd->parent, "YS Value: %x\n", value);
+ iowrite32(value, priv->base + VNYS_REG);
+
+ value = 0;
+ if (cam_subrect->width != cam->out_width)
+ value = (4096 * cam_subrect->width) / cam->out_width;
+
+ /* Horizontal upscaling is up to double size */
+ if (0 < value && value < 2048)
+ value = 2048;
+
+ dev_dbg(icd->parent, "XS Value: %x\n", value);
+ iowrite32(value, priv->base + VNXS_REG);
+
+ /* Horizontal upscaling is carried out by scaling down from double size */
+ if (value < 4096)
+ value *= 2;
+
+ set_coeff(priv, value);
+
/* Set Start/End Pixel/Line Post-Clip */
iowrite32(0, priv->base + VNSPPOC_REG);
iowrite32(0, priv->base + VNSLPOC_REG);
- iowrite32((cam_subrect->width - 1) << dsize, priv->base + VNEPPOC_REG);
+ iowrite32((cam->out_width - 1) << dsize, priv->base + VNEPPOC_REG);
switch (priv->field) {
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
- iowrite32(cam_subrect->height / 2 - 1,
+ iowrite32(cam->out_height / 2 - 1,
priv->base + VNELPOC_REG);
break;
default:
- iowrite32(cam_subrect->height - 1, priv->base + VNELPOC_REG);
+ iowrite32(cam->out_height - 1, priv->base + VNELPOC_REG);
break;
}
- iowrite32(ALIGN(cam->width, 0x10), priv->base + VNIS_REG);
+ iowrite32(ALIGN(cam->out_width, 0x10), priv->base + VNIS_REG);
return 0;
}
@@ -819,7 +1244,7 @@ static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
- val = priv->field == V4L2_FIELD_NONE ? VNDMR2_FTEV : 0;
+ val = VNDMR2_FTEV | VNDMR2_VLV(1);
if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
val |= VNDMR2_VPS;
if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
@@ -880,6 +1305,14 @@ static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
.layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
},
{
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+ {
.fourcc = V4L2_PIX_FMT_UYVY,
.name = "UYVY",
.bits_per_sample = 16,
@@ -921,7 +1354,7 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
int ret, k, n;
int formats = 0;
struct rcar_vin_cam *cam;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -999,6 +1432,8 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
cam->subrect = rect;
cam->width = mf.width;
cam->height = mf.height;
+ cam->out_width = mf.width;
+ cam->out_height = mf.height;
icd->host_priv = cam;
} else {
@@ -1010,9 +1445,9 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
cam->extra_fmt = NULL;
switch (code) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
if (cam->extra_fmt)
break;
@@ -1259,6 +1694,9 @@ static int rcar_vin_set_fmt(struct soc_camera_device *icd,
dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
vin_sub_width, pix->width, vin_sub_height, pix->height);
+ cam->out_width = pix->width;
+ cam->out_height = pix->height;
+
icd->current_fmt = xlate;
priv->field = field;
@@ -1310,8 +1748,12 @@ static int rcar_vin_try_fmt(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- pix->width = mf.width;
- pix->height = mf.height;
+ /* Adjust only if VIN cannot scale */
+ if (pix->width > mf.width * 2)
+ pix->width = mf.width * 2;
+ if (pix->height > mf.height * 3)
+ pix->height = mf.height * 3;
+
pix->field = mf.field;
pix->colorspace = mf.colorspace;
@@ -1395,6 +1837,8 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
#ifdef CONFIG_OF
static struct of_device_id rcar_vin_of_table[] = {
+ { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
@@ -1543,7 +1987,6 @@ static struct platform_driver rcar_vin_driver = {
.remove = rcar_vin_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rcar_vin_of_table),
},
.id_table = rcar_vin_id_table,
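
Editorial note on the rcar_vin hunks above: the scaling support writes a 4096-based ratio to VNYS/VNXS (4096 means 1:1, 0 means "no scaling"), clamps the horizontal ratio to a minimum of 2048 because the VIN upscales at most 2x, and doubles any value below 4096 before calling set_coeff(), since horizontal upscaling is implemented by scaling a doubled image back down. A rough user-space sketch of that arithmetic and of the nearest-xs_value lookup follows; the table entries here are made up and the real entries carry 24 coefficient words each, so only the selection logic mirrors the driver.

#include <stdio.h>

/* Illustrative only: real entries also hold 24 coefficient words. */
struct coeff_entry { unsigned short xs_value; };

static const struct coeff_entry table[] = {
    { 0x0000 }, { 0x2000 }, { 0x4000 }, { 0x8000 }, { 0xefff },
};
#define N_ENTRIES (sizeof(table) / sizeof(table[0]))

/* 4096-based ratio as written to VNYS/VNXS; 0 means no scaling. */
static unsigned int scale_ratio(unsigned int src, unsigned int out)
{
    return src == out ? 0 : (4096 * src) / out;
}

/* Pick the entry whose xs_value is nearest, as set_coeff() does. */
static const struct coeff_entry *pick_coeff(unsigned short xs)
{
    const struct coeff_entry *prev = NULL, *cur = NULL;
    unsigned int i;

    for (i = 0; i < N_ENTRIES; i++) {
        prev = cur;
        cur = &table[i];
        if (xs < cur->xs_value)
            break;
    }
    /* prefer the previous entry when its xs_value is closer */
    if (prev && xs - prev->xs_value < cur->xs_value - xs)
        cur = prev;
    return cur;
}

int main(void)
{
    unsigned int xs = scale_ratio(1280, 640);   /* 1280 -> 640: 2:1 downscale */

    if (xs > 0 && xs < 2048)    /* horizontal upscaling tops out at 2x */
        xs = 2048;
    if (xs < 4096)              /* upscaling runs via a doubled image */
        xs *= 2;
    printf("xs=0x%x -> table entry 0x%x\n", xs, pick_coeff(xs)->xs_value);
    return 0;
}
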
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 20ad4a571d37..8b27b3eb2b25 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -149,7 +149,7 @@ struct sh_mobile_ceu_cam {
/* Camera cropping rectangle */
struct v4l2_rect rect;
const struct soc_mbus_pixelfmt *extra_fmt;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
};
static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
@@ -861,16 +861,16 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
switch (cam->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
break;
- case V4L2_MBUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
break;
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
break;
default:
@@ -1048,7 +1048,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
int ret, k, n;
int formats = 0;
struct sh_mobile_ceu_cam *cam;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1141,10 +1141,10 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
cam->extra_fmt = NULL;
switch (code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_VYUY8_2X8:
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
if (cam->extra_fmt)
break;
@@ -2009,7 +2009,6 @@ MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match);
static struct platform_driver sh_mobile_ceu_driver = {
.driver = {
.name = "sh_mobile_ceu",
- .owner = THIS_MODULE,
.pm = &sh_mobile_ceu_dev_pm_ops,
.of_match_table = sh_mobile_ceu_of_match,
},
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index 05dd21a35d63..c4e7aa0ee7e1 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -59,28 +59,28 @@ static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
switch (pdata->type) {
case SH_CSI2C:
switch (mf->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */
- case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */
- case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8: /* YUV422 */
+ case MEDIA_BUS_FMT_YUYV8_1_5X8: /* YUV420 */
+ case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
break;
default:
/* All MIPI CSI-2 devices must support one of primary formats */
- mf->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
}
break;
case SH_CSI2I:
switch (mf->code) {
- case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGRBG8_1X8:
- case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */
- case V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */
+ case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10: /* RAW10 */
+ case MEDIA_BUS_FMT_SBGGR12_1X12: /* RAW12 */
break;
default:
/* All MIPI CSI-2 devices must support one of primary formats */
- mf->code = V4L2_MBUS_FMT_SBGGR8_1X8;
+ mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
}
break;
}
@@ -104,21 +104,21 @@ static int sh_csi2_s_fmt(struct v4l2_subdev *sd,
return -EINVAL;
switch (mf->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
tmp |= 0x1e; /* YUV422 8 bit */
break;
- case V4L2_MBUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
tmp |= 0x18; /* YUV420 8 bit */
break;
- case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
tmp |= 0x21; /* RGB555 */
break;
- case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
tmp |= 0x22; /* RGB565 */
break;
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
tmp |= 0x2a; /* RAW8 */
break;
default:
@@ -391,7 +391,6 @@ static struct platform_driver __refdata sh_csi2_pdrv = {
.probe = sh_csi2_probe,
.driver = {
.name = "sh-mobile-csi2",
- .owner = THIS_MODULE,
},
};
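
The sh_mobile_csi2 hunks above keep the existing try_fmt policy under the new MEDIA_BUS_FMT_* names: a code the CSI2C/CSI2I instance can handle passes through unchanged, anything else is forced to a default primary format. A compact sketch of that whitelist-or-default pattern, with the bus codes reduced to plain stand-in constants (only the control flow mirrors the driver):

#include <stdint.h>

/* Stand-ins for MEDIA_BUS_FMT_* values; the real codes live in media-bus-format.h. */
enum { FMT_UYVY8_2X8 = 1, FMT_YUYV8_1_5X8, FMT_Y8_1X8,
       FMT_SBGGR8_1X8, FMT_SGRBG8_1X8, FMT_YUYV8_2X8 };

/* Leave a supported code untouched, otherwise fall back to a primary format. */
static uint32_t csi2c_try_code(uint32_t code)
{
    switch (code) {
    case FMT_UYVY8_2X8:     /* YUV422 */
    case FMT_YUYV8_1_5X8:   /* YUV420 */
    case FMT_Y8_1X8:        /* RAW8 */
    case FMT_SBGGR8_1X8:
    case FMT_SGRBG8_1X8:
        return code;
    default:
        return FMT_YUYV8_2X8;
    }
}
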
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 8e61b976da19..b3db51c82bde 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -460,7 +460,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
unsigned int i, fmts = 0, raw_fmts = 0;
int ret;
- enum v4l2_mbus_pixelcode code;
+ u32 code;
while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
raw_fmts++;
@@ -2236,7 +2236,6 @@ static struct platform_driver __refdata soc_camera_pdrv = {
.remove = soc_camera_pdrv_remove,
.driver = {
.name = "soc-camera-pdrv",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index ceaddfb85e49..f535910b4187 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -62,7 +62,7 @@ static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
};
static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code)
+ u32 *code)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
@@ -180,7 +180,6 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
static struct platform_driver soc_camera_platform_driver = {
.driver = {
.name = "soc_camera_platform",
- .owner = THIS_MODULE,
},
.probe = soc_camera_platform_probe,
.remove = soc_camera_platform_remove,
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
index dc02deca7563..1dbcd426683c 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -17,7 +17,7 @@
static const struct soc_mbus_lookup mbus_fmt[] = {
{
- .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YUYV,
.name = "YUYV",
@@ -27,7 +27,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YVYU,
.name = "YVYU",
@@ -37,7 +37,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_UYVY,
.name = "UYVY",
@@ -47,7 +47,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_VYUY,
.name = "VYUY",
@@ -57,7 +57,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB555,
.name = "RGB555",
@@ -67,7 +67,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB555X,
.name = "RGB555X",
@@ -77,7 +77,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB565,
.name = "RGB565",
@@ -87,7 +87,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB565X,
.name = "RGB565X",
@@ -97,7 +97,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB666_1X18,
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB32,
.name = "RGB666/32bpp",
@@ -106,7 +106,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.order = SOC_MBUS_ORDER_LE,
},
}, {
- .code = V4L2_MBUS_FMT_RGB888_1X24,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB32,
.name = "RGB888/32bpp",
@@ -115,7 +115,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.order = SOC_MBUS_ORDER_LE,
},
}, {
- .code = V4L2_MBUS_FMT_RGB888_2X12_BE,
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB32,
.name = "RGB888/32bpp",
@@ -124,7 +124,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.order = SOC_MBUS_ORDER_BE,
},
}, {
- .code = V4L2_MBUS_FMT_RGB888_2X12_LE,
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB32,
.name = "RGB888/32bpp",
@@ -133,7 +133,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.order = SOC_MBUS_ORDER_LE,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.name = "Bayer 8 BGGR",
@@ -143,7 +143,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.name = "Bayer 10 BGGR",
@@ -153,7 +153,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_Y8_1X8,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_GREY,
.name = "Grey",
@@ -163,7 +163,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_Y10_1X10,
+ .code = MEDIA_BUS_FMT_Y10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_Y10,
.name = "Grey 10bit",
@@ -173,7 +173,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.name = "Bayer 10 BGGR",
@@ -183,7 +183,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.name = "Bayer 10 BGGR",
@@ -193,7 +193,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.name = "Bayer 10 BGGR",
@@ -203,7 +203,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.name = "Bayer 10 BGGR",
@@ -213,7 +213,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_JPEG,
.name = "JPEG",
@@ -223,7 +223,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
.fmt = {
.fourcc = V4L2_PIX_FMT_RGB444,
.name = "RGB444",
@@ -233,7 +233,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YUYV8_1_5X8,
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YUV420,
.name = "YUYV 4:2:0",
@@ -243,7 +243,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YVYU8_1_5X8,
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_YVU420,
.name = "YVYU 4:2:0",
@@ -253,7 +253,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.fmt = {
.fourcc = V4L2_PIX_FMT_UYVY,
.name = "UYVY 16bit",
@@ -263,7 +263,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_VYUY8_1X16,
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
.fmt = {
.fourcc = V4L2_PIX_FMT_VYUY,
.name = "VYUY 16bit",
@@ -273,7 +273,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YUYV8_1X16,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
.fmt = {
.fourcc = V4L2_PIX_FMT_YUYV,
.name = "YUYV 16bit",
@@ -283,7 +283,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_YVYU8_1X16,
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
.fmt = {
.fourcc = V4L2_PIX_FMT_YVYU,
.name = "YVYU 16bit",
@@ -293,7 +293,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.name = "Bayer 8 GRBG",
@@ -303,7 +303,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
.name = "Bayer 10 BGGR DPCM 8",
@@ -313,7 +313,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGBRG10_1X10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGBRG10,
.name = "Bayer 10 GBRG",
@@ -323,7 +323,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGRBG10,
.name = "Bayer 10 GRBG",
@@ -333,7 +333,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SRGGB10_1X10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_SRGGB10,
.name = "Bayer 10 RGGB",
@@ -343,7 +343,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SBGGR12_1X12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR12,
.name = "Bayer 12 BGGR",
@@ -353,7 +353,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGBRG12_1X12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGBRG12,
.name = "Bayer 12 GBRG",
@@ -363,7 +363,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGRBG12,
.name = "Bayer 12 GRBG",
@@ -373,7 +373,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
- .code = V4L2_MBUS_FMT_SRGGB12_1X12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
.fmt = {
.fourcc = V4L2_PIX_FMT_SRGGB12,
.name = "Bayer 12 RGGB",
@@ -458,7 +458,7 @@ s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
EXPORT_SYMBOL(soc_mbus_image_size);
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
- enum v4l2_mbus_pixelcode code,
+ u32 code,
const struct soc_mbus_lookup *lookup,
int n)
{
@@ -473,7 +473,7 @@ const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
EXPORT_SYMBOL(soc_mbus_find_fmtdesc);
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
}
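
The soc_mediabus.c change above is mechanical: the lookup table and the soc_mbus_find_fmtdesc()/soc_mbus_get_fmtdesc() helpers now key on a plain u32 media-bus code instead of the retired enum. The lookup itself is a linear scan over the table; roughly, with the struct fields trimmed for the sketch:

#include <stddef.h>
#include <stdint.h>

struct pixfmt { uint32_t fourcc; const char *name; };
struct mbus_lookup { uint32_t code; struct pixfmt fmt; };

/* Return the format description for a media-bus code, or NULL if unknown. */
static const struct pixfmt *find_fmtdesc(uint32_t code,
                                         const struct mbus_lookup *tbl,
                                         size_t n)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (tbl[i].code == code)
            return &tbl[i].fmt;
    return NULL;
}
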
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index 940df4000c42..bec674994752 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -93,12 +93,8 @@ void csc_dump_regs(struct csc_data *csc)
{
struct device *dev = &csc->pdev->dev;
- u32 read_reg(struct csc_data *csc, int offset)
- {
- return ioread32(csc->base + offset);
- }
-
-#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(csc, CSC_##r))
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+ ioread32(csc->base + CSC_##r))
DUMPREG(CSC00);
DUMPREG(CSC01);
@@ -189,7 +185,7 @@ struct csc_data *csc_create(struct platform_device *pdev)
csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
if (IS_ERR(csc->base)) {
dev_err(&pdev->dev, "failed to ioremap\n");
- return csc->base;
+ return ERR_CAST(csc->base);
}
return csc;
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
index 6314171ffe9b..f82d1c7f667f 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -24,12 +24,8 @@ void sc_dump_regs(struct sc_data *sc)
{
struct device *dev = &sc->pdev->dev;
- u32 read_reg(struct sc_data *sc, int offset)
- {
- return ioread32(sc->base + offset);
- }
-
-#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(sc, CFG_##r))
+#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
+ ioread32(sc->base + CFG_##r))
DUMPREG(SC0);
DUMPREG(SC1);
@@ -304,7 +300,7 @@ struct sc_data *sc_create(struct platform_device *pdev)
sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
if (IS_ERR(sc->base)) {
dev_err(&pdev->dev, "failed to ioremap\n");
- return sc->base;
+ return ERR_CAST(sc->base);
}
return sc;
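
The ti-vpe csc.c/sc.c hunks above drop the nested read_reg() helpers (a GCC-only extension) in favour of DUMPREG() macros that call ioread32() directly, and fix the error path of csc_create()/sc_create(): devm_ioremap_resource() returns an error pointer of type void __iomem *, so handing it back as a struct pointer needs ERR_CAST(). The usual shape of such a probe-time helper, with illustrative names rather than the ti-vpe API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_data {
    void __iomem *base;
    struct resource *res;
};

struct foo_data *foo_create(struct platform_device *pdev)
{
    struct foo_data *foo;

    foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
    if (!foo)
        return ERR_PTR(-ENOMEM);

    foo->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "foo");
    if (!foo->res)
        return ERR_PTR(-ENODEV);

    foo->base = devm_ioremap_resource(&pdev->dev, foo->res);
    if (IS_ERR(foo->base))
        return ERR_CAST(foo->base); /* void __iomem * -> struct foo_data * */

    return foo;
}
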
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 9a081c291159..d628d1a7cf9e 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -2376,7 +2376,6 @@ static struct platform_driver vpe_pdrv = {
.remove = vpe_remove,
.driver = {
.name = VPE_MODULE_NAME,
- .owner = THIS_MODULE,
.of_match_table = vpe_of_match,
},
};
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index 3cb2f3564873..5820e45b3a9f 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -857,7 +857,6 @@ static int timblogiw_remove(struct platform_device *pdev)
static struct platform_driver timblogiw_platform_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = timblogiw_probe,
.remove = timblogiw_remove,
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index ae6870cb8339..86989d86abfa 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -101,7 +101,7 @@ struct via_camera {
*/
struct v4l2_pix_format sensor_format;
struct v4l2_pix_format user_format;
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
};
/*
@@ -143,12 +143,12 @@ static struct via_format {
__u8 *desc;
__u32 pixelformat;
int bpp; /* Bytes per pixel */
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
} via_formats[] = {
{
.desc = "YUYV 4:2:2",
.pixelformat = V4L2_PIX_FMT_YUYV,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
},
/* RGB444 and Bayer should be doable, but have never been
@@ -849,7 +849,7 @@ static const struct v4l2_pix_format viacam_def_pix_format = {
.sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
};
-static const enum v4l2_mbus_pixelcode via_def_mbus_code = V4L2_MBUS_FMT_YUYV8_2X8;
+static const u32 via_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_fmtdesc *fmt)
@@ -985,9 +985,9 @@ static int viacam_querycap(struct file *filp, void *priv,
{
strcpy(cap->driver, "via-camera");
strcpy(cap->card, "via-camera");
- cap->version = 1;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/vim2m.c
index c1b03cfd6ded..d9d844aab39b 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/vim2m.c
@@ -31,12 +31,11 @@
#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
-#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
-
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.1");
+MODULE_ALIAS("mem2mem_testdev");
static unsigned debug;
module_param(debug, uint, 0644);
@@ -52,7 +51,7 @@ MODULE_PARM_DESC(debug, "activates debug info");
#define MEM2MEM_CAPTURE (1 << 0)
#define MEM2MEM_OUTPUT (1 << 1)
-#define MEM2MEM_NAME "m2m-testdev"
+#define MEM2MEM_NAME "vim2m"
/* Per queue */
#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
@@ -72,15 +71,15 @@ MODULE_PARM_DESC(debug, "activates debug info");
v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
-static void m2mtest_dev_release(struct device *dev)
+static void vim2m_dev_release(struct device *dev)
{}
-static struct platform_device m2mtest_pdev = {
+static struct platform_device vim2m_pdev = {
.name = MEM2MEM_NAME,
- .dev.release = m2mtest_dev_release,
+ .dev.release = vim2m_dev_release,
};
-struct m2mtest_fmt {
+struct vim2m_fmt {
char *name;
u32 fourcc;
int depth;
@@ -88,7 +87,7 @@ struct m2mtest_fmt {
u32 types;
};
-static struct m2mtest_fmt formats[] = {
+static struct vim2m_fmt formats[] = {
{
.name = "RGB565 (BE)",
.fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
@@ -108,12 +107,12 @@ static struct m2mtest_fmt formats[] = {
#define NUM_FORMATS ARRAY_SIZE(formats)
/* Per-queue, driver-specific private data */
-struct m2mtest_q_data {
+struct vim2m_q_data {
unsigned int width;
unsigned int height;
unsigned int sizeimage;
unsigned int sequence;
- struct m2mtest_fmt *fmt;
+ struct vim2m_fmt *fmt;
};
enum {
@@ -124,9 +123,9 @@ enum {
#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
-static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+static struct vim2m_fmt *find_format(struct v4l2_format *f)
{
- struct m2mtest_fmt *fmt;
+ struct vim2m_fmt *fmt;
unsigned int k;
for (k = 0; k < NUM_FORMATS; k++) {
@@ -141,7 +140,7 @@ static struct m2mtest_fmt *find_format(struct v4l2_format *f)
return &formats[k];
}
-struct m2mtest_dev {
+struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd;
@@ -154,9 +153,9 @@ struct m2mtest_dev {
struct v4l2_m2m_dev *m2m_dev;
};
-struct m2mtest_ctx {
+struct vim2m_ctx {
struct v4l2_fh fh;
- struct m2mtest_dev *dev;
+ struct vim2m_dev *dev;
struct v4l2_ctrl_handler hdl;
@@ -177,15 +176,15 @@ struct m2mtest_ctx {
enum v4l2_colorspace colorspace;
/* Source and destination queue data */
- struct m2mtest_q_data q_data[2];
+ struct vim2m_q_data q_data[2];
};
-static inline struct m2mtest_ctx *file2ctx(struct file *file)
+static inline struct vim2m_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct m2mtest_ctx, fh);
+ return container_of(file->private_data, struct vim2m_ctx, fh);
}
-static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
enum v4l2_buf_type type)
{
switch (type) {
@@ -200,12 +199,12 @@ static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
}
-static int device_process(struct m2mtest_ctx *ctx,
+static int device_process(struct vim2m_ctx *ctx,
struct vb2_buffer *in_vb,
struct vb2_buffer *out_vb)
{
- struct m2mtest_dev *dev = ctx->dev;
- struct m2mtest_q_data *q_data;
+ struct vim2m_dev *dev = ctx->dev;
+ struct vim2m_q_data *q_data;
u8 *p_in, *p_out;
int x, y, t, w;
int tile_w, bytes_left;
@@ -334,7 +333,7 @@ static int device_process(struct m2mtest_ctx *ctx,
return 0;
}
-static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
{
dprintk(dev, "Scheduling a simulated irq\n");
mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
@@ -349,7 +348,7 @@ static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
*/
static int job_ready(void *priv)
{
- struct m2mtest_ctx *ctx = priv;
+ struct vim2m_ctx *ctx = priv;
if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
|| v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
@@ -362,7 +361,7 @@ static int job_ready(void *priv)
static void job_abort(void *priv)
{
- struct m2mtest_ctx *ctx = priv;
+ struct vim2m_ctx *ctx = priv;
/* Will cancel the transaction in the next interrupt handler */
ctx->aborting = 1;
@@ -376,8 +375,8 @@ static void job_abort(void *priv)
*/
static void device_run(void *priv)
{
- struct m2mtest_ctx *ctx = priv;
- struct m2mtest_dev *dev = ctx->dev;
+ struct vim2m_ctx *ctx = priv;
+ struct vim2m_dev *dev = ctx->dev;
struct vb2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -391,12 +390,12 @@ static void device_run(void *priv)
static void device_isr(unsigned long priv)
{
- struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
- struct m2mtest_ctx *curr_ctx;
+ struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+ struct vim2m_ctx *curr_ctx;
struct vb2_buffer *src_vb, *dst_vb;
unsigned long flags;
- curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+ curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
if (NULL == curr_ctx) {
pr_err("Instance released before the end of transaction\n");
@@ -408,16 +407,16 @@ static void device_isr(unsigned long priv)
curr_ctx->num_processed++;
- spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ spin_lock_irqsave(&vim2m_dev->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
- spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ spin_unlock_irqrestore(&vim2m_dev->irqlock, flags);
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
dprintk(curr_ctx->dev, "Finishing transaction\n");
curr_ctx->num_processed = 0;
- v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
+ v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
} else {
device_run(curr_ctx);
}
@@ -441,7 +440,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
int i, num;
- struct m2mtest_fmt *fmt;
+ struct vim2m_fmt *fmt;
num = 0;
@@ -480,10 +479,10 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
return enum_fmt(f, MEM2MEM_OUTPUT);
}
-static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
{
struct vb2_queue *vq;
- struct m2mtest_q_data *q_data;
+ struct vim2m_q_data *q_data;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -514,7 +513,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
return vidioc_g_fmt(file2ctx(file), f);
}
-static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
{
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported */
@@ -539,8 +538,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct m2mtest_fmt *fmt;
- struct m2mtest_ctx *ctx = file2ctx(file);
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
fmt = find_format(f);
if (!fmt) {
@@ -561,8 +560,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct m2mtest_fmt *fmt;
- struct m2mtest_ctx *ctx = file2ctx(file);
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
fmt = find_format(f);
if (!fmt) {
@@ -581,9 +580,9 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
return vidioc_try_fmt(f, fmt);
}
-static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
{
- struct m2mtest_q_data *q_data;
+ struct vim2m_q_data *q_data;
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -627,7 +626,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct m2mtest_ctx *ctx = file2ctx(file);
+ struct vim2m_ctx *ctx = file2ctx(file);
int ret;
ret = vidioc_try_fmt_vid_out(file, priv, f);
@@ -640,10 +639,10 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
-static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
+static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct m2mtest_ctx *ctx =
- container_of(ctrl->handler, struct m2mtest_ctx, hdl);
+ struct vim2m_ctx *ctx =
+ container_of(ctrl->handler, struct vim2m_ctx, hdl);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -676,12 +675,12 @@ static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
-static const struct v4l2_ctrl_ops m2mtest_ctrl_ops = {
- .s_ctrl = m2mtest_s_ctrl,
+static const struct v4l2_ctrl_ops vim2m_ctrl_ops = {
+ .s_ctrl = vim2m_s_ctrl,
};
-static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
@@ -698,6 +697,7 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -711,13 +711,13 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
* Queue operations
*/
-static int m2mtest_queue_setup(struct vb2_queue *vq,
+static int vim2m_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
- struct m2mtest_q_data *q_data;
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vim2m_q_data *q_data;
unsigned int size, count = *nbuffers;
q_data = get_q_data(ctx, vq->type);
@@ -741,10 +741,10 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int m2mtest_buf_prepare(struct vb2_buffer *vb)
+static int vim2m_buf_prepare(struct vb2_buffer *vb)
{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct m2mtest_q_data *q_data;
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vim2m_q_data *q_data;
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
@@ -770,25 +770,25 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static void m2mtest_buf_queue(struct vb2_buffer *vb)
+static void vim2m_buf_queue(struct vb2_buffer *vb)
{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
-static int m2mtest_start_streaming(struct vb2_queue *q, unsigned count)
+static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
- struct m2mtest_q_data *q_data = get_q_data(ctx, q->type);
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
q_data->sequence = 0;
return 0;
}
-static void m2mtest_stop_streaming(struct vb2_queue *q)
+static void vim2m_stop_streaming(struct vb2_queue *q)
{
- struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_buffer *vb;
unsigned long flags;
@@ -805,26 +805,26 @@ static void m2mtest_stop_streaming(struct vb2_queue *q)
}
}
-static struct vb2_ops m2mtest_qops = {
- .queue_setup = m2mtest_queue_setup,
- .buf_prepare = m2mtest_buf_prepare,
- .buf_queue = m2mtest_buf_queue,
- .start_streaming = m2mtest_start_streaming,
- .stop_streaming = m2mtest_stop_streaming,
+static struct vb2_ops vim2m_qops = {
+ .queue_setup = vim2m_queue_setup,
+ .buf_prepare = vim2m_buf_prepare,
+ .buf_queue = vim2m_buf_queue,
+ .start_streaming = vim2m_start_streaming,
+ .stop_streaming = vim2m_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
- struct m2mtest_ctx *ctx = priv;
+ struct vim2m_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->ops = &m2mtest_qops;
+ src_vq->ops = &vim2m_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
@@ -837,7 +837,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->ops = &m2mtest_qops;
+ dst_vq->ops = &vim2m_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
@@ -845,8 +845,8 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return vb2_queue_init(dst_vq);
}
-static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
- .ops = &m2mtest_ctrl_ops,
+static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
+ .ops = &vim2m_ctrl_ops,
.id = V4L2_CID_TRANS_TIME_MSEC,
.name = "Transaction Time (msec)",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -856,8 +856,8 @@ static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
.step = 1,
};
-static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
- .ops = &m2mtest_ctrl_ops,
+static const struct v4l2_ctrl_config vim2m_ctrl_trans_num_bufs = {
+ .ops = &vim2m_ctrl_ops,
.id = V4L2_CID_TRANS_NUM_BUFS,
.name = "Buffers Per Transaction",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -870,10 +870,10 @@ static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
/*
* File operations
*/
-static int m2mtest_open(struct file *file)
+static int vim2m_open(struct file *file)
{
- struct m2mtest_dev *dev = video_drvdata(file);
- struct m2mtest_ctx *ctx = NULL;
+ struct vim2m_dev *dev = video_drvdata(file);
+ struct vim2m_ctx *ctx = NULL;
struct v4l2_ctrl_handler *hdl;
int rc = 0;
@@ -890,10 +890,10 @@ static int m2mtest_open(struct file *file)
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 4);
- v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_time_msec, NULL);
- v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_num_bufs, NULL);
+ v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL);
+ v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL);
if (hdl->error) {
rc = hdl->error;
v4l2_ctrl_handler_free(hdl);
@@ -933,10 +933,10 @@ open_unlock:
return rc;
}
-static int m2mtest_release(struct file *file)
+static int vim2m_release(struct file *file)
{
- struct m2mtest_dev *dev = video_drvdata(file);
- struct m2mtest_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+ struct vim2m_ctx *ctx = file2ctx(file);
dprintk(dev, "Releasing instance %p\n", ctx);
@@ -953,20 +953,20 @@ static int m2mtest_release(struct file *file)
return 0;
}
-static const struct v4l2_file_operations m2mtest_fops = {
+static const struct v4l2_file_operations vim2m_fops = {
.owner = THIS_MODULE,
- .open = m2mtest_open,
- .release = m2mtest_release,
+ .open = vim2m_open,
+ .release = vim2m_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
-static struct video_device m2mtest_videodev = {
+static struct video_device vim2m_videodev = {
.name = MEM2MEM_NAME,
.vfl_dir = VFL_DIR_M2M,
- .fops = &m2mtest_fops,
- .ioctl_ops = &m2mtest_ioctl_ops,
+ .fops = &vim2m_fops,
+ .ioctl_ops = &vim2m_ioctl_ops,
.minor = -1,
.release = video_device_release,
};
@@ -977,9 +977,9 @@ static struct v4l2_m2m_ops m2m_ops = {
.job_abort = job_abort,
};
-static int m2mtest_probe(struct platform_device *pdev)
+static int vim2m_probe(struct platform_device *pdev)
{
- struct m2mtest_dev *dev;
+ struct vim2m_dev *dev;
struct video_device *vfd;
int ret;
@@ -1003,7 +1003,7 @@ static int m2mtest_probe(struct platform_device *pdev)
goto unreg_dev;
}
- *vfd = m2mtest_videodev;
+ *vfd = vim2m_videodev;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
@@ -1014,7 +1014,7 @@ static int m2mtest_probe(struct platform_device *pdev)
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", vim2m_videodev.name);
dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
@@ -1042,11 +1042,11 @@ unreg_dev:
return ret;
}
-static int m2mtest_remove(struct platform_device *pdev)
+static int vim2m_remove(struct platform_device *pdev)
{
- struct m2mtest_dev *dev = platform_get_drvdata(pdev);
+ struct vim2m_dev *dev = platform_get_drvdata(pdev);
- v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
video_unregister_device(dev->vfd);
@@ -1055,35 +1055,34 @@ static int m2mtest_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver m2mtest_pdrv = {
- .probe = m2mtest_probe,
- .remove = m2mtest_remove,
+static struct platform_driver vim2m_pdrv = {
+ .probe = vim2m_probe,
+ .remove = vim2m_remove,
.driver = {
.name = MEM2MEM_NAME,
- .owner = THIS_MODULE,
},
};
-static void __exit m2mtest_exit(void)
+static void __exit vim2m_exit(void)
{
- platform_driver_unregister(&m2mtest_pdrv);
- platform_device_unregister(&m2mtest_pdev);
+ platform_driver_unregister(&vim2m_pdrv);
+ platform_device_unregister(&vim2m_pdev);
}
-static int __init m2mtest_init(void)
+static int __init vim2m_init(void)
{
int ret;
- ret = platform_device_register(&m2mtest_pdev);
+ ret = platform_device_register(&vim2m_pdev);
if (ret)
return ret;
- ret = platform_driver_register(&m2mtest_pdrv);
+ ret = platform_driver_register(&vim2m_pdrv);
if (ret)
- platform_device_unregister(&m2mtest_pdev);
+ platform_device_unregister(&vim2m_pdev);
return 0;
}
-module_init(m2mtest_init);
-module_exit(m2mtest_exit);
+module_init(vim2m_init);
+module_exit(vim2m_exit);
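
The mem2mem_testdev to vim2m rename above is almost entirely mechanical, with MODULE_ALIAS("mem2mem_testdev") preserving autoloading under the old name, and the init path still registers a dummy platform_device so that probe() runs without real hardware. Note that the renamed vim2m_init() keeps the pre-existing quirk of returning 0 even when platform_driver_register() fails. The usual device/driver pairing, as an illustrative skeleton (not the vim2m code itself) that propagates the failure instead:

#include <linux/module.h>
#include <linux/platform_device.h>

static void demo_dev_release(struct device *dev) { }

static struct platform_device demo_pdev = {
    .name        = "demo-m2m",
    .dev.release = demo_dev_release,
};

static int demo_probe(struct platform_device *pdev)  { return 0; }
static int demo_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver demo_pdrv = {
    .probe  = demo_probe,
    .remove = demo_remove,
    .driver = {
        .name = "demo-m2m",
    },
};

static int __init demo_init(void)
{
    int ret;

    ret = platform_device_register(&demo_pdev);
    if (ret)
        return ret;

    ret = platform_driver_register(&demo_pdrv);
    if (ret)
        platform_device_unregister(&demo_pdev);

    return ret; /* unlike the hunk above, propagate the failure */
}

static void __exit demo_exit(void)
{
    platform_driver_unregister(&demo_pdrv);
    platform_device_unregister(&demo_pdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
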
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 686c3c2ad05b..a7e033a5d291 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -195,20 +195,6 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
};
-void vivid_lock(struct vb2_queue *vq)
-{
- struct vivid_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_lock(&dev->mutex);
-}
-
-void vivid_unlock(struct vb2_queue *vq)
-{
- struct vivid_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_unlock(&dev->mutex);
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -586,7 +572,7 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
-/* Not yet .vidioc_expbuf = vb2_ioctl_expbuf,*/
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
@@ -1018,6 +1004,7 @@ static int __init vivid_create_instance(int inst)
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret)
@@ -1036,6 +1023,7 @@ static int __init vivid_create_instance(int inst)
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret)
@@ -1054,6 +1042,7 @@ static int __init vivid_create_instance(int inst)
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret)
@@ -1072,6 +1061,7 @@ static int __init vivid_create_instance(int inst)
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret)
@@ -1089,6 +1079,7 @@ static int __init vivid_create_instance(int inst)
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 8;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret)
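
The vivid-core.c hunks above retire the driver's own vivid_lock()/vivid_unlock() helpers and enable VIDIOC_EXPBUF: once vb2_queue->lock points at the driver mutex, the vb2 core takes and drops that mutex itself around queue operations and blocking waits, so hand-rolled locking wrappers are no longer needed. A trimmed queue setup showing where the lock goes; names and ops are placeholders, not vivid's:

#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

static int demo_init_queue(struct vb2_queue *q, void *priv,
                           struct mutex *lock, const struct vb2_ops *ops)
{
    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
    q->drv_priv = priv;
    q->buf_struct_size = sizeof(struct vb2_buffer);
    q->ops = ops;
    q->mem_ops = &vb2_vmalloc_memops;
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    q->min_buffers_needed = 2;
    q->lock = lock;     /* vb2 core serializes with this mutex */

    return vb2_queue_init(q);
}
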
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 811c286491a5..4b497df4b6a4 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -116,6 +116,17 @@ enum vivid_signal_mode {
CUSTOM_DV_TIMINGS,
};
+enum vivid_colorspace {
+ VIVID_CS_170M,
+ VIVID_CS_709,
+ VIVID_CS_SRGB,
+ VIVID_CS_ADOBERGB,
+ VIVID_CS_2020,
+ VIVID_CS_240M,
+ VIVID_CS_SYS_M,
+ VIVID_CS_SYS_BG,
+};
+
#define VIVID_INVALID_SIGNAL(mode) \
((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
@@ -318,6 +329,8 @@ struct vivid_dev {
v4l2_std_id std_out;
struct v4l2_dv_timings dv_timings_out;
u32 colorspace_out;
+ u32 ycbcr_enc_out;
+ u32 quantization_out;
u32 service_set_out;
u32 bytesperline_out[2];
unsigned tv_field_out;
@@ -514,7 +527,4 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
return dev->output_type[dev->output] == HDMI;
}
-void vivid_lock(struct vb2_queue *vq);
-void vivid_unlock(struct vb2_queue *vq);
-
#endif
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index d5cbf0038f24..857e7866e8bc 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -40,6 +40,9 @@
#define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5)
#define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6)
#define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7)
+#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
+#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
+#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -59,19 +62,21 @@
#define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23)
#define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24)
#define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25)
-#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 26)
-#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 27)
-#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 28)
-#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 29)
-#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 30)
-#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 31)
-#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 32)
-#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 33)
-#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 34)
-#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 35)
-#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 36)
-#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 37)
-#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 38)
+#define VIVID_CID_YCBCR_ENC (VIVID_CID_VIVID_BASE + 26)
+#define VIVID_CID_QUANTIZATION (VIVID_CID_VIVID_BASE + 27)
+#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 28)
+#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 29)
+#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 30)
+#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 31)
+#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 32)
+#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 33)
+#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 34)
+#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 35)
+#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 36)
+#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 37)
+#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 38)
+#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 39)
+#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 40)
#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
@@ -163,6 +168,42 @@ static const struct v4l2_ctrl_config vivid_ctrl_int64 = {
.step = 1,
};
+static const struct v4l2_ctrl_config vivid_ctrl_u32_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U32_ARRAY,
+ .name = "U32 1 Element Array",
+ .type = V4L2_CTRL_TYPE_U32,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x20000,
+ .step = 1,
+ .dims = { 1 },
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_u16_matrix = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U16_MATRIX,
+ .name = "U16 8x16 Matrix",
+ .type = V4L2_CTRL_TYPE_U16,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x2000,
+ .step = 1,
+ .dims = { 8, 16 },
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_u8_4d_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U8_4D_ARRAY,
+ .name = "U8 2x3x4x5 Array",
+ .type = V4L2_CTRL_TYPE_U8,
+ .def = 0x18,
+ .min = 0x10,
+ .max = 0x20,
+ .step = 1,
+ .dims = { 2, 3, 4, 5 },
+};
+
static const char * const vivid_ctrl_menu_strings[] = {
"Menu Item 0 (Skipped)",
"Menu Item 1",
@@ -294,6 +335,16 @@ static const struct v4l2_ctrl_ops vivid_user_vid_ctrl_ops = {
static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ static const u32 colorspaces[] = {
+ V4L2_COLORSPACE_SMPTE170M,
+ V4L2_COLORSPACE_REC709,
+ V4L2_COLORSPACE_SRGB,
+ V4L2_COLORSPACE_ADOBERGB,
+ V4L2_COLORSPACE_BT2020,
+ V4L2_COLORSPACE_SMPTE240M,
+ V4L2_COLORSPACE_470_SYSTEM_M,
+ V4L2_COLORSPACE_470_SYSTEM_BG,
+ };
struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap);
unsigned i;
@@ -303,7 +354,21 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
tpg_s_pattern(&dev->tpg, ctrl->val);
break;
case VIVID_CID_COLORSPACE:
- tpg_s_colorspace(&dev->tpg, ctrl->val);
+ tpg_s_colorspace(&dev->tpg, colorspaces[ctrl->val]);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_YCBCR_ENC:
+ tpg_s_ycbcr_enc(&dev->tpg, ctrl->val);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case VIVID_CID_QUANTIZATION:
+ tpg_s_quantization(&dev->tpg, ctrl->val);
vivid_send_source_change(dev, TV);
vivid_send_source_change(dev, SVID);
vivid_send_source_change(dev, HDMI);
@@ -623,15 +688,14 @@ static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
};
static const char * const vivid_ctrl_colorspace_strings[] = {
- "",
"SMPTE 170M",
- "SMPTE 240M",
"REC 709",
- "", /* Skip Bt878 entry */
+ "sRGB",
+ "AdobeRGB",
+ "BT.2020",
+ "SMPTE 240M",
"470 System M",
"470 System BG",
- "", /* Skip JPEG entry */
- "sRGB",
NULL,
};
@@ -640,13 +704,49 @@ static const struct v4l2_ctrl_config vivid_ctrl_colorspace = {
.id = VIVID_CID_COLORSPACE,
.name = "Colorspace",
.type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .max = 8,
- .menu_skip_mask = (1 << 4) | (1 << 7),
- .def = 8,
+ .max = 7,
+ .def = 2,
.qmenu = vivid_ctrl_colorspace_strings,
};
+static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
+ "Default",
+ "ITU-R 601",
+ "Rec. 709",
+ "xvYCC 601",
+ "xvYCC 709",
+ "sYCC",
+ "BT.2020 Non-Constant Luminance",
+ "BT.2020 Constant Luminance",
+ "SMPTE 240M",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_YCBCR_ENC,
+ .name = "Y'CbCr Encoding",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 8,
+ .qmenu = vivid_ctrl_ycbcr_enc_strings,
+};
+
+static const char * const vivid_ctrl_quantization_strings[] = {
+ "Default",
+ "Full Range",
+ "Limited Range",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_quantization = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_QUANTIZATION,
+ .name = "Quantization",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 2,
+ .qmenu = vivid_ctrl_quantization_strings,
+};
+
static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_ALPHA_MODE,
@@ -723,8 +823,12 @@ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
else
dev->colorspace_out = V4L2_COLORSPACE_REC709;
+ dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
} else {
dev->colorspace_out = V4L2_COLORSPACE_SRGB;
+ dev->quantization_out = dev->dvi_d_out ?
+ V4L2_QUANTIZATION_LIM_RANGE :
+ V4L2_QUANTIZATION_DEFAULT;
}
if (dev->loop_video)
vivid_send_source_change(dev, HDMI);
@@ -792,15 +896,15 @@ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
dev->start_streaming_error = true;
break;
case VIVID_CID_QUEUE_ERROR:
- if (dev->vb_vid_cap_q.start_streaming_called)
+ if (vb2_start_streaming_called(&dev->vb_vid_cap_q))
vb2_queue_error(&dev->vb_vid_cap_q);
- if (dev->vb_vbi_cap_q.start_streaming_called)
+ if (vb2_start_streaming_called(&dev->vb_vbi_cap_q))
vb2_queue_error(&dev->vb_vbi_cap_q);
- if (dev->vb_vid_out_q.start_streaming_called)
+ if (vb2_start_streaming_called(&dev->vb_vid_out_q))
vb2_queue_error(&dev->vb_vid_out_q);
- if (dev->vb_vbi_out_q.start_streaming_called)
+ if (vb2_start_streaming_called(&dev->vb_vbi_out_q))
vb2_queue_error(&dev->vb_vbi_out_q);
- if (dev->vb_sdr_cap_q.start_streaming_called)
+ if (vb2_start_streaming_called(&dev->vb_sdr_cap_q))
vb2_queue_error(&dev->vb_sdr_cap_q);
break;
case VIVID_CID_SEQ_WRAP:
@@ -1222,6 +1326,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
if (dev->has_vid_cap) {
/* Image Processing Controls */
@@ -1258,6 +1365,8 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL);
dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap,
&vivid_ctrl_colorspace, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
}
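
The vivid-ctrls.c hunks above add multi-dimensional array controls (the U32/U16/U8 configs with .dims) next to the new colorspace, Y'CbCr encoding and quantization menus, and register them in vivid_create_controls(). As a rough illustration of how such array controls are consumed from userspace — this sketch is not part of the patch, and the /dev/video0 path is an assumption — an application can walk the compound controls with VIDIOC_QUERY_EXT_CTRL and fetch their payload with VIDIOC_G_EXT_CTRLS:

/* Hypothetical userspace sketch, not part of the patch: enumerate the
 * device's controls and read back any multi-dimensional array control. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed vivid video node */
	struct v4l2_query_ext_ctrl qc;

	if (fd < 0)
		return 1;
	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	while (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc) == 0) {
		if (qc.nr_of_dims) {
			struct v4l2_ext_control c;
			struct v4l2_ext_controls cs;

			memset(&c, 0, sizeof(c));
			memset(&cs, 0, sizeof(cs));
			c.id = qc.id;
			c.size = qc.elems * qc.elem_size;	/* payload size */
			c.ptr = malloc(c.size);
			cs.ctrl_class = V4L2_CTRL_ID2CLASS(qc.id);
			cs.count = 1;
			cs.controls = &c;
			if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs) == 0)
				printf("%s: %u elements of %u bytes\n",
				       qc.name, qc.elems, qc.elem_size);
			free(c.ptr);
		}
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	}
	return 0;
}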
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 8c5d661cfc49..4af55f18829f 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -297,8 +297,8 @@ const struct vb2_ops vivid_sdr_cap_qops = {
.buf_queue = sdr_cap_buf_queue,
.start_streaming = sdr_cap_start_streaming,
.stop_streaming = sdr_cap_stop_streaming,
- .wait_prepare = vivid_unlock,
- .wait_finish = vivid_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
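
The vivid-sdr-cap.c hunk above swaps the driver's own vivid_unlock/vivid_lock callbacks for the generic videobuf2 helpers. For context, a simplified sketch of what those helpers do (not verbatim kernel code; the real implementations live in videobuf2-core.c and assume <media/videobuf2-core.h>): they drop and re-take the mutex the driver assigned to the queue around blocking waits, which is what the open-coded callbacks did by hand.

/* Simplified sketch of the vb2 helpers referenced above. */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);	/* release while waiting for a buffer */
}

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);	/* re-acquire before continuing */
}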
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/platform/vivid/vivid-tpg-colors.c
index 2adddc0ca662..424aa7abc723 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.c
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.c
@@ -12,7 +12,7 @@
* This source also contains the code used to generate the tpg_csc_colors
* table. Run the following command to compile it:
*
- * gcc vivid-colors.c -DCOMPILE_APP -o gen-colors -lm
+ * gcc vivid-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
*
* and run the utility.
*
@@ -78,22 +78,542 @@ const struct color tpg_colors[TPG_COLOR_MAX] = {
#ifndef COMPILE_APP
/* Generated table */
-const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = {
- [V4L2_COLORSPACE_SMPTE170M][0] = { 2953, 2939, 2939 },
- [V4L2_COLORSPACE_SMPTE170M][1] = { 2954, 2963, 585 },
- [V4L2_COLORSPACE_SMPTE170M][2] = { 84, 2967, 2937 },
- [V4L2_COLORSPACE_SMPTE170M][3] = { 93, 2990, 575 },
- [V4L2_COLORSPACE_SMPTE170M][4] = { 3030, 259, 2933 },
- [V4L2_COLORSPACE_SMPTE170M][5] = { 3031, 406, 557 },
- [V4L2_COLORSPACE_SMPTE170M][6] = { 544, 428, 2931 },
- [V4L2_COLORSPACE_SMPTE170M][7] = { 551, 547, 547 },
+const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28,
+ 28, 29, 29, 29, 29, 30, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32,
+ 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 34, 35, 35, 35, 35,
+ 36, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 39, 39,
+ 39, 39, 40, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 42,
+ 43, 43, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46,
+ 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 48, 49, 49, 49, 49, 50,
+ 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 52, 53, 53, 53,
+ 53, 54, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 56, 57,
+ 57, 57, 57, 58, 58, 58, 58, 58, 59, 59, 59, 59, 60, 60, 60, 60,
+ 60, 61, 61, 61, 61, 62, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64,
+ 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 66, 67, 67, 67, 67,
+ 68, 68, 68, 68, 68, 69, 69, 69, 69, 70, 70, 70, 70, 70, 71, 71,
+ 71, 71, 72, 72, 72, 72, 72, 73, 73, 73, 73, 73, 74, 74, 74, 74,
+ 74, 75, 75, 75, 75, 76, 76, 76, 76, 76, 77, 77, 77, 77, 78, 78,
+ 78, 78, 79, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82,
+ 82, 82, 82, 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85,
+ 86, 86, 86, 86, 87, 87, 87, 87, 88, 88, 88, 88, 89, 89, 89, 89,
+ 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, 92, 93, 93, 93, 93,
+ 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97,
+ 98, 98, 98, 98, 99, 99, 99, 99, 100, 100, 100, 101, 101, 101, 101, 102,
+ 102, 102, 102, 103, 103, 103, 103, 104, 104, 104, 105, 105, 105, 105, 106, 106,
+ 106, 106, 107, 107, 107, 107, 108, 108, 108, 109, 109, 109, 109, 110, 110, 110,
+ 111, 111, 111, 111, 112, 112, 112, 112, 113, 113, 113, 114, 114, 114, 114, 115,
+ 115, 115, 116, 116, 116, 116, 117, 117, 117, 118, 118, 118, 118, 119, 119, 119,
+ 120, 120, 120, 120, 121, 121, 121, 122, 122, 122, 123, 123, 123, 123, 124, 124,
+ 124, 125, 125, 125, 125, 126, 126, 126, 127, 127, 127, 128, 128, 128, 128, 129,
+ 129, 129, 130, 130, 130, 131, 131, 131, 132, 132, 132, 132, 133, 133, 133, 134,
+ 134, 134, 135, 135, 135, 136, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139,
+ 139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 142, 143, 143, 143, 144,
+ 144, 144, 145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148, 149, 149,
+ 149, 150, 150, 150, 151, 151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154,
+ 155, 155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159, 159, 159, 160,
+ 160, 160, 161, 161, 161, 162, 162, 162, 163, 163, 163, 164, 164, 164, 165, 165,
+ 165, 166, 166, 167, 167, 167, 168, 168, 168, 169, 169, 169, 170, 170, 170, 171,
+ 171, 171, 172, 172, 172, 173, 173, 174, 174, 174, 175, 175, 175, 176, 176, 176,
+ 177, 177, 177, 178, 178, 179, 179, 179, 180, 180, 180, 181, 181, 181, 182, 182,
+ 183, 183, 183, 184, 184, 184, 185, 185, 186, 186, 186, 187, 187, 187, 188, 188,
+ 188, 189, 189, 190, 190, 190, 191, 191, 191, 192, 192, 193, 193, 193, 194, 194,
+ 194, 195, 195, 196, 196, 196, 197, 197, 198, 198, 198, 199, 199, 199, 200, 200,
+ 201, 201, 201, 202, 202, 203, 203, 203, 204, 204, 204, 205, 205, 206, 206, 206,
+ 207, 207, 208, 208, 208, 209, 209, 210, 210, 210, 211, 211, 212, 212, 212, 213,
+ 213, 214, 214, 214, 215, 215, 216, 216, 216, 217, 217, 218, 218, 218, 219, 219,
+ 220, 220, 220, 221, 221, 222, 222, 222, 223, 223, 224, 224, 224, 225, 225, 226,
+ 226, 227, 227, 227, 228, 228, 229, 229, 229, 230, 230, 231, 231, 232, 232, 232,
+ 233, 233, 234, 234, 234, 235, 235, 236, 236, 237, 237, 237, 238, 238, 239, 239,
+ 240, 240, 240, 241, 241, 242, 242, 243, 243, 243, 244, 244, 245, 245, 246, 246,
+ 246, 247, 247, 248, 248, 249, 249, 249, 250, 250, 251, 251, 252, 252, 252, 253,
+ 253, 254, 254, 255, 255, 256, 256, 256, 257, 257, 258, 258, 259, 259, 260, 260,
+ 260, 261, 261, 262, 262, 263, 263, 264, 264, 264, 265, 265, 266, 266, 267, 267,
+ 268, 268, 269, 269, 269, 270, 270, 271, 271, 272, 272, 273, 273, 274, 274, 274,
+ 275, 275, 276, 276, 277, 277, 278, 278, 279, 279, 279, 280, 280, 281, 281, 282,
+ 282, 283, 283, 284, 284, 285, 285, 286, 286, 286, 287, 287, 288, 288, 289, 289,
+ 290, 290, 291, 291, 292, 292, 293, 293, 294, 294, 295, 295, 295, 296, 296, 297,
+ 297, 298, 298, 299, 299, 300, 300, 301, 301, 302, 302, 303, 303, 304, 304, 305,
+ 305, 306, 306, 307, 307, 308, 308, 309, 309, 309, 310, 310, 311, 311, 312, 312,
+ 313, 313, 314, 314, 315, 315, 316, 316, 317, 317, 318, 318, 319, 319, 320, 320,
+ 321, 321, 322, 322, 323, 323, 324, 324, 325, 325, 326, 326, 327, 327, 328, 328,
+ 329, 329, 330, 330, 331, 331, 332, 332, 333, 333, 334, 335, 335, 336, 336, 337,
+ 337, 338, 338, 339, 339, 340, 340, 341, 341, 342, 342, 343, 343, 344, 344, 345,
+ 345, 346, 346, 347, 347, 348, 348, 349, 349, 350, 351, 351, 352, 352, 353, 353,
+ 354, 354, 355, 355, 356, 356, 357, 357, 358, 358, 359, 360, 360, 361, 361, 362,
+ 362, 363, 363, 364, 364, 365, 365, 366, 366, 367, 368, 368, 369, 369, 370, 370,
+ 371, 371, 372, 372, 373, 373, 374, 375, 375, 376, 376, 377, 377, 378, 378, 379,
+ 379, 380, 381, 381, 382, 382, 383, 383, 384, 384, 385, 386, 386, 387, 387, 388,
+ 388, 389, 389, 390, 391, 391, 392, 392, 393, 393, 394, 394, 395, 396, 396, 397,
+ 397, 398, 398, 399, 399, 400, 401, 401, 402, 402, 403, 403, 404, 405, 405, 406,
+ 406, 407, 407, 408, 409, 409, 410, 410, 411, 411, 412, 413, 413, 414, 414, 415,
+ 415, 416, 417, 417, 418, 418, 419, 419, 420, 421, 421, 422, 422, 423, 424, 424,
+ 425, 425, 426, 426, 427, 428, 428, 429, 429, 430, 431, 431, 432, 432, 433, 433,
+ 434, 435, 435, 436, 436, 437, 438, 438, 439, 439, 440, 441, 441, 442, 442, 443,
+ 444, 444, 445, 445, 446, 447, 447, 448, 448, 449, 450, 450, 451, 451, 452, 453,
+ 453, 454, 454, 455, 456, 456, 457, 457, 458, 459, 459, 460, 460, 461, 462, 462,
+ 463, 463, 464, 465, 465, 466, 467, 467, 468, 468, 469, 470, 470, 471, 471, 472,
+ 473, 473, 474, 475, 475, 476, 476, 477, 478, 478, 479, 480, 480, 481, 481, 482,
+ 483, 483, 484, 485, 485, 486, 486, 487, 488, 488, 489, 490, 490, 491, 491, 492,
+ 493, 493, 494, 495, 495, 496, 497, 497, 498, 498, 499, 500, 500, 501, 502, 502,
+ 503, 504, 504, 505, 505, 506, 507, 507, 508, 509, 509, 510, 511, 511, 512, 513,
+ 513, 514, 514, 515, 516, 516, 517, 518, 518, 519, 520, 520, 521, 522, 522, 523,
+ 524, 524, 525, 526, 526, 527, 528, 528, 529, 529, 530, 531, 531, 532, 533, 533,
+ 534, 535, 535, 536, 537, 537, 538, 539, 539, 540, 541, 541, 542, 543, 543, 544,
+ 545, 545, 546, 547, 547, 548, 549, 549, 550, 551, 551, 552, 553, 553, 554, 555,
+ 555, 556, 557, 557, 558, 559, 560, 560, 561, 562, 562, 563, 564, 564, 565, 566,
+ 566, 567, 568, 568, 569, 570, 570, 571, 572, 572, 573, 574, 575, 575, 576, 577,
+ 577, 578, 579, 579, 580, 581, 581, 582, 583, 584, 584, 585, 586, 586, 587, 588,
+ 588, 589, 590, 590, 591, 592, 593, 593, 594, 595, 595, 596, 597, 598, 598, 599,
+ 600, 600, 601, 602, 602, 603, 604, 605, 605, 606, 607, 607, 608, 609, 610, 610,
+ 611, 612, 612, 613, 614, 615, 615, 616, 617, 617, 618, 619, 620, 620, 621, 622,
+ 622, 623, 624, 625, 625, 626, 627, 627, 628, 629, 630, 630, 631, 632, 632, 633,
+ 634, 635, 635, 636, 637, 638, 638, 639, 640, 640, 641, 642, 643, 643, 644, 645,
+ 646, 646, 647, 648, 649, 649, 650, 651, 652, 652, 653, 654, 654, 655, 656, 657,
+ 657, 658, 659, 660, 660, 661, 662, 663, 663, 664, 665, 666, 666, 667, 668, 669,
+ 669, 670, 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678, 679, 680, 681,
+ 681, 682, 683, 684, 684, 685, 686, 687, 687, 688, 689, 690, 690, 691, 692, 693,
+ 694, 694, 695, 696, 697, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704, 705,
+ 706, 707, 707, 708, 709, 710, 710, 711, 712, 713, 714, 714, 715, 716, 717, 717,
+ 718, 719, 720, 720, 721, 722, 723, 724, 724, 725, 726, 727, 728, 728, 729, 730,
+ 731, 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 739, 740, 741, 742, 742,
+ 743, 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752, 753, 754, 754, 755,
+ 756, 757, 758, 758, 759, 760, 761, 762, 762, 763, 764, 765, 766, 766, 767, 768,
+ 769, 770, 771, 771, 772, 773, 774, 775, 775, 776, 777, 778, 779, 779, 780, 781,
+ 782, 783, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791, 792, 793, 793, 794,
+ 795, 796, 797, 797, 798, 799, 800, 801, 802, 802, 803, 804, 805, 806, 807, 807,
+ 808, 809, 810, 811, 812, 812, 813, 814, 815, 816, 817, 817, 818, 819, 820, 821,
+ 822, 822, 823, 824, 825, 826, 827, 827, 828, 829, 830, 831, 832, 832, 833, 834,
+ 835, 836, 837, 838, 838, 839, 840, 841, 842, 843, 843, 844, 845, 846, 847, 848,
+ 849, 849, 850, 851, 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 861, 861,
+ 862, 863, 864, 865, 866, 867, 867, 868, 869, 870, 871, 872, 873, 873, 874, 875,
+ 876, 877, 878, 879, 880, 880, 881, 882, 883, 884, 885, 886, 887, 887, 888, 889,
+ 890, 891, 892, 893, 894, 894, 895, 896, 897, 898, 899, 900, 901, 901, 902, 903,
+ 904, 905, 906, 907, 908, 909, 909, 910, 911, 912, 913, 914, 915, 916, 916, 917,
+ 918, 919, 920, 921, 922, 923, 924, 925, 925, 926, 927, 928, 929, 930, 931, 932,
+ 933, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 942, 943, 944, 945, 946,
+ 947, 948, 949, 950, 951, 952, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961,
+ 962, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 973, 974, 975,
+ 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 985, 986, 987, 988, 989, 990,
+ 991, 992, 993, 994, 995, 996, 997, 998, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005,
+ 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020,
+ 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1030, 1031, 1032, 1033, 1034, 1035,
+ 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1050,
+ 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066,
+ 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1078, 1079, 1080, 1081,
+ 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097,
+ 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113,
+ 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129,
+ 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
+ 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
+ 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177,
+ 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1193, 1194,
+ 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210,
+ 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1224, 1225, 1226, 1227,
+ 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243,
+ 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
+ 1261, 1262, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277,
+ 1278, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1295,
+ 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1309, 1310, 1311, 1312,
+ 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329,
+ 1330, 1331, 1332, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1346, 1347,
+ 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364,
+ 1365, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1377, 1378, 1379, 1380, 1381, 1382,
+ 1383, 1384, 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1396, 1397, 1398, 1399, 1400,
+ 1401, 1402, 1403, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1414, 1415, 1416, 1417, 1418,
+ 1419, 1420, 1421, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436,
+ 1437, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448, 1449, 1450, 1451, 1452, 1453, 1455,
+ 1456, 1457, 1458, 1459, 1460, 1461, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1471, 1472, 1473,
+ 1474, 1475, 1476, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1486, 1487, 1488, 1489, 1490, 1491,
+ 1493, 1494, 1495, 1496, 1497, 1498, 1500, 1501, 1502, 1503, 1504, 1505, 1507, 1508, 1509, 1510,
+ 1511, 1512, 1514, 1515, 1516, 1517, 1518, 1519, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529,
+ 1530, 1531, 1532, 1534, 1535, 1536, 1537, 1538, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548,
+ 1549, 1550, 1551, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1566, 1567,
+ 1568, 1569, 1570, 1572, 1573, 1574, 1575, 1576, 1578, 1579, 1580, 1581, 1582, 1584, 1585, 1586,
+ 1587, 1588, 1590, 1591, 1592, 1593, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1605,
+ 1607, 1608, 1609, 1610, 1611, 1613, 1614, 1615, 1616, 1617, 1619, 1620, 1621, 1622, 1624, 1625,
+ 1626, 1627, 1628, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1641, 1642, 1643, 1644,
+ 1646, 1647, 1648, 1649, 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1662, 1663, 1664,
+ 1665, 1667, 1668, 1669, 1670, 1671, 1673, 1674, 1675, 1676, 1678, 1679, 1680, 1681, 1683, 1684,
+ 1685, 1686, 1688, 1689, 1690, 1691, 1693, 1694, 1695, 1696, 1698, 1699, 1700, 1701, 1703, 1704,
+ 1705, 1706, 1708, 1709, 1710, 1711, 1713, 1714, 1715, 1716, 1718, 1719, 1720, 1721, 1723, 1724,
+ 1725, 1726, 1728, 1729, 1730, 1731, 1733, 1734, 1735, 1737, 1738, 1739, 1740, 1742, 1743, 1744,
+ 1745, 1747, 1748, 1749, 1750, 1752, 1753, 1754, 1756, 1757, 1758, 1759, 1761, 1762, 1763, 1764,
+ 1766, 1767, 1768, 1770, 1771, 1772, 1773, 1775, 1776, 1777, 1778, 1780, 1781, 1782, 1784, 1785,
+ 1786, 1787, 1789, 1790, 1791, 1793, 1794, 1795, 1796, 1798, 1799, 1800, 1802, 1803, 1804, 1806,
+ 1807, 1808, 1809, 1811, 1812, 1813, 1815, 1816, 1817, 1818, 1820, 1821, 1822, 1824, 1825, 1826,
+ 1828, 1829, 1830, 1831, 1833, 1834, 1835, 1837, 1838, 1839, 1841, 1842, 1843, 1844, 1846, 1847,
+ 1848, 1850, 1851, 1852, 1854, 1855, 1856, 1858, 1859, 1860, 1862, 1863, 1864, 1865, 1867, 1868,
+ 1869, 1871, 1872, 1873, 1875, 1876, 1877, 1879, 1880, 1881, 1883, 1884, 1885, 1887, 1888, 1889,
+ 1891, 1892, 1893, 1894, 1896, 1897, 1898, 1900, 1901, 1902, 1904, 1905, 1906, 1908, 1909, 1910,
+ 1912, 1913, 1914, 1916, 1917, 1918, 1920, 1921, 1922, 1924, 1925, 1926, 1928, 1929, 1930, 1932,
+ 1933, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1947, 1948, 1949, 1951, 1952, 1953,
+ 1955, 1956, 1957, 1959, 1960, 1961, 1963, 1964, 1965, 1967, 1968, 1970, 1971, 1972, 1974, 1975,
+ 1976, 1978, 1979, 1980, 1982, 1983, 1984, 1986, 1987, 1989, 1990, 1991, 1993, 1994, 1995, 1997,
+ 1998, 1999, 2001, 2002, 2004, 2005, 2006, 2008, 2009, 2010, 2012, 2013, 2015, 2016, 2017, 2019,
+ 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2030, 2031, 2032, 2034, 2035, 2037, 2038, 2039, 2041,
+ 2042, 2043, 2045, 2046, 2048, 2049, 2050, 2052, 2053, 2055, 2056, 2057, 2059, 2060, 2061, 2063,
+ 2064, 2066, 2067, 2068, 2070, 2071, 2073, 2074, 2075, 2077, 2078, 2080, 2081, 2082, 2084, 2085,
+ 2087, 2088, 2089, 2091, 2092, 2094, 2095, 2096, 2098, 2099, 2101, 2102, 2103, 2105, 2106, 2108,
+ 2109, 2110, 2112, 2113, 2115, 2116, 2117, 2119, 2120, 2122, 2123, 2124, 2126, 2127, 2129, 2130,
+ 2132, 2133, 2134, 2136, 2137, 2139, 2140, 2141, 2143, 2144, 2146, 2147, 2149, 2150, 2151, 2153,
+ 2154, 2156, 2157, 2159, 2160, 2161, 2163, 2164, 2166, 2167, 2169, 2170, 2171, 2173, 2174, 2176,
+ 2177, 2179, 2180, 2181, 2183, 2184, 2186, 2187, 2189, 2190, 2191, 2193, 2194, 2196, 2197, 2199,
+ 2200, 2202, 2203, 2204, 2206, 2207, 2209, 2210, 2212, 2213, 2214, 2216, 2217, 2219, 2220, 2222,
+ 2223, 2225, 2226, 2228, 2229, 2230, 2232, 2233, 2235, 2236, 2238, 2239, 2241, 2242, 2243, 2245,
+ 2246, 2248, 2249, 2251, 2252, 2254, 2255, 2257, 2258, 2260, 2261, 2262, 2264, 2265, 2267, 2268,
+ 2270, 2271, 2273, 2274, 2276, 2277, 2279, 2280, 2282, 2283, 2284, 2286, 2287, 2289, 2290, 2292,
+ 2293, 2295, 2296, 2298, 2299, 2301, 2302, 2304, 2305, 2307, 2308, 2310, 2311, 2312, 2314, 2315,
+ 2317, 2318, 2320, 2321, 2323, 2324, 2326, 2327, 2329, 2330, 2332, 2333, 2335, 2336, 2338, 2339,
+ 2341, 2342, 2344, 2345, 2347, 2348, 2350, 2351, 2353, 2354, 2356, 2357, 2359, 2360, 2362, 2363,
+ 2365, 2366, 2368, 2369, 2371, 2372, 2374, 2375, 2377, 2378, 2380, 2381, 2383, 2384, 2386, 2387,
+ 2389, 2390, 2392, 2393, 2395, 2396, 2398, 2399, 2401, 2402, 2404, 2405, 2407, 2408, 2410, 2411,
+ 2413, 2414, 2416, 2417, 2419, 2420, 2422, 2423, 2425, 2426, 2428, 2429, 2431, 2433, 2434, 2436,
+ 2437, 2439, 2440, 2442, 2443, 2445, 2446, 2448, 2449, 2451, 2452, 2454, 2455, 2457, 2458, 2460,
+ 2462, 2463, 2465, 2466, 2468, 2469, 2471, 2472, 2474, 2475, 2477, 2478, 2480, 2481, 2483, 2485,
+ 2486, 2488, 2489, 2491, 2492, 2494, 2495, 2497, 2498, 2500, 2502, 2503, 2505, 2506, 2508, 2509,
+ 2511, 2512, 2514, 2515, 2517, 2519, 2520, 2522, 2523, 2525, 2526, 2528, 2529, 2531, 2533, 2534,
+ 2536, 2537, 2539, 2540, 2542, 2543, 2545, 2547, 2548, 2550, 2551, 2553, 2554, 2556, 2557, 2559,
+ 2561, 2562, 2564, 2565, 2567, 2568, 2570, 2572, 2573, 2575, 2576, 2578, 2579, 2581, 2583, 2584,
+ 2586, 2587, 2589, 2590, 2592, 2594, 2595, 2597, 2598, 2600, 2601, 2603, 2605, 2606, 2608, 2609,
+ 2611, 2613, 2614, 2616, 2617, 2619, 2620, 2622, 2624, 2625, 2627, 2628, 2630, 2632, 2633, 2635,
+ 2636, 2638, 2640, 2641, 2643, 2644, 2646, 2647, 2649, 2651, 2652, 2654, 2655, 2657, 2659, 2660,
+ 2662, 2663, 2665, 2667, 2668, 2670, 2671, 2673, 2675, 2676, 2678, 2679, 2681, 2683, 2684, 2686,
+ 2687, 2689, 2691, 2692, 2694, 2696, 2697, 2699, 2700, 2702, 2704, 2705, 2707, 2708, 2710, 2712,
+ 2713, 2715, 2716, 2718, 2720, 2721, 2723, 2725, 2726, 2728, 2729, 2731, 2733, 2734, 2736, 2738,
+ 2739, 2741, 2742, 2744, 2746, 2747, 2749, 2751, 2752, 2754, 2755, 2757, 2759, 2760, 2762, 2764,
+ 2765, 2767, 2769, 2770, 2772, 2773, 2775, 2777, 2778, 2780, 2782, 2783, 2785, 2787, 2788, 2790,
+ 2791, 2793, 2795, 2796, 2798, 2800, 2801, 2803, 2805, 2806, 2808, 2810, 2811, 2813, 2814, 2816,
+ 2818, 2819, 2821, 2823, 2824, 2826, 2828, 2829, 2831, 2833, 2834, 2836, 2838, 2839, 2841, 2843,
+ 2844, 2846, 2848, 2849, 2851, 2853, 2854, 2856, 2857, 2859, 2861, 2862, 2864, 2866, 2867, 2869,
+ 2871, 2872, 2874, 2876, 2877, 2879, 2881, 2882, 2884, 2886, 2888, 2889, 2891, 2893, 2894, 2896,
+ 2898, 2899, 2901, 2903, 2904, 2906, 2908, 2909, 2911, 2913, 2914, 2916, 2918, 2919, 2921, 2923,
+ 2924, 2926, 2928, 2929, 2931, 2933, 2935, 2936, 2938, 2940, 2941, 2943, 2945, 2946, 2948, 2950,
+ 2951, 2953, 2955, 2956, 2958, 2960, 2962, 2963, 2965, 2967, 2968, 2970, 2972, 2973, 2975, 2977,
+ 2979, 2980, 2982, 2984, 2985, 2987, 2989, 2990, 2992, 2994, 2996, 2997, 2999, 3001, 3002, 3004,
+ 3006, 3008, 3009, 3011, 3013, 3014, 3016, 3018, 3020, 3021, 3023, 3025, 3026, 3028, 3030, 3032,
+ 3033, 3035, 3037, 3038, 3040, 3042, 3044, 3045, 3047, 3049, 3050, 3052, 3054, 3056, 3057, 3059,
+ 3061, 3063, 3064, 3066, 3068, 3069, 3071, 3073, 3075, 3076, 3078, 3080, 3082, 3083, 3085, 3087,
+ 3089, 3090, 3092, 3094, 3095, 3097, 3099, 3101, 3102, 3104, 3106, 3108, 3109, 3111, 3113, 3115,
+ 3116, 3118, 3120, 3122, 3123, 3125, 3127, 3129, 3130, 3132, 3134, 3136, 3137, 3139, 3141, 3143,
+ 3144, 3146, 3148, 3150, 3151, 3153, 3155, 3157, 3158, 3160, 3162, 3164, 3165, 3167, 3169, 3171,
+ 3172, 3174, 3176, 3178, 3179, 3181, 3183, 3185, 3187, 3188, 3190, 3192, 3194, 3195, 3197, 3199,
+ 3201, 3202, 3204, 3206, 3208, 3209, 3211, 3213, 3215, 3217, 3218, 3220, 3222, 3224, 3225, 3227,
+ 3229, 3231, 3233, 3234, 3236, 3238, 3240, 3241, 3243, 3245, 3247, 3249, 3250, 3252, 3254, 3256,
+ 3258, 3259, 3261, 3263, 3265, 3266, 3268, 3270, 3272, 3274, 3275, 3277, 3279, 3281, 3283, 3284,
+ 3286, 3288, 3290, 3292, 3293, 3295, 3297, 3299, 3301, 3302, 3304, 3306, 3308, 3310, 3311, 3313,
+ 3315, 3317, 3319, 3320, 3322, 3324, 3326, 3328, 3329, 3331, 3333, 3335, 3337, 3338, 3340, 3342,
+ 3344, 3346, 3348, 3349, 3351, 3353, 3355, 3357, 3358, 3360, 3362, 3364, 3366, 3368, 3369, 3371,
+ 3373, 3375, 3377, 3378, 3380, 3382, 3384, 3386, 3388, 3389, 3391, 3393, 3395, 3397, 3399, 3400,
+ 3402, 3404, 3406, 3408, 3410, 3411, 3413, 3415, 3417, 3419, 3421, 3422, 3424, 3426, 3428, 3430,
+ 3432, 3433, 3435, 3437, 3439, 3441, 3443, 3444, 3446, 3448, 3450, 3452, 3454, 3455, 3457, 3459,
+ 3461, 3463, 3465, 3467, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3481, 3483, 3485, 3487, 3489,
+ 3491, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3507, 3509, 3511, 3513, 3515, 3517, 3519,
+ 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3548,
+ 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3578,
+ 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609,
+ 3611, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3629, 3631, 3633, 3635, 3637, 3639,
+ 3641, 3643, 3645, 3647, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3667, 3669,
+ 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3688, 3690, 3692, 3694, 3696, 3698, 3700,
+ 3702, 3704, 3706, 3708, 3710, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731,
+ 3733, 3735, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762,
+ 3764, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793,
+ 3795, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824,
+ 3826, 3828, 3830, 3832, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855,
+ 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3884, 3886,
+ 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918,
+ 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950,
+ 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982,
+ 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014,
+ 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046,
+ 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078,
+ 4080,
+};
+
+/* Generated table */
+const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {
+ 0, 5, 9, 14, 18, 22, 27, 32, 36, 41, 45, 50, 54, 59, 63, 68,
+ 72, 77, 81, 86, 90, 95, 99, 104, 108, 113, 117, 122, 126, 131, 135, 139,
+ 144, 149, 153, 158, 162, 167, 171, 176, 180, 185, 189, 194, 198, 203, 207, 212,
+ 216, 221, 225, 230, 234, 239, 243, 248, 252, 257, 261, 266, 270, 275, 279, 284,
+ 288, 293, 297, 302, 306, 311, 315, 320, 324, 328, 334, 338, 343, 347, 352, 356,
+ 360, 365, 369, 373, 377, 381, 386, 390, 394, 398, 402, 406, 410, 414, 418, 422,
+ 426, 430, 433, 437, 441, 445, 449, 452, 456, 460, 464, 467, 471, 475, 478, 482,
+ 485, 489, 492, 496, 499, 503, 506, 510, 513, 517, 520, 524, 527, 530, 534, 537,
+ 540, 544, 547, 550, 554, 557, 560, 563, 566, 570, 573, 576, 579, 582, 586, 589,
+ 592, 595, 598, 601, 604, 607, 610, 613, 616, 619, 622, 625, 628, 631, 634, 637,
+ 640, 643, 646, 649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 677, 680, 683,
+ 686, 689, 691, 694, 697, 700, 702, 705, 708, 711, 713, 716, 719, 721, 724, 727,
+ 729, 732, 735, 737, 740, 743, 745, 748, 750, 753, 756, 758, 761, 763, 766, 768,
+ 771, 773, 776, 779, 781, 784, 786, 789, 791, 794, 796, 799, 801, 803, 806, 808,
+ 811, 813, 816, 818, 821, 823, 825, 828, 830, 833, 835, 837, 840, 842, 844, 847,
+ 849, 851, 854, 856, 858, 861, 863, 865, 868, 870, 872, 875, 877, 879, 881, 884,
+ 886, 888, 891, 893, 895, 897, 900, 902, 904, 906, 908, 911, 913, 915, 917, 919,
+ 922, 924, 926, 928, 930, 933, 935, 937, 939, 941, 943, 946, 948, 950, 952, 954,
+ 956, 958, 960, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 984, 986, 988,
+ 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020,
+ 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052,
+ 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083,
+ 1085, 1087, 1089, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1107, 1109, 1111, 1113,
+ 1115, 1117, 1119, 1120, 1122, 1124, 1126, 1128, 1130, 1131, 1133, 1135, 1137, 1139, 1141, 1142,
+ 1144, 1146, 1148, 1150, 1151, 1153, 1155, 1157, 1159, 1160, 1162, 1164, 1166, 1168, 1169, 1171,
+ 1173, 1175, 1176, 1178, 1180, 1182, 1184, 1185, 1187, 1189, 1191, 1192, 1194, 1196, 1198, 1199,
+ 1201, 1203, 1204, 1206, 1208, 1210, 1211, 1213, 1215, 1217, 1218, 1220, 1222, 1223, 1225, 1227,
+ 1228, 1230, 1232, 1234, 1235, 1237, 1239, 1240, 1242, 1244, 1245, 1247, 1249, 1250, 1252, 1254,
+ 1255, 1257, 1259, 1260, 1262, 1264, 1265, 1267, 1269, 1270, 1272, 1274, 1275, 1277, 1279, 1280,
+ 1282, 1283, 1285, 1287, 1288, 1290, 1292, 1293, 1295, 1296, 1298, 1300, 1301, 1303, 1305, 1306,
+ 1308, 1309, 1311, 1313, 1314, 1316, 1317, 1319, 1321, 1322, 1324, 1325, 1327, 1328, 1330, 1332,
+ 1333, 1335, 1336, 1338, 1339, 1341, 1343, 1344, 1346, 1347, 1349, 1350, 1352, 1354, 1355, 1357,
+ 1358, 1360, 1361, 1363, 1364, 1366, 1367, 1369, 1371, 1372, 1374, 1375, 1377, 1378, 1380, 1381,
+ 1383, 1384, 1386, 1387, 1389, 1390, 1392, 1393, 1395, 1396, 1398, 1399, 1401, 1402, 1404, 1405,
+ 1407, 1408, 1410, 1411, 1413, 1414, 1416, 1417, 1419, 1420, 1422, 1423, 1425, 1426, 1428, 1429,
+ 1431, 1432, 1434, 1435, 1437, 1438, 1440, 1441, 1442, 1444, 1445, 1447, 1448, 1450, 1451, 1453,
+ 1454, 1456, 1457, 1458, 1460, 1461, 1463, 1464, 1466, 1467, 1469, 1470, 1471, 1473, 1474, 1476,
+ 1477, 1479, 1480, 1481, 1483, 1484, 1486, 1487, 1489, 1490, 1491, 1493, 1494, 1496, 1497, 1498,
+ 1500, 1501, 1503, 1504, 1505, 1507, 1508, 1510, 1511, 1512, 1514, 1515, 1517, 1518, 1519, 1521,
+ 1522, 1524, 1525, 1526, 1528, 1529, 1531, 1532, 1533, 1535, 1536, 1537, 1539, 1540, 1542, 1543,
+ 1544, 1546, 1547, 1548, 1550, 1551, 1553, 1554, 1555, 1557, 1558, 1559, 1561, 1562, 1563, 1565,
+ 1566, 1567, 1569, 1570, 1571, 1573, 1574, 1576, 1577, 1578, 1580, 1581, 1582, 1584, 1585, 1586,
+ 1588, 1589, 1590, 1592, 1593, 1594, 1596, 1597, 1598, 1600, 1601, 1602, 1603, 1605, 1606, 1607,
+ 1609, 1610, 1611, 1613, 1614, 1615, 1617, 1618, 1619, 1621, 1622, 1623, 1624, 1626, 1627, 1628,
+ 1630, 1631, 1632, 1634, 1635, 1636, 1637, 1639, 1640, 1641, 1643, 1644, 1645, 1647, 1648, 1649,
+ 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1661, 1662, 1663, 1664, 1666, 1667, 1668, 1670,
+ 1671, 1672, 1673, 1675, 1676, 1677, 1678, 1680, 1681, 1682, 1683, 1685, 1686, 1687, 1688, 1690,
+ 1691, 1692, 1693, 1695, 1696, 1697, 1698, 1700, 1701, 1702, 1703, 1705, 1706, 1707, 1708, 1710,
+ 1711, 1712, 1713, 1715, 1716, 1717, 1718, 1720, 1721, 1722, 1723, 1724, 1726, 1727, 1728, 1729,
+ 1731, 1732, 1733, 1734, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1745, 1746, 1748, 1749,
+ 1750, 1751, 1753, 1754, 1755, 1756, 1757, 1759, 1760, 1761, 1762, 1763, 1765, 1766, 1767, 1768,
+ 1769, 1771, 1772, 1773, 1774, 1775, 1777, 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, 1787,
+ 1788, 1790, 1791, 1792, 1793, 1794, 1796, 1797, 1798, 1799, 1800, 1801, 1803, 1804, 1805, 1806,
+ 1807, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1817, 1818, 1819, 1820, 1821, 1823, 1824, 1825,
+ 1826, 1827, 1828, 1829, 1831, 1832, 1833, 1834, 1835, 1836, 1838, 1839, 1840, 1841, 1842, 1843,
+ 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1861, 1862,
+ 1863, 1864, 1865, 1866, 1867, 1868, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880,
+ 1881, 1882, 1883, 1884, 1885, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1896, 1897, 1898,
+ 1899, 1900, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1916,
+ 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
+ 1934, 1935, 1936, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1950, 1951,
+ 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1963, 1964, 1965, 1966, 1967, 1968,
+ 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
+ 1986, 1987, 1988, 1989, 1990, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+ 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
+ 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2031, 2032, 2033, 2034, 2035, 2036,
+ 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052,
+ 2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
+ 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085,
+ 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101,
+ 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117,
+ 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133,
+ 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149,
+ 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165,
+ 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
+ 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196,
+ 2197, 2198, 2199, 2200, 2201, 2202, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211,
+ 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2224, 2225, 2226,
+ 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2241,
+ 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257,
+ 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2271,
+ 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2283, 2284, 2285, 2286,
+ 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2295, 2296, 2297, 2298, 2299, 2300, 2301,
+ 2302, 2303, 2304, 2305, 2306, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316,
+ 2317, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2327, 2328, 2329, 2330,
+ 2331, 2332, 2333, 2334, 2335, 2336, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345,
+ 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2354, 2355, 2356, 2357, 2358, 2359,
+ 2360, 2361, 2362, 2363, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2371, 2372, 2373,
+ 2374, 2375, 2376, 2377, 2378, 2379, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2386, 2387,
+ 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2401,
+ 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2415,
+ 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2428, 2429,
+ 2430, 2431, 2432, 2433, 2434, 2435, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2441, 2442, 2443,
+ 2444, 2445, 2446, 2447, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2453, 2454, 2455, 2456, 2457,
+ 2458, 2459, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2465, 2466, 2467, 2468, 2469, 2470, 2471,
+ 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2477, 2478, 2479, 2480, 2481, 2482, 2482, 2483, 2484,
+ 2485, 2486, 2487, 2488, 2488, 2489, 2490, 2491, 2492, 2493, 2493, 2494, 2495, 2496, 2497, 2498,
+ 2499, 2499, 2500, 2501, 2502, 2503, 2504, 2504, 2505, 2506, 2507, 2508, 2509, 2509, 2510, 2511,
+ 2512, 2513, 2514, 2514, 2515, 2516, 2517, 2518, 2519, 2519, 2520, 2521, 2522, 2523, 2524, 2524,
+ 2525, 2526, 2527, 2528, 2529, 2529, 2530, 2531, 2532, 2533, 2534, 2534, 2535, 2536, 2537, 2538,
+ 2539, 2539, 2540, 2541, 2542, 2543, 2544, 2544, 2545, 2546, 2547, 2548, 2548, 2549, 2550, 2551,
+ 2552, 2553, 2553, 2554, 2555, 2556, 2557, 2558, 2558, 2559, 2560, 2561, 2562, 2562, 2563, 2564,
+ 2565, 2566, 2567, 2567, 2568, 2569, 2570, 2571, 2571, 2572, 2573, 2574, 2575, 2576, 2576, 2577,
+ 2578, 2579, 2580, 2580, 2581, 2582, 2583, 2584, 2584, 2585, 2586, 2587, 2588, 2589, 2589, 2590,
+ 2591, 2592, 2593, 2593, 2594, 2595, 2596, 2597, 2597, 2598, 2599, 2600, 2601, 2601, 2602, 2603,
+ 2604, 2605, 2605, 2606, 2607, 2608, 2609, 2610, 2610, 2611, 2612, 2613, 2614, 2614, 2615, 2616,
+ 2617, 2618, 2618, 2619, 2620, 2621, 2622, 2622, 2623, 2624, 2625, 2626, 2626, 2627, 2628, 2629,
+ 2630, 2630, 2631, 2632, 2633, 2634, 2634, 2635, 2636, 2637, 2637, 2638, 2639, 2640, 2641, 2641,
+ 2642, 2643, 2644, 2645, 2645, 2646, 2647, 2648, 2649, 2649, 2650, 2651, 2652, 2653, 2653, 2654,
+ 2655, 2656, 2656, 2657, 2658, 2659, 2660, 2660, 2661, 2662, 2663, 2664, 2664, 2665, 2666, 2667,
+ 2668, 2668, 2669, 2670, 2671, 2671, 2672, 2673, 2674, 2675, 2675, 2676, 2677, 2678, 2678, 2679,
+ 2680, 2681, 2682, 2682, 2683, 2684, 2685, 2686, 2686, 2687, 2688, 2689, 2689, 2690, 2691, 2692,
+ 2693, 2693, 2694, 2695, 2696, 2696, 2697, 2698, 2699, 2700, 2700, 2701, 2702, 2703, 2703, 2704,
+ 2705, 2706, 2706, 2707, 2708, 2709, 2710, 2710, 2711, 2712, 2713, 2713, 2714, 2715, 2716, 2717,
+ 2717, 2718, 2719, 2720, 2720, 2721, 2722, 2723, 2723, 2724, 2725, 2726, 2727, 2727, 2728, 2729,
+ 2730, 2730, 2731, 2732, 2733, 2733, 2734, 2735, 2736, 2736, 2737, 2738, 2739, 2740, 2740, 2741,
+ 2742, 2743, 2743, 2744, 2745, 2746, 2746, 2747, 2748, 2749, 2749, 2750, 2751, 2752, 2752, 2753,
+ 2754, 2755, 2755, 2756, 2757, 2758, 2759, 2759, 2760, 2761, 2762, 2762, 2763, 2764, 2765, 2765,
+ 2766, 2767, 2768, 2768, 2769, 2770, 2771, 2771, 2772, 2773, 2774, 2774, 2775, 2776, 2777, 2777,
+ 2778, 2779, 2780, 2780, 2781, 2782, 2783, 2783, 2784, 2785, 2786, 2786, 2787, 2788, 2789, 2789,
+ 2790, 2791, 2792, 2792, 2793, 2794, 2795, 2795, 2796, 2797, 2798, 2798, 2799, 2800, 2801, 2801,
+ 2802, 2803, 2804, 2804, 2805, 2806, 2807, 2807, 2808, 2809, 2810, 2810, 2811, 2812, 2813, 2813,
+ 2814, 2815, 2815, 2816, 2817, 2818, 2818, 2819, 2820, 2821, 2821, 2822, 2823, 2824, 2824, 2825,
+ 2826, 2827, 2827, 2828, 2829, 2830, 2830, 2831, 2832, 2832, 2833, 2834, 2835, 2835, 2836, 2837,
+ 2838, 2838, 2839, 2840, 2841, 2841, 2842, 2843, 2844, 2844, 2845, 2846, 2846, 2847, 2848, 2849,
+ 2849, 2850, 2851, 2852, 2852, 2853, 2854, 2855, 2855, 2856, 2857, 2857, 2858, 2859, 2860, 2860,
+ 2861, 2862, 2863, 2863, 2864, 2865, 2865, 2866, 2867, 2868, 2868, 2869, 2870, 2871, 2871, 2872,
+ 2873, 2873, 2874, 2875, 2876, 2876, 2877, 2878, 2879, 2879, 2880, 2881, 2881, 2882, 2883, 2884,
+ 2884, 2885, 2886, 2886, 2887, 2888, 2889, 2889, 2890, 2891, 2892, 2892, 2893, 2894, 2894, 2895,
+ 2896, 2897, 2897, 2898, 2899, 2899, 2900, 2901, 2902, 2902, 2903, 2904, 2904, 2905, 2906, 2907,
+ 2907, 2908, 2909, 2909, 2910, 2911, 2912, 2912, 2913, 2914, 2914, 2915, 2916, 2917, 2917, 2918,
+ 2919, 2919, 2920, 2921, 2922, 2922, 2923, 2924, 2924, 2925, 2926, 2927, 2927, 2928, 2929, 2929,
+ 2930, 2931, 2932, 2932, 2933, 2934, 2934, 2935, 2936, 2937, 2937, 2938, 2939, 2939, 2940, 2941,
+ 2941, 2942, 2943, 2944, 2944, 2945, 2946, 2946, 2947, 2948, 2949, 2949, 2950, 2951, 2951, 2952,
+ 2953, 2953, 2954, 2955, 2956, 2956, 2957, 2958, 2958, 2959, 2960, 2961, 2961, 2962, 2963, 2963,
+ 2964, 2965, 2965, 2966, 2967, 2968, 2968, 2969, 2970, 2970, 2971, 2972, 2972, 2973, 2974, 2975,
+ 2975, 2976, 2977, 2977, 2978, 2979, 2979, 2980, 2981, 2982, 2982, 2983, 2984, 2984, 2985, 2986,
+ 2986, 2987, 2988, 2988, 2989, 2990, 2991, 2991, 2992, 2993, 2993, 2994, 2995, 2995, 2996, 2997,
+ 2998, 2998, 2999, 3000, 3000, 3001, 3002, 3002, 3003, 3004, 3004, 3005, 3006, 3006, 3007, 3008,
+ 3009, 3009, 3010, 3011, 3011, 3012, 3013, 3013, 3014, 3015, 3015, 3016, 3017, 3018, 3018, 3019,
+ 3020, 3020, 3021, 3022, 3022, 3023, 3024, 3024, 3025, 3026, 3026, 3027, 3028, 3029, 3029, 3030,
+ 3031, 3031, 3032, 3033, 3033, 3034, 3035, 3035, 3036, 3037, 3037, 3038, 3039, 3039, 3040, 3041,
+ 3042, 3042, 3043, 3044, 3044, 3045, 3046, 3046, 3047, 3048, 3048, 3049, 3050, 3050, 3051, 3052,
+ 3052, 3053, 3054, 3054, 3055, 3056, 3056, 3057, 3058, 3059, 3059, 3060, 3061, 3061, 3062, 3063,
+ 3063, 3064, 3065, 3065, 3066, 3067, 3067, 3068, 3069, 3069, 3070, 3071, 3071, 3072, 3073, 3073,
+ 3074, 3075, 3075, 3076, 3077, 3077, 3078, 3079, 3079, 3080, 3081, 3081, 3082, 3083, 3084, 3084,
+ 3085, 3086, 3086, 3087, 3088, 3088, 3089, 3090, 3090, 3091, 3092, 3092, 3093, 3094, 3094, 3095,
+ 3096, 3096, 3097, 3098, 3098, 3099, 3100, 3100, 3101, 3102, 3102, 3103, 3104, 3104, 3105, 3106,
+ 3106, 3107, 3108, 3108, 3109, 3110, 3110, 3111, 3112, 3112, 3113, 3114, 3114, 3115, 3116, 3116,
+ 3117, 3118, 3118, 3119, 3120, 3120, 3121, 3122, 3122, 3123, 3124, 3124, 3125, 3126, 3126, 3127,
+ 3128, 3128, 3129, 3130, 3130, 3131, 3132, 3132, 3133, 3134, 3134, 3135, 3135, 3136, 3137, 3137,
+ 3138, 3139, 3139, 3140, 3141, 3141, 3142, 3143, 3143, 3144, 3145, 3145, 3146, 3147, 3147, 3148,
+ 3149, 3149, 3150, 3151, 3151, 3152, 3153, 3153, 3154, 3155, 3155, 3156, 3157, 3157, 3158, 3159,
+ 3159, 3160, 3160, 3161, 3162, 3162, 3163, 3164, 3164, 3165, 3166, 3166, 3167, 3168, 3168, 3169,
+ 3170, 3170, 3171, 3172, 3172, 3173, 3174, 3174, 3175, 3175, 3176, 3177, 3177, 3178, 3179, 3179,
+ 3180, 3181, 3181, 3182, 3183, 3183, 3184, 3185, 3185, 3186, 3187, 3187, 3188, 3188, 3189, 3190,
+ 3190, 3191, 3192, 3192, 3193, 3194, 3194, 3195, 3196, 3196, 3197, 3198, 3198, 3199, 3199, 3200,
+ 3201, 3201, 3202, 3203, 3203, 3204, 3205, 3205, 3206, 3207, 3207, 3208, 3209, 3209, 3210, 3210,
+ 3211, 3212, 3212, 3213, 3214, 3214, 3215, 3216, 3216, 3217, 3218, 3218, 3219, 3219, 3220, 3221,
+ 3221, 3222, 3223, 3223, 3224, 3225, 3225, 3226, 3227, 3227, 3228, 3228, 3229, 3230, 3230, 3231,
+ 3232, 3232, 3233, 3234, 3234, 3235, 3235, 3236, 3237, 3237, 3238, 3239, 3239, 3240, 3241, 3241,
+ 3242, 3242, 3243, 3244, 3244, 3245, 3246, 3246, 3247, 3248, 3248, 3249, 3249, 3250, 3251, 3251,
+ 3252, 3253, 3253, 3254, 3255, 3255, 3256, 3256, 3257, 3258, 3258, 3259, 3260, 3260, 3261, 3262,
+ 3262, 3263, 3263, 3264, 3265, 3265, 3266, 3267, 3267, 3268, 3268, 3269, 3270, 3270, 3271, 3272,
+ 3272, 3273, 3274, 3274, 3275, 3275, 3276, 3277, 3277, 3278, 3279, 3279, 3280, 3280, 3281, 3282,
+ 3282, 3283, 3284, 3284, 3285, 3285, 3286, 3287, 3287, 3288, 3289, 3289, 3290, 3290, 3291, 3292,
+ 3292, 3293, 3294, 3294, 3295, 3295, 3296, 3297, 3297, 3298, 3299, 3299, 3300, 3300, 3301, 3302,
+ 3302, 3303, 3304, 3304, 3305, 3305, 3306, 3307, 3307, 3308, 3309, 3309, 3310, 3310, 3311, 3312,
+ 3312, 3313, 3314, 3314, 3315, 3315, 3316, 3317, 3317, 3318, 3319, 3319, 3320, 3320, 3321, 3322,
+ 3322, 3323, 3323, 3324, 3325, 3325, 3326, 3327, 3327, 3328, 3328, 3329, 3330, 3330, 3331, 3332,
+ 3332, 3333, 3333, 3334, 3335, 3335, 3336, 3336, 3337, 3338, 3338, 3339, 3340, 3340, 3341, 3341,
+ 3342, 3343, 3343, 3344, 3345, 3345, 3346, 3346, 3347, 3348, 3348, 3349, 3349, 3350, 3351, 3351,
+ 3352, 3352, 3353, 3354, 3354, 3355, 3356, 3356, 3357, 3357, 3358, 3359, 3359, 3360, 3360, 3361,
+ 3362, 3362, 3363, 3364, 3364, 3365, 3365, 3366, 3367, 3367, 3368, 3368, 3369, 3370, 3370, 3371,
+ 3371, 3372, 3373, 3373, 3374, 3375, 3375, 3376, 3376, 3377, 3378, 3378, 3379, 3379, 3380, 3381,
+ 3381, 3382, 3382, 3383, 3384, 3384, 3385, 3385, 3386, 3387, 3387, 3388, 3389, 3389, 3390, 3390,
+ 3391, 3392, 3392, 3393, 3393, 3394, 3395, 3395, 3396, 3396, 3397, 3398, 3398, 3399, 3399, 3400,
+ 3401, 3401, 3402, 3402, 3403, 3404, 3404, 3405, 3405, 3406, 3407, 3407, 3408, 3408, 3409, 3410,
+ 3410, 3411, 3411, 3412, 3413, 3413, 3414, 3414, 3415, 3416, 3416, 3417, 3418, 3418, 3419, 3419,
+ 3420, 3421, 3421, 3422, 3422, 3423, 3424, 3424, 3425, 3425, 3426, 3427, 3427, 3428, 3428, 3429,
+ 3430, 3430, 3431, 3431, 3432, 3433, 3433, 3434, 3434, 3435, 3435, 3436, 3437, 3437, 3438, 3438,
+ 3439, 3440, 3440, 3441, 3441, 3442, 3443, 3443, 3444, 3444, 3445, 3446, 3446, 3447, 3447, 3448,
+ 3449, 3449, 3450, 3450, 3451, 3452, 3452, 3453, 3453, 3454, 3455, 3455, 3456, 3456, 3457, 3458,
+ 3458, 3459, 3459, 3460, 3461, 3461, 3462, 3462, 3463, 3463, 3464, 3465, 3465, 3466, 3466, 3467,
+ 3468, 3468, 3469, 3469, 3470, 3471, 3471, 3472, 3472, 3473, 3474, 3474, 3475, 3475, 3476, 3476,
+ 3477, 3478, 3478, 3479, 3479, 3480, 3481, 3481, 3482, 3482, 3483, 3484, 3484, 3485, 3485, 3486,
+ 3486, 3487, 3488, 3488, 3489, 3489, 3490, 3491, 3491, 3492, 3492, 3493, 3494, 3494, 3495, 3495,
+ 3496, 3496, 3497, 3498, 3498, 3499, 3499, 3500, 3501, 3501, 3502, 3502, 3503, 3504, 3504, 3505,
+ 3505, 3506, 3506, 3507, 3508, 3508, 3509, 3509, 3510, 3511, 3511, 3512, 3512, 3513, 3513, 3514,
+ 3515, 3515, 3516, 3516, 3517, 3518, 3518, 3519, 3519, 3520, 3520, 3521, 3522, 3522, 3523, 3523,
+ 3524, 3525, 3525, 3526, 3526, 3527, 3527, 3528, 3529, 3529, 3530, 3530, 3531, 3531, 3532, 3533,
+ 3533, 3534, 3534, 3535, 3536, 3536, 3537, 3537, 3538, 3538, 3539, 3540, 3540, 3541, 3541, 3542,
+ 3542, 3543, 3544, 3544, 3545, 3545, 3546, 3547, 3547, 3548, 3548, 3549, 3549, 3550, 3551, 3551,
+ 3552, 3552, 3553, 3553, 3554, 3555, 3555, 3556, 3556, 3557, 3557, 3558, 3559, 3559, 3560, 3560,
+ 3561, 3561, 3562, 3563, 3563, 3564, 3564, 3565, 3566, 3566, 3567, 3567, 3568, 3568, 3569, 3570,
+ 3570, 3571, 3571, 3572, 3572, 3573, 3574, 3574, 3575, 3575, 3576, 3576, 3577, 3578, 3578, 3579,
+ 3579, 3580, 3580, 3581, 3582, 3582, 3583, 3583, 3584, 3584, 3585, 3586, 3586, 3587, 3587, 3588,
+ 3588, 3589, 3590, 3590, 3591, 3591, 3592, 3592, 3593, 3594, 3594, 3595, 3595, 3596, 3596, 3597,
+ 3597, 3598, 3599, 3599, 3600, 3600, 3601, 3601, 3602, 3603, 3603, 3604, 3604, 3605, 3605, 3606,
+ 3607, 3607, 3608, 3608, 3609, 3609, 3610, 3611, 3611, 3612, 3612, 3613, 3613, 3614, 3615, 3615,
+ 3616, 3616, 3617, 3617, 3618, 3618, 3619, 3620, 3620, 3621, 3621, 3622, 3622, 3623, 3624, 3624,
+ 3625, 3625, 3626, 3626, 3627, 3627, 3628, 3629, 3629, 3630, 3630, 3631, 3631, 3632, 3633, 3633,
+ 3634, 3634, 3635, 3635, 3636, 3636, 3637, 3638, 3638, 3639, 3639, 3640, 3640, 3641, 3642, 3642,
+ 3643, 3643, 3644, 3644, 3645, 3645, 3646, 3647, 3647, 3648, 3648, 3649, 3649, 3650, 3650, 3651,
+ 3652, 3652, 3653, 3653, 3654, 3654, 3655, 3656, 3656, 3657, 3657, 3658, 3658, 3659, 3659, 3660,
+ 3661, 3661, 3662, 3662, 3663, 3663, 3664, 3664, 3665, 3666, 3666, 3667, 3667, 3668, 3668, 3669,
+ 3669, 3670, 3671, 3671, 3672, 3672, 3673, 3673, 3674, 3674, 3675, 3676, 3676, 3677, 3677, 3678,
+ 3678, 3679, 3679, 3680, 3681, 3681, 3682, 3682, 3683, 3683, 3684, 3684, 3685, 3686, 3686, 3687,
+ 3687, 3688, 3688, 3689, 3689, 3690, 3691, 3691, 3692, 3692, 3693, 3693, 3694, 3694, 3695, 3695,
+ 3696, 3697, 3697, 3698, 3698, 3699, 3699, 3700, 3700, 3701, 3702, 3702, 3703, 3703, 3704, 3704,
+ 3705, 3705, 3706, 3707, 3707, 3708, 3708, 3709, 3709, 3710, 3710, 3711, 3711, 3712, 3713, 3713,
+ 3714, 3714, 3715, 3715, 3716, 3716, 3717, 3717, 3718, 3719, 3719, 3720, 3720, 3721, 3721, 3722,
+ 3722, 3723, 3724, 3724, 3725, 3725, 3726, 3726, 3727, 3727, 3728, 3728, 3729, 3730, 3730, 3731,
+ 3731, 3732, 3732, 3733, 3733, 3734, 3734, 3735, 3736, 3736, 3737, 3737, 3738, 3738, 3739, 3739,
+ 3740, 3740, 3741, 3742, 3742, 3743, 3743, 3744, 3744, 3745, 3745, 3746, 3746, 3747, 3748, 3748,
+ 3749, 3749, 3750, 3750, 3751, 3751, 3752, 3752, 3753, 3753, 3754, 3755, 3755, 3756, 3756, 3757,
+ 3757, 3758, 3758, 3759, 3759, 3760, 3761, 3761, 3762, 3762, 3763, 3763, 3764, 3764, 3765, 3765,
+ 3766, 3766, 3767, 3768, 3768, 3769, 3769, 3770, 3770, 3771, 3771, 3772, 3772, 3773, 3773, 3774,
+ 3775, 3775, 3776, 3776, 3777, 3777, 3778, 3778, 3779, 3779, 3780, 3781, 3781, 3782, 3782, 3783,
+ 3783, 3784, 3784, 3785, 3785, 3786, 3786, 3787, 3787, 3788, 3789, 3789, 3790, 3790, 3791, 3791,
+ 3792, 3792, 3793, 3793, 3794, 3794, 3795, 3796, 3796, 3797, 3797, 3798, 3798, 3799, 3799, 3800,
+ 3800, 3801, 3801, 3802, 3802, 3803, 3804, 3804, 3805, 3805, 3806, 3806, 3807, 3807, 3808, 3808,
+ 3809, 3809, 3810, 3811, 3811, 3812, 3812, 3813, 3813, 3814, 3814, 3815, 3815, 3816, 3816, 3817,
+ 3817, 3818, 3819, 3819, 3820, 3820, 3821, 3821, 3822, 3822, 3823, 3823, 3824, 3824, 3825, 3825,
+ 3826, 3826, 3827, 3828, 3828, 3829, 3829, 3830, 3830, 3831, 3831, 3832, 3832, 3833, 3833, 3834,
+ 3834, 3835, 3835, 3836, 3837, 3837, 3838, 3838, 3839, 3839, 3840, 3840, 3841, 3841, 3842, 3842,
+ 3843, 3843, 3844, 3844, 3845, 3846, 3846, 3847, 3847, 3848, 3848, 3849, 3849, 3850, 3850, 3851,
+ 3851, 3852, 3852, 3853, 3853, 3854, 3855, 3855, 3856, 3856, 3857, 3857, 3858, 3858, 3859, 3859,
+ 3860, 3860, 3861, 3861, 3862, 3862, 3863, 3863, 3864, 3864, 3865, 3866, 3866, 3867, 3867, 3868,
+ 3868, 3869, 3869, 3870, 3870, 3871, 3871, 3872, 3872, 3873, 3873, 3874, 3874, 3875, 3876, 3876,
+ 3877, 3877, 3878, 3878, 3879, 3879, 3880, 3880, 3881, 3881, 3882, 3882, 3883, 3883, 3884, 3884,
+ 3885, 3885, 3886, 3886, 3887, 3888, 3888, 3889, 3889, 3890, 3890, 3891, 3891, 3892, 3892, 3893,
+ 3893, 3894, 3894, 3895, 3895, 3896, 3896, 3897, 3897, 3898, 3898, 3899, 3900, 3900, 3901, 3901,
+ 3902, 3902, 3903, 3903, 3904, 3904, 3905, 3905, 3906, 3906, 3907, 3907, 3908, 3908, 3909, 3909,
+ 3910, 3910, 3911, 3911, 3912, 3912, 3913, 3914, 3914, 3915, 3915, 3916, 3916, 3917, 3917, 3918,
+ 3918, 3919, 3919, 3920, 3920, 3921, 3921, 3922, 3922, 3923, 3923, 3924, 3924, 3925, 3925, 3926,
+ 3926, 3927, 3927, 3928, 3929, 3929, 3930, 3930, 3931, 3931, 3932, 3932, 3933, 3933, 3934, 3934,
+ 3935, 3935, 3936, 3936, 3937, 3937, 3938, 3938, 3939, 3939, 3940, 3940, 3941, 3941, 3942, 3942,
+ 3943, 3943, 3944, 3944, 3945, 3945, 3946, 3947, 3947, 3948, 3948, 3949, 3949, 3950, 3950, 3951,
+ 3951, 3952, 3952, 3953, 3953, 3954, 3954, 3955, 3955, 3956, 3956, 3957, 3957, 3958, 3958, 3959,
+ 3959, 3960, 3960, 3961, 3961, 3962, 3962, 3963, 3963, 3964, 3964, 3965, 3965, 3966, 3966, 3967,
+ 3967, 3968, 3969, 3969, 3970, 3970, 3971, 3971, 3972, 3972, 3973, 3973, 3974, 3974, 3975, 3975,
+ 3976, 3976, 3977, 3977, 3978, 3978, 3979, 3979, 3980, 3980, 3981, 3981, 3982, 3982, 3983, 3983,
+ 3984, 3984, 3985, 3985, 3986, 3986, 3987, 3987, 3988, 3988, 3989, 3989, 3990, 3990, 3991, 3991,
+ 3992, 3992, 3993, 3993, 3994, 3994, 3995, 3995, 3996, 3996, 3997, 3997, 3998, 3998, 3999, 3999,
+ 4000, 4001, 4001, 4002, 4002, 4003, 4003, 4004, 4004, 4005, 4005, 4006, 4006, 4007, 4007, 4008,
+ 4008, 4009, 4009, 4010, 4010, 4011, 4011, 4012, 4012, 4013, 4013, 4014, 4014, 4015, 4015, 4016,
+ 4016, 4017, 4017, 4018, 4018, 4019, 4019, 4020, 4020, 4021, 4021, 4022, 4022, 4023, 4023, 4024,
+ 4024, 4025, 4025, 4026, 4026, 4027, 4027, 4028, 4028, 4029, 4029, 4030, 4030, 4031, 4031, 4032,
+ 4032, 4033, 4033, 4034, 4034, 4035, 4035, 4036, 4036, 4037, 4037, 4038, 4038, 4039, 4039, 4040,
+ 4040, 4041, 4041, 4042, 4042, 4043, 4043, 4044, 4044, 4045, 4045, 4046, 4046, 4047, 4047, 4048,
+ 4048, 4049, 4049, 4050, 4050, 4051, 4051, 4052, 4052, 4053, 4053, 4054, 4054, 4055, 4055, 4056,
+ 4056, 4057, 4057, 4058, 4058, 4059, 4059, 4060, 4060, 4061, 4061, 4062, 4062, 4063, 4063, 4064,
+ 4064, 4065, 4065, 4066, 4066, 4067, 4067, 4068, 4068, 4069, 4069, 4070, 4070, 4071, 4071, 4072,
+ 4072, 4073, 4073, 4074, 4074, 4075, 4075, 4076, 4076, 4077, 4077, 4078, 4078, 4079, 4079, 4080,
+ 4080,
+};
+
+/* Generated table */
+const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1] = {
+ [V4L2_COLORSPACE_SMPTE170M][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_SMPTE170M][1] = { 2953, 2963, 586 },
+ [V4L2_COLORSPACE_SMPTE170M][2] = { 0, 2967, 2937 },
+ [V4L2_COLORSPACE_SMPTE170M][3] = { 88, 2990, 575 },
+ [V4L2_COLORSPACE_SMPTE170M][4] = { 3016, 259, 2933 },
+ [V4L2_COLORSPACE_SMPTE170M][5] = { 3030, 405, 558 },
+ [V4L2_COLORSPACE_SMPTE170M][6] = { 478, 428, 2931 },
+ [V4L2_COLORSPACE_SMPTE170M][7] = { 547, 547, 547 },
[V4L2_COLORSPACE_SMPTE240M][0] = { 2926, 2926, 2926 },
- [V4L2_COLORSPACE_SMPTE240M][1] = { 2926, 2926, 857 },
- [V4L2_COLORSPACE_SMPTE240M][2] = { 1594, 2901, 2901 },
- [V4L2_COLORSPACE_SMPTE240M][3] = { 1594, 2901, 774 },
- [V4L2_COLORSPACE_SMPTE240M][4] = { 2484, 618, 2858 },
- [V4L2_COLORSPACE_SMPTE240M][5] = { 2484, 618, 617 },
- [V4L2_COLORSPACE_SMPTE240M][6] = { 507, 507, 2832 },
+ [V4L2_COLORSPACE_SMPTE240M][1] = { 2941, 2950, 546 },
+ [V4L2_COLORSPACE_SMPTE240M][2] = { 0, 2954, 2924 },
+ [V4L2_COLORSPACE_SMPTE240M][3] = { 78, 2978, 536 },
+ [V4L2_COLORSPACE_SMPTE240M][4] = { 3004, 230, 2920 },
+ [V4L2_COLORSPACE_SMPTE240M][5] = { 3018, 363, 518 },
+ [V4L2_COLORSPACE_SMPTE240M][6] = { 437, 387, 2918 },
[V4L2_COLORSPACE_SMPTE240M][7] = { 507, 507, 507 },
[V4L2_COLORSPACE_REC709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_REC709][1] = { 2939, 2939, 547 },
@@ -103,21 +623,21 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
[V4L2_COLORSPACE_REC709][5] = { 2939, 547, 547 },
[V4L2_COLORSPACE_REC709][6] = { 547, 547, 2939 },
[V4L2_COLORSPACE_REC709][7] = { 547, 547, 547 },
- [V4L2_COLORSPACE_470_SYSTEM_M][0] = { 2894, 2988, 2808 },
- [V4L2_COLORSPACE_470_SYSTEM_M][1] = { 2847, 3070, 843 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][0] = { 2892, 2988, 2807 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][1] = { 2846, 3070, 843 },
[V4L2_COLORSPACE_470_SYSTEM_M][2] = { 1656, 2962, 2783 },
[V4L2_COLORSPACE_470_SYSTEM_M][3] = { 1572, 3045, 763 },
- [V4L2_COLORSPACE_470_SYSTEM_M][4] = { 2477, 229, 2743 },
- [V4L2_COLORSPACE_470_SYSTEM_M][5] = { 2422, 672, 614 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][4] = { 2476, 229, 2742 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][5] = { 2420, 672, 614 },
[V4L2_COLORSPACE_470_SYSTEM_M][6] = { 725, 63, 2718 },
[V4L2_COLORSPACE_470_SYSTEM_M][7] = { 534, 561, 509 },
[V4L2_COLORSPACE_470_SYSTEM_BG][0] = { 2939, 2939, 2939 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][1] = { 2939, 2939, 621 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][1] = { 2939, 2939, 464 },
[V4L2_COLORSPACE_470_SYSTEM_BG][2] = { 786, 2939, 2939 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][3] = { 786, 2939, 621 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][4] = { 2879, 547, 2923 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][3] = { 786, 2939, 464 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][4] = { 2879, 547, 2956 },
[V4L2_COLORSPACE_470_SYSTEM_BG][5] = { 2879, 547, 547 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][6] = { 547, 547, 2923 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][6] = { 547, 547, 2956 },
[V4L2_COLORSPACE_470_SYSTEM_BG][7] = { 547, 547, 547 },
[V4L2_COLORSPACE_SRGB][0] = { 3056, 3056, 3056 },
[V4L2_COLORSPACE_SRGB][1] = { 3056, 3056, 800 },
@@ -127,6 +647,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
[V4L2_COLORSPACE_SRGB][5] = { 3056, 800, 800 },
[V4L2_COLORSPACE_SRGB][6] = { 800, 800, 3056 },
[V4L2_COLORSPACE_SRGB][7] = { 800, 800, 800 },
+ [V4L2_COLORSPACE_ADOBERGB][0] = { 3033, 3033, 3033 },
+ [V4L2_COLORSPACE_ADOBERGB][1] = { 3033, 3033, 1063 },
+ [V4L2_COLORSPACE_ADOBERGB][2] = { 1828, 3033, 3033 },
+ [V4L2_COLORSPACE_ADOBERGB][3] = { 1828, 3033, 1063 },
+ [V4L2_COLORSPACE_ADOBERGB][4] = { 2633, 851, 2979 },
+ [V4L2_COLORSPACE_ADOBERGB][5] = { 2633, 851, 851 },
+ [V4L2_COLORSPACE_ADOBERGB][6] = { 851, 851, 2979 },
+ [V4L2_COLORSPACE_ADOBERGB][7] = { 851, 851, 851 },
+ [V4L2_COLORSPACE_BT2020][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_BT2020][1] = { 2877, 2923, 1058 },
+ [V4L2_COLORSPACE_BT2020][2] = { 1837, 2840, 2916 },
+ [V4L2_COLORSPACE_BT2020][3] = { 1734, 2823, 993 },
+ [V4L2_COLORSPACE_BT2020][4] = { 2427, 961, 2812 },
+ [V4L2_COLORSPACE_BT2020][5] = { 2351, 912, 648 },
+ [V4L2_COLORSPACE_BT2020][6] = { 792, 618, 2788 },
+ [V4L2_COLORSPACE_BT2020][7] = { 547, 547, 547 },
};
#else
@@ -138,29 +674,40 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
#include <stdlib.h>
static const double rec709_to_ntsc1953[3][3] = {
- { 0.6698, 0.2678, 0.0323 },
- { 0.0185, 1.0742, -0.0603 },
- { 0.0162, 0.0432, 0.8551 }
+ { 0.6689794, 0.2678309, 0.0323187 },
+ { 0.0184901, 1.0742442, -0.0602820 },
+ { 0.0162259, 0.0431716, 0.8549253 }
};
static const double rec709_to_ebu[3][3] = {
- { 0.9578, 0.0422, 0 },
- { 0 , 1 , 0 },
- { 0 , 0.0118, 0.9882 }
+ { 0.9578221, 0.0421779, -0.0000000 },
+ { -0.0000000, 1.0000000, 0.0000000 },
+ { -0.0000000, -0.0119367, 1.0119367 }
};
static const double rec709_to_170m[3][3] = {
- { 1.0654, -0.0554, -0.0010 },
- { -0.0196, 1.0364, -0.0167 },
- { 0.0016, 0.0044, 0.9940 }
+ { 1.0653640, -0.0553900, -0.0099740 },
+ { -0.0196361, 1.0363630, -0.0167269 },
+ { 0.0016327, 0.0044133, 0.9939540 },
};
static const double rec709_to_240m[3][3] = {
- { 0.7151, 0.2849, 0 },
- { 0.0179, 0.9821, 0 },
- { 0.0177, 0.0472, 0.9350 }
+ { 1.0653640, -0.0553900, -0.0099740 },
+ { -0.0196361, 1.0363630, -0.0167269 },
+ { 0.0016327, 0.0044133, 0.9939540 },
+};
+
+static const double rec709_to_adobergb[3][3] = {
+ { 0.7151627, 0.2848373, -0.0000000 },
+ { 0.0000000, 1.0000000, 0.0000000 },
+ { -0.0000000, 0.0411705, 0.9588295 },
};
+static const double rec709_to_bt2020[3][3] = {
+ { 0.6274524, 0.3292485, 0.0432991 },
+ { 0.0691092, 0.9195311, 0.0113597 },
+ { 0.0163976, 0.0880301, 0.8955723 },
+};
static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
{
@@ -176,7 +723,18 @@ static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
static double transfer_srgb_to_rgb(double v)
{
- return (v <= 0.03928) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
+ if (v < -0.04045)
+ return pow((-v + 0.055) / 1.055, 2.4);
+ return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
+}
+
+static double transfer_rgb_to_srgb(double v)
+{
+ if (v <= -0.0031308)
+ return -1.055 * pow(-v, 1.0 / 2.4) + 0.055;
+ if (v <= 0.0031308)
+ return v * 12.92;
+ return 1.055 * pow(v, 1.0 / 2.4) - 0.055;
}
static double transfer_rgb_to_smpte240m(double v)
@@ -186,9 +744,21 @@ static double transfer_rgb_to_smpte240m(double v)
static double transfer_rgb_to_rec709(double v)
{
+ if (v <= -0.018)
+ return -1.099 * pow(-v, 0.45) + 0.099;
return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
}
+static double transfer_rec709_to_rgb(double v)
+{
+ return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
+}
+
+static double transfer_rgb_to_adobergb(double v)
+{
+ return pow(v, 1.0 / 2.19921875);
+}
+
static double transfer_srgb_to_rec709(double v)
{
return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
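The generator above combines two transfer characteristics: the sRGB piece-wise curve (linear below roughly 0.04 with slope 1/12.92, gamma 2.4 above) and the Rec. 709 OETF (linear below 0.018 with slope 4.5, power 0.45 above), plus extra branches for the negative xvYCC range. A minimal stand-alone sketch of the positive-range round trip; the program and function names are illustrative, not part of the patch:

/* Hypothetical stand-alone sketch of the sRGB <-> linear <-> Rec. 709 round trip. */
#include <math.h>
#include <stdio.h>

static double srgb_to_linear(double v)
{
	return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
}

static double linear_to_rec709(double v)
{
	return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
}

static double rec709_to_linear(double v)
{
	return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
}

int main(void)
{
	double v;

	for (v = 0.0; v <= 1.0; v += 0.125) {
		double lin = srgb_to_linear(v);
		double enc = linear_to_rec709(lin);

		printf("sRGB %.3f -> linear %.6f -> Rec709 %.6f -> linear %.6f\n",
		       v, lin, enc, rec709_to_linear(enc));
	}
	return 0;
}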
@@ -196,6 +766,8 @@ static double transfer_srgb_to_rec709(double v)
static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b)
{
+ int clamp = 1;
+
/* Convert the primaries of Rec. 709 Linear RGB */
switch (colorspace) {
case V4L2_COLORSPACE_SMPTE240M:
@@ -222,15 +794,29 @@ static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b
*b = transfer_srgb_to_rgb(*b);
mult_matrix(r, g, b, rec709_to_ntsc1953);
break;
+ case V4L2_COLORSPACE_ADOBERGB:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_adobergb);
+ break;
+ case V4L2_COLORSPACE_BT2020:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_bt2020);
+ break;
case V4L2_COLORSPACE_SRGB:
case V4L2_COLORSPACE_REC709:
default:
break;
}
- *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
- *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
- *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
+ if (clamp) {
+ *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
+ *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
+ *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
+ }
/* Encode to gamma corrected colorspace */
switch (colorspace) {
@@ -242,12 +828,18 @@ static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b
case V4L2_COLORSPACE_SMPTE170M:
case V4L2_COLORSPACE_470_SYSTEM_M:
case V4L2_COLORSPACE_470_SYSTEM_BG:
+ case V4L2_COLORSPACE_BT2020:
*r = transfer_rgb_to_rec709(*r);
*g = transfer_rgb_to_rec709(*g);
*b = transfer_rgb_to_rec709(*b);
break;
case V4L2_COLORSPACE_SRGB:
break;
+ case V4L2_COLORSPACE_ADOBERGB:
+ *r = transfer_rgb_to_adobergb(*r);
+ *g = transfer_rgb_to_adobergb(*g);
+ *b = transfer_rgb_to_adobergb(*b);
+ break;
case V4L2_COLORSPACE_REC709:
default:
*r = transfer_srgb_to_rec709(*r);
@@ -269,6 +861,8 @@ int main(int argc, char **argv)
V4L2_COLORSPACE_470_SYSTEM_BG,
0,
V4L2_COLORSPACE_SRGB,
+ V4L2_COLORSPACE_ADOBERGB,
+ V4L2_COLORSPACE_BT2020,
};
static const char * const colorspace_names[] = {
"",
@@ -280,13 +874,39 @@ int main(int argc, char **argv)
"V4L2_COLORSPACE_470_SYSTEM_BG",
"",
"V4L2_COLORSPACE_SRGB",
+ "V4L2_COLORSPACE_ADOBERGB",
+ "V4L2_COLORSPACE_BT2020",
};
int i;
int c;
printf("/* Generated table */\n");
- printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
- for (c = 0; c <= V4L2_COLORSPACE_SRGB; c++) {
+ printf("const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {");
+ for (i = 0; i <= 255 * 16; i++) {
+ if (i % 16 == 0)
+ printf("\n\t");
+ printf("%4d,%s",
+ (int)(0.5 + 16.0 * 255.0 *
+ transfer_rec709_to_rgb(i / (16.0 * 255.0))),
+ i % 16 == 15 || i == 255 * 16 ? "" : " ");
+ }
+ printf("\n};\n\n");
+
+ printf("/* Generated table */\n");
+ printf("const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {");
+ for (i = 0; i <= 255 * 16; i++) {
+ if (i % 16 == 0)
+ printf("\n\t");
+ printf("%4d,%s",
+ (int)(0.5 + 16.0 * 255.0 *
+ transfer_rgb_to_rec709(i / (16.0 * 255.0))),
+ i % 16 == 15 || i == 255 * 16 ? "" : " ");
+ }
+ printf("\n};\n\n");
+
+ printf("/* Generated table */\n");
+ printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
+ for (c = 0; c <= V4L2_COLORSPACE_BT2020; c++) {
for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
double r, g, b;
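main() now also emits two lookup tables that cover the 12-bit component range 0..0xff0 (8-bit values carried with 4 fractional bits, hence 255 * 16 + 1 entries). A reduced, hypothetical host-side sketch of how such a table is built and spot-checked, under the same rounding as the printf loops above:

/* Reduced, hypothetical sketch of building a 12-bit Rec. 709 -> linear table. */
#include <math.h>
#include <stdio.h>

#define TBL_LEN (255 * 16 + 1)	/* indices 0 .. 0xff0 */

static double rec709_to_lin(double v)
{
	return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
}

int main(void)
{
	static unsigned short tbl[TBL_LEN];
	int i;

	for (i = 0; i < TBL_LEN; i++)
		tbl[i] = (unsigned short)(0.5 + 16.0 * 255.0 *
					  rec709_to_lin(i / (16.0 * 255.0)));

	/* spot-check a few entries; full scale maps back to 0xff0 */
	printf("tbl[0]=%u tbl[0x800]=%u tbl[0xff0]=%u\n",
	       tbl[0], tbl[0x800], tbl[0xff0]);
	return 0;
}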
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
index a2678fbec256..2c333356451c 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.h
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.h
@@ -59,6 +59,8 @@ enum tpg_color {
};
extern const struct color tpg_colors[TPG_COLOR_MAX];
-extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1];
+extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
+extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1];
#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index cbcd6250e7b2..fc9c6536ba02 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -296,127 +296,193 @@ static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg)
}
}
-static u16 color_to_y(struct tpg_data *tpg, int r, int g, int b)
+static inline int rec709_to_linear(int v)
{
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- return ((16829 * r + 33039 * g + 6416 * b + 16 * 32768) >> 16) + (16 << 4);
- case V4L2_COLORSPACE_SMPTE240M:
- return ((11932 * r + 39455 * g + 4897 * b + 16 * 32768) >> 16) + (16 << 4);
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
- default:
- return ((11966 * r + 40254 * g + 4064 * b + 16 * 32768) >> 16) + (16 << 4);
- }
+ v = clamp(v, 0, 0xff0);
+ return tpg_rec709_to_linear[v];
}
-static u16 color_to_cb(struct tpg_data *tpg, int r, int g, int b)
+static inline int linear_to_rec709(int v)
{
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- return ((-9714 * r - 19070 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
- case V4L2_COLORSPACE_SMPTE240M:
- return ((-6684 * r - 22100 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
- default:
- return ((-6596 * r - 22189 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
- }
+ v = clamp(v, 0, 0xff0);
+ return tpg_linear_to_rec709[v];
}
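The clamp to 0..0xff0 in the two helpers above reflects the internal component layout: 8-bit values are carried with 4 extra fractional bits, so full scale is 255 << 4 and the generated tables need exactly 255 * 16 + 1 entries. A trivial stand-alone illustration of that layout (not part of the patch):

/* Hedged illustration of the 12-bit component layout used for table lookups. */
#include <stdio.h>

int main(void)
{
	int v8 = 0x80;		/* mid-gray, 8-bit */
	int v12 = v8 << 4;	/* 0x800: 8 integer + 4 fractional bits */

	printf("max index = 0x%x, mid-gray index = 0x%x\n", 255 << 4, v12);
	return 0;
}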
-static u16 color_to_cr(struct tpg_data *tpg, int r, int g, int b)
+static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
+ int y_offset, int *y, int *cb, int *cr)
{
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- return ((28784 * r - 24103 * g - 4681 * b + 16 * 32768) >> 16) + (128 << 4);
- case V4L2_COLORSPACE_SMPTE240M:
- return ((28784 * r - 25606 * g - 3178 * b + 16 * 32768) >> 16) + (128 << 4);
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
- default:
- return ((28784 * r - 26145 * g - 2639 * b + 16 * 32768) >> 16) + (128 << 4);
- }
+ *y = ((m[0][0] * r + m[0][1] * g + m[0][2] * b) >> 16) + (y_offset << 4);
+ *cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4);
+ *cr = ((m[2][0] * r + m[2][1] * g + m[2][2] * b) >> 16) + (128 << 4);
}
-static u16 ycbcr_to_r(struct tpg_data *tpg, int y, int cb, int cr)
+static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
+ int *y, int *cb, int *cr)
{
- int r;
+#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
- y -= 16 << 4;
- cb -= 128 << 4;
- cr -= 128 << 4;
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- r = 4769 * y + 6537 * cr;
+ static const int bt601[3][3] = {
+ { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) },
+ { COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224) },
+ { COEFF(0.5, 224), COEFF(-0.419, 224), COEFF(-0.081, 224) },
+ };
+ static const int bt601_full[3][3] = {
+ { COEFF(0.299, 255), COEFF(0.587, 255), COEFF(0.114, 255) },
+ { COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255) },
+ { COEFF(0.5, 255), COEFF(-0.419, 255), COEFF(-0.081, 255) },
+ };
+ static const int rec709[3][3] = {
+ { COEFF(0.2126, 219), COEFF(0.7152, 219), COEFF(0.0722, 219) },
+ { COEFF(-0.1146, 224), COEFF(-0.3854, 224), COEFF(0.5, 224) },
+ { COEFF(0.5, 224), COEFF(-0.4542, 224), COEFF(-0.0458, 224) },
+ };
+ static const int rec709_full[3][3] = {
+ { COEFF(0.2126, 255), COEFF(0.7152, 255), COEFF(0.0722, 255) },
+ { COEFF(-0.1146, 255), COEFF(-0.3854, 255), COEFF(0.5, 255) },
+ { COEFF(0.5, 255), COEFF(-0.4542, 255), COEFF(-0.0458, 255) },
+ };
+ static const int smpte240m[3][3] = {
+ { COEFF(0.212, 219), COEFF(0.701, 219), COEFF(0.087, 219) },
+ { COEFF(-0.116, 224), COEFF(-0.384, 224), COEFF(0.5, 224) },
+ { COEFF(0.5, 224), COEFF(-0.445, 224), COEFF(-0.055, 224) },
+ };
+ static const int bt2020[3][3] = {
+ { COEFF(0.2726, 219), COEFF(0.6780, 219), COEFF(0.0593, 219) },
+ { COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224) },
+ { COEFF(0.5, 224), COEFF(-0.4629, 224), COEFF(-0.0405, 224) },
+ };
+ bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ int lin_y, yc;
+
+ switch (tpg->real_ycbcr_enc) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ case V4L2_YCBCR_ENC_SYCC:
+ rgb2ycbcr(full ? bt601_full : bt601, r, g, b, 16, y, cb, cr);
+ break;
+ case V4L2_YCBCR_ENC_BT2020:
+ rgb2ycbcr(bt2020, r, g, b, 16, y, cb, cr);
break;
- case V4L2_COLORSPACE_SMPTE240M:
- r = 4769 * y + 7376 * cr;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ lin_y = (COEFF(0.2627, 255) * rec709_to_linear(r) +
+ COEFF(0.6780, 255) * rec709_to_linear(g) +
+ COEFF(0.0593, 255) * rec709_to_linear(b)) >> 16;
+ yc = linear_to_rec709(lin_y);
+ *y = (yc * 219) / 255 + (16 << 4);
+ if (b <= yc)
+ *cb = (((b - yc) * COEFF(1.0 / 1.9404, 224)) >> 16) + (128 << 4);
+ else
+ *cb = (((b - yc) * COEFF(1.0 / 1.5816, 224)) >> 16) + (128 << 4);
+ if (r <= yc)
+ *cr = (((r - yc) * COEFF(1.0 / 1.7184, 224)) >> 16) + (128 << 4);
+ else
+ *cr = (((r - yc) * COEFF(1.0 / 0.9936, 224)) >> 16) + (128 << 4);
break;
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ rgb2ycbcr(smpte240m, r, g, b, 16, y, cb, cr);
+ break;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
default:
- r = 4769 * y + 7343 * cr;
+ rgb2ycbcr(full ? rec709_full : rec709, r, g, b, 0, y, cb, cr);
break;
}
- return clamp(r >> 12, 0, 0xff0);
}
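rgb2ycbcr() applies a 3x3 integer matrix built by the COEFF() macro: each coefficient is scaled by the nominal output range (219 for limited-range luma, 224 for limited-range chroma, 255 for full range) and by 256, and the 12-bit inputs bring the products into a >> 16 fixed-point domain. A hypothetical host-side check of the BT.601 limited-range luma row; only the macro is taken from the hunk above:

/* Hypothetical host-side check of the fixed-point luma row used above. */
#include <stdio.h>

#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))

int main(void)
{
	/* BT.601 luma row, limited range */
	const int m[3] = { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) };
	int r = 255 << 4, g = 255 << 4, b = 255 << 4;	/* 12-bit white */
	int y = ((m[0] * r + m[1] * g + m[2] * b) >> 16) + (16 << 4);

	/* prints Y' = 3746 (8-bit 234; nominal limited-range white is 235) */
	printf("Y' = %d (8-bit %d)\n", y, y >> 4);
	return 0;
}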
-static u16 ycbcr_to_g(struct tpg_data *tpg, int y, int cb, int cr)
+static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr,
+ int y_offset, int *r, int *g, int *b)
{
- int g;
-
- y -= 16 << 4;
+ y -= y_offset << 4;
cb -= 128 << 4;
cr -= 128 << 4;
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- g = 4769 * y - 1605 * cb - 3330 * cr;
- break;
- case V4L2_COLORSPACE_SMPTE240M:
- g = 4769 * y - 1055 * cb - 2341 * cr;
- break;
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
- default:
- g = 4769 * y - 873 * cb - 2183 * cr;
- break;
- }
- return clamp(g >> 12, 0, 0xff0);
+ *r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr;
+ *g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr;
+ *b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr;
+ *r = clamp(*r >> 12, 0, 0xff0);
+ *g = clamp(*g >> 12, 0, 0xff0);
+ *b = clamp(*b >> 12, 0, 0xff0);
}
-static u16 ycbcr_to_b(struct tpg_data *tpg, int y, int cb, int cr)
+static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
+ int *r, int *g, int *b)
{
- int b;
+#undef COEFF
+#define COEFF(v, r) ((int)(0.5 + (v) * ((255.0 * 255.0 * 16.0) / (r))))
+ static const int bt601[3][3] = {
+ { COEFF(1, 219), COEFF(0, 224), COEFF(1.4020, 224) },
+ { COEFF(1, 219), COEFF(-0.3441, 224), COEFF(-0.7141, 224) },
+ { COEFF(1, 219), COEFF(1.7720, 224), COEFF(0, 224) },
+ };
+ static const int bt601_full[3][3] = {
+ { COEFF(1, 255), COEFF(0, 255), COEFF(1.4020, 255) },
+ { COEFF(1, 255), COEFF(-0.3441, 255), COEFF(-0.7141, 255) },
+ { COEFF(1, 255), COEFF(1.7720, 255), COEFF(0, 255) },
+ };
+ static const int rec709[3][3] = {
+ { COEFF(1, 219), COEFF(0, 224), COEFF(1.5748, 224) },
+ { COEFF(1, 219), COEFF(-0.1873, 224), COEFF(-0.4681, 224) },
+ { COEFF(1, 219), COEFF(1.8556, 224), COEFF(0, 224) },
+ };
+ static const int rec709_full[3][3] = {
+ { COEFF(1, 255), COEFF(0, 255), COEFF(1.5748, 255) },
+ { COEFF(1, 255), COEFF(-0.1873, 255), COEFF(-0.4681, 255) },
+ { COEFF(1, 255), COEFF(1.8556, 255), COEFF(0, 255) },
+ };
+ static const int smpte240m[3][3] = {
+ { COEFF(1, 219), COEFF(0, 224), COEFF(1.5756, 224) },
+ { COEFF(1, 219), COEFF(-0.2253, 224), COEFF(-0.4767, 224) },
+ { COEFF(1, 219), COEFF(1.8270, 224), COEFF(0, 224) },
+ };
+ static const int bt2020[3][3] = {
+ { COEFF(1, 219), COEFF(0, 224), COEFF(1.4746, 224) },
+ { COEFF(1, 219), COEFF(-0.1646, 224), COEFF(-0.5714, 224) },
+ { COEFF(1, 219), COEFF(1.8814, 224), COEFF(0, 224) },
+ };
+ bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ int lin_r, lin_g, lin_b, lin_y;
+
+ switch (tpg->real_ycbcr_enc) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ case V4L2_YCBCR_ENC_SYCC:
+ ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, 16, r, g, b);
+ break;
+ case V4L2_YCBCR_ENC_BT2020:
+ ycbcr2rgb(bt2020, y, cb, cr, 16, r, g, b);
+ break;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ y -= 16 << 4;
+ cb -= 128 << 4;
+ cr -= 128 << 4;
- y -= 16 << 4;
- cb -= 128 << 4;
- cr -= 128 << 4;
- switch (tpg->colorspace) {
- case V4L2_COLORSPACE_SMPTE170M:
- case V4L2_COLORSPACE_470_SYSTEM_M:
- case V4L2_COLORSPACE_470_SYSTEM_BG:
- b = 4769 * y + 7343 * cb;
+ if (cb <= 0)
+ *b = COEFF(1.0, 219) * y + COEFF(1.9404, 224) * cb;
+ else
+ *b = COEFF(1.0, 219) * y + COEFF(1.5816, 224) * cb;
+ *b = *b >> 12;
+ if (cr <= 0)
+ *r = COEFF(1.0, 219) * y + COEFF(1.7184, 224) * cr;
+ else
+ *r = COEFF(1.0, 219) * y + COEFF(0.9936, 224) * cr;
+ *r = *r >> 12;
+ lin_r = rec709_to_linear(*r);
+ lin_b = rec709_to_linear(*b);
+ lin_y = rec709_to_linear((y * 255) / 219);
+
+ lin_g = COEFF(1.0 / 0.6780, 255) * lin_y -
+ COEFF(0.2627 / 0.6780, 255) * lin_r -
+ COEFF(0.0593 / 0.6780, 255) * lin_b;
+ *g = linear_to_rec709(lin_g >> 12);
break;
- case V4L2_COLORSPACE_SMPTE240M:
- b = 4769 * y + 8552 * cb;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ ycbcr2rgb(smpte240m, y, cb, cr, 16, r, g, b);
break;
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_SRGB:
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
default:
- b = 4769 * y + 8652 * cb;
+ ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, 16, r, g, b);
break;
}
- return clamp(b >> 12, 0, 0xff0);
}
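The V4L2_YCBCR_ENC_BT2020_CONST_LUM branches implement the constant-luminance Y'cCbcCrc variant: luma is derived from linear R, G, B (hence the rec709_to_linear()/linear_to_rec709() round trips), and the chroma scale factors differ for negative and positive differences (1.9404/1.5816 for B' - Y'c, 1.7184/0.9936 for R' - Y'c). A floating-point reference of the encode step, as a hedged sketch that reuses the Rec. 709 transfer curve exactly as the code above does; all names and the sample values are illustrative:

/* Hypothetical floating-point reference of BT.2020 constant-luminance encoding. */
#include <math.h>
#include <stdio.h>

static double oetf(double lin)		/* linear -> non-linear (Rec. 709 curve) */
{
	return (lin < 0.018) ? lin * 4.5 : 1.099 * pow(lin, 0.45) - 0.099;
}

static double eotf(double v)		/* non-linear -> linear */
{
	return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
}

int main(void)
{
	double rp = 0.75, gp = 0.50, bp = 0.25;	/* non-linear R'G'B' in [0, 1] */
	double yc, cbc, crc;

	/* luma is computed from the linear components, then re-encoded */
	yc = oetf(0.2627 * eotf(rp) + 0.6780 * eotf(gp) + 0.0593 * eotf(bp));

	cbc = (bp - yc) / ((bp - yc <= 0) ? 1.9404 : 1.5816);
	crc = (rp - yc) / ((rp - yc <= 0) ? 1.7184 : 0.9936);

	printf("Y'c=%.4f Cbc=%.4f Crc=%.4f\n", yc, cbc, crc);
	return 0;
}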
/* precalculate color bar values to speed up rendering */
@@ -456,18 +522,17 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g <<= 4;
b <<= 4;
}
- if (tpg->qual == TPG_QUAL_GRAY)
- r = g = b = color_to_y(tpg, r, g, b);
+ if (tpg->qual == TPG_QUAL_GRAY) {
+ /* Rec. 709 Luma function */
+ /* (0.2126, 0.7152, 0.0722) * (255 * 256) */
+ r = g = b = ((13879 * r + 46688 * g + 4713 * b) >> 16) + (16 << 4);
+ }
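The gray-scale branch above folds the Rec. 709 luma weights into integers scaled by 255 * 256 = 65280, e.g. 0.7152 * 65280 rounds to 46688. A tiny stand-alone check of all three constants (illustrative only):

/* Hypothetical check of the fixed-point Rec. 709 luma weights used above. */
#include <stdio.h>

int main(void)
{
	const double w[3] = { 0.2126, 0.7152, 0.0722 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%d\n", (int)(w[i] * 255 * 256 + 0.5));	/* 13879 46688 4713 */
	return 0;
}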
/*
* The assumption is that the RGB output is always full range,
* so only if the rgb_range overrides the 'real' rgb range do
* we need to convert the RGB values.
*
- * Currently there is no way of signalling to userspace if you
- * are actually giving it limited range RGB (or full range
- * YUV for that matter).
- *
* Remember that r, g and b are still in the 0 - 0xff0 range.
*/
if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
@@ -497,12 +562,12 @@ static void precalculate_color(struct tpg_data *tpg, int k)
if (tpg->brightness != 128 || tpg->contrast != 128 ||
tpg->saturation != 128 || tpg->hue) {
/* Implement these operations */
+ int y, cb, cr;
+ int tmp_cb, tmp_cr;
/* First convert to YCbCr */
- int y = color_to_y(tpg, r, g, b); /* Luma */
- int cb = color_to_cb(tpg, r, g, b); /* Cb */
- int cr = color_to_cr(tpg, r, g, b); /* Cr */
- int tmp_cb, tmp_cr;
+
+ color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
y += (tpg->brightness << 4) - (128 << 4);
@@ -520,21 +585,29 @@ static void precalculate_color(struct tpg_data *tpg, int k)
tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
return;
}
- r = ycbcr_to_r(tpg, y, cb, cr);
- g = ycbcr_to_g(tpg, y, cb, cr);
- b = ycbcr_to_b(tpg, y, cb, cr);
+ ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
}
if (tpg->is_yuv) {
/* Convert to YCbCr */
- u16 y = color_to_y(tpg, r, g, b); /* Luma */
- u16 cb = color_to_cb(tpg, r, g, b); /* Cb */
- u16 cr = color_to_cr(tpg, r, g, b); /* Cr */
+ int y, cb, cr;
+
+ color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+ if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
+ y = clamp(y, 16 << 4, 235 << 4);
+ cb = clamp(cb, 16 << 4, 240 << 4);
+ cr = clamp(cr, 16 << 4, 240 << 4);
+ }
tpg->colors[k][0] = clamp(y >> 4, 1, 254);
tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
} else {
+ if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
+ r = (r * 219) / 255 + (16 << 4);
+ g = (g * 219) / 255 + (16 << 4);
+ b = (b * 219) / 255 + (16 << 4);
+ }
switch (tpg->fourcc) {
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
@@ -1152,6 +1225,46 @@ static void tpg_recalc(struct tpg_data *tpg)
if (tpg->recalc_colors) {
tpg->recalc_colors = false;
tpg->recalc_lines = true;
+ tpg->real_ycbcr_enc = tpg->ycbcr_enc;
+ tpg->real_quantization = tpg->quantization;
+ if (tpg->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_709;
+ break;
+ case V4L2_COLORSPACE_SRGB:
+ tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_SYCC;
+ break;
+ case V4L2_COLORSPACE_BT2020:
+ tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_BT2020;
+ break;
+ case V4L2_COLORSPACE_SMPTE240M:
+ tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_SMPTE240M;
+ break;
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ case V4L2_COLORSPACE_ADOBERGB:
+ default:
+ tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_601;
+ break;
+ }
+ }
+ if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT) {
+ tpg->real_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ if (tpg->is_yuv) {
+ switch (tpg->real_ycbcr_enc) {
+ case V4L2_YCBCR_ENC_SYCC:
+ case V4L2_YCBCR_ENC_XV601:
+ case V4L2_YCBCR_ENC_XV709:
+ break;
+ default:
+ tpg->real_quantization =
+ V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ }
+ }
+ }
tpg_precalculate_colors(tpg);
}
if (tpg->recalc_square_border) {
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index 8ef3e52ba3be..9dc463a40ed3 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -119,6 +119,18 @@ struct tpg_data {
u32 fourcc;
bool is_yuv;
u32 colorspace;
+ u32 ycbcr_enc;
+ /*
+ * Stores the actual Y'CbCr encoding, i.e. will never be
+ * V4L2_YCBCR_ENC_DEFAULT.
+ */
+ u32 real_ycbcr_enc;
+ u32 quantization;
+ /*
+ * Stores the actual quantization, i.e. will never be
+ * V4L2_QUANTIZATION_DEFAULT.
+ */
+ u32 real_quantization;
enum tpg_video_aspect vid_aspect;
enum tpg_pixel_aspect pix_aspect;
unsigned rgb_range;
@@ -286,6 +298,32 @@ static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
return tpg->colorspace;
}
+static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
+{
+ if (tpg->ycbcr_enc == ycbcr_enc)
+ return;
+ tpg->ycbcr_enc = ycbcr_enc;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
+{
+ return tpg->ycbcr_enc;
+}
+
+static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
+{
+ if (tpg->quantization == quantization)
+ return;
+ tpg->quantization = quantization;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
+{
+ return tpg->quantization;
+}
+
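The new setters only mark recalc_colors when the value really changes, so callers may invoke them unconditionally on every format change. A hedged usage sketch; the wrapper function is illustrative and not part of the patch, while the tpg field of struct vivid_dev is referenced elsewhere in this series:

/* Hedged usage sketch: propagate negotiated colorimetry into the TPG. */
static void example_update_tpg_colorimetry(struct vivid_dev *dev,
					   const struct v4l2_pix_format_mplane *mp)
{
	tpg_s_ycbcr_enc(&dev->tpg, mp->ycbcr_enc);
	tpg_s_quantization(&dev->tpg, mp->quantization);
}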
static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
{
return tpg->planes;
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 2166d0bf6fe2..ef81b01b53d2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -236,8 +236,8 @@ const struct vb2_ops vivid_vbi_cap_qops = {
.buf_queue = vbi_cap_buf_queue,
.start_streaming = vbi_cap_start_streaming,
.stop_streaming = vbi_cap_stop_streaming,
- .wait_prepare = vivid_unlock,
- .wait_finish = vivid_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
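Switching wait_prepare/wait_finish to the generic helpers assumes vb2_queue::lock points at the driver mutex, because vb2_ops_wait_prepare() and vb2_ops_wait_finish() simply unlock and relock q->lock around the blocking wait. A condensed, hedged sketch of the queue setup this relies on; field names such as dev->mutex and struct vivid_buffer follow the vivid driver but are not shown in this hunk:

/* Condensed sketch: the generic wait helpers require vb2_queue::lock to be set. */
static int example_init_cap_queue(struct vivid_dev *dev, struct vb2_queue *q)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->io_modes = VB2_MMAP | VB2_READ;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct vivid_buffer);
	q->ops = &vivid_vid_cap_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->lock = &dev->mutex;	/* unlocked/relocked by vb2_ops_wait_prepare/finish */

	return vb2_queue_init(q);
}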
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 9d00a07ecdcd..4e4c70e1e04a 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -131,8 +131,8 @@ const struct vb2_ops vivid_vbi_out_qops = {
.buf_queue = vbi_out_buf_queue,
.start_streaming = vbi_out_start_streaming,
.stop_streaming = vbi_out_stop_streaming,
- .wait_prepare = vivid_unlock,
- .wait_finish = vivid_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 331c54429b40..867a29a6d18f 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -288,8 +288,8 @@ const struct vb2_ops vivid_vid_cap_qops = {
.buf_queue = vid_cap_buf_queue,
.start_streaming = vid_cap_start_streaming,
.stop_streaming = vid_cap_stop_streaming,
- .wait_prepare = vivid_unlock,
- .wait_finish = vivid_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/*
@@ -443,12 +443,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
break;
if (bt->standards & V4L2_DV_BT_STD_CEA861) {
if (bt->width == 720 && bt->height <= 576)
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
else
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
} else {
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
}
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
@@ -498,6 +498,20 @@ static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
return dev->colorspace_out;
}
+static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_ycbcr_enc(&dev->tpg);
+ return dev->ycbcr_enc_out;
+}
+
+static unsigned vivid_quantization_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_quantization(&dev->tpg);
+ return dev->quantization_out;
+}
+
int vivid_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -510,6 +524,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
mp->field = dev->field_cap;
mp->pixelformat = dev->fmt_cap->fourcc;
mp->colorspace = vivid_colorspace_cap(dev);
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ mp->quantization = vivid_quantization_cap(dev);
mp->num_planes = dev->fmt_cap->planes;
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
@@ -595,6 +611,8 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
mp->colorspace = vivid_colorspace_cap(dev);
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ mp->quantization = vivid_quantization_cap(dev);
memset(mp->reserved, 0, sizeof(mp->reserved));
return 0;
}
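With ycbcr_enc and quantization now filled in by g_fmt/try_fmt, userspace can read back which encoding and range the driver actually chose. A hedged example; the device node path is an assumption and a real application would handle errors more carefully:

/* Hypothetical userspace check of the colorimetry fields reported above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("colorspace=%u ycbcr_enc=%u quantization=%u\n",
		       fmt.fmt.pix.colorspace, fmt.fmt.pix.ycbcr_enc,
		       fmt.fmt.pix.quantization);
	return 0;
}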
@@ -1307,20 +1325,20 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
if (dev->colorspace) {
switch (dev->input_type[i]) {
case WEBCAM:
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
break;
case TV:
case SVID:
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
break;
case HDMI:
if (bt->standards & V4L2_DV_BT_STD_CEA861) {
if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
else
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
} else {
- v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
}
break;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 16cd6d2d2ed6..6bef1e6d6788 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -259,6 +259,8 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
mp->pixelformat = pix->pixelformat;
mp->field = pix->field;
mp->colorspace = pix->colorspace;
+ mp->ycbcr_enc = pix->ycbcr_enc;
+ mp->quantization = pix->quantization;
mp->num_planes = 1;
mp->flags = pix->flags;
ppix->sizeimage = pix->sizeimage;
@@ -285,6 +287,8 @@ int fmt_sp2mp_func(struct file *file, void *priv,
pix->pixelformat = mp->pixelformat;
pix->field = mp->field;
pix->colorspace = mp->colorspace;
+ pix->ycbcr_enc = mp->ycbcr_enc;
+ pix->quantization = mp->quantization;
pix->sizeimage = ppix->sizeimage;
pix->bytesperline = ppix->bytesperline;
pix->flags = mp->flags;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 69c2dbd2d165..39ff79f6aa67 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -209,8 +209,8 @@ const struct vb2_ops vivid_vid_out_qops = {
.buf_queue = vid_out_buf_queue,
.start_streaming = vid_out_start_streaming,
.stop_streaming = vid_out_stop_streaming,
- .wait_prepare = vivid_unlock,
- .wait_finish = vivid_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/*
@@ -259,6 +259,8 @@ void vivid_update_format_out(struct vivid_dev *dev)
}
break;
}
+ dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
+ dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
dev->compose_out = dev->sink_rect;
dev->compose_bounds_out = dev->sink_rect;
dev->crop_out = dev->compose_out;
@@ -318,6 +320,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
mp->field = dev->field_out;
mp->pixelformat = dev->fmt_out->fourcc;
mp->colorspace = dev->colorspace_out;
+ mp->ycbcr_enc = dev->ycbcr_enc_out;
+ mp->quantization = dev->quantization_out;
mp->num_planes = dev->fmt_out->planes;
for (p = 0; p < mp->num_planes; p++) {
mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
@@ -394,16 +398,23 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height;
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
- if (vivid_is_svid_out(dev))
+ mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mp->quantization = V4L2_QUANTIZATION_DEFAULT;
+ if (vivid_is_svid_out(dev)) {
mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
- else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861))
+ } else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861)) {
mp->colorspace = V4L2_COLORSPACE_SRGB;
- else if (bt->width == 720 && bt->height <= 576)
+ if (dev->dvi_d_out)
+ mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ } else if (bt->width == 720 && bt->height <= 576) {
mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
- else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
- mp->colorspace != V4L2_COLORSPACE_REC709 &&
- mp->colorspace != V4L2_COLORSPACE_SRGB)
+ } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
+ mp->colorspace != V4L2_COLORSPACE_REC709 &&
+ mp->colorspace != V4L2_COLORSPACE_ADOBERGB &&
+ mp->colorspace != V4L2_COLORSPACE_BT2020 &&
+ mp->colorspace != V4L2_COLORSPACE_SRGB) {
mp->colorspace = V4L2_COLORSPACE_REC709;
+ }
memset(mp->reserved, 0, sizeof(mp->reserved));
return 0;
}
@@ -522,6 +533,8 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
set_colorspace:
dev->colorspace_out = mp->colorspace;
+ dev->ycbcr_enc_out = mp->ycbcr_enc;
+ dev->quantization_out = mp->quantization;
if (dev->loop_video) {
vivid_send_source_change(dev, SVID);
vivid_send_source_change(dev, HDMI);
@@ -612,7 +625,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
sel->r = dev->fmt_out_rect;
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- if (!dev->has_compose_out)
+ if (!dev->has_crop_out)
return -EINVAL;
sel->r = vivid_max_rect;
break;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index a0c1984c733e..b21f381a9862 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -187,8 +187,8 @@ static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
struct v4l2_mbus_framefmt *format;
@@ -215,8 +215,8 @@ static int bru_enum_frame_size(struct v4l2_subdev *subdev,
if (fse->index)
return -EINVAL;
- if (fse->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fse->code != V4L2_MBUS_FMT_AYUV8_1X32)
+ if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
return -EINVAL;
fse->min_width = BRU_MIN_SIZE;
@@ -261,9 +261,9 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_fh *fh,
switch (pad) {
case BRU_PAD_SINK(0):
/* Default to YUV if the requested format is not supported. */
- if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
break;
default:
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 3e6601b5b4de..5eb16e87d53f 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -562,7 +562,6 @@ static struct platform_driver vsp1_platform_driver = {
.probe = vsp1_probe,
.remove = vsp1_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "vsp1",
.pm = &vsp1_pm_ops,
.of_match_table = vsp1_of_match,
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index db2950a73c60..80bedc554ee3 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -70,9 +70,9 @@ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) |
(code->pad == HSIT_PAD_SOURCE && hsit->inverse))
- code->code = V4L2_MBUS_FMT_ARGB8888_1X32;
+ code->code = MEDIA_BUS_FMT_ARGB8888_1X32;
else
- code->code = V4L2_MBUS_FMT_AHSV8888_1X32;
+ code->code = MEDIA_BUS_FMT_AHSV8888_1X32;
return 0;
}
@@ -136,8 +136,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
return 0;
}
- format->code = hsit->inverse ? V4L2_MBUS_FMT_AHSV8888_1X32
- : V4L2_MBUS_FMT_ARGB8888_1X32;
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
+ : MEDIA_BUS_FMT_ARGB8888_1X32;
format->width = clamp_t(unsigned int, fmt->format.width,
HSIT_MIN_SIZE, HSIT_MAX_SIZE);
format->height = clamp_t(unsigned int, fmt->format.height,
@@ -151,8 +151,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- format->code = hsit->inverse ? V4L2_MBUS_FMT_ARGB8888_1X32
- : V4L2_MBUS_FMT_AHSV8888_1X32;
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
+ : MEDIA_BUS_FMT_AHSV8888_1X32;
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index d4fb23e9c4a8..17a6ca7dafe6 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -78,8 +78,8 @@ static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
if (code->pad == LIF_PAD_SINK) {
@@ -147,9 +147,9 @@ static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_mbus_framefmt *format;
/* Default to YUV if the requested format is not supported. */
- if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
format = vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index fea36ebe2565..6f185c3621fe 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -86,9 +86,9 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AHSV8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
struct v4l2_mbus_framefmt *format;
@@ -158,10 +158,10 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_mbus_framefmt *format;
/* Default to YUV if the requested format is not supported. */
- if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->format.code != V4L2_MBUS_FMT_AHSV8888_1X32 &&
- fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index ec3dab6a9b9b..1f1ba26a834a 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -29,8 +29,8 @@ int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
if (code->index >= ARRAY_SIZE(codes))
@@ -103,9 +103,9 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
struct v4l2_rect *crop;
/* Default to YUV if the requested format is not supported. */
- if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
format = vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index b7d3c8b9f189..1129494c7cfc 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -139,7 +139,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
input = &sru->entity.formats[SRU_PAD_SINK];
output = &sru->entity.formats[SRU_PAD_SOURCE];
- if (input->code == V4L2_MBUS_FMT_ARGB8888_1X32)
+ if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
| VI6_SRU_CTRL0_PARAM4;
else
@@ -170,8 +170,8 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
struct v4l2_mbus_framefmt *format;
@@ -248,9 +248,9 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
switch (pad) {
case SRU_PAD_SINK:
/* Default to YUV if the requested format is not supported. */
- if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index de92ef4944b3..a4afec133800 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -173,8 +173,8 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
- V4L2_MBUS_FMT_ARGB8888_1X32,
- V4L2_MBUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
};
if (code->pad == UDS_PAD_SINK) {
@@ -246,9 +246,9 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
switch (pad) {
case UDS_PAD_SINK:
/* Default to YUV if the requested format is not supported. */
- if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
- fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
- fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 915a20eb003e..d91f19a9e1c1 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -48,85 +48,85 @@
*/
static const struct vsp1_format_info vsp1_video_formats[] = {
- { V4L2_PIX_FMT_RGB332, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 8, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ARGB444, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB444, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_ARGB555, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB555, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_RGB565, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
1, { 16, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_BGR24, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_RGB24, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ABGR32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
1, { 32, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XBGR32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ARGB32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_UYVY, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_VYUY, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, false, true, 2, 1, false },
- { V4L2_PIX_FMT_YUYV, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, true, false, 2, 1, false },
- { V4L2_PIX_FMT_YVYU, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, true, true, 2, 1, false },
- { V4L2_PIX_FMT_NV12M, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
2, { 8, 16, 0 }, false, false, 2, 2, false },
- { V4L2_PIX_FMT_NV21M, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
2, { 8, 16, 0 }, false, true, 2, 2, false },
- { V4L2_PIX_FMT_NV16M, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
2, { 8, 16, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_NV61M, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
2, { 8, 16, 0 }, false, true, 2, 1, false },
- { V4L2_PIX_FMT_YUV420M, V4L2_MBUS_FMT_AYUV8_1X32,
+ { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
3, { 8, 8, 8 }, false, false, 2, 2, false },
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 633022b45f33..dccf58691650 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1575,7 +1575,6 @@ MODULE_ALIAS("platform:si476x-radio");
static struct platform_driver si476x_radio_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = si476x_radio_probe,
.remove = si476x_radio_remove,
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index b9285e6584af..e6b55edc8f8d 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -174,7 +174,6 @@ static int timbradio_remove(struct platform_device *pdev)
static struct platform_driver timbradio_platform_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = timbradio_probe,
.remove = timbradio_remove,
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 9cf6731fb816..b8f36445516b 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1279,10 +1279,12 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->bus_info, radio->bus_type,
sizeof(capability->bus_info));
- capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
V4L2_CAP_RDS_OUTPUT;
+ capability->capabilities = capability->device_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
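The querycap change follows the device_caps/capabilities split: device_caps describes this particular node, capabilities is the union over all nodes of the device plus V4L2_CAP_DEVICE_CAPS. A minimal hedged sketch of the pattern with purely illustrative names and capability bits:

/* Minimal hedged sketch of the device_caps/capabilities split used above. */
static int example_querycap(struct file *file, void *priv,
			    struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "example-radio", sizeof(cap->driver));
	strlcpy(cap->card, "Example FM radio", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:example-radio", sizeof(cap->bus_info));

	cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER |
			   V4L2_CAP_HW_FREQ_SEEK;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}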
@@ -2148,7 +2150,6 @@ static struct platform_driver wl1273_fm_radio_driver = {
.remove = wl1273_fm_radio_remove,
.driver = {
.name = "wl1273_fm_radio",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index a47502a330f0..6c7597383ca2 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -34,7 +34,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#include <media/radio-si4713.h>
+#include "si4713.h"
/* module parameters */
static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
@@ -153,7 +153,6 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
{
struct radio_si4713_platform_data *pdata = pdev->dev.platform_data;
struct radio_si4713_device *rsdev;
- struct i2c_adapter *adapter;
struct v4l2_subdev *sd;
int rval = 0;
@@ -177,20 +176,11 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
goto exit;
}
- adapter = i2c_get_adapter(pdata->i2c_bus);
- if (!adapter) {
- dev_err(&pdev->dev, "Cannot get i2c adapter %d\n",
- pdata->i2c_bus);
- rval = -ENODEV;
- goto unregister_v4l2_dev;
- }
-
- sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter,
- pdata->subdev_board_info, NULL);
- if (!sd) {
+ sd = i2c_get_clientdata(pdata->subdev);
+ rval = v4l2_device_register_subdev(&rsdev->v4l2_dev, sd);
+ if (rval) {
dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
- rval = -ENODEV;
- goto put_adapter;
+ goto unregister_v4l2_dev;
}
rsdev->radio_dev = radio_si4713_vdev_template;
@@ -202,14 +192,12 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
if (video_register_device(&rsdev->radio_dev, VFL_TYPE_RADIO, radio_nr)) {
dev_err(&pdev->dev, "Could not register video device.\n");
rval = -EIO;
- goto put_adapter;
+ goto unregister_v4l2_dev;
}
dev_info(&pdev->dev, "New device successfully probed\n");
goto exit;
-put_adapter:
- i2c_put_adapter(adapter);
unregister_v4l2_dev:
v4l2_device_unregister(&rsdev->v4l2_dev);
exit:
@@ -220,14 +208,10 @@ exit:
static int radio_si4713_pdriver_remove(struct platform_device *pdev)
{
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
- struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
- struct v4l2_subdev, list);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct radio_si4713_device *rsdev;
rsdev = container_of(v4l2_dev, struct radio_si4713_device, v4l2_dev);
video_unregister_device(&rsdev->radio_dev);
- i2c_put_adapter(client->adapter);
v4l2_device_unregister(&rsdev->v4l2_dev);
return 0;
@@ -236,7 +220,6 @@ static int radio_si4713_pdriver_remove(struct platform_device *pdev)
static struct platform_driver radio_si4713_pdriver = {
.driver = {
.name = "radio-si4713",
- .owner = THIS_MODULE,
},
.probe = radio_si4713_pdriver_probe,
.remove = radio_si4713_pdriver_remove,
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index b5765557ea3d..c90004dac170 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -23,6 +23,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -366,16 +367,25 @@ static int si4713_powerup(struct si4713_device *sdev)
if (sdev->power_state)
return 0;
- if (sdev->supplies) {
- err = regulator_bulk_enable(sdev->supplies, sdev->supply_data);
+ if (sdev->vdd) {
+ err = regulator_enable(sdev->vdd);
if (err) {
- v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
+ v4l2_err(&sdev->sd, "Failed to enable vdd: %d\n", err);
return err;
}
}
- if (gpio_is_valid(sdev->gpio_reset)) {
+
+ if (sdev->vio) {
+ err = regulator_enable(sdev->vio);
+ if (err) {
+ v4l2_err(&sdev->sd, "Failed to enable vio: %d\n", err);
+ return err;
+ }
+ }
+
+ if (!IS_ERR(sdev->gpio_reset)) {
udelay(50);
- gpio_set_value(sdev->gpio_reset, 1);
+ gpiod_set_value(sdev->gpio_reset, 1);
}
if (client->irq)
@@ -397,13 +407,20 @@ static int si4713_powerup(struct si4713_device *sdev)
SI4713_STC_INT | SI4713_CTS);
return err;
}
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_set_value(sdev->gpio_reset, 0);
- if (sdev->supplies) {
- err = regulator_bulk_disable(sdev->supplies, sdev->supply_data);
+ if (!IS_ERR(sdev->gpio_reset))
+ gpiod_set_value(sdev->gpio_reset, 0);
+
+
+ if (sdev->vdd) {
+ err = regulator_disable(sdev->vdd);
if (err)
- v4l2_err(&sdev->sd,
- "Failed to disable supplies: %d\n", err);
+ v4l2_err(&sdev->sd, "Failed to disable vdd: %d\n", err);
+ }
+
+ if (sdev->vio) {
+ err = regulator_disable(sdev->vio);
+ if (err)
+ v4l2_err(&sdev->sd, "Failed to disable vio: %d\n", err);
}
return err;
@@ -430,14 +447,23 @@ static int si4713_powerdown(struct si4713_device *sdev)
v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n",
resp[0]);
v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_set_value(sdev->gpio_reset, 0);
- if (sdev->supplies) {
- err = regulator_bulk_disable(sdev->supplies,
- sdev->supply_data);
- if (err)
+ if (!IS_ERR(sdev->gpio_reset))
+ gpiod_set_value(sdev->gpio_reset, 0);
+
+ if (sdev->vdd) {
+ err = regulator_disable(sdev->vdd);
+ if (err) {
v4l2_err(&sdev->sd,
- "Failed to disable supplies: %d\n", err);
+ "Failed to disable vdd: %d\n", err);
+ }
+ }
+
+ if (sdev->vio) {
+ err = regulator_disable(sdev->vio);
+ if (err) {
+ v4l2_err(&sdev->sd,
+ "Failed to disable vio: %d\n", err);
+ }
}
sdev->power_state = POWER_OFF;
}
@@ -1420,38 +1446,51 @@ static int si4713_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct si4713_device *sdev;
- struct si4713_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
- int rval, i;
+ struct si4713_platform_data *pdata = client->dev.platform_data;
+ struct device_node *np = client->dev.of_node;
+ struct radio_si4713_platform_data si4713_pdev_pdata;
+ struct platform_device *si4713_pdev;
+ int rval;
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ sdev = devm_kzalloc(&client->dev, sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
dev_err(&client->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
goto exit;
}
- sdev->gpio_reset = -1;
- if (pdata && gpio_is_valid(pdata->gpio_reset)) {
- rval = gpio_request(pdata->gpio_reset, "si4713 reset");
- if (rval) {
- dev_err(&client->dev,
- "Failed to request gpio: %d\n", rval);
- goto free_sdev;
- }
- sdev->gpio_reset = pdata->gpio_reset;
- gpio_direction_output(sdev->gpio_reset, 0);
- sdev->supplies = pdata->supplies;
+ sdev->gpio_reset = devm_gpiod_get(&client->dev, "reset");
+ if (!IS_ERR(sdev->gpio_reset)) {
+ gpiod_direction_output(sdev->gpio_reset, 0);
+ } else if (PTR_ERR(sdev->gpio_reset) == -ENOENT) {
+ dev_dbg(&client->dev, "No reset GPIO assigned\n");
+ } else if (PTR_ERR(sdev->gpio_reset) == -ENOSYS) {
+ dev_dbg(&client->dev, "No reset GPIO support\n");
+ } else {
+ rval = PTR_ERR(sdev->gpio_reset);
+ dev_err(&client->dev, "Failed to request gpio: %d\n", rval);
+ goto exit;
}
- for (i = 0; i < sdev->supplies; i++)
- sdev->supply_data[i].supply = pdata->supply_names[i];
+ sdev->vdd = devm_regulator_get_optional(&client->dev, "vdd");
+ if (IS_ERR(sdev->vdd)) {
+ rval = PTR_ERR(sdev->vdd);
+ if (rval == -EPROBE_DEFER)
+ goto exit;
- rval = regulator_bulk_get(&client->dev, sdev->supplies,
- sdev->supply_data);
- if (rval) {
- dev_err(&client->dev, "Cannot get regulators: %d\n", rval);
- goto free_gpio;
+ dev_dbg(&client->dev, "no vdd regulator found: %d\n", rval);
+ sdev->vdd = NULL;
+ }
+
+ sdev->vio = devm_regulator_get_optional(&client->dev, "vio");
+ if (IS_ERR(sdev->vio)) {
+ rval = PTR_ERR(sdev->vio);
+ if (rval == -EPROBE_DEFER)
+ goto exit;
+
+ dev_dbg(&client->dev, "no vio regulator found: %d\n", rval);
+ sdev->vio = NULL;
}
v4l2_i2c_subdev_init(&sdev->sd, client, &si4713_subdev_ops);
@@ -1554,12 +1593,12 @@ static int si4713_probe(struct i2c_client *client,
sdev->sd.ctrl_handler = hdl;
if (client->irq) {
- rval = request_irq(client->irq,
+ rval = devm_request_irq(&client->dev, client->irq,
si4713_handler, IRQF_TRIGGER_FALLING,
client->name, sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Could not request IRQ\n");
- goto put_reg;
+ goto free_ctrls;
}
v4l2_dbg(1, debug, &sdev->sd, "IRQ requested.\n");
} else {
@@ -1569,23 +1608,35 @@ static int si4713_probe(struct i2c_client *client,
rval = si4713_initialize(sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Failed to probe device information.\n");
- goto free_irq;
+ goto free_ctrls;
}
+ if (!np && (!pdata || !pdata->is_platform_device))
+ return 0;
+
+ si4713_pdev = platform_device_alloc("radio-si4713", -1);
+ if (!si4713_pdev)
+ goto put_main_pdev;
+
+ si4713_pdev_pdata.subdev = client;
+ rval = platform_device_add_data(si4713_pdev, &si4713_pdev_pdata,
+ sizeof(si4713_pdev_pdata));
+ if (rval)
+ goto put_main_pdev;
+
+ rval = platform_device_add(si4713_pdev);
+ if (rval)
+ goto put_main_pdev;
+
+ sdev->pd = si4713_pdev;
+
return 0;
-free_irq:
- if (client->irq)
- free_irq(client->irq, sdev);
+put_main_pdev:
+ platform_device_put(si4713_pdev);
+ v4l2_device_unregister_subdev(&sdev->sd);
free_ctrls:
v4l2_ctrl_handler_free(hdl);
-put_reg:
- regulator_bulk_free(sdev->supplies, sdev->supply_data);
-free_gpio:
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_free(sdev->gpio_reset);
-free_sdev:
- kfree(sdev);
exit:
return rval;
}
@@ -1596,18 +1647,13 @@ static int si4713_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct si4713_device *sdev = to_si4713_device(sd);
+ platform_device_unregister(sdev->pd);
+
if (sdev->power_state)
si4713_set_power_state(sdev, POWER_DOWN);
- if (client->irq > 0)
- free_irq(client->irq, sdev);
-
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- regulator_bulk_free(sdev->supplies, sdev->supply_data);
- if (gpio_is_valid(sdev->gpio_reset))
- gpio_free(sdev->gpio_reset);
- kfree(sdev);
return 0;
}
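
The converted probe path treats the reset GPIO and the two supplies as optional, devm-managed resources, which is why the remove path above no longer frees them by hand. A minimal sketch of the same pattern in isolation (hypothetical driver, names are illustrative only, not the si4713 code itself):

    static int foo_probe(struct i2c_client *client)
    {
            struct gpio_desc *reset;
            struct regulator *vdd;

            /* devm resources are released automatically on unbind */
            reset = devm_gpiod_get(&client->dev, "reset");
            if (IS_ERR(reset)) {
                    if (PTR_ERR(reset) != -ENOENT &&
                        PTR_ERR(reset) != -ENOSYS)
                            return PTR_ERR(reset);
                    reset = NULL;           /* no reset line wired up */
            } else {
                    gpiod_direction_output(reset, 0);
            }

            vdd = devm_regulator_get_optional(&client->dev, "vdd");
            if (IS_ERR(vdd)) {
                    if (PTR_ERR(vdd) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;
                    vdd = NULL;             /* the supply really is optional */
            }

            return 0;
    }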
diff --git a/drivers/media/radio/si4713/si4713.h b/drivers/media/radio/si4713/si4713.h
index ed700e387605..8a376e142188 100644
--- a/drivers/media/radio/si4713/si4713.h
+++ b/drivers/media/radio/si4713/si4713.h
@@ -15,7 +15,9 @@
#ifndef SI4713_I2C_H
#define SI4713_I2C_H
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/si4713.h>
@@ -190,8 +192,6 @@
#define MIN_ACOMP_THRESHOLD (-40)
#define MAX_ACOMP_GAIN 20
-#define SI4713_NUM_SUPPLIES 2
-
/*
* si4713_device - private data
*/
@@ -236,9 +236,10 @@ struct si4713_device {
struct v4l2_ctrl *tune_ant_cap;
};
struct completion work;
- unsigned supplies;
- struct regulator_bulk_data supply_data[SI4713_NUM_SUPPLIES];
- int gpio_reset;
+ struct regulator *vdd;
+ struct regulator *vio;
+ struct gpio_desc *gpio_reset;
+ struct platform_device *pd;
u32 power_state;
u32 rds_enabled;
u32 frequency;
@@ -246,4 +247,8 @@ struct si4713_device {
u32 stereo;
u32 tune_rnl;
};
+
+struct radio_si4713_platform_data {
+ struct i2c_client *subdev;
+};
#endif /* ifndef SI4713_I2C_H */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index b55012c11842..a5bd3f674bbd 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -198,10 +198,12 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME,
sizeof(capability->card));
sprintf(capability->bus_info, "UART");
- capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
V4L2_CAP_RDS_CAPTURE;
+ capability->capabilities = capability->device_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
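
The same querycap convention applies to any V4L2 node: device_caps describes what this particular device node can do, while capabilities describes the whole device and must include V4L2_CAP_DEVICE_CAPS. A hedged sketch of the shape (illustrative names):

    static int foo_querycap(struct file *file, void *priv,
                            struct v4l2_capability *cap)
    {
            strlcpy(cap->driver, "foo", sizeof(cap->driver));
            strlcpy(cap->card, "Foo FM Radio", sizeof(cap->card));
            strlcpy(cap->bus_info, "platform:foo", sizeof(cap->bus_info));

            /* capabilities of this device node */
            cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
            /* capabilities of the device as a whole, plus the flag itself */
            cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
            return 0;
    }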
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 8ce08107a69d..ddfab256b9a5 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -223,6 +223,17 @@ config IR_FINTEK
To compile this driver as a module, choose M here: the
module will be called fintek-cir.
+config IR_MESON
+ tristate "Amlogic Meson IR remote receiver"
+ depends on RC_CORE
+ depends on ARCH_MESON || COMPILE_TEST
+ ---help---
+ Say Y if you want to use the IR remote receiver available
+ on Amlogic Meson SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called meson-ir.
+
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
@@ -277,6 +288,21 @@ config IR_WINBOND_CIR
To compile this driver as a module, choose M here: the module will
be called winbond_cir.
+config IR_IGORPLUGUSB
+ tristate "IgorPlug-USB IR Receiver"
+ depends on USB_ARCH_HAS_HCD
+ depends on RC_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use the IgorPlug-USB IR Receiver by
+ Igor Cesko. This device is included on the Fit-PC2.
+
+ Note that this device can only record bursts of 36 IR pulses and
+ spaces, which is not enough for the NEC, Sanyo and RC-6 protocols.
+
+ To compile this driver as a module, choose M here: the module will
+ be called igorplugusb.
+
config IR_IGUANA
tristate "IguanaWorks USB IR Transceiver"
depends on USB_ARCH_HAS_HCD
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 0989f940e9cf..379a5c0f1379 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
+obj-$(CONFIG_IR_MESON) += meson-ir.o
obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o
obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
+obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o
obj-$(CONFIG_IR_IGUANA) += iguanair.o
obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
obj-$(CONFIG_RC_ST) += st_rc.o
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 59853085bc88..229853d68451 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -240,7 +240,6 @@ static struct platform_driver gpio_ir_recv_driver = {
.remove = gpio_ir_recv_remove,
.driver = {
.name = GPIO_IR_DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
new file mode 100644
index 000000000000..b36e51576f8e
--- /dev/null
+++ b/drivers/media/rc/igorplugusb.c
@@ -0,0 +1,261 @@
+/*
+ * IgorPlug-USB IR Receiver
+ *
+ * Copyright (C) 2014 Sean Young <sean@mess.org>
+ *
+ * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
+ * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
+ *
+ * Based on the lirc_igorplugusb.c driver:
+ * Copyright (C) 2004 Jan M. Hochstein
+ * <hochstein@algo.informatik.tu-darmstadt.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/rc-core.h>
+
+#define DRIVER_DESC "IgorPlug-USB IR Receiver"
+#define DRIVER_NAME "igorplugusb"
+
+#define HEADERLEN 3
+#define BUFLEN 36
+#define MAX_PACKET (HEADERLEN + BUFLEN)
+
+#define SET_INFRABUFFER_EMPTY 1
+#define GET_INFRACODE 2
+
+
+struct igorplugusb {
+ struct rc_dev *rc;
+ struct device *dev;
+
+ struct urb *urb;
+ struct usb_ctrlrequest request;
+
+ struct timer_list timer;
+
+ uint8_t buf_in[MAX_PACKET];
+
+ char phys[64];
+};
+
+static void igorplugusb_cmd(struct igorplugusb *ir, int cmd);
+
+static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
+{
+ DEFINE_IR_RAW_EVENT(rawir);
+ unsigned i, start, overflow;
+
+ dev_dbg(ir->dev, "irdata: %*ph (len=%u)", len, ir->buf_in, len);
+
+ /*
+ * If more than 36 pulses and spaces follow each other, the igorplugusb
+ * overwrites its buffer from the beginning. The overflow value is the
+ * last offset which was not overwritten. Everything from this offset
+ * onwards occurred before everything up to this offset.
+ */
+ overflow = ir->buf_in[2];
+ i = start = overflow + HEADERLEN;
+
+ if (start >= len) {
+ dev_err(ir->dev, "receive overflow invalid: %u", overflow);
+ } else {
+ if (overflow > 0)
+ dev_warn(ir->dev, "receive overflow, at least %u lost",
+ overflow);
+
+ do {
+ rawir.duration = ir->buf_in[i] * 85333;
+ rawir.pulse = i & 1;
+
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+
+ if (++i == len)
+ i = HEADERLEN;
+ } while (i != start);
+
+ /* add a trailing space */
+ rawir.duration = ir->rc->timeout;
+ rawir.pulse = false;
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+
+ ir_raw_event_handle(ir->rc);
+ }
+
+ igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
+}
+
+static void igorplugusb_callback(struct urb *urb)
+{
+ struct usb_ctrlrequest *req;
+ struct igorplugusb *ir = urb->context;
+
+ req = (struct usb_ctrlrequest *)urb->setup_packet;
+
+ switch (urb->status) {
+ case 0:
+ if (req->bRequest == GET_INFRACODE &&
+ urb->actual_length > HEADERLEN)
+ igorplugusb_irdata(ir, urb->actual_length);
+ else /* request IR */
+ mod_timer(&ir->timer, jiffies + msecs_to_jiffies(50));
+ break;
+ case -EPROTO:
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ usb_unlink_urb(urb);
+ return;
+ default:
+ dev_warn(ir->dev, "Error: urb status = %d\n", urb->status);
+ igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
+ break;
+ }
+}
+
+static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
+{
+ int ret;
+
+ ir->request.bRequest = cmd;
+ ir->urb->transfer_flags = 0;
+ ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(ir->dev, "submit urb failed: %d", ret);
+}
+
+static void igorplugusb_timer(unsigned long data)
+{
+ struct igorplugusb *ir = (struct igorplugusb *)data;
+
+ igorplugusb_cmd(ir, GET_INFRACODE);
+}
+
+static int igorplugusb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev;
+ struct usb_host_interface *idesc;
+ struct usb_endpoint_descriptor *ep;
+ struct igorplugusb *ir;
+ struct rc_dev *rc;
+ int ret;
+
+ udev = interface_to_usbdev(intf);
+ idesc = intf->cur_altsetting;
+
+ if (idesc->desc.bNumEndpoints != 1) {
+ dev_err(&intf->dev, "incorrect number of endpoints");
+ return -ENODEV;
+ }
+
+ ep = &idesc->endpoint[0].desc;
+ if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_control(ep)) {
+ dev_err(&intf->dev, "endpoint incorrect");
+ return -ENODEV;
+ }
+
+ ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ ir->dev = &intf->dev;
+
+ setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir);
+
+ ir->request.bRequest = GET_INFRACODE;
+ ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+ ir->request.wLength = cpu_to_le16(sizeof(ir->buf_in));
+
+ ir->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ir->urb)
+ return -ENOMEM;
+
+ usb_fill_control_urb(ir->urb, udev,
+ usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
+ ir->buf_in, sizeof(ir->buf_in), igorplugusb_callback, ir);
+
+ usb_make_path(udev, ir->phys, sizeof(ir->phys));
+
+ rc = rc_allocate_device();
+ rc->input_name = DRIVER_DESC;
+ rc->input_phys = ir->phys;
+ usb_to_input_id(udev, &rc->input_id);
+ rc->dev.parent = &intf->dev;
+ rc->driver_type = RC_DRIVER_IR_RAW;
+ /*
+ * This device can only store 36 pulses + spaces, which is not enough
+ * for the NEC protocol and many others.
+ */
+ rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
+ RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
+
+ rc->priv = ir;
+ rc->driver_name = DRIVER_NAME;
+ rc->map_name = RC_MAP_HAUPPAUGE;
+ rc->timeout = MS_TO_NS(100);
+ rc->rx_resolution = 85333;
+
+ ir->rc = rc;
+ ret = rc_register_device(rc);
+ if (ret) {
+ dev_err(&intf->dev, "failed to register rc device: %d", ret);
+ rc_free_device(rc);
+ usb_free_urb(ir->urb);
+ return ret;
+ }
+
+ usb_set_intfdata(intf, ir);
+
+ igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
+
+ return 0;
+}
+
+static void igorplugusb_disconnect(struct usb_interface *intf)
+{
+ struct igorplugusb *ir = usb_get_intfdata(intf);
+
+ rc_unregister_device(ir->rc);
+ del_timer_sync(&ir->timer);
+ usb_set_intfdata(intf, NULL);
+ usb_kill_urb(ir->urb);
+ usb_free_urb(ir->urb);
+}
+
+static struct usb_device_id igorplugusb_table[] = {
+ /* Igor Plug USB (Atmel's Manufact. ID) */
+ { USB_DEVICE(0x03eb, 0x0002) },
+ /* Fit PC2 Infrared Adapter */
+ { USB_DEVICE(0x03eb, 0x21fe) },
+ /* Terminating entry */
+ { }
+};
+
+static struct usb_driver igorplugusb_driver = {
+ .name = DRIVER_NAME,
+ .probe = igorplugusb_probe,
+ .disconnect = igorplugusb_disconnect,
+ .id_table = igorplugusb_table
+};
+
+module_usb_driver(igorplugusb_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(usb, igorplugusb_table);
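
As a worked example of the wrap-around replay in igorplugusb_irdata(): for a full report (len = HEADERLEN + BUFLEN = 39) with an overflow offset of 5 in buf_in[2], the indices are visited as sketched below (the concrete values are assumptions for illustration only):

    unsigned i, start, len = 39, overflow = 5;

    i = start = overflow + HEADERLEN;       /* = 8: oldest surviving sample */
    do {
            /* buf_in[8..38] is replayed first, then buf_in[3..7] */
            /* each byte is one pulse or space of buf_in[i] * 85333 ns */
            if (++i == len)
                    i = HEADERLEN;          /* wrap past the 3-byte header */
    } while (i != start);                   /* every sample visited once */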
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index 03ba9fc170fb..580715c7fc5e 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -1,6 +1,7 @@
config IR_IMG
tristate "ImgTec IR Decoder"
depends on RC_CORE
+ depends on METAG || MIPS || COMPILE_TEST
select IR_IMG_HW if !IR_IMG_RAW
help
Say Y or M here if you want to use the ImgTec infrared decoder
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index a0cac2f09109..77c78de4f5bf 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -166,7 +166,6 @@ MODULE_DEVICE_TABLE(of, img_ir_match);
static struct platform_driver img_ir_driver = {
.driver = {
.name = "img-ir",
- .owner = THIS_MODULE,
.of_match_table = img_ir_match,
.pm = &img_ir_pmops,
},
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index ec49f94425fc..2fd47c9bf5d8 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -530,6 +530,22 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
u32 ir_status, irq_en;
spin_lock_irq(&priv->lock);
+ /*
+ * First record that the protocol is being stopped so that the end timer
+ * isn't restarted while we're trying to stop it.
+ */
+ hw->stopping = true;
+
+ /*
+ * Release the lock to stop the end timer, since the end timer handler
+ * acquires the lock and we don't want to deadlock waiting for it.
+ */
+ spin_unlock_irq(&priv->lock);
+ del_timer_sync(&hw->end_timer);
+ spin_lock_irq(&priv->lock);
+
+ hw->stopping = false;
+
/* switch off and disable interrupts */
img_ir_write(priv, IMG_IR_CONTROL, 0);
irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
@@ -541,12 +557,13 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
img_ir_write(priv, IMG_IR_STATUS, ir_status);
- img_ir_read(priv, IMG_IR_DATA_LW);
- img_ir_read(priv, IMG_IR_DATA_UP);
}
- /* stop the end timer and switch back to normal mode */
- del_timer_sync(&hw->end_timer);
+ /* always read data to clear buffer if IR wakes the device */
+ img_ir_read(priv, IMG_IR_DATA_LW);
+ img_ir_read(priv, IMG_IR_DATA_UP);
+
+ /* switch back to normal mode */
hw->mode = IMG_IR_M_NORMAL;
/* clear the wakeup scancode filter */
@@ -817,7 +834,8 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
}
- if (dec->repeat) {
+ /* we mustn't update the end timer while trying to stop it */
+ if (dec->repeat && !hw->stopping) {
unsigned long interval;
img_ir_begin_repeat(priv);
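
The stopping flag introduced here is the usual way to tear down a timer whose handler takes the same spinlock: mark the shutdown under the lock, drop the lock before del_timer_sync() (which would otherwise deadlock waiting for a handler that is itself blocked on the lock), and have the handler refuse to re-arm while the flag is set. In outline:

    spin_lock_irq(&priv->lock);
    hw->stopping = true;            /* handler must not re-arm the timer */
    spin_unlock_irq(&priv->lock);

    del_timer_sync(&hw->end_timer); /* safe: the lock is not held here */

    spin_lock_irq(&priv->lock);
    hw->stopping = false;
    /* ... reconfigure the decoder under the lock ... */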
diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
index 8fcc16c32c5b..5c2b216c5fe3 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.h
+++ b/drivers/media/rc/img-ir/img-ir-hw.h
@@ -186,9 +186,6 @@ struct img_ir_reg_timings {
struct img_ir_timing_regvals rtimings;
};
-int img_ir_register_decoder(struct img_ir_decoder *dec);
-void img_ir_unregister_decoder(struct img_ir_decoder *dec);
-
struct img_ir_priv;
#ifdef CONFIG_IR_IMG_HW
@@ -214,6 +211,8 @@ enum img_ir_mode {
* @flags: IMG_IR_F_*.
* @filters: HW filters (derived from scancode filters).
* @mode: Current decode mode.
+ * @stopping: Indicates that decoder is being taken down and timers
+ * should not be restarted.
* @suspend_irqen: Saved IRQ enable mask over suspend.
*/
struct img_ir_priv_hw {
@@ -229,6 +228,7 @@ struct img_ir_priv_hw {
struct img_ir_filter filters[RC_FILTER_MAX];
enum img_ir_mode mode;
+ bool stopping;
u32 suspend_irqen;
};
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index ed2c8a1ed8ca..98893a8332c7 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
return -EINVAL;
/* Packet start */
- if (ev.reset)
- return 0;
+ if (ev.reset) {
+ /* Userspace expects a long space event before the start of
+ * the signal to use as a sync. This may be done with repeat
+ * packets and normal samples. But if a reset has been sent
+ * then we assume that a long time has passed, so we send a
+ * space with the maximum time value. */
+ sample = LIRC_SPACE(LIRC_VALUE_MASK);
+ IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
/* Carrier reports */
- if (ev.carrier_report) {
+ } else if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
IR_dprintk(2, "carrier report (freq: %d)\n", sample);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 249d2fbc8f37..1e0545a67959 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -518,8 +518,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
- if (ir->d.rdev)
- rc_close(ir->d.rdev);
+ rc_close(ir->d.rdev);
ir->open--;
if (ir->attached) {
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
new file mode 100644
index 000000000000..fcc3b82d1454
--- /dev/null
+++ b/drivers/media/rc/meson-ir.c
@@ -0,0 +1,216 @@
+/*
+ * Driver for Amlogic Meson IR remote receiver
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <media/rc-core.h>
+
+#define DRIVER_NAME "meson-ir"
+
+#define IR_DEC_LDR_ACTIVE 0x00
+#define IR_DEC_LDR_IDLE 0x04
+#define IR_DEC_LDR_REPEAT 0x08
+#define IR_DEC_BIT_0 0x0c
+#define IR_DEC_REG0 0x10
+#define IR_DEC_FRAME 0x14
+#define IR_DEC_STATUS 0x18
+#define IR_DEC_REG1 0x1c
+
+#define REG0_RATE_MASK (BIT(11) - 1)
+
+#define REG1_MODE_MASK (BIT(7) | BIT(8))
+#define REG1_MODE_NEC (0 << 7)
+#define REG1_MODE_GENERAL (2 << 7)
+
+#define REG1_TIME_IV_SHIFT 16
+#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
+
+#define REG1_IRQSEL_MASK (BIT(2) | BIT(3))
+#define REG1_IRQSEL_NEC_MODE (0 << 2)
+#define REG1_IRQSEL_RISE_FALL (1 << 2)
+#define REG1_IRQSEL_FALL (2 << 2)
+#define REG1_IRQSEL_RISE (3 << 2)
+
+#define REG1_RESET BIT(0)
+#define REG1_ENABLE BIT(15)
+
+#define STATUS_IR_DEC_IN BIT(8)
+
+#define MESON_TRATE 10 /* us */
+
+struct meson_ir {
+ void __iomem *reg;
+ struct rc_dev *rc;
+ int irq;
+ spinlock_t lock;
+};
+
+static void meson_ir_set_mask(struct meson_ir *ir, unsigned int reg,
+ u32 mask, u32 value)
+{
+ u32 data;
+
+ data = readl(ir->reg + reg);
+ data &= ~mask;
+ data |= (value & mask);
+ writel(data, ir->reg + reg);
+}
+
+static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
+{
+ struct meson_ir *ir = dev_id;
+ u32 duration;
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ spin_lock(&ir->lock);
+
+ duration = readl(ir->reg + IR_DEC_REG1);
+ duration = (duration & REG1_TIME_IV_MASK) >> REG1_TIME_IV_SHIFT;
+ rawir.duration = US_TO_NS(duration * MESON_TRATE);
+
+ rawir.pulse = !!(readl(ir->reg + IR_DEC_STATUS) & STATUS_IR_DEC_IN);
+
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+ ir_raw_event_handle(ir->rc);
+
+ spin_unlock(&ir->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int meson_ir_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ const char *map_name;
+ struct meson_ir *ir;
+ int ret;
+
+ ir = devm_kzalloc(dev, sizeof(struct meson_ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ir->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ir->reg)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(ir->reg);
+ }
+
+ ir->irq = platform_get_irq(pdev, 0);
+ if (ir->irq < 0) {
+ dev_err(dev, "no irq resource\n");
+ return ir->irq;
+ }
+
+ ir->rc = rc_allocate_device();
+ if (!ir->rc) {
+ dev_err(dev, "failed to allocate rc device\n");
+ return -ENOMEM;
+ }
+
+ ir->rc->priv = ir;
+ ir->rc->input_name = DRIVER_NAME;
+ ir->rc->input_phys = DRIVER_NAME "/input0";
+ ir->rc->input_id.bustype = BUS_HOST;
+ map_name = of_get_property(node, "linux,rc-map-name", NULL);
+ ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
+ ir->rc->dev.parent = dev;
+ ir->rc->driver_type = RC_DRIVER_IR_RAW;
+ ir->rc->allowed_protocols = RC_BIT_ALL;
+ ir->rc->rx_resolution = US_TO_NS(MESON_TRATE);
+ ir->rc->timeout = MS_TO_NS(200);
+ ir->rc->driver_name = DRIVER_NAME;
+
+ spin_lock_init(&ir->lock);
+ platform_set_drvdata(pdev, ir);
+
+ ret = rc_register_device(ir->rc);
+ if (ret) {
+ dev_err(dev, "failed to register rc device\n");
+ goto out_free;
+ }
+
+ ret = devm_request_irq(dev, ir->irq, meson_ir_irq, 0, "ir-meson", ir);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto out_unreg;
+ }
+
+ /* Reset the decoder */
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
+ /* Set general operation mode */
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL);
+ /* Set rate */
+ meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
+ /* IRQ on rising and falling edges */
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_IRQSEL_MASK,
+ REG1_IRQSEL_RISE_FALL);
+ /* Enable the decoder */
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, REG1_ENABLE);
+
+ dev_info(dev, "receiver initialized\n");
+
+ return 0;
+out_unreg:
+ rc_unregister_device(ir->rc);
+ ir->rc = NULL;
+out_free:
+ rc_free_device(ir->rc);
+
+ return ret;
+}
+
+static int meson_ir_remove(struct platform_device *pdev)
+{
+ struct meson_ir *ir = platform_get_drvdata(pdev);
+ unsigned long flags;
+
+ /* Disable the decoder */
+ spin_lock_irqsave(&ir->lock, flags);
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, 0);
+ spin_unlock_irqrestore(&ir->lock, flags);
+
+ rc_unregister_device(ir->rc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_ir_match[] = {
+ { .compatible = "amlogic,meson6-ir" },
+ { },
+};
+
+static struct platform_driver meson_ir_driver = {
+ .probe = meson_ir_probe,
+ .remove = meson_ir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = meson_ir_match,
+ },
+};
+
+module_platform_driver(meson_ir_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson IR remote receiver driver");
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 8d3b74c5a717..86ffcd54339e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1298,8 +1298,7 @@ void rc_free_device(struct rc_dev *dev)
if (!dev)
return;
- if (dev->input_dev)
- input_free_device(dev->input_dev);
+ input_free_device(dev->input_dev);
put_device(&dev->dev);
@@ -1414,13 +1413,16 @@ int rc_register_device(struct rc_dev *dev)
ir_raw_init();
raw_init = true;
}
+ /* ir_raw_event_register() may take dev->lock, so unlock the mutex here */
+ mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
+ mutex_lock(&dev->lock);
if (rc < 0)
goto out_input;
}
if (dev->change_protocol) {
- u64 rc_type = (1 << rc_map->rc_type);
+ u64 rc_type = (1ll << rc_map->rc_type);
if (dev->driver_type == RC_DRIVER_IR_RAW)
rc_type |= RC_BIT_LIRC;
rc = dev->change_protocol(dev, &rc_type);
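
The switch from 1 << rc_map->rc_type to 1ll << rc_map->rc_type matters because an unsuffixed 1 is a 32-bit int, so shifting it by 32 or more is undefined and the upper half of the u64 protocol mask is lost. For illustration:

    u64 mask;

    mask = 1 << 40;         /* wrong: 1 is a 32-bit int, the shift is undefined */
    mask = 1ULL << 40;      /* right: 0x0000010000000000, shifted as 64 bits */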
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 795b394a5d84..c4def66f9aa2 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -966,7 +966,7 @@ static int redrat3_dev_probe(struct usb_interface *intf,
rr3->ep_in = ep_in;
rr3->bulk_in_buf = usb_alloc_coherent(udev,
- le16_to_cpu(ep_in->wMaxPacketSize), GFP_ATOMIC, &rr3->dma_in);
+ le16_to_cpu(ep_in->wMaxPacketSize), GFP_KERNEL, &rr3->dma_in);
if (!rr3->bulk_in_buf) {
dev_err(dev, "Read buffer allocation failure\n");
goto error;
@@ -975,6 +975,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf,
le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3);
+ rr3->read_urb->transfer_dma = rr3->dma_in;
+ rr3->read_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
rr3->ep_out = ep_out;
rr3->udev = udev;
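
Setting transfer_dma and URB_NO_TRANSFER_DMA_MAP, as done above, tells usbcore that the buffer already comes from usb_alloc_coherent() and needs no per-URB DMA mapping. The usual pairing looks roughly like this (error handling elided, names illustrative):

    buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &dma);
    usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, ctx);
    urb->transfer_dma = dma;
    urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
    /* ... submit the urb; on teardown: */
    usb_free_coherent(udev, len, buf, dma);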
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index bcee8e1a4e9e..340f7f51eed4 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -306,7 +306,6 @@ static struct platform_driver sunxi_ir_driver = {
.remove = sunxi_ir_remove,
.driver = {
.name = SUNXI_IR_DEV,
- .owner = THIS_MODULE,
.of_match_table = sunxi_ir_match,
},
};
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index f039dc2a21cf..42e5a01b9192 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -232,6 +232,14 @@ config MEDIA_TUNER_M88TS2022
help
Montage M88TS2022 silicon tuner driver.
+config MEDIA_TUNER_M88RS6000T
+ tristate "Montage M88RS6000 internal tuner"
+ depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Montage M88RS6000 internal tuner.
+
config MEDIA_TUNER_TUA9001
tristate "Infineon TUA 9001 silicon tuner"
depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 49fcf8033848..da4fe6ef73e7 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_MEDIA_TUNER_IT913X) += it913x.o
obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
obj-$(CONFIG_MEDIA_TUNER_MXL301RF) += mxl301rf.o
obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o
+obj-$(CONFIG_MEDIA_TUNER_M88RS6000T) += m88rs6000t.o
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
new file mode 100644
index 000000000000..d4c13fe6e7b3
--- /dev/null
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -0,0 +1,744 @@
+/*
+ * Driver for the internal tuner of Montage M88RS6000
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "m88rs6000t.h"
+#include <linux/regmap.h>
+
+struct m88rs6000t_dev {
+ struct m88rs6000t_config cfg;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ u32 frequency_khz;
+};
+
+struct m88rs6000t_reg_val {
+ u8 reg;
+ u8 val;
+};
+
+/* set demod main mclk and ts mclk */
+static int m88rs6000t_set_demod_mclk(struct dvb_frontend *fe)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 reg11, reg15, reg16, reg1D, reg1E, reg1F;
+ u8 N, f0 = 0, f1 = 0, f2 = 0, f3 = 0;
+ u16 pll_div_fb;
+ u32 div, ts_mclk;
+ unsigned int utmp;
+ int ret;
+
+ /* select demod main mclk */
+ ret = regmap_read(dev->regmap, 0x15, &utmp);
+ if (ret)
+ goto err;
+ reg15 = utmp;
+ if (c->symbol_rate > 45010000) {
+ reg11 = 0x0E;
+ reg15 |= 0x02;
+ reg16 = 115; /* mclk = 110.25MHz */
+ } else {
+ reg11 = 0x0A;
+ reg15 &= ~0x02;
+ reg16 = 96; /* mclk = 96MHz */
+ }
+
+ /* set ts mclk */
+ if (c->delivery_system == SYS_DVBS)
+ ts_mclk = 96000;
+ else
+ ts_mclk = 144000;
+
+ pll_div_fb = (reg15 & 0x01) << 8;
+ pll_div_fb += reg16;
+ pll_div_fb += 32;
+
+ div = 36000 * pll_div_fb;
+ div /= ts_mclk;
+
+ if (div <= 32) {
+ N = 2;
+ f0 = 0;
+ f1 = div / 2;
+ f2 = div - f1;
+ f3 = 0;
+ } else if (div <= 48) {
+ N = 3;
+ f0 = div / 3;
+ f1 = (div - f0) / 2;
+ f2 = div - f0 - f1;
+ f3 = 0;
+ } else if (div <= 64) {
+ N = 4;
+ f0 = div / 4;
+ f1 = (div - f0) / 3;
+ f2 = (div - f0 - f1) / 2;
+ f3 = div - f0 - f1 - f2;
+ } else {
+ N = 4;
+ f0 = 16;
+ f1 = 16;
+ f2 = 16;
+ f3 = 16;
+ }
+
+ if (f0 == 16)
+ f0 = 0;
+ if (f1 == 16)
+ f1 = 0;
+ if (f2 == 16)
+ f2 = 0;
+ if (f3 == 16)
+ f3 = 0;
+
+ ret = regmap_read(dev->regmap, 0x1D, &utmp);
+ if (ret)
+ goto err;
+ reg1D = utmp;
+ reg1D &= ~0x03;
+ reg1D |= N - 1;
+ reg1E = ((f3 << 4) + f2) & 0xFF;
+ reg1F = ((f1 << 4) + f0) & 0xFF;
+
+ /* program and recalibrate demod PLL */
+ ret = regmap_write(dev->regmap, 0x05, 0x40);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x11, 0x08);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x15, reg15);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x16, reg16);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x1D, reg1D);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x1E, reg1E);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x1F, reg1F);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x17, 0xc1);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x17, 0x81);
+ if (ret)
+ goto err;
+ usleep_range(5000, 50000);
+ ret = regmap_write(dev->regmap, 0x05, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x11, reg11);
+ if (ret)
+ goto err;
+ usleep_range(5000, 50000);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int m88rs6000t_set_pll_freq(struct m88rs6000t_dev *dev,
+ u32 tuner_freq_MHz)
+{
+ u32 fcry_KHz, ulNDiv1, ulNDiv2, ulNDiv;
+ u8 refDiv, ucLoDiv1, ucLomod1, ucLoDiv2, ucLomod2, ucLoDiv, ucLomod;
+ u8 reg27, reg29, reg42, reg42buf;
+ unsigned int utmp;
+ int ret;
+
+ fcry_KHz = 27000; /* in kHz */
+ refDiv = 27;
+
+ ret = regmap_write(dev->regmap, 0x36, (refDiv - 8));
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x31, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x2c, 0x02);
+ if (ret)
+ goto err;
+
+ if (tuner_freq_MHz >= 1550) {
+ ucLoDiv1 = 2;
+ ucLomod1 = 0;
+ ucLoDiv2 = 2;
+ ucLomod2 = 0;
+ } else if (tuner_freq_MHz >= 1380) {
+ ucLoDiv1 = 3;
+ ucLomod1 = 16;
+ ucLoDiv2 = 2;
+ ucLomod2 = 0;
+ } else if (tuner_freq_MHz >= 1070) {
+ ucLoDiv1 = 3;
+ ucLomod1 = 16;
+ ucLoDiv2 = 3;
+ ucLomod2 = 16;
+ } else if (tuner_freq_MHz >= 1000) {
+ ucLoDiv1 = 3;
+ ucLomod1 = 16;
+ ucLoDiv2 = 4;
+ ucLomod2 = 64;
+ } else if (tuner_freq_MHz >= 775) {
+ ucLoDiv1 = 4;
+ ucLomod1 = 64;
+ ucLoDiv2 = 4;
+ ucLomod2 = 64;
+ } else if (tuner_freq_MHz >= 700) {
+ ucLoDiv1 = 6;
+ ucLomod1 = 48;
+ ucLoDiv2 = 4;
+ ucLomod2 = 64;
+ } else if (tuner_freq_MHz >= 520) {
+ ucLoDiv1 = 6;
+ ucLomod1 = 48;
+ ucLoDiv2 = 6;
+ ucLomod2 = 48;
+ } else {
+ ucLoDiv1 = 8;
+ ucLomod1 = 96;
+ ucLoDiv2 = 8;
+ ucLomod2 = 96;
+ }
+
+ ulNDiv1 = ((tuner_freq_MHz * ucLoDiv1 * 1000) * refDiv
+ / fcry_KHz - 1024) / 2;
+ ulNDiv2 = ((tuner_freq_MHz * ucLoDiv2 * 1000) * refDiv
+ / fcry_KHz - 1024) / 2;
+
+ reg27 = (((ulNDiv1 >> 8) & 0x0F) + ucLomod1) & 0x7F;
+ ret = regmap_write(dev->regmap, 0x27, reg27);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x28, (u8)(ulNDiv1 & 0xFF));
+ if (ret)
+ goto err;
+ reg29 = (((ulNDiv2 >> 8) & 0x0F) + ucLomod2) & 0x7f;
+ ret = regmap_write(dev->regmap, 0x29, reg29);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x2a, (u8)(ulNDiv2 & 0xFF));
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x2F, 0xf5);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x30, 0x05);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x1f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x3f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x20);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x3e, 0x11);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x2f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x3f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x10);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x00);
+ if (ret)
+ goto err;
+ usleep_range(2000, 50000);
+
+ ret = regmap_read(dev->regmap, 0x42, &utmp);
+ if (ret)
+ goto err;
+ reg42 = utmp;
+
+ ret = regmap_write(dev->regmap, 0x3e, 0x10);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x2f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x08, 0x3f);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x10);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x09, 0x00);
+ if (ret)
+ goto err;
+ usleep_range(2000, 50000);
+
+ ret = regmap_read(dev->regmap, 0x42, &utmp);
+ if (ret)
+ goto err;
+ reg42buf = utmp;
+ if (reg42buf < reg42) {
+ ret = regmap_write(dev->regmap, 0x3e, 0x11);
+ if (ret)
+ goto err;
+ }
+ usleep_range(5000, 50000);
+
+ ret = regmap_read(dev->regmap, 0x2d, &utmp);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x2d, utmp);
+ if (ret)
+ goto err;
+ ret = regmap_read(dev->regmap, 0x2e, &utmp);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x2e, utmp);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(dev->regmap, 0x27, &utmp);
+ if (ret)
+ goto err;
+ reg27 = utmp & 0x70;
+ ret = regmap_read(dev->regmap, 0x83, &utmp);
+ if (ret)
+ goto err;
+ if (reg27 == (utmp & 0x70)) {
+ ucLoDiv = ucLoDiv1;
+ ulNDiv = ulNDiv1;
+ ucLomod = ucLomod1 / 16;
+ } else {
+ ucLoDiv = ucLoDiv2;
+ ulNDiv = ulNDiv2;
+ ucLomod = ucLomod2 / 16;
+ }
+
+ if ((ucLoDiv == 3) || (ucLoDiv == 6)) {
+ refDiv = 18;
+ ret = regmap_write(dev->regmap, 0x36, (refDiv - 8));
+ if (ret)
+ goto err;
+ ulNDiv = ((tuner_freq_MHz * ucLoDiv * 1000) * refDiv
+ / fcry_KHz - 1024) / 2;
+ }
+
+ reg27 = (0x80 + ((ucLomod << 4) & 0x70)
+ + ((ulNDiv >> 8) & 0x0F)) & 0xFF;
+ ret = regmap_write(dev->regmap, 0x27, reg27);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x28, (u8)(ulNDiv & 0xFF));
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x29, 0x80);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x31, 0x03);
+ if (ret)
+ goto err;
+
+ if (ucLoDiv == 3)
+ utmp = 0xCE;
+ else
+ utmp = 0x8A;
+ ret = regmap_write(dev->regmap, 0x3b, utmp);
+ if (ret)
+ goto err;
+
+ dev->frequency_khz = fcry_KHz * (ulNDiv * 2 + 1024) / refDiv / ucLoDiv;
+
+ dev_dbg(&dev->client->dev,
+ "actual tune frequency=%d\n", dev->frequency_khz);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int m88rs6000t_set_bb(struct m88rs6000t_dev *dev,
+ u32 symbol_rate_KSs, s32 lpf_offset_KHz)
+{
+ u32 f3dB;
+ u8 reg40;
+
+ f3dB = symbol_rate_KSs * 9 / 14 + 2000;
+ f3dB += lpf_offset_KHz;
+ f3dB = clamp_val(f3dB, 6000U, 43000U);
+ reg40 = f3dB / 1000;
+ return regmap_write(dev->regmap, 0x40, reg40);
+}
+
+static int m88rs6000t_set_params(struct dvb_frontend *fe)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ s32 lpf_offset_KHz;
+ u32 realFreq, freq_MHz;
+
+ dev_dbg(&dev->client->dev,
+ "frequency=%d symbol_rate=%d\n",
+ c->frequency, c->symbol_rate);
+
+ if (c->symbol_rate < 5000000)
+ lpf_offset_KHz = 3000;
+ else
+ lpf_offset_KHz = 0;
+
+ realFreq = c->frequency + lpf_offset_KHz;
+ /* set tuner pll */
+ freq_MHz = (realFreq + 500) / 1000;
+ ret = m88rs6000t_set_pll_freq(dev, freq_MHz);
+ if (ret)
+ goto err;
+ ret = m88rs6000t_set_bb(dev, c->symbol_rate / 1000, lpf_offset_KHz);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x00, 0x01);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x00, 0x00);
+ if (ret)
+ goto err;
+ /* set demod mclk */
+ ret = m88rs6000t_set_demod_mclk(fe);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int m88rs6000t_init(struct dvb_frontend *fe)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+ int ret;
+
+ dev_dbg(&dev->client->dev, "%s:\n", __func__);
+
+ ret = regmap_update_bits(dev->regmap, 0x11, 0x08, 0x08);
+ if (ret)
+ goto err;
+ usleep_range(5000, 50000);
+ ret = regmap_update_bits(dev->regmap, 0x10, 0x01, 0x01);
+ if (ret)
+ goto err;
+ usleep_range(10000, 50000);
+ ret = regmap_write(dev->regmap, 0x07, 0x7d);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int m88rs6000t_sleep(struct dvb_frontend *fe)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+ int ret;
+
+ dev_dbg(&dev->client->dev, "%s:\n", __func__);
+
+ ret = regmap_write(dev->regmap, 0x07, 0x6d);
+ if (ret)
+ goto err;
+ usleep_range(5000, 10000);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+
+ dev_dbg(&dev->client->dev, "\n");
+
+ *frequency = dev->frequency_khz;
+ return 0;
+}
+
+static int m88rs6000t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+
+ dev_dbg(&dev->client->dev, "\n");
+
+ *frequency = 0; /* Zero-IF */
+ return 0;
+}
+
+
+static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct m88rs6000t_dev *dev = fe->tuner_priv;
+ unsigned int val, i;
+ int ret;
+ u16 gain;
+ u32 PGA2_cri_GS = 46, PGA2_crf_GS = 290, TIA_GS = 290;
+ u32 RF_GC = 1200, IF_GC = 1100, BB_GC = 300;
+ u32 PGA2_GC = 300, TIA_GC = 300, PGA2_cri = 0, PGA2_crf = 0;
+ u32 RFG = 0, IFG = 0, BBG = 0, PGA2G = 0, TIAG = 0;
+ u32 RFGS[13] = {0, 245, 266, 268, 270, 285,
+ 298, 295, 283, 285, 285, 300, 300};
+ u32 IFGS[12] = {0, 300, 230, 270, 270, 285,
+ 295, 285, 290, 295, 295, 310};
+ u32 BBGS[14] = {0, 286, 275, 290, 294, 300, 290,
+ 290, 285, 283, 260, 295, 290, 260};
+
+ ret = regmap_read(dev->regmap, 0x5A, &val);
+ if (ret)
+ goto err;
+ RF_GC = val & 0x0f;
+
+ ret = regmap_read(dev->regmap, 0x5F, &val);
+ if (ret)
+ goto err;
+ IF_GC = val & 0x0f;
+
+ ret = regmap_read(dev->regmap, 0x3F, &val);
+ if (ret)
+ goto err;
+ TIA_GC = (val >> 4) & 0x07;
+
+ ret = regmap_read(dev->regmap, 0x77, &val);
+ if (ret)
+ goto err;
+ BB_GC = (val >> 4) & 0x0f;
+
+ ret = regmap_read(dev->regmap, 0x76, &val);
+ if (ret)
+ goto err;
+ PGA2_GC = val & 0x3f;
+ PGA2_cri = PGA2_GC >> 2;
+ PGA2_crf = PGA2_GC & 0x03;
+
+ for (i = 0; i <= RF_GC; i++)
+ RFG += RFGS[i];
+
+ if (RF_GC == 0)
+ RFG += 400;
+ if (RF_GC == 1)
+ RFG += 300;
+ if (RF_GC == 2)
+ RFG += 200;
+ if (RF_GC == 3)
+ RFG += 100;
+
+ for (i = 0; i <= IF_GC; i++)
+ IFG += IFGS[i];
+
+ TIAG = TIA_GC * TIA_GS;
+
+ for (i = 0; i <= BB_GC; i++)
+ BBG += BBGS[i];
+
+ PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
+
+ gain = RFG + IFG - TIAG + BBG + PGA2G;
+
+ /* scale value to 0x0000-0xffff */
+ gain = clamp_val(gain, 1000U, 10500U);
+ *strength = (10500 - gain) * 0xffff / (10500 - 1000);
+err:
+ if (ret)
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops m88rs6000t_tuner_ops = {
+ .info = {
+ .name = "Montage M88RS6000 Internal Tuner",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ },
+
+ .init = m88rs6000t_init,
+ .sleep = m88rs6000t_sleep,
+ .set_params = m88rs6000t_set_params,
+ .get_frequency = m88rs6000t_get_frequency,
+ .get_if_frequency = m88rs6000t_get_if_frequency,
+ .get_rf_strength = m88rs6000t_get_rf_strength,
+};
+
+static int m88rs6000t_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct m88rs6000t_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct m88rs6000t_dev *dev;
+ int ret, i;
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ static const struct m88rs6000t_reg_val reg_vals[] = {
+ {0x10, 0xfb},
+ {0x24, 0x38},
+ {0x11, 0x0a},
+ {0x12, 0x00},
+ {0x2b, 0x1c},
+ {0x44, 0x48},
+ {0x54, 0x24},
+ {0x55, 0x06},
+ {0x59, 0x00},
+ {0x5b, 0x4c},
+ {0x60, 0x8b},
+ {0x61, 0xf4},
+ {0x65, 0x07},
+ {0x6d, 0x6f},
+ {0x6e, 0x31},
+ {0x3c, 0xf3},
+ {0x37, 0x0f},
+ {0x48, 0x28},
+ {0x49, 0xd8},
+ {0x70, 0x66},
+ {0x71, 0xCF},
+ {0x72, 0x81},
+ {0x73, 0xA7},
+ {0x74, 0x4F},
+ {0x75, 0xFC},
+ };
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "kzalloc() failed\n");
+ goto err;
+ }
+
+ memcpy(&dev->cfg, cfg, sizeof(struct m88rs6000t_config));
+ dev->client = client;
+ dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err;
+ }
+
+ ret = regmap_update_bits(dev->regmap, 0x11, 0x08, 0x08);
+ if (ret)
+ goto err;
+ usleep_range(5000, 50000);
+ ret = regmap_update_bits(dev->regmap, 0x10, 0x01, 0x01);
+ if (ret)
+ goto err;
+ usleep_range(10000, 50000);
+ ret = regmap_write(dev->regmap, 0x07, 0x7d);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x04, 0x01);
+ if (ret)
+ goto err;
+
+ /* check tuner chip id */
+ ret = regmap_read(dev->regmap, 0x01, &utmp);
+ if (ret)
+ goto err;
+ dev_info(&dev->client->dev, "chip_id=%02x\n", utmp);
+ if (utmp != 0x64) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* tuner init. */
+ ret = regmap_write(dev->regmap, 0x05, 0x40);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x11, 0x08);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x15, 0x6c);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x17, 0xc1);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x17, 0x81);
+ if (ret)
+ goto err;
+ usleep_range(10000, 50000);
+ ret = regmap_write(dev->regmap, 0x05, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap, 0x11, 0x0a);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ ret = regmap_write(dev->regmap,
+ reg_vals[i].reg, reg_vals[i].val);
+ if (ret)
+ goto err;
+ }
+
+ dev_info(&dev->client->dev, "Montage M88RS6000 internal tuner successfully identified\n");
+
+ fe->tuner_priv = dev;
+ memcpy(&fe->ops.tuner_ops, &m88rs6000t_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ i2c_set_clientdata(client, dev);
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ kfree(dev);
+ return ret;
+}
+
+static int m88rs6000t_remove(struct i2c_client *client)
+{
+ struct m88rs6000t_dev *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->cfg.fe;
+
+ dev_dbg(&client->dev, "\n");
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id m88rs6000t_id[] = {
+ {"m88rs6000t", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, m88rs6000t_id);
+
+static struct i2c_driver m88rs6000t_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "m88rs6000t",
+ },
+ .probe = m88rs6000t_probe,
+ .remove = m88rs6000t_remove,
+ .id_table = m88rs6000t_id,
+};
+
+module_i2c_driver(m88rs6000t_driver);
+
+MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("Montage M88RS6000 internal tuner driver");
+MODULE_LICENSE("GPL");
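
As a worked example of the TS clock divider logic in m88rs6000t_set_demod_mclk(), assume reg16 = 96 with bit 0 of reg15 clear (96 MHz main mclk) and a DVB-S ts_mclk of 96000 kHz:

    pll_div_fb = 0 + 96 + 32;       /* = 128 */
    div = 36000 * 128 / 96000;      /* = 48, so N = 3 */
    f0 = 48 / 3;                    /* = 16, written to the register as 0 */
    f1 = (48 - 16) / 2;             /* = 16 -> 0 */
    f2 = 48 - 16 - 16;              /* = 16 -> 0, and f3 stays 0 */
    /* reg1D gets N - 1 = 2 in its low bits; reg1E/reg1F pack f3..f0 */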
diff --git a/drivers/media/tuners/m88rs6000t.h b/drivers/media/tuners/m88rs6000t.h
new file mode 100644
index 000000000000..264c13e2cd39
--- /dev/null
+++ b/drivers/media/tuners/m88rs6000t.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for the internal tuner of Montage M88RS6000
+ *
+ * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _M88RS6000T_H_
+#define _M88RS6000T_H_
+
+#include "dvb_frontend.h"
+
+struct m88rs6000t_config {
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
+};
+
+#endif
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
index caa542346891..066e5431da93 100644
--- a/drivers/media/tuners/m88ts2022.c
+++ b/drivers/media/tuners/m88ts2022.c
@@ -488,6 +488,7 @@ static int m88ts2022_probe(struct i2c_client *client,
case 0x83:
break;
default:
+ ret = -ENODEV;
goto err;
}
@@ -505,6 +506,7 @@ static int m88ts2022_probe(struct i2c_client *client,
u8tmp = 0x6c;
break;
default:
+ ret = -EINVAL;
goto err;
}
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 69e453ef0a1a..1810ad66888e 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -374,7 +374,6 @@ static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
mxl5007t_set_if_freq_bits(state, cfg->if_freq_hz, cfg->invert_if);
mxl5007t_set_xtal_freq_bits(state, cfg->xtal_freq_hz);
- set_reg_bits(state->tab_init, 0x04, 0x01, cfg->loop_thru_enable);
set_reg_bits(state->tab_init, 0x03, 0x08, cfg->clk_out_enable << 3);
set_reg_bits(state->tab_init, 0x03, 0x07, cfg->clk_out_amp);
@@ -531,10 +530,6 @@ static int mxl5007t_tuner_init(struct mxl5007t_state *state,
struct reg_pair_t *init_regs;
int ret;
- ret = mxl5007t_soft_reset(state);
- if (mxl_fail(ret))
- goto fail;
-
/* calculate initialization reg array */
init_regs = mxl5007t_calc_init_regs(state, mode);
@@ -900,7 +895,32 @@ struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
/* existing tuner instance */
break;
}
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = mxl5007t_soft_reset(state);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (mxl_fail(ret))
+ goto fail;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = mxl5007t_write_reg(state, 0x04,
+ state->config->loop_thru_enable);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (mxl_fail(ret))
+ goto fail;
+
fe->tuner_priv = state;
+
mutex_unlock(&mxl5007t_list_mutex);
memcpy(&fe->ops.tuner_ops, &mxl5007t_tuner_ops,
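
The soft reset and the loop-through write added above are both bracketed by i2c_gate_ctrl calls because the tuner sits behind the demodulator's I2C gate; any register access from the attach path follows the same open/access/close shape (tuner_i2c_operation is a hypothetical placeholder for such an access):

    if (fe->ops.i2c_gate_ctrl)
            fe->ops.i2c_gate_ctrl(fe, 1);   /* open the demod's I2C gate */

    ret = tuner_i2c_operation(state);       /* e.g. soft reset or reg write */

    if (fe->ops.i2c_gate_ctrl)
            fe->ops.i2c_gate_ctrl(fe, 0);   /* close the gate again */

    if (mxl_fail(ret))
            goto fail;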
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index a759742cae7b..8e040cf9cf13 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -959,6 +959,18 @@ static int r820t_set_tv_standard(struct r820t_priv *priv,
lt_att = 0x00; /* r31[7], lt att enable */
flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
polyfil_cur = 0x60; /* r25[6:5]:min */
+ } else if (delsys == SYS_DVBC_ANNEX_A) {
+ if_khz = 5070;
+ filt_cal_lo = 73500;
+ filt_gain = 0x10; /* +3db, 6mhz on */
+ img_r = 0x00; /* image negative */
+ filt_q = 0x10; /* r10[4]:low q(1'b1) */
+ hp_cor = 0x0b; /* 1.7m disable, +0cap, 1.0mhz */
+ ext_enable = 0x40; /* r30[6]=1 ext enable; r30[5]:1 ext at lna max-1 */
+ loop_through = 0x00; /* r5[7], lt on */
+ lt_att = 0x00; /* r31[7], lt att enable */
+ flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
+ polyfil_cur = 0x60; /* r25[6:5]:min */
} else {
if (bw <= 6) {
if_khz = 3570;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index cf97142e01e6..2180de9d654a 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2147/2157/2158 silicon tuner driver
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
@@ -93,8 +93,13 @@ static int si2157_init(struct dvb_frontend *fe)
goto warm;
/* power up */
- memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
- cmd.wlen = 15;
+ if (s->chiptype == SI2157_CHIPTYPE_SI2146) {
+ memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
+ cmd.wlen = 9;
+ } else {
+ memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
+ cmd.wlen = 15;
+ }
cmd.rlen = 1;
ret = si2157_cmd_execute(s, &cmd);
if (ret)
@@ -112,17 +117,20 @@ static int si2157_init(struct dvb_frontend *fe)
cmd.args[4] << 0;
#define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
+ #define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
#define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
+ #define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
switch (chip_id) {
case SI2158_A20:
+ case SI2148_A20:
fw_file = SI2158_A20_FIRMWARE;
break;
case SI2157_A30:
case SI2147_A30:
+ case SI2146_A10:
goto skip_fw_download;
- break;
default:
dev_err(&s->client->dev,
"unknown chip version Si21%d-%c%c%c\n",
@@ -149,7 +157,7 @@ static int si2157_init(struct dvb_frontend *fe)
dev_err(&s->client->dev, "firmware file '%s' is invalid\n",
fw_file);
ret = -EINVAL;
- goto err;
+ goto fw_release_exit;
}
dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
@@ -165,7 +173,7 @@ static int si2157_init(struct dvb_frontend *fe)
dev_err(&s->client->dev,
"firmware download failed=%d\n",
ret);
- goto err;
+ goto fw_release_exit;
}
}
@@ -187,10 +195,9 @@ warm:
s->active = true;
return 0;
+fw_release_exit:
+ release_firmware(fw);
err:
- if (fw)
- release_firmware(fw);
-
dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -250,6 +257,9 @@ static int si2157_set_params(struct dvb_frontend *fe)
case SYS_ATSC:
delivery_system = 0x00;
break;
+ case SYS_DVBC_ANNEX_B:
+ delivery_system = 0x10;
+ break;
case SYS_DVBT:
case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
delivery_system = 0x20;
@@ -272,7 +282,10 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
+ if (s->chiptype == SI2157_CHIPTYPE_SI2146)
+ memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
+ else
+ memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
cmd.wlen = 6;
cmd.rlen = 4;
ret = si2157_cmd_execute(s, &cmd);
@@ -305,7 +318,7 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops si2157_ops = {
.info = {
- .name = "Silicon Labs Si2157/Si2158",
+ .name = "Silicon Labs Si2146/2147/2148/2157/2158",
.frequency_min = 110000000,
.frequency_max = 862000000,
},
@@ -336,6 +349,7 @@ static int si2157_probe(struct i2c_client *client,
s->fe = cfg->fe;
s->inversion = cfg->inversion;
s->fw_loaded = false;
+ s->chiptype = (u8)id->driver_data;
mutex_init(&s->i2c_mutex);
/* check if the tuner is there */
@@ -352,7 +366,10 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, s);
dev_info(&s->client->dev,
- "Silicon Labs Si2157/Si2158 successfully attached\n");
+ "Silicon Labs %s successfully attached\n",
+ s->chiptype == SI2157_CHIPTYPE_SI2146 ?
+ "Si2146" : "Si2147/2148/2157/2158");
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -377,6 +394,7 @@ static int si2157_remove(struct i2c_client *client)
static const struct i2c_device_id si2157_id[] = {
{"si2157", 0},
+ {"si2146", 1},
{}
};
MODULE_DEVICE_TABLE(i2c, si2157_id);
@@ -393,7 +411,7 @@ static struct i2c_driver si2157_driver = {
module_i2c_driver(si2157_driver);
-MODULE_DESCRIPTION("Silicon Labs Si2157/Si2158 silicon tuner driver");
+MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
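
The chip_id constants above pack the four identification bytes read back from the chip into one 32-bit word so a single switch can match them; for instance, SI2148_A20 expands to:

    /* 'A' = 0x41, 48 = 0x30, '2' = 0x32, '0' = 0x30 */
    #define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
    /* == 0x41303230, compared against cmd.args[1..4] assembled the same way */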
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index d3b19cadb4a1..a564c4a9fba7 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2147/2157/2158 silicon tuner driver
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index e71ffafed951..d6e07cdd2a07 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2147/2157/2158 silicon tuner driver
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
@@ -28,9 +28,13 @@ struct si2157 {
bool active;
bool fw_loaded;
bool inversion;
+ u8 chiptype;
};
-/* firmare command struct */
+#define SI2157_CHIPTYPE_SI2157 0
+#define SI2157_CHIPTYPE_SI2146 1
+
+/* firmware command struct */
#define SI2157_ARGLEN 30
struct si2157_cmd {
u8 args[SI2157_ARGLEN];
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 86e5e3110118..a26bb33102b8 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -178,7 +178,7 @@ int tda18271_read_extended(struct dvb_frontend *fe)
(i != R_EB17) &&
(i != R_EB19) &&
(i != R_EB20))
- regs[i] = regdump[i];
+ regs[i] = regdump[i];
}
if (tda18271_debug & DBG_REG)
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 803a0e63d47e..2a039de8ab9a 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -62,6 +62,7 @@ struct xc5000_priv {
unsigned int mode;
u8 rf_mode;
u8 radio_input;
+ u16 output_amp;
int chip_id;
u16 pll_register_no;
@@ -744,7 +745,9 @@ static int xc5000_tune_digital(struct dvb_frontend *fe)
return -EIO;
}
- xc_write_reg(priv, XREG_OUTPUT_AMP, 0x8a);
+ dprintk(1, "%s() setting OUTPUT_AMP to 0x%x\n",
+ __func__, priv->output_amp);
+ xc_write_reg(priv, XREG_OUTPUT_AMP, priv->output_amp);
xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
@@ -1333,8 +1336,7 @@ static int xc5000_release(struct dvb_frontend *fe)
if (priv) {
cancel_delayed_work(&priv->timer_sleep);
- if (priv->firmware)
- release_firmware(priv->firmware);
+ release_firmware(priv->firmware);
hybrid_tuner_release_state(priv);
}
@@ -1358,6 +1360,9 @@ static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
if (p->radio_input)
priv->radio_input = p->radio_input;
+ if (p->output_amp)
+ priv->output_amp = p->output_amp;
+
return 0;
}
@@ -1438,6 +1443,12 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
it can be overridden if this is a hybrid driver */
priv->chip_id = (cfg->chip_id) ? cfg->chip_id : 0;
+ /* don't override output_amp if it's already been set
+ unless explicitly specified */
+ if ((priv->output_amp == 0) || (cfg->output_amp))
+ /* use default output_amp value if none specified */
+ priv->output_amp = (cfg->output_amp) ? cfg->output_amp : 0x8a;
+
/* Check if firmware has been loaded. It is possible that another
instance of the driver has loaded the firmware.
*/
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 7245cae19f0c..6aa534f17a30 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -36,6 +36,7 @@ struct xc5000_config {
u32 if_khz;
u8 radio_input;
u16 xtal_khz;
+ u16 output_amp;
int chip_id;
};
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 056181f2f569..7496f332f3f5 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -24,7 +24,6 @@ if MEDIA_ANALOG_TV_SUPPORT
comment "Analog TV USB devices"
source "drivers/media/usb/pvrusb2/Kconfig"
source "drivers/media/usb/hdpvr/Kconfig"
-source "drivers/media/usb/tlg2300/Kconfig"
source "drivers/media/usb/usbvision/Kconfig"
source "drivers/media/usb/stk1160/Kconfig"
source "drivers/media/usb/go7007/Kconfig"
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 6f2eb7c8416c..8874ba774a34 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
-obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
obj-$(CONFIG_VIDEO_STK1160) += stk1160/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 9eb77ac2153b..da87f1cc31a9 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
au0828_clear(dev, REG_000, 0x10);
}
+/*
+ * WARNING: There's a quirks table at sound/usb/quirks-table.h
+ * that should also be updated every time a new device with V4L2 support
+ * is added here.
+ */
struct au0828_board au0828_boards[] = {
[AU0828_BOARD_UNKNOWN] = {
.name = "Unknown board",
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bc064803b6c7..082ae6ba492f 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -153,6 +153,14 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
dprintk(1, "%s()\n", __func__);
+ /* There is a small window after disconnect, before
+ dev->usbdev becomes NULL, in which poll routines (e.g. IR)
+ may still try to access the device and fill dmesg with
+ error messages. Set the status here so the poll routines
+ can check it and avoid access after disconnect.
+ */
+ dev->dev_state = DEV_DISCONNECTED;
+
au0828_rc_unregister(dev);
/* Digital TV */
au0828_dvb_unregister(dev);
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 00ab1563d142..c267d76f5b3c 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -88,12 +88,14 @@ static struct xc5000_config hauppauge_xc5000a_config = {
.i2c_address = 0x61,
.if_khz = 6000,
.chip_id = XC5000A,
+ .output_amp = 0x8f,
};
static struct xc5000_config hauppauge_xc5000c_config = {
.i2c_address = 0x61,
.if_khz = 6000,
.chip_id = XC5000C,
+ .output_amp = 0x8f,
};
static struct mxl5007t_config mxl5007t_hvr950q_config = {
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 63995f97dc65..b0f067971979 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -129,6 +129,10 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
int prv_bit, bit, width;
bool first = true;
+ /* do nothing if device is disconnected */
+ if (ir->dev->dev_state == DEV_DISCONNECTED)
+ return 0;
+
/* Check IR int */
rc = au8522_rc_read(ir, 0xe1, -1, buf, 1);
if (rc < 0 || !(buf[0] & (1 << 4))) {
@@ -255,8 +259,11 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
- /* Disable IR */
- au8522_rc_clear(ir, 0xe0, 1 << 4);
+ /* do nothing if device is disconnected */
+ if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ /* Disable IR */
+ au8522_rc_clear(ir, 0xe0, 1 << 4);
+ }
}
static int au0828_probe_i2c_ir(struct au0828_dev *dev)
@@ -363,8 +370,7 @@ void au0828_rc_unregister(struct au0828_dev *dev)
if (!ir)
return;
- if (ir->rc)
- rc_unregister_device(ir->rc);
+ rc_unregister_device(ir->rc);
/* done */
kfree(ir);
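The au0828 hunks above all revolve around one guard: au0828_usb_disconnect() now sets dev->dev_state = DEV_DISCONNECTED before tearing down RC and DVB, and the IR paths that can still run afterwards check that flag before touching the au8522. A minimal sketch of the pattern, with a hypothetical wrapper name:

	/* Hypothetical wrapper, for illustration only: shows how the
	 * DEV_DISCONNECTED flag set in au0828_usb_disconnect() gates the
	 * IR polling done by au0828_get_key_au8522() above. */
	static int au0828_rc_poll_guarded(struct au0828_rc *ir)
	{
		if (ir->dev->dev_state == DEV_DISCONNECTED)
			return 0;	/* device unplugged: skip I2C access */
		return au0828_get_key_au8522(ir);
	}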
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 569aa298c03f..173c0e287a08 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_CX231XX
select VIDEOBUF_VMALLOC
select VIDEO_CX25840
select VIDEO_CX2341X
+ select I2C_MUX
---help---
This is a video4linux driver for Conexant 231xx USB based TV cards.
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 459bb0e98971..3f295b4d1a3d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -37,9 +39,6 @@
#include <media/v4l2-event.h>
#include <media/cx2341x.h>
#include <media/tuner.h>
-#include <linux/usb.h>
-
-#include "cx231xx.h"
#define CX231xx_FIRM_IMAGE_SIZE 376836
#define CX231xx_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
@@ -90,10 +89,10 @@ static unsigned int v4l_debug = 1;
module_param(v4l_debug, int, 0644);
MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
-#define dprintk(level, fmt, arg...)\
- do { if (v4l_debug >= level) \
- pr_info("%s: " fmt, \
- (dev) ? dev->name : "cx231xx[?]", ## arg); \
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (v4l_debug >= level) \
+ printk(KERN_DEBUG pr_fmt(fmt), ## arg); \
} while (0)
static struct cx231xx_tvnorm cx231xx_tvnorms[] = {
@@ -988,29 +987,34 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
IVTV_REG_APU, 0);
if (retval != 0) {
- pr_err("%s: Error with mc417_register_write\n", __func__);
+ dev_err(dev->dev,
+ "%s: Error with mc417_register_write\n", __func__);
return -1;
}
retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
- &dev->udev->dev);
+ dev->dev);
if (retval != 0) {
- pr_err("ERROR: Hotplug firmware request failed (%s).\n",
+ dev_err(dev->dev,
+ "ERROR: Hotplug firmware request failed (%s).\n",
CX231xx_FIRM_IMAGE_NAME);
- pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
+ dev_err(dev->dev,
+ "Please fix your hotplug setup, the board will not work without firmware loaded!\n");
return -1;
}
if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
- pr_err("ERROR: Firmware size mismatch (have %zd, expected %d)\n",
+ dev_err(dev->dev,
+ "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
firmware->size, CX231xx_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
if (0 != memcmp(firmware->data, magic, 8)) {
- pr_err("ERROR: Firmware magic mismatch, wrong file?\n");
+ dev_err(dev->dev,
+ "ERROR: Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
return -1;
}
@@ -1057,7 +1061,8 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS,
IVTV_CMD_HW_BLOCKS_RST);
if (retval < 0) {
- pr_err("%s: Error with mc417_register_write\n",
+ dev_err(dev->dev,
+ "%s: Error with mc417_register_write\n",
__func__);
return retval;
}
@@ -1069,7 +1074,8 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);
if (retval < 0) {
- pr_err("%s: Error with mc417_register_write\n",
+ dev_err(dev->dev,
+ "%s: Error with mc417_register_write\n",
__func__);
return retval;
}
@@ -1114,28 +1120,31 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
cx231xx_disable656(dev);
retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
- dprintk(2, "%s() PING OK\n", __func__);
+ dprintk(2, "%s: PING OK\n", __func__);
retval = cx231xx_load_firmware(dev);
if (retval < 0) {
- pr_err("%s() f/w load failed\n", __func__);
+ dev_err(dev->dev,
+ "%s: f/w load failed\n", __func__);
return retval;
}
retval = cx231xx_find_mailbox(dev);
if (retval < 0) {
- pr_err("%s() mailbox < 0, error\n",
+ dev_err(dev->dev, "%s: mailbox < 0, error\n",
__func__);
return -1;
}
dev->cx23417_mailbox = retval;
retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
- pr_err("ERROR: cx23417 firmware ping failed!\n");
+ dev_err(dev->dev,
+ "ERROR: cx23417 firmware ping failed!\n");
return -1;
}
retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
&version);
if (retval < 0) {
- pr_err("ERROR: cx23417 firmware get encoder: version failed!\n");
+ dev_err(dev->dev,
+ "ERROR: cx23417 firmware get encoder: version failed!\n");
return -1;
}
dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
@@ -1416,8 +1425,9 @@ static int bb_buf_prepare(struct videobuf_queue *q,
if (!dev->video_mode.bulk_ctl.num_bufs)
urb_init = 1;
}
- /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);*/
+ dev_dbg(dev->dev,
+ "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);
dev->mode_tv = 1;
if (urb_init) {
@@ -1688,7 +1698,7 @@ static int mpeg_open(struct file *file)
sizeof(struct cx231xx_buffer), fh, &dev->lock);
/*
videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
- &dev->udev->dev, &dev->ts1.slock,
+ dev->dev, &dev->ts1.slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx231xx_buffer),
@@ -1798,7 +1808,6 @@ static unsigned int mpeg_poll(struct file *file,
static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
{
struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
dprintk(2, "%s()\n", __func__);
@@ -1878,7 +1887,7 @@ static int cx231xx_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
/* fix videodecoder resolution */
fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
fmt.height = cxhdl->height;
- fmt.code = V4L2_MBUS_FMT_FIXED;
+ fmt.code = MEDIA_BUS_FMT_FIXED;
v4l2_subdev_call(dev->sd_cx25840, video, s_mbus_fmt, &fmt);
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9b925874d392..de4ae5eb4830 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -20,8 +20,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/kernel.h>
-#include <linux/usb.h>
#include <linux/init.h>
#include <linux/sound.h>
#include <linux/spinlock.h>
@@ -37,25 +37,18 @@
#include <sound/initval.h>
#include <sound/control.h>
#include <media/v4l2-common.h>
-#include "cx231xx.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
-#define dprintk(fmt, arg...) do { \
- if (debug) \
- printk(KERN_INFO "cx231xx-audio %s: " fmt, \
- __func__, ##arg); \
- } while (0)
-
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static int cx231xx_isoc_audio_deinit(struct cx231xx *dev)
{
int i;
- dprintk("Stopping isoc\n");
+ dev_dbg(dev->dev, "Stopping isoc\n");
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
if (dev->adev.urb[i]) {
@@ -79,7 +72,7 @@ static int cx231xx_bulk_audio_deinit(struct cx231xx *dev)
{
int i;
- dprintk("Stopping bulk\n");
+ dev_dbg(dev->dev, "Stopping bulk\n");
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
if (dev->adev.urb[i]) {
@@ -123,7 +116,8 @@ static void cx231xx_audio_isocirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dprintk("urb completition error %d.\n", urb->status);
+ dev_dbg(dev->dev, "urb completition error %d.\n",
+ urb->status);
break;
}
@@ -182,8 +176,9 @@ static void cx231xx_audio_isocirq(struct urb *urb)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
- status);
+ dev_err(dev->dev,
+ "resubmit of audio urb failed (error=%i)\n",
+ status);
}
return;
}
@@ -211,7 +206,8 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dprintk("urb completition error %d.\n", urb->status);
+ dev_dbg(dev->dev, "urb completition error %d.\n",
+ urb->status);
break;
}
@@ -266,8 +262,9 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
- status);
+ dev_err(dev->dev,
+ "resubmit of audio urb failed (error=%i)\n",
+ status);
}
return;
}
@@ -277,7 +274,8 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
int i, errCode;
int sb_size;
- cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: Starting ISO AUDIO transfers\n", __func__);
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -295,7 +293,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
- cx231xx_errdev("usb_alloc_urb failed!\n");
+ dev_err(dev->dev, "usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
@@ -338,7 +336,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
int i, errCode;
int sb_size;
- cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: Starting BULK AUDIO transfers\n", __func__);
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -356,7 +355,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
- cx231xx_errdev("usb_alloc_urb failed!\n");
+ dev_err(dev->dev, "usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
usb_free_urb(dev->adev.urb[j]);
kfree(dev->adev.transfer_buffer[j]);
@@ -392,8 +391,9 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
size_t size)
{
struct snd_pcm_runtime *runtime = subs->runtime;
+ struct cx231xx *dev = snd_pcm_substream_chip(subs);
- dprintk("Allocating vbuffer\n");
+ dev_dbg(dev->dev, "Allocating vbuffer\n");
if (runtime->dma_area) {
if (runtime->dma_bytes > size)
return 0;
@@ -436,16 +436,12 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int ret = 0;
- dprintk("opening device and trying to acquire exclusive lock\n");
-
- if (!dev) {
- cx231xx_errdev("BUG: cx231xx can't find device struct."
- " Can't proceed with open\n");
- return -ENODEV;
- }
+ dev_dbg(dev->dev,
+ "opening device and trying to acquire exclusive lock\n");
if (dev->state & DEV_DISCONNECTED) {
- cx231xx_errdev("Can't open. the device was removed.\n");
+ dev_err(dev->dev,
+ "Can't open. the device was removed.\n");
return -ENODEV;
}
@@ -458,7 +454,8 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
mutex_unlock(&dev->lock);
if (ret < 0) {
- cx231xx_errdev("failed to set alternate setting !\n");
+ dev_err(dev->dev,
+ "failed to set alternate setting !\n");
return ret;
}
@@ -484,7 +481,7 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
int ret;
struct cx231xx *dev = snd_pcm_substream_chip(substream);
- dprintk("closing device\n");
+ dev_dbg(dev->dev, "closing device\n");
/* inform hardware to stop streaming */
mutex_lock(&dev->lock);
@@ -494,7 +491,8 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
/* 1 - 48000 samples per sec */
ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
if (ret < 0) {
- cx231xx_errdev("failed to set alternate setting !\n");
+ dev_err(dev->dev,
+ "failed to set alternate setting !\n");
mutex_unlock(&dev->lock);
return ret;
@@ -504,10 +502,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
mutex_unlock(&dev->lock);
if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
- dprintk("audio users: %d\n", dev->adev.users);
- dprintk("disabling audio stream!\n");
+ dev_dbg(dev->dev, "audio users: %d\n", dev->adev.users);
+ dev_dbg(dev->dev, "disabling audio stream!\n");
dev->adev.shutdown = 0;
- dprintk("released lock\n");
+ dev_dbg(dev->dev, "released lock\n");
if (atomic_read(&dev->stream_started) > 0) {
atomic_set(&dev->stream_started, 0);
schedule_work(&dev->wq_trigger);
@@ -519,9 +517,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
+ struct cx231xx *dev = snd_pcm_substream_chip(substream);
int ret;
- dprintk("Setting capture parameters\n");
+ dev_dbg(dev->dev, "Setting capture parameters\n");
ret = snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
@@ -543,7 +542,7 @@ static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
- dprintk("Stop capture, if needed\n");
+ dev_dbg(dev->dev, "Stop capture, if needed\n");
if (atomic_read(&dev->stream_started) > 0) {
atomic_set(&dev->stream_started, 0);
@@ -568,7 +567,7 @@ static void audio_trigger(struct work_struct *work)
struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);
if (atomic_read(&dev->stream_started)) {
- dprintk("starting capture");
+ dev_dbg(dev->dev, "starting capture");
if (is_fw_load(dev) == 0)
cx25840_call(dev, core, load_fw);
if (dev->USE_ISO)
@@ -576,7 +575,7 @@ static void audio_trigger(struct work_struct *work)
else
cx231xx_init_audio_bulk(dev);
} else {
- dprintk("stopping capture");
+ dev_dbg(dev->dev, "stopping capture");
cx231xx_isoc_audio_deinit(dev);
}
}
@@ -662,10 +661,10 @@ static int cx231xx_audio_init(struct cx231xx *dev)
return 0;
}
- cx231xx_info("cx231xx-audio.c: probing for cx231xx "
- "non standard usbaudio\n");
+ dev_dbg(dev->dev,
+ "probing for cx231xx non standard usbaudio\n");
- err = snd_card_new(&dev->udev->dev, index[devnr], "Cx231xx Audio",
+ err = snd_card_new(dev->dev, index[devnr], "Cx231xx Audio",
THIS_MODULE, 0, &card);
if (err < 0)
return err;
@@ -707,14 +706,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
bEndpointAddress;
adev->num_alt = uif->num_altsetting;
- cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
- adev->end_point_addr, adev->num_alt);
+ dev_info(dev->dev,
+ "audio EndPoint Addr 0x%x, Alternate settings: %i\n",
+ adev->end_point_addr, adev->num_alt);
adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
- if (adev->alt_max_pkt_size == NULL) {
- cx231xx_errdev("out of memory!\n");
+ if (adev->alt_max_pkt_size == NULL)
return -ENOMEM;
- }
for (i = 0; i < adev->num_alt; i++) {
u16 tmp =
@@ -722,8 +720,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
wMaxPacketSize);
adev->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- cx231xx_info("Alternate setting %i, max size= %i\n", i,
- adev->alt_max_pkt_size[i]);
+ dev_dbg(dev->dev,
+ "audio alternate setting %i, max size= %i\n", i,
+ adev->alt_max_pkt_size[i]);
}
return 0;
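The cx231xx-audio.c changes above follow the same conversion applied throughout the rest of this series: the driver's private dprintk()/cx231xx_info()/cx231xx_errdev() wrappers give way to the standard dev_dbg()/dev_err() helpers, keyed off the dev->dev struct device pointer that the probe path now stores. A minimal sketch of the resulting call pattern (hypothetical function, not a hunk from the patch):

	/* Hypothetical example of the logging style the conversion targets. */
	static void cx231xx_log_status(struct cx231xx *dev, int status)
	{
		dev_dbg(dev->dev, "stopping isoc transfers\n");
		if (status < 0)
			dev_err(dev->dev, "transfer failed (error=%d)\n", status);
	}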
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 40a69879fc0a..39e887925e3d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -22,12 +22,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
-#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -36,7 +36,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-#include "cx231xx.h"
#include "cx231xx-dif.h"
#define TUNER_MODE_FM_RADIO 0
@@ -83,10 +82,10 @@ void initGPIO(struct cx231xx *dev)
cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0);
verve_read_byte(dev, 0x07, &val);
- cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+ dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val);
verve_write_byte(dev, 0x07, 0xF4);
verve_read_byte(dev, 0x07, &val);
- cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+ dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val);
cx231xx_capture_start(dev, 1, Vbi);
@@ -156,22 +155,25 @@ int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count)
while (afe_power_status != 0x18) {
status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18);
if (status < 0) {
- cx231xx_info(
- ": Init Super Block failed in send cmd\n");
+ dev_dbg(dev->dev,
+ "%s: Init Super Block failed in send cmd\n",
+ __func__);
break;
}
status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status);
afe_power_status &= 0xff;
if (status < 0) {
- cx231xx_info(
- ": Init Super Block failed in receive cmd\n");
+ dev_dbg(dev->dev,
+ "%s: Init Super Block failed in receive cmd\n",
+ __func__);
break;
}
i++;
if (i == 10) {
- cx231xx_info(
- ": Init Super Block force break in loop !!!!\n");
+ dev_dbg(dev->dev,
+ "%s: Init Super Block force break in loop !!!!\n",
+ __func__);
status = -1;
break;
}
@@ -410,7 +412,7 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3,
0x00);
} else {
- cx231xx_info("Invalid AV mode input\n");
+ dev_dbg(dev->dev, "Invalid AV mode input\n");
status = -1;
}
break;
@@ -467,7 +469,7 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3,
0x40);
} else {
- cx231xx_info("Invalid AV mode input\n");
+ dev_dbg(dev->dev, "Invalid AV mode input\n");
status = -1;
}
} /* switch */
@@ -573,9 +575,9 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
status = cx231xx_set_power_mode(dev,
POLARIS_AVMODE_ENXTERNAL_AV);
if (status < 0) {
- cx231xx_errdev("%s: set_power_mode : Failed to"
- " set Power - errCode [%d]!\n",
- __func__, status);
+ dev_err(dev->dev,
+ "%s: Failed to set Power - errCode [%d]!\n",
+ __func__, status);
return status;
}
}
@@ -591,8 +593,8 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
status = cx231xx_set_power_mode(dev,
POLARIS_AVMODE_ANALOGT_TV);
if (status < 0) {
- cx231xx_errdev("%s: set_power_mode:Failed"
- " to set Power - errCode [%d]!\n",
+ dev_err(dev->dev,
+ "%s: Failed to set Power - errCode [%d]!\n",
__func__, status);
return status;
}
@@ -608,8 +610,8 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
break;
default:
- cx231xx_errdev("%s: set_power_mode : Unknown Input %d !\n",
- __func__, INPUT(input)->type);
+ dev_err(dev->dev, "%s: Unknown Input %d !\n",
+ __func__, INPUT(input)->type);
break;
}
@@ -628,8 +630,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
if (pin_type != dev->video_input) {
status = cx231xx_afe_adjust_ref_count(dev, pin_type);
if (status < 0) {
- cx231xx_errdev("%s: adjust_ref_count :Failed to set"
- "AFE input mux - errCode [%d]!\n",
+ dev_err(dev->dev,
+ "%s: adjust_ref_count :Failed to set AFE input mux - errCode [%d]!\n",
__func__, status);
return status;
}
@@ -638,9 +640,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* call afe block to set video inputs */
status = cx231xx_afe_set_input_mux(dev, input);
if (status < 0) {
- cx231xx_errdev("%s: set_input_mux :Failed to set"
- " AFE input mux - errCode [%d]!\n",
- __func__, status);
+ dev_err(dev->dev,
+ "%s: set_input_mux :Failed to set AFE input mux - errCode [%d]!\n",
+ __func__, status);
return status;
}
@@ -670,8 +672,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
if (status < 0) {
- cx231xx_errdev("%s: cx231xx_dif set to By pass"
- " mode- errCode [%d]!\n",
+ dev_err(dev->dev,
+ "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
__func__, status);
return status;
}
@@ -715,8 +717,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
if (status < 0) {
- cx231xx_errdev("%s: cx231xx_dif set to By pass"
- " mode- errCode [%d]!\n",
+ dev_err(dev->dev,
+ "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
__func__, status);
return status;
}
@@ -790,9 +792,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
status = cx231xx_dif_set_standard(dev,
DIF_USE_BASEBAND);
if (status < 0) {
- cx231xx_errdev("%s: cx231xx_dif set to By pass"
- " mode- errCode [%d]!\n",
- __func__, status);
+ dev_err(dev->dev,
+ "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
+ __func__, status);
return status;
}
@@ -826,9 +828,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* Reinitialize the DIF */
status = cx231xx_dif_set_standard(dev, dev->norm);
if (status < 0) {
- cx231xx_errdev("%s: cx231xx_dif set to By pass"
- " mode- errCode [%d]!\n",
- __func__, status);
+ dev_err(dev->dev,
+ "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
+ __func__, status);
return status;
}
@@ -970,14 +972,14 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
{
int status = 0;
- cx231xx_info("do_mode_ctrl_overrides : 0x%x\n",
- (unsigned int)dev->norm);
+ dev_dbg(dev->dev, "%s: 0x%x\n",
+ __func__, (unsigned int)dev->norm);
/* Change the DFE_CTRL3 bp_percent to fix flagging */
status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280);
if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) {
- cx231xx_info("do_mode_ctrl_overrides NTSC\n");
+ dev_dbg(dev->dev, "%s: NTSC\n", __func__);
/* Move the close caption lines out of active video,
adjust the active video start point */
@@ -1004,7 +1006,7 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
(FLD_HBLANK_CNT, 0x79));
} else if (dev->norm & V4L2_STD_SECAM) {
- cx231xx_info("do_mode_ctrl_overrides SECAM\n");
+ dev_dbg(dev->dev, "%s: SECAM\n", __func__);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
@@ -1031,7 +1033,7 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
cx231xx_set_field
(FLD_HBLANK_CNT, 0x85));
} else {
- cx231xx_info("do_mode_ctrl_overrides PAL\n");
+ dev_dbg(dev->dev, "%s: PAL\n", __func__);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
@@ -1206,7 +1208,8 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
/* This is just a casual suggestion to people adding
new boards in case they use a tuner type we don't
currently know about */
- printk(KERN_INFO "Unknown tuner type configuring SIF");
+ dev_info(dev->dev,
+ "Unknown tuner type configuring SIF");
break;
}
break;
@@ -1270,8 +1273,13 @@ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
int status = 0;
bool current_is_port_3;
- if (dev->board.dont_use_port_3)
- is_port_3 = false;
+ /*
+ * Should this code check dev->port_3_switch_enabled first
+ * to skip unnecessary reading of the register?
+ * If yes, the flag dev->port_3_switch_enabled must be initialized
+ * correctly.
+ */
+
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
PWR_CTL_EN, value, 4);
if (status < 0)
@@ -1288,12 +1296,13 @@ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
else
value[0] &= ~I2C_DEMOD_EN;
- cx231xx_info("Changing the i2c master port to %d\n",
- is_port_3 ? 3 : 1);
-
status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
PWR_CTL_EN, value, 4);
+ /* remember status of the switch for usage in is_tuner */
+ if (status >= 0)
+ dev->port_3_switch_enabled = is_port_3;
+
return status;
}
@@ -1325,113 +1334,131 @@ void cx231xx_dump_HH_reg(struct cx231xx *dev)
for (i = 0x100; i < 0x140; i++) {
vid_blk_read_word(dev, i, &value);
- cx231xx_info("reg0x%x=0x%x\n", i, value);
+ dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x300; i < 0x400; i++) {
vid_blk_read_word(dev, i, &value);
- cx231xx_info("reg0x%x=0x%x\n", i, value);
+ dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x400; i < 0x440; i++) {
vid_blk_read_word(dev, i, &value);
- cx231xx_info("reg0x%x=0x%x\n", i, value);
+ dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
i = i+3;
}
vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
- cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+ dev_dbg(dev->dev, "AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
- cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+ dev_dbg(dev->dev, "AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
}
-void cx231xx_dump_SC_reg(struct cx231xx *dev)
+#if 0
+static void cx231xx_dump_SC_reg(struct cx231xx *dev)
{
u8 value[4] = { 0, 0, 0, 0 };
- cx231xx_info("cx231xx_dump_SC_reg!\n");
+ dev_dbg(dev->dev, "%s!\n", __func__);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
- value[1], value[2], value[3]);
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
+ value[1], value[2], value[3]);
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
value, 4);
- cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
- value[1], value[2], value[3]);
-
-
+ dev_dbg(dev->dev,
+ "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
+ value[1], value[2], value[3]);
}
+#endif
void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
@@ -1497,7 +1524,7 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
u32 standard = 0;
u8 value[4] = { 0, 0, 0, 0 };
- cx231xx_info("Enter cx231xx_set_Colibri_For_LowIF()\n");
+ dev_dbg(dev->dev, "Enter cx231xx_set_Colibri_For_LowIF()\n");
value[0] = (u8) 0x6F;
value[1] = (u8) 0x6F;
value[2] = (u8) 0x6F;
@@ -1517,7 +1544,7 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode,
standard);
- cx231xx_info("colibri_carrier_offset=%d, standard=0x%x\n",
+ dev_dbg(dev->dev, "colibri_carrier_offset=%d, standard=0x%x\n",
colibri_carrier_offset, standard);
/* Set the band Pass filter for DIF*/
@@ -1551,8 +1578,8 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
u64 pll_freq_u64 = 0;
u32 i = 0;
- cx231xx_info("if_freq=%d;spectral_invert=0x%x;mode=0x%x\n",
- if_freq, spectral_invert, mode);
+ dev_dbg(dev->dev, "if_freq=%d;spectral_invert=0x%x;mode=0x%x\n",
+ if_freq, spectral_invert, mode);
if (mode == TUNER_MODE_FM_RADIO) {
@@ -1595,8 +1622,7 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
if_freq = 16000000;
}
- cx231xx_info("Enter IF=%zu\n",
- ARRAY_SIZE(Dif_set_array));
+ dev_dbg(dev->dev, "Enter IF=%zu\n", ARRAY_SIZE(Dif_set_array));
for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
if (Dif_set_array[i].if_freq == if_freq) {
vid_blk_write_word(dev,
@@ -1708,7 +1734,7 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
u32 dif_misc_ctrl_value = 0;
u32 func_mode = 0;
- cx231xx_info("%s: setStandard to %x\n", __func__, standard);
+ dev_dbg(dev->dev, "%s: setStandard to %x\n", __func__, standard);
status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value);
if (standard != DIF_USE_BASEBAND)
@@ -2111,8 +2137,8 @@ int cx231xx_tuner_post_channel_change(struct cx231xx *dev)
{
int status = 0;
u32 dwval;
- cx231xx_info("cx231xx_tuner_post_channel_change dev->tuner_type =0%d\n",
- dev->tuner_type);
+ dev_dbg(dev->dev, "%s: dev->tuner_type =0%d\n",
+ __func__, dev->tuner_type);
/* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for
* SECAM L/B/D standards */
status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval);
@@ -2213,8 +2239,8 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
if (dev->power_mode != mode)
dev->power_mode = mode;
else {
- cx231xx_info(" setPowerMode::mode = %d, No Change req.\n",
- mode);
+ dev_dbg(dev->dev, "%s: mode = %d, No Change req.\n",
+ __func__, mode);
return 0;
}
@@ -2264,7 +2290,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
case POLARIS_AVMODE_ANALOGT_TV:
tmp |= PWR_DEMOD_EN;
- tmp |= (I2C_DEMOD_EN);
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
value[2] = (u8) (tmp >> 16);
@@ -2317,9 +2342,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
}
if (dev->board.tuner_type != TUNER_ABSENT) {
- /* Enable tuner */
- cx231xx_enable_i2c_port_3(dev, true);
-
/* reset the Tuner */
if (dev->board.tuner_gpio)
cx231xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -2363,7 +2385,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
}
tmp &= (~PWR_AV_MODE);
- tmp |= POLARIS_AVMODE_DIGITAL | I2C_DEMOD_EN;
+ tmp |= POLARIS_AVMODE_DIGITAL;
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
value[2] = (u8) (tmp >> 16);
@@ -2384,15 +2406,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
}
if (dev->board.tuner_type != TUNER_ABSENT) {
- /*
- * Enable tuner
- * Hauppauge Exeter seems to need to do something different!
- */
- if (dev->model == CX231XX_BOARD_HAUPPAUGE_EXETER)
- cx231xx_enable_i2c_port_3(dev, false);
- else
- cx231xx_enable_i2c_port_3(dev, true);
-
/* reset the Tuner */
if (dev->board.tuner_gpio)
cx231xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -2466,7 +2479,7 @@ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask)
u32 tmp = 0;
int status = 0;
- cx231xx_info("cx231xx_start_stream():: ep_mask = %x\n", ep_mask);
+ dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask);
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
value, 4);
if (status < 0)
@@ -2491,7 +2504,7 @@ int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask)
u32 tmp = 0;
int status = 0;
- cx231xx_info("cx231xx_stop_stream():: ep_mask = %x\n", ep_mask);
+ dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask);
status =
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4);
if (status < 0)
@@ -2519,61 +2532,72 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
if (dev->udev->speed == USB_SPEED_HIGH) {
switch (media_type) {
case Audio:
- cx231xx_info("%s: Audio enter HANC\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: Audio enter HANC\n", __func__);
status =
cx231xx_mode_register(dev, TS_MODE_REG, 0x9300);
break;
case Vbi:
- cx231xx_info("%s: set vanc registers\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: set vanc registers\n", __func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300);
break;
case Sliced_cc:
- cx231xx_info("%s: set hanc registers\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: set hanc registers\n", __func__);
status =
cx231xx_mode_register(dev, TS_MODE_REG, 0x1300);
break;
case Raw_Video:
- cx231xx_info("%s: set video registers\n", __func__);
+ dev_dbg(dev->dev,
+ "%s: set video registers\n", __func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
break;
case TS1_serial_mode:
- cx231xx_info("%s: set ts1 registers", __func__);
-
- if (dev->board.has_417) {
- cx231xx_info(" MPEG\n");
- value &= 0xFFFFFFFC;
- value |= 0x3;
-
- status = cx231xx_mode_register(dev, TS_MODE_REG, value);
-
- val[0] = 0x04;
- val[1] = 0xA3;
- val[2] = 0x3B;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_CFG_REG, val, 4);
-
- val[0] = 0x00;
- val[1] = 0x08;
- val[2] = 0x00;
- val[3] = 0x08;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_LENGTH_REG, val, 4);
-
- } else {
- cx231xx_info(" BDA\n");
- status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101);
- status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010);
- }
+ dev_dbg(dev->dev,
+ "%s: set ts1 registers", __func__);
+
+ if (dev->board.has_417) {
+ dev_dbg(dev->dev,
+ "%s: MPEG\n", __func__);
+ value &= 0xFFFFFFFC;
+ value |= 0x3;
+
+ status = cx231xx_mode_register(dev,
+ TS_MODE_REG, value);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev,
+ VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x08;
+ val[2] = 0x00;
+ val[3] = 0x08;
+ status = cx231xx_write_ctrl_reg(dev,
+ VRT_SET_REGISTER,
+ TS1_LENGTH_REG, val, 4);
+ } else {
+ dev_dbg(dev->dev, "%s: BDA\n", __func__);
+ status = cx231xx_mode_register(dev,
+ TS_MODE_REG, 0x101);
+ status = cx231xx_mode_register(dev,
+ TS1_CFG_REG, 0x010);
+ }
break;
case TS1_parallel_mode:
- cx231xx_info("%s: set ts1 parallel mode registers\n",
- __func__);
+ dev_dbg(dev->dev,
+ "%s: set ts1 parallel mode registers\n",
+ __func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400);
break;
@@ -2926,8 +2950,9 @@ int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev)
(nCnt > 0));
if (nCnt == 0)
- cx231xx_info("No ACK after %d msec -GPIO I2C failed!",
- nInit * 10);
+ dev_dbg(dev->dev,
+ "No ACK after %d msec -GPIO I2C failed!",
+ nInit * 10);
/*
* readAck
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 791f00c6276b..ae05d591f228 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -20,12 +20,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/usb.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
@@ -35,7 +35,6 @@
#include "xc5000.h"
#include "tda18271.h"
-#include "cx231xx.h"
static int tuner = -1;
module_param(tuner, int, 0444);
@@ -104,8 +103,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x02,
.norm = V4L2_STD_PAL,
@@ -144,8 +143,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x32,
.norm = V4L2_STD_NTSC,
@@ -184,8 +183,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x1c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x02,
.norm = V4L2_STD_PAL,
@@ -225,8 +224,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x1c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x02,
.norm = V4L2_STD_PAL,
@@ -262,7 +261,6 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_PAL,
.no_alt_vanc = 1,
.external_av = 1,
- .dont_use_port_3 = 1,
/* Actually, it has a 417, but it isn't working correctly.
* So set to 0 for now until someone can manage to get this
* to work reliably. */
@@ -297,8 +295,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x02,
.norm = V4L2_STD_PAL,
@@ -325,8 +323,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x32,
.norm = V4L2_STD_NTSC,
@@ -353,8 +351,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_1,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x0e,
.norm = V4L2_STD_NTSC,
@@ -390,7 +388,6 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
- .dont_use_port_3 = 1,
.input = {{
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
@@ -418,9 +415,9 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 2,
- .demod_i2c_master = 1,
- .ir_i2c_master = 2,
+ .tuner_i2c_master = I2C_2,
+ .demod_i2c_master = I2C_1_MUX_3,
+ .ir_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x10,
.norm = V4L2_STD_PAL_M,
@@ -456,9 +453,9 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 2,
- .demod_i2c_master = 1,
- .ir_i2c_master = 2,
+ .tuner_i2c_master = I2C_2,
+ .demod_i2c_master = I2C_1_MUX_3,
+ .ir_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x10,
.norm = V4L2_STD_NTSC_M,
@@ -494,9 +491,9 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 2,
- .demod_i2c_master = 1,
- .ir_i2c_master = 2,
+ .tuner_i2c_master = I2C_2,
+ .demod_i2c_master = I2C_1_MUX_3,
+ .ir_i2c_master = I2C_2,
.rc_map_name = RC_MAP_PIXELVIEW_002T,
.has_dvb = 1,
.demod_addr = 0x10,
@@ -532,7 +529,6 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
- .dont_use_port_3 = 1,
.input = {{
.type = CX231XX_VMUX_COMPOSITE1,
@@ -587,7 +583,7 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
+ .tuner_i2c_master = I2C_1_MUX_3,
.norm = V4L2_STD_PAL,
.input = {{
@@ -622,7 +618,7 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
+ .tuner_i2c_master = I2C_1_MUX_3,
.norm = V4L2_STD_NTSC,
.input = {{
@@ -656,7 +652,6 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
- .dont_use_port_3 = 1,
.input = {{
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
@@ -677,13 +672,12 @@ struct cx231xx_board cx231xx_boards[] = {
.decoder = CX231XX_AVDECODER,
.output_mode = OUT_MODE_VIP11,
.ctl_pin_status_mask = 0xFFFFFFC4,
- .agc_analog_digital_select_gpio = 0x0c,
+ .agc_analog_digital_select_gpio = 0x0c,
/* According with PV CxPlrCAP.inf file */
.gpio_pin_status_mask = 0x4001000,
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
- .dont_use_port_3 = 1,
/*.has_417 = 1, */
/* This board is believed to have a hardware encoding chip
* supporting mpeg1/2/4, but as the 417 is apparently not
@@ -718,8 +712,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x0e,
.norm = V4L2_STD_PAL,
@@ -757,8 +751,8 @@ struct cx231xx_board cx231xx_boards[] = {
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
.gpio_pin_status_mask = 0x4001000,
- .tuner_i2c_master = 1,
- .demod_i2c_master = 2,
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .demod_i2c_master = I2C_2,
.has_dvb = 1,
.demod_addr = 0x0e,
.norm = V4L2_STD_PAL,
@@ -861,9 +855,9 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
if (dev->tuner_type == TUNER_XC5000) {
if (command == XC5000_TUNER_RESET) {
- cx231xx_info
- ("Tuner CB: RESET: cmd %d : tuner type %d \n",
- command, dev->tuner_type);
+ dev_dbg(dev->dev,
+ "Tuner CB: RESET: cmd %d : tuner type %d\n",
+ command, dev->tuner_type);
cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit,
1);
msleep(10);
@@ -921,8 +915,8 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
cx231xx_set_model(dev);
- cx231xx_info("Identified as %s (card=%d)\n",
- dev->board.name, dev->model);
+ dev_info(dev->dev, "Identified as %s (card=%d)\n",
+ dev->board.name, dev->model);
/* set the direction for GPIO pins */
if (dev->board.tuner_gpio) {
@@ -980,25 +974,22 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
}
-static int read_eeprom(struct cx231xx *dev, u8 *eedata, int len)
+static int read_eeprom(struct cx231xx *dev, struct i2c_client *client,
+ u8 *eedata, int len)
{
int ret = 0;
- u8 addr = 0xa0 >> 1;
u8 start_offset = 0;
int len_todo = len;
u8 *eedata_cur = eedata;
int i;
- struct i2c_msg msg_write = { .addr = addr, .flags = 0,
+ struct i2c_msg msg_write = { .addr = client->addr, .flags = 0,
.buf = &start_offset, .len = 1 };
- struct i2c_msg msg_read = { .addr = addr, .flags = I2C_M_RD };
-
- /* mutex_lock(&dev->i2c_lock); */
- cx231xx_enable_i2c_port_3(dev, false);
+ struct i2c_msg msg_read = { .addr = client->addr, .flags = I2C_M_RD };
/* start reading at offset 0 */
- ret = i2c_transfer(&dev->i2c_bus[1].i2c_adap, &msg_write, 1);
+ ret = i2c_transfer(client->adapter, &msg_write, 1);
if (ret < 0) {
- cx231xx_err("Can't read eeprom\n");
+ dev_err(dev->dev, "Can't read eeprom\n");
return ret;
}
@@ -1006,20 +997,18 @@ static int read_eeprom(struct cx231xx *dev, u8 *eedata, int len)
msg_read.len = (len_todo > 64) ? 64 : len_todo;
msg_read.buf = eedata_cur;
- ret = i2c_transfer(&dev->i2c_bus[1].i2c_adap, &msg_read, 1);
+ ret = i2c_transfer(client->adapter, &msg_read, 1);
if (ret < 0) {
- cx231xx_err("Can't read eeprom\n");
+ dev_err(dev->dev, "Can't read eeprom\n");
return ret;
}
eedata_cur += msg_read.len;
len_todo -= msg_read.len;
}
- cx231xx_enable_i2c_port_3(dev, true);
- /* mutex_unlock(&dev->i2c_lock); */
-
for (i = 0; i + 15 < len; i += 16)
- cx231xx_info("i2c eeprom %02x: %*ph\n", i, 16, &eedata[i]);
+ dev_dbg(dev->dev, "i2c eeprom %02x: %*ph\n",
+ i, 16, &eedata[i]);
return 0;
}
@@ -1036,22 +1025,26 @@ void cx231xx_card_setup(struct cx231xx *dev)
/* request some modules */
if (dev->board.decoder == CX231XX_AVDECODER) {
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_bus[0].i2c_adap,
+ cx231xx_get_i2c_adap(dev, I2C_0),
"cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840 == NULL)
- cx231xx_info("cx25840 subdev registration failure\n");
+ dev_err(dev->dev,
+ "cx25840 subdev registration failure\n");
cx25840_call(dev, core, load_fw);
}
/* Initialize the tuner */
if (dev->board.tuner_type != TUNER_ABSENT) {
+ struct i2c_adapter *tuner_i2c = cx231xx_get_i2c_adap(dev,
+ dev->board.tuner_i2c_master);
dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ tuner_i2c,
"tuner",
dev->tuner_addr, NULL);
if (dev->sd_tuner == NULL)
- cx231xx_info("tuner subdev registration failure\n");
+ dev_err(dev->dev,
+ "tuner subdev registration failure\n");
else
cx231xx_config_tuner(dev);
}
@@ -1062,9 +1055,14 @@ void cx231xx_card_setup(struct cx231xx *dev)
{
struct tveeprom tvee;
static u8 eeprom[256];
+ struct i2c_client client;
+
+ memset(&client, 0, sizeof(client));
+ client.adapter = cx231xx_get_i2c_adap(dev, I2C_1_MUX_1);
+ client.addr = 0xa0 >> 1;
- read_eeprom(dev, eeprom, sizeof(eeprom));
- tveeprom_hauppauge_analog(&dev->i2c_bus[1].i2c_client,
+ read_eeprom(dev, &client, eeprom, sizeof(eeprom));
+ tveeprom_hauppauge_analog(&client,
&tvee, eeprom + 0xc0);
break;
}
@@ -1152,7 +1150,7 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
/* Query cx231xx to find what pcb config it is related to */
retval = initialize_cx231xx(dev);
if (retval < 0) {
- cx231xx_errdev("Failed to read PCB config\n");
+ dev_err(dev->dev, "Failed to read PCB config\n");
return retval;
}
@@ -1168,7 +1166,7 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
retval = cx231xx_config(dev);
if (retval) {
- cx231xx_errdev("error configuring device\n");
+ dev_err(dev->dev, "error configuring device\n");
return -ENOMEM;
}
@@ -1178,8 +1176,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
/* register i2c bus */
retval = cx231xx_dev_init(dev);
if (retval) {
- cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n",
- __func__, retval);
+ dev_err(dev->dev,
+ "%s: cx231xx_i2c_register - errCode [%d]!\n",
+ __func__, retval);
goto err_dev_init;
}
@@ -1200,8 +1199,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
retval = cx231xx_config(dev);
if (retval) {
- cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n",
- __func__, retval);
+ dev_err(dev->dev, "%s: cx231xx_config - errCode [%d]!\n",
+ __func__, retval);
goto err_dev_init;
}
@@ -1217,11 +1216,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
cx231xx_add_into_devlist(dev);
if (dev->board.has_417) {
- printk(KERN_INFO "attach 417 %d\n", dev->model);
+ dev_info(dev->dev, "attach 417 %d\n", dev->model);
if (cx231xx_417_register(dev) < 0) {
- printk(KERN_ERR
+ dev_err(dev->dev,
"%s() Failed to register 417 on VID_B\n",
- __func__);
+ __func__);
}
}
@@ -1285,7 +1284,8 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
/* compute alternate max packet sizes for video */
idx = dev->current_pcb_config.hs_config_info[0].interface_info.video_index + 1;
if (idx >= dev->max_iad_interface_count) {
- cx231xx_errdev("Video PCB interface #%d doesn't exist\n", idx);
+ dev_err(dev->dev,
+ "Video PCB interface #%d doesn't exist\n", idx);
return -ENODEV;
}
@@ -1294,28 +1294,29 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
dev->video_mode.num_alt = uif->num_altsetting;
- cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
- dev->video_mode.end_point_addr,
- dev->video_mode.num_alt);
+ dev_info(dev->dev,
+ "video EndPoint Addr 0x%x, Alternate settings: %i\n",
+ dev->video_mode.end_point_addr,
+ dev->video_mode.num_alt);
dev->video_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->video_mode.num_alt, GFP_KERNEL);
- if (dev->video_mode.alt_max_pkt_size == NULL) {
- cx231xx_errdev("out of memory!\n");
+ if (dev->video_mode.alt_max_pkt_size == NULL)
return -ENOMEM;
- }
for (i = 0; i < dev->video_mode.num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- cx231xx_info("Alternate setting %i, max size= %i\n", i,
- dev->video_mode.alt_max_pkt_size[i]);
+ dev_dbg(dev->dev,
+ "Alternate setting %i, max size= %i\n", i,
+ dev->video_mode.alt_max_pkt_size[i]);
}
/* VBI Init */
idx = dev->current_pcb_config.hs_config_info[0].interface_info.vanc_index + 1;
if (idx >= dev->max_iad_interface_count) {
- cx231xx_errdev("VBI PCB interface #%d doesn't exist\n", idx);
+ dev_err(dev->dev,
+ "VBI PCB interface #%d doesn't exist\n", idx);
return -ENODEV;
}
uif = udev->actconfig->interface[idx];
@@ -1325,16 +1326,15 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
bEndpointAddress;
dev->vbi_mode.num_alt = uif->num_altsetting;
- cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
- dev->vbi_mode.end_point_addr,
- dev->vbi_mode.num_alt);
+ dev_info(dev->dev,
+ "VBI EndPoint Addr 0x%x, Alternate settings: %i\n",
+ dev->vbi_mode.end_point_addr,
+ dev->vbi_mode.num_alt);
/* compute alternate max packet sizes for vbi */
dev->vbi_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->vbi_mode.num_alt, GFP_KERNEL);
- if (dev->vbi_mode.alt_max_pkt_size == NULL) {
- cx231xx_errdev("out of memory!\n");
+ if (dev->vbi_mode.alt_max_pkt_size == NULL)
return -ENOMEM;
- }
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
u16 tmp =
@@ -1342,8 +1342,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
desc.wMaxPacketSize);
dev->vbi_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- cx231xx_info("Alternate setting %i, max size= %i\n", i,
- dev->vbi_mode.alt_max_pkt_size[i]);
+ dev_dbg(dev->dev,
+ "Alternate setting %i, max size= %i\n", i,
+ dev->vbi_mode.alt_max_pkt_size[i]);
}
/* Sliced CC VBI init */
@@ -1351,7 +1352,8 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
/* compute alternate max packet sizes for sliced CC */
idx = dev->current_pcb_config.hs_config_info[0].interface_info.hanc_index + 1;
if (idx >= dev->max_iad_interface_count) {
- cx231xx_errdev("Sliced CC PCB interface #%d doesn't exist\n", idx);
+ dev_err(dev->dev,
+ "Sliced CC PCB interface #%d doesn't exist\n", idx);
return -ENODEV;
}
uif = udev->actconfig->interface[idx];
@@ -1361,23 +1363,22 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
bEndpointAddress;
dev->sliced_cc_mode.num_alt = uif->num_altsetting;
- cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
- dev->sliced_cc_mode.end_point_addr,
- dev->sliced_cc_mode.num_alt);
+ dev_info(dev->dev,
+ "sliced CC EndPoint Addr 0x%x, Alternate settings: %i\n",
+ dev->sliced_cc_mode.end_point_addr,
+ dev->sliced_cc_mode.num_alt);
dev->sliced_cc_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->sliced_cc_mode.num_alt, GFP_KERNEL);
-
- if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) {
- cx231xx_errdev("out of memory!\n");
+ if (dev->sliced_cc_mode.alt_max_pkt_size == NULL)
return -ENOMEM;
- }
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->sliced_cc_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- cx231xx_info("Alternate setting %i, max size= %i\n", i,
- dev->sliced_cc_mode.alt_max_pkt_size[i]);
+ dev_dbg(dev->dev,
+ "Alternate setting %i, max size= %i\n", i,
+ dev->sliced_cc_mode.alt_max_pkt_size[i]);
}
return 0;
@@ -1391,6 +1392,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev;
+ struct device *d = &interface->dev;
struct usb_interface *uif;
struct cx231xx *dev = NULL;
int retval = -ENODEV;
@@ -1401,6 +1403,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
struct usb_interface_assoc_descriptor *assoc_desc;
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
+ udev = usb_get_dev(interface_to_usbdev(interface));
/*
* Interface number 0 - IR interface (handled by mceusb driver)
@@ -1414,18 +1417,16 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS);
if (nr >= CX231XX_MAXBOARDS) {
/* No free device slots */
- cx231xx_err(DRIVER_NAME ": Supports only %i devices.\n",
- CX231XX_MAXBOARDS);
+ dev_err(d,
+ "Supports only %i devices.\n",
+ CX231XX_MAXBOARDS);
return -ENOMEM;
}
} while (test_and_set_bit(nr, &cx231xx_devused));
- udev = usb_get_dev(interface_to_usbdev(interface));
-
/* allocate memory for our device state and initialize it */
dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- cx231xx_err(DRIVER_NAME ": out of memory!\n");
clear_bit(nr, &cx231xx_devused);
return -ENOMEM;
}
@@ -1434,6 +1435,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev->devno = nr;
dev->model = id->driver_info;
dev->video_mode.alt = -1;
+ dev->dev = d;
dev->interface_count++;
/* reset gpio dir and value */
@@ -1472,14 +1474,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
speed = "unknown";
}
- cx231xx_info("New device %s %s @ %s Mbps "
- "(%04x:%04x) with %d interfaces\n",
- udev->manufacturer ? udev->manufacturer : "",
- udev->product ? udev->product : "",
- speed,
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct),
- dev->max_iad_interface_count);
+ dev_info(d,
+ "New device %s %s @ %s Mbps (%04x:%04x) with %d interfaces\n",
+ udev->manufacturer ? udev->manufacturer : "",
+ udev->product ? udev->product : "",
+ speed,
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ dev->max_iad_interface_count);
/* increment interface count */
dev->interface_count++;
@@ -1489,13 +1491,12 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
assoc_desc = udev->actconfig->intf_assoc[0];
if (assoc_desc->bFirstInterface != ifnum) {
- cx231xx_err(DRIVER_NAME ": Not found "
- "matching IAD interface\n");
+ dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV;
goto err_if;
}
- cx231xx_info("registering interface %d\n", ifnum);
+ dev_dbg(d, "registering interface %d\n", ifnum);
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
@@ -1503,7 +1504,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* Create v4l2 device */
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
- cx231xx_errdev("v4l2_device_register failed\n");
+ dev_err(d, "v4l2_device_register failed\n");
goto err_v4l2;
}
@@ -1520,7 +1521,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* compute alternate max packet sizes for TS1 */
idx = dev->current_pcb_config.hs_config_info[0].interface_info.ts1_index + 1;
if (idx >= dev->max_iad_interface_count) {
- cx231xx_errdev("TS1 PCB interface #%d doesn't exist\n", idx);
+ dev_err(d, "TS1 PCB interface #%d doesn't exist\n",
+ idx);
retval = -ENODEV;
goto err_video_alt;
}
@@ -1531,13 +1533,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
desc.bEndpointAddress;
dev->ts1_mode.num_alt = uif->num_altsetting;
- cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n",
- dev->ts1_mode.end_point_addr,
- dev->ts1_mode.num_alt);
+ dev_info(d,
+ "TS EndPoint Addr 0x%x, Alternate settings: %i\n",
+ dev->ts1_mode.end_point_addr,
+ dev->ts1_mode.num_alt);
dev->ts1_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->ts1_mode.num_alt, GFP_KERNEL);
if (dev->ts1_mode.alt_max_pkt_size == NULL) {
- cx231xx_errdev("out of memory!\n");
retval = -ENOMEM;
goto err_video_alt;
}
@@ -1548,8 +1550,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
wMaxPacketSize);
dev->ts1_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- cx231xx_info("Alternate setting %i, max size= %i\n", i,
- dev->ts1_mode.alt_max_pkt_size[i]);
+ dev_dbg(d, "Alternate setting %i, max size= %i\n",
+ i, dev->ts1_mode.alt_max_pkt_size[i]);
}
}
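The alternate-setting loops above (and the matching video/VBI/sliced-CC loops earlier in this file) all decode wMaxPacketSize the same way; the helper below is only an illustrative restatement of that arithmetic, with an invented function name, not code from the patch.

/*
 * Illustration only: per USB 2.0, bits 10:0 of wMaxPacketSize carry the
 * bytes per transaction and bits 12:11 the additional transactions per
 * microframe, so the product is the per-microframe maximum stored in
 * alt_max_pkt_size[].
 */
static unsigned int cx231xx_alt_bytes_per_uframe(u16 wMaxPacketSize)
{
	unsigned int base  = wMaxPacketSize & 0x07ff;		     /* bytes per transaction */
	unsigned int xacts = ((wMaxPacketSize & 0x1800) >> 11) + 1; /* 1 to 3 transactions */

	return base * xacts;
}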
@@ -1613,10 +1615,9 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
wake_up_interruptible_all(&dev->open);
if (dev->users) {
- cx231xx_warn
- ("device %s is open! Deregistration and memory "
- "deallocation are deferred on close.\n",
- video_device_node_name(dev->vdev));
+ dev_warn(dev->dev,
+ "device %s is open! Deregistration and memory deallocation are deferred on close.\n",
+ video_device_node_name(dev->vdev));
/* Even having users, it is safe to remove the RC i2c driver */
cx231xx_ir_exit(dev);
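Taken together, the probe() and disconnect() hunks above show the logging model this patch moves to: the USB interface's struct device is captured once and every later message goes through the generic dev_* helpers. A minimal sketch of that pattern, with an invented function name and only the fields the patch itself introduces:

/*
 * Sketch: store the interface's struct device at probe time, then log
 * through dev_info()/dev_err()/dev_dbg() instead of driver macros.
 */
static int example_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct device *d = &interface->dev;
	struct usb_device *udev = interface_to_usbdev(interface);
	struct cx231xx *dev;

	dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;	/* the allocator already logs the failure */

	dev->dev = d;
	dev_info(dev->dev, "bound cx231xx interface\n");
	return 0;
}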
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 180103e48036..4a3f28c4e8d3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -20,16 +20,15 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>
-#include "cx231xx.h"
#include "cx231xx-reg.h"
/* #define ENABLE_DEBUG_ISOC_FRAMES */
@@ -99,10 +98,10 @@ int cx231xx_register_extension(struct cx231xx_ops *ops)
mutex_lock(&cx231xx_devlist_mutex);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
- list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ list_for_each_entry(dev, &cx231xx_devlist, devlist) {
ops->init(dev);
-
- printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
+ dev_info(dev->dev, "%s initialized\n", ops->name);
+ }
mutex_unlock(&cx231xx_devlist_mutex);
return 0;
}
@@ -113,11 +112,11 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ list_for_each_entry(dev, &cx231xx_devlist, devlist) {
ops->fini(dev);
+ dev_info(dev->dev, "%s removed\n", ops->name);
+ }
-
- printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
list_del(&ops->next);
mutex_unlock(&cx231xx_devlist_mutex);
}
@@ -227,10 +226,9 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
/* call common vendor command request */
status = cx231xx_send_vendor_cmd(dev, &ven_req);
- if (status < 0) {
- cx231xx_info
- ("UsbInterface::sendCommand, failed with status -%d\n",
- status);
+ if (status < 0 && !dev->i2c_scan_running) {
+ dev_err(dev->dev, "%s: failed with status -%d\n",
+ __func__, status);
}
return status;
@@ -524,9 +522,9 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
usb_set_interface(dev->udev, usb_interface_index,
dev->video_mode.alt);
if (errCode < 0) {
- cx231xx_errdev
- ("cannot change alt number to %d (error=%i)\n",
- dev->video_mode.alt, errCode);
+ dev_err(dev->dev,
+ "cannot change alt number to %d (error=%i)\n",
+ dev->video_mode.alt, errCode);
return errCode;
}
}
@@ -600,9 +598,9 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
}
if (alt > 0 && max_pkt_size == 0) {
- cx231xx_errdev
- ("can't change interface %d alt no. to %d: Max. Pkt size = 0\n",
- usb_interface_index, alt);
+ dev_err(dev->dev,
+ "can't change interface %d alt no. to %d: Max. Pkt size = 0\n",
+ usb_interface_index, alt);
/*To workaround error number=-71 on EP0 for videograbber,
need add following codes.*/
if (dev->board.no_alt_vanc)
@@ -616,9 +614,9 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
if (usb_interface_index > 0) {
status = usb_set_interface(dev->udev, usb_interface_index, alt);
if (status < 0) {
- cx231xx_errdev
- ("can't change interface %d alt no. to %d (err=%i)\n",
- usb_interface_index, alt, status);
+ dev_err(dev->dev,
+ "can't change interface %d alt no. to %d (err=%i)\n",
+ usb_interface_index, alt, status);
return status;
}
}
@@ -767,18 +765,17 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
u32 *buffer;
buffer = kzalloc(4096, GFP_KERNEL);
- if (buffer == NULL) {
- cx231xx_info("out of mem\n");
+ if (buffer == NULL)
return -ENOMEM;
- }
memcpy(&buffer[0], firmware, 4096);
ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
buffer, 4096, &actlen, 2000);
if (ret)
- cx231xx_info("bulk message failed: %d (%d/%d)", ret,
- size, actlen);
+ dev_err(dev->dev,
+ "bulk message failed: %d (%d/%d)", ret,
+ size, actlen);
else {
errCode = actlen != size ? -1 : 0;
}
@@ -987,12 +984,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
cx231xx_uninit_isoc(dev);
dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
- if (dma_q->p_left_data == NULL) {
- cx231xx_info("out of mem\n");
+ if (dma_q->p_left_data == NULL)
return -ENOMEM;
- }
-
-
dev->video_mode.isoc_ctl.isoc_copy = isoc_copy;
dev->video_mode.isoc_ctl.num_bufs = num_bufs;
@@ -1018,14 +1011,16 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev->video_mode.isoc_ctl.urb =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
if (!dev->video_mode.isoc_ctl.urb) {
- cx231xx_errdev("cannot alloc memory for usb buffers\n");
+ dev_err(dev->dev,
+ "cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
dev->video_mode.isoc_ctl.transfer_buffer =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
if (!dev->video_mode.isoc_ctl.transfer_buffer) {
- cx231xx_errdev("cannot allocate memory for usbtransfer\n");
+ dev_err(dev->dev,
+ "cannot allocate memory for usbtransfer\n");
kfree(dev->video_mode.isoc_ctl.urb);
return -ENOMEM;
}
@@ -1045,7 +1040,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
if (!urb) {
- cx231xx_err("cannot alloc isoc_ctl.urb %i\n", i);
+ dev_err(dev->dev,
+ "cannot alloc isoc_ctl.urb %i\n", i);
cx231xx_uninit_isoc(dev);
return -ENOMEM;
}
@@ -1055,10 +1051,10 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
&urb->transfer_dma);
if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) {
- cx231xx_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
+ dev_err(dev->dev,
+ "unable to allocate %i bytes for transfer buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
cx231xx_uninit_isoc(dev);
return -ENOMEM;
}
@@ -1090,8 +1086,9 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i],
GFP_ATOMIC);
if (rc) {
- cx231xx_err("submit of urb %i failed (error=%i)\n", i,
- rc);
+ dev_err(dev->dev,
+ "submit of urb %i failed (error=%i)\n", i,
+ rc);
cx231xx_uninit_isoc(dev);
return rc;
}
@@ -1151,14 +1148,16 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
dev->video_mode.bulk_ctl.urb =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
if (!dev->video_mode.bulk_ctl.urb) {
- cx231xx_errdev("cannot alloc memory for usb buffers\n");
+ dev_err(dev->dev,
+ "cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
dev->video_mode.bulk_ctl.transfer_buffer =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
if (!dev->video_mode.bulk_ctl.transfer_buffer) {
- cx231xx_errdev("cannot allocate memory for usbtransfer\n");
+ dev_err(dev->dev,
+ "cannot allocate memory for usbtransfer\n");
kfree(dev->video_mode.bulk_ctl.urb);
return -ENOMEM;
}
@@ -1178,7 +1177,8 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i);
+ dev_err(dev->dev,
+ "cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_bulk(dev);
return -ENOMEM;
}
@@ -1189,10 +1189,10 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
&urb->transfer_dma);
if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) {
- cx231xx_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n",
- sb_size, i,
- in_interrupt() ? " while in int" : "");
+ dev_err(dev->dev,
+ "unable to allocate %i bytes for transfer buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
cx231xx_uninit_bulk(dev);
return -ENOMEM;
}
@@ -1212,8 +1212,8 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i],
GFP_ATOMIC);
if (rc) {
- cx231xx_err("submit of urb %i failed (error=%i)\n", i,
- rc);
+ dev_err(dev->dev,
+ "submit of urb %i failed (error=%i)\n", i, rc);
cx231xx_uninit_bulk(dev);
return rc;
}
@@ -1300,6 +1300,15 @@ int cx231xx_dev_init(struct cx231xx *dev)
cx231xx_i2c_register(&dev->i2c_bus[1]);
cx231xx_i2c_register(&dev->i2c_bus[2]);
+ cx231xx_i2c_mux_register(dev, 0);
+ cx231xx_i2c_mux_register(dev, 1);
+
+ /* scan the real bus segments in the order of physical port numbers */
+ cx231xx_do_i2c_scan(dev, I2C_0);
+ cx231xx_do_i2c_scan(dev, I2C_1_MUX_1);
+ cx231xx_do_i2c_scan(dev, I2C_2);
+ cx231xx_do_i2c_scan(dev, I2C_1_MUX_3);
+
/* init hardware */
/* Note : with out calling set power mode function,
afe can not be set up correctly */
@@ -1307,18 +1316,18 @@ int cx231xx_dev_init(struct cx231xx *dev)
errCode = cx231xx_set_power_mode(dev,
POLARIS_AVMODE_ENXTERNAL_AV);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: Failed to set Power - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
} else {
errCode = cx231xx_set_power_mode(dev,
POLARIS_AVMODE_ANALOGT_TV);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: Failed to set Power - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
}
@@ -1331,42 +1340,43 @@ int cx231xx_dev_init(struct cx231xx *dev)
/* initialize Colibri block */
errCode = cx231xx_afe_init_super_block(dev, 0x23c);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: cx231xx_afe init super block - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_afe init super block - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
errCode = cx231xx_afe_init_channels(dev);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: cx231xx_afe init channels - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_afe init channels - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
/* Set DIF in By pass mode */
errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: cx231xx_dif set to By pass mode - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_dif set to By pass mode - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
/* I2S block related functions */
errCode = cx231xx_i2s_blk_initialize(dev);
if (errCode < 0) {
- cx231xx_errdev
- ("%s: cx231xx_i2s block initialize - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_i2s block initialize - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
/* init control pins */
errCode = cx231xx_init_ctrl_pin_status(dev);
if (errCode < 0) {
- cx231xx_errdev("%s: cx231xx_init ctrl pins - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_init ctrl pins - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
@@ -1391,9 +1401,9 @@ int cx231xx_dev_init(struct cx231xx *dev)
break;
}
if (errCode < 0) {
- cx231xx_errdev
- ("%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
- __func__, errCode);
+ dev_err(dev->dev,
+ "%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
+ __func__, errCode);
return errCode;
}
@@ -1404,9 +1414,7 @@ int cx231xx_dev_init(struct cx231xx *dev)
if (dev->board.has_dvb)
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- /* set the I2C master port to 3 on channel 1 */
- errCode = cx231xx_enable_i2c_port_3(dev, true);
-
+ errCode = 0;
return errCode;
}
EXPORT_SYMBOL_GPL(cx231xx_dev_init);
@@ -1414,6 +1422,8 @@ EXPORT_SYMBOL_GPL(cx231xx_dev_init);
void cx231xx_dev_uninit(struct cx231xx *dev)
{
/* Un Initialize I2C bus */
+ cx231xx_i2c_mux_unregister(dev, 1);
+ cx231xx_i2c_mux_unregister(dev, 0);
cx231xx_i2c_unregister(&dev->i2c_bus[2]);
cx231xx_i2c_unregister(&dev->i2c_bus[1]);
cx231xx_i2c_unregister(&dev->i2c_bus[0]);
@@ -1468,9 +1478,8 @@ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
/* call common vendor command request */
status = cx231xx_send_vendor_cmd(dev, &ven_req);
if (status < 0) {
- cx231xx_info
- ("UsbInterface::sendCommand, failed with status -%d\n",
- status);
+ dev_err(dev->dev, "%s: failed with status -%d\n",
+ __func__, status);
}
return status;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 6c7b5e250eed..dd600b994e69 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -19,11 +19,10 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/usb.h>
-#include "cx231xx.h"
#include <media/v4l2-common.h>
#include <media/videobuf-vmalloc.h>
@@ -46,11 +45,6 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(level, fmt, arg...) do { \
-if (debug >= level) \
- printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
-} while (0)
-
#define CX231XX_DVB_NUM_BUFS 5
#define CX231XX_DVB_MAX_PACKETSIZE 564
#define CX231XX_DVB_MAX_PACKETS 64
@@ -197,9 +191,11 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
break;
}
if (packet < 0) {
- dprintk(1, "URB status %d [%s].\n", status, errmsg);
+ dev_dbg(dev->dev,
+ "URB status %d [%s].\n", status, errmsg);
} else {
- dprintk(1, "URB packet %d, status %d [%s].\n",
+ dev_dbg(dev->dev,
+ "URB packet %d, status %d [%s].\n",
packet, status, errmsg);
}
}
@@ -265,12 +261,8 @@ static int start_streaming(struct cx231xx_dvb *dvb)
struct cx231xx *dev = dvb->adapter.priv;
if (dev->USE_ISO) {
- cx231xx_info("DVB transfer mode is ISO.\n");
- mutex_lock(&dev->i2c_lock);
- cx231xx_enable_i2c_port_3(dev, false);
+ dev_dbg(dev->dev, "DVB transfer mode is ISO.\n");
cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
- cx231xx_enable_i2c_port_3(dev, true);
- mutex_unlock(&dev->i2c_lock);
rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
if (rc < 0)
return rc;
@@ -280,7 +272,7 @@ static int start_streaming(struct cx231xx_dvb *dvb)
dev->ts1_mode.max_pkt_size,
dvb_isoc_copy);
} else {
- cx231xx_info("DVB transfer mode is BULK.\n");
+ dev_dbg(dev->dev, "DVB transfer mode is BULK.\n");
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
if (rc < 0)
@@ -378,24 +370,24 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
struct xc5000_config cfg;
memset(&cfg, 0, sizeof(cfg));
- cfg.i2c_adap = &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap;
+ cfg.i2c_adap = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master);
cfg.i2c_addr = addr;
if (!dev->dvb->frontend) {
- printk(KERN_ERR "%s/2: dvb frontend not attached. "
+ dev_err(dev->dev, "%s/2: dvb frontend not attached. "
"Can't attach xc5000\n", dev->name);
return -EINVAL;
}
fe = dvb_attach(xc5000_attach, dev->dvb->frontend, &cfg);
if (!fe) {
- printk(KERN_ERR "%s/2: xc5000 attach failed\n", dev->name);
+ dev_err(dev->dev, "%s/2: xc5000 attach failed\n", dev->name);
dvb_frontend_detach(dev->dvb->frontend);
dev->dvb->frontend = NULL;
return -EINVAL;
}
- printk(KERN_INFO "%s/2: xc5000 attached\n", dev->name);
+ dev_info(dev->dev, "%s/2: xc5000 attached\n", dev->name);
return 0;
}
@@ -434,16 +426,17 @@ int cx231xx_reset_analog_tuner(struct cx231xx *dev)
if (dops->init != NULL && !dev->xc_fw_load_done) {
- cx231xx_info("Reloading firmware for XC5000\n");
+ dev_dbg(dev->dev,
+ "Reloading firmware for XC5000\n");
status = dops->init(dev->dvb->frontend);
if (status == 0) {
dev->xc_fw_load_done = 1;
- cx231xx_info
- ("XC5000 firmware download completed\n");
+ dev_dbg(dev->dev,
+ "XC5000 firmware download completed\n");
} else {
dev->xc_fw_load_done = 0;
- cx231xx_info
- ("XC5000 firmware download failed !!!\n");
+ dev_dbg(dev->dev,
+ "XC5000 firmware download failed !!!\n");
}
}
@@ -466,7 +459,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
adapter_nr);
if (result < 0) {
- printk(KERN_WARNING
+ dev_warn(dev->dev,
"%s: dvb_register_adapter failed (errno = %d)\n",
dev->name, result);
goto fail_adapter;
@@ -480,7 +473,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (result < 0) {
- printk(KERN_WARNING
+ dev_warn(dev->dev,
"%s: dvb_register_frontend failed (errno = %d)\n",
dev->name, result);
goto fail_frontend;
@@ -498,7 +491,8 @@ static int register_dvb(struct cx231xx_dvb *dvb,
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
+ dev_warn(dev->dev,
+ "%s: dvb_dmx_init failed (errno = %d)\n",
dev->name, result);
goto fail_dmx;
}
@@ -508,15 +502,16 @@ static int register_dvb(struct cx231xx_dvb *dvb,
dvb->dmxdev.capabilities = 0;
result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (result < 0) {
- printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
- dev->name, result);
+ dev_warn(dev->dev,
+ "%s: dvb_dmxdev_init failed (errno = %d)\n",
+ dev->name, result);
goto fail_dmxdev;
}
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING
+ dev_warn(dev->dev,
"%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
dev->name, result);
goto fail_fe_hw;
@@ -525,17 +520,17 @@ static int register_dvb(struct cx231xx_dvb *dvb,
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_WARNING
- "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
- dev->name, result);
+ dev_warn(dev->dev,
+ "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ dev->name, result);
goto fail_fe_mem;
}
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_WARNING
- "%s: connect_frontend failed (errno = %d)\n", dev->name,
- result);
+ dev_warn(dev->dev,
+ "%s: connect_frontend failed (errno = %d)\n",
+ dev->name, result);
goto fail_fe_conn;
}
@@ -583,6 +578,8 @@ static int dvb_init(struct cx231xx *dev)
{
int result = 0;
struct cx231xx_dvb *dvb;
+ struct i2c_adapter *tuner_i2c;
+ struct i2c_adapter *demod_i2c;
if (!dev->board.has_dvb) {
/* This device does not support the extension */
@@ -592,13 +589,16 @@ static int dvb_init(struct cx231xx *dev)
dvb = kzalloc(sizeof(struct cx231xx_dvb), GFP_KERNEL);
if (dvb == NULL) {
- printk(KERN_INFO "cx231xx_dvb: memory allocation failed\n");
+ dev_info(dev->dev,
+ "cx231xx_dvb: memory allocation failed\n");
return -ENOMEM;
}
dev->dvb = dvb;
dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;
+ tuner_i2c = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master);
+ demod_i2c = cx231xx_get_i2c_adap(dev, dev->board.demod_i2c_master);
mutex_lock(&dev->lock);
cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
cx231xx_demod_reset(dev);
@@ -609,11 +609,11 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(s5h1432_attach,
&dvico_s5h1432_config,
- &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+ demod_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach s5h1432 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach s5h1432 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -622,7 +622,7 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ tuner_i2c,
&cnxt_rde250_tunerconfig)) {
result = -EINVAL;
goto out_free;
@@ -634,11 +634,11 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(s5h1411_attach,
&xc5000_s5h1411_config,
- &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+ demod_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach s5h1411 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -647,7 +647,7 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ tuner_i2c,
&cnxt_rdu250_tunerconfig)) {
result = -EINVAL;
goto out_free;
@@ -657,11 +657,11 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(s5h1432_attach,
&dvico_s5h1432_config,
- &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+ demod_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach s5h1432 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach s5h1432 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -670,7 +670,7 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
- 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ 0x60, tuner_i2c,
&cnxt_rde253s_tunerconfig)) {
result = -EINVAL;
goto out_free;
@@ -681,11 +681,11 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(s5h1411_attach,
&tda18271_s5h1411_config,
- &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+ demod_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach s5h1411 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -694,7 +694,7 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
- 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ 0x60, tuner_i2c,
&cnxt_rde253s_tunerconfig)) {
result = -EINVAL;
goto out_free;
@@ -702,16 +702,17 @@ static int dvb_init(struct cx231xx *dev)
break;
case CX231XX_BOARD_HAUPPAUGE_EXETER:
- printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n",
- __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));
+ dev_info(dev->dev,
+ "%s: looking for tuner / demod on i2c bus: %d\n",
+ __func__, i2c_adapter_id(tuner_i2c));
dev->dvb->frontend = dvb_attach(lgdt3305_attach,
&hcw_lgdt3305_config,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap);
+ tuner_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach LG3305 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach LG3305 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -720,7 +721,7 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
dvb_attach(tda18271_attach, dev->dvb->frontend,
- 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ 0x60, tuner_i2c,
&hcw_tda18271_config);
break;
@@ -728,12 +729,12 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(si2165_attach,
&hauppauge_930C_HD_1113xx_si2165_config,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap
+ tuner_i2c
);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach SI2165 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach SI2165 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -745,7 +746,7 @@ static int dvb_init(struct cx231xx *dev)
dvb_attach(tda18271_attach, dev->dvb->frontend,
0x60,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ tuner_i2c,
&hcw_tda18271_config);
dev->cx231xx_reset_analog_tuner = NULL;
@@ -761,12 +762,12 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->frontend = dvb_attach(si2165_attach,
&pctv_quatro_stick_1114xx_si2165_config,
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap
+ tuner_i2c
);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach SI2165 front end\n");
+ dev_err(dev->dev,
+ "Failed to attach SI2165 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -786,7 +787,7 @@ static int dvb_init(struct cx231xx *dev)
request_module("si2157");
client = i2c_new_device(
- &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ tuner_i2c,
&info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(dev->dvb->frontend);
@@ -810,16 +811,17 @@ static int dvb_init(struct cx231xx *dev)
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID:
- printk(KERN_INFO "%s: looking for demod on i2c bus: %d\n",
- __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));
+ dev_info(dev->dev,
+ "%s: looking for demod on i2c bus: %d\n",
+ __func__, i2c_adapter_id(tuner_i2c));
dev->dvb->frontend = dvb_attach(mb86a20s_attach,
&pv_mb86a20s_config,
- &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+ demod_i2c);
if (dev->dvb->frontend == NULL) {
- printk(DRIVER_NAME
- ": Failed to attach mb86a20s demod\n");
+ dev_err(dev->dev,
+ "Failed to attach mb86a20s demod\n");
result = -EINVAL;
goto out_free;
}
@@ -828,30 +830,31 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
dvb_attach(tda18271_attach, dev->dvb->frontend,
- 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ 0x60, tuner_i2c,
&pv_tda18271_config);
break;
default:
- printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
- " isn't supported yet\n", dev->name);
+ dev_err(dev->dev,
+ "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
+ dev->name);
break;
}
if (NULL == dvb->frontend) {
- printk(KERN_ERR
+ dev_err(dev->dev,
"%s/2: frontend initialization failed\n", dev->name);
result = -EINVAL;
goto out_free;
}
/* register everything */
- result = register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
+ result = register_dvb(dvb, THIS_MODULE, dev, dev->dev);
if (result < 0)
goto out_free;
- printk(KERN_INFO "Successfully loaded cx231xx-dvb\n");
+ dev_info(dev->dev, "Successfully loaded cx231xx-dvb\n");
ret:
cx231xx_set_mode(dev, CX231XX_SUSPEND);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 7c0f797f1057..a29c345b027d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -20,14 +20,14 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/usb.h>
#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>
-#include "cx231xx.h"
/* ----------------------------------------------------------- */
@@ -54,10 +54,19 @@ do { \
} \
} while (0)
+static inline int get_real_i2c_port(struct cx231xx *dev, int bus_nr)
+{
+ if (bus_nr == 1)
+ return dev->port_3_switch_enabled ? I2C_1_MUX_3 : I2C_1_MUX_1;
+ return bus_nr;
+}
+
static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus,
const struct i2c_msg *msg, int tuner_type)
{
- if (bus->nr != dev->board.tuner_i2c_master)
+ int i2c_port = get_real_i2c_port(dev, bus->nr);
+
+ if (i2c_port != dev->board.tuner_i2c_master)
return false;
if (msg->addr != dev->board.tuner_addr)
@@ -340,14 +349,15 @@ static int cx231xx_i2c_check_for_device(struct i2c_adapter *i2c_adap,
struct cx231xx *dev = bus->dev;
struct cx231xx_i2c_xfer_data req_data;
int status = 0;
+ u8 buf[1];
/* prepare xfer_data struct */
req_data.dev_addr = msg->addr;
- req_data.direction = msg->flags;
+ req_data.direction = I2C_M_RD;
req_data.saddr_len = 0;
req_data.saddr_dat = 0;
- req_data.buf_size = 0;
- req_data.p_buffer = NULL;
+ req_data.buf_size = 1;
+ req_data.p_buffer = buf;
/* usb send command */
status = dev->cx231xx_send_usb_command(bus, &req_data);
@@ -455,17 +465,14 @@ static struct i2c_adapter cx231xx_adap_template = {
.algo = &cx231xx_algo,
};
-static struct i2c_client cx231xx_client_template = {
- .name = "cx231xx internal",
-};
-
/* ----------------------------------------------------------- */
/*
* i2c_devs
* incomplete list of known devices
*/
-static char *i2c_devs[128] = {
+static const char *i2c_devs[128] = {
+ [0x20 >> 1] = "demod",
[0x60 >> 1] = "colibri",
[0x88 >> 1] = "hammerhead",
[0x8e >> 1] = "CIR",
@@ -480,22 +487,34 @@ static char *i2c_devs[128] = {
* cx231xx_do_i2c_scan()
* check i2c address range for devices
*/
-void cx231xx_do_i2c_scan(struct cx231xx *dev, struct i2c_client *c)
+void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port)
{
unsigned char buf;
int i, rc;
+ struct i2c_client client;
+
+ if (!i2c_scan)
+ return;
+
+ /* Don't generate I2C errors during scan */
+ dev->i2c_scan_running = true;
+
+ memset(&client, 0, sizeof(client));
+ client.adapter = cx231xx_get_i2c_adap(dev, i2c_port);
- cx231xx_info(": Checking for I2C devices ..\n");
for (i = 0; i < 128; i++) {
- c->addr = i;
- rc = i2c_master_recv(c, &buf, 0);
+ client.addr = i;
+ rc = i2c_master_recv(&client, &buf, 0);
if (rc < 0)
continue;
- cx231xx_info("%s: i2c scan: found device @ 0x%x [%s]\n",
- dev->name, i << 1,
- i2c_devs[i] ? i2c_devs[i] : "???");
+ dev_info(dev->dev,
+ "i2c scan: found device @ port %d addr 0x%x [%s]\n",
+ i2c_port,
+ i << 1,
+ i2c_devs[i] ? i2c_devs[i] : "???");
}
- cx231xx_info(": Completed Checking for I2C devices.\n");
+
+ dev->i2c_scan_running = false;
}
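The i2c_scan_running flag set here is the same one tested in the cx231xx_send_usb_command() hunk of cx231xx-core.c earlier in this patch, so addresses that do not ACK during the scan are skipped silently instead of being logged as command failures. A condensed, self-contained sketch of that interplay (the wrapper function name is invented):

/*
 * Condensed sketch: raise the flag only around the probe loop; the
 * vendor-command helper in cx231xx-core.c suppresses its error print
 * while the flag is set.
 */
static void example_quiet_scan(struct cx231xx *dev, struct i2c_adapter *adap)
{
	struct i2c_client client = { .adapter = adap };
	unsigned char buf;
	int i;

	dev->i2c_scan_running = true;
	for (i = 0; i < 128; i++) {
		client.addr = i;
		if (i2c_master_recv(&client, &buf, 0) < 0)
			continue;	/* nothing ACKed this address */
		dev_info(dev->dev, "found device @ 0x%x\n", i << 1);
	}
	dev->i2c_scan_running = false;
}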
/*
@@ -509,23 +528,17 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
BUG_ON(!dev->cx231xx_send_usb_command);
bus->i2c_adap = cx231xx_adap_template;
- bus->i2c_client = cx231xx_client_template;
- bus->i2c_adap.dev.parent = &dev->udev->dev;
+ bus->i2c_adap.dev.parent = dev->dev;
- strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name));
+ snprintf(bus->i2c_adap.name, sizeof(bus->i2c_adap.name), "%s-%d", bus->dev->name, bus->nr);
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
i2c_add_adapter(&bus->i2c_adap);
- bus->i2c_client.adapter = &bus->i2c_adap;
-
- if (0 == bus->i2c_rc) {
- if (i2c_scan)
- cx231xx_do_i2c_scan(dev, &bus->i2c_client);
- } else
- cx231xx_warn("%s: i2c bus %d register FAILED\n",
- dev->name, bus->nr);
+ if (0 != bus->i2c_rc)
+ dev_warn(dev->dev,
+ "i2c bus %d register FAILED\n", bus->nr);
return bus->i2c_rc;
}
@@ -539,3 +552,62 @@ int cx231xx_i2c_unregister(struct cx231xx_i2c *bus)
i2c_del_adapter(&bus->i2c_adap);
return 0;
}
+
+/*
+ * cx231xx_i2c_mux_select()
+ * switch i2c master number 1 between port1 and port3
+ */
+static int cx231xx_i2c_mux_select(struct i2c_adapter *adap,
+ void *mux_priv, u32 chan_id)
+{
+ struct cx231xx *dev = mux_priv;
+
+ return cx231xx_enable_i2c_port_3(dev, chan_id);
+}
+
+int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
+{
+ struct i2c_adapter *i2c_parent = &dev->i2c_bus[1].i2c_adap;
+ /* what is the correct mux_dev? */
+ struct device *mux_dev = dev->dev;
+
+ dev->i2c_mux_adap[mux_no] = i2c_add_mux_adapter(i2c_parent,
+ mux_dev,
+ dev /* mux_priv */,
+ 0,
+ mux_no /* chan_id */,
+ 0 /* class */,
+ &cx231xx_i2c_mux_select,
+ NULL);
+
+ if (!dev->i2c_mux_adap[mux_no])
+ dev_warn(dev->dev,
+ "i2c mux %d register FAILED\n", mux_no);
+
+ return 0;
+}
+
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no)
+{
+ i2c_del_mux_adapter(dev->i2c_mux_adap[mux_no]);
+ dev->i2c_mux_adap[mux_no] = NULL;
+}
+
+struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
+{
+ switch (i2c_port) {
+ case I2C_0:
+ return &dev->i2c_bus[0].i2c_adap;
+ case I2C_1:
+ return &dev->i2c_bus[1].i2c_adap;
+ case I2C_2:
+ return &dev->i2c_bus[2].i2c_adap;
+ case I2C_1_MUX_1:
+ return dev->i2c_mux_adap[0];
+ case I2C_1_MUX_3:
+ return dev->i2c_mux_adap[1];
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(cx231xx_get_i2c_adap);
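Once both mux adapters are registered, callers are expected to resolve an adapter through this helper rather than indexing dev->i2c_bus[] directly, as the cx231xx-dvb.c hunks above and the cx231xx-input.c hunk just below already do. A hypothetical attach sequence (the function name, chip name and address are invented for illustration):

/*
 * Hypothetical usage: attach a demod sitting behind port 3 of master 1
 * (I2C_DEMOD_EN = 1). The si2168/0x64 values are illustrative only.
 */
static int example_attach_demod(struct cx231xx *dev)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("si2168", 0x64) };
	struct i2c_adapter *adap;

	adap = cx231xx_get_i2c_adap(dev, I2C_1_MUX_3);
	if (!adap)
		return -ENODEV;

	return i2c_new_device(adap, &info) ? 0 : -ENODEV;
}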
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 05f0434919d4..15d8d1b5f05c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -19,7 +19,6 @@
*/
#include "cx231xx.h"
-#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -63,7 +62,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
struct i2c_board_info info;
u8 ir_i2c_bus;
- dev_dbg(&dev->udev->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
/* Only initialize if a rc keycode map is defined */
if (!cx231xx_boards[dev->model].rc_map_name)
@@ -98,9 +97,10 @@ int cx231xx_ir_init(struct cx231xx *dev)
/* Load and bind ir-kbd-i2c */
ir_i2c_bus = cx231xx_boards[dev->model].ir_i2c_master;
- dev_dbg(&dev->udev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n",
+ dev_dbg(dev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n",
ir_i2c_bus, info.addr);
- dev->ir_i2c_client = i2c_new_device(&dev->i2c_bus[ir_i2c_bus].i2c_adap, &info);
+ dev->ir_i2c_client = i2c_new_device(
+ cx231xx_get_i2c_adap(dev, ir_i2c_bus), &info);
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
index 3052c4c20229..5bc74149fcb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
@@ -703,9 +703,9 @@ int initialize_cx231xx(struct cx231xx *dev)
_current_scenario_idx = INDEX_BUSPOWER_DIF_ONLY;
break;
default:
- cx231xx_info("bad config in buspower!!!!\n");
- cx231xx_info("config_info=%x\n",
- (config_info & BUSPOWER_MASK));
+ dev_err(dev->dev,
+ "bad config in buspower!!!!\nconfig_info=%x\n",
+ config_info & BUSPOWER_MASK);
return 1;
}
} else { /* self-power */
@@ -768,9 +768,9 @@ int initialize_cx231xx(struct cx231xx *dev)
_current_scenario_idx = INDEX_SELFPOWER_COMPRESSOR;
break;
default:
- cx231xx_info("bad senario!!!!!\n");
- cx231xx_info("config_info=%x\n",
- (config_info & SELFPOWER_MASK));
+ dev_err(dev->dev,
+ "bad senario!!!!!\nconfig_info=%x\n",
+ config_info & SELFPOWER_MASK);
return -ENODEV;
}
}
@@ -781,18 +781,29 @@ int initialize_cx231xx(struct cx231xx *dev)
sizeof(struct pcb_config));
if (pcb_debug) {
- cx231xx_info("SC(0x00) register = 0x%x\n", config_info);
- cx231xx_info("scenario %d\n",
- (dev->current_pcb_config.index) + 1);
- cx231xx_info("type=%x\n", dev->current_pcb_config.type);
- cx231xx_info("mode=%x\n", dev->current_pcb_config.mode);
- cx231xx_info("speed=%x\n", dev->current_pcb_config.speed);
- cx231xx_info("ts1_source=%x\n",
- dev->current_pcb_config.ts1_source);
- cx231xx_info("ts2_source=%x\n",
- dev->current_pcb_config.ts2_source);
- cx231xx_info("analog_source=%x\n",
- dev->current_pcb_config.analog_source);
+ dev_info(dev->dev,
+ "SC(0x00) register = 0x%x\n", config_info);
+ dev_info(dev->dev,
+ "scenario %d\n",
+ (dev->current_pcb_config.index) + 1);
+ dev_info(dev->dev,
+ "type=%x\n",
+ dev->current_pcb_config.type);
+ dev_info(dev->dev,
+ "mode=%x\n",
+ dev->current_pcb_config.mode);
+ dev_info(dev->dev,
+ "speed=%x\n",
+ dev->current_pcb_config.speed);
+ dev_info(dev->dev,
+ "ts1_source=%x\n",
+ dev->current_pcb_config.ts1_source);
+ dev_info(dev->dev,
+ "ts2_source=%x\n",
+ dev->current_pcb_config.ts2_source);
+ dev_info(dev->dev,
+ "analog_source=%x\n",
+ dev->current_pcb_config.analog_source);
}
return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index c02794274f51..80261ac40208 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -19,12 +19,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
-#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -35,7 +35,6 @@
#include <media/msp3400.h>
#include <media/tuner.h>
-#include "cx231xx.h"
#include "cx231xx-vbi.h"
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -69,11 +68,12 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
break;
}
if (packet < 0) {
- cx231xx_err("URB status %d [%s].\n", status,
- errmsg);
+ dev_err(dev->dev,
+ "URB status %d [%s].\n", status, errmsg);
} else {
- cx231xx_err("URB packet %d, status %d [%s].\n",
- packet, status, errmsg);
+ dev_err(dev->dev,
+ "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
}
}
@@ -316,8 +316,8 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- cx231xx_err("urb completition error %d.\n",
- urb->status);
+ dev_err(dev->dev,
+ "urb completition error %d.\n", urb->status);
break;
}
@@ -331,8 +331,8 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
if (urb->status) {
- cx231xx_err("urb resubmit failed (error=%i)\n",
- urb->status);
+ dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
+ urb->status);
}
}
@@ -344,7 +344,7 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
struct urb *urb;
int i;
- cx231xx_info("called cx231xx_uninit_vbi_isoc\n");
+ dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");
dev->vbi_mode.bulk_ctl.nfields = -1;
for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
@@ -393,7 +393,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
struct urb *urb;
int rc;
- cx231xx_info("called cx231xx_vbi_isoc\n");
+ dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");
/* De-allocates all pending stuff */
cx231xx_uninit_vbi_isoc(dev);
@@ -419,14 +419,16 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
GFP_KERNEL);
if (!dev->vbi_mode.bulk_ctl.urb) {
- cx231xx_errdev("cannot alloc memory for usb buffers\n");
+ dev_err(dev->dev,
+ "cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.transfer_buffer =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
- cx231xx_errdev("cannot allocate memory for usbtransfer\n");
+ dev_err(dev->dev,
+ "cannot allocate memory for usbtransfer\n");
kfree(dev->vbi_mode.bulk_ctl.urb);
return -ENOMEM;
}
@@ -441,7 +443,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i);
+ dev_err(dev->dev,
+ "cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_vbi_isoc(dev);
return -ENOMEM;
}
@@ -451,9 +454,10 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
- cx231xx_err("unable to allocate %i bytes for transfer"
- " buffer %i%s\n", sb_size, i,
- in_interrupt() ? " while in int" : "");
+ dev_err(dev->dev,
+ "unable to allocate %i bytes for transfer buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
cx231xx_uninit_vbi_isoc(dev);
return -ENOMEM;
}
@@ -470,8 +474,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
if (rc) {
- cx231xx_err("submit of urb %i failed (error=%i)\n", i,
- rc);
+ dev_err(dev->dev,
+ "submit of urb %i failed (error=%i)\n", i, rc);
cx231xx_uninit_vbi_isoc(dev);
return rc;
}
@@ -522,7 +526,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advice that buffer was filled */
- /* cx231xx_info("[%p/%d] wakeup\n", buf, buf->vb.i); */
+ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
@@ -614,7 +618,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
char *outp;
if (list_empty(&dma_q->active)) {
- cx231xx_err("No active queue to serve\n");
+ dev_err(dev->dev, "No active queue to serve\n");
dev->vbi_mode.bulk_ctl.buf = NULL;
*buf = NULL;
return;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 3b3ada6562ca..53ca12c1ff69 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -22,12 +22,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
-#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -41,10 +41,9 @@
#include "dvb_frontend.h"
-#include "cx231xx.h"
#include "cx231xx-vbi.h"
-#define CX231XX_VERSION "0.0.2"
+#define CX231XX_VERSION "0.0.3"
#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
@@ -737,8 +736,9 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
if (!dev->video_mode.bulk_ctl.num_bufs)
urb_init = 1;
}
- /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);*/
+ dev_dbg(dev->dev,
+ "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);
if (urb_init) {
dev->mode_tv = 0;
if (dev->USE_ISO)
@@ -809,7 +809,7 @@ void video_mux(struct cx231xx *dev, int index)
cx231xx_set_audio_input(dev, dev->ctl_ainput);
- cx231xx_info("video_mux : %d\n", index);
+ dev_dbg(dev->dev, "video_mux : %d\n", index);
/* do mode control overrides if required */
cx231xx_do_mode_ctrl_overrides(dev);
@@ -861,7 +861,7 @@ static void res_free(struct cx231xx_fh *fh)
static int check_dev(struct cx231xx *dev)
{
if (dev->state & DEV_DISCONNECTED) {
- cx231xx_errdev("v4l2 ioctl: device not present\n");
+ dev_err(dev->dev, "v4l2 ioctl: device not present\n");
return -ENODEV;
}
return 0;
@@ -953,12 +953,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- cx231xx_errdev("%s queue busy\n", __func__);
+ dev_err(dev->dev, "%s: queue busy\n", __func__);
return -EBUSY;
}
if (dev->stream_on && !fh->stream_on) {
- cx231xx_errdev("%s device in use by another fh\n", __func__);
+ dev_err(dev->dev,
+ "%s: device in use by another fh\n", __func__);
return -EBUSY;
}
@@ -967,7 +968,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dev->height = f->fmt.pix.height;
dev->format = fmt;
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
@@ -1012,7 +1013,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
resolution (since a standard change effects things like the number
of lines in VACT, etc) */
memset(&mbus_fmt, 0, sizeof(mbus_fmt));
- mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
+ mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
mbus_fmt.width = dev->width;
mbus_fmt.height = dev->height;
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
@@ -1176,9 +1177,9 @@ int cx231xx_s_frequency(struct file *file, void *priv,
int rc;
u32 if_frequency = 5400000;
- cx231xx_info("Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
- f->frequency, f->type);
- /*cx231xx_info("f->type: 1-radio 2-analogTV 3-digitalTV\n");*/
+ dev_dbg(dev->dev,
+ "Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
+ f->frequency, f->type);
rc = check_dev(dev);
if (rc < 0)
@@ -1213,13 +1214,14 @@ int cx231xx_s_frequency(struct file *file, void *priv,
else if (dev->norm & V4L2_STD_SECAM_LC)
if_frequency = 1250000; /*1.25MHz */
- cx231xx_info("if_frequency is set to %d\n", if_frequency);
+ dev_dbg(dev->dev,
+ "if_frequency is set to %d\n", if_frequency);
cx231xx_set_Colibri_For_LowIF(dev, if_frequency, 1, 1);
update_HH_register_after_set_DIF(dev);
}
- cx231xx_info("Set New FREQUENCY to %d\n", f->frequency);
+ dev_dbg(dev->dev, "Set New FREQUENCY to %d\n", f->frequency);
return rc;
}
@@ -1523,7 +1525,8 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct cx231xx *dev = fh->dev;
if (dev->vbi_stream_on && !fh->stream_on) {
- cx231xx_errdev("%s device in use by another fh\n", __func__);
+ dev_err(dev->dev,
+ "%s device in use by another fh\n", __func__);
return -EBUSY;
}
return vidioc_try_fmt_vbi_cap(file, priv, f);
@@ -1642,17 +1645,15 @@ static int cx231xx_v4l2_open(struct file *filp)
#if 0
errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
if (errCode < 0) {
- cx231xx_errdev
- ("Device locked on digital mode. Can't open analog\n");
+ dev_err(dev->dev,
+ "Device locked on digital mode. Can't open analog\n");
return -EBUSY;
}
#endif
fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
- if (!fh) {
- cx231xx_errdev("cx231xx-video.c: Out of memory?!\n");
+ if (!fh)
return -ENOMEM;
- }
if (mutex_lock_interruptible(&dev->lock)) {
kfree(fh);
return -ERESTARTSYS;
@@ -1736,8 +1737,8 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
dev->radio_dev = NULL;
}
if (dev->vbi_dev) {
- cx231xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(dev->vbi_dev));
+ dev_info(dev->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vbi_dev));
if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
@@ -1745,8 +1746,8 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
dev->vbi_dev = NULL;
}
if (dev->vdev) {
- cx231xx_info("V4L2 device %s deregistered\n",
- video_device_node_name(dev->vdev));
+ dev_info(dev->dev, "V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vdev));
if (dev->board.has_417)
cx231xx_417_unregister(dev);
@@ -2080,8 +2081,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
{
int ret;
- cx231xx_info("%s: v4l2 driver version %s\n",
- dev->name, CX231XX_VERSION);
+ dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
/* set default norm */
dev->norm = V4L2_STD_PAL;
@@ -2119,7 +2119,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
/* allocate and fill video video_device struct */
dev->vdev = cx231xx_vdev_init(dev, &cx231xx_video_template, "video");
if (!dev->vdev) {
- cx231xx_errdev("cannot allocate video_device.\n");
+ dev_err(dev->dev, "cannot allocate video_device.\n");
return -ENODEV;
}
@@ -2128,13 +2128,14 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
- cx231xx_errdev("unable to register video device (error=%i).\n",
- ret);
+ dev_err(dev->dev,
+ "unable to register video device (error=%i).\n",
+ ret);
return ret;
}
- cx231xx_info("%s/0: registered device %s [v4l2]\n",
- dev->name, video_device_node_name(dev->vdev));
+ dev_info(dev->dev, "Registered video device %s [v4l2]\n",
+ video_device_node_name(dev->vdev));
/* Initialize VBI template */
cx231xx_vbi_template = cx231xx_video_template;
@@ -2144,7 +2145,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->vbi_dev = cx231xx_vdev_init(dev, &cx231xx_vbi_template, "vbi");
if (!dev->vbi_dev) {
- cx231xx_errdev("cannot allocate video_device.\n");
+ dev_err(dev->dev, "cannot allocate video_device.\n");
return -ENODEV;
}
dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
@@ -2152,34 +2153,32 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
- cx231xx_errdev("unable to register vbi device\n");
+ dev_err(dev->dev, "unable to register vbi device\n");
return ret;
}
- cx231xx_info("%s/0: registered device %s\n",
- dev->name, video_device_node_name(dev->vbi_dev));
+ dev_info(dev->dev, "Registered VBI device %s\n",
+ video_device_node_name(dev->vbi_dev));
if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
"radio");
if (!dev->radio_dev) {
- cx231xx_errdev("cannot allocate video_device.\n");
+ dev_err(dev->dev,
+ "cannot allocate video_device.\n");
return -ENODEV;
}
dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
- cx231xx_errdev("can't register radio device\n");
+ dev_err(dev->dev,
+ "can't register radio device\n");
return ret;
}
- cx231xx_info("Registered radio device as %s\n",
- video_device_node_name(dev->radio_dev));
+ dev_info(dev->dev, "Registered radio device as %s\n",
+ video_device_node_name(dev->radio_dev));
}
- cx231xx_info("V4L2 device registered as %s and %s\n",
- video_device_node_name(dev->vdev),
- video_device_node_name(dev->vbi_dev));
-
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index aeb1bf42b88d..f9e262eb0db9 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/usb.h>
#include <media/cx2341x.h>
@@ -322,10 +323,11 @@ enum cx231xx_decoder {
};
enum CX231XX_I2C_MASTER_PORT {
- I2C_0 = 0,
- I2C_1 = 1,
- I2C_2 = 2,
- I2C_3 = 3
+ I2C_0 = 0, /* master 0 - internal connection */
+ I2C_1 = 1, /* master 1 - used with mux */
+ I2C_2 = 2, /* master 2 */
+ I2C_1_MUX_1 = 3, /* master 1 - port 1 (I2C_DEMOD_EN = 0) */
+ I2C_1_MUX_3 = 4 /* master 1 - port 3 (I2C_DEMOD_EN = 1) */
};
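Board definitions (kept in cx231xx-cards.c, outside this diff) now express their tuner/demod/IR wiring in terms of these ports, including the two muxed views of master 1. An invented example entry, using only fields referenced elsewhere in this patch:

/* Invented example — not a real board from cx231xx-cards.c. */
static const struct cx231xx_board example_board = {
	.name			= "Example hybrid stick",
	.tuner_i2c_master	= I2C_1_MUX_3,
	.demod_i2c_master	= I2C_2,
	.ir_i2c_master		= I2C_0,
};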
struct cx231xx_board {
@@ -367,7 +369,6 @@ struct cx231xx_board {
unsigned int valid:1;
unsigned int no_alt_vanc:1;
unsigned int external_av:1;
- unsigned int dont_use_port_3:1;
unsigned char xclk, i2c_speed;
@@ -472,7 +473,6 @@ struct cx231xx_i2c {
/* i2c i/o */
struct i2c_adapter i2c_adap;
- struct i2c_client i2c_client;
u32 i2c_rc;
/* different settings for each bus */
@@ -597,6 +597,7 @@ struct cx231xx {
char name[30]; /* name (including minor) of the device */
int model; /* index in the device_data struct */
int devno; /* marks the number of this device */
+ struct device *dev; /* pointer to USB interface's dev */
struct cx231xx_board board;
@@ -609,6 +610,8 @@ struct cx231xx {
unsigned int has_audio_class:1;
unsigned int has_alsa_audio:1;
+ unsigned int i2c_scan_running:1; /* true only during i2c_scan */
+
struct cx231xx_fmt *format;
struct v4l2_device v4l2_dev;
@@ -628,7 +631,10 @@ struct cx231xx {
/* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
struct cx231xx_i2c i2c_bus[3];
+ struct i2c_adapter *i2c_mux_adap[2];
+
unsigned int xc_fw_load_done:1;
+ unsigned int port_3_switch_enabled:1;
/* locks */
struct mutex gpio_i2c_lock;
struct mutex i2c_lock;
@@ -751,9 +757,12 @@ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq);
int cx231xx_reset_analog_tuner(struct cx231xx *dev);
/* Provided by cx231xx-i2c.c */
-void cx231xx_do_i2c_scan(struct cx231xx *dev, struct i2c_client *c);
+void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port);
int cx231xx_i2c_register(struct cx231xx_i2c *bus);
int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
+int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no);
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no);
+struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port);
/* Internal block control functions */
int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
@@ -802,7 +811,6 @@ void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev);
void reset_s5h1432_demod(struct cx231xx *dev);
void cx231xx_dump_HH_reg(struct cx231xx *dev);
void update_HH_register_after_set_DIF(struct cx231xx *dev);
-void cx231xx_dump_SC_reg(struct cx231xx *dev);
@@ -976,23 +984,6 @@ void cx231xx_ir_exit(struct cx231xx *dev);
#define cx231xx_ir_exit(dev) (0)
#endif
-
-/* printk macros */
-
-#define cx231xx_err(fmt, arg...) do {\
- printk(KERN_ERR fmt , ##arg); } while (0)
-
-#define cx231xx_errdev(fmt, arg...) do {\
- printk(KERN_ERR "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
-#define cx231xx_info(fmt, arg...) do {\
- printk(KERN_INFO "%s: "fmt,\
- dev->name , ##arg); } while (0)
-#define cx231xx_warn(fmt, arg...) do {\
- printk(KERN_WARNING "%s: "fmt,\
- dev->name , ##arg); } while (0)
-
static inline unsigned int norm_maxw(struct cx231xx *dev)
{
if (dev->board.max_range_640_480)
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 5b34323ad207..0982e734fab5 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -145,6 +145,9 @@ config DVB_USB_DVBSKY
tristate "DVBSky USB support"
depends on DVB_USB_V2
select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the USB receivers from DVBSky.
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 1896ab218b11..80a29f5377ea 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1171,6 +1171,7 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
+ memset(&si2168_config, 0, sizeof(si2168_config));
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &adap->fe[0];
si2168_config.ts_mode = SI2168_TS_SERIAL;
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 34688c89df11..9b5add4499e3 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -21,10 +21,17 @@
#include "dvb_usb.h"
#include "m88ds3103.h"
#include "m88ts2022.h"
+#include "sp2.h"
+#include "si2168.h"
+#include "si2157.h"
#define DVBSKY_MSG_DELAY 0/*2000*/
#define DVBSKY_BUF_LEN 64
+static int dvb_usb_dvbsky_disable_rc;
+module_param_named(disable_rc, dvb_usb_dvbsky_disable_rc, int, 0644);
+MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dvbsky_state {
@@ -32,7 +39,9 @@ struct dvbsky_state {
u8 ibuf[DVBSKY_BUF_LEN];
u8 obuf[DVBSKY_BUF_LEN];
u8 last_lock;
+ struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+ struct i2c_client *i2c_client_ci;
/* fe hook functions*/
int (*fe_set_voltage)(struct dvb_frontend *fe,
@@ -96,8 +105,7 @@ static int dvbsky_gpio_ctrl(struct dvb_usb_device *d, u8 gport, u8 value)
obuf[2] = value;
ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1);
if (ret)
- dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
- KBUILD_MODNAME, __func__, ret);
+ dev_err(&d->udev->dev, "failed=%d\n", ret);
return ret;
}
@@ -114,7 +122,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (num > 2) {
dev_err(&d->udev->dev,
- "dvbsky_usb: too many i2c messages[%d] than 2.", num);
+ "too many i2c messages[%d], max 2.", num);
ret = -EOPNOTSUPP;
goto i2c_error;
}
@@ -122,7 +130,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (num == 1) {
if (msg[0].len > 60) {
dev_err(&d->udev->dev,
- "dvbsky_usb: too many i2c bytes[%d] than 60.",
+ "too many i2c bytes[%d], max 60.",
msg[0].len);
ret = -EOPNOTSUPP;
goto i2c_error;
@@ -136,8 +144,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = dvbsky_usb_generic_rw(d, obuf, 4,
ibuf, msg[0].len + 1);
if (ret)
- dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
- KBUILD_MODNAME, __func__, ret);
+ dev_err(&d->udev->dev, "failed=%d\n", ret);
if (!ret)
memcpy(msg[0].buf, &ibuf[1], msg[0].len);
} else {
@@ -149,13 +156,12 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = dvbsky_usb_generic_rw(d, obuf,
msg[0].len + 3, ibuf, 1);
if (ret)
- dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
- KBUILD_MODNAME, __func__, ret);
+ dev_err(&d->udev->dev, "failed=%d\n", ret);
}
} else {
if ((msg[0].len > 60) || (msg[1].len > 60)) {
dev_err(&d->udev->dev,
- "dvbsky_usb: too many i2c bytes[w-%d][r-%d] than 60.",
+ "too many i2c bytes[w-%d][r-%d], max 60.",
msg[0].len, msg[1].len);
ret = -EOPNOTSUPP;
goto i2c_error;
@@ -169,8 +175,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = dvbsky_usb_generic_rw(d, obuf,
msg[0].len + 4, ibuf, msg[1].len + 1);
if (ret)
- dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
- KBUILD_MODNAME, __func__, ret);
+ dev_err(&d->udev->dev, "failed=%d\n", ret);
if (!ret)
memcpy(msg[1].buf, &ibuf[1], msg[1].len);
@@ -201,8 +206,7 @@ static int dvbsky_rc_query(struct dvb_usb_device *d)
obuf[0] = 0x10;
ret = dvbsky_usb_generic_rw(d, obuf, 1, ibuf, 2);
if (ret)
- dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
- KBUILD_MODNAME, __func__, ret);
+ dev_err(&d->udev->dev, "failed=%d\n", ret);
if (ret == 0)
code = (ibuf[0] << 8) | ibuf[1];
if (code != 0xffff) {
@@ -218,6 +222,11 @@ static int dvbsky_rc_query(struct dvb_usb_device *d)
static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
+ if (dvb_usb_dvbsky_disable_rc) {
+ rc->map_name = NULL;
+ return 0;
+ }
+
rc->allowed_protos = RC_BIT_RC5;
rc->query = dvbsky_rc_query;
rc->interval = 300;
@@ -265,8 +274,6 @@ static int dvbsky_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6])
if (i2c_transfer(&d->i2c_adap, msg, 2) == 2)
memcpy(mac, ibuf, 6);
- dev_info(&d->udev->dev, "dvbsky_usb MAC address=%pM\n", mac);
-
return 0;
}
@@ -362,6 +369,300 @@ fail_attach:
return ret;
}
+static int dvbsky_usb_ci_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct dvbsky_state *state = d_to_priv(d);
+ u8 value;
+
+ if (voltage == SEC_VOLTAGE_OFF)
+ value = 0;
+ else
+ value = 1;
+ dvbsky_gpio_ctrl(d, 0x00, value);
+
+ return state->fe_set_voltage(fe, voltage);
+}
+
+static int dvbsky_ci_ctrl(void *priv, u8 read, int addr,
+ u8 data, int *mem)
+{
+ struct dvb_usb_device *d = priv;
+ int ret = 0;
+ u8 command[4], respond[2], command_size, respond_size;
+
+ command[1] = (u8)((addr >> 8) & 0xff); /*high part of address*/
+ command[2] = (u8)(addr & 0xff); /*low part of address*/
+ if (read) {
+ command[0] = 0x71;
+ command_size = 3;
+ respond_size = 2;
+ } else {
+ command[0] = 0x70;
+ command[3] = data;
+ command_size = 4;
+ respond_size = 1;
+ }
+ ret = dvbsky_usb_generic_rw(d, command, command_size,
+ respond, respond_size);
+ if (ret)
+ goto err;
+ if (read)
+ *mem = respond[1];
+ return ret;
+err:
+ dev_err(&d->udev->dev, "ci control failed=%d\n", ret);
+ return ret;
+}
+
+static const struct m88ds3103_config dvbsky_s960c_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_CI,
+ .ts_clk = 10000,
+ .ts_clk_pol = 1,
+ .agc = 0x99,
+ .lnb_hv_pol = 0,
+ .lnb_en_pol = 1,
+};
+
+static int dvbsky_s960c_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvbsky_state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret = 0;
+ /* demod I2C adapter */
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client_tuner, *client_ci;
+ struct i2c_board_info info;
+ struct sp2_config sp2_config;
+ struct m88ts2022_config m88ts2022_config = {
+ .clock = 27000000,
+ };
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ /* attach demod */
+ adap->fe[0] = dvb_attach(m88ds3103_attach,
+ &dvbsky_s960c_m88ds3103_config,
+ &d->i2c_adap,
+ &i2c_adapter);
+ if (!adap->fe[0]) {
+ dev_err(&d->udev->dev, "dvbsky_s960ci_attach fail.\n");
+ ret = -ENODEV;
+ goto fail_attach;
+ }
+
+ /* attach tuner */
+ m88ts2022_config.fe = adap->fe[0];
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module("m88ts2022");
+ client_tuner = i2c_new_device(i2c_adapter, &info);
+ if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
+ ret = -ENODEV;
+ goto fail_tuner_device;
+ }
+
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ ret = -ENODEV;
+ goto fail_tuner_module;
+ }
+
+ /* attach ci controller */
+ memset(&sp2_config, 0, sizeof(sp2_config));
+ sp2_config.dvb_adap = &adap->dvb_adap;
+ sp2_config.priv = d;
+ sp2_config.ci_control = dvbsky_ci_ctrl;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
+ info.addr = 0x40;
+ info.platform_data = &sp2_config;
+ request_module("sp2");
+ client_ci = i2c_new_device(&d->i2c_adap, &info);
+ if (client_ci == NULL || client_ci->dev.driver == NULL) {
+ ret = -ENODEV;
+ goto fail_ci_device;
+ }
+
+ if (!try_module_get(client_ci->dev.driver->owner)) {
+ ret = -ENODEV;
+ goto fail_ci_module;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ adap->fe[0]->ops.read_signal_strength =
+ adap->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* hook fe: need to resync the slave fifo when signal locks. */
+ state->fe_read_status = adap->fe[0]->ops.read_status;
+ adap->fe[0]->ops.read_status = dvbsky_usb_read_status;
+
+ /* hook fe: LNB off/on is controlled by the Cypress USB chip. */
+ state->fe_set_voltage = adap->fe[0]->ops.set_voltage;
+ adap->fe[0]->ops.set_voltage = dvbsky_usb_ci_set_voltage;
+
+ state->i2c_client_tuner = client_tuner;
+ state->i2c_client_ci = client_ci;
+ return ret;
+fail_ci_module:
+ i2c_unregister_device(client_ci);
+fail_ci_device:
+ module_put(client_tuner->dev.driver->owner);
+fail_tuner_module:
+ i2c_unregister_device(client_tuner);
+fail_tuner_device:
+ dvb_frontend_detach(adap->fe[0]);
+fail_attach:
+ return ret;
+}
+
+static int dvbsky_t680c_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvbsky_state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret = 0;
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client_demod, *client_tuner, *client_ci;
+ struct i2c_board_info info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+ struct sp2_config sp2_config;
+
+ /* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &i2c_adapter;
+ si2168_config.fe = &adap->fe[0];
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+
+ request_module(info.type);
+ client_demod = i2c_new_device(&d->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto fail_demod_device;
+ if (!try_module_get(client_demod->dev.driver->owner))
+ goto fail_demod_module;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = adap->fe[0];
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+
+ request_module(info.type);
+ client_tuner = i2c_new_device(i2c_adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL)
+ goto fail_tuner_device;
+ if (!try_module_get(client_tuner->dev.driver->owner))
+ goto fail_tuner_module;
+
+ /* attach ci controller */
+ memset(&sp2_config, 0, sizeof(sp2_config));
+ sp2_config.dvb_adap = &adap->dvb_adap;
+ sp2_config.priv = d;
+ sp2_config.ci_control = dvbsky_ci_ctrl;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
+ info.addr = 0x40;
+ info.platform_data = &sp2_config;
+
+ request_module(info.type);
+ client_ci = i2c_new_device(&d->i2c_adap, &info);
+
+ if (client_ci == NULL || client_ci->dev.driver == NULL)
+ goto fail_ci_device;
+
+ if (!try_module_get(client_ci->dev.driver->owner))
+ goto fail_ci_module;
+
+ state->i2c_client_demod = client_demod;
+ state->i2c_client_tuner = client_tuner;
+ state->i2c_client_ci = client_ci;
+ return ret;
+fail_ci_module:
+ i2c_unregister_device(client_ci);
+fail_ci_device:
+ module_put(client_tuner->dev.driver->owner);
+fail_tuner_module:
+ i2c_unregister_device(client_tuner);
+fail_tuner_device:
+ module_put(client_demod->dev.driver->owner);
+fail_demod_module:
+ i2c_unregister_device(client_demod);
+fail_demod_device:
+ ret = -ENODEV;
+ return ret;
+}
+
+static int dvbsky_t330_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvbsky_state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret = 0;
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client_demod, *client_tuner;
+ struct i2c_board_info info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+
+ /* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &i2c_adapter;
+ si2168_config.fe = &adap->fe[0];
+ si2168_config.ts_mode = SI2168_TS_PARALLEL | 0x40;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+
+ request_module(info.type);
+ client_demod = i2c_new_device(&d->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto fail_demod_device;
+ if (!try_module_get(client_demod->dev.driver->owner))
+ goto fail_demod_module;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = adap->fe[0];
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+
+ request_module(info.type);
+ client_tuner = i2c_new_device(i2c_adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL)
+ goto fail_tuner_device;
+ if (!try_module_get(client_tuner->dev.driver->owner))
+ goto fail_tuner_module;
+
+ state->i2c_client_demod = client_demod;
+ state->i2c_client_tuner = client_tuner;
+ return ret;
+fail_tuner_module:
+ i2c_unregister_device(client_tuner);
+fail_tuner_device:
+ module_put(client_demod->dev.driver->owner);
+fail_demod_module:
+ i2c_unregister_device(client_demod);
+fail_demod_device:
+ ret = -ENODEV;
+ return ret;
+}
+
static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
{
dvbsky_gpio_ctrl(d, 0x04, 1);
@@ -404,6 +705,18 @@ static void dvbsky_exit(struct dvb_usb_device *d)
module_put(client->dev.driver->owner);
i2c_unregister_device(client);
}
+ client = state->i2c_client_demod;
+ /* remove I2C demod */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+ client = state->i2c_client_ci;
+ /* remove I2C ci */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
}
/* DVB USB Driver stuff */
@@ -434,9 +747,104 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
}
};
+static struct dvb_usb_device_properties dvbsky_s960c_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct dvbsky_state),
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ .generic_bulk_ctrl_endpoint_response = 0x81,
+ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_s960c_attach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+ .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
+ }
+ }
+};
+
+static struct dvb_usb_device_properties dvbsky_t680c_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct dvbsky_state),
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ .generic_bulk_ctrl_endpoint_response = 0x81,
+ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_t680c_attach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+ .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
+ }
+ }
+};
+
+static struct dvb_usb_device_properties dvbsky_t330_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct dvbsky_state),
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ .generic_bulk_ctrl_endpoint_response = 0x81,
+ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_t330_attach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+ .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
+ }
+ }
+};
+
static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(0x0572, 0x6831,
&dvbsky_s960_props, "DVBSky S960/S860", RC_MAP_DVBSKY) },
+ { DVB_USB_DEVICE(0x0572, 0x960c,
+ &dvbsky_s960c_props, "DVBSky S960CI", RC_MAP_DVBSKY) },
+ { DVB_USB_DEVICE(0x0572, 0x680c,
+ &dvbsky_t680c_props, "DVBSky T680CI", RC_MAP_DVBSKY) },
+ { DVB_USB_DEVICE(0x0572, 0x0320,
+ &dvbsky_t330_props, "DVBSky T330", RC_MAP_DVBSKY) },
+ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_TVSTICK_CT2_4400,
+ &dvbsky_t330_props, "TechnoTrend TVStick CT2-4400",
+ RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
+ &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
+ RC_MAP_TT_1500) },
{ }
};
MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 9f2c5459b73a..994de53a574b 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -817,20 +817,22 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
case 0x1122:
switch (st->dvb_usb_lme2510_firmware) {
default:
- st->dvb_usb_lme2510_firmware = TUNER_S0194;
case TUNER_S0194:
fw_lme = fw_s0194;
ret = request_firmware(&fw, fw_lme, &udev->dev);
if (ret == 0) {
+ st->dvb_usb_lme2510_firmware = TUNER_S0194;
cold = 0;
break;
}
- st->dvb_usb_lme2510_firmware = TUNER_LG;
+ /* fall through */
case TUNER_LG:
fw_lme = fw_lg;
ret = request_firmware(&fw, fw_lme, &udev->dev);
- if (ret == 0)
+ if (ret == 0) {
+ st->dvb_usb_lme2510_firmware = TUNER_LG;
break;
+ }
st->dvb_usb_lme2510_firmware = TUNER_DEFAULT;
break;
}
@@ -838,26 +840,30 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
case 0x1120:
switch (st->dvb_usb_lme2510_firmware) {
default:
- st->dvb_usb_lme2510_firmware = TUNER_S7395;
case TUNER_S7395:
fw_lme = fw_c_s7395;
ret = request_firmware(&fw, fw_lme, &udev->dev);
if (ret == 0) {
+ st->dvb_usb_lme2510_firmware = TUNER_S7395;
cold = 0;
break;
}
- st->dvb_usb_lme2510_firmware = TUNER_LG;
+ /* fall through */
case TUNER_LG:
fw_lme = fw_c_lg;
ret = request_firmware(&fw, fw_lme, &udev->dev);
- if (ret == 0)
+ if (ret == 0) {
+ st->dvb_usb_lme2510_firmware = TUNER_LG;
break;
- st->dvb_usb_lme2510_firmware = TUNER_S0194;
+ }
+ /* fall through */
case TUNER_S0194:
fw_lme = fw_c_s0194;
ret = request_firmware(&fw, fw_lme, &udev->dev);
- if (ret == 0)
+ if (ret == 0) {
+ st->dvb_usb_lme2510_firmware = TUNER_S0194;
break;
+ }
st->dvb_usb_lme2510_firmware = TUNER_DEFAULT;
cold = 0;
break;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 27b1e0397e71..896a225ee011 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,6 +24,9 @@
#include "rtl2830.h"
#include "rtl2832.h"
+#include "rtl2832_sdr.h"
+#include "mn88472.h"
+#include "mn88473.h"
#include "qt1010.h"
#include "mt2060.h"
@@ -35,25 +38,6 @@
#include "tua9001.h"
#include "r820t.h"
-/*
- * RTL2832_SDR module is in staging. That logic is added in order to avoid any
- * hard dependency to drivers/staging/ directory as we want compile mainline
- * driver even whole staging directory is missing.
- */
-#include <media/v4l2-subdev.h>
-
-#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
-struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
- struct v4l2_subdev *sd);
-#else
-static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
- struct v4l2_subdev *sd)
-{
- return NULL;
-}
-#endif
#ifdef CONFIG_MEDIA_ATTACH
#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
@@ -420,6 +404,8 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_mn88472 = {0xff38, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_mn88473 = {0xff38, CMD_I2C_RD, 1, buf};
dev_dbg(&d->udev->dev, "%s:\n", __func__);
@@ -449,7 +435,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0xa1) {
priv->tuner = TUNER_RTL2832_FC0012;
priv->tuner_name = "FC0012";
- goto found;
+ goto tuner_found;
}
/* check FC0013 ID register; reg=00 val=a3 */
@@ -457,7 +443,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0xa3) {
priv->tuner = TUNER_RTL2832_FC0013;
priv->tuner_name = "FC0013";
- goto found;
+ goto tuner_found;
}
/* check MT2266 ID register; reg=00 val=85 */
@@ -465,7 +451,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x85) {
priv->tuner = TUNER_RTL2832_MT2266;
priv->tuner_name = "MT2266";
- goto found;
+ goto tuner_found;
}
/* check FC2580 ID register; reg=01 val=56 */
@@ -473,7 +459,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x56) {
priv->tuner = TUNER_RTL2832_FC2580;
priv->tuner_name = "FC2580";
- goto found;
+ goto tuner_found;
}
/* check MT2063 ID register; reg=00 val=9e || 9c */
@@ -481,7 +467,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && (buf[0] == 0x9e || buf[0] == 0x9c)) {
priv->tuner = TUNER_RTL2832_MT2063;
priv->tuner_name = "MT2063";
- goto found;
+ goto tuner_found;
}
/* check MAX3543 ID register; reg=00 val=38 */
@@ -489,7 +475,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x38) {
priv->tuner = TUNER_RTL2832_MAX3543;
priv->tuner_name = "MAX3543";
- goto found;
+ goto tuner_found;
}
/* check TUA9001 ID register; reg=7e val=2328 */
@@ -497,7 +483,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x23 && buf[1] == 0x28) {
priv->tuner = TUNER_RTL2832_TUA9001;
priv->tuner_name = "TUA9001";
- goto found;
+ goto tuner_found;
}
/* check MXL5007R ID register; reg=d9 val=14 */
@@ -505,7 +491,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x14) {
priv->tuner = TUNER_RTL2832_MXL5007T;
priv->tuner_name = "MXL5007T";
- goto found;
+ goto tuner_found;
}
/* check E4000 ID register; reg=02 val=40 */
@@ -513,7 +499,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x40) {
priv->tuner = TUNER_RTL2832_E4000;
priv->tuner_name = "E4000";
- goto found;
+ goto tuner_found;
}
/* check TDA18272 ID register; reg=00 val=c760 */
@@ -521,7 +507,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && (buf[0] == 0xc7 || buf[1] == 0x60)) {
priv->tuner = TUNER_RTL2832_TDA18272;
priv->tuner_name = "TDA18272";
- goto found;
+ goto tuner_found;
}
/* check R820T ID register; reg=00 val=69 */
@@ -529,7 +515,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x69) {
priv->tuner = TUNER_RTL2832_R820T;
priv->tuner_name = "R820T";
- goto found;
+ goto tuner_found;
}
/* check R828D ID register; reg=00 val=69 */
@@ -537,13 +523,44 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
if (ret == 0 && buf[0] == 0x69) {
priv->tuner = TUNER_RTL2832_R828D;
priv->tuner_name = "R828D";
- goto found;
+ goto tuner_found;
}
-
-found:
+tuner_found:
dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
+ /* probe slave demod */
+ if (priv->tuner == TUNER_RTL2832_R828D) {
+ /* power on MN88472 demod on GPIO0 */
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
+ if (ret)
+ goto err;
+
+ /* check MN88472 answers */
+ ret = rtl28xxu_ctrl_msg(d, &req_mn88472);
+ if (ret == 0 && buf[0] == 0x02) {
+ dev_dbg(&d->udev->dev, "%s: MN88472 found\n", __func__);
+ priv->slave_demod = SLAVE_DEMOD_MN88472;
+ goto demod_found;
+ }
+
+ ret = rtl28xxu_ctrl_msg(d, &req_mn88473);
+ if (ret == 0 && buf[0] == 0x03) {
+ dev_dbg(&d->udev->dev, "%s: MN88473 found\n", __func__);
+ priv->slave_demod = SLAVE_DEMOD_MN88473;
+ goto demod_found;
+ }
+ }
+
+demod_found:
/* close demod I2C gate */
ret = rtl28xxu_ctrl_msg(d, &req_gate_close);
if (ret < 0)
@@ -818,7 +835,66 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
/* set fe callback */
adap->fe[0]->callback = rtl2832u_frontend_callback;
+ if (priv->slave_demod) {
+ struct i2c_board_info info = {};
+ struct i2c_client *client;
+
+ /*
+ * We continue in reduced mode, without DVB-T2/C, using the master
+ * demod, when the slave demod fails.
+ */
+ ret = 0;
+
+ /* attach slave demodulator */
+ if (priv->slave_demod == SLAVE_DEMOD_MN88472) {
+ struct mn88472_config mn88472_config = {};
+
+ mn88472_config.fe = &adap->fe[1];
+ mn88472_config.i2c_wr_max = 22,
+ strlcpy(info.type, "mn88472", I2C_NAME_SIZE);
+ info.addr = 0x18;
+ info.platform_data = &mn88472_config;
+ request_module(info.type);
+ client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ priv->slave_demod = SLAVE_DEMOD_NONE;
+ goto err_slave_demod_failed;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ priv->slave_demod = SLAVE_DEMOD_NONE;
+ goto err_slave_demod_failed;
+ }
+
+ priv->i2c_client_slave_demod = client;
+ } else {
+ struct mn88473_config mn88473_config = {};
+
+ mn88473_config.fe = &adap->fe[1];
+ mn88473_config.i2c_wr_max = 22,
+ strlcpy(info.type, "mn88473", I2C_NAME_SIZE);
+ info.addr = 0x18;
+ info.platform_data = &mn88473_config;
+ request_module(info.type);
+ client = i2c_new_device(priv->demod_i2c_adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ priv->slave_demod = SLAVE_DEMOD_NONE;
+ goto err_slave_demod_failed;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ priv->slave_demod = SLAVE_DEMOD_NONE;
+ goto err_slave_demod_failed;
+ }
+
+ priv->i2c_client_slave_demod = client;
+ }
+ }
+
return 0;
+err_slave_demod_failed:
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
@@ -984,7 +1060,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
break;
}
- priv->client = client;
+ priv->i2c_client_tuner = client;
sd = i2c_get_clientdata(client);
i2c_set_adapdata(i2c_adap_internal, d);
@@ -1024,32 +1100,30 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
&rtl28xxu_rtl2832_r820t_config, NULL);
break;
case TUNER_RTL2832_R828D:
- /* power off mn88472 demod on GPIO0 */
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01);
- if (ret)
- goto err;
-
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
- if (ret)
- goto err;
-
- ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
- if (ret)
- goto err;
-
- fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap,
+ fe = dvb_attach(r820t_attach, adap->fe[0],
+ priv->demod_i2c_adapter,
&rtl2832u_r828d_config);
-
- /* Use tuner to get the signal strength */
adap->fe[0]->ops.read_signal_strength =
adap->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ if (adap->fe[1]) {
+ fe = dvb_attach(r820t_attach, adap->fe[1],
+ priv->demod_i2c_adapter,
+ &rtl2832u_r828d_config);
+ adap->fe[1]->ops.read_signal_strength =
+ adap->fe[1]->ops.tuner_ops.get_rf_strength;
+ }
+
+ /* attach SDR */
+ dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ &rtl28xxu_rtl2832_r820t_config, NULL);
break;
default:
dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
priv->tuner);
}
- if (fe == NULL && priv->client == NULL) {
+ if (fe == NULL && priv->i2c_client_tuner == NULL) {
ret = -ENODEV;
goto err;
}
@@ -1097,11 +1171,19 @@ err:
static void rtl28xxu_exit(struct dvb_usb_device *d)
{
struct rtl28xxu_priv *priv = d->priv;
- struct i2c_client *client = priv->client;
+ struct i2c_client *client;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
/* remove I2C tuner */
+ client = priv->i2c_client_tuner;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ /* remove I2C slave demod */
+ client = priv->i2c_client_slave_demod;
if (client) {
module_put(client->dev.driver->owner);
i2c_unregister_device(client);
@@ -1201,13 +1283,6 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
if (ret)
goto err;
- mdelay(5);
-
- /* enable ADC */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x48, 0x48);
- if (ret)
- goto err;
-
/* streaming EP: clear stall & reset */
ret = rtl28xx_wr_regs(d, USB_EPA_CTL, "\x00\x00", 2);
if (ret)
@@ -1222,11 +1297,6 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
if (ret)
goto err;
- /* disable ADC */
- ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x48);
- if (ret)
- goto err;
-
/* disable PLL */
ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x80);
if (ret)
@@ -1244,6 +1314,38 @@ err:
return ret;
}
+static int rtl2832u_frontend_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct dvb_usb_adapter *adap = fe_to_adap(fe);
+ int ret;
+ u8 val;
+
+ dev_dbg(&d->udev->dev, "%s: fe=%d onoff=%d\n", __func__, fe->id, onoff);
+
+ /* control internal demod ADC */
+ if (fe->id == 0 && onoff)
+ val = 0x48; /* enable ADC */
+ else
+ val = 0x00; /* disable ADC */
+
+ ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
+ if (ret)
+ goto err;
+
+ /* bypass slave demod TS through master demod */
+ if (fe->id == 1 && onoff) {
+ ret = rtl2832_enable_external_ts_if(adap->fe[0]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_RC_CORE)
static int rtl2831u_rc_query(struct dvb_usb_device *d)
{
@@ -1467,6 +1569,7 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
.size_of_priv = sizeof(struct rtl28xxu_priv),
.power_ctrl = rtl2832u_power_ctrl,
+ .frontend_ctrl = rtl2832u_frontend_ctrl,
.i2c_algo = &rtl28xxu_i2c_algo,
.read_config = rtl2832u_read_config,
.frontend_attach = rtl2832u_frontend_attach,
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index a26cab10f382..3e3ea9d64a38 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -57,7 +57,12 @@ struct rtl28xxu_priv {
u8 page; /* integrated demod active register page */
struct i2c_adapter *demod_i2c_adapter;
bool rc_active;
- struct i2c_client *client;
+ struct i2c_client *i2c_client_tuner;
+ struct i2c_client *i2c_client_slave_demod;
+ #define SLAVE_DEMOD_NONE 0
+ #define SLAVE_DEMOD_MN88472 1
+ #define SLAVE_DEMOD_MN88473 2
+ unsigned int slave_demod:2;
};
enum rtl28xxu_chip_id {
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 41d3eb922a00..3364200db093 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -130,7 +130,6 @@ config DVB_USB_CXUSB
Medion MD95700 hybrid USB2.0 device.
DViCO FusionHDTV (Bluebird) USB2.0 devices
- TechnoTrend TVStick CT2-4400 and CT2-4650 CI devices
config DVB_USB_M920X
tristate "Uli m920x DVB-T USB2.0 support"
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 3f4361e48a32..efa782ed6e2d 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
err("usb_register failed. (%d)", result);
return result;
}
+#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
+ /* FIXME: convert to today's kernel IR infrastructure */
rc_decode = symbol_request(af9005_rc_decode);
rc_keys = symbol_request(rc_map_af9005_table);
rc_keys_size = symbol_request(rc_map_af9005_table_size);
+#endif
if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
err("af9005_rc_decode function not found, disabling remote");
af9005_properties.rc.legacy.rc_query = NULL;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 356abb369c20..0f345b1f9014 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -44,7 +44,6 @@
#include "atbm8830.h"
#include "si2168.h"
#include "si2157.h"
-#include "sp2.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 80
@@ -147,22 +146,6 @@ static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
}
}
-static int cxusb_tt_ct2_4400_gpio_tuner(struct dvb_usb_device *d, int onoff)
-{
- u8 o[2], i;
- int rc;
-
- o[0] = 0x83;
- o[1] = onoff;
- rc = cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
-
- if (rc) {
- deb_info("gpio_write failed.\n");
- return -EIO;
- }
- return 0;
-}
-
/* I2C */
static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
@@ -524,30 +507,6 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
return 0;
}
-static int cxusb_tt_ct2_4400_rc_query(struct dvb_usb_device *d)
-{
- u8 i[2];
- int ret;
- u32 cmd, keycode;
- u8 rc5_cmd, rc5_addr, rc5_toggle;
-
- ret = cxusb_ctrl_msg(d, 0x10, NULL, 0, i, 2);
- if (ret)
- return ret;
-
- cmd = (i[0] << 8) | i[1];
-
- if (cmd != 0xffff) {
- rc5_cmd = cmd & 0x3F; /* bits 1-6 for command */
- rc5_addr = (cmd & 0x07C0) >> 6; /* bits 7-11 for address */
- rc5_toggle = (cmd & 0x0800) >> 11; /* bit 12 for toggle */
- keycode = (rc5_addr << 8) | rc5_cmd;
- rc_keydown(d->rc_dev, RC_BIT_RC5, keycode, rc5_toggle);
- }
-
- return 0;
-}
-
static struct rc_map_table rc_map_dvico_mce_table[] = {
{ 0xfe02, KEY_TV },
{ 0xfe0e, KEY_MP3 },
@@ -673,70 +632,6 @@ static struct rc_map_table rc_map_d680_dmb_table[] = {
{ 0x0025, KEY_POWER },
};
-static int cxusb_tt_ct2_4400_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
-{
- u8 wbuf[2];
- u8 rbuf[6];
- int ret;
- struct i2c_msg msg[] = {
- {
- .addr = 0x51,
- .flags = 0,
- .buf = wbuf,
- .len = 2,
- }, {
- .addr = 0x51,
- .flags = I2C_M_RD,
- .buf = rbuf,
- .len = 6,
- }
- };
-
- wbuf[0] = 0x1e;
- wbuf[1] = 0x00;
- ret = cxusb_i2c_xfer(&d->i2c_adap, msg, 2);
-
- if (ret == 2) {
- memcpy(mac, rbuf, 6);
- return 0;
- } else {
- if (ret < 0)
- return ret;
- return -EIO;
- }
-}
-
-static int cxusb_tt_ct2_4650_ci_ctrl(void *priv, u8 read, int addr,
- u8 data, int *mem)
-{
- struct dvb_usb_device *d = priv;
- u8 wbuf[3];
- u8 rbuf[2];
- int ret;
-
- wbuf[0] = (addr >> 8) & 0xff;
- wbuf[1] = addr & 0xff;
-
- if (read) {
- ret = cxusb_ctrl_msg(d, CMD_SP2_CI_READ, wbuf, 2, rbuf, 2);
- } else {
- wbuf[2] = data;
- ret = cxusb_ctrl_msg(d, CMD_SP2_CI_WRITE, wbuf, 3, rbuf, 1);
- }
-
- if (ret)
- goto err;
-
- if (read)
- *mem = rbuf[1];
-
- return 0;
-err:
- deb_info("%s: ci usb write returned %d\n", __func__, ret);
- return ret;
-
-}
-
static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
@@ -1408,36 +1303,34 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
+static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
struct cxusb_state *st = d->priv;
struct i2c_adapter *adapter;
struct i2c_client *client_demod;
struct i2c_client *client_tuner;
- struct i2c_client *client_ci;
struct i2c_board_info info;
struct si2168_config si2168_config;
struct si2157_config si2157_config;
- struct sp2_config sp2_config;
- u8 o[2], i;
- /* reset the tuner */
- if (cxusb_tt_ct2_4400_gpio_tuner(d, 0) < 0) {
- err("clear tuner gpio failed");
- return -EIO;
- }
- msleep(100);
- if (cxusb_tt_ct2_4400_gpio_tuner(d, 1) < 0) {
- err("set tuner gpio failed");
- return -EIO;
- }
- msleep(100);
+ /* Select required USB configuration */
+ if (usb_set_interface(d->udev, 0, 0) < 0)
+ err("set interface failed");
+
+ /* Unblock all USB pipes */
+ usb_clear_halt(d->udev,
+ usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_clear_halt(d->udev,
+ usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_clear_halt(d->udev,
+ usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
/* attach frontend */
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &adap->fe_adap[0].fe;
si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ si2168_config.ts_clock_inv = 1;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "si2168", I2C_NAME_SIZE);
info.addr = 0x64;
@@ -1477,48 +1370,6 @@ static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
st->i2c_client_tuner = client_tuner;
- /* initialize CI */
- if (d->udev->descriptor.idProduct ==
- USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) {
-
- memcpy(o, "\xc0\x01", 2);
- cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
- msleep(100);
-
- memcpy(o, "\xc0\x00", 2);
- cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
- msleep(100);
-
- memset(&sp2_config, 0, sizeof(sp2_config));
- sp2_config.dvb_adap = &adap->dvb_adap;
- sp2_config.priv = d;
- sp2_config.ci_control = cxusb_tt_ct2_4650_ci_ctrl;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "sp2", I2C_NAME_SIZE);
- info.addr = 0x40;
- info.platform_data = &sp2_config;
- request_module(info.type);
- client_ci = i2c_new_device(&d->i2c_adap, &info);
- if (client_ci == NULL || client_ci->dev.driver == NULL) {
- module_put(client_tuner->dev.driver->owner);
- i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- return -ENODEV;
- }
- if (!try_module_get(client_ci->dev.driver->owner)) {
- i2c_unregister_device(client_ci);
- module_put(client_tuner->dev.driver->owner);
- i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- return -ENODEV;
- }
-
- st->i2c_client_ci = client_ci;
-
- }
-
return 0;
}
@@ -1603,7 +1454,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
static struct dvb_usb_device_properties cxusb_aver_a868r_properties;
static struct dvb_usb_device_properties cxusb_d680_dmb_properties;
static struct dvb_usb_device_properties cxusb_mygica_d689_properties;
-static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties;
+static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -1634,7 +1485,7 @@ static int cxusb_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_tt_ct2_4400_properties,
+ 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
THIS_MODULE, NULL, adapter_nr) ||
0)
return 0;
@@ -1648,13 +1499,6 @@ static void cxusb_disconnect(struct usb_interface *intf)
struct cxusb_state *st = d->priv;
struct i2c_client *client;
- /* remove I2C client for CI */
- client = st->i2c_client_ci;
- if (client) {
- module_put(client->dev.driver->owner);
- i2c_unregister_device(client);
- }
-
/* remove I2C client for tuner */
client = st->i2c_client_tuner;
if (client) {
@@ -1693,8 +1537,7 @@ static struct usb_device_id cxusb_table [] = {
{ USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2) },
{ USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB) },
{ USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689) },
- { USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_TVSTICK_CT2_4400) },
- { USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) },
+ { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, cxusb_table);
@@ -2341,7 +2184,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
}
};
-static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
+static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = CYPRESS_FX2,
@@ -2349,25 +2192,21 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
.size_of_priv = sizeof(struct cxusb_state),
.num_adapters = 1,
- .read_mac_address = cxusb_tt_ct2_4400_read_mac_address,
-
.adapter = {
{
.num_frontends = 1,
.fe = {{
.streaming_ctrl = cxusb_streaming_ctrl,
- /* both frontend and tuner attached in the
- same function */
- .frontend_attach = cxusb_tt_ct2_4400_attach,
+ .frontend_attach = cxusb_mygica_t230_frontend_attach,
/* parameter for the MPEG2-data transfer */
.stream = {
.type = USB_BULK,
- .count = 8,
- .endpoint = 0x82,
+ .count = 5,
+ .endpoint = 0x02,
.u = {
.bulk = {
- .buffersize = 4096,
+ .buffersize = 8192,
}
}
},
@@ -2375,28 +2214,25 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
},
},
- .i2c_algo = &cxusb_i2c_algo,
+ .power_ctrl = cxusb_d680_dmb_power_ctrl,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
.generic_bulk_ctrl_endpoint = 0x01,
- .generic_bulk_ctrl_endpoint_response = 0x81,
- .rc.core = {
- .rc_codes = RC_MAP_TT_1500,
- .allowed_protos = RC_BIT_RC5,
- .rc_query = cxusb_tt_ct2_4400_rc_query,
- .rc_interval = 150,
+ .rc.legacy = {
+ .rc_interval = 100,
+ .rc_map_table = rc_map_d680_dmb_table,
+ .rc_map_size = ARRAY_SIZE(rc_map_d680_dmb_table),
+ .rc_query = cxusb_d680_dmb_rc_query,
},
- .num_device_descs = 2,
+ .num_device_descs = 1,
.devices = {
{
- "TechnoTrend TVStick CT2-4400",
- { NULL },
- { &cxusb_table[20], NULL },
- },
- {
- "TechnoTrend TT-connect CT2-4650 CI",
+ "Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[21], NULL },
+ { &cxusb_table[22], NULL },
},
}
};
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 29f3e2ea2476..527ff7905e15 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,14 +28,10 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
-#define CMD_SP2_CI_WRITE 0x70
-#define CMD_SP2_CI_READ 0x71
-
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
- struct i2c_client *i2c_client_ci;
};
#endif
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 6b0b8b6b9e2a..5801ae7f672a 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -449,6 +449,8 @@ static int technisat_usb2_read_mac_address(struct dvb_usb_device *d,
return 0;
}
+static struct stv090x_config technisat_usb2_stv090x_config;
+
/* frontend attach */
static int technisat_usb2_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
@@ -472,7 +474,8 @@ static int technisat_usb2_set_voltage(struct dvb_frontend *fe,
}
for (i = 0; i < 3; i++)
- if (stv090x_set_gpio(fe, i+2, 0, gpio[i], 0) != 0)
+ if (technisat_usb2_stv090x_config.set_gpio(fe, i+2, 0,
+ gpio[i], 0) != 0)
return -EREMOTEIO;
return 0;
}
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 957c7ae30efe..44ae1e0661e6 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "activates debug info");
#define dprintk(fmt, arg...) do { \
if (debug) \
printk(KERN_INFO "em28xx-audio %s: " fmt, \
- __func__, ##arg); \
+ __func__, ##arg); \
} while (0)
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -232,7 +232,6 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
.channels_max = 2,
.buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */
-
/*
* The period is 12.288 bytes. Allow a 10% of variation along its
* value, in order to avoid overruns/underruns due to some clock
@@ -361,7 +360,7 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
dprintk("Setting capture parameters\n");
ret = snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ params_buffer_bytes(hw_params));
if (ret < 0)
return ret;
#if 0
@@ -478,7 +477,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
* AC97 volume control support
*/
static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *info)
+ struct snd_ctl_elem_info *info)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
@@ -494,7 +493,7 @@ static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
}
static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
@@ -534,7 +533,7 @@ err:
}
static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+ struct snd_ctl_elem_value *value)
{
struct em28xx *dev = snd_kcontrol_chip(kcontrol);
struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
@@ -655,7 +654,7 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
struct snd_kcontrol *kctl;
struct snd_kcontrol_new tmp;
- memset (&tmp, 0, sizeof(tmp));
+ memset(&tmp, 0, sizeof(tmp));
tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
tmp.private_value = id,
tmp.name = ctl_name,
@@ -672,7 +671,7 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
dprintk("Added control %s for ac97 volume control 0x%04x\n",
ctl_name, id);
- memset (&tmp, 0, sizeof(tmp));
+ memset(&tmp, 0, sizeof(tmp));
tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
tmp.private_value = id,
tmp.name = ctl_name,
@@ -731,7 +730,7 @@ static void em28xx_audio_free_urb(struct em28xx *dev)
/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
static int em28xx_audio_ep_packet_size(struct usb_device *udev,
- struct usb_endpoint_descriptor *e)
+ struct usb_endpoint_descriptor *e)
{
int size = le16_to_cpu(e->wMaxPacketSize);
@@ -781,7 +780,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
interval = 1 << (ep->bInterval - 1);
em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
- EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
+ EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
dev->ifnum, alt,
interval,
ep_size);
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 6d2ea9afd57b..7be661f73930 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -27,7 +27,6 @@
#include "em28xx.h"
-
/* Possible i2c addresses of Micron sensors */
static unsigned short micron_sensor_addrs[] = {
0xb8 >> 1, /* MT9V111, MT9V403 */
@@ -43,7 +42,6 @@ static unsigned short omnivision_sensor_addrs[] = {
I2C_CLIENT_END
};
-
static struct soc_camera_link camlink = {
.bus_id = 0,
.flags = 0,
@@ -51,7 +49,6 @@ static struct soc_camera_link camlink = {
.unbalanced_power = true,
};
-
/* FIXME: Should be replaced by a proper mt9m111 driver */
static int em28xx_initialize_mt9m111(struct em28xx *dev)
{
@@ -70,7 +67,6 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev)
return 0;
}
-
/* FIXME: Should be replaced by a proper mt9m001 driver */
static int em28xx_initialize_mt9m001(struct em28xx *dev)
{
@@ -98,7 +94,6 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
return 0;
}
-
/*
* Probes Micron sensors with 8 bit address and 16 bit register width
*/
@@ -430,7 +425,7 @@ int em28xx_init_camera(struct em28xx *dev)
break;
}
- fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ fmt.code = MEDIA_BUS_FMT_YUYV8_2X8;
fmt.width = 640;
fmt.height = 480;
v4l2_subdev_call(subdev, video, s_mbus_fmt, &fmt);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 71fa51e7984e..d9704e66b8c9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -64,7 +64,6 @@ module_param(usb_xfer_mode, int, 0444);
MODULE_PARM_DESC(usb_xfer_mode,
"USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)");
-
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
@@ -190,8 +189,8 @@ static struct em28xx_reg_seq kworld_a340_digital[] = {
};
static struct em28xx_reg_seq kworld_ub435q_v3_digital[] = {
- {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
- {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
{ -1, -1, -1, -1},
@@ -301,7 +300,6 @@ static struct em28xx_reg_seq dikom_dk300_digital[] = {
{ -1, -1, -1, -1},
};
-
/* Reset for the most [digital] boards */
static struct em28xx_reg_seq leadership_digital[] = {
{EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10},
@@ -479,6 +477,20 @@ static struct em28xx_reg_seq pctv_292e[] = {
{-1, -1, -1, -1},
};
+static struct em28xx_reg_seq terratec_t2_stick_hd[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
+ {0x0d, 0xff, 0xff, 600},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10},
+ {EM2874_R80_GPIO_P0_CTRL, 0xbc, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 300},
+ {EM2874_R80_GPIO_P0_CTRL, 0xf8, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 300},
+ {0x0d, 0x42, 0xff, 1000},
+ {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0},
+ {-1, -1, -1, -1},
+};
+
/*
* Button definitions
*/
@@ -548,7 +560,6 @@ static struct em28xx_led pctv_80e_leds[] = {
{-1, 0, 0, 0},
};
-
/*
* Board definitions
*/
@@ -1514,7 +1525,7 @@ struct em28xx_board em28xx_boards[] = {
.type = EM28XX_VMUX_TELEVISION,
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
- .aout = EM28XX_AOUT_MONO | /* I2S */
+ .aout = EM28XX_AOUT_MONO | /* I2S */
EM28XX_AOUT_MASTER, /* Line out pin */
}, {
.type = EM28XX_VMUX_COMPOSITE1,
@@ -1536,7 +1547,7 @@ struct em28xx_board em28xx_boards[] = {
.type = EM28XX_VMUX_TELEVISION,
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
- .aout = EM28XX_AOUT_MONO | /* I2S */
+ .aout = EM28XX_AOUT_MONO | /* I2S */
EM28XX_AOUT_MASTER, /* Line out pin */
}, {
.type = EM28XX_VMUX_COMPOSITE1,
@@ -2243,6 +2254,31 @@ struct em28xx_board em28xx_boards[] = {
.has_dvb = 1,
.ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
+ [EM2861_BOARD_LEADTEK_VC100] = {
+ .name = "Leadtek VC100",
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .decoder = EM28XX_TVP5150,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
+ /* eb1a:8179 Terratec Cinergy T2 Stick HD.
+ * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2146 */
+ [EM28178_BOARD_TERRATEC_T2_STICK_HD] = {
+ .name = "Terratec Cinergy T2 Stick HD",
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = terratec_t2_stick_hd,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_TERRATEC_SLIM_2,
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2424,6 +2460,10 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PCTV_461E },
{ USB_DEVICE(0x2013, 0x025f),
.driver_info = EM28178_BOARD_PCTV_292E },
+ { USB_DEVICE(0x0413, 0x6f07),
+ .driver_info = EM2861_BOARD_LEADTEK_VC100 },
+ { USB_DEVICE(0xeb1a, 0x8179),
+ .driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2453,6 +2493,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
{0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT},
};
+
/* NOTE: introduce a separate hash table for devices with 16 bit eeproms */
int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
@@ -2695,7 +2736,7 @@ static int em28xx_hint_board(struct em28xx *dev)
" insmod option:\n");
for (i = 0; i < em28xx_bcount; i++) {
em28xx_errdev(" card=%d -> %s\n",
- i, em28xx_boards[i].name);
+ i, em28xx_boards[i].name);
}
return -1;
}
@@ -3051,6 +3092,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (le16_to_cpu(dev->udev->descriptor.idVendor)
== 0xeb1a) {
__le16 idProd = dev->udev->descriptor.idProduct;
+
if (le16_to_cpu(idProd) == 0x2710)
chip_name = "em2710";
else if (le16_to_cpu(idProd) == 0x2820)
@@ -3139,7 +3181,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
- __func__, retval);
+ __func__, retval);
return retval;
}
@@ -3147,13 +3189,13 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
if (dev->def_i2c_bus) {
if (dev->is_em25xx)
retval = em28xx_i2c_register(dev, 1,
- EM28XX_I2C_ALGO_EM25XX_BUS_B);
+ EM28XX_I2C_ALGO_EM25XX_BUS_B);
else
retval = em28xx_i2c_register(dev, 1,
- EM28XX_I2C_ALGO_EM28XX);
+ EM28XX_I2C_ALGO_EM28XX);
if (retval < 0) {
em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
- __func__, retval);
+ __func__, retval);
em28xx_i2c_unregister(dev, 0);
@@ -3193,7 +3235,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (nr >= EM28XX_MAXBOARDS) {
/* No free device slots */
printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
- EM28XX_MAXBOARDS);
+ EM28XX_MAXBOARDS);
retval = -ENOMEM;
goto err_no_slot;
}
@@ -3377,6 +3419,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* Checks if audio is provided by a USB Audio Class interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
struct usb_interface *uif = udev->config->interface[i];
+
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
if (has_vendor_audio)
em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
@@ -3487,7 +3530,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
}
static int em28xx_usb_suspend(struct usb_interface *interface,
- pm_message_t message)
+ pm_message_t message)
{
struct em28xx *dev;
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 901cf2b952d7..86461a708abe 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
* reads data from the usb device specifying bRequest
*/
int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
- char *buf, int len)
+ char *buf, int len)
{
int ret;
int pipe = usb_rcvctrlpipe(dev->udev, 0);
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(em28xx_read_reg);
* sends data to the usb device, specifying bRequest
*/
int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
- int len)
+ int len)
{
int ret;
int pipe = usb_sndctrlpipe(dev->udev, 0);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(em28xx_write_reg);
* the actual value
*/
int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
- u8 bitmask)
+ u8 bitmask)
{
int oldval;
u8 newval;
@@ -222,7 +222,7 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
if (oldval < 0)
return oldval;
- newval = (((u8) oldval) & ~bitmask) | (val & bitmask);
+ newval = (((u8)oldval) & ~bitmask) | (val & bitmask);
return em28xx_write_regs(dev, reg, &newval, 1);
}
@@ -314,7 +314,7 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
if (ret < 0)
return ret;
- ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *) &value, 2);
+ ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *)&value, 2);
if (ret < 0)
return ret;
@@ -361,7 +361,7 @@ static int set_ac97_input(struct em28xx *dev)
if (ret < 0)
em28xx_warn("couldn't setup AC97 register %d\n",
- inputs[i].reg);
+ inputs[i].reg);
}
return 0;
}
@@ -445,7 +445,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
if (ret < 0)
em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ outputs[i].reg);
}
}
@@ -483,7 +483,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
vol);
if (ret < 0)
em28xx_warn("couldn't setup AC97 register %d\n",
- outputs[i].reg);
+ outputs[i].reg);
}
if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) {
@@ -531,7 +531,7 @@ int em28xx_audio_setup(struct em28xx *dev)
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
dev->int_audio_type = EM28XX_INT_AUDIO_I2S;
if (dev->chip_id < CHIP_ID_EM2860 &&
- (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
+ (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
EM2820_CHIPCFG_I2S_1_SAMPRATE)
i2s_samplerates = 1;
else if (dev->chip_id >= CHIP_ID_EM2860 &&
@@ -541,7 +541,7 @@ int em28xx_audio_setup(struct em28xx *dev)
else
i2s_samplerates = 3;
em28xx_info("I2S Audio (%d sample rate(s))\n",
- i2s_samplerates);
+ i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
@@ -614,8 +614,9 @@ const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
{
if (dev->board.leds) {
u8 k = 0;
+
while (dev->board.leds[k].role >= 0 &&
- dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
+ dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
if (dev->board.leds[k].role == role)
return &dev->board.leds[k];
k++;
@@ -658,10 +659,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
if (dev->mode == EM28XX_ANALOG_MODE)
rc = em28xx_write_reg(dev,
- EM28XX_R12_VINENABLE, 0x67);
+ EM28XX_R12_VINENABLE,
+ 0x67);
else
rc = em28xx_write_reg(dev,
- EM28XX_R12_VINENABLE, 0x37);
+ EM28XX_R12_VINENABLE,
+ 0x37);
if (rc < 0)
return rc;
@@ -815,9 +818,9 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
if (usb_bufs->transfer_buffer[i]) {
usb_free_coherent(dev->udev,
- urb->transfer_buffer_length,
- usb_bufs->transfer_buffer[i],
- urb->transfer_dma);
+ urb->transfer_buffer_length,
+ usb_bufs->transfer_buffer[i],
+ urb->transfer_dma);
}
usb_free_urb(urb);
usb_bufs->urb[i] = NULL;
@@ -889,7 +892,7 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
if ((xfer_bulk && !dev->analog_ep_bulk) ||
(!xfer_bulk && !dev->analog_ep_isoc)) {
em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
- xfer_bulk > 0);
+ xfer_bulk > 0);
return -EINVAL;
}
usb_bufs = &dev->usb_ctl.analog_bufs;
@@ -988,9 +991,9 @@ EXPORT_SYMBOL_GPL(em28xx_alloc_urbs);
* Allocate URBs and start IRQ
*/
int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
- int xfer_bulk, int num_bufs, int max_pkt_size,
+ int xfer_bulk, int num_bufs, int max_pkt_size,
int packet_multiplier,
- int (*urb_data_copy) (struct em28xx *dev, struct urb *urb))
+ int (*urb_data_copy)(struct em28xx *dev, struct urb *urb))
{
struct em28xx_dmaqueue *dma_q = &dev->vidq;
struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
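The em28xx_write_reg_bits() hunk above is the usual read-modify-write idiom: fetch the register, clear the bits selected by the mask, merge in the new bits, and write the result back. A self-contained toy version of the same arithmetic (hypothetical register file, not the driver's USB accessors):

#include <stdio.h>

static unsigned char regs[256];                 /* toy register file */

static int read_reg(unsigned int reg)
{
        return regs[reg & 0xff];
}

static int write_reg(unsigned int reg, unsigned char val)
{
        regs[reg & 0xff] = val;
        return 0;
}

/* Change only the bits selected by 'mask', as em28xx_write_reg_bits() does. */
static int write_reg_bits(unsigned int reg, unsigned char val, unsigned char mask)
{
        int oldval = read_reg(reg);

        if (oldval < 0)
                return oldval;                  /* propagate an I/O error */

        return write_reg(reg, ((unsigned char)oldval & ~mask) | (val & mask));
}

int main(void)
{
        write_reg(0x12, 0x67);
        write_reg_bits(0x12, 0x80, 0xf0);       /* touch only the high nibble */
        printf("0x%02x\n", read_reg(0x12));     /* prints 0x87 */
        return 0;
}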
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 9682c52d67d1..9877b699c6bc 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -63,7 +63,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
MODULE_VERSION(EM28XX_VERSION);
-
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
@@ -71,7 +70,7 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define dprintk(level, fmt, arg...) do { \
-if (debug >= level) \
+if (debug >= level) \
printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
} while (0)
@@ -99,9 +98,8 @@ struct em28xx_dvb {
struct i2c_client *i2c_client_tuner;
};
-
static inline void print_err_status(struct em28xx *dev,
- int packet, int status)
+ int packet, int status)
{
char *errmsg = "Unknown";
@@ -169,7 +167,7 @@ static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
if (!urb->actual_length)
continue;
dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
- urb->actual_length);
+ urb->actual_length);
} else {
if (urb->iso_frame_desc[i].status < 0) {
print_err_status(dev, i,
@@ -280,7 +278,6 @@ static int em28xx_stop_feed(struct dvb_demux_feed *feed)
}
-
/* ------------------------------------------------------------------ */
static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
@@ -740,7 +737,7 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
return ret;
#else
dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
- KBUILD_MODNAME, c->lna);
+ KBUILD_MODNAME, c->lna);
return 0;
#endif
}
@@ -830,6 +827,7 @@ static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
.no_tuner = 1,
.parallel_ts = 1,
};
+
static struct qt1010_config em28xx_qt1010_config = {
.i2c_address = 0x62
};
@@ -861,7 +859,6 @@ static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
.agc = 0x99,
};
-
static struct tda18271_std_map drx_j_std_map = {
.atsc_6 = { .if_freq = 5000, .agc_mode = 3, .std = 0, .if_lvl = 1,
.rfagc_top = 0x37, },
@@ -948,7 +945,7 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
if (result < 0) {
printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
- dev->name, result);
+ dev->name, result);
goto fail_frontend1;
}
}
@@ -1047,7 +1044,7 @@ static void em28xx_unregister_dvb(struct em28xx_dvb *dvb)
static int em28xx_dvb_init(struct em28xx *dev)
{
- int result = 0, mfe_shared = 0;
+ int result = 0;
struct em28xx_dvb *dvb;
if (dev->is_audio_only) {
@@ -1182,7 +1179,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
&dev->i2c_adap[dev->def_i2c_bus]);
if (dvb->fe[0] != NULL) {
if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
- &dev->i2c_adap[dev->def_i2c_bus], 0x61, TUNER_THOMSON_DTT761X)) {
+ &dev->i2c_adap[dev->def_i2c_bus],
+ 0x61, TUNER_THOMSON_DTT761X)) {
result = -EINVAL;
goto out_free;
}
@@ -1204,7 +1202,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
&dev->i2c_adap[dev->def_i2c_bus], 0x48);
if (dvb->fe[0]) {
if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
- &dev->i2c_adap[dev->def_i2c_bus], 0x60, TUNER_PHILIPS_CU1216L)) {
+ &dev->i2c_adap[dev->def_i2c_bus],
+ 0x60, TUNER_PHILIPS_CU1216L)) {
result = -EINVAL;
goto out_free;
}
@@ -1219,7 +1218,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
- &dev->i2c_adap[dev->def_i2c_bus],
+ &dev->i2c_adap[dev->def_i2c_bus],
&kworld_a340_config)) {
dvb_frontend_detach(dvb->fe[0]);
result = -EINVAL;
@@ -1250,10 +1249,10 @@ static int em28xx_dvb_init(struct em28xx *dev)
#ifdef CONFIG_GPIOLIB
/* enable LNA for DVB-T, DVB-T2 and DVB-C */
result = gpio_request_one(dvb->lna_gpio,
- GPIOF_OUT_INIT_LOW, NULL);
+ GPIOF_OUT_INIT_LOW, NULL);
if (result)
em28xx_errdev("gpio request failed %d\n",
- result);
+ result);
else
gpio_free(dvb->lna_gpio);
@@ -1266,6 +1265,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
{
struct xc5000_config cfg;
+
hauppauge_hvr930c_init(dev);
dvb->fe[0] = dvb_attach(drxk_attach,
@@ -1339,7 +1339,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
/* attach SEC */
if (dvb->fe[0])
dvb_attach(a8293_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus],
- &em28xx_a8293_config);
+ &em28xx_a8293_config);
break;
case EM2874_BOARD_DELOCK_61959:
case EM2874_BOARD_MAXMEDIA_UB425_TC:
@@ -1553,6 +1553,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
struct si2157_config si2157_config;
/* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &dvb->fe[0];
si2168_config.ts_mode = SI2168_TS_PARALLEL;
@@ -1603,6 +1604,65 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->fe[0]->ops.set_lna = em28xx_pctv_292e_set_lna;
}
break;
+ case EM28178_BOARD_TERRATEC_T2_STICK_HD:
+ {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+
+ /* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &dvb->fe[0];
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = dvb->fe[0];
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2146", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("si2157");
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_tuner = client;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
@@ -1624,9 +1684,6 @@ static int em28xx_dvb_init(struct em28xx *dev)
if (result < 0)
goto out_free;
- /* MFE lock */
- dvb->adapter.mfe_shared = mfe_shared;
-
em28xx_info("DVB extension successfully initialized\n");
kref_get(&dev->ref);
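The new EM28178_BOARD_TERRATEC_T2_STICK_HD case repeats the si2168/si2157 binding recipe used by the existing si2168 case earlier in this function: zero the config structure (note the memset also added to the older case above), register the demod as an I2C client, pin its driver module with try_module_get(), then do the same for the tuner, unwinding in reverse order if any step fails. A kernel-style sketch of one such step, condensed into a hypothetical helper that the driver does not actually have:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/string.h>

/* Bind one I2C client and keep its driver module loaded (sketch only). */
static struct i2c_client *attach_client(struct i2c_adapter *adap,
                                        const char *type, unsigned short addr,
                                        void *platform_data)
{
        struct i2c_board_info info;
        struct i2c_client *client;

        memset(&info, 0, sizeof(info));
        strlcpy(info.type, type, I2C_NAME_SIZE);
        info.addr = addr;
        info.platform_data = platform_data;
        request_module("%s", info.type);

        client = i2c_new_device(adap, &info);
        if (client == NULL || client->dev.driver == NULL)
                return NULL;                    /* no driver bound */

        /* Hold a reference so the bound driver cannot be unloaded. */
        if (!try_module_get(client->dev.driver->owner)) {
                i2c_unregister_device(client);
                return NULL;
        }

        return client;
}

On the tuner failure paths the demod has to be torn down in reverse order, module_put() on its driver followed by i2c_unregister_device(), which is exactly what the new case does.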
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1048c1a23fb6..a19b5c8b56ff 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -593,6 +593,7 @@ static inline unsigned long em28xx_hash_mem(char *buf, int length, int bits)
unsigned long l = 0;
int len = 0;
unsigned char c;
+
do {
if (len == length) {
c = (char)len;
@@ -877,6 +878,7 @@ static struct i2c_client em28xx_client_template = {
* incomplete list of known devices
*/
static char *i2c_devs[128] = {
+ [0x1c >> 1] = "lgdt330x",
[0x3e >> 1] = "remote IR sensor",
[0x4a >> 1] = "saa7113h",
[0x52 >> 1] = "drxk",
@@ -949,7 +951,7 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
retval = i2c_add_adapter(&dev->i2c_adap[bus]);
if (retval < 0) {
em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n",
- __func__, retval);
+ __func__, retval);
return retval;
}
@@ -961,7 +963,7 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len);
if ((retval < 0) && (retval != -ENODEV)) {
em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
- __func__, retval);
+ __func__, retval);
return retval;
}
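The i2c_devs[] table used by the bus scan is indexed by 7-bit I2C address, which is why the new lgdt330x entry is written as its familiar 8-bit write address shifted right by one (0x1c >> 1 == 0x0e). A trivial illustration of that lookup, reusing a few entries from the table above:

#include <stdio.h>

/* Known-device names indexed by 7-bit I2C address, as in em28xx-i2c.c. */
static const char *i2c_devs[128] = {
        [0x1c >> 1] = "lgdt330x",
        [0x3e >> 1] = "remote IR sensor",
        [0x4a >> 1] = "saa7113h",
};

int main(void)
{
        unsigned int addr7 = 0x1c >> 1;         /* 8-bit write address 0x1c */

        printf("0x%02x: %s\n", addr7,
               i2c_devs[addr7] ? i2c_devs[addr7] : "unknown");
        return 0;
}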
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 23f8f6afa2e0..d8dc03aadfbd 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -459,7 +459,7 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
return em2874_ir_change_protocol(rc_dev, rc_type);
default:
printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
- dev->chip_id);
+ dev->chip_id);
return -EINVAL;
}
}
@@ -505,7 +505,7 @@ static void em28xx_query_buttons(struct work_struct *work)
/* Check states of the buttons and act */
j = 0;
while (dev->board.buttons[j].role >= 0 &&
- dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
+ dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
struct em28xx_button *button = &dev->board.buttons[j];
/* Check if button uses the current address */
if (button->reg_r != dev->button_polling_addresses[i]) {
@@ -607,7 +607,7 @@ static void em28xx_init_buttons(struct em28xx *dev)
dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
while (dev->board.buttons[i].role >= 0 &&
- dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
+ dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
struct em28xx_button *button = &dev->board.buttons[i];
/* Check if polling address is already on the list */
addr_new = true;
@@ -653,11 +653,11 @@ next_button:
/* Start polling */
if (dev->num_button_polling_addresses) {
memset(dev->button_polling_last_values, 0,
- EM28XX_NUM_BUTTON_ADDRESSES_MAX);
+ EM28XX_NUM_BUTTON_ADDRESSES_MAX);
INIT_DELAYED_WORK(&dev->buttons_query_work,
- em28xx_query_buttons);
+ em28xx_query_buttons);
schedule_delayed_work(&dev->buttons_query_work,
- msecs_to_jiffies(dev->button_polling_interval));
+ msecs_to_jiffies(dev->button_polling_interval));
}
}
@@ -841,8 +841,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
if (!ir)
goto ref_put;
- if (ir->rc)
- rc_unregister_device(ir->rc);
+ rc_unregister_device(ir->rc);
kfree(ir->i2c_client);
@@ -887,7 +886,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
if (dev->num_button_polling_addresses)
schedule_delayed_work(&dev->buttons_query_work,
- msecs_to_jiffies(dev->button_polling_interval));
+ msecs_to_jiffies(dev->button_polling_interval));
return 0;
}
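The dropped "if (ir->rc)" guard, like the vfree() and pvr2_hdw_destroy() guards removed further down, assumes the cleanup routine is safe to call with a NULL argument (guaranteed for kfree()/vfree(), and presumably true of the other callees, otherwise these hunks would introduce NULL dereferences). The same idiom in plain C:

#include <stdlib.h>

struct remote { int fd; };

/* free(NULL) is defined to do nothing, so callers need no guard. */
static void remote_release(struct remote *r)
{
        free(r);
}

int main(void)
{
        remote_release(NULL);   /* safe: no check needed at the call site */
        return 0;
}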
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 311fb349dafa..13cbb7f3ea10 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -49,7 +49,6 @@
#define EM28XX_CHIPCFG2_TS_PACKETSIZE_564 0x02
#define EM28XX_CHIPCFG2_TS_PACKETSIZE_752 0x03
-
/* GPIO/GPO registers */
#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */
#define EM2820_R08_GPIO_CTRL 0x08 /* em2820-em2873/83 only */
@@ -68,7 +67,6 @@
#define EM28XX_I2C_FREQ_400_KHZ 0x01
#define EM28XX_I2C_FREQ_100_KHZ 0x00
-
#define EM28XX_R0A_CHIPID 0x0a
#define EM28XX_R0C_USBSUSP 0x0c
#define EM28XX_R0C_USBSUSP_SNAPSHOT 0x20 /* 1=button pressed, needs reset */
@@ -157,7 +155,6 @@
#define EM28XX_OUTFMT_YUV422_Y1UY0V 0x15
#define EM28XX_OUTFMT_YUV411 0x18
-
#define EM28XX_R28_XMIN 0x28
#define EM28XX_R29_XMAX 0x29
#define EM28XX_R2A_YMIN 0x2a
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 432862c20bbf..8dfcb56bf4b3 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -14,7 +14,6 @@
GNU General Public License for more details.
*/
-
int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
void em28xx_stop_vbi_streaming(struct vb2_queue *vq);
extern struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 34ee1e03a732..744e7ed743e1 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -92,7 +92,6 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-
struct vb2_ops em28xx_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 03d5ece0319c..cf7f58b76292 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(EM28XX_VERSION);
-
#define EM25XX_FRMDATAHDR_BYTE1 0x02
#define EM25XX_FRMDATAHDR_BYTE2_STILL_IMAGE 0x20
#define EM25XX_FRMDATAHDR_BYTE2_FRAME_END 0x02
@@ -90,7 +89,6 @@ MODULE_VERSION(EM28XX_VERSION);
EM25XX_FRMDATAHDR_BYTE2_FRAME_END | \
EM25XX_FRMDATAHDR_BYTE2_FRAME_ID)
-
static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
@@ -194,9 +192,10 @@ static int em28xx_vbi_supported(struct em28xx *dev)
static void em28xx_wake_i2c(struct em28xx *dev)
{
struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev;
+
v4l2_device_call_all(v4l2_dev, 0, core, reset, 0);
v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
- INPUT(dev->ctl_input)->vmux, 0, 0);
+ INPUT(dev->ctl_input)->vmux, 0, 0);
v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0);
}
@@ -275,7 +274,7 @@ static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
}
static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
- u16 width, u16 height)
+ u16 width, u16 height)
{
u8 cwidth = width >> 2;
u8 cheight = height >> 2;
@@ -283,7 +282,7 @@ static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
/* NOTE: size limit: 2047x1023 = 2MPix */
em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
- hstart, vstart,
+ hstart, vstart,
((overflow & 2) << 9 | cwidth << 2),
((overflow & 1) << 10 | cheight << 2));
@@ -406,13 +405,13 @@ set_alt:
dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
} else { /* isoc */
em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n",
- min_pkt_size, dev->alt);
+ min_pkt_size, dev->alt);
dev->max_pkt_size =
dev->alt_max_pkt_size_isoc[dev->alt];
dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
}
em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
- dev->alt, dev->max_pkt_size);
+ dev->alt, dev->max_pkt_size);
errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
if (errCode < 0) {
em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
@@ -482,7 +481,7 @@ static void em28xx_copy_video(struct em28xx *dev,
if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) {
em28xx_isocdbg("Overflow of %zu bytes past buffer end (1)\n",
- ((char *)startwrite + lencopy) -
+ ((char *)startwrite + lencopy) -
((char *)buf->vb_buf + buf->length));
remain = (char *)buf->vb_buf + buf->length -
(char *)startwrite;
@@ -548,7 +547,7 @@ static void em28xx_copy_vbi(struct em28xx *dev,
}
static inline void print_err_status(struct em28xx *dev,
- int packet, int status)
+ int packet, int status)
{
char *errmsg = "Unknown";
@@ -831,7 +830,6 @@ static inline int em28xx_urb_data_copy(struct em28xx *dev, struct urb *urb)
return 1;
}
-
static int get_ressource(enum v4l2_buf_type f_type)
{
switch (f_type) {
@@ -1003,6 +1001,7 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
}
while (!list_empty(&vidq->active)) {
struct em28xx_buffer *buf;
+
buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
@@ -1033,6 +1032,7 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
}
while (!list_empty(&vbiq->active)) {
struct em28xx_buffer *buf;
+
buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
@@ -1109,6 +1109,7 @@ static int em28xx_vb2_setup(struct em28xx *dev)
static void video_mux(struct em28xx *dev, int index)
{
struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev;
+
dev->ctl_input = index;
dev->ctl_ainput = INPUT(index)->amux;
dev->ctl_aoutput = INPUT(index)->aout;
@@ -1117,21 +1118,22 @@ static void video_mux(struct em28xx *dev, int index)
dev->ctl_aoutput = EM28XX_AOUT_MASTER;
v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
- INPUT(index)->vmux, 0, 0);
+ INPUT(index)->vmux, 0, 0);
if (dev->board.has_msp34xx) {
if (dev->i2s_speed) {
v4l2_device_call_all(v4l2_dev, 0, audio,
- s_i2s_clock_freq, dev->i2s_speed);
+ s_i2s_clock_freq, dev->i2s_speed);
}
/* Note: this is msp3400 specific */
v4l2_device_call_all(v4l2_dev, 0, audio, s_routing,
- dev->ctl_ainput, MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
+ dev->ctl_ainput,
+ MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
}
if (dev->board.adecoder != EM28XX_NOADECODER) {
v4l2_device_call_all(v4l2_dev, 0, audio, s_routing,
- dev->ctl_ainput, dev->ctl_aoutput, 0);
+ dev->ctl_ainput, dev->ctl_aoutput, 0);
}
em28xx_audio_analog_set(dev);
@@ -1203,7 +1205,7 @@ static const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
};
static void size_to_scale(struct em28xx *dev,
- unsigned int width, unsigned int height,
+ unsigned int width, unsigned int height,
unsigned int *hscale, unsigned int *vscale)
{
unsigned int maxw = norm_maxw(dev);
@@ -1234,7 +1236,7 @@ static void scale_to_size(struct em28xx *dev,
------------------------------------------------------------------*/
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1267,7 +1269,7 @@ static struct em28xx_fmt *format_by_fourcc(unsigned int fourcc)
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1338,7 +1340,7 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
/* set new image size */
size_to_scale(dev, v4l2->width, v4l2->height,
- &v4l2->hscale, &v4l2->vscale);
+ &v4l2->hscale, &v4l2->vscale);
em28xx_resolution_set(dev);
@@ -1346,7 +1348,7 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1401,7 +1403,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
v4l2->width = f.fmt.pix.width;
v4l2->height = f.fmt.pix.height;
size_to_scale(dev, v4l2->width, v4l2->height,
- &v4l2->hscale, &v4l2->vscale);
+ &v4l2->hscale, &v4l2->vscale);
em28xx_resolution_set(dev);
v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_std, v4l2->norm);
@@ -1422,7 +1424,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
video, g_parm, p);
else
v4l2_video_std_frame_period(v4l2->norm,
- &p->parm.capture.timeperframe);
+ &p->parm.capture.timeperframe);
return rc;
}
@@ -1450,7 +1452,7 @@ static const char *iname[] = {
};
static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+ struct v4l2_input *i)
{
struct em28xx *dev = video_drvdata(file);
unsigned int n;
@@ -1467,7 +1469,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
strcpy(i->name, iname[INPUT(n)->type]);
if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
- (EM28XX_VMUX_CABLE == INPUT(n)->type))
+ (EM28XX_VMUX_CABLE == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = dev->v4l2->vdev->tvnorms;
@@ -1558,7 +1560,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
}
static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *t)
+ struct v4l2_tuner *t)
{
struct em28xx *dev = video_drvdata(file);
@@ -1572,7 +1574,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
}
static int vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *t)
+ const struct v4l2_tuner *t)
{
struct em28xx *dev = video_drvdata(file);
@@ -1584,7 +1586,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
}
static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
+ struct v4l2_frequency *f)
{
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1597,7 +1599,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
}
static int vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *f)
+ const struct v4l2_frequency *f)
{
struct v4l2_frequency new_freq = *f;
struct em28xx *dev = video_drvdata(file);
@@ -1615,7 +1617,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vidioc_g_chip_info(struct file *file, void *priv,
- struct v4l2_dbg_chip_info *chip)
+ struct v4l2_dbg_chip_info *chip)
{
struct em28xx *dev = video_drvdata(file);
@@ -1670,6 +1672,7 @@ static int vidioc_g_register(struct file *file, void *priv,
reg->val = ret;
} else {
__le16 val = 0;
+
ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
reg->reg, (char *)&val, 2);
if (ret < 0)
@@ -1700,9 +1703,8 @@ static int vidioc_s_register(struct file *file, void *priv,
}
#endif
-
static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
+ struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
struct em28xx *dev = video_drvdata(file);
@@ -1736,7 +1738,7 @@ static int vidioc_querycap(struct file *file, void *priv,
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+ struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(format)))
return -EINVAL;
@@ -2177,9 +2179,10 @@ static unsigned short msp3400_addrs[] = {
/******************************** usb interface ******************************/
-static struct video_device *em28xx_vdev_init(struct em28xx *dev,
- const struct video_device *template,
- const char *type_name)
+static struct video_device
+*em28xx_vdev_init(struct em28xx *dev,
+ const struct video_device *template,
+ const char *type_name)
{
struct video_device *vfd;
@@ -2344,21 +2347,24 @@ static int em28xx_v4l2_init(struct em28xx *dev)
if (dev->board.radio.type)
v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus],
- "tuner", dev->board.radio_addr, NULL);
+ &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", dev->board.radio_addr,
+ NULL);
if (has_demod)
v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
+ &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", 0,
+ v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (tuner_addr == 0) {
enum v4l2_i2c_tuner_type type =
has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
- &dev->i2c_adap[dev->def_i2c_bus], "tuner",
- 0, v4l2_i2c_tuner_addrs(type));
+ &dev->i2c_adap[dev->def_i2c_bus],
+ "tuner", 0,
+ v4l2_i2c_tuner_addrs(type));
if (sd)
tuner_addr = v4l2_i2c_subdev_addr(sd);
@@ -2378,20 +2384,20 @@ static int em28xx_v4l2_init(struct em28xx *dev)
ret = em28xx_audio_setup(dev);
if (ret < 0) {
em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
- __func__, ret);
+ __func__, ret);
goto unregister_dev;
}
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
+ V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
} else {
/* install the em28xx notify callback */
v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
- em28xx_ctrl_notify, dev);
+ em28xx_ctrl_notify, dev);
v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
- em28xx_ctrl_notify, dev);
+ em28xx_ctrl_notify, dev);
}
/* wake i2c devices */
@@ -2518,7 +2524,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* register v4l2 video video_device */
ret = video_register_device(v4l2->vdev, VFL_TYPE_GRABBER,
- video_nr[dev->devno]);
+ video_nr[dev->devno]);
if (ret) {
em28xx_errdev("unable to register video device (error=%i).\n",
ret);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index a21a7463b557..9c7075344109 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -141,6 +141,8 @@
#define EM28178_BOARD_PCTV_461E 92
#define EM2874_BOARD_KWORLD_UB435Q_V3 93
#define EM28178_BOARD_PCTV_292E 94
+#define EM2861_BOARD_LEADTEK_VC100 95
+#define EM28178_BOARD_TERRATEC_T2_STICK_HD 96
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -215,7 +217,6 @@ enum em28xx_mode {
EM28XX_DIGITAL_MODE,
};
-
struct em28xx;
struct em28xx_usb_bufs {
@@ -243,11 +244,11 @@ struct em28xx_usb_ctl {
struct em28xx_usb_bufs digital_bufs;
/* Stores already requested buffers */
- struct em28xx_buffer *vid_buf;
- struct em28xx_buffer *vbi_buf;
+ struct em28xx_buffer *vid_buf;
+ struct em28xx_buffer *vbi_buf;
/* copy data from URB */
- int (*urb_data_copy) (struct em28xx *dev, struct urb *urb);
+ int (*urb_data_copy)(struct em28xx *dev, struct urb *urb);
};
@@ -695,14 +696,14 @@ struct em28xx {
char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
/* helper funcs that call usb_control_msg */
- int (*em28xx_write_regs) (struct em28xx *dev, u16 reg,
- char *buf, int len);
- int (*em28xx_read_reg) (struct em28xx *dev, u16 reg);
- int (*em28xx_read_reg_req_len) (struct em28xx *dev, u8 req, u16 reg,
- char *buf, int len);
- int (*em28xx_write_regs_req) (struct em28xx *dev, u8 req, u16 reg,
- char *buf, int len);
- int (*em28xx_read_reg_req) (struct em28xx *dev, u8 req, u16 reg);
+ int (*em28xx_write_regs)(struct em28xx *dev, u16 reg,
+ char *buf, int len);
+ int (*em28xx_read_reg)(struct em28xx *dev, u16 reg);
+ int (*em28xx_read_reg_req_len)(struct em28xx *dev, u8 req, u16 reg,
+ char *buf, int len);
+ int (*em28xx_write_regs_req)(struct em28xx *dev, u8 req, u16 reg,
+ char *buf, int len);
+ int (*em28xx_read_reg_req)(struct em28xx *dev, u8 req, u16 reg);
enum em28xx_mode mode;
@@ -745,7 +746,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
- u8 bitmask);
+ u8 bitmask);
int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask);
int em28xx_read_ac97(struct em28xx *dev, u8 reg);
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index ec799b4d88be..d6bf982efa42 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -252,7 +252,7 @@ static int set_capture_size(struct go7007 *go, struct v4l2_format *fmt, int try)
if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) {
struct v4l2_mbus_framefmt mbus_fmt;
- mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
+ mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
mbus_fmt.width = fmt ? fmt->fmt.pix.width : width;
mbus_fmt.height = height;
go->encoder_h_halve = 0;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 7c19ff72e6b3..c8761c71c9d2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -80,7 +80,7 @@ static void pvr2_context_set_notify(struct pvr2_context *mp, int fl)
static void pvr2_context_destroy(struct pvr2_context *mp)
{
pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context %p (destroy)",mp);
- if (mp->hdw) pvr2_hdw_destroy(mp->hdw);
+ pvr2_hdw_destroy(mp->hdw);
pvr2_context_set_notify(mp, 0);
mutex_lock(&pvr2_context_mutex);
if (mp->exist_next) {
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 9623b6218214..2fd9b5e0e2a9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2966,7 +2966,7 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
memset(&fmt, 0, sizeof(fmt));
fmt.width = hdw->res_hor_val;
fmt.height = hdw->res_ver_val;
- fmt.code = V4L2_MBUS_FMT_FIXED;
+ fmt.code = MEDIA_BUS_FMT_FIXED;
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_size(%dx%d)",
fmt.width, fmt.height);
v4l2_device_call_all(&hdw->v4l2_dev, 0, video, s_mbus_fmt, &fmt);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 1c0dbf428a3a..de55e96fed15 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -558,27 +558,30 @@ static void s2255_fwchunk_complete(struct urb *urb)
}
-static int s2255_got_frame(struct s2255_vc *vc, int jpgsize)
+static void s2255_got_frame(struct s2255_vc *vc, int jpgsize)
{
struct s2255_buffer *buf;
struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
unsigned long flags = 0;
- int rc = 0;
+
spin_lock_irqsave(&vc->qlock, flags);
if (list_empty(&vc->buf_list)) {
dprintk(dev, 1, "No active queue to serve\n");
- rc = -1;
- goto unlock;
+ spin_unlock_irqrestore(&vc->qlock, flags);
+ return;
}
buf = list_entry(vc->buf_list.next,
struct s2255_buffer, list);
list_del(&buf->list);
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.field = vc->field;
+ buf->vb.v4l2_buf.sequence = vc->frame_count;
+ spin_unlock_irqrestore(&vc->qlock, flags);
+
s2255_fillbuff(vc, buf, jpgsize);
+ /* tell v4l buffer was filled */
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
-unlock:
- spin_unlock_irqrestore(&vc->qlock, flags);
- return rc;
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
@@ -649,11 +652,6 @@ static void s2255_fillbuff(struct s2255_vc *vc,
}
dprintk(dev, 2, "s2255fill at : Buffer 0x%08lx size= %d\n",
(unsigned long)vbuf, pos);
- /* tell v4l buffer was filled */
- buf->vb.v4l2_buf.field = vc->field;
- buf->vb.v4l2_buf.sequence = vc->frame_count;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
@@ -1976,8 +1974,7 @@ static int s2255_release_sys_buffers(struct s2255_vc *vc)
{
unsigned long i;
for (i = 0; i < SYS_FRAMES; i++) {
- if (vc->buffer.frame[i].lpvbits)
- vfree(vc->buffer.frame[i].lpvbits);
+ vfree(vc->buffer.frame[i].lpvbits);
vc->buffer.frame[i].lpvbits = NULL;
}
return 0;
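The reworked s2255_got_frame() holds the qlock spinlock only for the queue manipulation: the buffer is taken off the list and stamped (field, sequence, timestamp) under the lock, while the copy in s2255_fillbuff() and the vb2_buffer_done() completion now run with the lock released, the completion happening once in got_frame() instead of inside fillbuff(). A user-space sketch of that shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static char slot[64];
static char *queued = slot;             /* one pending buffer, or NULL */

/* Dequeue under the lock; fill and complete with the lock released. */
static void got_frame(const char *payload)
{
        char *buf;

        pthread_mutex_lock(&qlock);
        buf = queued;
        queued = NULL;
        pthread_mutex_unlock(&qlock);

        if (!buf)                       /* "No active queue to serve" */
                return;

        strcpy(buf, payload);           /* long copy, no lock held */
        printf("completed: %s\n", buf); /* stands in for vb2_buffer_done() */
}

int main(void)
{
        got_frame("frame 0");
        got_frame("frame 1");           /* nothing queued: frame is dropped */
        return 0;
}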
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 233054311a62..a47629108c1b 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -475,7 +475,7 @@ static int vidioc_s_register(struct file *file, void *priv,
struct stk1160 *dev = video_drvdata(file);
/* Match host */
- return stk1160_write_reg(dev, reg->reg, cpu_to_le16(reg->val));
+ return stk1160_write_reg(dev, reg->reg, reg->val);
}
#endif
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 9bfa041e3316..693d5f409138 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -509,11 +509,12 @@ static int vidioc_querycap(struct file *file, void *priv,
usbvision_device_data[usbvision->dev_model].model_string,
sizeof(vc->card));
usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING |
(usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
+ vc->capabilities = vc->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
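The usbvision querycap fix fills the two capability fields the way V4L2 expects: device_caps describes what this particular device node supports, and capabilities reports the whole device plus V4L2_CAP_DEVICE_CAPS to signal that device_caps is valid (the rewritten uvcvideo querycap handler further down makes the same split per chain and stream). A minimal user-space illustration, assuming only the uapi videodev2.h header:

#include <stdio.h>
#include <string.h>
#include <linux/videodev2.h>

/* Fill querycap: per-node device_caps, device-wide capabilities. */
static void fill_cap(struct v4l2_capability *cap, int have_tuner)
{
        memset(cap, 0, sizeof(*cap));
        strncpy((char *)cap->driver, "example", sizeof(cap->driver) - 1);

        cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
                           V4L2_CAP_READWRITE |
                           V4L2_CAP_STREAMING |
                           (have_tuner ? V4L2_CAP_TUNER : 0);
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
}

int main(void)
{
        struct v4l2_capability cap;

        fill_cap(&cap, 1);
        printf("capabilities 0x%08x, device_caps 0x%08x\n",
               cap.capabilities, cap.device_caps);
        return 0;
}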
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 7c8322d4fc63..6a4b0b8cd270 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -331,6 +331,7 @@ static int uvc_parse_format(struct uvc_device *dev,
struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
+ unsigned int width_multiplier = 1;
unsigned int interval;
unsigned int i, n;
__u8 ftype;
@@ -366,6 +367,20 @@ static int uvc_parse_format(struct uvc_device *dev,
}
format->bpp = buffer[21];
+
+ /* Some devices report a format that doesn't match what they
+ * really send.
+ */
+ if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
+ if (format->fcc == V4L2_PIX_FMT_YUYV) {
+ strlcpy(format->name, "Greyscale 8-bit (Y8 )",
+ sizeof(format->name));
+ format->fcc = V4L2_PIX_FMT_GREY;
+ format->bpp = 8;
+ width_multiplier = 2;
+ }
+ }
+
if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
ftype = UVC_VS_FRAME_UNCOMPRESSED;
} else {
@@ -474,7 +489,8 @@ static int uvc_parse_format(struct uvc_device *dev,
frame->bFrameIndex = buffer[3];
frame->bmCapabilities = buffer[4];
- frame->wWidth = get_unaligned_le16(&buffer[5]);
+ frame->wWidth = get_unaligned_le16(&buffer[5])
+ * width_multiplier;
frame->wHeight = get_unaligned_le16(&buffer[7]);
frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
@@ -1623,12 +1639,12 @@ static void uvc_delete(struct uvc_device *dev)
{
struct list_head *p, *n;
- usb_put_intf(dev->intf);
- usb_put_dev(dev->udev);
-
uvc_status_cleanup(dev);
uvc_ctrl_cleanup_device(dev);
+ usb_put_intf(dev->intf);
+ usb_put_dev(dev->udev);
+
if (dev->vdev.dev)
v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -1718,6 +1734,11 @@ static int uvc_register_video(struct uvc_device *dev,
struct video_device *vdev;
int ret;
+ /* Initialize the video buffers queue. */
+ ret = uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
+ if (ret)
+ return ret;
+
/* Initialize the streaming interface with default streaming
* parameters.
*/
@@ -1744,6 +1765,7 @@ static int uvc_register_video(struct uvc_device *dev,
*/
vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
+ vdev->ioctl_ops = &uvc_ioctl_ops;
vdev->release = uvc_release;
vdev->prio = &stream->chain->prio;
if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1991,14 +2013,13 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
{
struct uvc_device *dev = usb_get_intfdata(intf);
struct uvc_streaming *stream;
+ int ret = 0;
uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n",
intf->cur_altsetting->desc.bInterfaceNumber);
if (intf->cur_altsetting->desc.bInterfaceSubClass ==
UVC_SC_VIDEOCONTROL) {
- int ret = 0;
-
if (reset) {
ret = uvc_ctrl_restore_values(dev);
if (ret < 0)
@@ -2014,8 +2035,13 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
}
list_for_each_entry(stream, &dev->streams, list) {
- if (stream->intf == intf)
- return uvc_video_resume(stream, reset);
+ if (stream->intf == intf) {
+ ret = uvc_video_resume(stream, reset);
+ if (ret < 0)
+ uvc_queue_streamoff(&stream->queue,
+ stream->queue.queue.type);
+ return ret;
+ }
}
uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
@@ -2504,6 +2530,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_IGNORE_SELECTOR_UNIT },
+ /* Oculus VR Positional Tracker DK2 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x2833,
+ .idProduct = 0x0201,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FORCE_Y8 },
/* Generic USB Video Class */
{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
{}
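The new UVC_QUIRK_FORCE_Y8 quirk (enabled here for the Oculus VR Positional Tracker DK2) relabels an advertised YUYV format as 8-bit greyscale and multiplies wWidth by two, so the frame keeps the same byte count: halving the bits per pixel is balanced by doubling the pixels per line. In numbers (the resolution below is picked purely for illustration):

#include <stdio.h>

/* UVC_QUIRK_FORCE_Y8: 16 bpp YUYV reinterpreted as 8 bpp grey, width * 2. */
int main(void)
{
        unsigned int w = 376, h = 480;                  /* advertised YUYV mode */
        unsigned int yuyv_bytes = w * h * 16 / 8;
        unsigned int grey_bytes = (w * 2) * h * 8 / 8;  /* width_multiplier = 2 */

        printf("YUYV %ux%u = %u bytes, GREY %ux%u = %u bytes\n",
               w, h, yuyv_bytes, w * 2, h, grey_bytes);
        return 0;
}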
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 6e92d2080255..cc960723b926 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -36,6 +36,34 @@
* the driver.
*/
+static inline struct uvc_streaming *
+uvc_queue_to_stream(struct uvc_video_queue *queue)
+{
+ return container_of(queue, struct uvc_streaming, queue);
+}
+
+/*
+ * Return all queued buffers to videobuf2 in the requested state.
+ *
+ * This function must be called with the queue spinlock held.
+ */
+static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
+ enum uvc_buffer_state state)
+{
+ enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
+ ? VB2_BUF_STATE_ERROR
+ : VB2_BUF_STATE_QUEUED;
+
+ while (!list_empty(&queue->irqqueue)) {
+ struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
+ struct uvc_buffer,
+ queue);
+ list_del(&buf->queue);
+ buf->state = state;
+ vb2_buffer_done(&buf->buf, vb2_state);
+ }
+}
+
/* -----------------------------------------------------------------------------
* videobuf2 queue operations
*/
@@ -45,8 +73,7 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
unsigned int sizes[], void *alloc_ctxs[])
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
- struct uvc_streaming *stream =
- container_of(queue, struct uvc_streaming, queue);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
/* Make sure the image size is large enough. */
if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize)
@@ -109,8 +136,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
- struct uvc_streaming *stream =
- container_of(queue, struct uvc_streaming, queue);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
if (vb->state == VB2_BUF_STATE_DONE)
@@ -131,6 +157,39 @@ static void uvc_wait_finish(struct vb2_queue *vq)
mutex_lock(&queue->mutex);
}
+static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
+ unsigned long flags;
+ int ret;
+
+ queue->buf_used = 0;
+
+ ret = uvc_video_enable(stream, 1);
+ if (ret == 0)
+ return 0;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+ return ret;
+}
+
+static void uvc_stop_streaming(struct vb2_queue *vq)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
+ unsigned long flags;
+
+ uvc_video_enable(stream, 0);
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
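The two hooks added above implement the videobuf2 contract for stream start and stop: if start_streaming() fails after buffers were handed to the driver, each of them must be returned to vb2 in the QUEUED state, while stop_streaming() (and uvc_queue_cancel() below) completes leftover buffers as ERROR; uvc_queue_return_buffers() maps the UVC buffer state to the matching vb2 state in one place. A generic sketch of the same shape, with hypothetical example_* helpers standing in for the UVC ones:

/* Sketch of a vb2 start/stop pair with the required buffer hand-back. */
static int example_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct example_queue *q = vb2_get_drv_priv(vq);
        int ret = example_hw_enable(q);         /* hypothetical helper */

        if (ret == 0)
                return 0;

        /* Hand every buffer the driver was given back to vb2 as QUEUED. */
        example_return_buffers(q, VB2_BUF_STATE_QUEUED);
        return ret;
}

static void example_stop_streaming(struct vb2_queue *vq)
{
        struct example_queue *q = vb2_get_drv_priv(vq);

        example_hw_disable(q);                  /* hypothetical helper */
        example_return_buffers(q, VB2_BUF_STATE_ERROR);
}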
static struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
@@ -138,6 +197,8 @@ static struct vb2_ops uvc_queue_qops = {
.buf_finish = uvc_buffer_finish,
.wait_prepare = uvc_wait_prepare,
.wait_finish = uvc_wait_finish,
+ .start_streaming = uvc_start_streaming,
+ .stop_streaming = uvc_stop_streaming,
};
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
@@ -165,12 +226,19 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
return 0;
}
+void uvc_queue_release(struct uvc_video_queue *queue)
+{
+ mutex_lock(&queue->mutex);
+ vb2_queue_release(&queue->queue);
+ mutex_unlock(&queue->mutex);
+}
+
/* -----------------------------------------------------------------------------
* V4L2 queue operations
*/
-int uvc_alloc_buffers(struct uvc_video_queue *queue,
- struct v4l2_requestbuffers *rb)
+int uvc_request_buffers(struct uvc_video_queue *queue,
+ struct v4l2_requestbuffers *rb)
{
int ret;
@@ -181,13 +249,6 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue,
return ret ? ret : rb->count;
}
-void uvc_free_buffers(struct uvc_video_queue *queue)
-{
- mutex_lock(&queue->mutex);
- vb2_queue_release(&queue->queue);
- mutex_unlock(&queue->mutex);
-}
-
int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
int ret;
@@ -234,6 +295,28 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
return ret;
}
+int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+{
+ int ret;
+
+ mutex_lock(&queue->mutex);
+ ret = vb2_streamon(&queue->queue, type);
+ mutex_unlock(&queue->mutex);
+
+ return ret;
+}
+
+int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+{
+ int ret;
+
+ mutex_lock(&queue->mutex);
+ ret = vb2_streamoff(&queue->queue, type);
+ mutex_unlock(&queue->mutex);
+
+ return ret;
+}
+
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
int ret;
@@ -289,49 +372,6 @@ int uvc_queue_allocated(struct uvc_video_queue *queue)
}
/*
- * Enable or disable the video buffers queue.
- *
- * The queue must be enabled before starting video acquisition and must be
- * disabled after stopping it. This ensures that the video buffers queue
- * state can be properly initialized before buffers are accessed from the
- * interrupt handler.
- *
- * Enabling the video queue returns -EBUSY if the queue is already enabled.
- *
- * Disabling the video queue cancels the queue and removes all buffers from
- * the main queue.
- *
- * This function can't be called from interrupt context. Use
- * uvc_queue_cancel() instead.
- */
-int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
-{
- unsigned long flags;
- int ret;
-
- mutex_lock(&queue->mutex);
- if (enable) {
- ret = vb2_streamon(&queue->queue, queue->queue.type);
- if (ret < 0)
- goto done;
-
- queue->buf_used = 0;
- } else {
- ret = vb2_streamoff(&queue->queue, queue->queue.type);
- if (ret < 0)
- goto done;
-
- spin_lock_irqsave(&queue->irqlock, flags);
- INIT_LIST_HEAD(&queue->irqqueue);
- spin_unlock_irqrestore(&queue->irqlock, flags);
- }
-
-done:
- mutex_unlock(&queue->mutex);
- return ret;
-}
-
-/*
* Cancel the video buffers queue.
*
* Cancelling the queue marks all buffers on the irq queue as erroneous,
@@ -345,17 +385,10 @@ done:
*/
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
- struct uvc_buffer *buf;
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
- while (!list_empty(&queue->irqqueue)) {
- buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
- queue);
- list_del(&buf->queue);
- buf->state = UVC_BUF_STATE_ERROR;
- vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
- }
+ uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
/* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_buffer_queue and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 378ae02e593b..9c5cbcf16529 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -530,10 +530,8 @@ static int uvc_v4l2_release(struct file *file)
uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n");
/* Only free resources if this is a privileged handle. */
- if (uvc_has_privileges(handle)) {
- uvc_video_enable(stream, 0);
- uvc_free_buffers(&stream->queue);
- }
+ if (uvc_has_privileges(handle))
+ uvc_queue_release(&stream->queue);
/* Release the file handle. */
uvc_dismiss_privileges(handle);
@@ -551,553 +549,628 @@ static int uvc_v4l2_release(struct file *file)
return 0;
}
-static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int uvc_ioctl_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
struct uvc_fh *handle = file->private_data;
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
- long ret = 0;
- switch (cmd) {
- /* Query capabilities */
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *cap = arg;
-
- memset(cap, 0, sizeof *cap);
- strlcpy(cap->driver, "uvcvideo", sizeof cap->driver);
- strlcpy(cap->card, vdev->name, sizeof cap->card);
- usb_make_path(stream->dev->udev,
- cap->bus_info, sizeof(cap->bus_info));
- cap->version = LINUX_VERSION_CODE;
- cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
- | chain->caps;
- if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
- break;
- }
+ strlcpy(cap->driver, "uvcvideo", sizeof(cap->driver));
+ strlcpy(cap->card, vdev->name, sizeof(cap->card));
+ usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | chain->caps;
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- /* Priority */
- case VIDIOC_G_PRIORITY:
- *(u32 *)arg = v4l2_prio_max(vdev->prio);
- break;
+ return 0;
+}
- case VIDIOC_S_PRIORITY:
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct uvc_format *format;
+ enum v4l2_buf_type type = fmt->type;
+ __u32 index = fmt->index;
- return v4l2_prio_change(vdev->prio, &handle->vfh.prio,
- *(u32 *)arg);
+ if (fmt->type != stream->type || fmt->index >= stream->nformats)
+ return -EINVAL;
- /* Get, Set & Query control */
- case VIDIOC_QUERYCTRL:
- return uvc_query_v4l2_ctrl(chain, arg);
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->index = index;
+ fmt->type = type;
+
+ format = &stream->format[fmt->index];
+ fmt->flags = 0;
+ if (format->flags & UVC_FMT_FLAG_COMPRESSED)
+ fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ strlcpy(fmt->description, format->name, sizeof(fmt->description));
+ fmt->description[sizeof(fmt->description) - 1] = 0;
+ fmt->pixelformat = format->fcc;
+ return 0;
+}
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *ctrl = arg;
- struct v4l2_ext_control xctrl;
+static int uvc_ioctl_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- memset(&xctrl, 0, sizeof xctrl);
- xctrl.id = ctrl->id;
+ return uvc_ioctl_enum_fmt(stream, fmt);
+}
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(handle);
- if (ret >= 0)
- ctrl->value = xctrl.value;
- break;
- }
+ return uvc_ioctl_enum_fmt(stream, fmt);
+}
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *ctrl = arg;
- struct v4l2_ext_control xctrl;
+static int uvc_ioctl_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+ return uvc_v4l2_get_format(stream, fmt);
+}
- memset(&xctrl, 0, sizeof xctrl);
- xctrl.id = ctrl->id;
- xctrl.value = ctrl->value;
+static int uvc_ioctl_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+ return uvc_v4l2_get_format(stream, fmt);
+}
- ret = uvc_ctrl_set(chain, &xctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- return ret;
- }
- ret = uvc_ctrl_commit(handle, &xctrl, 1);
- if (ret == 0)
- ctrl->value = xctrl.value;
- break;
- }
+static int uvc_ioctl_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
- case VIDIOC_QUERYMENU:
- return uvc_query_v4l2_menu(chain, arg);
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
- case VIDIOC_G_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctrls = arg;
- struct v4l2_ext_control *ctrl = ctrls->controls;
- unsigned int i;
+ return uvc_v4l2_set_format(stream, fmt);
+}
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
- for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_get(chain, ctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- ctrls->error_idx = i;
- return ret;
- }
- }
- ctrls->error_idx = 0;
- ret = uvc_ctrl_rollback(handle);
- break;
- }
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
- case VIDIOC_S_EXT_CTRLS:
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
- /* Fall through */
- case VIDIOC_TRY_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctrls = arg;
- struct v4l2_ext_control *ctrl = ctrls->controls;
- unsigned int i;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+ return uvc_v4l2_set_format(stream, fmt);
+}
- for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_set(chain, ctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
- ? ctrls->count : i;
- return ret;
- }
- }
+static int uvc_ioctl_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ struct uvc_streaming_control probe;
- ctrls->error_idx = 0;
+ return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
+}
- if (cmd == VIDIOC_S_EXT_CTRLS)
- ret = uvc_ctrl_commit(handle,
- ctrls->controls, ctrls->count);
- else
- ret = uvc_ctrl_rollback(handle);
- break;
- }
+static int uvc_ioctl_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ struct uvc_streaming_control probe;
- /* Get, Set & Enum input */
- case VIDIOC_ENUMINPUT:
- {
- const struct uvc_entity *selector = chain->selector;
- struct v4l2_input *input = arg;
- struct uvc_entity *iterm = NULL;
- u32 index = input->index;
- int pin = 0;
-
- if (selector == NULL ||
- (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
- if (index != 0)
- return -EINVAL;
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (UVC_ENTITY_IS_ITERM(iterm))
- break;
- }
- pin = iterm->id;
- } else if (index < selector->bNrInPins) {
- pin = selector->baSourceID[index];
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (!UVC_ENTITY_IS_ITERM(iterm))
- continue;
- if (iterm->id == pin)
- break;
- }
- }
+ return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
+}
- if (iterm == NULL || iterm->id != pin)
- return -EINVAL;
+static int uvc_ioctl_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *rb)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
- memset(input, 0, sizeof *input);
- input->index = index;
- strlcpy(input->name, iterm->name, sizeof input->name);
- if (UVC_ENTITY_TYPE(iterm) == UVC_ITT_CAMERA)
- input->type = V4L2_INPUT_TYPE_CAMERA;
- break;
- }
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
- case VIDIOC_G_INPUT:
- {
- u8 input;
+ mutex_lock(&stream->mutex);
+ ret = uvc_request_buffers(&stream->queue, rb);
+ mutex_unlock(&stream->mutex);
+ if (ret < 0)
+ return ret;
- if (chain->selector == NULL ||
- (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
- *(int *)arg = 0;
- break;
- }
+ if (ret == 0)
+ uvc_dismiss_privileges(handle);
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
- chain->selector->id, chain->dev->intfnum,
- UVC_SU_INPUT_SELECT_CONTROL, &input, 1);
- if (ret < 0)
- return ret;
+ return 0;
+}
- *(int *)arg = input - 1;
- break;
- }
+static int uvc_ioctl_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- case VIDIOC_S_INPUT:
- {
- u32 input = *(u32 *)arg + 1;
+ if (!uvc_has_privileges(handle))
+ return -EBUSY;
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+ return uvc_query_buffer(&stream->queue, buf);
+}
- if ((ret = uvc_acquire_privileges(handle)) < 0)
- return ret;
+static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- if (chain->selector == NULL ||
- (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
- if (input != 1)
- return -EINVAL;
- break;
- }
+ if (!uvc_has_privileges(handle))
+ return -EBUSY;
- if (input == 0 || input > chain->selector->bNrInPins)
- return -EINVAL;
+ return uvc_queue_buffer(&stream->queue, buf);
+}
- return uvc_query_ctrl(chain->dev, UVC_SET_CUR,
- chain->selector->id, chain->dev->intfnum,
- UVC_SU_INPUT_SELECT_CONTROL, &input, 1);
- }
+static int uvc_ioctl_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- /* Try, Get, Set & Enum format */
- case VIDIOC_ENUM_FMT:
- {
- struct v4l2_fmtdesc *fmt = arg;
- struct uvc_format *format;
- enum v4l2_buf_type type = fmt->type;
- __u32 index = fmt->index;
+ if (!uvc_has_privileges(handle))
+ return -EBUSY;
- if (fmt->type != stream->type ||
- fmt->index >= stream->nformats)
- return -EINVAL;
+ return uvc_dequeue_buffer(&stream->queue, buf,
+ file->f_flags & O_NONBLOCK);
+}
- memset(fmt, 0, sizeof(*fmt));
- fmt->index = index;
- fmt->type = type;
-
- format = &stream->format[fmt->index];
- fmt->flags = 0;
- if (format->flags & UVC_FMT_FLAG_COMPRESSED)
- fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
- strlcpy(fmt->description, format->name,
- sizeof fmt->description);
- fmt->description[sizeof fmt->description - 1] = 0;
- fmt->pixelformat = format->fcc;
- break;
- }
+static int uvc_ioctl_create_bufs(struct file *file, void *fh,
+ struct v4l2_create_buffers *cb)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
- case VIDIOC_TRY_FMT:
- {
- struct uvc_streaming_control probe;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
- return uvc_v4l2_try_format(stream, arg, &probe, NULL, NULL);
- }
+ return uvc_create_buffers(&stream->queue, cb);
+}
- case VIDIOC_S_FMT:
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
- if ((ret = uvc_acquire_privileges(handle)) < 0)
- return ret;
+ if (!uvc_has_privileges(handle))
+ return -EBUSY;
- return uvc_v4l2_set_format(stream, arg);
+ mutex_lock(&stream->mutex);
+ ret = uvc_queue_streamon(&stream->queue, type);
+ mutex_unlock(&stream->mutex);
- case VIDIOC_G_FMT:
- return uvc_v4l2_get_format(stream, arg);
+ return ret;
+}
- /* Frame size enumeration */
- case VIDIOC_ENUM_FRAMESIZES:
- {
- struct v4l2_frmsizeenum *fsize = arg;
- struct uvc_format *format = NULL;
- struct uvc_frame *frame;
- int i;
+static int uvc_ioctl_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
- /* Look for the given pixel format */
- for (i = 0; i < stream->nformats; i++) {
- if (stream->format[i].fcc ==
- fsize->pixel_format) {
- format = &stream->format[i];
- break;
- }
- }
- if (format == NULL)
- return -EINVAL;
+ if (!uvc_has_privileges(handle))
+ return -EBUSY;
- if (fsize->index >= format->nframes)
- return -EINVAL;
+ mutex_lock(&stream->mutex);
+ uvc_queue_streamoff(&stream->queue, type);
+ mutex_unlock(&stream->mutex);
- frame = &format->frame[fsize->index];
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = frame->wWidth;
- fsize->discrete.height = frame->wHeight;
- break;
- }
+ return 0;
+}
- /* Frame interval enumeration */
- case VIDIOC_ENUM_FRAMEINTERVALS:
- {
- struct v4l2_frmivalenum *fival = arg;
- struct uvc_format *format = NULL;
- struct uvc_frame *frame = NULL;
- int i;
-
- /* Look for the given pixel format and frame size */
- for (i = 0; i < stream->nformats; i++) {
- if (stream->format[i].fcc ==
- fival->pixel_format) {
- format = &stream->format[i];
- break;
- }
- }
- if (format == NULL)
+static int uvc_ioctl_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ const struct uvc_entity *selector = chain->selector;
+ struct uvc_entity *iterm = NULL;
+ u32 index = input->index;
+ int pin = 0;
+
+ if (selector == NULL ||
+ (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
+ if (index != 0)
return -EINVAL;
-
- for (i = 0; i < format->nframes; i++) {
- if (format->frame[i].wWidth == fival->width &&
- format->frame[i].wHeight == fival->height) {
- frame = &format->frame[i];
+ list_for_each_entry(iterm, &chain->entities, chain) {
+ if (UVC_ENTITY_IS_ITERM(iterm))
break;
- }
}
- if (frame == NULL)
- return -EINVAL;
-
- if (frame->bFrameIntervalType) {
- if (fival->index >= frame->bFrameIntervalType)
- return -EINVAL;
-
- fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- fival->discrete.numerator =
- frame->dwFrameInterval[fival->index];
- fival->discrete.denominator = 10000000;
- uvc_simplify_fraction(&fival->discrete.numerator,
- &fival->discrete.denominator, 8, 333);
- } else {
- fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
- fival->stepwise.min.numerator =
- frame->dwFrameInterval[0];
- fival->stepwise.min.denominator = 10000000;
- fival->stepwise.max.numerator =
- frame->dwFrameInterval[1];
- fival->stepwise.max.denominator = 10000000;
- fival->stepwise.step.numerator =
- frame->dwFrameInterval[2];
- fival->stepwise.step.denominator = 10000000;
- uvc_simplify_fraction(&fival->stepwise.min.numerator,
- &fival->stepwise.min.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.max.numerator,
- &fival->stepwise.max.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.step.numerator,
- &fival->stepwise.step.denominator, 8, 333);
+ pin = iterm->id;
+ } else if (index < selector->bNrInPins) {
+ pin = selector->baSourceID[index];
+ list_for_each_entry(iterm, &chain->entities, chain) {
+ if (!UVC_ENTITY_IS_ITERM(iterm))
+ continue;
+ if (iterm->id == pin)
+ break;
}
- break;
}
- /* Get & Set streaming parameters */
- case VIDIOC_G_PARM:
- return uvc_v4l2_get_streamparm(stream, arg);
+ if (iterm == NULL || iterm->id != pin)
+ return -EINVAL;
- case VIDIOC_S_PARM:
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+ memset(input, 0, sizeof(*input));
+ input->index = index;
+ strlcpy(input->name, iterm->name, sizeof(input->name));
+ if (UVC_ENTITY_TYPE(iterm) == UVC_ITT_CAMERA)
+ input->type = V4L2_INPUT_TYPE_CAMERA;
- if ((ret = uvc_acquire_privileges(handle)) < 0)
- return ret;
+ return 0;
+}
- return uvc_v4l2_set_streamparm(stream, arg);
+static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ int ret;
+ u8 i;
- /* Cropping and scaling */
- case VIDIOC_CROPCAP:
- {
- struct v4l2_cropcap *ccap = arg;
+ if (chain->selector == NULL ||
+ (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
+ *input = 0;
+ return 0;
+ }
- if (ccap->type != stream->type)
- return -EINVAL;
+ ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
+ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
+ &i, 1);
+ if (ret < 0)
+ return ret;
- ccap->bounds.left = 0;
- ccap->bounds.top = 0;
+ *input = i - 1;
+ return 0;
+}
- mutex_lock(&stream->mutex);
- ccap->bounds.width = stream->cur_frame->wWidth;
- ccap->bounds.height = stream->cur_frame->wHeight;
- mutex_unlock(&stream->mutex);
+static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ int ret;
+ u32 i;
- ccap->defrect = ccap->bounds;
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
- ccap->pixelaspect.numerator = 1;
- ccap->pixelaspect.denominator = 1;
- break;
+ if (chain->selector == NULL ||
+ (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
+ if (input)
+ return -EINVAL;
+ return 0;
}
- case VIDIOC_G_CROP:
- case VIDIOC_S_CROP:
- return -ENOTTY;
+ if (input >= chain->selector->bNrInPins)
+ return -EINVAL;
- /* Buffers & streaming */
- case VIDIOC_REQBUFS:
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+ i = input + 1;
+ return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
+ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
+ &i, 1);
+}
- if ((ret = uvc_acquire_privileges(handle)) < 0)
- return ret;
+static int uvc_ioctl_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *qc)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
- mutex_lock(&stream->mutex);
- ret = uvc_alloc_buffers(&stream->queue, arg);
- mutex_unlock(&stream->mutex);
- if (ret < 0)
- return ret;
+ return uvc_query_v4l2_ctrl(chain, qc);
+}
- if (ret == 0)
- uvc_dismiss_privileges(handle);
+static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
+ struct v4l2_control *ctrl)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ struct v4l2_ext_control xctrl;
+ int ret;
- ret = 0;
- break;
+ memset(&xctrl, 0, sizeof(xctrl));
+ xctrl.id = ctrl->id;
+
+ ret = uvc_ctrl_begin(chain);
+ if (ret < 0)
+ return ret;
- case VIDIOC_QUERYBUF:
- {
- struct v4l2_buffer *buf = arg;
+ ret = uvc_ctrl_get(chain, &xctrl);
+ uvc_ctrl_rollback(handle);
+ if (ret < 0)
+ return ret;
- if (!uvc_has_privileges(handle))
- return -EBUSY;
+ ctrl->value = xctrl.value;
+ return 0;
+}
- return uvc_query_buffer(&stream->queue, buf);
- }
+static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
+ struct v4l2_control *ctrl)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ struct v4l2_ext_control xctrl;
+ int ret;
- case VIDIOC_CREATE_BUFS:
- {
- struct v4l2_create_buffers *cb = arg;
+ memset(&xctrl, 0, sizeof(xctrl));
+ xctrl.id = ctrl->id;
+ xctrl.value = ctrl->value;
- ret = uvc_acquire_privileges(handle);
- if (ret < 0)
- return ret;
+ ret = uvc_ctrl_begin(chain);
+ if (ret < 0)
+ return ret;
- return uvc_create_buffers(&stream->queue, cb);
+ ret = uvc_ctrl_set(chain, &xctrl);
+ if (ret < 0) {
+ uvc_ctrl_rollback(handle);
+ return ret;
}
- case VIDIOC_QBUF:
- if (!uvc_has_privileges(handle))
- return -EBUSY;
+ ret = uvc_ctrl_commit(handle, &xctrl, 1);
+ if (ret < 0)
+ return ret;
- return uvc_queue_buffer(&stream->queue, arg);
+ ctrl->value = xctrl.value;
+ return 0;
+}
+
+static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ struct v4l2_ext_control *ctrl = ctrls->controls;
+ unsigned int i;
+ int ret;
- case VIDIOC_DQBUF:
- if (!uvc_has_privileges(handle))
- return -EBUSY;
+ ret = uvc_ctrl_begin(chain);
+ if (ret < 0)
+ return ret;
- return uvc_dequeue_buffer(&stream->queue, arg,
- file->f_flags & O_NONBLOCK);
+ for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+ ret = uvc_ctrl_get(chain, ctrl);
+ if (ret < 0) {
+ uvc_ctrl_rollback(handle);
+ ctrls->error_idx = i;
+ return ret;
+ }
+ }
- case VIDIOC_STREAMON:
- {
- int *type = arg;
+ ctrls->error_idx = 0;
- if (*type != stream->type)
- return -EINVAL;
+ return uvc_ctrl_rollback(handle);
+}
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
+ struct v4l2_ext_controls *ctrls,
+ bool commit)
+{
+ struct v4l2_ext_control *ctrl = ctrls->controls;
+ struct uvc_video_chain *chain = handle->chain;
+ unsigned int i;
+ int ret;
- if (!uvc_has_privileges(handle))
- return -EBUSY;
+ ret = uvc_ctrl_begin(chain);
+ if (ret < 0)
+ return ret;
- mutex_lock(&stream->mutex);
- ret = uvc_video_enable(stream, 1);
- mutex_unlock(&stream->mutex);
- if (ret < 0)
+ for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+ ret = uvc_ctrl_set(chain, ctrl);
+ if (ret < 0) {
+ uvc_ctrl_rollback(handle);
+ ctrls->error_idx = commit ? ctrls->count : i;
return ret;
- break;
+ }
}
- case VIDIOC_STREAMOFF:
- {
- int *type = arg;
+ ctrls->error_idx = 0;
- if (*type != stream->type)
- return -EINVAL;
+ if (commit)
+ return uvc_ctrl_commit(handle, ctrls->controls, ctrls->count);
+ else
+ return uvc_ctrl_rollback(handle);
+}
- ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
- if (ret < 0)
- return ret;
+static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct uvc_fh *handle = fh;
+
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, true);
+}
- if (!uvc_has_privileges(handle))
- return -EBUSY;
+static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct uvc_fh *handle = fh;
- return uvc_video_enable(stream, 0);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, false);
+}
+
+static int uvc_ioctl_querymenu(struct file *file, void *fh,
+ struct v4l2_querymenu *qm)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+
+ return uvc_query_v4l2_menu(chain, qm);
+}
+
+static int uvc_ioctl_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *ccap)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+
+ if (ccap->type != stream->type)
+ return -EINVAL;
+
+ ccap->bounds.left = 0;
+ ccap->bounds.top = 0;
+ mutex_lock(&stream->mutex);
+ ccap->bounds.width = stream->cur_frame->wWidth;
+ ccap->bounds.height = stream->cur_frame->wHeight;
+ mutex_unlock(&stream->mutex);
+
+ ccap->defrect = ccap->bounds;
+
+ ccap->pixelaspect.numerator = 1;
+ ccap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int uvc_ioctl_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+
+ return uvc_v4l2_get_streamparm(stream, parm);
+}
+
+static int uvc_ioctl_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ int ret;
+
+ ret = uvc_acquire_privileges(handle);
+ if (ret < 0)
+ return ret;
+
+ return uvc_v4l2_set_streamparm(stream, parm);
+}
+
+static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ struct uvc_format *format = NULL;
+ struct uvc_frame *frame;
+ int i;
+
+ /* Look for the given pixel format */
+ for (i = 0; i < stream->nformats; i++) {
+ if (stream->format[i].fcc == fsize->pixel_format) {
+ format = &stream->format[i];
+ break;
+ }
}
+ if (format == NULL)
+ return -EINVAL;
- case VIDIOC_SUBSCRIBE_EVENT:
- {
- struct v4l2_event_subscription *sub = arg;
+ if (fsize->index >= format->nframes)
+ return -EINVAL;
- switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(&handle->vfh, sub, 0,
- &uvc_ctrl_sub_ev_ops);
- default:
- return -EINVAL;
+ frame = &format->frame[fsize->index];
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = frame->wWidth;
+ fsize->discrete.height = frame->wHeight;
+ return 0;
+}
+
+static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_streaming *stream = handle->stream;
+ struct uvc_format *format = NULL;
+ struct uvc_frame *frame = NULL;
+ int i;
+
+ /* Look for the given pixel format and frame size */
+ for (i = 0; i < stream->nformats; i++) {
+ if (stream->format[i].fcc == fival->pixel_format) {
+ format = &stream->format[i];
+ break;
}
}
+ if (format == NULL)
+ return -EINVAL;
- case VIDIOC_UNSUBSCRIBE_EVENT:
- return v4l2_event_unsubscribe(&handle->vfh, arg);
+ for (i = 0; i < format->nframes; i++) {
+ if (format->frame[i].wWidth == fival->width &&
+ format->frame[i].wHeight == fival->height) {
+ frame = &format->frame[i];
+ break;
+ }
+ }
+ if (frame == NULL)
+ return -EINVAL;
- case VIDIOC_DQEVENT:
- return v4l2_event_dequeue(&handle->vfh, arg,
- file->f_flags & O_NONBLOCK);
+ if (frame->bFrameIntervalType) {
+ if (fival->index >= frame->bFrameIntervalType)
+ return -EINVAL;
- /* Analog video standards make no sense for digital cameras. */
- case VIDIOC_ENUMSTD:
- case VIDIOC_QUERYSTD:
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete.numerator =
+ frame->dwFrameInterval[fival->index];
+ fival->discrete.denominator = 10000000;
+ uvc_simplify_fraction(&fival->discrete.numerator,
+ &fival->discrete.denominator, 8, 333);
+ } else {
+ fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ fival->stepwise.min.numerator = frame->dwFrameInterval[0];
+ fival->stepwise.min.denominator = 10000000;
+ fival->stepwise.max.numerator = frame->dwFrameInterval[1];
+ fival->stepwise.max.denominator = 10000000;
+ fival->stepwise.step.numerator = frame->dwFrameInterval[2];
+ fival->stepwise.step.denominator = 10000000;
+ uvc_simplify_fraction(&fival->stepwise.min.numerator,
+ &fival->stepwise.min.denominator, 8, 333);
+ uvc_simplify_fraction(&fival->stepwise.max.numerator,
+ &fival->stepwise.max.denominator, 8, 333);
+ uvc_simplify_fraction(&fival->stepwise.step.numerator,
+ &fival->stepwise.step.denominator, 8, 333);
+ }
- case VIDIOC_OVERLAY:
+ return 0;
+}
- case VIDIOC_ENUMAUDIO:
- case VIDIOC_ENUMAUDOUT:
+static int uvc_ioctl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(fh, sub, 0, &uvc_ctrl_sub_ev_ops);
+ default:
+ return -EINVAL;
+ }
+}
- case VIDIOC_ENUMOUTPUT:
- uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd);
- return -ENOTTY;
+static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
+ unsigned int cmd, void *arg)
+{
+ struct uvc_fh *handle = fh;
+ struct uvc_video_chain *chain = handle->chain;
+ switch (cmd) {
+ /* Dynamic controls. */
case UVCIOC_CTRL_MAP:
return uvc_ioctl_ctrl_map(chain, arg);
@@ -1105,23 +1178,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_xu_ctrl_query(chain, arg);
default:
- uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n", cmd);
return -ENOTTY;
}
-
- return ret;
-}
-
-static long uvc_v4l2_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- if (uvc_trace_param & UVC_TRACE_IOCTL) {
- uvc_printk(KERN_DEBUG, "uvc_v4l2_ioctl(");
- v4l_printk_ioctl(NULL, cmd);
- printk(")\n");
- }
-
- return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
}
#ifdef CONFIG_COMPAT
@@ -1304,7 +1362,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = uvc_v4l2_ioctl(file, cmd, (unsigned long)&karg);
+ ret = video_ioctl2(file, cmd, (unsigned long)&karg);
set_fs(old_fs);
if (ret < 0)
@@ -1365,11 +1423,48 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
}
#endif
+const struct v4l2_ioctl_ops uvc_ioctl_ops = {
+ .vidioc_querycap = uvc_ioctl_querycap,
+ .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt_vid_cap,
+ .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt_vid_out,
+ .vidioc_reqbufs = uvc_ioctl_reqbufs,
+ .vidioc_querybuf = uvc_ioctl_querybuf,
+ .vidioc_qbuf = uvc_ioctl_qbuf,
+ .vidioc_dqbuf = uvc_ioctl_dqbuf,
+ .vidioc_create_bufs = uvc_ioctl_create_bufs,
+ .vidioc_streamon = uvc_ioctl_streamon,
+ .vidioc_streamoff = uvc_ioctl_streamoff,
+ .vidioc_enum_input = uvc_ioctl_enum_input,
+ .vidioc_g_input = uvc_ioctl_g_input,
+ .vidioc_s_input = uvc_ioctl_s_input,
+ .vidioc_queryctrl = uvc_ioctl_queryctrl,
+ .vidioc_g_ctrl = uvc_ioctl_g_ctrl,
+ .vidioc_s_ctrl = uvc_ioctl_s_ctrl,
+ .vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
+ .vidioc_querymenu = uvc_ioctl_querymenu,
+ .vidioc_cropcap = uvc_ioctl_cropcap,
+ .vidioc_g_parm = uvc_ioctl_g_parm,
+ .vidioc_s_parm = uvc_ioctl_s_parm,
+ .vidioc_enum_framesizes = uvc_ioctl_enum_framesizes,
+ .vidioc_enum_frameintervals = uvc_ioctl_enum_frameintervals,
+ .vidioc_subscribe_event = uvc_ioctl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_default = uvc_ioctl_default,
+};
+
const struct v4l2_file_operations uvc_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
- .unlocked_ioctl = uvc_v4l2_ioctl,
+ .unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = uvc_v4l2_compat_ioctl32,
#endif
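
The uvc_v4l2.c changes above replace the driver's private ioctl switch with one named handler per ioctl, registered through uvc_ioctl_ops, and let the V4L2 core's video_ioctl2() do the user-space copying and dispatch. A minimal sketch of the same wiring for a hypothetical driver (the mydrv_* names are illustrative, not from the patch):

static int mydrv_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;			/* single, fixed input */
	return 0;
}

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	.vidioc_g_input	= mydrv_g_input,
	/* ...one entry per supported ioctl, as in uvc_ioctl_ops above... */
};

static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= v4l2_fh_release,
	.unlocked_ioctl	= video_ioctl2,	/* core copies args and dispatches */
};

The video_device then points its ioctl_ops at this table before registration, which is what the uvc_ioctl_ops/uvc_fops pair above does for uvcvideo.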
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index df81b9c4faf1..9637e8b86949 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1021,6 +1021,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
uvc_video_get_ts(&ts);
+ buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
buf->buf.v4l2_buf.sequence = stream->sequence;
buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
buf->buf.v4l2_buf.timestamp.tv_usec =
@@ -1734,19 +1735,13 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
uvc_video_clock_reset(stream);
ret = uvc_commit_video(stream, &stream->ctrl);
- if (ret < 0) {
- uvc_queue_enable(&stream->queue, 0);
+ if (ret < 0)
return ret;
- }
if (!uvc_queue_streaming(&stream->queue))
return 0;
- ret = uvc_init_video(stream, GFP_NOIO);
- if (ret < 0)
- uvc_queue_enable(&stream->queue, 0);
-
- return ret;
+ return uvc_init_video(stream, GFP_NOIO);
}
/* ------------------------------------------------------------------------
@@ -1778,11 +1773,6 @@ int uvc_video_init(struct uvc_streaming *stream)
atomic_set(&stream->active, 0);
- /* Initialize the video buffers queue. */
- ret = uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
- if (ret)
- return ret;
-
/* Alternate setting 0 should be the default, yet the XBox Live Vision
* Cam (and possibly other devices) crash or otherwise misbehave if
* they don't receive a SET_INTERFACE request before any other video
@@ -1889,7 +1879,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
usb_clear_halt(stream->dev->udev, pipe);
}
- uvc_queue_enable(&stream->queue, 0);
uvc_video_clock_cleanup(stream);
return 0;
}
@@ -1898,10 +1887,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
if (ret < 0)
return ret;
- ret = uvc_queue_enable(&stream->queue, 1);
- if (ret < 0)
- goto error_queue;
-
/* Commit the streaming parameters. */
ret = uvc_commit_video(stream, &stream->ctrl);
if (ret < 0)
@@ -1916,8 +1901,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
error_video:
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
error_commit:
- uvc_queue_enable(&stream->queue, 0);
-error_queue:
uvc_video_clock_cleanup(stream);
return ret;
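
With vb2 owning buffer state, uvc_video.c no longer toggles the queue around resume and error paths; it only starts or stops the USB video pipeline. The queue-side counterpart that replaces uvc_queue_enable() typically lands in vb2_ops callbacks. The uvc_queue.c side is not part of this excerpt, so the sketch below is only an assumption about its shape, reusing uvc_video_enable() from the hunks above:

static int sketch_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_streaming *stream = vb2_get_drv_priv(vq);

	return uvc_video_enable(stream, 1);	/* start USB transfers */
}

static void sketch_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_streaming *stream = vb2_get_drv_priv(vq);

	uvc_video_enable(stream, 0);		/* stop USB transfers */
}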
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 864ada740360..f0a04b532ede 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -148,6 +148,7 @@
#define UVC_QUIRK_PROBE_DEF 0x00000100
#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
+#define UVC_QUIRK_FORCE_Y8 0x00000800
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -579,7 +580,6 @@ struct uvc_driver {
#define UVC_TRACE_FORMAT (1 << 3)
#define UVC_TRACE_CAPTURE (1 << 4)
#define UVC_TRACE_CALLS (1 << 5)
-#define UVC_TRACE_IOCTL (1 << 6)
#define UVC_TRACE_FRAME (1 << 7)
#define UVC_TRACE_SUSPEND (1 << 8)
#define UVC_TRACE_STATUS (1 << 9)
@@ -623,9 +623,9 @@ extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
extern int uvc_queue_init(struct uvc_video_queue *queue,
enum v4l2_buf_type type, int drop_corrupted);
-extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
+extern void uvc_queue_release(struct uvc_video_queue *queue);
+extern int uvc_request_buffers(struct uvc_video_queue *queue,
struct v4l2_requestbuffers *rb);
-extern void uvc_free_buffers(struct uvc_video_queue *queue);
extern int uvc_query_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf);
extern int uvc_create_buffers(struct uvc_video_queue *queue,
@@ -634,7 +634,10 @@ extern int uvc_queue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf);
extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf, int nonblocking);
-extern int uvc_queue_enable(struct uvc_video_queue *queue, int enable);
+extern int uvc_queue_streamon(struct uvc_video_queue *queue,
+ enum v4l2_buf_type type);
+extern int uvc_queue_streamoff(struct uvc_video_queue *queue,
+ enum v4l2_buf_type type);
extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
@@ -653,6 +656,7 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
}
/* V4L2 interface */
+extern const struct v4l2_ioctl_ops uvc_ioctl_ops;
extern const struct v4l2_file_operations uvc_fops;
/* Media controller */
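
The new prototypes above (uvc_queue_release, uvc_request_buffers, uvc_queue_streamon/streamoff) read as thin wrappers around vb2. The matching uvc_queue.c implementation is not shown in this excerpt; a rough sketch under the assumption that the queue struct embeds the vb2 queue and its serializing mutex, as in mainline:

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);	/* vb2 does the rest */
	mutex_unlock(&queue->mutex);

	return ret;
}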
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 2e9d81f4c1a5..5b808500e7e7 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -80,36 +80,6 @@ MODULE_LICENSE("GPL");
/* Helper functions for control handling */
-/* Check for correctness of the ctrl's value based on the data from
- struct v4l2_queryctrl and the available menu items. Note that
- menu_items may be NULL, in that case it is ignored. */
-int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items)
-{
- if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
- return -EINVAL;
- if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
- return -EBUSY;
- if (qctrl->type == V4L2_CTRL_TYPE_STRING)
- return 0;
- if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
- qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
- return 0;
- if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
- return -ERANGE;
- if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
- if (menu_items[ctrl->value] == NULL ||
- menu_items[ctrl->value][0] == '\0')
- return -EINVAL;
- }
- if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
- (ctrl->value & ~qctrl->maximum))
- return -ERANGE;
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_check);
-
/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
@@ -135,101 +105,6 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
-/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
- the menu. The qctrl pointer may be NULL, in which case it is ignored.
- If menu_items is NULL, then the menu items are retrieved using
- v4l2_ctrl_get_menu. */
-int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items)
-{
- int i;
-
- qmenu->reserved = 0;
- if (menu_items == NULL)
- menu_items = v4l2_ctrl_get_menu(qmenu->id);
- if (menu_items == NULL ||
- (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
- return -EINVAL;
- for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
- if (menu_items[i] == NULL || menu_items[i][0] == '\0')
- return -EINVAL;
- strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
- return 0;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu);
-
-/* Fill in a struct v4l2_querymenu based on the specified array of valid
- menu items (terminated by V4L2_CTRL_MENU_IDS_END).
- Use this if there are 'holes' in the list of valid menu items. */
-int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
-{
- const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);
-
- qmenu->reserved = 0;
- if (menu_items == NULL || ids == NULL)
- return -EINVAL;
- while (*ids != V4L2_CTRL_MENU_IDS_END) {
- if (*ids++ == qmenu->index) {
- strlcpy(qmenu->name, menu_items[qmenu->index],
- sizeof(qmenu->name));
- return 0;
- }
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);
-
-/* ctrl_classes points to an array of u32 pointers, the last element is
- a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
- Each array must be sorted low to high and belong to the same control
- class. The array of u32 pointers must also be sorted, from low class IDs
- to high class IDs.
-
- This function returns the first ID that follows after the given ID.
- When no more controls are available 0 is returned. */
-u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
-{
- u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
- const u32 *pctrl;
-
- if (ctrl_classes == NULL)
- return 0;
-
- /* if no query is desired, then check if the ID is part of ctrl_classes */
- if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
- /* find class */
- while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
- ctrl_classes++;
- if (*ctrl_classes == NULL)
- return 0;
- pctrl = *ctrl_classes;
- /* find control ID */
- while (*pctrl && *pctrl != id) pctrl++;
- return *pctrl ? id : 0;
- }
- id &= V4L2_CTRL_ID_MASK;
- id++; /* select next control */
- /* find first class that matches (or is greater than) the class of
- the ID */
- while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
- ctrl_classes++;
- /* no more classes */
- if (*ctrl_classes == NULL)
- return 0;
- pctrl = *ctrl_classes;
- /* find first ctrl within the class that is >= ID */
- while (*pctrl && *pctrl < id) pctrl++;
- if (*pctrl)
- return *pctrl;
- /* we are at the end of the controls of the current class. */
- /* continue with next class if available */
- ctrl_classes++;
- if (*ctrl_classes == NULL)
- return 0;
- return **ctrl_classes;
-}
-EXPORT_SYMBOL(v4l2_ctrl_next);
-
/* I2C Helper functions */
#if IS_ENABLED(CONFIG_I2C)
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index e502a5fb2994..af635430524e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -222,6 +222,9 @@ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_
static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
+ if (put_user(kp->type, &up->type))
+ return -EFAULT;
+
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -248,8 +251,7 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
- put_user(kp->type, &up->type))
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
return -EFAULT;
return __put_v4l2_format32(kp, up);
}
@@ -257,8 +259,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
+ copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
return __put_v4l2_format32(&kp->format, &up->format);
}
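
The create-buffers fix above copies only the members that precede the embedded format, then lets __put_v4l2_format32() write the type and the matching union member. A self-contained illustration of the offsetof-bounded copy (simplified stand-in structs, not the real uapi layout):

#include <stddef.h>
#include <string.h>

struct fmt32 { unsigned int type; unsigned int u[8]; };
struct create32 { unsigned int index, count, memory; struct fmt32 format; };

static void copy_head_then_format(struct create32 *dst,
				  const struct create32 *src)
{
	/* Copy everything up to, but not including, 'format'. */
	memcpy(dst, src, offsetof(struct create32, format));

	/* The format is converted separately, starting with its type,
	 * which selects the union member to copy next. */
	dst->format.type = src->format.type;
}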
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 86012140923f..45c5b4710601 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1658,10 +1658,8 @@ static int check_range(enum v4l2_ctrl_type type,
}
/* Validate a new control */
-static int validate_new(const struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
{
- union v4l2_ctrl_ptr ptr;
unsigned idx;
int err = 0;
@@ -1674,19 +1672,14 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
- ptr.p_s32 = &c->value;
- return ctrl->type_ops->validate(ctrl, 0, ptr);
-
case V4L2_CTRL_TYPE_INTEGER64:
- ptr.p_s64 = &c->value64;
- return ctrl->type_ops->validate(ctrl, 0, ptr);
+ return ctrl->type_ops->validate(ctrl, 0, p_new);
default:
break;
}
}
- ptr.p = c->ptr;
- for (idx = 0; !err && idx < c->size / ctrl->elem_size; idx++)
- err = ctrl->type_ops->validate(ctrl, idx, ptr);
+ for (idx = 0; !err && idx < ctrl->elems; idx++)
+ err = ctrl->type_ops->validate(ctrl, idx, p_new);
return err;
}
@@ -3012,6 +3005,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ union v4l2_ctrl_ptr p_new;
cs->error_idx = i;
@@ -3025,7 +3019,17 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
best-effort to avoid that. */
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
return -EBUSY;
- ret = validate_new(ctrl, &cs->controls[i]);
+ /*
+ * Skip validation for now if the payload needs to be copied
+ * from userspace into kernelspace. We'll validate those later.
+ */
+ if (ctrl->is_ptr)
+ continue;
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ p_new.p_s64 = &cs->controls[i].value64;
+ else
+ p_new.p_s32 = &cs->controls[i].value;
+ ret = validate_new(ctrl, p_new);
if (ret)
return ret;
}
@@ -3120,7 +3124,11 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
/* Copy the new caller-supplied control values.
user_to_new() sets 'is_new' to 1. */
do {
- ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
+ struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+
+ ret = user_to_new(cs->controls + idx, ctrl);
+ if (!ret && ctrl->is_ptr)
+ ret = validate_new(ctrl, ctrl->p_new);
idx = helpers[idx].next;
} while (!ret && idx);
@@ -3170,10 +3178,10 @@ int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs
EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c, u32 ch_flags)
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
int i;
/* Reset the 'is_new' flags of the cluster */
@@ -3181,8 +3189,9 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
if (master->cluster[i])
master->cluster[i]->is_new = 0;
- if (c)
- user_to_new(c, ctrl);
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ return ret;
/* For autoclusters with volatiles that are switched from auto to
manual mode we have to update the current volatile values since
@@ -3199,15 +3208,14 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
struct v4l2_ext_control *c)
{
- int ret = validate_new(ctrl, c);
+ int ret;
- if (!ret) {
- v4l2_ctrl_lock(ctrl);
- ret = set_ctrl(fh, ctrl, c, 0);
- if (!ret)
- cur_to_user(c, ctrl);
- v4l2_ctrl_unlock(ctrl);
- }
+ v4l2_ctrl_lock(ctrl);
+ user_to_new(c, ctrl);
+ ret = set_ctrl(fh, ctrl, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
return ret;
}
@@ -3215,7 +3223,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
- struct v4l2_ext_control c;
+ struct v4l2_ext_control c = { control->id };
int ret;
if (ctrl == NULL || !ctrl->is_int)
@@ -3244,7 +3252,7 @@ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
/* It's a driver bug if this happens. */
WARN_ON(!ctrl->is_int);
ctrl->val = val;
- return set_ctrl(NULL, ctrl, NULL, 0);
+ return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
@@ -3255,7 +3263,7 @@ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
*ctrl->p_new.p_s64 = val;
- return set_ctrl(NULL, ctrl, NULL, 0);
+ return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
@@ -3266,7 +3274,7 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
- return set_ctrl(NULL, ctrl, NULL, 0);
+ return set_ctrl(NULL, ctrl, 0);
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
@@ -3289,8 +3297,8 @@ EXPORT_SYMBOL(v4l2_ctrl_notify);
int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def)
{
+ bool changed;
int ret;
- struct v4l2_ext_control c;
lockdep_assert_held(ctrl->handler->lock);
@@ -3317,11 +3325,20 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
ctrl->maximum = max;
ctrl->step = step;
ctrl->default_value = def;
- c.value = *ctrl->p_cur.p_s32;
- if (validate_new(ctrl, &c))
- c.value = def;
- if (c.value != *ctrl->p_cur.p_s32)
- ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
+ cur_to_new(ctrl);
+ if (validate_new(ctrl, ctrl->p_new)) {
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ *ctrl->p_new.p_s64 = def;
+ else
+ *ctrl->p_new.p_s32 = def;
+ }
+
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+ else
+ changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+ if (changed)
+ ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
else
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
return ret;
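
validate_new() now receives a union v4l2_ctrl_ptr, so one validator can point at a 32-bit value, a 64-bit value, or an array payload without knowing where the caller stored it. A self-contained sketch of that union-of-typed-pointers idea (simplified types, not the kernel's):

#include <errno.h>
#include <stdint.h>

union ctrl_ptr {
	int32_t *p_s32;
	int64_t *p_s64;
	void *p;
};

enum ctrl_type { CTRL_S32, CTRL_S64 };

/* Validate a candidate value through whichever pointer is populated. */
static int validate(enum ctrl_type type, int64_t min, int64_t max,
		    union ctrl_ptr p_new)
{
	int64_t v = (type == CTRL_S64) ? *p_new.p_s64 : *p_new.p_s32;

	return (v < min || v > max) ? -ERANGE : 0;
}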
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 33617c365acc..9aa530a8bea9 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -194,7 +194,7 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (v4l2_dev && v4l2_dev->mdev &&
+ if (v4l2_dev->mdev &&
vdev->vfl_type != VFL_TYPE_SUBDEV)
media_device_unregister_entity(&vdev->entity);
#endif
@@ -207,7 +207,7 @@ static void v4l2_device_release(struct device *cd)
* TODO: In the long run all drivers that use v4l2_device should use the
* v4l2_device release callback. This check will then be unnecessary.
*/
- if (v4l2_dev && v4l2_dev->release == NULL)
+ if (v4l2_dev->release == NULL)
v4l2_dev = NULL;
/* Release video_device and perform other
@@ -360,27 +360,22 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* hack but it will have to do for those drivers that are not
* yet converted to use unlocked_ioctl.
*
- * There are two options: if the driver implements struct
- * v4l2_device, then the lock defined there is used to
- * serialize the ioctls. Otherwise the v4l2 core lock defined
- * below is used. This lock is really bad since it serializes
- * completely independent devices.
+ * All drivers implement struct v4l2_device, so we use the
+ * lock defined there to serialize the ioctls.
*
- * Both variants suffer from the same problem: if the driver
- * sleeps, then it blocks all ioctls since the lock is still
- * held. This is very common for VIDIOC_DQBUF since that
- * normally waits for a frame to arrive. As a result any other
- * ioctl calls will proceed very, very slowly since each call
- * will have to wait for the VIDIOC_QBUF to finish. Things that
- * should take 0.01s may now take 10-20 seconds.
+ * However, if the driver sleeps, then it blocks all ioctls
+ * since the lock is still held. This is very common for
+ * VIDIOC_DQBUF since that normally waits for a frame to arrive.
+ * As a result any other ioctl calls will proceed very, very
+ * slowly since each call will have to wait for the VIDIOC_QBUF
+ * to finish. Things that should take 0.01s may now take 10-20
+ * seconds.
*
* The workaround is to *not* take the lock for VIDIOC_DQBUF.
* This actually works OK for videobuf-based drivers, since
* videobuf will take its own internal lock.
*/
- static DEFINE_MUTEX(v4l2_ioctl_mutex);
- struct mutex *m = vdev->v4l2_dev ?
- &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex;
+ struct mutex *m = &vdev->v4l2_dev->ioctl_lock;
if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
return -ERESTARTSYS;
@@ -938,12 +933,11 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
name_base, nr, video_device_node_name(vdev));
/* Increase v4l2_device refcount */
- if (vdev->v4l2_dev)
- v4l2_device_get(vdev->v4l2_dev);
+ v4l2_device_get(vdev->v4l2_dev);
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Part 5: Register the entity. */
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+ if (vdev->v4l2_dev->mdev &&
vdev->vfl_type != VFL_TYPE_SUBDEV) {
vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
vdev->entity.name = vdev->name;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9ccb19a435ef..faac2f4e0f3a 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -257,7 +257,7 @@ static void v4l_print_format(const void *arg, bool write_only)
pr_cont(", width=%u, height=%u, "
"pixelformat=%c%c%c%c, field=%s, "
"bytesperline=%u, sizeimage=%u, colorspace=%d, "
- "flags %u\n",
+ "flags %x, ycbcr_enc=%u, quantization=%u\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -265,21 +265,24 @@ static void v4l_print_format(const void *arg, bool write_only)
(pix->pixelformat >> 24) & 0xff,
prt_names(pix->field, v4l2_field_names),
pix->bytesperline, pix->sizeimage,
- pix->colorspace, pix->flags);
+ pix->colorspace, pix->flags, pix->ycbcr_enc,
+ pix->quantization);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
pr_cont(", width=%u, height=%u, "
"format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%u\n",
+ "colorspace=%d, num_planes=%u, flags=%x, "
+ "ycbcr_enc=%u, quantization=%u\n",
mp->width, mp->height,
(mp->pixelformat & 0xff),
(mp->pixelformat >> 8) & 0xff,
(mp->pixelformat >> 16) & 0xff,
(mp->pixelformat >> 24) & 0xff,
prt_names(mp->field, v4l2_field_names),
- mp->colorspace, mp->num_planes);
+ mp->colorspace, mp->num_planes, mp->flags,
+ mp->ycbcr_enc, mp->quantization);
for (i = 0; i < mp->num_planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
@@ -1014,6 +1017,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_querycap(file, fh, cap);
cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+ /*
+ * Drivers MUST fill in device_caps, so check for this and
+ * warn if it was forgotten.
+ */
+ WARN_ON(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
+ !cap->device_caps);
cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
return ret;
@@ -1040,7 +1049,7 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_g_priority)
return ops->vidioc_g_priority(file, fh, arg);
vfd = video_devdata(file);
- *p = v4l2_prio_max(&vfd->v4l2_dev->prio);
+ *p = v4l2_prio_max(vfd->prio);
return 0;
}
@@ -1055,7 +1064,7 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
return ops->vidioc_s_priority(file, fh, *p);
vfd = video_devdata(file);
vfh = file->private_data;
- return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
+ return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
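
v4l_querycap() now warns when a driver leaves device_caps empty, so every vidioc_querycap handler is expected to fill both device_caps and capabilities (including V4L2_CAP_DEVICE_CAPS). A minimal handler that satisfies the check; the driver and bus names are placeholders:

static int mydrv_querycap(struct file *file, void *fh,
			  struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "mydrv", sizeof(cap->driver));
	strlcpy(cap->card, "Example capture device", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:mydrv", sizeof(cap->bus_info));

	/* What this particular device node can do... */
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	/* ...and the whole device, with DEVICE_CAPS marking the field valid. */
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}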
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index b91a266d0b7e..926836d1813a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -51,6 +51,8 @@ MODULE_LICENSE("GPL");
#define CALL(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : 0)
+#define CALLPTR(q, f, arg...) \
+ ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
@@ -831,7 +833,7 @@ static int __videobuf_copy_to_user(struct videobuf_queue *q,
char __user *data, size_t count,
int nonblocking)
{
- void *vaddr = CALL(q, vaddr, buf);
+ void *vaddr = CALLPTR(q, vaddr, buf);
/* copy to userspace */
if (count > buf->size - q->read_off)
@@ -848,7 +850,7 @@ static int __videobuf_copy_stream(struct videobuf_queue *q,
char __user *data, size_t count, size_t pos,
int vbihack, int nonblocking)
{
- unsigned int *fc = CALL(q, vaddr, buf);
+ unsigned int *fc = CALLPTR(q, vaddr, buf);
if (vbihack) {
/* dirty, undocumented hack -- pass the frame counter
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f2e43de3dd87..d09a8916e940 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -189,6 +189,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q);
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
void *mem_priv;
int plane;
@@ -200,7 +202,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
- size, q->gfp_flags);
+ size, dma_dir, q->gfp_flags);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
@@ -1358,7 +1360,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
void *mem_priv;
unsigned int plane;
int ret;
- int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1400,7 +1403,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
planes[plane].m.userptr,
- planes[plane].length, write);
+ planes[plane].length, dma_dir);
if (IS_ERR_OR_NULL(mem_priv)) {
dprintk(1, "failed acquiring userspace "
"memory for plane %d\n", plane);
@@ -1461,7 +1464,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
void *mem_priv;
unsigned int plane;
int ret;
- int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ enum dma_data_direction dma_dir =
+ V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1509,7 +1513,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
- dbuf, planes[plane].length, write);
+ dbuf, planes[plane].length, dma_dir);
if (IS_ERR(mem_priv)) {
dprintk(1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -3385,14 +3389,14 @@ int _vb2_fop_release(struct file *file, struct mutex *lock)
{
struct video_device *vdev = video_devdata(file);
+ if (lock)
+ mutex_lock(lock);
if (file->private_data == vdev->queue->owner) {
- if (lock)
- mutex_lock(lock);
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
- if (lock)
- mutex_unlock(lock);
}
+ if (lock)
+ mutex_unlock(lock);
return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);
@@ -3455,27 +3459,16 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
struct video_device *vdev = video_devdata(file);
struct vb2_queue *q = vdev->queue;
struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned long req_events = poll_requested_events(wait);
unsigned res;
void *fileio;
- bool must_lock = false;
-
- /* Try to be smart: only lock if polling might start fileio,
- otherwise locking will only introduce unwanted delays. */
- if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
- (req_events & (POLLIN | POLLRDNORM)))
- must_lock = true;
- else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
- (req_events & (POLLOUT | POLLWRNORM)))
- must_lock = true;
- }
- /* If locking is needed, but this helper doesn't know how, then you
- shouldn't be using this helper but you should write your own. */
- WARN_ON(must_lock && !lock);
+ /*
+ * If this helper doesn't know how to lock, then you shouldn't be using
+ * it but you should write your own.
+ */
+ WARN_ON(!lock);
- if (must_lock && lock && mutex_lock_interruptible(lock))
+ if (lock && mutex_lock_interruptible(lock))
return POLLERR;
fileio = q->fileio;
@@ -3483,9 +3476,9 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
res = vb2_poll(vdev->queue, file, wait);
/* If fileio was started, then we have a new queue owner. */
- if (must_lock && !fileio && q->fileio)
+ if (!fileio && q->fileio)
q->owner = file->private_data;
- if (must_lock && lock)
+ if (lock)
mutex_unlock(lock);
return res;
}
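
The vb2 core now computes the DMA direction from the queue type once and passes an explicit enum dma_data_direction to the alloc, get_userptr and attach_dmabuf memops, replacing the old write flag. The mapping is the one-liner used in the hunks above; the helper name here is only illustrative:

/* Capture queues receive data from the device; output queues feed it. */
static enum dma_data_direction sketch_queue_dma_dir(enum v4l2_buf_type type)
{
	return V4L2_TYPE_IS_OUTPUT(type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}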
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4a02ade14b4f..b481d20c8372 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -155,7 +155,8 @@ static void vb2_dc_put(void *buf_priv)
kfree(buf);
}
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
struct vb2_dc_conf *conf = alloc_ctx;
struct device *dev = conf->dev;
@@ -176,6 +177,7 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
buf->size = size;
+ buf->dma_dir = dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dc_put;
@@ -229,7 +231,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
struct vb2_dc_attachment {
struct sg_table sgt;
- enum dma_data_direction dir;
+ enum dma_data_direction dma_dir;
};
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
@@ -264,7 +266,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
wr = sg_next(wr);
}
- attach->dir = DMA_NONE;
+ attach->dma_dir = DMA_NONE;
dbuf_attach->priv = attach;
return 0;
@@ -282,16 +284,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
sgt = &attach->sgt;
/* release the scatterlist cache */
- if (attach->dir != DMA_NONE)
+ if (attach->dma_dir != DMA_NONE)
dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dir);
+ attach->dma_dir);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
- struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
struct vb2_dc_attachment *attach = db_attach->priv;
/* stealing dmabuf mutex to serialize map/unmap operations */
@@ -303,27 +305,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
sgt = &attach->sgt;
/* return previously mapped sg table */
- if (attach->dir == dir) {
+ if (attach->dma_dir == dma_dir) {
mutex_unlock(lock);
return sgt;
}
/* release any previous cache */
- if (attach->dir != DMA_NONE) {
+ if (attach->dma_dir != DMA_NONE) {
dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dir);
- attach->dir = DMA_NONE;
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
}
/* mapping to the client with new direction */
- ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
if (ret <= 0) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
}
- attach->dir = dir;
+ attach->dma_dir = dma_dir;
mutex_unlock(lock);
@@ -331,7 +333,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
- struct sg_table *sgt, enum dma_data_direction dir)
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
{
/* nothing to be done here */
}
@@ -460,7 +462,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
}
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
- int n_pages, struct vm_area_struct *vma, int write)
+ int n_pages, struct vm_area_struct *vma,
+ enum dma_data_direction dma_dir)
{
if (vma_is_io(vma)) {
unsigned int i;
@@ -482,7 +485,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
int n;
n = get_user_pages(current, current->mm, start & PAGE_MASK,
- n_pages, write, 1, pages, NULL);
+ n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
/* negative error means that no page was pinned */
n = max(n, 0);
if (n != n_pages) {
@@ -508,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
struct sg_table *sgt = buf->dma_sgt;
if (sgt) {
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+ /*
+ * No need to sync to CPU, it's already synced to the CPU
+ * since the finish() memop will have been called before this.
+ */
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
if (!vma_is_io(buf->vma))
vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
@@ -551,7 +562,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
#endif
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+ unsigned long size, enum dma_data_direction dma_dir)
{
struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
@@ -565,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
struct sg_table *sgt;
unsigned long contig_size;
unsigned long dma_align = dma_get_cache_alignment();
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/* Only cache aligned DMA transfers are reliable */
if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -582,7 +596,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
return ERR_PTR(-ENOMEM);
buf->dev = conf->dev;
- buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->dma_dir = dma_dir;
start = vaddr & PAGE_MASK;
offset = vaddr & ~PAGE_MASK;
@@ -618,7 +632,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
}
/* extract page list from userspace mapping */
- ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
+ dma_dir == DMA_FROM_DEVICE);
if (ret) {
unsigned long pfn;
if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
@@ -650,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
kfree(pages);
pages = NULL;
- sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
if (sgt->nents <= 0) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
@@ -673,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
return buf;
fail_map_sg:
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
fail_sgt_init:
if (!vma_is_io(buf->vma))
@@ -782,7 +802,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
}
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write)
+ unsigned long size, enum dma_data_direction dma_dir)
{
struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
@@ -804,7 +824,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->dma_dir = dma_dir;
buf->size = size;
buf->db_attach = dba;
@@ -850,7 +870,8 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
- kfree(alloc_ctx);
+ if (!IS_ERR_OR_NULL(alloc_ctx))
+ kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
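
The dma-contig changes map the scatterlist with DMA_ATTR_SKIP_CPU_SYNC and leave cache maintenance to the prepare() and finish() memops. A condensed sketch of that deferred-sync pattern, using the same kernel-era attrs API as the hunks above (the helper name and the back-to-back syncs are for illustration only):

static int sketch_map_deferred_sync(struct device *dev, struct sg_table *sgt,
				    enum dma_data_direction dma_dir)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	/* Map once without syncing; ownership transfers happen per use. */
	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
				      dma_dir, &attrs);
	if (sgt->nents <= 0)
		return -EIO;

	/* prepare() memop: hand the buffer to the device before DMA. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, dma_dir);

	/* finish() memop: give it back to the CPU after DMA completes. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, dma_dir);

	return 0;
}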
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 9b163a440f89..b1838abb6d00 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -30,17 +30,30 @@ module_param(debug, int, 0644);
printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
} while (0)
+struct vb2_dma_sg_conf {
+ struct device *dev;
+};
+
struct vb2_dma_sg_buf {
+ struct device *dev;
void *vaddr;
struct page **pages;
- int write;
int offset;
+ enum dma_data_direction dma_dir;
struct sg_table sg_table;
+ /*
+ * This will point to sg_table when used with the MMAP or USERPTR
+ * memory model, and to the dma_buf sglist when used with the
+ * DMABUF memory model.
+ */
+ struct sg_table *dma_sgt;
size_t size;
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
struct vm_area_struct *vma;
+
+ struct dma_buf_attachment *db_attach;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -86,22 +99,31 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
+ struct sg_table *sgt;
int ret;
int num_pages;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
+ if (WARN_ON(alloc_ctx == NULL))
+ return NULL;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
buf->vaddr = NULL;
- buf->write = 0;
+ buf->dma_dir = dma_dir;
buf->offset = 0;
buf->size = size;
/* size is already page aligned */
buf->num_pages = size >> PAGE_SHIFT;
+ buf->dma_sgt = &buf->sg_table;
buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
@@ -112,11 +134,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
if (ret)
goto fail_pages_alloc;
- ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, 0, size, GFP_KERNEL);
if (ret)
goto fail_table_alloc;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(conf->dev);
+
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs) == 0)
+ goto fail_map;
+
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
@@ -127,6 +161,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
__func__, buf->num_pages);
return buf;
+fail_map:
+ put_device(buf->dev);
+ sg_free_table(buf->dma_sgt);
fail_table_alloc:
num_pages = buf->num_pages;
while (num_pages--)
@@ -141,42 +178,81 @@ fail_pages_array_alloc:
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
- sg_free_table(&buf->sg_table);
+ sg_free_table(buf->dma_sgt);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
+ put_device(buf->dev);
kfree(buf);
}
}
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
static inline int vma_is_io(struct vm_area_struct *vma)
{
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+ unsigned long size,
+ enum dma_data_direction dma_dir)
{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
int num_pages_from_user;
struct vm_area_struct *vma;
+ struct sg_table *sgt;
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
buf->vaddr = NULL;
- buf->write = write;
+ buf->dev = conf->dev;
+ buf->dma_dir = dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
+ buf->dma_sgt = &buf->sg_table;
first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -221,7 +297,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
buf->num_pages,
- write,
+ buf->dma_dir == DMA_FROM_DEVICE,
1, /* force */
buf->pages,
NULL);
@@ -229,12 +305,22 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
- if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, buf->offset, size, 0))
goto userptr_fail_alloc_table_from_pages;
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ buf->dma_dir, &attrs) == 0)
+ goto userptr_fail_map;
return buf;
+userptr_fail_map:
+ sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -257,15 +343,20 @@ userptr_fail_alloc_pages:
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
- sg_free_table(&buf->sg_table);
+ sg_free_table(buf->dma_sgt);
while (--i >= 0) {
- if (buf->write)
+ if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]);
if (!vma_is_io(buf->vma))
put_page(buf->pages[i]);
@@ -281,14 +372,16 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
BUG_ON(!buf);
- if (!buf->vaddr)
- buf->vaddr = vm_map_ram(buf->pages,
- buf->num_pages,
- -1,
- PAGE_KERNEL);
+ if (!buf->vaddr) {
+ if (buf->db_attach)
+ buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ else
+ buf->vaddr = vm_map_ram(buf->pages,
+ buf->num_pages, -1, PAGE_KERNEL);
+ }
/* add offset in case userptr is not page-aligned */
- return buf->vaddr + buf->offset;
+ return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
@@ -335,11 +428,279 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_dma_sg_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ int ret;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->base_sgt scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->dma_sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dma_sg_get_dmabuf */
+ vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+ return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+ return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+ .attach = vb2_dma_sg_dmabuf_ops_attach,
+ .detach = vb2_dma_sg_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+ .kmap = vb2_dma_sg_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+ .vmap = vb2_dma_sg_dmabuf_ops_vmap,
+ .mmap = vb2_dma_sg_dmabuf_ops_mmap,
+ .release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+
+ if (WARN_ON(!buf->dma_sgt))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
+
+ return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach mapped buffer */
+ if (WARN_ON(buf->dma_sgt))
+ vb2_dma_sg_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
+ struct vb2_dma_sg_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
static void *vb2_dma_sg_cookie(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return &buf->sg_table;
+ return buf->dma_sgt;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
@@ -347,13 +708,41 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
.put = vb2_dma_sg_put,
.get_userptr = vb2_dma_sg_get_userptr,
.put_userptr = vb2_dma_sg_put_userptr,
+ .prepare = vb2_dma_sg_prepare,
+ .finish = vb2_dma_sg_finish,
.vaddr = vb2_dma_sg_vaddr,
.mmap = vb2_dma_sg_mmap,
.num_users = vb2_dma_sg_num_users,
+ .get_dmabuf = vb2_dma_sg_get_dmabuf,
+ .map_dmabuf = vb2_dma_sg_map_dmabuf,
+ .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
+ .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
+ .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
+void *vb2_dma_sg_init_ctx(struct device *dev)
+{
+ struct vb2_dma_sg_conf *conf;
+
+ conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ conf->dev = dev;
+
+ return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
+
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
+{
+ if (!IS_ERR_OR_NULL(alloc_ctx))
+ kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
+
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 313d9771b2bc..fba944e50227 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
void *vaddr;
struct page **pages;
struct vm_area_struct *vma;
- int write;
+ enum dma_data_direction dma_dir;
unsigned long size;
unsigned int n_pages;
atomic_t refcount;
@@ -35,7 +35,8 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
struct vb2_vmalloc_buf *buf;
@@ -45,6 +46,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fl
buf->size = size;
buf->vaddr = vmalloc_user(buf->size);
+ buf->dma_dir = dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -70,7 +72,8 @@ static void vb2_vmalloc_put(void *buf_priv)
}
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+ unsigned long size,
+ enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
unsigned long first, last;
@@ -82,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf)
return NULL;
- buf->write = write;
+ buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
@@ -107,7 +110,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
/* current->mm->mmap_sem is taken by videobuf2 core */
n_pages = get_user_pages(current, current->mm,
vaddr & PAGE_MASK, buf->n_pages,
- write, 1, /* force */
+ dma_dir == DMA_FROM_DEVICE,
+ 1, /* force */
buf->pages, NULL);
if (n_pages != buf->n_pages)
goto fail_get_user_pages;
@@ -144,14 +148,13 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
if (vaddr)
vm_unmap_ram((void *)vaddr, buf->n_pages);
for (i = 0; i < buf->n_pages; ++i) {
- if (buf->write)
+ if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]);
put_page(buf->pages[i]);
}
kfree(buf->pages);
} else {
- if (buf->vma)
- vb2_put_vma(buf->vma);
+ vb2_put_vma(buf->vma);
iounmap(buf->vaddr);
}
kfree(buf);
@@ -209,6 +212,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
}
/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_vmalloc_attachment *attach;
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ void *vaddr = buf->vaddr;
+ int ret;
+ int i;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return ret;
+ }
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *page = vmalloc_to_page(vaddr);
+
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+ return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_vmalloc_get_dmabuf */
+ vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+ .attach = vb2_vmalloc_dmabuf_ops_attach,
+ .detach = vb2_vmalloc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+ .kmap = vb2_vmalloc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+ .vmap = vb2_vmalloc_dmabuf_ops_vmap,
+ .mmap = vb2_vmalloc_dmabuf_ops_mmap,
+ .release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+
+ if (WARN_ON(!buf->vaddr))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
/* callbacks for DMABUF buffers */
/*********************************************/
@@ -240,7 +413,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
}
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write)
+ unsigned long size, enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
@@ -252,7 +425,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf;
- buf->write = write;
+ buf->dma_dir = dma_dir;
buf->size = size;
return buf;
@@ -264,6 +437,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .get_dmabuf = vb2_vmalloc_get_dmabuf,
.map_dmabuf = vb2_vmalloc_map_dmabuf,
.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
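
Both exporters follow the same shape: every attachment gets its own copy of the scatterlist, and the map callback caches one mapping per direction under the dma-buf lock. On the importing side such a buffer is consumed through the generic dma-buf calls; a minimal sketch, assuming a consumer device and DMA_FROM_DEVICE as the direction:

static int foo_import(struct device *importer_dev, struct dma_buf *dbuf)
{
	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dbuf, importer_dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	sgt = dma_buf_map_attachment(att, DMA_FROM_DEVICE);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dbuf, att);
		return PTR_ERR(sgt);
	}

	/* program the importer with sgt->sgl / sgt->nents here */

	dma_buf_unmap_attachment(att, sgt, DMA_FROM_DEVICE);
	dma_buf_detach(dbuf, att);
	return 0;
}
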
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index fed04e8efe75..a3ebc8a87479 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -82,7 +82,6 @@ static struct platform_driver atmel_ramc_driver = {
.probe = atmel_ramc_probe,
.driver = {
.name = "atmel-ramc",
- .owner = THIS_MODULE,
.of_match_table = atmel_ramc_of_match,
},
};
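
This hunk and the many similar ones below drop the explicit .owner assignment: the platform core's registration wrapper already records the owning module, so repeating THIS_MODULE in each driver struct is redundant. The remaining minimal form looks like this (illustrative names; probe and match table assumed):

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.of_match_table	= foo_of_match,
		/* .owner is set for us by platform_driver_register() */
	},
};
module_platform_driver(foo_driver);
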
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index c9443fc136db..fc7ab5a3561e 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -237,7 +237,6 @@ static int ccf_remove(struct platform_device *pdev)
static struct platform_driver ccf_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = ccf_matches,
},
.probe = ccf_probe,
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 3d5d792d5cb2..410c39749872 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -61,7 +61,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
@@ -213,7 +213,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
static int fsl_ifc_ctrl_probe(struct platform_device *dev)
{
int ret = 0;
-
+ int version, banks;
dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
@@ -231,6 +231,15 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
goto err;
}
+ version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+ FSL_IFC_VERSION_MASK;
+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+ version >> 24, (version >> 16) & 0xf, banks);
+
+ fsl_ifc_ctrl_dev->version = version;
+ fsl_ifc_ctrl_dev->banks = banks;
+
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
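
The bank count is now taken from the controller's version register rather than from the size of the register array. As a worked instance of the decode above (a sketch; the mask and version constants are assumed to come from the matching fsl_ifc.h update): a masked ifc_rev of 0x01000000 is IFC 1.0 and keeps the original 4 banks, anything newer (for example 0x02000000, i.e. 2.0) reports 8, and fsl_ifc_find() then limits its chip-select scan to that count.
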
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index ff7138fd66d1..24852812fd44 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -346,7 +346,6 @@ static struct platform_driver mvebu_devbus_driver = {
.probe = mvebu_devbus_probe,
.driver = {
.name = "mvebu-devbus",
- .owner = THIS_MODULE,
.of_match_table = mvebu_devbus_of_match,
},
};
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index ffc5e60c0664..24696f59215b 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1987,7 +1987,6 @@ static struct platform_driver gpmc_driver = {
.remove = gpmc_remove,
.driver = {
.name = DEVICE_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpmc_dt_ids),
.pm = &gpmc_pm_ops,
},
diff --git a/drivers/memory/tegra20-mc.c b/drivers/memory/tegra20-mc.c
index 7cd82b874abd..cc309a05289a 100644
--- a/drivers/memory/tegra20-mc.c
+++ b/drivers/memory/tegra20-mc.c
@@ -243,7 +243,6 @@ static struct platform_driver tegra20_mc_driver = {
.probe = tegra20_mc_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra20_mc_of_match,
},
};
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index d3df7602f406..ca7d97a9a9ba 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -413,7 +413,6 @@ static struct platform_driver aemif_driver = {
.remove = aemif_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(aemif_of_match),
},
};
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index c880ba685754..818fa94354ae 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -645,7 +645,6 @@ static struct platform_driver rtsx_pci_ms_driver = {
.suspend = rtsx_pci_ms_suspend,
.resume = rtsx_pci_ms_resume,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME_RTSX_PCI_MS,
},
};
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index a7282b7d4de8..1105db2355d2 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -827,7 +827,6 @@ static struct platform_driver rtsx_usb_ms_driver = {
.remove = rtsx_usb_ms_drv_remove,
.id_table = rtsx_usb_ms_ids,
.driver = {
- .owner = THIS_MODULE,
.name = "rtsx_usb_ms",
.pm = &rtsx_usb_ms_pm_ops,
},
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index c9af16cc7310..f391c5fee1b0 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -239,7 +239,6 @@ static int __exit ab3100_otp_remove(struct platform_device *pdev)
static struct platform_driver ab3100_otp_driver = {
.driver = {
.name = "ab3100-otp",
- .owner = THIS_MODULE,
},
.remove = __exit_p(ab3100_otp_remove),
};
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index bde2fc072410..c80a2925f8e5 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -1848,7 +1848,6 @@ static const struct platform_device_id ab8500_id[] = {
static struct platform_driver ab8500_core_driver = {
.driver = {
.name = "ab8500-core",
- .owner = THIS_MODULE,
},
.probe = ab8500_probe,
.remove = ab8500_remove,
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index b2c7e3b1edfa..9a8e185f11df 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -3263,7 +3263,6 @@ static int ab8500_debug_remove(struct platform_device *plf)
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
- .owner = THIS_MODULE,
},
.probe = ab8500_debug_probe,
.remove = ab8500_debug_remove
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 8e3168d160b2..dabbc93abdd7 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -1044,7 +1044,6 @@ static struct platform_driver ab8500_gpadc_driver = {
.remove = ab8500_gpadc_remove,
.driver = {
.name = "ab8500-gpadc",
- .owner = THIS_MODULE,
.pm = &ab8500_gpadc_pm_ops,
},
};
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 94dbcdd2a1ff..cfff0b643f1b 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -180,7 +180,6 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
- .owner = THIS_MODULE,
},
.probe = ab8500_sysctrl_probe,
.remove = ab8500_sysctrl_remove,
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 013ba8159dcd..c835e85539b2 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -143,7 +143,6 @@ static int davinci_vc_remove(struct platform_device *pdev)
static struct platform_driver davinci_vc_driver = {
.driver = {
.name = "davinci_voicecodec",
- .owner = THIS_MODULE,
},
.remove = davinci_vc_remove,
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a8204730f01c..16162bf43656 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3232,7 +3232,6 @@ static const struct of_device_id db8500_prcmu_match[] = {
static struct platform_driver db8500_prcmu_driver = {
.driver = {
.name = "db8500-prcmu",
- .owner = THIS_MODULE,
.of_match_table = db8500_prcmu_match,
},
.probe = db8500_prcmu_probe,
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 443e7cddff28..25d486c543cb 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -447,7 +447,6 @@ static struct platform_driver intel_msic_driver = {
.remove = intel_msic_remove,
.driver = {
.name = "intel_msic",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 7a51c0d0d4f1..b31c54e4ecb2 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -324,7 +324,6 @@ static struct platform_driver jz4740_adc_driver = {
.remove = jz4740_adc_remove,
.driver = {
.name = "jz4740-adc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index bd2696136eee..f38ec424872e 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -494,7 +494,6 @@ static int kempld_remove(struct platform_device *pdev)
static struct platform_driver kempld_driver = {
.driver = {
.name = "kempld",
- .owner = THIS_MODULE,
},
.probe = kempld_probe,
.remove = kempld_remove,
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 29d76986b40b..1fc458128405 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -299,7 +299,6 @@ static struct platform_driver mcp_sa11x0_driver = {
.remove = mcp_sa11x0_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &mcp_sa11x0_pm_ops,
},
};
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 83dab2f0a50e..04cd54dd507c 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -882,7 +882,6 @@ MODULE_DEVICE_TABLE(of, usbhs_omap_dt_ids);
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
- .owner = THIS_MODULE,
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 532eacab6b46..b7b3e8ee64f2 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -319,7 +319,6 @@ MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids);
static struct platform_driver usbtll_omap_driver = {
.driver = {
.name = (char *)usbtll_driver_name,
- .owner = THIS_MODULE,
.of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 39904f77c049..5a92646a2ccb 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -374,7 +374,6 @@ static struct platform_driver pm8921_driver = {
.remove = pm8921_remove,
.driver = {
.name = "pm8921-core",
- .owner = THIS_MODULE,
.of_match_table = pm8921_id_table,
},
};
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 6ce6e6200359..91077efc8050 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1723,7 +1723,6 @@ static const struct of_device_id of_sm501_match_tbl[] = {
static struct platform_driver sm501_plat_driver = {
.driver = {
.name = "sm501",
- .owner = THIS_MODULE,
.of_match_table = of_sm501_match_tbl,
},
.probe = sm501_plat_probe,
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index b78942ed4c67..27986f641f7d 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -327,7 +327,6 @@ static struct platform_driver ssbi_driver = {
.probe = ssbi_probe,
.driver = {
.name = "ssbi",
- .owner = THIS_MODULE,
.of_match_table = ssbi_match_table,
},
};
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index 5b72db07d9de..b3e5c6f45105 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -368,7 +368,6 @@ static int sta2x11_scr_probe(struct platform_device *dev)
static struct platform_driver sta2x11_sctl_platform_driver = {
.driver = {
.name = STA2X11_MFD_SCTL_NAME,
- .owner = THIS_MODULE,
},
.probe = sta2x11_sctl_probe,
};
@@ -382,7 +381,6 @@ static int __init sta2x11_sctl_init(void)
static struct platform_driver sta2x11_platform_driver = {
.driver = {
.name = STA2X11_MFD_APBREG_NAME,
- .owner = THIS_MODULE,
},
.probe = sta2x11_apbreg_probe,
};
@@ -396,7 +394,6 @@ static int __init sta2x11_apbreg_init(void)
static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
.driver = {
.name = STA2X11_MFD_APB_SOC_REGS_NAME,
- .owner = THIS_MODULE,
},
.probe = sta2x11_apb_soc_regs_probe,
};
@@ -410,7 +407,6 @@ static int __init sta2x11_apb_soc_regs_init(void)
static struct platform_driver sta2x11_scr_platform_driver = {
.driver = {
.name = STA2X11_MFD_SCR_NAME,
- .owner = THIS_MODULE,
},
.probe = sta2x11_scr_probe,
};
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 283ab8d197e4..2f2e9f062571 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -152,7 +152,6 @@ static int sun6i_prcm_probe(struct platform_device *pdev)
static struct platform_driver sun6i_prcm_driver = {
.driver = {
.name = "sun6i-prcm",
- .owner = THIS_MODULE,
.of_match_table = sun6i_prcm_dt_ids,
},
.probe = sun6i_prcm_probe,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 72373b113885..176bf0fa2685 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -219,7 +219,6 @@ static const struct platform_device_id syscon_ids[] = {
static struct platform_driver syscon_driver = {
.driver = {
.name = "syscon",
- .owner = THIS_MODULE,
},
.probe = syscon_probe,
.id_table = syscon_ids,
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 439d905bb219..c09fb5dccd50 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -431,7 +431,6 @@ static int t7l66xb_remove(struct platform_device *dev)
static struct platform_driver t7l66xb_platform_driver = {
.driver = {
.name = "t7l66xb",
- .owner = THIS_MODULE,
},
.suspend = t7l66xb_suspend,
.resume = t7l66xb_resume,
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index d35f11fbeab7..63458b39a97d 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -842,7 +842,6 @@ static struct platform_driver tc6393xb_driver = {
.driver = {
.name = "tc6393xb",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index d877e777cce6..467c80e1c4ae 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -368,7 +368,6 @@ MODULE_DEVICE_TABLE(of, ti_tscadc_dt_ids);
static struct platform_driver ti_tscadc_driver = {
.driver = {
.name = "ti_am3359-tscadc",
- .owner = THIS_MODULE,
.pm = TSCADC_PM_OPS,
.of_match_table = ti_tscadc_dt_ids,
},
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index c0816ebd9d7e..c0789f81a1c5 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -166,7 +166,6 @@ static int tps65911_comparator_remove(struct platform_device *pdev)
static struct platform_driver tps65911_comparator_driver = {
.driver = {
.name = "tps65911-comparator",
- .owner = THIS_MODULE,
},
.probe = tps65911_comparator_probe,
.remove = tps65911_comparator_remove,
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index 07fe542e6fc0..0a1606480023 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -283,7 +283,6 @@ MODULE_DEVICE_TABLE(of, twl4030_audio_of_match);
static struct platform_driver twl4030_audio_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "twl4030-audio",
.of_match_table = twl4030_audio_of_match,
},
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 7d63e324e6a8..393509246037 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -933,7 +933,6 @@ static int twl4030_power_remove(struct platform_device *pdev)
static struct platform_driver twl4030_power_driver = {
.driver = {
.name = "twl4030_power",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl4030_power_of_match),
},
.probe = twl4030_power_probe,
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 9e21e4fc9599..8f43ab8fd2d6 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
vexpress_config_set_master(vexpress_sysreg_get_master());
/* Confirm board type against DT property, if available */
- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) {
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER);
u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index c72e96b523ed..c65b5ea5d5ef 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -375,7 +375,6 @@ static const struct of_device_id charlcd_match[] = {
static struct platform_driver charlcd_driver = {
.driver = {
.name = DRIVERNAME,
- .owner = THIS_MODULE,
.pm = &charlcd_pm_ops,
.of_match_table = of_match_ptr(charlcd_match),
},
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 60843a275abd..e11a0bd6c66e 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -57,7 +57,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
ssc->user++;
spin_unlock(&user_lock);
- clk_prepare_enable(ssc->clk);
+ clk_prepare(ssc->clk);
return ssc;
}
@@ -77,7 +77,7 @@ void ssc_free(struct ssc_device *ssc)
spin_unlock(&user_lock);
if (disable_clk)
- clk_disable_unprepare(ssc->clk);
+ clk_unprepare(ssc->clk);
}
EXPORT_SYMBOL(ssc_free);
@@ -220,7 +220,6 @@ static int ssc_remove(struct platform_device *pdev)
static struct platform_driver ssc_driver = {
.driver = {
.name = "ssc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_ssc_dt_ids),
},
.id_table = atmel_ssc_devtypes,
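
After this change ssc_request()/ssc_free() only prepare and unprepare the peripheral clock; actually gating it on and off is presumably left to the SSC user, which can then call clk_enable()/clk_disable() (atomic-safe) around individual transfers. A hedged usage sketch (caller, SSC index and error handling are assumptions):

static int foo_run_transfer(void)
{
	struct ssc_device *ssc = ssc_request(0);

	if (IS_ERR(ssc))
		return PTR_ERR(ssc);

	clk_enable(ssc->clk);		/* clock on only while streaming */
	/* ... program and run the SSC transfer ... */
	clk_disable(ssc->clk);

	ssc_free(ssc);			/* balances the clk_prepare() in ssc_request() */
	return 0;
}
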
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
index c90370ed712b..295882bfb14e 100644
--- a/drivers/misc/carma/Kconfig
+++ b/drivers/misc/carma/Kconfig
@@ -1,7 +1,6 @@
config CARMA_FPGA
tristate "CARMA DATA-FPGA Access Driver"
- depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
- select VIDEOBUF_DMA_SG
+ depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
default n
help
Say Y here to include support for communicating with the data
@@ -9,8 +8,7 @@ config CARMA_FPGA
config CARMA_FPGA_PROGRAM
tristate "CARMA DATA-FPGA Programmer"
- depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
- select VIDEOBUF_DMA_SG
+ depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
default n
help
Say Y here to include support for programming the data processing
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 339b252fcedd..06166ac000e0 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -19,6 +19,7 @@
#include <linux/fsldma.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
+#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -30,8 +31,6 @@
#include <linux/fs.h>
#include <linux/io.h>
-#include <media/videobuf-dma-sg.h>
-
/* MPC8349EMDS specific get_immrbase() */
#include <sysdev/fsl_soc.h>
@@ -67,14 +66,79 @@ struct fpga_dev {
/* FPGA Bitfile */
struct mutex lock;
- struct videobuf_dmabuf vb;
- bool vb_allocated;
+ void *vaddr;
+ struct scatterlist *sglist;
+ int sglen;
+ int nr_pages;
+ bool buf_allocated;
/* max size and written bytes */
size_t fw_size;
size_t bytes;
};
+static int fpga_dma_init(struct fpga_dev *priv, int nr_pages)
+{
+ struct page *pg;
+ int i;
+
+ priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
+ if (NULL == priv->vaddr) {
+ pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
+ return -ENOMEM;
+ }
+
+ pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
+ (unsigned long)priv->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT);
+ priv->nr_pages = nr_pages;
+
+ priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist));
+ if (NULL == priv->sglist)
+ goto vzalloc_err;
+
+ sg_init_table(priv->sglist, priv->nr_pages);
+ for (i = 0; i < priv->nr_pages; i++) {
+ pg = vmalloc_to_page(priv->vaddr + i * PAGE_SIZE);
+ if (NULL == pg)
+ goto vmalloc_to_page_err;
+ sg_set_page(&priv->sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return 0;
+
+vmalloc_to_page_err:
+ vfree(priv->sglist);
+ priv->sglist = NULL;
+vzalloc_err:
+ vfree(priv->vaddr);
+ priv->vaddr = NULL;
+ return -ENOMEM;
+}
+
+static int fpga_dma_map(struct fpga_dev *priv)
+{
+ priv->sglen = dma_map_sg(priv->dev, priv->sglist,
+ priv->nr_pages, DMA_TO_DEVICE);
+
+ if (0 == priv->sglen) {
+ pr_warn("%s: dma_map_sg failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int fpga_dma_unmap(struct fpga_dev *priv)
+{
+ if (!priv->sglen)
+ return 0;
+
+ dma_unmap_sg(priv->dev, priv->sglist, priv->sglen, DMA_TO_DEVICE);
+ priv->sglen = 0;
+ return 0;
+}
+
/*
* FPGA Bitfile Helpers
*/
@@ -87,8 +151,9 @@ struct fpga_dev {
*/
static void fpga_drop_firmware_data(struct fpga_dev *priv)
{
- videobuf_dma_free(&priv->vb);
- priv->vb_allocated = false;
+ vfree(priv->sglist);
+ vfree(priv->vaddr);
+ priv->buf_allocated = false;
priv->bytes = 0;
}
@@ -427,7 +492,7 @@ static noinline int fpga_program_cpu(struct fpga_dev *priv)
dev_dbg(priv->dev, "enabled the controller\n");
/* Write each chunk of the FPGA bitfile to FPGA programmer */
- ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes);
+ ret = fpga_program_block(priv, priv->vaddr, priv->bytes);
if (ret)
goto out_disable_controller;
@@ -463,7 +528,6 @@ out_disable_controller:
*/
static noinline int fpga_program_dma(struct fpga_dev *priv)
{
- struct videobuf_dmabuf *vb = &priv->vb;
struct dma_chan *chan = priv->chan;
struct dma_async_tx_descriptor *tx;
size_t num_pages, len, avail = 0;
@@ -505,7 +569,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
}
/* Map the buffer for DMA */
- ret = videobuf_dma_map(priv->dev, &priv->vb);
+ ret = fpga_dma_map(priv);
if (ret) {
dev_err(priv->dev, "Unable to map buffer for DMA\n");
goto out_free_table;
@@ -525,7 +589,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
goto out_dma_unmap;
}
- ret = fsl_dma_external_start(chan, 1)
+ ret = fsl_dma_external_start(chan, 1);
if (ret) {
dev_err(priv->dev, "DMA external control setup failed\n");
goto out_dma_unmap;
@@ -534,7 +598,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
/* setup and submit the DMA transaction */
tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages,
- vb->sglist, vb->sglen, 0);
+ priv->sglist, priv->sglen, 0);
if (!tx) {
dev_err(priv->dev, "Unable to prep DMA transaction\n");
ret = -ENOMEM;
@@ -572,7 +636,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
out_disable_controller:
fpga_programmer_disable(priv);
out_dma_unmap:
- videobuf_dma_unmap(priv->dev, vb);
+ fpga_dma_unmap(priv);
out_free_table:
sg_free_table(&table);
out_return:
@@ -702,12 +766,12 @@ static int fpga_open(struct inode *inode, struct file *filp)
priv->bytes = 0;
/* Check if we have already allocated a buffer */
- if (priv->vb_allocated)
+ if (priv->buf_allocated)
return 0;
/* Allocate a buffer to hold enough data for the bitfile */
nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
- ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages);
+ ret = fpga_dma_init(priv, nr_pages);
if (ret) {
dev_err(priv->dev, "unable to allocate data buffer\n");
mutex_unlock(&priv->lock);
@@ -715,7 +779,7 @@ static int fpga_open(struct inode *inode, struct file *filp)
return ret;
}
- priv->vb_allocated = true;
+ priv->buf_allocated = true;
return 0;
}
@@ -738,7 +802,7 @@ static ssize_t fpga_write(struct file *filp, const char __user *buf,
return -ENOSPC;
count = min_t(size_t, priv->fw_size - priv->bytes, count);
- if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count))
+ if (copy_from_user(priv->vaddr + priv->bytes, buf, count))
return -EFAULT;
priv->bytes += count;
@@ -749,20 +813,19 @@ static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct fpga_dev *priv = filp->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- priv->vb.vaddr, priv->bytes);
+ return simple_read_from_buffer(buf, count, f_pos,
+ priv->vaddr, priv->bytes);
}
static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
{
struct fpga_dev *priv = filp->private_data;
- loff_t newpos;
/* only read-only opens are allowed to seek */
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
return -EINVAL;
- return fixed_size_llseek(file, offset, origin, priv->fw_size);
+ return fixed_size_llseek(filp, offset, origin, priv->fw_size);
}
static const struct file_operations fpga_fops = {
@@ -953,7 +1016,6 @@ static int fpga_of_probe(struct platform_device *op)
priv->dev = &op->dev;
mutex_init(&priv->lock);
init_completion(&priv->completion);
- videobuf_dma_init(&priv->vb);
dev_set_drvdata(priv->dev, priv);
dma_cap_zero(mask);
@@ -1091,7 +1153,6 @@ static struct platform_driver fpga_of_driver = {
.driver = {
.name = drv_name,
.of_match_table = fpga_of_match,
- .owner = THIS_MODULE,
},
};
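
These helpers reproduce, locally, what videobuf-dma-sg used to provide: back the firmware image with vmalloc_32() pages, describe them with a scatterlist, and map/unmap that list around the dmaengine transfer. A condensed lifecycle sketch (error handling trimmed; do_dma_transfer() is a placeholder for the prep/submit sequence in fpga_program_dma()):

static int fpga_load_via_dma(struct fpga_dev *priv)
{
	int nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
	int ret;

	ret = fpga_dma_init(priv, nr_pages);	/* vmalloc_32() + sg_set_page() */
	if (ret)
		return ret;

	ret = fpga_dma_map(priv);		/* dma_map_sg(..., DMA_TO_DEVICE) */
	if (ret)
		goto out_free;

	ret = do_dma_transfer(priv);		/* placeholder for the dmaengine part */

	fpga_dma_unmap(priv);			/* dma_unmap_sg() */
out_free:
	fpga_drop_firmware_data(priv);		/* vfree() the sglist and the buffer */
	return ret;
}
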
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 55e913b7eb11..68cdfe151bdb 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -98,6 +98,7 @@
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
@@ -105,8 +106,6 @@
#include <linux/kref.h>
#include <linux/io.h>
-#include <media/videobuf-dma-sg.h>
-
/* system controller registers */
#define SYS_IRQ_SOURCE_CTL 0x24
#define SYS_IRQ_OUTPUT_EN 0x28
@@ -142,7 +141,10 @@ struct fpga_info {
struct data_buf {
struct list_head entry;
- struct videobuf_dmabuf vb;
+ void *vaddr;
+ struct scatterlist *sglist;
+ int sglen;
+ int nr_pages;
size_t size;
};
@@ -207,6 +209,68 @@ static void fpga_device_release(struct kref *ref)
* Data Buffer Allocation Helpers
*/
+static int carma_dma_init(struct data_buf *buf, int nr_pages)
+{
+ struct page *pg;
+ int i;
+
+ buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
+ if (NULL == buf->vaddr) {
+ pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
+ return -ENOMEM;
+ }
+
+ pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
+ (unsigned long)buf->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
+ buf->nr_pages = nr_pages;
+
+ buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
+ if (NULL == buf->sglist)
+ goto vzalloc_err;
+
+ sg_init_table(buf->sglist, buf->nr_pages);
+ for (i = 0; i < buf->nr_pages; i++) {
+ pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
+ if (NULL == pg)
+ goto vmalloc_to_page_err;
+ sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return 0;
+
+vmalloc_to_page_err:
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+vzalloc_err:
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return -ENOMEM;
+}
+
+static int carma_dma_map(struct device *dev, struct data_buf *buf)
+{
+ buf->sglen = dma_map_sg(dev, buf->sglist,
+ buf->nr_pages, DMA_FROM_DEVICE);
+
+ if (0 == buf->sglen) {
+ pr_warn("%s: dma_map_sg failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
+{
+ if (!buf->sglen)
+ return 0;
+
+ dma_unmap_sg(dev, buf->sglist, buf->sglen, DMA_FROM_DEVICE);
+ buf->sglen = 0;
+ return 0;
+}
+
/**
* data_free_buffer() - free a single data buffer and all allocated memory
* @buf: the buffer to free
@@ -221,7 +285,8 @@ static void data_free_buffer(struct data_buf *buf)
return;
/* free all memory */
- videobuf_dma_free(&buf->vb);
+ vfree(buf->sglist);
+ vfree(buf->vaddr);
kfree(buf);
}
@@ -230,7 +295,7 @@ static void data_free_buffer(struct data_buf *buf)
* @bytes: the number of bytes required
*
* This allocates all space needed for a data buffer. It must be mapped before
- * use in a DMA transaction using videobuf_dma_map().
+ * use in a DMA transaction using carma_dma_map().
*
* Returns NULL on failure
*/
@@ -252,9 +317,8 @@ static struct data_buf *data_alloc_buffer(const size_t bytes)
INIT_LIST_HEAD(&buf->entry);
buf->size = bytes;
- /* allocate the videobuf */
- videobuf_dma_init(&buf->vb);
- ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
+ /* allocate the buffer */
+ ret = carma_dma_init(buf, nr_pages);
if (ret)
goto out_free_buf;
@@ -285,13 +349,13 @@ static void data_free_buffers(struct fpga_device *priv)
list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
list_del_init(&buf->entry);
- videobuf_dma_unmap(priv->dev, &buf->vb);
+ carma_dma_unmap(priv->dev, buf);
data_free_buffer(buf);
}
list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
list_del_init(&buf->entry);
- videobuf_dma_unmap(priv->dev, &buf->vb);
+ carma_dma_unmap(priv->dev, buf);
data_free_buffer(buf);
}
@@ -330,7 +394,7 @@ static int data_alloc_buffers(struct fpga_device *priv)
break;
/* map it for DMA */
- ret = videobuf_dma_map(priv->dev, &buf->vb);
+ ret = carma_dma_map(priv->dev, buf);
if (ret) {
data_free_buffer(buf);
break;
@@ -634,8 +698,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
dma_addr_t dst, src;
unsigned long dma_flags = 0;
- dst_sg = buf->vb.sglist;
- dst_nents = buf->vb.sglen;
+ dst_sg = buf->sglist;
+ dst_nents = buf->sglen;
src_sg = priv->corl_table.sgl;
src_nents = priv->corl_nents;
@@ -1134,7 +1198,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
spin_unlock_irq(&priv->lock);
/* Buffers are always mapped: unmap it */
- videobuf_dma_unmap(priv->dev, &dbuf->vb);
+ carma_dma_unmap(priv->dev, dbuf);
/* save the buffer for later */
reader->buf = dbuf;
@@ -1143,7 +1207,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
have_buffer:
/* Get the number of bytes available */
avail = dbuf->size - reader->buf_start;
- data = dbuf->vb.vaddr + reader->buf_start;
+ data = dbuf->vaddr + reader->buf_start;
/* Get the number of bytes we can transfer */
count = min(count, avail);
@@ -1171,7 +1235,7 @@ have_buffer:
* If it fails, we pretend that the read never happed and return
* -EFAULT to userspace. The read will be retried.
*/
- ret = videobuf_dma_map(priv->dev, &dbuf->vb);
+ ret = carma_dma_map(priv->dev, dbuf);
if (ret) {
dev_err(priv->dev, "unable to remap buffer for DMA\n");
return -EFAULT;
@@ -1203,7 +1267,7 @@ out_unlock:
spin_unlock_irq(&priv->lock);
if (drop_buffer) {
- videobuf_dma_unmap(priv->dev, &dbuf->vb);
+ carma_dma_unmap(priv->dev, dbuf);
data_free_buffer(dbuf);
}
@@ -1433,7 +1497,6 @@ static struct platform_driver data_of_driver = {
.driver = {
.name = drv_name,
.of_match_table = data_of_match,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index effd8c6b2b94..347f08f2fd48 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -365,7 +365,6 @@ done:
static struct platform_driver cs5535_mfgpt_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = cs5535_mfgpt_probe,
};
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 3d2b8677ec8a..b5b6bda44a00 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -336,6 +336,8 @@ struct cxl_sste {
struct cxl_afu {
irq_hw_number_t psl_hwirq;
irq_hw_number_t serr_hwirq;
+ char *err_irq_name;
+ char *psl_irq_name;
unsigned int serr_virq;
void __iomem *p1n_mmio;
void __iomem *p2n_mmio;
@@ -379,6 +381,12 @@ struct cxl_afu {
bool enabled;
};
+
+struct cxl_irq_name {
+ struct list_head list;
+ char *name;
+};
+
/*
* This is a cxl context. If the PSL is in dedicated mode, there will be one
* of these per AFU. If in AFU directed there can be lots of these.
@@ -403,6 +411,7 @@ struct cxl_context {
unsigned long *irq_bitmap; /* Accessed from IRQ context */
struct cxl_irq_ranges irqs;
+ struct list_head irq_names;
u64 fault_addr;
u64 fault_dsisr;
u64 afu_err;
@@ -444,6 +453,7 @@ struct cxl {
struct dentry *trace;
struct dentry *psl_err_chk;
struct dentry *debugfs;
+ char *irq_name;
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
@@ -563,9 +573,6 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
int cxl_afu_deactivate_mode(struct cxl_afu *afu);
int cxl_afu_select_best_mode(struct cxl_afu *afu);
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
int cxl_register_psl_irq(struct cxl_afu *afu);
void cxl_release_psl_irq(struct cxl_afu *afu);
int cxl_register_psl_err_irq(struct cxl *adapter);
@@ -612,7 +619,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
u64 amr);
int cxl_detach_process(struct cxl_context *ctx);
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info);
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
int cxl_check_error(struct cxl_afu *afu);
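
The new *_irq_name fields and the per-context irq_names list exist so that each interrupt line shows up in /proc/interrupts with a per-device (and, for AFU-directed interrupts, per-context) label rather than a bare "cxl". The underlying pattern, simplified from irq.c below (kasprintf()/request_irq() are the real calls; the surrounding bookkeeping is trimmed):

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	rc = request_irq(virq, cxl_irq_multiplexed, 0, afu->psl_irq_name, afu);
	if (rc) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
		return rc;
	}
	/* teardown: free_irq(virq, afu); kfree(afu->psl_irq_name); */
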
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index c99e896604ee..f8684bca2d79 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -133,7 +133,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
{
unsigned flt = 0;
int result;
- unsigned long access, flags;
+ unsigned long access, flags, inv_flags = 0;
if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
pr_devel("copro_handle_mm_fault failed: %#x\n", result);
@@ -149,8 +149,12 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
access |= _PAGE_RW;
if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
access |= _PAGE_USER;
+
+ if (dsisr & DSISR_NOHPTE)
+ inv_flags |= HPTE_NOHPTE_UPDATE;
+
local_irq_save(flags);
- hash_page_mm(mm, dar, access, 0x300);
+ hash_page_mm(mm, dar, access, 0x300, inv_flags);
local_irq_restore(flags);
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 336020c8e1af..c294925f73ee 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -92,20 +92,13 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
return IRQ_HANDLED;
}
-static irqreturn_t cxl_irq(int irq, void *data)
+static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
{
struct cxl_context *ctx = data;
- struct cxl_irq_info irq_info;
u64 dsisr, dar;
- int result;
-
- if ((result = cxl_get_irq(ctx, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
- return IRQ_HANDLED;
- }
- dsisr = irq_info.dsisr;
- dar = irq_info.dar;
+ dsisr = irq_info->dsisr;
+ dar = irq_info->dar;
pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
@@ -149,9 +142,9 @@ static irqreturn_t cxl_irq(int irq, void *data)
if (dsisr & CXL_PSL_DSISR_An_UR)
pr_devel("CXL interrupt: AURP PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_PE)
- return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
+ return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
if (dsisr & CXL_PSL_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err);
+ pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
if (ctx->pending_afu_err) {
/*
@@ -163,10 +156,10 @@ static irqreturn_t cxl_irq(int irq, void *data)
*/
dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
"undelivered to pe %i: %.llx\n",
- ctx->pe, irq_info.afu_err);
+ ctx->pe, irq_info->afu_err);
} else {
spin_lock(&ctx->lock);
- ctx->afu_err = irq_info.afu_err;
+ ctx->afu_err = irq_info->afu_err;
ctx->pending_afu_err = 1;
spin_unlock(&ctx->lock);
@@ -182,24 +175,43 @@ static irqreturn_t cxl_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+ if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
struct cxl_afu *afu = data;
struct cxl_context *ctx;
+ struct cxl_irq_info irq_info;
int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
int ret;
+ if ((ret = cxl_get_irq(afu, &irq_info))) {
+ WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+ return fail_psl_irq(afu, &irq_info);
+ }
+
rcu_read_lock();
ctx = idr_find(&afu->contexts_idr, ph);
if (ctx) {
- ret = cxl_irq(irq, ctx);
+ ret = cxl_irq(irq, ctx, &irq_info);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
- WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
- return IRQ_HANDLED;
+ WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
+ " %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+ " with outstanding transactions?)\n", ph, irq_info.dsisr,
+ irq_info.dar);
+ return fail_psl_irq(afu, &irq_info);
}
static irqreturn_t cxl_irq_afu(int irq, void *data)
@@ -243,7 +255,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
}
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie)
+ irq_handler_t handler, void *cookie, const char *name)
{
unsigned int virq;
int result;
@@ -259,7 +271,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
- result = request_irq(virq, handler, 0, "cxl", cookie);
+ result = request_irq(virq, handler, 0, name, cookie);
if (result) {
dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
return 0;
@@ -278,14 +290,15 @@ static int cxl_register_one_irq(struct cxl *adapter,
irq_handler_t handler,
void *cookie,
irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq)
+ unsigned int *dest_virq,
+ const char *name)
{
int hwirq, virq;
if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
return hwirq;
- if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
+ if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
goto err;
*dest_hwirq = hwirq;
@@ -302,10 +315,19 @@ int cxl_register_psl_err_irq(struct cxl *adapter)
{
int rc;
+ adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&adapter->dev));
+ if (!adapter->irq_name)
+ return -ENOMEM;
+
if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
&adapter->err_hwirq,
- &adapter->err_virq)))
+ &adapter->err_virq,
+ adapter->irq_name))) {
+ kfree(adapter->irq_name);
+ adapter->irq_name = NULL;
return rc;
+ }
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
@@ -317,6 +339,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter)
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
cxl_unmap_irq(adapter->err_virq, adapter);
cxl_release_one_irq(adapter, adapter->err_hwirq);
+ kfree(adapter->irq_name);
}
int cxl_register_serr_irq(struct cxl_afu *afu)
@@ -324,10 +347,18 @@ int cxl_register_serr_irq(struct cxl_afu *afu)
u64 serr;
int rc;
+ afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&afu->dev));
+ if (!afu->err_irq_name)
+ return -ENOMEM;
+
if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
&afu->serr_hwirq,
- &afu->serr_virq)))
+ &afu->serr_virq, afu->err_irq_name))) {
+ kfree(afu->err_irq_name);
+ afu->err_irq_name = NULL;
return rc;
+ }
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
@@ -341,24 +372,50 @@ void cxl_release_serr_irq(struct cxl_afu *afu)
cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
cxl_unmap_irq(afu->serr_virq, afu);
cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
}
int cxl_register_psl_irq(struct cxl_afu *afu)
{
- return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
- &afu->psl_hwirq, &afu->psl_virq);
+ int rc;
+
+ afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
+ dev_name(&afu->dev));
+ if (!afu->psl_irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
+ &afu->psl_hwirq, &afu->psl_virq,
+ afu->psl_irq_name))) {
+ kfree(afu->psl_irq_name);
+ afu->psl_irq_name = NULL;
+ }
+ return rc;
}
void cxl_release_psl_irq(struct cxl_afu *afu)
{
cxl_unmap_irq(afu->psl_virq, afu);
cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
+ kfree(afu->psl_irq_name);
+}
+
+void afu_irq_name_free(struct cxl_context *ctx)
+{
+ struct cxl_irq_name *irq_name, *tmp;
+
+ list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
+ kfree(irq_name->name);
+ list_del(&irq_name->list);
+ kfree(irq_name);
+ }
}
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
irq_hw_number_t hwirq;
- int rc, r, i;
+ int rc, r, i, j = 1;
+ struct cxl_irq_name *irq_name;
if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
return rc;
@@ -372,15 +429,47 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
sizeof(*ctx->irq_bitmap), GFP_KERNEL);
if (!ctx->irq_bitmap)
return -ENOMEM;
+
+ /*
+ * Allocate names first. If any fail, bail out before allocating
+ * actual hardware IRQs.
+ */
+ INIT_LIST_HEAD(&ctx->irq_names);
+ for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ irq_name = kmalloc(sizeof(struct cxl_irq_name),
+ GFP_KERNEL);
+ if (!irq_name)
+ goto out;
+ irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
+ dev_name(&ctx->afu->dev),
+ ctx->pe, j);
+ if (!irq_name->name) {
+ kfree(irq_name);
+ goto out;
+ }
+ /* Add to tail so the next loop gets the correct order */
+ list_add_tail(&irq_name->list, &ctx->irq_names);
+ j++;
+ }
+ }
+
+ /* We've allocated all memory now, so let's do the irq allocations */
+ irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
for (r = 1; r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
cxl_map_irq(ctx->afu->adapter, hwirq,
- cxl_irq_afu, ctx);
+ cxl_irq_afu, ctx, irq_name->name);
+ irq_name = list_next_entry(irq_name, list);
}
}
return 0;
+
+out:
+ afu_irq_name_free(ctx);
+ return -ENOMEM;
}
void afu_release_irqs(struct cxl_context *ctx)
@@ -398,5 +487,6 @@ void afu_release_irqs(struct cxl_context *ctx)
}
}
+ afu_irq_name_free(ctx);
cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d47532e8f4f1..9a5a442269a8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -637,18 +637,18 @@ int cxl_detach_process(struct cxl_context *ctx)
return detach_process_native_afu_directed(ctx);
}
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info)
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
{
u64 pidtid;
- info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
- info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An);
- info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An);
- pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An);
+ info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
+ info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
+ pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
info->pid = pidtid >> 32;
info->tid = pidtid & 0xffffffff;
- info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An);
- info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An);
+ info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+ info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
return 0;
}
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index d87f77f790d6..2d3db81be099 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -56,6 +56,7 @@ struct at24_data {
struct at24_platform_data chip;
struct memory_accessor macc;
int use_smbus;
+ int use_smbus_write;
/*
* Lock protects against activities from other Linux tasks,
@@ -324,7 +325,7 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
{
struct i2c_client *client;
struct i2c_msg msg;
- ssize_t status;
+ ssize_t status = 0;
unsigned long timeout, write_time;
unsigned next_page;
@@ -365,9 +366,18 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
write_time = jiffies;
- if (at24->use_smbus) {
- status = i2c_smbus_write_i2c_block_data(client,
- offset, count, buf);
+ if (at24->use_smbus_write) {
+ switch (at24->use_smbus_write) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ status = i2c_smbus_write_i2c_block_data(client,
+ offset, count, buf);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_write_byte_data(client,
+ offset, buf[0]);
+ break;
+ }
+
if (status == 0)
status = count;
} else {
@@ -487,6 +497,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_platform_data chip;
bool writable;
int use_smbus = 0;
+ int use_smbus_write = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
@@ -546,6 +557,18 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ /* Use I2C operations unless we're stuck with SMBus extensions. */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ use_smbus_write = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
+ use_smbus_write = I2C_SMBUS_BYTE_DATA;
+ chip.page_size = 1;
+ }
+ }
+
if (chip.flags & AT24_FLAG_TAKE8ADDR)
num_addresses = 8;
else
@@ -559,6 +582,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
mutex_init(&at24->lock);
at24->use_smbus = use_smbus;
+ at24->use_smbus_write = use_smbus_write;
at24->chip = chip;
at24->num_addresses = num_addresses;
@@ -576,8 +600,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
writable = !(chip.flags & AT24_FLAG_READONLY);
if (writable) {
- if (!use_smbus || i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ if (!use_smbus || use_smbus_write) {
unsigned write_max = chip.page_size;
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
index 3f2b625b2032..8385177ff32b 100644
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -146,7 +146,6 @@ static struct platform_driver sunxi_sid_driver = {
.remove = sunxi_sid_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = sunxi_sid_of_match,
},
};
diff --git a/drivers/misc/fuse/Makefile b/drivers/misc/fuse/Makefile
deleted file mode 100644
index 0679c4febc89..000000000000
--- a/drivers/misc/fuse/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 7cb3b7e41739..1ca94e6fa8fb 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -590,6 +590,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
m->nr_pages,
1, /* write by caller */
m->page_list); /* ptrs to pages */
+ if (rc < 0)
+ goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 6cdce8477f57..79f53941779d 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -262,6 +262,7 @@ out:
static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
{
struct mei_msg_hdr mei_hdr;
+ struct mei_cl *cl;
int ret;
if (!dev || !cb)
@@ -277,8 +278,9 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_msg_buf_size = cb->request_buffer.size;
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
+ cl = &dev->iamthif_cl;
- ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
+ ret = mei_cl_flow_ctrl_creds(cl);
if (ret < 0)
return ret;
@@ -292,8 +294,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.msg_complete = 1;
}
- mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
- mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
mei_hdr.internal = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
@@ -302,7 +304,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
return ret;
if (mei_hdr.msg_complete) {
- if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -EIO;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
@@ -360,8 +362,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
void mei_amthif_run_next_cmd(struct mei_device *dev)
{
struct mei_cl_cb *cb;
- struct mei_cl_cb *next;
- int status;
+ int ret;
if (!dev)
return;
@@ -376,16 +377,14 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
- list_for_each_entry_safe(cb, next, &dev->amthif_cmd_list.list, list) {
- list_del(&cb->list);
- if (!cb->cl)
- continue;
- status = mei_amthif_send_cmd(dev, cb);
- if (status)
- dev_warn(dev->dev, "amthif write failed status = %d\n",
- status);
- break;
- }
+ cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
+ typeof(*cb), list);
+ if (!cb)
+ return;
+ list_del(&cb->list);
+ ret = mei_amthif_send_cmd(dev, cb);
+ if (ret)
+ dev_warn(dev->dev, "amthif write failed status = %d\n", ret);
}
@@ -536,9 +535,6 @@ int mei_amthif_irq_read_msg(struct mei_device *dev,
cb = dev->iamthif_current_cb;
dev->iamthif_current_cb = NULL;
- if (!cb->cl)
- return -ENODEV;
-
dev->iamthif_stall_timer = 0;
cb->buf_idx = dev->iamthif_msg_buf_index;
cb->read_time = jiffies;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4d20d60ca38d..b3a72bca5242 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -140,7 +140,7 @@ static struct device_type mei_cl_device_type = {
.release = mei_cl_dev_release,
};
-static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev,
+struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
uuid_le uuid)
{
struct mei_cl *cl;
@@ -160,7 +160,7 @@ struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
struct mei_cl *cl;
int status;
- cl = mei_bus_find_mei_cl_by_uuid(dev, uuid);
+ cl = mei_cl_bus_find_cl_by_uuid(dev, uuid);
if (cl == NULL)
return NULL;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index bc9ba5359bc6..1382d551d7ed 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -146,7 +146,7 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
/* enable removing everything if no cl is specified */
list_for_each_entry_safe(cb, next, &list->list, list) {
- if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
+ if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
list_del(&cb->list);
if (free)
mei_io_cb_free(cb);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index ce1566715f80..b60b4263cf0f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -34,7 +34,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
int pos = 0;
int ret;
-#define HDR " |id|addr| UUID |con|msg len|sb|\n"
+#define HDR " |id|fix| UUID |con|msg len|sb|\n"
mutex_lock(&dev->device_lock);
@@ -56,12 +56,8 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
list_for_each_entry(me_cl, &dev->me_clients, list) {
- /* skip me clients that cannot be connected */
- if (me_cl->props.max_number_of_connections == 0)
- continue;
-
pos += scnprintf(buf + pos, bufsz - pos,
- "%2d|%2d|%4d|%pUl|%3d|%7d|%2d|\n",
+ "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|\n",
i++, me_cl->client_id,
me_cl->props.fixed_address,
&me_cl->props.protocol_name,
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 49a2653d91a5..239d7f5d6a92 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -562,17 +562,17 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
* mei_hbm_cl_disconnect_res - update the client state according
* disconnect response
*
+ * @dev: the device structure
* @cl: mei host client
* @cmd: disconnect client response host bus message
*/
-static void mei_hbm_cl_disconnect_res(struct mei_cl *cl,
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_client_connect_response *rs =
(struct hbm_client_connect_response *)cmd;
- dev_dbg(cl->dev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n",
- rs->me_addr, rs->host_addr, rs->status);
+ cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
if (rs->status == MEI_CL_DISCONN_SUCCESS)
cl->state = MEI_FILE_DISCONNECTED;
@@ -598,17 +598,17 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
* mei_hbm_cl_connect_res - update the client state according
* connection response
*
+ * @dev: the device structure
* @cl: mei host client
* @cmd: connect client response host bus message
*/
-static void mei_hbm_cl_connect_res(struct mei_cl *cl,
+static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
struct mei_hbm_cl_cmd *cmd)
{
struct hbm_client_connect_response *rs =
(struct hbm_client_connect_response *)cmd;
- dev_dbg(cl->dev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n",
- rs->me_addr, rs->host_addr,
+ cl_dbg(dev, cl, "hbm: connect response status=%s\n",
mei_cl_conn_status_str(rs->status));
if (rs->status == MEI_CL_CONN_SUCCESS)
@@ -637,11 +637,6 @@ static void mei_hbm_cl_res(struct mei_device *dev,
list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
cl = cb->cl;
- /* this should not happen */
- if (WARN_ON(!cl)) {
- list_del_init(&cb->list);
- continue;
- }
if (cb->fop_type != fop_type)
continue;
@@ -657,10 +652,10 @@ static void mei_hbm_cl_res(struct mei_device *dev,
switch (fop_type) {
case MEI_FOP_CONNECT:
- mei_hbm_cl_connect_res(cl, rs);
+ mei_hbm_cl_connect_res(dev, cl, rs);
break;
case MEI_FOP_DISCONNECT:
- mei_hbm_cl_disconnect_res(cl, rs);
+ mei_hbm_cl_disconnect_res(dev, cl, rs);
break;
default:
return;
@@ -811,8 +806,6 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
return -EPROTO;
}
- dev->hbm_state = MEI_HBM_STARTED;
-
if (mei_hbm_enum_clients_req(dev)) {
dev_err(dev->dev, "hbm: start: failed to send enumeration request\n");
return -EIO;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index b7cd3d857fd5..2544db7d1649 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -26,17 +26,17 @@ struct mei_cl;
*
* @MEI_HBM_IDLE : protocol not started
* @MEI_HBM_STARTING : start request message was sent
- * @MEI_HBM_STARTED : start reply message was received
* @MEI_HBM_ENUM_CLIENTS : enumeration request was sent
* @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @MEI_HBM_STARTED : enumeration was completed
* @MEI_HBM_STOPPED : stopping exchange
*/
enum mei_hbm_state {
MEI_HBM_IDLE = 0,
MEI_HBM_STARTING,
- MEI_HBM_STARTED,
MEI_HBM_ENUM_CLIENTS,
MEI_HBM_CLIENT_PROPERTIES,
+ MEI_HBM_STARTED,
MEI_HBM_STOPPED,
};
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c5feafdd58a8..9eb7ed70ace2 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,14 +117,18 @@
#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */
-/* Host Firmware Status Registers in PCI Config Space */
-#define PCI_CFG_HFS_1 0x40
-#define PCI_CFG_HFS_2 0x48
-
/*
* MEI HW Section
*/
+/* Host Firmware Status Registers in PCI Config Space */
+#define PCI_CFG_HFS_1 0x40
+#define PCI_CFG_HFS_2 0x48
+#define PCI_CFG_HFS_3 0x60
+#define PCI_CFG_HFS_4 0x64
+#define PCI_CFG_HFS_5 0x68
+#define PCI_CFG_HFS_6 0x6C
+
/* MEI registers */
/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
#define H_CB_WW 0
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 4f2fd6fc1e23..ff2755062b44 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -270,10 +270,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
static void mei_me_host_set_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
- hw->host_hw_state = mei_hcsr_read(hw);
- hw->host_hw_state |= H_IE | H_IG | H_RDY;
- mei_hcsr_set(hw, hw->host_hw_state);
+ hcsr |= H_IE | H_IG | H_RDY;
+ mei_hcsr_set(hw, hcsr);
}
/**
@@ -285,9 +285,9 @@ static void mei_me_host_set_ready(struct mei_device *dev)
static bool mei_me_host_is_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
- hw->host_hw_state = mei_hcsr_read(hw);
- return (hw->host_hw_state & H_RDY) == H_RDY;
+ return (hcsr & H_RDY) == H_RDY;
}
/**
@@ -299,9 +299,9 @@ static bool mei_me_host_is_ready(struct mei_device *dev)
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ u32 mecsr = mei_me_mecsr_read(hw);
- hw->me_hw_state = mei_me_mecsr_read(hw);
- return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+ return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}
/**
@@ -356,12 +356,13 @@ static int mei_me_hw_start(struct mei_device *dev)
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr;
char read_ptr, write_ptr;
- hw->host_hw_state = mei_hcsr_read(hw);
+ hcsr = mei_hcsr_read(hw);
- read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
- write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+ read_ptr = (char) ((hcsr & H_CBRP) >> 8);
+ write_ptr = (char) ((hcsr & H_CBWP) >> 16);
return (unsigned char) (write_ptr - read_ptr);
}
@@ -474,13 +475,14 @@ static int mei_me_write_message(struct mei_device *dev,
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ u32 me_csr;
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
- hw->me_hw_state = mei_me_mecsr_read(hw);
- buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
- read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
- write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+ me_csr = mei_me_mecsr_read(hw);
+ buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
+ read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
+ write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
filled_slots = (unsigned char) (write_ptr - read_ptr);
/* check for overflow */
@@ -833,6 +835,14 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.fw_status.status[0] = PCI_CFG_HFS_1, \
.fw_status.status[1] = PCI_CFG_HFS_2
+#define MEI_CFG_PCH8_HFS \
+ .fw_status.count = 6, \
+ .fw_status.status[0] = PCI_CFG_HFS_1, \
+ .fw_status.status[1] = PCI_CFG_HFS_2, \
+ .fw_status.status[2] = PCI_CFG_HFS_3, \
+ .fw_status.status[3] = PCI_CFG_HFS_4, \
+ .fw_status.status[4] = PCI_CFG_HFS_5, \
+ .fw_status.status[5] = PCI_CFG_HFS_6
/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
@@ -856,9 +866,14 @@ const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
MEI_CFG_FW_NM,
};
-/* PCH Lynx Point with quirk for SPS Firmware exclusion */
-const struct mei_cfg mei_me_lpt_cfg = {
- MEI_CFG_PCH_HFS,
+/* PCH8 Lynx Point and newer devices */
+const struct mei_cfg mei_me_pch8_cfg = {
+ MEI_CFG_PCH8_HFS,
+};
+
+/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
+const struct mei_cfg mei_me_pch8_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
MEI_CFG_FW_SPS,
};
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index e6a59a62573a..d6567af44377 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -51,18 +51,11 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
- * @host_hw_state: cached host state
- * @me_hw_state: cached me (fw) state
* @pg_state: power gating state
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
- /*
- * hw states of host and fw(ME)
- */
- u32 host_hw_state;
- u32 me_hw_state;
enum mei_pg_state pg_state;
};
@@ -72,7 +65,8 @@ extern const struct mei_cfg mei_me_legacy_cfg;
extern const struct mei_cfg mei_me_ich_cfg;
extern const struct mei_cfg mei_me_pch_cfg;
extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg;
-extern const struct mei_cfg mei_me_lpt_cfg;
+extern const struct mei_cfg mei_me_pch8_cfg;
+extern const struct mei_cfg mei_me_pch8_sps_cfg;
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
const struct mei_cfg *cfg);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index c5e1902e493f..618ea721aca8 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -700,11 +700,10 @@ static int mei_txe_write(struct mei_device *dev,
mei_txe_input_ready_interrupt_enable(dev);
if (!mei_txe_is_input_ready(dev)) {
- struct mei_fw_status fw_status;
+ char fw_sts_str[MEI_FW_STATUS_STR_SZ];
- mei_fw_status(dev, &fw_status);
- dev_err(dev->dev, "Input is not ready " FW_STS_FMT "\n",
- FW_STS_PRM(fw_status));
+ mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
+ dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
return -EAGAIN;
}
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 7901d076c127..9306219d5675 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -54,6 +54,35 @@ const char *mei_pg_state_str(enum mei_pg_state state)
#undef MEI_PG_STATE
}
+/**
+ * mei_fw_status2str - convert fw status registers to printable string
+ *
+ * @fw_status: firmware status
+ * @buf: string buffer, at least MEI_FW_STATUS_STR_SZ bytes
+ * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
+ *
+ * Return: number of bytes written or -EINVAL if the buffer is too small
+ */
+ssize_t mei_fw_status2str(struct mei_fw_status *fw_status,
+ char *buf, size_t len)
+{
+ ssize_t cnt = 0;
+ int i;
+
+ buf[0] = '\0';
+
+ if (len < MEI_FW_STATUS_STR_SZ)
+ return -EINVAL;
+
+ for (i = 0; i < fw_status->count; i++)
+ cnt += scnprintf(buf + cnt, len - cnt, "%08X ",
+ fw_status->status[i]);
+
+ /* drop last space */
+ buf[cnt] = '\0';
+ return cnt;
+}
+EXPORT_SYMBOL_GPL(mei_fw_status2str);
/**
* mei_cancel_work - Cancel mei background jobs
@@ -86,12 +115,11 @@ int mei_reset(struct mei_device *dev)
state != MEI_DEV_DISABLED &&
state != MEI_DEV_POWER_DOWN &&
state != MEI_DEV_POWER_UP) {
- struct mei_fw_status fw_status;
+ char fw_sts_str[MEI_FW_STATUS_STR_SZ];
- mei_fw_status(dev, &fw_status);
- dev_warn(dev->dev,
- "unexpected reset: dev_state = %s " FW_STS_FMT "\n",
- mei_dev_state_str(state), FW_STS_PRM(fw_status));
+ mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
+ dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ mei_dev_state_str(state), fw_sts_str);
}
/* we're already in reset, cancel the init timer
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 20c6c511f438..711cddfa9c99 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -44,8 +44,6 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
list_for_each_entry_safe(cb, next, &compl_list->list, list) {
cl = cb->cl;
list_del(&cb->list);
- if (!cl)
- continue;
dev_dbg(dev->dev, "completing call back.\n");
if (cl == &dev->iamthif_cl)
@@ -105,7 +103,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
cl = cb->cl;
- if (!cl || !mei_cl_is_reading(cl, mei_hdr))
+ if (!mei_cl_is_reading(cl, mei_hdr))
continue;
cl->reading_state = MEI_READING;
@@ -449,8 +447,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
list = &dev->write_waiting_list;
list_for_each_entry_safe(cb, next, &list->list, list) {
cl = cb->cl;
- if (cl == NULL)
- continue;
cl->status = 0;
list_del(&cb->list);
@@ -489,10 +485,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
dev_dbg(dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
cl = cb->cl;
- if (!cl) {
- list_del(&cb->list);
- return -ENODEV;
- }
switch (cb->fop_type) {
case MEI_FOP_DISCONNECT:
/* send disconnect message */
@@ -530,8 +522,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
dev_dbg(dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
cl = cb->cl;
- if (cl == NULL)
- continue;
if (cl == &dev->iamthif_cl)
ret = mei_amthif_irq_write(cl, cb, cmpl_list);
else
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index beedc91f03a6..ae56ba6ca0e3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -631,6 +631,44 @@ out:
return mask;
}
+/**
+ * fw_status_show - mei device attribute show method
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t fw_status_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct mei_fw_status fw_status;
+ int err, i;
+ ssize_t cnt = 0;
+
+ mutex_lock(&dev->device_lock);
+ err = mei_fw_status(dev, &fw_status);
+ mutex_unlock(&dev->device_lock);
+ if (err) {
+ dev_err(device, "read fw_status error = %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < fw_status.count; i++)
+ cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
+ fw_status.status[i]);
+ return cnt;
+}
+static DEVICE_ATTR_RO(fw_status);
+
+static struct attribute *mei_attrs[] = {
+ &dev_attr_fw_status.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(mei);
+
/*
* file operations structure will be used for mei char device.
*/
@@ -710,8 +748,9 @@ int mei_register(struct mei_device *dev, struct device *parent)
goto err_dev_add;
}
- clsdev = device_create(mei_class, parent, devno,
- NULL, "mei%d", dev->minor);
+ clsdev = device_create_with_groups(mei_class, parent, devno,
+ dev, mei_groups,
+ "mei%d", dev->minor);
if (IS_ERR(clsdev)) {
dev_err(parent, "unable to create device %d:%d\n",
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 61b04d7646f1..3dad74a8d496 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -152,7 +152,10 @@ struct mei_msg_data {
};
/* Maximum number of processed FW status registers */
-#define MEI_FW_STATUS_MAX 2
+#define MEI_FW_STATUS_MAX 6
+/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */
+#define MEI_FW_STATUS_STR_SZ (MEI_FW_STATUS_MAX * (8 + 1))
+
/*
* struct mei_fw_status - storage of FW status data
@@ -349,6 +352,7 @@ void mei_cl_bus_rx_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *dev);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
+struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
/**
@@ -804,11 +808,6 @@ static inline int mei_fw_status(struct mei_device *dev,
return dev->ops->fw_status(dev, fw_status);
}
-#define FW_STS_FMT "%08X %08X"
-#define FW_STS_PRM(fw_status) \
- (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \
- (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF
-
bool mei_hbuf_acquire(struct mei_device *dev);
bool mei_write_is_idle(struct mei_device *dev);
@@ -832,4 +831,32 @@ void mei_deregister(struct mei_device *dev);
(hdr)->host_addr, (hdr)->me_addr, \
(hdr)->length, (hdr)->internal, (hdr)->msg_complete
+ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
+/**
+ * mei_fw_status_str - fetch and convert fw status registers to printable string
+ *
+ * @dev: the device structure
+ * @buf: string buffer, at least MEI_FW_STATUS_STR_SZ bytes
+ * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
+ *
+ * Return: number of bytes written or < 0 on failure
+ */
+static inline ssize_t mei_fw_status_str(struct mei_device *dev,
+ char *buf, size_t len)
+{
+ struct mei_fw_status fw_status;
+ int ret;
+
+ buf[0] = '\0';
+
+ ret = mei_fw_status(dev, &fw_status);
+ if (ret)
+ return ret;
+
+ ret = mei_fw_status2str(&fw_status, buf, MEI_FW_STATUS_STR_SZ);
+
+ return ret;
+}
+
+
#endif
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 622654323177..60ca9240368e 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -117,8 +117,6 @@ struct mei_nfc_dev {
u16 recv_req_id;
};
-static struct mei_nfc_dev nfc_dev;
-
/* UUIDs for NFC F/W clients */
const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
0x94, 0xd4, 0x50, 0x26,
@@ -138,6 +136,9 @@ static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
static void mei_nfc_free(struct mei_nfc_dev *ndev)
{
+ if (!ndev)
+ return;
+
if (ndev->cl) {
list_del(&ndev->cl->device_link);
mei_cl_unlink(ndev->cl);
@@ -150,7 +151,7 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
kfree(ndev->cl_info);
}
- memset(ndev, 0, sizeof(struct mei_nfc_dev));
+ kfree(ndev);
}
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
@@ -319,9 +320,10 @@ err:
static int mei_nfc_enable(struct mei_cl_device *cldev)
{
struct mei_device *dev;
- struct mei_nfc_dev *ndev = &nfc_dev;
+ struct mei_nfc_dev *ndev;
int ret;
+ ndev = (struct mei_nfc_dev *)cldev->priv_data;
dev = ndev->cl->dev;
ret = mei_nfc_connect(ndev);
@@ -479,15 +481,25 @@ err:
int mei_nfc_host_init(struct mei_device *dev)
{
- struct mei_nfc_dev *ndev = &nfc_dev;
+ struct mei_nfc_dev *ndev;
struct mei_cl *cl_info, *cl = NULL;
struct mei_me_client *me_cl;
int ret;
- /* already initialized */
- if (ndev->cl_info)
+
+ /* In case of an internal reset, bail out
+ * as the device is already set up
+ */
+ cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
+ if (cl)
return 0;
+ ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ndev->cl_info = mei_cl_allocate(dev);
ndev->cl = mei_cl_allocate(dev);
@@ -550,9 +562,31 @@ err:
void mei_nfc_host_exit(struct mei_device *dev)
{
- struct mei_nfc_dev *ndev = &nfc_dev;
+ struct mei_nfc_dev *ndev;
+ struct mei_cl *cl;
+ struct mei_cl_device *cldev;
+
+ cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
+ if (!cl)
+ return;
+
+ cldev = cl->device;
+ if (!cldev)
+ return;
- cancel_work_sync(&ndev->init_work);
+ ndev = (struct mei_nfc_dev *)cldev->priv_data;
+ if (ndev)
+ cancel_work_sync(&ndev->init_work);
+
+ cldev->priv_data = NULL;
+
+ mutex_lock(&dev->device_lock);
+ /* Need to remove the device here
+ * since mei_nfc_free will unlink the clients
+ */
+ mei_cl_remove_device(cldev);
+ mei_nfc_free(ndev);
+ mutex_unlock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index cf20d397068a..bd3039ab8f98 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -76,12 +76,12 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
/* required last entry */
{0, }
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 1f572deacf54..c86e2ddbe30a 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -37,6 +37,7 @@
static const struct pci_device_id mei_txe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
+ {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
{0, }
};
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index b836dfffceb5..b1d892cea94d 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -270,15 +270,18 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
{
struct mei_device *dev;
+ struct mei_cl *cl;
int ret;
dev = watchdog_get_drvdata(wd_dev);
if (!dev)
return -ENODEV;
+ cl = &dev->wd_cl;
+
mutex_lock(&dev->device_lock);
- if (dev->wd_cl.state != MEI_FILE_CONNECTED) {
+ if (cl->state != MEI_FILE_CONNECTED) {
dev_err(dev->dev, "wd: not connected.\n");
ret = -ENODEV;
goto end;
@@ -286,12 +289,12 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
- ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
+ ret = mei_cl_flow_ctrl_creds(cl);
if (ret < 0)
goto end;
+
/* Check if we can send the ping to HW*/
if (ret && mei_hbuf_acquire(dev)) {
-
dev_dbg(dev->dev, "wd: sending ping\n");
ret = mei_wd_send(dev);
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index e64794730e21..e486a0c26267 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -68,7 +68,7 @@ static inline struct device *mic_dev(struct mic_vdev *mvdev)
}
/* This gets the device's feature bits. */
-static u32 mic_get_features(struct virtio_device *vdev)
+static u64 mic_get_features(struct virtio_device *vdev)
{
unsigned int i, bits;
u32 features = 0;
@@ -76,8 +76,7 @@ static u32 mic_get_features(struct virtio_device *vdev)
u8 __iomem *in_features = mic_vq_features(desc);
int feature_len = ioread8(&desc->feature_len);
- bits = min_t(unsigned, feature_len,
- sizeof(vdev->features)) * 8;
+ bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
for (i = 0; i < bits; i++)
if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
features |= BIT(i);
@@ -85,7 +84,7 @@ static u32 mic_get_features(struct virtio_device *vdev)
return features;
}
-static void mic_finalize_features(struct virtio_device *vdev)
+static int mic_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
@@ -97,14 +96,19 @@ static void mic_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
memset_io(out_features, 0, feature_len);
bits = min_t(unsigned, feature_len,
sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
- if (test_bit(i, vdev->features))
+ if (__virtio_test_bit(vdev, i))
iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
&out_features[i / 8]);
}
+
+ return 0;
}
/*
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 9d57545d64f6..e98e537d68e3 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -255,7 +255,6 @@ static struct platform_driver __refdata mic_platform_driver = {
.shutdown = mic_platform_shutdown,
.driver = {
.name = mic_driver_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 956597321d2a..9a17a9bab8d6 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -158,6 +158,7 @@ static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
}
+#ifdef CONFIG_PM
/* pch_phub_save_reg_conf - saves register configuration */
static void pch_phub_save_reg_conf(struct pci_dev *pdev)
{
@@ -280,6 +281,7 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
+#endif
/**
* pch_phub_read_serial_rom() - Reading Serial ROM
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 21c2337bad68..e4b7ee4f57b8 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -858,7 +858,6 @@ static struct platform_driver kim_platform_driver = {
.resume = kim_resume,
.driver = {
.name = "kim",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 1b7b303085d2..7aaaf51e1596 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -27,6 +27,7 @@
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
+#include <linux/skbuff.h>
#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
@@ -429,11 +430,11 @@ static int __qp_memcpy_from_queue(void *dest,
to_copy = size - bytes_copied;
if (is_iovec) {
- struct iovec *iov = (struct iovec *)dest;
+ struct msghdr *msg = dest;
int err;
/* The iovec will track bytes_copied internally. */
- err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+ err = memcpy_to_msg(msg, (u8 *)va + page_offset,
to_copy);
if (err != 0) {
if (kernel_if->host)
@@ -3264,13 +3265,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
* of bytes dequeued or < 0 on error.
*/
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
- void *iov,
+ struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
- if (!qpair || !iov)
+ if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
@@ -3279,7 +3280,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- iov, iov_size,
+ msg, iov_size,
qp_memcpy_from_queue_iov,
true);
@@ -3308,13 +3309,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
* of bytes peeked or < 0 on error.
*/
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
- void *iov,
+ struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
- if (!qpair || !iov)
+ if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
@@ -3323,7 +3324,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- iov, iov_size,
+ msg, iov_size,
qp_memcpy_from_queue_iov,
false);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 725f6a6fd89b..ed77fbfa4774 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1206,7 +1206,6 @@ static struct platform_driver au1xmmc_driver = {
.resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 5d4c5e0fba2f..1625f908dc70 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1466,7 +1466,6 @@ static const struct dev_pm_ops davinci_mmcsd_pm = {
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
- .owner = THIS_MODULE,
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c01eac7c8196..e3e56d35f0ee 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -463,7 +463,6 @@ static const struct dev_pm_ops sdhci_acpi_pm_ops = {
static struct platform_driver sdhci_acpi_driver = {
.driver = {
.name = "sdhci-acpi",
- .owner = THIS_MODULE,
.acpi_match_table = sdhci_acpi_ids,
.pm = &sdhci_acpi_pm_ops,
},
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 2616fdfdbbeb..2ca0afaab792 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -143,7 +143,6 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
static struct platform_driver tmio_mmc_driver = {
.driver = {
.name = "tmio-mmc",
- .owner = THIS_MODULE,
.pm = &tmio_mmc_dev_pm_ops,
},
.probe = tmio_mmc_probe,
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index f0a39eb049af..54b082b1804a 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1834,7 +1834,6 @@ static struct platform_driver usdhi6_driver = {
.remove = usdhi6_remove,
.driver = {
.name = "usdhi6rol0",
- .owner = THIS_MODULE,
.of_match_table = usdhi6_of_match,
},
};
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 9a6dfb0c4ecc..ca183ea767b3 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1914,7 +1914,6 @@ static struct platform_driver wbsd_driver = {
.resume = wbsd_platform_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 54181b4f6e9e..dd2e1aa95ba3 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -991,7 +991,6 @@ static struct platform_driver wmt_mci_driver = {
.remove = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = wmt_mci_pm_ops,
.of_match_table = wmt_mci_dt_ids,
},
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 94b821042d9d..71fea895ce38 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -133,7 +133,7 @@ config MTD_OF_PARTS
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
- as described in Documentation/devicetree/booting-without-of.txt.
+ as described in Documentation/devicetree/bindings/mtd/partition.txt.
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 8057f52a45b7..cc13ea5ce4d5 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -15,8 +15,12 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-/* 10 parts were found on sflash on Netgear WNDR4500 */
-#define BCM47XXPART_MAX_PARTS 12
+/*
+ * NAND flash on Netgear R6250 was verified to contain 15 partitions.
+ * This will result in allocating an oversized array for some old devices, but the
+ * memory will be freed soon anyway (see mtd_device_parse_register).
+ */
+#define BCM47XXPART_MAX_PARTS 20
/*
* Amount of bytes we read when analyzing each block of flash memory.
@@ -168,18 +172,26 @@ static int bcm47xxpart_parse(struct mtd_info *master,
i++;
}
- bcm47xxpart_add_part(&parts[curr_part++], "linux",
- offset + trx->offset[i], 0);
- i++;
+ if (trx->offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "linux",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
/*
* Pure rootfs size is known and can be calculated as:
* trx->length - trx->offset[i]. We don't fill it as
* we want to have jffs2 (overlay) in the same mtd.
*/
- bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
- offset + trx->offset[i], 0);
- i++;
+ if (trx->offset[i]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "rootfs",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
last_trx_part = curr_part - 1;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 3096f3ded3ad..286b97a304cf 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -2654,8 +2654,7 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
kfree(cfi);
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
- if (region->lockmap)
- kfree(region->lockmap);
+ kfree(region->lockmap);
}
kfree(mtd->eraseregions);
}
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 77de29bc02ba..3d008a9410be 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -329,7 +329,6 @@ static struct platform_driver bcma_sflash_driver = {
.remove = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 21cc4b66feaa..448ce42f951e 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -1655,22 +1656,21 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
u8 fctrl;
mutex_lock(&docg3->cascade->lock);
fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s,
- "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
- fctrl,
- fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
- fctrl & DOC_CTRL_CE ? "active" : "inactive",
- fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
- fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
- fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
- return pos;
+ seq_printf(s, "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+ fctrl,
+ fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+ fctrl & DOC_CTRL_CE ? "active" : "inactive",
+ fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+ fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+ fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+
+ return 0;
}
DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
@@ -1678,58 +1678,56 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0, pctrl, mode;
+ int pctrl, mode;
mutex_lock(&docg3->cascade->lock);
pctrl = doc_register_readb(docg3, DOC_ASICMODE);
mode = pctrl & 0x03;
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s,
- "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
- pctrl,
- pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
- pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
- pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
- pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
- pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
- mode >> 1, mode & 0x1);
+ seq_printf(s,
+ "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+ pctrl,
+ pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+ pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+ pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+ pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+ mode >> 1, mode & 0x1);
switch (mode) {
case DOC_ASICMODE_RESET:
- pos += seq_puts(s, "reset");
+ seq_puts(s, "reset");
break;
case DOC_ASICMODE_NORMAL:
- pos += seq_puts(s, "normal");
+ seq_puts(s, "normal");
break;
case DOC_ASICMODE_POWERDOWN:
- pos += seq_puts(s, "powerdown");
+ seq_puts(s, "powerdown");
break;
}
- pos += seq_puts(s, ")\n");
- return pos;
+ seq_puts(s, ")\n");
+ return 0;
}
DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
static int dbg_device_id_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
int id;
mutex_lock(&docg3->cascade->lock);
id = doc_register_readb(docg3, DOC_DEVICESELECT);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s, "DeviceId = %d\n", id);
- return pos;
+ seq_printf(s, "DeviceId = %d\n", id);
+ return 0;
}
DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
static int dbg_protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
@@ -1742,45 +1740,40 @@ static int dbg_protection_show(struct seq_file *s, void *p)
dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
mutex_unlock(&docg3->cascade->lock);
- pos += seq_printf(s, "Protection = 0x%02x (",
- protect);
+ seq_printf(s, "Protection = 0x%02x (", protect);
if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
- pos += seq_puts(s, "FOUNDRY_OTP_LOCK,");
+ seq_puts(s, "FOUNDRY_OTP_LOCK,");
if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
- pos += seq_puts(s, "CUSTOMER_OTP_LOCK,");
+ seq_puts(s, "CUSTOMER_OTP_LOCK,");
if (protect & DOC_PROTECT_LOCK_INPUT)
- pos += seq_puts(s, "LOCK_INPUT,");
+ seq_puts(s, "LOCK_INPUT,");
if (protect & DOC_PROTECT_STICKY_LOCK)
- pos += seq_puts(s, "STICKY_LOCK,");
+ seq_puts(s, "STICKY_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ENABLED)
- pos += seq_puts(s, "PROTECTION ON,");
+ seq_puts(s, "PROTECTION ON,");
if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
- pos += seq_puts(s, "IPL_DOWNLOAD_LOCK,");
+ seq_puts(s, "IPL_DOWNLOAD_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ERROR)
- pos += seq_puts(s, "PROTECT_ERR,");
+ seq_puts(s, "PROTECT_ERR,");
else
- pos += seq_puts(s, "NO_PROTECT_ERR");
- pos += seq_puts(s, ")\n");
-
- pos += seq_printf(s, "DPS0 = 0x%02x : "
- "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
- "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
- dps0, dps0_low, dps0_high,
- !!(dps0 & DOC_DPS_OTP_PROTECTED),
- !!(dps0 & DOC_DPS_READ_PROTECTED),
- !!(dps0 & DOC_DPS_WRITE_PROTECTED),
- !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
- !!(dps0 & DOC_DPS_KEY_OK));
- pos += seq_printf(s, "DPS1 = 0x%02x : "
- "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
- "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
- dps1, dps1_low, dps1_high,
- !!(dps1 & DOC_DPS_OTP_PROTECTED),
- !!(dps1 & DOC_DPS_READ_PROTECTED),
- !!(dps1 & DOC_DPS_WRITE_PROTECTED),
- !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
- !!(dps1 & DOC_DPS_KEY_OK));
- return pos;
+ seq_puts(s, "NO_PROTECT_ERR");
+ seq_puts(s, ")\n");
+
+ seq_printf(s, "DPS0 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps0, dps0_low, dps0_high,
+ !!(dps0 & DOC_DPS_OTP_PROTECTED),
+ !!(dps0 & DOC_DPS_READ_PROTECTED),
+ !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps0 & DOC_DPS_KEY_OK));
+ seq_printf(s, "DPS1 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+ dps1, dps1_low, dps1_high,
+ !!(dps1 & DOC_DPS_OTP_PROTECTED),
+ !!(dps1 & DOC_DPS_READ_PROTECTED),
+ !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+ !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+ !!(dps1 & DOC_DPS_KEY_OK));
+ return 0;
}
DEBUGFS_RO_ATTR(protection, dbg_protection_show);
@@ -2126,10 +2119,18 @@ static int __exit docg3_release(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id docg3_dt_ids[] = {
+ { .compatible = "m-systems,diskonchip-g3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, docg3_dt_ids);
+#endif
+
static struct platform_driver g3_driver = {
.driver = {
.name = "docg3",
- .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(docg3_dt_ids),
},
.suspend = docg3_suspend,
.resume = docg3_resume,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ed827cf894e4..85e35467fba6 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -128,13 +128,10 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
- int dummy = nor->read_dummy;
- int ret;
+ unsigned int dummy = nor->read_dummy;
- /* Wait till previous write/erase is done. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
+ /* convert the dummy cycles to the number of bytes */
+ dummy /= 8;
spi_message_init(&m);
memset(t, 0, (sizeof t));
@@ -160,21 +157,10 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
static int m25p80_erase(struct spi_nor *nor, loff_t offset)
{
struct m25p *flash = nor->priv;
- int ret;
dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
flash->mtd.erasesize / 1024, (u32)offset);
- /* Wait until finished previous write command. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
- if (ret)
- return ret;
-
/* Set up command buffer. */
flash->command[0] = nor->erase_opcode;
m25p_addr2cmd(nor, offset, flash->command);
@@ -260,7 +246,6 @@ static int m25p_remove(struct spi_device *spi)
return mtd_device_unregister(&flash->mtd);
}
-
/*
* XXX This needs to be kept in sync with spi_nor_ids. We can't share
* it with spi-nor, because if this is built as a module then modpost
@@ -287,7 +272,7 @@ static const struct spi_device_id m25p_ids[] = {
{"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
{"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
{"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
- {"s25fl016k"}, {"s25fl064k"},
+ {"s25fl016k"}, {"s25fl064k"}, {"s25fl132k"},
{"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
{"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
{"sst25wf040"},
@@ -300,17 +285,16 @@ static const struct spi_device_id m25p_ids[] = {
{"m45pe10"}, {"m45pe80"}, {"m45pe16"},
{"m25pe20"}, {"m25pe80"}, {"m25pe16"},
{"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
- {"m25px64"},
+ {"m25px64"}, {"m25px80"},
{"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
{"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"},
- {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"},
+ {"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
+ {"w25q128"}, {"w25q256"}, {"cat25c11"},
{"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
-
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index dd22ce2cc9ad..0099aba72a8b 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -149,7 +149,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
- struct spi_transfer x = { .tx_dma = 0, };
+ struct spi_transfer x = { };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
uint8_t *command;
@@ -235,7 +235,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct dataflash *priv = mtd->priv;
- struct spi_transfer x[2] = { { .tx_dma = 0, }, };
+ struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int addr;
uint8_t *command;
@@ -301,7 +301,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
- struct spi_transfer x[2] = { { .tx_dma = 0, }, };
+ struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int pageaddr, addr, offset, writelen;
size_t remaining = len;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index effd9a4ef7ee..8b66e52ca3cc 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -17,7 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index f02603e1bfeb..708b7e8c8b18 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -812,8 +812,7 @@ static int __init init_pmc551(void)
}
/* Exited early, reference left over */
- if (PCI_Device)
- pci_dev_put(PCI_Device);
+ pci_dev_put(PCI_Device);
if (!pmc551list) {
printk(KERN_NOTICE "pmc551: not detected\n");
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index c4176b0f382d..508bab3bd0c4 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1079,7 +1079,6 @@ static struct platform_driver spear_smi_driver = {
.driver = {
.name = "smi",
.bus = &platform_bus_type,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear_smi_id_table),
.pm = &spear_smi_pm_ops,
},
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index d252514d3e98..54ffe5223e64 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2069,7 +2069,6 @@ static struct platform_driver stfsm_driver = {
.remove = stfsm_remove,
.driver = {
.name = "st-spi-fsm",
- .owner = THIS_MODULE,
.of_match_table = stfsm_match,
},
};
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 487e64f411a5..1388c8d7f309 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -518,7 +518,7 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
pr_debug("INFTL Virtual Unit Chains:\n");
for (logical = 0; logical < s->nb_blocks; logical++) {
block = s->VUtable[logical];
- if (block > s->nb_blocks)
+ if (block >= s->nb_blocks)
continue;
pr_debug(" LOGICAL %d --> %d ", logical, block);
for (i = 0; i < s->nb_blocks; i++) {
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6ea51e549045..41730feeace8 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -126,7 +126,6 @@ static const char * const part_probe_types[] = {
static int bfin_flash_probe(struct platform_device *pdev)
{
- int ret;
struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 6a589f1e2880..b4430741024e 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -250,7 +250,6 @@ static struct platform_driver ixp4xx_flash_driver = {
.remove = ixp4xx_flash_remove,
.driver = {
.name = "IXP4XX-Flash",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7aa682cd4d7e..33d26f5bee54 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -205,7 +205,6 @@ static struct platform_driver ltq_mtd_driver = {
.remove = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
- .owner = THIS_MODULE,
.of_match_table = ltq_mtd_match,
},
};
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f73cd461257c..4305fd607015 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -217,7 +217,6 @@ static struct platform_driver physmap_flash_driver = {
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d21cb501ca..f35cd2081314 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -47,14 +47,12 @@ static int of_flash_remove(struct platform_device *dev)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->cmtd != info->list[0].mtd) {
+ if (info->cmtd) {
mtd_device_unregister(info->cmtd);
- mtd_concat_destroy(info->cmtd);
+ if (info->cmtd != info->list[0].mtd)
+ mtd_concat_destroy(info->cmtd);
}
- if (info->cmtd)
- mtd_device_unregister(info->cmtd);
-
for (i = 0; i < info->list_size; i++) {
if (info->list[i].mtd)
map_destroy(info->list[i].mtd);
@@ -354,7 +352,6 @@ MODULE_DEVICE_TABLE(of, of_flash_match);
static struct platform_driver of_flash_driver = {
.driver = {
.name = "of-flash",
- .owner = THIS_MODULE,
.of_match_table = of_flash_match,
},
.probe = of_flash_probe,
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index d597e89f2692..4b65c08d15f6 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -251,7 +251,6 @@ static struct platform_driver platram_driver = {
.remove = platram_remove,
.driver = {
.name = "mtd-ram",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index cb4d92eea9fe..12fa75df5008 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -131,7 +131,6 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
static struct platform_driver pxa2xx_flash_driver = {
.driver = {
.name = "pxa2xx-flash",
- .owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
.remove = pxa2xx_flash_remove,
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index a84fdfb10518..5a7551aa2d89 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -127,7 +127,6 @@ static struct platform_driver rbtx4939_flash_driver = {
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 8fc06bf111c4..ea697202935a 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -289,7 +289,6 @@ static struct platform_driver sa1100_mtd_driver = {
.remove = __exit_p(sa1100_mtd_remove),
.driver = {
.name = "sa1100-mtd",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index b6f1aac3510c..d459aca07881 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -150,7 +150,6 @@ MODULE_DEVICE_TABLE(of, uflash_match);
static struct platform_driver uflash_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = uflash_match,
},
.probe = uflash_probe,
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dd10646982ae..7d0150d20432 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -75,10 +75,12 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
boards, the scratch register is at 0xFF108018.
config MTD_NAND_GPIO
- tristate "GPIO NAND Flash driver"
+ tristate "GPIO assisted NAND Flash driver"
depends on GPIOLIB
help
- This enables a GPIO based NAND flash driver.
+ This enables a NAND flash driver where control signals are
+ connected to GPIO pins, and commands and data are communicated
+ via a memory mapped interface.
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
@@ -516,4 +518,10 @@ config MTD_NAND_XWAY
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
+config MTD_NAND_SUNXI
+ tristate "Support for NAND on Allwinner SoCs"
+ depends on ARCH_SUNXI
+ help
+ Enables support for NAND Flash chips on Allwinner SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 9c847e469ca7..bd38f21d2e28 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 4936e9e0002f..f1d555cfb332 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -290,7 +290,6 @@ static struct platform_driver ams_delta_nand_driver = {
.remove = ams_delta_cleanup,
.driver = {
.name = "ams-delta-nand",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 19d1e9d17bf9..a345e7b2463a 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -92,7 +92,7 @@ static struct nand_ecclayout atmel_oobinfo_small = {
struct atmel_nfc {
void __iomem *base_cmd_regs;
void __iomem *hsmc_regs;
- void __iomem *sram_bank0;
+ void *sram_bank0;
dma_addr_t sram_bank0_phys;
bool use_nfc_sram;
bool write_by_sram;
@@ -105,7 +105,7 @@ struct atmel_nfc {
struct completion comp_xfer_done;
/* Point to the sram bank which include readed data via NFC */
- void __iomem *data_in_sram;
+ void *data_in_sram;
bool will_write_sram;
};
static struct atmel_nfc nand_nfc;
@@ -127,6 +127,7 @@ struct atmel_nand_host {
bool has_pmecc;
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
+ bool has_no_lookup_table;
u32 pmecc_lookup_table_offset;
u32 pmecc_lookup_table_offset_512;
u32 pmecc_lookup_table_offset_1024;
@@ -256,26 +257,6 @@ static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
return res;
}
-static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
-{
- int i;
- u32 *t = trg;
- const __iomem u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- *t++ = readl_relaxed(s++);
-}
-
-static void memcpy32_toio(void __iomem *trg, const void *src, int size)
-{
- int i;
- u32 __iomem *t = trg;
- const u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- writel_relaxed(*s++, t++);
-}
-
/*
* Minimal-overhead PIO for data access.
*/
@@ -285,7 +266,7 @@ static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
struct atmel_nand_host *host = nand_chip->priv;
if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
- memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ memcpy(buf, host->nfc->data_in_sram, len);
host->nfc->data_in_sram += len;
} else {
__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
@@ -298,7 +279,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
struct atmel_nand_host *host = nand_chip->priv;
if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
- memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+ memcpy(buf, host->nfc->data_in_sram, len);
host->nfc->data_in_sram += len;
} else {
__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
@@ -1112,12 +1093,66 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
return 0;
}
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
+
+static int build_gf_tables(int mm, unsigned int poly,
+ int16_t *index_of, int16_t *alpha_to)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = 1 << deg(poly);
+ unsigned int nn = (1 << mm) - 1;
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ alpha_to[i] = x;
+ index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ alpha_to[nn] = 1;
+ index_of[0] = 0;
+
+ return 0;
+}
+
+static uint16_t *create_lookup_table(struct device *dev, int sector_size)
+{
+ int degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 :
+ PMECC_GF_DIMENSION_14;
+ unsigned int poly = (sector_size == 512) ?
+ PMECC_GF_13_PRIMITIVE_POLY :
+ PMECC_GF_14_PRIMITIVE_POLY;
+ int table_size = (sector_size == 512) ?
+ PMECC_LOOKUP_TABLE_SIZE_512 :
+ PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
+ return NULL;
+
+ return addr;
+}
+
static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
struct mtd_info *mtd = &host->mtd;
struct nand_chip *nand_chip = &host->nand_chip;
struct resource *regs, *regs_pmerr, *regs_rom;
+ uint16_t *galois_table;
int cap, sector_size, err_no;
err_no = pmecc_choose_ecc(host, &cap, &sector_size);
@@ -1163,8 +1198,24 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
if (IS_ERR(host->pmecc_rom_base)) {
- err_no = PTR_ERR(host->pmecc_rom_base);
- goto err;
+ if (!host->has_no_lookup_table)
+ /* Don't display the information again */
+ dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
+
+ host->has_no_lookup_table = true;
+ }
+
+ if (host->has_no_lookup_table) {
+ /* Build the look-up table in runtime */
+ galois_table = create_lookup_table(host->dev, sector_size);
+ if (!galois_table) {
+ dev_err(host->dev, "Failed to build a lookup table in runtime!\n");
+ err_no = -EINVAL;
+ goto err;
+ }
+
+ host->pmecc_rom_base = (void __iomem *)galois_table;
+ host->pmecc_lookup_table_offset = 0;
}
nand_chip->ecc.size = sector_size;
@@ -1501,8 +1552,10 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
offset, 2) != 0) {
- dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
- return -EINVAL;
+ dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n");
+ host->has_no_lookup_table = true;
+ /* Will build a lookup table and initialize the offset later */
+ return 0;
}
if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
@@ -1899,7 +1952,7 @@ static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int cfg, len;
int status = 0;
struct atmel_nand_host *host = chip->priv;
- void __iomem *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+ void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
/* Subpage write is not supported */
if (offset || (data_len < mtd->writesize))
@@ -1910,14 +1963,14 @@ static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (use_dma) {
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
/* Fall back to use cpu copy */
- memcpy32_toio(sram, buf, len);
+ memcpy(sram, buf, len);
} else {
- memcpy32_toio(sram, buf, len);
+ memcpy(sram, buf, len);
}
cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
if (unlikely(raw) && oob_required) {
- memcpy32_toio(sram + len, chip->oob_poi, mtd->oobsize);
+ memcpy(sram + len, chip->oob_poi, mtd->oobsize);
len += mtd->oobsize;
nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
} else {
@@ -2260,7 +2313,8 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (nfc_sram) {
- nfc->sram_bank0 = devm_ioremap_resource(&pdev->dev, nfc_sram);
+ nfc->sram_bank0 = (void * __force)
+ devm_ioremap_resource(&pdev->dev, nfc_sram);
if (IS_ERR(nfc->sram_bank0)) {
dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
PTR_ERR(nfc->sram_bank0));
@@ -2312,7 +2366,6 @@ MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
static struct platform_driver atmel_nand_nfc_driver = {
.driver = {
.name = "atmel_nand_nfc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_nand_nfc_match),
},
.probe = atmel_nand_nfc_probe,
@@ -2324,7 +2377,6 @@ static struct platform_driver atmel_nand_driver = {
.remove = atmel_nand_remove,
.driver = {
.name = "atmel_nand",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_nand_dt_ids),
},
};
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 8a1e9a686759..d4035e335ad8 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -142,6 +142,10 @@
#define PMECC_GF_DIMENSION_13 13
#define PMECC_GF_DIMENSION_14 14
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
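
When the PMECC ROM lookup table cannot be mapped, the new atmel_nand.c code above builds the Galois-field log/antilog tables at runtime. A userspace sketch of the same construction, using the GF(2^13) primitive polynomial 0x201b defined above; the printed values are only a spot check, not the driver's table layout:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Build GF(2^m) antilog (alpha_to) and log (index_of) tables for a
	 * primitive polynomial "poly" of degree m, the same way build_gf_tables()
	 * does: repeatedly multiply by alpha (shift left) and reduce modulo poly. */
	static int build_gf_tables(int m, unsigned int poly,
				   int16_t *index_of, int16_t *alpha_to)
	{
		unsigned int x = 1;
		unsigned int k = 1u << m;	/* the x^m term of the polynomial */
		unsigned int nn = (1u << m) - 1;
		unsigned int i;

		for (i = 0; i < nn; i++) {
			alpha_to[i] = x;
			index_of[x] = i;
			if (i && x == 1)
				return -1;	/* not primitive: alpha^i == 1 too early */
			x <<= 1;
			if (x & k)
				x ^= poly;
		}
		alpha_to[nn] = 1;
		index_of[0] = 0;
		return 0;
	}

	int main(void)
	{
		/* GF(2^13) with the PMECC primitive polynomial for 512-byte sectors */
		enum { M = 13, N = 1 << M };
		int16_t *index_of = calloc(N, sizeof(*index_of));
		int16_t *alpha_to = calloc(N, sizeof(*alpha_to));

		if (!index_of || !alpha_to || build_gf_tables(M, 0x201b, index_of, alpha_to))
			return 1;

		/* spot check: alpha^0 = 1, and log/antilog are inverses */
		printf("alpha^0 = %d, log(alpha^5) = %d\n",
		       alpha_to[0], index_of[alpha_to[5]]);
		free(index_of);
		free(alpha_to);
		return 0;
	}
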
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 77d6c17b38c2..c0c3be180012 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -503,7 +503,6 @@ static int au1550nd_remove(struct platform_device *pdev)
static struct platform_driver au1550nd_driver = {
.driver = {
.name = "au1550-nand",
- .owner = THIS_MODULE,
},
.probe = au1550nd_probe,
.remove = au1550nd_remove,
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 107445911315..461577cfb5bc 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -73,7 +73,6 @@ static struct platform_driver bcm47xxnflash_driver = {
.remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 871c4f712654..4d8d4ba4b9c1 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -836,7 +836,6 @@ static struct platform_driver bf5xx_nand_driver = {
.remove = bf5xx_nand_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 4e66726da9aa..9a0f45f1d932 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -529,50 +529,6 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- if (unlikely(raw))
- status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
- else
- status = chip->ecc.write_page(mtd, chip, buf, oob_required);
-
- if (status < 0)
- return status;
-
- /*
- * Cached progamming disabled for now, Not sure if its worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
- */
- cached = 0;
-
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- /*
- * See if operation failed and additional status checks are
- * available
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_WRITING, status,
- page);
-
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- }
-
- return 0;
-}
-
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
return 0;
@@ -800,7 +756,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
cafe->nand.ecc.correct = (void *)cafe_nand_bug;
- cafe->nand.write_page = cafe_nand_write_page;
cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
cafe->nand.ecc.write_oob = cafe_nand_write_oob;
cafe->nand.ecc.read_page = cafe_nand_read_page;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index b922c8efcf40..feb6d18de78d 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -870,7 +870,6 @@ static struct platform_driver nand_davinci_driver = {
.remove = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(davinci_nand_of_match),
},
};
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 35cb17f57800..0cb1e8d9fbfc 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -120,7 +120,6 @@ static struct platform_driver denali_dt_driver = {
.remove = denali_dt_remove,
.driver = {
.name = "denali-nand-dt",
- .owner = THIS_MODULE,
.of_match_table = denali_nand_dt_ids,
},
};
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index ce24637e14f1..e5d7bcaafa7d 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1380,7 +1380,6 @@ static int __exit cleanup_docg4(struct platform_device *pdev)
static struct platform_driver docg4_driver = {
.driver = {
.name = "docg4",
- .owner = THIS_MODULE,
},
.suspend = docg4_suspend,
.resume = docg4_resume,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 545a5c002f09..04b22fd3732d 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -950,7 +950,6 @@ static const struct of_device_id fsl_elbc_nand_match[] = {
static struct platform_driver fsl_elbc_nand_driver = {
.driver = {
.name = "fsl,elbc-fcm-nand",
- .owner = THIS_MODULE,
.of_match_table = fsl_elbc_nand_match,
},
.probe = fsl_elbc_nand_probe,
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 2338124dd05f..4c05f4f6a5c6 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,7 +31,6 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/fsl_ifc.h>
-#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -877,7 +876,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
struct nand_chip *chip = &priv->chip;
struct nand_ecclayout *layout;
- u32 csor, ver;
+ u32 csor;
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
@@ -984,8 +983,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
- ver = ioread32be(&ifc->ifc_rev);
- if (ver == FSL_IFC_V1_1_0)
+ if (ctrl->version == FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
return 0;
@@ -1045,12 +1043,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
}
/* find which chip select it is connected to */
- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
if (match_bank(ifc, bank, res.start))
break;
}
- if (bank >= FSL_IFC_BANK_COUNT) {
+ if (bank >= fsl_ifc_ctrl_dev->banks) {
dev_err(&dev->dev, "%s: address did not match any chip selects\n",
__func__);
return -ENODEV;
@@ -1167,7 +1165,6 @@ static const struct of_device_id fsl_ifc_nand_match[] = {
static struct platform_driver fsl_ifc_nand_driver = {
.driver = {
.name = "fsl,ifc-nand",
- .owner = THIS_MODULE,
.of_match_table = fsl_ifc_nand_match,
},
.probe = fsl_ifc_nand_probe,
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4d203e84e8ca..72755d7ec25d 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -347,7 +347,6 @@ MODULE_DEVICE_TABLE(of, of_fun_match);
static struct platform_driver of_fun_driver = {
.driver = {
.name = "fsl,upm-nand",
- .owner = THIS_MODULE,
.of_match_table = of_fun_match,
},
.probe = fun_probe,
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 7a915870d9d6..edfaa21b1817 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -1224,7 +1224,6 @@ MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
static struct platform_driver fsmc_nand_driver = {
.remove = fsmc_nand_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "fsmc-nand",
.of_match_table = of_match_ptr(fsmc_nand_id_table),
.pm = &fsmc_nand_pm_ops,
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 117ce333fdd4..73c4048c3a56 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -8,7 +8,9 @@
*
* © 2004 Simtec Electronics
*
- * Device driver for NAND connected via GPIO
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -308,7 +310,6 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 87e658ce23ef..27f272ed502a 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -1353,3 +1353,156 @@ int gpmi_read_page(struct gpmi_nand_data *this,
set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
return start_dma_with_bch_irq(this, desc);
}
+
+/**
+ * gpmi_copy_bits - copy bits from one memory region to another
+ * @dst: destination buffer
+ * @dst_bit_off: bit offset we're starting to write at
+ * @src: source buffer
+ * @src_bit_off: bit offset we're starting to read from
+ * @nbits: number of bits to copy
+ *
+ * This function copies bits from one memory region to another, and is used by
+ * the GPMI driver to copy ECC sections which are not guaranteed to be byte
+ * aligned.
+ *
+ * src and dst should not overlap.
+ *
+ */
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits)
+{
+ size_t i;
+ size_t nbytes;
+ u32 src_buffer = 0;
+ size_t bits_in_src_buffer = 0;
+
+ if (!nbits)
+ return;
+
+ /*
+ * Move src and dst pointers to the closest byte pointer and store bit
+ * offsets within a byte.
+ */
+ src += src_bit_off / 8;
+ src_bit_off %= 8;
+
+ dst += dst_bit_off / 8;
+ dst_bit_off %= 8;
+
+ /*
+ * Initialize the src_buffer value with bits available in the first
+ * byte of data so that we end up with a byte aligned src pointer.
+ */
+ if (src_bit_off) {
+ src_buffer = src[0] >> src_bit_off;
+ if (nbits >= (8 - src_bit_off)) {
+ bits_in_src_buffer += 8 - src_bit_off;
+ } else {
+ src_buffer &= GENMASK(nbits - 1, 0);
+ bits_in_src_buffer += nbits;
+ }
+ nbits -= bits_in_src_buffer;
+ src++;
+ }
+
+ /* Calculate the number of bytes that can be copied from src to dst. */
+ nbytes = nbits / 8;
+
+ /* Try to align dst to a byte boundary. */
+ if (dst_bit_off) {
+ if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
+ src_buffer |= src[0] << bits_in_src_buffer;
+ bits_in_src_buffer += 8;
+ src++;
+ nbytes--;
+ }
+
+ if (bits_in_src_buffer >= (8 - dst_bit_off)) {
+ dst[0] &= GENMASK(dst_bit_off - 1, 0);
+ dst[0] |= src_buffer << dst_bit_off;
+ src_buffer >>= (8 - dst_bit_off);
+ bits_in_src_buffer -= (8 - dst_bit_off);
+ dst_bit_off = 0;
+ dst++;
+ if (bits_in_src_buffer > 7) {
+ bits_in_src_buffer -= 8;
+ dst[0] = src_buffer;
+ dst++;
+ src_buffer >>= 8;
+ }
+ }
+ }
+
+ if (!bits_in_src_buffer && !dst_bit_off) {
+ /*
+ * Both src and dst pointers are byte aligned, thus we can
+ * just use the optimized memcpy function.
+ */
+ if (nbytes)
+ memcpy(dst, src, nbytes);
+ } else {
+ /*
+ * src buffer is not byte aligned, hence we have to copy each
+ * src byte to the src_buffer variable before extracting a byte
+ * to store in dst.
+ */
+ for (i = 0; i < nbytes; i++) {
+ src_buffer |= src[i] << bits_in_src_buffer;
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+ }
+ /* Update dst and src pointers */
+ dst += nbytes;
+ src += nbytes;
+
+ /*
+ * nbits is the number of remaining bits. It should not exceed 8 as
+ * we've already copied as many bytes as possible.
+ */
+ nbits %= 8;
+
+ /*
+ * If there's no more bits to copy to the destination and src buffer
+ * was already byte aligned, then we're done.
+ */
+ if (!nbits && !bits_in_src_buffer)
+ return;
+
+ /* Copy the remaining bits to src_buffer */
+ if (nbits)
+ src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
+ bits_in_src_buffer;
+ bits_in_src_buffer += nbits;
+
+ /*
+ * In case there were not enough bits to get a byte aligned dst buffer
+ * prepare the src_buffer variable to match the dst organization (shift
+ * src_buffer by dst_bit_off and retrieve the least significant bits
+ * from dst).
+ */
+ if (dst_bit_off)
+ src_buffer = (src_buffer << dst_bit_off) |
+ (*dst & GENMASK(dst_bit_off - 1, 0));
+ bits_in_src_buffer += dst_bit_off;
+
+ /*
+ * Keep most significant bits from dst if we end up with an unaligned
+ * number of bits.
+ */
+ nbytes = bits_in_src_buffer / 8;
+ if (bits_in_src_buffer % 8) {
+ src_buffer |= (dst[nbytes] &
+ GENMASK(7, bits_in_src_buffer % 8)) <<
+ (nbytes * 8);
+ nbytes++;
+ }
+
+ /* Copy the remaining bytes to dst */
+ for (i = 0; i < nbytes; i++) {
+ dst[i] = src_buffer;
+ src_buffer >>= 8;
+ }
+}
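
gpmi_copy_bits() above is an optimized, word-at-a-time routine; its contract is easier to see in a naive bit-by-bit reference. The sketch below is not the driver's code and assumes bit offsets count from each byte's least-significant bit, as the shifts in the function above suggest:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Reference (slow) equivalent of gpmi_copy_bits(): copy nbits bits from
	 * src starting at src_bit_off to dst starting at dst_bit_off, LSB-first
	 * within each byte, leaving all other destination bits untouched. */
	static void copy_bits_slow(uint8_t *dst, size_t dst_bit_off,
				   const uint8_t *src, size_t src_bit_off,
				   size_t nbits)
	{
		size_t i;

		for (i = 0; i < nbits; i++) {
			size_t s = src_bit_off + i;
			size_t d = dst_bit_off + i;
			int bit = (src[s / 8] >> (s % 8)) & 1;

			dst[d / 8] &= (uint8_t)~(1u << (d % 8));
			dst[d / 8] |= (uint8_t)(bit << (d % 8));
		}
	}

	int main(void)
	{
		/* copy 13 bits (one hypothetical GF(2^13) ECC word) across byte
		 * boundaries: 13 bits from bit 3 of src into bit 6 of dst */
		uint8_t src[4] = { 0xa5, 0x3c, 0xff, 0x00 };
		uint8_t dst[4];

		memset(dst, 0xff, sizeof(dst));
		copy_bits_slow(dst, 6, src, 3, 13);
		printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
		return 0;
	}
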
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 959cb9b70310..4f3851a24bb2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -791,6 +791,7 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
this->page_buffer_phys);
kfree(this->cmd_buffer);
kfree(this->data_buffer_dma);
+ kfree(this->raw_buffer);
this->cmd_buffer = NULL;
this->data_buffer_dma = NULL;
@@ -837,6 +838,9 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
if (!this->page_buffer_virt)
goto error_alloc;
+ this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!this->raw_buffer)
+ goto error_alloc;
/* Slice up the page buffer. */
this->payload_virt = this->page_buffer_virt;
@@ -1347,6 +1351,199 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
return status & NAND_STATUS_FAIL ? -EIO : 0;
}
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks on
+ * byte boundaries.
+ * We thus need to take care moving the payload data and ECC bits stored in the
+ * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ size_t src_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ uint8_t *oob = chip->oob_poi;
+ int step;
+
+ chip->read_buf(mtd, tmp_buf,
+ mtd->writesize + mtd->oobsize);
+
+ /*
+ * If required, swap the bad block marker and the data stored in the
+ * metadata section, so that we don't wrongly consider a block as bad.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ /*
+ * Copy the metadata section into the oob buffer (this section is
+ * guaranteed to be aligned on a byte boundary).
+ */
+ if (oob_required)
+ memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ src_bit_off = oob_bit_off;
+
+ /* Extract interleaved payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(buf, step * eccsize * 8,
+ tmp_buf, src_bit_off,
+ eccsize * 8);
+ src_bit_off += eccsize * 8;
+
+ /* Align the last ECC block on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(oob, oob_bit_off,
+ tmp_buf, src_bit_off,
+ eccbits);
+
+ src_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ if (oob_required) {
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_byte_off < mtd->oobsize)
+ memcpy(oob + oob_byte_off,
+ tmp_buf + mtd->writesize + oob_byte_off,
+ mtd->oobsize - oob_byte_off);
+ }
+
+ return 0;
+}
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with payload DATA), and does not align data chunks on
+ * byte boundaries.
+ * We thus need to take care moving the OOB area at the right place in the
+ * final page, which is why we're using gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct gpmi_nand_data *this = chip->priv;
+ struct bch_geometry *nfc_geo = &this->bch_geometry;
+ int eccsize = nfc_geo->ecc_chunk_size;
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *tmp_buf = this->raw_buffer;
+ uint8_t *oob = chip->oob_poi;
+ size_t dst_bit_off;
+ size_t oob_bit_off;
+ size_t oob_byte_off;
+ int step;
+
+ /*
+ * Initialize all bits to 1 in case we don't have a buffer for the
+ * payload or oob data in order to leave unspecified bits of data
+ * to their initial state.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ /*
+ * First copy the metadata section (stored in oob buffer) at the
+ * beginning of the page, as imposed by the GPMI layout.
+ */
+ memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+ oob_bit_off = nfc_geo->metadata_size * 8;
+ dst_bit_off = oob_bit_off;
+
+ /* Interleave payload data and ECC bits */
+ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+ if (buf)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ buf, step * eccsize * 8, eccsize * 8);
+ dst_bit_off += eccsize * 8;
+
+ /* Align the last ECC block on a byte boundary */
+ if (step == nfc_geo->ecc_chunk_count - 1 &&
+ (oob_bit_off + eccbits) % 8)
+ eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+ if (oob_required)
+ gpmi_copy_bits(tmp_buf, dst_bit_off,
+ oob, oob_bit_off, eccbits);
+
+ dst_bit_off += eccbits;
+ oob_bit_off += eccbits;
+ }
+
+ oob_byte_off = oob_bit_off / 8;
+
+ if (oob_required && oob_byte_off < mtd->oobsize)
+ memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+ oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+ /*
+ * If required, swap the bad block marker and the first byte of the
+ * metadata section, so that we don't modify the bad block marker.
+ *
+ * See the layout description for a detailed explanation on why this
+ * is needed.
+ */
+ if (this->swap_block_mark) {
+ u8 swap = tmp_buf[0];
+
+ tmp_buf[0] = tmp_buf[mtd->writesize];
+ tmp_buf[mtd->writesize] = swap;
+ }
+
+ chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+
+ return 0;
+}
+
+static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+ return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1);
+}
+
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd->priv;
@@ -1664,6 +1861,10 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
ecc->write_page = gpmi_ecc_write_page;
ecc->read_oob = gpmi_ecc_read_oob;
ecc->write_oob = gpmi_ecc_write_oob;
+ ecc->read_page_raw = gpmi_ecc_read_page_raw;
+ ecc->write_page_raw = gpmi_ecc_write_page_raw;
+ ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+ ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
ecc->mode = NAND_ECC_HW;
ecc->size = bch_geo->ecc_chunk_size;
ecc->strength = bch_geo->ecc_strength;
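
The raw page accessors above walk a layout of metadata, then alternating data chunks and ECC bit groups, with only the last ECC group padded to a byte boundary. A small userspace sketch with a purely hypothetical geometry (10 bytes of metadata, 5 chunks of 512 bytes, 78 ECC bits per chunk, none of it taken from a real chip) that prints where each region starts:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical BCH geometry: 78 = 6 * 13 ECC bits per chunk,
		 * deliberately not a multiple of 8 to show the bit packing */
		const unsigned int metadata_size = 10, chunks = 5, chunk_size = 512;
		const unsigned int eccbits_per_chunk = 78;
		unsigned int bit_off = metadata_size * 8;
		unsigned int step;

		for (step = 0; step < chunks; step++) {
			unsigned int eccbits = eccbits_per_chunk;

			printf("chunk %u: data starts at bit %u\n", step, bit_off);
			bit_off += chunk_size * 8;

			/* pad the last chunk's ECC so the raw page ends byte
			 * aligned, mirroring the adjustment in the raw accessors */
			if (step == chunks - 1 && (bit_off + eccbits) % 8)
				eccbits += 8 - (bit_off + eccbits) % 8;

			printf("chunk %u: ECC starts at bit %u (%u bits)\n",
			       step, bit_off, eccbits);
			bit_off += eccbits;
		}
		printf("raw page size: %u bytes\n", (bit_off + 7) / 8);
		return 0;
	}
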
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 32c6ba49f986..544062f65020 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -189,6 +189,8 @@ struct gpmi_nand_data {
void *auxiliary_virt;
dma_addr_t auxiliary_phys;
+ void *raw_buffer;
+
/* DMA channels */
#define DMA_CHANS 8
struct dma_chan *dma_chans[DMA_CHANS];
@@ -290,6 +292,10 @@ extern int gpmi_send_page(struct gpmi_nand_data *,
extern int gpmi_read_page(struct gpmi_nand_data *,
dma_addr_t payload, dma_addr_t auxiliary);
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+ const u8 *src, size_t src_bit_off,
+ size_t nbits);
+
/* BCH : Status Block Completion Codes */
#define STATUS_GOOD 0x00
#define STATUS_ERASED 0xff
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index a2c804de156b..1633ec9c5108 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -577,7 +577,6 @@ static struct platform_driver jz_nand_driver = {
.remove = jz_nand_remove,
.driver = {
.name = "jz4740-nand",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 7335346dc126..79c3b7801e1f 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -878,7 +878,6 @@ static struct platform_driver lpc32xx_nand_driver = {
.suspend = lpc32xx_nand_suspend,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 8caef28e0756..abfec13868e5 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -999,7 +999,6 @@ static struct platform_driver lpc32xx_nand_driver = {
.suspend = lpc32xx_nand_suspend,
.driver = {
.name = LPC32XX_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = lpc32xx_nand_match,
},
};
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index e78841a2dcc3..1f12e5bfbced 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -847,7 +847,6 @@ static struct platform_driver mpc5121_nfc_driver = {
.remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc5121_nfc_match,
},
};
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index dba262bf766f..a8f550fec35e 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -280,14 +280,10 @@ static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
*t++ = __raw_readl(s++);
}
-static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
{
- int i;
- u32 __iomem *t = trg;
- const u32 *s = src;
-
- for (i = 0; i < (size >> 2); i++)
- __raw_writel(*s++, t++);
+ /* __iowrite32_copy takes a 32-bit word count, so divide the byte size by 4 */
+ __iowrite32_copy(trg, src, size / 4);
}
static int check_int_v3(struct mxc_nand_host *host)
@@ -1600,7 +1596,6 @@ static int mxcnd_remove(struct platform_device *pdev)
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxcnd_dt_ids),
},
.id_table = mxcnd_devtype,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5b5c62712814..41585dfb206f 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -485,11 +485,11 @@ static int nand_check_wp(struct mtd_info *mtd)
}
/**
- * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
* @mtd: MTD device structure
* @ofs: offset from device start
*
- * Check if the block is mark as reserved.
+ * Check if the block is marked as reserved.
*/
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
@@ -720,7 +720,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
/*
* Program and erase have their own busy handlers status, sequential
- * in, and deplete1 need no delay.
+ * in and status need no delay.
*/
switch (command) {
@@ -3765,9 +3765,9 @@ ident_done:
pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
type->name);
- pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
- mtd->writesize, mtd->oobsize);
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return type;
}
@@ -4035,7 +4035,7 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
if (!ecc->size && (mtd->oobsize >= 64)) {
ecc->size = 512;
- ecc->bytes = 7;
+ ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8);
}
ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
&ecc->layout);
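
The ecc->bytes change above replaces the hard-coded 7 with a value derived from the requested strength: soft BCH over a 512-byte step works in GF(2^13), so each correctable bit costs 13 bits of ECC and the byte count is the ceiling of 13*t/8. A quick arithmetic check (the strengths listed are examples, not chip data):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		const int strengths[] = { 1, 2, 4, 8, 16 };
		unsigned int i;

		for (i = 0; i < sizeof(strengths) / sizeof(strengths[0]); i++) {
			int t = strengths[i];

			/* 13 ECC bits per correctable bit for a 512-byte step */
			printf("strength %2d -> %d ECC bytes per 512-byte step\n",
			       t, DIV_ROUND_UP(13 * t, 8));
		}
		/* strength 4 gives 7 bytes, matching the old hard-coded value */
		return 0;
	}
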
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index fbde89105245..dd620c19c619 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -178,6 +178,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_EON, "Eon"},
{NAND_MFR_SANDISK, "SanDisk"},
{NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_ATO, "ATO"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7dc1dd28d896..ab5bbf567439 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -87,10 +87,6 @@
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif
-static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
-static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
-static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
-static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
@@ -111,11 +107,19 @@ static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;
+static u_char id_bytes[8] = {
+ [0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+ [1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+ [2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+ [3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+ [4 ... 7] = 0xFF,
+};
-module_param(first_id_byte, uint, 0400);
-module_param(second_id_byte, uint, 0400);
-module_param(third_id_byte, uint, 0400);
-module_param(fourth_id_byte, uint, 0400);
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
module_param(access_delay, uint, 0400);
module_param(programm_delay, uint, 0400);
module_param(erase_delay, uint, 0400);
@@ -136,10 +140,11 @@ module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
module_param(bch, uint, 0400);
-MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
-MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
-MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
@@ -304,7 +309,7 @@ struct nandsim {
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
- u_char ids[4]; /* chip's ID bytes */
+ u_char ids[8]; /* chip's ID bytes */
uint32_t options; /* chip's characteristic bits */
uint32_t state; /* current chip state */
uint32_t nxstate; /* next expected state */
@@ -2279,17 +2284,18 @@ static int __init ns_init_module(void)
* Perform minimum nandsim structure initialization to handle
* the initial ID read command correctly
*/
- if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
+ if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+ nand->geom.idbytes = 8;
+ else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+ nand->geom.idbytes = 6;
+ else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
nand->geom.idbytes = 4;
else
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
nand->options |= OPT_PAGE512; /* temporary value */
- nand->ids[0] = first_id_byte;
- nand->ids[1] = second_id_byte;
- nand->ids[2] = third_id_byte;
- nand->ids[3] = fourth_id_byte;
+ memcpy(nand->ids, id_bytes, sizeof(nand->ids));
if (bus_width == 16) {
nand->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
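
The nandsim change above folds the four separate ID-byte parameters into one 8-entry array whose unused entries stay 0xFF, and derives the reported ID length from the trailing pairs. A userspace sketch of that detection follows; the ID bytes are illustrative only (arrays are normally passed to the module as a comma-separated list, e.g. id_bytes=0x2c,0xdc,0x90,0xa6,0x54,0x00, assuming the usual module_param_array syntax):

	#include <stdio.h>
	#include <string.h>

	/* mirror of the idbytes detection added above: unused entries are 0xFF,
	 * and the reported ID length is 2, 4, 6 or 8 depending on which trailing
	 * pairs still hold real data */
	static int id_len(const unsigned char id[8])
	{
		if (id[6] != 0xFF || id[7] != 0xFF)
			return 8;
		if (id[4] != 0xFF || id[5] != 0xFF)
			return 6;
		if (id[2] != 0xFF || id[3] != 0xFF)
			return 4;
		return 2;
	}

	int main(void)
	{
		/* example only: a 6-byte ID string, remaining entries left at 0xFF */
		unsigned char id[8];

		memset(id, 0xFF, sizeof(id));
		id[0] = 0x2c; id[1] = 0xdc; id[2] = 0x90;
		id[3] = 0xa6; id[4] = 0x54; id[5] = 0x00;

		printf("reported ID length: %d bytes\n", id_len(id));
		return 0;
	}
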
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 253a644da76a..3187c6b92d9a 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -277,7 +277,6 @@ MODULE_DEVICE_TABLE(of, ndfc_match);
static struct platform_driver ndfc_driver = {
.driver = {
.name = "ndfc",
- .owner = THIS_MODULE,
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index e8a5fffd6ab2..e58c644dd220 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -300,7 +300,6 @@ static struct platform_driver nuc900_nand_driver = {
.remove = nuc900_nand_remove,
.driver = {
.name = "nuc900-fmi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 3b357e920a0c..63f858e6bf39 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -144,11 +144,13 @@ static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
-/* oob info generated runtime depending on ecc algorithm and layout selected */
-static struct nand_ecclayout omap_oobinfo;
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_hw_control omap_gpmc_controller = {
+ .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+};
struct omap_nand_info {
- struct nand_hw_control controller;
struct omap_nand_platform_data *pdata;
struct mtd_info mtd;
struct nand_chip nand;
@@ -168,6 +170,8 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
struct gpmc_nand_regs reg;
+ /* generated at runtime depending on ECC algorithm and layout selected */
+ struct nand_ecclayout oobinfo;
/* fields specific for BCHx_HW ECC scheme */
struct device *elm_dev;
struct device_node *of_node;
@@ -1686,9 +1690,6 @@ static int omap_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- spin_lock_init(&info->controller.lock);
- init_waitqueue_head(&info->controller.wq);
-
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
@@ -1708,7 +1709,7 @@ static int omap_nand_probe(struct platform_device *pdev)
info->phys_base = res->start;
- nand_chip->controller = &info->controller;
+ nand_chip->controller = &omap_gpmc_controller;
nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
nand_chip->cmd_ctrl = omap_hwcontrol;
@@ -1741,13 +1742,6 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* check for small page devices */
- if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
- dev_err(&info->pdev->dev, "small page devices are not supported\n");
- err = -EINVAL;
- goto return_error;
- }
-
/* re-populate low-level callbacks based on xfer modes */
switch (pdata->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
@@ -1840,7 +1834,7 @@ static int omap_nand_probe(struct platform_device *pdev)
}
/* populate MTD interface based on ECC scheme */
- ecclayout = &omap_oobinfo;
+ ecclayout = &info->oobinfo;
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_SW:
nand_chip->ecc.mode = NAND_ECC_SOFT;
@@ -2095,7 +2089,6 @@ static struct platform_driver omap_nand_driver = {
.remove = omap_nand_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
index 058531044ceb..376bfe19104f 100644
--- a/drivers/mtd/nand/omap_elm.c
+++ b/drivers/mtd/nand/omap_elm.c
@@ -563,7 +563,6 @@ MODULE_DEVICE_TABLE(of, elm_of_match);
static struct platform_driver elm_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(elm_of_match),
.pm = &elm_pm_ops,
},
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 471b4df3a5ac..c3c6d305caa7 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -19,7 +19,7 @@
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sizes.h>
#include <linux/platform_data/mtd-orion_nand.h>
@@ -85,33 +85,24 @@ static int __init orion_nand_probe(struct platform_device *pdev)
int ret = 0;
u32 val = 0;
- nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
- if (!nc) {
- ret = -ENOMEM;
- goto no_res;
- }
+ nc = devm_kzalloc(&pdev->dev,
+ sizeof(struct nand_chip) + sizeof(struct mtd_info),
+ GFP_KERNEL);
+ if (!nc)
+ return -ENOMEM;
mtd = (struct mtd_info *)(nc + 1);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto no_res;
- }
+ io_base = devm_ioremap_resource(&pdev->dev, res);
- io_base = ioremap(res->start, resource_size(res));
- if (!io_base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EIO;
- goto no_res;
- }
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
if (pdev->dev.of_node) {
board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
GFP_KERNEL);
- if (!board) {
- ret = -ENOMEM;
- goto no_res;
- }
+ if (!board)
+ return -ENOMEM;
if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
board->cle = (u8)val;
else
@@ -185,9 +176,6 @@ no_dev:
clk_disable_unprepare(clk);
clk_put(clk);
}
- iounmap(io_base);
-no_res:
- kfree(nc);
return ret;
}
@@ -195,15 +183,10 @@ no_res:
static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct nand_chip *nc = mtd->priv;
struct clk *clk;
nand_release(mtd);
- iounmap(nc->IO_ADDR_W);
-
- kfree(nc);
-
clk = clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
clk_disable_unprepare(clk);
@@ -224,7 +207,6 @@ static struct platform_driver orion_nand_driver = {
.remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(orion_nand_of_match_table),
},
};
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 2c98f9da7471..66c345b42097 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -223,7 +223,6 @@ static struct platform_driver pasemi_nand_driver =
{
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
.of_match_table = pasemi_nand_match,
},
.probe = pasemi_nand_probe,
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 0b068a5c0bff..4535c263fae5 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -138,7 +138,6 @@ static struct platform_driver plat_nand_driver = {
.remove = plat_nand_remove,
.driver = {
.name = "gen_nand",
- .owner = THIS_MODULE,
.of_match_table = plat_nand_match,
},
};
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 6b97bf17ce5d..35aef5edb588 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -1135,7 +1135,6 @@ static struct platform_driver s3c24xx_nand_driver = {
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c24xx-nand",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 0ed7c603298f..a21c378f096a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1190,7 +1190,6 @@ static struct platform_driver flctl_driver = {
.remove = flctl_remove,
.driver = {
.name = "sh_flctl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_flctl_match),
},
};
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index e81059b58382..842c47a451a0 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -220,7 +220,6 @@ static int sharpsl_nand_remove(struct platform_device *pdev)
static struct platform_driver sharpsl_nand_driver = {
.driver = {
.name = "sharpsl-nand",
- .owner = THIS_MODULE,
},
.probe = sharpsl_nand_probe,
.remove = sharpsl_nand_remove,
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index fe8058a45054..d71062273f55 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -240,7 +240,6 @@ MODULE_DEVICE_TABLE(of, socrates_nand_match);
static struct platform_driver socrates_nand_driver = {
.driver = {
.name = "socrates_nand",
- .owner = THIS_MODULE,
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
new file mode 100644
index 000000000000..ccaa8e283388
--- /dev/null
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -0,0 +1,1432 @@
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_CNT0 0x0040
+#define NFC_REG_ECC_CNT1 0x0044
+#define NFC_REG_ECC_CNT2 0x0048
+#define NFC_REG_ECC_CNT3 0x004c
+#define NFC_REG_USER_DATA_BASE 0x0050
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* define bit use in NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDYH BIT(2)
+#define NFC_RB_SEL BIT(3)
+#define NFC_CE_SEL GENMASK(26, 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_CE_CTL1 BIT(7)
+#define NFC_PAGE_SIZE GENMASK(11, 8)
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* define bit use in NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE0 BIT(8)
+#define NFC_RB_STATE1 BIT(9)
+#define NFC_RB_STATE2 BIT(10)
+#define NFC_RB_STATE3 BIT(11)
+
+/* define bit use in NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* define bit use in NFC_CMD */
+#define NFC_CMD_LOW_BYTE GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE GENMASK(15, 8)
+#define NFC_ADR_NUM GENMASK(18, 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE GENMASK(31, 30)
+
+/* define bits used in NFC_RCMD_SET */
+#define NFC_READ_CMD GENMASK(7, 0)
+#define NFC_RANDOM_READ_CMD0 GENMASK(15, 8)
+#define NFC_RANDOM_READ_CMD1 GENMASK(23, 16)
+
+/* define bits used in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD GENMASK(7, 0)
+#define NFC_RANDOM_WRITE_CMD GENMASK(15, 8)
+#define NFC_READ_CMD0 GENMASK(23, 16)
+#define NFC_READ_CMD1 GENMASK(31, 24)
+
+/* define bits used in NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE BIT(5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_SHIFT 12
+#define NFC_ECC_MODE GENMASK(15, 12)
+#define NFC_RANDOM_SEED GENMASK(30, 16)
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/*
+ * Ready/Busy detection type: describes the Ready/Busy detection modes
+ *
+ * @RB_NONE: no external detection available, rely on STATUS command
+ * and software timeouts
+ * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
+ * pin of the NAND flash chip must be connected to one of the
+ * native NAND R/B pins (those which can be muxed to the NAND
+ * Controller)
+ * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
+ * pin of the NAND flash chip must be connected to a GPIO capable
+ * pin.
+ */
+enum sunxi_nand_rb_type {
+ RB_NONE,
+ RB_NATIVE,
+ RB_GPIO,
+};
+
+/*
+ * Ready/Busy structure: stores information related to Ready/Busy detection
+ *
+ * @type: the Ready/Busy detection mode
+ * @info: information related to the R/B detection mode. Either a gpio
+ * id or a native R/B id (those supported by the NAND controller).
+ */
+struct sunxi_nand_rb {
+ enum sunxi_nand_rb_type type;
+ union {
+ int gpio;
+ int nativeid;
+ } info;
+};
+
+/*
+ * Chip Select structure: stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy description
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ struct sunxi_nand_rb rb;
+};
+
+/*
+ * sunxi HW ECC infos: stores information related to HW ECC support
+ *
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @layout: the OOB layout depending on the ECC requirements and the
+ * selected ECC mode
+ */
+struct sunxi_nand_hw_ecc {
+ int mode;
+ struct nand_ecclayout layout;
+};
+
+/*
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @mtd: base MTD structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @selected: current active CS
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ struct mtd_info mtd;
+ unsigned long clk_rate;
+ int selected;
+ int nsels;
+ struct sunxi_nand_chip_sel sels[0];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/*
+ * NAND Controller structure: stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND Controller AHB clock
+ * @mod_clk: NAND Controller mod clock
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to
+ * this NAND controller
+ * @complete: a completion object used to wait for NAND
+ * controller events
+ */
+struct sunxi_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+ struct completion complete;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
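+/*
+ * The handler acknowledges the events that fired and disables their
+ * interrupt sources; sunxi_nfc_wait_int() re-arms the flags it waits on
+ * before sleeping on the completion.
+ */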
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+ struct sunxi_nfc *nfc = dev_id;
+ u32 st = readl(nfc->regs + NFC_REG_ST);
+ u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+ if (!(ien & st))
+ return IRQ_NONE;
+
+ if ((ien & st) == ien)
+ complete(&nfc->complete);
+
+ writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+ writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
+ unsigned int timeout_ms)
+{
+ init_completion(&nfc->complete);
+
+ writel(flags, nfc->regs + NFC_REG_INT);
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ if (!wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(nfc->dev, "wait interrupt timedout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ do {
+ if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_rb *rb;
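+ /*
+ * Erase operations take much longer than reads/writes, hence the
+ * larger Ready/Busy timeout (in ms).
+ */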
+ unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
+ int ret;
+
+ if (sunxi_nand->selected < 0)
+ return 0;
+
+ rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
+
+ switch (rb->type) {
+ case RB_NATIVE:
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ if (ret)
+ break;
+
+ sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ (NFC_RB_STATE0 << rb->info.nativeid));
+ break;
+ case RB_GPIO:
+ ret = gpio_get_value(rb->info.gpio);
+ break;
+ case RB_NONE:
+ default:
+ ret = 0;
+ dev_err(nfc->dev, "cannot check R/B NAND status!\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (chip > 0 && chip >= sunxi_nand->nsels)
+ return;
+
+ if (chip == sunxi_nand->selected)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_CE_SEL | NFC_RB_SEL | NFC_EN);
+
+ if (chip >= 0) {
+ sel = &sunxi_nand->sels[chip];
+
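+ /*
+ * Select the CS line, enable the controller and encode the page size
+ * in the NFC_PAGE_SIZE field (log2(page size) - 10, i.e. 0 for 1 KiB
+ * pages, 1 for 2 KiB pages, ...).
+ */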
+ ctl |= (sel->cs << 24) | NFC_EN |
+ (((nand->page_shift - 10) & 0xf) << 8);
+ if (sel->rb.type == RB_NONE) {
+ nand->dev_ready = NULL;
+ } else {
+ nand->dev_ready = sunxi_nfc_dev_ready;
+ if (sel->rb.type == RB_NATIVE)
+ ctl |= (sel->rb.info.nativeid << 3);
+ }
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+ }
+
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+
+ sunxi_nand->selected = chip;
+}
+
+static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
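+ /*
+ * Data is transferred through the controller internal SRAM, so issue
+ * at most one NFC_SRAM_SIZE chunk per command.
+ */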
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+{
+ uint8_t ret;
+
+ sunxi_nfc_read_buf(mtd, &ret, 1);
+
+ return ret;
+}
+
+static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ u32 tmp;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ tmp = readl(nfc->regs + NFC_REG_CTL);
+ if (ctrl & NAND_NCE)
+ tmp |= NFC_CE_CTL;
+ else
+ tmp &= ~NFC_CE_CTL;
+ writel(tmp, nfc->regs + NFC_REG_CTL);
+ }
+
+ if (dat == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE) {
+ writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD);
+ } else {
+ writel(dat, nfc->regs + NFC_REG_ADDR_LOW);
+ writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD);
+ }
+
+ sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, i * ecc->size, -1);
+
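+ /* point at the 4 spare bytes (user data/BBM) preceding this step's ECC bytes */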
+ offset = mtd->writesize + layout->eccpos[i * ecc->bytes] - 4;
+
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
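+ /* bit 30 sets the NFC_CMD_TYPE field (no symbolic name for this value yet) */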
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf + (i * ecc->size),
+ nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset,
+ ecc->bytes + 4);
+ }
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[ecc->steps].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[ecc->steps].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ offset -= mtd->writesize;
+ chip->read_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout = ecc->layout;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ int offset;
+ int ret;
+ u32 tmp;
+ int i;
+ int cnt;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ if (i)
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, i * ecc->size, -1);
+
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+
+ offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
+ chip->oob_poi + offset - mtd->writesize,
+ 4);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required) {
+ cnt = ecc->layout->oobfree[i].length;
+ if (cnt > 0) {
+ offset = mtd->writesize +
+ ecc->layout->oobfree[i].offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ offset -= mtd->writesize;
+ chip->write_buf(mtd, chip->oob_poi + offset, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ unsigned int max_bitflips = 0;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->read_buf(mtd, NULL, ecc->size);
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, nfc->regs + NFC_RAM0_BASE, ecc->size);
+ buf += ecc->size;
+ offset += ecc->size;
+
+ if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
+ mtd->ecc_stats.failed++;
+ } else {
+ tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
+ mtd->ecc_stats.corrected += tmp;
+ max_bitflips = max_t(unsigned int, max_bitflips, tmp);
+ }
+
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, ecc->bytes + ecc->prepad);
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ offset += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, oob, cnt);
+ }
+ }
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct sunxi_nand_hw_ecc *data = ecc->priv;
+ uint8_t *oob = chip->oob_poi;
+ int offset = 0;
+ int ret;
+ int cnt;
+ u32 tmp;
+ int i;
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
+ tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
+ NFC_ECC_EXCEPTION;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ for (i = 0; i < ecc->steps; i++) {
+ chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+ offset += ecc->size;
+
+ /* Fill OOB data in */
+ if (oob_required) {
+ tmp = 0xffffffff;
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+ 4);
+ } else {
+ memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
+ 4);
+ }
+
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ (1 << 30);
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
+
+ offset += ecc->bytes + ecc->prepad;
+ oob += ecc->bytes + ecc->prepad;
+ }
+
+ if (oob_required) {
+ cnt = mtd->oobsize - (oob - chip->oob_poi);
+ if (cnt > 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ chip->write_buf(mtd, oob, cnt);
+ }
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
+ tmp &= ~NFC_ECC_EN;
+
+ writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+
+ return 0;
+}
+
+static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
+ const struct nand_sdr_timings *timings)
+{
+ u32 min_clk_period = 0;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+ * Convert min_clk_period into a clk frequency, then get the
+ * appropriate rate for the NAND controller IP given this formula
+ * (specified in the datasheet):
+ * nand clk_rate = 2 * min_clk_rate
+ */
+ chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
+
+ /* TODO: configure T16-T19 */
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ int ret;
+ int mode;
+
+ mode = onfi_get_async_timing_mode(&chip->nand);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ mode = chip->nand.onfi_timing_mode_default;
+ } else {
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+
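+ /* pick the highest (fastest) timing mode advertised by the chip */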
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ feature[0] = mode;
+ ret = chip->nand.onfi_set_features(&chip->mtd, &chip->nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ feature);
+ if (ret)
+ return ret;
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ return sunxi_nand_chip_set_timings(chip, timings);
+}
+
+static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_hw_ecc *data;
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int ret;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Add ECC info retrieval from DT */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(nfc->dev, "unsupported strength\n");
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ data->mode = i;
+
+ /* HW ECC always requests ECC bytes for 1024-byte blocks */
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
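+ /* i.e. strength * fls(8 * 1024) = strength * 14 bits per step, rounded up to bytes */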
+
+ /* HW ECC always works with an even number of ECC bytes */
+ ecc->bytes = ALIGN(ecc->bytes, 2);
+
+ layout = &data->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ layout->eccbytes = (ecc->bytes * nsectors);
+
+ ecc->layout = layout;
+ ecc->priv = data;
+
+ return 0;
+
+err:
+ kfree(data);
+
+ return ret;
+}
+
+static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ kfree(ecc->priv);
+}
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i, j;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < nsectors; i++) {
+ if (i) {
+ layout->oobfree[i].offset =
+ layout->oobfree[i - 1].offset +
+ layout->oobfree[i - 1].length +
+ ecc->bytes;
+ layout->oobfree[i].length = 4;
+ } else {
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ layout->oobfree[i].length = 2;
+ layout->oobfree[i].offset = 2;
+ }
+
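+ /* the ECC bytes of this step immediately follow its free OOB area */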
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[(ecc->bytes * i) + j] =
+ layout->oobfree[i].offset +
+ layout->oobfree[i].length + j;
+ }
+
+ if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
+ layout->oobfree[nsectors].offset =
+ layout->oobfree[nsectors - 1].offset +
+ layout->oobfree[nsectors - 1].length +
+ ecc->bytes;
+ layout->oobfree[nsectors].length = mtd->oobsize -
+ ((ecc->bytes + 4) * nsectors);
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+
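+ /* each chunk carries 4 spare bytes (user data/BBM) right before its ECC bytes */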
+ ecc->prepad = 4;
+ ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
+
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < (ecc->bytes * nsectors); i++)
+ layout->eccpos[i] = i;
+
+ layout->oobfree[0].length = mtd->oobsize - i;
+ layout->oobfree[0].offset = i;
+
+ return 0;
+}
+
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
+ break;
+ case NAND_ECC_NONE:
+ kfree(ecc->layout);
+ default:
+ break;
+ }
+}
+
+static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ struct device_node *np)
+{
+ struct nand_chip *nand = mtd->priv;
+ int strength;
+ int blk_size;
+ int ret;
+
+ blk_size = of_get_nand_ecc_step_size(np);
+ strength = of_get_nand_ecc_strength(np);
+ if (blk_size > 0 && strength > 0) {
+ ecc->size = blk_size;
+ ecc->strength = strength;
+ } else {
+ ecc->size = nand->ecc_step_ds;
+ ecc->strength = nand->ecc_strength_ds;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ ecc->mode = NAND_ECC_HW;
+
+ ret = of_get_nand_ecc_mode(np);
+ if (ret >= 0)
+ ecc->mode = ret;
+
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT_BCH:
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size),
+ 8);
+ break;
+ case NAND_ECC_HW:
+ ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_HW_SYNDROME:
+ ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
+ if (!ecc->layout)
+ return -ENOMEM;
+ ecc->layout->oobfree[0].length = mtd->oobsize;
+ case NAND_ECC_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+ struct device_node *np)
+{
+ const struct nand_sdr_timings *timings;
+ struct sunxi_nand_chip *chip;
+ struct mtd_part_parser_data ppdata;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 tmp;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
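+ /* of_get_property() returned the property length in bytes: one u32 per CS line */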
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev,
+ sizeof(*chip) +
+ (nsels * sizeof(struct sunxi_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!chip) {
+ dev_err(dev, "could not allocate chip\n");
+ return -ENOMEM;
+ }
+
+ chip->nsels = nsels;
+ chip->selected = -1;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (tmp > NFC_MAX_CS) {
+ dev_err(dev,
+ "invalid reg value: %u (max CS = 7)\n",
+ tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i].cs = tmp;
+
+ if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+ tmp < 2) {
+ chip->sels[i].rb.type = RB_NATIVE;
+ chip->sels[i].rb.info.nativeid = tmp;
+ } else {
+ ret = of_get_named_gpio(np, "rb-gpios", i);
+ if (ret >= 0) {
+ tmp = ret;
+ chip->sels[i].rb.type = RB_GPIO;
+ chip->sels[i].rb.info.gpio = tmp;
+ ret = devm_gpio_request(dev, tmp, "nand-rb");
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_input(tmp);
+ if (ret)
+ return ret;
+ } else {
+ chip->sels[i].rb.type = RB_NONE;
+ }
+ }
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings)) {
+ ret = PTR_ERR(timings);
+ dev_err(dev,
+ "could not retrieve timings for ONFI mode 0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_chip_set_timings(chip, timings);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ nand = &chip->nand;
+ /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+ nand->chip_delay = 200;
+ nand->controller = &nfc->controller;
+ nand->select_chip = sunxi_nfc_select_chip;
+ nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
+ nand->read_buf = sunxi_nfc_read_buf;
+ nand->write_buf = sunxi_nfc_write_buf;
+ nand->read_byte = sunxi_nfc_read_byte;
+
+ if (of_get_nand_on_flash_bbt(np))
+ nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ mtd = &chip->mtd;
+ mtd->dev.parent = dev;
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nand_chip_init_timings(chip, np);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (nchips > 8) {
+ dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
+ node);
+ nand_release(&chip->mtd);
+ sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ }
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct sunxi_nfc *nfc;
+ int irq;
+ int ret;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(nfc->ahb_clk)) {
+ dev_err(dev, "failed to retrieve ahb clk\n");
+ return PTR_ERR(nfc->ahb_clk);
+ }
+
+ ret = clk_prepare_enable(nfc->ahb_clk);
+ if (ret)
+ return ret;
+
+ nfc->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(nfc->mod_clk)) {
+ dev_err(dev, "failed to retrieve mod clk\n");
+ ret = PTR_ERR(nfc->mod_clk);
+ goto out_ahb_clk_unprepare;
+ }
+
+ ret = clk_prepare_enable(nfc->mod_clk);
+ if (ret)
+ goto out_ahb_clk_unprepare;
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+ 0, "sunxi-nand", nfc);
+ if (ret)
+ goto out_mod_clk_unprepare;
+
+ platform_set_drvdata(pdev, nfc);
+
+ /*
+ * TODO: replace these magic values with proper flags as soon as we
+ * know what they are encoding.
+ */
+ writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
+
+ ret = sunxi_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto out_mod_clk_unprepare;
+ }
+
+ return 0;
+
+out_mod_clk_unprepare:
+ clk_disable_unprepare(nfc->mod_clk);
+out_ahb_clk_unprepare:
+ clk_disable_unprepare(nfc->ahb_clk);
+
+ return ret;
+}
+
+static int sunxi_nfc_remove(struct platform_device *pdev)
+{
+ struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+ sunxi_nand_chips_cleanup(nfc);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+ .driver = {
+ .name = "sunxi_nand",
+ .of_match_table = sunxi_nfc_ids,
+ },
+ .probe = sunxi_nfc_probe,
+ .remove = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index c1622a5ba814..9c0bc45e28a9 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -417,7 +417,6 @@ static struct platform_driver txx9ndfmc_driver = {
.resume = txx9ndfmc_resume,
.driver = {
.name = "txx9ndfmc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 093c29ac1a13..32a216d31141 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -106,7 +106,6 @@ static int generic_onenand_remove(struct platform_device *pdev)
static struct platform_driver generic_onenand_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = generic_onenand_probe,
.remove = generic_onenand_remove,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d945473c3882..646ddd6db1b4 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -804,7 +804,6 @@ static struct platform_driver omap2_onenand_driver = {
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index d5269a26c839..39763b94f67d 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -719,16 +719,10 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
{
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
- int ret;
dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
- /* Wait until the previous command is finished. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
/* Read out the data directly from the AHB buffer.*/
memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
@@ -744,16 +738,6 @@ static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
- /* Wait until finished previous write command. */
- ret = nor->wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
- if (ret)
- return ret;
-
ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
if (ret)
return ret;
@@ -849,9 +833,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(q->clk);
if (ret) {
- clk_disable_unprepare(q->clk_en);
dev_err(dev, "can not enable the qspi clock\n");
- goto map_failed;
+ goto clk_failed;
}
/* find the irq */
@@ -905,7 +888,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
nor->prepare = fsl_qspi_prep;
nor->unprepare = fsl_qspi_unprep;
- if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
+ ret = of_modalias_node(np, modalias, sizeof(modalias));
+ if (ret < 0)
goto map_failed;
ret = of_property_read_u32(np, "spi-max-frequency",
@@ -964,6 +948,7 @@ last_init_failed:
irq_failed:
clk_disable_unprepare(q->clk);
+clk_failed:
clk_disable_unprepare(q->clk_en);
map_failed:
dev_err(dev, "Freescale QuadSPI probe failed\n");
@@ -991,7 +976,6 @@ static struct platform_driver fsl_qspi_driver = {
.driver = {
.name = "fsl-quadspi",
.bus = &platform_bus_type,
- .owner = THIS_MODULE,
.of_match_table = fsl_qspi_dt_ids,
},
.probe = fsl_qspi_probe,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index c51ee52386a7..0f8ec3c2d015 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -26,7 +26,38 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct flash_info {
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u16 flags;
+#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
+#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
+#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+#define USE_FSR 0x80 /* use flag status register */
+};
+
+#define JEDEC_MFR(info) ((info)->id[0])
static const struct spi_device_id *spi_nor_match_id(const char *name);
@@ -98,7 +129,7 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
case SPI_NOR_FAST:
case SPI_NOR_DUAL:
case SPI_NOR_QUAD:
- return 1;
+ return 8;
case SPI_NOR_NORMAL:
return 0;
}
@@ -138,13 +169,14 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
+static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+ int enable)
{
int status;
bool need_wren = false;
u8 cmd;
- switch (JEDEC_MFR(jedec_id)) {
+ switch (JEDEC_MFR(info)) {
case CFI_MFR_ST: /* Micron, actually */
/* Some Micron need WREN command; all will accept it */
need_wren = true;
@@ -165,81 +197,74 @@ static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
}
}
-
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
+static inline int spi_nor_sr_ready(struct spi_nor *nor)
{
- unsigned long deadline;
- int sr;
-
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
-
- do {
- cond_resched();
+ int sr = read_sr(nor);
+ if (sr < 0)
+ return sr;
+ else
+ return !(sr & SR_WIP);
+}
- sr = read_sr(nor);
- if (sr < 0)
- break;
- else if (!(sr & SR_WIP))
- return 0;
- } while (!time_after_eq(jiffies, deadline));
+static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+ if (fsr < 0)
+ return fsr;
+ else
+ return fsr & FSR_READY;
+}
- return -ETIMEDOUT;
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
}
-static int spi_nor_wait_till_fsr_ready(struct spi_nor *nor)
+/*
+ * Service routine to read status register until ready, or timeout occurs.
+ * Returns non-zero if error.
+ */
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
unsigned long deadline;
- int sr;
- int fsr;
+ int timeout = 0, ret;
deadline = jiffies + MAX_READY_WAIT_JIFFIES;
- do {
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
cond_resched();
+ }
- sr = read_sr(nor);
- if (sr < 0) {
- break;
- } else if (!(sr & SR_WIP)) {
- fsr = read_fsr(nor);
- if (fsr < 0)
- break;
- if (fsr & FSR_READY)
- return 0;
- }
- } while (!time_after_eq(jiffies, deadline));
+ dev_err(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
- */
-static int wait_till_ready(struct spi_nor *nor)
-{
- return nor->wait_till_ready(nor);
-}
-
-/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_chip(struct spi_nor *nor)
{
- int ret;
-
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* Send write enable, then erase commands. */
- write_enable(nor);
-
return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
}
@@ -294,11 +319,17 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* whole-chip erase? */
if (len == mtd->size) {
+ write_enable(nor);
+
if (erase_chip(nor)) {
ret = -EIO;
goto erase_err;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+
/* REVISIT in some cases we could speed up erasing large regions
* by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
* to use "small sector erase", but that's not always optimal.
@@ -307,6 +338,8 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else {
while (len) {
+ write_enable(nor);
+
if (nor->erase(nor, addr)) {
ret = -EIO;
goto erase_err;
@@ -314,9 +347,15 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr += mtd->erasesize;
len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
}
}
+ write_disable(nor);
+
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
instr->state = MTD_ERASE_DONE;
@@ -341,11 +380,6 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ret)
return ret;
- /* Wait until finished previous command */
- ret = wait_till_ready(nor);
- if (ret)
- goto err;
-
status_old = read_sr(nor);
if (offset < mtd->size - (mtd->size / 2))
@@ -388,11 +422,6 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ret)
return ret;
- /* Wait until finished previous command */
- ret = wait_till_ready(nor);
- if (ret)
- goto err;
-
status_old = read_sr(nor);
if (offset+len > mtd->size - (mtd->size / 64))
@@ -424,38 +453,34 @@ err:
return ret;
}
-struct flash_info {
- /* JEDEC id zero means "no ID" (most older chips); otherwise it has
- * a high byte of zero plus three data bytes: the manufacturer id,
- * then a two byte device id.
- */
- u32 jedec_id;
- u16 ext_id;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u16 flags;
-#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
-#define SST_WRITE 0x04 /* use SST byte programming */
-#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
-#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
-#define USE_FSR 0x80 /* use flag status register */
-};
-
+/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
- .jedec_id = (_jedec_id), \
- .ext_id = (_ext_id), \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags), \
+ })
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
@@ -507,6 +532,9 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
@@ -532,6 +560,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
@@ -556,6 +585,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -566,6 +596,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, 0) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -577,6 +608,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
@@ -588,7 +620,6 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -643,32 +674,24 @@ static const struct spi_device_id spi_nor_ids[] = {
static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
- u8 id[5];
- u32 jedec;
- u16 ext_jedec;
+ u8 id[SPI_NOR_MAX_ID_LEN];
struct flash_info *info;
- tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, 5);
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
- jedec = id[0];
- jedec = jedec << 8;
- jedec |= id[1];
- jedec = jedec << 8;
- jedec |= id[2];
-
- ext_jedec = id[3] << 8 | id[4];
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
info = (void *)spi_nor_ids[tmp].driver_data;
- if (info->jedec_id == jedec) {
- if (info->ext_id == 0 || info->ext_id == ext_jedec)
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
}
}
- dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+ id[0], id[1], id[2]);
return ERR_PTR(-ENODEV);
}
@@ -703,11 +726,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- goto time_out;
-
write_enable(nor);
nor->sst_write_second = false;
@@ -719,7 +737,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write one byte. */
nor->write(nor, to, 1, retlen, buf);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
}
@@ -731,7 +749,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* write two bytes. */
nor->write(nor, to, 2, retlen, buf + actual);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
to += 2;
@@ -740,7 +758,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->sst_write_second = false;
write_disable(nor);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
@@ -751,7 +769,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_BP;
nor->write(nor, to, 1, retlen, buf + actual);
- ret = wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
if (ret)
goto time_out;
write_disable(nor);
@@ -779,11 +797,6 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- /* Wait until finished previous write command. */
- ret = wait_till_ready(nor);
- if (ret)
- goto write_err;
-
write_enable(nor);
page_offset = to & (nor->page_size - 1);
@@ -802,16 +815,20 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (page_size > nor->page_size)
page_size = nor->page_size;
- wait_till_ready(nor);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+
write_enable(nor);
nor->write(nor, to + i, page_size, retlen, buf + i);
}
}
+ ret = spi_nor_wait_till_ready(nor);
write_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
- return 0;
+ return ret;
}
static int macronix_quad_enable(struct spi_nor *nor)
@@ -824,7 +841,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
- if (wait_till_ready(nor))
+ if (spi_nor_wait_till_ready(nor))
return 1;
ret = read_sr(nor);
@@ -874,11 +891,11 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
+static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
{
int status;
- switch (JEDEC_MFR(jedec_id)) {
+ switch (JEDEC_MFR(info)) {
case CFI_MFR_MACRONIX:
status = macronix_quad_enable(nor);
if (status) {
@@ -904,11 +921,6 @@ static int spi_nor_check(struct spi_nor *nor)
return -EINVAL;
}
- if (!nor->read_id)
- nor->read_id = spi_nor_read_id;
- if (!nor->wait_till_ready)
- nor->wait_till_ready = spi_nor_wait_till_ready;
-
return 0;
}
@@ -926,16 +938,24 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
- id = spi_nor_match_id(name);
- if (!id)
+ /* Try to auto-detect if chip name wasn't specified */
+ if (!name)
+ id = spi_nor_read_id(nor);
+ else
+ id = spi_nor_match_id(name);
+ if (IS_ERR_OR_NULL(id))
return -ENOENT;
info = (void *)id->driver_data;
- if (info->jedec_id) {
+ /*
+ * If caller has specified name of flash model that can normally be
+ * detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
const struct spi_device_id *jid;
- jid = nor->read_id(nor);
+ jid = spi_nor_read_id(nor);
if (IS_ERR(jid)) {
return PTR_ERR(jid);
} else if (jid != id) {
@@ -960,9 +980,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
* up with the software protection bits set
*/
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
- JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
+ if (JEDEC_MFR(info) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info) == CFI_MFR_SST) {
write_enable(nor);
write_sr(nor, 0);
}
@@ -977,7 +997,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_read = spi_nor_read;
/* nor protection support for STmicro chips */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ if (JEDEC_MFR(info) == CFI_MFR_ST) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
}
@@ -988,9 +1008,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else
mtd->_write = spi_nor_write;
- if ((info->flags & USE_FSR) &&
- nor->wait_till_ready == spi_nor_wait_till_ready)
- nor->wait_till_ready = spi_nor_wait_till_fsr_ready;
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
@@ -1031,7 +1050,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
/* Quad/Dual-read mode takes precedence over fast/normal */
if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
- ret = set_quad_mode(nor, info->jedec_id);
+ ret = set_quad_mode(nor, info);
if (ret) {
dev_err(dev, "quad mode not supported\n");
return ret;
@@ -1067,7 +1086,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+ if (JEDEC_MFR(info) == CFI_MFR_AMD) {
/* Dedicated 4-byte command set */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
@@ -1088,7 +1107,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->erase_opcode = SPINOR_OP_SE_4B;
mtd->erasesize = info->sector_size;
} else
- set_4byte(nor, info->jedec_id, 1);
+ set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index dc4f9602b97e..5e061186eab1 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -34,8 +34,11 @@
#include "mtd_test.h"
static int dev = -EINVAL;
+static int bitflip_limit;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
+module_param(bitflip_limit, int, S_IRUGO);
+MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
static struct mtd_info *mtd;
static unsigned char *readbuf;
@@ -115,12 +118,36 @@ static int write_whole_device(void)
return 0;
}
+/*
+ * Display the address, offset and data bytes at comparison failure.
+ * Return number of bitflips encountered.
+ */
+static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
+ res = *su1 ^ *su2;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, i, *su1, *su2, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
static int verify_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
+ size_t bitflips;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
@@ -139,8 +166,11 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf + (use_len_max * i) + use_offset,
- use_len)) {
+
+ bitflips = memcmpshow(addr, readbuf,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -148,7 +178,10 @@ static int verify_eraseblock(int ebnum)
pr_err("error: too many errors\n");
return -1;
}
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
+
if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
int k;
@@ -167,9 +200,10 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf + use_offset,
- writebuf + (use_len_max * i) + use_offset,
- use_len)) {
+ bitflips = memcmpshow(addr, readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
+ if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -177,7 +211,10 @@ static int verify_eraseblock(int ebnum)
pr_err("error: too many errors\n");
return -1;
}
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
+
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
pr_err("error: verify 0xff "
@@ -216,6 +253,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->ecclayout->oobavail * pgcnt;
+ size_t oobavail = mtd->ecclayout->oobavail;
+ size_t bitflips;
+ int i;
prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
@@ -226,6 +266,8 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
+
+ /* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
@@ -233,13 +275,21 @@ static int verify_eraseblock_in_one_go(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- if (memcmp(readbuf, writebuf, len)) {
- pr_err("error: verify failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too many errors\n");
- return -1;
+
+ /* verify one page OOB at a time for bitflip per page limit check */
+ for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+ bitflips = memcmpshow(addr, readbuf + (i * oobavail),
+ writebuf + (i * oobavail), oobavail);
+ if (bitflips > bitflip_limit) {
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
+ errcnt += 1;
+ if (errcnt > 1000) {
+ pr_err("error: too many errors\n");
+ return -1;
+ }
+ } else if (bitflips) {
+ pr_info("ignoring error as within bitflip_limit\n");
}
}
@@ -610,7 +660,8 @@ static int __init mtd_oobtest_init(void)
err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
- if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
+ if (memcmpshow(addr, readbuf, writebuf,
+ mtd->ecclayout->oobavail * 2)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
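The oobtest hunks above add memcmpshow(), which XORs corresponding bytes, counts the differing bits with hweight8(), and lets the verify paths tolerate up to the new bitflip_limit module parameter per page. Below is a minimal userspace sketch of that counting idea, not the kernel code itself: __builtin_popcount() stands in for hweight8() and the sample buffers and limit are made up.

#include <stddef.h>
#include <stdio.h>

/* Count differing bits between two equally sized buffers, printing each
 * mismatching byte pair -- roughly what memcmpshow() does in oobtest.c. */
static size_t count_bitflips(const unsigned char *a, const unsigned char *b,
			     size_t len)
{
	size_t i, bitflips = 0;

	for (i = 0; i < len; i++) {
		unsigned char diff = a[i] ^ b[i];

		if (diff) {
			printf("offset 0x%zx: 0x%02x -> 0x%02x diff 0x%02x\n",
			       i, a[i], b[i], diff);
			bitflips += __builtin_popcount(diff);
		}
	}
	return bitflips;
}

int main(void)
{
	unsigned char written[4] = { 0xa5, 0x5a, 0xff, 0x00 };
	unsigned char read[4]    = { 0xa5, 0x5e, 0xff, 0x01 };
	size_t bitflip_limit = 2;	/* analogous to the new module parameter */
	size_t flips = count_bitflips(written, read, sizeof(written));

	if (flips > bitflip_limit)
		printf("verify failed: %zu bitflips\n", flips);
	else if (flips)
		printf("ignoring %zu bitflips, within limit\n", flips);
	return 0;
}

With the sample buffers this reports two single-bit flips and treats them as acceptable, which is exactly the behaviour the test gains for NAND parts where a few correctable bitflips in OOB data are expected.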
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index eeab96973cf0..b55bc52a1340 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -264,7 +264,9 @@ static int __init tort_init(void)
int i;
void *patt;
- mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+ if (err)
+ goto out;
/* Check if the eraseblocks contain only 0xFF bytes */
if (check) {
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 6f27d9a1be3b..9d2e16f3150a 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -176,6 +176,7 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
/**
* validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
* @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
@@ -188,7 +189,8 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
* information in the VID header is consistent to the information in other VID
* headers of the same volume.
*/
-static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr,
const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
@@ -206,7 +208,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
*/
if (vol_id != av->vol_id) {
- ubi_err("inconsistent vol_id");
+ ubi_err(ubi, "inconsistent vol_id");
goto bad;
}
@@ -216,17 +218,17 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
av_vol_type = UBI_VID_DYNAMIC;
if (vol_type != av_vol_type) {
- ubi_err("inconsistent vol_type");
+ ubi_err(ubi, "inconsistent vol_type");
goto bad;
}
if (used_ebs != av->used_ebs) {
- ubi_err("inconsistent used_ebs");
+ ubi_err(ubi, "inconsistent used_ebs");
goto bad;
}
if (data_pad != av->data_pad) {
- ubi_err("inconsistent data_pad");
+ ubi_err(ubi, "inconsistent data_pad");
goto bad;
}
}
@@ -234,7 +236,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
return 0;
bad:
- ubi_err("inconsistent VID header at PEB %d", pnum);
+ ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
ubi_dump_av(av);
return -EINVAL;
@@ -336,7 +338,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
- ubi_err("unsupported on-flash UBI format");
+ ubi_err(ubi, "unsupported on-flash UBI format");
return -EINVAL;
}
@@ -377,7 +379,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
- ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
pnum, err);
if (err > 0)
err = -EIO;
@@ -507,7 +509,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* logical eraseblocks because there was an unclean reboot.
*/
if (aeb->sqnum == sqnum && sqnum != 0) {
- ubi_err("two LEBs with same sequence number %llu",
+ ubi_err(ubi, "two LEBs with same sequence number %llu",
sqnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_vid_hdr(vid_hdr);
@@ -527,7 +529,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* This logical eraseblock is newer than the one
* found earlier.
*/
- err = validate_vid_hdr(vid_hdr, av, pnum);
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
@@ -565,7 +567,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* attaching information.
*/
- err = validate_vid_hdr(vid_hdr, av, pnum);
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
@@ -668,7 +670,8 @@ static int early_erase_peb(struct ubi_device *ubi,
* Erase counter overflow. Upgrade UBI and use 64-bit
* erase counters internally.
*/
- ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
+ pnum, ec);
return -EINVAL;
}
@@ -736,7 +739,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
return aeb;
}
- ubi_err("no free eraseblocks");
+ ubi_err(ubi, "no free eraseblocks");
return ERR_PTR(-ENOSPC);
}
@@ -785,9 +788,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
- ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
pnum);
- ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
ubi_dump_vid_hdr(vid_hdr);
pr_err("hexdump of PEB %d offset %d, length %d",
pnum, ubi->leb_start, ubi->leb_size);
@@ -859,7 +862,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
bitflips = 1;
break;
default:
- ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
+ ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
+ err);
return -EINVAL;
}
@@ -868,7 +872,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* Make sure UBI version is OK */
if (ech->version != UBI_VERSION) {
- ubi_err("this UBI version is %d, image version is %d",
+ ubi_err(ubi, "this UBI version is %d, image version is %d",
UBI_VERSION, (int)ech->version);
return -EINVAL;
}
@@ -882,7 +886,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* flash. Upgrade UBI and use 64-bit erase counters
* internally.
*/
- ubi_err("erase counter overflow, max is %d",
+ ubi_err(ubi, "erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
ubi_dump_ec_hdr(ech);
return -EINVAL;
@@ -903,7 +907,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (!ubi->image_seq)
ubi->image_seq = image_seq;
if (image_seq && ubi->image_seq != image_seq) {
- ubi_err("bad image sequence number %d in PEB %d, expected %d",
+ ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
return -EINVAL;
@@ -981,7 +985,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
return err;
goto adjust_mean_ec;
default:
- ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
+ ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
err);
return -EINVAL;
}
@@ -999,7 +1003,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
case UBI_COMPAT_DELETE:
if (vol_id != UBI_FM_SB_VOLUME_ID
&& vol_id != UBI_FM_DATA_VOLUME_ID) {
- ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
vol_id, lnum);
}
err = add_to_list(ai, pnum, vol_id, lnum,
@@ -1009,13 +1013,13 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
return 0;
case UBI_COMPAT_RO:
- ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
+ ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
- ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+ ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 0, &ai->alien);
@@ -1024,14 +1028,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
return 0;
case UBI_COMPAT_REJECT:
- ubi_err("incompatible internal volume %d:%d found",
+ ubi_err(ubi, "incompatible internal volume %d:%d found",
vol_id, lnum);
return -EINVAL;
}
}
if (ec_err)
- ubi_warn("valid VID header but corrupted EC header at PEB %d",
+ ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
@@ -1075,7 +1079,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
* with the flash HW or driver.
*/
if (ai->corr_peb_count) {
- ubi_err("%d PEBs are corrupted and preserved",
+ ubi_err(ubi, "%d PEBs are corrupted and preserved",
ai->corr_peb_count);
pr_err("Corrupted PEBs are:");
list_for_each_entry(aeb, &ai->corr, u.list)
@@ -1087,7 +1091,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
* otherwise, only print a warning.
*/
if (ai->corr_peb_count >= max_corr) {
- ubi_err("too many corrupted PEBs, refusing");
+ ubi_err(ubi, "too many corrupted PEBs, refusing");
return -EINVAL;
}
}
@@ -1110,11 +1114,11 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
*/
if (ai->maybe_bad_peb_count <= 2) {
ai->is_empty = 1;
- ubi_msg("empty MTD device detected");
+ ubi_msg(ubi, "empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
- ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+ ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
@@ -1248,7 +1252,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto out_vidh;
}
- ubi_msg("scanning is finished");
+ ubi_msg(ubi, "scanning is finished");
/* Calculate mean erase counter */
if (ai->ec_count)
@@ -1515,37 +1519,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
vols_found += 1;
if (ai->is_empty) {
- ubi_err("bad is_empty flag");
+ ubi_err(ubi, "bad is_empty flag");
goto bad_av;
}
if (av->vol_id < 0 || av->highest_lnum < 0 ||
av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
av->data_pad < 0 || av->last_data_size < 0) {
- ubi_err("negative values");
+ ubi_err(ubi, "negative values");
goto bad_av;
}
if (av->vol_id >= UBI_MAX_VOLUMES &&
av->vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("bad vol_id");
+ ubi_err(ubi, "bad vol_id");
goto bad_av;
}
if (av->vol_id > ai->highest_vol_id) {
- ubi_err("highest_vol_id is %d, but vol_id %d is there",
+ ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
ai->highest_vol_id, av->vol_id);
goto out;
}
if (av->vol_type != UBI_DYNAMIC_VOLUME &&
av->vol_type != UBI_STATIC_VOLUME) {
- ubi_err("bad vol_type");
+ ubi_err(ubi, "bad vol_type");
goto bad_av;
}
if (av->data_pad > ubi->leb_size / 2) {
- ubi_err("bad data_pad");
+ ubi_err(ubi, "bad data_pad");
goto bad_av;
}
@@ -1557,48 +1561,48 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
leb_count += 1;
if (aeb->pnum < 0 || aeb->ec < 0) {
- ubi_err("negative values");
+ ubi_err(ubi, "negative values");
goto bad_aeb;
}
if (aeb->ec < ai->min_ec) {
- ubi_err("bad ai->min_ec (%d), %d found",
+ ubi_err(ubi, "bad ai->min_ec (%d), %d found",
ai->min_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->ec > ai->max_ec) {
- ubi_err("bad ai->max_ec (%d), %d found",
+ ubi_err(ubi, "bad ai->max_ec (%d), %d found",
ai->max_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->pnum >= ubi->peb_count) {
- ubi_err("too high PEB number %d, total PEBs %d",
+ ubi_err(ubi, "too high PEB number %d, total PEBs %d",
aeb->pnum, ubi->peb_count);
goto bad_aeb;
}
if (av->vol_type == UBI_STATIC_VOLUME) {
if (aeb->lnum >= av->used_ebs) {
- ubi_err("bad lnum or used_ebs");
+ ubi_err(ubi, "bad lnum or used_ebs");
goto bad_aeb;
}
} else {
if (av->used_ebs != 0) {
- ubi_err("non-zero used_ebs");
+ ubi_err(ubi, "non-zero used_ebs");
goto bad_aeb;
}
}
if (aeb->lnum > av->highest_lnum) {
- ubi_err("incorrect highest_lnum or lnum");
+ ubi_err(ubi, "incorrect highest_lnum or lnum");
goto bad_aeb;
}
}
if (av->leb_count != leb_count) {
- ubi_err("bad leb_count, %d objects in the tree",
+ ubi_err(ubi, "bad leb_count, %d objects in the tree",
leb_count);
goto bad_av;
}
@@ -1609,13 +1613,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
aeb = last_aeb;
if (aeb->lnum != av->highest_lnum) {
- ubi_err("bad highest_lnum");
+ ubi_err(ubi, "bad highest_lnum");
goto bad_aeb;
}
}
if (vols_found != ai->vols_found) {
- ubi_err("bad ai->vols_found %d, should be %d",
+ ubi_err(ubi, "bad ai->vols_found %d, should be %d",
ai->vols_found, vols_found);
goto out;
}
@@ -1632,7 +1636,8 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
if (err && err != UBI_IO_BITFLIPS) {
- ubi_err("VID header is not OK (%d)", err);
+ ubi_err(ubi, "VID header is not OK (%d)",
+ err);
if (err > 0)
err = -EIO;
return err;
@@ -1641,37 +1646,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
if (av->vol_type != vol_type) {
- ubi_err("bad vol_type");
+ ubi_err(ubi, "bad vol_type");
goto bad_vid_hdr;
}
if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
- ubi_err("bad sqnum %llu", aeb->sqnum);
+ ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
- ubi_err("bad vol_id %d", av->vol_id);
+ ubi_err(ubi, "bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
if (av->compat != vidh->compat) {
- ubi_err("bad compat %d", vidh->compat);
+ ubi_err(ubi, "bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad lnum %d", aeb->lnum);
+ ubi_err(ubi, "bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
- ubi_err("bad used_ebs %d", av->used_ebs);
+ ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
- ubi_err("bad data_pad %d", av->data_pad);
+ ubi_err(ubi, "bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
@@ -1680,12 +1685,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
continue;
if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad highest_lnum %d", av->highest_lnum);
+ ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
- ubi_err("bad last_data_size %d", av->last_data_size);
+ ubi_err(ubi, "bad last_data_size %d",
+ av->last_data_size);
goto bad_vid_hdr;
}
}
@@ -1726,7 +1732,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
if (!buf[pnum]) {
- ubi_err("PEB %d is not referred", pnum);
+ ubi_err(ubi, "PEB %d is not referred", pnum);
err = 1;
}
@@ -1736,18 +1742,18 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
return 0;
bad_aeb:
- ubi_err("bad attaching information about LEB %d", aeb->lnum);
+ ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_av(av);
goto out;
bad_av:
- ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
goto out;
bad_vid_hdr:
- ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
ubi_dump_vid_hdr(vidh);
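The attach.c hunks above are part of a tree-wide change that threads the UBI device object into ubi_err()/ubi_warn()/ubi_msg(), so every message can be tagged with the device it concerns. A rough userspace sketch of that pattern follows; the struct and macro here are illustrative stand-ins, not the real definitions in drivers/mtd/ubi/ubi.h, and ##__VA_ARGS__ is a GNU extension (gcc/clang).

#include <stdio.h>

/* Hypothetical stand-in for struct ubi_device; only the one field the
 * sketch needs. */
struct ubi_device {
	int ubi_num;
};

/* Device-aware error macro: every message is prefixed with the UBI
 * device number, which is what passing 'ubi' as the first argument to
 * ubi_err()/ubi_warn()/ubi_msg() enables in this series. */
#define ubi_err(ubi, fmt, ...) \
	fprintf(stderr, "UBI-%d error: " fmt "\n", (ubi)->ubi_num, ##__VA_ARGS__)

int main(void)
{
	struct ubi_device dev = { .ubi_num = 0 };

	ubi_err(&dev, "inconsistent vol_id");
	ubi_err(&dev, "bad image sequence number %d in PEB %d, expected %d",
		7, 42, 3);
	return 0;
}

On systems with several attached UBI devices this makes it possible to tell which device a given "inconsistent vol_id" or "bad image sequence number" line came from.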
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 8876c7d3d712..6b6bce28bd63 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -111,13 +111,13 @@ static int __init ubiblock_set_param(const char *val,
len = strnlen(val, UBIBLOCK_PARAM_LEN);
if (len == 0) {
- ubi_warn("block: empty 'block=' parameter - ignored\n");
+ pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
return 0;
}
if (len == UBIBLOCK_PARAM_LEN) {
- ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
- val, UBIBLOCK_PARAM_LEN);
+ pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
+ val, UBIBLOCK_PARAM_LEN);
return -EINVAL;
}
@@ -188,9 +188,8 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
ret = ubi_read(dev->desc, leb, buffer, offset, len);
if (ret) {
- ubi_err("%s: error %d while reading from LEB %d (offset %d, "
- "length %d)", dev->gd->disk_name, ret, leb, offset,
- len);
+ dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
+ ret, leb, offset, len);
return ret;
}
return 0;
@@ -328,8 +327,8 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
if (IS_ERR(dev->desc)) {
- ubi_err("%s failed to open ubi volume %d_%d",
- dev->gd->disk_name, dev->ubi_num, dev->vol_id);
+ dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
+ dev->ubi_num, dev->vol_id);
ret = PTR_ERR(dev->desc);
dev->desc = NULL;
goto out_unlock;
@@ -405,7 +404,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Initialize the gendisk of this ubiblock device */
gd = alloc_disk(1);
if (!gd) {
- ubi_err("block: alloc_disk failed");
+ pr_err("UBI: block: alloc_disk failed");
ret = -ENODEV;
goto out_free_dev;
}
@@ -421,7 +420,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
spin_lock_init(&dev->queue_lock);
dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
if (!dev->rq) {
- ubi_err("block: blk_init_queue failed");
+ dev_err(disk_to_dev(gd), "blk_init_queue failed");
ret = -ENODEV;
goto out_put_disk;
}
@@ -446,8 +445,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
- ubi_msg("%s created from ubi%d:%d(%s)",
- dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
return 0;
out_free_queue:
@@ -464,7 +463,7 @@ static void ubiblock_cleanup(struct ubiblock *dev)
{
del_gendisk(dev->gd);
blk_cleanup_queue(dev->rq);
- ubi_msg("%s released", dev->gd->disk_name);
+ dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
}
@@ -518,8 +517,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
}
if ((sector_t)disk_capacity != disk_capacity) {
mutex_unlock(&devices_mutex);
- ubi_warn("%s: the volume is too big (%d LEBs), cannot resize",
- dev->gd->disk_name, vi->size);
+ dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
+ vi->size);
return -EFBIG;
}
@@ -527,8 +526,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
if (get_capacity(dev->gd) != disk_capacity) {
set_capacity(dev->gd, disk_capacity);
- ubi_msg("%s resized to %lld bytes", dev->gd->disk_name,
- vi->used_bytes);
+ dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
+ vi->used_bytes);
}
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
@@ -596,8 +595,8 @@ static int __init ubiblock_create_from_param(void)
desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
if (IS_ERR(desc)) {
- ubi_err("block: can't open volume, err=%ld\n",
- PTR_ERR(desc));
+ pr_err("UBI: block: can't open volume, err=%ld\n",
+ PTR_ERR(desc));
ret = PTR_ERR(desc);
break;
}
@@ -607,8 +606,8 @@ static int __init ubiblock_create_from_param(void)
ret = ubiblock_create(&vi);
if (ret) {
- ubi_err("block: can't add '%s' volume, err=%d\n",
- vi.name, ret);
+ pr_err("UBI: block: can't add '%s' volume, err=%d\n",
+ vi.name, ret);
break;
}
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6e30a3c280d0..3405be46ebe9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -166,7 +166,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
case UBI_VOLUME_RESIZED:
case UBI_VOLUME_RENAMED:
if (ubi_update_fastmap(ubi)) {
- ubi_err("Unable to update fastmap!");
+ ubi_err(ubi, "Unable to update fastmap!");
ubi_ro_mode(ubi);
}
}
@@ -517,7 +517,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
*/
err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
if (err) {
- ubi_err("cannot register UBI character devices");
+ ubi_err(ubi, "cannot register UBI character devices");
return err;
}
@@ -528,7 +528,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
err = cdev_add(&ubi->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device");
+ ubi_err(ubi, "cannot add character device");
goto out_unreg;
}
@@ -540,7 +540,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
if (ubi->volumes[i]) {
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
- ubi_err("cannot add volume %d", i);
+ ubi_err(ubi, "cannot add volume %d", i);
goto out_volumes;
}
}
@@ -556,7 +556,8 @@ out_sysfs:
cdev_del(&ubi->cdev);
out_unreg:
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
- ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
+ ubi_err(ubi, "cannot initialize UBI %s, error %d",
+ ubi->ubi_name, err);
return err;
}
@@ -650,7 +651,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
* guess we should just pick the largest region. But this is
* not implemented.
*/
- ubi_err("multiple regions, not implemented");
+ ubi_err(ubi, "multiple regions, not implemented");
return -EINVAL;
}
@@ -685,7 +686,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
* which allows us to avoid costly division operations.
*/
if (!is_power_of_2(ubi->min_io_size)) {
- ubi_err("min. I/O unit (%d) is not power of 2",
+ ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
ubi->min_io_size);
return -EINVAL;
}
@@ -702,7 +703,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
if (ubi->max_write_size < ubi->min_io_size ||
ubi->max_write_size % ubi->min_io_size ||
!is_power_of_2(ubi->max_write_size)) {
- ubi_err("bad write buffer size %d for %d min. I/O unit",
+ ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
ubi->max_write_size, ubi->min_io_size);
return -EINVAL;
}
@@ -739,7 +740,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
- ubi_err("unaligned VID header shift %d",
+ ubi_err(ubi, "unaligned VID header shift %d",
ubi->vid_hdr_shift);
return -EINVAL;
}
@@ -749,7 +750,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
ubi->leb_start & (ubi->min_io_size - 1)) {
- ubi_err("bad VID header (%d) or data offsets (%d)",
+ ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
ubi->vid_hdr_offset, ubi->leb_start);
return -EINVAL;
}
@@ -769,14 +770,14 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
+ ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
ubi->mtd->index);
ubi->ro_mode = 1;
}
@@ -809,7 +810,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
int err, old_reserved_pebs = vol->reserved_pebs;
if (ubi->ro_mode) {
- ubi_warn("skip auto-resize because of R/O mode");
+ ubi_warn(ubi, "skip auto-resize because of R/O mode");
return 0;
}
@@ -830,21 +831,22 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
- ubi_err("cannot clean auto-resize flag for volume %d",
+ ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
vol_id);
} else {
desc.vol = vol;
err = ubi_resize_volume(&desc,
old_reserved_pebs + ubi->avail_pebs);
if (err)
- ubi_err("cannot auto-resize volume %d", vol_id);
+ ubi_err(ubi, "cannot auto-resize volume %d",
+ vol_id);
}
if (err)
return err;
- ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
- vol->name, old_reserved_pebs, vol->reserved_pebs);
+ ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
+ vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
return 0;
}
@@ -885,7 +887,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- ubi_err("mtd%d is already attached to ubi%d",
+ ubi_err(ubi, "mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -900,7 +902,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
mtd->index);
return -EINVAL;
}
@@ -911,7 +913,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- ubi_err("only %d UBI devices may be created",
+ ubi_err(ubi, "only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -921,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- ubi_err("ubi%d already exists", ubi_num);
+ ubi_err(ubi, "ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -953,13 +955,14 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
<= UBI_FM_MAX_START) {
- ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
UBI_FM_MAX_START);
ubi->fm_disabled = 1;
}
- ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
- ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "default fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
#else
ubi->fm_disabled = 1;
#endif
@@ -970,7 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
mutex_init(&ubi->fm_mutex);
init_rwsem(&ubi->fm_sem);
- ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+ ubi_msg(ubi, "attaching mtd%d to ubi%d", mtd->index, ubi_num);
err = io_init(ubi, max_beb_per1024);
if (err)
@@ -989,7 +992,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
#endif
err = ubi_attach(ubi, 0);
if (err) {
- ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
+ ubi_err(ubi, "failed to attach mtd%d, error %d",
+ mtd->index, err);
goto out_free;
}
@@ -1010,28 +1014,28 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
- ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
- err);
+ ubi_err(ubi, "cannot spawn \"%s\", error %d",
+ ubi->bgt_name, err);
goto out_debugfs;
}
- ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
- mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
- ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
+ mtd->index, mtd->name, ubi->flash_size >> 20);
+ ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
- ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
- ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
- ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
ubi->vtbl_slots);
- ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
ubi->image_seq);
- ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
@@ -1100,7 +1104,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EBUSY;
}
/* This may only happen if there is a bug */
- ubi_err("%s reference count %d, destroy anyway",
+ ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
ubi_devices[ubi_num] = NULL;
@@ -1108,7 +1112,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
- ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+ ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
/* If we don't write a new fastmap at detach time we lose all
* EC updates that have been made since the last written fastmap. */
@@ -1136,7 +1140,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
- ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+ ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
put_device(&ubi->dev);
return 0;
}
@@ -1218,7 +1222,8 @@ static int __init ubi_init(void)
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
if (mtd_devs > UBI_MAX_DEVICES) {
- ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
+ pr_err("UBI error: too many MTD devices, maximum is %d",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
@@ -1226,19 +1231,19 @@ static int __init ubi_init(void)
ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
if (IS_ERR(ubi_class)) {
err = PTR_ERR(ubi_class);
- ubi_err("cannot create UBI class");
+ pr_err("UBI error: cannot create UBI class");
goto out;
}
err = class_create_file(ubi_class, &ubi_version);
if (err) {
- ubi_err("cannot create sysfs file");
+ pr_err("UBI error: cannot create sysfs file");
goto out_class;
}
err = misc_register(&ubi_ctrl_cdev);
if (err) {
- ubi_err("cannot register device");
+ pr_err("UBI error: cannot register device");
goto out_version;
}
@@ -1265,7 +1270,8 @@ static int __init ubi_init(void)
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- ubi_err("cannot open mtd %s, error %d", p->name, err);
+ pr_err("UBI error: cannot open mtd %s, error %d",
+ p->name, err);
/* See comment below re-ubi_is_module(). */
if (ubi_is_module())
goto out_detach;
@@ -1277,7 +1283,8 @@ static int __init ubi_init(void)
p->vid_hdr_offs, p->max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
- ubi_err("cannot attach mtd%d", mtd->index);
+ pr_err("UBI error: cannot attach mtd%d",
+ mtd->index);
put_mtd_device(mtd);
/*
@@ -1300,7 +1307,7 @@ static int __init ubi_init(void)
err = ubiblock_init();
if (err) {
- ubi_err("block: cannot initialize, error %d", err);
+ pr_err("UBI error: block: cannot initialize, error %d", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
@@ -1326,7 +1333,7 @@ out_version:
out_class:
class_destroy(ubi_class);
out:
- ubi_err("cannot initialize UBI, error %d", err);
+ pr_err("UBI error: cannot initialize UBI, error %d", err);
return err;
}
late_initcall(ubi_init);
@@ -1365,7 +1372,7 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
- ubi_err("incorrect bytes count: \"%s\"\n", str);
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1381,7 +1388,7 @@ static int __init bytes_str_to_int(const char *str)
case '\0':
break;
default:
- ubi_err("incorrect bytes count: \"%s\"\n", str);
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1408,20 +1415,20 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- ubi_err("too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
+ pr_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- ubi_err("parameter \"%s\" is too long, max. is %d\n",
- val, MTD_PARAM_LEN_MAX);
+ pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
+ pr_err("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
@@ -1435,7 +1442,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- ubi_err("too many arguments at \"%s\"\n", val);
+ pr_err("UBI error: too many arguments at \"%s\"\n", val);
return -EINVAL;
}
@@ -1455,8 +1462,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->max_beb_per1024);
if (err) {
- ubi_err("bad value for max_beb_per1024 parameter: %s",
- token);
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ token);
return -EINVAL;
}
}
@@ -1466,7 +1473,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->ubi_num);
if (err) {
- ubi_err("bad value for ubi_num parameter: %s", token);
+ pr_err("UBI error: bad value for ubi_num parameter: %s",
+ token);
return -EINVAL;
}
} else
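Two things stand out in the build.c hunks: error paths that run before any UBI device exists (module init, mtd= parameter parsing) fall back to plain pr_err() with a "UBI error:" prefix, and bad size strings are reported through bytes_str_to_int(). As a hedged illustration of what such a size parser does, here is a standalone userspace sketch; the kernel helper uses simple_strtoul() and its exact suffix and overflow handling are not fully visible in the hunk above, so treat the suffix set below as an assumption.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a size string with an optional k/M/G suffix into bytes.
 * Illustrative only; not the real bytes_str_to_int() from build.c. */
static long long bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long long result = strtoull(str, &endp, 0);

	if (endp == str || result >= INT_MAX) {
		fprintf(stderr, "UBI error: incorrect bytes count: \"%s\"\n", str);
		return -1;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
		break;
	case '\0':
		break;
	default:
		fprintf(stderr, "UBI error: incorrect bytes count: \"%s\"\n", str);
		return -1;
	}
	return (long long)result;
}

int main(void)
{
	printf("%lld\n", bytes_str_to_int("2M"));	/* 2097152 */
	printf("%lld\n", bytes_str_to_int("4096"));	/* 4096 */
	return 0;
}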
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 59de69a24e40..3410ea8109f8 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -48,13 +48,14 @@
/**
* get_exclusive - get exclusive access to an UBI volume.
+ * @ubi: UBI device description object
* @desc: volume descriptor
*
* This function changes UBI volume open mode to "exclusive". Returns previous
* mode value (positive integer) in case of success and a negative error code
* in case of failure.
*/
-static int get_exclusive(struct ubi_volume_desc *desc)
+static int get_exclusive(struct ubi_device *ubi, struct ubi_volume_desc *desc)
{
int users, err;
struct ubi_volume *vol = desc->vol;
@@ -63,7 +64,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
- ubi_err("%d users for volume %d", users, vol->vol_id);
+ ubi_err(ubi, "%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
@@ -134,7 +135,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
- ubi_warn("update of volume %d not finished, volume is damaged",
+ ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
vol->vol_id);
ubi_assert(!vol->changing_leb);
vol->updating = 0;
@@ -158,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
- ubi_err("updating");
+ ubi_err(vol->ubi, "updating");
return -EBUSY;
}
@@ -193,11 +194,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
count, *offp, vol->vol_id);
if (vol->updating) {
- ubi_err("updating");
+ ubi_err(vol->ubi, "updating");
return -EBUSY;
}
if (vol->upd_marker) {
- ubi_err("damaged volume, update marker is set");
+ ubi_err(vol->ubi, "damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
@@ -277,7 +278,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
- ubi_err("unaligned position");
+ ubi_err(ubi, "unaligned position");
return -EINVAL;
}
@@ -286,7 +287,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
- ubi_err("unaligned write length");
+ ubi_err(ubi, "unaligned write length");
return -EINVAL;
}
@@ -348,7 +349,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
err = ubi_more_leb_change_data(ubi, vol, buf, count);
if (err < 0) {
- ubi_err("cannot accept more %zd bytes of data, error %d",
+ ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
count, err);
return err;
}
@@ -370,7 +371,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
return err;
if (err) {
- ubi_warn("volume %d on UBI device %d is corrupted",
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
vol->vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
@@ -420,7 +421,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- err = get_exclusive(desc);
+ err = get_exclusive(ubi, desc);
if (err < 0)
break;
@@ -456,7 +457,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- err = get_exclusive(desc);
+ err = get_exclusive(ubi, desc);
if (err < 0)
break;
@@ -642,7 +643,7 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
return 0;
bad:
- ubi_err("bad volume creation request");
+ ubi_err(ubi, "bad volume creation request");
ubi_dump_mkvol_req(req);
return err;
}
@@ -708,12 +709,12 @@ static int rename_volumes(struct ubi_device *ubi,
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
- ubi_err("duplicated volume id %d",
+ ubi_err(ubi, "duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
- ubi_err("duplicated volume name \"%s\"",
+ ubi_err(ubi, "duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
@@ -736,7 +737,8 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
- ubi_err("cannot open volume %d, error %d", vol_id, err);
+ ubi_err(ubi, "cannot open volume %d, error %d",
+ vol_id, err);
kfree(re);
goto out_free;
}
@@ -795,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
continue;
/* The volume exists but busy, or an error occurred */
- ubi_err("cannot open volume \"%s\", error %d",
+ ubi_err(ubi, "cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 63cb1d7236ce..7335c9ff9d99 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -43,12 +43,12 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
return;
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
err, len, pnum, offset, read);
goto out;
}
- ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
len, pnum, offset);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
out:
@@ -238,8 +238,8 @@ int ubi_debugfs_init(void)
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
- ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
- err);
+ pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
return err;
}
@@ -433,7 +433,7 @@ out_remove:
debugfs_remove_recursive(d->dfs_dir);
out:
err = dent ? PTR_ERR(dent) : -ENODEV;
- ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
fname, err);
return err;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 2402d3b50171..a40020cf0923 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -422,7 +422,7 @@ retry:
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
- ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
pnum, vol_id, lnum);
err = -EBADMSG;
} else
@@ -448,7 +448,7 @@ retry:
goto out_unlock;
scrub = 1;
if (!check) {
- ubi_msg("force data checking");
+ ubi_msg(ubi, "force data checking");
check = 1;
goto retry;
}
@@ -459,7 +459,7 @@ retry:
if (check) {
uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
if (crc1 != crc) {
- ubi_warn("CRC error: calculated %#08x, must be %#08x",
+ ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
crc1, crc);
err = -EBADMSG;
goto out_unlock;
@@ -513,7 +513,8 @@ retry:
return new_pnum;
}
- ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
+ ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+ pnum, new_pnum);
err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
if (err && err != UBI_IO_BITFLIPS) {
@@ -554,7 +555,7 @@ retry:
up_read(&ubi->fm_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- ubi_msg("data was successfully recovered");
+ ubi_msg(ubi, "data was successfully recovered");
return 0;
out_unlock:
@@ -569,13 +570,13 @@ write_error:
* Bad luck? This physical eraseblock is bad too? Crud. Let's try to
* get another one.
*/
- ubi_warn("failed to write to PEB %d", new_pnum);
+ ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
- ubi_msg("try again");
+ ubi_msg(ubi, "try again");
goto retry;
}
@@ -613,7 +614,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write data to PEB %d", pnum);
+ ubi_warn(ubi, "failed to write data to PEB %d", pnum);
if (err == -EIO && ubi->bad_allowed)
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
offset, len);
@@ -654,7 +655,7 @@ retry:
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
- ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
goto write_error;
}
@@ -662,7 +663,7 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
goto write_error;
}
@@ -698,7 +699,7 @@ write_error:
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg("try another PEB");
+ ubi_msg(ubi, "try another PEB");
goto retry;
}
@@ -775,14 +776,14 @@ retry:
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
- ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
goto write_error;
}
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
if (err) {
- ubi_warn("failed to write %d bytes of data to PEB %d",
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
goto write_error;
}
@@ -818,7 +819,7 @@ write_error:
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg("try another PEB");
+ ubi_msg(ubi, "try another PEB");
goto retry;
}
@@ -893,14 +894,14 @@ retry:
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
- ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
goto write_error;
}
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
if (err) {
- ubi_warn("failed to write %d bytes of data to PEB %d",
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
goto write_error;
}
@@ -940,7 +941,7 @@ write_error:
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg("try another PEB");
+ ubi_msg(ubi, "try another PEB");
goto retry;
}
@@ -1063,7 +1064,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
dbg_wl("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data from PEB %d",
+ ubi_warn(ubi, "error %d while reading data from PEB %d",
err, from);
err = MOVE_SOURCE_RD_ERR;
goto out_unlock_buf;
@@ -1113,7 +1114,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading VID header back from PEB %d",
+ ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
@@ -1140,7 +1141,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data back from PEB %d",
+ ubi_warn(ubi, "error %d while reading data back from PEB %d",
err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
@@ -1152,7 +1153,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn("read data back from PEB %d and it is different",
+ ubi_warn(ubi, "read data back from PEB %d and it is different",
to);
err = -EINVAL;
goto out_unlock_buf;
@@ -1205,10 +1206,10 @@ static void print_rsvd_warning(struct ubi_device *ubi,
return;
}
- ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
- ubi_warn("%d PEBs are corrupted and not used",
+ ubi_warn(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
}
@@ -1286,7 +1287,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
fm_eba[i][j] == UBI_LEB_UNMAPPED)
continue;
- ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+ ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
vol->vol_id, i, fm_eba[i][j],
scan_eba[i][j]);
ubi_assert(0);
@@ -1366,10 +1367,10 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
- ubi_err("no enough physical eraseblocks (%d, need %d)",
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
if (ubi->corr_peb_count)
- ubi_err("%d PEBs are corrupted and not used",
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_free;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index cfd5b5e90156..b56672bf3294 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -330,7 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (found)
av = tmp_av;
else {
- ubi_err("orphaned volume in fastmap pool!");
+ ubi_err(ubi, "orphaned volume in fastmap pool!");
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
return UBI_BAD_FASTMAP;
}
@@ -414,14 +414,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
pnum = be32_to_cpu(pebs[i]);
if (ubi_io_is_bad(ubi, pnum)) {
- ubi_err("bad PEB in fastmap pool!");
+ ubi_err(ubi, "bad PEB in fastmap pool!");
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
- ubi_err("unable to read EC header! PEB:%i err:%i",
+ ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
@@ -435,7 +435,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
image_seq = be32_to_cpu(ech->image_seq);
if (image_seq && (image_seq != ubi->image_seq)) {
- ubi_err("bad image seq: 0x%x, expected: 0x%x",
+ ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto out;
@@ -493,7 +493,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
} else {
/* We are paranoid and fall back to scanning mode */
- ubi_err("fastmap pool PEBs contains damaged PEBs!");
+ ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
@@ -588,7 +588,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
goto fail_bad;
if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
- ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
goto fail_bad;
}
@@ -598,7 +598,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
- ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
@@ -608,7 +608,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
- ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
@@ -619,25 +619,26 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
- ubi_err("bad pool size: %i", pool_size);
+ ubi_err(ubi, "bad pool size: %i", pool_size);
goto fail_bad;
}
if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
- ubi_err("bad WL pool size: %i", wl_pool_size);
+ ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
goto fail_bad;
}
if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_pool_size < 0) {
- ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
goto fail_bad;
}
if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_wl_pool_size < 0) {
- ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ ubi_err(ubi, "bad maximal WL pool size: %i",
+ fm->max_wl_pool_size);
goto fail_bad;
}
@@ -696,8 +697,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
goto fail_bad;
if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
- ubi_err("bad fastmap vol header magic: 0x%x, " \
- "expected: 0x%x",
+ ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
goto fail_bad;
}
@@ -722,8 +722,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
goto fail_bad;
if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
- ubi_err("bad fastmap EBA header magic: 0x%x, " \
- "expected: 0x%x",
+ ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
goto fail_bad;
}
@@ -788,7 +787,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
int err;
if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
- ubi_err("bad PEB in fastmap EBA orphan list");
+ ubi_err(ubi, "bad PEB in fastmap EBA orphan list");
ret = UBI_BAD_FASTMAP;
kfree(ech);
goto fail;
@@ -796,8 +795,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
- ubi_err("unable to read EC header! PEB:%i " \
- "err:%i", tmp_aeb->pnum, err);
+ ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
+ tmp_aeb->pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
kfree(ech);
@@ -908,14 +907,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
fm->to_be_tortured[0] = 1;
if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
- ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
if (fmsb->version != UBI_FM_FMT_VERSION) {
- ubi_err("bad fastmap version: %i, expected: %i",
+ ubi_err(ubi, "bad fastmap version: %i, expected: %i",
fmsb->version, UBI_FM_FMT_VERSION);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
@@ -923,15 +922,16 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
used_blocks = be32_to_cpu(fmsb->used_blocks);
if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
- ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ubi_err(ubi, "number of fastmap blocks is invalid: %i",
+ used_blocks);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
fm_size = ubi->leb_size * used_blocks;
if (fm_size != ubi->fm_size) {
- ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
- ubi->fm_size);
+ ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
+ fm_size, ubi->fm_size);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
@@ -960,7 +960,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
- ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
i, pnum);
if (ret > 0)
ret = UBI_BAD_FASTMAP;
@@ -977,7 +977,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
* we shouldn't fail if image_seq == 0.
*/
if (image_seq && (image_seq != ubi->image_seq)) {
- ubi_err("wrong image seq:%d instead of %d",
+ ubi_err(ubi, "wrong image seq:%d instead of %d",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
@@ -985,15 +985,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
- ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum);
goto free_hdr;
}
if (i == 0) {
if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
- ubi_err("bad fastmap anchor vol_id: 0x%x," \
- " expected: 0x%x",
+ ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_SB_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
@@ -1001,8 +1000,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
} else {
if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
- ubi_err("bad fastmap data vol_id: 0x%x," \
- " expected: 0x%x",
+ ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_DATA_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
@@ -1016,7 +1014,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
ubi->leb_start, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
- ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
"err: %i)", i, pnum, ret);
goto free_hdr;
}
@@ -1030,8 +1028,9 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
fmsb2->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
if (crc != tmp_crc) {
- ubi_err("fastmap data CRC is invalid");
- ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ubi_err(ubi, "fastmap data CRC is invalid");
+ ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
+ tmp_crc, crc);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
@@ -1067,9 +1066,10 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi->fm = fm;
ubi->fm_pool.max_size = ubi->fm->max_pool_size;
ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
- ubi_msg("attached by fastmap");
- ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
- ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi_msg(ubi, "attached by fastmap");
+ ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
ubi_free_vid_hdr(ubi, vh);
@@ -1077,7 +1077,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
out:
mutex_unlock(&ubi->fm_mutex);
if (ret == UBI_BAD_FASTMAP)
- ubi_err("Attach by fastmap failed, doing a full scan!");
+ ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
return ret;
free_hdr:
@@ -1273,7 +1273,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
if (ret) {
- ubi_err("unable to write vid_hdr to fastmap SB!");
+ ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
goto out_kfree;
}
@@ -1293,7 +1293,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
if (ret) {
- ubi_err("unable to write vid_hdr to PEB %i!",
+ ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
goto out_kfree;
}
@@ -1303,7 +1303,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
if (ret) {
- ubi_err("unable to write fastmap to PEB %i!",
+ ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
goto out_kfree;
}
@@ -1450,7 +1450,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
ubi->fm = NULL;
if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
- ubi_err("fastmap too large");
+ ubi_err(ubi, "fastmap too large");
ret = -ENOSPC;
goto err;
}
@@ -1462,7 +1462,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
if (!tmp_e && !old_fm) {
int j;
- ubi_err("could not get any free erase block");
+ ubi_err(ubi, "could not get any free erase block");
for (j = 1; j < i; j++)
ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
@@ -1478,7 +1478,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
ubi_wl_put_fm_peb(ubi, new_fm->e[j],
j, 0);
- ubi_err("could not erase old fastmap PEB");
+ ubi_err(ubi, "could not erase old fastmap PEB");
goto err;
}
@@ -1504,7 +1504,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
ret = erase_block(ubi, old_fm->e[0]->pnum);
if (ret < 0) {
int i;
- ubi_err("could not erase old anchor PEB");
+ ubi_err(ubi, "could not erase old anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++)
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
@@ -1525,7 +1525,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
} else {
if (!tmp_e) {
int i;
- ubi_err("could not find any anchor PEB");
+ ubi_err(ubi, "could not find any anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++)
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
@@ -1555,13 +1555,13 @@ out_unlock:
err:
kfree(new_fm);
- ubi_warn("Unable to write new fastmap, err=%i", ret);
+ ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
ret = 0;
if (old_fm) {
ret = invalidate_fastmap(ubi, old_fm);
if (ret < 0)
- ubi_err("Unable to invalidiate current fastmap!");
+ ubi_err(ubi, "Unable to invalidiate current fastmap!");
else if (ret)
ret = 0;
}
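
The fastmap hunks above only thread the ubi device object through the log helpers; the scan logic is untouched: every consistency check (magic, version, block count, size, data CRC) sets ret = UBI_BAD_FASTMAP so the caller can fall back to a full scan instead of failing hard. Below is a minimal userspace-style sketch of that validate-or-fall-back shape, using zlib's crc32() in place of the kernel helper and a CRC stored in the blob's last four bytes; all names and the layout are hypothetical, not the driver's on-flash format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>                       /* crc32() */

#define FM_BAD_FASTMAP 1                /* stand-in for UBI_BAD_FASTMAP */

/* Validate a fastmap-like blob whose last 4 bytes hold its CRC. */
static int check_fastmap_blob(const unsigned char *buf, size_t len)
{
        uint32_t stored, calc;

        if (len < 4)
                return FM_BAD_FASTMAP;
        memcpy(&stored, buf + len - 4, sizeof(stored));
        calc = (uint32_t)crc32(0L, buf, (uInt)(len - 4));
        if (calc != stored) {
                fprintf(stderr, "fastmap data CRC is invalid\n");
                fprintf(stderr, "CRC should be: 0x%x, calc: 0x%x\n",
                        (unsigned)stored, (unsigned)calc);
                return FM_BAD_FASTMAP;  /* caller falls back to a full scan */
        }
        return 0;
}

int main(void)
{
        unsigned char blob[8] = "data";
        uint32_t crc = (uint32_t)crc32(0L, blob, 4);

        memcpy(blob + 4, &crc, sizeof(crc));
        return check_fastmap_blob(blob, sizeof(blob)) ? 1 : 0;
}
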
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index d36134925d31..396aaa543362 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -177,19 +177,20 @@ retry:
* enabled. A corresponding message will be printed
* later, when it has been scrubbed.
*/
- ubi_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
+ pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
- ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
- ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
err, errstr, len, pnum, offset, read);
dump_stack();
@@ -246,7 +247,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
if (ubi->ro_mode) {
- ubi_err("read-only mode");
+ ubi_err(ubi, "read-only mode");
return -EROFS;
}
@@ -273,7 +274,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
}
if (ubi_dbg_is_write_failure(ubi)) {
- ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
len, pnum, offset);
dump_stack();
return -EIO;
@@ -282,7 +283,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
err, len, pnum, offset, written);
dump_stack();
ubi_dump_flash(ubi, pnum, offset, len);
@@ -338,7 +339,7 @@ static int do_sync_erase(struct ubi_device *ubi, int pnum)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
if (ubi->ro_mode) {
- ubi_err("read-only mode");
+ ubi_err(ubi, "read-only mode");
return -EROFS;
}
@@ -355,12 +356,12 @@ retry:
err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- ubi_warn("error %d while erasing PEB %d, retry",
+ ubi_warn(ubi, "error %d while erasing PEB %d, retry",
err, pnum);
yield();
goto retry;
}
- ubi_err("cannot erase PEB %d, error %d", pnum, err);
+ ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
dump_stack();
return err;
}
@@ -368,17 +369,18 @@ retry:
err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
ei.state == MTD_ERASE_FAILED);
if (err) {
- ubi_err("interrupted PEB %d erasure", pnum);
+ ubi_err(ubi, "interrupted PEB %d erasure", pnum);
return -EINTR;
}
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- ubi_warn("error while erasing PEB %d, retry", pnum);
+ ubi_warn(ubi, "error while erasing PEB %d, retry",
+ pnum);
yield();
goto retry;
}
- ubi_err("cannot erase PEB %d", pnum);
+ ubi_err(ubi, "cannot erase PEB %d", pnum);
dump_stack();
return -EIO;
}
@@ -388,7 +390,7 @@ retry:
return err;
if (ubi_dbg_is_erase_failure(ubi)) {
- ubi_err("cannot erase PEB %d (emulated)", pnum);
+ ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -411,7 +413,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
{
int err, i, patt_count;
- ubi_msg("run torture test for PEB %d", pnum);
+ ubi_msg(ubi, "run torture test for PEB %d", pnum);
patt_count = ARRAY_SIZE(patterns);
ubi_assert(patt_count > 0);
@@ -428,7 +430,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
if (err == 0) {
- ubi_err("erased PEB %d, but a non-0xFF byte found",
+ ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
pnum);
err = -EIO;
goto out;
@@ -448,7 +450,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
err = ubi_check_pattern(ubi->peb_buf, patterns[i],
ubi->peb_size);
if (err == 0) {
- ubi_err("pattern %x checking failed for PEB %d",
+ ubi_err(ubi, "pattern %x checking failed for PEB %d",
patterns[i], pnum);
err = -EIO;
goto out;
@@ -456,7 +458,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
}
err = patt_count;
- ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
+ ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
out:
mutex_unlock(&ubi->buf_mutex);
@@ -466,7 +468,7 @@ out:
* has not passed because it happened on a freshly erased
* physical eraseblock which means something is wrong with it.
*/
- ubi_err("read problems on freshly erased PEB %d, must be bad",
+ ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
pnum);
err = -EIO;
}
@@ -542,7 +544,7 @@ error:
* it. Supposedly the flash media or the driver is screwed up, so
* return an error.
*/
- ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
+ ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
@@ -574,7 +576,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
return err;
if (ubi->ro_mode) {
- ubi_err("read-only mode");
+ ubi_err(ubi, "read-only mode");
return -EROFS;
}
@@ -616,7 +618,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
- ubi_err("error %d while checking if PEB %d is bad",
+ ubi_err(ubi, "error %d while checking if PEB %d is bad",
ret, pnum);
else if (ret)
dbg_io("PEB %d is bad", pnum);
@@ -642,7 +644,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
if (ubi->ro_mode) {
- ubi_err("read-only mode");
+ ubi_err(ubi, "read-only mode");
return -EROFS;
}
@@ -651,7 +653,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
- ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
+ ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
return err;
}
@@ -674,32 +676,32 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
+ ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
if (vid_hdr_offset != ubi->vid_hdr_offset) {
- ubi_err("bad VID header offset %d, expected %d",
+ ubi_err(ubi, "bad VID header offset %d, expected %d",
vid_hdr_offset, ubi->vid_hdr_offset);
goto bad;
}
if (leb_start != ubi->leb_start) {
- ubi_err("bad data offset %d, expected %d",
+ ubi_err(ubi, "bad data offset %d, expected %d",
leb_start, ubi->leb_start);
goto bad;
}
if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
- ubi_err("bad erase counter %lld", ec);
+ ubi_err(ubi, "bad erase counter %lld", ec);
goto bad;
}
return 0;
bad:
- ubi_err("bad EC header");
+ ubi_err(ubi, "bad EC header");
ubi_dump_ec_hdr(ec_hdr);
dump_stack();
return 1;
@@ -765,7 +767,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
- ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
pnum);
dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
pnum);
@@ -780,7 +782,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dump_ec_hdr(ec_hdr);
}
@@ -794,7 +796,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
ubi_dump_ec_hdr(ec_hdr);
}
@@ -810,7 +812,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
/* And of course validate what has just been read from the media */
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("validation failed for PEB %d", pnum);
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
return -EINVAL;
}
@@ -884,40 +886,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- ubi_err("bad copy_flag");
+ ubi_err(ubi, "bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- ubi_err("negative values");
+ ubi_err(ubi, "negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("bad vol_id");
+ ubi_err(ubi, "bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- ubi_err("bad compat");
+ ubi_err(ubi, "bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- ubi_err("bad compat");
+ ubi_err(ubi, "bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- ubi_err("bad vol_type");
+ ubi_err(ubi, "bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- ubi_err("bad data_pad");
+ ubi_err(ubi, "bad data_pad");
goto bad;
}
@@ -929,45 +931,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- ubi_err("zero used_ebs");
+ ubi_err(ubi, "zero used_ebs");
goto bad;
}
if (data_size == 0) {
- ubi_err("zero data_size");
+ ubi_err(ubi, "zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- ubi_err("bad data_size");
+ ubi_err(ubi, "bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- ubi_err("bad data_size at last LEB");
+ ubi_err(ubi, "bad data_size at last LEB");
goto bad;
}
} else {
- ubi_err("too high lnum");
+ ubi_err(ubi, "too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- ubi_err("non-zero data CRC");
+ ubi_err(ubi, "non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- ubi_err("non-zero data_size");
+ ubi_err(ubi, "non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- ubi_err("zero data_size of copy");
+ ubi_err(ubi, "zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- ubi_err("bad used_ebs");
+ ubi_err(ubi, "bad used_ebs");
goto bad;
}
}
@@ -975,7 +977,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
return 0;
bad:
- ubi_err("bad VID header");
+ ubi_err(ubi, "bad VID header");
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
return 1;
@@ -1020,7 +1022,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
pnum);
dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
pnum);
@@ -1031,7 +1033,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
}
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dump_vid_hdr(vid_hdr);
}
@@ -1045,7 +1047,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
ubi_dump_vid_hdr(vid_hdr);
}
@@ -1059,7 +1061,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("validation failed for PEB %d", pnum);
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
return -EINVAL;
}
@@ -1129,7 +1131,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
if (!err)
return err;
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
dump_stack();
return err > 0 ? -EINVAL : err;
}
@@ -1154,14 +1156,14 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
- ubi_err("bad magic %#08x, must be %#08x",
+ ubi_err(ubi, "bad magic %#08x, must be %#08x",
magic, UBI_EC_HDR_MAGIC);
goto fail;
}
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
goto fail;
}
@@ -1201,8 +1203,9 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
+ crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_ec_hdr(ec_hdr);
dump_stack();
err = -EINVAL;
@@ -1236,21 +1239,21 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
- ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
+ ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
magic, pnum, UBI_VID_HDR_MAGIC);
goto fail;
}
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
return -EINVAL;
@@ -1288,9 +1291,9 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
- ubi_err("self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
err = -EINVAL;
@@ -1329,7 +1332,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
if (!buf1) {
- ubi_err("cannot allocate memory to check writes");
+ ubi_err(ubi, "cannot allocate memory to check writes");
return 0;
}
@@ -1345,15 +1348,15 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (c == c1)
continue;
- ubi_err("self-check failed for PEB %d:%d, len %d",
+ ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
- ubi_msg("data differ at position %d", i);
+ ubi_msg(ubi, "data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
- ubi_msg("hex dump of the original buffer from %d to %d",
+ ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf + i, dump_len, 1);
- ubi_msg("hex dump of the read buffer from %d to %d",
+ ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
@@ -1393,20 +1396,20 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
if (!buf) {
- ubi_err("cannot allocate memory to check for 0xFFs");
+ ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
return 0;
}
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
pnum, offset, len);
goto fail;
}
@@ -1415,8 +1418,9 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
fail:
- ubi_err("self-check failed for PEB %d", pnum);
- ubi_msg("hex dump of the %d-%d region", offset, offset + len);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_msg(ubi, "hex dump of the %d-%d region",
+ offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
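
The io.c hunks are message conversions only; the underlying retry policy stays as it was: warn and retry a failed read or erase up to UBI_IO_RETRIES times, yielding between attempts, then give up with a hard error. A small standalone sketch of that bounded-retry shape, with a fake operation that succeeds on the third attempt (constants and helper names hypothetical):

#include <stdio.h>

#define IO_RETRIES 3                    /* stand-in for UBI_IO_RETRIES */

/* Fake I/O operation: fails twice, then succeeds. */
static int do_io(int attempt)
{
        return attempt < 2 ? -5 : 0;    /* -5 roughly plays the role of -EIO */
}

static int io_with_retry(void)
{
        int retries = 0, err;

retry:
        err = do_io(retries);
        if (err) {
                if (retries++ < IO_RETRIES) {
                        fprintf(stderr, "warning: error %d, retry\n", err);
                        goto retry;     /* the driver also yields here */
                }
                fprintf(stderr, "error %d, giving up\n", err);
                return err;
        }
        return 0;
}

int main(void)
{
        return io_with_retry();
}
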
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 3aac1acceeb4..f3bab669f6bb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -204,7 +204,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
return ERR_PTR(err);
}
if (err == 1) {
- ubi_warn("volume %d on UBI device %d is corrupted",
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
@@ -221,7 +221,7 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
- ubi_err("cannot open device %d, volume %d, error %d",
+ ubi_err(ubi, "cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
return ERR_PTR(err);
}
@@ -411,7 +411,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
- ubi_warn("mark volume %d as corrupted", vol_id);
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f913d701a5b3..dbda77e556cb 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -111,7 +111,7 @@ void ubi_update_reserved(struct ubi_device *ubi)
ubi->avail_pebs -= need;
ubi->rsvd_pebs += need;
ubi->beb_rsvd_pebs += need;
- ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+ ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
}
/**
@@ -128,7 +128,7 @@ void ubi_calculate_reserved(struct ubi_device *ubi)
ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
if (ubi->beb_rsvd_level < 0) {
ubi->beb_rsvd_level = 0;
- ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
ubi->bad_peb_count, ubi->bad_peb_limit);
}
}
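
For context, ubi_calculate_reserved(), whose warning gains the device argument above, computes the bad-PEB reserve as the configured limit minus the bad blocks already present, clamped at zero; a negative result means the limit is already exceeded and triggers the warning. A standalone sketch of that policy (function name hypothetical):

#include <stdio.h>

/* Bad-PEB reserve: limit minus already-bad blocks, clamped at zero. */
static int beb_reserve(int bad_peb_limit, int bad_peb_count)
{
        int level = bad_peb_limit - bad_peb_count;

        return level < 0 ? 0 : level;
}

int main(void)
{
        printf("%d\n", beb_reserve(20, 3));  /* 17 PEBs kept in reserve */
        printf("%d\n", beb_reserve(20, 25)); /* 0: limit already exceeded */
        return 0;
}
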
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 320fc38fa2a1..f80ffaba9058 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -50,13 +50,14 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(ubi, fmt, ...) pr_notice("UBI-%d: %s:" fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(ubi, fmt, ...) pr_warn("UBI-%d warning: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_err(ubi, fmt, ...) pr_err("UBI-%d error: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
@@ -987,7 +988,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
{
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
- ubi_warn("switch to read-only mode");
+ ubi_warn(ubi, "switch to read-only mode");
dump_stack();
}
}
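
The ubi.h hunk is the centre of this series: ubi_msg/ubi_warn/ubi_err now take the ubi device object and prefix every line with the device number (and ubi_msg additionally gains the function name the other two already printed). A rough userspace approximation of the new calling convention, with printf standing in for printk; the struct and main() here are illustrative only:

#include <stdio.h>

struct ubi_device { int ubi_num; };     /* only the field the macros use */

/* Mirrors the shape of the new ubi_err(ubi, fmt, ...) from ubi.h */
#define ubi_err(ubi, fmt, ...) \
        fprintf(stderr, "UBI-%d error: %s: " fmt "\n", \
                (ubi)->ubi_num, __func__, ##__VA_ARGS__)

int main(void)
{
        struct ubi_device dev = { .ubi_num = 0 };

        ubi_err(&dev, "bad erase counter %lld", 123LL);
        /* -> UBI-0 error: main: bad erase counter 123 */
        return 0;
}
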
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index ec2c2dc1c1ca..2a1b6e037e1a 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
ubi_assert(!vol->updating && !vol->changing_leb);
vol->updating = 1;
+ vol->upd_buf = vmalloc(ubi->leb_size);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
err = set_update_marker(ubi, vol);
if (err)
return err;
@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
+
+ vfree(vol->upd_buf);
vol->updating = 0;
return 0;
}
- vol->upd_buf = vmalloc(ubi->leb_size);
- if (!vol->upd_buf)
- return -ENOMEM;
-
vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
vol->usable_leb_size);
vol->upd_bytes = bytes;
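
The upd.c change moves the vmalloc() of vol->upd_buf to the very start of ubi_start_update() and has the bytes == 0 early return vfree() it before leaving. The resulting flow, consolidated from the two hunks above as a sketch (intermediate steps and some error handling elided):

vol->upd_buf = vmalloc(ubi->leb_size);
if (!vol->upd_buf)
        return -ENOMEM;

err = set_update_marker(ubi, vol);
if (err)
        return err;

/* An update to zero bytes just empties the volume: nothing to buffer. */
if (bytes == 0) {
        err = clear_update_marker(ubi, vol, 0);
        if (err)
                return err;

        vfree(vol->upd_buf);
        vol->updating = 0;
        return 0;
}
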
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 8330703c098f..ff4d97848d1c 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -223,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- ubi_err("out of volume IDs");
+ ubi_err(ubi, "out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
@@ -237,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- ubi_err("volume %d already exists", vol_id);
+ ubi_err(ubi, "volume %d already exists", vol_id);
goto out_unlock;
}
@@ -246,7 +246,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+ req->name, i);
goto out_unlock;
}
@@ -257,9 +258,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err(ubi, "not enough PEBs, only %d available",
+ ubi->avail_pebs);
if (ubi->corr_peb_count)
- ubi_err("%d PEBs are corrupted and not used",
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
@@ -314,7 +316,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device");
+ ubi_err(ubi, "cannot add character device");
goto out_mapping;
}
@@ -326,7 +328,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err) {
- ubi_err("cannot register device");
+ ubi_err(ubi, "cannot register device");
goto out_cdev;
}
@@ -386,7 +388,7 @@ out_unlock:
kfree(vol);
else
put_device(&vol->dev);
- ubi_err("cannot create volume %d, error %d", vol_id, err);
+ ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
return err;
}
@@ -454,7 +456,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
return err;
out_err:
- ubi_err("cannot remove volume %d, error %d", vol_id, err);
+ ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
out_unlock:
@@ -487,7 +489,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- ubi_err("too small size %d, %d LEBs contain data",
+ ubi_err(ubi, "too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -516,10 +518,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- ubi_err("not enough PEBs: requested %d, available %d",
+ ubi_err(ubi, "not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- ubi_err("%d PEBs are corrupted and not used",
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
@@ -643,7 +645,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device for volume %d, error %d",
+ ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
return err;
}
@@ -710,7 +712,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
if (!vol) {
if (reserved_pebs) {
- ubi_err("no volume info, but volume exists");
+ ubi_err(ubi, "no volume info, but volume exists");
goto fail;
}
spin_unlock(&ubi->volumes_lock);
@@ -719,90 +721,91 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
vol->name_len < 0) {
- ubi_err("negative values");
+ ubi_err(ubi, "negative values");
goto fail;
}
if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
- ubi_err("bad alignment");
+ ubi_err(ubi, "bad alignment");
goto fail;
}
n = vol->alignment & (ubi->min_io_size - 1);
if (vol->alignment != 1 && n) {
- ubi_err("alignment is not multiple of min I/O unit");
+ ubi_err(ubi, "alignment is not multiple of min I/O unit");
goto fail;
}
n = ubi->leb_size % vol->alignment;
if (vol->data_pad != n) {
- ubi_err("bad data_pad, has to be %lld", n);
+ ubi_err(ubi, "bad data_pad, has to be %lld", n);
goto fail;
}
if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
vol->vol_type != UBI_STATIC_VOLUME) {
- ubi_err("bad vol_type");
+ ubi_err(ubi, "bad vol_type");
goto fail;
}
if (vol->upd_marker && vol->corrupted) {
- ubi_err("update marker and corrupted simultaneously");
+ ubi_err(ubi, "update marker and corrupted simultaneously");
goto fail;
}
if (vol->reserved_pebs > ubi->good_peb_count) {
- ubi_err("too large reserved_pebs");
+ ubi_err(ubi, "too large reserved_pebs");
goto fail;
}
n = ubi->leb_size - vol->data_pad;
if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
- ubi_err("bad usable_leb_size, has to be %lld", n);
+ ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
goto fail;
}
if (vol->name_len > UBI_VOL_NAME_MAX) {
- ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX);
+ ubi_err(ubi, "too long volume name, max is %d",
+ UBI_VOL_NAME_MAX);
goto fail;
}
n = strnlen(vol->name, vol->name_len + 1);
if (n != vol->name_len) {
- ubi_err("bad name_len %lld", n);
+ ubi_err(ubi, "bad name_len %lld", n);
goto fail;
}
n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted) {
- ubi_err("corrupted dynamic volume");
+ ubi_err(ubi, "corrupted dynamic volume");
goto fail;
}
if (vol->used_ebs != vol->reserved_pebs) {
- ubi_err("bad used_ebs");
+ ubi_err(ubi, "bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes != vol->usable_leb_size) {
- ubi_err("bad last_eb_bytes");
+ ubi_err(ubi, "bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes != n) {
- ubi_err("bad used_bytes");
+ ubi_err(ubi, "bad used_bytes");
goto fail;
}
} else {
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
- ubi_err("bad used_ebs");
+ ubi_err(ubi, "bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes < 0 ||
vol->last_eb_bytes > vol->usable_leb_size) {
- ubi_err("bad last_eb_bytes");
+ ubi_err(ubi, "bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes < 0 || vol->used_bytes > n ||
vol->used_bytes < n - vol->usable_leb_size) {
- ubi_err("bad used_bytes");
+ ubi_err(ubi, "bad used_bytes");
goto fail;
}
}
@@ -820,7 +823,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
if (alignment != vol->alignment || data_pad != vol->data_pad ||
upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
- ubi_err("volume info is different");
+ ubi_err(ubi, "volume info is different");
goto fail;
}
@@ -828,7 +831,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
return 0;
fail:
- ubi_err("self-check failed for volume %d", vol_id);
+ ubi_err(ubi, "self-check failed for volume %d", vol_id);
if (vol)
ubi_dump_vol_info(vol);
ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
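
Volume creation keeps its reservation rule: the request fails with -ENOSPC when it asks for more PEBs than are currently available, and the messages above now also carry the device argument. A tiny standalone sketch of that check (struct and names hypothetical):

#include <errno.h>
#include <stdio.h>

struct dev_counts { int avail_pebs, rsvd_pebs; };

/* Reserve 'want' PEBs for a new volume, or fail with -ENOSPC. */
static int reserve_pebs(struct dev_counts *d, int want)
{
        if (want > d->avail_pebs) {
                fprintf(stderr, "not enough PEBs, only %d available\n",
                        d->avail_pebs);
                return -ENOSPC;
        }
        d->avail_pebs -= want;
        d->rsvd_pebs  += want;
        return 0;
}

int main(void)
{
        struct dev_counts d = { .avail_pebs = 100, .rsvd_pebs = 0 };

        return reserve_pebs(&d, 40) == 0 ? 0 : 1;   /* 60 PEBs left */
}
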
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 07cac5f9ffb8..f8fc3081bbb4 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -30,9 +30,12 @@
* eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
* other. This redundancy guarantees robustness to unclean reboots. The volume
* table is basically an array of volume table records. Each record contains
- * full information about the volume and protected by a CRC checksum.
+ * full information about the volume and is protected by a CRC checksum. Note,
+ * nowadays we use the atomic LEB change operation when updating the volume
+ * table, so we do not really need 2 LEBs anymore, but we preserve the older
+ * design for backward-compatibility reasons.
*
- * The volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
* erased, and the updated volume table is written back to LEB 0. Then same for
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
@@ -96,12 +99,8 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_unmap_leb(ubi, layout_vol, i);
- if (err)
- return err;
-
- err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size);
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -148,12 +147,8 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_unmap_leb(ubi, layout_vol, i);
- if (err)
- return err;
-
- err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size);
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -190,7 +185,7 @@ static int vtbl_check(const struct ubi_device *ubi,
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
if (be32_to_cpu(vtbl[i].crc) != crc) {
- ubi_err("bad CRC at record %u: %#08x, not %#08x",
+ ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
@@ -224,7 +219,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- ubi_err("bad data_pad, has to be %d", n);
+ ubi_err(ubi, "bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -240,7 +235,7 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- ubi_err("too large reserved_pebs %d, good PEBs %d",
+ ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
@@ -270,7 +265,7 @@ static int vtbl_check(const struct ubi_device *ubi,
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name \"%s\"",
+ ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
i, n, vtbl[i].name);
ubi_dump_vtbl_record(&vtbl[i], i);
ubi_dump_vtbl_record(&vtbl[n], n);
@@ -282,7 +277,7 @@ static int vtbl_check(const struct ubi_device *ubi,
return 0;
bad:
- ubi_err("volume table check failed: record %d, error %d", i, err);
+ ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
@@ -446,11 +441,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
leb_corrupted[1] = memcmp(leb[0], leb[1],
ubi->vtbl_size);
if (leb_corrupted[1]) {
- ubi_warn("volume table copy #2 is corrupted");
+ ubi_warn(ubi, "volume table copy #2 is corrupted");
err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
- ubi_msg("volume table was restored");
+ ubi_msg(ubi, "volume table was restored");
}
/* Both LEB 1 and LEB 2 are OK and consistent */
@@ -465,15 +460,15 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
if (leb_corrupted[1]) {
/* Both LEB 0 and LEB 1 are corrupted */
- ubi_err("both volume tables are corrupted");
+ ubi_err(ubi, "both volume tables are corrupted");
goto out_free;
}
- ubi_warn("volume table copy #1 is corrupted");
+ ubi_warn(ubi, "volume table copy #1 is corrupted");
err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
- ubi_msg("volume table was restored");
+ ubi_msg(ubi, "volume table was restored");
vfree(leb[0]);
return leb[1];
@@ -562,7 +557,7 @@ static int init_volumes(struct ubi_device *ubi,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
@@ -608,7 +603,7 @@ static int init_volumes(struct ubi_device *ubi,
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
- ubi_warn("static volume %d misses %d LEBs - corrupted",
+ ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
@@ -646,10 +641,10 @@ static int init_volumes(struct ubi_device *ubi,
vol->ubi = ubi;
if (reserved_pebs > ubi->avail_pebs) {
- ubi_err("not enough PEBs, required %d, available %d",
+ ubi_err(ubi, "not enough PEBs, required %d, available %d",
reserved_pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- ubi_err("%d PEBs are corrupted and not used",
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
}
ubi->rsvd_pebs += reserved_pebs;
@@ -660,13 +655,14 @@ static int init_volumes(struct ubi_device *ubi,
/**
* check_av - check volume attaching information.
+ * @ubi: UBI device description object
* @vol: UBI volume description object
* @av: volume attaching information
*
* This function returns zero if the volume attaching information is consistent
* with the data read from the volume table, and %-EINVAL if not.
*/
-static int check_av(const struct ubi_volume *vol,
+static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol,
const struct ubi_ainf_volume *av)
{
int err;
@@ -694,7 +690,7 @@ static int check_av(const struct ubi_volume *vol,
return 0;
bad:
- ubi_err("bad attaching information, error %d", err);
+ ubi_err(ubi, "bad attaching information, error %d", err);
ubi_dump_av(av);
ubi_dump_vol_info(vol);
return -EINVAL;
@@ -718,14 +714,15 @@ static int check_attaching_info(const struct ubi_device *ubi,
struct ubi_volume *vol;
if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found", ai->highest_vol_id);
+ ubi_err(ubi, "too large volume ID %d found",
+ ai->highest_vol_id);
return -EINVAL;
}
@@ -753,10 +750,10 @@ static int check_attaching_info(const struct ubi_device *ubi,
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_msg(ubi, "finish volume %d removal", av->vol_id);
ubi_remove_av(ai, av);
} else if (av) {
- err = check_av(vol, av);
+ err = check_av(ubi, vol, av);
if (err)
return err;
}
@@ -807,13 +804,13 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
- ubi_err("the layout volume was not found");
+ ubi_err(ubi, "the layout volume was not found");
return -EINVAL;
}
} else {
if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- ubi_err("too many LEBs (%d) in layout volume",
+ ubi_err(ubi, "too many LEBs (%d) in layout volume",
av->leb_count);
return -EINVAL;
}
@@ -862,7 +859,7 @@ static void self_vtbl_check(const struct ubi_device *ubi)
return;
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("self-check failed");
+ ubi_err(ubi, "self-check failed");
BUG();
}
}
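
Besides the message conversions, vtbl.c switches both volume-table writers from unmap-then-write to the atomic LEB change primitive, which writes the new copy elsewhere and only then remaps the LEB, so an interrupted update should no longer leave a layout LEB empty. The before/after shape of the loop body, taken from the hunks above (function names as in the driver):

/* old: two-step update; a power cut between the calls leaves LEB i unmapped */
err = ubi_eba_unmap_leb(ubi, layout_vol, i);
if (!err)
        err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, ubi->vtbl_size);

/* new: single atomic operation; the old copy stays valid until the
 * replacement has been fully written */
err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl, ubi->vtbl_size);
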
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6654f191868e..834f6fe1f5fa 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -253,7 +253,7 @@ static int do_work(struct ubi_device *ubi)
*/
err = wrk->func(ubi, wrk, 0);
if (err)
- ubi_err("work failed with error code %d", err);
+ ubi_err(ubi, "work failed with error code %d", err);
up_read(&ubi->work_sem);
return err;
@@ -470,8 +470,11 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
- if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) {
+ ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d",
+ anchor, ubi->free_count, ubi->beb_rsvd_pebs);
goto out;
+ }
if (anchor)
e = find_anchor_wl_entry(&ubi->free);
@@ -507,7 +510,7 @@ static int __wl_get_peb(struct ubi_device *ubi)
retry:
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_err("no free eraseblocks");
+ ubi_err(ubi, "no free eraseblocks");
ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
@@ -520,7 +523,7 @@ retry:
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e) {
- ubi_err("no free eraseblocks");
+ ubi_err(ubi, "no free eraseblocks");
return -ENOSPC;
}
@@ -692,7 +695,8 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
- ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
+ ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
+ peb);
return err;
}
@@ -760,7 +764,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
* Erase counter overflow. Upgrade UBI and use 64-bit
* erase counters internally.
*/
- ubi_err("erase counter overflow at PEB %d, EC %llu",
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
e->pnum, ec);
err = -EINVAL;
goto out_free;
@@ -1137,7 +1141,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_not_moved;
}
- ubi_err("error %d while reading VID header from PEB %d",
+ ubi_err(ubi, "error %d while reading VID header from PEB %d",
err, e1->pnum);
goto out_error;
}
@@ -1181,7 +1185,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* UBI from trying to move it over and over again.
*/
if (ubi->erroneous_peb_count > ubi->max_erroneous) {
- ubi_err("too many erroneous eraseblocks (%d)",
+ ubi_err(ubi, "too many erroneous eraseblocks (%d)",
ubi->erroneous_peb_count);
goto out_error;
}
@@ -1197,7 +1201,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* The PEB has been successfully moved */
if (scrubbing)
- ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
e1->pnum, vol_id, lnum, e2->pnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1212,7 +1216,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
- kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -1226,10 +1229,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
- if (err) {
- kmem_cache_free(ubi_wl_entry_slab, e2);
+ if (err)
goto out_ro;
- }
}
dbg_wl("done");
@@ -1265,19 +1266,18 @@ out_not_moved:
ubi_free_vid_hdr(ubi, vid_hdr);
err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
- if (err) {
- kmem_cache_free(ubi_wl_entry_slab, e2);
+ if (err)
goto out_ro;
- }
+
mutex_unlock(&ubi->move_mutex);
return 0;
out_error:
if (vol_id != -1)
- ubi_err("error %d while moving PEB %d to PEB %d",
+ ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
err, e1->pnum, e2->pnum);
else
- ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
err, e1->pnum, vol_id, lnum, e2->pnum);
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
@@ -1458,7 +1458,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return err;
}
- ubi_err("failed to erase PEB %d, error %d", pnum, err);
+ ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
@@ -1486,7 +1486,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/* It is %-EIO, the PEB went bad */
if (!ubi->bad_allowed) {
- ubi_err("bad physical eraseblock %d detected", pnum);
+ ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
goto out_ro;
}
@@ -1494,7 +1494,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
if (ubi->beb_rsvd_pebs == 0) {
if (ubi->avail_pebs == 0) {
spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved/available physical eraseblocks");
+ ubi_err(ubi, "no reserved/available physical eraseblocks");
goto out_ro;
}
ubi->avail_pebs -= 1;
@@ -1502,7 +1502,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
spin_unlock(&ubi->volumes_lock);
- ubi_msg("mark PEB %d as bad", pnum);
+ ubi_msg(ubi, "mark PEB %d as bad", pnum);
err = ubi_io_mark_bad(ubi, pnum);
if (err)
goto out_ro;
@@ -1523,11 +1523,12 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
if (available_consumed)
- ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
else if (ubi->beb_rsvd_pebs)
- ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+ ubi_msg(ubi, "%d PEBs left in the reserve",
+ ubi->beb_rsvd_pebs);
else
- ubi_warn("last PEB from the reserve was used");
+ ubi_warn(ubi, "last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
@@ -1613,7 +1614,7 @@ retry:
} else {
err = prot_queue_del(ubi, e->pnum);
if (err) {
- ubi_err("PEB %d not found", pnum);
+ ubi_err(ubi, "PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
return err;
@@ -1646,7 +1647,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
- ubi_msg("schedule PEB %d for scrubbing", pnum);
+ ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
@@ -1678,7 +1679,7 @@ retry:
err = prot_queue_del(ubi, e->pnum);
if (err) {
- ubi_err("PEB %d not found", pnum);
+ ubi_err(ubi, "PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
return err;
@@ -1798,15 +1799,18 @@ int ubi_thread(void *u)
int failures = 0;
struct ubi_device *ubi = u;
- ubi_msg("background thread \"%s\" started, PID %d",
+ ubi_msg(ubi, "background thread \"%s\" started, PID %d",
ubi->bgt_name, task_pid_nr(current));
set_freezable();
for (;;) {
int err;
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ ubi_msg(ubi, "background thread \"%s\" should stop, PID %d",
+ ubi->bgt_name, task_pid_nr(current));
break;
+ }
if (try_to_freeze())
continue;
@@ -1823,14 +1827,14 @@ int ubi_thread(void *u)
err = do_work(ubi);
if (err) {
- ubi_err("%s: work failed with error code %d",
+ ubi_err(ubi, "%s: work failed with error code %d",
ubi->bgt_name, err);
if (failures++ > WL_MAX_FAILURES) {
/*
* Too many failures, disable the thread and
* switch to read-only mode.
*/
- ubi_msg("%s: %d consecutive failures",
+ ubi_msg(ubi, "%s: %d consecutive failures",
ubi->bgt_name, WL_MAX_FAILURES);
ubi_ro_mode(ubi);
ubi->thread_enabled = 0;
@@ -1981,10 +1985,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
#endif
if (ubi->avail_pebs < reserved_pebs) {
- ubi_err("no enough physical eraseblocks (%d, need %d)",
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
- ubi_err("%d PEBs are corrupted and not used",
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
@@ -2072,8 +2076,8 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec && read_ec - ec > 1) {
- ubi_err("self-check failed for PEB %d", pnum);
- ubi_err("read EC is %lld, should be %d", read_ec, ec);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
dump_stack();
err = 1;
} else
@@ -2102,7 +2106,7 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
if (in_wl_tree(e, root))
return 0;
- ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
dump_stack();
return -EINVAL;
@@ -2130,7 +2134,7 @@ static int self_check_in_pq(const struct ubi_device *ubi,
if (p == e)
return 0;
- ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
dump_stack();
return -EINVAL;
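
ubi_thread() now also logs when it is asked to stop; its failure policy is unchanged: a successful pass resets the failure counter (not visible in the hunk), and more than WL_MAX_FAILURES consecutive errors switch the device to read-only and disable the thread. A compact sketch of that policy, with the constant value and names being hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FAILURES 32                 /* stand-in for WL_MAX_FAILURES */

static bool ro_mode;

/* Feed one work result into the policy; returns true while the
 * thread should keep running. */
static bool note_work_result(int err, int *failures)
{
        if (!err) {
                *failures = 0;          /* success resets the counter */
                return true;
        }
        if ((*failures)++ > MAX_FAILURES) {
                fprintf(stderr, "%d consecutive failures, read-only mode\n",
                        MAX_FAILURES);
                ro_mode = true;         /* mirrors ubi_ro_mode(ubi) */
                return false;           /* thread disables itself */
        }
        return true;
}

int main(void)
{
        int failures = 0;

        note_work_result(-5, &failures);   /* one failure: keep going */
        note_work_result(0, &failures);    /* success: counter resets */
        return ro_mode ? 1 : 0;
}
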
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f9009be3f307..d6607ee9c855 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -145,6 +145,26 @@ config MACVTAP
To compile this driver as a module, choose M here: the module
will be called macvtap.
+
+config IPVLAN
+ tristate "IP-VLAN support"
+ depends on INET
+ depends on IPV6
+ ---help---
+ This allows one to create virtual devices off of a main interface;
+ packets are delivered to them based on their destination L3 address
+ (IPv4/IPv6). All interfaces (including the main interface) share the
+ same L2 address, making the setup transparent to the connected L2 switch.
+
+ Ipvlan devices can be added using the "ip" command from the
+ iproute2 package starting with the iproute2-X.Y.ZZ release:
+
+ "ip link add link <main-dev> [ NAME ] type ipvlan"
+
+ To compile this driver as a module, choose M here: the module
+ will be called ipvlan.
+
+
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 61aefdd1e173..e25fdd7d905e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@
# Networking Core Drivers
#
obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 2110215f3528..8baa87df1738 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -29,8 +29,8 @@
#include <linux/if_bonding.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
-#include "bonding.h"
-#include "bond_3ad.h"
+#include <net/bonding.h>
+#include <net/bond_3ad.h>
/* General definitions */
#define AD_SHORT_TIMEOUT 1
@@ -79,15 +79,21 @@
* --------------------------------------------------------------
* 16 6 1 0
*/
-#define AD_DUPLEX_KEY_BITS 0x1
-#define AD_SPEED_KEY_BITS 0x3E
-#define AD_USER_KEY_BITS 0xFFC0
-
-#define AD_LINK_SPEED_BITMASK_1MBPS 0x1
-#define AD_LINK_SPEED_BITMASK_10MBPS 0x2
-#define AD_LINK_SPEED_BITMASK_100MBPS 0x4
-#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8
-#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10
+#define AD_DUPLEX_KEY_MASKS 0x1
+#define AD_SPEED_KEY_MASKS 0x3E
+#define AD_USER_KEY_MASKS 0xFFC0
+
+enum ad_link_speed_type {
+ AD_LINK_SPEED_1MBPS = 1,
+ AD_LINK_SPEED_10MBPS,
+ AD_LINK_SPEED_100MBPS,
+ AD_LINK_SPEED_1000MBPS,
+ AD_LINK_SPEED_2500MBPS,
+ AD_LINK_SPEED_10000MBPS,
+ AD_LINK_SPEED_20000MBPS,
+ AD_LINK_SPEED_40000MBPS,
+ AD_LINK_SPEED_56000MBPS
+};
/* compare MAC addresses */
#define MAC_ADDRESS_EQUAL(A, B) \
@@ -240,12 +246,16 @@ static inline int __check_agg_selection_timer(struct port *port)
* __get_link_speed - get a port's speed
* @port: the port we're looking at
*
- * Return @port's speed in 802.3ad bitmask format. i.e. one of:
+ * Return @port's speed in 802.3ad enum format. i.e. one of:
* 0,
- * %AD_LINK_SPEED_BITMASK_10MBPS,
- * %AD_LINK_SPEED_BITMASK_100MBPS,
- * %AD_LINK_SPEED_BITMASK_1000MBPS,
- * %AD_LINK_SPEED_BITMASK_10000MBPS
+ * %AD_LINK_SPEED_10MBPS,
+ * %AD_LINK_SPEED_100MBPS,
+ * %AD_LINK_SPEED_1000MBPS,
+ * %AD_LINK_SPEED_2500MBPS,
+ * %AD_LINK_SPEED_10000MBPS
+ * %AD_LINK_SPEED_20000MBPS
+ * %AD_LINK_SPEED_40000MBPS
+ * %AD_LINK_SPEED_56000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -262,19 +272,35 @@ static u16 __get_link_speed(struct port *port)
else {
switch (slave->speed) {
case SPEED_10:
- speed = AD_LINK_SPEED_BITMASK_10MBPS;
+ speed = AD_LINK_SPEED_10MBPS;
break;
case SPEED_100:
- speed = AD_LINK_SPEED_BITMASK_100MBPS;
+ speed = AD_LINK_SPEED_100MBPS;
break;
case SPEED_1000:
- speed = AD_LINK_SPEED_BITMASK_1000MBPS;
+ speed = AD_LINK_SPEED_1000MBPS;
+ break;
+
+ case SPEED_2500:
+ speed = AD_LINK_SPEED_2500MBPS;
break;
case SPEED_10000:
- speed = AD_LINK_SPEED_BITMASK_10000MBPS;
+ speed = AD_LINK_SPEED_10000MBPS;
+ break;
+
+ case SPEED_20000:
+ speed = AD_LINK_SPEED_20000MBPS;
+ break;
+
+ case SPEED_40000:
+ speed = AD_LINK_SPEED_40000MBPS;
+ break;
+
+ case SPEED_56000:
+ speed = AD_LINK_SPEED_56000MBPS;
break;
default:
@@ -625,21 +651,33 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
if (aggregator->num_of_ports) {
switch (__get_link_speed(aggregator->lag_ports)) {
- case AD_LINK_SPEED_BITMASK_1MBPS:
+ case AD_LINK_SPEED_1MBPS:
bandwidth = aggregator->num_of_ports;
break;
- case AD_LINK_SPEED_BITMASK_10MBPS:
+ case AD_LINK_SPEED_10MBPS:
bandwidth = aggregator->num_of_ports * 10;
break;
- case AD_LINK_SPEED_BITMASK_100MBPS:
+ case AD_LINK_SPEED_100MBPS:
bandwidth = aggregator->num_of_ports * 100;
break;
- case AD_LINK_SPEED_BITMASK_1000MBPS:
+ case AD_LINK_SPEED_1000MBPS:
bandwidth = aggregator->num_of_ports * 1000;
break;
- case AD_LINK_SPEED_BITMASK_10000MBPS:
+ case AD_LINK_SPEED_2500MBPS:
+ bandwidth = aggregator->num_of_ports * 2500;
+ break;
+ case AD_LINK_SPEED_10000MBPS:
bandwidth = aggregator->num_of_ports * 10000;
break;
+ case AD_LINK_SPEED_20000MBPS:
+ bandwidth = aggregator->num_of_ports * 20000;
+ break;
+ case AD_LINK_SPEED_40000MBPS:
+ bandwidth = aggregator->num_of_ports * 40000;
+ break;
+ case AD_LINK_SPEED_56000MBPS:
+ bandwidth = aggregator->num_of_ports * 56000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
@@ -1011,7 +1049,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
- if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
else
port->sm_vars |= AD_PORT_LACP_ENABLED;
@@ -1318,7 +1356,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
/* update the new aggregator's parameters
* if port was responsed from the end-user
*/
- if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
+ if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
/* if port is full duplex */
port->aggregator->is_individual = false;
else
@@ -1846,7 +1884,7 @@ void bond_3ad_bind_slave(struct slave *slave)
/* if the port is not full duplex, then the port should be not
* lacp Enabled
*/
- if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
/* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
@@ -2214,7 +2252,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
- port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
@@ -2247,7 +2285,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
@@ -2289,18 +2327,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
*/
if (link == BOND_LINK_UP) {
port->is_enabled = true;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
- port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
} else {
/* link has failed */
port->is_enabled = false;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = (port->actor_admin_port_key &=
- ~AD_SPEED_KEY_BITS);
+ ~AD_SPEED_KEY_MASKS);
}
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index d2eadab787c5..bb9e9fc45e1b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -37,8 +37,8 @@
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
-#include "bonding.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_alb.h>
@@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev;
if (client_info->vlan_id) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
- if (!skb) {
- netdev_err(client_info->slave->bond->dev,
- "failed to insert VLAN tag\n");
- continue;
- }
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ client_info->vlan_id);
}
arp_xmit(skb);
@@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
- if (vid) {
- skb = vlan_put_tag(skb, vlan_proto, vid);
- if (!skb) {
- netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
- return;
- }
- }
+ if (vid)
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
dev_queue_xmit(skb);
}
@@ -1326,7 +1317,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
}
/* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond->dev, skb);
out:
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 8f99082f90eb..e52e25a977fa 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -3,8 +3,8 @@
#include <linux/device.h>
#include <linux/netdevice.h>
-#include "bonding.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_alb.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a5115fb7cf33..184c434ae305 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,9 +77,9 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
-#include "bonding.h"
-#include "bond_3ad.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_3ad.h>
+#include <net/bond_alb.h>
/*---------------------------- Module parameters ----------------------------*/
@@ -1526,6 +1526,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
#endif
+ if (!(bond_dev->features & NETIF_F_LRO))
+ dev_disable_lro(slave_dev);
+
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
@@ -2143,8 +2146,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), tags->vlan_id);
- skb = __vlan_put_tag(skb, tags->vlan_proto,
- tags->vlan_id);
+ skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
+ tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
return;
@@ -2156,12 +2159,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
if (outer_tag->vlan_id) {
netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
- skb = vlan_put_tag(skb, outer_tag->vlan_proto,
- outer_tag->vlan_id);
- if (!skb) {
- net_err_ratelimited("failed to insert outer VLAN tag\n");
- return;
- }
+ __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
+ outer_tag->vlan_id);
}
xmit:
@@ -3523,7 +3522,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
}
}
/* no slave that can tx has been found */
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond->dev, skb);
}
/**
@@ -3585,7 +3584,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
} else {
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
}
}
@@ -3604,7 +3603,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
return NETDEV_TX_OK;
}
@@ -3748,8 +3747,7 @@ int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
} else {
- dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ bond_tx_drop(dev, skb);
}
return NETDEV_TX_OK;
@@ -3779,7 +3777,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
return NETDEV_TX_OK;
}
@@ -3859,7 +3857,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Should never happen, mode already checked */
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
- dev_kfree_skb_any(skb);
+ bond_tx_drop(dev, skb);
return NETDEV_TX_OK;
}
}
@@ -3879,7 +3877,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(dev, skb);
rcu_read_unlock();
return ret;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 45f09a66e6c9..7b1124366011 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -17,7 +17,7 @@
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
-#include "bonding.h"
+#include <net/bonding.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index b62697f4a3de..1a61cc9b3402 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -16,7 +16,7 @@
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
-#include "bonding.h"
+#include <net/bonding.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index a3948f8d1e53..976f5ad2a0f2 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -2,7 +2,7 @@
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include "bonding.h"
+#include <net/bonding.h>
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8ffbafd500fd..7e9e151d4d61 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -40,7 +40,7 @@
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
-#include "bonding.h"
+#include <net/bonding.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index b01b0ce4d1be..23618a831612 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include "bonding.h"
+#include <net/bonding.h>
struct slave_attribute {
struct attribute attr;
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index fc9304143f44..c533c62b0f5e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -29,4 +29,5 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_RCAR) += rcar_can.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
-subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+subdir-ccflags-y += -D__CHECK_ENDIAN__
+subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 05e1aa090add..d0c2463b573f 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1428,7 +1428,6 @@ static struct platform_driver at91_can_driver = {
.remove = at91_can_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_can_dt_ids),
},
.id_table = at91_can_id_table,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 543ecceb33e9..417d50998e31 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -680,7 +680,6 @@ static struct platform_driver bfin_can_driver = {
.resume = bfin_can_resume,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 8e78bb48f5a4..f94a9fa60488 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -603,6 +604,8 @@ static int c_can_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* activate pins */
+ pinctrl_pm_select_default_state(dev->dev.parent);
return 0;
}
@@ -611,6 +614,9 @@ static void c_can_stop(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
c_can_irq_control(priv, false);
+
+ /* deactivate pins */
+ pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1244,6 +1250,13 @@ int register_c_can_dev(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
int err;
+ /* Deactivate pins to prevent DRA7 DCAN IP from being
+ * stuck in transition when module is disabled.
+ * Pins are activated in c_can_start() and deactivated
+ * in c_can_stop()
+ */
+ pinctrl_pm_select_sleep_state(dev->dev.parent);
+
c_can_pm_runtime_enable(priv);
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 99ad1aa576b0..8acdc7fa4792 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -169,6 +169,28 @@ enum c_can_dev_id {
BOSCH_D_CAN,
};
+struct raminit_bits {
+ u8 start;
+ u8 done;
+};
+
+struct c_can_driver_data {
+ enum c_can_dev_id id;
+
+ /* RAMINIT register description. Optional. */
+ const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */
+ u8 raminit_num; /* Number of CAN instances on the SoC */
+ bool raminit_pulse; /* If set, sets and clears START bit (pulse) */
+};
+
+/* Out of band RAMINIT register access via syscon regmap */
+struct c_can_raminit {
+ struct regmap *syscon; /* for raminit ctrl. reg. access */
+ unsigned int reg; /* register index within syscon */
+ struct raminit_bits bits;
+ bool needs_pulse;
+};
+
/* c_can private data structure */
struct c_can_priv {
struct can_priv can; /* must be the first member */
@@ -186,8 +208,7 @@ struct c_can_priv {
const u16 *regs;
void *priv; /* for board-specific data */
enum c_can_dev_id type;
- u32 __iomem *raminit_ctrlreg;
- int instance;
+ struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit) (const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
u32 rxmasked;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index fb279d6ae484..f363972cd77d 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -32,14 +32,13 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/can/dev.h>
#include "c_can.h"
-#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
-#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
-#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
#define DCAN_RAM_INIT_BIT (1 << 3)
static DEFINE_SPINLOCK(raminit_lock);
/*
@@ -72,39 +71,63 @@ static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
-static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
- u32 val)
+static void c_can_hw_raminit_wait_syscon(const struct c_can_priv *priv,
+ u32 mask, u32 val)
{
+ const struct c_can_raminit *raminit = &priv->raminit_sys;
+ int timeout = 0;
+ u32 ctrl = 0;
+
/* We look only at the bits of our instance. */
val &= mask;
- while ((readl(priv->raminit_ctrlreg) & mask) != val)
+ do {
udelay(1);
+ timeout++;
+
+ regmap_read(raminit->syscon, raminit->reg, &ctrl);
+ if (timeout == 1000) {
+ dev_err(&priv->dev->dev, "%s: time out\n", __func__);
+ break;
+ }
+ } while ((ctrl & mask) != val);
}
-static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
{
- u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
- u32 ctrl;
+ const struct c_can_raminit *raminit = &priv->raminit_sys;
+ u32 ctrl = 0;
+ u32 mask;
spin_lock(&raminit_lock);
- ctrl = readl(priv->raminit_ctrlreg);
+ mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
+ regmap_read(raminit->syscon, raminit->reg, &ctrl);
+
/* We clear the done and start bit first. The start bit is
* looking at the 0 -> transition, but is not self clearing;
* And we clear the init done bit as well.
+ * NOTE: DONE must be written with 1 to clear it.
*/
- ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance);
- ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- writel(ctrl, priv->raminit_ctrlreg);
- ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait_ti(priv, mask, ctrl);
+ ctrl &= ~(1 << raminit->bits.start);
+ ctrl |= 1 << raminit->bits.done;
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+
+ ctrl &= ~(1 << raminit->bits.done);
+ c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
if (enable) {
/* Set start bit and wait for the done bit. */
- ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
- writel(ctrl, priv->raminit_ctrlreg);
- ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait_ti(priv, mask, ctrl);
+ ctrl |= 1 << raminit->bits.start;
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+
+ /* clear START bit if start pulse is needed */
+ if (raminit->needs_pulse) {
+ ctrl &= ~(1 << raminit->bits.start);
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+ }
+
+ ctrl |= 1 << raminit->bits.done;
+ c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
}
spin_unlock(&raminit_lock);
}
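A rough userspace model (not kernel code; it omits the DONE polling and the raminit_lock) of the RAMINIT bit sequence above, using the DRA7 instance-0 positions defined further down as an example:

/* Userspace model of the RAMINIT handling: clear START, clear DONE by
 * writing 1 to it, then set START (pulsing it back low on SoCs that need
 * the pulse). A plain variable stands in for the syscon register.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct raminit_bits {
	uint8_t start;
	uint8_t done;
};

static uint32_t syscon_reg;	/* stand-in for the shared syscon register */

static void raminit_start(const struct raminit_bits *bits, bool needs_pulse)
{
	uint32_t ctrl = syscon_reg;

	/* clear START, then clear DONE by writing 1 to it */
	ctrl &= ~(1u << bits->start);
	ctrl |= 1u << bits->done;
	syscon_reg = ctrl;

	/* set START; pulse it if the SoC requires a 0->1->0 sequence */
	ctrl &= ~(1u << bits->done);
	ctrl |= 1u << bits->start;
	syscon_reg = ctrl;
	if (needs_pulse) {
		ctrl &= ~(1u << bits->start);
		syscon_reg = ctrl;
	}
}

int main(void)
{
	struct raminit_bits dra7_instance0 = { .start = 3, .done = 1 };

	raminit_start(&dra7_instance0, true);
	/* START has already been pulsed back low, so the register reads 0 */
	printf("syscon register now 0x%02x\n", (unsigned int)syscon_reg);
	return 0;
}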
@@ -159,26 +182,60 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
}
}
+static const struct c_can_driver_data c_can_drvdata = {
+ .id = BOSCH_C_CAN,
+};
+
+static const struct c_can_driver_data d_can_drvdata = {
+ .id = BOSCH_D_CAN,
+};
+
+static const struct raminit_bits dra7_raminit_bits[] = {
+ [0] = { .start = 3, .done = 1, },
+ [1] = { .start = 5, .done = 2, },
+};
+
+static const struct c_can_driver_data dra7_dcan_drvdata = {
+ .id = BOSCH_D_CAN,
+ .raminit_num = ARRAY_SIZE(dra7_raminit_bits),
+ .raminit_bits = dra7_raminit_bits,
+ .raminit_pulse = true,
+};
+
+static const struct raminit_bits am3352_raminit_bits[] = {
+ [0] = { .start = 0, .done = 8, },
+ [1] = { .start = 1, .done = 9, },
+};
+
+static const struct c_can_driver_data am3352_dcan_drvdata = {
+ .id = BOSCH_D_CAN,
+ .raminit_num = ARRAY_SIZE(am3352_raminit_bits),
+ .raminit_bits = am3352_raminit_bits,
+};
+
static struct platform_device_id c_can_id_table[] = {
- [BOSCH_C_CAN_PLATFORM] = {
+ {
.name = KBUILD_MODNAME,
- .driver_data = BOSCH_C_CAN,
+ .driver_data = (kernel_ulong_t)&c_can_drvdata,
},
- [BOSCH_C_CAN] = {
+ {
.name = "c_can",
- .driver_data = BOSCH_C_CAN,
+ .driver_data = (kernel_ulong_t)&c_can_drvdata,
},
- [BOSCH_D_CAN] = {
+ {
.name = "d_can",
- .driver_data = BOSCH_D_CAN,
- }, {
- }
+ .driver_data = (kernel_ulong_t)&d_can_drvdata,
+ },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, c_can_id_table);
static const struct of_device_id c_can_of_table[] = {
- { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
- { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
+ { .compatible = "bosch,c_can", .data = &c_can_drvdata },
+ { .compatible = "bosch,d_can", .data = &d_can_drvdata },
+ { .compatible = "ti,dra7-d_can", .data = &dra7_dcan_drvdata },
+ { .compatible = "ti,am3352-d_can", .data = &am3352_dcan_drvdata },
+ { .compatible = "ti,am4372-d_can", .data = &am3352_dcan_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c_can_of_table);
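A userspace sketch (not kernel code; names and fields are illustrative) of the per-compatible driver-data lookup that replaces the platform_device_id indexing above:

/* Each compatible string maps to a descriptor; probe looks the descriptor
 * up instead of switching on an index.
 */
#include <stdio.h>
#include <string.h>

struct drvdata {
	const char *name;
	int raminit_instances;
};

static const struct { const char *compatible; struct drvdata data; } table[] = {
	{ "bosch,c_can",     { "c_can",        0 } },
	{ "bosch,d_can",     { "d_can",        0 } },
	{ "ti,dra7-d_can",   { "dra7 d_can",   2 } },
	{ "ti,am3352-d_can", { "am3352 d_can", 2 } },
};

static const struct drvdata *match(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].compatible, compatible))
			return &table[i].data;
	return NULL;
}

int main(void)
{
	const struct drvdata *d = match("ti,dra7-d_can");

	if (d)
		printf("%s: %d RAMINIT instance(s)\n", d->name,
		       d->raminit_instances);
	return 0;
}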
@@ -190,21 +247,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
struct net_device *dev;
struct c_can_priv *priv;
const struct of_device_id *match;
- const struct platform_device_id *id;
- struct resource *mem, *res;
+ struct resource *mem;
int irq;
struct clk *clk;
-
- if (pdev->dev.of_node) {
- match = of_match_device(c_can_of_table, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Failed to find matching dt id\n");
- ret = -EINVAL;
- goto exit;
- }
- id = match->data;
+ const struct c_can_driver_data *drvdata;
+ struct device_node *np = pdev->dev.of_node;
+
+ match = of_match_device(c_can_of_table, &pdev->dev);
+ if (match) {
+ drvdata = match->data;
+ } else if (pdev->id_entry->driver_data) {
+ drvdata = (struct c_can_driver_data *)
+ platform_get_device_id(pdev)->driver_data;
} else {
- id = platform_get_device_id(pdev);
+ return -ENODEV;
}
/* get the appropriate clk */
@@ -236,7 +292,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
- switch (id->driver_data) {
+ switch (drvdata->id) {
case BOSCH_C_CAN:
priv->regs = reg_map_c_can;
switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
@@ -263,27 +319,50 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->read_reg32 = d_can_plat_read_reg32;
priv->write_reg32 = d_can_plat_write_reg32;
- if (pdev->dev.of_node)
- priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
- else
- priv->instance = pdev->id;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- /* Not all D_CAN modules have a separate register for the D_CAN
- * RAM initialization. Use default RAM init bit in D_CAN module
- * if not specified in DT.
+ /* Check if we need custom RAMINIT via syscon. Mostly for TI
+ * platforms. Only supported with DT boot.
*/
- if (!res) {
+ if (np && of_property_read_bool(np, "syscon-raminit")) {
+ u32 id;
+ struct c_can_raminit *raminit = &priv->raminit_sys;
+
+ ret = -EINVAL;
+ raminit->syscon = syscon_regmap_lookup_by_phandle(np,
+ "syscon-raminit");
+ if (IS_ERR(raminit->syscon)) {
+ /* can fail with -EPROBE_DEFER */
+ ret = PTR_ERR(raminit->syscon);
+ free_c_can_dev(dev);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "syscon-raminit", 1,
+ &raminit->reg)) {
+ dev_err(&pdev->dev,
+ "couldn't get the RAMINIT reg. offset!\n");
+ goto exit_free_device;
+ }
+
+ if (of_property_read_u32_index(np, "syscon-raminit", 2,
+ &id)) {
+ dev_err(&pdev->dev,
+ "couldn't get the CAN instance ID\n");
+ goto exit_free_device;
+ }
+
+ if (id >= drvdata->raminit_num) {
+ dev_err(&pdev->dev,
+ "Invalid CAN instance ID\n");
+ goto exit_free_device;
+ }
+
+ raminit->bits = drvdata->raminit_bits[id];
+ raminit->needs_pulse = drvdata->raminit_pulse;
+
+ priv->raminit = c_can_hw_raminit_syscon;
+ } else {
priv->raminit = c_can_hw_raminit;
- break;
}
-
- priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!priv->raminit_ctrlreg || priv->instance < 0)
- dev_info(&pdev->dev, "control memory is not used for raminit\n");
- else
- priv->raminit = c_can_hw_raminit_ti;
break;
default:
ret = -EINVAL;
@@ -295,7 +374,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->device = &pdev->dev;
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
- priv->type = id->driver_data;
+ priv->type = drvdata->id;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -392,7 +471,6 @@ static int c_can_resume(struct platform_device *pdev)
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = c_can_of_table,
},
.probe = c_can_plat_probe,
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d8379278d648..c486fe510f37 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -60,7 +60,7 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
*
* The message objects 1..14 can be used for TX and RX while the message
* objects 15 is optimized for RX. It has a shadow register for reliable
- * data receiption under heavy bus load. Therefore it makes sense to use
+ * data reception under heavy bus load. Therefore it makes sense to use
* this message object for the needed use case. The frame type (EFF/SFF)
* for the message object 15 can be defined via kernel module parameter
* "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames,
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 87a47c0cfd49..e0d15711e9ac 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -318,7 +318,6 @@ static struct platform_driver cc770_isa_driver = {
.remove = cc770_isa_remove,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index ad76734b3ecc..b1e8851d3cc4 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -264,7 +264,6 @@ MODULE_DEVICE_TABLE(of, cc770_platform_table);
static struct platform_driver cc770_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = cc770_platform_table,
},
.probe = cc770_platform_probe,
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2cfe5012e4e5..3ec8f6f25e5f 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -273,6 +273,84 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
return err;
}
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ default:
+ break;
+ };
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "New error state: %d\n", new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
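A userspace illustration (not kernel code; the constant values are placeholders, not those from linux/can/error.h) of how the new helper fills the controller byte of the error frame: whichever side reached the worse state is flagged, and a tie flags both:

#include <stdio.h>

enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

#define ERR_CRTL_TX_WARNING  0x01	/* illustrative values only */
#define ERR_CRTL_RX_WARNING  0x02
#define ERR_CRTL_TX_PASSIVE  0x04
#define ERR_CRTL_RX_PASSIVE  0x08

static unsigned char state_flags(enum can_state tx, enum can_state rx)
{
	unsigned char data1 = 0;

	/* flag whichever side is at least as bad as the other */
	if (tx >= rx)
		data1 |= tx == ERROR_WARNING ? ERR_CRTL_TX_WARNING :
			 tx == ERROR_PASSIVE ? ERR_CRTL_TX_PASSIVE : 0;
	if (rx >= tx)
		data1 |= rx == ERROR_WARNING ? ERR_CRTL_RX_WARNING :
			 rx == ERROR_PASSIVE ? ERR_CRTL_RX_PASSIVE : 0;
	return data1;
}

int main(void)
{
	printf("tx warning only -> data[1] = 0x%02x\n",
	       (unsigned int)state_flags(ERROR_WARNING, ERROR_ACTIVE));
	printf("both passive    -> data[1] = 0x%02x\n",
	       (unsigned int)state_flags(ERROR_PASSIVE, ERROR_PASSIVE));
	return 0;
}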
+
/*
* Local echo of CAN messages
*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 60f86bd0434a..b1d583ba9674 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,98 +577,30 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
return 1;
}
-static void do_state(struct net_device *dev,
- struct can_frame *cf, enum can_state new_state)
-{
- struct flexcan_priv *priv = netdev_priv(dev);
- struct can_berr_counter bec;
-
- __flexcan_get_berr_counter(dev, &bec);
-
- switch (priv->can.state) {
- case CAN_STATE_ERROR_ACTIVE:
- /*
- * from: ERROR_ACTIVE
- * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
- * => : there was a warning int
- */
- if (new_state >= CAN_STATE_ERROR_WARNING &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Warning IRQ\n");
- priv->can.can_stats.error_warning++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- }
- case CAN_STATE_ERROR_WARNING: /* fallthrough */
- /*
- * from: ERROR_ACTIVE, ERROR_WARNING
- * to : ERROR_PASSIVE, BUS_OFF
- * => : error passive int
- */
- if (new_state >= CAN_STATE_ERROR_PASSIVE &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Passive IRQ\n");
- priv->can.can_stats.error_passive++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- break;
- case CAN_STATE_BUS_OFF:
- netdev_err(dev, "BUG! "
- "hardware recovered automatically from BUS_OFF\n");
- break;
- default:
- break;
- }
-
- /* process state changes depending on the new state */
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- netdev_dbg(dev, "Error Warning\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- break;
- case CAN_STATE_ERROR_ACTIVE:
- netdev_dbg(dev, "Error Active\n");
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_ACTIVE;
- break;
- case CAN_STATE_BUS_OFF:
- cf->can_id |= CAN_ERR_BUSOFF;
- can_bus_off(dev);
- break;
- default:
- break;
- }
-}
-
static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
- enum can_state new_state;
+ enum can_state new_state = 0, rx_state = 0, tx_state = 0;
int flt;
+ struct can_berr_counter bec;
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
- if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN |
- FLEXCAN_ESR_RX_WRN))))
- new_state = CAN_STATE_ERROR_ACTIVE;
- else
- new_state = CAN_STATE_ERROR_WARNING;
- } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE))
+ tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
+ CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+ rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
+ CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+ new_state = max(tx_state, rx_state);
+ } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+ __flexcan_get_berr_counter(dev, &bec);
new_state = CAN_STATE_ERROR_PASSIVE;
- else
+ rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
+ tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
+ } else {
new_state = CAN_STATE_BUS_OFF;
+ }
/* state hasn't changed */
if (likely(new_state == priv->can.state))
@@ -678,8 +610,11 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
if (unlikely(!skb))
return 0;
- do_state(dev, cf, new_state);
- priv->can.state = new_state;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF))
+ can_bus_off(dev);
+
netif_receive_skb(skb);
dev->stats.rx_packets++;
@@ -1367,7 +1302,6 @@ static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
static struct platform_driver flexcan_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &flexcan_pm_ops,
.of_match_table = flexcan_of_match,
},
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 3fd9fd942c6e..fed1bbd0b0d2 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1738,7 +1738,6 @@ MODULE_DEVICE_TABLE(of, grcan_match);
static struct platform_driver grcan_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = grcan_match,
},
.probe = grcan_probe,
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 2382c04dc780..1b118394907f 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1910,7 +1910,6 @@ static int ican3_remove(struct platform_device *pdev)
static struct platform_driver ican3_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ican3_probe,
.remove = ican3_remove,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 44725296f72a..ad024e60ba8c 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -441,7 +441,6 @@ MODULE_DEVICE_TABLE(of, mpc5xxx_can_table);
static struct platform_driver mpc5xxx_can_driver = {
.driver = {
.name = "mpc5xxx_can",
- .owner = THIS_MODULE,
.of_match_table = mpc5xxx_can_table,
},
.probe = mpc5xxx_can_probe,
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e0c9be5e2ab7..e36b7400d5cc 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -289,18 +289,15 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/* This function returns the old state to see where we came from */
-static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
+static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
struct mscan_priv *priv = netdev_priv(dev);
- enum can_state state, old_state = priv->can.state;
- if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
- state = state_map[max(MSCAN_STATE_RX(canrflg),
- MSCAN_STATE_TX(canrflg))];
- priv->can.state = state;
- }
- return old_state;
+ if (unlikely(canrflg & MSCAN_CSCIF))
+ return state_map[max(MSCAN_STATE_RX(canrflg),
+ MSCAN_STATE_TX(canrflg))];
+
+ return priv->can.state;
}
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
@@ -349,7 +346,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- enum can_state old_state;
+ enum can_state new_state;
netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
frame->can_id = CAN_ERR_FLAG;
@@ -363,27 +360,13 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
frame->data[1] = 0;
}
- old_state = check_set_state(dev, canrflg);
- /* State changed */
- if (old_state != priv->can.state) {
- switch (priv->can.state) {
- case CAN_STATE_ERROR_WARNING:
- frame->can_id |= CAN_ERR_CRTL;
- priv->can.can_stats.error_warning++;
- if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
- (canrflg & MSCAN_RSTAT_MSK))
- frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
- (canrflg & MSCAN_TSTAT_MSK))
- frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- frame->can_id |= CAN_ERR_CRTL;
- priv->can.can_stats.error_passive++;
- frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- break;
- case CAN_STATE_BUS_OFF:
- frame->can_id |= CAN_ERR_BUSOFF;
+ new_state = get_new_state(dev, canrflg);
+ if (new_state != priv->can.state) {
+ can_change_state(dev, frame,
+ state_map[MSCAN_STATE_TX(canrflg)],
+ state_map[MSCAN_STATE_RX(canrflg)]);
+
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
/*
* The MSCAN on the MPC5200 does recover from bus-off
* automatically. To avoid that we stop the chip doing
@@ -396,9 +379,6 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
MSCAN_SLPRQ | MSCAN_INITRQ);
}
can_bus_off(dev);
- break;
- default:
- break;
}
}
priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 9718248e55f1..91cd48ca0efc 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -907,7 +907,6 @@ MODULE_DEVICE_TABLE(of, rcar_can_of_table);
static struct platform_driver rcar_can_driver = {
.driver = {
.name = RCAR_CAN_DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rcar_can_of_table),
.pm = &rcar_can_pm_ops,
},
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b27ac6074afb..32bd7f451aa4 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -392,12 +392,20 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
struct can_frame *cf;
struct sk_buff *skb;
enum can_state state = priv->can.state;
+ enum can_state rx_state, tx_state;
+ unsigned int rxerr, txerr;
uint8_t ecc, alc;
skb = alloc_can_err_skb(dev, &cf);
if (skb == NULL)
return -ENOMEM;
+ txerr = priv->read_reg(priv, SJA1000_TXERR);
+ rxerr = priv->read_reg(priv, SJA1000_RXERR);
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -412,13 +420,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
/* error warning interrupt */
netdev_dbg(dev, "error warning interrupt\n");
- if (status & SR_BS) {
+ if (status & SR_BS)
state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- can_bus_off(dev);
- } else if (status & SR_ES) {
+ else if (status & SR_ES)
state = CAN_STATE_ERROR_WARNING;
- } else
+ else
state = CAN_STATE_ERROR_ACTIVE;
}
if (isrc & IRQ_BEI) {
@@ -452,10 +458,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_EPI) {
/* error passive interrupt */
netdev_dbg(dev, "error passive interrupt\n");
- if (status & SR_ES)
- state = CAN_STATE_ERROR_PASSIVE;
+
+ if (state == CAN_STATE_ERROR_PASSIVE)
+ state = CAN_STATE_ERROR_WARNING;
else
- state = CAN_STATE_ERROR_ACTIVE;
+ state = CAN_STATE_ERROR_PASSIVE;
}
if (isrc & IRQ_ALI) {
/* arbitration lost interrupt */
@@ -467,27 +474,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[0] = alc & 0x1f;
}
- if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
- state == CAN_STATE_ERROR_PASSIVE)) {
- uint8_t rxerr = priv->read_reg(priv, SJA1000_RXERR);
- uint8_t txerr = priv->read_reg(priv, SJA1000_TXERR);
- cf->can_id |= CAN_ERR_CRTL;
- if (state == CAN_STATE_ERROR_WARNING) {
- priv->can.can_stats.error_warning++;
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- } else {
- priv->can.can_stats.error_passive++;
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ if (state != priv->can.state) {
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
- priv->can.state = state;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF)
+ can_bus_off(dev);
+ }
netif_rx(skb);
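A userspace sketch (not kernel code) of the tx_state/rx_state split used above: the side whose error counter is at least as large as the other moves to the new state, and a tie moves both:

#include <stdio.h>

enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

static void split_state(unsigned int txerr, unsigned int rxerr,
			enum can_state state,
			enum can_state *tx_state, enum can_state *rx_state)
{
	*tx_state = txerr >= rxerr ? state : ERROR_ACTIVE;
	*rx_state = txerr <= rxerr ? state : ERROR_ACTIVE;
}

int main(void)
{
	enum can_state tx, rx;

	/* only the transmit side has accumulated errors */
	split_state(130, 40, ERROR_PASSIVE, &tx, &rx);
	printf("txerr=130 rxerr=40: tx_state=%d rx_state=%d\n", tx, rx);
	return 0;
}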
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 014695d7e6a3..e97e6d35b300 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -259,7 +259,6 @@ static struct platform_driver sja1000_isa_driver = {
.remove = sja1000_isa_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 95a844a7ee7b..93115250eaf5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -253,7 +253,6 @@ static struct platform_driver sp_driver = {
.remove = sp_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = sp_of_table,
},
};
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index acb5b92ace92..c837eb91d43e 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -56,9 +56,6 @@
#include <linux/can.h>
#include <linux/can/skb.h>
-static __initconst const char banner[] =
- KERN_INFO "slcan: serial line CAN interface driver\n";
-
MODULE_ALIAS_LDISC(N_SLCAN);
MODULE_DESCRIPTION("serial line CAN interface");
MODULE_LICENSE("GPL");
@@ -702,8 +699,8 @@ static int __init slcan_init(void)
if (maxdev < 4)
maxdev = 4; /* Sanity */
- printk(banner);
- printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+ pr_info("slcan: serial line CAN interface driver\n");
+ pr_info("slcan: %d dynamic interface channels.\n", maxdev);
slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
if (!slcan_devs)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index bacd236ce306..2bf98d862302 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -856,7 +856,6 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
.name = "softing",
- .owner = THIS_MODULE,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 258b9c4856ec..9a07eafe554b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1039,7 +1039,6 @@ static int ti_hecc_resume(struct platform_device *pdev)
static struct platform_driver ti_hecc_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ti_hecc_probe,
.remove = ti_hecc_remove,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 925ab8ec9329..4e1659d07979 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -316,7 +316,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
if (err) {
netdev_err(dev->netdev, "getting serial failure: %d\n", err);
} else if (serial_number) {
- u32 tmp32;
+ __le32 tmp32;
memcpy(&tmp32, args, 4);
*serial_number = le32_to_cpu(tmp32);
@@ -347,7 +347,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
*/
static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc)
{
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr+2) > mc->end)
return -EINVAL;
@@ -371,7 +371,7 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet)
{
/* only 1st packet supplies a word timestamp */
if (first_packet) {
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr + 2) > mc->end)
return -EINVAL;
@@ -614,7 +614,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
return -ENOMEM;
if (status_len & PCAN_USB_STATUSLEN_EXT_ID) {
- u32 tmp32;
+ __le32 tmp32;
if ((mc->ptr + 4) > mc->end)
goto decode_failed;
@@ -622,9 +622,9 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
memcpy(&tmp32, mc->ptr, 4);
mc->ptr += 4;
- cf->can_id = le32_to_cpu(tmp32 >> 3) | CAN_EFF_FLAG;
+ cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG;
} else {
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr + 2) > mc->end)
goto decode_failed;
@@ -632,7 +632,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
memcpy(&tmp16, mc->ptr, 2);
mc->ptr += 2;
- cf->can_id = le16_to_cpu(tmp16 >> 5);
+ cf->can_id = le16_to_cpu(tmp16) >> 5;
}
cf->can_dlc = get_can_dlc(rec_len);
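A userspace sketch (not part of this patch) of why the conversion order matters in the hunks above: the little-endian ID field must be brought to host order before the shift, otherwise the shift operates on swapped bytes on big-endian machines:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	/* byte-wise assembly is endian-neutral */
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 29-bit ID 0x15555555 as it appears on the wire, shifted left by 3 */
	const uint8_t wire[4] = { 0xa8, 0xaa, 0xaa, 0xaa };
	uint32_t can_id = get_le32(wire) >> 3;	/* convert first, then shift */

	printf("extended can_id = 0x%08x\n", (unsigned int)can_id);
	return 0;
}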
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 644e6ab8a489..c62f48a1161d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -735,7 +735,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
err = -ENOMEM;
- goto lbl_set_intf_data;
+ goto lbl_free_candev;
}
dev->udev = usb_dev;
@@ -775,7 +775,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
err = register_candev(netdev);
if (err) {
dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
- goto lbl_free_cmd_buf;
+ goto lbl_restore_intf_data;
}
if (dev->prev_siblings)
@@ -788,14 +788,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
if (dev->adapter->dev_init) {
err = dev->adapter->dev_init(dev);
if (err)
- goto lbl_free_cmd_buf;
+ goto lbl_unregister_candev;
}
/* set bus off */
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
- goto lbl_free_cmd_buf;
+ goto lbl_unregister_candev;
}
/* get device number early */
@@ -807,11 +807,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
return 0;
-lbl_free_cmd_buf:
- kfree(dev->cmd_buf);
+lbl_unregister_candev:
+ unregister_candev(netdev);
-lbl_set_intf_data:
+lbl_restore_intf_data:
usb_set_intfdata(intf, dev->prev_siblings);
+ kfree(dev->cmd_buf);
+
+lbl_free_candev:
free_candev(netdev);
return err;
@@ -853,6 +856,7 @@ static int peak_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
struct peak_usb_adapter *peak_usb_adapter, **pp;
int i, err = -ENOMEM;
@@ -860,7 +864,7 @@ static int peak_usb_probe(struct usb_interface *intf,
/* get corresponding PCAN-USB adapter */
for (pp = peak_usb_adapters_list; *pp; pp++)
- if ((*pp)->device_id == usb_dev->descriptor.idProduct)
+ if ((*pp)->device_id == usb_id_product)
break;
peak_usb_adapter = *pp;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 263dd921edc4..4cfa3b8605b1 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -78,8 +78,8 @@ struct pcan_usb_pro_msg {
int rec_buffer_size;
int rec_buffer_len;
union {
- u16 *rec_cnt_rd;
- u32 *rec_cnt;
+ __le16 *rec_cnt_rd;
+ __le32 *rec_cnt;
u8 *rec_buffer;
} u;
};
@@ -155,7 +155,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
memcpy(pc, va_arg(ap, int *), i);
pc += i;
@@ -165,7 +165,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
case PCAN_USBPRO_GETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
break;
@@ -173,21 +173,21 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
case PCAN_USBPRO_SETBUSACT:
case PCAN_USBPRO_SETSILENT:
*pc++ = va_arg(ap, int);
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
break;
case PCAN_USBPRO_SETLED:
*pc++ = va_arg(ap, int);
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
break;
case PCAN_USBPRO_SETTS:
pc++;
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
break;
@@ -200,7 +200,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
len = pc - pm->rec_ptr;
if (len > 0) {
- *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1);
+ *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1);
*pm->rec_ptr = id;
pm->rec_ptr = pc;
@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
if (!(dev->state & PCAN_USB_STATE_CONNECTED))
return 0;
- memset(req_addr, '\0', req_size);
-
req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
switch (req_id) {
@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
default:
p = usb_rcvctrlpipe(dev->udev, 0);
req_type |= USB_DIR_IN;
+ memset(req_addr, '\0', req_size);
break;
}
@@ -572,7 +571,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
struct pcan_usb_pro_rxstatus *er)
{
- const u32 raw_status = le32_to_cpu(er->status);
+ const u16 raw_status = le16_to_cpu(er->status);
const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f;
struct peak_usb_device *dev = usb_if->dev[ctrl_idx];
struct net_device *netdev = dev->netdev;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 32275af547e0..837cee267132 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -33,27 +33,27 @@
/* PCAN_USBPRO_INFO_BL vendor request record type */
struct __packed pcan_usb_pro_blinfo {
- u32 ctrl_type;
+ __le32 ctrl_type;
u8 version[4];
u8 day;
u8 month;
u8 year;
u8 dummy;
- u32 serial_num_hi;
- u32 serial_num_lo;
- u32 hw_type;
- u32 hw_rev;
+ __le32 serial_num_hi;
+ __le32 serial_num_lo;
+ __le32 hw_type;
+ __le32 hw_rev;
};
/* PCAN_USBPRO_INFO_FW vendor request record type */
struct __packed pcan_usb_pro_fwinfo {
- u32 ctrl_type;
+ __le32 ctrl_type;
u8 version[4];
u8 day;
u8 month;
u8 year;
u8 dummy;
- u32 fw_type;
+ __le32 fw_type;
};
/*
@@ -80,46 +80,46 @@ struct __packed pcan_usb_pro_fwinfo {
struct __packed pcan_usb_pro_btr {
u8 data_type;
u8 channel;
- u16 dummy;
- u32 CCBT;
+ __le16 dummy;
+ __le32 CCBT;
};
struct __packed pcan_usb_pro_busact {
u8 data_type;
u8 channel;
- u16 onoff;
+ __le16 onoff;
};
struct __packed pcan_usb_pro_silent {
u8 data_type;
u8 channel;
- u16 onoff;
+ __le16 onoff;
};
struct __packed pcan_usb_pro_filter {
u8 data_type;
u8 dummy;
- u16 filter_mode;
+ __le16 filter_mode;
};
struct __packed pcan_usb_pro_setts {
u8 data_type;
u8 dummy;
- u16 mode;
+ __le16 mode;
};
struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
- u16 dummy;
- u32 serial_num;
+ __le16 dummy;
+ __le32 serial_num;
};
struct __packed pcan_usb_pro_setled {
u8 data_type;
u8 channel;
- u16 mode;
- u32 timeout;
+ __le16 mode;
+ __le32 timeout;
};
struct __packed pcan_usb_pro_rxmsg {
@@ -127,8 +127,8 @@ struct __packed pcan_usb_pro_rxmsg {
u8 client;
u8 flags;
u8 len;
- u32 ts32;
- u32 id;
+ __le32 ts32;
+ __le32 id;
u8 data[8];
};
@@ -141,15 +141,15 @@ struct __packed pcan_usb_pro_rxmsg {
struct __packed pcan_usb_pro_rxstatus {
u8 data_type;
u8 channel;
- u16 status;
- u32 ts32;
- u32 err_frm;
+ __le16 status;
+ __le32 ts32;
+ __le32 err_frm;
};
struct __packed pcan_usb_pro_rxts {
u8 data_type;
u8 dummy[3];
- u32 ts64[2];
+ __le32 ts64[2];
};
struct __packed pcan_usb_pro_txmsg {
@@ -157,7 +157,7 @@ struct __packed pcan_usb_pro_txmsg {
u8 client;
u8 flags;
u8 len;
- u32 id;
+ __le32 id;
u8 data[8];
};
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 4e94057ef5cf..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -50,9 +50,6 @@
#include <linux/slab.h>
#include <net/rtnetlink.h>
-static __initconst const char banner[] =
- KERN_INFO "vcan: Virtual CAN interface driver\n";
-
MODULE_DESCRIPTION("virtual CAN interface");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
@@ -173,7 +170,7 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = {
static __init int vcan_init_module(void)
{
- printk(banner);
+ pr_info("vcan: Virtual CAN interface driver\n");
if (echo)
printk(KERN_INFO "vcan: enabled echo on driver level.\n");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 8a998e3884ce..6c6764312285 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1196,7 +1196,6 @@ static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.pm = &xcan_dev_pm_ops,
.of_match_table = xcan_of_match,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 9234d808cbb3..48e62a34f7f2 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,20 +37,29 @@ config NET_DSA_MV88E6123_61_65
ethernet switch chips.
config NET_DSA_MV88E6171
- tristate "Marvell 88E6171 ethernet switch chip support"
+ tristate "Marvell 88E6171/6172 ethernet switch chip support"
select NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6171 ethernet switch
- chip.
+ This enables support for the Marvell 88E6171/6172 ethernet switch
+ chips.
+
+config NET_DSA_MV88E6352
+ tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+ select NET_DSA
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6176 and 88E6352 ethernet
+ switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
depends on HAS_IOMEM
select NET_DSA
select NET_DSA_TAG_BRCM
- select FIXED_PHY if NET_DSA_BCM_SF2=y
+ select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
---help---
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 23a90de9830e..e2d51c4b9382 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,6 +7,9 @@ endif
ifdef CONFIG_NET_DSA_MV88E6131
mv88e6xxx_drv-y += mv88e6131.o
endif
+ifdef CONFIG_NET_DSA_MV88E6352
+mv88e6xxx_drv-y += mv88e6352.o
+endif
ifdef CONFIG_NET_DSA_MV88E6171
mv88e6xxx_drv-y += mv88e6171.o
endif
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4f4c2a7888e5..feb29c4526f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -684,10 +684,9 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- u32 link, duplex, pause, speed;
+ u32 duplex, pause, speed;
u32 reg;
- link = core_readl(priv, CORE_LNKSTS);
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
speed = core_readl(priv, CORE_SPDSTS);
@@ -701,22 +700,26 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* which means that we need to force the link at the port override
* level to get the data to flow. We do use what the interrupt handler
* did determine before.
+ *
+ * For the other ports, we just force the link status, since this is
+ * a fixed PHY device.
*/
if (port == 7) {
status->link = priv->port_sts[port].link;
- reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
- reg |= SW_OVERRIDE;
- if (status->link)
- reg |= LINK_STS;
- else
- reg &= ~LINK_STS;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
status->duplex = 1;
} else {
- status->link = !!(link & (1 << port));
+ status->link = 1;
status->duplex = !!(duplex & (1 << port));
}
+ reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ reg |= SW_OVERRIDE;
+ if (status->link)
+ reg |= LINK_STS;
+ else
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+
switch (speed) {
case SPDSTS_10:
status->speed = SPEED_10;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 05b0ca3bf71d..c29aebe1e62b 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -69,8 +69,11 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
if (ret >= 0) {
- ret &= 0xfff0;
if (ret == 0x0600)
+ return "Marvell 88E6060 (A0)";
+ if (ret == 0x0601 || ret == 0x0602)
+ return "Marvell 88E6060 (B0)";
+ if ((ret & 0xfff0) == 0x0600)
return "Marvell 88E6060";
}
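A userspace sketch (not kernel code) of the ID decoding above, where the low nibble of the switch identifier is the revision, so exact matches name a stepping and the masked compare acts as a catch-all:

#include <stdio.h>

static const char *name_6060(int id)
{
	if (id == 0x0600)
		return "Marvell 88E6060 (A0)";
	if (id == 0x0601 || id == 0x0602)
		return "Marvell 88E6060 (B0)";
	if ((id & 0xfff0) == 0x0600)
		return "Marvell 88E6060";
	return NULL;
}

int main(void)
{
	printf("%s\n", name_6060(0x0602));	/* known B0 stepping */
	printf("%s\n", name_6060(0x0605));	/* unknown revision, catch-all */
	return 0;
}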
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index a332c53ff955..e9c736e1cef3 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -299,6 +299,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
mutex_init(&ps->smi_mutex);
mutex_init(&ps->stats_mutex);
+ mutex_init(&ps->phy_mutex);
ret = mv88e6123_61_65_switch_reset(ds);
if (ret < 0)
@@ -329,16 +330,28 @@ static int mv88e6123_61_65_port_to_phy_addr(int port)
static int
mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6123_61_65_port_to_phy_addr(port);
- return mv88e6xxx_phy_read(ds, addr, regnum);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static int
mv88e6123_61_65_phy_write(struct dsa_switch *ds,
int port, int regnum, u16 val)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6123_61_65_port_to_phy_addr(port);
- return mv88e6xxx_phy_write(ds, addr, regnum, val);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
@@ -372,6 +385,9 @@ static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
{ "hist_256_511bytes", 4, 0x0b, },
{ "hist_512_1023bytes", 4, 0x0c, },
{ "hist_1024_max_bytes", 4, 0x0d, },
+ { "sw_in_discards", 4, 0x110, },
+ { "sw_in_filtered", 2, 0x112, },
+ { "sw_out_filtered", 2, 0x113, },
};
static void
@@ -406,6 +422,11 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.get_strings = mv88e6123_61_65_get_strings,
.get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
.get_sset_count = mv88e6123_61_65_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+#endif
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
};
MODULE_ALIAS("platform:mv88e6123");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 244c735014fa..1230f52aa70e 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -21,6 +21,7 @@
#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060
+#define ID_6131_B2 0x1066
static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
{
@@ -32,12 +33,15 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
- ret &= 0xfff0;
- if (ret == ID_6085)
+ int ret_masked = ret & 0xfff0;
+
+ if (ret_masked == ID_6085)
return "Marvell 88E6085";
- if (ret == ID_6095)
+ if (ret_masked == ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == ID_6131)
+ if (ret == ID_6131_B2)
+ return "Marvell 88E6131 (B2)";
+ if (ret_masked == ID_6131)
return "Marvell 88E6131";
}
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 78d8e876f3aa..aa33d16f2e22 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171/88e6172 switch chip support
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
*
@@ -29,6 +29,8 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
if (ret >= 0) {
if ((ret & 0xfff0) == 0x1710)
return "Marvell 88E6171";
+ if ((ret & 0xfff0) == 0x1720)
+ return "Marvell 88E6172";
}
return NULL;
@@ -314,6 +316,8 @@ static int mv88e6171_setup(struct dsa_switch *ds)
return ret;
}
+ mutex_init(&ps->phy_mutex);
+
return 0;
}
@@ -327,18 +331,28 @@ static int mv88e6171_port_to_phy_addr(int port)
static int
mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6171_port_to_phy_addr(port);
+ int ret;
- return mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static int
mv88e6171_phy_write(struct dsa_switch *ds,
int port, int regnum, u16 val)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6171_port_to_phy_addr(port);
+ int ret;
- return mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
@@ -406,6 +420,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.get_strings = mv88e6171_get_strings,
.get_ethtool_stats = mv88e6171_get_ethtool_stats,
.get_sset_count = mv88e6171_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+#endif
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
};
MODULE_ALIAS("platform:mv88e6171");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
new file mode 100644
index 000000000000..258d9ef5ef25
--- /dev/null
+++ b/drivers/net/dsa/mv88e6352.c
@@ -0,0 +1,788 @@
+/*
+ * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * Derived from mv88e6123_61_65.c
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ while (time_before(jiffies, timeout)) {
+ int ret;
+
+ ret = REG_READ(REG_GLOBAL2, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x18, 0x8000);
+}
+
+static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x14, 0x0800);
+}
+
+static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x14, 0x8000);
+}
+
+static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+ int ret;
+
+ REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
+
+ ret = mv88e6352_phy_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return REG_READ(REG_GLOBAL2, 0x19);
+}
+
+static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum,
+ u16 val)
+{
+ REG_WRITE(REG_GLOBAL2, 0x19, val);
+ REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
+
+ return mv88e6352_phy_wait(ds);
+}
+
+static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
+{
+ struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+ int ret;
+
+ if (bus == NULL)
+ return NULL;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ if ((ret & 0xfff0) == 0x1760)
+ return "Marvell 88E6176";
+ if (ret == 0x3521)
+ return "Marvell 88E6352 (A0)";
+ if (ret == 0x3522)
+ return "Marvell 88E6352 (A1)";
+ if ((ret & 0xfff0) == 0x3520)
+ return "Marvell 88E6352";
+ }
+
+ return NULL;
+}
+
+static int mv88e6352_switch_reset(struct dsa_switch *ds)
+{
+ unsigned long timeout;
+ int ret;
+ int i;
+
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < 7; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* Reset the switch. Keep PPU active (bit 14, undocumented).
+ * The PPU needs to be active to support indirect phy register
+ * accesses through global registers 0x18 and 0x19.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0x8800) == 0x8800)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6352_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /* Discard packets with excessive collisions,
+ * mask all interrupt sources, enable PPU (bit 14, undocumented).
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
+
+ /* Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /* Configure the priority mapping registers. */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the upstream port, and configure the upstream
+ * port as the port to which ingress and egress monitor frames
+ * are to be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+ /* Disable remote management for now, and set the switch's
+ * DSA device number.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /* Disable the loopback filter, disable flow control
+ * messages, disable flood broadcast override, disable
+ * removing of provider tags, disable ATU age violation
+ * interrupts, disable tag flow control, force flow
+ * control priority to the highest, and send all special
+ * multicast frames to the CPU at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /* Program the DSA routing table. */
+ for (i = 0; i < 32; i++) {
+ int nexthop = 0x1f;
+
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /* Clear all trunk masks. */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
+
+ /* Clear all trunk mappings. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /* Disable ingress rate limiting by resetting all ingress
+ * rate limit registers to their initial state.
+ */
+ for (i = 0; i < 7; i++)
+ REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+ /* Initialise cross-chip port VLAN table to reset defaults. */
+ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+ /* Clear the priority override table. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+ /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+ return 0;
+}
+
+static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /* MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ REG_WRITE(addr, 0x01, 0x003e);
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /* Do not limit the period of time that this port can be
+ * paused for by the remote end or the period of time that
+ * this port can pause the remote end.
+ */
+ REG_WRITE(addr, 0x02, 0x0000);
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ val = 0x0433;
+ if (dsa_is_cpu_port(ds, p)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ val |= 0x3300;
+ else
+ val |= 0x0100;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ if (p == dsa_upstream_port(ds))
+ val |= 0x000c;
+ REG_WRITE(addr, 0x04, val);
+
+ /* Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /* Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /* Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /* Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ REG_WRITE(addr, 0x08, 0x2080);
+
+ /* Egress rate control: disable egress rate control. */
+ REG_WRITE(addr, 0x09, 0x0001);
+
+ /* Egress rate control 2: disable egress rate control. */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /* Port ATU control: disable limiting the number of address
+ * database entries that this port is allowed to use.
+ */
+ REG_WRITE(addr, 0x0c, 0x0000);
+
+ /* Priority Override: disable DA, SA and VTU priority override. */
+ REG_WRITE(addr, 0x0d, 0x0000);
+
+ /* Port Ethertype: use the Ethertype DSA Ethertype value. */
+ REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e6352_phy_page_read(struct dsa_switch *ds,
+ int port, int page, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+ ret = __mv88e6352_phy_read(ds, port, reg);
+error:
+ __mv88e6352_phy_write(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+static int mv88e6352_phy_page_write(struct dsa_switch *ds,
+ int port, int page, int reg, int val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+
+ ret = __mv88e6352_phy_write(ds, port, reg, val);
+error:
+ __mv88e6352_phy_write(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+ if (ret < 0)
+ return ret;
+
+ *temp = (ret & 0xff) - 25;
+
+ return 0;
+}
+
+static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+ return 0;
+}
+
+static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+ int ret;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ return mv88e6352_phy_page_write(ds, 0, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
+}
+
+static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+ int ret;
+
+ *alarm = false;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *alarm = !!(ret & 0x40);
+
+ return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
+static int mv88e6352_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int i;
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+ mutex_init(&ps->phy_mutex);
+ mutex_init(&ps->eeprom_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+ ret = mv88e6352_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6352_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 7; i++) {
+ ret = mv88e6352_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6352_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 4)
+ return port;
+ return -EINVAL;
+}
+
+static int
+mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6352_port_to_phy_addr(port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+
+ return ret;
+}
+
+static int
+mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6352_port_to_phy_addr(port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+
+ return ret;
+}
+
+static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+ { "sw_in_discards", 4, 0x110, },
+ { "sw_in_filtered", 2, 0x112, },
+ { "sw_out_filtered", 2, 0x113, },
+};
+
+static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+ 0xc000 | (addr & 0xff));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6352_eeprom_busy_wait(ds);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6352_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int offset;
+ int len;
+ int ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = 0xc3ec4951;
+
+ ret = mv88e6352_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = (word >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+ *data++ = (word >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
+{
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & 0x0400))
+ return -EROFS;
+
+ return 0;
+}
+
+static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
+ u16 data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+ 0xb000 | (addr & 0xff));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6352_eeprom_busy_wait(ds);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6352_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int offset;
+ int ret;
+ int len;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ ret = mv88e6352_eeprom_is_readonly(ds);
+ if (ret)
+ return ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ ret = mv88e6352_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (*data++ << 8) | (word & 0xff);
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = *data++;
+ word |= *data++ << 8;
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (word & 0xff00) | *data++;
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static void
+mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+ mv88e6352_hw_stats, port, data);
+}
+
+static void
+mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+ mv88e6352_hw_stats, port, data);
+}
+
+static int mv88e6352_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6352_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6352_switch_driver = {
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6352_probe,
+ .setup = mv88e6352_setup,
+ .set_addr = mv88e6xxx_set_addr_indirect,
+ .phy_read = mv88e6352_phy_read,
+ .phy_write = mv88e6352_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6352_get_strings,
+ .get_ethtool_stats = mv88e6352_get_ethtool_stats,
+ .get_sset_count = mv88e6352_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6352_get_temp,
+ .get_temp_limit = mv88e6352_get_temp_limit,
+ .set_temp_limit = mv88e6352_set_temp_limit,
+ .get_temp_alarm = mv88e6352_get_temp_alarm,
+#endif
+ .get_eeprom = mv88e6352_get_eeprom,
+ .set_eeprom = mv88e6352_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+};
+
+MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index a6c90cf5634d..cd6807c6b4ed 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -485,20 +485,108 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
for (i = 0; i < nr_stats; i++) {
struct mv88e6xxx_hw_stat *s = stats + i;
u32 low;
- u32 high;
-
+ u32 high = 0;
+
+ if (s->reg >= 0x100) {
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100);
+ if (ret < 0)
+ goto error;
+ low = ret;
+ if (s->sizeof_stat == 4) {
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100 + 1);
+ if (ret < 0)
+ goto error;
+ high = ret;
+ }
+ data[i] = (((u64)high) << 16) | low;
+ continue;
+ }
mv88e6xxx_stats_read(ds, s->reg, &low);
if (s->sizeof_stat == 8)
mv88e6xxx_stats_read(ds, s->reg + 1, &high);
- else
- high = 0;
data[i] = (((u64)high) << 32) | low;
}
-
+error:
mutex_unlock(&ps->stats_mutex);
}
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+{
+ return 32 * sizeof(u16);
+}
+
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
+{
+ u16 *p = _p;
+ int i;
+
+ regs->version = 0;
+
+ memset(p, 0xff, 32 * sizeof(u16));
+
+ for (i = 0; i < 32; i++) {
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+ if (ret >= 0)
+ p[i] = ret;
+ }
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&ps->phy_mutex);
+
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+ mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
@@ -507,6 +595,9 @@ static int __init mv88e6xxx_init(void)
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
register_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+ register_switch_driver(&mv88e6352_switch_driver);
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
register_switch_driver(&mv88e6171_switch_driver);
#endif
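To make the hwmon arithmetic introduced above easier to check, here is a small stand-alone rendering of the conversion formulas. The register samples in main() are invented; only the formulas and bit fields come from the patch.

#include <stdio.h>

/* Generic mv88e6xxx sensor: 5-bit reading in steps of 5 degC, offset -25. */
static int mv88e6xxx_decode_temp(int reg)
{
	return ((reg & 0x1f) - 5) * 5;
}

/* 88E6352 limit field: bits 12:8, steps of 5 degC, offset -25. */
static int mv88e6352_decode_limit(int reg)
{
	return (((reg >> 8) & 0x1f) * 5) - 25;
}

/* Inverse of the above, as used when programming a new limit
 * (rounded to the nearest 5 degC and clamped to the 5-bit field). */
static int mv88e6352_encode_limit(int temp)
{
	int steps = (temp + 2) / 5 + 5;

	if (steps < 0)
		steps = 0;
	if (steps > 0x1f)
		steps = 0x1f;
	return steps << 8;
}

int main(void)
{
	/* Sample raw values, chosen only for illustration. */
	printf("temp  = %d C\n", mv88e6xxx_decode_temp(0x0f));    /* 50 C */
	printf("limit = %d C\n", mv88e6352_decode_limit(0x1000)); /* 55 C */
	printf("encoded 70 C limit = 0x%04x\n", mv88e6352_encode_limit(70));
	return 0;
}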
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 5e5145ad9525..03e397efde36 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -37,6 +37,17 @@ struct mv88e6xxx_priv_state {
*/
struct mutex stats_mutex;
+ /* This mutex serializes phy access for chips with
+ * indirect phy addressing. It is unused for chips
+ * with direct phy access.
+ */
+ struct mutex phy_mutex;
+
+ /* This mutex serializes eeprom access for chips with
+ * eeprom support.
+ */
+ struct mutex eeprom_mutex;
+
int id; /* switch product id */
};
@@ -67,9 +78,14 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data);
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
extern struct dsa_switch_driver mv88e6131_switch_driver;
extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+extern struct dsa_switch_driver mv88e6352_switch_driver;
extern struct dsa_switch_driver mv88e6171_switch_driver;
#define REG_READ(addr, reg) \
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index ff435fbd1ad0..49adbf1b7574 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,9 @@
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
+#define DRV_NAME "dummy"
+#define DRV_VERSION "1.0"
+
static int numdummies = 1;
/* fake multicast ability */
@@ -120,12 +123,24 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_change_carrier = dummy_change_carrier,
};
+static void dummy_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops dummy_ethtool_ops = {
+ .get_drvinfo = dummy_get_drvinfo,
+};
+
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
+ dev->ethtool_ops = &dummy_ethtool_ops;
dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
@@ -150,7 +165,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
}
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
- .kind = "dummy",
+ .kind = DRV_NAME,
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -209,4 +224,5 @@ static void __exit dummy_cleanup_module(void)
module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK("dummy");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 48775b88bac7..dede43f4ce09 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1285,7 +1285,7 @@ typhoon_request_firmware(struct typhoon *tp)
return err;
}
- image_data = (u8 *) typhoon_fw->data;
+ image_data = typhoon_fw->data;
remaining = typhoon_fw->size;
if (remaining < sizeof(struct typhoon_file_header))
goto invalid_fw;
@@ -1343,7 +1343,7 @@ typhoon_download_firmware(struct typhoon *tp)
int i;
int err;
- image_data = (u8 *) typhoon_fw->data;
+ image_data = typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
/* Cannot just map the firmware image using pci_map_single() as
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1d162ccb4733..0443654f0339 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -1000,7 +1000,6 @@ static int ax_resume(struct platform_device *pdev)
static struct platform_driver axdrv = {
.driver = {
.name = "ax88796",
- .owner = THIS_MODULE,
},
.probe = ax_probe,
.remove = ax_remove,
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 38fcdcf7c4c7..e1c055574a11 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -467,7 +467,6 @@ static int mcf8390_remove(struct platform_device *pdev)
static struct platform_driver mcf8390_drv = {
.driver = {
.name = "mcf8390",
- .owner = THIS_MODULE,
},
.probe = mcf8390_probe,
.remove = mcf8390_remove,
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index de566fb6e0f7..c063b410a163 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -920,7 +920,6 @@ static struct platform_driver ne_driver = {
.resume = ne_drv_resume,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 1ed1fbba5d58..df76050d0a9d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
+source "drivers/net/ethernet/rocker/Kconfig"
config S6GMAC
tristate "S6105 GMAC ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 6e0b629e9859..bf56f8b36e90 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
+obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 8ed4d3408ef6..ec20611e9de2 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1901,7 +1901,6 @@ static struct platform_driver bfin_mii_bus_driver = {
.remove = bfin_mii_bus_remove,
.driver = {
.name = "bfin_mii_bus",
- .owner = THIS_MODULE,
},
};
@@ -1912,7 +1911,6 @@ static struct platform_driver bfin_mac_driver = {
.suspend = bfin_mac_suspend,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 3005155e412b..2b8bfeeee9cf 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1603,7 +1603,6 @@ MODULE_DEVICE_TABLE(of, greth_of_match);
static struct platform_driver greth_of_driver = {
.driver = {
.name = "grlib-greth",
- .owner = THIS_MODULE,
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 4efc4355d345..3498760dc22a 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1587,7 +1587,6 @@ static struct platform_driver altera_tse_driver = {
.resume = NULL,
.driver = {
.name = ALTERA_TSE_RESOURCE_NAME,
- .owner = THIS_MODULE,
.of_match_table = altera_tse_ids,
},
};
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8319c99331b0..7a5e4aa5415e 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on OF_NET
+ depends on OF_NET && HAS_IOMEM
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 6c323f4f457b..cb367cc59e0b 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1461,7 +1461,6 @@ static struct platform_driver au1000_eth_driver = {
.remove = au1000_remove,
.driver = {
.name = "au1000-eth",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 5e4273b7aa27..7847638bdd22 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1524,7 +1524,6 @@ MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
static struct platform_driver sunlance_sbus_driver = {
.driver = {
.name = "sunlance",
- .owner = THIS_MODULE,
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30820d5..75b08c63d39f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -125,9 +125,6 @@
#define DMA_AXIAWCR 0x3018
#define DMA_DSR0 0x3020
#define DMA_DSR1 0x3024
-#define DMA_DSR2 0x3028
-#define DMA_DSR3 0x302c
-#define DMA_DSR4 0x3030
/* DMA register entry bit positions and sizes */
#define DMA_AXIARCR_DRC_INDEX 0
@@ -158,10 +155,6 @@
#define DMA_AXIAWCR_TDC_WIDTH 4
#define DMA_AXIAWCR_TDD_INDEX 28
#define DMA_AXIAWCR_TDD_WIDTH 2
-#define DMA_DSR0_RPS_INDEX 8
-#define DMA_DSR0_RPS_WIDTH 4
-#define DMA_DSR0_TPS_INDEX 12
-#define DMA_DSR0_TPS_WIDTH 4
#define DMA_ISR_MACIS_INDEX 17
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
@@ -175,6 +168,20 @@
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
/* DMA channel register offsets
* Multiple channels can be active. The first channel has registers
* that begin at 0x3100. Each subsequent channel has registers that
@@ -207,6 +214,8 @@
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
@@ -306,6 +315,9 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
#define MAC_TSCR 0x0d00
#define MAC_SSIR 0x0d04
#define MAC_STSR 0x0d08
@@ -429,6 +441,8 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
@@ -445,6 +459,24 @@
#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
#define MAC_SSIR_SNSINC_INDEX 8
#define MAC_SSIR_SNSINC_WIDTH 8
#define MAC_SSIR_SSINC_INDEX 16
@@ -844,9 +876,13 @@
#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
@@ -855,14 +891,27 @@
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
#define RX_NORMAL_DESC3_LD_INDEX 28
#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_NORMAL_DESC3_OWN_INDEX 31
#define RX_NORMAL_DESC3_OWN_WIDTH 1
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 6fc5da01437d..a50891f52197 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
kfree(ring->rdata);
ring->rdata = NULL;
}
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
if (ring->rdesc) {
dma_free_coherent(pdata->dev,
(sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ err_ring:
return ret;
}
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -266,7 +378,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
- ring->tx.queue_stopped = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
hw_if->tx_desc_init(channel);
}
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
- dma_addr_t rdesc_dma, skb_dma;
- struct sk_buff *skb = NULL;
+ dma_addr_t rdesc_dma;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
- break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
rdesc++;
rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -325,8 +422,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
- ring->rx.realloc_index = 0;
- ring->rx.realloc_threshold = 0;
+ memset(&ring->rx, 0, sizeof(ring->rx));
hw_if->rx_desc_init(channel);
}
@@ -334,8 +430,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
- struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
{
if (rdata->skb_dma) {
if (rdata->mapped_as_page) {
@@ -354,9 +450,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
rdata->skb = NULL;
}
- rdata->tso_header = 0;
- rdata->len = 0;
- rdata->interrupt = 0;
+ if (rdata->rx.hdr.pa.pages)
+ put_page(rdata->rx.hdr.pa.pages);
+
+ if (rdata->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
+ rdata->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx.buf.pa.pages)
+ put_page(rdata->rx.buf.pa.pages);
+
+ if (rdata->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
+ rdata->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->tx, 0, sizeof(rdata->tx));
+ memset(&rdata->rx, 0, sizeof(rdata->rx));
+
rdata->mapped_as_page = 0;
if (rdata->state_saved) {
@@ -414,7 +530,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = packet->header_len;
- rdata->tso_header = 1;
offset = packet->header_len;
@@ -481,7 +596,11 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
}
- /* Save the skb address in the last entry */
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so rdata is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
rdata->skb = skb;
/* Save the number of descriptor entries used */
@@ -494,7 +613,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
while (start_index < cur_index) {
rdata = XGBE_GET_DESC_DATA(ring, start_index++);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -502,40 +621,25 @@ err_out:
return 0;
}
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct sk_buff *skb = NULL;
- dma_addr_t skb_dma;
int i;
- DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
ring->rx.realloc_index);
for (i = 0; i < ring->dirty; i++) {
rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
/* Reset rdata values */
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
- break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
hw_if->rx_desc_reset(rdata);
@@ -543,7 +647,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
}
ring->dirty = 0;
- DBGPR("<--xgbe_realloc_skb\n");
+ DBGPR("<--xgbe_realloc_rx_buffer\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -553,8 +657,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
desc_if->free_ring_resources = xgbe_free_ring_resources;
desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->realloc_skb = xgbe_realloc_skb;
- desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
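The allocation scheme added above carves fixed-size Rx header and buffer slices out of one large page allocation and only allocates again when the current one is used up; page refcounting and DMA mapping keep outstanding slices alive, and the descriptor that consumes the last slice is the one that unmaps the page. Below is a deliberately simplified, malloc-based sketch of just the carving/offset bookkeeping; all names are hypothetical, refcounting and DMA are omitted, and in this toy no slice is kept across a refill.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the Rx buffer scheme: one large allocation is sliced up,
 * and a fresh allocation is made only when the current one cannot hold
 * another slice. */
struct buf_pool {
	unsigned char *base;
	size_t len;
	size_t offset;
};

static int pool_refill(struct buf_pool *p, size_t len)
{
	p->base = malloc(len);
	if (!p->base)
		return -1;
	p->len = len;
	p->offset = 0;
	return 0;
}

static void *pool_carve(struct buf_pool *p, size_t slice)
{
	void *buf;

	if (!p->base || p->offset + slice > p->len) {
		/* The real driver pins pages with get_page() and defers the
		 * unmap to the consuming descriptor; the toy just frees. */
		free(p->base);
		if (pool_refill(p, 16 * slice) < 0)
			return NULL;
	}
	buf = p->base + p->offset;
	p->offset += slice;
	return buf;
}

int main(void)
{
	struct buf_pool pool = { 0 };
	int i;

	for (i = 0; i < 40; i++) {
		void *b = pool_carve(&pool, 2048);
		if (!b)
			return 1;
		memset(b, 0, 2048);
	}
	printf("carved 40 buffers, pool offset now %zu\n", pool.offset);
	free(pool.base);
	return 0;
}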
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03e8c07..53f5f66ec2ee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
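The RSS register writer in the hunk above follows a common pattern: kick off the access by setting the OB (operation busy) bit, then poll it a bounded number of times with short sleeps and give up with -EBUSY. A user-space sketch of that poll loop against a fake busy flag; the flag and helper names are made up, while the retry count and sleep interval mirror the hunk.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Fake "operation busy" flag that clears after a few polls; stands in
 * for reading the OB bit of MAC_RSSAR in the driver. */
static int fake_busy = 5;

static int read_busy_bit(void)
{
	if (fake_busy > 0) {
		fake_busy--;
		return 1;
	}
	return 0;
}

static int wait_not_busy(unsigned int tries)
{
	while (tries--) {
		if (!read_busy_bit())
			return 0;
		usleep(1000);		/* driver uses usleep_range(1000, 1500) */
	}
	return -EBUSY;
}

int main(void)
{
	int ret = wait_not_busy(1000);

	printf("wait_not_busy: %s\n", ret ? "timed out" : "completed");
	return ret ? 1 : 0;
}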
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
- * TIE - Transmit Interrupt Enable (unless polling)
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
- * RIE - Receive Interrupt Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
int i;
int start_index = ring->cur;
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Tx descriptor
- * Set buffer 1 (lo) address to zero
- * Set buffer 1 (hi) address to zero
- * Reset all other control bits (IC, TTSE, B2L & B1L)
- * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
- * etc)
- */
- rdesc->desc0 = 0;
- rdesc->desc1 = 0;
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
}
- /* Make sure everything is written to the descriptor(s) before
- * telling the device about them
- */
- wmb();
-
/* Update the total number of Tx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control bits
- * OWN and INTE
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = 0;
- if (rdata->interrupt)
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
unsigned int start_index = ring->cur;
unsigned int rx_coalesce, rx_frames;
unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control
- * bits OWN and INTE appropriateley
- */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
- rdata->interrupt = 1;
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
- /* Clear interrupt on completion bit */
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- 0);
+ /* Set interrupt on completion bit as appropriate */
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
rdata->interrupt = 0;
- }
- }
+ else
+ rdata->interrupt = 1;
- /* Make sure everything is written to the descriptors before
- * telling the device about them
- */
- wmb();
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(rdata);
+ }
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,30 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
@@ -1207,11 +1357,11 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
struct xgbe_packet_data *packet = &ring->packet_data;
unsigned int csum, tso, vlan;
unsigned int tso_context, vlan_context;
- unsigned int tx_coalesce, tx_frames;
+ unsigned int tx_set_ic;
int start_index = ring->cur;
int i;
- DBGPR("-->xgbe_pre_xmit\n");
+ DBGPR("-->xgbe_dev_xmit\n");
csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE);
@@ -1230,10 +1380,26 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
else
vlan_context = 0;
- tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
- tx_frames = pdata->tx_frames;
- if (tx_coalesce && !channel->tx_timer_active)
- ring->coalesce_count = 0;
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ packet->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
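The tx_set_ic computation above decides when a descriptor should raise a completion interrupt under the "tx-frames" coalescing setting: never if coalescing is off, always if a single submission already exceeds the setting, and otherwise whenever the running packet count crosses a multiple of it. The same decision lifted into a small stand-alone function with a demo loop; the names and sample values are illustrative only.

#include <stdio.h>

/* Mirror of the IC (interrupt-on-completion) decision from the hunk above. */
static int tx_needs_interrupt(unsigned int *coalesce_count,
			      unsigned int tx_packets,
			      unsigned int tx_frames)
{
	*coalesce_count += tx_packets;

	if (!tx_frames)
		return 0;
	if (tx_packets > tx_frames)
		return 1;
	if ((*coalesce_count % tx_frames) < tx_packets)
		return 1;
	return 0;
}

int main(void)
{
	unsigned int count = 0;
	int i;

	/* 2 packets per submission, interrupt roughly every 8 packets. */
	for (i = 1; i <= 10; i++)
		printf("xmit %2d: IC=%d\n", i,
		       tx_needs_interrupt(&count, 2, 8));
	return 0;
}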
@@ -1300,13 +1466,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Mark it as First Descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
@@ -1351,13 +1510,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
rdata->skb_dma_len);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Set OWN bit */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
@@ -1373,6 +1525,14 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Set LAST bit for the last descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = packet->tx_packets;
+ rdata->tx.bytes = packet->tx_bytes;
+
/* In case the Tx DMA engine is running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
@@ -1391,26 +1551,19 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Make sure ownership is written to the descriptor */
wmb();
- /* Issue a poll command to Tx DMA by writing address
- * of next immediate free descriptor */
ring->cur++;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-
- /* Start the Tx coalescing timer */
- if (tx_coalesce && !channel->tx_timer_active) {
- channel->tx_timer_active = 1;
- hrtimer_start(&channel->tx_timer,
- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
- HRTIMER_MODE_REL);
- }
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
(ring->cur - 1) & (ring->rdesc_count - 1));
- DBGPR("<--xgbe_pre_xmit\n");
+ DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1573,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
struct net_device *netdev = channel->pdata->netdev;
- unsigned int err, etlt;
+ unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1431,6 +1584,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
return 1;
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ rmb();
+
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif
@@ -1454,8 +1610,33 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
/* Get the packet length */
- rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
/* Not all the data has been transferred for this packet */
@@ -1478,7 +1659,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
- if (!err || (err && !etlt)) {
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
if ((etlt == 0x09) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
@@ -2298,6 +2480,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
@@ -2326,6 +2549,15 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2417,6 +2649,15 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2485,6 +2726,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2804,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
- hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
@@ -2575,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_desc_reset = xgbe_rx_desc_reset;
hw_if->is_last_desc = xgbe_is_last_desc;
hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
/* For FLOW ctrl */
hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
@@ -2620,5 +2864,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
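
In the xgbe_dev_xmit changes above, the old per-descriptor "++ring->coalesce_count % tx_frames" check is replaced by a single tx_set_ic decision made once per packet, so a multi-descriptor TSO frame sets the IC bit at most once. A minimal sketch of that decision in isolation; the function name tx_should_set_ic, its parameter names, and the values in main() are invented for illustration and are not driver code:

#include <stdio.h>

/* Sketch of the tx_set_ic decision: interrupt only when the running coalesce
 * count crosses a multiple of the tx-frames setting, or when a single packet
 * alone exceeds that setting.
 */
static int tx_should_set_ic(unsigned int frames_setting,
                            unsigned int coalesce_count,
                            unsigned int pkt_segments)
{
        if (!frames_setting)                    /* ethtool -C ethX tx-frames 0 */
                return 0;
        if (pkt_segments > frames_setting)      /* one packet already exceeds it */
                return 1;
        /* running count just crossed a multiple of the setting */
        return (coalesce_count % frames_setting) < pkt_segments;
}

int main(void)
{
        unsigned int frames = 16, count = 0;
        unsigned int pkts[] = { 4, 4, 4, 4, 4 };    /* five 4-segment TSO packets */

        for (unsigned int i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
                count += pkts[i];                   /* mirrors ring->coalesce_count */
                printf("packet %u: coalesce_count=%u set_ic=%d\n",
                       i, count, tx_should_set_ic(frames, count, pkts[i]));
        }
        return 0;
}

With tx-frames at 16, only the packet whose segments push the running count past a multiple of 16 requests an interrupt, which matches the interrupt/no-interrupt cases listed in the comment block of the hunk.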
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2349ea970255..7bb5f07dbeef 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -126,14 +127,126 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+ int ret = -ENOMEM;
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ goto err_channel;
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the DMA interrupt (offset 1) */
+ ret = platform_get_irq(pdata->pdev, i + 1);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_mem);
+
+err_channel:
+ return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->channel)
+ return;
+
+ kfree(pdata->channel->rx_ring);
+ kfree(pdata->channel->tx_ring);
+ kfree(pdata->channel);
+
+ pdata->channel = NULL;
+ pdata->channel_count = 0;
+}
+
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
return (ring->rdesc_count - (ring->cur - ring->dirty));
}
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+
+ if (count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_if.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
@@ -144,8 +257,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
- rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
@@ -213,11 +326,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
- DBGPR("-->xgbe_isr\n");
-
DBGPR(" DMA_ISR = %08x\n", dma_isr);
- DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
- DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -228,6 +337,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+ /* If we get a TI or RI interrupt that means per channel DMA
+ * interrupts are not enabled, so we use the private data napi
+ * structure, not the per channel napi structure
+ */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
if (napi_schedule_prep(&pdata->napi)) {
@@ -270,12 +383,28 @@ static irqreturn_t xgbe_isr(int irq, void *data)
DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
- DBGPR("<--xgbe_isr\n");
-
isr_done:
return IRQ_HANDLED;
}
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+ struct xgbe_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
struct xgbe_channel *channel = container_of(timer,
@@ -283,18 +412,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
tx_timer);
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
unsigned long flags;
DBGPR("-->xgbe_tx_timer\n");
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
spin_lock_irqsave(&ring->lock, flags);
- if (napi_schedule_prep(&pdata->napi)) {
+ if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
+ if (pdata->per_channel_irq)
+ disable_irq(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
- __napi_schedule(&pdata->napi);
+ __napi_schedule(napi);
}
channel->tx_timer_active = 0;
@@ -430,18 +565,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
- if (add)
- netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
- NAPI_POLL_WEIGHT);
- napi_enable(&pdata->napi);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
- napi_disable(&pdata->napi);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
- if (del)
- netif_napi_del(&pdata->napi);
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -472,7 +635,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_init_rx_coalesce\n");
}
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
@@ -480,7 +643,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
- DBGPR("-->xgbe_free_tx_skbuff\n");
+ DBGPR("-->xgbe_free_tx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -490,14 +653,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
}
}
- DBGPR("<--xgbe_free_tx_skbuff\n");
+ DBGPR("<--xgbe_free_tx_data\n");
}
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
@@ -505,7 +668,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
- DBGPR("-->xgbe_free_rx_skbuff\n");
+ DBGPR("-->xgbe_free_rx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -515,11 +678,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
}
}
- DBGPR("<--xgbe_free_rx_skbuff\n");
+ DBGPR("<--xgbe_free_rx_data\n");
}
static void xgbe_adjust_link(struct net_device *netdev)
@@ -735,7 +898,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ unsigned int i;
DBGPR("-->xgbe_stop\n");
@@ -749,12 +915,23 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+
DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
+ struct xgbe_channel *channel;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int i;
DBGPR("-->xgbe_restart_dev\n");
@@ -763,10 +940,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->irq_number);
+ synchronize_irq(pdata->dev_irq);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ synchronize_irq(channel->dma_irq);
+ }
- xgbe_free_tx_skbuff(pdata);
- xgbe_free_rx_skbuff(pdata);
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
/* Issue software reset to device if requested */
if (reset)
@@ -1008,6 +1190,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
packet->tcp_header_len, packet->tcp_payload_len);
DBGPR(" packet->mss=%u\n", packet->mss);
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ packet->tx_packets = skb_shinfo(skb)->gso_segs;
+ packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
return 0;
}
@@ -1033,17 +1221,22 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
unsigned int len;
unsigned int i;
+ packet->skb = skb;
+
context_desc = 0;
packet->rdesc_count = 0;
+ packet->tx_packets = 1;
+ packet->tx_bytes = skb->len;
+
if (xgbe_is_tso(skb)) {
- /* TSO requires an extra desriptor if mss is different */
+ /* TSO requires an extra descriptor if mss is different */
if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
context_desc = 1;
packet->rdesc_count++;
}
- /* TSO requires an extra desriptor for TSO header */
+ /* TSO requires an extra descriptor for TSO header */
packet->rdesc_count++;
XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1091,6 +1284,8 @@ static int xgbe_open(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel = NULL;
+ unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1119,24 +1314,48 @@ static int xgbe_open(struct net_device *netdev)
goto err_ptpclk;
pdata->rx_buf_size = ret;
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ goto err_ptpclk;
+
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
if (ret)
- goto err_ptpclk;
+ goto err_channels;
/* Initialize the device restart and Tx timestamp work struct */
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
/* Request interrupts */
- ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev->name, pdata);
if (ret) {
netdev_alert(netdev, "error requesting irq %d\n",
- pdata->irq_number);
- goto err_irq;
+ pdata->dev_irq);
+ goto err_rings;
+ }
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev,
+ "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
}
- pdata->irq_number = netdev->irq;
ret = xgbe_start(pdata);
if (ret)
@@ -1149,12 +1368,21 @@ static int xgbe_open(struct net_device *netdev)
err_start:
hw_if->exit(pdata);
- devm_free_irq(pdata->dev, pdata->irq_number, pdata);
- pdata->irq_number = 0;
-
err_irq:
+ if (pdata->per_channel_irq) {
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
desc_if->free_ring_resources(pdata);
+err_channels:
+ xgbe_free_channels(pdata);
+
err_ptpclk:
clk_disable_unprepare(pdata->ptpclk);
@@ -1172,6 +1400,8 @@ static int xgbe_close(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
DBGPR("-->xgbe_close\n");
@@ -1181,15 +1411,20 @@ static int xgbe_close(struct net_device *netdev)
/* Issue software reset to device */
hw_if->exit(pdata);
- /* Free all the ring data */
+ /* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupt */
- if (pdata->irq_number != 0) {
- devm_free_irq(pdata->dev, pdata->irq_number, pdata);
- pdata->irq_number = 0;
+ /* Release the interrupts */
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+
/* Disable the clocks */
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
@@ -1210,12 +1445,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
unsigned long flags;
int ret;
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
channel = pdata->channel + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
ring = channel->tx_ring;
packet = &ring->packet_data;
@@ -1234,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_packet_info(pdata, ring, skb, packet);
/* Check that there are enough descriptors available */
- if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
- DBGPR(" Tx queue stopped, not enough descriptors available\n");
- netif_stop_subqueue(netdev, channel->queue_index);
- ring->tx.queue_stopped = 1;
- ret = NETDEV_TX_BUSY;
+ ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+ if (ret)
goto tx_netdev_return;
- }
ret = xgbe_prep_tso(skb, packet);
if (ret) {
@@ -1257,13 +1490,21 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_prep_tx_tstamp(pdata, skb, packet);
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, packet->tx_bytes);
+
/* Configure required descriptor fields for transmission */
- hw_if->pre_xmit(channel);
+ hw_if->dev_xmit(channel);
#ifdef XGMAC_ENABLE_TX_PKT_DUMP
xgbe_print_pkt(netdev, skb, true);
#endif
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+ ret = NETDEV_TX_OK;
+
tx_netdev_return:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1420,14 +1661,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
static void xgbe_poll_controller(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_channel *channel;
+ unsigned int i;
DBGPR("-->xgbe_poll_controller\n");
- disable_irq(pdata->irq_number);
-
- xgbe_isr(pdata->irq_number, pdata);
-
- enable_irq(pdata->irq_number);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
DBGPR("<--xgbe_poll_controller\n");
}
@@ -1465,12 +1712,21 @@ static int xgbe_set_features(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ int ret = 0;
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_if->disable_rss(pdata);
+ if (ret)
+ return ret;
+
if ((features & NETIF_F_RXCSUM) && !rxcsum)
hw_if->enable_rx_csum(pdata);
else if (!(features & NETIF_F_RXCSUM) && rxcsum)
@@ -1524,7 +1780,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- desc_if->realloc_skb(channel);
+ desc_if->realloc_rx_buffer(channel);
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
@@ -1533,6 +1789,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata,
+ unsigned int *len)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ u8 *packet;
+ unsigned int copy_len;
+
+ skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ packet = page_address(rdata->rx.hdr.pa.pages) +
+ rdata->rx.hdr.pa.pages_offset;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = min(rdata->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ *len -= copy_len;
+
+ return skb;
+}
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1542,8 +1823,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
unsigned long flags;
int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
DBGPR("-->xgbe_tx_poll\n");
@@ -1551,36 +1834,53 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!ring)
return 0;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
spin_lock_irqsave(&ring->lock, flags);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty < ring->cur)) {
+ (ring->dirty != ring->cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
if (!hw_if->tx_complete(rdesc))
break;
+ /* Make sure descriptor fields are read after reading the OWN
+ * bit */
+ rmb();
+
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif
+ if (hw_if->is_last_desc(rdesc)) {
+ tx_packets += rdata->tx.packets;
+ tx_bytes += rdata->tx.bytes;
+ }
+
/* Free the SKB and reset the descriptor for re-use */
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
hw_if->tx_desc_reset(rdata);
processed++;
ring->dirty++;
}
+ if (!processed)
+ goto unlock;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
if ((ring->tx.queue_stopped == 1) &&
(xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
ring->tx.queue_stopped = 0;
- netif_wake_subqueue(netdev, channel->queue_index);
+ netif_tx_wake_queue(txq);
}
DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+unlock:
spin_unlock_irqrestore(&ring->lock, flags);
return processed;
@@ -1594,6 +1894,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
struct net_device *netdev = pdata->netdev;
+ struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
@@ -1607,6 +1908,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
@@ -1641,10 +1944,6 @@ read_again:
ring->cur++;
ring->dirty++;
- dma_unmap_single(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_FROM_DEVICE);
- rdata->skb_dma = 0;
-
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
@@ -1667,33 +1966,40 @@ read_again:
}
if (!context) {
- put_len = rdata->len - len;
- if (skb) {
- if (pskb_expand_head(skb, 0, put_len,
- GFP_ATOMIC)) {
- DBGPR("pskb_expand_head error\n");
- if (incomplete) {
- error = 1;
- goto read_again;
- }
-
- dev_kfree_skb(skb);
- goto next_packet;
+ put_len = rdata->rx.len - len;
+ len += put_len;
+
+ if (!skb) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb = xgbe_create_skb(pdata, rdata, &put_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
}
- memcpy(skb_tail_pointer(skb), rdata->skb->data,
- put_len);
- } else {
- skb = rdata->skb;
- rdata->skb = NULL;
}
- skb_put(skb, put_len);
- len += put_len;
+
+ if (put_len) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ put_len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
}
+skip_data:
if (incomplete || context_next)
goto read_again;
- /* Stray Context Descriptor? */
if (!skb)
goto next_packet;
@@ -1733,13 +2039,18 @@ read_again:
hwtstamps->hwtstamp = ns_to_ktime(nsec);
}
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RSS_HASH))
+ skb_set_hash(skb, packet->rss_hash,
+ packet->rss_hash_type);
+
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);
- skb_mark_napi_id(skb, &pdata->napi);
+ skb_mark_napi_id(skb, napi);
netdev->last_rx = jiffies;
- napi_gro_receive(&pdata->napi, skb);
+ napi_gro_receive(napi, skb);
next_packet:
packet_count++;
@@ -1761,7 +2072,35 @@ next_packet:
return packet_count;
}
-static int xgbe_poll(struct napi_struct *napi, int budget)
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
napi);
@@ -1770,7 +2109,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
int processed, last_processed;
unsigned int i;
- DBGPR("-->xgbe_poll: budget=%d\n", budget);
+ DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
processed = 0;
ring_budget = budget / pdata->rx_ring_count;
@@ -1798,7 +2137,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
xgbe_enable_rx_tx_ints(pdata);
}
- DBGPR("<--xgbe_poll: received = %d\n", processed);
+ DBGPR("<--xgbe_all_poll: received = %d\n", processed);
return processed;
}
@@ -1812,10 +2151,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
idx++;
}
}
@@ -1823,9 +2162,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
unsigned int idx)
{
- DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
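
xgbe_dev_xmit (previous file) now skips the DMA_CH_TDTR_LO tail-pointer write when skb->xmit_more is set, and xgbe_maybe_stop_tx_queue above flushes that pending batch through tx_start_xmit before stopping the queue, so no queued descriptors are left unannounced while the queue sleeps. A small self-contained sketch of this deferred-doorbell pattern; fake_ring, ring_kick and queue_frame are invented stand-ins, not the driver's structures:

#include <stdio.h>

/* ring_kick models the DMA_CH_TDTR_LO write plus the tx_usecs hrtimer start */
struct fake_ring {
        unsigned int avail;          /* free descriptors */
        int doorbell_pending;        /* descriptors queued, doorbell not rung */
};

static void ring_kick(struct fake_ring *r)
{
        r->doorbell_pending = 0;
        printf("  doorbell rung\n");
}

static int queue_frame(struct fake_ring *r, unsigned int ndesc, int xmit_more)
{
        if (ndesc > r->avail) {
                if (r->doorbell_pending)     /* flush the batch before stopping */
                        ring_kick(r);
                printf("  queue stopped (busy)\n");
                return -1;                   /* NETDEV_TX_BUSY analogue */
        }

        r->avail -= ndesc;
        if (xmit_more)
                r->doorbell_pending = 1;     /* batch with the next skb */
        else
                ring_kick(r);
        return 0;
}

int main(void)
{
        struct fake_ring r = { .avail = 8, .doorbell_pending = 0 };

        queue_frame(&r, 3, 1);   /* xmit_more set: defer the doorbell        */
        queue_frame(&r, 3, 1);   /* still batching                           */
        queue_frame(&r, 3, 0);   /* no room left: flush pending batch, busy  */
        return 0;
}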
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 49508ec98b72..ebf489351555 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
rx_usecs);
return -EINVAL;
}
- if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+ if (rx_frames > pdata->rx_desc_count) {
netdev_alert(netdev, "rx-frames is limited to %d frames\n",
- pdata->channel->rx_ring->rdesc_count);
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
- if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+ if (tx_frames > pdata->tx_desc_count) {
netdev_alert(netdev, "tx-frames is limited to %d frames\n",
- pdata->channel->tx_ring->rdesc_count);
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -481,6 +481,82 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
+static int xgbe_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = pdata->rx_ring_count;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
+ }
+
+ if (key)
+ memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int ret;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (ret)
+ return ret;
+ }
+
+ if (key) {
+ ret = hw_if->set_rss_hash_key(pdata, key);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int xgbe_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *ts_info)
{
@@ -526,6 +602,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
+ .get_rxnfc = xgbe_get_rxnfc,
+ .get_rxfh_key_size = xgbe_get_rxfh_key_size,
+ .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
.get_ts_info = xgbe_get_ts_info,
};
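
xgbe_set_rxfh above (and the bnx2x change later in this series) follows the usual ethtool convention for hfunc: accept ETH_RSS_HASH_NO_CHANGE or the one hash the hardware implements (Toeplitz, ETH_RSS_HASH_TOP) and return -EOPNOTSUPP otherwise. A tiny sketch of that check; the enum values are invented stand-ins for the kernel constants:

#include <stdio.h>

/* Stand-ins for ETH_RSS_HASH_*; only the comparison pattern matters here */
enum { RSS_HASH_NO_CHANGE = 0, RSS_HASH_TOP = 1, RSS_HASH_XOR = 2 };

static int set_rxfh_check(int hfunc)
{
        /* accept "leave it alone" or the one function the device supports */
        if (hfunc != RSS_HASH_NO_CHANGE && hfunc != RSS_HASH_TOP)
                return -95;              /* -EOPNOTSUPP */
        return 0;
}

int main(void)
{
        printf("NO_CHANGE -> %d\n", set_rxfh_check(RSS_HASH_NO_CHANGE));
        printf("TOP       -> %d\n", set_rxfh_check(RSS_HASH_TOP));
        printf("XOR       -> %d\n", set_rxfh_check(RSS_HASH_XOR));
        return 0;
}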
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index f5a8fa03921a..dbd3850b8b0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
-static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel_mem, *channel;
- struct xgbe_ring *tx_ring, *rx_ring;
- unsigned int count, i;
-
- DBGPR("-->xgbe_alloc_rings\n");
-
- count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
-
- channel_mem = devm_kcalloc(pdata->dev, count,
- sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- return NULL;
-
- tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
- sizeof(struct xgbe_ring), GFP_KERNEL);
- if (!tx_ring)
- return NULL;
-
- rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
- sizeof(struct xgbe_ring), GFP_KERNEL);
- if (!rx_ring)
- return NULL;
-
- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
- channel->pdata = pdata;
- channel->queue_index = i;
- channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
- (DMA_CH_INC * i);
-
- if (i < pdata->tx_ring_count) {
- spin_lock_init(&tx_ring->lock);
- channel->tx_ring = tx_ring++;
- }
-
- if (i < pdata->rx_ring_count) {
- spin_lock_init(&rx_ring->lock);
- channel->rx_ring = rx_ring++;
- }
-
- DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
- channel->name, channel->queue_index, channel->dma_regs,
- channel->tx_ring, channel->rx_ring);
- }
-
- pdata->channel_count = count;
-
- DBGPR("<--xgbe_alloc_rings\n");
-
- return channel_mem;
-}
-
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
const u8 *mac_addr;
+ unsigned int i;
int ret;
DBGPR("--> xgbe_probe\n");
@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
mutex_init(&pdata->xpcs_mutex);
+ mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
/* Set and validate the number of descriptors for a ring */
@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
+ /* Check for per channel interrupt support */
+ if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
+ pdata->per_channel_irq = 1;
+
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(dev, "platform_get_irq failed\n");
+ dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
- netdev->irq = ret;
+ pdata->dev_irq = ret;
+
+ netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
/* Set all the function pointers */
@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}
- /* Allocate the rings for the DMA channels */
- pdata->channel = xgbe_alloc_rings(pdata);
- if (!pdata->channel) {
- dev_err(dev, "ring allocation failed\n");
- ret = -ENOMEM;
- goto err_io;
- }
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
+
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Prepare to register with MDIO */
pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
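
The probe changes above seed the RSS hash key with netdev_rss_key_fill() and spread the indirection-table entries round-robin across the Rx rings (entry i maps to ring i % rx_ring_count). A user-space sketch of that spread, assuming an illustrative four-ring setup and a table size mirroring XGBE_RSS_MAX_TABLE_SIZE:

#include <stdio.h>

#define RSS_TABLE_SIZE 256u          /* mirrors XGBE_RSS_MAX_TABLE_SIZE */

int main(void)
{
        unsigned int rx_rings = 4;   /* illustrative channel count */
        unsigned int table[RSS_TABLE_SIZE];
        unsigned int hits[16] = { 0 };

        /* round-robin spread, as in the probe loop: entry i -> ring i % N */
        for (unsigned int i = 0; i < RSS_TABLE_SIZE; i++) {
                table[i] = i % rx_rings;
                hits[table[i]]++;
        }

        for (unsigned int q = 0; q < rx_rings; q++)
                printf("ring %u owns %u of %u indirection entries\n",
                       q, hits[q], RSS_TABLE_SIZE);
        return 0;
}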
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 789957d43a13..f9ec762ac3f0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -140,11 +140,25 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
+
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
+#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
+#define XGBE_DMA_STOP_TIMEOUT 5
/* DMA cache settings - Outer sharable, write-back, write-allocate */
#define XGBE_DMA_OS_AXDOMAIN 0x2
@@ -171,6 +185,7 @@
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
+#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
@@ -212,9 +227,17 @@
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE 40
+#define XGBE_RSS_MAX_TABLE_SIZE 256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define XGBE_RSS_HASH_KEY_TYPE 1
+
struct xgbe_prv_data;
struct xgbe_packet_data {
+ struct sk_buff *skb;
+
unsigned int attributes;
unsigned int errors;
@@ -230,14 +253,53 @@ struct xgbe_packet_data {
unsigned short vlan_ctag;
u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
};
/* Common Rx and Tx descriptor mapping */
struct xgbe_ring_desc {
- unsigned int desc0;
- unsigned int desc1;
- unsigned int desc2;
- unsigned int desc3;
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+ struct xgbe_buffer_data hdr; /* Header locations */
+ struct xgbe_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
};
/* Structure used to hold information related to the descriptor
@@ -251,9 +313,9 @@ struct xgbe_ring_data {
struct sk_buff *skb; /* Virtual address of SKB */
dma_addr_t skb_dma; /* DMA address of SKB data */
unsigned int skb_dma_len; /* Length of SKB DMA area */
- unsigned int tso_header; /* TSO header indicator */
- unsigned short len; /* Length of received Rx packet */
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
unsigned int interrupt; /* Interrupt indicator */
@@ -291,6 +353,10 @@ struct xgbe_ring {
*/
struct xgbe_ring_data *rdata;
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
+
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
@@ -307,6 +373,7 @@ struct xgbe_ring {
union {
struct {
unsigned int queue_stopped;
+ unsigned int xmit_more;
unsigned short cur_mss;
unsigned short cur_vlan_ctag;
} tx;
@@ -331,6 +398,13 @@ struct xgbe_channel {
unsigned int queue_index;
void __iomem *dma_regs;
+ /* Per channel interrupt irq number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
unsigned int saved_ier;
unsigned int tx_timer_active;
@@ -456,7 +530,7 @@ struct xgbe_hw_if {
int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
- void (*pre_xmit)(struct xgbe_channel *);
+ void (*dev_xmit)(struct xgbe_channel *);
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
@@ -464,6 +538,7 @@ struct xgbe_hw_if {
void (*tx_desc_reset)(struct xgbe_ring_data *);
int (*is_last_desc)(struct xgbe_ring_desc *);
int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
/* For FLOW ctrl */
int (*config_tx_flow_control)(struct xgbe_prv_data *);
@@ -509,14 +584,20 @@ struct xgbe_hw_if {
/* For Data Center Bridging config */
void (*config_dcb_tc)(struct xgbe_prv_data *);
void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
- void (*realloc_skb)(struct xgbe_channel *);
- void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*realloc_rx_buffer)(struct xgbe_channel *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
@@ -581,7 +662,11 @@ struct xgbe_prv_data {
/* XPCS indirect addressing mutex */
struct mutex xpcs_mutex;
- int irq_number;
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ int dev_irq;
+ unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
struct xgbe_desc_if desc_if;
@@ -624,7 +709,7 @@ struct xgbe_prv_data {
unsigned int rx_riwt;
unsigned int rx_frames;
- /* Current MTU */
+ /* Current Rx buffer size */
unsigned int rx_buf_size;
/* Flow control settings */
@@ -632,6 +717,11 @@ struct xgbe_prv_data {
unsigned int tx_pause;
unsigned int rx_pause;
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
/* MDIO settings */
struct module *phy_module;
char *mii_bus_id;
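
XGBE_TX_MAX_DESCS above bounds the worst case for one skb: its page fragments, the splits of a contiguous TSO payload at XGBE_TX_MAX_BUF_SIZE, plus a possible context descriptor and a possible TSO header descriptor. A worked example with representative values (4 KiB pages, the usual 64 KiB GSO_MAX_SIZE); these are assumptions about a typical configuration, not values taken from this patch:

#include <stdio.h>

#define PAGE_SIZE        4096u
#define MAX_SKB_FRAGS    (65536u / PAGE_SIZE + 1)          /* 17 with 4 KiB pages */
#define GSO_MAX_SIZE     65536u
#define TX_MAX_BUF_SIZE  (0x3fffu & ~(64u - 1))            /* 16320 */
#define TX_MAX_SPLIT     ((GSO_MAX_SIZE / TX_MAX_BUF_SIZE) + 1)
#define TX_MAX_DESCS     (MAX_SKB_FRAGS + TX_MAX_SPLIT + 2)

int main(void)
{
        printf("max buf size  = %u\n", TX_MAX_BUF_SIZE);   /* 16320 */
        printf("max split     = %u\n", TX_MAX_SPLIT);      /* 5     */
        printf("max descs/skb = %u\n", TX_MAX_DESCS);      /* 24    */
        return 0;
}

For these values the driver must keep 24 descriptors free per worst-case skb, which is the headroom xgbe_maybe_stop_tx_queue checks with XGBE_TX_MAX_DESCS after each transmit.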
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 123669696184..83a50280bb70 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -761,10 +761,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
ndev = pdata->ndev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
- if (!res) {
- dev_err(dev, "Resource enet_csr not defined\n");
- return -ENODEV;
- }
pdata->base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->base_addr)) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
@@ -772,10 +768,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
- if (!res) {
- dev_err(dev, "Resource ring_csr not defined\n");
- return -ENODEV;
- }
pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_csr_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
@@ -783,10 +775,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
- if (!res) {
- dev_err(dev, "Resource ring_cmd not defined\n");
- return -ENODEV;
- }
pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_cmd_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 58a200df4c35..6e66127e6abf 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -768,7 +768,6 @@ static struct platform_driver mac_mace_driver = {
.remove = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 72fb86b9aa24..c9946c6c119e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
MODULE_AUTHOR("Jie Yang");
MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
-MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver");
+MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATL1C_DRV_VERSION);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index c3e260c21734..41a3c9804427 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -62,10 +62,9 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF
select MII
select PHYLIB
- select FIXED_PHY if BCMGENET=y
+ select FIXED_PHY
select BCM7XXX_PHY
help
This driver supports the built-in Ethernet MACs found in the
@@ -156,7 +155,7 @@ config SYSTEMPORT
depends on OF
select MII
select PHYLIB
- select FIXED_PHY if SYSTEMPORT=y
+ select FIXED_PHY
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 416620fa8fac..d86d6baf9681 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -836,7 +836,7 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ copy_skb = napi_alloc_skb(&bp->napi, len);
if (copy_skb == NULL)
goto drop_it_no_recycle;
@@ -2104,6 +2104,7 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
bp->flags &= ~B44_FLAG_WOL_ENABLE;
spin_unlock_irq(&bp->lock);
+ device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
return 0;
}
@@ -2452,6 +2453,7 @@ static int b44_init_one(struct ssb_device *sdev,
}
}
+ device_set_wakeup_capable(sdev->dev, true);
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3e8d1a88ed3d..21206d33b638 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -385,7 +385,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
if (len < copybreak) {
struct sk_buff *nskb;
- nskb = netdev_alloc_skb_ip_align(dev, len);
+ nskb = napi_alloc_skb(&priv->napi, len);
if (!nskb) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 531bb7c57531..5b308a4a4d0e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,6 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+ STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -477,6 +480,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.rx_dma_failed++;
bcm_sysport_free_cb(cb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
return ret;
@@ -526,6 +530,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
+ int ret;
/* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -620,7 +625,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
napi_gro_receive(&priv->napi, skb);
refill:
- bcm_sysport_rx_refill(priv, cb);
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ priv->mib.alloc_rx_buff_failed++;
}
return processed;
@@ -731,9 +738,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+
+ return 0;
}
- return 0;
+ return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -971,6 +980,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
@@ -1399,6 +1409,27 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)
topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
+static int bcm_sysport_change_mac(struct net_device *dev, void *p)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /* interface is disabled, changes to MAC will be reflected on next
+ * open call
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
static void bcm_sysport_netif_start(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1618,6 +1649,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_stop = bcm_sysport_stop,
.ndo_set_features = bcm_sysport_set_features,
.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+ .ndo_set_mac_address = bcm_sysport_change_mac,
};
#define REV_FMT "v%2x.%02x"
@@ -1962,7 +1994,6 @@ static struct platform_driver bcm_sysport_driver = {
.remove = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
- .owner = THIS_MODULE,
.of_match_table = bcm_sysport_of_match,
.pm = &bcm_sysport_pm_ops,
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index b08dab828101..fc19417d82a5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -557,6 +557,9 @@ struct bcm_sysport_mib {
u32 rxchk_other_pkt_disc;
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+ u32 tx_dma_failed;
};
/* HW maintains a large list of counters */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 40beef5bca88..1d1147c93d59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1015,7 +1015,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
*/
if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
(len <= RX_COPY_THRESH)) {
- skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ skb = napi_alloc_skb(&fp->napi, len);
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
@@ -1139,7 +1139,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
return IRQ_HANDLED;
}
@@ -2099,7 +2099,7 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
+ netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
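For context, netdev_rss_key_fill() hands out a single host-wide random Toeplitz key (generated once at first use), so drivers no longer hard-code or locally randomize their RSS keys; the tg3 and bna hunks further down make the same conversion. A minimal sketch, where foo_write_rss_reg() is a hypothetical register accessor:

	u32 rss_key[10];	/* 40-byte Toeplitz key */
	int i;

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i < ARRAY_SIZE(rss_key); i++)
		foo_write_rss_reg(priv, i, rss_key[i]);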
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 1edc931b1458..ffe4e003e636 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3358,12 +3358,18 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -3383,11 +3389,21 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
}
static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
* The same as in bnx2x_get_rxfh: we can't use a memcpy()
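For context, the get_rxfh/set_rxfh ethtool ops gained a hash-function argument in this cycle; the convention followed above (and by tg3 further down) is to report ETH_RSS_HASH_TOP and to refuse any request that would change the key or the hash function. A reduced sketch of that pattern for a hypothetical driver, with foo_copy_indir() and foo_program_indir() as placeholders:

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz only */
	if (indir)
		foo_copy_indir(dev, indir);
	return 0;
}

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	/* no key changes, no hash function other than Toeplitz */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (indir)
		foo_program_indir(dev, indir);
	return 0;
}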
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 549549eaf580..778e4cd32571 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8119,10 +8119,11 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
case SFP_EEPROM_CON_TYPE_VAL_LC:
case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
(SFP_EEPROM_10G_COMP_CODE_SR_MASK |
SFP_EEPROM_10G_COMP_CODE_LR_MASK |
- SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) &&
+ (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
DP(NETIF_MSG_LINK, "1G SFP module detected\n");
phy->media_type = ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != SPEED_1000) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fbf9ea7bd8..9f5e38769a29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -45,6 +45,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
@@ -1931,7 +1932,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for_each_cos_in_tx_queue(fp, cos)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
}
}
@@ -3163,6 +3164,8 @@ static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
gen_init->mtu = bp->dev->mtu;
gen_init->cos = cos;
+
+ gen_init->fp_hsi = ETH_FP_HSI_VERSION;
}
static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
@@ -12537,7 +12540,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
}
static int bnx2x_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct bnx2x *bp = netdev_priv(netdev);
@@ -12550,6 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
return 0;
}
+static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+ return vxlan_gso_check(skb);
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12581,6 +12589,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
+ .ndo_gso_check = bnx2x_gso_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -13247,7 +13256,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
return -EFAULT;
}
- DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
+ DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
best_period);
return 0;
@@ -14775,7 +14784,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
-EFAULT : 0;
}
-/* Configrues HW for PTP */
+/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
int rc, port = BP_PORT(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index b0779d773343..6fe547c93e74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7549,7 +7549,7 @@ The other bits are reserved and should be zero*/
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
#define IGU_REG_RESERVED_UPPER 0x05ff
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
@@ -7557,7 +7557,7 @@ The other bits are reserved and should be zero*/
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7bc2924a7e24..07cdf9bbffef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4336,7 +4336,7 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
test_bit(BNX2X_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
- gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
+ gen_data->fp_hsi_ver = params->fp_hsi;
DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index e97275f456c0..86baecb7c60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -937,6 +937,8 @@ struct bnx2x_general_setup_params {
u8 spcl_id;
u16 mtu;
u8 cos;
+
+ u8 fp_hsi;
};
struct bnx2x_rxq_setup_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c88b20af87df..e5aca2de1871 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -193,6 +193,7 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
/* Setup-op general parameters */
setup_p->gen_params.spcl_id = vf->sp_cl_id;
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+ setup_p->gen_params.fp_hsi = vf->fp_hsi;
/* Setup-op pause params:
* Nothing to do, the pause thresholds are set by default to 0 which
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 01bafa4ac045..66ee62a0401a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -205,6 +205,8 @@ struct bnx2x_virtf {
/* slow-path operations */
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
+
+ u8 fp_hsi;
};
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index b1d9c44aa56c..be40eabc5304 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -224,6 +224,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -237,6 +238,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
+ req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
req->resc_request.num_rxqs = rx_count;
req->resc_request.num_txqs = tx_count;
@@ -316,9 +318,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
memset(&bp->vf2pf_mbox->resp, 0,
sizeof(union pfvf_tlvs));
} else {
- /* PF reports error */
- BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
- bp->acquire_resp.hdr.status);
+ /* Determine the reason the PF failed the acquire process */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (fp_hsi_resp && !fp_hsi_resp->is_supported)
+ BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
+ else
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
rc = -EAGAIN;
goto out;
}
@@ -333,6 +340,25 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->flags |= HAS_PHYS_PORT_ID;
}
+ /* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
+ * If that's the case, we need to make certain required FW was
+ * supported by such a hypervisor [i.e., v0-v2].
+ */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
+ BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
+
+ /* Since acquire succeeded on the PF side, we need to send a
+ * release message in order to allow future probes.
+ */
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ bnx2x_vfpf_release(bp);
+
+ rc = -EINVAL;
+ goto out;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -1125,6 +1151,26 @@ static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}
+static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
+ sizeof(struct vfpf_fp_hsi_resp_tlv));
+
+ fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
+}
+
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx, int vfop_status)
{
@@ -1219,6 +1265,12 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
CHANNEL_TLV_PHYS_PORT_ID))
bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+ /* `New' VFs will want to know if the fastpath HSI is supported, since
+ * if that's not the case they could print into the system log a note
+ * that the driver version must be updated.
+ */
+ bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
+
bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1288,6 +1340,23 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
goto out;
}
+ /* Verify the VF fastpath HSI can be supported by the loaded FW.
+ * Linux vfs should be oblivious to changes between v0 and v2.
+ */
+ if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+ vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
+ else
+ vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VER_2);
+ if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
+ DP(BNX2X_MSG_IOV,
+ "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
+ vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VERSION);
+ rc = -EINVAL;
+ goto out;
+ }
+
/* acquire the resources */
rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 15670c499a20..b86479fc0d2f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -124,7 +124,7 @@ struct vfpf_acquire_tlv {
#define VF_OS_UNDEFINED (0 << VF_OS_SHIFT)
#define VF_OS_WINDOWS (1 << VF_OS_SHIFT)
- u8 padding;
+ u8 fp_hsi_ver;
u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
} vfdev_info;
@@ -204,6 +204,12 @@ struct vfpf_port_phys_id_resp_tlv {
u8 padding[2];
};
+struct vfpf_fp_hsi_resp_tlv {
+ struct channel_tlv tl;
+ u8 is_supported;
+ u8 padding[3];
+};
+
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
@@ -448,6 +454,7 @@ enum channel_tlvs {
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_UPDATE_TPA,
+ CHANNEL_TLV_FP_HSI_SUPPORT,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index da1a2500c91c..ff83c46bc389 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -42,6 +42,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
+#include <linux/platform_data/bcmgenet.h>
#include <asm/unaligned.h>
@@ -613,6 +614,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+ STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -711,6 +715,98 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
+static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+ u32 reg;
+
+ if (enable && !priv->clk_eee_enabled) {
+ clk_prepare_enable(priv->clk_eee);
+ priv->clk_eee_enabled = true;
+ }
+
+ reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
+ if (enable)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
+
+ /* Enable EEE and automatically switch to a 27MHz clock */
+ reg = __raw_readl(priv->base + off);
+ if (enable)
+ reg |= TBUF_EEE_EN | TBUF_PM_EN;
+ else
+ reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+ __raw_writel(reg, priv->base + off);
+
+ /* Do the same thing for RBUF */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
+ if (enable)
+ reg |= RBUF_EEE_EN | RBUF_PM_EN;
+ else
+ reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
+ bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
+
+ if (!enable && priv->clk_eee_enabled) {
+ clk_disable_unprepare(priv->clk_eee);
+ priv->clk_eee_enabled = false;
+ }
+
+ priv->eee.eee_enabled = enable;
+ priv->eee.eee_active = enable;
+}
+
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct ethtool_eee *p = &priv->eee;
+
+ if (GENET_IS_V1(priv))
+ return -EOPNOTSUPP;
+
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = p->eee_active;
+ e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+
+ return phy_ethtool_get_eee(priv->phydev, e);
+}
+
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct ethtool_eee *p = &priv->eee;
+ int ret = 0;
+
+ if (GENET_IS_V1(priv))
+ return -EOPNOTSUPP;
+
+ p->eee_enabled = e->eee_enabled;
+
+ if (!p->eee_enabled) {
+ bcmgenet_eee_enable_set(dev, false);
+ } else {
+ ret = phy_init_eee(priv->phydev, 0);
+ if (ret) {
+ netif_err(priv, hw, dev, "EEE initialization failed\n");
+ return ret;
+ }
+
+ bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+ bcmgenet_eee_enable_set(dev, true);
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, e);
+}
+
+static int bcmgenet_nway_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return genphy_restart_aneg(priv->phydev);
+}
+
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
@@ -724,6 +820,9 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.set_msglevel = bcmgenet_set_msglevel,
.get_wol = bcmgenet_get_wol,
.set_wol = bcmgenet_set_wol,
+ .get_eee = bcmgenet_get_eee,
+ .set_eee = bcmgenet_set_eee,
+ .nway_reset = bcmgenet_nway_reset,
};
/* Power down the unimac, based on mode. */
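Once the get_eee/set_eee ops above are registered, the EEE state should be reachable from userspace through ethtool's standard EEE commands (the interface name below is only illustrative):

	ethtool --show-eee eth0
	ethtool --set-eee eth0 eee on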
@@ -989,6 +1088,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
return ret;
@@ -1035,6 +1135,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
skb_frag_size(frag), DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
__func__);
return ret;
@@ -1231,6 +1332,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
priv->rx_buf_len, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.rx_dma_failed++;
bcmgenet_free_cb(cb);
netif_err(priv, rx_err, priv->dev,
"%s DMA map failed\n", __func__);
@@ -1397,8 +1499,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
/* refill RX path on the current control block */
refill:
err = bcmgenet_rx_refill(priv, cb);
- if (err)
+ if (err) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
rxpktprocessed++;
priv->rx_read_ptr++;
@@ -2400,6 +2504,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
+ u16 gphy_rev;
if (GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
@@ -2448,8 +2553,29 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
* to pass this information to the PHY driver. The PHY driver expects
* to find the PHY major revision in bits 15:8 while the GENET register
* stores that information in bits 7:0, account for that.
+ *
+ * On newer chips, starting with PHY revision G0, a new scheme is
+ * deployed, similar to the Starfighter 2 switch, with the GPHY major
+ * revision in bits 15:8 and the patch level in bits 7:0. Major revision
+ * 0 is reserved, as is the special value 0x01ff, so we use a small
+ * heuristic to detect the new GPHY revision scheme and re-arrange things
+ * so the GPHY driver is happy.
*/
- priv->gphy_rev = (reg & 0xffff) << 8;
+ gphy_rev = reg & 0xffff;
+
+ /* This is the good old scheme, just GPHY major, no minor nor patch */
+ if ((gphy_rev & 0xf0) != 0)
+ priv->gphy_rev = gphy_rev << 8;
+
+ /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
+ else if ((gphy_rev & 0xff00) != 0)
+ priv->gphy_rev = gphy_rev;
+
+ /* This is reserved, so it requires special treatment */
+ else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
+ }
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
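To make the revision heuristic above concrete, a few illustrative values of the low 16 bits of the version register (the specific numbers are examples, not taken from hardware documentation):

/*
 *   0x00a0 -> legacy scheme, major revision only: stored as 0xa000
 *   0x1002 -> new scheme, major G0 (0x10) with patch level 2: stored as-is
 *   0x0000 -> reserved, rejected with a warning
 */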
@@ -2483,8 +2609,9 @@ static const struct of_device_id bcmgenet_match[] = {
static int bcmgenet_probe(struct platform_device *pdev)
{
+ struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *of_id;
+ const struct of_device_id *of_id = NULL;
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
@@ -2498,9 +2625,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
return -ENOMEM;
}
- of_id = of_match_node(bcmgenet_match, dn);
- if (!of_id)
- return -EINVAL;
+ if (dn) {
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id)
+ return -EINVAL;
+ }
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
@@ -2512,11 +2641,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
- macaddr = of_get_mac_address(dn);
- if (!macaddr) {
- dev_err(&pdev->dev, "can't find MAC address\n");
- err = -EINVAL;
- goto err;
+ if (dn) {
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+ } else {
+ macaddr = pd->mac_address;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2556,7 +2689,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- priv->version = (enum bcmgenet_version)of_id->data;
+ if (of_id)
+ priv->version = (enum bcmgenet_version)of_id->data;
+ else
+ priv->version = pd->genet_version;
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk))
@@ -2577,6 +2713,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_wol))
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+ if (IS_ERR(priv->clk_eee)) {
+ dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+ priv->clk_eee = NULL;
+ }
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -2727,6 +2869,9 @@ static int bcmgenet_resume(struct device *d)
phy_resume(priv->phydev);
+ if (priv->eee.eee_enabled)
+ bcmgenet_eee_enable_set(dev, true);
+
bcmgenet_netif_start(dev);
return 0;
@@ -2744,7 +2889,6 @@ static struct platform_driver bcmgenet_driver = {
.remove = bcmgenet_remove,
.driver = {
.name = "bcmgenet",
- .owner = THIS_MODULE,
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
},
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 31b2da5f9b82..b36ddec0cc0a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -143,6 +143,9 @@ struct bcmgenet_mib_counters {
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
u32 mdf_err_cnt;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+ u32 tx_dma_failed;
};
#define UMAC_HD_BKP_CTRL 0x004
@@ -182,6 +185,21 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+
#define UMAC_TX_FLUSH 0x334
#define UMAC_MIB_START 0x400
@@ -229,6 +247,10 @@ struct bcmgenet_mib_counters {
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
+#define RBUF_ENERGY_CTRL 0x9c
+#define RBUF_EEE_EN (1 << 0)
+#define RBUF_PM_EN (1 << 1)
+
#define RBUF_TBUF_SIZE_CTRL 0xb4
#define RBUF_HFB_CTRL_V1 0x38
@@ -244,6 +266,9 @@ struct bcmgenet_mib_counters {
#define TBUF_CTRL 0x00
#define TBUF_BP_MC 0x0C
+#define TBUF_ENERGY_CTRL 0x14
+#define TBUF_EEE_EN (1 << 0)
+#define TBUF_PM_EN (1 << 1)
#define TBUF_CTRL_V1 0x80
#define TBUF_BP_MC_V1 0xA0
@@ -548,6 +573,8 @@ struct bcmgenet_priv {
struct device_node *phy_dn;
struct mii_bus *mii_bus;
u16 gphy_rev;
+ struct clk *clk_eee;
+ bool clk_eee_enabled;
/* PHY device variables */
int old_link;
@@ -584,6 +611,8 @@ struct bcmgenet_priv {
u32 wolopts;
struct bcmgenet_mib_counters mib;
+
+ struct ethtool_eee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 933cd7e7cd33..446889cc3c6a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/bcmgenet.h>
#include "bcmgenet.h"
@@ -312,22 +313,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
u32 phy_flags;
int ret;
- if (priv->phydev) {
- pr_info("PHY already attached\n");
- return 0;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
- ret = of_phy_register_fixed_link(dn);
- if (ret)
- return ret;
-
- priv->phy_dn = of_node_get(dn);
- }
-
/* Communicate the integrated PHY revision */
phy_flags = priv->gphy_rev;
@@ -337,11 +322,39 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->old_duplex = -1;
priv->old_pause = -1;
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
+ if (dn) {
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = of_node_get(dn);
+ }
+
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = priv->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
}
priv->phydev = phydev;
@@ -438,6 +451,75 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
+static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct bcmgenet_platform_data *pd = kdev->platform_data;
+ struct mii_bus *mdio = priv->mii_bus;
+ struct phy_device *phydev;
+ int ret;
+
+ if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
+ /*
+ * Internal or external PHY with MDIO access
+ */
+ if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+ mdio->phy_mask = ~(1 << pd->phy_address);
+ else
+ mdio->phy_mask = 0;
+
+ ret = mdiobus_register(mdio);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+ phydev = mdio->phy_map[pd->phy_address];
+ else
+ phydev = phy_find_first(mdio);
+
+ if (!phydev) {
+ dev_err(kdev, "failed to register PHY device\n");
+ mdiobus_unregister(mdio);
+ return -ENODEV;
+ }
+ } else {
+ /*
+ * MoCA port or no MDIO access.
+ * Use fixed PHY to represent the link layer.
+ */
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = pd->phy_speed,
+ .duplex = pd->phy_duplex,
+ .pause = 0,
+ .asym_pause = 0,
+ };
+
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ if (!phydev || IS_ERR(phydev)) {
+ dev_err(kdev, "failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+ }
+
+ priv->phydev = phydev;
+ priv->phy_interface = pd->phy_interface;
+
+ return 0;
+}
+
+static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+
+ if (dn)
+ return bcmgenet_mii_of_init(priv);
+ else
+ return bcmgenet_mii_pd_init(priv);
+}
+
int bcmgenet_mii_init(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -447,7 +529,7 @@ int bcmgenet_mii_init(struct net_device *dev)
if (ret)
return ret;
- ret = bcmgenet_mii_of_init(priv);
+ ret = bcmgenet_mii_bus_init(priv);
if (ret)
goto out_free;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index b61c14ed9b8d..ac27e24264a5 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2664,7 +2664,6 @@ static struct platform_driver sbmac_driver = {
.remove = __exit_p(sbmac_remove),
.driver = {
.name = sbmac_string,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 77f8f836cbbe..bb48a610b72a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10541,19 +10541,14 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
+ u32 rss_key[10];
+
tg3_rss_write_indir_tbl(tp);
- /* Setup the "secret" hash key. */
- tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
- tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
- tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
- tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
- tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
- tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
- tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
- tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
- tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
- tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
+
+ for (i = 0; i < 10 ; i++)
+ tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
}
tp->rx_mode = RX_MODE_ENABLE;
@@ -12566,22 +12561,38 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
struct tg3 *tp = netdev_priv(dev);
int i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
tp->rss_ind_tbl[i] = indir[i];
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c3861de9dc81..323721838cf9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2054,7 +2054,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
BFI_ENET_RSS_IPV4_TCP);
rx_config->rss_config.hash_mask =
bnad->num_rxp_per_rx - 1;
- get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
sizeof(rx_config->rss_config.toeplitz_hash_key));
} else {
rx_config->rss_status = BNA_STATUS_T_DISABLED;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 3564fe9d3f69..321d2ad235d9 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -35,8 +35,8 @@ config MACB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
- Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
- is not yet supported. Say Y to include support for the MACB/GEM chip.
+ Ethernet MAC found in some ARM SoC devices). Say Y to include
+ support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
will be called macb.
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4a24b9a6ad75..55eb7f2af2b4 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -469,7 +469,6 @@ static struct platform_driver at91ether_driver = {
.resume = at91ether_resume,
.driver = {
.name = "at91_ether",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91ether_dt_ids),
},
};
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 4d9fc0509af6..3767271c7667 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -66,23 +66,25 @@ static unsigned int macb_tx_ring_wrap(unsigned int index)
return index & (TX_RING_SIZE - 1);
}
-static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
+ unsigned int index)
{
- return &bp->tx_ring[macb_tx_ring_wrap(index)];
+ return &queue->tx_ring[macb_tx_ring_wrap(index)];
}
-static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
+ unsigned int index)
{
- return &bp->tx_skb[macb_tx_ring_wrap(index)];
+ return &queue->tx_skb[macb_tx_ring_wrap(index)];
}
-static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
dma_addr_t offset;
offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
- return bp->tx_ring_dma + offset;
+ return queue->tx_ring_dma + offset;
}
static unsigned int macb_rx_ring_wrap(unsigned int index)
@@ -490,38 +492,49 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
static void macb_tx_error_task(struct work_struct *work)
{
- struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_queue *queue = container_of(work, struct macb_queue,
+ tx_error_task);
+ struct macb *bp = queue->bp;
struct macb_tx_skb *tx_skb;
+ struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
+ unsigned long flags;
+
+ netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
+ (unsigned int)(queue - bp->queues),
+ queue->tx_tail, queue->tx_head);
- netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
- bp->tx_tail, bp->tx_head);
+ /* Prevent the queue IRQ handlers from running: each of them may call
+ * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ * As explained below, we have to halt the transmission before updating
+ * TBQP registers so we call netif_tx_stop_all_queues() to notify the
+ * network engine about the macb/gem being halted.
+ */
+ spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
- netif_stop_queue(bp->dev);
+ netif_tx_stop_all_queues(bp->dev);
/*
* Stop transmission now
* (in case we have just queued new packets)
+ * macb/gem must be halted to write TBQP register
*/
if (macb_halt_tx(bp))
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
- /* No need for the lock here as nobody will interrupt us anymore */
-
/*
* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
- for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
- struct macb_dma_desc *desc;
- u32 ctrl;
+ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
+ u32 ctrl;
- desc = macb_tx_desc(bp, tail);
+ desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
if (ctrl & MACB_BIT(TX_USED)) {
@@ -529,7 +542,7 @@ static void macb_tx_error_task(struct work_struct *work)
while (!skb) {
macb_tx_unmap(bp, tx_skb);
tail++;
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
}
@@ -558,45 +571,56 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb);
}
+ /* Set end of TX queue */
+ desc = macb_tx_desc(queue, 0);
+ desc->addr = 0;
+ desc->ctrl = MACB_BIT(TX_USED);
+
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- macb_writel(bp, TBQP, bp->tx_ring_dma);
+ queue_writel(queue, TBQP, queue->tx_ring_dma);
/* Make TX ring reflect state of hardware */
- bp->tx_head = bp->tx_tail = 0;
-
- /* Now we are ready to start transmission again */
- netif_wake_queue(bp->dev);
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
/* Housework before enabling TX IRQ */
macb_writel(bp, TSR, macb_readl(bp, TSR));
- macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+
+ /* Now we are ready to start transmission again */
+ netif_tx_start_all_queues(bp->dev);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+ spin_unlock_irqrestore(&bp->lock, flags);
}
-static void macb_tx_interrupt(struct macb *bp)
+static void macb_tx_interrupt(struct macb_queue *queue)
{
unsigned int tail;
unsigned int head;
u32 status;
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(TCOMP));
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
- head = bp->tx_head;
- for (tail = bp->tx_tail; tail != head; tail++) {
+ head = queue->tx_head;
+ for (tail = queue->tx_tail; tail != head; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
u32 ctrl;
- desc = macb_tx_desc(bp, tail);
+ desc = macb_tx_desc(queue, tail);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -611,7 +635,7 @@ static void macb_tx_interrupt(struct macb *bp)
/* Process all buffers of the current transmitted frame */
for (;; tail++) {
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
/* First, update TX stats if needed */
@@ -634,11 +658,11 @@ static void macb_tx_interrupt(struct macb *bp)
}
}
- bp->tx_tail = tail;
- if (netif_queue_stopped(bp->dev)
- && CIRC_CNT(bp->tx_head, bp->tx_tail,
- TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
- netif_wake_queue(bp->dev);
+ queue->tx_tail = tail;
+ if (__netif_subqueue_stopped(bp->dev, queue_index) &&
+ CIRC_CNT(queue->tx_head, queue->tx_tail,
+ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+ netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb *bp)
@@ -776,7 +800,7 @@ static int gem_rx(struct macb *bp, int budget)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
- skb->mac_header, 16, true);
+ skb_mac_header(skb), 16, true);
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
skb->data, 32, true);
#endif
@@ -949,11 +973,12 @@ static int macb_poll(struct napi_struct *napi, int budget)
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ struct net_device *dev = bp->dev;
u32 status;
- status = macb_readl(bp, ISR);
+ status = queue_readl(queue, ISR);
if (unlikely(!status))
return IRQ_NONE;
@@ -963,11 +988,13 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
while (status) {
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
- macb_writel(bp, IDR, -1);
+ queue_writel(queue, IDR, -1);
break;
}
- netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+ netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
/*
@@ -977,9 +1004,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* is already scheduled, so disable interrupts
* now.
*/
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -988,17 +1015,17 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
- macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
- schedule_work(&bp->tx_error_task);
+ queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&queue->tx_error_task);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+ queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
break;
}
if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(bp);
+ macb_tx_interrupt(queue);
/*
* Link change detection isn't possible with RMII, so we'll
@@ -1013,7 +1040,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
bp->hw_stats.macb.rx_overruns++;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+ queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
@@ -1025,10 +1052,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(HRESP));
+ queue_writel(queue, ISR, MACB_BIT(HRESP));
}
- status = macb_readl(bp, ISR);
+ status = queue_readl(queue, ISR);
}
spin_unlock(&bp->lock);
@@ -1043,10 +1070,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
*/
static void macb_poll_controller(struct net_device *dev)
{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
local_irq_save(flags);
- macb_interrupt(dev->irq, dev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ macb_interrupt(dev->irq, queue);
local_irq_restore(flags);
}
#endif
@@ -1058,10 +1089,11 @@ static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
}
static unsigned int macb_tx_map(struct macb *bp,
+ struct macb_queue *queue,
struct sk_buff *skb)
{
dma_addr_t mapping;
- unsigned int len, entry, i, tx_head = bp->tx_head;
+ unsigned int len, entry, i, tx_head = queue->tx_head;
struct macb_tx_skb *tx_skb = NULL;
struct macb_dma_desc *desc;
unsigned int offset, size, count = 0;
@@ -1075,7 +1107,7 @@ static unsigned int macb_tx_map(struct macb *bp,
while (len) {
size = min(len, bp->max_tx_length);
entry = macb_tx_ring_wrap(tx_head);
- tx_skb = &bp->tx_skb[entry];
+ tx_skb = &queue->tx_skb[entry];
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -1104,7 +1136,7 @@ static unsigned int macb_tx_map(struct macb *bp,
while (len) {
size = min(len, bp->max_tx_length);
entry = macb_tx_ring_wrap(tx_head);
- tx_skb = &bp->tx_skb[entry];
+ tx_skb = &queue->tx_skb[entry];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -1143,14 +1175,14 @@ static unsigned int macb_tx_map(struct macb *bp,
i = tx_head;
entry = macb_tx_ring_wrap(i);
ctrl = MACB_BIT(TX_USED);
- desc = &bp->tx_ring[entry];
+ desc = &queue->tx_ring[entry];
desc->ctrl = ctrl;
do {
i--;
entry = macb_tx_ring_wrap(i);
- tx_skb = &bp->tx_skb[entry];
- desc = &bp->tx_ring[entry];
+ tx_skb = &queue->tx_skb[entry];
+ desc = &queue->tx_ring[entry];
ctrl = (u32)tx_skb->size;
if (eof) {
@@ -1167,17 +1199,17 @@ static unsigned int macb_tx_map(struct macb *bp,
*/
wmb();
desc->ctrl = ctrl;
- } while (i != bp->tx_head);
+ } while (i != queue->tx_head);
- bp->tx_head = tx_head;
+ queue->tx_head = tx_head;
return count;
dma_error:
netdev_err(bp->dev, "TX DMA map failed\n");
- for (i = bp->tx_head; i != tx_head; i++) {
- tx_skb = macb_tx_skb(bp, i);
+ for (i = queue->tx_head; i != tx_head; i++) {
+ tx_skb = macb_tx_skb(queue, i);
macb_tx_unmap(bp, tx_skb);
}
@@ -1187,14 +1219,16 @@ dma_error:
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue = &bp->queues[queue_index];
unsigned long flags;
unsigned int count, nr_frags, frag_size, f;
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
- "start_xmit: len %u head %p data %p tail %p end %p\n",
- skb->len, skb->head, skb->data,
+ "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+ queue_index, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
@@ -1214,16 +1248,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
- netif_stop_queue(dev);
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+ netif_stop_subqueue(dev, queue_index);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
- bp->tx_head, bp->tx_tail);
+ queue->tx_head, queue->tx_tail);
return NETDEV_TX_BUSY;
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, skb)) {
+ if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -1235,8 +1269,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
- netif_stop_queue(dev);
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+ netif_stop_subqueue(dev, queue_index);
unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1304,20 +1338,24 @@ static void macb_free_rx_buffers(struct macb *bp)
static void macb_free_consistent(struct macb *bp)
{
- if (bp->tx_skb) {
- kfree(bp->tx_skb);
- bp->tx_skb = NULL;
- }
+ struct macb_queue *queue;
+ unsigned int q;
+
bp->macbgem_ops.mog_free_rx_buffers(bp);
if (bp->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
}
- if (bp->tx_ring) {
- dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
- bp->tx_ring, bp->tx_ring_dma);
- bp->tx_ring = NULL;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ kfree(queue->tx_skb);
+ queue->tx_skb = NULL;
+ if (queue->tx_ring) {
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ queue->tx_ring, queue->tx_ring_dma);
+ queue->tx_ring = NULL;
+ }
}
}
@@ -1354,12 +1392,27 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
- bp->tx_skb = kmalloc(size, GFP_KERNEL);
- if (!bp->tx_skb)
- goto out_err;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = TX_RING_BYTES;
+ queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->tx_ring_dma,
+ GFP_KERNEL);
+ if (!queue->tx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
+ q, size, (unsigned long)queue->tx_ring_dma,
+ queue->tx_ring);
+
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+ queue->tx_skb = kmalloc(size, GFP_KERNEL);
+ if (!queue->tx_skb)
+ goto out_err;
+ }
size = RX_RING_BYTES;
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
@@ -1370,15 +1423,6 @@ static int macb_alloc_consistent(struct macb *bp)
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
- size = TX_RING_BYTES;
- bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->tx_ring_dma, GFP_KERNEL);
- if (!bp->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
-
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1391,15 +1435,22 @@ out_err:
static void gem_init_rings(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int i;
- for (i = 0; i < TX_RING_SIZE; i++) {
- bp->tx_ring[i].addr = 0;
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ queue->tx_ring[i].addr = 0;
+ queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
}
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
- bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+ bp->rx_tail = 0;
+ bp->rx_prepared_head = 0;
gem_rx_refill(bp);
}
@@ -1418,16 +1469,21 @@ static void macb_init_rings(struct macb *bp)
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
for (i = 0; i < TX_RING_SIZE; i++) {
- bp->tx_ring[i].addr = 0;
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ bp->queues[0].tx_ring[i].addr = 0;
+ bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ bp->queues[0].tx_head = 0;
+ bp->queues[0].tx_tail = 0;
}
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
- bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+ bp->rx_tail = 0;
}
static void macb_reset_hw(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
+
/*
* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
@@ -1442,8 +1498,10 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, RSR, -1);
/* Disable all interrupts */
- macb_writel(bp, IDR, -1);
- macb_readl(bp, ISR);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, -1);
+ queue_readl(queue, ISR);
+ }
}
static u32 gem_mdc_clk_div(struct macb *bp)
@@ -1540,6 +1598,9 @@ static void macb_configure_dma(struct macb *bp)
static void macb_init_hw(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
+
u32 config;
macb_reset_hw(bp);
@@ -1565,16 +1626,18 @@ static void macb_init_hw(struct macb *bp)
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
- macb_writel(bp, TBQP, bp->tx_ring_dma);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, TBQP, queue->tx_ring_dma);
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
/* Enable TX and RX */
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
-
- /* Enable interrupts */
- macb_writel(bp, IER, (MACB_RX_INT_FLAGS
- | MACB_TX_INT_FLAGS
- | MACB_BIT(HRESP)));
-
}
/*
@@ -1736,7 +1799,7 @@ static int macb_open(struct net_device *dev)
/* schedule a link state check */
phy_start(bp->phy_dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -1746,7 +1809,7 @@ static int macb_close(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
unsigned long flags;
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
if (bp->phy_dev)
@@ -1895,8 +1958,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
| MACB_GREGS_VERSION;
- tail = macb_tx_ring_wrap(bp->tx_tail);
- head = macb_tx_ring_wrap(bp->tx_head);
+ tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
+ head = macb_tx_ring_wrap(bp->queues[0].tx_head);
regs_buff[0] = macb_readl(bp, NCR);
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -1909,8 +1972,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[8] = tail;
regs_buff[9] = head;
- regs_buff[10] = macb_tx_dma(bp, tail);
- regs_buff[11] = macb_tx_dma(bp, head);
+ regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
+ regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
if (macb_is_gem(bp)) {
regs_buff[12] = gem_readl(bp, USRIO);
@@ -2061,16 +2124,44 @@ static void macb_configure_caps(struct macb *bp)
netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
}
+static void macb_probe_queues(void __iomem *mem,
+ unsigned int *queue_mask,
+ unsigned int *num_queues)
+{
+ unsigned int hw_q;
+ u32 mid;
+
+ *queue_mask = 0x1;
+ *num_queues = 1;
+
+ /* is it macb or gem ? */
+ mid = __raw_readl(mem + MACB_MID);
+ if (MACB_BFEXT(IDNUM, mid) != 0x2)
+ return;
+
+ /* bit 0 is never set but queue 0 always exists */
+ *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
+ *queue_mask |= 0x1;
+
+ for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
+ if (*queue_mask & (1 << hw_q))
+ (*num_queues)++;
+}
+
static int __init macb_probe(struct platform_device *pdev)
{
struct macb_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
+ struct macb_queue *queue;
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
const char *mac;
+ void __iomem *mem;
+ unsigned int hw_q, queue_mask, q, num_queues;
+ struct clk *pclk, *hclk, *tx_clk;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -2078,72 +2169,112 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
- err = -ENOMEM;
- dev = alloc_etherdev(sizeof(*bp));
- if (!dev)
- goto err_out;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- bp = netdev_priv(dev);
- bp->pdev = pdev;
- bp->dev = dev;
-
- spin_lock_init(&bp->lock);
- INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
-
- bp->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(bp->pclk)) {
- err = PTR_ERR(bp->pclk);
+ pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(pclk)) {
+ err = PTR_ERR(pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- bp->hclk = devm_clk_get(&pdev->dev, "hclk");
- if (IS_ERR(bp->hclk)) {
- err = PTR_ERR(bp->hclk);
+ hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk)) {
+ err = PTR_ERR(hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- err = clk_prepare_enable(bp->pclk);
+ err = clk_prepare_enable(pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- err = clk_prepare_enable(bp->hclk);
+ err = clk_prepare_enable(hclk);
if (err) {
dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
goto err_out_disable_pclk;
}
- if (!IS_ERR(bp->tx_clk)) {
- err = clk_prepare_enable(bp->tx_clk);
+ if (!IS_ERR(tx_clk)) {
+ err = clk_prepare_enable(tx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
- err);
+ err);
goto err_out_disable_hclk;
}
}
- bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!bp->regs) {
+ err = -ENOMEM;
+ mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!mem) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
- err = -ENOMEM;
goto err_out_disable_clocks;
}
- dev->irq = platform_get_irq(pdev, 0);
- err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
- dev->name, dev);
- if (err) {
- dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
- dev->irq, err);
+ macb_probe_queues(mem, &queue_mask, &num_queues);
+ dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+ if (!dev)
goto err_out_disable_clocks;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+ bp->regs = mem;
+ bp->num_queues = num_queues;
+ bp->pclk = pclk;
+ bp->hclk = hclk;
+ bp->tx_clk = tx_clk;
+
+ spin_lock_init(&bp->lock);
+
+ /* set the queue register mapping once and for all: queue0 has a special
+ * register mapping, but we don't want to test the queue index and then
+ * compute the corresponding register offset at run time.
+ */
+ for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
+ if (!(queue_mask & (1 << hw_q)))
+ continue;
+
+ queue = &bp->queues[q];
+ queue->bp = bp;
+ if (hw_q) {
+ queue->ISR = GEM_ISR(hw_q - 1);
+ queue->IER = GEM_IER(hw_q - 1);
+ queue->IDR = GEM_IDR(hw_q - 1);
+ queue->IMR = GEM_IMR(hw_q - 1);
+ queue->TBQP = GEM_TBQP(hw_q - 1);
+ } else {
+ /* queue0 uses legacy registers */
+ queue->ISR = MACB_ISR;
+ queue->IER = MACB_IER;
+ queue->IDR = MACB_IDR;
+ queue->IMR = MACB_IMR;
+ queue->TBQP = MACB_TBQP;
+ }
+
+ /* get the IRQ: here we use the Linux queue index, not the hardware
+ * queue index. The queue IRQ definitions in the device tree must
+ * therefore leave out any optional gaps that may exist in the
+ * hardware queue mask.
+ */
+ queue->irq = platform_get_irq(pdev, q);
+ err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
+ 0, dev->name, queue);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to request IRQ %d (error %d)\n",
+ queue->irq, err);
+ goto err_out_free_netdev;
+ }
+
+ INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
+ q++;
}
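The loop above caches one register offset per queue at probe time so the data path never has to branch on the queue index; queue 0 keeps the legacy MACB offsets while the higher queues use the GEM per-queue banks. Below is a userspace-only sketch of that offset selection: GEM_TBQP() is copied from the macb.h hunk further down, while the legacy MACB_TBQP value is an assumption, since it is not shown in this diff.

#include <stdio.h>

#define MACB_TBQP	0x001c				/* assumed legacy offset, not visible in this diff */
#define GEM_TBQP(hw_q)	(0x0440 + ((hw_q) << 2))	/* per-queue bank, as defined in macb.h */

static unsigned int tbqp_offset(unsigned int hw_q)
{
	/* queue 0 uses the legacy register; the others use the GEM bank */
	return hw_q ? GEM_TBQP(hw_q - 1) : MACB_TBQP;
}

int main(void)
{
	unsigned int hw_q;

	for (hw_q = 0; hw_q < 3; ++hw_q)
		printf("hw_q=%u -> TBQP offset 0x%04x\n", hw_q, tbqp_offset(hw_q));
	return 0;
}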
+ dev->irq = bp->queues[0].irq;
dev->netdev_ops = &macb_netdev_ops;
netif_napi_add(dev, &bp->napi, macb_poll, 64);
@@ -2219,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_disable_clocks;
+ goto err_out_free_netdev;
}
err = macb_mii_init(bp);
@@ -2242,15 +2373,15 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
+err_out_free_netdev:
+ free_netdev(dev);
err_out_disable_clocks:
- if (!IS_ERR(bp->tx_clk))
- clk_disable_unprepare(bp->tx_clk);
+ if (!IS_ERR(tx_clk))
+ clk_disable_unprepare(tx_clk);
err_out_disable_hclk:
- clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(hclk);
err_out_disable_pclk:
- clk_disable_unprepare(bp->pclk);
-err_out_free_dev:
- free_netdev(dev);
+ clk_disable_unprepare(pclk);
err_out:
return err;
}
@@ -2321,7 +2452,6 @@ static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
.driver = {
.name = "macb",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 517c09d72c4a..084191b6fad2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 1
+#define MACB_MAX_QUEUES 8
/* MACB register offsets */
#define MACB_NCR 0x0000
@@ -89,6 +90,13 @@
#define GEM_DCFG6 0x0294
#define GEM_DCFG7 0x0298
+#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
+#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
+#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
+#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0
#define MACB_LB_SIZE 1
@@ -376,6 +384,10 @@
__raw_readl((port)->regs + GEM_##reg)
#define gem_writel(port, reg, value) \
__raw_writel((value), (port)->regs + GEM_##reg)
+#define queue_readl(queue, reg) \
+ __raw_readl((queue)->bp->regs + (queue)->reg)
+#define queue_writel(queue, reg, value) \
+ __raw_writel((value), (queue)->bp->regs + (queue)->reg)
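Unlike macb_readl()/gem_readl(), which token-paste a fixed register name, the new queue accessors go through the offsets cached in struct macb_queue, so the same source line can hit either the legacy or the per-queue register. Written out by hand:

/* queue_readl(queue, ISR) expands to: */
__raw_readl((queue)->bp->regs + (queue)->ISR)
/* where queue->ISR was set to MACB_ISR for queue 0 and to GEM_ISR(hw_q - 1)
 * for the other queues in macb_probe() above.
 */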
/*
* Conditional GEM/MACB macros. These perform the operation to the correct
@@ -597,6 +609,23 @@ struct macb_config {
unsigned int dma_burst_length;
};
+struct macb_queue {
+ struct macb *bp;
+ int irq;
+
+ unsigned int ISR;
+ unsigned int IER;
+ unsigned int IDR;
+ unsigned int IMR;
+ unsigned int TBQP;
+
+ unsigned int tx_head, tx_tail;
+ struct macb_dma_desc *tx_ring;
+ struct macb_tx_skb *tx_skb;
+ dma_addr_t tx_ring_dma;
+ struct work_struct tx_error_task;
+};
+
struct macb {
void __iomem *regs;
@@ -607,9 +636,8 @@ struct macb {
void *rx_buffers;
size_t rx_buffer_size;
- unsigned int tx_head, tx_tail;
- struct macb_dma_desc *tx_ring;
- struct macb_tx_skb *tx_skb;
+ unsigned int num_queues;
+ struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
struct platform_device *pdev;
@@ -618,7 +646,6 @@ struct macb {
struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
- struct work_struct tx_error_task;
struct net_device_stats stats;
union {
struct macb_stats macb;
@@ -626,7 +653,6 @@ struct macb {
} hw_stats;
dma_addr_t rx_ring_dma;
- dma_addr_t tx_ring_dma;
dma_addr_t rx_buffers_dma;
struct macb_or_gem_ops macbgem_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 4c5879389003..babe2a915b00 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -301,7 +301,7 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
struct sched_port *p = &s->p[port];
unsigned int max_avail_segs;
- pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+ pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
if (speed)
p->speed = speed;
if (mtu)
@@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
/**
* get_packet - return the next ingress packet buffer
- * @pdev: the PCI device that received the packet
+ * @adapter: the adapter that received the packet
* @fl: the SGE free list holding the packet
* @len: the actual packet length, excluding any SGE padding
*
@@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
* threshold and the packet is too big to copy, or (b) the packet should
* be copied but there is no memory for the copy.
*/
-static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+static inline struct sk_buff *get_packet(struct adapter *adapter,
struct freelQ *fl, unsigned int len)
{
- struct sk_buff *skb;
const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+ struct pci_dev *pdev = adapter->pdev;
+ struct sk_buff *skb;
if (len < copybreak) {
- skb = netdev_alloc_skb_ip_align(NULL, len);
+ skb = napi_alloc_skb(&adapter->napi, len);
if (!skb)
goto use_orig_buf;
@@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
struct sge_port_stats *st;
struct net_device *dev;
- skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+ skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
if (unlikely(!skb)) {
sge->stats.rx_drops++;
return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 1df65c915b99..b85280775997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
+cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3c481b260745..5ab5c3133acd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
#include "cxgb4_uld.h"
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0B
-#define T4FW_VERSION_MICRO 0x1B
+#define T4FW_VERSION_MINOR 0x0C
+#define T4FW_VERSION_MICRO 0x19
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0B
-#define T5FW_VERSION_MICRO 0x1B
+#define T5FW_VERSION_MINOR 0x0C
+#define T5FW_VERSION_MICRO 0x19
#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -222,6 +222,12 @@ struct tp_err_stats {
u32 ofldCongDefer;
};
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+ u32 iq_qpp; /* ingress queues/page for our PF/VF */
+};
+
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
@@ -285,6 +291,7 @@ enum chip_type {
};
struct adapter_params {
+ struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
@@ -318,10 +325,10 @@ struct adapter_params {
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
- FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
- FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
- FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
- FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
+ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
struct fw_info {
@@ -354,7 +361,7 @@ struct link_config {
unsigned char link_ok; /* link up? */
};
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
@@ -385,7 +392,7 @@ struct port_info {
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
s8 mdio_addr;
- u8 port_type;
+ enum fw_port_type port_type;
u8 mod_type;
u8 port_id;
u8 tx_chan;
@@ -431,7 +438,8 @@ struct sge_fl { /* SGE free-buffer queue state */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
__be64 *desc; /* address of HW Rx descriptor ring */
dma_addr_t addr; /* bus address of HW ring start */
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/* A packet gather list */
@@ -461,7 +469,8 @@ struct sge_rspq { /* state for an SGE response queue */
u16 abs_id; /* absolute SGE id for the response q */
__be64 *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response queue */
struct adapter *adap;
@@ -519,7 +528,8 @@ struct sge_txq {
int db_disabled;
unsigned short db_pidx;
unsigned short db_pidx_inc;
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -995,6 +1005,15 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
+int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -1085,4 +1104,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
+void t4_free_mem(void *addr);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4fe33606f372..a35d1ec6950e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -243,7 +243,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
const struct fw_port_cmd *pcmd)
{
const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
- int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid));
+ int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
struct net_device *dev = adap->port[port];
struct port_info *pi = netdev_priv(dev);
struct port_dcb_info *dcb = &pi->dcb;
@@ -256,12 +256,12 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
enum cxgb4_dcb_state_input input =
((pcmd->u.dcb.control.all_syncd_pkd &
- FW_PORT_CMD_ALL_SYNCD)
+ FW_PORT_CMD_ALL_SYNCD_F)
? CXGB4_DCB_STATE_FW_ALLSYNCED
: CXGB4_DCB_STATE_FW_INCOMPLETE);
if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
- dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET(
+ dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
be16_to_cpu(
pcmd->u.dcb.control.dcb_version_to_app_state));
if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
@@ -519,7 +519,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS)
@@ -583,7 +583,7 @@ static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
@@ -623,7 +623,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
@@ -842,7 +842,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
/* write out new app table entry */
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 2a6aa88984f4..31ce425616c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -42,12 +42,12 @@
do { \
memset(&(__pcmd), 0, sizeof(__pcmd)); \
(__pcmd).op_to_portid = \
- cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \
- FW_CMD_REQUEST | \
- FW_CMD_##__op | \
- FW_PORT_CMD_PORTID(__port)); \
+ cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \
+ FW_CMD_REQUEST_F | \
+ FW_CMD_##__op##_F | \
+ FW_PORT_CMD_PORTID_V(__port)); \
(__pcmd).action_to_len16 = \
- cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \
FW_LEN16(pcmd)); \
} while (0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
new file mode 100644
index 000000000000..c98a350d857e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+#include <linux/sort.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+#include "cxgb4_debugfs.h"
+#include "l2t.h"
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file_inode(file)->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct adapter *adap = file->private_data - mem;
+ __be32 *data;
+ int ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ data = t4_alloc_mem(count);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock(&adap->win0_lock);
+ ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
+ spin_unlock(&adap->win0_lock);
+ if (ret) {
+ t4_free_mem(data);
+ return ret;
+ }
+ ret = copy_to_user(buf, data, count);
+
+ t4_free_mem(data);
+ if (ret)
+ return -EFAULT;
+
+ *ppos = pos + count;
+ return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mem_read,
+ .llseek = default_llseek,
+};
+
+static void add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+ (void *)adap + idx, &mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
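add_debugfs_mem() stores (void *)adap + idx as the file's private data, and mem_read() recovers both pieces because the adapter structure is at least 4-byte aligned, leaving the memory-target index in the two low bits. A standalone model of that tagging scheme follows; the struct and index value are made up, and the void-pointer arithmetic relies on GNU C, as in the driver itself.

#include <stdio.h>
#include <stdint.h>

struct fake_adapter { int dummy; } adapter_instance __attribute__((aligned(4)));

int main(void)
{
	unsigned int idx = 2;					/* hypothetical memory-target index */
	void *private_data = (void *)&adapter_instance + idx;	/* what add_debugfs_mem() stores */

	unsigned int mem = (uintptr_t)private_data & 3;		/* low bits: memory target */
	struct fake_adapter *adap = private_data - mem;		/* remaining bits: the adapter */

	printf("idx=%u recovered mem=%u adapter ok=%d\n",
	       idx, mem, adap == &adapter_instance);
	return 0;
}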
+
+/* Add an array of debugfs files.
+ */
+void add_debugfs_files(struct adapter *adap,
+ struct t4_debugfs_entry *files,
+ unsigned int nfiles)
+{
+ int i;
+
+ /* debugfs support is best effort */
+ for (i = 0; i < nfiles; i++)
+ debugfs_create_file(files[i].name, files[i].mode,
+ adap->debugfs_root,
+ (void *)adap + files[i].data,
+ files[i].ops);
+}
+
+int t4_setup_debugfs(struct adapter *adap)
+{
+ int i;
+ u32 size;
+
+ static struct t4_debugfs_entry t4_debugfs_files[] = {
+ { "l2t", &t4_l2t_fops, S_IRUSR, 0},
+ };
+
+ add_debugfs_files(adap,
+ t4_debugfs_files,
+ ARRAY_SIZE(t4_debugfs_files));
+
+ i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (i & EDRAM0_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size));
+ }
+ if (i & EDRAM1_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
+ }
+ if (is_t4(adap->params.chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ if (i & EXT_MEM_ENABLE_F)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_G(size));
+ } else {
+ if (i & EXT_MEM0_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM0_SIZE_G(size));
+ }
+ if (i & EXT_MEM1_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM1_SIZE_G(size));
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
new file mode 100644
index 000000000000..a3d8867efd3d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_DEBUGFS_H
+#define __CXGB4_DEBUGFS_H
+
+#include <linux/export.h>
+
+struct t4_debugfs_entry {
+ const char *name;
+ const struct file_operations *ops;
+ mode_t mode;
+ unsigned char data;
+};
+
+int t4_setup_debugfs(struct adapter *adap);
+void add_debugfs_files(struct adapter *adap,
+ struct t4_debugfs_entry *files,
+ unsigned int nfiles);
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 279873cb6e3a..ccf3436024bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -61,6 +61,7 @@
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <net/bonding.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
@@ -68,10 +69,9 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
+#include "cxgb4_debugfs.h"
#include "l2t.h"
-#include <../drivers/net/bonding/bonding.h>
-
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
@@ -141,7 +141,7 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
* Give PF's access to all of the ports.
*/
if (vf == 0)
- return FW_PFVF_CMD_PMASK_MASK;
+ return FW_PFVF_CMD_PMASK_M;
/*
* For VFs, we'll assign them access to the ports based purely on the
@@ -210,114 +210,25 @@ struct filter_entry {
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
-#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
-
-static const struct pci_device_id cxgb4_pci_tbl[] = {
- CH_DEVICE(0xa000, 0), /* PE10K */
- CH_DEVICE(0x4001, -1),
- CH_DEVICE(0x4002, -1),
- CH_DEVICE(0x4003, -1),
- CH_DEVICE(0x4004, -1),
- CH_DEVICE(0x4005, -1),
- CH_DEVICE(0x4006, -1),
- CH_DEVICE(0x4007, -1),
- CH_DEVICE(0x4008, -1),
- CH_DEVICE(0x4009, -1),
- CH_DEVICE(0x400a, -1),
- CH_DEVICE(0x400d, -1),
- CH_DEVICE(0x400e, -1),
- CH_DEVICE(0x4080, -1),
- CH_DEVICE(0x4081, -1),
- CH_DEVICE(0x4082, -1),
- CH_DEVICE(0x4083, -1),
- CH_DEVICE(0x4084, -1),
- CH_DEVICE(0x4085, -1),
- CH_DEVICE(0x4086, -1),
- CH_DEVICE(0x4087, -1),
- CH_DEVICE(0x4088, -1),
- CH_DEVICE(0x4401, 4),
- CH_DEVICE(0x4402, 4),
- CH_DEVICE(0x4403, 4),
- CH_DEVICE(0x4404, 4),
- CH_DEVICE(0x4405, 4),
- CH_DEVICE(0x4406, 4),
- CH_DEVICE(0x4407, 4),
- CH_DEVICE(0x4408, 4),
- CH_DEVICE(0x4409, 4),
- CH_DEVICE(0x440a, 4),
- CH_DEVICE(0x440d, 4),
- CH_DEVICE(0x440e, 4),
- CH_DEVICE(0x4480, 4),
- CH_DEVICE(0x4481, 4),
- CH_DEVICE(0x4482, 4),
- CH_DEVICE(0x4483, 4),
- CH_DEVICE(0x4484, 4),
- CH_DEVICE(0x4485, 4),
- CH_DEVICE(0x4486, 4),
- CH_DEVICE(0x4487, 4),
- CH_DEVICE(0x4488, 4),
- CH_DEVICE(0x5001, 4),
- CH_DEVICE(0x5002, 4),
- CH_DEVICE(0x5003, 4),
- CH_DEVICE(0x5004, 4),
- CH_DEVICE(0x5005, 4),
- CH_DEVICE(0x5006, 4),
- CH_DEVICE(0x5007, 4),
- CH_DEVICE(0x5008, 4),
- CH_DEVICE(0x5009, 4),
- CH_DEVICE(0x500A, 4),
- CH_DEVICE(0x500B, 4),
- CH_DEVICE(0x500C, 4),
- CH_DEVICE(0x500D, 4),
- CH_DEVICE(0x500E, 4),
- CH_DEVICE(0x500F, 4),
- CH_DEVICE(0x5010, 4),
- CH_DEVICE(0x5011, 4),
- CH_DEVICE(0x5012, 4),
- CH_DEVICE(0x5013, 4),
- CH_DEVICE(0x5014, 4),
- CH_DEVICE(0x5015, 4),
- CH_DEVICE(0x5080, 4),
- CH_DEVICE(0x5081, 4),
- CH_DEVICE(0x5082, 4),
- CH_DEVICE(0x5083, 4),
- CH_DEVICE(0x5084, 4),
- CH_DEVICE(0x5085, 4),
- CH_DEVICE(0x5086, 4),
- CH_DEVICE(0x5087, 4),
- CH_DEVICE(0x5088, 4),
- CH_DEVICE(0x5401, 4),
- CH_DEVICE(0x5402, 4),
- CH_DEVICE(0x5403, 4),
- CH_DEVICE(0x5404, 4),
- CH_DEVICE(0x5405, 4),
- CH_DEVICE(0x5406, 4),
- CH_DEVICE(0x5407, 4),
- CH_DEVICE(0x5408, 4),
- CH_DEVICE(0x5409, 4),
- CH_DEVICE(0x540A, 4),
- CH_DEVICE(0x540B, 4),
- CH_DEVICE(0x540C, 4),
- CH_DEVICE(0x540D, 4),
- CH_DEVICE(0x540E, 4),
- CH_DEVICE(0x540F, 4),
- CH_DEVICE(0x5410, 4),
- CH_DEVICE(0x5411, 4),
- CH_DEVICE(0x5412, 4),
- CH_DEVICE(0x5413, 4),
- CH_DEVICE(0x5414, 4),
- CH_DEVICE(0x5415, 4),
- CH_DEVICE(0x5480, 4),
- CH_DEVICE(0x5481, 4),
- CH_DEVICE(0x5482, 4),
- CH_DEVICE(0x5483, 4),
- CH_DEVICE(0x5484, 4),
- CH_DEVICE(0x5485, 4),
- CH_DEVICE(0x5486, 4),
- CH_DEVICE(0x5487, 4),
- CH_DEVICE(0x5488, 4),
- { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct pci_device_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
+ * called for both.
+ */
+#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ {PCI_VDEVICE(CHELSIO, (devid)), 4}
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { 0, } \
+ }
+
+#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
@@ -512,9 +423,10 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
u32 name, value;
int err;
- name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
- FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
+ name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+ FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
value = enable ? i : 0xffffffff;
/* Since we can be called while atomic (from "interrupt
@@ -709,7 +621,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled);
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
- int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
+ int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
struct net_device *dev = adap->port[port];
int old_dcb_enabled = cxgb4_dcb_enabled(dev);
int new_dcb_enabled;
@@ -832,17 +744,17 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
#ifdef CONFIG_CHELSIO_T4_DCB
const struct fw_port_cmd *pcmd = (const void *)p->data;
- unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
+ unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
unsigned int action =
- FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
+ FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
if (cmd == FW_PORT_CMD &&
action == FW_PORT_ACTION_GET_PORT_INFO) {
- int port = FW_PORT_CMD_PORTID_GET(
+ int port = FW_PORT_CMD_PORTID_G(
be32_to_cpu(pcmd->op_to_portid));
struct net_device *dev = q->adap->port[port];
int state_input = ((pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS)
+ FW_PORT_CMD_DCBXDIS_F)
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
@@ -1287,7 +1199,7 @@ void *t4_alloc_mem(size_t size)
/*
* Free memory allocated through alloc_mem().
*/
-static void t4_free_mem(void *addr)
+void t4_free_mem(void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
@@ -1339,52 +1251,52 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
- fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
fwr->tid_to_iq =
- htonl(V_FW_FILTER_WR_TID(ftid) |
- V_FW_FILTER_WR_RQTYPE(f->fs.type) |
- V_FW_FILTER_WR_NOREPLY(0) |
- V_FW_FILTER_WR_IQ(f->fs.iq));
+ htonl(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
fwr->del_filter_to_l2tix =
- htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
- V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
- V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
- V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
- V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
- V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
- V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
- V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
- V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
- V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
- V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
- V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
- V_FW_FILTER_WR_PRIO(f->fs.prio) |
- V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = htons(f->fs.val.ethtype);
fwr->ethtypem = htons(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
- (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
- V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
- V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
- V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
- V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
- V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq =
- htons(V_FW_FILTER_WR_RX_CHAN(0) |
- V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
fwr->maci_to_matchtypem =
- htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
- V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
- V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
- V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
- V_FW_FILTER_WR_PORT(f->fs.val.iport) |
- V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
- V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
- V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
fwr->ttyp = f->fs.val.tos;
@@ -1615,14 +1527,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2413,7 +2325,7 @@ static int identify_port(struct net_device *dev,
return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
-static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
{
unsigned int v = 0;
@@ -2442,14 +2354,20 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
v |= SUPPORTED_FIBRE;
if (caps & FW_PORT_CAP_SPEED_1G)
v |= SUPPORTED_1000baseT_Full;
if (caps & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA)
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2484,6 +2402,7 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = PORT_FIBRE;
else if (p->port_type == FW_PORT_TYPE_SFP ||
p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
p->port_type == FW_PORT_TYPE_QSFP) {
if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
p->mod_type == FW_PORT_MOD_TYPE_SR ||
@@ -2721,9 +2640,10 @@ static int set_rspq_intr_params(struct sge_rspq *q,
new_idx = closest_thres(&adap->sge, cnt);
if (q->desc && q->pktcnt_idx != new_idx) {
/* the queue has already been created, update it */
- v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
- FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
&new_idx);
if (err)
@@ -2937,7 +2857,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
int ret;
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
- unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
+ unsigned int mbox = PCIE_FW_MASTER_M + 1;
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
@@ -3014,21 +2934,35 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
while (n--)
p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
+ const u8 hfunc)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
+
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
@@ -3048,45 +2982,45 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
}
@@ -3131,102 +3065,14 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.flash_device = set_flash,
};
-/*
- * debugfs support
- */
-static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- loff_t pos = *ppos;
- loff_t avail = file_inode(file)->i_size;
- unsigned int mem = (uintptr_t)file->private_data & 3;
- struct adapter *adap = file->private_data - mem;
- __be32 *data;
- int ret;
-
- if (pos < 0)
- return -EINVAL;
- if (pos >= avail)
- return 0;
- if (count > avail - pos)
- count = avail - pos;
-
- data = t4_alloc_mem(count);
- if (!data)
- return -ENOMEM;
-
- spin_lock(&adap->win0_lock);
- ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
- spin_unlock(&adap->win0_lock);
- if (ret) {
- t4_free_mem(data);
- return ret;
- }
- ret = copy_to_user(buf, data, count);
-
- t4_free_mem(data);
- if (ret)
- return -EFAULT;
-
- *ppos = pos + count;
- return count;
-}
-
-static const struct file_operations mem_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mem_read,
- .llseek = default_llseek,
-};
-
-static void add_debugfs_mem(struct adapter *adap, const char *name,
- unsigned int idx, unsigned int size_mb)
-{
- struct dentry *de;
-
- de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
- (void *)adap + idx, &mem_debugfs_fops);
- if (de && de->d_inode)
- de->d_inode->i_size = size_mb << 20;
-}
-
static int setup_debugfs(struct adapter *adap)
{
- int i;
- u32 size;
-
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
- i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE) {
- size = t4_read_reg(adap, MA_EDRAM0_BAR);
- add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
- }
- if (i & EDRAM1_ENABLE) {
- size = t4_read_reg(adap, MA_EDRAM1_BAR);
- add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
- }
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(size));
- } else {
- if (i & EXT_MEM_ENABLE) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
- add_debugfs_mem(adap, "mc0", MEM_MC0,
- EXT_MEM_SIZE_GET(size));
- }
- if (i & EXT_MEM1_ENABLE) {
- size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
- add_debugfs_mem(adap, "mc1", MEM_MC1,
- EXT_MEM_SIZE_GET(size));
- }
- }
- if (adap->l2t)
- debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
- &t4_l2t_fops);
+#ifdef CONFIG_DEBUG_FS
+ t4_setup_debugfs(adap);
+#endif
return 0;
}
@@ -3508,9 +3354,9 @@ int cxgb4_clip_get(const struct net_device *dev,
adap = netdev2adap(dev);
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
c.ip_hi = *(__be64 *)(lip->s6_addr);
c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
@@ -3525,9 +3371,9 @@ int cxgb4_clip_release(const struct net_device *dev,
adap = netdev2adap(dev);
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
- c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
c.ip_hi = *(__be64 *)(lip->s6_addr);
c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
@@ -3568,7 +3414,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->local_ip = sip;
req->peer_ip = htonl(0);
chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
ret = t4_mgmt_tx(adap, skb);
@@ -3611,7 +3457,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->peer_ip_hi = cpu_to_be64(0);
req->peer_ip_lo = cpu_to_be64(0);
chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
ret = t4_mgmt_tx(adap, skb);
@@ -3893,7 +3739,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
u32 offset, memtype, memaddr;
- u32 edc0_size, edc1_size, mc0_size, mc1_size;
+ u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
u32 edc0_end, edc1_end, mc0_end, mc1_end;
int ret;
@@ -3907,9 +3753,12 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
* and EDC1. Some cards will have neither MC0 nor MC1, most cards have
* MC0, and some have both MC0 and MC1.
*/
- edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
- edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
- mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
+ size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ edc0_size = EDRAM0_SIZE_G(size) << 20;
+ size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ edc1_size = EDRAM1_SIZE_G(size) << 20;
+ size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ mc0_size = EXT_MEM0_SIZE_G(size) << 20;
edc0_end = edc0_size;
edc1_end = edc0_end + edc1_size;
@@ -3929,9 +3778,8 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* T4 only has a single memory channel */
goto err;
} else {
- mc1_size = EXT_MEM_SIZE_GET(
- t4_read_reg(adap,
- MA_EXT_MEMORY1_BAR)) << 20;
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
if (offset < mc1_end) {
memtype = MEM_MC1;
@@ -3968,6 +3816,22 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+ unsigned int qid,
+ enum cxgb4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+ qid,
+ (qtype == CXGB4_BAR2_QTYPE_EGRESS
+ ? T4_BAR2_QTYPE_EGRESS
+ : T4_BAR2_QTYPE_INGRESS),
+ pbar2_qoffset,
+ pbar2_qid);
+}
+EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -4150,31 +4014,18 @@ static void process_db_drop(struct work_struct *work)
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
- unsigned int s_qpp;
- unsigned short udb_density;
- unsigned long qpshift;
- int page;
- u32 udb;
-
- dev_warn(adap->pdev_dev,
- "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
- dropped_db, qid,
- (dropped_db >> 14) & 1,
- (dropped_db >> 13) & 1,
- pidx_inc);
-
- drain_db_fifo(adap, 1);
+ u64 bar2_qoffset;
+ unsigned int bar2_qid;
+ int ret;
- s_qpp = QUEUESPERPAGEPF1 * adap->fn;
- udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
- SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
- qpshift = PAGE_SHIFT - ilog2(udb_density);
- udb = qid << qpshift;
- udb &= PAGE_MASK;
- page = udb / PAGE_SIZE;
- udb += (qid - (page * udb_density)) * 128;
-
- writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+ ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ &bar2_qoffset, &bar2_qid);
+ if (ret)
+ dev_err(adap->pdev_dev, "doorbell drop recovery: "
+ "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
+ else
+ writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+ adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
/* Re-enable BAR2 WC */
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
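The open-coded doorbell math removed above (now centralized behind cxgb4_t4_bar2_sge_qregs()) maps a queue ID to a BAR2 page plus an offset within that page. A standalone model of that arithmetic, with a hypothetical 4 KB page size and 16 queues per page:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096, page_shift = 12;
	unsigned int udb_density = 16;			/* hypothetical: 1 << queues-per-page exponent */
	unsigned int qid = 37;				/* hypothetical egress queue id */

	unsigned long qpshift = page_shift - 4;		/* ilog2(udb_density) == 4 here */
	unsigned long udb = ((unsigned long)qid << qpshift) & ~(page_size - 1);
	unsigned long page = udb / page_size;

	udb += (qid - page * udb_density) * 128;	/* 128-byte doorbell slot within the page */
	printf("qid=%u -> BAR2 doorbell offset 0x%lx (page %lu)\n", qid, udb, page);
	return 0;
}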
@@ -4232,12 +4083,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
- t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
- (adap->fn * 4));
- lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
- t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
- (adap->fn * 4));
+ lli.udb_density = 1 << adap->params.sge.eq_qpp;
+ lli.ucq_density = 1 << adap->params.sge.iq_qpp;
lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
@@ -4399,8 +4246,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
if (cxgb4_netdev(event_dev)) {
switch (event) {
case NETDEV_UP:
- ret = cxgb4_clip_get(event_dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = cxgb4_clip_get(event_dev, &ifa->addr);
if (ret < 0) {
rcu_read_unlock();
return ret;
@@ -4408,8 +4254,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
ret = NOTIFY_OK;
break;
case NETDEV_DOWN:
- cxgb4_clip_release(event_dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ cxgb4_clip_release(event_dev, &ifa->addr);
ret = NOTIFY_OK;
break;
default:
@@ -4478,8 +4323,7 @@ static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- ret = cxgb4_clip_get(dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = cxgb4_clip_get(dev, &ifa->addr);
if (ret < 0)
break;
}
@@ -4960,14 +4804,14 @@ static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
*/
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP(FW_LDST_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
ldst_cmd.u.pcie.r = reg;
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
&ldst_cmd);
@@ -5054,8 +4898,8 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
/* get device capabilities */
memset(c, 0, sizeof(*c));
- c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
@@ -5071,16 +4915,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
dev_err(adap->pdev_dev, "virtualization ACLs not supported");
return ret;
}
- c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
ret = t4_config_glbl_rss(adap, adap->fn,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
@@ -5241,8 +5085,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (cf->size >= FLASH_CFG_MAX_SIZE)
ret = -ENOMEM;
else {
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
adapter->fn, 0, 1, params, val);
if (ret == 0) {
@@ -5260,8 +5104,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
size_t size = cf->size & ~0x3;
__be32 *data = (__be32 *)cf->data;
- mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
- maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
+ mtype = FW_PARAMS_PARAM_Y_G(val[0]);
+ maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
spin_lock(&adapter->win0_lock);
ret = t4_memory_rw(adapter, 0, mtype, maddr,
@@ -5298,13 +5142,13 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -5318,9 +5162,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret == -ENOENT) {
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
sizeof(caps_cmd), &caps_cmd);
@@ -5343,9 +5187,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* And now tell the firmware to use the configuration we just loaded.
*/
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
@@ -5416,8 +5260,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
* Get device capabilities and select which we'll be using.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -5433,8 +5277,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
goto bye;
}
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -5456,10 +5300,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
adapter->flags |= RSS_TNLALLLOOKUP;
ret = t4_config_glbl_rss(adapter, adapter->mbox,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
((adapter->flags & RSS_TNLALLLOOKUP) ?
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
if (ret < 0)
goto bye;
@@ -5470,7 +5314,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
PFRES_NEQ, PFRES_NETHCTRL,
PFRES_NIQFLINT, PFRES_NIQ,
PFRES_TC, PFRES_NVI,
- FW_PFVF_CMD_CMASK_MASK,
+ FW_PFVF_CMD_CMASK_M,
pfvfres_pmask(adapter, adapter->fn, 0),
PFRES_NEXACTF,
PFRES_R_CAPS, PFRES_WX_CAPS);
@@ -5515,7 +5359,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_MASK,
+ FW_PFVF_CMD_CMASK_M,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
@@ -5687,14 +5531,8 @@ static int adap_init0(struct adapter *adap)
struct fw_caps_config_cmd caps_cmd;
int reset = 1;
- /*
- * Contact FW, advertising Master capability (and potentially forcing
- * ourselves as the Master PF if our module parameter force_init is
- * set).
- */
- ret = t4_fw_hello(adap, adap->mbox, adap->fn,
- force_init ? MASTER_MUST : MASTER_MAY,
- &state);
+ /* Contact FW, advertising Master capability */
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
@@ -5702,8 +5540,6 @@ static int adap_init0(struct adapter *adap)
}
if (ret == adap->mbox)
adap->flags |= MASTER_PF;
- if (force_init && state == DEV_STATE_INIT)
- state = DEV_STATE_UNINIT;
/*
* If we're the Master PF Driver and the device is uninitialized,
@@ -5779,8 +5615,8 @@ static int adap_init0(struct adapter *adap)
* and portvec ...
*/
v =
- FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -5802,7 +5638,6 @@ static int adap_init0(struct adapter *adap)
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
-
/*
* If the firmware doesn't support Configuration
* Files warn user and exit,
@@ -5817,8 +5652,9 @@ static int adap_init0(struct adapter *adap)
* Find out whether we're dealing with a version of
* the firmware which has configuration file support.
*/
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
params, val);
@@ -5878,14 +5714,14 @@ static int adap_init0(struct adapter *adap)
* Grab some of our basic fundamental operating parameters.
*/
#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y(0) | \
- FW_PARAMS_PARAM_Z(0)
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0)
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
@@ -5945,8 +5781,8 @@ static int adap_init0(struct adapter *adap)
* to manage.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -6092,6 +5928,7 @@ static int adap_init0(struct adapter *adap)
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
}
+ t4_init_sge_params(adap);
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1366ba620c87..152b4c4c7809 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -52,10 +52,10 @@ enum {
};
#define INIT_TP_WR(w, tid) do { \
- (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
- FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
- (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
- FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \
+ FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ FW_WR_FLOWID_V(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
@@ -65,9 +65,10 @@ enum {
} while (0)
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
- (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
- (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
- FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \
+ FW_WR_ATOMIC_V(atomic)); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \
+ FW_WR_FLOWID_V(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
@@ -304,4 +305,11 @@ void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
+enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS };
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+ unsigned int qid,
+ enum cxgb4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
#endif /* !__CXGB4_OFLD_H */
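As a side note on the new export declared in the hunk above: below is a hedged sketch of how an upper-layer driver might consume cxgb4_bar2_sge_qregs() to resolve a doorbell pointer. The helper name and the uld_bar2_base argument (an ioremap'ed BAR2 base owned by the ULD) are assumptions for illustration and are not part of this patch; only the enum and the prototype come from the hunk above.

/* Hypothetical helper, illustration only: resolve the BAR2 doorbell
 * location for an egress queue owned by a ULD.  Assumes the ULD already
 * holds its own __iomem mapping of BAR2 (uld_bar2_base).
 */
static void __iomem *uld_egress_udb(struct net_device *dev, unsigned int qid,
				    void __iomem *uld_bar2_base,
				    unsigned int *bar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(dev, qid, CXGB4_BAR2_QTYPE_EGRESS,
				   &bar2_qoffset, bar2_qid);
	if (ret)
		return NULL;	/* e.g. T4 parts: no BAR2 SGE registers */

	return uld_bar2_base + bar2_qoffset;
}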
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 96041397ee15..a047baa9fd04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -435,9 +435,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
if (tp->vnic_shift >= 0) {
u32 viid = cxgb4_port_viid(dev);
- u32 vf = FW_VIID_VIN_GET(viid);
- u32 pf = FW_VIID_PFN_GET(viid);
- u32 vld = FW_VIID_VIVLD_GET(viid);
+ u32 vf = FW_VIID_VIN_G(viid);
+ u32 pf = FW_VIID_PFN_G(viid);
+ u32 vld = FW_VIID_VIVLD_G(viid);
ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
V_FT_VNID_ID_PF(pf) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 39f2b13e66c7..ebf935a1e352 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -527,14 +527,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
val |= DBPRIO(1);
wmb();
- /* If we're on T4, use the old doorbell mechanism; otherwise
- * use the new BAR2 mechanism.
+ /* If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
*/
- if (is_t4(adap->params.chip)) {
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
val | QID(q->cntxt_id));
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
+ writel(val | QID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
* the User Doorbell area to be flushed.
@@ -576,7 +578,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN | __GFP_COLD;
+ gfp |= __GFP_NOWARN;
if (s->fl_pg_order == 0)
goto alloc_small_pages;
@@ -585,7 +587,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
* Prefer large buffers
*/
while (n) {
- pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
+ pg = __dev_alloc_pages(gfp, s->fl_pg_order);
if (unlikely(!pg)) {
q->large_alloc_failed++;
break; /* fall back to single pages */
@@ -615,7 +617,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
alloc_small_pages:
while (n--) {
- pg = __skb_alloc_page(gfp, NULL);
+ pg = __dev_alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -816,7 +818,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -850,14 +852,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
-/* This function copies a tx_desc struct to memory mapped BAR2 space(user space
- * writes). For coalesced WR SGE, fetches data from the FIFO instead of from
- * Host.
+/* This function copies a 64-byte coalesced work request to memory
+ * mapped BAR2 space. For coalesced WRs the SGE fetches data from the
+ * FIFO instead of from the Host.
*/
-static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc)
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
- int count = sizeof(*desc) / sizeof(u64);
- u64 *src = (u64 *)desc;
+ int count = 8;
while (count) {
writeq(*src, dst);
@@ -879,7 +880,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
wmb(); /* write descriptors before telling HW */
- if (is_t4(adap->params.chip)) {
+ /* If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
u32 val = PIDX(n);
unsigned long flags;
@@ -905,21 +909,22 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
*/
WARN_ON(val & DBPRIO(1));
- /* For T5 and later we use the Write-Combine mapped BAR2 User
- * Doorbell mechanism. If we're only writing a single TX
- * Descriptor and TX Write Combining hasn't been disabled, we
- * can use the Write Combining Gather Buffer; otherwise we use
- * the simple doorbell.
+ /* If we're only writing a single TX Descriptor and we can use
+ * Inferred QID registers, we can use the Write Combining
+ * Gather Buffer; otherwise we use the simple doorbell.
*/
- if (n == 1) {
+ if (n == 1 && q->bar2_qid == 0) {
int index = (q->pidx
? (q->pidx - 1)
: (q->size - 1));
+ u64 *wr = (u64 *)&q->desc[index];
- cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL,
- q->desc + index);
+ cxgb_pio_copy((u64 __iomem *)
+ (q->bar2_addr + SGE_UDB_WCDOORBELL),
+ wr);
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
+ writel(val | QID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_KDOORBELL);
}
/* This Write Memory Barrier will force the write to the User
@@ -1092,10 +1097,10 @@ out_free: dev_kfree_skb_any(skb);
goto out_free;
}
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1112,8 +1117,8 @@ out_free: dev_kfree_skb_any(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
len += sizeof(*lso);
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(len));
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1135,8 +1140,8 @@ out_free: dev_kfree_skb_any(skb);
q->tx_cso += ssi->gso_segs;
} else {
len += sizeof(*cpl);
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(len));
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
@@ -1224,7 +1229,7 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
reclaim_completed_tx_imm(&q->q);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
q->q.stops++;
q->full = 1;
}
@@ -1406,7 +1411,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
q->q.stops++;
q->full = 1;
}
@@ -1997,11 +2002,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
params = QINTR_TIMER_IDX(7);
val = CIDXINC(work_done) | SEINTARM(params);
- if (is_t4(q->adap->params.chip)) {
+
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
val | INGRESSQID((u32)q->cntxt_id));
} else {
- writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS);
+ writel(val | INGRESSQID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_GTS);
wmb();
}
return work_done;
@@ -2047,11 +2057,16 @@ static unsigned int process_intrq(struct adapter *adap)
}
val = CIDXINC(credits) | SEINTARM(q->intr_params);
- if (is_t4(adap->params.chip)) {
+
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
val | INGRESSQID(q->cntxt_id));
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_GTS);
+ writel(val | INGRESSQID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_GTS);
wmb();
}
spin_unlock(&adap->sge.intrq_lock);
@@ -2235,48 +2250,32 @@ static void sge_tx_timer_cb(unsigned long data)
}
/**
- * udb_address - return the BAR2 User Doorbell address for a Queue
- * @adap: the adapter
- * @cntxt_id: the Queue Context ID
- * @qpp: Queues Per Page (for all PFs)
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
*
- * Returns the BAR2 address of the user Doorbell associated with the
- * indicated Queue Context ID. Note that this is only applicable
- * for T5 and later.
- */
-static u64 udb_address(struct adapter *adap, unsigned int cntxt_id,
- unsigned int qpp)
-{
- u64 udb;
- unsigned int s_qpp;
- unsigned short udb_density;
- unsigned long qpshift;
- int page;
-
- BUG_ON(is_t4(adap->params.chip));
-
- s_qpp = (QUEUESPERPAGEPF0 +
- (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn);
- udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
- qpshift = PAGE_SHIFT - ilog2(udb_density);
- udb = (u64)cntxt_id << qpshift;
- udb &= PAGE_MASK;
- page = udb / PAGE_SIZE;
- udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE;
-
- return udb;
-}
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
-static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id)
-{
- return udb_address(adap, cntxt_id,
- t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
-}
+ ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
-static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id)
-{
- return udb_address(adap, cntxt_id,
- t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+ return adapter->bar2 + bar2_qoffset;
}
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
@@ -2297,20 +2296,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
- FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
- c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
+ FW_IQ_CMD_IQGTSMODE_F |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
@@ -2323,12 +2322,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
- FW_IQ_CMD_FL0FETCHRO(1) |
- FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN(1));
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
- FW_IQ_CMD_FL0FBMAX(3));
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_F |
+ FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0PADEN_F);
+ c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
+ FW_IQ_CMD_FL0FBMAX_V(3));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
@@ -2344,8 +2343,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->next_intr_params = iq->intr_params;
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
- if (!is_t4(adap->params.chip))
- iq->udb = udb_address_iq(adap, iq->cntxt_id);
+ iq->bar2_addr = bar2_address(adap,
+ iq->cntxt_id,
+ T4_BAR2_QTYPE_INGRESS,
+ &iq->bar2_qid);
iq->size--; /* subtract status entry */
iq->netdev = dev;
iq->handler = hnd;
@@ -2362,11 +2363,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
- /* Note, we must initialize the Free List User Doorbell
- * address before refilling the Free List!
+ /* Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
*/
- if (!is_t4(adap->params.chip))
- fl->udb = udb_address_eq(adap, fl->cntxt_id);
+ fl->bar2_addr = bar2_address(adap,
+ fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2392,9 +2395,10 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
- if (!is_t4(adap->params.chip))
- q->udb = udb_address_eq(adap, q->cntxt_id);
-
+ q->bar2_addr = bar2_address(adap,
+ q->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &q->bar2_qid);
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
@@ -2423,21 +2427,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
- FW_EQ_ETH_CMD_VIID(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
- FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO(1) |
- FW_EQ_ETH_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
- FW_EQ_ETH_CMD_FBMAX(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+ c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_V(1) |
+ FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
+ FW_EQ_ETH_CMD_FBMAX_V(3) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2451,7 +2456,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
txq->mapping_err = 0;
@@ -2476,22 +2481,22 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
if (!txq->q.desc)
return -ENOMEM;
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_CTRL_CMD_PFN(adap->fn) |
- FW_EQ_CTRL_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
- FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
+ FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
- FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO |
- FW_EQ_CTRL_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
- FW_EQ_CTRL_CMD_FBMAX(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
- FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F |
+ FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
+ FW_EQ_CTRL_CMD_FBMAX_V(3) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2503,7 +2508,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+ init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
@@ -2530,20 +2535,20 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_OFLD_CMD_PFN(adap->fn) |
- FW_EQ_OFLD_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
- FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
- FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO(1) |
- FW_EQ_OFLD_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
- FW_EQ_OFLD_CMD_FBMAX(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
- FW_EQ_OFLD_CMD_EQSIZE(nentries));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+ FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
+ c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F |
+ FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
+ FW_EQ_OFLD_CMD_FBMAX_V(3) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2557,7 +2562,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
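Taken together, the sge.c hunks above replace the per-queue udb offset with a pre-resolved (bar2_addr, bar2_qid) pair, so the fast paths test the pointer instead of calling is_t4(). The following is a condensed restatement of that doorbell pattern from ring_fl_db()/ring_tx_db() above, purely for illustration (it is not a new function added by the patch, and it omits the T4 dbfifo/locking details):

/* Illustration only: the common doorbell shape after this patch. */
static inline void ring_db(struct adapter *adap, struct sge_txq *q, u32 val)
{
	if (unlikely(q->bar2_addr == NULL))		/* T4: no BAR2 UDB */
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     val | QID(q->cntxt_id));
	else						/* T5+: BAR2 UDB */
		writel(val | QID(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_KDOORBELL);
}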
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 163a2a14948c..c132d9030729 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -188,9 +188,9 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (pcie_fw & FW_PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
- reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+ reason[PCIE_FW_EVAL_G(pcie_fw)]);
}
/*
@@ -310,16 +310,17 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
}
res = t4_read_reg64(adap, data_reg);
- if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+ if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
- res = FW_CMD_RETVAL(EIO);
- } else if (rpl)
+ res = FW_CMD_RETVAL_V(EIO);
+ } else if (rpl) {
get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
- if (FW_CMD_RETVAL_GET((int)res))
+ if (FW_CMD_RETVAL_G((int)res))
dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
- return -FW_CMD_RETVAL_GET((int)res);
+ return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -483,12 +484,12 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_MC0 = 2 -- For T5
* MEM_MC1 = 3 -- For T5
*/
- edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
+ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
- mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
- MA_EXT_MEMORY_BAR));
+ mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+ MA_EXT_MEMORY1_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
@@ -710,8 +711,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
- cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
ret = t4_query_params(adapter, adapter->mbox, 0, 0,
1, &cclk_param, &cclk_val);
@@ -992,10 +993,10 @@ static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
install:
dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
"installing firmware %u.%u.%u.%u on card.\n",
- FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
- FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
- FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
- FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
return 1;
}
@@ -1067,12 +1068,12 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
"driver compiled with %d.%d.%d.%d, "
"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
state,
- FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
- FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
- FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
- FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
- FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
- FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+ FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
ret = EINVAL;
goto bye;
}
@@ -1131,6 +1132,27 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
return FLASH_CFG_START;
}
+/* Return TRUE if the specified firmware matches the adapter. I.e. T4
+ * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
+ * and emit an error message for mismatched firmware to save our caller the
+ * effort ...
+ */
+static bool t4_fw_matches_chip(const struct adapter *adap,
+ const struct fw_hdr *hdr)
+{
+ /* The expression below will return FALSE for any unsupported adapter
+ * which will keep us "honest" in the future ...
+ */
+ if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ return true;
+
+ dev_err(adap->pdev_dev,
+ "FW image (%d) is not suitable for this adapter (%d)\n",
+ hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
+ return false;
+}
+
/**
* t4_load_fw - download firmware
* @adap: the adapter
@@ -1170,6 +1192,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
FW_MAX_SIZE);
return -EFBIG;
}
+ if (!t4_fw_matches_chip(adap, hdr))
+ return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
csum += ntohl(p[i]);
@@ -1212,6 +1236,8 @@ out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
ret);
+ else
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
return ret;
}
@@ -1236,7 +1262,7 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
- unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+ unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
lc->link_ok = 0;
if (lc->requested_fc & PAUSE_RX)
@@ -1245,9 +1271,9 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
@@ -1275,9 +1301,9 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -1563,7 +1589,7 @@ static void cim_intr_handler(struct adapter *adapter)
int fat;
- if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
t4_report_fw_error(adapter);
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
@@ -2071,9 +2097,9 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_RSS_IND_TBL_CMD_VIID_V(viid));
cmd.retval_len16 = htonl(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
@@ -2090,13 +2116,13 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
while (nq > 0) {
unsigned int v;
- v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+ v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
- v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+ v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
- v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+ v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
@@ -2126,14 +2152,14 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
c.retval_len16 = htonl(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
- c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
- htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
} else
return -EINVAL;
@@ -2350,7 +2376,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"KR/KX",
"KR/KX/KX4",
"R QSFP_10G",
- "",
+ "R QSA",
"R QSFP",
"R BP40_BA",
};
@@ -2553,18 +2579,18 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
- wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
- wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
- V_FW_FILTER_WR_NOREPLY(qid < 0));
- wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_NOREPLY_V(qid < 0));
+ wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
- wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+ wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
#define INIT_CMD(var, cmd, rd_wr) do { \
- (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
- FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+ (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
@@ -2574,9 +2600,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
c.cycles_to_len16 = htonl(FW_LEN16(c));
c.u.addrval.addr = htonl(addr);
c.u.addrval.val = htonl(val);
@@ -2602,11 +2628,11 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
- FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = htons(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -2632,11 +2658,11 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
- FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = htons(reg);
c.u.mdio.rval = htons(val);
@@ -2773,13 +2799,13 @@ retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
c.err_to_clearinit = htonl(
- FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
- FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
- FW_HELLO_CMD_MBMASTER_MASK) |
- FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
- FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
- FW_HELLO_CMD_CLEARINIT);
+ FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
+ FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
+ FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT_F);
/*
* Issue the HELLO command to the firmware. If it's not successful
@@ -2792,17 +2818,17 @@ retry:
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
- if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
t4_report_fw_error(adap);
return ret;
}
v = ntohl(c.err_to_clearinit);
- master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
+ master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
- if (v & FW_HELLO_CMD_ERR)
+ if (v & FW_HELLO_CMD_ERR_F)
*state = DEV_STATE_ERR;
- else if (v & FW_HELLO_CMD_INIT)
+ else if (v & FW_HELLO_CMD_INIT_F)
*state = DEV_STATE_INIT;
else
*state = DEV_STATE_UNINIT;
@@ -2817,9 +2843,9 @@ retry:
* and we wouldn't want to fail pointlessly. (This can happen when an
* OS loads lots of different drivers rapidly at the same time). In
* this case, the Master PF returned by the firmware will be
- * FW_PCIE_FW_MASTER_MASK so the test below will work ...
+ * PCIE_FW_MASTER_M so the test below will work ...
*/
- if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
+ if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
master_mbox != mbox) {
int waiting = FW_CMD_HELLO_TIMEOUT;
@@ -2843,7 +2869,7 @@ retry:
* our retries ...
*/
pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
+ if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -2858,9 +2884,9 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & FW_PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR)
*state = DEV_STATE_ERR;
- else if (pcie_fw & FW_PCIE_FW_INIT)
+ else if (pcie_fw & PCIE_FW_INIT)
*state = DEV_STATE_INIT;
}
@@ -2869,9 +2895,9 @@ retry:
* there's not a valid Master PF, grab its identity
* for our caller.
*/
- if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
- (pcie_fw & FW_PCIE_FW_MASTER_VLD))
- master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
+ if (master_mbox == PCIE_FW_MASTER_M &&
+ (pcie_fw & PCIE_FW_MASTER_VLD))
+ master_mbox = PCIE_FW_MASTER_G(pcie_fw);
break;
}
}
@@ -2939,7 +2965,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* Issues a RESET command to firmware (if desired) with a HALT indication
* and then puts the microprocessor into RESET state. The RESET command
* will only be issued if a legitimate mailbox is provided (mbox <=
- * FW_PCIE_FW_MASTER_MASK).
+ * PCIE_FW_MASTER_M).
*
* This is generally used in order for the host to safely manipulate the
* adapter without fear of conflicting with whatever the firmware might
@@ -2954,13 +2980,13 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* If a legitimate mailbox is provided, issue a RESET command
* with a HALT indication.
*/
- if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
struct fw_reset_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = htonl(PIORST | PIORSTMODE);
- c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
+ c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2979,8 +3005,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
*/
if (ret == 0 || force) {
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
- t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
- FW_PCIE_FW_HALT);
+ t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+ PCIE_FW_HALT_F);
}
/*
@@ -3019,7 +3045,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
+ t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -3028,7 +3054,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* valid mailbox or the RESET command failed, fall back to
* hitting the chip with a hammer.
*/
- if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
msleep(100);
if (t4_fw_reset(adap, mbox,
@@ -3043,7 +3069,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
+ if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -3080,6 +3106,9 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
+ if (!t4_fw_matches_chip(adap, fw_hdr))
+ return -EINVAL;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
return ret;
@@ -3250,9 +3279,9 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
- FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
for (i = 0; i < nparams; i++, p += 2)
*p = htonl(*params++);
@@ -3290,10 +3319,10 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
@@ -3328,9 +3357,9 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
while (nparams--) {
*p++ = htonl(*params++);
@@ -3370,20 +3399,20 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
- FW_PFVF_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+ FW_PFVF_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
- c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
- FW_PFVF_CMD_NIQ(rxq));
- c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
- FW_PFVF_CMD_PMASK(pmask) |
- FW_PFVF_CMD_NEQ(txq));
- c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
- FW_PFVF_CMD_NEXACTF(nexact));
- c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
- FW_PFVF_CMD_WX_CAPS(wxcaps) |
- FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+ FW_PFVF_CMD_NIQ_V(rxq));
+ c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
+ FW_PFVF_CMD_PMASK_V(pmask) |
+ FW_PFVF_CMD_NEQ_V(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
+ FW_PFVF_CMD_NEXACTF_V(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+ FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3412,11 +3441,11 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
- c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -3437,8 +3466,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
}
if (rss_size)
- *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
- return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
+ *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
}
/**
@@ -3465,23 +3494,23 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (mtu < 0)
mtu = FW_RXMODE_MTU_NO_CHG;
if (promisc < 0)
- promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
if (all_multi < 0)
- all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
if (bcast < 0)
- bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
if (vlanex < 0)
- vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -3522,15 +3551,15 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16((naddr + 2) / 2));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V((naddr + 2) / 2));
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
}
@@ -3539,7 +3568,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
return ret;
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
if (idx)
idx[i] = index >= max_naddr ? 0xffff : index;
@@ -3585,17 +3614,17 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_SMAC_RESULT(mode) |
- FW_VI_MAC_CMD_IDX(idx));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
- ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
@@ -3619,11 +3648,11 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
- FW_VI_MAC_CMD_HASHUNIEN(ucast) |
- FW_CMD_LEN16(1));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -3646,12 +3675,12 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
- FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
- FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
+ FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3686,9 +3715,9 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
c.blinkdur = htons(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3713,11 +3742,11 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
- FW_IQ_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
c.iqid = htons(iqid);
c.fl0id = htons(fl0id);
c.fl1id = htons(fl1id);
@@ -3740,11 +3769,11 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
- FW_EQ_ETH_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
+ FW_EQ_ETH_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3764,11 +3793,11 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
- FW_EQ_CTRL_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
+ FW_EQ_CTRL_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3788,11 +3817,11 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
- FW_EQ_OFLD_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
+ FW_EQ_OFLD_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3810,25 +3839,25 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
- int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+ int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
u32 stat = ntohl(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
- if (stat & FW_PORT_CMD_RXPAUSE)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
@@ -4002,6 +4031,126 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
+ * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID for @qid, or 0
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /* T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /* Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /* Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+ ? adapter->params.sge.eq_qpp
+ : adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /* Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
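For context on how the new helper is expected to be consumed, here is a minimal usage sketch (not part of this patch). Everything other than cxgb4_t4_bar2_sge_qregs() and T4_BAR2_QTYPE_EGRESS -- the mapped BAR2 pointer in adap->bar2, the QID_V()/PIDX_V() field macros and the SGE_UDB_KDOORBELL offset -- should be read as assumptions about the surrounding driver.

/* Sketch: ring an Egress Queue's kernel doorbell through BAR2.
 * Names other than cxgb4_t4_bar2_sge_qregs() are assumptions.
 */
static void example_ring_egress_db(struct adapter *adap,
				   unsigned int eq_id, unsigned int pidx_inc)
{
	u64 bar2_qoffset;
	unsigned int bar2_qid;

	/* T4 has no BAR2 SGE Queue registers; the helper returns -EINVAL. */
	if (cxgb4_t4_bar2_sge_qregs(adap, eq_id, T4_BAR2_QTYPE_EGRESS,
				    &bar2_qoffset, &bar2_qid))
		return;

	/* bar2_qid == 0: the hardware infers the Absolute Queue ID from
	 * the BAR2 offset alone; otherwise the QID field must carry it.
	 */
	writel(QID_V(bar2_qid) | PIDX_V(pidx_inc),
	       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
}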
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /* Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+ s_hps = (HOSTPAGESIZEPF0_S +
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
+
+	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
+ */
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+ qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+
+ return 0;
+}
+
+/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
*
@@ -4121,11 +4270,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ |
- FW_PORT_CMD_PORTID(j));
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
c.action_to_len16 = htonl(
- FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret)
@@ -4143,13 +4292,13 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
adap->port[i]->dev_port = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
- FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
- p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
+ p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+ p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ |
+ rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
rvc.retval_len16 = htonl(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 5f4db2398c71..0f89f68948ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -205,16 +205,62 @@ struct work_request_hdr {
#define WR_HDR struct work_request_hdr wr
/* option 0 fields */
-#define S_MSS_IDX 60
-#define M_MSS_IDX 0xF
-#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
-#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+#define TX_CHAN_S 2
+#define TX_CHAN_V(x) ((x) << TX_CHAN_S)
+
+#define ULP_MODE_S 8
+#define ULP_MODE_V(x) ((x) << ULP_MODE_S)
+
+#define RCV_BUFSIZ_S 12
+#define RCV_BUFSIZ_M 0x3FFU
+#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S)
+
+#define SMAC_SEL_S 28
+#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S)
+
+#define L2T_IDX_S 36
+#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S)
+
+#define WND_SCALE_S 50
+#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S)
+
+#define KEEP_ALIVE_S 54
+#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S)
+#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL)
+
+#define MSS_IDX_S 60
+#define MSS_IDX_M 0xF
+#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S)
+#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M)
/* option 2 fields */
-#define S_RSS_QUEUE 0
-#define M_RSS_QUEUE 0x3FF
-#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
-#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+#define RSS_QUEUE_S 0
+#define RSS_QUEUE_M 0x3FF
+#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S)
+#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M)
+
+#define RSS_QUEUE_VALID_S 10
+#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S)
+#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U)
+
+#define RX_FC_DISABLE_S 20
+#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S)
+#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U)
+
+#define RX_FC_VALID_S 22
+#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S)
+#define RX_FC_VALID_F RX_FC_VALID_V(1U)
+
+#define RX_CHANNEL_S 26
+#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
+
+#define WND_SCALE_EN_S 28
+#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
+#define WND_SCALE_EN_F WND_SCALE_EN_V(1U)
+
+#define T5_OPT_2_VALID_S 31
+#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S)
+#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U)
struct cpl_pass_open_req {
WR_HDR;
@@ -224,20 +270,11 @@ struct cpl_pass_open_req {
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
-#define TX_CHAN(x) ((x) << 2)
#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
-#define ULP_MODE(x) ((x) << 8)
-#define RCV_BUFSIZ(x) ((x) << 12)
-#define RCV_BUFSIZ_MASK 0x3FFU
#define DSCP(x) ((x) << 22)
-#define SMAC_SEL(x) ((u64)(x) << 28)
-#define L2T_IDX(x) ((u64)(x) << 36)
#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
-#define WND_SCALE(x) ((u64)(x) << 50)
-#define KEEP_ALIVE(x) ((u64)(x) << 54)
-#define MSS_IDX(x) ((u64)(x) << 60)
__be64 opt1;
#define SYN_RSS_ENABLE (1 << 0)
#define SYN_RSS_QUEUE(x) ((x) << 2)
@@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
-#define RSS_QUEUE(x) ((x) << 0)
-#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
#define PACE(x) ((x) << 16)
-#define RX_FC_VALID ((1U) << 19)
-#define RX_FC_DISABLE ((1U) << 20)
#define TX_QUEUE(x) ((x) << 23)
-#define RX_CHANNEL(x) ((x) << 26)
#define CCTRL_ECN(x) ((x) << 27)
-#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
-#define T5_OPT_2_VALID ((1U) << 31)
__be64 opt0;
};
@@ -305,10 +335,10 @@ struct cpl_act_open_req {
__be32 opt2;
};
-#define S_FILTER_TUPLE 24
-#define M_FILTER_TUPLE 0xFFFFFFFFFF
-#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
-#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+#define FILTER_TUPLE_S 24
+#define FILTER_TUPLE_M 0xFFFFFFFFFF
+#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S)
+#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M)
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -579,10 +609,16 @@ struct cpl_rx_data_ack {
WR_HDR;
union opcode_tid ot;
__be32 credit_dack;
-#define RX_CREDITS(x) ((x) << 0)
-#define RX_FORCE_ACK(x) ((x) << 28)
};
+/* cpl_rx_data_ack.credit_dack fields */
+#define RX_CREDITS_S 0
+#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S)
+
+#define RX_FORCE_ACK_S 28
+#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
+#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
+
struct cpl_rx_pkt {
struct rss_header rsshdr;
u8 opcode;
@@ -803,6 +839,9 @@ enum {
ULP_TX_SC_ISGL = 0x83
};
+#define ULPTX_CMD_S 24
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
struct ulptx_sge_pair {
__be32 len[2];
__be64 addr[2];
@@ -810,7 +849,6 @@ struct ulptx_sge_pair {
struct ulptx_sgl {
__be32 cmd_nsge;
-#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
#define ULPTX_MORE (1U << 23)
__be32 len0;
@@ -821,15 +859,21 @@ struct ulptx_sgl {
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
-#define ULP_MEMIO_ORDER(x) ((x) << 23)
__be32 len16; /* command length */
__be32 dlen; /* data length in 32-byte units */
-#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
__be32 lock_addr;
-#define ULP_MEMIO_ADDR(x) ((x) << 0)
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+/* additional ulp_mem_io.cmd fields */
+#define ULP_MEMIO_ORDER_S 23
+#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
+#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U)
+
+#define T5_ULP_MEMIO_IMM_S 23
+#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
+#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
+
#define S_T5_ULP_MEMIO_IMM 23
#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
@@ -838,4 +882,12 @@ struct ulp_mem_io {
#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+/* ulp_mem_io.lock_addr fields */
+#define ULP_MEMIO_ADDR_S 0
+#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
+
+/* ulp_mem_io.dlen fields */
+#define ULP_MEMIO_DATA_LEN_S 0
+#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
+
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
new file mode 100644
index 000000000000..9e4f95a91fb4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -0,0 +1,160 @@
+/*
+ * This file is part of the Chelsio T4/T5 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/* The code can define cpp macros for creating a PCI Device ID Table. This is
+ * useful because it allows the PCI ID Table to be maintained in a single place.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We rely on this convention to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ /* T4 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */
+ CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */
+ CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */
+
+ /* T5 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
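As a hedged illustration of how a driver is intended to instantiate this table (a sketch only; the table name, the PF4 function value and the driver_data payload are assumptions made for the example, not taken from this patch):

/* Sketch: a PF4 driver builds its pci_device_id table by defining the
 * required macros and then including t4_pci_id_tbl.h.
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id example_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION	0x4

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{ PCI_VDEVICE(CHELSIO, (devid)), 4 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

MODULE_DEVICE_TABLE(pci, example_pci_tbl);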
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 8d2de1006b08..d7bd34ee65bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -155,17 +155,22 @@
#define HOSTPAGESIZEPF2_SHIFT 8
#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
-#define HOSTPAGESIZEPF1_MASK 0x0000000fU
-#define HOSTPAGESIZEPF1_SHIFT 4
-#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT)
+#define HOSTPAGESIZEPF1_M 0x0000000fU
+#define HOSTPAGESIZEPF1_S 4
+#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
-#define HOSTPAGESIZEPF0_MASK 0x0000000fU
-#define HOSTPAGESIZEPF0_SHIFT 0
-#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
+#define HOSTPAGESIZEPF0_M 0x0000000fU
+#define HOSTPAGESIZEPF0_S 0
+#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S)
#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define QUEUESPERPAGEPF0_MASK 0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+
+#define QUEUESPERPAGEPF1_S 4
+
+#define QUEUESPERPAGEPF0_S 0
+#define QUEUESPERPAGEPF0_MASK 0x0000000fU
+#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
#define QUEUESPERPAGEPF0 0
#define QUEUESPERPAGEPF1 4
@@ -323,6 +328,7 @@
#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
#define S_HP_INT_THRESH 28
#define M_HP_INT_THRESH 0xfU
@@ -511,21 +517,62 @@
#define MC_BIST_STATUS_RDATA 0x7688
-#define MA_EDRAM0_BAR 0x77c0
-#define MA_EDRAM1_BAR 0x77c4
-#define EDRAM_SIZE_MASK 0xfffU
-#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
+#define MA_EDRAM0_BAR_A 0x77c0
+
+#define EDRAM0_SIZE_S 0
+#define EDRAM0_SIZE_M 0xfffU
+#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
+#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M)
+
+#define MA_EDRAM1_BAR_A 0x77c4
+
+#define EDRAM1_SIZE_S 0
+#define EDRAM1_SIZE_M 0xfffU
+#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
+#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M)
+
+#define MA_EXT_MEMORY_BAR_A 0x77c8
+
+#define EXT_MEM_SIZE_S 0
+#define EXT_MEM_SIZE_M 0xfffU
+#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
+#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M)
+
+#define MA_EXT_MEMORY1_BAR_A 0x7808
+
+#define EXT_MEM1_SIZE_S 0
+#define EXT_MEM1_SIZE_M 0xfffU
+#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
+#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M)
+
+#define MA_EXT_MEMORY0_BAR_A 0x77c8
+
+#define EXT_MEM0_SIZE_S 0
+#define EXT_MEM0_SIZE_M 0xfffU
+#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
+#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M)
+
+#define MA_TARGET_MEM_ENABLE_A 0x77d8
+
+#define EXT_MEM_ENABLE_S 2
+#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S)
+#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U)
+
+#define EDRAM1_ENABLE_S 1
+#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S)
+#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U)
+
+#define EDRAM0_ENABLE_S 0
+#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S)
+#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U)
-#define MA_EXT_MEMORY_BAR 0x77c8
-#define EXT_MEM_SIZE_MASK 0x00000fffU
-#define EXT_MEM_SIZE_SHIFT 0
-#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
+#define EXT_MEM1_ENABLE_S 4
+#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S)
+#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U)
-#define MA_TARGET_MEM_ENABLE 0x77d8
-#define EXT_MEM1_ENABLE 0x00000010U
-#define EXT_MEM_ENABLE 0x00000004U
-#define EDRAM1_ENABLE 0x00000002U
-#define EDRAM0_ENABLE 0x00000001U
+#define EXT_MEM0_ENABLE_S 2
+#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
+#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
#define MA_INT_CAUSE 0x77e0
#define MEM_PERR_INT_CAUSE 0x00000002U
@@ -542,7 +589,6 @@
#define MA_PARITY_ERROR_STATUS 0x77f4
#define MA_PARITY_ERROR_STATUS2 0x7804
-#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 3409756a85b9..7c0aec85137a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -109,18 +109,49 @@ struct fw_wr_hdr {
__be32 lo;
};
-#define FW_WR_OP(x) ((x) << 24)
-#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
-#define FW_WR_ATOMIC(x) ((x) << 23)
-#define FW_WR_FLUSH(x) ((x) << 22)
-#define FW_WR_COMPL(x) ((x) << 21)
-#define FW_WR_IMMDLEN_MASK 0xff
-#define FW_WR_IMMDLEN(x) ((x) << 0)
-
-#define FW_WR_EQUIQ (1U << 31)
-#define FW_WR_EQUEQ (1U << 30)
-#define FW_WR_FLOWID(x) ((x) << 8)
-#define FW_WR_LEN16(x) ((x) << 0)
+/* work request opcode (hi) */
+#define FW_WR_OP_S 24
+#define FW_WR_OP_M 0xff
+#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S)
+#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */
+#define FW_WR_ATOMIC_S 23
+#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S)
+
+/* flush flag (hi) - firmware flushes flushable work requests buffered
+ * in the flow context.
+ */
+#define FW_WR_FLUSH_S 22
+#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S)
+
+/* completion flag (hi) - firmware generates a cpl_fw6_ack */
+#define FW_WR_COMPL_S 21
+#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S)
+#define FW_WR_COMPL_F FW_WR_COMPL_V(1U)
+
+/* work request immediate data length (hi) */
+#define FW_WR_IMMDLEN_S 0
+#define FW_WR_IMMDLEN_M 0xff
+#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S)
+
+/* egress queue status update to associated ingress queue entry (lo) */
+#define FW_WR_EQUIQ_S 31
+#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S)
+#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U)
+
+/* egress queue status update to egress queue status entry (lo) */
+#define FW_WR_EQUEQ_S 30
+#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S)
+#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U)
+
+/* flow context identifier (lo) */
+#define FW_WR_FLOWID_S 8
+#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S)
+
+/* length in units of 16-bytes (lo) */
+#define FW_WR_LEN16_S 0
+#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S)
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
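To make the renamed work-request header macros above concrete, a short hedged sketch of composing struct fw_wr_hdr follows; the opcode choice and the local variables d, imm_len, tid and len16 are assumptions for illustration only.

struct fw_wr_hdr *wr = (struct fw_wr_hdr *)d;	/* d: TX descriptor slot */

wr->hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
	       FW_WR_IMMDLEN_V(imm_len));
wr->lo = htonl(FW_WR_FLOWID_V(tid) | FW_WR_LEN16_V(len16));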
@@ -166,239 +197,239 @@ struct fw_filter_wr {
__u8 sma[6];
};
-#define S_FW_FILTER_WR_TID 12
-#define M_FW_FILTER_WR_TID 0xfffff
-#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
-#define G_FW_FILTER_WR_TID(x) \
- (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
-
-#define S_FW_FILTER_WR_RQTYPE 11
-#define M_FW_FILTER_WR_RQTYPE 0x1
-#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
-#define G_FW_FILTER_WR_RQTYPE(x) \
- (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
-#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
-
-#define S_FW_FILTER_WR_NOREPLY 10
-#define M_FW_FILTER_WR_NOREPLY 0x1
-#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
-#define G_FW_FILTER_WR_NOREPLY(x) \
- (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
-#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
-
-#define S_FW_FILTER_WR_IQ 0
-#define M_FW_FILTER_WR_IQ 0x3ff
-#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
-#define G_FW_FILTER_WR_IQ(x) \
- (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
-
-#define S_FW_FILTER_WR_DEL_FILTER 31
-#define M_FW_FILTER_WR_DEL_FILTER 0x1
-#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
-#define G_FW_FILTER_WR_DEL_FILTER(x) \
- (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
-#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
-
-#define S_FW_FILTER_WR_RPTTID 25
-#define M_FW_FILTER_WR_RPTTID 0x1
-#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
-#define G_FW_FILTER_WR_RPTTID(x) \
- (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
-#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
-
-#define S_FW_FILTER_WR_DROP 24
-#define M_FW_FILTER_WR_DROP 0x1
-#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
-#define G_FW_FILTER_WR_DROP(x) \
- (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
-#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
-
-#define S_FW_FILTER_WR_DIRSTEER 23
-#define M_FW_FILTER_WR_DIRSTEER 0x1
-#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
-#define G_FW_FILTER_WR_DIRSTEER(x) \
- (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
-#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
-
-#define S_FW_FILTER_WR_MASKHASH 22
-#define M_FW_FILTER_WR_MASKHASH 0x1
-#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
-#define G_FW_FILTER_WR_MASKHASH(x) \
- (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
-#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
-
-#define S_FW_FILTER_WR_DIRSTEERHASH 21
-#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
-#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
-#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
- (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
-#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
-
-#define S_FW_FILTER_WR_LPBK 20
-#define M_FW_FILTER_WR_LPBK 0x1
-#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
-#define G_FW_FILTER_WR_LPBK(x) \
- (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
-#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
-
-#define S_FW_FILTER_WR_DMAC 19
-#define M_FW_FILTER_WR_DMAC 0x1
-#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
-#define G_FW_FILTER_WR_DMAC(x) \
- (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
-#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
-
-#define S_FW_FILTER_WR_SMAC 18
-#define M_FW_FILTER_WR_SMAC 0x1
-#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
-#define G_FW_FILTER_WR_SMAC(x) \
- (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
-#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
-
-#define S_FW_FILTER_WR_INSVLAN 17
-#define M_FW_FILTER_WR_INSVLAN 0x1
-#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
-#define G_FW_FILTER_WR_INSVLAN(x) \
- (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
-#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
-
-#define S_FW_FILTER_WR_RMVLAN 16
-#define M_FW_FILTER_WR_RMVLAN 0x1
-#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
-#define G_FW_FILTER_WR_RMVLAN(x) \
- (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
-#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
-
-#define S_FW_FILTER_WR_HITCNTS 15
-#define M_FW_FILTER_WR_HITCNTS 0x1
-#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
-#define G_FW_FILTER_WR_HITCNTS(x) \
- (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
-#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
-
-#define S_FW_FILTER_WR_TXCHAN 13
-#define M_FW_FILTER_WR_TXCHAN 0x3
-#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
-#define G_FW_FILTER_WR_TXCHAN(x) \
- (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
-
-#define S_FW_FILTER_WR_PRIO 12
-#define M_FW_FILTER_WR_PRIO 0x1
-#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
-#define G_FW_FILTER_WR_PRIO(x) \
- (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
-#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
-
-#define S_FW_FILTER_WR_L2TIX 0
-#define M_FW_FILTER_WR_L2TIX 0xfff
-#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
-#define G_FW_FILTER_WR_L2TIX(x) \
- (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
-
-#define S_FW_FILTER_WR_FRAG 7
-#define M_FW_FILTER_WR_FRAG 0x1
-#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
-#define G_FW_FILTER_WR_FRAG(x) \
- (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
-#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
-
-#define S_FW_FILTER_WR_FRAGM 6
-#define M_FW_FILTER_WR_FRAGM 0x1
-#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
-#define G_FW_FILTER_WR_FRAGM(x) \
- (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
-#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
-
-#define S_FW_FILTER_WR_IVLAN_VLD 5
-#define M_FW_FILTER_WR_IVLAN_VLD 0x1
-#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
-#define G_FW_FILTER_WR_IVLAN_VLD(x) \
- (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
-#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
-
-#define S_FW_FILTER_WR_OVLAN_VLD 4
-#define M_FW_FILTER_WR_OVLAN_VLD 0x1
-#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
-#define G_FW_FILTER_WR_OVLAN_VLD(x) \
- (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
-#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
-
-#define S_FW_FILTER_WR_IVLAN_VLDM 3
-#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
-#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
-#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
- (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
-#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
-
-#define S_FW_FILTER_WR_OVLAN_VLDM 2
-#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
-#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
-#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
- (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
-#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
-
-#define S_FW_FILTER_WR_RX_CHAN 15
-#define M_FW_FILTER_WR_RX_CHAN 0x1
-#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
-#define G_FW_FILTER_WR_RX_CHAN(x) \
- (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
-#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
-
-#define S_FW_FILTER_WR_RX_RPL_IQ 0
-#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
-#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
-#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
- (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
-
-#define S_FW_FILTER_WR_MACI 23
-#define M_FW_FILTER_WR_MACI 0x1ff
-#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
-#define G_FW_FILTER_WR_MACI(x) \
- (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
-
-#define S_FW_FILTER_WR_MACIM 14
-#define M_FW_FILTER_WR_MACIM 0x1ff
-#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
-#define G_FW_FILTER_WR_MACIM(x) \
- (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
-
-#define S_FW_FILTER_WR_FCOE 13
-#define M_FW_FILTER_WR_FCOE 0x1
-#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
-#define G_FW_FILTER_WR_FCOE(x) \
- (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
-#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
-
-#define S_FW_FILTER_WR_FCOEM 12
-#define M_FW_FILTER_WR_FCOEM 0x1
-#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
-#define G_FW_FILTER_WR_FCOEM(x) \
- (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
-#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
-
-#define S_FW_FILTER_WR_PORT 9
-#define M_FW_FILTER_WR_PORT 0x7
-#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
-#define G_FW_FILTER_WR_PORT(x) \
- (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
-
-#define S_FW_FILTER_WR_PORTM 6
-#define M_FW_FILTER_WR_PORTM 0x7
-#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
-#define G_FW_FILTER_WR_PORTM(x) \
- (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
-
-#define S_FW_FILTER_WR_MATCHTYPE 3
-#define M_FW_FILTER_WR_MATCHTYPE 0x7
-#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
-#define G_FW_FILTER_WR_MATCHTYPE(x) \
- (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
-
-#define S_FW_FILTER_WR_MATCHTYPEM 0
-#define M_FW_FILTER_WR_MATCHTYPEM 0x7
-#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
-#define G_FW_FILTER_WR_MATCHTYPEM(x) \
- (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define FW_FILTER_WR_TID_S 12
+#define FW_FILTER_WR_TID_M 0xfffff
+#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S)
+#define FW_FILTER_WR_TID_G(x) \
+ (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M)
+
+#define FW_FILTER_WR_RQTYPE_S 11
+#define FW_FILTER_WR_RQTYPE_M 0x1
+#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S)
+#define FW_FILTER_WR_RQTYPE_G(x) \
+ (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M)
+#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U)
+
+#define FW_FILTER_WR_NOREPLY_S 10
+#define FW_FILTER_WR_NOREPLY_M 0x1
+#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S)
+#define FW_FILTER_WR_NOREPLY_G(x) \
+ (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M)
+#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U)
+
+#define FW_FILTER_WR_IQ_S 0
+#define FW_FILTER_WR_IQ_M 0x3ff
+#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S)
+#define FW_FILTER_WR_IQ_G(x) \
+ (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M)
+
+#define FW_FILTER_WR_DEL_FILTER_S 31
+#define FW_FILTER_WR_DEL_FILTER_M 0x1
+#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S)
+#define FW_FILTER_WR_DEL_FILTER_G(x) \
+ (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M)
+#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U)
+
+#define FW_FILTER_WR_RPTTID_S 25
+#define FW_FILTER_WR_RPTTID_M 0x1
+#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S)
+#define FW_FILTER_WR_RPTTID_G(x) \
+ (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M)
+#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U)
+
+#define FW_FILTER_WR_DROP_S 24
+#define FW_FILTER_WR_DROP_M 0x1
+#define FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S)
+#define FW_FILTER_WR_DROP_G(x) \
+ (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M)
+#define FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U)
+
+#define FW_FILTER_WR_DIRSTEER_S 23
+#define FW_FILTER_WR_DIRSTEER_M 0x1
+#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S)
+#define FW_FILTER_WR_DIRSTEER_G(x) \
+ (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M)
+#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U)
+
+#define FW_FILTER_WR_MASKHASH_S 22
+#define FW_FILTER_WR_MASKHASH_M 0x1
+#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S)
+#define FW_FILTER_WR_MASKHASH_G(x) \
+ (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M)
+#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U)
+
+#define FW_FILTER_WR_DIRSTEERHASH_S 21
+#define FW_FILTER_WR_DIRSTEERHASH_M 0x1
+#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S)
+#define FW_FILTER_WR_DIRSTEERHASH_G(x) \
+ (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M)
+#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U)
+
+#define FW_FILTER_WR_LPBK_S 20
+#define FW_FILTER_WR_LPBK_M 0x1
+#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S)
+#define FW_FILTER_WR_LPBK_G(x) \
+ (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M)
+#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U)
+
+#define FW_FILTER_WR_DMAC_S 19
+#define FW_FILTER_WR_DMAC_M 0x1
+#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S)
+#define FW_FILTER_WR_DMAC_G(x) \
+ (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M)
+#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U)
+
+#define FW_FILTER_WR_SMAC_S 18
+#define FW_FILTER_WR_SMAC_M 0x1
+#define FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S)
+#define FW_FILTER_WR_SMAC_G(x) \
+ (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M)
+#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U)
+
+#define FW_FILTER_WR_INSVLAN_S 17
+#define FW_FILTER_WR_INSVLAN_M 0x1
+#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S)
+#define FW_FILTER_WR_INSVLAN_G(x) \
+ (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M)
+#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U)
+
+#define FW_FILTER_WR_RMVLAN_S 16
+#define FW_FILTER_WR_RMVLAN_M 0x1
+#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S)
+#define FW_FILTER_WR_RMVLAN_G(x) \
+ (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M)
+#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U)
+
+#define FW_FILTER_WR_HITCNTS_S 15
+#define FW_FILTER_WR_HITCNTS_M 0x1
+#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S)
+#define FW_FILTER_WR_HITCNTS_G(x) \
+ (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M)
+#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U)
+
+#define FW_FILTER_WR_TXCHAN_S 13
+#define FW_FILTER_WR_TXCHAN_M 0x3
+#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S)
+#define FW_FILTER_WR_TXCHAN_G(x) \
+ (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M)
+
+#define FW_FILTER_WR_PRIO_S 12
+#define FW_FILTER_WR_PRIO_M 0x1
+#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S)
+#define FW_FILTER_WR_PRIO_G(x) \
+ (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M)
+#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U)
+
+#define FW_FILTER_WR_L2TIX_S 0
+#define FW_FILTER_WR_L2TIX_M 0xfff
+#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S)
+#define FW_FILTER_WR_L2TIX_G(x) \
+ (((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M)
+
+#define FW_FILTER_WR_FRAG_S 7
+#define FW_FILTER_WR_FRAG_M 0x1
+#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S)
+#define FW_FILTER_WR_FRAG_G(x) \
+ (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M)
+#define FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U)
+
+#define FW_FILTER_WR_FRAGM_S 6
+#define FW_FILTER_WR_FRAGM_M 0x1
+#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S)
+#define FW_FILTER_WR_FRAGM_G(x) \
+ (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M)
+#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLD_S 5
+#define FW_FILTER_WR_IVLAN_VLD_M 0x1
+#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S)
+#define FW_FILTER_WR_IVLAN_VLD_G(x) \
+ (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M)
+#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLD_S 4
+#define FW_FILTER_WR_OVLAN_VLD_M 0x1
+#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S)
+#define FW_FILTER_WR_OVLAN_VLD_G(x) \
+ (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M)
+#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLDM_S 3
+#define FW_FILTER_WR_IVLAN_VLDM_M 0x1
+#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S)
+#define FW_FILTER_WR_IVLAN_VLDM_G(x) \
+ (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M)
+#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLDM_S 2
+#define FW_FILTER_WR_OVLAN_VLDM_M 0x1
+#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S)
+#define FW_FILTER_WR_OVLAN_VLDM_G(x) \
+ (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M)
+#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_RX_CHAN_S 15
+#define FW_FILTER_WR_RX_CHAN_M 0x1
+#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S)
+#define FW_FILTER_WR_RX_CHAN_G(x) \
+ (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M)
+#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U)
+
+#define FW_FILTER_WR_RX_RPL_IQ_S 0
+#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff
+#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S)
+#define FW_FILTER_WR_RX_RPL_IQ_G(x) \
+ (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M)
+
+#define FW_FILTER_WR_MACI_S 23
+#define FW_FILTER_WR_MACI_M 0x1ff
+#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S)
+#define FW_FILTER_WR_MACI_G(x) \
+ (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M)
+
+#define FW_FILTER_WR_MACIM_S 14
+#define FW_FILTER_WR_MACIM_M 0x1ff
+#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S)
+#define FW_FILTER_WR_MACIM_G(x) \
+ (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M)
+
+#define FW_FILTER_WR_FCOE_S 13
+#define FW_FILTER_WR_FCOE_M 0x1
+#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S)
+#define FW_FILTER_WR_FCOE_G(x) \
+ (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M)
+#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U)
+
+#define FW_FILTER_WR_FCOEM_S 12
+#define FW_FILTER_WR_FCOEM_M 0x1
+#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S)
+#define FW_FILTER_WR_FCOEM_G(x) \
+ (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M)
+#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U)
+
+#define FW_FILTER_WR_PORT_S 9
+#define FW_FILTER_WR_PORT_M 0x7
+#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S)
+#define FW_FILTER_WR_PORT_G(x) \
+ (((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M)
+
+#define FW_FILTER_WR_PORTM_S 6
+#define FW_FILTER_WR_PORTM_M 0x7
+#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S)
+#define FW_FILTER_WR_PORTM_G(x) \
+ (((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M)
+
+#define FW_FILTER_WR_MATCHTYPE_S 3
+#define FW_FILTER_WR_MATCHTYPE_M 0x7
+#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S)
+#define FW_FILTER_WR_MATCHTYPE_G(x) \
+ (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M)
+
+#define FW_FILTER_WR_MATCHTYPEM_S 0
+#define FW_FILTER_WR_MATCHTYPEM_M 0x7
+#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S)
+#define FW_FILTER_WR_MATCHTYPEM_G(x) \
+ (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M)
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -460,65 +491,65 @@ struct fw_ofld_connection_wr {
} tcb;
};
-#define S_FW_OFLD_CONNECTION_WR_VERSION 31
-#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
-#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
-#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
- M_FW_OFLD_CONNECTION_WR_VERSION)
-#define F_FW_OFLD_CONNECTION_WR_VERSION \
- V_FW_OFLD_CONNECTION_WR_VERSION(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_CPL 30
-#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
-#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
-#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
-#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
-#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
-#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
- M_FW_OFLD_CONNECTION_WR_T_STATE)
-
-#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
-#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
-#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
-#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
- M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
-
-#define S_FW_OFLD_CONNECTION_WR_ASTID 0
-#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
-#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
-#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
-
-#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
-#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
-#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
- M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
-#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
- V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
-#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
-#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
- M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
-#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
- V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+#define FW_OFLD_CONNECTION_WR_VERSION_S 31
+#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1
+#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_VERSION_S)
+#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \
+ FW_OFLD_CONNECTION_WR_VERSION_M)
+#define FW_OFLD_CONNECTION_WR_VERSION_F \
+ FW_OFLD_CONNECTION_WR_VERSION_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPL_S 30
+#define FW_OFLD_CONNECTION_WR_CPL_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S)
+#define FW_OFLD_CONNECTION_WR_CPL_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M)
+#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_T_STATE_S 28
+#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf
+#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S)
+#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \
+ FW_OFLD_CONNECTION_WR_T_STATE_M)
+
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S)
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \
+ FW_OFLD_CONNECTION_WR_RCV_SCALE_M)
+
+#define FW_OFLD_CONNECTION_WR_ASTID_S 0
+#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff
+#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_ASTID_S)
+#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M)
+
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \
+ FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \
+ FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \
+ FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \
+ FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U)
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
@@ -529,6 +560,7 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_RCVNXT,
FW_FLOWC_MNEM_SNDBUF,
FW_FLOWC_MNEM_MSS,
+ FW_FLOWC_MNEM_TXDATAPLEN_MAX,
};
struct fw_flowc_mnemval {
@@ -539,33 +571,56 @@ struct fw_flowc_mnemval {
struct fw_flowc_wr {
__be32 op_to_nparams;
-#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
__be32 flowid_len16;
struct fw_flowc_mnemval mnemval[0];
};
+#define FW_FLOWC_WR_NPARAMS_S 0
+#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S)
+
struct fw_ofld_tx_data_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
__be32 plen;
__be32 tunnel_to_proxy;
-#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
-#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
-#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
-#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
-#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
-#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
-#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
-#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
};
+#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19
+#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S)
+
+#define FW_OFLD_TX_DATA_WR_SAVE_S 18
+#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S)
+
+#define FW_OFLD_TX_DATA_WR_FLUSH_S 17
+#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S)
+#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_URGENT_S 16
+#define FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S)
+
+#define FW_OFLD_TX_DATA_WR_MORE_S 15
+#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S)
+
+#define FW_OFLD_TX_DATA_WR_SHOVE_S 14
+#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S)
+#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10
+#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S)
+
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \
+ ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S)
+
struct fw_cmd_wr {
__be32 op_dma;
-#define FW_CMD_WR_DMA (1U << 17)
__be32 len16_pkd;
__be64 cookie_daddr;
};
+#define FW_CMD_WR_DMA_S 17
+#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S)
+
struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
@@ -641,18 +696,39 @@ struct fw_cmd_hdr {
__be32 lo;
};
-#define FW_CMD_OP(x) ((x) << 24)
-#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
-#define FW_CMD_REQUEST (1U << 23)
-#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
-#define FW_CMD_READ (1U << 22)
-#define FW_CMD_WRITE (1U << 21)
-#define FW_CMD_EXEC (1U << 20)
-#define FW_CMD_RAMASK(x) ((x) << 20)
-#define FW_CMD_RETVAL(x) ((x) << 8)
-#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
-#define FW_CMD_LEN16(x) ((x) << 0)
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_CMD_OP_S 24
+#define FW_CMD_OP_M 0xff
+#define FW_CMD_OP_V(x) ((x) << FW_CMD_OP_S)
+#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M)
+
+#define FW_CMD_REQUEST_S 23
+#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S)
+#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U)
+
+#define FW_CMD_READ_S 22
+#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S)
+#define FW_CMD_READ_F FW_CMD_READ_V(1U)
+
+#define FW_CMD_WRITE_S 21
+#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S)
+#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U)
+
+#define FW_CMD_EXEC_S 20
+#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S)
+#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U)
+
+#define FW_CMD_RAMASK_S 20
+#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S)
+
+#define FW_CMD_RETVAL_S 8
+#define FW_CMD_RETVAL_M 0xff
+#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S)
+#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M)
+
+#define FW_CMD_LEN16_S 0
+#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S)
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -685,7 +761,8 @@ enum fw_ldst_func_mod_index {
struct fw_ldst_cmd {
__be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
+#define FW_LDST_CMD_ADDRSPACE_S 0
+#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
@@ -741,15 +818,33 @@ struct fw_ldst_cmd {
} u;
};
-#define FW_LDST_CMD_MSG(x) ((x) << 31)
-#define FW_LDST_CMD_PADDR(x) ((x) << 8)
-#define FW_LDST_CMD_MMD(x) ((x) << 0)
-#define FW_LDST_CMD_FID(x) ((x) << 15)
-#define FW_LDST_CMD_CTL(x) ((x) << 0)
-#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
-#define FW_LDST_CMD_LC (1U << 4)
-#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
-#define FW_LDST_CMD_FN(x) ((x) << 0)
+#define FW_LDST_CMD_MSG_S 31
+#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+
+#define FW_LDST_CMD_PADDR_S 8
+#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
+
+#define FW_LDST_CMD_MMD_S 0
+#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S)
+
+#define FW_LDST_CMD_FID_S 15
+#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
+
+#define FW_LDST_CMD_CTL_S 0
+#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+
+#define FW_LDST_CMD_RPLCPF_S 0
+#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
+
+#define FW_LDST_CMD_LC_S 4
+#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S)
+#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U)
+
+#define FW_LDST_CMD_FN_S 0
+#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S)
+
+#define FW_LDST_CMD_NACCESS_S 0
+#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -758,11 +853,12 @@ struct fw_reset_cmd {
__be32 halt_pkd;
};
-#define FW_RESET_CMD_HALT_SHIFT 31
-#define FW_RESET_CMD_HALT_MASK 0x1
-#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT)
-#define FW_RESET_CMD_HALT_GET(x) \
- (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
+#define FW_RESET_CMD_HALT_S 31
+#define FW_RESET_CMD_HALT_M 0x1
+#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S)
+#define FW_RESET_CMD_HALT_G(x) \
+ (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M)
+#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U)
enum fw_hellow_cmd {
fw_hello_cmd_stage_os = 0x0
@@ -772,22 +868,42 @@ struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
__be32 err_to_clearinit;
-#define FW_HELLO_CMD_ERR (1U << 31)
-#define FW_HELLO_CMD_INIT (1U << 30)
-#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
-#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
-#define FW_HELLO_CMD_MBMASTER_MASK 0xfU
-#define FW_HELLO_CMD_MBMASTER_SHIFT 24
-#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
-#define FW_HELLO_CMD_MBMASTER_GET(x) \
- (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
-#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
-#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
-#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
-#define FW_HELLO_CMD_CLEARINIT (1U << 16)
__be32 fwrev;
};
+#define FW_HELLO_CMD_ERR_S 31
+#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S)
+#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U)
+
+#define FW_HELLO_CMD_INIT_S 30
+#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S)
+#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U)
+
+#define FW_HELLO_CMD_MASTERDIS_S 29
+#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S)
+
+#define FW_HELLO_CMD_MASTERFORCE_S 28
+#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S)
+
+#define FW_HELLO_CMD_MBMASTER_S 24
+#define FW_HELLO_CMD_MBMASTER_M 0xfU
+#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S)
+#define FW_HELLO_CMD_MBMASTER_G(x) \
+ (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M)
+
+#define FW_HELLO_CMD_MBASYNCNOTINT_S 23
+#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S)
+
+#define FW_HELLO_CMD_MBASYNCNOT_S 20
+#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S)
+
+#define FW_HELLO_CMD_STAGE_S 17
+#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S)
+
+#define FW_HELLO_CMD_CLEARINIT_S 16
+#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S)
+#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U)
+
struct fw_bye_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -898,9 +1014,17 @@ struct fw_caps_config_cmd {
__be32 finicsum;
};
-#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27)
-#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24)
-#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+#define FW_CAPS_CONFIG_CMD_CFVALID_S 27
+#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S)
+#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U)
+
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \
+ ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S)
+
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \
+ ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S)
/*
* params command mnemonics
@@ -996,20 +1120,29 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
};
-#define FW_PARAMS_MNEM(x) ((x) << 24)
-#define FW_PARAMS_PARAM_X(x) ((x) << 16)
-#define FW_PARAMS_PARAM_Y_SHIFT 8
-#define FW_PARAMS_PARAM_Y_MASK 0xffU
-#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT)
-#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
- FW_PARAMS_PARAM_Y_MASK)
-#define FW_PARAMS_PARAM_Z_SHIFT 0
-#define FW_PARAMS_PARAM_Z_MASK 0xffu
-#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT)
-#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
- FW_PARAMS_PARAM_Z_MASK)
-#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
-#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
+#define FW_PARAMS_MNEM_S 24
+#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S)
+
+#define FW_PARAMS_PARAM_X_S 16
+#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S)
+
+#define FW_PARAMS_PARAM_Y_S 8
+#define FW_PARAMS_PARAM_Y_M 0xffU
+#define FW_PARAMS_PARAM_Y_V(x) ((x) << FW_PARAMS_PARAM_Y_S)
+#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\
+ FW_PARAMS_PARAM_Y_M)
+
+#define FW_PARAMS_PARAM_Z_S 0
+#define FW_PARAMS_PARAM_Z_M 0xffu
+#define FW_PARAMS_PARAM_Z_V(x) ((x) << FW_PARAMS_PARAM_Z_S)
+#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\
+ FW_PARAMS_PARAM_Z_M)
+
+#define FW_PARAMS_PARAM_XYZ_S 0
+#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S)
+
+#define FW_PARAMS_PARAM_YZ_S 0
+#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S)
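These macros build the 32-bit parameter keys passed through fw_params_cmd; the set_rxq_intr_params() hunk in cxgb4vf_main.c further down uses exactly this pattern. A short sketch, assuming cntxt_id holds the response queue's context id:

	u32 param;

	/* mnemonic in [31:24], X in [23:16], combined YZ in [15:0] */
	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
		FW_PARAMS_PARAM_YZ_V(cntxt_id);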
struct fw_params_cmd {
__be32 op_to_vfn;
@@ -1020,8 +1153,11 @@ struct fw_params_cmd {
} param[7];
};
-#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
-#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
+#define FW_PARAMS_CMD_PFN_S 8
+#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S)
+
+#define FW_PARAMS_CMD_VFN_S 0
+#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S)
struct fw_pfvf_cmd {
__be32 op_to_vfn;
@@ -1035,46 +1171,82 @@ struct fw_pfvf_cmd {
__be32 r4;
};
-#define FW_PFVF_CMD_PFN(x) ((x) << 8)
-#define FW_PFVF_CMD_VFN(x) ((x) << 0)
-
-#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
-#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
-
-#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
-#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_PFVF_CMD_TYPE (1 << 31)
-#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
-
-#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
-#define FW_PFVF_CMD_CMASK_MASK 0xf
-#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
-
-#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
-#define FW_PFVF_CMD_PMASK_MASK 0xf
-#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
-
-#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
-#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_PFVF_CMD_TC(x) ((x) << 24)
-#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
-
-#define FW_PFVF_CMD_NVI(x) ((x) << 16)
-#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
-
-#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
-#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
-
-#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
-#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
-
-#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
-#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
-
-#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
-#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
+#define FW_PFVF_CMD_PFN_S 8
+#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S)
+
+#define FW_PFVF_CMD_VFN_S 0
+#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S)
+
+#define FW_PFVF_CMD_NIQFLINT_S 20
+#define FW_PFVF_CMD_NIQFLINT_M 0xfff
+#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S)
+#define FW_PFVF_CMD_NIQFLINT_G(x) \
+ (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M)
+
+#define FW_PFVF_CMD_NIQ_S 0
+#define FW_PFVF_CMD_NIQ_M 0xfffff
+#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S)
+#define FW_PFVF_CMD_NIQ_G(x) \
+ (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M)
+
+#define FW_PFVF_CMD_TYPE_S 31
+#define FW_PFVF_CMD_TYPE_M 0x1
+#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S)
+#define FW_PFVF_CMD_TYPE_G(x) \
+ (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M)
+#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U)
+
+#define FW_PFVF_CMD_CMASK_S 24
+#define FW_PFVF_CMD_CMASK_M 0xf
+#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S)
+#define FW_PFVF_CMD_CMASK_G(x) \
+ (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M)
+
+#define FW_PFVF_CMD_PMASK_S 20
+#define FW_PFVF_CMD_PMASK_M 0xf
+#define FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S)
+#define FW_PFVF_CMD_PMASK_G(x) \
+ (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M)
+
+#define FW_PFVF_CMD_NEQ_S 0
+#define FW_PFVF_CMD_NEQ_M 0xfffff
+#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S)
+#define FW_PFVF_CMD_NEQ_G(x) \
+ (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M)
+
+#define FW_PFVF_CMD_TC_S 24
+#define FW_PFVF_CMD_TC_M 0xff
+#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S)
+#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M)
+
+#define FW_PFVF_CMD_NVI_S 16
+#define FW_PFVF_CMD_NVI_M 0xff
+#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S)
+#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M)
+
+#define FW_PFVF_CMD_NEXACTF_S 0
+#define FW_PFVF_CMD_NEXACTF_M 0xffff
+#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S)
+#define FW_PFVF_CMD_NEXACTF_G(x) \
+ (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M)
+
+#define FW_PFVF_CMD_R_CAPS_S 24
+#define FW_PFVF_CMD_R_CAPS_M 0xff
+#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S)
+#define FW_PFVF_CMD_R_CAPS_G(x) \
+ (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M)
+
+#define FW_PFVF_CMD_WX_CAPS_S 16
+#define FW_PFVF_CMD_WX_CAPS_M 0xff
+#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S)
+#define FW_PFVF_CMD_WX_CAPS_G(x) \
+ (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M)
+
+#define FW_PFVF_CMD_NETHCTRL_S 0
+#define FW_PFVF_CMD_NETHCTRL_M 0xffff
+#define FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S)
+#define FW_PFVF_CMD_NETHCTRL_G(x) \
+ (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M)
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
@@ -1102,85 +1274,239 @@ struct fw_iq_cmd {
__be64 fl1addr;
};
-#define FW_IQ_CMD_PFN(x) ((x) << 8)
-#define FW_IQ_CMD_VFN(x) ((x) << 0)
-
-#define FW_IQ_CMD_ALLOC (1U << 31)
-#define FW_IQ_CMD_FREE (1U << 30)
-#define FW_IQ_CMD_MODIFY (1U << 29)
-#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
-#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
-
-#define FW_IQ_CMD_TYPE(x) ((x) << 29)
-#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
-#define FW_IQ_CMD_VIID(x) ((x) << 16)
-#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
-#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
-#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
-#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
-
-#define FW_IQ_CMD_IQDROPRSS (1U << 15)
-#define FW_IQ_CMD_IQGTSMODE (1U << 14)
-#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
-#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
-#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
-#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
-#define FW_IQ_CMD_IQO (1U << 3)
-#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
-#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
-
-#define FW_IQ_CMD_IQNS(x) ((x) << 31)
-#define FW_IQ_CMD_IQRO(x) ((x) << 30)
-#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
-#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
-#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
-#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
-#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
-#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
-#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
-#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
-#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
-#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
-#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
-#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
-#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
-#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
-#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
-#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
-#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
-#define FW_IQ_CMD_FL0CONGEN (1U << 0)
-
-#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
-#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
-#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
-#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
-#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
-#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
-
-#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
-#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
-#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
-#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
-#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
-#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
-#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
-#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
-#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
-#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
-#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
-#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
-#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL1PADEN (1U << 2)
-#define FW_IQ_CMD_FL1PACKEN (1U << 1)
-#define FW_IQ_CMD_FL1CONGEN (1U << 0)
-
-#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
-#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
-#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
-#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
-#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
-#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
+#define FW_IQ_CMD_PFN_S 8
+#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S)
+
+#define FW_IQ_CMD_VFN_S 0
+#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S)
+
+#define FW_IQ_CMD_ALLOC_S 31
+#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S)
+#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U)
+
+#define FW_IQ_CMD_FREE_S 30
+#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S)
+#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U)
+
+#define FW_IQ_CMD_MODIFY_S 29
+#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S)
+#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U)
+
+#define FW_IQ_CMD_IQSTART_S 28
+#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S)
+#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U)
+
+#define FW_IQ_CMD_IQSTOP_S 27
+#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S)
+#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U)
+
+#define FW_IQ_CMD_TYPE_S 29
+#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S)
+
+#define FW_IQ_CMD_IQASYNCH_S 28
+#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S)
+
+#define FW_IQ_CMD_VIID_S 16
+#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S)
+
+#define FW_IQ_CMD_IQANDST_S 15
+#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S)
+
+#define FW_IQ_CMD_IQANUS_S 14
+#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S)
+
+#define FW_IQ_CMD_IQANUD_S 12
+#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S)
+
+#define FW_IQ_CMD_IQANDSTINDEX_S 0
+#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S)
+
+#define FW_IQ_CMD_IQDROPRSS_S 15
+#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S)
+#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U)
+
+#define FW_IQ_CMD_IQGTSMODE_S 14
+#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S)
+#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U)
+
+#define FW_IQ_CMD_IQPCIECH_S 12
+#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S)
+
+#define FW_IQ_CMD_IQDCAEN_S 11
+#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S)
+
+#define FW_IQ_CMD_IQDCACPU_S 6
+#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S)
+
+#define FW_IQ_CMD_IQINTCNTTHRESH_S 4
+#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << FW_IQ_CMD_IQINTCNTTHRESH_S)
+
+#define FW_IQ_CMD_IQO_S 3
+#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S)
+#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U)
+
+#define FW_IQ_CMD_IQCPRIO_S 2
+#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S)
+
+#define FW_IQ_CMD_IQESIZE_S 0
+#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S)
+
+#define FW_IQ_CMD_IQNS_S 31
+#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S)
+
+#define FW_IQ_CMD_IQRO_S 30
+#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S)
+
+#define FW_IQ_CMD_IQFLINTIQHSEN_S 28
+#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S)
+
+#define FW_IQ_CMD_IQFLINTCONGEN_S 27
+#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+
+#define FW_IQ_CMD_IQFLINTISCSIC_S 26
+#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+
+#define FW_IQ_CMD_FL0CNGCHMAP_S 20
+#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL0CACHELOCK_S 15
+#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S)
+
+#define FW_IQ_CMD_FL0DBP_S 14
+#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S)
+
+#define FW_IQ_CMD_FL0DATANS_S 13
+#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S)
+
+#define FW_IQ_CMD_FL0DATARO_S 12
+#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S)
+#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U)
+
+#define FW_IQ_CMD_FL0CONGCIF_S 11
+#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+
+#define FW_IQ_CMD_FL0ONCHIP_S 10
+#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S)
+
+#define FW_IQ_CMD_FL0STATUSPGNS_S 9
+#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL0STATUSPGRO_S 8
+#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL0FETCHNS_S 7
+#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << FW_IQ_CMD_FL0FETCHNS_S)
+
+#define FW_IQ_CMD_FL0FETCHRO_S 6
+#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S)
+#define FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U)
+
+#define FW_IQ_CMD_FL0HOSTFCMODE_S 4
+#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL0CPRIO_S 3
+#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S)
+
+#define FW_IQ_CMD_FL0PADEN_S 2
+#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S)
+#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U)
+
+#define FW_IQ_CMD_FL0PACKEN_S 1
+#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S)
+#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL0CONGEN_S 0
+#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S)
+#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL0DCAEN_S 15
+#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S)
+
+#define FW_IQ_CMD_FL0DCACPU_S 10
+#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S)
+
+#define FW_IQ_CMD_FL0FBMIN_S 7
+#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S)
+
+#define FW_IQ_CMD_FL0FBMAX_S 4
+#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0
+#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S)
+
+#define FW_IQ_CMD_FL1CNGCHMAP_S 20
+#define FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL1CACHELOCK_S 15
+#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S)
+
+#define FW_IQ_CMD_FL1DBP_S 14
+#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S)
+
+#define FW_IQ_CMD_FL1DATANS_S 13
+#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S)
+
+#define FW_IQ_CMD_FL1DATARO_S 12
+#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S)
+
+#define FW_IQ_CMD_FL1CONGCIF_S 11
+#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S)
+
+#define FW_IQ_CMD_FL1ONCHIP_S 10
+#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << FW_IQ_CMD_FL1ONCHIP_S)
+
+#define FW_IQ_CMD_FL1STATUSPGNS_S 9
+#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL1STATUSPGRO_S 8
+#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL1FETCHNS_S 7
+#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S)
+
+#define FW_IQ_CMD_FL1FETCHRO_S 6
+#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S)
+
+#define FW_IQ_CMD_FL1HOSTFCMODE_S 4
+#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL1CPRIO_S 3
+#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S)
+
+#define FW_IQ_CMD_FL1PADEN_S 2
+#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S)
+#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U)
+
+#define FW_IQ_CMD_FL1PACKEN_S 1
+#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S)
+#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL1CONGEN_S 0
+#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S)
+#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL1DCAEN_S 15
+#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S)
+
+#define FW_IQ_CMD_FL1DCACPU_S 10
+#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << FW_IQ_CMD_FL1DCACPU_S)
+
+#define FW_IQ_CMD_FL1FBMIN_S 7
+#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S)
+
+#define FW_IQ_CMD_FL1FBMAX_S 4
+#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0
+#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S)
struct fw_eq_eth_cmd {
__be32 op_to_vfn;
@@ -1195,40 +1521,102 @@ struct fw_eq_eth_cmd {
__be64 r9;
};
-#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
-#define FW_EQ_ETH_CMD_FREE (1U << 30)
-#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
-#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
-#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
-#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
-#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
-#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
-#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
-#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
-
-#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
-#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_PFN_S 8
+#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S)
+
+#define FW_EQ_ETH_CMD_VFN_S 0
+#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S)
+
+#define FW_EQ_ETH_CMD_ALLOC_S 31
+#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S)
+#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U)
+
+#define FW_EQ_ETH_CMD_FREE_S 30
+#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S)
+#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U)
+
+#define FW_EQ_ETH_CMD_MODIFY_S 29
+#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S)
+#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTART_S 28
+#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S)
+#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTOP_S 27
+#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S)
+#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_ETH_CMD_EQID_S 0
+#define FW_EQ_ETH_CMD_EQID_M 0xfffff
+#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S)
+#define FW_EQ_ETH_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M)
+
+#define FW_EQ_ETH_CMD_PHYSEQID_S 0
+#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S)
+#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M)
+
+#define FW_EQ_ETH_CMD_FETCHSZM_S 26
+#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << FW_EQ_ETH_CMD_FETCHSZM_S)
+#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_ETH_CMD_STATUSPGNS_S 25
+#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S)
+
+#define FW_EQ_ETH_CMD_STATUSPGRO_S 24
+#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S)
+
+#define FW_EQ_ETH_CMD_FETCHNS_S 23
+#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S)
+
+#define FW_EQ_ETH_CMD_FETCHRO_S 22
+#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+
+#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
+#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_ETH_CMD_CPRIO_S 19
+#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S)
+
+#define FW_EQ_ETH_CMD_ONCHIP_S 18
+#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S)
+
+#define FW_EQ_ETH_CMD_PCIECHN_S 16
+#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S)
+
+#define FW_EQ_ETH_CMD_IQID_S 0
+#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S)
+
+#define FW_EQ_ETH_CMD_DCAEN_S 31
+#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S)
+
+#define FW_EQ_ETH_CMD_DCACPU_S 26
+#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S)
+
+#define FW_EQ_ETH_CMD_FBMIN_S 23
+#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S)
+
+#define FW_EQ_ETH_CMD_FBMAX_S 20
+#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_ETH_CMD_EQSIZE_S 0
+#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
+
+#define FW_EQ_ETH_CMD_VIID_S 16
+#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S)
struct fw_eq_ctrl_cmd {
__be32 op_to_vfn;
@@ -1240,38 +1628,102 @@ struct fw_eq_ctrl_cmd {
__be64 eqaddr;
};
-#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
-
-#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
-#define FW_EQ_CTRL_CMD_FREE (1U << 30)
-#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
-#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
-#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
-#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
-#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
-#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
-#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
-#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
+#define FW_EQ_CTRL_CMD_PFN_S 8
+#define FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S)
+
+#define FW_EQ_CTRL_CMD_VFN_S 0
+#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << FW_EQ_CTRL_CMD_VFN_S)
+
+#define FW_EQ_CTRL_CMD_ALLOC_S 31
+#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S)
+#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U)
+
+#define FW_EQ_CTRL_CMD_FREE_S 30
+#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S)
+#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U)
+
+#define FW_EQ_CTRL_CMD_MODIFY_S 29
+#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S)
+#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTART_S 28
+#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S)
+#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTOP_S 27
+#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S)
+#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID_S 20
+#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S)
+
+#define FW_EQ_CTRL_CMD_EQID_S 0
+#define FW_EQ_CTRL_CMD_EQID_M 0xfffff
+#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S)
+#define FW_EQ_CTRL_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M)
+
+#define FW_EQ_CTRL_CMD_PHYSEQID_S 0
+#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM_S 26
+#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHSZM_S)
+#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25
+#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S)
+#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24
+#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S)
+#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHNS_S 23
+#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S)
+#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHRO_S 22
+#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S)
+#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_CTRL_CMD_CPRIO_S 19
+#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S)
+
+#define FW_EQ_CTRL_CMD_ONCHIP_S 18
+#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S)
+
+#define FW_EQ_CTRL_CMD_PCIECHN_S 16
+#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S)
+
+#define FW_EQ_CTRL_CMD_IQID_S 0
+#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S)
+
+#define FW_EQ_CTRL_CMD_DCAEN_S 31
+#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S)
+
+#define FW_EQ_CTRL_CMD_DCACPU_S 26
+#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S)
+
+#define FW_EQ_CTRL_CMD_FBMIN_S 23
+#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S)
+
+#define FW_EQ_CTRL_CMD_FBMAX_S 20
+#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \
+ ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_CTRL_CMD_EQSIZE_S 0
+#define FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S)
struct fw_eq_ofld_cmd {
__be32 op_to_vfn;
@@ -1283,45 +1735,112 @@ struct fw_eq_ofld_cmd {
__be64 eqaddr;
};
-#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
-
-#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
-#define FW_EQ_OFLD_CMD_FREE (1U << 30)
-#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
-#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
-#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
-#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
-#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
-#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
-#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
-#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
+#define FW_EQ_OFLD_CMD_PFN_S 8
+#define FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S)
+
+#define FW_EQ_OFLD_CMD_VFN_S 0
+#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S)
+
+#define FW_EQ_OFLD_CMD_ALLOC_S 31
+#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S)
+#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U)
+
+#define FW_EQ_OFLD_CMD_FREE_S 30
+#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S)
+#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U)
+
+#define FW_EQ_OFLD_CMD_MODIFY_S 29
+#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S)
+#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTART_S 28
+#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTART_S)
+#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTOP_S 27
+#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S)
+#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQID_S 0
+#define FW_EQ_OFLD_CMD_EQID_M 0xfffff
+#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S)
+#define FW_EQ_OFLD_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M)
+
+#define FW_EQ_OFLD_CMD_PHYSEQID_S 0
+#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM_S 26
+#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25
+#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24
+#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S)
+
+#define FW_EQ_OFLD_CMD_FETCHNS_S 23
+#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S)
+
+#define FW_EQ_OFLD_CMD_FETCHRO_S 22
+#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHRO_S)
+#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 20
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_OFLD_CMD_CPRIO_S 19
+#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S)
+
+#define FW_EQ_OFLD_CMD_ONCHIP_S 18
+#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S)
+
+#define FW_EQ_OFLD_CMD_PCIECHN_S 16
+#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S)
+
+#define FW_EQ_OFLD_CMD_IQID_S 0
+#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S)
+
+#define FW_EQ_OFLD_CMD_DCAEN_S 31
+#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S)
+
+#define FW_EQ_OFLD_CMD_DCACPU_S 26
+#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S)
+
+#define FW_EQ_OFLD_CMD_FBMIN_S 23
+#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S)
+
+#define FW_EQ_OFLD_CMD_FBMAX_S 20
+#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \
+ ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_OFLD_CMD_EQSIZE_S 0
+#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S)
/*
* Macros for VIID parsing:
* VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
*/
-#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
-#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
-#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
+
+#define FW_VIID_PFN_S 8
+#define FW_VIID_PFN_M 0x7
+#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M)
+
+#define FW_VIID_VIVLD_S 7
+#define FW_VIID_VIVLD_M 0x1
+#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M)
+
+#define FW_VIID_VIN_S 0
+#define FW_VIID_VIN_M 0x7F
+#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M)
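Given the VIID layout documented above ([10:8] PFN, [7] VI valid, [6:0] VI number), the three _G helpers decode a raw VIID. A minimal sketch, with viid standing in for a value handed back by the firmware:

	unsigned int pfn   = FW_VIID_PFN_G(viid);	/* bits [10:8] */
	unsigned int vivld = FW_VIID_VIVLD_G(viid);	/* bit   [7]   */
	unsigned int vin   = FW_VIID_VIN_G(viid);	/* bits  [6:0] */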
struct fw_vi_cmd {
__be32 op_to_vfn;
@@ -1341,15 +1860,35 @@ struct fw_vi_cmd {
__be64 r10;
};
-#define FW_VI_CMD_PFN(x) ((x) << 8)
-#define FW_VI_CMD_VFN(x) ((x) << 0)
-#define FW_VI_CMD_ALLOC (1U << 31)
-#define FW_VI_CMD_FREE (1U << 30)
-#define FW_VI_CMD_VIID(x) ((x) << 0)
-#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
-#define FW_VI_CMD_PORTID(x) ((x) << 4)
-#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
-#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
+#define FW_VI_CMD_PFN_S 8
+#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S)
+
+#define FW_VI_CMD_VFN_S 0
+#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S)
+
+#define FW_VI_CMD_ALLOC_S 31
+#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S)
+#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U)
+
+#define FW_VI_CMD_FREE_S 30
+#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S)
+#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U)
+
+#define FW_VI_CMD_VIID_S 0
+#define FW_VI_CMD_VIID_M 0xfff
+#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S)
+#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M)
+
+#define FW_VI_CMD_PORTID_S 4
+#define FW_VI_CMD_PORTID_M 0xf
+#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S)
+#define FW_VI_CMD_PORTID_G(x) \
+ (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M)
+
+#define FW_VI_CMD_RSSSIZE_S 0
+#define FW_VI_CMD_RSSSIZE_M 0x7ff
+#define FW_VI_CMD_RSSSIZE_G(x) \
+ (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M)
/* Special VI_MAC command index ids */
#define FW_VI_MAC_ADD_MAC 0x3FF
@@ -1385,16 +1924,37 @@ struct fw_vi_mac_cmd {
} u;
};
-#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
-#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
-#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
-#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
-#define FW_VI_MAC_CMD_VALID (1U << 15)
-#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
-#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
-#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
-#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
-#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
+#define FW_VI_MAC_CMD_VIID_S 0
+#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S)
+
+#define FW_VI_MAC_CMD_FREEMACS_S 31
+#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+
+#define FW_VI_MAC_CMD_HASHVECEN_S 23
+#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
+#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
+
+#define FW_VI_MAC_CMD_HASHUNIEN_S 22
+#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S)
+
+#define FW_VI_MAC_CMD_VALID_S 15
+#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S)
+#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U)
+
+#define FW_VI_MAC_CMD_PRIO_S 12
+#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S)
+
+#define FW_VI_MAC_CMD_SMAC_RESULT_S 10
+#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3
+#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S)
+#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \
+ (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M)
+
+#define FW_VI_MAC_CMD_IDX_S 0
+#define FW_VI_MAC_CMD_IDX_M 0x3ff
+#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S)
+#define FW_VI_MAC_CMD_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
#define FW_RXMODE_MTU_NO_CHG 65535
@@ -1405,17 +1965,30 @@ struct fw_vi_rxmode_cmd {
__be32 r4_lo;
};
-#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
-#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
-#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
-#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
-#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
-#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
-#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
+#define FW_VI_RXMODE_CMD_VIID_S 0
+#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S)
+
+#define FW_VI_RXMODE_CMD_MTU_S 16
+#define FW_VI_RXMODE_CMD_MTU_M 0xffff
+#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S)
+
+#define FW_VI_RXMODE_CMD_PROMISCEN_S 14
+#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S)
+
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \
+ ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S)
+
+#define FW_VI_RXMODE_CMD_BROADCASTEN_S 10
+#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \
+ ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S)
+
+#define FW_VI_RXMODE_CMD_VLANEXEN_S 8
+#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S)
struct fw_vi_enable_cmd {
__be32 op_to_viid;
@@ -1425,11 +1998,21 @@ struct fw_vi_enable_cmd {
__be32 r4;
};
-#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
-#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
-#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
-#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28)
-#define FW_VI_ENABLE_CMD_LED (1U << 29)
+#define FW_VI_ENABLE_CMD_VIID_S 0
+#define FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S)
+
+#define FW_VI_ENABLE_CMD_IEN_S 31
+#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S)
+
+#define FW_VI_ENABLE_CMD_EEN_S 30
+#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S)
+
+#define FW_VI_ENABLE_CMD_LED_S 29
+#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S)
+#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U)
+
+#define FW_VI_ENABLE_CMD_DCB_INFO_S 28
+#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S)
/* VI VF stats offset definitions */
#define VI_VF_NUM_STATS 16
@@ -1529,9 +2112,14 @@ struct fw_vi_stats_cmd {
} u;
};
-#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
-#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
-#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
+#define FW_VI_STATS_CMD_VIID_S 0
+#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S)
+
+#define FW_VI_STATS_CMD_NSTATS_S 12
+#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S)
+
+#define FW_VI_STATS_CMD_IX_S 0
+#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S)
struct fw_acl_mac_cmd {
__be32 op_to_vfn;
@@ -1548,9 +2136,14 @@ struct fw_acl_mac_cmd {
u8 macaddr3[6];
};
-#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
-#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
-#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
+#define FW_ACL_MAC_CMD_PFN_S 8
+#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S)
+
+#define FW_ACL_MAC_CMD_VFN_S 0
+#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S)
+
+#define FW_ACL_MAC_CMD_EN_S 31
+#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S)
struct fw_acl_vlan_cmd {
__be32 op_to_vfn;
@@ -1561,11 +2154,20 @@ struct fw_acl_vlan_cmd {
__be16 vlanid[16];
};
-#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
-#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
-#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
-#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
-#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
+#define FW_ACL_VLAN_CMD_PFN_S 8
+#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S)
+
+#define FW_ACL_VLAN_CMD_VFN_S 0
+#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
+
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
+
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
@@ -1587,13 +2189,14 @@ enum fw_port_cap {
};
enum fw_port_mdi {
- FW_PORT_MDI_UNCHANGED,
- FW_PORT_MDI_AUTO,
- FW_PORT_MDI_F_STRAIGHT,
- FW_PORT_MDI_F_CROSSOVER
+ FW_PORT_CAP_MDI_UNCHANGED,
+ FW_PORT_CAP_MDI_AUTO,
+ FW_PORT_CAP_MDI_F_STRAIGHT,
+ FW_PORT_CAP_MDI_F_CROSSOVER
};
-#define FW_PORT_MDI(x) ((x) << 9)
+#define FW_PORT_CAP_MDI_S 9
+#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S)
enum fw_port_action {
FW_PORT_ACTION_L1_CFG = 0x0001,
@@ -1753,52 +2356,105 @@ struct fw_port_cmd {
} u;
};
-#define FW_PORT_CMD_READ (1U << 22)
-
-#define FW_PORT_CMD_PORTID(x) ((x) << 0)
-#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
-
-#define FW_PORT_CMD_ACTION(x) ((x) << 16)
-#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
-
-#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
-#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
-#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
-#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
-#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
-#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
-
-#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
-
-#define FW_PORT_CMD_LSTATUS (1U << 31)
-#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
-#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
-#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
-#define FW_PORT_CMD_TXPAUSE (1U << 23)
-#define FW_PORT_CMD_RXPAUSE (1U << 22)
-#define FW_PORT_CMD_MDIOCAP (1U << 21)
-#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
-#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
-#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
-#define FW_PORT_CMD_PTYPE_MASK 0x1f
-#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
-#define FW_PORT_CMD_MODTYPE_MASK 0x1f
-#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
-
-#define FW_PORT_CMD_DCBXDIS (1U << 7)
-#define FW_PORT_CMD_APPLY (1U << 7)
-#define FW_PORT_CMD_ALL_SYNCD (1U << 7)
-#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf)
-
-#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
-#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
-#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
-
-#define FW_PORT_CMD_CH0(x) ((x) << 20)
-#define FW_PORT_CMD_CH1(x) ((x) << 16)
-#define FW_PORT_CMD_CH2(x) ((x) << 12)
-#define FW_PORT_CMD_CH3(x) ((x) << 8)
-#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
+#define FW_PORT_CMD_READ_S 22
+#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S)
+#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U)
+
+#define FW_PORT_CMD_PORTID_S 0
+#define FW_PORT_CMD_PORTID_M 0xf
+#define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S)
+#define FW_PORT_CMD_PORTID_G(x) \
+ (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M)
+
+#define FW_PORT_CMD_ACTION_S 16
+#define FW_PORT_CMD_ACTION_M 0xffff
+#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S)
+#define FW_PORT_CMD_ACTION_G(x) \
+ (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M)
+
+#define FW_PORT_CMD_OVLAN3_S 7
+#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S)
+
+#define FW_PORT_CMD_OVLAN2_S 6
+#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S)
+
+#define FW_PORT_CMD_OVLAN1_S 5
+#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S)
+
+#define FW_PORT_CMD_OVLAN0_S 4
+#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S)
+
+#define FW_PORT_CMD_IVLAN0_S 3
+#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S)
+
+#define FW_PORT_CMD_TXIPG_S 3
+#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S)
+
+#define FW_PORT_CMD_LSTATUS_S 31
+#define FW_PORT_CMD_LSTATUS_M 0x1
+#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S)
+#define FW_PORT_CMD_LSTATUS_G(x) \
+ (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M)
+#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U)
+
+#define FW_PORT_CMD_LSPEED_S 24
+#define FW_PORT_CMD_LSPEED_M 0x3f
+#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S)
+#define FW_PORT_CMD_LSPEED_G(x) \
+ (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M)
+
+#define FW_PORT_CMD_TXPAUSE_S 23
+#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S)
+#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U)
+
+#define FW_PORT_CMD_RXPAUSE_S 22
+#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S)
+#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U)
+
+#define FW_PORT_CMD_MDIOCAP_S 21
+#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S)
+#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U)
+
+#define FW_PORT_CMD_MDIOADDR_S 16
+#define FW_PORT_CMD_MDIOADDR_M 0x1f
+#define FW_PORT_CMD_MDIOADDR_G(x) \
+ (((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M)
+
+#define FW_PORT_CMD_LPTXPAUSE_S 15
+#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S)
+#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U)
+
+#define FW_PORT_CMD_LPRXPAUSE_S 14
+#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S)
+#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U)
+
+#define FW_PORT_CMD_PTYPE_S 8
+#define FW_PORT_CMD_PTYPE_M 0x1f
+#define FW_PORT_CMD_PTYPE_G(x) \
+ (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+
+#define FW_PORT_CMD_MODTYPE_S 0
+#define FW_PORT_CMD_MODTYPE_M 0x1f
+#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
+#define FW_PORT_CMD_MODTYPE_G(x) \
+ (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M)
+
+#define FW_PORT_CMD_DCBXDIS_S 7
+#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S)
+#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U)
+
+#define FW_PORT_CMD_APPLY_S 7
+#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S)
+#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U)
+
+#define FW_PORT_CMD_ALL_SYNCD_S 7
+#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S)
+#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U)
+
+#define FW_PORT_CMD_DCB_VERSION_S 12
+#define FW_PORT_CMD_DCB_VERSION_M 0x7
+#define FW_PORT_CMD_DCB_VERSION_G(x) \
+ (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M)
enum fw_port_type {
FW_PORT_TYPE_FIBER_XFI,
@@ -1814,10 +2470,11 @@ enum fw_port_type {
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSA,
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
- FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
+ FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
enum fw_port_module_type {
@@ -1828,11 +2485,11 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
- FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
- FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
- FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1,
- FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
+ FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M
};
enum fw_port_mod_sub_type {
@@ -1988,11 +2645,6 @@ struct fw_port_stats_cmd {
} u;
};
-#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
-#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
-#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
-#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
-
/* port loopback stats */
#define FW_NUM_LB_STATS 16
enum fw_port_lb_stats_index {
@@ -2048,22 +2700,13 @@ struct fw_port_lb_stats_cmd {
} u;
};
-#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
-#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
-#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
-#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
-
struct fw_rss_ind_tbl_cmd {
__be32 op_to_viid;
-#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
__be32 retval_len16;
__be16 niqid;
__be16 startidx;
__be32 r3;
__be32 iq0_to_iq2;
-#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
-#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
-#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
__be32 iq3_to_iq5;
__be32 iq6_to_iq8;
__be32 iq9_to_iq11;
@@ -2077,6 +2720,18 @@ struct fw_rss_ind_tbl_cmd {
__be32 r15_lo;
};
+#define FW_RSS_IND_TBL_CMD_VIID_S 0
+#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ0_S 20
+#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ1_S 10
+#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ2_S 0
+#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S)
+
struct fw_rss_glb_config_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -2090,27 +2745,75 @@ struct fw_rss_glb_config_cmd {
struct fw_rss_glb_config_basicvirtual {
__be32 mode_pkd;
__be32 synmapen_to_hashtoeplitz;
-#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
-#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
-#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
-#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
-#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
-#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
-#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
-#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
-#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
__be64 r8;
__be64 r9;
} basicvirtual;
} u;
};
-#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
-#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28
+#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf
+#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \
+ (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M)
#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U)
+
struct fw_rss_vi_config_cmd {
__be32 op_to_viid;
#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
@@ -2124,19 +2827,51 @@ struct fw_rss_vi_config_cmd {
struct fw_rss_vi_config_basicvirtual {
__be32 r6;
__be32 defaultq_to_udpen;
-#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
-#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
-#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
-#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
-#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
-#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
-#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
__be64 r9;
__be64 r10;
} basicvirtual;
} u;
};
+#define FW_RSS_VI_CONFIG_CMD_VIID_S 0
+#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S)
+
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \
+ (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U)
+
struct fw_clip_cmd {
__be32 op_to_write;
__be32 alloc_to_len16;
@@ -2145,19 +2880,13 @@ struct fw_clip_cmd {
__be32 r4[2];
};
-#define S_FW_CLIP_CMD_ALLOC 31
-#define M_FW_CLIP_CMD_ALLOC 0x1
-#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
-#define G_FW_CLIP_CMD_ALLOC(x) \
- (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
-#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+#define FW_CLIP_CMD_ALLOC_S 31
+#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S)
+#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U)
-#define S_FW_CLIP_CMD_FREE 30
-#define M_FW_CLIP_CMD_FREE 0x1
-#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
-#define G_FW_CLIP_CMD_FREE(x) \
- (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
-#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+#define FW_CLIP_CMD_FREE_S 30
+#define FW_CLIP_CMD_FREE_V(x) ((x) << FW_CLIP_CMD_FREE_S)
+#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U)
enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0,
@@ -2196,7 +2925,6 @@ struct fw_error_cmd {
struct fw_debug_cmd {
__be32 op_type;
-#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
__be32 len16_pkd;
union fw_debug {
struct fw_debug_assert {
@@ -2219,19 +2947,35 @@ struct fw_debug_cmd {
} u;
};
-#define FW_PCIE_FW_ERR (1U << 31)
-#define FW_PCIE_FW_INIT (1U << 30)
-#define FW_PCIE_FW_HALT (1U << 29)
-#define FW_PCIE_FW_MASTER_VLD (1U << 15)
-#define FW_PCIE_FW_MASTER_MASK 0x7
-#define FW_PCIE_FW_MASTER_SHIFT 12
-#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
-#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
- FW_PCIE_FW_MASTER_MASK)
-#define FW_PCIE_FW_EVAL_MASK 0x7
-#define FW_PCIE_FW_EVAL_SHIFT 24
-#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
- FW_PCIE_FW_EVAL_MASK)
+#define FW_DEBUG_CMD_TYPE_S 0
+#define FW_DEBUG_CMD_TYPE_M 0xff
+#define FW_DEBUG_CMD_TYPE_G(x) \
+ (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
+
+#define PCIE_FW_ERR_S 31
+#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S)
+#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U)
+
+#define PCIE_FW_INIT_S 30
+#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S)
+#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U)
+
+#define PCIE_FW_HALT_S 29
+#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S)
+#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U)
+
+#define PCIE_FW_EVAL_S 24
+#define PCIE_FW_EVAL_M 0x7
+#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M)
+
+#define PCIE_FW_MASTER_VLD_S 15
+#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S)
+#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U)
+
+#define PCIE_FW_MASTER_S 12
+#define PCIE_FW_MASTER_M 0x7
+#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S)
+#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M)
struct fw_hdr {
u8 ver;
@@ -2259,10 +3003,25 @@ enum fw_hdr_chip {
FW_HDR_CHIP_T5
};
-#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
-#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
-#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
-#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
+#define FW_HDR_FW_VER_MAJOR_S 24
+#define FW_HDR_FW_VER_MAJOR_M 0xff
+#define FW_HDR_FW_VER_MAJOR_G(x) \
+ (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
+
+#define FW_HDR_FW_VER_MINOR_S 16
+#define FW_HDR_FW_VER_MINOR_M 0xff
+#define FW_HDR_FW_VER_MINOR_G(x) \
+ (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
+
+#define FW_HDR_FW_VER_MICRO_S 8
+#define FW_HDR_FW_VER_MICRO_M 0xff
+#define FW_HDR_FW_VER_MICRO_G(x) \
+ (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
+
+#define FW_HDR_FW_VER_BUILD_S 0
+#define FW_HDR_FW_VER_BUILD_M 0xff
+#define FW_HDR_FW_VER_BUILD_G(x) \
+ (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
enum fw_hdr_intfver {
FW_HDR_INTFVER_NIC = 0x00,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 3d06e77d7121..d00a751f0588 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -138,6 +138,8 @@ struct sge_fl {
struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
__be64 *desc; /* address of HW RX descriptor ring */
dma_addr_t addr; /* PCI bus address of hardware ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/*
@@ -178,6 +180,8 @@ struct sge_rspq {
u16 abs_id; /* SGE abs QID for the response Q */
__be64 *desc; /* address of hardware response ring */
dma_addr_t phys_addr; /* PCI bus address of ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response Q */
struct adapter *adapter; /* our adapter */
@@ -240,6 +244,8 @@ struct sge_txq {
struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
struct sge_qstat *stat; /* queue status entry */
dma_addr_t phys_addr; /* PCI bus address of hardware ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/*
@@ -345,6 +351,7 @@ struct sge {
struct adapter {
/* PCI resources */
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0b42bddaf284..aa74ec34a467 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1030,10 +1030,10 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
pktcnt_idx = closest_thres(&adapter->sge, cnt);
if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
- v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
- FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+ FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
if (err)
return err;
@@ -1230,14 +1230,14 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}
/*
@@ -2095,7 +2095,6 @@ static int adap_init0(struct adapter *adapter)
unsigned int ethqsets;
int err;
u32 param, val = 0;
- unsigned int chipid;
/*
* Wait for the device to become ready before proceeding ...
@@ -2123,17 +2122,6 @@ static int adap_init0(struct adapter *adapter)
return err;
}
- adapter->params.chip = 0;
- switch (adapter->pdev->device >> 12) {
- case CHELSIO_T4:
- adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
- break;
- case CHELSIO_T5:
- chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
- adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
- break;
- }
-
/*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
@@ -2184,8 +2172,8 @@ static int adap_init0(struct adapter *adapter)
* firmware won't understand this and we'll just get
* unencapsulated messages ...
*/
- param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
val = 1;
(void) t4vf_set_params(adapter, 1, &param, &val);
@@ -2594,6 +2582,27 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
+ /* Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_prep_adapter(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "device didn't become ready:"
+ " err=%d\n", err);
+ goto err_unmap_bar0;
+ }
+
+ /* For T5 and later we want to use the new BAR-based User Doorbells,
+ * so we need to map BAR2 here ...
+ */
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
+ err = -ENOMEM;
+ goto err_unmap_bar0;
+ }
+ }
/*
* Initialize adapter level features.
*/
@@ -2786,6 +2795,10 @@ err_free_dev:
}
err_unmap_bar:
+ if (!is_t4(adapter->params.chip))
+ iounmap(adapter->bar2);
+
+err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
@@ -2856,6 +2869,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
iounmap(adapter->regs);
+ if (!is_t4(adapter->params.chip))
+ iounmap(adapter->bar2);
kfree(adapter);
}
@@ -2908,67 +2923,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-/*
- * PCI Device registration data structures.
- */
-#define CH_DEVICE(devid) \
- { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
-
-static const struct pci_device_id cxgb4vf_pci_tbl[] = {
- CH_DEVICE(0xb000), /* PE10K FPGA */
- CH_DEVICE(0x4801), /* T420-cr */
- CH_DEVICE(0x4802), /* T422-cr */
- CH_DEVICE(0x4803), /* T440-cr */
- CH_DEVICE(0x4804), /* T420-bch */
- CH_DEVICE(0x4805), /* T440-bch */
- CH_DEVICE(0x4806), /* T460-ch */
- CH_DEVICE(0x4807), /* T420-so */
- CH_DEVICE(0x4808), /* T420-cx */
- CH_DEVICE(0x4809), /* T420-bt */
- CH_DEVICE(0x480a), /* T404-bt */
- CH_DEVICE(0x480d), /* T480-cr */
- CH_DEVICE(0x480e), /* T440-lp-cr */
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4881),
- CH_DEVICE(0x4882),
- CH_DEVICE(0x4883),
- CH_DEVICE(0x4884),
- CH_DEVICE(0x4885),
- CH_DEVICE(0x4886),
- CH_DEVICE(0x4887),
- CH_DEVICE(0x4888),
- CH_DEVICE(0x5801), /* T520-cr */
- CH_DEVICE(0x5802), /* T522-cr */
- CH_DEVICE(0x5803), /* T540-cr */
- CH_DEVICE(0x5804), /* T520-bch */
- CH_DEVICE(0x5805), /* T540-bch */
- CH_DEVICE(0x5806), /* T540-ch */
- CH_DEVICE(0x5807), /* T520-so */
- CH_DEVICE(0x5808), /* T520-cx */
- CH_DEVICE(0x5809), /* T520-bt */
- CH_DEVICE(0x580a), /* T504-bt */
- CH_DEVICE(0x580b), /* T520-sr */
- CH_DEVICE(0x580c), /* T504-bt */
- CH_DEVICE(0x580d), /* T580-cr */
- CH_DEVICE(0x580e), /* T540-lp-cr */
- CH_DEVICE(0x580f), /* Amsterdam */
- CH_DEVICE(0x5810), /* T580-lp-cr */
- CH_DEVICE(0x5811), /* T520-lp-cr */
- CH_DEVICE(0x5812), /* T560-cr */
- CH_DEVICE(0x5813), /* T580-cr */
- CH_DEVICE(0x5814), /* T580-so-cr */
- CH_DEVICE(0x5815), /* T502-bt */
- CH_DEVICE(0x5880),
- CH_DEVICE(0x5881),
- CH_DEVICE(0x5882),
- CH_DEVICE(0x5883),
- CH_DEVICE(0x5884),
- CH_DEVICE(0x5885),
- CH_DEVICE(0x5886),
- CH_DEVICE(0x5887),
- CH_DEVICE(0x5888),
- { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct pci_device_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+
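+/* t4_pci_id_tbl.h expands the macros above into the actual
+ * cxgb4vf_pci_tbl[] entries, replacing the per-driver Device ID list
+ * removed above; the VF Device IDs all carry 0x8 in their third nibble
+ * (0x48xx / 0x58xx), hence CH_PCI_DEVICE_ID_FUNCTION.
+ */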
+#include "../cxgb4/t4_pci_id_tbl.h"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fdd078d7d82c..f7fd1317d996 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -118,7 +118,7 @@ enum {
* we can specify for immediate data in the firmware Ethernet TX
* Work Request.
*/
- MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+ MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
/*
* Max size of a WR sent through a control TX queue.
@@ -525,19 +525,40 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
u32 val;
- /*
- * The SGE keeps track of its Producer and Consumer Indices in terms
+ /* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
* of multiples of Free List Entries per Egress Queue Units ...
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
- val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
- if (!is_t4(adapter->params.chip))
- val |= DBTYPE(1);
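+		/* T5 and later use the narrower PIDX_T5 encoding and mark the
+		 * doorbell write with DBTYPE; T4 keeps the original PIDX field.
+		 */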
+ if (is_t4(adapter->params.chip))
+ val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ else
+ val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
+ DBTYPE(1);
+ val |= DBPRIO(1);
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO(1) |
- QID(fl->cntxt_id) | val);
+
+ /* If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
+ */
+ if (unlikely(fl->bar2_addr == NULL)) {
+ t4_write_reg(adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(fl->cntxt_id) | val);
+ } else {
+ writel(val | QID(fl->bar2_qid),
+ fl->bar2_addr + SGE_UDB_KDOORBELL);
+
+ /* This Write Memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
fl->pend_cred %= FL_PER_EQ_UNIT;
}
}
@@ -598,6 +619,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
*/
BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
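+	/* Allocation failures are handled below (we fall back to small pages
+	 * and bump the alloc_failed counters), so suppress the allocator's
+	 * failure warning.
+	 */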
+ gfp |= __GFP_NOWARN;
+
/*
* If we support large pages, prefer large buffers and fail over to
* small pages if we can't allocate large pages to satisfy the refill.
@@ -608,8 +631,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
goto alloc_small_pages;
while (n) {
- page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- s->fl_pg_order);
+ page = __dev_alloc_pages(gfp, s->fl_pg_order);
if (unlikely(!page)) {
/*
* We've failed in our attempt to allocate a "large
@@ -653,7 +675,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
+ page = __dev_alloc_page(gfp);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -902,7 +924,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(nfrags));
if (likely(--nfrags == 0))
return;
@@ -948,14 +970,74 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
int n)
{
- /*
- * Warn if we write doorbells with the wrong priority and write
- * descriptors before telling HW.
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(tq->cntxt_id) | PIDX(n));
+
+ /* If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(tq->bar2_addr == NULL)) {
+ u32 val = PIDX(n);
+
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(tq->cntxt_id) | val);
+ } else {
+ u32 val = PIDX_T5(n);
+
+ /* T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if a Queue ID is too large.
+ */
+ WARN_ON(val & DBPRIO(1));
+
+ /* If we're only writing a single Egress Unit and the BAR2
+ * Queue ID is 0, we can use the Write Combining Doorbell
+ * Gather Buffer; otherwise we use the simple doorbell.
+ */
+ if (n == 1 && tq->bar2_qid == 0) {
+ unsigned int index = (tq->pidx
+ ? (tq->pidx - 1)
+ : (tq->size - 1));
+ __be64 *src = (__be64 *)&tq->desc[index];
+ __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+ SGE_UDB_WCDOORBELL);
+ unsigned int count = EQ_UNIT / sizeof(__be64);
+
+ /* Copy the TX Descriptor in a tight loop in order to
+ * try to get it to the adapter in a single Write
+ * Combined transfer on the PCI-E Bus. If the Write
+ * Combine fails (say because of an interrupt, etc.)
+ * the hardware will simply take the last write as a
+ * simple doorbell write with a PIDX Increment of 1
+ * and will fetch the TX Descriptor from memory via
+ * DMA.
+ */
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+ } else
+ writel(val | QID(tq->bar2_qid),
+ tq->bar2_addr + SGE_UDB_KDOORBELL);
+
+ /* This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ wmb();
+ }
}
/**
@@ -1145,7 +1227,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_free;
}
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/*
* After we're done injecting the Work Request for this
@@ -1157,7 +1239,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/*
@@ -1187,9 +1269,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(sizeof(*lso) +
- sizeof(*cpl)));
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
/*
* Fill in the LSO CPL message.
*/
@@ -1224,8 +1306,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(len));
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
/*
* Set up TX Packet CPL pointer, control word and perform
@@ -1781,6 +1863,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
unsigned int intr_params;
struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
int work_done = process_responses(rspq, budget);
+ u32 val;
if (likely(work_done < budget)) {
napi_complete(napi);
@@ -1792,11 +1875,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
- t4_write_reg(rspq->adapter,
- T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID((u32)rspq->cntxt_id) |
- SEINTARM(intr_params));
+ val = CIDXINC(work_done) | SEINTARM(intr_params);
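+	/* The CIDX update / interrupt re-arm is written via the VF register
+	 * map on T4 and via the BAR2 User Doorbell area on T5 and later.
+	 */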
+ if (is_t4(rspq->adapter->params.chip)) {
+ t4_write_reg(rspq->adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ val | INGRESSQID((u32)rspq->cntxt_id));
+ } else {
+ writel(val | INGRESSQID(rspq->bar2_qid),
+ rspq->bar2_addr + SGE_UDB_GTS);
+ wmb();
+ }
return work_done;
}
@@ -1821,6 +1909,7 @@ static unsigned int process_intrq(struct adapter *adapter)
struct sge *s = &adapter->sge;
struct sge_rspq *intrq = &s->intrq;
unsigned int work_done;
+ u32 val;
spin_lock(&adapter->sge.intrq_lock);
for (work_done = 0; ; work_done++) {
@@ -1886,10 +1975,15 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq);
}
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID(intrq->cntxt_id) |
- SEINTARM(intrq->intr_params));
+ val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+ if (is_t4(adapter->params.chip))
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ val | INGRESSQID(intrq->cntxt_id));
+ else {
+ writel(val | INGRESSQID(intrq->bar2_qid),
+ intrq->bar2_addr + SGE_UDB_GTS);
+ wmb();
+ }
spin_unlock(&adapter->sge.intrq_lock);
@@ -2035,6 +2129,35 @@ static void sge_tx_timer_cb(unsigned long data)
}
/**
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
+
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
+
+ return adapter->bar2 + bar2_qoffset;
+}
+
+/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
* @rspq: pointer to the new rxq's Response Queue to be filled in
@@ -2087,26 +2210,26 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* into OS-independent common code ...
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
- FW_IQ_CMD_IQSTART(1) |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
+ FW_IQ_CMD_IQSTART_F |
FW_LEN16(cmd));
cmd.type_to_iqandstindex =
- cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(iqasynch) |
- FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(iqandst) |
- FW_IQ_CMD_IQANUS(1) |
- FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
- FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+ cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH_V(iqasynch) |
+ FW_IQ_CMD_VIID_V(pi->viid) |
+ FW_IQ_CMD_IQANDST_V(iqandst) |
+ FW_IQ_CMD_IQANUS_V(1) |
+ FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
+ FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
cmd.iqdroprss_to_iqesize =
- cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+ cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
+ FW_IQ_CMD_IQGTSMODE_F |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
cmd.iqsize = cpu_to_be16(rspq->size);
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
@@ -2140,13 +2263,13 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
*/
cmd.iqns_to_fl0congen =
cpu_to_be32(
- FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN(1) |
- FW_IQ_CMD_FL0PADEN(1));
+ FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
+ FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0PADEN_F);
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
@@ -2165,6 +2288,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
rspq->gen = 1;
rspq->next_intr_params = rspq->intr_params;
rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+ rspq->bar2_addr = bar2_address(adapter,
+ rspq->cntxt_id,
+ T4_BAR2_QTYPE_INGRESS,
+ &rspq->bar2_qid);
rspq->abs_id = be16_to_cpu(rpl.physiqid);
rspq->size--; /* subtract status entry */
rspq->adapter = adapter;
@@ -2183,6 +2310,15 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
fl->alloc_failed = 0;
fl->large_alloc_failed = 0;
fl->starving = 0;
+
+ /* Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
+ */
+ fl->bar2_addr = bar2_address(adapter,
+ fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
+
refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
}
@@ -2250,24 +2386,25 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
* into the common code ...
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F |
FW_LEN16(cmd));
- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
- FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
cmd.fetchszm_to_iqid =
- cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
- FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
- FW_EQ_ETH_CMD_IQID(iqid));
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID_V(iqid));
cmd.dcaen_to_eqsize =
- cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(
+ SGE_CIDXFLUSHTHRESH_32) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
/*
@@ -2293,9 +2430,13 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
txq->q.cidx = 0;
txq->q.pidx = 0;
txq->q.stat = (void *)&txq->q.desc[txq->q.size];
- txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.bar2_addr = bar2_address(adapter,
+ txq->q.cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &txq->q.bar2_qid);
txq->q.abs_id =
- FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+ FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
txq->txq = devq;
txq->tso = 0;
txq->tx_cso = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 4b6a6d14d86d..8d3237f5e364 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -67,7 +67,7 @@ enum chip_type {
/*
* The "len16" field of a Firmware Command Structure ...
*/
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
/*
* Per-VF statistics.
@@ -135,9 +135,12 @@ struct dev_params {
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
u32 sge_control2; /* T5: more of the same */
- u32 sge_host_page_size; /* RDMA page sizes */
- u32 sge_queues_per_page; /* RDMA queues/page */
- u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
+ u32 sge_host_page_size; /* PF0-7 page sizes */
+ u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */
+ u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */
+ u32 sge_vf_hps; /* host page size for our vf */
+ u32 sge_vf_eq_qpp; /* egress queues/page for our VF */
+ u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
u32 sge_congestion_control; /* congestion thresholds, etc. */
@@ -267,6 +270,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
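+/* The chip version is encoded in the top nibble of the 16-bit PCI Device
+ * ID (0x4xxx = T4, 0x5xxx = T5); see t4vf_prep_adapter().
+ */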
+#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12)
+
static inline int is_t4(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
@@ -278,6 +283,13 @@ int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
@@ -309,5 +321,6 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+int t4vf_prep_adapter(struct adapter *);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1e896b923234..02e8833b7797 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -204,20 +204,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/* return value in low-order little-endian word */
v = t4_read_reg(adapter, mbox_data);
- if (FW_CMD_RETVAL_GET(v))
+ if (FW_CMD_RETVAL_G(v))
dump_mbox(adapter, "FW Error", mbox_data);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const u32 *)cmd)
- & FW_CMD_REQUEST) == 0);
+ & FW_CMD_REQUEST_F) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
WARN_ON((be32_to_cpu(*(u32 *)rpl)
- & FW_CMD_REQUEST) != 0);
+ & FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE));
- return -FW_CMD_RETVAL_GET(v);
+ return -FW_CMD_RETVAL_G(v);
}
}
@@ -287,17 +287,17 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
* like MAC address, etc.
*/
memset(&vi_cmd, 0, sizeof(vi_cmd));
- vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
- vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
+ vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
if (v)
return v;
- BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
- pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
+ BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
+ pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
/*
@@ -308,12 +308,12 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
return 0;
memset(&port_cmd, 0, sizeof(port_cmd));
- port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_PORT_CMD_PORTID(pi->port_id));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
port_cmd.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(port_cmd));
v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
if (v)
@@ -349,8 +349,8 @@ int t4vf_fw_reset(struct adapter *adapter)
struct fw_reset_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
- FW_CMD_WRITE);
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
+ FW_CMD_WRITE_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -377,12 +377,12 @@ static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams].mnem), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
p->mnem = htonl(*params++);
@@ -415,12 +415,12 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams]), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
p->mnem = cpu_to_be32(*params++);
p->val = cpu_to_be32(*vals++);
@@ -430,6 +430,95 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /* T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /* Get our SGE Page Size parameters.
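+ * sge_vf_hps is the encoded Host Page Size (log2 of the page size in
+ * bytes, minus 10), hence the "+ 10" below.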
+ */
+ page_shift = adapter->params.sge.sge_vf_hps + 10;
+ page_size = 1 << page_shift;
+
+ /* Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+ ? adapter->params.sge.sge_vf_eq_qpp
+ : adapter->params.sge.sge_vf_iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /* Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
+/**
* t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
* @adapter: the adapter
*
@@ -443,20 +532,20 @@ int t4vf_get_sge_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
- params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
- params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
- params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
- params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
- params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+ params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+ params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+ params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
v = t4vf_query_params(adapter, 7, params, vals);
if (v)
return v;
@@ -479,8 +568,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
* right value.
*/
if (!is_t4(adapter->params.chip)) {
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
v = t4vf_query_params(adapter, 1, params, vals);
if (v != FW_SUCCESS) {
dev_err(adapter->pdev_dev,
@@ -491,16 +580,65 @@ int t4vf_get_sge_params(struct adapter *adapter)
sge_params->sge_control2 = vals[0];
}
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
sge_params->sge_congestion_control = vals[1];
+ /* For T5 and later we want to use the new BAR2 Doorbells.
+ * Unfortunately, older firmware didn't allow this register to be
+ * read.
+ */
+ if (!is_t4(adapter->params.chip)) {
+ u32 whoami;
+ unsigned int pf, s_hps, s_qpp;
+
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(
+ SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(
+ SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adapter->pdev_dev,
+ "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_params->sge_egress_queues_per_page = vals[0];
+ sge_params->sge_ingress_queues_per_page = vals[1];
+
+ /* We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read. Do it once here so other code in
+ * the driver can just use it.
+ */
+ whoami = t4_read_reg(adapter,
+ T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ pf = SOURCEPF_GET(whoami);
+
+ s_hps = (HOSTPAGESIZEPF0_S +
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
+ sge_params->sge_vf_hps =
+ ((sge_params->sge_host_page_size >> s_hps)
+ & HOSTPAGESIZEPF0_M);
+
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
+ sge_params->sge_vf_eq_qpp =
+ ((sge_params->sge_egress_queues_per_page >> s_qpp)
+ & QUEUESPERPAGEPF0_MASK);
+ sge_params->sge_vf_iq_qpp =
+ ((sge_params->sge_ingress_queues_per_page >> s_qpp)
+ & QUEUESPERPAGEPF0_MASK);
+ }
+
return 0;
}
@@ -517,8 +655,8 @@ int t4vf_get_vpd_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
v = t4vf_query_params(adapter, 1, params, vals);
if (v)
return v;
@@ -540,10 +678,10 @@ int t4vf_get_dev_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
@@ -571,9 +709,9 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
* our RSS configuration.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
@@ -585,7 +723,7 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
* filtering at this point to weed out modes which don't support
* VF Drivers ...
*/
- rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+ rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
be32_to_cpu(rpl.u.manual.mode_pkd));
switch (rss->mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
@@ -593,26 +731,26 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
rss->u.basicvirtual.synmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
rss->u.basicvirtual.syn4tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
rss->u.basicvirtual.syn2tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
rss->u.basicvirtual.syn4tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
rss->u.basicvirtual.syn2tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
rss->u.basicvirtual.ofdmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
rss->u.basicvirtual.tnlmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
rss->u.basicvirtual.tnlalllookup =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
rss->u.basicvirtual.hashtoeplitz =
- ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
/* we need at least Tunnel Map Enable to be set */
if (!rss->u.basicvirtual.tnlmapen)
@@ -647,9 +785,9 @@ int t4vf_get_vfres(struct adapter *adapter)
* with error on command failure.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
@@ -659,22 +797,22 @@ int t4vf_get_vfres(struct adapter *adapter)
* Extract VF resource limits and return success.
*/
word = be32_to_cpu(rpl.niqflint_niq);
- vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
- vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
+ vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ vfres->niq = FW_PFVF_CMD_NIQ_G(word);
word = be32_to_cpu(rpl.type_to_neq);
- vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
- vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
+ vfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
word = be32_to_cpu(rpl.tc_to_nexactf);
- vfres->tc = FW_PFVF_CMD_TC_GET(word);
- vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
- vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
+ vfres->tc = FW_PFVF_CMD_TC_G(word);
+ vfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
word = be32_to_cpu(rpl.r_caps_to_nethctrl);
- vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
- vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
- vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
+ vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
return 0;
}
@@ -695,9 +833,9 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
int v;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
@@ -709,17 +847,17 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
config->basicvirtual.ip6fourtupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
config->basicvirtual.ip6twotupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
config->basicvirtual.ip4fourtupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
config->basicvirtual.ip4twotupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
config->basicvirtual.udpen =
- ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
config->basicvirtual.defaultq =
- FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
break;
}
@@ -745,9 +883,9 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
struct fw_rss_vi_config_cmd cmd, rpl;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
switch (adapter->params.rss.mode) {
@@ -755,16 +893,16 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
u32 word = 0;
if (config->basicvirtual.ip6fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
if (config->basicvirtual.ip6twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
if (config->basicvirtual.ip4fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
if (config->basicvirtual.ip4twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
if (config->basicvirtual.udpen)
- word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
- word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+ word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
+ word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
config->basicvirtual.defaultq);
cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
break;
@@ -803,10 +941,10 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
* Initialize firmware command template to write the RSS table.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_RSS_IND_TBL_CMD_VIID_V(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/*
@@ -857,9 +995,9 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
if (rsp >= rsp_end)
rsp = rspq;
}
- *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
- FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
- FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
+ FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
+ FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
}
/*
@@ -892,18 +1030,18 @@ int t4vf_alloc_vi(struct adapter *adapter, int port_id)
* VIID.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_ALLOC);
- cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+ FW_VI_CMD_ALLOC_F);
+ cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
- return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+ return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}
/**
@@ -922,12 +1060,12 @@ int t4vf_free_vi(struct adapter *adapter, int viid)
* Execute a VI command to free the Virtual Interface.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_FREE);
- cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+ FW_VI_CMD_FREE_F);
+ cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -946,12 +1084,12 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
- FW_VI_ENABLE_CMD_EEN(tx_en) |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) |
FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -970,11 +1108,11 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
FW_LEN16(cmd));
cmd.blinkdur = cpu_to_be16(nblinks);
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
@@ -1001,28 +1139,28 @@ int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
/* convert to FW values */
if (mtu < 0)
- mtu = FW_VI_RXMODE_CMD_MTU_MASK;
+ mtu = FW_VI_RXMODE_CMD_MTU_M;
if (promisc < 0)
- promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
if (all_multi < 0)
- all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
if (bcast < 0)
- bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
if (vlanex < 0)
- vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_RXMODE_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_RXMODE_CMD_VIID_V(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
cmd.mtu_to_vlanexen =
- cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
@@ -1072,19 +1210,19 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
int i;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ (free ? FW_CMD_EXEC_F : 0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
cmd.freemacs_to_len16 =
- cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx = cpu_to_be16(
- FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
}
@@ -1095,7 +1233,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
break;
for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(
+ u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx));
if (idx)
@@ -1161,19 +1299,19 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
- p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(idx));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (ret == 0) {
p = &rpl.u.exact[0];
- ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_naddr)
ret = -ENOMEM;
}
@@ -1198,13 +1336,13 @@ int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
u.exact[0]), 16);
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
- FW_VI_MAC_CMD_HASHUNIEN(ucast) |
- FW_CMD_LEN16(len16));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(len16));
cmd.u.hash.hashvec = cpu_to_be64(vec);
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
@@ -1240,14 +1378,14 @@ int t4vf_get_port_stats(struct adapter *adapter, int pidx,
int ret;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
- FW_VI_STATS_CMD_VIID(pi->viid) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
+ FW_VI_STATS_CMD_VIID_V(pi->viid) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
cmd.u.ctl.nstats_ix =
- cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
- FW_VI_STATS_CMD_NSTATS(nstats));
+ cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
+ FW_VI_STATS_CMD_NSTATS_V(nstats));
ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
if (ret)
return ret;
@@ -1299,13 +1437,13 @@ int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
struct fw_iq_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
FW_LEN16(cmd));
cmd.type_to_iqandstindex =
- cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
+ cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
cmd.iqid = cpu_to_be16(iqid);
cmd.fl0id = cpu_to_be16(fl0id);
@@ -1325,12 +1463,12 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
struct fw_eq_eth_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
FW_LEN16(cmd));
- cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
+ cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -1344,7 +1482,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
- u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
+ u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
case FW_PORT_CMD: {
@@ -1359,7 +1497,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
/*
* Extract various fields from port status change message.
*/
- action = FW_PORT_CMD_ACTION_GET(
+ action = FW_PORT_CMD_ACTION_G(
be32_to_cpu(port_cmd->action_to_len16));
if (action != FW_PORT_ACTION_GET_PORT_INFO) {
dev_err(adapter->pdev_dev,
@@ -1368,24 +1506,24 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
break;
}
- port_id = FW_PORT_CMD_PORTID_GET(
+ port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
+ link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE)
+ if (word & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE)
+ if (word & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1420,3 +1558,38 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
}
return 0;
}
+
+/**
+ *	t4vf_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *
+ *	Wait for the device to become ready, install default port/clock
+ *	parameters for debugging in case the firmware can't be reached,
+ *	and record which chip (T4 or T5) and revision is being driven.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ int err;
+ unsigned int chipid;
+
+ /* Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_wait_dev_ready(adapter);
+ if (err)
+ return err;
+
+ /* Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ adapter->params.chip = 0;
+ switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
+ case CHELSIO_T4:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ break;
+
+ case CHELSIO_T5:
+ chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ break;
+ }
+
+ return 0;
+}
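
The t4vf_hw.c hunks above follow the cxgb4/cxgb4vf firmware-API cleanup that gives every bitfield helper an explicit suffix: _V() builds a field value, _G() extracts one, and _F is the single-bit flag form, typically backed by _S (shift) and _M (mask) definitions in the firmware headers. A sketch of the pattern for a hypothetical one-bit field FOO at bit 23 (illustrative values, not taken from t4fw_api.h):

#define FOO_S    23
#define FOO_M    0x1U
#define FOO_V(x) ((x) << FOO_S)
#define FOO_G(x) (((x) >> FOO_S) & FOO_M)
#define FOO_F    FOO_V(1U)

/* Hence FW_CMD_REQUEST -> FW_CMD_REQUEST_F (set the flag),
 * FW_CMD_OP(v) -> FW_CMD_OP_V(v) (place a value), and
 * FW_CMD_OP_GET(w) -> FW_CMD_OP_G(w) (read it back).
 */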
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 9823a0ea7937..d1c025fd9726 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -60,6 +60,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
+#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -238,13 +239,13 @@ writereg(struct net_device *dev, u16 regno, u16 value)
static int __init
wait_eeprom_ready(struct net_device *dev)
{
- int timeout = jiffies;
+ unsigned long timeout = jiffies;
/* check to see if the EEPROM is ready,
* a timeout is used just in case EEPROM is ready when
* SI_BUSY in the PP_SelfST is clear
*/
while (readreg(dev, PP_SelfST) & SI_BUSY)
- if (jiffies - timeout >= 40)
+ if (time_after_eq(jiffies, timeout + 40))
return -1;
return 0;
}
@@ -485,7 +486,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
{
struct net_local *lp = netdev_priv(dev);
unsigned int selfcontrol;
- int timenow = jiffies;
+ unsigned long timenow = jiffies;
/* control the DC to DC convertor in the SelfControl register.
* Note: This is hooked up to a general purpose pin, might not
* always be a DC to DC convertor.
@@ -499,7 +500,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
writereg(dev, PP_SelfCTL, selfcontrol);
/* Wait for the DC/DC converter to power up - 500ms */
- while (jiffies - timenow < HZ)
+ while (time_before(jiffies, timenow + HZ))
;
}
@@ -514,7 +515,7 @@ send_test_pkt(struct net_device *dev)
0, 0, /* DSAP=0 & SSAP=0 fields */
0xf3, 0 /* Control (Test Req + P bit set) */
};
- long timenow = jiffies;
+ unsigned long timenow = jiffies;
writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
@@ -525,10 +526,10 @@ send_test_pkt(struct net_device *dev)
iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
/* Test to see if the chip has allocated memory for the packet */
- while (jiffies - timenow < 5)
+ while (time_before(jiffies, timenow + 5))
if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
break;
- if (jiffies - timenow >= 5)
+ if (time_after_eq(jiffies, timenow + 5))
return 0; /* this shouldn't happen */
/* Write the contents of the packet */
@@ -536,7 +537,7 @@ send_test_pkt(struct net_device *dev)
cs89_dbg(1, debug, "Sending test packet ");
/* wait a couple of jiffies for packet to be received */
- for (timenow = jiffies; jiffies - timenow < 3;)
+ for (timenow = jiffies; time_before(jiffies, timenow + 3);)
;
if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
cs89_dbg(1, cont, "succeeded\n");
@@ -556,7 +557,7 @@ static int
detect_tp(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int timenow = jiffies;
+ unsigned long timenow = jiffies;
int fdx;
cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);
@@ -574,7 +575,7 @@ detect_tp(struct net_device *dev)
/* Delay for the hardware to work out if the TP cable is present
* - 150ms
*/
- for (timenow = jiffies; jiffies - timenow < 15;)
+ for (timenow = jiffies; time_before(jiffies, timenow + 15);)
;
if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
return DETECTED_NONE;
@@ -618,7 +619,7 @@ detect_tp(struct net_device *dev)
if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
pr_info("%s: negotiating duplex...\n", dev->name);
while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
- if (jiffies - timenow > 4000) {
+ if (time_after(jiffies, timenow + 4000)) {
pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
break;
}
@@ -1271,7 +1272,7 @@ static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
struct net_local *lp = netdev_priv(dev);
- int reset_start_time;
+ unsigned long reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1294,7 +1295,7 @@ static void __init reset_chip(struct net_device *dev)
/* Wait until the chip is reset */
reset_start_time = jiffies;
while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
- jiffies - reset_start_time < 2)
+ time_before(jiffies, reset_start_time + 2))
;
#endif /* !CONFIG_MACH_MX31ADS */
}
@@ -1897,7 +1898,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
static struct platform_driver cs89x0_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.remove = cs89x0_platform_remove,
};
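
The cs89x0 conversions above switch the timeout variables to unsigned long and replace open-coded jiffies subtraction with the wrap-safe time_before()/time_after_eq() helpers. A minimal sketch of the resulting polling pattern, reusing the driver's readreg()/PP_SelfST/SI_BUSY but with a function name and 500 ms budget that are illustrative, not part of the patch:

#include <linux/errno.h>
#include <linux/jiffies.h>

/* Poll a busy bit against a wrap-safe deadline instead of subtracting
 * raw jiffies values, which gives wrong answers across a wraparound.
 */
static int poll_selfst_ready(struct net_device *dev)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);

	while (readreg(dev, PP_SelfST) & SI_BUSY)
		if (time_after_eq(jiffies, deadline))
			return -ETIMEDOUT;

	return 0;
}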
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 2be2a99c5ea3..3a12c096ea1c 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -881,7 +881,6 @@ static struct platform_driver ep93xx_eth_driver = {
.remove = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index e285f384b096..07719676c305 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -216,14 +216,10 @@ struct net_device * __init mac89x0_probe(int unit)
ioaddr = (unsigned long)
nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
{
- unsigned long flags;
int card_present;
- local_irq_save(flags);
- card_present = (hwreg_present((void*) ioaddr+4) &&
- hwreg_present((void*) ioaddr + DATA_PORT));
- local_irq_restore(flags);
-
+ card_present = (hwreg_present((void *)ioaddr + 4) &&
+ hwreg_present((void *)ioaddr + DATA_PORT));
if (!card_present)
goto out;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 5ba5ad071bb6..25c4d88853d8 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -187,6 +187,7 @@ struct enic {
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
u32 rx_copybreak;
+ u8 rss_key[ENIC_RSS_LEN];
};
static inline struct device *enic_get_dev(struct enic *enic)
@@ -246,5 +247,6 @@ int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
+int __enic_set_rsskey(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 85173d620758..eba1eb846d34 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -23,6 +23,7 @@
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
+#include "vnic_rss.h"
struct enic_stat {
char name[ETH_GSTRING_LEN];
@@ -416,6 +417,40 @@ static int enic_set_tunable(struct net_device *dev,
return ret;
}
+static u32 enic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return ENIC_RSS_LEN;
+}
+
+static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (hkey)
+ memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
+ indir)
+ return -EINVAL;
+
+ if (hkey)
+ memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
+
+ return __enic_set_rsskey(enic);
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -430,6 +465,9 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxnfc = enic_get_rxnfc,
.get_tunable = enic_get_tunable,
.set_tunable = enic_set_tunable,
+ .get_rxfh_key_size = enic_get_rxfh_key_size,
+ .get_rxfh = enic_get_rxfh,
+ .set_rxfh = enic_set_rxfh,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 73cf1653a4a3..868d0f605d60 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -283,12 +283,10 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
return IRQ_HANDLED;
}
- if (ENIC_TEST_INTR(pba, io_intr)) {
- if (napi_schedule_prep(&enic->napi[0]))
- __napi_schedule(&enic->napi[0]);
- } else {
+ if (ENIC_TEST_INTR(pba, io_intr))
+ napi_schedule_irqoff(&enic->napi[0]);
+ else
vnic_intr_unmask(&enic->intr[io_intr]);
- }
return IRQ_HANDLED;
}
@@ -313,7 +311,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
* writes).
*/
- napi_schedule(&enic->napi[0]);
+ napi_schedule_irqoff(&enic->napi[0]);
return IRQ_HANDLED;
}
@@ -322,7 +320,7 @@ static irqreturn_t enic_isr_msix(int irq, void *data)
{
struct napi_struct *napi = data;
- napi_schedule(napi);
+ napi_schedule_irqoff(napi);
return IRQ_HANDLED;
}
@@ -531,8 +529,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq;
- unsigned long flags;
unsigned int txq_map;
+ struct netdev_queue *txq;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
@@ -541,6 +539,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
wq = &enic->wq[txq_map];
+ txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely. In the off chance it's going to take
@@ -554,23 +553,25 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
+ spin_lock(&enic->wq_lock[txq_map]);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
- netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
+ spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
- netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+ netif_tx_stop_queue(txq);
+ if (!skb->xmit_more || netif_xmit_stopped(txq))
+ vnic_wq_doorbell(wq);
- spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
+ spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_OK;
}
@@ -1312,9 +1313,10 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
if (!wq_work_done) {
napi_complete(napi);
vnic_intr_unmask(&enic->intr[intr]);
+ return 0;
}
- return 0;
+ return budget;
}
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
@@ -1886,25 +1888,23 @@ static int enic_dev_hang_reset(struct enic *enic)
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+int __enic_set_rsskey(struct enic *enic)
{
+ union vnic_rss_key *rss_key_buf_va;
dma_addr_t rss_key_buf_pa;
- union vnic_rss_key *rss_key_buf_va = NULL;
- union vnic_rss_key rss_key = {
- .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
- .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
- .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
- .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
- };
- int err;
+ int i, kidx, bidx, err;
- rss_key_buf_va = pci_alloc_consistent(enic->pdev,
- sizeof(union vnic_rss_key), &rss_key_buf_pa);
+ rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
+ sizeof(union vnic_rss_key),
+ &rss_key_buf_pa);
if (!rss_key_buf_va)
return -ENOMEM;
- memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
-
+ for (i = 0; i < ENIC_RSS_LEN; i++) {
+ kidx = i / ENIC_RSS_BYTES_PER_KEY;
+ bidx = i % ENIC_RSS_BYTES_PER_KEY;
+ rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
+ }
spin_lock_bh(&enic->devcmd_lock);
err = enic_set_rss_key(enic,
rss_key_buf_pa,
@@ -1917,6 +1917,13 @@ static int enic_set_rsskey(struct enic *enic)
return err;
}
+static int enic_set_rsskey(struct enic *enic)
+{
+ netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);
+
+ return __enic_set_rsskey(enic);
+}
+
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
dma_addr_t rss_cpu_buf_pa;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index fa421baf45b8..881fa18542b3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -20,11 +20,16 @@
#define _VNIC_RSS_H_
/* RSS key array */
+
+#define ENIC_RSS_BYTES_PER_KEY 10
+#define ENIC_RSS_KEYS 4
+#define ENIC_RSS_LEN (ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS)
+
union vnic_rss_key {
struct {
- u8 b[10];
+ u8 b[ENIC_RSS_BYTES_PER_KEY];
u8 b_pad[6];
- } key[4];
+ } key[ENIC_RSS_KEYS];
u64 raw[8];
};
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2c6c70804a39..816f1ad6072f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
return wq->to_use->desc;
}
+static inline void vnic_wq_doorbell(struct vnic_wq *wq)
+{
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
+}
+
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
unsigned int len, int sop, int eop,
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
buf->wr_id = wrid;
buf = buf->next;
- if (eop) {
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &wq->ctrl->posted_index);
- }
wq->to_use = buf;
wq->ring.desc_avail -= desc_skip_cnt;
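
The vnic_wq.h hunk above pulls the posted_index write (and its wmb()) out of vnic_wq_post() into the new vnic_wq_doorbell() helper so the enic transmit path, shown earlier in this diff, can batch doorbells across an xmit_more burst; the resulting flow is roughly:

	spin_lock(&enic->wq_lock[txq_map]);
	enic_queue_wq_skb(enic, wq, skb);

	/* Ring the hardware doorbell only for the last skb of the burst,
	 * or when the queue had to be stopped; otherwise the MMIO write
	 * is deferred and amortised over the whole burst.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);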
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 316c5e5a92ad..7ec2d74f94d3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -4,7 +4,7 @@
config DM9000
tristate "DM9000 support"
- depends on ARM || BLACKFIN || MIPS || COLDFIRE
+ depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index f3ba840cbf7b..ef0bb58750e6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1749,7 +1749,6 @@ MODULE_DEVICE_TABLE(of, dm9000_of_matches);
static struct platform_driver dm9000_driver = {
.driver = {
.name = "dm9000",
- .owner = THIS_MODULE,
.pm = &dm9000_drv_pm_ops,
.of_match_table = of_match_ptr(dm9000_of_matches),
},
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index cf8b6ff21613..badff181e719 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -995,7 +995,6 @@ static void de4x5_dbg_mii(struct net_device *dev, int k);
static void de4x5_dbg_media(struct net_device *dev);
static void de4x5_dbg_srom(struct de4x5_srom *p);
static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int de4x5_strncmp(char *a, char *b, int n);
static int dc21041_infoleaf(struct net_device *dev);
static int dc21140_infoleaf(struct net_device *dev);
static int dc21142_infoleaf(struct net_device *dev);
@@ -4102,8 +4101,7 @@ get_hw_addr(struct net_device *dev)
}
/*
-** Test for enet addresses in the first 32 bytes. The built-in strncmp
-** didn't seem to work here...?
+** Test for enet addresses in the first 32 bytes.
*/
static int
de4x5_bad_srom(struct de4x5_private *lp)
@@ -4111,8 +4109,8 @@ de4x5_bad_srom(struct de4x5_private *lp)
int i, status = 0;
for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
- !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (!memcmp(&lp->srom, &enet_det[i], 3) &&
+ !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
if (i == 0) {
status = SMC;
} else if (i == 1) {
@@ -4125,18 +4123,6 @@ de4x5_bad_srom(struct de4x5_private *lp)
return status;
}
-static int
-de4x5_strncmp(char *a, char *b, int n)
-{
- int ret=0;
-
- for (;n && !ret; n--) {
- ret = *a++ - *b++;
- }
-
- return ret;
-}
-
static void
srom_repair(struct net_device *dev, int card)
{
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c8205606c775..50a00777228e 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2265,7 +2265,7 @@ static int __init dmfe_init_module(void)
static void __exit dmfe_cleanup_module(void)
{
- DMFE_DBUG(0, "dmfe_clean_module() ", debug);
+ DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
pci_unregister_driver(&dmfe_driver);
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 4061f9b22812..1c5916b13778 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1837,7 +1837,7 @@ static int __init uli526x_init_module(void)
static void __exit uli526x_cleanup_module(void)
{
- ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
+ ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
pci_unregister_driver(&uli526x_driver);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9a2d75235e89..712e7f8e1df7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -522,6 +522,7 @@ struct be_adapter {
u8 hba_port_num;
u16 pvid;
__be16 vxlan_port;
+ int vxlan_port_count;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index e42a791c1835..73a500ccbf69 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1171,7 +1171,8 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
-static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1185,16 +1186,23 @@ static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
if (hkey)
memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
return 0;
}
static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey)
+ const u8 *hkey, const u8 hfunc)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
u8 rsstable[RSS_INDIR_TABLE_LEN];
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
if (indir) {
struct be_rx_obj *rxo;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 597c463e384d..196073110e32 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
}
if (vlan_tag) {
- skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
if (unlikely(!skb))
return skb;
skb->vlan_tci = 0;
@@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
/* Insert the outer VLAN, if any */
if (adapter->qnq_vid) {
vlan_tag = adapter->qnq_vid;
- skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
if (unlikely(!skb))
return skb;
if (skip_hw_vlan)
@@ -1015,9 +1017,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
* to pad short packets (<= 32 bytes) to a 36-byte length.
*/
if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
+ if (skb_put_padto(skb, 36))
return NULL;
- skb->len = 36;
}
if (BEx_chip(adapter) || lancer_chip(adapter)) {
@@ -2853,10 +2854,10 @@ static int be_close(struct net_device *netdev)
static int be_rx_qs_create(struct be_adapter *adapter)
{
+ struct rss_info *rss = &adapter->rss_info;
+ u8 rss_key[RSS_HASH_KEY_LEN];
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rss_hkey[RSS_HASH_KEY_LEN];
- struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2901,15 +2902,15 @@ static int be_rx_qs_create(struct be_adapter *adapter)
rss->rss_flags = RSS_ENABLE_NONE;
}
- get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
- 128, rss_hkey);
+ 128, rss_key);
if (rc) {
rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
- memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+ memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
@@ -3123,6 +3124,8 @@ static void be_mac_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
+
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
be_cmd_manage_iface(adapter, adapter->if_handle,
OP_CONVERT_TUNNEL_TO_NORMAL);
@@ -3132,6 +3135,10 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
adapter->vxlan_port = 0;
+
+ netdev->hw_enc_features = 0;
+ netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
+ netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
@@ -4365,10 +4372,24 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
- BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
+ 0, 0);
}
#ifdef CONFIG_BE2NET_VXLAN
+/* VxLAN offload Notes:
+ *
+ * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
+ * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
+ * is expected to work across all types of IP tunnels once exported. Skyhawk
+ * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
+ * offloads in hw_enc_features only when a VxLAN port is added. Note this only
+ * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ *
+ * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
+ * adds more than one port, disable offloads and don't re-enable them again
+ * until after all the tunnels are removed.
+ */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
__be16 port)
{
@@ -4380,13 +4401,16 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
- dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
- be16_to_cpu(port));
dev_info(dev,
"Only one UDP port supported for VxLAN offloads\n");
- return;
+ dev_info(dev, "Disabling VxLAN offloads\n");
+ adapter->vxlan_port_count++;
+ goto err;
}
+ if (adapter->vxlan_port_count++ >= 1)
+ return;
+
status = be_cmd_manage_iface(adapter, adapter->if_handle,
OP_CONVERT_NORMAL_TO_TUNNEL);
if (status) {
@@ -4402,6 +4426,12 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
adapter->vxlan_port = port;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
return;
@@ -4418,13 +4448,15 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
return;
if (adapter->vxlan_port != port)
- return;
+ goto done;
be_disable_vxlan_offloads(adapter);
dev_info(&adapter->pdev->dev,
"Disabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
+done:
+ adapter->vxlan_port_count--;
}
static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
@@ -4468,12 +4500,6 @@ static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (skyhawk_chip(adapter)) {
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- }
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
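
The be2net VxLAN hunks above add vxlan_port_count so Skyhawk's single-port limitation is handled by reference counting: the NETIF_F_GSO_UDP_TUNNEL bits are exported only while exactly one VxLAN UDP port is configured and are not restored until every port has been deleted. Condensed flow of the two callbacks (error paths and logging omitted; see be_add_vxlan_port()/be_del_vxlan_port() above for the full version):

	/* add */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		adapter->vxlan_port_count++;	/* a second port appeared ...    */
		goto err;			/* ... so tear the offloads down */
	}
	if (adapter->vxlan_port_count++ >= 1)
		return;				/* offloads already disabled */
	/* first port: switch the interface to tunnel mode and set
	 * NETIF_F_GSO_UDP_TUNNEL in hw_enc_features/hw_features/features
	 */

	/* del */
	if (adapter->vxlan_port == port)
		be_disable_vxlan_offloads(adapter);	/* clears the feature bits */
	adapter->vxlan_port_count--;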
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0bc6c102f3ac..f88cfaa359e7 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1312,7 +1312,6 @@ static struct platform_driver ethoc_driver = {
.resume = ethoc_resume,
.driver = {
.name = "ethoc",
- .owner = THIS_MODULE,
.of_match_table = ethoc_match,
},
};
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c77fa4a69844..6d0c5d5eea6d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1335,7 +1335,6 @@ static struct platform_driver ftgmac100_driver = {
.remove = __exit_p(ftgmac100_remove),
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4ff1adc6bfca..dce5f7b7f772 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1177,7 +1177,6 @@ static struct platform_driver ftmac100_driver = {
.remove = __exit_p(ftmac100_remove),
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9af296a1ca99..469691ad4a1e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -38,9 +38,9 @@
#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
#define FEC_OPD 0x0ec /* Opcode + Pause duration */
-#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
-#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
-#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */
#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
@@ -53,16 +53,18 @@
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */
#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */
#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
-#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
-#define FEC_RACC 0x1C4 /* Receive Accelerator function */
+#define FEC_RACC 0x1c4 /* Receive Accelerator function */
#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
@@ -82,57 +84,57 @@
#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
-#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
-#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
#define RMON_T_COL 0x224 /* RMON TX collision count */
#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
-#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
-#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
#define RMON_T_OCTETS 0x244 /* RMON TX octets */
#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
-#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
-#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
-#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
-#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
-#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */
-#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
-#define RMON_R_RESVD_O 0x2A4 /* Reserved */
-#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */
-#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */
-#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */
-#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */
-#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */
-#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */
-#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */
-#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */
-#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */
-#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */
-#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */
-#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */
-#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */
-#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */
-#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
#else
@@ -165,21 +167,23 @@
#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
#define FEC_X_DES_START_1 FEC_X_DES_START_0
#define FEC_X_DES_START_2 FEC_X_DES_START_0
-#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0
+#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0
#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
/* Not existed in real chip
* Just for pass build.
*/
-#define FEC_RCMR_1 0xFFF
-#define FEC_RCMR_2 0xFFF
-#define FEC_DMA_CFG_1 0xFFF
-#define FEC_DMA_CFG_2 0xFFF
-#define FEC_TXIC0 0xFFF
-#define FEC_TXIC1 0xFFF
-#define FEC_TXIC2 0xFFF
-#define FEC_RXIC0 0xFFF
-#define FEC_RXIC1 0xFFF
-#define FEC_RXIC2 0xFFF
+#define FEC_RCMR_1 0xfff
+#define FEC_RCMR_2 0xfff
+#define FEC_DMA_CFG_1 0xfff
+#define FEC_DMA_CFG_2 0xfff
+#define FEC_TXIC0 0xfff
+#define FEC_TXIC1 0xfff
+#define FEC_TXIC2 0xfff
+#define FEC_RXIC0 0xfff
+#define FEC_RXIC1 0xfff
+#define FEC_RXIC2 0xfff
#endif /* CONFIG_M5272 */
@@ -213,60 +217,60 @@ struct bufdesc_ex {
 * The following definitions courtesy of commproc.h, which were
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
*/
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
/* Buffer descriptor control/status used by Ethernet receive.
-*/
-#define BD_ENET_RX_EMPTY ((ushort)0x8000)
-#define BD_ENET_RX_WRAP ((ushort)0x2000)
-#define BD_ENET_RX_INTR ((ushort)0x1000)
-#define BD_ENET_RX_LAST ((ushort)0x0800)
-#define BD_ENET_RX_FIRST ((ushort)0x0400)
-#define BD_ENET_RX_MISS ((ushort)0x0100)
-#define BD_ENET_RX_LG ((ushort)0x0020)
-#define BD_ENET_RX_NO ((ushort)0x0010)
-#define BD_ENET_RX_SH ((ushort)0x0008)
-#define BD_ENET_RX_CR ((ushort)0x0004)
-#define BD_ENET_RX_OV ((ushort)0x0002)
-#define BD_ENET_RX_CL ((ushort)0x0001)
-#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+ */
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
/* Enhanced buffer descriptor control/status used by Ethernet receive */
-#define BD_ENET_RX_VLAN 0x00000004
+#define BD_ENET_RX_VLAN 0x00000004
/* Buffer descriptor control/status used by Ethernet transmit.
-*/
-#define BD_ENET_TX_READY ((ushort)0x8000)
-#define BD_ENET_TX_PAD ((ushort)0x4000)
-#define BD_ENET_TX_WRAP ((ushort)0x2000)
-#define BD_ENET_TX_INTR ((ushort)0x1000)
-#define BD_ENET_TX_LAST ((ushort)0x0800)
-#define BD_ENET_TX_TC ((ushort)0x0400)
-#define BD_ENET_TX_DEF ((ushort)0x0200)
-#define BD_ENET_TX_HB ((ushort)0x0100)
-#define BD_ENET_TX_LC ((ushort)0x0080)
-#define BD_ENET_TX_RL ((ushort)0x0040)
-#define BD_ENET_TX_RCMASK ((ushort)0x003c)
-#define BD_ENET_TX_UN ((ushort)0x0002)
-#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
-
-/*enhanced buffer descriptor control/status used by Ethernet transmit*/
-#define BD_ENET_TX_INT 0x40000000
-#define BD_ENET_TX_TS 0x20000000
-#define BD_ENET_TX_PINS 0x10000000
-#define BD_ENET_TX_IINS 0x08000000
+ */
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+#define BD_ENET_TX_PINS 0x10000000
+#define BD_ENET_TX_IINS 0x08000000
/* This device has up to three irqs on some platforms */
@@ -279,36 +283,40 @@ struct bufdesc_ex {
#define FEC_ENET_MAX_TX_QS 3
#define FEC_ENET_MAX_RX_QS 3
-#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
- ((X == 2) ? \
+#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \
+ (((X) == 2) ? \
FEC_R_DES_START_2 : FEC_R_DES_START_0))
-#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
- ((X == 2) ? \
+#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \
+ (((X) == 2) ? \
FEC_X_DES_START_2 : FEC_X_DES_START_0))
-#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
- ((X == 2) ? \
+#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+ (((X) == 2) ? \
+ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
+#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
- ((X == 2) ? \
+#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
-#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
#define DMA_CLASS_EN (1 << 16)
-#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
-#define IDLE_SLOPE_MASK 0xFFFF
+#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xffff
#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
-#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+#define IDLE_SLOPE(X) (((X) == 1) ? \
+ (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
-#define RCMR_MATCHEN (0x1 << 16)
-#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2))
#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
-#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
-#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
+#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are power of two, so it is best
@@ -326,8 +334,8 @@ struct bufdesc_ex {
#define TX_RING_SIZE 512 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
-#define BD_ENET_RX_INT 0x00800000
-#define BD_ENET_RX_PTP ((ushort)0x0400)
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
#define BD_ENET_RX_ICE 0x00000020
#define BD_ENET_RX_PCR 0x00000010
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
@@ -359,13 +367,13 @@ struct bufdesc_ex {
/* ENET interrupt coalescing macro define */
#define FEC_ITR_CLK_SEL (0x1 << 30)
#define FEC_ITR_EN (0x1 << 31)
-#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20)
-#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
+#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xffff)
#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
-#define FEC_VLAN_TAG_LEN 0x04
-#define FEC_ETHTYPE_LEN 0x02
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
@@ -390,7 +398,7 @@ struct bufdesc_ex {
* frames not being transmitted until there is a 0-to-1 transition on
* ENET_TDAR[TDAR].
*/
-#define FEC_QUIRK_ERR006358 (1 << 7)
+#define FEC_QUIRK_ERR006358 (1 << 7)
/* ENET IP hw AVB
*
* i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
@@ -501,8 +509,9 @@ struct fec_enet_private {
int speed;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
- int bufdesc_ex;
+ bool bufdesc_ex;
int pause_flag;
+ u32 quirks;
struct napi_struct napi;
int csum_flags;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3dca494797bd..5ebdf8dc8a31 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -287,15 +287,13 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
return entries > 0 ? entries : entries + txq->tx_ring_size;
}
-static void *swap_buffer(void *bufaddr, int len)
+static void swap_buffer(void *bufaddr, int len)
{
int i;
unsigned int *buf = bufaddr;
- for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
- *buf = cpu_to_be32(*buf);
-
- return bufaddr;
+ for (i = 0; i < len; i += 4, buf++)
+ swab32s(buf);
}
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
@@ -361,8 +359,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct bufdesc *bdp = txq->cur_tx;
struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -398,7 +394,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
}
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -410,11 +406,11 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], bufaddr, frag_len);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, frag_len);
}
@@ -450,8 +446,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int nr_frags = skb_shinfo(skb)->nr_frags;
struct bufdesc *bdp, *last_bdp;
void *bufaddr;
@@ -490,11 +484,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
queue = skb_get_queue_mapping(skb);
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, buflen);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen);
}
@@ -529,7 +523,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -573,8 +567,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
int size, bool last_tcp, bool is_last)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
unsigned short queue = skb_get_queue_mapping(skb);
unsigned short status;
@@ -587,11 +579,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
if (((unsigned long) data) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], data, size);
data = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, size);
}
@@ -607,7 +599,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
bdp->cbd_bufaddr = addr;
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -635,8 +627,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
unsigned short queue = skb_get_queue_mapping(skb);
@@ -652,11 +642,11 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
if (((unsigned long)bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, hdr_len);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, hdr_len);
dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
@@ -673,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
bdp->cbd_datlen = hdr_len;
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -698,8 +688,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct tso_t tso;
unsigned int index = 0;
int ret;
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
dev_kfree_skb_any(skb);
@@ -761,7 +749,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
txq->cur_tx = bdp;
/* Trigger transmission start */
- if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
@@ -879,6 +867,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -924,8 +913,6 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
@@ -935,7 +922,7 @@ fec_restart(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -946,17 +933,14 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
}
/* Clear any outstanding interrupt. */
- writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
- /* Set maximum receive buffer size. */
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+ writel(0xffffffff, fep->hwp + FEC_IEVENT);
fec_enet_bd_init(ndev);
@@ -992,7 +976,7 @@ fec_restart(struct net_device *ndev)
* The phy interface and speed need to get configured
* differently on enet-mac.
*/
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
rcntl |= 0x40000000 | 0x00000020;
@@ -1015,7 +999,7 @@ fec_restart(struct net_device *ndev)
}
} else {
#ifdef FEC_MIIGSK_ENR
- if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+ if (fep->quirks & FEC_QUIRK_USE_GASKET) {
u32 cfgr;
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
@@ -1068,7 +1052,7 @@ fec_restart(struct net_device *ndev)
writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
/* enable ENET store and forward mode */
@@ -1091,7 +1075,10 @@ fec_restart(struct net_device *ndev)
fec_ptp_start_cyclecounter(ndev);
/* Enable interrupts we wish to service */
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ if (fep->link)
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ else
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
fec_enet_itr_coal_init(ndev);
@@ -1102,8 +1089,6 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1118,7 +1103,7 @@ fec_stop(struct net_device *ndev)
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
*/
- if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -1128,7 +1113,7 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
@@ -1350,8 +1335,6 @@ static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
@@ -1365,7 +1348,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool is_copybreak;
- bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1575,20 +1558,21 @@ fec_enet_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct fec_enet_private *fep = netdev_priv(ndev);
- const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
uint int_events;
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
- writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
- if (int_events & napi_mask) {
+ if (fep->work_tx || fep->work_rx) {
ret = IRQ_HANDLED;
- /* Disable the NAPI interrupts */
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
- napi_schedule(&fep->napi);
+ if (napi_schedule_prep(&fep->napi)) {
+ /* Disable the NAPI interrupts */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
}
if (int_events & FEC_ENET_MII) {
@@ -1608,12 +1592,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
struct fec_enet_private *fep = netdev_priv(ndev);
int pkts;
- /*
- * Clear any pending transmit or receive interrupts before
- * processing the rings to avoid racing with the hardware.
- */
- writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
-
pkts = fec_enet_rx(ndev, budget);
fec_enet_tx(ndev);
@@ -1880,8 +1858,6 @@ failed_clk_ipg:
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
@@ -1894,6 +1870,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
fep->phy_interface);
+ if (!phy_dev)
+ return -ENODEV;
} else {
/* check for attached phy */
for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
@@ -1927,7 +1905,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
@@ -1955,8 +1933,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
static struct mii_bus *fec0_mii_bus;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct device_node *node;
int err = -ENXIO, i;
@@ -1976,7 +1952,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -1997,7 +1973,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* document.
*/
fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2039,7 +2015,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -2308,11 +2284,9 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int rx_itr, tx_itr;
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return;
/* Must be greater than zero to avoid unpredictable behavior */
@@ -2347,10 +2321,8 @@ static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2366,12 +2338,9 @@ static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
-
unsigned int cycle;
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
@@ -2951,8 +2920,6 @@ static const struct net_device_ops fec_netdev_ops = {
static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base;
@@ -3031,11 +2998,11 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
+ if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
/* enable hw accelerator */
@@ -3044,7 +3011,7 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
fep->tx_align = 0;
fep->rx_align = 0x3f;
}
@@ -3144,10 +3111,6 @@ fec_probe(struct platform_device *pdev)
int num_tx_qs;
int num_rx_qs;
- of_id = of_match_device(fec_dt_ids, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
-
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
/* Init network device */
@@ -3161,13 +3124,17 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ fep->quirks = pdev->id_entry->driver_data;
+
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
- if (pdev->id_entry &&
- (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
@@ -3184,8 +3151,6 @@ fec_probe(struct platform_device *pdev)
fep->pdev = pdev;
fep->dev_id = dev_id++;
- fep->bufdesc_ex = 0;
-
platform_set_drvdata(pdev, ndev);
phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -3238,12 +3203,11 @@ fec_probe(struct platform_device *pdev)
if (IS_ERR(fep->clk_ref))
fep->clk_ref = NULL;
+ fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
- fep->bufdesc_ex =
- pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
fep->clk_ptp = NULL;
- fep->bufdesc_ex = 0;
+ fep->bufdesc_ex = false;
}
ret = fec_enet_clk_enable(ndev, true);
@@ -3366,6 +3330,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ /* SOC supply clock to phy, when clock is disabled, phy link down
+ * SOC control phy regulator, when regulator is disabled, phy link down
+ */
+ if (fep->clk_enet_out || fep->reg_phy)
+ fep->link = 0;
+
return 0;
}
@@ -3411,7 +3381,6 @@ static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &fec_pm_ops,
.of_match_table = fec_dt_ids,
},
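[Editor's sketch, not part of the patch above: the fec interrupt handler now acks every event in one write and only masks the RX/TX interrupts once napi_schedule_prep() confirms it owns the NAPI poll. A minimal version of that hand-off, with hypothetical MYDEV_* registers and private data standing in for the fec definitions:]

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical register offsets and private data, for illustration only. */
#define MYDEV_IEVENT		0x004
#define MYDEV_IMASK		0x008
#define MYDEV_IMASK_MII_ONLY	0x00800000

struct mydev_priv {
	struct napi_struct napi;
	void __iomem *base;
};

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct mydev_priv *priv = netdev_priv(ndev);
	u32 events = readl(priv->base + MYDEV_IEVENT);

	/* Ack everything that is pending in a single write. */
	writel(events, priv->base + MYDEV_IEVENT);
	if (!events)
		return IRQ_NONE;

	/* Mask rx/tx interrupts only when we really own the poll. */
	if (napi_schedule_prep(&priv->napi)) {
		writel(MYDEV_IMASK_MII_ONLY, priv->base + MYDEV_IMASK);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}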
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index ff55fbb20a75..f495796248db 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -1069,7 +1069,6 @@ MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
static struct platform_driver mpc52xx_fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index c92c3b7876ca..9e2bcb807923 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1087,7 +1087,6 @@ MODULE_DEVICE_TABLE(of, fs_enet_match);
static struct platform_driver fs_enet_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "fs_enet",
.of_match_table = fs_enet_match,
},
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 3d3fde66c2cc..1d5617d2d8bd 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -224,7 +224,6 @@ MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct platform_driver fs_enet_bb_mdio_driver = {
.driver = {
.name = "fsl-bb-mdio",
- .owner = THIS_MODULE,
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index ebf5d6429a8d..1648e3582500 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -225,7 +225,6 @@ MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct platform_driver fs_enet_fec_mdio_driver = {
.driver = {
.name = "fsl-fec-mdio",
- .owner = THIS_MODULE,
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
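[Editor's sketch: the repeated removal of ".owner = THIS_MODULE" above works because platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register(), so the core fills in the owner. A minimal skeleton with hypothetical names therefore needs no explicit .owner:]

#include <linux/module.h>
#include <linux/platform_device.h>

static int mydev_probe(struct platform_device *pdev)
{
	return 0;
}

static int mydev_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mydev_driver = {
	.driver = {
		.name = "mydev",
		/* no .owner: platform_driver_register() sets THIS_MODULE */
	},
	.probe = mydev_probe,
	.remove = mydev_remove,
};
module_platform_driver(mydev_driver);

MODULE_LICENSE("GPL");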
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 964c6bf37710..d1a91e344e6b 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -488,7 +488,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",
- .owner = THIS_MODULE,
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4fdf0aa16978..5645342f5b28 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,9 +116,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -173,11 +171,14 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
static int gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
+ u32 *rfbptr;
int i, j;
+ dma_addr_t bufaddr;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -201,6 +202,7 @@ static int gfar_init_bds(struct net_device *ndev)
txbdp->status |= TXBD_WRAP;
}
+ rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->cur_rx = rx_queue->rx_bd_base;
@@ -211,22 +213,22 @@ static int gfar_init_bds(struct net_device *ndev)
struct sk_buff *skb = rx_queue->rx_skbuff[j];
if (skb) {
- gfar_init_rxbdp(rx_queue, rxbdp,
- rxbdp->bufPtr);
+ bufaddr = rxbdp->bufPtr;
} else {
- skb = gfar_new_skb(ndev);
+ skb = gfar_new_skb(ndev, &bufaddr);
if (!skb) {
netdev_err(ndev, "Can't allocate RX buffers\n");
return -ENOMEM;
}
rx_queue->rx_skbuff[j] = skb;
-
- gfar_new_rxbdp(rx_queue, rxbdp, skb);
}
+ gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
rxbdp++;
}
+ rx_queue->rfbptr = rfbptr;
+ rfbptr += 2;
}
return 0;
@@ -336,6 +338,20 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
}
}
+static void gfar_init_rqprm(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->rqprm0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
+ (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
+ baddr++;
+ }
+}
+
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -396,6 +412,13 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ /* Clear the LFC bit */
+ gfar_write(&regs->rctrl, rctrl);
+ /* Init flow control threshold values */
+ gfar_init_rqprm(priv);
+ gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
+ rctrl |= RCTRL_LFC;
+
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
}
@@ -1687,6 +1710,9 @@ static int init_phy(struct net_device *dev)
priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
priv->phydev->advertising = priv->phydev->supported;
+ /* Add support for flow control, but don't advertise it by default */
+ priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
return 0;
}
@@ -2290,6 +2316,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
0,
frag_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
@@ -2339,8 +2367,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb->ptp = 1;
}
- txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = bufaddr;
/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
@@ -2406,6 +2438,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return NETDEV_TX_OK;
+
+dma_map_err:
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = txbdp->lstatus;
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+ bufaddr = txbdp->bufPtr;
+ dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
@@ -2606,18 +2657,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
-{
- struct net_device *dev = rx_queue->dev;
- struct gfar_private *priv = netdev_priv(dev);
- dma_addr_t buf;
-
- buf = dma_map_single(priv->dev, skb->data,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
- gfar_init_rxbdp(rx_queue, bdp, buf);
-}
-
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2632,9 +2671,25 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff *gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
{
- return gfar_alloc_skb(dev);
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ skb = gfar_alloc_skb(dev);
+ if (!skb)
+ return NULL;
+
+ addr = dma_map_single(priv->dev, skb->data,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ *bufaddr = addr;
+ return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2805,11 +2860,12 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
+ dma_addr_t bufaddr;
rmb();
/* Add another skb for the future */
- newskb = gfar_new_skb(dev);
+ newskb = gfar_new_skb(dev, &bufaddr);
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
@@ -2825,9 +2881,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp->status & RXBD_ERR)) {
count_errors(bdp->status, dev);
- if (unlikely(!newskb))
+ if (unlikely(!newskb)) {
newskb = skb;
- else if (skb)
+ bufaddr = bdp->bufPtr;
+ } else if (skb)
dev_kfree_skb(skb);
} else {
/* Increment the number of packets */
@@ -2854,7 +2911,11 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
/* Setup the new bdp */
- gfar_new_rxbdp(rx_queue, bdp, newskb);
+ gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
/* Update to the next pointer */
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
@@ -3370,7 +3431,11 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+ lcl_adv = 0;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -3386,6 +3451,9 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+ struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
@@ -3394,6 +3462,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
+ u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
@@ -3438,6 +3507,26 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
tempval1 |= gfar_get_flowctrl_cfg(priv);
+ /* Turn last free buffer recording on */
+ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ bdp = rx_queue->cur_rx;
+ /* skip to previous bd */
+ bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
+ rx_queue->rx_bd_base,
+ rx_queue->rx_ring_size);
+
+ if (rx_queue->rfbptr)
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
+ }
+
+ priv->tx_actual_en = 1;
+ }
+
+ if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+ priv->tx_actual_en = 0;
+
gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
@@ -3472,7 +3561,6 @@ MODULE_DEVICE_TABLE(of, gfar_match);
static struct platform_driver gfar_driver = {
.driver = {
.name = "fsl-gianfar",
- .owner = THIS_MODULE,
.pm = GFAR_PM_OPS,
.of_match_table = gfar_match,
},
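[Editor's sketch: the gianfar RX refill above now maps the buffer inside gfar_new_skb() and drops the skb if the mapping fails, so the hardware is never handed an unmapped address. The allocate-then-map-with-error-check pattern, with hypothetical names and generic netdev allocation:]

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Allocate an rx skb and DMA-map it, returning the bus address through
 * *bufaddr.  Hypothetical helper modelled on the gfar_new_skb() change.
 */
static struct sk_buff *mydev_new_rx_skb(struct net_device *ndev,
					struct device *dma_dev,
					unsigned int buf_size,
					dma_addr_t *bufaddr)
{
	struct sk_buff *skb;
	dma_addr_t addr;

	skb = netdev_alloc_skb_ip_align(ndev, buf_size);
	if (!skb)
		return NULL;

	addr = dma_map_single(dma_dev, skb->data, buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, addr))) {
		/* never hand the hardware an unmapped buffer */
		dev_kfree_skb_any(skb);
		return NULL;
	}

	*bufaddr = addr;
	return skb;
}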
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2805cfbf1765..b581b8823a2a 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -99,6 +99,10 @@ extern const char gfar_driver_version[];
#define GFAR_MAX_FIFO_STARVE 511
#define GFAR_MAX_FIFO_STARVE_OFF 511
+#define FBTHR_SHIFT 24
+#define DEFAULT_RX_LFC_THR 16
+#define DEFAULT_LFC_PTVVAL 4
+
#define DEFAULT_RX_BUFFER_SIZE 1536
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
@@ -145,9 +149,7 @@ extern const char gfar_driver_version[];
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
-#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
- | SUPPORTED_Pause \
- | SUPPORTED_Asym_Pause)
+#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -275,6 +277,7 @@ extern const char gfar_driver_version[];
#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_LFC 0x00004000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
#define RCTRL_GHTX 0x00000400
@@ -851,7 +854,32 @@ struct gfar {
u8 res23c[248];
u32 attr; /* 0x.bf8 - Attributes Register */
u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
- u8 res24[688];
+ u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */
+ u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */
+ u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */
+ u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */
+ u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */
+ u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */
+ u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */
+ u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */
+ u8 res24[36];
+ u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */
+ u8 res24a[4];
+ u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */
+ u8 res24b[4];
+ u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */
+ u8 res24c[4];
+ u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */
+ u8 res24d[4];
+ u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */
+ u8 res24e[4];
+ u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */
+ u8 res24f[4];
+ u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */
+ u8 res24g[4];
+ u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */
+ u8 res24h[4];
+ u8 res24x[556];
u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
@@ -1011,6 +1039,7 @@ struct gfar_priv_rx_q {
/* RX Coalescing values */
unsigned char rxcoalescing;
unsigned long rxic;
+ u32 *rfbptr;
};
enum gfar_irqinfo_id {
@@ -1101,6 +1130,7 @@ struct gfar_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
unsigned int num_grps;
+ int tx_actual_en;
/* Network Statistics */
struct gfar_extra_stats extra_stats;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 76d70708f864..3e1a9c1a67a9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -579,8 +579,13 @@ static int gfar_spauseparam(struct net_device *dev,
u32 tempval;
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
- if (priv->tx_pause_en)
+
+ priv->tx_actual_en = 0;
+ if (priv->tx_pause_en) {
+ priv->tx_actual_en = 1;
tempval |= MACCFG1_TX_FLOW;
+ }
+
if (priv->rx_pause_en)
tempval |= MACCFG1_RX_FLOW;
gfar_write(&regs->maccfg1, tempval);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index bb568006f37d..16826341a4c9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -563,7 +563,6 @@ static struct platform_driver gianfar_ptp_driver = {
.driver = {
.name = "gianfar_ptp",
.of_match_table = match_table,
- .owner = THIS_MODULE,
},
.probe = gianfar_ptp_probe,
.remove = gianfar_ptp_remove,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3cf0478b3728..357e8b576905 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3943,7 +3943,6 @@ MODULE_DEVICE_TABLE(of, ucc_geth_match);
static struct platform_driver ucc_geth_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 76a6e0c77d69..ae6e30d39f0f 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -490,7 +490,8 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
eid = hp100_read_id(ioaddr);
if (eid == NULL) { /* bad checksum? */
- printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr);
+ printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n",
+ __func__, ioaddr);
goto out2;
}
@@ -498,7 +499,9 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
for (i = uc = 0; i < 7; i++)
uc += hp100_inb(LAN_ADDR + i);
if (uc != 0xff) {
- printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x)\n", ioaddr);
+ printk(KERN_WARNING
+ "%s: bad lan address checksum at port 0x%x)\n",
+ __func__, ioaddr);
err = -EIO;
goto out2;
}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 372fa8d1fda1..2af7f77345fb 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -165,7 +165,6 @@ static struct platform_driver sni_82596_driver = {
.remove = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 87bd953cc2ee..9388a83818f2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2323,16 +2323,11 @@ static int emac_check_deps(struct emac_instance *dev,
static void emac_put_deps(struct emac_instance *dev)
{
- if (dev->mal_dev)
- of_dev_put(dev->mal_dev);
- if (dev->zmii_dev)
- of_dev_put(dev->zmii_dev);
- if (dev->rgmii_dev)
- of_dev_put(dev->rgmii_dev);
- if (dev->mdio_dev)
- of_dev_put(dev->mdio_dev);
- if (dev->tah_dev)
- of_dev_put(dev->tah_dev);
+ of_dev_put(dev->mal_dev);
+ of_dev_put(dev->zmii_dev);
+ of_dev_put(dev->rgmii_dev);
+ of_dev_put(dev->mdio_dev);
+ of_dev_put(dev->tah_dev);
}
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
@@ -2371,8 +2366,7 @@ static int emac_wait_deps(struct emac_instance *dev)
bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
for (i = 0; i < EMAC_DEP_COUNT; i++) {
- if (deps[i].node)
- of_node_put(deps[i].node);
+ of_node_put(deps[i].node);
if (err && deps[i].ofdev)
of_dev_put(deps[i].ofdev);
}
@@ -2383,8 +2377,7 @@ static int emac_wait_deps(struct emac_instance *dev)
dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
}
- if (deps[EMAC_DEP_PREV_IDX].ofdev)
- of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
+ of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
return err;
}
@@ -3009,7 +3002,6 @@ MODULE_DEVICE_TABLE(of, emac_match);
static struct platform_driver emac_driver = {
.driver = {
.name = "emac",
- .owner = THIS_MODULE,
.of_match_table = emac_match,
},
.probe = emac_probe,
@@ -3113,8 +3105,7 @@ static void __exit emac_exit(void)
/* Destroy EMAC boot list */
for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
- if (emac_boot_list[i])
- of_node_put(emac_boot_list[i]);
+ of_node_put(emac_boot_list[i]);
}
module_init(emac_init);
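[Editor's sketch: the emac cleanups above rely on of_node_put() and of_dev_put() being no-ops for a NULL argument, so the per-field guards add nothing. A trimmed-down illustration with a hypothetical dependency bundle:]

#include <linux/of.h>
#include <linux/of_device.h>

/* Hypothetical, partially-resolved dependencies; both put helpers accept
 * NULL, so no per-field checks are needed on release.
 */
struct mydev_deps {
	struct device_node *np;
	struct platform_device *ofdev;
};

static void mydev_put_deps(struct mydev_deps *deps)
{
	of_node_put(deps->np);		/* safe even if np == NULL */
	of_dev_put(deps->ofdev);	/* safe even if ofdev == NULL */
}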
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 63eb959a28aa..dddaab11a4c7 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -776,7 +776,6 @@ static struct of_device_id mal_platform_match[] =
static struct platform_driver mal_of_driver = {
.driver = {
.name = "mcmal",
- .owner = THIS_MODULE,
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index a01182cce965..457088fc5b06 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -319,7 +319,6 @@ static struct of_device_id rgmii_match[] =
static struct platform_driver rgmii_driver = {
.driver = {
.name = "emac-rgmii",
- .owner = THIS_MODULE,
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 9f24769ed826..cb18e7f917c6 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -163,7 +163,6 @@ static struct of_device_id tah_match[] =
static struct platform_driver tah_driver = {
.driver = {
.name = "emac-tah",
- .owner = THIS_MODULE,
.of_match_table = tah_match,
},
.probe = tah_probe,
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 9ca67a38c062..36409ccb75ea 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -310,7 +310,6 @@ static struct of_device_id zmii_match[] =
static struct platform_driver zmii_driver = {
.driver = {
.name = "emac-zmii",
- .owner = THIS_MODULE,
.of_match_table = zmii_match,
},
.probe = zmii_probe,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 24f3986cfae2..83140cbb5f01 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3136,12 +3136,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
*/
- if (skb->len < ETH_ZLEN) {
- if (skb_pad(skb, ETH_ZLEN - skb->len))
- return NETDEV_TX_OK;
- skb->len = ETH_ZLEN;
- skb_set_tail_pointer(skb, ETH_ZLEN);
- }
+ if (eth_skb_pad(skb))
+ return NETDEV_TX_OK;
mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
@@ -4104,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
unsigned int bufsz)
{
- struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+ struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
if (unlikely(!skb))
adapter->alloc_rx_buff_failed++;
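[Editor's sketch: eth_skb_pad() above replaces the open-coded skb_pad()/tail adjustment. It pads runt frames to the 60-byte Ethernet minimum and frees the skb itself on failure, so the transmit path only has to report the frame as consumed. A hedged call-site sketch, where mydev_map_and_send() is a hypothetical stand-in for the rest of the xmit path:]

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t mydev_map_and_send(struct sk_buff *skb,
				      struct net_device *ndev)
{
	/* ...descriptor setup would go here in a real driver... */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t mydev_xmit_frame(struct sk_buff *skb,
				    struct net_device *ndev)
{
	/* Pads to ETH_ZLEN; on failure the skb is already freed. */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	return mydev_map_and_send(skb, ndev);
}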
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 952ef7c434e8..e14fd85f64eb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
*/
if (length < copybreak) {
struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
+ napi_alloc_skb(&adapter->napi, length);
if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
@@ -3449,15 +3449,12 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
+ u32 rss_key[10];
int i;
- static const u32 rsskey[10] = {
- 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
- 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
- };
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < 10; i++)
- ew32(RSSRK(i), rsskey[i]);
+ ew32(RSSRK(i), rss_key[i]);
/* Direct all traffic to queue 0 */
for (i = 0; i < 32; i++)
@@ -5557,12 +5554,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* The minimum packet size with TCTL.PSP set is 17 bytes so
* pad skb in order to meet this minimum size requirement
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
mss = skb_shinfo(skb)->gso_size;
if (mss) {
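[Editor's sketch: netdev_rss_key_fill() above replaces the hard-coded RSS seed with the kernel's boot-time random Toeplitz key, shared by every caller so queue selection stays consistent. A minimal sketch of programming such a key into hypothetical RSSRK-style registers:]

#include <linux/netdevice.h>
#include <linux/io.h>

#define MYDEV_RSSRK_SIZE	10			/* ten 32-bit key registers, hypothetical */
#define MYDEV_RSSRK(i)		(0x5c80 + (i) * 4)	/* made-up offsets */

static void mydev_setup_rss_key(void __iomem *hw_base)
{
	u32 rss_key[MYDEV_RSSRK_SIZE];
	int i;

	/* One random key per boot, filled on first use. */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));

	for (i = 0; i < MYDEV_RSSRK_SIZE; i++)
		writel(rss_key[i], hw_base + MYDEV_RSSRK(i));
}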
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2d04464e6aa3..651f53bc7376 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -916,11 +916,15 @@ static u32 fm10k_get_rssrk_size(struct net_device *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key)
+static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
int i, err;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
err = fm10k_get_reta(netdev, indir);
if (err || !key)
return err;
@@ -932,12 +936,16 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key)
}
static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
err = fm10k_set_reta(netdev, indir);
if (err || !key)
return err;
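[Editor's sketch: the fm10k ethtool hooks above pick up the new hfunc argument: get_rxfh reports ETH_RSS_HASH_TOP and set_rxfh rejects anything other than "no change" or Toeplitz. The same checks in a hypothetical driver:]

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical driver state holding the indirection table and hash key. */
struct mydev_priv {
	u32 reta[128];
	u8 key[40];
};

static int mydev_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct mydev_priv *priv = netdev_priv(ndev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* only Toeplitz is supported */
	if (indir)
		memcpy(indir, priv->reta, sizeof(priv->reta));
	if (key)
		memcpy(key, priv->key, sizeof(priv->key));
	return 0;
}

static int mydev_set_rxfh(struct net_device *ndev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	/* Refuse a hash function change; allow "keep whatever is set". */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	/* ...program indir/key into the hardware here... */
	return 0;
}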
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e645af412e76..eb088b129bc7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = dev_alloc_page();
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- FM10K_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ FM10K_RX_HDR_LEN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
@@ -578,14 +578,9 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
if (skb_is_nonlinear(skb))
fm10k_pull_tail(rx_ring, rx_desc, skb);
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
@@ -620,14 +615,14 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
+ if (!rx_desc->d.staterr)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STATUS_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
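[Editor's sketch: the fm10k rx clean loop above switches from rmb() to the lighter dma_rmb(), which is enough because both the status read and the follow-up field reads target the coherent descriptor ring. The pattern with a hypothetical write-back descriptor layout:]

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical descriptor: hardware sets 'staterr' once the other fields
 * have been written back.
 */
struct mydev_rx_desc {
	__le32 staterr;
	__le16 length;
	__le16 vlan;
	__le64 rss_hash;
};

static bool mydev_desc_done(const struct mydev_rx_desc *desc,
			    unsigned int *len)
{
	if (!desc->staterr)
		return false;	/* not written back yet */

	/* Order the staterr check before reading the other fields; a full
	 * rmb() is not needed for coherent DMA memory.
	 */
	dma_rmb();

	*len = le16_to_cpu(desc->length);
	return true;
}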
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index a0cb74ab3dc6..4f5892cc32d7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1551,15 +1551,11 @@ void fm10k_down(struct fm10k_intfc *interface)
static int fm10k_sw_init(struct fm10k_intfc *interface,
const struct pci_device_id *ent)
{
- static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25,
- 0x3d256741, 0xb08fa343,
- 0xcb2bcad0, 0xb4307bae,
- 0xa32dcb77, 0x0cf23080,
- 0x3bb7426a, 0xfa01acbe };
const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
struct fm10k_hw *hw = &interface->hw;
struct pci_dev *pdev = interface->pdev;
struct net_device *netdev = interface->netdev;
+ u32 rss_key[FM10K_RSSRK_SIZE];
unsigned int rss;
int err;
@@ -1673,8 +1669,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
/* initialize vxlan_port list */
INIT_LIST_HEAD(&interface->vxlan_port);
- /* initialize RSS key */
- memcpy(interface->rssrk, seed, sizeof(seed));
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ memcpy(interface->rssrk, rss_key, sizeof(rss_key));
/* Start off interface as being down */
set_bit(__FM10K_DOWN, &interface->state);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index f1e33f896439..fc50f6461b13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -87,7 +87,7 @@
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 32
+#define I40E_AQ_LEN 128
#define I40E_AQ_WORK_LIMIT 16
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
@@ -146,6 +146,7 @@ enum i40e_state_t {
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
+ __I40E_PORT_TX_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -269,7 +270,8 @@ struct i40e_pf {
u16 msg_enable;
char misc_int_name[IFNAMSIZ + 9];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
- int service_timer_period;
+ unsigned long service_timer_period;
+ unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72f5d25a222f..77f6254a89ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -51,7 +51,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
- if (hw->mac.type == I40E_MAC_VF) {
+ if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@@ -617,7 +617,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_busy = false;
+ hw->aq.nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -754,12 +755,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
- if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
- status = I40E_ERR_NVM;
- goto asq_send_command_exit;
- }
-
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -853,7 +848,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
- u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
@@ -861,9 +855,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
- udelay(delay_len);
- total_delay += delay_len;
+ usleep_range(1000, 2000);
+ total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -903,9 +896,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
- if (!status && i40e_is_nvm_update_op(desc))
- hw->aq.nvm_busy = true;
-
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
@@ -958,9 +948,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -982,10 +969,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
- e->msg_size = min(datalen, e->msg_size);
- if (e->msg_buf != NULL && (e->msg_size != 0))
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
- e->msg_size);
+ e->msg_len);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
@@ -1021,7 +1008,6 @@ clean_arq_element_out:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
- hw->aq.nvm_busy = false;
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ba38a89c79d6..564d0b0192f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -28,6 +28,7 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
+#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -76,7 +77,8 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
- u16 msg_size;
+ u16 msg_len;
+ u16 buf_len;
u8 *msg_buf;
};
@@ -93,7 +95,6 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
@@ -108,7 +109,7 @@ struct i40e_adminq_info {
* i40e_aq_rc_to_posix - convert errors to user-land codes
* aq_rc: AdminQ error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ return -ERANGE;
return aq_to_posix[aq_rc];
}
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
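[Editor's sketch: the i40e_aq_rc_to_posix() change above adds two guards to the table lookup: a timed-out admin-queue command maps to -EAGAIN because its return code is meaningless, and codes beyond the table return -ERANGE instead of indexing past it. A generic, bounds-checked version of that mapping with hypothetical firmware codes:]

#include <linux/errno.h>
#include <linux/kernel.h>

static int mydev_fw_rc_to_errno(bool timed_out, unsigned int fw_rc)
{
	static const int fw_to_errno[] = {
		0,		/* FW_RC_OK */
		-EPERM,		/* FW_RC_EPERM */
		-ENOENT,	/* FW_RC_ENOENT */
		-EIO,		/* FW_RC_EIO */
		-EINVAL,	/* FW_RC_EINVAL */
	};

	/* The code is undefined if the command never completed. */
	if (timed_out)
		return -EAGAIN;

	/* Never index past the table for codes newer than this driver. */
	if (fw_rc >= ARRAY_SIZE(fw_to_errno))
		return -ERANGE;

	return fw_to_errno[fw_rc];
}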
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 15f289f2917f..8835aeeff23e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
* This file needs to comply with the Linux Kernel coding style.
*/
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -66,216 +66,217 @@ struct i40e_aq_desc {
*/
/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
/* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
/* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
/* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -302,7 +303,7 @@ enum i40e_admin_queue_opc {
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
/* internal (0x00XX) commands */
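The comment in the hunk above explains that I40E_CHECK_CMD_LENGTH exists to guarantee, at build time, that every command structure maps onto the 16-byte raw descriptor array. As a minimal standalone sketch (not taken from this patch), the same kind of check can be written with C11 _Static_assert; the EXAMPLE_* names and the example_cmd layout below are invented for illustration only.

#include <stdint.h>

#define EXAMPLE_CHECK_STRUCT_LEN(n, X) \
	_Static_assert(sizeof(struct X) == (n), #X " must be exactly " #n " bytes")
#define EXAMPLE_CHECK_CMD_LENGTH(X)	EXAMPLE_CHECK_STRUCT_LEN(16, X)

/* 2 + 2 + 4 + 4 + 4 = 16 bytes, no padding under natural alignment */
struct example_cmd {
	uint16_t flags;
	uint16_t opcode;
	uint32_t param0;
	uint32_t param1;
	uint8_t  reserved[4];
};
EXAMPLE_CHECK_CMD_LENGTH(example_cmd);	/* compilation fails if the size drifts */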
@@ -320,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
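Several of the indirect commands in this header, i40e_aqc_driver_version above included, pass the DMA address of an external buffer as an addr_high/addr_low pair of little-endian 32-bit words. The sketch below only illustrates the usual split of a 64-bit address into those two halves; the helper name is invented, and the cpu_to_le32() conversion kernel code would apply to each half is elided to keep it standalone.

#include <stdint.h>

/* Illustrative only: derive the two 32-bit halves stored in addr_high and
 * addr_low from a 64-bit buffer address (host byte order shown here).
 */
static void example_set_buf_addr(uint32_t *addr_high, uint32_t *addr_low,
				 uint64_t dma_addr)
{
	*addr_high = (uint32_t)(dma_addr >> 32);
	*addr_low  = (uint32_t)(dma_addr & 0xFFFFFFFFu);
}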
@@ -351,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
@@ -373,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
@@ -384,123 +385,123 @@ struct i40e_aqc_list_capabilites {
I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
};
/* list of caps */
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
- __le16 table_id;
- __le32 pfpm_proxyfc;
- __le32 ip_addr;
- u8 mac_addr[6];
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
};
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
};
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
};
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
@@ -516,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
@@ -544,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
* command
*/
struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -556,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
};
struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
};
/* Get Switch Configuration (indirect 0x0200)
@@ -591,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp {
* the first in the array is the header, remainder are elements
*/
struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
};
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
};
/* Add VSI (indirect 0x0210)
@@ -671,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
* uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -706,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
struct i40e_aqc_vsi_properties_data {
/* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
/* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
/* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
/* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
- u8 queueing_opt_reserved[3];
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
/* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
+ u8 up_enable_bits;
+ u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
- u8 cmd_reserved[8];
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
- __le16 qs_handle[8];
+ __le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
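The tc_mapping words above are built with the SHIFT/MASK idiom that recurs throughout this header: per the masks, each 16-bit entry carries a 9-bit queue-offset field and a 3-bit queue-number field. The standalone sketch below only demonstrates that packing pattern; the EX_* names copy the mask values for illustration, and what the firmware expects in each field is defined by the device specification rather than shown here.

#include <stdint.h>

#define EX_TC_QUE_OFFSET_SHIFT	0
#define EX_TC_QUE_OFFSET_MASK	(0x1FF << EX_TC_QUE_OFFSET_SHIFT)
#define EX_TC_QUE_NUMBER_SHIFT	9
#define EX_TC_QUE_NUMBER_MASK	(0x7 << EX_TC_QUE_NUMBER_SHIFT)

/* Pack one tc_mapping word from its two sub-fields; kernel code would store
 * the result as cpu_to_le16(word).
 */
static uint16_t example_pack_tc_mapping(uint16_t queue_offset, uint16_t que_number)
{
	uint16_t word = 0;

	word |= (queue_offset << EX_TC_QUE_OFFSET_SHIFT) & EX_TC_QUE_OFFSET_MASK;
	word |= (que_number << EX_TC_QUE_NUMBER_SHIFT) & EX_TC_QUE_NUMBER_MASK;
	return word;
}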
@@ -830,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
* (IS_CTRL_PORT only works on add PV)
*/
struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -859,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
*/
struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
- u8 enable_tcs;
- u8 reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
+ u8 reserved[6];
+ __le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -909,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -928,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
/* used for the command for most vlan commands */
struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
/* indirect data for command and response */
struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_macvlan_completion {
@@ -978,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
*/
struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
/* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
};
/* Add VLAN (indirect 0x0252)
@@ -998,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data {
* use the generic i40e_aqc_macvlan for the command
*/
struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
+ __le16 vlan_tag;
+ u8 vlan_flags;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
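i40e_aqc_set_vsi_promiscuous_modes above shares one set of flag definitions between promiscuous_flags and valid_flags. The usual reading of such a pair, assumed in this sketch rather than stated by the patch, is that valid_flags names the bits the firmware should apply while promiscuous_flags carries their new values, so modes that are not mentioned stay untouched. The helper and the EX_ constant are invented for illustration.

#include <stdint.h>

/* Toggle only the unicast-promiscuous bit, leaving the other modes alone. */
#define EX_PROMISC_UNICAST	0x01

static void example_set_unicast_promisc(uint16_t *promiscuous_flags,
					uint16_t *valid_flags, int enable)
{
	*valid_flags = EX_PROMISC_UNICAST;			/* bit being updated */
	*promiscuous_flags = enable ? EX_PROMISC_UNICAST : 0;	/* its new value */
}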
@@ -1059,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1084,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
+ __le16 tag;
+ u8 reserved[12];
};
/* Add multicast E-Tag (direct 0x0257)
@@ -1097,22 +1097,22 @@ struct i40e_aqc_remove_tag {
* and no external data
*/
struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
@@ -1120,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1145,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
* and the generic direct completion structure
*/
struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
+ __le16 queue;
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1179,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
* and the generic indirect completion structure
*/
struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_add_remove_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
union {
struct {
u8 reserved[12];
@@ -1205,49 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 data[16];
} v6;
} ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
/* 0x0007 reserved */
/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
/* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
};
struct i40e_aqc_remove_cloud_filters_completion {
@@ -1269,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
struct i40e_aqc_add_delete_mirror_rule {
__le16 seid;
__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
__le16 num_entries;
__le16 destination; /* VSI for add, rule id for delete */
__le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
@@ -1286,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1302,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
* the command and response use the same descriptor structure
*/
struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1321,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
* this generic struct to pass the SEID in param0
*/
struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1336,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
/* Configure VSI BW limits (direct 0x0400) */
struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1350,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
};
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
};
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1411,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
* Disable Physical Port ETS (indirect 0x0415)
*/
struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
};
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
};
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
};
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Suspend/resume port TX traffic
@@ -1490,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (indirect 0x041D)
*/
struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
};
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
enum i40e_aq_phy_type {
I40E_PHY_TYPE_SGMII = 0x0,
@@ -1578,147 +1578,147 @@ struct i40e_aqc_module_desc {
};
struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
/* bits 0-2 use the values from get_phy_abilities_resp */
#define I40E_AQ_PHY_ENABLE_LINK 0x08
#define I40E_AQ_PHY_ENABLE_AN 0x10
#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 reserved[3];
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
/* Set MAC Config command data structure (direct 0x0603) */
struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
/* Restart Auto-Negotiation (direct 0x605) */
struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
/* Set event mask command (direct 0x613) */
struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1728,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
* Get Link Partner AN advt register (direct 0x0616)
*/
struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Debug command (0x0622) */
struct i40e_aqc_set_phy_debug {
- u8 command_flags;
+ u8 command_flags;
#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
@@ -1757,15 +1757,15 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
/* NVM Read command (indirect 0x0701)
@@ -1773,40 +1773,40 @@ enum i40e_aq_phy_reg_type {
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define ANVM_READ_SINGLE_FEATURE 0
#define ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
/* NVM Config Write (indirect 0x0705) */
struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1831,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field {
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1870,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
* uses i40e_aq_desc
*/
struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
- __le32 mode;
+ __le32 mode;
#define I40E_AQ_ALTERNATE_MODE_NONE 0
#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1896,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
/* Lan Queue Overflow Event (direct, 0x1001) */
struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
/* Get LLDP MIB (indirect 0x0A00) */
struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1931,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
* also used for the event (with type in the command field)
*/
struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1945,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
* Delete LLDP TLV (indirect 0x0A04)
*/
struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
/* Update LLDP TLV (indirect 0x0A03) */
struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1981,57 +1981,97 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
*/
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3;
+ __le16 oper_app_prio;
+ u8 reserved4;
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
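The shift/mask pairs added above pack three 3-bit CEE application priorities into oper_app_prio and three 3-bit status fields into tlv_status. A minimal sketch of how a consumer pulls one of them out once the word has been byte-swapped with le16_to_cpu(); the helper name is hypothetical, and the driver open-codes the same arithmetic in i40e_cee_to_dcb_config() later in this patch:

/* Hypothetical helper, not part of this patch: extract the CEE FCoE
 * application priority from an already-byteswapped oper_app_prio word.
 */
static inline u8 i40e_cee_fcoe_app_prio(u16 oper_app_prio)
{
	return (oper_app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
	       I40E_AQC_CEE_APP_FCOE_SHIFT;
}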
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2044,11 +2084,11 @@ struct i40e_aqc_tunnel_key_structure {
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
@@ -2061,21 +2101,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- u8 param_value2[8];
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2087,18 +2127,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* set test mode (0xFF01, internal) */
struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2151,21 +2191,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 30056b25d94e..3d741ee99a2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@@ -549,7 +550,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
i40e_status status = 0;
- u32 reg;
+ u32 port, ari, func_rid;
i40e_set_mac_type(hw);
@@ -562,18 +563,17 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
- /* Determine the PF number based on the PCI fn */
- reg = rd32(hw, I40E_GLPCI_CAPSUP);
- if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
- hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ /* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
else
- hw->pf_id = (u8)hw->bus.func;
+ hw->pf_id = (u8)(func_rid & 0x7);
status = i40e_init_nvm(hw);
return status;
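A worked example of the routing-ID handling added above, with an illustrative register value: if I40E_PF_FUNC_RID reads 0x2a, the ARI branch keeps all eight function bits while the legacy branch keeps only the low three.

/* Illustrative values only; same masking as the code above. */
u32 func_rid = 0x2a;
u8 pf_id_ari = (u8)(func_rid & 0xff);    /* 0x2a with ARI routing enabled */
u8 pf_id_legacy = (u8)(func_rid & 0x7);  /* 0x02 without ARI */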
@@ -790,7 +790,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 100
+#define I40E_PF_RESET_WAIT_COUNT 110
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -1420,6 +1420,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
}
/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = cpu_to_le16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
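A minimal caller sketch for i40e_aq_set_phy_int_mask(); the mask value, the pf/hw pointers in scope and the error handling are assumptions for illustration, built from the I40E_AQ_EVENT_* bits defined earlier in this patch:

i40e_status err;

err = i40e_aq_set_phy_int_mask(&pf->hw, I40E_AQ_EVENT_LINK_UPDOWN, NULL);
if (err)
	dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n",
		 pf->hw.aq.asq_last_status);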
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2632,6 +2659,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
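A minimal caller sketch, assuming the 32-byte response struct from the admin-queue header earlier in this patch; the real caller added to i40e_dcb.c below also handles the firmware-version split and the 24-byte v1 layout:

struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
i40e_status ret;

ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, sizeof(cee_cfg), NULL);
if (!ret) {
	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
	/* ...then translate cee_cfg into hw->local_dcbx_config */
}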
+/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add
@@ -3189,6 +3244,26 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_set_pci_config_data - store PCI bus info
* @hw: pointer to hardware structure
* @link_status: the link status word from PCI config space
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 036570d76176..3ce43588592d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -59,7 +59,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
- struct i40e_ieee_ets_config *etscfg;
+ struct i40e_dcb_ets_config *etscfg;
u8 *buf = tlv->tlvinfo;
u16 offset = 0;
u8 priority;
@@ -407,6 +407,166 @@ free_mem:
}
/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add APPs if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
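A worked example of the PGID unpacking above, with an illustrative byte: each entry of oper_prio_tc[] carries two 4-bit priority-group IDs, and the PRIO_1/PRIO_0 names refer to nibble position, so the high nibble lands in the even-numbered user priority slot.

u8 byte0 = 0x21;                          /* sample oper_prio_tc[0] value */
u8 pg_prio0 = (byte0 & I40E_CEE_PGID_PRIO_1_MASK) >>
	      I40E_CEE_PGID_PRIO_1_SHIFT;  /* 2: group for user priority 0 */
u8 pg_prio1 = (byte0 & I40E_CEE_PGID_PRIO_0_MASK) >>
	      I40E_CEE_PGID_PRIO_0_SHIFT;  /* 1: group for user priority 1 */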
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add APPs if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
* i40e_get_dcb_config
* @hw: pointer to the hw struct
*
@@ -415,7 +575,44 @@ free_mem:
i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
{
i40e_status ret = 0;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 IEEE only */
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4))
+ goto ieee;
+
+ /* If Firmware version == v4.33 use old CEE struct */
+ if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled, try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ goto ieee;
+ else
+ goto out;
+ieee:
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
/* Get Local DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
&hw->local_dcbx_config);
@@ -426,6 +623,10 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
out:
return ret;
}
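A compact restatement of the firmware-version gates above, kept as a comment for reference (illustrative, not exhaustive):

/*
 * fw 4.x with x < 33, or any fw below 4.0  ->  IEEE-only path (goto ieee)
 * fw 4.33 exactly                          ->  CEE query, 24-byte (0x18) v1 response
 * fw 4.34+ or newer major versions         ->  CEE query, 32-byte (0x20) response
 * A CEE query answered with ENOENT still falls back to the IEEE MIB read.
 */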
@@ -439,10 +640,27 @@ out:
i40e_status i40e_init_dcb(struct i40e_hw *hw)
{
i40e_status ret = 0;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
if (!hw->func_caps.dcb)
return ret;
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
/* Get DCBX status */
ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
if (ret)
@@ -454,6 +672,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
case I40E_DCBX_STATUS_IN_PROGRESS:
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
break;
case I40E_DCBX_STATUS_DISABLED:
return ret;
@@ -470,3 +690,33 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
return ret;
}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ i40e_status ret = 0;
+ u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
+ sizeof(struct i40e_lldp_variables),
+ (u8 *)lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
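The per-port AdminStatus consumed by i40e_init_dcb() above is packed four bits per port inside lldp_cfg.adminstatus; a worked example with an illustrative NVM value:

/* Illustrative only: lldp_cfg.adminstatus == 0x0020 gives
 * (0x0020 >> (0 * 4)) & 0xF == 0 for port 0, so DCBX is reported
 * disabled there, while port 1 reads back 0x2 and proceeds to the
 * DCBX status query.
 */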
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 34cf1c30c7ff..e137e3fac8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -65,6 +65,11 @@
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 00bc0cdb3a03..183dcb63ce98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -207,7 +207,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
* VSI
**/
static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
struct net_device *dev = vsi->netdev;
struct dcb_app sapp;
@@ -229,7 +229,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
* Delete given APP from all the VSIs for given PF
**/
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
int v, err;
for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -252,7 +252,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
* Find given APP in the DCB configuration
**/
static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
int i;
@@ -277,7 +277,7 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *new_cfg)
{
- struct i40e_ieee_app_priority_table app;
+ struct i40e_dcb_app_priority_table app;
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 7067f4b9159c..433a55886ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -773,7 +773,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
{
struct i40e_tx_desc *txd;
union i40e_rx_desc *rxd;
- struct i40e_ring ring;
+ struct i40e_ring *ring;
struct i40e_vsi *vsi;
int i;
@@ -792,29 +792,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
vsi_seid);
return;
}
- if (is_rx_ring)
- ring = *vsi->rx_rings[ring_id];
- else
- ring = *vsi->tx_rings[ring_id];
+
+ ring = kmemdup(is_rx_ring
+ ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
+ sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return;
+
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
- for (i = 0; i < ring.count; i++) {
+ for (i = 0; i < ring->count; i++) {
if (!is_rx_ring) {
- txd = I40E_TX_DESC(&ring, i);
+ txd = I40E_TX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(&ring, i);
+ rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr);
} else {
- rxd = I40E_RX_DESC(&ring, i);
+ rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
@@ -823,26 +826,26 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
}
}
} else if (cnt == 3) {
- if (desc_n >= ring.count || desc_n < 0) {
+ if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
return;
}
if (!is_rx_ring) {
- txd = I40E_TX_DESC(&ring, desc_n);
+ txd = I40E_TX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(&ring, desc_n);
+ rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
- rxd = I40E_RX_DESC(&ring, desc_n);
+ rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
@@ -852,6 +855,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+ kfree(ring);
}
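The hunk above replaces a by-value copy of struct i40e_ring with a heap duplicate so the debugfs handler no longer carries the whole ring on its stack; a sketch of the pattern using the patch's own names (as an aside, the early return in the "descriptor %d not found" branch above still skips the kfree()):

struct i40e_ring *ring;

ring = kmemdup(is_rx_ring ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
	       sizeof(*ring), GFP_KERNEL);
if (!ring)
	return;
/* ...dump descriptors through ring->... */
kfree(ring);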
/**
@@ -895,90 +899,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
}
/**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
- * @pf: the i40e_pf created in command write
- * @stats: the stats structure to be dumped
- **/
-static void i40e_dbg_dump_stats(struct i40e_pf *pf,
- struct i40e_hw_port_stats *stats)
-{
- int i;
-
- dev_info(&pf->pdev->dev, " stats:\n");
- dev_info(&pf->pdev->dev,
- " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
- stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
- dev_info(&pf->pdev->dev,
- " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
- stats->mac_local_faults, stats->mac_remote_faults,
- stats->rx_length_errors);
- dev_info(&pf->pdev->dev,
- " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
- stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
- dev_info(&pf->pdev->dev,
- " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
- stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
- dev_info(&pf->pdev->dev,
- " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
- stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
- dev_info(&pf->pdev->dev,
- " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
- stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
- dev_info(&pf->pdev->dev,
- " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
- stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
- dev_info(&pf->pdev->dev,
- " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
- stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
- dev_info(&pf->pdev->dev,
- " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
- stats->tx_size_1023, stats->tx_size_big,
- stats->mac_short_packet_dropped);
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_rx[i],
- i+1, stats->priority_xon_rx[i+1],
- i+2, stats->priority_xon_rx[i+2],
- i+3, stats->priority_xon_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xoff_rx[i],
- i+1, stats->priority_xoff_rx[i+1],
- i+2, stats->priority_xoff_rx[i+2],
- i+3, stats->priority_xoff_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_tx[i],
- i+1, stats->priority_xon_tx[i+1],
- i+2, stats->priority_xon_tx[i+2],
- i+3, stats->priority_xon_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xoff_tx[i],
- i+1, stats->priority_xoff_tx[i+1],
- i+2, stats->priority_xoff_tx[i+2],
- i+3, stats->priority_xoff_tx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_2_xoff[i],
- i+1, stats->priority_xon_2_xoff[i+1],
- i+2, stats->priority_xon_2_xoff[i+2],
- i+3, stats->priority_xon_2_xoff[i+3]);
- }
-
- i40e_dbg_dump_eth_stats(pf, &stats->eth);
-}
-
-/**
* i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
@@ -1342,11 +1262,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, "dump desc aq\n");
}
- } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
- dev_info(&pf->pdev->dev, "pf stats:\n");
- i40e_dbg_dump_stats(pf, &pf->stats);
- dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
- i40e_dbg_dump_stats(pf, &pf->stats_offsets);
} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
dev_info(&pf->pdev->dev,
"core reset count: %d\n", pf->corer_count);
@@ -1402,6 +1317,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
bw_data = NULL;
dev_info(&pf->pdev->dev,
+ "port dcbx_mode=%d\n", cfg->dcbx_mode);
+ dev_info(&pf->pdev->dev,
"port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
cfg->etscfg.willing, cfg->etscfg.cbs,
cfg->etscfg.maxtcs);
@@ -1464,8 +1381,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
- dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
- dev_info(&pf->pdev->dev, "dump stats\n");
+ dev_info(&pf->pdev->dev, "dump switch\n");
+ dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
dev_info(&pf->pdev->dev, "dump reset stats\n");
dev_info(&pf->pdev->dev, "dump port\n");
dev_info(&pf->pdev->dev,
@@ -1580,7 +1497,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
- "%hx %hx %hx %hx %x %x %x %x %x %x",
+ "%hi %hi %hi %hi %i %i %i %i %i %i",
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
@@ -1628,7 +1545,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
- "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1dda467ae1ac..951e8767fc50 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -40,8 +40,9 @@ struct i40e_stats {
.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
+
#define I40E_NETDEV_STAT(_net_stat) \
- I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
@@ -264,6 +265,14 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
+ case I40E_DEV_ID_10G_BASE_T:
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
+ break;
default:
/* all the rest are 10G/1G */
ecmd->supported = SUPPORTED_10000baseT_Full |
@@ -322,9 +331,13 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_10GBASE_T:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
@@ -335,14 +348,22 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_1000BASE_T:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
@@ -426,6 +447,9 @@ no_valid_phy_type:
case I40E_LINK_SPEED_1GB:
ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
default:
break;
}
@@ -528,7 +552,7 @@ static int i40e_set_settings(struct net_device *netdev,
}
/* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
- config.abilities = abilities.abilities |
+ config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
change = true;
}
@@ -621,11 +645,19 @@ static void i40e_get_pauseparam(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
pause->autoneg =
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
+ /* PFC enabled so report LFC as off */
+ if (dcbx_cfg->pfc.pfcenable) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ return;
+ }
+
if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
@@ -649,6 +681,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
i40e_status status;
u8 aq_failures;
@@ -670,8 +703,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
- if (hw->fc.current_mode == I40E_FC_PFC) {
- netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+ if (dcbx_cfg->pfc.pfcenable) {
+ netdev_info(netdev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
return -EOPNOTSUPP;
}
@@ -788,7 +822,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_pf *pf = np->vsi->back;
- int ret_val = 0, len;
+ int ret_val = 0, len, offset;
u8 *eeprom_buff;
u16 i, sectors;
bool last;
@@ -801,19 +835,21 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
magic = hw->vendor_id | (hw->device_id << 16);
if (eeprom->magic && eeprom->magic != magic) {
+ struct i40e_nvm_access *cmd;
int errno;
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
return -EINVAL;
- ret_val = i40e_nvmupd_command(hw,
- (struct i40e_nvm_access *)eeprom,
- bytes, &errno);
+ cmd = (struct i40e_nvm_access *)eeprom;
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
if (ret_val)
dev_info(&pf->pdev->dev,
- "NVMUpdate read failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
+ "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
return errno;
}
@@ -842,20 +878,29 @@ static int i40e_get_eeprom(struct net_device *netdev,
len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
last = true;
}
- ret_val = i40e_aq_read_nvm(hw, 0x0,
- eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
- len,
+ offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
+ ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val) {
+ if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed, invalid offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
- "read NVM failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
- goto release_nvm;
+ "read NVM failed, access, offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed offset %d err=%d status=0x%x\n",
+ offset, ret_val, hw->aq.asq_last_status);
+ break;
}
}
-release_nvm:
i40e_release_nvm(hw);
memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
@@ -883,6 +928,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_pf *pf = np->vsi->back;
+ struct i40e_nvm_access *cmd;
int ret_val = 0;
int errno;
u32 magic;
@@ -900,12 +946,14 @@ static int i40e_set_eeprom(struct net_device *netdev,
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
return -EBUSY;
- ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom,
- bytes, &errno);
- if (ret_val)
+ cmd = (struct i40e_nvm_access *)eeprom;
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+ if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
dev_info(&pf->pdev->dev,
- "NVMUpdate write failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
+ "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
return errno;
}
@@ -1292,6 +1340,10 @@ static int i40e_get_ts_info(struct net_device *dev,
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ /* only report HW timestamping if PTP is enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return ethtool_op_get_ts_info(dev, info);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
@@ -1355,6 +1407,9 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
netif_info(pf, hw, netdev, "eeprom test\n");
*data = i40e_diag_eeprom_test(&pf->hw);
+ /* forcibly clear the NVM Update state machine */
+ pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return *data;
}
@@ -1367,7 +1422,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
netif_info(pf, hw, netdev, "interrupt test\n");
wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
(I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
usleep_range(1000, 2000);
*data = (swc_old == pf->sw_int_count);
@@ -1551,13 +1609,10 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
} else if (ec->rx_coalesce_usecs == 0) {
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- i40e_irq_dynamic_disable(vsi, vector);
if (ec->use_adaptive_rx_coalesce)
- netif_info(pf, drv, netdev,
- "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n");
+ netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
} else {
- netif_info(pf, drv, netdev,
- "Invalid value, Rx-usecs range is 0, 8-8160\n");
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -1566,13 +1621,11 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
} else if (ec->tx_coalesce_usecs == 0) {
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- i40e_irq_dynamic_disable(vsi, vector);
if (ec->use_adaptive_tx_coalesce)
- netif_info(pf, drv, netdev,
- "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n");
+ netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
} else {
netif_info(pf, drv, netdev,
- "Invalid value, Tx-usecs range is 0, 8-8160\n");
+ "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 5d01db1d789b..a8b8bd95108d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -343,7 +343,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
**/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
{
- struct i40e_ieee_app_priority_table app;
+ struct i40e_dcb_app_priority_table app;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
u8 tc, i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3a7f4a4b775..a5f2660d552d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
/* required last entry */
{0, }
};
@@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
+ struct i40e_ring *p;
u32 rx_page, rx_buf;
+ u64 bytes, packets;
+ unsigned int start;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_buf = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
- struct i40e_ring *p;
- u64 bytes, packets;
- unsigned int start;
-
/* locate Tx ring */
p = ACCESS_ONCE(vsi->tx_rings[q]);
@@ -2382,6 +2382,35 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
}
/**
+ * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
+ * @ring: The Tx ring to configure
+ *
+ * This enables/disables XPS for a given Tx descriptor ring
+ * based on the TCs enabled for the VSI that ring belongs to.
+ **/
+static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ cpumask_var_t mask;
+
+ if (ring->q_vector && ring->netdev) {
+ /* Single TC mode enable XPS */
+ if (vsi->tc_config.numtc <= 1 &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->affinity_mask,
+ ring->queue_index);
+ } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ /* Disable XPS to allow selection based on TC */
+ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+ netif_set_xps_queue(ring->netdev, mask,
+ ring->queue_index);
+ free_cpumask_var(mask);
+ }
+ }
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -2404,13 +2433,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring->atr_sample_rate = 0;
}
- /* initialize XPS */
- if (ring->q_vector && ring->netdev &&
- vsi->tc_config.numtc <= 1 &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
- netif_set_xps_queue(ring->netdev,
- &ring->q_vector->affinity_mask,
- ring->queue_index);
+ /* configure XPS */
+ i40e_config_xps_tx_ring(ring);
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
@@ -2462,10 +2486,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- if (vsi->type == I40E_VSI_VMDQ2)
+ if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- else
+ qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
+ } else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ }
+
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -3440,7 +3468,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
@@ -3466,7 +3494,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* warn the TX unit of coming changes */
i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
if (!enable)
- udelay(10);
+ usleep_range(10, 20);
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
@@ -3488,6 +3516,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+ /* No waiting for the Tx queue to disable */
+ if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+ continue;
/* wait for the change to finish */
ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -3526,7 +3557,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
@@ -3855,6 +3886,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (test_bit(__I40E_DOWN, &vsi->state))
return;
+ /* No need to disable FCoE VSI when Tx suspended */
+ if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
+ vsi->type == I40E_VSI_FCOE) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "%s: VSI seid %d skipping FCoE VSI disable\n",
+ __func__, vsi->seid);
+ return;
+ }
+
set_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
@@ -3907,6 +3947,57 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
+ * @vsi: the VSI being configured
+ *
+ * This function waits for the given VSI's Tx queues to be disabled.
+ **/
+static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q, ret;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ /* Check and wait for the disable status of the queue */
+ ret = i40e_pf_txq_wait(pf, pf_q, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: VSI seid %d Tx ring %d disable timeout\n",
+ __func__, vsi->seid, pf_q);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
+ * @pf: the PF
+ *
+ * This function waits for the Tx queues to be in disabled state for all the
+ * VSIs that are managed by this PF.
+ **/
+static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
+{
+ int v, ret = 0;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ /* No need to wait for FCoE VSI queues */
+ if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+ ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+#endif
/**
* i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
* @dcbcfg: the corresponding DCBx configuration structure
@@ -4378,6 +4469,31 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/**
+ * i40e_resume_port_tx - Resume port Tx
+ * @pf: PF struct
+ *
+ * Resume a port's Tx and issue a PF reset in case of failure to
+ * resume.
+ **/
+static int i40e_resume_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_resume_port_tx(hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "AQ command Resume Port Tx failed = %d\n",
+ pf->hw.aq.asq_last_status);
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
+/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -4413,6 +4529,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ dev_dbg(&pf->pdev->dev,
+ "DCBX offload is supported for this PF.\n");
}
} else {
dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
@@ -4449,6 +4567,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_1GB:
strlcpy(speed, "1000 Mbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_100MB:
+ strncpy(speed, "100 Mbps", SPEED_SIZE);
+ break;
default:
break;
}
@@ -4479,12 +4600,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
static int i40e_up_complete(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- u8 set_fc_aq_fail = 0;
int err;
- /* force flow control off */
- i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
-
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
i40e_vsi_configure_msix(vsi);
else
@@ -4753,9 +4870,11 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_set_queues;
} else if (vsi->type == I40E_VSI_FDIR) {
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
+
} else {
err = -EINVAL;
goto err_setup_rx;
@@ -4995,6 +5114,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
+ dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
+ need_reconfig);
return need_reconfig;
}
@@ -5022,11 +5143,16 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ dev_dbg(&pf->pdev->dev,
+ "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
return ret;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ dev_dbg(&pf->pdev->dev,
+ "%s: LLDP event mib type %s\n", __func__,
+ type ? "remote" : "local");
if (type == I40E_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5035,12 +5161,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- /* Convert/store the DCBX data from LLDPDU temporarily */
memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
- ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ /* Store the old configuration */
+ tmp_dcbx_cfg = *dcbx_cfg;
+
+ /* Get updated DCBX data from firmware */
+ ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- /* Error in LLDPDU parsing return */
- dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
goto exit;
}
@@ -5050,12 +5178,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
-
- i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
- /* Overwrite the new configuration */
- *dcbx_cfg = tmp_dcbx_cfg;
+ i40e_dcbnl_flush_apps(pf, dcbx_cfg);
if (!need_reconfig)
goto exit;
@@ -5066,13 +5191,24 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
/* Changes in configuration update VEB/VSI */
i40e_dcb_reconfigure(pf);
- i40e_pf_unquiesce_all_vsi(pf);
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto exit;
+
+ /* Wait for the PF's Tx queues to be disabled */
+ ret = i40e_pf_wait_txq_disabled(pf);
+ if (!ret)
+ i40e_pf_unquiesce_all_vsi(pf);
exit:
return ret;
}
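
For readability, here is a condensed outline of the MIB-change flow the rewritten i40e_handle_lldp_event() above follows once a reconfiguration is needed. The names are the ones used in the hunk, but this is only a sketch of the ordering, not a drop-in replacement.

/* Outline of the flow above: suspend port Tx, quiesce, reconfigure,
 * resume, and only unquiesce once every Tx queue is really disabled.
 */
static int lldp_mib_change_outline(struct i40e_pf *pf)
{
        int ret;

        set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        i40e_pf_quiesce_all_vsi(pf);            /* stop traffic on all VSIs */
        i40e_dcb_reconfigure(pf);               /* push new TC setup to VEB/VSIs */

        ret = i40e_resume_port_tx(pf);          /* schedules a PF reset on failure */
        clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        if (ret)
                return ret;                     /* no point resuming VSIs */

        ret = i40e_pf_wait_txq_disabled(pf);
        if (!ret)
                i40e_pf_unquiesce_all_vsi(pf);
        return ret;
}
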
@@ -5212,6 +5348,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int flush_wait_retry = 50;
int reg;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+ return;
+
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
@@ -5273,6 +5412,9 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+ return;
+
if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
(i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
(i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
@@ -5310,8 +5452,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
break;
case I40E_VSI_SRIOV:
- break;
-
case I40E_VSI_VMDQ2:
case I40E_VSI_CTRL:
case I40E_VSI_MIRROR:
@@ -5353,14 +5493,21 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
static void i40e_link_event(struct i40e_pf *pf)
{
bool new_link, old_link;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ /* set this to force the get_link_status call to refresh state */
+ pf->hw.phy.get_link_info = true;
- new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+ new_link = i40e_get_link_status(&pf->hw);
- if (new_link == old_link)
+ if (new_link == old_link &&
+ (test_bit(__I40E_DOWN, &vsi->state) ||
+ new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -5368,7 +5515,7 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
else
- i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+ i40e_vsi_link_event(vsi, new_link);
if (pf->vf)
i40e_vc_notify_link_state(pf);
@@ -5418,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
(I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
} else {
u16 vec = vsi->base_vector - 1;
u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
for (i = 0; i < vsi->num_q_vectors; i++, vec++)
wr32(&vsi->back->hw,
I40E_PFINT_DYN_CTLN(vec), val);
@@ -5433,7 +5586,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
}
/**
- * i40e_watchdog_subtask - Check and bring link up
+ * i40e_watchdog_subtask - periodic checks not using event driven response
* @pf: board private structure
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
@@ -5445,6 +5598,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies, (pf->service_timer_previous +
+ pf->service_timer_period)))
+ return;
+ pf->service_timer_previous = jiffies;
+
+ i40e_check_hang_subtask(pf);
+ i40e_link_event(pf);
+
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
@@ -5525,33 +5687,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
memcpy(&pf->hw.phy.link_info_old, hw_link_info,
sizeof(pf->hw.phy.link_info_old));
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct
+ * This completely ignores any state information
+ * in the ARQ event info, instead choosing to always
+ * issue the AQ update link status command.
+ */
+ i40e_link_event(pf);
+
/* check for unqualified module, if link is down */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
dev_err(&pf->pdev->dev,
"The driver failed to link because an unqualified module was detected.\n");
-
- /* update link status */
- hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
- hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
- hw_link_info->link_info = status->link_info;
- hw_link_info->an_info = status->an_info;
- hw_link_info->ext_info = status->ext_info;
- hw_link_info->lse_enable =
- le16_to_cpu(status->command_flags) &
- I40E_AQ_LSE_ENABLE;
-
- /* process the event */
- i40e_link_event(pf);
-
- /* Do a new status request to re-enable LSE reporting
- * and load new status information into the hw struct,
- * then see if the status changed while processing the
- * initial event.
- */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
}
/**
@@ -5607,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
if (oldval != val)
wr32(&pf->hw, pf->hw.aq.asq.len, val);
- event.msg_size = I40E_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return;
do {
- event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
break;
@@ -5634,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
le32_to_cpu(event.desc.cookie_high),
le32_to_cpu(event.desc.cookie_low),
event.msg_buf,
- event.msg_size);
+ event.msg_len);
break;
case i40e_aqc_opc_lldp_update_mib:
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
@@ -5740,6 +5888,9 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
+ /* Enable LB mode for the main VSI now that it is on a VEB */
+ i40e_enable_pf_switch_lb(pf);
+
/* create the remaining VSIs attached to this VEB */
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
@@ -5967,6 +6118,7 @@ static void i40e_send_version(struct i40e_pf *pf)
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 v;
@@ -6038,6 +6190,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret)
goto end_core_reset;
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ ret = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+
+ /* make sure our flow control settings are restored */
+ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -6092,6 +6258,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
@@ -6149,12 +6322,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+ u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
I40E_GL_MDET_TX_VF_NUM_SHIFT;
u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
event, queue, pf_num, vf_num);
@@ -6167,8 +6341,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_RX_FUNCTION_SHIFT;
u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
@@ -6298,7 +6473,6 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
i40e_sync_vxlan_filters_subtask(pf);
@@ -6676,6 +6850,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
{
i40e_status err = 0;
struct i40e_hw *hw = &pf->hw;
+ int other_vecs = 0;
int v_budget, i;
int vec;
@@ -6701,10 +6876,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
*/
pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
- v_budget = 1 + pf->num_lan_msix;
- v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+ other_vecs = 1;
+ other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
- v_budget++;
+ other_vecs++;
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -6714,7 +6889,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
#endif
/* Scale down if necessary, and the rings will share vectors */
- v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+ pf->num_lan_msix = min_t(int, pf->num_lan_msix,
+ (hw->func_caps.num_msix_vectors - other_vecs));
+ v_budget = pf->num_lan_msix + other_vecs;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -6964,20 +7141,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- /* Set of random keys generated using kernel random number generator */
- static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
- 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
- 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
u64 hena;
u32 reg_val;
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
@@ -7341,7 +7514,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -7356,18 +7529,18 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
return 0;
}
-#ifdef HAVE_FDB_OPS
-#ifdef USE_CONST_DEV_UC_CHAR
+/**
+ * i40e_ndo_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @flags: instructions from stack about fdb operation
+ */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
- u16 flags)
-#else
-static int i40e_ndo_fdb_add(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_pf *pf = np->vsi->back;
@@ -7376,6 +7549,11 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm,
if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
return -EOPNOTSUPP;
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return -EINVAL;
+ }
+
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
*/
@@ -7398,55 +7576,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm,
return err;
}
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
-#ifdef USE_CONST_DEV_UC_CHAR
-static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- const unsigned char *addr)
-#else
-static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr)
-#endif
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- struct i40e_pf *pf = np->vsi->back;
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- netdev_info(dev, "FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int i40e_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int idx)
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- struct i40e_pf *pf = np->vsi->back;
-
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
-
- return idx;
-}
-
-#endif /* USE_DEFAULT_FDB_DEL_DUMP */
-#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -7480,13 +7609,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
-#ifdef HAVE_FDB_OPS
.ndo_fdb_add = i40e_ndo_fdb_add,
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
- .ndo_fdb_del = i40e_ndo_fdb_del,
- .ndo_fdb_dump = i40e_ndo_fdb_dump,
-#endif
-#endif
};
/**
@@ -7682,6 +7805,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = 0x1; /* regular data port */
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -7931,8 +8058,8 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
- "failed to get queue tracking for VSI %d, err=%d\n",
- vsi->seid, vsi->base_vector);
+ "failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ vsi->num_q_vectors, vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
goto vector_setup_out;
@@ -7968,8 +8095,9 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
if (ret < 0) {
- dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
- vsi->seid, ret);
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err=%d\n",
+ vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -8066,7 +8194,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
-
+ if (veb) {
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
+ __func__);
+ return NULL;
+ }
+ i40e_enable_pf_switch_lb(pf);
+ }
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
veb = pf->veb[i];
@@ -8098,8 +8234,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
vsi->idx);
if (ret < 0) {
- dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
- vsi->seid, ret);
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err=%d\n",
+ vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -8206,6 +8343,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
veb->bw_max_quanta = ets_data.tc_bw_max;
veb->is_abs_credits = bw_data.absolute_credits_enable;
+ veb->enabled_tc = ets_data.tc_valid_bits;
tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
(le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -8719,6 +8857,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
+ /* fill in link information and enable LSE reporting */
+ i40e_update_link_info(&pf->hw, true);
+ i40e_link_event(pf);
+
+ /* Initialize user-specific link properties */
+ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED) ? true : false);
+
i40e_ptp_init(pf);
return ret;
@@ -8987,6 +9133,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ if (debug != -1) {
+ pf->msg_enable = pf->hw.debug_mask;
+ pf->msg_enable = debug;
+ }
+
/* do a special CORER for clearing PXE mode once at init */
if (hw->revision_id == 0 &&
(rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
@@ -9012,9 +9163,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+
snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
- "%s-pf%d:misc",
- dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+ "%s-%s:misc",
+ dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
err = i40e_init_shared_code(hw);
if (err) {
@@ -9158,6 +9310,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ err = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
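
One more aside on this file: the i40e_config_rss() hunk earlier replaces the hard-coded hash seed with netdev_rss_key_fill(), which hands back the kernel's shared boot-time random RSS key. A minimal, driver-agnostic sketch of that pattern follows; the helper name, key size and register-write callback are illustrative, not i40e definitions.

#include <linux/kernel.h>
#include <linux/netdevice.h>    /* netdev_rss_key_fill() */

/* Seed an RSS hash key from the kernel's shared random key instead of
 * a hard-coded table, then program it one 32-bit register at a time.
 */
static void example_program_rss_key(void (*wr_reg)(u32 off, u32 val),
                                    u32 key_base, unsigned int key_dwords)
{
        u32 rss_key[13];        /* 52 bytes, a common NIC key size */
        unsigned int i;

        if (key_dwords > ARRAY_SIZE(rss_key))
                key_dwords = ARRAY_SIZE(rss_key);

        netdev_rss_key_fill(rss_key, key_dwords * sizeof(u32));

        for (i = 0; i < key_dwords; i++)
                wr_reg(key_base + 4 * i, rss_key[i]);
}
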
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 25c4f9a3011f..3e70f2e45a47 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -61,7 +61,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
- hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
}
return ret_code;
@@ -80,46 +80,45 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
{
i40e_status ret_code = 0;
u64 gtime, timeout;
- u64 time = 0;
+ u64 time_left = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
- 0, &time, NULL);
+ 0, &time_left, NULL);
/* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* Store the timeout */
- hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
- if (ret_code) {
- /* Set the polling timeout */
- if (time > I40E_MAX_NVM_TIMEOUT)
- timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
- + gtime;
- else
- timeout = hw->nvm.hw_semaphore_timeout;
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
/* Poll until the current NVM owner timeouts */
- while (gtime < timeout) {
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
usleep_range(10000, 20000);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
ret_code = i40e_aq_request_resource(hw,
I40E_NVM_RESOURCE_ID,
- access, 0, &time,
+ access, 0, &time_left,
NULL);
if (!ret_code) {
hw->nvm.hw_semaphore_timeout =
- I40E_MS_TO_GTIME(time) + gtime;
+ I40E_MS_TO_GTIME(time_left) + gtime;
break;
}
- gtime = rd32(hw, I40E_GLVFGEN_TIMER);
}
if (ret_code) {
hw->nvm.hw_semaphore_timeout = 0;
- hw->nvm.hw_semaphore_wait =
- I40E_MS_TO_GTIME(time) + gtime;
- hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
- time);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
}
}
@@ -160,7 +159,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
udelay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
- hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
return ret_code;
}
@@ -179,7 +178,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
- hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
ret_code = I40E_ERR_PARAM;
goto read_nvm_exit;
}
@@ -202,8 +203,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
}
if (ret_code)
- hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
- offset);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
read_nvm_exit:
return ret_code;
@@ -263,14 +265,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* Firmware will check the module-based model.
*/
if ((offset + words) > hw->nvm.sr_size)
- hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */
- hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */
- hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
else
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
@@ -438,6 +446,22 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static char *i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+};
+
/**
* i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
@@ -471,6 +495,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
default:
/* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -501,7 +527,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
i40e_release_nvm(hw);
@@ -511,17 +538,22 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
- hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
}
break;
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
if (status)
@@ -534,7 +566,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
if (status)
@@ -547,22 +580,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
@@ -572,6 +611,9 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_ERR_NVM;
*errno = -ESRCH;
break;
@@ -611,6 +653,9 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -644,33 +689,38 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_LCB:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (!status) {
+ if (!status)
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- }
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
- if (status)
+ if (status) {
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
- if (status) {
+ if (status)
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
- } else {
+ else
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- }
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -702,8 +752,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
/* limits on data size */
if ((cmd->data_size < 1) ||
(cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
*errno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
@@ -755,12 +806,16 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
}
break;
}
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->aq.nvm_release_on_done);
if (upd_cmd == I40E_NVMUPD_INVALID) {
*errno = -EFAULT;
- hw_dbg(hw,
- "i40e_nvmupd_validate_command returns %d errno: %d\n",
- upd_cmd, *errno);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *errno);
}
return upd_cmd;
}
@@ -785,14 +840,18 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
bytes, last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -816,13 +875,17 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -847,13 +910,18 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
+
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 0988b5c1fe87..2fb4306597e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
bool atomic_reset);
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -173,6 +175,9 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
@@ -228,6 +233,10 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 537b6216971d..6d1ec926aa37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -382,11 +382,17 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
incval = I40E_PTP_1GB_INCVAL;
break;
case I40E_LINK_SPEED_100MB:
- dev_warn(&pf->pdev->dev,
- "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
- __func__);
+ {
+ static int warn_once;
+
+ if (!warn_once) {
+ dev_warn(&pf->pdev->dev,
+ "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
+ warn_once++;
+ }
incval = 0;
break;
+ }
case I40E_LINK_SPEED_40GB:
default:
incval = I40E_PTP_40GB_INCVAL;
@@ -418,6 +424,9 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
@@ -438,22 +447,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
struct hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
- u32 pf_id, tsyntype, regval;
+ u32 tsyntype, regval;
/* Reserved for future extensions. */
if (config->flags)
return -EINVAL;
- /* Confirm that 1588 is supported on this PF. */
- pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
- I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
- if (hw->pf_id != pf_id) {
- dev_err(&pf->pdev->dev,
- "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
- hw->pf_id, hw->port, pf_id);
- return -EPERM;
- }
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
@@ -556,6 +555,9 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -625,8 +627,22 @@ void i40e_ptp_init(struct i40e_pf *pf)
{
struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
struct i40e_hw *hw = &pf->hw;
+ u32 pf_id;
long err;
+ /* Only one PF is assigned to control 1588 logic per port. Do not
+ * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
+ */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id) {
+ pf->flags &= ~I40E_FLAG_PTP;
+ dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
+ __func__,
+ netdev->name);
+ return;
+ }
+
/* we have to initialize the lock first, since we can't control
* when the user will enter the PHC device entry points
*/
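
The PTP hunks above gate i40e_ptp_get_ts_config()/i40e_ptp_set_ts_config() on I40E_FLAG_PTP, so a PF that does not own the port's 1588 block now reports -EOPNOTSUPP. Those entry points are reached from userspace through the hardware timestamping ioctl; a minimal sketch follows, with "eth0" as a placeholder device name.

/* Request hardware Tx/Rx timestamping the way ptp4l/hwstamp_ctl do. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
                perror("SIOCSHWTSTAMP");        /* EOPNOTSUPP if 1588 is unsupported */
        return 0;
}
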
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3195d82e4942..04b441460bbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2399,12 +2399,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* hardware can't handle really short frames, hardware padding works
* beyond this point
*/
- if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
- if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
- return NETDEV_TX_OK;
- skb->len = I40E_MIN_TX_LEN;
- skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
- }
+ if (skb_put_padto(skb, I40E_MIN_TX_LEN))
+ return NETDEV_TX_OK;
return i40e_xmit_frame_ring(skb, tx_ring);
}
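
The i40e_lan_xmit_frame() change above replaces the open-coded short-frame padding with skb_put_padto(). The detail that makes the early return correct is that skb_put_padto() frees the skb when padding fails, so NETDEV_TX_OK is the right return value because the buffer has already been consumed. A hedged sketch of the canonical pattern, with MIN_TX_LEN and the descriptor work as placeholders:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MIN_TX_LEN      17      /* illustrative hardware minimum */

static netdev_tx_t example_xmit(struct sk_buff *skb,
                                struct net_device *netdev)
{
        /* Pads the frame to MIN_TX_LEN and extends skb->len; on
         * allocation failure the skb is freed for us, so just report OK.
         */
        if (skb_put_padto(skb, MIN_TX_LEN))
                return NETDEV_TX_OK;

        /* ...map buffers and post descriptors here... */
        return NETDEV_TX_OK;
}
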
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d7a625a6a14f..e60d3accb2e2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -30,10 +30,7 @@
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE 0x03F
-#define I40E_MIN_IRATE 0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index ce04d9093db6..c1f2eb963357 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -260,8 +261,7 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
- u64 hw_semaphore_wait; /* - || - */
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
@@ -380,9 +380,18 @@ struct i40e_fc_info {
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DCBX_MAX_APPS 32
#define I40E_LLDPDU_SIZE 1500
-
-/* IEEE 802.1Qaz ETS Configuration data */
-struct i40e_ieee_ets_config {
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
u8 willing;
u8 cbs;
u8 maxtcs;
@@ -391,34 +400,30 @@ struct i40e_ieee_ets_config {
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
-/* IEEE 802.1Qaz ETS Recommendation data */
-struct i40e_ieee_ets_recommend {
- u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
- u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
- u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz PFC Configuration data */
-struct i40e_ieee_pfc_config {
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcenable;
};
-/* IEEE 802.1Qaz Application Priority data */
-struct i40e_ieee_app_priority_table {
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
u8 priority;
u8 selector;
u16 protocolid;
};
struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
u32 numapps;
- struct i40e_ieee_ets_config etscfg;
- struct i40e_ieee_ets_recommend etsrec;
- struct i40e_ieee_pfc_config pfc;
- struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
/* Port hardware description */
@@ -476,6 +481,11 @@ struct i40e_hw {
u32 debug_mask;
};
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return hw->mac.type == I40E_MAC_VF;
+}
+
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
@@ -1371,6 +1381,18 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xD
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 70951d2edcad..61dd1b187624 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -79,6 +79,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
+ I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4eeed267e4b7..5bae89550657 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
* that the requested op was completed
* successfully
*/
- udelay(10);
+ usleep_range(10, 20);
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
rsd = true;
@@ -707,7 +707,6 @@ complete_reset:
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
}
-#ifdef CONFIG_PCI_IOV
/**
* i40e_enable_pf_switch_lb
@@ -715,7 +714,7 @@ complete_reset:
*
* enable switch loop back or die - no point in a return value
**/
-static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
@@ -742,7 +741,6 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
__func__, vsi->back->hw.aq.asq_last_status);
}
}
-#endif
/**
* i40e_disable_pf_switch_lb
@@ -1869,6 +1867,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
+ /* re-enable vflr interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
@@ -1885,12 +1889,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
}
}
- /* re-enable vflr interrupt cause */
- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
- reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- i40e_flush(hw);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 0adc61e1052d..9452f5247cff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -126,5 +126,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f206be917842..c1d25f8c1abc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -49,7 +49,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
- if (hw->mac.type == I40E_MAC_VF) {
+ if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
- u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
@@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (i40evf_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
- udelay(delay_len);
- total_delay += delay_len;
+ usleep_range(1000, 2000);
+ total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -838,9 +836,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- if (i40e_is_nvm_update_op(desc))
- hw->aq.nvm_busy = true;
-
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
@@ -907,9 +902,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -931,13 +923,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
- e->msg_size = min(datalen, e->msg_size);
- if (e->msg_buf != NULL && (e->msg_size != 0))
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
- e->msg_size);
-
- if (i40e_is_nvm_update_op(&e->desc))
- hw->aq.nvm_busy = false;
+ e->msg_len);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
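
In the i40evf_asq_send_command() hunk above, the completion wait moves from a 10 usec udelay() busy-wait to usleep_range(1000, 2000), so total_delay now counts roughly one-millisecond ticks against I40E_ASQ_CMD_TIMEOUT, which the header below redefines as 100 msecs; sleeping is acceptable because the send queue is serialized by the asq_mutex shown in that header. The ARQ event struct likewise splits the caller-supplied buffer size (buf_len) from the length actually received (msg_len). A hedged sketch of the resulting poll loop (i40evf_asq_done() and asq_cmd_timeout are the driver's own names; the timeout return value is illustrative):

/* Sketch: poll for descriptor writeback, sleeping ~1 ms per iteration.
 * hw->aq.asq_cmd_timeout is assumed to be expressed in milliseconds.
 */
static int asq_wait_sketch(struct i40e_hw *hw)
{
	u32 total_delay = 0;

	do {
		if (i40evf_asq_done(hw))	/* descriptor consumed by firmware */
			return 0;
		usleep_range(1000, 2000);	/* sleep instead of spinning */
		total_delay++;			/* one tick per ~1 ms */
	} while (total_delay < hw->aq.asq_cmd_timeout);

	/* illustrative; the driver reports its own admin-queue timeout status */
	return -ETIMEDOUT;
}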
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 91a5c5bd80f3..6c31bf22c2c3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -28,6 +28,7 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
+#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -76,7 +77,8 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
- u16 msg_size;
+ u16 msg_len;
+ u16 buf_len;
u8 *msg_buf;
};
@@ -93,7 +95,6 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
@@ -108,7 +109,7 @@ struct i40e_adminq_info {
* i40e_aq_rc_to_posix - convert errors to user-land codes
* aq_rc: AdminQ error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ return -ERANGE;
return aq_to_posix[aq_rc];
}
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
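
The i40e_aq_rc_to_posix() change above makes the translation defensive in two ways: when the admin-queue command itself timed out, aq_rc was never written back, so the helper returns -EAGAIN instead of trusting a stale value, and any firmware code beyond the table maps to -ERANGE rather than indexing out of bounds. A hypothetical call site, using the send routine and asq_last_status field that appear in this patch (the surrounding function is illustrative only):

/* Hypothetical caller: everything except i40evf_asq_send_command(),
 * i40e_aq_rc_to_posix() and hw->aq.asq_last_status is illustrative.
 */
static int send_and_map_sketch(struct i40e_hw *hw, struct i40e_aq_desc *desc)
{
	i40e_status status;

	/* direct (buffer-less) command for simplicity */
	status = i40evf_asq_send_command(hw, desc, NULL, 0, NULL);

	/* -EAGAIN on AQ timeout, -ERANGE for unknown firmware codes,
	 * otherwise the table translation of asq_last_status
	 */
	return i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}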
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e656ea7a7920..ff1b16370da9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
* This file needs to comply with the Linux Kernel coding style.
*/
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
struct i40e_aq_desc {
@@ -67,216 +67,216 @@ struct i40e_aq_desc {
*/
/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
/* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
/* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
/* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -303,7 +303,7 @@ enum i40e_admin_queue_opc {
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
/* internal (0x00XX) commands */
@@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
@@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
@@ -374,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
@@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites {
I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
};
/* list of caps */
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
- __le16 table_id;
- __le32 pfpm_proxyfc;
- __le32 ip_addr;
- u8 mac_addr[6];
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
};
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
};
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
};
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
@@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
@@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
* command
*/
struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
};
struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
};
/* Get Switch Configuration (indirect 0x0200)
@@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp {
* the first in the array is the header, remainder are elements
*/
struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
};
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
};
/* Add VSI (indirect 0x0210)
@@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
* uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
struct i40e_aqc_vsi_properties_data {
/* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
/* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
/* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
/* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
- u8 queueing_opt_reserved[3];
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
/* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
+ u8 up_enable_bits;
+ u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
- u8 cmd_reserved[8];
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
- __le16 qs_handle[8];
+ __le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
@@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
* (IS_CTRL_PORT only works on add PV)
*/
struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
*/
struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
- u8 enable_tcs;
- u8 reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
+ u8 reserved[6];
+ __le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
/* used for the command for most vlan commands */
struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
/* indirect data for command and response */
struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_macvlan_completion {
@@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
*/
struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
/* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
};
/* Add VLAN (indirect 0x0252)
@@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data {
* use the generic i40e_aqc_macvlan for the command
*/
struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
+ __le16 vlan_tag;
+ u8 vlan_flags;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
+ __le16 tag;
+ u8 reserved[12];
};
/* Add multicast E-Tag (direct 0x0257)
@@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag {
* and no external data
*/
struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
@@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
* and the generic direct completion structure
*/
struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
+ __le16 queue;
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
* and the generic indirect completion structure
*/
struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_add_remove_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
union {
struct {
u8 reserved[12];
@@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 data[16];
} v6;
} ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
/* 0x0007 reserved */
/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
/* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
};
struct i40e_aqc_remove_cloud_filters_completion {
@@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
struct i40e_aqc_add_delete_mirror_rule {
__le16 seid;
__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
__le16 num_entries;
__le16 destination; /* VSI for add, rule id for delete */
__le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
@@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
* the command and response use the same descriptor structure
*/
struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
* this generic struct to pass the SEID in param0
*/
struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
/* Configure VSI BW limits (direct 0x0400) */
struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
};
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
};
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
* Disable Physical Port ETS (indirect 0x0415)
*/
struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
};
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
};
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
};
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Suspend/resume port TX traffic
@@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (indirect 0x041D)
*/
struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
};
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
enum i40e_aq_phy_type {
I40E_PHY_TYPE_SGMII = 0x0,
@@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc {
};
struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
/* bits 0-2 use the values from get_phy_abilities_resp */
#define I40E_AQ_PHY_ENABLE_LINK 0x08
#define I40E_AQ_PHY_ENABLE_AN 0x10
#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 reserved[3];
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
/* Set MAC Config command data structure (direct 0x0603) */
struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
/* Restart Auto-Negotiation (direct 0x605) */
struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
/* Set event mask command (direct 0x613) */
struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
* Get Link Partner AN advt register (direct 0x0616)
*/
struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Debug command (0x0622) */
struct i40e_aqc_set_phy_debug {
- u8 command_flags;
+ u8 command_flags;
#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
@@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
/* NVM Read command (indirect 0x0701)
@@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type {
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define ANVM_READ_SINGLE_FEATURE 0
#define ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
/* NVM Config Write (indirect 0x0705) */
struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field {
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
* uses i40e_aq_desc
*/
struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
- __le32 mode;
+ __le32 mode;
#define I40E_AQ_ALTERNATE_MODE_NONE 0
#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
/* Lan Queue Overflow Event (direct, 0x1001) */
struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
/* Get LLDP MIB (indirect 0x0A00) */
struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
* also used for the event (with type in the command field)
*/
struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
* Delete LLDP TLV (indirect 0x0A04)
*/
struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
/* Update LLDP TLV (indirect 0x0A03) */
struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
@@ -1998,13 +1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
@@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion {
__le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
u8 total_filters;
u8 reserved[11];
};
@@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure {
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
@@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- u8 param_value2[8];
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* set test mode (0xFF01, internal) */
struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 952560551964..28c40c57d4f5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -50,6 +50,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index f6dcf9dd9290..c7f29626eada 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -30,10 +30,7 @@
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE 0x03F
-#define I40E_MIN_IRATE 0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
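The throttling values are expressed in the register's 2-microsecond units, so 0x0005 is a 10 us gap (~100K interrupts/s), 0x0019 is 50 us (~20K), 0x003E is 124 us (~8K), and the new I40E_MIN_ITR of 0x0001 allows throttling down to a 2 us gap; the separate IRATE limiter defines are dropped. A small conversion sketch, with a helper name of my own choosing:

    /* Illustrative only: turn a target interrupt rate into an ITR
     * register value, honouring the 2 usec register resolution noted
     * in the comments above.
     */
    static inline u16 itr_reg_from_rate(u32 ints_per_sec)
    {
            u32 gap_usecs = 1000000 / ints_per_sec;

            return (u16)(gap_usecs / 2);    /* register counts 2 usec steps */
    }

Feeding 100000, 20000 and 8000 into this reproduces 0x0005, 0x0019 and 0x003E respectively.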
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 15376436cead..68aec11f6523 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -259,8 +260,7 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
- u64 hw_semaphore_wait; /* - || - */
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
@@ -475,6 +475,11 @@ struct i40e_hw {
u32 debug_mask;
};
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return hw->mac.type == I40E_MAC_VF;
+}
+
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
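The new i40e_is_vf() inline gives shared code a single predicate for "is this function running as a VF" instead of open-coded hw->mac.type comparisons. A hypothetical caller, assuming the usual kernel headers; the body and messages are placeholders, not code from this driver:

    /* Illustrative caller of the new helper. */
    static void report_function_kind(struct i40e_hw *hw)
    {
            if (i40e_is_vf(hw))
                    pr_info("i40e: operating as a virtual function\n");
            else
                    pr_info("i40e: operating as a physical function\n");
    }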
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index cd18d5689006..e0c8208138f4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -79,6 +79,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
+ I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 30ef519d4b91..981224743c73 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -191,6 +191,7 @@ struct i40evf_adapter {
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
char misc_vector_name[IFNAMSIZ + 9];
+ int num_active_queues;
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
@@ -243,7 +244,7 @@ struct i40evf_adapter {
struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
- volatile unsigned long crit_section;
+ unsigned long crit_section;
struct work_struct watchdog_task;
bool netdev_registered;
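Two adapter fields change here: num_active_queues caches the queue count the VF actually uses (the rest of the patch switches loops from vsi_res->num_queue_pairs to it), and crit_section loses its volatile qualifier. Dropping volatile is safe because the field is only ever touched through the atomic bitops, which already give the needed atomicity and ordering. Condensed from the reset-task and filter hunks later in this patch, the pattern looks like this:

    /* Condensed illustration of the crit_section usage: spin until the
     * bit is won, do the protected work, then release it.
     */
    while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
                            &adapter->crit_section))
            usleep_range(500, 1000);

    /* ... work that must not race with reset or watchdog tasks ... */

    clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);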
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index efee6b290c0f..69b97bac182c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -58,8 +58,8 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
#define I40EVF_QUEUE_STATS_LEN(_dev) \
- (((struct i40evf_adapter *) \
- netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ (((struct i40evf_adapter *)\
+ netdev_priv(_dev))->num_active_queues \
* 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
@@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
data[i] = *(u64 *)p;
}
- for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ for (j = 0; j < adapter->num_active_queues; j++) {
data[i++] = adapter->tx_rings[j]->stats.packets;
data[i++] = adapter->tx_rings[j]->stats.bytes;
}
- for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ for (j = 0; j < adapter->num_active_queues; j++) {
data[i++] = adapter->rx_rings[j]->stats.packets;
data[i++] = adapter->rx_rings[j]->stats.bytes;
}
@@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
@@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static u32 i40evf_get_msglevel(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
@@ -189,6 +190,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev)
static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
@@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
* but the number of rings is not reported.
**/
static void i40evf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
* this functionality.
**/
static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
@@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev,
* Change current coalescing settings.
**/
static int i40evf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
@@ -430,7 +432,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->vsi_res->num_queue_pairs;
+ cmd->data = adapter->num_active_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
@@ -598,12 +600,12 @@ static void i40evf_get_channels(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = adapter->vsi_res->num_queue_pairs;
+ ch->max_combined = adapter->num_active_queues;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
- ch->combined_count = adapter->vsi_res->num_queue_pairs;
+ ch->combined_count = adapter->num_active_queues;
}
/**
@@ -621,17 +623,23 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
* i40evf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
- * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ * @key: hash key
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 hlut_val;
int i, j;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
indir[j++] = hlut_val & 0xff;
@@ -646,19 +654,26 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
* i40evf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
- * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ * @key: hash key
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 hlut_val;
int i, j;
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = indir[j++];
hlut_val |= indir[j++] << 8;
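These two callbacks pick up the extra hfunc argument from the ethtool core: the VF reports Toeplitz hashing (ETH_RSS_HASH_TOP), rejects key or hash-function changes, and otherwise just repacks the RSS indirection table, four 8-bit entries per 32-bit I40E_VFQF_HLUT register, as the byte shifts in the loops above show. A standalone sketch of that packing, with a helper name of my own:

    /* Illustrative: pack four consecutive 8-bit indirection entries
     * into one 32-bit HLUT register value, using the same byte order
     * as the get/set loops above.
     */
    static u32 pack_hlut_reg(const u32 *indir, int base)
    {
            return  (indir[base + 0] & 0xff)         |
                    ((indir[base + 1] & 0xff) << 8)  |
                    ((indir[base + 2] & 0xff) << 16) |
                    ((indir[base + 3] & 0xff) << 24);
    }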
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c51bc7a33bc5..cabaf599f562 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.0.5"
+#define DRV_VERSION "1.0.6"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
/* read flush */
@@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
@@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
}
/* read flush */
rd32(hw, I40E_VFGEN_RSTAT);
-
}
/**
@@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
* @adapter: board private structure
* @mask: bitmap of vectors to trigger
**/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
- u32 mask)
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
{
struct i40e_hw *hw = &adapter->hw;
int i;
@@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->vsi_res->num_queue_pairs;
- int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int rxr_remaining = adapter->num_active_queues;
+ int txr_remaining = adapter->num_active_queues;
int i, j;
int rqpv, tqpv;
int err = 0;
@@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
int i;
int q_vectors;
+
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (i = 0; i < q_vectors; i++) {
@@ -584,7 +585,8 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+
+ for (i = 0; i < adapter->num_active_queues; i++)
adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}
@@ -629,7 +631,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
rx_buf_len = ALIGN(max_frame, 1024);
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
}
@@ -667,9 +669,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
struct i40evf_vlan_filter *f;
f = i40evf_find_vlan(adapter, vlan);
- if (NULL == f) {
+ if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f)
+ if (!f)
return NULL;
f->vlan = vlan;
@@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
udelay(1);
f = i40evf_find_filter(adapter, macaddr);
- if (NULL == f) {
+ if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f) {
+ if (!f) {
clear_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section);
return NULL;
@@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi;
+
q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
napi_enable(napi);
@@ -918,8 +921,9 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
i40evf_configure_rx(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = adapter->rx_rings[i];
+
i40evf_alloc_rx_buffers(ring, ring->count);
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
@@ -950,7 +954,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
i40evf_clean_rx_ring(adapter->rx_rings[i]);
}
@@ -962,7 +966,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
i40evf_clean_tx_ring(adapter->tx_rings[i]);
}
@@ -1064,7 +1068,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
if (!adapter->vsi_res)
return;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
if (adapter->tx_rings[i])
kfree_rcu(adapter->tx_rings[i], rcu);
adapter->tx_rings[i] = NULL;
@@ -1084,11 +1088,11 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -1130,7 +1134,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
err = -EIO;
goto out;
}
- pairs = adapter->vsi_res->num_queue_pairs;
+ pairs = adapter->num_active_queues;
/* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
@@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, NAPI_POLL_WEIGHT);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
@@ -1210,7 +1214,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
int napi_vectors;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- napi_vectors = adapter->vsi_res->num_queue_pairs;
+ napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
@@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
}
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
- (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
- "Disabled", adapter->vsi_res->num_queue_pairs);
+ (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_active_queues);
return 0;
err_alloc_queues:
@@ -1284,6 +1288,7 @@ err_set_interrupt:
static void i40evf_watchdog_timer(unsigned long data)
{
struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+
schedule_work(&adapter->watchdog_task);
/* timer will be rescheduled in watchdog task */
}
@@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data)
static void i40evf_watchdog_task(struct work_struct *work)
{
struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
- watchdog_task);
+ struct i40evf_adapter,
+ watchdog_task);
struct i40e_hw *hw = &adapter->hw;
uint32_t rstat_val;
@@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* check for reset */
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED)) {
@@ -1425,7 +1430,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j)
{
j += 1;
- return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+ return j >= adapter->num_active_queues ? 0 : j;
}
/**
@@ -1434,23 +1439,23 @@ static int next_queue(struct i40evf_adapter *adapter, int j)
**/
static void i40evf_configure_rss(struct i40evf_adapter *adapter)
{
+ u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
struct i40e_hw *hw = &adapter->hw;
u32 lut = 0;
int i, j;
u64 hena;
- /* Set of random keys generated using kernel random number generator */
- static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
- 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
- 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
- 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
- 0x4954b126 };
+ /* No RSS for single queue. */
+ if (adapter->num_active_queues == 1) {
+ wr32(hw, I40E_VFQF_HENA(0), 0);
+ wr32(hw, I40E_VFQF_HENA(1), 0);
+ return;
+ }
/* Hash type is configured by the PF - we just supply the key */
-
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+ wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
hena = I40E_DEFAULT_RSS_HENA;
@@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- j = adapter->vsi_res->num_queue_pairs;
+ j = adapter->num_active_queues;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
j = next_queue(adapter, j);
lut = j;
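The hunk above drops the hard-coded RSS seed in favour of netdev_rss_key_fill() and then walks the VFQF_HLUT entries round-robin across num_active_queues. A minimal stand-alone sketch of that round-robin fill; fill_rss_lut(), lut_size and queue_count are illustrative stand-ins for the driver's register writes and adapter fields:

#include <stdint.h>
#include <stdio.h>

static void fill_rss_lut(uint8_t *lut, int lut_size, int queue_count)
{
	int i, j = queue_count;

	for (i = 0; i < lut_size; i++) {
		/* same wrap rule as next_queue(): back to 0 past the last queue */
		j = (j + 1 >= queue_count) ? 0 : j + 1;
		lut[i] = (uint8_t)j;
	}
}

int main(void)
{
	uint8_t lut[16];
	int i;

	fill_rss_lut(lut, 16, 4);
	for (i = 0; i < 16; i++)
		printf("%d ", lut[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}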
@@ -1494,7 +1499,7 @@ static void i40evf_reset_task(struct work_struct *work)
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section))
- udelay(500);
+ usleep_range(500, 1000);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
@@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work)
if ((rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED))
break;
- else
- msleep(I40EVF_RESET_WAIT_MS);
+ msleep(I40EVF_RESET_WAIT_MS);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
@@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work)
if ((rstat_val == I40E_VFR_VFACTIVE) ||
(rstat_val == I40E_VFR_COMPLETED))
break;
- else
- msleep(I40EVF_RESET_WAIT_MS);
+ msleep(I40EVF_RESET_WAIT_MS);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *f, *ftmp;
@@ -1575,12 +1578,12 @@ continue_reset:
/* kill and reinit the admin queue */
if (i40evf_shutdown_adminq(hw))
dev_warn(&adapter->pdev->dev,
- "%s: Failed to destroy the Admin Queue resources\n",
- __func__);
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ __func__, err);
adapter->aq_pending = 0;
adapter->aq_required = 0;
@@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
return;
- event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return;
@@ -1645,13 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
v_msg->v_retval, event.msg_buf,
- event.msg_size);
- if (pending != 0) {
- dev_info(&adapter->pdev->dev,
- "%s: ARQ: Pending events %d\n",
- __func__, pending);
+ event.msg_len);
+ if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
- }
} while (pending);
/* check for error indications */
@@ -1705,10 +1704,9 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i]->desc)
i40evf_free_tx_resources(adapter->tx_rings[i]);
-
}
/**
@@ -1725,7 +1723,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->tx_rings[i]->count = adapter->tx_desc_count;
err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
if (!err)
@@ -1753,7 +1751,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i]->count = adapter->rx_desc_count;
err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
if (!err)
@@ -1776,7 +1774,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->rx_rings[i]->desc)
i40evf_free_rx_resources(adapter->rx_rings[i]);
}
@@ -1980,7 +1978,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
if ((rstat == I40E_VFR_VFACTIVE) ||
(rstat == I40E_VFR_COMPLETED))
return 0;
- udelay(10);
+ usleep_range(10, 20);
}
return -EBUSY;
}
@@ -2022,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_check_reset_complete(hw);
if (err) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -2047,6 +2045,8 @@ static void i40evf_init_task(struct work_struct *work)
case __I40EVF_INIT_VERSION_CHECK:
if (!i40evf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
+ i40evf_shutdown_adminq(hw);
+ adapter->state = __I40EVF_STARTUP;
goto err;
}
@@ -2054,7 +2054,7 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_verify_api_ver(adapter);
if (err) {
dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
+ err);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pdev->dev, "Resending request\n");
err = i40evf_send_api_ver(adapter);
@@ -2080,8 +2080,11 @@ static void i40evf_init_task(struct work_struct *work)
goto err;
}
err = i40evf_get_vf_config(adapter);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto restart;
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ dev_info(&pdev->dev, "Resending VF config request\n");
+ err = i40evf_send_vf_config_msg(adapter);
+ goto err;
+ }
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
err);
@@ -2138,7 +2141,7 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f)
+ if (!f)
goto err_sw_init;
ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
@@ -2152,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ adapter->num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
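The new sizing above caps the number of active queue pairs at the number of online CPUs. A tiny sketch of that rule, with made-up parameter names standing in for adapter->vsi_res->num_queue_pairs and num_online_cpus():

#include <stdio.h>

/* one queue pair per online CPU, capped by what the VF actually provides */
static int active_queue_count(int hw_queue_pairs, int nr_online_cpus)
{
	return hw_queue_pairs < nr_online_cpus ? hw_queue_pairs : nr_online_cpus;
}

int main(void)
{
	printf("%d\n", active_queue_count(16, 8));	/* prints 8 */
	return 0;
}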
@@ -2500,8 +2506,9 @@ static struct pci_driver i40evf_driver = {
static int __init i40evf_init_module(void)
{
int ret;
+
pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
- i40evf_driver_version);
+ i40evf_driver_version);
pr_info("%s\n", i40evf_copyright);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 66d12f5b4ca8..5fde5a7f4591 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -89,27 +89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
struct i40e_virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
+ enum i40e_virtchnl_ops op;
i40e_status err;
- event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
- err = i40evf_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto out_alloc;
+ while (1) {
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ /* When the AQ is empty, i40evf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ if (err)
+ goto out_alloc;
+ op =
+ (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == I40E_VIRTCHNL_OP_VERSION)
+ break;
+ }
+
err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
- if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_VERSION) {
+ if (op != I40E_VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- le32_to_cpu(event.desc.cookie_high));
+ op);
err = -EIO;
goto out_alloc;
}
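The rewritten receive path above keeps draining admin-queue events until the VERSION reply arrives, relying on i40evf_clean_arq_element() returning nonzero once the queue is empty. A hypothetical sketch of that drain-until-opcode pattern; next_evt(), struct evt and the opcode values are placeholders, not driver API:

#include <stdio.h>

struct evt {
	int opcode;
	int retval;
};

/* next_evt() fills *out and returns 0 while events remain, nonzero once the
 * queue is empty, mirroring the contract described in the comment above.
 */
static int wait_for_opcode(int (*next_evt)(void *ctx, struct evt *e),
			   void *ctx, int wanted, struct evt *out)
{
	for (;;) {
		if (next_evt(ctx, out))
			return -1;	/* drained without seeing the reply */
		if (out->opcode == wanted)
			return 0;	/* anything else is silently skipped */
	}
}

static int fake_src(void *ctx, struct evt *e)
{
	static const int ops[] = { 7, 3, 5 };
	int *i = ctx;

	if (*i >= 3)
		return 1;
	e->opcode = ops[(*i)++];
	e->retval = 0;
	return 0;
}

int main(void)
{
	struct evt e;
	int pos = 0;

	printf("%d\n", wait_for_opcode(fake_src, &pos, 5, &e));	/* prints 0 */
	return 0;
}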
@@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- u16 len;
+ enum i40e_virtchnl_ops op;
i40e_status err;
+ u16 len;
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
- event.msg_size = len;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = len;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
- err = i40evf_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto out_alloc;
-
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: Error returned from PF, %d, %d\n", __func__,
- le32_to_cpu(event.desc.cookie_high),
- le32_to_cpu(event.desc.cookie_low));
- err = -EIO;
- goto out_alloc;
+ while (1) {
+ /* When the AQ is empty, i40evf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err)
+ goto out_alloc;
+ op =
+ (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+ break;
}
- if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
- dev_err(&adapter->pdev->dev,
- "%s: Invalid response from PF, %d, %d\n", __func__,
- le32_to_cpu(event.desc.cookie_high),
- le32_to_cpu(event.desc.cookie_low));
- err = -EIO;
- goto out_alloc;
- }
- memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
@@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
- int pairs = adapter->vsi_res->num_queue_pairs;
+ int pairs = adapter->num_active_queues;
int i, len;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
@@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
@@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
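Both the enable and disable paths above build the queue selection as a bitmask with bits 0..n-1 set for the first n active queues. A minimal sketch of that mask, assuming n stays below 32 so the shift is defined for a u32:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t first_n_queues_mask(unsigned int n)
{
	assert(n > 0 && n < 32);	/* keep the shift defined for a u32 */
	return (1u << n) - 1;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)first_n_queues_mask(4));	/* 0x0000000f */
	return 0;
}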
@@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
struct i40e_virtchnl_queue_select vqs;
+
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
@@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
"%s: Unknown event %d from pf\n",
__func__, vpe->event);
break;
-
}
return;
}
- if (v_opcode != adapter->current_op) {
- dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
- __func__, adapter->current_op, v_opcode);
- /* We're probably completely screwed at this point, but clear
- * the current op and try to carry on....
- */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- return;
- }
+ if (v_opcode != adapter->current_op)
+ dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
+ adapter->current_op, v_opcode);
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
@@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
break;
default:
- dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
- __func__, v_opcode);
+ dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n",
+ v_opcode);
break;
} /* switch v_opcode */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 02cfd3b14762..d5673eb90c54 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2842,11 +2842,16 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
@@ -2889,13 +2894,20 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int i;
u32 num_queues;
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
num_queues = adapter->rss_queues;
switch (hw->mac.type) {
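The reworked igb_set_rxfh() above rejects any attempt to change the hash function (other than Toeplitz) or the key, and treats a missing indirection table as a no-op. A small sketch of that parameter check under those assumptions; the RSS_HASH_* constants here are stand-ins for the ETH_RSS_HASH_* values, not the ethtool API itself:

#include <errno.h>
#include <stdio.h>

#define RSS_HASH_NO_CHANGE	0	/* stand-ins for ETH_RSS_HASH_* */
#define RSS_HASH_TOP		1

/* -EOPNOTSUPP for unsupported changes, 0 for a no-op request,
 * 1 when the caller should go on and program the new table
 */
static int check_rxfh_args(const unsigned char *key, unsigned char hfunc,
			   const unsigned int *indir)
{
	if (key || (hfunc != RSS_HASH_NO_CHANGE && hfunc != RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;
	return 1;
}

int main(void)
{
	unsigned int table[8] = { 0 };

	printf("%d\n", check_rxfh_args(NULL, RSS_HASH_TOP, table));	/* 1 */
	return 0;
}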
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b85880a6e4c4..ff59897a9463 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3373,14 +3373,11 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 j, num_rx_queues;
- static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
- 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
- 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
- 0xFA01ACBE };
+ u32 rss_key[10];
- /* Fill out hash function seeds */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (j = 0; j < 10; j++)
- wr32(E1000_RSSRK(j), rsskey[j]);
+ wr32(E1000_RSSRK(j), rss_key[j]);
num_rx_queues = adapter->rss_queues;
@@ -5092,12 +5089,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
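The hunk above replaces the open-coded minimum-length check with skb_put_padto(), which zero-pads short frames before transmit. A stand-alone sketch of the underlying padding rule, assuming the caller guarantees the buffer has room for min_len bytes (the kernel helpers also handle reallocation and freeing on failure):

#include <stdio.h>
#include <string.h>

/* caller must guarantee buf has room for min_len bytes */
static size_t pad_to_min(unsigned char *buf, size_t len, size_t min_len)
{
	if (len < min_len) {
		memset(buf + len, 0, min_len - len);	/* zero-fill the tail */
		len = min_len;
	}
	return len;
}

int main(void)
{
	unsigned char frame[60] = { 0xde, 0xad, 0xbe, 0xef };

	printf("%zu\n", pad_to_min(frame, 4, 17));	/* prints 17 */
	return 0;
}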
@@ -6649,8 +6642,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
@@ -6851,14 +6843,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
if (skb_is_nonlinear(skb))
igb_pull_tail(rx_ring, rx_desc, skb);
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
@@ -6923,14 +6910,14 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.status_error)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
@@ -6993,7 +6980,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ page = dev_alloc_page();
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 055961b0f24b..aa87605b144a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
struct ixgb_buffer *buffer_info,
u32 length, struct sk_buff **skb)
{
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
if (length > copybreak)
return;
- new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ new_skb = napi_alloc_skb(napi, length);
if (!new_skb)
return;
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done;
}
- ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+ ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
/* Good Receive */
skb_put(skb, length);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index be2989e60009..35e6fa643c7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5032a602d5c9..b6137be43920 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
-#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
-#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define IXGBE_MAX_L2A_QUEUES 4
-#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_RSS_INDICES_X550 64
+#define IXGBE_MAX_VMDQ_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
+#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS 31
+#define IXGBE_MAX_DCBMACVLANS 8
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
- writel(value, ring->tail);
-}
-
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -769,6 +765,21 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
+static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_RSS_INDICES;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ return IXGBE_MAX_RSS_INDICES_X550;
+ default:
+ return 0;
+ }
+}
+
struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
@@ -804,11 +815,15 @@ enum ixgbe_boards {
board_82598,
board_82599,
board_X540,
+ board_X550,
+ board_X550EM_x,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
+extern struct ixgbe_info ixgbe_X550_info;
+extern struct ixgbe_info ixgbe_X550EM_x_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5f484bf3fda..9c66babd4edd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ if (hw->eeprom.ops.read(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include all data from pointers except for the fw pointer */
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
- hw->eeprom.ops.read(hw, i, &pointer);
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* If the pointer seems invalid */
+ if (pointer == 0xFFFF || pointer == 0)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
- /* Make sure the pointer seems valid */
- if (pointer != 0xFFFF && pointer != 0) {
- hw->eeprom.ops.read(hw, pointer, &length);
+ if (length == 0xFFFF || length == 0)
+ continue;
- if (length != 0xFFFF && length != 0) {
- for (j = pointer+1; j <= pointer+length; j++) {
- hw->eeprom.ops.read(hw, j, &word);
- checksum += word;
- }
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
}
+ checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
/**
@@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ checksum = (u16)(status & 0xffff);
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- } else {
+ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status) {
hw_dbg(hw, "EEPROM read failed\n");
+ return status;
}
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
return status;
}
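The reworked checksum helpers above rely on the rule that every EEPROM word, including the stored checksum, sums to IXGBE_EEPROM_SUM, so the expected checksum is that constant minus the sum of the other words. A minimal model of that arithmetic; the 0xBABA value and the in-memory image are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM	0xBABA	/* assumed value of IXGBE_EEPROM_SUM */

static uint16_t calc_checksum(const uint16_t *eeprom, int nwords,
			      int checksum_word)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		if (i != checksum_word)
			sum += eeprom[i];	/* 16-bit wraparound intended */

	return (uint16_t)(EEPROM_SUM - sum);
}

int main(void)
{
	uint16_t img[4] = { 0x1111, 0x2222, 0x0000 /* checksum slot */, 0x3333 };

	img[2] = calc_checksum(img, 4, 2);
	printf("0x%04x\n",
	       (unsigned)(uint16_t)(img[0] + img[1] + img[2] + img[3]));
	/* prints 0xbaba: the words sum back to the expected constant */
	return 0;
}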
@@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
- status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
- } else {
+ if (status) {
hw_dbg(hw, "EEPROM read failed\n");
+ return status;
}
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
return status;
}
@@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr;
u32 swmask = mask;
@@ -2799,6 +2821,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -3192,17 +3216,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*link_up = false;
}
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
- *speed = IXGBE_LINK_SPEED_100_FULL;
- else
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
return 0;
}
@@ -3434,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
*
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout,
+ bool return_data)
{
- u32 hicr, i, bi;
+ u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u8 buf_len, dword_len;
+ u16 buf_len, dword_len;
- if (length == 0 || length & 0x3 ||
- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
- hw_dbg(hw, "Buffer length failure.\n");
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
@@ -3458,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
- /* Calculate length in DWORDs */
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ hw_dbg(hw, "Buffer length failure, not aligned to dword");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
dword_len = length >> 2;
/*
@@ -3472,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
@@ -3480,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ if ((timeout != 0 && i == timeout) ||
(!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
hw_dbg(hw, "Command has failed with no status valid.\n");
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ if (!return_data)
+ return 0;
+
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
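ixgbe_host_interface_command() now takes the completion timeout from the caller and polls the IXGBE_HICR_C bit up to that many iterations. A generic sketch of that bounded-polling shape, with busy() and delay() as placeholders for the register read and the per-iteration sleep:

#include <stdbool.h>
#include <stdio.h>

/* returns 0 once the controller reports completion, -1 on timeout */
static int poll_for_completion(bool (*busy)(void *ctx), void *ctx,
			       unsigned int timeout,
			       void (*delay)(void *ctx))
{
	unsigned int i;

	for (i = 0; i < timeout; i++) {
		if (!busy(ctx))
			return 0;
		delay(ctx);
	}
	return -1;
}

static bool still_busy(void *ctx)
{
	return --(*(int *)ctx) > 0;	/* pretend the bit clears eventually */
}

static void no_delay(void *ctx)
{
	(void)ctx;
}

int main(void)
{
	int countdown = 3;

	printf("%d\n",
	       poll_for_completion(still_busy, &countdown, 10, no_delay));
	/* prints 0 */
	return 0;
}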
@@ -3556,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(fw_cmd));
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
if (ret_val != 0)
continue;
@@ -3583,7 +3638,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
**/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
- u32 gcr_ext, hlreg0;
+ u32 gcr_ext, hlreg0, i, poll;
+ u16 value;
/*
* If double reset is not requested then all transactions should
@@ -3600,6 +3656,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+ /* wait for a last completion before clearing buffers */
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(3000, 6000);
+
+ /* Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usleep_range(100, 200);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ break;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ break;
+ }
+
/* initiate cleaning flow for buffers in the PCIe transaction layer */
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2ae5d4b8fc93..8cfadcb2676e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
@@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 48f35fc963f8..a507a6fe3624 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
bwgid, ptype);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -385,6 +391,8 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 58a7f5312a96..2707bda37418 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0ae038b9af90..e5be0dd508de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -622,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -1406,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1644,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1680,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1733,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
- /* X540 needs to set the MACC.FLU bit to force link up */
- if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ /* X540 and X550 needs to set the MACC.FLU bit to force link up */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
- } else {
+ break;
+ default:
if (hw->mac.orig_autoc) {
reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
@@ -2776,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
/* if we changed something we need to update flags */
if (flags2 != adapter->flags2) {
struct ixgbe_hw *hw = &adapter->hw;
- u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ u32 mrqc;
+ unsigned int pf_pool = adapter->num_vfs;
+
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
+ else
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
@@ -2799,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
return 0;
@@ -2833,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -2900,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
max_combined = IXGBE_MAX_FDIR_INDICES;
} else {
/* support up to 16 queues with RSS */
- max_combined = IXGBE_MAX_RSS_INDICES;
+ max_combined = ixgbe_max_rss_indices(adapter);
}
return max_combined;
@@ -2948,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
unsigned int count = ch->combined_count;
+ u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
@@ -2964,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
adapter->ring_feature[RING_F_FDIR].limit = count;
- /* cap RSS limit at 16 */
- if (count > IXGBE_MAX_RSS_INDICES)
- count = IXGBE_MAX_RSS_INDICES;
+ /* cap RSS limit */
+ if (count > max_rss_indices)
+ count = max_rss_indices;
adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ce40c77381e9..68e1e757ecef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc51554c9e99..2ed2c7de2304 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -42,6 +42,7 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
@@ -50,6 +51,15 @@
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
+#ifdef CONFIG_OF
+#include <linux/of_net.h>
+#endif
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
@@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.19.1-k"
+#define DRV_VERSION "4.0.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
- [board_82598] = &ixgbe_82598_info,
- [board_82599] = &ixgbe_82599_info,
- [board_X540] = &ixgbe_X540_info,
+ [board_82598] = &ixgbe_82598_info,
+ [board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
+ [board_X550] = &ixgbe_X550_info,
+ [board_X550EM_x] = &ixgbe_X550EM_x_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -835,6 +850,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -871,6 +888,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1412,41 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- ixgbe_write_tail(rx_ring, val);
-}
-
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t dma = bi->dma;
+ dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(dma))
+ if (likely(page))
return true;
/* alloc new page for storage */
- if (likely(!page)) {
- page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
- bi->skb, ixgbe_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
- bi->page = page;
+ page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
}
/* map page for use */
@@ -1459,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ixgbe_rx_pg_order(rx_ring));
- bi->page = NULL;
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
+ bi->page = page;
bi->page_offset = 0;
return true;
@@ -1509,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
cleaned_count--;
} while (cleaned_count);
i += rx_ring->count;
- if (rx_ring->next_to_use != i)
- ixgbe_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -1763,14 +1774,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
return false;
#endif
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
@@ -1795,9 +1801,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- new_buff->page = old_buff->page;
- new_buff->dma = old_buff->dma;
- new_buff->page_offset = old_buff->page_offset;
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1806,6 +1810,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1841,12 +1850,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_node_id()))
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!ixgbe_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
- put_page(page);
+ __free_pages(page, ixgbe_rx_pg_order(rx_ring));
return false;
}
@@ -1854,7 +1863,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset, size, truesize);
/* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
+ if (unlikely(ixgbe_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -1864,22 +1873,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset)
return false;
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
#endif
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
return true;
}
@@ -1907,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBE_RX_HDR_SIZE);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IXGBE_RX_HDR_SIZE);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
return NULL;
@@ -1942,6 +1948,8 @@ dma_sync:
rx_buffer->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
+
+ rx_buffer->skb = NULL;
}
/* pull page into skb */
@@ -1959,8 +1967,6 @@ dma_sync:
}
/* clear contents of buffer_info */
- rx_buffer->skb = NULL;
- rx_buffer->dma = 0;
rx_buffer->page = NULL;
return skb;
@@ -2003,15 +2009,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.status_error)
break;
- /*
- * This memory barrier is needed to keep us from reading
+ /* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
@@ -2155,6 +2160,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2264,6 +2271,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2467,6 +2476,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2493,6 +2504,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2525,6 +2538,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2536,7 +2551,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ /* fall through */
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
@@ -2544,9 +2562,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
- if (adapter->hw.mac.type == ixgbe_mac_X540)
- mask |= IXGBE_EIMS_TIMESYNC;
-
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
mask |= IXGBE_EIMS_FLOW_DIR;
@@ -2592,6 +2607,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2811,6 +2828,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2905,6 +2924,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3190,16 +3211,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
-static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
{
struct ixgbe_hw *hw = &adapter->hw;
- static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
- 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
- 0x6A3E67EA, 0x14364D17, 0x3BED200D};
- u32 mrqc = 0, reta = 0;
- u32 rxcsum;
+ u32 reta = 0;
int i, j;
+ int reta_entries = 128;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int indices_multi;
/*
* Program table for at least 2 queues w/ SR-IOV so that VFs can
@@ -3213,16 +3232,69 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+ /* Fill out the redirection table as follows:
+ * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
+ * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
+ * X550: 512 (8 bit wide) entries containing 6 bit RSS index
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ indices_multi = 0x11;
+ else
+ indices_multi = 0x1;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ reta_entries = 512;
+ default:
+ break;
+ }
+
/* Fill out redirection table */
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
if (j == rss_i)
j = 0;
- /* reta = 4-byte sliding window of
- * 0x00..(indices-1)(indices-1)00..etc. */
- reta = (reta << 8) | (j * 0x11);
+ reta = (reta << 8) | (j * indices_multi);
+ if ((i & 3) == 3) {
+ if (i < 128)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
+ reta);
+ }
+ }
+}
+
+static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vfreta = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ unsigned int pf_pool = adapter->num_vfs;
+ int i, j;
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
+
+ /* Fill out the redirection table */
+ for (i = 0, j = 0; i < 64; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+ vfreta = (vfreta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ vfreta);
}
+}
+
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
+ u32 rss_key[10];
+ u32 rxcsum;
/* Disable indicating checksum in descriptor, enables RSS hash */
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
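ixgbe_setup_reta() above packs four 8-bit redirection entries into each 32-bit register value and flushes the accumulator every fourth entry, using a per-MAC multiplier (0x11 on 82598, 0x1 elsewhere). A stand-alone sketch of that packing, with regs[] as a made-up array in place of the RETA/ERETA writes:

#include <stdint.h>
#include <stdio.h>

/* pack four 8-bit entries per 32-bit word, flushing every fourth entry */
static void fill_reta(uint32_t *regs, int nentries, int rss_i, int multi)
{
	uint32_t reta = 0;
	int i, j;

	for (i = 0, j = 0; i < nentries; i++, j++) {
		if (j == rss_i)
			j = 0;
		reta = (reta << 8) | (uint32_t)(j * multi);
		if ((i & 3) == 3)
			regs[i >> 2] = reta;
	}
}

int main(void)
{
	uint32_t regs[32];

	fill_reta(regs, 128, 4, 0x1);	/* 82599/X540-style 128-entry table */
	printf("0x%08x 0x%08x\n", (unsigned)regs[0], (unsigned)regs[1]);
	/* prints 0x00010203 0x00010203: queues 0..3 repeating */
	return 0;
}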
@@ -3255,17 +3327,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
- IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
- IXGBE_MRQC_RSS_FIELD_IPV6 |
- IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ unsigned int pf_pool = adapter->num_vfs;
+
+ /* Enable VF RSS mode */
+ mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* Setup RSS through the VF registers */
+ ixgbe_setup_vfreta(adapter, rss_key);
+ vfmrqc = IXGBE_MRQC_RSSEN;
+ vfmrqc |= rss_field;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+ } else {
+ ixgbe_setup_reta(adapter, rss_key);
+ mrqc |= rss_field;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ }
}
/**
@@ -3534,6 +3624,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
@@ -3657,6 +3749,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -3691,6 +3785,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4112,6 +4208,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4170,6 +4268,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4308,29 +4408,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer;
+ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- rx_buffer = &rx_ring->rx_buffer_info[i];
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (IXGBE_CB(skb)->page_released) {
+ if (IXGBE_CB(skb)->page_released)
dma_unmap_page(dev,
IXGBE_CB(skb)->dma,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- }
dev_kfree_skb(skb);
rx_buffer->skb = NULL;
}
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_pages(rx_buffer->page,
- ixgbe_rx_pg_order(rx_ring));
+
+ if (!rx_buffer->page)
+ continue;
+
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
rx_buffer->page = NULL;
}
@@ -4606,6 +4703,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -4948,10 +5047,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
- /* Disable the Tx DMA engine on 82599 and X540 */
+ /* Disable the Tx DMA engine on 82599 and later MACs */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5016,7 +5117,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
/* Set common capability flags and settings */
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -5071,6 +5172,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550:
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
+#endif
+ break;
default:
break;
}
@@ -5086,6 +5193,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -5675,6 +5784,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -5806,6 +5917,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -5819,7 +5932,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550) ||
+ (hw->mac.type == ixgbe_mac_X550EM_x)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -5842,7 +5957,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
break;
case ixgbe_mac_X540:
- /* OS2BMC stats are X540 only*/
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ /* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
@@ -6110,6 +6227,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
}
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6221,6 +6340,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
if (!adapter->num_vfs)
return false;
+ /* resetting the PF is only needed for MACs before X550 */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ return false;
+
for (i = 0; i < adapter->num_vfs; i++) {
for (j = 0; j < q_per_pool; j++) {
u32 h, t;
@@ -6256,6 +6379,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
}
+#ifdef CONFIG_PCI_IOV
+static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
+ struct pci_dev *vfdev)
+{
+ if (!pci_wait_for_pending_transaction(vfdev))
+ e_dev_warn("Issuing VFLR with pending transactions\n");
+
+ e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
+ pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+ msleep(100);
+}
+
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ u32 gpc;
+ int pos;
+ unsigned short vf_id;
+
+ if (!(netif_carrier_ok(adapter->netdev)))
+ return;
+
+ gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+ if (gpc) /* If incrementing then no need for the check below */
+ return;
+ /* Check to see if a bad DMA write target from an errant or
+ * malicious VF has caused a PCIe error. If so then we can
+ * issue a VFLR to the offending VF(s) and then resume without
+ * requesting a full slot reset.
+ */
+
+ if (!pdev)
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return;
+
+ /* get the device ID for the VF */
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+
+ /* check status reg for all VFs owned by this PF */
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
+ u16 status_reg;
+
+ pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+ if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ /* issue VFLR */
+ ixgbe_issue_vf_flr(adapter, vfdev);
+ }
+
+ vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+ }
+}
+
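
The watchdog check above looks for a Received Master Abort in each VF's PCI status word before deciding to issue a VFLR. As a rough userspace equivalent for illustration only, the same bit can be read through sysfs config space; the device path below is an example, not taken from the patch, and the two defines mirror the standard PCI register layout.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PCI_STATUS                      0x06
#define PCI_STATUS_REC_MASTER_ABORT     0x2000

int main(void)
{
        /* example VF; substitute the real bus/device/function */
        const char *cfg = "/sys/bus/pci/devices/0000:03:10.0/config";
        uint16_t status;
        int fd = open(cfg, O_RDONLY);

        if (fd < 0)
                return 1;
        if (pread(fd, &status, sizeof(status), PCI_STATUS) == (ssize_t)sizeof(status) &&
            (status & PCI_STATUS_REC_MASTER_ABORT))
                printf("VF reported Received Master Abort; the driver would issue a VFLR\n");
        close(fd);
        return 0;
}
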
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
u32 ssvpc;
@@ -6276,6 +6459,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
+#else
+static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+
+static void
+ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+#endif /* CONFIG_PCI_IOV */
+
/**
* ixgbe_watchdog_subtask - check and bring link up
@@ -6296,6 +6490,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
else
ixgbe_watchdog_link_is_down(adapter);
+ ixgbe_check_for_bad_vf(adapter);
ixgbe_spoof_check(adapter);
ixgbe_update_stats(adapter);
@@ -6407,51 +6602,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}
-#ifdef CONFIG_PCI_IOV
-static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
-{
- int vf;
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 gpc;
- u32 ciaa, ciad;
-
- gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
- if (gpc) /* If incrementing then no need for the check below */
- return;
- /*
- * Check to see if a bad DMA write target from an errant or
- * malicious VF has caused a PCIe error. If so then we can
- * issue a VFLR to the offending VF(s) and then resume without
- * requesting a full slot reset.
- */
-
- for (vf = 0; vf < adapter->num_vfs; vf++) {
- ciaa = (vf << 16) | 0x80000000;
- /* 32 bit read so align, we really want status at offset 6 */
- ciaa |= PCI_COMMAND;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
- ciaa &= 0x7FFFFFFF;
- /* disable debug mode asap after reading data */
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- /* Get the upper 16 bits which will be the PCI status reg */
- ciad >>= 16;
- if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
- netdev_err(netdev, "VF %d Hung DMA\n", vf);
- /* Issue VFLR */
- ciaa = (vf << 16) | 0x80000000;
- ciaa |= 0xA8;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- ciad = 0x00008000; /* VFLR */
- IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
- ciaa &= 0x7FFFFFFF;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- }
- }
-}
-
-#endif
/**
* ixgbe_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -6460,7 +6610,6 @@ static void ixgbe_service_timer(unsigned long data)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
unsigned long next_event_offset;
- bool ready = true;
/* poll faster when waiting for link */
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
@@ -6468,32 +6617,10 @@ static void ixgbe_service_timer(unsigned long data)
else
next_event_offset = HZ * 2;
-#ifdef CONFIG_PCI_IOV
- /*
- * don't bother with SR-IOV VF DMA hang check if there are
- * no VFs or the link is down
- */
- if (!adapter->num_vfs ||
- (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
- goto normal_timer_service;
-
- /* If we have VFs allocated then we must check for DMA hangs */
- ixgbe_check_for_bad_vf(adapter);
- next_event_offset = HZ / 50;
- adapter->timer_event_accumulator++;
-
- if (adapter->timer_event_accumulator >= 100)
- adapter->timer_event_accumulator = 0;
- else
- ready = false;
-
-normal_timer_service:
-#endif
/* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies);
- if (ready)
- ixgbe_service_event_schedule(adapter);
+ ixgbe_service_event_schedule(adapter);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -6898,8 +7025,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
- /* notify HW of packet */
- ixgbe_write_tail(tx_ring, i);
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
}
return;
@@ -7197,12 +7328,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
@@ -7646,7 +7773,7 @@ static int ixgbe_set_features(struct net_device *netdev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
{
/* guarantee we can provide a unique filter for the unicast address */
@@ -7655,7 +7782,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -ENOMEM;
}
- return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
@@ -7716,7 +7843,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
else
mode = BRIDGE_MODE_VEPA;
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -7965,6 +8092,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
+ * @adapter: Pointer to adapter struct
+ */
+static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_OF
+ struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ const unsigned char *addr;
+
+ addr = of_get_mac_address(dp);
+ if (addr) {
+ ether_addr_copy(hw->mac.perm_addr, addr);
+ return;
+ }
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_SPARC
+ ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
+#endif /* CONFIG_SPARC */
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -8046,7 +8196,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
- pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -8104,6 +8253,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -8167,6 +8318,8 @@ skip_sriov:
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
netdev->features |= NETIF_F_SCTP_CSUM;
netdev->hw_features |= NETIF_F_SCTP_CSUM |
NETIF_F_NTUPLE;
@@ -8229,6 +8382,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_get_platform_mac_addr(adapter);
+
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -8320,6 +8475,8 @@ skip_sriov:
if (err)
goto err_register;
+ pci_set_drvdata(pdev, adapter);
+
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
@@ -8399,9 +8556,14 @@ err_dma:
static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev;
bool disable_dev;
+ /* if !adapter then we already cleaned up in probe */
+ if (!adapter)
+ return;
+
+ netdev = adapter->netdev;
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -8523,6 +8685,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X540:
device_id = IXGBE_X540_VF_DEVICE_ID;
break;
+ case ixgbe_mac_X550:
+ device_id = IXGBE_DEV_ID_X550_VF;
+ break;
+ case ixgbe_mac_X550EM_x:
+ device_id = IXGBE_DEV_ID_X550EM_X_VF;
+ break;
default:
device_id = 0;
break;
@@ -8542,8 +8710,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
* VFLR. Just clean up the AER in that case.
*/
if (vfdev) {
- e_dev_err("Issuing VFLR to VF %d\n", vf);
- pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ ixgbe_issue_vf_flr(adapter, vfdev);
/* Free device reference count */
pci_dev_put(vfdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc8f0128286c..9993a471d668 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 28b81ae09b5a..8a2be444113d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -43,13 +43,195 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_in_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ /* ACK */
+ return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1: addend 1
+ * @add2: addend 2
+ *
+ * Returns one's complement 8-bit sum.
+ **/
+static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+ u16 sum = add1 + add2;
+
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum & 0xFF;
+}
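
The combined I2C transactions below protect their header bytes with the complement of this one's-complement sum. A short worked example (standalone, illustrative only; the register offset is an example):

#include <stdint.h>
#include <stdio.h>

static uint8_t ones_comp_add(uint8_t a, uint8_t b)
{
        uint16_t sum = a + b;

        sum = (sum & 0xFF) + (sum >> 8);        /* fold the carry back in */
        return (uint8_t)sum;
}

int main(void)
{
        uint16_t reg = 0x12B0;                          /* example register offset */
        uint8_t reg_high = ((reg >> 7) & 0xFE) | 1;     /* read-combined indication, as above */
        uint8_t csum = (uint8_t)~ones_comp_add(reg_high, reg & 0xFF);

        printf("reg_high=0x%02X csum=0x%02X\n", reg_high, csum);
        return 0;
}
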
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 10;
+ int retry = 0;
+ u8 csum_byte;
+ u8 high_bits;
+ u8 low_bits;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ~csum;
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ /* Re-start condition */
+ ixgbe_i2c_start(hw);
+ /* Device Address and read indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+ goto fail;
+ /* Get upper bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+ goto fail;
+ /* Get low bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+ goto fail;
+ /* Get csum */
+ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+ goto fail;
+ /* NACK */
+ if (ixgbe_clock_out_i2c_bit(hw, false))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ *val = (high_bits << 8) | low_bits;
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte read combined error - Retry.\n");
+ else
+ hw_dbg(hw, "I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ int max_retry = 1;
+ int retry = 0;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+ csum = ~csum;
+ do {
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write data 15:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+ goto fail;
+ /* Write data 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte write combined error - Retry.\n");
+ else
+ hw_dbg(hw, "I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
u32 phy_addr;
u16 ext_ability = 0;
+ if (!hw->phy.phy_semaphore_mask) {
+ hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+ IXGBE_STATUS_LAN_ID_1;
+ if (hw->phy.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ }
+
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
hw->phy.mdio.prtad = phy_addr;
@@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
s32 status;
- u16 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
+ u32 gssr = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
- u16 gssr;
+ u32 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
gssr = IXGBE_GSSR_PHY1_SM;
@@ -576,6 +762,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_100_FULL;
}
+ /* Internal PHY does not support 100 Mbps */
+ if (hw->mac.type == ixgbe_mac_X550EM_x)
+ *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+
return status;
}
@@ -632,6 +822,9 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
+ * This function always returns success; this is necessary since
+ * it is called via a function pointer that could call other
+ * functions that could return an error.
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
@@ -1462,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 10;
u32 retry = 0;
- u16 swfw_mask = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
*data = 0;
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
-
do {
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
@@ -1548,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 1;
u32 retry = 0;
- u16 swfw_mask = 0;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
@@ -1610,7 +1793,7 @@ fail:
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1639,7 +1822,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1697,9 +1880,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- i2cctl |= IXGBE_I2C_DATA_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1715,7 +1898,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
u32 timeout = 10;
bool ack = true;
@@ -1728,8 +1911,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that ACK in I2C spec is
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- ack = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ ack = ixgbe_get_i2c_data(hw, &i2cctl);
udelay(1);
if (ack == 0)
@@ -1758,15 +1941,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- *data = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1786,7 +1969,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -1822,14 +2005,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
u32 i2cctl_r = 0;
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (i2cctl_r & IXGBE_I2C_CLK_IN)
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
break;
}
}
@@ -1844,9 +2027,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -1864,19 +2047,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT;
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (data != ixgbe_get_i2c_data(i2cctl)) {
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
return IXGBE_ERR_I2C;
}
@@ -1891,9 +2074,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*
* Returns the I2C data bit value
**/
-static bool ixgbe_get_i2c_data(u32 *i2cctl)
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
- if (*i2cctl & IXGBE_I2C_DATA_IN)
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
return true;
return false;
}
@@ -1907,7 +2090,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
u32 i;
ixgbe_i2c_start(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 54071ed17e3b..434643881287 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -77,6 +77,11 @@
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -110,7 +115,6 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val);
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97c85b859536..c76ba90ecc6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
} else {
rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
}
@@ -618,6 +619,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
+static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
+ u32 qde)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
+
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ u32 reg;
+
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | qde;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+}
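
For orientation, a standalone sketch of the per-queue QDE word the helper above writes, using stand-in macros that mirror the IXGBE_QDE_* definitions added later in this patch; the vf, q_per_pool, and qde values are examples.

#include <stdio.h>

#define QDE_ENABLE      0x00000001
#define QDE_HIDE_VLAN   0x00000002
#define QDE_IDX_SHIFT   8
#define QDE_WRITE       0x00010000

int main(void)
{
        unsigned int vf = 3, q_per_pool = 2, i;
        unsigned int qde = QDE_ENABLE | QDE_HIDE_VLAN;  /* example: X550 port VLAN case */

        for (i = vf * q_per_pool; i < (vf + 1) * q_per_pool; i++)
                printf("queue %u -> QDE = 0x%08x\n", i,
                       QDE_WRITE | qde | (i << QDE_IDX_SHIFT));
        return 0;
}
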
+
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -647,15 +669,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
- for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
- /* flush previous write */
- IXGBE_WRITE_FLUSH(hw);
-
- /* indicate to hardware that we want to set drop enable */
- reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
- reg |= i << IXGBE_QDE_IDX_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
- }
+ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
@@ -1079,52 +1093,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return ixgbe_set_vf_mac(adapter, vf, mac);
}
+static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
+ u16 vlan, u8 qos)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+
+ if (adapter->vfinfo[vf].pf_vlan)
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan,
+ vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan, qos, vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ adapter->vfinfo[vf].vlan_count++;
+
+ /* enable hide vlan on X550 */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
+ IXGBE_QDE_HIDE_VLAN);
+
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+
+out:
+ return err;
+}
+
+static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_clear_vmvir(adapter, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+
+ return err;
+}
+
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
+ /* Check if there is already a port VLAN set, if so
+ * we have to delete the old one first before we
+ * can set the new one. The usage model had
+ * previously assumed the user would delete the
+ * old port VLAN before setting a new one but this
+ * is not necessarily the case.
+ */
if (adapter->vfinfo[vf].pf_vlan)
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan,
- vf);
- if (err)
- goto out;
- err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ err = ixgbe_disable_port_vlan(adapter, vf);
if (err)
goto out;
- ixgbe_set_vmvir(adapter, vlan, qos, vf);
- ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
- adapter->vfinfo[vf].pf_vlan = vlan;
- adapter->vfinfo[vf].pf_qos = qos;
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
+ err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
} else {
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_clear_vmvir(adapter, vf);
- ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
- adapter->vfinfo[vf].pf_vlan = 0;
- adapter->vfinfo[vf].pf_qos = 0;
+ err = ixgbe_disable_port_vlan(adapter, vf);
}
+
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dfd55d83bc03..d101b25dc4b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -74,9 +74,22 @@
#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -84,7 +97,8 @@
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
+#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ 0x15F5C : 0x00028))
#define IXGBE_LEDCTL 0x00200
#define IXGBE_FRTIMER 0x00048
#define IXGBE_TCPTIMER 0x0004C
@@ -112,10 +126,14 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00004000 : 0x00000001)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000200 : 0x00000002)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00001000 : 0x00000004)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000400 : 0x00000008)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
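
The *_BY_MAC() selectors above key off the MAC generation. A simplified standalone sketch with stand-in types (not the real ixgbe_hw / ixgbe_mac_type) showing which register offset and bit positions each generation resolves to:

#include <stdio.h>

enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540, MAC_X550, MAC_X550EM_x };
struct hw { enum mac_type type; };

#define I2CCTL_BY_MAC(h)        ((h)->type >= MAC_X550 ? 0x15F5C : 0x00028)
#define I2C_CLK_IN_BY_MAC(h)    ((h)->type >= MAC_X550 ? 0x00004000 : 0x00000001)
#define I2C_DATA_IN_BY_MAC(h)   ((h)->type >= MAC_X550 ? 0x00001000 : 0x00000004)

int main(void)
{
        struct hw x540 = { MAC_X540 }, x550 = { MAC_X550 };

        printf("X540: I2CCTL=0x%05X CLK_IN=0x%08X DATA_IN=0x%08X\n",
               (unsigned)I2CCTL_BY_MAC(&x540), (unsigned)I2C_CLK_IN_BY_MAC(&x540),
               (unsigned)I2C_DATA_IN_BY_MAC(&x540));
        printf("X550: I2CCTL=0x%05X CLK_IN=0x%08X DATA_IN=0x%08X\n",
               (unsigned)I2CCTL_BY_MAC(&x550), (unsigned)I2C_CLK_IN_BY_MAC(&x550),
               (unsigned)I2C_DATA_IN_BY_MAC(&x550));
        return 0;
}
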
@@ -290,8 +308,17 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_IMIRVP 0x05AC0
#define IXGBE_VMD_CTL 0x0581C
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..9 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
+
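
ixgbe_setup_vfreta() earlier in this patch indexes these per-pool registers with the PF pool (adapter->num_vfs). A standalone sketch of the resulting addresses, with an example pool number; the macros simply mirror the definitions above.

#include <stdio.h>

#define PFVFRSSRK(i, p) (0x018000 + ((i) * 4) + ((p) * 0x40))
#define PFVFRETA(i, p)  (0x019000 + ((i) * 4) + ((p) * 0x40))

int main(void)
{
        unsigned int pf_pool = 8;       /* example: PF pool index when 8 VFs are enabled */
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("PFVFRSSRK(%u,%u)=0x%05X  PFVFRETA(%u,%u)=0x%05X\n",
                       i, pf_pool, (unsigned)PFVFRSSRK(i, pf_pool),
                       i, pf_pool, (unsigned)PFVFRETA(i, pf_pool));
        return 0;
}
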
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
#define IXGBE_FDIRHKEY 0x0EE68
@@ -725,6 +752,24 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_LDPCECL 0x0E820
#define IXGBE_LDPCECH 0x0E821
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+
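
The 3-bit speed/duplex field defined above decodes directly by value. A minimal standalone sketch, with an example register reading:

#include <stdio.h>

#define AUTO_NEG_VENDOR_STATUS_MASK     0x7

static const char *const speed_duplex[] = {
        "10M half", "10M full", "100M half", "100M full",
        "1G half",  "1G full",  "10G half",  "10G full",
};

int main(void)
{
        unsigned int vendor_status = 0x0005;    /* example value: 1Gb/s full duplex */

        printf("negotiated: %s\n",
               speed_duplex[vendor_status & AUTO_NEG_VENDOR_STATUS_MASK]);
        return 0;
}
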
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -798,6 +843,12 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PBACLR_82599 0x11068
#define IXGBE_CIAA_82599 0x11088
#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
+#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1120,6 +1171,13 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
@@ -1129,9 +1187,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
/* MII clause 22/28 definitions */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
@@ -1632,6 +1704,7 @@ enum {
#define IXGBE_LINKS_TL_FAULT 0x00001000
#define IXGBE_LINKS_SIGNAL 0x00000F00
+#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
#define IXGBE_LINKS_SPEED_82599 0x30000000
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
@@ -1674,12 +1747,14 @@ enum {
#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
-#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
+#define IXGBE_GSSR_I2C_MASK 0x1800
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -1713,27 +1788,32 @@ enum {
#define IXGBE_PBANUM_LENGTH 11
/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
-#define IXGBE_EEPROM_CHECKSUM 0x3F
-#define IXGBE_EEPROM_SUM 0xBABA
-#define IXGBE_PCIE_ANALOG_PTR 0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR 0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR 0x05
-#define IXGBE_PCIE_GENERAL_PTR 0x06
-#define IXGBE_PCIE_CONFIG0_PTR 0x07
-#define IXGBE_PCIE_CONFIG1_PTR 0x08
-#define IXGBE_CORE0_PTR 0x09
-#define IXGBE_CORE1_PTR 0x0A
-#define IXGBE_MAC0_PTR 0x0B
-#define IXGBE_MAC1_PTR 0x0C
-#define IXGBE_CSR0_CONFIG_PTR 0x0D
-#define IXGBE_CSR1_CONFIG_PTR 0x0E
-#define IXGBE_FW_PTR 0x0F
-#define IXGBE_PBANUM0_PTR 0x15
-#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_FREE_SPACE_PTR 0X3E
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_FREE_SPACE_PTR 0X3E
/* External Thermal Sensor Config */
#define IXGBE_ETS_CFG 0x26
@@ -1994,12 +2074,14 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_HIDE_VLAN 0x00000002
#define IXGBE_QDE_IDX_MASK 0x00007F00
#define IXGBE_QDE_IDX_SHIFT 8
#define IXGBE_QDE_WRITE 0x00010000
@@ -2289,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_DROP_QUEUE 127
/* Manageability Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
/* CEM Support */
-#define FW_CEM_HDR_LEN 0x4
-#define FW_CEM_CMD_DRIVER_INFO 0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
-#define FW_CEM_CMD_RESERVED 0x0
-#define FW_CEM_UNUSED_VER 0x0
-#define FW_CEM_MAX_RETRIES 3
-#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2313,6 +2409,25 @@ struct ixgbe_hic_hdr {
u8 checksum;
};
+struct ixgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+ struct ixgbe_hic_hdr2_req req;
+ struct ixgbe_hic_hdr2_rsp rsp;
+};
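
One plausible decoding of the split buf_lenh_status field described in the response header comment above (bits 7-5 extend the buffer length, bits 4-0 carry the status); standalone sketch, byte values are examples.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t buf_lenl = 0x34, buf_lenh_status = 0x61;        /* example response bytes */
        unsigned int buf_len = ((buf_lenh_status & 0xE0) << 3) | buf_lenl; /* bits 7-5 */
        unsigned int status  = buf_lenh_status & 0x1F;                     /* bits 4-0 */

        printf("buf_len=%u status=0x%02X\n", buf_len, status);
        return 0;
}
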
+
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
u8 port_num;
@@ -2324,6 +2439,32 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2437,10 +2578,12 @@ struct ixgbe_adv_tx_context_desc {
typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN 0
-#define IXGBE_LINK_SPEED_100_FULL 0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
@@ -2588,6 +2731,8 @@ enum ixgbe_mac_type {
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
ixgbe_mac_X540,
+ ixgbe_mac_X550,
+ ixgbe_mac_X550EM_x,
ixgbe_num_macs
};
@@ -2596,6 +2741,9 @@ enum ixgbe_phy_type {
ixgbe_phy_none,
ixgbe_phy_tn,
ixgbe_phy_aq,
+ ixgbe_phy_x550em_kr,
+ ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2839,7 +2987,7 @@ struct ixgbe_eeprom_operations {
s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
- u16 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -2861,8 +3009,8 @@ struct ixgbe_mac_operations {
s32 (*disable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
- void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u32);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -2908,6 +3056,11 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+
+ /* DMA Coalescing */
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2920,6 +3073,7 @@ struct ixgbe_phy_operations {
s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -2928,6 +3082,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
};
@@ -2980,6 +3136,8 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
+ u8 lan_id;
+ u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
enum ixgbe_smart_speed smart_speed;
@@ -3086,4 +3244,71 @@ struct ixgbe_info {
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+
+#define IXGBE_KX4_LINK_CNTL_1 0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
+#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+
#endif /* _IXGBE_TYPE_H_ */
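For orientation, the KRM register macros added above select one of two per-port register banks based on the LAN id. A small standalone sketch (macros copied from the hunk above; not driver code) shows how the same logical register resolves to different offsets on port 0 and port 1:

    #include <stdio.h>

    /* Copied from the ixgbe_type.h hunk above: per-port KRM register selectors. */
    #define IXGBE_KRM_LINK_CTRL_1(P)      ((P == 0) ? (0x420C) : (0x820C))
    #define IXGBE_KRM_TX_COEFF_CTRL_1(P)  ((P == 0) ? (0x5520) : (0x9520))

    int main(void)
    {
        /* LAN 0 uses the 0x4xxx/0x5xxx bank, LAN 1 the 0x8xxx/0x9xxx bank. */
        printf("KRM_LINK_CTRL_1:     lan0=0x%04X lan1=0x%04X\n",
               IXGBE_KRM_LINK_CTRL_1(0), IXGBE_KRM_LINK_CTRL_1(1));
        printf("KRM_TX_COEFF_CTRL_1: lan0=0x%04X lan1=0x%04X\n",
               IXGBE_KRM_TX_COEFF_CTRL_1(0), IXGBE_KRM_TX_COEFF_CTRL_1(1));
        return 0;
    }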
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e88305d5d18d..ba54ff07b438 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,6 +32,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_x540.h"
#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -42,17 +43,15 @@
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
-static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
return ixgbe_media_type_copper;
}
-static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
@@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
@@ -179,7 +177,7 @@ mac_reset_top:
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val;
@@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
+ u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
@@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
*/
/* Include 0x0-0x3F in the checksum */
- for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
+ for (i = 0; i < checksum_last_word; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
- if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
+ if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
- if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
break;
}
@@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
(pointer + length) >= hw->eeprom.word_size)
continue;
- for (j = pointer+1; j <= pointer+length; j++) {
- if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
/**
@@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
+ if (status)
+ goto out;
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ }
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
- /* Verify read and calculated checksums are the same */
- if (read_checksum != checksum)
- return IXGBE_ERR_EEPROM_CHECKSUM;
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
- if (!status)
- status = ixgbe_update_flash_X540(hw);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
new file mode 100644
index 000000000000..a1468b1f4d8a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
new file mode 100644
index 000000000000..ffdd1231f419
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -0,0 +1,1432 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/** ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+ return ixgbe_identify_module_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of
+ * the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: pointer to storage for the data read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usleep_range(10, 20);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to read, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "Read timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+ return 0;
+}
+
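As a rough illustration of how the indirect command word used by ixgbe_read_iosf_sb_reg_x550() is assembled from the new defines (a standalone sketch with the field layout taken from the shift/mask defines in the ixgbe_type.h hunk; the printed values are illustrative only):

    #include <stdio.h>

    /* Copied from the ixgbe_type.h hunk above. */
    #define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT           0
    #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT  28
    #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT           31
    #define IXGBE_SB_IOSF_CTRL_BUSY   (1u << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
    #define IXGBE_SB_IOSF_TARGET_KR_PHY             0
    #define IXGBE_KRM_LINK_CTRL_1(P)  ((P == 0) ? (0x420C) : (0x820C))

    int main(void)
    {
        /* Same composition as the driver: register address in the low bits,
         * 3-bit target select in bits 30:28; bit 31 is the hardware BUSY flag
         * that the poll loop waits on.
         */
        unsigned int command =
            (IXGBE_KRM_LINK_CTRL_1(0) << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
            (IXGBE_SB_IOSF_TARGET_KR_PHY <<
             IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT);

        printf("command word (KRM_LINK_CTRL_1, KR PHY, lan0): 0x%08X\n", command);
        printf("busy mask:                                    0x%08X\n",
               IXGBE_SB_IOSF_CTRL_BUSY);
        return 0;
    }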
+/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ * command assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+ struct ixgbe_hic_read_shadow_ram buffer;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32(offset * 2);
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ if (status)
+ return status;
+
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
+
+ return 0;
+}
+
+/** ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct ixgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 i;
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32((offset + current_word) * 2);
+ buffer.length = cpu_to_be16(words_to_read * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ false);
+ if (status) {
+ hw_dbg(hw, "Host interface command failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+ 2 * i;
+ u32 value = IXGBE_READ_REG(hw, reg);
+
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
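The loop above unpacks two 16-bit EEPROM words from each 32-bit FLEX_MNG register read, low half first. A minimal standalone sketch of just that unpacking (the register value is made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical 32-bit value as read from the FW_NVM_DATA area: two
         * consecutive shadow-RAM words packed into one register, low word first.
         */
        uint32_t value = 0xBBBBAAAAu;
        uint16_t data[2];

        data[0] = (uint16_t)(value & 0xffff);  /* first EEPROM word:  0xAAAA */
        value >>= 16;
        data[1] = (uint16_t)(value & 0xffff);  /* second EEPROM word: 0xBBBB */

        printf("word0=0x%04X word1=0x%04X\n", data[0], data[1]);
        return 0;
    }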
+/** ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
+ * @csum: address of checksum to update
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ u16 size, u16 *csum, u16 *buffer,
+ u32 buffer_size)
+{
+ u16 buf[256];
+ s32 status;
+ u16 length, bufsz, i, start;
+ u16 *local_buffer;
+
+ bufsz = sizeof(buf) / sizeof(buf[0]);
+
+ /* Read a chunk at the pointer location */
+ if (!buffer) {
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = buf;
+ } else {
+ if (buffer_size < ptr)
+ return IXGBE_ERR_PARAM;
+ local_buffer = &buffer[ptr];
+ }
+
+ if (size) {
+ start = 0;
+ length = size;
+ } else {
+ start = 1;
+ length = local_buffer[0];
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return 0;
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+ return IXGBE_ERR_PARAM;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += local_buffer[i];
+ }
+ return 0;
+}
+
+/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @buffer: pointer to a buffer of EEPROM words to checksum, or NULL to read them from the EEPROM
+ * @buffer_size: size of buffer
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 *local_buffer;
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (!buffer) {
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+ return IXGBE_ERR_PARAM;
+ local_buffer = buffer;
+ }
+
+ /* For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += local_buffer[i];
+
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = local_buffer[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+ buffer, buffer_size);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif, assuming
+ * that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = cpu_to_be32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ hw_dbg(hw, "write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ union ixgbe_hic_hdr2 buffer;
+
+ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.req.buf_lenh = 0;
+ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Write 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+ u32 i = 0;
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
+ return status;
+ }
+
+ for (i = 0; i < words; i++) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+ data[i]);
+ if (status) {
+ hw_dbg(hw, "Eeprom buffered write failed\n");
+ break;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ **/
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+}
+
+/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ bool setup_linear;
+ u16 reg_slice, edc_mode;
+ s32 ret_val;
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_unknown:
+ return 0;
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ setup_linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ setup_linear = false;
+ break;
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ /* The CS4227 slice address is the base address + the port-pair reg
+ * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
+ */
+ reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
+
+ if (setup_linear)
+ edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+ /* Configure CS4227 for connection type. */
+ ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ edc_mode);
+
+ if (ret_val)
+ ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
+ edc_mode);
+
+ return ret_val;
+}
+
+/** ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ **/
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return 0;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ }
+ return 0;
+}
+
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
+ * IOSF device
+ *
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usleep_range(10, 20);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to write, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "Write timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
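Each step in ixgbe_setup_ixfi_x550em() is a read-modify-write cycle over the IOSF mailbox. A standalone sketch of the first step, disabling AN and forcing 10G on LINK_CTRL_1 (bit defines copied from the ixgbe_type.h hunk; the starting register value is hypothetical):

    #include <stdio.h>

    /* Bit defines copied from the ixgbe_type.h hunk above. */
    #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK  (0x7 << 8)
    #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G   (4 << 8)
    #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE         (1 << 29)

    int main(void)
    {
        unsigned int reg_val = 0x20000200u; /* hypothetical: AN enabled, 1G forced */

        /* Disable AN and force the speed to 10G, as in ixgbe_setup_ixfi_x550em(). */
        reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
        reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
        reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;

        printf("new KRM_LINK_CTRL_1 value: 0x%08X\n", reg_val); /* 0x00000400 */
        return 0;
    }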
+/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KX4 PHY.
+ **/
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
+ IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
+
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
+
+ return status;
+}
+
+/** ixgbe_setup_kr_x550em - Configure the KR PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to talk to the external PHY. The base
+ * driver will call this function when it gets notification via interrupt from
+ * the external PHY. This function forces the internal PHY into iXFI mode at
+ * the correct speed.
+ *
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
+ **/
+s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 lasi, autoneg_status, speed;
+ ixgbe_link_speed force_speed;
+
+ /* Verify that the external link status has changed */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+ if (status)
+ return status;
+
+ /* If there was no change in link status, we can just exit */
+ if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+ return 0;
+
+ /* we read this twice back to back to indicate current status */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ /* If link is not up, return an error so the caller treats the link as down */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+
+ /* clear everything but the speed and duplex bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+ switch (speed) {
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+}
+
+/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ **/
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ }
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_X550em(hw);
+ if (phy->sfp_type != ixgbe_sfp_type_unknown)
+ phy->ops.reset = NULL;
+
+ /* Set functions pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_kr:
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/** ixgbe_get_media_type_X550em - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ *
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+ return media_type;
+}
+
+/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ ** @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+ u32 retries = 2;
+
+ do {
+ /* decrement retries counter and exit if we hit 0 */
+ if (retries < 1) {
+ hw_dbg(hw, "External PHY not yet finished resetting.");
+ return IXGBE_ERR_PHY;
+ }
+ retries--;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ /* Verify PHY FW reset has completed */
+ } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
+
+ /* Set port to low power mode */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ /* Enable the transmitter */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+
+ /* Un-stall the PHY FW */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
+
+/** ixgbe_reset_hw_X550em - Perform hardware reset
+ ** @hw: pointer to hardware structure
+ **
+ ** Resets the hardware by resetting the transmit and receive units, masks
+ ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ ** reset.
+ **/
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i;
+ bool link_up = false;
+
+ /* Call adapter stop to disable Tx/Rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status)
+ return status;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ /* start the external PHY */
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = ixgbe_init_ext_t_x550em(hw);
+ if (status)
+ return status;
+ }
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ /* Reset PHY */
+ if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /* Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ return status;
+}
+
+#define X550_COMMON_MAC \
+ .init_hw = &ixgbe_init_hw_generic, \
+ .start_hw = &ixgbe_start_hw_X540, \
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \
+ .get_mac_addr = &ixgbe_get_mac_addr_generic, \
+ .get_device_caps = &ixgbe_get_device_caps_generic, \
+ .stop_adapter = &ixgbe_stop_adapter_generic, \
+ .get_bus_info = &ixgbe_get_bus_info_generic, \
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
+ .read_analog_reg8 = NULL, \
+ .write_analog_reg8 = NULL, \
+ .set_rxpba = &ixgbe_set_rxpba_generic, \
+ .check_link = &ixgbe_check_mac_link_generic, \
+ .led_on = &ixgbe_led_on_generic, \
+ .led_off = &ixgbe_led_off_generic, \
+ .blink_led_start = &ixgbe_blink_led_start_X540, \
+ .blink_led_stop = &ixgbe_blink_led_stop_X540, \
+ .set_rar = &ixgbe_set_rar_generic, \
+ .clear_rar = &ixgbe_clear_rar_generic, \
+ .set_vmdq = &ixgbe_set_vmdq_generic, \
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \
+ .clear_vmdq = &ixgbe_clear_vmdq_generic, \
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \
+ .enable_mc = &ixgbe_enable_mc_generic, \
+ .disable_mc = &ixgbe_disable_mc_generic, \
+ .clear_vfta = &ixgbe_clear_vfta_generic, \
+ .set_vfta = &ixgbe_set_vfta_generic, \
+ .fc_enable = &ixgbe_fc_enable_generic, \
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \
+ .init_uta_tables = &ixgbe_init_uta_tables_generic, \
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
+ .get_thermal_sensor_data = NULL, \
+ .init_thermal_sensor_thresh = NULL, \
+ .prot_autoc_read = &prot_autoc_read_generic, \
+ .prot_autoc_write = &prot_autoc_write_generic, \
+
+static struct ixgbe_mac_operations mac_ops_X550 = {
+ X550_COMMON_MAC
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .setup_sfp = NULL,
+};
+
+static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+ X550_COMMON_MAC
+ .reset_hw = &ixgbe_reset_hw_X550em,
+ .get_media_type = &ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+
+};
+
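The X550_COMMON_MAC / X550_COMMON_EEP / X550_COMMON_PHY macros end with a trailing comma so they can be dropped into a designated initializer and then followed by per-device overrides. A reduced standalone sketch of that pattern (toy struct and function names, not from the driver):

    #include <stdio.h>

    struct toy_ops {
        int (*reset_hw)(void);
        int (*start_hw)(void);
    };

    static int generic_start(void) { return 0; }
    static int reset_a(void)       { return 1; }
    static int reset_b(void)       { return 2; }

    /* Shared initializers; the trailing comma lets overrides follow directly. */
    #define TOY_COMMON_OPS \
        .start_hw = &generic_start,

    static struct toy_ops ops_a = {
        TOY_COMMON_OPS
        .reset_hw = &reset_a,
    };

    static struct toy_ops ops_b = {
        TOY_COMMON_OPS
        .reset_hw = &reset_b,
    };

    int main(void)
    {
        printf("%d %d %d\n", ops_a.reset_hw(), ops_b.reset_hw(), ops_a.start_hw());
        return 0;
    }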
+#define X550_COMMON_EEP \
+ .read = &ixgbe_read_ee_hostif_X550, \
+ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
+ .write = &ixgbe_write_ee_hostif_X550, \
+ .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
+ .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+ X550_COMMON_EEP
+ .init_params = &ixgbe_init_eeprom_params_X550,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+ X550_COMMON_EEP
+ .init_params = &ixgbe_init_eeprom_params_X540,
+};
+
+#define X550_COMMON_PHY \
+ .identify_sfp = &ixgbe_identify_module_generic, \
+ .reset = NULL, \
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
+ .check_overtemp = &ixgbe_tn_check_overtemp, \
+ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
+
+static struct ixgbe_phy_operations phy_ops_X550 = {
+ X550_COMMON_PHY
+ .init = NULL,
+ .identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
+ .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = NULL, /* defined later */
+ .write_reg = NULL, /* defined later */
+ .setup_link = NULL, /* defined later */
+};
+
+struct ixgbe_info ixgbe_X550_info = {
+ .mac = ixgbe_mac_X550,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X550,
+ .eeprom_ops = &eeprom_ops_X550,
+ .phy_ops = &phy_ops_X550,
+ .mbx_ops = &mbx_ops_generic,
+};
+
+struct ixgbe_info ixgbe_X550EM_x_info = {
+ .mac = ixgbe_mac_X550EM_x,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X550EM_x,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_X550EM_x,
+ .mbx_ops = &mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 05e4f32d84f7..7412d378b77b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -31,6 +31,8 @@
/* Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index ba96cb5b886d..8c44ab25f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer {
};
struct ixgbevf_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
};
struct ixgbevf_stats {
@@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats {
};
struct ixgbevf_rx_queue_stats {
- u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
@@ -92,9 +92,10 @@ struct ixgbevf_ring {
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
- unsigned int count; /* amount of descriptors */
- unsigned int next_to_use;
- unsigned int next_to_clean;
+ u16 count; /* amount of descriptors */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -110,12 +111,11 @@ struct ixgbevf_ring {
u64 hw_csum_rx_error;
u8 __iomem *tail;
+ struct sk_buff *skb;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
-
- u16 rx_buf_len;
int queue_index; /* needed for multiqueue queue management */
};
@@ -134,12 +134,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2K 2048
-#define IXGBEVF_RXBUFFER_4K 4096
-#define IXGBEVF_RXBUFFER_8K 8192
-#define IXGBEVF_RXBUFFER_10K 10240
+#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
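The new ixgbevf_test_staterr() helper masks the requested bits against the descriptor's status_error field and returns a non-zero value when any are set. A standalone sketch of that logic with minimal stand-ins for the kernel types (the struct here is trimmed to the fields used; the bit values follow the IXGBE_RXD_STAT_* defines used elsewhere in the ixgbe headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal stand-ins, assuming a little-endian host so cpu_to_le32() is a no-op. */
    typedef uint32_t __le32;
    #define cpu_to_le32(x) ((__le32)(x))

    union ixgbe_adv_rx_desc {
        struct {
            struct {
                __le32 status_error;
                __le32 length;
            } upper;
        } wb;
    };

    #define IXGBE_RXD_STAT_DD   0x01  /* descriptor done */
    #define IXGBE_RXD_STAT_EOP  0x02  /* end of packet */

    static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
                                              const uint32_t stat_err_bits)
    {
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
    }

    int main(void)
    {
        union ixgbe_adv_rx_desc desc = {
            .wb = { .upper = { .status_error = cpu_to_le32(IXGBE_RXD_STAT_DD) } }
        };

        /* Typical use: only hand a completed descriptor to the clean-up path. */
        printf("DD set:  %s\n", ixgbevf_test_staterr(&desc, IXGBE_RXD_STAT_DD) ? "yes" : "no");
        printf("EOP set: %s\n", ixgbevf_test_staterr(&desc, IXGBE_RXD_STAT_EOP) ? "yes" : "no");
        return 0;
    }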
static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
u16 ntc = ring->next_to_clean;
@@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
/* board specific private data structure */
struct ixgbevf_adapter {
- struct timer_list watchdog_timer;
+ /* this field must be first, see ixgbevf_process_skb_fields */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct timer_list watchdog_timer;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -363,7 +370,6 @@ struct ixgbevf_adapter {
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 non_eop_descs;
int num_msix_vectors;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -373,7 +379,7 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+
#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
struct msix_entry *msix_entries;
@@ -423,18 +429,17 @@ enum ixbgevf_state_t {
__IXGBEVF_WORK_INIT,
};
-struct ixgbevf_cb {
- struct sk_buff *prev;
-};
-#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
-
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,
+ board_X550_vf,
+ board_X550EM_x_vf,
};
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 030a219c85e3..62a0d8e0f17d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -66,6 +66,8 @@ static char ixgbevf_copyright[] =
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
[board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
/* required last entry */
{0, }
};
@@ -143,21 +147,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
return value;
}
-static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
- u32 val)
-{
- rx_ring->next_to_use = val;
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- ixgbevf_write_tail(rx_ring, val);
-}
-
/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
@@ -343,39 +332,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
}
/**
- * ixgbevf_receive_skb - Send a completed packet up the stack
- * @q_vector: structure containing interrupt and ring information
- * @skb: packet to send up
- * @status: hardware indication of status of receive
- * @rx_desc: rx descriptor
- **/
-static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- union ixgbe_adv_rx_desc *rx_desc)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- bool is_vlan = (status & IXGBE_RXD_STAT_VP);
- u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-
- if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
-
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
-}
-
-/**
* ixgbevf_rx_skb - Helper function to determine proper Rx method
* @q_vector: structure containing interrupt and ring information
* @skb: packet to send up
- * @status: hardware indication of status of receive
- * @rx_desc: rx descriptor
**/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- union ixgbe_adv_rx_desc *rx_desc)
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
skb_mark_napi_id(skb, &q_vector->napi);
@@ -387,17 +349,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
- ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+ napi_gro_receive(&q_vector->napi, skb);
}
-/**
- * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @ring: pointer to Rx descriptor ring structure
- * @status_err: hardware indication of status of receive
+/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring-specific data
+ * @rx_desc: current Rx descriptor being processed
* @skb: skb currently being received and modified
- **/
+ */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
- u32 status_err, struct sk_buff *skb)
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -406,16 +368,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
/* if IP and error */
- if ((status_err & IXGBE_RXD_STAT_IPCS) &&
- (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
+ ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
ring->rx_stats.csum_err++;
return;
}
- if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
return;
- if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
ring->rx_stats.csum_err++;
return;
}
@@ -424,52 +386,408 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the checksum, VLAN, protocol, and other fields within
+ * the skb.
+ */
+static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
+
+ if (test_bit(vid & VLAN_VID_MASK, active_vlans))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+/**
+ * ixgbevf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true to
+ * indicate that this is in fact a non-EOP buffer and the frame
+ * continues in the following descriptor.
+ **/
+static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
+
+ if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
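The non-EOP check advances next_to_clean with a compare-and-reset wrap instead of a modulo, then reports whether the frame continues. A standalone model of that ring-index pattern (userspace C; ring_sketch and is_non_eop are hypothetical names, and the EOP bit is passed in as a plain flag instead of being read from a descriptor):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring_sketch {
	uint16_t count;          /* number of descriptors in the ring */
	uint16_t next_to_clean;
};

/* Advance next_to_clean; report whether the current descriptor ends a frame. */
static bool is_non_eop(struct ring_sketch *r, bool eop)
{
	uint16_t ntc = r->next_to_clean + 1;

	r->next_to_clean = (ntc < r->count) ? ntc : 0;   /* wrap without '%' */
	return !eop;
}

int main(void)
{
	struct ring_sketch r = { .count = 4, .next_to_clean = 3 };

	printf("non-eop=%d ntc=%u\n", is_non_eop(&r, false), r.next_to_clean);
	printf("non-eop=%d ntc=%u\n", is_non_eop(&r, true), r.next_to_clean);
	return 0;
}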
+
+static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma = bi->dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
+ return true;
+}
+
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
* @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
+ * @cleaned_count: number of buffers to replace
**/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
- int cleaned_count)
+ u16 cleaned_count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- while (cleaned_count--) {
- rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_buffer_info[i];
+ /* nothing to do or no valid netdev defined */
+ if (!cleaned_count || !rx_ring->netdev)
+ return;
- if (!bi->skb) {
- struct sk_buff *skb;
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len);
- if (!skb)
- goto no_buffers;
+ do {
+ if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
+ break;
- bi->skb = skb;
+ /* Refresh the desc even if pkt_addr didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- bi->dma = dma_map_single(rx_ring->dev, skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- dev_kfree_skb(skb);
- bi->skb = NULL;
- dev_err(rx_ring->dev, "Rx DMA map failed\n");
- break;
- }
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
}
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ ixgbevf_write_tail(rx_ring, i);
}
+}
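The reworked refill loop biases the index negative (i -= count) so the hot path only increments one variable, resets the pointers on the rare wrap, and writes the tail register once at the end behind a write barrier. A compact userspace model of just the indexing trick (RING_COUNT, desc[] and next_to_use are illustrative stand-ins, not driver state):

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 4

int main(void)
{
	int desc[RING_COUNT] = {0};
	uint16_t next_to_use = 2;            /* start mid-ring to show the wrap */
	uint16_t to_fill = 3;
	/* bias the running index so "i == 0" detects the wrap point */
	int i = (int)next_to_use - RING_COUNT;

	do {
		desc[i + RING_COUNT] = 1;    /* "post" a buffer at this slot */
		i++;
		if (!i)                      /* walked past the ring end */
			i -= RING_COUNT;
		to_fill--;
	} while (to_fill);

	next_to_use = (uint16_t)(i + RING_COUNT);
	printf("next_to_use=%u filled: %d %d %d %d\n", next_to_use,
	       desc[0], desc[1], desc[2], desc[3]);
	return 0;
}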
+
+/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbevf specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
-no_buffers:
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- if (rx_ring->next_to_use != i)
- ixgbevf_release_rx_desc(rx_ring, i);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
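pull_tail copies only the packet header out of the page fragment into the linear skb area, rounding the copy length up to a long so the memcpy stays aligned while only the true header length is removed from the fragment. A rough userspace analogue (the buffers and the pretend header length are assumptions; no skb internals are modelled):

#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned char frag[256];               /* stands in for the page frag */
	unsigned char linear[128] = {0};       /* stands in for the linear area */
	unsigned int frag_len = sizeof(frag);
	unsigned int pull_len = 54;            /* pretend the header is 54 bytes */
	unsigned int copy_len = ALIGN_UP(pull_len, sizeof(long));

	memset(frag, 0xab, sizeof(frag));

	/* copy the (aligned) header into the linear area ... */
	memcpy(linear, frag, copy_len);
	/* ... but shrink the frag by the unaligned header length only */
	frag_len -= pull_len;

	printf("copied %u bytes, frag now %u bytes\n", copy_len, frag_len);
	return 0;
}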
+
+/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ /* verify that the packet does not have any known errors */
+ if (unlikely(ixgbevf_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+ struct net_device *netdev = rx_ring->netdev;
+
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ ixgbevf_pull_tail(rx_ring, skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *old_buff)
+{
+ struct ixgbevf_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->dma = old_buff->dma;
+ new_buff->page_offset = old_buff->page_offset;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
+ new_buff->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+static inline bool ixgbevf_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
+/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ */
+static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IXGBEVF_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+
+ if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* page is not reserved, we can reuse buffer as is */
+ if (likely(!ixgbevf_page_is_reserved(page)))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbevf_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ return false;
+
+#endif
+ /* Even if we own the page, we are not allowed to use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
+ return true;
+}
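With 4K pages each receive buffer is half a page, so reuse is just flipping page_offset between the two halves while the stack still holds the other half. A minimal sketch of the offset flip and the "sole owner" test (rx_buf_sketch and the integer refcount are simplified assumptions standing in for page_count()):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u
#define RX_BUFSZ         2048u   /* half a page per buffer */

struct rx_buf_sketch {
	unsigned int page_offset;
	int page_refcount;        /* stands in for page_count(page) */
};

/* Returns true when the buffer can go back on the ring for another receive. */
static bool try_reuse(struct rx_buf_sketch *b)
{
	if (b->page_refcount != 1)        /* someone else still holds the page */
		return false;

	b->page_offset ^= RX_BUFSZ;       /* flip to the other half of the page */
	b->page_refcount++;               /* the ring takes a fresh reference */
	return true;
}

int main(void)
{
	struct rx_buf_sketch b = { .page_offset = 0, .page_refcount = 1 };

	printf("reused=%d offset=%u\n", try_reuse(&b), b.page_offset);
	b.page_refcount = 3;              /* stack still owns frags from this page */
	printf("reused=%d offset=%u\n", try_reuse(&b), b.page_offset);
	return 0;
}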
+
+static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbevf_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IXGBEVF_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -484,78 +802,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
- struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
- struct sk_buff *skb;
- unsigned int i;
- u32 len, staterr;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
- i = rx_ring->next_to_clean;
- rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ while (likely(total_rx_packets < budget)) {
+ union ixgbe_adv_rx_desc *rx_desc;
- while (staterr & IXGBE_RXD_STAT_DD) {
- if (!budget)
- break;
- budget--;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
- rmb(); /* read descriptor and rx_buffer_info after status DD */
- len = le16_to_cpu(rx_desc->wb.upper.length);
- skb = rx_buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
- rx_buffer_info->skb = NULL;
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (rx_buffer_info->dma) {
- dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
- skb_put(skb, len);
- }
+ if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ break;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
- next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
- cleaned_count++;
+ /* retrieve a buffer from the ring */
+ skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
- next_buffer = &rx_ring->rx_buffer_info[i];
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
- if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- skb->next = next_buffer->skb;
- IXGBE_CB(skb->next)->prev = skb;
- rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
- }
+ cleaned_count++;
- /* we should not be chaining buffers, if we did drop the skb */
- if (IXGBE_CB(skb)->prev) {
- do {
- struct sk_buff *this = skb;
- skb = IXGBE_CB(skb)->prev;
- dev_kfree_skb(this);
- } while (skb);
- goto next_desc;
- }
+ /* fetch next buffer in frame if non-eop */
+ if (ixgbevf_is_non_eop(rx_ring, rx_desc))
+ continue;
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
- dev_kfree_skb_irq(skb);
- goto next_desc;
+ /* verify the packet layout is correct */
+ if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- ixgbevf_rx_checksum(rx_ring, staterr, skb);
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
* source pruning.
@@ -565,32 +856,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
- goto next_desc;
+ continue;
}
- ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
+ /* populate checksum, VLAN, and protocol */
+ ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
-next_desc:
- rx_desc->wb.upper.status_error = 0;
+ ixgbevf_rx_skb(q_vector, skb);
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ /* reset skb pointer */
+ skb = NULL;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ /* update budget accounting */
+ total_rx_packets++;
}
- rx_ring->next_to_clean = i;
- cleaned_count = ixgbevf_desc_unused(rx_ring);
-
- if (cleaned_count)
- ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
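The rewritten receive path is budget-driven: it refills buffers in batches, stops as soon as a descriptor is not marked done, and parks a partially assembled skb on the ring between polls. A skeletal model of that control flow (pure userspace; ring_model, REFILL_BATCH and the ready[] flags are all illustrative, not driver structures):

#include <stdbool.h>
#include <stdio.h>

#define REFILL_BATCH 2

struct ring_model { int ready[8]; int ntc; int count; int unused; };

static int clean_rx(struct ring_model *r, int budget)
{
	int rx_packets = 0;

	while (rx_packets < budget) {
		if (r->unused >= REFILL_BATCH) {       /* batch the refills */
			printf("refill %d buffers\n", r->unused);
			r->unused = 0;
		}
		if (!r->ready[r->ntc])                 /* DD bit not set yet */
			break;
		r->ready[r->ntc] = 0;                  /* consume the descriptor */
		r->ntc = (r->ntc + 1) % r->count;
		r->unused++;
		rx_packets++;                          /* every frame EOP here */
	}
	return rx_packets;
}

int main(void)
{
	struct ring_model r = { .ready = {1, 1, 1, 0}, .count = 4 };

	printf("cleaned %d packets\n", clean_rx(&r, 64));
	return 0;
}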
@@ -634,12 +916,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget)
< per_ring_budget);
- adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
#ifdef CONFIG_NET_RX_BUSY_POLL
ixgbevf_qv_unlock_napi(q_vector);
@@ -1229,19 +1509,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
- struct ixgbevf_ring *rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = adapter->rx_ring[index];
-
srrctl = IXGBE_SRRCTL_DROP_EN;
+ srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1260,40 +1536,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
-static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i;
- u16 rx_buf_len;
-
- /* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
-
- /* PF will allow an extra 4 bytes past for vlan tagged frames */
- max_frame += VLAN_HLEN;
-
- /*
- * Allocate buffer sizes that fit well into 32K and
- * take into account max frame size of 9.5K
- */
- if ((hw->mac.type == ixgbe_mac_X540_vf) &&
- (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else if (max_frame <= IXGBEVF_RXBUFFER_2K)
- rx_buf_len = IXGBEVF_RXBUFFER_2K;
- else if (max_frame <= IXGBEVF_RXBUFFER_4K)
- rx_buf_len = IXGBEVF_RXBUFFER_4K;
- else if (max_frame <= IXGBEVF_RXBUFFER_8K)
- rx_buf_len = IXGBEVF_RXBUFFER_8K;
- else
- rx_buf_len = IXGBEVF_RXBUFFER_10K;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
-}
-
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
@@ -1371,12 +1613,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
ixgbevf_configure_srrctl(adapter, reg_idx);
- /* prevent DMA from exceeding buffer space available */
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ /* allow any size packet since we can handle overflow */
+ rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
@@ -1393,11 +1636,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
ixgbevf_setup_psrtype(adapter);
- /* set_rx_buffer_len must be called before ring initialization */
- ixgbevf_set_rx_buffer_len(adapter);
+ /* notify the PF of our intent to use this size of frame */
+ ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
@@ -1702,32 +1947,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
+ struct device *dev = rx_ring->dev;
unsigned long size;
unsigned int i;
+ /* Free Rx ring sk_buff */
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
+ /* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
return;
- /* Free all the Rx ring sk_buffs */
+ /* Free all the Rx ring pages */
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbevf_rx_buffer *rx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
- if (rx_buffer_info->dma) {
- dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
- }
- if (rx_buffer_info->skb) {
- struct sk_buff *skb = rx_buffer_info->skb;
- rx_buffer_info->skb = NULL;
- do {
- struct sk_buff *this = skb;
- skb = IXGBE_CB(skb)->prev;
- dev_kfree_skb(this);
- } while (skb);
- }
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ __free_page(rx_buffer->page);
+ rx_buffer->page = NULL;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -3274,6 +3519,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -3282,7 +3528,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
default:
- if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
}
@@ -3291,17 +3537,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
- hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ hw_dbg(hw, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- if (netif_running(netdev))
- ixgbevf_reinit_locked(adapter);
+ /* notify the PF of our intent to use this size of frame */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbevf_netpoll(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3438,6 +3702,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbevf_busy_poll_recv,
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixgbevf_netpoll,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3465,6 +3732,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
int err, pci_using_dac;
+ bool disable_dev = false;
err = pci_enable_device(pdev);
if (err)
@@ -3499,7 +3767,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
@@ -3588,16 +3855,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_register;
+ pci_set_drvdata(pdev, netdev);
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
- /* print the MAC address */
- hw_dbg(hw, "%pM\n", netdev->dev_addr);
+ /* print the VF info */
+ dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+ dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550_vf:
+ dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
+ break;
+ case ixgbe_mac_X540_vf:
+ dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
+ break;
+ case ixgbe_mac_82599_vf:
+ default:
+ dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+ break;
+ }
- hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
return 0;
err_register:
@@ -3606,12 +3885,13 @@ err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->io_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}
@@ -3628,7 +3908,13 @@ err_dma:
static void ixgbevf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter;
+ bool disable_dev;
+
+ if (!netdev)
+ return;
+
+ adapter = netdev_priv(netdev);
set_bit(__IXGBEVF_REMOVING, &adapter->state);
@@ -3648,9 +3934,10 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 9cddd56d02c3..cdb53be7d995 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550_vf_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index aa8cc8dc25d1..5b172427f459 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -74,6 +74,8 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82599_vf,
ixgbe_mac_X540_vf,
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4a1be34d7214..44ce7d88f554 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1364,8 +1364,8 @@ err_out_free_rx_resources:
jme_free_rx_resources(jme);
out_enable_tasklet:
tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
out:
atomic_inc(&jme->link_changing);
}
@@ -2408,8 +2408,8 @@ static inline void jme_resume_rx(struct jme_adapter *jme)
if (test_bit(JME_FLAG_POLL, &jme->flags)) {
JME_NAPI_ENABLE(jme);
} else {
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
}
dpi->cur = PCC_P1;
dpi->attempt = PCC_P1;
@@ -3290,8 +3290,8 @@ jme_suspend(struct device *dev)
}
tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
jme_powersave_phy(jme);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 2dad4d5047ba..581928c068f2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -789,7 +789,6 @@ static struct platform_driver ltq_mii_driver = {
.remove = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d44560d1d268..a62fc38f045e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2839,7 +2839,6 @@ static struct platform_driver mv643xx_eth_shared_driver = {
.remove = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
},
};
@@ -3188,7 +3187,6 @@ static struct platform_driver mv643xx_eth_driver = {
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ade067de1689..96208f17bb53 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -216,7 +216,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 16
+#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -1721,6 +1721,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
+ int len = skb->len;
int frags = 0;
u32 tx_cmd;
@@ -1788,7 +1789,7 @@ out:
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
@@ -2558,11 +2559,10 @@ static void mvneta_adjust_link(struct net_device *ndev)
MVNETA_GMAC_FORCE_LINK_DOWN);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
mvneta_port_up(pp);
- netdev_info(pp->dev, "link up\n");
} else {
mvneta_port_down(pp);
- netdev_info(pp->dev, "link down\n");
}
+ phy_print_status(phydev);
}
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index c3b209cd0660..af829c578400 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -106,6 +106,7 @@
#define SDMA_CMD_ERD (1 << 7)
/* Bit definitions of the Port Config Reg */
+#define PCR_DUPLEX_FULL (1 << 15)
#define PCR_HS (1 << 12)
#define PCR_EN (1 << 7)
#define PCR_PM (1 << 0)
@@ -113,11 +114,17 @@
/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM (1 << 28)
#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_RMII_EN (1 << 20)
+#define PCXR_AN_SPEED_DIS (1 << 19)
+#define PCXR_SPEED_100 (1 << 18)
#define PCXR_MFL_1518 (0 << 14)
#define PCXR_MFL_1536 (1 << 14)
#define PCXR_MFL_2048 (2 << 14)
#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLOWCTL_DIS (1 << 12)
#define PCXR_FLP (1 << 11)
+#define PCXR_AN_FLOWCTL_DIS (1 << 10)
+#define PCXR_AN_DUPLEX_DIS (1 << 9)
#define PCXR_PRIO_TX_OFF 3
#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
@@ -170,7 +177,6 @@
#define LINK_UP (1 << 3)
/* Bit definitions for work to be done */
-#define WORK_LINK (1 << 0)
#define WORK_TX_DONE (1 << 1)
/*
@@ -197,6 +203,9 @@ struct tx_desc {
struct pxa168_eth_private {
int port_num; /* User Ethernet port number */
int phy_addr;
+ int phy_speed;
+ int phy_duplex;
+ phy_interface_t phy_intf;
int rx_resource_err; /* Rx ring resource error flag */
@@ -269,11 +278,11 @@ enum hash_table_entry {
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
-static int ethernet_phy_setup(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
@@ -305,26 +314,6 @@ static void abort_dma(struct pxa168_eth_private *pep)
netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}
-static int ethernet_phy_get(struct pxa168_eth_private *pep)
-{
- unsigned int reg_data;
-
- reg_data = rdl(pep, PHY_ADDRESS);
-
- return (reg_data >> (5 * pep->port_num)) & 0x1f;
-}
-
-static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
-{
- u32 reg_data;
- int addr_shift = 5 * pep->port_num;
-
- reg_data = rdl(pep, PHY_ADDRESS);
- reg_data &= ~(0x1f << addr_shift);
- reg_data |= (phy_addr & 0x1f) << addr_shift;
- wrl(pep, PHY_ADDRESS, reg_data);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -655,14 +644,7 @@ static void eth_port_start(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int tx_curr_desc, rx_curr_desc;
- /* Perform PHY reset, if there is a PHY. */
- if (pep->phy != NULL) {
- struct ethtool_cmd cmd;
-
- pxa168_get_settings(pep->dev, &cmd);
- phy_init_hw(pep->phy);
- pxa168_set_settings(pep->dev, &cmd);
- }
+ phy_start(pep->phy);
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = pep->tx_curr_desc_q;
@@ -717,6 +699,8 @@ static void eth_port_reset(struct net_device *dev)
val = rdl(pep, PORT_CONFIG);
val &= ~PCR_EN;
wrl(pep, PORT_CONFIG, val);
+
+ phy_stop(pep->phy);
}
/*
@@ -884,43 +868,9 @@ static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
}
if (icr & ICR_RXBUF)
ret = 1;
- if (icr & ICR_MII_CH) {
- pep->work_todo |= WORK_LINK;
- ret = 1;
- }
return ret;
}
-static void handle_link_event(struct pxa168_eth_private *pep)
-{
- struct net_device *dev = pep->dev;
- u32 port_status;
- int speed;
- int duplex;
- int fc;
-
- port_status = rdl(pep, PORT_STATUS);
- if (!(port_status & LINK_UP)) {
- if (netif_carrier_ok(dev)) {
- netdev_info(dev, "link down\n");
- netif_carrier_off(dev);
- txq_reclaim(dev, 1);
- }
- return;
- }
- if (port_status & PORT_SPEED_100)
- speed = 100;
- else
- speed = 10;
-
- duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
- fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1;
- netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
- speed, duplex ? "full" : "half", fc ? "en" : "dis");
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-}
-
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -978,8 +928,11 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
skb_size = PCXR_MFL_64K;
/* Extended Port Configuration */
- wrl(pep,
- PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ wrl(pep, PORT_CONFIG_EXT,
+ PCXR_AN_SPEED_DIS | /* Disable HW AN */
+ PCXR_AN_DUPLEX_DIS |
+ PCXR_AN_FLOWCTL_DIS |
+ PCXR_2BSM | /* Two byte prefix aligns IP hdr */
PCXR_DSCP_EN | /* Enable DSCP in IP */
skb_size | PCXR_FLP | /* do not force link pass */
PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
@@ -987,6 +940,69 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
return 0;
}
+static void pxa168_eth_adjust_link(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct phy_device *phy = pep->phy;
+ u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
+ u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
+
+ cfg = cfg_o & ~PCR_DUPLEX_FULL;
+ cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
+
+ if (phy->interface == PHY_INTERFACE_MODE_RMII)
+ cfgext |= PCXR_RMII_EN;
+ if (phy->speed == SPEED_100)
+ cfgext |= PCXR_SPEED_100;
+ if (phy->duplex)
+ cfg |= PCR_DUPLEX_FULL;
+ if (!phy->pause)
+ cfgext |= PCXR_FLOWCTL_DIS;
+
+ /* Bail out if nothing has changed */
+ if (cfg == cfg_o && cfgext == cfgext_o)
+ return;
+
+ wrl(pep, PORT_CONFIG, cfg);
+ wrl(pep, PORT_CONFIG_EXT, cfgext);
+
+ phy_print_status(phy);
+}
+
+static int pxa168_init_phy(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct ethtool_cmd cmd;
+ int err;
+
+ if (pep->phy)
+ return 0;
+
+ pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (!pep->phy)
+ return -ENODEV;
+
+ err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+ pep->phy_intf);
+ if (err)
+ return err;
+
+ err = pxa168_get_settings(dev, &cmd);
+ if (err)
+ return err;
+
+ cmd.phy_address = pep->phy_addr;
+ cmd.speed = pep->phy_speed;
+ cmd.duplex = pep->phy_duplex;
+ cmd.advertising = PHY_BASIC_FEATURES;
+ cmd.autoneg = AUTONEG_ENABLE;
+
+ if (cmd.speed != 0)
+ cmd.autoneg = AUTONEG_DISABLE;
+
+ return pxa168_set_settings(dev, &cmd);
+}
+
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
int err = 0;
@@ -1133,6 +1149,10 @@ static int pxa168_eth_open(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
+ err = pxa168_init_phy(dev);
+ if (err)
+ return err;
+
err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
if (err) {
dev_err(&dev->dev, "can't assign irq\n");
@@ -1153,8 +1173,8 @@ static int pxa168_eth_open(struct net_device *dev)
pep->rx_used_desc_q = 0;
pep->rx_curr_desc_q = 0;
netif_carrier_off(dev);
- eth_port_start(dev);
napi_enable(&pep->napi);
+ eth_port_start(dev);
return 0;
out_free_rx_skb:
rxq_deinit(dev);
@@ -1231,10 +1251,6 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
struct net_device *dev = pep->dev;
int work_done = 0;
- if (unlikely(pep->work_todo & WORK_LINK)) {
- pep->work_todo &= ~(WORK_LINK);
- handle_link_event(pep);
- }
/*
* We call txq_reclaim every time since in NAPI interrupts are disabled
* and due to this we miss the TX_DONE interrupt, which is not updated in
@@ -1357,77 +1373,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
return -EOPNOTSUPP;
}
-static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
-{
- struct mii_bus *bus = pep->smi_bus;
- struct phy_device *phydev;
- int start;
- int num;
- int i;
-
- if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
- /* Scan entire range */
- start = ethernet_phy_get(pep);
- num = 32;
- } else {
- /* Use phy addr specific to platform */
- start = phy_addr & 0x1f;
- num = 1;
- }
- phydev = NULL;
- for (i = 0; i < num; i++) {
- int addr = (start + i) & 0x1f;
- if (bus->phy_map[addr] == NULL)
- mdiobus_scan(bus, addr);
-
- if (phydev == NULL) {
- phydev = bus->phy_map[addr];
- if (phydev != NULL)
- ethernet_phy_set_addr(pep, addr);
- }
- }
-
- return phydev;
-}
-
-static void phy_init(struct pxa168_eth_private *pep)
-{
- struct phy_device *phy = pep->phy;
-
- phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
-
- if (pep->pd && pep->pd->speed != 0) {
- phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
- phy->speed = pep->pd->speed;
- phy->duplex = pep->pd->duplex;
- } else {
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy->supported &= PHY_BASIC_FEATURES;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
- }
-
- phy_start_aneg(phy);
-}
-
-static int ethernet_phy_setup(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if (pep->pd && pep->pd->init)
- pep->pd->init();
-
- pep->phy = phy_scan(pep, pep->phy_addr & 0x1f);
- if (pep->phy != NULL)
- phy_init(pep);
-
- update_hash_table_mac_address(pep, NULL, dev->dev_addr);
-
- return 0;
-}
-
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1505,16 +1450,14 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep = netdev_priv(dev);
pep->dev = dev;
pep->clk = clk;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- err = -ENODEV;
- goto err_netdev;
- }
pep->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pep->base)) {
err = -ENOMEM;
goto err_netdev;
}
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
BUG_ON(!res);
dev->irq = res->start;
@@ -1552,13 +1495,23 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->port_num = pep->pd->port_number;
pep->phy_addr = pep->pd->phy_addr;
+ pep->phy_speed = pep->pd->speed;
+ pep->phy_duplex = pep->pd->duplex;
+ pep->phy_intf = pep->pd->intf;
+
+ if (pep->pd->init)
+ pep->pd->init();
} else if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node, "port-id",
&pep->port_num);
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (np)
- of_property_read_u32(np, "reg", &pep->phy_addr);
+ if (!np) {
+ dev_err(&pdev->dev, "missing phy-handle\n");
+ return -EINVAL;
+ }
+ of_property_read_u32(np, "reg", &pep->phy_addr);
+ pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
}
/* Hardware supports only 3 ports */
@@ -1587,11 +1540,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (err)
goto err_free_mdio;
- pxa168_init_hw(pep);
- err = ethernet_phy_setup(dev);
- if (err)
- goto err_mdiobus;
SET_NETDEV_DEV(dev, &pdev->dev);
+ pxa168_init_hw(pep);
err = register_netdev(dev);
if (err)
goto err_mdiobus;
@@ -1621,13 +1571,13 @@ static int pxa168_eth_remove(struct platform_device *pdev)
pep->htpr, pep->htpr_dma);
pep->htpr = NULL;
}
+ if (pep->phy)
+ phy_disconnect(pep->phy);
if (pep->clk) {
clk_disable(pep->clk);
clk_put(pep->clk);
pep->clk = NULL;
}
- if (pep->phy != NULL)
- phy_detach(pep->phy);
iounmap(pep->base);
pep->base = NULL;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 264eab7d3b26..7173836fe361 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3433,10 +3433,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
if (status & IS_HW_ERR)
skge_error_irq(hw);
-
+out:
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
-out:
spin_unlock(&hw->hw_lock);
return IRQ_RETVAL(handled);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index bd3366267039..867a6a3ef81f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1290,14 +1290,6 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
-/*
- * Fixed initial key as seed to RSS.
- */
-static const uint32_t rss_init_key[10] = {
- 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
- 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
-};
-
/* Enable/disable receive hash calculation (RSS) */
static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
@@ -1313,9 +1305,12 @@ static void rx_set_rss(struct net_device *dev, netdev_features_t features)
/* Program RSS initial values */
if (features & NETIF_F_RXHASH) {
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < nkeys; i++)
sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
- rss_init_key[i]);
+ rss_key[i]);
/* Need to turn on (undocumented) flag to make hashing work */
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1366,7 +1361,9 @@ static void sky2_rx_clean(struct sky2_port *sky2)
{
unsigned i;
- memset(sky2->rx_le, 0, RX_LE_BYTES);
+ if (sky2->rx_le)
+ memset(sky2->rx_le, 0, RX_LE_BYTES);
+
for (i = 0; i < sky2->rx_pending; i++) {
struct rx_ring_info *re = sky2->rx_ring + i;
@@ -2419,6 +2416,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&hw->napi);
@@ -3487,8 +3485,8 @@ static void sky2_all_down(struct sky2_hw *hw)
int i;
if (hw->flags & SKY2_HW_IRQ_SETUP) {
- sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
synchronize_irq(hw->pdev->irq);
napi_disable(&hw->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b0297da50304..963dd7e6d547 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -76,22 +76,53 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+static unsigned long find_aligned_range(unsigned long *bitmap,
+ u32 start, u32 nbits,
+ int len, int align, u32 skip_mask)
+{
+ unsigned long end, i;
+
+again:
+ start = ALIGN(start, align);
+
+ while ((start < nbits) && (test_bit(start, bitmap) ||
+ (start & skip_mask)))
+ start += align;
+
+ if (start >= nbits)
+ return -1;
+
+ end = start+len;
+ if (end > nbits)
+ return -1;
+
+ for (i = start + 1; i < end; i++) {
+ if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
+ start = i + 1;
+ goto again;
+ }
+ }
+
+ return start;
+}
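find_aligned_range is a linear scan for a run of clear bits whose start is aligned and avoids skip_mask; on any collision it restarts just past the conflict. The same algorithm can be exercised outside the driver with a byte-per-bit array standing in for the kernel bitmap (find_aligned_range_sketch and used[] are my stand-ins, not mlx4 code):

#include <stdio.h>
#include <stdint.h>

/* Returns the first aligned start of 'len' free bits whose start avoids
 * skip_mask, or -1 if no such run exists before nbits.
 */
static long find_aligned_range_sketch(const uint8_t *used, uint32_t start,
				      uint32_t nbits, int len, int align,
				      uint32_t skip_mask)
{
	uint32_t end, i;

again:
	start = (start + align - 1) / align * align;

	while (start < nbits && (used[start] || (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (used[i] || (i & skip_mask)) {
			start = i + 1;   /* collision: resume past it */
			goto again;
		}
	}
	return start;
}

int main(void)
{
	uint8_t used[16] = { [2] = 1, [5] = 1 };   /* bits 2 and 5 are taken */

	/* 3 free bits, start aligned to 2, no skip mask -> prints 6 */
	printf("%ld\n", find_aligned_range_sketch(used, 0, 16, 3, 2, 0));
	return 0;
}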
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
+ int align, u32 skip_mask)
{
u32 obj;
- if (likely(cnt == 1 && align == 1))
+ if (likely(cnt == 1 && align == 1 && !skip_mask))
return mlx4_bitmap_alloc(bitmap);
spin_lock(&bitmap->lock);
- obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
- bitmap->last, cnt, align - 1);
+ obj = find_aligned_range(bitmap->table, bitmap->last,
+ bitmap->max, cnt, align, skip_mask);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
- obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
- 0, cnt, align - 1);
+ obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+ cnt, align, skip_mask);
}
if (obj < bitmap->max) {
@@ -118,6 +149,11 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
+static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
+{
+ return obj & (bitmap->max + bitmap->reserved_top - 1);
+}
+
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr)
{
@@ -147,6 +183,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
bitmap->avail = num - reserved_top - reserved_bot;
+ bitmap->effective_len = bitmap->avail;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
sizeof (long), GFP_KERNEL);
@@ -163,6 +200,382 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
kfree(bitmap->table);
}
+struct mlx4_zone_allocator {
+ struct list_head entries;
+ struct list_head prios;
+ u32 last_uid;
+ u32 mask;
+ /* protect the zone_allocator from concurrent accesses */
+ spinlock_t lock;
+ enum mlx4_zone_alloc_flags flags;
+};
+
+struct mlx4_zone_entry {
+ struct list_head list;
+ struct list_head prio_list;
+ u32 uid;
+ struct mlx4_zone_allocator *allocator;
+ struct mlx4_bitmap *bitmap;
+ int use_rr;
+ int priority;
+ int offset;
+ enum mlx4_zone_flags flags;
+};
+
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
+{
+ struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
+
+ if (NULL == zones)
+ return NULL;
+
+ INIT_LIST_HEAD(&zones->entries);
+ INIT_LIST_HEAD(&zones->prios);
+ spin_lock_init(&zones->lock);
+ zones->last_uid = 0;
+ zones->mask = 0;
+ zones->flags = flags;
+
+ return zones;
+}
+
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+ struct mlx4_bitmap *bitmap,
+ u32 flags,
+ int priority,
+ int offset,
+ u32 *puid)
+{
+ u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
+ struct mlx4_zone_entry *it;
+ struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
+
+ if (NULL == zone)
+ return -ENOMEM;
+
+ zone->flags = flags;
+ zone->bitmap = bitmap;
+ zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
+ zone->priority = priority;
+ zone->offset = offset;
+
+ spin_lock(&zone_alloc->lock);
+
+ zone->uid = zone_alloc->last_uid++;
+ zone->allocator = zone_alloc;
+
+ if (zone_alloc->mask < mask)
+ zone_alloc->mask = mask;
+
+ list_for_each_entry(it, &zone_alloc->prios, prio_list)
+ if (it->priority >= priority)
+ break;
+
+ if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
+ list_add_tail(&zone->prio_list, &it->prio_list);
+ list_add_tail(&zone->list, &it->list);
+
+ spin_unlock(&zone_alloc->lock);
+
+ *puid = zone->uid;
+
+ return 0;
+}
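Zones live on one list ordered by priority, while a second list keeps only the first zone of each distinct priority so allocation can hop between priority classes quickly; a new zone joins the prio list only when its priority was not already present. A small sorted-insert sketch of that rule (arrays instead of the mlx4 list_heads; all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ZONES 8

int main(void)
{
	/* zones[] is kept sorted by priority; prio_head[] marks the first zone
	 * of each distinct priority (the role of the separate prio list).
	 */
	int zones[MAX_ZONES] = {1, 1, 3, 3};
	bool prio_head[MAX_ZONES] = {true, false, true, false};
	int n = 4, new_prio = 2, i, j;

	for (i = 0; i < n; i++)
		if (zones[i] >= new_prio)
			break;

	/* a new priority value => this entry also heads its priority class */
	bool is_head = (i == n) || (zones[i] > new_prio);

	for (j = n; j > i; j--) {
		zones[j] = zones[j - 1];
		prio_head[j] = prio_head[j - 1];
	}
	zones[i] = new_prio;
	prio_head[i] = is_head;
	n++;

	for (i = 0; i < n; i++)
		printf("prio=%d head=%d\n", zones[i], prio_head[i]);
	return 0;
}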
+
+/* Should be called under a lock */
+static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+{
+ struct mlx4_zone_allocator *zone_alloc = entry->allocator;
+
+ if (!list_empty(&entry->prio_list)) {
+ /* Check if we need to add an alternative node to the prio list */
+ if (!list_is_last(&entry->list, &zone_alloc->entries)) {
+ struct mlx4_zone_entry *next = list_first_entry(&entry->list,
+ typeof(*next),
+ list);
+
+ if (next->priority == entry->priority)
+ list_add_tail(&next->prio_list, &entry->prio_list);
+ }
+
+ list_del(&entry->prio_list);
+ }
+
+ list_del(&entry->list);
+
+ if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
+ u32 mask = 0;
+ struct mlx4_zone_entry *it;
+
+ list_for_each_entry(it, &zone_alloc->prios, prio_list) {
+ u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
+
+ if (mask < cur_mask)
+ mask = cur_mask;
+ }
+ zone_alloc->mask = mask;
+ }
+
+ return 0;
+}
+
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
+{
+ struct mlx4_zone_entry *zone, *tmp;
+
+ spin_lock(&zone_alloc->lock);
+
+ list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
+ list_del(&zone->list);
+ list_del(&zone->prio_list);
+ kfree(zone);
+ }
+
+ spin_unlock(&zone_alloc->lock);
+ kfree(zone_alloc);
+}
+
+/* Should be called under a lock */
+static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
+ int align, u32 skip_mask, u32 *puid)
+{
+ u32 uid;
+ u32 res;
+ struct mlx4_zone_allocator *zone_alloc = zone->allocator;
+ struct mlx4_zone_entry *curr_node;
+
+ res = mlx4_bitmap_alloc_range(zone->bitmap, count,
+ align, skip_mask);
+
+ if (res != (u32)-1) {
+ res += zone->offset;
+ uid = zone->uid;
+ goto out;
+ }
+
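+ /* Allocation from this zone failed; find its priority node so we
+ * can fall back to other zones according to the zone's flags.
+ */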
+ list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
+ if (unlikely(curr_node->priority == zone->priority))
+ break;
+ }
+
+ if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
+ struct mlx4_zone_entry *it = curr_node;
+
+ list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
+ res = mlx4_bitmap_alloc_range(it->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += it->offset;
+ uid = it->uid;
+ goto out;
+ }
+ }
+ }
+
+ if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
+ struct mlx4_zone_entry *it = curr_node;
+
+ list_for_each_entry_from(it, &zone_alloc->entries, list) {
+ if (unlikely(it == zone))
+ continue;
+
+ if (unlikely(it->priority != curr_node->priority))
+ break;
+
+ res = mlx4_bitmap_alloc_range(it->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += it->offset;
+ uid = it->uid;
+ goto out;
+ }
+ }
+ }
+
+ if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
+ if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
+ goto out;
+
+ curr_node = list_first_entry(&curr_node->prio_list,
+ typeof(*curr_node),
+ prio_list);
+
+ list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
+ res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += curr_node->offset;
+ uid = curr_node->uid;
+ goto out;
+ }
+ }
+ }
+
+out:
+ if (NULL != puid && res != (u32)-1)
+ *puid = uid;
+ return res;
+}
+
+/* Should be called under a lock */
+static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
+ u32 count)
+{
+ mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
+ struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+
+ list_for_each_entry(zone, &zones->entries, list) {
+ if (zone->uid == uid)
+ return zone;
+ }
+
+ return NULL;
+}
+
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+ struct mlx4_bitmap *bitmap;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ bitmap = zone == NULL ? NULL : zone->bitmap;
+
+ spin_unlock(&zones->lock);
+
+ return bitmap;
+}
+
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+ int res;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ res = __mlx4_zone_remove_one_entry(zone);
+
+out:
+ spin_unlock(&zones->lock);
+ kfree(zone);
+
+ return res;
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
+ struct mlx4_zone_allocator *zones, u32 obj)
+{
+ struct mlx4_zone_entry *zone, *zone_candidate = NULL;
+ u32 dist = (u32)-1;
+
+ /* Search for the smallest zone that this obj could be
+ * allocated from. This is done in order to handle
+ * situations where small bitmaps are allocated from bigger
+ * bitmaps (and the allocated space is marked as reserved in
+ * the bigger bitmap).
+ */
+ list_for_each_entry(zone, &zones->entries, list) {
+ if (obj >= zone->offset) {
+ u32 mobj = (obj - zone->offset) & zones->mask;
+
+ if (mobj < zone->bitmap->max) {
+ u32 curr_dist = zone->bitmap->effective_len;
+
+ if (curr_dist < dist) {
+ dist = curr_dist;
+ zone_candidate = zone;
+ }
+ }
+ }
+ }
+
+ return zone_candidate;
+}
+
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+ int align, u32 skip_mask, u32 *puid)
+{
+ struct mlx4_zone_entry *zone;
+ int res = -1;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone)
+ goto out;
+
+ res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
+
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
+{
+ struct mlx4_zone_entry *zone;
+ int res = 0;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ __mlx4_free_from_zone(zone, obj, count);
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
+
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
+{
+ struct mlx4_zone_entry *zone;
+ int res;
+
+ if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
+ return -EFAULT;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid_unique(zones, obj);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ __mlx4_free_from_zone(zone, obj, count);
+ res = 0;
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
/*
* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b16e1b95566f..5c93d1451c44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -990,11 +990,11 @@ static struct mlx4_cmd_info cmd_info[] = {
{
.opcode = MLX4_CMD_CONFIG_DEV,
.has_inbox = false,
- .has_outbox = false,
+ .has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_CMD_EPERM_wrapper
+ .wrapper = mlx4_CONFIG_DEV_wrapper
},
{
.opcode = MLX4_CMD_ALLOC_RES,
@@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QUERY_IF_STAT_wrapper
},
+ {
+ .opcode = MLX4_CMD_ACCESS_REG,
+ .has_inbox = true,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ACCESS_REG_wrapper,
+ },
/* Native multicast commands are not available for guests */
{
.opcode = MLX4_CMD_QP_ATTACH,
@@ -2108,50 +2117,52 @@ err_vhcr:
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ int flags = 0;
+
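+ /* Track which resources this call sets up so that the error path
+ * only cleans up what was actually initialized here.
+ */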
+ if (!priv->cmd.initialized) {
+ mutex_init(&priv->cmd.hcr_mutex);
+ mutex_init(&priv->cmd.slave_cmd_mutex);
+ sema_init(&priv->cmd.poll_sem, 1);
+ priv->cmd.use_events = 0;
+ priv->cmd.toggle = 1;
+ priv->cmd.initialized = 1;
+ flags |= MLX4_CMD_CLEANUP_STRUCT;
+ }
- mutex_init(&priv->cmd.hcr_mutex);
- mutex_init(&priv->cmd.slave_cmd_mutex);
- sema_init(&priv->cmd.poll_sem, 1);
- priv->cmd.use_events = 0;
- priv->cmd.toggle = 1;
-
- priv->cmd.hcr = NULL;
- priv->mfunc.vhcr = NULL;
-
- if (!mlx4_is_slave(dev)) {
+ if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register\n");
- return -ENOMEM;
+ goto err;
}
+ flags |= MLX4_CMD_CLEANUP_HCR;
}
- if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
if (!priv->mfunc.vhcr)
- goto err_hcr;
+ goto err;
+
+ flags |= MLX4_CMD_CLEANUP_VHCR;
}
- priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
- MLX4_MAILBOX_SIZE,
- MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool)
- goto err_vhcr;
+ if (!priv->cmd.pool) {
+ priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+ MLX4_MAILBOX_SIZE,
+ MLX4_MAILBOX_SIZE, 0);
+ if (!priv->cmd.pool)
+ goto err;
- return 0;
+ flags |= MLX4_CMD_CLEANUP_POOL;
+ }
-err_vhcr:
- if (mlx4_is_mfunc(dev))
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
+ return 0;
-err_hcr:
- if (!mlx4_is_slave(dev))
- iounmap(priv->cmd.hcr);
+err:
+ mlx4_cmd_cleanup(dev, flags);
return -ENOMEM;
}
@@ -2175,18 +2186,28 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
iounmap(priv->mfunc.comm);
}
-void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- pci_pool_destroy(priv->cmd.pool);
+ if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
+ pci_pool_destroy(priv->cmd.pool);
+ priv->cmd.pool = NULL;
+ }
- if (!mlx4_is_slave(dev))
+ if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
+ (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
iounmap(priv->cmd.hcr);
- if (mlx4_is_mfunc(dev))
+ priv->cmd.hcr = NULL;
+ }
+ if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
+ (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
+ priv->mfunc.vhcr = NULL;
+ }
+ if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
+ priv->cmd.initialized = 0;
}
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 56022d647837..e71f31387ac6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -52,6 +52,51 @@
#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
#define MLX4_EQ_STATE_FIRED (10 << 8)
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
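+/* Per-EQ tasklet: invoke the completion handler of every queued CQ, but
+ * stop after TASKLET_MAX_TIME_JIFFIES and reschedule so a single run
+ * cannot monopolize the CPU.
+ */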
+void mlx4_cq_tasklet_cb(unsigned long data)
+{
+ unsigned long flags;
+ unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+ struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+ struct mlx4_cq *mcq, *temp;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ list_splice_tail_init(&ctx->list, &ctx->process_list);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
+ list_del_init(&mcq->tasklet_ctx.list);
+ mcq->tasklet_ctx.comp(mcq);
+ if (atomic_dec_and_test(&mcq->refcount))
+ complete(&mcq->free);
+ if (time_after(jiffies, end))
+ break;
+ }
+
+ if (!list_empty(&ctx->process_list))
+ tasklet_schedule(&ctx->task);
+}
+
+static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
+{
+ unsigned long flags;
+ struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+ spin_lock_irqsave(&tasklet_ctx->lock, flags);
+ /* When migration of CQs between EQs is implemented, note that
+ * this point must be synchronized: while a CQ is being migrated,
+ * completions on the old EQ could still arrive.
+ */
+ if (list_empty_careful(&cq->tasklet_ctx.list)) {
+ atomic_inc(&cq->refcount);
+ list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+ }
+ spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
@@ -292,6 +337,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->uar = uar;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ cq->comp = mlx4_add_cq_to_tasklet;
+ cq->tasklet_ctx.priv =
+ &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ INIT_LIST_HEAD(&cq->tasklet_ctx.list);
+
cq->irq = priv->eq_table.eq[cq->vector].irq;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 57dda95b67d8..999014413b1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -35,52 +35,6 @@
#include "mlx4_en.h"
-int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int port_up = 0;
- int err = 0;
-
- if (priv->hwtstamp_config.tx_type == tx_type &&
- priv->hwtstamp_config.rx_filter == rx_filter)
- return 0;
-
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev, 1);
- }
-
- mlx4_en_free_resources(priv);
-
- en_warn(priv, "Changing Time Stamp configuration\n");
-
- priv->hwtstamp_config.tx_type = tx_type;
- priv->hwtstamp_config.rx_filter = rx_filter;
-
- if (rx_filter != HWTSTAMP_FILTER_NONE)
- dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- else
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
- if (port_up) {
- err = mlx4_en_start_port(dev);
- if (err)
- en_err(priv, "Failed starting port\n");
- }
-
-out:
- mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
- return err;
-}
-
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae83da9cd18a..90e0f045a6bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
@@ -114,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tso_packets",
"xmit_more",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
- "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+ "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* packet statistics */
"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
@@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev,
}
}
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static u32 mlx4_en_autoneg_get(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ u32 autoneg = AUTONEG_DISABLE;
+
+ if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
+ (priv->port_state.flags & MLX4_EN_PORT_ANE))
+ autoneg = AUTONEG_ENABLE;
+
+ return autoneg;
+}
+
+static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
+{
+ u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+ | MLX4_PROT_MASK(MLX4_1000BASE_T)
+ | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+ return SUPPORTED_TP;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_SR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
+{
+ u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
+
+ if (!eth_proto) /* link down */
+ eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+ | MLX4_PROT_MASK(MLX4_1000BASE_T)
+ | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+ return PORT_TP;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+ return PORT_NONE;
+ }
+ return PORT_OTHER;
+}
+
+#define MLX4_LINK_MODES_SZ \
+ (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
+
+enum ethtool_report {
+ SUPPORTED = 0,
+ ADVERTISED = 1,
+ SPEED = 2
+};
+
+/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
+static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
+ [MLX4_100BASE_TX] = {
+ SUPPORTED_100baseT_Full,
+ ADVERTISED_100baseT_Full,
+ SPEED_100
+ },
+
+ [MLX4_1000BASE_T] = {
+ SUPPORTED_1000baseT_Full,
+ ADVERTISED_1000baseT_Full,
+ SPEED_1000
+ },
+ [MLX4_1000BASE_CX_SGMII] = {
+ SUPPORTED_1000baseKX_Full,
+ ADVERTISED_1000baseKX_Full,
+ SPEED_1000
+ },
+ [MLX4_1000BASE_KX] = {
+ SUPPORTED_1000baseKX_Full,
+ ADVERTISED_1000baseKX_Full,
+ SPEED_1000
+ },
+
+ [MLX4_10GBASE_T] = {
+ SUPPORTED_10000baseT_Full,
+ ADVERTISED_10000baseT_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_CX4] = {
+ SUPPORTED_10000baseKX4_Full,
+ ADVERTISED_10000baseKX4_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_KX4] = {
+ SUPPORTED_10000baseKX4_Full,
+ ADVERTISED_10000baseKX4_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_KR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_CR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_SR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+
+ [MLX4_20GBASE_KR2] = {
+ SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
+ ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
+ SPEED_20000
+ },
+
+ [MLX4_40GBASE_CR4] = {
+ SUPPORTED_40000baseCR4_Full,
+ ADVERTISED_40000baseCR4_Full,
+ SPEED_40000
+ },
+ [MLX4_40GBASE_KR4] = {
+ SUPPORTED_40000baseKR4_Full,
+ ADVERTISED_40000baseKR4_Full,
+ SPEED_40000
+ },
+ [MLX4_40GBASE_SR4] = {
+ SUPPORTED_40000baseSR4_Full,
+ ADVERTISED_40000baseSR4_Full,
+ SPEED_40000
+ },
+
+ [MLX4_56GBASE_KR4] = {
+ SUPPORTED_56000baseKR4_Full,
+ ADVERTISED_56000baseKR4_Full,
+ SPEED_56000
+ },
+ [MLX4_56GBASE_CR4] = {
+ SUPPORTED_56000baseCR4_Full,
+ ADVERTISED_56000baseCR4_Full,
+ SPEED_56000
+ },
+ [MLX4_56GBASE_SR4] = {
+ SUPPORTED_56000baseSR4_Full,
+ ADVERTISED_56000baseSR4_Full,
+ SPEED_56000
+ },
+};
+
+static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
+{
+ int i;
+ u32 link_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (eth_proto & MLX4_PROT_MASK(i))
+ link_modes |= ptys2ethtool_map[i][report];
+ }
+ return link_modes;
+}
+
+static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
+{
+ int i;
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (ptys2ethtool_map[i][report] & link_modes)
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+}
+
+/* Convert actual speed (SPEED_XXX) to ptys link modes */
+static u32 speed2ptys_link_modes(u32 speed)
+{
+ int i;
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (ptys2ethtool_map[i][SPEED] == speed)
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+}
+
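+/* Query the PTYS register for this port and translate the reported
+ * capability/admin/operational protocol masks into the ethtool
+ * supported/advertising/lp_advertising link modes.
+ */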
+static int ethtool_get_ptys_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+ u32 eth_proto;
+ int ret;
+
+ memset(&ptys_reg, 0, sizeof(ptys_reg));
+ ptys_reg.local_port = priv->port;
+ ptys_reg.proto_mask = MLX4_PTYS_EN;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+ MLX4_ACCESS_REG_QUERY, &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
+ ret);
+ return ret;
+ }
+ en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
+ ptys_reg.proto_mask);
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_cap));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_admin));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_oper));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_lp_adv));
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ cmd->supported |= ptys_get_supported_port(&ptys_reg);
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
+ cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
+ cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+ cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
+
+ cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
+ ADVERTISED_Asym_Pause : 0;
+
+ cmd->port = ptys_get_active_port(&ptys_reg);
+ cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ if (mlx4_en_autoneg_get(dev)) {
+ cmd->supported |= SUPPORTED_Autoneg;
+ cmd->advertising |= ADVERTISED_Autoneg;
+ }
+
+ cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
+ cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+ cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+ ADVERTISED_Autoneg : 0;
+
+ cmd->phy_address = 0;
+ cmd->mdio_support = 0;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ return ret;
+}
+
+static void ethtool_get_default_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int trans_type;
@@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full;
cmd->advertising = ADVERTISED_10000baseT_Full;
-
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return -ENOMEM;
-
- trans_type = priv->port_state.transciver;
- if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
- cmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
- }
+ trans_type = priv->port_state.transceiver;
if (trans_type > 0 && trans_type <= 0xC) {
cmd->port = PORT_FIBRE;
@@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = -1;
cmd->transceiver = -1;
}
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int ret = -EINVAL;
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
+ priv->port_state.flags & MLX4_EN_PORT_ANC,
+ priv->port_state.flags & MLX4_EN_PORT_ANE);
+
+ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
+ ret = ethtool_get_ptys_settings(dev, cmd);
+ if (ret) /* ETH PROT CTRL is not supported or the PTYS CMD failed */
+ ethtool_get_default_settings(dev, cmd);
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
return 0;
}
+/* Calculate the PTYS admin mask according to the ethtool speed (SPEED_XXX) */
+static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
+ __be32 proto_cap)
+{
+ __be32 proto_admin = 0;
+
+ if (!speed) { /* Speed = 0 ==> Reset Link modes */
+ proto_admin = proto_cap;
+ en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
+ be32_to_cpu(proto_cap));
+ } else {
+ u32 ptys_link_modes = speed2ptys_link_modes(speed);
+
+ proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
+ en_info(priv, "Setting Speed to %d\n", speed);
+ }
+ return proto_admin;
+}
+
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- if ((cmd->autoneg == AUTONEG_ENABLE) ||
- (ethtool_cmd_speed(cmd) != SPEED_10000) ||
- (cmd->duplex != DUPLEX_FULL))
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+ __be32 proto_admin;
+ int ret;
+
+ u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
+ int speed = ethtool_cmd_speed(cmd);
+
+ en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
+ speed, cmd->advertising, cmd->autoneg, cmd->duplex);
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
+ (cmd->duplex == DUPLEX_HALF))
return -EINVAL;
- /* Nothing to change */
+ memset(&ptys_reg, 0, sizeof(ptys_reg));
+ ptys_reg.local_port = priv->port;
+ ptys_reg.proto_mask = MLX4_PTYS_EN;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+ MLX4_ACCESS_REG_QUERY, &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
+ ret);
+ return 0;
+ }
+
+ proto_admin = cpu_to_be32(ptys_adv);
+ if (speed >= 0 && speed != priv->port_state.link_speed)
+ /* If speed was set then speed decides :-) */
+ proto_admin = speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
+
+ proto_admin &= ptys_reg.eth_proto_cap;
+
+ if (proto_admin == ptys_reg.eth_proto_admin)
+ return 0; /* Nothing to change */
+
+ if (!proto_admin) {
+ en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
+ return -EINVAL; /* nothing to change due to bad input */
+ }
+
+ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
+ be32_to_cpu(proto_admin));
+
+ ptys_reg.eth_proto_admin = proto_admin;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
+ &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
+ be32_to_cpu(ptys_reg.eth_proto_admin), ret);
+ return ret;
+ }
+
+ en_warn(priv, "Port link mode changed, restarting port...\n");
+ mutex_lock(&priv->mdev->state_lock);
+ if (priv->port_up) {
+ mlx4_en_stop_port(dev, 1);
+ if (mlx4_en_start_port(dev))
+ en_err(priv, "Failed restarting port %d\n", priv->port);
+ }
+ mutex_unlock(&priv->mdev->state_lock);
return 0;
}
@@ -587,7 +973,34 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
return priv->rx_ring_num;
}
-static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
+static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
+{
+ return MLX4_EN_RSS_KEY_SIZE;
+}
+
+static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ /* check if requested function is supported by the device */
+ if ((hfunc == ETH_RSS_HASH_TOP &&
+ !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
+ (hfunc == ETH_RSS_HASH_XOR &&
+ !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
+ return -EINVAL;
+
+ priv->rss_hash_fn = hfunc;
+ if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
+ en_warn(priv,
+ "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
+ en_warn(priv,
+ "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+}
+
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+ u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -596,17 +1009,23 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
int err = 0;
rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+ rss_rings = 1 << ilog2(rss_rings);
while (n--) {
+ if (!ring_index)
+ break;
ring_index[n] = rss_map->qps[n % rss_rings].qpn -
rss_map->base_qpn;
}
-
+ if (key)
+ memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc)
+ *hfunc = priv->rss_hash_fn;
return err;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -619,6 +1038,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
* between rings
*/
for (i = 0; i < priv->rx_ring_num; i++) {
+ if (!ring_index)
+ continue;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
@@ -633,13 +1054,22 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
if (!is_power_of_2(rss_rings))
return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ err = mlx4_en_check_rxfh_func(dev, hfunc);
+ if (err)
+ return err;
+ }
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- priv->prof->rss_rings = rss_rings;
+ if (ring_index)
+ priv->prof->rss_rings = rss_rings;
+ if (key)
+ memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
if (port_up) {
err = mlx4_en_start_port(dev);
@@ -1309,6 +1739,86 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
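+/* Identify the plugged module (SFP/QSFP variants) from the first two
+ * EEPROM bytes and report the matching SFF spec and EEPROM length.
+ */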
+static int mlx4_en_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int ret;
+ u8 data[4];
+
+ /* Read first 2 bytes to get Module & REV ID */
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ 0/*offset*/, 2/*size*/, data);
+ if (ret < 2)
+ return -EIO;
+
+ switch (data[0] /* identifier */) {
+ case MLX4_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ if (data[1] >= 0x3) { /* revision id */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX4_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ case MLX4_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int mlx4_en_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int i = 0, ret;
+
+ if (ee->len == 0)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ en_dbg(DRV, priv,
+ "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
+ i, offset, ee->len - i);
+
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ offset, ee->len - i, data + i);
+
+ if (!ret) /* Done reading */
+ return 0;
+
+ if (ret < 0) {
+ en_err(priv,
+ "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
+ i, offset, ee->len - i, ret);
+ return 0;
+ }
+
+ i += ret;
+ offset += ret;
+ }
+ return 0;
+}
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
@@ -1332,6 +1842,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+ .get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
.set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
@@ -1341,6 +1852,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_priv_flags = mlx4_en_get_priv_flags,
.get_tunable = mlx4_en_get_tunable,
.set_tunable = mlx4_en_set_tunable,
+ .get_module_info = mlx4_en_get_module_info,
+ .get_module_eeprom = mlx4_en_get_module_eeprom
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2091ae88615d..9f16f754137b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
int i;
- int err;
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev) {
- err = -ENOMEM;
+ if (!mdev)
goto err_free_res;
- }
if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
goto err_free_dev;
@@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
}
/* Build device profile according to supplied module parameters */
- err = mlx4_en_get_profile(mdev);
- if (err) {
+ if (mlx4_en_get_profile(mdev)) {
mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
@@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
- if (!mdev->workqueue) {
- err = -ENOMEM;
+ if (!mdev->workqueue)
goto err_mr;
- }
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4d69e382b4e5..190cbd931f6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id;
+ u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -595,7 +595,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
}
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
@@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
- for (j = 0; j < cq->size; j++)
- cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+
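+ /* The CQE size is configurable, so locate each CQE through
+ * mlx4_en_get_cqe() plus the cqe_factor offset rather than
+ * indexing a fixed-size array.
+ */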
+ for (j = 0; j < cq->size; j++) {
+ struct mlx4_cqe *cqe = NULL;
+
+ cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
+ priv->cqe_factor;
+ cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+ }
+
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
@@ -1843,8 +1850,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
}
local_bh_enable();
- while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
- msleep(1);
+ napi_synchronize(&cq->napi);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
@@ -1894,6 +1900,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->packets = 0;
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
+ priv->rx_ring[i]->csum_complete = 0;
}
}
@@ -1974,15 +1981,8 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
- int err;
int node;
- err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
- if (err) {
- en_err(priv, "failed reserving range for TX rings\n");
- return err;
- }
-
/* Create tx Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
node = cpu_to_node(i % num_online_cpus());
@@ -1991,7 +1991,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
- priv->base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE,
node, i))
goto err;
@@ -2157,7 +2156,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+ if (mlx4_en_reset_config(dev, config, dev->features)) {
config.tx_type = HWTSTAMP_TX_OFF;
config.rx_filter = HWTSTAMP_FILTER_NONE;
}
@@ -2190,6 +2189,16 @@ static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+ en_info(priv, "Turn %s RX vlan strip offload\n",
+ (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
+ ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+ features);
+ if (ret)
+ return ret;
+ }
if (features & NETIF_F_LOOPBACK)
priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2249,7 +2258,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_dev *mdev = priv->mdev->dev;
@@ -2455,6 +2464,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
+ INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+ INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
priv->dev = dev;
priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev;
@@ -2468,6 +2492,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
+ netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
@@ -2486,16 +2511,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
- spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
- INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
- INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
- INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
- INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2513,6 +2528,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+ if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
+ MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
+ priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
+
/* Set default MAC */
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
@@ -2538,11 +2557,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (err)
goto out;
-#ifdef CONFIG_RFS_ACCEL
- INIT_LIST_HEAD(&priv->filters);
- spin_lock_init(&priv->filters_lock);
-#endif
-
/* Initialize time stamping config */
priv->hwtstamp_config.flags = 0;
priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -2583,15 +2597,28 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->hw_features |= NETIF_F_LOOPBACK;
+ dev->hw_features |= NETIF_F_LOOPBACK |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (mdev->dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED)
+ MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
dev->hw_features |= NETIF_F_NTUPLE;
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ /* Setting a default hash function value */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
+ priv->rss_hash_fn = ETH_RSS_HASH_XOR;
+ } else {
+ en_warn(priv,
+ "No RSS hash capabilities exposed, using Toeplitz\n");
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2650,3 +2677,79 @@ out:
return err;
}
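+/* Stop the port if it is up, free and reallocate the port resources, and
+ * apply the new time-stamping configuration together with the RX
+ * vlan-stripping setting under mdev->state_lock.
+ */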
+int mlx4_en_reset_config(struct net_device *dev,
+ struct hwtstamp_config ts_config,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+
+ if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
+ priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+ return 0; /* Nothing to change */
+
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
+ en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
+ ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ priv->hwtstamp_config.tx_type = ts_config.tx_type;
+ priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* RX time-stamping is OFF, update the RX vlan offload
+ * to the latest wanted state
+ */
+ if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ /* RX vlan offload and RX time-stamping can't coexist!
+ * Regardless of the caller's choice,
+ * turn off RX vlan offload when RX time-stamping is ON.
+ */
+ if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ netdev_features_change(dev);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a0261d128b9..6cb80072af6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
* already synchronized, no need in locking */
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
+ case MLX4_EN_100M_SPEED:
+ state->link_speed = SPEED_100;
+ break;
case MLX4_EN_1G_SPEED:
- state->link_speed = 1000;
+ state->link_speed = SPEED_1000;
break;
case MLX4_EN_10G_SPEED_XAUI:
case MLX4_EN_10G_SPEED_XFI:
- state->link_speed = 10000;
+ state->link_speed = SPEED_10000;
+ break;
+ case MLX4_EN_20G_SPEED:
+ state->link_speed = SPEED_20000;
break;
case MLX4_EN_40G_SPEED:
- state->link_speed = 40000;
+ state->link_speed = SPEED_40000;
+ break;
+ case MLX4_EN_56G_SPEED:
+ state->link_speed = SPEED_56000;
break;
default:
state->link_speed = -1;
break;
}
- state->transciver = qport_context->transceiver;
+
+ state->transceiver = qport_context->transceiver;
+
+ state->flags = 0; /* Reset and recalculate the port flags */
+ state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
+ MLX4_EN_PORT_ANC : 0;
+ state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
+ MLX4_EN_PORT_ANE : 0;
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
@@ -139,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_bytes = 0;
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
+ priv->port_stats.rx_chksum_complete = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
+ priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 745090b49d9e..040da4b16b1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -53,22 +53,49 @@ enum {
MLX4_MCAST_ENABLE = 2,
};
+enum mlx4_link_mode {
+ MLX4_1000BASE_CX_SGMII = 0,
+ MLX4_1000BASE_KX = 1,
+ MLX4_10GBASE_CX4 = 2,
+ MLX4_10GBASE_KX4 = 3,
+ MLX4_10GBASE_KR = 4,
+ MLX4_20GBASE_KR2 = 5,
+ MLX4_40GBASE_CR4 = 6,
+ MLX4_40GBASE_KR4 = 7,
+ MLX4_56GBASE_KR4 = 8,
+ MLX4_10GBASE_CR = 12,
+ MLX4_10GBASE_SR = 13,
+ MLX4_40GBASE_SR4 = 15,
+ MLX4_56GBASE_CR4 = 17,
+ MLX4_56GBASE_SR4 = 18,
+ MLX4_100BASE_TX = 24,
+ MLX4_1000BASE_T = 25,
+ MLX4_10GBASE_T = 26,
+};
+
+#define MLX4_PROT_MASK(link_mode) (1<<link_mode)
+
enum {
- MLX4_EN_1G_SPEED = 0x02,
- MLX4_EN_10G_SPEED_XFI = 0x01,
+ MLX4_EN_100M_SPEED = 0x04,
MLX4_EN_10G_SPEED_XAUI = 0x00,
+ MLX4_EN_10G_SPEED_XFI = 0x01,
+ MLX4_EN_1G_SPEED = 0x02,
+ MLX4_EN_20G_SPEED = 0x08,
MLX4_EN_40G_SPEED = 0x40,
+ MLX4_EN_56G_SPEED = 0x20,
MLX4_EN_OTHER_SPEED = 0x0f,
};
struct mlx4_en_query_port_context {
u8 link_up;
#define MLX4_EN_LINK_UP_MASK 0x80
- u8 reserved;
+#define MLX4_EN_ANC_MASK 0x40
+ u8 autoneg;
+#define MLX4_EN_AUTONEG_MASK 0x80
__be16 mtu;
u8 reserved2;
u8 link_speed;
-#define MLX4_EN_SPEED_MASK 0x43
+#define MLX4_EN_SPEED_MASK 0x6f
u16 reserved3[5];
__be64 mac;
u8 transceiver;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 01660c595f5c..a0474eb94aa3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -42,6 +42,10 @@
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_checksum.h>
+#endif
+
#include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
@@ -74,7 +78,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->page_offset = frag_info->frag_align;
+ page_alloc->page_offset = 0;
/* Not doing get_page() for each frag is a big win
* on asymmetric workloads. Note we can not use atomic_set().
*/
@@ -119,7 +123,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
out:
while (i--) {
- frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
@@ -157,7 +160,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
- frag_info, GFP_KERNEL))
+ frag_info, GFP_KERNEL | __GFP_COLD))
goto out;
}
return 0;
@@ -269,7 +272,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
- GFP_KERNEL)) {
+ GFP_KERNEL | __GFP_COLD)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
@@ -636,13 +639,94 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
int index = ring->prod & ring->size_mask;
while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
+ if (mlx4_en_prepare_rx_desc(priv, ring, index,
+ GFP_ATOMIC | __GFP_COLD))
break;
ring->prod++;
index = ring->prod & ring->size_mask;
}
}
+/* When hardware doesn't strip the vlan, we need to calculate the checksum
+ * over it and add it to the hardware's checksum calculation
+ */
+static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
+ struct vlan_hdr *vlanh)
+{
+ return csum_add(hw_checksum, *(__wsum *)vlanh);
+}
+
+/* Although the stack expects a checksum that doesn't include the pseudo
+ * header, the HW adds it. To compensate, we subtract the pseudo-header
+ * checksum from the checksum value provided by the HW.
+ */
+static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct iphdr *iph)
+{
+ __u16 length_for_csum = 0;
+ __wsum csum_pseudo_header = 0;
+
+ length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
+ csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ length_for_csum, iph->protocol, 0);
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* In IPv6 packets, besides subtracting the pseudo header checksum,
+ * we also compute/add the IP header checksum which
+ * is not added by the HW.
+ */
+static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct ipv6hdr *ipv6h)
+{
+ __wsum csum_pseudo_hdr = 0;
+
+ if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ return -1;
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+
+ csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
+ skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
+ return 0;
+}
+#endif
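+/* For CHECKSUM_COMPLETE: fold an unstripped vlan header into the HW
+ * checksum and remove the pseudo-header contribution so the stack gets
+ * the value it expects. Returns -1 if the packet can't be fixed up.
+ */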
+static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ int hwtstamp_rx_filter)
+{
+ __wsum hw_checksum = 0;
+
+ void *hdr = (u8 *)va + sizeof(struct ethhdr);
+
+ hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+
+ if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
+ hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
+ /* next protocol is neither IPv4 nor IPv6 */
+ if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+ != htons(ETH_P_IP) &&
+ ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+ != htons(ETH_P_IPV6))
+ return -1;
+ hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
+ hdr += sizeof(struct vlan_hdr);
+ }
+
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
+ get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+ if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ return -1;
+#endif
+ return 0;
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -744,73 +828,96 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- (cqe->checksum == cpu_to_be16(0xffff))) {
- ring->csum_ok++;
- /* This packet is eligible for GRO if it is:
- * - DIX Ethernet (type interpretation)
- * - TCP/IP (v4)
- * - without IP options
- * - not an IP fragment
- * - no LLS polling in progress
- */
- if (!mlx4_en_cq_busy_polling(cq) &&
- (dev->features & NETIF_F_GRO)) {
- struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
- if (!gro_skb)
- goto next;
-
- nr = mlx4_en_complete_rx_desc(priv,
- rx_desc, frags, gro_skb,
- length);
- if (!nr)
- goto next;
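+ /* TCP/UDP packets with a good HW checksum are reported as
+ * CHECKSUM_UNNECESSARY; other IPv4/IPv6 packets may be reported
+ * as CHECKSUM_COMPLETE when the device can checksum
+ * non-TCP/UDP traffic.
+ */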
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)) {
+ ip_summed = CHECKSUM_UNNECESSARY;
+ ring->csum_ok++;
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
+ } else {
+ if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPV6))) {
+ ip_summed = CHECKSUM_COMPLETE;
+ ring->csum_complete++;
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
+ }
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
- skb_shinfo(gro_skb)->nr_frags = nr;
- gro_skb->len = length;
- gro_skb->data_len = length;
- gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* This packet is eligible for GRO if it is:
+ * - DIX Ethernet (type interpretation)
+ * - TCP/IP (v4)
+ * - without IP options
+ * - not an IP fragment
+ * - no LLS polling in progress
+ */
+ if (!mlx4_en_cq_busy_polling(cq) &&
+ (dev->features & NETIF_F_GRO)) {
+ struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+ if (!gro_skb)
+ goto next;
+
+ nr = mlx4_en_complete_rx_desc(priv,
+ rx_desc, frags, gro_skb,
+ length);
+ if (!nr)
+ goto next;
+
+ if (ip_summed == CHECKSUM_COMPLETE) {
+ void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
+ if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ ring->csum_complete--;
+ }
+ }
- if (l2_tunnel)
- gro_skb->csum_level = 1;
- if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vid = be16_to_cpu(cqe->sl_vid);
+ skb_shinfo(gro_skb)->nr_frags = nr;
+ gro_skb->len = length;
+ gro_skb->data_len = length;
+ gro_skb->ip_summed = ip_summed;
- __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
- }
+ if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
+ gro_skb->csum_level = 1;
- if (dev->features & NETIF_F_RXHASH)
- skb_set_hash(gro_skb,
- be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ u16 vid = be16_to_cpu(cqe->sl_vid);
- skb_record_rx_queue(gro_skb, cq->ring);
- skb_mark_napi_id(gro_skb, &cq->napi);
+ __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ }
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev,
- skb_hwtstamps(gro_skb),
- timestamp);
- }
+ if (dev->features & NETIF_F_RXHASH)
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
- napi_gro_frags(&cq->napi);
- goto next;
- }
+ skb_record_rx_queue(gro_skb, cq->ring);
+ skb_mark_napi_id(gro_skb, &cq->napi);
- /* GRO not possible, complete processing here */
- ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev,
+ skb_hwtstamps(gro_skb),
+ timestamp);
}
- } else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+
+ napi_gro_frags(&cq->napi);
+ goto next;
}
+ /* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
priv->stats.rx_dropped++;
@@ -822,6 +929,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ if (ip_summed == CHECKSUM_COMPLETE) {
+ if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_complete--;
+ ring->csum_none++;
+ }
+ }
+
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
@@ -879,8 +994,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- if (priv->port_up)
- napi_schedule(&cq->napi);
+ if (likely(priv->port_up))
+ napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
@@ -910,20 +1025,18 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
cpu_curr = smp_processor_id();
aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
- if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
- /* Current cpu is not according to smp_irq_affinity -
- * probably affinity changed. need to stop this NAPI
- * poll, and restart it on the right CPU
- */
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return 0;
- }
- } else {
- /* Done for now */
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ return budget;
+
+ /* The current CPU does not match smp_irq_affinity -
+ * the affinity probably changed. Stop this NAPI poll
+ * and restart it on the right CPU.
+ */
+ done = 0;
}
+ /* Done for now */
+ napi_complete_done(napi, done);
+ mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -946,15 +1059,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
- if (!i) {
- priv->frag_info[i].frag_align = NET_IP_ALIGN;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
- } else {
- priv->frag_info[i].frag_align = 0;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
- }
+ priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
+ SMP_CACHE_BYTES);
buf_size += priv->frag_info[i].frag_size;
i++;
}
@@ -967,11 +1073,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+ " frag:%d - size:%d prefix:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
- priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride);
}
}
@@ -1026,7 +1131,8 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
int err;
u32 qpn;
- err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+ err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
+ MLX4_RESERVE_A0_QP);
if (err) {
en_err(priv, "Failed reserving drop qpn\n");
return err;
@@ -1065,14 +1171,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int i, qpn;
int err = 0;
int good_qps = 0;
- static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
- 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
- 0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
- &rss_map->base_qpn);
+ &rss_map->base_qpn, 0);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
@@ -1122,9 +1225,19 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
- for (i = 0; i < 10; i++)
- rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);
-
+ if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
+ rss_context->hash_fn = MLX4_RSS_HASH_XOR;
+ } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
+ memcpy(rss_context->rss_key, priv->rss_key,
+ MLX4_EN_RSS_KEY_SIZE);
+ netdev_rss_key_fill(rss_context->rss_key,
+ MLX4_EN_RSS_KEY_SIZE);
+ } else {
+ en_err(priv, "Unknown RSS hash function requested\n");
+ err = -EINVAL;
+ goto indir_err;
+ }
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 49d5afc7cfb8..2d8ee66138e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
- /* The device supports 1G, 10G and 40G speeds */
- if (priv->port_state.link_speed != 1000 &&
- priv->port_state.link_speed != 10000 &&
- priv->port_state.link_speed != 40000)
+ /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speeds */
+ if (priv->port_state.link_speed != SPEED_100 &&
+ priv->port_state.link_speed != SPEED_1000 &&
+ priv->port_state.link_speed != SPEED_10000 &&
+ priv->port_state.link_speed != SPEED_20000 &&
+ priv->port_state.link_speed != SPEED_40000 &&
+ priv->port_state.link_speed != SPEED_56000)
return priv->port_state.link_speed;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 454d9fea640e..a308d41e4de0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -46,7 +46,7 @@
#include "mlx4_en.h"
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+ struct mlx4_en_tx_ring **pring, u32 size,
u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
@@ -112,11 +112,17 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring, ring->buf, ring->size, ring->buf_size,
(unsigned long long) ring->wqres.buf.direct.map);
- ring->qpn = qpn;
+ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
+ MLX4_RESERVE_ETH_BF_QP);
+ if (err) {
+ en_err(priv, "failed reserving qp for TX ring\n");
+ goto err_map;
+ }
+
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
- goto err_map;
+ goto err_reserve;
}
ring->qp.event = mlx4_en_sqp_event;
@@ -143,6 +149,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
*pring = ring;
return 0;
+err_reserve:
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
@@ -479,8 +487,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- if (priv->port_up)
- napi_schedule(&cq->napi);
+ if (likely(priv->port_up))
+ napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 49290a405903..3d275fbaf0eb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -450,7 +450,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
- int cqn;
+ int cqn = -1;
int eqes_found = 0;
int set_ci = 0;
int port;
@@ -758,6 +758,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq_set_ci(eq, 1);
+ /* cqn is 24 bits wide but is initialized with its higher bits
+ * set as well. Thus, if any event was consumed, cqn's high bits
+ * will be cleared and we need to schedule the tasklet.
+ */
+ if (!(cqn & ~0xffffff))
+ tasklet_schedule(&eq->tasklet_ctx.task);
+
return eqes_found;
}
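The tasklet-scheduling test above relies on cqn starting at -1, so the bits above the 24-bit CQN field stay set until some completion event overwrites it. A small self-contained sketch of that check, assuming nothing beyond standard C:

#include <stdio.h>

#define CQN_WIDTH_MASK 0xffffff /* CQNs are 24 bits wide */

static int any_event_seen(int cqn)
{
        /* cqn == -1 leaves the high bits set; any real CQN clears them. */
        return !(cqn & ~CQN_WIDTH_MASK);
}

int main(void)
{
        int cqn = -1;                           /* initial value, no events yet */
        printf("%d\n", any_event_seen(cqn));    /* 0: nothing to schedule */
        cqn = 0x12345;                          /* a 24-bit CQN from an EQE */
        printf("%d\n", any_event_seen(cqn));    /* 1: schedule the tasklet */
        return 0;
}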
@@ -971,6 +978,12 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq->cons_index = 0;
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
return err;
err_out_free_mtt:
@@ -1027,6 +1040,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
}
}
synchronize_irq(eq->irq);
+ tasklet_disable(&eq->tasklet_ctx.task);
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
@@ -1123,8 +1137,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
goto err_out_free;
}
- err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
- dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+ err = mlx4_bitmap_init(&priv->eq_table.bitmap,
+ roundup_pow_of_two(dev->caps.num_eqs),
+ dev->caps.num_eqs - 1,
+ dev->caps.reserved_eqs,
+ roundup_pow_of_two(dev->caps.num_eqs) -
+ dev->caps.num_eqs);
if (err)
goto err_out_free;
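The mlx4_bitmap_init() call above now sizes the EQ bitmap to the next power of two and hands the padding back as reserved-top entries so they can never be allocated. A sketch of that arithmetic; roundup_pow_of_two_u() is a user-space stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int num_eqs = 44;      /* e.g. a non-power-of-two EQ count */
        unsigned int size = roundup_pow_of_two_u(num_eqs);
        unsigned int reserved_top = size - num_eqs;

        /* bitmap covers 64 entries, the last 20 of which stay reserved */
        printf("size=%u reserved_top=%u usable=%u\n",
               size, reserved_top, size - reserved_top);
        return 0;
}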
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2e88a235e26b..982861d1df44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -139,7 +139,13 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support",
[12] = "Large cache line (>64B) CQE stride support",
- [13] = "Large cache line (>64B) EQE stride support"
+ [13] = "Large cache line (>64B) EQE stride support",
+ [14] = "Ethernet protocol control support",
+ [15] = "Ethernet Backplane autoneg support",
+ [16] = "CONFIG DEV support",
+ [17] = "Asymmetric EQs support",
+ [18] = "More than 80 VFs support",
+ [19] = "Performance optimized for limited rule configuration flow steering support"
};
int i;
@@ -174,6 +180,61 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
return err;
}
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 in_modifier;
+ u8 field;
+ u16 field16;
+ int err;
+
+#define QUERY_FUNC_BUS_OFFSET 0x00
+#define QUERY_FUNC_DEVICE_OFFSET 0x01
+#define QUERY_FUNC_FUNCTION_OFFSET 0x01
+#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
+#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
+#define QUERY_FUNC_MAX_EQ_OFFSET 0x06
+#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ in_modifier = slave;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_QUERY_FUNC,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
+ func->bus = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
+ func->device = field & 0xf1;
+ MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
+ func->function = field & 0x7;
+ MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
+ func->physical_function = field & 0xf;
+ MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
+ func->rsvd_eqs = field16 & 0xffff;
+ MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
+ func->max_eq = field16 & 0xffff;
+ MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
+ func->rsvd_uars = field & 0x0f;
+
+ mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
+ func->bus, func->device, func->function, func->physical_function,
+ func->max_eq, func->rsvd_eqs, func->rsvd_uars);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -184,6 +245,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
u8 field, port;
u32 size, proxy_qp, qkey;
int err = 0;
+ struct mlx4_func func;
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
@@ -205,10 +267,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
+#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c
+
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
+#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
+
+#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
+#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30)
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
@@ -228,6 +296,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -277,7 +346,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
mlx4_get_active_ports(dev, slave);
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
- QUERY_FUNC_CAP_FLAG_QUOTAS);
+ QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = min(
@@ -306,11 +375,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.num_cqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
- size = dev->caps.num_eqs;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
-
- size = dev->caps.reserved_eqs;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
+ mlx4_QUERY_FUNC(dev, &func, slave)) {
+ size = vhcr->in_modifier &
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+ dev->caps.num_eqs :
+ rounddown_pow_of_two(dev->caps.num_eqs);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ } else {
+ size = vhcr->in_modifier &
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+ func.max_eq :
+ rounddown_pow_of_two(func.max_eq);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ size = func.rsvd_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ }
size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
@@ -326,13 +408,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
+ size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
+ QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
} else
err = -EINVAL;
return err;
}
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
struct mlx4_func_cap *func_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -340,14 +425,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u8 field, op_modifier;
u32 size, qkey;
int err = 0, quotas = 0;
+ u32 in_modifier;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
+ in_modifier = op_modifier ? gen_or_port :
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
MLX4_CMD_QUERY_FUNC_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
@@ -415,6 +503,19 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
+ func_cap->extra_flags = 0;
+
+ /* Mailbox data from 0x6c and onward should only be used if
+ * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
+ */
+ if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
+ if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
+ func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
+ if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
+ func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
+ }
+
goto out;
}
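The block above reads the extra-flags dword only when the firmware advertised QUERY_FUNC_CAP_FLAG_VALID_MAILBOX, so mailbox bytes that older firmware never wrote are not misinterpreted. A sketch of that gating pattern; the flag and bit values below are illustrative, only the shape of the check mirrors the hunk:

#include <stdint.h>
#include <stdio.h>

#define FLAG_VALID_MAILBOX      0x04    /* "extra fields are present" */
#define EXTRA_BF_QP             (1u << 31)
#define EXTRA_A0_QP             (1u << 30)

struct func_cap {
        uint8_t  flags;
        uint32_t extra_flags;
};

static void parse_extra(struct func_cap *fc, uint32_t mbox_extra)
{
        fc->extra_flags = 0;
        if (!(fc->flags & FLAG_VALID_MAILBOX))
                return;                 /* old FW: ignore the tail of the mailbox */
        if (mbox_extra & EXTRA_BF_QP)
                fc->extra_flags |= 1;   /* e.g. MLX4_QUERY_FUNC_FLAGS_BF_RES_QP */
        if (mbox_extra & EXTRA_A0_QP)
                fc->extra_flags |= 2;   /* e.g. MLX4_QUERY_FUNC_FLAGS_A0_RES_QP */
}

int main(void)
{
        struct func_cap fc = { .flags = FLAG_VALID_MAILBOX };

        parse_extra(&fc, EXTRA_BF_QP | EXTRA_A0_QP);
        printf("extra_flags=%u\n", (unsigned)fc.extra_flags);  /* 3 */
        return 0;
}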
@@ -519,6 +620,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
+#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
@@ -560,6 +662,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
+#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -571,11 +674,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
+#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
+#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
+#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -605,7 +712,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
- dev_cap->reserved_eqs = field & 0xf;
+ dev_cap->reserved_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
@@ -616,6 +723,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->reserved_mrws = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
+ dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
@@ -678,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
- mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
- dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
} else {
dev_cap->bf_reg_size = 0;
- mlx4_dbg(dev, "BlueFlame not available\n");
}
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
@@ -737,15 +843,22 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+ if (field & (1 << 5))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
if (field & (1 << 6))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
if (field & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
-
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+ if (field & 0x20)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
+ if (field32 & (1 << 0))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -763,6 +876,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
+ MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
+ QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
+ dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
+ MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
+ QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
+ dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
@@ -770,75 +890,41 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
if (field32 & (1 << 20))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
+ if (field32 & (1 << 21))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
- if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
- for (i = 1; i <= dev_cap->num_ports; ++i) {
- MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
- dev_cap->max_vl[i] = field >> 4;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
- dev_cap->ib_mtu[i] = field >> 4;
- dev_cap->max_port_width[i] = field & 0xf;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
- dev_cap->max_gids[i] = 1 << (field & 0xf);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
- dev_cap->max_pkeys[i] = 1 << (field & 0xf);
- }
- } else {
-#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
-#define QUERY_PORT_MTU_OFFSET 0x01
-#define QUERY_PORT_ETH_MTU_OFFSET 0x02
-#define QUERY_PORT_WIDTH_OFFSET 0x06
-#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
-#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
-#define QUERY_PORT_MAX_VL_OFFSET 0x0b
-#define QUERY_PORT_MAC_OFFSET 0x10
-#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
-#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
-#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
-
- for (i = 1; i <= dev_cap->num_ports; ++i) {
- err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
- if (err)
- goto out;
-
- MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- dev_cap->supported_port_types[i] = field & 3;
- dev_cap->suggested_type[i] = (field >> 3) & 1;
- dev_cap->default_sense[i] = (field >> 4) & 1;
- MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
- dev_cap->ib_mtu[i] = field & 0xf;
- MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
- dev_cap->max_port_width[i] = field & 0xf;
- MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
- dev_cap->max_gids[i] = 1 << (field >> 4);
- dev_cap->max_pkeys[i] = 1 << (field & 0xf);
- MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
- dev_cap->max_vl[i] = field & 0xf;
- MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
- dev_cap->log_max_macs[i] = field & 0xf;
- dev_cap->log_max_vlans[i] = field >> 4;
- MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
- MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
- MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
- dev_cap->trans_type[i] = field32 >> 24;
- dev_cap->vendor_oui[i] = field32 & 0xffffff;
- MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
- MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
- }
+ for (i = 1; i <= dev_cap->num_ports; i++) {
+ err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
+ if (err)
+ goto out;
}
- mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
- dev_cap->bmme_flags, dev_cap->reserved_lkey);
-
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
* we can't use any EQs whose doorbell falls on that page,
* even if the EQ itself isn't reserved.
*/
- dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
- dev_cap->reserved_eqs);
+ if (dev_cap->num_sys_eqs == 0)
+ dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+ dev_cap->reserved_eqs);
+ else
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
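The comment above explains why, on firmware without MLX4_DEV_CAP_FLAG2_SYS_EQS, reserved UARs inflate the reserved-EQ count: each UAR page carries four EQ doorbells, so every reserved UAR page makes four EQ doorbells unusable. A sketch of that calculation under those assumptions:

#include <stdio.h>

#define EQ_DOORBELLS_PER_UAR 4

static int effective_reserved_eqs(int num_sys_eqs, int reserved_uars,
                                  int reserved_eqs)
{
        if (num_sys_eqs)        /* new FW: EQ space is managed separately */
                return reserved_eqs;
        /* old FW: EQs whose doorbells sit on a reserved UAR page are lost too */
        return reserved_uars * EQ_DOORBELLS_PER_UAR > reserved_eqs ?
               reserved_uars * EQ_DOORBELLS_PER_UAR : reserved_eqs;
}

int main(void)
{
        printf("%d\n", effective_reserved_eqs(0, 8, 16));       /* 32 */
        printf("%d\n", effective_reserved_eqs(256, 8, 16));     /* 16 */
        return 0;
}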
+
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ if (dev_cap->bf_reg_size > 0)
+ mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+ dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+ else
+ mlx4_dbg(dev, "BlueFlame not available\n");
+
+ mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
@@ -847,8 +933,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
- mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
- dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+ mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
+ dev_cap->eqc_entry_sz);
mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
dev_cap->reserved_mrws, dev_cap->reserved_mtts);
mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
@@ -858,8 +945,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
- dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
- dev_cap->max_port_width[1]);
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
+ dev_cap->port_cap[1].max_port_width);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
@@ -867,15 +954,97 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
-
+ mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
+ dev_cap->dmfs_high_rate_qpn_base);
+ mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
+ dev_cap->dmfs_high_rate_qpn_range);
dump_dev_cap_flags(dev, dev_cap->flags);
dump_dev_cap_flags2(dev, dev_cap->flags2);
+}
+
+int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 field32;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ port_cap->max_vl = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+ port_cap->ib_mtu = field >> 4;
+ port_cap->max_port_width = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+ port_cap->max_gids = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+ port_cap->max_pkeys = 1 << (field & 0xf);
+ } else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
+#define QUERY_PORT_MTU_OFFSET 0x01
+#define QUERY_PORT_ETH_MTU_OFFSET 0x02
+#define QUERY_PORT_WIDTH_OFFSET 0x06
+#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
+#define QUERY_PORT_MAX_VL_OFFSET 0x0b
+#define QUERY_PORT_MAC_OFFSET 0x10
+#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
+#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
+#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ port_cap->supported_port_types = field & 3;
+ port_cap->suggested_type = (field >> 3) & 1;
+ port_cap->default_sense = (field >> 4) & 1;
+ port_cap->dmfs_optimized_state = (field >> 5) & 1;
+ MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
+ port_cap->ib_mtu = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
+ port_cap->max_port_width = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
+ port_cap->max_gids = 1 << (field >> 4);
+ port_cap->max_pkeys = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
+ port_cap->max_vl = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+ port_cap->log_max_macs = field & 0xf;
+ port_cap->log_max_vlans = field >> 4;
+ MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
+ MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
+ port_cap->trans_type = field32 >> 24;
+ port_cap->vendor_oui = field32 & 0xffffff;
+ MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
+ MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
+ }
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
+#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21)
+#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20)
+
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -885,7 +1054,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
u64 flags;
int err = 0;
u8 field;
- u32 bmme_flags;
+ u32 bmme_flags, field32;
int real_port;
int slave_port;
int first_port;
@@ -956,6 +1125,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= ~0x80;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ /* turn off host side virt features (VST, FSM, etc) for guests */
+ MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
+ field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
+ DEV_CAP_EXT_2_FLAG_FSM);
+ MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
+
return 0;
}
@@ -1374,6 +1549,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
struct mlx4_cmd_mailbox *mailbox;
__be32 *inbox;
int err;
+ static const u8 a0_dmfs_hw_steering[] = {
+ [MLX4_STEERING_DMFS_A0_DEFAULT] = 0,
+ [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1,
+ [MLX4_STEERING_DMFS_A0_STATIC] = 2,
+ [MLX4_STEERING_DMFS_A0_DISABLE] = 3
+ };
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
@@ -1394,6 +1575,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
+#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
@@ -1406,6 +1588,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
@@ -1497,6 +1680,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
@@ -1515,8 +1699,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* Enable Ethernet flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
- INIT_HCA_FS_ETH_BITS_OFFSET);
+ if (dev->caps.dmfs_high_steer_mode !=
+ MLX4_STEERING_DMFS_A0_STATIC)
+ MLX4_PUT(inbox,
+ (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
+ INIT_HCA_FS_ETH_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
/* Enable IPoIB flow steering
@@ -1526,6 +1713,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
INIT_HCA_FS_IB_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+
+ if (dev->caps.dmfs_high_steer_mode !=
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+ MLX4_PUT(inbox,
+ ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
+ << 6)),
+ INIT_HCA_FS_A0_OFFSET);
} else {
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz,
@@ -1576,6 +1770,12 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
u32 dword_field;
int err;
u8 byte_field;
+ static const u8 a0_dmfs_query_hw_steering[] = {
+ [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+ [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+ [2] = MLX4_STEERING_DMFS_A0_STATIC,
+ [3] = MLX4_STEERING_DMFS_A0_DISABLE
+ };
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
@@ -1607,6 +1807,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
@@ -1627,6 +1828,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
MLX4_GET(param->log_mc_table_sz, outbox,
INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox,
+ INIT_HCA_FS_A0_OFFSET);
+ param->dmfs_high_steer_mode =
+ a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
@@ -1647,8 +1852,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
if (byte_field) {
- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
- param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+ param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
+ param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
param->cqe_size = 1 << ((byte_field &
MLX4_CQE_SIZE_MASK_STRIDE) + 5);
param->eqe_size = 1 << (((byte_field &
@@ -1841,14 +2046,18 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
struct mlx4_config_dev {
__be32 update_flags;
- __be32 rsdv1[3];
+ __be32 rsvd1[3];
__be16 vxlan_udp_dport;
__be16 rsvd2;
+ __be32 rsvd3[27];
+ __be16 rsvd4;
+ u8 rsvd5;
+ u8 rx_checksum_val;
};
#define MLX4_VXLAN_UDP_DPORT (1 << 0)
-static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
int err;
struct mlx4_cmd_mailbox *mailbox;
@@ -1866,6 +2075,77 @@ static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_
return err;
}
+static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (!err)
+ memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+/* Conversion between the HW values and the actual functionality.
+ * The HW value is represented by the array index, and the
+ * functionality is determined by the flags.
+ */
+static const u8 config_dev_csum_flags[] = {
+ [0] = 0,
+ [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
+ [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
+ MLX4_RX_CSUM_MODE_L4,
+ [3] = MLX4_RX_CSUM_MODE_L4 |
+ MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
+ MLX4_RX_CSUM_MODE_MULTI_VLAN
+};
+
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+ struct mlx4_config_dev_params *params)
+{
+ struct mlx4_config_dev config_dev;
+ int err;
+ u8 csum_mask;
+
+#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
+#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
+#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
+ return -ENOTSUPP;
+
+ err = mlx4_CONFIG_DEV_get(dev, &config_dev);
+ if (err)
+ return err;
+
+ csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
+ CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+ if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+ return -EINVAL;
+ params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
+
+ csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
+ CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+ if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+ return -EINVAL;
+ params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
+
+ params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
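mlx4_config_dev_retrieval() above extracts a 3-bit checksum mode per port from rx_checksum_val (port 1 in bits 0-2, port 2 in bits 4-6) and maps each mode through config_dev_csum_flags[]. A user-space sketch of the same decode; the values in csum_mode_flags[] are placeholders, only the shifts and mask follow the hunk:

#include <stdint.h>
#include <stdio.h>

#define CSUM_MODE_MASK  0x7
#define PORT1_SHIFT     0
#define PORT2_SHIFT     4

/* Placeholder flag bits standing in for MLX4_RX_CSUM_MODE_* */
static const uint8_t csum_mode_flags[] = { 0x0, 0x1, 0x3, 0x7 };

static int decode_port(uint8_t rx_checksum_val, int shift, uint8_t *flags)
{
        uint8_t mode = (rx_checksum_val >> shift) & CSUM_MODE_MASK;

        if (mode >= sizeof(csum_mode_flags) / sizeof(csum_mode_flags[0]))
                return -1;              /* unknown mode reported by FW */
        *flags = csum_mode_flags[mode];
        return 0;
}

int main(void)
{
        uint8_t p1, p2, val = 0x32;     /* port1 mode 2, port2 mode 3 */

        if (!decode_port(val, PORT1_SHIFT, &p1) &&
            !decode_port(val, PORT2_SHIFT, &p2))
                printf("port1=0x%x port2=0x%x\n", p1, p2);
        return 0;
}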
+
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
struct mlx4_config_dev config_dev;
@@ -1874,7 +2154,7 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
config_dev.vxlan_udp_dport = udp_port;
- return mlx4_CONFIG_DEV(dev, &config_dev);
+ return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
@@ -2144,3 +2424,142 @@ out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+/* Access Reg commands */
+enum mlx4_access_reg_masks {
+ MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
+ MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
+ MLX4_ACCESS_REG_LEN_MASK = 0x7ff
+};
+
+struct mlx4_access_reg {
+ __be16 constant1;
+ u8 status;
+ u8 resrvd1;
+ __be16 reg_id;
+ u8 method;
+ u8 constant2;
+ __be32 resrvd2[2];
+ __be16 len_const;
+ __be16 resrvd3;
+#define MLX4_ACCESS_REG_HEADER_SIZE (20)
+ u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
+} __attribute__((__packed__));
+
+/**
+ * mlx4_ACCESS_REG - Generic access reg command.
+ * @dev: mlx4_dev.
+ * @reg_id: register ID to access.
+ * @method: Access method Read/Write.
+ * @reg_len: register length to Read/Write in bytes.
+ * @reg_data: reg_data pointer to Read/Write From/To.
+ *
+ * Access ConnectX registers FW command.
+ * Returns 0 on success (and copies the outbox mlx4_access_reg data
+ * field into reg_data), or a negative error code.
+ */
+static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
+ enum mlx4_access_reg_method method,
+ u16 reg_len, void *reg_data)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_access_reg *inbuf, *outbuf;
+ int err;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inbuf = inbox->buf;
+ outbuf = outbox->buf;
+
+ inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
+ inbuf->constant2 = 0x1;
+ inbuf->reg_id = cpu_to_be16(reg_id);
+ inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
+
+ reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
+ inbuf->len_const =
+ cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
+ ((0x3) << 12));
+
+ memcpy(inbuf->reg_data, reg_data, reg_len);
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
+ MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
+ err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
+ mlx4_err(dev,
+ "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
+ reg_id, err);
+ goto out;
+ }
+
+ memcpy(reg_data, outbuf->reg_data, reg_len);
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return err;
+}
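mlx4_ACCESS_REG() above encodes the register length into len_const as reg_len/4 + 1 dwords together with a constant 0x3 in bits 12-13, then byte-swaps the result for the device. A sketch of just that encoding, assuming a little-endian host; cpu_to_be16_sketch() is a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define ACCESS_REG_LEN_MASK 0x7ff

static uint16_t cpu_to_be16_sketch(uint16_t v)
{
        return (uint16_t)((v >> 8) | (v << 8));  /* assumes a little-endian host */
}

/* Length in dwords plus one, constant 0x3 in bits 12-13, as in the hunk above. */
static uint16_t encode_len_const(uint16_t reg_len)
{
        return cpu_to_be16_sketch(((reg_len / 4 + 1) & ACCESS_REG_LEN_MASK) |
                                  (0x3 << 12));
}

int main(void)
{
        printf("0x%04x\n", encode_len_const(0x40));     /* a 64-byte payload */
        return 0;
}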
+
+/* ConnectX registers IDs */
+enum mlx4_reg_id {
+ MLX4_REG_ID_PTYS = 0x5004,
+};
+
+/**
+ * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
+ * register
+ * @dev: mlx4_dev.
+ * @method: Access method Read/Write.
+ * @ptys_reg: PTYS register data pointer.
+ *
+ * Access ConnectX PTYS register, to Read/Write Port Type/Speed
+ * configuration
+ * Returns 0 on success or a negative error code.
+ */
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+ enum mlx4_access_reg_method method,
+ struct mlx4_ptys_reg *ptys_reg)
+{
+ return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
+ method, sizeof(*ptys_reg), ptys_reg);
+}
+EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
+
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_access_reg *inbuf = inbox->buf;
+ u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
+ u16 reg_id = be16_to_cpu(inbuf->reg_id);
+
+ if (slave != mlx4_master_func_num(dev) &&
+ method == MLX4_ACCESS_REG_WRITE)
+ return -EPERM;
+
+ if (reg_id == MLX4_REG_ID_PTYS) {
+ struct mlx4_ptys_reg *ptys_reg =
+ (struct mlx4_ptys_reg *)inbuf->reg_data;
+
+ ptys_reg->local_port =
+ mlx4_slave_convert_port(dev, slave,
+ ptys_reg->local_port);
+ }
+
+ return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
+ 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 9b835aecac96..62562b60fa87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -43,6 +43,26 @@ struct mlx4_mod_stat_cfg {
u8 log_pg_sz_m;
};
+struct mlx4_port_cap {
+ u8 supported_port_types;
+ u8 suggested_type;
+ u8 default_sense;
+ u8 log_max_macs;
+ u8 log_max_vlans;
+ int ib_mtu;
+ int max_port_width;
+ int max_vl;
+ int max_gids;
+ int max_pkeys;
+ u64 def_mac;
+ u16 eth_mtu;
+ int trans_type;
+ int vendor_oui;
+ u16 wavelength;
+ u64 trans_code;
+ u8 dmfs_optimized_state;
+};
+
struct mlx4_dev_cap {
int max_srq_sz;
int max_qp_sz;
@@ -56,6 +76,7 @@ struct mlx4_dev_cap {
int max_mpts;
int reserved_eqs;
int max_eqs;
+ int num_sys_eqs;
int reserved_mtts;
int max_mrw_sz;
int reserved_mrws;
@@ -66,17 +87,6 @@ struct mlx4_dev_cap {
int local_ca_ack_delay;
int num_ports;
u32 max_msg_sz;
- int ib_mtu[MLX4_MAX_PORTS + 1];
- int max_port_width[MLX4_MAX_PORTS + 1];
- int max_vl[MLX4_MAX_PORTS + 1];
- int max_gids[MLX4_MAX_PORTS + 1];
- int max_pkeys[MLX4_MAX_PORTS + 1];
- u64 def_mac[MLX4_MAX_PORTS + 1];
- u16 eth_mtu[MLX4_MAX_PORTS + 1];
- int trans_type[MLX4_MAX_PORTS + 1];
- int vendor_oui[MLX4_MAX_PORTS + 1];
- u16 wavelength[MLX4_MAX_PORTS + 1];
- u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
int fs_log_max_ucast_qp_range_size;
int fs_max_num_qp_per_entry;
@@ -114,12 +124,10 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
int max_rss_tbl_sz;
- u8 supported_port_types[MLX4_MAX_PORTS + 1];
- u8 suggested_type[MLX4_MAX_PORTS + 1];
- u8 default_sense[MLX4_MAX_PORTS + 1];
- u8 log_max_macs[MLX4_MAX_PORTS + 1];
- u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
+ u32 dmfs_high_rate_qpn_base;
+ u32 dmfs_high_rate_qpn_range;
+ struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
};
struct mlx4_func_cap {
@@ -143,6 +151,17 @@ struct mlx4_func_cap {
u8 port_flags;
u8 flags1;
u64 phys_port_id;
+ u32 extra_flags;
+};
+
+struct mlx4_func {
+ int bus;
+ int device;
+ int function;
+ int physical_function;
+ int rsvd_eqs;
+ int max_eq;
+ int rsvd_uars;
};
struct mlx4_adapter {
@@ -170,6 +189,7 @@ struct mlx4_init_hca_param {
u8 log_num_srqs;
u8 log_num_cqs;
u8 log_num_eqs;
+ u16 num_sys_eqs;
u8 log_rd_per_qp;
u8 log_mc_table_sz;
u8 log_mpt_sz;
@@ -177,6 +197,7 @@ struct mlx4_init_hca_param {
u8 mw_enabled; /* Enable memory windows */
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 steering_mode; /* for QUERY_HCA */
+ u8 dmfs_high_steer_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
@@ -203,14 +224,17 @@ struct mlx4_set_ib_param {
u32 cap_mask;
};
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
struct mlx4_func_cap *func_cap);
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 90de6e1ad06e..943cbd47d832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -105,7 +105,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
- MLX4_FUNC_CAP_EQE_CQE_STRIDE)
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
+ MLX4_FUNC_CAP_DMFS_A0_STATIC)
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
@@ -170,9 +171,9 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
{
int i;
- for (i = 0; i < dev->caps.num_ports - 1; i++) {
- if (port_type[i] != port_type[i + 1]) {
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ for (i = 0; i < dev->caps.num_ports - 1; i++) {
+ if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
@@ -197,6 +198,29 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
dev->caps.port_mask[i] = dev->caps.port_type[i];
}
+enum {
+ MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
+};
+
+static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ int err = 0;
+ struct mlx4_func func;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_QUERY_FUNC(dev, &func, 0);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+ dev_cap->max_eqs = func.max_eq;
+ dev_cap->reserved_eqs = func.rsvd_eqs;
+ dev_cap->reserved_uars = func.rsvd_uars;
+ err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
+ }
+ return err;
+}
+
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
struct mlx4_caps *dev_cap = &dev->caps;
@@ -231,6 +255,46 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
}
}
+static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
+ struct mlx4_port_cap *port_cap)
+{
+ dev->caps.vl_cap[port] = port_cap->max_vl;
+ dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
+ dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
+ dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
+ /* set gid and pkey table operating lengths by default
+ * to non-sriov values
+ */
+ dev->caps.gid_table_len[port] = port_cap->max_gids;
+ dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
+ dev->caps.port_width_cap[port] = port_cap->max_port_width;
+ dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
+ dev->caps.def_mac[port] = port_cap->def_mac;
+ dev->caps.supported_type[port] = port_cap->supported_port_types;
+ dev->caps.suggested_type[port] = port_cap->suggested_type;
+ dev->caps.default_sense[port] = port_cap->default_sense;
+ dev->caps.trans_type[port] = port_cap->trans_type;
+ dev->caps.vendor_oui[port] = port_cap->vendor_oui;
+ dev->caps.wavelength[port] = port_cap->wavelength;
+ dev->caps.trans_code[port] = port_cap->trans_code;
+
+ return 0;
+}
+
+static int mlx4_dev_port(struct mlx4_dev *dev, int port,
+ struct mlx4_port_cap *port_cap)
+{
+ int err = 0;
+
+ err = mlx4_QUERY_PORT(dev, port, port_cap);
+
+ if (err)
+ mlx4_err(dev, "QUERY_PORT command failed.\n");
+
+ return err;
+}
+
+#define MLX4_A0_STEERING_TABLE_SIZE 256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -241,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
+ mlx4_dev_cap_dump(dev, dev_cap);
if (dev_cap->min_page_sz > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
@@ -261,26 +326,16 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
dev->caps.num_ports = dev_cap->num_ports;
- dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
+ dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
+ dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
+ dev->caps.num_sys_eqs :
+ MLX4_MAX_EQ_NUM;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- dev->caps.vl_cap[i] = dev_cap->max_vl[i];
- dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
- dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
- dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
- /* set gid and pkey table operating lengths by default
- * to non-sriov values */
- dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
- dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
- dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
- dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
- dev->caps.def_mac[i] = dev_cap->def_mac[i];
- dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
- dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
- dev->caps.default_sense[i] = dev_cap->default_sense[i];
- dev->caps.trans_type[i] = dev_cap->trans_type[i];
- dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
- dev->caps.wavelength[i] = dev_cap->wavelength[i];
- dev->caps.trans_code[i] = dev_cap->trans_code[i];
+ err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
+ if (err) {
+ mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
+ return err;
+ }
}
dev->caps.uar_page_size = PAGE_SIZE;
@@ -389,13 +444,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.possible_type[i] = dev->caps.port_type[i];
}
- if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
- dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+ if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
+ dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
- if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
- dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+ if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
+ dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
@@ -411,6 +466,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.num_ports;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+ if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
+ dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
+ else
+ dev->caps.dmfs_high_rate_qpn_base =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+
+ if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
+ dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+ dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
+ dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
+ dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
+ } else {
+ dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
+ dev->caps.dmfs_high_rate_qpn_base =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+ dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
+ }
+
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
+ dev->caps.dmfs_high_rate_qpn_range;
+
dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
@@ -440,8 +517,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
- if (!mlx4_is_slave(dev))
+ if (!mlx4_is_slave(dev)) {
mlx4_enable_cqe_eqe_stride(dev);
+ dev->caps.alloc_res_qp_mask =
+ (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
+ MLX4_RESERVE_A0_QP;
+ } else {
+ dev->caps.alloc_res_qp_mask = 0;
+ }
return 0;
}
@@ -631,7 +714,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
struct mlx4_dev_cap dev_cap;
struct mlx4_func_cap func_cap;
struct mlx4_init_hca_param hca_param;
- int i;
+ u8 i;
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
@@ -692,7 +775,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
PF_CONTEXT_BEHAVIOUR_MASK) {
- mlx4_err(dev, "Unknown pf context behaviour\n");
+ mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
+ func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
return -ENOSYS;
}
@@ -732,7 +816,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
for (i = 1; i <= dev->caps.num_ports; ++i) {
- err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+ err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
if (err) {
mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
i, err);
@@ -791,6 +875,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+ if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
+ dev->caps.bf_reg_size)
+ dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
+
+ if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
+ dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
+
return 0;
err_mem:
@@ -901,9 +992,12 @@ static ssize_t set_port_type(struct device *dev,
struct mlx4_priv *priv = mlx4_priv(mdev);
enum mlx4_port_type types[MLX4_MAX_PORTS];
enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+ static DEFINE_MUTEX(set_port_type_mutex);
int i;
int err = 0;
+ mutex_lock(&set_port_type_mutex);
+
if (!strcmp(buf, "ib\n"))
info->tmp_type = MLX4_PORT_TYPE_IB;
else if (!strcmp(buf, "eth\n"))
@@ -912,7 +1006,8 @@ static ssize_t set_port_type(struct device *dev,
info->tmp_type = MLX4_PORT_TYPE_AUTO;
else {
mlx4_err(mdev, "%s is not supported port type\n", buf);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_out;
}
mlx4_stop_sense(mdev);
@@ -958,6 +1053,9 @@ static ssize_t set_port_type(struct device *dev,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
+err_out:
+ mutex_unlock(&set_port_type_mutex);
+
return err ? err : count;
}
@@ -1123,8 +1221,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
- num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
- dev->caps.num_eqs;
+ num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
@@ -1186,8 +1283,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
- num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
- dev->caps.num_eqs;
+ num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
@@ -1466,6 +1562,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev)
else {
mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev);
+ }
+}
+
+static void mlx4_close_fw(struct mlx4_dev *dev)
+{
+ if (!mlx4_is_slave(dev)) {
mlx4_UNMAP_FA(dev);
mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
@@ -1561,10 +1663,46 @@ static int choose_log_fs_mgm_entry_size(int qp_per_entry)
return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
+static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
+{
+ switch (dmfs_high_steer_mode) {
+ case MLX4_STEERING_DMFS_A0_DEFAULT:
+ return "default performance";
+
+ case MLX4_STEERING_DMFS_A0_DYNAMIC:
+ return "dynamic hybrid mode";
+
+ case MLX4_STEERING_DMFS_A0_STATIC:
+ return "performance optimized for limited rule configuration (static)";
+
+ case MLX4_STEERING_DMFS_A0_DISABLE:
+ return "disabled performance optimized steering";
+
+ case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
+ return "performance optimized steering not supported";
+
+ default:
+ return "Unrecognized mode";
+ }
+}
+
+#define MLX4_DMFS_A0_STEERING (1UL << 2)
+
static void choose_steering_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
- if (mlx4_log_num_mgm_entry_size == -1 &&
+ if (mlx4_log_num_mgm_entry_size <= 0) {
+ if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
+ if (dev->caps.dmfs_high_steer_mode ==
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+ mlx4_err(dev, "DMFS high rate mode not supported\n");
+ else
+ dev->caps.dmfs_high_steer_mode =
+ MLX4_STEERING_DMFS_A0_STATIC;
+ }
+ }
+
+ if (mlx4_log_num_mgm_entry_size <= 0 &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
(!mlx4_is_mfunc(dev) ||
(dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
@@ -1577,6 +1715,9 @@ static void choose_steering_mode(struct mlx4_dev *dev,
dev->caps.fs_log_max_ucast_qp_range_size =
dev_cap->fs_log_max_ucast_qp_range_size;
} else {
+ if (dev->caps.dmfs_high_steer_mode !=
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+ dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
@@ -1603,7 +1744,8 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
- dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
+ dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1612,16 +1754,39 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
-static int mlx4_init_hca(struct mlx4_dev *dev)
+static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
+{
+ int i;
+ struct mlx4_port_cap port_cap;
+
+ if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
+ return -EINVAL;
+
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_dev_port(dev, i, &port_cap)) {
+ mlx4_err(dev,
+ "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+ } else if ((dev->caps.dmfs_high_steer_mode !=
+ MLX4_STEERING_DMFS_A0_DEFAULT) &&
+ (port_cap.dmfs_optimized_state ==
+ !!(dev->caps.dmfs_high_steer_mode ==
+ MLX4_STEERING_DMFS_A0_DISABLE))) {
+ mlx4_err(dev,
+ "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
+ dmfs_high_rate_steering_mode_str(
+ dev->caps.dmfs_high_steer_mode),
+ (port_cap.dmfs_optimized_state ?
+ "enabled" : "disabled"));
+ }
+ }
+
+ return 0;
+}
+
+static int mlx4_init_fw(struct mlx4_dev *dev)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_adapter adapter;
- struct mlx4_dev_cap dev_cap;
struct mlx4_mod_stat_cfg mlx4_cfg;
- struct mlx4_profile profile;
- struct mlx4_init_hca_param init_hca;
- u64 icm_size;
- int err;
+ int err = 0;
if (!mlx4_is_slave(dev)) {
err = mlx4_QUERY_FW(dev);
@@ -1644,7 +1809,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
if (err)
mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ }
+
+ return err;
+}
+
+static int mlx4_init_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_adapter adapter;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_profile profile;
+ struct mlx4_init_hca_param init_hca;
+ u64 icm_size;
+ struct mlx4_config_dev_params params;
+ int err;
+ if (!mlx4_is_slave(dev)) {
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
@@ -1654,6 +1835,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
choose_steering_mode(dev, &dev_cap);
choose_tunnel_offload_mode(dev, &dev_cap);
+ if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
+ mlx4_is_master(dev))
+ dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
+
err = mlx4_get_phys_port_id(dev);
if (err)
mlx4_err(dev, "Fail to get physical port id\n");
@@ -1696,6 +1881,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
+
+ if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_query_func(dev, &dev_cap);
+ if (err < 0) {
+ mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
+ goto err_stop_fw;
+ } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
+ dev->caps.num_eqs = dev_cap.max_eqs;
+ dev->caps.reserved_eqs = dev_cap.reserved_eqs;
+ dev->caps.reserved_uars = dev_cap.reserved_uars;
+ }
+ }
+
/*
* If TS is supported by FW
* read HCA frequency by QUERY_HCA command
@@ -1727,6 +1925,24 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
+
+ if (dev->caps.dmfs_high_steer_mode !=
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
+ if (mlx4_validate_optimized_steering(dev))
+ mlx4_warn(dev, "Optimized steering validation failed\n");
+
+ if (dev->caps.dmfs_high_steer_mode ==
+ MLX4_STEERING_DMFS_A0_DISABLE) {
+ dev->caps.dmfs_high_rate_qpn_base =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+ dev->caps.dmfs_high_rate_qpn_range =
+ MLX4_A0_STEERING_TABLE_SIZE;
+ }
+
+ mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
+ dmfs_high_rate_steering_mode_str(
+ dev->caps.dmfs_high_steer_mode));
+ }
} else {
err = mlx4_init_slave(dev);
if (err) {
@@ -1755,6 +1971,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto unmap_bf;
}
+ /* Query CONFIG_DEV parameters */
+ err = mlx4_config_dev_retrieval(dev, &params);
+ if (err && err != -ENOTSUPP) {
+ mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
+ } else if (!err) {
+ dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
+ dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
+ }
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
@@ -2054,12 +2278,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
- int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1,
- MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int i;
if (msi_x) {
+ int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
@@ -2259,6 +2482,72 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
iounmap(owner);
}
+#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
+ !!((flags) & MLX4_FLAG_MASTER))
+
+static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
+ u8 total_vfs, int existing_vfs)
+{
+ u64 dev_flags = dev->flags;
+ int err = 0;
+
+ atomic_inc(&pf_loading);
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ if (existing_vfs != total_vfs) {
+ mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+ existing_vfs, total_vfs);
+ total_vfs = existing_vfs;
+ }
+ }
+
+ dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
+ if (NULL == dev->dev_vfs) {
+ mlx4_err(dev, "Failed to allocate memory for VFs\n");
+ goto disable_sriov;
+ }
+
+ if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
+ err = pci_enable_sriov(pdev, total_vfs);
+ }
+ if (err) {
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
+ err);
+ goto disable_sriov;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev_flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev_flags &= ~MLX4_FLAG_SLAVE;
+ dev->num_vfs = total_vfs;
+ }
+ return dev_flags;
+
+disable_sriov:
+ atomic_dec(&pf_loading);
+ dev->num_vfs = 0;
+ kfree(dev->dev_vfs);
+ return dev_flags & ~MLX4_FLAG_MASTER;
+}
+
+enum {
+ MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
+};
+
+static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+ int *nvfs)
+{
+ int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
+ /* Checking for 64 VFs as a limitation of CX2 */
+ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
+ requested_vfs >= 64) {
+ mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
+ requested_vfs);
+ return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
+ }
+ return 0;
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
@@ -2267,6 +2556,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int err;
int port;
int i;
+ struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
dev = &priv->dev;
@@ -2303,40 +2593,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- if (total_vfs) {
- mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
- total_vfs);
- dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
- if (NULL == dev->dev_vfs) {
- mlx4_err(dev, "Failed to allocate memory for VFs\n");
- err = -ENOMEM;
- goto err_free_own;
- } else {
- atomic_inc(&pf_loading);
- existing_vfs = pci_num_vf(pdev);
- if (existing_vfs) {
- err = 0;
- if (existing_vfs != total_vfs)
- mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
- existing_vfs, total_vfs);
- } else {
- err = pci_enable_sriov(pdev, total_vfs);
- }
- if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
- err);
- atomic_dec(&pf_loading);
- } else {
- mlx4_warn(dev, "Running in master mode\n");
- dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
- dev->num_vfs = total_vfs;
- }
- }
- }
-
atomic_set(&priv->opreq_count, 0);
INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
@@ -2350,6 +2606,14 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_sriov;
}
+
+ if (total_vfs) {
+ dev->flags = MLX4_FLAG_MASTER;
+ existing_vfs = pci_num_vf(pdev);
+ if (existing_vfs)
+ dev->flags |= MLX4_FLAG_SRIOV;
+ dev->num_vfs = total_vfs;
+ }
}
slave_start:
@@ -2363,9 +2627,10 @@ slave_start:
* before posting commands. Also, init num_slaves before calling
* mlx4_init_hca */
if (mlx4_is_mfunc(dev)) {
- if (mlx4_is_master(dev))
+ if (mlx4_is_master(dev)) {
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
- else {
+
+ } else {
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
@@ -2375,17 +2640,110 @@ slave_start:
}
}
+ err = mlx4_init_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to init fw, aborting.\n");
+ goto err_mfunc;
+ }
+
+ if (mlx4_is_master(dev)) {
+ /* when we hit the goto slave_start below, dev_cap is already initialized */
+ if (!dev_cap) {
+ dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+
+ if (!dev_cap) {
+ err = -ENOMEM;
+ goto err_fw;
+ }
+
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_fw;
+ }
+
+ if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+ goto err_fw;
+
+ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
+ existing_vfs);
+
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+ dev->flags = dev_flags;
+ if (!SRIOV_VALID_STATE(dev->flags)) {
+ mlx4_err(dev, "Invalid SRIOV state\n");
+ goto err_sriov;
+ }
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_sriov;
+ }
+ goto slave_start;
+ }
+ } else {
+ /* Legacy mode FW requires SRIOV to be enabled before
+ * doing QUERY_DEV_CAP, since the max_eqs value differs when
+ * SRIOV is enabled.
+ */
+ memset(dev_cap, 0, sizeof(*dev_cap));
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_fw;
+ }
+
+ if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+ goto err_fw;
+ }
+ }
+
err = mlx4_init_hca(dev);
if (err) {
if (err == -EACCES) {
/* Not primary Physical function
* Running in slave mode */
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+ /* We're not a PF */
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ if (!existing_vfs)
+ pci_disable_sriov(pdev);
+ if (mlx4_is_master(dev))
+ atomic_dec(&pf_loading);
+ dev->flags &= ~MLX4_FLAG_SRIOV;
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
dev->flags |= MLX4_FLAG_SLAVE;
dev->flags &= ~MLX4_FLAG_MASTER;
goto slave_start;
} else
- goto err_mfunc;
+ goto err_fw;
+ }
+
+ if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);
+
+ if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
+ dev->flags = dev_flags;
+ err = mlx4_cmd_init(dev);
+ if (err) {
+ /* Only VHCR is cleaned up, so we can still
+ * send FW commands
+ */
+ mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
+ goto err_close;
+ }
+ } else {
+ dev->flags = dev_flags;
+ }
+
+ if (!SRIOV_VALID_STATE(dev->flags)) {
+ mlx4_err(dev, "Invalid SRIOV state\n");
+ goto err_close;
+ }
}
/* check if the device is functioning at its maximum possible speed.
@@ -2496,6 +2854,7 @@ slave_start:
if (mlx4_is_master(dev) && dev->num_vfs)
atomic_dec(&pf_loading);
+ kfree(dev_cap);
return 0;
err_port:
@@ -2540,12 +2899,15 @@ err_master_mfunc:
err_close:
mlx4_close_hca(dev);
+err_fw:
+ mlx4_close_fw(dev);
+
err_mfunc:
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
err_cmd:
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
err_sriov:
if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
@@ -2556,10 +2918,10 @@ err_sriov:
kfree(priv->dev.dev_vfs);
-err_free_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ kfree(dev_cap);
return err;
}
@@ -2787,15 +3149,17 @@ static void mlx4_unload_one(struct pci_dev *pdev)
if (mlx4_is_master(dev))
mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ mlx4_close_fw(dev);
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
+ dev->flags &= ~MLX4_FLAG_SRIOV;
dev->num_vfs = 0;
}
@@ -2956,10 +3320,11 @@ static int __init mlx4_verify_params(void)
port_type_array[0] = true;
}
- if (mlx4_log_num_mgm_entry_size != -1 &&
- (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
- mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
- pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+ if (mlx4_log_num_mgm_entry_size < -7 ||
+ (mlx4_log_num_mgm_entry_size > 0 &&
+ (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+ mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
+ pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
mlx4_log_num_mgm_entry_size,
MLX4_MIN_MGM_LOG_ENTRY_SIZE,
MLX4_MAX_MGM_LOG_ENTRY_SIZE);
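
With this hunk, mlx4_log_num_mgm_entry_size doubles as a small flag word: any value in -7..0 keeps device-managed flow steering, and individual bits of the magnitude opt into extra behaviour (bit 2, MLX4_DMFS_A0_STEERING, requests the A0 optimized mode in choose_steering_mode() above). A minimal decode sketch mirroring that logic; the helper name is illustrative only:

    /* -4 has bit 2 set in its magnitude and therefore asks for DMFS A0
     * static steering; -1 keeps plain DMFS; positive values still select
     * the B0 MGM entry size as before.
     */
    static bool mlx4_param_wants_dmfs_a0(int log_num_mgm_entry_size)
    {
            if (log_num_mgm_entry_size > 0)
                    return false;
            return (-log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING;
    }
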
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 872843179f44..a3867e7ef885 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -999,12 +999,27 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
}
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
- if (ret == -ENOMEM)
+ if (ret == -ENOMEM) {
mlx4_err_rule(dev,
"mcg table is full. Fail to register network rule\n",
rule);
- else if (ret)
- mlx4_err_rule(dev, "Fail to register network rule\n", rule);
+ } else if (ret) {
+ if (ret == -ENXIO) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_err_rule(dev,
+ "DMFS is not enabled, "
+ "failed to register network rule.\n",
+ rule);
+ else
+ mlx4_err_rule(dev,
+ "Rule exceeds the dmfs_high_rate_mode limitations, "
+ "failed to register network rule.\n",
+ rule);
+
+ } else {
+ mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+ }
+ }
mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index de10dbb2e6ed..bdd4eea2247c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -43,6 +43,8 @@
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -243,6 +245,7 @@ struct mlx4_bitmap {
u32 reserved_top;
u32 mask;
u32 avail;
+ u32 effective_len;
spinlock_t lock;
unsigned long *table;
};
@@ -373,6 +376,14 @@ struct mlx4_srq_context {
__be64 db_rec_addr;
};
+struct mlx4_eq_tasklet {
+ struct list_head list;
+ struct list_head process_list;
+ struct tasklet_struct task;
+ /* lock on completion tasklet list */
+ spinlock_t lock;
+};
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -383,6 +394,7 @@ struct mlx4_eq {
int nent;
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
+ struct mlx4_eq_tasklet tasklet_ctx;
};
struct mlx4_slave_eqe {
@@ -606,6 +618,7 @@ struct mlx4_cmd {
u8 use_events;
u8 toggle;
u8 comm_toggle;
+ u8 initialized;
};
enum {
@@ -669,8 +682,17 @@ struct mlx4_srq_table {
struct mlx4_icm_table cmpt_table;
};
+enum mlx4_qp_table_zones {
+ MLX4_QP_TABLE_ZONE_GENERAL,
+ MLX4_QP_TABLE_ZONE_RSS,
+ MLX4_QP_TABLE_ZONE_RAW_ETH,
+ MLX4_QP_TABLE_ZONE_NUM
+};
+
struct mlx4_qp_table {
- struct mlx4_bitmap bitmap;
+ struct mlx4_bitmap *bitmap_gen;
+ struct mlx4_zone_allocator *zones;
+ u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];
u32 rdmarc_base;
int rdmarc_shift;
spinlock_t lock;
@@ -872,7 +894,8 @@ extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
+ int align, u32 skip_mask);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
@@ -947,13 +970,18 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base);
+ int *base, u8 flags);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
@@ -1121,8 +1149,16 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
+enum {
+ MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
+ MLX4_CMD_CLEANUP_POOL = 1UL << 1,
+ MLX4_CMD_CLEANUP_HCR = 1UL << 2,
+ MLX4_CMD_CLEANUP_VHCR = 1UL << 3,
+ MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
+};
+
int mlx4_cmd_init(struct mlx4_dev *dev);
-void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
@@ -1132,6 +1168,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
unsigned long timeout);
+void mlx4_cq_tasklet_cb(unsigned long data);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -1273,6 +1310,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1313,4 +1355,72 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
+enum mlx4_zone_flags {
+ MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
+ MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO = 1UL << 1,
+ MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO = 1UL << 2,
+ MLX4_ZONE_USE_RR = 1UL << 3,
+};
+
+enum mlx4_zone_alloc_flags {
+ /* No two objects may overlap between zones, so the UID
+ * can be left unused when freeing. If this flag is given and
+ * two overlapping zones are used anyway, an object will be freed
+ * from the smallest possible matching zone.
+ */
+ MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0,
+};
+
+struct mlx4_zone_allocator;
+
+/* Create a new zone allocator */
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);
+
+/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
+ * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
+ * Similarly, when searching for an object to free, this offset is taken into
+ * account. The use_rr mlx4_bitmap parameter for allocating objects from this <bitmap>
+ * is given through the MLX4_ZONE_USE_RR flag in <flags>.
+ * When an allocation fails, <zone_alloc> tries to allocate from other zones
+ * according to the policy set by <flags>. <puid> is the unique identifier
+ * assigned to this zone.
+ */
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+ struct mlx4_bitmap *bitmap,
+ u32 flags,
+ int priority,
+ int offset,
+ u32 *puid);
+
+/* Remove bitmap indicated by <uid> from <zone_alloc> */
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);
+
+/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
+ * the attached bitmaps.
+ */
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
+
+/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
+ * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
+ * allocated from is returned in <puid>. If the allocation fails, a negative
+ * number is returned. Otherwise, the offset of the first object is returned.
+ */
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+ int align, u32 skip_mask, u32 *puid);
+
+/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
+ * <zones>.
+ */
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
+ u32 uid, u32 obj, u32 count);
+
+/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
+ * specifying the uid when freeing an object, the zone allocator can figure it out by
+ * itself. Other parameters are similar to mlx4_zone_free_entries().
+ */
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);
+
+/* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
+
#endif /* MLX4_H */
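
The declarations above make up the new zone allocator that later hunks use to carve the QP number space into prioritized sub-ranges. A minimal usage sketch, assuming a single 256-entry bitmap; the wrapper function, sizes and priority are illustrative, and error handling is abbreviated:

    static int example_zone_setup(struct mlx4_dev *dev)
    {
            struct mlx4_zone_allocator *zones;
            struct mlx4_bitmap bitmap;
            u32 uid, obj;
            int err;

            zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
            if (!zones)
                    return -ENOMEM;

            /* 256 objects, nothing reserved at either end */
            err = mlx4_bitmap_init(&bitmap, 256, 255, 0, 0);
            if (err)
                    goto out_destroy;

            err = mlx4_zone_add_one(zones, &bitmap, MLX4_ZONE_USE_RR,
                                    0 /* priority */, 0 /* offset */, &uid);
            if (err)
                    goto out_cleanup;

            /* grab 8 aligned objects, skipping no QP number bits */
            obj = mlx4_zone_alloc_entries(zones, uid, 8, 8, 0, NULL);
            if (obj != (u32)-1)
                    mlx4_zone_free_entries(zones, uid, obj, 8);

            mlx4_zone_remove_one(zones, uid);
    out_cleanup:
            mlx4_bitmap_cleanup(&bitmap);
    out_destroy:
            mlx4_zone_allocator_destroy(zones);
            return err;
    }
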
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fef65840b3b..944a112dff37 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -326,6 +326,7 @@ struct mlx4_en_rx_ring {
#endif
unsigned long csum_ok;
unsigned long csum_none;
+ unsigned long csum_complete;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
@@ -375,7 +376,6 @@ struct mlx4_en_port_profile {
};
struct mlx4_en_profile {
- int rss_xor;
int udp_rss;
u8 rss_mask;
u32 active_ports;
@@ -421,10 +421,16 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
+enum mlx4_en_port_flag {
+ MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
+ MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
+};
+
struct mlx4_en_port_state {
int link_state;
int link_speed;
- int transciver;
+ int transceiver;
+ u32 flags;
};
struct mlx4_en_pkt_stats {
@@ -443,6 +449,7 @@ struct mlx4_en_port_stats {
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
+ unsigned long rx_chksum_complete;
unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 9
};
@@ -475,7 +482,6 @@ struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
u16 frag_stride;
- u16 frag_align;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -502,7 +508,8 @@ enum {
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
/* whether we need to drop packets that hardware loopback-ed */
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
- MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
+ MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
};
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -610,6 +617,8 @@ struct mlx4_en_priv {
__be16 vxlan_port;
u32 pflags;
+ u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+ u8 rss_hash_fn;
};
enum mlx4_en_wol {
@@ -769,7 +778,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
- int qpn, u32 size, u16 stride,
+ u32 size, u16 stride,
int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
@@ -829,6 +838,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
+#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
+ ((dev->features & feature) ^ (new_features & feature))
+
+int mlx4_en_reset_config(struct net_device *dev,
+ struct hwtstamp_config ts_config,
+ netdev_features_t new_features);
+
/*
* Functions for time stamping
*/
@@ -838,9 +854,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
-int mlx4_en_timestamp_config(struct net_device *dev,
- int tx_type,
- int rx_filter);
/* Globals
*/
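
DEV_FEATURE_CHANGED and mlx4_en_reset_config() added above let the feature-change path detect which netdev bits actually toggled before taking the heavyweight reconfiguration path. A hedged sketch of how a set_features handler might use them; the handler itself and the priv->hwtstamp_config field are assumptions, not part of this hunk:

    static int example_set_features(struct net_device *dev,
                                    netdev_features_t features)
    {
            struct mlx4_en_priv *priv = netdev_priv(dev);
            int err = 0;

            /* only rebuild the rings when the relevant bit really changed */
            if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
                    err = mlx4_en_reset_config(dev, priv->hwtstamp_config,
                                               features);
            return err;
    }
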
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 193a6adb5d04..d6f549685c0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -130,10 +130,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
- vfree(buddy->bits[i]);
- else
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -147,10 +144,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- if (is_vmalloc_addr(buddy->bits[i]))
- vfree(buddy->bits[i]);
- else
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
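
Both hunks above replace the open-coded is_vmalloc_addr()/vfree()/kfree() choice with kvfree(), which accepts either kind of pointer. A sketch of the matching allocation idiom this enables; the helper is illustrative, not part of the patch:

    /* Try a physically contiguous allocation first, fall back to
     * vmalloc for large bitmaps; either way the buffer is released
     * with a single kvfree(bits).
     */
    static unsigned long *example_alloc_bits(size_t size)
    {
            unsigned long *bits;

            bits = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
            if (!bits)
                    bits = vzalloc(size);
            return bits;
    }
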
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 94eeb2c7d7e4..30eb1ead0fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
+
+/* Cable Module Info */
+#define MODULE_INFO_MAX_READ 48
+
+#define I2C_ADDR_LOW 0x50
+#define I2C_ADDR_HIGH 0x51
+#define I2C_PAGE_SIZE 256
+
+/* Module Info Data */
+struct mlx4_cable_info {
+ u8 i2c_addr;
+ u8 page_num;
+ __be16 dev_mem_address;
+ __be16 reserved1;
+ __be16 size;
+ __be32 reserved2[2];
+ u8 data[MODULE_INFO_MAX_READ];
+};
+
+enum cable_info_err {
+ CABLE_INF_INV_PORT = 0x1,
+ CABLE_INF_OP_NOSUP = 0x2,
+ CABLE_INF_NOT_CONN = 0x3,
+ CABLE_INF_NO_EEPRM = 0x4,
+ CABLE_INF_PAGE_ERR = 0x5,
+ CABLE_INF_INV_ADDR = 0x6,
+ CABLE_INF_I2C_ADDR = 0x7,
+ CABLE_INF_QSFP_VIO = 0x8,
+ CABLE_INF_I2C_BUSY = 0x9,
+};
+
+#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
+
+static inline const char *cable_info_mad_err_str(u16 mad_status)
+{
+ u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
+
+ switch (err) {
+ case CABLE_INF_INV_PORT:
+ return "invalid port selected";
+ case CABLE_INF_OP_NOSUP:
+ return "operation not supported for this port (the port is of type CX4 or internal)";
+ case CABLE_INF_NOT_CONN:
+ return "cable is not connected";
+ case CABLE_INF_NO_EEPRM:
+ return "the connected cable has no EPROM (passive copper cable)";
+ case CABLE_INF_PAGE_ERR:
+ return "page number is greater than 15";
+ case CABLE_INF_INV_ADDR:
+ return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
+ case CABLE_INF_I2C_ADDR:
+ return "invalid I2C slave address";
+ case CABLE_INF_QSFP_VIO:
+ return "at least one cable violates the QSFP specification and ignores the modsel signal";
+ case CABLE_INF_I2C_BUSY:
+ return "I2C bus is constantly busy";
+ }
+ return "Unknown Error";
+}
+
+/**
+ * mlx4_get_module_info - Read cable module eeprom data
+ * @dev: mlx4_dev.
+ * @port: port number.
+ * @offset: byte offset in eeprom to start reading data from.
+ * @size: number of bytes to read.
+ * @data: output buffer to put the requested data into.
+ *
+ * Reads cable module EEPROM data and puts the result into the
+ * data pointer parameter.
+ * Returns the number of bytes read on success or a negative error
+ * code.
+ */
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ u16 offset, u16 size, u8 *data)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ u16 i2c_addr;
+ int ret;
+
+ if (size > MODULE_INFO_MAX_READ)
+ size = MODULE_INFO_MAX_READ;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
+ /* Cross-page reads are not allowed;
+ * read only up to offset 256 in the low page
+ */
+ size -= offset + size - I2C_PAGE_SIZE;
+
+ i2c_addr = I2C_ADDR_LOW;
+ if (offset >= I2C_PAGE_SIZE) {
+ /* Reset offset to high page */
+ i2c_addr = I2C_ADDR_HIGH;
+ offset -= I2C_PAGE_SIZE;
+ }
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = cpu_to_be16(offset);
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = i2c_addr;
+ cable_info->size = cpu_to_be16(size);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, i2c_addr, offset, size,
+ ret, cable_info_mad_err_str(ret));
+
+ if (i2c_addr == I2C_ADDR_HIGH &&
+ MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
+ /* Some SFP cables do not support i2c slave
+ * address 0x51 (high page), abort silently.
+ */
+ ret = 0;
+ else
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ memcpy(data, cable_info->data, size);
+ ret = size;
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+EXPORT_SYMBOL(mlx4_get_module_info);
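
mlx4_get_module_info() clamps each transaction to MODULE_INFO_MAX_READ bytes and never crosses the 256-byte I2C page, so callers are expected to loop. A hedged sketch of such a caller; the function and buffer are hypothetical:

    static int example_read_eeprom(struct mlx4_dev *dev, u8 port, u8 *buf)
    {
            u16 offset = 0, to_read = 128;
            int ret;

            while (to_read) {
                    ret = mlx4_get_module_info(dev, port, offset, to_read,
                                               buf + offset);
                    if (ret < 0)
                            return ret;
                    if (!ret)
                            break;  /* silent abort on an unsupported page */
                    offset += ret;
                    to_read -= ret;
            }
            return offset;  /* bytes actually read */
    }
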
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 14089d9e1667..2bf437aafc53 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,8 +126,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
- dev->phys_caps.num_phys_eqs :
+ profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
@@ -216,10 +215,18 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
- dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
- MAX_MSIX));
- init_hca->eqc_base = profile[i].start;
- init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ init_hca->log_num_eqs = 0x1f;
+ init_hca->eqc_base = profile[i].start;
+ init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
+ } else {
+ dev->caps.num_eqs = roundup_pow_of_two(
+ min_t(unsigned,
+ dev_cap->max_eqs,
+ MAX_MSIX));
+ init_hca->eqc_base = profile[i].start;
+ init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+ }
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2301365c79c7..1586ecce13c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -42,6 +42,10 @@
#include "mlx4.h"
#include "icm.h"
+/* QP to support BF should have bits 6,7 cleared */
+#define MLX4_BF_QP_SKIP_MASK 0xc0
+#define MLX4_MAX_BF_QP_RANGE 0x40
+
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@ -207,26 +211,45 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base)
+ int *base, u8 flags)
{
+ u32 uid;
+ int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);
+
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
+ return -ENOMEM;
+
+ uid = MLX4_QP_TABLE_ZONE_GENERAL;
+ if (flags & (u8)MLX4_RESERVE_A0_QP) {
+ if (bf_qp)
+ uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
+ else
+ uid = MLX4_QP_TABLE_ZONE_RSS;
+ }
+
+ *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
+ bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
if (*base == -1)
return -ENOMEM;
return 0;
}
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base, u8 flags)
{
u64 in_param = 0;
u64 out_param;
int err;
+ /* Turn off all unsupported QP allocation flags */
+ flags &= dev->caps.alloc_res_qp_mask;
+
if (mlx4_is_mfunc(dev)) {
- set_param_l(&in_param, cnt);
+ set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
set_param_h(&in_param, align);
err = mlx4_cmd_imm(dev, in_param, &out_param,
RES_QP, RES_OP_RESERVE,
@@ -238,7 +261,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
*base = get_param_l(&out_param);
return 0;
}
- return __mlx4_qp_reserve_range(dev, cnt, align, base);
+ return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
@@ -249,7 +272,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
- mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
+ mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
@@ -459,28 +482,261 @@ static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
+#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
+#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
+#define MLX4_QP_TABLE_RAW_ETH_SIZE 256
+
+static int mlx4_create_zones(struct mlx4_dev *dev,
+ u32 reserved_bottom_general,
+ u32 reserved_top_general,
+ u32 reserved_bottom_rss,
+ u32 start_offset_rss,
+ u32 max_table_offset)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
+ int bitmap_initialized = 0;
+ u32 last_offset;
+ int k;
+ int err;
+
+ qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
+
+ if (NULL == qp_table->zones)
+ return -ENOMEM;
+
+ bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
+
+ if (NULL == bitmap) {
+ err = -ENOMEM;
+ goto free_zone;
+ }
+
+ err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
+ (1 << 23) - 1, reserved_bottom_general,
+ reserved_top_general);
+
+ if (err)
+ goto free_bitmap;
+
+ ++bitmap_initialized;
+
+ err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
+ MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
+ MLX4_ZONE_USE_RR, 0,
+ 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);
+
+ if (err)
+ goto free_bitmap;
+
+ err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
+ reserved_bottom_rss,
+ reserved_bottom_rss - 1,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ reserved_bottom_rss - start_offset_rss);
+
+ if (err)
+ goto free_bitmap;
+
+ ++bitmap_initialized;
+
+ err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
+ MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
+ MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
+ MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
+ 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);
+
+ if (err)
+ goto free_bitmap;
+
+ last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
+ /* We have a single zone for the A0 steering QPs area of the FW. This area
+ * needs to be split into subareas. One set of subareas is for RSS QPs
+ * (in which qp number bits 6 and/or 7 are set); the other set of subareas
+ * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
+ * Currently, the values returned by the FW (A0 steering area starting qp number
+ * and A0 steering area size) are such that there are only two subareas -- one
+ * for RSS and one for RAW_ETH.
+ */
+ for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
+ k++) {
+ int size;
+ u32 offset = start_offset_rss;
+ u32 bf_mask;
+ u32 requested_size;
+
+ /* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
+ * a mask of all LSB bits set until (and not including) the first
+ * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
+ * is 0xc0, bf_mask will be 0x3f.
+ */
+ bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
+ requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);
+
+ if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
+ ((int)(max_table_offset - last_offset)) >=
+ roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
+ (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
+ !((last_offset + requested_size - 1) &
+ MLX4_BF_QP_SKIP_MASK)))
+ size = requested_size;
+ else {
+ u32 candidate_offset =
+ (last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;
+
+ if (last_offset & MLX4_BF_QP_SKIP_MASK)
+ last_offset = candidate_offset;
+
+ /* From this point, the BF bits are 0 */
+
+ if (last_offset > max_table_offset) {
+ /* need to skip */
+ size = -1;
+ } else {
+ size = min3(max_table_offset - last_offset,
+ bf_mask - (last_offset & bf_mask),
+ requested_size);
+ if (size < requested_size) {
+ int candidate_size;
+
+ candidate_size = min3(
+ max_table_offset - candidate_offset,
+ bf_mask - (last_offset & bf_mask),
+ requested_size);
+
+ /* We will not take this path if last_offset was
+ * already set above to candidate_offset
+ */
+ if (candidate_size > size) {
+ last_offset = candidate_offset;
+ size = candidate_size;
+ }
+ }
+ }
+ }
+
+ if (size > 0) {
+ /* mlx4_bitmap_alloc_range will find a contiguous range of "size"
+ * QPs in which both bits 6 and 7 are zero, because we pass it the
+ * MLX4_BF_QP_SKIP_MASK.
+ */
+ offset = mlx4_bitmap_alloc_range(
+ *bitmap + MLX4_QP_TABLE_ZONE_RSS,
+ size, 1,
+ MLX4_BF_QP_SKIP_MASK);
+
+ if (offset == (u32)-1) {
+ err = -ENOMEM;
+ break;
+ }
+
+ last_offset = offset + size;
+
+ err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
+ roundup_pow_of_two(size) - 1, 0,
+ roundup_pow_of_two(size) - size);
+ } else {
+ /* Add an empty bitmap; we'll allocate from different zones (since
+ * at least one is reserved)
+ */
+ err = mlx4_bitmap_init(*bitmap + k, 1,
+ MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
+ 0);
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ }
+
+ if (err)
+ break;
+
+ ++bitmap_initialized;
+
+ err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
+ MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
+ MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
+ MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
+ offset, qp_table->zones_uids + k);
+
+ if (err)
+ break;
+ }
+
+ if (err)
+ goto free_bitmap;
+
+ qp_table->bitmap_gen = *bitmap;
+
+ return err;
+
+free_bitmap:
+ for (k = 0; k < bitmap_initialized; k++)
+ mlx4_bitmap_cleanup(*bitmap + k);
+ kfree(bitmap);
+free_zone:
+ mlx4_zone_allocator_destroy(qp_table->zones);
+ return err;
+}
+
+static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+
+ if (qp_table->zones) {
+ int i;
+
+ for (i = 0;
+ i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
+ i++) {
+ struct mlx4_bitmap *bitmap =
+ mlx4_zone_get_bitmap(qp_table->zones,
+ qp_table->zones_uids[i]);
+
+ mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
+ if (NULL == bitmap)
+ continue;
+
+ mlx4_bitmap_cleanup(bitmap);
+ }
+ mlx4_zone_allocator_destroy(qp_table->zones);
+ kfree(qp_table->bitmap_gen);
+ qp_table->bitmap_gen = NULL;
+ qp_table->zones = NULL;
+ }
+}
+
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
int reserved_from_top = 0;
+ int reserved_from_bot;
int k;
+ int fixed_reserved_from_bot_rv = 0;
+ int bottom_reserved_for_rss_bitmap;
+ u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
+ dev->caps.dmfs_high_rate_qpn_range;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
- /*
- * We reserve 2 extra QPs per port for the special QPs. The
+ /* We reserve 2 extra QPs per port for the special QPs. The
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*
* We also reserve the MSB of the 24-bit QP number to indicate
* that a QP is an XRC QP.
*/
- dev->phys_caps.base_sqpn =
- ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+ for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
+ fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];
+
+ if (fixed_reserved_from_bot_rv < max_table_offset)
+ fixed_reserved_from_bot_rv = max_table_offset;
+
+ /* We reserve at least 1 extra for bitmaps that we don't have enough space for */
+ bottom_reserved_for_rss_bitmap =
+ roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
+ dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
{
int sort[MLX4_NUM_QP_REGION];
@@ -490,8 +746,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
sort[i] = i;
- for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
- for (j = 2; j < i; ++j) {
+ for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
+ for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
dev->caps.reserved_qps_cnt[sort[j - 1]]) {
tmp = sort[j];
@@ -501,13 +757,12 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
}
}
- for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+ for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
last_base -= dev->caps.reserved_qps_cnt[sort[i]];
dev->caps.reserved_qps_base[sort[i]] = last_base;
reserved_from_top +=
dev->caps.reserved_qps_cnt[sort[i]];
}
-
}
/* Reserve 8 real SQPs in both native and SRIOV modes.
@@ -520,10 +775,17 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
* b. All the proxy SQPs (8 per function)
* c. All the tunnel QPs (8 per function)
*/
+ reserved_from_bot = mlx4_num_reserved_sqps(dev);
+ if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
+ mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
+ return -EINVAL;
+ }
+
+ err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
+ bottom_reserved_for_rss_bitmap,
+ fixed_reserved_from_bot_rv,
+ max_table_offset);
- err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
- reserved_from_top);
if (err)
return err;
@@ -559,7 +821,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
if (err)
goto err_mem;
- return 0;
+
+ return err;
err_mem:
kfree(dev->caps.qp0_tunnel);
@@ -568,6 +831,7 @@ err_mem:
kfree(dev->caps.qp1_proxy);
dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+ mlx4_cleanup_qp_zones(dev);
return err;
}
@@ -577,7 +841,8 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
return;
mlx4_CONF_SPECIAL_QP(dev, 0);
- mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
+
+ mlx4_cleanup_qp_zones(dev);
}
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
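
The new flags argument lets consumers ask for QP numbers usable with BlueFlame: with MLX4_BF_QP_SKIP_MASK equal to 0xc0, the bf_mask computed above works out to 0x3f (0xc0 & ~0xbf = 0x40, minus 1), and a reserved range must avoid numbers with bits 6 or 7 set. A hedged sketch of a caller reserving such a range; unsupported flags are silently masked by dev->caps.alloc_res_qp_mask:

    static int example_reserve_bf_qps(struct mlx4_dev *dev)
    {
            int base_qpn;
            int err;

            /* at most MLX4_MAX_BF_QP_RANGE (0x40) QPs may be requested here */
            err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn,
                                        MLX4_RESERVE_A0_QP |
                                        MLX4_RESERVE_ETH_BF_QP);
            if (err)
                    return err;

            /* ... create and use QPs base_qpn .. base_qpn + 7 ... */

            mlx4_qp_release_range(dev, base_qpn, 8);
            return 0;
    }
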
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd5cf6d957c7..4efbd1eca611 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1543,16 +1543,21 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
int align;
int base;
int qpn;
+ u8 flags;
switch (op) {
case RES_OP_RESERVE:
count = get_param_l(&in_param) & 0xffffff;
+ /* Turn off all unsupported QP allocation flags that the
+ * slave tries to set.
+ */
+ flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
align = get_param_h(&in_param);
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
- err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
return err;
@@ -2872,6 +2877,23 @@ out_add:
return err;
}
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ u8 get = vhcr->op_modifier;
+
+ if (get != 1)
+ return -EPERM;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ return err;
+}
+
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
int len, struct res_mtt **res)
{
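
qp_alloc_res() above now recovers the allocation flags that the slave packed into the top byte of the low parameter dword (flags << 24 | count), masking them against alloc_res_qp_mask so a slave cannot request unsupported behaviour. An illustrative decode of that layout; the helper name is made up:

    /* flags 0x3 with a count of 8 arrive as 0x03000008 in the low dword */
    static inline void example_unpack_qp_reserve(u32 param_l, u8 *flags,
                                                 u32 *count)
    {
            *count = param_l & 0xffffff;
            *flags = (param_l >> 24) & 0xff;
    }
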
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 368c6c5ea014..a2853057c779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1363,7 +1363,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_map;
}
- if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
+ if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
dev_err(&dev->pdev->dev, "command queue size overflow\n");
err = -EINVAL;
goto err_map;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ad2c96a02a53..da82991239a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ return "MLX5_EVENT_TYPE_PAGE_FAULT";
default:
return "Unrecognized event";
}
@@ -225,8 +227,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- mlx5_core_dbg(dev, "event %s(%d) arrived\n",
- eqe_type_str(eqe->type), eqe->type);
+ mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
+ eqe_type_str(eqe->type), eqe->type, rsn);
mlx5_rsc_event(dev, rsn, eqe->type);
break;
@@ -279,6 +281,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
break;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ mlx5_eq_pagefault(dev, eqe);
+ break;
+#endif
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
@@ -390,7 +397,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
*/
eq_update_ci(eq, 1);
- mlx5_vfree(in);
+ kvfree(in);
return 0;
err_irq:
@@ -400,7 +407,7 @@ err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
- mlx5_vfree(in);
+ kvfree(in);
err_buf:
mlx5_buf_free(dev, &eq->buf);
@@ -446,8 +453,12 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
+ u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
+ if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -459,7 +470,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_use_events(dev);
err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
- MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
+ MLX5_NUM_ASYNC_EQE, async_event_mask,
"mlx5_async_eq", &dev->priv.uuari.uars[0]);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 087c4c797deb..06f9036acd83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -69,6 +69,46 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
}
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+{
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *out;
+ int err;
+
+ if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+ return -ENOTSUPP;
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err) {
+ mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
+ goto out;
+ }
+
+ memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
+ sizeof(*caps));
+
+ mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
+ be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
+ be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
+ be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_query_odp_caps);
+
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_init_hca_mbox_in in;
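
mlx5_query_odp_caps() above is exported for the IB driver; it returns -ENOTSUPP when the on-demand-paging capability bit is absent and otherwise fills the per-transport capability words. A hedged usage sketch; the probing function is hypothetical:

    static int example_check_odp(struct mlx5_core_dev *dev)
    {
            struct mlx5_odp_caps caps;
            int err;

            err = mlx5_query_odp_caps(dev, &caps);
            if (err == -ENOTSUPP)
                    return 0;       /* no ODP on this device */
            if (err)
                    return err;

            if (be32_to_cpu(caps.per_transport_caps.rc_odp_caps))
                    pr_debug("RC transport supports some ODP operations\n");
            return 0;
    }
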
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 71b10b210792..3f4525619a07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,6 +43,7 @@
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
+#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
@@ -225,7 +226,7 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->msix_arr[i].entry = i;
nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
- MLX5_EQ_VEC_COMP_BASE, nvec);
+ MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -840,6 +841,8 @@ struct mlx5_core_event_handler {
void *data);
};
+#define MLX5_IB_MOD "mlx5_ib"
+
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -878,6 +881,10 @@ static int init_one(struct pci_dev *pdev,
goto out_init;
}
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
return 0;
out_init:
@@ -896,8 +903,12 @@ static void remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id mlx5_core_pci_table[] = {
- { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
+ { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d476918ef269..4fdaae9b54d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -349,7 +349,7 @@ out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
@@ -400,7 +400,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
out_free:
- mlx5_vfree(out);
+ kvfree(out);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 313965853e10..72c2d002c3b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -68,9 +68,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
memcpy(data_out, out->data, size_out);
ex2:
- mlx5_vfree(out);
+ kvfree(out);
ex1:
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 5261a2b0da43..575d853dbe05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -88,6 +88,95 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
+ int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
+ struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
+ struct mlx5_core_qp *qp =
+ container_of(common, struct mlx5_core_qp, common);
+ struct mlx5_pagefault pfault;
+
+ if (!qp) {
+ mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
+ qpn);
+ return;
+ }
+
+ pfault.event_subtype = eqe->sub_type;
+ pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
+ (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
+ pfault.bytes_committed = be32_to_cpu(
+ pf_eqe->bytes_committed);
+
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
+ eqe->sub_type, pfault.flags);
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault.rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault.rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault.rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault.rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
+ qpn, pfault.rdma.r_key);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
+ pfault.rdma.rdma_op_len);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: rdma_va: 0x%016llx,\n",
+ pfault.rdma.rdma_va);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: bytes_committed: 0x%06x\n",
+ pfault.bytes_committed);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault.wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault.wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
+ qpn, pfault.wqe.wqe_index);
+ mlx5_core_dbg(dev,
+ "PAGE_FAULT: bytes_committed: 0x%06x\n",
+ pfault.bytes_committed);
+ break;
+
+ default:
+ mlx5_core_warn(dev,
+ "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
+ eqe->sub_type, qpn);
+ /* Unsupported page faults should still be resolved by the
+ * page fault handler
+ */
+ }
+
+ if (qp->pfault_handler) {
+ qp->pfault_handler(qp, &pfault);
+ } else {
+ mlx5_core_err(dev,
+ "ODP event for QP %08x, without a fault handler in QP\n",
+ qpn);
+ /* Page fault will remain unresolved. QP will hang until it is
+ * destroyed
+ */
+ }
+
+ mlx5_core_put_rsc(common);
+}
+#endif
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
@@ -322,3 +411,33 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 flags, int error)
+{
+ struct mlx5_page_fault_resume_mbox_in in;
+ struct mlx5_page_fault_resume_mbox_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ in.hdr.opmod = 0;
+ flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
+ MLX5_PAGE_FAULT_RESUME_WRITE |
+ MLX5_PAGE_FAULT_RESUME_RDMA);
+ flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
+ in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
+ (flags << MLX5_QPN_BITS));
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
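
The two ODP hunks above use one packed layout: the page-fault EQE and the PAGE_FAULT_RESUME command both carry the QP number and the fault flags in a single big-endian 32-bit flags_qpn word, packed in mlx5_core_page_fault_resume() and unpacked in mlx5_eq_pagefault(). A minimal sketch of that packing, assuming the conventional 24-bit mlx5 QP number (QPN_BITS/QPN_MASK below are illustrative stand-ins for MLX5_QPN_BITS/MLX5_QPN_MASK, not values taken from the patch):

    #include <stdint.h>

    #define QPN_BITS 24                         /* assumed width, mirrors MLX5_QPN_BITS */
    #define QPN_MASK ((1u << QPN_BITS) - 1)     /* assumed value, mirrors MLX5_QPN_MASK */

    /* Host-endian value that the driver then feeds to cpu_to_be32(). */
    static uint32_t pack_flags_qpn(uint32_t qpn, uint32_t flags)
    {
        return (qpn & QPN_MASK) | (flags << QPN_BITS);
    }

    /* Reverse of the packing, mirroring what mlx5_eq_pagefault() does
     * after be32_to_cpu() on pf_eqe->flags_qpn.
     */
    static void unpack_flags_qpn(uint32_t flags_qpn, uint32_t *qpn, uint32_t *flags)
    {
        *qpn   = flags_qpn & QPN_MASK;
        *flags = flags_qpn >> QPN_BITS;
    }
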
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 0a6348cefc01..06801d6f595e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -96,6 +96,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
int err;
memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
in.uarn = cpu_to_be32(uarn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 6c7c78baedca..a8522d8af95d 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1612,7 +1612,6 @@ ks8695_drv_remove(struct platform_device *pdev)
static struct platform_driver ks8695_driver = {
.driver = {
.name = MODULENAME,
- .owner = THIS_MODULE,
},
.probe = ks8695_probe,
.remove = ks8695_drv_remove,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0c33b92a5a81..f78909a00f15 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1255,7 +1255,6 @@ static int ks8842_remove(struct platform_device *pdev)
static struct platform_driver ks8842_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ks8842_probe,
.remove = ks8842_remove,
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0eb47649191b..2fc5cd56c0a8 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1679,7 +1679,6 @@ static int ks8851_remove(struct platform_device *pdev)
static struct platform_driver ks8851_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ks8851_ml_dt_ids),
},
.probe = ks8851_probe,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index bde1b70f473b..6c72e74fef3e 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -559,7 +559,6 @@ static struct platform_driver moxart_mac_driver = {
.remove = moxart_remove,
.driver = {
.name = "moxart-ethernet",
- .owner = THIS_MODULE,
.of_match_table = moxart_mac_match,
},
};
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 9e7e3f1dce3e..af099057f0e9 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2913,16 +2913,11 @@ again:
flags |= MXGEFW_FLAGS_SMALL;
/* pad frames to at least ETH_ZLEN bytes */
- if (unlikely(skb->len < ETH_ZLEN)) {
- if (skb_padto(skb, ETH_ZLEN)) {
- /* The packet is gone, so we must
- * return 0 */
- ss->stats.tx_dropped += 1;
- return NETDEV_TX_OK;
- }
- /* adjust the len to account for the zero pad
- * so that the nic can know how long it is */
- skb->len = ETH_ZLEN;
+ if (eth_skb_pad(skb)) {
+ /* The packet is gone, so we must
+ * return 0 */
+ ss->stats.tx_dropped += 1;
+ return NETDEV_TX_OK;
}
}
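
The myri10ge conversion above (and the matching r8169 changes later in this section) replaces the open-coded skb_padto() plus manual skb->len fixup with eth_skb_pad(), which pads the frame to ETH_ZLEN, updates skb->len, and returns non-zero with the skb already freed when padding fails. A minimal sketch of the resulting xmit-path pattern, assuming a kernel build environment; demo_xmit is an invented name for illustration:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        if (eth_skb_pad(skb)) {
            /* skb was consumed by the failed padding attempt */
            dev->stats.tx_dropped++;
            return NETDEV_TX_OK;
        }
        /* ... skb->len is now at least ETH_ZLEN; hand it to the hardware ... */
        return NETDEV_TX_OK;
    }
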
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index a5512a97cc4d..acf3f11e38cc 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -287,7 +287,6 @@ static struct platform_driver jazz_sonic_driver = {
.remove = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 9e4ddbba7036..d98f5b8a1c66 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -326,13 +326,9 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
macintosh_config->ident == MAC_MODEL_P588 ||
macintosh_config->ident == MAC_MODEL_P575 ||
macintosh_config->ident == MAC_MODEL_C610) {
- unsigned long flags;
int card_present;
- local_irq_save(flags);
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
- local_irq_restore(flags);
-
if (!card_present) {
printk("none.\n");
return -ENODEV;
@@ -634,7 +630,6 @@ static struct platform_driver mac_sonic_driver = {
.remove = mac_sonic_device_remove,
.driver = {
.name = mac_sonic_string,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 8176c8a1cc6a..9fbc30264237 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -457,7 +457,6 @@ static struct platform_driver netx_eth_driver = {
.resume = netx_eth_drv_resume,
.driver = {
.name = CARDNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 379b7fbded78..afa445842f3e 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1081,7 +1081,6 @@ static struct platform_driver w90p910_ether_driver = {
.remove = w90p910_ether_remove,
.driver = {
.name = "nuc900-emc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index a42293092ea4..d36599f47af5 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1568,7 +1568,6 @@ MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
static struct platform_driver octeon_mgmt_driver = {
.driver = {
.name = "octeon_mgmt",
- .owner = THIS_MODULE,
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 30d934d66356..44e8d7d25547 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1837,10 +1837,8 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
out:
- if (mac->iob_pdev)
- pci_dev_put(mac->iob_pdev);
- if (mac->dma_pdev)
- pci_dev_put(mac->dma_pdev);
+ pci_dev_put(mac->iob_pdev);
+ pci_dev_put(mac->dma_pdev);
free_netdev(dev);
out_disable_device:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a913b3ad2f89..1aa25b13ace1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -376,13 +376,14 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
}
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *netdev, const unsigned char *addr)
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
if (!adapter->fdb_mac_learn)
- return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
qlcnic_sriov_check(adapter)) {
@@ -401,13 +402,13 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (!adapter->fdb_mac_learn)
- return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
!qlcnic_sriov_check(adapter)) {
@@ -460,7 +461,7 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
}
static int qlcnic_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 75b1693ec8bf..9c31e46d1eee 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -507,7 +507,7 @@ rx_status_loop:
netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
rx_tail, status, len);
- new_skb = netdev_alloc_skb_ip_align(dev, buflen);
+ new_skb = napi_alloc_skb(napi, buflen);
if (!new_skb) {
dev->stats.rx_dropped++;
goto rx_next;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 007b38cce69a..6d0b9dfac313 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -112,6 +112,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <asm/irq.h>
#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
@@ -182,10 +183,13 @@ static int debug = -1;
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
-/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
-#define MAX_ETH_FRAME_SIZE 1536
+/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/
+#define MAX_ETH_FRAME_SIZE 1792
-/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+/* max supported payload size */
+#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */
#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
@@ -920,11 +924,19 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur
return 0;
}
+static int rtl8139_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
.ndo_get_stats64 = rtl8139_get_stats64,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = rtl8139_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
@@ -2025,7 +2037,7 @@ keep_pkt:
/* Malloc up new buffer, compatible with net-2e. */
/* Omit the four octet CRC from the length. */
- skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
if (likely(skb)) {
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
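
The 8139too change above derives its new MTU ceiling from the enlarged bounce buffer: 1792 bytes minus the VLAN-tagged header (18) and the FCS (4) leaves 1770 bytes of payload, while the 68-byte floor in rtl8139_change_mtu() is the usual IPv4 minimum. A quick check of the arithmetic (constants copied from the hunk; the program itself is only an illustration):

    #include <stdio.h>

    #define MAX_ETH_FRAME_SIZE 1792
    #define VLAN_ETH_HLEN      18   /* 14-byte Ethernet header + 4-byte VLAN tag */
    #define ETH_FCS_LEN        4
    #define MAX_ETH_DATA_SIZE  (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)

    int main(void)
    {
        /* prints 1770, the largest MTU rtl8139_change_mtu() now accepts */
        printf("%d\n", MAX_ETH_DATA_SIZE);
        return 0;
    }
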
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 040b13739947..32497f0e537c 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -6,10 +6,10 @@
/* The header prepended to received packets. */
struct rx_header {
- ushort pad; /* Pad. */
- ushort rx_count;
- ushort rx_status; /* Unknown bit assignments :-<. */
- ushort cur_addr; /* Apparently the current buffer address(?) */
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
};
#define PAR_DATA 0
@@ -29,22 +29,25 @@ struct rx_header {
#define RdAddr 0xC0
#define HNib 0x10
-enum page0_regs
-{
- /* The first six registers hold the ethernet physical station address. */
- PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
- TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
- TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
- ISR = 10, IMR = 11, /* Interrupt status and mask. */
- CMR1 = 12, /* Command register 1. */
- CMR2 = 13, /* Command register 2. */
- MODSEL = 14, /* Mode select register. */
- MAR = 14, /* Memory address register (?). */
- CMR2_h = 0x1d, };
-
-enum eepage_regs
-{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+enum page0_regs {
+ /* The first six registers hold
+ * the ethernet physical station address.
+ */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+ CMR2_h = 0x1d,
+};
+enum eepage_regs {
+ PROM_CMD = 6,
+ PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */
+};
#define ISR_TxOK 0x01
#define ISR_RxOK 0x04
@@ -72,141 +75,146 @@ enum eepage_regs
#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
#define CMR2h_PROMISC 3 /* Promiscuous mode. */
-/* An inline function used below: it differs from inb() by explicitly return an unsigned
- char, saving a truncation. */
+/* An inline function used below: it differs from inb() by explicitly
+ * returning an unsigned char, saving a truncation.
+ */
static inline unsigned char inbyte(unsigned short port)
{
- unsigned char _v;
- __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
- return _v;
+ unsigned char _v;
+
+ __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port));
+ return _v;
}
/* Read register OFFSET.
- This command should always be terminated with read_end(). */
+ * This command should always be terminated with read_end().
+ */
static inline unsigned char read_nibble(short port, unsigned char offset)
{
- unsigned char retval;
- outb(EOC+offset, port + PAR_DATA);
- outb(RdAddr+offset, port + PAR_DATA);
- inbyte(port + PAR_STATUS); /* Settling time delay */
- retval = inbyte(port + PAR_STATUS);
- outb(EOC+offset, port + PAR_DATA);
-
- return retval;
+ unsigned char retval;
+
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
}
/* Functions for bulk data read. The interrupt line is always disabled. */
/* Get a byte using read mode 0, reading data from the control lines. */
static inline unsigned char read_byte_mode0(short ioaddr)
{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
static inline unsigned char read_byte_mode2(short ioaddr)
{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* Read a byte through the data register. */
static inline unsigned char read_byte_mode4(short ioaddr)
{
- unsigned char low_nib;
+ unsigned char low_nib;
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* Read a byte through the data register, double reading to allow settling. */
static inline unsigned char read_byte_mode6(short ioaddr)
{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
static inline void
write_reg(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval;
- outb(EOC | reg, port + PAR_DATA);
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval &= 0xf0;
- outval |= value;
- outb(outval, port + PAR_DATA);
- outval &= 0x1f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | outval, port + PAR_DATA);
+ unsigned char outval;
+
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
}
static inline void
write_reg_high(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval = EOC | HNib | reg;
+ unsigned char outval = EOC | HNib | reg;
- outb(outval, port + PAR_DATA);
- outval &= WrAddr | HNib | 0x0f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
- outval = WrAddr | HNib | value;
- outb(outval, port + PAR_DATA);
- outval &= HNib | 0x0f; /* HNib | value */
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
- outb(EOC | HNib | outval, port + PAR_DATA);
+ outb(EOC | HNib | outval, port + PAR_DATA);
}
/* Write a byte out using nibble mode. The low nibble is written first. */
static inline void
write_reg_byte(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval;
- outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
- outb(value & 0x0f, port + PAR_DATA);
- value >>= 4;
- outb(value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
-
- outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+ unsigned char outval;
+
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
}
-/*
- * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+/* Bulk data writes to the packet buffer. The interrupt line remains enabled.
* The first, faster method uses only the dataport (data modes 0, 2 & 4).
* The second (backup) method uses data and control regs (modes 1, 3 & 5).
* It should only be needed when there is skew between the individual data
@@ -214,28 +222,28 @@ write_reg_byte(short port, unsigned char reg, unsigned char value)
*/
static inline void write_byte_mode0(short ioaddr, unsigned char value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
}
static inline void write_byte_mode1(short ioaddr, unsigned char value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
}
/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
static inline void write_word_mode0(short ioaddr, unsigned short value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
- value >>= 4;
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
}
/* EEPROM_Ctrl bits. */
@@ -248,10 +256,10 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
/* Delay between EEPROM clock transitions. */
#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
-#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
#define EE_CMD_SIZE 27 /* The command+address+data size. */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index cf154f74cba1..14a1c5cec3a5 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1377,6 +1377,16 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
return RTL_R8(IBISR0) & 0x02;
}
+static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
+ RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
@@ -1417,12 +1427,7 @@ static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
- RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
- RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+ rtl8168ep_stop_cmac(tp);
ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
@@ -5914,7 +5919,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
-static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
@@ -5934,7 +5939,6 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -5950,6 +5954,24 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_enable(tp, false);
}
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ static const struct ephy_info e_info_8168g_1[] = {
+ { 0x00, 0x0000, 0x0008 },
+ { 0x0c, 0x37d0, 0x0820 },
+ { 0x1e, 0x0000, 0x0001 },
+ { 0x19, 0x8000, 0x0000 }
+ };
+
+ rtl_hw_start_8168g(tp);
+
+ /* disable aspm and clock request before access ephy */
+ RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+}
+
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -5960,7 +5982,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{ 0x1e, 0xffff, 0x20eb }
};
- rtl_hw_start_8168g_1(tp);
+ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@@ -5979,7 +6001,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{ 0x1e, 0x0000, 0x2000 }
};
- rtl_hw_start_8168g_1(tp);
+ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@@ -6027,7 +6049,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -6091,6 +6112,8 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ rtl8168ep_stop_cmac(tp);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
@@ -6109,7 +6132,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -6601,6 +6623,9 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ /* Force memory writes to complete before releasing descriptor */
+ dma_wmb();
+
desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
@@ -6608,7 +6633,6 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
u32 rx_buf_sz)
{
desc->addr = cpu_to_le64(mapping);
- wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
@@ -6832,14 +6856,6 @@ err_out:
return -EIO;
}
-static bool rtl_skb_pad(struct sk_buff *skb)
-{
- if (skb_padto(skb, ETH_ZLEN))
- return false;
- skb_put(skb, ETH_ZLEN - skb->len);
- return true;
-}
-
static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
{
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
@@ -6980,7 +6996,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
u8 ip_protocol;
if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+ return !(skb_checksum_help(skb) || eth_skb_pad(skb));
if (transport_offset > TCPHO_MAX) {
netif_warn(tp, tx_err, tp->dev,
@@ -7015,7 +7031,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return rtl_skb_pad(skb);
+ return !eth_skb_pad(skb);
}
return true;
@@ -7077,16 +7093,18 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
- wmb();
+ /* Force memory writes to complete before releasing descriptor */
+ dma_wmb();
/* Anti gcc 2.95.3 bugware (sic) */
status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
- tp->cur_tx += frags + 1;
-
+ /* Force all memory writes to complete before notifying device */
wmb();
+ tp->cur_tx += frags + 1;
+
RTL_W8(TxPoll, NPQ);
mmiowb();
@@ -7185,11 +7203,16 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 status;
- rmb();
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the Tx descriptor until
+ * we know the status of DescOwn
+ */
+ dma_rmb();
+
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
@@ -7264,7 +7287,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
data = rtl8169_align(data);
dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
prefetch(data);
- skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
memcpy(skb->data, data, pkt_size);
dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
@@ -7284,11 +7307,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
- rmb();
status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
-
if (status & DescOwn)
break;
+
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the Rx descriptor until
+ * we know the status of DescOwn
+ */
+ dma_rmb();
+
if (unlikely(status & RxRES)) {
netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
status);
@@ -7350,7 +7378,6 @@ process_pkt:
}
release_descriptor:
desc->opts2 = 0;
- wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
@@ -8005,6 +8032,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
return;
}
+static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
+{
+ rtl8168ep_stop_cmac(tp);
+ rtl_hw_init_8168g(tp);
+}
+
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -8017,12 +8050,13 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ rtl_hw_init_8168g(tp);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- rtl_hw_init_8168g(tp);
+ rtl_hw_init_8168ep(tp);
break;
-
default:
break;
}
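
The r8169 hunks above swap several wmb()/rmb() calls for the lighter dma_wmb()/dma_rmb() around descriptor ownership, keeping a full wmb() only before the TxPoll doorbell write. A minimal sketch of the general pattern, assuming a kernel tree recent enough to provide dma_wmb()/dma_rmb(); the demo_desc layout and ownership bit are invented for illustration and are not the driver's actual structures:

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <asm/barrier.h>

    struct demo_desc {
        __le64 addr;
        __le32 len;
        __le32 opts1;
    };

    #define DEMO_DESC_OWN 0x80000000u   /* device owns the descriptor when set */

    /* Producer: fill the descriptor, then transfer ownership to the device. */
    static void demo_publish(struct demo_desc *d, dma_addr_t addr, u32 len)
    {
        d->addr = cpu_to_le64(addr);
        d->len  = cpu_to_le32(len);
        dma_wmb();                      /* payload writes visible before OWN */
        d->opts1 = cpu_to_le32(DEMO_DESC_OWN | len);
    }

    /* Consumer: only read the other fields once the device cleared OWN. */
    static bool demo_reap(const struct demo_desc *d, u32 *len)
    {
        u32 status = le32_to_cpu(d->opts1);

        if (status & DEMO_DESC_OWN)
            return false;
        dma_rmb();                      /* order the OWN check before data reads */
        *len = le32_to_cpu(d->len);
        return true;
    }
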
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b5db6b3f939f..c29ba80ae02b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,6 @@
/* SuperH Ethernet device driver
*
+ * Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2014 Renesas Solutions Corp.
* Copyright (C) 2013-2014 Cogent Embedded, Inc.
@@ -1136,7 +1137,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->addr = virt_to_phys(skb->data);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1387,11 +1388,14 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int entry = mdp->cur_rx % mdp->num_rx_ring;
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
+ int limit;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
desc_status = edmac_to_cpu(mdp, rxdesc->status);
@@ -1400,11 +1404,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
- if (*quota <= 0)
- break;
-
- (*quota)--;
-
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
@@ -1471,7 +1470,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
rxdesc->buffer_length, DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->addr = virt_to_phys(skb->data);
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1495,6 +1494,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
+ *quota -= limit - boguscnt - 1;
+
return *quota <= 0;
}
@@ -2746,6 +2747,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
{ }
@@ -2769,10 +2771,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev)
@@ -2781,8 +2779,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- /* The sh Ether-specific entries in the device structure. */
- ndev->base_addr = res->start;
devno = pdev->id;
if (devno < 0)
devno = 0;
@@ -2806,6 +2802,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
+ ndev->base_addr = res->start;
+
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
@@ -2887,6 +2885,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
}
}
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
@@ -2973,6 +2974,7 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
new file mode 100644
index 000000000000..b9952ef040e4
--- /dev/null
+++ b/drivers/net/ethernet/rocker/Kconfig
@@ -0,0 +1,27 @@
+#
+# Rocker device configuration
+#
+
+config NET_VENDOR_ROCKER
+ bool "Rocker devices"
+ default y
+ ---help---
+ If you have a network device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Rocker devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ROCKER
+
+config ROCKER
+ tristate "Rocker switch driver (EXPERIMENTAL)"
+ depends on PCI && NET_SWITCHDEV && BRIDGE
+ ---help---
+ This driver supports the Rocker switch device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocker.
+
+endif # NET_VENDOR_ROCKER
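
Per the help text above, the driver is typically enabled as a module; a config fragment satisfying the stated dependencies might look like the following (BRIDGE could equally be built as a module):

    # dependencies from the "depends on" line
    CONFIG_PCI=y
    CONFIG_BRIDGE=y
    CONFIG_NET_SWITCHDEV=y
    # vendor gate plus the driver itself, built as rocker.ko
    CONFIG_NET_VENDOR_ROCKER=y
    CONFIG_ROCKER=m
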
diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile
new file mode 100644
index 000000000000..f85fb12f36f1
--- /dev/null
+++ b/drivers/net/ethernet/rocker/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Rocker network device drivers.
+#
+
+obj-$(CONFIG_ROCKER) += rocker.o
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
new file mode 100644
index 000000000000..2f398fa4b9e6
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -0,0 +1,4375 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/hashtable.h>
+#include <linux/crc32.h>
+#include <linux/sort.h>
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <net/rtnetlink.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <generated/utsrelease.h>
+
+#include "rocker.h"
+
+static const char rocker_driver_name[] = "rocker";
+
+static const struct pci_device_id rocker_pci_id_table[] = {
+ {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
+ {0, }
+};
+
+struct rocker_flow_tbl_key {
+ u32 priority;
+ enum rocker_of_dpa_table_id tbl_id;
+ union {
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ } ig_port;
+ struct {
+ u32 in_lport;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool untagged;
+ __be16 new_vlan_id;
+ } vlan;
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ __be16 eth_type;
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool copy_to_cpu;
+ } term_mac;
+ struct {
+ __be16 eth_type;
+ __be32 dst4;
+ __be32 dst4_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ } ucast_routing;
+ struct {
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ int has_eth_dst;
+ int has_eth_dst_mask;
+ __be16 vlan_id;
+ u32 tunnel_id;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ bool copy_to_cpu;
+ } bridge;
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ u8 eth_src[ETH_ALEN];
+ u8 eth_src_mask[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 eth_type;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ u8 ip_proto;
+ u8 ip_proto_mask;
+ u8 ip_tos;
+ u8 ip_tos_mask;
+ u32 group_id;
+ } acl;
+ };
+};
+
+struct rocker_flow_tbl_entry {
+ struct hlist_node entry;
+ u32 ref_count;
+ u64 cookie;
+ struct rocker_flow_tbl_key key;
+ u32 key_crc32; /* key */
+};
+
+struct rocker_group_tbl_entry {
+ struct hlist_node entry;
+ u32 cmd;
+ u32 group_id; /* key */
+ u16 group_count;
+ u32 *group_ids;
+ union {
+ struct {
+ u8 pop_vlan;
+ } l2_interface;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ u32 group_id;
+ } l2_rewrite;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ bool ttl_check;
+ u32 group_id;
+ } l3_unicast;
+ };
+};
+
+struct rocker_fdb_tbl_entry {
+ struct hlist_node entry;
+ u32 key_crc32; /* key */
+ bool learned;
+ struct rocker_fdb_tbl_key {
+ u32 lport;
+ u8 addr[ETH_ALEN];
+ __be16 vlan_id;
+ } key;
+};
+
+struct rocker_internal_vlan_tbl_entry {
+ struct hlist_node entry;
+ int ifindex; /* key */
+ u32 ref_count;
+ __be16 vlan_id;
+};
+
+struct rocker_desc_info {
+ char *data; /* mapped */
+ size_t data_size;
+ size_t tlv_size;
+ struct rocker_desc *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+};
+
+struct rocker_dma_ring_info {
+ size_t size;
+ u32 head;
+ u32 tail;
+ struct rocker_desc *desc; /* mapped */
+ dma_addr_t mapaddr;
+ struct rocker_desc_info *desc_info;
+ unsigned int type;
+};
+
+struct rocker;
+
+enum {
+ ROCKER_CTRL_LINK_LOCAL_MCAST,
+ ROCKER_CTRL_LOCAL_ARP,
+ ROCKER_CTRL_IPV4_MCAST,
+ ROCKER_CTRL_IPV6_MCAST,
+ ROCKER_CTRL_DFLT_BRIDGING,
+ ROCKER_CTRL_MAX,
+};
+
+#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
+#define ROCKER_N_INTERNAL_VLANS 255
+#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
+#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
+
+struct rocker_port {
+ struct net_device *dev;
+ struct net_device *bridge_dev;
+ struct rocker *rocker;
+ unsigned int port_number;
+ u32 lport;
+ __be16 internal_vlan_id;
+ int stp_state;
+ u32 brport_flags;
+ bool ctrls[ROCKER_CTRL_MAX];
+ unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
+ struct napi_struct napi_tx;
+ struct napi_struct napi_rx;
+ struct rocker_dma_ring_info tx_ring;
+ struct rocker_dma_ring_info rx_ring;
+};
+
+struct rocker {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct msix_entry *msix_entries;
+ unsigned int port_count;
+ struct rocker_port **ports;
+ struct {
+ u64 id;
+ } hw;
+ spinlock_t cmd_ring_lock;
+ struct rocker_dma_ring_info cmd_ring;
+ struct rocker_dma_ring_info event_ring;
+ DECLARE_HASHTABLE(flow_tbl, 16);
+ spinlock_t flow_tbl_lock;
+ u64 flow_tbl_next_cookie;
+ DECLARE_HASHTABLE(group_tbl, 16);
+ spinlock_t group_tbl_lock;
+ DECLARE_HASHTABLE(fdb_tbl, 16);
+ spinlock_t fdb_tbl_lock;
+ unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
+ DECLARE_HASHTABLE(internal_vlan_tbl, 8);
+ spinlock_t internal_vlan_tbl_lock;
+};
+
+static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
+static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+/* Rocker priority levels for flow table entries. Higher
+ * priority match takes precedence over lower priority match.
+ */
+
+enum {
+ ROCKER_PRIORITY_UNKNOWN = 0,
+ ROCKER_PRIORITY_IG_PORT = 1,
+ ROCKER_PRIORITY_VLAN = 1,
+ ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
+ ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
+ ROCKER_PRIORITY_UNICAST_ROUTING = 1,
+ ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
+ ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
+ ROCKER_PRIORITY_BRIDGING_VLAN = 3,
+ ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
+ ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
+ ROCKER_PRIORITY_BRIDGING_TENANT = 3,
+ ROCKER_PRIORITY_ACL_CTRL = 3,
+ ROCKER_PRIORITY_ACL_NORMAL = 2,
+ ROCKER_PRIORITY_ACL_DFLT = 1,
+};
+
+static bool rocker_vlan_id_is_internal(__be16 vlan_id)
+{
+ u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
+ u16 end = 0xffe;
+ u16 _vlan_id = ntohs(vlan_id);
+
+ return (_vlan_id >= start && _vlan_id <= end);
+}
+
+static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+ u16 vid, bool *pop_vlan)
+{
+ __be16 vlan_id;
+
+ if (pop_vlan)
+ *pop_vlan = false;
+ vlan_id = htons(vid);
+ if (!vlan_id) {
+ vlan_id = rocker_port->internal_vlan_id;
+ if (pop_vlan)
+ *pop_vlan = true;
+ }
+
+ return vlan_id;
+}
+
+static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+ __be16 vlan_id)
+{
+ if (rocker_vlan_id_is_internal(vlan_id))
+ return 0;
+
+ return ntohs(vlan_id);
+}
+
+static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+{
+ return !!rocker_port->bridge_dev;
+}
+
+struct rocker_wait {
+ wait_queue_head_t wait;
+ bool done;
+ bool nowait;
+};
+
+static void rocker_wait_reset(struct rocker_wait *wait)
+{
+ wait->done = false;
+ wait->nowait = false;
+}
+
+static void rocker_wait_init(struct rocker_wait *wait)
+{
+ init_waitqueue_head(&wait->wait);
+ rocker_wait_reset(wait);
+}
+
+static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+{
+ struct rocker_wait *wait;
+
+ wait = kmalloc(sizeof(*wait), gfp);
+ if (!wait)
+ return NULL;
+ rocker_wait_init(wait);
+ return wait;
+}
+
+static void rocker_wait_destroy(struct rocker_wait *work)
+{
+ kfree(work);
+}
+
+static bool rocker_wait_event_timeout(struct rocker_wait *wait,
+ unsigned long timeout)
+{
+ wait_event_timeout(wait->wait, wait->done, timeout);
+ if (!wait->done)
+ return false;
+ return true;
+}
+
+static void rocker_wait_wake_up(struct rocker_wait *wait)
+{
+ wait->done = true;
+ wake_up(&wait->wait);
+}
+
+static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+{
+ return rocker->msix_entries[vector].vector;
+}
+
+static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_TX(rocker_port->port_number));
+}
+
+static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_RX(rocker_port->port_number));
+}
+
+#define rocker_write32(rocker, reg, val) \
+ writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read32(rocker, reg) \
+ readl((rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_write64(rocker, reg, val) \
+ writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read64(rocker, reg) \
+ readq((rocker)->hw_addr + (ROCKER_ ## reg))
+
+/*****************************
+ * HW basic testing functions
+ *****************************/
+
+static int rocker_reg_test(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ u64 test_reg;
+ u64 rnd;
+
+ rnd = prandom_u32();
+ rnd >>= 1;
+ rocker_write32(rocker, TEST_REG, rnd);
+ test_reg = rocker_read32(rocker, TEST_REG);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ rnd = prandom_u32();
+ rnd <<= 31;
+ rnd |= prandom_u32();
+ rocker_write64(rocker, TEST_REG64, rnd);
+ test_reg = rocker_read64(rocker, TEST_REG64);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
+ u32 test_type, dma_addr_t dma_handle,
+ unsigned char *buf, unsigned char *expect,
+ size_t size)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ rocker_wait_reset(wait);
+ rocker_write32(rocker, TEST_DMA_CTRL, test_type);
+
+ if (!rocker_wait_event_timeout(wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (buf[i] != expect[i]) {
+ dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
+ buf[i], i, expect[i]);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
+#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
+
+static int rocker_dma_test_offset(struct rocker *rocker,
+ struct rocker_wait *wait, int offset)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ unsigned char *alloc;
+ unsigned char *buf;
+ unsigned char *expect;
+ dma_addr_t dma_handle;
+ int i;
+ int err;
+
+ alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
+ GFP_KERNEL | GFP_DMA);
+ if (!alloc)
+ return -ENOMEM;
+ buf = alloc + offset;
+ expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
+
+ dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ err = -EIO;
+ goto free_alloc;
+ }
+
+ rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
+ rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
+
+ memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
+ expect[i] = ~buf[i];
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+unmap:
+ pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+free_alloc:
+ kfree(alloc);
+
+ return err;
+}
+
+static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < 8; i++) {
+ err = rocker_dma_test_offset(rocker, wait, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_wait *wait = dev_id;
+
+ rocker_wait_wake_up(wait);
+
+ return IRQ_HANDLED;
+}
+
+static int rocker_basic_hw_test(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_wait wait;
+ int err;
+
+ err = rocker_reg_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "reg test failed\n");
+ return err;
+ }
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
+ rocker_test_irq_handler, 0,
+ rocker_driver_name, &wait);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign test irq\n");
+ return err;
+ }
+
+ rocker_wait_init(&wait);
+ rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
+
+ if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ err = -EIO;
+ goto free_irq;
+ }
+
+ err = rocker_dma_test(rocker, &wait);
+ if (err)
+ dev_err(&pdev->dev, "dma test failed\n");
+
+free_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
+ return err;
+}
+
+/******
+ * TLV
+ ******/
+
+#define ROCKER_TLV_ALIGNTO 8U
+#define ROCKER_TLV_ALIGN(len) \
+ (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
+#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
+
+/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * | Header | Pad | Payload | Pad |
+ * | (struct rocker_tlv) | ing | | ing |
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * <--------------------------- tlv->len -------------------------->
+ */
+
+static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
+ int *remaining)
+{
+ int totlen = ROCKER_TLV_ALIGN(tlv->len);
+
+ *remaining -= totlen;
+ return (struct rocker_tlv *) ((char *) tlv + totlen);
+}
+
+static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
+{
+ return remaining >= (int) ROCKER_TLV_HDRLEN &&
+ tlv->len >= ROCKER_TLV_HDRLEN &&
+ tlv->len <= remaining;
+}
+
+#define rocker_tlv_for_each(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ rocker_tlv_ok(pos, rem); \
+ pos = rocker_tlv_next(pos, &(rem)))
+
+#define rocker_tlv_for_each_nested(pos, tlv, rem) \
+ rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
+ rocker_tlv_len(tlv), rem)
+
+static int rocker_tlv_attr_size(int payload)
+{
+ return ROCKER_TLV_HDRLEN + payload;
+}
+
+static int rocker_tlv_total_size(int payload)
+{
+ return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
+}
+
+static int rocker_tlv_padlen(int payload)
+{
+ return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
+}
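	/* Worked example (illustration only, not part of the patch), assuming the
	 * aligned header is 8 bytes (ROCKER_TLV_HDRLEN == 8). For a 6-byte payload:
	 *   rocker_tlv_attr_size(6)  = 8 + 6                = 14
	 *   rocker_tlv_total_size(6) = ROCKER_TLV_ALIGN(14) = 16
	 *   rocker_tlv_padlen(6)     = 16 - 14              = 2
	 * The put helpers further below store the unpadded 14 in tlv->len, while
	 * rocker_tlv_next() advances by the aligned 16.
	 */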
+
+static int rocker_tlv_type(const struct rocker_tlv *tlv)
+{
+ return tlv->type;
+}
+
+static void *rocker_tlv_data(const struct rocker_tlv *tlv)
+{
+ return (char *) tlv + ROCKER_TLV_HDRLEN;
+}
+
+static int rocker_tlv_len(const struct rocker_tlv *tlv)
+{
+ return tlv->len - ROCKER_TLV_HDRLEN;
+}
+
+static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
+{
+ return *(u8 *) rocker_tlv_data(tlv);
+}
+
+static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
+{
+ return *(u16 *) rocker_tlv_data(tlv);
+}
+
+static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
+{
+ return *(__be16 *) rocker_tlv_data(tlv);
+}
+
+static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
+{
+ return *(u32 *) rocker_tlv_data(tlv);
+}
+
+static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
+{
+ return *(u64 *) rocker_tlv_data(tlv);
+}
+
+static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+ const char *buf, int buf_len)
+{
+ const struct rocker_tlv *tlv;
+ const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
+ int rem;
+
+ memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
+
+ rocker_tlv_for_each(tlv, head, buf_len, rem) {
+ u32 type = rocker_tlv_type(tlv);
+
+ if (type > 0 && type <= maxtype)
+ tb[type] = (struct rocker_tlv *) tlv;
+ }
+}
+
+static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+ const struct rocker_tlv *tlv)
+{
+ rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
+ rocker_tlv_len(tlv));
+}
+
+static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
+ struct rocker_desc_info *desc_info)
+{
+ rocker_tlv_parse(tb, maxtype, desc_info->data,
+ desc_info->desc->tlv_size);
+}
+
+static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
+{
+ return (struct rocker_tlv *) ((char *) desc_info->data +
+ desc_info->tlv_size);
+}
+
+static int rocker_tlv_put(struct rocker_desc_info *desc_info,
+ int attrtype, int attrlen, const void *data)
+{
+ int tail_room = desc_info->data_size - desc_info->tlv_size;
+ int total_size = rocker_tlv_total_size(attrlen);
+ struct rocker_tlv *tlv;
+
+ if (unlikely(tail_room < total_size))
+ return -EMSGSIZE;
+
+ tlv = rocker_tlv_start(desc_info);
+ desc_info->tlv_size += total_size;
+ tlv->type = attrtype;
+ tlv->len = rocker_tlv_attr_size(attrlen);
+ memcpy(rocker_tlv_data(tlv), data, attrlen);
+ memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
+ return 0;
+}
+
+static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
+ int attrtype, u8 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+}
+
+static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
+ int attrtype, u16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+}
+
+static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
+ int attrtype, __be16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+}
+
+static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
+ int attrtype, u32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+}
+
+static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
+ int attrtype, __be32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+}
+
+static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
+ int attrtype, u64 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+}
+
+static struct rocker_tlv *
+rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
+{
+ struct rocker_tlv *start = rocker_tlv_start(desc_info);
+
+ if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
+ return NULL;
+
+ return start;
+}
+
+static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
+ struct rocker_tlv *start)
+{
+ start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
+}
+
+static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
+ struct rocker_tlv *start)
+{
+ desc_info->tlv_size = (char *) start - desc_info->data;
+}
+
+/******************************************
+ * DMA rings and descriptors manipulations
+ ******************************************/
+
+static u32 __pos_inc(u32 pos, size_t limit)
+{
+ return ++pos == limit ? 0 : pos;
+}
+
+static int rocker_desc_err(struct rocker_desc_info *desc_info)
+{
+ return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
+}
+
+static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+}
+
+static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+{
+ u32 comp_err = desc_info->desc->comp_err;
+
+ return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
+}
+
+static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+{
+ return (void *) desc_info->desc->cookie;
+}
+
+static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+ void *ptr)
+{
+ desc_info->desc->cookie = (long) ptr;
+}
+
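+/* Producer side: rocker_desc_head_get() reserves the next free descriptor
+ * (NULL if the ring is full) and rocker_desc_head_set() commits it and
+ * advances the hw head register. Consumer side: rocker_desc_tail_get()
+ * returns the next completed descriptor (gen bit set) or NULL.
+ */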
+static struct rocker_desc_info *
+rocker_desc_head_get(struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+ u32 head = __pos_inc(info->head, info->size);
+
+ desc_info = &info->desc_info[info->head];
+ if (head == info->tail)
+ return NULL; /* ring full */
+ desc_info->tlv_size = 0;
+ return desc_info;
+}
+
+static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->buf_size = desc_info->data_size;
+ desc_info->desc->tlv_size = desc_info->tlv_size;
+}
+
+static void rocker_desc_head_set(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ struct rocker_desc_info *desc_info)
+{
+ u32 head = __pos_inc(info->head, info->size);
+
+ BUG_ON(head == info->tail);
+ rocker_desc_commit(desc_info);
+ info->head = head;
+ rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
+}
+
+static struct rocker_desc_info *
+rocker_desc_tail_get(struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+
+ if (info->tail == info->head)
+ return NULL; /* nothing to be done between head and tail */
+ desc_info = &info->desc_info[info->tail];
+ if (!rocker_desc_gen(desc_info))
+ return NULL; /* gen bit not set, desc is not ready yet */
+ info->tail = __pos_inc(info->tail, info->size);
+ desc_info->tlv_size = desc_info->desc->tlv_size;
+ return desc_info;
+}
+
+static void rocker_dma_ring_credits_set(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ u32 credits)
+{
+ if (credits)
+ rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
+}
+
+static unsigned long rocker_dma_ring_size_fix(size_t size)
+{
+ return max(ROCKER_DMA_SIZE_MIN,
+ min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
+}
+
+static int rocker_dma_ring_create(struct rocker *rocker,
+ unsigned int type,
+ size_t size,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(size != rocker_dma_ring_size_fix(size));
+ info->size = size;
+ info->type = type;
+ info->head = 0;
+ info->tail = 0;
+ info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
+ GFP_KERNEL);
+ if (!info->desc_info)
+ return -ENOMEM;
+
+ info->desc = pci_alloc_consistent(rocker->pdev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr);
+ if (!info->desc) {
+ kfree(info->desc_info);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < info->size; i++)
+ info->desc_info[i].desc = &info->desc[i];
+
+ rocker_write32(rocker, DMA_DESC_CTRL(info->type),
+ ROCKER_DMA_DESC_CTRL_RESET);
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
+ rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
+
+ return 0;
+}
+
+static void rocker_dma_ring_destroy(struct rocker *rocker,
+ struct rocker_dma_ring_info *info)
+{
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
+
+ pci_free_consistent(rocker->pdev,
+ info->size * sizeof(struct rocker_desc),
+ info->desc, info->mapaddr);
+ kfree(info->desc_info);
+}
+
+static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(info->head || info->tail);
+
+ /* When the driver is the consumer on this ring (the hw produces, e.g.
+ * the event and rx rings), advance the head for each descriptor so the
+ * hw knows it may use it. The last descriptor is only committed, not
+ * advanced, because the head/tail scheme can track at most size - 1
+ * outstanding descriptors.
+ */
+ for (i = 0; i < info->size - 1; i++)
+ rocker_desc_head_set(rocker, info, &info->desc_info[i]);
+ rocker_desc_commit(&info->desc_info[i]);
+}
+
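+/* Give every descriptor in the ring its own DMA-mapped bounce buffer of
+ * buf_size bytes; TLVs are built in and parsed from these buffers.
+ */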
+static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ int direction, size_t buf_size)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+ int err;
+
+ for (i = 0; i < info->size; i++) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+ dma_addr_t dma_handle;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+
+ dma_handle = pci_map_single(pdev, buf, buf_size, direction);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ kfree(buf);
+ err = -EIO;
+ goto rollback;
+ }
+
+ desc_info->data = buf;
+ desc_info->data_size = buf_size;
+ dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
+
+ desc->buf_addr = dma_handle;
+ desc->buf_size = buf_size;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+ return err;
+}
+
+static void rocker_dma_ring_bufs_free(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ int direction)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ for (i = 0; i < info->size; i++) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+
+ desc->buf_addr = 0;
+ desc->buf_size = 0;
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+}
+
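+/* Device-global rings: a bidirectional command ring (requests prepared by
+ * the driver, completions written back by the hw) and a from-device event
+ * ring (link changes and learned MAC/VLAN notifications).
+ */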
+static int rocker_dma_rings_init(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
+ ROCKER_DMA_CMD_DEFAULT_SIZE,
+ &rocker->cmd_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create command dma ring\n");
+ return err;
+ }
+
+ spin_lock_init(&rocker->cmd_ring_lock);
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
+ goto err_dma_cmd_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
+ ROCKER_DMA_EVENT_DEFAULT_SIZE,
+ &rocker->event_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create event dma ring\n");
+ goto err_dma_event_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
+ goto err_dma_event_ring_bufs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
+ return 0;
+
+err_dma_event_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+err_dma_event_ring_create:
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_cmd_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+ return err;
+}
+
+static void rocker_dma_rings_fini(struct rocker *rocker)
+{
+ rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+}
+
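+/* Rx descriptors do not carry packet data directly; the driver maps an skb
+ * and advertises its DMA address and length to the hw via the
+ * ROCKER_TLV_RX_FRAG_ADDR / ROCKER_TLV_RX_FRAG_MAX_LEN attributes.
+ */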
+static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ struct sk_buff *skb, size_t buf_len)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+
+ dma_handle = pci_map_single(pdev, skb->data, buf_len,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, dma_handle))
+ return -EIO;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
+ goto tlv_put_failure;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
+ goto tlv_put_failure;
+ return 0;
+
+tlv_put_failure:
+ pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ desc_info->tlv_size = 0;
+ return -EMSGSIZE;
+}
+
+static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+{
+ return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
+static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct net_device *dev = rocker_port->dev;
+ struct sk_buff *skb;
+ size_t buf_len = rocker_port_rx_buf_len(rocker_port);
+ int err;
+
+ /* Ensure that the hw will see a tlv_size of zero in case of an error.
+ * That tells the hw to use another descriptor.
+ */
+ rocker_desc_cookie_ptr_set(desc_info, NULL);
+ desc_info->tlv_size = 0;
+
+ skb = netdev_alloc_skb_ip_align(dev, buf_len);
+ if (!skb)
+ return -ENOMEM;
+ err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
+ skb, buf_len);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return err;
+ }
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+ return 0;
+}
+
+static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
+ struct rocker_tlv **attrs)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
+ !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
+ return;
+ dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
+ len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+}
+
+static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+
+ if (!skb)
+ return;
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+ dev_kfree_skb_any(skb);
+}
+
+static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ int i;
+ int err;
+
+ for (i = 0; i < rx_ring->size; i++) {
+ err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ &rx_ring->desc_info[i]);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+ return err;
+}
+
+static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ int i;
+
+ for (i = 0; i < rx_ring->size; i++)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+}
+
+static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ int err;
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_TX(rocker_port->port_number),
+ ROCKER_DMA_TX_DEFAULT_SIZE,
+ &rocker_port->tx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
+ return err;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE,
+ ROCKER_DMA_TX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
+ goto err_dma_tx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_RX(rocker_port->port_number),
+ ROCKER_DMA_RX_DEFAULT_SIZE,
+ &rocker_port->rx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
+ goto err_dma_rx_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL,
+ ROCKER_DMA_RX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
+ goto err_dma_rx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
+ goto err_dma_rx_ring_skbs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
+
+ return 0;
+
+err_dma_rx_ring_skbs_alloc:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_rx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+err_dma_rx_ring_create:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+err_dma_tx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+ return err;
+}
+
+static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+
+ rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+}
+
+static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+{
+ u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
+
+ if (enable)
+ val |= 1ULL << rocker_port->lport;
+ else
+ val &= ~(1ULL << rocker_port->lport);
+ rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
+}
+
+/********************************
+ * Interrupt handlers and helpers
+ ********************************/
+
+static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ u32 credits = 0;
+
+ spin_lock(&rocker->cmd_ring_lock);
+ while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
+ wait = rocker_desc_cookie_ptr_get(desc_info);
+ if (wait->nowait) {
+ rocker_desc_gen_clear(desc_info);
+ rocker_wait_destroy(wait);
+ } else {
+ rocker_wait_wake_up(wait);
+ }
+ credits++;
+ }
+ spin_unlock(&rocker->cmd_ring_lock);
+ rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static void rocker_port_link_up(struct rocker_port *rocker_port)
+{
+ netif_carrier_on(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is up\n");
+}
+
+static void rocker_port_link_down(struct rocker_port *rocker_port)
+{
+ netif_carrier_off(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is down\n");
+}
+
+static int rocker_event_link_change(struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ unsigned int port_number;
+ bool link_up;
+ struct rocker_port *rocker_port;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
+ !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
+ link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+ if (netif_carrier_ok(rocker_port->dev) != link_up) {
+ if (link_up)
+ rocker_port_link_up(rocker_port);
+ else
+ rocker_port_link_down(rocker_port);
+ }
+
+ return 0;
+}
+
+#define ROCKER_OP_FLAG_REMOVE BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT BIT(1)
+#define ROCKER_OP_FLAG_LEARNED BIT(2)
+#define ROCKER_OP_FLAG_REFRESH BIT(3)
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id, int flags);
+
+static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ unsigned int port_number;
+ struct rocker_port *rocker_port;
+ unsigned char *addr;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+ __be16 vlan_id;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
+ addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
+ vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+
+ if (rocker_port->stp_state != BR_STATE_LEARNING &&
+ rocker_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_event_process(struct rocker *rocker,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ struct rocker_tlv *info;
+ u16 type;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
+ !attrs[ROCKER_TLV_EVENT_INFO])
+ return -EIO;
+
+ type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
+ info = attrs[ROCKER_TLV_EVENT_INFO];
+
+ switch (type) {
+ case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
+ return rocker_event_link_change(rocker, info);
+ case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
+ return rocker_event_mac_vlan_seen(rocker, info);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ dev_err(&pdev->dev, "event desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_event_process(rocker, desc_info);
+ if (err)
+ dev_err(&pdev->dev, "event processing failed with err %d\n",
+ err);
+ }
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
+ credits++;
+ }
+ rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_tx);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_rx);
+ return IRQ_HANDLED;
+}
+
+/********************
+ * Command interface
+ ********************/
+
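+/* A command is executed by pairing a "prepare" callback, which fills the
+ * descriptor with TLVs, with an optional "process" callback, which parses
+ * the completion. With nowait set the caller does not block and the
+ * completed descriptor is reaped in the command ring IRQ handler instead.
+ */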
+typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
+static int rocker_cmd_exec(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ rocker_cmd_cb_t prepare, void *prepare_priv,
+ rocker_cmd_cb_t process, void *process_priv,
+ bool nowait)
+{
+ struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ unsigned long flags;
+ int err;
+
+ wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+ wait->nowait = nowait;
+
+ spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+ desc_info = rocker_desc_head_get(&rocker->cmd_ring);
+ if (!desc_info) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ err = -EAGAIN;
+ goto out;
+ }
+ err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+ if (err) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ goto out;
+ }
+ rocker_desc_cookie_ptr_set(desc_info, wait);
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+ if (nowait)
+ return 0;
+
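+ /* Note: on timeout the wait object is left allocated; the hw may still
+ * complete the descriptor later and the cmd IRQ handler would then
+ * reference it.
+ */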
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
+
+ err = rocker_desc_err(desc_info);
+ if (err)
+ goto out;
+
+ if (process)
+ err = process(rocker, rocker_port, desc_info, process_priv);
+
+ rocker_desc_gen_clear(desc_info);
+out:
+ rocker_wait_destroy(wait);
+ return err;
+}
+
+static int
+rocker_cmd_get_port_settings_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ u32 speed;
+ u8 duplex;
+ u8 autoneg;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
+ return -EIO;
+
+ speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
+ duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
+ autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->phy_address = 0xff;
+ ecmd->port = PORT_TP;
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ unsigned char *macaddr = priv;
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ struct rocker_tlv *attr;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
+ if (!attr)
+ return -EIO;
+
+ if (rocker_tlv_len(attr) != ETH_ALEN)
+ return -EINVAL;
+
+ ether_addr_copy(macaddr, rocker_tlv_data(attr));
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
+ ethtool_cmd_speed(ecmd)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
+ ecmd->duplex))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
+ ecmd->autoneg))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ unsigned char *macaddr = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
+ ETH_ALEN, macaddr))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_learning_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
+ !!(rocker_port->brport_flags & BR_LEARNING)))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_ethtool_proc,
+ ecmd, false);
+}
+
+static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_macaddr_proc,
+ macaddr, false);
+}
+
+static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_settings_ethtool_prep,
+ ecmd, NULL, NULL, false);
+}
+
+static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_settings_macaddr_prep,
+ macaddr, NULL, NULL, false);
+}
+
+static int rocker_port_set_learning(struct rocker_port *rocker_port)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_learning_prep,
+ NULL, NULL, NULL, false);
+}
+
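+/* Per-table helpers for ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD: each one emits
+ * the key and action TLVs specific to its OF-DPA table.
+ */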
+static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.ig_port.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.ig_port.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ig_port.goto_tbl))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.vlan.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.vlan.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.vlan.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.vlan.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.vlan.untagged &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
+ entry->key.vlan.new_vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.term_mac.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.term_mac.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.term_mac.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.term_mac.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.term_mac.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.term_mac.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.term_mac.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.term_mac.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.term_mac.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.term_mac.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.ucast_routing.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
+ entry->key.ucast_routing.dst4))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
+ entry->key.ucast_routing.dst4_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ucast_routing.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.ucast_routing.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (entry->key.bridge.has_eth_dst &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.bridge.eth_dst))
+ return -EMSGSIZE;
+ if (entry->key.bridge.has_eth_dst_mask &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.bridge.eth_dst_mask))
+ return -EMSGSIZE;
+ if (entry->key.bridge.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.bridge.vlan_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.tunnel_id &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
+ entry->key.bridge.tunnel_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.bridge.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.bridge.group_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.bridge.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.acl.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.acl.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->key.acl.eth_src))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_src_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.acl.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.acl.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.acl.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.acl.vlan_id_mask))
+ return -EMSGSIZE;
+
+ switch (ntohs(entry->key.acl.eth_type)) {
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
+ entry->key.acl.ip_proto))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
+ entry->key.acl.ip_proto_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
+ entry->key.acl.ip_tos & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
+ entry->key.acl.ip_tos_mask & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
+ (entry->key.acl.ip_tos & 0xc0) >> 6))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK,
+ (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
+ return -EMSGSIZE;
+ break;
+ }
+
+ if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.acl.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
+ entry->key.tbl_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
+ entry->key.priority))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+
+ switch (entry->key.tbl_id) {
+ case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
+ err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_VLAN:
+ err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
+ err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
+ err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
+ err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
+ err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
+ ROCKER_GROUP_PORT_GET(entry->group_id)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
+ entry->l2_interface.pop_vlan))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l2_rewrite.group_id))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l2_rewrite.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l2_rewrite.vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ int i;
+ struct rocker_tlv *group_ids;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
+ entry->group_count))
+ return -EMSGSIZE;
+
+ group_ids = rocker_tlv_nest_start(desc_info,
+ ROCKER_TLV_OF_DPA_GROUP_IDS);
+ if (!group_ids)
+ return -EMSGSIZE;
+
+ for (i = 0; i < entry->group_count; i++)
+ /* Note TLV array is 1-based */
+ if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
+ return -EMSGSIZE;
+
+ rocker_tlv_nest_end(desc_info, group_ids);
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l3_unicast.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l3_unicast.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
+ entry->l3_unicast.ttl_check))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l3_unicast.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_group_tbl_add(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
+ err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
+ err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
+ err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int rocker_cmd_group_tbl_del(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+/*****************************************
+ * Flow, group, FDB, internal VLAN tables
+ *****************************************/
+
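+/* Software shadows of the hw tables, kept in hashtables so that reference
+ * counting and add/modify/delete decisions can be made before a command is
+ * issued to the hw.
+ */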
+static int rocker_init_tbls(struct rocker *rocker)
+{
+ hash_init(rocker->flow_tbl);
+ spin_lock_init(&rocker->flow_tbl_lock);
+
+ hash_init(rocker->group_tbl);
+ spin_lock_init(&rocker->group_tbl_lock);
+
+ hash_init(rocker->fdb_tbl);
+ spin_lock_init(&rocker->fdb_tbl_lock);
+
+ hash_init(rocker->internal_vlan_tbl);
+ spin_lock_init(&rocker->internal_vlan_tbl_lock);
+
+ return 0;
+}
+
+static void rocker_free_tbls(struct rocker *rocker)
+{
+ unsigned long flags;
+ struct rocker_flow_tbl_entry *flow_entry;
+ struct rocker_group_tbl_entry *group_entry;
+ struct rocker_fdb_tbl_entry *fdb_entry;
+ struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
+ hash_del(&flow_entry->entry);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
+ hash_del(&group_entry->entry);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
+ hash_del(&fdb_entry->entry);
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
+ hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
+ tmp, internal_vlan_entry, entry)
+ hash_del(&internal_vlan_entry->entry);
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+}
+
+static struct rocker_flow_tbl_entry *
+rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+{
+ struct rocker_flow_tbl_entry *found;
+
+ hash_for_each_possible(rocker->flow_tbl, found,
+ entry, match->key_crc32) {
+ if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ return found;
+ }
+
+ return NULL;
+}
+
+static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
+ struct rocker_flow_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_flow_tbl_entry *found;
+ unsigned long flags;
+ bool add_to_hw = false;
+ int err = 0;
+
+ match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+ found = rocker_flow_tbl_find(rocker, match);
+
+ if (found) {
+ kfree(match);
+ } else {
+ found = match;
+ found->cookie = rocker->flow_tbl_next_cookie++;
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+ add_to_hw = true;
+ }
+
+ found->ref_count++;
+
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ if (add_to_hw) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_flow_tbl_add,
+ found, NULL, NULL, nowait);
+ if (err) {
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ hash_del(&found->entry);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ kfree(found);
+ }
+ }
+
+ return err;
+}
+
+static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
+ struct rocker_flow_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_flow_tbl_entry *found;
+ unsigned long flags;
+ bool del_from_hw = false;
+ int err = 0;
+
+ match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+ found = rocker_flow_tbl_find(rocker, match);
+
+ if (found) {
+ found->ref_count--;
+ if (found->ref_count == 0) {
+ hash_del(&found->entry);
+ del_from_hw = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ kfree(match);
+
+ if (del_from_hw) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_flow_tbl_del,
+ found, NULL, NULL, nowait);
+ kfree(found);
+ }
+
+ return err;
+}
+
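+/* NOWAIT operations may run in atomic context (for example from the event
+ * IRQ handler when a learned MAC/VLAN is seen), hence GFP_ATOMIC.
+ */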
+static gfp_t rocker_op_flags_gfp(int flags)
+{
+ return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
+ int flags, struct rocker_flow_tbl_entry *entry)
+{
+ bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+ if (flags & ROCKER_OP_FLAG_REMOVE)
+ return rocker_flow_tbl_del(rocker_port, entry, nowait);
+ else
+ return rocker_flow_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
+ int flags, u32 in_lport, u32 in_lport_mask,
+ enum rocker_of_dpa_table_id goto_tbl)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = ROCKER_PRIORITY_IG_PORT;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
+ entry->key.ig_port.in_lport = in_lport;
+ entry->key.ig_port.in_lport_mask = in_lport_mask;
+ entry->key.ig_port.goto_tbl = goto_tbl;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
+ int flags, u32 in_lport,
+ __be16 vlan_id, __be16 vlan_id_mask,
+ enum rocker_of_dpa_table_id goto_tbl,
+ bool untagged, __be16 new_vlan_id)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = ROCKER_PRIORITY_VLAN;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
+ entry->key.vlan.in_lport = in_lport;
+ entry->key.vlan.vlan_id = vlan_id;
+ entry->key.vlan.vlan_id_mask = vlan_id_mask;
+ entry->key.vlan.goto_tbl = goto_tbl;
+
+ entry->key.vlan.untagged = untagged;
+ entry->key.vlan.new_vlan_id = new_vlan_id;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+ u32 in_lport, u32 in_lport_mask,
+ __be16 eth_type, const u8 *eth_dst,
+ const u8 *eth_dst_mask, __be16 vlan_id,
+ __be16 vlan_id_mask, bool copy_to_cpu,
+ int flags)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ if (is_multicast_ether_addr(eth_dst)) {
+ entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
+ } else {
+ entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+ }
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ entry->key.term_mac.in_lport = in_lport;
+ entry->key.term_mac.in_lport_mask = in_lport_mask;
+ entry->key.term_mac.eth_type = eth_type;
+ ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
+ ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
+ entry->key.term_mac.vlan_id = vlan_id;
+ entry->key.term_mac.vlan_id_mask = vlan_id_mask;
+ entry->key.term_mac.copy_to_cpu = copy_to_cpu;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
+ int flags,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 vlan_id, u32 tunnel_id,
+ enum rocker_of_dpa_table_id goto_tbl,
+ u32 group_id, bool copy_to_cpu)
+{
+ struct rocker_flow_tbl_entry *entry;
+ u32 priority;
+ bool vlan_bridging = !!vlan_id;
+ bool dflt = !eth_dst || eth_dst_mask;
+ bool wild = false;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
+
+ if (eth_dst) {
+ entry->key.bridge.has_eth_dst = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
+ }
+ if (eth_dst_mask) {
+ entry->key.bridge.has_eth_dst_mask = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
+ if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+ wild = true;
+ }
+
+ priority = ROCKER_PRIORITY_UNKNOWN;
+ if (vlan_bridging && dflt && wild)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
+ else if (vlan_bridging && dflt && !wild)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
+ else if (vlan_bridging && !dflt)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN;
+ else if (!vlan_bridging && dflt && wild)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
+ else if (!vlan_bridging && dflt && !wild)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
+ else if (!vlan_bridging && !dflt)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT;
+
+ entry->key.priority = priority;
+ entry->key.bridge.vlan_id = vlan_id;
+ entry->key.bridge.tunnel_id = tunnel_id;
+ entry->key.bridge.goto_tbl = goto_tbl;
+ entry->key.bridge.group_id = group_id;
+ entry->key.bridge.copy_to_cpu = copy_to_cpu;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
+ int flags, u32 in_lport,
+ u32 in_lport_mask,
+ const u8 *eth_src, const u8 *eth_src_mask,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 eth_type,
+ __be16 vlan_id, __be16 vlan_id_mask,
+ u8 ip_proto, u8 ip_proto_mask,
+ u8 ip_tos, u8 ip_tos_mask,
+ u32 group_id)
+{
+ u32 priority;
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ priority = ROCKER_PRIORITY_ACL_NORMAL;
+ if (eth_dst && eth_dst_mask) {
+ if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+ priority = ROCKER_PRIORITY_ACL_DFLT;
+ else if (is_link_local_ether_addr(eth_dst))
+ priority = ROCKER_PRIORITY_ACL_CTRL;
+ }
+
+ entry->key.priority = priority;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ entry->key.acl.in_lport = in_lport;
+ entry->key.acl.in_lport_mask = in_lport_mask;
+
+ if (eth_src)
+ ether_addr_copy(entry->key.acl.eth_src, eth_src);
+ if (eth_src_mask)
+ ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
+ if (eth_dst)
+ ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
+ if (eth_dst_mask)
+ ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
+
+ entry->key.acl.eth_type = eth_type;
+ entry->key.acl.vlan_id = vlan_id;
+ entry->key.acl.vlan_id_mask = vlan_id_mask;
+ entry->key.acl.ip_proto = ip_proto;
+ entry->key.acl.ip_proto_mask = ip_proto_mask;
+ entry->key.acl.ip_tos = ip_tos;
+ entry->key.acl.ip_tos_mask = ip_tos_mask;
+ entry->key.acl.group_id = group_id;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_group_tbl_entry *
+rocker_group_tbl_find(struct rocker *rocker,
+ struct rocker_group_tbl_entry *match)
+{
+ struct rocker_group_tbl_entry *found;
+
+ hash_for_each_possible(rocker->group_tbl, found,
+ entry, match->group_id) {
+ if (found->group_id == match->group_id)
+ return found;
+ }
+
+ return NULL;
+}
+
+static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+{
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ kfree(entry->group_ids);
+ break;
+ default:
+ break;
+ }
+ kfree(entry);
+}
+
+static int rocker_group_tbl_add(struct rocker_port *rocker_port,
+ struct rocker_group_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_group_tbl_entry *found;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+ found = rocker_group_tbl_find(rocker, match);
+
+ if (found) {
+ hash_del(&found->entry);
+ rocker_group_tbl_entry_free(found);
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
+ } else {
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
+ }
+
+ hash_add(rocker->group_tbl, &found->entry, found->group_id);
+
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ if (found->cmd)
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_group_tbl_add,
+ found, NULL, NULL, nowait);
+
+ return err;
+}
+
+static int rocker_group_tbl_del(struct rocker_port *rocker_port,
+ struct rocker_group_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_group_tbl_entry *found;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+ found = rocker_group_tbl_find(rocker, match);
+
+ if (found) {
+ hash_del(&found->entry);
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
+ }
+
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ rocker_group_tbl_entry_free(match);
+
+ if (found) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_group_tbl_del,
+ found, NULL, NULL, nowait);
+ rocker_group_tbl_entry_free(found);
+ }
+
+ return err;
+}
+
+static int rocker_group_tbl_do(struct rocker_port *rocker_port,
+ int flags, struct rocker_group_tbl_entry *entry)
+{
+ bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+ if (flags & ROCKER_OP_FLAG_REMOVE)
+ return rocker_group_tbl_del(rocker_port, entry, nowait);
+ else
+ return rocker_group_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_group_l2_interface(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ u32 out_lport, int pop_vlan)
+{
+ struct rocker_group_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ entry->l2_interface.pop_vlan = pop_vlan;
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+ int flags, u8 group_count,
+ u32 *group_ids, u32 group_id)
+{
+ struct rocker_group_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = group_id;
+ entry->group_count = group_count;
+
+ entry->group_ids = kcalloc(group_count, sizeof(u32),
+ rocker_op_flags_gfp(flags));
+ if (!entry->group_ids) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_flood(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ u8 group_count, u32 *group_ids,
+ u32 group_id)
+{
+ return rocker_group_l2_fan_out(rocker_port, flags,
+ group_count, group_ids,
+ group_id);
+}
+
+static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ struct rocker_port *p;
+ struct rocker *rocker = rocker_port->rocker;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 group_ids[rocker->port_count];
+ u8 group_count = 0;
+ int err;
+ int i;
+
+ /* Adjust the flood group for this VLAN. The flood group
+ * references an L2 interface group for each port in this
+ * VLAN.
+ */
+
+ for (i = 0; i < rocker->port_count; i++) {
+ p = rocker->ports[i];
+ if (!rocker_port_is_bridged(p))
+ continue;
+ if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
+ group_ids[group_count++] =
+ ROCKER_GROUP_L2_INTERFACE(vlan_id,
+ p->lport);
+ }
+ }
+
+ /* If there are no bridged ports in this VLAN, we're done */
+ if (group_count == 0)
+ return 0;
+
+ err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
+ group_count, group_ids,
+ group_id);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 flood group\n", err);
+
+ return err;
+}
+
+static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ bool pop_vlan)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_port *p;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ u32 out_lport;
+ int ref = 0;
+ int err;
+ int i;
+
+ /* An L2 interface group for this port in this VLAN, but
+ * only when port STP state is LEARNING|FORWARDING.
+ */
+
+ if (rocker_port->stp_state == BR_STATE_LEARNING ||
+ rocker_port->stp_state == BR_STATE_FORWARDING) {
+ out_lport = rocker_port->lport;
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for lport %d\n",
+ err, out_lport);
+ return err;
+ }
+ }
+
+ /* An L2 interface group for this VLAN to CPU port.
+ * Add when first port joins this VLAN and destroy when
+ * last port leaves this VLAN.
+ */
+
+ for (i = 0; i < rocker->port_count; i++) {
+ p = rocker->ports[i];
+ if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ ref++;
+ }
+
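+ /* Proceed only when the first port joins this VLAN (adding && ref == 1)
+ * or the last port leaves it (!adding && ref == 0).
+ */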
+ if ((!adding || ref != 1) && (adding || ref != 0))
+ return 0;
+
+ out_lport = 0;
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for CPU port\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
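+/* Control traffic classes that every port needs regardless of user
+ * configuration; each entry selects whether the trap/copy-to-CPU behaviour
+ * is implemented via the ACL, bridging (flood) or termination MAC table.
+ */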
+static struct rocker_ctrl {
+ const u8 *eth_dst;
+ const u8 *eth_dst_mask;
+ __be16 eth_type;
+ bool acl;
+ bool bridge;
+ bool term;
+ bool copy_to_cpu;
+} rocker_ctrls[] = {
+ [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
+ /* pass link local multicast pkts up to CPU for filtering */
+ .eth_dst = ll_mac,
+ .eth_dst_mask = ll_mask,
+ .acl = true,
+ },
+ [ROCKER_CTRL_LOCAL_ARP] = {
+ /* pass local ARP pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .eth_type = htons(ETH_P_ARP),
+ .acl = true,
+ },
+ [ROCKER_CTRL_IPV4_MCAST] = {
+ /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
+ .eth_dst = ipv4_mcast,
+ .eth_dst_mask = ipv4_mask,
+ .eth_type = htons(ETH_P_IP),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [ROCKER_CTRL_IPV6_MCAST] = {
+ /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
+ .eth_dst = ipv6_mcast,
+ .eth_dst_mask = ipv6_mask,
+ .eth_type = htons(ETH_P_IPV6),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [ROCKER_CTRL_DFLT_BRIDGING] = {
+ /* flood any pkts on vlan */
+ .bridge = true,
+ .copy_to_cpu = true,
+ },
+};
+
+static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ u32 in_lport = rocker_port->lport;
+ u32 in_lport_mask = 0xffffffff;
+ u32 out_lport = 0;
+ u8 *eth_src = NULL;
+ u8 *eth_src_mask = NULL;
+ __be16 vlan_id_mask = htons(0xffff);
+ u8 ip_proto = 0;
+ u8 ip_proto_mask = 0;
+ u8 ip_tos = 0;
+ u8 ip_tos_mask = 0;
+ u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ int err;
+
+ err = rocker_flow_tbl_acl(rocker_port, flags,
+ in_lport, in_lport_mask,
+ eth_src, eth_src_mask,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ ctrl->eth_type,
+ vlan_id, vlan_id_mask,
+ ip_proto, ip_proto_mask,
+ ip_tos, ip_tos_mask,
+ group_id);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 tunnel_id = 0;
+ int err;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return 0;
+
+ err = rocker_flow_tbl_bridge(rocker_port, flags,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ vlan_id, tunnel_id,
+ goto_tbl, group_id, ctrl->copy_to_cpu);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ u32 in_lport_mask = 0xffffffff;
+ __be16 vlan_id_mask = htons(0xffff);
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = rocker_port->internal_vlan_id;
+
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ ctrl->eth_type, ctrl->eth_dst,
+ ctrl->eth_dst_mask, vlan_id,
+ vlan_id_mask, ctrl->copy_to_cpu,
+ flags);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
+ struct rocker_ctrl *ctrl, __be16 vlan_id)
+{
+ if (ctrl->acl)
+ return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+ ctrl, vlan_id);
+ if (ctrl->bridge)
+ return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+ ctrl, vlan_id);
+
+ if (ctrl->term)
+ return rocker_port_ctrl_vlan_term(rocker_port, flags,
+ ctrl, vlan_id);
+
+ return -EOPNOTSUPP;
+}
+
+static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+ if (rocker_port->ctrls[i]) {
+ err = rocker_port_ctrl_vlan(rocker_port, flags,
+ &rocker_ctrls[i], vlan_id);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
+ struct rocker_ctrl *ctrl)
+{
+ u16 vid;
+ int err = 0;
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, rocker_port->vlan_bitmap))
+ continue;
+ err = rocker_port_ctrl_vlan(rocker_port, flags,
+ ctrl, htons(vid));
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
+ u16 vid)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ u32 in_lport = rocker_port->lport;
+ __be16 vlan_id = htons(vid);
+ __be16 vlan_id_mask = htons(0xffff);
+ __be16 internal_vlan_id;
+ bool untagged;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ int err;
+
+ internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
+
+ if (adding && test_and_set_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
+ return 0; /* already added */
+ else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
+ return 0; /* already removed */
+
+ if (adding) {
+ err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port ctrl vlan add\n", err);
+ return err;
+ }
+ }
+
+ err = rocker_port_vlan_l2_groups(rocker_port, flags,
+ internal_vlan_id, untagged);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 groups\n", err);
+ return err;
+ }
+
+ err = rocker_port_vlan_flood_group(rocker_port, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 flood group\n", err);
+ return err;
+ }
+
+ err = rocker_flow_tbl_vlan(rocker_port, flags,
+ in_lport, vlan_id, vlan_id_mask,
+ goto_tbl, untagged, internal_vlan_id);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN table\n", err);
+
+ return err;
+}
+
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+{
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 in_lport;
+ u32 in_lport_mask;
+ int err;
+
+ /* Normal Ethernet Frames. Matches pkts from any local physical
+ * ports. Goto VLAN tbl.
+ */
+
+ in_lport = 0;
+ in_lport_mask = 0xffff0000;
+ goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
+
+ err = rocker_flow_tbl_ig_port(rocker_port, flags,
+ in_lport, in_lport_mask,
+ goto_tbl);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) ingress port table entry\n", err);
+
+ return err;
+}
+
+struct rocker_fdb_learn_work {
+ struct work_struct work;
+ struct net_device *dev;
+ int flags;
+ u8 addr[ETH_ALEN];
+ u16 vid;
+};
+
+static void rocker_port_fdb_learn_work(struct work_struct *work)
+{
+ struct rocker_fdb_learn_work *lw =
+ container_of(work, struct rocker_fdb_learn_work, work);
+ bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
+ bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
+
+ if (learned && removing)
+ br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
+ else if (learned && !removing)
+ br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
+
+ kfree(work);
+}
+
+static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
+ int flags, const u8 *addr, __be16 vlan_id)
+{
+ struct rocker_fdb_learn_work *lw;
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 out_lport = rocker_port->lport;
+ u32 tunnel_id = 0;
+ u32 group_id = ROCKER_GROUP_NONE;
+ bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (rocker_port_is_bridged(rocker_port))
+ group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+
+ if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
+ err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
+ vlan_id, tunnel_id, goto_tbl,
+ group_id, copy_to_cpu);
+ if (err)
+ return err;
+ }
+
+ if (!syncing)
+ return 0;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return 0;
+
+ lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+ if (!lw)
+ return -ENOMEM;
+
+ INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
+
+ lw->dev = rocker_port->dev;
+ lw->flags = flags;
+ ether_addr_copy(lw->addr, addr);
+ lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
+
+ schedule_work(&lw->work);
+
+ return 0;
+}
+
+static struct rocker_fdb_tbl_entry *
+rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+{
+ struct rocker_fdb_tbl_entry *found;
+
+ hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
+ if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ return found;
+
+ return NULL;
+}
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id, int flags)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *fdb;
+ struct rocker_fdb_tbl_entry *found;
+ bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
+ unsigned long lock_flags;
+
+ fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+ if (!fdb)
+ return -ENOMEM;
+
+ fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
+ fdb->key.lport = rocker_port->lport;
+ ether_addr_copy(fdb->key.addr, addr);
+ fdb->key.vlan_id = vlan_id;
+ fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ found = rocker_fdb_tbl_find(rocker, fdb);
+
+ if (removing && found) {
+ kfree(fdb);
+ hash_del(&found->entry);
+ } else if (!removing && !found) {
+ hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ }
+
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ /* Check if adding and already exists, or removing and can't find */
+ if (!found != !removing) {
+ kfree(fdb);
+ if (!found && removing)
+ return 0;
+ /* Refreshing existing to update aging timers */
+ flags |= ROCKER_OP_FLAG_REFRESH;
+ }
+
+ return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+}
+
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *found;
+ unsigned long lock_flags;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ struct hlist_node *tmp;
+ int bkt;
+ int err = 0;
+
+ if (rocker_port->stp_state == BR_STATE_LEARNING ||
+ rocker_port->stp_state == BR_STATE_FORWARDING)
+ return 0;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.lport != rocker_port->lport)
+ continue;
+ if (!found->learned)
+ continue;
+ err = rocker_port_fdb_learn(rocker_port, flags,
+ found->key.addr,
+ found->key.vlan_id);
+ if (err)
+ goto err_out;
+ hash_del(&found->entry);
+ }
+
+err_out:
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ return err;
+}
+
+static int rocker_port_router_mac(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ u32 in_lport_mask = 0xffffffff;
+ __be16 eth_type;
+ const u8 *dst_mac_mask = ff_mac;
+ __be16 vlan_id_mask = htons(0xffff);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = rocker_port->internal_vlan_id;
+
+ eth_type = htons(ETH_P_IP);
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ eth_type, rocker_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+ if (err)
+ return err;
+
+ eth_type = htons(ETH_P_IPV6);
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ eth_type, rocker_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+
+ return err;
+}
+
+static int rocker_port_fwding(struct rocker_port *rocker_port)
+{
+ bool pop_vlan;
+ u32 out_lport;
+ __be16 vlan_id;
+ u16 vid;
+ int flags = ROCKER_OP_FLAG_NOWAIT;
+ int err;
+
+ /* Port will be forwarding-enabled if its STP state is LEARNING
+ * or FORWARDING. Traffic from CPU can still egress, regardless of
+ * port STP state. Use L2 interface group on port VLANs as a way
+ * to toggle port forwarding: if forwarding is disabled, L2
+ * interface group will not exist.
+ */
+
+ if (rocker_port->stp_state != BR_STATE_LEARNING &&
+ rocker_port->stp_state != BR_STATE_FORWARDING)
+ flags |= ROCKER_OP_FLAG_REMOVE;
+
+ out_lport = rocker_port->lport;
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, rocker_port->vlan_bitmap))
+ continue;
+ vlan_id = htons(vid);
+ pop_vlan = rocker_vlan_id_is_internal(vlan_id);
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for lport %d\n",
+ err, out_lport);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+{
+ bool want[ROCKER_CTRL_MAX] = { 0, };
+ int flags;
+ int err;
+ int i;
+
+ if (rocker_port->stp_state == state)
+ return 0;
+
+ rocker_port->stp_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ /* port is completely disabled */
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ want[ROCKER_CTRL_IPV4_MCAST] = true;
+ want[ROCKER_CTRL_IPV6_MCAST] = true;
+ if (rocker_port_is_bridged(rocker_port))
+ want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+ else
+ want[ROCKER_CTRL_LOCAL_ARP] = true;
+ break;
+ }
+
+ for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+ if (want[i] != rocker_port->ctrls[i]) {
+ flags = ROCKER_OP_FLAG_NOWAIT |
+ (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, flags,
+ &rocker_ctrls[i]);
+ if (err)
+ return err;
+ rocker_port->ctrls[i] = want[i];
+ }
+ }
+
+ err = rocker_port_fdb_flush(rocker_port);
+ if (err)
+ return err;
+
+ return rocker_port_fwding(rocker_port);
+}
+
+static struct rocker_internal_vlan_tbl_entry *
+rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+{
+ struct rocker_internal_vlan_tbl_entry *found;
+
+ hash_for_each_possible(rocker->internal_vlan_tbl, found,
+ entry, ifindex) {
+ if (found->ifindex == ifindex)
+ return found;
+ }
+
+ return NULL;
+}
+
+static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
+ int ifindex)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_internal_vlan_tbl_entry *entry;
+ struct rocker_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ int i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ entry->ifindex = ifindex;
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+ if (found) {
+ kfree(entry);
+ goto found;
+ }
+
+ found = entry;
+ hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
+
+ for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
+ if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
+ continue;
+ found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
+ goto found;
+ }
+
+ netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
+
+found:
+ found->ref_count++;
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ return found->vlan_id;
+}
+
+static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
+ int ifindex)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+ if (!found) {
+ netdev_err(rocker_port->dev,
+ "ifindex (%d) not found in internal VLAN tbl\n",
+ ifindex);
+ goto not_found;
+ }
+
+ if (--found->ref_count <= 0) {
+ bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
+ clear_bit(bit, rocker->internal_vlan_bitmap);
+ hash_del(&found->entry);
+ kfree(found);
+ }
+
+not_found:
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+}
+
+/*****************
+ * Net device ops
+ *****************/
+
+static int rocker_port_open(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ u8 stp_state = rocker_port_is_bridged(rocker_port) ?
+ BR_STATE_BLOCKING : BR_STATE_FORWARDING;
+ int err;
+
+ err = rocker_port_dma_rings_init(rocker_port);
+ if (err)
+ return err;
+
+ err = request_irq(rocker_msix_tx_vector(rocker_port),
+ rocker_tx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign tx irq\n");
+ goto err_request_tx_irq;
+ }
+
+ err = request_irq(rocker_msix_rx_vector(rocker_port),
+ rocker_rx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign rx irq\n");
+ goto err_request_rx_irq;
+ }
+
+ err = rocker_port_stp_update(rocker_port, stp_state);
+ if (err)
+ goto err_stp_update;
+
+ napi_enable(&rocker_port->napi_tx);
+ napi_enable(&rocker_port->napi_rx);
+ rocker_port_set_enable(rocker_port, true);
+ netif_start_queue(dev);
+ return 0;
+
+err_stp_update:
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+err_request_rx_irq:
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+err_request_tx_irq:
+ rocker_port_dma_rings_fini(rocker_port);
+ return err;
+}
+
+static int rocker_port_stop(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ rocker_port_set_enable(rocker_port, false);
+ napi_disable(&rocker_port->napi_rx);
+ napi_disable(&rocker_port->napi_tx);
+ rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+ rocker_port_dma_rings_fini(rocker_port);
+
+ return 0;
+}
+
+static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ struct rocker_tlv *attr;
+ int rem;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_TX_FRAGS])
+ return;
+ rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
+ struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
+ continue;
+ rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ attr);
+ if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
+ !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
+ continue;
+ dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
+ len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ }
+}
+
+static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ char *buf, size_t buf_len)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ struct rocker_tlv *frag;
+
+ dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
+ if (!frag)
+ goto unmap_frag;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
+ dma_handle))
+ goto nest_cancel;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
+ buf_len))
+ goto nest_cancel;
+ rocker_tlv_nest_end(desc_info, frag);
+ return 0;
+
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frag);
+unmap_frag:
+ pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ return -EMSGSIZE;
+}
+
+static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ struct rocker_tlv *frags;
+ int i;
+ int err;
+
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (unlikely(!desc_info)) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx ring full when queue awake\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+
+ frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
+ if (!frags)
+ goto out;
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb->data, skb_headlen(skb));
+ if (err)
+ goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
+ goto nest_cancel;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
+ if (err)
+ goto unmap_frags;
+ }
+ rocker_tlv_nest_end(desc_info, frags);
+
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
+
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (!desc_info)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+
+unmap_frags:
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frags);
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int rocker_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ err = rocker_port_vlan(rocker_port, 0, vid);
+ if (err)
+ return err;
+
+ return rocker_port_router_mac(rocker_port, 0, htons(vid));
+}
+
+static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
+ htons(vid));
+ if (err)
+ return err;
+
+ return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+}
+
+static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 nlm_flags)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+ int flags = 0;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+ int flags = ROCKER_OP_FLAG_REMOVE;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_fdb_fill_info(struct sk_buff *skb,
+ struct rocker_port *rocker_port,
+ const unsigned char *addr, u16 vid,
+ u32 portid, u32 seq, int type,
+ unsigned int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = rocker_port->dev->ifindex;
+ ndm->ndm_state = NUD_REACHABLE;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int rocker_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ int bkt;
+ unsigned long lock_flags;
+ const unsigned char *addr;
+ u16 vid;
+ int err;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.lport != rocker_port->lport)
+ continue;
+ if (idx < cb->args[0])
+ goto skip;
+ addr = found->key.addr;
+ vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
+ err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI);
+ if (err < 0)
+ break;
+skip:
+ ++idx;
+ }
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+ return idx;
+}
+
+static int rocker_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct nlattr *protinfo;
+ struct nlattr *attr;
+ int err;
+
+ protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_PROTINFO);
+ if (protinfo) {
+ attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
+ if (attr) {
+ if (nla_len(attr) < sizeof(u8))
+ return -EINVAL;
+
+ if (nla_get_u8(attr))
+ rocker_port->brport_flags |= BR_LEARNING;
+ else
+ rocker_port->brport_flags &= ~BR_LEARNING;
+ err = rocker_port_set_learning(rocker_port);
+ if (err)
+ return err;
+ }
+ attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
+ if (attr) {
+ if (nla_len(attr) < sizeof(u8))
+ return -EINVAL;
+
+ if (nla_get_u8(attr))
+ rocker_port->brport_flags |= BR_LEARNING_SYNC;
+ else
+ rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
+ }
+ }
+
+ return 0;
+}
+
+static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ u16 mode = BRIDGE_MODE_UNDEF;
+ u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+ rocker_port->brport_flags, mask);
+}
+
+static int rocker_port_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+
+ psid->id_len = sizeof(rocker->hw.id);
+ memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+ return 0;
+}
+
+static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_port_stp_update(rocker_port, state);
+}
+
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
+ .ndo_fdb_add = rocker_port_fdb_add,
+ .ndo_fdb_del = rocker_port_fdb_del,
+ .ndo_fdb_dump = rocker_port_fdb_dump,
+ .ndo_bridge_setlink = rocker_port_bridge_setlink,
+ .ndo_bridge_getlink = rocker_port_bridge_getlink,
+ .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
+ .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
+};
+
+/********************
+ * ethtool interface
+ ********************/
+
+static int rocker_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static int rocker_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static void rocker_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops rocker_port_ethtool_ops = {
+ .get_settings = rocker_port_get_settings,
+ .set_settings = rocker_port_set_settings,
+ .get_drvinfo = rocker_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/*****************
+ * NAPI interface
+ *****************/
+
+static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_tx);
+}
+
+static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Cleanup tx descriptors */
+ while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "tx desc received with err %d\n",
+ err);
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+ dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
+ credits++;
+ }
+
+ if (credits && netif_queue_stopped(rocker_port->dev))
+ netif_wake_queue(rocker_port->dev);
+
+ napi_complete(napi);
+ rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
+
+ return 0;
+}
+
+static int rocker_port_rx_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+ size_t rx_len;
+
+ if (!skb)
+ return -ENOENT;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
+ return -EINVAL;
+
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+
+ rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
+ skb_put(skb, rx_len);
+ skb->protocol = eth_type_trans(skb, rocker_port->dev);
+ netif_receive_skb(skb);
+
+ return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+}
+
+static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_rx);
+}
+
+static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Process rx descriptors */
+ while (credits < budget &&
+ (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "rx desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_port_rx_proc(rocker, rocker_port,
+ desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
+ err);
+ }
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
+ credits++;
+ }
+
+ if (credits < budget)
+ napi_complete(napi);
+
+ rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
+
+ return credits;
+}
+
+/*****************
+ * PCI driver ops
+ *****************/
+
+static void rocker_carrier_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
+ bool link_up;
+
+ link_up = link_status & (1ULL << rocker_port->lport);
+ if (link_up)
+ netif_carrier_on(rocker_port->dev);
+ else
+ netif_carrier_off(rocker_port->dev);
+}
+
+static void rocker_remove_ports(struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+ int i;
+
+ for (i = 0; i < rocker->port_count; i++) {
+ rocker_port = rocker->ports[i];
+ rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ unregister_netdev(rocker_port->dev);
+ }
+ kfree(rocker->ports);
+}
+
+static void rocker_port_dev_addr_init(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port,
+ rocker_port->dev->dev_addr);
+ if (err) {
+ dev_warn(&pdev->dev, "failed to get mac address, using random\n");
+ eth_hw_addr_random(rocker_port->dev);
+ }
+}
+
+static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_port *rocker_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct rocker_port));
+ if (!dev)
+ return -ENOMEM;
+ rocker_port = netdev_priv(dev);
+ rocker_port->dev = dev;
+ rocker_port->rocker = rocker;
+ rocker_port->port_number = port_number;
+ rocker_port->lport = port_number + 1;
+ rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+
+ rocker_port_dev_addr_init(rocker, rocker_port);
+ dev->netdev_ops = &rocker_port_netdev_ops;
+ dev->ethtool_ops = &rocker_port_ethtool_ops;
+ netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
+ NAPI_POLL_WEIGHT);
+ rocker_carrier_init(rocker_port);
+
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "register_netdev failed\n");
+ goto err_register_netdev;
+ }
+ rocker->ports[port_number] = rocker_port;
+
+ rocker_port_set_learning(rocker_port);
+
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+ err = rocker_port_ig_tbl(rocker_port, 0);
+ if (err) {
+ dev_err(&pdev->dev, "install ig port table failed\n");
+ goto err_port_ig_tbl;
+ }
+
+ return 0;
+
+err_port_ig_tbl:
+ unregister_netdev(dev);
+err_register_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+static int rocker_probe_ports(struct rocker *rocker)
+{
+ int i;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
+ rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
+ for (i = 0; i < rocker->port_count; i++) {
+ err = rocker_probe_port(rocker, i);
+ if (err)
+ goto remove_ports;
+ }
+ return 0;
+
+remove_ports:
+ rocker_remove_ports(rocker);
+ return err;
+}
+
+static int rocker_msix_init(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int msix_entries;
+ int i;
+ int err;
+
+ msix_entries = pci_msix_vec_count(pdev);
+ if (msix_entries < 0)
+ return msix_entries;
+
+ if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
+ return -EINVAL;
+
+ rocker->msix_entries = kmalloc_array(msix_entries,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!rocker->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_entries; i++)
+ rocker->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
+ if (err < 0)
+ goto err_enable_msix;
+
+ return 0;
+
+err_enable_msix:
+ kfree(rocker->msix_entries);
+ return err;
+}
+
+static void rocker_msix_fini(struct rocker *rocker)
+{
+ pci_disable_msix(rocker->pdev);
+ kfree(rocker->msix_entries);
+}
+
+static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct rocker *rocker;
+ int err;
+
+ rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
+ if (!rocker)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, rocker_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rocker->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ rocker->pdev = pdev;
+ pci_set_drvdata(pdev, rocker);
+
+ rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
+
+ err = rocker_msix_init(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ err = rocker_basic_hw_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "basic hw test failed\n");
+ goto err_basic_hw_test;
+ }
+
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+
+ err = rocker_dma_rings_init(rocker);
+ if (err)
+ goto err_dma_rings_init;
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
+ rocker_cmd_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign cmd irq\n");
+ goto err_request_cmd_irq;
+ }
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
+ rocker_event_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign event irq\n");
+ goto err_request_event_irq;
+ }
+
+ rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
+
+ err = rocker_init_tbls(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot init rocker tables\n");
+ goto err_init_tbls;
+ }
+
+ err = rocker_probe_ports(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+
+ dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+
+ return 0;
+
+err_probe_ports:
+ rocker_free_tbls(rocker);
+err_init_tbls:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+err_request_event_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+err_request_cmd_irq:
+ rocker_dma_rings_fini(rocker);
+err_dma_rings_init:
+err_basic_hw_test:
+ rocker_msix_fini(rocker);
+err_msix_init:
+ iounmap(rocker->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(rocker);
+ return err;
+}
+
+static void rocker_remove(struct pci_dev *pdev)
+{
+ struct rocker *rocker = pci_get_drvdata(pdev);
+
+ rocker_free_tbls(rocker);
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+ rocker_remove_ports(rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+ rocker_dma_rings_fini(rocker);
+ rocker_msix_fini(rocker);
+ iounmap(rocker->hw_addr);
+ pci_release_regions(rocker->pdev);
+ pci_disable_device(rocker->pdev);
+ kfree(rocker);
+}
+
+static struct pci_driver rocker_pci_driver = {
+ .name = rocker_driver_name,
+ .id_table = rocker_pci_id_table,
+ .probe = rocker_probe,
+ .remove = rocker_remove,
+};
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static bool rocker_port_dev_check(struct net_device *dev)
+{
+ return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+static int rocker_port_bridge_join(struct rocker_port *rocker_port,
+ struct net_device *bridge)
+{
+ int err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->dev->ifindex);
+
+ rocker_port->bridge_dev = bridge;
+
+ /* Use bridge internal VLAN ID for untagged pkts */
+ err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ if (err)
+ return err;
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port,
+ bridge->ifindex);
+ err = rocker_port_vlan(rocker_port, 0, 0);
+
+ return err;
+}
+
+static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
+{
+ int err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
+
+ rocker_port->bridge_dev = NULL;
+
+ /* Use port internal VLAN ID for untagged pkts */
+ err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ if (err)
+ return err;
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port,
+ rocker_port->dev->ifindex);
+ err = rocker_port_vlan(rocker_port, 0, 0);
+
+ return err;
+}
+
+static int rocker_port_master_changed(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct net_device *master = netdev_master_upper_dev_get(dev);
+ int err = 0;
+
+ if (master && master->rtnl_link_ops &&
+ !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ err = rocker_port_bridge_join(rocker_port, master);
+ else
+ err = rocker_port_bridge_leave(rocker_port);
+
+ return err;
+}
+
+static int rocker_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ int err;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ dev = netdev_notifier_info_to_dev(ptr);
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+ err = rocker_port_master_changed(dev);
+ if (err)
+ netdev_warn(dev,
+ "failed to reflect master change (err %d)\n",
+ err);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netdevice_nb __read_mostly = {
+ .notifier_call = rocker_netdevice_event,
+};
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init rocker_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&rocker_netdevice_nb);
+ err = pci_register_driver(&rocker_pci_driver);
+ if (err)
+ goto err_pci_register_driver;
+ return 0;
+
+err_pci_register_driver:
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ return err;
+}
+
+static void __exit rocker_module_exit(void)
+{
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ pci_unregister_driver(&rocker_pci_driver);
+}
+
+module_init(rocker_module_init);
+module_exit(rocker_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
+MODULE_DESCRIPTION("Rocker switch device driver");
+MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
new file mode 100644
index 000000000000..8d2865ba634c
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -0,0 +1,428 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ROCKER_H
+#define _ROCKER_H
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
+
+#define ROCKER_PCI_BAR0_SIZE 0x2000
+
+/* MSI-X vectors */
+enum {
+ ROCKER_MSIX_VEC_CMD,
+ ROCKER_MSIX_VEC_EVENT,
+ ROCKER_MSIX_VEC_TEST,
+ ROCKER_MSIX_VEC_RESERVED0,
+ __ROCKER_MSIX_VEC_TX,
+ __ROCKER_MSIX_VEC_RX,
+#define ROCKER_MSIX_VEC_TX(port) \
+ (__ROCKER_MSIX_VEC_TX + ((port) * 2))
+#define ROCKER_MSIX_VEC_RX(port) \
+ (__ROCKER_MSIX_VEC_RX + ((port) * 2))
+#define ROCKER_MSIX_VEC_COUNT(portcnt) \
+ (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1)
+};
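The layout above reserves four fixed vectors (cmd, event, test, reserved) followed by one tx/rx pair per port, so rocker_msix_init() expects exactly 2 * port_count + 4 MSI-X vectors. A small stand-alone sketch mirroring the macros (names shortened, purely illustrative):

#include <assert.h>
#include <stdio.h>

/* Mirror of the MSI-X vector layout: four fixed vectors, then a
 * tx/rx pair per port.
 */
enum {
        MSIX_VEC_CMD,
        MSIX_VEC_EVENT,
        MSIX_VEC_TEST,
        MSIX_VEC_RESERVED0,
        __MSIX_VEC_TX,
        __MSIX_VEC_RX,
};
#define MSIX_VEC_TX(port)       (__MSIX_VEC_TX + ((port) * 2))
#define MSIX_VEC_RX(port)       (__MSIX_VEC_RX + ((port) * 2))
#define MSIX_VEC_COUNT(portcnt) (MSIX_VEC_RX((portcnt) - 1) + 1)

int main(void)
{
        assert(MSIX_VEC_TX(0) == 4 && MSIX_VEC_RX(0) == 5);
        assert(MSIX_VEC_TX(1) == 6 && MSIX_VEC_RX(1) == 7);
        assert(MSIX_VEC_COUNT(4) == 12);        /* 2 * ports + 4 */
        printf("vectors for a 4-port switch: %d\n", MSIX_VEC_COUNT(4));
        return 0;
}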
+
+/* Rocker bogus registers */
+#define ROCKER_BOGUS_REG0 0x0000
+#define ROCKER_BOGUS_REG1 0x0004
+#define ROCKER_BOGUS_REG2 0x0008
+#define ROCKER_BOGUS_REG3 0x000c
+
+/* Rocker test registers */
+#define ROCKER_TEST_REG 0x0010
+#define ROCKER_TEST_REG64 0x0018 /* 8-byte */
+#define ROCKER_TEST_IRQ 0x0020
+#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */
+#define ROCKER_TEST_DMA_SIZE 0x0030
+#define ROCKER_TEST_DMA_CTRL 0x0034
+
+/* Rocker test register ctrl */
+#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0)
+#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1)
+#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2)
+
+/* Rocker DMA ring register offsets */
+#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
+#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32)
+#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32)
+#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32)
+#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32)
+#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32)
+#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
+
+/* Rocker dma ctrl register bits */
+#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0)
+
+/* Rocker DMA ring types */
+enum rocker_dma_type {
+ ROCKER_DMA_CMD,
+ ROCKER_DMA_EVENT,
+ __ROCKER_DMA_TX,
+ __ROCKER_DMA_RX,
+#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2)
+#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2)
+};
+
+/* Rocker DMA ring size limits and default sizes */
+#define ROCKER_DMA_SIZE_MIN 2ul
+#define ROCKER_DMA_SIZE_MAX 65536ul
+#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_TX_DESC_SIZE 256
+#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_RX_DESC_SIZE 256
+
+/* Rocker DMA descriptor struct */
+struct rocker_desc {
+ u64 buf_addr;
+ u64 cookie;
+ u16 buf_size;
+ u16 tlv_size;
+ u16 resv[5];
+ u16 comp_err;
+};
+
+#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15)
+
+/* Rocker DMA TLV struct */
+struct rocker_tlv {
+ u32 type;
+ u16 len;
+};
+
+/* TLVs */
+enum {
+ ROCKER_TLV_CMD_UNSPEC,
+ ROCKER_TLV_CMD_TYPE, /* u16 */
+ ROCKER_TLV_CMD_INFO, /* nest */
+
+ __ROCKER_TLV_CMD_MAX,
+ ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_TYPE_UNSPEC,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
+
+ __ROCKER_TLV_CMD_TYPE_MAX,
+ ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
+ ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
+
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
+};
+
+enum rocker_port_mode {
+ ROCKER_PORT_MODE_OF_DPA,
+};
+
+enum {
+ ROCKER_TLV_EVENT_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE, /* u16 */
+ ROCKER_TLV_EVENT_INFO, /* nest */
+
+ __ROCKER_TLV_EVENT_MAX,
+ ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_TYPE_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE_LINK_CHANGED,
+ ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN,
+
+ __ROCKER_TLV_EVENT_TYPE_MAX,
+ ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
+ ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */
+
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
+ ROCKER_TLV_EVENT_LINK_CHANGED_MAX =
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
+ ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */
+ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */
+
+ __ROCKER_TLV_EVENT_MAC_VLAN_MAX,
+ ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_RX_UNSPEC,
+ ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */
+ ROCKER_TLV_RX_CSUM, /* u16 */
+ ROCKER_TLV_RX_FRAG_ADDR, /* u64 */
+ ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */
+ ROCKER_TLV_RX_FRAG_LEN, /* u16 */
+
+ __ROCKER_TLV_RX_MAX,
+ ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
+};
+
+#define ROCKER_RX_FLAGS_IPV4 (1 << 0)
+#define ROCKER_RX_FLAGS_IPV6 (1 << 1)
+#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3)
+#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4)
+#define ROCKER_RX_FLAGS_TCP (1 << 5)
+#define ROCKER_RX_FLAGS_UDP (1 << 6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7)
+
+enum {
+ ROCKER_TLV_TX_UNSPEC,
+ ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */
+ ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */
+ ROCKER_TLV_TX_TSO_MSS, /* u16 */
+ ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */
+ ROCKER_TLV_TX_FRAGS, /* array */
+
+ __ROCKER_TLV_TX_MAX,
+ ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1,
+};
+
+#define ROCKER_TX_OFFLOAD_NONE 0
+#define ROCKER_TX_OFFLOAD_IP_CSUM 1
+#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2
+#define ROCKER_TX_OFFLOAD_L3_CSUM 3
+#define ROCKER_TX_OFFLOAD_TSO 4
+
+#define ROCKER_TX_FRAGS_MAX 16
+
+enum {
+ ROCKER_TLV_TX_FRAG_UNSPEC,
+ ROCKER_TLV_TX_FRAG, /* nest */
+
+ __ROCKER_TLV_TX_FRAG_MAX,
+ ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_TX_FRAG_ATTR_UNSPEC,
+ ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */
+ ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */
+
+ __ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1,
+};
+
+/* cmd info nested for OF-DPA msgs */
+enum {
+ ROCKER_TLV_OF_DPA_UNSPEC,
+ ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */
+ ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */
+ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */
+ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */
+ ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */
+ ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */
+ ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */
+ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */
+ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */
+ ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */
+ ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */
+ ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */
+ ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */
+
+ __ROCKER_TLV_OF_DPA_MAX,
+ ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1,
+};
+
+/* OF-DPA table IDs */
+
+enum rocker_of_dpa_table_id {
+ ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0,
+ ROCKER_OF_DPA_TABLE_ID_VLAN = 10,
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20,
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30,
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
+ ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50,
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60,
+};
+
+/* OF-DPA flow stats */
+enum {
+ ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */
+
+ __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1,
+};
+
+/* OF-DPA group types */
+enum rocker_of_dpa_group_type {
+ ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY,
+};
+
+/* OF-DPA group L2 overlay types */
+enum rocker_of_dpa_overlay_type {
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0,
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST,
+};
+
+/* OF-DPA group ID encoding */
+#define ROCKER_GROUP_TYPE_SHIFT 28
+#define ROCKER_GROUP_TYPE_MASK 0xf0000000
+#define ROCKER_GROUP_VLAN_SHIFT 16
+#define ROCKER_GROUP_VLAN_MASK 0x0fff0000
+#define ROCKER_GROUP_PORT_SHIFT 0
+#define ROCKER_GROUP_PORT_MASK 0x0000ffff
+#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12
+#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000
+#define ROCKER_GROUP_SUBTYPE_SHIFT 10
+#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00
+#define ROCKER_GROUP_INDEX_SHIFT 0
+#define ROCKER_GROUP_INDEX_MASK 0x0000ffff
+#define ROCKER_GROUP_INDEX_LONG_SHIFT 0
+#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff
+
+#define ROCKER_GROUP_TYPE_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT)
+#define ROCKER_GROUP_TYPE_SET(type) \
+ (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK)
+#define ROCKER_GROUP_VLAN_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT)
+#define ROCKER_GROUP_VLAN_SET(vlan_id) \
+ (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK)
+#define ROCKER_GROUP_PORT_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT)
+#define ROCKER_GROUP_PORT_SET(port) \
+ (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK)
+#define ROCKER_GROUP_INDEX_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT)
+#define ROCKER_GROUP_INDEX_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK)
+#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \
+ ROCKER_GROUP_INDEX_LONG_SHIFT)
+#define ROCKER_GROUP_INDEX_LONG_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \
+ ROCKER_GROUP_INDEX_LONG_MASK)
+
+#define ROCKER_GROUP_NONE 0
+#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port))
+#define ROCKER_GROUP_L2_REWRITE(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
+#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L3_UNICAST(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
+
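A group ID is a plain u32 with the group type in bits 31:28 and, for the short-index types used here, the VLAN in bits 27:16 and a port or index in bits 15:0; the kernel macros take a __be16 VLAN and ntohs() it before packing. A host-order sketch of what ROCKER_GROUP_L2_INTERFACE() produces, assuming VLAN 100 on lport 3 (which packs to 0x00640003):

#include <stdint.h>
#include <stdio.h>

/* Host-order illustration of the L2 interface group ID layout:
 * [31:28] type (0 = L2 interface), [27:16] VLAN, [15:0] port.
 */
static uint32_t l2_interface_group(uint16_t vlan, uint16_t port)
{
        return ((UINT32_C(0) << 28) & 0xf0000000) |
               (((uint32_t)vlan << 16) & 0x0fff0000) |
               ((uint32_t)port & 0x0000ffff);
}

int main(void)
{
        printf("0x%08x\n", l2_interface_group(100, 3)); /* 0x00640003 */
        return 0;
}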
+/* Rocker general purpose registers */
+#define ROCKER_CONTROL 0x0300
+#define ROCKER_PORT_PHYS_COUNT 0x0304
+#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */
+#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */
+#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
+
+/* Rocker control bits */
+#define ROCKER_CONTROL_RESET (1 << 0)
+
+#endif
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1f4449ad8900..f537cbea20e5 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1048,7 +1048,6 @@ static struct platform_driver s6gmac_driver = {
.remove = s6gmac_remove,
.driver = {
.name = "s6gmac",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b147d469a799..866560ea9e18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -90,9 +90,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
/* Get memory resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto err_out;
-
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -236,7 +233,6 @@ static struct platform_driver sxgbe_platform_driver = {
.remove = sxgbe_platform_remove,
.driver = {
.name = SXGBE_RESOURCE_NAME,
- .owner = THIS_MODULE,
.pm = &sxgbe_platform_pm_ops,
.of_match_table = of_match_ptr(sxgbe_dt_ids),
},
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 69e4fd21adb4..ca7336605748 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -826,7 +826,6 @@ static struct platform_driver sgiseeq_driver = {
.remove = __exit_p(sgiseeq_remove),
.driver = {
.name = "sgiseeq",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index a77f05ce8325..fbb6cfa0f5f1 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3689,6 +3689,11 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .sriov_init = efx_ef10_sriov_init,
+ .sriov_fini = efx_ef10_sriov_fini,
+ .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
+ .sriov_wanted = efx_ef10_sriov_wanted,
+ .sriov_reset = efx_ef10_sriov_reset,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b2cc590dd1dd..238482495e81 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1314,7 +1314,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
count > efx_vf_size(efx)) {
netif_warn(efx, probe, efx->net_dev,
"Reducing number of RSS channels from %u to %u for "
@@ -1426,7 +1426,9 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
- efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
+
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
return 0;
@@ -1614,7 +1616,7 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
@@ -2166,7 +2168,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
}
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx_sriov_mac_address_changed(efx);
+ efx->type->sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
@@ -2210,10 +2212,10 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2433,7 +2435,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (rc)
goto fail;
efx_restore_filters(efx);
- efx_sriov_reset(efx);
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2826,7 +2828,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_disable_interrupts(efx);
rtnl_unlock();
- efx_sriov_fini(efx);
+ efx->type->sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3023,7 +3025,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx_sriov_init(efx);
+ rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index cad258a78708..4835bc0d0de8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1086,19 +1086,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
- memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
return 0;
}
-static int efx_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key)
+static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx->type->rx_push_rss_config(efx);
return 0;
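
The ethtool hunk above extends get_rxfh/set_rxfh with a hash-function argument: the driver reports ETH_RSS_HASH_TOP, and set_rxfh rejects any attempt to supply a key or to select a hash function other than "no change" or Toeplitz. A minimal sketch of that validation is below; the constant values are assumptions for illustration only.

	/* Sketch of the hfunc/key validation added to set_rxfh above. */
	#include <stdio.h>
	#include <stddef.h>
	#include <errno.h>

	#define ETH_RSS_HASH_NO_CHANGE	0	/* assumed values */
	#define ETH_RSS_HASH_TOP	(1 << 0)

	static int check_rxfh(const unsigned char *key, unsigned char hfunc)
	{
		/* Only Toeplitz ("top") hashing is supported and the key is
		 * fixed, so changing either is refused up front. */
		if (key ||
		    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
			return -EOPNOTSUPP;
		return 0;
	}

	int main(void)
	{
		printf("top: %d, other: %d\n",
		       check_rxfh(NULL, ETH_RSS_HASH_TOP),
		       check_rxfh(NULL, 2 /* some other hash function */));
		return 0;
	}
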
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 157037546d30..f166c8ef38a3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2766,6 +2766,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2862,6 +2867,11 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6859437b59fb..75975328e020 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -226,6 +226,9 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
len = ALIGN(len, EFX_BUF_SIZE);
if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
@@ -237,8 +240,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
+ BUG_ON(efx_siena_sriov_enabled(efx) &&
+ nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif
netif_dbg(efx, probe, efx->net_dev,
@@ -667,7 +670,7 @@ static int efx_farch_do_flush(struct efx_nic *efx)
* the firmware (though we will still have to poll for
* completion). If that fails, fall back to the old scheme.
*/
- if (efx_sriov_enabled(efx)) {
+ if (efx_siena_sriov_enabled(efx)) {
rc = efx_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
@@ -1195,13 +1198,13 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
+ efx_siena_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
+ efx_siena_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1240,7 +1243,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1250,7 +1253,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1315,7 +1318,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
efx_farch_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
+ efx_siena_sriov_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
@@ -1668,6 +1671,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, buftbl_min;
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
@@ -1678,10 +1685,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
+ if (efx->type->sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
- efx->vf_buftbl_base = buftbl_min;
+ nic_data->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5239cf9bdc56..d37928f01949 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1035,7 +1035,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gathered lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ede32064685..325dd94bca46 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -913,13 +913,6 @@ struct vfdi_status;
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
- * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
- * @vfdi_status: Common VFDI status page to be dmad to VF address space.
- * @local_addr_list: List of local addresses. Protected by %local_lock.
- * @local_page_list: List of DMA addressable pages used to broadcast
- * %local_addr_list. Protected by %local_lock.
- * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
- * @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
* @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
@@ -1060,17 +1053,10 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_channel *vfdi_channel;
struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
- unsigned vf_buftbl_base;
- struct efx_buffer vfdi_status;
- struct list_head local_addr_list;
- struct list_head local_page_list;
- struct mutex local_lock;
- struct work_struct peer_work;
#endif
struct efx_ptp_data *ptp_data;
@@ -1344,6 +1330,11 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_init)(struct efx_nic *efx);
+ void (*sriov_fini)(struct efx_nic *efx);
+ void (*sriov_mac_address_changed)(struct efx_nic *efx);
+ bool (*sriov_wanted)(struct efx_nic *efx);
+ void (*sriov_reset)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f77cce034ad4..93d10cbbd1cf 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -378,12 +378,30 @@ enum {
/**
* struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be dmad to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ * %local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
*/
struct siena_nic_data {
+ struct efx_nic *efx;
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_channel *vfdi_channel;
+ unsigned vf_buftbl_base;
+ struct efx_buffer vfdi_status;
+ struct list_head local_addr_list;
+ struct list_head local_page_list;
+ struct mutex local_lock;
+ struct work_struct peer_work;
+#endif
};
enum {
@@ -522,62 +540,88 @@ struct efx_ef10_nic_data {
#ifdef CONFIG_SFC_SRIOV
-static inline bool efx_sriov_wanted(struct efx_nic *efx)
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
{
return efx->vf_count != 0;
}
-static inline bool efx_sriov_enabled(struct efx_nic *efx)
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
+
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
return 1 << efx->vi_scale;
}
int efx_init_sriov(void);
-void efx_sriov_probe(struct efx_nic *efx);
-int efx_sriov_init(struct efx_nic *efx);
-void efx_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_sriov_reset(struct efx_nic *efx);
-void efx_sriov_fini(struct efx_nic *efx);
+void efx_siena_sriov_probe(struct efx_nic *efx);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
#else
-static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-
static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
-static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_sriov_fini(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
+static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_event(struct efx_channel *channel,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
+ unsigned dmaq) {}
+static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
+static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
static inline void efx_fini_sriov(void) {}
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
#endif
-int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
-int efx_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
+/* FALCON */
+static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
+
+int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
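
The nic.h hunk above relies on inline stubs: when CONFIG_SFC_SRIOV is disabled, or a generation (EF10, Falcon) has no SR-IOV support, the functions collapse to no-ops that return false or -EOPNOTSUPP, so callers compile without #ifdef clutter. A tiny standalone sketch of the same pattern, with a made-up HAVE_FEATURE switch standing in for the Kconfig option (build with or without -DHAVE_FEATURE to compare):

	/* Sketch of the "inline stub" pattern used above. */
	#include <stdio.h>
	#include <errno.h>

	#ifdef HAVE_FEATURE
	static int feature_init(void)
	{
		printf("feature enabled\n");
		return 0;
	}
	#else
	/* Stub keeps callers building when the feature is compiled out. */
	static inline int feature_init(void) { return -EOPNOTSUPP; }
	#endif

	int main(void)
	{
		int rc = feature_init();

		if (rc)
			printf("feature unavailable: %d\n", rc);
		return 0;
	}
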
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ae696855f21a..3583f0208a6e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -251,6 +251,7 @@ static int siena_probe_nic(struct efx_nic *efx)
nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
+ nic_data->efx = efx;
efx->nic_data = nic_data;
if (efx_farch_fpga_ver(efx) != 0) {
@@ -306,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
- efx_sriov_probe(efx);
+ efx_siena_sriov_probe(efx);
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -996,6 +997,11 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+ .sriov_init = efx_siena_sriov_init,
+ .sriov_fini = efx_siena_sriov_fini,
+ .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
+ .sriov_wanted = efx_siena_sriov_wanted,
+ .sriov_reset = efx_siena_sriov_reset,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 43d2e64546ed..a8bbbad68a88 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -66,7 +66,7 @@ enum efx_vf_tx_filter_mode {
* @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
* @peer_page_addrs and @peer_page_count from simultaneous
* updates by the VM and consumption by
- * efx_sriov_update_vf_addr()
+ * efx_siena_sriov_update_vf_addr()
* @peer_page_addrs: Pointer to an array of guest pages for local addresses.
* @peer_page_count: Number of entries in @peer_page_count.
* @evq0_addrs: Array of guest pages backing evq0.
@@ -194,8 +194,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
-static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
- unsigned *vi_scale_out, unsigned *vf_total_out)
+static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
+ unsigned *vi_scale_out, unsigned *vf_total_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
@@ -227,18 +227,20 @@ static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
return 0;
}
-static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
+static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
EFX_POPULATE_OWORD_2(reg,
FRF_CZ_USREV_DIS, enabled ? 0 : 1,
- FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
+ FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
-static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
- unsigned int count)
+static int efx_siena_sriov_memcpy(struct efx_nic *efx,
+ struct efx_memcpy_req *req,
+ unsigned int count)
{
MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
MCDI_DECLARE_STRUCT_PTR(record);
@@ -297,7 +299,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -341,7 +343,7 @@ static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -380,22 +382,26 @@ static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
{
- efx_sriov_reset_tx_filter(vf);
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &vf->efx->peer_work);
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ efx_siena_sriov_reset_tx_filter(vf);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
}
/* Push the peer list to this VF. The caller must hold status_lock to interlock
* with VFDI requests, and they must be serialised against manipulation of
* local_page_list, either by acquiring local_lock or by running from
- * efx_sriov_peer_work()
+ * efx_siena_sriov_peer_work()
*/
-static void __efx_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
- struct vfdi_status *status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *status = nic_data->vfdi_status.addr;
struct efx_memcpy_req copy[4];
struct efx_endpoint_page *epp;
unsigned int pos, count;
@@ -421,7 +427,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
*/
data_offset = offsetof(struct vfdi_status, version);
copy[1].from_rid = efx->pci_dev->devfn;
- copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
+ copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
copy[1].to_rid = vf->pci_rid;
copy[1].to_addr = vf->status_addr + data_offset;
copy[1].length = status->length - data_offset;
@@ -429,7 +435,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
/* Copy the peer pages */
pos = 2;
count = 0;
- list_for_each_entry(epp, &efx->local_page_list, link) {
+ list_for_each_entry(epp, &nic_data->local_page_list, link) {
if (count == vf->peer_page_count) {
/* The VF driver will know they need to provide more
* pages because peer_addr_count is too large.
@@ -444,7 +450,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
copy[pos].length = EFX_PAGE_SIZE;
if (++pos == ARRAY_SIZE(copy)) {
- efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
pos = 0;
}
++count;
@@ -456,7 +462,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
generation_end);
copy[pos].length = sizeof(status->generation_end);
- efx_sriov_memcpy(efx, copy, pos + 1);
+ efx_siena_sriov_memcpy(efx, copy, pos + 1);
/* Notify the guest */
EFX_POPULATE_QWORD_3(event,
@@ -469,8 +475,8 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
&event);
}
-static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
- u64 *addr, unsigned count)
+static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
+ u64 *addr, unsigned count)
{
efx_qword_t buf;
unsigned pos;
@@ -539,7 +545,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_EINVAL;
}
- efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
@@ -584,7 +590,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
}
if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
++vf->rxq_count;
- efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
EFX_POPULATE_OWORD_6(reg,
@@ -628,7 +634,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
++vf->txq_count;
mutex_unlock(&vf->txq_lock);
- efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
@@ -742,8 +748,8 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
vf_offset + index);
}
- efx_sriov_bufs(efx, vf->buftbl_base, NULL,
- EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+ efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
+ EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -754,6 +760,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_rxq = req->u.mac_filter.rxq;
unsigned flags;
@@ -776,17 +783,20 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
vf->rx_filter_qid = vf_rxq;
vf->rx_filtering = true;
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &efx->peer_work);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
vf->rx_filtering = false;
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &vf->efx->peer_work);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
return VFDI_RC_SUCCESS;
}
@@ -794,6 +804,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_req *req = vf->buf.addr;
u64 page_count = req->u.set_status_page.peer_page_count;
u64 max_page_count =
@@ -809,7 +820,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_EINVAL;
}
- mutex_lock(&efx->local_lock);
+ mutex_lock(&nic_data->local_lock);
mutex_lock(&vf->status_lock);
vf->status_addr = req->u.set_status_page.dma_addr;
@@ -828,9 +839,9 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
}
}
- __efx_sriov_push_vf_status(vf);
+ __efx_siena_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
- mutex_unlock(&efx->local_lock);
+ mutex_unlock(&nic_data->local_lock);
return VFDI_RC_SUCCESS;
}
@@ -857,7 +868,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
-static void efx_sriov_vfdi(struct work_struct *work)
+static void efx_siena_sriov_vfdi(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct efx_nic *efx = vf->efx;
@@ -872,7 +883,7 @@ static void efx_sriov_vfdi(struct work_struct *work)
copy[0].to_rid = efx->pci_dev->devfn;
copy[0].to_addr = vf->buf.dma_addr;
copy[0].length = EFX_PAGE_SIZE;
- rc = efx_sriov_memcpy(efx, copy, 1);
+ rc = efx_siena_sriov_memcpy(efx, copy, 1);
if (rc) {
/* If we can't get the request, we can't reply to the caller */
if (net_ratelimit())
@@ -916,7 +927,7 @@ static void efx_sriov_vfdi(struct work_struct *work)
copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
copy[1].length = sizeof(req->op);
- (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
@@ -925,7 +936,8 @@ static void efx_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
+static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+ struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
struct efx_memcpy_req copy_req[4];
@@ -961,7 +973,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
copy_req[k].to_addr = vf->evq0_addrs[pos + k];
copy_req[k].length = EFX_PAGE_SIZE;
}
- rc = efx_sriov_memcpy(efx, copy_req, count);
+ rc = efx_siena_sriov_memcpy(efx, copy_req, count);
if (rc) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
@@ -974,7 +986,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
/* Reinitialise, arm and trigger evq0 */
abs_evq = abs_index(vf, 0);
buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
- efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+ efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
@@ -992,19 +1004,19 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
mutex_unlock(&vf->status_lock);
}
-static void efx_sriov_reset_vf_work(struct work_struct *work)
+static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
- efx_sriov_reset_vf(vf, &buf);
+ efx_siena_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
}
-static void efx_sriov_handle_no_channel(struct efx_nic *efx)
+static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
{
netif_err(efx, drv, efx->net_dev,
"ERROR: IOV requires MSI-X and 1 additional interrupt"
@@ -1012,35 +1024,38 @@ static void efx_sriov_handle_no_channel(struct efx_nic *efx)
efx->vf_count = 0;
}
-static int efx_sriov_probe_channel(struct efx_channel *channel)
+static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
{
- channel->efx->vfdi_channel = channel;
+ struct siena_nic_data *nic_data = channel->efx->nic_data;
+ nic_data->vfdi_channel = channel;
+
return 0;
}
static void
-efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+efx_siena_sriov_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
{
snprintf(buf, len, "%s-iov", channel->efx->name);
}
-static const struct efx_channel_type efx_sriov_channel_type = {
- .handle_no_channel = efx_sriov_handle_no_channel,
- .pre_probe = efx_sriov_probe_channel,
+static const struct efx_channel_type efx_siena_sriov_channel_type = {
+ .handle_no_channel = efx_siena_sriov_handle_no_channel,
+ .pre_probe = efx_siena_sriov_probe_channel,
.post_remove = efx_channel_dummy_op_void,
- .get_name = efx_sriov_get_channel_name,
+ .get_name = efx_siena_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
};
-void efx_sriov_probe(struct efx_nic *efx)
+void efx_siena_sriov_probe(struct efx_nic *efx)
{
unsigned count;
if (!max_vfs)
return;
- if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
return;
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1048,17 +1063,20 @@ void efx_sriov_probe(struct efx_nic *efx)
/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
efx->vf_count = count;
- efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
}
/* Copy the list of individual addresses into the vfdi_status.peers
* array and auxiliary pages, protected by %local_lock. Drop that lock
* and then broadcast the address list to every VF.
*/
-static void efx_sriov_peer_work(struct work_struct *data)
+static void efx_siena_sriov_peer_work(struct work_struct *data)
{
- struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
- struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = container_of(data,
+ struct siena_nic_data,
+ peer_work);
+ struct efx_nic *efx = nic_data->efx;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
struct efx_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
@@ -1068,11 +1086,11 @@ static void efx_sriov_peer_work(struct work_struct *data)
unsigned int peer_count;
unsigned int pos;
- mutex_lock(&efx->local_lock);
+ mutex_lock(&nic_data->local_lock);
/* Move the existing peer pages off %local_page_list */
INIT_LIST_HEAD(&pages);
- list_splice_tail_init(&efx->local_page_list, &pages);
+ list_splice_tail_init(&nic_data->local_page_list, &pages);
/* Populate the VF addresses starting from entry 1 (entry 0 is
* the PF address)
@@ -1094,7 +1112,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
}
/* Fill the remaining addresses */
- list_for_each_entry(local_addr, &efx->local_addr_list, link) {
+ list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
ether_addr_copy(peer->mac_addr, local_addr->addr);
peer->tci = 0;
++peer;
@@ -1117,13 +1135,13 @@ static void efx_sriov_peer_work(struct work_struct *data)
list_del(&epp->link);
}
- list_add_tail(&epp->link, &efx->local_page_list);
+ list_add_tail(&epp->link, &nic_data->local_page_list);
peer = (struct vfdi_endpoint *)epp->ptr;
peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
}
}
vfdi_status->peer_count = peer_count;
- mutex_unlock(&efx->local_lock);
+ mutex_unlock(&nic_data->local_lock);
/* Free any now unused endpoint pages */
while (!list_empty(&pages)) {
@@ -1141,25 +1159,26 @@ static void efx_sriov_peer_work(struct work_struct *data)
mutex_lock(&vf->status_lock);
if (vf->status_addr)
- __efx_sriov_push_vf_status(vf);
+ __efx_siena_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
}
}
-static void efx_sriov_free_local(struct efx_nic *efx)
+static void efx_siena_sriov_free_local(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
struct efx_local_addr *local_addr;
struct efx_endpoint_page *epp;
- while (!list_empty(&efx->local_addr_list)) {
- local_addr = list_first_entry(&efx->local_addr_list,
+ while (!list_empty(&nic_data->local_addr_list)) {
+ local_addr = list_first_entry(&nic_data->local_addr_list,
struct efx_local_addr, link);
list_del(&local_addr->link);
kfree(local_addr);
}
- while (!list_empty(&efx->local_page_list)) {
- epp = list_first_entry(&efx->local_page_list,
+ while (!list_empty(&nic_data->local_page_list)) {
+ epp = list_first_entry(&nic_data->local_page_list,
struct efx_endpoint_page, link);
list_del(&epp->link);
dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
@@ -1168,7 +1187,7 @@ static void efx_sriov_free_local(struct efx_nic *efx)
}
}
-static int efx_sriov_vf_alloc(struct efx_nic *efx)
+static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
struct efx_vf *vf;
@@ -1185,8 +1204,8 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx)
vf->rx_filter_id = -1;
vf->tx_filter_mode = VF_TX_FILTER_AUTO;
vf->tx_filter_id = -1;
- INIT_WORK(&vf->req, efx_sriov_vfdi);
- INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
+ INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
+ INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
init_waitqueue_head(&vf->flush_waitq);
mutex_init(&vf->status_lock);
mutex_init(&vf->txq_lock);
@@ -1195,7 +1214,7 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx)
return 0;
}
-static void efx_sriov_vfs_fini(struct efx_nic *efx)
+static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
@@ -1212,9 +1231,10 @@ static void efx_sriov_vfs_fini(struct efx_nic *efx)
}
}
-static int efx_sriov_vfs_init(struct efx_nic *efx)
+static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
struct efx_vf *vf;
@@ -1227,7 +1247,7 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
- buftbl_base = efx->vf_buftbl_base;
+ buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
@@ -1253,13 +1273,14 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
return 0;
fail:
- efx_sriov_vfs_fini(efx);
+ efx_siena_sriov_vfs_fini(efx);
return rc;
}
-int efx_sriov_init(struct efx_nic *efx)
+int efx_siena_sriov_init(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status;
int rc;
@@ -1271,15 +1292,15 @@ int efx_sriov_init(struct efx_nic *efx)
if (efx->vf_count == 0)
return 0;
- rc = efx_sriov_cmd(efx, true, NULL, NULL);
+ rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
- GFP_KERNEL);
+ rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
+ sizeof(*vfdi_status), GFP_KERNEL);
if (rc)
goto fail_status;
- vfdi_status = efx->vfdi_status.addr;
+ vfdi_status = nic_data->vfdi_status.addr;
memset(vfdi_status, 0, sizeof(*vfdi_status));
vfdi_status->version = 1;
vfdi_status->length = sizeof(*vfdi_status);
@@ -1289,16 +1310,16 @@ int efx_sriov_init(struct efx_nic *efx)
vfdi_status->peer_count = 1 + efx->vf_count;
vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
- rc = efx_sriov_vf_alloc(efx);
+ rc = efx_siena_sriov_vf_alloc(efx);
if (rc)
goto fail_alloc;
- mutex_init(&efx->local_lock);
- INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
- INIT_LIST_HEAD(&efx->local_addr_list);
- INIT_LIST_HEAD(&efx->local_page_list);
+ mutex_init(&nic_data->local_lock);
+ INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
+ INIT_LIST_HEAD(&nic_data->local_addr_list);
+ INIT_LIST_HEAD(&nic_data->local_page_list);
- rc = efx_sriov_vfs_init(efx);
+ rc = efx_siena_sriov_vfs_init(efx);
if (rc)
goto fail_vfs;
@@ -1307,7 +1328,7 @@ int efx_sriov_init(struct efx_nic *efx)
efx->vf_init_count = efx->vf_count;
rtnl_unlock();
- efx_sriov_usrev(efx, true);
+ efx_siena_sriov_usrev(efx, true);
/* At this point we must be ready to accept VFDI requests */
@@ -1321,34 +1342,35 @@ int efx_sriov_init(struct efx_nic *efx)
return 0;
fail_pci:
- efx_sriov_usrev(efx, false);
+ efx_siena_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
- efx_sriov_vfs_fini(efx);
+ efx_siena_sriov_vfs_fini(efx);
fail_vfs:
- cancel_work_sync(&efx->peer_work);
- efx_sriov_free_local(efx);
+ cancel_work_sync(&nic_data->peer_work);
+ efx_siena_sriov_free_local(efx);
kfree(efx->vf);
fail_alloc:
- efx_nic_free_buffer(efx, &efx->vfdi_status);
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
- efx_sriov_cmd(efx, false, NULL, NULL);
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
return rc;
}
-void efx_sriov_fini(struct efx_nic *efx)
+void efx_siena_sriov_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
+ struct siena_nic_data *nic_data = efx->nic_data;
if (efx->vf_init_count == 0)
return;
/* Disable all interfaces to reconfiguration */
- BUG_ON(efx->vfdi_channel->enabled);
- efx_sriov_usrev(efx, false);
+ BUG_ON(nic_data->vfdi_channel->enabled);
+ efx_siena_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
@@ -1359,19 +1381,19 @@ void efx_sriov_fini(struct efx_nic *efx)
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
- cancel_work_sync(&efx->peer_work);
+ cancel_work_sync(&nic_data->peer_work);
pci_disable_sriov(efx->pci_dev);
/* Tear down back-end state */
- efx_sriov_vfs_fini(efx);
- efx_sriov_free_local(efx);
+ efx_siena_sriov_vfs_fini(efx);
+ efx_siena_sriov_free_local(efx);
kfree(efx->vf);
- efx_nic_free_buffer(efx, &efx->vfdi_status);
- efx_sriov_cmd(efx, false, NULL, NULL);
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_vf *vf;
@@ -1428,7 +1450,7 @@ error:
vf->req_seqno = seq + 1;
}
-void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
struct efx_vf *vf;
@@ -1445,18 +1467,19 @@ void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_sriov_mac_address_changed(struct efx_nic *efx)
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
- struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
return;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
- queue_work(vfdi_workqueue, &efx->peer_work);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
}
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned queue, qid;
@@ -1475,7 +1498,7 @@ void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
wake_up(&vf->flush_waitq);
}
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned ev_failed, queue, qid;
@@ -1500,7 +1523,7 @@ void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
}
/* Called from napi. Schedule the reset work item */
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
struct efx_vf *vf;
unsigned int rel;
@@ -1516,7 +1539,7 @@ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
}
/* Reset all VFs */
-void efx_sriov_reset(struct efx_nic *efx)
+void efx_siena_sriov_reset(struct efx_nic *efx)
{
unsigned int vf_i;
struct efx_buffer buf;
@@ -1527,15 +1550,15 @@ void efx_sriov_reset(struct efx_nic *efx)
if (efx->vf_init_count == 0)
return;
- efx_sriov_usrev(efx, true);
- (void)efx_sriov_cmd(efx, true, NULL, NULL);
+ efx_siena_sriov_usrev(efx, true);
+ (void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
vf = efx->vf + vf_i;
- efx_sriov_reset_vf(vf, &buf);
+ efx_siena_sriov_reset_vf(vf, &buf);
}
efx_nic_free_buffer(efx, &buf);
@@ -1543,8 +1566,8 @@ void efx_sriov_reset(struct efx_nic *efx)
int efx_init_sriov(void)
{
- /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
- * efx_sriov_peer_work() spend almost all their time sleeping for
+ /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and
+ * efx_siena_sriov_peer_work() spend almost all their time sleeping for
* MCDI to complete anyway
*/
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
@@ -1559,7 +1582,7 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1570,14 +1593,14 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
- __efx_sriov_update_vf_addr(vf);
+ __efx_siena_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
-int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
- u16 vlan, u8 qos)
+int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+ u16 vlan, u8 qos)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1590,14 +1613,14 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
vf->addr.tci = htons(tci);
- __efx_sriov_update_vf_addr(vf);
+ __efx_siena_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
- bool spoofchk)
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1620,8 +1643,8 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
- struct ifla_vf_info *ivi)
+int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
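
The siena_sriov.c changes above move the SR-IOV bookkeeping from struct efx_nic into struct siena_nic_data, and efx_siena_sriov_peer_work() now recovers that private data from the embedded work item with container_of(). A userspace sketch of the same container_of() pattern, using simplified stand-in structures rather than the real ones:

	/* Sketch of the container_of() pattern used by the peer work item. */
	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };

	struct siena_nic_data {
		int vf_count;
		struct work_struct peer_work;
	};

	static void peer_work_fn(struct work_struct *work)
	{
		/* Walk back from the member to the enclosing structure. */
		struct siena_nic_data *nic_data =
			container_of(work, struct siena_nic_data, peer_work);

		printf("peer work for NIC with %d VFs\n", nic_data->vf_count);
	}

	int main(void)
	{
		struct siena_nic_data nd = { .vf_count = 4 };

		peer_work_fn(&nd.peer_work);
		return 0;
	}
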
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 5564a5fa3385..5eac523b4b0c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -870,7 +870,6 @@ static struct platform_driver meth_driver = {
.remove = __exit_p(meth_remove),
.driver = {
.name = "meth",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 753630f5d3d3..9468e64e6007 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
- BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA
+ BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64 || XTENSA)
+ MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index e88df9c7f1c0..bd64eb982e52 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2173,7 +2173,6 @@ static struct platform_driver smc911x_driver = {
.resume = smc911x_drv_resume,
.driver = {
.name = CARDNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 6cc3cf6f17c8..88a55f95fe09 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2472,7 +2472,6 @@ static struct platform_driver smc_driver = {
.remove = smc_drv_remove,
.driver = {
.name = CARDNAME,
- .owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
.of_match_table = of_match_ptr(smc91x_match),
},
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 77ed74561e5f..2965c6ae7d6e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,6 +59,8 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+
#include "smsc911x.h"
#define SMSC_CHIPNAME "smsc911x"
@@ -2338,6 +2340,9 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -2491,6 +2496,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_disable_resources;
@@ -2572,6 +2580,8 @@ out_unregister_netdev_5:
out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
(void)smsc911x_disable_resources(pdev);
out_enable_resources_fail:
smsc911x_free_resources(pdev);
@@ -2649,7 +2659,6 @@ static struct platform_driver smsc911x_driver = {
.remove = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
- .owner = THIS_MODULE,
.pm = SMSC911X_PM_OPS,
.of_match_table = of_match_ptr(smsc911x_dt_ids),
},
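
The smsc911x hunks above add runtime-PM handling: pm_runtime_enable()/pm_runtime_get_sync() in probe are balanced by pm_runtime_put()/pm_runtime_disable() both in remove and in the probe error path. A rough userspace sketch of that pairing, with a plain counter standing in for the runtime-PM core and hypothetical function names:

	/* Sketch of the probe/remove runtime-PM pairing added above. */
	#include <stdio.h>

	static int usage_count;

	static void rpm_enable(void)  { printf("enable\n"); }
	static void rpm_disable(void) { printf("disable\n"); }
	static void rpm_get(void)     { ++usage_count; }
	static void rpm_put(void)     { --usage_count; }

	static int drv_probe(int fail_init)
	{
		rpm_enable();
		rpm_get();

		if (fail_init) {
			/* Error path mirrors the out_disable_resources label. */
			rpm_put();
			rpm_disable();
			return -1;
		}
		return 0;
	}

	static void drv_remove(void)
	{
		rpm_put();
		rpm_disable();
	}

	int main(void)
	{
		if (!drv_probe(0))
			drv_remove();
		drv_probe(1);
		printf("usage_count = %d\n", usage_count);
		return 0;
	}
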
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index b02d4a3ffa37..7d3af190be55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -14,62 +14,20 @@ config STMMAC_ETH
if STMMAC_ETH
config STMMAC_PLATFORM
- bool "STMMAC Platform bus support"
+ tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
default y
---help---
- This selects the platform specific bus support for
- the stmmac device driver. This is the driver used
- on many embedded STM platforms based on ARM and SuperH
- processors.
+ This selects the platform specific bus support for the stmmac driver.
+ This is the driver used on several SoCs:
+ STi, Allwinner, Amlogic Meson, Altera SOCFPGA.
+
If you have a controller with this interface, say Y or M here.
If unsure, say N.
-config DWMAC_MESON
- bool "Amlogic Meson dwmac support"
- depends on STMMAC_PLATFORM && ARCH_MESON
- help
- Support for Ethernet controller on Amlogic Meson SoCs.
-
- This selects the Amlogic Meson SoC glue layer support for
- the stmmac device driver. This driver is used for Meson6 and
- Meson8 SoCs.
-
-config DWMAC_SOCFPGA
- bool "SOCFPGA dwmac support"
- depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
- help
- Support for ethernet controller on Altera SOCFPGA
-
- This selects the Altera SOCFPGA SoC glue layer support
- for the stmmac device driver. This driver is used for
- arria5 and cyclone5 FPGA SoCs.
-
-config DWMAC_SUNXI
- bool "Allwinner GMAC support"
- depends on STMMAC_PLATFORM && ARCH_SUNXI
- default y
- ---help---
- Support for Allwinner A20/A31 GMAC ethernet controllers.
-
- This selects Allwinner SoC glue layer support for the
- stmmac device driver. This driver is used for A20/A31
- GMAC ethernet controller.
-
-config DWMAC_STI
- bool "STi GMAC support"
- depends on STMMAC_PLATFORM && ARCH_STI
- default y
- ---help---
- Support for ethernet controller on STi SOCs.
-
- This selects STi SoC glue layer support for the stmmac
- device driver. This driver is used on for the STi series
- SOCs GMAC ethernet controller.
-
config STMMAC_PCI
- bool "STMMAC PCI bus support"
+ tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
---help---
This is to select the Synopsys DWMAC available on PCI devices,
@@ -79,22 +37,4 @@ config STMMAC_PCI
D1215994A VIRTEX FPGA board.
If unsure, say N.
-
-config STMMAC_DEBUG_FS
- bool "Enable monitoring via sysFS "
- default n
- depends on STMMAC_ETH && DEBUG_FS
- ---help---
- The stmmac entry in /sys reports DMA TX/RX rings
- or (if supported) the HW cap register.
-
-config STMMAC_DA
- bool "STMMAC DMA arbitration scheme"
- default n
- ---help---
- Selecting this option, rx has priority over Tx (only for Giga
- Ethernet device).
- By default, the DMA arbitration scheme is based on Round-robin
- (rx:tx priority is 1:1).
-
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 0533d0ba783d..ac4d5629d905 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,11 +1,12 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
-stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
-stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
-stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
-stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
- chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
+ dwmac-sti.o dwmac-socfpga.o
+
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 593e6c4144a7..cd77289c3cfe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -44,6 +44,7 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
unsigned long tx_underflow ____cacheline_aligned;
@@ -220,6 +221,7 @@ enum dma_irq_status {
handle_tx = 0x8,
};
+/* EEE and LPI defines */
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
@@ -229,6 +231,7 @@ enum dma_irq_status {
#define CORE_PCS_LINK_STATUS (1 << 6)
#define CORE_RGMII_IRQ (1 << 7)
+/* Physical Coding Sublayer */
struct rgmii_adv {
unsigned int pause;
unsigned int duplex;
@@ -294,6 +297,7 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Descriptors helpers */
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -341,6 +345,10 @@ struct stmmac_desc_ops {
int (*get_rx_timestamp_status) (void *desc, u32 ats);
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+/* Specific DMA helpers */
struct stmmac_dma_ops {
/* DMA core initialization */
int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
@@ -370,6 +378,7 @@ struct stmmac_dma_ops {
struct mac_device_info;
+/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, int mtu);
@@ -400,6 +409,7 @@ struct stmmac_ops {
void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
};
+/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
void (*config_sub_second_increment) (void __iomem *ioaddr);
@@ -410,6 +420,8 @@ struct stmmac_hwtimestamp {
u64(*get_systime) (void __iomem *ioaddr);
};
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+
struct mac_link {
int port;
int duplex;
@@ -421,6 +433,7 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
+/* Helpers to manage the descriptors for chain and ring modes */
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
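The comments added to common.h above document the layered HW abstraction this driver uses: the common code only talks to ops tables (descriptor, DMA, core, timestamping and chain/ring mode helpers), and each MAC/descriptor flavour supplies its own implementation that is selected at probe time. A minimal, hypothetical sketch of the pattern (the demo_* names are illustrative, not part of the driver):

/* a reduced ops table, in the spirit of stmmac_desc_ops */
struct demo_desc_ops {
	void (*init_tx_desc)(void *desc, int last);
	int  (*get_tx_status)(void *desc);
};

/* one flavour-specific implementation, chosen at probe time */
static void enh_init_tx_desc(void *desc, int last) { /* enhanced layout */ }
static int enh_get_tx_status(void *desc) { return 0; }

static const struct demo_desc_ops demo_enh_desc_ops = {
	.init_tx_desc	= enh_init_tx_desc,
	.get_tx_status	= enh_get_tx_status,
};

/* core code never touches the descriptor layout directly */
static int demo_tx_clean(const struct demo_desc_ops *ops, void *desc)
{
	return ops->get_tx_status(desc);
}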
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index d225a603e604..cca028d632f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -18,6 +18,8 @@
#include <linux/platform_device.h>
#include <linux/stmmac.h>
+#include "stmmac_platform.h"
+
#define ETHMAC_SPEED_100 BIT(1)
struct meson_dwmac {
@@ -56,7 +58,7 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->reg))
- return dwmac->reg;
+ return ERR_CAST(dwmac->reg);
return dwmac;
}
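The one-line meson fix above matters because devm_ioremap_resource() encodes failure as an ERR_PTR() of type void __iomem *, while the setup callback returns a plain void *; ERR_CAST() forwards the encoded errno without the type mismatch. A hedged sketch of the same pattern (foo_* names are hypothetical):

static void *foo_setup(struct platform_device *pdev)
{
	struct foo_glue *glue;		/* hypothetical per-device data */
	struct resource *res;

	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return ERR_PTR(-ENOMEM);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	glue->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(glue->reg))
		return ERR_CAST(glue->reg);	/* propagate the encoded errno */

	return glue;
}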
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 3aad413e74b4..e97074cd5800 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -23,7 +23,9 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/stmmac.h>
+
#include "stmmac.h"
+#include "stmmac_platform.h"
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index ccfe7e510418..056b358b4a72 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -1,4 +1,4 @@
-/**
+/*
* dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
*
* Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
@@ -22,6 +22,8 @@
#include <linux/of.h>
#include <linux/of_net.h>
+#include "stmmac_platform.h"
+
#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
#define DWMAC_25MHZ 25000000
@@ -35,9 +37,8 @@
#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
-/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */
-
-/**
+/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families)
+ *
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes with link speeds.
* ________________________________________________
@@ -76,9 +77,7 @@
#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
-/* STiD127 register definitions */
-
-/**
+/* STiD127 register definitions
*-----------------------
* src |BIT(6)| BIT(7)|
*-----------------------
@@ -104,13 +103,13 @@
#define EN_MASK GENMASK(1, 1)
#define EN BIT(1)
-/**
+/*
* 3 bits [4:2]
* 000-GMII/MII
* 001-RGMII
* 010-SGMII
* 100-RMII
-*/
+ */
#define MII_PHY_SEL_MASK GENMASK(4, 2)
#define ETH_PHY_SEL_RMII BIT(4)
#define ETH_PHY_SEL_SGMII BIT(3)
@@ -310,16 +309,16 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
const char *rs;
- dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
err = of_property_read_string(np, "st,tx-retime-src", &rs);
- if (err < 0)
+ if (err < 0) {
dev_warn(dev, "Use internal clock source\n");
-
- if (!strcasecmp(rs, "clk_125"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
+ } else if (!strcasecmp(rs, "clk_125")) {
dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
- else if (!strcasecmp(rs, "txclk"))
+ } else if (!strcasecmp(rs, "txclk")) {
dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
+ }
dwmac->speed = SPEED_1000;
}
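The restructured block above also fixes a latent bug: previously strcasecmp() was run on rs even when of_property_read_string() had failed and left it uninitialized. A reduced sketch of the safe flow, using the driver's TX_RETIME_SRC_* constants and a hypothetical helper name:

static int foo_parse_retime_src(struct device *dev, struct device_node *np)
{
	const char *rs;

	/* only dereference rs when the read actually succeeded */
	if (of_property_read_string(np, "st,tx-retime-src", &rs) < 0) {
		dev_warn(dev, "Use internal clock source\n");
		return TX_RETIME_SRC_CLKGEN;
	}

	if (!strcasecmp(rs, "clk_125"))
		return TX_RETIME_SRC_CLK_125;
	if (!strcasecmp(rs, "txclk"))
		return TX_RETIME_SRC_TXCLK;

	return TX_RETIME_SRC_CLKGEN;
}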
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 771cd15fca18..c5ea9ab75b03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -1,4 +1,4 @@
-/**
+/*
* dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
*
* Copyright (C) 2013 Chen-Yu Tsai
@@ -22,6 +22,8 @@
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
+#include "stmmac_platform.h"
+
struct sunxi_priv_data {
int interface;
int clk_enabled;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0c2058a69fd2..59d92e811750 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,10 +70,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
if (mb)
value |= DMA_BUS_MODE_MB;
-#ifdef CONFIG_STMMAC_DA
- value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
-#endif
-
if (atds)
value |= DMA_BUS_MODE_ATDS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c3c40650b309..c0a391983372 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -122,9 +122,7 @@ int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
+
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct net_device *ndev);
@@ -136,77 +134,4 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
-#ifdef CONFIG_STMMAC_PLATFORM
-#ifdef CONFIG_DWMAC_MESON
-extern const struct stmmac_of_data meson6_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
-extern const struct stmmac_of_data sun7i_gmac_data;
-#endif
-#ifdef CONFIG_DWMAC_STI
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
-extern const struct stmmac_of_data socfpga_gmac_data;
-#endif
-extern struct platform_driver stmmac_pltfr_driver;
-static inline int stmmac_register_platform(void)
-{
- int err;
-
- err = platform_driver_register(&stmmac_pltfr_driver);
- if (err)
- pr_err("stmmac: failed to register the platform driver\n");
-
- return err;
-}
-
-static inline void stmmac_unregister_platform(void)
-{
- platform_driver_unregister(&stmmac_pltfr_driver);
-}
-#else
-static inline int stmmac_register_platform(void)
-{
- pr_debug("stmmac: do not register the platf driver\n");
-
- return 0;
-}
-
-static inline void stmmac_unregister_platform(void)
-{
-}
-#endif /* CONFIG_STMMAC_PLATFORM */
-
-#ifdef CONFIG_STMMAC_PCI
-extern struct pci_driver stmmac_pci_driver;
-static inline int stmmac_register_pci(void)
-{
- int err;
-
- err = pci_register_driver(&stmmac_pci_driver);
- if (err)
- pr_err("stmmac: failed to register the PCI driver\n");
-
- return err;
-}
-
-static inline void stmmac_unregister_pci(void)
-{
- pci_unregister_driver(&stmmac_pci_driver);
-}
-#else
-static inline int stmmac_register_pci(void)
-{
- pr_debug("stmmac: do not register the PCI driver\n");
-
- return 0;
-}
-
-static inline void stmmac_unregister_pci(void)
-{
-}
-#endif /* CONFIG_STMMAC_PCI */
-
#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3a08a1f78c73..771cda2a48b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,7 +696,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
(ec->tx_max_coalesced_frames == 0))
return -EINVAL;
- if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
+ if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
(ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
return -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 18c46bb0f3bf..118a427d1942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -44,10 +44,10 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -116,7 +116,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif
@@ -125,8 +125,8 @@ static void stmmac_exit_fs(void);
/**
* stmmac_verify_args - verify the driver parameters.
- * Description: it verifies if some wrong parameter is passed to the driver.
- * Note that wrong parameters are replaced with the default values.
+ * Description: it checks the driver parameters and sets a default in case of
+ * errors.
*/
static void stmmac_verify_args(void)
{
@@ -191,14 +191,8 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
static void print_pkt(unsigned char *buf, int len)
{
- int j;
- pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
- for (j = 0; j < len; j++) {
- if ((j % 16) == 0)
- pr_debug("\n %03x:", j);
- pr_debug(" %02x", buf[j]);
- }
- pr_debug("\n");
+ pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
/* minimum number of free TX descriptors required to wake up TX process */
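print_pkt() above now delegates the hex dump to the kernel's own helper instead of an open-coded loop. For reference, a minimal standalone use of that helper looks like this (dump_frame() is illustrative):

#include <linux/printk.h>

static void dump_frame(const void *buf, int len)
{
	pr_debug("frame length = %d\n", len);
	/* prints 16 bytes per line, each line prefixed with its offset */
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}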
@@ -210,7 +204,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
}
/**
- * stmmac_hw_fix_mac_speed: callback for speed selection
+ * stmmac_hw_fix_mac_speed - callback for speed selection
* @priv: driver private structure
* Description: on some platforms (e.g. ST), some HW system configuraton
* registers have to be set according to the link speed negotiated.
@@ -224,9 +218,10 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
}
/**
- * stmmac_enable_eee_mode: Check and enter in LPI mode
+ * stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode for EEE.
+ * Description: this function is to verify and enter in LPI mode in case of
+ * EEE.
*/
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
@@ -237,7 +232,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_disable_eee_mode: disable/exit from EEE
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
* @priv: driver private structure
* Description: this function is to exit and disable EEE in case of
* LPI state is true. This is called by the xmit.
@@ -250,7 +245,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_eee_ctrl_timer: EEE TX SW timer.
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
* @arg : data hook
* Description:
* if there is no data transfer and if we are not in LPI state,
@@ -265,13 +260,12 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
}
/**
- * stmmac_eee_init: init EEE
+ * stmmac_eee_init - init EEE
* @priv: driver private structure
* Description:
- * If the EEE support has been enabled while configuring the driver,
- * if the GMAC actually supports the EEE (from the HW cap reg) and the
- * phy can also manage EEE, so enable the LPI state and start the timer
- * to verify if the tx path can enter in LPI state.
+ * if the GMAC supports the EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts the
+ * related timer.
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
@@ -338,7 +332,7 @@ out:
return ret;
}
-/* stmmac_get_tx_hwtstamp: get HW TX timestamps
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
* @skb : the socket buffer
@@ -380,7 +374,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
}
-/* stmmac_get_rx_hwtstamp: get HW RX timestamps
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
* @skb : the socket buffer
@@ -636,11 +630,11 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
}
/**
- * stmmac_init_ptp: init PTP
+ * stmmac_init_ptp - init PTP
* @priv: driver private structure
- * Description: this is to verify if the HW supports the PTPv1 or v2.
+ * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
- * Also it registers the ptp driver.
+ * This function also registers the ptp driver.
*/
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
@@ -682,9 +676,13 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
}
/**
- * stmmac_adjust_link
+ * stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
- * Description: it adjusts the link parameters.
+ * Description: this is the helper called by the physical abstraction layer
+ * drivers to communicate the phy link status. According to the speed and
+ * duplex, this driver can invoke the registered glue-logic as well.
+ * It also invokes the EEE initialization because the link can switch
+ * between networks that are EEE capable.
*/
static void stmmac_adjust_link(struct net_device *dev)
{
@@ -774,7 +772,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/**
- * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
* @priv: driver private structure
* Description: this is to verify if the HW supports the PCS.
* Physical Coding Sublayer (PCS) interface that can be used when the MAC is
@@ -863,7 +861,7 @@ static int stmmac_init_phy(struct net_device *dev)
}
/**
- * stmmac_display_ring: display ring
+ * stmmac_display_ring - display ring
* @head: pointer to the head of the ring passed.
* @size: size of the ring.
* @extend_desc: to verify if extended descriptors are used.
@@ -931,7 +929,7 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
}
/**
- * stmmac_clear_descriptors: clear descriptors
+ * stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
* Description: this function is called to clear the tx and rx descriptors
* in case of both basic and extended descriptors are used.
@@ -963,6 +961,15 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
(i == txsize - 1));
}
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag.
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags)
{
@@ -1007,7 +1014,8 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
- * Description: this function initializes the DMA RX/TX descriptors
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It suppors the chained and ring
* modes.
*/
@@ -1144,6 +1152,14 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
}
}
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extended or basic),
+ * this function allocates the resources for the TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow the zero-copy mechanism.
+ */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
@@ -1255,8 +1271,8 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv: driver private structure
- * Description: it sets the DMA operation mode: tx/rx DMA thresholds
- * or Store-And-Forward capability.
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
@@ -1277,9 +1293,9 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_clean:
+ * stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
- * Description: it reclaims resources after transmission completes.
+ * Description: it reclaims the transmit resources after transmission completes.
*/
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
@@ -1378,10 +1394,10 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_err: irq tx error mng function
+ * stmmac_tx_err - to manage the tx error
* @priv: driver private structure
* Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
+ * in case of transmission errors.
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
@@ -1409,12 +1425,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_interrupt: DMA ISR
+ * stmmac_dma_interrupt - DMA ISR
* @priv: driver private structure
* Description: this is the DMA ISR. It is called by the main ISR.
- * It calls the dwmac dma routine to understand which type of interrupt
- * happened. In case of there is a Normal interrupt and either TX or RX
- * interrupt happened so the NAPI is scheduled.
+ * It calls the dwmac DMA routine and schedules the poll method in case
+ * some work can be done.
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
@@ -1457,6 +1472,12 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
pr_info(" No MAC Management Counters available\n");
}
+/**
+ * stmmac_get_synopsys_id - return the SYINID.
+ * @priv: driver private structure
+ * Description: this simple function decodes and returns the Synopsys ID
+ * (SYINID) read from the HW core register.
+ */
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
u32 hwid = priv->hw->synopsys_uid;
@@ -1475,11 +1496,11 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
}
/**
- * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
+ * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it looks at the extended descriptors are
- * supported by the HW cap. register.
+ * In case of Enhanced/Alternate, it checks if the extended descriptors are
+ * supported by the HW capability register.
*/
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
@@ -1501,7 +1522,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
* @priv: driver private structure
* Description:
* new GMAC chip generations have a new register to indicate the
@@ -1559,7 +1580,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
}
/**
- * stmmac_check_ether_addr: check if the MAC addr is valid
+ * stmmac_check_ether_addr - check if the MAC addr is valid
* @priv: driver private structure
* Description:
* it is to verify if the MAC address is valid, in case of failures it
@@ -1578,7 +1599,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
}
/**
- * stmmac_init_dma_engine: DMA init.
+ * stmmac_init_dma_engine - DMA init.
* @priv: driver private structure
* Description:
* It inits the DMA invoking the specific MAC/GMAC callback.
@@ -1607,7 +1628,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_timer: mitigation sw timer for tx.
+ * stmmac_tx_timer - mitigation sw timer for tx.
* @data: data pointer
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
@@ -1620,7 +1641,7 @@ static void stmmac_tx_timer(unsigned long data)
}
/**
- * stmmac_init_tx_coalesce: init tx mitigation options.
+ * stmmac_init_tx_coalesce - init tx mitigation options.
* @priv: driver private structure
* Description:
* This inits the transmit coalesce parameters: i.e. timer rate,
@@ -1639,10 +1660,13 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
- * stmmac_hw_setup: setup mac in a usable state.
+ * stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
* Description:
- * This function sets up the ip in a usable state.
+ * this is the main function to setup the HW in a usable state: the DMA
+ * engine is reset, the core registers are configured (e.g. AXI,
+ * checksum features, timers) and the DMA is ready to start receiving
+ * and transmitting.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
@@ -1688,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev)
if (ret && ret != -EOPNOTSUPP)
pr_warn("%s: failed PTP initialisation\n", __func__);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
pr_warn("%s: failed debugFS registration\n", __func__);
@@ -1870,7 +1894,7 @@ static int stmmac_release(struct net_device *dev)
netif_carrier_off(dev);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs();
#endif
@@ -1880,7 +1904,7 @@ static int stmmac_release(struct net_device *dev)
}
/**
- * stmmac_xmit: Tx entry point of the driver
+ * stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
* Description : this is the tx entry point of the driver.
@@ -2055,7 +2079,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
/**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
* Description : this is to reallocate the skb for the reception process
* that is based on zero-copy.
@@ -2106,7 +2130,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
/**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx - manage the receive process
* @priv: driver private structure
* @limit: napi bugget.
* Description : this the function called by the napi poll method.
@@ -2375,8 +2399,11 @@ static int stmmac_set_features(struct net_device *netdev,
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the main driver interrupt service routine.
- * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
- * interrupts.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
*/
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
@@ -2457,7 +2484,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
@@ -2642,7 +2669,7 @@ static void stmmac_exit_fs(void)
debugfs_remove(stmmac_dma_cap);
debugfs_remove(stmmac_fs_dir);
}
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -2663,11 +2690,10 @@ static const struct net_device_ops stmmac_netdev_ops = {
/**
* stmmac_hw_init - Init the MAC device
* @priv: driver private structure
- * Description: this function detects which MAC device
- * (GMAC/MAC10-100) has to attached, checks the HW capability
- * (if supported) and sets the driver's features (for example
- * to use the ring or chaine mode or support the normal/enh
- * descriptor structure).
+ * Description: this function is to configure the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either ring or chain modes and to setup either enhanced or
+ * normal descriptors.
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
@@ -2891,6 +2917,7 @@ error_clk_get:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
@@ -2920,8 +2947,15 @@ int stmmac_dvr_remove(struct net_device *ndev)
return 0;
}
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
-#ifdef CONFIG_PM
+/**
+ * stmmac_suspend - suspend callback
+ * @ndev: net device pointer
+ * Description: this is the function to suspend the device and it is called
+ * by the platform driver to stop the network queue, program the PMT
+ * register (for WoL) and clean and release the driver resources.
+ */
int stmmac_suspend(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -2963,7 +2997,14 @@ int stmmac_suspend(struct net_device *ndev)
priv->oldduplex = -1;
return 0;
}
+EXPORT_SYMBOL_GPL(stmmac_suspend);
+/**
+ * stmmac_resume - resume callback
+ * @ndev: net device pointer
+ * Description: on resume, this function is invoked to setup the DMA and CORE
+ * in a usable state.
+ */
int stmmac_resume(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -3009,37 +3050,7 @@ int stmmac_resume(struct net_device *ndev)
return 0;
}
-#endif /* CONFIG_PM */
-
-/* Driver can be configured w/ and w/ both PCI and Platf drivers
- * depending on the configuration selected.
- */
-static int __init stmmac_init(void)
-{
- int ret;
-
- ret = stmmac_register_platform();
- if (ret)
- goto err;
- ret = stmmac_register_pci();
- if (ret)
- goto err_pci;
- return 0;
-err_pci:
- stmmac_unregister_platform();
-err:
- pr_err("stmmac: driver registration failed\n");
- return ret;
-}
-
-static void __exit stmmac_exit(void)
-{
- stmmac_unregister_platform();
- stmmac_unregister_pci();
-}
-
-module_init(stmmac_init);
-module_exit(stmmac_exit);
+EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
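With the driver now split into a core module and thin bus-specific modules, the inline registration helpers removed from stmmac.h are no longer needed: the core exports its entry points (stmmac_dvr_probe/remove, suspend/resume above) and each bus module registers itself. A minimal, hypothetical sketch of the pattern (demo_* names are illustrative):

/* core module: exports the shared entry points */
int demo_core_probe(struct device *dev, void __iomem *ioaddr)
{
	/* common MAC bring-up shared by all buses */
	return 0;
}
EXPORT_SYMBOL_GPL(demo_core_probe);

/* bus glue module: only knows how to find its own resources */
static int demo_plat_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base))
		return PTR_ERR(base);

	return demo_core_probe(&pdev->dev, base);
}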
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index e17a970eaf2b..054520d67de4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,34 +26,26 @@
#include <linux/pci.h>
#include "stmmac.h"
-static struct plat_stmmacenet_data plat_dat;
-static struct stmmac_mdio_bus_data mdio_data;
-static struct stmmac_dma_cfg dma_cfg;
-
-static void stmmac_default_data(void)
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
{
- memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
-
- plat_dat.bus_id = 1;
- plat_dat.phy_addr = 0;
- plat_dat.interface = PHY_INTERFACE_MODE_GMII;
- plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat_dat.has_gmac = 1;
- plat_dat.force_sf_dma_mode = 1;
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
- mdio_data.phy_reset = NULL;
- mdio_data.phy_mask = 0;
- plat_dat.mdio_bus_data = &mdio_data;
+ plat->mdio_bus_data->phy_reset = NULL;
+ plat->mdio_bus_data->phy_mask = 0;
- dma_cfg.pbl = 32;
- dma_cfg.burst_len = DMA_AXI_BLEN_256;
- plat_dat.dma_cfg = &dma_cfg;
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
/* Set default value for multicast hash bins */
- plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
/* Set default value for unicast filter entries */
- plat_dat.unicast_filter_entries = 1;
+ plat->unicast_filter_entries = 1;
}
/**
@@ -71,64 +63,61 @@ static void stmmac_default_data(void)
static int stmmac_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int ret = 0;
- void __iomem *addr = NULL;
- struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_priv *priv;
int i;
+ int ret;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
- pr_err("%s : ERROR: failed to enable %s device\n", __func__,
- pci_name(pdev));
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
return ret;
}
- if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
- pr_err("%s: ERROR: failed to get PCI region\n", __func__);
- ret = -ENODEV;
- goto err_out_req_reg_failed;
- }
/* Get the base address of device */
- for (i = 0; i <= 5; i++) {
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- addr = pci_iomap(pdev, i, 0);
- if (addr == NULL) {
- pr_err("%s: ERROR: cannot map register memory aborting",
- __func__);
- ret = -EIO;
- goto err_out_map_failed;
- }
+ ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+ if (ret)
+ return ret;
break;
}
+
pci_set_master(pdev);
- stmmac_default_data();
+ stmmac_default_data(plat);
- priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
+ priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
if (IS_ERR(priv)) {
- pr_err("%s: main driver probe failed", __func__);
- ret = PTR_ERR(priv);
- goto err_out;
+ dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
+ return PTR_ERR(priv);
}
priv->dev->irq = pdev->irq;
priv->wol_irq = pdev->irq;
pci_set_drvdata(pdev, priv->dev);
- pr_debug("STMMAC platform driver registration completed");
+ dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
return 0;
-
-err_out:
- pci_clear_master(pdev);
-err_out_map_failed:
- pci_release_regions(pdev);
-err_out_req_reg_failed:
- pci_disable_device(pdev);
-
- return ret;
}
/**
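The rewritten PCI probe above leans on managed ("devm"/"pcim") resources, which is why all of the err_out_* unwinding labels disappear: anything obtained through the managed APIs is released automatically when probe fails or the device is unbound. A hedged, reduced sketch of the same style (demo_* names, including demo_core_probe() and demo_plat_data, are hypothetical):

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct demo_plat_data *plat;	/* hypothetical per-device data */
	int ret;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* map BAR 0; the mapping is torn down automatically on detach */
	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	return demo_core_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[0]);
}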
@@ -141,39 +130,30 @@ err_out_req_reg_failed:
static void stmmac_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
stmmac_dvr_remove(ndev);
-
- pci_iounmap(pdev, priv->ioaddr);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int stmmac_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *ndev = pci_get_drvdata(pdev);
- int ret;
-
- ret = stmmac_suspend(ndev);
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return ret;
+ return stmmac_suspend(ndev);
}
-static int stmmac_pci_resume(struct pci_dev *pdev)
+static int stmmac_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *ndev = pci_get_drvdata(pdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return stmmac_resume(ndev);
}
#endif
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+
#define STMMAC_VENDOR_ID 0x700
#define STMMAC_DEVICE_ID 0x1108
@@ -185,17 +165,18 @@ static const struct pci_device_id stmmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, stmmac_id_table);
-struct pci_driver stmmac_pci_driver = {
+static struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
-#ifdef CONFIG_PM
- .suspend = stmmac_pci_suspend,
- .resume = stmmac_pci_resume,
-#endif
+ .driver = {
+ .pm = &stmmac_pm_ops,
+ },
};
+module_pci_driver(stmmac_pci_driver);
+
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 58a1a0a423d4..4032b170fe24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -27,25 +27,19 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+
#include "stmmac.h"
+#include "stmmac_platform.h"
static const struct of_device_id stmmac_dt_ids[] = {
-#ifdef CONFIG_DWMAC_MESON
+ /* SoC specific glue layers should come before generic bindings */
{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-#endif
-#ifdef CONFIG_DWMAC_STI
{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-#endif
- /* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
@@ -57,7 +51,11 @@ MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
#ifdef CONFIG_OF
-/* This function validates the number of Multicast filtering bins specified
+/**
+ * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
+ * @mcast_bins: Multicast filtering bins
+ * Description:
+ * this function validates the number of Multicast filtering bins specified
* by the configuration through the device tree. The Synopsys GMAC supports
* 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
* number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
@@ -83,7 +81,11 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
return x;
}
-/* This function validates the number of Unicast address entries supported
+/**
+ * dwmac1000_validate_ucast_entries - validate the Unicast address entries
+ * @ucast_entries: number of Unicast address entries
+ * Description:
+ * This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
* supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
* logic. This function validates a valid, supported configuration is
@@ -109,6 +111,15 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
return x;
}
+/**
+ * stmmac_probe_config_dt - parse device-tree driver parameters
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ * @mac: MAC address to use
+ * Description:
+ * this function reads the driver parameters from the device-tree and
+ * sets some private fields that will be used by the main driver at runtime.
+ */
static int stmmac_probe_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
const char **mac)
@@ -242,11 +253,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
#endif /* CONFIG_OF */
/**
- * stmmac_pltfr_probe
+ * stmmac_pltfr_probe - platform driver probe.
* @pdev: platform device pointer
- * Description: platform_device probe function. It allocates
- * the necessary resources and invokes the main to init
- * the net device, register the mdio bus etc.
+ * Description: platform_device probe function. It allocates
+ * the necessary platform resources, invokes the custom helper (if required)
+ * and invokes the main probe function.
*/
static int stmmac_pltfr_probe(struct platform_device *pdev)
{
@@ -369,7 +380,14 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+/**
+ * stmmac_pltfr_suspend
+ * @dev: device pointer
+ * Description: this function is invoked to suspend the driver; it directly
+ * calls the main suspend function and then, if required on some platforms,
+ * it can call an exit helper.
+ */
static int stmmac_pltfr_suspend(struct device *dev)
{
int ret;
@@ -384,6 +402,13 @@ static int stmmac_pltfr_suspend(struct device *dev)
return ret;
}
+/**
+ * stmmac_pltfr_resume
+ * @dev: device pointer
+ * Description: this function is invoked to resume the driver; before calling
+ * the main resume function, on some platforms, it can call its own init
+ * helper if required.
+ */
static int stmmac_pltfr_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -395,13 +420,12 @@ static int stmmac_pltfr_resume(struct device *dev)
return stmmac_resume(ndev);
}
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
- stmmac_pltfr_suspend, stmmac_pltfr_resume);
+ stmmac_pltfr_suspend, stmmac_pltfr_resume);
-struct platform_driver stmmac_pltfr_driver = {
+static struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
.driver = {
@@ -409,9 +433,11 @@ struct platform_driver stmmac_pltfr_driver = {
.owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
- },
+ },
};
+module_platform_driver(stmmac_pltfr_driver);
+
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
new file mode 100644
index 000000000000..25dd1f7ace02
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -0,0 +1,28 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_PLATFORM_H__
+#define __STMMAC_PLATFORM_H__
+
+extern const struct stmmac_of_data meson6_dwmac_data;
+extern const struct stmmac_of_data sun7i_gmac_data;
+extern const struct stmmac_of_data stih4xx_dwmac_data;
+extern const struct stmmac_of_data stid127_dwmac_data;
+extern const struct stmmac_of_data socfpga_gmac_data;
+
+#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 02d370e58110..3dc1f68b322d 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -5179,8 +5179,7 @@ static void cas_remove_one(struct pci_dev *pdev)
cp = netdev_priv(dev);
unregister_netdev(dev);
- if (cp->fw_data)
- vfree(cp->fw_data);
+ vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
cancel_work_sync(&cp->reset_task);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 904fd1ab5f6e..0c6416213837 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6651,13 +6651,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->len < ETH_ZLEN) {
- unsigned int pad_bytes = ETH_ZLEN - skb->len;
-
- if (skb_pad(skb, pad_bytes))
- goto out;
- skb_put(skb, pad_bytes);
- }
+ if (eth_skb_pad(skb))
+ goto out;
len = sizeof(struct tx_pkt_hdr) + 15;
if (skb_headroom(skb) < len) {
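The niu change above swaps an open-coded ETH_ZLEN pad for eth_skb_pad(), which pads the frame to the 60-byte Ethernet minimum and frees the skb itself if the padding allocation fails. A reduced usage sketch (demo_xmit() is illustrative):

#include <linux/etherdevice.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* on failure the helper has already freed the skb */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	/* ... hand the (now >= ETH_ZLEN byte) frame to the hardware ... */
	return NETDEV_TX_OK;
}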
@@ -10185,7 +10180,6 @@ MODULE_DEVICE_TABLE(of, niu_match);
static struct platform_driver niu_of_driver = {
.driver = {
.name = "niu",
- .owner = THIS_MODULE,
.of_match_table = niu_match,
},
.probe = niu_of_probe,
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 206c1063815a..aa4f9d2d8fa9 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1273,7 +1273,6 @@ MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
static struct platform_driver bigmac_sbus_driver = {
.driver = {
.name = "sunbmac",
- .owner = THIS_MODULE,
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 9c014803b03b..7a8ca2c7b7df 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -3323,7 +3323,6 @@ MODULE_DEVICE_TABLE(of, hme_sbus_match);
static struct platform_driver hme_sbus_driver = {
.driver = {
.name = "hme",
- .owner = THIS_MODULE,
.of_match_table = hme_sbus_match,
},
.probe = hme_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 5695ae2411de..9b825780b3be 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -963,7 +963,6 @@ MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
- .owner = THIS_MODULE,
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3652afd3ec78..45c408ef67d0 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -15,12 +15,14 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
+#include <linux/highmem.h>
#include <linux/if_vlan.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif
+#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
@@ -40,6 +42,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+#define VNET_MAX_TXQS 16
+
/* Heuristic for the number of times to exponentially backoff and
* retry sending an LDC trigger when EAGAIN is encountered
*/
@@ -49,6 +53,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 8 },
+ { .major = 1, .minor = 7 },
{ .major = 1, .minor = 6 },
{ .major = 1, .minor = 0 },
};
@@ -71,13 +77,19 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
return -ECONNRESET;
}
+static int vnet_port_alloc_tx_ring(struct vnet_port *port);
+
static int vnet_send_attr(struct vio_driver_state *vio)
{
struct vnet_port *port = to_vnet_port(vio);
struct net_device *dev = port->vp->dev;
struct vio_net_attr_info pkt;
int framelen = ETH_FRAME_LEN;
- int i;
+ int i, err;
+
+ err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
+ if (err)
+ return err;
memset(&pkt, 0, sizeof(pkt));
pkt.tag.type = VIO_TYPE_CTRL;
@@ -108,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio)
pkt.mtu = framelen + VLAN_HLEN;
}
- pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
pkt.cflags = 0;
+ if (vio_version_after_eq(vio, 1, 7) && port->tso) {
+ pkt.cflags |= VNET_LSO_IPV4_CAPAB;
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ pkt.ipv4_lso_maxlen = port->tsolen;
+ }
+
+ pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
"ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
@@ -163,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio,
}
port->rmtu = localmtu;
+ /* LSO negotiation */
+ if (vio_version_after_eq(vio, 1, 7))
+ port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
+ else
+ port->tso = false;
+ if (port->tso) {
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
+ if (port->tsolen < VNET_MINTSO) {
+ port->tso = false;
+ port->tsolen = 0;
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ }
+ pkt->ipv4_lso_maxlen = port->tsolen;
+ } else {
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ pkt->ipv4_lso_maxlen = 0;
+ }
+
/* for version >= 1.6, ACK packet mode we support */
if (vio_version_after_eq(vio, 1, 6)) {
pkt->xfer_mode = VIO_NEW_DRING_MODE;
@@ -274,10 +313,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
return skb;
}
-static int vnet_rx_one(struct vnet_port *port, unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies)
+static inline void vnet_fullcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = skb_transport_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 1;
+ skb->csum = 0;
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *ptcp = tcp_hdr(skb);
+
+ ptcp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *pudp = udp_hdr(skb);
+
+ pudp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_UDP,
+ skb->csum);
+ }
+}
+
+static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
struct net_device *dev = port->vp->dev;
+ unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
int err;
@@ -299,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_put(skb, copy_len);
err = ldc_copy(port->vio.lp, LDC_COPY_IN,
skb->data, copy_len, 0,
- cookies, ncookies);
+ desc->cookies, desc->ncookies);
if (unlikely(err < 0)) {
dev->stats.rx_frame_errors++;
goto out_free_skb;
@@ -309,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ if (vio_version_after_eq(&port->vio, 1, 8)) {
+ struct vio_net_dext *dext = vio_net_ext(desc);
- netif_rx(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
+ if (skb->protocol == ETH_P_IP) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ iph->check = 0;
+ ip_send_check(iph);
+ }
+ }
+ if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
+ skb->ip_summed == CHECKSUM_NONE)
+ vnet_fullcsum(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_level = 0;
+ if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
+ skb->csum_level = 1;
+ }
+ }
+
+ skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ napi_gro_receive(&port->napi, skb);
return 0;
out_free_skb:
@@ -373,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
return err;
}
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (++idx == dr->num_entries)
- idx = 0;
- return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (idx == 0)
- idx = dr->num_entries - 1;
- else
- idx--;
-
- return idx;
-}
-
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
struct vio_dring_state *dr,
u32 index)
@@ -430,6 +506,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
struct vio_driver_state *vio = &port->vio;
int err;
+ BUG_ON(desc == NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -444,7 +521,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ err = vnet_rx_one(port, desc);
if (err == -ECONNRESET)
return err;
desc->hdr.state = VIO_DESC_DONE;
@@ -456,12 +533,14 @@ static int vnet_walk_rx_one(struct vnet_port *port,
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end)
+ u32 start, u32 end, int *npkts, int budget)
{
struct vio_driver_state *vio = &port->vio;
int ack_start = -1, ack_end = -1;
+ bool send_ack = true;
- end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+ end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+ : vio_dring_next(dr, end);
viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
@@ -471,10 +550,11 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
if (err != 0)
break;
+ (*npkts)++;
if (ack_start == -1)
ack_start = start;
ack_end = start;
- start = next_idx(start, dr);
+ start = vio_dring_next(dr, start);
if (ack && start != end) {
err = vnet_send_ack(port, dr, ack_start, ack_end,
VIO_DRING_ACTIVE);
@@ -482,13 +562,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
ack_start = -1;
}
+ if ((*npkts) >= budget) {
+ send_ack = false;
+ break;
+ }
}
if (unlikely(ack_start == -1))
- ack_start = ack_end = prev_idx(start, dr);
- return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+ ack_start = ack_end = vio_dring_prev(dr, start);
+ if (send_ack) {
+ port->napi_resume = false;
+ return vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_STOPPED);
+ } else {
+ port->napi_resume = true;
+ port->napi_stop_idx = ack_end;
+ return 1;
+ }
}
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+ int budget)
{
struct vio_dring_data *pkt = msgbuf;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
@@ -505,11 +598,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
return 0;
}
- dr->rcv_nxt++;
+ if (!port->napi_resume)
+ dr->rcv_nxt++;
/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
- return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+ npkts, budget);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
@@ -522,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
found = 1;
break;
}
- idx = next_idx(idx, dr);
+ idx = vio_dring_next(dr, idx);
}
return found;
}
@@ -535,19 +630,26 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
struct vnet *vp;
u32 end;
struct vio_net_desc *desc;
+ struct netdev_queue *txq;
+
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
end = pkt->end_idx;
- if (unlikely(!idx_is_pending(dr, end)))
+ vp = port->vp;
+ dev = vp->dev;
+ netif_tx_lock(dev);
+ if (unlikely(!idx_is_pending(dr, end))) {
+ netif_tx_unlock(dev);
return 0;
+ }
/* sync for race conditions with vnet_start_xmit() and tell xmit it
* is time to send a trigger.
*/
- dr->cons = next_idx(end, dr);
+ dr->cons = vio_dring_next(dr, end);
desc = vio_dring_entry(dr, dr->cons);
- if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
+ if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
/* vnet_start_xmit() just populated this dring but missed
* sending the "start" LDC message to the consumer.
* Send a "start" trigger on its behalf.
@@ -559,11 +661,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
} else {
port->start_cons = true;
}
+ netif_tx_unlock(dev);
-
- vp = port->vp;
- dev = vp->dev;
- if (unlikely(netif_queue_stopped(dev) &&
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
return 1;
@@ -591,58 +692,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
return 0;
}
-static void maybe_tx_wakeup(unsigned long param)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
{
- struct vnet *vp = (struct vnet *)param;
- struct net_device *dev = vp->dev;
-
- netif_tx_lock(dev);
- if (likely(netif_queue_stopped(dev))) {
- struct vnet_port *port;
- int wake = 1;
+ struct netdev_queue *txq;
- list_for_each_entry(port, &vp->port_list, list) {
- struct vio_dring_state *dr;
+ txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (likely(netif_tx_queue_stopped(txq))) {
+ struct vio_dring_state *dr;
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (vnet_tx_dring_avail(dr) <
- VNET_TX_WAKEUP_THRESH(dr)) {
- wake = 0;
- break;
- }
- }
- if (wake)
- netif_wake_queue(dev);
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ netif_tx_wake_queue(txq);
}
- netif_tx_unlock(dev);
+ __netif_tx_unlock(txq);
}
-static void vnet_event(void *arg, int event)
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
{
- struct vnet_port *port = arg;
struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
int tx_wakeup, err;
+ int npkts = 0;
+ int event = (port->rx_event & LDC_EVENT_RESET);
- spin_lock_irqsave(&vio->lock, flags);
-
+ldc_ctrl:
if (unlikely(event == LDC_EVENT_RESET ||
event == LDC_EVENT_UP)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
if (event == LDC_EVENT_RESET) {
port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
vio_port_up(vio);
}
- return;
+ port->rx_event = 0;
+ return 0;
}
+ /* We may have multiple LDC events in rx_event. Unroll send_events() */
+ event = (port->rx_event & LDC_EVENT_UP);
+ port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
+ if (event == LDC_EVENT_UP)
+ goto ldc_ctrl;
+ event = port->rx_event;
+ if (!(event & LDC_EVENT_DATA_READY))
+ return 0;
- if (unlikely(event != LDC_EVENT_DATA_READY)) {
- pr_warn("Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
- }
+	/* we don't expect any other bits than RESET, UP, DATA_READY */
+ BUG_ON(event != LDC_EVENT_DATA_READY);
tx_wakeup = err = 0;
while (1) {
@@ -651,6 +758,20 @@ static void vnet_event(void *arg, int event)
u64 raw[8];
} msgbuf;
+ if (port->napi_resume) {
+ struct vio_dring_data *pkt =
+ (struct vio_dring_data *)&msgbuf;
+ struct vio_dring_state *dr =
+ &port->vio.drings[VIO_DRIVER_RX_RING];
+
+ pkt->tag.type = VIO_TYPE_DATA;
+ pkt->tag.stype = VIO_SUBTYPE_INFO;
+ pkt->tag.stype_env = VIO_DRING_DATA;
+ pkt->seq = dr->rcv_nxt;
+ pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
+ pkt->end_idx = -1;
+ goto napi_resume;
+ }
err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
@@ -667,10 +788,22 @@ static void vnet_event(void *arg, int event)
err = vio_validate_sid(vio, &msgbuf.tag);
if (err < 0)
break;
-
+napi_resume:
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- err = vnet_rx(port, &msgbuf);
+ if (!port_is_up(port)) {
+ /* failures like handshake_failure()
+ * may have cleaned up dring, but
+ * NAPI polling may bring us here.
+ */
+ err = -ECONNRESET;
+ break;
+ }
+ err = vnet_rx(port, &msgbuf, &npkts, budget);
+ if (npkts >= budget)
+ break;
+ if (npkts == 0)
+ break;
} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
err = vnet_ack(port, &msgbuf);
if (err > 0)
@@ -691,15 +824,34 @@ static void vnet_event(void *arg, int event)
if (err == -ECONNRESET)
break;
}
- spin_unlock(&vio->lock);
- /* Kick off a tasklet to wake the queue. We cannot call
- * maybe_tx_wakeup directly here because we could deadlock on
- * netif_tx_lock() with dev_watchdog()
- */
if (unlikely(tx_wakeup && err != -ECONNRESET))
- tasklet_schedule(&port->vp->vnet_tx_wakeup);
+ maybe_tx_wakeup(port);
+ return npkts;
+}
+
+static int vnet_poll(struct napi_struct *napi, int budget)
+{
+ struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+ struct vio_driver_state *vio = &port->vio;
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+ napi_complete(napi);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+ return processed;
+}
+
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+
+ port->rx_event |= event;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+ napi_schedule(&port->napi);
- local_irq_restore(flags);
}
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
@@ -746,26 +898,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
return err;
}
-static inline bool port_is_up(struct vnet_port *vnet)
-{
- struct vio_driver_state *vio = &vnet->vio;
-
- return !!(vio->hs_state & VIO_HS_COMPLETE);
-}
-
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
struct vnet_port *port;
- hlist_for_each_entry(port, hp, hash) {
+ hlist_for_each_entry_rcu(port, hp, hash) {
if (!port_is_up(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- list_for_each_entry(port, &vp->port_list, list) {
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
if (!port_is_up(port))
@@ -775,18 +920,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
return NULL;
}
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
- struct vnet_port *ret;
- unsigned long flags;
-
- spin_lock_irqsave(&vp->lock, flags);
- ret = __tx_port_find(vp, skb);
- spin_unlock_irqrestore(&vp->lock, flags);
-
- return ret;
-}
-
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
unsigned *pending)
{
@@ -847,11 +980,10 @@ static void vnet_clean_timer_expire(unsigned long port0)
struct vnet_port *port = (struct vnet_port *)port0;
struct sk_buff *freeskbs;
unsigned pending;
- unsigned long flags;
- spin_lock_irqsave(&port->vio.lock, flags);
+ netif_tx_lock(port->vp->dev);
freeskbs = vnet_clean_tx_ring(port, &pending);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ netif_tx_unlock(port->vp->dev);
vnet_free_skbs(freeskbs);
@@ -862,11 +994,54 @@ static void vnet_clean_timer_expire(unsigned long port0)
del_timer(&port->clean_timer);
}
-static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
- int *plen)
+static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm)
+{
+ int i, nc, err, blen;
+
+ /* header */
+ blen = skb_headlen(skb);
+ if (blen < ETH_ZLEN)
+ blen = ETH_ZLEN;
+ blen += VNET_PACKET_SKIP;
+ blen += 8 - (blen & 7);
+
+ err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
+ ncookies, map_perm);
+ if (err < 0)
+ return err;
+ nc = err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ if (nc < ncookies) {
+ vaddr = kmap_atomic(skb_frag_page(f));
+ blen = skb_frag_size(f);
+ blen += 8 - (blen & 7);
+ err = ldc_map_single(lp, vaddr + f->page_offset,
+ blen, cookies + nc, ncookies - nc,
+ map_perm);
+ kunmap_atomic(vaddr);
+ } else {
+ err = -EMSGSIZE;
+ }
+
+ if (err < 0) {
+ ldc_unmap(lp, cookies, nc);
+ return err;
+ }
+ nc += err;
+ }
+ return nc;
+}
+
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
struct sk_buff *nskb;
- int len, pad;
+ int i, len, pad, docopy;
len = skb->len;
pad = 0;
@@ -876,51 +1051,223 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
}
len += VNET_PACKET_SKIP;
pad += 8 - (len & 7);
- len += 8 - (len & 7);
+ /* make sure we have enough cookies and alignment in every frag */
+ docopy = skb_shinfo(skb)->nr_frags >= ncookies;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ docopy |= f->page_offset & 7;
+ }
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
- skb_headroom(skb) < VNET_PACKET_SKIP) {
- nskb = alloc_and_align_skb(skb->dev, skb->len);
+ skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
+ int start = 0, offset;
+ __wsum csum;
+
+ len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ nskb = alloc_and_align_skb(skb->dev, len);
+ if (nskb == NULL) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_reserve(nskb, VNET_PACKET_SKIP);
- if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+
+ nskb->protocol = skb->protocol;
+ offset = skb_mac_header(skb) - skb->data;
+ skb_set_mac_header(nskb, offset);
+ offset = skb_network_header(skb) - skb->data;
+ skb_set_network_header(nskb, offset);
+ offset = skb_transport_header(skb) - skb->data;
+ skb_set_transport_header(nskb, offset);
+
+ offset = 0;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->ip_summed = skb->ip_summed;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ start = skb_checksum_start_offset(skb);
+ if (start) {
+ struct iphdr *iph = ip_hdr(nskb);
+ int offset = start + nskb->csum_offset;
+
+ if (skb_copy_bits(skb, 0, nskb->data, start)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ *(__sum16 *)(skb->data + offset) = 0;
+ csum = skb_copy_and_csum_bits(skb, start,
+ nskb->data + start,
+ skb->len - start, 0);
+ if (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) {
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - start,
+ iph->protocol, csum);
+ }
+ *(__sum16 *)(nskb->data + offset) = csum;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+ } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
dev_kfree_skb(nskb);
dev_kfree_skb(skb);
return NULL;
}
(void)skb_put(nskb, skb->len);
+ if (skb_is_gso(skb)) {
+ skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+ }
dev_kfree_skb(skb);
skb = nskb;
}
-
- *pstart = skb->data - VNET_PACKET_SKIP;
- *plen = len;
return skb;
}
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = __tx_port_find(vp, skb);
+
+ if (port == NULL)
+ return 0;
+ return port->q_index;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
+{
+ struct net_device *dev = port->vp->dev;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *segs;
+ int maclen, datalen;
+ int status;
+ int gso_size, gso_type, gso_segs;
+ int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+ int proto = IPPROTO_IP;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ if (proto == IPPROTO_TCP)
+ hlen += tcp_hdr(skb)->doff * 4;
+ else if (proto == IPPROTO_UDP)
+ hlen += sizeof(struct udphdr);
+ else {
+ pr_err("vnet_handle_offloads GSO with unknown transport "
+ "protocol %d tproto %d\n", skb->protocol, proto);
+ hlen = 128; /* XXX */
+ }
+ datalen = port->tsolen - hlen;
+
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_type = skb_shinfo(skb)->gso_type;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (port->tso && gso_size < datalen)
+ gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
+
+ if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
+ return NETDEV_TX_BUSY;
+ netif_tx_wake_queue(txq);
+ }
+
+ maclen = skb_network_header(skb) - skb_mac_header(skb);
+ skb_pull(skb, maclen);
+
+ if (port->tso && gso_size < datalen) {
+ /* segment to TSO size */
+ skb_shinfo(skb)->gso_size = datalen;
+ skb_shinfo(skb)->gso_segs = gso_segs;
+
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+
+ /* restore gso_size & gso_segs */
+ skb_shinfo(skb)->gso_size = gso_size;
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
+ gso_size);
+ } else
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, maclen);
+ skb_reset_mac_header(skb);
+
+ status = 0;
+ while (segs) {
+ struct sk_buff *curr = segs;
+
+ segs = segs->next;
+ curr->next = NULL;
+ if (port->tso && curr->len > dev->mtu) {
+ skb_shinfo(curr)->gso_size = gso_size;
+ skb_shinfo(curr)->gso_type = gso_type;
+ skb_shinfo(curr)->gso_segs =
+ DIV_ROUND_UP(curr->len - hlen, gso_size);
+ } else
+ skb_shinfo(curr)->gso_size = 0;
+
+ skb_push(curr, maclen);
+ skb_reset_mac_header(curr);
+ memcpy(skb_mac_header(curr), skb_mac_header(skb),
+ maclen);
+ curr->csum_start = skb_transport_header(curr) - curr->head;
+ if (ip_hdr(curr)->protocol == IPPROTO_TCP)
+ curr->csum_offset = offsetof(struct tcphdr, check);
+ else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
+ curr->csum_offset = offsetof(struct udphdr, check);
+
+ if (!(status & NETDEV_TX_MASK))
+ status = vnet_start_xmit(curr, dev);
+ if (status & NETDEV_TX_MASK)
+ dev_kfree_skb_any(curr);
+ }
+
+ if (!(status & NETDEV_TX_MASK))
+ dev_kfree_skb_any(skb);
+ return status;
+}
+
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = tx_port_find(vp, skb);
+ struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
- unsigned long flags;
unsigned int len;
struct sk_buff *freeskbs = NULL;
int i, err, txi;
- void *start = NULL;
- int nlen = 0;
unsigned pending = 0;
+ struct netdev_queue *txq;
- if (unlikely(!port))
+ rcu_read_lock();
+ port = __tx_port_find(vp, skb);
+ if (unlikely(!port)) {
+ rcu_read_unlock();
goto out_dropped;
+ }
- skb = vnet_skb_shape(skb, &start, &nlen);
-
- if (unlikely(!skb))
- goto out_dropped;
+ if (skb_is_gso(skb) && skb->len > port->tsolen) {
+ err = vnet_handle_offloads(port, skb);
+ rcu_read_unlock();
+ return err;
+ }
- if (skb->len > port->rmtu) {
+ if (!skb_is_gso(skb) && skb->len > port->rmtu) {
unsigned long localmtu = port->rmtu - ETH_HLEN;
if (vio_version_after_eq(&port->vio, 1, 3))
@@ -937,6 +1284,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
fl4.saddr = ip_hdr(skb)->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
+ rcu_read_unlock();
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -951,18 +1299,26 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped;
}
- spin_lock_irqsave(&port->vio.lock, flags);
+ skb = vnet_skb_shape(skb, 2);
+
+ if (unlikely(!skb))
+ goto out_dropped;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vnet_fullcsum(skb);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ rcu_read_unlock();
return NETDEV_TX_BUSY;
}
@@ -978,16 +1334,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (len < ETH_ZLEN)
len = ETH_ZLEN;
- port->tx_bufs[txi].skb = skb;
- skb = NULL;
-
- err = ldc_map_single(port->vio.lp, start, nlen,
- port->tx_bufs[txi].cookies, VNET_MAXCOOKIES,
- (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+ err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
+ (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
if (err < 0) {
netdev_info(dev, "tx buffer map error %d\n", err);
- goto out_dropped_unlock;
+ goto out_dropped;
}
+
+ port->tx_bufs[txi].skb = skb;
+ skb = NULL;
port->tx_bufs[txi].ncookies = err;
/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
@@ -1003,6 +1358,21 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
d->ncookies = port->tx_bufs[txi].ncookies;
for (i = 0; i < d->ncookies; i++)
d->cookies[i] = port->tx_bufs[txi].cookies[i];
+ if (vio_version_after_eq(&port->vio, 1, 7)) {
+ struct vio_net_dext *dext = vio_net_ext(d);
+
+ memset(dext, 0, sizeof(*dext));
+ if (skb_is_gso(port->tx_bufs[txi].skb)) {
+ dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
+ ->gso_size;
+ dext->flags |= VNET_PKT_IPV4_LSO;
+ }
+ if (vio_version_after_eq(&port->vio, 1, 8) &&
+ !port->switch_port) {
+ dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
+ dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
+ }
+ }
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
@@ -1039,7 +1409,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
- goto out_dropped_unlock;
+ goto out_dropped;
}
ldc_start_done:
@@ -1050,31 +1420,29 @@ ldc_start_done:
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(txq);
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+ rcu_read_unlock();
vnet_free_skbs(freeskbs);
- (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
-
return NETDEV_TX_OK;
-out_dropped_unlock:
- spin_unlock_irqrestore(&port->vio.lock, flags);
-
out_dropped:
- if (skb)
- dev_kfree_skb(skb);
- vnet_free_skbs(freeskbs);
if (pending)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
del_timer(&port->clean_timer);
+ if (port)
+ rcu_read_unlock();
+ if (skb)
+ dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1087,14 +1455,14 @@ static void vnet_tx_timeout(struct net_device *dev)
static int vnet_open(struct net_device *dev)
{
netif_carrier_on(dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
static int vnet_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
return 0;
@@ -1204,18 +1572,17 @@ static void vnet_set_rx_mode(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
- unsigned long flags;
- spin_lock_irqsave(&vp->lock, flags);
- if (!list_empty(&vp->port_list)) {
- port = list_entry(vp->port_list.next, struct vnet_port, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (port->switch_port) {
__update_mc_list(vp, dev);
__send_mc_list(vp, port);
+ break;
}
}
- spin_unlock_irqrestore(&vp->lock, flags);
+ rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
@@ -1295,18 +1662,20 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
}
}
-static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
+static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
struct vio_dring_state *dr;
- unsigned long len;
+ unsigned long len, elen;
int i, err, ncookies;
void *dring;
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- len = (VNET_TX_RING_SIZE *
- (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2)));
+ elen = sizeof(struct vio_net_desc) +
+ sizeof(struct ldc_trans_cookie) * 2;
+ if (vio_version_after_eq(&port->vio, 1, 7))
+ elen += sizeof(struct vio_net_dext);
+ len = VNET_TX_RING_SIZE * elen;
ncookies = VIO_MAX_RING_COOKIES;
dring = ldc_alloc_exp_dring(port->vio.lp, len,
@@ -1320,8 +1689,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
}
dr->base = dring;
- dr->entry_size = (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2));
+ dr->entry_size = elen;
dr->num_entries = VNET_TX_RING_SIZE;
dr->prod = dr->cons = 0;
port->start_cons = true; /* need an initial trigger */
@@ -1342,6 +1710,21 @@ err_out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+ napi_schedule(&port->napi);
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
@@ -1354,6 +1737,10 @@ static const struct net_device_ops vnet_ops = {
.ndo_tx_timeout = vnet_tx_timeout,
.ndo_change_mtu = vnet_change_mtu,
.ndo_start_xmit = vnet_start_xmit,
+ .ndo_select_queue = vnet_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vnet_poll_controller,
+#endif
};
static struct vnet *vnet_new(const u64 *local_mac)
@@ -1362,7 +1749,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
struct vnet *vp;
int err, i;
- dev = alloc_etherdev(sizeof(*vp));
+ dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1374,7 +1761,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
- tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
vp->dev = dev;
INIT_LIST_HEAD(&vp->port_list);
@@ -1387,6 +1773,10 @@ static struct vnet *vnet_new(const u64 *local_mac)
dev->ethtool_ops = &vnet_ethtool_ops;
dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features = dev->hw_features;
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1434,7 +1824,6 @@ static void vnet_cleanup(void)
vp = list_first_entry(&vnet_list, struct vnet, list);
list_del(&vp->list);
dev = vp->dev;
- tasklet_kill(&vp->vnet_tx_wakeup);
/* vio_unregister_driver() should have cleaned up port_list */
BUG_ON(!list_empty(&vp->port_list));
unregister_netdev(dev);
@@ -1489,6 +1878,25 @@ static void print_version(void)
const char *remote_macaddr_prop = "remote-mac-address";
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+ struct vnet *vp = port->vp;
+ int n;
+
+ n = vp->nports++;
+ n = n & (VNET_MAX_TXQS - 1);
+ port->q_index = n;
+ netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+ port->vp->nports--;
+ netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -1536,9 +1944,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- err = vnet_port_alloc_tx_bufs(port);
- if (err)
- goto err_out_free_ldc;
+ netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
@@ -1547,13 +1953,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
switch_port = 1;
port->switch_port = switch_port;
+ port->tso = true;
+ port->tsolen = 0;
spin_lock_irqsave(&vp->lock, flags);
if (switch_port)
- list_add(&port->list, &vp->port_list);
+ list_add_rcu(&port->list, &vp->port_list);
else
- list_add_tail(&port->list, &vp->port_list);
- hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ list_add_tail_rcu(&port->list, &vp->port_list);
+ hlist_add_head_rcu(&port->hash,
+ &vp->port_hash[vnet_hashfn(port->raddr)]);
+ vnet_port_add_txq(port);
spin_unlock_irqrestore(&vp->lock, flags);
dev_set_drvdata(&vdev->dev, port);
@@ -1564,15 +1974,13 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
setup_timer(&port->clean_timer, vnet_clean_timer_expire,
(unsigned long)port);
+ napi_enable(&port->napi);
vio_port_up(&port->vio);
mdesc_release(hp);
return 0;
-err_out_free_ldc:
- vio_ldc_free(&port->vio);
-
err_out_free_port:
kfree(port);
@@ -1586,17 +1994,18 @@ static int vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- struct vnet *vp = port->vp;
- unsigned long flags;
del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
- spin_lock_irqsave(&vp->lock, flags);
- list_del(&port->list);
- hlist_del(&port->hash);
- spin_unlock_irqrestore(&vp->lock, flags);
+ napi_disable(&port->napi);
+
+ list_del_rcu(&port->list);
+ hlist_del_rcu(&port->hash);
+ synchronize_rcu();
+ del_timer_sync(&port->clean_timer);
+ vnet_port_rm_txq(port);
+ netif_napi_del(&port->napi);
vnet_port_free_tx_bufs(port);
vio_ldc_free(&port->vio);
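
The sunvnet.c changes above replace the lock-heavy vnet_event() handler with NAPI-style polling: the interrupt path only latches event bits and schedules the poller, and the poll routine drains at most budget packets before re-enabling the interrupt source. A minimal, self-contained sketch of that pattern in plain C (hypothetical names such as fake_ring and poll_ring; not the driver's actual API):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 16

    struct fake_ring {
            int head, tail;       /* producer/consumer indexes          */
            bool irq_enabled;     /* stand-in for vio_set_intr()        */
            bool poll_scheduled;  /* stand-in for napi_schedule() state */
    };

    /* "interrupt": mask the source and defer all work (cf. vnet_event) */
    static void fake_interrupt(struct fake_ring *r)
    {
            r->irq_enabled = false;
            r->poll_scheduled = true;
    }

    /* "poll": consume at most budget entries (cf. vnet_poll) */
    static int poll_ring(struct fake_ring *r, int budget)
    {
            int done = 0;

            while (done < budget && r->tail != r->head) {
                    r->tail = (r->tail + 1) % RING_SIZE;
                    done++;
            }
            if (done < budget) {            /* drained early: napi_complete() */
                    r->poll_scheduled = false;
                    r->irq_enabled = true;  /* re-arm the interrupt source    */
            }
            return done;
    }

    int main(void)
    {
            struct fake_ring r = { .head = 10, .irq_enabled = true };

            fake_interrupt(&r);
            while (r.poll_scheduled)
                    printf("polled %d packets\n", poll_ring(&r, 4));
            printf("irq re-enabled: %d\n", r.irq_enabled);
            return 0;
    }

The shape matters for the same reason it does in vnet_poll(): finishing below the budget is the only place where the interrupt gets re-enabled, so there is no window where packets are pending but neither the interrupt nor the poller will run.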
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c91104542619..01ca78191683 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -20,6 +20,9 @@
#define VNET_TX_RING_SIZE 512
#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
+#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
+#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
+
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
@@ -40,8 +43,9 @@ struct vnet_port {
struct hlist_node hash;
u8 raddr[ETH_ALEN];
- u8 switch_port;
- u8 __pad;
+ unsigned switch_port:1;
+ unsigned tso:1;
+ unsigned __pad:14;
struct vnet *vp;
@@ -56,6 +60,13 @@ struct vnet_port {
struct timer_list clean_timer;
u64 rmtu;
+ u16 tsolen;
+
+ struct napi_struct napi;
+ u32 napi_stop_idx;
+ bool napi_resume;
+ int rx_event;
+ u16 q_index;
};
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -97,7 +108,7 @@ struct vnet {
struct list_head list;
u64 local_mac;
- struct tasklet_struct vnet_tx_wakeup;
+ int nports;
};
#endif /* _SUNVNET_H */
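
The new q_index and nports fields back the multiqueue support added above: each port is assigned a transmit queue by taking the next port number and wrapping it with a power-of-two mask, as vnet_port_add_txq() does with VNET_MAX_TXQS. A small stand-alone sketch of that assignment (MAX_TXQS here is an assumed stand-in for VNET_MAX_TXQS):

    #include <stdio.h>

    #define MAX_TXQS 16   /* must be a power of two for the mask to work */

    static int next_q_index(int *nports)
    {
            int n = (*nports)++;

            return n & (MAX_TXQS - 1);   /* cheap modulo for power-of-two sizes */
    }

    int main(void)
    {
            int nports = 0;

            for (int i = 0; i < 20; i++)
                    printf("port %2d -> txq %d\n", i, next_q_index(&nports));
            return 0;
    }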
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 5d8cb7956113..605dd909bcc3 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE))
+ depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE )
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
---help---
This driver supports TI's DaVinci CPDMA dma engine.
@@ -58,7 +58,7 @@ config TI_CPSW_PHY_SEL
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
select TI_CPSW_PHY_SEL
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 47eeb3abf7f7..520cf50a3d5a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -161,7 +161,6 @@ static struct platform_driver tsi_eth_driver = {
.remove = tsi108_ether_remove,
.driver = {
.name = "tsi-ethernet",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 68c5260cc322..a191afc23b56 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2508,7 +2508,6 @@ static struct platform_driver rhine_driver_platform = {
.remove = rhine_remove_one_platform,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = rhine_of_tbl,
.pm = RHINE_PM_OPS,
}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index a43e8492b1ce..282f83a63b67 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3281,7 +3281,6 @@ static struct platform_driver velocity_platform_driver = {
.remove = velocity_platform_remove,
.driver = {
.name = "via-velocity",
- .owner = THIS_MODULE,
.of_match_table = velocity_of_ids,
.pm = &velocity_pm_ops,
},
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0f56b1c0e082..a495931a66a1 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -638,14 +638,12 @@ static int w5100_hw_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENXIO;
- mem_size = resource_size(mem);
-
priv->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mem_size = resource_size(mem);
+
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
if (priv->indirect) {
@@ -790,7 +788,6 @@ static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
static struct platform_driver w5100_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &w5100_pm_ops,
},
.probe = w5100_probe,
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f961f14a0473..09322d9db578 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -558,14 +558,12 @@ static int w5300_hw_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENXIO;
- mem_size = resource_size(mem);
-
priv->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mem_size = resource_size(mem);
+
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
if (priv->indirect) {
@@ -702,7 +700,6 @@ static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
static struct platform_driver w5300_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &w5300_pm_ops,
},
.probe = w5300_probe,
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 629077050fce..9c2d91ea0af4 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -224,8 +224,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
lp->tx_bd_v, lp->tx_bd_p);
- if (lp->rx_skb)
- kfree(lp->rx_skb);
+ kfree(lp->rx_skb);
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 28dbbdc393eb..24858799c204 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1200,8 +1200,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
unregister_netdev(ndev);
- if (lp->phy_node)
- of_node_put(lp->phy_node);
+ of_node_put(lp->phy_node);
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index caed6eee289c..7f975a2c8990 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -414,7 +414,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
* ================
*
* Overview:
- * Retrieves the address range used to access control and status
+ * Retrieves the address ranges used to access control and status
* registers.
*
* Returns:
@@ -422,8 +422,8 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
*
* Arguments:
* bdev - pointer to device information
- * bar_start - pointer to store the start address
- * bar_len - pointer to store the length of the area
+ * bar_start - pointer to store the start addresses
+ * bar_len - pointer to store the lengths of the areas
*
* Assumptions:
* I am sure there are some.
@@ -442,38 +442,47 @@ static void dfx_get_bars(struct device *bdev,
if (dfx_bus_pci) {
int num = dfx_use_mmio ? 0 : 1;
- *bar_start = pci_resource_start(to_pci_dev(bdev), num);
- *bar_len = pci_resource_len(to_pci_dev(bdev), num);
+ bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
+ bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
+ bar_start[2] = bar_start[1] = 0;
+ bar_len[2] = bar_len[1] = 0;
}
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
- resource_size_t bar;
+ resource_size_t bar_lo;
+ resource_size_t bar_hi;
if (dfx_use_mmio) {
- bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
- bar <<= 16;
- *bar_start = bar;
- bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
- bar <<= 16;
- *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
+ bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
+ bar_lo <<= 8;
+ bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
+ bar_lo <<= 8;
+ bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
+ bar_lo <<= 8;
+ bar_start[0] = bar_lo;
+ bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
+ bar_hi <<= 8;
+ bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
+ bar_hi <<= 8;
+ bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
+ bar_hi <<= 8;
+ bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
+ 1;
} else {
- *bar_start = base_addr;
- *bar_len = PI_ESIC_K_CSR_IO_LEN +
- PI_ESIC_K_BURST_HOLDOFF_LEN;
+ bar_start[0] = base_addr;
+ bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
}
+ bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
+ bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
+ bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
+ bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
}
if (dfx_bus_tc) {
- *bar_start = to_tc_dev(bdev)->resource.start +
- PI_TC_K_CSR_OFFSET;
- *bar_len = PI_TC_K_CSR_LEN;
+ bar_start[0] = to_tc_dev(bdev)->resource.start +
+ PI_TC_K_CSR_OFFSET;
+ bar_len[0] = PI_TC_K_CSR_LEN;
+ bar_start[2] = bar_start[1] = 0;
+ bar_len[2] = bar_len[1] = 0;
}
}
@@ -518,13 +527,14 @@ static int dfx_register(struct device *bdev)
{
static int version_disp;
int dfx_bus_pci = dev_is_pci(bdev);
+ int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
struct net_device *dev;
DFX_board_t *bp; /* board pointer */
- resource_size_t bar_start = 0; /* pointer to port */
- resource_size_t bar_len = 0; /* resource length */
+ resource_size_t bar_start[3]; /* pointers to ports */
+ resource_size_t bar_len[3]; /* resource length */
int alloc_size; /* total buffer size used */
struct resource *region;
int err = 0;
@@ -542,10 +552,13 @@ static int dfx_register(struct device *bdev)
}
/* Enable PCI device. */
- if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
- printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
- print_name);
- goto err_out;
+ if (dfx_bus_pci) {
+ err = pci_enable_device(to_pci_dev(bdev));
+ if (err) {
+ pr_err("%s: Cannot enable PCI device, aborting\n",
+ print_name);
+ goto err_out;
+ }
}
SET_NETDEV_DEV(dev, bdev);
@@ -554,31 +567,62 @@ static int dfx_register(struct device *bdev)
bp->bus_dev = bdev;
dev_set_drvdata(bdev, dev);
- dfx_get_bars(bdev, &bar_start, &bar_len);
+ dfx_get_bars(bdev, bar_start, bar_len);
+ if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
+ pr_err("%s: Cannot use MMIO, no address set, aborting\n",
+ print_name);
+ pr_err("%s: Run ECU and set adapter's MMIO location\n",
+ print_name);
+ pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
+ "\n", print_name);
+ err = -ENXIO;
+ goto err_out;
+ }
if (dfx_use_mmio)
- region = request_mem_region(bar_start, bar_len, print_name);
+ region = request_mem_region(bar_start[0], bar_len[0],
+ print_name);
else
- region = request_region(bar_start, bar_len, print_name);
+ region = request_region(bar_start[0], bar_len[0], print_name);
if (!region) {
- printk(KERN_ERR "%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n",
- print_name, (long)bar_len, (long)bar_start);
+ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
+ "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
+ (long)bar_len[0], (long)bar_start[0]);
err = -EBUSY;
goto err_out_disable;
}
+ if (bar_start[1] != 0) {
+ region = request_region(bar_start[1], bar_len[1], print_name);
+ if (!region) {
+ pr_err("%s: Cannot reserve I/O resource "
+ "0x%lx @ 0x%lx, aborting\n", print_name,
+ (long)bar_len[1], (long)bar_start[1]);
+ err = -EBUSY;
+ goto err_out_csr_region;
+ }
+ }
+ if (bar_start[2] != 0) {
+ region = request_region(bar_start[2], bar_len[2], print_name);
+ if (!region) {
+ pr_err("%s: Cannot reserve I/O resource "
+ "0x%lx @ 0x%lx, aborting\n", print_name,
+ (long)bar_len[2], (long)bar_start[2]);
+ err = -EBUSY;
+ goto err_out_bh_region;
+ }
+ }
/* Set up I/O base address. */
if (dfx_use_mmio) {
- bp->base.mem = ioremap_nocache(bar_start, bar_len);
+ bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
- goto err_out_region;
+ goto err_out_esic_region;
}
} else {
- bp->base.port = bar_start;
- dev->base_addr = bar_start;
+ bp->base.port = bar_start[0];
+ dev->base_addr = bar_start[0];
}
/* Initialize new device structure */
@@ -587,7 +631,7 @@ static int dfx_register(struct device *bdev)
if (dfx_bus_pci)
pci_set_master(to_pci_dev(bdev));
- if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
+ if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
err = -ENODEV;
goto err_out_unmap;
}
@@ -615,11 +659,19 @@ err_out_unmap:
if (dfx_use_mmio)
iounmap(bp->base.mem);
-err_out_region:
+err_out_esic_region:
+ if (bar_start[2] != 0)
+ release_region(bar_start[2], bar_len[2]);
+
+err_out_bh_region:
+ if (bar_start[1] != 0)
+ release_region(bar_start[1], bar_len[1]);
+
+err_out_csr_region:
if (dfx_use_mmio)
- release_mem_region(bar_start, bar_len);
+ release_mem_region(bar_start[0], bar_len[0]);
else
- release_region(bar_start, bar_len);
+ release_region(bar_start[0], bar_len[0]);
err_out_disable:
if (dfx_bus_pci)
@@ -711,13 +763,14 @@ static void dfx_bus_init(struct net_device *dev)
}
/*
- * Enable memory decoding (MEMCS0) and/or port decoding
+ * Enable memory decoding (MEMCS1) and/or port decoding
* (IOCS1/IOCS0) as appropriate in Function Control
- * Register. IOCS0 is used for PDQ registers, taking 16
- * 32-bit words, while IOCS1 is used for the Burst Holdoff
- * register, taking a single 32-bit word only. We use the
- * slot-specific I/O range as per the ESIC spec, that is
- * set bits 15:12 in the mask registers to mask them out.
+ * Register. MEMCS1 or IOCS0 is used for PDQ registers,
+ * taking 16 32-bit words, while IOCS1 is used for the
+ * Burst Holdoff register, taking a single 32-bit word
+ * only. We use the slot-specific I/O range as per the
+ * ESIC spec, that is set bits 15:12 in the mask registers
+ * to mask them out.
*/
/* Set the decode range of the board. */
@@ -742,9 +795,11 @@ static void dfx_bus_init(struct net_device *dev)
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
/* Enable the decoders. */
- val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
+ val = PI_FUNCTION_CNTRL_M_IOCS1;
if (dfx_use_mmio)
- val |= PI_FUNCTION_CNTRL_M_MEMCS0;
+ val |= PI_FUNCTION_CNTRL_M_MEMCS1;
+ else
+ val |= PI_FUNCTION_CNTRL_M_IOCS0;
outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
/*
@@ -838,6 +893,12 @@ static void dfx_bus_uninit(struct net_device *dev)
val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
+
+ /* Disable the board. */
+ outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
+
+ /* Disable memory and port decoders. */
+ outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
}
if (dfx_bus_pci) {
/* Disable interrupts at PCI bus interface chip (PFI) */
@@ -1061,8 +1122,8 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
- print_name, board_name, dfx_use_mmio ? "" : "I/O ",
+ pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
+ print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
(long long)bar_start, dev->irq, dev->dev_addr);
/*
@@ -3636,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
- resource_size_t bar_start = 0; /* pointer to port */
- resource_size_t bar_len = 0; /* resource length */
+ resource_size_t bar_start[3]; /* pointers to ports */
+ resource_size_t bar_len[3]; /* resource lengths */
int alloc_size; /* total buffer size used */
unregister_netdev(dev);
@@ -3655,12 +3716,16 @@ static void dfx_unregister(struct device *bdev)
dfx_bus_uninit(dev);
- dfx_get_bars(bdev, &bar_start, &bar_len);
+ dfx_get_bars(bdev, bar_start, bar_len);
+ if (bar_start[2] != 0)
+ release_region(bar_start[2], bar_len[2]);
+ if (bar_start[1] != 0)
+ release_region(bar_start[1], bar_len[1]);
if (dfx_use_mmio) {
iounmap(bp->base.mem);
- release_mem_region(bar_start, bar_len);
+ release_mem_region(bar_start[0], bar_len[0]);
} else
- release_region(bar_start, bar_len);
+ release_region(bar_start[0], bar_len[0]);
if (dfx_bus_pci)
pci_disable_device(to_pci_dev(bdev));
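
With three possible I/O regions instead of one, the defxx probe path above relies on the usual reverse-order goto unwinding: every successfully claimed resource gets its own error label, and a failure releases only what was already acquired, in the opposite order. A compact, self-contained illustration of that pattern (acquire()/release() are hypothetical stand-ins for request_region()/release_region(); the "esic-csr" claim is made to fail on purpose):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool acquire(const char *name)
    {
            printf("acquire %s\n", name);
            return strcmp(name, "esic-csr") != 0;  /* simulate the last claim failing */
    }

    static void release(const char *name)
    {
            printf("release %s\n", name);
    }

    static int setup(void)
    {
            if (!acquire("csr"))
                    goto err_out;
            if (!acquire("burst-holdoff"))
                    goto err_csr;
            if (!acquire("esic-csr"))
                    goto err_bh;
            return 0;

    err_bh:
            release("burst-holdoff");
    err_csr:
            release("csr");
    err_out:
            return -1;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }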
diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h
index 9527f0182fd4..9d30fde2ef3c 100644
--- a/drivers/net/fddi/defxx.h
+++ b/drivers/net/fddi/defxx.h
@@ -1481,9 +1481,11 @@ typedef union
#define PI_ESIC_K_CSR_IO_LEN 0x40 /* 64 bytes */
#define PI_ESIC_K_BURST_HOLDOFF_LEN 0x04 /* 4 bytes */
+#define PI_ESIC_K_ESIC_CSR_LEN 0x40 /* 64 bytes */
#define PI_DEFEA_K_CSR_IO 0x000
#define PI_DEFEA_K_BURST_HOLDOFF 0x040
+#define PI_ESIC_K_ESIC_CSR 0xC80
#define PI_ESIC_K_SLOT_ID 0xC80
#define PI_ESIC_K_SLOT_CNTRL 0xC84
@@ -1556,7 +1558,7 @@ typedef union
#define PI_BURST_HOLDOFF_V_RESERVED 1
#define PI_BURST_HOLDOFF_V_MEM_MAP 0
-/* Define the implicit mask of the Memory Address Mask Register. */
+/* Define the implicit mask of the Memory Address Compare registers. */
#define PI_MEM_ADD_MASK_M 0x3ff
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index c3c4051a089d..daca0dee88f3 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -675,8 +675,7 @@ out_free:
kfree(xbuff);
kfree(rbuff);
- if (dev)
- free_netdev(dev);
+ free_netdev(dev);
out:
return err;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7d76c9523395..dd867e6cabd6 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -440,7 +440,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
/* NVSPv2 only: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
- init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
+ ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -560,9 +561,7 @@ int netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- if (net_device->sub_cb_buf)
- vfree(net_device->sub_cb_buf);
-
+ vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
return 0;
}
@@ -765,6 +764,9 @@ int netvsc_send(struct hv_device *device,
out_channel = device->channel;
packet->channel = out_channel;
+ if (out_channel->rescind)
+ return -ENODEV;
+
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(out_channel,
packet->page_buf,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 78ec33f5100b..15d82eda0baf 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -193,7 +193,9 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
- if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP))
+ if (!skb_flow_dissect(skb, &flow) ||
+ !(flow.n_proto == htons(ETH_P_IP) ||
+ flow.n_proto == htons(ETH_P_IPV6)))
return false;
if (flow.ip_proto == IPPROTO_TCP)
@@ -697,9 +699,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -ENODEV;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
- limit = NETVSC_MTU;
+ limit = NETVSC_MTU - ETH_HLEN;
- if (mtu < 68 || mtu > limit)
+ /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
+ if (mtu < ETH_DATA_LEN || mtu > limit)
return -EINVAL;
nvdev->start_remove = true;
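
The two MTU-related hyperv hunks above perform the same unit conversion in opposite directions: the interface MTU counts payload only, while the value exchanged with the host counts the Ethernet header as well, so the driver adds ETH_HLEN when reporting and subtracts it when computing the upper bound it will accept. A trivial sketch of that bookkeeping (NETVSC_MTU_ASSUMED is a placeholder, not the driver's constant):

    #include <stdio.h>

    #define ETH_HLEN           14
    #define NETVSC_MTU_ASSUMED 65536

    int main(void)
    {
            int mtu = 1500;

            printf("frame size reported to host: %d\n", mtu + ETH_HLEN);
            printf("largest MTU we may accept:   %d\n", NETVSC_MTU_ASSUMED - ETH_HLEN);
            return 0;
    }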
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b86f0b6f6d1..ec0c40a8f653 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -728,7 +728,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
rssp->flag = 0;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
- NDIS_HASH_TCP_IPV4;
+ NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
+ NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
rssp->hashkey_size = HASH_KEYLEN;
@@ -957,6 +958,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return 0;
ret = rndis_filter_set_packet_filter(dev, 0);
+ if (ret == -ENODEV)
+ ret = 0;
+
if (ret == 0)
dev->state = RNDIS_DEV_INITIALIZED;
@@ -997,6 +1001,7 @@ int rndis_filter_device_add(struct hv_device *dev,
int t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ u32 mtu, size;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1028,6 +1033,14 @@ int rndis_filter_device_add(struct hv_device *dev,
return ret;
}
+ /* Get the MTU from the host */
+ size = sizeof(u32);
+ ret = rndis_filter_query_device(rndis_device,
+ RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+ &mtu, &size);
+ if (ret == 0 && size == sizeof(u32))
+ net_device->ndev->mtu = mtu;
+
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
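
The MTU query added to rndis_filter_device_add() only applies the host's answer when the call succeeds and the returned length matches the expected 32-bit width. A hedged sketch of that "use the reply only if it is well formed" check, with a hypothetical query_u32() standing in for rndis_filter_query_device():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int query_u32(uint32_t *out, size_t *size)
    {
            uint32_t host_value = 1514;          /* pretend reply from the host */

            if (*size < sizeof(host_value))
                    return -1;
            memcpy(out, &host_value, sizeof(host_value));
            *size = sizeof(host_value);
            return 0;
    }

    int main(void)
    {
            uint32_t mtu = 1500;                 /* current setting */
            uint32_t value;
            size_t size = sizeof(value);

            if (query_u32(&value, &size) == 0 && size == sizeof(uint32_t))
                    mtu = value;                 /* accept only a well-formed reply */
            printf("mtu = %u\n", (unsigned int)mtu);
            return 0;
    }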
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 391a916622a9..1a3c3e57aa0b 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -10,16 +10,6 @@ menuconfig IEEE802154_DRIVERS
If you say N, all options in this submenu will be skipped and
disabled.
-config IEEE802154_FAKEHARD
- tristate "Fake LR-WPAN driver with several interconnected devices"
- depends on IEEE802154_DRIVERS
- ---help---
- Say Y here to enable the fake driver that serves as an example
- of HardMAC device driver.
-
- This driver can also be built as a module. To do so say M here.
- The module will be called 'fakehard'.
-
config IEEE802154_FAKELB
depends on IEEE802154_DRIVERS && MAC802154
tristate "IEEE 802.15.4 loopback driver"
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 655cb95e6e24..d77fa4d77e27 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index c9d2a752abd7..1c0135620c62 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
@@ -33,10 +29,10 @@
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
+#include <linux/ieee802154.h>
-#include <net/ieee802154.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
struct at86rf230_local;
/* at86rf2xx chip depend data.
@@ -50,15 +46,11 @@ struct at86rf2xx_chip_data {
u16 t_off_to_tx_on;
u16 t_frame;
u16 t_p_ack;
- /* short interframe spacing time */
- u16 t_sifs;
- /* long interframe spacing time */
- u16 t_lifs;
/* completion timeout for tx in msecs */
u16 t_tx_timeout;
int rssi_base_val;
- int (*set_channel)(struct at86rf230_local *, int, int);
+ int (*set_channel)(struct at86rf230_local *, u8, u8);
int (*get_desense_steps)(struct at86rf230_local *, s32);
};
@@ -74,12 +66,14 @@ struct at86rf230_state_change {
void (*complete)(void *context);
u8 from_state;
u8 to_state;
+
+ bool irq_enable;
};
struct at86rf230_local {
struct spi_device *spi;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct at86rf2xx_chip_data *data;
struct regmap *regmap;
@@ -89,10 +83,10 @@ struct at86rf230_local {
struct at86rf230_state_change irq;
bool tx_aret;
+ s8 max_frame_retries;
bool is_tx;
/* spinlock for is_tx protection */
spinlock_t lock;
- struct completion tx_complete;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
};
@@ -291,10 +285,11 @@ struct at86rf230_local {
#define AT86RF2XX_NUMREGS 0x3F
-static int
+static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
- const u8 state, void (*complete)(void *context));
+ const u8 state, void (*complete)(void *context),
+ const bool irq_enable);
static inline int
__at86rf230_write(struct at86rf230_local *lp,
@@ -451,7 +446,8 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
+ ieee802154_wake_queue(lp->hw);
}
static void
@@ -461,21 +457,31 @@ at86rf230_async_error(struct at86rf230_local *lp,
dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
- at86rf230_async_error_recover);
+ at86rf230_async_error_recover, false);
}
/* Generic function to get some register value in async mode */
-static int
+static void
at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
struct at86rf230_state_change *ctx,
- void (*complete)(void *context))
+ void (*complete)(void *context),
+ const bool irq_enable)
{
+ int rc;
+
u8 *tx_buf = ctx->buf;
tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = complete;
- return spi_async(lp->spi, &ctx->msg);
+ ctx->irq_enable = irq_enable;
+ rc = spi_async(lp->spi, &ctx->msg);
+ if (rc) {
+ if (irq_enable)
+ enable_irq(lp->spi->irq);
+
+ at86rf230_async_error(lp, ctx, rc);
+ }
}
static void
@@ -512,7 +518,8 @@ at86rf230_async_state_assert(void *context)
if (ctx->to_state == STATE_TX_ON) {
at86rf230_async_state_change(lp, ctx,
STATE_FORCE_TX_ON,
- ctx->complete);
+ ctx->complete,
+ ctx->irq_enable);
return;
}
}
@@ -535,7 +542,6 @@ at86rf230_async_state_delay(void *context)
struct at86rf230_local *lp = ctx->lp;
struct at86rf2xx_chip_data *c = lp->data;
bool force = false;
- int rc;
/* The force state changes will show as normal states in the
* state status subregister. We change the to_state to the
@@ -604,10 +610,9 @@ at86rf230_async_state_delay(void *context)
udelay(1);
change:
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_assert);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_assert,
+ ctx->irq_enable);
}
static void
@@ -622,10 +627,9 @@ at86rf230_async_state_change_start(void *context)
/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
udelay(1);
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_change_start);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_change_start,
+ ctx->irq_enable);
return;
}
@@ -647,20 +651,27 @@ at86rf230_async_state_change_start(void *context)
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_async_state_delay;
rc = spi_async(lp->spi, &ctx->msg);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ if (rc) {
+ if (ctx->irq_enable)
+ enable_irq(lp->spi->irq);
+
+ at86rf230_async_error(lp, &lp->state, rc);
+ }
}
-static int
+static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
- const u8 state, void (*complete)(void *context))
+ const u8 state, void (*complete)(void *context),
+ const bool irq_enable)
{
/* Initialization for the state change context */
ctx->to_state = state;
ctx->complete = complete;
- return at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_change_start);
+ ctx->irq_enable = irq_enable;
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_change_start,
+ irq_enable);
}
static void
@@ -681,17 +692,16 @@ at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
{
int rc;
- rc = at86rf230_async_state_change(lp, &lp->state, state,
- at86rf230_sync_state_change_complete);
- if (rc) {
- at86rf230_async_error(lp, &lp->state, rc);
- return rc;
- }
+ at86rf230_async_state_change(lp, &lp->state, state,
+ at86rf230_sync_state_change_complete,
+ false);
rc = wait_for_completion_timeout(&lp->state_complete,
msecs_to_jiffies(100));
- if (!rc)
+ if (!rc) {
+ at86rf230_async_error(lp, &lp->state, -ETIMEDOUT);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -701,8 +711,14 @@ at86rf230_tx_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
+ struct sk_buff *skb = lp->tx_skb;
+
+ enable_irq(lp->spi->irq);
- complete(&lp->tx_complete);
+ if (lp->max_frame_retries <= 0)
+ ieee802154_xmit_complete(lp->hw, skb, true);
+ else
+ ieee802154_xmit_complete(lp->hw, skb, false);
}
static void
@@ -710,12 +726,9 @@ at86rf230_tx_on(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
- at86rf230_tx_complete);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+ at86rf230_tx_complete, true);
}
static void
@@ -723,12 +736,9 @@ at86rf230_tx_trac_error(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_tx_on);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+ at86rf230_tx_on, true);
}
static void
@@ -738,17 +748,14 @@ at86rf230_tx_trac_check(void *context)
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = ctx->buf;
const u8 trac = (buf[1] & 0xe0) >> 5;
- int rc;
/* If trac status is different than zero we need to do a state change
* to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
* state to TX_ON.
*/
if (trac) {
- rc = at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
- at86rf230_tx_trac_error);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+ at86rf230_tx_trac_error, true);
return;
}
@@ -761,51 +768,29 @@ at86rf230_tx_trac_status(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
- at86rf230_tx_trac_check);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+ at86rf230_tx_trac_check, true);
}
static void
at86rf230_rx(struct at86rf230_local *lp,
- const u8 *data, u8 len)
+ const u8 *data, const u8 len, const u8 lqi)
{
- u8 lqi;
struct sk_buff *skb;
u8 rx_local_buf[AT86RF2XX_MAX_BUF];
- if (len < 2)
- return;
-
- /* read full frame buffer and invalid lqi value to lowest
- * indicator if frame was is in a corrupted state.
- */
- if (len > IEEE802154_MTU) {
- lqi = 0;
- len = IEEE802154_MTU;
- dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
- } else {
- lqi = data[len];
- }
-
memcpy(rx_local_buf, data, len);
enable_irq(lp->spi->irq);
- skb = alloc_skb(IEEE802154_MTU, GFP_ATOMIC);
+ skb = dev_alloc_skb(IEEE802154_MTU);
if (!skb) {
dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
return;
}
memcpy(skb_put(skb, len), rx_local_buf, len);
-
- /* We do not put CRC into the frame */
- skb_trim(skb, len - 2);
-
- ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+ ieee802154_rx_irqsafe(lp->hw, skb, lqi);
}
static void
@@ -814,20 +799,31 @@ at86rf230_rx_read_frame_complete(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
- const u8 len = buf[1];
+ u8 len = buf[1];
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+ len = IEEE802154_MTU;
+ }
- at86rf230_rx(lp, buf + 2, len);
+ at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
}
-static int
+static void
at86rf230_rx_read_frame(struct at86rf230_local *lp)
{
+ int rc;
+
u8 *buf = lp->irq.buf;
buf[0] = CMD_FB;
lp->irq.trx.len = AT86RF2XX_MAX_BUF;
lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
- return spi_async(lp->spi, &lp->irq.msg);
+ rc = spi_async(lp->spi, &lp->irq.msg);
+ if (rc) {
+ enable_irq(lp->spi->irq);
+ at86rf230_async_error(lp, &lp->irq, rc);
+ }
}
static void
@@ -835,7 +831,6 @@ at86rf230_rx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
/* Possible check on trac status here. This could be useful to make
* some stats why receive is failed. Not used at the moment, but it's
@@ -843,34 +838,31 @@ at86rf230_rx_trac_check(void *context)
* The programming guide say do it so.
*/
- rc = at86rf230_rx_read_frame(lp);
- if (rc) {
- enable_irq(lp->spi->irq);
- at86rf230_async_error(lp, ctx, rc);
- }
+ at86rf230_rx_read_frame(lp);
}
-static int
+static void
at86rf230_irq_trx_end(struct at86rf230_local *lp)
{
spin_lock(&lp->lock);
if (lp->is_tx) {
lp->is_tx = 0;
spin_unlock(&lp->lock);
- enable_irq(lp->spi->irq);
if (lp->tx_aret)
- return at86rf230_async_state_change(lp, &lp->irq,
- STATE_FORCE_TX_ON,
- at86rf230_tx_trac_status);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_FORCE_TX_ON,
+ at86rf230_tx_trac_status,
+ true);
else
- return at86rf230_async_state_change(lp, &lp->irq,
- STATE_RX_AACK_ON,
- at86rf230_tx_complete);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_RX_AACK_ON,
+ at86rf230_tx_complete,
+ true);
} else {
spin_unlock(&lp->lock);
- return at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
- at86rf230_rx_trac_check);
+ at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
+ at86rf230_rx_trac_check, true);
}
}
@@ -881,12 +873,9 @@ at86rf230_irq_status(void *context)
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
const u8 irq = buf[1];
- int rc;
if (irq & IRQ_TRX_END) {
- rc = at86rf230_irq_trx_end(lp);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_irq_trx_end(lp);
} else {
enable_irq(lp->spi->irq);
dev_err(&lp->spi->dev, "not supported irq %02x received\n",
@@ -901,13 +890,14 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
u8 *buf = ctx->buf;
int rc;
- disable_irq_nosync(lp->spi->irq);
+ disable_irq_nosync(irq);
buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_irq_status;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
+ enable_irq(irq);
at86rf230_async_error(lp, ctx, rc);
return IRQ_NONE;
}
@@ -960,22 +950,18 @@ at86rf230_xmit_tx_on(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_write_frame);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+ at86rf230_write_frame, false);
}
static int
-at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
struct at86rf230_state_change *ctx = &lp->tx;
void (*tx_complete)(void *context) = at86rf230_write_frame;
- int rc;
lp->tx_skb = skb;
@@ -986,61 +972,39 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (lp->tx_aret)
tx_complete = at86rf230_xmit_tx_on;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- tx_complete);
- if (rc) {
- at86rf230_async_error(lp, ctx, rc);
- return rc;
- }
- rc = wait_for_completion_interruptible_timeout(&lp->tx_complete,
- msecs_to_jiffies(lp->data->t_tx_timeout));
- if (!rc) {
- at86rf230_async_error(lp, ctx, rc);
- return -ETIMEDOUT;
- }
-
- /* Interfame spacing time, which is phy depend.
- * TODO
- * Move this handling in MAC 802.15.4 layer.
- * This is currently a workaround to avoid fragmenation issues.
- */
- if (skb->len > 18)
- usleep_range(lp->data->t_lifs, lp->data->t_lifs + 10);
- else
- usleep_range(lp->data->t_sifs, lp->data->t_sifs + 10);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
return 0;
}
static int
-at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- might_sleep();
BUG_ON(!level);
*level = 0xbe;
return 0;
}
static int
-at86rf230_start(struct ieee802154_dev *dev)
+at86rf230_start(struct ieee802154_hw *hw)
{
- return at86rf230_sync_state_change(dev->priv, STATE_RX_AACK_ON);
+ return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
}
static void
-at86rf230_stop(struct ieee802154_dev *dev)
+at86rf230_stop(struct ieee802154_hw *hw)
{
- at86rf230_sync_state_change(dev->priv, STATE_FORCE_TRX_OFF);
+ at86rf230_sync_state_change(hw->priv, STATE_FORCE_TRX_OFF);
}
static int
-at86rf23x_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
static int
-at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
int rc;
@@ -1061,44 +1025,60 @@ at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
if (rc < 0)
return rc;
+ /* This sets the symbol_duration according to the frequency on the 212.
+ * TODO: move this handling into cfg802154 when channel and page are set.
+ * We can do that because these timings follow the 802.15.4 standard, and
+ * doing it in cfg802154 makes the calculation more generic.
+ *
+ * This should also be protected from the ifs_timer, i.e. cancel the timer
+ * and reinitialize it with the new value. For now, this is okay.
+ */
+ if (channel == 0) {
+ if (page == 0) {
+ /* SUB:0 and BPSK:0 -> BPSK-20 */
+ lp->hw->phy->symbol_duration = 50;
+ } else {
+ /* SUB:1 and BPSK:0 -> BPSK-40 */
+ lp->hw->phy->symbol_duration = 25;
+ }
+ } else {
+ if (page == 0)
+ /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
+ lp->hw->phy->symbol_duration = 40;
+ else
+ /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
+ lp->hw->phy->symbol_duration = 16;
+ }
+
+ lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+ lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
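Aside (not part of the patch): to make the lifs_period/sifs_period assignments above concrete, 802.15.4 defines the interframe spacing in symbols (macMinLIFSPeriod = 40, macMinSIFSPeriod = 12), so the worked numbers are:

/* Illustrative arithmetic, assuming IEEE802154_LIFS_PERIOD = 40 and
 * IEEE802154_SIFS_PERIOD = 12 symbols:
 *
 *   BPSK-20 (symbol_duration = 50 us): lifs = 40 * 50 = 2000 us, sifs = 12 * 50 = 600 us
 *   O-QPSK  (symbol_duration = 16 us): lifs = 40 * 16 =  640 us, sifs = 12 * 16 = 192 us
 *
 * consistent with dropping the per-chip t_sifs/t_lifs constants further
 * down: the spacing can now be derived from the symbol duration instead.
 */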
static int
-at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc;
- might_sleep();
-
- if (page < 0 || page > 31 ||
- !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
- WARN_ON(1);
- return -EINVAL;
- }
-
rc = lp->data->set_channel(lp, page, channel);
- if (rc < 0)
- return rc;
-
/* Wait for PLL */
usleep_range(lp->data->t_channel_switch,
lp->data->t_channel_switch + 10);
- dev->phy->current_channel = channel;
- dev->phy->current_page = page;
-
- return 0;
+ return rc;
}
static int
-at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
+at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&lp->spi->dev,
@@ -1107,7 +1087,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(&lp->spi->dev,
@@ -1116,7 +1096,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
@@ -1126,7 +1106,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for panc change\n");
if (filt->pan_coord)
@@ -1139,9 +1119,9 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
}
static int
-at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
+at86rf230_set_txpower(struct ieee802154_hw *hw, int db)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
/* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
* bits decrease power in 1dB steps. 0x60 represents extra PA gain of
@@ -1158,17 +1138,17 @@ at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
}
static int
-at86rf230_set_lbt(struct ieee802154_dev *dev, bool on)
+at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
}
static int
-at86rf230_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
}
@@ -1186,9 +1166,9 @@ at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
}
static int
-at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
if (level < lp->data->rssi_base_val || level > 30)
return -EINVAL;
@@ -1198,15 +1178,12 @@ at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
}
static int
-at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+at86rf230_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be,
u8 retries)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc;
- if (min_be > max_be || max_be > 8 || retries > 5)
- return -EINVAL;
-
rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
if (rc)
return rc;
@@ -1219,15 +1196,13 @@ at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
}
static int
-at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc = 0;
- if (retries < -1 || retries > 15)
- return -EINVAL;
-
lp->tx_aret = retries >= 0;
+ lp->max_frame_retries = retries;
if (retries >= 0)
rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
@@ -1235,9 +1210,36 @@ at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
return rc;
}
-static struct ieee802154_ops at86rf230_ops = {
+static int
+at86rf230_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ struct at86rf230_local *lp = hw->priv;
+ int rc;
+
+ if (on) {
+ rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 1);
+ if (rc < 0)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 1);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 0);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
- .xmit = at86rf230_xmit,
+ .xmit_async = at86rf230_xmit,
.ed = at86rf230_ed,
.set_channel = at86rf230_channel,
.start = at86rf230_start,
@@ -1249,6 +1251,7 @@ static struct ieee802154_ops at86rf230_ops = {
.set_cca_ed_level = at86rf230_set_cca_ed_level,
.set_csma_params = at86rf230_set_csma_params,
.set_frame_retries = at86rf230_set_frame_retries,
+ .set_promiscuous_mode = at86rf230_set_promiscuous_mode,
};
static struct at86rf2xx_chip_data at86rf233_data = {
@@ -1259,8 +1262,6 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_off_to_tx_on = 80,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
@@ -1275,8 +1276,6 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_off_to_tx_on = 110,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
@@ -1291,8 +1290,6 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_off_to_tx_on = 200,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
@@ -1354,7 +1351,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return -EINVAL;
}
- return 0;
+ /* Force the slotted operation bit to 0. Sometimes the atben sets
+ * this bit and it is not clear why, so we always force it to zero
+ * while probing.
+ */
+ return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0);
}
static struct at86rf230_platform_data *
@@ -1409,9 +1410,10 @@ at86rf230_detect_device(struct at86rf230_local *lp)
return -EINVAL;
}
- lp->dev->extra_tx_headroom = 0;
- lp->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
- IEEE802154_HW_TXPOWER | IEEE802154_HW_CSMA;
+ lp->hw->extra_tx_headroom = 0;
+ lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
+ IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
switch (part) {
case 2:
@@ -1421,15 +1423,19 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 3:
chip = "at86rf231";
lp->data = &at86rf231_data;
- lp->dev->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->current_channel = 11;
+ lp->hw->phy->symbol_duration = 16;
break;
case 7:
chip = "at86rf212";
if (version == 1) {
lp->data = &at86rf212_data;
- lp->dev->flags |= IEEE802154_HW_LBT;
- lp->dev->phy->channels_supported[0] = 0x00007FF;
- lp->dev->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->flags |= IEEE802154_HW_LBT;
+ lp->hw->phy->channels_supported[0] = 0x00007FF;
+ lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->current_channel = 5;
+ lp->hw->phy->symbol_duration = 25;
} else {
rc = -ENOTSUPP;
}
@@ -1437,7 +1443,9 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 11:
chip = "at86rf233";
lp->data = &at86rf233_data;
- lp->dev->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->current_channel = 13;
+ lp->hw->phy->symbol_duration = 16;
break;
default:
chip = "unkown";
@@ -1478,7 +1486,7 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp)
static int at86rf230_probe(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct at86rf230_local *lp;
unsigned int status;
int rc, irq_type;
@@ -1517,14 +1525,16 @@ static int at86rf230_probe(struct spi_device *spi)
usleep_range(120, 240);
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
+ hw = ieee802154_alloc_hw(sizeof(*lp), &at86rf230_ops);
+ if (!hw)
return -ENOMEM;
- lp = dev->priv;
- lp->dev = dev;
+ lp = hw->priv;
+ lp->hw = hw;
lp->spi = spi;
- dev->parent = &spi->dev;
+ hw->parent = &spi->dev;
+ hw->vif_data_size = sizeof(*lp);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config);
if (IS_ERR(lp->regmap)) {
@@ -1541,7 +1551,6 @@ static int at86rf230_probe(struct spi_device *spi)
goto free_dev;
spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
init_completion(&lp->state_complete);
spi_set_drvdata(spi, lp);
@@ -1564,14 +1573,14 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc)
goto free_dev;
- rc = ieee802154_register_device(lp->dev);
+ rc = ieee802154_register_hw(lp->hw);
if (rc)
goto free_dev;
return rc;
free_dev:
- ieee802154_free_device(lp->dev);
+ ieee802154_free_hw(lp->hw);
return rc;
}
@@ -1582,8 +1591,8 @@ static int at86rf230_remove(struct spi_device *spi)
/* mask all at86rf230 irq's */
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
- ieee802154_unregister_device(lp->dev);
- ieee802154_free_device(lp->dev);
+ ieee802154_unregister_hw(lp->hw);
+ ieee802154_free_hw(lp->hw);
dev_dbg(&spi->dev, "unregistered at86rf230\n");
return 0;
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 8a5ac7ab2300..f9df9fa86d5f 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -21,10 +21,10 @@
#include <linux/skbuff.h>
#include <linux/pinctrl/consumer.h>
#include <linux/of_gpio.h>
+#include <linux/ieee802154.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
-#include <net/ieee802154.h>
+#include <net/cfg802154.h>
#define SPI_COMMAND_BUFFER 3
#define HIGH 1
@@ -193,7 +193,7 @@
/* Driver private information */
struct cc2520_private {
struct spi_device *spi; /* SPI device structure */
- struct ieee802154_dev *dev; /* IEEE-802.15.4 device */
+ struct ieee802154_hw *hw; /* IEEE-802.15.4 device */
u8 *buf; /* SPI TX/Rx data buffer */
struct mutex buffer_mutex; /* SPI buffer mutex */
bool is_tx; /* Flag for sync b/w Tx and Rx */
@@ -453,20 +453,20 @@ cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi)
return status;
}
-static int cc2520_start(struct ieee802154_dev *dev)
+static int cc2520_start(struct ieee802154_hw *hw)
{
- return cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRXON);
+ return cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRXON);
}
-static void cc2520_stop(struct ieee802154_dev *dev)
+static void cc2520_stop(struct ieee802154_hw *hw)
{
- cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRFOFF);
+ cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRFOFF);
}
static int
-cc2520_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
unsigned long flags;
int rc;
u8 status = 0;
@@ -524,7 +524,7 @@ static int cc2520_rx(struct cc2520_private *priv)
if (len < 2 || len > IEEE802154_MTU)
return -EINVAL;
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = dev_alloc_skb(len);
if (!skb)
return -ENOMEM;
@@ -536,7 +536,7 @@ static int cc2520_rx(struct cc2520_private *priv)
skb_trim(skb, skb->len - 2);
- ieee802154_rx_irqsafe(priv->dev, skb, lqi);
+ ieee802154_rx_irqsafe(priv->hw, skb, lqi);
dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi);
@@ -544,9 +544,9 @@ static int cc2520_rx(struct cc2520_private *priv)
}
static int
-cc2520_ed(struct ieee802154_dev *dev, u8 *level)
+cc2520_ed(struct ieee802154_hw *hw, u8 *level)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
u8 status = 0xff;
u8 rssi;
int ret;
@@ -569,12 +569,11 @@ cc2520_ed(struct ieee802154_dev *dev, u8 *level)
}
static int
-cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
+cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
int ret;
- might_sleep();
dev_dbg(&priv->spi->dev, "trying to set channel\n");
BUG_ON(page != 0);
@@ -588,12 +587,12 @@ cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
}
static int
-cc2520_filter(struct ieee802154_dev *dev,
+cc2520_filter(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt, unsigned long changed)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 panid = le16_to_cpu(filt->pan_id);
dev_vdbg(&priv->spi->dev,
@@ -602,7 +601,7 @@ cc2520_filter(struct ieee802154_dev *dev,
sizeof(panid), (u8 *)&panid);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
dev_vdbg(&priv->spi->dev,
"cc2520_filter called for IEEE addr\n");
cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
@@ -610,7 +609,7 @@ cc2520_filter(struct ieee802154_dev *dev,
(u8 *)&filt->ieee_addr);
}
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&priv->spi->dev,
@@ -619,7 +618,7 @@ cc2520_filter(struct ieee802154_dev *dev,
sizeof(addr), (u8 *)&addr);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&priv->spi->dev,
"cc2520_filter called for panc change\n");
if (filt->pan_coord)
@@ -631,11 +630,11 @@ cc2520_filter(struct ieee802154_dev *dev,
return 0;
}
-static struct ieee802154_ops cc2520_ops = {
+static const struct ieee802154_ops cc2520_ops = {
.owner = THIS_MODULE,
.start = cc2520_start,
.stop = cc2520_stop,
- .xmit = cc2520_tx,
+ .xmit_sync = cc2520_tx,
.ed = cc2520_ed,
.set_channel = cc2520_set_channel,
.set_hw_addr_filt = cc2520_filter,
@@ -645,27 +644,29 @@ static int cc2520_register(struct cc2520_private *priv)
{
int ret = -ENOMEM;
- priv->dev = ieee802154_alloc_device(sizeof(*priv), &cc2520_ops);
- if (!priv->dev)
+ priv->hw = ieee802154_alloc_hw(sizeof(*priv), &cc2520_ops);
+ if (!priv->hw)
goto err_ret;
- priv->dev->priv = priv;
- priv->dev->parent = &priv->spi->dev;
- priv->dev->extra_tx_headroom = 0;
+ priv->hw->priv = priv;
+ priv->hw->parent = &priv->spi->dev;
+ priv->hw->extra_tx_headroom = 0;
+ priv->hw->vif_data_size = sizeof(*priv);
/* We do support only 2.4 Ghz */
- priv->dev->phy->channels_supported[0] = 0x7FFF800;
- priv->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+ priv->hw->phy->channels_supported[0] = 0x7FFF800;
+ priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_AFILT;
dev_vdbg(&priv->spi->dev, "registered cc2520\n");
- ret = ieee802154_register_device(priv->dev);
+ ret = ieee802154_register_hw(priv->hw);
if (ret)
goto err_free_device;
return 0;
err_free_device:
- ieee802154_free_device(priv->dev);
+ ieee802154_free_hw(priv->hw);
err_ret:
return ret;
}
@@ -857,7 +858,7 @@ static int cc2520_probe(struct spi_device *spi)
pinctrl = devm_pinctrl_get_select_default(&spi->dev);
if (IS_ERR(pinctrl))
dev_warn(&spi->dev,
- "pinctrl pins are not configured");
+ "pinctrl pins are not configured\n");
pdata = cc2520_get_platform_data(spi);
if (!pdata) {
@@ -1002,8 +1003,8 @@ static int cc2520_remove(struct spi_device *spi)
mutex_destroy(&priv->buffer_mutex);
flush_work(&priv->fifop_irqwork);
- ieee802154_unregister_device(priv->dev);
- ieee802154_free_device(priv->dev);
+ ieee802154_unregister_hw(priv->hw);
+ ieee802154_free_hw(priv->hw);
return 0;
}
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
deleted file mode 100644
index 6cbc56ad9ff4..000000000000
--- a/drivers/net/ieee802154/fakehard.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Sample driver for HardMAC IEEE 802.15.4 devices
- *
- * Copyright (C) 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/ieee802154.h>
-#include <net/nl802154.h>
-#include <net/wpan-phy.h>
-
-struct fakehard_priv {
- struct wpan_phy *phy;
-};
-
-static struct wpan_phy *fake_to_phy(const struct net_device *dev)
-{
- struct fakehard_priv *priv = netdev_priv(dev);
- return priv->phy;
-}
-
-/**
- * fake_get_phy - Return a phy corresponding to this device.
- * @dev: The network device for which to return the wan-phy object
- *
- * This function returns a wpan-phy object corresponding to the passed
- * network device. Reference counter for wpan-phy object is incremented,
- * so when the wpan-phy isn't necessary, you should drop the reference
- * via @wpan_phy_put() call.
- */
-static struct wpan_phy *fake_get_phy(const struct net_device *dev)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
- return to_phy(get_device(&phy->dev));
-}
-
-/**
- * fake_get_pan_id - Retrieve the PAN ID of the device.
- * @dev: The network device to retrieve the PAN of.
- *
- * Return the ID of the PAN from the PIB.
- */
-static __le16 fake_get_pan_id(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return cpu_to_le16(0xeba1);
-}
-
-/**
- * fake_get_short_addr - Retrieve the short address of the device.
- * @dev: The network device to retrieve the short address of.
- *
- * Returns the IEEE 802.15.4 short-form address cached for this
- * device. If the device has not yet had a short address assigned
- * then this should return 0xFFFF to indicate a lack of association.
- */
-static __le16 fake_get_short_addr(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return cpu_to_le16(0x1);
-}
-
-/**
- * fake_get_dsn - Retrieve the DSN of the device.
- * @dev: The network device to retrieve the DSN for.
- *
- * Returns the IEEE 802.15.4 DSN for the network device.
- * The DSN is the sequence number which will be added to each
- * packet or MAC command frame by the MAC during transmission.
- *
- * DSN means 'Data Sequence Number'.
- *
- * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
- * document.
- */
-static u8 fake_get_dsn(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return 0x00; /* DSN are implemented in HW, so return just 0 */
-}
-
-/**
- * fake_assoc_req - Make an association request to the HW.
- * @dev: The network device which we are associating to a network.
- * @addr: The coordinator with which we wish to associate.
- * @channel: The channel on which to associate.
- * @cap: The capability information field to use in the association.
- *
- * Start an association with a coordinator. The coordinator's address
- * and PAN ID can be found in @addr.
- *
- * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE
- * 802.15.4-2006 document.
- */
-static int fake_assoc_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- mutex_lock(&phy->pib_lock);
- phy->current_channel = channel;
- phy->current_page = page;
- mutex_unlock(&phy->pib_lock);
-
- /* We simply emulate it here */
- return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
- IEEE802154_SUCCESS);
-}
-
-/**
- * fake_assoc_resp - Send an association response to a device.
- * @dev: The network device on which to send the response.
- * @addr: The address of the device to respond to.
- * @short_addr: The assigned short address for the device (if any).
- * @status: The result of the association request.
- *
- * Queue the association response of the coordinator to another
- * device's attempt to associate with the network which we
- * coordinate. This is then added to the indirect-send queue to be
- * transmitted to the end device when it polls for data.
- *
- * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE
- * 802.15.4-2006 document.
- */
-static int fake_assoc_resp(struct net_device *dev,
- struct ieee802154_addr *addr, __le16 short_addr, u8 status)
-{
- return 0;
-}
-
-/**
- * fake_disassoc_req - Disassociate a device from a network.
- * @dev: The network device on which we're disassociating a device.
- * @addr: The device to disassociate from the network.
- * @reason: The reason to give to the device for being disassociated.
- *
- * This sends a disassociation notification to the device being
- * disassociated from the network.
- *
- * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006
- * document, with the reason described in 7.3.3.2.
- */
-static int fake_disassoc_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 reason)
-{
- return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
-}
-
-/**
- * fake_start_req - Start an IEEE 802.15.4 PAN.
- * @dev: The network device on which to start the PAN.
- * @addr: The coordinator address to use when starting the PAN.
- * @channel: The channel on which to start the PAN.
- * @bcn_ord: Beacon order.
- * @sf_ord: Superframe order.
- * @pan_coord: Whether or not we are the PAN coordinator or just
- * requesting a realignment perhaps?
- * @blx: Battery Life Extension feature bitfield.
- * @coord_realign: Something to realign something else.
- *
- * If pan_coord is non-zero then this starts a network with the
- * provided parameters, otherwise it attempts a coordinator
- * realignment of the stated network instead.
- *
- * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
- * document, with 7.3.8 describing coordinator realignment.
- */
-static int fake_start_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 channel, u8 page,
- u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
- u8 coord_realign)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- mutex_lock(&phy->pib_lock);
- phy->current_channel = channel;
- phy->current_page = page;
- mutex_unlock(&phy->pib_lock);
-
- /* We don't emulate beacons here at all, so START should fail */
- ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER);
- return 0;
-}
-
-/**
- * fake_scan_req - Start a channel scan.
- * @dev: The network device on which to perform a channel scan.
- * @type: The type of scan to perform.
- * @channels: The channel bitmask to scan.
- * @duration: How long to spend on each channel.
- *
- * This starts either a passive (energy) scan or an active (PAN) scan
- * on the channels indicated in the @channels bitmask. The duration of
- * the scan is measured in terms of superframe duration. Specifically,
- * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each
- * channel.
- *
- * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document.
- */
-static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
- u8 page, u8 duration)
-{
- u8 edl[27] = {};
- return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
- channels, page,
- type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
-}
-
-static struct ieee802154_mlme_ops fake_mlme = {
- .assoc_req = fake_assoc_req,
- .assoc_resp = fake_assoc_resp,
- .disassoc_req = fake_disassoc_req,
- .start_req = fake_start_req,
- .scan_req = fake_scan_req,
-
- .get_phy = fake_get_phy,
-
- .get_pan_id = fake_get_pan_id,
- .get_short_addr = fake_get_short_addr,
- .get_dsn = fake_get_dsn,
-};
-
-static int ieee802154_fake_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int ieee802154_fake_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- /* FIXME: do hardware work here ... */
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- struct sockaddr_ieee802154 *sa =
- (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
- u16 pan_id, short_addr;
-
- switch (cmd) {
- case SIOCGIFADDR:
- /* FIXME: fixed here, get from device IRL */
- pan_id = le16_to_cpu(fake_get_pan_id(dev));
- short_addr = le16_to_cpu(fake_get_short_addr(dev));
- if (pan_id == IEEE802154_PANID_BROADCAST ||
- short_addr == IEEE802154_ADDR_BROADCAST)
- return -EADDRNOTAVAIL;
-
- sa->family = AF_IEEE802154;
- sa->addr.addr_type = IEEE802154_ADDR_SHORT;
- sa->addr.pan_id = pan_id;
- sa->addr.short_addr = short_addr;
- return 0;
- }
- return -ENOIOCTLCMD;
-}
-
-static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
-{
- return -EBUSY; /* HW address is built into the device */
-}
-
-static const struct net_device_ops fake_ops = {
- .ndo_open = ieee802154_fake_open,
- .ndo_stop = ieee802154_fake_close,
- .ndo_start_xmit = ieee802154_fake_xmit,
- .ndo_do_ioctl = ieee802154_fake_ioctl,
- .ndo_set_mac_address = ieee802154_fake_mac_addr,
-};
-
-static void ieee802154_fake_destruct(struct net_device *dev)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- wpan_phy_unregister(phy);
- free_netdev(dev);
- wpan_phy_free(phy);
-}
-
-static void ieee802154_fake_setup(struct net_device *dev)
-{
- dev->addr_len = IEEE802154_ADDR_LEN;
- memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_HW_CSUM;
- dev->needed_tailroom = 2; /* FCS */
- dev->mtu = 127;
- dev->tx_queue_len = 10;
- dev->type = ARPHRD_IEEE802154;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
- dev->watchdog_timeo = 0;
- dev->destructor = ieee802154_fake_destruct;
-}
-
-
-static int ieee802154fake_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct fakehard_priv *priv;
- struct wpan_phy *phy = wpan_phy_alloc(0);
- int err;
-
- if (!phy)
- return -ENOMEM;
-
- dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d",
- NET_NAME_UNKNOWN, ieee802154_fake_setup);
- if (!dev) {
- wpan_phy_free(phy);
- return -ENOMEM;
- }
-
- memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
- dev->addr_len);
-
- /*
- * For now we'd like to emulate 2.4 GHz-only device,
- * both O-QPSK and CSS
- */
- /* 2.4 GHz O-QPSK 802.15.4-2003 */
- phy->channels_supported[0] |= 0x7FFF800;
- /* 2.4 GHz CSS 802.15.4a-2007 */
- phy->channels_supported[3] |= 0x3fff;
-
- phy->transmit_power = 0xbf;
-
- dev->netdev_ops = &fake_ops;
- dev->ml_priv = &fake_mlme;
-
- priv = netdev_priv(dev);
- priv->phy = phy;
-
- wpan_phy_set_dev(phy, &pdev->dev);
- SET_NETDEV_DEV(dev, &phy->dev);
-
- platform_set_drvdata(pdev, dev);
-
- err = wpan_phy_register(phy);
- if (err)
- goto err_phy_reg;
-
- err = register_netdev(dev);
- if (err)
- goto err_netdev_reg;
-
- dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
- return 0;
-
-err_netdev_reg:
- wpan_phy_unregister(phy);
-err_phy_reg:
- free_netdev(dev);
- wpan_phy_free(phy);
- return err;
-}
-
-static int ieee802154fake_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- unregister_netdev(dev);
- return 0;
-}
-
-static struct platform_device *ieee802154fake_dev;
-
-static struct platform_driver ieee802154fake_driver = {
- .probe = ieee802154fake_probe,
- .remove = ieee802154fake_remove,
- .driver = {
- .name = "ieee802154hardmac",
- .owner = THIS_MODULE,
- },
-};
-
-static __init int fake_init(void)
-{
- ieee802154fake_dev = platform_device_register_simple(
- "ieee802154hardmac", -1, NULL, 0);
- return platform_driver_register(&ieee802154fake_driver);
-}
-
-static __exit void fake_exit(void)
-{
- platform_driver_unregister(&ieee802154fake_driver);
- platform_device_unregister(ieee802154fake_dev);
-}
-
-module_init(fake_init);
-module_exit(fake_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 27d83207d24c..dc2bfb600b4b 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -29,12 +25,12 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
static int numlbs = 1;
struct fakelb_dev_priv {
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct list_head list;
struct fakelb_priv *fake;
@@ -49,9 +45,8 @@ struct fakelb_priv {
};
static int
-fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
+fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- might_sleep();
BUG_ON(!level);
*level = 0xbe;
@@ -59,14 +54,10 @@ fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
}
static int
-fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
+fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
pr_debug("set channel to %d\n", channel);
- might_sleep();
- dev->phy->current_page = page;
- dev->phy->current_channel = channel;
-
return 0;
}
@@ -78,19 +69,17 @@ fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
spin_lock(&priv->lock);
if (priv->working) {
newskb = pskb_copy(skb, GFP_ATOMIC);
- ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
+ ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
}
spin_unlock(&priv->lock);
}
static int
-fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct fakelb_dev_priv *priv = dev->priv;
+ struct fakelb_dev_priv *priv = hw->priv;
struct fakelb_priv *fake = priv->fake;
- might_sleep();
-
read_lock_bh(&fake->lock);
if (priv->list.next == priv->list.prev) {
/* we are the only one device */
@@ -99,8 +88,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
struct fakelb_dev_priv *dp;
list_for_each_entry(dp, &priv->fake->list, list) {
if (dp != priv &&
- (dp->dev->phy->current_channel ==
- priv->dev->phy->current_channel))
+ (dp->hw->phy->current_channel ==
+ priv->hw->phy->current_channel))
fakelb_hw_deliver(dp, skb);
}
}
@@ -110,8 +99,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
}
static int
-fakelb_hw_start(struct ieee802154_dev *dev) {
- struct fakelb_dev_priv *priv = dev->priv;
+fakelb_hw_start(struct ieee802154_hw *hw) {
+ struct fakelb_dev_priv *priv = hw->priv;
int ret = 0;
spin_lock(&priv->lock);
@@ -125,17 +114,17 @@ fakelb_hw_start(struct ieee802154_dev *dev) {
}
static void
-fakelb_hw_stop(struct ieee802154_dev *dev) {
- struct fakelb_dev_priv *priv = dev->priv;
+fakelb_hw_stop(struct ieee802154_hw *hw) {
+ struct fakelb_dev_priv *priv = hw->priv;
spin_lock(&priv->lock);
priv->working = 0;
spin_unlock(&priv->lock);
}
-static struct ieee802154_ops fakelb_ops = {
+static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
- .xmit = fakelb_hw_xmit,
+ .xmit_sync = fakelb_hw_xmit,
.ed = fakelb_hw_ed,
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
@@ -150,54 +139,54 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
{
struct fakelb_dev_priv *priv;
int err;
- struct ieee802154_dev *ieee;
+ struct ieee802154_hw *hw;
- ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
- if (!ieee)
+ hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+ if (!hw)
return -ENOMEM;
- priv = ieee->priv;
- priv->dev = ieee;
+ priv = hw->priv;
+ priv->hw = hw;
/* 868 MHz BPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 1;
+ hw->phy->channels_supported[0] |= 1;
/* 915 MHz BPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 0x7fe;
+ hw->phy->channels_supported[0] |= 0x7fe;
/* 2.4 GHz O-QPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 0x7FFF800;
+ hw->phy->channels_supported[0] |= 0x7FFF800;
/* 868 MHz ASK 802.15.4-2006 */
- ieee->phy->channels_supported[1] |= 1;
+ hw->phy->channels_supported[1] |= 1;
/* 915 MHz ASK 802.15.4-2006 */
- ieee->phy->channels_supported[1] |= 0x7fe;
+ hw->phy->channels_supported[1] |= 0x7fe;
/* 868 MHz O-QPSK 802.15.4-2006 */
- ieee->phy->channels_supported[2] |= 1;
+ hw->phy->channels_supported[2] |= 1;
/* 915 MHz O-QPSK 802.15.4-2006 */
- ieee->phy->channels_supported[2] |= 0x7fe;
+ hw->phy->channels_supported[2] |= 0x7fe;
/* 2.4 GHz CSS 802.15.4a-2007 */
- ieee->phy->channels_supported[3] |= 0x3fff;
+ hw->phy->channels_supported[3] |= 0x3fff;
/* UWB Sub-gigahertz 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 1;
+ hw->phy->channels_supported[4] |= 1;
/* UWB Low band 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 0x1e;
+ hw->phy->channels_supported[4] |= 0x1e;
/* UWB High band 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 0xffe0;
+ hw->phy->channels_supported[4] |= 0xffe0;
/* 750 MHz O-QPSK 802.15.4c-2009 */
- ieee->phy->channels_supported[5] |= 0xf;
+ hw->phy->channels_supported[5] |= 0xf;
/* 750 MHz MPSK 802.15.4c-2009 */
- ieee->phy->channels_supported[5] |= 0xf0;
+ hw->phy->channels_supported[5] |= 0xf0;
/* 950 MHz BPSK 802.15.4d-2009 */
- ieee->phy->channels_supported[6] |= 0x3ff;
+ hw->phy->channels_supported[6] |= 0x3ff;
/* 950 MHz GFSK 802.15.4d-2009 */
- ieee->phy->channels_supported[6] |= 0x3ffc00;
+ hw->phy->channels_supported[6] |= 0x3ffc00;
INIT_LIST_HEAD(&priv->list);
priv->fake = fake;
spin_lock_init(&priv->lock);
- ieee->parent = dev;
+ hw->parent = dev;
- err = ieee802154_register_device(ieee);
+ err = ieee802154_register_hw(hw);
if (err)
goto err_reg;
@@ -208,7 +197,7 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
return 0;
err_reg:
- ieee802154_free_device(priv->dev);
+ ieee802154_free_hw(priv->hw);
return err;
}
@@ -218,8 +207,8 @@ static void fakelb_del(struct fakelb_dev_priv *priv)
list_del(&priv->list);
write_unlock_bh(&priv->fake->lock);
- ieee802154_unregister_device(priv->dev);
- ieee802154_free_device(priv->dev);
+ ieee802154_unregister_hw(priv->hw);
+ ieee802154_free_hw(priv->hw);
}
static int fakelb_probe(struct platform_device *pdev)
@@ -272,7 +261,6 @@ static struct platform_driver ieee802154fake_driver = {
.remove = fakelb_remove,
.driver = {
.name = "ieee802154fakelb",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 07e0b887c350..a200fa16beae 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -13,18 +13,14 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <net/wpan-phy.h>
+#include <linux/ieee802154.h>
+#include <net/cfg802154.h>
#include <net/mac802154.h>
-#include <net/ieee802154.h>
/* MRF24J40 Short Address Registers */
#define REG_RXMCR 0x00 /* Receive MAC control */
@@ -43,6 +39,8 @@
#define REG_TXSTBL 0x2E /* TX Stabilization */
#define REG_INTSTAT 0x31 /* Interrupt Status */
#define REG_INTCON 0x32 /* Interrupt Control */
+#define REG_GPIO 0x33 /* GPIO */
+#define REG_TRISGPIO 0x34 /* GPIO direction */
#define REG_RFCTL 0x36 /* RF Control Mode Register */
#define REG_BBREG1 0x39 /* Baseband Registers */
#define REG_BBREG2 0x3A /* */
@@ -63,6 +61,7 @@
#define REG_SLPCON1 0x220
#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */
#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */
+#define REG_TESTMODE 0x22F /* Test mode */
#define REG_RX_FIFO 0x300 /* Receive FIFO */
/* Device configuration: Only channels 11-26 on page 0 are supported. */
@@ -75,10 +74,12 @@
#define RX_FIFO_SIZE 144 /* From datasheet */
#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
+enum mrf24j40_modules { MRF24J40, MRF24J40MA, MRF24J40MC };
+
/* Device Private Data */
struct mrf24j40 {
struct spi_device *spi;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct mutex buffer_mutex; /* only used to protect buf */
struct completion tx_complete;
@@ -331,9 +332,9 @@ out:
return ret;
}
-static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret = 0;
@@ -382,7 +383,7 @@ err:
return ret;
}
-static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
+static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level)
{
/* TODO: */
pr_warn("mrf24j40: ed not implemented\n");
@@ -390,9 +391,9 @@ static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
return 0;
}
-static int mrf24j40_start(struct ieee802154_dev *dev)
+static int mrf24j40_start(struct ieee802154_hw *hw)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -407,9 +408,9 @@ static int mrf24j40_start(struct ieee802154_dev *dev)
return 0;
}
-static void mrf24j40_stop(struct ieee802154_dev *dev)
+static void mrf24j40_stop(struct ieee802154_hw *hw)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -422,10 +423,9 @@ static void mrf24j40_stop(struct ieee802154_dev *dev)
write_short_reg(devrec, REG_INTCON, val);
}
-static int mrf24j40_set_channel(struct ieee802154_dev *dev,
- int page, int channel)
+static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -453,15 +453,15 @@ static int mrf24j40_set_channel(struct ieee802154_dev *dev,
return 0;
}
-static int mrf24j40_filter(struct ieee802154_dev *dev,
+static int mrf24j40_filter(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
dev_dbg(printdev(devrec), "filter\n");
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
/* Short Addr */
u8 addrh, addrl;
@@ -474,7 +474,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
"Set short addr to %04hx\n", filt->short_addr);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
/* Device Address */
u8 i, addr[8];
@@ -490,7 +490,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
#endif
}
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
/* PAN ID */
u8 panidl, panidh;
@@ -502,7 +502,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
/* Pan Coordinator */
u8 val;
int ret;
@@ -543,7 +543,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
val |= 4; /* SET RXDECINV */
write_short_reg(devrec, REG_BBREG1, val);
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = dev_alloc_skb(len);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -563,7 +563,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
/* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
* also from a workqueue). I think irqsafe is not necessary here.
* Can someone confirm? */
- ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
+ ieee802154_rx_irqsafe(devrec->hw, skb, lqi);
dev_dbg(printdev(devrec), "RX Handled\n");
@@ -578,9 +578,9 @@ out:
return ret;
}
-static struct ieee802154_ops mrf24j40_ops = {
+static const struct ieee802154_ops mrf24j40_ops = {
.owner = THIS_MODULE,
- .xmit = mrf24j40_tx,
+ .xmit_sync = mrf24j40_tx,
.ed = mrf24j40_ed,
.start = mrf24j40_start,
.stop = mrf24j40_stop,
@@ -691,6 +691,28 @@ static int mrf24j40_hw_init(struct mrf24j40 *devrec)
if (ret)
goto err_ret;
+ if (spi_get_device_id(devrec->spi)->driver_data == MRF24J40MC) {
+ /* Enable external amplifier.
+ * From MRF24J40MC datasheet section 1.3: Operation.
+ */
+ read_long_reg(devrec, REG_TESTMODE, &val);
+ val |= 0x7; /* Configure GPIO 0-2 to control amplifier */
+ write_long_reg(devrec, REG_TESTMODE, val);
+
+ read_short_reg(devrec, REG_TRISGPIO, &val);
+ val |= 0x8; /* Set GPIO3 as output. */
+ write_short_reg(devrec, REG_TRISGPIO, val);
+
+ read_short_reg(devrec, REG_GPIO, &val);
+ val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */
+ write_short_reg(devrec, REG_GPIO, val);
+
+ /* Reduce TX pwr to meet FCC requirements.
+ * From MRF24J40MC datasheet section 3.1.1
+ */
+ write_long_reg(devrec, REG_RFCON3, 0x28);
+ }
+
return 0;
err_ret:
@@ -722,17 +744,18 @@ static int mrf24j40_probe(struct spi_device *spi)
/* Register with the 802154 subsystem */
- devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
- if (!devrec->dev)
+ devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops);
+ if (!devrec->hw)
goto err_ret;
- devrec->dev->priv = devrec;
- devrec->dev->parent = &devrec->spi->dev;
- devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
- devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
+ devrec->hw->priv = devrec;
+ devrec->hw->parent = &devrec->spi->dev;
+ devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_AFILT;
dev_dbg(printdev(devrec), "registered mrf24j40\n");
- ret = ieee802154_register_device(devrec->dev);
+ ret = ieee802154_register_hw(devrec->hw);
if (ret)
goto err_register_device;
@@ -757,9 +780,9 @@ static int mrf24j40_probe(struct spi_device *spi)
err_irq:
err_hw_init:
- ieee802154_unregister_device(devrec->dev);
+ ieee802154_unregister_hw(devrec->hw);
err_register_device:
- ieee802154_free_device(devrec->dev);
+ ieee802154_free_hw(devrec->hw);
err_ret:
return ret;
}
@@ -770,8 +793,8 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
- ieee802154_unregister_device(devrec->dev);
- ieee802154_free_device(devrec->dev);
+ ieee802154_unregister_hw(devrec->hw);
+ ieee802154_free_hw(devrec->hw);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
@@ -779,8 +802,9 @@ static int mrf24j40_remove(struct spi_device *spi)
}
static const struct spi_device_id mrf24j40_ids[] = {
- { "mrf24j40", 0 },
- { "mrf24j40ma", 0 },
+ { "mrf24j40", MRF24J40 },
+ { "mrf24j40ma", MRF24J40MA },
+ { "mrf24j40mc", MRF24J40MC },
{ },
};
MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile
new file mode 100644
index 000000000000..df79910192d6
--- /dev/null
+++ b/drivers/net/ipvlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ethernet Ipvlan driver
+#
+
+obj-$(CONFIG_IPVLAN) += ipvlan.o
+
+ipvlan-objs := ipvlan_core.o ipvlan_main.o
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
new file mode 100644
index 000000000000..2729f64b3e7e
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+#ifndef __IPVLAN_H
+#define __IPVLAN_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+
+#define IPVLAN_DRV "ipvlan"
+#define IPV_DRV_VER "0.1"
+
+#define IPVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
+#define IPVLAN_HASH_MASK (IPVLAN_HASH_SIZE - 1)
+
+#define IPVLAN_MAC_FILTER_BITS 8
+#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS)
+#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1)
+
+typedef enum {
+ IPVL_IPV6 = 0,
+ IPVL_ICMPV6,
+ IPVL_IPV4,
+ IPVL_ARP,
+} ipvl_hdr_type;
+
+struct ipvl_pcpu_stats {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_mcast;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_errs;
+ u32 tx_drps;
+};
+
+struct ipvl_port;
+
+struct ipvl_dev {
+ struct net_device *dev;
+ struct list_head pnode;
+ struct ipvl_port *port;
+ struct net_device *phy_dev;
+ struct list_head addrs;
+ int ipv4cnt;
+ int ipv6cnt;
+ struct ipvl_pcpu_stats *pcpu_stats;
+ DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
+ netdev_features_t sfeatures;
+ u32 msg_enable;
+ u16 mtu_adj;
+};
+
+struct ipvl_addr {
+ struct ipvl_dev *master; /* Back pointer to master */
+ union {
+ struct in6_addr ip6; /* IPv6 address on logical interface */
+ struct in_addr ip4; /* IPv4 address on logical interface */
+ } ipu;
+#define ip6addr ipu.ip6
+#define ip4addr ipu.ip4
+ struct hlist_node hlnode; /* Hash-table linkage */
+ struct list_head anode; /* logical-interface linkage */
+ struct rcu_head rcu;
+ ipvl_hdr_type atype;
+};
+
+struct ipvl_port {
+ struct net_device *dev;
+ struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ struct list_head ipvlans;
+ struct rcu_head rcu;
+ int count;
+ u16 mode;
+};
+
+static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
+{
+ return rcu_dereference(d->rx_handler_data);
+}
+
+static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
+{
+ return rtnl_dereference(d->rx_handler_data);
+}
+
+void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev);
+void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
+void ipvlan_init_secret(void);
+unsigned int ipvlan_mac_hash(const unsigned char *addr);
+rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
+bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
+ const void *iaddr, bool is_v6);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
new file mode 100644
index 000000000000..a14d87783245
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -0,0 +1,607 @@
+/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+
+#include "ipvlan.h"
+
+static u32 ipvlan_jhash_secret;
+
+void ipvlan_init_secret(void)
+{
+ net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
+}
+
+static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+ unsigned int len, bool success, bool mcast)
+{
+ if (!ipvlan)
+ return;
+
+ if (likely(success)) {
+ struct ipvl_pcpu_stats *pcptr;
+
+ pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
+ u64_stats_update_begin(&pcptr->syncp);
+ pcptr->rx_pkts++;
+ pcptr->rx_bytes += len;
+ if (mcast)
+ pcptr->rx_mcast++;
+ u64_stats_update_end(&pcptr->syncp);
+ } else {
+ this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
+ }
+}
+
+static u8 ipvlan_get_v6_hash(const void *iaddr)
+{
+ const struct in6_addr *ip6_addr = iaddr;
+
+ return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
+}
+
+static u8 ipvlan_get_v4_hash(const void *iaddr)
+{
+ const struct in_addr *ip4_addr = iaddr;
+
+ return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
+}
+
+struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
+ const void *iaddr, bool is_v6)
+{
+ struct ipvl_addr *addr;
+ u8 hash;
+
+ hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
+ ipvlan_get_v4_hash(iaddr);
+ hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
+ if (is_v6 && addr->atype == IPVL_IPV6 &&
+ ipv6_addr_equal(&addr->ip6addr, iaddr))
+ return addr;
+ else if (!is_v6 && addr->atype == IPVL_IPV4 &&
+ addr->ip4addr.s_addr ==
+ ((struct in_addr *)iaddr)->s_addr)
+ return addr;
+ }
+ return NULL;
+}
+
+void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
+{
+ struct ipvl_port *port = ipvlan->port;
+ u8 hash;
+
+ hash = (addr->atype == IPVL_IPV6) ?
+ ipvlan_get_v6_hash(&addr->ip6addr) :
+ ipvlan_get_v4_hash(&addr->ip4addr);
+ hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+}
+
+void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+{
+ hlist_del_rcu(&addr->hlnode);
+ if (sync)
+ synchronize_rcu();
+}
+
+bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+{
+ struct ipvl_port *port = ipvlan->port;
+ struct ipvl_addr *addr;
+
+ list_for_each_entry(addr, &ipvlan->addrs, anode) {
+ if ((is_v6 && addr->atype == IPVL_IPV6 &&
+ ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
+ (!is_v6 && addr->atype == IPVL_IPV4 &&
+ addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
+ return true;
+ }
+
+ if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
+ return true;
+
+ return false;
+}
+
+static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+{
+ void *lyr3h = NULL;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_ARP): {
+ struct arphdr *arph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ return NULL;
+
+ arph = arp_hdr(skb);
+ *type = IPVL_ARP;
+ lyr3h = arph;
+ break;
+ }
+ case htons(ETH_P_IP): {
+ u32 pktlen;
+ struct iphdr *ip4h;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
+ return NULL;
+
+ ip4h = ip_hdr(skb);
+ pktlen = ntohs(ip4h->tot_len);
+ if (ip4h->ihl < 5 || ip4h->version != 4)
+ return NULL;
+ if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
+ return NULL;
+
+ *type = IPVL_IPV4;
+ lyr3h = ip4h;
+ break;
+ }
+ case htons(ETH_P_IPV6): {
+ struct ipv6hdr *ip6h;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->version != 6)
+ return NULL;
+
+ *type = IPVL_IPV6;
+ lyr3h = ip6h;
+ /* Only Neighbour Solicitation pkts need different treatment */
+ if (ipv6_addr_any(&ip6h->saddr) &&
+ ip6h->nexthdr == NEXTHDR_ICMP) {
+ *type = IPVL_ICMPV6;
+ lyr3h = ip6h + 1;
+ }
+ break;
+ }
+ default:
+ return NULL;
+ }
+
+ return lyr3h;
+}
+
+unsigned int ipvlan_mac_hash(const unsigned char *addr)
+{
+ u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
+ ipvlan_jhash_secret);
+
+ return hash & IPVLAN_MAC_FILTER_MASK;
+}
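Aside (not part of the patch): ipvlan_mac_hash() indexes the per-slave mac_filters bitmap declared in ipvlan.h. A hedged sketch of how such a filter would be maintained and consulted; the example_* helpers are illustrative, not from the driver, whose real filter maintenance lives in ipvlan_main.c and is not shown in this hunk:

/* Illustrative only: a 256-bit Bloom-style filter, so a hit means
 * "possibly subscribed", never a guarantee.
 */
static void example_filter_add(struct ipvl_dev *ipvlan, const u8 *mc_addr)
{
	set_bit(ipvlan_mac_hash(mc_addr), ipvlan->mac_filters);
}

static bool example_filter_match(const struct ipvl_dev *ipvlan,
				 const u8 *dest)
{
	return test_bit(ipvlan_mac_hash(dest), ipvlan->mac_filters);
}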
+
+static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
+ const struct ipvl_dev *in_dev, bool local)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_dev *ipvlan;
+ struct sk_buff *nskb;
+ unsigned int len;
+ unsigned int mac_hash;
+ int ret;
+
+ if (skb->protocol == htons(ETH_P_PAUSE))
+ return;
+
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (local && (ipvlan == in_dev))
+ continue;
+
+ mac_hash = ipvlan_mac_hash(eth->h_dest);
+ if (!test_bit(mac_hash, ipvlan->mac_filters))
+ continue;
+
+ ret = NET_RX_DROP;
+ len = skb->len + ETH_HLEN;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto mcast_acct;
+
+ if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ nskb->dev = ipvlan->dev;
+ if (local)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
+ else
+ ret = netif_rx(nskb);
+mcast_acct:
+ ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ }
+
+ /* Locally generated? Then forward a copy to the main device as
+ * well. On the RX side we'll ignore it (won't give it to any
+ * of the virtual devices).
+ */
+ if (local) {
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb) {
+ if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ dev_forward_skb(port->dev, nskb);
+ }
+ }
+}
+
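+/* Deliver the frame to the slave that owns the matched address. Locally
+ * generated traffic is re-injected with dev_forward_skb(); traffic from
+ * the wire returns RX_HANDLER_ANOTHER so the stack re-runs the frame
+ * against the new skb->dev.
+ */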
+static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+ bool local)
+{
+ struct ipvl_dev *ipvlan = addr->master;
+ struct net_device *dev = ipvlan->dev;
+ unsigned int len;
+ rx_handler_result_t ret = RX_HANDLER_CONSUMED;
+ bool success = false;
+
+ len = skb->len + ETH_HLEN;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+
+ if (local) {
+ if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
+ success = true;
+ } else {
+ ret = RX_HANDLER_ANOTHER;
+ success = true;
+ }
+
+out:
+ ipvlan_count_rx(ipvlan, len, success, false);
+ return ret;
+}
+
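+/* Extract the source or destination L3 address from the already parsed
+ * header and resolve it through the port hash table.
+ */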
+static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
+ void *lyr3h, int addr_type,
+ bool use_dest)
+{
+ struct ipvl_addr *addr = NULL;
+
+ if (addr_type == IPVL_IPV6) {
+ struct ipv6hdr *ip6h;
+ struct in6_addr *i6addr;
+
+ ip6h = (struct ipv6hdr *)lyr3h;
+ i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
+ addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+ } else if (addr_type == IPVL_ICMPV6) {
+ struct nd_msg *ndmh;
+ struct in6_addr *i6addr;
+
+ /* Make sure that the Neighbour Solicitation ICMPv6 packets
+ * are handled to avoid DAD issues.
+ */
+ ndmh = (struct nd_msg *)lyr3h;
+ if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ i6addr = &ndmh->target;
+ addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+ }
+ } else if (addr_type == IPVL_IPV4) {
+ struct iphdr *ip4h;
+ __be32 *i4addr;
+
+ ip4h = (struct iphdr *)lyr3h;
+ i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
+ addr = ipvlan_ht_addr_lookup(port, i4addr, false);
+ } else if (addr_type == IPVL_ARP) {
+ struct arphdr *arph;
+ unsigned char *arp_ptr;
+ __be32 dip;
+
+ arph = (struct arphdr *)lyr3h;
+ arp_ptr = (unsigned char *)(arph + 1);
+ if (use_dest)
+ arp_ptr += (2 * port->dev->addr_len) + 4;
+ else
+ arp_ptr += port->dev->addr_len;
+
+ memcpy(&dip, arp_ptr, 4);
+ addr = ipvlan_ht_addr_lookup(port, &dip, false);
+ }
+
+ return addr;
+}
+
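+/* L3-mode egress (IPv4): route the packet in the namespace of the master
+ * device and send it via ip_local_out(). The IPv6 variant below mirrors
+ * this using ip6_route_output()/ip6_local_out().
+ */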
+static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+{
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct rtable *rt;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi4 fl4 = {
+ .flowi4_oif = dev->iflink,
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
+ };
+
+ rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto err;
+ }
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ err = ip_local_out(skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct dst_entry *dst;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .flowi6_iif = skb->dev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowi6_flags = FLOWI_FLAG_ANYSRC,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+ if (IS_ERR(dst))
+ goto err;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ err = ip6_local_out(skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+
+static int ipvlan_process_outbound(struct sk_buff *skb,
+ const struct ipvl_dev *ipvlan)
+{
+ struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+ /* In this mode we don't care about multicast and broadcast traffic */
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+ * still carry an L2 header; it needs to be discarded and the packet
+ * processed further in the net-ns of the main-device.
+ */
+ if (skb_mac_header_was_set(skb)) {
+ skb_pull(skb, sizeof(*ethh));
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb_reset_network_header(skb);
+ }
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ ret = ipvlan_process_v6_outbound(skb);
+ else if (skb->protocol == htons(ETH_P_IP))
+ ret = ipvlan_process_v4_outbound(skb);
+ else {
+ pr_warn_ratelimited("Dropped outbound packet type=%x\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ }
+out:
+ return ret;
+}
+
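+/* TX in L3 mode: packets destined to another address on the same port are
+ * delivered locally, everything else is handed to the L3 output path on
+ * the physical device.
+ */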
+static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ void *lyr3h;
+ struct ipvl_addr *addr;
+ int addr_type;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
+ if (addr)
+ return ipvlan_rcv_frame(addr, skb, true);
+
+out:
+ skb->dev = ipvlan->phy_dev;
+ return ipvlan_process_outbound(skb, ipvlan);
+}
+
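+/* TX in L2 mode: frames whose destination MAC equals the (shared) source
+ * MAC are delivered locally (to the owning slave, or to the master),
+ * multicasts are replicated locally and then sent out, and everything
+ * else is simply queued on the physical device.
+ */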
+static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+
+ if (ether_addr_equal(eth->h_dest, eth->h_source)) {
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (lyr3h) {
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
+ if (addr)
+ return ipvlan_rcv_frame(addr, skb, true);
+ }
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+
+ /* Packet definitely does not belong to any of the
+ * virtual devices, but the dest is local. So forward
+ * the skb to the main-dev. At the RX side we just return
+ * RX_PASS for it to be processed further on the stack.
+ */
+ return dev_forward_skb(ipvlan->phy_dev, skb);
+
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
+ u8 ip_summed = skb->ip_summed;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
+ skb->ip_summed = ip_summed;
+ }
+
+ skb->dev = ipvlan->phy_dev;
+ return dev_queue_xmit(skb);
+}
+
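+/* TX entry point, called from ndo_start_xmit in ipvlan_main.c: dispatch
+ * the frame according to the port's operating mode.
+ */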
+int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+
+ if (!port)
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+ goto out;
+
+ switch (port->mode) {
+ case IPVLAN_MODE_L2:
+ return ipvlan_xmit_mode_l2(skb, dev);
+ case IPVLAN_MODE_L3:
+ return ipvlan_xmit_mode_l3(skb, dev);
+ }
+
+ /* Should not reach here */
+ WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
+ port->mode);
+out:
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
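+/* Returns true if the frame did not originate from one of this port's own
+ * slaves, i.e. its source MAC/L3 address does not map back to a local
+ * ipvlan address.
+ */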
+static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+
+ if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ return true;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
+ if (addr)
+ return false;
+ }
+
+ return true;
+}
+
+static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
+ struct ipvl_port *port)
+{
+ void *lyr3h;
+ int addr_type;
+ struct ipvl_addr *addr;
+ struct sk_buff *skb = *pskb;
+ rx_handler_result_t ret = RX_HANDLER_PASS;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+ if (addr)
+ ret = ipvlan_rcv_frame(addr, skb, false);
+
+out:
+ return ret;
+}
+
+static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
+ struct ipvl_port *port)
+{
+ struct sk_buff *skb = *pskb;
+ struct ethhdr *eth = eth_hdr(skb);
+ rx_handler_result_t ret = RX_HANDLER_PASS;
+ void *lyr3h;
+ int addr_type;
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ if (ipvlan_external_frame(skb, port))
+ ipvlan_multicast_frame(port, skb, NULL, false);
+ } else {
+ struct ipvl_addr *addr;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ return ret;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+ if (addr)
+ ret = ipvlan_rcv_frame(addr, skb, false);
+ }
+
+ return ret;
+}
+
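+/* rx_handler registered on the master device (see ipvlan_port_create());
+ * steers incoming frames to the matching slave or lets them pass on to
+ * the master's own stack.
+ */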
+rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);
+
+ if (!port)
+ return RX_HANDLER_PASS;
+
+ switch (port->mode) {
+ case IPVLAN_MODE_L2:
+ return ipvlan_handle_mode_l2(pskb, port);
+ case IPVLAN_MODE_L3:
+ return ipvlan_handle_mode_l3(pskb, port);
+ }
+
+ /* Should not reach here */
+ WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
+ port->mode);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
new file mode 100644
index 000000000000..4f4099d5603d
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -0,0 +1,795 @@
+/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+
+#include "ipvlan.h"
+
+void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
+{
+ ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
+}
+
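+/* Switch the port between L2 and L3 operating modes. In L3 mode every
+ * slave is marked IFF_NOARP; switching back to L2 clears the flag again.
+ */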
+void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval)
+{
+ struct ipvl_dev *ipvlan;
+
+ if (port->mode != nval) {
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (nval == IPVLAN_MODE_L3)
+ ipvlan->dev->flags |= IFF_NOARP;
+ else
+ ipvlan->dev->flags &= ~IFF_NOARP;
+ }
+ port->mode = nval;
+ }
+}
+
+static int ipvlan_port_create(struct net_device *dev)
+{
+ struct ipvl_port *port;
+ int err, idx;
+
+ if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
+ netdev_err(dev, "Master is either lo or non-ether device\n");
+ return -EINVAL;
+ }
+
+ if (netif_is_macvlan_port(dev)) {
+ netdev_err(dev, "Master is a macvlan port.\n");
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = dev;
+ port->mode = IPVLAN_MODE_L3;
+ INIT_LIST_HEAD(&port->ipvlans);
+ for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
+ INIT_HLIST_HEAD(&port->hlhead[idx]);
+
+ err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
+ if (err)
+ goto err;
+
+ dev->priv_flags |= IFF_IPVLAN_MASTER;
+ return 0;
+
+err:
+ kfree_rcu(port, rcu);
+ return err;
+}
+
+static void ipvlan_port_destroy(struct net_device *dev)
+{
+ struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+
+ dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ netdev_rx_handler_unregister(dev);
+ kfree_rcu(port, rcu);
+}
+
+/* ipvlan network devices have devices nesting below them and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key ipvlan_netdev_xmit_lock_key;
+static struct lock_class_key ipvlan_netdev_addr_lock_key;
+
+#define IPVLAN_FEATURES \
+ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+
+#define IPVLAN_STATE_MASK \
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+
+static void ipvlan_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
+}
+
+static void ipvlan_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
+}
+
+static int ipvlan_init(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ const struct net_device *phy_dev = ipvlan->phy_dev;
+
+ dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
+ (phy_dev->state & IPVLAN_STATE_MASK);
+ dev->features = phy_dev->features & IPVLAN_FEATURES;
+ dev->features |= NETIF_F_LLTX;
+ dev->gso_max_size = phy_dev->gso_max_size;
+ dev->iflink = phy_dev->ifindex;
+ dev->hard_header_len = phy_dev->hard_header_len;
+
+ ipvlan_set_lockdep_class(dev);
+
+ ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+ if (!ipvlan->pcpu_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ipvlan_uninit(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan->port;
+
+ free_percpu(ipvlan->pcpu_stats);
+
+ port->count -= 1;
+ if (!port->count)
+ ipvlan_port_destroy(port->dev);
+}
+
+static int ipvlan_open(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_addr *addr;
+
+ if (ipvlan->port->mode == IPVLAN_MODE_L3)
+ dev->flags |= IFF_NOARP;
+ else
+ dev->flags &= ~IFF_NOARP;
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+ }
+ return dev_uc_add(phy_dev, phy_dev->dev_addr);
+}
+
+static int ipvlan_stop(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_addr *addr;
+
+ dev_uc_unsync(phy_dev, dev);
+ dev_mc_unsync(phy_dev, dev);
+
+ dev_uc_del(phy_dev, phy_dev->dev_addr);
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr, !dev->dismantle);
+ }
+ return 0;
+}
+
+static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int skblen = skb->len;
+ int ret;
+
+ ret = ipvlan_queue_xmit(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct ipvl_pcpu_stats *pcptr;
+
+ pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
+
+ u64_stats_update_begin(&pcptr->syncp);
+ pcptr->tx_pkts++;
+ pcptr->tx_bytes += skblen;
+ u64_stats_update_end(&pcptr->syncp);
+ } else {
+ this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
+ }
+ return ret;
+}
+
+static netdev_features_t ipvlan_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+}
+
+static void ipvlan_change_rx_flags(struct net_device *dev, int change)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+}
+
+static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
+{
+ struct net_device *dev = ipvlan->dev;
+ unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
+
+ if (set && !test_bit(hashbit, ipvlan->mac_filters))
+ __set_bit(hashbit, ipvlan->mac_filters);
+ else if (!set && test_bit(hashbit, ipvlan->mac_filters))
+ __clear_bit(hashbit, ipvlan->mac_filters);
+}
+
+static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
+ } else {
+ struct netdev_hw_addr *ha;
+ DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);
+
+ bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
+ netdev_for_each_mc_addr(ha, dev)
+ __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
+
+ bitmap_copy(ipvlan->mac_filters, mc_filters,
+ IPVLAN_MAC_FILTER_SIZE);
+ }
+ dev_uc_sync(ipvlan->phy_dev, dev);
+ dev_mc_sync(ipvlan->phy_dev, dev);
+}
+
+static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (ipvlan->pcpu_stats) {
+ struct ipvl_pcpu_stats *pcptr;
+ u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
+ u32 rx_errs = 0, tx_drps = 0;
+ u32 strt;
+ int idx;
+
+ for_each_possible_cpu(idx) {
+ pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
+ do {
+ strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
+ rx_pkts = pcptr->rx_pkts;
+ rx_bytes = pcptr->rx_bytes;
+ rx_mcast = pcptr->rx_mcast;
+ tx_pkts = pcptr->tx_pkts;
+ tx_bytes = pcptr->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
+ strt));
+
+ s->rx_packets += rx_pkts;
+ s->rx_bytes += rx_bytes;
+ s->multicast += rx_mcast;
+ s->tx_packets += tx_pkts;
+ s->tx_bytes += tx_bytes;
+
+ /* u32 values are updated without syncp protection. */
+ rx_errs += pcptr->rx_errs;
+ tx_drps += pcptr->tx_drps;
+ }
+ s->rx_errors = rx_errs;
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
+ return s;
+}
+
+static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ return vlan_vid_add(phy_dev, proto, vid);
+}
+
+static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ vlan_vid_del(phy_dev, proto, vid);
+ return 0;
+}
+
+static const struct net_device_ops ipvlan_netdev_ops = {
+ .ndo_init = ipvlan_init,
+ .ndo_uninit = ipvlan_uninit,
+ .ndo_open = ipvlan_open,
+ .ndo_stop = ipvlan_stop,
+ .ndo_start_xmit = ipvlan_start_xmit,
+ .ndo_fix_features = ipvlan_fix_features,
+ .ndo_change_rx_flags = ipvlan_change_rx_flags,
+ .ndo_set_rx_mode = ipvlan_set_multicast_mac_filter,
+ .ndo_get_stats64 = ipvlan_get_stats64,
+ .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid,
+};
+
+static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ /* TODO Probably use a different field than dev_addr so that the
+ * mac-address on the virtual device is portable and can be carried
+ * along, while the packets on the wire use the mac-addr of the physical device.
+ */
+ return dev_hard_header(skb, phy_dev, type, daddr,
+ saddr ? : dev->dev_addr, len);
+}
+
+static const struct header_ops ipvlan_header_ops = {
+ .create = ipvlan_hard_header,
+ .rebuild = eth_rebuild_header,
+ .parse = eth_header_parse,
+ .cache = eth_header_cache,
+ .cache_update = eth_header_cache_update,
+};
+
+static int ipvlan_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return __ethtool_get_settings(ipvlan->phy_dev, cmd);
+}
+
+static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
+}
+
+static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return ipvlan->msg_enable;
+}
+
+static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ ipvlan->msg_enable = value;
+}
+
+static const struct ethtool_ops ipvlan_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = ipvlan_ethtool_get_settings,
+ .get_drvinfo = ipvlan_ethtool_get_drvinfo,
+ .get_msglevel = ipvlan_ethtool_get_msglevel,
+ .set_msglevel = ipvlan_ethtool_set_msglevel,
+};
+
+static int ipvlan_nl_changelink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+
+ if (data && data[IFLA_IPVLAN_MODE]) {
+ u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ ipvlan_set_port_mode(port, nmode);
+ }
+ return 0;
+}
+
+static size_t ipvlan_nl_getsize(const struct net_device *dev)
+{
+ return (0
+ + nla_total_size(2) /* IFLA_IPVLAN_MODE */
+ );
+}
+
+static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (data && data[IFLA_IPVLAN_MODE]) {
+ u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ipvlan_nl_fillinfo(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int ret = -EINVAL;
+
+ if (!port)
+ goto err;
+
+ ret = -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port;
+ struct net_device *phy_dev;
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!phy_dev)
+ return -ENODEV;
+
+ if (netif_is_ipvlan(phy_dev)) {
+ struct ipvl_dev *tmp = netdev_priv(phy_dev);
+
+ phy_dev = tmp->phy_dev;
+ } else if (!netif_is_ipvlan_port(phy_dev)) {
+ err = ipvlan_port_create(phy_dev);
+ if (err < 0)
+ return err;
+ }
+
+ port = ipvlan_port_get_rtnl(phy_dev);
+ if (data && data[IFLA_IPVLAN_MODE])
+ port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ ipvlan->phy_dev = phy_dev;
+ ipvlan->dev = dev;
+ ipvlan->port = port;
+ ipvlan->sfeatures = IPVLAN_FEATURES;
+ INIT_LIST_HEAD(&ipvlan->addrs);
+ ipvlan->ipv4cnt = 0;
+ ipvlan->ipv6cnt = 0;
+
+ /* TODO Probably put a random address here to be presented to the
+ * world but keep using the physical-dev address for the outgoing
+ * packets.
+ */
+ memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+
+ dev->priv_flags |= IFF_IPVLAN_SLAVE;
+
+ port->count += 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto ipvlan_destroy_port;
+
+ err = netdev_upper_dev_link(phy_dev, dev);
+ if (err)
+ goto ipvlan_destroy_port;
+
+ list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
+ netif_stacked_transfer_operstate(phy_dev, dev);
+ return 0;
+
+ipvlan_destroy_port:
+ port->count -= 1;
+ if (!port->count)
+ ipvlan_port_destroy(phy_dev);
+
+ return err;
+}
+
+static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_addr *addr, *next;
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+ ipvlan_ht_addr_del(addr, !dev->dismantle);
+ list_del_rcu(&addr->anode);
+ }
+ }
+ list_del_rcu(&ipvlan->pnode);
+ unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
+}
+
+static void ipvlan_link_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->netdev_ops = &ipvlan_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->header_ops = &ipvlan_header_ops;
+ dev->ethtool_ops = &ipvlan_ethtool_ops;
+ dev->tx_queue_len = 0;
+}
+
+static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
+{
+ [IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
+};
+
+static struct rtnl_link_ops ipvlan_link_ops = {
+ .kind = "ipvlan",
+ .priv_size = sizeof(struct ipvl_dev),
+
+ .get_size = ipvlan_nl_getsize,
+ .policy = ipvlan_nl_policy,
+ .validate = ipvlan_nl_validate,
+ .fill_info = ipvlan_nl_fillinfo,
+ .changelink = ipvlan_nl_changelink,
+ .maxtype = IFLA_IPVLAN_MAX,
+
+ .setup = ipvlan_link_setup,
+ .newlink = ipvlan_link_new,
+ .dellink = ipvlan_link_delete,
+};
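+
+/* Typical usage from user space, assuming an iproute2 build that knows
+ * the "ipvlan" link kind:
+ *
+ *   ip link add link eth0 name ipvl0 type ipvlan mode l3
+ *
+ * which ends up in ipvlan_link_new() with IFLA_LINK pointing at eth0 and
+ * IFLA_IPVLAN_MODE set to IPVLAN_MODE_L3.
+ */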
+
+static int ipvlan_link_register(struct rtnl_link_ops *ops)
+{
+ return rtnl_link_register(ops);
+}
+
+static int ipvlan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ipvl_dev *ipvlan, *next;
+ struct ipvl_port *port;
+ LIST_HEAD(lst_kill);
+
+ if (!netif_is_ipvlan_port(dev))
+ return NOTIFY_DONE;
+
+ port = ipvlan_port_get_rtnl(dev);
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ netif_stacked_transfer_operstate(ipvlan->phy_dev,
+ ipvlan->dev);
+ break;
+
+ case NETDEV_UNREGISTER:
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
+ list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
+ pnode)
+ ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
+ &lst_kill);
+ unregister_netdevice_many(&lst_kill);
+ break;
+
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
+ ipvlan->dev->gso_max_size = dev->gso_max_size;
+ netdev_features_change(ipvlan->dev);
+ }
+ break;
+
+ case NETDEV_CHANGEMTU:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ ipvlan_adjust_mtu(ipvlan, dev);
+ break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlying device to change its type. */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ struct ipvl_addr *addr;
+
+ if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv6=%pI6c addr for %s intf\n",
+ ip6_addr, ipvlan->dev->name);
+ return -EINVAL;
+ }
+ addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ addr->master = ipvlan;
+ memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
+ addr->atype = IPVL_IPV6;
+ list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ ipvlan->ipv6cnt++;
+ ipvlan_ht_addr_add(ipvlan, addr);
+
+ return 0;
+}
+
+static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ struct ipvl_addr *addr;
+
+ addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+ if (!addr)
+ return;
+
+ ipvlan_ht_addr_del(addr, true);
+ list_del_rcu(&addr->anode);
+ ipvlan->ipv6cnt--;
+ WARN_ON(ipvlan->ipv6cnt < 0);
+ kfree_rcu(addr, rcu);
+
+ return;
+}
+
+static int ipvlan_addr6_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
+ struct net_device *dev = (struct net_device *)if6->idev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (ipvlan_add_addr6(ipvlan, &if6->addr))
+ return NOTIFY_BAD;
+ break;
+
+ case NETDEV_DOWN:
+ ipvlan_del_addr6(ipvlan, &if6->addr);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
+{
+ struct ipvl_addr *addr;
+
+ if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv4=%pI4 on %s intf.\n",
+ ip4_addr, ipvlan->dev->name);
+ return -EINVAL;
+ }
+ addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ addr->master = ipvlan;
+ memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
+ addr->atype = IPVL_IPV4;
+ list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ ipvlan->ipv4cnt++;
+ ipvlan_ht_addr_add(ipvlan, addr);
+ ipvlan_set_broadcast_mac_filter(ipvlan, true);
+
+ return 0;
+}
+
+static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
+{
+ struct ipvl_addr *addr;
+
+ addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+ if (!addr)
+ return;
+
+ ipvlan_ht_addr_del(addr, true);
+ list_del_rcu(&addr->anode);
+ ipvlan->ipv4cnt--;
+ WARN_ON(ipvlan->ipv4cnt < 0);
+ if (!ipvlan->ipv4cnt)
+ ipvlan_set_broadcast_mac_filter(ipvlan, false);
+ kfree_rcu(addr, rcu);
+
+ return;
+}
+
+static int ipvlan_addr4_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
+ struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct in_addr ip4_addr;
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ ip4_addr.s_addr = if4->ifa_address;
+ if (ipvlan_add_addr4(ipvlan, &ip4_addr))
+ return NOTIFY_BAD;
+ break;
+
+ case NETDEV_DOWN:
+ ip4_addr.s_addr = if4->ifa_address;
+ ipvlan_del_addr4(ipvlan, &ip4_addr);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr4_event,
+};
+
+static struct notifier_block ipvlan_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_device_event,
+};
+
+static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr6_event,
+};
+
+static int __init ipvlan_init_module(void)
+{
+ int err;
+
+ ipvlan_init_secret();
+ register_netdevice_notifier(&ipvlan_notifier_block);
+ register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+
+ err = ipvlan_link_register(&ipvlan_link_ops);
+ if (err < 0)
+ goto error;
+
+ return 0;
+error:
+ unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ unregister_netdevice_notifier(&ipvlan_notifier_block);
+ return err;
+}
+
+static void __exit ipvlan_cleanup_module(void)
+{
+ rtnl_link_unregister(&ipvlan_link_ops);
+ unregister_netdevice_notifier(&ipvlan_notifier_block);
+ unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+}
+
+module_init(ipvlan_init_module);
+module_exit(ipvlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
+MODULE_ALIAS_RTNL_LINK("ipvlan");
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index 8ff084f1d236..e8917511e1aa 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -107,8 +107,6 @@ static int act200l_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Power on the dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -124,8 +122,6 @@ static int act200l_open(struct sir_dev *dev)
static int act200l_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Power off the dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -143,8 +139,6 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[3];
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Clear DTR and set RTS to enter command mode */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
@@ -212,8 +206,6 @@ static int act200l_reset(struct sir_dev *dev)
};
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
/* Reset the dongle : set RTS low for 25 ms */
@@ -240,7 +232,8 @@ static int act200l_reset(struct sir_dev *dev)
dev->speed = 9600;
break;
default:
- IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
+ net_err_ratelimited("%s(), unknown state %d\n",
+ __func__, state);
ret = -1;
break;
}
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
index 50b2141a6103..e224b8b99517 100644
--- a/drivers/net/irda/actisys-sir.c
+++ b/drivers/net/irda/actisys-sir.c
@@ -165,8 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
int ret = 0;
int i = 0;
- IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__,
- speed, dev->speed);
+ pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed);
/* dongle was already resetted from irda_request state machine,
* we are in known state (dongle default)
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index befa45f809c3..58f98f4de773 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -61,7 +61,6 @@ static struct platform_driver ali_ircc_driver = {
.resume = ali_ircc_resume,
.driver = {
.name = ALI_IRCC_DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
@@ -154,12 +153,10 @@ static int __init ali_ircc_init(void)
int reg, revision;
int i = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
ret = platform_driver_register(&ali_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n",
- ALI_IRCC_DRIVER_NAME);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ ALI_IRCC_DRIVER_NAME);
return ret;
}
@@ -168,7 +165,7 @@ static int __init ali_ircc_init(void)
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
{
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
+ pr_debug("%s(), Probing for %s ...\n", __func__, chip->name);
/* Try all config registers for this chip */
for (cfg=0; cfg<2; cfg++)
@@ -198,12 +195,13 @@ static int __init ali_ircc_init(void)
if (reg == chip->cid_value)
{
- IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
+ pr_debug("%s(), Chip found at 0x%03x\n",
+ __func__, cfg_base);
outb(0x1F, cfg_base);
revision = inb(cfg_base+1);
- IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
- chip->name, revision);
+ pr_debug("%s(), Found %s chip, revision=%d\n",
+ __func__, chip->name, revision);
/*
* If the user supplies the base address, then
@@ -225,15 +223,14 @@ static int __init ali_ircc_init(void)
}
else
{
- IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
+ pr_debug("%s(), No %s chip at 0x%03x\n",
+ __func__, chip->name, cfg_base);
}
/* Exit configuration */
outb(0xbb, cfg_base);
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
-
if (ret)
platform_driver_unregister(&ali_ircc_driver);
@@ -250,8 +247,6 @@ static void __exit ali_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
ali_ircc_close(dev_self[i]);
@@ -259,7 +254,6 @@ static void __exit ali_ircc_cleanup(void)
platform_driver_unregister(&ali_ircc_driver);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
}
static const struct net_device_ops ali_ircc_sir_ops = {
@@ -289,11 +283,9 @@ static int ali_ircc_open(int i, chipio_t *info)
int dongle_id;
int err;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
if (i >= ARRAY_SIZE(dev_self)) {
- IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
- __func__);
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
return -ENOMEM;
}
@@ -303,8 +295,8 @@ static int ali_ircc_open(int i, chipio_t *info)
dev = alloc_irdadev(sizeof(*self));
if (dev == NULL) {
- IRDA_ERROR("%s(), can't allocate memory for control block!\n",
- __func__);
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
return -ENOMEM;
}
@@ -328,8 +320,8 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext,
ALI_IRCC_DRIVER_NAME)) {
- IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
- self->io.fir_base);
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
@@ -380,19 +372,20 @@ static int ali_ircc_open(int i, chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto err_out4;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Check dongle id */
dongle_id = ali_ircc_read_dongle_id(i, info);
- IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
- ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
+ net_info_ratelimited("%s(), %s, Found dongle: %s\n",
+ __func__, ALI_IRCC_DRIVER_NAME,
+ dongle_types[dongle_id]);
self->io.dongle_id = dongle_id;
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
@@ -421,8 +414,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
@@ -431,7 +422,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -445,7 +436,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
dev_self[self->index] = NULL;
free_netdev(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -488,7 +478,6 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
int cfg_base = info->cfg_base;
int hi, low, reg;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chip->entr1, cfg_base);
@@ -507,13 +496,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
/* Read IRQ control register */
outb(0x70, cfg_base);
reg = inb(cfg_base+1);
info->irq = reg & 0x0f;
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
/* Read DMA channel */
outb(0x74, cfg_base);
@@ -521,26 +510,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->dma = reg & 0x07;
if(info->dma == 0x04)
- IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
+ net_warn_ratelimited("%s(), No DMA channel assigned !\n",
+ __func__);
else
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
/* Read Enabled Status */
outb(0x30, cfg_base);
reg = inb(cfg_base+1);
info->enabled = (reg & 0x80) && (reg & 0x01);
- IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
+ pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled);
/* Read Power Status */
outb(0x22, cfg_base);
reg = inb(cfg_base+1);
info->suspended = (reg & 0x20);
- IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
+ pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -558,7 +547,6 @@ static int ali_ircc_setup(chipio_t *info)
int version;
int iobase = info->fir_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Locking comments :
* Most operations here need to be protected. We are called before
@@ -578,8 +566,8 @@ static int ali_ircc_setup(chipio_t *info)
/* Should be 0x00 in the M1535/M1535D */
if(version != 0x00)
{
- IRDA_ERROR("%s, Wrong chip version %02x\n",
- ALI_IRCC_DRIVER_NAME, version);
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ ALI_IRCC_DRIVER_NAME, version);
return -1;
}
@@ -612,14 +600,13 @@ static int ali_ircc_setup(chipio_t *info)
/* Switch to SIR space */
FIR2SIR(iobase);
- IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n",
- ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n",
+ ALI_IRCC_DRIVER_NAME);
/* Enable receive interrupts */
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
// Turn on the interrupts in ali_ircc_net_open
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return 0;
}
@@ -636,7 +623,6 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
int dongle_id, reg;
int cfg_base = info->cfg_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chips[i].entr1, cfg_base);
@@ -650,13 +636,12 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
outb(0xf0, cfg_base);
reg = inb(cfg_base+1);
dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
- IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
- dongle_id, dongle_types[dongle_id]);
+ pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n",
+ __func__, dongle_id, dongle_types[dongle_id]);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return dongle_id;
}
@@ -673,7 +658,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
struct ali_ircc_cb *self;
int ret;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
self = netdev_priv(dev);
@@ -687,7 +671,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
spin_unlock(&self->lock);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return ret;
}
/*
@@ -701,7 +684,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
__u8 eir, OldMessageCount;
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.fir_base;
@@ -714,10 +696,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
//self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
- IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
- IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
- IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
- IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
+ pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID);
+ pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus);
+ pr_debug("%s(), self->ier = %x\n", __func__, self->ier);
+ pr_debug("%s(), eir = %x\n", __func__, eir);
/* Disable interrupts */
SetCOMInterrupts(self, FALSE);
@@ -728,7 +710,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
{
if (self->io.direction == IO_XMIT) /* TX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
+ pr_debug("%s(), ******* IIR_EOM (Tx) *******\n",
+ __func__);
if(ali_ircc_dma_xmit_complete(self))
{
@@ -747,23 +730,27 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
}
else /* RX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
+ pr_debug("%s(), ******* IIR_EOM (Rx) *******\n",
+ __func__);
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n",
+ __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
+ pr_debug("%s(), ******* receive complete ********\n",
+ __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
+ pr_debug("%s(), ******* Not receive complete ********\n",
+ __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -776,7 +763,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n",
+ __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -808,7 +796,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
/* Restore Interrupt */
SetCOMInterrupts(self, TRUE);
- IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
return IRQ_RETVAL(eir);
}
@@ -823,7 +810,6 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
int iobase;
int iir, lsr;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.sir_base;
@@ -832,13 +818,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
- IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
- iir, lsr, iobase);
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
switch (iir)
{
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
+ pr_debug("%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -852,15 +838,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
}
break;
default:
- IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
break;
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
-
return IRQ_RETVAL(iir);
}
@@ -876,7 +861,6 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
int boguscount = 0;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -891,12 +875,11 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
/* Make sure we don't stay here too long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
+ pr_debug("%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -913,7 +896,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.sir_base;
@@ -932,16 +914,18 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
{
/* We must wait until all data are gone */
while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
- IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
+ pr_debug("%s(), UART_LSR_THRE\n", __func__);
- IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
+ pr_debug("%s(), Changing speed! self->new_speed = %d\n",
+ __func__, self->new_speed);
ali_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
+ pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n",
+ __func__);
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -959,7 +943,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
outb(UART_IER_RDI, iobase+UART_IER);
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
@@ -967,9 +950,8 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
struct net_device *dev = self->netdev;
int iobase;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
+ pr_debug("%s(), setting speed = %d\n", __func__, baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1008,7 +990,6 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
netif_wake_queue(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
@@ -1018,14 +999,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
struct ali_ircc_cb *self = priv;
struct net_device *dev;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
iobase = self->io.fir_base;
- IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
+ pr_debug("%s(), self->io.speed = %d, change to speed = %d\n",
+ __func__, self->io.speed, baud);
/* Come from SIR speed */
if(self->io.speed <=115200)
@@ -1039,7 +1020,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
// Set Dongle Speed mode
ali_ircc_change_dongle_speed(self, baud);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1057,9 +1037,8 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
@@ -1113,7 +1092,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
@@ -1123,14 +1101,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
int iobase,dongle_id;
int tmp = 0;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
dongle_id = self->io.dongle_id;
/* We are already locked, no need to do it again */
- IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
+ pr_debug("%s(), Set Speed for %s , Speed = %d\n",
+ __func__, dongle_types[dongle_id], speed);
switch_bank(iobase, BANK2);
tmp = inb(iobase+FIR_IRDA_CR);
@@ -1294,7 +1272,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1307,11 +1284,10 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
int actual = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
- IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
+ pr_debug("%s(), failed, fifo not empty!\n", __func__);
return 0;
}
@@ -1323,7 +1299,6 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
actual++;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return actual;
}
@@ -1339,7 +1314,6 @@ static int ali_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1352,9 +1326,8 @@ static int ali_ircc_net_open(struct net_device *dev)
/* Request IRQ and install Interrupt Handler */
if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
{
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- ALI_IRCC_DRIVER_NAME,
- self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.irq);
return -EAGAIN;
}
@@ -1363,9 +1336,8 @@ static int ali_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n",
- ALI_IRCC_DRIVER_NAME,
- self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
@@ -1385,7 +1357,6 @@ static int ali_ircc_net_open(struct net_device *dev)
*/
self->irlap = irlap_open(dev, &self->qos, hwname);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1402,7 +1373,6 @@ static int ali_ircc_net_close(struct net_device *dev)
struct ali_ircc_cb *self;
//int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1425,7 +1395,6 @@ static int ali_ircc_net_close(struct net_device *dev)
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1445,7 +1414,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
__u32 speed;
int mtt, diff;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
self = netdev_priv(dev);
iobase = self->io.fir_base;
@@ -1499,7 +1467,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
+ pr_debug("%s(), ******* diff = %d *******\n",
+ __func__, diff);
if (diff < 0)
diff += 1000000;
@@ -1521,7 +1490,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Adjust for timer resolution */
mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
- IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
+ pr_debug("%s(), ************** mtt = %d ***********\n",
+ __func__, mtt);
/* Setup timer */
if (mtt == 1) /* 500 us */
@@ -1578,7 +1548,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return NETDEV_TX_OK;
}
@@ -1589,7 +1558,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
unsigned char FIFO_OPTI, Hi, Lo;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1640,7 +1608,8 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
+ pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1650,7 +1619,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
@@ -1658,7 +1626,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
int iobase;
int ret = TRUE;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1671,7 +1638,8 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
{
- IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
+ net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n",
+ __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
}
@@ -1714,7 +1682,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -1729,7 +1696,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
{
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1767,7 +1733,8 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
+ pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1779,7 +1746,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1790,8 +1756,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
__u8 status, MessageCount;
int len, i, iobase, val;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
-
st_fifo = &self->st_fifo;
iobase = self->io.fir_base;
@@ -1799,7 +1763,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
MessageCount = inb(iobase+ FIR_LSR)&0x07;
if (MessageCount > 0)
- IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount);
+ pr_debug("%s(), Message count = %d\n", __func__, MessageCount);
for (i=0; i<=MessageCount; i++)
{
@@ -1812,11 +1776,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
len = len << 8;
len |= inb(iobase+FIR_RX_DSR_LO);
- IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
- IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
+ pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len);
+ pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
+ pr_debug("%s(), window is full!\n", __func__);
continue;
}
@@ -1839,7 +1803,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
+ pr_debug("%s(), ************* RX Errors ************\n",
+ __func__);
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1849,29 +1814,34 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
+ pr_debug("%s(), ************* FIFO Errors ************\n",
+ __func__);
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
+ pr_debug("%s(), ************* FRAME Errors ************\n",
+ __func__);
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
+ pr_debug("%s(), ************* CRC Errors ************\n",
+ __func__);
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
+ pr_debug("%s(), ************* Overran DMA buffer ************\n",
+ __func__);
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
+ pr_debug("%s(), ********** Receive Frame Size = 0 *********\n",
+ __func__);
}
}
else
@@ -1883,7 +1853,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
+ pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n",
+ __func__);
/* Put this entry back in fifo */
st_fifo->head--;
@@ -1918,9 +1889,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
skb = dev_alloc_skb(len+1);
if (skb == NULL)
{
- IRDA_WARNING("%s(), memory squeeze, "
- "dropping frame.\n",
- __func__);
self->netdev->stats.rx_dropped++;
return FALSE;
@@ -1947,7 +1915,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return TRUE;
}
@@ -1967,7 +1934,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
int iobase;
__u32 speed;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
@@ -2016,7 +1982,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return NETDEV_TX_OK;
}
@@ -2035,7 +2000,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
unsigned long flags;
int ret = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2043,11 +2007,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
- IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
+ pr_debug("%s(), SIOCSBANDWIDTH\n", __func__);
/*
* This function will also be used by IrLAP to change the
* speed, so we still must allow for speed change within
@@ -2061,13 +2025,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
spin_unlock_irqrestore(&self->lock, flags);
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
+ pr_debug("%s(), SIOCSMEDIABUSY\n", __func__);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
irda_device_set_media_busy(self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
- IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
+ pr_debug("%s(), SIOCGRECEIVING\n", __func__);
/* This is protected */
irq->ifr_receiving = ali_ircc_is_receiving(self);
break;
@@ -2075,7 +2039,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = -EOPNOTSUPP;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -2092,7 +2055,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
int status = FALSE;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return FALSE;);
@@ -2106,7 +2068,8 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
{
/* We are receiving something */
- IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
+ pr_debug("%s(), We are receiving something\n",
+ __func__);
status = TRUE;
}
switch_bank(iobase, BANK0);
@@ -2118,7 +2081,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return status;
}
@@ -2127,7 +2089,7 @@ static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
{
struct ali_ircc_cb *self = platform_get_drvdata(dev);
- IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
if (self->io.suspended)
return 0;
@@ -2148,7 +2110,7 @@ static int ali_ircc_resume(struct platform_device *dev)
ali_ircc_net_open(self->netdev);
- IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
self->io.suspended = 0;
@@ -2164,7 +2126,8 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
int iobase = self->io.fir_base; /* or sir_base */
- IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
+ pr_debug("%s(), -------- Start -------- ( Enable = %d )\n",
+ __func__, enable);
/* Enable the interrupt which we wish to */
if (enable){
@@ -2205,14 +2168,12 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
else
outb(newMask, iobase+UART_IER);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void SIR2FIR(int iobase)
{
//unsigned char tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2228,14 +2189,12 @@ static void SIR2FIR(int iobase)
//tmp |= 0x20;
//outb(tmp, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void FIR2SIR(int iobase)
{
unsigned char val;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2251,7 +2210,6 @@ static void FIR2SIR(int iobase)
val = inb(iobase+UART_LSR);
val = inb(iobase+UART_MSR);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index aab2cf72d025..e151205281e2 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -989,7 +989,6 @@ static int au1k_irda_remove(struct platform_device *pdev)
static struct platform_driver au1k_irda_driver = {
.driver = {
.name = "au1000-irda",
- .owner = THIS_MODULE,
},
.probe = au1k_irda_probe,
.remove = au1k_irda_remove,
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index a87a82ca111f..b337e6d23a88 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -232,7 +232,7 @@ char head=tete;
for (i=0;i<len;i+=16) {
for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
dump [3*j]=0;
- IRDA_DEBUG (2, "%c%s\n",head , dump);
+ pr_debug("%c%s\n", head, dump);
head='+';
}
}
@@ -245,8 +245,6 @@ toshoboe_dumpregs (struct toshoboe_cb *self)
{
__u32 ringbase;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
ringbase = INB (OBOE_RING_BASE0) << 10;
ringbase |= INB (OBOE_RING_BASE1) << 18;
ringbase |= INB (OBOE_RING_BASE2) << 26;
@@ -293,8 +291,6 @@ static void
toshoboe_disablebm (struct toshoboe_cb *self)
{
__u8 command;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MASTER;
pci_write_config_byte (self->pdev, PCI_COMMAND, command);
@@ -305,8 +301,6 @@ toshoboe_disablebm (struct toshoboe_cb *self)
static void
toshoboe_stopchip (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __func__);
-
/*Disable interrupts */
OUTB (0x0, OBOE_IER);
/*Disable DMA, Disable Rx, Disable Tx */
@@ -350,7 +344,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
__u16 pconfig = 0;
__u8 config0l = 0;
- IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed);
+ pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed);
switch (self->speed)
{
@@ -482,7 +476,6 @@ toshoboe_setbaud (struct toshoboe_cb *self)
static void
toshoboe_enablebm (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __func__);
pci_set_master (self->pdev);
}
@@ -492,8 +485,6 @@ toshoboe_initring (struct toshoboe_cb *self)
{
int i;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
for (i = 0; i < TX_SLOTS; ++i)
{
self->ring->tx[i].len = 0;
@@ -550,8 +541,6 @@ toshoboe_startchip (struct toshoboe_cb *self)
{
__u32 physaddr;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
toshoboe_initring (self);
toshoboe_enablebm (self);
OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
@@ -636,9 +625,8 @@ toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
xbofs++;
- IRDA_DEBUG (2, DRIVER_NAME
- ": generated mtt of %d bytes for %d us at %d baud\n"
- , xbofs,mtt,self->speed);
+ pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n",
+ xbofs, mtt, self->speed);
if (xbofs > TX_LEN)
{
@@ -824,8 +812,6 @@ toshoboe_probe (struct toshoboe_cb *self)
#endif
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (request_irq (self->io.irq, toshoboe_probeinterrupt,
self->io.irqflags, "toshoboe", (void *) self))
{
@@ -983,10 +969,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; );
- IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
- ,skb->len,self->txpending,INB (OBOE_ENABLEH));
+ pr_debug("%s.tx:%x(%x)%x\n",
+ __func__, skb->len, self->txpending, INB(OBOE_ENABLEH));
if (!cb->magic) {
- IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic);
+ pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic);
#ifdef DUMP_PACKETS
_dumpbufs(skb->data,skb->len,'>');
#endif
@@ -1012,8 +998,8 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
if (self->txpending || skb->len)
{
self->new_speed = speed;
- IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
- __func__, speed);
+ pr_debug("%s: Queued TxDone scheduled speed change %d\n" ,
+ __func__, speed);
/* if no data, that's all! */
if (!skb->len)
{
@@ -1055,8 +1041,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
/* which we will add a wrong checksum to */
mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
- IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__
- ,skb->len,mtt,self->txpending);
+ pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending);
if (mtt)
{
self->ring->tx[self->txs].len = mtt & 0xfff;
@@ -1099,8 +1084,9 @@ dumpbufs(skb->data,skb->len,'>');
if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
{
- IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__
- ,skb->len, self->ring->tx[self->txs].control, self->txpending);
+ pr_debug("%s.ful:%x(%x)%x\n",
+ __func__, skb->len, self->ring->tx[self->txs].control,
+ self->txpending);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
spin_unlock_irqrestore(&self->spinlock, flags);
return NETDEV_TX_BUSY;
@@ -1177,8 +1163,7 @@ toshoboe_interrupt (int irq, void *dev_id)
if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
self->txpending++;
}
- IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__
- ,irqstat,txp,self->txpending);
+ pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending);
txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
@@ -1206,8 +1191,8 @@ toshoboe_interrupt (int irq, void *dev_id)
if ((!self->txpending) && (self->new_speed))
{
self->speed = self->new_speed;
- IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
- __func__, self->speed);
+ pr_debug("%s: Executed TxDone scheduled speed change %d\n",
+ __func__, self->speed);
toshoboe_setbaud (self);
}
@@ -1222,8 +1207,8 @@ toshoboe_interrupt (int irq, void *dev_id)
{
int len = self->ring->rx[self->rxs].len;
skb = NULL;
- IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__
- ,len,self->ring->rx[self->rxs].control);
+ pr_debug("%s.rcv:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
#ifdef DUMP_PACKETS
dumpbufs(self->rx_bufs[self->rxs],len,'<');
@@ -1244,7 +1229,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable);
}
#ifdef USE_MIR
@@ -1254,7 +1239,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable);
}
#endif
else if (enable & OBOE_ENABLEH_FIRON)
@@ -1263,10 +1248,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 4; /*FIXME: check this */
else
len = 0;
- IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable);
}
else
- IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable);
if (len)
{
@@ -1299,8 +1284,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
/* (SIR) data is splitted in several slots. */
/* we have to join all the received buffers received */
/*in a large buffer before checking CRC. */
- IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__
- ,len,self->ring->rx[self->rxs].control);
+ pr_debug("%s.err:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
}
self->ring->rx[self->rxs].len = 0x0;
@@ -1327,8 +1312,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
if (irqstat & OBOE_INT_SIP)
{
self->int_sip++;
- IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__
- ,self->int_sip,irqstat,self->txpending);
+ pr_debug("%s.sip:%x(%x)%x\n",
+ __func__, self->int_sip, irqstat, self->txpending);
}
return IRQ_HANDLED;
}
@@ -1341,8 +1326,6 @@ toshoboe_net_open (struct net_device *dev)
unsigned long flags;
int rc;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
self = netdev_priv(dev);
if (self->async)
@@ -1379,8 +1362,6 @@ toshoboe_net_close (struct net_device *dev)
{
struct toshoboe_cb *self;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
IRDA_ASSERT (dev != NULL, return -1; );
self = netdev_priv(dev);
@@ -1424,7 +1405,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT (self != NULL, return -1; );
- IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->spinlock, flags);
@@ -1436,8 +1417,8 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
* speed, so we still must allow for speed change within
* interrupt context.
*/
- IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__
- ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
+ pr_debug("%s(BANDWIDTH), %s, (%X/%ld\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate);
if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
@@ -1449,8 +1430,9 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
self->new_speed = irq->ifr_baudrate;
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__
- ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
+ pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n",
+ __func__, dev->name,
+ INB(OBOE_STATUS), capable(CAP_NET_ADMIN));
if (!capable (CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
@@ -1459,11 +1441,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
- IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__
- ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
+ pr_debug("%s(RECEIVING), %s, (%X/%x)\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving);
break;
default:
- IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
ret = -EOPNOTSUPP;
}
out:
@@ -1490,8 +1472,6 @@ toshoboe_close (struct pci_dev *pci_dev)
int i;
struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
- IRDA_DEBUG (4, "%s()\n", __func__);
-
IRDA_ASSERT (self != NULL, return; );
if (!self->stopped)
@@ -1538,8 +1518,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
int ok = 0;
int err;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if ((err=pci_enable_device(pci_dev)))
return err;
@@ -1700,8 +1678,6 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
unsigned long flags;
int i = 10;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (!self || self->stopped)
return 0;
@@ -1728,8 +1704,6 @@ toshoboe_wakeup (struct pci_dev *pci_dev)
struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (!self || !self->stopped)
return 0;
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index 96cdecff349d..7e0a5b8c6d53 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -86,8 +86,6 @@ static int girbil_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,8 +100,6 @@ static int girbil_open(struct sir_dev *dev)
static int girbil_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -126,8 +122,6 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* dongle alread reset - port and dongle at default speed */
switch(state) {
@@ -179,7 +173,8 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -209,8 +204,6 @@ static int girbil_reset(struct sir_dev *dev)
u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
/* Reset dongle */
@@ -241,7 +234,8 @@ static int girbil_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
ret = -1;
break;
}
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 925b78cc9797..48b2f9a321b7 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -176,12 +176,13 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
(!force) && (self->speed != -1)) {
/* No speed and xbofs change here
* (we'll do it later in the write callback) */
- IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__);
+ pr_debug("%s(), not changing speed yet\n", __func__);
*header = 0;
return;
}
- IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed);
+ pr_debug("%s(), changing speed to %d\n",
+ __func__, self->new_speed);
self->speed = self->new_speed;
/* We will do ` self->new_speed = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -227,7 +228,8 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
/* Set the negotiated additional XBOFS */
if (self->new_xbofs != -1) {
- IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs);
+ pr_debug("%s(), changing xbofs to %d\n",
+ __func__, self->new_xbofs);
self->xbofs = self->new_xbofs;
/* We will do ` self->new_xbofs = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -301,13 +303,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
struct urb *urb;
int ret;
- IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__,
- self->new_speed, self->new_xbofs);
+ pr_debug("%s(), speed=%d, xbofs=%d\n", __func__,
+ self->new_speed, self->new_xbofs);
/* Grab the speed URB */
urb = self->speed_urb;
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __func__);
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
return;
}
@@ -333,7 +335,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
/* Irq disabled -> GFP_ATOMIC */
if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Speed URB\n", __func__);
+ net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
}
}
@@ -346,8 +348,6 @@ static void speed_bulk_callback(struct urb *urb)
{
struct irda_usb_cb *self = urb->context;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
/* We should always be called for the speed URB */
@@ -356,7 +356,8 @@ static void speed_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer.
* Instead, we will wait for irda_usb_net_timeout(), the
@@ -391,7 +392,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
s16 xbofs;
int res, mtt;
- IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
+ pr_debug("%s() on %s\n", __func__, netdev->name);
netif_stop_queue(netdev);
@@ -402,7 +403,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
* We need to check self->present under the spinlock because
* of irda_usb_disconnect() is synchronous - Jean II */
if (!self->present) {
- IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__);
+ pr_debug("%s(), Device is gone...\n", __func__);
goto drop;
}
@@ -435,7 +436,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
}
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __func__);
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
goto drop;
}
@@ -522,7 +523,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
/* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Tx URB\n", __func__);
+ net_warn_ratelimited("%s(), failed Tx URB\n", __func__);
netdev->stats.tx_errors++;
/* Let USB recover : We will catch that in the watchdog */
/*netif_start_queue(netdev);*/
@@ -554,8 +555,6 @@ static void write_bulk_callback(struct urb *urb)
struct sk_buff *skb = urb->context;
struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
/* We should always be called for the speed URB */
@@ -568,7 +567,8 @@ static void write_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer,
* and we could go in recursion and blow the kernel stack...
@@ -587,7 +587,7 @@ static void write_bulk_callback(struct urb *urb)
/* If the network is closed, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__);
+ pr_debug("%s(), Network is gone...\n", __func__);
spin_unlock_irqrestore(&self->lock, flags);
return;
}
@@ -598,7 +598,7 @@ static void write_bulk_callback(struct urb *urb)
(self->new_xbofs != self->xbofs)) {
/* We haven't changed speed yet (because of
* IUC_SPEED_BUG), so do it now - Jean II */
- IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
+ pr_debug("%s(), Changing speed now...\n", __func__);
irda_usb_change_speed_xbofs(self);
} else {
/* New speed and xbof is now committed in hardware */
@@ -630,7 +630,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
struct urb *urb;
int done = 0; /* If we have made any progress */
- IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__);
+ pr_debug("%s(), Network layer thinks we timed out!\n", __func__);
IRDA_ASSERT(self != NULL, return;);
/* Protect us from USB callbacks, net Tx and else. */
@@ -638,7 +638,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
/* self->present *MUST* be read under spinlock */
if (!self->present) {
- IRDA_WARNING("%s(), device not present!\n", __func__);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
netif_stop_queue(netdev);
spin_unlock_irqrestore(&self->lock, flags);
return;
@@ -647,7 +647,8 @@ static void irda_usb_net_timeout(struct net_device *netdev)
/* Check speed URB */
urb = self->speed_urb;
if (urb->status != 0) {
- IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+ pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
switch (urb->status) {
case -EINPROGRESS:
@@ -672,7 +673,8 @@ static void irda_usb_net_timeout(struct net_device *netdev)
if (urb->status != 0) {
struct sk_buff *skb = urb->context;
- IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+ pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
/* Increase error count */
netdev->stats.tx_errors++;
@@ -761,8 +763,6 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
struct irda_skb_cb *cb;
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* This should never happen */
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(urb != NULL, return;);
@@ -783,8 +783,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
if (ret) {
/* If this ever happen, we are in deep s***.
* Basically, the Rx path will stop... */
- IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
- __func__, ret);
+ net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n",
+ __func__, ret);
}
}
@@ -805,7 +805,7 @@ static void irda_usb_receive(struct urb *urb)
struct urb *next_urb;
unsigned int len, docopy;
- IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length);
+ pr_debug("%s(), len=%d\n", __func__, urb->actual_length);
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
@@ -815,7 +815,7 @@ static void irda_usb_receive(struct urb *urb)
/* If the network is closed or the device gone, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__);
+ pr_debug("%s(), Network is gone!\n", __func__);
/* Don't re-submit the URB : will stall the Rx path */
return;
}
@@ -838,7 +838,8 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
@@ -859,7 +860,7 @@ static void irda_usb_receive(struct urb *urb)
/* Check for empty frames */
if (urb->actual_length <= self->header_length) {
- IRDA_WARNING("%s(), empty frame!\n", __func__);
+ net_warn_ratelimited("%s(), empty frame!\n", __func__);
goto done;
}
@@ -964,8 +965,6 @@ static void irda_usb_rx_defer_expired(unsigned long data)
struct irda_skb_cb *cb;
struct urb *next_urb;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
IRDA_ASSERT(cb != NULL, return;);
@@ -1049,8 +1048,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
self->bulk_out_ep),
patch_block, block_size,
&actual_len, msecs_to_jiffies(500));
- IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
- __func__, actual_len, ret);
+ pr_debug("%s(): Bulk send %u bytes, ret=%d\n",
+ __func__, actual_len, ret);
if (ret < 0)
break;
@@ -1088,8 +1087,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
return ret;
/* We get a patch from userspace */
- IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
- __func__, stir421x_fw_name, fw->size);
+ net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n",
+ __func__, stir421x_fw_name, fw->size);
ret = -EINVAL;
@@ -1112,8 +1111,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
+ ((build / 10) << 4)
+ (build % 10);
- IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
- __func__, fw_version);
+ pr_debug("%s(): Firmware Product version %ld\n",
+ __func__, fw_version);
}
}
@@ -1169,8 +1168,6 @@ static int irda_usb_net_open(struct net_device *netdev)
char hwname[16];
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(netdev != NULL, return -1;);
self = netdev_priv(netdev);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1179,13 +1176,13 @@ static int irda_usb_net_open(struct net_device *netdev)
/* Can only open the device if it's there */
if(!self->present) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device not present!\n", __func__);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
return -1;
}
if(self->needspatch) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device needs patch\n", __func__) ;
+ net_warn_ratelimited("%s(), device needs patch\n", __func__);
return -EIO ;
}
@@ -1227,8 +1224,6 @@ static int irda_usb_net_open(struct net_device *netdev)
if (!skb) {
/* If this ever happen, we are in deep s***.
* Basically, we can't start the Rx path... */
- IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
- __func__);
return -1;
}
//skb_reserve(newskb, USB_IRDA_HEADER - 1);
@@ -1251,8 +1246,6 @@ static int irda_usb_net_close(struct net_device *netdev)
struct irda_usb_cb *self;
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(netdev != NULL, return -1;);
self = netdev_priv(netdev);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1306,7 +1299,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -1356,7 +1349,6 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
{
struct irda_class_desc *desc;
- IRDA_DEBUG(3, "%s()\n", __func__);
desc = self->irda_desc;
@@ -1372,8 +1364,10 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
self->qos.window_size.bits = desc->bmWindowSize;
self->qos.data_size.bits = desc->bmDataSize;
- IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
- __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
+ pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
+ __func__, self->qos.baud_rate.bits, self->qos.data_size.bits,
+ self->qos.window_size.bits, self->qos.additional_bofs.bits,
+ self->qos.min_turn_time.bits);
/* Don't always trust what the dongle tell us */
if(self->capability & IUC_SIR_ONLY)
@@ -1416,8 +1410,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
{
struct net_device *netdev = self->netdev;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
netdev->netdev_ops = &irda_usb_netdev_ops;
irda_usb_init_qos(self);
@@ -1432,8 +1424,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
*/
static inline void irda_usb_close(struct irda_usb_cb *self)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
/* Remove netdevice */
unregister_netdev(self->netdev);
@@ -1505,13 +1495,15 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
/* This is our interrupt endpoint */
self->bulk_int_ep = ep;
} else {
- IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep);
+ net_err_ratelimited("%s(), Unrecognised endpoint %02X\n",
+ __func__, ep);
}
}
}
- IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
- __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
+ pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
+ __func__, self->bulk_in_ep, self->bulk_out_ep,
+ self->bulk_out_mtu, self->bulk_int_ep);
return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
}
@@ -1573,13 +1565,13 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf
0, intf->altsetting->desc.bInterfaceNumber, desc,
sizeof(*desc), 500);
- IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret);
+ pr_debug("%s(), ret=%d\n", __func__, ret);
if (ret < sizeof(*desc)) {
- IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
- (ret<0) ? "failed" : "too short", ret);
+ net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n",
+ ret < 0 ? "failed" : "too short", ret);
}
else if (desc->bDescriptorType != USB_DT_IRDA) {
- IRDA_WARNING("usb-irda: bad class_descriptor type\n");
+ net_warn_ratelimited("usb-irda: bad class_descriptor type\n");
}
else {
#ifdef IU_DUMP_CLASS_DESC
@@ -1622,9 +1614,9 @@ static int irda_usb_probe(struct usb_interface *intf,
* don't need to check if the dongle is really ours.
* Jean II */
- IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
- dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
net = alloc_irdadev(sizeof(*self));
if (!net)
@@ -1680,7 +1672,8 @@ static int irda_usb_probe(struct usb_interface *intf,
* specify an alternate, but very few driver do like this.
* Jean II */
ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
- IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret);
+ pr_debug("usb-irda: set interface %d result %d\n",
+ intf->altsetting->desc.bInterfaceNumber, ret);
switch (ret) {
case 0:
break;
@@ -1688,10 +1681,11 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Martin Diehl says if we get a -EPIPE we should
* be fine and we don't need to do a usb_clear_halt().
* - Jean II */
- IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__);
+ pr_debug("%s(), Received -EPIPE, ignoring...\n",
+ __func__);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret);
+ pr_debug("%s(), Unknown error %d\n", __func__, ret);
ret = -EIO;
goto err_out_3;
}
@@ -1700,7 +1694,7 @@ static int irda_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
if(!irda_usb_parse_endpoints(self, interface->endpoint,
interface->desc.bNumEndpoints)) {
- IRDA_ERROR("%s(), Bogus endpoints...\n", __func__);
+ net_err_ratelimited("%s(), Bogus endpoints...\n", __func__);
ret = -EIO;
goto err_out_3;
}
@@ -1717,7 +1711,7 @@ static int irda_usb_probe(struct usb_interface *intf,
ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
0x02, 0x40, 0, 0, NULL, 0, 500);
if (ret < 0) {
- IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
+ pr_debug("usb_control_msg failed %d\n", ret);
goto err_out_3;
} else {
mdelay(10);
@@ -1746,7 +1740,7 @@ static int irda_usb_probe(struct usb_interface *intf,
if (ret)
goto err_out_5;
- IRDA_MESSAGE("IrDA: Registered device %s\n", net->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", net->name);
usb_set_intfdata(intf, self);
if (self->needspatch) {
@@ -1754,7 +1748,7 @@ static int irda_usb_probe(struct usb_interface *intf,
ret = stir421x_patch_device(self);
self->needspatch = (ret < 0);
if (self->needspatch) {
- IRDA_ERROR("STIR421X: Couldn't upload patch\n");
+ net_err_ratelimited("STIR421X: Couldn't upload patch\n");
goto err_out_6;
}
@@ -1809,8 +1803,6 @@ static void irda_usb_disconnect(struct usb_interface *intf)
struct irda_usb_cb *self = usb_get_intfdata(intf);
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
usb_set_intfdata(intf, NULL);
if (!self)
return;
@@ -1859,7 +1851,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Free self and network device */
free_netdev(self->netdev);
- IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
+ pr_debug("%s(), USB IrDA Disconnected\n", __func__);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 24b6dddd7f2f..696852eb23c3 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
dev = priv->dev;
if (!dev) {
- IRDA_WARNING("%s(), not ready yet!\n", __func__);
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
return;
}
@@ -240,7 +240,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
* Characters received with a parity error, etc?
*/
if (fp && *fp++) {
- IRDA_DEBUG(0, "Framing or parity error!\n");
+ pr_debug("Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
return;
}
@@ -387,7 +387,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
IRDA_ASSERT(priv != NULL, return -ENODEV;);
IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
- IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd);
+ pr_debug("%s(cmd=0x%X)\n", __func__, cmd);
dev = priv->dev;
IRDA_ASSERT(dev != NULL, return -1;);
@@ -477,7 +477,7 @@ static int irtty_open(struct tty_struct *tty)
mutex_unlock(&irtty_mutex);
- IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name);
+ pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name);
return 0;
@@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty)
kfree(priv);
- IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name);
+ pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name);
}
/* ------------------------------------------------------- */
@@ -555,8 +555,8 @@ static int __init irtty_sir_init(void)
int err;
if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
- IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n",
- err);
+ net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n",
+ err);
return err;
}
@@ -565,8 +565,8 @@ static void __exit irtty_sir_cleanup(void)
int err;
if ((err = tty_unregister_ldisc(N_IRDA))) {
- IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
- __func__, err);
+ net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n",
+ __func__, err);
}
}
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index 6827777cbeea..8eefcb44bac3 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -76,8 +76,6 @@ static int litelink_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power up dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -93,8 +91,6 @@ static int litelink_open(struct sir_dev *dev)
static int litelink_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -111,8 +107,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
{
int i;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* dongle already reset by irda-thread - current speed (dongle and
* port) is the default speed (115200 for litelink!)
*/
@@ -154,8 +148,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int litelink_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* probably the power-up can be dropped here, but with only
* 15 usec delay it's not worth the risk unless somebody with
* the hardware confirms it doesn't break anything...
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index a9a81358477b..a764817b47f1 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -65,13 +65,11 @@ static struct dongle_driver ma600 = {
static int __init ma600_sir_init(void)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
return irda_register_dongle(&ma600);
}
static void __exit ma600_sir_cleanup(void)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
irda_unregister_dongle(&ma600);
}
@@ -86,8 +84,6 @@ static int ma600_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Explicitly set the speeds we can accept */
@@ -104,8 +100,6 @@ static int ma600_open(struct sir_dev *dev)
static int ma600_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -174,8 +168,8 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
{
u8 byte;
- IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__,
- speed, dev->speed);
+ pr_debug("%s(), speed=%d (was %d)\n", __func__,
+ speed, dev->speed);
/* dongle already reset, dongle and port at default speed (9600) */
@@ -198,13 +192,13 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
sirdev_raw_read(dev, &byte, sizeof(byte));
if (byte != get_control_byte(speed)) {
- IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
- __func__, (unsigned) byte,
- (unsigned) get_control_byte(speed));
+ net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n",
+ __func__, (unsigned)byte,
+ (unsigned)get_control_byte(speed));
return -1;
}
else
- IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__);
+ pr_debug("%s() control byte write read OK\n", __func__);
#endif
/* Set DTR, Set RTS */
@@ -236,8 +230,6 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
static int ma600_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Reset the dongle : set DTR low for 10 ms */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
msleep(10);
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
index 5e2f4859cee7..2e33f91bfe8f 100644
--- a/drivers/net/irda/mcp2120-sir.c
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -63,8 +63,6 @@ static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* seems no explicit power-on required here and reset switching it on anyway */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -76,8 +74,6 @@ static int mcp2120_open(struct sir_dev *dev)
static int mcp2120_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
/* reset and inhibit mcp2120 */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,8 +98,6 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
/* Set DTR to enter command mode */
@@ -155,7 +149,8 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefine state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -187,8 +182,6 @@ static int mcp2120_reset(struct sir_dev *dev)
unsigned delay = 0;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
//printk("mcp2120_reset: dongle_reset\n");
@@ -213,7 +206,8 @@ static int mcp2120_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 16f8ffb50e04..e4d678fbeb2f 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -197,14 +197,14 @@ error:
/* Setup a communication between mcs7780 and agilent chip. */
static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
{
- IRDA_WARNING("This transceiver type is not supported yet.\n");
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
return 1;
}
/* Setup a communication between mcs7780 and sharp chip. */
static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
{
- IRDA_WARNING("This transceiver type is not supported yet.\n");
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
return 1;
}
@@ -213,9 +213,9 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
{
int ret = 0;
__u16 rval;
- char *msg;
+ const char *msg;
- msg = "Basic transceiver setup error.";
+ msg = "Basic transceiver setup error";
/* read value of MODE Register, set the DRIVER and RESET bits
* and write value back out to MODE Register
@@ -261,7 +261,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
if(unlikely(ret))
goto error;
- msg = "transceiver model specific setup error.";
+ msg = "transceiver model specific setup error";
switch (mcs->transceiver_type) {
case MCS_TSC_VISHAY:
ret = mcs_setup_transceiver_vishay(mcs);
@@ -276,8 +276,8 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
break;
default:
- IRDA_WARNING("Unknown transceiver type: %d\n",
- mcs->transceiver_type);
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
ret = 1;
}
if (unlikely(ret))
@@ -300,7 +300,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
goto error;
}
- msg = "transceiver reset.";
+ msg = "transceiver reset";
ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
if (unlikely(ret != 2))
@@ -315,7 +315,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
return ret;
error:
- IRDA_ERROR("%s\n", msg);
+ net_err_ratelimited("%s\n", msg);
return ret;
}
@@ -399,8 +399,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
new_len = len - 2;
if(unlikely(new_len <= 0)) {
- IRDA_ERROR("%s short frame length %d\n",
- mcs->netdev->name, new_len);
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
++mcs->netdev->stats.rx_errors;
++mcs->netdev->stats.rx_length_errors;
return;
@@ -409,8 +409,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
fcs = irda_calc_crc16(~fcs, buf, len);
if(fcs != GOOD_FCS) {
- IRDA_ERROR("crc error calc 0x%x len %d\n",
- fcs, new_len);
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
mcs->netdev->stats.rx_errors++;
mcs->netdev->stats.rx_crc_errors++;
return;
@@ -452,8 +452,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
new_len = len - 4;
if(unlikely(new_len <= 0)) {
- IRDA_ERROR("%s short frame length %d\n",
- mcs->netdev->name, new_len);
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
++mcs->netdev->stats.rx_errors;
++mcs->netdev->stats.rx_length_errors;
return;
@@ -461,7 +461,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
fcs = ~(crc32_le(~0, buf, new_len));
if(fcs != get_unaligned_le32(buf + new_len)) {
- IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
mcs->netdev->stats.rx_errors++;
mcs->netdev->stats.rx_crc_errors++;
return;
@@ -583,7 +584,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
} while(cnt++ < 100 && (rval & MCS_IRINTX));
if (cnt > 100) {
- IRDA_ERROR("unable to change speed\n");
+ net_err_ratelimited("unable to change speed\n");
ret = -EIO;
goto error;
}
@@ -634,8 +635,8 @@ static int mcs_speed_change(struct mcs_cb *mcs)
default:
ret = 1;
- IRDA_WARNING("Unknown transceiver type: %d\n",
- mcs->transceiver_type);
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
}
if (unlikely(ret))
goto error;
@@ -731,7 +732,7 @@ static int mcs_net_open(struct net_device *netdev)
sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
if (!mcs->irlap) {
- IRDA_ERROR("mcs7780: irlap_open failed\n");
+ net_err_ratelimited("mcs7780: irlap_open failed\n");
goto error2;
}
@@ -851,7 +852,7 @@ static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
mcs->out_buf, wraplen, mcs_send_irq, mcs);
if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
- IRDA_ERROR("failed tx_urb: %d\n", ret);
+ net_err_ratelimited("failed tx_urb: %d\n", ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
@@ -893,13 +894,13 @@ static int mcs_probe(struct usb_interface *intf,
if (!ndev)
goto error1;
- IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
+ pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
SET_NETDEV_DEV(ndev, &intf->dev);
ret = usb_reset_configuration(udev);
if (ret != 0) {
- IRDA_ERROR("mcs7780: usb reset configuration failed\n");
+ net_err_ratelimited("mcs7780: usb reset configuration failed\n");
goto error2;
}
@@ -941,8 +942,8 @@ static int mcs_probe(struct usb_interface *intf,
if (ret != 0)
goto error2;
- IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n",
- ndev->name);
+ pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n",
+ ndev->name);
mcs->transceiver_type = transceiver_type;
mcs->sir_tweak = sir_tweak;
@@ -972,7 +973,7 @@ static void mcs_disconnect(struct usb_interface *intf)
free_netdev(mcs->netdev);
usb_set_intfdata(intf, NULL);
- IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
+ pr_debug("MCS7780 now disconnected.\n");
}
module_usb_driver(mcs_driver);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 66bc03bdb138..e7317b104bfb 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -211,7 +211,8 @@ static int __init nsc_ircc_init(void)
ret = platform_driver_register(&nsc_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
return ret;
}
@@ -225,8 +226,8 @@ static int __init nsc_ircc_init(void)
/* Probe for all the NSC chipsets we know about */
for (chip = chips; chip->name ; chip++) {
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
- chip->name);
+ pr_debug("%s(), Probing for %s ...\n", __func__,
+ chip->name);
/* Try all config registers for this chip */
for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
@@ -237,7 +238,8 @@ static int __init nsc_ircc_init(void)
/* Read index register */
reg = inb(cfg_base);
if (reg == 0xff) {
- IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base);
+ pr_debug("%s() no chip at 0x%03x\n",
+ __func__, cfg_base);
continue;
}
@@ -245,8 +247,9 @@ static int __init nsc_ircc_init(void)
outb(chip->cid_index, cfg_base);
id = inb(cfg_base+1);
if ((id & chip->cid_mask) == chip->cid_value) {
- IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
- __func__, chip->name, id & ~chip->cid_mask);
+ pr_debug("%s() Found %s chip, revision=%d\n",
+ __func__, chip->name,
+ id & ~chip->cid_mask);
/*
* If we found a correct PnP setting,
@@ -260,7 +263,8 @@ static int __init nsc_ircc_init(void)
info.irq = pnp_info.irq;
if (info.fir_base < 0x2000) {
- IRDA_MESSAGE("%s, chip->init\n", driver_name);
+ net_info_ratelimited("%s, chip->init\n",
+ driver_name);
chip->init(chip, &info);
} else
chip->probe(chip, &info);
@@ -275,7 +279,8 @@ static int __init nsc_ircc_init(void)
* the chip.
*/
if (ret) {
- IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name);
+ pr_debug("%s, PnP init failed\n",
+ driver_name);
memset(&info, 0, sizeof(chipio_t));
info.cfg_base = cfg_base;
info.fir_base = io[i];
@@ -297,7 +302,8 @@ static int __init nsc_ircc_init(void)
}
i++;
} else {
- IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id);
+ pr_debug("%s(), Wrong chip id=0x%02x\n",
+ __func__, id);
}
}
}
@@ -361,31 +367,29 @@ static int __init nsc_ircc_open(chipio_t *info)
void *ret;
int err, chip_index;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
-
for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
if (!dev_self[chip_index])
break;
}
if (chip_index == ARRAY_SIZE(dev_self)) {
- IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
return -ENOMEM;
}
- IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
- info->cfg_base);
+ net_info_ratelimited("%s, Found chip at base=0x%03x\n",
+ driver_name, info->cfg_base);
if ((nsc_ircc_setup(info)) == -1)
return -1;
- IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);
+ net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name);
dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
if (dev == NULL) {
- IRDA_ERROR("%s(), can't allocate memory for "
- "control block!\n", __func__);
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
return -ENOMEM;
}
@@ -408,8 +412,8 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Reserve the ioports that we need */
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
- IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
- __func__, self->io.fir_base);
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto out1;
}
@@ -460,21 +464,22 @@ static int __init nsc_ircc_open(chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto out4;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Check if user has supplied a valid dongle id or not */
if ((dongle_id <= 0) ||
(dongle_id >= ARRAY_SIZE(dongle_types))) {
dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
- IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
- dongle_types[dongle_id]);
+ net_info_ratelimited("%s, Found dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
} else {
- IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
- dongle_types[dongle_id]);
+ net_info_ratelimited("%s, Using dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
}
self->io.dongle_id = dongle_id;
@@ -516,8 +521,6 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
@@ -528,8 +531,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -567,7 +570,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0x2e8: outb(0x15, cfg_base+1); break;
case 0x3f8: outb(0x16, cfg_base+1); break;
case 0x2f8: outb(0x17, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid base_address", __func__);
+ default: net_err_ratelimited("%s(), invalid base_address\n", __func__);
}
/* Control Signal Routing Register (CSRT) */
@@ -579,7 +582,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 9: temp = 0x05; break;
case 11: temp = 0x06; break;
case 15: temp = 0x07; break;
- default: IRDA_ERROR("%s(), invalid irq", __func__);
+ default: net_err_ratelimited("%s(), invalid irq\n", __func__);
}
outb(CFG_108_CSRT, cfg_base);
@@ -587,7 +590,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0: outb(0x08+temp, cfg_base+1); break;
case 1: outb(0x10+temp, cfg_base+1); break;
case 3: outb(0x18+temp, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid dma", __func__);
+ default: net_err_ratelimited("%s(), invalid dma\n", __func__);
}
outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
@@ -626,8 +629,8 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
break;
}
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
- info->fir_base);
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__,
+ info->fir_base);
/* Read control signals routing register (CSRT) */
outb(CFG_108_CSRT, cfg_base);
@@ -659,7 +662,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->irq = 15;
break;
}
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
/* Currently we only read Rx DMA but it will also be used for Tx */
switch ((reg >> 3) & 0x03) {
@@ -676,7 +679,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->dma = 3;
break;
}
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
/* Read mode control register (MCTL) */
outb(CFG_108_MCTL, cfg_base);
@@ -727,7 +730,7 @@ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
pnp = (reg >> 3) & 0x01;
if (pnp) {
- IRDA_DEBUG(2, "(), Chip is in PnP mode\n");
+ pr_debug("(), Chip is in PnP mode\n");
outb(0x46, cfg_base);
reg = (inb(cfg_base+1) & 0xfe) << 2;
@@ -831,9 +834,8 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
int enabled;
/* User is sure about his config... accept it. */
- IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
- "io=0x%04x, irq=%d, dma=%d\n",
- __func__, info->fir_base, info->irq, info->dma);
+ pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n",
+ __func__, info->fir_base, info->irq, info->dma);
/* Access bank for SP2 */
outb(CFG_39X_LDN, cfg_base);
@@ -873,8 +875,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
int reg1, reg2, irq, irqt, dma1, dma2;
int enabled, susp;
- IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
- __func__, cfg_base);
+ pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n",
+ __func__, cfg_base);
/* This function should be executed with irq off to avoid
* another driver messing with the Super I/O bank - Jean II */
@@ -908,7 +910,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
outb(CFG_39X_SPC, cfg_base);
susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
- IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
+ pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n",
+ __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp);
/* Configure SP2 */
@@ -959,8 +962,8 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
!(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
pnp_info.dma = pnp_dma(dev, 0);
- IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
- __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+ pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
+ __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
if((pnp_info.fir_base == 0) ||
(pnp_info.irq == -1) || (pnp_info.dma == -1)) {
@@ -988,13 +991,13 @@ static int nsc_ircc_setup(chipio_t *info)
switch_bank(iobase, BANK3);
version = inb(iobase+MID);
- IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
- __func__, driver_name, version);
+ pr_debug("%s() Driver %s Found chip version %02x\n",
+ __func__, driver_name, version);
/* Should be 0x2? */
if (0x20 != (version & 0xf0)) {
- IRDA_ERROR("%s, Wrong chip version %02x\n",
- driver_name, version);
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ driver_name, version);
return -1;
}
@@ -1092,39 +1095,39 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved, but this is what the Thinkpad reports */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0A: /* same as */
case 0x0B: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1138,15 +1141,15 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0F: /* No dongle connected */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
- IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
- __func__, dongle_id);
+ pr_debug("%s(), invalid dongle_id %#x",
+ __func__, dongle_id);
}
/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
@@ -1177,31 +1180,31 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
outb(0x00, iobase+4);
if (speed > 115200)
outb(0x01, iobase+4);
@@ -1219,8 +1222,8 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
break;
case 0x0A: /* same as */
case 0x0B: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1228,14 +1231,14 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
case 0x0E: /* Supports SIR Mode only */
break;
case 0x0F: /* No dongle connected */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
- IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
+ pr_debug("%s(), invalid data_rate\n", __func__);
}
/* Restore bank register */
outb(bank, iobase+BSR);
@@ -1256,7 +1259,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
__u8 bank;
__u8 ier; /* Interrupt enable register */
- IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);
+ pr_debug("%s(), speed=%d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return 0;);
@@ -1289,20 +1292,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
outb(inb(iobase+4) | 0x04, iobase+4);
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
break;
case 4000000:
mcr = MCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
break;
default:
mcr = MCR_FIR;
- IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
- __func__, speed);
+ pr_debug("%s(), unknown baud rate of %d\n",
+ __func__, speed);
break;
}
@@ -1609,15 +1612,13 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* Save current bank */
bank = inb(iobase+BSR);
switch_bank(iobase, BANK0);
if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
- IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
- __func__);
+ pr_debug("%s(), warning, FIFO not empty yet!\n",
+ __func__);
/* FIFO may still be filled to the Tx interrupt threshold */
fifo_size -= 17;
@@ -1629,8 +1630,8 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
outb(buf[actual++], iobase+TXD);
}
- IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __func__, fifo_size, actual, len);
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
/* Restore bank */
outb(bank, iobase+BSR);
@@ -1651,8 +1652,6 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
__u8 bank;
int ret = TRUE;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
iobase = self->io.fir_base;
/* Save current bank */
@@ -1782,7 +1781,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
+ pr_debug("%s(), window is full!\n", __func__);
continue;
}
@@ -1872,9 +1871,6 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
- IRDA_WARNING("%s(), memory squeeze, "
- "dropping frame.\n",
- __func__);
self->netdev->stats.rx_dropped++;
/* Restore bank register */
@@ -1979,7 +1975,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
* Need to be after self->io.direction to avoid race with
* nsc_ircc_hard_xmit_sir() - Jean II */
if (self->new_speed) {
- IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
+ pr_debug("%s(), Changing speed!\n", __func__);
self->ier = nsc_ircc_change_speed(self,
self->new_speed);
self->new_speed = 0;
@@ -2063,9 +2059,8 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
nsc_ircc_dma_receive(self);
self->ier = IER_SFIF_IE;
} else
- IRDA_WARNING("%s(), potential "
- "Tx queue lockup !\n",
- __func__);
+ net_warn_ratelimited("%s(), potential Tx queue lockup !\n",
+ __func__);
}
} else {
/* Not finished yet, so interrupt on DMA again */
@@ -2174,7 +2169,6 @@ static int nsc_ircc_net_open(struct net_device *dev)
char hwname[32];
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -2184,8 +2178,8 @@ static int nsc_ircc_net_open(struct net_device *dev)
iobase = self->io.fir_base;
if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- driver_name, self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
return -EAGAIN;
}
/*
@@ -2193,8 +2187,8 @@ static int nsc_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n",
- driver_name, self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
@@ -2236,7 +2230,6 @@ static int nsc_ircc_net_close(struct net_device *dev)
int iobase;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2290,7 +2283,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -2329,7 +2322,7 @@ static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
if (self->io.suspended)
return 0;
- IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+ pr_debug("%s, Suspending\n", driver_name);
rtnl_lock();
if (netif_running(self->netdev)) {
@@ -2363,7 +2356,7 @@ static int nsc_ircc_resume(struct platform_device *dev)
if (!self->io.suspended)
return 0;
- IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+ pr_debug("%s, Waking up\n", driver_name);
rtnl_lock();
nsc_ircc_setup(&self->io);
@@ -2372,8 +2365,8 @@ static int nsc_ircc_resume(struct platform_device *dev)
if (netif_running(self->netdev)) {
if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
self->netdev->name, self->netdev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- driver_name, self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
/*
* Don't fail resume process, just kill this
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index f237136f3827..a7c2e990ae69 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -90,8 +90,6 @@ static int old_belkin_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -108,8 +106,6 @@ static int old_belkin_open(struct sir_dev *dev)
static int old_belkin_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -123,8 +119,6 @@ static int old_belkin_close(struct sir_dev *dev)
*/
static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
dev->speed = 9600;
return (speed==dev->speed) ? 0 : -EINVAL;
}
@@ -137,8 +131,6 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int old_belkin_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* This dongles speed "defaults" to 9600 bps ;-) */
dev->speed = 9600;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 3eeaaf800494..100454662e4b 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,7 +966,6 @@ static int pxa_irda_remove(struct platform_device *_dev)
static struct platform_driver pxa_ir_driver = {
.driver = {
.name = "pxa2xx-ir",
- .owner = THIS_MODULE,
},
.probe = pxa_irda_probe,
.remove = pxa_irda_remove,
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 42fde9ed23e1..7b17fa2114e1 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -1114,7 +1114,6 @@ static struct platform_driver sa1100ir_driver = {
.resume = sa1100_irda_resume,
.driver = {
.name = "sa11x0-ir",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 43e9ab4f4d7e..6af26a7d787c 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -82,7 +82,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
return 0;
default:
- IRDA_ERROR("%s - undefined state\n", __func__);
+ net_err_ratelimited("%s - undefined state\n", __func__);
return -EINVAL;
}
fsm->substate = next_state;
@@ -109,11 +109,11 @@ static void sirdev_config_fsm(struct work_struct *work)
int ret = -1;
unsigned delay;
- IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
+ pr_debug("%s(), <%ld>\n", __func__, jiffies);
do {
- IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __func__, fsm->state, fsm->substate);
+ pr_debug("%s - state=0x%04x / substate=0x%04x\n",
+ __func__, fsm->state, fsm->substate);
next_state = fsm->state;
delay = 0;
@@ -251,12 +251,13 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
default:
- IRDA_ERROR("%s - undefined state\n", __func__);
+ net_err_ratelimited("%s - undefined state\n", __func__);
fsm->result = -EINVAL;
/* fall thru */
case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
+ net_err_ratelimited("%s - error: %d\n",
+ __func__, fsm->result);
#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
netif_stop_queue(dev->netdev);
@@ -286,12 +287,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
{
struct sir_fsm *fsm = &dev->fsm;
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
- initial_state, param);
+ pr_debug("%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
if (down_trylock(&fsm->sem)) {
if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
+ pr_debug("%s(), state machine busy!\n", __func__);
return -EWOULDBLOCK;
} else
down(&fsm->sem);
@@ -299,7 +300,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
if (fsm->state == SIRDEV_STATE_DEAD) {
/* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __func__);
+ net_err_ratelimited("%s(), instance staled!\n", __func__);
up(&fsm->sem);
return -ESTALE; /* or better EPIPE? */
}
@@ -344,7 +345,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
int err;
- IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
+ pr_debug("%s : requesting dongle %d.\n", __func__, type);
err = sirdev_schedule_dongle_open(dev, type);
if (unlikely(err))
@@ -379,7 +380,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (ret > 0) {
- IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
+ pr_debug("%s(), raw-tx started\n", __func__);
dev->tx_buff.data += ret;
dev->tx_buff.len -= ret;
@@ -439,8 +440,8 @@ void sirdev_write_complete(struct sir_dev *dev)
spin_lock_irqsave(&dev->tx_lock, flags);
- IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
- __func__, dev->tx_buff.len);
+ pr_debug("%s() - dev->tx_buff.len = %d\n",
+ __func__, dev->tx_buff.len);
if (likely(dev->tx_buff.len > 0)) {
/* Write data left in transmit buffer */
@@ -452,8 +453,8 @@ void sirdev_write_complete(struct sir_dev *dev)
}
else if (unlikely(actual<0)) {
/* could be dropped later when we have tx_timeout to recover */
- IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __func__, actual);
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev_kfree_skb_any(skb);
@@ -474,7 +475,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* restarted when the irda-thread has completed the request.
*/
- IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
+ pr_debug("%s(), raw-tx done\n", __func__);
dev->raw_tx = 0;
goto done; /* no post-frame handling in raw mode */
}
@@ -491,7 +492,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* re-activated.
*/
- IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
+ pr_debug("%s(), finished with frame!\n", __func__);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
@@ -501,14 +502,14 @@ void sirdev_write_complete(struct sir_dev *dev)
}
if (unlikely(dev->new_speed > 0)) {
- IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
+ pr_debug("%s(), Changing speed!\n", __func__);
err = sirdev_schedule_speed(dev, dev->new_speed);
if (unlikely(err)) {
/* should never happen
* forget the speed change and hope the stack recovers
*/
- IRDA_ERROR("%s - schedule speed change failed: %d\n",
- __func__, err);
+ net_err_ratelimited("%s - schedule speed change failed: %d\n",
+ __func__, err);
netif_wake_queue(dev->netdev);
}
/* else: success
@@ -535,13 +536,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
if (!dev || !dev->netdev) {
- IRDA_WARNING("%s(), not ready yet!\n", __func__);
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
return -1;
}
if (!dev->irlap) {
- IRDA_WARNING("%s - too early: %p / %zd!\n",
- __func__, cp, count);
+ net_warn_ratelimited("%s - too early: %p / %zd!\n",
+ __func__, cp, count);
return -1;
}
@@ -551,7 +552,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
*/
irda_device_set_media_busy(dev->netdev, TRUE);
dev->netdev->stats.rx_dropped++;
- IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
+ pr_debug("%s; rx-drop: %zd\n", __func__, count);
return 0;
}
@@ -597,7 +598,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
+ pr_debug("%s(), skb->len = %d\n", __func__, skb->len);
speed = irda_get_next_speed(skb);
if ((speed != dev->speed) && (speed != -1)) {
@@ -634,7 +635,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
/* Check problems */
if(spin_is_locked(&dev->tx_lock)) {
- IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
+ pr_debug("%s(), write not completed\n", __func__);
}
/* serialize with write completion */
@@ -661,8 +662,8 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
}
else if (unlikely(actual < 0)) {
/* could be dropped later when we have tx_timeout to recover */
- IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __func__, actual);
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
dev_kfree_skb_any(skb);
dev->netdev->stats.tx_errors++;
dev->netdev->stats.tx_dropped++;
@@ -683,7 +684,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
IRDA_ASSERT(dev != NULL, return -1;);
- IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -800,8 +801,6 @@ static int sirdev_open(struct net_device *ndev)
if (!try_module_get(drv->owner))
return -ESTALE;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (sirdev_alloc_buffers(dev))
goto errout_dec;
@@ -818,7 +817,7 @@ static int sirdev_open(struct net_device *ndev)
netif_wake_queue(ndev);
- IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
+ pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
return 0;
@@ -838,7 +837,7 @@ static int sirdev_close(struct net_device *ndev)
struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv;
-// IRDA_DEBUG(0, "%s\n", __func__);
+/* pr_debug("%s\n", __func__); */
netif_stop_queue(ndev);
@@ -880,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
struct net_device *ndev;
struct sir_dev *dev;
- IRDA_DEBUG(0, "%s - %s\n", __func__, name);
+ pr_debug("%s - %s\n", __func__, name);
/* instead of adding tests to protect against drv->do_write==NULL
* at several places we refuse to create a sir_dev instance for
@@ -894,7 +893,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
*/
ndev = alloc_irdadev(sizeof(*dev));
if (ndev == NULL) {
- IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
+ net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
+ __func__);
goto out;
}
dev = netdev_priv(ndev);
@@ -919,7 +919,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
ndev->netdev_ops = &sirdev_ops;
if (register_netdev(ndev)) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto out_freenetdev;
}
@@ -936,7 +937,7 @@ int sirdev_put_instance(struct sir_dev *dev)
{
int err = 0;
- IRDA_DEBUG(0, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
atomic_set(&dev->enable_rx, 0);
@@ -946,7 +947,7 @@ int sirdev_put_instance(struct sir_dev *dev)
if (dev->dongle_drv)
err = sirdev_schedule_dongle_close(dev);
if (err)
- IRDA_ERROR("%s - error %d\n", __func__, err);
+ net_err_ratelimited("%s - error %d\n", __func__, err);
sirdev_close(dev->netdev);
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index cfbabb63f5cc..7436f73ff1bb 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -34,8 +34,8 @@ int irda_register_dongle(struct dongle_driver *new)
struct list_head *entry;
struct dongle_driver *drv;
- IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
- __func__, new->driver_name, new->type);
+ pr_debug("%s : registering dongle \"%s\" (%d).\n",
+ __func__, new->driver_name, new->type);
mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 282120430f12..b455ffe8850c 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -419,13 +419,16 @@ static int __init smsc_ircc_legacy_probe(void)
#ifdef CONFIG_PCI
if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) {
/* Ignore errors from preconfiguration */
- IRDA_ERROR("%s, Preconfiguration failed !\n", driver_name);
+ net_err_ratelimited("%s, Preconfiguration failed !\n",
+ driver_name);
}
#endif
if (ircc_fir > 0 && ircc_sir > 0) {
- IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
- IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
+ net_info_ratelimited(" Overriding FIR address 0x%04x\n",
+ ircc_fir);
+ net_info_ratelimited(" Overriding SIR address 0x%04x\n",
+ ircc_sir);
if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
ret = -ENODEV;
@@ -434,8 +437,8 @@ static int __init smsc_ircc_legacy_probe(void)
/* try user provided configuration register base address */
if (ircc_cfg > 0) {
- IRDA_MESSAGE(" Overriding configuration address "
- "0x%04x\n", ircc_cfg);
+ net_info_ratelimited(" Overriding configuration address 0x%04x\n",
+ ircc_cfg);
if (!smsc_superio_fdc(ircc_cfg))
ret = 0;
if (!smsc_superio_lpc(ircc_cfg))
@@ -458,11 +461,12 @@ static int __init smsc_ircc_init(void)
{
int ret;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
ret = platform_driver_register(&smsc_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
return ret;
}
@@ -519,7 +523,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
struct net_device *dev;
int err;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
err = smsc_ircc_present(fir_base, sir_base);
if (err)
@@ -527,7 +531,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
err = -ENOMEM;
if (dev_count >= ARRAY_SIZE(dev_self)) {
- IRDA_WARNING("%s(), too many devices!\n", __func__);
+ net_warn_ratelimited("%s(), too many devices!\n", __func__);
goto err_out1;
}
@@ -536,7 +540,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
*/
dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
if (!dev) {
- IRDA_WARNING("%s() can't allocate net device\n", __func__);
+ net_warn_ratelimited("%s() can't allocate net device\n",
+ __func__);
goto err_out1;
}
@@ -588,8 +593,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
err = register_netdev(self->netdev);
if (err) {
- IRDA_ERROR("%s, Network device registration failed!\n",
- driver_name);
+ net_err_ratelimited("%s, Network device registration failed!\n",
+ driver_name);
goto err_out4;
}
@@ -601,7 +606,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
}
platform_set_drvdata(self->pldev, self);
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
dev_count++;
return 0;
@@ -637,15 +642,15 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
driver_name)) {
- IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
- __func__, fir_base);
+ net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n",
+ __func__, fir_base);
goto out1;
}
if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
driver_name)) {
- IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
- __func__, sir_base);
+ net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n",
+ __func__, sir_base);
goto out2;
}
@@ -660,13 +665,13 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
- IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
- __func__, fir_base);
+ net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n",
+ __func__, fir_base);
goto out3;
}
- IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
- "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
- chip & 0x0f, version, fir_base, sir_base, dma, irq);
+ net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+ chip & 0x0f, version,
+ fir_base, sir_base, dma, irq);
return 0;
@@ -704,16 +709,16 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
if (irq != IRQ_INVAL) {
if (irq != chip_irq)
- IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
- driver_name, chip_irq, irq);
+ net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n",
+ driver_name, chip_irq, irq);
self->io.irq = irq;
} else
self->io.irq = chip_irq;
if (dma != DMA_INVAL) {
if (dma != chip_dma)
- IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
- driver_name, chip_dma, dma);
+ net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n",
+ driver_name, chip_dma, dma);
self->io.dma = dma;
} else
self->io.dma = chip_dma;
@@ -798,7 +803,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -852,8 +857,8 @@ static void smsc_ircc_timeout(struct net_device *dev)
struct smsc_ircc_cb *self = netdev_priv(dev);
unsigned long flags;
- IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
- dev->name, self->io.speed);
+ net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n",
+ dev->name, self->io.speed);
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
@@ -877,7 +882,7 @@ static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
unsigned long flags;
s32 speed;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
@@ -952,21 +957,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_CRC;
fast = 0;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_1152 | IRCC_CRC;
fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
- __func__);
+ pr_debug("%s(), handling baud of 1152000\n",
+ __func__);
break;
case 4000000:
ir_mode = IRCC_CFGA_IRDA_4PPM;
ctrl = IRCC_CRC;
fast = IRCC_LCR_A_FAST;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
- __func__);
+ pr_debug("%s(), handling baud of 4000000\n",
+ __func__);
break;
}
#if 0
@@ -994,7 +999,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1039,7 +1044,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
{
int fir_base;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
@@ -1063,7 +1068,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
struct net_device *dev;
int last_speed_was_sir;
- IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
+ pr_debug("%s() changing speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1131,7 +1136,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -1166,7 +1171,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
- IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
+ pr_debug("%s() speed changed to: %d\n", __func__, speed);
}
@@ -1250,7 +1255,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
int iobase = self->io.fir_base;
u8 ctrl;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 1
/* Disable Rx */
register_bank(iobase, 0);
@@ -1304,7 +1309,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 0
/* Disable Tx */
register_bank(iobase, 0);
@@ -1408,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
register_bank(iobase, 0);
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 0
/* Disable Rx */
register_bank(iobase, 0);
@@ -1419,8 +1424,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
lsr= inb(iobase + IRCC_LSR);
msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
- IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
- get_dma_residue(self->io.dma));
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
@@ -1442,17 +1447,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
len -= self->io.speed < 4000000 ? 2 : 4;
if (len < 2 || len > 2050) {
- IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
+ net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len);
return;
}
- IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
+ pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
skb = dev_alloc_skb(len + 1);
- if (!skb) {
- IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
- __func__);
+ if (!skb)
return;
- }
+
/* Make sure IP header gets aligned */
skb_reserve(skb, 1);
@@ -1491,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
/* Make sure we don't stay here to long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
+ pr_debug("%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1533,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
lcra = inb(iobase + IRCC_LCR_A);
lsr = inb(iobase + IRCC_LSR);
- IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
+ pr_debug("%s(), iir = 0x%02x\n", __func__, iir);
if (iir & IRCC_IIR_EOM) {
if (self->io.direction == IO_RECV)
@@ -1583,12 +1586,12 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
/* Clear interrupt */
lsr = inb(iobase + UART_LSR);
- IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
- __func__, iir, lsr, iobase);
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
switch (iir) {
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
+ pr_debug("%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -1600,8 +1603,8 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
smsc_ircc_sir_write_wakeup(self);
break;
default:
- IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
- __func__, iir);
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
break;
}
@@ -1628,12 +1631,12 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
int status = FALSE;
/* int iobase; */
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return FALSE;);
- IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
- get_dma_residue(self->io.dma));
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
status = (self->rx_buff.state != OUTSIDE_FRAME);
@@ -1648,8 +1651,8 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
self->netdev->name, self->netdev);
if (error)
- IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
- __func__, self->io.irq, error);
+ pr_debug("%s(), unable to allocate irq=%d, err=%d\n",
+ __func__, self->io.irq, error);
return error;
}
@@ -1693,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
struct smsc_ircc_cb *self;
char hwname[16];
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
if (self->io.suspended) {
- IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
+ pr_debug("%s(), device is suspended\n", __func__);
return -EAGAIN;
}
if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
(void *) dev)) {
- IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
- __func__, self->io.irq);
+ pr_debug("%s(), unable to allocate irq=%d\n",
+ __func__, self->io.irq);
return -EAGAIN;
}
@@ -1730,8 +1733,8 @@ static int smsc_ircc_net_open(struct net_device *dev)
if (request_dma(self->io.dma, dev->name)) {
smsc_ircc_net_close(dev);
- IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
- __func__, self->io.dma);
+ net_warn_ratelimited("%s(), unable to allocate DMA=%d\n",
+ __func__, self->io.dma);
return -EAGAIN;
}
@@ -1750,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
{
struct smsc_ircc_cb *self;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1781,7 +1784,7 @@ static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
struct smsc_ircc_cb *self = platform_get_drvdata(dev);
if (!self->io.suspended) {
- IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+ pr_debug("%s, Suspending\n", driver_name);
rtnl_lock();
if (netif_running(self->netdev)) {
@@ -1802,7 +1805,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
struct smsc_ircc_cb *self = platform_get_drvdata(dev);
if (self->io.suspended) {
- IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+ pr_debug("%s, Waking up\n", driver_name);
rtnl_lock();
smsc_ircc_init_chip(self);
@@ -1833,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
*/
static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
{
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1845,13 +1848,13 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
smsc_ircc_stop_interrupts(self);
/* Release the PORTS that this driver is using */
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
- self->io.fir_base);
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
- self->io.sir_base);
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.sir_base);
release_region(self->io.sir_base, self->io.sir_ext);
@@ -1872,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
for (i = 0; i < 2; i++) {
if (dev_self[i])
@@ -1896,7 +1899,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base, sir_base;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1922,7 +1925,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
- IRDA_DEBUG(3, "%s() - exit\n", __func__);
+ pr_debug("%s() - exit\n", __func__);
outb(0x00, fir_base + IRCC_MASTER);
}
@@ -1932,7 +1935,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
iobase = self->io.sir_base;
/* Reset UART */
@@ -1958,7 +1961,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(4, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
iobase = self->io.sir_base;
@@ -1979,8 +1982,8 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
* if we need to change the speed of the hardware
*/
if (self->new_speed) {
- IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
- __func__, self->new_speed);
+ pr_debug("%s(), Changing speed to %d.\n",
+ __func__, self->new_speed);
smsc_ircc_sir_wait_hw_transmitter_finish(self);
smsc_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
@@ -2019,7 +2022,8 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
- IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
+ net_warn_ratelimited("%s(), failed, fifo not empty!\n",
+ __func__);
return 0;
}
@@ -2058,14 +2062,14 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
for (i = 0; smsc_transceivers[i].name != NULL; i++)
if (smsc_transceivers[i].probe(self->io.fir_base)) {
- IRDA_MESSAGE(" %s transceiver found\n",
- smsc_transceivers[i].name);
+ net_info_ratelimited(" %s transceiver found\n",
+ smsc_transceivers[i].name);
self->transceiver= i + 1;
return;
}
- IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
- smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+ net_info_ratelimited("No transceiver found. Defaulting to %s\n",
+ smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
}
@@ -2119,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
udelay(1);
if (count < 0)
- IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
+ pr_debug("%s(): stuck transmitter\n", __func__);
}
@@ -2180,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
u8 mode, dma, irq;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
return ret;
@@ -2191,7 +2195,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
/*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
- IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
+ net_warn_ratelimited("%s(): IrDA not enabled\n", __func__);
outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
sirbase = inb(cfgbase + 1) << 2;
@@ -2208,7 +2212,8 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
- IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
+ net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n",
+ __func__, firbase, sirbase, dma, irq, mode);
if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
ret = 0;
@@ -2230,7 +2235,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
unsigned short fir_io, sir_io;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
return ret;
@@ -2264,7 +2269,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
{
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
outb(reg, cfg_base);
return inb(cfg_base) != reg ? -1 : 0;
@@ -2274,7 +2279,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
{
u8 devid, xdevid, rev;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* Leave configuration */
@@ -2329,16 +2334,16 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
return NULL;
}
- IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
- devid, rev, cfg_base, type, chip->name);
+ net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
+ devid, rev, cfg_base, type, chip->name);
if (chip->rev > rev) {
- IRDA_MESSAGE("Revision higher than expected\n");
+ net_info_ratelimited("Revision higher than expected\n");
return NULL;
}
if (chip->flags & NoIRDA)
- IRDA_MESSAGE("chipset does not support IRDA\n");
+ net_info_ratelimited("chipset does not support IRDA\n");
return chip;
}
@@ -2348,8 +2353,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
int ret = -1;
if (!request_region(cfg_base, 2, driver_name)) {
- IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __func__, cfg_base);
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
!smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2366,8 +2371,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
int ret = -1;
if (!request_region(cfg_base, 2, driver_name)) {
- IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __func__, cfg_base);
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
!smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2529,9 +2534,8 @@ static int __init preconfigure_smsc_chip(struct
outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
tmpbyte = inb(iobase +1); // Read device ID
- IRDA_DEBUG(0,
- "Detected Chip id: 0x%02x, setting up registers...\n",
- tmpbyte);
+ pr_debug("Detected Chip id: 0x%02x, setting up registers...\n",
+ tmpbyte);
/* Disable UART1 and set up SIR I/O port */
outb(0x24, iobase); // select CR24 - UART1 base addr
@@ -2540,8 +2544,8 @@ static int __init preconfigure_smsc_chip(struct
outb( (conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8
tmpbyte = inb(iobase + 1);
if (tmpbyte != (conf->sir_io >> 2) ) {
- IRDA_WARNING("ERROR: could not configure SIR ioport.\n");
- IRDA_WARNING("Try to supply ircc_cfg argument.\n");
+ net_warn_ratelimited("ERROR: could not configure SIR ioport\n");
+ net_warn_ratelimited("Try to supply ircc_cfg argument\n");
return -ENXIO;
}
@@ -2553,7 +2557,7 @@ static int __init preconfigure_smsc_chip(struct
outb(tmpbyte, iobase + 1);
tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
if (tmpbyte != conf->fir_irq) {
- IRDA_WARNING("ERROR: could not configure FIR IRQ channel.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n");
return -ENXIO;
}
@@ -2562,7 +2566,7 @@ static int __init preconfigure_smsc_chip(struct
outb((conf->fir_io >> 3), iobase + 1);
tmpbyte = inb(iobase + 1);
if (tmpbyte != (conf->fir_io >> 3) ) {
- IRDA_WARNING("ERROR: could not configure FIR I/O port.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR I/O port\n");
return -ENXIO;
}
@@ -2571,7 +2575,7 @@ static int __init preconfigure_smsc_chip(struct
outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA
tmpbyte = inb(iobase + 1) & LPC47N227_FIRDMASELECT_MASK;
if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) {
- IRDA_WARNING("ERROR: could not configure FIR DMA channel.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n");
return -ENXIO;
}
@@ -2628,7 +2632,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
unsigned short tmpword;
unsigned char tmpbyte;
- IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n");
+ net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n");
/*
* Select the range for the COMA COM port (SIR)
* Register COM_DEC:
@@ -2677,7 +2681,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
default:
tmpbyte |= 0x01; /* COM2 default */
}
- IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte);
+ pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte);
pci_write_config_byte(dev, COM_DEC, tmpbyte);
/* Enable Low Pin Count interface */
@@ -2699,13 +2703,13 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
tmpword |= 0x0400;
break;
default:
- IRDA_WARNING("Uncommon I/O base address: 0x%04x\n",
- conf->cfg_base);
+ net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n",
+ conf->cfg_base);
break;
}
tmpword &= 0xfffd; /* disable LPC COMB */
tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
- IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword);
+ pr_debug("LPC_EN (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, LPC_EN, tmpword);
/*
@@ -2750,7 +2754,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
default:
break; /* do not change settings */
}
- IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword);
+ pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, PCI_DMA_C, tmpword);
/*
@@ -2761,7 +2765,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
*/
tmpword = conf->fir_io & 0xfff8;
tmpword |= 0x0001;
- IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword);
+ pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, GEN2_DEC, tmpword);
/* Pre-configure chip */
@@ -2800,7 +2804,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
mask = 0x08;
break;
default:
- IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port);
+ net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n",
+ port);
return;
}
@@ -2808,7 +2813,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
/* Turn on the right bits */
tmpbyte |= mask;
pci_write_config_byte(dev, reg, tmpbyte);
- IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
+ net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n",
+ port);
}
static int __init preconfigure_through_ali(struct pci_dev *dev,
@@ -2877,7 +2883,8 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
if (ircc_irq != IRQ_INVAL)
tmpconf.fir_irq = ircc_irq;
- IRDA_MESSAGE("Detected unconfigured %s SMSC IrDA chip, pre-configuring device.\n", conf->name);
+ net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n",
+ conf->name);
if (conf->preconfigure)
ret = conf->preconfigure(dev, &tmpconf);
else
@@ -2922,8 +2929,8 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
/* empty */;
if (val)
- IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
- inb(fir_base + IRCC_ATC));
+ net_warn_ratelimited("%s(): ATC: 0x%02x\n",
+ __func__, inb(fir_base + IRCC_ATC));
}
/*
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index 048a15422844..9dcf0c103b9d 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -63,8 +63,8 @@ static int __init tekram_sir_init(void)
{
if (tekram_delay < 1 || tekram_delay > 500)
tekram_delay = 200;
- IRDA_DEBUG(1, "%s - using %d ms delay\n",
- tekram.driver_name, tekram_delay);
+ pr_debug("%s - using %d ms delay\n",
+ tekram.driver_name, tekram_delay);
return irda_register_dongle(&tekram);
}
@@ -77,8 +77,6 @@ static int tekram_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
sirdev_set_dtr_rts(dev, TRUE, TRUE);
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -92,8 +90,6 @@ static int tekram_open(struct sir_dev *dev)
static int tekram_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -130,8 +126,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -179,7 +173,8 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -204,8 +199,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
static int tekram_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Clear DTR, Set RTS */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index 19ad4606b799..6d2f55959c49 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -168,8 +168,8 @@ static int __init toim3232_sir_init(void)
{
if (toim3232delay < 1 || toim3232delay > 500)
toim3232delay = 200;
- IRDA_DEBUG(1, "%s - using %d ms delay\n",
- toim3232.driver_name, toim3232delay);
+ pr_debug("%s - using %d ms delay\n",
+ toim3232.driver_name, toim3232delay);
return irda_register_dongle(&toim3232);
}
@@ -182,8 +182,6 @@ static int toim3232_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Pull the lines high to start with.
*
* For the IR320ST-2, we need to charge the main supply capacitor to
@@ -210,8 +208,6 @@ static int toim3232_open(struct sir_dev *dev)
static int toim3232_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -242,8 +238,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -345,8 +339,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
static int toim3232_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Switch off both DTR and RTS to switch off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 36e004288ea7..6960d4cd3cae 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -144,12 +144,10 @@ static int __init via_ircc_init(void)
{
int rc;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
rc = pci_register_driver(&via_driver);
if (rc < 0) {
- IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
- __func__, rc);
+ pr_debug("%s(): error rc = %d, returning -ENODEV...\n",
+ __func__, rc);
return -ENODEV;
}
return 0;
@@ -162,11 +160,11 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
chipio_t info;
- IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
+ pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
rc = pci_enable_device (pcidev);
if (rc) {
- IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
+ pr_debug("%s(): error rc = %d\n", __func__, rc);
return -ENODEV;
}
@@ -177,7 +175,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
Chipset=0x3076;
if (Chipset==0x3076) {
- IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
+ pr_debug("%s(): Chipset = 3076\n", __func__);
WriteLPCReg(7,0x0c );
temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -213,7 +211,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
- IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
+ pr_debug("%s(): Chipset = 3096\n", __func__);
pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
if((bTmp&0x01)==1) { // BIOS enable FIR
@@ -252,14 +250,12 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
- IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
+ pr_debug("%s(): End - rc = %d\n", __func__, rc);
return rc;
}
static void __exit via_ircc_cleanup(void)
{
- IRDA_DEBUG(3, "%s()\n", __func__);
-
/* Cleanup all instances of the driver */
pci_unregister_driver (&via_driver);
}
@@ -289,8 +285,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
struct via_ircc_cb *self;
int err;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
/* Allocate new instance of the driver */
dev = alloc_irdadev(sizeof(struct via_ircc_cb));
if (dev == NULL)
@@ -316,8 +310,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
- IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
@@ -391,7 +385,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
if (err)
goto err_out4;
- IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
+ dev->name);
/* Initialise the hardware..
*/
@@ -422,8 +417,6 @@ static void via_remove_one(struct pci_dev *pdev)
struct via_ircc_cb *self = pci_get_drvdata(pdev);
int iobase;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
iobase = self->io.fir_base;
ResetChip(iobase, 5); //hardware reset.
@@ -431,8 +424,8 @@ static void via_remove_one(struct pci_dev *pdev)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
@@ -457,8 +450,6 @@ static void via_hw_init(struct via_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
// FIFO Init
EnRXFIFOReadyInt(iobase, OFF);
@@ -510,7 +501,7 @@ static void via_hw_init(struct via_ircc_cb *self)
*/
static int via_ircc_read_dongle_id(int iobase)
{
- IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
+ net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
return 9; /* Default to IBM */
}
@@ -527,8 +518,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
/* speed is unused, as we use IsSIROn()/IsMIROn() */
speed = speed;
- IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
- __func__, speed, iobase, dongle_id);
+ pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
+ __func__, speed, iobase, dongle_id);
switch (dongle_id) {
@@ -617,7 +608,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
case 0x11: /* Temic TFDS4500 */
- IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
+ pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
+ __func__);
UseOneRX(iobase, ON); //use ONE RX....RX1
InvertTX(iobase, OFF);
@@ -635,7 +627,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
SlowIRRXLowActive(iobase, OFF);
} else{
- IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
+ pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
+ __func__);
}
break;
@@ -652,8 +645,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
break;
default:
- IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
- __func__, dongle_id);
+ net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
+ __func__, dongle_id);
}
}
@@ -672,7 +665,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
iobase = self->io.fir_base;
/* Update accounting for new speed */
self->io.speed = speed;
- IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
+ pr_debug("%s: change_speed to %d bps.\n", __func__, speed);
WriteReg(iobase, I_ST_CT_0, 0x0);
@@ -902,10 +895,10 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
- IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
- __func__, self->tx_fifo.ptr,
- self->tx_fifo.queue[self->tx_fifo.ptr].len,
- self->tx_fifo.len);
+ pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+ __func__, self->tx_fifo.ptr,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ self->tx_fifo.len);
SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
RXStart(iobase, OFF);
@@ -926,8 +919,6 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
int iobase;
u8 Tx_status;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
iobase = self->io.fir_base;
/* Disable DMA */
// DisableDmaChannel(self->io.dma);
@@ -957,10 +948,9 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
self->tx_fifo.ptr++;
}
}
- IRDA_DEBUG(1,
- "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
- __func__,
- self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+ pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+ __func__,
+ self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
// Any frames to be sent back-to-back?
if (self->tx_fifo.len) {
@@ -995,8 +985,6 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
self->tx_fifo.tail = self->tx_buff.head;
self->RxDataReady = 0;
@@ -1078,15 +1066,15 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
if (len == 0)
return TRUE; //interrupt only, data maybe move by RxT
if (((len - 4) < 2) || ((len - 4) > 2048)) {
- IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
- __func__, len, RxCurCount(iobase, self),
- self->RxLastCount);
+ pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
+ __func__, len, RxCurCount(iobase, self),
+ self->RxLastCount);
hwreset(self);
return FALSE;
}
- IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
- __func__,
- st_fifo->len, len - 4, RxCurCount(iobase, self));
+ pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+ __func__,
+ st_fifo->len, len - 4, RxCurCount(iobase, self));
st_fifo->entries[st_fifo->tail].status = status;
st_fifo->entries[st_fifo->tail].len = len;
@@ -1133,8 +1121,8 @@ F01_E */
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
- len - 4, self->rx_buff.data);
+ pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
+ len - 4, self->rx_buff.data);
// Move to next frame
self->rx_buff.data += len;
@@ -1163,7 +1151,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
len = GetRecvByte(iobase, self);
- IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
+ pr_debug("%s(): len=%x\n", __func__, len);
if ((len - 4) < 2) {
self->netdev->stats.rx_dropped++;
@@ -1248,8 +1236,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
- len - 4, st_fifo->head);
+ pr_debug("%s(): len=%x.head=%x\n", __func__,
+ len - 4, st_fifo->head);
// Move to next frame
self->rx_buff.data += len;
@@ -1262,10 +1250,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
} //while
self->RetryCount = 0;
- IRDA_DEBUG(2,
- "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
- __func__,
- GetHostStatus(iobase), GetRXStatus(iobase));
+ pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+ __func__, GetHostStatus(iobase), GetRXStatus(iobase));
/*
* if frame is receive complete at this routine ,then upload
@@ -1303,12 +1289,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
spin_lock(&self->lock);
iHostIntType = GetHostStatus(iobase);
- IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
- __func__, iHostIntType,
- (iHostIntType & 0x40) ? "Timer" : "",
- (iHostIntType & 0x20) ? "Tx" : "",
- (iHostIntType & 0x10) ? "Rx" : "",
- (iHostIntType & 0x0e) >> 1);
+ pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n",
+ __func__, iHostIntType,
+ (iHostIntType & 0x40) ? "Timer" : "",
+ (iHostIntType & 0x20) ? "Tx" : "",
+ (iHostIntType & 0x10) ? "Rx" : "",
+ (iHostIntType & 0x0e) >> 1);
if ((iHostIntType & 0x40) != 0) { //Timer Event
self->EventFlag.TimeOut++;
@@ -1333,12 +1319,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
if ((iHostIntType & 0x20) != 0) { //Tx Event
iTxIntType = GetTXStatus(iobase);
- IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
- __func__, iTxIntType,
- (iTxIntType & 0x08) ? "FIFO underr." : "",
- (iTxIntType & 0x04) ? "EOM" : "",
- (iTxIntType & 0x02) ? "FIFO ready" : "",
- (iTxIntType & 0x01) ? "Early EOM" : "");
+ pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n",
+ __func__, iTxIntType,
+ (iTxIntType & 0x08) ? "FIFO underr." : "",
+ (iTxIntType & 0x04) ? "EOM" : "",
+ (iTxIntType & 0x02) ? "FIFO ready" : "",
+ (iTxIntType & 0x01) ? "Early EOM" : "");
if (iTxIntType & 0x4) {
self->EventFlag.EOMessage++; // read and will auto clean
@@ -1357,17 +1343,17 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
/* Check if DMA has finished */
iRxIntType = GetRXStatus(iobase);
- IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
- __func__, iRxIntType,
- (iRxIntType & 0x80) ? "PHY err." : "",
- (iRxIntType & 0x40) ? "CRC err" : "",
- (iRxIntType & 0x20) ? "FIFO overr." : "",
- (iRxIntType & 0x10) ? "EOF" : "",
- (iRxIntType & 0x08) ? "RxData" : "",
- (iRxIntType & 0x02) ? "RxMaxLen" : "",
- (iRxIntType & 0x01) ? "SIR bad" : "");
+ pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
+ __func__, iRxIntType,
+ (iRxIntType & 0x80) ? "PHY err." : "",
+ (iRxIntType & 0x40) ? "CRC err" : "",
+ (iRxIntType & 0x20) ? "FIFO overr." : "",
+ (iRxIntType & 0x10) ? "EOF" : "",
+ (iRxIntType & 0x08) ? "RxData" : "",
+ (iRxIntType & 0x02) ? "RxMaxLen" : "",
+ (iRxIntType & 0x01) ? "SIR bad" : "");
if (!iRxIntType)
- IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
+ pr_debug("%s(): RxIRQ =0\n", __func__);
if (iRxIntType & 0x10) {
if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1376,10 +1362,9 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
}
} // No ERR
else { //ERR
- IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
- __func__, iRxIntType, iHostIntType,
- RxCurCount(iobase, self),
- self->RxLastCount);
+ pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+ __func__, iRxIntType, iHostIntType,
+ RxCurCount(iobase, self), self->RxLastCount);
if (iRxIntType & 0x20) { //FIFO OverRun ERR
ResetChip(iobase, 0);
@@ -1402,8 +1387,6 @@ static void hwreset(struct via_ircc_cb *self)
int iobase;
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
ResetChip(iobase, 5);
EnableDMA(iobase, OFF);
EnableTX(iobase, OFF);
@@ -1447,7 +1430,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
if (CkRxRecv(iobase, self))
status = TRUE;
- IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
+ pr_debug("%s(): status=%x....\n", __func__, status);
return status;
}
@@ -1465,16 +1448,14 @@ static int via_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
dev->stats.rx_packets = 0;
IRDA_ASSERT(self != NULL, return 0;);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
- self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
return -EAGAIN;
}
/*
@@ -1482,15 +1463,15 @@ static int via_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
- self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
if (self->io.dma2 != self->io.dma) {
if (request_dma(self->io.dma2, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma2=%d\n",
- driver_name, self->io.dma2);
+ net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
+ driver_name, self->io.dma2);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
return -EAGAIN;
@@ -1532,8 +1513,6 @@ static int via_ircc_net_close(struct net_device *dev)
struct via_ircc_cb *self;
int iobase;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
@@ -1576,8 +1555,8 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
- cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
+ cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->lock, flags);
switch (cmd) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a2e556168286..ac39d9f33d5f 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -429,8 +429,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
if (rd->buf == NULL ||
!(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
if (rd->buf) {
- IRDA_ERROR("%s: failed to create PCI-MAP for %p",
- __func__, rd->buf);
+ net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ __func__, rd->buf);
kfree(rd->buf);
rd->buf = NULL;
}
@@ -483,11 +483,8 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
&idev->busaddr);
- if (!ringarea) {
- IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
- __func__);
+ if (!ringarea)
goto out;
- }
hwmap = (struct ring_descr_hw *)ringarea;
idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
@@ -559,7 +556,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
len -= crclen; /* remove trailing CRC */
if (len <= 0) {
- IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
+ pr_debug("%s: strange frame (len=%d)\n", __func__, len);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -574,14 +571,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
*/
le16_to_cpus(rd->buf+len);
if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
- IRDA_DEBUG(0, "%s: crc error\n", __func__);
+ pr_debug("%s: crc error\n", __func__);
ret |= VLSI_RX_CRC;
goto done;
}
}
if (!rd->skb) {
- IRDA_WARNING("%s: rx packet lost\n", __func__);
+ net_warn_ratelimited("%s: rx packet lost\n", __func__);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -610,8 +607,8 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
if (rd_is_active(rd)) {
- IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
- __func__);
+ net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
+ __func__);
vlsi_ring_debug(r);
break;
}
@@ -670,7 +667,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
if (ring_first(r) == NULL) {
/* we are in big trouble, if this should ever happen */
- IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
+ net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
vlsi_ring_debug(r);
}
else
@@ -692,7 +689,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
if (rd_is_active(rd)) {
rd_set_status(rd, 0);
if (rd_get_count(rd)) {
- IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
+ pr_debug("%s - dropping rx packet\n", __func__);
ret = -VLSI_RX_DROP;
}
rd_set_count(rd, 0);
@@ -767,7 +764,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
int fifocnt;
baudrate = idev->new_baud;
- IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
+ pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
@@ -783,8 +780,8 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
switch(baudrate) {
default:
- IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
- __func__, baudrate);
+ net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
+ __func__, baudrate);
baudrate = 9600;
/* fallthru */
case 2400:
@@ -801,7 +798,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
}
outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -825,14 +822,16 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
config ^= IRENABLE_SIR_ON;
if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
- IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
- (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
+ net_warn_ratelimited("%s: failed to set %s mode!\n",
+ __func__,
+ mode == IFF_SIR ? "SIR" :
+ mode == IFF_MIR ? "MIR" : "FIR");
ret = -1;
}
else {
if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
- IRDA_WARNING("%s: failed to apply baudrate %d\n",
- __func__, baudrate);
+ net_warn_ratelimited("%s: failed to apply baudrate %d\n",
+ __func__, baudrate);
ret = -1;
}
else {
@@ -977,8 +976,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
*/
if (len >= r->len-5)
- IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
- __func__);
+ net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
+ __func__);
}
else {
/* hw deals with MIR/FIR mode wrapping */
@@ -1023,7 +1022,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
}
config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1035,7 +1035,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
+ pr_debug("%s: tx ring full - queue stopped\n", __func__);
}
spin_unlock_irqrestore(&idev->lock, flags);
@@ -1044,7 +1044,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
drop_unlock:
spin_unlock_irqrestore(&idev->lock, flags);
drop:
- IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
+ net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
dev_kfree_skb_any(skb);
ndev->stats.tx_errors++;
ndev->stats.tx_dropped++;
@@ -1100,8 +1100,8 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
- __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
}
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
}
@@ -1110,7 +1110,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
if (netif_queue_stopped(ndev) && !idev->new_baud) {
netif_wake_queue(ndev);
- IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
+ pr_debug("%s: queue awoken\n", __func__);
}
}
@@ -1134,7 +1134,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
- IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
+ pr_debug("%s - dropping tx packet\n", __func__);
ret = -VLSI_TX_DROP;
}
else
@@ -1183,8 +1183,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
}
if (count < 3) {
if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
- IRDA_ERROR("%s: no PLL or failed to lock!\n",
- __func__);
+ net_err_ratelimited("%s: no PLL or failed to lock!\n",
+ __func__);
clkctl = CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
@@ -1192,8 +1192,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
else /* was: clksrc=0(auto) */
clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
- IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
- __func__, clksrc);
+ pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
+ __func__, clksrc);
}
else
clksrc = 1; /* got successful PLL lock */
@@ -1265,7 +1265,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
/* start the clock and clean the registers */
if (vlsi_start_clock(pdev)) {
- IRDA_ERROR("%s: no valid clock source\n", __func__);
+ net_err_ratelimited("%s: no valid clock source\n", __func__);
return -1;
}
iobase = ndev->base_addr;
@@ -1389,8 +1389,8 @@ static void vlsi_tx_timeout(struct net_device *ndev)
idev->new_baud = idev->baud; /* keep current baudrate */
if (vlsi_start_hw(idev))
- IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
- __func__, pci_name(idev->pdev), ndev->name);
+ net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
+ __func__, pci_name(idev->pdev), ndev->name);
else
netif_start_queue(ndev);
}
@@ -1434,8 +1434,8 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
break;
default:
- IRDA_WARNING("%s: notsupp - cmd=%04x\n",
- __func__, cmd);
+ net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
+ __func__, cmd);
ret = -EOPNOTSUPP;
}
@@ -1479,8 +1479,8 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
spin_unlock_irqrestore(&idev->lock,flags);
if (boguscount <= 0)
- IRDA_MESSAGE("%s: too much work in interrupt!\n",
- __func__);
+ net_info_ratelimited("%s: too much work in interrupt!\n",
+ __func__);
return IRQ_RETVAL(handled);
}
@@ -1493,7 +1493,7 @@ static int vlsi_open(struct net_device *ndev)
char hwname[32];
if (pci_request_regions(idev->pdev, drivername)) {
- IRDA_WARNING("%s: io resource busy\n", __func__);
+ net_warn_ratelimited("%s: io resource busy\n", __func__);
goto errout;
}
ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1507,8 +1507,8 @@ static int vlsi_open(struct net_device *ndev)
if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
drivername, ndev)) {
- IRDA_WARNING("%s: couldn't get IRQ: %d\n",
- __func__, ndev->irq);
+ net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
+ __func__, ndev->irq);
goto errout_io;
}
@@ -1529,7 +1529,8 @@ static int vlsi_open(struct net_device *ndev)
netif_start_queue(ndev);
- IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
+ net_info_ratelimited("%s: device %s operational\n",
+ __func__, ndev->name);
return 0;
@@ -1563,7 +1564,7 @@ static int vlsi_close(struct net_device *ndev)
pci_release_regions(idev->pdev);
- IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
+ net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
return 0;
}
@@ -1590,7 +1591,8 @@ static int vlsi_irda_init(struct net_device *ndev)
if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
- IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
+ net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
+ __func__);
return -1;
}
@@ -1632,19 +1634,19 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
pdev->current_state = 0; /* hw must be running now */
- IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
- drivername, pci_name(pdev));
+ net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
+ drivername, pci_name(pdev));
if ( !pci_resource_start(pdev,0) ||
!(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
- IRDA_ERROR("%s: bar 0 invalid", __func__);
+ net_err_ratelimited("%s: bar 0 invalid", __func__);
goto out_disable;
}
ndev = alloc_irdadev(sizeof(*idev));
if (ndev==NULL) {
- IRDA_ERROR("%s: Unable to allocate device memory.\n",
- __func__);
+ net_err_ratelimited("%s: Unable to allocate device memory.\n",
+ __func__);
goto out_disable;
}
@@ -1659,7 +1661,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_freedev;
if (register_netdev(ndev) < 0) {
- IRDA_ERROR("%s: register_netdev failed\n", __func__);
+ net_err_ratelimited("%s: register_netdev failed\n", __func__);
goto out_freedev;
}
@@ -1669,14 +1671,15 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
vlsi_proc_root, VLSI_PROC_FOPS, ndev);
if (!ent) {
- IRDA_WARNING("%s: failed to create proc entry\n",
- __func__);
+ net_warn_ratelimited("%s: failed to create proc entry\n",
+ __func__);
} else {
proc_set_size(ent, 0);
}
idev->proc_entry = ent;
}
- IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
+ net_info_ratelimited("%s: registered device %s\n",
+ drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
mutex_unlock(&idev->mtx);
@@ -1698,7 +1701,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s: lost netdevice?\n", drivername);
+ net_err_ratelimited("%s: lost netdevice?\n", drivername);
return;
}
@@ -1714,7 +1717,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
free_netdev(ndev);
- IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
+ net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
}
#ifdef CONFIG_PM
@@ -1733,8 +1736,8 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice\n",
- __func__, pci_name(pdev));
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
return 0;
}
idev = netdev_priv(ndev);
@@ -1745,7 +1748,9 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pdev->current_state = state.event;
}
else
- IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
+ net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
+ __func__, pci_name(pdev),
+ pdev->current_state, state.event);
mutex_unlock(&idev->mtx);
return 0;
}
@@ -1772,16 +1777,16 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice\n",
- __func__, pci_name(pdev));
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
return 0;
}
idev = netdev_priv(ndev);
mutex_lock(&idev->mtx);
if (pdev->current_state == 0) {
mutex_unlock(&idev->mtx);
- IRDA_WARNING("%s - %s: already resumed\n",
- __func__, pci_name(pdev));
+ net_warn_ratelimited("%s - %s: already resumed\n",
+ __func__, pci_name(pdev));
return 0;
}
@@ -1800,7 +1805,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
* now we explicitly set pdev->current_state = 0 after enabling the
* device and independently resume_ok should catch any garbage config.
*/
- IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
+ net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
mutex_unlock(&idev->mtx);
return 0;
}
@@ -1837,7 +1842,8 @@ static int __init vlsi_mod_init(void)
int i, ret;
if (clksrc < 0 || clksrc > 3) {
- IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
+ net_err_ratelimited("%s: invalid clksrc=%d\n",
+ drivername, clksrc);
return -1;
}
@@ -1850,7 +1856,10 @@ static int __init vlsi_mod_init(void)
case 64:
break;
default:
- IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]);
+ net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
+ drivername,
+ i ? "rx" : "tx",
+ ringsize[i]);
ringsize[i] = 8;
break;
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 56399204e68c..f9119c6d2a09 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -615,7 +615,8 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
*/
if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
- IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__);
+ net_err_ratelimited("%s: pci busaddr inconsistency!\n",
+ __func__);
dump_stack();
return;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 11dbdf36d9c1..4e3d2e7c697c 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -110,8 +110,6 @@ static int __init w83977af_init(void)
{
int i;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
@@ -129,8 +127,6 @@ static void __exit w83977af_cleanup(void)
{
int i;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
@@ -157,12 +153,10 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
struct w83977af_ir *self;
int err;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
- IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __func__ , iobase);
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__ , iobase);
return -ENODEV;
}
@@ -236,10 +230,11 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdevice() failed!\n",
+ __func__);
goto err_out3;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Need to store self somewhere */
dev_self[i] = self;
@@ -268,8 +263,6 @@ static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
@@ -288,8 +281,8 @@ static int w83977af_close(struct w83977af_ir *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
- __func__ , self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__ , self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -311,7 +304,6 @@ static int w83977af_probe(int iobase, int irq, int dma)
int i;
for (i=0; i < 2; i++) {
- IRDA_DEBUG( 0, "%s()\n", __func__ );
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
@@ -392,13 +384,13 @@ static int w83977af_probe(int iobase, int irq, int dma)
switch_bank(iobase, SET7);
outb(0x40, iobase+7);
- IRDA_MESSAGE("W83977AF (IR) driver loaded. "
- "Version: 0x%02x\n", version);
+ net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
+ version);
return 0;
} else {
/* Try next extented function register address */
- IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
+ pr_debug("%s(), Wrong chip version", __func__);
}
}
return -1;
@@ -434,19 +426,19 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
case 115200: outb(0x01, iobase+ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = HCR_MIR_1152;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
break;
case 4000000:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
break;
default:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
+ pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
break;
}
@@ -497,8 +489,8 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
iobase = self->io.fir_base;
- IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
- (int) skb->len);
+ pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
+ (int)skb->len);
/* Lock transmit buffer */
netif_stop_queue(dev);
@@ -525,7 +517,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
- IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
+ pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
if (mtt)
udelay(mtt);
@@ -559,7 +551,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
- IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
+ pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
set = inb(iobase+SSR);
@@ -594,19 +586,16 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 set;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* Save current bank */
set = inb(iobase+SSR);
switch_bank(iobase, SET0);
if (!(inb_p(iobase+USR) & USR_TSRE)) {
- IRDA_DEBUG(4,
- "%s(), warning, FIFO not empty yet!\n", __func__ );
+ pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
fifo_size -= 17;
- IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
- __func__ , fifo_size);
+ pr_debug("%s(), %d bytes left in tx fifo\n",
+ __func__ , fifo_size);
}
/* Fill FIFO with current frame */
@@ -615,8 +604,8 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
outb(buf[actual++], iobase+TBR);
}
- IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __func__ , fifo_size, actual, len);
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__ , fifo_size, actual, len);
/* Restore bank */
outb(set, iobase+SSR);
@@ -636,7 +625,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
int iobase;
__u8 set;
- IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
+ pr_debug("%s(%ld)\n", __func__ , jiffies);
IRDA_ASSERT(self != NULL, return;);
@@ -651,7 +640,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
/* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
- IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
+ pr_debug("%s(), Transmit underrun!\n", __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
@@ -692,7 +681,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
#endif
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(4, "%s\n", __func__ );
+ pr_debug("%s\n", __func__);
iobase= self->io.fir_base;
@@ -763,7 +752,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
__u8 set;
__u8 status;
- IRDA_DEBUG(4, "%s\n", __func__ );
+ pr_debug("%s\n", __func__);
st_fifo = &self->st_fifo;
@@ -880,8 +869,6 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
__u8 byte = 0x00;
int iobase;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.fir_base;
@@ -907,7 +894,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
__u8 set;
int iobase;
- IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
+ pr_debug("%s(), isr=%#x\n", __func__ , isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
@@ -943,8 +930,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
if (isr & ISR_TXEMP_I) {
/* Check if we need to change the speed? */
if (self->new_speed) {
- IRDA_DEBUG(2,
- "%s(), Changing speed!\n", __func__ );
+ pr_debug("%s(), Changing speed!\n", __func__);
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
@@ -1126,7 +1112,6 @@ static int w83977af_net_open(struct net_device *dev)
char hwname[32];
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1189,8 +1174,6 @@ static int w83977af_net_close(struct net_device *dev)
int iobase;
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1244,7 +1227,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);
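The change repeated across the IrDA drivers above swaps the subsystem-private IRDA_DEBUG()/IRDA_MESSAGE()/IRDA_WARNING()/IRDA_ERROR() macros for the generic kernel logging helpers: pr_debug(), which compiles to nothing unless DEBUG or dynamic debug is enabled, and the net_info/warn/err_ratelimited() wrappers from <linux/net.h>, which print at the matching printk level but are rate limited. A minimal sketch of the resulting shape, using a hypothetical foo_open()/foo_base rather than any of the drivers above:

/*
 * Sketch only: foo_open(), foo_base and the "foo" region name are
 * illustrative placeholders, not part of the patches above.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/net.h>

static int foo_open(unsigned int foo_base)
{
	/* Was IRDA_DEBUG(2, ...): silent unless DEBUG/dynamic debug is on. */
	pr_debug("%s(): iobase=0x%03x\n", __func__, foo_base);

	if (!request_region(foo_base, 8, "foo")) {
		/* Was IRDA_WARNING(...): same level, now rate limited. */
		net_warn_ratelimited("%s: can't get iobase 0x%03x\n",
				     __func__, foo_base);
		return -ENODEV;
	}
	return 0;
}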
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index bfb0b6ec8c56..612e0731142d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -742,11 +742,12 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
#define ALWAYS_ON_FEATURES \
- (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \
+ NETIF_F_GSO_ROBUST)
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
- NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
@@ -783,6 +784,7 @@ static int macvlan_init(struct net_device *dev)
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
dev->features |= ALWAYS_ON_FEATURES;
+ dev->hw_features |= NETIF_F_LRO;
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
@@ -872,7 +874,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -897,7 +899,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
@@ -935,15 +937,15 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t lowerdev_features = vlan->lowerdev->features;
netdev_features_t mask;
features |= NETIF_F_ALL_FOR_ALL;
features &= (vlan->set_features | ~MACVLAN_FEATURES);
mask = features;
- features = netdev_increment_features(vlan->lowerdev->features,
- features,
- mask);
+ lowerdev_features &= (features | ~NETIF_F_LRO);
+ features = netdev_increment_features(lowerdev_features, features, mask);
features |= ALWAYS_ON_FEATURES;
features &= ~NETIF_F_NETNS_LOCAL;
@@ -1055,6 +1057,9 @@ static int macvlan_port_create(struct net_device *dev)
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
return -EINVAL;
+ if (netif_is_ipvlan_port(dev))
+ return -EBUSY;
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (port == NULL)
return -ENOMEM;
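In macvlan, NETIF_F_LRO joins MACVLAN_FEATURES and dev->hw_features, and macvlan_fix_features() masks the lower device's feature set so LRO is only inherited while the macvlan itself still advertises it; in practice that makes LRO togglable per macvlan via ethtool -K. A condensed sketch of that ndo_fix_features idiom, with foo_priv standing in for macvlan_dev:

/*
 * Sketch of the conditional-offload-inheritance idiom; struct foo_priv
 * and its lowerdev member are illustrative stand-ins for macvlan_dev.
 */
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *lowerdev;
};

static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct foo_priv *priv = netdev_priv(dev);
	netdev_features_t lower = priv->lowerdev->features;

	/* Drop LRO from the lower device's set unless this device
	 * still requests it, then merge as usual.
	 */
	lower &= (features | ~NETIF_F_LRO);
	return netdev_increment_features(lower, features, features);
}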
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 880cc090dc44..7df221788cd4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -15,6 +15,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
+#include <linux/uio.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
@@ -45,6 +46,20 @@ struct macvtap_queue {
struct list_head next;
};
+#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
+
+#define MACVTAP_VNET_LE 0x80000000
+
+static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
+{
+ return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
+}
+
+static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
+{
+ return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
+}
+
static struct proto macvtap_proto = {
.name = "macvtap",
.owner = THIS_MODULE,
@@ -557,7 +572,8 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
* macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
* be shared with the tun/tap driver.
*/
-static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
+ struct sk_buff *skb,
struct virtio_net_hdr *vnet_hdr)
{
unsigned short gso_type = 0;
@@ -588,13 +604,13 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
}
if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
- vnet_hdr->csum_offset))
+ if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
+ macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
return -EINVAL;
}
if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
+ skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
skb_shinfo(skb)->gso_type = gso_type;
/* Header must be checked, and gso_segs computed. */
@@ -604,8 +620,9 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
return 0;
}
-static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
- struct virtio_net_hdr *vnet_hdr)
+static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
{
memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -613,8 +630,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
- vnet_hdr->hdr_len = skb_headlen(skb);
- vnet_hdr->gso_size = sinfo->gso_size;
+ vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
+ vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
if (sinfo->gso_type & SKB_GSO_TCPV4)
vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -628,10 +645,13 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- vnet_hdr->csum_start = skb_checksum_start_offset(skb);
if (vlan_tx_tag_present(skb))
- vnet_hdr->csum_start += VLAN_HLEN;
- vnet_hdr->csum_offset = skb->csum_offset;
+ vnet_hdr->csum_start = cpu_to_macvtap16(q,
+ skb_checksum_start_offset(skb) + VLAN_HLEN);
+ else
+ vnet_hdr->csum_start = cpu_to_macvtap16(q,
+ skb_checksum_start_offset(skb));
+ vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
@@ -639,12 +659,12 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
- const struct iovec *iv, unsigned long total_len,
- size_t count, int noblock)
+ struct iov_iter *from, int noblock)
{
int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
struct sk_buff *skb;
struct macvlan_dev *vlan;
+ unsigned long total_len = iov_iter_count(from);
unsigned long len = total_len;
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -652,6 +672,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int copylen = 0;
bool zerocopy = false;
size_t linear;
+ ssize_t n;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -661,17 +682,20 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
goto err;
len -= vnet_hdr_len;
- err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- sizeof(vnet_hdr));
- if (err < 0)
+ err = -EFAULT;
+ n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
+ if (n != sizeof(vnet_hdr))
goto err;
+ iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
- vnet_hdr.hdr_len)
- vnet_hdr.hdr_len = vnet_hdr.csum_start +
- vnet_hdr.csum_offset + 2;
+ macvtap16_to_cpu(q, vnet_hdr.csum_start) +
+ macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
+ macvtap16_to_cpu(q, vnet_hdr.hdr_len))
+ vnet_hdr.hdr_len = cpu_to_macvtap16(q,
+ macvtap16_to_cpu(q, vnet_hdr.csum_start) +
+ macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
err = -EINVAL;
- if (vnet_hdr.hdr_len > len)
+ if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
goto err;
}
@@ -679,26 +703,26 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (unlikely(len < ETH_HLEN))
goto err;
- err = -EMSGSIZE;
- if (unlikely(count > UIO_MAXIOV))
- goto err;
-
if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
- copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ struct iov_iter i;
+
+ copylen = vnet_hdr.hdr_len ?
+ macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
- if (iov_pages(iv, vnet_hdr_len + copylen, count)
- <= MAX_SKB_FRAGS)
+ i = *from;
+ iov_iter_advance(&i, copylen);
+ if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
zerocopy = true;
}
if (!zerocopy) {
copylen = len;
- if (vnet_hdr.hdr_len > good_linear)
+ if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
linear = good_linear;
else
- linear = vnet_hdr.hdr_len;
+ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
}
skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
@@ -707,10 +731,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
goto err;
if (zerocopy)
- err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+ err = zerocopy_sg_from_iter(skb, from);
else {
- err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
- len);
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (!err && m && m->msg_control) {
struct ubuf_info *uarg = m->msg_control;
uarg->callback(uarg, false);
@@ -725,7 +748,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
+ err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
if (err)
goto err_kfree;
}
@@ -763,46 +786,42 @@ err:
return err;
}
-static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- ssize_t result = -ENOLINK;
struct macvtap_queue *q = file->private_data;
- result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
- file->f_flags & O_NONBLOCK);
- return result;
+ return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
- const struct iovec *iv, int len)
+ struct iov_iter *iter)
{
int ret;
int vnet_hdr_len = 0;
int vlan_offset = 0;
- int copied, total;
+ int total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
vnet_hdr_len = q->vnet_hdr_sz;
- if ((len -= vnet_hdr_len) < 0)
+ if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
- macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
+ macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+ sizeof(vnet_hdr))
return -EFAULT;
+
+ iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
}
- total = copied = vnet_hdr_len;
+ total = vnet_hdr_len;
total += skb->len;
- if (!vlan_tx_tag_present(skb))
- len = min_t(int, skb->len, len);
- else {
- int copy;
+ if (vlan_tx_tag_present(skb)) {
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
@@ -811,86 +830,77 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- len = min_t(int, skb->len + VLAN_HLEN, len);
total += VLAN_HLEN;
- copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+ if (ret || !iov_iter_count(iter))
goto done;
- copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = copy_to_iter(&veth, sizeof(veth), iter);
+ if (ret != sizeof(veth) || !iov_iter_count(iter))
goto done;
}
- ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+ ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
+ skb->len - vlan_offset);
done:
return ret ? ret : total;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q,
- const struct iovec *iv, unsigned long len,
+ struct iov_iter *to,
int noblock)
{
DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- while (len) {
+ if (!iov_iter_count(to))
+ return 0;
+
+ while (1) {
if (!noblock)
prepare_to_wait(sk_sleep(&q->sk), &wait,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
- if (!skb) {
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- /* Nothing to read, let's sleep */
- schedule();
- continue;
+ if (skb)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
}
- ret = macvtap_put_user(q, skb, iv, len);
- kfree_skb(skb);
- break;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ }
+ if (skb) {
+ ret = macvtap_put_user(q, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
}
-
if (!noblock)
finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
-static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct macvtap_queue *q = file->private_data;
- ssize_t len, ret = 0;
+ ssize_t len = iov_iter_count(to), ret;
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
+ ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
-out:
return ret;
}
@@ -991,7 +1001,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)arg;
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
- unsigned int u;
+ unsigned short u;
int __user *sp = argp;
int s;
int ret;
@@ -1003,11 +1013,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
ret = 0;
- if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
- (IFF_NO_PI | IFF_TAP))
+ if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
ret = -EINVAL;
else
- q->flags = u;
+ q->flags = (q->flags & ~MACVTAP_FEATURES) | u;
return ret;
@@ -1020,8 +1029,9 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
ret = 0;
+ u = q->flags;
if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
- put_user(q->flags, &ifr->ifr_flags))
+ put_user(u, &ifr->ifr_flags))
ret = -EFAULT;
macvtap_put_vlan(vlan);
rtnl_unlock();
@@ -1036,8 +1046,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return ret;
case TUNGETFEATURES:
- if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
- IFF_MULTI_QUEUE, up))
+ if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
return -EFAULT;
return 0;
@@ -1063,6 +1072,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->vnet_hdr_sz = s;
return 0;
+ case TUNGETVNETLE:
+ s = !!(q->flags & MACVTAP_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ q->flags |= MACVTAP_VNET_LE;
+ else
+ q->flags &= ~MACVTAP_VNET_LE;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
@@ -1091,8 +1115,10 @@ static const struct file_operations macvtap_fops = {
.owner = THIS_MODULE,
.open = macvtap_open,
.release = macvtap_release,
- .aio_read = macvtap_aio_read,
- .aio_write = macvtap_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = macvtap_read_iter,
+ .write_iter = macvtap_write_iter,
.poll = macvtap_poll,
.llseek = no_llseek,
.unlocked_ioctl = macvtap_ioctl,
@@ -1105,8 +1131,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
- m->msg_flags & MSG_DONTWAIT);
+ return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1117,8 +1142,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, m->msg_iov, total_len,
- flags & MSG_DONTWAIT);
+ ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
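Two independent conversions run through the macvtap changes above: the read/write paths now operate on struct iov_iter (copy_from_iter()/copy_to_iter(), iov_iter_count(), iov_iter_advance(), skb_copy_datagram_iter(), zerocopy_sg_from_iter()) instead of raw iovec arrays, wired up through the read_iter/write_iter file operations; and the virtio_net_hdr fields become guest-endian __virtio16 values converted via macvtap16_to_cpu()/cpu_to_macvtap16(), with the byte order selected per queue through TUNSETVNETLE. A compressed sketch of pulling the header off the iterator and validating it, with bar_queue/BAR_VNET_LE standing in for macvtap_queue/MACVTAP_VNET_LE:

/*
 * Sketch only: bar_queue and BAR_VNET_LE are illustrative stand-ins for
 * macvtap_queue and MACVTAP_VNET_LE; error handling is abbreviated.
 */
#include <linux/uio.h>
#include <linux/virtio_byteorder.h>
#include <linux/virtio_net.h>

#define BAR_VNET_LE 0x80000000

struct bar_queue {
	unsigned int flags;
	int vnet_hdr_sz;	/* may exceed sizeof(struct virtio_net_hdr) */
};

static int bar_pull_vnet_hdr(struct bar_queue *q, struct iov_iter *from,
			     struct virtio_net_hdr *hdr)
{
	u16 hdr_len;

	if (iov_iter_count(from) < q->vnet_hdr_sz)
		return -EINVAL;
	if (copy_from_iter(hdr, sizeof(*hdr), from) != sizeof(*hdr))
		return -EFAULT;
	/* Skip any tail padding configured via TUNSETVNETHDRSZ. */
	iov_iter_advance(from, q->vnet_hdr_sz - sizeof(*hdr));

	/* Header fields are guest-endian; honour the per-queue LE flag. */
	hdr_len = __virtio16_to_cpu(q->flags & BAR_VNET_LE, hdr->hdr_len);
	if (hdr_len > iov_iter_count(from))
		return -EINVAL;
	return 0;
}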
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 75472cf734de..a3c251b79f38 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
- depends on OF
+ depends on OF && HAS_IOMEM
---help---
Currently supports the AMD 10GbE PHY
@@ -119,8 +119,8 @@ config MICREL_PHY
Supports the KSZ9021, VSC8201, KS8001 PHYs.
config FIXED_PHY
- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
- depends on PHYLIB=y
+ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB
---help---
Adds the platform "fixed" MDIO Bus to cover the boards that use
PHYs that are not connected to the real MDIO bus.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index eb3b18b5978b..501ea7699a2d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
-obj-$(CONFIG_FIXED_PHY) += fixed.o
+obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index c456559f6e7f..903dc3dc9ea7 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
if (ret & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
- return 0;
+ /* Make sure the XPCS and SerDes are in compatible states */
+ return amd_xgbe_phy_xgmii_mode(phydev);
}
static int amd_xgbe_phy_config_init(struct phy_device *phydev)
@@ -1467,20 +1468,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
},
};
-static int __init amd_xgbe_phy_init(void)
-{
- return phy_drivers_register(amd_xgbe_phy_driver,
- ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-static void __exit amd_xgbe_phy_exit(void)
-{
- phy_drivers_unregister(amd_xgbe_phy_driver,
- ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-module_init(amd_xgbe_phy_init);
-module_exit(amd_xgbe_phy_exit);
+module_phy_driver(amd_xgbe_phy_driver);
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
{ XGBE_PHY_ID, XGBE_PHY_MASK },
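From here on, every PHY driver in this series drops the same open-coded init/exit pair in favour of module_phy_driver(). The macro itself is not shown in this diff; the sketch below is illustrative only (not the <linux/phy.h> definition) and simply regenerates the boilerplate being deleted:

#include <linux/module.h>
#include <linux/phy.h>

/* Illustrative stand-in for module_phy_driver(): emit the register/
 * unregister pair each driver used to open-code, keyed off the driver
 * array name.
 */
#define module_phy_driver_sketch(__drivers)				\
static int __init __drivers##_init(void)				\
{									\
	return phy_drivers_register(__drivers, ARRAY_SIZE(__drivers));	\
}									\
module_init(__drivers##_init);						\
static void __exit __drivers##_exit(void)				\
{									\
	phy_drivers_unregister(__drivers, ARRAY_SIZE(__drivers));	\
}									\
module_exit(__drivers##_exit)

Single-entry drivers (am79c, et1011c, dp83865 and qs6612 below) are first converted to one-element arrays so the same array-based helper applies.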
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index a3fb5ceb6487..65a488f82eb8 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -61,7 +61,7 @@ static int am79c_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver am79c_driver = {
+static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
@@ -73,20 +73,9 @@ static struct phy_driver am79c_driver = {
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init am79c_init(void)
-{
- return phy_driver_register(&am79c_driver);
-}
-
-static void __exit am79c_exit(void)
-{
- phy_driver_unregister(&am79c_driver);
-}
+} };
-module_init(am79c_init);
-module_exit(am79c_exit);
+module_phy_driver(am79c_driver);
static struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AM79C874, 0xfffffff0 },
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fdc1b418fa6a..f80e19ac6704 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -352,19 +352,7 @@ static struct phy_driver at803x_driver[] = {
},
} };
-static int __init atheros_init(void)
-{
- return phy_drivers_register(at803x_driver,
- ARRAY_SIZE(at803x_driver));
-}
-
-static void __exit atheros_exit(void)
-{
- phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
-}
-
-module_init(atheros_init);
-module_exit(atheros_exit);
+module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, 0xffffffef },
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index ac55b0807853..830ec31f952f 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -100,20 +100,7 @@ static struct phy_driver bcm63xx_driver[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init bcm63xx_phy_init(void)
-{
- return phy_drivers_register(bcm63xx_driver,
- ARRAY_SIZE(bcm63xx_driver));
-}
-
-static void __exit bcm63xx_phy_exit(void)
-{
- phy_drivers_unregister(bcm63xx_driver,
- ARRAY_SIZE(bcm63xx_driver));
-}
-
-module_init(bcm63xx_phy_init);
-module_exit(bcm63xx_phy_exit);
+module_phy_driver(bcm63xx_driver);
static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 1d211d369039..974ec4515269 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -39,45 +39,15 @@
#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
#define CORE_EXPB0 0xb0
-static int bcm7445_config_init(struct phy_device *phydev)
-{
- int ret;
- const struct bcm7445_regs {
- int reg;
- u16 value;
- } bcm7445_regs_cfg[] = {
- /* increases ADC latency by 24ns */
- { MII_BCM54XX_EXP_SEL, 0x0038 },
- { MII_BCM54XX_EXP_DATA, 0xAB95 },
- /* increases internal 1V LDO voltage by 5% */
- { MII_BCM54XX_EXP_SEL, 0x2038 },
- { MII_BCM54XX_EXP_DATA, 0xBB22 },
- /* reduce RX low pass filter corner frequency */
- { MII_BCM54XX_EXP_SEL, 0x6038 },
- { MII_BCM54XX_EXP_DATA, 0xFFC5 },
- /* reduce RX high pass filter corner frequency */
- { MII_BCM54XX_EXP_SEL, 0x003a },
- { MII_BCM54XX_EXP_DATA, 0x2002 },
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
- ret = phy_write(phydev,
- bcm7445_regs_cfg[i].reg,
- bcm7445_regs_cfg[i].value);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static void phy_write_exp(struct phy_device *phydev,
u16 reg, u16 value)
{
@@ -102,7 +72,16 @@ static void phy_write_misc(struct phy_device *phydev,
phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
}
-static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+static void r_rc_cal_reset(struct phy_device *phydev)
+{
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_AL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0000);
+}
+
+static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
{
/* Increase VCO range to prevent unlocking problem of PLL at low
* temp
@@ -123,11 +102,7 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
/* Switch to CORE_BASE1E */
phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
- /* Reset R_CAL/RC_CAL Engine */
- phy_write_exp(phydev, CORE_EXPB0, 0x0010);
-
- /* Disable Reset R_CAL/RC_CAL Engine */
- phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+ r_rc_cal_reset(phydev);
/* write AFE_RXCONFIG_0 */
phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
@@ -147,6 +122,71 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
+{
+ /* AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
+
+ /* AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+ /* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */
+ phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
+
+ /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
+ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+
+ /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+ /* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */
+ phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
+
+ /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+ * offset for HT=0 code
+ */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+ /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+ /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+ phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+ /* Reset R_CAL/RC_CAL engine */
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
+{
+ /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+ /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+ /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+ * offset for HT=0 code
+ */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+ /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+ /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+ phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+ /* Reset R_CAL/RC_CAL engine */
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
static int bcm7xxx_apd_enable(struct phy_device *phydev)
{
int val;
@@ -200,15 +240,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
int ret = 0;
- dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch);
+ pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
+ dev_name(&phydev->dev), phydev->drv->name, rev, patch);
switch (rev) {
- case 0xa0:
case 0xb0:
- ret = bcm7445_config_init(phydev);
+ ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+ break;
+ case 0xd0:
+ ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
+ break;
+ case 0xe0:
+ case 0xf0:
+ /* Rev G0 introduces a roll over */
+ case 0x10:
+ ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
break;
default:
- ret = bcm7xxx_28nm_afe_config_init(phydev);
break;
}
@@ -336,7 +384,7 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
.features = PHY_GBIT_FEATURES | \
SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
.flags = PHY_IS_INTERNAL, \
- .config_init = bcm7xxx_28nm_afe_config_init, \
+ .config_init = bcm7xxx_28nm_config_init, \
.config_aneg = genphy_config_aneg, \
.read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_resume, \
@@ -416,20 +464,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ }
};
-static int __init bcm7xxx_phy_init(void)
-{
- return phy_drivers_register(bcm7xxx_driver,
- ARRAY_SIZE(bcm7xxx_driver));
-}
-
-static void __exit bcm7xxx_phy_exit(void)
-{
- phy_drivers_unregister(bcm7xxx_driver,
- ARRAY_SIZE(bcm7xxx_driver));
-}
-
-module_init(bcm7xxx_phy_init);
-module_exit(bcm7xxx_phy_exit);
+module_phy_driver(bcm7xxx_driver);
MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 799789518e87..1eca20452f03 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -216,18 +216,6 @@ static struct phy_driver bcm87xx_driver[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init bcm87xx_init(void)
-{
- return phy_drivers_register(bcm87xx_driver,
- ARRAY_SIZE(bcm87xx_driver));
-}
-module_init(bcm87xx_init);
-
-static void __exit bcm87xx_exit(void)
-{
- phy_drivers_unregister(bcm87xx_driver,
- ARRAY_SIZE(bcm87xx_driver));
-}
-module_exit(bcm87xx_exit);
+module_phy_driver(bcm87xx_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 854f2c9a7b2b..a52afb26421b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -654,20 +654,7 @@ static struct phy_driver broadcom_drivers[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init broadcom_init(void)
-{
- return phy_drivers_register(broadcom_drivers,
- ARRAY_SIZE(broadcom_drivers));
-}
-
-static void __exit broadcom_exit(void)
-{
- phy_drivers_unregister(broadcom_drivers,
- ARRAY_SIZE(broadcom_drivers));
-}
-
-module_init(broadcom_init);
-module_exit(broadcom_exit);
+module_phy_driver(broadcom_drivers);
static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index b57ce0cc9657..27f5464899d4 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -129,20 +129,7 @@ static struct phy_driver cis820x_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init cicada_init(void)
-{
- return phy_drivers_register(cis820x_driver,
- ARRAY_SIZE(cis820x_driver));
-}
-
-static void __exit cicada_exit(void)
-{
- phy_drivers_unregister(cis820x_driver,
- ARRAY_SIZE(cis820x_driver));
-}
-
-module_init(cicada_init);
-module_exit(cicada_exit);
+module_phy_driver(cis820x_driver);
static struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index d2c08f625a41..0d16c7d9e1bf 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -182,20 +182,7 @@ static struct phy_driver dm91xx_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init davicom_init(void)
-{
- return phy_drivers_register(dm91xx_driver,
- ARRAY_SIZE(dm91xx_driver));
-}
-
-static void __exit davicom_exit(void)
-{
- phy_drivers_unregister(dm91xx_driver,
- ARRAY_SIZE(dm91xx_driver));
-}
-
-module_init(davicom_init);
-module_exit(davicom_exit);
+module_phy_driver(dm91xx_driver);
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index a8eb19ec3183..a907743816a8 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -87,7 +87,7 @@ static int et1011c_read_status(struct phy_device *phydev)
return ret;
}
-static struct phy_driver et1011c_driver = {
+static struct phy_driver et1011c_driver[] = { {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
@@ -96,20 +96,9 @@ static struct phy_driver et1011c_driver = {
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init et1011c_init(void)
-{
- return phy_driver_register(&et1011c_driver);
-}
-
-static void __exit et1011c_exit(void)
-{
- phy_driver_unregister(&et1011c_driver);
-}
+} };
-module_init(et1011c_init);
-module_exit(et1011c_exit);
+module_phy_driver(et1011c_driver);
static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed_phy.c
index 47872caa0081..3ad0e6e16c39 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -274,6 +274,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return phy;
}
+EXPORT_SYMBOL_GPL(fixed_phy_register);
static int __init fixed_mdio_bus_init(void)
{
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 97bf58bf4939..8644f039d922 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -253,20 +253,7 @@ static struct phy_driver icplus_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init icplus_init(void)
-{
- return phy_drivers_register(icplus_driver,
- ARRAY_SIZE(icplus_driver));
-}
-
-static void __exit icplus_exit(void)
-{
- phy_drivers_unregister(icplus_driver,
- ARRAY_SIZE(icplus_driver));
-}
-
-module_init(icplus_init);
-module_exit(icplus_exit);
+module_phy_driver(icplus_driver);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 9108f3191701..a3a5a703635b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -312,20 +312,7 @@ static struct phy_driver lxt97x_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init lxt_init(void)
-{
- return phy_drivers_register(lxt97x_driver,
- ARRAY_SIZE(lxt97x_driver));
-}
-
-static void __exit lxt_exit(void)
-{
- phy_drivers_unregister(lxt97x_driver,
- ARRAY_SIZE(lxt97x_driver));
-}
-
-module_init(lxt_init);
-module_exit(lxt_exit);
+module_phy_driver(lxt97x_driver);
static struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 225c033b08f3..1b1698f98818 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -54,6 +54,9 @@
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
#define MII_M1145_RGMII_TX_DELAY 0x0002
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1145_HWCFG_MODE_MASK 0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
#define MII_M1145_HWCFG_MODE_MASK 0xf
@@ -123,6 +126,9 @@
#define MII_M1116R_CONTROL_REG_MAC 21
+#define MII_88E3016_PHY_SPEC_CTRL 0x10
+#define MII_88E3016_DISABLE_SCRAMBLER 0x0200
+#define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
@@ -439,6 +445,25 @@ static int m88e1116r_config_init(struct phy_device *phydev)
return 0;
}
+static int m88e3016_config_init(struct phy_device *phydev)
+{
+ int reg;
+
+ /* Enable Scrambler and Auto-Crossover */
+ reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
+ reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
+
+ reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
@@ -625,6 +650,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
+ int temp;
/* Take care of errata E0 & E1 */
err = phy_write(phydev, 0x1d, 0x001b);
@@ -682,7 +708,7 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+ temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
if (temp < 0)
return temp;
@@ -789,6 +815,12 @@ static int marvell_read_status(struct phy_device *phydev)
return 0;
}
+static int marvell_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+ return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
+}
+
static int m88e1121_did_interrupt(struct phy_device *phydev)
{
int imask;
@@ -1069,22 +1101,26 @@ static struct phy_driver marvell_drivers[] = {
.suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
+ {
+ .phy_id = MARVELL_PHY_ID_88E3016,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E3016",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .config_init = &m88e3016_config_init,
+ .aneg_done = &marvell_aneg_done,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .driver = { .owner = THIS_MODULE },
+ },
};
-static int __init marvell_init(void)
-{
- return phy_drivers_register(marvell_drivers,
- ARRAY_SIZE(marvell_drivers));
-}
-
-static void __exit marvell_exit(void)
-{
- phy_drivers_unregister(marvell_drivers,
- ARRAY_SIZE(marvell_drivers));
-}
-
-module_init(marvell_init);
-module_exit(marvell_exit);
+module_phy_driver(marvell_drivers);
static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
@@ -1098,6 +1134,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
{ }
};
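The new 88E3016 entry supplies an aneg_done hook that reports "resolved" from the Marvell-specific status register instead of the standard BMSR bit. How phylib consumes the hook is not part of this diff; the sketch below is only an assumption about the dispatch shape, with genphy_aneg_done() as the generic fallback:

#include <linux/phy.h>

/* Assumed dispatch: prefer a driver-provided aneg_done (such as
 * marvell_aneg_done above), otherwise fall back to the generic
 * BMSR_ANEGCOMPLETE test.
 */
static int phy_aneg_done_sketch(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}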
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 5b643e588e8f..6deac6d32f57 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -199,7 +199,6 @@ static struct of_device_id unimac_mdio_ids[] = {
static struct platform_driver unimac_mdio_driver = {
.driver = {
.name = "unimac-mdio",
- .owner = THIS_MODULE,
.of_match_table = unimac_mdio_ids,
},
.probe = unimac_mdio_probe,
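The `.owner = THIS_MODULE` lines removed from this and the following platform drivers are redundant because the platform core fills in the owner during registration. A sketch of that mechanism, stated as an assumption rather than a quote of the header:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Assumption about the core helper: the registration macro passes
 * THIS_MODULE on the driver's behalf, so drv->driver.owner is populated
 * without each driver spelling it out.
 */
#define platform_driver_register_sketch(drv) \
	__platform_driver_register(drv, THIS_MODULE)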
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 5f1a2250018f..0a0578a592b8 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -259,7 +259,6 @@ static struct platform_driver mdio_gpio_driver = {
.remove = mdio_gpio_remove,
.driver = {
.name = "mdio-gpio",
- .owner = THIS_MODULE,
.of_match_table = mdio_gpio_of_match,
},
};
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 096695163491..320eb15315c8 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -14,13 +14,13 @@
#include <linux/mdio-mux.h>
#include <linux/of_gpio.h>
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
#define MDIO_MUX_GPIO_MAX_BITS 8
struct mdio_mux_gpio_state {
- int gpio[MDIO_MUX_GPIO_MAX_BITS];
+ struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
unsigned int num_gpios;
void *mux_handle;
};
@@ -28,29 +28,23 @@ struct mdio_mux_gpio_state {
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
- int change;
+ int values[MDIO_MUX_GPIO_MAX_BITS];
unsigned int n;
struct mdio_mux_gpio_state *s = data;
if (current_child == desired_child)
return 0;
- change = current_child == -1 ? -1 : current_child ^ desired_child;
-
for (n = 0; n < s->num_gpios; n++) {
- if (change & 1)
- gpio_set_value_cansleep(s->gpio[n],
- (desired_child & 1) != 0);
- change >>= 1;
- desired_child >>= 1;
+ values[n] = (desired_child >> n) & 1;
}
+ gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
return 0;
}
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
- enum of_gpio_flags f;
struct mdio_mux_gpio_state *s;
int num_gpios;
unsigned int n;
@@ -70,22 +64,14 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
s->num_gpios = num_gpios;
for (n = 0; n < num_gpios; ) {
- int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f);
- if (gpio < 0) {
- r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio;
+ struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio)) {
+ r = PTR_ERR(gpio);
goto err;
}
s->gpio[n] = gpio;
-
n++;
-
- r = gpio_request(gpio, "mdio_mux_gpio");
- if (r)
- goto err;
-
- r = gpio_direction_output(gpio, 0);
- if (r)
- goto err;
}
r = mdio_mux_init(&pdev->dev,
@@ -98,15 +84,18 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
err:
while (n) {
n--;
- gpio_free(s->gpio[n]);
+ gpiod_put(s->gpio[n]);
}
return r;
}
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
+ unsigned int n;
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
+ for (n = 0; n < s->num_gpios; n++)
+ gpiod_put(s->gpio[n]);
return 0;
}
@@ -125,7 +114,6 @@ MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);
static struct platform_driver mdio_mux_gpio_driver = {
.driver = {
.name = "mdio-mux-gpio",
- .owner = THIS_MODULE,
.of_match_table = mdio_mux_gpio_match,
},
.probe = mdio_mux_gpio_probe,
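The converted switch_fn above no longer tracks which select bits changed; it recomputes every line and hands the whole set to the gpiod array API in one call. The same pattern in isolation, as a small sketch (SELECT_MAX_BITS standing in for MDIO_MUX_GPIO_MAX_BITS, names illustrative):

#include <linux/gpio/consumer.h>

#define SELECT_MAX_BITS	8

/* Decompose the desired child bus number into per-line levels and push
 * them out with a single gpiod array call, as the switch_fn now does.
 */
static void set_select_lines(struct gpio_desc **desc, unsigned int num, int child)
{
	int levels[SELECT_MAX_BITS];
	unsigned int n;

	for (n = 0; n < num; n++)
		levels[n] = (child >> n) & 1;

	gpiod_set_array_cansleep(num, desc, levels);
}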
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 1656785ff339..0aa985c74014 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -156,7 +156,6 @@ MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
static struct platform_driver mdio_mux_mmioreg_driver = {
.driver = {
.name = "mdio-mux-mmioreg",
- .owner = THIS_MODULE,
.of_match_table = mdio_mux_mmioreg_match,
},
.probe = mdio_mux_mmioreg_probe,
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index a51ed92fbada..c81052486edc 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -263,7 +263,6 @@ MODULE_DEVICE_TABLE(of, octeon_mdiobus_match);
static struct platform_driver octeon_mdiobus_driver = {
.driver = {
.name = "mdio-octeon",
- .owner = THIS_MODULE,
.of_match_table = octeon_mdiobus_match,
},
.probe = octeon_mdiobus_probe,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8c2a29a9bd7f..c530de1e63f5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -6,6 +6,7 @@
* Author: David J. Choi
*
* Copyright (c) 2010-2013 Micrel, Inc.
+ * Copyright (c) 2014 Johan Hovold <johan@kernel.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -30,30 +31,32 @@
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
-#define KSZPHY_OMSO_B_CAST_OFF (1 << 9)
-#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1)
-#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0)
+#define KSZPHY_OMSO_B_CAST_OFF BIT(9)
+#define KSZPHY_OMSO_RMII_OVERRIDE BIT(1)
+#define KSZPHY_OMSO_MII_OVERRIDE BIT(0)
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
-#define KSZPHY_INTCS_JABBER (1 << 15)
-#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14)
-#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13)
-#define KSZPHY_INTCS_PARELLEL (1 << 12)
-#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11)
-#define KSZPHY_INTCS_LINK_DOWN (1 << 10)
-#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9)
-#define KSZPHY_INTCS_LINK_UP (1 << 8)
+#define KSZPHY_INTCS_JABBER BIT(15)
+#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
+#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
+#define KSZPHY_INTCS_PARELLEL BIT(12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
+#define KSZPHY_INTCS_LINK_DOWN BIT(10)
+#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
+#define KSZPHY_INTCS_LINK_UP BIT(8)
#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
KSZPHY_INTCS_LINK_DOWN)
-/* general PHY control reg in vendor specific block. */
-#define MII_KSZPHY_CTRL 0x1F
+/* PHY Control 1 */
+#define MII_KSZPHY_CTRL_1 0x1e
+
+/* PHY Control 2 / PHY Control (if no PHY Control 1) */
+#define MII_KSZPHY_CTRL_2 0x1f
+#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
/* bitmap of PHY register to set interrupt mode */
-#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
-#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
-#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
-#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+#define KSZPHY_CTRL_INT_ACTIVE_HIGH BIT(9)
+#define KSZPHY_RMII_REF_CLK_SEL BIT(7)
/* Write/read to/from extended registers */
#define MII_KSZPHY_EXTREG 0x0b
@@ -69,20 +72,46 @@
#define PS_TO_REG 200
-static int ksz_config_flags(struct phy_device *phydev)
-{
- int regval;
+struct kszphy_type {
+ u32 led_mode_reg;
+ u16 interrupt_level_mask;
+ bool has_broadcast_disable;
+ bool has_rmii_ref_clk_sel;
+};
- if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) {
- regval = phy_read(phydev, MII_KSZPHY_CTRL);
- if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK)
- regval |= KSZ8051_RMII_50MHZ_CLK;
- else
- regval &= ~KSZ8051_RMII_50MHZ_CLK;
- return phy_write(phydev, MII_KSZPHY_CTRL, regval);
- }
- return 0;
-}
+struct kszphy_priv {
+ const struct kszphy_type *type;
+ int led_mode;
+ bool rmii_ref_clk_sel;
+ bool rmii_ref_clk_sel_val;
+};
+
+static const struct kszphy_type ksz8021_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_rmii_ref_clk_sel = true,
+};
+
+static const struct kszphy_type ksz8041_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_1,
+};
+
+static const struct kszphy_type ksz8051_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+};
+
+static const struct kszphy_type ksz8081_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
+ .has_rmii_ref_clk_sel = true,
+};
+
+static const struct kszphy_type ks8737_type = {
+ .interrupt_level_mask = BIT(14),
+};
+
+static const struct kszphy_type ksz9021_type = {
+ .interrupt_level_mask = BIT(14),
+};
static int kszphy_extended_write(struct phy_device *phydev,
u32 regnum, u16 val)
@@ -108,112 +137,137 @@ static int kszphy_ack_interrupt(struct phy_device *phydev)
return (rc < 0) ? rc : 0;
}
-static int kszphy_set_interrupt(struct phy_device *phydev)
+static int kszphy_config_intr(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
int temp;
- temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
- KSZPHY_INTCS_ALL : 0;
- return phy_write(phydev, MII_KSZPHY_INTCS, temp);
-}
+ u16 mask;
-static int kszphy_config_intr(struct phy_device *phydev)
-{
- int temp, rc;
+ if (type && type->interrupt_level_mask)
+ mask = type->interrupt_level_mask;
+ else
+ mask = KSZPHY_CTRL_INT_ACTIVE_HIGH;
/* set the interrupt pin active low */
temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH;
+ if (temp < 0)
+ return temp;
+ temp &= ~mask;
phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
-}
-static int ksz9021_config_intr(struct phy_device *phydev)
-{
- int temp, rc;
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ temp = KSZPHY_INTCS_ALL;
+ else
+ temp = 0;
- /* set the interrupt pin active low */
- temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH;
- phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
+ return phy_write(phydev, MII_KSZPHY_INTCS, temp);
}
-static int ks8737_config_intr(struct phy_device *phydev)
+static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val)
{
- int temp, rc;
+ int ctrl;
- /* set the interrupt pin active low */
- temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH;
- phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
-}
-
-static int kszphy_setup_led(struct phy_device *phydev,
- unsigned int reg, unsigned int shift)
-{
+ ctrl = phy_read(phydev, MII_KSZPHY_CTRL);
+ if (ctrl < 0)
+ return ctrl;
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
- int rc, temp;
- u32 val;
+ if (val)
+ ctrl |= KSZPHY_RMII_REF_CLK_SEL;
+ else
+ ctrl &= ~KSZPHY_RMII_REF_CLK_SEL;
- if (!of_node && dev->parent->of_node)
- of_node = dev->parent->of_node;
+ return phy_write(phydev, MII_KSZPHY_CTRL, ctrl);
+}
- if (of_property_read_u32(of_node, "micrel,led-mode", &val))
- return 0;
+static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val)
+{
+ int rc, temp, shift;
+
+ switch (reg) {
+ case MII_KSZPHY_CTRL_1:
+ shift = 14;
+ break;
+ case MII_KSZPHY_CTRL_2:
+ shift = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
temp = phy_read(phydev, reg);
- if (temp < 0)
- return temp;
+ if (temp < 0) {
+ rc = temp;
+ goto out;
+ }
temp &= ~(3 << shift);
temp |= val << shift;
rc = phy_write(phydev, reg, temp);
+out:
+ if (rc < 0)
+ dev_err(&phydev->dev, "failed to set led mode\n");
- return rc < 0 ? rc : 0;
+ return rc;
}
-static int kszphy_config_init(struct phy_device *phydev)
+/* Disable PHY address 0 as the broadcast address, so that it can be used as a
+ * unique (non-broadcast) address on a shared bus.
+ */
+static int kszphy_broadcast_disable(struct phy_device *phydev)
{
- return 0;
-}
+ int ret;
-static int kszphy_config_init_led8041(struct phy_device *phydev)
-{
- /* single led control, register 0x1e bits 15..14 */
- return kszphy_setup_led(phydev, 0x1e, 14);
+ ret = phy_read(phydev, MII_KSZPHY_OMSO);
+ if (ret < 0)
+ goto out;
+
+ ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF);
+out:
+ if (ret)
+ dev_err(&phydev->dev, "failed to disable broadcast address\n");
+
+ return ret;
}
-static int ksz8021_config_init(struct phy_device *phydev)
+static int kszphy_config_init(struct phy_device *phydev)
{
- const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
- int rc;
+ struct kszphy_priv *priv = phydev->priv;
+ const struct kszphy_type *type;
+ int ret;
- rc = kszphy_setup_led(phydev, 0x1f, 4);
- if (rc)
- dev_err(&phydev->dev, "failed to set led mode\n");
+ if (!priv)
+ return 0;
- rc = ksz_config_flags(phydev);
- if (rc < 0)
- return rc;
- rc = phy_write(phydev, MII_KSZPHY_OMSO, val);
- return rc < 0 ? rc : 0;
+ type = priv->type;
+
+ if (type->has_broadcast_disable)
+ kszphy_broadcast_disable(phydev);
+
+ if (priv->rmii_ref_clk_sel) {
+ ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
+ if (ret) {
+ dev_err(&phydev->dev, "failed to set rmii reference clock\n");
+ return ret;
+ }
+ }
+
+ if (priv->led_mode >= 0)
+ kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+
+ return 0;
}
-static int ks8051_config_init(struct phy_device *phydev)
+static int ksz8021_config_init(struct phy_device *phydev)
{
int rc;
- rc = kszphy_setup_led(phydev, 0x1f, 4);
+ rc = kszphy_config_init(phydev);
if (rc)
- dev_err(&phydev->dev, "failed to set led mode\n");
+ return rc;
+
+ rc = kszphy_broadcast_disable(phydev);
- rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
}
@@ -394,8 +448,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
-#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
-#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
static int ksz8873mll_read_status(struct phy_device *phydev)
{
int regval;
@@ -446,24 +500,62 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
{
}
-static int ksz8021_probe(struct phy_device *phydev)
+static int kszphy_probe(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ struct device_node *np = phydev->dev.of_node;
+ struct kszphy_priv *priv;
struct clk *clk;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->type = type;
+
+ if (type->led_mode_reg) {
+ ret = of_property_read_u32(np, "micrel,led-mode",
+ &priv->led_mode);
+ if (ret)
+ priv->led_mode = -1;
+
+ if (priv->led_mode > 3) {
+ dev_err(&phydev->dev, "invalid led mode: 0x%02x\n",
+ priv->led_mode);
+ priv->led_mode = -1;
+ }
+ } else {
+ priv->led_mode = -1;
+ }
clk = devm_clk_get(&phydev->dev, "rmii-ref");
if (!IS_ERR(clk)) {
unsigned long rate = clk_get_rate(clk);
+ bool rmii_ref_clk_sel_25_mhz;
+
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
+ "micrel,rmii-reference-clock-select-25-mhz");
if (rate > 24500000 && rate < 25500000) {
- phydev->dev_flags |= MICREL_PHY_25MHZ_CLK;
+ priv->rmii_ref_clk_sel_val = rmii_ref_clk_sel_25_mhz;
} else if (rate > 49500000 && rate < 50500000) {
- phydev->dev_flags |= MICREL_PHY_50MHZ_CLK;
+ priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz;
} else {
dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate);
return -EINVAL;
}
}
+ /* Support legacy board-file configuration */
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ priv->rmii_ref_clk_sel = true;
+ priv->rmii_ref_clk_sel_val = true;
+ }
+
return 0;
}
@@ -474,11 +566,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KS8737",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ks8737_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -489,7 +582,8 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .probe = ksz8021_probe,
+ .driver_data = &ksz8021_type,
+ .probe = kszphy_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -505,7 +599,8 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .probe = ksz8021_probe,
+ .driver_data = &ksz8021_type,
+ .probe = kszphy_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -521,7 +616,9 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -536,7 +633,9 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -551,7 +650,9 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = ks8051_config_init,
+ .driver_data = &ksz8051_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -565,7 +666,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -579,6 +682,8 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00fffff0,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz8081_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -607,11 +712,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
.config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ksz9021_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_mmd_indirect = ksz9021_rd_mmd_phyreg,
@@ -623,11 +729,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9031 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ksz9021_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
@@ -657,20 +764,7 @@ static struct phy_driver ksphy_driver[] = {
.driver = { .owner = THIS_MODULE, },
} };
-static int __init ksphy_init(void)
-{
- return phy_drivers_register(ksphy_driver,
- ARRAY_SIZE(ksphy_driver));
-}
-
-static void __exit ksphy_exit(void)
-{
- phy_drivers_unregister(ksphy_driver,
- ARRAY_SIZE(ksphy_driver));
-}
-
-module_init(ksphy_init);
-module_exit(ksphy_exit);
+module_phy_driver(ksphy_driver);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9a5f234d95b0..0a7b9c7f09a2 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -129,7 +129,7 @@ static int ns_config_init(struct phy_device *phydev)
return ns_ack_interrupt(phydev);
}
-static struct phy_driver dp83865_driver = {
+static struct phy_driver dp83865_driver[] = { {
.phy_id = DP83865_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
@@ -141,25 +141,14 @@ static struct phy_driver dp83865_driver = {
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
.driver = {.owner = THIS_MODULE,}
-};
+} };
-static int __init ns_init(void)
-{
- return phy_driver_register(&dp83865_driver);
-}
-
-static void __exit ns_exit(void)
-{
- phy_driver_unregister(&dp83865_driver);
-}
+module_phy_driver(dp83865_driver);
MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-module_init(ns_init);
-module_exit(ns_exit);
-
static struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index fe0d0a15d5e1..be4c6f7c3645 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -111,7 +111,7 @@ static int qs6612_config_intr(struct phy_device *phydev)
}
-static struct phy_driver qs6612_driver = {
+static struct phy_driver qs6612_driver[] = { {
.phy_id = 0x00181440,
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
@@ -123,20 +123,9 @@ static struct phy_driver qs6612_driver = {
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init qs6612_init(void)
-{
- return phy_driver_register(&qs6612_driver);
-}
-
-static void __exit qs6612_exit(void)
-{
- phy_driver_unregister(&qs6612_driver);
-}
+} };
-module_init(qs6612_init);
-module_exit(qs6612_exit);
+module_phy_driver(qs6612_driver);
static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 45483fdfbe06..96a0f0fab3ca 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -101,18 +101,7 @@ static struct phy_driver realtek_drvs[] = {
},
};
-static int __init realtek_init(void)
-{
- return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
-}
-
-static void __exit realtek_exit(void)
-{
- phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
-}
-
-module_init(realtek_init);
-module_exit(realtek_exit);
+module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index a4b08198fb9f..c0f6479e19d4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -250,24 +250,12 @@ static struct phy_driver smsc_phy_driver[] = {
.driver = { .owner = THIS_MODULE, }
} };
-static int __init smsc_init(void)
-{
- return phy_drivers_register(smsc_phy_driver,
- ARRAY_SIZE(smsc_phy_driver));
-}
-
-static void __exit smsc_exit(void)
-{
- phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
-}
+module_phy_driver(smsc_phy_driver);
MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-module_init(smsc_init);
-module_exit(smsc_exit);
-
static struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index eab57fc5b967..46530159256b 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -353,7 +353,9 @@ static int ks8995_probe(struct spi_device *spi)
static int ks8995_remove(struct spi_device *spi)
{
- sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
return 0;
}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 5e1eb138916f..3fc199b773e6 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -112,20 +112,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.driver = {.owner = THIS_MODULE,}
} };
-static int __init ste10Xp_init(void)
-{
- return phy_drivers_register(ste10xp_pdriver,
- ARRAY_SIZE(ste10xp_pdriver));
-}
-
-static void __exit ste10Xp_exit(void)
-{
- phy_drivers_unregister(ste10xp_pdriver,
- ARRAY_SIZE(ste10xp_pdriver));
-}
-
-module_init(ste10Xp_init);
-module_exit(ste10Xp_exit);
+module_phy_driver(ste10xp_pdriver);
static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 5dc0935da99c..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -311,19 +311,7 @@ static struct phy_driver vsc82xx_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init vsc82xx_init(void)
-{
- return phy_drivers_register(vsc82xx_driver,
- ARRAY_SIZE(vsc82xx_driver));
-}
-
-static void __exit vsc82xx_exit(void)
-{
- phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
-}
-
-module_init(vsc82xx_init);
-module_exit(vsc82xx_exit);
+module_phy_driver(vsc82xx_driver);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 794a47329368..af034dba9bd6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -417,6 +417,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
ssize_t ret;
struct sk_buff *skb = NULL;
struct iovec iov;
+ struct iov_iter to;
ret = count;
@@ -462,7 +463,8 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
ret = -EFAULT;
iov.iov_base = buf;
iov.iov_len = count;
- if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
+ iov_iter_init(&to, READ, &iov, 1, count);
+ if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
goto outf;
ret = skb->len;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 6c9c16d76935..d2408a5e43a6 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *)&ph->tag[0];
- error = memcpy_fromiovec(start, m->msg_iov, total_len);
+ error = memcpy_from_msg(start, m, total_len);
if (error < 0) {
kfree_skb(skb);
goto end;
@@ -981,7 +981,7 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
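pppoe_sendmsg()/pppoe_recvmsg() now go through memcpy_from_msg() and skb_copy_datagram_msg(). Their exact definitions are not part of this hunk; treat the sketch below as an assumption about their shape, which may still route through the older iovec helpers at this point in the series:

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Assumed shape of the wrappers: the point of the conversion is that
 * protocol code stops dereferencing msghdr internals directly.
 */
static inline int memcpy_from_msg_sketch(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int skb_copy_datagram_msg_sketch(const struct sk_buff *skb, int offset,
					       struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(skb, offset, &msg->msg_iter, size);
}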
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2368395d8ae5..93e224217e24 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1179,6 +1179,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_enable_netpoll;
}
+ if (!(dev->features & NETIF_F_LRO))
+ dev_disable_lro(port_dev);
+
err = netdev_rx_handler_register(port_dev, team_handle_frame,
port);
if (err) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4d332dc93b70..8c8dc16839a7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -71,6 +71,7 @@
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
@@ -103,6 +104,17 @@ do { \
} while (0)
#endif
+/* TUN device flags */
+
+/* IFF_ATTACH_QUEUE is never stored in device flags,
+ * overload it to mean fasync when stored there.
+ */
+#define TUN_FASYNC IFF_ATTACH_QUEUE
+/* High bits in flags field are unused. */
+#define TUN_VNET_LE 0x80000000
+
+#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
+ IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
@@ -196,6 +208,16 @@ struct tun_struct {
u32 flow_count;
};
+static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
+{
+ return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
+}
+
+static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
+{
+ return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
+}
+
static inline u32 tun_hashfn(u32 rxhash)
{
return rxhash & 0x3ff;
@@ -472,7 +494,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
netif_carrier_off(tun->dev);
- if (!(tun->flags & TUN_PERSIST) &&
+ if (!(tun->flags & IFF_PERSIST) &&
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
@@ -523,7 +545,7 @@ static void tun_detach_all(struct net_device *dev)
}
BUG_ON(tun->numdisabled != 0);
- if (tun->flags & TUN_PERSIST)
+ if (tun->flags & IFF_PERSIST)
module_put(THIS_MODULE);
}
@@ -541,7 +563,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
goto out;
err = -EBUSY;
- if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+ if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
goto out;
err = -E2BIG;
@@ -818,7 +840,7 @@ drop:
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return NET_XMIT_DROP;
}
static void tun_net_mclist(struct net_device *dev)
@@ -920,7 +942,7 @@ static void tun_net_init(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
+ case IFF_TUN:
dev->netdev_ops = &tun_netdev_ops;
/* Point-to-Point TUN Device */
@@ -934,7 +956,7 @@ static void tun_net_init(struct net_device *dev)
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
@@ -1011,75 +1033,80 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
- void *msg_control, const struct iovec *iv,
- size_t total_len, size_t count, int noblock)
+ void *msg_control, struct iov_iter *from,
+ int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
+ size_t total_len = iov_iter_count(from);
size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
int good_linear;
- int offset = 0;
int copylen;
bool zerocopy = false;
int err;
u32 rxhash;
+ ssize_t n;
- if (!(tun->flags & TUN_NO_PI)) {
+ if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
len -= sizeof(pi);
- if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+ n = copy_from_iter(&pi, sizeof(pi), from);
+ if (n != sizeof(pi))
return -EFAULT;
- offset += sizeof(pi);
}
- if (tun->flags & TUN_VNET_HDR) {
+ if (tun->flags & IFF_VNET_HDR) {
if (len < tun->vnet_hdr_sz)
return -EINVAL;
len -= tun->vnet_hdr_sz;
- if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ n = copy_from_iter(&gso, sizeof(gso), from);
+ if (n != sizeof(gso))
return -EFAULT;
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
- gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
+ tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
+ gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
- if (gso.hdr_len > len)
+ if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL;
- offset += tun->vnet_hdr_sz;
+ iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
}
- if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
+ if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN;
if (unlikely(len < ETH_HLEN ||
- (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
+ (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
return -EINVAL;
}
good_linear = SKB_MAX_HEAD(align);
if (msg_control) {
+ struct iov_iter i = *from;
+
/* There are 256 bytes to be copied in skb, so there is
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
- if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+ iov_iter_advance(&i, copylen);
+ if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
zerocopy = true;
}
if (!zerocopy) {
copylen = len;
- if (gso.hdr_len > good_linear)
+ if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
linear = good_linear;
else
- linear = gso.hdr_len;
+ linear = tun16_to_cpu(tun, gso.hdr_len);
}
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
@@ -1090,9 +1117,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (zerocopy)
- err = zerocopy_sg_from_iovec(skb, iv, offset, count);
+ err = zerocopy_sg_from_iter(skb, from);
else {
- err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (!err && msg_control) {
struct ubuf_info *uarg = msg_control;
uarg->callback(uarg, false);
@@ -1106,8 +1133,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, gso.csum_start,
- gso.csum_offset)) {
+ if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
+ tun16_to_cpu(tun, gso.csum_offset))) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
return -EINVAL;
@@ -1115,8 +1142,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
- if (tun->flags & TUN_NO_PI) {
+ case IFF_TUN:
+ if (tun->flags & IFF_NO_PI) {
switch (skb->data[0] & 0xf0) {
case 0x40:
pi.proto = htons(ETH_P_IP);
@@ -1135,7 +1162,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb->protocol = pi.proto;
skb->dev = tun->dev;
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
@@ -1175,7 +1202,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb_shinfo(skb)->gso_size = gso.gso_size;
+ skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
@@ -1206,8 +1233,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return total_len;
}
-static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct tun_struct *tun = tun_get(file);
@@ -1217,10 +1243,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
if (!tun)
return -EBADFD;
- tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
-
- result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
- count, file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
tun_put(tun);
return result;
@@ -1230,45 +1253,47 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
struct sk_buff *skb,
- const struct iovec *iv, int len)
+ struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
- ssize_t total = 0;
- int vlan_offset = 0, copied;
+ ssize_t total;
+ int vlan_offset = 0;
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
if (vlan_tx_tag_present(skb))
vlan_hlen = VLAN_HLEN;
- if (tun->flags & TUN_VNET_HDR)
+ if (tun->flags & IFF_VNET_HDR)
vnet_hdr_sz = tun->vnet_hdr_sz;
- if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) < 0)
+ total = skb->len + vlan_hlen + vnet_hdr_sz;
+
+ if (!(tun->flags & IFF_NO_PI)) {
+ if (iov_iter_count(iter) < sizeof(pi))
return -EINVAL;
- if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
+ total += sizeof(pi);
+ if (iov_iter_count(iter) < total) {
/* Packet will be striped */
pi.flags |= TUN_PKT_STRIP;
}
- if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
+ if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
return -EFAULT;
- total += sizeof(pi);
}
if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= vnet_hdr_sz) < 0)
+ if (iov_iter_count(iter) < vnet_hdr_sz)
return -EINVAL;
if (skb_is_gso(skb)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
- gso.hdr_len = skb_headlen(skb);
- gso.gso_size = sinfo->gso_size;
+ gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
+ gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
if (sinfo->gso_type & SKB_GSO_TCPV4)
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -1276,12 +1301,12 @@ static ssize_t tun_put_user(struct tun_struct *tun,
else {
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, gso.gso_size,
- gso.hdr_len);
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
- min((int)gso.hdr_len, 64), true);
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
WARN_ON_ONCE(1);
return -EINVAL;
}
@@ -1292,24 +1317,21 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb_checksum_start_offset(skb) +
- vlan_hlen;
- gso.csum_offset = skb->csum_offset;
+ gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
+ vlan_hlen);
+ gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
- if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
- sizeof(gso))))
+ if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
return -EFAULT;
- total += vnet_hdr_sz;
+
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
}
- copied = total;
- len = min_t(int, skb->len + vlan_hlen, len);
- total += skb->len + vlan_hlen;
if (vlan_hlen) {
- int copy, ret;
+ int ret;
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
@@ -1320,41 +1342,36 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+ if (ret || !iov_iter_count(iter))
goto done;
- copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = copy_to_iter(&veth, sizeof(veth), iter);
+ if (ret != sizeof(veth) || !iov_iter_count(iter))
goto done;
}
- skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+ skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
done:
tun->dev->stats.tx_packets++;
- tun->dev->stats.tx_bytes += len;
+ tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
return total;
}
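For context, the tun16_to_cpu()/cpu_to_tun16() helpers used throughout these hunks are defined earlier in the driver, outside this excerpt. A minimal sketch of what they look like, assuming they simply key the <linux/virtio_byteorder.h> helpers off the new TUN_VNET_LE flag:

/* sketch only: the real definitions live near the top of drivers/net/tun.c */
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
}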
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- const struct iovec *iv, ssize_t len, int noblock)
+ struct iov_iter *to,
+ int noblock)
{
struct sk_buff *skb;
- ssize_t ret = 0;
+ ssize_t ret;
int peeked, err, off = 0;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (!len)
- return ret;
+ if (!iov_iter_count(to))
+ return 0;
if (tun->dev->reg_state != NETREG_REGISTERED)
return -EIO;
@@ -1362,37 +1379,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
/* Read frames from queue */
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);
- if (skb) {
- ret = tun_put_user(tun, tfile, skb, iv, len);
+ if (!skb)
+ return 0;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
kfree_skb(skb);
- } else
- ret = err;
+ else
+ consume_skb(skb);
return ret;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
- ssize_t len, ret;
+ ssize_t len = iov_iter_count(to), ret;
if (!tun)
return -EBADFD;
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = tun_do_read(tun, tfile, iv, len,
- file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
-out:
tun_put(tun);
return ret;
}
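The switch to read_iter/write_iter does not change the character-device ABI: scatter/gather reads still deliver the optional struct tun_pi (and, with IFF_VNET_HDR, the virtio header) ahead of the frame. A rough userspace sketch for a tun fd set up without IFF_NO_PI (the fd and its TUNSETIFF configuration are assumed):

#include <sys/uio.h>
#include <linux/if_tun.h>

/* tun_fd is assumed to be an open /dev/net/tun descriptor already
 * configured with TUNSETIFF (IFF_TUN, without IFF_NO_PI). */
static ssize_t read_one_packet(int tun_fd, unsigned char *frame, size_t len)
{
	struct tun_pi pi;
	struct iovec iov[2] = {
		{ .iov_base = &pi,   .iov_len = sizeof(pi) },
		{ .iov_base = frame, .iov_len = len },
	};

	/* on return, pi.proto carries the EtherType in network byte order */
	return readv(tun_fd, iov, 2);
}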
@@ -1462,8 +1473,9 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
- ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
- m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+
+ ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
+ m->msg_flags & MSG_DONTWAIT);
tun_put(tun);
return ret;
}
@@ -1488,8 +1500,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
- flags & MSG_DONTWAIT);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -1521,32 +1532,7 @@ static struct proto tun_proto = {
static int tun_flags(struct tun_struct *tun)
{
- int flags = 0;
-
- if (tun->flags & TUN_TUN_DEV)
- flags |= IFF_TUN;
- else
- flags |= IFF_TAP;
-
- if (tun->flags & TUN_NO_PI)
- flags |= IFF_NO_PI;
-
- /* This flag has no real effect. We track the value for backwards
- * compatibility.
- */
- if (tun->flags & TUN_ONE_QUEUE)
- flags |= IFF_ONE_QUEUE;
-
- if (tun->flags & TUN_VNET_HDR)
- flags |= IFF_VNET_HDR;
-
- if (tun->flags & TUN_TAP_MQ)
- flags |= IFF_MULTI_QUEUE;
-
- if (tun->flags & TUN_PERSIST)
- flags |= IFF_PERSIST;
-
- return flags;
+ return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
@@ -1602,7 +1588,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return -EINVAL;
if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
- !!(tun->flags & TUN_TAP_MQ))
+ !!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
if (tun_not_capable(tun))
@@ -1615,7 +1601,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
return err;
- if (tun->flags & TUN_TAP_MQ &&
+ if (tun->flags & IFF_MULTI_QUEUE &&
(tun->numqueues + tun->numdisabled > 1)) {
/* One or more queues have already been attached, no need
* to initialize the device again.
@@ -1638,11 +1624,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
/* Set dev type */
if (ifr->ifr_flags & IFF_TUN) {
/* TUN device */
- flags |= TUN_TUN_DEV;
+ flags |= IFF_TUN;
name = "tun%d";
} else if (ifr->ifr_flags & IFF_TAP) {
/* TAP device */
- flags |= TUN_TAP_DEV;
+ flags |= IFF_TAP;
name = "tap%d";
} else
return -EINVAL;
@@ -1706,28 +1692,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
- if (ifr->ifr_flags & IFF_NO_PI)
- tun->flags |= TUN_NO_PI;
- else
- tun->flags &= ~TUN_NO_PI;
-
- /* This flag has no real effect. We track the value for backwards
- * compatibility.
- */
- if (ifr->ifr_flags & IFF_ONE_QUEUE)
- tun->flags |= TUN_ONE_QUEUE;
- else
- tun->flags &= ~TUN_ONE_QUEUE;
-
- if (ifr->ifr_flags & IFF_VNET_HDR)
- tun->flags |= TUN_VNET_HDR;
- else
- tun->flags &= ~TUN_VNET_HDR;
-
- if (ifr->ifr_flags & IFF_MULTI_QUEUE)
- tun->flags |= TUN_TAP_MQ;
- else
- tun->flags &= ~TUN_TAP_MQ;
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -1855,7 +1821,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = tun_attach(tun, file, false);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
- if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
+ if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);
@@ -1879,6 +1845,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int sndbuf;
int vnet_hdr_sz;
unsigned int ifindex;
+ int le;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -1890,9 +1857,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
- * TUNSETIFF. */
- return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
- IFF_VNET_HDR | IFF_MULTI_QUEUE,
+ * TUNSETIFF.
+ */
+ return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
(unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE)
return tun_set_queue(file, &ifr);
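TUNGETFEATURES only reports which IFF_* bits TUNSETIFF will accept, so folding them into the TUN_FEATURES mask is invisible to callers. A hedged userspace sketch of the query (tun_fd is assumed to be an open /dev/net/tun descriptor):

#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_supports_vnet_hdr(int tun_fd)
{
	unsigned int features = 0;

	if (ioctl(tun_fd, TUNGETFEATURES, &features) < 0)
		return 0;
	return !!(features & IFF_VNET_HDR);
}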
@@ -1959,12 +1926,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module from being unloaded.
*/
- if (arg && !(tun->flags & TUN_PERSIST)) {
- tun->flags |= TUN_PERSIST;
+ if (arg && !(tun->flags & IFF_PERSIST)) {
+ tun->flags |= IFF_PERSIST;
__module_get(THIS_MODULE);
}
- if (!arg && (tun->flags & TUN_PERSIST)) {
- tun->flags &= ~TUN_PERSIST;
+ if (!arg && (tun->flags & IFF_PERSIST)) {
+ tun->flags &= ~IFF_PERSIST;
module_put(THIS_MODULE);
}
@@ -2022,7 +1989,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNSETTXFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = update_filter(&tun->txflt, (void __user *)arg);
break;
@@ -2078,10 +2045,27 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
+ case TUNGETVNETLE:
+ le = !!(tun->flags & TUN_VNET_LE);
+ if (put_user(le, (int __user *)argp))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETLE:
+ if (get_user(le, (int __user *)argp)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (le)
+ tun->flags |= TUN_VNET_LE;
+ else
+ tun->flags &= ~TUN_VNET_LE;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
@@ -2093,7 +2077,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNDETACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = 0;
tun_detach_filter(tun, tun->numqueues);
@@ -2101,7 +2085,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNGETFILTER:
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
@@ -2245,10 +2229,10 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = do_sync_read,
- .aio_read = tun_chr_aio_read,
- .write = do_sync_write,
- .aio_write = tun_chr_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = tun_chr_read_iter,
+ .write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
.unlocked_ioctl = tun_chr_ioctl,
#ifdef CONFIG_COMPAT
@@ -2294,10 +2278,10 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
+ case IFF_TUN:
strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
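The new TUNGETVNETLE/TUNSETVNETLE ioctls let userspace force the virtio header into little-endian regardless of host byte order, which is exactly what the tun16_to_cpu()/cpu_to_tun16() conversions above consume. A rough sketch of how a virtio 1.0 capable backend might enable it, assuming the companion uapi change exports TUNSETVNETLE in <linux/if_tun.h> and that tun_fd was already set up with IFF_VNET_HDR:

#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_force_vnet_le(int tun_fd)
{
	int le = 1;

	/* fails with errno == EINVAL on kernels without TUNSETVNETLE */
	return ioctl(tun_fd, TUNSETVNETLE, &le);
}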
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 816d511e34d3..bf49792062a2 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -487,8 +487,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- if (dev->driver_priv)
- kfree(dev->driver_priv);
+ kfree(dev->driver_priv);
}
static const struct ethtool_ops ax88178_ethtool_ops = {
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 2ec1500d0077..415ce8b882c6 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
+ page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
if (!page)
return -ENOMEM;
@@ -212,7 +212,7 @@ resubmit:
if (page)
put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
+ rx_submit(pnd, req, GFP_ATOMIC);
}
static int usbpn_close(struct net_device *dev);
@@ -231,7 +231,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
usb_free_urb(req);
usbpn_close(dev);
return -ENOMEM;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d3920b54a92c..9311a08565be 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -71,18 +71,19 @@ static void usbnet_cdc_update_filter(struct usbnet *dev)
{
struct cdc_state *info = (void *) &dev->data;
struct usb_interface *intf = info->control;
+ struct net_device *net = dev->net;
- u16 cdc_filter =
- USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
- USB_CDC_PACKET_TYPE_BROADCAST;
+ u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED
+ | USB_CDC_PACKET_TYPE_BROADCAST;
- if (dev->net->flags & IFF_PROMISC)
- cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
-
- /* FIXME cdc-ether has some multicast code too, though it complains
- * in routine cases. info->ether describes the multicast support.
- * Implement that here, manipulating the cdc filter as needed.
+ /* filtering on the device is an optional feature and not worth
+ * the hassle, so we only care roughly about snooping: if any
+ * multicast is requested, we accept all multicast
*/
+ if (net->flags & IFF_PROMISC)
+ cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
+ if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI))
+ cdc_filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST;
usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 5ee7a1dbc023..96fc8a5bde84 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
/* map MBIM session to VLAN */
if (tci)
- vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
err:
return skb;
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index babda7d8693e..9c5aa922a9f4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2746,8 +2746,7 @@ exit:
tty_unregister_device(tty_drv, serial->minor);
kfree(serial);
}
- if (hso_dev)
- kfree(hso_dev);
+ kfree(hso_dev);
return NULL;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c6554c7a8147..2d1c77e81836 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,6 +24,7 @@
#include <net/ip6_checksum.h>
#include <uapi/linux/mdio.h>
#include <linux/mdio.h>
+#include <linux/usb/cdc.h>
/* Version Information */
#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
@@ -461,18 +462,11 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
-#define PRODUCT_ID_RTL8152 0x8152
-#define PRODUCT_ID_RTL8153 0x8153
-
#define VENDOR_ID_SAMSUNG 0x04e8
-#define PRODUCT_ID_SAMSUNG 0xa101
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
-#define REALTEK_USB_DEVICE(vend, prod) \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
-
struct tally_counter {
__le64 tx_packets;
__le64 rx_packets;
@@ -486,7 +480,7 @@ struct tally_counter {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rx_desc {
@@ -690,6 +684,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
}
}
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
@@ -757,6 +754,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
}
error1:
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
@@ -1030,7 +1030,6 @@ static void read_bulk_callback(struct urb *urb)
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
- int result;
agg = urb->context;
if (!agg)
@@ -1081,15 +1080,7 @@ static void read_bulk_callback(struct urb *urb)
break;
}
- result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- if (result == -ENODEV) {
- netif_device_detach(tp->netdev);
- } else if (result) {
- spin_lock(&tp->rx_lock);
- list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock(&tp->rx_lock);
- tasklet_schedule(&tp->tl);
- }
+ r8152_submit_rx(tp, agg, GFP_ATOMIC);
}
static void write_bulk_callback(struct urb *urb)
@@ -1190,11 +1181,13 @@ static void intr_callback(struct urb *urb)
resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC);
- if (res == -ENODEV)
+ if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
- else if (res)
+ } else if (res) {
netif_err(tp, intr, tp->netdev,
"can't resubmit intr, status %d\n", res);
+ }
}
static inline void *rx_agg_align(void *data)
@@ -1250,7 +1243,6 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
- INIT_LIST_HEAD(&tp->rx_done);
INIT_LIST_HEAD(&tp->tx_free);
skb_queue_head_init(&tp->tx_queue);
@@ -1676,7 +1668,6 @@ static void rx_bottom(struct r8152 *tp)
int len_used = 0;
struct urb *urb;
u8 *rx_data;
- int ret;
list_del_init(cursor);
@@ -1729,13 +1720,7 @@ find_next_rx:
}
submit:
- ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- if (ret && ret != -ENODEV) {
- spin_lock_irqsave(&tp->rx_lock, flags);
- list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
- tasklet_schedule(&tp->tl);
- }
+ r8152_submit_rx(tp, agg, GFP_ATOMIC);
}
}
@@ -1758,6 +1743,7 @@ static void tx_bottom(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(netdev);
} else {
struct net_device_stats *stats = &netdev->stats;
@@ -1792,6 +1778,8 @@ static void bottom_half(unsigned long data)
if (!netif_carrier_ok(tp->netdev))
return;
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
+
rx_bottom(tp);
tx_bottom(tp);
}
@@ -1799,11 +1787,28 @@ static void bottom_half(unsigned long data)
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
{
+ int ret;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
agg->head, agg_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
- return usb_submit_urb(agg->urb, mem_flags);
+ ret = usb_submit_urb(agg->urb, mem_flags);
+ if (ret == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ netif_device_detach(tp->netdev);
+ } else if (ret) {
+ struct urb *urb = agg->urb;
+ unsigned long flags;
+
+ urb->actual_length = 0;
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ }
+
+ return ret;
}
static void rtl_drop_queued_tx(struct r8152 *tp)
@@ -1994,6 +1999,25 @@ static int rtl_start_rx(struct r8152 *tp)
break;
}
+ if (ret && ++i < RTL8152_MAX_RX) {
+ struct list_head rx_queue;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&rx_queue);
+
+ do {
+ struct rx_agg *agg = &tp->rx_info[i++];
+ struct urb *urb = agg->urb;
+
+ urb->actual_length = 0;
+ list_add_tail(&agg->list, &rx_queue);
+ } while (i < RTL8152_MAX_RX);
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_splice_tail(&rx_queue, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
return ret;
}
@@ -2850,15 +2874,18 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ /* If the device is unplugged or !netif_running(), the workqueue
+ * doesn't need to wake the device and can return directly.
+ */
+ if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+ return;
+
if (usb_autopm_get_interface(tp->intf) < 0)
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
- goto out1;
-
if (!mutex_trylock(&tp->control)) {
schedule_delayed_work(&tp->schedule, 0);
goto out1;
@@ -2933,6 +2960,8 @@ static int rtl8152_open(struct net_device *netdev)
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
free_all_mem(tp);
+ } else {
+ tasklet_enable(&tp->tl);
}
mutex_unlock(&tp->control);
@@ -2948,6 +2977,7 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ tasklet_disable(&tp->tl);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -2965,9 +2995,7 @@ static int rtl8152_close(struct net_device *netdev)
*/
rtl_runtime_suspend_enable(tp, false);
- tasklet_disable(&tp->tl);
tp->rtl_ops.down(tp);
- tasklet_enable(&tp->tl);
mutex_unlock(&tp->control);
@@ -3427,7 +3455,7 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(tally.rx_broadcast);
data[10] = le32_to_cpu(tally.rx_multicast);
data[11] = le16_to_cpu(tally.tx_aborted);
- data[12] = le16_to_cpu(tally.tx_underun);
+ data[12] = le16_to_cpu(tally.tx_underrun);
}
static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3565,11 +3593,33 @@ out:
return ret;
}
+static int rtl8152_nway_reset(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&tp->control);
+
+ ret = mii_nway_restart(&tp->mii);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
+}
+
static struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .nway_reset = rtl8152_nway_reset,
.get_msglevel = rtl8152_get_msglevel,
.set_msglevel = rtl8152_set_msglevel,
.get_wol = rtl8152_get_wol,
@@ -3709,66 +3759,43 @@ static void rtl8153_unload(struct r8152 *tp)
r8153_power_cut_en(tp, false);
}
-static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+static int rtl_ops_init(struct r8152 *tp)
{
struct rtl_ops *ops = &tp->rtl_ops;
- int ret = -ENODEV;
-
- switch (id->idVendor) {
- case VENDOR_ID_REALTEK:
- switch (id->idProduct) {
- case PRODUCT_ID_RTL8152:
- ops->init = r8152b_init;
- ops->enable = rtl8152_enable;
- ops->disable = rtl8152_disable;
- ops->up = rtl8152_up;
- ops->down = rtl8152_down;
- ops->unload = rtl8152_unload;
- ops->eee_get = r8152_get_eee;
- ops->eee_set = r8152_set_eee;
- ret = 0;
- break;
- case PRODUCT_ID_RTL8153:
- ops->init = r8153_init;
- ops->enable = rtl8153_enable;
- ops->disable = rtl8153_disable;
- ops->up = rtl8153_up;
- ops->down = rtl8153_down;
- ops->unload = rtl8153_unload;
- ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
- ret = 0;
- break;
- default:
- break;
- }
+ int ret = 0;
+
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ ops->init = r8152b_init;
+ ops->enable = rtl8152_enable;
+ ops->disable = rtl8152_disable;
+ ops->up = rtl8152_up;
+ ops->down = rtl8152_down;
+ ops->unload = rtl8152_unload;
+ ops->eee_get = r8152_get_eee;
+ ops->eee_set = r8152_set_eee;
break;
- case VENDOR_ID_SAMSUNG:
- switch (id->idProduct) {
- case PRODUCT_ID_SAMSUNG:
- ops->init = r8153_init;
- ops->enable = rtl8153_enable;
- ops->disable = rtl8153_disable;
- ops->up = rtl8153_up;
- ops->down = rtl8153_down;
- ops->unload = rtl8153_unload;
- ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
- ret = 0;
- break;
- default:
- break;
- }
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8153_disable;
+ ops->up = rtl8153_up;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8153_set_eee;
break;
default:
+ ret = -ENODEV;
+ netif_err(tp, probe, tp->netdev, "Unknown Device\n");
break;
}
- if (ret)
- netif_err(tp, probe, tp->netdev, "Unknown Device\n");
-
return ret;
}
@@ -3800,7 +3827,8 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->netdev = netdev;
tp->intf = intf;
- ret = rtl_ops_init(tp, id);
+ r8152b_get_version(tp);
+ ret = rtl_ops_init(tp);
if (ret)
goto out;
@@ -3833,11 +3861,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id_mask = 0x3f;
tp->mii.reg_num_mask = 0x1f;
tp->mii.phy_id = R8152_PHY_ID;
- tp->mii.supports_gmii = 0;
intf->needs_remote_wakeup = 1;
- r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
@@ -3855,12 +3881,15 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
+ tasklet_disable(&tp->tl);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out1:
usb_set_intfdata(intf, NULL);
+ tasklet_kill(&tp->tl);
out:
free_netdev(netdev);
return ret;
@@ -3884,11 +3913,27 @@ static void rtl8152_disconnect(struct usb_interface *intf)
}
}
+#define REALTEK_USB_DEVICE(vend, prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC \
+}, \
+{ \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \
+ USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
- {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
- {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{}
};
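The trailing "}, {" inside REALTEK_USB_DEVICE() closes one usb_device_id initializer and opens a second, so each table line now matches two interfaces of the same device: the vendor-specific one and a CDC Ethernet (ECM) one with the same vendor/product IDs. Expanded by hand, the first table entry becomes roughly:

{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS,
  .idVendor = VENDOR_ID_REALTEK, .idProduct = 0x8152,
  .bInterfaceClass = USB_CLASS_VENDOR_SPEC },
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE,
  .idVendor = VENDOR_ID_REALTEK, .idProduct = 0x8152,
  .bInterfaceClass = USB_CLASS_COMM,
  .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
  .bInterfaceProtocol = USB_CDC_PROTO_NONE },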
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6e87e5710048..d37b7dce2d40 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -753,14 +753,13 @@ static int rtl8150_open(struct net_device *netdev)
static int rtl8150_close(struct net_device *netdev)
{
rtl8150_t *dev = netdev_priv(netdev);
- int res = 0;
netif_stop_queue(netdev);
if (!test_bit(RTL8150_UNPLUG, &dev->flags))
disable_net_traffic(dev);
unlink_all_urbs(dev);
- return res;
+ return 0;
}
static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d07bf4cb893f..26423adc35ee 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1670,12 +1670,14 @@ done:
static int smsc95xx_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- u8 suspend_flags = pdata->suspend_flags;
+ struct smsc95xx_priv *pdata;
+ u8 suspend_flags;
int ret;
u32 val;
BUG_ON(!dev);
+ pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ suspend_flags = pdata->suspend_flags;
netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0bc8ead47de..b8bd7191572d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -123,6 +123,9 @@ struct virtnet_info {
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
+ /* Packet virtio header size */
+ u8 hdr_len;
+
/* Active statistics */
struct virtnet_stats __percpu *stats;
@@ -139,21 +142,14 @@ struct virtnet_info {
struct notifier_block nb;
};
-struct skb_vnet_hdr {
- union {
- struct virtio_net_hdr hdr;
- struct virtio_net_hdr_mrg_rxbuf mhdr;
- };
-};
-
struct padded_vnet_hdr {
- struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf hdr;
/*
- * virtio_net_hdr should be in a separated sg buffer because of a
- * QEMU bug, and data sg buffer shares same page with this header sg.
- * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+ * hdr is in a separate sg buffer, and the data sg buffer shares the same
+ * page as this header sg. This padding makes the next sg 16-byte aligned
+ * after the header.
*/
- char padding[6];
+ char padding[4];
};
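The padding change follows directly from the header sizes; the invariant is unchanged, the data sg still starts 16-byte aligned after the header sg:

/* sizeof(struct virtio_net_hdr)           = 10 bytes, 10 + 6 padding = 16
 * sizeof(struct virtio_net_hdr_mrg_rxbuf) = 12 bytes, 12 + 4 padding = 16 */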
/* Converting between virtqueue no. and kernel tx/rx queue no.
@@ -179,9 +175,9 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
-static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
+static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
- return (struct skb_vnet_hdr *)skb->cb;
+ return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}
/*
@@ -241,13 +237,13 @@ static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
}
/* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct receive_queue *rq,
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+ struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
- struct skb_vnet_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
unsigned int copy, hdr_len, hdr_padded_len;
char *p;
@@ -260,13 +256,11 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
hdr = skb_vnet_hdr(skb);
- if (vi->mergeable_rx_bufs) {
- hdr_len = sizeof hdr->mhdr;
- hdr_padded_len = sizeof hdr->mhdr;
- } else {
- hdr_len = sizeof hdr->hdr;
+ hdr_len = vi->hdr_len;
+ if (vi->mergeable_rx_bufs)
+ hdr_padded_len = sizeof *hdr;
+ else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- }
memcpy(hdr, p, hdr_len);
@@ -317,23 +311,24 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb;
}
-static struct sk_buff *receive_small(void *buf, unsigned int len)
+static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
{
struct sk_buff * skb = buf;
- len -= sizeof(struct virtio_net_hdr);
+ len -= vi->hdr_len;
skb_trim(skb, len);
return skb;
}
static struct sk_buff *receive_big(struct net_device *dev,
+ struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
unsigned int len)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+ struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
if (unlikely(!skb))
goto err;
@@ -347,18 +342,20 @@ err:
}
static struct sk_buff *receive_mergeable(struct net_device *dev,
+ struct virtnet_info *vi,
struct receive_queue *rq,
unsigned long ctx,
unsigned int len)
{
void *buf = mergeable_ctx_to_buf_address(ctx);
- struct skb_vnet_hdr *hdr = buf;
- int num_buf = hdr->mhdr.num_buffers;
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
- struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
+ struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
+ truesize);
struct sk_buff *curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -369,7 +366,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
if (unlikely(!ctx)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
- dev->name, num_buf, hdr->mhdr.num_buffers);
+ dev->name, num_buf,
+ virtio16_to_cpu(vi->vdev,
+ hdr->num_buffers));
dev->stats.rx_length_errors++;
goto err_buf;
}
@@ -430,15 +429,15 @@ err_buf:
return NULL;
}
-static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
- struct skb_vnet_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
- if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+ if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
if (vi->mergeable_rx_bufs) {
@@ -454,11 +453,11 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
+ skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
else if (vi->big_packets)
- skb = receive_big(dev, rq, buf, len);
+ skb = receive_big(dev, vi, rq, buf, len);
else
- skb = receive_small(buf, len);
+ skb = receive_small(vi, buf, len);
if (unlikely(!skb))
return;
@@ -473,8 +472,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
if (!skb_partial_csum_set(skb,
- hdr->hdr.csum_start,
- hdr->hdr.csum_offset))
+ virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
+ virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
goto frame_err;
} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -514,7 +513,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
+ skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
+ hdr->hdr.gso_size);
if (skb_shinfo(skb)->gso_size == 0) {
net_warn_ratelimited("%s: zero gso size.\n", dev->name);
goto frame_err;
@@ -535,11 +535,11 @@ frame_err:
dev_kfree_skb(skb);
}
-static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+ gfp_t gfp)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
- struct skb_vnet_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
@@ -550,7 +550,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
hdr = skb_vnet_hdr(skb);
sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
- sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(rq->sg, hdr, vi->hdr_len);
skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
@@ -560,7 +560,8 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
return err;
}
-static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
+ gfp_t gfp)
{
struct page *first, *list = NULL;
char *p;
@@ -591,8 +592,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
p = page_address(first);
/* rq->sg[0], rq->sg[1] share the same page */
- /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
- sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
+ /* a separate rq->sg[0] for the header - required in case !any_header_sg */
+ sg_set_buf(&rq->sg[0], p, vi->hdr_len);
/* rq->sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
@@ -660,9 +661,9 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
* before we're receiving packets, or from refill_work which is
* careful to disable receiving (using napi_disable).
*/
-static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+ gfp_t gfp)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
int err;
bool oom;
@@ -671,9 +672,9 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(rq, gfp);
else if (vi->big_packets)
- err = add_recvbuf_big(rq, gfp);
+ err = add_recvbuf_big(vi, rq, gfp);
else
- err = add_recvbuf_small(rq, gfp);
+ err = add_recvbuf_small(vi, rq, gfp);
oom = err == -ENOMEM;
if (err)
@@ -722,7 +723,7 @@ static void refill_work(struct work_struct *work)
struct receive_queue *rq = &vi->rq[i];
napi_disable(&rq->napi);
- still_empty = !try_fill_recv(rq, GFP_KERNEL);
+ still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
virtnet_napi_enable(rq);
/* In theory, this can happen: if we don't get any buffers in
@@ -741,12 +742,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- receive_buf(rq, buf, len);
+ receive_buf(vi, rq, buf, len);
received++;
}
if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
- if (!try_fill_recv(rq, GFP_ATOMIC))
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
@@ -822,7 +823,7 @@ static int virtnet_open(struct net_device *dev)
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(&vi->rq[i]);
}
@@ -851,18 +852,14 @@ static void free_old_xmit_skbs(struct send_queue *sq)
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
- struct skb_vnet_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned num_sg;
- unsigned hdr_len;
+ unsigned hdr_len = vi->hdr_len;
bool can_push;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
- if (vi->mergeable_rx_bufs)
- hdr_len = sizeof hdr->mhdr;
- else
- hdr_len = sizeof hdr->hdr;
can_push = vi->any_header_sg &&
!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
@@ -870,22 +867,25 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
/* Even if we can, don't push here yet as this would skew
* csum_start offset below. */
if (can_push)
- hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
else
hdr = skb_vnet_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = skb_checksum_start_offset(skb);
- hdr->hdr.csum_offset = skb->csum_offset;
+ hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
+ skb_checksum_start_offset(skb));
+ hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
+ skb->csum_offset);
} else {
hdr->hdr.flags = 0;
hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
}
if (skb_is_gso(skb)) {
- hdr->hdr.hdr_len = skb_headlen(skb);
- hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
+ hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
+ hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
+ skb_shinfo(skb)->gso_size);
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -900,7 +900,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
}
if (vi->mergeable_rx_bufs)
- hdr->mhdr.num_buffers = 0;
+ hdr->num_buffers = 0;
sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
if (can_push) {
@@ -1030,7 +1030,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
"Failed to set mac address by vq command.\n");
return -EINVAL;
}
- } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+ } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+ !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
unsigned int i;
/* Naturally, this has an atomicity problem. */
@@ -1112,7 +1113,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- s.virtqueue_pairs = queue_pairs;
+ s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
sg_init_one(&sg, &s, sizeof(s));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
@@ -1189,7 +1190,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = uc_count;
+ mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
i = 0;
netdev_for_each_uc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1200,7 +1201,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
/* multicast list and count fill the end */
mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = mc_count;
+ mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1805,18 +1806,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ vi->hdr_len = sizeof(struct virtio_net_hdr);
+
if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
vi->any_header_sg = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
- if (vi->any_header_sg) {
- if (vi->mergeable_rx_bufs)
- dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
- else
- dev->needed_headroom = sizeof(struct virtio_net_hdr);
- }
+ if (vi->any_header_sg)
+ dev->needed_headroom = vi->hdr_len;
/* Use single tx/rx queue pair as default */
vi->curr_queue_pairs = 1;
@@ -1844,7 +1847,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Last of all, set up some receive buffers. */
for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(&vi->rq[i], GFP_KERNEL);
+ try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
if (vi->rq[i].vq->num_free ==
@@ -1964,7 +1967,7 @@ static int virtnet_restore(struct virtio_device *vdev)
if (netif_running(vi->dev)) {
for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
for (i = 0; i < vi->max_queue_pairs; i++)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6dfcbf523936..afd295348ddb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2199,13 +2199,6 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
- 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
- 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
- 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
- 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
- 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
- };
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
@@ -2216,7 +2209,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
- memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
+ netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b725fd9e7803..b7b53329d575 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -583,12 +583,16 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
-vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
+vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
unsigned int n = rssConf->indTableSize;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
while (n--)
p[n] = rssConf->indTable[n];
return 0;
@@ -596,13 +600,20 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
}
static int
-vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
+vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
+ const u8 hfunc)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ /* We do not allow changes to unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p[i];
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index be4649a49c5e..49d9f2291998 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -849,7 +849,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
@@ -885,7 +885,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
@@ -1593,14 +1593,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- if (WARN_ON(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb))))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1637,14 +1632,9 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- if (WARN_ON(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb))))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1995,9 +1985,8 @@ static int vxlan_init(struct net_device *dev)
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
vxlan->dst_port);
- if (vs) {
+ if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
/* If we have a socket with same port already, reuse it */
- atomic_inc(&vs->refcnt);
vxlan_vs_add_dev(vs, vxlan);
} else {
/* otherwise make new socket outside of RTNL */
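Switching from atomic_inc() to atomic_add_unless(..., 1, 0) here (and in vxlan_sock_add below) closes a race with socket teardown: a vxlan_sock whose refcount has already dropped to zero must not be revived. The pattern in isolation, as a sketch rather than vxlan code:

/* take a reference only if the object is still live */
if (atomic_add_unless(&vs->refcnt, 1, 0)) {
	/* safe to use vs: the reference we hold keeps it from being freed */
} else {
	/* vs is being torn down; fall back to creating a new socket */
}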
@@ -2236,6 +2225,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
+ [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2396,12 +2388,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
- if (vs) {
- if (vs->rcv == rcv)
- atomic_inc(&vs->refcnt);
- else
+ if (vs && ((vs->rcv != rcv) ||
+ !atomic_add_unless(&vs->refcnt, 1, 0)))
vs = ERR_PTR(-EBUSY);
- }
spin_unlock(&vn->sock_lock);
if (!vs)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 86907e5ba6ca..ccba4fea7269 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -135,6 +135,11 @@ struct ath_ops {
struct ath_common;
struct ath_bus_ops;
+struct ath_ps_ops {
+ void (*wakeup)(struct ath_common *common);
+ void (*restore)(struct ath_common *common);
+};
+
struct ath_common {
void *ah;
void *priv;
@@ -148,7 +153,7 @@ struct ath_common {
u16 cachelsz;
u16 curaid;
u8 macaddr[ETH_ALEN];
- u8 curbssid[ETH_ALEN];
+ u8 curbssid[ETH_ALEN] __aligned(2);
u8 bssidmask[ETH_ALEN];
u32 rx_bufsize;
@@ -169,6 +174,7 @@ struct ath_common {
struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
+ const struct ath_ps_ops *ps_ops;
bool btcoex_enabled;
bool disable_ani;
@@ -178,6 +184,11 @@ struct ath_common {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
};
+static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
+{
+ return common->ps_ops;
+}
+
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 101cadb6e4ba..a156e6e48708 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -443,12 +443,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -558,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
/* sanity */
dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -576,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp)
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@@ -817,7 +818,10 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ /* Skip the last copy engine, CE7, the diagnostic window, as it
+ * uses polling and isn't initialized for interrupts.
+ */
+ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
@@ -832,8 +836,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
- memset(src_ring->per_transfer_context, 0,
- nentries * sizeof(*src_ring->per_transfer_context));
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -869,8 +873,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
- memset(dest_ring->per_transfer_context, 0,
- nentries * sizeof(*dest_ring->per_transfer_context));
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -1020,37 +1024,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* initialized by software/firmware.
*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *))
+ const struct ce_attr *attr)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
- /*
- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
- * additional TX locking checks.
- *
- * For the lack of a better place do the check here.
- */
- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
- spin_lock_bh(&ar_pci->ce_lock);
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- if (attr->src_nentries)
- ce_state->send_cb = send_cb;
- if (attr->dest_nentries)
- ce_state->recv_cb = recv_cb;
- spin_unlock_bh(&ar_pci->ce_lock);
-
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
@@ -1098,12 +1075,37 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr)
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
+ /*
+ * Make sure there are enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = recv_cb;
+
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 329b7340fa72..617a151e8ce4 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -192,15 +192,21 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *));
+ const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr);
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
@@ -213,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp);
+
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index cee18c89d7f2..7762061a1944 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -31,12 +31,17 @@
unsigned int ath10k_debug_mask;
static bool uart_print;
static unsigned int ath10k_p2p;
+static bool skip_otp;
+
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
module_param_named(p2p, ath10k_p2p, uint, 0644);
+module_param(skip_otp, bool, 0644);
+
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -138,7 +143,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
-static int ath10k_push_board_ext_data(struct ath10k *ar)
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+ size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -159,14 +165,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
if (board_ext_data_addr == 0)
return 0;
- if (ar->board_len != (board_data_size + board_ext_data_size)) {
+ if (data_len != (board_data_size + board_ext_data_size)) {
ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
- ar->board_len, board_data_size, board_ext_data_size);
+ data_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
- ar->board_data + board_data_size,
+ data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err(ar, "could not write board ext data (%d)\n", ret);
@@ -184,13 +190,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
return 0;
}
-static int ath10k_download_board_data(struct ath10k *ar)
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
- ret = ath10k_push_board_ext_data(ar);
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
if (ret) {
ath10k_err(ar, "could not push board ext data (%d)\n", ret);
goto exit;
@@ -202,9 +209,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
- ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+ ret = ath10k_bmi_write_memory(ar, address, data,
min_t(u32, board_data_size,
- ar->board_len));
+ data_len));
if (ret) {
ath10k_err(ar, "could not write board data (%d)\n", ret);
goto exit;
@@ -220,11 +227,39 @@ exit:
return ret;
}
+static int ath10k_download_cal_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (!ar->cal_file)
+ return -ENOENT;
+
+ if (IS_ERR(ar->cal_file))
+ return PTR_ERR(ar->cal_file);
+
+ ret = ath10k_download_board_data(ar, ar->cal_file->data,
+ ar->cal_file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
int ret;
+ ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len) {
@@ -250,7 +285,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
- if (result != 0) {
+ if (!skip_otp && result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -308,6 +343,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
+ if (ar->cal_file && !IS_ERR(ar->cal_file))
+ release_firmware(ar->cal_file);
+
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
@@ -319,6 +357,27 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware = NULL;
ar->firmware_data = NULL;
ar->firmware_len = 0;
+
+ ar->cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+ char filename[100];
+
+ /* cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (IS_ERR(ar->cal_file))
+ /* calibration file is optional, don't print any warnings */
+ return PTR_ERR(ar->cal_file);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+ ATH10K_FW_DIR, filename);
+
+ return 0;
}
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
@@ -562,6 +621,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -589,30 +651,32 @@ success:
return 0;
}
-static int ath10k_init_download_firmware(struct ath10k *ar,
- enum ath10k_firmware_mode mode)
+static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
- ret = ath10k_download_board_data(ar);
- if (ret) {
- ath10k_err(ar, "failed to download board data: %d\n", ret);
- return ret;
+ ret = ath10k_download_cal_file(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_FILE;
+ goto done;
}
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration file, try OTP next: %d\n",
+ ret);
+
ret = ath10k_download_and_run_otp(ar);
if (ret) {
ath10k_err(ar, "failed to run otp: %d\n", ret);
return ret;
}
- ret = ath10k_download_fw(ar, mode);
- if (ret) {
- ath10k_err(ar, "failed to download firmware: %d\n", ret);
- return ret;
- }
+ ar->cal_mode = ATH10K_CAL_MODE_OTP;
- return ret;
+done:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+ return 0;
}
static int ath10k_init_uart(struct ath10k *ar)
@@ -685,6 +749,25 @@ static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ /* Place a barrier to make sure the compiler doesn't reorder
+ * CRASH_FLUSH and calling other functions.
+ */
+ barrier();
+
+ ieee80211_stop_queues(ar->hw);
+ ath10k_drain_tx(ar);
+ complete_all(&ar->scan.started);
+ complete_all(&ar->scan.completed);
+ complete_all(&ar->scan.on_channel);
+ complete_all(&ar->offchan_tx_completed);
+ complete_all(&ar->install_key_done);
+ complete_all(&ar->vdev_setup_done);
+ wake_up(&ar->htt.empty_tx_wq);
+ wake_up(&ar->wmi.tx_credits_wq);
+ wake_up(&ar->peer_mapping_wq);
+
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -716,12 +799,25 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
+static void ath10k_core_init_max_sta_count(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ } else {
+ ar->max_num_peers = TARGET_NUM_PEERS;
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ }
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
int status;
lockdep_assert_held(&ar->conf_mutex);
+ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -729,7 +825,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err;
}
- status = ath10k_init_download_firmware(ar, mode);
+ status = ath10k_download_cal_data(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_download_fw(ar, mode);
if (status)
goto err;
@@ -846,9 +946,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+ ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1;
else
- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
@@ -946,6 +1046,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ ath10k_core_init_max_sta_count(ar);
+
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1084,6 +1186,7 @@ void ath10k_core_unregister(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
@@ -1100,6 +1203,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->dev = dev;
ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
@@ -1120,6 +1224,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
+ init_waitqueue_head(&ar->htt.empty_tx_wq);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
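
The core.c hunks above add an external calibration path: a per-device file named cal-<bus>-<id>.bin is tried first, and only if it is absent does the driver fall back to running the on-chip OTP calibration (which the new skip_otp parameter can make non-fatal in test setups). The following is a rough user-space model of that fallback order, with made-up helper names; it is not the driver's firmware loader.

#include <stdio.h>
#include <string.h>

enum cal_mode { CAL_MODE_FILE, CAL_MODE_OTP };

/* Probe for an external per-device calibration file; failure here is
 * not fatal, it simply selects the OTP path. */
static int load_cal_file(const char *bus, const char *dev_id)
{
	char filename[100];
	FILE *f;

	snprintf(filename, sizeof(filename), "cal-%s-%s.bin", bus, dev_id);
	f = fopen(filename, "rb");
	if (!f)
		return -1;
	fclose(f);
	return 0;
}

static enum cal_mode download_cal_data(const char *bus, const char *dev_id)
{
	if (load_cal_file(bus, dev_id) == 0)
		return CAL_MODE_FILE;

	/* no per-device file found: fall back to OTP calibration */
	return CAL_MODE_OTP;
}

int main(void)
{
	enum cal_mode mode = download_cal_data("pci", "0000:02:00.0");

	printf("calibration mode: %s\n",
	       mode == CAL_MODE_FILE ? "file" : "otp");
	return 0;
}
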
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index fe531ea6926c..514c219263a5 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -63,12 +63,28 @@
struct ath10k;
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+};
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+ switch (bus) {
+ case ATH10K_BUS_PCI:
+ return "pci";
+ }
+
+ return "unknown";
+}
+
struct ath10k_skb_cb {
dma_addr_t paddr;
+ u8 eid;
u8 vdev_id;
struct {
u8 tid;
+ u16 freq;
bool is_offchan;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
@@ -96,8 +112,6 @@ struct ath10k_bmi {
bool done_sent;
};
-#define ATH10K_MAX_MEM_REQS 16
-
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
@@ -110,22 +124,27 @@ struct ath10k_wmi {
struct completion service_ready;
struct completion unified_ready;
wait_queue_head_t tx_credits_wq;
+ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
- struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
+ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
-struct ath10k_peer_stat {
+struct ath10k_fw_stats_peer {
+ struct list_head list;
+
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
-struct ath10k_target_stats {
+struct ath10k_fw_stats_pdev {
+ struct list_head list;
+
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
@@ -180,15 +199,11 @@ struct ath10k_target_stats {
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
+};
- /* VDEV STATS */
-
- /* PEER STATS */
- u8 peers;
- struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
-
- /* TODO: Beacon filter stats */
-
+struct ath10k_fw_stats {
+ struct list_head pdevs;
+ struct list_head peers;
};
struct ath10k_dfs_stats {
@@ -206,6 +221,8 @@ struct ath10k_peer {
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+ /* protected by ar->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
@@ -234,6 +251,8 @@ struct ath10k_vif {
struct sk_buff *beacon;
/* protected by data_lock */
bool beacon_sent;
+ void *beacon_buf;
+ dma_addr_t beacon_paddr;
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -273,6 +292,7 @@ struct ath10k_vif {
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
+ int txpower;
};
struct ath10k_vif_iter {
@@ -292,17 +312,19 @@ struct ath10k_fw_crash_data {
struct ath10k_debug {
struct dentry *debugfs_phy;
- struct ath10k_target_stats target_stats;
- DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX);
-
- struct completion event_stats_compl;
+ struct ath10k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
+ /* protected by conf_mutex */
u32 fw_dbglog_mask;
+ u32 pktlog_filter;
+ u32 reg_addr;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
@@ -321,7 +343,7 @@ enum ath10k_state {
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
- * set the state to STATE_ON in restart_complete(). */
+ * set the state to STATE_ON in reconfig_complete(). */
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
@@ -369,8 +391,30 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_CORE_REGISTERED,
+
+ /* Device has crashed and needs to restart. This indicates any pending
+ * waiters should immediately cancel instead of waiting for a time out.
+ */
+ ATH10K_FLAG_CRASH_FLUSH,
};
+enum ath10k_cal_mode {
+ ATH10K_CAL_MODE_FILE,
+ ATH10K_CAL_MODE_OTP,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+ switch (mode) {
+ case ATH10K_CAL_MODE_FILE:
+ return "file";
+ case ATH10K_CAL_MODE_OTP:
+ return "otp";
+ }
+
+ return "unknown";
+}
+
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
@@ -421,6 +465,7 @@ struct ath10k {
bool p2p;
struct {
+ enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
@@ -456,7 +501,10 @@ struct ath10k {
const void *firmware_data;
size_t firmware_len;
+ const struct firmware *cal_file;
+
int fw_api;
+ enum ath10k_cal_mode cal_mode;
struct {
struct completion started;
@@ -482,7 +530,7 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
- int free_vdev_map;
+ unsigned long long free_vdev_map;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
@@ -517,8 +565,12 @@ struct ath10k {
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
- /* number of created peers; protected by data_lock */
+ /* protected by conf_mutex */
int num_peers;
+ int num_stations;
+
+ int max_num_peers;
+ int max_num_stations;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -563,11 +615,19 @@ struct ath10k {
bool utf_monitor;
} testmode;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ u32 fw_warm_reset_counter;
+ u32 fw_cold_reset_counter;
+ } stats;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
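
core.h replaces the fixed-size peer/pdev stats arrays with list_head based collections, so firmware can report an arbitrary number of entries across several update events. A minimal stand-alone illustration of that shift, using invented names and a plain singly linked list instead of the kernel's list_head, is:

#include <stdlib.h>
#include <string.h>

struct peer_stat {
	unsigned char macaddr[6];
	unsigned int rssi;
	struct peer_stat *next;
};

/* Prepend one heap-allocated entry; no compile-time bound on peers. */
static struct peer_stat *peer_stat_add(struct peer_stat *head,
				       const unsigned char *mac,
				       unsigned int rssi)
{
	struct peer_stat *p = calloc(1, sizeof(*p));

	if (!p)
		return head;
	memcpy(p->macaddr, mac, sizeof(p->macaddr));
	p->rssi = rssi;
	p->next = head;
	return p;
}

static size_t peer_stat_count(const struct peer_stat *head)
{
	size_t n = 0;

	for (; head; head = head->next)
		n++;
	return n;
}

static void peer_stat_free(struct peer_stat *head)
{
	while (head) {
		struct peer_stat *next = head->next;

		free(head);
		head = next;
	}
}
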
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3756feba3223..a716758f14b0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -17,12 +17,12 @@
#include <linux/module.h>
#include <linux/debugfs.h>
-#include <linux/version.h>
-#include <linux/vermagic.h>
#include <linux/vmalloc.h>
+#include <linux/utsname.h>
#include "core.h"
#include "debug.h"
+#include "hif.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
@@ -106,34 +106,37 @@ struct ath10k_dump_file_data {
u8 data[0];
} __packed;
-int ath10k_info(struct ath10k *ar, const char *fmt, ...)
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = dev_info(ar->dev, "%pV", &vaf);
+ dev_info(ar->dev, "%pV", &vaf);
trace_ath10k_log_info(ar, &vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
- ar->htt.target_version_minor);
+ ar->htt.target_version_minor,
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build,
+ ath10k_cal_mode_str(ar->cal_mode),
+ ar->max_num_stations);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -143,25 +146,22 @@ void ath10k_print_driver_info(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_print_driver_info);
-int ath10k_err(struct ath10k *ar, const char *fmt, ...)
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = dev_err(ar->dev, "%pV", &vaf);
+ dev_err(ar->dev, "%pV", &vaf);
trace_ath10k_log_err(ar, &vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath10k_err);
-int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -174,20 +174,11 @@ int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
trace_ath10k_log_warn(ar, &vaf);
va_end(args);
-
- return 0;
}
EXPORT_SYMBOL(ath10k_warn);
#ifdef CONFIG_ATH10K_DEBUGFS
-void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size)
-{
- memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
-}
-
static ssize_t ath10k_read_wmi_services(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -209,8 +200,9 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
if (len > buf_len)
len = buf_len;
+ spin_lock_bh(&ar->data_lock);
for (i = 0; i < WMI_SERVICE_MAX; i++) {
- enabled = test_bit(i, ar->debug.wmi_service_bitmap);
+ enabled = test_bit(i, ar->wmi.svc_map);
name = wmi_service_name(i);
if (!name) {
@@ -226,6 +218,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
"%-40s %s\n",
name, enabled ? "enabled" : "-");
}
+ spin_unlock_bh(&ar->data_lock);
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -242,169 +235,182 @@ static const struct file_operations fops_wmi_services = {
.llseek = default_llseek,
};
-void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev)
+static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
{
- u8 *tmp = ev->data;
- struct ath10k_target_stats *stats;
- int num_pdev_stats, num_vdev_stats, num_peer_stats;
- struct wmi_pdev_stats_10x *ps;
- int i;
+ struct ath10k_fw_stats_pdev *i, *tmp;
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ spin_unlock_bh(&ar->data_lock);
+}
- stats = &ar->debug.target_stats;
-
- num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
- num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
- num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
-
- if (num_pdev_stats) {
- ps = (struct wmi_pdev_stats_10x *)tmp;
-
- stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
- stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
- stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
- stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
- stats->cycle_count = __le32_to_cpu(ps->cycle_count);
- stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
- stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
-
- stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
- stats->comp_delivered =
- __le32_to_cpu(ps->wal.tx.comp_delivered);
- stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
- stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
- stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
- stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
- stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
- stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
- stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
- stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
- stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
- stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
- stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
- stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
- stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
- stats->sw_retry_failure =
- __le32_to_cpu(ps->wal.tx.sw_retry_failure);
- stats->illgl_rate_phy_err =
- __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
- stats->pdev_cont_xretry =
- __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
- stats->pdev_tx_timeout =
- __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
- stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
- stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
- stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
-
- stats->mid_ppdu_route_change =
- __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
- stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
- stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
- stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
- stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
- stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
- stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
- stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
- stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
- stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
- stats->oversize_amsdu =
- __le32_to_cpu(ps->wal.rx.oversize_amsdu);
- stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
- stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
- stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
- ar->fw_features)) {
- stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
- stats->rts_bad = __le32_to_cpu(ps->rts_bad);
- stats->rts_good = __le32_to_cpu(ps->rts_good);
- stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
- stats->no_beacons = __le32_to_cpu(ps->no_beacons);
- stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
- tmp += sizeof(struct wmi_pdev_stats_10x);
- } else {
- tmp += sizeof(struct wmi_pdev_stats_old);
- }
+static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_fw_stats stats = {};
+ bool is_start, is_started, is_end;
+ size_t num_peers;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.peers);
+
+ spin_lock_bh(&ar->data_lock);
+ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+ if (ret) {
+ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+ goto unlock;
}
- /* 0 or max vdevs */
- /* Currently firmware does not support VDEV stats */
- if (num_vdev_stats) {
- struct wmi_vdev_stats *vdev_stats;
+ /* Stat data may exceed htc-wmi buffer limit. In such case firmware
+ * splits the stats data and delivers it in a ping-pong fashion of
+ * request cmd-update event.
+ *
+ * However there is no explicit end-of-data. Instead start-of-data is
+ * used as an implicit one. This works as follows:
+ * a) discard stat update events until one with pdev stats is
+ * delivered - this skips session started at end of (b)
+ * b) consume stat update events until another one with pdev stats is
+ * delivered which is treated as end-of-data and is itself discarded
+ */
- for (i = 0; i < num_vdev_stats; i++) {
- vdev_stats = (struct wmi_vdev_stats *)tmp;
- tmp += sizeof(struct wmi_vdev_stats);
- }
+ if (ar->debug.fw_stats_done) {
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+ goto free;
}
- if (num_peer_stats) {
- struct wmi_peer_stats_10x *peer_stats;
- struct ath10k_peer_stat *s;
-
- stats->peers = num_peer_stats;
-
- for (i = 0; i < num_peer_stats; i++) {
- peer_stats = (struct wmi_peer_stats_10x *)tmp;
- s = &stats->peer_stat[i];
-
- memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
- ETH_ALEN);
- s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
- s->peer_tx_rate =
- __le32_to_cpu(peer_stats->peer_tx_rate);
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
- ar->fw_features)) {
- s->peer_rx_rate =
- __le32_to_cpu(peer_stats->peer_rx_rate);
- tmp += sizeof(struct wmi_peer_stats_10x);
-
- } else {
- tmp += sizeof(struct wmi_peer_stats_old);
- }
+ num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+
+ if (is_start)
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+ if (is_end)
+ ar->debug.fw_stats_done = true;
+
+ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+ if (is_started && !is_end) {
+ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+ /* Although this is unlikely, impose a sane limit to
+ * prevent firmware from DoS-ing the host.
+ */
+ ath10k_warn(ar, "dropping fw peer stats\n");
+ goto free;
}
+
+ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
}
+ complete(&ar->debug.fw_stats_complete);
+
+free:
+ /* In some cases lists have been spliced and cleared. Free up
+ * resources if that is not the case.
+ */
+ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_debug_fw_stats_peers_free(&stats.peers);
+
+unlock:
spin_unlock_bh(&ar->data_lock);
- complete(&ar->debug.event_stats_compl);
}
-static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
- struct ath10k *ar = file->private_data;
- struct ath10k_target_stats *fw_stats;
- char *buf = NULL;
- unsigned int len = 0, buf_len = 8000;
- ssize_t ret_cnt = 0;
- long left;
- int i;
+ unsigned long timeout;
int ret;
- fw_stats = &ar->debug.target_stats;
+ lockdep_assert_held(&ar->conf_mutex);
- mutex_lock(&ar->conf_mutex);
+ timeout = jiffies + msecs_to_jiffies(1*HZ);
- if (ar->state != ATH10K_STATE_ON)
- goto exit;
+ ath10k_debug_fw_stats_reset(ar);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- goto exit;
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
- ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
- if (ret) {
- ath10k_warn(ar, "could not request stats (%d)\n", ret);
- goto exit;
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+ if (ret) {
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1*HZ);
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
}
- left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
- if (left <= 0)
- goto exit;
+ return 0;
+}
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
+static void ath10k_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
@@ -412,29 +418,29 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Channel noise floor", fw_stats->ch_noise_floor);
+ "Channel noise floor", pdev->ch_noise_floor);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Channel TX power", fw_stats->chan_tx_power);
+ "Channel TX power", pdev->chan_tx_power);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "TX frame count", fw_stats->tx_frame_count);
+ "TX frame count", pdev->tx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX frame count", fw_stats->rx_frame_count);
+ "RX frame count", pdev->rx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX clear count", fw_stats->rx_clear_count);
+ "RX clear count", pdev->rx_clear_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Cycle count", fw_stats->cycle_count);
+ "Cycle count", pdev->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "PHY error count", fw_stats->phy_err_count);
+ "PHY error count", pdev->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS bad count", fw_stats->rts_bad);
+ "RTS bad count", pdev->rts_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS good count", fw_stats->rts_good);
+ "RTS good count", pdev->rts_good);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "FCS bad count", fw_stats->fcs_bad);
+ "FCS bad count", pdev->fcs_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "No beacon count", fw_stats->no_beacons);
+ "No beacon count", pdev->no_beacons);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "MIB int count", fw_stats->mib_int_count);
+ "MIB int count", pdev->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -443,51 +449,51 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies queued", fw_stats->comp_queued);
+ "HTT cookies queued", pdev->comp_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies disp.", fw_stats->comp_delivered);
+ "HTT cookies disp.", pdev->comp_delivered);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDU queued", fw_stats->msdu_enqued);
+ "MSDU queued", pdev->msdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU queued", fw_stats->mpdu_enqued);
+ "MPDU queued", pdev->mpdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs dropped", fw_stats->wmm_drop);
+ "MSDUs dropped", pdev->wmm_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local enqued", fw_stats->local_enqued);
+ "Local enqued", pdev->local_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local freed", fw_stats->local_freed);
+ "Local freed", pdev->local_freed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW queued", fw_stats->hw_queued);
+ "HW queued", pdev->hw_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs reaped", fw_stats->hw_reaped);
+ "PPDUs reaped", pdev->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Num underruns", fw_stats->underrun);
+ "Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs cleaned", fw_stats->tx_abort);
+ "PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", fw_stats->mpdus_requed);
+ "MPDUs requed", pdev->mpdus_requed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Excessive retries", fw_stats->tx_ko);
+ "Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW rate", fw_stats->data_rc);
+ "HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Sched self tiggers", fw_stats->self_triggers);
+ "Sched self tiggers", pdev->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Dropped due to SW retries",
- fw_stats->sw_retry_failure);
+ pdev->sw_retry_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Illegal rate phy errors",
- fw_stats->illgl_rate_phy_err);
+ pdev->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Pdev continous xretry", fw_stats->pdev_cont_xretry);
+ "Pdev continous xretry", pdev->pdev_cont_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "TX timeout", fw_stats->pdev_tx_timeout);
+ "TX timeout", pdev->pdev_tx_timeout);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PDEV resets", fw_stats->pdev_resets);
+ "PDEV resets", pdev->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY underrun", fw_stats->phy_underrun);
+ "PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU is more than txop limit", fw_stats->txop_ovf);
+ "MPDU is more than txop limit", pdev->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -497,70 +503,161 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Mid PPDU route change",
- fw_stats->mid_ppdu_route_change);
+ pdev->mid_ppdu_route_change);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Tot. number of statuses", fw_stats->status_rcvd);
+ "Tot. number of statuses", pdev->status_rcvd);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 0", fw_stats->r0_frags);
+ "Extra frags on rings 0", pdev->r0_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 1", fw_stats->r1_frags);
+ "Extra frags on rings 1", pdev->r1_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 2", fw_stats->r2_frags);
+ "Extra frags on rings 2", pdev->r2_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 3", fw_stats->r3_frags);
+ "Extra frags on rings 3", pdev->r3_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to HTT", fw_stats->htt_msdus);
+ "MSDUs delivered to HTT", pdev->htt_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to stack", fw_stats->loc_msdus);
+ "MSDUs delivered to stack", pdev->loc_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to stack", fw_stats->loc_mpdus);
+ "MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Oversized AMSUs", fw_stats->oversize_amsdu);
+ "Oversized AMSUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors", fw_stats->phy_errs);
+ "PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors drops", fw_stats->phy_err_drop);
+ "PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
- "ath10k PEER stats", fw_stats->peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
- for (i = 0; i < fw_stats->peers; i++) {
+ list_for_each_entry(peer, &fw_stats->peers, list) {
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
- "Peer MAC address",
- fw_stats->peer_stat[i].peer_macaddr);
+ "Peer MAC address", peer->peer_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+ "Peer RSSI", peer->peer_rssi);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer TX rate",
- fw_stats->peer_stat[i].peer_tx_rate);
+ "Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RX rate",
- fw_stats->peer_stat[i].peer_rx_rate);
+ "Peer RX rate", peer->peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
+
+unlock:
spin_unlock_bh(&ar->data_lock);
- if (len > buf_len)
- len = buf_len;
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
- ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ file->private_data = buf;
-exit:
mutex_unlock(&ar->conf_mutex);
- kfree(buf);
- return ret_cnt;
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ unsigned int len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_fw_stats = {
- .read = ath10k_read_fw_stats,
+ .open = ath10k_fw_stats_open,
+ .release = ath10k_fw_stats_release,
+ .read = ath10k_fw_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret, len, buf_len;
+ char *buf;
+
+ buf_len = 500;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_warm_reset_counter\t\t%d\n",
+ ar->stats.fw_warm_reset_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_cold_reset_counter\t\t%d\n",
+ ar->stats.fw_cold_reset_counter);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
.open = simple_open,
+ .read = ath10k_debug_fw_reset_stats_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
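
The reworked fw_stats file renders the whole report once in .open, serves slices of that buffer from .read, and frees it in .release, so the firmware round trip happens once per open rather than on every read. Below is a small user-space sketch of that snapshot-at-open pattern with generic names; it is not the debugfs API itself.

#include <stdlib.h>
#include <string.h>

struct snapshot {
	char *buf;
	size_t len;
};

/* "open": render the report up front into one buffer. */
static struct snapshot *snapshot_open(const char *report)
{
	struct snapshot *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->buf = strdup(report);
	if (!s->buf) {
		free(s);
		return NULL;
	}
	s->len = strlen(s->buf);
	return s;
}

/* "read": copy out a slice and advance the caller's position. */
static size_t snapshot_read(struct snapshot *s, char *out, size_t count,
			    size_t *ppos)
{
	size_t avail = (*ppos < s->len) ? s->len - *ppos : 0;
	size_t n = (count < avail) ? count : avail;

	memcpy(out, s->buf + *ppos, n);
	*ppos += n;
	return n;
}

/* "release": drop the snapshot. */
static void snapshot_release(struct snapshot *s)
{
	free(s->buf);
	free(s);
}
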
@@ -593,7 +690,8 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
"To simulate firmware crash write one of the keywords to this file:\n"
"`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
"`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
- "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n";
+ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
@@ -646,6 +744,10 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
} else if (!strcmp(buf, "assert")) {
ath10k_info(ar, "simulating firmware assert crash\n");
ret = ath10k_debug_fw_assert(ar);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath10k_info(ar, "user requested hw restart\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = 0;
} else {
ret = -EINVAL;
goto exit;
@@ -759,8 +861,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
- dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE);
- strlcpy(dump_data->kernel_ver, VERMAGIC_STRING,
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
@@ -822,6 +924,236 @@ static const struct file_operations fops_fw_crash_dump = {
.llseek = default_llseek,
};
+static ssize_t ath10k_reg_addr_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+ u32 reg_addr;
+
+ mutex_lock(&ar->conf_mutex);
+ reg_addr = ar->debug.reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(reg_addr, 4))
+ return -EFAULT;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.reg_addr = reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+ .read = ath10k_reg_addr_read,
+ .write = ath10k_reg_addr_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[48];
+ unsigned int len;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ reg_val = ath10k_hif_read32(ar, reg_addr);
+ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+ if (ret)
+ goto exit;
+
+ ath10k_hif_write32(ar, reg_addr, reg_val);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+ .read = ath10k_reg_value_read,
+ .write = ath10k_reg_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ ret = copy_to_user(user_buf, buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ count -= ret;
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+ .read = ath10k_mem_value_read,
+ .write = ath10k_mem_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
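
The reg_addr/reg_value pair added above works as a two-step peek/poke: write a 4-byte aligned target address to reg_addr, then read (or write) reg_value while the device is up. A small user-space example follows; the debugfs path and the register address are assumptions for illustration and need to be adjusted for the local phy.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed location of the per-phy ath10k debugfs directory. */
#define DBG_DIR "/sys/kernel/debug/ieee80211/phy0/ath10k"

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, s, strlen(s));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* 1. latch the address to access (arbitrary example, 4-byte aligned) */
	if (write_str(DBG_DIR "/reg_addr", "0x0003a028\n"))
		return 1;

	/* 2. reg_value then reads back as "<addr>:<value>" for that address */
	fd = open(DBG_DIR "/reg_value", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';
	printf("%s", buf);
	return 0;
}
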
@@ -1029,6 +1361,166 @@ exit:
return ret;
}
+/* TODO: Would be nice to always support ethtool stats, would need to
+ * move the stats storage out of ath10k_debug, or always have ath10k_debug
+ * struct available..
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_noise_floor",
+ "d_cycle_count",
+ "d_phy_error",
+ "d_rts_bad",
+ "d_rts_good",
+ "d_tx_power", /* in .5 dbM I think */
+ "d_rx_crc_err", /* fcs_bad */
+ "d_no_beacon",
+ "d_tx_mpdus_queued",
+ "d_tx_msdu_queued",
+ "d_tx_msdu_dropped",
+ "d_local_enqued",
+ "d_local_freed",
+ "d_tx_ppdu_hw_queued",
+ "d_tx_ppdu_reaped",
+ "d_tx_fifo_underrun",
+ "d_tx_ppdu_abort",
+ "d_tx_mpdu_requed",
+ "d_tx_excessive_retries",
+ "d_tx_hw_rate",
+ "d_tx_dropped_sw_retries",
+ "d_tx_illegal_rate",
+ "d_tx_continuous_xretries",
+ "d_tx_timeout",
+ "d_tx_mpdu_txop_limit",
+ "d_pdev_resets",
+ "d_rx_mid_ppdu_route_change",
+ "d_rx_status",
+ "d_rx_extra_frags_ring0",
+ "d_rx_extra_frags_ring1",
+ "d_rx_extra_frags_ring2",
+ "d_rx_extra_frags_ring3",
+ "d_rx_msdu_htt",
+ "d_rx_mpdu_htt",
+ "d_rx_msdu_stack",
+ "d_rx_mpdu_stack",
+ "d_rx_phy_err",
+ "d_rx_phy_err_drops",
+ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
+ "d_fw_crash_count",
+ "d_fw_warm_reset_count",
+ "d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH10K_SSTATS_LEN;
+
+ return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath10k *ar = hw->priv;
+ static const struct ath10k_fw_stats_pdev zero_stats = {};
+ const struct ath10k_fw_stats_pdev *pdev_stats;
+ int i = 0, ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ /* just print a warning and try to use older results */
+ ath10k_warn(ar,
+ "failed to get fw stats for ethtool: %d\n",
+ ret);
+ }
+ }
+
+ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+ struct ath10k_fw_stats_pdev,
+ list);
+ if (!pdev_stats) {
+ /* no results available so just return zeroes */
+ pdev_stats = &zero_stats;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+ data[i++] = 0; /* tx bytes */
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = 0; /* rx bytes */
+ data[i++] = pdev_stats->ch_noise_floor;
+ data[i++] = pdev_stats->cycle_count;
+ data[i++] = pdev_stats->phy_err_count;
+ data[i++] = pdev_stats->rts_bad;
+ data[i++] = pdev_stats->rts_good;
+ data[i++] = pdev_stats->chan_tx_power;
+ data[i++] = pdev_stats->fcs_bad;
+ data[i++] = pdev_stats->no_beacons;
+ data[i++] = pdev_stats->mpdu_enqued;
+ data[i++] = pdev_stats->msdu_enqued;
+ data[i++] = pdev_stats->wmm_drop;
+ data[i++] = pdev_stats->local_enqued;
+ data[i++] = pdev_stats->local_freed;
+ data[i++] = pdev_stats->hw_queued;
+ data[i++] = pdev_stats->hw_reaped;
+ data[i++] = pdev_stats->underrun;
+ data[i++] = pdev_stats->tx_abort;
+ data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->tx_ko;
+ data[i++] = pdev_stats->data_rc;
+ data[i++] = pdev_stats->sw_retry_failure;
+ data[i++] = pdev_stats->illgl_rate_phy_err;
+ data[i++] = pdev_stats->pdev_cont_xretry;
+ data[i++] = pdev_stats->pdev_tx_timeout;
+ data[i++] = pdev_stats->txop_ovf;
+ data[i++] = pdev_stats->pdev_resets;
+ data[i++] = pdev_stats->mid_ppdu_route_change;
+ data[i++] = pdev_stats->status_rcvd;
+ data[i++] = pdev_stats->r0_frags;
+ data[i++] = pdev_stats->r1_frags;
+ data[i++] = pdev_stats->r2_frags;
+ data[i++] = pdev_stats->r3_frags;
+ data[i++] = pdev_stats->htt_msdus;
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = pdev_stats->loc_msdus;
+ data[i++] = pdev_stats->loc_mpdus;
+ data[i++] = pdev_stats->phy_errs;
+ data[i++] = pdev_stats->phy_err_drop;
+ data[i++] = pdev_stats->mpdu_errs;
+ data[i++] = ar->stats.fw_crash_counter;
+ data[i++] = ar->stats.fw_warm_reset_counter;
+ data[i++] = ar->stats.fw_cold_reset_counter;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
static const struct file_operations fops_fw_dbglog = {
.read = ath10k_read_fw_dbglog,
.write = ath10k_write_fw_dbglog,
@@ -1037,6 +1529,84 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf;
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto err;
+ }
+
+ buf = vmalloc(QCA988X_CAL_DATA_LEN);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+ goto err_vfree;
+ }
+
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ QCA988X_CAL_DATA_LEN);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+ goto err_vfree;
+ }
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_vfree:
+ vfree(buf);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ void *buf = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, QCA988X_CAL_DATA_LEN);
+}
+
+static int ath10k_debug_cal_data_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+ .release = ath10k_debug_cal_data_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -1057,7 +1627,22 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
}
- return 0;
+ if (ar->debug.pktlog_filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->debug.pktlog_filter);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ }
+
+ return ret;
}
void ath10k_debug_stop(struct ath10k *ar)
@@ -1072,6 +1657,8 @@ void ath10k_debug_stop(struct ath10k *ar)
ar->debug.htt_max_amsdu = 0;
ar->debug.htt_max_ampdu = 0;
+
+ ath10k_wmi_pdev_pktlog_disable(ar);
}
static ssize_t ath10k_write_simulate_radar(struct file *file,
@@ -1154,12 +1741,78 @@ static const struct file_operations fops_dfs_stats = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter && (filter != ar->debug.pktlog_filter)) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.pktlog_filter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath10k_read_pktlog_filter,
+ .write = ath10k_write_pktlog_filter,
+ .open = simple_open
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+
return 0;
}
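
The value accepted by pktlog_filter is a bitwise OR of the ATH10K_PKTLOG_* flags declared further below in debug.h; writing 0 disables packet logging again. A trivial sketch that just composes and prints such a mask (the enum mirrors the flag values from debug.h, but the symbol names here are local):

#include <stdio.h>

enum {
	PKTLOG_RX	= 0x01,
	PKTLOG_TX	= 0x02,
	PKTLOG_RCFIND	= 0x04,
	PKTLOG_RCUPDATE	= 0x08,
	PKTLOG_DBG	= 0x10,
};

int main(void)
{
	/* enable rx and tx logging; write this value to pktlog_filter */
	unsigned int filter = PKTLOG_RX | PKTLOG_TX;

	printf("0x%x\n", filter);
	return 0;
}
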
@@ -1167,6 +1820,8 @@ void ath10k_debug_destroy(struct ath10k *ar)
{
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+
+ ath10k_debug_fw_stats_reset(ar);
}
int ath10k_debug_register(struct ath10k *ar)
@@ -1183,11 +1838,14 @@ int ath10k_debug_register(struct ath10k *ar)
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
- init_completion(&ar->debug.event_stats_compl);
+ init_completion(&ar->debug.fw_stats_complete);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
+ debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_reset_stats);
+
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
@@ -1197,6 +1855,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_crash_dump);
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_addr);
+
+ debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_value);
+
+ debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_mem_value);
+
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
@@ -1210,6 +1877,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_dbglog);
+ debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_cal_data);
+
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
@@ -1224,6 +1894,9 @@ int ath10k_debug_register(struct ath10k *ar)
&fops_dfs_stats);
}
+ debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+
return 0;
}
@@ -1260,11 +1933,26 @@ void ath10k_dbg_dump(struct ath10k *ar,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
+ char linebuf[256];
+ unsigned int linebuflen;
+ const void *ptr;
+
if (ath10k_debug_mask & mask) {
if (msg)
ath10k_dbg(ar, mask, "%s\n", msg);
- print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+ }
}
/* tracing code doesn't like null strings :/ */
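
ath10k_dbg_dump now emits the hex dump line by line through dev_printk so each row carries the device prefix. A stand-alone approximation of that 16-bytes-per-row loop (plain printf in place of the kernel's hex_dump_to_buffer/dev_printk, and without the ASCII column) looks like:

#include <stdio.h>

static void dbg_dump(const char *prefix, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	size_t i, j;

	for (i = 0; i < len; i += 16) {
		/* offset prefix, as in the "%s%08x: " format above */
		printf("%s%08zx: ", prefix ? prefix : "", i);
		for (j = i; j < i + 16 && j < len; j++)
			printf("%02x ", p[j]);
		printf("\n");
	}
}

int main(void)
{
	unsigned char sample[40];
	size_t i;

	for (i = 0; i < sizeof(sample); i++)
		sample[i] = (unsigned char)i;

	dbg_dump("wmi: ", sample, sizeof(sample));
	return 0;
}
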
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index b3774f7f492c..1b87a5dbec53 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -35,14 +35,24 @@ enum ath10k_debug_mask {
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
+ ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_ANY = 0xffffffff,
};
+enum ath10k_pktlog_filter {
+ ATH10K_PKTLOG_RX = 0x000000001,
+ ATH10K_PKTLOG_TX = 0x000000002,
+ ATH10K_PKTLOG_RCFIND = 0x000000004,
+ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
+ ATH10K_PKTLOG_ANY = 0x00000001f,
+};
+
extern unsigned int ath10k_debug_mask;
-__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
-__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
-__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
@@ -52,18 +62,22 @@ int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_destroy(struct ath10k *ar);
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
-void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size);
-void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
-
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -92,14 +106,8 @@ static inline void ath10k_debug_unregister(struct ath10k *ar)
{
}
-static inline void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size)
-{
-}
-
-static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev)
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+ struct sk_buff *skb)
{
}
@@ -116,6 +124,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
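[editor's note] The debug.h hunk above resolves the new get_et_* hooks to NULL when CONFIG_ATH10K_DEBUGFS is disabled, so the mac80211 ops table can reference them without extra ifdefs at the use site. The sketch below shows that "stub to NULL when compiled out" pattern; my_ops, my_get_stats and MY_DEBUGFS are hypothetical names, not driver API.

/* Hedged sketch of the "define the hook to NULL when the feature is
 * compiled out" pattern used for the get_et_* hooks above. */
#include <stdio.h>

struct my_ops {
	void (*get_stats)(void);	/* optional hook */
};

#ifdef MY_DEBUGFS
static void my_get_stats(void)
{
	puts("real stats");
}
#else
/* Compiled out: resolve the name to NULL so the ops initializer below
 * needs no #ifdef of its own. */
#define my_get_stats NULL
#endif

static const struct my_ops ops = {
	.get_stats = my_get_stats,
};

int main(void)
{
	if (ops.get_stats)
		ops.get_stats();
	else
		puts("stats hook compiled out");
	return 0;
}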
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 62323fea27e1..0c92e0251e84 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include "core.h"
+#include "debug.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
@@ -31,11 +32,9 @@ struct ath10k_hif_sg_item {
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf,
- unsigned transfer_id);
+ struct sk_buff *wbuf);
int (*rx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf,
- u8 pipe_id);
+ struct sk_buff *wbuf);
};
struct ath10k_hif_ops {
@@ -43,6 +42,12 @@ struct ath10k_hif_ops {
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
+ /* read firmware memory through the diagnose interface */
+ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+
+ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+ int nbytes);
/*
* API to handle HIF-specific BMI message exchanges, this API is
* synchronous and only allowed to be called from a context that
@@ -80,6 +85,10 @@ struct ath10k_hif_ops {
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+ u32 (*read32)(struct ath10k *ar, u32 address);
+
+ void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
/* Power up the device and enter BMI transfer mode for FW download */
int (*power_up)(struct ath10k *ar);
@@ -98,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ if (!ar->hif.ops->diag_write)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
@@ -177,4 +201,25 @@ static inline int ath10k_hif_resume(struct ath10k *ar)
return ar->hif.ops->resume(ar);
}
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+ if (!ar->hif.ops->read32) {
+ ath10k_warn(ar, "hif read32 not supported\n");
+ return 0xdeaddead;
+ }
+
+ return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+ u32 address, u32 data)
+{
+ if (!ar->hif.ops->write32) {
+ ath10k_warn(ar, "hif write32 not supported\n");
+ return;
+ }
+
+ ar->hif.ops->write32(ar, address, data);
+}
+
#endif /* _HIF_H_ */
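[editor's note] The new HIF wrappers above check the ops pointer before dispatching and fall back to a warning plus a poison value (0xdeaddead) for read32, or -EOPNOTSUPP for diag_write, when a bus backend does not implement the op. The sketch below shows that optional-op dispatch pattern; the struct and names are stand-ins, not the driver's HIF API.

/* Illustrative sketch of the optional-op dispatch used by the HIF
 * wrappers above: check the function pointer, then warn and return a
 * sentinel or error code if the backend does not provide it. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct bus_ops {
	uint32_t (*read32)(uint32_t addr);
	int (*diag_write)(uint32_t addr, const void *data, int nbytes);
};

static uint32_t bus_read32(const struct bus_ops *ops, uint32_t addr)
{
	if (!ops->read32) {
		fprintf(stderr, "read32 not supported\n");
		return 0xdeaddead;	/* same poison value as the driver */
	}
	return ops->read32(addr);
}

static int bus_diag_write(const struct bus_ops *ops, uint32_t addr,
			  const void *data, int nbytes)
{
	if (!ops->diag_write)
		return -EOPNOTSUPP;
	return ops->diag_write(addr, data, nbytes);
}

int main(void)
{
	struct bus_ops ops = { 0 };	/* backend without these ops */
	char byte = 0;

	printf("read32 -> 0x%08x\n", bus_read32(&ops, 0x1000));
	printf("diag_write -> %d\n", bus_diag_write(&ops, 0x1000, &byte, 1));
	return 0;
}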
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 676bd4ed969b..f1946a6be442 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -160,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
+ skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
@@ -197,15 +198,18 @@ err_pull:
}
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb,
- unsigned int eid)
+ struct sk_buff *skb)
{
struct ath10k_htc *htc = &ar->htc;
- struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_htc_ep *ep;
if (WARN_ON_ONCE(!skb))
return 0;
+ skb_cb = ATH10K_SKB_CB(skb);
+ ep = &htc->endpoint[skb_cb->eid];
+
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
@@ -317,8 +321,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
}
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb,
- u8 pipe_id)
+ struct sk_buff *skb)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
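[editor's note] The htc.c change above records the endpoint id in the skb's control block at send time, so the tx completion handler can recover it from the buffer itself instead of having the bus layer pass it back. Below is a hedged sketch of that "stash context in the buffer's control block" idea; buf, buf_ctx and the field names are illustrative stand-ins for sk_buff and ath10k_skb_cb.

/* Hedged sketch of storing per-buffer context so the completion path
 * needs only the buffer, mirroring the eid change above. */
#include <stdio.h>

struct buf {
	unsigned char cb[8];	/* control block, like skb->cb */
	int len;
};

struct buf_ctx {
	int eid;		/* endpoint id recorded at send time */
};

#define BUF_CTX(b) ((struct buf_ctx *)(b)->cb)

static void send_buf(struct buf *b, int eid)
{
	BUF_CTX(b)->eid = eid;	/* completion will read this back */
	/* ... hand the buffer to the transport ... */
}

static void tx_complete(struct buf *b)
{
	/* No eid argument needed; recover it from the buffer itself. */
	printf("completed buffer of len %d on endpoint %d\n",
	       b->len, BUF_CTX(b)->eid);
}

int main(void)
{
	struct buf b = { .len = 128 };

	send_buf(&b, 2);
	tx_complete(&b);
	return 0;
}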
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 3b44217a6c19..1bd5545af903 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -126,6 +126,7 @@ enum htt_data_tx_ext_tid {
* (HL hosts manage queues on the host )
* more_in_batch: only for HL hosts. indicates if more packets are
* pending. this allows target to wait and aggregate
+ * freq: 0 means home channel of given vdev. intended for offchannel
*/
struct htt_data_tx_desc {
u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
@@ -133,7 +134,8 @@ struct htt_data_tx_desc {
__le16 len;
__le16 id;
__le32 frags_paddr;
- __le32 peerid;
+ __le16 peerid;
+ __le16 freq;
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
@@ -156,6 +158,9 @@ enum htt_rx_ring_flags {
HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
};
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+
struct htt_rx_ring_setup_ring {
__le32 fw_idx_shadow_reg_paddr;
__le32 rx_ring_base_paddr;
@@ -725,7 +730,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
*/
struct htt_pktlog_msg {
u8 pad[3];
- __le32 payload[1 /* or more */];
+ u8 payload[0];
} __packed;
struct htt_dbg_stats_rx_reorder_stats {
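[editor's note] The htt.h hunks above split the 32-bit peerid field of the tx descriptor into two 16-bit fields (peerid and freq) and turn the pktlog payload into a zero-length array. The sketch below checks the layout consequences: the descriptor does not grow, and the payload array contributes nothing to sizeof(). Plain uint types stand in for the kernel's __le16/__le32, and the struct names are illustrative.

/* Layout check for the descriptor changes above: two 16-bit fields fit
 * in the old 32-bit slot, and a flexible payload array adds no size. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct desc_old {
	uint32_t frags_paddr;
	uint32_t peerid;
} __attribute__((packed));

struct desc_new {
	uint32_t frags_paddr;
	uint16_t peerid;
	uint16_t freq;		/* 0 = home channel of the vdev */
} __attribute__((packed));

struct pktlog_msg {
	uint8_t pad[3];
	uint8_t payload[];	/* flexible array member, like payload[0] above */
} __attribute__((packed));

int main(void)
{
	printf("old desc: %zu bytes, new desc: %zu bytes\n",
	       sizeof(struct desc_old), sizeof(struct desc_new));
	printf("pktlog header: %zu bytes, payload offset: %zu\n",
	       sizeof(struct pktlog_msg),
	       offsetof(struct pktlog_msg, payload));
	return 0;
}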
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 60d40a04508b..9c782a42665e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,19 +25,8 @@
#include <linux/log2.h>
-/* slightly larger than one large A-MPDU */
-#define HTT_RX_RING_SIZE_MIN 128
-
-/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
-#define HTT_RX_RING_SIZE_MAX 2048
-
-#define HTT_RX_AVG_FRM_BYTES 1000
-
-/* ms, very conservative */
-#define HTT_RX_HOST_LATENCY_MAX_MS 20
-
-/* ms, conservative */
-#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+#define HTT_RX_RING_SIZE 1024
+#define HTT_RX_RING_FILL_LEVEL 1000
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -45,68 +34,6 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
-static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
-{
- int size;
-
- /*
- * It is expected that the host CPU will typically be able to
- * service the rx indication from one A-MPDU before the rx
- * indication from the subsequent A-MPDU happens, roughly 1-2 ms
- * later. However, the rx ring should be sized very conservatively,
- * to accomodate the worst reasonable delay before the host CPU
- * services a rx indication interrupt.
- *
- * The rx ring need not be kept full of empty buffers. In theory,
- * the htt host SW can dynamically track the low-water mark in the
- * rx ring, and dynamically adjust the level to which the rx ring
- * is filled with empty buffers, to dynamically meet the desired
- * low-water mark.
- *
- * In contrast, it's difficult to resize the rx ring itself, once
- * it's in use. Thus, the ring itself should be sized very
- * conservatively, while the degree to which the ring is filled
- * with empty buffers should be sized moderately conservatively.
- */
-
- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
- size =
- htt->max_throughput_mbps +
- 1000 /
- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
-
- if (size < HTT_RX_RING_SIZE_MIN)
- size = HTT_RX_RING_SIZE_MIN;
-
- if (size > HTT_RX_RING_SIZE_MAX)
- size = HTT_RX_RING_SIZE_MAX;
-
- size = roundup_pow_of_two(size);
-
- return size;
-}
-
-static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
-{
- int size;
-
- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
- size =
- htt->max_throughput_mbps *
- 1000 /
- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
-
- /*
- * Make sure the fill level is at least 1 less than the ring size.
- * Leaving 1 element empty allows the SW to easily distinguish
- * between a full ring vs. an empty ring.
- */
- if (size >= htt->rx_ring.size)
- size = htt->rx_ring.size - 1;
-
- return size;
-}
-
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
struct sk_buff *skb;
@@ -291,50 +218,38 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
- return msdu;
-}
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_CB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
-static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
-{
- struct sk_buff *next;
-
- while (skb) {
- next = skb->next;
- dev_kfree_skb_any(skb);
- skb = next;
- }
+ return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
- struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu,
- u32 *attention)
+ struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
int msdu_len, msdu_chaining = 0;
- struct sk_buff *msdu, *next;
+ struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
lockdep_assert_held(&htt->rx_ring.lock);
- if (htt->rx_confused) {
- ath10k_warn(ar, "htt is confused. refusing rx\n");
- return -1;
- }
-
- msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
- while (msdu) {
+ for (;;) {
int last_msdu, msdu_len_invalid, msdu_chained;
- dma_unmap_single(htt->ar->dev,
- ATH10K_SKB_CB(msdu)->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
- msdu->data, msdu->len + skb_tailroom(msdu));
+ __skb_queue_tail(amsdu, msdu);
rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -353,19 +268,10 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
*/
if (!(__le32_to_cpu(rx_desc->attention.flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
- ath10k_htt_rx_free_msdu_chain(*head_msdu);
- *head_msdu = NULL;
- msdu = NULL;
- ath10k_err(ar, "htt rx stopped. cannot recover\n");
- htt->rx_confused = true;
- break;
+ __skb_queue_purge(amsdu);
+ return -EIO;
}
- *attention |= __le32_to_cpu(rx_desc->attention.flags) &
- (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
- RX_ATTENTION_FLAGS_DECRYPT_ERR |
- RX_ATTENTION_FLAGS_FCS_ERR |
- RX_ATTENTION_FLAGS_MGMT_TYPE);
/*
* Copy the FW rx descriptor for this MSDU from the rx
* indication message into the MSDU's netbuf. HL uses the
@@ -422,43 +328,32 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
msdu_len -= msdu->len;
- /* FIXME: Do chained buffers include htt_rx_desc or not? */
+ /* Note: Chained buffers do not contain rx descriptor */
while (msdu_chained--) {
- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
-
- dma_unmap_single(htt->ar->dev,
- ATH10K_SKB_CB(next)->paddr,
- next->len + skb_tailroom(next),
- DMA_FROM_DEVICE);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
- "htt rx chained: ", next->data,
- next->len + skb_tailroom(next));
-
- skb_trim(next, 0);
- skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
- msdu_len -= next->len;
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
- msdu->next = next;
- msdu = next;
+ __skb_queue_tail(amsdu, msdu);
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= msdu->len;
msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- if (last_msdu) {
- msdu->next = NULL;
- break;
- }
+ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
+ sizeof(*rx_desc) - sizeof(u32));
- next = ath10k_htt_rx_netbuf_pop(htt);
- msdu->next = next;
- msdu = next;
+ if (last_msdu)
+ break;
}
- *tail_msdu = msdu;
- if (*head_msdu == NULL)
+ if (skb_queue_empty(amsdu))
msdu_chaining = -1;
/*
@@ -492,25 +387,20 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
- htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+ htt->rx_confused = false;
+
+ /* XXX: The fill level could be changed during runtime in response to
+ * the host processing latency. Is this really worth it?
+ */
+ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+ htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
- htt->rx_ring.size_mask = htt->rx_ring.size - 1;
-
- /*
- * Set the initial value for the level to which the rx ring
- * should be filled, based on the max throughput and the
- * worst likely latency for the host to fill the rx ring
- * with new buffers. In theory, this fill level can be
- * dynamically adjusted from the initial value set here, to
- * reflect the actual host latency rather than a
- * conservative assumption about the host latency.
- */
- htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
-
htt->rx_ring.netbufs_ring =
kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
GFP_KERNEL);
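[editor's note] The allocation hunk above switches to a fixed HTT_RX_RING_SIZE of 1024 and derives size_mask = size - 1, keeping the is_power_of_2() check because ring indices are wrapped with the mask. The sketch below illustrates why that only works for power-of-two sizes; the values are copied from the hunk, the rest is illustrative.

/* Hedged sketch of mask-based ring index wrapping: "idx & (size - 1)"
 * equals "idx % size" only when size is a power of two. */
#include <stdio.h>

#define RING_SIZE 1024			/* HTT_RX_RING_SIZE in the driver */
#define RING_MASK (RING_SIZE - 1)	/* rx_ring.size_mask */

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int idx;

	if (!is_power_of_2(RING_SIZE)) {
		fprintf(stderr, "ring size is not power of 2\n");
		return 1;
	}

	/* Advancing past the end wraps back to the start via the mask. */
	for (idx = RING_SIZE - 2; idx < RING_SIZE + 2; idx++)
		printf("idx %u -> slot %u\n", idx, idx & RING_MASK);

	return 0;
}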
@@ -581,73 +471,50 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return 4;
+ return IEEE80211_WEP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
- case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
+ return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
- return 8;
- case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
+ return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
}
- ath10k_warn(ar, "unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
+#define MICHAEL_MIC_LEN 8
+
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- case HTT_RX_MPDU_ENCRYPT_WEP128:
- case HTT_RX_MPDU_ENCRYPT_WAPI:
- return 0;
+ return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return 4;
+ return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
- return 8;
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
}
- ath10k_warn(ar, "unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
-/* Applies for first msdu in chain, before altering it. */
-static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- enum rx_msdu_decap_format fmt;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
-
- if (fmt == RX_MSDU_DECAP_RAW)
- return (void *)skb->data;
-
- return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
-}
-
-/* This function only applies for first msdu in an msdu chain */
-static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
-{
- u8 *qc;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- if (qc[0] & 0x80)
- return true;
- }
- return false;
-}
-
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
@@ -682,23 +549,34 @@ static const u8 rx_legacy_rate_idx[] = {
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
- enum ieee80211_band band,
- u8 info0, u32 info1, u32 info2,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
{
+ enum ieee80211_band band;
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 preamble = 0;
+ u32 info1, info2, info3;
- /* Check if valid fields */
- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ /* Band value can't be set as undefined but freq can be 0 - use that to
+ * determine whether band is provided.
+ *
+ * FIXME: Perhaps this can go away if CCK rate reporting is a little
+ * reworked?
+ */
+ if (!status->freq)
return;
- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+ band = status->band;
+ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+
+ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
@@ -725,11 +603,11 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
- /* HT-SIG - Table 20-11 in info1 and info2 */
- mcs = info1 & 0x1F;
+ /* HT-SIG - Table 20-11 in info2 and info3 */
+ mcs = info2 & 0x1F;
nss = mcs >> 3;
- bw = (info1 >> 7) & 1;
- sgi = (info2 >> 7) & 1;
+ bw = (info2 >> 7) & 1;
+ sgi = (info3 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
@@ -740,12 +618,12 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
+ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
TODO check this */
- mcs = (info2 >> 4) & 0x0F;
- nss = ((info1 >> 10) & 0x07) + 1;
- bw = info1 & 3;
- sgi = info2 & 1;
+ mcs = (info3 >> 4) & 0x0F;
+ nss = ((info2 >> 10) & 0x07) + 1;
+ bw = info2 & 3;
+ sgi = info3 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
@@ -773,41 +651,6 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
}
-static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb,
- enum htt_rx_mpdu_encrypt_type enctype,
- enum rx_msdu_decap_format fmt,
- bool dot11frag)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- rx_status->flag &= ~(RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED);
-
- if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
- return;
-
- /*
- * There's no explicit rx descriptor flag to indicate whether a given
- * frame has been decrypted or not. We're forced to use the decap
- * format as an implicit indication. However fragmentation rx is always
- * raw and it probably never reports undecrypted raws.
- *
- * This makes sure sniffed frames are reported as-is without stripping
- * the protected flag.
- */
- if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
- return;
-
- rx_status->flag |= RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
- ~IEEE80211_FCTL_PROTECTED);
-}
-
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
@@ -828,6 +671,72 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
return true;
}
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: Get real NF */
+ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_comb;
+ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+ * means all prior MSDUs in a PPDU are reported to mac80211 without the
+ * TSF. Is it worth holding frames until end of PPDU is known?
+ *
+ * FIXME: Can we get/compute 64bit TSF?
+ */
+ status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ bool is_first_ppdu;
+ bool is_last_ppdu;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ is_first_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+ is_last_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+ if (is_first_ppdu) {
+ /* New PPDU starts so clear out the old per-PPDU status. */
+ status->freq = 0;
+ status->rate_idx = 0;
+ status->vht_nss = 0;
+ status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
+ status->flag &= ~(RX_FLAG_HT |
+ RX_FLAG_VHT |
+ RX_FLAG_SHORT_GI |
+ RX_FLAG_40MHZ |
+ RX_FLAG_MACTIME_END);
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_signal(ar, status, rxd);
+ ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_rates(ar, status, rxd);
+ }
+
+ if (is_last_ppdu)
+ ath10k_htt_rx_h_mactime(ar, status, rxd);
+}
+
static const char * const tid_to_ac[] = {
"BE",
"BK",
@@ -892,6 +801,8 @@ static void ath10k_process_rx(struct ath10k *ar,
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
+ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_rx_payload(ar, skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
}
@@ -902,187 +813,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
-static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb_in)
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
{
- struct ath10k *ar = htt->ar;
+ struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
- struct sk_buff *skb = skb_in;
- struct sk_buff *first;
- enum rx_msdu_decap_format fmt;
- enum htt_rx_mpdu_encrypt_type enctype;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!is_first)))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+ return;
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This would
+ * also make sense for software based decryption (which is not
+ * implemented in ath10k).
+ *
+ * If there's no error then the frame is decrypted. At least that is
+ * the case for frames that come in via fragmented rx indication.
+ */
+ if (!is_decrypted)
+ return;
+
+ /* The payload is decrypted so strip crypto params. Start from tail
+ * since hdr is used to compute some stuff.
+ */
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+
+ /* MMIC */
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ skb_trim(msdu, msdu->len - 8);
+
+ /* Head */
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
struct ieee80211_hdr *hdr;
- u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
- unsigned int hdr_len;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
- rxd = (void *)skb->data - sizeof(*rxd);
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ /* Delivered decapped frame:
+ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ *
+ * Note: The nwifi header doesn't have QoS Control and is
+ * (always?) a 3addr frame.
+ *
+ * Note2: There's no A-MSDU subframe header. Even if it's part
+ * of an A-MSDU.
+ */
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, hdr_len);
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(hdr_buf, hdr, hdr_len);
- hdr = (struct ieee80211_hdr *)hdr_buf;
-
- first = skb;
- while (skb) {
- void *decap_hdr;
- int len;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
- decap_hdr = (void *)rxd->rx_hdr_status;
-
- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
-
- /* First frame in an A-MSDU chain has more decapped data. */
- if (skb == first) {
- len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
- len += round_up(ath10k_htt_rx_crypto_param_len(ar,
- enctype), 4);
- decap_hdr += len;
- }
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
- switch (fmt) {
- case RX_MSDU_DECAP_RAW:
- /* remove trailing FCS */
- skb_trim(skb, skb->len - FCS_LEN);
- break;
- case RX_MSDU_DECAP_NATIVE_WIFI:
- /* pull decapped header and copy SA & DA */
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
- ether_addr_copy(da, ieee80211_get_DA(hdr));
- ether_addr_copy(sa, ieee80211_get_SA(hdr));
- skb_pull(skb, hdr_len);
-
- /* push original 802.11 header */
- hdr = (struct ieee80211_hdr *)hdr_buf;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
-
- /* original A-MSDU header has the bit set but we're
- * not including A-MSDU subframe header */
- hdr = (struct ieee80211_hdr *)skb->data;
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-
- /* original 802.11 header has a different DA and in
- * case of 4addr it may also have different SA
- */
- ether_addr_copy(ieee80211_get_DA(hdr), da);
- ether_addr_copy(ieee80211_get_SA(hdr), sa);
- break;
- case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* strip ethernet header and insert decapped 802.11
- * header, amsdu subframe header and rfc1042 header */
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
- len = 0;
- len += sizeof(struct rfc1042_hdr);
- len += sizeof(struct amsdu_subframe_hdr);
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+ struct sk_buff *msdu,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_first, is_last, is_amsdu;
- skb_pull(skb, sizeof(struct ethhdr));
- memcpy(skb_push(skb, len), decap_hdr, len);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* insert decapped 802.11 header making a singly
- * A-MSDU */
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- break;
- }
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
- skb_in = skb;
- ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
- false);
- skb = skb->next;
- skb_in->next = NULL;
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+ is_amsdu = !(is_first && is_last);
- if (skb)
- rx_status->flag |= RX_FLAG_AMSDU_MORE;
- else
- rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
+ rfc1042 = hdr;
+
+ if (is_first) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- ath10k_process_rx(htt->ar, rx_status, skb_in);
+ rfc1042 += round_up(hdr_len, 4) +
+ round_up(crypto_len, 4);
}
- /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
- * monitor interface active for sniffing purposes. */
+ if (is_amsdu)
+ rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+ return rfc1042;
}
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb)
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
- struct ath10k *ar = htt->ar;
- struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
- enum rx_msdu_decap_format fmt;
- enum htt_rx_mpdu_encrypt_type enctype;
- int hdr_len;
+ struct ethhdr *eth;
+ size_t hdr_len;
void *rfc1042;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
- /* This shouldn't happen. If it does than it may be a FW bug. */
- if (skb->next) {
- ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- }
+ /* Delivered decapped frame:
+ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+ * [payload]
+ */
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+ sizeof(struct rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
- switch (fmt) {
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+
+ /* Delivered decapped frame:
+ * [amsdu header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ * [payload]
+ */
+
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+ struct ieee80211_hdr *hdr;
+
+ /* First msdu's decapped header:
+ * [802.11 header] <-- padded to 4 bytes long
+ * [crypto param] <-- padded to 4 bytes long
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ *
+ * Other (2nd, 3rd, ..) msdu's decapped header:
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ */
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ switch (decap) {
case RX_MSDU_DECAP_RAW:
- /* remove trailing FCS */
- skb_trim(skb, skb->len - FCS_LEN);
+ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+ is_decrypted);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* Pull decapped header */
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
- skb_pull(skb, hdr_len);
-
- /* Push original header */
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* strip ethernet header and insert decapped 802.11 header and
- * rfc1042 header */
-
- rfc1042 = hdr;
- rfc1042 += roundup(hdr_len, 4);
- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
- enctype), 4);
-
- skb_pull(skb, sizeof(struct ethhdr));
- memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
- rfc1042, sizeof(struct rfc1042_hdr));
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* remove A-MSDU subframe header and insert
- * decapped 802.11 header. rfc1042 header is already there */
-
- skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
break;
}
-
- ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
-
- ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -1116,10 +1103,128 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
-static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
{
- struct sk_buff *next = msdu_head->next;
- struct sk_buff *to_free = next;
+ struct sk_buff *first;
+ struct sk_buff *last;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ u8 first_hdr[64];
+ u8 *qos;
+ size_t hdr_len;
+ bool has_fcs_err;
+ bool has_crypto_err;
+ bool has_tkip_err;
+ bool has_peer_idx_invalid;
+ bool is_decrypted;
+ u32 attention;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
+ * decapped header. It'll be used for undecapping of each MSDU.
+ */
+ hdr = (void *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(first_hdr, hdr, hdr_len);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ hdr = (void *)first_hdr;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last = skb_peek_tail(amsdu);
+ rxd = (void *)last->data - sizeof(*rxd);
+ attention = __le32_to_cpu(rxd->attention.flags);
+
+ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+ /* Note: If hardware captures an encrypted frame that it can't decrypt,
+ * e.g. due to fcs error, missing peer or invalid key data it will
+ * report the frame as raw.
+ */
+ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ !has_fcs_err &&
+ !has_crypto_err &&
+ !has_peer_idx_invalid);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (has_fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (has_tkip_err)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+
+ skb_queue_walk(amsdu, msdu) {
+ ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ is_decrypted);
+
+ /* Undecapping involves copying the original 802.11 header back
+ * to sk_buff. If frame is protected and hardware has decrypted
+ * it then remove the protected bit.
+ */
+ if (!is_decrypted)
+ continue;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *msdu;
+
+ while ((msdu = __skb_dequeue(amsdu))) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_empty(amsdu))
+ status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
+ ath10k_process_rx(ar, status, msdu);
+ }
+}
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
+{
+ struct sk_buff *skb, *first;
int space;
int total_len = 0;
@@ -1130,113 +1235,142 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
* skb?
*/
- msdu_head->next = NULL;
+ first = __skb_dequeue(amsdu);
/* Allocate total length all at once. */
- while (next) {
- total_len += next->len;
- next = next->next;
- }
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
- space = total_len - skb_tailroom(msdu_head);
+ space = total_len - skb_tailroom(first);
if ((space > 0) &&
- (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
/* TODO: bump some rx-oom error stat */
/* put it back together so we can free the
* whole list at once.
*/
- msdu_head->next = to_free;
+ __skb_queue_head(amsdu, first);
return -1;
}
/* Walk list again, copying contents into
* msdu_head
*/
- next = to_free;
- while (next) {
- skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
- next->len);
- next = next->next;
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
}
- /* If here, we have consolidated skb. Free the
- * fragments and pass the main skb on up the
- * stack.
- */
- ath10k_htt_rx_free_msdu_chain(to_free);
+ __skb_queue_head(amsdu, first);
return 0;
}
-static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
- struct sk_buff *head,
- enum htt_rx_mpdu_status status,
- bool channel_set,
- u32 attention)
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ bool chained)
{
- struct ath10k *ar = htt->ar;
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
- if (head->len == 0) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
- return false;
- }
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
- if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx dropping due to decrypt-err\n");
- return false;
- }
+ if (!chained)
+ return;
- if (!channel_set) {
- ath10k_warn(ar, "no channel configured; ignoring frame!\n");
- return false;
+ /* FIXME: Current unchaining logic can only handle simple case of raw
+ * msdu chaining. If decapping is other than raw the chaining may be
+ * more complex and this isn't handled by the current code. Don't even
+ * try re-constructing such frames - it'll be pretty much garbage.
+ */
+ if (decap != RX_MSDU_DECAP_RAW ||
+ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ __skb_queue_purge(amsdu);
+ return;
}
- /* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
- attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ ath10k_unchain_msdu(amsdu);
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ bool is_mgmt;
+ bool has_fcs_err;
+
+ msdu = skb_peek(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ /* FIXME: It might be a good idea to do some fuzzy-testing to drop
+ * invalid/dangerous frames.
+ */
+
+ if (!rx_status->freq) {
+ ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
return false;
}
- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
- !htt->ar->monitor_started) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx ignoring frame w/ status %d\n",
- status);
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+ has_fcs_err = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
+
+ /* Management frames are handled via WMI events. The pros of such
+ * approach is that channel is explicitly provided in WMI events
+ * whereas HTT doesn't provide channel information for Rxed frames.
+ *
+ * However some firmware revisions don't report corrupted frames via
+ * WMI so don't drop them.
+ */
+ if (is_mgmt && !has_fcs_err) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx CAC running\n");
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
}
return true;
}
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (skb_queue_empty(amsdu))
+ return;
+
+ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+ return;
+
+ __skb_queue_purge(amsdu);
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
- struct htt_rx_desc *rxd;
- enum htt_rx_mpdu_status status;
- struct ieee80211_hdr *hdr;
+ struct sk_buff_head amsdu;
int num_mpdu_ranges;
- u32 attention;
int fw_desc_len;
u8 *fw_desc;
- bool channel_set;
- int i, j;
- int ret;
+ int i, ret, mpdu_count = 0;
lockdep_assert_held(&htt->rx_ring.lock);
+ if (htt->rx_confused)
+ return;
+
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@@ -1244,92 +1378,33 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
- /* Fill this once, while this is per-ppdu */
- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
- memset(rx_status, 0, sizeof(*rx_status));
- rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rx->ppdu.combined_rssi;
- }
-
- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
- /* TSF available only in 32-bit */
- rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
- rx_status->flag |= RX_FLAG_MACTIME_END;
- }
-
- channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
-
- if (channel_set) {
- ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
- rx->ppdu.info0,
- __le32_to_cpu(rx->ppdu.info1),
- __le32_to_cpu(rx->ppdu.info2),
- rx_status);
- }
-
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
- for (i = 0; i < num_mpdu_ranges; i++) {
- status = mpdu_ranges[i].mpdu_range_status;
-
- for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
- struct sk_buff *msdu_head, *msdu_tail;
-
- attention = 0;
- msdu_head = NULL;
- msdu_tail = NULL;
- ret = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail,
- &attention);
-
- if (ret < 0) {
- ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
- ret);
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- rxd = container_of((void *)msdu_head->data,
- struct htt_rx_desc,
- msdu_payload);
-
- if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
- status,
- channel_set,
- attention)) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (ret > 0 &&
- ath10k_unchain_msdu(msdu_head) < 0) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- else
- rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
-
- if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- else
- rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
-
- hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
-
- if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
- else
- ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
+ for (i = 0; i < num_mpdu_ranges; i++)
+ mpdu_count += mpdu_ranges[i].mpdu_count;
+
+ while (mpdu_count--) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
+ &fw_desc_len, &amsdu);
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ break;
}
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
}
tasklet_schedule(&htt->rx_replenish_task);
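[editor's note] The reworked rx handler above collects each MPDU into a sk_buff_head and pushes it through a fixed sequence of stages (h_ppdu, h_unchain, h_filter, h_mpdu, h_deliver); a stage that wants to drop frames simply purges the queue, so the caller never branches on per-stage errors. Below is a hedged user-space analog of that staged pipeline; the queue type and stage bodies are simplified stand-ins, not the driver's logic.

/* Hedged analog of the staged rx pipeline above: every stage takes the
 * same queue and may drain it instead of returning an error. */
#include <stdio.h>

struct frame_queue {
	int frames;		/* frames still queued */
	int freq;		/* per-PPDU channel, 0 = unknown */
	int decrypted;		/* per-MPDU state */
};

static void h_ppdu(struct frame_queue *q)    { q->freq = 2412; }
static void h_unchain(struct frame_queue *q) { (void)q; /* merge chained buffers */ }

static void h_filter(struct frame_queue *q)
{
	if (!q->freq)
		q->frames = 0;	/* no channel known: purge the whole batch */
}

static void h_mpdu(struct frame_queue *q)    { q->decrypted = 1; }

static void h_deliver(struct frame_queue *q)
{
	while (q->frames-- > 0)
		printf("deliver frame (freq %d, decrypted %d)\n",
		       q->freq, q->decrypted);
}

int main(void)
{
	struct frame_queue q = { .frames = 3 };

	/* Same fixed stage order as the driver's rx indication handler. */
	h_ppdu(&q);
	h_unchain(&q);
	h_filter(&q);
	h_mpdu(&q);
	h_deliver(&q);
	return 0;
}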
@@ -1339,108 +1414,44 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct ath10k *ar = htt->ar;
- struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_encrypt_type enctype;
- struct htt_rx_desc *rxd;
- enum rx_msdu_decap_format fmt;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct ieee80211_hdr *hdr;
+ struct sk_buff_head amsdu;
int ret;
- bool tkip_mic_err;
- bool decrypt_err;
u8 *fw_desc;
- int fw_desc_len, hdrlen, paramlen;
- int trim;
- u32 attention = 0;
+ int fw_desc_len;
fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
fw_desc = (u8 *)frag->fw_msdu_rx_desc;
- msdu_head = NULL;
- msdu_tail = NULL;
+ __skb_queue_head_init(&amsdu);
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail,
- &attention);
+ &amsdu);
spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->rx_replenish_task);
+
ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
- ath10k_htt_rx_free_msdu_chain(msdu_head);
+ __skb_queue_purge(&amsdu);
return;
}
- /* FIXME: implement signal strength */
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
- hdr = (struct ieee80211_hdr *)msdu_head->data;
- rxd = (void *)msdu_head->data - sizeof(*rxd);
- tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
- decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
-
- if (fmt != RX_MSDU_DECAP_RAW) {
- ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
- }
-
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
- true);
- msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
- if (tkip_mic_err)
- ath10k_warn(ar, "tkip mic error\n");
-
- if (decrypt_err) {
- ath10k_warn(ar, "decryption err in fragmented rx\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
- }
-
- if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
-
- /* It is more efficient to move the header than the payload */
- memmove((void *)msdu_head->data + paramlen,
- (void *)msdu_head->data,
- hdrlen);
- skb_pull(msdu_head, paramlen);
- hdr = (struct ieee80211_hdr *)msdu_head->data;
- }
-
- /* remove trailing FCS */
- trim = 4;
-
- /* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
-
- /* last fragment of TKIP frags has MIC */
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
- enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- trim += 8;
-
- if (trim > msdu_head->len) {
- ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
+ if (skb_queue_len(&amsdu) != 1) {
+ ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
+ __skb_queue_purge(&amsdu);
+ return;
}
- skb_trim(msdu_head, msdu_head->len - trim);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
- msdu_head->data, msdu_head->len);
- ath10k_process_rx(htt->ar, rx_status, msdu_head);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-end:
if (fw_desc_len > 0) {
ath10k_dbg(ar, ATH10K_DBG_HTT,
"expecting more fragmented rx in one indication %d\n",
@@ -1674,6 +1685,15 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_DELBA:
ath10k_htt_rx_delba(ar, resp);
break;
+ case HTT_T2H_MSG_TYPE_PKTLOG: {
+ struct ath10k_pktlog_hdr *hdr =
+ (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
+
+ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+ sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ }
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
/* Ignore this event because mac80211 takes care of Rx
* aggregation reordering.
@@ -1681,8 +1701,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
default:
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
- resp->hdr.msg_type);
+ ath10k_warn(ar, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
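[editor's note] The new HTT_T2H_MSG_TYPE_PKTLOG case above traces sizeof(*hdr) + hdr->size bytes, i.e. the fixed pktlog header plus the payload length the header itself declares (the header layout is the ath10k_pktlog_hdr added to hw.h further down). The sketch below just checks that arithmetic; host-order uint16_t stands in for __le16/__le16_to_cpu(), and the struct is a copy of the layout, not driver code.

/* Length check for the pktlog trace above: fixed header plus the
 * self-described payload size. */
#include <stdint.h>
#include <stdio.h>

struct pktlog_hdr {
	uint16_t flags;
	uint16_t missed_cnt;
	uint16_t log_type;
	uint16_t size;		/* payload bytes that follow the header */
	uint32_t timestamp;
	uint8_t payload[];
} __attribute__((packed));

int main(void)
{
	struct pktlog_hdr hdr = { .size = 48 };
	size_t trace_len = sizeof(hdr) + hdr.size;

	printf("header %zu + payload %u = trace %zu bytes\n",
	       sizeof(hdr), (unsigned int)hdr.size, trace_len);
	return 0;
}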
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index bd87a35201d8..4bc51d8a14a3 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -92,7 +92,6 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
- init_waitqueue_head(&htt->empty_tx_wq);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
@@ -555,14 +554,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
- skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
+ skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
- (u32)skb_cb->paddr, vdev_id, tid);
+ (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
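[editor's note] The htt_tx.c hunk above writes the new 16-bit freq field from the skb control block; per the htt.h comment earlier, 0 means "use the vdev's home channel" and a non-zero value is intended for off-channel transmission. The tiny sketch below only illustrates that convention; the helper name and parameters are hypothetical, not driver API.

/* Hedged sketch of the freq convention noted in htt.h above: 0 = home
 * channel, non-zero MHz value = requested off-channel tx. */
#include <stdint.h>
#include <stdio.h>

static uint16_t tx_desc_freq(int offchannel, uint16_t offchan_freq_mhz)
{
	/* Only off-channel tx sets a value; normal tx leaves 0. */
	return offchannel ? offchan_freq_mhz : 0;
}

int main(void)
{
	printf("normal tx freq field:      %u\n", tx_desc_freq(0, 0));
	printf("off-channel tx freq field: %u\n", tx_desc_freq(1, 5180));
	return 0;
}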
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 3cf5702c1e7e..dfedfd0e0f34 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,15 +20,16 @@
#include "targaddrs.h"
+#define ATH10K_FW_DIR "ath10k"
+
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
-#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
@@ -43,6 +44,8 @@
#define REG_DUMP_COUNT_QCA988X 60
+#define QCA988X_CAL_DATA_LEN 2116
+
struct ath10k_fw_ie {
__le32 id;
__le32 len;
@@ -78,6 +81,15 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type;
+ __le16 size;
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -85,11 +97,13 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_DMA_BURST_SIZE 0
#define TARGET_MAC_AGGR_DELIM 0
#define TARGET_AST_SKID_LIMIT 16
-#define TARGET_NUM_PEERS 16
+#define TARGET_NUM_STATIONS 16
+#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
+ (TARGET_NUM_VDEVS))
#define TARGET_NUM_OFFLOAD_PEERS 0
#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_NUM_PEER_KEYS 2
-#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
+#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
@@ -120,12 +134,15 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
-#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
-#define TARGET_10X_NUM_PEERS_MAX 128
+#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
-#define TARGET_10X_NUM_TIDS 256
+#define TARGET_10X_NUM_TIDS_MAX 256
+#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_NUM_PEERS) * 2)
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
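The 10.x peer/TID budget above is easiest to sanity-check numerically. A minimal standalone sketch (plain userspace C mirroring the macro arithmetic; TARGET_10X_NUM_VDEVS is assumed to be 16, since its definition is not part of this hunk):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors the 10.x macros from hw.h above; the VDEVS value is an assumption. */
#define TARGET_10X_NUM_VDEVS      16
#define TARGET_10X_NUM_STATIONS   128
#define TARGET_10X_NUM_PEERS      ((TARGET_10X_NUM_STATIONS) + (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_TIDS_MAX   256
#define TARGET_10X_NUM_TIDS       MIN((TARGET_10X_NUM_TIDS_MAX), (TARGET_10X_NUM_PEERS) * 2)

int main(void)
{
    printf("peers = %d\n", TARGET_10X_NUM_PEERS);  /* 128 + 16 = 144 */
    printf("tids  = %d\n", TARGET_10X_NUM_TIDS);   /* min(256, 288) = 256 */
    return 0;
}

So the TID pool still clamps at 256 for 10.x firmware; only the peer count grows, by one extra slot per vdev.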
@@ -279,6 +296,7 @@ enum ath10k_mcast2ucast_mode {
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 46709301a51e..c4005670cba2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -136,7 +136,9 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
if (ret)
return ret;
+ spin_lock_bh(&ar->data_lock);
peer->keys[i] = arvif->wep_keys[i];
+ spin_unlock_bh(&ar->data_lock);
}
return 0;
@@ -173,12 +175,39 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
+ spin_lock_bh(&ar->data_lock);
peer->keys[i] = NULL;
+ spin_unlock_bh(&ar->data_lock);
}
return first_errno;
}
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx)
+{
+ struct ath10k_peer *peer;
+ int i;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ /* We don't know which vdev this peer belongs to,
+ * since WMI doesn't give us that information.
+ *
+ * FIXME: multi-bss needs to be handled.
+ */
+ peer = ath10k_peer_find(ar, 0, addr);
+ if (!peer)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+ return true;
+ }
+
+ return false;
+}
+
static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key)
{
@@ -326,6 +355,9 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
lockdep_assert_held(&ar->conf_mutex);
+ if (ar->num_peers >= ar->max_num_peers)
+ return -ENOBUFS;
+
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
@@ -339,9 +371,8 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
addr, vdev_id, ret);
return ret;
}
- spin_lock_bh(&ar->data_lock);
+
ar->num_peers++;
- spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -391,15 +422,11 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
return 0;
}
-static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
- if (value != 0xFFFFFFFF)
- value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
- ATH10K_RTS_MAX);
-
vdev_param = ar->wmi.vdev_param->rts_threshold;
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
@@ -432,9 +459,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ret)
return ret;
- spin_lock_bh(&ar->data_lock);
ar->num_peers--;
- spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -471,20 +496,59 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
- ar->num_peers = 0;
spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
}
/************************/
/* Interface management */
/************************/
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!arvif->beacon)
+ return;
+
+ if (!arvif->beacon_buf)
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_sent = false;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_mac_vif_beacon_free(arvif);
+
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+}
+
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
if (ret == 0)
@@ -517,6 +581,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.max_reg_power = channel->max_reg_power * 2;
arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ reinit_completion(&ar->vdev_setup_done);
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
@@ -564,6 +630,8 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
+ reinit_completion(&ar->vdev_setup_done);
+
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
@@ -590,9 +658,9 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return -ENOMEM;
}
- bit = ffs(ar->free_vdev_map);
+ bit = __ffs64(ar->free_vdev_map);
- ar->monitor_vdev_id = bit - 1;
+ ar->monitor_vdev_id = bit;
ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
WMI_VDEV_TYPE_MONITOR,
@@ -603,7 +671,7 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return ret;
}
- ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
+ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
@@ -623,7 +691,7 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar)
return ret;
}
- ar->free_vdev_map |= 1 << ar->monitor_vdev_id;
+ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
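The free_vdev_map changes here (ffs() -> __ffs64(), 1 -> 1LL shifts) make the bitmap 64-bit clean so vdev ids above 31 stay representable. A minimal standalone sketch of the allocate/free pattern, using __builtin_ctzll() as a stand-in for the kernel's __ffs64() (both return the index of the lowest set bit; the map must be non-zero):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's __ffs64(): index of the lowest set bit.
 * The caller must ensure the map is non-zero. */
static unsigned int ffs64(uint64_t map)
{
    return (unsigned int)__builtin_ctzll(map);
}

int main(void)
{
    uint64_t free_vdev_map = ~0ULL;   /* all vdev ids free */
    unsigned int id;

    /* allocate: pick the lowest free id and clear its bit */
    id = ffs64(free_vdev_map);
    free_vdev_map &= ~(1ULL << id);
    printf("created vdev %u, map %016llx\n", id,
           (unsigned long long)free_vdev_map);

    /* free: set the bit again when the vdev is deleted */
    free_vdev_map |= 1ULL << id;
    printf("deleted vdev %u, map %016llx\n", id,
           (unsigned long long)free_vdev_map);
    return 0;
}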
@@ -909,15 +977,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
- if (arvif->beacon) {
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
-
- arvif->beacon = NULL;
- arvif->beacon_sent = false;
- }
+ ath10k_mac_vif_beacon_free(arvif);
spin_unlock_bh(&arvif->ar->data_lock);
return;
@@ -966,14 +1026,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (is_zero_ether_addr(arvif->bssid))
return;
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
- arvif->bssid);
- if (ret) {
- ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- return;
- }
-
memset(arvif->bssid, 0, ETH_ALEN);
return;
@@ -1042,51 +1094,45 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
/* Station management */
/**********************/
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+ struct ieee80211_vif *vif)
+{
+ /* Some firmware revisions have unstable STA powersave when listen
+ * interval is set too high (e.g. 5). The symptoms are firmware doesn't
+ * generate NullFunc frames properly even if buffered frames have been
+ * indicated in Beacon TIM. Firmware would seldom wake up to pull
+ * buffered frames. Often pinging the device from AP would simply fail.
+ *
+ * As a workaround set it to 1.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 1;
+
+ return ar->hw->conf.listen_interval;
+}
+
static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
lockdep_assert_held(&ar->conf_mutex);
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = sta->aid;
arg->peer_flags |= WMI_PEER_AUTH;
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- /*
- * Seems FW have problems with Power Save in STA
- * mode when we setup this parameter to high (eg. 5).
- * Often we see that FW don't send NULL (with clean P flags)
- * frame even there is info about buffered frames in beacons.
- * Sometimes we have to wait more than 10 seconds before FW
- * will wakeup. Often sending one ping from AP to our device
- * just fail (more than 50%).
- *
- * Seems setting this FW parameter to 1 couse FW
- * will check every beacon and will wakup immediately
- * after detection buffered data.
- */
- arg->peer_listen_intval = 1;
- else
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
-
+ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
-
- /*
- * The assoc capabilities are available only in managed mode.
- */
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
- arg->peer_caps = bss_conf->assoc_capability;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
}
static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
@@ -1343,11 +1389,12 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme)
@@ -1359,7 +1406,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
- if (bss_conf->qos)
+ if (vif->bss_conf.qos)
arg->peer_flags |= WMI_PEER_QOS;
break;
default:
@@ -1368,7 +1415,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
@@ -1419,22 +1466,21 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
static int ath10k_peer_assoc_prepare(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
lockdep_assert_held(&ar->conf_mutex);
memset(arg, 0, sizeof(*arg));
- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
- ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, arg);
ath10k_peer_assoc_h_rates(ar, sta, arg);
ath10k_peer_assoc_h_ht(ar, sta, arg);
ath10k_peer_assoc_h_vht(ar, sta, arg);
- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
- ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
@@ -1480,6 +1526,9 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1494,8 +1543,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* before calling ath10k_setup_peer_smps() which might sleep. */
ht_cap = ap_sta->ht_cap;
- ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
- bss_conf, &peer_arg);
+ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
@@ -1523,6 +1571,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ WARN_ON(arvif->is_up);
+
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
@@ -1536,9 +1586,6 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->is_up = true;
}
-/*
- * FIXME: flush TIDs
- */
static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1548,45 +1595,30 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
- /*
- * For some reason, calling VDEV-DOWN before VDEV-STOP
- * makes the FW to send frames via HTT after disassociation.
- * No idea why this happens, even though VDEV-DOWN is supposed
- * to be analogous to link down, so just stop the VDEV.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
- arvif->vdev_id);
-
- /* FIXME: check return value */
- ret = ath10k_vdev_stop(arvif);
-
- /*
- * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
- * report beacons from previously associated network through HTT.
- * This in turn would spam mac80211 WARN_ON if we bring down all
- * interfaces as it expects there is no rx when no interface is
- * running.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
- /* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
arvif->def_wep_key_idx = 0;
-
- arvif->is_started = false;
arvif->is_up = false;
}
-static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
- struct ieee80211_sta *sta, bool reassoc)
+static int ath10k_station_assoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -1601,43 +1633,51 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
return ret;
}
- ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
- if (ret) {
- ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- if (!sta->wme && !reassoc) {
- arvif->num_legacy_stations++;
- ret = ath10k_recalc_rtscts_prot(arvif);
+ /* Re-assoc is run only to update supported rates for a given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (!reassoc) {
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
if (ret) {
- ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
- }
- ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
- if (ret) {
- ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
- ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
- if (ret) {
- ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
- return ret;
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
}
return ret;
}
-static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
+static int ath10k_station_disassoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -1729,6 +1769,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->passive = passive;
ch->freq = channel->center_freq;
+ ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
ch->max_power = channel->max_power * 2;
ch->max_reg_power = channel->max_reg_power * 2;
@@ -1983,6 +2024,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
}
}
+static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+{
+ /* FIXME: Not really sure since when the behaviour changed. At some
+ * point new firmware stopped requiring creation of peer entries for
+ * offchannel tx (and actually creating them causes issues with wmi-htc
+ * tx credit replenishment and reliability). Assuming it's at least 3.4
+ * because that's when the `freq` was introduced to TX_FRM HTT command.
+ */
+ return !(ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4);
+}
+
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -2158,10 +2211,10 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_IDLE:
break;
case ATH10K_SCAN_RUNNING:
- case ATH10K_SCAN_ABORTING:
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
- else
+ case ATH10K_SCAN_ABORTING:
+ if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
(ar->scan.state ==
ATH10K_SCAN_ABORTING));
@@ -2327,23 +2380,28 @@ static void ath10k_tx(struct ieee80211_hw *hw,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
- ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+ ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
- skb);
+ if (ath10k_mac_need_offchan_tx_work(ar)) {
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
- skb_queue_tail(&ar->offchan_tx_queue, skb);
- ieee80211_queue_work(hw, &ar->offchan_tx_work);
- return;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return;
+ }
}
ath10k_tx_htt(ar, skb);
}
/* Must not be called with conf_mutex held as workers can use that also. */
-static void ath10k_drain_tx(struct ath10k *ar)
+void ath10k_drain_tx(struct ath10k *ar)
{
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
@@ -2376,16 +2434,8 @@ void ath10k_halt(struct ath10k *ar)
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->beacon)
- continue;
-
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
- }
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
}
@@ -2408,12 +2458,28 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+ /* It is not clear that allowing gaps in chainmask
+ * is helpful. Probably it will not do what the user
+ * is hoping for, so warn in that case.
+ */
+ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+ return;
+
+ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ dbg, cm);
+}
+
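ath10k_check_chain_mask() above accepts only the contiguous masks 0, 1, 3, 7 and 15; anything with a gap (e.g. 0x5, chains 0 and 2) triggers the warning. A tiny standalone check enumerating which 4-bit masks would warn:

#include <stdio.h>

/* Same accept list as ath10k_check_chain_mask() above. */
static int chain_mask_ok(unsigned int cm)
{
    return cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0;
}

int main(void)
{
    unsigned int cm;

    for (cm = 0; cm <= 15; cm++)
        printf("chainmask 0x%x -> %s\n", cm,
               chain_mask_ok(cm) ? "ok" : "would warn");
    return 0;
}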
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_check_chain_mask(ar, tx_ant, "tx");
+ ath10k_check_chain_mask(ar, rx_ant, "rx");
+
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@@ -2677,12 +2743,68 @@ static void ath10k_config_chan(struct ath10k *ar)
ath10k_monitor_recalc(ar);
}
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ WARN_ON(arvif->txpower < 0);
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (WARN_ON(txpower == -1))
+ return -EINVAL;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
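ath10k_mac_txpower_recalc() above applies the most restrictive (minimum) txpower across all vifs, and ath10k_mac_txpower_setup() writes it to both the 2 GHz and 5 GHz pdev limits as txpower * 2 (presumably the WMI limit is in half-dBm steps; treat that unit as an assumption here). A minimal standalone sketch of the recalculation:

#include <stdio.h>

/* Per-vif txpower values in dBm, as mac80211 hands them to the driver. */
static int vif_txpower[] = { 20, 17, 23 };

int main(void)
{
    int i, txpower = -1;

    /* Same "most restrictive wins" rule as ath10k_mac_txpower_recalc(). */
    for (i = 0; i < (int)(sizeof(vif_txpower) / sizeof(vif_txpower[0])); i++) {
        if (txpower == -1 || vif_txpower[i] < txpower)
            txpower = vif_txpower[i];
    }

    /* The pdev limit is then programmed as txpower * 2 (assumed half-dBm units). */
    printf("effective txpower %d dBm -> wmi value %d\n", txpower, txpower * 2);
    return 0;
}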
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
- u32 param;
mutex_lock(&ar->conf_mutex);
@@ -2706,25 +2828,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n",
- hw->conf.power_level);
-
- param = ar->wmi.pdev_param->txpower_limit2g;
- ret = ath10k_wmi_pdev_set_param(ar, param,
- hw->conf.power_level * 2);
- if (ret)
- ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
- hw->conf.power_level, ret);
-
- param = ar->wmi.pdev_param->txpower_limit5g;
- ret = ath10k_wmi_pdev_set_param(ar, param,
- hw->conf.power_level * 2);
- if (ret)
- ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
- hw->conf.power_level, ret);
- }
-
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
@@ -2739,6 +2842,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+ if ((chain_mask & 0x15) == 0x15)
+ return 4;
+ else if ((chain_mask & 0x7) == 0x7)
+ return 3;
+ else if ((chain_mask & 0x3) == 0x3)
+ return 2;
+ return 1;
+}
+
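get_nss_from_chainmask() is used further down both to program the per-vdev NSS when a chainmask is configured and to bound the HT/VHT MCS loops in the bitrate-mask helpers. A standalone sketch of the mapping as written (note that a plain 0xf mask falls through to the 3-stream case, since the 4-stream test requires bits 0, 2 and 4, i.e. 0x15):

#include <stdio.h>

/* Mirrors get_nss_from_chainmask() above. */
static unsigned int nss_from_chainmask(unsigned int cm)
{
    if ((cm & 0x15) == 0x15)
        return 4;
    else if ((cm & 0x7) == 0x7)
        return 3;
    else if ((cm & 0x3) == 0x3)
        return 2;
    return 1;
}

int main(void)
{
    unsigned int masks[] = { 0x1, 0x3, 0x7, 0xf, 0x15 };
    unsigned int i;

    for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
        printf("chainmask 0x%x -> %u spatial stream(s)\n",
               masks[i], nss_from_chainmask(masks[i]));
    return 0;
}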
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -2772,9 +2886,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = -EBUSY;
goto err;
}
- bit = ffs(ar->free_vdev_map);
+ bit = __ffs64(ar->free_vdev_map);
- arvif->vdev_id = bit - 1;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+ bit, ar->free_vdev_map);
+
+ arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
if (ar->p2p)
@@ -2804,8 +2921,39 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
- arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
+ /* Some firmware revisions don't wait for beacon tx completion before
+ * sending another SWBA event. This could lead to hardware using old
+ * (freed) beacon data in some cases, e.g. tx credit starvation
+ * combined with missed TBTT. This is very very rare.
+ *
+ * On non-IOMMU-enabled hosts this could be a possible security issue
+ * because hw could beacon some random data on the air. On
+ * IOMMU-enabled hosts DMAR faults would occur in most cases and target
+ * device would crash.
+ *
+ * Since there are no beacon tx completions (implicit nor explicit)
+ * propagated to host the only workaround for this is to allocate a
+ * DMA-coherent buffer for a lifetime of a vif and use it for all
+ * beacon tx commands. Worst case for this approach is some beacons may
+ * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ if (!arvif->beacon_buf) {
+ ret = -ENOMEM;
+ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ arvif->beacon_buf ? "single-buf" : "per-skb");
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
@@ -2815,7 +2963,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
- ar->free_vdev_map &= ~(1 << arvif->vdev_id);
+ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
vdev_param = ar->wmi.vdev_param->def_keyid;
@@ -2837,6 +2985,20 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_vdev_delete;
}
+ if (ar->cfg_tx_chainmask) {
+ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+ ret);
+ goto err_vdev_delete;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
@@ -2899,6 +3061,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2908,10 +3077,16 @@ err_peer_delete:
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
- ar->free_vdev_map |= 1 << arvif->vdev_id;
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
err:
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -2924,19 +3099,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
- mutex_lock(&ar->conf_mutex);
-
cancel_work_sync(&arvif->wep_key_work);
- spin_lock_bh(&ar->data_lock);
- if (arvif->beacon) {
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
- }
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
ret = ath10k_spectral_vif_stop(arvif);
@@ -2944,7 +3112,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
arvif->vdev_id, ret);
- ar->free_vdev_map |= 1 << arvif->vdev_id;
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -3068,54 +3236,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
- /*
- * Firmware manages AP self-peer internally so make sure to not create
- * it in driver. Otherwise AP self-peer deletion may timeout later.
- */
- if (changed & BSS_CHANGED_BSSID &&
- vif->type != NL80211_IFTYPE_AP) {
- if (!is_zero_ether_addr(info->bssid)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d create peer %pM\n",
- arvif->vdev_id, info->bssid);
-
- ret = ath10k_peer_create(ar, arvif->vdev_id,
- info->bssid);
- if (ret)
- ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n",
- info->bssid, arvif->vdev_id, ret);
-
- if (vif->type == NL80211_IFTYPE_STATION) {
- /*
- * this is never erased as we it for crypto key
- * clearing; this is FW requirement
- */
- ether_addr_copy(arvif->bssid, info->bssid);
-
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d start %pM\n",
- arvif->vdev_id, info->bssid);
-
- ret = ath10k_vdev_start(arvif);
- if (ret) {
- ath10k_warn(ar, "failed to start vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto exit;
- }
-
- arvif->is_started = true;
- }
-
- /*
- * Mac80211 does not keep IBSS bssid when leaving IBSS,
- * so driver need to store it. It is needed when leaving
- * IBSS in order to remove BSSID peer.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- memcpy(arvif->bssid, info->bssid,
- ETH_ALEN);
- }
- }
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED)
ath10k_control_beaconing(arvif, info);
@@ -3177,10 +3299,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_monitor_stop(ar);
ath10k_bss_assoc(hw, vif, info);
ath10k_monitor_recalc(ar);
+ } else {
+ ath10k_bss_disassoc(hw, vif);
}
}
-exit:
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3266,9 +3399,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
- cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
}
static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
@@ -3453,7 +3587,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
- err = ath10k_station_assoc(ar, arvif, sta, true);
+ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
if (err)
ath10k_warn(ar, "failed to reassociate station: %pM\n",
sta->addr);
@@ -3462,6 +3596,37 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return;
+
+ ar->num_stations--;
+}
+
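The two helpers above give AP/IBSS interfaces a simple station budget: reserve a slot up front, return -ENOBUFS when full, and release the slot again on any later failure, as the reworked sta_state path below does when peer creation or vdev start fails. A standalone sketch of that reserve-then-rollback pattern (names here are hypothetical, not the driver's):

#include <errno.h>
#include <stdio.h>

#define MAX_STATIONS 128

static int num_stations;

static int inc_num_stations(void)
{
    if (num_stations >= MAX_STATIONS)
        return -ENOBUFS;
    num_stations++;
    return 0;
}

static void dec_num_stations(void)
{
    num_stations--;
}

/* Hypothetical add path: reserve a slot first, undo it if a later step fails. */
static int add_station(int peer_create_fails)
{
    int ret = inc_num_stations();

    if (ret)
        return ret;

    if (peer_create_fails) {
        dec_num_stations();    /* roll back the reservation */
        return -EIO;
    }
    return 0;
}

int main(void)
{
    printf("add ok:   %d (stations %d)\n", add_station(0), num_stations);
    printf("add fail: %d (stations %d)\n", add_station(1), num_stations);
    return 0;
}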
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3471,7 +3636,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- int max_num_peers;
int ret = 0;
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3489,31 +3653,46 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE &&
- vif->type != NL80211_IFTYPE_STATION) {
+ new_state == IEEE80211_STA_NONE) {
/*
* New station addition.
*/
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
- else
- max_num_peers = TARGET_NUM_PEERS;
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+ arvif->vdev_id, sta->addr,
+ ar->num_stations + 1, ar->max_num_stations,
+ ar->num_peers + 1, ar->max_num_peers);
- if (ar->num_peers >= max_num_peers) {
- ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n",
- ar->num_peers, max_num_peers);
- ret = -ENOBUFS;
+ ret = ath10k_mac_inc_num_stations(arvif);
+ if (ret) {
+ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
goto exit;
}
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d peer create %pM (new sta) num_peers %d\n",
- arvif->vdev_id, sta->addr, ar->num_peers);
-
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
- if (ret)
+ if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(arvif->is_started);
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr));
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ arvif->is_started = true;
+ }
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
@@ -3522,13 +3701,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(!arvif->is_started);
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
- if (vif->type == NL80211_IFTYPE_STATION)
- ath10k_bss_disassoc(hw, vif);
+ ath10k_mac_dec_num_stations(arvif);
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -3539,7 +3729,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
- ret = ath10k_station_assoc(ar, arvif, sta, false);
+ ret = ath10k_station_assoc(ar, vif, sta, false);
if (ret)
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -3553,7 +3743,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
- ret = ath10k_station_disassoc(ar, arvif, sta);
+ ret = ath10k_station_disassoc(ar, vif, sta);
if (ret)
ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -3717,6 +3907,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
if (ret)
goto exit;
+ duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
@@ -3761,10 +3953,11 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
- cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
return 0;
}
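Both scan-cancel paths now call cancel_delayed_work_sync() only after conf_mutex has been dropped, presumably because the scan timeout worker itself takes conf_mutex (that worker is not shown in this hunk, so treat the reason as an inference). A minimal pthread sketch of the same rule, with illustrative names (build with -pthread): flag the abort under the lock, wait for the worker only after releasing it.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool scan_aborted;

/* Stands in for the scan timeout work, which needs conf_mutex too. */
static void *scan_timeout_work(void *arg)
{
    (void)arg;
    usleep(1000);
    pthread_mutex_lock(&conf_mutex);
    if (!scan_aborted)
        printf("scan timed out\n");
    pthread_mutex_unlock(&conf_mutex);
    return NULL;
}

int main(void)
{
    pthread_t worker;

    pthread_create(&worker, NULL, scan_timeout_work, NULL);

    pthread_mutex_lock(&conf_mutex);
    scan_aborted = true;        /* ath10k_scan_abort() analogue */
    pthread_mutex_unlock(&conf_mutex);

    /* Wait for the worker only after the lock is released, mirroring
     * cancel_delayed_work_sync() being moved outside conf_mutex. */
    pthread_join(worker, NULL);
    return 0;
}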
@@ -3807,7 +4000,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
arvif->vdev_id, value);
- ret = ath10k_mac_set_rts(arvif, value);
+ ret = ath10k_mac_set_frag(arvif, value);
if (ret) {
ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3843,7 +4036,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
- skip = (ar->state == ATH10K_STATE_WEDGED);
+ skip = (ar->state == ATH10K_STATE_WEDGED) ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH,
+ &ar->dev_flags);
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
@@ -3929,10 +4124,14 @@ exit:
}
#endif
-static void ath10k_restart_complete(struct ieee80211_hw *hw)
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
{
struct ath10k *ar = hw->priv;
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
mutex_lock(&ar->conf_mutex);
/* If device failed to restart it will be in a different state, e.g.
@@ -3940,6 +4139,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw)
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
}
mutex_unlock(&ar->conf_mutex);
@@ -3975,6 +4175,9 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel = &sband->channels[idx];
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -4022,6 +4225,10 @@ ath10k_default_bitrate_mask(struct ath10k *ar,
u32 legacy = 0x00ff;
u8 ht = 0xff, i;
u16 vht = 0x3ff;
+ u16 nrf = ar->num_rf_chains;
+
+ if (ar->cfg_tx_chainmask)
+ nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
switch (band) {
case IEEE80211_BAND_2GHZ:
@@ -4037,11 +4244,11 @@ ath10k_default_bitrate_mask(struct ath10k *ar,
if (mask->control[band].legacy != legacy)
return false;
- for (i = 0; i < ar->num_rf_chains; i++)
+ for (i = 0; i < nrf; i++)
if (mask->control[band].ht_mcs[i] != ht)
return false;
- for (i = 0; i < ar->num_rf_chains; i++)
+ for (i = 0; i < nrf; i++)
if (mask->control[band].vht_mcs[i] != vht)
return false;
@@ -4292,6 +4499,9 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
u8 fixed_nss = ar->num_rf_chains;
u8 force_sgi;
+ if (ar->cfg_tx_chainmask)
+ fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
force_sgi = mask->control[band].gi;
if (force_sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
@@ -4450,12 +4660,15 @@ static const struct ieee80211_ops ath10k_ops = {
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
.get_antenna = ath10k_get_antenna,
- .restart_complete = ath10k_restart_complete,
+ .reconfig_complete = ath10k_reconfig_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
+ .get_et_sset_count = ath10k_debug_get_et_sset_count,
+ .get_et_stats = ath10k_debug_get_et_stats,
+ .get_et_strings = ath10k_debug_get_et_strings,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -4800,15 +5013,6 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- /* TODO: Have to deal with 2x2 chips if/when the come out. */
- ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
- ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
- } else {
- ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
- ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
- }
-
ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
@@ -4827,10 +5031,6 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
- /* MSDU can have HTT TX fragment pushed in front. The additional 4
- * bytes is used for padding/alignment if necessary. */
- ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
-
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
@@ -4854,6 +5054,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6c80eeada3e2..68296117d203 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -21,6 +21,8 @@
#include <net/mac80211.h>
#include "core.h"
+#define WEP_KEYID_SHIFT 6
+
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
@@ -39,6 +41,10 @@ void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 59e0ea83be50..7abb8367119a 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -485,6 +485,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
+ spin_lock_bh(&ar_pci->ce_lock);
+
ce_diag = ar_pci->ce_diag;
/*
@@ -511,7 +513,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
if (ret != 0)
goto done;
@@ -527,15 +529,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
address);
- ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
- 0);
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
+ 0);
if (ret)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id) != 0) {
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
ret = -EBUSY;
@@ -554,9 +556,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -591,6 +593,8 @@ done:
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
return ret;
}
@@ -648,6 +652,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
dma_addr_t ce_data_base = 0;
int i;
+ spin_lock_bh(&ar_pci->ce_lock);
+
ce_diag = ar_pci->ce_diag;
/*
@@ -688,7 +694,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
if (ret != 0)
goto done;
@@ -696,15 +702,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
- nbytes, 0, 0);
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ nbytes, 0, 0);
if (ret != 0)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id) != 0) {
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -724,9 +730,9 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -760,6 +766,8 @@ done:
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
return ret;
}
@@ -815,20 +823,24 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
- void *transfer_context;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
- while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0) {
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
+ &nbytes, &transfer_id) == 0) {
/* no need to call tx completion for NULL pointers */
- if (transfer_context == NULL)
+ if (skb == NULL)
continue;
- cb->tx_completion(ar, transfer_context, transfer_id);
+ __skb_queue_tail(&list, skb);
}
+
+ while ((skb = __skb_dequeue(&list)))
+ cb->tx_completion(ar, skb);
}
/* Called by lower (CE) layer when data is received from the Target. */
@@ -839,12 +851,14 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
+ struct sk_buff_head list;
void *transfer_context;
u32 ce_data;
unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
+ __skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
@@ -861,7 +875,16 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
}
skb_put(skb, nbytes);
- cb->rx_completion(ar, skb, pipe_info->pipe_num);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ cb->rx_completion(ar, skb);
}
ath10k_pci_rx_post_pipe(pipe_info);
@@ -936,6 +959,12 @@ err:
return err;
}
+static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -986,6 +1015,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+
crash_data = ath10k_debug_get_new_fw_crash_data(ar);
if (crash_data)
@@ -1121,14 +1152,37 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static void ath10k_pci_irq_disable(struct ath10k *ar)
+static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
ath10k_ce_disable_interrupts(ar);
ath10k_pci_disable_and_clear_legacy_irq(ar);
- /* FIXME: How to mask all MSI interrupts? */
+ ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
synchronize_irq(ar_pci->pdev->irq + i);
@@ -1138,7 +1192,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
- /* FIXME: How to unmask all MSI interrupts? */
+ ath10k_pci_irq_msi_fw_unmask(ar);
}
static int ath10k_pci_hif_start(struct ath10k *ar)
@@ -1151,64 +1205,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
- struct ath10k_pci *ar_pci;
- struct ath10k_ce_pipe *ce_hdl;
- u32 buf_sz;
- struct sk_buff *netbuf;
- u32 ce_data;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
- buf_sz = pipe_info->buf_sz;
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
- /* Unused Copy Engine */
- if (buf_sz == 0)
+ if (!ce_ring)
return;
- ar = pipe_info->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
- ce_hdl = pipe_info->ce_hdl;
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
- while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
- &ce_data) == 0) {
- dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
- netbuf->len + skb_tailroom(netbuf),
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- dev_kfree_skb_any(netbuf);
+ dev_kfree_skb_any(skb);
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ath10k_ce_pipe *ce_hdl;
- struct sk_buff *netbuf;
- u32 ce_data;
- unsigned int nbytes;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct ce_desc *ce_desc;
+ struct sk_buff *skb;
unsigned int id;
- u32 buf_sz;
+ int i;
- buf_sz = pipe_info->buf_sz;
+ ar = pci_pipe->hif_ce_state;
+ ar_pci = ath10k_pci_priv(ar);
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
- /* Unused Copy Engine */
- if (buf_sz == 0)
+ if (!ce_ring)
return;
- ar = pipe_info->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
- ce_hdl = pipe_info->ce_hdl;
+ if (!pci_pipe->buf_sz)
+ return;
- while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
- &ce_data, &nbytes, &id) == 0) {
- /* no need to call tx completion for NULL pointers */
- if (!netbuf)
+ ce_desc = ce_ring->shadow_base;
+ if (WARN_ON(!ce_desc))
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
continue;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ ce_ring->per_transfer_context[i] = NULL;
+ id = MS(__le16_to_cpu(ce_desc[i].flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ ar_pci->msg_callbacks_current.tx_completion(ar, skb);
}
}
@@ -1266,6 +1330,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_warm_reset(ar);
ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
}
@@ -1386,6 +1451,9 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
&nbytes, &transfer_id, &flags))
return;
+ if (WARN_ON_ONCE(!xfer))
+ return;
+
if (!xfer->wait_for_resp) {
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
return;
@@ -1569,23 +1637,40 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_alloc_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe;
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ pipe = &ar_pci->pipe_info[i];
+ pipe->ce_hdl = &ar_pci->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
+ ath10k_pci_ce_send_done,
+ ath10k_pci_ce_recv_data);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
+
+ /* Last CE is Diagnostic Window */
+ if (i == CE_COUNT - 1) {
+ ar_pci->ce_diag = pipe->ce_hdl;
+ continue;
+ }
+
+ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
}
return 0;
}
-static void ath10k_pci_free_ce(struct ath10k *ar)
+static void ath10k_pci_free_pipes(struct ath10k *ar)
{
int i;
@@ -1593,39 +1678,17 @@ static void ath10k_pci_free_ce(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_pci_ce_init(struct ath10k *ar)
+static int ath10k_pci_init_pipes(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- const struct ce_attr *attr;
- int pipe_num, ret;
+ int i, ret;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
- pipe_info->pipe_num = pipe_num;
- pipe_info->hif_ce_state = ar;
- attr = &host_ce_config_wlan[pipe_num];
-
- ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
- ath10k_pci_ce_send_done,
- ath10k_pci_ce_recv_data);
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
- pipe_num, ret);
+ i, ret);
return ret;
}
-
- if (pipe_num == CE_COUNT - 1) {
- /*
- * Reserve the ultimate CE for
- * diagnostic Window support
- */
- ar_pci->ce_diag = pipe_info->ce_hdl;
- continue;
- }
-
- pipe_info->buf_sz = (size_t)(attr->src_sz_max);
}
return 0;
@@ -1666,93 +1729,167 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
msleep(10);
}
-static int ath10k_pci_warm_reset(struct ath10k *ar)
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
u32 val;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
-
- /* debug */
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
- val);
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CPU_INTR_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
- val);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
- /* disable pending irqs */
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS, 0);
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+ u32 val;
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CLR_ADDRESS, ~0);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
- msleep(100);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ msleep(10);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
- /* clear fw indicator */
- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+ u32 val;
- /* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS,
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
- /* reset CE */
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CE_RST_MASK);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- msleep(10);
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ int ret;
- /* unreset CE */
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val & ~SOC_RESET_CONTROL_CE_RST_MASK);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- msleep(10);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_warm_reset_counter++;
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_pci_irq_disable(ar);
+
+ /* Make sure the target CPU is not doing anything dangerous, e.g. if it
+ * were to access the copy engine while the host performs a copy engine
+ * reset, the device could confuse the pci-e controller to the point of
+ * bringing the host system to a complete stop (i.e. hang).
+ */
ath10k_pci_warm_reset_si0(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+ ath10k_pci_wait_for_target_init(ar);
- /* debug */
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
- val);
+ ath10k_pci_warm_reset_clear_lf(ar);
+ ath10k_pci_warm_reset_ce(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CPU_INTR_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
- val);
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+ return ret;
+ }
- /* CPU warm reset */
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
- val);
+ return 0;
+}
- msleep(100);
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ int i, ret;
+ u32 val;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
+
+ /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+ * It is thus preferred to use warm reset, which is safer but may not be
+ * able to recover the device from all possible failure scenarios.
+ *
+ * Warm reset doesn't always work on the first try, so attempt it a few
+ * times before giving up.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+ ret);
+ continue;
+ }
+
+ /* FIXME: Sometimes the copy engine doesn't recover after a warm
+ * reset. In most cases this needs a cold reset. In some of these
+ * cases the device is in such a state that a cold reset may
+ * lock up the host.
+ *
+ * Reading any host interest register via the copy engine is
+ * sufficient to verify whether the device is capable of booting
+ * the firmware blob.
+ */
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+ &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+ return 0;
+ }
+
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+ ath10k_warn(ar, "refusing cold reset as requested\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
return 0;
}
-static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
/*
* Bring the target up cleanly.
*
@@ -1763,26 +1900,16 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- if (cold_reset)
- ret = ath10k_pci_cold_reset(ar);
- else
- ret = ath10k_pci_warm_reset(ar);
-
+ ret = ath10k_pci_chip_reset(ar);
if (ret) {
- ath10k_err(ar, "failed to reset target: %d\n", ret);
- goto err;
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_sleep;
}
- ret = ath10k_pci_ce_init(ar);
+ ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
- goto err;
- }
-
- ret = ath10k_pci_wait_for_target_init(ar);
- if (ret) {
- ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
- goto err_ce;
+ goto err_sleep;
}
ret = ath10k_pci_init_config(ar);
@@ -1801,73 +1928,21 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
err_ce:
ath10k_pci_ce_deinit(ar);
- ath10k_pci_warm_reset(ar);
-err:
- return ret;
-}
-
-static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
-{
- int i, ret;
-
- /*
- * Sometime warm reset succeeds after retries.
- *
- * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
- * at first try.
- */
- for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
- ret = __ath10k_pci_hif_power_up(ar, false);
- if (ret == 0)
- break;
-
- ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
- i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
- }
+err_sleep:
+ ath10k_pci_sleep(ar);
return ret;
}
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
-
- /*
- * Hardware CUS232 version 2 has some issues with cold reset and the
- * preferred (and safer) way to perform a device reset is through a
- * warm reset.
- *
- * Warm reset doesn't always work though so fall back to cold reset may
- * be necessary.
- */
- ret = ath10k_pci_hif_power_up_warm(ar);
- if (ret) {
- ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
- ret);
-
- if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
- return ret;
-
- ath10k_warn(ar, "trying cold reset\n");
-
- ret = __ath10k_pci_hif_power_up(ar, true);
- if (ret) {
- ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
- ath10k_pci_warm_reset(ar);
+ /* Currently hif_power_up effectively performs a reset and hif_stop
+ * resets the chip as well, so there's no point in resetting here.
+ */
+
+ ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
@@ -1921,6 +1996,8 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,
@@ -1931,6 +2008,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
.power_up = ath10k_pci_hif_power_up,
.power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_pci_read32,
+ .write32 = ath10k_pci_write32,
#ifdef CONFIG_PM
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
@@ -2250,14 +2329,14 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS,
- PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL);
+ ath10k_pci_enable_legacy_irq(ar);
mdelay(10);
} while (time_before(jiffies, timeout));
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+
if (val == 0xffffffff) {
ath10k_err(ar, "failed to read device register, device is gone\n");
return -EIO;
@@ -2287,6 +2366,12 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_cold_reset_counter++;
+
+ spin_unlock_bh(&ar->data_lock);
+
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
@@ -2400,6 +2485,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
u32 chip_id;
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
+ ATH10K_BUS_PCI,
&ath10k_pci_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
@@ -2435,7 +2521,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
- ret = ath10k_pci_alloc_ce(ar);
+ ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
@@ -2443,25 +2529,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ath10k_pci_ce_deinit(ar);
-
- ret = ath10k_ce_disable_interrupts(ar);
- if (ret) {
- ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
- ret);
- goto err_free_ce;
- }
-
- /* Workaround: There's no known way to mask all possible interrupts via
- * device CSR. The only way to make sure device doesn't assert
- * interrupts is to reset it. Interrupts are then disabled on host
- * after handlers are registered.
- */
- ath10k_pci_warm_reset(ar);
+ ath10k_pci_irq_disable(ar);
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
- goto err_free_ce;
+ goto err_free_pipes;
}
ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
@@ -2474,8 +2547,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
- /* This shouldn't race as the device has been reset above. */
- ath10k_pci_irq_disable(ar);
+ ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
@@ -2492,8 +2564,8 @@ err_free_irq:
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
-err_free_ce:
- ath10k_pci_free_ce(ar);
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
err_sleep:
ath10k_pci_sleep(ar);
@@ -2527,8 +2599,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
- ath10k_pci_free_ce(ar);
- ath10k_pci_sleep(ar);
+ ath10k_pci_free_pipes(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
@@ -2565,5 +2636,7 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 3e1454b74e00..63ce61fcdac8 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -56,14 +56,14 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
}
int ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
- u32 reg0, reg1, nf_list1, nf_list2;
+ u32 reg0, reg1;
u8 chain_idx, *bins;
int dc_pos;
@@ -82,7 +82,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
* but the results/plots suggest that it's actually 22/44/88 MHz.
*/
- switch (event->hdr.chan_width_mhz) {
+ switch (phyerr->chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
@@ -101,7 +101,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->chan_width_mhz = 88;
break;
default:
- fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
@@ -110,36 +110,22 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
- fft_sample->rssi = event->hdr.rssi_combined;
+ fft_sample->rssi = phyerr->rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
- freq1 = __le16_to_cpu(event->hdr.freq1);
- freq2 = __le16_to_cpu(event->hdr.freq2);
+ freq1 = __le16_to_cpu(phyerr->freq1);
+ freq2 = __le16_to_cpu(phyerr->freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
- nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
- nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
- switch (chain_idx) {
- case 0:
- fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
- break;
- case 1:
- fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
- break;
- case 2:
- fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
- break;
- case 3:
- fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
- break;
- }
+ fft_sample->noise = __cpu_to_be16(
+ __le16_to_cpu(phyerr->nf_chains[chain_idx]));
bins = (u8 *)fftr;
bins += sizeof(*fftr);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index ddc57c557272..042f5b302c75 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -47,8 +47,8 @@ enum ath10k_spectral_mode {
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
@@ -59,8 +59,8 @@ void ath10k_spectral_destroy(struct ath10k *ar);
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 574b75ab2609..b289378b6e3e 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -20,6 +20,15 @@
#include <linux/tracepoint.h>
#include "core.h"
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf)
+{
+ const struct ieee80211_hdr *hdr = buf;
+
+ return ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
#define _TRACE_H_
/* create empty functions when tracing is disabled */
@@ -138,7 +147,8 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
+ int ret),
TP_ARGS(ar, id, buf, buf_len, ret),
@@ -171,7 +181,7 @@ TRACE_EVENT(ath10k_wmi_cmd,
);
TRACE_EVENT(ath10k_wmi_event,
- TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
TP_ARGS(ar, id, buf, buf_len),
@@ -201,7 +211,7 @@ TRACE_EVENT(ath10k_wmi_event,
);
TRACE_EVENT(ath10k_htt_stats,
- TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
@@ -228,7 +238,7 @@ TRACE_EVENT(ath10k_htt_stats,
);
TRACE_EVENT(ath10k_wmi_dbglog,
- TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
@@ -254,6 +264,195 @@ TRACE_EVENT(ath10k_wmi_dbglog,
)
);
+TRACE_EVENT(ath10k_htt_pktlog,
+ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
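
For context, the pktlog event above copies the whole buffer into the trace ring via __dynamic_array. The tracepoint call itself compiles to a static-branch no-op when tracing is disabled, so the generated trace_ath10k_htt_pktlog_enabled() helper only pays off when a caller would otherwise do extra work purely to build the trace payload. A minimal sketch under that assumption; the caller and the skb_linearize() step are hypothetical, not from the patch:

/* Sketch: guard trace-only work with the generated
 * trace_<event>_enabled() helper. The skb_linearize() call stands in
 * for work that is only needed to build a contiguous trace payload.
 */
static void ath10k_htt_pktlog_trace_sketch(struct ath10k *ar,
					   struct sk_buff *skb)
{
	if (!trace_ath10k_htt_pktlog_enabled())
		return;

	if (skb_linearize(skb))	/* trace copy needs contiguous data */
		return;

	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
}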
+
+TRACE_EVENT(ath10k_htt_tx,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+ u8 vdev_id, u8 tid),
+
+ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ __field(u16, msdu_len)
+ __field(u8, vdev_id)
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ __entry->msdu_len = msdu_len;
+ __entry->vdev_id = vdev_id;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id,
+ __entry->msdu_len,
+ __entry->vdev_id,
+ __entry->tid
+ )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+ TP_ARGS(ar, msdu_id),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(data), data, __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len - ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(payload),
+ data + ath10k_frm_hdr_len(data), __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
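
The hdr/payload event classes split a frame at the 802.11 header boundary computed by ath10k_frm_hdr_len(), so a capture tool can record headers for every frame while pulling payloads only when needed. This diff adds tx call sites in wmi.c (see the mgmt tx and beacon hunks below); the rx counterparts presumably live in the HTT rx path. A minimal sketch of such an rx call site, assuming a hypothetical completion handler that holds a complete frame in an skb:

/* Sketch, assuming a complete 802.11 frame in skb->data; the event
 * classes above take care of splitting at ieee80211_hdrlen().
 */
static void ath10k_trace_rx_frame_sketch(struct ath10k *ar,
					 struct sk_buff *skb)
{
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);
}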
+
+TRACE_EVENT(ath10k_htt_rx_desc,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index a0cbc21d0d4b..7579de8e7a8c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -78,6 +78,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
@@ -145,7 +146,8 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
- mapped == expect_mapped;
+ (mapped == expect_mapped ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
if (ret <= 0)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2c42bd504b79..c0f3e4d09263 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -609,6 +609,40 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
+static void
+ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
+{
+ u32 flags = 0;
+
+ memset(ch, 0, sizeof(*ch));
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ ch->mhz = __cpu_to_le32(arg->freq);
+ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+ ch->band_center_freq2 = 0;
+ ch->min_power = arg->min_power;
+ ch->max_power = arg->max_power;
+ ch->reg_power = arg->max_reg_power;
+ ch->antenna_max = arg->max_antenna_gain;
+
+ /* mode & flags share storage */
+ ch->mode = arg->mode;
+ ch->flags |= __cpu_to_le32(flags);
+}
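
ath10k_wmi_put_wmi_channel() converts a host-order struct wmi_channel_arg into the little-endian wire struct wmi_channel in one place instead of per command. A minimal caller sketch with made-up values for a 2.4 GHz HT40+ channel; the field names follow the arg fields referenced above and MODE_11NG_HT40 is the usual wmi phy mode for that configuration:

/* Sketch with hypothetical channel parameters; unset arg fields are
 * fine because the helper memsets the wire struct first.
 */
static void ath10k_wmi_fill_chan_sketch(struct wmi_channel *ch)
{
	struct wmi_channel_arg arg = {
		.freq			= 2412,	/* channel 1 */
		.band_center_freq1	= 2422,	/* HT40+ segment center */
		.mode			= MODE_11NG_HT40,
		.allow_ht		= true,
		.ht40plus		= true,
		.max_power		= 30,
		.max_reg_power		= 30,
	};

	ath10k_wmi_put_wmi_channel(ch, &arg);
}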
+
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
@@ -745,6 +779,10 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ath10k_wmi_tx_beacons_nowait(ar);
ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ret = -ESHUTDOWN;
+
(ret != -EAGAIN);
}), 3*HZ);
@@ -800,6 +838,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
+ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_tx_payload(ar, skb->data, skb->len);
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
@@ -1073,13 +1113,46 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
return rate_idx;
}
+/* If keys are configured, HW decrypts all frames

+ * with the protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ unsigned int hdrlen;
+ bool peer_key;
+ u8 *addr, keyidx;
+
+ if (!ieee80211_is_auth(hdr->frame_control) ||
+ !ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+ return;
+
+ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+ addr = ieee80211_get_SA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer_key) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac wep key present for peer %pM\n", addr);
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+}
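
The key index lookup above depends on the WEP IV layout: the IV is the four octets immediately after the 802.11 header, and the key ID occupies the top two bits of its last octet, which is what the WEP_KEYID_SHIFT right-shift extracts. A small worked illustration of just that extraction, as a standalone sketch rather than driver code:

/* Sketch: key ID extraction from a WEP IV. For a key ID octet of 0x40
 * (binary 0100 0000) the result is 1, i.e. WEP key slot 1.
 */
static u8 ath10k_wep_keyidx_sketch(const u8 *iv)
{
	/* iv points at the 4-byte WEP IV following the 802.11 header */
	return iv[IEEE80211_WEP_IV_LEN - 1] >> 6;
}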
+
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr;
u32 rx_status;
u32 channel;
@@ -1127,30 +1200,34 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (rx_status & WMI_RX_STATUS_ERR_CRC)
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
- * of mgmt rx. */
-
- ch = ar->scan_channel;
- if (!ch)
- ch = ar->rx_channel;
-
- if (ch) {
- status->band = ch->band;
-
- if (phy_mode == MODE_11B &&
- status->band == IEEE80211_BAND_5GHZ)
- ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ * of mgmt rx.
+ */
+ if (channel >= 1 && channel <= 14) {
+ status->band = IEEE80211_BAND_2GHZ;
+ } else if (channel >= 36 && channel <= 165) {
+ status->band = IEEE80211_BAND_5GHZ;
} else {
- ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
- status->band = phy_mode_to_band(phy_mode);
+ /* Shouldn't happen unless the list of channels advertised to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ return 0;
}
+ if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
@@ -1160,6 +1237,8 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However in case of PMF it delivers decrypted
* frames with Protected Bit set. */
@@ -1295,14 +1374,196 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ const struct wal_dbg_tx_stats *tx = &src->wal.tx;
+ const struct wal_dbg_rx_stats *rx = &src->wal.rx;
+
+ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+
+ dst->comp_queued = __le32_to_cpu(tx->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(tx->local_enqued);
+ dst->local_freed = __le32_to_cpu(tx->local_freed);
+ dst->hw_queued = __le32_to_cpu(tx->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
+ dst->underrun = __le32_to_cpu(tx->underrun);
+ dst->tx_abort = __le32_to_cpu(tx->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(tx->tx_ko);
+ dst->data_rc = __le32_to_cpu(tx->data_rc);
+ dst->self_triggers = __le32_to_cpu(tx->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);
+
+ dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(rx->r0_frags);
+ dst->r1_frags = __le32_to_cpu(rx->r1_frags);
+ dst->r2_frags = __le32_to_cpu(rx->r2_frags);
+ dst->r3_frags = __le32_to_cpu(rx->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(rx->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
+}
+
+static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10x_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats(&src->old, dst);
+
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
+ else
+ return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
+}
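
ath10k_wmi_pull_fw_stats() and its per-format helpers only parse the event into the pdevs and peers lists of struct ath10k_fw_stats; the kzalloc'd entries belong to the caller afterwards, presumably the debugfs stats code reached via ath10k_debug_fw_stats_process() below. A minimal consumer sketch, assuming the two lists were set up with INIT_LIST_HEAD() before parsing:

/* Sketch: walk and release the stats lists filled by
 * ath10k_wmi_pull_fw_stats(). Field names follow struct
 * ath10k_fw_stats_pdev / ath10k_fw_stats_peer as used above.
 */
static void ath10k_fw_stats_consume_sketch(struct ath10k *ar,
					   struct ath10k_fw_stats *stats)
{
	struct ath10k_fw_stats_pdev *pdev, *tmp_pdev;
	struct ath10k_fw_stats_peer *peer, *tmp_peer;

	list_for_each_entry(pdev, &stats->pdevs, list)
		ath10k_dbg(ar, ATH10K_DBG_WMI, "pdev noise floor %d\n",
			   pdev->ch_noise_floor);

	list_for_each_entry(peer, &stats->peers, list)
		ath10k_dbg(ar, ATH10K_DBG_WMI, "peer %pM rssi %d\n",
			   peer->peer_macaddr, peer->peer_rssi);

	/* release everything the parser allocated */
	list_for_each_entry_safe(pdev, tmp_pdev, &stats->pdevs, list) {
		list_del(&pdev->list);
		kfree(pdev);
	}
	list_for_each_entry_safe(peer, tmp_peer, &stats->peers, list) {
		list_del(&peer->list);
		kfree(peer);
	}
}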
+
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
struct sk_buff *skb)
{
- struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
-
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
-
- ath10k_debug_read_target_stats(ar, ev);
+ ath10k_debug_fw_stats_process(ar, skb);
}
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
@@ -1579,6 +1840,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
+ dma_addr_t paddr;
int ret, vdev_id = 0;
ev = (struct wmi_host_swba_event *)skb->data;
@@ -1647,27 +1909,37 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "SWBA overrun on vdev %d\n",
arvif->vdev_id);
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
+ ath10k_mac_vif_beacon_free(arvif);
}
- ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
- bcn->data, bcn->len,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(arvif->ar->dev,
- ATH10K_SKB_CB(bcn)->paddr);
- if (ret) {
- ath10k_warn(ar, "failed to map beacon: %d\n", ret);
- dev_kfree_skb_any(bcn);
- goto skip;
+ if (!arvif->beacon_buf) {
+ paddr = dma_map_single(arvif->ar->dev, bcn->data,
+ bcn->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
+ goto skip;
+ }
+
+ ATH10K_SKB_CB(bcn)->paddr = paddr;
+ } else {
+ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+ bcn->len, IEEE80211_MAX_FRAME_LEN);
+ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+ }
+ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
}
arvif->beacon = bcn;
arvif->beacon_sent = false;
+ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
@@ -1681,8 +1953,8 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_radar_report *rr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_radar_report *rr,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
@@ -1715,12 +1987,12 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
/* report event to DFS pattern detector */
- tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
+ tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
tsf64 |= tsf32l;
width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
- rssi = event->hdr.rssi_combined;
+ rssi = phyerr->rssi_combined;
/* hardware store this as 8 bit signed value,
* set to zero if negative number
@@ -1759,8 +2031,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
}
static int ath10k_dfs_fft_report(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
u64 tsf)
{
u32 reg0, reg1;
@@ -1768,7 +2040,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
- rssi = event->hdr.rssi_combined;
+ rssi = phyerr->rssi_combined;
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
@@ -1797,20 +2069,20 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
}
static void ath10k_wmi_event_dfs(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
+ const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
- struct phyerr_tlv *tlv;
- struct phyerr_radar_report *rr;
- struct phyerr_fft_report *fftr;
- u8 *tlv_buf;
+ const struct phyerr_tlv *tlv;
+ const struct phyerr_radar_report *rr;
+ const struct phyerr_fft_report *fftr;
+ const u8 *tlv_buf;
- buf_len = __le32_to_cpu(event->hdr.buf_len);
+ buf_len = __le32_to_cpu(phyerr->buf_len);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
- event->hdr.phy_err_code, event->hdr.rssi_combined,
- __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
+ phyerr->phy_err_code, phyerr->rssi_combined,
+ __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
/* Skip event if DFS disabled */
if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
@@ -1825,9 +2097,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
return;
}
- tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
- tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
tlv_len, tlv->tag, tlv->sig);
@@ -1841,7 +2113,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
rr = (struct phyerr_radar_report *)tlv_buf;
- ath10k_dfs_radar_report(ar, event, rr, tsf);
+ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
break;
case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
@@ -1851,7 +2123,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
fftr = (struct phyerr_fft_report *)tlv_buf;
- res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
+ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
if (res)
return;
break;
@@ -1863,16 +2135,16 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
static void
ath10k_wmi_event_spectral_scan(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
+ const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
struct phyerr_tlv *tlv;
- u8 *tlv_buf;
- struct phyerr_fft_report *fftr;
+ const void *tlv_buf;
+ const struct phyerr_fft_report *fftr;
size_t fftr_len;
- buf_len = __le32_to_cpu(event->hdr.buf_len);
+ buf_len = __le32_to_cpu(phyerr->buf_len);
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
@@ -1881,9 +2153,9 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
return;
}
- tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
- tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
if (i + sizeof(*tlv) + tlv_len > buf_len) {
ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
@@ -1900,8 +2172,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
}
fftr_len = tlv_len - sizeof(*fftr);
- fftr = (struct phyerr_fft_report *)tlv_buf;
- res = ath10k_spectral_process_fft(ar, event,
+ fftr = tlv_buf;
+ res = ath10k_spectral_process_fft(ar, phyerr,
fftr, fftr_len,
tsf);
if (res < 0) {
@@ -1918,8 +2190,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_comb_phyerr_rx_event *comb_event;
- struct wmi_single_phyerr_rx_event *event;
+ const struct wmi_phyerr_event *ev;
+ const struct wmi_phyerr *phyerr;
u32 count, i, buf_len, phy_err_code;
u64 tsf;
int left_len = skb->len;
@@ -1927,38 +2199,38 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
ATH10K_DFS_STAT_INC(ar, phy_errors);
/* Check if combined event available */
- if (left_len < sizeof(*comb_event)) {
+ if (left_len < sizeof(*ev)) {
ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
return;
}
- left_len -= sizeof(*comb_event);
+ left_len -= sizeof(*ev);
/* Check number of included events */
- comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
- count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
+ ev = (const struct wmi_phyerr_event *)skb->data;
+ count = __le32_to_cpu(ev->num_phyerrs);
- tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+ tsf = __le32_to_cpu(ev->tsf_u32);
tsf <<= 32;
- tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
+ tsf |= __le32_to_cpu(ev->tsf_l32);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
- event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+ phyerr = ev->phyerrs;
for (i = 0; i < count; i++) {
/* Check if we can read event header */
- if (left_len < sizeof(*event)) {
+ if (left_len < sizeof(*phyerr)) {
ath10k_warn(ar, "single event (%d) wrong head len\n",
i);
return;
}
- left_len -= sizeof(*event);
+ left_len -= sizeof(*phyerr);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- phy_err_code = event->hdr.phy_err_code;
+ buf_len = __le32_to_cpu(phyerr->buf_len);
+ phy_err_code = phyerr->phy_err_code;
if (left_len < buf_len) {
ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
@@ -1969,20 +2241,20 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
switch (phy_err_code) {
case PHY_ERROR_RADAR:
- ath10k_wmi_event_dfs(ar, event, tsf);
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
break;
case PHY_ERROR_SPECTRAL_SCAN:
- ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
case PHY_ERROR_FALSE_RADAR_EXT:
- ath10k_wmi_event_dfs(ar, event, tsf);
- ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
default:
break;
}
- event += sizeof(*event) + buf_len;
+ phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
}
}
@@ -2028,7 +2300,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
/* the last byte is always reserved for the null character */
buf[i] = '\0';
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
+ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
@@ -2163,30 +2435,117 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
return 0;
}
-static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
- struct sk_buff *skb)
+static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ int i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static void ath10k_wmi_event_service_ready(struct ath10k *ar,
+ struct sk_buff *skb)
{
- struct wmi_service_ready_event *ev = (void *)skb->data;
- DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
+ struct wmi_svc_rdy_ev_arg arg = {};
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
- if (skb->len < sizeof(*ev)) {
- ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
- skb->len, sizeof(*ev));
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
+ wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+ } else {
+ ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
+ wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
return;
}
- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
ar->fw_version_major =
- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
ar->fw_version_release =
- (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
- ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+ ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+ arg.service_map, arg.service_map_len);
/* only manually set fw features when not using FW IE format */
if (ar->fw_api == 1 && ar->fw_version_build > 636)
@@ -2198,13 +2557,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
- ar->ath_common.regulatory.current_rd =
- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
-
- wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
- ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
- ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
+ ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+ ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
@@ -2216,93 +2570,18 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->fw_version_build);
}
- /* FIXME: it probably should be better to support this */
- if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
- ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
- __le32_to_cpu(ev->num_mem_reqs));
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
- __le32_to_cpu(ev->sw_version),
- __le32_to_cpu(ev->sw_version_1),
- __le32_to_cpu(ev->abi_version),
- __le32_to_cpu(ev->phy_capability),
- __le32_to_cpu(ev->ht_cap_info),
- __le32_to_cpu(ev->vht_cap_info),
- __le32_to_cpu(ev->vht_supp_mcs),
- __le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs),
- __le32_to_cpu(ev->num_rf_chains));
-
- complete(&ar->wmi.service_ready);
-}
-
-static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
- struct sk_buff *skb)
-{
- u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
- int ret;
- struct wmi_service_ready_event_10x *ev = (void *)skb->data;
- DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
-
- if (skb->len < sizeof(*ev)) {
- ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
- skb->len, sizeof(*ev));
- return;
- }
-
- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
- ar->fw_version_major =
- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
-
- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
- ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
- }
-
- ar->ath_common.regulatory.current_rd =
- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
-
- wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
- ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
- ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
-
- if (strlen(ar->hw->wiphy->fw_version) == 0) {
- snprintf(ar->hw->wiphy->fw_version,
- sizeof(ar->hw->wiphy->fw_version),
- "%u.%u",
- ar->fw_version_major,
- ar->fw_version_minor);
- }
-
- num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
-
- if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
num_mem_reqs);
return;
}
- if (!num_mem_reqs)
- goto exit;
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
- num_mem_reqs);
-
for (i = 0; i < num_mem_reqs; ++i) {
- req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
- num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
- unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
- num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
/* number of units to allocate is number of
@@ -2316,7 +2595,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
req_id,
- __le32_to_cpu(ev->mem_reqs[i].num_units),
+ __le32_to_cpu(arg.mem_reqs[i]->num_units),
num_unit_info,
unit_size,
num_units);
@@ -2327,23 +2606,23 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
return;
}
-exit:
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
- __le32_to_cpu(ev->sw_version),
- __le32_to_cpu(ev->abi_version),
- __le32_to_cpu(ev->phy_capability),
- __le32_to_cpu(ev->ht_cap_info),
- __le32_to_cpu(ev->vht_cap_info),
- __le32_to_cpu(ev->vht_supp_mcs),
- __le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs),
- __le32_to_cpu(ev->num_rf_chains));
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ __le32_to_cpu(arg.min_tx_power),
+ __le32_to_cpu(arg.max_tx_power),
+ __le32_to_cpu(arg.ht_cap),
+ __le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.sw_ver0),
+ __le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.phy_capab),
+ __le32_to_cpu(arg.num_rf_chains),
+ __le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.num_mem_reqs));
complete(&ar->wmi.service_ready);
}
-static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -2466,10 +2745,10 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_install_key_complete(ar, skb);
break;
case WMI_SERVICE_READY_EVENTID:
- ath10k_wmi_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
@@ -2586,10 +2865,10 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10X_SERVICE_READY_EVENTID:
- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10X_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10X_PDEV_UTF_EVENTID:
/* ignore utf events */
@@ -2697,10 +2976,10 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10_2_SERVICE_READY_EVENTID:
- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10_2_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
@@ -2732,45 +3011,6 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
-/* WMI Initialization functions */
-int ath10k_wmi_attach(struct ath10k *ar)
-{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ar->wmi.cmd = &wmi_10_2_cmd_map;
- else
- ar->wmi.cmd = &wmi_10x_cmd_map;
-
- ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
- ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
- } else {
- ar->wmi.cmd = &wmi_cmd_map;
- ar->wmi.vdev_param = &wmi_vdev_param_map;
- ar->wmi.pdev_param = &wmi_pdev_param_map;
- }
-
- init_completion(&ar->wmi.service_ready);
- init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.tx_credits_wq);
-
- return 0;
-}
-
-void ath10k_wmi_detach(struct ath10k *ar)
-{
- int i;
-
- /* free the host memory chunks requested by firmware */
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- dma_free_coherent(ar->dev,
- ar->wmi.mem_chunks[i].len,
- ar->wmi.mem_chunks[i].vaddr,
- ar->wmi.mem_chunks[i].paddr);
- }
-
- ar->wmi.num_mem_chunks = 0;
-}
-
int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
@@ -2865,42 +3105,6 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ctl2g, ctl5g);
}
-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
- const struct wmi_channel_arg *arg)
-{
- struct wmi_set_channel_cmd *cmd;
- struct sk_buff *skb;
- u32 ch_flags = 0;
-
- if (arg->passive)
- return -EINVAL;
-
- skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
- if (!skb)
- return -ENOMEM;
-
- if (arg->chan_radar)
- ch_flags |= WMI_CHAN_FLAG_DFS;
-
- cmd = (struct wmi_set_channel_cmd *)skb->data;
- cmd->chan.mhz = __cpu_to_le32(arg->freq);
- cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
- cmd->chan.mode = arg->mode;
- cmd->chan.flags |= __cpu_to_le32(ch_flags);
- cmd->chan.min_power = arg->min_power;
- cmd->chan.max_power = arg->max_power;
- cmd->chan.reg_power = arg->max_reg_power;
- cmd->chan.reg_classid = arg->reg_class_id;
- cmd->chan.antenna_max = arg->max_antenna_gain;
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi set channel mode %d freq %d\n",
- arg->mode, arg->freq);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_set_channel_cmdid);
-}
-
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
@@ -2951,16 +3155,37 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
+static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
+{
+ struct host_memory_chunk *chunk;
+ int i;
+
+ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ chunk = &chunks->items[i];
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+ }
+}
+
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
- config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
config.num_offload_reorder_bufs =
@@ -3019,32 +3244,8 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3056,7 +3257,6 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3110,32 +3310,8 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10x *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3147,7 +3323,6 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3201,32 +3376,8 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config.common, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3248,52 +3399,50 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
return ret;
}
-static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
- const struct wmi_start_scan_arg *arg)
+static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
- int len;
+ if (arg->ie_len && !arg->ie)
+ return -EINVAL;
+ if (arg->n_channels && !arg->channels)
+ return -EINVAL;
+ if (arg->n_ssids && !arg->ssids)
+ return -EINVAL;
+ if (arg->n_bssids && !arg->bssids)
+ return -EINVAL;
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- len = sizeof(struct wmi_start_scan_cmd_10x);
- else
- len = sizeof(struct wmi_start_scan_cmd);
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
- if (arg->ie_len) {
- if (!arg->ie)
- return -EINVAL;
- if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
- return -EINVAL;
+ return 0;
+}
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+ int len = 0;
+
+ if (arg->ie_len) {
len += sizeof(struct wmi_ie_data);
len += roundup(arg->ie_len, 4);
}
if (arg->n_channels) {
- if (!arg->channels)
- return -EINVAL;
- if (arg->n_channels > ARRAY_SIZE(arg->channels))
- return -EINVAL;
-
len += sizeof(struct wmi_chan_list);
len += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
- if (!arg->ssids)
- return -EINVAL;
- if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
- return -EINVAL;
-
len += sizeof(struct wmi_ssid_list);
len += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
- if (!arg->bssids)
- return -EINVAL;
- if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
- return -EINVAL;
-
len += sizeof(struct wmi_bssid_list);
len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
@@ -3301,28 +3450,12 @@ static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
return len;
}
-int ath10k_wmi_start_scan(struct ath10k *ar,
- const struct wmi_start_scan_arg *arg)
+static void
+ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
{
- struct wmi_start_scan_cmd *cmd;
- struct sk_buff *skb;
- struct wmi_ie_data *ie;
- struct wmi_chan_list *channels;
- struct wmi_ssid_list *ssids;
- struct wmi_bssid_list *bssids;
u32 scan_id;
u32 scan_req_id;
- int off;
- int len = 0;
- int i;
-
- len = ath10k_wmi_start_scan_calc_len(ar, arg);
- if (len < 0)
- return len; /* len contains error code here */
-
- skb = ath10k_wmi_alloc_skb(ar, len);
- if (!skb)
- return -ENOMEM;
scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
scan_id |= arg->scan_id;
@@ -3330,35 +3463,36 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
scan_req_id |= arg->scan_req_id;
- cmd = (struct wmi_start_scan_cmd *)skb->data;
- cmd->scan_id = __cpu_to_le32(scan_id);
- cmd->scan_req_id = __cpu_to_le32(scan_req_id);
- cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
- cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
- cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
- cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
- cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
- cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
- cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
- cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
- cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
- cmd->idle_time = __cpu_to_le32(arg->idle_time);
- cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
- cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
- cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
-
- /* TLV list starts after fields included in the struct */
- /* There's just one filed that differes the two start_scan
- * structures - burst_duration, which we are not using btw,
- no point to make the split here, just shift the buffer to fit with
- given FW */
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- off = sizeof(struct wmi_start_scan_cmd_10x);
- else
- off = sizeof(struct wmi_start_scan_cmd);
+ cmn->scan_id = __cpu_to_le32(scan_id);
+ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmn->idle_time = __cpu_to_le32(arg->idle_time);
+ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
+static void
+ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ void *ptr = tlvs->tlvs;
+ int i;
if (arg->n_channels) {
- channels = (void *)skb->data + off;
+ channels = ptr;
channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
channels->num_chan = __cpu_to_le32(arg->n_channels);
@@ -3366,12 +3500,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
channels->channel_list[i].freq =
__cpu_to_le16(arg->channels[i]);
- off += sizeof(*channels);
- off += sizeof(__le32) * arg->n_channels;
+ ptr += sizeof(*channels);
+ ptr += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
- ssids = (void *)skb->data + off;
+ ssids = ptr;
ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
@@ -3383,12 +3517,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->ssids[i].len);
}
- off += sizeof(*ssids);
- off += sizeof(struct wmi_ssid) * arg->n_ssids;
+ ptr += sizeof(*ssids);
+ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
- bssids = (void *)skb->data + off;
+ bssids = ptr;
bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
@@ -3397,23 +3531,57 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->bssids[i].bssid,
ETH_ALEN);
- off += sizeof(*bssids);
- off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ ptr += sizeof(*bssids);
+ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
if (arg->ie_len) {
- ie = (void *)skb->data + off;
+ ie = ptr;
ie->tag = __cpu_to_le32(WMI_IE_TAG);
ie->ie_len = __cpu_to_le32(arg->ie_len);
memcpy(ie->ie_data, arg->ie, arg->ie_len);
- off += sizeof(*ie);
- off += roundup(arg->ie_len, 4);
+ ptr += sizeof(*ie);
+ ptr += roundup(arg->ie_len, 4);
}
+}
- if (off != skb->len) {
- dev_kfree_skb(skb);
- return -EINVAL;
+int ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ret;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ len = sizeof(struct wmi_10x_start_scan_cmd) +
+ ath10k_wmi_start_scan_tlvs_len(arg);
+ else
+ len = sizeof(struct wmi_start_scan_cmd) +
+ ath10k_wmi_start_scan_tlvs_len(arg);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ struct wmi_10x_start_scan_cmd *cmd;
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ } else {
+ struct wmi_start_scan_cmd *cmd;
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
@@ -3532,7 +3700,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- u32 ch_flags = 0;
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
@@ -3559,8 +3726,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
flags |= WMI_VDEV_START_PMF_ENABLED;
- if (arg->channel.chan_radar)
- ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3576,18 +3741,7 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
- cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
-
- cmd->chan.band_center_freq1 =
- __cpu_to_le32(arg->channel.band_center_freq1);
-
- cmd->chan.mode = arg->channel.mode;
- cmd->chan.flags |= __cpu_to_le32(ch_flags);
- cmd->chan.min_power = arg->channel.min_power;
- cmd->chan.max_power = arg->channel.max_power;
- cmd->chan.reg_power = arg->channel.max_reg_power;
- cmd->chan.reg_classid = arg->channel.reg_class_id;
- cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
@@ -3968,35 +4122,10 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
for (i = 0; i < arg->n_channels; i++) {
- u32 flags = 0;
-
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
- if (ch->passive)
- flags |= WMI_CHAN_FLAG_PASSIVE;
- if (ch->allow_ibss)
- flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
- if (ch->allow_ht)
- flags |= WMI_CHAN_FLAG_ALLOW_HT;
- if (ch->allow_vht)
- flags |= WMI_CHAN_FLAG_ALLOW_VHT;
- if (ch->ht40plus)
- flags |= WMI_CHAN_FLAG_HT40_PLUS;
- if (ch->chan_radar)
- flags |= WMI_CHAN_FLAG_DFS;
-
- ci->mhz = __cpu_to_le32(ch->freq);
- ci->band_center_freq1 = __cpu_to_le32(ch->freq);
- ci->band_center_freq2 = 0;
- ci->min_power = ch->min_power;
- ci->max_power = ch->max_power;
- ci->reg_power = ch->max_reg_power;
- ci->antenna_max = ch->max_antenna_gain;
-
- /* mode & flags share storage */
- ci->mode = ch->mode;
- ci->flags |= __cpu_to_le32(flags);
+ ath10k_wmi_put_wmi_channel(ci, ch);
}
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
@@ -4108,9 +4237,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
- else
ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+ else
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
} else {
ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
}
@@ -4267,3 +4396,73 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
+
+int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+ struct wmi_pdev_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ev_bitmap &= ATH10K_PKTLOG_ANY;
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi enable pktlog filter:%x\n", ev_bitmap);
+
+ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
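A minimal sketch of how the two new pktlog helpers pair up from driver code (the wrapper name is illustrative; ATH10K_PKTLOG_ANY is the mask already used by the enable path above):

/* Illustrative wrapper only. */
static int example_toggle_pktlog(struct ath10k *ar, bool enable)
{
	if (enable)
		return ath10k_wmi_pdev_pktlog_enable(ar, ATH10K_PKTLOG_ANY);

	return ath10k_wmi_pdev_pktlog_disable(ar);
}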
+
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ else
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ } else {
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ }
+
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
+
+ ar->wmi.num_mem_chunks = 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 86f5ebccfe79..21391929d318 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -222,128 +222,131 @@ static inline char *wmi_service_name(int service_id)
#undef SVCSTR
}
-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
- (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
BIT((svc_id)%(sizeof(u32))))
-#define SVCMAP(x, y) \
+#define SVCMAP(x, y, len) \
do { \
- if (WMI_SERVICE_IS_ENABLED((in), (x))) \
+ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
__set_bit(y, out); \
} while (0)
-static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out)
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
{
SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
- WMI_SERVICE_BEACON_OFFLOAD);
+ WMI_SERVICE_BEACON_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
- WMI_SERVICE_SCAN_OFFLOAD);
+ WMI_SERVICE_SCAN_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
- WMI_SERVICE_ROAM_OFFLOAD);
+ WMI_SERVICE_ROAM_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
- WMI_SERVICE_BCN_MISS_OFFLOAD);
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
- WMI_SERVICE_STA_PWRSAVE);
+ WMI_SERVICE_STA_PWRSAVE, len);
SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
- WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
- WMI_SERVICE_AP_UAPSD);
+ WMI_SERVICE_AP_UAPSD, len);
SVCMAP(WMI_10X_SERVICE_AP_DFS,
- WMI_SERVICE_AP_DFS);
+ WMI_SERVICE_AP_DFS, len);
SVCMAP(WMI_10X_SERVICE_11AC,
- WMI_SERVICE_11AC);
+ WMI_SERVICE_11AC, len);
SVCMAP(WMI_10X_SERVICE_BLOCKACK,
- WMI_SERVICE_BLOCKACK);
+ WMI_SERVICE_BLOCKACK, len);
SVCMAP(WMI_10X_SERVICE_PHYERR,
- WMI_SERVICE_PHYERR);
+ WMI_SERVICE_PHYERR, len);
SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
- WMI_SERVICE_BCN_FILTER);
+ WMI_SERVICE_BCN_FILTER, len);
SVCMAP(WMI_10X_SERVICE_RTT,
- WMI_SERVICE_RTT);
+ WMI_SERVICE_RTT, len);
SVCMAP(WMI_10X_SERVICE_RATECTRL,
- WMI_SERVICE_RATECTRL);
+ WMI_SERVICE_RATECTRL, len);
SVCMAP(WMI_10X_SERVICE_WOW,
- WMI_SERVICE_WOW);
+ WMI_SERVICE_WOW, len);
SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
- WMI_SERVICE_RATECTRL_CACHE);
+ WMI_SERVICE_RATECTRL_CACHE, len);
SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
- WMI_SERVICE_IRAM_TIDS);
+ WMI_SERVICE_IRAM_TIDS, len);
SVCMAP(WMI_10X_SERVICE_BURST,
- WMI_SERVICE_BURST);
+ WMI_SERVICE_BURST, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
- WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
- WMI_SERVICE_FORCE_FW_HANG);
+ WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
- WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
}
-static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out)
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
{
SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
- WMI_SERVICE_BEACON_OFFLOAD);
+ WMI_SERVICE_BEACON_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
- WMI_SERVICE_SCAN_OFFLOAD);
+ WMI_SERVICE_SCAN_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
- WMI_SERVICE_ROAM_OFFLOAD);
+ WMI_SERVICE_ROAM_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
- WMI_SERVICE_BCN_MISS_OFFLOAD);
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
- WMI_SERVICE_STA_PWRSAVE);
+ WMI_SERVICE_STA_PWRSAVE, len);
SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
- WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
- WMI_SERVICE_AP_UAPSD);
+ WMI_SERVICE_AP_UAPSD, len);
SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
- WMI_SERVICE_AP_DFS);
+ WMI_SERVICE_AP_DFS, len);
SVCMAP(WMI_MAIN_SERVICE_11AC,
- WMI_SERVICE_11AC);
+ WMI_SERVICE_11AC, len);
SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
- WMI_SERVICE_BLOCKACK);
+ WMI_SERVICE_BLOCKACK, len);
SVCMAP(WMI_MAIN_SERVICE_PHYERR,
- WMI_SERVICE_PHYERR);
+ WMI_SERVICE_PHYERR, len);
SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
- WMI_SERVICE_BCN_FILTER);
+ WMI_SERVICE_BCN_FILTER, len);
SVCMAP(WMI_MAIN_SERVICE_RTT,
- WMI_SERVICE_RTT);
+ WMI_SERVICE_RTT, len);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
- WMI_SERVICE_RATECTRL);
+ WMI_SERVICE_RATECTRL, len);
SVCMAP(WMI_MAIN_SERVICE_WOW,
- WMI_SERVICE_WOW);
+ WMI_SERVICE_WOW, len);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
- WMI_SERVICE_RATECTRL_CACHE);
+ WMI_SERVICE_RATECTRL_CACHE, len);
SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
- WMI_SERVICE_IRAM_TIDS);
+ WMI_SERVICE_IRAM_TIDS, len);
SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
- WMI_SERVICE_ARPNS_OFFLOAD);
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_NLO,
- WMI_SERVICE_NLO);
+ WMI_SERVICE_NLO, len);
SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
- WMI_SERVICE_GTK_OFFLOAD);
+ WMI_SERVICE_GTK_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
- WMI_SERVICE_SCAN_SCH);
+ WMI_SERVICE_SCAN_SCH, len);
SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
- WMI_SERVICE_CSA_OFFLOAD);
+ WMI_SERVICE_CSA_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_CHATTER,
- WMI_SERVICE_CHATTER);
+ WMI_SERVICE_CHATTER, len);
SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
- WMI_SERVICE_COEX_FREQAVOID);
+ WMI_SERVICE_COEX_FREQAVOID, len);
SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
- WMI_SERVICE_PACKET_POWER_SAVE);
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
- WMI_SERVICE_FORCE_FW_HANG);
+ WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_MAIN_SERVICE_GPIO,
- WMI_SERVICE_GPIO);
+ WMI_SERVICE_GPIO, len);
SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
- WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
- WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
- WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
- WMI_SERVICE_STA_KEEP_ALIVE);
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
- WMI_SERVICE_TX_ENCAP);
+ WMI_SERVICE_TX_ENCAP, len);
}
#undef SVCMAP
@@ -1428,11 +1431,11 @@ struct wmi_service_ready_event {
* where FW can access this memory directly (or) by DMA.
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[1];
+ struct wlan_host_mem_req mem_reqs[0];
} __packed;
/* This is the definition from 10.X firmware branch */
-struct wmi_service_ready_event_10x {
+struct wmi_10x_service_ready_event {
__le32 sw_version;
__le32 abi_version;
@@ -1467,7 +1470,7 @@ struct wmi_service_ready_event_10x {
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[1];
+ struct wlan_host_mem_req mem_reqs[0];
} __packed;
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
@@ -1883,38 +1886,26 @@ struct host_memory_chunk {
__le32 size;
} __packed;
+struct wmi_host_mem_chunks {
+ __le32 count;
+ /* some fw revisions require at least 1 chunk regardless of count */
+ struct host_memory_chunk items[1];
+} __packed;
+
struct wmi_init_cmd {
struct wmi_resource_config resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
/* _10x stucture is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
struct wmi_init_cmd_10_2 {
struct wmi_resource_config_10_2 resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
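All three init commands now end with the shared wmi_host_mem_chunks layout. As a rough sketch (hypothetical helper, not part of the patch), the buffer length needed for n chunks works out to:

/* Hypothetical helper: items[1] already reserves one chunk entry. */
static size_t example_init_cmd_len(size_t n)
{
	size_t extra = (n > 1) ? (n - 1) * sizeof(struct host_memory_chunk) : 0;

	return sizeof(struct wmi_init_cmd) + extra;
}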
struct wmi_chan_list_entry {
@@ -1964,6 +1955,11 @@ struct wmi_ssid_list {
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+/* Per-channel scan times lower than this may be refused by some firmware
+ * revisions, which then report scan completion with a "timed out" reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
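A caller could clamp its per-channel times against this limit before serializing the scan argument; a minimal sketch, assuming a hypothetical helper name:

/* Hypothetical helper: enforce the firmware minimum on dwell times. */
static void example_clamp_scan_times(struct wmi_start_scan_arg *arg)
{
	if (arg->dwell_time_active < WMI_SCAN_CHAN_MIN_TIME_MSEC)
		arg->dwell_time_active = WMI_SCAN_CHAN_MIN_TIME_MSEC;
	if (arg->dwell_time_passive < WMI_SCAN_CHAN_MIN_TIME_MSEC)
		arg->dwell_time_passive = WMI_SCAN_CHAN_MIN_TIME_MSEC;
}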
+
/* Scan priority numbers must be sequential, starting with 0 */
enum wmi_scan_priority {
WMI_SCAN_PRIORITY_VERY_LOW = 0,
@@ -1974,7 +1970,7 @@ enum wmi_scan_priority {
WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
};
-struct wmi_start_scan_cmd {
+struct wmi_start_scan_common {
/* Scan ID */
__le32 scan_id;
/* Scan requestor ID */
@@ -2032,95 +2028,25 @@ struct wmi_start_scan_cmd {
__le32 probe_delay;
/* Scan control flags */
__le32 scan_ctrl_flags;
-
- /* Burst duration time in msecs */
- __le32 burst_duration;
- /*
- * TLV (tag length value ) paramerters follow the scan_cmd structure.
- * TLV can contain channel list, bssid list, ssid list and
- * ie. the TLV tags are defined above;
- */
} __packed;
-/* This is the definition from 10.X firmware branch */
-struct wmi_start_scan_cmd_10x {
- /* Scan ID */
- __le32 scan_id;
-
- /* Scan requestor ID */
- __le32 scan_req_id;
-
- /* VDEV id(interface) that is requesting scan */
- __le32 vdev_id;
-
- /* Scan Priority, input to scan scheduler */
- __le32 scan_priority;
-
- /* Scan events subscription */
- __le32 notify_scan_events;
-
- /* dwell time in msec on active channels */
- __le32 dwell_time_active;
-
- /* dwell time in msec on passive channels */
- __le32 dwell_time_passive;
-
- /*
- * min time in msec on the BSS channel,only valid if atleast one
- * VDEV is active
- */
- __le32 min_rest_time;
-
- /*
- * max rest time in msec on the BSS channel,only valid if at least
- * one VDEV is active
- */
- /*
- * the scanner will rest on the bss channel at least min_rest_time
- * after min_rest_time the scanner will start checking for tx/rx
- * activity on all VDEVs. if there is no activity the scanner will
- * switch to off channel. if there is activity the scanner will let
- * the radio on the bss channel until max_rest_time expires.at
- * max_rest_time scanner will switch to off channel irrespective of
- * activity. activity is determined by the idle_time parameter.
- */
- __le32 max_rest_time;
-
- /*
- * time before sending next set of probe requests.
- * The scanner keeps repeating probe requests transmission with
- * period specified by repeat_probe_time.
- * The number of probe requests specified depends on the ssid_list
- * and bssid_list
- */
- __le32 repeat_probe_time;
-
- /* time in msec between 2 consequetive probe requests with in a set. */
- __le32 probe_spacing_time;
-
- /*
- * data inactivity time in msec on bss channel that will be used by
- * scanner for measuring the inactivity.
- */
- __le32 idle_time;
-
- /* maximum time in msec allowed for scan */
- __le32 max_scan_time;
-
- /*
- * delay in msec before sending first probe request after switching
- * to a channel
+struct wmi_start_scan_tlvs {
+ /* TLV parameters. These include the channel list, ssid list,
+ * bssid list and extra IEs.
*/
- __le32 probe_delay;
+ u8 tlvs[0];
+} __packed;
- /* Scan control flags */
- __le32 scan_ctrl_flags;
+struct wmi_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
- /*
- * TLV (tag length value ) paramerters follow the scan_cmd structure.
- * TLV can contain channel list, bssid list, ssid list and
- * ie. the TLV tags are defined above;
- */
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ struct wmi_start_scan_tlvs tlvs;
} __packed;
struct wmi_ssid_arg {
@@ -2306,94 +2232,25 @@ struct wmi_mgmt_rx_event_v2 {
#define PHY_ERROR_FALSE_RADAR_EXT 0x24
#define PHY_ERROR_RADAR 0x05
-struct wmi_single_phyerr_rx_hdr {
- /* TSF timestamp */
+struct wmi_phyerr {
__le32 tsf_timestamp;
-
- /*
- * Current freq1, freq2
- *
- * [7:0]: freq1[lo]
- * [15:8] : freq1[hi]
- * [23:16]: freq2[lo]
- * [31:24]: freq2[hi]
- */
__le16 freq1;
__le16 freq2;
-
- /*
- * Combined RSSI over all chains and channel width for this PHY error
- *
- * [7:0]: RSSI combined
- * [15:8]: Channel width (MHz)
- * [23:16]: PHY error code
- * [24:16]: reserved (future use)
- */
u8 rssi_combined;
u8 chan_width_mhz;
u8 phy_err_code;
u8 rsvd0;
-
- /*
- * RSSI on chain 0 through 3
- *
- * This is formatted the same as the PPDU_START RX descriptor
- * field:
- *
- * [7:0]: pri20
- * [15:8]: sec20
- * [23:16]: sec40
- * [31:24]: sec80
- */
-
- __le32 rssi_chain0;
- __le32 rssi_chain1;
- __le32 rssi_chain2;
- __le32 rssi_chain3;
-
- /*
- * Last calibrated NF value for chain 0 through 3
- *
- * nf_list_1:
- *
- * + [15:0] - chain 0
- * + [31:16] - chain 1
- *
- * nf_list_2:
- *
- * + [15:0] - chain 2
- * + [31:16] - chain 3
- */
- __le32 nf_list_1;
- __le32 nf_list_2;
-
- /* Length of the frame */
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
__le32 buf_len;
+ u8 buf[0];
} __packed;
-struct wmi_single_phyerr_rx_event {
- /* Phy error event header */
- struct wmi_single_phyerr_rx_hdr hdr;
- /* frame buffer */
- u8 bufp[0];
-} __packed;
-
-struct wmi_comb_phyerr_rx_hdr {
- /* Phy error phy error count */
- __le32 num_phyerr_events;
+struct wmi_phyerr_event {
+ __le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
-} __packed;
-
-struct wmi_comb_phyerr_rx_event {
- /* Phy error phy error count */
- struct wmi_comb_phyerr_rx_hdr hdr;
- /*
- * frame buffer - contains multiple payloads in the order:
- * header - payload, header - payload...
- * (The header is of type: wmi_single_phyerr_rx_hdr)
- */
- u8 bufp[0];
+ struct wmi_phyerr phyerrs[0];
} __packed;
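The flattened layout makes each record a fixed wmi_phyerr header followed by buf_len payload bytes; a minimal walking sketch (illustrative only, with bounds checks against the real event length omitted):

static void example_walk_phyerrs(const struct wmi_phyerr_event *ev)
{
	const struct wmi_phyerr *phyerr = ev->phyerrs;
	u32 i, count = __le32_to_cpu(ev->num_phyerrs);

	for (i = 0; i < count; i++) {
		u32 buf_len = __le32_to_cpu(phyerr->buf_len);

		/* advance past the fixed header plus its payload */
		phyerr = (const void *)(phyerr->buf + buf_len);
	}
}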
#define PHYERR_TLV_SIG 0xBB
@@ -2908,11 +2765,6 @@ enum wmi_tp_scale {
WMI_TP_SCALE_SIZE = 5, /* max num of enum */
};
-struct wmi_set_channel_cmd {
- /* channel (only frequency and mode info are used) */
- struct wmi_channel chan;
-} __packed;
-
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
@@ -2943,6 +2795,10 @@ struct wmi_pdev_set_channel_cmd {
struct wmi_channel chan;
} __packed;
+struct wmi_pdev_pktlog_enable_cmd {
+ __le32 ev_bitmap;
+} __packed;
+
/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
#define WMI_DSCP_MAP_MAX (64)
struct wmi_pdev_set_dscp_tid_map_cmd {
@@ -3177,7 +3033,7 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
-struct wmi_pdev_stats_old {
+struct wmi_pdev_stats {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@@ -3188,15 +3044,8 @@ struct wmi_pdev_stats_old {
struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed;
-struct wmi_pdev_stats_10x {
- __le32 chan_nf; /* Channel noise floor */
- __le32 tx_frame_count; /* TX frame count */
- __le32 rx_frame_count; /* RX frame count */
- __le32 rx_clear_count; /* rx clear count */
- __le32 cycle_count; /* cycle count */
- __le32 phy_err_count; /* Phy error count */
- __le32 chan_tx_pwr; /* channel tx power */
- struct wal_dbg_stats wal; /* WAL dbg stats */
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats old;
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
@@ -3217,16 +3066,14 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
-struct wmi_peer_stats_old {
+struct wmi_peer_stats {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
} __packed;
-struct wmi_peer_stats_10x {
- struct wmi_mac_addr peer_macaddr;
- __le32 peer_rssi;
- __le32 peer_tx_rate;
+struct wmi_10x_peer_stats {
+ struct wmi_peer_stats old;
__le32 peer_rx_rate;
} __packed;
@@ -4708,7 +4555,6 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
-#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4719,8 +4565,27 @@ struct wmi_dbglog_cfg_cmd {
/* By default disable power save for IBSS */
#define ATH10K_DEFAULT_ATIM 0
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_svc_rdy_ev_arg {
+ __le32 min_tx_power;
+ __le32 max_tx_power;
+ __le32 ht_cap;
+ __le32 vht_cap;
+ __le32 sw_ver0;
+ __le32 sw_ver1;
+ __le32 phy_capab;
+ __le32 num_rf_chains;
+ __le32 eeprom_rd;
+ __le32 num_mem_reqs;
+ const __le32 *service_map;
+ size_t service_map_len;
+ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
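A consumer of the parsed service-ready argument would typically feed service_map back through one of the svc_map helpers above; a minimal sketch (the function name and the caller-provided 'out' bitmap are assumptions):

static bool example_fw_has_11ac(const struct wmi_svc_rdy_ev_arg *arg,
				unsigned long *out)
{
	wmi_main_svc_map(arg->service_map, out, arg->service_map_len);

	return test_bit(WMI_SERVICE_11AC, out);
}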
+
struct ath10k;
struct ath10k_vif;
+struct ath10k_fw_stats;
int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
@@ -4732,8 +4597,6 @@ int ath10k_wmi_connect(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
- const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
@@ -4794,5 +4657,9 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
+int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list);
+int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 93caf8e68901..2399a3921762 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,12 +1,13 @@
config ATH5K
tristate "Atheros 5xxx wireless cards support"
- depends on PCI && MAC80211
+ depends on (PCI || ATH25) && MAC80211
select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
select AVERAGE
- select ATH5K_PCI
+ select ATH5K_AHB if ATH25
+ select ATH5K_PCI if !ATH25
---help---
This module adds support for wireless adapters based on
Atheros 5xxx chipset.
@@ -51,9 +52,16 @@ config ATH5K_TRACER
If unsure, say N.
+config ATH5K_AHB
+ bool "Atheros 5xxx AHB bus support"
+ depends on ATH25
+ ---help---
+ This adds support for WiSoC type chipsets of the 5xxx Atheros
+ family.
+
config ATH5K_PCI
bool "Atheros 5xxx PCI bus support"
- depends on PCI
+ depends on (!ATH25 && PCI)
---help---
This adds support for PCI type chipsets of the 5xxx Atheros
family.
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 51e2d8668041..1b3a34f7f224 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -17,5 +17,6 @@ ath5k-y += ani.o
ath5k-y += sysfs.o
ath5k-y += mac80211-ops.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
+ath5k-$(CONFIG_ATH5K_AHB) += ahb.o
ath5k-$(CONFIG_ATH5K_PCI) += pci.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
new file mode 100644
index 000000000000..8f387cf67340
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <ath25_platform.h>
+#include "ath5k.h"
+#include "debug.h"
+#include "base.h"
+#include "reg.h"
+
+/* return bus cachesize in 4B word units */
+static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath5k_hw *ah = common->priv;
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ u16 *eeprom, *eeprom_end;
+
+ eeprom = (u16 *) bcfg->radio;
+ eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
+
+ eeprom += off;
+ if (eeprom > eeprom_end)
+ return false;
+
+ *data = *eeprom;
+ return true;
+}
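A minimal sketch of using this accessor for the usual EEPROM sanity check (illustrative only; AR5K_EEPROM_MAGIC and AR5K_EEPROM_MAGIC_VALUE are the existing ath5k defines, assumed available here):

static bool example_check_eeprom_magic(struct ath_common *common)
{
	u16 magic;

	if (!ath5k_ahb_eeprom_read(common, AR5K_EEPROM_MAGIC, &magic))
		return false;

	return magic == AR5K_EEPROM_MAGIC_VALUE;
}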
+
+int ath5k_hw_read_srev(struct ath5k_hw *ah)
+{
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ ah->ah_mac_srev = bcfg->devid;
+ return 0;
+}
+
+static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+{
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ u8 *cfg_mac;
+
+ if (to_platform_device(ah->dev)->id == 0)
+ cfg_mac = bcfg->config->wlan0_mac;
+ else
+ cfg_mac = bcfg->config->wlan1_mac;
+
+ memcpy(mac, cfg_mac, ETH_ALEN);
+ return 0;
+}
+
+static const struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
+ .read_cachesize = ath5k_ahb_read_cachesize,
+ .eeprom_read = ath5k_ahb_eeprom_read,
+ .eeprom_read_mac = ath5k_ahb_eeprom_read_mac,
+};
+
+/* Initialization */
+static int ath_ahb_probe(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ struct ath5k_hw *ah;
+ struct ieee80211_hw *hw;
+ struct resource *res;
+ void __iomem *mem;
+ int irq;
+ int ret = 0;
+ u32 reg;
+
+ if (!dev_get_platdata(&pdev->dev)) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource found\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ mem = ioremap_nocache(res->start, resource_size(res));
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ ret = -ENXIO;
+ goto err_iounmap;
+ }
+
+ irq = res->start;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ ah = hw->priv;
+ ah->hw = hw;
+ ah->dev = &pdev->dev;
+ ah->iobase = mem;
+ ah->irq = irq;
+ ah->devid = bcfg->devid;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Enable WMAC AHB arbitration */
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+
+ /* Enable global WMAC swapping */
+ reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP);
+ reg |= AR5K_AR2315_BYTESWAP_WMAC;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
+ } else {
+ /* Enable WMAC DMA access (assuming AR5312 or AR231x) */
+ /* TODO: check other platforms */
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(ah->dev)->id == 0)
+ reg |= AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg |= AR5K_AR5312_ENABLE_WLAN1;
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+
+ /*
+ * On a dual-band AR5312, the multiband radio is only
+ * used as pass-through. Disable 2 GHz support in the
+ * driver for it
+ */
+ if (to_platform_device(ah->dev)->id == 0 &&
+ (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
+ (BD_WLAN1 | BD_WLAN0))
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
+ }
+
+ ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
+ ret = -ENODEV;
+ goto err_free_hw;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ return 0;
+
+ err_free_hw:
+ ieee80211_free_hw(hw);
+ err_iounmap:
+ iounmap(mem);
+ err_out:
+ return ret;
+}
+
+static int ath_ahb_remove(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct ath5k_hw *ah;
+ u32 reg;
+
+ if (!hw)
+ return 0;
+
+ ah = hw->priv;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Disable WMAC AHB arbitration */
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ } else {
+ /* Stop DMA access */
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(ah->dev)->id == 0)
+ reg &= ~AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg &= ~AR5K_AR5312_ENABLE_WLAN1;
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ }
+
+ ath5k_deinit_ah(ah);
+ iounmap(ah->iobase);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static struct platform_driver ath_ahb_driver = {
+ .probe = ath_ahb_probe,
+ .remove = ath_ahb_remove,
+ .driver = {
+ .name = "ar231x-wmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ath_ahb_driver);
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ed2468220216..1ed7a88aeea9 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1647,6 +1647,32 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
return &(ath5k_hw_common(ah)->regulatory);
}
+#ifdef CONFIG_ATH5K_AHB
+#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000)
+
+static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
+{
+ /* On AR2315 and AR2317 the PCI clock domain registers
+ * are outside of the WMAC register space */
+ if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
+ (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
+ return AR5K_AR2315_PCI_BASE + reg;
+
+ return ah->iobase + reg;
+}
+
+static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
+{
+ return ioread32(ath5k_ahb_reg(ah, reg));
+}
+
+static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
+{
+ iowrite32(val, ath5k_ahb_reg(ah, reg));
+}
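In other words, on AR2315/AR2317 parts a register in the 0x4000-0x5000 window is routed through AR5K_AR2315_PCI_BASE instead of ah->iobase; a minimal sketch using an arbitrary example offset:

/* Illustrative only: 0x4010 is an arbitrary offset in the PCI clock domain. */
static u32 example_read_pci_domain_reg(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, 0x4010);
}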
+
+#else
+
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
{
return ioread32(ah->iobase + reg);
@@ -1657,6 +1683,8 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->iobase + reg);
}
+#endif
+
static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah)
{
return ath5k_hw_common(ah)->bus_ops->ath_bus_type;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4a09bb8f2f3..bc9cb356fa69 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -99,6 +99,15 @@ static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
+#ifdef CONFIG_ATH5K_AHB
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
+ { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
+#else
{ "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
{ "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
@@ -117,6 +126,7 @@ static const struct ath5k_srev_name srev_names[] = {
{ "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
{ "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
{ "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
+#endif
{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
{ "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
{ "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
@@ -132,6 +142,10 @@ static const struct ath5k_srev_name srev_names[] = {
{ "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
{ "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
{ "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
+#ifdef CONFIG_ATH5K_AHB
+ { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
+ { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
+#endif
{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 0beb7e7d6075..ca4b7ccd697f 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -163,14 +163,20 @@ int ath5k_init_leds(struct ath5k_hw *ah)
{
int ret = 0;
struct ieee80211_hw *hw = ah->hw;
+#ifndef CONFIG_ATH5K_AHB
struct pci_dev *pdev = ah->pdev;
+#endif
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
if (!ah->pdev)
return 0;
+#ifdef CONFIG_ATH5K_AHB
+ match = NULL;
+#else
match = pci_match_id(&ath5k_led_devices[0], pdev);
+#endif
if (match) {
__set_bit(ATH_STAT_LEDSOFT, ah->status);
ah->led_pin = ATH_PIN(match->driver_data);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index ab2709a43768..19eab2a69ad5 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -547,7 +547,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static void
-ath5k_sw_scan_start(struct ieee80211_hw *hw)
+ath5k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath5k_hw *ah = hw->priv;
if (!ah->assoc)
@@ -556,7 +558,7 @@ ath5k_sw_scan_start(struct ieee80211_hw *hw)
static void
-ath5k_sw_scan_complete(struct ieee80211_hw *hw)
+ath5k_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_ledstate(ah, ah->assoc ?
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 0583c69d26db..ddaad712c59a 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
} else {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
- for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
- ah->ah_txq[queue].tqi_type !=
- AR5K_TX_QUEUE_INACTIVE; queue++) {
-
- if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
- return -EINVAL;
- }
+ queue = queue_info->tqi_subtype;
break;
case AR5K_TX_QUEUE_UAPSD:
queue = AR5K_TX_QUEUE_ID_UAPSD;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index ba60e37213eb..7a5337877a0c 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2976,11 +2976,11 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
- const u8 *addr = mac ? mac : bcast_addr;
+ const u8 *addr = params->mac ? params->mac : bcast_addr;
return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
addr, WLAN_REASON_PREV_AUTH_NOT_VALID);
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 05debf700a84..4f82e8632d37 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,7 +22,7 @@
#define ATH6KL_MAX_IE 256
-__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 55c4064dd506..81ba48d2938b 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -37,76 +37,64 @@ struct ath6kl_fwlog_slot {
#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
-int ath6kl_printk(const char *level, const char *fmt, ...)
+void ath6kl_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- rtn = printk("%sath6kl: %pV", level, &vaf);
+ printk("%sath6kl: %pV", level, &vaf);
va_end(args);
-
- return rtn;
}
EXPORT_SYMBOL(ath6kl_printk);
-int ath6kl_info(const char *fmt, ...)
+void ath6kl_info(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
+ ath6kl_printk(KERN_INFO, "%pV", &vaf);
trace_ath6kl_log_info(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_info);
-int ath6kl_err(const char *fmt, ...)
+void ath6kl_err(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
+ ath6kl_printk(KERN_ERR, "%pV", &vaf);
trace_ath6kl_log_err(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_err);
-int ath6kl_warn(const char *fmt, ...)
+void ath6kl_warn(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
+ ath6kl_printk(KERN_WARNING, "%pV", &vaf);
trace_ath6kl_log_warn(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_warn);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index e194c10d9f00..19106ed28961 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,10 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
-__printf(1, 2) int ath6kl_info(const char *fmt, ...);
-__printf(1, 2) int ath6kl_err(const char *fmt, ...);
-__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) void ath6kl_info(const char *fmt, ...);
+__printf(1, 2) void ath6kl_err(const char *fmt, ...);
+__printf(1, 2) void ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
@@ -81,10 +81,9 @@ int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
-static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
- const char *fmt, ...)
+static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
+ const char *fmt, ...)
{
- return 0;
}
static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index a6a5e40b3e98..9da3594fd010 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1193,18 +1193,10 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
return 0;
}
-static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
-{
- if (usb_get_intfdata(intf))
- ath6kl_usb_remove(intf);
- return 0;
-}
-
#else
#define ath6kl_usb_pm_suspend NULL
#define ath6kl_usb_pm_resume NULL
-#define ath6kl_usb_pm_reset_resume NULL
#endif
@@ -1222,7 +1214,6 @@ static struct usb_driver ath6kl_usb_driver = {
.probe = ath6kl_usb_probe,
.suspend = ath6kl_usb_pm_suspend,
.resume = ath6kl_usb_pm_resume,
- .reset_resume = ath6kl_usb_pm_reset_resume,
.disconnect = ath6kl_usb_remove,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 896e63281b3b..fee0cadb0f5e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -3,6 +3,8 @@ config ATH9K_HW
config ATH9K_COMMON
tristate
select ATH_COMMON
+ select DEBUG_FS
+ select RELAY
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -148,6 +150,11 @@ config ATH9K_CHANNEL_CONTEXT
for multi-channel concurrency. Enable this if P2P PowerSave support
is required.
+config ATH9K_PCOEM
+ bool "Atheros ath9k support for PC OEM cards" if EXPERT
+ depends on ATH9K
+ default y
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 73704c1be736..473972288a84 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -16,8 +16,7 @@ ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
ath9k-$(CONFIG_ATH9K_WOW) += wow.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
- spectral.o
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
@@ -32,7 +31,6 @@ ath9k_hw-y:= \
ar5008_phy.o \
ar9002_calib.o \
ar9003_calib.o \
- ar9003_rtt.o \
calib.o \
eeprom.o \
eeprom_def.o \
@@ -50,6 +48,8 @@ ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
ar9003_mci.o
+ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
+
ath9k_hw-$(CONFIG_ATH9K_DYNACK) += dynack.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
@@ -58,7 +58,8 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o \
common-beacon.o \
- common-debug.o
+ common-debug.o \
+ common-spectral.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 4173838f4684..e000c4c27881 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -175,7 +175,6 @@ static struct platform_driver ath_ahb_driver = {
.remove = ath_ahb_remove,
.driver = {
.name = "ath9k",
- .owner = THIS_MODULE,
},
.id_table = ath9k_platform_id_table,
};
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index b72d0be716db..5829074208fa 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1190,7 +1190,7 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
static void ar5008_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
- u32 radar_0 = 0, radar_1 = 0;
+ u32 radar_0 = 0, radar_1;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
@@ -1204,6 +1204,9 @@ static void ar5008_hw_set_radar_params(struct ath_hw *ah,
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
@@ -1225,7 +1228,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
conf->fir_power = -33;
conf->radar_rssi = 20;
conf->pulse_height = 10;
- conf->pulse_rssi = 24;
+ conf->pulse_rssi = 15;
conf->pulse_inband = 15;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index cdc74005650c..42190b67c671 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -657,31 +657,29 @@ static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
ar9280_hw_olc_temp_compensation(ah);
}
-static bool ar9002_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
- bool iscaldone = true;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
- bool nfcal, nfcal_pending = false;
+ bool nfcal, nfcal_pending = false, percal_pending;
+ int ret;
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
- if (currCal && !nfcal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ar9002_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
+ percal_pending = (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING));
+
+ if (percal_pending && !nfcal) {
+ if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
+ return 0;
+
+ ah->cal_list_curr = currCal = currCal->calNext;
+ if (currCal->calState == CAL_WAITING) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ return 0;
}
}
@@ -698,7 +696,9 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
* NF is slow time-variant, so it is OK to use a
* historical value.
*/
- ath9k_hw_loadnf(ah, ah->curchan);
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
}
if (longcal) {
@@ -709,7 +709,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
}
}
- return iscaldone;
+ return !percal_pending;
}
/* Carrier leakage Calibration fix */
@@ -856,6 +856,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
/* Do PA Calibration */
ar9002_hw_pa_cal(ah, true);
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
if (ah->caldata)
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 2a93519f4bdf..f816909d9474 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -281,7 +281,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower0)
+ | SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
| (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
@@ -307,9 +307,9 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
| set11nRateFlags(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate);
- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
+ ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
}
static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 9a2afa2c690b..fc08162b5820 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -643,9 +643,12 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
* and fix otherwise.
*/
count = param->count;
- if (param->endless)
- count = 0x80;
- else if (count & 0x80)
+ if (param->endless) {
+ if (AR_SREV_9271(ah))
+ count = 0;
+ else
+ count = 0x80;
+ } else if (count & 0x80)
count = 0x7f;
REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index ac8301ef5242..06ab71db6e80 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -121,13 +121,12 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
return iscaldone;
}
-static bool ar9003_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
bool iscaldone = true;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
+ int ret;
/*
* For given calibration:
@@ -163,7 +162,9 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
* NF is slow time-variant, so it is OK to use a historical
* value.
*/
- ath9k_hw_loadnf(ah, ah->curchan);
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
/* start NF calibration, without updating BB NF register */
ath9k_hw_start_nfcal(ah, false);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 80c6eacbda53..08225a0067c2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4079,27 +4079,28 @@ static int ar9003_hw_get_thermometer(struct ath_hw *ah)
static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
int thermometer = ar9003_hw_get_thermometer(ah);
u8 therm_on = (thermometer < 0) ? 0 : 1;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- if (ah->caps.tx_chainmask & BIT(1))
+ if (pCap->chip_chainmask & BIT(1))
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- if (ah->caps.tx_chainmask & BIT(2))
+ if (pCap->chip_chainmask & BIT(2))
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
- if (ah->caps.tx_chainmask & BIT(1)) {
+ if (pCap->chip_chainmask & BIT(1)) {
therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
- if (ah->caps.tx_chainmask & BIT(2)) {
+ if (pCap->chip_chainmask & BIT(2)) {
therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
@@ -4376,6 +4377,25 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
targetPowerArray, numPiers);
}
+static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *pwr_array)
+{
+ u32 val;
+
+ /* target power values for self generated frames (ACK,RTS/CTS) */
+ if (IS_CHAN_2GHZ(chan)) {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ } else {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ }
+ REG_WRITE(ah, AR_TPC, val);
+}
+
/* Set tx power registers to array of values passed in */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
@@ -5311,6 +5331,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize];
u8 target_power_val_t2_eep[ar9300RateSize];
+ u8 targetPowerValT2_tpc[ar9300RateSize];
unsigned int i = 0, paprd_scale_factor = 0;
u8 pwr_idx, min_pwridx = 0;
@@ -5362,6 +5383,9 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
twiceAntennaReduction,
powerLimit);
+ memcpy(targetPowerValT2_tpc, targetPowerValT2,
+ sizeof(targetPowerValT2));
+
if (ar9003_is_paprd_enabled(ah)) {
for (i = 0; i < ar9300RateSize; i++) {
if ((ah->paprd_ratemask & (1 << i)) &&
@@ -5395,6 +5419,30 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
ar9003_hw_calibration_apply(ah, chan->channel);
ar9003_paprd_set_txpower(ah, chan, targetPowerValT2);
+
+ ar9003_hw_selfgen_tpc_txpower(ah, chan, targetPowerValT2);
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ u32 val;
+
+ ar9003_hw_init_rate_txpower(ah, targetPowerValT2_tpc, chan);
+
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX,
+ AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ /* Disable per chain power reduction */
+ val = REG_READ(ah, AR_PHY_POWER_TX_SUB);
+ if (AR_SREV_9340(ah))
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFFFC0);
+ else
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFF000);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX, 0);
+ }
}
static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index ddef9eedbac6..06ad2172030e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -333,12 +333,29 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
qca953x_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
qca953x_1p0_soc_postamble);
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- qca953x_1p0_common_wo_xlna_rx_gain_table);
- INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
- qca953x_1p0_common_wo_xlna_rx_gain_bounds);
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- qca953x_1p0_modes_no_xpa_tx_gain_table);
+
+ if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ }
+
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+
INIT_INI_ARRAY(&ah->iniModesFastClock,
qca953x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
@@ -518,9 +535,15 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table);
- else if (AR_SREV_9531(ah))
+ else if (AR_SREV_9531_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- qca953x_1p0_modes_xpa_tx_gain_table);
+ qca953x_2p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -562,7 +585,10 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table);
else if (AR_SREV_9531(ah)) {
- if (AR_SREV_9531_11(ah))
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
qca953x_1p1_modes_no_xpa_tx_gain_table);
else
@@ -670,9 +696,6 @@ static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_ob_db_tx_gain_1_1);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_ub124_tx_gain_table_1p0);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_type5_tx_gain_table);
@@ -792,11 +815,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar955x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds);
- } else if (AR_SREV_9531(ah)) {
+ } else if (AR_SREV_9531_10(ah) || AR_SREV_9531_11(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
qca953x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 057b1657c428..da84b705cbcd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -101,7 +101,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower0)
+ | SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
| (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
@@ -152,9 +152,9 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
+ ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
}
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1e8ea5e4d4ca..ae6cde273414 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -18,6 +18,21 @@
#include "hw.h"
#include "ar9003_phy.h"
+#define AR9300_OFDM_RATES 8
+#define AR9300_HT_SS_RATES 8
+#define AR9300_HT_DS_RATES 8
+#define AR9300_HT_TS_RATES 8
+
+#define AR9300_11NA_OFDM_SHIFT 0
+#define AR9300_11NA_HT_SS_SHIFT 8
+#define AR9300_11NA_HT_DS_SHIFT 16
+#define AR9300_11NA_HT_TS_SHIFT 24
+
+#define AR9300_11NG_OFDM_SHIFT 4
+#define AR9300_11NG_HT_SS_SHIFT 12
+#define AR9300_11NG_HT_DS_SHIFT 20
+#define AR9300_11NG_HT_TS_SHIFT 28
+
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
@@ -40,6 +55,71 @@ static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
+static const u8 ofdm2pwr[] = {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54
+};
+
+static const u8 mcs2pwr_ht20[] = {
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23
+};
+
+static const u8 mcs2pwr_ht40[] = {
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+};
+
/**
* ar9003_hw_set_channel - set channel on single-chip device
* @ah: atheros hardware structure
@@ -1361,7 +1441,7 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
unsigned int regWrites = 0;
- u32 radar_0 = 0, radar_1 = 0;
+ u32 radar_0 = 0, radar_1;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
@@ -1375,6 +1455,9 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
@@ -1401,7 +1484,7 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
conf->fir_power = -28;
conf->radar_rssi = 0;
conf->pulse_height = 10;
- conf->pulse_rssi = 24;
+ conf->pulse_rssi = 15;
conf->pulse_inband = 8;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
@@ -1796,6 +1879,100 @@ static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
}
+static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)
+{
+ ah->tx_power[0] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[1] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[2] = min(rate_array[ALL_TARGET_LEGACY_1L_5L],
+ rate_array[ALL_TARGET_LEGACY_5S]);
+ ah->tx_power[3] = min(rate_array[ALL_TARGET_LEGACY_11L],
+ rate_array[ALL_TARGET_LEGACY_11S]);
+}
+
+static void ar9003_hw_init_txpower_ofdm(struct ath_hw *ah, u8 *rate_array,
+ int offset)
+{
+ int i, j;
+
+ for (i = offset; i < offset + AR9300_OFDM_RATES; i++) {
+ /* OFDM rate to power table idx */
+ j = ofdm2pwr[i - offset];
+ ah->tx_power[i] = rate_array[j];
+ }
+}
+
+static void ar9003_hw_init_txpower_ht(struct ath_hw *ah, u8 *rate_array,
+ int ss_offset, int ds_offset,
+ int ts_offset, bool is_40)
+{
+ int i, j, mcs_idx = 0;
+ const u8 *mcs2pwr = (is_40) ? mcs2pwr_ht40 : mcs2pwr_ht20;
+
+ for (i = ss_offset; i < ss_offset + AR9300_HT_SS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ds_offset; i < ds_offset + AR9300_HT_DS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ts_offset; i < ts_offset + AR9300_HT_TS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+}
+
+static void ar9003_hw_init_txpower_stbc(struct ath_hw *ah, int ss_offset,
+ int ds_offset, int ts_offset)
+{
+ memcpy(&ah->tx_power_stbc[ss_offset], &ah->tx_power[ss_offset],
+ AR9300_HT_SS_RATES);
+ memcpy(&ah->tx_power_stbc[ds_offset], &ah->tx_power[ds_offset],
+ AR9300_HT_DS_RATES);
+ memcpy(&ah->tx_power_stbc[ts_offset], &ah->tx_power[ts_offset],
+ AR9300_HT_TS_RATES);
+}
+
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan)
+{
+ if (IS_CHAN_5GHZ(chan)) {
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NA_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT);
+ }
+ } else {
+ ar9003_hw_init_txpower_cck(ah, rate_array);
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NG_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT);
+ }
+ }
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
index a43b30d723a4..6290467a75a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -17,6 +17,7 @@
#ifndef AR9003_RTT_H
#define AR9003_RTT_H
+#ifdef CONFIG_ATH9K_PCOEM
void ar9003_hw_rtt_enable(struct ath_hw *ah);
void ar9003_hw_rtt_disable(struct ath_hw *ah);
void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
@@ -25,5 +26,40 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah);
void ar9003_hw_rtt_fill_hist(struct ath_hw *ah);
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan);
+#else
+static inline void ar9003_hw_rtt_enable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_disable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask)
+{
+}
+
+static inline bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
+{
+ return false;
+}
+
+static inline void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
+{
+}
+
+static inline bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ return false;
+}
+#endif
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 812a9d787bf3..159cc6fd2362 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -20,6 +20,8 @@
#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+#define qca953x_1p0_soc_preamble ar955x_1p0_soc_preamble
+
#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
@@ -28,6 +30,10 @@
#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+#define qca953x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
+
+#define qca953x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
+
static const u32 qca953x_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -490,35 +496,6 @@ static const u32 qca953x_1p0_radio_postamble[][5] = {
{0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
};
-static const u32 qca953x_1p0_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00007000, 0x00000000},
- {0x00007004, 0x00000000},
- {0x00007008, 0x00000000},
- {0x0000700c, 0x00000000},
- {0x0000701c, 0x00000000},
- {0x00007020, 0x00000000},
- {0x00007024, 0x00000000},
- {0x00007028, 0x00000000},
- {0x0000702c, 0x00000000},
- {0x00007030, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000000},
-};
-
-static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
-};
-
-static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
-};
-
static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
/* Addr allmodes */
{0x0000a2dc, 0xfffd5aaa},
@@ -715,8 +692,73 @@ static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
{0x00016448, 0x6c927a70},
};
+static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
static const u32 qca953x_2p0_baseband_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00009800, 0xafe68e30},
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
@@ -914,4 +956,400 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
};
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_2p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_2p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 74d8bc05b317..fd6a84ccd49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -507,7 +507,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -545,9 +545,9 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x1f020503},
- {0x0000a39c, 0x29180c03},
- {0x0000a3a0, 0x9a8b6844},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index a5ca65240af3..5d4629f96c15 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -24,7 +24,149 @@
#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-#define ar9580_1p0_radio_core ar9300_2p2_radio_core
+static const u32 ar9580_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db2db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458a214f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db2db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db2db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 01a7db061c6a..1a9fe0983a6b 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -28,7 +28,6 @@
#include "debug.h"
#include "mci.h"
#include "dfs.h"
-#include "spectral.h"
struct ath_node;
struct ath_vif;
@@ -190,6 +189,7 @@ struct ath_frame_info {
u8 rtscts_rate;
u8 retries : 7;
u8 baw_tracked : 1;
+ u8 tx_power;
};
struct ath_rxbuf {
@@ -345,7 +345,9 @@ struct ath_chanctx {
u64 tsf_val;
u32 last_beacon;
+ int flush_timeout;
u16 txpower;
+ u16 cur_txpower;
bool offchannel;
bool stopped;
bool active;
@@ -362,7 +364,7 @@ enum ath_chanctx_event {
ATH_CHANCTX_EVENT_BEACON_SENT,
ATH_CHANCTX_EVENT_TSF_TIMER,
ATH_CHANCTX_EVENT_BEACON_RECEIVED,
- ATH_CHANCTX_EVENT_ASSOC,
+ ATH_CHANCTX_EVENT_AUTHORIZED,
ATH_CHANCTX_EVENT_SWITCH,
ATH_CHANCTX_EVENT_ASSIGN,
ATH_CHANCTX_EVENT_UNASSIGN,
@@ -380,10 +382,12 @@ enum ath_chanctx_state {
struct ath_chanctx_sched {
bool beacon_pending;
+ bool beacon_adjust;
bool offchannel_pending;
bool wait_switch;
bool force_noa_update;
bool extend_absence;
+ bool mgd_prepare_tx;
enum ath_chanctx_state state;
u8 beacon_miss;
@@ -468,6 +472,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force);
void ath_offchannel_next(struct ath_softc *sc);
void ath_scan_complete(struct ath_softc *sc, bool abort);
void ath_roc_complete(struct ath_softc *sc, bool abort);
+struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
#else
@@ -540,7 +545,6 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
-int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan);
void ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
@@ -595,7 +599,7 @@ struct ath_vif {
u16 seq_no;
/* BSS info */
- u8 bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN] __aligned(2);
u16 aid;
bool assoc;
@@ -618,6 +622,7 @@ struct ath_vif {
u32 noa_start;
u32 noa_duration;
bool periodic_noa;
+ bool oneshot_noa;
};
struct ath9k_vif_iter_data {
@@ -715,7 +720,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
-void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override);
/**********/
/* BTCOEX */
@@ -927,6 +933,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_PCI_AR9565_2ANT 0x0100
#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
#define ATH9K_PCI_KILLER 0x0400
+#define ATH9K_PCI_LED_ACT_HI 0x0800
/*
* Default cache line size, in bytes.
@@ -975,6 +982,7 @@ struct ath_softc {
struct ath_chanctx_sched sched;
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
+ struct completion go_beacon;
#endif
unsigned long driver_data;
@@ -982,7 +990,6 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
u16 ps_flags; /* PS_* */
- u16 curtxpow;
bool ps_enabled;
bool ps_idle;
short nbcnvifs;
@@ -1023,10 +1030,8 @@ struct ath_softc {
struct dfs_pattern_detector *dfs_detector;
u64 dfs_prev_pulse_ts;
u32 wow_enabled;
- /* relay(fs) channel for spectral scan */
- struct rchan *rfs_chan_spec_scan;
- enum spectral_mode spectral_mode;
- struct ath_spec_scan spec_config;
+
+ struct ath_spec_scan_priv spec_priv;
struct ieee80211_vif *tx99_vif;
struct sk_buff *tx99_skb;
@@ -1069,7 +1074,7 @@ void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
-int ath_reset(struct ath_softc *sc);
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan);
void ath_cancel_work(struct ath_softc *sc);
void ath_restart_work(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ecb783beeec2..cb366adc820b 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -78,7 +78,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
struct ath_tx_info info;
struct ieee80211_supported_band *sband;
u8 chainmask = ah->txchainmask;
- u8 rate = 0;
+ u8 i, rate = 0;
sband = &common->sbands[sc->cur_chandef.chan->band];
rate = sband->bitrates[rateidx].hw_value;
@@ -88,7 +88,8 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
memset(&info, 0, sizeof(info));
info.pkt_len = skb->len + FCS_LEN;
info.type = ATH9K_PKT_TYPE_BEACON;
- info.txpower = MAX_RATE_POWER;
+ for (i = 0; i < 4; i++)
+ info.txpower[i] = MAX_RATE_POWER;
info.keyix = ATH9K_TXKEYIX_INVALID;
info.keytype = ATH9K_KEY_TYPE_CLEAR;
info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 278365b8a895..e200a6e3aca5 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -234,7 +234,7 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath9k_nfcal_hist *h = NULL;
unsigned i, j;
@@ -301,7 +301,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
- return;
+ return -ETIMEDOUT;
}
/*
@@ -322,6 +322,8 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
}
REGWRITE_BUFFER_FLUSH(ah);
+
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b8ed95e9a335..87badf4bb8a4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -109,7 +109,7 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 945c89826b14..206665059d66 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -66,7 +66,7 @@ static int ath_set_channel(struct ath_softc *sc)
}
hchan = &sc->sc_ah->channels[pos];
- r = ath_reset_internal(sc, hchan);
+ r = ath_reset(sc, hchan);
if (r)
return r;
@@ -92,8 +92,8 @@ static int ath_set_channel(struct ath_softc *sc)
} else {
/* perform spectral scan if requested. */
if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
+ sc->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &sc->spec_priv);
}
return 0;
@@ -117,6 +117,7 @@ void ath_chanctx_init(struct ath_softc *sc)
cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
INIT_LIST_HEAD(&ctx->vifs);
ctx->txpower = ATH_TXPOWER_MAX;
+ ctx->flush_timeout = HZ / 5; /* 200ms */
for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
INIT_LIST_HEAD(&ctx->acq[j]);
}
@@ -145,6 +146,36 @@ void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+/*************/
+/* Utilities */
+/*************/
+
+struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc)
+{
+ struct ath_chanctx *ctx;
+ struct ath_vif *avp;
+ struct ieee80211_vif *vif;
+
+ spin_lock_bh(&sc->chan_lock);
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (!ctx->active)
+ continue;
+
+ list_for_each_entry(avp, &ctx->vifs, list) {
+ vif = avp->vif;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
+ spin_unlock_bh(&sc->chan_lock);
+ return ctx;
+ }
+ }
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
+ return NULL;
+}
+
/**********************************************************/
/* Functions to handle the channel context state machine. */
/**********************************************************/
@@ -171,7 +202,7 @@ static const char *chanctx_event_string(enum ath_chanctx_event ev)
case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT);
case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER);
case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED);
- case_rtn_string(ATH_CHANCTX_EVENT_ASSOC);
+ case_rtn_string(ATH_CHANCTX_EVENT_AUTHORIZED);
case_rtn_string(ATH_CHANCTX_EVENT_SWITCH);
case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN);
case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN);
@@ -198,6 +229,7 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ictx;
struct ath_vif *avp;
bool active = false;
u8 n_active = 0;
@@ -205,6 +237,28 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
if (!ctx)
return;
+ if (ctx == &sc->offchannel.chan) {
+ spin_lock_bh(&sc->chan_lock);
+
+ if (likely(sc->sched.channel_switch_time))
+ ctx->flush_timeout =
+ usecs_to_jiffies(sc->sched.channel_switch_time);
+ else
+ ctx->flush_timeout =
+ msecs_to_jiffies(10);
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ /*
+ * There is no need to iterate over the
+ * active/assigned channel contexts if
+ * the current context is offchannel.
+ */
+ return;
+ }
+
+ ictx = ctx;
+
list_for_each_entry(avp, &ctx->vifs, list) {
struct ieee80211_vif *vif = avp->vif;
@@ -227,12 +281,23 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
n_active++;
}
+ spin_lock_bh(&sc->chan_lock);
+
if (n_active <= 1) {
+ ictx->flush_timeout = HZ / 5;
clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags);
+ spin_unlock_bh(&sc->chan_lock);
return;
}
- if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+
+ ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);
+
+ if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
+ spin_unlock_bh(&sc->chan_lock);
return;
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
if (ath9k_is_chanctx_enabled()) {
ath_chanctx_event(sc, NULL,
@@ -301,6 +366,111 @@ static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time)
"Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time));
}
+static void ath_chanctx_handle_bmiss(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp)
+{
+ /*
+ * Clear the extend_absence flag if it had been
+ * set during the previous beacon transmission,
+ * since we need to revert to the normal NoA
+ * schedule.
+ */
+ if (ctx->active && sc->sched.extend_absence) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = false;
+ }
+
+ /* If at least two consecutive beacons were missed on the STA
+ * chanctx, stay on the STA channel for one extra beacon period,
+ * to resync the timer properly.
+ */
+ if (ctx->active && sc->sched.beacon_miss >= 2) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = true;
+ }
+}
+
+static void ath_chanctx_offchannel_noa(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp,
+ u32 tsf_time)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->offchannel_start = tsf_time;
+ avp->offchannel_duration = sc->sched.offchannel_duration;
+
+ ath_dbg(common, CHAN_CTX,
+ "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
+ avp->offchannel_duration,
+ avp->offchannel_start,
+ avp->noa_index);
+
+ /*
+ * When multiple contexts are active, the NoA
+ * has to be recalculated and advertised after
+ * an offchannel operation.
+ */
+ if (ctx->active && avp->noa_duration)
+ avp->noa_duration = 0;
+}
+
+static void ath_chanctx_set_periodic_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ struct ath_beacon_config *cur_conf,
+ u32 tsf_time,
+ u32 beacon_int)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+
+ if (sc->sched.extend_absence)
+ avp->noa_duration = (3 * beacon_int / 2) +
+ sc->sched.channel_switch_time;
+ else
+ avp->noa_duration =
+ TU_TO_USEC(cur_conf->beacon_interval) / 2 +
+ sc->sched.channel_switch_time;
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
+ sc->sched.extend_absence)
+ avp->periodic_noa = false;
+ else
+ avp->periodic_noa = true;
+
+ ath_dbg(common, CHAN_CTX,
+ "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
+static void ath_chanctx_set_oneshot_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ u32 tsf_time,
+ u32 duration)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+ avp->periodic_noa = false;
+ avp->oneshot_noa = true;
+ avp->noa_duration = duration + sc->sched.channel_switch_time;
+
+ ath_dbg(common, CHAN_CTX,
+ "oneshot noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
enum ath_chanctx_event ev)
{
@@ -327,6 +497,14 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
if (avp->offchannel_duration)
avp->offchannel_duration = 0;
+ if (avp->oneshot_noa) {
+ avp->noa_duration = 0;
+ avp->oneshot_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing oneshot NoA\n");
+ }
+
if (avp->chanctx != sc->cur_chan) {
ath_dbg(common, CHAN_CTX,
"Contexts differ, not preparing beacon\n");
@@ -356,6 +534,24 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
"Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n");
}
+ if (sc->sched.mgd_prepare_tx)
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+
+ /*
+ * When a context becomes inactive, for example,
+ * disassociation of a station context, the NoA
+ * attribute needs to be removed from subsequent
+ * beacons.
+ */
+ if (!ctx->active && avp->noa_duration &&
+ sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
+ avp->noa_duration = 0;
+ avp->periodic_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing NoA schedule\n");
+ }
+
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
break;
@@ -378,45 +574,22 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
* values and increment the index.
*/
if (sc->next_chan == &sc->offchannel.chan) {
- avp->noa_index++;
- avp->offchannel_start = tsf_time;
- avp->offchannel_duration = sc->sched.offchannel_duration;
-
- ath_dbg(common, CHAN_CTX,
- "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
- avp->offchannel_duration,
- avp->offchannel_start,
- avp->noa_index);
-
- /*
- * When multiple contexts are active, the NoA
- * has to be recalculated and advertised after
- * an offchannel operation.
- */
- if (ctx->active && avp->noa_duration)
- avp->noa_duration = 0;
-
+ ath_chanctx_offchannel_noa(sc, ctx, avp, tsf_time);
break;
}
- /*
- * Clear the extend_absence flag if it had been
- * set during the previous beacon transmission,
- * since we need to revert to the normal NoA
- * schedule.
- */
- if (ctx->active && sc->sched.extend_absence) {
- avp->noa_duration = 0;
- sc->sched.extend_absence = false;
- }
+ ath_chanctx_handle_bmiss(sc, ctx, avp);
- /* If at least two consecutive beacons were missed on the STA
- * chanctx, stay on the STA channel for one extra beacon period,
- * to resync the timer properly.
+ /*
+ * If mgd_prepare_tx() has been called by mac80211,
+ * a one-shot NoA needs to be sent. This can happen
+ * with one or more active channel contexts - in both
+ * cases, a new NoA schedule has to be advertised.
*/
- if (ctx->active && sc->sched.beacon_miss >= 2) {
- avp->noa_duration = 0;
- sc->sched.extend_absence = true;
+ if (sc->sched.mgd_prepare_tx) {
+ ath_chanctx_set_oneshot_noa(sc, avp, tsf_time,
+ jiffies_to_usecs(HZ / 5));
+ break;
}
/* Prevent wrap-around issues */
@@ -429,31 +602,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
* announcement.
*/
if (ctx->active &&
- (!avp->noa_duration || sc->sched.force_noa_update)) {
- avp->noa_index++;
- avp->noa_start = tsf_time;
-
- if (sc->sched.extend_absence)
- avp->noa_duration = (3 * beacon_int / 2) +
- sc->sched.channel_switch_time;
- else
- avp->noa_duration =
- TU_TO_USEC(cur_conf->beacon_interval) / 2 +
- sc->sched.channel_switch_time;
-
- if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
- sc->sched.extend_absence)
- avp->periodic_noa = false;
- else
- avp->periodic_noa = true;
-
- ath_dbg(common, CHAN_CTX,
- "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
- avp->noa_duration,
- avp->noa_start,
- avp->noa_index,
- avp->periodic_noa);
- }
+ (!avp->noa_duration || sc->sched.force_noa_update))
+ ath_chanctx_set_periodic_noa(sc, avp, cur_conf,
+ tsf_time, beacon_int);
if (ctx->active && sc->sched.force_noa_update)
sc->sched.force_noa_update = false;
@@ -467,6 +618,15 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
}
sc->sched.beacon_pending = false;
+
+ if (sc->sched.mgd_prepare_tx) {
+ sc->sched.mgd_prepare_tx = false;
+ complete(&sc->go_beacon);
+ ath_dbg(common, CHAN_CTX,
+ "Beacon sent, complete go_beacon\n");
+ break;
+ }
+
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
break;
@@ -495,10 +655,16 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
sc->cur_chan == &sc->offchannel.chan)
break;
- ath_chanctx_adjust_tbtt_delta(sc);
sc->sched.beacon_pending = false;
sc->sched.beacon_miss = 0;
+ if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+ !sc->sched.beacon_adjust ||
+ !sc->cur_chan->tsf_val)
+ break;
+
+ ath_chanctx_adjust_tbtt_delta(sc);
+
/* TSF time might have been updated by the incoming beacon,
* need to update the channel switch timer to reflect the change.
*/
@@ -507,10 +673,10 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
tsf_time += ath9k_hw_gettsf32(ah);
-
+ sc->sched.beacon_adjust = false;
ath_chanctx_setup_timer(sc, tsf_time);
break;
- case ATH_CHANCTX_EVENT_ASSOC:
+ case ATH_CHANCTX_EVENT_AUTHORIZED:
if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
avp->chanctx != sc->cur_chan)
break;
@@ -552,6 +718,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ath_chanctx_setup_timer(sc, tsf_time);
sc->sched.beacon_pending = true;
+ sc->sched.beacon_adjust = true;
break;
case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL:
if (sc->cur_chan == &sc->offchannel.chan ||
@@ -578,22 +745,6 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ieee80211_queue_work(sc->hw, &sc->chanctx_work);
break;
case ATH_CHANCTX_EVENT_ASSIGN:
- /*
- * When adding a new channel context, check if a scan
- * is in progress and abort it since the addition of
- * a new channel context is usually followed by VIF
- * assignment, in which case we have to start multi-channel
- * operation.
- */
- if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
- ath_dbg(common, CHAN_CTX,
- "Aborting HW scan to add new context\n");
-
- spin_unlock_bh(&sc->chan_lock);
- del_timer_sync(&sc->offchannel.timer);
- ath_scan_complete(sc, true);
- spin_lock_bh(&sc->chan_lock);
- }
break;
case ATH_CHANCTX_EVENT_CHANGE:
break;
@@ -751,6 +902,11 @@ void ath_offchannel_next(struct ath_softc *sc)
sc->offchannel.state = ATH_OFFCHANNEL_ROC_START;
ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan);
} else {
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.offchannel_pending = false;
+ sc->sched.wait_switch = false;
+ spin_unlock_bh(&sc->chan_lock);
+
ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
NULL);
sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
@@ -770,8 +926,7 @@ void ath_roc_complete(struct ath_softc *sc, bool abort)
sc->offchannel.roc_vif = NULL;
sc->offchannel.roc_chan = NULL;
- if (!abort)
- ieee80211_remain_on_channel_expired(sc->hw);
+ ieee80211_remain_on_channel_expired(sc->hw);
ath_offchannel_next(sc);
ath9k_ps_restore(sc);
}
@@ -808,7 +963,7 @@ static void ath_scan_send_probe(struct ath_softc *sc,
struct ieee80211_tx_info *info;
int band = sc->offchannel.chan.chandef.chan->band;
- skb = ieee80211_probereq_get(sc->hw, vif,
+ skb = ieee80211_probereq_get(sc->hw, vif->addr,
ssid->ssid, ssid->ssid_len, req->ie_len);
if (!skb)
return;
@@ -902,9 +1057,8 @@ static void ath_offchannel_timer(unsigned long data)
break;
case ATH_OFFCHANNEL_ROC_START:
case ATH_OFFCHANNEL_ROC_WAIT:
- ctx = ath_chanctx_get_oper_chan(sc, false);
sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
- ath_chanctx_switch(sc, ctx, NULL);
+ ath_roc_complete(sc, false);
break;
default:
break;
@@ -1034,7 +1188,6 @@ static void ath_offchannel_channel_change(struct ath_softc *sc)
ieee80211_ready_on_channel(sc->hw);
break;
case ATH_OFFCHANNEL_ROC_DONE:
- ath_roc_complete(sc, false);
break;
default:
break;
@@ -1082,10 +1235,11 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
ath9k_chanctx_stop_queues(sc, sc->cur_chan);
queues_stopped = true;
- __ath9k_flush(sc->hw, ~0, true);
+ __ath9k_flush(sc->hw, ~0, true, false, false);
if (ath_chanctx_send_ps_frame(sc, true))
- __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false);
+ __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO),
+ false, false, false);
send_ps = true;
spin_lock_bh(&sc->chan_lock);
@@ -1177,6 +1331,8 @@ void ath9k_init_channel_context(struct ath_softc *sc)
(unsigned long)sc);
setup_timer(&sc->sched.timer, ath_chanctx_timer,
(unsigned long)sc);
+
+ init_completion(&sc->go_beacon);
}
void ath9k_deinit_channel_context(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 8f68426ca653..ec93ddf0863a 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -24,23 +24,24 @@ static s8 fix_rssi_inv_only(u8 rssi_val)
return (s8) rssi_val;
}
-static void ath_debug_send_fft_sample(struct ath_softc *sc,
+static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
struct fft_sample_tlv *fft_sample_tlv)
{
int length;
- if (!sc->rfs_chan_spec_scan)
+ if (!spec_priv->rfs_chan_spec_scan)
return;
length = __be16_to_cpu(fft_sample_tlv->length) +
sizeof(*fft_sample_tlv);
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+ relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
}
/* returns 1 if this was a spectral frame, even if not handled. */
-int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf)
{
- struct ath_hw *ah = sc->sc_ah;
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
u8 num_bins, *bins, *vdata = (u8 *)hdr;
struct fft_sample_ht20 fft_sample_20;
struct fft_sample_ht20_40 fft_sample_40;
@@ -67,7 +68,7 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
- chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
@@ -199,10 +200,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
tlv = (struct fft_sample_tlv *)&fft_sample_20;
}
- ath_debug_send_fft_sample(sc, tlv);
+ ath_debug_send_fft_sample(spec_priv, tlv);
return 1;
}
+EXPORT_SYMBOL(ath_cmn_process_fft);
/*********************/
/* spectral_scan_ctl */
@@ -211,11 +213,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char *mode = "";
unsigned int len;
- switch (sc->spectral_mode) {
+ switch (spec_priv->spectral_mode) {
case SPECTRAL_DISABLED:
mode = "disable";
break;
@@ -233,12 +235,84 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv)
+{
+ struct ath_hw *ah = spec_priv->ah;
+ u32 rxfilter;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ ath9k_hw_setrxfilter(ah, rxfilter |
+ ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR);
+
+ /* TODO: usually this should not be necessary, but for some reason
+ * (or in some mode?) the trigger must be called after the
+ * configuration, otherwise the register will have its values reset
+ * (on my ar9220 to value 0x01002310)
+ */
+ ath9k_cmn_spectral_scan_config(common, spec_priv, spec_priv->spectral_mode);
+ ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
+ ath_ps_ops(common)->restore(common);
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_trigger);
+
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
+ enum spectral_mode spectral_mode)
+{
+ struct ath_hw *ah = spec_priv->ah;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return -1;
+ }
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ spec_priv->spec_config.enabled = 0;
+ break;
+ case SPECTRAL_BACKGROUND:
+ /* send endless samples.
+ * TODO: is this really useful for "background"?
+ */
+ spec_priv->spec_config.endless = 1;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ case SPECTRAL_CHANSCAN:
+ case SPECTRAL_MANUAL:
+ spec_priv->spec_config.endless = 0;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ default:
+ return -1;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &spec_priv->spec_config);
+ ath_ps_ops(common)->restore(common);
+
+ spec_priv->spectral_mode = spectral_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_config);
+
static ssize_t write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
char buf[32];
ssize_t len;
@@ -252,18 +326,18 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
buf[len] = '\0';
if (strncmp("trigger", buf, 7) == 0) {
- ath9k_spectral_scan_trigger(sc->hw);
+ ath9k_cmn_spectral_scan_trigger(common, spec_priv);
} else if (strncmp("background", buf, 10) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_BACKGROUND);
ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
} else if (strncmp("chanscan", buf, 8) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_CHANSCAN);
ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
} else if (strncmp("manual", buf, 6) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_MANUAL);
ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
} else if (strncmp("disable", buf, 7) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_DISABLED);
ath_dbg(common, CONFIG, "spectral scan: disabled\n");
} else {
return -EINVAL;
@@ -288,11 +362,11 @@ static ssize_t read_file_spectral_short_repeat(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.short_repeat);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -300,7 +374,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -316,7 +390,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file,
if (val > 1)
return -EINVAL;
- sc->spec_config.short_repeat = val;
+ spec_priv->spec_config.short_repeat = val;
return count;
}
@@ -336,11 +410,11 @@ static ssize_t read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.count);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -348,7 +422,7 @@ static ssize_t write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -364,7 +438,7 @@ static ssize_t write_file_spectral_count(struct file *file,
if (val > 255)
return -EINVAL;
- sc->spec_config.count = val;
+ spec_priv->spec_config.count = val;
return count;
}
@@ -384,11 +458,11 @@ static ssize_t read_file_spectral_period(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.period);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.period);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -396,7 +470,7 @@ static ssize_t write_file_spectral_period(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -412,7 +486,7 @@ static ssize_t write_file_spectral_period(struct file *file,
if (val > 255)
return -EINVAL;
- sc->spec_config.period = val;
+ spec_priv->spec_config.period = val;
return count;
}
@@ -432,11 +506,11 @@ static ssize_t read_file_spectral_fft_period(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.fft_period);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -444,7 +518,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -460,7 +534,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file,
if (val > 15)
return -EINVAL;
- sc->spec_config.fft_period = val;
+ spec_priv->spec_config.fft_period = val;
return count;
}
@@ -506,38 +580,41 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
/* Debug Init/Deinit */
/*********************/
-void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+ relay_close(spec_priv->rfs_chan_spec_scan);
+ spec_priv->rfs_chan_spec_scan = NULL;
}
}
+EXPORT_SYMBOL(ath9k_cmn_spectral_deinit_debug);
-void ath9k_spectral_init_debug(struct ath_softc *sc)
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+ struct dentry *debugfs_phy)
{
- sc->rfs_chan_spec_scan = relay_open("spectral_scan",
- sc->debug.debugfs_phy,
+ spec_priv->rfs_chan_spec_scan = relay_open("spectral_scan",
+ debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_short_repeat",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_short_repeat);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_count);
debugfs_create_file("spectral_period",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_period);
debugfs_create_file("spectral_fft_period",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_fft_period);
}
+EXPORT_SYMBOL(ath9k_cmn_spectral_init_debug);
diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 7b410c6858b0..82d9dd29652c 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -92,6 +92,13 @@ struct ath_ht20_40_fft_packet {
struct ath_radar_info radar_info;
} __packed;
+struct ath_spec_scan_priv {
+ struct ath_hw *ah;
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+ enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
+};
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
@@ -123,23 +130,15 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
-void ath9k_spectral_init_debug(struct ath_softc *sc);
-void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv);
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
enum spectral_mode spectral_mode);
-
-#ifdef CONFIG_ATH9K_DEBUGFS
-int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf);
-#else
-static inline int ath_process_fft(struct ath_softc *sc,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
-{
- return 0;
-}
-#endif /* CONFIG_ATH9K_DEBUGFS */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 33b0c7aef2ea..e8c699446470 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -159,7 +159,7 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
- if (ah->sw_mgmt_crypto &&
+ if (ah->sw_mgmt_crypto_rx &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ffc454b18637..2b79a568e803 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -24,6 +24,7 @@
#include "common-init.h"
#include "common-beacon.h"
#include "common-debug.h"
+#include "common-spectral.h"
/* Common header for Atheros 802.11n base driver cores */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 5c45e787814e..871e969409bf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -401,22 +401,14 @@ static const struct file_operations fops_antenna_diversity = {
.llseek = default_llseek,
};
-static ssize_t read_file_dma(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_dma(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_softc *sc = file->private;
struct ath_hw *ah = sc->sc_ah;
- char *buf;
- int retval;
- unsigned int len = 0;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
u32 *qcuBase = &val[0], *dcuBase = &val[4];
- buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
ath9k_ps_wakeup(sc);
REG_WRITE_D(ah, AR_MACMISC,
@@ -424,21 +416,18 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "Raw DMA Debug values:\n");
+ seq_puts(file, "Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
+ seq_puts(file, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
- i, val[i]);
+ seq_printf(file, "%d: %08x ", i, val[i]);
}
- len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+ seq_puts(file, "\n\n");
+ seq_puts(file, "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@@ -451,55 +440,47 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "%2d %2x %1x %2x %2x\n",
- i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
- (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
- (val[2] & (0x7 << (i * 3))) >> (i * 3),
- (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ seq_printf(file, "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ (val[2] & (0x7 << (i * 3))) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
-
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "qcu_stitch state: %2x qcu_fetch state: %2x\n",
- (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "qcu_complete state: %2x dcu_complete state: %2x\n",
- (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "dcu_arb state: %2x dcu_fp state: %2x\n",
- (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
- (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
- (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
- (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
-
- len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
- REG_READ_D(ah, AR_OBS_BUS_1));
- len += scnprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+ seq_puts(file, "\n");
+
+ seq_printf(file, "qcu_stitch state: %2x qcu_fetch state: %2x\n",
+ (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
+ seq_printf(file, "qcu_complete state: %2x dcu_complete state: %2x\n",
+ (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
+ seq_printf(file, "dcu_arb state: %2x dcu_fp state: %2x\n",
+ (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
+ seq_printf(file, "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
+ (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
+ seq_printf(file, "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
+ (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
+ seq_printf(file, "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
+ (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
+
+ seq_printf(file, "pcu observe: 0x%x\n", REG_READ_D(ah, AR_OBS_BUS_1));
+ seq_printf(file, "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
- if (len > DMA_BUF_LEN)
- len = DMA_BUF_LEN;
+ return 0;
+}
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return retval;
+static int open_file_dma(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_dma, inode->i_private);
}
static const struct file_operations fops_dma = {
- .read = read_file_dma,
- .open = simple_open,
+ .open = open_file_dma,
+ .read = seq_read,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
@@ -556,22 +537,14 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.gen_timer++;
}
-static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_interrupt(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
- unsigned int len = 0;
- int rv;
- int mxlen = 4000;
- char *buf = kmalloc(mxlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ struct ath_softc *sc = file->private;
#define PR_IS(a, s) \
do { \
- len += scnprintf(buf + len, mxlen - len, \
- "%21s: %10u\n", a, \
- sc->debug.stats.istats.s); \
+ seq_printf(file, "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -602,8 +575,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
- len += scnprintf(buf + len, mxlen - len,
- "SYNC_CAUSE stats:\n");
+ seq_puts(file, "SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -625,35 +597,27 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("MAC-Asleep", mac_asleep);
PR_IS("MAC-Sleep-Access", mac_sleep_access);
- if (len > mxlen)
- len = mxlen;
+ return 0;
+}
- rv = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return rv;
+static int open_file_interrupt(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_interrupt, inode->i_private);
}
static const struct file_operations fops_interrupt = {
- .read = read_file_interrupt,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_interrupt,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_xmit(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
- char *buf;
- unsigned int len = 0, size = 2048;
- ssize_t retval = 0;
+ struct ath_softc *sc = file->private;
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += sprintf(buf, "%30s %10s%10s%10s\n\n",
- "BE", "BK", "VI", "VO");
+ seq_printf(file, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
PR("MPDUs Queued: ", queued);
PR("MPDUs Completed: ", completed);
@@ -678,215 +642,162 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("HW-tx-proc-desc: ", txprocdesc);
PR("TX-Failed: ", txfailed);
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
+ return 0;
}
-static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
- char *buf, ssize_t size)
+static void print_queue(struct ath_softc *sc, struct ath_txq *txq,
+ struct seq_file *file)
{
- ssize_t len = 0;
-
ath_txq_lock(sc, txq);
- len += scnprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += scnprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += scnprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += scnprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += scnprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
+ seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
+ seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
+ seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
+ seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
+ seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
ath_txq_unlock(sc, txq);
- return len;
}
-static ssize_t read_file_queues(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_queues(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_softc *sc = file->private;
struct ath_txq *txq;
- char *buf;
- unsigned int len = 0;
- const unsigned int size = 1024;
- ssize_t retval = 0;
int i;
static const char *qname[4] = {
"VO", "VI", "BE", "BK"
};
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
- len += print_queue(sc, txq, buf + len, size - len);
+ seq_printf(file, "(%s): ", qname[i]);
+ print_queue(sc, txq, file);
}
- len += scnprintf(buf + len, size - len, "(CAB): ");
- len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
+ seq_puts(file, "(CAB): ");
+ print_queue(sc, sc->beacon.cabq, file);
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
+ return 0;
}
-static ssize_t read_file_misc(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_misc(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_softc *sc = file->private;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_vif_iter_data iter_data;
struct ath_chanctx *ctx;
- char buf[512];
- unsigned int len = 0;
- ssize_t retval = 0;
unsigned int reg;
u32 rxfilter, i;
- len += scnprintf(buf + len, sizeof(buf) - len,
- "BSSID: %pM\n", common->curbssid);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "BSSID-MASK: %pM\n", common->bssidmask);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "OPMODE: %s\n",
- ath_opmode_to_string(sc->sc_ah->opmode));
+ seq_printf(file, "BSSID: %pM\n", common->curbssid);
+ seq_printf(file, "BSSID-MASK: %pM\n", common->bssidmask);
+ seq_printf(file, "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "RXFILTER: 0x%x", rxfilter);
+ seq_printf(file, "RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
- len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
+ seq_puts(file, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
- len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
+ seq_puts(file, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
- len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
+ seq_puts(file, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
- len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ seq_puts(file, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
- len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
+ seq_puts(file, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
- len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
+ seq_puts(file, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
- len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ seq_puts(file, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
- len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
+ seq_puts(file, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
- len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+ seq_puts(file, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
- len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+ seq_puts(file, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
- len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+ seq_puts(file, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
- len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+ seq_puts(file, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
- len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+ seq_puts(file, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
- len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+ seq_puts(file, " CONTROL_WRAPPER");
- len += scnprintf(buf + len, sizeof(buf) - len, "\n");
+ seq_puts(file, "\n");
reg = sc->sc_ah->imask;
- len += scnprintf(buf + len, sizeof(buf) - len,
- "INTERRUPT-MASK: 0x%x", reg);
+ seq_printf(file, "INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
- len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
+ seq_puts(file, " SWBA");
if (reg & ATH9K_INT_BMISS)
- len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
+ seq_puts(file, " BMISS");
if (reg & ATH9K_INT_CST)
- len += scnprintf(buf + len, sizeof(buf) - len, " CST");
+ seq_puts(file, " CST");
if (reg & ATH9K_INT_RX)
- len += scnprintf(buf + len, sizeof(buf) - len, " RX");
+ seq_puts(file, " RX");
if (reg & ATH9K_INT_RXHP)
- len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
+ seq_puts(file, " RXHP");
if (reg & ATH9K_INT_RXLP)
- len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
+ seq_puts(file, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
- len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+ seq_puts(file, " BB_WATCHDOG");
- len += scnprintf(buf + len, sizeof(buf) - len, "\n");
+ seq_puts(file, "\n");
i = 0;
ath_for_each_chanctx(sc, ctx) {
- if (!ctx->assigned || list_empty(&ctx->vifs))
+ if (list_empty(&ctx->vifs))
continue;
ath9k_calculate_iter_data(sc, ctx, &iter_data);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: CTX %i AP: %i STA: %i MESH: %i WDS: %i",
- i++, iter_data.naps, iter_data.nstations,
- iter_data.nmeshes, iter_data.nwds);
- len += scnprintf(buf + len, sizeof(buf) - len,
- " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
- iter_data.nadhocs, sc->cur_chan->nvifs, sc->nbcnvifs);
+ seq_printf(file,
+ "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i",
+ i++, (int)(ctx->assigned), iter_data.naps,
+ iter_data.nstations,
+ iter_data.nmeshes, iter_data.nwds);
+ seq_printf(file, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.nadhocs, sc->cur_chan->nvifs,
+ sc->nbcnvifs);
}
- if (len > sizeof(buf))
- len = sizeof(buf);
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- return retval;
+ return 0;
}
-static ssize_t read_file_reset(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_reset(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
- char buf[512];
- unsigned int len = 0;
+ struct ath_softc *sc = file->private;
+ static const char * const reset_cause[__RESET_TYPE_MAX] = {
+ [RESET_TYPE_BB_HANG] = "Baseband Hang",
+ [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
+ [RESET_TYPE_FATAL_INT] = "Fatal HW Error",
+ [RESET_TYPE_TX_ERROR] = "TX HW error",
+ [RESET_TYPE_TX_GTT] = "Transmit timeout",
+ [RESET_TYPE_TX_HANG] = "TX Path Hang",
+ [RESET_TYPE_PLL_HANG] = "PLL RX Hang",
+ [RESET_TYPE_MAC_HANG] = "MAC Hang",
+ [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
+ [RESET_TYPE_MCI] = "MCI Reset",
+ [RESET_TYPE_CALIBRATION] = "Calibration error",
+ };
+ int i;
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MAC Hang",
- sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Stuck Beacon",
- sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
-
- if (len > sizeof(buf))
- len = sizeof(buf);
+ for (i = 0; i < ARRAY_SIZE(reset_cause); i++) {
+ if (!reset_cause[i])
+ continue;
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ seq_printf(file, "%17s: %2d\n", reset_cause[i],
+ sc->debug.stats.reset[i]);
+ }
+
+ return 0;
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
@@ -926,32 +837,56 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
TX_STAT_INC(qnum, delim_underrun);
}
+static int open_file_xmit(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_xmit, inode->i_private);
+}
+
static const struct file_operations fops_xmit = {
- .read = read_file_xmit,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_xmit,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
+static int open_file_queues(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_queues, inode->i_private);
+}
+
static const struct file_operations fops_queues = {
- .read = read_file_queues,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_queues,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
+static int open_file_misc(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_misc, inode->i_private);
+}
+
static const struct file_operations fops_misc = {
- .read = read_file_misc,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_misc,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
+static int open_file_reset(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_reset, inode->i_private);
+}
+
static const struct file_operations fops_reset = {
- .read = read_file_reset,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_reset,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
@@ -960,7 +895,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
}
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
char buf[32];
@@ -971,7 +906,7 @@ static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
}
static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
unsigned long regidx;
@@ -999,7 +934,7 @@ static const struct file_operations fops_regidx = {
};
static ssize_t read_file_regval(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
@@ -1015,7 +950,7 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf,
}
static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
@@ -1081,57 +1016,45 @@ static const struct file_operations fops_regdump = {
.llseek = default_llseek,/* read accesses f_pos */
};
-static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int read_file_dump_nfcal(struct seq_file *file, void *data)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_softc *sc = file->private;
struct ath_hw *ah = sc->sc_ah;
struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
- u32 len = 0, size = 1500;
u32 i, j;
- ssize_t retval = 0;
- char *buf;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
u8 nread;
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len,
- "Channel Noise Floor : %d\n", ah->noise);
- len += scnprintf(buf + len, size - len,
- "Chain | privNF | # Readings | NF Readings\n");
+ seq_printf(file, "Channel Noise Floor : %d\n", ah->noise);
+ seq_puts(file, "Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
- len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
- i, h[i].privNF, nread);
+ seq_printf(file, " %d\t %d\t %d\t\t", i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
- len += scnprintf(buf + len, size - len,
- " %d", h[i].nfCalBuffer[j]);
- len += scnprintf(buf + len, size - len, "\n");
+ seq_printf(file, " %d", h[i].nfCalBuffer[j]);
+ seq_puts(file, "\n");
}
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
+ return 0;
+}
- return retval;
+static int open_file_dump_nfcal(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_dump_nfcal, inode->i_private);
}
static const struct file_operations fops_dump_nfcal = {
- .read = read_file_dump_nfcal,
- .open = simple_open,
+ .read = seq_read,
+ .open = open_file_dump_nfcal,
.owner = THIS_MODULE,
- .llseek = default_llseek,
+ .llseek = seq_lseek,
+ .release = single_release,
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -1315,7 +1238,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
void ath9k_deinit_debug(struct ath_softc *sc)
{
- ath9k_spectral_deinit_debug(sc);
+ ath9k_cmn_spectral_deinit_debug(&sc->spec_priv);
}
int ath9k_init_debug(struct ath_hw *ah)
@@ -1335,7 +1258,7 @@ int ath9k_init_debug(struct ath_hw *ah)
ath9k_dfs_init_debug(sc);
ath9k_tx99_init_debug(sc);
- ath9k_spectral_init_debug(sc);
+ ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 53ae15bd0c9d..a8e9319958e6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -49,6 +49,7 @@ enum ath_reset_type {
RESET_TYPE_MAC_HANG,
RESET_TYPE_BEACON_STUCK,
RESET_TYPE_MCI,
+ RESET_TYPE_CALIBRATION,
__RESET_TYPE_MAX
};
@@ -195,12 +196,11 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
- len += scnprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
- TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
- TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
- TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ seq_printf(file, "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
struct ath_rx_rate_stats {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 3218ca994746..122b846b8ec0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -262,7 +262,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct ath_common *common = ath9k_hw_common(ah);
- u16 *eepdata, temp, magic, magic2;
+ u16 *eepdata, temp, magic;
u32 sum = 0, el;
bool need_swap = false;
int i, addr, size;
@@ -272,27 +272,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return false;
}
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
-
- if (magic != AR5416_EEPROM_MAGIC) {
- magic2 = swab16(magic);
-
- if (magic2 == AR5416_EEPROM_MAGIC) {
- size = sizeof(struct ar5416_eeprom_def);
- need_swap = true;
- eepdata = (u16 *) (&ah->eeprom);
+ if (swab16(magic) == AR5416_EEPROM_MAGIC &&
+ !(ah->ah_flags & AH_NO_EEP_SWAP)) {
+ size = sizeof(struct ar5416_eeprom_def);
+ need_swap = true;
+ eepdata = (u16 *) (&ah->eeprom);
- for (addr = 0; addr < size / sizeof(u16); addr++) {
- temp = swab16(*eepdata);
- *eepdata = temp;
- eepdata++;
- }
- } else {
- ath_err(common,
- "Invalid EEPROM Magic. Endianness mismatch.\n");
- return -EINVAL;
- }
+ for (addr = 0; addr < size / sizeof(u16); addr++) {
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index b1956bf6e01e..2fef7a480fec 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -25,7 +25,12 @@ static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
+ u32 val = (brightness == LED_OFF);
+
+ if (sc->sc_ah->config.led_active_high)
+ val = !val;
+
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, val);
}
void ath_deinit_leds(struct ath_softc *sc)
@@ -82,7 +87,7 @@ void ath_fill_led_pin(struct ath_softc *sc)
ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
/* LED off, active low */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
}
#endif
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 09a5d72f3ff5..9dde265d3f84 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -481,6 +481,7 @@ struct ath9k_htc_priv {
unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
+ struct ath_spec_scan_priv spec_priv;
spinlock_t beacon_lock;
struct ath_beacon_config cur_beacon_conf;
@@ -625,8 +626,12 @@ int ath9k_htc_resume(struct htc_target *htc_handle);
#endif
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv);
#else
static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; };
+static inline void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+}
#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 8b529e4b8ac4..8cef1edcc621 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -490,6 +490,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
}
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+ ath9k_cmn_spectral_deinit_debug(&priv->spec_priv);
+}
int ath9k_htc_init_debug(struct ath_hw *ah)
{
@@ -501,6 +505,8 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
if (!priv->debug.debugfs_phy)
return -ENOMEM;
+ ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+
debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_tgt_int_stats);
debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 4014c4be6e79..e8fa9448da24 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -53,6 +53,21 @@ static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
};
#endif
+static void ath9k_htc_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_htc_ps_wakeup((struct ath9k_htc_priv *) common->priv);
+}
+
+static void ath9k_htc_op_ps_restore(struct ath_common *common)
+{
+ ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_htc_ps_ops = {
+ .wakeup = ath9k_htc_op_ps_wakeup,
+ .restore = ath9k_htc_op_ps_restore,
+};
+
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
int time_left;
@@ -87,6 +102,7 @@ static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
wiphy_rfkill_stop_polling(hw->wiphy);
ath9k_deinit_leds(priv);
+ ath9k_htc_deinit_debug(priv);
ieee80211_unregister_hw(hw);
ath9k_rx_cleanup(priv);
ath9k_tx_cleanup(priv);
@@ -449,6 +465,14 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ priv->spec_priv.ah = priv->ah;
+ priv->spec_priv.spec_config.enabled = 0;
+ priv->spec_priv.spec_config.short_repeat = false;
+ priv->spec_priv.spec_config.count = 8;
+ priv->spec_priv.spec_config.endless = false;
+ priv->spec_priv.spec_config.period = 0x12;
+ priv->spec_priv.spec_config.fft_period = 0x02;
}
static int ath9k_init_priv(struct ath9k_htc_priv *priv,
@@ -478,6 +502,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common = ath9k_hw_common(ah);
common->ops = &ah->reg_ops;
+ common->ps_ops = &ath9k_htc_ps_ops;
common->bus_ops = &ath9k_usb_bus_ops;
common->ah = ah;
common->hw = priv->hw;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 994fff1ff519..92d5a6c5a225 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -314,6 +314,10 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+ /* perform spectral scan if requested. */
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+ priv->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &priv->spec_priv);
err:
ath9k_htc_ps_restore(priv);
return ret;
@@ -1443,7 +1447,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- if (priv->ah->sw_mgmt_crypto &&
+ if (priv->ah->sw_mgmt_crypto_tx &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
@@ -1687,7 +1691,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
-static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -1701,7 +1707,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
mutex_unlock(&priv->mutex);
}
-static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index f0484b1b617e..a0f58e2aa553 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -946,7 +946,7 @@ static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
struct ath_htc_rx_status *rxstatus)
{
- rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
rx_stats->rs_status = rxstatus->rs_status;
rx_stats->rs_phyerr = rxstatus->rs_phyerr;
rx_stats->rs_rssi = rxstatus->rs_rssi;
@@ -1012,6 +1012,20 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* separately to avoid doing two lookups for a rate for each frame.
*/
hdr = (struct ieee80211_hdr *)skb->data;
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ /* TODO: Not using DFS processing now. */
+ if (ath_cmn_process_fft(&priv->spec_priv, hdr,
+ &rx_stats, rx_status->mactime)) {
+ /* TODO: Code to collect spectral scan statistics */
+ }
+ goto rx_next;
+ }
+
if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
&decrypt_error, priv->rxfilter))
goto rx_next;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 8e85efeaeffc..88769b64b20b 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -41,10 +41,9 @@ static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
ath9k_hw_ops(ah)->set_desc_link(ds, link);
}
-static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static inline int ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ad605760e21..6d4b273469b1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/bitops.h>
+#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -446,8 +447,16 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
}
- if (sum == 0 || sum == 0xffff * 3)
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(common->macaddr)) {
+ ath_err(common,
+ "eeprom contains invalid mac address: %pM\n",
+ common->macaddr);
+
+ random_ether_addr(common->macaddr);
+ ath_err(common,
+ "random mac address will be used: %pM\n",
+ common->macaddr);
+ }
return 0;
}
@@ -1576,16 +1585,22 @@ static void ath9k_hw_init_mfp(struct ath_hw *ah)
* frames when constructing CCMP AAD. */
REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
0xc7ff);
- ah->sw_mgmt_crypto = false;
+ if (AR_SREV_9271(ah) || AR_DEVID_7010(ah))
+ ah->sw_mgmt_crypto_tx = true;
+ else
+ ah->sw_mgmt_crypto_tx = false;
+ ah->sw_mgmt_crypto_rx = false;
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
/* Disable hardware crypto for management frames */
REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
- ah->sw_mgmt_crypto = true;
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
} else {
- ah->sw_mgmt_crypto = true;
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
}
}
@@ -1932,6 +1947,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_gen_timer_start_tsf2(ah);
+
ath9k_hw_init_desc(ah);
if (ath9k_hw_btcoex_is_enabled(ah))
@@ -1940,8 +1957,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_bt(ah);
- ath9k_hw_loadnf(ah, chan);
- ath9k_hw_start_nfcal(ah, true);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+ }
if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_bb_watchdog_config(ah);
@@ -2309,7 +2328,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- unsigned int chip_chainmask;
u16 eeval;
u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
@@ -2329,31 +2347,40 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
- if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
- ath_err(common,
- "no band has been marked as supported in EEPROM\n");
- return -EINVAL;
+
+ if (eeval & AR5416_OPFLAGS_11A) {
+ if (ah->disable_5ghz)
+ ath_warn(common, "disabling 5GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
}
- if (eeval & AR5416_OPFLAGS_11A)
- pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
+ if (eeval & AR5416_OPFLAGS_11G) {
+ if (ah->disable_2ghz)
+ ath_warn(common, "disabling 2GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ }
- if (eeval & AR5416_OPFLAGS_11G)
- pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ if ((pCap->hw_caps & (ATH9K_HW_CAP_2GHZ | ATH9K_HW_CAP_5GHZ)) == 0) {
+ ath_err(common, "both bands are disabled\n");
+ return -EINVAL;
+ }
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
AR_SREV_9565(ah))
- chip_chainmask = 1;
- else if (AR_SREV_9462(ah))
- chip_chainmask = 3;
+ pCap->chip_chainmask = 1;
else if (!AR_SREV_9280_20_OR_LATER(ah))
- chip_chainmask = 7;
- else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
- chip_chainmask = 3;
+ pCap->chip_chainmask = 7;
+ else if (!AR_SREV_9300_20_OR_LATER(ah) ||
+ AR_SREV_9340(ah) ||
+ AR_SREV_9462(ah) ||
+ AR_SREV_9531(ah))
+ pCap->chip_chainmask = 3;
else
- chip_chainmask = 7;
+ pCap->chip_chainmask = 7;
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
@@ -2371,8 +2398,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
- pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
- pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
+ pCap->tx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->tx_chainmask);
+ pCap->rx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->rx_chainmask);
ah->txchainmask = pCap->tx_chainmask;
ah->rxchainmask = pCap->rx_chainmask;
@@ -2886,6 +2913,16 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_gettsf32);
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ if (timer_table->tsf2_enabled) {
+ REG_SET_BIT(ah, AR_DIRECT_CONNECT, AR_DC_AP_STA_EN);
+ REG_SET_BIT(ah, AR_RESET_TSF, AR_RESET_TSF2_ONCE);
+ }
+}
+
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
void (*overflow)(void *),
@@ -2896,7 +2933,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer *timer;
if ((timer_index < AR_FIRST_NDP_TIMER) ||
- (timer_index >= ATH_MAX_GEN_TIMER))
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
+
+ if ((timer_index > AR_FIRST_NDP_TIMER) &&
+ !AR_SREV_9300_20_OR_LATER(ah))
return NULL;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
@@ -2910,6 +2951,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
timer->overflow = overflow;
timer->arg = arg;
+ if ((timer_index > AR_FIRST_NDP_TIMER) && !timer_table->tsf2_enabled) {
+ timer_table->tsf2_enabled = true;
+ ath9k_hw_gen_timer_start_tsf2(ah);
+ }
+
return timer;
}
EXPORT_SYMBOL(ath_gen_timer_alloc);
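
The ath9k_hw_init_macaddr() hunk earlier in this file replaces the checksum test with the generic validity helper and falls back to a random address. A minimal sketch of that pattern (illustrative only; the function name is made up, and eth_random_addr() is the equivalent of the random_ether_addr() wrapper used in the patch):

        #include <linux/etherdevice.h>

        /* If the EEPROM-provided address is invalid (all-zero, broadcast or
         * multicast), substitute a random, locally administered unicast
         * address so the interface can still come up. */
        static void example_fixup_macaddr(u8 addr[ETH_ALEN])
        {
                if (!is_valid_ether_addr(addr))
                        eth_random_addr(addr);
        }
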
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 975074fc11bc..1cbd33551513 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -217,8 +217,8 @@
#define AH_WOW_BEACON_MISS BIT(3)
enum ath_hw_txq_subtype {
- ATH_TXQ_AC_BE = 0,
- ATH_TXQ_AC_BK = 1,
+ ATH_TXQ_AC_BK = 0,
+ ATH_TXQ_AC_BE = 1,
ATH_TXQ_AC_VI = 2,
ATH_TXQ_AC_VO = 3,
};
@@ -244,13 +244,20 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_2GHZ = BIT(11),
ATH9K_HW_CAP_5GHZ = BIT(12),
ATH9K_HW_CAP_APM = BIT(13),
+#ifdef CONFIG_ATH9K_PCOEM
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
- ATH9K_HW_CAP_DFS = BIT(16),
- ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
- ATH9K_HW_CAP_PAPRD = BIT(18),
- ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19),
- ATH9K_HW_CAP_BT_ANT_DIV = BIT(20),
+ ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(16),
+ ATH9K_HW_CAP_BT_ANT_DIV = BIT(17),
+#else
+ ATH9K_HW_CAP_RTT = 0,
+ ATH9K_HW_CAP_MCI = 0,
+ ATH9K_HW_WOW_DEVICE_CAPABLE = 0,
+ ATH9K_HW_CAP_BT_ANT_DIV = 0,
+#endif
+ ATH9K_HW_CAP_DFS = BIT(18),
+ ATH9K_HW_CAP_PAPRD = BIT(19),
+ ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(20),
};
/*
@@ -269,6 +276,7 @@ struct ath9k_hw_capabilities {
u16 rts_aggr_limit;
u8 tx_chainmask;
u8 rx_chainmask;
+ u8 chip_chainmask;
u8 max_txchains;
u8 max_rxchains;
u8 num_gpio_pins;
@@ -322,6 +330,7 @@ struct ath9k_ops_config {
bool alt_mingainidx;
bool no_pll_pwrsave;
bool tx_gain_buffalo;
+ bool led_active_high;
};
enum ath9k_int {
@@ -517,6 +526,7 @@ struct ath_gen_timer {
struct ath_gen_timer_table {
struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
u16 timer_mask;
+ bool tsf2_enabled;
};
struct ath_hw_antcomb_conf {
@@ -681,10 +691,8 @@ struct ath_hw_ops {
bool power_off);
void (*rx_enable)(struct ath_hw *ah);
void (*set_desc_link)(void *ds, u32 link);
- bool (*calibrate)(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal);
+ int (*calibrate)(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal);
bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
u32 *sync_cause_p);
void (*set_txdesc)(struct ath_hw *ah, void *ds,
@@ -726,6 +734,7 @@ enum ath_cal_list {
#define AH_USE_EEPROM 0x1
#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
#define AH_FASTCC 0x4
+#define AH_NO_EEP_SWAP 0x8 /* Do not swap EEPROM data */
struct ath_hw {
struct ath_ops reg_ops;
@@ -747,7 +756,8 @@ struct ath_hw {
} eeprom;
const struct eeprom_ops *eep_ops;
- bool sw_mgmt_crypto;
+ bool sw_mgmt_crypto_tx;
+ bool sw_mgmt_crypto_rx;
bool is_pciexpress;
bool aspm_enabled;
bool is_monitoring;
@@ -924,10 +934,16 @@ struct ath_hw {
bool is_clk_25mhz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
+ bool disable_2ghz;
+ bool disable_5ghz;
const struct firmware *eeprom_blob;
struct ath_dynack dynack;
+
+ bool tpc_enabled;
+ u8 tx_power[Ar5416RateSize];
+ u8 tx_power_stbc[Ar5416RateSize];
};
struct ath_bus_ops {
@@ -1027,6 +1043,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
u32 timer_next,
u32 timer_period);
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah);
void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
@@ -1067,6 +1084,8 @@ int ar9003_paprd_init_table(struct ath_hw *ah);
bool ar9003_paprd_is_done(struct ath_hw *ah);
bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan);
/* Hardware family op attach helpers */
int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
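
Defining the PC-OEM-only capability bits to 0 when CONFIG_ATH9K_PCOEM is off means every `hw_caps & ATH9K_HW_CAP_MCI`-style test becomes a constant false and the dependent code is compiled out without an #ifdef at each call site. A tiny illustration of the idiom with made-up names, not taken from the patch:

        #include <linux/bitops.h>

        #ifdef CONFIG_EXAMPLE_FEATURE
        #define EXAMPLE_CAP_FOO         BIT(0)
        #else
        #define EXAMPLE_CAP_FOO         0
        #endif

        /* Constant-false when the feature is configured out, so the body of
         * any dependent branch is eliminated by the compiler. */
        static bool example_has_foo(unsigned long hw_caps)
        {
                return hw_caps & EXAMPLE_CAP_FOO;
        }
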
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3bd030494986..d1c39346b264 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -88,6 +88,21 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
static void ath9k_deinit_softc(struct ath_softc *sc);
+static void ath9k_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_ps_wakeup((struct ath_softc *) common->priv);
+}
+
+static void ath9k_op_ps_restore(struct ath_common *common)
+{
+ ath9k_ps_restore((struct ath_softc *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_ps_ops = {
+ .wakeup = ath9k_op_ps_wakeup,
+ .restore = ath9k_op_ps_restore,
+};
+
/*
* Read and write, they both share the same lock. We do this to serialize
* reads and writes on Atheros 802.11n PCI devices only. This is required
@@ -172,17 +187,20 @@ static void ath9k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, reg);
/* Set tx power */
- if (ah->curchan) {
- sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
- sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- /* synchronize DFS detector if regulatory domain changed */
- if (sc->dfs_detector != NULL)
- sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
- request->dfs_region);
- ath9k_ps_restore(sc);
- }
+ if (!ah->curchan)
+ return;
+
+ sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
+ /* synchronize DFS detector if regulatory domain changed */
+ if (sc->dfs_detector != NULL)
+ sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
+ request->dfs_region);
+ ath9k_ps_restore(sc);
}
/*
@@ -348,12 +366,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
- sc->spec_config.enabled = 0;
- sc->spec_config.short_repeat = true;
- sc->spec_config.count = 8;
- sc->spec_config.endless = false;
- sc->spec_config.period = 0xFF;
- sc->spec_config.fft_period = 0xF;
+ sc->spec_priv.ah = sc->sc_ah;
+ sc->spec_priv.spec_config.enabled = 0;
+ sc->spec_priv.spec_config.short_repeat = true;
+ sc->spec_priv.spec_config.count = 8;
+ sc->spec_priv.spec_config.endless = false;
+ sc->spec_priv.spec_config.period = 0xFF;
+ sc->spec_priv.spec_config.fft_period = 0xF;
}
static void ath9k_init_pcoem_platform(struct ath_softc *sc)
@@ -362,6 +381,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ if (!IS_ENABLED(CONFIG_ATH9K_PCOEM))
+ return;
+
if (common->bus_ops->ath_bus_type != ATH_PCI)
return;
@@ -419,6 +441,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
ah->config.no_pll_pwrsave = true;
ath_info(common, "Disable PLL PowerSave\n");
}
+
+ if (sc->driver_data & ATH9K_PCI_LED_ACT_HI)
+ ah->config.led_active_high = true;
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -507,10 +532,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
- sc->sc_ah = ah;
pCap = &ah->caps;
common = ath9k_hw_common(ah);
+
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ sc->sc_ah = ah;
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
sc->tx99_power = MAX_RATE_POWER + 1;
init_waitqueue_head(&sc->tx_wait);
@@ -528,10 +557,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->is_clk_25mhz = pdata->is_clk_25mhz;
ah->get_mac_revision = pdata->get_mac_revision;
ah->external_reset = pdata->external_reset;
+ ah->disable_2ghz = pdata->disable_2ghz;
+ ah->disable_5ghz = pdata->disable_5ghz;
+ if (!pdata->endian_check)
+ ah->ah_flags |= AH_NO_EEP_SWAP;
}
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
+ common->ps_ops = &ath9k_ps_ops;
common->ah = ah;
common->hw = sc->hw;
common->priv = sc;
@@ -866,9 +900,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
common = ath9k_hw_common(ah);
ath9k_set_hw_capab(sc, hw);
- /* Will be cleared in ath9k_start() */
- set_bit(ATH_OP_INVALID, &common->op_flags);
-
/* Initialize regulatory */
error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2343f56e6498..b829263e3d0a 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -371,9 +371,15 @@ void ath_ani_calibrate(unsigned long data)
/* Perform calibration if necessary */
if (longcal || shortcal) {
- common->ani.caldone =
- ath9k_hw_calibrate(ah, ah->curchan,
- ah->rxchainmask, longcal);
+ int ret = ath9k_hw_calibrate(ah, ah->curchan, ah->rxchainmask,
+ longcal);
+ if (ret < 0) {
+ common->ani.caldone = 0;
+ ath9k_queue_reset(sc, RESET_TYPE_CALIBRATION);
+ return;
+ }
+
+ common->ani.caldone = ret;
}
ath_dbg(common, ANI,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 275205ab5f15..3e58bfa0c1fd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
q = ATH9K_NUM_TX_QUEUES - 3;
break;
case ATH9K_TX_QUEUE_DATA:
- for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
- if (ah->txq[q].tqi_type ==
- ATH9K_TX_QUEUE_INACTIVE)
- break;
- if (q == ATH9K_NUM_TX_QUEUES) {
- ath_err(common, "No available TX queue\n");
- return -1;
- }
+ q = qinfo->tqi_subtype;
break;
default:
ath_err(common, "Invalid TX queue type: %u\n", type);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index aa69ceaad0be..e55fa11894b6 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -704,7 +704,7 @@ struct ath_tx_info {
enum ath9k_pkt_type type;
enum ath9k_key_type keytype;
u8 keyix;
- u8 txpower;
+ u8 txpower[4];
};
struct ath_hw;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4f18a6be0c7d..9a72640237cb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -54,7 +54,8 @@ u8 ath9k_parse_mpdudensity(u8 mpdudensity)
}
}
-static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
+ bool sw_pending)
{
bool pending = false;
@@ -65,6 +66,9 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
goto out;
}
+ if (!sw_pending)
+ goto out;
+
if (txq->mac80211_qnum >= 0) {
struct list_head *list;
@@ -229,8 +233,9 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_calculate_summary_state(sc, sc->cur_chan);
ath_startrecv(sc);
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->cur_chan->txpower, &sc->curtxpow);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
clear_bit(ATH_OP_HW_RESET, &common->op_flags);
if (!sc->cur_chan->offchannel && start) {
@@ -270,7 +275,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
return true;
}
-int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -281,6 +286,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
tasklet_disable(&sc->intr_tq);
+ tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
if (!sc->cur_chan->offchannel) {
@@ -326,6 +332,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
return r;
@@ -505,16 +512,13 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- /* shared irq, not for us */
+ if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return IRQ_NONE;
+ /* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
- if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
- ath9k_hw_kill_interrupts(ah);
- return IRQ_HANDLED;
- }
-
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
@@ -525,6 +529,9 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
+ if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return IRQ_HANDLED;
+
/*
* If there are no status bits set, then this interrupt was not
* for me (should have been caught above).
@@ -539,11 +546,10 @@ irqreturn_t ath_isr(int irq, void *dev)
sched = true;
/*
- * If a FATAL or RXORN interrupt is received, we have to reset the
- * chip immediately.
+ * If a FATAL interrupt is received, we have to reset the chip
+ * immediately.
*/
- if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
- !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
+ if (status & ATH9K_INT_FATAL)
goto chip_reset;
if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
@@ -598,23 +604,37 @@ chip_reset:
#undef SCHED_INTR
}
-int ath_reset(struct ath_softc *sc)
+/*
+ * This function is called when a HW reset cannot be deferred
+ * and has to be immediate.
+ */
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int r;
+ ath9k_hw_kill_interrupts(sc->sc_ah);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
+
ath9k_ps_wakeup(sc);
- r = ath_reset_internal(sc, NULL);
+ r = ath_reset_internal(sc, hchan);
ath9k_ps_restore(sc);
return r;
}
+/*
+ * When a HW reset can be deferred, it is added to the
+ * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before
+ * queueing.
+ */
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
#ifdef CONFIG_ATH9K_DEBUGFS
RESET_STAT_INC(sc, type);
#endif
+ ath9k_hw_kill_interrupts(sc->sc_ah);
set_bit(ATH_OP_HW_RESET, &common->op_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -623,7 +643,9 @@ void ath_reset_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ath_reset(sc);
+ ath9k_ps_wakeup(sc);
+ ath_reset_internal(sc, NULL);
+ ath9k_ps_restore(sc);
}
/**********************/
@@ -707,7 +729,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->led_pin >= 0) {
ath9k_hw_cfg_output(ah, ah->led_pin,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 1 : 0);
}
/*
@@ -849,7 +872,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
if (ah->led_pin >= 0) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 0 : 1);
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
@@ -865,6 +889,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
&sc->cur_chan->chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, true);
@@ -873,7 +900,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- set_bit(ATH_OP_INVALID, &common->op_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1036,7 +1062,7 @@ static void ath9k_set_offchannel_state(struct ath_softc *sc)
eth_zero_addr(common->curbssid);
eth_broadcast_addr(common->bssidmask);
- ether_addr_copy(common->macaddr, vif->addr);
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
common->curaid = 0;
ah->opmode = vif->type;
ah->imask &= ~ATH9K_INT_SWBA;
@@ -1077,7 +1103,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ath9k_calculate_iter_data(sc, ctx, &iter_data);
if (iter_data.has_hw_macaddr)
- ether_addr_copy(common->macaddr, iter_data.hw_macaddr);
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1167,7 +1193,8 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
for (i = 0; i < IEEE80211_NUM_ACS; i++)
vif->hw_queue[i] = i;
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
vif->cab_queue = hw->queues - 2;
else
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
@@ -1323,78 +1350,6 @@ static void ath9k_disable_ps(struct ath_softc *sc)
ath_dbg(common, PS, "PowerSave disabled\n");
}
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 rxfilter;
-
- if (config_enabled(CONFIG_ATH9K_TX99))
- return;
-
- if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
- ath_err(common, "spectrum analyzer not implemented on this hardware\n");
- return;
- }
-
- ath9k_ps_wakeup(sc);
- rxfilter = ath9k_hw_getrxfilter(ah);
- ath9k_hw_setrxfilter(ah, rxfilter |
- ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR);
-
- /* TODO: usually this should not be neccesary, but for some reason
- * (or in some mode?) the trigger must be called after the
- * configuration, otherwise the register will have its values reset
- * (on my ar9220 to value 0x01002310)
- */
- ath9k_spectral_scan_config(hw, sc->spectral_mode);
- ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
- ath9k_ps_restore(sc);
-}
-
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
- enum spectral_mode spectral_mode)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
- ath_err(common, "spectrum analyzer not implemented on this hardware\n");
- return -1;
- }
-
- switch (spectral_mode) {
- case SPECTRAL_DISABLED:
- sc->spec_config.enabled = 0;
- break;
- case SPECTRAL_BACKGROUND:
- /* send endless samples.
- * TODO: is this really useful for "background"?
- */
- sc->spec_config.endless = 1;
- sc->spec_config.enabled = 1;
- break;
- case SPECTRAL_CHANSCAN:
- case SPECTRAL_MANUAL:
- sc->spec_config.endless = 0;
- sc->spec_config.enabled = 1;
- break;
- default:
- return -1;
- }
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
- ath9k_ps_restore(sc);
-
- sc->spectral_mode = spectral_mode;
-
- return 0;
-}
-
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_softc *sc = hw->priv;
@@ -1455,8 +1410,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER) {
ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->cur_chan->txpower = 2 * conf->power_level;
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->cur_chan->txpower, &sc->curtxpow);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
}
mutex_unlock(&sc->mutex);
@@ -1553,6 +1509,40 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static int ath9k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = ath9k_sta_add(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Add station: %pM\n", sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = ath9k_sta_remove(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Remove station: %pM\n", sta->addr);
+ }
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ath_chanctx_event(sc, vif,
+ ATH_CHANCTX_EVENT_AUTHORIZED);
+ }
+ }
+
+ return ret;
+}
+
static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
struct ath_node *an,
bool set)
@@ -1677,7 +1667,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- if (sc->sc_ah->sw_mgmt_crypto &&
+ if (sc->sc_ah->sw_mgmt_crypto_tx &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
@@ -1737,17 +1727,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
bss_conf->bssid, bss_conf->assoc);
- ether_addr_copy(avp->bssid, bss_conf->bssid);
+ memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
avp->aid = bss_conf->aid;
avp->assoc = bss_conf->assoc;
ath9k_calculate_summary_state(sc, avp->chanctx);
-
- if (ath9k_is_chanctx_enabled()) {
- if (bss_conf->assoc)
- ath_chanctx_event(sc, vif,
- ATH_CHANCTX_EVENT_ASSOC);
- }
}
if (changed & BSS_CHANGED_IBSS) {
@@ -1843,6 +1827,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
u16 tid, u16 *ssn, u8 buf_size)
{
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
bool flush = false;
int ret = 0;
@@ -1854,6 +1839,12 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ if (ath9k_is_chanctx_enabled()) {
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ret = -EBUSY;
+ break;
+ }
+ }
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
@@ -1967,7 +1958,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
-static bool ath9k_has_tx_pending(struct ath_softc *sc)
+static bool ath9k_has_tx_pending(struct ath_softc *sc,
+ bool sw_pending)
{
int i, npend = 0;
@@ -1975,7 +1967,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
if (!ATH_TXQ_SETUP(sc, i))
continue;
- npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
+ sw_pending);
if (npend)
break;
}
@@ -1987,18 +1980,38 @@ static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+ goto flush;
+ /*
+ * If MCC is active, extend the flush timeout
+ * and wait for the HW/SW queues to become
+ * empty. This needs to be done outside the
+ * sc->mutex lock to allow the channel scheduler
+ * to switch channel contexts.
+ *
+ * The vif queues have been stopped in mac80211,
+ * so there won't be any incoming frames.
+ */
+ __ath9k_flush(hw, queues, drop, true, true);
+ return;
+ }
+flush:
mutex_lock(&sc->mutex);
- __ath9k_flush(hw, queues, drop);
+ __ath9k_flush(hw, queues, drop, true, false);
mutex_unlock(&sc->mutex);
}
-void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int timeout = HZ / 5; /* 200 ms */
+ int timeout;
bool drain_txq;
cancel_delayed_work_sync(&sc->tx_complete_work);
@@ -2013,7 +2026,17 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+ spin_lock_bh(&sc->chan_lock);
+ if (timeout_override)
+ timeout = HZ / 5;
+ else
+ timeout = sc->cur_chan->flush_timeout;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_dbg(common, CHAN_CTX,
+ "Flush timeout: %d\n", jiffies_to_msecs(timeout));
+
+ if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
timeout) > 0)
drop = false;
@@ -2024,7 +2047,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_restore(sc);
}
@@ -2036,7 +2059,7 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- return ath9k_has_tx_pending(sc);
+ return ath9k_has_tx_pending(sc, true);
}
static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
@@ -2167,14 +2190,17 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
set_bit(ATH_OP_SCANNING, &common->op_flags);
}
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2183,6 +2209,28 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (sc->offchannel.roc_vif) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting RoC\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+ ath_roc_complete(sc, true);
+ }
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting HW scan\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ ath_scan_complete(sc, true);
+ }
+}
+
static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
@@ -2313,7 +2361,6 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw,
conf->def.chan->center_freq);
ath_chanctx_set_channel(sc, ctx, &conf->def);
- ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN);
mutex_unlock(&sc->mutex);
return 0;
@@ -2370,6 +2417,8 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath_chanctx *ctx = ath_chanctx_get(conf);
int i;
+ ath9k_cancel_pending_offchannel(sc);
+
mutex_lock(&sc->mutex);
ath_dbg(common, CHAN_CTX,
@@ -2399,6 +2448,8 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ath_chanctx *ctx = ath_chanctx_get(conf);
int ac;
+ ath9k_cancel_pending_offchannel(sc);
+
mutex_lock(&sc->mutex);
ath_dbg(common, CHAN_CTX,
@@ -2422,7 +2473,11 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
+ struct ath_beacon_config *cur_conf;
+ struct ath_chanctx *go_ctx;
+ unsigned long timeout;
bool changed = false;
+ u32 beacon_int;
if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
return;
@@ -2433,19 +2488,57 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
spin_lock_bh(&sc->chan_lock);
- if (sc->next_chan || (sc->cur_chan != avp->chanctx)) {
- sc->next_chan = avp->chanctx;
+ if (sc->next_chan || (sc->cur_chan != avp->chanctx))
changed = true;
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (!changed)
+ goto out;
+
+ ath9k_cancel_pending_offchannel(sc);
+
+ go_ctx = ath_is_go_chanctx_present(sc);
+
+ if (go_ctx) {
+ /*
+ * Wait till the GO interface gets a chance
+ * to send out an NoA.
+ */
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = true;
+ cur_conf = &go_ctx->beacon;
+ beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
+ spin_unlock_bh(&sc->chan_lock);
+
+ timeout = usecs_to_jiffies(beacon_int * 2);
+ init_completion(&sc->go_beacon);
+
+ mutex_unlock(&sc->mutex);
+
+ if (wait_for_completion_timeout(&sc->go_beacon,
+ timeout) == 0) {
+ ath_dbg(common, CHAN_CTX,
+ "Failed to send new NoA\n");
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = false;
+ spin_unlock_bh(&sc->chan_lock);
+ }
+
+ mutex_lock(&sc->mutex);
}
+
ath_dbg(common, CHAN_CTX,
- "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n",
- __func__, changed);
+ "%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
+ __func__, vif->addr);
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->next_chan = avp->chanctx;
sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
spin_unlock_bh(&sc->chan_lock);
- if (changed)
- ath_chanctx_set_next(sc, true);
-
+ ath_chanctx_set_next(sc, true);
+out:
mutex_unlock(&sc->mutex);
}
@@ -2468,6 +2561,24 @@ void ath9k_fill_chanctx_ops(void)
#endif
+static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ mutex_lock(&sc->mutex);
+ if (avp->chanctx)
+ *dbm = avp->chanctx->cur_txpower;
+ else
+ *dbm = sc->cur_chan->cur_txpower;
+ mutex_unlock(&sc->mutex);
+
+ *dbm /= 2;
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2477,8 +2588,7 @@ struct ieee80211_ops ath9k_ops = {
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
.configure_filter = ath9k_configure_filter,
- .sta_add = ath9k_sta_add,
- .sta_remove = ath9k_sta_remove,
+ .sta_state = ath9k_sta_state,
.sta_notify = ath9k_sta_notify,
.conf_tx = ath9k_conf_tx,
.bss_info_changed = ath9k_bss_info_changed,
@@ -2515,4 +2625,5 @@ struct ieee80211_ops ath9k_ops = {
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
+ .get_txpower = ath9k_get_txpower,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c018dea0b2e8..f009b5b57e5e 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -30,6 +30,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+#ifdef CONFIG_ATH9K_PCOEM
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
PCI_VENDOR_ID_AZWAVE,
@@ -82,6 +83,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
PCI_VENDOR_ID_AZWAVE,
0x2C37),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
@@ -102,6 +104,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+#ifdef CONFIG_ATH9K_PCOEM
/* PCI-E CUS198 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
@@ -294,10 +297,12 @@ static const struct pci_device_id ath_pci_id_table[] = {
PCI_VENDOR_ID_ASUSTEK,
0x850D),
.driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+#endif
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+#ifdef CONFIG_ATH9K_PCOEM
/* PCI-E CUS217 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
@@ -652,11 +657,14 @@ static const struct pci_device_id ath_pci_id_table[] = {
0x0036,
PCI_VENDOR_ID_DELL,
0x020E),
- .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ .driver_data = ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV |
+ ATH9K_PCI_LED_ACT_HI},
/* PCI-E AR9565 (WB335) */
{ PCI_VDEVICE(ATHEROS, 0x0036),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6914e21816e4..7395afbc5124 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -870,7 +870,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
*/
if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
- if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 2a938f4feac5..fb11a9172f38 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -892,10 +892,21 @@
(AR_SREV_9330((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_12))
+#ifdef CONFIG_ATH9K_PCOEM
+#define AR_SREV_9462(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
+#define AR_SREV_9565(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+#else
+#define AR_SREV_9462(_ah) 0
+#define AR_SREV_9485(_ah) 0
+#define AR_SREV_9565(_ah) 0
+#endif
+
#define AR_SREV_9485_11_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
+ (AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
@@ -915,34 +926,30 @@
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
-#define AR_SREV_9462(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
#define AR_SREV_9462_20(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
#define AR_SREV_9462_21(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
#define AR_SREV_9462_20_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
#define AR_SREV_9462_21_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
-#define AR_SREV_9565(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
#define AR_SREV_9565_10(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
#define AR_SREV_9565_101(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
#define AR_SREV_9565_11(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
#define AR_SREV_9565_11_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
#define AR_SREV_9550(_ah) \
@@ -1598,6 +1605,7 @@ enum {
#define AR_RESET_TSF 0x8020
#define AR_RESET_TSF_ONCE 0x01000000
+#define AR_RESET_TSF2_ONCE 0x02000000
#define AR_MAX_CFP_DUR 0x8038
#define AR_CFP_VAL 0x0000FFFF
@@ -1716,6 +1724,8 @@ enum {
#define AR_TPC_CTS_S 8
#define AR_TPC_CHIRP 0x003f0000
#define AR_TPC_CHIRP_S 16
+#define AR_TPC_RPT 0x3f000000
+#define AR_TPC_RPT_S 24
#define AR_QUIET1 0x80fc
#define AR_QUIET1_NEXT_QUIET_S 0
@@ -1959,6 +1969,8 @@ enum {
#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
#define AR_MAC_PCU_GEN_TIMER_TSF_SEL 0x83d8
+#define AR_DIRECT_CONNECT 0x83a0
+#define AR_DC_AP_STA_EN 0x00000001
#define AR_AES_MUTE_MASK0 0x805c
#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 40ab65e6882f..ac4781f37e78 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -99,7 +99,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
static void ath9k_tx99_deinit(struct ath_softc *sc)
{
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_wakeup(sc);
ath9k_tx99_stop(sc);
@@ -127,7 +127,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_wakeup(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d6e54a3c88f6..e9bd02c2e844 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1096,6 +1096,37 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
}
}
+static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
+ u8 rateidx)
+{
+ u8 max_power;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (sc->tx99_state)
+ return MAX_RATE_POWER;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ /* ar9002 is not supported for the moment */
+ return MAX_RATE_POWER;
+ }
+
+ if (!bf->bf_state.bfs_paprd) {
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+
+ if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
+ max_power = min(ah->tx_power_stbc[rateidx],
+ fi->tx_power);
+ else
+ max_power = min(ah->tx_power[rateidx], fi->tx_power);
+ } else {
+ max_power = ah->paprd_training_power;
+ }
+
+ return max_power;
+}
+
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len, bool rts)
{
@@ -1166,6 +1197,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
continue;
}
@@ -1193,6 +1226,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
phy, rate->bitrate * 100, len, rix, is_sp);
+
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
}
/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
@@ -1239,7 +1274,6 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
memset(&info, 0, sizeof(info));
info.is_first = true;
info.is_last = true;
- info.txpower = MAX_RATE_POWER;
info.qcu = txq->axq_qnum;
while (bf) {
@@ -2063,6 +2097,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+ fi->tx_power = MAX_RATE_POWER;
if (!rate)
return;
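
ath_get_rate_txpower() above selects the per-series power as the smaller of the per-rate calibration limit and the per-frame limit. A stripped-down sketch of that selection (illustrative only; names are made up and the STBC/PAPRD special cases are dropped):

        #include <linux/kernel.h>

        /* For each of the four rate series, never exceed either the per-rate
         * limit from the calibration tables or the limit carried with the
         * frame itself. */
        static void example_fill_series_power(u8 txpower[4],
                                              const u8 rate_limit[4],
                                              u8 frame_limit)
        {
                int i;

                for (i = 0; i < 4; i++)
                        txpower[i] = min(rate_limit[i], frame_limit);
        }
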
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index b80b2138ce3c..dca6df13fd5b 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -994,7 +994,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
refsel0 = 0;
refsel1 = 1;
}
- chansel = byte_rev_table[chansel];
+ chansel = bitrev8(chansel);
} else {
if (freq == 2484) {
chansel = 10 + (freq - 2274) / 5;
@@ -1002,7 +1002,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
} else
chansel = 16 + (freq - 2272) / 5;
chansel *= 4;
- chansel = byte_rev_table[chansel];
+ chansel = bitrev8(chansel);
}
d1 = chansel;
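
The two carl9170 hunks above swap an open-coded byte_rev_table[] lookup for the bitrev8() helper; both reverse the bit order of a byte. A tiny illustration, not part of the patch:

        #include <linux/bitrev.h>

        /* bitrev8(0x01) == 0x80, bitrev8(0xA0) == 0x05, and so on. */
        static u8 example_reverse_channel_sel(u8 chansel)
        {
                return bitrev8(chansel);
        }
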
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 650be79c7ac9..cfd0554cf140 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
@@ -105,7 +105,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
- JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index b71d2b33532d..7dd8873f757e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -494,7 +494,9 @@ out:
return ret;
}
-static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct wcn36xx *wcn = hw->priv;
@@ -502,7 +504,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
wcn36xx_smd_start_scan(wcn);
}
-static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
@@ -1075,7 +1078,6 @@ static struct platform_driver wcn36xx_driver = {
.remove = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
- .owner = THIS_MODULE,
},
.id_table = wcn36xx_platform_id_table,
};
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d9f4b30dd343..38332a6dfb3a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -792,12 +792,13 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
- struct net_device *dev, const u8 *mac)
+ struct net_device *dev,
+ struct station_del_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, mac);
+ wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index 8d99021d27a8..3249562d93b4 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -32,6 +32,23 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
va_end(args);
}
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ if (net_ratelimit()) {
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ netdev_err(ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+ }
+}
+
void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 54a6ddc6301b..4e6e14501c2f 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -573,8 +573,10 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
if (!frame)
return -ENOMEM;
- if (copy_from_user(frame, buf, len))
+ if (copy_from_user(frame, buf, len)) {
+ kfree(frame);
return -EIO;
+ }
params.buf = frame;
params.len = len;
@@ -614,8 +616,10 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
return -ENOMEM;
rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0)
+ if (rc < 0) {
+ kfree(wmi);
return rc;
+ }
cmd = &wmi[1];
cmdid = le16_to_cpu(wmi->id);
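
Both debugfs hunks above plug the same leak: a buffer allocated with kmalloc() was not freed when copying from user space failed. The general shape of the fix, as a self-contained sketch with made-up names (illustrative only):

        #include <linux/slab.h>
        #include <linux/uaccess.h>

        static ssize_t example_write(const char __user *buf, size_t len)
        {
                void *frame = kmalloc(len, GFP_KERNEL);

                if (!frame)
                        return -ENOMEM;

                if (copy_from_user(frame, buf, len)) {
                        kfree(frame);   /* do not leak on the error path */
                        return -EIO;
                }

                /* ... hand the buffer off or consume it here ... */

                kfree(frame);
                return len;
        }
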
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 8c6f3b041f77..93c5cc16c515 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -15,7 +15,6 @@
*/
#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/crc32.h>
#include "wil6210.h"
#include "fw.h"
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 44cb71f5ea5b..d4acf93a9a02 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -446,7 +446,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
if (size >= sizeof(*hdr)) {
wil_err_fw(wil, "Stop at offset %ld"
" record type %d [%zd bytes]\n",
- (const void *)hdr - data,
+ (long)((const void *)hdr - data),
le16_to_cpu(hdr->type), hdr_sz);
}
return -EINVAL;
@@ -471,7 +471,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
size_t sz;
const void *d;
- rc = request_firmware(&fw, name, wil_to_pcie_dev(wil));
+ rc = request_firmware(&fw, name, wil_to_dev(wil));
if (rc) {
wil_err_fw(wil, "Failed to load firmware %s\n", name);
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 90f416f239bd..4bcbd6297b3e 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -36,7 +36,8 @@
*/
#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
-#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
+ BIT_DMA_EP_RX_ICR_RX_HTRSH)
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
@@ -171,6 +172,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
@@ -182,12 +184,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
wil6210_mask_irq_rx(wil);
- if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
+ * moderation is not used. Interrupt moderation may cause RX
+ * buffer overflow while RX_DONE is delayed. The required
+ * action is always the same - should empty the accumulated
+ * packets from the RX ring.
+ */
+ if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) {
wil_dbg_irq(wil, "RX done\n");
- isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
+ wil_err_ratelimited(wil, "Received \"Rx buffer is at risk "
+ "of overflow\" interrupt\n");
+
+ isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH);
if (test_bit(wil_status_reset_done, &wil->status)) {
if (test_bit(wil_status_napi_en, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
wil_err(wil, "Got Rx interrupt while "
@@ -204,6 +218,10 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
/* Rx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx(wil);
+
return IRQ_HANDLED;
}
@@ -213,6 +231,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
@@ -231,6 +250,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
isr &= ~(BIT(25) - 1UL);
if (test_bit(wil_status_reset_done, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
napi_schedule(&wil->napi_tx);
} else {
wil_err(wil, "Got Tx interrupt while in reset\n");
@@ -243,6 +263,10 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
/* Tx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx(wil);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6500caf8d609..8ff3fe34fe05 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -38,6 +38,65 @@ static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT;
module_param(itr_trsh, uint, S_IRUGO);
MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs.");
+/* We allow allocation of more than 1 page buffers to support large packets.
+ * It is suboptimal behavior performance wise in case MTU above page size.
+ */
+unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN;
+static int mtu_max_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ /* Sets mtu_max directly; there is no need to restore it on an
+ * illegal value, since the error return makes insmod fail.
+ */
+ ret = param_set_uint(val, kp);
+ if (ret)
+ return ret;
+
+ if (mtu_max < 68 || mtu_max > IEEE80211_MAX_DATA_LEN_DMG)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct kernel_param_ops mtu_max_ops = {
+ .set = mtu_max_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+MODULE_PARM_DESC(mtu_max, " Max MTU value.");
+
+static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
+static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+
+static int ring_order_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ uint x;
+
+ ret = kstrtouint(val, 0, &x);
+ if (ret)
+ return ret;
+
+ if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX))
+ return -EINVAL;
+
+ *((uint *)kp->arg) = x;
+
+ return 0;
+}
+
+static struct kernel_param_ops ring_order_ops = {
+ .set = ring_order_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
+
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
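
The module_param_cb() blocks above attach validating setters to mtu_max and the ring-order parameters, so an out-of-range value (e.g. "modprobe wil6210 rx_ring_order=99") is rejected at load time. The same validating-parameter idiom reduced to a standalone sketch, with made-up names and bounds:

        #include <linux/kernel.h>
        #include <linux/moduleparam.h>

        static uint example_order = 10;

        static int example_order_set(const char *val, const struct kernel_param *kp)
        {
                uint x;
                int ret = kstrtouint(val, 0, &x);

                if (ret)
                        return ret;
                if (x < 4 || x > 16)
                        return -EINVAL; /* refuses the module load */

                *(uint *)kp->arg = x;
                return 0;
        }

        static const struct kernel_param_ops example_order_ops = {
                .set = example_order_set,
                .get = param_get_uint,
        };

        module_param_cb(example_order, &example_order_ops, &example_order, S_IRUGO);
        MODULE_PARM_DESC(example_order, " Example ring order; size = 1 << order");
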
@@ -74,7 +133,8 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
-static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
+ u16 reason_code, bool from_event)
{
uint i;
struct net_device *ndev = wil_to_ndev(wil);
@@ -86,7 +146,9 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
- wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+ if (!from_event)
+ wmi_disconnect_sta(wil, sta->addr, reason_code);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
@@ -118,7 +180,8 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
memset(&sta->stats, 0, sizeof(sta->stats));
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
{
int cid = -ENOENT;
struct net_device *ndev = wil_to_ndev(wil);
@@ -133,10 +196,10 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
}
if (cid >= 0) /* disconnect 1 peer */
- wil_disconnect_cid(wil, cid);
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
else /* disconnect all */
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
- wil_disconnect_cid(wil, cid);
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
/* link state */
switch (wdev->iftype) {
@@ -145,8 +208,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
wil_link_off(wil);
if (test_bit(wil_status_fwconnected, &wil->status)) {
clear_bit(wil_status_fwconnected, &wil->status);
- cfg80211_disconnected(ndev,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
+ cfg80211_disconnected(ndev, reason_code,
NULL, 0, GFP_KERNEL);
} else if (test_bit(wil_status_fwconnecting, &wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
@@ -166,7 +228,7 @@ static void wil_disconnect_worker(struct work_struct *work)
struct wil6210_priv, disconnect_worker);
mutex_lock(&wil->mutex);
- _wil6210_disconnect(wil, NULL);
+ _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
mutex_unlock(&wil->mutex);
}
@@ -188,6 +250,7 @@ static void wil_scan_timer_fn(ulong x)
clear_bit(wil_status_fwready, &wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
}
@@ -223,6 +286,11 @@ static void wil_fw_error_worker(struct work_struct *work)
wil_dbg_misc(wil, "fw error worker\n");
+ if (!netif_running(wil_to_ndev(wil))) {
+ wil_info(wil, "No recovery - interface is down\n");
+ return;
+ }
+
/* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
* passed since the last recovery attempt
*/
@@ -257,9 +325,12 @@ static void wil_fw_error_worker(struct work_struct *work)
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
+ wil_info(wil, "No recovery for AP-like interface\n");
/* recovery in these modes is done by upper layers */
break;
default:
+ wil_err(wil, "No recovery - unknown interface type %d\n",
+ wdev->iftype);
break;
}
mutex_unlock(&wil->mutex);
@@ -291,7 +362,7 @@ static void wil_connect_worker(struct work_struct *work)
wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
- rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
+ rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0);
wil->pending_connect_cid = -1;
if (rc == 0) {
wil->sta[cid].status = wil_sta_connected;
@@ -346,12 +417,23 @@ int wil_priv_init(struct wil6210_priv *wil)
return 0;
}
-void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
+/**
+ * wil6210_disconnect - disconnect one connection
+ * @wil: driver context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ * @from_event: whether invoked from the FW event handler
+ *
+ * Disconnect and release associated resources. If not invoked from the
+ * FW event handler, issue WMI command(s) to trigger a MAC disconnect.
+ */
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
{
wil_dbg_misc(wil, "%s()\n", __func__);
del_timer_sync(&wil->connect_timer);
- _wil6210_disconnect(wil, bssid);
+ _wil6210_disconnect(wil, bssid, reason_code, from_event);
}
void wil_priv_deinit(struct wil6210_priv *wil)
@@ -363,7 +445,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, NULL);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
destroy_workqueue(wil->wmi_wq_conn);
@@ -395,7 +477,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
- u32 hw_state;
+ u32 x;
u32 rev_id;
bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW);
@@ -410,9 +492,25 @@ static int wil_target_reset(struct wil6210_priv *wil)
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_halt_cpu(wil);
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); /* 40 MHz */
+
+ /* Clear Fw Download notification */
+ C(RGF_USER_USAGE_6, BIT(0));
if (is_sparrow) {
+ S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ /* XTAL stabilization should take about 3ms */
+ usleep_range(5000, 7000);
+ x = R(RGF_CAF_PLL_LOCK_STATUS);
+ if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+ wil_err(wil, "Xtal stabilization timeout\n"
+ "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+ return -ETIME;
+ }
+ /* switch 10k to XTAL */
+ C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ /* 40 MHz */
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
}
@@ -453,13 +551,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
/* wait until device ready. typical time is 200..250 msec */
do {
msleep(RST_DELAY);
- hw_state = R(RGF_USER_HW_MACHINE_STATE);
+ x = R(RGF_USER_HW_MACHINE_STATE);
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
- hw_state);
+ x);
return -ETIME;
}
- } while (hw_state != HW_MACHINE_BOOT_DONE);
+ } while (x != HW_MACHINE_BOOT_DONE);
/* TODO: Erez check rev_id != 1 */
if (!is_sparrow && (rev_id != 1))
@@ -535,7 +633,7 @@ int wil_reset(struct wil6210_priv *wil)
WARN_ON(test_bit(wil_status_napi_en, &wil->status));
cancel_work_sync(&wil->disconnect_worker);
- wil6210_disconnect(wil, NULL);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil->status = 0; /* prevent NAPI from being scheduled */
@@ -640,7 +738,7 @@ int __wil_up(struct wil6210_priv *wil)
return rc;
/* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil);
+ rc = wil_rx_init(wil, 1 << rx_ring_order);
if (rc)
return rc;
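
The new mtu_max, rx_ring_order and tx_ring_order parameters above all use module_param_cb() with a setter that range-checks the value before accepting it, and the ring size is then derived as 1 << order. Here is a small standalone sketch of the same validation, assuming only standard C; parse_ring_order, ORDER_MIN and ORDER_MAX are hypothetical names mirroring WIL_RING_SIZE_ORDER_MIN/MAX.

/* Sketch: validate a ring-size order and derive the ring size from it. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ORDER_MIN 5	/* 1 << 5  = 32 entries */
#define ORDER_MAX 15	/* 1 << 15 = 32768 entries */

static int parse_ring_order(const char *val, unsigned int *order)
{
	char *end;
	unsigned long x = strtoul(val, &end, 0);

	if (*end != '\0' || x < ORDER_MIN || x > ORDER_MAX)
		return -EINVAL;

	*order = (unsigned int)x;
	return 0;
}

int main(void)
{
	unsigned int order;

	if (parse_ring_order("9", &order) == 0)
		printf("ring size = %u entries\n", 1u << order);	/* 512 */
	if (parse_ring_order("99", &order) != 0)
		printf("99 rejected: outside [%d..%d]\n", ORDER_MIN, ORDER_MAX);
	return 0;
}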
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 239965106c05..e81703ca7701 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -41,7 +41,7 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- if (new_mtu < 68 || new_mtu > (TX_BUF_LEN - ETH_HLEN)) {
+ if (new_mtu < 68 || new_mtu > mtu_max) {
wil_err(wil, "invalid MTU %d\n", new_mtu);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 2936ef0c18cb..e3f8bdce5abc 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -206,12 +206,10 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = RX_BUF_LEN;
+ unsigned int sz = mtu_max + ETH_HLEN;
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
-
- /* TODO align */
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
if (unlikely(!skb))
@@ -385,7 +383,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct vring_rx_desc *d;
struct sk_buff *skb;
dma_addr_t pa;
- unsigned int sz = RX_BUF_LEN;
+ unsigned int sz = mtu_max + ETH_HLEN;
u16 dmalen;
u8 ftype;
u8 ds_bits;
@@ -596,7 +594,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_rx_refill(wil, v->size);
}
-int wil_rx_init(struct wil6210_priv *wil)
+int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct vring *vring = &wil->vring_rx;
int rc;
@@ -608,7 +606,7 @@ int wil_rx_init(struct wil6210_priv *wil)
return -EINVAL;
}
- vring->size = WIL6210_RX_RING_SIZE;
+ vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
@@ -646,7 +644,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.action = cpu_to_le32(WMI_VRING_CMD_ADD),
.vring_cfg = {
.tx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ .max_mpdu_size =
+ cpu_to_le16(mtu_max + ETH_HLEN),
.ring_size = cpu_to_le16(size),
},
.ringid = id,
@@ -927,8 +926,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
if (avail < 1 + nr_frags) {
- wil_err(wil, "Tx ring full. No space for %d fragments\n",
- 1 + nr_frags);
+ wil_err_ratelimited(wil,
+ "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
return -ENOMEM;
}
_d = &vring->va[i].tx;
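
With the fixed RX_BUF_LEN/TX_BUF_LEN removed, both the Rx skb allocation and the max_mpdu_size reported to firmware are now derived from the mtu_max module parameter as mtu_max + ETH_HLEN. A tiny sketch of that sizing follows; the constants mirror the driver defaults, while the helper name is a hypothetical stand-in.

/* Sketch: Rx buffer size follows mtu_max instead of a fixed constant. */
#include <stdio.h>

#define ETH_HLEN		14
#define TXRX_BUF_LEN_DEFAULT	2242

static unsigned int rx_buf_size(unsigned int mtu_max)
{
	return mtu_max + ETH_HLEN;	/* what the Rx skb allocation uses */
}

int main(void)
{
	unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN;	/* module default */

	printf("default mtu_max = %u, Rx buffer = %u bytes\n",
	       mtu_max, rx_buf_size(mtu_max));
	return 0;
}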
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index de046716d2b7..630aeb5fa7f4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -21,8 +21,8 @@
#define BUF_HW_OWNED (0)
/* size of max. Tx/Rx buffers, as supported by FW */
-#define RX_BUF_LEN (2242)
-#define TX_BUF_LEN (2242)
+#define TXRX_BUF_LEN_DEFAULT (2242)
+
/* how many bytes to reserve for rtap header? */
#define WIL6210_RTAP_SIZE (128)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ce6488e42091..c6ec5b99ac7d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
#include "wil_platform.h"
extern bool no_fw_recovery;
+extern unsigned int mtu_max;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw"
@@ -48,8 +49,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
-#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (512)
+#define WIL_RX_RING_SIZE_ORDER_DEFAULT (9)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (9)
+/* limit ring size in range [32..32k] */
+#define WIL_RING_SIZE_ORDER_MIN (5)
+#define WIL_RING_SIZE_ORDER_MAX (15)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
@@ -117,12 +121,15 @@ struct RGF_ICR {
#define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18)
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
+#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
+ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
#define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
#define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+ #define BIT_DMA_EP_RX_ICR_RX_HTRSH BIT(1)
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
@@ -152,6 +159,10 @@ struct RGF_ICR {
#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
+#define RGF_CAF_OSC_CONTROL (0x88afa4)
+ #define BIT_CAF_OSC_XTAL_EN BIT(0)
+#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
+ #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
@@ -461,10 +472,14 @@ struct wil6210_priv {
#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
-#define wil_to_pcie_dev(i) (&i->pdev->dev)
+__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
@@ -575,9 +590,10 @@ void wil_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
int wmi_pcp_stop(struct wil6210_priv *wil);
-void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event);
-int wil_rx_init(struct wil6210_priv *wil);
+int wil_rx_init(struct wil6210_priv *wil, u16 size);
void wil_rx_fini(struct wil6210_priv *wil);
/* TX API */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 4311df982c60..63476c86cd0e 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -478,15 +478,15 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
void *d, int len)
{
struct wmi_disconnect_event *evt = d;
+ u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
- wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
- evt->bssid,
- evt->protocol_reason_status, evt->disconnect_reason);
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ evt->bssid, reason_code, evt->disconnect_reason);
wil->sinfo_gen++;
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, evt->bssid);
+ wil6210_disconnect(wil, evt->bssid, reason_code, true);
mutex_unlock(&wil->mutex);
}
@@ -1025,7 +1025,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5d4173ee55bc..47731cb0d815 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5110,7 +5110,9 @@ static void b43_op_sta_notify(struct ieee80211_hw *hw,
B43_WARN_ON(!vif || wl->vif != vif);
}
-static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw)
+static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -5124,7 +5126,8 @@ static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
-static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
+static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 90a977fe9a64..dc4c75083085 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -23,15 +23,15 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
- wl_cfg80211.o \
+ cfg80211.o \
chip.o \
fwil.o \
fweh.o \
fwsignal.o \
p2p.o \
proto.o \
- dhd_common.o \
- dhd_linux.o \
+ common.o \
+ core.o \
firmware.o \
feature.o \
btcoex.o \
@@ -43,14 +43,14 @@ brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
flowring.o \
msgbuf.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
- dhd_sdio.o \
+ sdio.o \
bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMFMAC_PCIE) += \
pcie.o
brcmfmac-$(CONFIG_BRCMDBG) += \
- dhd_dbg.o
+ debug.o
brcmfmac-$(CONFIG_BRCM_TRACING) += \
tracepoint.o
brcmfmac-$(CONFIG_OF) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
index a159ff3427de..8e0e91c4a0b1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -25,10 +25,10 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "bus.h"
#include "fwsignal.h"
-#include "dhd_dbg.h"
+#include "debug.h"
#include "tracepoint.h"
#include "proto.h"
#include "bcdc.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 8dbd5dbb78fd..3c06e9365949 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -41,9 +41,9 @@
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
-#include "sdio_host.h"
+#include "bus.h"
+#include "debug.h"
+#include "sdio.h"
#include "of.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -1064,6 +1064,16 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
if (!sdiodev->pdata)
brcmf_of_probe(sdiodev);
+#ifdef CONFIG_PM_SLEEP
+ /* WOWL can be supported when KEEP_POWER is set and either WAKE_SDIO_IRQ
+ * is set or the platform data reports OOB IRQ support.
+ */
+ if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
+ ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+ (sdiodev->pdata->oob_irq_supported)))
+ bus_if->wowl_supported = true;
+#endif
+
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_word_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
@@ -1116,34 +1126,39 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(SDIO, "Exit\n");
}
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+ brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ sdiodev->wowl_enabled = enabled;
+}
+
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
- mmc_pm_flag_t sdio_flags;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- int ret = 0;
+ mmc_pm_flag_t sdio_flags;
brcmf_dbg(SDIO, "Enter\n");
- sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_err("Host can't keep power while suspended\n");
- return -EINVAL;
- }
-
atomic_set(&sdiodev->suspend, true);
- ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- brcmf_err("Failed to set pm_flags\n");
- atomic_set(&sdiodev->suspend, false);
- return ret;
+ if (sdiodev->wowl_enabled) {
+ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->pdata->oob_irq_supported)
+ enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ else
+ sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
+ if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
}
brcmf_sdio_wd_timer(sdiodev->bus, 0);
- return ret;
+ return 0;
}
static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1152,6 +1167,8 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
+ if (sdiodev->pdata->oob_irq_supported)
+ disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -1204,7 +1221,6 @@ static struct platform_driver brcmf_sdio_pd = {
.remove = brcmf_sdio_pd_remove,
.driver = {
.name = BRCMFMAC_SDIO_PDATA_NAME,
- .owner = THIS_MODULE,
}
};
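
The probe change above only advertises WOWL when the SDIO host can keep the card powered across suspend and there is a wake path, either an in-band SDIO interrupt or an out-of-band IRQ described in platform data. The sketch below reduces that decision to plain booleans; the names are stand-ins for the MMC_PM_* host capabilities and the platform-data flag, not real driver symbols.

/* Sketch: when can WOWL be advertised by the SDIO bus layer? */
#include <stdbool.h>
#include <stdio.h>

static bool wowl_supported(bool keep_power, bool wake_sdio_irq,
			   bool oob_irq_supported)
{
	/* must keep power, and must have at least one wake mechanism */
	return keep_power && (wake_sdio_irq || oob_irq_supported);
}

int main(void)
{
	printf("%d\n", wowl_supported(true, false, true));	/* 1: OOB IRQ wake */
	printf("%d\n", wowl_supported(true, true, false));	/* 1: SDIO IRQ wake */
	printf("%d\n", wowl_supported(false, true, true));	/* 0: cannot keep power */
	return 0;
}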
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
index a29ac4977b3a..0445163991b7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
@@ -20,13 +20,13 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <defs.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
+#include "core.h"
+#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "btcoex.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
/* T1 start SCO/eSCO priority suppression */
#define BRCMF_BTCOEX_OPPR_WIN_TIME 2000
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index 80e73a1262be..ef344e47218a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -14,10 +14,10 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCMF_BUS_H_
-#define _BRCMF_BUS_H_
+#ifndef BRCMFMAC_BUS_H
+#define BRCMFMAC_BUS_H
-#include "dhd_dbg.h"
+#include "debug.h"
/* IDs of the 6 default common rings of msgbuf protocol */
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0
@@ -227,8 +227,7 @@ void brcmf_txflowblock(struct device *dev, bool state);
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
int brcmf_bus_start(struct device *dev);
-s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data,
- u32 len);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
@@ -241,4 +240,4 @@ void brcmf_usb_exit(void);
void brcmf_usb_register(void);
#endif
-#endif /* _BRCMF_BUS_H_ */
+#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 39b45c038a93..3aecc5f48719 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -26,18 +26,18 @@
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwil_types.h"
#include "p2p.h"
#include "btcoex.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
#include "proto.h"
#include "vendor.h"
-#include "dhd_bus.h"
+#include "bus.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
@@ -520,6 +520,95 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
ADDR_INDIRECT);
}
+static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+{
+ struct brcmf_mbss_ssid_le mbss_ssid_le;
+ int bsscfgidx;
+ int err;
+
+ memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
+ bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr);
+ if (bsscfgidx < 0)
+ return bsscfgidx;
+
+ mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
+ mbss_ssid_le.SSID_len = cpu_to_le32(5);
+ sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx);
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
+ sizeof(mbss_ssid_le));
+ if (err < 0)
+ brcmf_err("setting ssid failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * brcmf_ap_add_vif() - create a new AP virtual interface for multiple BSS
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @flags: not used.
+ * @params: contains mac address for AP device.
+ */
+static
+struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
+ u32 *flags, struct vif_params *params)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return ERR_PTR(-EBUSY);
+
+ brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
+
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false);
+ if (IS_ERR(vif))
+ return (struct wireless_dev *)vif;
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+ err = brcmf_cfg80211_request_ap_if(ifp);
+ if (err) {
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ goto fail;
+ }
+
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* interface created in firmware */
+ ifp = vif->ifp;
+ if (!ifp) {
+ brcmf_err("no if pointer provided\n");
+ err = -ENOENT;
+ goto fail;
+ }
+
+ strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ err = brcmf_net_attach(ifp, true);
+ if (err) {
+ brcmf_err("Registering netdevice failed\n");
+ goto fail;
+ }
+
+ return &ifp->vif->wdev;
+
+fail:
+ brcmf_free_vif(vif);
+ return ERR_PTR(err);
+}
+
static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
{
enum nl80211_iftype iftype;
@@ -545,12 +634,16 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_AP:
+ wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
+ if (!IS_ERR(wdev))
+ brcmf_cfg80211_update_proto_addr_mode(wdev);
+ return wdev;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
@@ -1815,6 +1908,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
@@ -2785,6 +2879,44 @@ static __always_inline void brcmf_delay(u32 ms)
}
}
+static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4],
+ u8 *pattern, u32 patternsize, u8 *mask,
+ u32 packet_offset)
+{
+ struct brcmf_fil_wowl_pattern_le *filter;
+ u32 masksize;
+ u32 patternoffset;
+ u8 *buf;
+ u32 bufsize;
+ s32 ret;
+
+ masksize = (patternsize + 7) / 8;
+ patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize;
+
+ bufsize = sizeof(*filter) + patternsize + masksize;
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ filter = (struct brcmf_fil_wowl_pattern_le *)buf;
+
+ memcpy(filter->cmd, cmd, 4);
+ filter->masksize = cpu_to_le32(masksize);
+ filter->offset = cpu_to_le32(packet_offset);
+ filter->patternoffset = cpu_to_le32(patternoffset);
+ filter->patternsize = cpu_to_le32(patternsize);
+ filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP);
+
+ if ((mask) && (masksize))
+ memcpy(buf + sizeof(*filter), mask, masksize);
+ if ((pattern) && (patternsize))
+ memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize);
+
+ kfree(buf);
+ return ret;
+}
+
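
brcmf_config_wowl_pattern() above packs a variable-length firmware buffer: a fixed header starting with the 4-byte "add"/"clr" command, then a bitmask with one bit per pattern byte, then the pattern itself; the pattern offset recorded for firmware is measured from the end of the command field. The sketch below shows just the size/offset arithmetic; HDR_SIZE and CMD_SIZE are placeholder values standing in for sizeof(struct brcmf_fil_wowl_pattern_le) and sizeof(filter->cmd), not taken from the real structure.

/* Sketch: size/offset arithmetic behind the wowl_pattern buffer. */
#include <stdio.h>

#define HDR_SIZE	36u	/* placeholder for sizeof(*filter) */
#define CMD_SIZE	4u	/* the "add"/"clr" command field */

int main(void)
{
	unsigned int patternsize = 20;			/* pattern bytes */
	unsigned int masksize = (patternsize + 7) / 8;	/* one mask bit per pattern byte */
	unsigned int patternoffset = HDR_SIZE - CMD_SIZE + masksize;
	unsigned int bufsize = HDR_SIZE + masksize + patternsize;

	/* buffer layout: [header][mask][pattern] */
	printf("mask %u bytes, pattern offset %u, total buffer %u bytes\n",
	       masksize, patternoffset, bufsize);
	return 0;
}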
static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@@ -2794,10 +2926,11 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
brcmf_dbg(TRACE, "Enter\n");
if (cfg->wowl_enabled) {
+ brcmf_configure_arp_offload(ifp, true);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM,
cfg->pre_wowl_pmmode);
- brcmf_fil_iovar_data_set(ifp, "wowl_pattern", "clr", 4);
brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0);
+ brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0);
cfg->wowl_enabled = false;
}
return 0;
@@ -2808,21 +2941,29 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
struct cfg80211_wowlan *wowl)
{
u32 wowl_config;
+ u32 i;
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
+ brcmf_configure_arp_offload(ifp, false);
brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX);
wowl_config = 0;
if (wowl->disconnect)
- wowl_config |= WL_WOWL_DIS | WL_WOWL_BCN | WL_WOWL_RETR;
- /* Note: if "wowl" target and not "wowlpf" then wowl_bcn_loss
- * should be configured. This paramater is not supported by
- * wowlpf.
- */
+ wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR;
if (wowl->magic_pkt)
- wowl_config |= WL_WOWL_MAGIC;
+ wowl_config |= BRCMF_WOWL_MAGIC;
+ if ((wowl->patterns) && (wowl->n_patterns)) {
+ wowl_config |= BRCMF_WOWL_NET;
+ for (i = 0; i < wowl->n_patterns; i++) {
+ brcmf_config_wowl_pattern(ifp, "add",
+ (u8 *)wowl->patterns[i].pattern,
+ wowl->patterns[i].pattern_len,
+ (u8 *)wowl->patterns[i].mask,
+ wowl->patterns[i].pkt_offset);
+ }
+ }
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -2885,7 +3026,7 @@ brcmf_update_pmklist(struct net_device *ndev,
struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
{
int i, j;
- int pmkid_len;
+ u32 pmkid_len;
pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
@@ -2913,8 +3054,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
s32 err = 0;
- int i;
- int pmkid_len;
+ u32 pmkid_len, i;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -2953,7 +3093,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list pmkid;
s32 err = 0;
- int i, pmkid_len;
+ u32 pmkid_len, i;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -3314,11 +3454,10 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev,
+brcmf_configure_wpaie(struct brcmf_if *ifp,
const struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
@@ -3793,6 +3932,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
u16 chanspec;
+ bool mbss;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -3803,6 +3943,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->inactivity_timeout);
dev_role = ifp->vif->wdev.iftype;
+ mbss = ifp->vif->mbss;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3822,8 +3963,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
}
- brcmf_set_mpc(ifp, 0);
- brcmf_configure_arp_offload(ifp, false);
+ if (!mbss) {
+ brcmf_set_mpc(ifp, 0);
+ brcmf_configure_arp_offload(ifp, false);
+ }
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3837,13 +3980,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie != NULL) {
/* WPA IE */
- err = brcmf_configure_wpaie(ndev, wpa_ie, false);
+ err = brcmf_configure_wpaie(ifp, wpa_ie, false);
if (err < 0)
goto exit;
} else {
+ struct brcmf_vs_tlv *tmp_ie;
+
+ tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
+
/* RSN IE */
- err = brcmf_configure_wpaie(ndev,
- (struct brcmf_vs_tlv *)rsn_ie, true);
+ err = brcmf_configure_wpaie(ifp, tmp_ie, true);
if (err < 0)
goto exit;
}
@@ -3854,45 +4000,53 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
- chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
- if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
- goto exit;
- }
-
- if (settings->beacon_interval) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
- settings->beacon_interval);
+ if (!mbss) {
+ chanspec = chandef_to_chanspec(&cfg->d11inf,
+ &settings->chandef);
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Beacon Interval Set Error, %d\n", err);
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
goto exit;
}
- }
- if (settings->dtim_period) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
- settings->dtim_period);
- if (err < 0) {
- brcmf_err("DTIM Interval Set Error, %d\n", err);
- goto exit;
+
+ if (settings->beacon_interval) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+ settings->beacon_interval);
+ if (err < 0) {
+ brcmf_err("Beacon Interval Set Error, %d\n",
+ err);
+ goto exit;
+ }
+ }
+ if (settings->dtim_period) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+ settings->dtim_period);
+ if (err < 0) {
+ brcmf_err("DTIM Interval Set Error, %d\n", err);
+ goto exit;
+ }
}
- }
- if (dev_role == NL80211_IFTYPE_AP) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ brcmf_fil_iovar_int_set(ifp, "apsta", 0);
+ }
+
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- brcmf_fil_iovar_int_set(ifp, "apsta", 0);
- }
-
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
- if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
- goto exit;
}
if (dev_role == NL80211_IFTYPE_AP) {
+ if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
+ brcmf_fil_iovar_int_set(ifp, "mbss", 1);
+
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
@@ -3937,7 +4091,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
- if (err) {
+ if ((err) && (!mbss)) {
brcmf_set_mpc(ifp, 1);
brcmf_configure_arp_offload(ifp, true);
}
@@ -3958,20 +4112,31 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* first to make sure they get processed by fw. */
msleep(400);
+ if (ifp->vif->mbss) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ return err;
+ }
+
memset(&join_params, 0, sizeof(join_params));
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0)
brcmf_err("SET SSID error (%d)\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0)
- brcmf_err("BRCMF_C_UP error %d\n", err);
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
if (err < 0)
brcmf_err("setting INFRA mode failed %d\n", err);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
+ brcmf_fil_iovar_int_set(ifp, "mbss", 0);
+ /* Bring device back up so it can be used again */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err < 0)
+ brcmf_err("BRCMF_C_UP error %d\n", err);
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@@ -4004,24 +4169,24 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
- if (!mac)
+ if (!params->mac)
return -EFAULT;
- brcmf_dbg(TRACE, "Enter %pM\n", mac);
+ brcmf_dbg(TRACE, "Enter %pM\n", params->mac);
if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (!check_vif_up(ifp->vif))
return -EIO;
- memcpy(&scbval.ea, mac, ETH_ALEN);
+ memcpy(&scbval.ea, params->mac, ETH_ALEN);
scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scbval));
@@ -4323,7 +4488,9 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype type,
bool pm_block)
{
+ struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
+ bool mbss;
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
@@ -4339,6 +4506,17 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
+ if (type == NL80211_IFTYPE_AP) {
+ mbss = false;
+ list_for_each_entry(vif_walk, &cfg->vif_list, list) {
+ if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
+ mbss = true;
+ break;
+ }
+ }
+ vif->mbss = mbss;
+ }
+
list_add_tail(&vif->list, &cfg->vif_list);
return vif;
}
@@ -4581,6 +4759,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
@@ -4591,6 +4770,8 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled);
+ if (ifp->vif->mbss)
+ brcmf_remove_interface(ifp->drvr, ifp->bssidx);
return 0;
}
@@ -5382,7 +5563,28 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
return 0;
}
-static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+static const struct ieee80211_iface_limit brcmf_iface_limits_mbss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC)
+ },
+ {
+ .max = 4,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ }
+};
+
+static const struct ieee80211_iface_limit brcmf_iface_limits_sbss[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION) |
@@ -5403,8 +5605,8 @@ static struct ieee80211_iface_combination brcmf_iface_combos[] = {
{
.max_interfaces = BRCMF_IFACE_MAX_CNT,
.num_different_channels = 1,
- .n_limits = ARRAY_SIZE(brcmf_iface_limits),
- .limits = brcmf_iface_limits
+ .n_limits = ARRAY_SIZE(brcmf_iface_limits_sbss),
+ .limits = brcmf_iface_limits_sbss,
}
};
@@ -5446,10 +5648,13 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
-
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support brcmf_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = BRCMF_WOWL_MAXPATTERNS,
+ .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE,
+ .pattern_min_len = 1,
+ .max_pkt_offset = 1500,
};
#endif
@@ -5477,6 +5682,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
ifc_combo = brcmf_iface_combos[0];
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
ifc_combo.num_different_channels = 2;
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
+ ifc_combo.n_limits = ARRAY_SIZE(brcmf_iface_limits_mbss),
+ ifc_combo.limits = brcmf_iface_limits_mbss;
+ }
wiphy->iface_combinations = kmemdup(&ifc_combo,
sizeof(ifc_combo),
GFP_KERNEL);
@@ -5613,7 +5822,8 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp)
return wdev->iftype;
}
-bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state)
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+ unsigned long state)
{
struct brcmf_cfg80211_vif *vif;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 6abf94e41d3d..9e98b8d52757 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wl_cfg80211_h_
-#define _wl_cfg80211_h_
+#ifndef BRCMFMAC_CFG80211_H
+#define BRCMFMAC_CFG80211_H
/* for brcmu_d11inf */
#include <brcmu_d11.h>
@@ -183,6 +183,7 @@ struct vif_saved_ie {
* @pm_block: power-management blocked.
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
+ * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -194,6 +195,7 @@ struct brcmf_cfg80211_vif {
struct vif_saved_ie saved_ie;
struct list_head list;
u16 mgmt_rx_reg;
+ bool mbss;
};
/* association inform */
@@ -480,7 +482,8 @@ const struct brcmf_tlv *
brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
-bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state);
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+ unsigned long state);
void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif);
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
@@ -493,4 +496,4 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
-#endif /* _wl_cfg80211_h_ */
+#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 95efde868db8..ddae0b5e56ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -25,7 +25,7 @@
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <chipcommon.h>
-#include "dhd_dbg.h"
+#include "debug.h"
#include "chip.h"
/* SOC Interconnect types (aka chip types) */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c
new file mode 100644
index 000000000000..1861a13e8d03
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "tracepoint.h"
+
+#define BRCMF_DEFAULT_BCN_TIMEOUT 3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST 8
+
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+{
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
+ struct brcmf_join_pref_params join_pref_params[2];
+ char *ptr;
+ s32 err;
+
+ /* retrieve the MAC address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+ brcmf_err("Retreiving cur_etheraddr failed, %d\n",
+ err);
+ goto done;
+ }
+ memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ strcpy(buf, "ver");
+ err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+ if (err < 0) {
+ brcmf_err("Retreiving version information failed, %d\n",
+ err);
+ goto done;
+ }
+ ptr = (char *)buf;
+ strsep(&ptr, "\n");
+
+ /* Print fw version info */
+ brcmf_err("Firmware version = %s\n", buf);
+
+ /* locate firmware version number for ethtool */
+ ptr = strrchr(buf, ' ') + 1;
+ strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
+ /* set mpc */
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
+ if (err) {
+ brcmf_err("failed setting mpc\n");
+ goto done;
+ }
+
+ /*
+ * Set up a timeout to report link down if beacons are lost and
+ * roaming is off.
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+ BRCMF_DEFAULT_BCN_TIMEOUT);
+ if (err) {
+ brcmf_err("bcn_timeout error (%d)\n", err);
+ goto done;
+ }
+
+ /* Enable/Disable built-in roaming; disabled here so the external
+ * supplicant can take care of roaming.
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (err) {
+ brcmf_err("roam_off error (%d)\n", err);
+ goto done;
+ }
+
+ /* Set up join_pref to select the target by RSSI (with a boost on 5GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Get event_msgs error (%d)\n", err);
+ goto done;
+ }
+ setbit(eventmask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Set event_msgs error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup default scan channel time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* Setup default scan unassoc time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* do bus specific preinit here */
+ err = brcmf_bus_preinit(ifp->drvr->bus_if);
+done:
+ return err;
+}
+
+#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ if (brcmf_msg_level & level)
+ pr_debug("%s %pV", func, &vaf);
+ trace_brcmf_dbg(level, func, &vaf);
+ va_end(args);
+}
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
index c6d65b8e1e15..77656c711bed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -19,7 +19,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
+#include "core.h"
#include "commonring.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index fb1043908a23..effe6d7831d9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -22,12 +22,12 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
@@ -836,7 +836,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
return ifp;
}
-void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
+static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
struct brcmf_if *ifp;
@@ -869,6 +869,38 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
}
}
+void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
+{
+ if (drvr->iflist[bssidx]) {
+ brcmf_fws_del_interface(drvr->iflist[bssidx]);
+ brcmf_del_if(drvr, bssidx);
+ }
+}
+
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+ int ifidx;
+ int bsscfgidx;
+ bool available;
+ int highest;
+
+ available = false;
+ bsscfgidx = 2;
+ highest = 2;
+ for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
+ if (drvr->iflist[ifidx]) {
+ if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
+ bsscfgidx = highest + 1;
+ else if (drvr->iflist[ifidx]->bssidx > highest)
+ highest = drvr->iflist[ifidx]->bssidx;
+ } else {
+ available = true;
+ }
+ }
+
+ return available ? bsscfgidx : -ENOMEM;
+}
+
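
brcmf_get_next_free_bsscfgidx() above does a single pass over the interface table: the candidate index starts at 2, is bumped past any bsscfg index that is already taken, and -ENOMEM is returned when no interface slot is free. Below is a standalone version of the same scan run against a toy table where -1 marks an empty slot; the function and table names are stand-ins, not driver symbols.

/* Sketch: find the next free bsscfg index, mirroring the scan added above. */
#include <stdio.h>

#define MAX_IFS 8

static int next_free_bsscfgidx(const int *bssidx, int n)
{
	int i, candidate = 2, highest = 2, available = 0;

	for (i = 0; i < n; i++) {
		if (bssidx[i] < 0) {		/* empty interface slot */
			available = 1;
			continue;
		}
		if (bssidx[i] == candidate)
			candidate = highest + 1;
		else if (bssidx[i] > highest)
			highest = bssidx[i];
	}
	return available ? candidate : -1;
}

int main(void)
{
	int used[MAX_IFS] = { 0, 2, -1, -1, -1, -1, -1, -1 };

	printf("next free bsscfgidx = %d\n",
	       next_free_bsscfgidx(used, MAX_IFS));	/* prints 3 */
	return 0;
}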
int brcmf_attach(struct device *dev)
{
struct brcmf_pub *drvr = NULL;
@@ -1033,10 +1065,7 @@ void brcmf_detach(struct device *dev)
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
- if (drvr->iflist[i]) {
- brcmf_fws_del_interface(drvr->iflist[i]);
- brcmf_del_if(drvr, i);
- }
+ brcmf_remove_interface(drvr, i);
brcmf_cfg80211_detach(drvr->config);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index 5e4317dbc2b0..23f74b139cc8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -18,8 +18,8 @@
* Common types *
*/
-#ifndef _BRCMF_H_
-#define _BRCMF_H_
+#ifndef BRCMFMAC_CORE_H
+#define BRCMFMAC_CORE_H
#include "fweh.h"
@@ -83,7 +83,6 @@ struct brcmf_pub {
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
uint rxsz; /* Rx buffer size bus module should use */
- u8 wme_dp; /* wme discard priority */
/* Dongle media info */
char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
@@ -176,7 +175,8 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr);
-void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx);
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
@@ -186,4 +186,4 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
-#endif /* _BRCMF_H_ */
+#endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index be9f4f829192..9b473d50b005 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -19,9 +19,9 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
static struct dentry *root_folder;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
index dec40d316c82..eb0b8c47479d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCMF_DBG_H_
-#define _BRCMF_DBG_H_
+#ifndef BRCMFMAC_DEBUG_H
+#define BRCMFMAC_DEBUG_H
/* message levels */
#define BRCMF_TRACE_VAL 0x00000002
@@ -133,4 +133,4 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
}
#endif
-#endif /* _BRCMF_DBG_H_ */
+#endif /* BRCMFMAC_DEBUG_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
deleted file mode 100644
index d991f8e3d9ec..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
-#include "fwil.h"
-#include "fwil_types.h"
-#include "tracepoint.h"
-
-#define PKTFILTER_BUF_SIZE 128
-#define BRCMF_DEFAULT_BCN_TIMEOUT 3
-#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
-#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
-#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
-
-/* boost value for RSSI_DELTA in preferred join selection */
-#define BRCMF_JOIN_PREF_RSSI_BOOST 8
-
-
-bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
- struct sk_buff *pkt, int prec)
-{
- struct sk_buff *p;
- int eprec = -1; /* precedence to evict from */
- bool discard_oldest;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
-
- /* Fast case, precedence queue is not full and we are also not
- * exceeding total queue length
- */
- if (!pktq_pfull(q, prec) && !pktq_full(q)) {
- brcmu_pktq_penq(q, prec, pkt);
- return true;
- }
-
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(q, prec))
- eprec = prec;
- else if (pktq_full(q)) {
- p = brcmu_pktq_peek_tail(q, &eprec);
- if (eprec > prec)
- return false;
- }
-
- /* Evict if needed */
- if (eprec >= 0) {
- /* Detect queueing to unconfigured precedence */
- discard_oldest = ac_bitmap_tst(drvr->wme_dp, eprec);
- if (eprec == prec && !discard_oldest)
- return false; /* refuse newer (incoming) packet */
- /* Evict packet according to discard policy */
- p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
- brcmu_pktq_pdeq_tail(q, eprec);
- if (p == NULL)
- brcmf_err("brcmu_pktq_penq() failed, oldest %d\n",
- discard_oldest);
-
- brcmu_pkt_buf_free_skb(p);
- }
-
- /* Enqueue */
- p = brcmu_pktq_penq(q, prec, pkt);
- if (p == NULL)
- brcmf_err("brcmu_pktq_penq() failed\n");
-
- return p != NULL;
-}
-
-/* Convert user's input in hex pattern to byte-size mask */
-static int brcmf_c_pattern_atoh(char *src, char *dst)
-{
- int i;
- if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- brcmf_err("Mask invalid format. Needs to start with 0x\n");
- return -EINVAL;
- }
- src = src + 2; /* Skip past 0x */
- if (strlen(src) % 2 != 0) {
- brcmf_err("Mask invalid format. Length must be even.\n");
- return -EINVAL;
- }
- for (i = 0; *src != '\0'; i++) {
- unsigned long res;
- char num[3];
- strncpy(num, src, 2);
- num[2] = '\0';
- if (kstrtoul(num, 16, &res))
- return -EINVAL;
- dst[i] = (u8)res;
- src += 2;
- }
- return i;
-}
-
-static void
-brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
- int master_mode)
-{
- unsigned long res;
- char *argv;
- char *arg_save = NULL, *arg_org = NULL;
- s32 err;
- struct brcmf_pkt_filter_enable_le enable_parm;
-
- arg_save = kstrdup(arg, GFP_ATOMIC);
- if (!arg_save)
- goto fail;
-
- arg_org = arg_save;
-
- argv = strsep(&arg_save, " ");
-
- if (argv == NULL) {
- brcmf_err("No args provided\n");
- goto fail;
- }
-
- /* Parse packet filter id. */
- enable_parm.id = 0;
- if (!kstrtoul(argv, 0, &res))
- enable_parm.id = cpu_to_le32((u32)res);
-
- /* Enable/disable the specified filter. */
- enable_parm.enable = cpu_to_le32(enable);
-
- err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
- sizeof(enable_parm));
- if (err)
- brcmf_err("Set pkt_filter_enable error (%d)\n", err);
-
- /* Control the master mode */
- err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
- if (err)
- brcmf_err("Set pkt_filter_mode error (%d)\n", err);
-
-fail:
- kfree(arg_org);
-}
-
-static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
-{
- struct brcmf_pkt_filter_le *pkt_filter;
- unsigned long res;
- int buf_len;
- s32 err;
- u32 mask_size;
- u32 pattern_size;
- char *argv[8], *buf = NULL;
- int i = 0;
- char *arg_save = NULL, *arg_org = NULL;
-
- arg_save = kstrdup(arg, GFP_ATOMIC);
- if (!arg_save)
- goto fail;
-
- arg_org = arg_save;
-
- buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC);
- if (!buf)
- goto fail;
-
- argv[i] = strsep(&arg_save, " ");
- while (argv[i]) {
- i++;
- if (i >= 8) {
- brcmf_err("Too many parameters\n");
- goto fail;
- }
- argv[i] = strsep(&arg_save, " ");
- }
-
- if (i != 6) {
- brcmf_err("Not enough args provided %d\n", i);
- goto fail;
- }
-
- pkt_filter = (struct brcmf_pkt_filter_le *)buf;
-
- /* Parse packet filter id. */
- pkt_filter->id = 0;
- if (!kstrtoul(argv[0], 0, &res))
- pkt_filter->id = cpu_to_le32((u32)res);
-
- /* Parse filter polarity. */
- pkt_filter->negate_match = 0;
- if (!kstrtoul(argv[1], 0, &res))
- pkt_filter->negate_match = cpu_to_le32((u32)res);
-
- /* Parse filter type. */
- pkt_filter->type = 0;
- if (!kstrtoul(argv[2], 0, &res))
- pkt_filter->type = cpu_to_le32((u32)res);
-
- /* Parse pattern filter offset. */
- pkt_filter->u.pattern.offset = 0;
- if (!kstrtoul(argv[3], 0, &res))
- pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
-
- /* Parse pattern filter mask. */
- mask_size = brcmf_c_pattern_atoh(argv[4],
- (char *)pkt_filter->u.pattern.mask_and_pattern);
-
- /* Parse pattern filter pattern. */
- pattern_size = brcmf_c_pattern_atoh(argv[5],
- (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
-
- if (mask_size != pattern_size) {
- brcmf_err("Mask and pattern not the same size\n");
- goto fail;
- }
-
- pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
- buf_len = offsetof(struct brcmf_pkt_filter_le,
- u.pattern.mask_and_pattern);
- buf_len += mask_size + pattern_size;
-
- err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
- buf_len);
- if (err)
- brcmf_err("Set pkt_filter_add error (%d)\n", err);
-
-fail:
- kfree(arg_org);
-
- kfree(buf);
-}
-
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
-{
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- u8 buf[BRCMF_DCMD_SMLEN];
- struct brcmf_join_pref_params join_pref_params[2];
- char *ptr;
- s32 err;
-
- /* retreive mac address */
- err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
- sizeof(ifp->mac_addr));
- if (err < 0) {
- brcmf_err("Retreiving cur_etheraddr failed, %d\n",
- err);
- goto done;
- }
- memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
-
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- strcpy(buf, "ver");
- err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
- if (err < 0) {
- brcmf_err("Retreiving version information failed, %d\n",
- err);
- goto done;
- }
- ptr = (char *)buf;
- strsep(&ptr, "\n");
-
- /* Print fw version info */
- brcmf_err("Firmware version = %s\n", buf);
-
- /* locate firmware version number for ethtool */
- ptr = strrchr(buf, ' ') + 1;
- strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
-
- /* set mpc */
- err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
- if (err) {
- brcmf_err("failed setting mpc\n");
- goto done;
- }
-
- /*
- * Setup timeout if Beacons are lost and roam is off to report
- * link down
- */
- err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
- BRCMF_DEFAULT_BCN_TIMEOUT);
- if (err) {
- brcmf_err("bcn_timeout error (%d)\n", err);
- goto done;
- }
-
- /* Enable/Disable build-in roaming to allowed ext supplicant to take
- * of romaing
- */
- err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
- if (err) {
- brcmf_err("roam_off error (%d)\n", err);
- goto done;
- }
-
- /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
- join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
- join_pref_params[0].len = 2;
- join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
- join_pref_params[0].band = WLC_BAND_5G;
- join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
- join_pref_params[1].len = 2;
- join_pref_params[1].rssi_gain = 0;
- join_pref_params[1].band = 0;
- err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
- sizeof(join_pref_params));
- if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
-
- /* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
- if (err) {
- brcmf_err("Get event_msgs error (%d)\n", err);
- goto done;
- }
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
- if (err) {
- brcmf_err("Set event_msgs error (%d)\n", err);
- goto done;
- }
-
- /* Setup default scan channel time */
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
- if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
- err);
- goto done;
- }
-
- /* Setup default scan unassoc time */
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
- if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
- err);
- goto done;
- }
-
- /* Setup packet filter */
- brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
- brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
- 0, true);
-
- /* do bus specific preinit here */
- err = brcmf_bus_preinit(ifp->drvr->bus_if);
-done:
- return err;
-}
-
-#ifdef CONFIG_BRCM_TRACING
-void __brcmf_err(const char *func, const char *fmt, ...)
-{
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
-
- va_start(args, fmt);
- vaf.va = &args;
- pr_err("%s: %pV", func, &vaf);
- trace_brcmf_err(func, &vaf);
- va_end(args);
-}
-#endif
-#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
-void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
-{
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
-
- va_start(args, fmt);
- vaf.va = &args;
- if (brcmf_msg_level & level)
- pr_debug("%s %pV", func, &vaf);
- trace_brcmf_dbg(level, func, &vaf);
- va_end(args);
-}
-#endif
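
The removed common code above ends with the old packet-filter helpers; brcmf_c_pattern_atoh() is the piece that turned a "0x..."-style hex string from the default filter into raw mask/pattern bytes. A minimal userspace sketch of that parsing logic, with invented names and simplified error handling (not the driver function itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int pattern_atoh(const char *src, unsigned char *dst, size_t dstlen)
{
        size_t i, len;

        if (strncmp(src, "0x", 2) && strncmp(src, "0X", 2))
                return -1;              /* must start with 0x */
        src += 2;
        len = strlen(src);
        if (len % 2 || len / 2 > dstlen)
                return -1;              /* even nibble count, must fit */
        for (i = 0; i < len / 2; i++) {
                char byte[3] = { src[2 * i], src[2 * i + 1], '\0' };
                char *end;

                dst[i] = (unsigned char)strtoul(byte, &end, 16);
                if (*end)
                        return -1;      /* non-hex character */
        }
        return (int)i;                  /* number of bytes written */
}

int main(void)
{
        unsigned char buf[16];
        int n = pattern_atoh("0x01001122", buf, sizeof(buf));

        for (int i = 0; i < n; i++)
                printf("%02x ", buf[i]);
        printf("(%d bytes)\n", n);
        return 0;
}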
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index aed53acef456..defb7a44e0bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -17,18 +17,13 @@
#include <linux/netdevice.h>
#include <brcm_hw_ids.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "fwil.h"
#include "feature.h"
/*
- * firmware error code received if iovar is unsupported.
- */
-#define EBRCMF_FEAT_UNSUPPORTED 23
-
-/*
* expand feature list to array of feature strings.
*/
#define BRCMF_FEAT_DEF(_f) \
@@ -102,6 +97,28 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
}
}
+/**
+ * brcmf_feat_iovar_int_set() - determine feature through iovar set.
+ *
+ * @ifp: interface to query.
+ * @id: feature id.
+ * @name: iovar name.
+ * @val: value to set.
+ */
+static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp,
+ enum brcmf_feat_id id, char *name, u32 val)
+{
+ int err;
+
+ err = brcmf_fil_iovar_int_set(ifp, name, val);
+ if (err == 0) {
+ brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+ ifp->drvr->feat_flags |= BIT(id);
+ } else {
+ brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+ brcmf_feat_names[id], err);
+ }
+}
+
void brcmf_feat_attach(struct brcmf_pub *drvr)
{
struct brcmf_if *ifp = drvr->iflist[0];
@@ -109,6 +126,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+ brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
/* set chip related quirks */
switch (drvr->bus_if->chip) {
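
The new brcmf_feat_iovar_int_set() in the hunk above probes a firmware feature by attempting to set an iovar and only flagging the feature when the set succeeds. A standalone sketch of that probe-by-set pattern; the fake_fw_set() stub and its error value stand in for the real firmware call and are assumptions, not driver code:

#include <stdio.h>
#include <string.h>

enum feat_id { FEAT_MBSS, FEAT_MCHAN, FEAT_WOWL, FEAT_MAX };

static unsigned int feat_flags;

/* pretend the firmware only understands "mbss" */
static int fake_fw_set(const char *name, unsigned int val)
{
        return strcmp(name, "mbss") ? -23 /* unsupported */ : 0;
}

static void feat_probe_by_set(enum feat_id id, const char *name,
                              unsigned int val)
{
        int err = fake_fw_set(name, val);

        if (err == 0)
                feat_flags |= 1u << id;         /* feature available */
        else
                printf("%s: probe failed (%d)\n", name, err);
}

int main(void)
{
        feat_probe_by_set(FEAT_MBSS, "mbss", 0);
        feat_probe_by_set(FEAT_MCHAN, "mchan", 0);
        printf("feat_flags = 0x%x\n", feat_flags);
        return 0;
}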
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
index b9a796d0a44d..f5832e077bb7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -22,6 +22,7 @@
* MCHAN: multi-channel for concurrent P2P.
*/
#define BRCMF_FEAT_LIST \
+ BRCMF_FEAT_DEF(MBSS) \
BRCMF_FEAT_DEF(MCHAN) \
BRCMF_FEAT_DEF(WOWL)
/*
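
BRCMF_FEAT_LIST is an X-macro: the same BRCMF_FEAT_DEF() entries are expanded once into the feature enum and once into the name table, so adding MBSS in this one place updates both. A shortened, self-contained illustration of the pattern (names mirror the driver, but the expansion shown is a sketch rather than the exact driver macros):

#include <stdio.h>

#define FEAT_LIST \
        FEAT_DEF(MBSS) \
        FEAT_DEF(MCHAN) \
        FEAT_DEF(WOWL)

#define FEAT_DEF(f) FEAT_ ## f,
enum feat_id { FEAT_LIST FEAT_LAST };   /* expands to the enum */
#undef FEAT_DEF

#define FEAT_DEF(f) #f,
static const char * const feat_names[] = { FEAT_LIST }; /* matching names */
#undef FEAT_DEF

int main(void)
{
        for (int i = 0; i < FEAT_LAST; i++)
                printf("feature %d: %s\n", i, feat_names[i]);
        return 0;
}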
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 8ea9f283d2b8..1ff787d1a36b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -20,7 +20,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include "dhd_dbg.h"
+#include "debug.h"
#include "firmware.h"
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
@@ -262,8 +262,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- if (fwctx->code)
- release_firmware(fwctx->code);
+ release_firmware(fwctx->code);
device_release_driver(fwctx->dev);
kfree(fwctx);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 1faa929f5fff..44f3a84d1999 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -19,9 +19,9 @@
#include <linux/etherdevice.h>
#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 44fc85f68f7a..ec62492ffa69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -18,8 +18,8 @@
#include "brcmu_wifi.h"
#include "brcmu_utils.h"
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwsignal.h"
#include "fweh.h"
@@ -221,10 +221,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
- if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
- brcmf_fws_del_interface(ifp);
- brcmf_del_if(drvr, ifevent->bssidx);
- }
+ if (ifp && ifevent->action == BRCMF_E_IF_DEL)
+ brcmf_remove_interface(drvr, ifevent->bssidx);
}
/**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index ded328f80cd1..03f2c406a17b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -22,9 +22,9 @@
#include <linux/netdevice.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwil.h"
#include "proto.h"
@@ -32,6 +32,76 @@
#define MAX_HEX_DUMP_LEN 64
+#ifdef DEBUG
+static const char * const brcmf_fil_errstr[] = {
+ "BCME_OK",
+ "BCME_ERROR",
+ "BCME_BADARG",
+ "BCME_BADOPTION",
+ "BCME_NOTUP",
+ "BCME_NOTDOWN",
+ "BCME_NOTAP",
+ "BCME_NOTSTA",
+ "BCME_BADKEYIDX",
+ "BCME_RADIOOFF",
+ "BCME_NOTBANDLOCKED",
+ "BCME_NOCLK",
+ "BCME_BADRATESET",
+ "BCME_BADBAND",
+ "BCME_BUFTOOSHORT",
+ "BCME_BUFTOOLONG",
+ "BCME_BUSY",
+ "BCME_NOTASSOCIATED",
+ "BCME_BADSSIDLEN",
+ "BCME_OUTOFRANGECHAN",
+ "BCME_BADCHAN",
+ "BCME_BADADDR",
+ "BCME_NORESOURCE",
+ "BCME_UNSUPPORTED",
+ "BCME_BADLEN",
+ "BCME_NOTREADY",
+ "BCME_EPERM",
+ "BCME_NOMEM",
+ "BCME_ASSOCIATED",
+ "BCME_RANGE",
+ "BCME_NOTFOUND",
+ "BCME_WME_NOT_ENABLED",
+ "BCME_TSPEC_NOTFOUND",
+ "BCME_ACM_NOTSUPPORTED",
+ "BCME_NOT_WME_ASSOCIATION",
+ "BCME_SDIO_ERROR",
+ "BCME_DONGLE_DOWN",
+ "BCME_VERSION",
+ "BCME_TXFAIL",
+ "BCME_RXFAIL",
+ "BCME_NODEVICE",
+ "BCME_NMODE_DISABLED",
+ "BCME_NONRESIDENT",
+ "BCME_SCANREJECT",
+ "BCME_USAGE_ERROR",
+ "BCME_IOCTL_ERROR",
+ "BCME_SERIAL_PORT_ERR",
+ "BCME_DISABLED",
+ "BCME_DECERR",
+ "BCME_ENCERR",
+ "BCME_MICERR",
+ "BCME_REPLAY",
+ "BCME_IE_NOTFOUND",
+};
+
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+ if (err >= ARRAY_SIZE(brcmf_fil_errstr))
+ return "(unknown)";
+
+ return brcmf_fil_errstr[err];
+}
+#else
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+ return "";
+}
+#endif /* DEBUG */
static s32
brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
@@ -52,11 +122,11 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
if (err >= 0)
- err = 0;
- else
- brcmf_dbg(FIL, "Failed err=%d\n", err);
+ return 0;
- return err;
+ brcmf_dbg(FIL, "Failed: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-err)), err);
+ return -EBADE;
}
s32
@@ -66,7 +136,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -84,7 +154,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
mutex_lock(&ifp->drvr->proto_block);
err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
- brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -101,7 +171,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
__le32 data_le = cpu_to_le32(data);
mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -118,7 +188,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
mutex_unlock(&ifp->drvr->proto_block);
*data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
return err;
}
@@ -154,7 +224,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
mutex_lock(&drvr->proto_block);
- brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -194,7 +264,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
brcmf_err("Creating iovar failed\n");
}
- brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -277,7 +347,8 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
mutex_lock(&drvr->proto_block);
- brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+ ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -316,7 +387,8 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
err = -EPERM;
brcmf_err("Creating bsscfg failed\n");
}
- brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+ ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
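
The fwil.c hunk above adds a firmware status table and brcmf_fil_get_errstr(), which indexes the table with the positive magnitude of the (negative) firmware status and falls back to "(unknown)" when out of range. A compact userspace sketch of that lookup, with the table shortened for illustration:

#include <stdio.h>

static const char * const fil_errstr[] = {
        "BCME_OK", "BCME_ERROR", "BCME_BADARG", "BCME_BADOPTION",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *fil_get_errstr(unsigned int err)
{
        if (err >= ARRAY_SIZE(fil_errstr))
                return "(unknown)";
        return fil_errstr[err];
}

int main(void)
{
        int statuses[] = { -2, -40 };   /* negative codes from firmware */

        for (unsigned int i = 0; i < ARRAY_SIZE(statuses); i++)
                printf("Failed: %s (%d)\n",
                       fil_get_errstr((unsigned int)(-statuses[i])),
                       statuses[i]);
        return 0;
}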
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 5ff5cd0bb032..50891c02c4c1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -55,59 +55,63 @@
/* WOWL bits */
/* Wakeup on Magic packet: */
-#define WL_WOWL_MAGIC (1 << 0)
+#define BRCMF_WOWL_MAGIC (1 << 0)
/* Wakeup on Netpattern */
-#define WL_WOWL_NET (1 << 1)
+#define BRCMF_WOWL_NET (1 << 1)
/* Wakeup on loss-of-link due to Disassoc/Deauth: */
-#define WL_WOWL_DIS (1 << 2)
+#define BRCMF_WOWL_DIS (1 << 2)
/* Wakeup on retrograde TSF: */
-#define WL_WOWL_RETR (1 << 3)
+#define BRCMF_WOWL_RETR (1 << 3)
/* Wakeup on loss of beacon: */
-#define WL_WOWL_BCN (1 << 4)
+#define BRCMF_WOWL_BCN (1 << 4)
/* Wakeup after test: */
-#define WL_WOWL_TST (1 << 5)
+#define BRCMF_WOWL_TST (1 << 5)
/* Wakeup after PTK refresh: */
-#define WL_WOWL_M1 (1 << 6)
+#define BRCMF_WOWL_M1 (1 << 6)
/* Wakeup after receipt of EAP-Identity Req: */
-#define WL_WOWL_EAPID (1 << 7)
+#define BRCMF_WOWL_EAPID (1 << 7)
/* Wakeind via PME(0) or GPIO(1): */
-#define WL_WOWL_PME_GPIO (1 << 8)
+#define BRCMF_WOWL_PME_GPIO (1 << 8)
/* need tkip phase 1 key to be updated by the driver: */
-#define WL_WOWL_NEEDTKIP1 (1 << 9)
+#define BRCMF_WOWL_NEEDTKIP1 (1 << 9)
/* enable wakeup if GTK fails: */
-#define WL_WOWL_GTK_FAILURE (1 << 10)
+#define BRCMF_WOWL_GTK_FAILURE (1 << 10)
/* support extended magic packets: */
-#define WL_WOWL_EXTMAGPAT (1 << 11)
+#define BRCMF_WOWL_EXTMAGPAT (1 << 11)
/* support ARP/NS/keepalive offloading: */
-#define WL_WOWL_ARPOFFLOAD (1 << 12)
+#define BRCMF_WOWL_ARPOFFLOAD (1 << 12)
/* read protocol version for EAPOL frames: */
-#define WL_WOWL_WPA2 (1 << 13)
+#define BRCMF_WOWL_WPA2 (1 << 13)
/* If the bit is set, use key rotaton: */
-#define WL_WOWL_KEYROT (1 << 14)
+#define BRCMF_WOWL_KEYROT (1 << 14)
/* If the bit is set, frm received was bcast frame: */
-#define WL_WOWL_BCAST (1 << 15)
+#define BRCMF_WOWL_BCAST (1 << 15)
/* If the bit is set, scan offload is enabled: */
-#define WL_WOWL_SCANOL (1 << 16)
+#define BRCMF_WOWL_SCANOL (1 << 16)
/* Wakeup on tcpkeep alive timeout: */
-#define WL_WOWL_TCPKEEP_TIME (1 << 17)
+#define BRCMF_WOWL_TCPKEEP_TIME (1 << 17)
/* Wakeup on mDNS Conflict Resolution: */
-#define WL_WOWL_MDNS_CONFLICT (1 << 18)
+#define BRCMF_WOWL_MDNS_CONFLICT (1 << 18)
/* Wakeup on mDNS Service Connect: */
-#define WL_WOWL_MDNS_SERVICE (1 << 19)
+#define BRCMF_WOWL_MDNS_SERVICE (1 << 19)
/* tcp keepalive got data: */
-#define WL_WOWL_TCPKEEP_DATA (1 << 20)
+#define BRCMF_WOWL_TCPKEEP_DATA (1 << 20)
/* Firmware died in wowl mode: */
-#define WL_WOWL_FW_HALT (1 << 21)
+#define BRCMF_WOWL_FW_HALT (1 << 21)
/* Enable detection of radio button changes: */
-#define WL_WOWL_ENAB_HWRADIO (1 << 22)
+#define BRCMF_WOWL_ENAB_HWRADIO (1 << 22)
/* Offloads detected MIC failure(s): */
-#define WL_WOWL_MIC_FAIL (1 << 23)
+#define BRCMF_WOWL_MIC_FAIL (1 << 23)
/* Wakeup in Unassociated state (Net/Magic Pattern): */
-#define WL_WOWL_UNASSOC (1 << 24)
+#define BRCMF_WOWL_UNASSOC (1 << 24)
/* Wakeup if received matched secured pattern: */
-#define WL_WOWL_SECURE (1 << 25)
+#define BRCMF_WOWL_SECURE (1 << 25)
/* Link Down indication in WoWL mode: */
-#define WL_WOWL_LINKDOWN (1 << 31)
+#define BRCMF_WOWL_LINKDOWN (1 << 31)
+
+#define BRCMF_WOWL_MAXPATTERNS 8
+#define BRCMF_WOWL_MAXPATTERNSIZE 128
+
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
@@ -124,6 +128,12 @@ enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_DEV,
};
+enum brcmf_wowl_pattern_type {
+ BRCMF_WOWL_PATTERN_TYPE_BITMAP = 0,
+ BRCMF_WOWL_PATTERN_TYPE_ARP,
+ BRCMF_WOWL_PATTERN_TYPE_NA
+};
+
struct brcmf_fil_p2p_if_le {
u8 addr[ETH_ALEN];
__le16 type;
@@ -484,4 +494,35 @@ struct brcmf_rx_mgmt_data {
__be32 rate;
};
+/**
+ * struct brcmf_fil_wowl_pattern_le - wowl pattern configuration struct.
+ *
+ * @cmd: "add", "del" or "clr".
+ * @masksize: Size of the mask in #of bytes
+ * @offset: Pattern byte offset in packet
+ * @patternoffset: Offset of start of pattern. Starting from field masksize.
+ * @patternsize: Size of the pattern itself in #of bytes
+ * @id: id
+ * @reasonsize: Size of the wakeup reason code
+ * @type: Type of pattern (enum brcmf_wowl_pattern_type)
+ */
+struct brcmf_fil_wowl_pattern_le {
+ u8 cmd[4];
+ __le32 masksize;
+ __le32 offset;
+ __le32 patternoffset;
+ __le32 patternsize;
+ __le32 id;
+ __le32 reasonsize;
+ __le32 type;
+ /* u8 mask[] - Mask follows the structure above */
+ /* u8 pattern[] - Pattern follows the mask is at 'patternoffset' */
+};
+
+struct brcmf_mbss_ssid_le {
+ __le32 bsscfgidx;
+ __le32 SSID_len;
+ unsigned char SSID[32];
+};
+
#endif /* FWIL_TYPES_H_ */
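
struct brcmf_fil_wowl_pattern_le above is a fixed header followed by a variable-length mask and pattern. The sketch below shows one plausible way to build such a buffer in plain host-endian C; the offset convention (counted from the start of the buffer, whereas the firmware field is documented relative to the masksize field) and the helper names are assumptions for illustration, not taken from the driver code shown here:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wowl_pattern_hdr {
        char cmd[4];            /* "add", "del" or "clr" */
        uint32_t masksize;
        uint32_t offset;        /* byte offset of pattern in the packet */
        uint32_t patternoffset; /* where the pattern starts in this buffer */
        uint32_t patternsize;
        uint32_t id;
        uint32_t reasonsize;
        uint32_t type;
};

static void *build_wowl_pattern(const uint8_t *mask, uint32_t masksize,
                                const uint8_t *pattern, uint32_t patternsize,
                                size_t *buflen)
{
        struct wowl_pattern_hdr *hdr;
        uint8_t *buf;

        *buflen = sizeof(*hdr) + masksize + patternsize;
        buf = calloc(1, *buflen);
        if (!buf)
                return NULL;

        hdr = (struct wowl_pattern_hdr *)buf;
        memcpy(hdr->cmd, "add", 4);
        hdr->masksize = masksize;
        hdr->patternsize = patternsize;
        hdr->patternoffset = sizeof(*hdr) + masksize;   /* pattern follows mask */
        memcpy(buf + sizeof(*hdr), mask, masksize);
        memcpy(buf + hdr->patternoffset, pattern, patternsize);
        return buf;
}

int main(void)
{
        uint8_t mask[] = { 0xff, 0xff };
        uint8_t pattern[] = { 0x08, 0x06 };     /* e.g. an ARP ethertype */
        size_t len;
        void *buf = build_wowl_pattern(mask, sizeof(mask),
                                       pattern, sizeof(pattern), &len);

        printf("iovar payload is %zu bytes\n", len);
        free(buf);
        return 0;
}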
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 183f08d7fc8c..f0dda0ecd23b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -26,15 +26,15 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
#include "fwil.h"
#include "fwil_types.h"
#include "fweh.h"
#include "fwsignal.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "proto.h"
/**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 11cc051f97cd..456944a6a2db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -24,13 +24,13 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
-#include "dhd_bus.h"
+#include "bus.h"
#include "tracepoint.h"
@@ -518,8 +518,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
len : msgbuf->ioctl_resp_ret_len);
}
- if (skb)
- brcmu_pkt_buf_free_skb(skb);
+ brcmu_pkt_buf_free_skb(skb);
return msgbuf->ioctl_resp_status;
}
@@ -1081,8 +1080,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
{
struct brcmf_if *ifp;
+ /* The ifidx is the index used to map to the matching netdev/ifp. For
+ * events this is easy because they carry the bssidx, which maps
+ * 1-on-1 to the netdev/ifp. For data frames only the ifidx is
+ * received. bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0.
+ */
+ if (ifidx)
+ (ifidx)++;
ifp = msgbuf->drvr->iflist[ifidx];
if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
brcmu_pkt_buf_free_skb(skb);
return;
}
@@ -1355,6 +1363,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+ count = count * sizeof(unsigned long);
msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
goto fail;
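
The msgbuf.c change above fixes the flow_map allocation: BITS_TO_LONGS() returns a count of longs, not bytes, so the size passed to kzalloc() has to be scaled by sizeof(unsigned long). A small userspace demonstration of the sizing (macros re-implemented locally for the example):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned int nrof_flowrings = 264;
        size_t count = BITS_TO_LONGS(nrof_flowrings);
        unsigned long *flow_map;

        /* before the fix only 'count' bytes were allocated */
        printf("longs needed: %zu, bytes needed: %zu\n",
               count, count * sizeof(unsigned long));

        flow_map = calloc(count, sizeof(*flow_map));
        if (!flow_map)
                return 1;
        free(flow_map);
        return 0;
}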
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index 927bffd5be64..c824570ddea3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -21,8 +21,8 @@
#include <linux/mmc/sdio_func.h>
#include <defs.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
+#include "debug.h"
+#include "sdio.h"
void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d54c58a32faa..effb48ebd864 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -21,12 +21,12 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <defs.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
+#include "core.h"
+#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
/* parameters used for p2p escan */
#define P2PAPI_SCAN_NPROBES 1
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 16fef3382019..905991fdb7b1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -30,8 +30,8 @@
#include <brcmu_wifi.h>
#include <brcm_hw_ids.h>
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "debug.h"
+#include "bus.h"
#include "commonring.h"
#include "msgbuf.h"
#include "pcie.h"
@@ -798,12 +798,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_dbg(PCIE, "Enter\n");
/* is it a v1 or v2 implementation */
devinfo->irq_requested = false;
+ pci_enable_msi(pdev);
if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
if (request_threaded_irq(pdev->irq,
brcmf_pcie_quick_check_isr_v1,
brcmf_pcie_isr_thread_v1,
IRQF_SHARED, "brcmf_pcie_intr",
devinfo)) {
+ pci_disable_msi(pdev);
brcmf_err("Failed to request IRQ %d\n", pdev->irq);
return -EIO;
}
@@ -813,6 +815,7 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_isr_thread_v2,
IRQF_SHARED, "brcmf_pcie_intr",
devinfo)) {
+ pci_disable_msi(pdev);
brcmf_err("Failed to request IRQ %d\n", pdev->irq);
return -EIO;
}
@@ -839,6 +842,7 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
return;
devinfo->irq_requested = false;
free_irq(pdev->irq, devinfo);
+ pci_disable_msi(pdev);
msleep(50);
count = 0;
@@ -1857,6 +1861,8 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
index 62b940723339..26b68c367f57 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -20,9 +20,9 @@
#include <linux/netdevice.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "proto.h"
#include "bcdc.h"
#include "msgbuf.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d20d4e6f391a..0b0d51a61060 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -40,7 +40,7 @@
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>
#include <soc.h>
-#include "sdio_host.h"
+#include "sdio.h"
#include "chip.h"
#include "firmware.h"
@@ -96,8 +96,8 @@ struct rte_console {
#endif /* DEBUG */
#include <chipcommon.h>
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "bus.h"
+#include "debug.h"
#include "tracepoint.h"
#define TXQLEN 2048 /* bulk tx queue length */
@@ -2762,6 +2762,48 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
return &bus->txq;
}
+static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
+{
+ struct sk_buff *p;
+ int eprec = -1; /* precedence to evict from */
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ brcmu_pktq_penq(q, prec, pkt);
+ return true;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec)) {
+ eprec = prec;
+ } else if (pktq_full(q)) {
+ p = brcmu_pktq_peek_tail(q, &eprec);
+ if (eprec > prec)
+ return false;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ if (eprec == prec)
+ return false; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = brcmu_pktq_pdeq_tail(q, eprec);
+ if (p == NULL)
+ brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
+ brcmu_pkt_buf_free_skb(p);
+ }
+
+ /* Enqueue */
+ p = brcmu_pktq_penq(q, prec, pkt);
+ if (p == NULL)
+ brcmf_err("brcmu_pktq_penq() failed\n");
+
+ return p != NULL;
+}
+
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
@@ -2787,7 +2829,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
spin_lock_bh(&bus->txq_lock);
/* reset bus_flags in packet cb */
*(u16 *)(pkt->cb) = 0;
- if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
+ if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
@@ -2797,7 +2839,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
if (pktq_len(&bus->txq) >= TXHI) {
bus->txoff = true;
- brcmf_txflowblock(bus->sdiodev->dev, true);
+ brcmf_txflowblock(dev, true);
}
spin_unlock_bh(&bus->txq_lock);
@@ -3948,6 +3990,7 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.txctl = brcmf_sdio_bus_txctl,
.rxctl = brcmf_sdio_bus_rxctl,
.gettxq = brcmf_sdio_bus_gettxq,
+ .wowl_config = brcmf_sdio_wowl_config
};
static void brcmf_sdio_firmware_callback(struct device *dev,
@@ -4074,7 +4117,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* platform specific configuration:
* alignments must be at least 4 bytes for ADMA
- */
+ */
bus->head_align = ALIGNMENT;
bus->sgentry_align = ALIGNMENT;
if (sdiodev->pdata) {
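
brcmf_sdio_prec_enq() added above enqueues directly while there is room, otherwise evicts from the tail of a lower-or-equal precedence and refuses the packet when its own precedence is the one overflowing. A reduced standalone model of that policy, with counters standing in for the real sk_buff queues:

#include <stdbool.h>
#include <stdio.h>

#define NPREC           4
#define PREC_MAX        8       /* per-precedence limit */
#define TOTAL_MAX       16      /* whole-queue limit */

struct pktq {
        int len[NPREC];
        int total;
};

static int lowest_nonempty_prec(const struct pktq *q)
{
        for (int p = 0; p < NPREC; p++)
                if (q->len[p])
                        return p;
        return -1;
}

static bool prec_enq(struct pktq *q, int prec)
{
        if (q->len[prec] < PREC_MAX && q->total < TOTAL_MAX) {
                q->len[prec]++;
                q->total++;
                return true;            /* fast path: room available */
        }

        int eprec;                      /* precedence to evict from */
        if (q->len[prec] >= PREC_MAX) {
                eprec = prec;
        } else {                        /* total queue full */
                eprec = lowest_nonempty_prec(q);
                if (eprec > prec)
                        return false;   /* only higher-priority traffic left */
        }

        if (eprec == prec)
                return false;           /* refuse the newer, incoming packet */

        q->len[eprec]--;                /* evict tail of lower precedence */
        q->len[prec]++;
        return true;
}

int main(void)
{
        struct pktq q = { { 0 }, 0 };

        for (int i = 0; i < PREC_MAX; i++) {
                prec_enq(&q, 0);
                prec_enq(&q, 1);
        }                               /* total queue is now full */
        printf("high prec (2): %s\n", prec_enq(&q, 2) ? "accepted" : "dropped");
        printf("low prec (0):  %s\n", prec_enq(&q, 0) ? "accepted" : "dropped");
        return 0;
}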
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
index f2d06cae366a..8eb42620129c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCM_SDH_H_
-#define _BRCM_SDH_H_
+#ifndef BRCMFMAC_SDIO_H
+#define BRCMFMAC_SDIO_H
#include <linux/skbuff.h>
#include <linux/firmware.h>
@@ -186,6 +186,7 @@ struct brcmf_sdio_dev {
struct sg_table sgtable;
char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+ bool wowl_enabled;
};
/* sdio core registers */
@@ -334,5 +335,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus);
void brcmf_sdio_isr(struct brcmf_sdio *bus);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
-#endif /* _BRCM_SDH_H_ */
+#endif /* BRCMFMAC_SDIO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
index b505db48c60d..a10f35c5eb3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -19,4 +19,19 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "tracepoint.h"
+
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ pr_err("%s: %pV", func, &vaf);
+ trace_brcmf_err(func, &vaf);
+ va_end(args);
+}
+
#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 875d1142c8b0..4572defc280f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -23,13 +23,12 @@
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>
#include <brcmu_wifi.h>
-#include <dhd_bus.h>
-#include <dhd_dbg.h>
-
+#include "bus.h"
+#include "debug.h"
#include "firmware.h"
-#include "usb_rdl.h"
#include "usb.h"
+
#define IOCTL_RESP_TIMEOUT 2000
#define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
@@ -49,6 +48,71 @@
#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
#define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin"
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_OFFSET 3 /* Max number of file offsets */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx holds uncompressed img */
+#define TRX_RDL_CHUNK 1500 /* size of each dl transfer */
+#define TRX_OFFSETS_DLFWLEN_IDX 0
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
+ * event to occur in 2 seconds. It is the
+ * responsibility of the downloaded code to
+ * clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt */
+#define DL_READY 1 /* hdr was good, waiting for more of the
+ * compressed image
+ */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
+ * value
+ */
+#define DL_IMAGE_TOOBIG 7 /* firmware image too big */
+
+
+struct trx_header_le {
+ __le32 magic; /* "HDR0" */
+ __le32 len; /* Length of file including header */
+ __le32 crc32; /* CRC from flag_version to end of file */
+ __le32 flag_version; /* 0:15 flags, 16:31 version */
+ __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
+ * header
+ */
+};
+
+struct rdl_state_le {
+ __le32 state;
+ __le32 bytes;
+};
+
+struct bootrom_id_le {
+ __le32 chip; /* Chip id */
+ __le32 chiprev; /* Chip rev */
+ __le32 ramsize; /* Size of RAM */
+ __le32 remapbase; /* Current remap base address */
+ __le32 boardtype; /* Type of board */
+ __le32 boardrev; /* Board revision */
+};
+
struct brcmf_usb_image {
struct list_head list;
s8 *fwname;
@@ -93,6 +157,8 @@ struct brcmf_usbdev_info {
u8 ifnum;
struct urb *bulk_urb; /* used for FW download */
+
+ bool wowl_enabled;
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -600,6 +666,16 @@ static int brcmf_usb_up(struct device *dev)
return 0;
}
+static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
+{
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+ if (devinfo->bulk_urb)
+ usb_kill_urb(devinfo->bulk_urb);
+ brcmf_usb_free_q(&devinfo->tx_postq, true);
+ brcmf_usb_free_q(&devinfo->rx_postq, true);
+}
+
static void brcmf_usb_down(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -613,14 +689,7 @@ static void brcmf_usb_down(struct device *dev)
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
- if (devinfo->ctl_urb)
- usb_kill_urb(devinfo->ctl_urb);
-
- if (devinfo->bulk_urb)
- usb_kill_urb(devinfo->bulk_urb);
- brcmf_usb_free_q(&devinfo->tx_postq, true);
-
- brcmf_usb_free_q(&devinfo->rx_postq, true);
+ brcmf_cancel_all_urbs(devinfo);
}
static void
@@ -785,7 +854,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
- bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
+ bulkchunk = kmalloc(TRX_RDL_CHUNK, GFP_ATOMIC);
if (bulkchunk == NULL) {
err = -ENOMEM;
goto fail;
@@ -812,10 +881,10 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
/* Wait until the usb device reports it received all
* the bytes we sent */
if ((rdlbytes == sent) && (rdlbytes != dllen)) {
- if ((dllen-sent) < RDL_CHUNK)
+ if ((dllen-sent) < TRX_RDL_CHUNK)
sendlen = dllen-sent;
else
- sendlen = RDL_CHUNK;
+ sendlen = TRX_RDL_CHUNK;
/* simply avoid having to send a ZLP by ensuring we
* never have an even
@@ -980,21 +1049,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
kfree(devinfo->rx_reqs);
}
-#define TRX_MAGIC 0x30524448 /* "HDR0" */
-#define TRX_VERSION 1 /* Version 1 */
-#define TRX_MAX_LEN 0x3B0000 /* Max length */
-#define TRX_NO_HEADER 1 /* Do not write TRX header */
-#define TRX_MAX_OFFSET 3 /* Max number of individual files */
-#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */
-
-struct trx_header_le {
- __le32 magic; /* "HDR0" */
- __le32 len; /* Length of file including header */
- __le32 crc32; /* CRC from flag_version to end of file */
- __le32 flag_version; /* 0:15 flags, 16:31 version */
- __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
- * header */
-};
static int check_file(const u8 *headers)
{
@@ -1096,11 +1150,24 @@ error:
return NULL;
}
+static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+ brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled);
+ devinfo->wowl_enabled = enabled;
+ if (enabled)
+ device_set_wakeup_enable(devinfo->dev, true);
+ else
+ device_set_wakeup_enable(devinfo->dev, false);
+}
+
static struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.stop = brcmf_usb_down,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
+ .wowl_config = brcmf_usb_wowl_config,
};
static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
@@ -1188,6 +1255,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->ops = &brcmf_usb_bus_ops;
bus->proto_type = BRCMF_PROTO_BCDC;
bus->always_use_fws_queue = true;
+#ifdef CONFIG_PM
+ bus->wowl_supported = true;
+#endif
if (!brcmf_usb_dlneeded(devinfo)) {
ret = brcmf_usb_bus_setup(devinfo);
@@ -1341,7 +1411,10 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
brcmf_dbg(USB, "Enter\n");
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
- brcmf_detach(&usb->dev);
+ if (devinfo->wowl_enabled)
+ brcmf_cancel_all_urbs(devinfo);
+ else
+ brcmf_detach(&usb->dev);
return 0;
}
@@ -1354,7 +1427,12 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- return brcmf_usb_bus_setup(devinfo);
+ if (!devinfo->wowl_enabled)
+ return brcmf_usb_bus_setup(devinfo);
+
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP;
+ brcmf_usb_rx_fill_all(devinfo);
+ return 0;
}
static int brcmf_usb_reset_resume(struct usb_interface *intf)
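
The TRX definitions moved into usb.c above describe the firmware container: an "HDR0" magic, a flags/version word and partition offsets. A host-side sketch of validating such a header (fabricated buffer and helper names; this is not the driver's check_file() routine):

#include <stdint.h>
#include <stdio.h>

#define TRX_MAGIC               0x30524448u     /* "HDR0", little endian */
#define TRX_UNCOMP_IMAGE        0x20u
#define TRX_MAX_OFFSET          3

struct trx_header {
        uint32_t magic;
        uint32_t len;           /* length of file including header */
        uint32_t crc32;
        uint32_t flag_version;  /* 0:15 flags, 16:31 version */
        uint32_t offsets[TRX_MAX_OFFSET];
};

static uint32_t le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int trx_check(const uint8_t *buf)
{
        uint32_t magic = le32(buf);
        uint32_t flags = le32(buf + 12) & 0xffff;

        if (magic != TRX_MAGIC)
                return -1;              /* not a TRX container */
        if (!(flags & TRX_UNCOMP_IMAGE))
                return -1;              /* only uncompressed images handled */
        return 0;
}

int main(void)
{
        uint8_t hdr[sizeof(struct trx_header)] = {
                'H', 'D', 'R', '0',     /* magic */
                0, 0, 0, 0,             /* len */
                0, 0, 0, 0,             /* crc32 */
                0x20, 0, 1, 0,          /* flags = UNCOMP, version = 1 */
        };

        printf("TRX header %s\n", trx_check(hdr) ? "rejected" : "accepted");
        return 0;
}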
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h b/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
deleted file mode 100644
index 0a35c51c3da2..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _USB_RDL_H
-#define _USB_RDL_H
-
-/* Control messages: bRequest values */
-#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
-#define DL_CHECK_CRC 1 /* currently unused */
-#define DL_GO 2 /* execute downloaded image */
-#define DL_START 3 /* initialize dl state */
-#define DL_REBOOT 4 /* reboot the device in 2 seconds */
-#define DL_GETVER 5 /* returns the bootrom_id_t struct */
-#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
- * event to occur in 2 seconds. It is the
- * responsibility of the downloaded code to
- * clear this event
- */
-#define DL_EXEC 7 /* jump to a supplied address */
-#define DL_RESETCFG 8 /* To support single enum on dongle
- * - Not used by bootloader
- */
-#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
- * if resp unavailable
- */
-
-/* states */
-#define DL_WAITING 0 /* waiting to rx first pkt */
-#define DL_READY 1 /* hdr was good, waiting for more of the
- * compressed image */
-#define DL_BAD_HDR 2 /* hdr was corrupted */
-#define DL_BAD_CRC 3 /* compressed image was corrupted */
-#define DL_RUNNABLE 4 /* download was successful,waiting for go cmd */
-#define DL_START_FAIL 5 /* failed to initialize correctly */
-#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
- * value */
-#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START
- * for rdl) */
-
-struct rdl_state_le {
- __le32 state;
- __le32 bytes;
-};
-
-struct bootrom_id_le {
- __le32 chip; /* Chip id */
- __le32 chiprev; /* Chip rev */
- __le32 ramsize; /* Size of RAM */
- __le32 remapbase; /* Current remap base address */
- __le32 boardtype; /* Type of board */
- __le32 boardrev; /* Board revision */
-};
-
-#define RDL_CHUNK 1500 /* size of each dl transfer */
-
-#define TRX_OFFSETS_DLFWLEN_IDX 0
-#define TRX_OFFSETS_JUMPTO_IDX 1
-#define TRX_OFFSETS_NVM_LEN_IDX 2
-
-#define TRX_OFFSETS_DLBASE_IDX 0
-
-#endif /* _USB_RDL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 5960d827508c..50cdf7090198 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -20,10 +20,10 @@
#include <brcmu_wifi.h>
#include "fwil_types.h"
-#include "dhd.h"
+#include "core.h"
#include "p2p.h"
-#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "debug.h"
+#include "cfg80211.h"
#include "vendor.h"
#include "fwil.h"
@@ -31,8 +31,8 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int len)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
struct sk_buff *reply;
int ret, payload, ret_len;
@@ -42,6 +42,9 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
cmdhdr->len);
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ ifp = vif->ifp;
+
len -= sizeof(struct brcmf_vndr_dcmd_hdr);
ret_len = cmdhdr->len;
if (ret_len > 0 || len > 0) {
@@ -63,11 +66,11 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
}
if (cmdhdr->set)
- ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), cmdhdr->cmd,
- dcmd_buf, ret_len);
+ ret = brcmf_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf,
+ ret_len);
else
- ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), cmdhdr->cmd,
- dcmd_buf, ret_len);
+ ret = brcmf_fil_cmd_data_get(ifp, cmdhdr->cmd, dcmd_buf,
+ ret_len);
if (ret != 0)
goto exit;
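
The vendor command handler now recovers the vif from the wireless_dev it is given via container_of(), i.e. by subtracting the member offset from the member pointer. A userspace illustration of that step, with the macro re-implemented locally and an invented struct layout:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wireless_dev { int iftype; };

struct vif {
        int ifidx;
        struct wireless_dev wdev;       /* embedded member */
};

int main(void)
{
        struct vif v = { .ifidx = 2 };
        struct wireless_dev *wdev = &v.wdev;    /* all a callback gets */
        struct vif *back = container_of(wdev, struct vif, wdev);

        printf("recovered ifidx = %d\n", back->ifidx);
        return 0;
}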
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
index a5d4add26f41..c9a8b9360ab1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -30,6 +30,7 @@
#include "main.h"
#include "debug.h"
#include "brcms_trace_events.h"
+#include "phy/phy_int.h"
static struct dentry *root_folder;
@@ -71,48 +72,161 @@ struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
}
static
-ssize_t brcms_debugfs_hardware_read(struct file *f, char __user *data,
- size_t count, loff_t *ppos)
+int brcms_debugfs_hardware_read(struct seq_file *s, void *data)
{
- char buf[128];
- int res;
- struct brcms_pub *drvr = f->private_data;
-
- /* only allow read from start */
- if (*ppos > 0)
- return 0;
-
- res = scnprintf(buf, sizeof(buf),
- "board vendor: %x\n"
- "board type: %x\n"
- "board revision: %x\n"
- "board flags: %x\n"
- "board flags2: %x\n"
- "firmware revision: %x\n",
- drvr->wlc->hw->d11core->bus->boardinfo.vendor,
- drvr->wlc->hw->d11core->bus->boardinfo.type,
- drvr->wlc->hw->boardrev,
- drvr->wlc->hw->boardflags,
- drvr->wlc->hw->boardflags2,
- drvr->wlc->ucode_rev
- );
-
- return simple_read_from_buffer(data, count, ppos, buf, res);
+ struct brcms_pub *drvr = s->private;
+ struct brcms_hardware *hw = drvr->wlc->hw;
+ struct bcma_device *core = hw->d11core;
+ struct bcma_bus *bus = core->bus;
+ char boardrev[10];
+
+ seq_printf(s, "chipnum 0x%x\n"
+ "chiprev 0x%x\n"
+ "chippackage 0x%x\n"
+ "corerev 0x%x\n"
+ "boardid 0x%x\n"
+ "boardvendor 0x%x\n"
+ "boardrev %s\n"
+ "boardflags 0x%x\n"
+ "boardflags2 0x%x\n"
+ "ucoderev 0x%x\n"
+ "radiorev 0x%x\n"
+ "phytype 0x%x\n"
+ "phyrev 0x%x\n"
+ "anarev 0x%x\n"
+ "nvramrev %d\n",
+ bus->chipinfo.id, bus->chipinfo.rev, bus->chipinfo.pkg,
+ core->id.rev, bus->boardinfo.type, bus->boardinfo.vendor,
+ brcmu_boardrev_str(hw->boardrev, boardrev),
+ drvr->wlc->hw->boardflags, drvr->wlc->hw->boardflags2,
+ drvr->wlc->ucode_rev, hw->band->radiorev,
+ hw->band->phytype, hw->band->phyrev, hw->band->pi->ana_rev,
+ hw->sromrev);
+ return 0;
+}
+
+static int brcms_debugfs_macstat_read(struct seq_file *s, void *data)
+{
+ struct brcms_pub *drvr = s->private;
+ struct brcms_info *wl = drvr->ieee_hw->priv;
+ struct macstat stats;
+ int i;
+
+ spin_lock_bh(&wl->lock);
+ stats = *(drvr->wlc->core->macstat_snapshot);
+ spin_unlock_bh(&wl->lock);
+
+ seq_printf(s, "txallfrm: %d\n", stats.txallfrm);
+ seq_printf(s, "txrtsfrm: %d\n", stats.txrtsfrm);
+ seq_printf(s, "txctsfrm: %d\n", stats.txctsfrm);
+ seq_printf(s, "txackfrm: %d\n", stats.txackfrm);
+ seq_printf(s, "txdnlfrm: %d\n", stats.txdnlfrm);
+ seq_printf(s, "txbcnfrm: %d\n", stats.txbcnfrm);
+ seq_printf(s, "txfunfl[8]:");
+ for (i = 0; i < ARRAY_SIZE(stats.txfunfl); i++)
+ seq_printf(s, " %d", stats.txfunfl[i]);
+ seq_printf(s, "\ntxtplunfl: %d\n", stats.txtplunfl);
+ seq_printf(s, "txphyerr: %d\n", stats.txphyerr);
+ seq_printf(s, "pktengrxducast: %d\n", stats.pktengrxducast);
+ seq_printf(s, "pktengrxdmcast: %d\n", stats.pktengrxdmcast);
+ seq_printf(s, "rxfrmtoolong: %d\n", stats.rxfrmtoolong);
+ seq_printf(s, "rxfrmtooshrt: %d\n", stats.rxfrmtooshrt);
+ seq_printf(s, "rxinvmachdr: %d\n", stats.rxinvmachdr);
+ seq_printf(s, "rxbadfcs: %d\n", stats.rxbadfcs);
+ seq_printf(s, "rxbadplcp: %d\n", stats.rxbadplcp);
+ seq_printf(s, "rxcrsglitch: %d\n", stats.rxcrsglitch);
+ seq_printf(s, "rxstrt: %d\n", stats.rxstrt);
+ seq_printf(s, "rxdfrmucastmbss: %d\n", stats.rxdfrmucastmbss);
+ seq_printf(s, "rxmfrmucastmbss: %d\n", stats.rxmfrmucastmbss);
+ seq_printf(s, "rxcfrmucast: %d\n", stats.rxcfrmucast);
+ seq_printf(s, "rxrtsucast: %d\n", stats.rxrtsucast);
+ seq_printf(s, "rxctsucast: %d\n", stats.rxctsucast);
+ seq_printf(s, "rxackucast: %d\n", stats.rxackucast);
+ seq_printf(s, "rxdfrmocast: %d\n", stats.rxdfrmocast);
+ seq_printf(s, "rxmfrmocast: %d\n", stats.rxmfrmocast);
+ seq_printf(s, "rxcfrmocast: %d\n", stats.rxcfrmocast);
+ seq_printf(s, "rxrtsocast: %d\n", stats.rxrtsocast);
+ seq_printf(s, "rxctsocast: %d\n", stats.rxctsocast);
+ seq_printf(s, "rxdfrmmcast: %d\n", stats.rxdfrmmcast);
+ seq_printf(s, "rxmfrmmcast: %d\n", stats.rxmfrmmcast);
+ seq_printf(s, "rxcfrmmcast: %d\n", stats.rxcfrmmcast);
+ seq_printf(s, "rxbeaconmbss: %d\n", stats.rxbeaconmbss);
+ seq_printf(s, "rxdfrmucastobss: %d\n", stats.rxdfrmucastobss);
+ seq_printf(s, "rxbeaconobss: %d\n", stats.rxbeaconobss);
+ seq_printf(s, "rxrsptmout: %d\n", stats.rxrsptmout);
+ seq_printf(s, "bcntxcancl: %d\n", stats.bcntxcancl);
+ seq_printf(s, "rxf0ovfl: %d\n", stats.rxf0ovfl);
+ seq_printf(s, "rxf1ovfl: %d\n", stats.rxf1ovfl);
+ seq_printf(s, "rxf2ovfl: %d\n", stats.rxf2ovfl);
+ seq_printf(s, "txsfovfl: %d\n", stats.txsfovfl);
+ seq_printf(s, "pmqovfl: %d\n", stats.pmqovfl);
+ seq_printf(s, "rxcgprqfrm: %d\n", stats.rxcgprqfrm);
+ seq_printf(s, "rxcgprsqovfl: %d\n", stats.rxcgprsqovfl);
+ seq_printf(s, "txcgprsfail: %d\n", stats.txcgprsfail);
+ seq_printf(s, "txcgprssuc: %d\n", stats.txcgprssuc);
+ seq_printf(s, "prs_timeout: %d\n", stats.prs_timeout);
+ seq_printf(s, "rxnack: %d\n", stats.rxnack);
+ seq_printf(s, "frmscons: %d\n", stats.frmscons);
+ seq_printf(s, "txnack: %d\n", stats.txnack);
+ seq_printf(s, "txglitch_nack: %d\n", stats.txglitch_nack);
+ seq_printf(s, "txburst: %d\n", stats.txburst);
+ seq_printf(s, "bphy_rxcrsglitch: %d\n", stats.bphy_rxcrsglitch);
+ seq_printf(s, "phywatchdog: %d\n", stats.phywatchdog);
+ seq_printf(s, "bphy_badplcp: %d\n", stats.bphy_badplcp);
+ return 0;
}
-static const struct file_operations brcms_debugfs_hardware_ops = {
+struct brcms_debugfs_entry {
+ int (*read)(struct seq_file *seq, void *data);
+ struct brcms_pub *drvr;
+};
+
+static int brcms_debugfs_entry_open(struct inode *inode, struct file *f)
+{
+ struct brcms_debugfs_entry *entry = inode->i_private;
+
+ return single_open(f, entry->read, entry->drvr);
+}
+
+static const struct file_operations brcms_debugfs_def_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .read = brcms_debugfs_hardware_read
+ .open = brcms_debugfs_entry_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
};
+static int
+brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data))
+{
+ struct device *dev = &drvr->wlc->hw->d11core->dev;
+ struct dentry *dentry = drvr->dbgfs_dir;
+ struct brcms_debugfs_entry *entry;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return -ENOENT;
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->read = read_fn;
+ entry->drvr = drvr;
+
+ dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry,
+ &brcms_debugfs_def_ops);
+
+ return PTR_ERR_OR_ZERO(dentry);
+}
+
void brcms_debugfs_create_files(struct brcms_pub *drvr)
{
- struct dentry *dentry = drvr->dbgfs_dir;
+ if (IS_ERR_OR_NULL(drvr->dbgfs_dir))
+ return;
- if (!IS_ERR_OR_NULL(dentry))
- debugfs_create_file("hardware", S_IRUGO, dentry,
- drvr, &brcms_debugfs_hardware_ops);
+ brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read);
+ brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read);
}
#define __brcms_fn(fn) \
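
The brcmsmac debugfs rework above replaces the hand-rolled read handler with seq_file: each entry pairs a read callback with the driver context and a generic open wires them up through single_open(). A rough userspace model of that dispatch, using FILE * where the kernel uses a seq_file (illustration only, not kernel code):

#include <stdio.h>

struct drvr { int boardrev; int ucode_rev; };

struct dbg_entry {
        const char *name;
        int (*read)(FILE *s, struct drvr *drvr);
        struct drvr *drvr;
};

static int hardware_read(FILE *s, struct drvr *drvr)
{
        fprintf(s, "boardrev 0x%x\nucoderev 0x%x\n",
                drvr->boardrev, drvr->ucode_rev);
        return 0;
}

static int entry_show(const struct dbg_entry *e, FILE *out)
{
        return e->read(out, e->drvr);   /* what single_open() would wire up */
}

int main(void)
{
        struct drvr drvr = { .boardrev = 0x1211, .ucode_rev = 0x3ae };
        struct dbg_entry hw = { "hardware", hardware_read, &drvr };

        return entry_show(&hw, stdout);
}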
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 43c71bfaa474..f95b52442281 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -764,7 +764,9 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
return;
}
-static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct brcms_info *wl = hw->priv;
spin_lock_bh(&wl->lock);
@@ -773,7 +775,8 @@ static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
return;
}
-static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct brcms_info *wl = hw->priv;
spin_lock_bh(&wl->lock);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 1b474828d5b8..eb8584a9c49a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -316,7 +316,7 @@ static const u16 xmtfifo_sz[][NFIFO] = {
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
-static const char fifo_names[6][0];
+static const char fifo_names[6][1];
#endif
#ifdef DEBUG
@@ -445,18 +445,18 @@ static void brcms_c_detach_mfree(struct brcms_c_info *wlc)
kfree(wlc->protection);
kfree(wlc->stf);
kfree(wlc->bandstate[0]);
- kfree(wlc->corestate->macstat_snapshot);
+ if (wlc->corestate)
+ kfree(wlc->corestate->macstat_snapshot);
kfree(wlc->corestate);
- kfree(wlc->hw->bandstate[0]);
+ if (wlc->hw)
+ kfree(wlc->hw->bandstate[0]);
kfree(wlc->hw);
if (wlc->beacon)
dev_kfree_skb_any(wlc->beacon);
if (wlc->probe_resp)
dev_kfree_skb_any(wlc->probe_resp);
- /* free the wlc */
kfree(wlc);
- wlc = NULL;
}
static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
@@ -1009,8 +1009,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
if (txh)
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
sizeof(*txh));
- if (p)
- brcmu_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
}
if (dma && queue < NFIFO) {
@@ -3081,7 +3080,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
static void brcms_c_statsupd(struct brcms_c_info *wlc)
{
int i;
- struct macstat macstats;
+ struct macstat *macstats;
#ifdef DEBUG
u16 delta;
u16 rxf0ovfl;
@@ -3092,31 +3091,31 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
if (!wlc->pub->up)
return;
+ macstats = wlc->core->macstat_snapshot;
+
#ifdef DEBUG
/* save last rx fifo 0 overflow count */
- rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
+ rxf0ovfl = macstats->rxf0ovfl;
/* save last tx fifo underflow count */
for (i = 0; i < NFIFO; i++)
- txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
+ txfunfl[i] = macstats->txfunfl[i];
#endif /* DEBUG */
/* Read mac stats from contiguous shared memory */
- brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
- sizeof(struct macstat), OBJADDR_SHM_SEL);
+ brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, macstats,
+ sizeof(*macstats), OBJADDR_SHM_SEL);
#ifdef DEBUG
/* check for rx fifo 0 overflow */
- delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
+ delta = (u16)(macstats->rxf0ovfl - rxf0ovfl);
if (delta)
brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
wlc->pub->unit, delta);
/* check for tx fifo underflows */
for (i = 0; i < NFIFO; i++) {
- delta =
- (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
- txfunfl[i]);
+ delta = macstats->txfunfl[i] - txfunfl[i];
if (delta)
brcms_err(wlc->hw->d11core,
"wl%d: %u tx fifo %d underflows!\n",
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 0f7e1c7b6f58..906e89ddf319 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -261,6 +261,21 @@ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
+/* Produce a human-readable string for boardrev */
+char *brcmu_boardrev_str(u32 brev, char *buf)
+{
+ char c;
+
+ if (brev < 0x100) {
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ } else {
+ c = (brev & 0xf000) == 0x1000 ? 'P' : 'A';
+ snprintf(buf, 8, "%c%03x", c, brev & 0xfff);
+ }
+ return buf;
+}
+EXPORT_SYMBOL(brcmu_boardrev_str);
+
#if defined(DEBUG)
/* pretty hex print a pkt buffer chain */
void brcmu_prpkt(const char *msg, struct sk_buff *p0)
@@ -292,4 +307,5 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size);
}
EXPORT_SYMBOL(brcmu_dbg_hex_dump);
+
#endif /* defined(DEBUG) */
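
brcmu_boardrev_str() added above formats small board revisions as "major.minor" from the two nibbles and larger ones as A###/P### depending on bits 12-15. A userspace copy of that formatting rule, runnable on its own:

#include <stdio.h>

static char *boardrev_str(unsigned int brev, char *buf)
{
        char c;

        if (brev < 0x100) {
                snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
        } else {
                c = (brev & 0xf000) == 0x1000 ? 'P' : 'A';
                snprintf(buf, 8, "%c%03x", c, brev & 0xfff);
        }
        return buf;
}

int main(void)
{
        char buf[10];

        printf("0x24   -> %s\n", boardrev_str(0x24, buf));
        printf("0x1211 -> %s\n", boardrev_str(0x1211, buf));
        return 0;
}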
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index af26e0de1e5c..6996fcc144cf 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -68,6 +68,8 @@
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
+#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
+#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 8ba445b3fd72..a043e29f07e2 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -218,4 +218,6 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
}
#endif
+char *brcmu_boardrev_str(u32 brev, char *buf);
+
#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index b2fb6c632092..f2e276faca70 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -78,7 +78,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
- frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+ frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
if (!frame.skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index b6ec51923b20..50033aa7c7d5 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -381,18 +381,15 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 1 (%d)\n", __func__, res);
return;
}
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
- old_cor);
+ printk(KERN_DEBUG "%s: original COR %02x\n", __func__, old_cor);
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor | COR_SOFT_RESET);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 2 (%d)\n", __func__, res);
return;
}
@@ -401,8 +398,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
/* Setup Genesis mode */
res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 3 (%d)\n", __func__, res);
return;
}
mdelay(10);
@@ -410,8 +406,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor & ~COR_SOFT_RESET);
if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
- "(%d)\n", res);
+ printk(KERN_DEBUG "%s failed 4 (%d)\n", __func__, res);
return;
}
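The printk cleanup above swaps hardcoded function names for __func__, so the debug strings cannot go stale if the function is ever renamed. A minimal userspace illustration of the same idiom (function name and error value invented):

#include <stdio.h>

/* __func__ expands to the enclosing function's name at compile time. */
static void genesis_reset_demo(void)
{
    int res = -5;   /* pretend the config-register read failed */

    printf("%s failed 1 (%d)\n", __func__, res);
}

int main(void)
{
    genesis_reset_demo();   /* prints: genesis_reset_demo failed 1 (-5) */
    return 0;
}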
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index edc344334a75..67cad9b05ad8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1363,7 +1363,7 @@ static ssize_t show_cmd_log(struct device *d,
if (!priv->cmdlog)
return 0;
for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
- (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
+ (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
i = (i + 1) % priv->cmdlog_len) {
len +=
snprintf(buf + len, PAGE_SIZE - len,
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 5ce2f59d3378..b0571618c2ed 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -654,10 +654,6 @@ struct libipw_network {
/* TPC Report - mandatory if spctrm mgmt required */
struct libipw_tpc_report tpc_report;
- /* IBSS DFS - mandatory if spctrm mgmt required and IBSS
- * NOTE: This is variable length and so must be allocated dynamically */
- struct libipw_ibss_dfs *ibss_dfs;
-
/* Channel Switch Announcement - optional if spctrm mgmt required */
struct libipw_csa csa;
@@ -970,7 +966,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
/* make sure to set stats->len */
void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
struct libipw_rx_stats *stats);
-void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 5f31b72a4921..60f28740f6af 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -84,25 +84,12 @@ static int libipw_networks_allocate(struct libipw_device *ieee)
return 0;
}
-void libipw_network_reset(struct libipw_network *network)
-{
- if (!network)
- return;
-
- if (network->ibss_dfs) {
- kfree(network->ibss_dfs);
- network->ibss_dfs = NULL;
- }
-}
-
static inline void libipw_networks_free(struct libipw_device *ieee)
{
int i;
- for (i = 0; i < MAX_NETWORK_COUNT; i++) {
- kfree(ieee->networks[i]->ibss_dfs);
+ for (i = 0; i < MAX_NETWORK_COUNT; i++)
kfree(ieee->networks[i]);
- }
}
void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 2d66984079bb..a6877dd6ba73 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1298,13 +1298,6 @@ static int libipw_parse_info_param(struct libipw_info_element
break;
case WLAN_EID_IBSS_DFS:
- if (network->ibss_dfs)
- break;
- network->ibss_dfs = kmemdup(info_element->data,
- info_element->len,
- GFP_ATOMIC);
- if (!network->ibss_dfs)
- return 1;
network->flags |= NETWORK_HAS_IBSS_DFS;
break;
@@ -1335,9 +1328,7 @@ static int libipw_parse_info_param(struct libipw_info_element
static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_assoc_response
*frame, struct libipw_rx_stats *stats)
{
- struct libipw_network network_resp = {
- .ibss_dfs = NULL,
- };
+ struct libipw_network network_resp = { };
struct libipw_network *network = &network_resp;
struct net_device *dev = ieee->dev;
@@ -1472,9 +1463,6 @@ static void update_network(struct libipw_network *dst,
int qos_active;
u8 old_param;
- libipw_network_reset(dst);
- dst->ibss_dfs = src->ibss_dfs;
-
/* We only update the statistics if they were created by receiving
* the network information on the actual channel the network is on.
*
@@ -1548,9 +1536,7 @@ static void libipw_process_probe_response(struct libipw_device
*stats)
{
struct net_device *dev = ieee->dev;
- struct libipw_network network = {
- .ibss_dfs = NULL,
- };
+ struct libipw_network network = { };
struct libipw_network *target;
struct libipw_network *oldest = NULL;
#ifdef CONFIG_LIBIPW_DEBUG
@@ -1618,7 +1604,6 @@ static void libipw_process_probe_response(struct libipw_device
LIBIPW_DEBUG_SCAN("Expired '%*pE' (%pM) from network list.\n",
target->ssid_len, target->ssid,
target->bssid);
- libipw_network_reset(target);
} else {
/* Otherwise just pull from the free list */
target = list_entry(ieee->network_free_list.next,
@@ -1634,7 +1619,6 @@ static void libipw_process_probe_response(struct libipw_device
"BEACON" : "PROBE RESPONSE");
#endif
memcpy(target, &network, sizeof(*target));
- network.ibss_dfs = NULL;
list_add_tail(&target->list, &ieee->network_list);
} else {
LIBIPW_DEBUG_SCAN("Updating '%*pE' (%pM) via %s.\n",
@@ -1643,7 +1627,6 @@ static void libipw_process_probe_response(struct libipw_device
is_beacon(beacon->header.frame_ctl) ?
"BEACON" : "PROBE RESPONSE");
update_network(target, &network);
- network.ibss_dfs = NULL;
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 26fec54dcd03..2748fde4b90c 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6063,7 +6063,7 @@ il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
void
-il4965_mac_channel_switch(struct ieee80211_hw *hw,
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 337dfcf3bbde..3a57f71b8ed5 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -187,8 +187,9 @@ int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 buf_size);
int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void il4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
void il4965_led_enable(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 267e48a2915e..ab019b45551b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -59,6 +59,7 @@ config IWLDVM
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
+ select WANT_DEV_COREDUMP
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 751ae1d10b7f..7a34e4d158d1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -966,21 +966,21 @@ struct iwl_rem_sta_cmd {
/* WiFi queues mask */
-#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3))
+#define IWL_SCD_BK_MSK BIT(0)
+#define IWL_SCD_BE_MSK BIT(1)
+#define IWL_SCD_VI_MSK BIT(2)
+#define IWL_SCD_VO_MSK BIT(3)
+#define IWL_SCD_MGMT_MSK BIT(3)
/* PAN queues mask */
-#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4))
-#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5))
-#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6))
-#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7))
-#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7))
-#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8))
+#define IWL_PAN_SCD_BK_MSK BIT(4)
+#define IWL_PAN_SCD_BE_MSK BIT(5)
+#define IWL_PAN_SCD_VI_MSK BIT(6)
+#define IWL_PAN_SCD_VO_MSK BIT(7)
+#define IWL_PAN_SCD_MGMT_MSK BIT(7)
+#define IWL_PAN_SCD_MULTICAST_MSK BIT(8)
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IWL_AGG_TX_QUEUE_MSK 0xffc00
#define IWL_DROP_ALL BIT(1)
@@ -1005,12 +1005,17 @@ struct iwl_rem_sta_cmd {
* 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
* 2: Dump all FIFO
*/
-struct iwl_txfifo_flush_cmd {
+struct iwl_txfifo_flush_cmd_v3 {
__le32 queue_control;
__le16 flush_control;
__le16 reserved;
} __packed;
+struct iwl_txfifo_flush_cmd_v2 {
+ __le16 queue_control;
+ __le16 flush_control;
+} __packed;
+
/*
* REPLY_WEP_KEY = 0x20
*/
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 2191621d69c1..1d2223df5cb0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -137,37 +137,38 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
*/
int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
{
- struct iwl_txfifo_flush_cmd flush_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_TXFIFO_FLUSH,
- .len = { sizeof(struct iwl_txfifo_flush_cmd), },
- .data = { &flush_cmd, },
+ struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
+ .flush_control = cpu_to_le16(IWL_DROP_ALL),
+ };
+ struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
+ .flush_control = cpu_to_le16(IWL_DROP_ALL),
};
- memset(&flush_cmd, 0, sizeof(flush_cmd));
+ u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;
- flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
- IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
- IWL_SCD_MGMT_MSK;
if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
- flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
- IWL_PAN_SCD_VI_MSK |
- IWL_PAN_SCD_BE_MSK |
- IWL_PAN_SCD_BK_MSK |
- IWL_PAN_SCD_MGMT_MSK |
- IWL_PAN_SCD_MULTICAST_MSK;
+ queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
+ IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
+ IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
if (priv->nvm_data->sku_cap_11n_enable)
- flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
+ queue_control |= IWL_AGG_TX_QUEUE_MSK;
if (scd_q_msk)
- flush_cmd.queue_control = cpu_to_le32(scd_q_msk);
-
- IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
- flush_cmd.queue_control);
- flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
-
- return iwl_dvm_send_cmd(priv, &cmd);
+ queue_control = scd_q_msk;
+
+ IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
+ flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
+ flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);
+
+ if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+ sizeof(flush_cmd_v3),
+ &flush_cmd_v3);
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+ sizeof(flush_cmd_v2), &flush_cmd_v2);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
@@ -418,8 +419,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
{
- return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
- BT_UART_MSG_FRAME3SCOESCO_POS;
+ return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
+ BT_UART_MSG_FRAME3SCOESCO_POS;
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
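The rewritten iwlagn_txfifo_flush() now builds the queue mask in host byte order and only at the end fills whichever command layout the firmware API supports: v3 with a 32-bit queue_control, v2 with a 16-bit one, selected by the API number. A compressed sketch of that selection pattern, with invented struct and helper names and the little-endian conversion omitted for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical command layouts mirroring the v2/v3 split. */
struct flush_v3 { uint32_t queue_control; uint16_t flush_control; uint16_t reserved; };
struct flush_v2 { uint16_t queue_control; uint16_t flush_control; };

/* Pick the layout from the firmware API number and report the size that
 * would be sent; a real driver hands the buffer to the transport layer. */
static size_t build_flush_cmd(unsigned int api, uint32_t queue_control, void *buf)
{
    if (api > 2) {
        struct flush_v3 cmd = { .queue_control = queue_control,
                                .flush_control = 1 };
        memcpy(buf, &cmd, sizeof(cmd));
        return sizeof(cmd);
    } else {
        struct flush_v2 cmd = { .queue_control = (uint16_t)queue_control,
                                .flush_control = 1 };
        memcpy(buf, &cmd, sizeof(cmd));
        return sizeof(cmd);
    }
}

int main(void)
{
    unsigned char buf[16];

    printf("api 3 -> %zu bytes\n", build_flush_cmd(3, 0xffc0f, buf));
    printf("api 2 -> %zu bytes\n", build_flush_cmd(2, 0xffc0f, buf));
    return 0;
}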
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cae692ff1013..47e64e8b9517 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -941,6 +941,7 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
}
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index b04b8858c690..e5be2d21868f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -73,12 +73,12 @@
#define IWL3160_UCODE_API_MAX 10
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 9
-#define IWL3160_UCODE_API_OK 9
+#define IWL7260_UCODE_API_OK 10
+#define IWL3160_UCODE_API_OK 10
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 8
-#define IWL3160_UCODE_API_MIN 8
+#define IWL7260_UCODE_API_MIN 9
+#define IWL3160_UCODE_API_MIN 9
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -89,6 +89,8 @@
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL7265D_NVM_VERSION 0x0c11
+#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -102,6 +104,9 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_FW_PRE "iwlwifi-7265D-"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
@@ -132,8 +137,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
- .non_shared_ant = ANT_A
-
+ .non_shared_ant = ANT_A, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
@@ -267,7 +272,38 @@ const struct iwl_cfg iwl7265_n_cfg = {
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
+const struct iwl_cfg iwl7265d_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
+const struct iwl_cfg iwl7265d_2n_cfg = {
+ .name = "Intel(R) Dual Band Wireless N 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
+const struct iwl_cfg iwl7265d_n_cfg = {
+ .name = "Intel(R) Wireless N 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index d2b7234b1c73..bf0a95cb7153 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -72,10 +72,10 @@
#define IWL8000_UCODE_API_MAX 10
/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK 8
+#define IWL8000_UCODE_API_OK 10
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 8
+#define IWL8000_UCODE_API_MIN 9
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@@ -91,6 +91,10 @@
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
+/* Max A-MPDU exponent for HT and VHT */
+#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
+#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K
+
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -104,6 +108,7 @@ static const struct iwl_base_params iwl8000_base_params = {
};
static const struct iwl_ht_params iwl8000_ht_params = {
+ .stbc = true,
.ldpc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
@@ -118,6 +123,7 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
+ .d0i3 = true, \
.non_shared_ant = ANT_A
const struct iwl_cfg iwl8260_2n_cfg = {
@@ -136,6 +142,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
@@ -148,6 +155,23 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
+};
+
+const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 4165",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
+ .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .bt_shared_single_ant = true,
+ .disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 2ef83a39ff10..3a4b9c7fc083 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -87,6 +87,16 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
};
+static inline bool iwl_has_secure_boot(u32 hw_rev,
+ enum iwl_device_family family)
+{
+ /* return 1 only for family 8000 B0 */
+ if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
+ return 1;
+
+ return 0;
+}
+
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -246,6 +256,11 @@ struct iwl_pwr_tx_backoff {
* @nvm_hw_section_num: the ID of the HW NVM section
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
+ * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
+ * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
+ * station can receive in HT
+ * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
+ * station can receive in VHT
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -285,6 +300,9 @@ struct iwl_cfg {
const char *default_nvm_file;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
+ unsigned int max_tx_agg_size;
+ unsigned int max_ht_ampdu_exponent;
+ unsigned int max_vht_ampdu_exponent;
};
/*
@@ -346,9 +364,14 @@ extern const struct iwl_cfg iwl3165_2ac_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl7265d_2ac_cfg;
+extern const struct iwl_cfg iwl7265d_2n_cfg;
+extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl4265_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 3f6f015285e5..aff63c3f5bf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -129,6 +129,8 @@
#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
+#define CSR_MBOX_SET_REG (CSR_BASE + 0x88)
+
#define CSR_LED_REG (CSR_BASE+0x094)
#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
@@ -184,6 +186,8 @@
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
+
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -305,23 +309,24 @@ enum {
};
-#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
-#define CSR_HW_REV_TYPE_5300 (0x0000020)
-#define CSR_HW_REV_TYPE_5350 (0x0000030)
-#define CSR_HW_REV_TYPE_5100 (0x0000050)
-#define CSR_HW_REV_TYPE_5150 (0x0000040)
-#define CSR_HW_REV_TYPE_1000 (0x0000060)
-#define CSR_HW_REV_TYPE_6x00 (0x0000070)
-#define CSR_HW_REV_TYPE_6x50 (0x0000080)
-#define CSR_HW_REV_TYPE_6150 (0x0000084)
-#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
-#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
-#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
-#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
-#define CSR_HW_REV_TYPE_2x00 (0x0000100)
-#define CSR_HW_REV_TYPE_105 (0x0000110)
-#define CSR_HW_REV_TYPE_135 (0x0000120)
-#define CSR_HW_REV_TYPE_NONE (0x00001F0)
+#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
+#define CSR_HW_REV_TYPE_5300 (0x0000020)
+#define CSR_HW_REV_TYPE_5350 (0x0000030)
+#define CSR_HW_REV_TYPE_5100 (0x0000050)
+#define CSR_HW_REV_TYPE_5150 (0x0000040)
+#define CSR_HW_REV_TYPE_1000 (0x0000060)
+#define CSR_HW_REV_TYPE_6x00 (0x0000070)
+#define CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define CSR_HW_REV_TYPE_6150 (0x0000084)
+#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define CSR_HW_REV_TYPE_105 (0x0000110)
+#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_7265D (0x0000210)
+#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 0a70bcd241f5..684254553558 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -143,7 +143,7 @@ do { \
#define IWL_DL_INFO 0x00000001
#define IWL_DL_MAC80211 0x00000002
#define IWL_DL_HCMD 0x00000004
-#define IWL_DL_STATE 0x00000008
+#define IWL_DL_TDLS 0x00000008
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_QUOTA 0x00000010
#define IWL_DL_TE 0x00000020
@@ -180,6 +180,7 @@ do { \
#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0f1084f09caa..38de1513e4de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -78,9 +78,6 @@
#include "iwl-config.h"
#include "iwl-modparams.h"
-/* private includes */
-#include "iwl-fw-file.h"
-
/******************************************************************************
*
* module boiler plate
@@ -187,6 +184,11 @@ static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
+
+ kfree(drv->fw.dbg_dest_tlv);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
+ kfree(drv->fw.dbg_conf_tlv[i]);
+
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
}
@@ -248,6 +250,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
/*
* Starting 8000B - FW name format has changed. This overwrites the
* previous name and uses the new format.
+ *
+ * TODO:
+ * Once there is only one supported step for 8000 family - delete this!
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step[2] = {
@@ -258,6 +263,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
rev_step[0] = 0;
+ /*
+ * If hw_rev wasn't set yet - default as B-step. If it IS A-step
+ * we'll reload that FW later instead.
+ */
+ if (drv->trans->hw_rev == 0)
+ rev_step[0] = 'B';
+
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s-%s.ucode", name_pre, rev_step, tag);
}
@@ -301,6 +313,11 @@ struct iwl_firmware_pieces {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ /* FW debug data parsed for driver usage */
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_MAX];
};
/*
@@ -574,6 +591,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
char buildstr[25];
u32 build;
int num_of_cpus;
+ bool usniffer_images = false;
+ bool usniffer_req = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -807,19 +826,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_INIT].is_secure = true;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
if (tlv_len != sizeof(u32))
@@ -849,12 +865,79 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_DBG_DEST: {
+ struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
+
+ if (pieces->dbg_dest_tlv) {
+ IWL_ERR(drv,
+ "dbg destination ignored, already exists\n");
+ break;
+ }
+
+ pieces->dbg_dest_tlv = dest;
+ IWL_INFO(drv, "Found debug destination: %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
+
+ drv->fw.dbg_dest_reg_num =
+ tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
+ reg_ops);
+ drv->fw.dbg_dest_reg_num /=
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+
+ break;
+ }
+ case IWL_UCODE_TLV_FW_DBG_CONF: {
+ struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+
+ if (!pieces->dbg_dest_tlv) {
+ IWL_ERR(drv,
+ "Ignore dbg config %d - no destination configured\n",
+ conf->id);
+ break;
+ }
+
+ if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+ IWL_ERR(drv,
+ "Skip unknown configuration: %d\n",
+ conf->id);
+ break;
+ }
+
+ if (pieces->dbg_conf_tlv[conf->id]) {
+ IWL_ERR(drv,
+ "Ignore duplicate dbg config %d\n",
+ conf->id);
+ break;
+ }
+
+ if (conf->usniffer)
+ usniffer_req = true;
+
+ IWL_INFO(drv, "Found debug configuration: %d\n",
+ conf->id);
+
+ pieces->dbg_conf_tlv[conf->id] = conf;
+ pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
+ break;
+ }
+ case IWL_UCODE_TLV_SEC_RT_USNIFFER:
+ usniffer_images = true;
+ iwl_store_ucode_sec(pieces, tlv_data,
+ IWL_UCODE_REGULAR_USNIFFER,
+ tlv_len);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
}
}
+ if (usniffer_req && !usniffer_images) {
+ IWL_ERR(drv,
+ "user selected to work with usniffer but usniffer image isn't available in ucode package\n");
+ return -EINVAL;
+ }
+
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
@@ -992,13 +1075,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
- struct iwl_firmware_pieces pieces;
+ struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
bool load_module = false;
+ u32 hw_rev = drv->trans->hw_rev;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@@ -1008,7 +1092,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!api_ok)
api_ok = api_max;
- memset(&pieces, 0, sizeof(pieces));
+ pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
+ if (!pieces)
+ return;
if (!ucode_raw) {
if (drv->fw_index <= api_ok)
@@ -1031,10 +1117,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
ucode = (struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
- err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
+ err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
else
- err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
- &fw->ucode_capa);
+ err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
+ &fw->ucode_capa);
if (err)
goto try_again;
@@ -1074,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
- if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
+ if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@@ -1083,9 +1169,33 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (iwl_alloc_ucode(drv, &pieces, i))
+ if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
+ if (pieces->dbg_dest_tlv) {
+ drv->fw.dbg_dest_tlv =
+ kmemdup(pieces->dbg_dest_tlv,
+ sizeof(*pieces->dbg_dest_tlv) +
+ sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num, GFP_KERNEL);
+
+ if (!drv->fw.dbg_dest_tlv)
+ goto out_free_fw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+ if (pieces->dbg_conf_tlv[i]) {
+ drv->fw.dbg_conf_tlv_len[i] =
+ pieces->dbg_conf_tlv_len[i];
+ drv->fw.dbg_conf_tlv[i] =
+ kmemdup(pieces->dbg_conf_tlv[i],
+ drv->fw.dbg_conf_tlv_len[i],
+ GFP_KERNEL);
+ if (!drv->fw.dbg_conf_tlv[i])
+ goto out_free_fw;
+ }
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1093,20 +1203,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* for each event, which is of mode 1 (including timestamp) for all
* new microcodes that include this information.
*/
- fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
- if (pieces.init_evtlog_size)
- fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
+ if (pieces->init_evtlog_size)
+ fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
drv->cfg->base_params->max_event_log_size;
- fw->init_errlog_ptr = pieces.init_errlog_ptr;
- fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
- if (pieces.inst_evtlog_size)
- fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ fw->init_errlog_ptr = pieces->init_errlog_ptr;
+ fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
+ if (pieces->inst_evtlog_size)
+ fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
drv->cfg->base_params->max_event_log_size;
- fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
+ fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
* figure out the offset of chain noise reset and gain commands
@@ -1165,10 +1275,55 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
+
+ /*
+ * We may have loaded the wrong FW file in 8000 HW family if it is an
+ * A-step card, and if drv->trans->hw_rev wasn't properly read when
+ * the FW file had been loaded. (This might happen in SDIO.) In such a
+ * case - unload and reload the correct file.
+ *
+ * TODO:
+ * Once there is only one supported step for 8000 family - delete this!
+ */
+ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP &&
+ drv->trans->hw_rev != hw_rev) {
+ char firmware_name[32];
+
+ /* Free previous FW resources */
+ if (drv->op_mode)
+ _iwl_op_mode_stop(drv);
+ iwl_dealloc_ucode(drv);
+
+ /* Build name of correct-step FW */
+ snprintf(firmware_name, sizeof(firmware_name),
+ strrchr(drv->firmware_name, '-'));
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name),
+ "%s%s", drv->cfg->fw_name_pre, firmware_name);
+
+ /* Clear data before loading correct FW */
+ list_del(&drv->list);
+
+ /* Request correct FW file this time */
+ IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n",
+ drv->firmware_name);
+ err = request_firmware(&ucode_raw, drv->firmware_name,
+ drv->trans->dev);
+ if (err) {
+ IWL_ERR(drv, "Failed swapping FW!\n");
+ goto out_unbind;
+ }
+
+ /* Redo callback function - this time with right FW */
+ iwl_req_fw_callback(ucode_raw, context);
+ }
+
+ kfree(pieces);
return;
try_again:
/* try next, if any */
+ kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
@@ -1179,6 +1334,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
+ kfree(pieces);
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
}
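The new dbg_dest_tlv handling duplicates a structure that ends in a flexible reg_ops[] array, so the copy must be sized as the header plus dbg_dest_reg_num trailing elements. A userspace sketch of that sizing rule, with simplified types and hypothetical names (malloc standing in for kmemdup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reg_op { unsigned char op; unsigned int addr, val; };

/* Trimmed stand-in for a TLV ending in a flexible array member. */
struct dest_tlv {
    unsigned char version;
    unsigned char monitor_mode;
    struct reg_op reg_ops[];
};

/* Duplicate the TLV including its trailing array: copy size is the
 * fixed header plus n trailing elements. */
static struct dest_tlv *dup_dest_tlv(const struct dest_tlv *src, size_t n)
{
    size_t len = sizeof(*src) + n * sizeof(src->reg_ops[0]);
    struct dest_tlv *copy = malloc(len);

    if (copy)
        memcpy(copy, src, len);
    return copy;
}

int main(void)
{
    struct dest_tlv *orig = calloc(1, sizeof(*orig) + 2 * sizeof(struct reg_op));
    struct dest_tlv *copy;

    if (!orig)
        return 1;
    orig->reg_ops[0].addr = 0x1e30;

    copy = dup_dest_tlv(orig, 2);
    printf("copied reg_ops[0].addr = 0x%x\n", copy ? copy->reg_ops[0].addr : 0);
    free(copy);
    free(orig);
    return 0;
}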
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 74b796dc4242..41ff85de7334 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -764,7 +764,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index e30a41d04c8b..20a8a64c9fe3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -81,6 +81,7 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
@@ -90,6 +91,8 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
IWL_FW_ERROR_DUMP_PRPH = 6,
+ IWL_FW_ERROR_DUMP_TXF = 7,
+ IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MAX,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 401f7be36b93..f2a047f6bb3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -131,6 +131,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
+ IWL_UCODE_TLV_FW_DBG_DEST = 38,
+ IWL_UCODE_TLV_FW_DBG_CONF = 39,
};
struct iwl_ucode_tlv {
@@ -179,4 +182,309 @@ struct iwl_ucode_capa {
__le32 api_capa;
} __packed;
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ */
+enum iwl_ucode_tlv_flag {
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+ IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
+ IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
+ IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
+};
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
+ * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
+ * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
+ * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
+ * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
+ * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ */
+enum iwl_ucode_tlv_api {
+ IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+ IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
+ IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
+ IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
+ IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
+ IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+};
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ */
+enum iwl_ucode_tlv_capa {
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWL_UCODE_SECTION_MAX 12
+#define IWL_API_ARRAY_SIZE 1
+#define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+enum iwl_fw_phy_cfg {
+ FW_PHY_CFG_RADIO_TYPE_POS = 0,
+ FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
+ FW_PHY_CFG_RADIO_STEP_POS = 2,
+ FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
+ FW_PHY_CFG_RADIO_DASH_POS = 4,
+ FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
+ FW_PHY_CFG_TX_CHAIN_POS = 16,
+ FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
+ FW_PHY_CFG_RX_CHAIN_POS = 20,
+ FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+};
+
+#define IWL_UCODE_MAX_CS 1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+ __le32 cipher;
+ u8 flags;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+ u8 hw_cipher;
+} __packed;
+
+enum iwl_fw_dbg_reg_operator {
+ CSR_ASSIGN,
+ CSR_SETBIT,
+ CSR_CLEARBIT,
+
+ PRPH_ASSIGN,
+ PRPH_SETBIT,
+ PRPH_CLEARBIT,
+};
+
+/**
+ * struct iwl_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: %enum iwl_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwl_fw_dbg_reg_op {
+ u8 op;
+ u8 reserved[3];
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ */
+enum iwl_fw_dbg_monitor_mode {
+ SMEM_MODE = 0,
+ EXTERNAL_MODE = 1,
+ MARBH_MODE = 2,
+};
+
+/**
+ * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg: addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of registers operations
+ *
+ * This parses IWL_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwl_fw_dbg_dest_tlv {
+ u8 version;
+ u8 monitor_mode;
+ u8 reserved[2];
+ __le32 base_reg;
+ __le32 end_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 end_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+struct iwl_fw_dbg_conf_hcmd {
+ u8 id;
+ u8 reserved;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
+ *
+ * @enabled: is this trigger enabled
+ * @reserved:
+ * @len: length, in bytes, of the %trigger field
+ * @trigger: pointer to a trigger struct
+ */
+struct iwl_fw_dbg_trigger {
+ u8 enabled;
+ u8 reserved;
+ u8 len;
+ u8 trigger[0];
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_conf - configurations available
+ *
+ * @FW_DBG_CUSTOM: take this configuration from alive
+ * Note that the trigger is NO-OP for this configuration
+ */
+enum iwl_fw_dbg_conf {
+ FW_DBG_CUSTOM = 0,
+
+ /* must be last */
+ FW_DBG_MAX,
+ FW_DBG_INVALID = 0xff,
+};
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
+ *
+ * @id: %enum iwl_fw_dbg_conf
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: how many HCMDs to send are present here
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ * If there is more than one HCMD to send, they will appear one after the
+ * other and be sent in the order that they appear in.
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF
+ */
+struct iwl_fw_dbg_conf_tlv {
+ u8 id;
+ u8 usniffer;
+ u8 reserved;
+ u8 num_of_hcmds;
+ struct iwl_fw_dbg_conf_hcmd hcmd;
+
+ /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
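The version macros relocated into this header split the 32-bit ucode_ver into Major/Minor/API/Serial bytes; the API byte is what the flush-command selection earlier keys on. A small decoding sketch with a made-up version value:

#include <stdint.h>
#include <stdio.h>

/* Same byte layout as the IWL_UCODE_* macros: Major/Minor/API/Serial. */
#define UCODE_MAJOR(ver)  (((ver) & 0xFF000000) >> 24)
#define UCODE_MINOR(ver)  (((ver) & 0x00FF0000) >> 16)
#define UCODE_API(ver)    (((ver) & 0x0000FF00) >> 8)
#define UCODE_SERIAL(ver) ((ver) & 0x000000FF)

int main(void)
{
    uint32_t ver = 0x19010A02;  /* made-up example: 25.1, API 10, serial 2 */

    printf("%u.%u api %u serial %u\n",
           (unsigned)UCODE_MAJOR(ver), (unsigned)UCODE_MINOR(ver),
           (unsigned)UCODE_API(ver), (unsigned)UCODE_SERIAL(ver));
    return 0;
}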
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index b894a84e8393..e6dc3b870949 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -70,112 +70,6 @@
#include "iwl-fw-file.h"
/**
- * enum iwl_ucode_tlv_flag - ucode API flags
- * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
- * was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
- * treats good CRC threshold as a boolean
- * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
- * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
- * offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
- * (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
- * from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
- * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in same bindings.
- * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
- * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
- * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
- */
-enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
- IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
- IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
- IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
- IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
- IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
- IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
- IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
- IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
-};
-
-/**
- * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
- * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
- * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
- * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
- * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
- * longer than the passive one, which is essential for fragmented scan.
- */
-enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
- IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
- IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
- IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
- IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
-};
-
-/**
- * enum iwl_ucode_tlv_capa - ucode capabilities
- * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
- * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
- * tx power value into TPC Report action frame and Link Measurement Report
- * action frame
- * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params
- * element in probe requests.
- * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
- * probe requests.
- * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
- * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
- * which also implies support for the scheduler configuration command
- * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- */
-enum iwl_ucode_tlv_capa {
- IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
-};
-
-/* The default calibrate table size if not specified by firmware file */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
-#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
-
-/* The default max probe length if not specified by the firmware file */
-#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
-
-/**
* enum iwl_ucode_type
*
* The type of ucode.
@@ -183,11 +77,13 @@ enum iwl_ucode_tlv_capa {
* @IWL_UCODE_REGULAR: Normal runtime ucode
* @IWL_UCODE_INIT: Initial ucode
* @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
*/
enum iwl_ucode_type {
IWL_UCODE_REGULAR,
IWL_UCODE_INIT,
IWL_UCODE_WOWLAN,
+ IWL_UCODE_REGULAR_USNIFFER,
IWL_UCODE_TYPE_MAX,
};
@@ -202,14 +98,6 @@ enum iwl_ucode_sec {
IWL_UCODE_SECTION_DATA,
IWL_UCODE_SECTION_INST,
};
-/*
- * For 16.0 uCode and above, there is no differentiation between sections,
- * just an offset to the HW address.
- */
-#define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE 1
-#define IWL_CAPABILITIES_ARRAY_SIZE 1
-#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -229,7 +117,6 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
- bool is_secure;
bool is_dual_cpus;
};
@@ -238,66 +125,6 @@ struct iwl_sf_region {
u32 size;
};
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
-
-/*
- * Calibration control struct.
- * Sent as part of the phy configuration command.
- * @flow_trigger: bitmap for which calibrations to perform according to
- * flow triggers.
- * @event_trigger: bitmap for which calibrations to perform according to
- * event triggers.
- */
-struct iwl_tlv_calib_ctrl {
- __le32 flow_trigger;
- __le32 event_trigger;
-} __packed;
-
-enum iwl_fw_phy_cfg {
- FW_PHY_CFG_RADIO_TYPE_POS = 0,
- FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
- FW_PHY_CFG_RADIO_STEP_POS = 2,
- FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
- FW_PHY_CFG_RADIO_DASH_POS = 4,
- FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
- FW_PHY_CFG_TX_CHAIN_POS = 16,
- FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
- FW_PHY_CFG_RX_CHAIN_POS = 20,
- FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
-};
-
-#define IWL_UCODE_MAX_CS 1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
- __le32 cipher;
- u8 flags;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
- u8 hw_cipher;
-} __packed;
-
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
@@ -324,6 +151,11 @@ struct iwl_fw_cscheme_list {
* @inst_errlog_ptr: error log offset for runtime ucode.

* @mvm_fw: indicates this is MVM firmware
* @cipher_scheme: optional external cipher scheme.
+ * @human_readable: human readable version
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@@ -348,6 +180,68 @@ struct iwl_fw {
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_MAX];
+
+ u8 dbg_dest_reg_num;
};
+static inline const char *get_fw_dbg_mode_string(int mode)
+{
+ switch (mode) {
+ case SMEM_MODE:
+ return "SMEM";
+ case EXTERNAL_MODE:
+ return "EXTERNAL_DRAM";
+ case MARBH_MODE:
+ return "MARBH";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const struct iwl_fw_dbg_trigger *
+iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+ u8 *ptr;
+ int i;
+
+ if (!conf_tlv)
+ return NULL;
+
+ ptr = (void *)&conf_tlv->hcmd;
+ for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
+ ptr += sizeof(conf_tlv->hcmd);
+ ptr += le16_to_cpu(conf_tlv->hcmd.len);
+ }
+
+ return (const struct iwl_fw_dbg_trigger *)ptr;
+}
+
+static inline bool
+iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_trigger *trigger =
+ iwl_fw_dbg_conf_get_trigger(fw, id);
+
+ if (!trigger)
+ return false;
+
+ return trigger->enabled;
+}
+
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+
+ if (!conf_tlv)
+ return false;
+
+ return conf_tlv->usniffer;
+}
+
#endif /* __iwl_fw_h__ */
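As a brief usage sketch for the debug-configuration helpers added above, a hypothetical caller could pick the ucode image like this (the wrapper below is illustrative only and not part of this patch):

static enum iwl_ucode_type
choose_ucode_image(const struct iwl_fw *fw, u8 dbg_conf_id)
{
	/* boot the usniffer variant only when the debug config asks for it */
	if (iwl_fw_dbg_conf_usniffer(fw, dbg_conf_id))
		return IWL_UCODE_REGULAR_USNIFFER;

	return IWL_UCODE_REGULAR;
}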
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index c302e7468559..06e02fcd6f7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -325,6 +325,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
{
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
+ unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
+ IEEE80211_VHT_MAX_AMPDU_1024K);
vht_cap->vht_supported = true;
@@ -332,7 +334,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
- 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ max_ampdu_exponent <<
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b6d666ee8359..17de6d46222a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -138,7 +138,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
- * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @enter_d0i3: configure the fw to enter d0i3. Return 1 to indicate that
+ *	d0i3 entry was aborted (e.g. due to a held reference). May sleep.
* @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 1560f4576c7d..2df51eab1348 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -322,6 +322,7 @@ enum secure_boot_config_reg {
LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
};
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0 (0xA01E30)
#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
enum secure_boot_status_reg {
@@ -333,6 +334,7 @@ enum secure_boot_status_reg {
LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
};
+#define FH_UCODE_LOAD_STATUS (0x1AF0)
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
@@ -352,7 +354,7 @@ enum secure_load_status_reg {
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
-#define LMPM_SECURE_TIME_OUT (100)
+#define LMPM_SECURE_TIME_OUT (100) /* 10 micro */
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
@@ -368,4 +370,10 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+/* FW chicken bits */
+#define LMPM_CHICK 0xA01FF8
+enum {
+ LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
+};
+
#endif /* __iwl_prph_h__ */
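For context, a minimal sketch of how the new chicken bit could be toggled around accesses to the extended address space; the helper below is hypothetical, while iwl_set_bits_prph()/iwl_clear_bits_prph() are the existing PRPH accessors in iwl-io.h:

static void toggle_extended_addr(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_set_bits_prph(trans, LMPM_CHICK,
				  LMPM_CHICK_EXTENDED_ADDR_SPACE);
	else
		iwl_clear_bits_prph(trans, LMPM_CHICK,
				    LMPM_CHICK_EXTENDED_ADDR_SPACE);
}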
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index d8fc548c0d6c..028408a6ecba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -534,10 +534,10 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
+ void (*suspend)(struct iwl_trans *trans);
+ void (*resume)(struct iwl_trans *trans);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
-#endif
};
/**
@@ -574,6 +574,9 @@ enum iwl_trans_state {
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -605,6 +608,10 @@ struct iwl_trans {
u64 dflt_pwr_limit;
+ const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ u8 dbg_dest_reg_num;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -704,7 +711,18 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_trans_suspend(struct iwl_trans *trans)
+{
+ if (trans->ops->suspend)
+ trans->ops->suspend(trans);
+}
+
+static inline void iwl_trans_resume(struct iwl_trans *trans)
+{
+ if (trans->ops->resume)
+ trans->ops->resume(trans);
+}
+
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans)
{
@@ -712,7 +730,6 @@ iwl_trans_dump_data(struct iwl_trans *trans)
return NULL;
return trans->ops->dump_data(trans);
}
-#endif
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
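With dump_data no longer compiled out when debugfs is disabled, any op-mode path can collect a transport dump. A hypothetical consumer, assuming the buffer is vmalloc'd by the transport as in the debugfs reader removed further below:

static void my_collect_trans_dump(struct iwl_trans *trans)
{
	struct iwl_trans_dump_data *dump = iwl_trans_dump_data(trans);

	if (!dump)
		return;

	IWL_INFO(trans, "captured %u bytes of transport dump\n", dump->len);
	vfree(dump);	/* buffer is vmalloc'd by the transport */
}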
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index da2ffb785194..a3bfda45d9e6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -72,8 +72,6 @@
#include "mvm.h"
#include "iwl-debug.h"
-#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-
const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
[BT_KILL_MSK_NEVER] = 0xffffffff,
@@ -302,11 +300,6 @@ static const __le64 iwl_ci_mask[][3] = {
},
};
-static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x2e402280),
- cpu_to_le32(0x7711a751),
-};
-
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -605,7 +598,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->max_kill = cpu_to_le32(5);
bt_cmd->bt4_antenna_isolation_thr =
- cpu_to_le32(BT_ANTENNA_COUPLING_THRESHOLD);
+ cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
@@ -638,8 +631,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
- memcpy(&bt_cmd->multiprio_lut, iwl_bt_mprio_lut,
- sizeof(iwl_bt_mprio_lut));
+ bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
+ bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1144,6 +1137,22 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
+{
+ /* there is no other antenna, shared antenna is always available */
+ if (mvm->cfg->bt_shared_single_ant)
+ return true;
+
+ if (ant & mvm->cfg->non_shared_ant)
+ return true;
+
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
+
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC;
+}
+
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
/* there is no other antenna, shared antenna is always available */
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 8a1d2f33d5b7..b3210cfbecc8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -102,8 +102,6 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#undef EVENT_PRIO_ANT
-#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-
static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
@@ -290,11 +288,6 @@ static const __le64 iwl_ci_mask[][3] = {
},
};
-static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x2e402280),
- cpu_to_le32(0x7711a751),
-};
-
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -593,7 +586,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
}
bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation_thr =
+ IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
bt_cmd->bt4_tx_rx_max_freq0 = 15;
@@ -618,7 +612,9 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
BT_VALID_TXRX_MAX_FREQ_0 |
- BT_VALID_SYNC_TO_SCO);
+ BT_VALID_SYNC_TO_SCO |
+ BT_VALID_TTC |
+ BT_VALID_RRC);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
@@ -634,6 +630,12 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
+ if (IWL_MVM_BT_COEX_TTC)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
+
+ if (IWL_MVM_BT_COEX_RRC)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -649,8 +651,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
- memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
- sizeof(iwl_bt_mprio_lut));
+ bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
+ bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
send_cmd:
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -830,6 +832,9 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -1162,6 +1167,12 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
+bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
+{
+ u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
+ return ag < BT_HIGH_TRAFFIC;
+}
+
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index d4dfbe4cb66d..3bd93476ec1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -92,8 +92,15 @@
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 0
#define IWL_MVM_BT_COEX_MPLUT 1
+#define IWL_MVM_BT_COEX_RRC 1
+#define IWL_MVM_BT_COEX_TTC 1
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201
+#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
+#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
#define IWL_MVM_QUOTA_THRESHOLD 8
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
+#define IWL_MVM_RS_DISABLE_MIMO 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index c17be0fb7283..744de262373e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -601,33 +601,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
return ret;
}
-struct iwl_d3_iter_data {
- struct iwl_mvm *mvm;
- struct ieee80211_vif *vif;
- bool error;
-};
-
-static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_d3_iter_data *data = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return;
-
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- if (data->vif) {
- IWL_ERR(data->mvm, "More than one managed interface active!\n");
- data->error = true;
- return;
- }
-
- data->vif = vif;
-}
-
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -783,130 +756,81 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
-static int
-iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
- const struct iwl_wowlan_config_cmd_v3 *cmd)
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
- /* start only with the v2 part of the command */
- u16 cmd_len = sizeof(cmd->common);
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
- cmd_len = sizeof(*cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
- cmd_len, cmd);
-}
-
-static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan,
- bool test)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_d3_iter_data suspend_iter_data = {
- .mvm = mvm,
- };
- struct ieee80211_vif *vif;
- struct iwl_mvm_vif *mvmvif;
- struct ieee80211_sta *ap_sta;
- struct iwl_mvm_sta *mvm_ap_sta;
- struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
- struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
- struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
- struct iwl_d3_manager_config d3_cfg_cmd_data = {
- /*
- * Program the minimum sleep time to 10 seconds, as many
- * platforms have issues processing a wakeup signal while
- * still being in the process of suspending.
- */
- .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
- };
- struct iwl_host_cmd d3_cfg_cmd = {
- .id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
- .data[0] = &d3_cfg_cmd_data,
- .len[0] = sizeof(d3_cfg_cmd_data),
- };
- struct wowlan_key_data key_data = {
- .use_rsc_tsc = false,
- .tkip = &tkip_cmd,
- .use_tkip = false,
- };
- int ret;
- int len __maybe_unused;
-
- if (!wowlan) {
- /*
- * mac80211 shouldn't get here, but for D3 test
- * it doesn't warrant a warning
- */
- WARN_ON(!test);
- return -EINVAL;
- }
-
- key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
- if (!key_data.rsc_tsc)
- return -ENOMEM;
+ iwl_mvm_cancel_scan(mvm);
- mutex_lock(&mvm->mutex);
+ iwl_trans_stop_device(mvm->trans);
- /* see if there's only a single BSS vif and it's associated */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_iface_iterator, &suspend_iter_data);
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+	 * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- if (suspend_iter_data.error || !suspend_iter_data.vif) {
- ret = 1;
- goto out_noreset;
- }
+ /* We reprogram keys and shouldn't allocate new key indices */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
- vif = suspend_iter_data.vif;
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
- ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(ap_sta)) {
- ret = -EINVAL;
- goto out_noreset;
- }
+ return iwl_mvm_load_d3_fw(mvm);
+}
- mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ int ret;
+ struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
+ /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
- wowlan_config_cmd.common.is_11n_connection =
+ wowlan_config_cmd->is_11n_connection =
ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
- goto out_noreset;
- wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
+ return ret;
+
+ wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
- iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
if (wowlan->disconnect)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@@ -914,44 +838,43 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
}
- iwl_mvm_cancel_scan(mvm);
-
- iwl_trans_stop_device(mvm->trans);
-
- /*
- * Set the HW restart bit -- this is mostly true as we're
- * going to load new firmware and reprogram that, though
- * the reprogramming is going to be manual to avoid adding
- * all the MACs that aren't support.
- * We don't have to clear up everything though because the
- * reprogramming is manual. When we resume, we'll actually
- * go through a proper restart sequence again to switch
- * back to the runtime firmware image.
- */
- set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-
- /* We reprogram keys and shouldn't allocate new key indices */
- memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ return 0;
+}
- mvm->ptk_ivlen = 0;
- mvm->ptk_icvlen = 0;
- mvm->ptk_ivlen = 0;
- mvm->ptk_icvlen = 0;
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct wowlan_key_data key_data = {
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret;
- ret = iwl_mvm_load_d3_fw(mvm);
+ ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
- goto out;
+ return ret;
ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
if (ret)
- goto out;
+ return ret;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
if (!iwlwifi_mod_params.sw_crypto) {
/*
@@ -1010,7 +933,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
- ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(*wowlan_config_cmd),
+ wowlan_config_cmd);
if (ret)
goto out;
@@ -1023,8 +948,153 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
goto out;
ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
+
+out:
+ kfree(key_data.rsc_tsc);
+ return ret;
+}
+
+static int
+iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ int ret;
+
+ ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
- goto out;
+ return ret;
+
+ /* rfkill release can be either for wowlan or netdetect */
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
+ return -EBUSY;
+
+ /* save the sched scan matchsets... */
+ if (nd_config->n_match_sets) {
+ mvm->nd_match_sets = kmemdup(nd_config->match_sets,
+ sizeof(*nd_config->match_sets) *
+ nd_config->n_match_sets,
+ GFP_KERNEL);
+ if (mvm->nd_match_sets)
+ mvm->n_nd_match_sets = nd_config->n_match_sets;
+ }
+
+ /* ...and the sched scan channels for later reporting */
+ mvm->nd_channels = kmemdup(nd_config->channels,
+ sizeof(*nd_config->channels) *
+ nd_config->n_channels,
+ GFP_KERNEL);
+ if (mvm->nd_channels)
+ mvm->n_nd_channels = nd_config->n_channels;
+
+ return 0;
+}
+
+static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
+{
+ kfree(mvm->nd_match_sets);
+ mvm->nd_match_sets = NULL;
+ mvm->n_nd_match_sets = 0;
+ kfree(mvm->nd_channels);
+ mvm->nd_channels = NULL;
+ mvm->n_nd_channels = 0;
+}
+
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan,
+ bool test)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = NULL;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ /*
+ * Program the minimum sleep time to 10 seconds, as many
+ * platforms have issues processing a wakeup signal while
+ * still being in the process of suspending.
+ */
+ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+ };
+ struct iwl_host_cmd d3_cfg_cmd = {
+ .id = D3_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data[0] = &d3_cfg_cmd_data,
+ .len[0] = sizeof(d3_cfg_cmd_data),
+ };
+ int ret;
+ int len __maybe_unused;
+
+ if (!wowlan) {
+ /*
+ * mac80211 shouldn't get here, but for D3 test
+ * it doesn't warrant a warning
+ */
+ WARN_ON(!test);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif)) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+ /* if we're not associated, this must be netdetect */
+ if (!wowlan->nd_config && !mvm->nd_config) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_netdetect_config(
+ mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = true;
+ } else {
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out_noreset;
+ ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = false;
+ }
ret = iwl_mvm_power_update_device(mvm);
if (ret)
@@ -1057,11 +1127,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- if (ret < 0)
+ if (ret < 0) {
ieee80211_restart_hw(mvm->hw);
+ iwl_mvm_free_nd(mvm);
+ }
out_noreset:
- kfree(key_data.rsc_tsc);
-
mutex_unlock(&mvm->mutex);
return ret;
@@ -1071,6 +1141,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ iwl_trans_suspend(mvm->trans);
if (iwl_mvm_is_d0i3_supported(mvm)) {
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
@@ -1449,9 +1520,8 @@ out:
return true;
}
-/* releases the MVM mutex */
-static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
struct error_table_start {
@@ -1463,19 +1533,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status *fw_status;
- int ret, len, status_size, i;
- bool keep;
- struct ieee80211_sta *ap_sta;
- struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_wowlan_status *status, *fw_status;
+ int ret, len, status_size;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d)\n",
- err_info.valid);
+ IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
+ err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
@@ -1483,7 +1549,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
- goto out_unlock;
+ return ERR_PTR(-EIO);
}
/* only for tracing for now */
@@ -1494,22 +1560,53 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
- goto out_unlock;
+ return ERR_PTR(ret);
}
/* RF-kill already asserted again... */
- if (!cmd.resp_pkt)
- goto out_unlock;
+ if (!cmd.resp_pkt) {
+ ret = -ERFKILL;
+ goto out_free_resp;
+ }
status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ ret = -EIO;
goto out_free_resp;
}
- fw_status = (void *)cmd.resp_pkt->data;
+ status = (void *)cmd.resp_pkt->data;
+ if (len != (status_size +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ fw_status = kmemdup(status, len, GFP_KERNEL);
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret ? ERR_PTR(ret) : fw_status;
+}
+
+/* releases the MVM mutex */
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status *fw_status;
+ int i;
+ bool keep;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (IS_ERR_OR_NULL(fw_status))
+ goto out_unlock;
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
@@ -1522,17 +1619,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
le32_to_cpu(fw_status->wake_packet_bufsize);
status.wake_packet = fw_status->wake_packet;
- if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out_free_resp;
- }
-
/* still at hard-coded place 0 for D3 image */
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[0],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta))
- goto out_free_resp;
+ goto out_free;
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -1549,16 +1641,151 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
- iwl_free_resp(&cmd);
+ kfree(fw_status);
return keep;
- out_free_resp:
- iwl_free_resp(&cmd);
- out_unlock:
+out_free:
+ kfree(fw_status);
+out_unlock:
mutex_unlock(&mvm->mutex);
return false;
}
+struct iwl_mvm_nd_query_results {
+ u32 matched_profiles;
+ struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+};
+
+static int
+iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *results)
+{
+ struct iwl_scan_offload_profiles_query *query;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+ return ret;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt) {
+ ret = -ERFKILL;
+ goto out_free_resp;
+ }
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < sizeof(*query)) {
+ IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ query = (void *)cmd.resp_pkt->data;
+
+ results->matched_profiles = le32_to_cpu(query->matched_profiles);
+ memcpy(results->matches, query->matches, sizeof(results->matches));
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_wowlan_nd_info *net_detect = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_mvm_nd_query_results query;
+ struct iwl_wowlan_status *fw_status;
+ unsigned long matched_profiles;
+ u32 reasons = 0;
+ int i, j, n_matches, ret;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (!IS_ERR_OR_NULL(fw_status))
+ reasons = le32_to_cpu(fw_status->wakeup_reasons);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ ret = iwl_mvm_netdetect_query_results(mvm, &query);
+ if (ret || !query.matched_profiles) {
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = query.matched_profiles;
+ if (mvm->n_nd_match_sets) {
+ n_matches = hweight_long(matched_profiles);
+ } else {
+ IWL_ERR(mvm, "no net detect match information available\n");
+ n_matches = 0;
+ }
+
+ net_detect = kzalloc(sizeof(*net_detect) +
+ (n_matches * sizeof(net_detect->matches[0])),
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct iwl_scan_offload_profile_match *fw_match;
+ struct cfg80211_wowlan_nd_match *match;
+ int n_channels = 0;
+
+ fw_match = &query.matches[i];
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
+ n_channels += hweight8(fw_match->matching_channels[j]);
+
+ match = kzalloc(sizeof(*match) +
+ (n_channels * sizeof(*match->channels)),
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
+
+ net_detect->matches[net_detect->n_matches++] = match;
+
+ match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (mvm->n_nd_channels < n_channels)
+ continue;
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
+ if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[j]->center_freq;
+ }
+
+out_report_nd:
+ wakeup.net_detect = net_detect;
+out:
+ iwl_mvm_free_nd(mvm);
+
+ mutex_unlock(&mvm->mutex);
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+
+ if (net_detect) {
+ for (i = 0; i < net_detect->n_matches; i++)
+ kfree(net_detect->matches[i]);
+ kfree(net_detect);
+ }
+}
+
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1592,9 +1819,6 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
- struct iwl_d3_iter_data resume_iter_data = {
- .mvm = mvm,
- };
struct ieee80211_vif *vif = NULL;
int ret;
enum iwl_d3_status d3_status;
@@ -1603,15 +1827,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
/* get the BSS vif pointer again */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_iface_iterator, &resume_iter_data);
-
- if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
goto out_unlock;
- vif = resume_iter_data.vif;
-
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
if (ret)
goto out_unlock;
@@ -1624,11 +1843,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
- keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif);
+ } else {
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
+ if (keep)
+ mvm->keep_vif = vif;
#endif
+ }
/* has unlocked the mutex, so skip that */
goto out;
@@ -1643,6 +1866,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
return 1;
}
@@ -1650,18 +1874,10 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- if (iwl_mvm_is_d0i3_supported(mvm)) {
- bool exit_now;
+ iwl_trans_resume(mvm->trans);
- mutex_lock(&mvm->d0i3_suspend_mutex);
- __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
- exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
- &mvm->d0i3_suspend_flags);
- mutex_unlock(&mvm->d0i3_suspend_mutex);
- if (exit_now)
- _iwl_mvm_exit_d0i3(mvm);
+ if (iwl_mvm_is_d0i3_supported(mvm))
return 0;
- }
return __iwl_mvm_resume(mvm, false);
}
@@ -1741,7 +1957,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
int remaining_time = 10;
mvm->d3_test_active = false;
+ rtnl_lock();
__iwl_mvm_resume(mvm, true);
+ rtnl_unlock();
iwl_abort_notification_waits(&mvm->notif_wait);
ieee80211_restart_hw(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 50527a9bb267..33bf915cd7ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -121,78 +121,6 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
-static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- int ret;
-
- if (!mvm)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
- if (!mvm->fw_error_dump) {
- ret = -ENODATA;
- goto out;
- }
-
- file->private_data = mvm->fw_error_dump;
- mvm->fw_error_dump = NULL;
- ret = 0;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
- ssize_t bytes_read = 0;
- ssize_t bytes_read_trans = 0;
-
- if (*ppos < dump_ptrs->op_mode_len)
- bytes_read +=
- simple_read_from_buffer(user_buf, count, ppos,
- dump_ptrs->op_mode_ptr,
- dump_ptrs->op_mode_len);
-
- if (bytes_read < 0 || *ppos < dump_ptrs->op_mode_len)
- return bytes_read;
-
- if (dump_ptrs->trans_ptr) {
- *ppos -= dump_ptrs->op_mode_len;
- bytes_read_trans =
- simple_read_from_buffer(user_buf + bytes_read,
- count - bytes_read, ppos,
- dump_ptrs->trans_ptr->data,
- dump_ptrs->trans_ptr->len);
- *ppos += dump_ptrs->op_mode_len;
-
- if (bytes_read_trans >= 0)
- bytes_read += bytes_read_trans;
- else if (!bytes_read)
- /* propagate the failure */
- return bytes_read_trans;
- }
-
- return bytes_read;
-
-}
-
-static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
- struct file *file)
-{
- struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
-
- vfree(dump_ptrs->op_mode_ptr);
- vfree(dump_ptrs->trans_ptr);
- kfree(dump_ptrs);
-
- return 0;
-}
-
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1008,7 +936,11 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
return -EINVAL;
- mvm->scan_rx_ant = scan_rx_ant;
+ if (mvm->scan_rx_ant != scan_rx_ant) {
+ mvm->scan_rx_ant = scan_rx_ant;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ iwl_mvm_config_scan(mvm);
+ }
return count;
}
@@ -1250,6 +1182,118 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
return ret;
}
+
+#define MAX_NUM_ND_MATCHSETS 10
+
+static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *seps = ",\n";
+ char *buf_ptr = buf;
+ char *value_str = NULL;
+ int ret, i;
+
+ /* TODO: don't free if write is being called several times in one go */
+ if (mvm->nd_config) {
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+ }
+
+ mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) +
+ (11 * sizeof(struct ieee80211_channel *)),
+ GFP_KERNEL);
+ if (!mvm->nd_config) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ mvm->nd_config->n_channels = 11;
+ mvm->nd_config->scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ mvm->nd_config->interval = 5;
+ mvm->nd_config->min_rssi_thold = -80;
+ for (i = 0; i < mvm->nd_config->n_channels; i++)
+ mvm->nd_config->channels[i] = &mvm->nvm_data->channels[i];
+
+ mvm->nd_config->match_sets =
+ kcalloc(MAX_NUM_ND_MATCHSETS,
+ sizeof(*mvm->nd_config->match_sets),
+ GFP_KERNEL);
+ if (!mvm->nd_config->match_sets) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ while ((value_str = strsep(&buf_ptr, seps)) &&
+ strlen(value_str)) {
+ struct cfg80211_match_set *set;
+
+ if (mvm->nd_config->n_match_sets >= MAX_NUM_ND_MATCHSETS) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ set = &mvm->nd_config->match_sets[mvm->nd_config->n_match_sets];
+ set->ssid.ssid_len = strlen(value_str);
+
+ if (set->ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ memcpy(set->ssid.ssid, value_str, set->ssid.ssid_len);
+
+ mvm->nd_config->n_match_sets++;
+ }
+
+ ret = count;
+
+ if (mvm->nd_config->n_match_sets)
+ goto out;
+
+out_free:
+ if (mvm->nd_config)
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+out:
+ return ret;
+}
+
+static ssize_t
+iwl_dbgfs_netdetect_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+	ssize_t bufsz, ret;
+ char *buf;
+ int i, n_match_sets, pos = 0;
+
+ n_match_sets = mvm->nd_config ? mvm->nd_config->n_match_sets : 0;
+
+ bufsz = n_match_sets * (IEEE80211_MAX_SSID_LEN + 1) + 1;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_match_sets; i++) {
+ if (pos +
+ mvm->nd_config->match_sets[i].ssid.ssid_len + 2 > bufsz) {
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(buf + pos, mvm->nd_config->match_sets[i].ssid.ssid,
+ mvm->nd_config->match_sets[i].ssid.ssid_len);
+ pos += mvm->nd_config->match_sets[i].ssid.ssid_len;
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+out:
+ kfree(buf);
+ return ret;
+}
#endif
#define PRINT_MVM_REF(ref) do { \
@@ -1295,6 +1339,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_NMI);
PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
+ PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1415,12 +1460,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
-static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
- .open = iwl_dbgfs_fw_error_dump_open,
- .read = iwl_dbgfs_fw_error_dump_read,
- .release = iwl_dbgfs_fw_error_dump_release,
-};
-
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
@@ -1428,6 +1467,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(netdetect, 384);
#endif
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
@@ -1446,7 +1486,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
@@ -1487,6 +1526,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
goto err;
+ MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
#endif
if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 816883f9ff94..f3b11897991e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -84,6 +84,8 @@
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
+ * @BT_COEX_TTC:
+ * @BT_COEX_RRC:
*
 * The COEX_MODE must be set for each command, even if it is not changed.
*/
@@ -100,6 +102,8 @@ enum iwl_bt_coex_flags {
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
+ BT_COEX_TTC = BIT(20),
+ BT_COEX_RRC = BIT(21),
};
/*
@@ -127,6 +131,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
BT_VALID_SYNC_TO_SCO = BIT(18),
+ BT_VALID_TTC = BIT(20),
+ BT_VALID_RRC = BIT(21),
};
/**
@@ -506,7 +512,8 @@ struct iwl_bt_coex_profile_notif_old {
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 ttc_enabled;
- __le16 reserved;
+ u8 rrc_enabled;
+ u8 reserved;
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index e74cdf2132f8..6d3bea5c59d1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -241,16 +241,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
-struct iwl_wowlan_config_cmd_v2 {
+struct iwl_wowlan_config_cmd {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
u8 wowlan_ba_teardown_tids;
u8 is_11n_connection;
-} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
-
-struct iwl_wowlan_config_cmd_v3 {
- struct iwl_wowlan_config_cmd_v2 common;
u8 offloading_tid;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 2fd8ad4633e0..430020047b77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -370,7 +370,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
#define IWL_BF_DEBUG_FLAG_D0I3 0
-#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
#define IWL_BF_ESCAPE_TIMER_D0I3 0
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1354c68f6468..1f2acf47bfb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -794,4 +794,301 @@ struct iwl_periodic_scan_complete {
__le32 reserved;
} __packed;
+/* UMAC Scan API */
+
+/**
+ * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
+ * @size: size of the command (not including header)
+ * @reserved0: for future use and alignment
+ * @ver: API version number
+ */
+struct iwl_mvm_umac_cmd_hdr {
+ __le16 size;
+ u8 reserved0;
+ u8 ver;
+} __packed;
+
+#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+
+enum scan_config_flags {
+ SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
+ SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
+ SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
+ SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
+ SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
+ SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
+ SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
+ SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
+
+ /* Bits 26-31 are for num of channels in channel_array */
+#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+};
+
+enum scan_config_rates {
+ /* OFDM basic rates */
+ SCAN_CONFIG_RATE_6M = BIT(0),
+ SCAN_CONFIG_RATE_9M = BIT(1),
+ SCAN_CONFIG_RATE_12M = BIT(2),
+ SCAN_CONFIG_RATE_18M = BIT(3),
+ SCAN_CONFIG_RATE_24M = BIT(4),
+ SCAN_CONFIG_RATE_36M = BIT(5),
+ SCAN_CONFIG_RATE_48M = BIT(6),
+ SCAN_CONFIG_RATE_54M = BIT(7),
+ /* CCK basic rates */
+ SCAN_CONFIG_RATE_1M = BIT(8),
+ SCAN_CONFIG_RATE_2M = BIT(9),
+ SCAN_CONFIG_RATE_5M = BIT(10),
+ SCAN_CONFIG_RATE_11M = BIT(11),
+
+ /* Bits 16-27 are for supported rates */
+#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+};
+
+enum iwl_channel_flags {
+ IWL_CHANNEL_FLAG_EBS = BIT(0),
+ IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
+ IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
+};
+
+/**
+ * struct iwl_scan_config
+ * @hdr: umac command header
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell_active: default dwell time for active scan
+ * @dwell_passive: default dwell time for passive scan
+ * @dwell_fragmented: default dwell time for fragmented scan
+ * @reserved: for future use and alignment
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwl_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwl_scan_config {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 flags;
+ __le32 tx_chains;
+ __le32 rx_chains;
+ __le32 legacy_rates;
+ __le32 out_of_channel_time;
+ __le32 suspend_time;
+ u8 dwell_active;
+ u8 dwell_passive;
+ u8 dwell_fragmented;
+ u8 reserved;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_sta_id;
+ u8 channel_flags;
+ u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
+
+/**
+ * iwl_umac_scan_flags
+ * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan is aborted.
+ * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+enum iwl_umac_scan_flags {
+ IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
+ IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
+};
+
+enum iwl_umac_scan_uid_offsets {
+ IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
+ IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
+};
+
+enum iwl_umac_scan_general_flags {
+ IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9)
+};
+
+/**
+ * struct iwl_scan_channel_cfg_umac
+ * @flags: bitmap - bits 0-19: directed scan to the i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwl_scan_channel_cfg_umac {
+ __le32 flags;
+ u8 channel_num;
+ u8 iter_count;
+ __le16 iter_interval;
+} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
+
+/**
+ * struct iwl_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_umac_schedule {
+ __le16 interval;
+ u8 iter_count;
+ u8 reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwl_scan_req_umac_tail {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwl_scan_umac_schedule schedule[2];
+ __le16 delay;
+ __le16 reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwl_scan_probe_req preq;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwl_scan_req_umac
+ * @hdr: umac command header
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved1: for future use and alignment
+ * @active_dwell: dwell time for active scan
+ * @passive_dwell: dwell time for passive scan
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @max_out_time: max out of serving channel time
+ * @suspend_time: max suspend time
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @channel_flags: &enum iwl_scan_channel_flags
+ * @n_channels: num of channels in scan request
+ * @reserved2: for future use and alignment
+ * @data: &struct iwl_scan_channel_cfg_umac and
+ * &struct iwl_scan_req_umac_tail
+ */
+struct iwl_scan_req_umac {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 flags;
+ __le32 uid;
+ __le32 ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ __le32 general_flags;
+ u8 reserved1;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_abort
+ * @hdr: umac command header
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwl_umac_scan_abort {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 uid;
+ __le32 flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_complete
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: scan status, &enum iwl_scan_offload_complete_status
+ * @ebs_status: &enum iwl_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwl_umac_scan_complete {
+ __le32 uid;
+ u8 last_schedule;
+ u8 last_iter;
+ u8 status;
+ u8 ebs_status;
+ __le32 time_from_last_iter;
+ __le32 reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwl_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ *	the channels passed in the scan offload request
+ */
+struct iwl_scan_offload_profile_match {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
#endif
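To make the flag packing above concrete, here is a hedged sketch of building a minimal UMAC scan-config command with the macros defined in this file; the function, its flag choices and the channel handling are illustrative assumptions only:

static struct iwl_scan_config *build_scan_config(int n_channels)
{
	struct iwl_scan_config *cfg;
	size_t len = sizeof(*cfg) + n_channels;	/* one u8 per channel */

	cfg = kzalloc(len, GFP_KERNEL);
	if (!cfg)
		return NULL;

	cfg->hdr.size = cpu_to_le16(len - sizeof(cfg->hdr));
	cfg->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
				 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
				 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
				 SCAN_CONFIG_N_CHANNELS(n_channels));
	cfg->legacy_rates = cpu_to_le32(SCAN_CONFIG_RATE_6M |
					SCAN_CONFIG_RATE_1M |
					SCAN_CONFIG_SUPPORTED_RATE(SCAN_CONFIG_RATE_6M));
	cfg->channel_flags = IWL_CHANNEL_FLAG_EBS |
			     IWL_CHANNEL_FLAG_ACCURATE_EBS;

	/* caller fills cfg->channel_array[] with channel numbers */
	return cfg;
}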
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index c62575d86bcd..88af6dd2ceaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -106,6 +106,12 @@ enum {
DBG_CFG = 0x9,
ANTENNA_COUPLING_NOTIFICATION = 0xa,
+ /* UMAC scan commands */
+ SCAN_CFG_CMD = 0xc,
+ SCAN_REQ_UMAC = 0xd,
+ SCAN_ABORT_UMAC = 0xe,
+ SCAN_COMPLETE_UMAC = 0xf,
+
/* station table */
ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
@@ -122,6 +128,11 @@ enum {
/* global key */
WEP_KEY = 0x20,
+ /* TDLS */
+ TDLS_CHANNEL_SWITCH_CMD = 0x27,
+ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
+ TDLS_CONFIG_CMD = 0xa7,
+
/* MAC and Binding commands */
MAC_CONTEXT_CMD = 0x28,
TIME_EVENT_CMD = 0x29, /* both CMD and response */
@@ -190,6 +201,8 @@ enum {
/* Power - new power table command */
MAC_PM_POWER_TABLE = 0xa9,
+ MFUART_LOAD_NOTIFICATION = 0xb1,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -236,11 +249,9 @@ enum {
WOWLAN_TX_POWER_PER_DB = 0xe6,
/* and for NetDetect */
- NET_DETECT_CONFIG_CMD = 0x54,
- NET_DETECT_PROFILES_QUERY_CMD = 0x56,
- NET_DETECT_PROFILES_CMD = 0x57,
- NET_DETECT_HOTSPOTS_CMD = 0x58,
- NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
+ SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
+ SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
REPLY_MAX = 0xff,
};
@@ -1201,6 +1212,21 @@ struct iwl_missed_beacons_notif {
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
+ * struct iwl_mfuart_load_notif - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwl_mfuart_load_notif {
+ __le32 installed_ver;
+ __le32 external_ver;
+ __le32 status;
+ __le32 duration;
+} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
+
+/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -1589,7 +1615,7 @@ enum iwl_sf_scenario {
#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
/* smart FIFO default values */
-#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_SISO 6144
#define SF_W_MARK_MIMO2 8192
#define SF_W_MARK_MIMO3 6144
#define SF_W_MARK_LEGACY 4096
@@ -1711,4 +1737,145 @@ struct iwl_scd_txq_cfg_cmd {
u8 flags;
} __packed;
+/***********************************
+ * TDLS API
+ ***********************************/
+
+/* Type of TDLS request */
+enum iwl_tdls_channel_switch_type {
+ TDLS_SEND_CHAN_SW_REQ = 0,
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
+ TDLS_MOVE_CH,
+}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
+
+/**
+ * Switch timing sub-element in a TDLS channel-switch command
+ * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
+ * received from peer
+ * @max_offchan_duration: the maximum number of microseconds out of a DTIM that
+ * is given to the TDLS off-channel communication. For instance, if the DTIM
+ * is 200 TU and the TDLS peer is to be given 25% of the time, the value
+ * given will be 50 TU, or 50 * 1024 when translated into microseconds.
+ * @switch_time: switch time the peer sent in its channel switch timing IE
+ * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
+ */
+struct iwl_tdls_channel_switch_timing {
+ __le32 frame_timestamp; /* GP2 time of peer packet Rx */
+ __le32 max_offchan_duration; /* given in micro-seconds */
+ __le32 switch_time; /* given in micro-seconds */
+ __le32 switch_timeout; /* given in micro-seconds */
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
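/*
 * Illustrative sketch (not part of this patch): the max_offchan_duration
 * arithmetic described in the kernel-doc above. With a 200 TU DTIM and 25%
 * of the airtime given to the TDLS peer, the result is
 * 50 TU = 50 * 1024 = 51200 microseconds. The helper name and its
 * parameters are made up for the example.
 */
static u32 example_offchan_duration_usec(u32 dtim_tu, u32 peer_percent)
{
	u32 offchan_tu = dtim_tu * peer_percent / 100;	/* 200 * 25 / 100 = 50 TU */

	return offchan_tu * 1024;			/* 1 TU = 1024 us */
}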
+
+#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
+
+/**
+ * TDLS channel switch frame template
+ *
+ * A template representing a TDLS channel-switch request or response frame
+ *
+ * @switch_time_offset: offset to the channel switch timing IE in the template
+ * @tx_cmd: Tx parameters for the frame
+ * @data: frame data
+ */
+struct iwl_tdls_channel_switch_frame {
+ __le32 switch_time_offset;
+ struct iwl_tx_cmd tx_cmd;
+ u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
+
+/**
+ * TDLS channel switch command
+ *
+ * The command is sent to initiate a channel switch and also in response to
+ * incoming TDLS channel-switch request/response packets from remote peers.
+ *
+ * @switch_type: see &enum iwl_tdls_channel_switch_type
+ * @peer_sta_id: station id of TDLS peer
+ * @ci: channel we switch to
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd {
+ u8 switch_type;
+ __le32 peer_sta_id;
+ struct iwl_fw_channel_info ci;
+ struct iwl_tdls_channel_switch_timing timing;
+ struct iwl_tdls_channel_switch_frame frame;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
+
+/**
+ * TDLS channel switch start notification
+ *
+ * @status: non-zero on success
+ * @offchannel_duration: duration given in microseconds
+ * @sta_id: peer currently performing the channel-switch with
+ */
+struct iwl_tdls_channel_switch_notif {
+ __le32 status;
+ __le32 offchannel_duration;
+ __le32 sta_id;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
+
+/**
+ * TDLS station info
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
+ * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
+ * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
+ */
+struct iwl_tdls_sta_info {
+ u8 sta_id;
+ u8 tx_to_peer_tid;
+ __le16 tx_to_peer_ssn;
+ __le32 is_initiator;
+} __packed; /* TDLS_STA_INFO_VER_1 */
+
+/**
+ * TDLS basic config command
+ *
+ * @id_and_color: MAC id and color being configured
+ * @tdls_peer_count: number of currently connected TDLS peers
+ * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
+ * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
+ * @sta_info: per-station info. Only the first tdls_peer_count entries are set
+ * @pti_req_data_offset: offset of network-level data for the PTI template
+ * @pti_req_tx_cmd: Tx parameters for PTI request template
+ * @pti_req_template: PTI request template data
+ */
+struct iwl_tdls_config_cmd {
+ __le32 id_and_color; /* mac id and color */
+ u8 tdls_peer_count;
+ u8 tx_to_ap_tid;
+ __le16 tx_to_ap_ssn;
+ struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+
+ __le32 pti_req_data_offset;
+ struct iwl_tx_cmd pti_req_tx_cmd;
+ u8 pti_req_template[0];
+} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
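/*
 * Illustrative sketch (not part of this patch): allocating a TDLS_CONFIG_CMD
 * with its trailing, variable-length PTI request template (the
 * pti_req_template[0] flexible array above). The function name and the
 * tmpl/tmpl_len parameters are made up for the example; the offset value
 * of 0 is only a placeholder.
 */
static struct iwl_tdls_config_cmd *
example_alloc_tdls_config(const u8 *tmpl, size_t tmpl_len)
{
	struct iwl_tdls_config_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd) + tmpl_len, GFP_KERNEL);
	if (!cmd)
		return NULL;

	memcpy(cmd->pti_req_template, tmpl, tmpl_len);
	/* points at the network-level data inside the template */
	cmd->pti_req_data_offset = cpu_to_le32(0);

	return cmd;
}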
+
+/**
+ * TDLS per-station config information from FW
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
+ * the peer
+ */
+struct iwl_tdls_config_sta_info_res {
+ __le16 sta_id;
+ __le16 tx_to_peer_last_seq;
+} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
+
+/**
+ * TDLS config information from FW
+ *
+ * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
+ * @sta_info: per-station TDLS config information
+ */
+struct iwl_tdls_config_res {
+ __le32 tx_to_ap_last_seq;
+ struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index eb03943f8463..d0fa6e9ed590 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -186,7 +186,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
static const u8 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
- fw = iwl_get_ucode_image(mvm, ucode_type);
+ if (ucode_type == IWL_UCODE_REGULAR &&
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
+ iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
+ fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+ else
+ fw = iwl_get_ucode_image(mvm, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@@ -227,6 +232,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
st_fwrd_space.addr = mvm->sf_space.addr;
st_fwrd_space.size = mvm->sf_space.size;
ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
+ return ret;
+ }
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
@@ -390,6 +399,42 @@ out:
return ret;
}
+static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
+ enum iwl_fw_dbg_conf conf_id)
+{
+ u8 *ptr;
+ int ret;
+ int i;
+
+ if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
+ "Invalid configuration %d\n", conf_id))
+ return -EINVAL;
+
+ if (!mvm->fw->dbg_conf_tlv[conf_id])
+ return -EINVAL;
+
+ if (mvm->fw_dbg_conf != FW_DBG_INVALID)
+ IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
+ mvm->fw_dbg_conf);
+
+ /* Send all HCMDs for configuring the FW debug */
+ ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
+ for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+ struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
+ le16_to_cpu(cmd->len), cmd->data);
+ if (ret)
+ return ret;
+
+ ptr += sizeof(*cmd);
+ ptr += le16_to_cpu(cmd->len);
+ }
+
+ mvm->fw_dbg_conf = conf_id;
+ return ret;
+}
+
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -441,6 +486,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+ mvm->fw_dbg_conf = FW_DBG_INVALID;
+ iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
+
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -462,6 +510,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -501,6 +551,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ ret = iwl_mvm_config_scan(mvm);
+ if (ret)
+ goto error;
+ }
+
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -587,3 +643,19 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
le32_to_cpu(radio_version->radio_dash));
return 0;
}
+
+int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver),
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration));
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 0c5c0b0e23f5..f6d86ccce6a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -83,11 +83,15 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
- u32 used_hw_queues;
enum iwl_tsf_id preferred_tsf;
bool found_vif;
};
+struct iwl_mvm_hw_queues_iface_iterator_data {
+ struct ieee80211_vif *exclude_vif;
+ unsigned long used_hw_queues;
+};
+
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -197,8 +201,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
/*
* Get the mask of the queues used by the vif
*/
-u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
{
u32 qmask = 0, ac;
@@ -214,6 +217,54 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
return qmask;
}
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+ /* exclude the given vif */
+ if (vif == data->exclude_vif)
+ return;
+
+ data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* Mark the queues used by the sta */
+ data->used_hw_queues |= mvmsta->tfd_queue_msk;
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data data = {
+ .exclude_vif = exclude_vif,
+ .used_hw_queues =
+ BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+ BIT(mvm->aux_queue) |
+ BIT(IWL_MVM_CMD_QUEUE),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* mark all VIF used hw queues */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_iface_hw_queues_iter, &data);
+
+ /* don't assign the same hw queues as TDLS stations */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_mac_sta_hw_queues_iter,
+ &data);
+
+ return data.used_hw_queues;
+}
+
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -226,9 +277,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
return;
}
- /* Mark the queues used by the vif */
- data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(data->mvm, vif);
-
/* Mark MAC IDs as used by clearing the available bit, and
* (below) mark TSFs as used if their existing use is not
* compatible with the new interface type.
@@ -275,10 +323,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
/* no preference yet */
.preferred_tsf = NUM_TSF_IDS,
- .used_hw_queues =
- BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_CMD_QUEUE),
.found_vif = false,
};
u32 ac;
@@ -317,6 +361,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data);
+ used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
/*
* In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find
@@ -366,8 +412,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
}
- used_hw_queues = data.used_hw_queues;
-
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
@@ -1219,17 +1263,25 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
- struct ieee80211_vif *csa_vif, u32 gp2)
+ struct ieee80211_vif *csa_vif, u32 gp2,
+ bool tx_success)
{
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(csa_vif);
+ /* Don't start the countdown from a failed beacon */
+ if (!tx_success && !mvmvif->csa_countdown)
+ return;
+
+ mvmvif->csa_countdown = true;
+
if (!ieee80211_csa_is_complete(csa_vif)) {
int c = ieee80211_csa_update_counter(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p &&
- !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) {
+ !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
+ tx_success) {
u32 rel_time = (c + 1) *
csa_vif->bss_conf.beacon_int -
IWL_MVM_CHANNEL_SWITCH_TIME_GO;
@@ -1252,38 +1304,30 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
- u64 tsf;
+ u16 status;
lockdep_assert_held(&mvm->mutex);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_CAPA_EXTENDED_BEACON) {
- struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
-
- beacon_notify_hdr = &beacon->beacon_notify_hdr;
- tsf = le64_to_cpu(beacon->tsf);
- mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
- } else {
- struct iwl_beacon_notif *beacon = (void *)pkt->data;
-
- beacon_notify_hdr = &beacon->beacon_notify_hdr;
- tsf = le64_to_cpu(beacon->tsf);
- }
+ beacon_notify_hdr = &beacon->beacon_notify_hdr;
+ mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+ status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
- le16_to_cpu(beacon_notify_hdr->status.status) &
- TX_STATUS_MSK,
- beacon_notify_hdr->failure_frame, tsf,
+ status, beacon_notify_hdr->failure_frame,
+ le64_to_cpu(beacon->tsf),
mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate));
csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
if (unlikely(csa_vif && csa_vif->csa_active))
- iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2);
+ iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
+ (status == TX_STATUS_SUCCESS));
tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b6d2683da3a9..31a5b3f4266c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -69,6 +69,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
+#include <linux/devcoredump.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
@@ -253,6 +254,26 @@ static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm->refs_lock);
}
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
+{
+ int i;
+ bool taken = false;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return true;
+
+ spin_lock_bh(&mvm->refs_lock);
+ for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+ if (mvm->refs[i]) {
+ taken = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&mvm->refs_lock);
+
+ return taken;
+}
+
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
iwl_mvm_ref(mvm, ref_type);
@@ -302,7 +323,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
- hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
hw->rate_control_algorithm = "iwl-mvm-rs";
/*
@@ -315,15 +337,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
- IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
!iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
+ mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+ hw->wiphy->features |=
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -343,8 +369,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
- hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
@@ -402,7 +427,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_DYNAMIC_SMPS |
- NL80211_FEATURE_STATIC_SMPS;
+ NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
@@ -440,7 +466,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT;
if (!iwlwifi_mod_params.sw_crypto)
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -449,6 +476,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
hw->wiphy->wowlan = &mvm->wowlan;
}
@@ -463,6 +491,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+ IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ }
+
+ if (mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
+ hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ }
+
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
@@ -679,10 +718,51 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen)
+{
+ const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
+ ssize_t bytes_read;
+ ssize_t bytes_read_trans;
+
+ if (offset < dump_ptrs->op_mode_len) {
+ bytes_read = min_t(ssize_t, count,
+ dump_ptrs->op_mode_len - offset);
+ memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
+ bytes_read);
+ offset += bytes_read;
+ count -= bytes_read;
+
+ if (count == 0)
+ return bytes_read;
+ } else {
+ bytes_read = 0;
+ }
+
+ if (!dump_ptrs->trans_ptr)
+ return bytes_read;
+
+ offset -= dump_ptrs->op_mode_len;
+ bytes_read_trans = min_t(ssize_t, count,
+ dump_ptrs->trans_ptr->len - offset);
+ memcpy(buffer + bytes_read,
+ (u8 *)dump_ptrs->trans_ptr->data + offset,
+ bytes_read_trans);
+
+ return bytes_read + bytes_read_trans;
+}
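/*
 * Illustrative sketch (not part of this patch): how the two-region read
 * callback above behaves when the devcoredump core reads the dump in
 * chunks - op_mode bytes are returned first, transport bytes (if any)
 * follow, and 0 marks the end. The consumer function and the chunk size
 * are made up for the example; datalen is unused by the callback, matching
 * the 0 passed to dev_coredumpm() below.
 */
static void example_consume_coredump(const struct iwl_mvm_dump_ptrs *dump_ptrs,
				     char *buf, size_t chunk)
{
	loff_t offset = 0;
	ssize_t n;

	while ((n = iwl_mvm_read_coredump(buf, offset, chunk,
					  dump_ptrs, 0)) > 0)
		offset += n;
}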
+
+static void iwl_mvm_free_coredump(const void *data)
+{
+ const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
+
+ vfree(fw_error_dump->op_mode_ptr);
+ vfree(fw_error_dump->trans_ptr);
+ kfree(fw_error_dump);
+}
+
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
- static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
@@ -695,10 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->fw_error_dump)
- return;
-
- fw_error_dump = kzalloc(sizeof(*mvm->fw_error_dump), GFP_KERNEL);
+ fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
@@ -773,16 +850,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
- mvm->fw_error_dump = fw_error_dump;
- /* notify the userspace about the error we had */
- kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
+ GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
}
-#endif
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
- iwl_mvm_fw_error_dump(mvm);
+ /* Clear the D3 reconfig flag; we only need it to avoid dumping a
+ * firmware coredump on reconfiguration, and we shouldn't do that
+ * on the D3->D0 transition
+ */
+ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
+ iwl_mvm_fw_error_dump(mvm);
iwl_trans_stop_device(mvm->trans);
@@ -803,6 +883,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+ memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -859,9 +940,8 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
return ret;
}
-static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
+static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
@@ -876,9 +956,50 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
/* allow transport/FW low power modes */
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * If we have TDLS peers, remove them. We don't know the last seqno/PN
+ * of packets the FW sent out, so we must reconnect.
+ */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
+{
+ bool exit_now;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ if (exit_now) {
+ IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+ _iwl_mvm_exit_d0i3(mvm);
+ }
+}
+
+static void
+iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ iwl_mvm_restart_complete(mvm);
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ iwl_mvm_resume_complete(mvm);
+ break;
+ }
+}
+
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
@@ -1087,7 +1208,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- u32 tfd_msk = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
if (tfd_msk) {
mutex_lock(&mvm->mutex);
@@ -1383,6 +1504,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
.cmd = cmd,
};
+ if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
+ return false;
+
memset(cmd, 0, sizeof(*cmd));
cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
cmd->max_macs = ARRAY_SIZE(cmd->macs);
@@ -1835,9 +1959,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
+ if (ret)
+ return ret;
+ }
mutex_lock(&mvm->mutex);
@@ -1848,7 +1974,9 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
+ else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
else
ret = iwl_mvm_scan_request(mvm, vif, req);
@@ -2065,6 +2193,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
out_unlock:
mutex_unlock(&mvm->mutex);
+ if (sta->tdls && ret == 0) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ }
+
return ret;
}
@@ -2147,9 +2284,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
+ if (ret)
+ return ret;
+ }
mutex_lock(&mvm->mutex);
@@ -2169,27 +2308,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- mvm->scan_status = IWL_MVM_SCAN_SCHED;
-
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
- ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
- if (ret)
- goto err;
- }
-
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
if (ret)
- goto err;
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
- else
- ret = iwl_mvm_sched_scan_start(mvm, req);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
- if (!ret)
- goto out;
-err:
- mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
return ret;
@@ -2207,6 +2329,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
+
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -2235,12 +2358,16 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- /*
- * Support for TX only, at least for now, so accept
- * the key and do nothing else. Then mac80211 will
- * pass it for TX but we don't have to use it for RX.
+ /* For non-client mode, only use WEP keys for TX as we probably
+ * don't have a station yet anyway and would then have to keep
+ * track of the keys, linking them to each of the clients/peers
+ * as they appear. For now, don't do that; WEP offload doesn't
+ * really matter much for performance, but we need it for some
+ * other offload features in client mode.
*/
- return 0;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+ break;
default:
/* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes &&
@@ -2563,7 +2690,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
- iwl_mvm_stop_p2p_roc(mvm);
+ iwl_mvm_stop_roc(mvm);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -2676,8 +2803,8 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
switch (vif->type) {
case NL80211_IFTYPE_AP:
- /* Unless it's a CSA flow we have nothing to do here */
- if (vif->csa_active) {
+ /* only needed if we're switching chanctx (i.e. during CSA) */
+ if (switching_chanctx) {
mvmvif->ap_ibss_active = true;
break;
}
@@ -2721,23 +2848,32 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
/* Handle binding during CSA */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) {
+ if (vif->type == NL80211_IFTYPE_AP) {
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
- if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mvm_sta *mvmsta;
+ if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
+ u32 duration = 2 * vif->bss_conf.beacon_int;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
+ /* iwl_mvm_protect_session() reads directly from the
+ * device (the system time), so make sure it is
+ * available.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
+ if (ret)
+ goto out_remove_binding;
- if (WARN_ON(!mvmsta))
- goto out;
+ /* Protect the session to make sure we hear the first
+ * beacon on the new channel.
+ */
+ iwl_mvm_protect_session(mvm, vif, duration, duration,
+ vif->bss_conf.beacon_int / 2,
+ true);
- /* TODO: only re-enable after the first beacon */
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+
+ iwl_mvm_update_quotas(mvm, NULL);
}
goto out;
@@ -2771,7 +2907,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_vif *disabled_vif = NULL;
- struct iwl_mvm_sta *mvmsta;
lockdep_assert_held(&mvm->mutex);
@@ -2786,9 +2921,11 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
break;
case NL80211_IFTYPE_AP:
/* This part is triggered only during CSA */
- if (!vif->csa_active || !mvmvif->ap_ibss_active)
+ if (!switching_chanctx || !mvmvif->ap_ibss_active)
goto out;
+ mvmvif->csa_countdown = false;
+
/* Set CS bit on all the stations */
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
@@ -2803,12 +2940,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
disabled_vif = vif;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
-
- if (!WARN_ON(!mvmsta))
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
-
iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
break;
default:
@@ -2834,18 +2965,12 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_vif_chanctx_switch *vifs,
- int n_vifs,
- enum ieee80211_chanctx_switch_mode mode)
+static int
+iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- /* we only support SWAP_CONTEXTS and with a single-vif right now */
- if (mode != CHANCTX_SWMODE_SWAP_CONTEXTS || n_vifs > 1)
- return -EOPNOTSUPP;
-
mutex_lock(&mvm->mutex);
__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
@@ -2874,15 +2999,51 @@ out_remove:
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
out_reassign:
- ret = __iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx);
- if (ret) {
+ if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
goto out_restart;
}
- ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ goto out;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_mvm_nic_restart(mvm, false);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+
+ ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
true);
if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ goto out;
+
+out_reassign:
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
goto out_restart;
}
@@ -2895,6 +3056,34 @@ out_restart:
out:
mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ /* we only support a single-vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
return ret;
}
@@ -2980,27 +3169,134 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
+static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (which is when the absence time event starts).
+ */
+
+ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
+ "dummy channel switch op\n");
+}
+
+static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 apply_time;
+ int ret;
mutex_lock(&mvm->mutex);
- csa_vif = rcu_dereference_protected(mvm->csa_vif,
- lockdep_is_held(&mvm->mutex));
- if (WARN(csa_vif && csa_vif->csa_active,
- "Another CSA is already in progress"))
+ IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif =
+ rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+ "Another CSA is already in progress")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ rcu_assign_pointer(mvm->csa_vif, vif);
+
+ if (WARN_ONCE(mvmvif->csa_countdown,
+ "Previous CSA countdown didn't complete")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* Schedule the time event to a bit before beacon 1,
+ * to make sure we're in the new channel when the
+ * GO/AP arrives.
+ */
+ apply_time = chsw->device_timestamp +
+ ((vif->bss_conf.beacon_int * (chsw->count - 1) -
+ IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
+
+ if (chsw->block_tx)
+ iwl_mvm_csa_client_absent(mvm, vif);
+
+ iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+ apply_time);
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mvmvif->ps_disabled = true;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+ if (ret)
goto out_unlock;
- IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
- chandef->center_freq1);
- rcu_assign_pointer(mvm->csa_vif, vif);
+ /* we won't be on this channel any longer */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
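/*
 * Illustrative sketch (not part of this patch): the client-side apply_time
 * computation used in iwl_mvm_pre_channel_switch() above, isolated with
 * example numbers. With beacon_int = 100 TU, count = 5 and
 * IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT = 10 TU, the CSA time event starts
 * (100 * 4 - 10) * 1024 = 399360 us after device_timestamp, i.e. 10 TU
 * before "beacon 1" on the new channel. The helper name is made up for
 * the example.
 */
static u32 example_csa_apply_time(u32 device_timestamp, u16 beacon_int, u8 count)
{
	return device_timestamp +
	       ((beacon_int * (count - 1) -
		 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
}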
+
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
out_unlock:
mutex_unlock(&mvm->mutex);
+
+ return ret;
}
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
@@ -3009,33 +3305,52 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_sta *sta;
+ int i;
+ u32 msk = 0;
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
- if (WARN_ON_ONCE(!mvmsta))
- goto done;
+ /* flush the AP-station and all TDLS peers */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+
+ /* make sure only TDLS peers or the AP are flushed */
+ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
+
+ msk |= mvmsta->tfd_queue_msk;
+ }
if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
+ if (iwl_mvm_flush_tx_path(mvm, msk, true))
IWL_ERR(mvm, "flush request fail\n");
+ mutex_unlock(&mvm->mutex);
} else {
- iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvmsta->tfd_queue_msk);
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
-done:
- mutex_unlock(&mvm->mutex);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
- .restart_complete = iwl_mvm_mac_restart_complete,
+ .reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
.add_interface = iwl_mvm_mac_add_interface,
.remove_interface = iwl_mvm_mac_remove_interface,
@@ -3076,7 +3391,13 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
- .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
+ .channel_switch = iwl_mvm_channel_switch,
+ .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .post_channel_switch = iwl_mvm_post_channel_switch,
+
+ .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+ .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+ .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 845429c88cf4..d24660fb4ef2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -87,12 +87,18 @@
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
-/* This value represents the number of TUs before CSA "beacon 0" TBTT
- * when the CSA time-event needs to be scheduled to start. It must be
- * big enough to ensure that we switch in time.
+/* For GO, this value represents the number of TUs before CSA "beacon
+ * 0" TBTT when the CSA time-event needs to be scheduled to start. It
+ * must be big enough to ensure that we switch in time.
*/
#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
+/* For client, this value represents the number of TUs before CSA
+ * "beacon 1" TBTT, instead. This is because we don't know when the
+ * GO/AP will be in the new channel, so we switch early enough.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
+
/*
* This value (in TUs) is used to fine tune the CSA NoA end time which should
* be just before "beacon 0" TBTT.
@@ -269,6 +275,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_NMI,
IWL_MVM_REF_TM_CMD,
IWL_MVM_REF_EXIT_WORK,
+ IWL_MVM_REF_PROTECT_CSA,
/* update debugfs.c when changing this */
@@ -288,7 +295,6 @@ enum iwl_bt_force_ant_mode {
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
* @ba_enabled: indicated if beacon abort is enabled
-* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
@@ -399,6 +405,9 @@ struct iwl_mvm_vif {
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+ /* Indicates that CSA countdown may be started */
+ bool csa_countdown;
};
static inline struct iwl_mvm_vif *
@@ -519,6 +528,13 @@ enum {
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
+enum iwl_mvm_tdls_cs_state {
+ IWL_MVM_TDLS_SW_IDLE = 0,
+ IWL_MVM_TDLS_SW_REQ_SENT,
+ IWL_MVM_TDLS_SW_REQ_RCVD,
+ IWL_MVM_TDLS_SW_ACTIVE,
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -578,6 +594,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
+ u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -588,6 +605,10 @@ struct iwl_mvm {
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* UMAC scan tracking */
+ u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
+ u8 scan_seq_num, sched_scan_seq_num;
+
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -649,7 +670,7 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
struct work_struct fw_error_dump_wk;
- struct iwl_mvm_dump_ptrs *fw_error_dump;
+ enum iwl_fw_dbg_conf fw_dbg_conf;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -660,6 +681,15 @@ struct iwl_mvm {
#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+
+ /* sched scan settings for net detect */
+ struct cfg80211_sched_scan_request *nd_config;
+ struct ieee80211_scan_ies nd_ies;
+ struct cfg80211_match_set *nd_match_sets;
+ int n_nd_match_sets;
+ struct ieee80211_channel **nd_channels;
+ int n_nd_channels;
+ bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
bool d3_test_active;
@@ -732,6 +762,28 @@ struct iwl_mvm {
u32 ap_last_beacon_gp2;
u8 low_latency_agg_frame_limit;
+
+ /* TDLS channel switch data */
+ struct {
+ struct delayed_work dwork;
+ enum iwl_mvm_tdls_cs_state state;
+
+ /*
+ * Current cs sta - might be different from periodic cs peer
+ * station. Value is meaningless when the cs-state is idle.
+ */
+ u8 cur_sta_id;
+
+ /* TDLS periodic channel-switch peer */
+ struct {
+ u8 sta_id;
+ u8 op_class;
+ bool initiator; /* are we the link initiator */
+ struct cfg80211_chan_def chandef;
+ struct sk_buff *skb; /* ch sw template */
+ u32 ch_sw_tm_ie;
+ } peer;
+ } tdls_cs;
};
/* Extract MVM priv from op_mode and _hw */
@@ -748,6 +800,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ IWL_MVM_STATUS_D3_RECONFIG,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -756,6 +809,26 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+/* Must be called with rcu_read_lock() held; the lock can only be
+ * released once the returned mvmsta is no longer needed.
+ */
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
@@ -829,6 +902,16 @@ int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id);
+void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
@@ -885,6 +968,8 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -898,6 +983,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
/* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -906,8 +993,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
@@ -918,6 +1004,8 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -928,6 +1016,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
struct ieee80211_vif *disabled_vif);
/* Scanning */
+int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
@@ -950,6 +1039,10 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
+int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -964,6 +1057,17 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies);
+/* UMAC scan */
+int iwl_mvm_config_scan(struct iwl_mvm *mvm);
+int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -1043,7 +1147,7 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
- struct iwl_wowlan_config_cmd_v2 *cmd);
+ struct iwl_wowlan_config_cmd *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
@@ -1053,6 +1157,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
@@ -1068,12 +1173,14 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1189,6 +1296,10 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
+int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1200,18 +1311,37 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif);
/* TDLS */
+
+/*
+ * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
+ * This TID is marked as used vs the AP and all connected TDLS peers.
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added);
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
-#else
-static inline void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) {}
-#endif
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index af074563e770..d55fd8e3654c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -339,11 +339,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
} *file_sec;
const u8 *eof, *temp;
int max_section_size;
+ const __le32 *dword_buff;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
+#define NVM_HEADER_0 (0x2A504C54)
+#define NVM_HEADER_1 (0x4E564D2A)
+#define NVM_HEADER_SIZE (4 * sizeof(u32))
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
@@ -372,12 +376,6 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
mvm->nvm_file_name, fw_entry->size);
- if (fw_entry->size < sizeof(*file_sec)) {
- IWL_ERR(mvm, "NVM file too small\n");
- ret = -EINVAL;
- goto out;
- }
-
if (fw_entry->size > MAX_NVM_FILE_LEN) {
IWL_ERR(mvm, "NVM file too large\n");
ret = -EINVAL;
@@ -385,8 +383,25 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
}
eof = fw_entry->data + fw_entry->size;
-
- file_sec = (void *)fw_entry->data;
+ dword_buff = (__le32 *)fw_entry->data;
+
+ /* Some NVM files contain a header.
+ * The header is identified by its first 2 dwords, as follows:
+ * dword[0] = 0x2A504C54
+ * dword[1] = 0x4E564D2A
+ *
+ * This header must be skipped when providing the NVM data to the FW.
+ */
+ if (fw_entry->size > NVM_HEADER_SIZE &&
+ dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+ dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+ file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+ IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
+ le32_to_cpu(dword_buff[3]));
+ } else {
+ file_sec = (void *)fw_entry->data;
+ }
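/*
 * Illustrative sketch (not part of this patch): the optional external NVM
 * file header that the code above detects and skips. Only the two magic
 * values and the 16-byte header size come from the patch; the struct and
 * field names are made up for the example.
 */
struct example_nvm_file_header {
	__le32 magic0;		/* NVM_HEADER_0 == 0x2A504C54 */
	__le32 magic1;		/* NVM_HEADER_1 == 0x4E564D2A */
	__le32 version;		/* logged as "NVM Version" */
	__le32 mfg_date;	/* logged as "NVM Manufacturing date" */
};				/* NVM_HEADER_SIZE == 4 * sizeof(u32) == 16 bytes */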
while (true) {
if (file_sec->data > eof) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
index adcbf4c8edd8..68b0169c8892 100644
--- a/drivers/net/wireless/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c
@@ -67,7 +67,7 @@
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
- struct iwl_wowlan_config_cmd_v2 *cmd)
+ struct iwl_wowlan_config_cmd *cmd)
{
int i;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 5b719ee8e789..97dfba50c682 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -244,6 +244,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
false),
+ RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
+ true),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -254,6 +256,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
+ RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+
+ RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
+ true),
+ RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+
};
#undef RX_HANDLER
#define CMD(x) [x] = #x
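The notifications added above are registered in the mvm RX dispatch table, which maps each firmware notification ID to a handler plus a flag saying whether the handler may run asynchronously. A minimal sketch of that table-plus-dispatcher pattern, with hypothetical IDs and handlers and the async path elided:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rx_packet { uint8_t cmd; /* payload omitted */ };

struct rx_handler {
	uint8_t cmd_id;
	bool async;			/* defer to a workqueue if true */
	void (*fn)(const struct rx_packet *pkt);
};

static void handle_temp_notif(const struct rx_packet *pkt)
{
	(void)pkt;
	puts("temperature notification");
}

static void handle_tdls_notif(const struct rx_packet *pkt)
{
	(void)pkt;
	puts("tdls channel switch notification");
}

/* Hypothetical IDs standing in for DTS_MEASUREMENT_NOTIFICATION etc. */
static const struct rx_handler rx_handlers[] = {
	{ .cmd_id = 0xdd, .async = true, .fn = handle_temp_notif },
	{ .cmd_id = 0xaa, .async = true, .fn = handle_tdls_notif },
};

static void dispatch(const struct rx_packet *pkt)
{
	for (size_t i = 0; i < sizeof(rx_handlers) / sizeof(rx_handlers[0]); i++)
		if (rx_handlers[i].cmd_id == pkt->cmd)
			rx_handlers[i].fn(pkt);	/* async handling elided */
}

int main(void)
{
	struct rx_packet pkt = { .cmd = 0xdd };

	dispatch(&pkt);
	return 0;
}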
@@ -317,11 +325,9 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(WOWLAN_KEK_KCK_MATERIAL),
CMD(WOWLAN_GET_STATUSES),
CMD(WOWLAN_TX_POWER_PER_DB),
- CMD(NET_DETECT_CONFIG_CMD),
- CMD(NET_DETECT_PROFILES_QUERY_CMD),
- CMD(NET_DETECT_PROFILES_CMD),
- CMD(NET_DETECT_HOTSPOTS_CMD),
- CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+ CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
+ CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
+ CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
CMD(CARD_STATE_NOTIFICATION),
CMD(MISSED_BEACONS_NOTIFICATION),
CMD(BT_COEX_PRIO_TABLE),
@@ -344,6 +350,13 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
CMD(SCD_QUEUE_CFG),
+ CMD(SCAN_CFG_CMD),
+ CMD(SCAN_REQ_UMAC),
+ CMD(SCAN_ABORT_UMAC),
+ CMD(SCAN_COMPLETE_UMAC),
+ CMD(TDLS_CHANNEL_SWITCH_CMD),
+ CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
+ CMD(TDLS_CONFIG_CMD),
};
#undef CMD
@@ -403,6 +416,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (cfg->max_rx_agg_size)
hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+ if (cfg->max_tx_agg_size)
+ hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+
op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
@@ -439,6 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
+ INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
@@ -479,6 +496,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+ trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
+ trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
+ memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+ sizeof(trans->dbg_conf_tlv));
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -522,7 +543,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true);
- iwl_trans_stop_device(trans);
+ if (!err || !iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_device(trans);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
@@ -531,16 +553,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- scan_size = sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req);
- else
- scan_size = sizeof(struct iwl_scan_cmd) +
- mvm->fw->ucode_capa.max_probe_length +
- mvm->fw->ucode_capa.n_scan_channels *
- sizeof(struct iwl_scan_channel);
+ scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
@@ -585,16 +598,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
- if (mvm->fw_error_dump) {
- vfree(mvm->fw_error_dump->op_mode_ptr);
- vfree(mvm->fw_error_dump->trans_ptr);
- kfree(mvm->fw_error_dump);
- }
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
+ if (mvm->nd_config) {
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+ }
#endif
iwl_trans_op_mode_leave(mvm->trans);
@@ -991,7 +1004,7 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
}
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
- struct iwl_wowlan_config_cmd_v3 *cmd,
+ struct iwl_wowlan_config_cmd *cmd,
struct iwl_d0i3_iter_data *iter_data)
{
struct ieee80211_sta *ap_sta;
@@ -1007,14 +1020,14 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
goto out;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
- cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
*/
- iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
rcu_read_unlock();
}
@@ -1026,14 +1039,11 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
- struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
- .common = {
- .wakeup_filter =
- cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
- IWL_WOWLAN_WAKEUP_BEACON_MISS |
- IWL_WOWLAN_WAKEUP_LINK_CHANGE |
- IWL_WOWLAN_WAKEUP_BCN_FILTERING),
- },
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
@@ -1045,6 +1055,19 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
synchronize_net();
+ /*
+ * iwl_mvm_ref_sync takes a reference before checking the flag,
+ * so by checking that no reference is held we prevent a state
+ * in which iwl_mvm_ref_sync continues successfully while we
+ * configure the firmware to enter d0i3.
+ */
+ if (iwl_mvm_ref_taken(mvm)) {
+ IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ return 1;
+ }
+
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
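The d0i3 entry path above first publishes IWL_MVM_STATUS_IN_D0I3 and only then checks whether any reference is held, which is the mirror image of iwl_mvm_ref_sync() taking a reference before testing the flag; between them the two orderings guarantee that at least one side notices the other. A minimal sketch of that flag/refcount ordering with C11 atomics and hypothetical names (the real driver also wakes a wait queue, omitted here):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int refcount;		/* held references (iwl_mvm_ref_sync side) */
static atomic_bool in_low_power;	/* analogous to IWL_MVM_STATUS_IN_D0I3 */

/* Caller side: take a reference first, then abort if low power started. */
static bool take_ref(void)
{
	atomic_fetch_add(&refcount, 1);
	if (atomic_load(&in_low_power)) {
		atomic_fetch_sub(&refcount, 1);
		return false;		/* device is entering low power */
	}
	return true;
}

static void put_ref(void)
{
	atomic_fetch_sub(&refcount, 1);
}

/* Power side: publish the flag first, then bail out if a reference exists. */
static bool try_enter_low_power(void)
{
	atomic_store(&in_low_power, true);
	if (atomic_load(&refcount) > 0) {
		atomic_store(&in_low_power, false);	/* abort, like the new code */
		return false;
	}
	return true;			/* safe to configure firmware for d0i3 */
}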
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 12283b55ee84..1c0d4a45c1a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -68,7 +68,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -90,7 +90,7 @@ static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
switch (chandef->chan->center_freq - chandef->center_freq1) {
case -70:
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 5b85b0cc7a2a..2620dd0c45f9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -286,6 +286,27 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
return true;
}
+static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
+{
+ int numerator;
+ int dtim_interval = dtimper * bi;
+
+ if (WARN_ON(!dtim_interval))
+ return 0;
+
+ if (dtimper == 1) {
+ if (bi > 100)
+ numerator = 408;
+ else
+ numerator = 510;
+ } else if (dtimper < 10) {
+ numerator = 612;
+ } else {
+ return 0;
+ }
+ return max(1, (numerator / dtim_interval));
+}
+
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
{
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -308,7 +329,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
- int dtimper, dtimper_msec;
+ int dtimper, bi;
int keep_alive;
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
@@ -317,6 +338,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
dtimper = vif->bss_conf.dtim_period;
+ bi = vif->bss_conf.beacon_int;
/*
* Regardless of power management state the driver must set
@@ -324,10 +346,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
* immediately after association. Check that keep alive period
* is at least 3 * DTIM
*/
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (mvm->ps_disabled)
@@ -352,11 +373,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
radar_detect = iwl_mvm_power_is_radar(vif);
/* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
+ if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = 3;
+ cmd->skip_dtim_periods =
+ iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
+ if (cmd->skip_dtim_periods)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
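Two calculations change in the power code above: the keep-alive period is now derived by converting three DTIM intervals from TUs to seconds (1 TU = 1024 usec) instead of treating a TU as a millisecond, and the number of DTIM periods to skip is computed from the DTIM interval using the 408/510/612 numerators rather than being hard-coded to 3. A standalone sketch of both calculations; the 25-second minimum keep-alive below is only a placeholder for POWER_KEEP_ALIVE_PERIOD_SEC:

#include <stdio.h>

#define TU_TO_USEC(tu)		((unsigned long long)(tu) * 1024ULL)
#define USEC_PER_SEC		1000000ULL
#define KEEP_ALIVE_MIN_SEC	25	/* placeholder, not the driver's value */

/* Keep-alive must cover at least 3 DTIM intervals, expressed in seconds. */
static unsigned int keep_alive_seconds(unsigned int dtim_period,
				       unsigned int beacon_int_tu)
{
	unsigned long long usec = TU_TO_USEC(3ULL * dtim_period * beacon_int_tu);
	unsigned int sec = (unsigned int)((usec + USEC_PER_SEC - 1) / USEC_PER_SEC);

	return sec > KEEP_ALIVE_MIN_SEC ? sec : KEEP_ALIVE_MIN_SEC;
}

/* DTIM periods the firmware may sleep through, as in the diff: short DTIM
 * intervals allow skipping several, long ones none at all. */
static int skip_over_dtim(int dtim_period, int beacon_int_tu)
{
	int dtim_interval = dtim_period * beacon_int_tu;
	int numerator;

	if (!dtim_interval)
		return 0;

	if (dtim_period == 1)
		numerator = beacon_int_tu > 100 ? 408 : 510;
	else if (dtim_period < 10)
		numerator = 612;
	else
		return 0;

	return numerator / dtim_interval > 1 ? numerator / dtim_interval : 1;
}

int main(void)
{
	/* e.g. DTIM period 3, beacon interval 100 TU -> skip 2 DTIM periods */
	printf("skip=%d keep_alive=%us\n",
	       skip_over_dtim(3, 100), keep_alive_seconds(3, 100));
	return 0;
}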
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 18a539999580..30ceb67ed7a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -158,6 +158,12 @@ struct rs_tx_column {
allow_column_func_t checks[MAX_COLUMN_CHECKS];
};
+static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+}
+
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
{
@@ -218,6 +224,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
+ .checks = {
+ rs_ant_allow,
+ },
},
[RS_COLUMN_LEGACY_ANT_B] = {
.mode = RS_LEGACY,
@@ -231,6 +240,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
+ .checks = {
+ rs_ant_allow,
+ },
},
[RS_COLUMN_SISO_ANT_A] = {
.mode = RS_SISO,
@@ -246,6 +258,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
},
},
[RS_COLUMN_SISO_ANT_B] = {
@@ -262,6 +275,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
},
},
[RS_COLUMN_SISO_ANT_A_SGI] = {
@@ -279,6 +293,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
rs_sgi_allow,
},
},
@@ -297,6 +312,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
rs_sgi_allow,
},
},
@@ -505,10 +521,11 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
const char *prefix)
{
- IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d\n",
+ IWL_DEBUG_RATE(mvm,
+ "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
prefix, rs_pretty_lq_type(rate->type),
rate->index, rs_pretty_ant(rate->ant),
- rate->bw, rate->sgi, rate->ldpc);
+ rate->bw, rate->sgi, rate->ldpc, rate->stbc);
}
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@@ -741,6 +758,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
}
+ if (is_siso(rate) && rate->stbc) {
+ /* To enable STBC we need to set both a flag and ANT_AB */
+ ucode_rate |= RATE_MCS_ANT_AB_MSK;
+ ucode_rate |= RATE_MCS_VHT_STBC_MSK;
+ }
+
ucode_rate |= rate->bw;
if (rate->sgi)
ucode_rate |= RATE_MCS_SGI_MSK;
@@ -785,6 +808,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
rate->ldpc = true;
+ if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
+ rate->stbc = true;
rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
@@ -794,7 +819,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_HT_SISO;
- WARN_ON_ONCE(num_of_ant != 1);
+ WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
} else if (nss == 2) {
rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -807,7 +832,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_VHT_SISO;
- WARN_ON_ONCE(num_of_ant != 1);
+ WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
} else if (nss == 2) {
rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -992,7 +1017,15 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
static inline bool rs_rate_match(struct rs_rate *a,
struct rs_rate *b)
{
- return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
+ bool ant_match;
+
+ if (a->stbc)
+ ant_match = (b->ant == ANT_A || b->ant == ANT_B);
+ else
+ ant_match = (a->ant == b->ant);
+
+ return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
+ && ant_match;
}
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -1093,10 +1126,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (time_after(jiffies,
(unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
- int tid;
+ int t;
+
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
- ieee80211_stop_tx_ba_session(sta, tid);
+ for (t = 0; t < IWL_MAX_TID_COUNT; t++)
+ ieee80211_stop_tx_ba_session(sta, t);
iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
return;
@@ -1137,16 +1171,15 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Rate did match, so reset the missed_rate_counter */
lq_sta->missed_rate_counter = 0;
- /* Figure out if rate scale algorithm is in active or search table */
- if (rs_rate_match(&rate,
- &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
+ if (!lq_sta->search_better_tbl) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (rs_rate_match(&rate,
- &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
+ } else {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
+ }
+
+ if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1171,6 +1204,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ /* ampdu_ack_len = 0 means that no BA was received. In this case
+ * treat it as a single frame loss as we don't want the success
+ * ratio to dip too quickly because a BA wasn't received
+ */
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
@@ -1225,7 +1265,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[info->band])
+ if (sta->supp_rates[info->band])
rs_rate_scale_perform(mvm, sta, lq_sta, tid);
}
@@ -1623,6 +1663,8 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
else
rate->type = LQ_LEGACY_G;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ rate->ldpc = false;
rate_mask = lq_sta->active_legacy_rate;
} else if (column->mode == RS_SISO) {
rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
@@ -1634,8 +1676,11 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
WARN_ON_ONCE("Bad column mode");
}
- rate->bw = rs_bw_from_sta_bw(sta);
- rate->ldpc = lq_sta->ldpc;
+ if (column->mode != RS_LEGACY) {
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->ldpc = lq_sta->ldpc;
+ }
+
search_tbl->column = col_id;
rs_set_expected_tpt_table(lq_sta, search_tbl);
@@ -1754,6 +1799,29 @@ out:
return action;
}
+static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvmsta->vif;
+ bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+ !vif->bss_conf.ps);
+
+ /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
+ * supports STBC of at least 1*SS
+ */
+ if (!lq_sta->stbc)
+ return false;
+
+ if (!mvm->ps_disabled && !sta_ps_disabled)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ return true;
+}
+
static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
int *weaker, int *stronger)
{
@@ -2675,6 +2743,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvm->cfg->ht_params->ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
+ (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
+ lq_sta->stbc = true;
} else {
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
@@ -2682,8 +2755,16 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvm->cfg->ht_params->ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
+ lq_sta->stbc = true;
}
+ if (IWL_MVM_RS_DISABLE_MIMO)
+ lq_sta->active_mimo2_rate = 0;
+
lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
BITS_PER_LONG);
lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
@@ -2692,11 +2773,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
BITS_PER_LONG);
IWL_DEBUG_RATE(mvm,
- "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d\n",
+ "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n",
lq_sta->active_legacy_rate,
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
- lq_sta->is_vht, lq_sta->ldpc);
+ lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc);
IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
lq_sta->max_legacy_rate_idx,
lq_sta->max_siso_rate_idx,
@@ -2820,6 +2901,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
* rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
*/
static void rs_build_rates_table(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
const struct rs_rate *initial_rate)
{
@@ -2832,6 +2914,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
memcpy(&rate, initial_rate, sizeof(rate));
valid_tx_ant = mvm->fw->valid_tx_ant;
+ rate.stbc = rs_stbc_allow(mvm, sta, lq_sta);
if (is_siso(&rate)) {
num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2903,7 +2986,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!sta || !initial_rate))
return;
- rs_build_rates_table(mvm, lq_sta, initial_rate);
+ rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
if (num_of_ant(initial_rate->ant) == 1)
lq_cmd->single_stream_ant_msk = initial_rate->ant;
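The rate-scaling changes above hook a new per-column predicate (rs_ant_allow) into the column table so that antenna-specific columns can be vetoed by BT coexistence, alongside the existing SISO and SGI checks. The underlying mechanism is a table of columns whose entries each carry a short array of predicate function pointers that must all pass before the column is used. A stripped-down sketch of that table-driven pattern, with hypothetical names and predicates:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_CHECKS 3

struct column;
typedef bool (*check_fn)(const struct column *col, int ctx);

struct column {
	const char *name;
	check_fn checks[MAX_CHECKS];	/* NULL-terminated predicate list */
};

/* Example predicates; 'ctx' stands in for driver state such as BT load. */
static bool ant_allowed(const struct column *col, int ctx)
{
	(void)col;
	return ctx < 2;
}

static bool siso_allowed(const struct column *col, int ctx)
{
	(void)col;
	(void)ctx;
	return true;
}

static const struct column columns[] = {
	{ .name = "LEGACY_ANT_A", .checks = { ant_allowed } },
	{ .name = "SISO_ANT_A",   .checks = { siso_allowed, ant_allowed } },
};

/* A column is usable only if every check in its list passes. */
static bool column_allowed(const struct column *col, int ctx)
{
	for (size_t i = 0; i < MAX_CHECKS && col->checks[i]; i++)
		if (!col->checks[i](col, ctx))
			return false;
	return true;
}

int main(void)
{
	for (size_t i = 0; i < sizeof(columns) / sizeof(columns[0]); i++)
		printf("%s: %s\n", columns[i].name,
		       column_allowed(&columns[i], 1) ? "allowed" : "blocked");
	return 0;
}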
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index eb34c1209acc..defd70a6d9e6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -208,6 +208,7 @@ struct rs_rate {
u32 bw;
bool sgi;
bool ldpc;
+ bool stbc;
};
@@ -331,6 +332,7 @@ struct iwl_lq_sta {
u64 last_tx;
bool is_vht;
bool ldpc; /* LDPC Rx is supported by the STA */
+ bool stbc; /* Tx STBC is supported by chip and Rx by STA */
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3cf40f3f58ec..94b6e7297a1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -96,27 +96,27 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* Adds the rxb to a new skb and give it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
- u32 ampdu_status,
- struct iwl_rx_cmd_buffer *rxb,
- struct ieee80211_rx_status *stats)
+ u32 ampdu_status, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct sk_buff *skb;
unsigned int hdrlen, fraglen;
- /* Dont use dev_alloc_skb(), we'll have enough headroom once
- * ieee80211_hdr pulled.
- */
- skb = alloc_skb(128, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(mvm, "alloc_skb failed\n");
- return;
- }
/* If frame is small enough to fit in skb->head, pull it completely.
- * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
- * are more efficient.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pulls in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+ hdrlen = (len <= skb_tailroom(skb)) ? len :
+ sizeof(*hdr) + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
@@ -129,8 +129,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
ieee80211_rx(mvm->hw, skb);
}
@@ -185,7 +183,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats,
- u32 rx_pkt_status)
+ u32 rx_pkt_status,
+ u8 *crypt_len)
{
if (!ieee80211_has_protected(hdr->frame_control) ||
(rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
@@ -205,12 +204,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
stats->flag |= RX_FLAG_DECRYPTED;
IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
return 0;
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */
case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
@@ -218,6 +219,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return -1;
stats->flag |= RX_FLAG_DECRYPTED;
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
return 0;
case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
@@ -242,15 +246,17 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *hdr;
- struct ieee80211_rx_status rx_status = {};
+ struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_info *phy_info;
struct iwl_rx_mpdu_res_start *rx_res;
struct ieee80211_sta *sta;
+ struct sk_buff *skb;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
u32 rx_pkt_status;
+ u8 crypt_len = 0;
phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -259,20 +265,32 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_pkt_status = le32_to_cpup((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
- memset(&rx_status, 0, sizeof(rx_status));
+ /* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * ieee80211_hdr pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return 0;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
/*
* drop the packet if it has failed being decrypted by HW
*/
- if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) {
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
rx_pkt_status);
+ kfree_skb(skb);
return 0;
}
if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
+ kfree_skb(skb);
return 0;
}
@@ -283,31 +301,31 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
- rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_info->timestamp);
- rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp);
- rx_status.band =
+ rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+ rx_status->band =
(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
+ rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
- rx_status.band);
+ rx_status->band);
/*
* TSF as indicated by the fw is at INA time, but mac80211 expects the
* TSF at the beginning of the MPDU.
*/
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+ /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
- iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+ iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
- IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
- (unsigned long long)rx_status.mactime);
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+ (unsigned long long)rx_status->mactime);
rcu_read_lock();
/*
@@ -326,15 +344,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (sta) {
struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- rs_update_last_rssi(mvm, &mvmsta->lq_sta,
- &rx_status);
+ rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
}
rcu_read_unlock();
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status->flag |= RX_FLAG_SHORTPRE;
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
/*
@@ -342,8 +359,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* together since we get a single PHY response
* from the firmware for all of them
*/
- rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status.ampdu_reference = mvm->ampdu_ref;
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->ampdu_reference = mvm->ampdu_ref;
}
/* Set up the HT phy flags */
@@ -351,50 +368,50 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status->flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status->flag |= RX_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status.flag |= RX_FLAG_HT_GF;
+ rx_status->flag |= RX_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status.flag |= RX_FLAG_LDPC;
+ rx_status->flag |= RX_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status.flag |= RX_FLAG_HT;
- rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status.vht_nss =
+ rx_status->vht_nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
- rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status.flag |= RX_FLAG_VHT;
- rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->flag |= RX_FLAG_VHT;
+ rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status.vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
- rx_status.rate_idx =
+ rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status.band);
+ rx_status->band);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
- rx_status.flag & RX_FLAG_AMPDU_DETAILS);
+ rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
- iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
- rxb, &rx_status);
+ iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+ crypt_len, rxb);
return 0;
}
@@ -500,29 +517,8 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
- /*
- * set temperature debug enabled - ignore FW temperature updates
- * and use the user set temperature.
- */
- if (mvm->temperature_test) {
- if (mvm->temperature < le32_to_cpu(common->temperature))
- IWL_DEBUG_TEMP(mvm,
- "Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n",
- mvm->temperature,
- le32_to_cpu(common->temperature));
- /*
- * skip iwl_mvm_tt_handler since we are in
- * temperature debug mode and we are ignoring
- * the new temperature value
- */
- goto update;
- }
+ iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature));
- if (mvm->temperature != le32_to_cpu(common->temperature)) {
- mvm->temperature = le32_to_cpu(common->temperature);
- iwl_mvm_tt_handler(mvm);
- }
-update:
iwl_mvm_update_rx_statistics(mvm, stats);
ieee80211_iterate_active_interfaces(mvm->hw,
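In the RX path above, the skb is now allocated by iwl_mvm_rx_rx_mpdu() and the status block is written directly into the skb control buffer; when a frame does not fit in the skb head, only the 802.11 header plus the crypto IV plus 8 bytes for the SNAP/ethertype are copied and the rest stays in the RX buffer as a fragment. The sizing rule in isolation; the fixed 24-byte header length here is a simplification, a real implementation uses the actual header length:

#include <stddef.h>

#define IEEE80211_HDR_LEN	24	/* simplified; real headers vary */
#define SNAP_ETHERTYPE_LEN	8

/* Bytes to copy into the skb head; the remainder (frame_len minus the
 * return value) would be attached as a fragment pointing into the RX
 * buffer rather than being copied. */
static size_t rx_headlen(size_t frame_len, size_t head_tailroom,
			 size_t crypt_len)
{
	if (frame_len <= head_tailroom)
		return frame_len;	/* small frame: linearise it completely */

	/* Large frame: pull only header + IV + SNAP so the later
	 * 802.11 -> 802.3 conversion never touches the fragment. */
	return IEEE80211_HDR_LEN + crypt_len + SNAP_ETHERTYPE_LEN;
}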
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7554f7053830..e5294d01181e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -83,15 +83,29 @@ struct iwl_mvm_scan_params {
} dwell[IEEE80211_NUM_BANDS];
};
+enum iwl_umac_scan_uid_type {
+ IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
+ IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
+ IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
+ IWL_UMAC_SCAN_UID_SCHED_SCAN,
+};
+
+static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type, bool notify);
+
+static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
+{
+ if (mvm->scan_rx_ant != ANT_NONE)
+ return mvm->scan_rx_ant;
+ return mvm->fw->valid_rx_ant;
+}
+
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
u16 rx_chain;
u8 rx_ant;
- if (mvm->scan_rx_ant != ANT_NONE)
- rx_ant = mvm->scan_rx_ant;
- else
- rx_ant = mvm->fw->valid_rx_ant;
+ rx_ant = iwl_mvm_scan_rx_ant(mvm);
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -270,7 +284,8 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *global_bound = data;
- if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < MAX_PHYS)
*global_bound = true;
}
@@ -365,6 +380,10 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
!is_sched_scan)
max_probe_len -= 32;
+ /* The DS Parameter Set element is added on the 2.4 GHz band if required */
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ max_probe_len -= 3;
+
return max_probe_len;
}
@@ -536,23 +555,17 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u8 client_bitmap = 0;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
+ !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
struct iwl_sched_scan_results *notif = (void *)pkt->data;
- client_bitmap = notif->client_bitmap;
+ if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
+ return 0;
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
- client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
- if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
- IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
- ieee80211_sched_scan_results(mvm->hw);
- } else {
- IWL_DEBUG_SCAN(mvm, "Scan results\n");
- }
- }
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
return 0;
}
@@ -662,6 +675,7 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw,
status == IWL_SCAN_OFFLOAD_ABORTED);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
mvm->last_ebs_successful = !ebs_status;
@@ -963,6 +977,20 @@ free_blacklist:
return ret;
}
+static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+ return true;
+}
+
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
@@ -978,15 +1006,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
};
- if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
- IWL_DEBUG_SCAN(mvm,
- "Sending scheduled scan with filtering, filter len %d\n",
- req->n_match_sets);
- } else {
- IWL_DEBUG_SCAN(mvm,
- "Sending Scheduled scan without filtering\n");
+ if (iwl_mvm_scan_pass_all(mvm, req))
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
- }
if (mvm->last_ebs_successful &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
@@ -997,6 +1018,38 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
sizeof(scan_req), &scan_req);
}
+int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ int ret;
+
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
+ } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
+ } else {
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+ ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_sched_scan_start(mvm, req);
+ }
+
+ return ret;
+}
+
static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
{
int ret;
@@ -1041,6 +1094,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
lockdep_assert_held(&mvm->mutex);
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
+ notify);
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1071,8 +1128,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
/*
* Clear the scan status so the next scan requests will succeed. This
* also ensures the Rx handler doesn't do anything, as the scan was
- * stopped from above.
+ * stopped from above. Since the rx handler won't do anything now,
+ * we have to release the scan reference here.
*/
+ if (mvm->scan_status == IWL_MVM_SCAN_OS)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1124,20 +1185,64 @@ iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
+static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ if (!iwl_mvm_rrm_scan_needed(mvm)) {
+ memcpy(newpos, ies, len);
+ return newpos + len;
+ }
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
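iwl_mvm_copy_and_insert_ds_elem() above copies the 2.4 GHz scan IEs while inserting a 3-byte DS Parameter Set placeholder at its standard position, i.e. after SSID, Supported Rates, Request and Extended Supported Rates. A self-contained sketch of the same split-and-insert step; ie_split() below is a simplified stand-in for ieee80211_ie_split(), and the element IDs are the standard 802.11 values:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EID_SSID		0
#define EID_SUPP_RATES		1
#define EID_DS_PARAMS		3
#define EID_REQUEST		10
#define EID_EXT_SUPP_RATES	50

/* Offset of the first IE whose ID is not in 'before'; a simplified
 * stand-in for ieee80211_ie_split(). */
static size_t ie_split(const uint8_t *ies, size_t len,
		       const uint8_t *before, size_t n_before)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		size_t next = pos + 2 + ies[pos + 1];
		bool listed = false;

		for (size_t i = 0; i < n_before; i++)
			if (ies[pos] == before[i])
				listed = true;
		if (!listed || next > len)
			break;
		pos = next;		/* skip ID, length and payload */
	}
	return pos;
}

/* Copy 'ies' into 'out' (which must hold len + 3 bytes), inserting a
 * DS Parameter Set placeholder (ID 3, length 1, channel 0 filled in
 * later) at the split point. Returns the number of bytes written. */
static size_t copy_and_insert_ds(const uint8_t *ies, size_t len, uint8_t *out)
{
	static const uint8_t before_ds[] = {
		EID_SSID, EID_SUPP_RATES, EID_REQUEST, EID_EXT_SUPP_RATES,
	};
	size_t offs = ie_split(ies, len, before_ds, sizeof(before_ds));
	uint8_t *p = out;

	memcpy(p, ies, offs);
	p += offs;
	*p++ = EID_DS_PARAMS;
	*p++ = 1;
	*p++ = 0;
	memcpy(p, ies + offs, len - offs);
	return len + 3;
}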
+
static void
iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies,
- struct iwl_scan_req_unified_lmac *cmd)
+ struct iwl_scan_probe_req *preq,
+ const u8 *mac_addr, const u8 *mac_addr_mask)
{
- struct iwl_scan_probe_req *preq = (void *)(cmd->data +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
- u8 *pos;
+ u8 *pos, *newpos;
+
+ /*
+ * Unfortunately, right now the offload scan doesn't support randomising
+ * within the firmware, so until the firmware API is ready we implement
+ * it in the driver. This means that the scan iterations won't really be
+ * random; the address only changes when the scan is restarted, but at
+ * least that helps a bit.
+ */
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(frame->da);
- memcpy(frame->sa, vif->addr, ETH_ALEN);
eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0;
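The probe request above now gets a randomised source address when NL80211_SCAN_FLAG_RANDOM_ADDR is set: the bits selected by mac_addr_mask are taken from the requested template and the remaining bits are random. The masking rule as a small sketch; the kernel helper used in the diff handles this internally, and seeding as well as the locally-administered/unicast bits are left out here:

#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* For every bit set in 'mask', keep the bit from 'addr'; for every clear
 * bit, use a random bit instead. */
static void random_mask_addr(uint8_t out[ETH_ALEN],
			     const uint8_t addr[ETH_ALEN],
			     const uint8_t mask[ETH_ALEN])
{
	for (int i = 0; i < ETH_ALEN; i++) {
		uint8_t rnd = (uint8_t)rand();

		out[i] = (addr[i] & mask[i]) | (rnd & ~mask[i]);
	}
}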
@@ -1148,11 +1253,14 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
preq->mac_header.offset = 0;
preq->mac_header.len = cpu_to_le16(24 + 2);
- memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ],
- ies->len[IEEE80211_BAND_2GHZ]);
+ /* Insert the DS Parameter Set element on the 2.4 GHz band */
+ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
+ ies->ies[IEEE80211_BAND_2GHZ],
+ ies->len[IEEE80211_BAND_2GHZ],
+ pos);
preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]);
- pos += ies->len[IEEE80211_BAND_2GHZ];
+ preq->band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ]);
@@ -1213,9 +1321,10 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {};
u32 flags;
- int ssid_bitmap = 0;
+ u32 ssid_bitmap = 0;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -1274,7 +1383,13 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
req->req.n_channels, ssid_bitmap,
cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd);
+ preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
+ req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->req.mac_addr : NULL,
+ req->req.mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -1307,6 +1422,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {};
int ret;
u32 flags = 0, ssid_bitmap = 0;
@@ -1330,15 +1446,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->n_channels = (u8)req->n_channels;
- if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
- IWL_DEBUG_SCAN(mvm,
- "Sending scheduled scan with filtering, n_match_sets %d\n",
- req->n_match_sets);
- } else {
- IWL_DEBUG_SCAN(mvm,
- "Sending Scheduled scan without filtering\n");
+ if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
- }
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1368,7 +1477,13 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd);
+ preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
+ req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->mac_addr : NULL,
+ req->mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -1390,6 +1505,10 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
+ true);
+
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0;
@@ -1404,3 +1523,576 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_cancel_regular_scan(mvm);
}
+
+/* UMAC scan API */
+
+struct iwl_umac_scan_done {
+ struct iwl_mvm *mvm;
+ enum iwl_umac_scan_uid_type type;
+};
+
+static int rate_to_scan_rate_flag(unsigned int rate)
+{
+ static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
+ [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
+ [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
+ [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
+ [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
+ [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
+ [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
+ [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
+ [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
+ [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
+ [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
+ [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
+ [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
+ };
+
+ return rate_to_scan_rate[rate];
+}
+
+static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
+{
+ struct ieee80211_supported_band *band;
+ unsigned int rates = 0;
+ int i;
+
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+
+ /* Set both basic rates and supported rates */
+ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
+
+ return cpu_to_le32(rates);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+
+ struct iwl_scan_config *scan_config;
+ struct ieee80211_supported_band *band;
+ int num_channels =
+ mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ int ret, i, j = 0, cmd_size, data_size;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_CFG_CMD,
+ };
+
+ if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
+
+ scan_config = kzalloc(cmd_size, GFP_KERNEL);
+ if (!scan_config)
+ return -ENOMEM;
+
+ data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
+ scan_config->hdr.size = cpu_to_le16(data_size);
+ scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+ SCAN_CONFIG_N_CHANNELS(num_channels));
+ scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
+ scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+ scan_config->out_of_channel_time = cpu_to_le32(170);
+ scan_config->suspend_time = cpu_to_le32(30);
+ scan_config->dwell_active = 20;
+ scan_config->dwell_passive = 110;
+ scan_config->dwell_fragmented = 20;
+
+ memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
+ scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
+ IWL_CHANNEL_FLAG_ACCURATE_EBS |
+ IWL_CHANNEL_FLAG_EBS_ADD |
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ scan_config->channel_array[j] = band->channels[i].center_freq;
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ scan_config->channel_array[j] = band->channels[i].center_freq;
+
+ cmd.data[0] = scan_config;
+ cmd.len[0] = cmd_size;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(scan_config);
+ return ret;
+}
+
+static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
+{
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+ if (mvm->scan_uid[i] == uid)
+ return i;
+
+ return i;
+}
+
+static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_find_scan_uid(mvm, 0);
+}
+
+static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type)
+{
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+ if (mvm->scan_uid[i] & type)
+ return true;
+
+ return false;
+}
+
+static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type)
+{
+ u32 uid;
+
+ /* make sure exactly one bit is on in scan type */
+ WARN_ON(hweight8(type) != 1);
+
+ /*
+ * Make sure scan uids are unique. If one scan lasts a long time while
+ * others complete frequently, the seq number will wrap around and
+ * we may end up with more than one scan with the same uid.
+ */
+ do {
+ uid = type | (mvm->scan_seq_num <<
+ IWL_UMAC_SCAN_UID_SEQ_OFFSET);
+ mvm->scan_seq_num++;
+ } while (iwl_mvm_find_scan_uid(mvm, uid) <
+ IWL_MVM_MAX_SIMULTANEOUS_SCANS);
+
+ IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
+
+ return uid;
+}
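Scan UIDs generated above encode the scan type in their low bits and a rolling sequence number above them, and the generator retries until the candidate is not already in use, so a long-running scan cannot collide with a wrapped sequence number. A compact sketch of the same scheme; the array size and shift below are placeholders, not the driver's values:

#include <stdbool.h>
#include <stdint.h>

#define MAX_SIMULTANEOUS_SCANS	8
#define UID_SEQ_OFFSET		2	/* low bits hold the scan-type bitmask */

static uint32_t scan_uid[MAX_SIMULTANEOUS_SCANS];
static uint32_t scan_seq_num = 1;

static bool uid_in_use(uint32_t uid)
{
	for (int i = 0; i < MAX_SIMULTANEOUS_SCANS; i++)
		if (scan_uid[i] == uid)
			return true;
	return false;
}

/* 'type' must have exactly one bit set (regular or scheduled scan). */
static uint32_t generate_scan_uid(uint32_t type)
{
	uint32_t uid;

	do {
		uid = type | (scan_seq_num << UID_SEQ_OFFSET);
		scan_seq_num++;
	} while (uid_in_use(uid));

	return uid;
}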
+
+static void
+iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_umac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ memset(cmd, 0, ksize(cmd));
+ cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr));
+ cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
+ cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+ if (params->passive_fragmented)
+ cmd->fragmented_dwell =
+ params->dwell[IEEE80211_BAND_2GHZ].passive;
+ cmd->max_out_time = cpu_to_le32(params->max_out_time);
+ cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 ssid_bitmap,
+ struct iwl_scan_req_umac *cmd)
+{
+ struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].channel_num = channels[i]->hw_value;
+ channel_cfg[i].iter_count = 1;
+ channel_cfg[i].iter_interval = 0;
+ }
+}
+
+static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
+ struct cfg80211_ssid *ssids,
+ int fragmented)
+{
+ int flags = 0;
+
+ if (n_ssids == 0)
+ flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
+
+ if (n_ssids == 1 && ssids[0].ssid_len != 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
+
+ if (fragmented)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+
+ return flags;
+}
+
+int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQ_UMAC,
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ struct iwl_mvm_scan_params params = {};
+ u32 uid, flags;
+ u32 ssid_bitmap = 0;
+ int ret, i, uid_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ uid_idx = iwl_mvm_find_free_scan_uid(mvm);
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return -EBUSY;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
+
+ if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
+ req->ies.common_ie_len +
+ req->ies.len[NL80211_BAND_2GHZ] +
+ req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
+ SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
+ mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
+ &params);
+
+ iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+
+ uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
+ mvm->scan_uid[uid_idx] = uid;
+ cmd->uid = cpu_to_le32(uid);
+
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+
+ flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
+ req->req.ssids,
+ params.passive_fragmented);
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+
+ cmd->general_flags = cpu_to_le32(flags);
+ cmd->n_channels = req->req.n_channels;
+
+ for (i = 0; i < req->req.n_ssids; i++)
+ ssid_bitmap |= BIT(i);
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
+ req->req.n_channels, ssid_bitmap, cmd);
+
+ sec_part->schedule[0].iter_count = 1;
+ sec_part->delay = 0;
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
+ req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->req.mac_addr : NULL,
+ req->req.mac_addr_mask);
+
+ iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
+ req->req.n_ssids, 0);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ }
+ return ret;
+}
+
+int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQ_UMAC,
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ struct iwl_mvm_scan_params params = {};
+ u32 uid, flags;
+ u32 ssid_bitmap = 0;
+ int ret, uid_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ uid_idx = iwl_mvm_find_free_scan_uid(mvm);
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return -EBUSY;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
+
+ if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
+ ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
+ SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
+ mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
+ &params);
+
+ iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
+ uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
+ mvm->scan_uid[uid_idx] = uid;
+ cmd->uid = cpu_to_le32(uid);
+
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+ flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
+ params.passive_fragmented);
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+ if (iwl_mvm_scan_pass_all(mvm, req))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ cmd->general_flags = cpu_to_le32(flags);
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful)
+ cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ cmd->n_channels = req->n_channels;
+
+ iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
+ false);
+
+ /* This API uses bits 0-19 instead of 1-20. */
+ ssid_bitmap = ssid_bitmap >> 1;
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
+ ssid_bitmap, cmd);
+
+ sec_part->schedule[0].interval =
+ cpu_to_le16(req->interval / MSEC_PER_SEC);
+ sec_part->schedule[0].iter_count = 0xff;
+
+ sec_part->delay = 0;
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
+ req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->mac_addr : NULL,
+ req->mac_addr_mask);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sched scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+ }
+ return ret;
+}
+
+int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
+ int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+
+ /*
+ * The scan uid may have been set to zero by a scan abort request from above.
+ */
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return 0;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed, uid %u type %s, status %s, EBS status %s\n",
+ uid, sched ? "sched" : "regular",
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
+ "success" : "failed");
+
+ mvm->last_ebs_successful = !notif->ebs_status;
+ mvm->scan_uid[uid_idx] = 0;
+
+ if (!sched) {
+ ieee80211_scan_completed(mvm->hw,
+ notif->status ==
+ IWL_SCAN_OFFLOAD_ABORTED);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ } else {
+ IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
+ }
+
+ return 0;
+}
+
+static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_umac_scan_done *scan_done = data;
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
+
+ if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
+ return false;
+
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return false;
+
+ /*
+ * Clear the scan uid of scans that were aborted from above and have
+ * completed in the FW, so the RX handler does nothing.
+ */
+ scan_done->mvm->scan_uid[uid_idx] = 0;
+
+ return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+}
+
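iwl_mvm_find_scan_type() is not part of this hunk; given the UID scheme above, where the scan-type bit is embedded in each UID, it is presumably just a masked search over scan_uid[]. A rough sketch of such a helper (not the driver's actual code):

static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
				   enum iwl_umac_scan_uid_type type)
{
	int i;

	/* a scan of the given type is still tracked if any UID carries its bit */
	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
		if (mvm->scan_uid[i] & type)
			return true;

	return false;
}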
+static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+{
+ struct iwl_umac_scan_abort cmd = {
+ .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr)),
+ .uid = cpu_to_le32(uid),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type, bool notify)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
+ struct iwl_umac_scan_done scan_done = {
+ .mvm = mvm,
+ .type = type,
+ };
+ int i, ret = -EIO;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ iwl_scan_umac_done_check, &scan_done);
+
+ IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+ if (mvm->scan_uid[i] & type) {
+ int err;
+
+ if (iwl_mvm_is_radio_killed(mvm) &&
+ (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
+ ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ break;
+ }
+
+ err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
+ if (!err)
+ ret = 0;
+ }
+ }
+
+ if (ret) {
+ IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+ if (ret)
+ return ret;
+
+ if (notify) {
+ if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
+ ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ }
+ }
+
+ return ret;
+}
+
+int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+{
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return sizeof(struct iwl_scan_req_umac) +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_req_umac_tail);
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ return sizeof(struct iwl_scan_req_unified_lmac) +
+ sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_probe_req);
+
+ return sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ mvm->fw->ucode_capa.n_scan_channels *
+ sizeof(struct iwl_scan_channel);
+}
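For context, the size computed here is normally used once at init time to preallocate a worst-case scan command buffer. A minimal sketch of such a caller, assuming the usual mvm naming (mvm->scan_cmd):

	int scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		return -ENOMEM;	/* no scan support without the buffer */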
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 1731c205c81d..d86fe432e51f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -204,6 +204,56 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
+static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ unsigned long used_hw_queues;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u32 ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
+
+ /* Find available queues, and allocate them to the ACs */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ u8 queue = find_first_zero_bit(&used_hw_queues,
+ mvm->first_agg_queue);
+
+ if (queue >= mvm->first_agg_queue) {
+ IWL_ERR(mvm, "Failed to allocate STA queue\n");
+ return -EBUSY;
+ }
+
+ __set_bit(queue, &used_hw_queues);
+ mvmsta->hw_queue[ac] = queue;
+ }
+
+ /* Found a place for all queues - enable them */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
+ iwl_mvm_ac_to_tx_fifo[ac]);
+ mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long sta_msk;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* disable the TDLS STA-specific queues */
+ sta_msk = mvmsta->tfd_queue_msk;
+ for_each_set_bit(i, &sta_msk, sizeof(sta_msk))
+ iwl_mvm_disable_txq(mvm, i);
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -237,9 +287,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0;
mvm_sta->tfd_queue_msk = 0;
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
- mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+ /* allocate new queues for a TDLS station */
+ if (sta->tdls) {
+ ret = iwl_mvm_tdls_sta_init(mvm, sta);
+ if (ret)
+ return ret;
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+ }
/* for HW restart - reset everything but the sequence number */
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -251,7 +309,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
- return ret;
+ goto err;
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
@@ -265,6 +323,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
return 0;
+
+err:
+ iwl_mvm_tdls_sta_deinit(mvm, sta);
+ return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
@@ -398,6 +460,17 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
}
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
+
+ if (mvm->tfd_drained[sta_id]) {
+ unsigned long i, msk = mvm->tfd_drained[sta_id];
+
+ for_each_set_bit(i, &msk, sizeof(msk))
+ iwl_mvm_disable_txq(mvm, i);
+
+ mvm->tfd_drained[sta_id] = 0;
+ IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
+ sta_id, msk);
+ }
}
mutex_unlock(&mvm->mutex);
@@ -431,6 +504,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
}
/*
+ * This shouldn't happen - the TDLS channel switch should be canceled
+ * before the STA is removed.
+ */
+ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ cancel_delayed_work(&mvm->tdls_cs.dwork);
+ }
+
+ /*
* Make sure that the tx response code sees the station as -EBUSY and
* calls the drain worker.
*/
@@ -443,9 +525,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock);
+
+ /* disable TDLS sta queues on drain complete */
+ if (sta->tdls) {
+ mvm->tfd_drained[mvm_sta->sta_id] =
+ mvm_sta->tfd_queue_msk;
+ IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
+ mvm_sta->sta_id);
+ }
+
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else {
spin_unlock_bh(&mvm_sta->lock);
+
+ if (sta->tdls)
+ iwl_mvm_tdls_sta_deinit(mvm, sta);
+
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
@@ -609,7 +704,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ qmask = iwl_mvm_mac_get_queues_mask(vif);
/*
* The firmware defines the TFD queue mask to only be relevant
@@ -1071,15 +1166,16 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
- u32 cmd_flags)
+ struct ieee80211_key_conf *keyconf, bool mcast,
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
- int ret, status;
+ int ret;
+ u32 status;
u16 keyidx;
int i;
+ u8 sta_id = mvm_sta->sta_id;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1098,12 +1194,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+ memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
+ break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
}
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_offset = keyconf->hw_key_idx;
@@ -1195,17 +1297,88 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return NULL;
}
+static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int ret;
+ const u8 *addr;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ seq.tkip.iv32, p1k, 0);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ 0, NULL, 0);
+ break;
+ default:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ 0, NULL, 0);
+ }
+
+ return ret;
+}
+
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
+ __le16 key_flags;
+ int ret;
+ u32 status;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.key_flags = key_flags;
+ cmd.key_offset = keyconf->hw_key_idx;
+ cmd.sta_id = sta_id;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf,
bool have_key_offset)
{
- struct iwl_mvm_sta *mvm_sta;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ u8 sta_id;
int ret;
- u8 *addr, sta_id;
- struct ieee80211_key_seq seq;
- u16 p1k[5];
lockdep_assert_held(&mvm->mutex);
@@ -1234,8 +1407,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
- if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
if (!have_key_offset) {
@@ -1249,26 +1421,26 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
return -ENOSPC;
}
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
- /* get phase 1 key from mac80211 */
- ieee80211_get_key_rx_seq(keyconf, 0, &seq);
- ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- seq.tkip.iv32, p1k, 0);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- 0, NULL, 0);
- break;
- default:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
- sta_id, 0, NULL, 0);
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+ if (ret) {
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ goto end;
}
- if (ret)
- __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ /*
+ * For WEP, the same key is used for multicast and unicast. Upload it
+ * again, using the same key offset, so that the other entry (unicast or
+ * multicast) points at the same key slot (offset).
+ * If this fails, remove the original as well.
+ */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+ if (ret) {
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ }
+ }
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
@@ -1282,11 +1454,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf)
{
- struct iwl_mvm_sta *mvm_sta;
- struct iwl_mvm_add_sta_key_cmd cmd = {};
- __le16 key_flags;
- int ret, status;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
+ int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1299,8 +1469,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
- ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
- if (!ret) {
+ if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
IWL_ERR(mvm, "offset %d not used in fw key table.\n",
keyconf->hw_key_idx);
return -ENOENT;
@@ -1326,35 +1495,17 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
- if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
- key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
- STA_KEY_FLG_KEYID_MSK);
- key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
- key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
-
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
-
- cmd.key_flags = key_flags;
- cmd.key_offset = keyconf->hw_key_idx;
- cmd.sta_id = sta_id;
-
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ if (ret)
+ return ret;
- switch (status) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
- break;
- }
+ /* delete WEP key twice to get rid of (now useless) offset */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
return ret;
}
@@ -1367,6 +1518,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta;
u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
return;
@@ -1381,8 +1533,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (void *)sta->drv_priv;
- iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
iv32, phase1key, CMD_ASYNC);
rcu_read_unlock();
}
@@ -1580,3 +1732,18 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
}
}
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+
+ if (!WARN_ON(!mvmsta))
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index d9c0d7b0e9d4..d8f48975ad08 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -264,6 +264,7 @@ enum iwl_mvm_agg_state {
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
+ * @tx_time: medium time consumed by this A-MPDU
*/
struct iwl_mvm_tid_data {
u16 seq_number;
@@ -274,6 +275,7 @@ struct iwl_mvm_tid_data {
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
+ u16 tx_time;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -286,6 +288,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
* @tfd_queue_msk: the tfd queues used by the station
+ * @hw_queue: per-AC mapping of the TFD queues used by this station
* @mac_id_n_color: the MAC context this station is linked to
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
@@ -309,6 +312,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
struct iwl_mvm_sta {
u32 sta_id;
u32 tfd_queue_msk;
+ u8 hw_queue[IEEE80211_NUM_ACS];
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
@@ -418,5 +422,6 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable);
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index 66c82df2d0a1..c0e00bae5bd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -61,9 +61,13 @@
*
*****************************************************************************/
+#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
+#define TU_TO_US(x) (x * 1024)
+#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
+
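A TU is 1024 usec, so these helpers turn beacon-based intervals into wall-clock delays. As a worked example with assumed values, beacon_int = 100 TU and dtim_period = 2:

	TU_TO_MS(2 * 100) = (2 * 100 * 1024) / 1000 = 204 ms

which is the order of delay used below to re-arm the channel-switch delayed work for roughly one DTIM interval.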
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
struct ieee80211_sta *sta;
@@ -113,17 +117,85 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return count;
}
+static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_tdls_config_res *resp;
+ struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = TDLS_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &tdls_cfg_cmd, },
+ .len = { sizeof(struct iwl_tdls_config_cmd), },
+ };
+ struct ieee80211_sta *sta;
+ int ret, i, cnt;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ tdls_cfg_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+
+ /* for now the Tx cmd is empty and unused */
+
+ /* populate TDLS peer data */
+ cnt = 0;
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
+ IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+ tdls_cfg_cmd.sta_info[cnt].is_initiator =
+ cpu_to_le32(sta->tdls_initiator ? 1 : 0);
+
+ cnt++;
+ }
+
+ tdls_cfg_cmd.tdls_peer_count = cnt;
+ IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (WARN_ON_ONCE(ret))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
+ pkt->hdr.flags);
+ goto exit;
+ }
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
+ goto exit;
+
+ /* we don't really care about the response at this point */
+
+exit:
+ iwl_free_resp(&cmd);
+}
+
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added)
{
int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
- /*
- * Disable ps when the first TDLS sta is added and re-enable it
- * when the last TDLS sta is removed
- */
- if ((tdls_sta_cnt == 1 && sta_added) ||
- (tdls_sta_cnt == 0 && !sta_added))
+ /* when the first peer joins, send a power update first */
+ if (tdls_sta_cnt == 1 && sta_added)
+ iwl_mvm_power_update_mac(mvm);
+
+ /* configure the FW with TDLS peer info */
+ iwl_mvm_tdls_config(mvm, vif);
+
+ /* when the last peer leaves, send a power update last */
+ if (tdls_sta_cnt == 0 && !sta_added)
iwl_mvm_power_update_mac(mvm);
}
@@ -147,3 +219,488 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}
+
+static const char *
+iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
+{
+ switch (state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ return "IDLE";
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ return "REQ SENT";
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ return "REQ RECEIVED";
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ return "ACTIVE";
+ }
+
+ return NULL;
+}
+
+static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
+ enum iwl_mvm_tdls_cs_state state)
+{
+ if (mvm->tdls_cs.state == state)
+ return;
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+ iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
+ iwl_mvm_tdls_cs_state_str(state));
+ mvm->tdls_cs.state = state;
+
+ if (state == IWL_MVM_TDLS_SW_IDLE)
+ mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
+}
+
+int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ unsigned int delay;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* the FW may report that the channel switch failed */
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+ goto out;
+ }
+
+ if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+ goto out;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+
+ /*
+ * Update the state and re-arm the work so we can switch again once
+ * this switch is over (one DTIM interval, converted from TU to msec).
+ */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+
+out:
+ return 0;
+}
+
+static int
+iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator)
+{
+ bool same_peer = false;
+ int ret = 0;
+
+ /* get the existing peer if it's there */
+ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta))
+ same_peer = ether_addr_equal(peer, sta->addr);
+ }
+
+ switch (mvm->tdls_cs.state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ /*
+ * might be a spurious packet from the peer after the switch is
+ * already done
+ */
+ if (type == TDLS_MOVE_CH)
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ /*
+ * We received a ch-switch request while an outgoing one is
+ * pending. Allow it to proceed if the other peer is the same
+ * one we sent to, and we are not the link initiator.
+ */
+ if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (!peer_initiator) /* we are the initiator */
+ ret = -EBUSY;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ /* as above, allow the link initiator to proceed */
+ if (type == TDLS_SEND_CHAN_SW_REQ) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (peer_initiator) /* they are the initiator */
+ ret = -EBUSY;
+ } else if (type == TDLS_MOVE_CH) {
+ ret = -EINVAL;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ /* we don't allow initiations during active channel switch */
+ if (type == TDLS_SEND_CHAN_SW_REQ)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ IWL_DEBUG_TDLS(mvm,
+ "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+ type, mvm->tdls_cs.state, peer, same_peer,
+ peer_initiator);
+
+ return ret;
+}
+
+static int
+iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator,
+ u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ u32 timestamp, u16 switch_time,
+ u16 switch_timeout, struct sk_buff *skb,
+ u32 ch_sw_tm_ie)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct iwl_tdls_channel_switch_cmd cmd = {0};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
+ if (ret)
+ return ret;
+
+ if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd.switch_type = type;
+ cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
+ cmd.timing.switch_time = cpu_to_le32(switch_time);
+ cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ ret = -ENOENT;
+ goto out;
+ }
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
+
+ if (!chandef) {
+ if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.peer.chandef.chan) {
+ /* actually moving to the channel */
+ chandef = &mvm->tdls_cs.peer.chandef;
+ } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
+ type == TDLS_MOVE_CH) {
+ /* we need to return to base channel */
+ struct ieee80211_chanctx_conf *chanctx =
+ rcu_dereference(vif->chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ chandef = &chanctx->def;
+ }
+ }
+
+ if (chandef) {
+ cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+ cmd.ci.channel = chandef->chan->hw_value;
+ cmd.ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+ }
+
+ /* keep quota calculation simple for now - 50% of DTIM for TDLS */
+ cmd.timing.max_offchan_duration =
+ cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int) / 2);
+
+ /* Switch time is the first element in the switch-timing IE. */
+ cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+ info = IEEE80211_SKB_CB(skb);
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+ mvmsta->sta_id);
+
+ hdr = (void *)skb->data;
+ iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+ hdr->frame_control);
+ rcu_read_unlock();
+
+ memcpy(cmd.frame.data, skb->data, skb->len);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
+ ret);
+ goto out;
+ }
+
+ /* channel switch has started, update state */
+ if (type != TDLS_MOVE_CH) {
+ mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
+ iwl_mvm_tdls_update_cs_state(mvm,
+ type == TDLS_SEND_CHAN_SW_REQ ?
+ IWL_MVM_TDLS_SW_REQ_SENT :
+ IWL_MVM_TDLS_SW_REQ_RCVD);
+ }
+
+out:
+
+ /* channel switch failed - we are idle */
+ if (ret)
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ return ret;
+}
+
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
+{
+ struct iwl_mvm *mvm;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ unsigned int delay;
+ int ret;
+
+ mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+ mutex_lock(&mvm->mutex);
+
+ /* called after an active channel switch has finished or timed out */
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ /* the station might be gone; in that case, do nothing */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
+ goto out;
+
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr,
+ mvm->tdls_cs.peer.initiator,
+ mvm->tdls_cs.peer.op_class,
+ &mvm->tdls_cs.peer.chandef,
+ 0, 0, 0,
+ mvm->tdls_cs.peer.skb,
+ mvm->tdls_cs.peer.ch_sw_tm_ie);
+ if (ret)
+ IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+
+ /* retry after a DTIM if we failed sending now */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+out:
+ mutex_unlock(&mvm->mutex);
+}
+
+int
+iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ unsigned int delay;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
+ sta->addr, chandef->chan->center_freq, chandef->width);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
+ IWL_DEBUG_TDLS(mvm,
+ "Existing peer. Can't start switch with %pM\n",
+ sta->addr);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr, sta->tdls_initiator,
+ oper_class, chandef, 0, 0, 0,
+ tmpl_skb, ch_sw_tm_ie);
+ if (ret)
+ goto out;
+
+ /*
+ * Mark the peer as "in tdls switch" for this vif. We only allow a
+ * single such peer per vif.
+ */
+ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+ if (!mvm->tdls_cs.peer.skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
+ mvm->tdls_cs.peer.chandef = *chandef;
+ mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+ mvm->tdls_cs.peer.op_class = oper_class;
+ mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+
+ /*
+ * Wait for 2 DTIM periods before attempting the next switch. The next
+ * switch will be made sooner if the current one completes before that.
+ */
+ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *cur_sta;
+ bool wait_for_phy = false;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
+ IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+ goto out;
+ }
+
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* make sure it's the same peer */
+ if (cur_sta != sta)
+ goto out;
+
+ /*
+ * If we're currently in a switch because of the now canceled peer,
+ * wait a DTIM here to make sure the phy is back on the base channel.
+ * We can't otherwise force it.
+ */
+ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+ mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
+ wait_for_phy = true;
+
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ dev_kfree_skb(mvm->tdls_cs.peer.skb);
+ mvm->tdls_cs.peer.skb = NULL;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ /* make sure the phy is on the base channel */
+ if (wait_for_phy)
+ msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int));
+
+ /* flush the channel switch state */
+ flush_delayed_work(&mvm->tdls_cs.dwork);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+}
+
+void
+iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ enum iwl_tdls_channel_switch_type type;
+ unsigned int delay;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm,
+ "Received TDLS ch switch action %d from %pM status %d\n",
+ params->action_code, params->sta->addr, params->status);
+
+ /*
+ * we got a non-zero status from a peer we were switching to - move to
+ * the idle state and retry later
+ */
+ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
+ params->status != 0 &&
+ mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *cur_sta;
+
+ /* make sure it's the same peer */
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (cur_sta == params->sta) {
+ iwl_mvm_tdls_update_cs_state(mvm,
+ IWL_MVM_TDLS_SW_IDLE);
+ goto retry;
+ }
+ }
+
+ type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
+
+ iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
+ params->sta->tdls_initiator, 0,
+ params->chandef, params->timestamp,
+ params->switch_time,
+ params->switch_timeout,
+ params->tmpl_skb,
+ params->ch_sw_tm_ie);
+
+retry:
+ /* register a timeout in case we don't succeed in switching */
+ delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
+ 1024 / 1000;
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+ mutex_unlock(&mvm->mutex);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 6dfad230be5e..54fafbf9a711 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -191,6 +191,35 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
return true;
}
+static void
+iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ if (!le32_to_cpu(notif->status)) {
+ IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_AP:
+ iwl_mvm_csa_noa_start(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ ieee80211_chswitch_done(te_data->vif, true);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* we don't need it anymore */
+ iwl_mvm_te_clear_data(mvm, te_data);
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -252,14 +281,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
- } else if (te_data->vif->type == NL80211_IFTYPE_AP) {
- if (le32_to_cpu(notif->status))
- iwl_mvm_csa_noa_start(mvm);
- else
- IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");
-
- /* we don't need it anymore */
- iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
}
} else {
IWL_WARN(mvm, "Got TE with unknown action\n");
@@ -549,18 +572,11 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
}
-/*
- * Explicit request to remove a time event. The removal of a time event needs to
- * be synchronized with the flow of a time event's end notification, which also
- * removes the time event from the op mode data structures.
- */
-void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- struct iwl_mvm_time_event_data *te_data)
+static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ u32 *uid)
{
- struct iwl_time_event_cmd time_cmd = {};
- u32 id, uid;
- int ret;
+ u32 id;
/*
* It is possible that by the time we got to this point the time
@@ -569,7 +585,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
spin_lock_bh(&mvm->time_event_lock);
/* Save time event uid before clearing its data */
- uid = te_data->uid;
+ *uid = te_data->uid;
id = te_data->id;
/*
@@ -584,10 +600,59 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
* send a removal command.
*/
if (id == TE_MAX) {
- IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
- return;
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ return false;
}
+ return true;
+}
+
+/*
+ * Explicit request to remove an aux roc time event. The removal of a time
+ * event needs to be synchronized with the flow of a time event's end
+ * notification, which also removes the time event from the op mode
+ * data structures.
+ */
+static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_hs20_roc_req aux_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ aux_cmd.event_unique_id = cpu_to_le32(uid);
+ aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ aux_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
+ le32_to_cpu(aux_cmd.event_unique_id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
+ sizeof(aux_cmd), &aux_cmd);
+
+ if (WARN_ON(ret))
+ return;
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
/* When we remove a TE, the UID is to be set in the id field */
time_cmd.id = cpu_to_le32(uid);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
@@ -666,13 +731,17 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
-void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ bool is_p2p = false;
lockdep_assert_held(&mvm->mutex);
+ mvmvif = NULL;
+ spin_lock_bh(&mvm->time_event_lock);
+
/*
* Iterate over the list of time events and find the time event that is
* associated with a P2P_DEVICE interface.
@@ -680,22 +749,41 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
* event at any given time and this time event coresponds to a ROC
* request
*/
- mvmvif = NULL;
- spin_lock_bh(&mvm->time_event_lock);
list_for_each_entry(te_data, &mvm->time_event_list, list) {
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ te_data->running) {
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- break;
+ is_p2p = true;
+ goto remove_te;
}
}
+
+ /*
+ * Iterate over the list of aux roc time events and find the time
+ * event that is associated with a BSS interface.
+ * This assumes that a BSS interface can have only a single time
+ * event at any given time and this time event corresponds to a ROC
+ * request
+ */
+ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+ if (te_data->running) {
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ goto remove_te;
+ }
+ }
+
+remove_te:
spin_unlock_bh(&mvm->time_event_lock);
if (!mvmvif) {
- IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
+ IWL_WARN(mvm, "No remain on channel event\n");
return;
}
- iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ if (is_p2p)
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ else
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
iwl_mvm_roc_finished(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index b350e47e19da..6f6b35db3ab8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -182,14 +182,14 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type);
/**
- * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity
+ * iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
*
* This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
-void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index acca44a45086..2b1e61fac34a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -64,10 +64,6 @@
*****************************************************************************/
#include "mvm.h"
-#include "iwl-config.h"
-#include "iwl-io.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
@@ -99,32 +95,81 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
iwl_mvm_set_hw_ctkill_state(mvm, false);
}
-static bool iwl_mvm_temp_notif(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+{
+ /* ignore the notification if we are in test mode */
+ if (mvm->temperature_test)
+ return;
+
+ if (mvm->temperature == temp)
+ return;
+
+ mvm->temperature = temp;
+ iwl_mvm_tt_handler(mvm);
+}
+
+static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_mvm *mvm =
- container_of(notif_wait, struct iwl_mvm, notif_wait);
- int *temp = data;
struct iwl_dts_measurement_notif *notif;
int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
if (WARN_ON_ONCE(len != sizeof(*notif))) {
IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
- return true;
+ return -EINVAL;
}
notif = (void *)pkt->data;
- *temp = le32_to_cpu(notif->temp);
+ temp = le32_to_cpu(notif->temp);
/* shouldn't be negative, but since it's s32, make sure it isn't */
- if (WARN_ON_ONCE(*temp < 0))
- *temp = 0;
+ if (WARN_ON_ONCE(temp < 0))
+ temp = 0;
+
+ IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
+
+ return temp;
+}
+
+static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ int *temp = data;
+ int ret;
+
+ ret = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (ret < 0)
+ return true;
+
+ *temp = ret;
- IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", *temp);
return true;
}
+int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int temp;
+
+ /* the notification is handled synchronously in ctkill, so skip here */
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return 0;
+
+ temp = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (temp < 0)
+ return 0;
+
+ iwl_mvm_tt_temp_changed(mvm, temp);
+
+ return 0;
+}
+
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
{
struct iwl_dts_measurement_cmd cmd = {
@@ -145,7 +190,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
temp_notif, ARRAY_SIZE(temp_notif),
- iwl_mvm_temp_notif, &temp);
+ iwl_mvm_temp_notif_wait, &temp);
ret = iwl_mvm_get_temp_cmd(mvm);
if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index c6a517c771df..4f15d9decc81 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -73,9 +73,9 @@
/*
* Sets most of the Tx cmd's fields
*/
-static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info, u8 sta_id)
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -149,11 +149,9 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/*
* Sets the fields in the Tx cmd that are rate related
*/
-static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- __le16 fc)
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
u32 rate_flags;
int rate_idx;
@@ -189,8 +187,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
- "Got an HT rate for a non data frame 0x%x\n",
- info->control.rates[0].flags);
+ "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx,
+ le16_to_cpu(fc));
rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
@@ -230,10 +230,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
/*
* Sets the fields in the Tx cmd that are crypto related
*/
-static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag)
+void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -424,6 +424,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+ if (sta->tdls) {
+ /* default to TID 0 for non-QoS packets */
+ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
+
+ txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
+ }
+
if (is_ampdu) {
if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
@@ -658,6 +665,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
+ /*
+ * TODO: this is not accurate if we are freeing more than one
+ * packet.
+ */
+ info->status.tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tx_resp->reduced_tpc;
@@ -850,6 +863,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
+ mvmsta->tid_data[tid].tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
}
rcu_read_unlock();
@@ -878,6 +893,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
+ /* TODO: not accounted if the whole A-MPDU failed */
+ info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 8021f6eec27f..e56e77ef5d2e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -734,3 +734,40 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
return idle;
}
+
+struct iwl_bss_iter_data {
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
+static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (data->vif) {
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
+{
+ struct iwl_bss_iter_data bss_iter_data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_iface_iterator, &bss_iter_data);
+
+ if (bss_iter_data.error) {
+ IWL_ERR(mvm, "More than one managed interface active!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return bss_iter_data.vif;
+}
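A minimal usage sketch, illustrative only since the real callers live elsewhere in this series, showing how the error and no-interface cases of iwl_mvm_get_bss_vif() are meant to be handled:

	struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);

	if (IS_ERR_OR_NULL(bss_vif))
		return;	/* zero, or more than one, managed interface */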
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 6ced8549eb3a..3ee8e3848876 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -499,6 +499,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
@@ -507,6 +508,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
+#if IS_ENABLED(CONFIG_IWLMVM)
+ /*
+ * special-case 7265D, it has the same PCI IDs.
+ *
+ * Note that because we already pass the cfg to the transport above,
+ * all the parameters that the transport uses must, until that is
+ * changed, be identical to the ones in the 7265D configuration.
+ */
+ if (cfg == &iwl7265_2ac_cfg)
+ cfg_7265d = &iwl7265d_2ac_cfg;
+ else if (cfg == &iwl7265_2n_cfg)
+ cfg_7265d = &iwl7265d_2n_cfg;
+ else if (cfg == &iwl7265_n_cfg)
+ cfg_7265d = &iwl7265d_n_cfg;
+ if (cfg_7265d &&
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ cfg = cfg_7265d;
+#endif
+
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dd2f3f8baa9d..5d79a1f44b8e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -78,6 +78,11 @@
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
+#include "iwl-fh.h"
+
+/* extended range in FW SRAM */
+#define IWL_FW_MEM_EXTENDED_START 0x40000
+#define IWL_FW_MEM_EXTENDED_END 0x57FFF
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
@@ -133,7 +138,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
break;
}
- if (!page)
+ if (WARN_ON_ONCE(!page))
return;
trans_pcie->fw_mon_page = page;
@@ -512,6 +517,9 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT);
+ if (ret >= 0)
+ iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
return ret;
}
@@ -624,14 +632,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
}
for (offset = 0; offset < section->len; offset += chunk_sz) {
- u32 copy_size;
+ u32 copy_size, dst_addr;
+ bool extended_addr = false;
copy_size = min_t(u32, chunk_sz, section->len - offset);
+ dst_addr = section->offset + offset;
+
+ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWL_FW_MEM_EXTENDED_END)
+ extended_addr = true;
+
+ if (extended_addr)
+ iwl_set_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
- ret = iwl_pcie_load_firmware_chunk(trans,
- section->offset + offset,
- p_addr, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+ copy_size);
+
+ if (extended_addr)
+ iwl_clear_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
if (ret) {
IWL_ERR(trans,
"Could not load the [%d] uCode section\n",
@@ -644,14 +666,14 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
- const struct fw_img *image,
- int cpu,
- int *first_ucode_section)
+static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- int i, ret = 0;
- u32 last_read_idx = 0;
+ int i, ret = 0, sec_num = 0x1;
+ u32 val, last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
@@ -672,21 +694,16 @@ static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
break;
}
- if (i == (*first_ucode_section) + 1)
- /* set CPU to started */
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- LMPM_CPU_HDRS_LOADING_COMPLETED
- << shift_param);
-
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
+
+ /* Notify the ucode of the loaded section number and status */
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ sec_num = (sec_num << 1) | 0x1;
}
- /* image loading complete */
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
*first_ucode_section = last_read_idx;
@@ -739,49 +756,78 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
+static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret = 0;
- int first_ucode_section;
+ const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ int i;
- IWL_DEBUG_FW(trans,
- "working with %s image\n",
- image->is_secure ? "Secured" : "Non Secured");
- IWL_DEBUG_FW(trans,
- "working with %s CPU\n",
- image->is_dual_cpus ? "Dual" : "Single");
+ if (dest->version)
+ IWL_ERR(trans,
+ "DBG DEST version is %d - expect issues\n",
+ dest->version);
- /* configure the ucode to be ready to get the secured image */
- if (image->is_secure) {
- /* set secure boot inspector addresses */
- iwl_write_prph(trans,
- LMPM_SECURE_INSPECTOR_CODE_ADDR,
- LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+ IWL_INFO(trans, "Applying debug destination %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
- iwl_write_prph(trans,
- LMPM_SECURE_INSPECTOR_DATA_ADDR,
- LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+ if (dest->monitor_mode == EXTERNAL_MODE)
+ iwl_pcie_alloc_fw_monitor(trans);
+ else
+ IWL_WARN(trans, "PCI should have external buffer debug\n");
- /* set CPU1 header address */
- iwl_write_prph(trans,
- LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
- LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+ for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+ u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+ u32 val = le32_to_cpu(dest->reg_ops[i].val);
- /* load to FW the binary Secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
- &first_ucode_section);
- if (ret)
- return ret;
+ switch (dest->reg_ops[i].op) {
+ case CSR_ASSIGN:
+ iwl_write32(trans, addr, val);
+ break;
+ case CSR_SETBIT:
+ iwl_set_bit(trans, addr, BIT(val));
+ break;
+ case CSR_CLEARBIT:
+ iwl_clear_bit(trans, addr, BIT(val));
+ break;
+ case PRPH_ASSIGN:
+ iwl_write_prph(trans, addr, val);
+ break;
+ case PRPH_SETBIT:
+ iwl_set_bits_prph(trans, addr, BIT(val));
+ break;
+ case PRPH_CLEARBIT:
+ iwl_clear_bits_prph(trans, addr, BIT(val));
+ break;
+ default:
+ IWL_ERR(trans, "FW debug - unknown OP %d\n",
+ dest->reg_ops[i].op);
+ break;
+ }
+ }
- } else {
- /* load to FW the binary Non secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_sections(trans, image, 1,
- &first_ucode_section);
- if (ret)
- return ret;
+ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ trans_pcie->fw_mon_phys >> dest->base_shift);
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size) >> dest->end_shift);
}
+}
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* load to FW the binary non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+ if (ret)
+ return ret;
if (image->is_dual_cpus) {
/* set CPU2 header address */
@@ -790,13 +836,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
LMPM_SECURE_CPU2_HDR_MEM_SPACE);
/* load to FW the binary sections of CPU2 */
- if (image->is_secure)
- ret = iwl_pcie_load_cpu_secured_sections(
- trans, image, 2,
- &first_ucode_section);
- else
- ret = iwl_pcie_load_cpu_sections(trans, image, 2,
- &first_ucode_section);
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
if (ret)
return ret;
}
@@ -813,6 +854,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
(trans_pcie->fw_mon_phys +
trans_pcie->fw_mon_size) >> 4);
}
+ } else if (trans->dbg_dest_tlv) {
+ iwl_pcie_apply_destination(trans);
}
/* release CPU reset */
@@ -821,18 +864,50 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
else
iwl_write32(trans, CSR_RESET, 0);
- if (image->is_secure) {
- /* wait for image verification to complete */
- ret = iwl_poll_prph_bit(trans,
- LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_TIME_OUT);
+ return 0;
+}
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
- }
+static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+ u32 reg;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ /* release CPU reset */
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* load to FW the binary sections of CPU2 */
+ ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* Notify FW loading is done */
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+ if (ret < 0) {
+ reg = iwl_read_prph(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);
+
+ IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
+ reg);
+ return ret;
}
return 0;
@@ -884,7 +959,11 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- return iwl_pcie_load_given_ucode(trans, fw);
+ if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
+ (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP))
+ return iwl_pcie_load_given_ucode_8000b(trans, fw);
+ else
+ return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -941,7 +1020,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
@@ -974,6 +1054,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
@@ -1023,14 +1106,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
- iwl_pcie_set_pwr(trans, false);
-
- val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
- *status = IWL_D3_STATUS_RESET;
- return 0;
- }
-
/*
* Also enables interrupts - none will happen as the device doesn't
* know we're waking it up, only when the opmode actually tells it
@@ -1041,6 +1116,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
@@ -1050,6 +1128,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ iwl_pcie_set_pwr(trans, false);
+
iwl_trans_pcie_tx_reset(trans);
ret = iwl_pcie_rx_init(trans);
@@ -1058,7 +1138,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
- *status = IWL_D3_STATUS_ALIVE;
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+ *status = IWL_D3_STATUS_RESET;
+ else
+ *status = IWL_D3_STATUS_ALIVE;
+
return 0;
}
@@ -1236,6 +1321,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
/*
* These bits say the device is running, and should keep running for
@@ -1767,6 +1854,13 @@ err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ return 0;
+}
+#endif /*CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
@@ -1937,6 +2031,31 @@ static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
return csr_len;
}
+static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+ unsigned long flags;
+ __le32 *val;
+ int i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+ (*data)->len = cpu_to_le32(fh_regs_len);
+ val = (void *)(*data)->data;
+
+ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return sizeof(**data) + fh_regs_len;
+}
+
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
@@ -1946,6 +2065,7 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len;
+ u32 monitor_len;
int i, ptr;
/* transport dump header */
@@ -1968,10 +2088,34 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
num_bytes_in_chunk;
}
+ /* FH registers */
+ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
/* FW monitor */
- if (trans_pcie->fw_mon_page)
+ if (trans_pcie->fw_mon_page) {
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+ trans_pcie->fw_mon_size;
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv) {
+ u32 base, end;
+
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg_dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ end += (1 << trans->dbg_dest_tlv->end_shift);
+ monitor_len = end - base;
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans_pcie->fw_mon_size;
+ monitor_len;
+ } else {
+ monitor_len = 0;
+ }
dump_data = vzalloc(len);
if (!dump_data)
@@ -2008,49 +2152,77 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += iwl_trans_pcie_dump_prph(trans, &data);
len += iwl_trans_pcie_dump_csr(trans, &data);
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
/* data is already pointing to the next section */
- if (trans_pcie->fw_mon_page) {
+ if ((trans_pcie->fw_mon_page &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+ trans->dbg_dest_tlv) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->dbg_dest_tlv) {
+ write_ptr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
- data->len = cpu_to_le32(trans_pcie->fw_mon_size +
- sizeof(*fw_mon_data));
fw_mon_data = (void *)data->data;
fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));
-
- /*
- * The firmware is now asserted, it won't write anything to
- * the buffer. CPU can take ownership to fetch the data.
- * The buffer will be handed back to the device before the
- * firmware will be restarted.
- */
- dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
- memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
- trans_pcie->fw_mon_size);
-
- len += sizeof(*data) + sizeof(*fw_mon_data) +
- trans_pcie->fw_mon_size;
+ cpu_to_le32(iwl_read_prph(trans, base));
+
+ len += sizeof(*data) + sizeof(*fw_mon_data);
+ if (trans_pcie->fw_mon_page) {
+ data->len = cpu_to_le32(trans_pcie->fw_mon_size +
+ sizeof(*fw_mon_data));
+
+ /*
+ * The firmware is now asserted, it won't write anything
+ * to the buffer. CPU can take ownership to fetch the
+ * data. The buffer will be handed back to the device
+ * before the firmware will be restarted.
+ */
+ dma_sync_single_for_cpu(trans->dev,
+ trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ memcpy(fw_mon_data->data,
+ page_address(trans_pcie->fw_mon_page),
+ trans_pcie->fw_mon_size);
+
+ len += trans_pcie->fw_mon_size;
+ } else {
+ /* If we are here then the buffer is internal */
+
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ iwl_trans_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ data->len = cpu_to_le32(sizeof(*fw_mon_data) +
+ monitor_len);
+ len += monitor_len;
+ }
}
dump_data->len = len;
return dump_data;
}
-#else
-static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
-{
- return 0;
-}
-#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
@@ -2087,9 +2259,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
.dump_data = iwl_trans_pcie_dump_data,
-#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
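The new 8000 B-step loader above drops the LMPM CPU-status writes and instead accumulates a per-section completion bitmask that it writes to FH_UCODE_LOAD_STATUS after every section, with shift_param separating the CPU1 and CPU2 halves, before finally writing 0xFFFFFFFF to signal "all sections loaded". A small standalone sketch of how that sec_num bookkeeping evolves; the register name and the update rule come from the hunks above, while the shift of 16 used for CPU2 is only an illustrative assumption:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration only: mimics the sec_num bookkeeping in
 * iwl_pcie_load_cpu_sections_8000b().  After section i is loaded,
 * bits 0..i (shifted by the per-CPU shift) are set in the status
 * word the driver writes back to the firmware.
 */
static uint32_t load_sections(uint32_t status, int nsections, int shift)
{
	uint32_t sec_num = 0x1;
	int i;

	for (i = 0; i < nsections; i++) {
		/* ... iwl_pcie_load_section() would run here ... */
		status |= sec_num << shift;
		sec_num = (sec_num << 1) | 0x1;
		printf("section %d done, status = 0x%08x\n",
		       i, (unsigned)status);
	}
	return status;
}

int main(void)
{
	uint32_t status = 0;

	status = load_sections(status, 3, 0);	/* CPU1 sections */
	status = load_sections(status, 2, 16);	/* CPU2, assumed shift */
	/* the driver then writes 0xFFFFFFFF to mark loading complete */
	return 0;
}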
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index eb8e2984c5e9..8a6c7a084aa1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -989,6 +989,65 @@ out:
spin_unlock_bh(&txq->lock);
}
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_in_flight)
+ return 0;
+
+ trans_pcie->cmd_in_flight = true;
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
+ */
+ if (trans->cfg->base_params->apmg_wake_up_wa) {
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ trans_pcie->cmd_in_flight = false;
+ IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (WARN_ON(!trans_pcie->cmd_in_flight))
+ return 0;
+
+ trans_pcie->cmd_in_flight = false;
+
+ if (trans->cfg->base_params->apmg_wake_up_wa)
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ return 0;
+}
+
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1024,14 +1083,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
- if (trans->cfg->base_params->apmg_wake_up_wa &&
- q->read_ptr == q->write_ptr) {
+ if (q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- WARN_ON(!trans_pcie->cmd_in_flight);
- trans_pcie->cmd_in_flight = false;
- __iwl_trans_pcie_clear_bit(trans,
- CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@@ -1419,32 +1473,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-
- /*
- * wake up the NIC to make sure that the firmware will see the host
- * command - we will let the NIC sleep once all the host commands
- * returned. This needs to be done only on NICs that have
- * apmg_wake_up_wa set.
- */
- if (trans->cfg->base_params->apmg_wake_up_wa &&
- !trans_pcie->cmd_in_flight) {
- trans_pcie->cmd_in_flight = true;
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
- 15000);
- if (ret < 0) {
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
- trans_pcie->cmd_in_flight = false;
- IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
- idx = -EIO;
- goto out;
- }
+ ret = iwl_pcie_set_cmd_in_flight(trans);
+ if (ret < 0) {
+ idx = ret;
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ goto out;
}
/* Increment and update queue's write index */
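The tx.c hunks above factor the "wake the NIC for a host command" workaround into iwl_pcie_set_cmd_in_flight()/iwl_pcie_clear_cmd_in_flight(): the wakeup is taken once when the first host command is enqueued and released only when the command queue drains (read_ptr == write_ptr). A toy user-space model of that invariant, with the hardware poll stubbed out; the names below are illustrative, not the driver API:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the command-in-flight accounting, not the driver code. */
static bool cmd_in_flight;
static int read_ptr, write_ptr;

static void set_cmd_in_flight(void)
{
	if (cmd_in_flight)
		return;			/* already holding the wakeup */
	cmd_in_flight = true;
	/* real driver: set MAC_ACCESS_REQ and poll for clock-ready here */
}

static void clear_cmd_in_flight(void)
{
	assert(cmd_in_flight);		/* mirrors the WARN_ON() */
	cmd_in_flight = false;
	/* real driver: clear MAC_ACCESS_REQ so the NIC may sleep again */
}

static void enqueue_hcmd(void)
{
	set_cmd_in_flight();
	write_ptr++;
}

static void reclaim(void)
{
	read_ptr++;
	if (read_ptr == write_ptr)	/* queue drained -> drop the wakeup */
		clear_cmd_in_flight();
}

int main(void)
{
	enqueue_hcmd();
	enqueue_hcmd();			/* second command reuses the wakeup */
	reclaim();
	reclaim();
	printf("in flight after drain: %d\n", cmd_in_flight);
	return 0;
}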
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c9ad4cf1adfb..ef58a8862d91 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -395,7 +395,6 @@ static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
.driver = {
.name = "mac80211_hwsim",
- .owner = THIS_MODULE,
},
};
@@ -412,6 +411,11 @@ struct mac80211_hwsim_data {
struct mac_address addresses[2];
int channels, idx;
bool use_chanctx;
+ bool destroy_on_close;
+ struct work_struct destroy_work;
+ u32 portid;
+ char alpha2[2];
+ const struct ieee80211_regdomain *regd;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -419,6 +423,7 @@ struct mac80211_hwsim_data {
struct cfg80211_scan_request *hw_scan_request;
struct ieee80211_vif *hw_scan_vif;
int scan_chan_idx;
+ u8 scan_addr[ETH_ALEN];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
@@ -436,7 +441,7 @@ struct mac80211_hwsim_data {
/*
* Only radios in the same group can communicate together (the
* channel has to match too). Each bit represents a group. A
- * radio can be in more then one group.
+ * radio can be in more than one group.
*/
u64 group;
@@ -447,6 +452,14 @@ struct mac80211_hwsim_data {
s64 bcn_delta;
/* absolute beacon transmission time. Used to cover up "tx" delay. */
u64 abs_bcn_ts;
+
+ /* Stats */
+ u64 tx_pkts;
+ u64 rx_pkts;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 tx_dropped;
+ u64 tx_failed;
};
@@ -476,6 +489,14 @@ static struct genl_family hwsim_genl_family = {
.maxattr = HWSIM_ATTR_MAX,
};
+enum hwsim_multicast_groups {
+ HWSIM_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group hwsim_mcgrps[] = {
+ [HWSIM_MCGRP_CONFIG] = { .name = "config", },
+};
+
/* MAC80211_HWSIM netlink policy */
static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
@@ -496,6 +517,10 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
[HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING },
+ [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -807,6 +832,9 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
.ret = false,
};
+ if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0)
+ return true;
+
memcpy(md.addr, addr, ETH_ALEN);
ieee80211_iterate_active_interfaces_atomic(data->hw,
@@ -861,8 +889,10 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
/* If the queue contains MAX_QUEUE skb's drop some */
if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
/* Dropping until WARN_QUEUE level */
- while (skb_queue_len(&data->pending) >= WARN_QUEUE)
- skb_dequeue(&data->pending);
+ while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+ data->tx_dropped++;
+ }
}
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -896,6 +926,9 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
goto nla_put_failure;
+ if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
+ goto nla_put_failure;
+
/* We get the tx control (rate and retries) info */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -917,10 +950,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
+ data->tx_pkts++;
+ data->tx_bytes += my_skb->len;
return;
nla_put_failure:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+ ieee80211_free_txskb(hw, my_skb);
+ data->tx_failed++;
}
static bool hwsim_chans_compat(struct ieee80211_channel *c1,
@@ -952,6 +989,53 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
data->receive = true;
}
+static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
+{
+ /*
+ * To enable this code, #define the HWSIM_RADIOTAP_OUI,
+ * e.g. like this:
+ * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00"
+ * (but you should use a valid OUI, not that)
+ *
+ * If anyone wants to 'donate' a radiotap OUI/subns code
+ * please send a patch removing this #ifdef and changing
+ * the values accordingly.
+ */
+#ifdef HWSIM_RADIOTAP_OUI
+ struct ieee80211_vendor_radiotap *rtap;
+
+ /*
+ * Note that this code requires the headroom in the SKB
+ * that was allocated earlier.
+ */
+ rtap = (void *)skb_push(skb, sizeof(*rtap) + 8 + 4);
+ rtap->oui[0] = HWSIM_RADIOTAP_OUI[0];
+ rtap->oui[1] = HWSIM_RADIOTAP_OUI[1];
+ rtap->oui[2] = HWSIM_RADIOTAP_OUI[2];
+ rtap->subns = 127;
+
+ /*
+ * Radiotap vendor namespaces can (and should) also be
+ * split into fields by using the standard radiotap
+ * presence bitmap mechanism. Use just BIT(0) here for
+ * the presence bitmap.
+ */
+ rtap->present = BIT(0);
+ /* We have 8 bytes of (dummy) data */
+ rtap->len = 8;
+ /* For testing, also require it to be aligned */
+ rtap->align = 8;
+ /* And also test that padding works, 4 bytes */
+ rtap->pad = 4;
+ /* push the data */
+ memcpy(rtap->data, "ABCDEFGH", 8);
+ /* make sure to clear padding, mac80211 doesn't */
+ memset(rtap->data + 8, 0, 4);
+
+ IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
+#endif
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
@@ -1066,6 +1150,11 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.mactime = now + data2->tsf_offset;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
+
+ mac80211_hwsim_add_vendor_rtap(nskb);
+
+ data2->rx_pkts++;
+ data2->rx_bytes += nskb->len;
ieee80211_rx_irqsafe(data2->hw, nskb);
}
spin_unlock(&hwsim_radio_lock);
@@ -1133,6 +1222,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
+ data->tx_pkts++;
+ data->tx_bytes += skb->len;
ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
if (ack && skb->len >= 16) {
@@ -1716,7 +1807,7 @@ static void hw_scan_work(struct work_struct *work)
struct sk_buff *probe;
probe = ieee80211_probereq_get(hwsim->hw,
- hwsim->hw_scan_vif,
+ hwsim->scan_addr,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
@@ -1754,6 +1845,12 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hwsim->hw_scan_request = req;
hwsim->hw_scan_vif = vif;
hwsim->scan_chan_idx = 0;
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(hwsim->scan_addr,
+ hw_req->req.mac_addr,
+ hw_req->req.mac_addr_mask);
+ else
+ memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN);
mutex_unlock(&hwsim->mutex);
wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
@@ -1780,7 +1877,9 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&hwsim->mutex);
}
-static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
+static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1792,13 +1891,16 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
}
printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+
+ memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
hwsim->scanning = true;
out:
mutex_unlock(&hwsim->mutex);
}
-static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
+static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1806,6 +1908,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
printk(KERN_DEBUG "hwsim sw_scan_complete\n");
hwsim->scanning = false;
+ memset(hwsim->scan_addr, 0, ETH_ALEN);
mutex_unlock(&hwsim->mutex);
}
@@ -1916,6 +2019,57 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_chanctx_magic(ctx);
}
+static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_tx_dropped",
+ "d_tx_failed",
+ "d_ps_mode",
+ "d_group",
+ "d_tx_power",
+};
+
+#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
+
+static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *mac80211_hwsim_gstrings_stats,
+ sizeof(mac80211_hwsim_gstrings_stats));
+}
+
+static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return MAC80211_HWSIM_SSTATS_LEN;
+ return 0;
+}
+
+static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mac80211_hwsim_data *ar = hw->priv;
+ int i = 0;
+
+ data[i++] = ar->tx_pkts;
+ data[i++] = ar->tx_bytes;
+ data[i++] = ar->rx_pkts;
+ data[i++] = ar->rx_bytes;
+ data[i++] = ar->tx_dropped;
+ data[i++] = ar->tx_failed;
+ data[i++] = ar->ps;
+ data[i++] = ar->group;
+ data[i++] = ar->power_level;
+
+ WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
+}
+
static const struct ieee80211_ops mac80211_hwsim_ops = {
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -1939,14 +2093,131 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.flush = mac80211_hwsim_flush,
.get_tsf = mac80211_hwsim_get_tsf,
.set_tsf = mac80211_hwsim_set_tsf,
+ .get_et_sset_count = mac80211_hwsim_get_et_sset_count,
+ .get_et_stats = mac80211_hwsim_get_et_stats,
+ .get_et_strings = mac80211_hwsim_get_et_strings,
};
static struct ieee80211_ops mac80211_hwsim_mchan_ops;
-static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
- const struct ieee80211_regdomain *regd,
- bool reg_strict, bool p2p_device,
- bool use_chanctx)
+struct hwsim_new_radio_params {
+ unsigned int channels;
+ const char *reg_alpha2;
+ const struct ieee80211_regdomain *regd;
+ bool reg_strict;
+ bool p2p_device;
+ bool use_chanctx;
+ bool destroy_on_close;
+ const char *hwname;
+ bool no_vif;
+};
+
+static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
+ struct genl_info *info)
+{
+ if (info)
+ genl_notify(&hwsim_genl_family, mcast_skb,
+ genl_info_net(info), info->snd_portid,
+ HWSIM_MCGRP_CONFIG, info->nlhdr, GFP_KERNEL);
+ else
+ genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int append_radio_msg(struct sk_buff *skb, int id,
+ struct hwsim_new_radio_params *param)
+{
+ int ret;
+
+ ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
+ if (ret < 0)
+ return ret;
+
+ if (param->channels) {
+ ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->reg_alpha2) {
+ ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2,
+ param->reg_alpha2);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->regd) {
+ int i;
+
+ for (i = 0; hwsim_world_regdom_custom[i] != param->regd &&
+ i < ARRAY_SIZE(hwsim_world_regdom_custom); i++)
+ ;
+
+ if (i < ARRAY_SIZE(hwsim_world_regdom_custom)) {
+ ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (param->reg_strict) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->p2p_device) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->use_chanctx) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->hwname) {
+ ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME,
+ strlen(param->hwname), param->hwname);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hwsim_mcast_new_radio(int id, struct genl_info *info,
+ struct hwsim_new_radio_params *param)
+{
+ struct sk_buff *mcast_skb;
+ void *data;
+
+ mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!mcast_skb)
+ return;
+
+ data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0,
+ HWSIM_CMD_NEW_RADIO);
+ if (!data)
+ goto out_err;
+
+ if (append_radio_msg(mcast_skb, id, param) < 0)
+ goto out_err;
+
+ genlmsg_end(mcast_skb, data);
+
+ hwsim_mcast_config_msg(mcast_skb, info);
+ return;
+
+out_err:
+ genlmsg_cancel(mcast_skb, data);
+ nlmsg_free(mcast_skb);
+}
+
+static int mac80211_hwsim_new_radio(struct genl_info *info,
+ struct hwsim_new_radio_params *param)
{
int err;
u8 addr[ETH_ALEN];
@@ -1956,16 +2227,16 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
- if (WARN_ON(channels > 1 && !use_chanctx))
+ if (WARN_ON(param->channels > 1 && !param->use_chanctx))
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (use_chanctx)
+ if (param->use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
- hw = ieee80211_alloc_hw(sizeof(*data), ops);
+ hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
if (!hw) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
err = -ENOMEM;
@@ -2003,9 +2274,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
- data->channels = channels;
- data->use_chanctx = use_chanctx;
+ data->channels = param->channels;
+ data->use_chanctx = param->use_chanctx;
data->idx = idx;
+ data->destroy_on_close = param->destroy_on_close;
+ if (info)
+ data->portid = info->snd_portid;
if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
@@ -2014,12 +2288,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- if (p2p_device)
+ if (param->p2p_device)
data->if_combination = hwsim_if_comb_p2p_dev[0];
else
data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
- } else if (p2p_device) {
+ } else if (param->p2p_device) {
hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(hwsim_if_comb_p2p_dev);
@@ -2040,7 +2314,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- if (p2p_device)
+ if (param->p2p_device)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
@@ -2060,7 +2334,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_STATIC_SMPS |
- NL80211_FEATURE_DYNAMIC_SMPS;
+ NL80211_FEATURE_DYNAMIC_SMPS |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -2095,6 +2370,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
sband->ht_cap.ht_supported = true;
sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
sband->ht_cap.ampdu_factor = 0x3;
@@ -2111,7 +2387,6 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
sband->vht_cap.cap =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
@@ -2142,15 +2417,19 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->max_rates = 4;
hw->max_rate_tries = 11;
- if (reg_strict)
+ if (param->reg_strict)
hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
- if (regd) {
+ if (param->regd) {
+ data->regd = param->regd;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
- wiphy_apply_custom_regulatory(hw->wiphy, regd);
+ wiphy_apply_custom_regulatory(hw->wiphy, param->regd);
/* give the regulatory workqueue a chance to run */
schedule_timeout_interruptible(1);
}
+ if (param->no_vif)
+ hw->flags |= IEEE80211_HW_NO_AUTO_VIF;
+
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2160,8 +2439,11 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
- if (reg_alpha2)
- regulatory_hint(hw->wiphy, reg_alpha2);
+ if (param->reg_alpha2) {
+ data->alpha2[0] = param->reg_alpha2[0];
+ data->alpha2[1] = param->reg_alpha2[1];
+ regulatory_hint(hw->wiphy, param->reg_alpha2);
+ }
data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
@@ -2180,6 +2462,9 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
list_add_tail(&data->list, &hwsim_radios);
spin_unlock_bh(&hwsim_radio_lock);
+ if (idx > 0)
+ hwsim_mcast_new_radio(idx, info, param);
+
return idx;
failed_hw:
@@ -2192,8 +2477,46 @@ failed:
return err;
}
-static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
+static void hwsim_mcast_del_radio(int id, const char *hwname,
+ struct genl_info *info)
{
+ struct sk_buff *skb;
+ void *data;
+ int ret;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
+ HWSIM_CMD_DEL_RADIO);
+ if (!data)
+ goto error;
+
+ ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
+ if (ret < 0)
+ goto error;
+
+ ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname),
+ hwname);
+ if (ret < 0)
+ goto error;
+
+ genlmsg_end(skb, data);
+
+ hwsim_mcast_config_msg(skb, info);
+
+ return;
+
+error:
+ nlmsg_free(skb);
+}
+
+static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data,
+ const char *hwname,
+ struct genl_info *info)
+{
+ hwsim_mcast_del_radio(data->idx, hwname, info);
debugfs_remove_recursive(data->debugfs);
ieee80211_unregister_hw(data->hw);
device_release_driver(data->dev);
@@ -2201,6 +2524,46 @@ static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
ieee80211_free_hw(data->hw);
}
+static int mac80211_hwsim_get_radio(struct sk_buff *skb,
+ struct mac80211_hwsim_data *data,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb, int flags)
+{
+ void *hdr;
+ struct hwsim_new_radio_params param = { };
+ int res = -EMSGSIZE;
+
+ hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
+ HWSIM_CMD_GET_RADIO);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
+
+ if (data->alpha2[0] && data->alpha2[1])
+ param.reg_alpha2 = data->alpha2;
+
+ param.reg_strict = !!(data->hw->wiphy->regulatory_flags &
+ REGULATORY_STRICT_REG);
+ param.p2p_device = !!(data->hw->wiphy->interface_modes &
+ BIT(NL80211_IFTYPE_P2P_DEVICE));
+ param.use_chanctx = data->use_chanctx;
+ param.regd = data->regd;
+ param.channels = data->channels;
+ param.hwname = wiphy_name(data->hw->wiphy);
+
+ res = append_radio_msg(skb, data->idx, &param);
+ if (res < 0)
+ goto out_err;
+
+ return genlmsg_end(skb, hdr);
+
+out_err:
+ genlmsg_cancel(skb, hdr);
+ return res;
+}
+
static void mac80211_hwsim_free(void)
{
struct mac80211_hwsim_data *data;
@@ -2211,7 +2574,8 @@ static void mac80211_hwsim_free(void)
list))) {
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
- mac80211_hwsim_destroy_radio(data);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ NULL);
spin_lock_bh(&hwsim_radio_lock);
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -2339,7 +2703,6 @@ out:
static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
-
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
const u8 *dst;
@@ -2382,18 +2745,22 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
+ /* TODO: Check ATTR_FREQ if it exists, and maybe throw away off-channel
+ * packets?
+ */
rx_status.freq = data2->channel->center_freq;
rx_status.band = data2->channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ data2->rx_pkts++;
+ data2->rx_bytes += skb->len;
ieee80211_rx_irqsafe(data2->hw, skb);
return 0;
err:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
- goto out;
out:
dev_kfree_skb(skb);
return -EINVAL;
@@ -2429,54 +2796,72 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
return 0;
}
-static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
+static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
- unsigned int chans = channels;
- const char *alpha2 = NULL;
- const struct ieee80211_regdomain *regd = NULL;
- bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
- bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
- bool use_chanctx;
+ struct hwsim_new_radio_params param = { 0 };
+
+ param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ param.channels = channels;
+ param.destroy_on_close =
+ info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
- chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
+ if (info->attrs[HWSIM_ATTR_NO_VIF])
+ param.no_vif = true;
+
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+ param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
- use_chanctx = true;
+ param.use_chanctx = true;
else
- use_chanctx = (chans > 1);
+ param.use_chanctx = (param.channels > 1);
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
- alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
+ param.reg_alpha2 =
+ nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
return -EINVAL;
- regd = hwsim_world_regdom_custom[idx];
+ param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
- p2p_device, use_chanctx);
+ return mac80211_hwsim_new_radio(info, &param);
}
-static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
+static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct mac80211_hwsim_data *data;
- int idx;
+ s64 idx = -1;
+ const char *hwname = NULL;
- if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+ else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+ hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ else
return -EINVAL;
- idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (data->idx != idx)
- continue;
+ if (idx >= 0) {
+ if (data->idx != idx)
+ continue;
+ } else {
+ if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ continue;
+ }
+
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
- mac80211_hwsim_destroy_radio(data);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ info);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -2484,6 +2869,77 @@ static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
return -ENODEV;
}
+static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct mac80211_hwsim_data *data;
+ struct sk_buff *skb;
+ int idx, res = -ENODEV;
+
+ if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx != idx)
+ continue;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ res = mac80211_hwsim_get_radio(skb, data, info->snd_portid,
+ info->snd_seq, NULL, 0);
+ if (res < 0) {
+ nlmsg_free(skb);
+ goto out_err;
+ }
+
+ genlmsg_reply(skb, info);
+ break;
+ }
+
+out_err:
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ return res;
+}
+
+static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx = cb->args[0];
+ struct mac80211_hwsim_data *data = NULL;
+ int res;
+
+ spin_lock_bh(&hwsim_radio_lock);
+
+ if (idx == hwsim_radio_idx)
+ goto done;
+
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx < idx)
+ continue;
+
+ res = mac80211_hwsim_get_radio(skb, data,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb,
+ NLM_F_MULTI);
+ if (res < 0)
+ break;
+
+ idx = data->idx + 1;
+ }
+
+ cb->args[0] = idx;
+
+done:
+ spin_unlock_bh(&hwsim_radio_lock);
+ return skb->len;
+}
+
/* Generic Netlink operations array */
static const struct genl_ops hwsim_ops[] = {
{
@@ -2503,19 +2959,48 @@ static const struct genl_ops hwsim_ops[] = {
.doit = hwsim_tx_info_frame_received_nl,
},
{
- .cmd = HWSIM_CMD_CREATE_RADIO,
+ .cmd = HWSIM_CMD_NEW_RADIO,
.policy = hwsim_genl_policy,
- .doit = hwsim_create_radio_nl,
+ .doit = hwsim_new_radio_nl,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = HWSIM_CMD_DESTROY_RADIO,
+ .cmd = HWSIM_CMD_DEL_RADIO,
.policy = hwsim_genl_policy,
- .doit = hwsim_destroy_radio_nl,
+ .doit = hwsim_del_radio_nl,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = HWSIM_CMD_GET_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_get_radio_nl,
+ .dumpit = hwsim_dump_radio_nl,
+ },
};
+static void destroy_radio(struct work_struct *work)
+{
+ struct mac80211_hwsim_data *data =
+ container_of(work, struct mac80211_hwsim_data, destroy_work);
+
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
+}
+
+static void remove_user_radios(u32 portid)
+{
+ struct mac80211_hwsim_data *entry, *tmp;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
+ if (entry->destroy_on_close && entry->portid == portid) {
+ list_del(&entry->list);
+ INIT_WORK(&entry->destroy_work, destroy_radio);
+ schedule_work(&entry->destroy_work);
+ }
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+}
+
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
unsigned long state,
void *_notify)
@@ -2525,6 +3010,8 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
+ remove_user_radios(notify->portid);
+
if (notify->portid == wmediumd_portid) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
" socket, switching to perfect channel medium\n");
@@ -2544,7 +3031,9 @@ static int hwsim_init_netlink(void)
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
+ rc = genl_register_family_with_ops_groups(&hwsim_genl_family,
+ hwsim_ops,
+ hwsim_mcgrps);
if (rc)
goto failure;
@@ -2605,69 +3094,73 @@ static int __init init_mac80211_hwsim(void)
goto out_unregister_driver;
}
+ err = hwsim_init_netlink();
+ if (err < 0)
+ goto out_unregister_driver;
+
for (i = 0; i < radios; i++) {
- const char *reg_alpha2 = NULL;
- const struct ieee80211_regdomain *regd = NULL;
- bool reg_strict = false;
+ struct hwsim_new_radio_params param = { 0 };
+
+ param.channels = channels;
switch (regtest) {
case HWSIM_REGTEST_DIFF_COUNTRY:
if (i < ARRAY_SIZE(hwsim_alpha2s))
- reg_alpha2 = hwsim_alpha2s[i];
+ param.reg_alpha2 = hwsim_alpha2s[i];
break;
case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
if (!i)
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case HWSIM_REGTEST_STRICT_ALL:
- reg_strict = true;
+ param.reg_strict = true;
case HWSIM_REGTEST_DRIVER_REG_ALL:
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case HWSIM_REGTEST_WORLD_ROAM:
if (i == 0)
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
if (i == 0)
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
else if (i == 1)
- regd = &hwsim_world_regdom_custom_02;
+ param.regd = &hwsim_world_regdom_custom_02;
break;
case HWSIM_REGTEST_STRICT_FOLLOW:
if (i == 0) {
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[0];
}
break;
case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
if (i == 0) {
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[0];
} else if (i == 1) {
- reg_alpha2 = hwsim_alpha2s[1];
+ param.reg_alpha2 = hwsim_alpha2s[1];
}
break;
case HWSIM_REGTEST_ALL:
switch (i) {
case 0:
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case 1:
- regd = &hwsim_world_regdom_custom_02;
+ param.regd = &hwsim_world_regdom_custom_02;
break;
case 2:
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case 3:
- reg_alpha2 = hwsim_alpha2s[1];
+ param.reg_alpha2 = hwsim_alpha2s[1];
break;
case 4:
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[2];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[2];
break;
}
break;
@@ -2675,10 +3168,10 @@ static int __init init_mac80211_hwsim(void)
break;
}
- err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict,
- support_p2p_device,
- channels > 1);
+ param.p2p_device = support_p2p_device;
+ param.use_chanctx = channels > 1;
+
+ err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
goto out_free_radios;
}
@@ -2704,10 +3197,6 @@ static int __init init_mac80211_hwsim(void)
}
rtnl_unlock();
- err = hwsim_init_netlink();
- if (err < 0)
- goto out_free_mon;
-
return 0;
out_free_mon:
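With the renamed commands and the new attributes above, a radio can now be created from user space over generic netlink and torn down automatically when the creating socket closes. A minimal sketch using libnl-3; it assumes a local copy of the mac80211_hwsim.h UAPI header is on the include path, needs CAP_NET_ADMIN (the op is GENL_ADMIN_PERM), and skips error handling:

/* build with: gcc demo.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac80211_hwsim.h"	/* assumed local copy of the UAPI header */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_NEW_RADIO, 0);
	nla_put_string(msg, HWSIM_ATTR_RADIO_NAME, "hwsim-demo");
	nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);
	/* radio disappears again when this socket is closed */
	nla_put_flag(msg, HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE);

	nl_send_auto(sk, msg);
	nl_wait_for_ack(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}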
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index c9d0315575ba..66e1c73bd507 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -60,14 +60,17 @@ enum hwsim_tx_control_flags {
* space, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
* %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
- * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional)
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
- * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters,
- * returns the radio ID (>= 0) or negative on errors
- * @HWSIM_CMD_DESTROY_RADIO: destroy a radio
+ * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
+ * returns the radio ID (>= 0) or negative on errors; if successful,
+ * the result is also multicast
+ * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
+ * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses:
+ * %HWSIM_ATTR_RADIO_ID
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@@ -75,12 +78,16 @@ enum {
HWSIM_CMD_REGISTER,
HWSIM_CMD_FRAME,
HWSIM_CMD_TX_INFO_FRAME,
- HWSIM_CMD_CREATE_RADIO,
- HWSIM_CMD_DESTROY_RADIO,
+ HWSIM_CMD_NEW_RADIO,
+ HWSIM_CMD_DEL_RADIO,
+ HWSIM_CMD_GET_RADIO,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1)
+#define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO
+#define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO
+
/**
* enum hwsim_attrs - hwsim netlink attributes
*
@@ -111,6 +118,11 @@ enum {
* @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
* command to force use of channel contexts even when only a
* single channel is supported
+ * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO
+ * command to force radio removal when the process that created the radio dies
+ * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666
+ * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio.
+ * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received.
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -132,6 +144,10 @@ enum {
HWSIM_ATTR_REG_STRICT_REG,
HWSIM_ATTR_SUPPORT_P2P_DEVICE,
HWSIM_ATTR_USE_CHANCTX,
+ HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE,
+ HWSIM_ATTR_RADIO_NAME,
+ HWSIM_ATTR_NO_VIF,
+ HWSIM_ATTR_FREQ,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
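A user-space medium (wmediumd-style) should treat the new HWSIM_ATTR_FREQ in received frames as optional, since older kernels never send it. A hypothetical helper, assuming the attributes have already been parsed into an attrs[] array with nla_parse():

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include "mac80211_hwsim.h"	/* assumed local copy of the UAPI header */

/* Return the transmit frequency in MHz, or 0 if the kernel did not send it. */
static uint32_t frame_freq(struct nlattr *attrs[])
{
	if (attrs[HWSIM_ATTR_FREQ])
		return nla_get_u32(attrs[HWSIM_ATTR_FREQ]);
	return 0;	/* kernel without HWSIM_ATTR_FREQ support */
}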
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 62f5dbe602d3..9d4786e7ddff 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -544,6 +544,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
+ unsigned long flags;
u16 block_ack_param_set;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
@@ -554,15 +555,18 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
struct mwifiex_sta_node *sta_ptr;
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
peer_mac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 2ee268b632be..f275675cdbd3 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -84,6 +84,8 @@ mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
{
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
+ if (is_broadcast_ether_addr(ptr->ra))
+ return false;
tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
if (tx_tbl)
return tx_tbl->amsdu;
@@ -96,6 +98,8 @@ static inline u8
mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ptr, int tid)
{
+ if (is_broadcast_ether_addr(ptr->ra))
+ return false;
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
} else {
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 5ef5a0eeba50..d73fda312c87 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -351,6 +351,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->init_win = seq_num;
new_node->flags = 0;
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
if (mwifiex_queuing_ra_based(priv)) {
dev_dbg(priv->adapter->dev,
"info: AP/ADHOC:last_seq=%d start_win=%d\n",
@@ -367,6 +368,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
}
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win) {
@@ -455,22 +457,26 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
+ unsigned long flags;
uint16_t block_ack_param_set;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->is_hw_11ac_capable &&
memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
cmd_addba_req->peer_mac_addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index e70d0df9b0da..aa01c9bc77f9 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -31,7 +31,7 @@ config MWIFIEX_PCIE
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8797/8897"
+ tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
depends on MWIFIEX && USB
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 0dd672954ad1..4a66a6555366 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -194,10 +194,17 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
- mwifiex_queue_tx_pkt(priv, skb);
-
*cookie = prandom_u32() | 1;
- cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
+
+ if (ieee80211_is_action(mgmt->frame_control))
+ skb = mwifiex_clone_skb_for_tx_status(priv,
+ skb,
+ MWIFIEX_BUF_FLAG_ACTION_TX_STATUS, cookie);
+ else
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_ATOMIC);
+
+ mwifiex_queue_tx_pkt(priv, skb);
wiphy_dbg(wiphy, "info: management frame transmitted\n");
return 0;
@@ -992,6 +999,52 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
return mwifiex_dump_station_info(priv, sinfo);
}
+static int
+mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
+ int idx, struct survey_info *survey)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
+ enum ieee80211_band band;
+
+ dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+
+ memset(survey, 0, sizeof(struct survey_info));
+
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ priv->media_connected && idx == 0) {
+ u8 curr_bss_band = priv->curr_bss_params.band;
+ u32 chan = priv->curr_bss_params.bss_descriptor.channel;
+
+ band = mwifiex_band_to_radio_type(curr_bss_band);
+ survey->channel = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(chan, band));
+
+ if (priv->bcn_nf_last) {
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = priv->bcn_nf_last;
+ }
+ return 0;
+ }
+
+ if (idx >= priv->adapter->num_in_chan_stats)
+ return -ENOENT;
+
+ if (!pchan_stats[idx].cca_scan_dur)
+ return 0;
+
+ band = pchan_stats[idx].bandcfg;
+ survey->channel = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band));
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY;
+ survey->noise = pchan_stats[idx].noise;
+ survey->channel_time = pchan_stats[idx].cca_scan_dur;
+ survey->channel_time_busy = pchan_stats[idx].cca_busy_dur;
+
+ return 0;
+}
+
/* Supported rates to be advertised to the cfg80211 */
static struct ieee80211_rate mwifiex_rates[] = {
{.bitrate = 10, .hw_value = 2, },
@@ -1239,36 +1292,34 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
+ u8 deauth_mac[ETH_ALEN];
unsigned long flags;
if (list_empty(&priv->sta_list) || !priv->bss_started)
return 0;
- if (!mac || is_broadcast_ether_addr(mac)) {
- wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
- list_for_each_entry(sta_node, &priv->sta_list, list) {
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr, true))
- return -1;
- mwifiex_uap_del_sta_data(priv, sta_node);
- }
- } else {
- wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- sta_node = mwifiex_get_sta_entry(priv, mac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- if (sta_node) {
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr, true))
- return -1;
- mwifiex_uap_del_sta_data(priv, sta_node);
- }
+ if (!params->mac || is_broadcast_ether_addr(params->mac))
+ return 0;
+
+ wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+
+ memset(deauth_mac, 0, ETH_ALEN);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_node = mwifiex_get_sta_entry(priv, params->mac);
+ if (sta_node)
+ ether_addr_copy(deauth_mac, params->mac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (is_valid_ether_addr(deauth_mac)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ deauth_mac, true))
+ return -1;
}
return 0;
@@ -1759,6 +1810,10 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
dev_dbg(priv->adapter->dev,
"info: associated to bssid %pM successfully\n",
priv->cfg_bssid);
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->auto_tdls &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+ mwifiex_setup_auto_tdls_timer(priv);
} else {
dev_dbg(priv->adapter->dev,
"info: association to bssid %pM failed\n",
@@ -2630,11 +2685,13 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
dev_dbg(priv->adapter->dev,
"Send TDLS Setup Request to %pM status_code=%d\n", peer,
status_code);
+ mwifiex_add_auto_tdls_peer(priv, peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_SETUP_RESPONSE:
+ mwifiex_add_auto_tdls_peer(priv, peer);
dev_dbg(priv->adapter->dev,
"Send TDLS Setup Response to %pM status_code=%d\n",
peer, status_code);
@@ -2779,6 +2836,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.disconnect = mwifiex_cfg80211_disconnect,
.get_station = mwifiex_cfg80211_get_station,
.dump_station = mwifiex_cfg80211_dump_station,
+ .dump_survey = mwifiex_cfg80211_dump_survey,
.set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
.join_ibss = mwifiex_cfg80211_join_ibss,
.leave_ibss = mwifiex_cfg80211_leave_ibss,
@@ -2840,6 +2898,25 @@ static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
};
+int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
+{
+ u32 n_channels_bg, n_channels_a = 0;
+
+ n_channels_bg = mwifiex_band_2ghz.n_channels;
+
+ if (adapter->config_bands & BAND_A)
+ n_channels_a = mwifiex_band_5ghz.n_channels;
+
+ adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
+ adapter->num_in_chan_stats);
+
+ if (!adapter->chan_stats)
+ return -ENOMEM;
+
+ return 0;
+}
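
The table is only written for channels reported back by a scan, so before the first scan completes dump_survey can look at uninitialized bytes. A zeroing allocation would sidestep that; this is a sketch of an alternative, not what the patch does:

	/* sketch: same size, but zero-filled so cca_scan_dur starts at 0 */
	adapter->chan_stats = vzalloc(sizeof(*adapter->chan_stats) *
				      adapter->num_in_chan_stats);
	if (!adapter->chan_stats)
		return -ENOMEM;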
+
/*
* This function registers the device with CFG802.11 subsystem.
*
@@ -2915,6 +2992,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (adapter->fw_api_ver == MWIFIEX_FW_V15)
+ wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e0d00a7f0ec3..2269acf41ad8 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -76,6 +76,8 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
+#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3)
+#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
@@ -85,6 +87,11 @@
#define MWIFIEX_TDLS_CREATE_LINK 0x02
#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+#define MWIFIEX_TDLS_RSSI_HIGH 50
+#define MWIFIEX_TDLS_RSSI_LOW 55
+#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4
+#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -154,6 +161,8 @@ struct mwifiex_txinfo {
u8 bss_num;
u8 bss_type;
u32 pkt_len;
+ u8 ack_frame_id;
+ u64 cookie;
};
enum mwifiex_wmm_ac_e {
@@ -185,4 +194,14 @@ struct mwifiex_arp_eth_header {
u8 ar_tha[ETH_ALEN];
u8 ar_tip[4];
} __packed;
+
+struct mwifiex_chan_stats {
+ u8 chan_num;
+ u8 bandcfg;
+ u8 flags;
+ s8 noise;
+ u16 total_bss;
+ u16 cca_scan_dur;
+ u16 cca_busy_dur;
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1eb61739071f..fb5936eb82e3 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -172,6 +172,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197)
#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
+#define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -493,6 +494,7 @@ enum P2P_MODES {
#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
+#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_ID_MASK 0xffff
#define BSS_NUM_MASK 0xf
@@ -541,6 +543,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
+#define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20
struct txpd {
u8 bss_type;
@@ -552,7 +555,9 @@ struct txpd {
u8 priority;
u8 flags;
u8 pkt_delay_2ms;
- u8 reserved1;
+ u8 reserved1[2];
+ u8 tx_token_id;
+ u8 reserved[2];
} __packed;
struct rxpd {
@@ -583,6 +588,7 @@ struct rxpd {
* [Bit 7] Reserved
*/
u8 ht_info;
+ u8 reserved[3];
u8 flags;
} __packed;
@@ -596,8 +602,9 @@ struct uap_txpd {
u8 priority;
u8 flags;
u8 pkt_delay_2ms;
- u8 reserved1;
- __le32 reserved2;
+ u8 reserved1[2];
+ u8 tx_token_id;
+ u8 reserved[2];
};
struct uap_rxpd {
@@ -611,6 +618,16 @@ struct uap_rxpd {
u8 reserved1;
};
+struct mwifiex_fw_chan_stats {
+ u8 chan_num;
+ u8 bandcfg;
+ u8 flags;
+ s8 noise;
+ __le16 total_bss;
+ __le16 cca_scan_dur;
+ __le16 cca_busy_dur;
+} __packed;
+
enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_PASSIVE_SCAN = BIT(0),
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -660,6 +677,11 @@ struct mwifiex_ie_types_scan_chan_gap {
__le16 chan_gap;
} __packed;
+struct mwifiex_ietypes_chanstats {
+ struct mwifiex_ie_types_header header;
+ struct mwifiex_fw_chan_stats chanstats[0];
+} __packed;
+
struct mwifiex_ie_types_wildcard_ssid_params {
struct mwifiex_ie_types_header header;
u8 max_ssid_length;
@@ -1207,6 +1229,12 @@ struct mwifiex_event_scan_result {
u8 num_of_set;
} __packed;
+struct tx_status_event {
+ u8 packet_type;
+ u8 tx_token_id;
+ u8 status;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 580aa45ec4bc..520ad4a3018b 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ priv->check_tdls_tx = false;
memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
@@ -366,6 +367,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
list_del(&priv->tx_ba_stream_tbl_ptr);
list_del(&priv->rx_reorder_tbl_ptr);
list_del(&priv->sta_list);
+ list_del(&priv->auto_tdls_list);
}
}
}
@@ -434,6 +436,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
+ spin_lock_init(&priv->auto_tdls_lock);
}
}
@@ -449,7 +452,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->scan_pending_q_lock);
spin_lock_init(&adapter->rx_proc_lock);
- skb_queue_head_init(&adapter->usb_rx_data_q);
skb_queue_head_init(&adapter->rx_data_q);
for (i = 0; i < adapter->priv_num; ++i) {
@@ -466,10 +468,14 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ INIT_LIST_HEAD(&priv->auto_tdls_list);
skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
+
+ spin_lock_init(&priv->ack_status_lock);
+ idr_init(&priv->ack_status_frames);
}
return 0;
@@ -646,6 +652,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
if (adapter->priv[i]) {
priv = adapter->priv[i];
+ mwifiex_clean_auto_tdls(priv);
mwifiex_clean_txrx(priv);
mwifiex_delete_bss_prio_tbl(priv);
}
@@ -668,19 +675,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
spin_lock(&adapter->mwifiex_lock);
- if (adapter->if_ops.data_complete) {
- while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
-
- priv = adapter->priv[rx_info->bss_num];
- if (priv)
- priv->stats.rx_dropped++;
-
- dev_kfree_skb_any(skb);
- adapter->if_ops.data_complete(adapter);
- }
- }
-
mwifiex_adapter_cleanup(adapter);
spin_unlock(&adapter->mwifiex_lock);
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8d6c25908b6d..411a6c2f4aca 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -880,9 +880,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
/* Set Capability info */
bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
- tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap);
- tmp_cap &= ~WLAN_CAPABILITY_ESS;
- tmp_cap |= WLAN_CAPABILITY_IBSS;
+ tmp_cap = WLAN_CAPABILITY_IBSS;
/* Set up privacy in bss_desc */
if (priv->sec_info.encryption_mode) {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d5070c444fe1..d4d2223d1f31 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -28,6 +28,11 @@ const char driver_version[] = "mwifiex " VERSION " (%s) ";
static char *cal_data_cfg;
module_param(cal_data_cfg, charp, 0);
+static unsigned short driver_mode;
+module_param(driver_mode, ushort, 0);
+MODULE_PARM_DESC(driver_mode,
+ "station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");
+
/*
* This function registers the device and performs all the necessary
* initializations.
@@ -122,6 +127,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
}
}
+ vfree(adapter->chan_stats);
kfree(adapter);
return 0;
}
@@ -143,8 +149,11 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
/* Check for Rx data */
while ((skb = skb_dequeue(&adapter->rx_data_q))) {
atomic_dec(&adapter->rx_pending);
- if (adapter->delay_main_work &&
+ if ((adapter->delay_main_work ||
+ adapter->iface_type == MWIFIEX_USB) &&
(atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) {
+ if (adapter->if_ops.submit_rem_rx_urbs)
+ adapter->if_ops.submit_rem_rx_urbs(adapter);
adapter->delay_main_work = false;
queue_work(adapter->workqueue, &adapter->main_work);
}
@@ -177,7 +186,6 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
{
int ret = 0;
unsigned long flags;
- struct sk_buff *skb;
spin_lock_irqsave(&adapter->main_proc_lock, flags);
@@ -195,12 +203,15 @@ process_start:
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
- /* If we process interrupts first, it would increase RX pending
- * even further. Avoid this by checking if rx_pending has
- * crossed high threshold and schedule rx work queue
- * and then process interrupts
+ /* For non-USB interfaces, if we process interrupts first, it
+ * would increase RX pending even further. Avoid this by
+ * checking whether rx_pending has crossed the high threshold,
+ * scheduling the rx work queue, and only then processing
+ * interrupts. For the USB interface there are no interrupts;
+ * the HIGH_RX_PENDING check is already done in usb.c.
*/
- if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING) {
+ if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
+ adapter->iface_type != MWIFIEX_USB) {
adapter->delay_main_work = true;
if (!adapter->rx_processing)
queue_work(adapter->rx_workqueue,
@@ -252,11 +263,6 @@ process_start:
}
}
- /* Check Rx data for USB */
- if (adapter->iface_type == MWIFIEX_USB)
- while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
- mwifiex_handle_rx_packet(adapter, skb);
-
/* Check for event */
if (adapter->event_received) {
adapter->event_received = false;
@@ -447,6 +453,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_init_fw;
}
+ if (mwifiex_init_channel_scan_gap(adapter)) {
+ dev_err(adapter->dev, "could not init channel stats table\n");
+ goto err_init_fw;
+ }
+
+ if (driver_mode) {
+ driver_mode &= MWIFIEX_DRIVER_MODE_BITMASK;
+ driver_mode |= MWIFIEX_DRIVER_MODE_STA;
+ }
+
rtnl_lock();
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
@@ -456,6 +472,28 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_unlock();
goto err_add_intf;
}
+
+ if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+ NL80211_IFTYPE_AP, NULL, NULL);
+ if (IS_ERR(wdev)) {
+ dev_err(adapter->dev, "cannot create AP interface\n");
+ rtnl_unlock();
+ goto err_add_intf;
+ }
+ }
+
+ if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+ NL80211_IFTYPE_P2P_CLIENT, NULL,
+ NULL);
+ if (IS_ERR(wdev)) {
+ dev_err(adapter->dev,
+ "cannot create p2p client interface\n");
+ rtnl_unlock();
+ goto err_add_intf;
+ }
+ }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -570,6 +608,48 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
return 0;
}
+struct sk_buff *
+mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
+ struct sk_buff *skb, u8 flag, u64 *cookie)
+{
+ struct sk_buff *orig_skb = skb;
+ struct mwifiex_txinfo *tx_info, *orig_tx_info;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&priv->ack_status_lock, flags);
+ id = idr_alloc(&priv->ack_status_frames, orig_skb,
+ 1, 0xff, GFP_ATOMIC);
+ spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+
+ if (id >= 0) {
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->ack_frame_id = id;
+ tx_info->flags |= flag;
+ orig_tx_info = MWIFIEX_SKB_TXCB(orig_skb);
+ orig_tx_info->ack_frame_id = id;
+ orig_tx_info->flags |= flag;
+
+ if (flag == MWIFIEX_BUF_FLAG_ACTION_TX_STATUS && cookie)
+ orig_tx_info->cookie = *cookie;
+
+ } else if (skb_shared(skb)) {
+ kfree_skb(orig_skb);
+ } else {
+ kfree_skb(skb);
+ skb = orig_skb;
+ }
+ } else {
+ /* couldn't clone -- lose tx status ... */
+ skb = orig_skb;
+ }
+
+ return skb;
+}
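
The helper above clones the skb so that the original can be parked in an IDR under a 1-byte token (tx_token_id) that the firmware echoes back in EVENT_TX_STATUS_REPORT; mwifiex_parse_tx_status_event() later claims it. A stripped-down sketch of the same store/claim pattern with hypothetical names, assuming <linux/idr.h>, <linux/spinlock.h> and <linux/skbuff.h>:

/* Illustrative pattern only; not part of the driver. */
struct tx_status_tracker {
	struct idr frames;	/* token -> original skb */
	spinlock_t lock;
};

static int tracker_store(struct tx_status_tracker *t, struct sk_buff *skb)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&t->lock, flags);
	/* tokens 1..0xfe: they must fit the u8 tx_token_id, 0 means "none" */
	id = idr_alloc(&t->frames, skb, 1, 0xff, GFP_ATOMIC);
	spin_unlock_irqrestore(&t->lock, flags);
	return id;	/* negative on failure, as handled above */
}

static struct sk_buff *tracker_claim(struct tx_status_tracker *t, u8 token)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&t->lock, flags);
	skb = idr_find(&t->frames, token);
	if (skb)
		idr_remove(&t->frames, token);
	spin_unlock_irqrestore(&t->lock, flags);
	return skb;
}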
+
/*
* CFG802.11 network device handler for data transmission.
*/
@@ -579,6 +659,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct sk_buff *new_skb;
struct mwifiex_txinfo *tx_info;
+ bool multicast;
dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
@@ -619,6 +700,15 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = skb->len;
+ multicast = is_multicast_ether_addr(skb->data);
+
+ if (unlikely(!multicast && skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
+ priv->adapter->fw_api_ver == MWIFIEX_FW_V15))
+ skb = mwifiex_clone_skb_for_tx_status(priv,
+ skb,
+ MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS, NULL);
+
/* Record the current time the packet was queued; used to
* determine the amount of time the packet was queued in
* the driver before it was sent to the firmware.
@@ -628,6 +718,13 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
__net_timestamp(skb);
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA &&
+ !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) {
+ if (priv->adapter->auto_tdls && priv->check_tdls_tx)
+ mwifiex_tdls_check_tx(priv, skb);
+ }
+
mwifiex_queue_tx_pkt(priv, skb);
return 0;
@@ -858,7 +955,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- if (num_possible_cpus() > 1) {
+ if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) {
adapter->rx_work_enabled = true;
pr_notice("rx work enabled, cpus %d\n", num_possible_cpus());
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index f55658d15c60..e66993cb5daf 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -34,6 +34,7 @@
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/of.h>
+#include <linux/idr.h>
#include "decl.h"
#include "ioctl.h"
@@ -48,6 +49,11 @@ enum {
MWIFIEX_SYNC_CMD
};
+#define MWIFIEX_DRIVER_MODE_STA BIT(0)
+#define MWIFIEX_DRIVER_MODE_UAP BIT(1)
+#define MWIFIEX_DRIVER_MODE_P2P BIT(2)
+#define MWIFIEX_DRIVER_MODE_BITMASK (BIT(0) | BIT(1) | BIT(2))
+
#define MWIFIEX_MAX_AP 64
#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
@@ -106,10 +112,7 @@ enum {
*/
#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
adapter->event_received || \
- ((adapter->iface_type != MWIFIEX_USB) && \
- adapter->data_received) || \
- ((adapter->iface_type == MWIFIEX_USB) && \
- !skb_queue_empty(&adapter->usb_rx_data_q)))
+ adapter->data_received)
#define MWIFIEX_TYPE_CMD 1
#define MWIFIEX_TYPE_DATA 0
@@ -504,8 +507,11 @@ struct mwifiex_private {
struct mwifiex_wmm_desc wmm;
atomic_t wmm_tx_pending[IEEE80211_NUM_ACS];
struct list_head sta_list;
- /* spin lock for associated station list */
+ /* spin lock for associated station/TDLS peers list */
spinlock_t sta_list_spinlock;
+ struct list_head auto_tdls_list;
+ /* spin lock for auto TDLS peer list */
+ spinlock_t auto_tdls_lock;
struct list_head tx_ba_stream_tbl_ptr;
/* spin lock for tx_ba_stream_tbl_ptr queue */
spinlock_t tx_ba_stream_tbl_lock;
@@ -570,6 +576,12 @@ struct mwifiex_private {
bool hs2_enabled;
struct station_parameters *sta_params;
struct sk_buff_head tdls_txq;
+ u8 check_tdls_tx;
+ struct timer_list auto_tdls_timer;
+ bool auto_tdls_timer_active;
+ struct idr ack_status_frames;
+ /* spin lock for ack status */
+ spinlock_t ack_status_lock;
};
enum mwifiex_ba_status {
@@ -670,6 +682,17 @@ struct mwifiex_sta_node {
struct mwifiex_tdls_capab tdls_cap;
};
+struct mwifiex_auto_tdls_peer {
+ struct list_head list;
+ u8 mac_addr[ETH_ALEN];
+ u8 tdls_status;
+ int rssi;
+ long rssi_jiffies;
+ u8 failure_count;
+ u8 do_discover;
+ u8 do_setup;
+};
+
struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,13 +713,13 @@ struct mwifiex_if_ops {
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
- int (*data_complete) (struct mwifiex_adapter *);
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
void (*fw_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
+ void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
};
struct mwifiex_adapter {
@@ -767,7 +790,6 @@ struct mwifiex_adapter {
spinlock_t scan_pending_q_lock;
/* spin lock for RX processing routine */
spinlock_t rx_proc_lock;
- struct sk_buff_head usb_rx_data_q;
u32 scan_processing;
u16 region_code;
struct mwifiex_802_11d_domain_reg domain_reg;
@@ -845,6 +867,10 @@ struct mwifiex_adapter {
u8 curr_mem_idx;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
+ struct mwifiex_chan_stats *chan_stats;
+ u32 num_in_chan_stats;
+ int survey_idx;
+ bool auto_tdls;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -949,6 +975,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
int mwifiex_process_sta_event(struct mwifiex_private *);
int mwifiex_process_uap_event(struct mwifiex_private *);
void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
+void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv,
+ const u8 *ra_addr);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
@@ -1031,7 +1059,8 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf);
-int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp);
int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
void *buf);
@@ -1301,6 +1330,24 @@ u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
u32 pri_chan, u8 chan_bw);
int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter);
+int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb);
+void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv);
+void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
+ const u8 *mac, u8 link_status);
+void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
+ u8 *mac, s8 snr, s8 nflr);
+void mwifiex_check_auto_tdls(unsigned long context);
+void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
+void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
+void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+
+void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
+ void *event_body);
+
+struct sk_buff *
+mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
+ struct sk_buff *skb, u8 flag, u64 *cookie);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index ca64d4c94112..984a7a4fa93b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1623,7 +1623,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
if (*bytes_left >= sizeof(beacon_size)) {
/* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ beacon_size = le16_to_cpu(*(__le16 *)(*bss_info));
*bytes_left -= sizeof(beacon_size);
*bss_info += sizeof(beacon_size);
}
@@ -1755,6 +1755,7 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
+ adapter->survey_idx = 0;
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
if (!priv->scan_request) {
@@ -1976,10 +1977,53 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
return 0;
}
+static void
+mwifiex_update_chan_statistics(struct mwifiex_private *priv,
+ struct mwifiex_ietypes_chanstats *tlv_stat)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 i, num_chan;
+ struct mwifiex_fw_chan_stats *fw_chan_stats;
+ struct mwifiex_chan_stats chan_stats;
+
+ fw_chan_stats = (void *)((u8 *)tlv_stat +
+ sizeof(struct mwifiex_ie_types_header));
+ num_chan = le16_to_cpu(tlv_stat->header.len) /
+ sizeof(struct mwifiex_chan_stats);
+
+ for (i = 0 ; i < num_chan; i++) {
+ chan_stats.chan_num = fw_chan_stats->chan_num;
+ chan_stats.bandcfg = fw_chan_stats->bandcfg;
+ chan_stats.flags = fw_chan_stats->flags;
+ chan_stats.noise = fw_chan_stats->noise;
+ chan_stats.total_bss = le16_to_cpu(fw_chan_stats->total_bss);
+ chan_stats.cca_scan_dur =
+ le16_to_cpu(fw_chan_stats->cca_scan_dur);
+ chan_stats.cca_busy_dur =
+ le16_to_cpu(fw_chan_stats->cca_busy_dur);
+ dev_dbg(adapter->dev,
+ "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+ chan_stats.chan_num,
+ chan_stats.noise,
+ chan_stats.total_bss,
+ chan_stats.cca_scan_dur,
+ chan_stats.cca_busy_dur);
+ memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
+ sizeof(struct mwifiex_chan_stats));
+ fw_chan_stats++;
+ }
+}
+
/* This function handles the command response of extended scan */
-int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct mwifiex_adapter *adapter = priv->adapter;
+ struct host_cmd_ds_802_11_scan_ext *ext_scan_resp;
+ struct mwifiex_ie_types_header *tlv;
+ struct mwifiex_ietypes_chanstats *tlv_stat;
+ u16 buf_left, type, len;
+
struct host_cmd_ds_command *cmd_ptr;
struct cmd_ctrl_node *cmd_node;
unsigned long cmd_flags, scan_flags;
@@ -1987,6 +2031,36 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ ext_scan_resp = &resp->params.ext_scan;
+
+ tlv = (void *)ext_scan_resp->tlv_buffer;
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
+ - 1);
+
+ while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
+ type = le16_to_cpu(tlv->type);
+ len = le16_to_cpu(tlv->len);
+
+ if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
+ dev_err(adapter->dev,
+ "error processing scan response TLVs");
+ break;
+ }
+
+ switch (type) {
+ case TLV_TYPE_CHANNEL_STATS:
+ tlv_stat = (void *)tlv;
+ mwifiex_update_chan_statistics(priv, tlv_stat);
+ break;
+ default:
+ break;
+ }
+
+ buf_left -= len + sizeof(struct mwifiex_ie_types_header);
+ tlv = (void *)((u8 *)tlv + len +
+ sizeof(struct mwifiex_ie_types_header));
+ }
+
spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags);
spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags);
if (list_empty(&adapter->scan_pending_q)) {
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index b25766b43b9f..933dae137850 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -106,6 +106,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->supports_fw_dump = data->supports_fw_dump;
+ card->auto_tdls = data->auto_tdls;
}
sdio_claim_host(func);
@@ -1880,6 +1881,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
return -1;
}
+ adapter->auto_tdls = card->auto_tdls;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 20cd9adc98d3..54c07156dd78 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -246,6 +246,7 @@ struct sdio_mmc_card {
u8 curr_wr_port;
u8 *mp_regs;
+ u8 auto_tdls;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -262,6 +263,7 @@ struct mwifiex_sdio_device {
u16 tx_buf_size;
u32 mp_tx_agg_buf_size;
u32 mp_rx_agg_buf_size;
+ u8 auto_tdls;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -387,6 +389,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -400,6 +403,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -413,6 +417,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -426,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = true,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -439,6 +445,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = false,
+ .auto_tdls = true,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4aad44685f8d..b65e1014b0fc 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -983,7 +983,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
adapter->curr_cmd->wait_q_enabled = false;
break;
case HostCmd_CMD_802_11_SCAN_EXT:
- ret = mwifiex_ret_802_11_scan_ext(priv);
+ ret = mwifiex_ret_802_11_scan_ext(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f1c240eca0cd..b8c171df6223 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -55,9 +55,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
- ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
mwifiex_disable_all_tdls_links(priv);
+ if (priv->adapter->auto_tdls)
+ mwifiex_clean_auto_tdls(priv);
+ }
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -163,9 +167,6 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
NL80211_TDLS_TEARDOWN,
le16_to_cpu(tdls_evt->u.reason_code),
GFP_KERNEL);
- ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
- MWIFIEX_TDLS_DISABLE_LINK);
- queue_work(adapter->workqueue, &adapter->main_work);
break;
default:
break;
@@ -503,6 +504,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
break;
+ case EVENT_TX_STATUS_REPORT:
+ dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_parse_tx_status_event(priv, adapter->event_body);
+ break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 92f3eb839866..1626868a4b5c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -1026,12 +1026,12 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
int max_len)
{
union {
- u32 l;
+ __le32 l;
u8 c[4];
} ver;
char fw_ver[32];
- ver.l = adapter->fw_release_number;
+ ver.l = cpu_to_le32(adapter->fw_release_number);
sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]);
snprintf(version, max_len, driver_version, fw_ver);
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 9ceb1dbe34c5..c2ad3b63ae70 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -232,6 +232,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
if (sta_ptr)
sta_ptr->rx_seq[local_rx_pd->priority] =
le16_to_cpu(local_rx_pd->seq_num);
+ mwifiex_auto_tdls_update_peer_signal(priv, ta,
+ local_rx_pd->snr,
+ local_rx_pd->nf);
}
} else {
if (rx_pkt_type != PKT_TYPE_BAR)
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index dab7b33c54be..b896d7375b52 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -77,6 +77,12 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
local_tx_pd->pkt_delay_2ms =
mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
+ tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
+ local_tx_pd->tx_token_id = tx_info->ack_frame_id;
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
+ }
+
if (local_tx_pd->priority <
ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
/*
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e2949077f5b5..22884b429be7 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -24,6 +24,7 @@
#define TDLS_REQ_FIX_LEN 6
#define TDLS_RESP_FIX_LEN 8
#define TDLS_CONFIRM_FIX_LEN 6
+#define MWIFIEX_TDLS_WMM_INFO_SIZE 7
static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
const u8 *mac, u8 status)
@@ -367,6 +368,55 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
*pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
}
+static void
+mwifiex_tdls_add_wmm_param_ie(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+ struct ieee80211_wmm_param_ie *wmm;
+ u8 ac_vi[] = {0x42, 0x43, 0x5e, 0x00};
+ u8 ac_vo[] = {0x62, 0x32, 0x2f, 0x00};
+ u8 ac_be[] = {0x03, 0xa4, 0x00, 0x00};
+ u8 ac_bk[] = {0x27, 0xa4, 0x00, 0x00};
+
+ wmm = (void *)skb_put(skb, sizeof(*wmm));
+ memset(wmm, 0, sizeof(*wmm));
+
+ wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
+ wmm->len = sizeof(*wmm) - 2;
+ wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */
+ wmm->oui[1] = 0x50;
+ wmm->oui[2] = 0xf2;
+ wmm->oui_type = 2; /* WME */
+ wmm->oui_subtype = 1; /* WME param */
+ wmm->version = 1; /* WME ver */
+ wmm->qos_info = 0; /* U-APSD not in use */
+
+ /* use default WMM AC parameters for TDLS link */
+ memcpy(&wmm->ac[0], ac_be, sizeof(ac_be));
+ memcpy(&wmm->ac[1], ac_bk, sizeof(ac_bk));
+ memcpy(&wmm->ac[2], ac_vi, sizeof(ac_vi));
+ memcpy(&wmm->ac[3], ac_vo, sizeof(ac_vo));
+}
+
+static void
+mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb,
+ u8 qosinfo)
+{
+ u8 *buf;
+
+ buf = (void *)skb_put(skb, MWIFIEX_TDLS_WMM_INFO_SIZE +
+ sizeof(struct ieee_types_header));
+
+ *buf++ = WLAN_EID_VENDOR_SPECIFIC;
+ *buf++ = 7; /* len */
+ *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */
+ *buf++ = 0x50;
+ *buf++ = 0xf2;
+ *buf++ = 2; /* WME */
+ *buf++ = 0; /* WME info */
+ *buf++ = 1; /* WME ver */
+ *buf++ = qosinfo; /* U-APSD not in use */
+}
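
For reference, with qosinfo == 0 the nine bytes emitted by mwifiex_add_wmm_info_ie() above come out as shown below (0xdd is WLAN_EID_VENDOR_SPECIFIC); the array is only an illustration of the wire format:

static const u8 wmm_info_ie_example[] = {
	0xdd, 0x07,		/* vendor-specific IE, 7-byte payload */
	0x00, 0x50, 0xf2,	/* Microsoft OUI */
	0x02,			/* OUI type: WMM */
	0x00,			/* OUI subtype: information element */
	0x01,			/* version 1 */
	0x00,			/* QoS info: U-APSD not in use */
};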
+
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
const u8 *peer, u8 action_code,
u8 dialog_token,
@@ -421,6 +471,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -458,6 +509,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -466,6 +518,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
skb_put(skb, sizeof(tf->u.setup_cfm));
tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
tf->u.setup_cfm.dialog_token = dialog_token;
+
+ mwifiex_tdls_add_wmm_param_ie(priv, skb);
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
if (ret) {
@@ -544,6 +598,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
sizeof(struct ieee_types_bss_co_2040) +
sizeof(struct ieee80211_ht_operation) +
sizeof(struct ieee80211_tdls_lnkie) +
+ sizeof(struct ieee80211_wmm_param_ie) +
extra_ies_len;
if (priv->adapter->is_hw_11ac_capable)
@@ -973,6 +1028,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_NOT_SETUP);
memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
@@ -1017,6 +1073,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ mwifiex_auto_tdls_update_peer_status(priv, peer,
+ TDLS_SETUP_COMPLETE);
} else {
dev_dbg(priv->adapter->dev,
"tdls: enable link %pM failed\n", peer);
@@ -1030,6 +1088,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ mwifiex_auto_tdls_update_peer_status(priv, peer,
+ TDLS_NOT_SETUP);
return -1;
}
@@ -1097,3 +1157,231 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
mwifiex_del_all_sta_list(priv);
}
+
+int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, skb->data);
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) {
+ if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
+ peer->tdls_status == TDLS_NOT_SETUP &&
+ (peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
+ peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ dev_dbg(priv->adapter->dev,
+ "setup TDLS link, peer=%pM rssi=%d\n",
+ peer->mac_addr, peer->rssi);
+
+ cfg80211_tdls_oper_request(priv->netdev,
+ peer->mac_addr,
+ NL80211_TDLS_SETUP,
+ 0, GFP_ATOMIC);
+ peer->do_setup = false;
+ priv->check_tdls_tx = false;
+ } else if (peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT &&
+ peer->do_discover) {
+ mwifiex_send_tdls_data_frame(priv,
+ peer->mac_addr,
+ WLAN_TDLS_DISCOVERY_REQUEST,
+ 1, 0, NULL, 0);
+ peer->do_discover = false;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+
+ return 0;
+}
+
+void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_auto_tdls_peer *peer, *tmp_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+
+ INIT_LIST_HEAD(&priv->auto_tdls_list);
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ priv->check_tdls_tx = false;
+}
+
+void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
+{
+ struct mwifiex_auto_tdls_peer *tdls_peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(tdls_peer->mac_addr, mac, ETH_ALEN)) {
+ tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ tdls_peer->rssi_jiffies = jiffies;
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ return;
+ }
+ }
+
+ /* create new TDLS peer */
+ tdls_peer = kzalloc(sizeof(*tdls_peer), GFP_ATOMIC);
+ if (tdls_peer) {
+ ether_addr_copy(tdls_peer->mac_addr, mac);
+ tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ tdls_peer->rssi_jiffies = jiffies;
+ INIT_LIST_HEAD(&tdls_peer->list);
+ list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
+ dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
+ mac);
+ }
+
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
+ const u8 *mac, u8 link_status)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
+ if ((link_status == TDLS_NOT_SETUP) &&
+ (peer->tdls_status == TDLS_SETUP_INPROGRESS))
+ peer->failure_count++;
+ else if (link_status == TDLS_SETUP_COMPLETE)
+ peer->failure_count = 0;
+
+ peer->tdls_status = link_status;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
+ u8 *mac, s8 snr, s8 nflr)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
+ peer->rssi = nflr - snr;
+ peer->rssi_jiffies = jiffies;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_check_auto_tdls(unsigned long context)
+{
+ struct mwifiex_private *priv = (struct mwifiex_private *)context;
+ struct mwifiex_auto_tdls_peer *tdls_peer;
+ unsigned long flags;
+ u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+ if (WARN_ON_ONCE(!priv || !priv->adapter)) {
+ pr_err("mwifiex: %s: adapter or private structure is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (unlikely(!priv->adapter->auto_tdls))
+ return;
+
+ if (!priv->auto_tdls_timer_active) {
+ dev_dbg(priv->adapter->dev,
+ "auto TDLS timer inactive; return");
+ return;
+ }
+
+ priv->check_tdls_tx = false;
+
+ if (list_empty(&priv->auto_tdls_list)) {
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies +
+ msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ return;
+ }
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
+ if ((jiffies - tdls_peer->rssi_jiffies) >
+ (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
+ tdls_peer->rssi = 0;
+ tdls_peer->do_discover = true;
+ priv->check_tdls_tx = true;
+ }
+
+ if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
+ !tdls_peer->rssi) &&
+ tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+ tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
+ dev_dbg(priv->adapter->dev,
+ "teardown TDLS link,peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
+ tdls_peer->do_discover = true;
+ priv->check_tdls_tx = true;
+ cfg80211_tdls_oper_request(priv->netdev,
+ tdls_peer->mac_addr,
+ NL80211_TDLS_TEARDOWN,
+ reason, GFP_ATOMIC);
+ } else if (tdls_peer->rssi &&
+ tdls_peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
+ tdls_peer->tdls_status == TDLS_NOT_SETUP &&
+ tdls_peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT) {
+ priv->check_tdls_tx = true;
+ tdls_peer->do_setup = true;
+ dev_dbg(priv->adapter->dev,
+ "check TDLS with peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+}
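
Note that peer->rssi holds a positive magnitude (noise floor minus SNR, see mwifiex_auto_tdls_update_peer_signal()), so MWIFIEX_TDLS_RSSI_HIGH (50) means roughly -50 dBm or stronger and MWIFIEX_TDLS_RSSI_LOW (55) roughly -55 dBm or weaker. Two illustrative helpers restating the comparisons used in the timer above:

/* Illustrative only: restates the threshold checks done above. */
static inline bool auto_tdls_signal_good(int stored_rssi)
{
	/* stored_rssi = nflr - snr, i.e. 50 is about -50 dBm */
	return stored_rssi && stored_rssi <= MWIFIEX_TDLS_RSSI_HIGH;
}

static inline bool auto_tdls_signal_poor(int stored_rssi)
{
	return !stored_rssi || stored_rssi >= MWIFIEX_TDLS_RSSI_LOW;
}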
+
+void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv)
+{
+ init_timer(&priv->auto_tdls_timer);
+ priv->auto_tdls_timer.function = mwifiex_check_auto_tdls;
+ priv->auto_tdls_timer.data = (unsigned long)priv;
+ priv->auto_tdls_timer_active = true;
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+}
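
An equivalent, slightly more compact form of the same setup using the setup_timer() helper; shown only as a sketch of the idiom, while the patch keeps the open-coded init_timer() form:

static void mwifiex_setup_auto_tdls_timer_alt(struct mwifiex_private *priv)
{
	/* same behaviour as above, folded into setup_timer() */
	setup_timer(&priv->auto_tdls_timer, mwifiex_check_auto_tdls,
		    (unsigned long)priv);
	priv->auto_tdls_timer_active = true;
	mod_timer(&priv->auto_tdls_timer,
		  jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
}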
+
+void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
+{
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->auto_tdls &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
+ priv->auto_tdls_timer_active = false;
+ del_timer(&priv->auto_tdls_timer);
+ mwifiex_flush_auto_tdls_list(priv);
+ }
+}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 96a2126cc44b..6ae133333363 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -64,10 +64,6 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
else
ret = mwifiex_process_sta_rx_packet(priv, skb);
- /* Decrement RX pending counter for each packet */
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter);
-
return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -207,3 +203,34 @@ done:
}
EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
+void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
+ void *event_body)
+{
+ struct tx_status_event *tx_status = (void *)priv->adapter->event_body;
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+ struct mwifiex_txinfo *tx_info;
+
+ if (!tx_status->tx_token_id)
+ return;
+
+ spin_lock_irqsave(&priv->ack_status_lock, flags);
+ ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id);
+ if (ack_skb)
+ idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
+ spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+
+ if (ack_skb) {
+ tx_info = MWIFIEX_SKB_TXCB(ack_skb);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS) {
+ /* consumes ack_skb */
+ skb_complete_wifi_ack(ack_skb, !tx_status->status);
+ } else {
+ cfg80211_mgmt_tx_status(priv->wdev, tx_info->cookie,
+ ack_skb->data, ack_skb->len,
+ !tx_status->status, GFP_ATOMIC);
+ dev_kfree_skb_any(ack_skb);
+ }
+ }
+}
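
skb_complete_wifi_ack() only has a consumer when the sending socket asked for wifi TX status, which is what sets the SKBTX_WIFI_STATUS flag tested in mwifiex_hard_start_xmit(). A minimal user-space sketch of such a socket, for example for EAPOL frames; this is generic socket API rather than mwifiex code, and the helper name is made up:

/* user-space sketch: opt an EAPOL packet socket into wifi TX status */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

#ifndef SO_WIFI_STATUS
#define SO_WIFI_STATUS 41
#endif

int open_eapol_socket_with_tx_status(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_PAE));

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one)) < 0)
		perror("SO_WIFI_STATUS");
	/* acknowledgements arrive on the error queue (recvmsg with
	 * MSG_ERRQUEUE) as SCM_WIFI_STATUS control messages */
	return fd;
}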
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 300bab438011..0f347fdefa0a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
- memcpy(&bss_cfg->ht_cap, ht_ie + 2,
+ memcpy(&bss_cfg->ht_cap, ht_ie,
sizeof(struct ieee80211_ht_cap));
cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
memset(&bss_cfg->ht_cap.mcs, 0,
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7c2b97660a03..c54a537e31fb 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -110,6 +110,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
}
+ mwifiex_wmm_del_peer_ra_list(priv, deauth_mac);
mwifiex_del_sta_entry(priv, deauth_mac);
break;
case EVENT_UAP_BSS_IDLE:
@@ -172,6 +173,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
return mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
break;
+ case EVENT_TX_STATUS_REPORT:
+ dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_parse_tx_status_event(priv, adapter->event_body);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index ec7309d096ab..be3a203a529b 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -266,6 +266,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type;
+ unsigned long flags;
struct mwifiex_sta_node *node;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -294,10 +295,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
node = mwifiex_get_sta_entry(priv, ta);
if (node)
node->rx_seq[uap_rx_pd->priority] =
le16_to_cpu(uap_rx_pd->seq_num);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
if (!priv->ap_11n_enabled ||
@@ -370,10 +373,16 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
txpd->bss_num = priv->bss_num;
txpd->bss_type = priv->bss_type;
txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
-
txpd->priority = (u8)skb->priority;
+
txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
+ tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
+ txpd->tx_token_id = tx_info->ack_frame_id;
+ txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
+ }
+
if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
/*
* Set the priority specific tx_control field, setting of 0 will
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 4371e12b36f3..1b56495ec872 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -27,6 +27,11 @@ static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_device_id mwifiex_usb_table[] = {
+ /* 8766 */
+ {USB_DEVICE(USB8XXX_VID, USB8766_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8766_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
/* 8797 */
{USB_DEVICE(USB8XXX_VID, USB8797_PID_1)},
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
@@ -125,8 +130,10 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
dev_err(dev, "DATA: skb->len too large\n");
return -1;
}
- skb_queue_tail(&adapter->usb_rx_data_q, skb);
+
+ skb_queue_tail(&adapter->rx_data_q, skb);
adapter->data_received = true;
+ atomic_inc(&adapter->rx_pending);
break;
default:
dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
@@ -176,7 +183,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
else
skb_put(skb, recv_length - skb->len);
- atomic_inc(&adapter->rx_pending);
status = mwifiex_usb_recv(adapter, skb, context->ep);
dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
@@ -191,7 +197,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
if (card->rx_cmd_ep == context->ep)
return;
} else {
- atomic_dec(&adapter->rx_pending);
if (status == -1)
dev_err(adapter->dev,
"received data processing failed!\n");
@@ -222,7 +227,13 @@ setup_for_next:
else
size = MWIFIEX_RX_DATA_BUF_SIZE;
- mwifiex_usb_submit_rx_urb(context, size);
+ if (card->rx_cmd_ep == context->ep) {
+ mwifiex_usb_submit_rx_urb(context, size);
+ } else {
+ context->skb = NULL;
+ if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+ mwifiex_usb_submit_rx_urb(context, size);
+ }
return;
}
@@ -348,10 +359,12 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
/* PID_1 is used for firmware downloading only */
switch (id_product) {
+ case USB8766_PID_1:
case USB8797_PID_1:
case USB8897_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
+ case USB8766_PID_2:
case USB8797_PID_2:
case USB8897_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
@@ -780,6 +793,11 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
break;
+ case USB8766_PID_1:
+ case USB8766_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
+ strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME);
+ break;
case USB8797_PID_1:
case USB8797_PID_2:
default:
@@ -962,19 +980,11 @@ static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep)
static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
- atomic_dec(&adapter->rx_pending);
mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT);
return 0;
}
-static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
-{
- atomic_dec(&adapter->rx_pending);
-
- return 0;
-}
-
/* This function wakes up the card. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
@@ -986,6 +996,20 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
return 0;
}
+static void mwifiex_usb_submit_rem_rx_urbs(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ int i;
+ struct urb_context *ctx;
+
+ for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
+ if (card->rx_data_list[i].skb)
+ continue;
+ ctx = &card->rx_data_list[i];
+ mwifiex_usb_submit_rx_urb(ctx, MWIFIEX_RX_DATA_BUF_SIZE);
+ }
+}
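
Taken together with the mwifiex_usb_rx_complete() change above, this gives a simple flow-control loop: data URBs stop being re-armed once rx_pending goes past HIGH_RX_PENDING, and mwifiex_process_rx() invokes this new hook to re-arm the parked contexts (those with skb == NULL) after the backlog drains below LOW_RX_PENDING. A one-line predicate restating the throttle condition, purely for illustration:

/* Illustrative only; matches the check done before re-arming data URBs. */
static inline bool mwifiex_usb_rx_throttled(struct mwifiex_adapter *adapter)
{
	return atomic_read(&adapter->rx_pending) > HIGH_RX_PENDING;
}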
+
static struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
@@ -996,8 +1020,8 @@ static struct mwifiex_if_ops usb_ops = {
.dnld_fw = mwifiex_usb_dnld_fw,
.cmdrsp_complete = mwifiex_usb_cmd_event_complete,
.event_complete = mwifiex_usb_cmd_event_complete,
- .data_complete = mwifiex_usb_data_complete,
.host_to_card = mwifiex_usb_host_to_card,
+ .submit_rem_rx_urbs = mwifiex_usb_submit_rem_rx_urbs,
};
/* This function initializes the USB driver module.
@@ -1048,5 +1072,6 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
MODULE_VERSION(USB_VERSION);
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 4c41c2a193c5..a7cbba1355af 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -24,6 +24,8 @@
#define USB8XXX_VID 0x1286
+#define USB8766_PID_1 0x2041
+#define USB8766_PID_2 0x2042
#define USB8797_PID_1 0x2043
#define USB8797_PID_2 0x2044
#define USB8897_PID_1 0x2045
@@ -37,6 +39,7 @@
#define MWIFIEX_RX_DATA_URB 6
#define MWIFIEX_USB_TIMEOUT 100
+#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index ec79c49de097..b1768fbf98f2 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -141,6 +141,38 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
+ struct rxpd *rx_pd)
+{
+ u16 stype;
+ u8 category, action_code;
+ struct ieee80211_hdr *ieee_hdr = (void *)payload;
+
+ stype = (le16_to_cpu(ieee_hdr->frame_control) & IEEE80211_FCTL_STYPE);
+
+ switch (stype) {
+ case IEEE80211_STYPE_ACTION:
+ category = *(payload + sizeof(struct ieee80211_hdr));
+ action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
+ if (category == WLAN_CATEGORY_PUBLIC &&
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ dev_dbg(priv->adapter->dev,
+ "TDLS discovery response %pM nf=%d, snr=%d\n",
+ ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
+ mwifiex_auto_tdls_update_peer_signal(priv,
+ ieee_hdr->addr2,
+ rx_pd->snr,
+ rx_pd->nf);
+ }
+ break;
+ default:
+ dev_dbg(priv->adapter->dev,
+ "unknown mgmt frame subytpe %#x\n", stype);
+ }
+
+ return 0;
+}
/*
* This function processes the received management packet and sends it
* to the kernel.
@@ -151,6 +183,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
{
struct rxpd *rx_pd;
u16 pkt_len;
+ struct ieee80211_hdr *ieee_hdr;
if (!skb)
return -1;
@@ -162,6 +195,11 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+ ieee_hdr = (void *)skb->data;
+ if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
+ mwifiex_parse_mgmt_packet(priv, (u8 *)ieee_hdr,
+ pkt_len, rx_pd);
+ }
/* Remove address4 */
memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
skb->data + sizeof(struct ieee80211_hdr),
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 94c98a86ebbe..ffffd2c5a76e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -147,9 +147,6 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
struct mwifiex_sta_node *node;
unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, ra);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -170,10 +167,13 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
}
} else {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, ra);
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
if (ra_list->is_11n_enabled)
ra_list->max_amsdu = node->max_amsdu;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
@@ -523,6 +523,13 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
}
}
+static int mwifiex_free_ack_frame(int id, void *p, void *data)
+{
+ pr_warn("Have pending ack frames!\n");
+ kfree_skb(p);
+ return 0;
+}
+
/*
* This function cleans up the Tx and Rx queues.
*
@@ -558,6 +565,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
+ idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
+ idr_destroy(&priv->ack_status_frames);
}
/*
@@ -601,6 +611,32 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
}
/*
+ * This function deletes the RA list nodes for a given MAC address for all
+ * TIDs. It also decrements the TX pending count accordingly.
+ */
+void
+mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
+
+ if (!ra_list)
+ continue;
+ mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
+ atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
+ list_del(&ra_list->list);
+ kfree(ra_list);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/*
* This function checks if a particular RA list node exists in a given TID
* table index.
*/
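
The new mwifiex_wmm_del_peer_ra_list() above walks all TIDs, drops the matching RA list nodes and adjusts the queued-packet counter while holding the ra_list spinlock. The same remove-and-account pattern, sketched on a plain singly linked list with illustrative types (locking is left to the caller):

#include <stdlib.h>
#include <string.h>

struct ra_node {
	struct ra_node *next;
	unsigned char ra[6];          /* receiver address            */
	int pkt_count;                /* packets queued on this node */
};

/* Remove every node whose address matches @ra and subtract its packet count
 * from @total_queued.  The caller is assumed to hold whatever lock protects
 * the list (the driver holds wmm.ra_list_spinlock).
 */
static void del_peer_nodes(struct ra_node **head, const unsigned char *ra,
			   int *total_queued)
{
	struct ra_node **pp = head, *node;

	while ((node = *pp) != NULL) {
		if (memcmp(node->ra, ra, sizeof(node->ra)) == 0) {
			*pp = node->next;
			*total_queued -= node->pkt_count;
			free(node);
		} else {
			pp = &node->next;
		}
	}
}
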
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index ef1104476bd8..b8d1e04aa9b9 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5548,7 +5548,9 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return rc;
}
-static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
+static void mwl8k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct mwl8k_priv *priv = hw->priv;
u8 tmp;
@@ -5565,7 +5567,8 @@ static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
priv->sw_scan_start = true;
}
-static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
+static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
u8 tmp;
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
deleted file mode 100644
index aedfaf24f386..000000000000
--- a/drivers/net/wireless/p54/net2280.h
+++ /dev/null
@@ -1,451 +0,0 @@
-#ifndef NET2280_H
-#define NET2280_H
-/*
- * NetChip 2280 high/full speed USB device controller.
- * Unlike many such controllers, this one talks PCI.
- */
-
-/*
- * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
- * Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*-------------------------------------------------------------------------*/
-
-/* NET2280 MEMORY MAPPED REGISTERS
- *
- * The register layout came from the chip documentation, and the bit
- * number definitions were extracted from chip specification.
- *
- * Use the shift operator ('<<') to build bit masks, with readl/writel
- * to access the registers through PCI.
- */
-
-/* main registers, BAR0 + 0x0000 */
-struct net2280_regs {
- /* offset 0x0000 */
- __le32 devinit;
-#define LOCAL_CLOCK_FREQUENCY 8
-#define FORCE_PCI_RESET 7
-#define PCI_ID 6
-#define PCI_ENABLE 5
-#define FIFO_SOFT_RESET 4
-#define CFG_SOFT_RESET 3
-#define PCI_SOFT_RESET 2
-#define USB_SOFT_RESET 1
-#define M8051_RESET 0
- __le32 eectl;
-#define EEPROM_ADDRESS_WIDTH 23
-#define EEPROM_CHIP_SELECT_ACTIVE 22
-#define EEPROM_PRESENT 21
-#define EEPROM_VALID 20
-#define EEPROM_BUSY 19
-#define EEPROM_CHIP_SELECT_ENABLE 18
-#define EEPROM_BYTE_READ_START 17
-#define EEPROM_BYTE_WRITE_START 16
-#define EEPROM_READ_DATA 8
-#define EEPROM_WRITE_DATA 0
- __le32 eeclkfreq;
- u32 _unused0;
- /* offset 0x0010 */
-
- __le32 pciirqenb0; /* interrupt PCI master ... */
-#define SETUP_PACKET_INTERRUPT_ENABLE 7
-#define ENDPOINT_F_INTERRUPT_ENABLE 6
-#define ENDPOINT_E_INTERRUPT_ENABLE 5
-#define ENDPOINT_D_INTERRUPT_ENABLE 4
-#define ENDPOINT_C_INTERRUPT_ENABLE 3
-#define ENDPOINT_B_INTERRUPT_ENABLE 2
-#define ENDPOINT_A_INTERRUPT_ENABLE 1
-#define ENDPOINT_0_INTERRUPT_ENABLE 0
- __le32 pciirqenb1;
-#define PCI_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
- __le32 cpu_irqenb0; /* ... or onboard 8051 */
-#define SETUP_PACKET_INTERRUPT_ENABLE 7
-#define ENDPOINT_F_INTERRUPT_ENABLE 6
-#define ENDPOINT_E_INTERRUPT_ENABLE 5
-#define ENDPOINT_D_INTERRUPT_ENABLE 4
-#define ENDPOINT_C_INTERRUPT_ENABLE 3
-#define ENDPOINT_B_INTERRUPT_ENABLE 2
-#define ENDPOINT_A_INTERRUPT_ENABLE 1
-#define ENDPOINT_0_INTERRUPT_ENABLE 0
- __le32 cpu_irqenb1;
-#define CPU_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_INTA_INTERRUPT_ENABLE 24
-#define PCI_PME_INTERRUPT_ENABLE 23
-#define PCI_SERR_INTERRUPT_ENABLE 22
-#define PCI_PERR_INTERRUPT_ENABLE 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
-
- /* offset 0x0020 */
- u32 _unused1;
- __le32 usbirqenb1;
-#define USB_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_INTA_INTERRUPT_ENABLE 24
-#define PCI_PME_INTERRUPT_ENABLE 23
-#define PCI_SERR_INTERRUPT_ENABLE 22
-#define PCI_PERR_INTERRUPT_ENABLE 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
- __le32 irqstat0;
-#define INTA_ASSERTED 12
-#define SETUP_PACKET_INTERRUPT 7
-#define ENDPOINT_F_INTERRUPT 6
-#define ENDPOINT_E_INTERRUPT 5
-#define ENDPOINT_D_INTERRUPT 4
-#define ENDPOINT_C_INTERRUPT 3
-#define ENDPOINT_B_INTERRUPT 2
-#define ENDPOINT_A_INTERRUPT 1
-#define ENDPOINT_0_INTERRUPT 0
- __le32 irqstat1;
-#define POWER_STATE_CHANGE_INTERRUPT 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
-#define PCI_PARITY_ERROR_INTERRUPT 25
-#define PCI_INTA_INTERRUPT 24
-#define PCI_PME_INTERRUPT 23
-#define PCI_SERR_INTERRUPT 22
-#define PCI_PERR_INTERRUPT 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
-#define PCI_RETRY_ABORT_INTERRUPT 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
-#define GPIO_INTERRUPT 13
-#define DMA_D_INTERRUPT 12
-#define DMA_C_INTERRUPT 11
-#define DMA_B_INTERRUPT 10
-#define DMA_A_INTERRUPT 9
-#define EEPROM_DONE_INTERRUPT 8
-#define VBUS_INTERRUPT 7
-#define CONTROL_STATUS_INTERRUPT 6
-#define ROOT_PORT_RESET_INTERRUPT 4
-#define SUSPEND_REQUEST_INTERRUPT 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
-#define RESUME_INTERRUPT 1
-#define SOF_INTERRUPT 0
- /* offset 0x0030 */
- __le32 idxaddr;
- __le32 idxdata;
- __le32 fifoctl;
-#define PCI_BASE2_RANGE 16
-#define IGNORE_FIFO_AVAILABILITY 3
-#define PCI_BASE2_SELECT 2
-#define FIFO_CONFIGURATION_SELECT 0
- u32 _unused2;
- /* offset 0x0040 */
- __le32 memaddr;
-#define START 28
-#define DIRECTION 27
-#define FIFO_DIAGNOSTIC_SELECT 24
-#define MEMORY_ADDRESS 0
- __le32 memdata0;
- __le32 memdata1;
- u32 _unused3;
- /* offset 0x0050 */
- __le32 gpioctl;
-#define GPIO3_LED_SELECT 12
-#define GPIO3_INTERRUPT_ENABLE 11
-#define GPIO2_INTERRUPT_ENABLE 10
-#define GPIO1_INTERRUPT_ENABLE 9
-#define GPIO0_INTERRUPT_ENABLE 8
-#define GPIO3_OUTPUT_ENABLE 7
-#define GPIO2_OUTPUT_ENABLE 6
-#define GPIO1_OUTPUT_ENABLE 5
-#define GPIO0_OUTPUT_ENABLE 4
-#define GPIO3_DATA 3
-#define GPIO2_DATA 2
-#define GPIO1_DATA 1
-#define GPIO0_DATA 0
- __le32 gpiostat;
-#define GPIO3_INTERRUPT 3
-#define GPIO2_INTERRUPT 2
-#define GPIO1_INTERRUPT 1
-#define GPIO0_INTERRUPT 0
-} __packed;
-
-/* usb control, BAR0 + 0x0080 */
-struct net2280_usb_regs {
- /* offset 0x0080 */
- __le32 stdrsp;
-#define STALL_UNSUPPORTED_REQUESTS 31
-#define SET_TEST_MODE 16
-#define GET_OTHER_SPEED_CONFIGURATION 15
-#define GET_DEVICE_QUALIFIER 14
-#define SET_ADDRESS 13
-#define ENDPOINT_SET_CLEAR_HALT 12
-#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
-#define GET_STRING_DESCRIPTOR_2 10
-#define GET_STRING_DESCRIPTOR_1 9
-#define GET_STRING_DESCRIPTOR_0 8
-#define GET_SET_INTERFACE 6
-#define GET_SET_CONFIGURATION 5
-#define GET_CONFIGURATION_DESCRIPTOR 4
-#define GET_DEVICE_DESCRIPTOR 3
-#define GET_ENDPOINT_STATUS 2
-#define GET_INTERFACE_STATUS 1
-#define GET_DEVICE_STATUS 0
- __le32 prodvendid;
-#define PRODUCT_ID 16
-#define VENDOR_ID 0
- __le32 relnum;
- __le32 usbctl;
-#define SERIAL_NUMBER_INDEX 16
-#define PRODUCT_ID_STRING_ENABLE 13
-#define VENDOR_ID_STRING_ENABLE 12
-#define USB_ROOT_PORT_WAKEUP_ENABLE 11
-#define VBUS_PIN 10
-#define TIMED_DISCONNECT 9
-#define SUSPEND_IMMEDIATELY 7
-#define SELF_POWERED_USB_DEVICE 6
-#define REMOTE_WAKEUP_SUPPORT 5
-#define PME_POLARITY 4
-#define USB_DETECT_ENABLE 3
-#define PME_WAKEUP_ENABLE 2
-#define DEVICE_REMOTE_WAKEUP_ENABLE 1
-#define SELF_POWERED_STATUS 0
- /* offset 0x0090 */
- __le32 usbstat;
-#define HIGH_SPEED 7
-#define FULL_SPEED 6
-#define GENERATE_RESUME 5
-#define GENERATE_DEVICE_REMOTE_WAKEUP 4
- __le32 xcvrdiag;
-#define FORCE_HIGH_SPEED_MODE 31
-#define FORCE_FULL_SPEED_MODE 30
-#define USB_TEST_MODE 24
-#define LINE_STATE 16
-#define TRANSCEIVER_OPERATION_MODE 2
-#define TRANSCEIVER_SELECT 1
-#define TERMINATION_SELECT 0
- __le32 setup0123;
- __le32 setup4567;
- /* offset 0x0090 */
- u32 _unused0;
- __le32 ouraddr;
-#define FORCE_IMMEDIATE 7
-#define OUR_USB_ADDRESS 0
- __le32 ourconfig;
-} __packed;
-
-/* pci control, BAR0 + 0x0100 */
-struct net2280_pci_regs {
- /* offset 0x0100 */
- __le32 pcimstctl;
-#define PCI_ARBITER_PARK_SELECT 13
-#define PCI_MULTI LEVEL_ARBITER 12
-#define PCI_RETRY_ABORT_ENABLE 11
-#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
-#define DMA_READ_MULTIPLE_ENABLE 9
-#define DMA_READ_LINE_ENABLE 8
-#define PCI_MASTER_COMMAND_SELECT 6
-#define MEM_READ_OR_WRITE 0
-#define IO_READ_OR_WRITE 1
-#define CFG_READ_OR_WRITE 2
-#define PCI_MASTER_START 5
-#define PCI_MASTER_READ_WRITE 4
-#define PCI_MASTER_WRITE 0
-#define PCI_MASTER_READ 1
-#define PCI_MASTER_BYTE_WRITE_ENABLES 0
- __le32 pcimstaddr;
- __le32 pcimstdata;
- __le32 pcimststat;
-#define PCI_ARBITER_CLEAR 2
-#define PCI_EXTERNAL_ARBITER 1
-#define PCI_HOST_MODE 0
-} __packed;
-
-/* dma control, BAR0 + 0x0180 ... array of four structs like this,
- * for channels 0..3. see also struct net2280_dma: descriptor
- * that can be loaded into some of these registers.
- */
-struct net2280_dma_regs { /* [11.7] */
- /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */
- __le32 dmactl;
-#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
-#define DMA_CLEAR_COUNT_ENABLE 21
-#define DESCRIPTOR_POLLING_RATE 19
-#define POLL_CONTINUOUS 0
-#define POLL_1_USEC 1
-#define POLL_100_USEC 2
-#define POLL_1_MSEC 3
-#define DMA_VALID_BIT_POLLING_ENABLE 18
-#define DMA_VALID_BIT_ENABLE 17
-#define DMA_SCATTER_GATHER_ENABLE 16
-#define DMA_OUT_AUTO_START_ENABLE 4
-#define DMA_PREEMPT_ENABLE 3
-#define DMA_FIFO_VALIDATE 2
-#define DMA_ENABLE 1
-#define DMA_ADDRESS_HOLD 0
- __le32 dmastat;
-#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
-#define DMA_TRANSACTION_DONE_INTERRUPT 24
-#define DMA_ABORT 1
-#define DMA_START 0
- u32 _unused0[2];
- /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */
- __le32 dmacount;
-#define VALID_BIT 31
-#define DMA_DIRECTION 30
-#define DMA_DONE_INTERRUPT_ENABLE 29
-#define END_OF_CHAIN 28
-#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
-#define DMA_BYTE_COUNT 0
- __le32 dmaaddr;
- __le32 dmadesc;
- u32 _unused1;
-} __packed;
-
-/* dedicated endpoint registers, BAR0 + 0x0200 */
-
-struct net2280_dep_regs { /* [11.8] */
- /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */
- __le32 dep_cfg;
- /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
- __le32 dep_rsp;
- u32 _unused[2];
-} __packed;
-
-/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
- * like this, for ep0 then the configurable endpoints A..F
- * ep0 reserved for control; E and F have only 64 bytes of fifo
- */
-struct net2280_ep_regs { /* [11.9] */
- /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */
- __le32 ep_cfg;
-#define ENDPOINT_BYTE_COUNT 16
-#define ENDPOINT_ENABLE 10
-#define ENDPOINT_TYPE 8
-#define ENDPOINT_DIRECTION 7
-#define ENDPOINT_NUMBER 0
- __le32 ep_rsp;
-#define SET_NAK_OUT_PACKETS 15
-#define SET_EP_HIDE_STATUS_PHASE 14
-#define SET_EP_FORCE_CRC_ERROR 13
-#define SET_INTERRUPT_MODE 12
-#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
-#define SET_NAK_OUT_PACKETS_MODE 10
-#define SET_ENDPOINT_TOGGLE 9
-#define SET_ENDPOINT_HALT 8
-#define CLEAR_NAK_OUT_PACKETS 7
-#define CLEAR_EP_HIDE_STATUS_PHASE 6
-#define CLEAR_EP_FORCE_CRC_ERROR 5
-#define CLEAR_INTERRUPT_MODE 4
-#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
-#define CLEAR_NAK_OUT_PACKETS_MODE 2
-#define CLEAR_ENDPOINT_TOGGLE 1
-#define CLEAR_ENDPOINT_HALT 0
- __le32 ep_irqenb;
-#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
-#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
-#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
-#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
-#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
- __le32 ep_stat;
-#define FIFO_VALID_COUNT 24
-#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
-#define TIMEOUT 21
-#define USB_STALL_SENT 20
-#define USB_IN_NAK_SENT 19
-#define USB_IN_ACK_RCVD 18
-#define USB_OUT_PING_NAK_SENT 17
-#define USB_OUT_ACK_SENT 16
-#define FIFO_OVERFLOW 13
-#define FIFO_UNDERFLOW 12
-#define FIFO_FULL 11
-#define FIFO_EMPTY 10
-#define FIFO_FLUSH 9
-#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
-#define NAK_OUT_PACKETS 4
-#define DATA_PACKET_RECEIVED_INTERRUPT 3
-#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
-#define DATA_OUT_PING_TOKEN_INTERRUPT 1
-#define DATA_IN_TOKEN_INTERRUPT 0
- /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */
- __le32 ep_avail;
- __le32 ep_data;
- u32 _unused0[2];
-} __packed;
-
-struct net2280_reg_write {
- __le16 port;
- __le32 addr;
- __le32 val;
-} __packed;
-
-struct net2280_reg_read {
- __le16 port;
- __le32 addr;
-} __packed;
-#endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d273be7272b9..a5f5f0fea3bd 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -16,7 +16,7 @@
/* for isl3886 register definitions used on ver 1 devices */
#include "p54pci.h"
-#include "net2280.h"
+#include <linux/usb/net2280.h>
/* pci */
#define NET2280_BASE 0x10000000
@@ -93,6 +93,17 @@ enum net2280_op_type {
NET2280_DEV_CFG_U16 = 0x0883
};
+struct net2280_reg_write {
+ __le16 port;
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+struct net2280_reg_read {
+ __le16 port;
+ __le32 addr;
+} __packed;
+
#define P54U_FW_BLOCK 2048
#define X2_SIGNATURE "x2 "
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index d849d590de25..05c64597838d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
* BBP and RF register require indirect register access,
* and use the CSR registers BBPCSR and RFCSR to achieve this.
* These indirect registers work with busy bits,
- * and we will try maximal REGISTER_BUSY_COUNT times to access
+ * and we will try maximal REGISTER_USB_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
* between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
@@ -62,7 +62,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
__le16 reg;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
*value = le16_to_cpu(reg);
}
@@ -83,8 +83,7 @@ static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- value, length,
- REGISTER_TIMEOUT16(length));
+ value, length);
}
static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -94,7 +93,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
__le16 reg = cpu_to_le16(value);
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
}
static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
@@ -113,8 +112,7 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- value, length,
- REGISTER_TIMEOUT16(length));
+ value, length);
}
static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
@@ -124,7 +122,7 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_register_read_lock(rt2x00dev, offset, reg);
if (!rt2x00_get_field16(*reg, field))
return 1;
@@ -906,7 +904,7 @@ static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u8 value;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_bbp_read(rt2x00dev, 0, &value);
if ((value != 0xff) && (value != 0x00))
return 0;
@@ -1025,7 +1023,7 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
* We must wait until the register indicates that the
* device has entered the correct state.
*/
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2);
bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE);
rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE);
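
The busy-bit access scheme described in the comment above is a bounded poll: read the register, test the busy flag, delay, and give up after REGISTER_USB_BUSY_COUNT attempts. A minimal sketch of that pattern, with the register accessors abstracted as illustrative callbacks:

#include <stdbool.h>

#define REGISTER_USB_BUSY_COUNT 20    /* attempts before giving up      */
#define REGISTER_BUSY_DELAY     100   /* microseconds between attempts  */

typedef unsigned int (*read_reg_fn)(void *dev, unsigned int offset);
typedef void (*delay_us_fn)(unsigned int usec);

/* Poll @offset until the bits in @busy_mask clear or the attempt budget
 * runs out; returns true when the register contents can be trusted.
 */
static bool wait_not_busy(void *dev, unsigned int offset, unsigned int busy_mask,
			  read_reg_fn read_reg, delay_us_fn delay_us)
{
	unsigned int i;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		if (!(read_reg(dev, offset) & busy_mask))
			return true;
		delay_us(REGISTER_BUSY_DELAY);
	}
	return false;    /* busy bit never cleared: access failed */
}
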
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9f57a2db791c..81ee481487cf 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -4119,7 +4119,20 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
* expected. We adjust it, based on TSSI reference and boundaries values
* provided in EEPROM.
*/
- delta += rt2800_get_gain_calibration_delta(rt2x00dev);
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ case RT3572:
+ delta += rt2800_get_gain_calibration_delta(rt2x00dev);
+ break;
+ default:
+ /* TODO: temperature compensation code for other chips. */
+ break;
+ }
/*
* Decrease power according to user settings, on devices with unknown
@@ -4136,25 +4149,19 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use the +6 dBm option so as not to increase power beyond
* the regulatory limit; however, this could be utilized for devices with
* CAPABILITY_POWER_LIMIT.
- *
- * TODO: add different temperature compensation code for RT3290 & RT5390
- * to allow to use BBP_R1 for those chips.
- */
- if (!rt2x00_rt(rt2x00dev, RT3290) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- if (delta <= -12) {
- power_ctrl = 2;
- delta += 12;
- } else if (delta <= -6) {
- power_ctrl = 1;
- delta += 6;
- } else {
- power_ctrl = 0;
- }
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
- rt2800_bbp_write(rt2x00dev, 1, r1);
+ */
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
}
+ rt2800_bbp_read(rt2x00dev, 1, &r1);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+ rt2800_bbp_write(rt2x00dev, 1, r1);
offset = TX_PWR_CFG_0;
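
The restructured block above clamps the TX power delta into the -12 dB / -6 dB steps that BBP register 1 can express and folds the compensated amount back into the remaining delta. The bucket selection on its own, as a small sketch:

/* Pick the BBP_R1 TX power control step for a (negative) delta and fold the
 * compensated amount back into the remaining delta.
 */
static int pick_power_ctrl(int *delta)
{
	int power_ctrl;

	if (*delta <= -12) {
		power_ctrl = 2;      /* -12 dB hardware step */
		*delta += 12;
	} else if (*delta <= -6) {
		power_ctrl = 1;      /* -6 dB hardware step  */
		*delta += 6;
	} else {
		power_ctrl = 0;      /* no extra attenuation */
	}

	return power_ctrl;
}
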
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index f6d1bf5be006..aaa7aa4cad9d 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -244,7 +244,6 @@ static int rt2800soc_probe(struct platform_device *pdev)
static struct platform_driver rt2800soc_driver = {
.driver = {
.name = "rt2800_wmac",
- .owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
.probe = rt2800soc_probe,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d13f25cd70d5..9bb398bed9bb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1019,9 +1019,12 @@ struct rt2x00_bar_list_entry {
* Register defines.
* Some registers require multiple attempts before success,
* in those cases REGISTER_BUSY_COUNT attempts should be
- * taken with a REGISTER_BUSY_DELAY interval.
+ * taken with a REGISTER_BUSY_DELAY interval. Due to USB
+ * bus delays, we do not have to loop as many times to wait
+ * for a valid register value on that bus.
*/
#define REGISTER_BUSY_COUNT 100
+#define REGISTER_USB_BUSY_COUNT 20
#define REGISTER_BUSY_DELAY 100
/*
@@ -1437,8 +1440,11 @@ int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw);
-void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw);
+void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ad6e5a8d1e10..cb40245a0695 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -568,7 +568,9 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
+void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
@@ -576,7 +578,8 @@ void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
-void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw)
+void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 86c43d112a4b..892270dd3e7b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -42,37 +42,27 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
{
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
int status;
- unsigned int i;
unsigned int pipe =
(requesttype == USB_VENDOR_REQUEST_IN) ?
usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+ unsigned long expire = jiffies + msecs_to_jiffies(timeout);
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return -ENODEV;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ do {
status = usb_control_msg(usb_dev, pipe, request, requesttype,
value, offset, buffer, buffer_length,
- timeout);
+ timeout / 2);
if (status >= 0)
return 0;
- /*
- * Check for errors
- * -ENODEV: Device has disappeared, no point continuing.
- * All other errors: Try again.
- */
- else if (status == -ENODEV) {
+ if (status == -ENODEV) {
+ /* Device has disappeared. */
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
break;
}
- }
-
- /* If the port is powered down, we get a -EPROTO error, and this
- * leads to a endless loop. So just say that the device is gone.
- */
- if (status == -EPROTO)
- clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ } while (time_before(jiffies, expire));
rt2x00_err(rt2x00dev,
"Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
@@ -116,7 +106,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
const u16 offset, void *buffer,
- const u16 buffer_length, const int timeout)
+ const u16 buffer_length)
{
int status = 0;
unsigned char *tb;
@@ -131,7 +121,7 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
bsize = min_t(u16, CSR_CACHE_SIZE, len);
status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
requesttype, off, tb,
- bsize, timeout);
+ bsize, REGISTER_TIMEOUT);
tb += bsize;
len -= bsize;
@@ -154,7 +144,7 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return -ENODEV;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
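
The rt2x00usb_vendor_request() rework above replaces a fixed retry count with a wall-clock deadline, handing each usb_control_msg() attempt half of the overall timeout. A rough userspace sketch of the same deadline-based retry pattern (the transfer callback and names are illustrative):

#include <stdbool.h>
#include <time.h>

typedef int (*xfer_fn)(void *dev, int timeout_ms);   /* >= 0 means success */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Retry @do_xfer until it succeeds or @timeout_ms of wall time has elapsed,
 * giving each attempt half of the overall budget, as the loop above does
 * with usb_control_msg(..., timeout / 2).
 */
static bool xfer_with_deadline(void *dev, int timeout_ms, xfer_fn do_xfer)
{
	long expire = now_ms() + timeout_ms;

	do {
		if (do_xfer(dev, timeout_ms / 2) >= 0)
			return true;
	} while (now_ms() < expire);

	return false;
}
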
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 831b65f93feb..8f85fbd5f237 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -33,27 +33,14 @@
})
/*
- * For USB vendor requests we need to pass a timeout
- * time in ms, for this we use the REGISTER_TIMEOUT,
- * however when loading firmware a higher value is
- * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE.
+ * For USB vendor requests we need to pass a timeout time in ms, for this we
+ * use REGISTER_TIMEOUT; however, when loading firmware or reading the EEPROM
+ * a higher value is required. In that case we use REGISTER_TIMEOUT_FIRMWARE
+ * and EEPROM_TIMEOUT.
*/
-#define REGISTER_TIMEOUT 500
+#define REGISTER_TIMEOUT 100
#define REGISTER_TIMEOUT_FIRMWARE 1000
-
-/**
- * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access
- * @__datalen: Data length
- */
-#define REGISTER_TIMEOUT16(__datalen) \
- ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) )
-
-/**
- * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access
- * @__datalen: Data length
- */
-#define REGISTER_TIMEOUT32(__datalen) \
- ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) )
+#define EEPROM_TIMEOUT 2000
/*
* Cache size
@@ -126,7 +113,6 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
* @offset: Register offset to perform action on
* @buffer: Buffer where information will be read/written to by device
* @buffer_length: Size of &buffer
- * @timeout: Operation timeout
*
* This function will use a previously with kmalloc allocated cache
* to communicate with the device. The contents of the buffer pointer
@@ -139,7 +125,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
const u16 offset, void *buffer,
- const u16 buffer_length, const int timeout);
+ const u16 buffer_length);
/**
* rt2x00usb_vendor_request_buff - Send register command to device (buffered)
@@ -197,8 +183,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
{
return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
USB_VENDOR_REQUEST_IN, 0, 0,
- eeprom, length,
- REGISTER_TIMEOUT16(length));
+ eeprom, length, EEPROM_TIMEOUT);
}
/**
@@ -217,7 +202,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
__le32 reg;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
*value = le32_to_cpu(reg);
}
@@ -257,8 +242,7 @@ static inline void rt2x00usb_register_multiread(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- value, length,
- REGISTER_TIMEOUT32(length));
+ value, length);
}
/**
@@ -277,7 +261,7 @@ static inline void rt2x00usb_register_write(struct rt2x00_dev *rt2x00dev,
__le32 reg = cpu_to_le32(value);
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
}
/**
@@ -316,8 +300,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- (void *)value, length,
- REGISTER_TIMEOUT32(length));
+ (void *)value, length);
}
/**
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 95724ff9c726..a5458cf01fb2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1295,7 +1295,7 @@ static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u8 value;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt73usb_bbp_read(rt2x00dev, 0, &value);
if ((value != 0xff) && (value != 0x00))
return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 07dae0d44abc..5fc6f52641bd 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -786,6 +786,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
+ bool update_rcr = false;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -806,6 +807,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive multicast frame\n");
}
+ update_rcr = true;
}
if (changed_flags & FIF_FCSFAIL) {
@@ -818,6 +820,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive FCS error frame\n");
}
+ if (!update_rcr)
+ update_rcr = true;
}
/* if ssid not set to hw don't check bssid
@@ -832,6 +836,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_chk_bssid(hw, false);
else
rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ if (update_rcr)
+ update_rcr = false;
}
}
@@ -846,6 +852,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive control frame.\n");
}
+ if (!update_rcr)
+ update_rcr = true;
}
if (changed_flags & FIF_OTHER_BSS) {
@@ -858,7 +866,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive other BSS's frame.\n");
}
+ if (!update_rcr)
+ update_rcr = true;
}
+
+ if (update_rcr)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&mac->rx_conf));
}
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1361,7 +1375,9 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1396,7 +1412,8 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
}
-static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
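
The update_rcr flag introduced above batches the filter-flag changes and issues a single RCR write at the end instead of touching hardware in every branch. A condensed sketch of that pattern, with illustrative flag bits and write_rcr() standing in for the driver's set_hw_reg(HW_VAR_RCR, ...) call:

#include <stdbool.h>
#include <stdint.h>

#define FLT_ALLMULTI  0x1   /* illustrative filter bits */
#define FLT_FCSFAIL   0x2

static void configure_filter(uint32_t *rx_conf, uint32_t changed,
			     uint32_t new_flags, void (*write_rcr)(uint32_t))
{
	bool update_rcr = false;

	if (changed & FLT_ALLMULTI) {
		if (new_flags & FLT_ALLMULTI)
			*rx_conf |= FLT_ALLMULTI;
		else
			*rx_conf &= ~FLT_ALLMULTI;
		update_rcr = true;
	}

	if (changed & FLT_FCSFAIL) {
		if (new_flags & FLT_FCSFAIL)
			*rx_conf |= FLT_FCSFAIL;
		else
			*rx_conf &= ~FLT_FCSFAIL;
		update_rcr = true;
	}

	if (update_rcr)
		write_rcr(*rx_conf);   /* single hardware write at the end */
}
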
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 55357d69397a..5c646d5f7bb8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -955,6 +955,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
local_save_flags(flags);
local_irq_enable();
+ rtlhal->fw_ready = false;
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
@@ -971,6 +972,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
goto exit;
}
+ rtlhal->fw_ready = true;
rtlhal->last_hmeboxnum = 0;
rtl92c_phy_mac_config(hw);
/* because last function modify RCR, so we update
@@ -1287,6 +1289,7 @@ void rtl92ce_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
}
void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
@@ -1296,7 +1299,7 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 46ea07605eb4..dd5aa089126a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -228,6 +228,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.led_control = rtl92ce_led_control,
.set_desc = rtl92ce_set_desc,
.get_desc = rtl92ce_get_desc,
+ .is_tx_desc_closed = rtl92ce_is_tx_desc_closed,
.tx_polling = rtl92ce_tx_polling,
.enable_hw_sec = rtl92ce_enable_hw_security_config,
.set_key = rtl92ce_set_key,
@@ -271,6 +272,8 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
.maps[MAC_RCR_ACRC32] = ACRC32,
.maps[MAC_RCR_ACF] = ACF,
.maps[MAC_RCR_AAP] = AAP,
+ .maps[MAC_HIMR] = REG_HIMR,
+ .maps[MAC_HIMRE] = REG_HIMRE,
.maps[EFUSE_TEST] = REG_EFUSE_TEST,
.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index dc3d20b17a26..e88dcd0e0af1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -720,16 +720,15 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
break;
}
} else {
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = GET_RX_DESC_OWN(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = GET_RX_DESC_PKT_LEN(p_desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc);
+ ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
RT_ASSERT(false, "ERR rxdesc :%d not process\n",
@@ -740,6 +739,23 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
return ret;
}
+bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92ce_get_desc(entry, true, HW_DESC_OWN);
+
+ /* A beacon packet will only use the first descriptor by
+ * default, and the own bit may not be cleared by the hardware.
+ */
+ if (own)
+ return false;
+ return true;
+}
+
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 9a39ec4204dd..4bec4b07e3e0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -723,6 +723,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 873363acbacf..551321728ae0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1592,7 +1592,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
-bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
{
/* Currently nothing happens here.
* Traffic stops after some seconds in WPA2 802.11n mode.
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
index 11952b99daf8..0315eeda9b60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8192ee.o
-
-
rtl8192ee-objs := \
dm.o \
fw.o \
@@ -14,6 +11,6 @@ rtl8192ee-objs := \
trx.o \
-obj-$(CONFIG_RTL8821AE) += rtl8192ee.o
+obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
index 9c34a85fdb89..6220672a96f4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8723ae.o
-
-
rtl8723ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
index 59e416abd93a..a77c34102792 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8723be.o
-
-
rtl8723be-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
index 87ad604a1eb3..f7a26f71197e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8821ae.o
-
-
rtl8821ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index 9be106109921..ba30b0d250fd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -2078,8 +2078,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/
/*CCK*/
@@ -2128,7 +2127,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (method == BBSWING) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
final_swing_idx[RF90_PATH_A] =
(rtldm->ofdm_index[RF90_PATH_A] >
@@ -2260,7 +2259,8 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ __func__,
rtldm->swing_idx_cck_base,
rtldm->swing_idx_ofdm_base[RF90_PATH_A],
rtldm->default_ofdm_index);
@@ -2539,8 +2539,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
}
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
}
void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
index 1e9570fa874f..9b4d8a637915 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
@@ -800,7 +800,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else if (band == BAND_ON_5G) {
switch (rate_section) {
case OFDM:
@@ -823,7 +823,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band);
@@ -870,7 +870,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else if (band == BAND_ON_5G) {
switch (rate_section) {
case OFDM:
@@ -893,7 +893,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
@@ -3746,7 +3746,7 @@ static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw,
break;
default:
break;
- };
+ }
}
static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
@@ -3767,7 +3767,7 @@ static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
break;
default:
break;
- };
+ }
}
#define cal_num 10
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 38234851457e..0b30a7b4d663 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1029,7 +1029,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
goto out_sleep;
}
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, wl->vif->addr, ssid, ssid_len,
req->ie_len);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 0bccf123831e..d6d0d6d9c7a8 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1900,7 +1900,6 @@ static struct platform_driver wl12xx_driver = {
.id_table = wl12xx_id_table,
.driver = {
.name = "wl12xx_driver",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 7af1936719eb..8e562610bf16 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1968,7 +1968,6 @@ static struct platform_driver wl18xx_driver = {
.id_table = wl18xx_id_table,
.driver = {
.name = "wl18xx_driver",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 05604ee31224..b82661962d33 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -64,6 +64,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
id != CMD_STOP_FWLOGGER))
return -EIO;
+ if (WARN_ON_ONCE(len < sizeof(*cmd)))
+ return -EIO;
+
cmd = buf;
cmd->id = cpu_to_le16(id);
cmd->status = 0;
@@ -128,8 +131,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
* send command to fw and return cmd status on success
* valid_rets contains a bitmap of allowed error codes
*/
-int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len, unsigned long valid_rets)
+static int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, size_t res_len,
+ unsigned long valid_rets)
{
int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
@@ -150,7 +154,6 @@ fail:
wl12xx_queue_recovery_work(wl);
return ret;
}
-EXPORT_SYMBOL_GPL(wl1271_cmd_send);
/*
* wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS
@@ -165,6 +168,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
return ret;
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_send);
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
@@ -891,6 +895,9 @@ int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id);
+ if (WARN_ON_ONCE(len < sizeof(*acx)))
+ return -EIO;
+
acx->id = cpu_to_le16(id);
/* payload length, does not include any headers */
@@ -1138,7 +1145,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
- skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len,
ie0_len + ie1_len);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index ca6a28b03f8f..453684a71d30 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -31,8 +31,6 @@ struct acx_header;
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
-int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len, unsigned long valid_rets);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 16d10281798d..5153640f4532 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -259,10 +259,7 @@ void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
&wlvif->connection_loss_work,
msecs_to_jiffies(delay));
- ieee80211_cqm_rssi_notify(
- vif,
- NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
- GFP_KERNEL);
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 575c8f6d4009..6ad3fcedab9b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5177,10 +5177,11 @@ out:
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -5190,14 +5191,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WLCORE_STATE_OFF)) {
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
-
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
-
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
ieee80211_chswitch_done(vif, false);
- }
goto out;
} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
goto out;
@@ -5208,11 +5203,9 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
/* TODO: change mac80211 to pass vif as param */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- unsigned long delay_usec;
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+ unsigned long delay_usec;
ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
if (ret)
@@ -5222,10 +5215,10 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
/* indicate failure 5 seconds after channel switch time */
delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
- ch_switch->count;
+ ch_switch->count;
ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
- usecs_to_jiffies(delay_usec) +
- msecs_to_jiffies(5000));
+ usecs_to_jiffies(delay_usec) +
+ msecs_to_jiffies(5000));
}
out_sleep:
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 73a49b868035..07b94eda9604 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -129,7 +129,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
r = zd_ioread16v_locked(chip, v16, a16, count16);
if (r) {
dev_dbg_f(zd_chip_dev(chip),
- "error: zd_ioread16v_locked. Error number %d\n", r);
+ "error: %s. Error number %d\n", __func__, r);
return r;
}
@@ -256,8 +256,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
- "error _zd_iowrite32v_locked."
- " Error number %d\n", r);
+ "error _%s. Error number %d\n", __func__,
+ r);
return r;
}
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 083ecc93fe5e..5f1fda44882b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -230,6 +230,8 @@ struct xenvif {
*/
bool disabled;
unsigned long status;
+ unsigned long drain_timeout;
+ unsigned long stall_timeout;
/* Queues */
struct xenvif_queue *queues;
@@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
-extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 895fe84011e7..9259a732e8a4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
cb = XENVIF_RX_CB(skb);
- cb->expires = jiffies + rx_drain_timeout_jiffies;
+ cb->expires = jiffies + vif->drain_timeout;
xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
@@ -235,10 +235,10 @@ static void xenvif_down(struct xenvif *vif)
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- napi_disable(&queue->napi);
disable_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
disable_irq(queue->rx_irq);
+ napi_disable(&queue->napi);
del_timer_sync(&queue->credit_timeout);
}
}
@@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->ip_csum = 1;
vif->dev = dev;
vif->disabled = false;
+ vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
+ vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
/* Start out with no queues. */
vif->queues = NULL;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6563f0713fc0..908e65e9b821 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
*/
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
-unsigned int rx_drain_timeout_jiffies;
/* The length of time before the frontend is considered unresponsive
* because it isn't providing Rx slots.
*/
-static unsigned int rx_stall_timeout_msecs = 60000;
+unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
-static unsigned int rx_stall_timeout_jiffies;
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
@@ -82,6 +80,16 @@ MODULE_PARM_DESC(max_queues,
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area. If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
@@ -125,13 +133,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
pending_tx_info[0]);
}
-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
return (u16)frag->page_offset;
@@ -1446,9 +1447,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
- data_len = (txreq.size > PKT_PROT_LEN &&
+ data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
- PKT_PROT_LEN : txreq.size;
+ XEN_NETBACK_TX_COPY_LEN : txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@@ -1550,7 +1551,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
unsigned int len;
BUG_ON(i >= MAX_SKB_FRAGS);
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (!page) {
int j;
skb->truesize += skb->data_len;
@@ -1653,11 +1654,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
}
}
- if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
- int target = min_t(int, skb->len, PKT_PROT_LEN);
- __pskb_pull_tail(skb, target - skb_headlen(skb));
- }
-
skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);
@@ -2022,7 +2018,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
return !queue->stalled
&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
&& time_after(jiffies,
- queue->last_rx_time + rx_stall_timeout_jiffies);
+ queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
@@ -2040,8 +2036,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
- || xenvif_rx_queue_stalled(queue)
- || xenvif_rx_queue_ready(queue)
+ || (queue->vif->stall_timeout &&
+ (xenvif_rx_queue_stalled(queue)
+ || xenvif_rx_queue_ready(queue)))
|| kthread_should_stop()
|| queue->vif->disabled;
}
@@ -2094,6 +2091,9 @@ int xenvif_kthread_guest_rx(void *data)
struct xenvif_queue *queue = data;
struct xenvif *vif = queue->vif;
+ if (!vif->stall_timeout)
+ xenvif_queue_carrier_on(queue);
+
for (;;) {
xenvif_wait_for_rx_work(queue);
@@ -2120,10 +2120,12 @@ int xenvif_kthread_guest_rx(void *data)
* while it's probably not responsive, drop the
* carrier so packets are dropped earlier.
*/
- if (xenvif_rx_queue_stalled(queue))
- xenvif_queue_carrier_off(queue);
- else if (xenvif_rx_queue_ready(queue))
- xenvif_queue_carrier_on(queue);
+ if (vif->stall_timeout) {
+ if (xenvif_rx_queue_stalled(queue))
+ xenvif_queue_carrier_off(queue);
+ else if (xenvif_rx_queue_ready(queue))
+ xenvif_queue_carrier_on(queue);
+ }
/* Queued packets may have foreign pages from other
* domains. These cannot be queued indefinitely as
@@ -2194,9 +2196,6 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
- rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
- rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
-
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
if (IS_ERR_OR_NULL(xen_netback_dbg_root))
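
Note: the hunks above replace the module-wide rx_drain/rx_stall jiffies globals with per-vif fields. A minimal sketch of the intended conversion and stall check follows; the seeding of the per-vif fields from the module parameters happens in interface setup, which is not part of these hunks, so treat the first two lines as an assumption:

	/* Sketch: per-vif timeouts seeded from the module parameters
	 * (done at interface setup, outside the hunks shown above). */
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Stall detection only applies when a non-zero timeout is set;
	 * frontends without feature-rx-notify get stall_timeout = 0. */
	if (queue->vif->stall_timeout &&
	    time_after(jiffies,
		       queue->last_rx_time + queue->vif->stall_timeout))
		xenvif_queue_carrier_off(queue);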
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index fab0d4b42f58..efbaf2ae1999 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -404,6 +404,7 @@ static int backend_create_xenvif(struct backend_info *be)
int err;
long handle;
struct xenbus_device *dev = be->dev;
+ struct xenvif *vif;
if (be->vif != NULL)
return 0;
@@ -414,13 +415,13 @@ static int backend_create_xenvif(struct backend_info *be)
return (err < 0) ? err : -EINVAL;
}
- be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
- if (IS_ERR(be->vif)) {
- err = PTR_ERR(be->vif);
- be->vif = NULL;
+ vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
+ if (IS_ERR(vif)) {
+ err = PTR_ERR(vif);
xenbus_dev_fatal(dev, err, "creating interface");
return err;
}
+ be->vif = vif;
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
return 0;
@@ -886,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return -EOPNOTSUPP;
if (xenbus_scanf(XBT_NIL, dev->otherend,
- "feature-rx-notify", "%d", &val) < 0 || val == 0) {
- xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
- return -EINVAL;
+ "feature-rx-notify", "%d", &val) < 0)
+ val = 0;
+ if (!val) {
+ /* - Reduce drain timeout to poll more frequently for
+ * Rx requests.
+ * - Disable Rx stall detection.
+ */
+ be->vif->drain_timeout = msecs_to_jiffies(30);
+ be->vif->stall_timeout = 0;
}
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ece8d1804d13..22bcb4e12e2a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -77,7 +77,9 @@ struct netfront_cb {
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+
+/* Minimum number of Rx slots (includes slot for GSO metadata). */
+#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
@@ -137,13 +139,6 @@ struct netfront_queue {
struct xen_netif_rx_front_ring rx;
int rx_ring_ref;
- /* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
- unsigned rx_min_target, rx_max_target, rx_target;
- struct sk_buff_head rx_batch;
-
struct timer_list rx_refill_timer;
struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
@@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data)
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
+ (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
-static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+
+static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
- unsigned short id;
struct sk_buff *skb;
struct page *page;
- int i, batch_target, notify;
- RING_IDX req_prod = queue->rx.req_prod_pvt;
- grant_ref_t ref;
- unsigned long pfn;
- void *vaddr;
- struct xen_netif_rx_request *req;
- if (unlikely(!netif_carrier_ok(queue->info->netdev)))
- return;
-
- /*
- * Allocate skbuffs greedily, even though we batch updates to the
- * receive ring. This creates a less bursty demand on the memory
- * allocator, so should reduce the chance of failed allocation requests
- * both for ourself and for other kernel subsystems.
- */
- batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
- for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(queue->info->netdev,
- RX_COPY_THRESHOLD + NET_IP_ALIGN,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- goto no_skb;
-
- /* Align ip header to a 16 bytes boundary */
- skb_reserve(skb, NET_IP_ALIGN);
-
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page) {
- kfree_skb(skb);
-no_skb:
- /* Could not allocate any skbuffs. Try again later. */
- mod_timer(&queue->rx_refill_timer,
- jiffies + (HZ/10));
-
- /* Any skbuffs queued for refill? Force them out. */
- if (i != 0)
- goto refill;
- break;
- }
+ skb = __netdev_alloc_skb(queue->info->netdev,
+ RX_COPY_THRESHOLD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
- __skb_queue_tail(&queue->rx_batch, skb);
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ kfree_skb(skb);
+ return NULL;
}
+ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+
+ /* Align ip header to a 16 bytes boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = queue->info->netdev;
- /* Is the batch large enough to be worthwhile? */
- if (i < (queue->rx_target/2)) {
- if (req_prod > queue->rx.sring->req_prod)
- goto push;
+ return skb;
+}
+
+
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+{
+ RING_IDX req_prod = queue->rx.req_prod_pvt;
+ int notify;
+
+ if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
- }
- /* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
- ((queue->rx_target *= 2) > queue->rx_max_target))
- queue->rx_target = queue->rx_max_target;
+ for (req_prod = queue->rx.req_prod_pvt;
+ req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
+ req_prod++) {
+ struct sk_buff *skb;
+ unsigned short id;
+ grant_ref_t ref;
+ unsigned long pfn;
+ struct xen_netif_rx_request *req;
- refill:
- for (i = 0; ; i++) {
- skb = __skb_dequeue(&queue->rx_batch);
- if (skb == NULL)
+ skb = xennet_alloc_one_rx_buffer(queue);
+ if (!skb)
break;
- skb->dev = queue->info->netdev;
-
- id = xennet_rxidx(req_prod + i);
+ id = xennet_rxidx(req_prod);
BUG_ON(queue->rx_skbs[id]);
queue->rx_skbs[id] = skb;
@@ -345,9 +318,8 @@ no_skb:
queue->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- req = RING_GET_REQUEST(&queue->rx, req_prod + i);
+ req = RING_GET_REQUEST(&queue->rx, req_prod);
gnttab_grant_foreign_access_ref(ref,
queue->info->xbdev->otherend_id,
pfn_to_mfn(pfn),
@@ -357,11 +329,16 @@ no_skb:
req->gref = ref;
}
+ queue->rx.req_prod_pvt = req_prod;
+
+ /* Not enough requests? Try again later. */
+ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
+ return;
+ }
+
wmb(); /* barrier so backend sees requests */
- /* Above is a suitable barrier to ensure backend will see requests. */
- queue->rx.req_prod_pvt = req_prod + i;
- push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);
@@ -627,6 +604,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
slots, skb->len);
if (skb_linearize(skb))
goto drop;
+ data = skb->data;
+ offset = offset_in_page(data);
+ len = skb_headlen(skb);
}
spin_lock_irqsave(&queue->tx_lock, flags);
@@ -997,7 +977,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head rxq;
struct sk_buff_head errq;
struct sk_buff_head tmpq;
- unsigned long flags;
int err;
spin_lock(&queue->rx_lock);
@@ -1065,27 +1044,16 @@ err:
work_done -= handle_incoming_queue(queue, &rxq);
- /* If we get a callback with very few responses, reduce fill target. */
- /* NB. Note exponential increase, linear decrease. */
- if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
- ((3*queue->rx_target) / 4)) &&
- (--queue->rx_target < queue->rx_min_target))
- queue->rx_target = queue->rx_min_target;
-
xennet_alloc_rx_buffers(queue);
if (work_done < budget) {
int more_to_do = 0;
- napi_gro_flush(napi, false);
-
- local_irq_save(flags);
+ napi_complete(napi);
RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
- if (!more_to_do)
- __napi_complete(napi);
-
- local_irq_restore(flags);
+ if (more_to_do)
+ napi_schedule(napi);
}
spin_unlock(&queue->rx_lock);
@@ -1638,11 +1606,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- skb_queue_head_init(&queue->rx_batch);
- queue->rx_target = RX_DFL_MIN_TARGET;
- queue->rx_min_target = RX_DFL_MIN_TARGET;
- queue->rx_max_target = RX_MAX_TARGET;
-
init_timer(&queue->rx_refill_timer);
queue->rx_refill_timer.data = (unsigned long)queue;
queue->rx_refill_timer.function = rx_refill_timeout;
@@ -1665,7 +1628,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
}
/* A grant for every tx ring slot */
- if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
&queue->gref_tx_head) < 0) {
pr_alert("can't alloc tx grant refs\n");
err = -ENOMEM;
@@ -1673,7 +1636,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
}
/* A grant for every rx ring slot */
- if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
&queue->gref_rx_head) < 0) {
pr_alert("can't alloc rx grant refs\n");
err = -ENOMEM;
@@ -2141,83 +2104,18 @@ static const struct ethtool_ops xennet_ethtool_ops =
};
#ifdef CONFIG_SYSFS
-static ssize_t show_rxbuf_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_rxbuf(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
- else
- return sprintf(buf, "%u\n", RX_MIN_TARGET);
+ return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}
-static ssize_t store_rxbuf_min(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t store_rxbuf(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *np = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
- unsigned int i;
- struct netfront_queue *queue;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- target = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- return -EBADMSG;
-
- if (target < RX_MIN_TARGET)
- target = RX_MIN_TARGET;
- if (target > RX_MAX_TARGET)
- target = RX_MAX_TARGET;
-
- for (i = 0; i < num_queues; ++i) {
- queue = &np->queues[i];
- spin_lock_bh(&queue->rx_lock);
- if (target > queue->rx_max_target)
- queue->rx_max_target = target;
- queue->rx_min_target = target;
- if (target > queue->rx_target)
- queue->rx_target = target;
-
- xennet_alloc_rx_buffers(queue);
-
- spin_unlock_bh(&queue->rx_lock);
- }
- return len;
-}
-
-static ssize_t show_rxbuf_max(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
- else
- return sprintf(buf, "%u\n", RX_MAX_TARGET);
-}
-
-static ssize_t store_rxbuf_max(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *np = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
- char *endp;
- unsigned long target;
- unsigned int i = 0;
- struct netfront_queue *queue = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2226,44 +2124,15 @@ static ssize_t store_rxbuf_max(struct device *dev,
if (endp == buf)
return -EBADMSG;
- if (target < RX_MIN_TARGET)
- target = RX_MIN_TARGET;
- if (target > RX_MAX_TARGET)
- target = RX_MAX_TARGET;
+ /* rxbuf_min and rxbuf_max are no longer configurable. */
- for (i = 0; i < num_queues; ++i) {
- queue = &np->queues[i];
- spin_lock_bh(&queue->rx_lock);
- if (target < queue->rx_min_target)
- queue->rx_min_target = target;
- queue->rx_max_target = target;
- if (target < queue->rx_target)
- queue->rx_target = target;
-
- xennet_alloc_rx_buffers(queue);
-
- spin_unlock_bh(&queue->rx_lock);
- }
return len;
}
-static ssize_t show_rxbuf_cur(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_target);
- else
- return sprintf(buf, "0\n");
-}
-
static struct device_attribute xennet_attrs[] = {
- __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
- __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
- __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
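
For clarity, the reworked refill path above amounts to a single pass that tops the Rx ring up to its full size and arms a retry timer when it falls short of the minimum useful batch. A condensed sketch (grant setup and request filling elided):

	RING_IDX req_prod;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb = xennet_alloc_one_rx_buffer(queue);

		if (!skb)
			break;	/* allocation failed, stop early */
		/* ... store skb, grant the frag page, fill the request ... */
	}
	queue->rx.req_prod_pvt = req_prod;

	/* Fewer than the minimum useful number of slots? Retry later. */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN)
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ / 10));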
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 683671a71c7e..ce2e2cf54fbc 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -566,7 +566,6 @@ static struct platform_driver nfcwilink_driver = {
.remove = nfcwilink_remove,
.driver = {
.name = "nfcwilink",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 440291ab7263..fc02e8d6a193 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -29,8 +29,8 @@
#include <linux/delay.h>
#include <linux/nfc.h>
#include <linux/firmware.h>
-#include <linux/unaligned/access_ok.h>
#include <linux/platform_data/pn544.h>
+#include <asm/unaligned.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 0ea756b77519..05722085a59f 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -28,8 +28,8 @@
#include <linux/delay.h>
#include <linux/nfc.h>
#include <linux/firmware.h>
-#include <linux/unaligned/access_ok.h>
#include <linux/platform_data/st21nfca.h>
+#include <asm/unaligned.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
@@ -72,7 +72,6 @@ struct st21nfca_i2c_phy {
struct nfc_hci_dev *hdev;
unsigned int gpio_ena;
- unsigned int gpio_irq;
unsigned int irq_polarity;
struct sk_buff *pending_skb;
@@ -531,20 +530,12 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
"clf_enable");
if (r) {
nfc_err(&client->dev, "Failed to request enable pin\n");
- return -ENODEV;
+ return r;
}
phy->gpio_ena = gpio;
- /* IRQ */
- r = irq_of_parse_and_map(pp, 0);
- if (r < 0) {
- nfc_err(&client->dev, "Unable to get irq, error: %d\n", r);
- return r;
- }
-
- phy->irq_polarity = irq_get_trigger_type(r);
- client->irq = r;
+ phy->irq_polarity = irq_get_trigger_type(client->irq);
return 0;
}
@@ -560,7 +551,6 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
struct st21nfca_nfc_platform_data *pdata;
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
int r;
- int irq;
pdata = client->dev.platform_data;
if (pdata == NULL) {
@@ -569,36 +559,18 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
}
/* store for later use */
- phy->gpio_irq = pdata->gpio_irq;
phy->gpio_ena = pdata->gpio_ena;
phy->irq_polarity = pdata->irq_polarity;
- r = devm_gpio_request_one(&client->dev, phy->gpio_irq, GPIOF_IN,
- "wake_up");
- if (r) {
- pr_err("%s : gpio_request failed\n", __FILE__);
- return -ENODEV;
- }
-
if (phy->gpio_ena > 0) {
r = devm_gpio_request_one(&client->dev, phy->gpio_ena,
GPIOF_OUT_INIT_HIGH, "clf_enable");
if (r) {
pr_err("%s : ena gpio_request failed\n", __FILE__);
- return -ENODEV;
+ return r;
}
}
- /* IRQ */
- irq = gpio_to_irq(phy->gpio_irq);
- if (irq < 0) {
- nfc_err(&client->dev,
- "Unable to get irq number for GPIO %d error %d\n",
- phy->gpio_irq, r);
- return -ENODEV;
- }
- client->irq = irq;
-
return 0;
}
@@ -656,7 +628,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
r = st21nfca_hci_platform_init(phy);
if (r < 0) {
nfc_err(&client->dev, "Unable to reboot st21nfca\n");
- return -ENODEV;
+ return r;
}
r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -687,10 +659,13 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id of_st21nfca_i2c_match[] = {
{ .compatible = "st,st21nfca_i2c", },
{}
};
+MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match);
+#endif
static struct i2c_driver st21nfca_hci_i2c_driver = {
.driver = {
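
The OF match table added above only enables module autoloading if it is both exported (MODULE_DEVICE_TABLE) and referenced from the driver. A minimal sketch of the usual wiring; the .name value here is illustrative and not taken from the driver source:

	#ifdef CONFIG_OF
	static const struct of_device_id of_st21nfca_i2c_match[] = {
		{ .compatible = "st,st21nfca_i2c", },
		{}
	};
	MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match);
	#endif

	static struct i2c_driver st21nfca_hci_i2c_driver = {
		.driver = {
			.name		= "st21nfca_hci_i2c",	/* illustrative */
			.of_match_table	= of_match_ptr(of_st21nfca_i2c_match),
		},
		/* .probe and .remove as in the driver above */
	};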
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index a89e56c2c749..f2596c8d68b0 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -77,10 +77,6 @@
((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/
-#define ST21NFCA_EVT_FIELD_ON 0x11
-#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12
-#define ST21NFCA_EVT_CARD_ACTIVATED 0x13
-#define ST21NFCA_EVT_FIELD_OFF 0x14
static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
@@ -841,31 +837,11 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
u8 event, struct sk_buff *skb)
{
- int r;
- struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
-
- pr_debug("hci event: %d\n", event);
+ pr_debug("hci event: %d gate: %x\n", event, gate);
- switch (event) {
- case ST21NFCA_EVT_CARD_ACTIVATED:
- if (gate == ST21NFCA_RF_CARD_F_GATE)
- info->dep_info.curr_nfc_dep_pni = 0;
- break;
- case ST21NFCA_EVT_CARD_DEACTIVATED:
- break;
- case ST21NFCA_EVT_FIELD_ON:
- break;
- case ST21NFCA_EVT_FIELD_OFF:
- break;
- case ST21NFCA_EVT_SEND_DATA:
- if (gate == ST21NFCA_RF_CARD_F_GATE) {
- r = st21nfca_tm_event_send_data(hdev, skb, gate);
- if (r < 0)
- return r;
- return 0;
- }
- info->dep_info.curr_nfc_dep_pni = 0;
- return 1;
+ switch (gate) {
+ case ST21NFCA_RF_CARD_F_GATE:
+ return st21nfca_dep_event_received(hdev, event, skb);
default:
return 1;
}
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index a0b77f1ba6d9..7c2a85292230 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -85,6 +85,4 @@ struct st21nfca_hci_info {
#define ST21NFCA_RF_CARD_F_GATE 0x24
-#define ST21NFCA_EVT_SEND_DATA 0x10
-
#endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.c b/drivers/nfc/st21nfca/st21nfca_dep.c
index bfb6df56c505..8882181d65de 100644
--- a/drivers/nfc/st21nfca/st21nfca_dep.c
+++ b/drivers/nfc/st21nfca/st21nfca_dep.c
@@ -49,6 +49,12 @@
#define ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B 0x30
#define ST21NFCA_GB_BIT 0x02
+#define ST21NFCA_EVT_SEND_DATA 0x10
+#define ST21NFCA_EVT_FIELD_ON 0x11
+#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12
+#define ST21NFCA_EVT_CARD_ACTIVATED 0x13
+#define ST21NFCA_EVT_FIELD_OFF 0x14
+
#define ST21NFCA_EVT_CARD_F_BITRATE 0x16
#define ST21NFCA_EVT_READER_F_BITRATE 0x13
#define ST21NFCA_PSL_REQ_SEND_SPEED(brs) (brs & 0x38)
@@ -372,8 +378,8 @@ exit:
return r;
}
-int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb,
- u8 gate)
+static int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev,
+ struct sk_buff *skb)
{
u8 cmd0, cmd1;
int r;
@@ -400,7 +406,42 @@ int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb,
}
return r;
}
-EXPORT_SYMBOL(st21nfca_tm_event_send_data);
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ * 1: driver does not handle the event, please do standard processing
+ */
+int st21nfca_dep_event_received(struct nfc_hci_dev *hdev,
+ u8 event, struct sk_buff *skb)
+{
+ int r = 0;
+ struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ pr_debug("dep event: %d\n", event);
+
+ switch (event) {
+ case ST21NFCA_EVT_CARD_ACTIVATED:
+ info->dep_info.curr_nfc_dep_pni = 0;
+ break;
+ case ST21NFCA_EVT_CARD_DEACTIVATED:
+ break;
+ case ST21NFCA_EVT_FIELD_ON:
+ break;
+ case ST21NFCA_EVT_FIELD_OFF:
+ break;
+ case ST21NFCA_EVT_SEND_DATA:
+ r = st21nfca_tm_event_send_data(hdev, skb);
+ if (r < 0)
+ return r;
+ return 0;
+ default:
+ return 1;
+ }
+ kfree_skb(skb);
+ return r;
+}
+EXPORT_SYMBOL(st21nfca_dep_event_received);
static void st21nfca_im_send_psl_req(struct nfc_hci_dev *hdev, u8 did, u8 bsi,
u8 bri, u8 lri)
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.h b/drivers/nfc/st21nfca/st21nfca_dep.h
index ca213dee9c6e..baf4664b4fc4 100644
--- a/drivers/nfc/st21nfca/st21nfca_dep.h
+++ b/drivers/nfc/st21nfca/st21nfca_dep.h
@@ -32,8 +32,8 @@ struct st21nfca_dep_info {
u8 lri;
} __packed;
-int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb,
- u8 gate);
+int st21nfca_dep_event_received(struct nfc_hci_dev *hdev,
+ u8 event, struct sk_buff *skb);
int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);
int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len);
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
index c5d2427a3db2..01ba865863ee 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st21nfcb/i2c.c
@@ -50,7 +50,6 @@ struct st21nfcb_i2c_phy {
struct i2c_client *i2c_dev;
struct llt_ndlc *ndlc;
- unsigned int gpio_irq;
unsigned int gpio_reset;
unsigned int irq_polarity;
@@ -81,8 +80,6 @@ static void st21nfcb_nci_i2c_disable(void *phy_id)
{
struct st21nfcb_i2c_phy *phy = phy_id;
- pr_info("\n");
-
phy->powered = 0;
/* reset chip in order to flush clf */
gpio_set_value(phy->gpio_reset, 0);
@@ -258,19 +255,11 @@ static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client)
GPIOF_OUT_INIT_HIGH, "clf_reset");
if (r) {
nfc_err(&client->dev, "Failed to request reset pin\n");
- return -ENODEV;
- }
- phy->gpio_reset = gpio;
-
- /* IRQ */
- r = irq_of_parse_and_map(pp, 0);
- if (r < 0) {
- nfc_err(&client->dev, "Unable to get irq, error: %d\n", r);
return r;
}
+ phy->gpio_reset = gpio;
- phy->irq_polarity = irq_get_trigger_type(r);
- client->irq = r;
+ phy->irq_polarity = irq_get_trigger_type(client->irq);
return 0;
}
@@ -286,7 +275,6 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
struct st21nfcb_nfc_platform_data *pdata;
struct st21nfcb_i2c_phy *phy = i2c_get_clientdata(client);
int r;
- int irq;
pdata = client->dev.platform_data;
if (pdata == NULL) {
@@ -295,33 +283,15 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client)
}
/* store for later use */
- phy->gpio_irq = pdata->gpio_irq;
phy->gpio_reset = pdata->gpio_reset;
phy->irq_polarity = pdata->irq_polarity;
- r = devm_gpio_request_one(&client->dev, phy->gpio_irq,
- GPIOF_IN, "clf_irq");
- if (r) {
- pr_err("%s : gpio_request failed\n", __FILE__);
- return -ENODEV;
- }
-
r = devm_gpio_request_one(&client->dev,
phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset");
if (r) {
pr_err("%s : reset gpio_request failed\n", __FILE__);
- return -ENODEV;
- }
-
- /* IRQ */
- irq = gpio_to_irq(phy->gpio_irq);
- if (irq < 0) {
- nfc_err(&client->dev,
- "Unable to get irq number for GPIO %d error %d\n",
- phy->gpio_irq, r);
- return -ENODEV;
+ return r;
}
- client->irq = irq;
return 0;
}
@@ -401,10 +371,13 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id of_st21nfcb_i2c_match[] = {
{ .compatible = "st,st21nfcb_i2c", },
{}
};
+MODULE_DEVICE_TABLE(of, of_st21nfcb_i2c_match);
+#endif
static struct i2c_driver st21nfcb_nci_i2c_driver = {
.driver = {
diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c
index e7bff8921d11..bac50e805f1d 100644
--- a/drivers/nfc/st21nfcb/ndlc.c
+++ b/drivers/nfc/st21nfcb/ndlc.c
@@ -266,7 +266,7 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
*ndlc_id = ndlc;
- /* start timers */
+ /* initialize timers */
init_timer(&ndlc->t1_timer);
ndlc->t1_timer.data = (unsigned long)ndlc;
ndlc->t1_timer.function = ndlc_t1_timeout;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 1a13f5b722c5..b5e0c873d4e1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -7,8 +7,8 @@ config OF
menu "Device Tree and Open Firmware support"
depends on OF
-config OF_SELFTEST
- bool "Device Tree Runtime self tests"
+config OF_UNITTEST
+ bool "Device Tree runtime unit tests"
depends on OF_IRQ && OF_EARLY_FLATTREE
select OF_DYNAMIC
select OF_RESOLVE
@@ -23,6 +23,7 @@ config OF_FLATTREE
bool
select DTC
select LIBFDT
+ select CRC32
config OF_EARLY_FLATTREE
bool
@@ -83,4 +84,10 @@ config OF_RESERVED_MEM
config OF_RESOLVE
bool
+config OF_OVERLAY
+ bool
+ depends on OF
+ select OF_DYNAMIC
+ select OF_RESOLVE
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index ca9209ce50cd..7563f36c71db 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -6,14 +6,15 @@ obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_NET) += of_net.o
-obj-$(CONFIG_OF_SELFTEST) += of_selftest.o
-of_selftest-objs := selftest.o testcase-data/testcases.dtb.o
+obj-$(CONFIG_OF_UNITTEST) += of_unittest.o
+of_unittest-objs := unittest.o unittest-data/testcases.dtb.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
+obj-$(CONFIG_OF_OVERLAY) += overlay.o
CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt
CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 06af494184d6..ad2906919d45 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -491,7 +491,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
*/
ranges = of_get_property(parent, rprop, &rlen);
if (ranges == NULL && !of_empty_ranges_quirk()) {
- pr_err("OF: no ranges; cannot translate\n");
+ pr_debug("OF: no ranges; cannot translate\n");
return 1;
}
if (ranges == NULL || rlen == 0) {
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(of_iomap);
* return PTR_ERR(base);
*/
void __iomem *of_io_request_and_map(struct device_node *np, int index,
- char *name)
+ const char *name)
{
struct resource res;
void __iomem *mem;
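
With the name parameter constified, of_io_request_and_map() can be passed a plain string literal. A minimal usage sketch following the error convention shown in the kernel-doc fragment above; the resource name and the origin of np are illustrative:

	/* np obtained elsewhere, e.g. from a platform device's of_node */
	void __iomem *base;

	base = of_io_request_and_map(np, 0, "my-device-regs");
	if (IS_ERR(base))
		return PTR_ERR(base);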
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 4c2ccde42427..36536b6a8834 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -32,11 +32,12 @@
LIST_HEAD(aliases_lookup);
-struct device_node *of_allnodes;
-EXPORT_SYMBOL(of_allnodes);
+struct device_node *of_root;
+EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
struct device_node *of_aliases;
struct device_node *of_stdout;
+static const char *of_stdout_options;
struct kset *of_kset;
@@ -48,7 +49,7 @@ struct kset *of_kset;
*/
DEFINE_MUTEX(of_mutex);
-/* use when traversing tree through the allnext, child, sibling,
+/* use when traversing tree through the child, sibling,
* or parent members of struct device_node.
*/
DEFINE_RAW_SPINLOCK(devtree_lock);
@@ -204,7 +205,7 @@ static int __init of_init(void)
mutex_unlock(&of_mutex);
/* Symlink in /proc as required by userspace ABI */
- if (of_allnodes)
+ if (of_root)
proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
return 0;
@@ -245,6 +246,23 @@ struct property *of_find_property(const struct device_node *np,
}
EXPORT_SYMBOL(of_find_property);
+struct device_node *__of_find_all_nodes(struct device_node *prev)
+{
+ struct device_node *np;
+ if (!prev) {
+ np = of_root;
+ } else if (prev->child) {
+ np = prev->child;
+ } else {
+ /* Walk back up looking for a sibling, or the end of the structure */
+ np = prev;
+ while (np->parent && !np->sibling)
+ np = np->parent;
+ np = np->sibling; /* Might be null at the end of the tree */
+ }
+ return np;
+}
+
/**
* of_find_all_nodes - Get next node in global list
* @prev: Previous node or NULL to start iteration
@@ -259,10 +277,8 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = prev ? prev->allnext : of_allnodes;
- for (; np != NULL; np = np->allnext)
- if (of_node_get(np))
- break;
+ np = __of_find_all_nodes(prev);
+ of_node_get(np);
of_node_put(prev);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -485,7 +501,7 @@ EXPORT_SYMBOL(of_device_is_compatible);
* of_machine_is_compatible - Test root of device tree for a given compatible value
* @compat: compatible string to look for in root node's compatible property.
*
- * Returns true if the root node has the given value in its
+ * Returns a positive integer if the root node has the given value in its
* compatible property.
*/
int of_machine_is_compatible(const char *compat)
@@ -507,27 +523,27 @@ EXPORT_SYMBOL(of_machine_is_compatible);
*
* @device: Node to check for availability, with locks already held
*
- * Returns 1 if the status property is absent or set to "okay" or "ok",
- * 0 otherwise
+ * Returns true if the status property is absent or set to "okay" or "ok",
+ * false otherwise
*/
-static int __of_device_is_available(const struct device_node *device)
+static bool __of_device_is_available(const struct device_node *device)
{
const char *status;
int statlen;
if (!device)
- return 0;
+ return false;
status = __of_get_property(device, "status", &statlen);
if (status == NULL)
- return 1;
+ return true;
if (statlen > 0) {
if (!strcmp(status, "okay") || !strcmp(status, "ok"))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -535,13 +551,13 @@ static int __of_device_is_available(const struct device_node *device)
*
* @device: Node to check for availability
*
- * Returns 1 if the status property is absent or set to "okay" or "ok",
- * 0 otherwise
+ * Returns true if the status property is absent or set to "okay" or "ok",
+ * false otherwise
*/
-int of_device_is_available(const struct device_node *device)
+bool of_device_is_available(const struct device_node *device)
{
unsigned long flags;
- int res;
+ bool res;
raw_spin_lock_irqsave(&devtree_lock, flags);
res = __of_device_is_available(device);
@@ -577,9 +593,9 @@ EXPORT_SYMBOL(of_get_parent);
* of_get_next_parent - Iterate to a node's parent
* @node: Node to get parent of
*
- * This is like of_get_parent() except that it drops the
- * refcount on the passed node, making it suitable for iterating
- * through a node's parents.
+ * This is like of_get_parent() except that it drops the
+ * refcount on the passed node, making it suitable for iterating
+ * through a node's parents.
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
@@ -699,10 +715,15 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
{
struct device_node *child;
int len = strchrnul(path, '/') - path;
+ int term;
if (!len)
return NULL;
+ term = strchrnul(path, ':') - path;
+ if (term < len)
+ len = term;
+
__for_each_child_of_node(parent, child) {
const char *name = strrchr(child->full_name, '/');
if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -715,11 +736,14 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
}
/**
- * of_find_node_by_path - Find a node matching a full OF path
+ * of_find_node_opts_by_path - Find a node matching a full OF path
* @path: Either the full path to match, or if the path does not
* start with '/', the name of a property of the /aliases
* node (an alias). In the case of an alias, the node
* matching the alias' value will be returned.
+ * @opts: Address of a pointer into which to store the start of
+ * an options string appended to the end of the path with
+ * a ':' separator.
*
* Valid paths:
* /foo/bar Full path
@@ -729,19 +753,23 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
-struct device_node *of_find_node_by_path(const char *path)
+struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
struct device_node *np = NULL;
struct property *pp;
unsigned long flags;
+ const char *separator = strchr(path, ':');
+
+ if (opts)
+ *opts = separator ? separator + 1 : NULL;
if (strcmp(path, "/") == 0)
- return of_node_get(of_allnodes);
+ return of_node_get(of_root);
/* The path could begin with an alias */
if (*path != '/') {
char *p = strchrnul(path, '/');
- int len = p - path;
+ int len = separator ? separator - path : p - path;
/* of_aliases must not be NULL */
if (!of_aliases)
@@ -761,7 +789,7 @@ struct device_node *of_find_node_by_path(const char *path)
/* Step down the tree matching path components */
raw_spin_lock_irqsave(&devtree_lock, flags);
if (!np)
- np = of_node_get(of_allnodes);
+ np = of_node_get(of_root);
while (np && *path == '/') {
path++; /* Increment past '/' delimiter */
np = __of_find_node_by_path(np, path);
@@ -770,7 +798,7 @@ struct device_node *of_find_node_by_path(const char *path)
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
-EXPORT_SYMBOL(of_find_node_by_path);
+EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
* of_find_node_by_name - Find a node by its "name" property
@@ -790,8 +818,7 @@ struct device_node *of_find_node_by_name(struct device_node *from,
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : of_allnodes;
- for (; np; np = np->allnext)
+ for_each_of_allnodes_from(from, np)
if (np->name && (of_node_cmp(np->name, name) == 0)
&& of_node_get(np))
break;
@@ -820,8 +847,7 @@ struct device_node *of_find_node_by_type(struct device_node *from,
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : of_allnodes;
- for (; np; np = np->allnext)
+ for_each_of_allnodes_from(from, np)
if (np->type && (of_node_cmp(np->type, type) == 0)
&& of_node_get(np))
break;
@@ -852,12 +878,10 @@ struct device_node *of_find_compatible_node(struct device_node *from,
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : of_allnodes;
- for (; np; np = np->allnext) {
+ for_each_of_allnodes_from(from, np)
if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
- }
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
@@ -884,8 +908,7 @@ struct device_node *of_find_node_with_property(struct device_node *from,
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : of_allnodes;
- for (; np; np = np->allnext) {
+ for_each_of_allnodes_from(from, np) {
for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, prop_name) == 0) {
of_node_get(np);
@@ -923,7 +946,7 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
}
/**
- * of_match_node - Tell if an device_node has a matching of_match structure
+ * of_match_node - Tell if a device_node has a matching of_match structure
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
@@ -967,8 +990,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
*match = NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : of_allnodes;
- for (; np; np = np->allnext) {
+ for_each_of_allnodes_from(from, np) {
m = __of_match_node(matches, np);
if (m && of_node_get(np)) {
if (match)
@@ -1025,7 +1047,7 @@ struct device_node *of_find_node_by_phandle(phandle handle)
return NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
- for (np = of_allnodes; np; np = np->allnext)
+ for_each_of_allnodes(np)
if (np->phandle == handle)
break;
of_node_get(np);
@@ -1350,7 +1372,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
EXPORT_SYMBOL_GPL(of_property_match_string);
/**
- * of_property_read_string_util() - Utility helper for parsing string properties
+ * of_property_read_string_helper() - Utility helper for parsing string properties
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_strs: output array of string pointers.
@@ -1549,21 +1571,21 @@ EXPORT_SYMBOL(of_parse_phandle);
* Returns 0 on success and fills out_args, on error returns appropriate
* errno value.
*
- * Caller is responsible to call of_node_put() on the returned out_args->node
+ * Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
* Example:
*
* phandle1: node1 {
- * #list-cells = <2>;
+ * #list-cells = <2>;
* }
*
* phandle2: node2 {
- * #list-cells = <1>;
+ * #list-cells = <1>;
* }
*
* node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
+ * list = <&phandle1 1 2 &phandle2 3>;
* }
*
* To get a device_node of the `node2' node you may call this:
@@ -1592,7 +1614,7 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
* Returns 0 on success and fills out_args, on error returns appropriate
* errno value.
*
- * Caller is responsible to call of_node_put() on the returned out_args->node
+ * Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
* Example:
@@ -1604,7 +1626,7 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
* }
*
* node3 {
- * list = <&phandle1 0 2 &phandle2 2 3>;
+ * list = <&phandle1 0 2 &phandle2 2 3>;
* }
*
* To get a device_node of the `node2' node you may call this:
@@ -1838,14 +1860,14 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
}
/**
- * of_alias_scan - Scan all properties of 'aliases' node
+ * of_alias_scan - Scan all properties of the 'aliases' node
*
- * The function scans all the properties of 'aliases' node and populate
- * the the global lookup table with the properties. It returns the
- * number of alias_prop found, or error code in error case.
+ * The function scans all the properties of the 'aliases' node and populates
+ * the global lookup table with the properties. It returns the
+ * number of alias properties found, or an error code in case of failure.
*
* @dt_alloc: An allocator that provides a virtual address to memory
- * for the resulting tree
+ * for storing the resulting tree
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
@@ -1864,7 +1886,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (IS_ENABLED(CONFIG_PPC) && !name)
name = of_get_property(of_aliases, "stdout", NULL);
if (name)
- of_stdout = of_find_node_by_path(name);
+ of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
}
if (!of_aliases)
@@ -1990,7 +2012,8 @@ bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
- return !add_preferred_console(name, index, NULL);
+ return !add_preferred_console(name, index,
+ kstrdup(of_stdout_options, GFP_KERNEL));
}
EXPORT_SYMBOL_GPL(of_console_check);
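
The ':' handling added above lets a path or alias carry options, as stdout-path does for console parameters. A minimal usage sketch; the alias and option string are illustrative:

	const char *opts;
	struct device_node *np;

	/* e.g. a chosen node with stdout-path = "serial0:115200n8" */
	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
	if (np) {
		pr_info("console node %s, options %s\n",
			np->full_name, opts ? opts : "(none)");
		of_node_put(np);
	}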
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index d4994177dec2..3351ef408125 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -77,18 +77,132 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
-int of_reconfig_notify(unsigned long action, void *p)
+#ifdef DEBUG
+const char *action_names[] = {
+ [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
+ [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
+ [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
+ [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
+ [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
+};
+#endif
+
+int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
{
int rc;
+#ifdef DEBUG
+ struct of_reconfig_data *pr = p;
+
+ switch (action) {
+ case OF_RECONFIG_ATTACH_NODE:
+ case OF_RECONFIG_DETACH_NODE:
+ pr_debug("of/notify %-15s %s\n", action_names[action],
+ pr->dn->full_name);
+ break;
+ case OF_RECONFIG_ADD_PROPERTY:
+ case OF_RECONFIG_REMOVE_PROPERTY:
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ pr_debug("of/notify %-15s %s:%s\n", action_names[action],
+ pr->dn->full_name, pr->prop->name);
+ break;
+ }
+#endif
rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
return notifier_to_errno(rc);
}
+/*
+ * of_reconfig_get_state_change() - Returns new state of device
+ * @action - action of the of notifier
+ * @arg - argument of the of notifier
+ *
+ * Returns the new state of a device based on the notifier used.
+ * Returns 0 on device going from enabled to disabled, 1 on device
+ * going from disabled to enabled and -1 on no change.
+ */
+int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
+{
+ struct property *prop, *old_prop = NULL;
+ int is_status, status_state, old_status_state, prev_state, new_state;
+
+ /* figure out if a device should be created or destroyed */
+ switch (action) {
+ case OF_RECONFIG_ATTACH_NODE:
+ case OF_RECONFIG_DETACH_NODE:
+ prop = of_find_property(pr->dn, "status", NULL);
+ break;
+ case OF_RECONFIG_ADD_PROPERTY:
+ case OF_RECONFIG_REMOVE_PROPERTY:
+ prop = pr->prop;
+ break;
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ prop = pr->prop;
+ old_prop = pr->old_prop;
+ break;
+ default:
+ return OF_RECONFIG_NO_CHANGE;
+ }
+
+ is_status = 0;
+ status_state = -1;
+ old_status_state = -1;
+ prev_state = -1;
+ new_state = -1;
+
+ if (prop && !strcmp(prop->name, "status")) {
+ is_status = 1;
+ status_state = !strcmp(prop->value, "okay") ||
+ !strcmp(prop->value, "ok");
+ if (old_prop)
+ old_status_state = !strcmp(old_prop->value, "okay") ||
+ !strcmp(old_prop->value, "ok");
+ }
+
+ switch (action) {
+ case OF_RECONFIG_ATTACH_NODE:
+ prev_state = 0;
+ /* -1 & 0 status either missing or okay */
+ new_state = status_state != 0;
+ break;
+ case OF_RECONFIG_DETACH_NODE:
+ /* -1 & 0 status either missing or okay */
+ prev_state = status_state != 0;
+ new_state = 0;
+ break;
+ case OF_RECONFIG_ADD_PROPERTY:
+ if (is_status) {
+ /* no status property -> enabled (legacy) */
+ prev_state = 1;
+ new_state = status_state;
+ }
+ break;
+ case OF_RECONFIG_REMOVE_PROPERTY:
+ if (is_status) {
+ prev_state = status_state;
+ /* no status property -> enabled (legacy) */
+ new_state = 1;
+ }
+ break;
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ if (is_status) {
+ prev_state = old_status_state != 0;
+ new_state = status_state != 0;
+ }
+ break;
+ }
+
+ if (prev_state == new_state)
+ return OF_RECONFIG_NO_CHANGE;
+
+ return new_state ? OF_RECONFIG_CHANGE_ADD : OF_RECONFIG_CHANGE_REMOVE;
+}
+EXPORT_SYMBOL_GPL(of_reconfig_get_state_change);
+
int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *oldprop)
{
- struct of_prop_reconfig pr;
+ struct of_reconfig_data pr;
/* only call notifiers if the node is attached */
if (!of_node_is_attached(np))
@@ -117,8 +231,6 @@ void __of_attach_node(struct device_node *np)
np->child = NULL;
np->sibling = np->parent->child;
- np->allnext = np->parent->allnext;
- np->parent->allnext = np;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
}
@@ -128,8 +240,12 @@ void __of_attach_node(struct device_node *np)
*/
int of_attach_node(struct device_node *np)
{
+ struct of_reconfig_data rd;
unsigned long flags;
+ memset(&rd, 0, sizeof(rd));
+ rd.dn = np;
+
mutex_lock(&of_mutex);
raw_spin_lock_irqsave(&devtree_lock, flags);
__of_attach_node(np);
@@ -138,7 +254,7 @@ int of_attach_node(struct device_node *np)
__of_attach_node_sysfs(np);
mutex_unlock(&of_mutex);
- of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
+ of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, &rd);
return 0;
}
@@ -154,17 +270,6 @@ void __of_detach_node(struct device_node *np)
if (WARN_ON(!parent))
return;
- if (of_allnodes == np)
- of_allnodes = np->allnext;
- else {
- struct device_node *prev;
- for (prev = of_allnodes;
- prev->allnext != np;
- prev = prev->allnext)
- ;
- prev->allnext = np->allnext;
- }
-
if (parent->child == np)
parent->child = np->sibling;
else {
@@ -187,9 +292,13 @@ void __of_detach_node(struct device_node *np)
*/
int of_detach_node(struct device_node *np)
{
+ struct of_reconfig_data rd;
unsigned long flags;
int rc = 0;
+ memset(&rd, 0, sizeof(rd));
+ rd.dn = np;
+
mutex_lock(&of_mutex);
raw_spin_lock_irqsave(&devtree_lock, flags);
__of_detach_node(np);
@@ -198,7 +307,7 @@ int of_detach_node(struct device_node *np)
__of_detach_node_sysfs(np);
mutex_unlock(&of_mutex);
- of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
+ of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd);
return rc;
}
@@ -285,36 +394,54 @@ struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
}
/**
- * __of_node_alloc() - Create an empty device node dynamically.
- * @full_name: Full name of the new device node
- * @allocflags: Allocation flags (typically pass GFP_KERNEL)
+ * __of_node_dup() - Duplicate or create an empty device node dynamically.
+ * @np: if non-NULL, the node whose properties are duplicated into the new node
+ * @fmt: Format string (plus vargs) for the full name of the new device node
*
- * Create an empty device tree node, suitable for further modification.
- * The node data are dynamically allocated and all the node flags
- * have the OF_DYNAMIC & OF_DETACHED bits set.
- * Returns the newly allocated node or NULL on out of memory error.
+ * Create a device tree node, either by duplicating an existing node or by allocating
+ * an empty one suitable for further modification. The node data are
+ * dynamically allocated and all the node flags have the OF_DYNAMIC &
+ * OF_DETACHED bits set. Returns the newly allocated node or NULL on out of
+ * memory error.
*/
-struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags)
+struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...)
{
+ va_list vargs;
struct device_node *node;
- node = kzalloc(sizeof(*node), allocflags);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
+ va_start(vargs, fmt);
+ node->full_name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ va_end(vargs);
+ if (!node->full_name) {
+ kfree(node);
+ return NULL;
+ }
- node->full_name = kstrdup(full_name, allocflags);
of_node_set_flag(node, OF_DYNAMIC);
of_node_set_flag(node, OF_DETACHED);
- if (!node->full_name)
- goto err_free;
-
of_node_init(node);
+ /* Iterate over and duplicate all properties */
+ if (np) {
+ struct property *pp, *new_pp;
+ for_each_property_of_node(np, pp) {
+ new_pp = __of_prop_dup(pp, GFP_KERNEL);
+ if (!new_pp)
+ goto err_prop;
+ if (__of_add_property(node, new_pp)) {
+ kfree(new_pp->name);
+ kfree(new_pp->value);
+ kfree(new_pp);
+ goto err_prop;
+ }
+ }
+ }
return node;
- err_free:
- kfree(node->full_name);
- kfree(node);
+ err_prop:
+ of_node_put(node); /* Frees the node and properties */
return NULL;
}
@@ -330,27 +457,15 @@ static void __of_changeset_entry_dump(struct of_changeset_entry *ce)
{
switch (ce->action) {
case OF_RECONFIG_ADD_PROPERTY:
- pr_debug("%p: %s %s/%s\n",
- ce, "ADD_PROPERTY ", ce->np->full_name,
- ce->prop->name);
- break;
case OF_RECONFIG_REMOVE_PROPERTY:
- pr_debug("%p: %s %s/%s\n",
- ce, "REMOVE_PROPERTY", ce->np->full_name,
- ce->prop->name);
- break;
case OF_RECONFIG_UPDATE_PROPERTY:
- pr_debug("%p: %s %s/%s\n",
- ce, "UPDATE_PROPERTY", ce->np->full_name,
- ce->prop->name);
+ pr_debug("of/cset<%p> %-15s %s/%s\n", ce, action_names[ce->action],
+ ce->np->full_name, ce->prop->name);
break;
case OF_RECONFIG_ATTACH_NODE:
- pr_debug("%p: %s %s\n",
- ce, "ATTACH_NODE ", ce->np->full_name);
- break;
case OF_RECONFIG_DETACH_NODE:
- pr_debug("%p: %s %s\n",
- ce, "DETACH_NODE ", ce->np->full_name);
+ pr_debug("of/cset<%p> %-15s %s\n", ce, action_names[ce->action],
+ ce->np->full_name);
break;
}
}
@@ -388,6 +503,7 @@ static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool revert)
{
+ struct of_reconfig_data rd;
struct of_changeset_entry ce_inverted;
int ret;
@@ -399,7 +515,9 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve
switch (ce->action) {
case OF_RECONFIG_ATTACH_NODE:
case OF_RECONFIG_DETACH_NODE:
- ret = of_reconfig_notify(ce->action, ce->np);
+ memset(&rd, 0, sizeof(rd));
+ rd.dn = ce->np;
+ ret = of_reconfig_notify(ce->action, &rd);
break;
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_REMOVE_PROPERTY:
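
of_reconfig_get_state_change() is intended for OF_RECONFIG notifier callbacks that need to decide whether to create or destroy a device. A minimal sketch of such a callback using the structures introduced above; the callback name is illustrative:

	static int example_of_notifier(struct notifier_block *nb,
				       unsigned long action, void *arg)
	{
		struct of_reconfig_data *rd = arg;

		switch (of_reconfig_get_state_change(action, rd)) {
		case OF_RECONFIG_CHANGE_ADD:
			/* rd->dn just became enabled: create its device */
			break;
		case OF_RECONFIG_CHANGE_REMOVE:
			/* rd->dn just became disabled: tear its device down */
			break;
		default:	/* OF_RECONFIG_NO_CHANGE */
			break;
		}
		return NOTIFY_OK;
	}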
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d134710de96d..510074226d57 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -9,6 +9,7 @@
* version 2 as published by the Free Software Foundation.
*/
+#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
@@ -22,6 +23,7 @@
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
+#include <linux/sysfs.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -145,15 +147,15 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size,
* @mem: Memory chunk to use for allocating device nodes and properties
* @p: pointer to node in flat tree
* @dad: Parent struct device_node
- * @allnextpp: pointer to ->allnext from last allocated device_node
* @fpsize: Size of the node path up at the current depth.
*/
static void * unflatten_dt_node(void *blob,
void *mem,
int *poffset,
struct device_node *dad,
- struct device_node ***allnextpp,
- unsigned long fpsize)
+ struct device_node **nodepp,
+ unsigned long fpsize,
+ bool dryrun)
{
const __be32 *p;
struct device_node *np;
@@ -200,7 +202,7 @@ static void * unflatten_dt_node(void *blob,
np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
__alignof__(struct device_node));
- if (allnextpp) {
+ if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
@@ -222,16 +224,10 @@ static void * unflatten_dt_node(void *blob,
memcpy(fn, pathp, l);
prev_pp = &np->properties;
- **allnextpp = np;
- *allnextpp = &np->allnext;
if (dad != NULL) {
np->parent = dad;
- /* we temporarily use the next field as `last_child'*/
- if (dad->next == NULL)
- dad->child = np;
- else
- dad->next->sibling = np;
- dad->next = np;
+ np->sibling = dad->child;
+ dad->child = np;
}
}
/* process properties */
@@ -254,7 +250,7 @@ static void * unflatten_dt_node(void *blob,
has_name = 1;
pp = unflatten_dt_alloc(&mem, sizeof(struct property),
__alignof__(struct property));
- if (allnextpp) {
+ if (!dryrun) {
/* We accept flattened tree phandles either in
* ePAPR-style "phandle" properties, or the
* legacy "linux,phandle" properties. If both
@@ -296,7 +292,7 @@ static void * unflatten_dt_node(void *blob,
sz = (pa - ps) + 1;
pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
__alignof__(struct property));
- if (allnextpp) {
+ if (!dryrun) {
pp->name = "name";
pp->length = sz;
pp->value = pp + 1;
@@ -308,7 +304,7 @@ static void * unflatten_dt_node(void *blob,
(char *)pp->value);
}
}
- if (allnextpp) {
+ if (!dryrun) {
*prev_pp = NULL;
np->name = of_get_property(np, "name", NULL);
np->type = of_get_property(np, "device_type", NULL);
@@ -324,12 +320,30 @@ static void * unflatten_dt_node(void *blob,
if (depth < 0)
depth = 0;
while (*poffset > 0 && depth > old_depth)
- mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp,
- fpsize);
+ mem = unflatten_dt_node(blob, mem, poffset, np, NULL,
+ fpsize, dryrun);
if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND)
pr_err("unflatten: error %d processing FDT\n", *poffset);
+ /*
+ * Reverse the child list. Some drivers assume node order matches .dts
+ * node order
+ */
+ if (!dryrun && np->child) {
+ struct device_node *child = np->child;
+ np->child = NULL;
+ while (child) {
+ struct device_node *next = child->sibling;
+ child->sibling = np->child;
+ np->child = child;
+ child = next;
+ }
+ }
+
+ if (nodepp)
+ *nodepp = np;
+
return mem;
}
@@ -352,7 +366,6 @@ static void __unflatten_device_tree(void *blob,
unsigned long size;
int start;
void *mem;
- struct device_node **allnextp = mynodes;
pr_debug(" -> unflatten_device_tree()\n");
@@ -373,7 +386,7 @@ static void __unflatten_device_tree(void *blob,
/* First pass, scan for size */
start = 0;
- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0);
+ size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true);
size = ALIGN(size, 4);
pr_debug(" size is %lx, allocating...\n", size);
@@ -388,11 +401,10 @@ static void __unflatten_device_tree(void *blob,
/* Second pass, do actual unflattening */
start = 0;
- unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
+ unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warning("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
- *allnextp = NULL;
pr_debug(" <- unflatten_device_tree()\n");
}
@@ -425,6 +437,8 @@ void *initial_boot_params;
#ifdef CONFIG_OF_EARLY_FLATTREE
+static u32 of_fdt_crc32;
+
/**
* res_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
@@ -930,6 +944,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
const u64 phys_offset = __pa(PAGE_OFFSET);
if (!PAGE_ALIGNED(base)) {
+ if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
+ pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
size -= PAGE_SIZE - (base & ~PAGE_MASK);
base = PAGE_ALIGN(base);
}
@@ -992,15 +1011,14 @@ bool __init early_init_dt_verify(void *params)
if (!params)
return false;
- /* Setup flat device-tree pointer */
- initial_boot_params = params;
-
/* check device tree validity */
- if (fdt_check_header(params)) {
- initial_boot_params = NULL;
+ if (fdt_check_header(params))
return false;
- }
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+ of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+ fdt_totalsize(initial_boot_params));
return true;
}
@@ -1039,7 +1057,7 @@ bool __init early_init_dt_scan(void *params)
*/
void __init unflatten_device_tree(void)
{
- __unflatten_device_tree(initial_boot_params, &of_allnodes,
+ __unflatten_device_tree(initial_boot_params, &of_root,
early_init_dt_alloc_memory_arch);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
@@ -1078,27 +1096,32 @@ void __init unflatten_and_copy_device_tree(void)
unflatten_device_tree();
}
-#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
-static struct debugfs_blob_wrapper flat_dt_blob;
-
-static int __init of_flat_dt_debugfs_export_fdt(void)
+#ifdef CONFIG_SYSFS
+static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct dentry *d = debugfs_create_dir("device-tree", NULL);
-
- if (!d)
- return -ENOENT;
+ memcpy(buf, initial_boot_params + off, count);
+ return count;
+}
- flat_dt_blob.data = initial_boot_params;
- flat_dt_blob.size = fdt_totalsize(initial_boot_params);
+static int __init of_fdt_raw_init(void)
+{
+ static struct bin_attribute of_fdt_raw_attr =
+ __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
- d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
- d, &flat_dt_blob);
- if (!d)
- return -ENOENT;
+ if (!initial_boot_params)
+ return 0;
- return 0;
+ if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
+ fdt_totalsize(initial_boot_params))) {
+ pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
+ return 0;
+ }
+ of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
+ return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
-module_init(of_flat_dt_debugfs_export_fdt);
+late_initcall(of_fdt_raw_init);
#endif
#endif /* CONFIG_OF_EARLY_FLATTREE */
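
The sysfs export above is guarded by a CRC taken when the FDT was first verified, so a blob modified after early boot is never exposed as /sys/firmware/fdt. The core of that check, restated as a sketch of the calls used above:

	/* at early_init_dt_verify() time */
	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));

	/* at late_initcall time, before creating /sys/firmware/fdt */
	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params)))
		return 0;	/* the blob changed, do not expose it */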
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1471e0a223a5..0d7765807f49 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -405,6 +405,7 @@ int of_irq_get(struct device_node *dev, int index)
return irq_create_of_mapping(&oirq);
}
+EXPORT_SYMBOL_GPL(of_irq_get);
/**
* of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 858e0a5d9a11..8e882e706cd8 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -61,7 +61,7 @@ static inline int of_property_notify(int action, struct device_node *np,
* own the devtree lock or work on detached trees only.
*/
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
-struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags);
+__printf(2, 3) struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...);
extern const void *__of_get_property(const struct device_node *np,
const char *name, int *lenp);
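Editor's note: the prototype change above turns __of_node_alloc() into the printf-style __of_node_dup(). A minimal sketch of the new call, assuming a NULL template node and an illustrative parent path (neither taken from the patch):

    /* allocate a detached node whose full_name is built from a format string */
    static struct device_node *example_new_child(const char *parent_path)
    {
            return __of_node_dup(NULL, "%s/%s", parent_path, "child@0");
    }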
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
new file mode 100644
index 000000000000..ea63fbd228ed
--- /dev/null
+++ b/drivers/of/overlay.c
@@ -0,0 +1,562 @@
+/*
+ * Functions for working with device tree overlays
+ *
+ * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com>
+ * Copyright (C) 2012 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#undef DEBUG
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "of_private.h"
+
+/**
+ * struct of_overlay_info - Holds a single overlay info
+ * @target: target of the overlay operation
+ * @overlay: pointer to the overlay contents node
+ *
+ * Holds a single overlay target/contents pair used while building
+ * the overlay changeset.
+ */
+struct of_overlay_info {
+ struct device_node *target;
+ struct device_node *overlay;
+};
+
+/**
+ * struct of_overlay - Holds a complete overlay transaction
+ * @node: List on which we are located
+ * @count: Count of ovinfo structures
+ * @ovinfo_tab: Overlay info table (count sized)
+ * @cset: Changeset to be used
+ *
+ * Holds a complete overlay transaction
+ */
+struct of_overlay {
+ int id;
+ struct list_head node;
+ int count;
+ struct of_overlay_info *ovinfo_tab;
+ struct of_changeset cset;
+};
+
+static int of_overlay_apply_one(struct of_overlay *ov,
+ struct device_node *target, const struct device_node *overlay);
+
+static int of_overlay_apply_single_property(struct of_overlay *ov,
+ struct device_node *target, struct property *prop)
+{
+ struct property *propn, *tprop;
+
+ /* NOTE: Multiple changes of single properties not supported */
+ tprop = of_find_property(target, prop->name, NULL);
+
+ /* special properties are not meant to be updated (silent NOP) */
+ if (of_prop_cmp(prop->name, "name") == 0 ||
+ of_prop_cmp(prop->name, "phandle") == 0 ||
+ of_prop_cmp(prop->name, "linux,phandle") == 0)
+ return 0;
+
+ propn = __of_prop_dup(prop, GFP_KERNEL);
+ if (propn == NULL)
+ return -ENOMEM;
+
+ /* not found? add */
+ if (tprop == NULL)
+ return of_changeset_add_property(&ov->cset, target, propn);
+
+ /* found? update */
+ return of_changeset_update_property(&ov->cset, target, propn);
+}
+
+static int of_overlay_apply_single_device_node(struct of_overlay *ov,
+ struct device_node *target, struct device_node *child)
+{
+ const char *cname;
+ struct device_node *tchild, *grandchild;
+ int ret = 0;
+
+ cname = kbasename(child->full_name);
+ if (cname == NULL)
+ return -ENOMEM;
+
+ /* NOTE: Multiple mods of created nodes not supported */
+ tchild = of_get_child_by_name(target, cname);
+ if (tchild != NULL) {
+ /* apply overlay recursively */
+ ret = of_overlay_apply_one(ov, tchild, child);
+ of_node_put(tchild);
+ } else {
+ /* create empty tree as a target */
+ tchild = __of_node_dup(child, "%s/%s", target->full_name, cname);
+ if (!tchild)
+ return -ENOMEM;
+
+ /* point to parent */
+ tchild->parent = target;
+
+ ret = of_changeset_attach_node(&ov->cset, tchild);
+ if (ret)
+ return ret;
+
+ ret = of_overlay_apply_one(ov, tchild, child);
+ if (ret)
+ return ret;
+
+ /* The properties are already copied, now do the child nodes */
+ for_each_child_of_node(child, grandchild) {
+ ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
+ if (ret) {
+ pr_err("%s: Failed to apply single node @%s/%s\n",
+ __func__, tchild->full_name,
+ grandchild->name);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Apply a single overlay node recursively.
+ *
+ * Note that in case of an error the target node is left
+ * in an inconsistent state. Error recovery should be performed
+ * by using the changeset.
+ */
+static int of_overlay_apply_one(struct of_overlay *ov,
+ struct device_node *target, const struct device_node *overlay)
+{
+ struct device_node *child;
+ struct property *prop;
+ int ret;
+
+ for_each_property_of_node(overlay, prop) {
+ ret = of_overlay_apply_single_property(ov, target, prop);
+ if (ret) {
+ pr_err("%s: Failed to apply prop @%s/%s\n",
+ __func__, target->full_name, prop->name);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(overlay, child) {
+ ret = of_overlay_apply_single_device_node(ov, target, child);
+ if (ret != 0) {
+ pr_err("%s: Failed to apply single node @%s/%s\n",
+ __func__, target->full_name,
+ child->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * of_overlay_apply() - Apply the overlays described by @ov
+ * @ov: Overlay to apply
+ *
+ * Applies the overlays given, while handling all error conditions
+ * appropriately. Either the operation succeeds, or if it fails the
+ * live tree is reverted to the state before the attempt.
+ * Returns 0, or an error if the overlay attempt failed.
+ */
+static int of_overlay_apply(struct of_overlay *ov)
+{
+ int i, err;
+
+ /* first we apply the overlays atomically */
+ for (i = 0; i < ov->count; i++) {
+ struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+
+ err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay);
+ if (err != 0) {
+ pr_err("%s: overlay failed '%s'\n",
+ __func__, ovinfo->target->full_name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Find the target node using a number of different strategies
+ * in order of preference
+ *
+ * "target" property containing the phandle of the target
+ * "target-path" property containing the path of the target
+ */
+static struct device_node *find_target_node(struct device_node *info_node)
+{
+ const char *path;
+ u32 val;
+ int ret;
+
+ /* first try to go by using the target as a phandle */
+ ret = of_property_read_u32(info_node, "target", &val);
+ if (ret == 0)
+ return of_find_node_by_phandle(val);
+
+ /* now try to locate by path */
+ ret = of_property_read_string(info_node, "target-path", &path);
+ if (ret == 0)
+ return of_find_node_by_path(path);
+
+ pr_err("%s: Failed to find target for node %p (%s)\n", __func__,
+ info_node, info_node->name);
+
+ return NULL;
+}
+
+/**
+ * of_fill_overlay_info() - Fill an overlay info structure
+ * @ov: Overlay to fill
+ * @info_node: Device node containing the overlay
+ * @ovinfo: Pointer to the overlay info structure to fill
+ *
+ * Fills an overlay info structure with the overlay information
+ * from a device node. This device node must have a target (or
+ * target-path) property identifying the overlay target node, and an
+ * __overlay__ child node which has the overlay contents.
+ * Both ovinfo->target & ovinfo->overlay have their references taken.
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int of_fill_overlay_info(struct of_overlay *ov,
+ struct device_node *info_node, struct of_overlay_info *ovinfo)
+{
+ ovinfo->overlay = of_get_child_by_name(info_node, "__overlay__");
+ if (ovinfo->overlay == NULL)
+ goto err_fail;
+
+ ovinfo->target = find_target_node(info_node);
+ if (ovinfo->target == NULL)
+ goto err_fail;
+
+ return 0;
+
+err_fail:
+ of_node_put(ovinfo->target);
+ of_node_put(ovinfo->overlay);
+
+ memset(ovinfo, 0, sizeof(*ovinfo));
+ return -EINVAL;
+}
+
+/**
+ * of_build_overlay_info() - Build an overlay info array
+ * @ov: Overlay to build
+ * @tree: Device node containing all the overlays
+ *
+ * Helper function that, given a tree containing overlay information,
+ * allocates and builds an overlay info array from it, ready for
+ * use with of_overlay_apply().
+ *
+ * Returns 0 on success with ov->count and ov->ovinfo_tab populated,
+ * or a negative error value on failure.
+ */
+static int of_build_overlay_info(struct of_overlay *ov,
+ struct device_node *tree)
+{
+ struct device_node *node;
+ struct of_overlay_info *ovinfo;
+ int cnt, err;
+
+ /* worst case; every child is a node */
+ cnt = 0;
+ for_each_child_of_node(tree, node)
+ cnt++;
+
+ ovinfo = kcalloc(cnt, sizeof(*ovinfo), GFP_KERNEL);
+ if (ovinfo == NULL)
+ return -ENOMEM;
+
+ cnt = 0;
+ for_each_child_of_node(tree, node) {
+ memset(&ovinfo[cnt], 0, sizeof(*ovinfo));
+ err = of_fill_overlay_info(ov, node, &ovinfo[cnt]);
+ if (err == 0)
+ cnt++;
+ }
+
+ /* if nothing filled, return error */
+ if (cnt == 0) {
+ kfree(ovinfo);
+ return -ENODEV;
+ }
+
+ ov->count = cnt;
+ ov->ovinfo_tab = ovinfo;
+
+ return 0;
+}
+
+/**
+ * of_free_overlay_info() - Free an overlay info array
+ * @ov: Overlay to free the overlay info from
+ * @ovinfo_tab: Array of overlay_info's to free
+ *
+ * Releases the memory of an ovinfo array previously allocated
+ * by of_build_overlay_info().
+ * Returns 0, or an error if the arguments are bogus.
+ */
+static int of_free_overlay_info(struct of_overlay *ov)
+{
+ struct of_overlay_info *ovinfo;
+ int i;
+
+ /* do it in reverse */
+ for (i = ov->count - 1; i >= 0; i--) {
+ ovinfo = &ov->ovinfo_tab[i];
+
+ of_node_put(ovinfo->target);
+ of_node_put(ovinfo->overlay);
+ }
+ kfree(ov->ovinfo_tab);
+
+ return 0;
+}
+
+static LIST_HEAD(ov_list);
+static DEFINE_IDR(ov_idr);
+
+/**
+ * of_overlay_create() - Create and apply an overlay
+ * @tree: Device node containing all the overlays
+ *
+ * Creates and applies an overlay while also keeping track
+ * of the overlay in a list. This list can be used to prevent
+ * illegal overlay removals.
+ *
+ * Returns the id of the created overlay, or a negative error number
+ */
+int of_overlay_create(struct device_node *tree)
+{
+ struct of_overlay *ov;
+ int err, id;
+
+ /* allocate the overlay structure */
+ ov = kzalloc(sizeof(*ov), GFP_KERNEL);
+ if (ov == NULL)
+ return -ENOMEM;
+ ov->id = -1;
+
+ INIT_LIST_HEAD(&ov->node);
+
+ of_changeset_init(&ov->cset);
+
+ mutex_lock(&of_mutex);
+
+ id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ pr_err("%s: idr_alloc() failed for tree@%s\n",
+ __func__, tree->full_name);
+ err = id;
+ goto err_destroy_trans;
+ }
+ ov->id = id;
+
+ /* build the overlay info structures */
+ err = of_build_overlay_info(ov, tree);
+ if (err) {
+ pr_err("%s: of_build_overlay_info() failed for tree@%s\n",
+ __func__, tree->full_name);
+ goto err_free_idr;
+ }
+
+ /* apply the overlay */
+ err = of_overlay_apply(ov);
+ if (err) {
+ pr_err("%s: of_overlay_apply() failed for tree@%s\n",
+ __func__, tree->full_name);
+ goto err_abort_trans;
+ }
+
+ /* apply the changeset */
+ err = of_changeset_apply(&ov->cset);
+ if (err) {
+ pr_err("%s: of_changeset_apply() failed for tree@%s\n",
+ __func__, tree->full_name);
+ goto err_revert_overlay;
+ }
+
+ /* add to the tail of the overlay list */
+ list_add_tail(&ov->node, &ov_list);
+
+ mutex_unlock(&of_mutex);
+
+ return id;
+
+err_revert_overlay:
+err_abort_trans:
+ of_free_overlay_info(ov);
+err_free_idr:
+ idr_remove(&ov_idr, ov->id);
+err_destroy_trans:
+ of_changeset_destroy(&ov->cset);
+ kfree(ov);
+ mutex_unlock(&of_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(of_overlay_create);
+
+/* check whether the given node lies under the given tree */
+static int overlay_subtree_check(struct device_node *tree,
+ struct device_node *dn)
+{
+ struct device_node *child;
+
+ /* match? */
+ if (tree == dn)
+ return 1;
+
+ for_each_child_of_node(tree, child) {
+ if (overlay_subtree_check(child, dn))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* check whether this overlay is the topmost */
+static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
+{
+ struct of_overlay *ovt;
+ struct of_changeset_entry *ce;
+
+ list_for_each_entry_reverse(ovt, &ov_list, node) {
+ /* if we hit ourselves, we're done */
+ if (ovt == ov)
+ break;
+
+ /* check against each subtree affected by this overlay */
+ list_for_each_entry(ce, &ovt->cset.entries, node) {
+ if (overlay_subtree_check(ce->np, dn)) {
+ pr_err("%s: #%d clashes #%d @%s\n",
+ __func__, ov->id, ovt->id,
+ dn->full_name);
+ return 0;
+ }
+ }
+ }
+
+ /* overlay is topmost */
+ return 1;
+}
+
+/*
+ * We can safely remove the overlay only if it's the top-most one.
+ * Newly applied overlays are inserted at the tail of the overlay list,
+ * so the topmost overlay is the one closest to the tail.
+ *
+ * The topmost check is done by exploiting this property. For each
+ * affected device node in the log list we check if this overlay is
+ * the one closest to the tail. If another overlay has affected this
+ * device node and is closest to the tail, then removal is not permitted.
+ */
+static int overlay_removal_is_ok(struct of_overlay *ov)
+{
+ struct of_changeset_entry *ce;
+
+ list_for_each_entry(ce, &ov->cset.entries, node) {
+ if (!overlay_is_topmost(ov, ce->np)) {
+ pr_err("%s: overlay #%d is not topmost\n",
+ __func__, ov->id);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * of_overlay_destroy() - Removes an overlay
+ * @id: Overlay id number returned by a previous call to of_overlay_create
+ *
+ * Removes an overlay if it is permissible.
+ *
+ * Returns 0 on success, or a negative error number
+ */
+int of_overlay_destroy(int id)
+{
+ struct of_overlay *ov;
+ int err;
+
+ mutex_lock(&of_mutex);
+
+ ov = idr_find(&ov_idr, id);
+ if (ov == NULL) {
+ err = -ENODEV;
+ pr_err("%s: Could not find overlay #%d\n",
+ __func__, id);
+ goto out;
+ }
+
+ /* check whether the overlay is safe to remove */
+ if (!overlay_removal_is_ok(ov)) {
+ err = -EBUSY;
+ pr_err("%s: removal check failed for overlay #%d\n",
+ __func__, id);
+ goto out;
+ }
+
+
+ list_del(&ov->node);
+ of_changeset_revert(&ov->cset);
+ of_free_overlay_info(ov);
+ idr_remove(&ov_idr, id);
+ of_changeset_destroy(&ov->cset);
+ kfree(ov);
+
+ err = 0;
+
+out:
+ mutex_unlock(&of_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(of_overlay_destroy);
+
+/**
+ * of_overlay_destroy_all() - Removes all overlays from the system
+ *
+ * Removes all overlays from the system in the correct order.
+ *
+ * Returns 0 on success, or a negative error number
+ */
+int of_overlay_destroy_all(void)
+{
+ struct of_overlay *ov, *ovn;
+
+ mutex_lock(&of_mutex);
+
+ /* the tail of the list is guaranteed to be safe to remove */
+ list_for_each_entry_safe_reverse(ov, ovn, &ov_list, node) {
+ list_del(&ov->node);
+ of_changeset_revert(&ov->cset);
+ of_free_overlay_info(ov);
+ idr_remove(&ov_idr, ov->id);
+ kfree(ov);
+ }
+
+ mutex_unlock(&of_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_overlay_destroy_all);
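Editor's note: a hedged usage sketch for the two exported entry points added in this file; the example_* names, and the assumption that the overlay tree has already been unflattened and resolved, are illustrative and not part of the patch:

    static int example_apply_and_remove(struct device_node *overlay_np)
    {
            int ov_id;

            /* builds the changeset, applies it and tracks the overlay by id */
            ov_id = of_overlay_create(overlay_np);
            if (ov_id < 0)
                    return ov_id;

            /* ... use the devices created by the overlay ... */

            /* only succeeds while this overlay is still the topmost one */
            return of_overlay_destroy(ov_id);
    }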
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 36b4035881b0..d2acae825af9 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -25,8 +25,7 @@
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
-void __initdata (*of_pdt_build_more)(struct device_node *dp,
- struct device_node ***nextp);
+void __initdata (*of_pdt_build_more)(struct device_node *dp);
#if defined(CONFIG_SPARC)
unsigned int of_pdt_unique_id __initdata;
@@ -192,8 +191,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
}
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
- phandle node,
- struct device_node ***nextp)
+ phandle node)
{
struct device_node *ret = NULL, *prev_sibling = NULL;
struct device_node *dp;
@@ -210,16 +208,12 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
ret = dp;
prev_sibling = dp;
- *(*nextp) = dp;
- *nextp = &dp->allnext;
-
dp->full_name = of_pdt_build_full_name(dp);
- dp->child = of_pdt_build_tree(dp,
- of_pdt_prom_ops->getchild(node), nextp);
+ dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node));
if (of_pdt_build_more)
- of_pdt_build_more(dp, nextp);
+ of_pdt_build_more(dp);
node = of_pdt_prom_ops->getsibling(node);
}
@@ -234,20 +228,17 @@ static void * __init kernel_tree_alloc(u64 size, u64 align)
void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
{
- struct device_node **nextp;
-
BUG_ON(!ops);
of_pdt_prom_ops = ops;
- of_allnodes = of_pdt_create_node(root_node, NULL);
+ of_root = of_pdt_create_node(root_node, NULL);
#if defined(CONFIG_SPARC)
- of_allnodes->path_component_name = "";
+ of_root->path_component_name = "";
#endif
- of_allnodes->full_name = "/";
+ of_root->full_name = "/";
- nextp = &of_allnodes->allnext;
- of_allnodes->child = of_pdt_build_tree(of_allnodes,
- of_pdt_prom_ops->getchild(of_allnodes->phandle), &nextp);
+ of_root->child = of_pdt_build_tree(of_root,
+ of_pdt_prom_ops->getchild(of_root->phandle));
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(kernel_tree_alloc);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 3b64d0bf5bba..5b33c6a21807 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -138,7 +139,7 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
dev->dev.of_node = of_node_get(np);
- dev->dev.parent = parent;
+ dev->dev.parent = parent ? : &platform_bus;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
@@ -164,6 +165,9 @@ static void of_dma_configure(struct device *dev)
{
u64 dma_addr, paddr, size;
int ret;
+ bool coherent;
+ unsigned long offset;
+ struct iommu_ops *iommu;
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
@@ -178,28 +182,30 @@ static void of_dma_configure(struct device *dev)
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
- /*
- * if dma-coherent property exist, call arch hook to setup
- * dma coherent operations.
- */
- if (of_dma_is_coherent(dev->of_node)) {
- set_arch_dma_coherent_ops(dev);
- dev_dbg(dev, "device is dma coherent\n");
- }
-
- /*
- * if dma-ranges property doesn't exist - just return else
- * setup the dma offset
- */
ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size);
if (ret < 0) {
- dev_dbg(dev, "no dma range information to setup\n");
- return;
+ dma_addr = offset = 0;
+ size = dev->coherent_dma_mask;
+ } else {
+ offset = PFN_DOWN(paddr - dma_addr);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
}
+ dev->dma_pfn_offset = offset;
+
+ coherent = of_dma_is_coherent(dev->of_node);
+ dev_dbg(dev, "device is%sdma coherent\n",
+ coherent ? " " : " not ");
+
+ iommu = of_iommu_configure(dev);
+ dev_dbg(dev, "device is%sbehind an iommu\n",
+ iommu ? " " : " not ");
- /* DMA ranges found. Calculate and set dma_pfn_offset */
- dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+}
+
+static void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
/**
@@ -228,16 +234,12 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
- of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
-
- /* We do not fill the DMA ops for platform devices by default.
- * This is currently the responsibility of the platform code
- * to do such, possibly using a device notifier
- */
+ of_dma_configure(&dev->dev);
if (of_device_add(dev) != 0) {
+ of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -291,7 +293,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
/* setup generic device info */
dev->dev.of_node = of_node_get(node);
- dev->dev.parent = parent;
+ dev->dev.parent = parent ? : &platform_bus;
dev->dev.platform_data = platform_data;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
@@ -500,6 +502,7 @@ int of_platform_populate(struct device_node *root,
if (rc)
break;
}
+ of_node_set_flag(root, OF_POPULATED_BUS);
of_node_put(root);
return rc;
@@ -542,8 +545,66 @@ static int of_platform_device_destroy(struct device *dev, void *data)
*/
void of_platform_depopulate(struct device *parent)
{
- device_for_each_child(parent, NULL, of_platform_device_destroy);
+ if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) {
+ device_for_each_child(parent, NULL, of_platform_device_destroy);
+ of_node_clear_flag(parent->of_node, OF_POPULATED_BUS);
+ }
}
EXPORT_SYMBOL_GPL(of_platform_depopulate);
+#ifdef CONFIG_OF_DYNAMIC
+static int of_platform_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct platform_device *pdev_parent, *pdev;
+ bool children_left;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ /* verify that the parent is a bus */
+ if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
+ return NOTIFY_OK; /* not for us */
+
+ /* pdev_parent may be NULL when no bus platform device */
+ pdev_parent = of_find_device_by_node(rd->dn->parent);
+ pdev = of_platform_device_create(rd->dn, NULL,
+ pdev_parent ? &pdev_parent->dev : NULL);
+ of_dev_put(pdev_parent);
+
+ if (pdev == NULL) {
+ pr_err("%s: failed to create for '%s'\n",
+ __func__, rd->dn->full_name);
+ /* of_platform_device_create tosses the error code */
+ return notifier_from_errno(-EINVAL);
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* find our device by node */
+ pdev = of_find_device_by_node(rd->dn);
+ if (pdev == NULL)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ of_platform_device_destroy(&pdev->dev, &children_left);
+
+ /* and put the reference of the find */
+ of_dev_put(pdev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block platform_of_notifier = {
+ .notifier_call = of_platform_notify,
+};
+
+void of_platform_register_reconfig_notifier(void)
+{
+ WARN_ON(of_reconfig_notifier_register(&platform_of_notifier));
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
#endif /* CONFIG_OF_ADDRESS */
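Editor's note: because of_platform_populate() now flags its root with OF_POPULATED_BUS and of_platform_depopulate() only acts on roots carrying that flag, the two calls pair up naturally in a bus driver. A hedged sketch under assumed example_bus_* names, not taken from the patch:

    static int example_bus_probe(struct platform_device *pdev)
    {
            /* creates child platform devices and marks the node as a populated bus */
            return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
    }

    static int example_bus_remove(struct platform_device *pdev)
    {
            /* tears the children down again and clears OF_POPULATED_BUS */
            of_platform_depopulate(&pdev->dev);
            return 0;
    }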
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index aed7959f800d..640eb4cb46e3 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -111,7 +111,8 @@ static void __of_adjust_tree_phandles(struct device_node *node,
__of_adjust_tree_phandles(child, phandle_delta);
}
-static int __of_adjust_phandle_ref(struct device_node *node, struct property *rprop, int value, bool is_delta)
+static int __of_adjust_phandle_ref(struct device_node *node,
+ struct property *rprop, int value)
{
phandle phandle;
struct device_node *refnode;
@@ -181,7 +182,7 @@ static int __of_adjust_phandle_ref(struct device_node *node, struct property *rp
goto err_fail;
}
- phandle = is_delta ? be32_to_cpup(sprop->value + offset) + value : value;
+ phandle = value;
*(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
}
@@ -190,36 +191,97 @@ err_fail:
return err;
}
+/* compare nodes taking into account that 'name' strips out the @ part */
+static int __of_node_name_cmp(const struct device_node *dn1,
+ const struct device_node *dn2)
+{
+ const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
+ const char *n2 = strrchr(dn2->full_name, '/') ? : "/";
+
+ return of_node_cmp(n1, n2);
+}
+
/*
* Adjust the local phandle references by the given phandle delta.
- * Assumes the existances of a __local_fixups__ node at the root
- * of the tree. Does not take any devtree locks so make sure you
- * call this on a tree which is at the detached state.
+ * Assumes the existence of a __local_fixups__ node at the root.
+ * Assumes that __of_verify_tree_phandle_references has been called.
+ * Does not take any devtree locks so make sure you call this on a tree
+ * which is in a detached state.
*/
static int __of_adjust_tree_phandle_references(struct device_node *node,
- int phandle_delta)
+ struct device_node *target, int phandle_delta)
{
- struct device_node *child;
- struct property *rprop;
- int err;
-
- /* locate the symbols & fixups nodes on resolve */
- for_each_child_of_node(node, child)
- if (of_node_cmp(child->name, "__local_fixups__") == 0)
- break;
+ struct device_node *child, *childtarget;
+ struct property *rprop, *sprop;
+ int err, i, count;
+ unsigned int off;
+ phandle phandle;
- /* no local fixups */
- if (!child)
+ if (node == NULL)
return 0;
- /* find the local fixups property */
- for_each_property_of_node(child, rprop) {
+ for_each_property_of_node(node, rprop) {
+
/* skip properties added automatically */
- if (of_prop_cmp(rprop->name, "name") == 0)
+ if (of_prop_cmp(rprop->name, "name") == 0 ||
+ of_prop_cmp(rprop->name, "phandle") == 0 ||
+ of_prop_cmp(rprop->name, "linux,phandle") == 0)
continue;
- err = __of_adjust_phandle_ref(node, rprop, phandle_delta, true);
- if (err)
+ if ((rprop->length % 4) != 0 || rprop->length == 0) {
+ pr_err("%s: Illegal property (size) '%s' @%s\n",
+ __func__, rprop->name, node->full_name);
+ return -EINVAL;
+ }
+ count = rprop->length / sizeof(__be32);
+
+ /* now find the target property */
+ for_each_property_of_node(target, sprop) {
+ if (of_prop_cmp(sprop->name, rprop->name) == 0)
+ break;
+ }
+
+ if (sprop == NULL) {
+ pr_err("%s: Could not find target property '%s' @%s\n",
+ __func__, rprop->name, node->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ off = be32_to_cpu(((__be32 *)rprop->value)[i]);
+ /* make sure the offset doesn't overstep (even wrap) */
+ if (off >= sprop->length ||
+ (off + 4) > sprop->length) {
+ pr_err("%s: Illegal property '%s' @%s\n",
+ __func__, rprop->name,
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (phandle_delta) {
+ /* adjust */
+ phandle = be32_to_cpu(*(__be32 *)(sprop->value + off));
+ phandle += phandle_delta;
+ *(__be32 *)(sprop->value + off) = cpu_to_be32(phandle);
+ }
+ }
+ }
+
+ for_each_child_of_node(node, child) {
+
+ for_each_child_of_node(target, childtarget)
+ if (__of_node_name_cmp(child, childtarget) == 0)
+ break;
+
+ if (!childtarget) {
+ pr_err("%s: Could not find target child '%s' @%s\n",
+ __func__, child->name, node->full_name);
+ return -EINVAL;
+ }
+
+ err = __of_adjust_tree_phandle_references(child, childtarget,
+ phandle_delta);
+ if (err != 0)
return err;
}
@@ -241,7 +303,7 @@ static int __of_adjust_tree_phandle_references(struct device_node *node,
*/
int of_resolve_phandles(struct device_node *resolve)
{
- struct device_node *child, *refnode;
+ struct device_node *child, *childroot, *refnode;
struct device_node *root_sym, *resolve_sym, *resolve_fix;
struct property *rprop;
const char *refpath;
@@ -255,9 +317,23 @@ int of_resolve_phandles(struct device_node *resolve)
/* first we need to adjust the phandles */
phandle_delta = of_get_tree_max_phandle() + 1;
__of_adjust_tree_phandles(resolve, phandle_delta);
- err = __of_adjust_tree_phandle_references(resolve, phandle_delta);
- if (err != 0)
- return err;
+
+ /* locate the local fixups */
+ childroot = NULL;
+ for_each_child_of_node(resolve, childroot)
+ if (of_node_cmp(childroot->name, "__local_fixups__") == 0)
+ break;
+
+ if (childroot != NULL) {
+ /* resolve root is guaranteed to be the '/' */
+ err = __of_adjust_tree_phandle_references(childroot,
+ resolve, 0);
+ if (err != 0)
+ return err;
+
+ BUG_ON(__of_adjust_tree_phandle_references(childroot,
+ resolve, phandle_delta));
+ }
root_sym = NULL;
resolve_sym = NULL;
@@ -322,7 +398,7 @@ int of_resolve_phandles(struct device_node *resolve)
pr_debug("%s: %s phandle is 0x%08x\n",
__func__, rprop->name, phandle);
- err = __of_adjust_phandle_ref(resolve, rprop, phandle, false);
+ err = __of_adjust_phandle_ref(resolve, rprop, phandle);
if (err)
break;
}
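Editor's note: the reworked resolver is what an overlay user runs on a detached tree before handing it to of_overlay_create(). A minimal hedged sketch, assuming the caller has already unflattened the overlay into overlay_np (an illustrative name, not from the patch):

    static int example_resolve_and_apply(struct device_node *overlay_np)
    {
            /* rewrites local phandles and fixes up references against the live tree */
            int err = of_resolve_phandles(overlay_np);

            if (err)
                    return err;

            return of_overlay_create(overlay_np);
    }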
diff --git a/drivers/of/testcase-data/testcases.dts b/drivers/of/testcase-data/testcases.dts
deleted file mode 100644
index 6994e15c24bf..000000000000
--- a/drivers/of/testcase-data/testcases.dts
+++ /dev/null
@@ -1,50 +0,0 @@
-/dts-v1/;
-/ {
- testcase-data {
- changeset {
- prop-update = "hello";
- prop-remove = "world";
- node-remove {
- };
- };
- };
-};
-#include "tests-phandle.dtsi"
-#include "tests-interrupts.dtsi"
-#include "tests-match.dtsi"
-#include "tests-platform.dtsi"
-
-/*
- * phandle fixup data - generated by dtc patches that aren't upstream.
- * This data must be regenerated whenever phandle references are modified in
- * the testdata tree.
- *
- * The format of this data may be subject to change. For the time being consider
- * this a kernel-internal data format.
- */
-/ { __local_fixups__ {
- fixup = "/testcase-data/testcase-device2:interrupt-parent:0",
- "/testcase-data/testcase-device1:interrupt-parent:0",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:60",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:52",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:44",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:36",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:24",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:8",
- "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:0",
- "/testcase-data/interrupts/interrupts1:interrupt-parent:0",
- "/testcase-data/interrupts/interrupts0:interrupt-parent:0",
- "/testcase-data/interrupts/intmap1:interrupt-map:12",
- "/testcase-data/interrupts/intmap0:interrupt-map:52",
- "/testcase-data/interrupts/intmap0:interrupt-map:36",
- "/testcase-data/interrupts/intmap0:interrupt-map:16",
- "/testcase-data/interrupts/intmap0:interrupt-map:4",
- "/testcase-data/phandle-tests/consumer-a:phandle-list-bad-args:12",
- "/testcase-data/phandle-tests/consumer-a:phandle-list-bad-args:0",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:56",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:52",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:40",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:24",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:8",
- "/testcase-data/phandle-tests/consumer-a:phandle-list:0";
-}; };
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
new file mode 100644
index 000000000000..12f7c3d649c8
--- /dev/null
+++ b/drivers/of/unittest-data/testcases.dts
@@ -0,0 +1,79 @@
+/dts-v1/;
+/ {
+ testcase-data {
+ changeset {
+ prop-update = "hello";
+ prop-remove = "world";
+ node-remove {
+ };
+ };
+ };
+};
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
+#include "tests-platform.dtsi"
+#include "tests-overlay.dtsi"
+
+/*
+ * phandle fixup data - generated by dtc patches that aren't upstream.
+ * This data must be regenerated whenever phandle references are modified in
+ * the testdata tree.
+ *
+ * The format of this data may be subject to change. For the time being consider
+ * this a kernel-internal data format.
+ */
+/ { __local_fixups__ {
+ testcase-data {
+ phandle-tests {
+ consumer-a {
+ phandle-list = <0x00000000 0x00000008
+ 0x00000018 0x00000028
+ 0x00000034 0x00000038>;
+ phandle-list-bad-args = <0x00000000 0x0000000c>;
+ };
+ };
+ interrupts {
+ intmap0 {
+ interrupt-map = <0x00000004 0x00000010
+ 0x00000024 0x00000034>;
+ };
+ intmap1 {
+ interrupt-map = <0x0000000c>;
+ };
+ interrupts0 {
+ interrupt-parent = <0x00000000>;
+ };
+ interrupts1 {
+ interrupt-parent = <0x00000000>;
+ };
+ interrupts-extended0 {
+ interrupts-extended = <0x00000000 0x00000008
+ 0x00000018 0x00000024
+ 0x0000002c 0x00000034
+ 0x0000003c>;
+ };
+ };
+ testcase-device1 {
+ interrupt-parent = <0x00000000>;
+ };
+ testcase-device2 {
+ interrupt-parent = <0x00000000>;
+ };
+ overlay2 {
+ fragment@0 {
+ target = <0x00000000>;
+ };
+ };
+ overlay3 {
+ fragment@0 {
+ target = <0x00000000>;
+ };
+ };
+ overlay4 {
+ fragment@0 {
+ target = <0x00000000>;
+ };
+ };
+ };
+}; };
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/unittest-data/tests-interrupts.dtsi
index da4695f60351..da4695f60351 100644
--- a/drivers/of/testcase-data/tests-interrupts.dtsi
+++ b/drivers/of/unittest-data/tests-interrupts.dtsi
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/unittest-data/tests-match.dtsi
index c9e541129534..c9e541129534 100644
--- a/drivers/of/testcase-data/tests-match.dtsi
+++ b/drivers/of/unittest-data/tests-match.dtsi
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
new file mode 100644
index 000000000000..75976da22b2e
--- /dev/null
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -0,0 +1,180 @@
+
+/ {
+ testcase-data {
+ overlay-node {
+
+ /* test bus */
+ selftestbus: test-bus {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ selftest100: test-selftest100 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <100>;
+ };
+
+ selftest101: test-selftest101 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <101>;
+ };
+
+ selftest0: test-selftest0 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <0>;
+ };
+
+ selftest1: test-selftest1 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ selftest2: test-selftest2 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <2>;
+ };
+
+ selftest3: test-selftest3 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <3>;
+ };
+
+ selftest5: test-selftest5 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <5>;
+ };
+
+ selftest6: test-selftest6 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <6>;
+ };
+
+ selftest7: test-selftest7 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <7>;
+ };
+
+ selftest8: test-selftest8 {
+ compatible = "selftest";
+ status = "disabled";
+ reg = <8>;
+ };
+ };
+ };
+
+ /* test enable using absolute target path */
+ overlay0 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest0";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+
+ /* test disable using absolute target path */
+ overlay1 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest1";
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+
+ /* test enable using label */
+ overlay2 {
+ fragment@0 {
+ target = <&selftest2>;
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+
+ /* test disable using label */
+ overlay3 {
+ fragment@0 {
+ target = <&selftest3>;
+ __overlay__ {
+ status = "disabled";
+ };
+ };
+ };
+
+ /* test insertion of a full node */
+ overlay4 {
+ fragment@0 {
+ target = <&selftestbus>;
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest4 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <4>;
+ };
+ };
+ };
+ };
+
+ /* test overlay apply revert */
+ overlay5 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest5";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+
+ /* test overlays application and removal in sequence */
+ overlay6 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest6";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+ overlay7 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest7";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+
+ /* test overlays application and removal in bad sequence */
+ overlay8 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest8";
+ __overlay__ {
+ status = "okay";
+ };
+ };
+ };
+ overlay9 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus/test-selftest8";
+ __overlay__ {
+ property-foo = "bar";
+ };
+ };
+ };
+
+ };
+};
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
index 5b1527e8a7fb..5b1527e8a7fb 100644
--- a/drivers/of/testcase-data/tests-phandle.dtsi
+++ b/drivers/of/unittest-data/tests-phandle.dtsi
diff --git a/drivers/of/testcase-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index eb20eeb2b062..eb20eeb2b062 100644
--- a/drivers/of/testcase-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
diff --git a/drivers/of/selftest.c b/drivers/of/unittest.c
index e2d79afa9dc6..844838e11ef1 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/unittest.c
@@ -17,6 +17,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include "of_private.h"
@@ -30,19 +32,22 @@ static struct device_node *nodes[NO_OF_NODES];
static int last_node_index;
static bool selftest_live_tree;
-#define selftest(result, fmt, ...) { \
- if (!(result)) { \
+#define selftest(result, fmt, ...) ({ \
+ bool failed = !(result); \
+ if (failed) { \
selftest_results.failed++; \
pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
selftest_results.passed++; \
pr_debug("pass %s():%i\n", __func__, __LINE__); \
} \
-}
+ failed; \
+})
static void __init of_selftest_find_node_by_name(void)
{
struct device_node *np;
+ const char *options;
np = of_find_node_by_path("/testcase-data");
selftest(np && !strcmp("/testcase-data", np->full_name),
@@ -83,6 +88,35 @@ static void __init of_selftest_find_node_by_name(void)
np = of_find_node_by_path("testcase-alias/missing-path");
selftest(!np, "non-existent alias with relative path returned node %s\n", np->full_name);
of_node_put(np);
+
+ np = of_find_node_opts_by_path("/testcase-data:testoption", &options);
+ selftest(np && !strcmp("testoption", options),
+ "option path test failed\n");
+ of_node_put(np);
+
+ np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
+ selftest(np, "NULL option path test failed\n");
+ of_node_put(np);
+
+ np = of_find_node_opts_by_path("testcase-alias:testaliasoption",
+ &options);
+ selftest(np && !strcmp("testaliasoption", options),
+ "option alias path test failed\n");
+ of_node_put(np);
+
+ np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
+ selftest(np, "NULL option alias path test failed\n");
+ of_node_put(np);
+
+ options = "testoption";
+ np = of_find_node_opts_by_path("testcase-alias", &options);
+ selftest(np && !options, "option clearing test failed\n");
+ of_node_put(np);
+
+ options = "testoption";
+ np = of_find_node_opts_by_path("/", &options);
+ selftest(np && !options, "option clearing root node test failed\n");
+ of_node_put(np);
}
static void __init of_selftest_dynamic(void)
@@ -148,7 +182,7 @@ static void __init of_selftest_dynamic(void)
static int __init of_selftest_check_node_linkage(struct device_node *np)
{
- struct device_node *child, *allnext_index = np;
+ struct device_node *child;
int count = 0, rc;
for_each_child_of_node(np, child) {
@@ -158,14 +192,6 @@ static int __init of_selftest_check_node_linkage(struct device_node *np)
return -EINVAL;
}
- while (allnext_index && allnext_index != child)
- allnext_index = allnext_index->allnext;
- if (allnext_index != child) {
- pr_err("Node %s is ordered differently in sibling and allnode lists\n",
- child->name);
- return -EINVAL;
- }
-
rc = of_selftest_check_node_linkage(child);
if (rc < 0)
return rc;
@@ -180,12 +206,12 @@ static void __init of_selftest_check_tree_linkage(void)
struct device_node *np;
int allnode_count = 0, child_count;
- if (!of_allnodes)
+ if (!of_root)
return;
for_each_of_allnodes(np)
allnode_count++;
- child_count = of_selftest_check_node_linkage(of_allnodes);
+ child_count = of_selftest_check_node_linkage(of_root);
selftest(child_count > 0, "Device node data structure is corrupted\n");
selftest(child_count == allnode_count, "allnodes list size (%i) doesn't match"
@@ -451,15 +477,15 @@ static void __init of_selftest_changeset(void)
struct property *ppadd, padd = { .name = "prop-add", .length = 0, .value = "" };
struct property *ppupdate, pupdate = { .name = "prop-update", .length = 5, .value = "abcd" };
struct property *ppremove;
- struct device_node *n1, *n2, *n21, *nremove, *parent;
+ struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
struct of_changeset chgset;
of_changeset_init(&chgset);
- n1 = __of_node_alloc("/testcase-data/changeset/n1", GFP_KERNEL);
+ n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
selftest(n1, "testcase setup failure\n");
- n2 = __of_node_alloc("/testcase-data/changeset/n2", GFP_KERNEL);
+ n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
selftest(n2, "testcase setup failure\n");
- n21 = __of_node_alloc("/testcase-data/changeset/n2/n21", GFP_KERNEL);
+ n21 = __of_node_dup(NULL, "%s/%s", "/testcase-data/changeset/n2", "n21");
selftest(n21, "testcase setup failure %p\n", n21);
nremove = of_find_node_by_path("/testcase-data/changeset/node-remove");
selftest(nremove, "testcase setup failure\n");
@@ -487,6 +513,11 @@ static void __init of_selftest_changeset(void)
selftest(!of_changeset_apply(&chgset), "apply failed\n");
mutex_unlock(&of_mutex);
+ /* Make sure node names are constructed correctly */
+ selftest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")),
+ "'%s' not added\n", n21->full_name);
+ of_node_put(np);
+
mutex_lock(&of_mutex);
selftest(!of_changeset_revert(&chgset), "revert failed\n");
mutex_unlock(&of_mutex);
@@ -702,10 +733,13 @@ static void __init of_selftest_match_node(void)
}
}
+struct device test_bus = {
+ .init_name = "unittest-bus",
+};
static void __init of_selftest_platform_populate(void)
{
- int irq;
- struct device_node *np, *child;
+ int irq, rc;
+ struct device_node *np, *child, *grandchild;
struct platform_device *pdev;
struct of_device_id match[] = {
{ .compatible = "test-device", },
@@ -730,20 +764,32 @@ static void __init of_selftest_platform_populate(void)
irq = platform_get_irq(pdev, 0);
selftest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
- np = of_find_node_by_path("/testcase-data/platform-tests");
- if (!np) {
- pr_err("No testcase data in device tree\n");
+ if (selftest(np = of_find_node_by_path("/testcase-data/platform-tests"),
+ "No testcase data in device tree\n"))
+ return;
+
+ if (selftest(!(rc = device_register(&test_bus)),
+ "testbus registration failed; rc=%i\n", rc))
+ return;
- }
for_each_child_of_node(np, child) {
- struct device_node *grandchild;
- of_platform_populate(child, match, NULL, NULL);
+ of_platform_populate(child, match, NULL, &test_bus);
for_each_child_of_node(child, grandchild)
selftest(of_find_device_by_node(grandchild),
"Could not create device for node '%s'\n",
grandchild->name);
}
+
+ of_platform_depopulate(&test_bus);
+ for_each_child_of_node(np, child) {
+ for_each_child_of_node(child, grandchild)
+ selftest(!of_find_device_by_node(grandchild),
+ "device didn't get destroyed '%s'\n",
+ grandchild->name);
+ }
+
+ device_unregister(&test_bus);
+ of_node_put(np);
}
/**
@@ -775,33 +821,29 @@ static void update_node_properties(struct device_node *np,
*/
static int attach_node_and_children(struct device_node *np)
{
- struct device_node *next, *root = np, *dup;
+ struct device_node *next, *dup, *child;
- /* skip root node */
- np = np->child;
- /* storing a copy in temporary node */
- dup = np;
+ dup = of_find_node_by_path(np->full_name);
+ if (dup) {
+ update_node_properties(np, dup);
+ return 0;
+ }
- while (dup) {
+ /* Children of the root need to be remembered for removal */
+ if (np->parent == of_root) {
if (WARN_ON(last_node_index >= NO_OF_NODES))
return -EINVAL;
- nodes[last_node_index++] = dup;
- dup = dup->sibling;
+ nodes[last_node_index++] = np;
}
- dup = NULL;
- while (np) {
- next = np->allnext;
- dup = of_find_node_by_path(np->full_name);
- if (dup)
- update_node_properties(np, dup);
- else {
- np->child = NULL;
- if (np->parent == root)
- np->parent = of_allnodes;
- of_attach_node(np);
- }
- np = next;
+ child = np->child;
+ np->child = NULL;
+ np->sibling = NULL;
+ of_attach_node(np);
+ while (child) {
+ next = child->sibling;
+ attach_node_and_children(child);
+ child = next;
}
return 0;
@@ -846,10 +888,10 @@ static int __init selftest_data_add(void)
return -EINVAL;
}
- if (!of_allnodes) {
+ if (!of_root) {
/* enabling flag for removing nodes */
selftest_live_tree = true;
- of_allnodes = selftest_data_node;
+ of_root = selftest_data_node;
for_each_of_allnodes(np)
__of_attach_node_sysfs(np);
@@ -859,7 +901,14 @@ static int __init selftest_data_add(void)
}
/* attach the sub-tree to live tree */
- return attach_node_and_children(selftest_data_node);
+ np = selftest_data_node->child;
+ while (np) {
+ struct device_node *next = np->sibling;
+ np->parent = of_root;
+ attach_node_and_children(np);
+ np = next;
+ }
+ return 0;
}
/**
@@ -889,10 +938,10 @@ static void selftest_data_remove(void)
of_node_put(of_chosen);
of_aliases = NULL;
of_chosen = NULL;
- for_each_child_of_node(of_allnodes, np)
+ for_each_child_of_node(of_root, np)
detach_node_and_children(np);
- __of_detach_node_sysfs(of_allnodes);
- of_allnodes = NULL;
+ __of_detach_node_sysfs(of_root);
+ of_root = NULL;
return;
}
@@ -915,6 +964,483 @@ static void selftest_data_remove(void)
}
}
+#ifdef CONFIG_OF_OVERLAY
+
+static int selftest_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (np == NULL) {
+ dev_err(dev, "No OF data for device\n");
+ return -EINVAL;
+
+ }
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+ return 0;
+}
+
+static int selftest_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+ return 0;
+}
+
+static struct of_device_id selftest_match[] = {
+ { .compatible = "selftest", },
+ {},
+};
+
+static struct platform_driver selftest_driver = {
+ .probe = selftest_probe,
+ .remove = selftest_remove,
+ .driver = {
+ .name = "selftest",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(selftest_match),
+ },
+};
+
+/* get the platform device instantiated at the path */
+static struct platform_device *of_path_to_platform_device(const char *path)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_find_node_by_path(path);
+ if (np == NULL)
+ return NULL;
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+
+ return pdev;
+}
+
+/* find out if a platform device exists at that path */
+static int of_path_platform_device_exists(const char *path)
+{
+ struct platform_device *pdev;
+
+ pdev = of_path_to_platform_device(path);
+ platform_device_put(pdev);
+ return pdev != NULL;
+}
+
+static const char *selftest_path(int nr)
+{
+ static char buf[256];
+
+ snprintf(buf, sizeof(buf) - 1,
+ "/testcase-data/overlay-node/test-bus/test-selftest%d", nr);
+ buf[sizeof(buf) - 1] = '\0';
+
+ return buf;
+}
+
+static const char *overlay_path(int nr)
+{
+ static char buf[256];
+
+ snprintf(buf, sizeof(buf) - 1,
+ "/testcase-data/overlay%d", nr);
+ buf[sizeof(buf) - 1] = '\0';
+
+ return buf;
+}
+
+static const char *bus_path = "/testcase-data/overlay-node/test-bus";
+
+static int of_selftest_apply_overlay(int selftest_nr, int overlay_nr,
+ int *overlay_id)
+{
+ struct device_node *np = NULL;
+ int ret, id = -1;
+
+ np = of_find_node_by_path(overlay_path(overlay_nr));
+ if (np == NULL) {
+ selftest(0, "could not find overlay node @\"%s\"\n",
+ overlay_path(overlay_nr));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_overlay_create(np);
+ if (ret < 0) {
+ selftest(0, "could not create overlay from \"%s\"\n",
+ overlay_path(overlay_nr));
+ goto out;
+ }
+ id = ret;
+
+ ret = 0;
+
+out:
+ of_node_put(np);
+
+ if (overlay_id)
+ *overlay_id = id;
+
+ return ret;
+}
+
+/* apply an overlay while checking before and after states */
+static int of_selftest_apply_overlay_check(int overlay_nr, int selftest_nr,
+ int before, int after)
+{
+ int ret;
+
+ /* selftest device must be in the before state */
+ if (of_path_platform_device_exists(selftest_path(selftest_nr))
+ != before) {
+ selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr),
+ !before ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ ret = of_selftest_apply_overlay(overlay_nr, selftest_nr, NULL);
+ if (ret != 0) {
+ /* of_selftest_apply_overlay already called selftest() */
+ return ret;
+ }
+
+ /* selftest device must be in the after state */
+ if (of_path_platform_device_exists(selftest_path(selftest_nr))
+ != after) {
+ selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr),
+ !after ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* apply an overlay and then revert it while checking before, after states */
+static int of_selftest_apply_revert_overlay_check(int overlay_nr,
+ int selftest_nr, int before, int after)
+{
+ int ret, ov_id;
+
+ /* selftest device must be in before state */
+ if (of_path_platform_device_exists(selftest_path(selftest_nr))
+ != before) {
+ selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr),
+ !before ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ /* apply the overlay */
+ ret = of_selftest_apply_overlay(overlay_nr, selftest_nr, &ov_id);
+ if (ret != 0) {
+ /* of_selftest_apply_overlay already called selftest() */
+ return ret;
+ }
+
+ /* selftest device must be in after state */
+ if (of_path_platform_device_exists(selftest_path(selftest_nr))
+ != after) {
+ selftest(0, "overlay @\"%s\" failed to create @\"%s\" %s\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr),
+ !after ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ ret = of_overlay_destroy(ov_id);
+ if (ret != 0) {
+ selftest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr));
+ return ret;
+ }
+
+ /* selftest device must be again in before state */
+ if (of_path_platform_device_exists(selftest_path(selftest_nr))
+ != before) {
+ selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ overlay_path(overlay_nr),
+ selftest_path(selftest_nr),
+ !before ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* test activation of device */
+static void of_selftest_overlay_0(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(0, 0, 0, 1);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 0);
+}
+
+/* test deactivation of device */
+static void of_selftest_overlay_1(void)
+{
+ int ret;
+
+ /* device should disable */
+ ret = of_selftest_apply_overlay_check(1, 1, 1, 0);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 1);
+}
+
+/* test activation of device */
+static void of_selftest_overlay_2(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(2, 2, 0, 1);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 2);
+}
+
+/* test deactivation of device */
+static void of_selftest_overlay_3(void)
+{
+ int ret;
+
+ /* device should disable */
+ ret = of_selftest_apply_overlay_check(3, 3, 1, 0);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 3);
+}
+
+/* test activation of a full device node */
+static void of_selftest_overlay_4(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(4, 4, 0, 1);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 4);
+}
+
+/* test overlay apply/revert sequence */
+static void of_selftest_overlay_5(void)
+{
+ int ret;
+
+ /* device should enable, then revert */
+ ret = of_selftest_apply_revert_overlay_check(5, 5, 0, 1);
+ if (ret != 0)
+ return;
+
+ selftest(1, "overlay test %d passed\n", 5);
+}
+
+/* test overlay application and removal in sequence */
+static void of_selftest_overlay_6(void)
+{
+ struct device_node *np;
+ int ret, i, ov_id[2];
+ int overlay_nr = 6, selftest_nr = 6;
+ int before = 0, after = 1;
+
+ /* selftest device must be in before state */
+ for (i = 0; i < 2; i++) {
+ if (of_path_platform_device_exists(
+ selftest_path(selftest_nr + i))
+ != before) {
+ selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ overlay_path(overlay_nr + i),
+ selftest_path(selftest_nr + i),
+ !before ? "enabled" : "disabled");
+ return;
+ }
+ }
+
+ /* apply the overlays */
+ for (i = 0; i < 2; i++) {
+
+ np = of_find_node_by_path(overlay_path(overlay_nr + i));
+ if (np == NULL) {
+ selftest(0, "could not find overlay node @\"%s\"\n",
+ overlay_path(overlay_nr + i));
+ return;
+ }
+
+ ret = of_overlay_create(np);
+ if (ret < 0) {
+ selftest(0, "could not create overlay from \"%s\"\n",
+ overlay_path(overlay_nr + i));
+ return;
+ }
+ ov_id[i] = ret;
+ }
+
+ for (i = 0; i < 2; i++) {
+ /* selftest device must be in after state */
+ if (of_path_platform_device_exists(
+ selftest_path(selftest_nr + i))
+ != after) {
+ selftest(0, "overlay @\"%s\" failed @\"%s\" %s\n",
+ overlay_path(overlay_nr + i),
+ selftest_path(selftest_nr + i),
+ !after ? "enabled" : "disabled");
+ return;
+ }
+ }
+
+ for (i = 1; i >= 0; i--) {
+ ret = of_overlay_destroy(ov_id[i]);
+ if (ret != 0) {
+ selftest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
+ overlay_path(overlay_nr + i),
+ selftest_path(selftest_nr + i));
+ return;
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ /* selftest device must be again in before state */
+ if (of_path_platform_device_exists(
+ selftest_path(selftest_nr + i))
+ != before) {
+ selftest(0, "overlay @\"%s\" with device @\"%s\" %s\n",
+ overlay_path(overlay_nr + i),
+ selftest_path(selftest_nr + i),
+ !before ? "enabled" : "disabled");
+ return;
+ }
+ }
+
+ selftest(1, "overlay test %d passed\n", 6);
+}
+
+/* test overlay application in sequence */
+static void of_selftest_overlay_8(void)
+{
+ struct device_node *np;
+ int ret, i, ov_id[2];
+ int overlay_nr = 8, selftest_nr = 8;
+
+ /* we don't care about device state in this test */
+
+ /* apply the overlays */
+ for (i = 0; i < 2; i++) {
+
+ np = of_find_node_by_path(overlay_path(overlay_nr + i));
+ if (np == NULL) {
+ selftest(0, "could not find overlay node @\"%s\"\n",
+ overlay_path(overlay_nr + i));
+ return;
+ }
+
+ ret = of_overlay_create(np);
+ if (ret < 0) {
+ selftest(0, "could not create overlay from \"%s\"\n",
+ overlay_path(overlay_nr + i));
+ return;
+ }
+ ov_id[i] = ret;
+ }
+
+ /* now try to remove first overlay (it should fail) */
+ ret = of_overlay_destroy(ov_id[0]);
+ if (ret == 0) {
+ selftest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
+ overlay_path(overlay_nr + 0),
+ selftest_path(selftest_nr));
+ return;
+ }
+
+ /* removing them in order should work */
+ for (i = 1; i >= 0; i--) {
+ ret = of_overlay_destroy(ov_id[i]);
+ if (ret != 0) {
+ selftest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
+ overlay_path(overlay_nr + i),
+ selftest_path(selftest_nr));
+ return;
+ }
+ }
+
+ selftest(1, "overlay test %d passed\n", 8);
+}
+
+static void __init of_selftest_overlay(void)
+{
+ struct device_node *bus_np = NULL;
+ int ret;
+
+ ret = platform_driver_register(&selftest_driver);
+ if (ret != 0) {
+ selftest(0, "could not register selftest driver\n");
+ goto out;
+ }
+
+ bus_np = of_find_node_by_path(bus_path);
+ if (bus_np == NULL) {
+ selftest(0, "could not find bus_path \"%s\"\n", bus_path);
+ goto out;
+ }
+
+ ret = of_platform_populate(bus_np, of_default_bus_match_table,
+ NULL, NULL);
+ if (ret != 0) {
+ selftest(0, "could not populate bus @ \"%s\"\n", bus_path);
+ goto out;
+ }
+
+ if (!of_path_platform_device_exists(selftest_path(100))) {
+ selftest(0, "could not find selftest0 @ \"%s\"\n",
+ selftest_path(100));
+ goto out;
+ }
+
+ if (of_path_platform_device_exists(selftest_path(101))) {
+ selftest(0, "selftest1 @ \"%s\" should not exist\n",
+ selftest_path(101));
+ goto out;
+ }
+
+ selftest(1, "basic infrastructure of overlays passed");
+
+ /* tests in sequence */
+ of_selftest_overlay_0();
+ of_selftest_overlay_1();
+ of_selftest_overlay_2();
+ of_selftest_overlay_3();
+ of_selftest_overlay_4();
+ of_selftest_overlay_5();
+ of_selftest_overlay_6();
+ of_selftest_overlay_8();
+
+out:
+ of_node_put(bus_np);
+}
+
+#else
+static inline void __init of_selftest_overlay(void) { }
+#endif
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -947,6 +1473,7 @@ static int __init of_selftest(void)
of_selftest_parse_interrupts_extended();
of_selftest_match_node();
of_selftest_platform_populate();
+ of_selftest_overlay();
/* removing selftest data from live tree */
selftest_data_remove();
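Editor's note: the selftest() macro rewritten above now evaluates to the failure status, so a test can record the result and bail out in one statement. A hedged sketch of that pattern, with an illustrative node lookup that is not part of the patch:

    static void __init example_selftest(void)
    {
            struct device_node *np = of_find_node_by_path("/testcase-data");

            /* failure is counted and logged by the macro itself */
            if (selftest(np, "missing testcase data\n"))
                    return;

            of_node_put(np);
    }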
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 26ecdea84fb5..9c68f2aec4ff 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -239,7 +239,6 @@ static struct platform_driver amiga_parallel_driver = {
.remove = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 7c5d86696eed..8f8c9f3aa691 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -412,7 +412,6 @@ MODULE_ALIAS("platform:ax88796-pp");
static struct platform_driver axdrv = {
.driver = {
.name = "ax88796-pp",
- .owner = THIS_MODULE,
},
.probe = parport_ax88796_probe,
.remove = parport_ax88796_remove,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index f721299eb1ba..53d15b30636a 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3011,7 +3011,6 @@ static int parport_pc_platform_probe(struct platform_device *pdev)
static struct platform_driver parport_pc_platform_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "parport_pc",
},
.probe = parport_pc_platform_probe,
@@ -3340,13 +3339,14 @@ static void __exit parport_pc_exit(void)
while (!list_empty(&ports_list)) {
struct parport_pc_private *priv;
struct parport *port;
+ struct device *dev;
priv = list_entry(ports_list.next,
struct parport_pc_private, list);
port = priv->port;
- if (port->dev && port->dev->bus == &platform_bus_type)
- platform_device_unregister(
- to_platform_device(port->dev));
+ dev = port->dev;
parport_pc_unregister_port(port);
+ if (dev && dev->bus == &platform_bus_type)
+ platform_device_unregister(to_platform_device(dev));
}
}
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index ee932004724f..e15b4845f7c6 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -64,6 +64,7 @@ enum parport_pc_pci_cards {
timedia_9079c,
wch_ch353_1s1p,
wch_ch353_2s1p,
+ wch_ch382_2s1p,
sunix_2s1p,
};
@@ -151,6 +152,7 @@ static struct parport_pc_pci cards[] = {
/* timedia_9079c */ { 1, { { 2, 3 }, } },
/* wch_ch353_1s1p*/ { 1, { { 1, -1}, } },
/* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
+ /* wch_ch382_2s1p*/ { 1, { { 2, -1}, } },
/* sunix_2s1p */ { 1, { { 3, -1 }, } },
};
@@ -257,6 +259,7 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
/* WCH CARDS */
{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+ { 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
/*
* More SUNIX variations. At least one of these has part number
@@ -494,6 +497,13 @@ static struct pciserial_board pci_parport_serial_boards[] = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [wch_ch382_2s1p] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
[sunix_2s1p] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 2,
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index dffd6d0bd15b..01cf1c1a841a 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -362,7 +362,6 @@ MODULE_DEVICE_TABLE(of, bpp_match);
static struct platform_driver bpp_sbus_driver = {
.driver = {
.name = "bpp",
- .owner = THIS_MODULE,
.of_match_table = bpp_match,
},
.probe = bpp_probe,
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 8c6969747acd..2d57e19a2cd4 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -446,7 +446,6 @@ static struct platform_driver dra7xx_pcie_driver = {
.remove = __exit_p(dra7xx_pcie_remove),
.driver = {
.name = "dra7-pcie",
- .owner = THIS_MODULE,
.of_match_table = of_dra7xx_pcie_match,
},
};
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 850c9f951a3f..d202b37c3698 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -641,7 +641,6 @@ static struct platform_driver exynos_pcie_driver = {
.remove = __exit_p(exynos_pcie_remove),
.driver = {
.name = "exynos-pcie",
- .owner = THIS_MODULE,
.of_match_table = exynos_pcie_of_match,
},
};
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 18959075d164..6eb1aa75bd37 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -310,7 +310,6 @@ static int gen_pci_probe(struct platform_device *pdev)
static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
- .owner = THIS_MODULE,
.of_match_table = gen_pci_of_match,
},
.probe = gen_pci_probe,
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index d1a26d17b586..fdb95367721e 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -643,7 +643,6 @@ MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
static struct platform_driver imx6_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .owner = THIS_MODULE,
.of_match_table = imx6_pcie_of_match,
},
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 62b9454c86fb..78f79e31ac5c 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -402,7 +402,6 @@ static struct platform_driver ks_pcie_driver __refdata = {
.remove = __exit_p(ks_pcie_remove),
.driver = {
.name = "keystone-pcie",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ks_pcie_of_match),
},
};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index fed3fab132f2..1dd759596b0a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1081,7 +1081,6 @@ MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static struct platform_driver mvebu_pcie_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "mvebu-pcie",
.of_match_table = mvebu_pcie_of_match_table,
/* driver unloading/unbinding currently not supported */
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 3ef854f5a5b5..d9c042febb1a 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -412,7 +412,6 @@ MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
.of_match_table = rcar_pci_of_match,
},
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index feccfa6b6c11..a800ae916394 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -2129,7 +2129,6 @@ put_resources:
static struct platform_driver tegra_pcie_driver = {
.driver = {
.name = "tegra-pcie",
- .owner = THIS_MODULE,
.of_match_table = tegra_pcie_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 2988fe136c1e..b1d0596457c5 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -652,7 +652,6 @@ static const struct of_device_id xgene_pcie_match_table[] = {
static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(xgene_pcie_match_table),
},
.probe = xgene_pcie_probe_bridge,
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 5519e939e412..748786c402fc 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -981,7 +981,6 @@ static int rcar_pcie_probe(struct platform_device *pdev)
static struct platform_driver rcar_pcie_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = rcar_pcie_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 2ca10cc887ee..866465fd3dbf 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -374,7 +374,6 @@ static struct platform_driver spear13xx_pcie_driver __initdata = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
},
};
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 2f50fa5953fd..ef3ebaf9a738 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -947,7 +947,6 @@ static struct of_device_id xilinx_pcie_of_match[] = {
static struct platform_driver xilinx_pcie_driver = {
.driver = {
.name = "xilinx-pcie",
- .owner = THIS_MODULE,
.of_match_table = xilinx_pcie_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a62acc443d5b..aa012fb3834b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -77,11 +77,10 @@ static ssize_t broken_parity_status_store(struct device *dev,
}
static DEVICE_ATTR_RW(broken_parity_status);
-static ssize_t pci_dev_show_local_cpu(struct device *dev, int type,
+static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
struct device_attribute *attr, char *buf)
{
const struct cpumask *mask;
- int len;
#ifdef CONFIG_NUMA
mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
@@ -89,59 +88,41 @@ static ssize_t pci_dev_show_local_cpu(struct device *dev, int type,
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
- len = type ?
- cpumask_scnprintf(buf, PAGE_SIZE-2, mask) :
- cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
-
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+ return cpumap_print_to_pagebuf(list, buf, mask);
}
static ssize_t local_cpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return pci_dev_show_local_cpu(dev, 1, attr, buf);
+ return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);
static ssize_t local_cpulist_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return pci_dev_show_local_cpu(dev, 0, attr, buf);
+ return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);
/*
* PCI Bus Class Devices
*/
-static ssize_t pci_bus_show_cpuaffinity(struct device *dev, int type,
- struct device_attribute *attr,
- char *buf)
-{
- int ret;
- const struct cpumask *cpumask;
-
- cpumask = cpumask_of_pcibus(to_pci_bus(dev));
- ret = type ?
- cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
- cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
- buf[ret++] = '\n';
- buf[ret] = '\0';
- return ret;
-}
-
static ssize_t cpuaffinity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
+ const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+
+ return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);
static ssize_t cpulistaffinity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
+ const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);
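/*
 * Editor's note (not part of the patch): a hedged sketch of the
 * cpumap_print_to_pagebuf() helper that replaces the open-coded
 * cpumask_scnprintf()/cpulist_scnprintf() calls above. The attribute
 * name "example_mask" is hypothetical.
 */
#include <linux/cpumask.h>
#include <linux/device.h>

static ssize_t example_mask_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* list=false prints a hex mask, list=true prints a CPU list */
	return cpumap_print_to_pagebuf(false, buf, cpu_online_mask);
}
static DEVICE_ATTR_RO(example_mask);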
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a7ac72639c52..cab05f31223f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1138,8 +1138,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state);
* @dev: PCI device that we're dealing with
* @state: Saved state returned from pci_store_saved_state()
*/
-static int pci_load_saved_state(struct pci_dev *dev,
- struct pci_saved_state *state)
+int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state)
{
struct pci_cap_saved_data *cap;
@@ -1167,6 +1167,7 @@ static int pci_load_saved_state(struct pci_dev *dev,
dev->state_saved = true;
return 0;
}
+EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
* pci_load_and_free_saved_state - Reload the save state pointed to by state,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 90acb32c85b1..ed6f89b6efe5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -379,6 +379,26 @@ static void quirk_ati_exploding_mce(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
/*
+ * In the AMD NL platform, this device ([1022:7912]) has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+ * claim it.
+ * But the dwc3 driver is a more specific driver for this device, and we'd
+ * prefer to use it instead of xhci. To prevent xhci from claiming the
+ * device, change the class code to 0x0c03fe, which the PCI r3.0 spec
+ * defines as "USB device (not host controller)". The dwc3 driver can then
+ * claim it based on its Vendor and Device ID.
+ */
+static void quirk_amd_nl_class(struct pci_dev *pdev)
+{
+ /*
+ * Use 'USB Device' (0x0c03fe) instead of the class code the PCI header provides
+ */
+ pdev->class = 0x0c03fe;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+ quirk_amd_nl_class);
+
+/*
* Let's make the southbridge information explicit instead
* of having to worry about people probing the ACPI areas,
* for example.. (Yes, it happens, and if you read the wrong
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index b0ce7cdee0c2..910e90bf16c6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -147,7 +147,6 @@ config TCIC
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
depends on MIPS_ALCHEMY && PCMCIA
- select 64BIT_PHYS_ADDR
help
Enable this driver if you want PCMCIA support on your Alchemy
Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
@@ -158,7 +157,6 @@ config PCMCIA_ALCHEMY_DEVBOARD
config PCMCIA_XXS1500
tristate "MyCable XXS1500 PCMCIA socket support"
depends on PCMCIA && MIPS_XXS1500
- select 64BIT_PHYS_ADDR
help
Support for the PCMCIA/CF socket interface on MyCable XXS1500
systems.
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index de24232c5191..bfb799c7b343 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -401,7 +401,6 @@ static int at91_cf_resume(struct platform_device *pdev)
static struct platform_driver at91_cf_driver = {
.driver = {
.name = "at91_cf",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_cf_dt_ids),
},
.probe = at91_cf_probe,
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 971991bab975..bba1dcbb8075 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -304,7 +304,6 @@ static int bfin_cf_remove(struct platform_device *pdev)
static struct platform_driver bfin_cf_driver = {
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
},
.probe = bfin_cf_probe,
.remove = bfin_cf_remove,
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index a31e69ea99f3..4c2fa05b4589 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -574,7 +574,6 @@ static int db1x_pcmcia_socket_remove(struct platform_device *pdev)
static struct platform_driver db1x_pcmcia_socket_driver = {
.driver = {
.name = "db1xxx_pcmcia",
- .owner = THIS_MODULE,
},
.probe = db1x_pcmcia_socket_probe,
.remove = db1x_pcmcia_socket_remove,
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 5ea64d0f61ab..7f9950d324df 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -360,7 +360,6 @@ MODULE_DEVICE_TABLE(of, electra_cf_match);
static struct platform_driver electra_cf_driver = {
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
.of_match_table = electra_cf_match,
},
.probe = electra_cf_probe,
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index e6f3d17dd2b4..a2c138719bac 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1233,7 +1233,6 @@ static struct pccard_operations pcic_operations = {
static struct platform_driver i82365_driver = {
.driver = {
.name = "i82365",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index a26f38c6402a..0075bd7162ed 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -687,7 +687,6 @@ static struct pccard_operations pcc_operations = {
static struct platform_driver pcc_driver = {
.driver = {
.name = "cfc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 296514155cd5..a77e571b08b8 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -664,7 +664,6 @@ static struct pccard_operations pcc_operations = {
static struct platform_driver pcc_driver = {
.driver = {
.name = "pcc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 25c4b1993b3d..8170102d1e93 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -334,7 +334,6 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
static struct platform_driver omap_cf_driver = {
.driver = {
.name = (char *) driver_name,
- .owner = THIS_MODULE,
},
.remove = __exit_p(omap_cf_remove),
};
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index cfec9dd18ff5..984a8ff559d8 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -368,7 +368,6 @@ static struct platform_driver pxa2xx_pcmcia_driver = {
.remove = pxa2xx_drv_pcmcia_remove,
.driver = {
.name = "pxa2xx-pcmcia",
- .owner = THIS_MODULE,
.pm = &pxa2xx_drv_pcmcia_pm_ops,
},
};
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index a76f495953ab..7ac6647d286e 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -172,7 +172,6 @@ static struct platform_driver viper_pcmcia_driver = {
.remove = viper_pcmcia_remove,
.driver = {
.name = "arcom-pcmcia",
- .owner = THIS_MODULE,
},
.id_table = viper_pcmcia_id_table,
};
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index ff8a027a4afb..803945259da8 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,6 +93,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
+ clk_put(sinfo->clk);
kfree(sinfo);
return 0;
}
@@ -100,7 +101,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
static struct platform_driver sa11x0_pcmcia_driver = {
.driver = {
.name = "sa11x0-pcmcia",
- .owner = THIS_MODULE,
},
.probe = sa11x0_drv_pcmcia_probe,
.remove = sa11x0_drv_pcmcia_remove,
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 4d206f4dd67b..12f0dd091477 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -132,7 +132,7 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
.nr = 2,
};
-int pcmcia_badge4_init(struct device *dev)
+int pcmcia_badge4_init(struct sa1111_dev *dev)
{
int ret = -ENODEV;
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 65b02c3e14ce..80b8e9d05275 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -145,6 +145,12 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
return -ENOMEM;
s->soc.nr = ops->first + i;
+ s->soc.clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(s->soc.clk)) {
+ ret = PTR_ERR(s->soc.clk);
+ kfree(s);
+ return ret;
+ }
soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
s->dev = dev;
if (s->soc.nr) {
@@ -197,10 +203,10 @@ static int pcmcia_probe(struct sa1111_dev *dev)
sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
#ifdef CONFIG_SA1100_BADGE4
- pcmcia_badge4_init(&dev->dev);
+ pcmcia_badge4_init(dev);
#endif
#ifdef CONFIG_SA1100_JORNADA720
- pcmcia_jornada720_init(&dev->dev);
+ pcmcia_jornada720_init(dev);
#endif
#ifdef CONFIG_ARCH_LUBBOCK
pcmcia_lubbock_init(dev);
@@ -220,6 +226,7 @@ static int pcmcia_remove(struct sa1111_dev *dev)
for (; s; s = next) {
next = s->next;
soc_pcmcia_remove_one(&s->soc);
+ clk_put(s->soc.clk);
kfree(s);
}
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h
index f6376e34a7e4..e74ecfdc1b26 100644
--- a/drivers/pcmcia/sa1111_generic.h
+++ b/drivers/pcmcia/sa1111_generic.h
@@ -18,8 +18,8 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *);
extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *);
-extern int pcmcia_badge4_init(struct device *);
-extern int pcmcia_jornada720_init(struct device *);
+extern int pcmcia_badge4_init(struct sa1111_dev *);
+extern int pcmcia_jornada720_init(struct sa1111_dev *);
extern int pcmcia_lubbock_init(struct sa1111_dev *);
extern int pcmcia_neponset_init(struct sa1111_dev *);
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index 40e040314503..c2c30580c83f 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -92,10 +92,9 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
.nr = 2,
};
-int pcmcia_jornada720_init(struct device *dev)
+int pcmcia_jornada720_init(struct sa1111_dev *sadev)
{
int ret = -ENODEV;
- struct sa1111_dev *sadev = SA1111_DEV(dev);
if (machine_is_jornada720()) {
unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 54d3089d157b..cf6de2c2b329 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -135,14 +135,16 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
static int
sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
{
- return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0));
+ unsigned long clk = clk_get_rate(skt->clk);
+
+ return sa1100_pcmcia_set_mecr(skt, clk / 1000);
}
static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
- unsigned int clock = cpufreq_get(0);
+ unsigned int clock = clk_get_rate(skt->clk);
unsigned long mecr = MECR;
char *p = buf;
@@ -218,6 +220,11 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
struct skt_dev_info *sinfo;
struct soc_pcmcia_socket *skt;
int i, ret = 0;
+ struct clk *clk;
+
+ clk = clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
sa11xx_drv_pcmcia_ops(ops);
@@ -226,12 +233,14 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
return -ENOMEM;
sinfo->nskt = nr;
+ sinfo->clk = clk;
/* Initialize processor specific parameters */
for (i = 0; i < nr; i++) {
skt = &sinfo->skt[i];
skt->nr = first + i;
+ skt->clk = clk;
soc_pcmcia_init_one(skt, ops, dev);
ret = sa11xx_drv_pcmcia_add_one(skt);
@@ -242,6 +251,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
if (ret) {
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
+ clk_put(clk);
kfree(sinfo);
} else {
dev_set_drvdata(dev, sinfo);
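/*
 * Editor's note (not part of the patch): a small sketch of the clk API
 * pattern adopted above, where the socket clock rate replaces
 * cpufreq_get(0) as the MECR timing reference. The function name is
 * hypothetical.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned int example_bus_clock_khz(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	unsigned long rate;

	if (IS_ERR(clk))
		return 0;

	rate = clk_get_rate(clk);	/* rate in Hz */
	clk_put(clk);

	return rate / 1000;		/* kHz, as sa1100_pcmcia_set_mecr() expects */
}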
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index a2bc6ee1702e..933f4657515b 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -120,6 +120,8 @@ static void __soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt,
if (skt->ops->hw_shutdown)
skt->ops->hw_shutdown(skt);
+
+ clk_disable_unprepare(skt->clk);
}
static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
@@ -131,6 +133,8 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret = 0, i;
+ clk_prepare_enable(skt->clk);
+
if (skt->ops->hw_init) {
ret = skt->ops->hw_init(skt);
if (ret)
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index cbe15fc37411..1ee63e5f0550 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -349,7 +349,6 @@ static int __init get_tcic_id(void)
static struct platform_driver tcic_driver = {
.driver = {
.name = "tcic-pcmcia",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index d98a08612492..21973d55a055 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -709,7 +709,6 @@ __setup("vrc4171_card=", vrc4171_card_setup);
static struct platform_driver vrc4171_card_driver = {
.driver = {
.name = vrc4171_card_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 95f5b270ad44..4c04360f378b 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -314,7 +314,6 @@ static int xxs1500_pcmcia_remove(struct platform_device *pdev)
static struct platform_driver xxs1500_pcmcia_socket_driver = {
.driver = {
.name = "xxs1500_pcmcia",
- .owner = THIS_MODULE,
},
.probe = xxs1500_pcmcia_probe,
.remove = xxs1500_pcmcia_remove,
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 2a436e607f99..ccad8809ecb1 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -15,6 +15,13 @@ config GENERIC_PHY
phy users can obtain reference to the PHY. All the users of this
framework should select this config.
+config PHY_BERLIN_USB
+ tristate "Marvell Berlin USB PHY Driver"
+ depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the USB PHY on Marvell Berlin SoCs.
+
config PHY_BERLIN_SATA
tristate "Marvell Berlin SATA PHY driver"
depends on ARCH_BERLIN && HAS_IOMEM && OF
@@ -22,6 +29,12 @@ config PHY_BERLIN_SATA
help
Enable this to support the SATA PHY on Marvell Berlin SoCs.
+config ARMADA375_USBCLUSTER_PHY
+ def_bool y
+ depends on MACH_ARMADA_375 || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+
config PHY_EXYNOS_MIPI_VIDEO
tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
depends on HAS_IOMEM
@@ -38,6 +51,14 @@ config PHY_MVEBU_SATA
depends on OF
select GENERIC_PHY
+config PHY_MIPHY28LP
+ tristate "STMicroelectronics MIPHY28LP PHY driver for STiH407"
+ depends on ARCH_STI
+ select GENERIC_PHY
+ help
+ Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
+ that is part of the STMicroelectronics STiH407 SoC.
+
config PHY_MIPHY365X
tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
depends on ARCH_STI
@@ -193,7 +214,7 @@ config PHY_EXYNOS5250_USB2
config PHY_EXYNOS5_USBDRD
tristate "Exynos5 SoC series USB DRD PHY driver"
- depends on ARCH_EXYNOS5 && OF
+ depends on ARCH_EXYNOS && OF
depends on HAS_IOMEM
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c4590fce082f..aa74f961e44e 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -3,11 +3,14 @@
#
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
+obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
+obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
+obj-$(CONFIG_PHY_MIPHY28LP) += phy-miphy28lp.o
obj-$(CONFIG_PHY_MIPHY365X) += phy-miphy365x.o
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
new file mode 100644
index 000000000000..ac7d99d01cb3
--- /dev/null
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -0,0 +1,158 @@
+/*
+ * USB cluster support for Armada 375 platform.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2 or later. This program is licensed "as is"
+ * without any warranty of any kind, whether express or implied.
+ *
+ * Armada 375 comes with a USB2 host and device controller and a
+ * USB3 controller. The USB cluster control register allows managing
+ * features common to both USB controllers.
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define USB2_PHY_CONFIG_DISABLE BIT(0)
+
+struct armada375_cluster_phy {
+ struct phy *phy;
+ void __iomem *reg;
+ bool use_usb3;
+ int phy_provided;
+};
+
+static int armada375_usb_phy_init(struct phy *phy)
+{
+ struct armada375_cluster_phy *cluster_phy;
+ u32 reg;
+
+ cluster_phy = dev_get_drvdata(phy->dev.parent);
+ if (!cluster_phy)
+ return -ENODEV;
+
+ reg = readl(cluster_phy->reg);
+ if (cluster_phy->use_usb3)
+ reg |= USB2_PHY_CONFIG_DISABLE;
+ else
+ reg &= ~USB2_PHY_CONFIG_DISABLE;
+ writel(reg, cluster_phy->reg);
+
+ return 0;
+}
+
+static struct phy_ops armada375_usb_phy_ops = {
+ .init = armada375_usb_phy_init,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Only one controller can use this PHY. Two controllers should never
+ * request it, but if that happens we hand the phy to the first one and
+ * return an error to the next. For USB2 that error must be one that
+ * devm_phy_optional_get() does not treat as "no phy", i.e. different
+ * from ENODEV. For USB3 the PHY is still optional, so we use ENODEV.
+ */
+static struct phy *armada375_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct armada375_cluster_phy *cluster_phy = dev_get_drvdata(dev);
+
+ if (!cluster_phy)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Either the phy has never been requested, in which case the
+ * first USB controller claiming it gets it, or it has already
+ * been requested, in which case it may only be reused with the
+ * same configuration.
+ */
+ if (WARN_ON((cluster_phy->phy_provided != PHY_NONE) &&
+ (cluster_phy->phy_provided != args->args[0]))) {
+ dev_err(dev, "This PHY has already been provided!\n");
+ dev_err(dev, "Check your device tree, only one controller can use it\n.");
+ if (args->args[0] == PHY_TYPE_USB2)
+ return ERR_PTR(-EBUSY);
+ else
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (args->args[0] == PHY_TYPE_USB2)
+ cluster_phy->use_usb3 = false;
+ else if (args->args[0] == PHY_TYPE_USB3)
+ cluster_phy->use_usb3 = true;
+ else {
+ dev_err(dev, "Invalid PHY mode\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* Store which phy mode is used for next test */
+ cluster_phy->phy_provided = args->args[0];
+
+ return cluster_phy->phy;
+}
+
+static int armada375_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+ void __iomem *usb_cluster_base;
+ struct resource *res;
+ struct armada375_cluster_phy *cluster_phy;
+
+ cluster_phy = devm_kzalloc(dev, sizeof(*cluster_phy), GFP_KERNEL);
+ if (!cluster_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ usb_cluster_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(usb_cluster_base))
+ return PTR_ERR(usb_cluster_base);
+
+ phy = devm_phy_create(dev, NULL, &armada375_usb_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ cluster_phy->phy = phy;
+ cluster_phy->reg = usb_cluster_base;
+
+ dev_set_drvdata(dev, cluster_phy);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ armada375_usb_phy_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id of_usb_cluster_table[] = {
+ { .compatible = "marvell,armada-375-usb-cluster", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, of_usb_cluster_table);
+
+static struct platform_driver armada375_usb_phy_driver = {
+ .probe = armada375_usb_phy_probe,
+ .driver = {
+ .of_match_table = of_usb_cluster_table,
+ .name = "armada-375-usb-cluster",
+ .owner = THIS_MODULE,
+ }
+};
+module_platform_driver(armada375_usb_phy_driver);
+
+MODULE_DESCRIPTION("Armada 375 USB cluster driver");
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
index c1e0ca335c0e..ef2dc1aab2b9 100644
--- a/drivers/phy/phy-bcm-kona-usb2.c
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -117,7 +117,7 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
- gphy = devm_phy_create(dev, NULL, &ops, NULL);
+ gphy = devm_phy_create(dev, NULL, &ops);
if (IS_ERR(gphy))
return PTR_ERR(gphy);
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 69ced52d72aa..099eee8851e5 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -30,7 +30,8 @@
#define MBUS_WRITE_REQUEST_SIZE_128 (BIT(2) << 16)
#define MBUS_READ_REQUEST_SIZE_128 (BIT(2) << 19)
-#define PHY_BASE 0x200
+#define BG2_PHY_BASE 0x080
+#define BG2Q_PHY_BASE 0x200
/* register 0x01 */
#define REF_FREF_SEL_25 BIT(0)
@@ -61,15 +62,16 @@ struct phy_berlin_priv {
struct clk *clk;
struct phy_berlin_desc **phys;
unsigned nphys;
+ u32 phy_base;
};
-static inline void phy_berlin_sata_reg_setbits(void __iomem *ctrl_reg, u32 reg,
- u32 mask, u32 val)
+static inline void phy_berlin_sata_reg_setbits(void __iomem *ctrl_reg,
+ u32 phy_base, u32 reg, u32 mask, u32 val)
{
u32 regval;
/* select register */
- writel(PHY_BASE + reg, ctrl_reg + PORT_VSR_ADDR);
+ writel(phy_base + reg, ctrl_reg + PORT_VSR_ADDR);
/* set bits */
regval = readl(ctrl_reg + PORT_VSR_DATA);
@@ -103,17 +105,20 @@ static int phy_berlin_sata_power_on(struct phy *phy)
writel(regval, priv->base + HOST_VSA_DATA);
/* set PHY mode and ref freq to 25 MHz */
- phy_berlin_sata_reg_setbits(ctrl_reg, 0x1, 0xff,
- REF_FREF_SEL_25 | PHY_MODE_SATA);
+ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
+ 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA);
/* set PHY up to 6 Gbps */
- phy_berlin_sata_reg_setbits(ctrl_reg, 0x25, 0xc00, PHY_GEN_MAX_6_0);
+ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
+ 0x0c00, PHY_GEN_MAX_6_0);
/* set 40 bits width */
- phy_berlin_sata_reg_setbits(ctrl_reg, 0x23, 0xc00, DATA_BIT_WIDTH_40);
+ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x23,
+ 0x0c00, DATA_BIT_WIDTH_40);
/* use max pll rate */
- phy_berlin_sata_reg_setbits(ctrl_reg, 0x2, 0x0, USE_MAX_PLL_RATE);
+ phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x02,
+ 0x0000, USE_MAX_PLL_RATE);
/* set Gen3 controller speed */
regval = readl(ctrl_reg + PORT_SCR_CTL);
@@ -218,6 +223,11 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
if (!priv->phys)
return -ENOMEM;
+ if (of_device_is_compatible(dev->of_node, "marvell,berlin2-sata-phy"))
+ priv->phy_base = BG2_PHY_BASE;
+ else
+ priv->phy_base = BG2Q_PHY_BASE;
+
dev_set_drvdata(dev, priv);
spin_lock_init(&priv->lock);
@@ -239,7 +249,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
if (!phy_desc)
return -ENOMEM;
- phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops, NULL);
+ phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY %d\n", phy_id);
return PTR_ERR(phy);
@@ -258,13 +268,11 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
phy_provider =
devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id phy_berlin_sata_of_match[] = {
+ { .compatible = "marvell,berlin2-sata-phy" },
{ .compatible = "marvell,berlin2q-sata-phy" },
{ },
};
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
new file mode 100644
index 000000000000..c8a8d53a6ece
--- /dev/null
+++ b/drivers/phy/phy-berlin-usb.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2014 Marvell Technology Group Ltd.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ * Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define USB_PHY_PLL 0x04
+#define USB_PHY_PLL_CONTROL 0x08
+#define USB_PHY_TX_CTRL0 0x10
+#define USB_PHY_TX_CTRL1 0x14
+#define USB_PHY_TX_CTRL2 0x18
+#define USB_PHY_RX_CTRL 0x20
+#define USB_PHY_ANALOG 0x34
+
+/* USB_PHY_PLL */
+#define CLK_REF_DIV(x) ((x) << 4)
+#define FEEDBACK_CLK_DIV(x) ((x) << 8)
+
+/* USB_PHY_PLL_CONTROL */
+#define CLK_STABLE BIT(0)
+#define PLL_CTRL_PIN BIT(1)
+#define PLL_CTRL_REG BIT(2)
+#define PLL_ON BIT(3)
+#define PHASE_OFF_TOL_125 (0x0 << 5)
+#define PHASE_OFF_TOL_250 BIT(5)
+#define KVC0_CALIB (0x0 << 9)
+#define KVC0_REG_CTRL BIT(9)
+#define KVC0_HIGH (0x0 << 10)
+#define KVC0_LOW (0x3 << 10)
+#define CLK_BLK_EN BIT(13)
+
+/* USB_PHY_TX_CTRL0 */
+#define EXT_HS_RCAL_EN BIT(3)
+#define EXT_FS_RCAL_EN BIT(4)
+#define IMPCAL_VTH_DIV(x) ((x) << 5)
+#define EXT_RS_RCAL_DIV(x) ((x) << 8)
+#define EXT_FS_RCAL_DIV(x) ((x) << 12)
+
+/* USB_PHY_TX_CTRL1 */
+#define TX_VDD15_14 (0x0 << 4)
+#define TX_VDD15_15 BIT(4)
+#define TX_VDD15_16 (0x2 << 4)
+#define TX_VDD15_17 (0x3 << 4)
+#define TX_VDD12_VDD (0x0 << 6)
+#define TX_VDD12_11 BIT(6)
+#define TX_VDD12_12 (0x2 << 6)
+#define TX_VDD12_13 (0x3 << 6)
+#define LOW_VDD_EN BIT(8)
+#define TX_OUT_AMP(x) ((x) << 9)
+
+/* USB_PHY_TX_CTRL2 */
+#define TX_CHAN_CTRL_REG(x) ((x) << 0)
+#define DRV_SLEWRATE(x) ((x) << 4)
+#define IMP_CAL_FS_HS_DLY_0 (0x0 << 6)
+#define IMP_CAL_FS_HS_DLY_1 BIT(6)
+#define IMP_CAL_FS_HS_DLY_2 (0x2 << 6)
+#define IMP_CAL_FS_HS_DLY_3 (0x3 << 6)
+#define FS_DRV_EN_MASK(x) ((x) << 8)
+#define HS_DRV_EN_MASK(x) ((x) << 12)
+
+/* USB_PHY_RX_CTRL */
+#define PHASE_FREEZE_DLY_2_CL (0x0 << 0)
+#define PHASE_FREEZE_DLY_4_CL BIT(0)
+#define ACK_LENGTH_8_CL (0x0 << 2)
+#define ACK_LENGTH_12_CL BIT(2)
+#define ACK_LENGTH_16_CL (0x2 << 2)
+#define ACK_LENGTH_20_CL (0x3 << 2)
+#define SQ_LENGTH_3 (0x0 << 4)
+#define SQ_LENGTH_6 BIT(4)
+#define SQ_LENGTH_9 (0x2 << 4)
+#define SQ_LENGTH_12 (0x3 << 4)
+#define DISCON_THRESHOLD_260 (0x0 << 6)
+#define DISCON_THRESHOLD_270 BIT(6)
+#define DISCON_THRESHOLD_280 (0x2 << 6)
+#define DISCON_THRESHOLD_290 (0x3 << 6)
+#define SQ_THRESHOLD(x) ((x) << 8)
+#define LPF_COEF(x) ((x) << 12)
+#define INTPL_CUR_10 (0x0 << 14)
+#define INTPL_CUR_20 BIT(14)
+#define INTPL_CUR_30 (0x2 << 14)
+#define INTPL_CUR_40 (0x3 << 14)
+
+/* USB_PHY_ANALOG */
+#define ANA_PWR_UP BIT(1)
+#define ANA_PWR_DOWN BIT(2)
+#define V2I_VCO_RATIO(x) ((x) << 7)
+#define R_ROTATE_90 (0x0 << 10)
+#define R_ROTATE_0 BIT(10)
+#define MODE_TEST_EN BIT(11)
+#define ANA_TEST_DC_CTRL(x) ((x) << 12)
+
+#define to_phy_berlin_usb_priv(p) \
+ container_of((p), struct phy_berlin_usb_priv, phy)
+
+static const u32 phy_berlin_pll_dividers[] = {
+ /* Berlin 2 */
+ CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
+ /* Berlin 2CD */
+ CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+};
+
+struct phy_berlin_usb_priv {
+ void __iomem *base;
+ struct phy *phy;
+ struct reset_control *rst_ctrl;
+ u32 pll_divider;
+};
+
+static int phy_berlin_usb_power_on(struct phy *phy)
+{
+ struct phy_berlin_usb_priv *priv = dev_get_drvdata(phy->dev.parent);
+
+ reset_control_reset(priv->rst_ctrl);
+
+ writel(priv->pll_divider,
+ priv->base + USB_PHY_PLL);
+ writel(CLK_STABLE | PLL_CTRL_REG | PHASE_OFF_TOL_250 | KVC0_REG_CTRL |
+ CLK_BLK_EN, priv->base + USB_PHY_PLL_CONTROL);
+ writel(V2I_VCO_RATIO(0x5) | R_ROTATE_0 | ANA_TEST_DC_CTRL(0x5),
+ priv->base + USB_PHY_ANALOG);
+ writel(PHASE_FREEZE_DLY_4_CL | ACK_LENGTH_16_CL | SQ_LENGTH_12 |
+ DISCON_THRESHOLD_260 | SQ_THRESHOLD(0xa) | LPF_COEF(0x2) |
+ INTPL_CUR_30, priv->base + USB_PHY_RX_CTRL);
+
+ writel(TX_VDD12_13 | TX_OUT_AMP(0x3), priv->base + USB_PHY_TX_CTRL1);
+ writel(EXT_HS_RCAL_EN | IMPCAL_VTH_DIV(0x3) | EXT_RS_RCAL_DIV(0x4),
+ priv->base + USB_PHY_TX_CTRL0);
+
+ writel(EXT_HS_RCAL_EN | IMPCAL_VTH_DIV(0x3) | EXT_RS_RCAL_DIV(0x4) |
+ EXT_FS_RCAL_DIV(0x2), priv->base + USB_PHY_TX_CTRL0);
+
+ writel(EXT_HS_RCAL_EN | IMPCAL_VTH_DIV(0x3) | EXT_RS_RCAL_DIV(0x4),
+ priv->base + USB_PHY_TX_CTRL0);
+ writel(TX_CHAN_CTRL_REG(0xf) | DRV_SLEWRATE(0x3) | IMP_CAL_FS_HS_DLY_3 |
+ FS_DRV_EN_MASK(0xd), priv->base + USB_PHY_TX_CTRL2);
+
+ return 0;
+}
+
+static struct phy_ops phy_berlin_usb_ops = {
+ .power_on = phy_berlin_usb_power_on,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id phy_berlin_sata_of_match[] = {
+ {
+ .compatible = "marvell,berlin2-usb-phy",
+ .data = &phy_berlin_pll_dividers[0],
+ },
+ {
+ .compatible = "marvell,berlin2cd-usb-phy",
+ .data = &phy_berlin_pll_dividers[1],
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
+
+static int phy_berlin_usb_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match =
+ of_match_device(phy_berlin_sata_of_match, &pdev->dev);
+ struct phy_berlin_usb_priv *priv;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->rst_ctrl = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst_ctrl))
+ return PTR_ERR(priv->rst_ctrl);
+
+ priv->pll_divider = *((u32 *)match->data);
+
+ priv->phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static struct platform_driver phy_berlin_usb_driver = {
+ .probe = phy_berlin_usb_probe,
+ .driver = {
+ .name = "phy-berlin-usb",
+ .owner = THIS_MODULE,
+ .of_match_table = phy_berlin_sata_of_match,
+ },
+};
+module_platform_driver(phy_berlin_usb_driver);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Berlin PHY driver for USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index ff5eec5af817..a12d35338313 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -26,6 +26,7 @@
static struct class *phy_class;
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
+static LIST_HEAD(phys);
static DEFINE_IDA(phy_ida);
static void devm_phy_release(struct device *dev, void *res)
@@ -54,34 +55,79 @@ static int devm_phy_match(struct device *dev, void *res, void *match_data)
return res == match_data;
}
-static struct phy *phy_lookup(struct device *device, const char *port)
+/**
+ * phy_create_lookup() - allocate and register PHY/device association
+ * @phy: the phy of the association
+ * @con_id: connection ID string on device
+ * @dev_id: the device of the association
+ *
+ * Creates and registers phy_lookup entry.
+ */
+int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
- unsigned int count;
- struct phy *phy;
- struct device *dev;
- struct phy_consumer *consumers;
- struct class_dev_iter iter;
+ struct phy_lookup *pl;
- class_dev_iter_init(&iter, phy_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- phy = to_phy(dev);
+ if (!phy || !dev_id || !con_id)
+ return -EINVAL;
- if (!phy->init_data)
- continue;
- count = phy->init_data->num_consumers;
- consumers = phy->init_data->consumers;
- while (count--) {
- if (!strcmp(consumers->dev_name, dev_name(device)) &&
- !strcmp(consumers->port, port)) {
- class_dev_iter_exit(&iter);
- return phy;
- }
- consumers++;
+ pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+ if (!pl)
+ return -ENOMEM;
+
+ pl->dev_id = dev_id;
+ pl->con_id = con_id;
+ pl->phy = phy;
+
+ mutex_lock(&phy_provider_mutex);
+ list_add_tail(&pl->node, &phys);
+ mutex_unlock(&phy_provider_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_create_lookup);
+
+/**
+ * phy_remove_lookup() - find and remove PHY/device association
+ * @phy: the phy of the association
+ * @con_id: connection ID string on device
+ * @dev_id: the device of the association
+ *
+ * Finds and unregisters phy_lookup entry that was created with
+ * phy_create_lookup().
+ */
+void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
+{
+ struct phy_lookup *pl;
+
+ if (!phy || !dev_id || !con_id)
+ return;
+
+ mutex_lock(&phy_provider_mutex);
+ list_for_each_entry(pl, &phys, node)
+ if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
+ !strcmp(pl->con_id, con_id)) {
+ list_del(&pl->node);
+ kfree(pl);
+ break;
}
- }
+ mutex_unlock(&phy_provider_mutex);
+}
+EXPORT_SYMBOL_GPL(phy_remove_lookup);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+static struct phy *phy_find(struct device *dev, const char *con_id)
+{
+ const char *dev_id = dev_name(dev);
+ struct phy_lookup *p, *pl = NULL;
+
+ mutex_lock(&phy_provider_mutex);
+ list_for_each_entry(p, &phys, node)
+ if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
+ pl = p;
+ break;
+ }
+ mutex_unlock(&phy_provider_mutex);
+
+ return pl ? pl->phy : ERR_PTR(-ENODEV);
}
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
@@ -414,21 +460,13 @@ struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
{
struct phy *phy;
struct class_dev_iter iter;
- struct device_node *node = dev->of_node;
- struct device_node *child;
class_dev_iter_init(&iter, phy_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
phy = to_phy(dev);
- if (node != phy->dev.of_node) {
- for_each_child_of_node(node, child) {
- if (child == phy->dev.of_node)
- goto phy_found;
- }
+ if (args->np != phy->dev.of_node)
continue;
- }
-phy_found:
class_dev_iter_exit(&iter);
return phy;
}
@@ -463,7 +501,7 @@ struct phy *phy_get(struct device *dev, const char *string)
string);
phy = _of_phy_get(dev->of_node, index);
} else {
- phy = phy_lookup(dev, string);
+ phy = phy_find(dev, string);
}
if (IS_ERR(phy))
return phy;
@@ -588,13 +626,11 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get);
* @dev: device that is creating the new phy
* @node: device node of the phy
* @ops: function pointers for performing phy operations
- * @init_data: contains the list of PHY consumers or NULL
*
* Called to create a phy using phy framework.
*/
struct phy *phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
int ret;
int id;
@@ -632,7 +668,6 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
phy->dev.of_node = node ?: dev->of_node;
phy->id = id;
phy->ops = ops;
- phy->init_data = init_data;
ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
if (ret)
@@ -667,7 +702,6 @@ EXPORT_SYMBOL_GPL(phy_create);
* @dev: device that is creating the new phy
* @node: device node of the phy
* @ops: function pointers for performing phy operations
- * @init_data: contains the list of PHY consumers or NULL
*
* Creates a new PHY device adding it to the PHY class.
* While at that, it also associates the device with the phy using devres.
@@ -675,8 +709,7 @@ EXPORT_SYMBOL_GPL(phy_create);
* then, devres data is freed.
*/
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
struct phy **ptr, *phy;
@@ -684,7 +717,7 @@ struct phy *devm_phy_create(struct device *dev, struct device_node *node,
if (!ptr)
return ERR_PTR(-ENOMEM);
- phy = phy_create(dev, node, ops, init_data);
+ phy = phy_create(dev, node, ops);
if (!IS_ERR(phy)) {
*ptr = phy;
devres_add(dev, ptr);
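/*
 * Editor's note (not part of the patch): a hedged sketch of the new
 * phy_create_lookup()/phy_get() pairing that replaces the old init_data
 * consumer lists. The device and connection names are hypothetical.
 */
#include <linux/phy/phy.h>

/* board/platform code: associate an existing phy with a consumer device */
static int example_register_phy_lookup(struct phy *phy)
{
	return phy_create_lookup(phy, "usb2-phy", "example-ehci.0");
}

/* consumer driver: resolved via phy_find() inside phy_get() */
static struct phy *example_claim_phy(struct device *dev)
{
	return phy_get(dev, "usb2-phy");
}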
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 84f49e5a3f24..f86cbe68ddaf 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -112,7 +112,7 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
match = of_match_node(exynos_dp_video_phy_of_match, dev->of_node);
state->drvdata = match->data;
- phy = devm_phy_create(dev, NULL, &exynos_dp_video_phy_ops, NULL);
+ phy = devm_phy_create(dev, NULL, &exynos_dp_video_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
return PTR_ERR(phy);
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 6a9bef138617..943e0f88a120 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -137,7 +137,7 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL,
- &exynos_mipi_video_phy_ops, NULL);
+ &exynos_mipi_video_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY %d\n", i);
return PTR_ERR(phy);
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index f756aca871db..04374018425f 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -141,6 +141,7 @@ struct exynos5_usbdrd_phy_drvdata {
const struct exynos5_usbdrd_phy_config *phy_cfg;
u32 pmu_offset_usbdrd0_phy;
u32 pmu_offset_usbdrd1_phy;
+ bool has_common_clk_gate;
};
/**
@@ -148,6 +149,9 @@ struct exynos5_usbdrd_phy_drvdata {
* @dev: pointer to device instance of this platform device
* @reg_phy: usb phy controller register memory base
* @clk: phy clock for register access
+ * @pipeclk: clock for pipe3 phy
+ * @utmiclk: clock for utmi+ phy
+ * @itpclk: clock for ITP generation
* @drv_data: pointer to SoC level driver data structure
* @phys[]: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
@@ -155,12 +159,16 @@ struct exynos5_usbdrd_phy_drvdata {
* reference clocks' for SS and HS operations
* @ref_clk: reference clock to PHY block from which PHY's
* operational clocks are derived
- * @ref_rate: rate of above reference clock
+ * @vbus: VBUS regulator for the phy
+ * @vbus_boost: Boost regulator for VBUS, present on a few Exynos boards
*/
struct exynos5_usbdrd_phy {
struct device *dev;
void __iomem *reg_phy;
struct clk *clk;
+ struct clk *pipeclk;
+ struct clk *utmiclk;
+ struct clk *itpclk;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
struct phy_usb_instance {
struct phy *phy;
@@ -172,6 +180,7 @@ struct exynos5_usbdrd_phy {
u32 extrefclk;
struct clk *ref_clk;
struct regulator *vbus;
+ struct regulator *vbus_boost;
};
static inline
@@ -447,13 +456,27 @@ static int exynos5_usbdrd_phy_power_on(struct phy *phy)
dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n");
clk_prepare_enable(phy_drd->ref_clk);
+ if (!phy_drd->drv_data->has_common_clk_gate) {
+ clk_prepare_enable(phy_drd->pipeclk);
+ clk_prepare_enable(phy_drd->utmiclk);
+ clk_prepare_enable(phy_drd->itpclk);
+ }
/* Enable VBUS supply */
+ if (phy_drd->vbus_boost) {
+ ret = regulator_enable(phy_drd->vbus_boost);
+ if (ret) {
+ dev_err(phy_drd->dev,
+ "Failed to enable VBUS boost supply\n");
+ goto fail_vbus;
+ }
+ }
+
if (phy_drd->vbus) {
ret = regulator_enable(phy_drd->vbus);
if (ret) {
dev_err(phy_drd->dev, "Failed to enable VBUS supply\n");
- goto fail_vbus;
+ goto fail_vbus_boost;
}
}
@@ -462,8 +485,17 @@ static int exynos5_usbdrd_phy_power_on(struct phy *phy)
return 0;
+fail_vbus_boost:
+ if (phy_drd->vbus_boost)
+ regulator_disable(phy_drd->vbus_boost);
+
fail_vbus:
clk_disable_unprepare(phy_drd->ref_clk);
+ if (!phy_drd->drv_data->has_common_clk_gate) {
+ clk_disable_unprepare(phy_drd->itpclk);
+ clk_disable_unprepare(phy_drd->utmiclk);
+ clk_disable_unprepare(phy_drd->pipeclk);
+ }
return ret;
}
@@ -481,8 +513,15 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy)
/* Disable VBUS supply */
if (phy_drd->vbus)
regulator_disable(phy_drd->vbus);
+ if (phy_drd->vbus_boost)
+ regulator_disable(phy_drd->vbus_boost);
clk_disable_unprepare(phy_drd->ref_clk);
+ if (!phy_drd->drv_data->has_common_clk_gate) {
+ clk_disable_unprepare(phy_drd->itpclk);
+ clk_disable_unprepare(phy_drd->pipeclk);
+ clk_disable_unprepare(phy_drd->utmiclk);
+ }
return 0;
}
@@ -506,6 +545,57 @@ static struct phy_ops exynos5_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
+static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
+{
+ unsigned long ref_rate;
+ int ret;
+
+ phy_drd->clk = devm_clk_get(phy_drd->dev, "phy");
+ if (IS_ERR(phy_drd->clk)) {
+ dev_err(phy_drd->dev, "Failed to get phy clock\n");
+ return PTR_ERR(phy_drd->clk);
+ }
+
+ phy_drd->ref_clk = devm_clk_get(phy_drd->dev, "ref");
+ if (IS_ERR(phy_drd->ref_clk)) {
+ dev_err(phy_drd->dev, "Failed to get phy reference clock\n");
+ return PTR_ERR(phy_drd->ref_clk);
+ }
+ ref_rate = clk_get_rate(phy_drd->ref_clk);
+
+ ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
+ if (ret) {
+ dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n",
+ ref_rate);
+ return ret;
+ }
+
+ if (!phy_drd->drv_data->has_common_clk_gate) {
+ phy_drd->pipeclk = devm_clk_get(phy_drd->dev, "phy_pipe");
+ if (IS_ERR(phy_drd->pipeclk)) {
+ dev_info(phy_drd->dev,
+ "PIPE3 phy operational clock not specified\n");
+ phy_drd->pipeclk = NULL;
+ }
+
+ phy_drd->utmiclk = devm_clk_get(phy_drd->dev, "phy_utmi");
+ if (IS_ERR(phy_drd->utmiclk)) {
+ dev_info(phy_drd->dev,
+ "UTMI phy operational clock not specified\n");
+ phy_drd->utmiclk = NULL;
+ }
+
+ phy_drd->itpclk = devm_clk_get(phy_drd->dev, "itp");
+ if (IS_ERR(phy_drd->itpclk)) {
+ dev_info(phy_drd->dev,
+ "ITP clock from main OSC not specified\n");
+ phy_drd->itpclk = NULL;
+ }
+ }
+
+ return 0;
+}
+
static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
@@ -525,11 +615,19 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL,
+ .has_common_clk_gate = true,
};
static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
+ .has_common_clk_gate = true,
+};
+
+static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
+ .phy_cfg = phy_cfg_exynos5,
+ .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
+ .has_common_clk_gate = false,
};
static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
@@ -539,6 +637,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
}, {
.compatible = "samsung,exynos5420-usbdrd-phy",
.data = &exynos5420_usbdrd_phy
+ }, {
+ .compatible = "samsung,exynos7-usbdrd-phy",
+ .data = &exynos7_usbdrd_phy
},
{ },
};
@@ -555,7 +656,6 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
const struct exynos5_usbdrd_phy_drvdata *drv_data;
struct regmap *reg_pmu;
u32 pmu_offset;
- unsigned long ref_rate;
int i, ret;
int channel;
@@ -576,23 +676,9 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
drv_data = match->data;
phy_drd->drv_data = drv_data;
- phy_drd->clk = devm_clk_get(dev, "phy");
- if (IS_ERR(phy_drd->clk)) {
- dev_err(dev, "Failed to get clock of phy controller\n");
- return PTR_ERR(phy_drd->clk);
- }
-
- phy_drd->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(phy_drd->ref_clk)) {
- dev_err(dev, "Failed to get reference clock of usbdrd phy\n");
- return PTR_ERR(phy_drd->ref_clk);
- }
- ref_rate = clk_get_rate(phy_drd->ref_clk);
-
- ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
+ ret = exynos5_usbdrd_phy_clk_handle(phy_drd);
if (ret) {
- dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n",
- ref_rate);
+ dev_err(dev, "Failed to initialize clocks\n");
return ret;
}
@@ -622,7 +708,7 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
break;
}
- /* Get Vbus regulator */
+ /* Get Vbus regulators */
phy_drd->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(phy_drd->vbus)) {
ret = PTR_ERR(phy_drd->vbus);
@@ -633,12 +719,21 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
phy_drd->vbus = NULL;
}
+ phy_drd->vbus_boost = devm_regulator_get(dev, "vbus-boost");
+ if (IS_ERR(phy_drd->vbus_boost)) {
+ ret = PTR_ERR(phy_drd->vbus_boost);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ dev_warn(dev, "Failed to get VBUS boost supply regulator\n");
+ phy_drd->vbus_boost = NULL;
+ }
+
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL,
- &exynos5_usbdrd_phy_ops,
- NULL);
+ &exynos5_usbdrd_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "Failed to create usbdrd_phy phy\n");
return PTR_ERR(phy);
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/phy-exynos5250-sata.c
index 54cf4ae60d29..bc858cc800a1 100644
--- a/drivers/phy/phy-exynos5250-sata.c
+++ b/drivers/phy/phy-exynos5250-sata.c
@@ -210,7 +210,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
return ret;
}
- sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops, NULL);
+ sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
if (IS_ERR(sata_phy->phy)) {
clk_disable_unprepare(sata_phy->phyclk);
dev_err(dev, "failed to create PHY\n");
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index d5d978085c6d..34915b4202f1 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -156,7 +156,7 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->peri_ctrl))
priv->peri_ctrl = NULL;
- phy = devm_phy_create(dev, NULL, &hix5hd2_sata_phy_ops, NULL);
+ phy = devm_phy_create(dev, NULL, &hix5hd2_sata_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY\n");
return PTR_ERR(phy);
@@ -164,10 +164,7 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, priv);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id hix5hd2_sata_phy_of_match[] = {
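Editor's note: the one-line devm_phy_create() changes repeated throughout this series follow from a PHY core API change in this cycle: the trailing struct phy_init_data * argument was dropped, so providers now pass only the device, an optional device_node and the phy_ops. A minimal before/after sketch (illustrative provider code, not taken from any of these drivers; example_phy_ops is a placeholder name):

	/* old signature: devm_phy_create(dev, node, ops, init_data) */
	phy = devm_phy_create(dev, NULL, &example_phy_ops, NULL);

	/* new signature: the init_data parameter is gone */
	phy = devm_phy_create(dev, NULL, &example_phy_ops);
	if (IS_ERR(phy))
		return PTR_ERR(phy);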
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
new file mode 100644
index 000000000000..e34da13885e8
--- /dev/null
+++ b/drivers/phy/phy-miphy28lp.c
@@ -0,0 +1,1283 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * STMicroelectronics PHY driver MiPHY28lp (for SoC STiH407).
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy.h>
+
+/* MiPHY registers */
+#define MIPHY_CONF_RESET 0x00
+#define RST_APPLI_SW BIT(0)
+#define RST_CONF_SW BIT(1)
+#define RST_MACRO_SW BIT(2)
+
+#define MIPHY_RESET 0x01
+#define RST_PLL_SW BIT(0)
+#define RST_COMP_SW BIT(2)
+
+#define MIPHY_STATUS_1 0x02
+#define PHY_RDY BIT(0)
+#define HFC_RDY BIT(1)
+#define HFC_PLL BIT(2)
+
+#define MIPHY_CONTROL 0x04
+#define TERM_EN_SW BIT(2)
+#define DIS_LINK_RST BIT(3)
+#define AUTO_RST_RX BIT(4)
+#define PX_RX_POL BIT(5)
+
+#define MIPHY_BOUNDARY_SEL 0x0a
+#define TX_SEL BIT(6)
+#define SSC_SEL BIT(4)
+#define GENSEL_SEL BIT(0)
+
+#define MIPHY_BOUNDARY_1 0x0b
+#define MIPHY_BOUNDARY_2 0x0c
+#define SSC_EN_SW BIT(2)
+
+#define MIPHY_PLL_CLKREF_FREQ 0x0d
+#define MIPHY_SPEED 0x0e
+#define TX_SPDSEL_80DEC 0
+#define TX_SPDSEL_40DEC 1
+#define TX_SPDSEL_20DEC 2
+#define RX_SPDSEL_80DEC 0
+#define RX_SPDSEL_40DEC (1 << 2)
+#define RX_SPDSEL_20DEC (2 << 2)
+
+#define MIPHY_CONF 0x0f
+#define MIPHY_CTRL_TEST_SEL 0x20
+#define MIPHY_CTRL_TEST_1 0x21
+#define MIPHY_CTRL_TEST_2 0x22
+#define MIPHY_CTRL_TEST_3 0x23
+#define MIPHY_CTRL_TEST_4 0x24
+#define MIPHY_FEEDBACK_TEST 0x25
+#define MIPHY_DEBUG_BUS 0x26
+#define MIPHY_DEBUG_STATUS_MSB 0x27
+#define MIPHY_DEBUG_STATUS_LSB 0x28
+#define MIPHY_PWR_RAIL_1 0x29
+#define MIPHY_PWR_RAIL_2 0x2a
+#define MIPHY_SYNCHAR_CONTROL 0x30
+
+#define MIPHY_COMP_FSM_1 0x3a
+#define COMP_START BIT(6)
+
+#define MIPHY_COMP_FSM_6 0x3f
+#define COMP_DONE BIT(7)
+
+#define MIPHY_COMP_POSTP 0x42
+#define MIPHY_TX_CTRL_1 0x49
+#define TX_REG_STEP_0V 0
+#define TX_REG_STEP_P_25MV 1
+#define TX_REG_STEP_P_50MV 2
+#define TX_REG_STEP_N_25MV 7
+#define TX_REG_STEP_N_50MV 6
+#define TX_REG_STEP_N_75MV 5
+
+#define MIPHY_TX_CTRL_2 0x4a
+#define TX_SLEW_SW_40_PS 0
+#define TX_SLEW_SW_80_PS 1
+#define TX_SLEW_SW_120_PS 2
+
+#define MIPHY_TX_CTRL_3 0x4b
+#define MIPHY_TX_CAL_MAN 0x4e
+#define TX_SLEW_CAL_MAN_EN BIT(0)
+
+#define MIPHY_TST_BIAS_BOOST_2 0x62
+#define MIPHY_BIAS_BOOST_1 0x63
+#define MIPHY_BIAS_BOOST_2 0x64
+#define MIPHY_RX_DESBUFF_FDB_2 0x67
+#define MIPHY_RX_DESBUFF_FDB_3 0x68
+#define MIPHY_SIGDET_COMPENS1 0x69
+#define MIPHY_SIGDET_COMPENS2 0x6a
+#define MIPHY_JITTER_PERIOD 0x6b
+#define MIPHY_JITTER_AMPLITUDE_1 0x6c
+#define MIPHY_JITTER_AMPLITUDE_2 0x6d
+#define MIPHY_JITTER_AMPLITUDE_3 0x6e
+#define MIPHY_RX_K_GAIN 0x78
+#define MIPHY_RX_BUFFER_CTRL 0x7a
+#define VGA_GAIN BIT(0)
+#define EQ_DC_GAIN BIT(2)
+#define EQ_BOOST_GAIN BIT(3)
+
+#define MIPHY_RX_VGA_GAIN 0x7b
+#define MIPHY_RX_EQU_GAIN_1 0x7f
+#define MIPHY_RX_EQU_GAIN_2 0x80
+#define MIPHY_RX_EQU_GAIN_3 0x81
+#define MIPHY_RX_CAL_CTRL_1 0x97
+#define MIPHY_RX_CAL_CTRL_2 0x98
+
+#define MIPHY_RX_CAL_OFFSET_CTRL 0x99
+#define CAL_OFFSET_VGA_64 (0x03 << 0)
+#define CAL_OFFSET_THRESHOLD_64 (0x03 << 2)
+#define VGA_OFFSET_POLARITY BIT(4)
+#define OFFSET_COMPENSATION_EN BIT(6)
+
+#define MIPHY_RX_CAL_VGA_STEP 0x9a
+#define MIPHY_RX_CAL_EYE_MIN 0x9d
+#define MIPHY_RX_CAL_OPT_LENGTH 0x9f
+#define MIPHY_RX_LOCK_CTRL_1 0xc1
+#define MIPHY_RX_LOCK_SETTINGS_OPT 0xc2
+#define MIPHY_RX_LOCK_STEP 0xc4
+
+#define MIPHY_RX_SIGDET_SLEEP_OA 0xc9
+#define MIPHY_RX_SIGDET_SLEEP_SEL 0xca
+#define MIPHY_RX_SIGDET_WAIT_SEL 0xcb
+#define MIPHY_RX_SIGDET_DATA_SEL 0xcc
+#define EN_ULTRA_LOW_POWER BIT(0)
+#define EN_FIRST_HALF BIT(1)
+#define EN_SECOND_HALF BIT(2)
+#define EN_DIGIT_SIGNAL_CHECK BIT(3)
+
+#define MIPHY_RX_POWER_CTRL_1 0xcd
+#define MIPHY_RX_POWER_CTRL_2 0xce
+#define MIPHY_PLL_CALSET_CTRL 0xd3
+#define MIPHY_PLL_CALSET_1 0xd4
+#define MIPHY_PLL_CALSET_2 0xd5
+#define MIPHY_PLL_CALSET_3 0xd6
+#define MIPHY_PLL_CALSET_4 0xd7
+#define MIPHY_PLL_SBR_1 0xe3
+#define SET_NEW_CHANGE BIT(1)
+
+#define MIPHY_PLL_SBR_2 0xe4
+#define MIPHY_PLL_SBR_3 0xe5
+#define MIPHY_PLL_SBR_4 0xe6
+#define MIPHY_PLL_COMMON_MISC_2 0xe9
+#define START_ACT_FILT BIT(6)
+
+#define MIPHY_PLL_SPAREIN 0xeb
+
+/*
+ * On STiH407 the glue logic can be different among MiPHY devices; for example:
+ * MiPHY0: OSC_FORCE_EXT means:
+ * 0: 30MHz crystal clk - 1: 100MHz ext clk routed through MiPHY1
+ * MiPHY1: OSC_FORCE_EXT means:
+ * 1: 30MHz crystal clk - 0: 100MHz ext clk routed through MiPHY1
+ * Some devices provide no way to check whether the osc is ready.
+ */
+#define MIPHY_OSC_FORCE_EXT BIT(3)
+#define MIPHY_OSC_RDY BIT(5)
+
+#define MIPHY_CTRL_MASK 0x0f
+#define MIPHY_CTRL_DEFAULT 0
+#define MIPHY_CTRL_SYNC_D_EN BIT(2)
+
+/* SATA / PCIe defines */
+#define SATA_CTRL_MASK 0x07
+#define PCIE_CTRL_MASK 0xff
+#define SATA_CTRL_SELECT_SATA 1
+#define SATA_CTRL_SELECT_PCIE 0
+#define SYSCFG_PCIE_PCIE_VAL 0x80
+#define SATA_SPDMODE 1
+
+#define MIPHY_SATA_BANK_NB 3
+#define MIPHY_PCIE_BANK_NB 2
+
+struct miphy28lp_phy {
+ struct phy *phy;
+ struct miphy28lp_dev *phydev;
+ void __iomem *base;
+ void __iomem *pipebase;
+
+ bool osc_force_ext;
+ bool osc_rdy;
+ bool px_rx_pol_inv;
+ bool ssc;
+ bool tx_impedance;
+
+ struct reset_control *miphy_rst;
+
+ u32 sata_gen;
+
+ /* Sysconfig registers offsets needed to configure the device */
+ u32 syscfg_miphy_ctrl;
+ u32 syscfg_miphy_status;
+ u32 syscfg_pci;
+ u32 syscfg_sata;
+ u8 type;
+};
+
+struct miphy28lp_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex miphy_mutex;
+ struct miphy28lp_phy **phys;
+};
+
+struct miphy_initval {
+ u16 reg;
+ u16 val;
+};
+
+enum miphy_sata_gen { SATA_GEN1, SATA_GEN2, SATA_GEN3 };
+
+static char *PHY_TYPE_name[] = { "sata-up", "pcie-up", "", "usb3-up" };
+
+struct pll_ratio {
+ int clk_ref;
+ int calset_1;
+ int calset_2;
+ int calset_3;
+ int calset_4;
+ int cal_ctrl;
+};
+
+static struct pll_ratio sata_pll_ratio = {
+ .clk_ref = 0x1e,
+ .calset_1 = 0xc8,
+ .calset_2 = 0x00,
+ .calset_3 = 0x00,
+ .calset_4 = 0x00,
+ .cal_ctrl = 0x00,
+};
+
+static struct pll_ratio pcie_pll_ratio = {
+ .clk_ref = 0x1e,
+ .calset_1 = 0xa6,
+ .calset_2 = 0xaa,
+ .calset_3 = 0xaa,
+ .calset_4 = 0x00,
+ .cal_ctrl = 0x00,
+};
+
+static struct pll_ratio usb3_pll_ratio = {
+ .clk_ref = 0x1e,
+ .calset_1 = 0xa6,
+ .calset_2 = 0xaa,
+ .calset_3 = 0xaa,
+ .calset_4 = 0x04,
+ .cal_ctrl = 0x00,
+};
+
+struct miphy28lp_pll_gen {
+ int bank;
+ int speed;
+ int bias_boost_1;
+ int bias_boost_2;
+ int tx_ctrl_1;
+ int tx_ctrl_2;
+ int tx_ctrl_3;
+ int rx_k_gain;
+ int rx_vga_gain;
+ int rx_equ_gain_1;
+ int rx_equ_gain_2;
+ int rx_equ_gain_3;
+ int rx_buff_ctrl;
+};
+
+static struct miphy28lp_pll_gen sata_pll_gen[] = {
+ {
+ .bank = 0x00,
+ .speed = TX_SPDSEL_80DEC | RX_SPDSEL_80DEC,
+ .bias_boost_1 = 0x00,
+ .bias_boost_2 = 0xae,
+ .tx_ctrl_2 = 0x53,
+ .tx_ctrl_3 = 0x00,
+ .rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
+ .rx_vga_gain = 0x00,
+ .rx_equ_gain_1 = 0x7d,
+ .rx_equ_gain_2 = 0x56,
+ .rx_equ_gain_3 = 0x00,
+ },
+ {
+ .bank = 0x01,
+ .speed = TX_SPDSEL_40DEC | RX_SPDSEL_40DEC,
+ .bias_boost_1 = 0x00,
+ .bias_boost_2 = 0xae,
+ .tx_ctrl_2 = 0x72,
+ .tx_ctrl_3 = 0x20,
+ .rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
+ .rx_vga_gain = 0x00,
+ .rx_equ_gain_1 = 0x7d,
+ .rx_equ_gain_2 = 0x56,
+ .rx_equ_gain_3 = 0x00,
+ },
+ {
+ .bank = 0x02,
+ .speed = TX_SPDSEL_20DEC | RX_SPDSEL_20DEC,
+ .bias_boost_1 = 0x00,
+ .bias_boost_2 = 0xae,
+ .tx_ctrl_2 = 0xc0,
+ .tx_ctrl_3 = 0x20,
+ .rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
+ .rx_vga_gain = 0x00,
+ .rx_equ_gain_1 = 0x7d,
+ .rx_equ_gain_2 = 0x56,
+ .rx_equ_gain_3 = 0x00,
+ },
+};
+
+static struct miphy28lp_pll_gen pcie_pll_gen[] = {
+ {
+ .bank = 0x00,
+ .speed = TX_SPDSEL_40DEC | RX_SPDSEL_40DEC,
+ .bias_boost_1 = 0x00,
+ .bias_boost_2 = 0xa5,
+ .tx_ctrl_1 = TX_REG_STEP_N_25MV,
+ .tx_ctrl_2 = 0x71,
+ .tx_ctrl_3 = 0x60,
+ .rx_k_gain = 0x98,
+ .rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
+ .rx_vga_gain = 0x00,
+ .rx_equ_gain_1 = 0x79,
+ .rx_equ_gain_2 = 0x56,
+ },
+ {
+ .bank = 0x01,
+ .speed = TX_SPDSEL_20DEC | RX_SPDSEL_20DEC,
+ .bias_boost_1 = 0x00,
+ .bias_boost_2 = 0xa5,
+ .tx_ctrl_1 = TX_REG_STEP_N_25MV,
+ .tx_ctrl_2 = 0x70,
+ .tx_ctrl_3 = 0x60,
+ .rx_k_gain = 0xcc,
+ .rx_buff_ctrl = EQ_BOOST_GAIN | EQ_DC_GAIN | VGA_GAIN,
+ .rx_vga_gain = 0x00,
+ .rx_equ_gain_1 = 0x78,
+ .rx_equ_gain_2 = 0x07,
+ },
+};
+
+static inline void miphy28lp_set_reset(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* Putting Macro in reset */
+ writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
+
+ val = RST_APPLI_SW | RST_CONF_SW;
+ writeb_relaxed(val, base + MIPHY_CONF_RESET);
+
+ writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
+
+ /* Bringing the MIPHY-CPU registers out of reset */
+ if (miphy_phy->type == PHY_TYPE_PCIE) {
+ val = AUTO_RST_RX | TERM_EN_SW;
+ writeb_relaxed(val, base + MIPHY_CONTROL);
+ } else {
+ val = AUTO_RST_RX | TERM_EN_SW | DIS_LINK_RST;
+ writeb_relaxed(val, base + MIPHY_CONTROL);
+ }
+}
+
+static inline void miphy28lp_pll_calibration(struct miphy28lp_phy *miphy_phy,
+ struct pll_ratio *pll_ratio)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* Applying PLL Settings */
+ writeb_relaxed(0x1d, base + MIPHY_PLL_SPAREIN);
+ writeb_relaxed(pll_ratio->clk_ref, base + MIPHY_PLL_CLKREF_FREQ);
+
+ /* PLL Ratio */
+ writeb_relaxed(pll_ratio->calset_1, base + MIPHY_PLL_CALSET_1);
+ writeb_relaxed(pll_ratio->calset_2, base + MIPHY_PLL_CALSET_2);
+ writeb_relaxed(pll_ratio->calset_3, base + MIPHY_PLL_CALSET_3);
+ writeb_relaxed(pll_ratio->calset_4, base + MIPHY_PLL_CALSET_4);
+ writeb_relaxed(pll_ratio->cal_ctrl, base + MIPHY_PLL_CALSET_CTRL);
+
+ writeb_relaxed(TX_SEL, base + MIPHY_BOUNDARY_SEL);
+
+ val = (0x68 << 1) | TX_SLEW_CAL_MAN_EN;
+ writeb_relaxed(val, base + MIPHY_TX_CAL_MAN);
+
+ val = VGA_OFFSET_POLARITY | CAL_OFFSET_THRESHOLD_64 | CAL_OFFSET_VGA_64;
+
+ if (miphy_phy->type != PHY_TYPE_SATA)
+ val |= OFFSET_COMPENSATION_EN;
+
+ writeb_relaxed(val, base + MIPHY_RX_CAL_OFFSET_CTRL);
+
+ if (miphy_phy->type == PHY_TYPE_USB3) {
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+ writeb_relaxed(0x70, base + MIPHY_RX_LOCK_STEP);
+ writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_SLEEP_OA);
+ writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_SLEEP_SEL);
+ writeb_relaxed(EN_FIRST_HALF, base + MIPHY_RX_SIGDET_WAIT_SEL);
+
+ val = EN_DIGIT_SIGNAL_CHECK | EN_FIRST_HALF;
+ writeb_relaxed(val, base + MIPHY_RX_SIGDET_DATA_SEL);
+ }
+
+}
+
+static inline void miphy28lp_sata_config_gen(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sata_pll_gen); i++) {
+ struct miphy28lp_pll_gen *gen = &sata_pll_gen[i];
+
+ /* Banked settings */
+ writeb_relaxed(gen->bank, base + MIPHY_CONF);
+ writeb_relaxed(gen->speed, base + MIPHY_SPEED);
+ writeb_relaxed(gen->bias_boost_1, base + MIPHY_BIAS_BOOST_1);
+ writeb_relaxed(gen->bias_boost_2, base + MIPHY_BIAS_BOOST_2);
+
+ /* TX buffer Settings */
+ writeb_relaxed(gen->tx_ctrl_2, base + MIPHY_TX_CTRL_2);
+ writeb_relaxed(gen->tx_ctrl_3, base + MIPHY_TX_CTRL_3);
+
+ /* RX Buffer Settings */
+ writeb_relaxed(gen->rx_buff_ctrl, base + MIPHY_RX_BUFFER_CTRL);
+ writeb_relaxed(gen->rx_vga_gain, base + MIPHY_RX_VGA_GAIN);
+ writeb_relaxed(gen->rx_equ_gain_1, base + MIPHY_RX_EQU_GAIN_1);
+ writeb_relaxed(gen->rx_equ_gain_2, base + MIPHY_RX_EQU_GAIN_2);
+ writeb_relaxed(gen->rx_equ_gain_3, base + MIPHY_RX_EQU_GAIN_3);
+ }
+}
+
+static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_pll_gen); i++) {
+ struct miphy28lp_pll_gen *gen = &pcie_pll_gen[i];
+
+ /* Banked settings */
+ writeb_relaxed(gen->bank, base + MIPHY_CONF);
+ writeb_relaxed(gen->speed, base + MIPHY_SPEED);
+ writeb_relaxed(gen->bias_boost_1, base + MIPHY_BIAS_BOOST_1);
+ writeb_relaxed(gen->bias_boost_2, base + MIPHY_BIAS_BOOST_2);
+
+ /* TX buffer Settings */
+ writeb_relaxed(gen->tx_ctrl_1, base + MIPHY_TX_CTRL_1);
+ writeb_relaxed(gen->tx_ctrl_2, base + MIPHY_TX_CTRL_2);
+ writeb_relaxed(gen->tx_ctrl_3, base + MIPHY_TX_CTRL_3);
+
+ writeb_relaxed(gen->rx_k_gain, base + MIPHY_RX_K_GAIN);
+
+ /* RX Buffer Settings */
+ writeb_relaxed(gen->rx_buff_ctrl, base + MIPHY_RX_BUFFER_CTRL);
+ writeb_relaxed(gen->rx_vga_gain, base + MIPHY_RX_VGA_GAIN);
+ writeb_relaxed(gen->rx_equ_gain_1, base + MIPHY_RX_EQU_GAIN_1);
+ writeb_relaxed(gen->rx_equ_gain_2, base + MIPHY_RX_EQU_GAIN_2);
+ }
+}
+
+static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
+{
+ unsigned long finish = jiffies + 5 * HZ;
+ u8 val;
+
+ /* Waiting for Compensation to complete */
+ do {
+ val = readb_relaxed(miphy_phy->base + MIPHY_COMP_FSM_6);
+
+ if (time_after_eq(jiffies, finish))
+ return -EBUSY;
+ cpu_relax();
+ } while (!(val & COMP_DONE));
+
+ return 0;
+}
+
+
+static inline int miphy28lp_compensation(struct miphy28lp_phy *miphy_phy,
+ struct pll_ratio *pll_ratio)
+{
+ void __iomem *base = miphy_phy->base;
+
+ /* Poll for HFC ready after reset release */
+ /* Compensation measurement */
+ writeb_relaxed(RST_PLL_SW | RST_COMP_SW, base + MIPHY_RESET);
+
+ writeb_relaxed(0x00, base + MIPHY_PLL_COMMON_MISC_2);
+ writeb_relaxed(pll_ratio->clk_ref, base + MIPHY_PLL_CLKREF_FREQ);
+ writeb_relaxed(COMP_START, base + MIPHY_COMP_FSM_1);
+
+ if (miphy_phy->type == PHY_TYPE_PCIE)
+ writeb_relaxed(RST_PLL_SW, base + MIPHY_RESET);
+
+ writeb_relaxed(0x00, base + MIPHY_RESET);
+ writeb_relaxed(START_ACT_FILT, base + MIPHY_PLL_COMMON_MISC_2);
+ writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
+
+ /* TX compensation offset to re-center TX impedance */
+ writeb_relaxed(0x00, base + MIPHY_COMP_POSTP);
+
+ if (miphy_phy->type == PHY_TYPE_PCIE)
+ return miphy28lp_wait_compensation(miphy_phy);
+
+ return 0;
+}
+
+static inline void miphy28_usb3_miphy_reset(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* MIPHY Reset */
+ writeb_relaxed(RST_APPLI_SW, base + MIPHY_CONF_RESET);
+ writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
+ writeb_relaxed(RST_COMP_SW, base + MIPHY_RESET);
+
+ val = RST_COMP_SW | RST_PLL_SW;
+ writeb_relaxed(val, base + MIPHY_RESET);
+
+ writeb_relaxed(0x00, base + MIPHY_PLL_COMMON_MISC_2);
+ writeb_relaxed(0x1e, base + MIPHY_PLL_CLKREF_FREQ);
+ writeb_relaxed(COMP_START, base + MIPHY_COMP_FSM_1);
+ writeb_relaxed(RST_PLL_SW, base + MIPHY_RESET);
+ writeb_relaxed(0x00, base + MIPHY_RESET);
+ writeb_relaxed(START_ACT_FILT, base + MIPHY_PLL_COMMON_MISC_2);
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+ writeb_relaxed(0x00, base + MIPHY_BOUNDARY_1);
+ writeb_relaxed(0x00, base + MIPHY_TST_BIAS_BOOST_2);
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+ writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
+ writeb_relaxed(0xa5, base + MIPHY_DEBUG_BUS);
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+}
+
+static void miphy_sata_tune_ssc(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* Compensate Tx impedance to avoid out of range values */
+ /*
+ * Enable the SSC on PLL for all banks
+ * SSC Modulation @ 31 KHz and 4000 ppm modulation amp
+ */
+ val = readb_relaxed(base + MIPHY_BOUNDARY_2);
+ val |= SSC_EN_SW;
+ writeb_relaxed(val, base + MIPHY_BOUNDARY_2);
+
+ val = readb_relaxed(base + MIPHY_BOUNDARY_SEL);
+ val |= SSC_SEL;
+ writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
+
+ for (val = 0; val < MIPHY_SATA_BANK_NB; val++) {
+ writeb_relaxed(val, base + MIPHY_CONF);
+
+ /* Add value to each reference clock cycle */
+ /* and define the period length of the SSC */
+ writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
+ writeb_relaxed(0x6c, base + MIPHY_PLL_SBR_3);
+ writeb_relaxed(0x81, base + MIPHY_PLL_SBR_4);
+
+ /* Clear any previous request */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+
+ /* Request the PLL to take the new parameters into account */
+ writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
+
+ /* Make sure there are no other pending requests */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+ }
+}
+
+static void miphy_pcie_tune_ssc(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* Compensate Tx impedance to avoid out of range values */
+ /*
+ * Enable the SSC on PLL for all banks
+ * SSC Modulation @ 31 KHz and 4000 ppm modulation amp
+ */
+ val = readb_relaxed(base + MIPHY_BOUNDARY_2);
+ val |= SSC_EN_SW;
+ writeb_relaxed(val, base + MIPHY_BOUNDARY_2);
+
+ val = readb_relaxed(base + MIPHY_BOUNDARY_SEL);
+ val |= SSC_SEL;
+ writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
+
+ for (val = 0; val < MIPHY_PCIE_BANK_NB; val++) {
+ writeb_relaxed(val, base + MIPHY_CONF);
+
+ /* Validate Step component */
+ writeb_relaxed(0x69, base + MIPHY_PLL_SBR_3);
+ writeb_relaxed(0x21, base + MIPHY_PLL_SBR_4);
+
+ /* Validate Period component */
+ writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
+ writeb_relaxed(0x21, base + MIPHY_PLL_SBR_4);
+
+ /* Clear any previous request */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+
+ /* Request the PLL to take the new parameters into account */
+ writeb_relaxed(SET_NEW_CHANGE, base + MIPHY_PLL_SBR_1);
+
+ /* Make sure there are no other pending requests */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+ }
+}
+
+static inline void miphy_tune_tx_impedance(struct miphy28lp_phy *miphy_phy)
+{
+ /* Compensate Tx impedance to avoid out of range values */
+ writeb_relaxed(0x02, miphy_phy->base + MIPHY_COMP_POSTP);
+}
+
+static inline int miphy28lp_configure_sata(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ int err;
+ u8 val;
+
+ /* Putting Macro in reset */
+ miphy28lp_set_reset(miphy_phy);
+
+ /* PLL calibration */
+ miphy28lp_pll_calibration(miphy_phy, &sata_pll_ratio);
+
+ /* Banked settings Gen1/Gen2/Gen3 */
+ miphy28lp_sata_config_gen(miphy_phy);
+
+ /* Power control */
+ /* Input bridge enable, manual input bridge control */
+ writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
+
+ /* Macro out of reset */
+ writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
+
+ /* Poll for HFC ready after reset release */
+ /* Compensation measurement */
+ err = miphy28lp_compensation(miphy_phy, &sata_pll_ratio);
+ if (err)
+ return err;
+
+ if (miphy_phy->px_rx_pol_inv) {
+ /* Invert Rx polarity */
+ val = readb_relaxed(miphy_phy->base + MIPHY_CONTROL);
+ val |= PX_RX_POL;
+ writeb_relaxed(val, miphy_phy->base + MIPHY_CONTROL);
+ }
+
+ if (miphy_phy->ssc)
+ miphy_sata_tune_ssc(miphy_phy);
+
+ if (miphy_phy->tx_impedance)
+ miphy_tune_tx_impedance(miphy_phy);
+
+ return 0;
+}
+
+static inline int miphy28lp_configure_pcie(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ int err;
+
+ /* Putting Macro in reset */
+ miphy28lp_set_reset(miphy_phy);
+
+ /* PLL calibration */
+ miphy28lp_pll_calibration(miphy_phy, &pcie_pll_ratio);
+
+ /* Banked settings Gen1/Gen2 */
+ miphy28lp_pcie_config_gen(miphy_phy);
+
+ /* Power control */
+ /* Input bridge enable, manual input bridge control */
+ writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
+
+ /* Macro out of reset */
+ writeb_relaxed(0x00, base + MIPHY_CONF_RESET);
+
+ /* Poll for HFC ready after reset release */
+ /* Compensation measurement */
+ err = miphy28lp_compensation(miphy_phy, &pcie_pll_ratio);
+ if (err)
+ return err;
+
+ if (miphy_phy->ssc)
+ miphy_pcie_tune_ssc(miphy_phy);
+
+ if (miphy_phy->tx_impedance)
+ miphy_tune_tx_impedance(miphy_phy);
+
+ return 0;
+}
+
+
+static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
+{
+ void __iomem *base = miphy_phy->base;
+ u8 val;
+
+ /* Putting Macro in reset */
+ miphy28lp_set_reset(miphy_phy);
+
+ /* PLL calibration */
+ miphy28lp_pll_calibration(miphy_phy, &usb3_pll_ratio);
+
+ /* Writing The Speed Rate */
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+
+ val = RX_SPDSEL_20DEC | TX_SPDSEL_20DEC;
+ writeb_relaxed(val, base + MIPHY_SPEED);
+
+ /* RX Channel compensation and calibration */
+ writeb_relaxed(0x1c, base + MIPHY_RX_LOCK_SETTINGS_OPT);
+ writeb_relaxed(0x51, base + MIPHY_RX_CAL_CTRL_1);
+ writeb_relaxed(0x70, base + MIPHY_RX_CAL_CTRL_2);
+
+ val = OFFSET_COMPENSATION_EN | VGA_OFFSET_POLARITY |
+ CAL_OFFSET_THRESHOLD_64 | CAL_OFFSET_VGA_64;
+ writeb_relaxed(val, base + MIPHY_RX_CAL_OFFSET_CTRL);
+ writeb_relaxed(0x22, base + MIPHY_RX_CAL_VGA_STEP);
+ writeb_relaxed(0x0e, base + MIPHY_RX_CAL_OPT_LENGTH);
+
+ val = EQ_DC_GAIN | VGA_GAIN;
+ writeb_relaxed(val, base + MIPHY_RX_BUFFER_CTRL);
+ writeb_relaxed(0x78, base + MIPHY_RX_EQU_GAIN_1);
+ writeb_relaxed(0x1b, base + MIPHY_SYNCHAR_CONTROL);
+
+ /* TX compensation offset to re-center TX impedance */
+ writeb_relaxed(0x02, base + MIPHY_COMP_POSTP);
+
+ /* Enable GENSEL_SEL and SSC */
+ /* TX_SEL=0: swing/pre-emphasis forced by the PIPE registers */
+ val = SSC_SEL | GENSEL_SEL;
+ writeb_relaxed(val, base + MIPHY_BOUNDARY_SEL);
+
+ /* MIPHY Bias boost */
+ writeb_relaxed(0x00, base + MIPHY_BIAS_BOOST_1);
+ writeb_relaxed(0xa7, base + MIPHY_BIAS_BOOST_2);
+
+ /* SSC modulation */
+ writeb_relaxed(SSC_EN_SW, base + MIPHY_BOUNDARY_2);
+
+ /* MIPHY TX control */
+ writeb_relaxed(0x00, base + MIPHY_CONF);
+
+ /* Validate Step component */
+ writeb_relaxed(0x5a, base + MIPHY_PLL_SBR_3);
+ writeb_relaxed(0xa0, base + MIPHY_PLL_SBR_4);
+
+ /* Validate Period component */
+ writeb_relaxed(0x3c, base + MIPHY_PLL_SBR_2);
+ writeb_relaxed(0xa1, base + MIPHY_PLL_SBR_4);
+
+ /* Clear any previous request */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+
+ /* Request the PLL to take the new parameters into account */
+ writeb_relaxed(0x02, base + MIPHY_PLL_SBR_1);
+
+ /* Make sure there are no other pending requests */
+ writeb_relaxed(0x00, base + MIPHY_PLL_SBR_1);
+
+ /* Rx PI controller settings */
+ writeb_relaxed(0xca, base + MIPHY_RX_K_GAIN);
+
+ /* MIPHY RX input bridge control */
+ /* INPUT_BRIDGE_EN_SW=1, manual input bridge control[0]=1 */
+ writeb_relaxed(0x21, base + MIPHY_RX_POWER_CTRL_1);
+ writeb_relaxed(0x29, base + MIPHY_RX_POWER_CTRL_1);
+ writeb_relaxed(0x1a, base + MIPHY_RX_POWER_CTRL_2);
+
+ /* MIPHY Reset for usb3 */
+ miphy28_usb3_miphy_reset(miphy_phy);
+}
+
+static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
+{
+ unsigned long finish = jiffies + 5 * HZ;
+ u8 mask = HFC_PLL | HFC_RDY;
+ u8 val;
+
+ /*
+ * For PCIe and USB3 check only that PLL and HFC are ready
+ * For SATA check also that phy is ready!
+ */
+ if (miphy_phy->type == PHY_TYPE_SATA)
+ mask |= PHY_RDY;
+
+ do {
+ val = readb_relaxed(miphy_phy->base + MIPHY_STATUS_1);
+ if ((val & mask) != mask)
+ cpu_relax();
+ else
+ return 0;
+ } while (!time_after_eq(jiffies, finish));
+
+ return -EBUSY;
+}
+
+static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ unsigned long finish = jiffies + 5 * HZ;
+ u32 val;
+
+ if (!miphy_phy->osc_rdy)
+ return 0;
+
+ if (!miphy_phy->syscfg_miphy_status)
+ return -EINVAL;
+
+ do {
+ regmap_read(miphy_dev->regmap, miphy_phy->syscfg_miphy_status,
+ &val);
+
+ if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
+ cpu_relax();
+ else
+ return 0;
+ } while (!time_after_eq(jiffies, finish));
+
+ return -EBUSY;
+}
+
+static int miphy28lp_get_resource_byname(struct device_node *child,
+ char *rname, struct resource *res)
+{
+ int index;
+
+ index = of_property_match_string(child, "reg-names", rname);
+ if (index < 0)
+ return -ENODEV;
+
+ return of_address_to_resource(child, index, res);
+}
+
+static int miphy28lp_get_one_addr(struct device *dev,
+ struct device_node *child, char *rname,
+ void __iomem **base)
+{
+ struct resource res;
+ int ret;
+
+ ret = miphy28lp_get_resource_byname(child, rname, &res);
+ if (!ret) {
+ *base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!*base) {
+ dev_err(dev, "failed to ioremap %s address region\n"
+ , rname);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+/* MiPHY reset and sysconf setup */
+static int miphy28lp_setup(struct miphy28lp_phy *miphy_phy, u32 miphy_val)
+{
+ int err;
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+
+ if (!miphy_phy->syscfg_miphy_ctrl)
+ return -EINVAL;
+
+ err = reset_control_assert(miphy_phy->miphy_rst);
+ if (err) {
+ dev_err(miphy_dev->dev, "unable to put miphy in reset\n");
+ return err;
+ }
+
+ if (miphy_phy->osc_force_ext)
+ miphy_val |= MIPHY_OSC_FORCE_EXT;
+
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_miphy_ctrl,
+ MIPHY_CTRL_MASK, miphy_val);
+
+ err = reset_control_deassert(miphy_phy->miphy_rst);
+ if (err) {
+ dev_err(miphy_dev->dev, "unable to bring out of miphy reset\n");
+ return err;
+ }
+
+ return miphy_osc_is_ready(miphy_phy);
+}
+
+static int miphy28lp_init_sata(struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ int err, sata_conf = SATA_CTRL_SELECT_SATA;
+
+ if ((!miphy_phy->syscfg_sata) || (!miphy_phy->syscfg_pci)
+ || (!miphy_phy->base))
+ return -EINVAL;
+
+ dev_info(miphy_dev->dev, "sata-up mode, addr 0x%p\n", miphy_phy->base);
+
+ /* Configure the glue-logic */
+ sata_conf |= ((miphy_phy->sata_gen - SATA_GEN1) << SATA_SPDMODE);
+
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_sata,
+ SATA_CTRL_MASK, sata_conf);
+
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_pci,
+ PCIE_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
+
+ /* MiPHY path and clocking init */
+ err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_DEFAULT);
+
+ if (err) {
+ dev_err(miphy_dev->dev, "SATA phy setup failed\n");
+ return err;
+ }
+
+ /* initialize miphy */
+ miphy28lp_configure_sata(miphy_phy);
+
+ return miphy_is_ready(miphy_phy);
+}
+
+static int miphy28lp_init_pcie(struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ int err;
+
+ if ((!miphy_phy->syscfg_sata) || (!miphy_phy->syscfg_pci)
+ || (!miphy_phy->base) || (!miphy_phy->pipebase))
+ return -EINVAL;
+
+ dev_info(miphy_dev->dev, "pcie-up mode, addr 0x%p\n", miphy_phy->base);
+
+ /* Configure the glue-logic */
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_sata,
+ SATA_CTRL_MASK, SATA_CTRL_SELECT_PCIE);
+
+ regmap_update_bits(miphy_dev->regmap, miphy_phy->syscfg_pci,
+ PCIE_CTRL_MASK, SYSCFG_PCIE_PCIE_VAL);
+
+ /* MiPHY path and clocking init */
+ err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_DEFAULT);
+
+ if (err) {
+ dev_err(miphy_dev->dev, "PCIe phy setup failed\n");
+ return err;
+ }
+
+ /* initialize miphy */
+ err = miphy28lp_configure_pcie(miphy_phy);
+ if (err)
+ return err;
+
+ /* PIPE Wrapper Configuration */
+ writeb_relaxed(0x68, miphy_phy->pipebase + 0x104); /* Rise_0 */
+ writeb_relaxed(0x61, miphy_phy->pipebase + 0x105); /* Rise_1 */
+ writeb_relaxed(0x68, miphy_phy->pipebase + 0x108); /* Fall_0 */
+ writeb_relaxed(0x61, miphy_phy->pipebase + 0x109); /* Fall-1 */
+ writeb_relaxed(0x68, miphy_phy->pipebase + 0x10c); /* Threshold_0 */
+ writeb_relaxed(0x60, miphy_phy->pipebase + 0x10d); /* Threshold_1 */
+
+ /* Wait for phy_ready */
+ return miphy_is_ready(miphy_phy);
+}
+
+static int miphy28lp_init_usb3(struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ int err;
+
+ if ((!miphy_phy->base) || (!miphy_phy->pipebase))
+ return -EINVAL;
+
+ dev_info(miphy_dev->dev, "usb3-up mode, addr 0x%p\n", miphy_phy->base);
+
+ /* MiPHY path and clocking init */
+ err = miphy28lp_setup(miphy_phy, MIPHY_CTRL_SYNC_D_EN);
+ if (err) {
+ dev_err(miphy_dev->dev, "USB3 phy setup failed\n");
+ return err;
+ }
+
+ /* initialize miphy */
+ miphy28lp_configure_usb3(miphy_phy);
+
+ /* PIPE Wrapper Configuration */
+ writeb_relaxed(0x68, miphy_phy->pipebase + 0x23);
+ writeb_relaxed(0x61, miphy_phy->pipebase + 0x24);
+ writeb_relaxed(0x68, miphy_phy->pipebase + 0x26);
+ writeb_relaxed(0x61, miphy_phy->pipebase + 0x27);
+ writeb_relaxed(0x18, miphy_phy->pipebase + 0x29);
+ writeb_relaxed(0x61, miphy_phy->pipebase + 0x2a);
+
+ /* PIPE wrapper USB3 TX swing/de-emphasis margin: PREEMPH[7:4], SWING[3:0] */
+ writeb_relaxed(0x67, miphy_phy->pipebase + 0x68);
+ writeb_relaxed(0x0d, miphy_phy->pipebase + 0x69);
+ writeb_relaxed(0x67, miphy_phy->pipebase + 0x6a);
+ writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6b);
+ writeb_relaxed(0x67, miphy_phy->pipebase + 0x6c);
+ writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6d);
+ writeb_relaxed(0x67, miphy_phy->pipebase + 0x6e);
+ writeb_relaxed(0x0d, miphy_phy->pipebase + 0x6f);
+
+ return miphy_is_ready(miphy_phy);
+}
+
+static int miphy28lp_init(struct phy *phy)
+{
+ struct miphy28lp_phy *miphy_phy = phy_get_drvdata(phy);
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ int ret;
+
+ mutex_lock(&miphy_dev->miphy_mutex);
+
+ switch (miphy_phy->type) {
+
+ case PHY_TYPE_SATA:
+ ret = miphy28lp_init_sata(miphy_phy);
+ break;
+ case PHY_TYPE_PCIE:
+ ret = miphy28lp_init_pcie(miphy_phy);
+ break;
+ case PHY_TYPE_USB3:
+ ret = miphy28lp_init_usb3(miphy_phy);
+ break;
+ default:
+ ret = -EINVAL; /* don't return with the mutex held */
+ }
+
+ mutex_unlock(&miphy_dev->miphy_mutex);
+
+ return ret;
+}
+
+static int miphy28lp_get_addr(struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ struct device_node *phynode = miphy_phy->phy->dev.of_node;
+ int err;
+
+ if ((miphy_phy->type != PHY_TYPE_SATA) &&
+ (miphy_phy->type != PHY_TYPE_PCIE) &&
+ (miphy_phy->type != PHY_TYPE_USB3)) {
+ return -EINVAL;
+ }
+
+ err = miphy28lp_get_one_addr(miphy_dev->dev, phynode,
+ PHY_TYPE_name[miphy_phy->type - PHY_TYPE_SATA],
+ &miphy_phy->base);
+ if (err)
+ return err;
+
+ if ((miphy_phy->type == PHY_TYPE_PCIE) ||
+ (miphy_phy->type == PHY_TYPE_USB3)) {
+ err = miphy28lp_get_one_addr(miphy_dev->dev, phynode, "pipew",
+ &miphy_phy->pipebase);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct phy *miphy28lp_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct miphy28lp_dev *miphy_dev = dev_get_drvdata(dev);
+ struct miphy28lp_phy *miphy_phy = NULL;
+ struct device_node *phynode = args->np;
+ int ret, index = 0;
+
+ if (!of_device_is_available(phynode)) {
+ dev_warn(dev, "Requested PHY is disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (args->args_count != 1) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (index = 0; index < of_get_child_count(dev->of_node); index++)
+ if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
+ miphy_phy = miphy_dev->phys[index];
+ break;
+ }
+
+ if (!miphy_phy) {
+ dev_err(dev, "Failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ miphy_phy->type = args->args[0];
+
+ ret = miphy28lp_get_addr(miphy_phy);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return miphy_phy->phy;
+}
+
+static struct phy_ops miphy28lp_ops = {
+ .init = miphy28lp_init,
+};
+
+static int miphy28lp_probe_resets(struct device_node *node,
+ struct miphy28lp_phy *miphy_phy)
+{
+ struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+ int err;
+
+ miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+
+ if (IS_ERR(miphy_phy->miphy_rst)) {
+ dev_err(miphy_dev->dev,
+ "miphy soft reset control not defined\n");
+ return PTR_ERR(miphy_phy->miphy_rst);
+ }
+
+ err = reset_control_deassert(miphy_phy->miphy_rst);
+ if (err) {
+ dev_err(miphy_dev->dev, "unable to bring out of miphy reset\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int miphy28lp_of_probe(struct device_node *np,
+ struct miphy28lp_phy *miphy_phy)
+{
+ struct resource res;
+
+ miphy_phy->osc_force_ext =
+ of_property_read_bool(np, "st,osc-force-ext");
+
+ miphy_phy->osc_rdy = of_property_read_bool(np, "st,osc-rdy");
+
+ miphy_phy->px_rx_pol_inv =
+ of_property_read_bool(np, "st,px_rx_pol_inv");
+
+ miphy_phy->ssc = of_property_read_bool(np, "st,ssc-on");
+
+ miphy_phy->tx_impedance =
+ of_property_read_bool(np, "st,tx-impedance-comp");
+
+ of_property_read_u32(np, "st,sata-gen", &miphy_phy->sata_gen);
+ if (!miphy_phy->sata_gen)
+ miphy_phy->sata_gen = SATA_GEN1;
+
+ if (!miphy28lp_get_resource_byname(np, "miphy-ctrl-glue", &res))
+ miphy_phy->syscfg_miphy_ctrl = res.start;
+
+ if (!miphy28lp_get_resource_byname(np, "miphy-status-glue", &res))
+ miphy_phy->syscfg_miphy_status = res.start;
+
+ if (!miphy28lp_get_resource_byname(np, "pcie-glue", &res))
+ miphy_phy->syscfg_pci = res.start;
+
+ if (!miphy28lp_get_resource_byname(np, "sata-glue", &res))
+ miphy_phy->syscfg_sata = res.start;
+
+ return 0;
+}
+
+static int miphy28lp_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct miphy28lp_dev *miphy_dev;
+ struct phy_provider *provider;
+ struct phy *phy;
+ int chancount, port = 0;
+ int ret;
+
+ miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
+ if (!miphy_dev)
+ return -ENOMEM;
+
+ chancount = of_get_child_count(np);
+ miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount,
+ GFP_KERNEL);
+ if (!miphy_dev->phys)
+ return -ENOMEM;
+
+ miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(miphy_dev->regmap)) {
+ dev_err(&pdev->dev, "No syscfg phandle specified\n");
+ return PTR_ERR(miphy_dev->regmap);
+ }
+
+ miphy_dev->dev = &pdev->dev;
+
+ dev_set_drvdata(&pdev->dev, miphy_dev);
+
+ mutex_init(&miphy_dev->miphy_mutex);
+
+ for_each_child_of_node(np, child) {
+ struct miphy28lp_phy *miphy_phy;
+
+ miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
+ GFP_KERNEL);
+ if (!miphy_phy)
+ return -ENOMEM;
+
+ miphy_dev->phys[port] = miphy_phy;
+
+ phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ miphy_dev->phys[port]->phy = phy;
+ miphy_dev->phys[port]->phydev = miphy_dev;
+
+ ret = miphy28lp_of_probe(child, miphy_phy);
+ if (ret)
+ return ret;
+
+ ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
+ if (ret)
+ return ret;
+
+ phy_set_drvdata(phy, miphy_dev->phys[port]);
+ port++;
+
+ }
+
+ provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
+ if (IS_ERR(provider))
+ return PTR_ERR(provider);
+
+ return 0;
+}
+
+static const struct of_device_id miphy28lp_of_match[] = {
+ {.compatible = "st,miphy28lp-phy", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, miphy28lp_of_match);
+
+static struct platform_driver miphy28lp_driver = {
+ .probe = miphy28lp_probe,
+ .driver = {
+ .name = "miphy28lp-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = miphy28lp_of_match,
+ }
+};
+
+module_platform_driver(miphy28lp_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics miphy28lp driver");
+MODULE_LICENSE("GPL v2");
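Editor's note: for context on how the new MiPHY28lp provider above gets used, a controller driver goes through the generic PHY framework, which resolves the phandle via miphy28lp_xlate() and later calls miphy28lp_init() through the registered phy_ops. A hedged sketch of the consumer side (the "sata-phy" phy-names entry is illustrative, not taken from this patch):

	struct phy *phy;
	int ret;

	phy = devm_phy_get(&pdev->dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);	/* dispatches to miphy28lp_init() */
	if (ret)
		return ret;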
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 801afaf2d449..6ab43a814ad2 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -593,7 +593,7 @@ static int miphy365x_probe(struct platform_device *pdev)
miphy_dev->phys[port] = miphy_phy;
- phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
return PTR_ERR(phy);
@@ -610,10 +610,7 @@ static int miphy365x_probe(struct platform_device *pdev)
}
provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
- if (IS_ERR(provider))
- return PTR_ERR(provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(provider);
}
static const struct of_device_id miphy365x_of_match[] = {
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d395558cb12e..03b94f92e6f1 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -101,7 +101,7 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- phy = devm_phy_create(&pdev->dev, NULL, &phy_mvebu_sata_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, NULL, &phy_mvebu_sata_ops);
if (IS_ERR(phy))
return PTR_ERR(phy);
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 48990e735759..6f4aef3db248 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL_GPL(omap_usb2_set_comparator);
static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
- struct omap_usb *phy = phy_to_omapusb(otg->phy);
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
@@ -70,7 +70,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
static int omap_usb_start_srp(struct usb_otg *otg)
{
- struct omap_usb *phy = phy_to_omapusb(otg->phy);
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
@@ -80,11 +80,9 @@ static int omap_usb_start_srp(struct usb_otg *otg)
static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
- struct usb_phy *phy = otg->phy;
-
otg->host = host;
if (!host)
- phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
return 0;
}
@@ -92,11 +90,9 @@ static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
static int omap_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
- struct usb_phy *phy = otg->phy;
-
otg->gadget = gadget;
if (!gadget)
- phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
return 0;
}
@@ -255,12 +251,12 @@ static int omap_usb2_probe(struct platform_device *pdev)
otg->set_vbus = omap_usb_set_vbus;
if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
otg->start_srp = omap_usb_start_srp;
- otg->phy = &phy->phy;
+ otg->usb_phy = &phy->phy;
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
- generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
+ generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
pm_runtime_disable(phy->dev);
return PTR_ERR(generic_phy);
diff --git a/drivers/phy/phy-qcom-apq8064-sata.c b/drivers/phy/phy-qcom-apq8064-sata.c
index 7b3ddfb65898..4b243f7a10e4 100644
--- a/drivers/phy/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/phy-qcom-apq8064-sata.c
@@ -228,8 +228,7 @@ static int qcom_apq8064_sata_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy->mmio))
return PTR_ERR(phy->mmio);
- generic_phy = devm_phy_create(dev, NULL, &qcom_apq8064_sata_phy_ops,
- NULL);
+ generic_phy = devm_phy_create(dev, NULL, &qcom_apq8064_sata_phy_ops);
if (IS_ERR(generic_phy)) {
dev_err(dev, "%s: failed to create phy\n", __func__);
return PTR_ERR(generic_phy);
diff --git a/drivers/phy/phy-qcom-ipq806x-sata.c b/drivers/phy/phy-qcom-ipq806x-sata.c
index 759b0bf5b6b3..6f2fe2627916 100644
--- a/drivers/phy/phy-qcom-ipq806x-sata.c
+++ b/drivers/phy/phy-qcom-ipq806x-sata.c
@@ -150,8 +150,7 @@ static int qcom_ipq806x_sata_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy->mmio))
return PTR_ERR(phy->mmio);
- generic_phy = devm_phy_create(dev, NULL, &qcom_ipq806x_sata_phy_ops,
- NULL);
+ generic_phy = devm_phy_create(dev, NULL, &qcom_ipq806x_sata_phy_ops);
if (IS_ERR(generic_phy)) {
dev_err(dev, "%s: failed to create phy\n", __func__);
return PTR_ERR(generic_phy);
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 2793af17799f..778276aba3aa 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -304,7 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
phy->select_value = select_value[channel_num][n];
phy->phy = devm_phy_create(dev, NULL,
- &rcar_gen2_phy_ops, NULL);
+ &rcar_gen2_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PHY\n");
return PTR_ERR(phy->phy);
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 908949dfb4db..4a12f66b7fb5 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -202,8 +202,7 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev)
struct samsung_usb2_phy_instance *p = &drv->instances[i];
dev_dbg(dev, "Creating phy \"%s\"\n", label);
- p->phy = devm_phy_create(dev, NULL, &samsung_usb2_phy_ops,
- NULL);
+ p->phy = devm_phy_create(dev, NULL, &samsung_usb2_phy_ops);
if (IS_ERR(p->phy)) {
dev_err(drv->dev, "Failed to create usb2_phy \"%s\"\n",
label);
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c
index 5f4c586ee951..9f47fae7eecb 100644
--- a/drivers/phy/phy-spear1310-miphy.c
+++ b/drivers/phy/phy-spear1310-miphy.c
@@ -227,7 +227,7 @@ static int spear1310_miphy_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops, NULL);
+ priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create SATA PCIe PHY\n");
return PTR_ERR(priv->phy);
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c
index 1ecd0945bad3..e42bc200275f 100644
--- a/drivers/phy/phy-spear1340-miphy.c
+++ b/drivers/phy/phy-spear1340-miphy.c
@@ -259,7 +259,7 @@ static int spear1340_miphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->misc);
}
- priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops, NULL);
+ priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops);
if (IS_ERR(priv->phy)) {
dev_err(dev, "failed to create SATA PCIe PHY\n");
return PTR_ERR(priv->phy);
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 42428d4181ea..74f0fab3cd8a 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -137,7 +137,7 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
}
phy_dev->param = res->start;
- phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data, NULL);
+ phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
return PTR_ERR(phy);
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
index 9f16cb8e01f4..a603801293ff 100644
--- a/drivers/phy/phy-stih41x-usb.c
+++ b/drivers/phy/phy-stih41x-usb.c
@@ -148,7 +148,7 @@ static int stih41x_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy_dev->clk);
}
- phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops, NULL);
+ phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
@@ -160,10 +160,7 @@ static int stih41x_usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, phy_dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id stih41x_usb_phy_of_match[] = {
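Editor's note: several hunks in this series collapse the "check phy_provider, else return 0" tail into PTR_ERR_OR_ZERO(). The helper is equivalent to the removed lines; roughly (paraphrased from include/linux/err.h, which also carries __must_check/__force annotations):

	static inline int PTR_ERR_OR_ZERO(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}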
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 0baf5efc8a40..fb02a67c9181 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -157,6 +157,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
return ret;
}
+ /* Enable USB 45 Ohm resistor calibration */
+ if (phy->index == 0)
+ sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1);
+
/* Adjust PHY's magnitude and rate */
sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
@@ -213,7 +217,7 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (WARN_ON(args->args[0] == 0 || args->args[0] >= data->num_phys))
+ if (args->args[0] >= data->num_phys)
return ERR_PTR(-ENODEV);
return data->phys[args->args[0]].phy;
@@ -255,8 +259,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- /* Skip 0, 0 is the phy for otg which is not yet supported. */
- for (i = 1; i < data->num_phys; i++) {
+ for (i = 0; i < data->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
@@ -295,7 +298,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy->pmu);
}
- phy->phy = devm_phy_create(dev, NULL, &sun4i_usb_phy_ops, NULL);
+ phy->phy = devm_phy_create(dev, NULL, &sun4i_usb_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "failed to create PHY %d\n", i);
return PTR_ERR(phy->phy);
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 7c66922b2f93..1387b4d4afe3 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -399,7 +399,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
- generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
+ generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy))
return PTR_ERR(generic_phy);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 7b04befd5271..8e87f54671f3 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -606,7 +606,7 @@ static int twl4030_set_peripheral(struct usb_otg *otg,
otg->gadget = gadget;
if (!gadget)
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
return 0;
}
@@ -618,7 +618,7 @@ static int twl4030_set_host(struct usb_otg *otg, struct usb_bus *host)
otg->host = host;
if (!host)
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
return 0;
}
@@ -644,7 +644,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
struct usb_otg *otg;
struct device_node *np = pdev->dev.of_node;
struct phy_provider *phy_provider;
- struct phy_init_data *init_data = NULL;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -655,7 +654,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
(enum twl4030_usb_mode *)&twl->usb_mode);
else if (pdata) {
twl->usb_mode = pdata->usb_mode;
- init_data = pdata->init_data;
} else {
dev_err(&pdev->dev, "twl4030 initialized without pdata\n");
return -EINVAL;
@@ -676,11 +674,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
twl->phy.otg = otg;
twl->phy.type = USB_PHY_TYPE_USB2;
- otg->phy = &twl->phy;
+ otg->usb_phy = &twl->phy;
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
- phy = devm_phy_create(twl->dev, NULL, &ops, init_data);
+ phy = devm_phy_create(twl->dev, NULL, &ops);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
return PTR_ERR(phy);
@@ -733,6 +731,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
return status;
}
+ if (pdata)
+ err = phy_create_lookup(phy, "usb", "musb-hdrc.0");
+ if (err)
+ return err;
+
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(twl->dev);
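Editor's note: the omap-usb2 and twl4030 hunks above track a USB OTG rework in the same cycle: the OTG state and the PHY back-pointer now live in struct usb_otg (otg->state, otg->usb_phy) rather than being reached through otg->phy. The resulting set_host/set_peripheral pattern looks like this (callback name is illustrative):

	static int example_set_host(struct usb_otg *otg, struct usb_bus *host)
	{
		otg->host = host;
		if (!host)
			otg->state = OTG_STATE_UNDEFINED;	/* formerly otg->phy->state */
		return 0;
	}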
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index f8a51b16ade8..29214a36ea28 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1707,7 +1707,7 @@ static int xgene_phy_probe(struct platform_device *pdev)
ctx->dev = &pdev->dev;
platform_set_drvdata(pdev, ctx);
- ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops, NULL);
+ ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
if (IS_ERR(ctx->phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
rc = PTR_ERR(ctx->phy);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c6a66de6ed72..d014f22f387a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -67,18 +67,6 @@ config PINCTRL_AT91
help
Say Y here to enable the at91 pinctrl driver
-config PINCTRL_BAYTRAIL
- bool "Intel Baytrail GPIO pin control"
- depends on GPIOLIB && ACPI && X86
- select GPIOLIB_IRQCHIP
- help
- driver for memory mapped GPIO functionality on Intel Baytrail
- platforms. Supports 3 banks with 102, 28 and 44 gpios.
- Most pins are usually muxed to some other functionality by firmware,
- so only a small amount is available for gpio use.
-
- Requires ACPI device enumeration code to set up a platform device.
-
config PINCTRL_BCM2835
bool
select PINMUX
@@ -205,6 +193,7 @@ config PINCTRL_PALMAS
source "drivers/pinctrl/berlin/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
+source "drivers/pinctrl/intel/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/nomadik/Kconfig"
source "drivers/pinctrl/qcom/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 51f52d32859e..c030b3db8034 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
-obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
@@ -39,6 +38,7 @@ obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-y += freescale/
+obj-$(CONFIG_X86) += intel/
obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-y += nomadik/
obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index dcd4f6a4fc50..b71a6fffef1b 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -263,7 +263,6 @@ static struct platform_driver berlin2_pinctrl_driver = {
.probe = berlin2_pinctrl_probe,
.driver = {
.name = "berlin-bg2-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = berlin2_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index 89d585ef7da7..19ac5a22c947 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -206,7 +206,6 @@ static struct platform_driver berlin2cd_pinctrl_driver = {
.probe = berlin2cd_pinctrl_probe,
.driver = {
.name = "berlin-bg2cd-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = berlin2cd_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index 9fcf9836045c..bd9662e57ad3 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -425,7 +425,6 @@ static struct platform_driver berlin2q_pinctrl_driver = {
.probe = berlin2q_pinctrl_probe,
.driver = {
.name = "berlin-bg2q-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = berlin2q_pinctrl_match,
},
};
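Editor's note: the repeated removal of ".owner = THIS_MODULE" from the berlin and imx pinctrl platform_driver definitions is safe because the platform bus core now fills the field in for callers; platform_driver_register() is a wrapper macro along these lines (paraphrased from include/linux/platform_device.h):

	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)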
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index f2446769247f..52f2b9404fe0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -294,11 +294,83 @@ static int imx_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
+static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx_pinctrl_soc_info *info = ipctl->info;
+ const struct imx_pin_reg *pin_reg;
+ struct imx_pin_group *grp;
+ struct imx_pin *imx_pin;
+ unsigned int pin, group;
+ u32 reg;
+
+ /* Currently implemented only for the shared mux/conf register case */
+ if (!(info->flags & SHARE_MUX_CONF_REG))
+ return -EINVAL;
+
+ pin_reg = &info->pin_regs[offset];
+ if (pin_reg->mux_reg == -1)
+ return -EINVAL;
+
+ /* Find the pinctrl config with GPIO mux mode for the requested pin */
+ for (group = 0; group < info->ngroups; group++) {
+ grp = &info->groups[group];
+ for (pin = 0; pin < grp->npins; pin++) {
+ imx_pin = &grp->pins[pin];
+ if (imx_pin->pin == offset && !imx_pin->mux_mode)
+ goto mux_pin;
+ }
+ }
+
+ return -EINVAL;
+
+mux_pin:
+ reg = readl(ipctl->base + pin_reg->mux_reg);
+ reg &= ~(0x7 << 20);
+ reg |= imx_pin->config;
+ writel(reg, ipctl->base + pin_reg->mux_reg);
+
+ return 0;
+}
+
+static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset, bool input)
+{
+ struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx_pinctrl_soc_info *info = ipctl->info;
+ const struct imx_pin_reg *pin_reg;
+ u32 reg;
+
+ /*
+ * Only Vybrid has the input/output buffer enable flags (IBE/OBE);
+ * they are part of the shared mux/conf register.
+ */
+ if (!(info->flags & SHARE_MUX_CONF_REG))
+ return -EINVAL;
+
+ pin_reg = &info->pin_regs[offset];
+ if (pin_reg->mux_reg == -1)
+ return -EINVAL;
+
+ /* IBE is left enabled so the value "on the wire" can always be read back */
+ reg = readl(ipctl->base + pin_reg->mux_reg);
+ if (input)
+ reg &= ~0x2;
+ else
+ reg |= 0x2;
+ writel(reg, ipctl->base + pin_reg->mux_reg);
+
+ return 0;
+}
+
static const struct pinmux_ops imx_pmx_ops = {
.get_functions_count = imx_pmx_get_funcs_count,
.get_function_name = imx_pmx_get_func_name,
.get_function_groups = imx_pmx_get_groups,
.set_mux = imx_pmx_set,
+ .gpio_request_enable = imx_pmx_gpio_request_enable,
+ .gpio_set_direction = imx_pmx_gpio_set_direction,
};
static int imx_pinconf_get(struct pinctrl_dev *pctldev,
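Editor's note: the two new imx pinmux_ops callbacks above are reached through the gpiolib/pinctrl bridge: when a GPIO driver requests a pin or changes its direction, the pinctrl core forwards the call to the owning pin controller. A hedged sketch of the calling side (gpio_chip hook bodies are illustrative; the pinctrl_gpio_* helpers of this era take a global GPIO number):

	/* in a gpio_chip's .request hook */
	ret = pinctrl_gpio_request(chip->base + offset);		/* -> .gpio_request_enable() */

	/* in a gpio_chip's .direction_input hook */
	ret = pinctrl_gpio_direction_input(chip->base + offset);	/* -> .gpio_set_direction(..., input=true) */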
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index 533a6e519648..d3bacb7d6d37 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -267,7 +267,6 @@ MODULE_DEVICE_TABLE(of, imx1_pinctrl_of_match);
static struct platform_driver imx1_pinctrl_driver = {
.driver = {
.name = "imx1-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx1_pinctrl_of_match,
},
.remove = imx1_pinctrl_core_remove,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index 1b3b2311b033..9d9aca3dbd7e 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -330,7 +330,6 @@ MODULE_DEVICE_TABLE(of, imx21_pinctrl_of_match);
static struct platform_driver imx21_pinctrl_driver = {
.driver = {
.name = "imx21-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx21_pinctrl_of_match,
},
.remove = imx1_pinctrl_core_remove,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c
index df79096becb0..955cbf4f094f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx23.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx23.c
@@ -281,7 +281,6 @@ MODULE_DEVICE_TABLE(of, imx23_pinctrl_of_match);
static struct platform_driver imx23_pinctrl_driver = {
.driver = {
.name = "imx23-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx23_pinctrl_of_match,
},
.probe = imx23_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 550e6d77ac2b..8d1013a040c9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -328,7 +328,6 @@ static int imx25_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx25_pinctrl_driver = {
.driver = {
.name = "imx25-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(imx25_pinctrl_of_match),
},
.probe = imx25_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index 945eccadea74..a461d5881f37 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -402,7 +402,6 @@ static int imx27_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx27_pinctrl_driver = {
.driver = {
.name = "imx27-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(imx27_pinctrl_of_match),
},
.probe = imx27_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c
index 3bd45da21229..5082efec4f72 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx28.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx28.c
@@ -397,7 +397,6 @@ MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match);
static struct platform_driver imx28_pinctrl_driver = {
.driver = {
.name = "imx28-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx28_pinctrl_of_match,
},
.probe = imx28_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 6bfbcd0112c1..9109c10c5dab 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1018,7 +1018,6 @@ static int imx35_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx35_pinctrl_driver = {
.driver = {
.name = "imx35-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx35_pinctrl_of_match,
},
.probe = imx35_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index e8bd604ab147..51b31df96273 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -404,7 +404,6 @@ static int imx50_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx50_pinctrl_driver = {
.driver = {
.name = "imx50-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(imx50_pinctrl_of_match),
},
.probe = imx50_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index b818051db7c9..8dec494aa2c6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -781,7 +781,6 @@ static int imx51_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx51_pinctrl_driver = {
.driver = {
.name = "imx51-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx51_pinctrl_of_match,
},
.probe = imx51_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 1884d53cf750..7344d340013c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -467,7 +467,6 @@ static int imx53_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx53_pinctrl_driver = {
.driver = {
.name = "imx53-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx53_pinctrl_of_match,
},
.probe = imx53_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 656c4b08cc2e..6805c678c3b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -473,7 +473,6 @@ static int imx6dl_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6dl_pinctrl_driver = {
.driver = {
.name = "imx6dl-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx6dl_pinctrl_of_match,
},
.probe = imx6dl_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 59bb5b4ec0f6..4d1fcb861ac1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -479,7 +479,6 @@ static int imx6q_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6q_pinctrl_driver = {
.driver = {
.name = "imx6q-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx6q_pinctrl_of_match,
},
.probe = imx6q_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index e0924bd7b98c..83fa5f19ae89 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -380,7 +380,6 @@ static int imx6sl_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sl_pinctrl_driver = {
.driver = {
.name = "imx6sl-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = imx6sl_pinctrl_of_match,
},
.probe = imx6sl_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 840344c8580d..0d78fe690818 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -383,7 +383,6 @@ static int imx6sx_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver imx6sx_pinctrl_driver = {
.driver = {
.name = "imx6sx-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(imx6sx_pinctrl_of_match),
},
.probe = imx6sx_pinctrl_probe,
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index f98c6bb0f769..646d5c244af1 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -445,6 +445,31 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &val))
continue;
if (strcmp(fn, child->name)) {
+ struct device_node *child2;
+
+ /*
+ * This reference is dropped by the
+ * of_get_next_child(np, child) call below.
+ */
+ of_node_get(child);
+
+ /*
+ * The logic parsing the functions from dt currently
+ * doesn't handle if functions with the same name are
+ * not grouped together. Only the first contiguous
+ * cluster is usable for each function name. This is a
+ * bug that is not trivial to fix, but at least warn
+ * about it.
+ */
+ for (child2 = of_get_next_child(np, child);
+ child2 != NULL;
+ child2 = of_get_next_child(np, child2)) {
+ if (!strcmp(child2->name, fn))
+ dev_warn(&pdev->dev,
+ "function nodes must be grouped by name (failed for: %s)",
+ fn);
+ }
+
f = &soc->functions[idxf++];
f->name = fn = child->name;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index b788e1578954..fc86276892fd 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -315,7 +315,6 @@ static int vf610_pinctrl_probe(struct platform_device *pdev)
static struct platform_driver vf610_pinctrl_driver = {
.driver = {
.name = "vf610-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = vf610_pinctrl_of_match,
},
.probe = vf610_pinctrl_probe,
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
new file mode 100644
index 000000000000..b801d869e91c
--- /dev/null
+++ b/drivers/pinctrl/intel/Kconfig
@@ -0,0 +1,27 @@
+#
+# Intel pin control drivers
+#
+
+config PINCTRL_BAYTRAIL
+ bool "Intel Baytrail GPIO pin control"
+ depends on GPIOLIB && ACPI
+ select GPIOLIB_IRQCHIP
+ help
+ Driver for memory-mapped GPIO functionality on Intel Baytrail
+ platforms. It supports 3 banks with 102, 28 and 44 GPIOs.
+ Most pins are usually muxed to some other functionality by firmware,
+ so only a small number are available for GPIO use.
+
+ Requires ACPI device enumeration code to set up a platform device.
+
+config PINCTRL_CHERRYVIEW
+ tristate "Intel Cherryview/Braswell pinctrl and GPIO driver"
+ depends on ACPI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ help
+ The Cherryview/Braswell pinctrl driver provides an interface that
+ allows configuring SoC pins and using them as GPIOs.
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
new file mode 100644
index 000000000000..4c210e4139e2
--- /dev/null
+++ b/drivers/pinctrl/intel/Makefile
@@ -0,0 +1,4 @@
+# Intel pin control drivers
+
+obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
+obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 9dc38140194b..5afe03e28b91 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -606,7 +606,6 @@ static struct platform_driver byt_gpio_driver = {
.remove = byt_gpio_remove,
.driver = {
.name = "byt_gpio",
- .owner = THIS_MODULE,
.pm = &byt_gpio_pm_ops,
.acpi_match_table = ACPI_PTR(byt_gpio_acpi_match),
},
@@ -616,5 +615,10 @@ static int __init byt_gpio_init(void)
{
return platform_driver_register(&byt_gpio_driver);
}
-
subsys_initcall(byt_gpio_init);
+
+static void __exit byt_gpio_exit(void)
+{
+ platform_driver_unregister(&byt_gpio_driver);
+}
+module_exit(byt_gpio_exit);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
new file mode 100644
index 000000000000..e9f8b39d1a9f
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -0,0 +1,1519 @@
+/*
+ * Cherryview/Braswell pinctrl driver
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This driver is based on the original Cherryview GPIO driver by
+ * Ning Li <ning.li@intel.com>
+ * Alan Cox <alan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/acpi.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
+
+#define CHV_INTSTAT 0x300
+#define CHV_INTMASK 0x380
+
+#define FAMILY_PAD_REGS_OFF 0x4400
+#define FAMILY_PAD_REGS_SIZE 0x400
+#define MAX_FAMILY_PAD_GPIO_NO 15
+#define GPIO_REGS_SIZE 8
+
+#define CHV_PADCTRL0 0x000
+#define CHV_PADCTRL0_INTSEL_SHIFT 28
+#define CHV_PADCTRL0_INTSEL_MASK (0xf << CHV_PADCTRL0_INTSEL_SHIFT)
+#define CHV_PADCTRL0_TERM_UP BIT(23)
+#define CHV_PADCTRL0_TERM_SHIFT 20
+#define CHV_PADCTRL0_TERM_MASK (7 << CHV_PADCTRL0_TERM_SHIFT)
+#define CHV_PADCTRL0_TERM_20K 1
+#define CHV_PADCTRL0_TERM_5K 2
+#define CHV_PADCTRL0_TERM_1K 4
+#define CHV_PADCTRL0_PMODE_SHIFT 16
+#define CHV_PADCTRL0_PMODE_MASK (0xf << CHV_PADCTRL0_PMODE_SHIFT)
+#define CHV_PADCTRL0_GPIOEN BIT(15)
+#define CHV_PADCTRL0_GPIOCFG_SHIFT 8
+#define CHV_PADCTRL0_GPIOCFG_MASK (7 << CHV_PADCTRL0_GPIOCFG_SHIFT)
+#define CHV_PADCTRL0_GPIOCFG_GPIO 0
+#define CHV_PADCTRL0_GPIOCFG_GPO 1
+#define CHV_PADCTRL0_GPIOCFG_GPI 2
+#define CHV_PADCTRL0_GPIOCFG_HIZ 3
+#define CHV_PADCTRL0_GPIOTXSTATE BIT(1)
+#define CHV_PADCTRL0_GPIORXSTATE BIT(0)
+
+#define CHV_PADCTRL1 0x004
+#define CHV_PADCTRL1_CFGLOCK BIT(31)
+#define CHV_PADCTRL1_INVRXTX_SHIFT 4
+#define CHV_PADCTRL1_INVRXTX_MASK (0xf << CHV_PADCTRL1_INVRXTX_SHIFT)
+#define CHV_PADCTRL1_INVRXTX_TXENABLE (2 << CHV_PADCTRL1_INVRXTX_SHIFT)
+#define CHV_PADCTRL1_ODEN BIT(3)
+#define CHV_PADCTRL1_INVRXTX_RXDATA (4 << CHV_PADCTRL1_INVRXTX_SHIFT)
+#define CHV_PADCTRL1_INTWAKECFG_MASK 7
+#define CHV_PADCTRL1_INTWAKECFG_FALLING 1
+#define CHV_PADCTRL1_INTWAKECFG_RISING 2
+#define CHV_PADCTRL1_INTWAKECFG_BOTH 3
+#define CHV_PADCTRL1_INTWAKECFG_LEVEL 4
+
+/**
+ * struct chv_alternate_function - A per-group or per-pin alternate function
+ * @pin: Pin number (only used in per-pin configs)
+ * @mode: Mode the pin should be set in
+ * @invert_oe: Invert OE for this pin
+ */
+struct chv_alternate_function {
+ unsigned pin;
+ u8 mode;
+ bool invert_oe;
+};
+
+/**
+ * struct chv_pingroup - describes a CHV pin group
+ * @name: Name of the group
+ * @pins: An array of pins in this group
+ * @npins: Number of pins in this group
+ * @altfunc: Alternate function applied to all pins in this group
+ * @overrides: Alternate function override per pin or %NULL if not used
+ * @noverrides: Number of per pin alternate function overrides if
+ * @overrides != NULL.
+ */
+struct chv_pingroup {
+ const char *name;
+ const unsigned *pins;
+ size_t npins;
+ struct chv_alternate_function altfunc;
+ const struct chv_alternate_function *overrides;
+ size_t noverrides;
+};
+
+/**
+ * struct chv_function - A CHV pinmux function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ */
+struct chv_function {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+/**
+ * struct chv_gpio_pinrange - A range of pins that can be used as GPIOs
+ * @base: Start pin number
+ * @npins: Number of pins in this range
+ */
+struct chv_gpio_pinrange {
+ unsigned base;
+ unsigned npins;
+};
+
+/**
+ * struct chv_community - A community specific configuration
+ * @uid: ACPI _UID used to match the community
+ * @pins: All pins in this community
+ * @npins: Number of pins
+ * @groups: All groups in this community
+ * @ngroups: Number of groups
+ * @functions: All functions in this community
+ * @nfunctions: Number of functions
+ * @gpio_ranges: An array of GPIO ranges in this community
+ * @ngpio_ranges: Number of GPIO ranges
+ * @ngpios: Total number of GPIOs in this community
+ */
+struct chv_community {
+ const char *uid;
+ const struct pinctrl_pin_desc *pins;
+ size_t npins;
+ const struct chv_pingroup *groups;
+ size_t ngroups;
+ const struct chv_function *functions;
+ size_t nfunctions;
+ const struct chv_gpio_pinrange *gpio_ranges;
+ size_t ngpio_ranges;
+ size_t ngpios;
+};
+
+/**
+ * struct chv_pinctrl - CHV pinctrl private structure
+ * @dev: Pointer to the parent device
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @regs: MMIO registers
+ * @lock: Lock to serialize register accesses
+ * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
+ * offset (in GPIO number space)
+ * @community: Community this pinctrl instance represents
+ *
+ * The first group in @groups is expected to contain all pins that can be
+ * used as GPIOs.
+ */
+struct chv_pinctrl {
+ struct device *dev;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ void __iomem *regs;
+ spinlock_t lock;
+ unsigned intr_lines[16];
+ const struct chv_community *community;
+};
+
+#define gpiochip_to_pinctrl(c) container_of(c, struct chv_pinctrl, chip)
+
+#define ALTERNATE_FUNCTION(p, m, i) \
+ { \
+ .pin = (p), \
+ .mode = (m), \
+ .invert_oe = (i), \
+ }
+
+#define PIN_GROUP(n, p, m, i) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .altfunc.mode = (m), \
+ .altfunc.invert_oe = (i), \
+ }
+
+#define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .altfunc.mode = (m), \
+ .altfunc.invert_oe = (i), \
+ .overrides = (o), \
+ .noverrides = ARRAY_SIZE((o)), \
+ }
+
+#define FUNCTION(n, g) \
+ { \
+ .name = (n), \
+ .groups = (g), \
+ .ngroups = ARRAY_SIZE((g)), \
+ }
+
+#define GPIO_PINRANGE(start, end) \
+ { \
+ .base = (start), \
+ .npins = (end) - (start) + 1, \
+ }
+
+static const struct pinctrl_pin_desc southwest_pins[] = {
+ PINCTRL_PIN(0, "FST_SPI_D2"),
+ PINCTRL_PIN(1, "FST_SPI_D0"),
+ PINCTRL_PIN(2, "FST_SPI_CLK"),
+ PINCTRL_PIN(3, "FST_SPI_D3"),
+ PINCTRL_PIN(4, "FST_SPI_CS1_B"),
+ PINCTRL_PIN(5, "FST_SPI_D1"),
+ PINCTRL_PIN(6, "FST_SPI_CS0_B"),
+ PINCTRL_PIN(7, "FST_SPI_CS2_B"),
+
+ PINCTRL_PIN(15, "UART1_RTS_B"),
+ PINCTRL_PIN(16, "UART1_RXD"),
+ PINCTRL_PIN(17, "UART2_RXD"),
+ PINCTRL_PIN(18, "UART1_CTS_B"),
+ PINCTRL_PIN(19, "UART2_RTS_B"),
+ PINCTRL_PIN(20, "UART1_TXD"),
+ PINCTRL_PIN(21, "UART2_TXD"),
+ PINCTRL_PIN(22, "UART2_CTS_B"),
+
+ PINCTRL_PIN(30, "MF_HDA_CLK"),
+ PINCTRL_PIN(31, "MF_HDA_RSTB"),
+ PINCTRL_PIN(32, "MF_HDA_SDIO"),
+ PINCTRL_PIN(33, "MF_HDA_SDO"),
+ PINCTRL_PIN(34, "MF_HDA_DOCKRSTB"),
+ PINCTRL_PIN(35, "MF_HDA_SYNC"),
+ PINCTRL_PIN(36, "MF_HDA_SDI1"),
+ PINCTRL_PIN(37, "MF_HDA_DOCKENB"),
+
+ PINCTRL_PIN(45, "I2C5_SDA"),
+ PINCTRL_PIN(46, "I2C4_SDA"),
+ PINCTRL_PIN(47, "I2C6_SDA"),
+ PINCTRL_PIN(48, "I2C5_SCL"),
+ PINCTRL_PIN(49, "I2C_NFC_SDA"),
+ PINCTRL_PIN(50, "I2C4_SCL"),
+ PINCTRL_PIN(51, "I2C6_SCL"),
+ PINCTRL_PIN(52, "I2C_NFC_SCL"),
+
+ PINCTRL_PIN(60, "I2C1_SDA"),
+ PINCTRL_PIN(61, "I2C0_SDA"),
+ PINCTRL_PIN(62, "I2C2_SDA"),
+ PINCTRL_PIN(63, "I2C1_SCL"),
+ PINCTRL_PIN(64, "I2C3_SDA"),
+ PINCTRL_PIN(65, "I2C0_SCL"),
+ PINCTRL_PIN(66, "I2C2_SCL"),
+ PINCTRL_PIN(67, "I2C3_SCL"),
+
+ PINCTRL_PIN(75, "SATA_GP0"),
+ PINCTRL_PIN(76, "SATA_GP1"),
+ PINCTRL_PIN(77, "SATA_LEDN"),
+ PINCTRL_PIN(78, "SATA_GP2"),
+ PINCTRL_PIN(79, "MF_SMB_ALERTB"),
+ PINCTRL_PIN(80, "SATA_GP3"),
+ PINCTRL_PIN(81, "MF_SMB_CLK"),
+ PINCTRL_PIN(82, "MF_SMB_DATA"),
+
+ PINCTRL_PIN(90, "PCIE_CLKREQ0B"),
+ PINCTRL_PIN(91, "PCIE_CLKREQ1B"),
+ PINCTRL_PIN(92, "GP_SSP_2_CLK"),
+ PINCTRL_PIN(93, "PCIE_CLKREQ2B"),
+ PINCTRL_PIN(94, "GP_SSP_2_RXD"),
+ PINCTRL_PIN(95, "PCIE_CLKREQ3B"),
+ PINCTRL_PIN(96, "GP_SSP_2_FS"),
+ PINCTRL_PIN(97, "GP_SSP_2_TXD"),
+};
+
+static const unsigned southwest_fspi_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+static const unsigned southwest_uart0_pins[] = { 16, 20 };
+static const unsigned southwest_uart1_pins[] = { 15, 16, 18, 20 };
+static const unsigned southwest_uart2_pins[] = { 17, 19, 21, 22 };
+static const unsigned southwest_i2c0_pins[] = { 61, 65 };
+static const unsigned southwest_hda_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37 };
+static const unsigned southwest_lpe_pins[] = {
+ 30, 31, 32, 33, 34, 35, 36, 37, 92, 94, 96, 97,
+};
+static const unsigned southwest_i2c1_pins[] = { 60, 63 };
+static const unsigned southwest_i2c2_pins[] = { 62, 66 };
+static const unsigned southwest_i2c3_pins[] = { 64, 67 };
+static const unsigned southwest_i2c4_pins[] = { 46, 50 };
+static const unsigned southwest_i2c5_pins[] = { 45, 48 };
+static const unsigned southwest_i2c6_pins[] = { 47, 51 };
+static const unsigned southwest_i2c_nfc_pins[] = { 49, 52 };
+static const unsigned southwest_smbus_pins[] = { 79, 81, 82 };
+static const unsigned southwest_spi3_pins[] = { 76, 79, 80, 81, 82 };
+
+/* LPE I2S TXD pins need to have invert_oe set */
+static const struct chv_alternate_function southwest_lpe_altfuncs[] = {
+ ALTERNATE_FUNCTION(30, 1, true),
+ ALTERNATE_FUNCTION(34, 1, true),
+ ALTERNATE_FUNCTION(97, 1, true),
+};
+
+/*
+ * Two spi3 chip selects are available in a different mode than the main
+ * spi3 functionality, which uses mode 1.
+ */
+static const struct chv_alternate_function southwest_spi3_altfuncs[] = {
+ ALTERNATE_FUNCTION(76, 3, false),
+ ALTERNATE_FUNCTION(80, 3, false),
+};
+
+static const struct chv_pingroup southwest_groups[] = {
+ PIN_GROUP("uart0_grp", southwest_uart0_pins, 2, false),
+ PIN_GROUP("uart1_grp", southwest_uart1_pins, 1, false),
+ PIN_GROUP("uart2_grp", southwest_uart2_pins, 1, false),
+ PIN_GROUP("hda_grp", southwest_hda_pins, 2, false),
+ PIN_GROUP("i2c0_grp", southwest_i2c0_pins, 1, true),
+ PIN_GROUP("i2c1_grp", southwest_i2c1_pins, 1, true),
+ PIN_GROUP("i2c2_grp", southwest_i2c2_pins, 1, true),
+ PIN_GROUP("i2c3_grp", southwest_i2c3_pins, 1, true),
+ PIN_GROUP("i2c4_grp", southwest_i2c4_pins, 1, true),
+ PIN_GROUP("i2c5_grp", southwest_i2c5_pins, 1, true),
+ PIN_GROUP("i2c6_grp", southwest_i2c6_pins, 1, true),
+ PIN_GROUP("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
+
+ PIN_GROUP_WITH_OVERRIDE("lpe_grp", southwest_lpe_pins, 1, false,
+ southwest_lpe_altfuncs),
+ PIN_GROUP_WITH_OVERRIDE("spi3_grp", southwest_spi3_pins, 2, false,
+ southwest_spi3_altfuncs),
+};
+
+static const char * const southwest_uart0_groups[] = { "uart0_grp" };
+static const char * const southwest_uart1_groups[] = { "uart1_grp" };
+static const char * const southwest_uart2_groups[] = { "uart2_grp" };
+static const char * const southwest_hda_groups[] = { "hda_grp" };
+static const char * const southwest_lpe_groups[] = { "lpe_grp" };
+static const char * const southwest_i2c0_groups[] = { "i2c0_grp" };
+static const char * const southwest_i2c1_groups[] = { "i2c1_grp" };
+static const char * const southwest_i2c2_groups[] = { "i2c2_grp" };
+static const char * const southwest_i2c3_groups[] = { "i2c3_grp" };
+static const char * const southwest_i2c4_groups[] = { "i2c4_grp" };
+static const char * const southwest_i2c5_groups[] = { "i2c5_grp" };
+static const char * const southwest_i2c6_groups[] = { "i2c6_grp" };
+static const char * const southwest_i2c_nfc_groups[] = { "i2c_nfc_grp" };
+static const char * const southwest_spi3_groups[] = { "spi3_grp" };
+
+/*
+ * Only do pinmuxing for certain LPSS devices for now. The rest of the
+ * pins are enabled only as GPIOs.
+ */
+static const struct chv_function southwest_functions[] = {
+ FUNCTION("uart0", southwest_uart0_groups),
+ FUNCTION("uart1", southwest_uart1_groups),
+ FUNCTION("uart2", southwest_uart2_groups),
+ FUNCTION("hda", southwest_hda_groups),
+ FUNCTION("lpe", southwest_lpe_groups),
+ FUNCTION("i2c0", southwest_i2c0_groups),
+ FUNCTION("i2c1", southwest_i2c1_groups),
+ FUNCTION("i2c2", southwest_i2c2_groups),
+ FUNCTION("i2c3", southwest_i2c3_groups),
+ FUNCTION("i2c4", southwest_i2c4_groups),
+ FUNCTION("i2c5", southwest_i2c5_groups),
+ FUNCTION("i2c6", southwest_i2c6_groups),
+ FUNCTION("i2c_nfc", southwest_i2c_nfc_groups),
+ FUNCTION("spi3", southwest_spi3_groups),
+};
+
+static const struct chv_gpio_pinrange southwest_gpio_ranges[] = {
+ GPIO_PINRANGE(0, 7),
+ GPIO_PINRANGE(15, 22),
+ GPIO_PINRANGE(30, 37),
+ GPIO_PINRANGE(45, 52),
+ GPIO_PINRANGE(60, 67),
+ GPIO_PINRANGE(75, 82),
+ GPIO_PINRANGE(90, 97),
+};
+
+static const struct chv_community southwest_community = {
+ .uid = "1",
+ .pins = southwest_pins,
+ .npins = ARRAY_SIZE(southwest_pins),
+ .groups = southwest_groups,
+ .ngroups = ARRAY_SIZE(southwest_groups),
+ .functions = southwest_functions,
+ .nfunctions = ARRAY_SIZE(southwest_functions),
+ .gpio_ranges = southwest_gpio_ranges,
+ .ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
+ .ngpios = ARRAY_SIZE(southwest_pins),
+};
+
+static const struct pinctrl_pin_desc north_pins[] = {
+ PINCTRL_PIN(0, "GPIO_DFX_0"),
+ PINCTRL_PIN(1, "GPIO_DFX_3"),
+ PINCTRL_PIN(2, "GPIO_DFX_7"),
+ PINCTRL_PIN(3, "GPIO_DFX_1"),
+ PINCTRL_PIN(4, "GPIO_DFX_5"),
+ PINCTRL_PIN(5, "GPIO_DFX_4"),
+ PINCTRL_PIN(6, "GPIO_DFX_8"),
+ PINCTRL_PIN(7, "GPIO_DFX_2"),
+ PINCTRL_PIN(8, "GPIO_DFX_6"),
+
+ PINCTRL_PIN(15, "GPIO_SUS0"),
+ PINCTRL_PIN(16, "SEC_GPIO_SUS10"),
+ PINCTRL_PIN(17, "GPIO_SUS3"),
+ PINCTRL_PIN(18, "GPIO_SUS7"),
+ PINCTRL_PIN(19, "GPIO_SUS1"),
+ PINCTRL_PIN(20, "GPIO_SUS5"),
+ PINCTRL_PIN(21, "SEC_GPIO_SUS11"),
+ PINCTRL_PIN(22, "GPIO_SUS4"),
+ PINCTRL_PIN(23, "SEC_GPIO_SUS8"),
+ PINCTRL_PIN(24, "GPIO_SUS2"),
+ PINCTRL_PIN(25, "GPIO_SUS6"),
+ PINCTRL_PIN(26, "CX_PREQ_B"),
+ PINCTRL_PIN(27, "SEC_GPIO_SUS9"),
+
+ PINCTRL_PIN(30, "TRST_B"),
+ PINCTRL_PIN(31, "TCK"),
+ PINCTRL_PIN(32, "PROCHOT_B"),
+ PINCTRL_PIN(33, "SVIDO_DATA"),
+ PINCTRL_PIN(34, "TMS"),
+ PINCTRL_PIN(35, "CX_PRDY_B_2"),
+ PINCTRL_PIN(36, "TDO_2"),
+ PINCTRL_PIN(37, "CX_PRDY_B"),
+ PINCTRL_PIN(38, "SVIDO_ALERT_B"),
+ PINCTRL_PIN(39, "TDO"),
+ PINCTRL_PIN(40, "SVIDO_CLK"),
+ PINCTRL_PIN(41, "TDI"),
+
+ PINCTRL_PIN(45, "GP_CAMERASB_05"),
+ PINCTRL_PIN(46, "GP_CAMERASB_02"),
+ PINCTRL_PIN(47, "GP_CAMERASB_08"),
+ PINCTRL_PIN(48, "GP_CAMERASB_00"),
+ PINCTRL_PIN(49, "GP_CAMERASB_06"),
+ PINCTRL_PIN(50, "GP_CAMERASB_10"),
+ PINCTRL_PIN(51, "GP_CAMERASB_03"),
+ PINCTRL_PIN(52, "GP_CAMERASB_09"),
+ PINCTRL_PIN(53, "GP_CAMERASB_01"),
+ PINCTRL_PIN(54, "GP_CAMERASB_07"),
+ PINCTRL_PIN(55, "GP_CAMERASB_11"),
+ PINCTRL_PIN(56, "GP_CAMERASB_04"),
+
+ PINCTRL_PIN(60, "PANEL0_BKLTEN"),
+ PINCTRL_PIN(61, "HV_DDI0_HPD"),
+ PINCTRL_PIN(62, "HV_DDI2_DDC_SDA"),
+ PINCTRL_PIN(63, "PANEL1_BKLTCTL"),
+ PINCTRL_PIN(64, "HV_DDI1_HPD"),
+ PINCTRL_PIN(65, "PANEL0_BKLTCTL"),
+ PINCTRL_PIN(66, "HV_DDI0_DDC_SDA"),
+ PINCTRL_PIN(67, "HV_DDI2_DDC_SCL"),
+ PINCTRL_PIN(68, "HV_DDI2_HPD"),
+ PINCTRL_PIN(69, "PANEL1_VDDEN"),
+ PINCTRL_PIN(70, "PANEL1_BKLTEN"),
+ PINCTRL_PIN(71, "HV_DDI0_DDC_SCL"),
+ PINCTRL_PIN(72, "PANEL0_VDDEN"),
+};
+
+static const struct chv_gpio_pinrange north_gpio_ranges[] = {
+ GPIO_PINRANGE(0, 8),
+ GPIO_PINRANGE(15, 27),
+ GPIO_PINRANGE(30, 41),
+ GPIO_PINRANGE(45, 56),
+ GPIO_PINRANGE(60, 72),
+};
+
+static const struct chv_community north_community = {
+ .uid = "2",
+ .pins = north_pins,
+ .npins = ARRAY_SIZE(north_pins),
+ .gpio_ranges = north_gpio_ranges,
+ .ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
+ .ngpios = ARRAY_SIZE(north_pins),
+};
+
+static const struct pinctrl_pin_desc east_pins[] = {
+ PINCTRL_PIN(0, "PMU_SLP_S3_B"),
+ PINCTRL_PIN(1, "PMU_BATLOW_B"),
+ PINCTRL_PIN(2, "SUS_STAT_B"),
+ PINCTRL_PIN(3, "PMU_SLP_S0IX_B"),
+ PINCTRL_PIN(4, "PMU_AC_PRESENT"),
+ PINCTRL_PIN(5, "PMU_PLTRST_B"),
+ PINCTRL_PIN(6, "PMU_SUSCLK"),
+ PINCTRL_PIN(7, "PMU_SLP_LAN_B"),
+ PINCTRL_PIN(8, "PMU_PWRBTN_B"),
+ PINCTRL_PIN(9, "PMU_SLP_S4_B"),
+ PINCTRL_PIN(10, "PMU_WAKE_B"),
+ PINCTRL_PIN(11, "PMU_WAKE_LAN_B"),
+
+ PINCTRL_PIN(15, "MF_ISH_GPIO_3"),
+ PINCTRL_PIN(16, "MF_ISH_GPIO_7"),
+ PINCTRL_PIN(17, "MF_ISH_I2C1_SCL"),
+ PINCTRL_PIN(18, "MF_ISH_GPIO_1"),
+ PINCTRL_PIN(19, "MF_ISH_GPIO_5"),
+ PINCTRL_PIN(20, "MF_ISH_GPIO_9"),
+ PINCTRL_PIN(21, "MF_ISH_GPIO_0"),
+ PINCTRL_PIN(22, "MF_ISH_GPIO_4"),
+ PINCTRL_PIN(23, "MF_ISH_GPIO_8"),
+ PINCTRL_PIN(24, "MF_ISH_GPIO_2"),
+ PINCTRL_PIN(25, "MF_ISH_GPIO_6"),
+ PINCTRL_PIN(26, "MF_ISH_I2C1_SDA"),
+};
+
+static const struct chv_gpio_pinrange east_gpio_ranges[] = {
+ GPIO_PINRANGE(0, 11),
+ GPIO_PINRANGE(15, 26),
+};
+
+static const struct chv_community east_community = {
+ .uid = "3",
+ .pins = east_pins,
+ .npins = ARRAY_SIZE(east_pins),
+ .gpio_ranges = east_gpio_ranges,
+ .ngpio_ranges = ARRAY_SIZE(east_gpio_ranges),
+ .ngpios = ARRAY_SIZE(east_pins),
+};
+
+static const struct pinctrl_pin_desc southeast_pins[] = {
+ PINCTRL_PIN(0, "MF_PLT_CLK0"),
+ PINCTRL_PIN(1, "PWM1"),
+ PINCTRL_PIN(2, "MF_PLT_CLK1"),
+ PINCTRL_PIN(3, "MF_PLT_CLK4"),
+ PINCTRL_PIN(4, "MF_PLT_CLK3"),
+ PINCTRL_PIN(5, "PWM0"),
+ PINCTRL_PIN(6, "MF_PLT_CLK5"),
+ PINCTRL_PIN(7, "MF_PLT_CLK2"),
+
+ PINCTRL_PIN(15, "SDMMC2_D3_CD_B"),
+ PINCTRL_PIN(16, "SDMMC1_CLK"),
+ PINCTRL_PIN(17, "SDMMC1_D0"),
+ PINCTRL_PIN(18, "SDMMC2_D1"),
+ PINCTRL_PIN(19, "SDMMC2_CLK"),
+ PINCTRL_PIN(20, "SDMMC1_D2"),
+ PINCTRL_PIN(21, "SDMMC2_D2"),
+ PINCTRL_PIN(22, "SDMMC2_CMD"),
+ PINCTRL_PIN(23, "SDMMC1_CMD"),
+ PINCTRL_PIN(24, "SDMMC1_D1"),
+ PINCTRL_PIN(25, "SDMMC2_D0"),
+ PINCTRL_PIN(26, "SDMMC1_D3_CD_B"),
+
+ PINCTRL_PIN(30, "SDMMC3_D1"),
+ PINCTRL_PIN(31, "SDMMC3_CLK"),
+ PINCTRL_PIN(32, "SDMMC3_D3"),
+ PINCTRL_PIN(33, "SDMMC3_D2"),
+ PINCTRL_PIN(34, "SDMMC3_CMD"),
+ PINCTRL_PIN(35, "SDMMC3_D0"),
+
+ PINCTRL_PIN(45, "MF_LPC_AD2"),
+ PINCTRL_PIN(46, "LPC_CLKRUNB"),
+ PINCTRL_PIN(47, "MF_LPC_AD0"),
+ PINCTRL_PIN(48, "LPC_FRAMEB"),
+ PINCTRL_PIN(49, "MF_LPC_CLKOUT1"),
+ PINCTRL_PIN(50, "MF_LPC_AD3"),
+ PINCTRL_PIN(51, "MF_LPC_CLKOUT0"),
+ PINCTRL_PIN(52, "MF_LPC_AD1"),
+
+ PINCTRL_PIN(60, "SPI1_MISO"),
+ PINCTRL_PIN(61, "SPI1_CSO_B"),
+ PINCTRL_PIN(62, "SPI1_CLK"),
+ PINCTRL_PIN(63, "MMC1_D6"),
+ PINCTRL_PIN(64, "SPI1_MOSI"),
+ PINCTRL_PIN(65, "MMC1_D5"),
+ PINCTRL_PIN(66, "SPI1_CS1_B"),
+ PINCTRL_PIN(67, "MMC1_D4_SD_WE"),
+ PINCTRL_PIN(68, "MMC1_D7"),
+ PINCTRL_PIN(69, "MMC1_RCLK"),
+
+ PINCTRL_PIN(75, "USB_OC1_B"),
+ PINCTRL_PIN(76, "PMU_RESETBUTTON_B"),
+ PINCTRL_PIN(77, "GPIO_ALERT"),
+ PINCTRL_PIN(78, "SDMMC3_PWR_EN_B"),
+ PINCTRL_PIN(79, "ILB_SERIRQ"),
+ PINCTRL_PIN(80, "USB_OC0_B"),
+ PINCTRL_PIN(81, "SDMMC3_CD_B"),
+ PINCTRL_PIN(82, "SPKR"),
+ PINCTRL_PIN(83, "SUSPWRDNACK"),
+ PINCTRL_PIN(84, "SPARE_PIN"),
+ PINCTRL_PIN(85, "SDMMC3_1P8_EN"),
+};
+
+static const unsigned southeast_pwm0_pins[] = { 5 };
+static const unsigned southeast_pwm1_pins[] = { 1 };
+static const unsigned southeast_sdmmc1_pins[] = {
+ 16, 17, 20, 23, 24, 26, 63, 65, 67, 68, 69,
+};
+static const unsigned southeast_sdmmc2_pins[] = { 15, 18, 19, 21, 22, 25 };
+static const unsigned southeast_sdmmc3_pins[] = {
+ 30, 31, 32, 33, 34, 35, 78, 81, 85,
+};
+static const unsigned southeast_spi1_pins[] = { 60, 61, 62, 64, 66 };
+static const unsigned southeast_spi2_pins[] = { 2, 3, 4, 6, 7 };
+
+static const struct chv_pingroup southeast_groups[] = {
+ PIN_GROUP("pwm0_grp", southeast_pwm0_pins, 1, false),
+ PIN_GROUP("pwm1_grp", southeast_pwm1_pins, 1, false),
+ PIN_GROUP("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
+ PIN_GROUP("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
+ PIN_GROUP("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
+ PIN_GROUP("spi1_grp", southeast_spi1_pins, 1, false),
+ PIN_GROUP("spi2_grp", southeast_spi2_pins, 4, false),
+};
+
+static const char * const southeast_pwm0_groups[] = { "pwm0_grp" };
+static const char * const southeast_pwm1_groups[] = { "pwm1_grp" };
+static const char * const southeast_sdmmc1_groups[] = { "sdmmc1_grp" };
+static const char * const southeast_sdmmc2_groups[] = { "sdmmc2_grp" };
+static const char * const southeast_sdmmc3_groups[] = { "sdmmc3_grp" };
+static const char * const southeast_spi1_groups[] = { "spi1_grp" };
+static const char * const southeast_spi2_groups[] = { "spi2_grp" };
+
+static const struct chv_function southeast_functions[] = {
+ FUNCTION("pwm0", southeast_pwm0_groups),
+ FUNCTION("pwm1", southeast_pwm1_groups),
+ FUNCTION("sdmmc1", southeast_sdmmc1_groups),
+ FUNCTION("sdmmc2", southeast_sdmmc2_groups),
+ FUNCTION("sdmmc3", southeast_sdmmc3_groups),
+ FUNCTION("spi1", southeast_spi1_groups),
+ FUNCTION("spi2", southeast_spi2_groups),
+};
+
+static const struct chv_gpio_pinrange southeast_gpio_ranges[] = {
+ GPIO_PINRANGE(0, 7),
+ GPIO_PINRANGE(15, 26),
+ GPIO_PINRANGE(30, 35),
+ GPIO_PINRANGE(45, 52),
+ GPIO_PINRANGE(60, 69),
+ GPIO_PINRANGE(75, 85),
+};
+
+static const struct chv_community southeast_community = {
+ .uid = "4",
+ .pins = southeast_pins,
+ .npins = ARRAY_SIZE(southeast_pins),
+ .groups = southeast_groups,
+ .ngroups = ARRAY_SIZE(southeast_groups),
+ .functions = southeast_functions,
+ .nfunctions = ARRAY_SIZE(southeast_functions),
+ .gpio_ranges = southeast_gpio_ranges,
+ .ngpio_ranges = ARRAY_SIZE(southeast_gpio_ranges),
+ .ngpios = ARRAY_SIZE(southeast_pins),
+};
+
+static const struct chv_community *chv_communities[] = {
+ &southwest_community,
+ &north_community,
+ &east_community,
+ &southeast_community,
+};
+
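+/*
+ * Pads are grouped into families of MAX_FAMILY_PAD_GPIO_NO (15) pads.
+ * Each family occupies a FAMILY_PAD_REGS_SIZE (0x400) register block
+ * starting at FAMILY_PAD_REGS_OFF (0x4400), and each pad within a family
+ * has GPIO_REGS_SIZE (8) bytes of registers (PADCTRL0 and PADCTRL1).
+ */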
+static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
+ unsigned reg)
+{
+ unsigned family_no = offset / MAX_FAMILY_PAD_GPIO_NO;
+ unsigned pad_no = offset % MAX_FAMILY_PAD_GPIO_NO;
+
+ offset = FAMILY_PAD_REGS_OFF + FAMILY_PAD_REGS_SIZE * family_no +
+ GPIO_REGS_SIZE * pad_no;
+
+ return pctrl->regs + offset + reg;
+}
+
+static void chv_writel(u32 value, void __iomem *reg)
+{
+ writel(value, reg);
+ /* simple readback to confirm the bus transfer is done */
+ readl(reg);
+}
+
+/* When the pad cfg is locked, the driver can only change GPIOTXState or GPIORXState */
+static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned offset)
+{
+ void __iomem *reg;
+
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
+ return readl(reg) & CHV_PADCTRL1_CFGLOCK;
+}
+
+static int chv_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->community->ngroups;
+}
+
+static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->community->groups[group].name;
+}
+
+static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+ const unsigned **pins, unsigned *npins)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->community->groups[group].pins;
+ *npins = pctrl->community->groups[group].npins;
+ return 0;
+}
+
+static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ u32 ctrl0, ctrl1;
+ bool locked;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+ locked = chv_pad_locked(pctrl, offset);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
+ seq_puts(s, "GPIO ");
+ } else {
+ u32 mode;
+
+ mode = ctrl0 & CHV_PADCTRL0_PMODE_MASK;
+ mode >>= CHV_PADCTRL0_PMODE_SHIFT;
+
+ seq_printf(s, "mode %d ", mode);
+ }
+
+ seq_printf(s, "ctrl0 0x%08x ctrl1 0x%08x", ctrl0, ctrl1);
+
+ if (locked)
+ seq_puts(s, " [LOCKED]");
+}
+
+static const struct pinctrl_ops chv_pinctrl_ops = {
+ .get_groups_count = chv_get_groups_count,
+ .get_group_name = chv_get_group_name,
+ .get_group_pins = chv_get_group_pins,
+ .pin_dbg_show = chv_pin_dbg_show,
+};
+
+static int chv_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->community->nfunctions;
+}
+
+static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->community->functions[function].name;
+}
+
+static int chv_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const ngroups)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->community->functions[function].groups;
+ *ngroups = pctrl->community->functions[function].ngroups;
+ return 0;
+}
+
+static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct chv_pingroup *grp;
+ unsigned long flags;
+ int i;
+
+ grp = &pctrl->community->groups[group];
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* Check first that the pad is not locked */
+ for (i = 0; i < grp->npins; i++) {
+ if (chv_pad_locked(pctrl, grp->pins[i])) {
+ dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
+ grp->pins[i]);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ for (i = 0; i < grp->npins; i++) {
+ const struct chv_alternate_function *altfunc = &grp->altfunc;
+ int pin = grp->pins[i];
+ void __iomem *reg;
+ u32 value;
+
+ /* Check if there is pin-specific config */
+ if (grp->overrides) {
+ int j;
+
+ for (j = 0; j < grp->noverrides; j++) {
+ if (grp->overrides[j].pin == pin) {
+ altfunc = &grp->overrides[j];
+ break;
+ }
+ }
+ }
+
+ reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ value = readl(reg);
+ /* Disable GPIO mode */
+ value &= ~CHV_PADCTRL0_GPIOEN;
+ /* Set to desired mode */
+ value &= ~CHV_PADCTRL0_PMODE_MASK;
+ value |= altfunc->mode << CHV_PADCTRL0_PMODE_SHIFT;
+ chv_writel(value, reg);
+
+ /* Update for invert_oe */
+ reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
+ value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK;
+ if (altfunc->invert_oe)
+ value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
+ chv_writel(value, reg);
+
+ dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
+ pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ if (chv_pad_locked(pctrl, offset)) {
+ value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ if (!(value & CHV_PADCTRL0_GPIOEN)) {
+ /* Locked so cannot enable */
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EBUSY;
+ }
+ } else {
+ int i;
+
+ /* Reset the interrupt mapping */
+ for (i = 0; i < ARRAY_SIZE(pctrl->intr_lines); i++) {
+ if (pctrl->intr_lines[i] == offset) {
+ pctrl->intr_lines[i] = 0;
+ break;
+ }
+ }
+
+ /* Disable interrupt generation */
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
+ value = readl(reg);
+ value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
+ value &= ~CHV_PADCTRL1_INVRXTX_MASK;
+ chv_writel(value, reg);
+
+ /* Switch to a GPIO mode */
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ value = readl(reg) | CHV_PADCTRL0_GPIOEN;
+ chv_writel(value, reg);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
+ chv_writel(value, reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ unsigned long flags;
+ u32 ctrl0;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+ if (input)
+ ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ else
+ ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ chv_writel(ctrl0, reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops chv_pinmux_ops = {
+ .get_functions_count = chv_get_functions_count,
+ .get_function_name = chv_get_function_name,
+ .get_function_groups = chv_get_function_groups,
+ .set_mux = chv_pinmux_set_mux,
+ .gpio_request_enable = chv_gpio_request_enable,
+ .gpio_disable_free = chv_gpio_disable_free,
+ .gpio_set_direction = chv_gpio_set_direction,
+};
+
+static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned long flags;
+ u32 ctrl0, ctrl1;
+ u16 arg = 0;
+ u32 term;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
+
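+ /*
+ * The termination field encodes the pull strength: 1 = 20k, 2 = 5k,
+ * 4 = 1k (1k is available as pull-up only).
+ */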
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (term)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!(ctrl0 & CHV_PADCTRL0_TERM_UP))
+ return -EINVAL;
+
+ switch (term) {
+ case CHV_PADCTRL0_TERM_20K:
+ arg = 20000;
+ break;
+ case CHV_PADCTRL0_TERM_5K:
+ arg = 5000;
+ break;
+ case CHV_PADCTRL0_TERM_1K:
+ arg = 1000;
+ break;
+ }
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!term || (ctrl0 & CHV_PADCTRL0_TERM_UP))
+ return -EINVAL;
+
+ switch (term) {
+ case CHV_PADCTRL0_TERM_20K:
+ arg = 20000;
+ break;
+ case CHV_PADCTRL0_TERM_5K:
+ arg = 5000;
+ break;
+ }
+
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!(ctrl1 & CHV_PADCTRL1_ODEN))
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: {
+ u32 cfg;
+
+ cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+ if (cfg != CHV_PADCTRL0_GPIOCFG_HIZ)
+ return -EINVAL;
+
+ break;
+ }
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ enum pin_config_param param, u16 arg)
+{
+ void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ unsigned long flags;
+ u32 ctrl0, pull;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ ctrl0 = readl(reg);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
+
+ switch (arg) {
+ case 1000:
+ /* For 1k there is only pull up */
+ pull = CHV_PADCTRL0_TERM_1K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ case 5000:
+ pull = CHV_PADCTRL0_TERM_5K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ case 20000:
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EINVAL;
+ }
+
+ ctrl0 |= CHV_PADCTRL0_TERM_UP | pull;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
+
+ switch (arg) {
+ case 5000:
+ pull = CHV_PADCTRL0_TERM_5K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ case 20000:
+ pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ break;
+ default:
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EINVAL;
+ }
+
+ ctrl0 |= pull;
+ break;
+
+ default:
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ return -EINVAL;
+ }
+
+ chv_writel(ctrl0, reg);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, unsigned nconfigs)
+{
+ struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ int i, ret;
+ u16 arg;
+
+ if (chv_pad_locked(pctrl, pin))
+ return -EBUSY;
+
+ for (i = 0; i < nconfigs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = chv_config_set_pull(pctrl, pin, param, arg);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ dev_dbg(pctrl->dev, "pin %d set config %d arg %u\n", pin,
+ param, arg);
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops chv_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_set = chv_config_set,
+ .pin_config_get = chv_config_get,
+};
+
+static struct pinctrl_desc chv_pinctrl_desc = {
+ .pctlops = &chv_pinctrl_ops,
+ .pmxops = &chv_pinmux_ops,
+ .confops = &chv_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int chv_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void chv_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static unsigned chv_gpio_offset_to_pin(struct chv_pinctrl *pctrl,
+ unsigned offset)
+{
+ return pctrl->community->pins[offset].number;
+}
+
+static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ int pin = chv_gpio_offset_to_pin(pctrl, offset);
+ u32 ctrl0, cfg;
+
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+
+ cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+
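+ /* Output-only pads report the driven TX state, others the sampled RX state */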
+ if (cfg == CHV_PADCTRL0_GPIOCFG_GPO)
+ return !!(ctrl0 & CHV_PADCTRL0_GPIOTXSTATE);
+ return !!(ctrl0 & CHV_PADCTRL0_GPIORXSTATE);
+}
+
+static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 ctrl0;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ ctrl0 = readl(reg);
+
+ if (value)
+ ctrl0 |= CHV_PADCTRL0_GPIOTXSTATE;
+ else
+ ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE;
+
+ chv_writel(ctrl0, reg);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
+ u32 ctrl0, direction;
+
+ ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+
+ direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+
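+ /* Anything other than GPO (output-only) is reported as input */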
+ return direction != CHV_PADCTRL0_GPIOCFG_GPO;
+}
+
+static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static const struct gpio_chip chv_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = chv_gpio_request,
+ .free = chv_gpio_free,
+ .get_direction = chv_gpio_get_direction,
+ .direction_input = chv_gpio_direction_input,
+ .direction_output = chv_gpio_direction_output,
+ .get = chv_gpio_get,
+ .set = chv_gpio_set,
+};
+
+static void chv_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ u32 intr_line;
+
+ spin_lock(&pctrl->lock);
+
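+ /*
+ * The pad's INTSEL field selects which of the 16 shared interrupt
+ * lines it is routed to; clear that line's status bit.
+ */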
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+ chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+
+ spin_unlock(&pctrl->lock);
+}
+
+static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ u32 value, intr_line;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
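+ /* Mask or unmask the shared interrupt line this pad is routed to */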
+ intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+ value = readl(pctrl->regs + CHV_INTMASK);
+ if (mask)
+ value &= ~BIT(intr_line);
+ else
+ value |= BIT(intr_line);
+ chv_writel(value, pctrl->regs + CHV_INTMASK);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void chv_gpio_irq_mask(struct irq_data *d)
+{
+ chv_gpio_irq_mask_unmask(d, true);
+}
+
+static void chv_gpio_irq_unmask(struct irq_data *d)
+{
+ chv_gpio_irq_mask_unmask(d, false);
+}
+
+static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ unsigned offset = irqd_to_hwirq(d);
+ int pin = chv_gpio_offset_to_pin(pctrl, offset);
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ /*
+ * Pins which can be used as shared interrupts are configured by the
+ * BIOS. The driver trusts the BIOS configuration and assigns a
+ * different handler according to the IRQ type.
+ *
+ * The driver needs to save the mapping between each pin and
+ * its interrupt line:
+ * 1. If the pin cfg is locked in the BIOS:
+ * trust that the BIOS has programmed the IntWakeCfg bits correctly;
+ * the driver just needs to save the mapping.
+ * 2. If the pin cfg is not locked in the BIOS:
+ * the driver programs the IntWakeCfg bits and saves the mapping.
+ */
+ if (!chv_pad_locked(pctrl, pin)) {
+ void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
+
+ value = readl(reg);
+ value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
+ value &= ~CHV_PADCTRL1_INVRXTX_MASK;
+
+ if (type & IRQ_TYPE_EDGE_BOTH) {
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ value |= CHV_PADCTRL1_INTWAKECFG_BOTH;
+ else if (type & IRQ_TYPE_EDGE_RISING)
+ value |= CHV_PADCTRL1_INTWAKECFG_RISING;
+ else if (type & IRQ_TYPE_EDGE_FALLING)
+ value |= CHV_PADCTRL1_INTWAKECFG_FALLING;
+ } else if (type & IRQ_TYPE_LEVEL_MASK) {
+ value |= CHV_PADCTRL1_INTWAKECFG_LEVEL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value |= CHV_PADCTRL1_INVRXTX_RXDATA;
+ }
+
+ chv_writel(value, reg);
+ }
+
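+ /* Remember which GPIO offset sits behind this shared interrupt line */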
+ value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ value &= CHV_PADCTRL0_INTSEL_MASK;
+ value >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+ pctrl->intr_lines[value] = offset;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip chv_gpio_irqchip = {
+ .name = "chv-gpio",
+ .irq_ack = chv_gpio_irq_ack,
+ .irq_mask = chv_gpio_irq_mask,
+ .irq_unmask = chv_gpio_irq_unmask,
+ .irq_set_type = chv_gpio_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned long pending;
+ u32 intr_line;
+
+ chained_irq_enter(chip, desc);
+
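+ /*
+ * Each set bit in INTSTAT corresponds to one of the 16 shared
+ * interrupt lines; translate it back to the GPIO offset recorded
+ * in intr_lines and dispatch the per-pin interrupt.
+ */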
+ pending = readl(pctrl->regs + CHV_INTSTAT);
+ for_each_set_bit(intr_line, &pending, 16) {
+ unsigned irq, offset;
+
+ offset = pctrl->intr_lines[intr_line];
+ irq = irq_find_mapping(gc->irqdomain, offset);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+{
+ const struct chv_gpio_pinrange *range;
+ struct gpio_chip *chip = &pctrl->chip;
+ int ret, i, offset;
+
+ *chip = chv_gpio_chip;
+
+ chip->ngpio = pctrl->community->ngpios;
+ chip->label = dev_name(pctrl->dev);
+ chip->dev = pctrl->dev;
+ chip->base = -1;
+
+ ret = gpiochip_add(chip);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to register gpiochip\n");
+ return ret;
+ }
+
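+ /* Register each GPIO range so chip offsets map to pin numbers */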
+ for (i = 0, offset = 0; i < pctrl->community->ngpio_ranges; i++) {
+ range = &pctrl->community->gpio_ranges[i];
+ ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), offset,
+ range->base, range->npins);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ goto fail;
+ }
+
+ offset += range->npins;
+ }
+
+ /* Mask and clear all interrupts */
+ chv_writel(0, pctrl->regs + CHV_INTMASK);
+ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+
+ ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add IRQ chip\n");
+ goto fail;
+ }
+
+ gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
+ chv_gpio_irq_handler);
+ return 0;
+
+fail:
+ gpiochip_remove(chip);
+
+ return ret;
+}
+
+static int chv_pinctrl_probe(struct platform_device *pdev)
+{
+ struct chv_pinctrl *pctrl;
+ struct acpi_device *adev;
+ struct resource *res;
+ int ret, irq, i;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return -ENODEV;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
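+ /* Match the ACPI _UID against one of the known communities */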
+ for (i = 0; i < ARRAY_SIZE(chv_communities); i++)
+ if (!strcmp(adev->pnp.unique_id, chv_communities[i]->uid)) {
+ pctrl->community = chv_communities[i];
+ break;
+ }
+ if (i == ARRAY_SIZE(chv_communities))
+ return -ENODEV;
+
+ spin_lock_init(&pctrl->lock);
+ pctrl->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->regs))
+ return PTR_ERR(pctrl->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get interrupt number\n");
+ return irq;
+ }
+
+ pctrl->pctldesc = chv_pinctrl_desc;
+ pctrl->pctldesc.name = dev_name(&pdev->dev);
+ pctrl->pctldesc.pins = pctrl->community->pins;
+ pctrl->pctldesc.npins = pctrl->community->npins;
+
+ pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+ if (!pctrl->pctldev) {
+ dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+ return -ENODEV;
+ }
+
+ ret = chv_gpio_probe(pctrl, irq);
+ if (ret) {
+ pinctrl_unregister(pctrl->pctldev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return 0;
+}
+
+static int chv_pinctrl_remove(struct platform_device *pdev)
+{
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&pctrl->chip);
+ pinctrl_unregister(pctrl->pctldev);
+
+ return 0;
+}
+
+static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
+ { "INT33FF" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match);
+
+static struct platform_driver chv_pinctrl_driver = {
+ .probe = chv_pinctrl_probe,
+ .remove = chv_pinctrl_remove,
+ .driver = {
+ .name = "cherryview-pinctrl",
+ .owner = THIS_MODULE,
+ .acpi_match_table = chv_pinctrl_acpi_match,
+ },
+};
+
+static int __init chv_pinctrl_init(void)
+{
+ return platform_driver_register(&chv_pinctrl_driver);
+}
+subsys_initcall(chv_pinctrl_init);
+
+static void __exit chv_pinctrl_exit(void)
+{
+ platform_driver_unregister(&chv_pinctrl_driver);
+}
+module_exit(chv_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Cherryview/Braswell pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 670e5b01c678..c4f51d0cd2cc 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -425,7 +425,6 @@ static int armada_370_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver armada_370_pinctrl_driver = {
.driver = {
.name = "armada-370-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = armada_370_pinctrl_of_match,
},
.probe = armada_370_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index db078fe7ace6..cd7c8f51f7d9 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -445,7 +445,6 @@ static int armada_375_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver armada_375_pinctrl_driver = {
.driver = {
.name = "armada-375-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
},
.probe = armada_375_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 1049f82fb62f..224c6cff6aa2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -448,7 +448,6 @@ static int armada_38x_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver armada_38x_pinctrl_driver = {
.driver = {
.name = "armada-38x-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
},
.probe = armada_38x_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index de311129f7a0..fc3376147c18 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -472,7 +472,6 @@ static int armada_xp_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver armada_xp_pinctrl_driver = {
.driver = {
.name = "armada-xp-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = armada_xp_pinctrl_of_match,
},
.probe = armada_xp_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 3b022178a566..89a52e15f0ae 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -851,7 +851,6 @@ static int dove_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver dove_pinctrl_driver = {
.driver = {
.name = "dove-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = dove_pinctrl_of_match,
},
.probe = dove_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 0d0211a1a0b0..dbc673cf7131 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -489,7 +489,6 @@ static int kirkwood_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_pinctrl_driver = {
.driver = {
.name = "kirkwood-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = kirkwood_pinctrl_of_match,
},
.probe = kirkwood_pinctrl_probe,
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index dda1e7254e15..3a632efb56bb 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -247,7 +247,6 @@ static int orion_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver orion_pinctrl_driver = {
.driver = {
.name = "orion-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(orion_pinctrl_of_match),
},
.probe = orion_pinctrl_probe,
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 228972827132..3d6d97228523 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -891,14 +891,13 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
const char *function = NULL;
unsigned long *configs;
unsigned int nconfigs = 0;
- bool has_config = 0;
struct property *prop;
- const char *group, *gpio_name;
- struct device_node *np_config;
- ret = of_property_read_string(np, "ste,function", &function);
+ ret = of_property_read_string(np, "function", &function);
if (ret >= 0) {
- ret = of_property_count_strings(np, "ste,pins");
+ const char *group;
+
+ ret = of_property_count_strings(np, "groups");
if (ret < 0)
goto exit;
@@ -907,7 +906,7 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_string(np, "ste,pins", prop, group) {
+ of_property_for_each_string(np, "groups", prop, group) {
ret = abx500_dt_add_map_mux(map, reserved_maps,
num_maps, group, function);
if (ret < 0)
@@ -916,18 +915,11 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
}
ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs);
- if (nconfigs)
- has_config = 1;
- np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config) {
- ret = pinconf_generic_parse_dt_config(np_config, &configs,
- &nconfigs);
- if (ret)
- goto exit;
- has_config |= nconfigs;
- }
- if (has_config) {
- ret = of_property_count_strings(np, "ste,pins");
+ if (nconfigs) {
+ const char *gpio_name;
+ const char *pin;
+
+ ret = of_property_count_strings(np, "pins");
if (ret < 0)
goto exit;
@@ -937,8 +929,8 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_string(np, "ste,pins", prop, group) {
- gpio_name = abx500_find_pin_name(pctldev, group);
+ of_property_for_each_string(np, "pins", prop, pin) {
+ gpio_name = abx500_find_pin_name(pctldev, pin);
ret = abx500_dt_add_map_configs(map, reserved_maps,
num_maps, gpio_name, configs, 1);
@@ -1112,6 +1104,7 @@ out:
static const struct pinconf_ops abx500_pinconf_ops = {
.pin_config_get = abx500_pin_config_get,
.pin_config_set = abx500_pin_config_set,
+ .is_generic = true,
};
static struct pinctrl_desc abx500_pinctrl_desc = {
@@ -1285,7 +1278,6 @@ static int abx500_gpio_remove(struct platform_device *pdev)
static struct platform_driver abx500_gpio_driver = {
.driver = {
.name = "abx500-gpio",
- .owner = THIS_MODULE,
.of_match_table = abx500_gpio_match,
},
.probe = abx500_gpio_probe,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index ed39dcafd4f8..2cd71470f270 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -291,6 +291,7 @@ static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
+static const unsigned mmcsd_b_1_pins[] = { STN8815_PIN_D15 };
static const unsigned u1_a_1_pins[] = { STN8815_PIN_M2, STN8815_PIN_L1,
STN8815_PIN_F3, STN8815_PIN_F2 };
static const unsigned i2c1_a_1_pins[] = { STN8815_PIN_L4, STN8815_PIN_L3 };
@@ -305,6 +306,7 @@ static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
static const struct nmk_pingroup nmk_stn8815_groups[] = {
STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
+ STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
@@ -317,7 +319,7 @@ static const struct nmk_pingroup nmk_stn8815_groups[] = {
static const char * const a##_groups[] = { b };
STN8815_FUNC_GROUPS(u0, "u0_a_1");
-STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1");
+STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 746db6acf648..a6a22054c0ba 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1520,12 +1520,13 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned long configs = 0;
bool has_config = 0;
struct property *prop;
- const char *group, *gpio_name;
struct device_node *np_config;
- ret = of_property_read_string(np, "ste,function", &function);
+ ret = of_property_read_string(np, "function", &function);
if (ret >= 0) {
- ret = of_property_count_strings(np, "ste,pins");
+ const char *group;
+
+ ret = of_property_count_strings(np, "groups");
if (ret < 0)
goto exit;
@@ -1535,7 +1536,7 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_string(np, "ste,pins", prop, group) {
+ of_property_for_each_string(np, "groups", prop, group) {
ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
group, function);
if (ret < 0)
@@ -1548,7 +1549,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (np_config)
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
if (has_config) {
- ret = of_property_count_strings(np, "ste,pins");
+ const char *gpio_name;
+ const char *pin;
+
+ ret = of_property_count_strings(np, "pins");
if (ret < 0)
goto exit;
ret = pinctrl_utils_reserve_map(pctldev, map,
@@ -1557,8 +1561,8 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- of_property_for_each_string(np, "ste,pins", prop, group) {
- gpio_name = nmk_find_pin_name(pctldev, group);
+ of_property_for_each_string(np, "pins", prop, pin) {
+ gpio_name = nmk_find_pin_name(pctldev, pin);
ret = nmk_dt_add_map_configs(map, reserved_maps,
num_maps,
@@ -2047,7 +2051,6 @@ static const struct of_device_id nmk_gpio_match[] = {
static struct platform_driver nmk_gpio_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "gpio",
.of_match_table = nmk_gpio_match,
},
@@ -2060,7 +2063,6 @@ static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
static struct platform_driver nmk_pinctrl_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "pinctrl-nomadik",
.of_match_table = nmk_pinctrl_match,
.pm = &nmk_pinctrl_pm_ops,
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 29ff77f90fcb..f78b416d7984 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -32,30 +32,32 @@ struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
+ bool has_arg;
};
-#define PCONFDUMP(a, b, c) { .param = a, .display = b, .format = c }
+#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
+ .has_arg = d }
-static struct pin_config_item conf_items[] = {
- PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL),
- PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL),
- PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL),
- PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL),
- PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL),
+static const struct pin_config_item conf_items[] = {
+ PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
+ PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
+ PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
+ PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
+ PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
- "input bias pull to pin specific state", NULL),
- PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
- PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
- PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
- PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA"),
- PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL),
- PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
- PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
- PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec"),
- PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
- PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL),
- PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
- PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level"),
+ "input bias pull to pin specific state", NULL, false),
+ PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
+ PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
+ PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
+ PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA", true),
+ PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
+ PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
+ PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
+ PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
+ PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
+ PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
+ PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
+ PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
};
void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
@@ -85,11 +87,14 @@ void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
- if (conf_items[i].format &&
- pinconf_to_config_argument(config) != 0)
- seq_printf(s, " (%u %s)",
- pinconf_to_config_argument(config),
- conf_items[i].format);
+ if (conf_items[i].has_arg) {
+ seq_printf(s, " (%u",
+ pinconf_to_config_argument(config));
+ if (conf_items[i].format)
+ seq_printf(s, " %s)", conf_items[i].format);
+ else
+ seq_puts(s, ")");
+ }
}
}
@@ -121,10 +126,14 @@ void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
- if (conf_items[i].format && config != 0)
- seq_printf(s, " (%u %s)",
- pinconf_to_config_argument(config),
- conf_items[i].format);
+ if (conf_items[i].has_arg) {
+ seq_printf(s, " (%u",
+ pinconf_to_config_argument(config));
+ if (conf_items[i].format)
+ seq_printf(s, " %s)", conf_items[i].format);
+ else
+ seq_puts(s, ")");
+ }
}
}
@@ -150,7 +159,7 @@ struct pinconf_generic_dt_params {
u32 default_value;
};
-static struct pinconf_generic_dt_params dt_params[] = {
+static const struct pinconf_generic_dt_params dt_params[] = {
{ "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 },
{ "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 },
{ "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
@@ -200,7 +209,7 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
- struct pinconf_generic_dt_params *par = &dt_params[i];
+ const struct pinconf_generic_dt_params *par = &dt_params[i];
ret = of_property_read_u32(np, par->property, &val);
/* property not found */
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 1f790a4b83fe..169b1bfa00c8 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -634,7 +634,6 @@ MODULE_DEVICE_TABLE(of, as3722_pinctrl_of_match);
static struct platform_driver as3722_pinctrl_driver = {
.driver = {
.name = "as3722-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = as3722_pinctrl_of_match,
},
.probe = as3722_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 354a81d40925..dfd021e8268f 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -25,9 +25,7 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
-#include <mach/hardware.h>
-#include <mach/at91_pio.h>
-
+#include "pinctrl-at91.h"
#include "core.h"
#define MAX_GPIO_BANKS 5
@@ -1344,7 +1342,6 @@ static void at91_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
for (i = 0; i < chip->ngpio; i++) {
unsigned mask = pin_to_mask(i);
const char *gpio_label;
- u32 pdsr;
gpio_label = gpiochip_is_requested(chip, i);
if (!gpio_label)
@@ -1353,11 +1350,13 @@ static void at91_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, "[%s] GPIO%s%d: ",
gpio_label, chip->label, i);
if (mode == AT91_MUX_GPIO) {
- pdsr = readl_relaxed(pio + PIO_PDSR);
-
- seq_printf(s, "[gpio] %s\n",
- pdsr & mask ?
- "set" : "clear");
+ seq_printf(s, "[gpio] ");
+ seq_printf(s, "%s ",
+ readl_relaxed(pio + PIO_OSR) & mask ?
+ "output" : "input");
+ seq_printf(s, "%s\n",
+ readl_relaxed(pio + PIO_PDSR) & mask ?
+ "set" : "clear");
} else {
seq_printf(s, "[periph %c]\n",
mode + 'A' - 1);
@@ -1472,7 +1471,7 @@ static unsigned int gpio_irq_startup(struct irq_data *d)
unsigned pin = d->hwirq;
int ret;
- ret = gpio_lock_as_irq(&at91_gpio->chip, pin);
+ ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
if (ret) {
dev_err(at91_gpio->chip.dev, "unable to lock pin %lu IRQ\n",
d->hwirq);
@@ -1488,7 +1487,7 @@ static void gpio_irq_shutdown(struct irq_data *d)
unsigned pin = d->hwirq;
gpio_irq_mask(d);
- gpio_unlock_as_irq(&at91_gpio->chip, pin);
+ gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
}
#ifdef CONFIG_PM
@@ -1832,7 +1831,6 @@ err:
static struct platform_driver at91_gpio_driver = {
.driver = {
.name = "gpio-at91",
- .owner = THIS_MODULE,
.of_match_table = at91_gpio_of_match,
},
.probe = at91_gpio_probe,
@@ -1841,7 +1839,6 @@ static struct platform_driver at91_gpio_driver = {
static struct platform_driver at91_pinctrl_driver = {
.driver = {
.name = "pinctrl-at91",
- .owner = THIS_MODULE,
.of_match_table = at91_pinctrl_of_match,
},
.probe = at91_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-at91.h b/drivers/pinctrl/pinctrl-at91.h
new file mode 100644
index 000000000000..79b957f1dfa2
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-at91.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Parallel I/O Controller (PIO) - System peripherals registers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PINCTRL_AT91_H
+#define __PINCTRL_AT91_H
+
+#define PIO_PER 0x00 /* Enable Register */
+#define PIO_PDR 0x04 /* Disable Register */
+#define PIO_PSR 0x08 /* Status Register */
+#define PIO_OER 0x10 /* Output Enable Register */
+#define PIO_ODR 0x14 /* Output Disable Register */
+#define PIO_OSR 0x18 /* Output Status Register */
+#define PIO_IFER 0x20 /* Glitch Input Filter Enable */
+#define PIO_IFDR 0x24 /* Glitch Input Filter Disable */
+#define PIO_IFSR 0x28 /* Glitch Input Filter Status */
+#define PIO_SODR 0x30 /* Set Output Data Register */
+#define PIO_CODR 0x34 /* Clear Output Data Register */
+#define PIO_ODSR 0x38 /* Output Data Status Register */
+#define PIO_PDSR 0x3c /* Pin Data Status Register */
+#define PIO_IER 0x40 /* Interrupt Enable Register */
+#define PIO_IDR 0x44 /* Interrupt Disable Register */
+#define PIO_IMR 0x48 /* Interrupt Mask Register */
+#define PIO_ISR 0x4c /* Interrupt Status Register */
+#define PIO_MDER 0x50 /* Multi-driver Enable Register */
+#define PIO_MDDR 0x54 /* Multi-driver Disable Register */
+#define PIO_MDSR 0x58 /* Multi-driver Status Register */
+#define PIO_PUDR 0x60 /* Pull-up Disable Register */
+#define PIO_PUER 0x64 /* Pull-up Enable Register */
+#define PIO_PUSR 0x68 /* Pull-up Status Register */
+#define PIO_ASR 0x70 /* Peripheral A Select Register */
+#define PIO_ABCDSR1 0x70 /* Peripheral ABCD Select Register 1 [some sam9 only] */
+#define PIO_BSR 0x74 /* Peripheral B Select Register */
+#define PIO_ABCDSR2 0x74 /* Peripheral ABCD Select Register 2 [some sam9 only] */
+#define PIO_ABSR 0x78 /* AB Status Register */
+#define PIO_IFSCDR 0x80 /* Input Filter Slow Clock Disable Register */
+#define PIO_IFSCER 0x84 /* Input Filter Slow Clock Enable Register */
+#define PIO_IFSCSR 0x88 /* Input Filter Slow Clock Status Register */
+#define PIO_SCDR 0x8c /* Slow Clock Divider Debouncing Register */
+#define PIO_SCDR_DIV (0x3fff << 0) /* Slow Clock Divider Mask */
+#define PIO_PPDDR 0x90 /* Pad Pull-down Disable Register */
+#define PIO_PPDER 0x94 /* Pad Pull-down Enable Register */
+#define PIO_PPDSR 0x98 /* Pad Pull-down Status Register */
+#define PIO_OWER 0xa0 /* Output Write Enable Register */
+#define PIO_OWDR 0xa4 /* Output Write Disable Register */
+#define PIO_OWSR 0xa8 /* Output Write Status Register */
+#define PIO_AIMER 0xb0 /* Additional Interrupt Modes Enable Register */
+#define PIO_AIMDR 0xb4 /* Additional Interrupt Modes Disable Register */
+#define PIO_AIMMR 0xb8 /* Additional Interrupt Modes Mask Register */
+#define PIO_ESR 0xc0 /* Edge Select Register */
+#define PIO_LSR 0xc4 /* Level Select Register */
+#define PIO_ELSR 0xc8 /* Edge/Level Status Register */
+#define PIO_FELLSR 0xd0 /* Falling Edge/Low Level Select Register */
+#define PIO_REHLSR 0xd4 /* Rising Edge/ High Level Select Register */
+#define PIO_FRLHSR 0xd8 /* Fall/Rise - Low/High Status Register */
+#define PIO_SCHMITT 0x100 /* Schmitt Trigger Register */
+
+#define SAMA5D3_PIO_DRIVER1 0x118 /* PIO Driver 1 register offset */
+#define SAMA5D3_PIO_DRIVER2 0x11C /* PIO Driver 2 register offset */
+
+#define AT91SAM9X5_PIO_DRIVER1 0x114 /* PIO Driver 1 register offset */
+#define AT91SAM9X5_PIO_DRIVER2 0x118 /* PIO Driver 2 register offset */
+
+#endif
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
index a26e0c2ba33e..fa2a00f22ff1 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -1404,11 +1404,6 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
/* So far we can assume there is only 1 bank of registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
@@ -1448,7 +1443,6 @@ static struct of_device_id bcm281xx_pinctrl_of_match[] = {
static struct platform_driver bcm281xx_pinctrl_driver = {
.driver = {
.name = "bcm281xx-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = bcm281xx_pinctrl_of_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index eabba02f71f9..9aa8a3f10b10 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -1062,7 +1062,6 @@ static struct platform_driver bcm2835_pinctrl_driver = {
.remove = bcm2835_pinctrl_remove,
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
.of_match_table = bcm2835_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 2e62689b5e9f..1d21dc226920 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -500,7 +500,6 @@ static struct platform_driver pinctrl_falcon_driver = {
.probe = pinctrl_falcon_probe,
.driver = {
.name = "pinctrl-falcon",
- .owner = THIS_MODULE,
.of_match_table = falcon_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index e3079d3d19fe..26461e30f0ae 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -1062,7 +1062,6 @@ static int palmas_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver palmas_pinctrl_driver = {
.driver = {
.name = "palmas-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = palmas_pinctrl_of_match,
},
.probe = palmas_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 016f4578e494..ba74f0aa60c7 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -856,27 +856,22 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
* leads to this function call (via the pinctrl_gpio_direction_{input|output}()
* function called from the gpiolib interface).
*/
-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned offset, bool input)
+static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
+ int pin, bool input)
{
- struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank;
- struct gpio_chip *chip;
- int pin, ret;
+ int ret;
+ unsigned long flags;
u32 data;
- chip = range->gc;
bank = gc_to_pin_bank(chip);
- pin = offset - chip->base;
-
- dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
- offset, range->name, pin, input ? "input" : "output");
ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
if (ret < 0)
return ret;
+ spin_lock_irqsave(&bank->slock, flags);
+
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
/* set bit to 1 for output, 0 for input */
if (!input)
@@ -885,9 +880,28 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
data &= ~BIT(pin);
writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
+ spin_unlock_irqrestore(&bank->slock, flags);
+
return 0;
}
+static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip;
+ int pin;
+
+ chip = range->gc;
+ pin = offset - chip->base;
+ dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
+ offset, range->name, pin, input ? "input" : "output");
+
+ return _rockchip_pmx_gpio_set_direction(chip, pin, input);
+}
+
static const struct pinmux_ops rockchip_pmx_ops = {
.get_functions_count = rockchip_pmx_get_funcs_count,
.get_function_name = rockchip_pmx_get_func_name,
@@ -917,8 +931,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return false;
}
-static int rockchip_gpio_direction_output(struct gpio_chip *gc,
- unsigned offset, int value);
+static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value);
static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
/* set the pin config settings for a specified pin */
@@ -959,9 +972,10 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return rc;
break;
case PIN_CONFIG_OUTPUT:
- rc = rockchip_gpio_direction_output(&bank->gpio_chip,
- pin - bank->pin_base,
- arg);
+ rockchip_gpio_set(&bank->gpio_chip,
+ pin - bank->pin_base, arg);
+ rc = _rockchip_pmx_gpio_set_direction(&bank->gpio_chip,
+ pin - bank->pin_base, false);
if (rc)
return rc;
break;
@@ -1253,6 +1267,10 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
}
}
+ ret = rockchip_pinctrl_parse_dt(pdev, info);
+ if (ret)
+ return ret;
+
info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info);
if (!info->pctl_dev) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
@@ -1270,12 +1288,6 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
}
- ret = rockchip_pinctrl_parse_dt(pdev, info);
- if (ret) {
- pinctrl_unregister(info->pctl_dev);
- return ret;
- }
-
return 0;
}
@@ -1387,6 +1399,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
u32 polarity = 0, data = 0;
u32 pend;
bool edge_changed = false;
+ unsigned long flags;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1432,10 +1445,14 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
if (bank->toggle_edge_mode && edge_changed) {
/* Interrupt params should only be set with ints disabled */
+ spin_lock_irqsave(&bank->slock, flags);
+
data = readl_relaxed(bank->reg_base + GPIO_INTEN);
writel_relaxed(0, bank->reg_base + GPIO_INTEN);
writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
writel(data, bank->reg_base + GPIO_INTEN);
+
+ spin_unlock_irqrestore(&bank->slock, flags);
}
chained_irq_exit(chip, desc);
@@ -1449,6 +1466,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
u32 polarity;
u32 level;
u32 data;
+ unsigned long flags;
int ret;
/* make sure the pin is configured as gpio input */
@@ -1456,15 +1474,20 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
if (ret < 0)
return ret;
+ spin_lock_irqsave(&bank->slock, flags);
+
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
data &= ~mask;
writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
+ spin_unlock_irqrestore(&bank->slock, flags);
+
if (type & IRQ_TYPE_EDGE_BOTH)
__irq_set_handler_locked(d->irq, handle_edge_irq);
else
__irq_set_handler_locked(d->irq, handle_level_irq);
+ spin_lock_irqsave(&bank->slock, flags);
irq_gc_lock(gc);
level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL);
@@ -1507,6 +1530,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
break;
default:
irq_gc_unlock(gc);
+ spin_unlock_irqrestore(&bank->slock, flags);
return -EINVAL;
}
@@ -1514,6 +1538,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY);
irq_gc_unlock(gc);
+ spin_unlock_irqrestore(&bank->slock, flags);
return 0;
}
@@ -1563,6 +1588,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
+ gc->wake_enabled = IRQ_MSK(bank->nr_pins);
irq_set_handler_data(bank->irq, bank);
irq_set_chained_handler(bank->irq, rockchip_irq_demux);
@@ -1770,6 +1796,51 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
return ctrl;
}
+#define RK3288_GRF_GPIO6C_IOMUX 0x64
+#define GPIO6C6_SEL_WRITE_ENABLE BIT(28)
+
+static u32 rk3288_grf_gpio6c_iomux;
+
+static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
+{
+ struct rockchip_pinctrl *info = dev_get_drvdata(dev);
+ int ret = pinctrl_force_sleep(info->pctl_dev);
+
+ if (ret)
+ return ret;
+
+ /*
+ * The RK3288 GPIO6_C6 mux is modified by the Maskrom during resume, so save
+ * the setting here and restore it at resume.
+ */
+ if (info->ctrl->type == RK3288) {
+ ret = regmap_read(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+ &rk3288_grf_gpio6c_iomux);
+ if (ret) {
+ pinctrl_force_default(info->pctl_dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
+{
+ struct rockchip_pinctrl *info = dev_get_drvdata(dev);
+ int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+ rk3288_grf_gpio6c_iomux |
+ GPIO6C6_SEL_WRITE_ENABLE);
+
+ if (ret)
+ return ret;
+
+ return pinctrl_force_default(info->pctl_dev);
+}
+
+static SIMPLE_DEV_PM_OPS(rockchip_pinctrl_dev_pm_ops, rockchip_pinctrl_suspend,
+ rockchip_pinctrl_resume);
+
static int rockchip_pinctrl_probe(struct platform_device *pdev)
{
struct rockchip_pinctrl *info;
@@ -1982,7 +2053,7 @@ static struct platform_driver rockchip_pinctrl_driver = {
.probe = rockchip_pinctrl_probe,
.driver = {
.name = "rockchip-pinctrl",
- .owner = THIS_MODULE,
+ .pm = &rockchip_pinctrl_dev_pm_ops,
.of_match_table = rockchip_pinctrl_dt_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb94b772ad62..69e84427f913 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -2016,7 +2016,6 @@ static struct platform_driver pcs_driver = {
.probe = pcs_probe,
.remove = pcs_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.of_match_table = pcs_of_match,
},
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 4b1792aad3d8..7c9d51382248 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
gpio_irq, st_gpio_irq_handler);
}
- if (info->irqmux_base > 0 || gpio_irq > 0) {
+ if (info->irqmux_base || gpio_irq > 0) {
err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
0, handle_simple_irq,
IRQ_TYPE_LEVEL_LOW);
@@ -1689,7 +1689,6 @@ static int st_pctl_probe(struct platform_device *pdev)
static struct platform_driver st_pctl_driver = {
.driver = {
.name = "st-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = st_pctl_of_match,
},
.probe = st_pctl_probe,
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 3b9bfcf717ac..160a1f5e9896 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -759,7 +759,7 @@ static struct pinctrl_desc tb10x_pindesc = {
static int tb10x_pinctrl_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
- struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *child;
@@ -771,11 +771,6 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!mem) {
- dev_err(dev, "No memory resource defined.\n");
- return -EINVAL;
- }
-
state = devm_kzalloc(dev, sizeof(struct tb10x_pinctrl) +
of_get_child_count(of_node)
* sizeof(struct tb10x_of_pinfunc),
@@ -787,6 +782,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
state->pinfuncs = (struct tb10x_of_pinfunc *)(state + 1);
mutex_init(&state->mutex);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
state->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(state->base)) {
ret = PTR_ERR(state->base);
@@ -847,7 +843,6 @@ static struct platform_driver tb10x_pinctrl_pdrv = {
.driver = {
.name = "tb10x_pinctrl",
.of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
- .owner = THIS_MODULE
}
};
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
index 1631ec94fb02..753d747d4261 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -20,6 +20,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
@@ -171,7 +172,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
if (err == -EINVAL)
continue;
- return err;
+ goto out;
}
config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, value);
@@ -179,7 +180,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
err = pinctrl_utils_add_config(padctl->pinctrl, &configs,
&num_configs, config);
if (err < 0)
- return err;
+ goto out;
}
if (function)
@@ -190,14 +191,14 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
err = of_property_count_strings(np, "nvidia,lanes");
if (err < 0)
- return err;
+ goto out;
reserve *= err;
err = pinctrl_utils_reserve_map(padctl->pinctrl, maps, reserved_maps,
num_maps, reserve);
if (err < 0)
- return err;
+ goto out;
of_property_for_each_string(np, "nvidia,lanes", prop, group) {
if (function) {
@@ -205,7 +206,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
reserved_maps, num_maps, group,
function);
if (err < 0)
- return err;
+ goto out;
}
if (num_configs) {
@@ -214,11 +215,15 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
configs, num_configs,
PIN_MAP_TYPE_CONFIGS_GROUP);
if (err < 0)
- return err;
+ goto out;
}
}
- return 0;
+ err = 0;
+
+out:
+ kfree(configs);
+ return err;
}
static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl,
@@ -910,7 +915,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto reset;
}
- phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
goto unregister;
@@ -919,7 +924,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy;
phy_set_drvdata(phy, padctl);
- phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops, NULL);
+ phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
goto unregister;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index a3db85b0b75f..52e4ec6386b4 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -1859,7 +1859,6 @@ MODULE_DEVICE_TABLE(of, tegra114_pinctrl_of_match);
static struct platform_driver tegra114_pinctrl_driver = {
.driver = {
.name = "tegra114-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tegra114_pinctrl_of_match,
},
.probe = tegra114_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index 2f9b75c14967..2b20906c5356 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -2072,7 +2072,6 @@ MODULE_DEVICE_TABLE(of, tegra124_pinctrl_of_match);
static struct platform_driver tegra124_pinctrl_driver = {
.driver = {
.name = "tegra124-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tegra124_pinctrl_of_match,
},
.probe = tegra124_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c
index c9805d2e71b0..d3a5722e4acb 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/pinctrl-tegra20.c
@@ -2236,7 +2236,6 @@ static const struct of_device_id tegra20_pinctrl_of_match[] = {
static struct platform_driver tegra20_pinctrl_driver = {
.driver = {
.name = "tegra20-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tegra20_pinctrl_of_match,
},
.probe = tegra20_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
index e7b72e916558..f6edc2ff5494 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/pinctrl-tegra30.c
@@ -2493,7 +2493,6 @@ MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
static struct platform_driver tegra30_pinctrl_driver = {
.driver = {
.name = "tegra30-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tegra30_pinctrl_of_match,
},
.probe = tegra30_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index 3bb6a3b78864..146e48a9b839 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -977,7 +977,6 @@ static struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
static struct platform_driver tz1090_pdc_pinctrl_driver = {
.driver = {
.name = "tz1090-pdc-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tz1090_pdc_pinctrl_of_match,
},
.probe = tz1090_pdc_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 48d36413b99f..df8cb1e5b7b4 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1992,7 +1992,6 @@ static struct of_device_id tz1090_pinctrl_of_match[] = {
static struct platform_driver tz1090_pinctrl_driver = {
.driver = {
.name = "tz1090-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = tz1090_pinctrl_of_match,
},
.probe = tz1090_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index e9c7113d81f2..f931e65aba3a 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1098,7 +1098,6 @@ static const struct of_device_id u300_pinctrl_match[] = {
static struct platform_driver u300_pmx_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = u300_pinctrl_match,
},
.probe = u300_pmx_probe,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 37040ab42890..c5cef59f5965 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -840,7 +840,6 @@ static struct platform_driver pinmux_xway_driver = {
.probe = pinmux_xway_probe,
.driver = {
.name = "pinctrl-xway",
- .owner = THIS_MODULE,
.of_match_table = xway_match,
},
};
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 81275af9638b..3cd243c26b7d 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -47,4 +47,17 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_QCOM_SPMI_PMIC
+ tristate "Qualcomm SPMI PMIC pin controller driver"
+ depends on GPIOLIB && OF && SPMI
+ select REGMAP_SPMI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+ use SPMI to communicate with the SoC. Example PMIC devices are
+ pm8841, pm8941 and pma8084.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index ba8519fcd8d3..bfd79af5f982 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
+obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index c832d7d6b912..cd96699b1929 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -612,7 +612,6 @@ static const struct of_device_id apq8064_pinctrl_of_match[] = {
static struct platform_driver apq8064_pinctrl_driver = {
.driver = {
.name = "apq8064-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = apq8064_pinctrl_of_match,
},
.probe = apq8064_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
index 138cbf6134a5..d07e8df43b90 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -1221,7 +1221,6 @@ static const struct of_device_id apq8084_pinctrl_of_match[] = {
static struct platform_driver apq8084_pinctrl_driver = {
.driver = {
.name = "apq8084-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = apq8084_pinctrl_of_match,
},
.probe = apq8084_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index 81f49a9b4dbe..bcb29c02f4b0 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -645,7 +645,6 @@ static const struct of_device_id ipq8064_pinctrl_of_match[] = {
static struct platform_driver ipq8064_pinctrl_driver = {
.driver = {
.name = "ipq8064-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = ipq8064_pinctrl_of_match,
},
.probe = ipq8064_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index 2ab21ce5575a..ed23e367fe89 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -1259,7 +1259,6 @@ static const struct of_device_id msm8960_pinctrl_of_match[] = {
static struct platform_driver msm8960_pinctrl_driver = {
.driver = {
.name = "msm8960-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = msm8960_pinctrl_of_match,
},
.probe = msm8960_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 3c858384d041..46fe6ad5f97e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -1043,7 +1043,6 @@ static const struct of_device_id msm8x74_pinctrl_of_match[] = {
static struct platform_driver msm8x74_pinctrl_driver = {
.driver = {
.name = "msm8x74-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = msm8x74_pinctrl_of_match,
},
.probe = msm8x74_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
new file mode 100644
index 000000000000..b863b5080890
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define PMIC_GPIO_ADDRESS_RANGE 0x100
+
+/* type and subtype registers base address offsets */
+#define PMIC_GPIO_REG_TYPE 0x4
+#define PMIC_GPIO_REG_SUBTYPE 0x5
+
+/* GPIO peripheral type and subtype out_values */
+#define PMIC_GPIO_TYPE 0x10
+#define PMIC_GPIO_SUBTYPE_GPIO_4CH 0x1
+#define PMIC_GPIO_SUBTYPE_GPIOC_4CH 0x5
+#define PMIC_GPIO_SUBTYPE_GPIO_8CH 0x9
+#define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd
+
+#define PMIC_MPP_REG_RT_STS 0x10
+#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
+
+/* control register base address offsets */
+#define PMIC_GPIO_REG_MODE_CTL 0x40
+#define PMIC_GPIO_REG_DIG_VIN_CTL 0x41
+#define PMIC_GPIO_REG_DIG_PULL_CTL 0x42
+#define PMIC_GPIO_REG_DIG_OUT_CTL 0x45
+#define PMIC_GPIO_REG_EN_CTL 0x46
+
+/* PMIC_GPIO_REG_MODE_CTL */
+#define PMIC_GPIO_REG_MODE_VALUE_SHIFT 0x1
+#define PMIC_GPIO_REG_MODE_FUNCTION_SHIFT 1
+#define PMIC_GPIO_REG_MODE_FUNCTION_MASK 0x7
+#define PMIC_GPIO_REG_MODE_DIR_SHIFT 4
+#define PMIC_GPIO_REG_MODE_DIR_MASK 0x7
+
+/* PMIC_GPIO_REG_DIG_VIN_CTL */
+#define PMIC_GPIO_REG_VIN_SHIFT 0
+#define PMIC_GPIO_REG_VIN_MASK 0x7
+
+/* PMIC_GPIO_REG_DIG_PULL_CTL */
+#define PMIC_GPIO_REG_PULL_SHIFT 0
+#define PMIC_GPIO_REG_PULL_MASK 0x7
+
+#define PMIC_GPIO_PULL_DOWN 4
+#define PMIC_GPIO_PULL_DISABLE 5
+
+/* PMIC_GPIO_REG_DIG_OUT_CTL */
+#define PMIC_GPIO_REG_OUT_STRENGTH_SHIFT 0
+#define PMIC_GPIO_REG_OUT_STRENGTH_MASK 0x3
+#define PMIC_GPIO_REG_OUT_TYPE_SHIFT 4
+#define PMIC_GPIO_REG_OUT_TYPE_MASK 0x3
+
+/*
+ * Output type - indicates pin should be configured as push-pull,
+ * open drain or open source.
+ */
+#define PMIC_GPIO_OUT_BUF_CMOS 0
+#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS 1
+#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS 2
+
+/* PMIC_GPIO_REG_EN_CTL */
+#define PMIC_GPIO_REG_MASTER_EN_SHIFT 7
+
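+/* GPIOs are numbered from 1 in DT and debugfs; pin indexes in this driver start at 0 */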
+#define PMIC_GPIO_PHYSICAL_OFFSET 1
+
+/* Qualcomm specific pin configurations */
+#define PMIC_GPIO_CONF_PULL_UP (PIN_CONFIG_END + 1)
+#define PMIC_GPIO_CONF_STRENGTH (PIN_CONFIG_END + 2)
+
+/**
+ * struct pmic_gpio_pad - keep current GPIO settings
+ * @base: Address base in SPMI device.
+ * @irq: IRQ number which this GPIO generates.
+ * @is_enabled: Set to false when GPIO should be put in high Z state.
+ * @out_value: Cached pin output value
+ * @have_buffer: Set to true if GPIO output can be configured in push-pull,
+ * open-drain or open-source mode.
+ * @output_enabled: Set to true if GPIO output logic is enabled.
+ * @input_enabled: Set to true if GPIO input buffer logic is enabled.
+ * @num_sources: Number of power-sources supported by this GPIO.
+ * @power_source: Current power-source used.
+ * @buffer_type: Push-pull, open-drain or open-source.
+ * @pullup: Constant current which flows through the GPIO output buffer.
+ * @strength: No, Low, Medium, High
+ * @function: See pmic_gpio_functions[]
+ */
+struct pmic_gpio_pad {
+ u16 base;
+ int irq;
+ bool is_enabled;
+ bool out_value;
+ bool have_buffer;
+ bool output_enabled;
+ bool input_enabled;
+ unsigned int num_sources;
+ unsigned int power_source;
+ unsigned int buffer_type;
+ unsigned int pullup;
+ unsigned int strength;
+ unsigned int function;
+};
+
+struct pmic_gpio_state {
+ struct device *dev;
+ struct regmap *map;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
+};
+
+struct pmic_gpio_bindings {
+ const char *property;
+ unsigned param;
+};
+
+static struct pmic_gpio_bindings pmic_gpio_bindings[] = {
+ {"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP},
+ {"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH},
+};
+
+static const char *const pmic_gpio_groups[] = {
+ "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
+ "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+};
+
+static const char *const pmic_gpio_functions[] = {
+ PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
+ PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
+ PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
+ PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+};
+
+static inline struct pmic_gpio_state *to_gpio_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct pmic_gpio_state, chip);
+}
+
+static int pmic_gpio_read(struct pmic_gpio_state *state,
+ struct pmic_gpio_pad *pad, unsigned int addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(state->map, pad->base + addr, &val);
+ if (ret < 0)
+ dev_err(state->dev, "read 0x%x failed\n", addr);
+ else
+ ret = val;
+
+ return ret;
+}
+
+static int pmic_gpio_write(struct pmic_gpio_state *state,
+ struct pmic_gpio_pad *pad, unsigned int addr,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(state->map, pad->base + addr, val);
+ if (ret < 0)
+ dev_err(state->dev, "write 0x%x failed\n", addr);
+
+ return ret;
+}
+
+static int pmic_gpio_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ /* Every PIN is a group */
+ return pctldev->desc->npins;
+}
+
+static const char *pmic_gpio_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned pin)
+{
+ return pctldev->desc->pins[pin].name;
+}
+
+static int pmic_gpio_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
+ const unsigned **pins, unsigned *num_pins)
+{
+ *pins = &pctldev->desc->pins[pin].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static int pmic_gpio_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
+ unsigned long **configs,
+ unsigned int *nconfs)
+{
+ struct pmic_gpio_bindings *par;
+ unsigned long cfg;
+ int ret, i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(pmic_gpio_bindings); i++) {
+ par = &pmic_gpio_bindings[i];
+ ret = of_property_read_u32(np, par->property, &val);
+
+ /* property not found */
+ if (ret == -EINVAL)
+ continue;
+
+ /* use zero as default value */
+ if (ret)
+ val = 0;
+
+ dev_dbg(pctldev->dev, "found %s with value %u\n",
+ par->property, val);
+
+ cfg = pinconf_to_config_packed(par->param, val);
+
+ ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pmic_gpio_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *reserv, unsigned *nmaps,
+ enum pinctrl_map_type type)
+{
+ unsigned long *configs = NULL;
+ unsigned nconfs = 0;
+ struct property *prop;
+ const char *group;
+ int ret;
+
+ ret = pmic_gpio_parse_dt_config(np, pctldev, &configs, &nconfs);
+ if (ret < 0)
+ return ret;
+
+ if (!nconfs)
+ return 0;
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret < 0)
+ goto exit;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "pins", prop, group) {
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ reserv, nmaps, group,
+ configs, nconfs, type);
+ if (ret < 0)
+ break;
+ }
+exit:
+ kfree(configs);
+ return ret;
+}
+
+static int pmic_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *nmaps)
+{
+ enum pinctrl_map_type type;
+ struct device_node *np;
+ unsigned reserv;
+ int ret;
+
+ ret = 0;
+ *map = NULL;
+ *nmaps = 0;
+ reserv = 0;
+ type = PIN_MAP_TYPE_CONFIGS_GROUP;
+
+ for_each_child_of_node(np_config, np) {
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ &reserv, nmaps, type);
+ if (ret)
+ break;
+
+ ret = pmic_gpio_dt_subnode_to_map(pctldev, np, map, &reserv,
+ nmaps, type);
+ if (ret)
+ break;
+ }
+
+ if (ret < 0)
+ pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
+
+ return ret;
+}
+
+static const struct pinctrl_ops pmic_gpio_pinctrl_ops = {
+ .get_groups_count = pmic_gpio_get_groups_count,
+ .get_group_name = pmic_gpio_get_group_name,
+ .get_group_pins = pmic_gpio_get_group_pins,
+ .dt_node_to_map = pmic_gpio_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pmic_gpio_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmic_gpio_functions);
+}
+
+static const char *pmic_gpio_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ return pmic_gpio_functions[function];
+}
+
+static int pmic_gpio_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char *const **groups,
+ unsigned *const num_qgroups)
+{
+ *groups = pmic_gpio_groups;
+ *num_qgroups = pctldev->desc->npins;
+ return 0;
+}
+
+static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned pin)
+{
+ struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_gpio_pad *pad;
+ unsigned int val;
+ int ret;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ pad->function = function;
+
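+ /* MODE_CTL direction field: 0 = input, 1 = output, 2 = both (see pmic_gpio_populate) */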
+ val = 0;
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 2;
+ else
+ val = 1;
+ }
+
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
+
+ return pmic_gpio_write(state, pad, PMIC_GPIO_REG_EN_CTL, val);
+}
+
+static const struct pinmux_ops pmic_gpio_pinmux_ops = {
+ .get_functions_count = pmic_gpio_get_functions_count,
+ .get_function_name = pmic_gpio_get_function_name,
+ .get_function_groups = pmic_gpio_get_function_groups,
+ .set_mux = pmic_gpio_set_mux,
+};
+
+static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ unsigned param = pinconf_to_config_param(*config);
+ struct pmic_gpio_pad *pad;
+ unsigned arg;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = pad->pullup == PMIC_GPIO_PULL_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ arg = !pad->is_enabled;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ arg = pad->power_source;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pad->input_enabled;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ arg = pad->out_value;
+ break;
+ case PMIC_GPIO_CONF_PULL_UP:
+ arg = pad->pullup;
+ break;
+ case PMIC_GPIO_CONF_STRENGTH:
+ arg = pad->strength;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned nconfs)
+{
+ struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_gpio_pad *pad;
+ unsigned param, arg;
+ unsigned int val;
+ int i, ret;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ for (i = 0; i < nconfs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ pad->buffer_type = PMIC_GPIO_OUT_BUF_CMOS;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!pad->have_buffer)
+ return -EINVAL;
+ pad->buffer_type = PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ if (!pad->have_buffer)
+ return -EINVAL;
+ pad->buffer_type = PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ pad->pullup = PMIC_GPIO_PULL_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pad->pullup = PMIC_GPIO_PULL_UP_30;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg)
+ pad->pullup = PMIC_GPIO_PULL_DOWN;
+ else
+ pad->pullup = PMIC_GPIO_PULL_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ pad->is_enabled = false;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ if (arg > pad->num_sources)
+ return -EINVAL;
+ pad->power_source = arg;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pad->input_enabled = arg ? true : false;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pad->output_enabled = true;
+ pad->out_value = arg;
+ break;
+ case PMIC_GPIO_CONF_PULL_UP:
+ if (arg > PMIC_GPIO_PULL_UP_1P5_30)
+ return -EINVAL;
+ pad->pullup = arg;
+ break;
+ case PMIC_GPIO_CONF_STRENGTH:
+ if (arg > PMIC_GPIO_STRENGTH_LOW)
+ return -EINVAL;
+ pad->strength = arg;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ val = pad->power_source << PMIC_GPIO_REG_VIN_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->pullup << PMIC_GPIO_REG_PULL_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_PULL_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
+ val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = 0;
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 2;
+ else
+ val = 1;
+ }
+
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+ return pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+}
+
+static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin)
+{
+ struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_gpio_pad *pad;
+ int ret, val;
+
+ static const char *const biases[] = {
+ "pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
+ "pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
+ };
+ static const char *const buffer_types[] = {
+ "push-pull", "open-drain", "open-source"
+ };
+ static const char *const strengths[] = {
+ "no", "high", "medium", "low"
+ };
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ seq_printf(s, " gpio%-2d:", pin + PMIC_GPIO_PHYSICAL_OFFSET);
+
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_EN_CTL);
+
+ if (val < 0 || !(val >> PMIC_GPIO_REG_MASTER_EN_SHIFT)) {
+ seq_puts(s, " ---");
+ } else {
+
+ if (!pad->input_enabled) {
+ ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
+ if (ret >= 0) {
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
+ }
+ }
+
+ seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
+ seq_printf(s, " %-7s", pmic_gpio_functions[pad->function]);
+ seq_printf(s, " vin-%d", pad->power_source);
+ seq_printf(s, " %-27s", biases[pad->pullup]);
+ seq_printf(s, " %-10s", buffer_types[pad->buffer_type]);
+ seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ seq_printf(s, " %-7s", strengths[pad->strength]);
+ }
+}
+
+static const struct pinconf_ops pmic_gpio_pinconf_ops = {
+ .pin_config_group_get = pmic_gpio_config_get,
+ .pin_config_group_set = pmic_gpio_config_set,
+ .pin_config_group_dbg_show = pmic_gpio_config_dbg_show,
+};
+
+static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+ return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_gpio_direction_output(struct gpio_chip *chip,
+ unsigned pin, int val)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+ return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ struct pmic_gpio_pad *pad;
+ int ret;
+
+ pad = state->ctrl->desc->pins[pin].drv_data;
+
+ if (!pad->is_enabled)
+ return -EINVAL;
+
+ if (pad->input_enabled) {
+ ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
+ if (ret < 0)
+ return ret;
+
+ pad->out_value = ret & PMIC_MPP_REG_RT_STS_VAL_MASK;
+ }
+
+ return pad->out_value;
+}
+
+static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+ pmic_gpio_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_gpio_request(struct gpio_chip *chip, unsigned base)
+{
+ return pinctrl_request_gpio(chip->base + base);
+}
+
+static void pmic_gpio_free(struct gpio_chip *chip, unsigned base)
+{
+ pinctrl_free_gpio(chip->base + base);
+}
+
+static int pmic_gpio_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *gpio_desc,
+ u32 *flags)
+{
+ if (chip->of_gpio_n_cells < 2)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpio_desc->args[1];
+
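+	/* DT numbers GPIOs from 1, while the driver indexes pins from 0 */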
+ return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
+}
+
+static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ struct pmic_gpio_pad *pad;
+
+ pad = state->ctrl->desc->pins[pin].drv_data;
+
+ return pad->irq;
+}
+
+static void pmic_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct pmic_gpio_state *state = to_gpio_state(chip);
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ pmic_gpio_config_dbg_show(state->ctrl, s, i);
+ seq_puts(s, "\n");
+ }
+}
+
+static const struct gpio_chip pmic_gpio_gpio_template = {
+ .direction_input = pmic_gpio_direction_input,
+ .direction_output = pmic_gpio_direction_output,
+ .get = pmic_gpio_get,
+ .set = pmic_gpio_set,
+ .request = pmic_gpio_request,
+ .free = pmic_gpio_free,
+ .of_xlate = pmic_gpio_of_xlate,
+ .to_irq = pmic_gpio_to_irq,
+ .dbg_show = pmic_gpio_dbg_show,
+};
+
+static int pmic_gpio_populate(struct pmic_gpio_state *state,
+ struct pmic_gpio_pad *pad)
+{
+ int type, subtype, val, dir;
+
+ type = pmic_gpio_read(state, pad, PMIC_GPIO_REG_TYPE);
+ if (type < 0)
+ return type;
+
+ if (type != PMIC_GPIO_TYPE) {
+ dev_err(state->dev, "incorrect block type 0x%x at 0x%x\n",
+ type, pad->base);
+ return -ENODEV;
+ }
+
+ subtype = pmic_gpio_read(state, pad, PMIC_GPIO_REG_SUBTYPE);
+ if (subtype < 0)
+ return subtype;
+
+ switch (subtype) {
+ case PMIC_GPIO_SUBTYPE_GPIO_4CH:
+ pad->have_buffer = true;
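+		/* fall through */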
+ case PMIC_GPIO_SUBTYPE_GPIOC_4CH:
+ pad->num_sources = 4;
+ break;
+ case PMIC_GPIO_SUBTYPE_GPIO_8CH:
+ pad->have_buffer = true;
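+		/* fall through */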
+ case PMIC_GPIO_SUBTYPE_GPIOC_8CH:
+ pad->num_sources = 8;
+ break;
+ default:
+ dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
+ return -ENODEV;
+ }
+
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
+ if (val < 0)
+ return val;
+
+ pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
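+	/* Decode the direction field: 0 = input, 1 = output, 2 = both */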
+ dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
+ switch (dir) {
+ case 0:
+ pad->input_enabled = true;
+ pad->output_enabled = false;
+ break;
+ case 1:
+ pad->input_enabled = false;
+ pad->output_enabled = true;
+ break;
+ case 2:
+ pad->input_enabled = true;
+ pad->output_enabled = true;
+ break;
+ default:
+ dev_err(state->dev, "unknown GPIO direction\n");
+ return -ENODEV;
+ }
+
+ pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;
+
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL);
+ if (val < 0)
+ return val;
+
+ pad->power_source = val >> PMIC_GPIO_REG_VIN_SHIFT;
+ pad->power_source &= PMIC_GPIO_REG_VIN_MASK;
+
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_PULL_CTL);
+ if (val < 0)
+ return val;
+
+ pad->pullup = val >> PMIC_GPIO_REG_PULL_SHIFT;
+ pad->pullup &= PMIC_GPIO_REG_PULL_MASK;
+
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL);
+ if (val < 0)
+ return val;
+
+ pad->strength = val >> PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
+ pad->strength &= PMIC_GPIO_REG_OUT_STRENGTH_MASK;
+
+ pad->buffer_type = val >> PMIC_GPIO_REG_OUT_TYPE_SHIFT;
+ pad->buffer_type &= PMIC_GPIO_REG_OUT_TYPE_MASK;
+
+ /* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
+ pad->is_enabled = true;
+ return 0;
+}
+
+static int pmic_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_pin_desc *pindesc;
+ struct pinctrl_desc *pctrldesc;
+ struct pmic_gpio_pad *pad, *pads;
+ struct pmic_gpio_state *state;
+ int ret, npins, i;
+ u32 res[2];
+
+ ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
+ if (ret < 0) {
+		dev_err(dev, "missing base address and/or range\n");
+ return ret;
+ }
+
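+	/* one pin per PMIC_GPIO_ADDRESS_RANGE block in the "reg" window */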
+ npins = res[1] / PMIC_GPIO_ADDRESS_RANGE;
+
+ if (!npins)
+ return -EINVAL;
+
+ BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, state);
+
+ state->dev = &pdev->dev;
+ state->map = dev_get_regmap(dev->parent, NULL);
+
+ pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+ if (!pindesc)
+ return -ENOMEM;
+
+ pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return -ENOMEM;
+
+ pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+ if (!pctrldesc)
+ return -ENOMEM;
+
+ pctrldesc->pctlops = &pmic_gpio_pinctrl_ops;
+ pctrldesc->pmxops = &pmic_gpio_pinmux_ops;
+ pctrldesc->confops = &pmic_gpio_pinconf_ops;
+ pctrldesc->owner = THIS_MODULE;
+ pctrldesc->name = dev_name(dev);
+ pctrldesc->pins = pindesc;
+ pctrldesc->npins = npins;
+
+ for (i = 0; i < npins; i++, pindesc++) {
+ pad = &pads[i];
+ pindesc->drv_data = pad;
+ pindesc->number = i;
+ pindesc->name = pmic_gpio_groups[i];
+
+ pad->irq = platform_get_irq(pdev, i);
+ if (pad->irq < 0)
+ return pad->irq;
+
+ pad->base = res[0] + i * PMIC_GPIO_ADDRESS_RANGE;
+
+ ret = pmic_gpio_populate(state, pad);
+ if (ret < 0)
+ return ret;
+ }
+
+ state->chip = pmic_gpio_gpio_template;
+ state->chip.dev = dev;
+ state->chip.base = -1;
+ state->chip.ngpio = npins;
+ state->chip.label = dev_name(dev);
+ state->chip.of_gpio_n_cells = 2;
+ state->chip.can_sleep = false;
+
+ state->ctrl = pinctrl_register(pctrldesc, dev, state);
+ if (!state->ctrl)
+ return -ENODEV;
+
+ ret = gpiochip_add(&state->chip);
+ if (ret) {
+ dev_err(state->dev, "can't add gpio chip\n");
+ goto err_chip;
+ }
+
+ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
+ if (ret) {
+ dev_err(dev, "failed to add pin range\n");
+ goto err_range;
+ }
+
+ return 0;
+
+err_range:
+ gpiochip_remove(&state->chip);
+err_chip:
+ pinctrl_unregister(state->ctrl);
+ return ret;
+}
+
+static int pmic_gpio_remove(struct platform_device *pdev)
+{
+ struct pmic_gpio_state *state = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&state->chip);
+ pinctrl_unregister(state->ctrl);
+ return 0;
+}
+
+static const struct of_device_id pmic_gpio_of_match[] = {
+	{ .compatible = "qcom,pm8941-gpio" },	/* 36 GPIOs */
+	{ .compatible = "qcom,pma8084-gpio" },	/* 22 GPIOs */
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, pmic_gpio_of_match);
+
+static struct platform_driver pmic_gpio_driver = {
+ .driver = {
+ .name = "qcom-spmi-gpio",
+ .of_match_table = pmic_gpio_of_match,
+ },
+ .probe = pmic_gpio_probe,
+ .remove = pmic_gpio_remove,
+};
+
+module_platform_driver(pmic_gpio_driver);
+
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC GPIO pin control driver");
+MODULE_ALIAS("platform:qcom-spmi-gpio");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
new file mode 100644
index 000000000000..a8924dba335e
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define PMIC_MPP_ADDRESS_RANGE 0x100
+
+/*
+ * Pull Up Values - select the strength of the pull-up resistor. The
+ * pull-up is applied in bidirectional mode only; the hardware ignores
+ * this configuration when operating in any other mode.
+ */
+#define PMIC_MPP_PULL_UP_0P6KOHM 0
+#define PMIC_MPP_PULL_UP_10KOHM 1
+#define PMIC_MPP_PULL_UP_30KOHM 2
+#define PMIC_MPP_PULL_UP_OPEN 3
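+
+/*
+ * pmic_mpp_config_{get,set}() translate these values to/from pull-up
+ * strength arguments of 600, 10000 and 30000 ohms.
+ */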
+
+/* type registers base address bases */
+#define PMIC_MPP_REG_TYPE 0x4
+#define PMIC_MPP_REG_SUBTYPE 0x5
+
+/* mpp peripheral type and subtype values */
+#define PMIC_MPP_TYPE 0x11
+#define PMIC_MPP_SUBTYPE_4CH_NO_ANA_OUT 0x3
+#define PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT 0x4
+#define PMIC_MPP_SUBTYPE_4CH_NO_SINK 0x5
+#define PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK 0x6
+#define PMIC_MPP_SUBTYPE_4CH_FULL_FUNC 0x7
+#define PMIC_MPP_SUBTYPE_8CH_FULL_FUNC 0xf
+
+#define PMIC_MPP_REG_RT_STS 0x10
+#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
+
+/* control register base address bases */
+#define PMIC_MPP_REG_MODE_CTL 0x40
+#define PMIC_MPP_REG_DIG_VIN_CTL 0x41
+#define PMIC_MPP_REG_DIG_PULL_CTL 0x42
+#define PMIC_MPP_REG_DIG_IN_CTL 0x43
+#define PMIC_MPP_REG_EN_CTL 0x46
+#define PMIC_MPP_REG_AIN_CTL 0x4a
+
+/* PMIC_MPP_REG_MODE_CTL */
+#define PMIC_MPP_REG_MODE_VALUE_MASK 0x1
+#define PMIC_MPP_REG_MODE_FUNCTION_SHIFT 1
+#define PMIC_MPP_REG_MODE_FUNCTION_MASK 0x7
+#define PMIC_MPP_REG_MODE_DIR_SHIFT 4
+#define PMIC_MPP_REG_MODE_DIR_MASK 0x7
+
+/* PMIC_MPP_REG_DIG_VIN_CTL */
+#define PMIC_MPP_REG_VIN_SHIFT 0
+#define PMIC_MPP_REG_VIN_MASK 0x7
+
+/* PMIC_MPP_REG_DIG_PULL_CTL */
+#define PMIC_MPP_REG_PULL_SHIFT 0
+#define PMIC_MPP_REG_PULL_MASK 0x7
+
+/* PMIC_MPP_REG_EN_CTL */
+#define PMIC_MPP_REG_MASTER_EN_SHIFT 7
+
+/* PMIC_MPP_REG_AIN_CTL */
+#define PMIC_MPP_REG_AIN_ROUTE_SHIFT 0
+#define PMIC_MPP_REG_AIN_ROUTE_MASK 0x7
+
+#define PMIC_MPP_PHYSICAL_OFFSET 1
+
+/* Qualcomm specific pin configurations */
+#define PMIC_MPP_CONF_AMUX_ROUTE (PIN_CONFIG_END + 1)
+#define PMIC_MPP_CONF_ANALOG_MODE (PIN_CONFIG_END + 2)
+
+/**
+ * struct pmic_mpp_pad - keep current MPP settings
+ * @base: Address base in SPMI device.
+ * @irq:	IRQ number which this MPP generates.
+ * @is_enabled: Set to false when MPP should be put in high Z state.
+ * @out_value: Cached pin output value.
+ * @output_enabled: Set to true if MPP output logic is enabled.
+ * @input_enabled: Set to true if MPP input buffer logic is enabled.
+ * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
+ * Output or Bidirectional Analog mode.
+ * @num_sources: Number of power-sources supported by this MPP.
+ * @power_source: Current power-source used.
+ * @amux_input: Set the source for analog input.
+ * @pullup: Pullup resistor value. Valid in Bidirectional mode only.
+ * @function: See pmic_mpp_functions[].
+ */
+struct pmic_mpp_pad {
+ u16 base;
+ int irq;
+ bool is_enabled;
+ bool out_value;
+ bool output_enabled;
+ bool input_enabled;
+ bool analog_mode;
+ unsigned int num_sources;
+ unsigned int power_source;
+ unsigned int amux_input;
+ unsigned int pullup;
+ unsigned int function;
+};
+
+struct pmic_mpp_state {
+ struct device *dev;
+ struct regmap *map;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
+};
+
+struct pmic_mpp_bindings {
+ const char *property;
+ unsigned param;
+};
+
+static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
+ {"qcom,amux-route", PMIC_MPP_CONF_AMUX_ROUTE},
+ {"qcom,analog-mode", PMIC_MPP_CONF_ANALOG_MODE},
+};
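+
+/*
+ * Purely illustrative (hypothetical) consumer node using the properties
+ * above together with the generic "pins" property:
+ *
+ *	adc_mpp4: adc-mpp4 {
+ *		pins = "mpp4";
+ *		qcom,analog-mode;
+ *		qcom,amux-route = <1>;
+ *	};
+ */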
+
+static const char *const pmic_mpp_groups[] = {
+ "mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
+};
+
+static const char *const pmic_mpp_functions[] = {
+ PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
+ "reserved1", "reserved2",
+ PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
+ PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
+};
+
+static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct pmic_mpp_state, chip);
+}
+
+static int pmic_mpp_read(struct pmic_mpp_state *state,
+ struct pmic_mpp_pad *pad, unsigned int addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(state->map, pad->base + addr, &val);
+ if (ret < 0)
+ dev_err(state->dev, "read 0x%x failed\n", addr);
+ else
+ ret = val;
+
+ return ret;
+}
+
+static int pmic_mpp_write(struct pmic_mpp_state *state,
+ struct pmic_mpp_pad *pad, unsigned int addr,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(state->map, pad->base + addr, val);
+ if (ret < 0)
+ dev_err(state->dev, "write 0x%x failed\n", addr);
+
+ return ret;
+}
+
+static int pmic_mpp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ /* Every PIN is a group */
+ return pctldev->desc->npins;
+}
+
+static const char *pmic_mpp_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned pin)
+{
+ return pctldev->desc->pins[pin].name;
+}
+
+static int pmic_mpp_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ const unsigned **pins, unsigned *num_pins)
+{
+ *pins = &pctldev->desc->pins[pin].number;
+ *num_pins = 1;
+ return 0;
+}
+
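+/*
+ * Translate the Qualcomm specific properties listed in pmic_mpp_bindings[]
+ * into packed pinconf values understood by pmic_mpp_config_set().
+ */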
+static int pmic_mpp_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
+ unsigned long **configs,
+ unsigned int *nconfs)
+{
+ struct pmic_mpp_bindings *par;
+ unsigned long cfg;
+ int ret, i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
+ par = &pmic_mpp_bindings[i];
+ ret = of_property_read_u32(np, par->property, &val);
+
+ /* property not found */
+ if (ret == -EINVAL)
+ continue;
+
+ /* use zero as default value, when no value is specified */
+ if (ret)
+ val = 0;
+
+ dev_dbg(pctldev->dev, "found %s with value %u\n",
+ par->property, val);
+
+ cfg = pinconf_to_config_packed(par->param, val);
+
+ ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *reserv, unsigned *nmaps,
+ enum pinctrl_map_type type)
+{
+ unsigned long *configs = NULL;
+ unsigned nconfs = 0;
+ struct property *prop;
+ const char *group;
+ int ret;
+
+ ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
+ if (ret < 0)
+ return ret;
+
+ if (!nconfs)
+ return 0;
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret < 0)
+ goto exit;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "pins", prop, group) {
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ reserv, nmaps, group,
+ configs, nconfs, type);
+ if (ret < 0)
+ break;
+ }
+exit:
+ kfree(configs);
+ return ret;
+}
+
+static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *nmaps)
+{
+ struct device_node *np;
+ enum pinctrl_map_type type;
+ unsigned reserv;
+ int ret;
+
+ ret = 0;
+ *map = NULL;
+ *nmaps = 0;
+ reserv = 0;
+ type = PIN_MAP_TYPE_CONFIGS_GROUP;
+
+ for_each_child_of_node(np_config, np) {
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ &reserv, nmaps, type);
+ if (ret)
+ break;
+
+ ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
+ nmaps, type);
+ if (ret)
+ break;
+ }
+
+ if (ret < 0)
+ pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
+
+ return ret;
+}
+
+static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
+ .get_groups_count = pmic_mpp_get_groups_count,
+ .get_group_name = pmic_mpp_get_group_name,
+ .get_group_pins = pmic_mpp_get_group_pins,
+ .dt_node_to_map = pmic_mpp_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pmic_mpp_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmic_mpp_functions);
+}
+
+static const char *pmic_mpp_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ return pmic_mpp_functions[function];
+}
+
+static int pmic_mpp_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char *const **groups,
+ unsigned *const num_qgroups)
+{
+ *groups = pmic_mpp_groups;
+ *num_qgroups = pctldev->desc->npins;
+ return 0;
+}
+
+static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned pin)
+{
+ struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_mpp_pad *pad;
+ unsigned int val;
+ int ret;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ pad->function = function;
+
+ if (!pad->analog_mode) {
+ val = 0; /* just digital input */
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 2; /* digital input and output */
+ else
+ val = 1; /* just digital output */
+ }
+ } else {
+ val = 4; /* just analog input */
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 3; /* analog input and output */
+ else
+ val = 5; /* just analog output */
+ }
+ }
+
+ val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+
+ return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
+}
+
+static const struct pinmux_ops pmic_mpp_pinmux_ops = {
+ .get_functions_count = pmic_mpp_get_functions_count,
+ .get_function_name = pmic_mpp_get_function_name,
+ .get_function_groups = pmic_mpp_get_function_groups,
+ .set_mux = pmic_mpp_set_mux,
+};
+
+static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ unsigned param = pinconf_to_config_param(*config);
+ struct pmic_mpp_pad *pad;
+ unsigned arg = 0;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ switch (pad->pullup) {
+ case PMIC_MPP_PULL_UP_OPEN:
+ arg = 0;
+ break;
+ case PMIC_MPP_PULL_UP_0P6KOHM:
+ arg = 600;
+ break;
+ case PMIC_MPP_PULL_UP_10KOHM:
+ arg = 10000;
+ break;
+ case PMIC_MPP_PULL_UP_30KOHM:
+ arg = 30000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ arg = !pad->is_enabled;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ arg = pad->power_source;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pad->input_enabled;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ arg = pad->out_value;
+ break;
+ case PMIC_MPP_CONF_AMUX_ROUTE:
+ arg = pad->amux_input;
+ break;
+ case PMIC_MPP_CONF_ANALOG_MODE:
+ arg = pad->analog_mode;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Convert register value to pinconf value */
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned nconfs)
+{
+ struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_mpp_pad *pad;
+ unsigned param, arg;
+ unsigned int val;
+ int i, ret;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ for (i = 0; i < nconfs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ pad->pullup = PMIC_MPP_PULL_UP_OPEN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ switch (arg) {
+ case 600:
+ pad->pullup = PMIC_MPP_PULL_UP_0P6KOHM;
+ break;
+ case 10000:
+ pad->pullup = PMIC_MPP_PULL_UP_10KOHM;
+ break;
+ case 30000:
+ pad->pullup = PMIC_MPP_PULL_UP_30KOHM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ pad->is_enabled = false;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ if (arg >= pad->num_sources)
+ return -EINVAL;
+ pad->power_source = arg;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pad->input_enabled = arg ? true : false;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pad->output_enabled = true;
+ pad->out_value = arg;
+ break;
+ case PMIC_MPP_CONF_AMUX_ROUTE:
+ if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
+ return -EINVAL;
+ pad->amux_input = arg;
+ break;
+ case PMIC_MPP_CONF_ANALOG_MODE:
+ pad->analog_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ val = pad->power_source << PMIC_MPP_REG_VIN_SHIFT;
+
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_VIN_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
+
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->amux_input & PMIC_MPP_REG_AIN_ROUTE_MASK;
+
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AIN_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ if (!pad->analog_mode) {
+ val = 0; /* just digital input */
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 2; /* digital input and output */
+ else
+ val = 1; /* just digital output */
+ }
+ } else {
+ val = 4; /* just analog input */
+ if (pad->output_enabled) {
+ if (pad->input_enabled)
+ val = 3; /* analog input and output */
+ else
+ val = 5; /* just analog output */
+ }
+ }
+
+ val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
+ val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+
+ return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+}
+
+static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin)
+{
+ struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
+ struct pmic_mpp_pad *pad;
+ int ret, val;
+
+ static const char *const biases[] = {
+ "0.6kOhm", "10kOhm", "30kOhm", "Disabled"
+ };
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
+
+ if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
+ seq_puts(s, " ---");
+ } else {
+
+ if (pad->input_enabled) {
+ ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
+			if (ret >= 0) {
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
+ }
+ }
+
+ seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
+ seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
+ seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
+ seq_printf(s, " vin-%d", pad->power_source);
+ seq_printf(s, " %-8s", biases[pad->pullup]);
+ seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+ }
+}
+
+static const struct pinconf_ops pmic_mpp_pinconf_ops = {
+ .pin_config_group_get = pmic_mpp_config_get,
+ .pin_config_group_set = pmic_mpp_config_set,
+ .pin_config_group_dbg_show = pmic_mpp_config_dbg_show,
+};
+
+static int pmic_mpp_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+ return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_mpp_direction_output(struct gpio_chip *chip,
+ unsigned pin, int val)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+ return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_mpp_get(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ struct pmic_mpp_pad *pad;
+ int ret;
+
+ pad = state->ctrl->desc->pins[pin].drv_data;
+
+ if (pad->input_enabled) {
+ ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
+ if (ret < 0)
+ return ret;
+
+ pad->out_value = ret & PMIC_MPP_REG_RT_STS_VAL_MASK;
+ }
+
+ return pad->out_value;
+}
+
+static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+ pmic_mpp_config_set(state->ctrl, pin, &config, 1);
+}
+
+static int pmic_mpp_request(struct gpio_chip *chip, unsigned base)
+{
+ return pinctrl_request_gpio(chip->base + base);
+}
+
+static void pmic_mpp_free(struct gpio_chip *chip, unsigned base)
+{
+ pinctrl_free_gpio(chip->base + base);
+}
+
+static int pmic_mpp_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *gpio_desc,
+ u32 *flags)
+{
+ if (chip->of_gpio_n_cells < 2)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpio_desc->args[1];
+
+ return gpio_desc->args[0] - PMIC_MPP_PHYSICAL_OFFSET;
+}
+
+static int pmic_mpp_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ struct pmic_mpp_pad *pad;
+
+ pad = state->ctrl->desc->pins[pin].drv_data;
+
+ return pad->irq;
+}
+
+static void pmic_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct pmic_mpp_state *state = to_mpp_state(chip);
+ unsigned i;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ pmic_mpp_config_dbg_show(state->ctrl, s, i);
+ seq_puts(s, "\n");
+ }
+}
+
+static const struct gpio_chip pmic_mpp_gpio_template = {
+ .direction_input = pmic_mpp_direction_input,
+ .direction_output = pmic_mpp_direction_output,
+ .get = pmic_mpp_get,
+ .set = pmic_mpp_set,
+ .request = pmic_mpp_request,
+ .free = pmic_mpp_free,
+ .of_xlate = pmic_mpp_of_xlate,
+ .to_irq = pmic_mpp_to_irq,
+ .dbg_show = pmic_mpp_dbg_show,
+};
+
+static int pmic_mpp_populate(struct pmic_mpp_state *state,
+ struct pmic_mpp_pad *pad)
+{
+ int type, subtype, val, dir;
+
+ type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
+ if (type < 0)
+ return type;
+
+ if (type != PMIC_MPP_TYPE) {
+ dev_err(state->dev, "incorrect block type 0x%x at 0x%x\n",
+ type, pad->base);
+ return -ENODEV;
+ }
+
+ subtype = pmic_mpp_read(state, pad, PMIC_MPP_REG_SUBTYPE);
+ if (subtype < 0)
+ return subtype;
+
+ switch (subtype) {
+ case PMIC_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+ case PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
+ case PMIC_MPP_SUBTYPE_4CH_NO_SINK:
+ case PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK:
+ case PMIC_MPP_SUBTYPE_4CH_FULL_FUNC:
+ pad->num_sources = 4;
+ break;
+ case PMIC_MPP_SUBTYPE_8CH_FULL_FUNC:
+ pad->num_sources = 8;
+ break;
+ default:
+ dev_err(state->dev, "unknown MPP type 0x%x at 0x%x\n",
+ subtype, pad->base);
+ return -ENODEV;
+ }
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_MODE_CTL);
+ if (val < 0)
+ return val;
+
+ pad->out_value = val & PMIC_MPP_REG_MODE_VALUE_MASK;
+
+ dir = val >> PMIC_MPP_REG_MODE_DIR_SHIFT;
+ dir &= PMIC_MPP_REG_MODE_DIR_MASK;
+
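+	/*
+	 * 0-2 are the digital input/output/bidirectional modes, 3-5 are the
+	 * analog bidirectional/input/output modes (see pmic_mpp_config_set()).
+	 */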
+ switch (dir) {
+ case 0:
+ pad->input_enabled = true;
+ pad->output_enabled = false;
+ pad->analog_mode = false;
+ break;
+ case 1:
+ pad->input_enabled = false;
+ pad->output_enabled = true;
+ pad->analog_mode = false;
+ break;
+ case 2:
+ pad->input_enabled = true;
+ pad->output_enabled = true;
+ pad->analog_mode = false;
+ break;
+ case 3:
+ pad->input_enabled = true;
+ pad->output_enabled = true;
+ pad->analog_mode = true;
+ break;
+ case 4:
+ pad->input_enabled = true;
+ pad->output_enabled = false;
+ pad->analog_mode = true;
+ break;
+ case 5:
+ pad->input_enabled = false;
+ pad->output_enabled = true;
+ pad->analog_mode = true;
+ break;
+ default:
+ dev_err(state->dev, "unknown MPP direction\n");
+ return -ENODEV;
+ }
+
+ pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+ pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
+ if (val < 0)
+ return val;
+
+ pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
+ pad->power_source &= PMIC_MPP_REG_VIN_MASK;
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
+ if (val < 0)
+ return val;
+
+ pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
+ pad->pullup &= PMIC_MPP_REG_PULL_MASK;
+
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AIN_CTL);
+ if (val < 0)
+ return val;
+
+ pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
+ pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;
+
+ /* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
+ pad->is_enabled = true;
+ return 0;
+}
+
+static int pmic_mpp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_pin_desc *pindesc;
+ struct pinctrl_desc *pctrldesc;
+ struct pmic_mpp_pad *pad, *pads;
+ struct pmic_mpp_state *state;
+ int ret, npins, i;
+ u32 res[2];
+
+ ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
+ if (ret < 0) {
+		dev_err(dev, "missing base address and/or range\n");
+ return ret;
+ }
+
+ npins = res[1] / PMIC_MPP_ADDRESS_RANGE;
+ if (!npins)
+ return -EINVAL;
+
+ BUG_ON(npins > ARRAY_SIZE(pmic_mpp_groups));
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, state);
+
+ state->dev = &pdev->dev;
+ state->map = dev_get_regmap(dev->parent, NULL);
+
+ pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+ if (!pindesc)
+ return -ENOMEM;
+
+ pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return -ENOMEM;
+
+ pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+ if (!pctrldesc)
+ return -ENOMEM;
+
+ pctrldesc->pctlops = &pmic_mpp_pinctrl_ops;
+ pctrldesc->pmxops = &pmic_mpp_pinmux_ops;
+ pctrldesc->confops = &pmic_mpp_pinconf_ops;
+ pctrldesc->owner = THIS_MODULE;
+ pctrldesc->name = dev_name(dev);
+ pctrldesc->pins = pindesc;
+ pctrldesc->npins = npins;
+
+ for (i = 0; i < npins; i++, pindesc++) {
+ pad = &pads[i];
+ pindesc->drv_data = pad;
+ pindesc->number = i;
+ pindesc->name = pmic_mpp_groups[i];
+
+ pad->irq = platform_get_irq(pdev, i);
+ if (pad->irq < 0)
+ return pad->irq;
+
+ pad->base = res[0] + i * PMIC_MPP_ADDRESS_RANGE;
+
+ ret = pmic_mpp_populate(state, pad);
+ if (ret < 0)
+ return ret;
+ }
+
+ state->chip = pmic_mpp_gpio_template;
+ state->chip.dev = dev;
+ state->chip.base = -1;
+ state->chip.ngpio = npins;
+ state->chip.label = dev_name(dev);
+ state->chip.of_gpio_n_cells = 2;
+ state->chip.can_sleep = false;
+
+ state->ctrl = pinctrl_register(pctrldesc, dev, state);
+ if (!state->ctrl)
+ return -ENODEV;
+
+ ret = gpiochip_add(&state->chip);
+ if (ret) {
+ dev_err(state->dev, "can't add gpio chip\n");
+ goto err_chip;
+ }
+
+ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
+ if (ret) {
+ dev_err(dev, "failed to add pin range\n");
+ goto err_range;
+ }
+
+ return 0;
+
+err_range:
+ gpiochip_remove(&state->chip);
+err_chip:
+ pinctrl_unregister(state->ctrl);
+ return ret;
+}
+
+static int pmic_mpp_remove(struct platform_device *pdev)
+{
+ struct pmic_mpp_state *state = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&state->chip);
+ pinctrl_unregister(state->ctrl);
+ return 0;
+}
+
+static const struct of_device_id pmic_mpp_of_match[] = {
+	{ .compatible = "qcom,pm8841-mpp" },	/* 4 MPPs */
+	{ .compatible = "qcom,pm8941-mpp" },	/* 8 MPPs */
+	{ .compatible = "qcom,pma8084-mpp" },	/* 8 MPPs */
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, pmic_mpp_of_match);
+
+static struct platform_driver pmic_mpp_driver = {
+ .driver = {
+ .name = "qcom-spmi-mpp",
+ .of_match_table = pmic_mpp_of_match,
+ },
+ .probe = pmic_mpp_probe,
+ .remove = pmic_mpp_remove,
+};
+
+module_platform_driver(pmic_mpp_driver);
+
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC MPP pin control driver");
+MODULE_ALIAS("platform:qcom-spmi-mpp");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index d7154ed0b0eb..becb3792977b 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -46,22 +46,16 @@ static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
return container_of(chip, struct exynos_irq_chip, chip);
}
-static struct samsung_pin_bank_type bank_type_off = {
+static const struct samsung_pin_bank_type bank_type_off = {
.fld_width = { 4, 1, 2, 2, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
};
-static struct samsung_pin_bank_type bank_type_alive = {
+static const struct samsung_pin_bank_type bank_type_alive = {
.fld_width = { 4, 1, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
};
-/* list of external wakeup controllers supported */
-static const struct of_device_id exynos_wkup_irq_ids[] = {
- { .compatible = "samsung,exynos4210-wakeup-eint", },
- { }
-};
-
static void exynos_irq_mask(struct irq_data *irqd)
{
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -171,7 +165,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pin_bank_type *bank_type = bank->type;
+ const struct samsung_pin_bank_type *bank_type = bank->type;
struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
@@ -180,7 +174,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
unsigned int con;
int ret;
- ret = gpio_lock_as_irq(&bank->gpio_chip, irqd->hwirq);
+ ret = gpiochip_lock_as_irq(&bank->gpio_chip, irqd->hwirq);
if (ret) {
dev_err(bank->gpio_chip.dev, "unable to lock pin %s-%lu IRQ\n",
bank->name, irqd->hwirq);
@@ -210,7 +204,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pin_bank_type *bank_type = bank->type;
+ const struct samsung_pin_bank_type *bank_type = bank->type;
struct samsung_pinctrl_drv_data *d = bank->drvdata;
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
@@ -233,7 +227,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
spin_unlock_irqrestore(&bank->slock, flags);
- gpio_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
+ gpiochip_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
}
/*
@@ -254,31 +248,30 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
.eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
};
-static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq,
+static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct samsung_pin_bank *b = h->host_data;
irq_set_chip_data(virq, b);
- irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip.chip,
+ irq_set_chip_and_handler(virq, &b->irq_chip->chip,
handle_level_irq);
set_irq_flags(virq, IRQF_VALID);
return 0;
}
/*
- * irq domain callbacks for external gpio interrupt controller.
+ * irq domain callbacks for external gpio and wakeup interrupt controllers.
*/
-static const struct irq_domain_ops exynos_gpio_irqd_ops = {
- .map = exynos_gpio_irq_map,
+static const struct irq_domain_ops exynos_eint_irqd_ops = {
+ .map = exynos_eint_irq_map,
.xlate = irq_domain_xlate_twocell,
};
static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
{
struct samsung_pinctrl_drv_data *d = data;
- struct samsung_pin_ctrl *ctrl = d->ctrl;
- struct samsung_pin_bank *bank = ctrl->pin_banks;
+ struct samsung_pin_bank *bank = d->pin_banks;
unsigned int svc, group, pin, virq;
svc = readl(d->virt_base + EXYNOS_SVC_OFFSET);
@@ -325,12 +318,12 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
return -ENXIO;
}
- bank = d->ctrl->pin_banks;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
if (bank->eint_type != EINT_TYPE_GPIO)
continue;
bank->irq_domain = irq_domain_add_linear(bank->of_node,
- bank->nr_pins, &exynos_gpio_irqd_ops, bank);
+ bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "gpio irq domain add failed\n");
ret = -ENXIO;
@@ -344,6 +337,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
ret = -ENOMEM;
goto err_domains;
}
+
+ bank->irq_chip = &exynos_gpio_irq_chip;
}
return 0;
@@ -383,9 +378,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
/*
* irq_chip for wakeup interrupts
*/
-static struct exynos_irq_chip exynos_wkup_irq_chip = {
+static struct exynos_irq_chip exynos4210_wkup_irq_chip __initdata = {
.chip = {
- .name = "exynos_wkup_irq_chip",
+ .name = "exynos4210_wkup_irq_chip",
.irq_unmask = exynos_irq_unmask,
.irq_mask = exynos_irq_mask,
.irq_ack = exynos_irq_ack,
@@ -399,6 +394,31 @@ static struct exynos_irq_chip exynos_wkup_irq_chip = {
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
};
+static struct exynos_irq_chip exynos7_wkup_irq_chip __initdata = {
+ .chip = {
+ .name = "exynos7_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
+ .irq_request_resources = exynos_irq_request_resources,
+ .irq_release_resources = exynos_irq_release_resources,
+ },
+ .eint_con = EXYNOS7_WKUP_ECON_OFFSET,
+ .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
+ .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
+};
+
+/* list of external wakeup controllers supported */
+static const struct of_device_id exynos_wkup_irq_ids[] = {
+ { .compatible = "samsung,exynos4210-wakeup-eint",
+ .data = &exynos4210_wkup_irq_chip },
+ { .compatible = "samsung,exynos7-wakeup-eint",
+ .data = &exynos7_wkup_irq_chip },
+ { }
+};
+
/* interrupt handler for wakeup interrupts 0..15 */
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
@@ -445,9 +465,9 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
for (i = 0; i < eintd->nr_banks; ++i) {
struct samsung_pin_bank *b = eintd->banks[i];
- pend = readl(d->virt_base + EXYNOS_WKUP_EPEND_OFFSET
+ pend = readl(d->virt_base + b->irq_chip->eint_pend
+ b->eint_offset);
- mask = readl(d->virt_base + EXYNOS_WKUP_EMASK_OFFSET
+ mask = readl(d->virt_base + b->irq_chip->eint_mask
+ b->eint_offset);
exynos_irq_demux_eint(pend & ~mask, b->irq_domain);
}
@@ -455,24 +475,6 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int exynos_wkup_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip.chip,
- handle_level_irq);
- irq_set_chip_data(virq, h->host_data);
- set_irq_flags(virq, IRQF_VALID);
- return 0;
-}
-
-/*
- * irq domain callbacks for external wakeup interrupt controller.
- */
-static const struct irq_domain_ops exynos_wkup_irqd_ops = {
- .map = exynos_wkup_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
/*
* exynos_eint_wkup_init() - setup handling of external wakeup interrupts.
* @d: driver data of samsung pinctrl driver.
@@ -485,12 +487,18 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
struct samsung_pin_bank *bank;
struct exynos_weint_data *weint_data;
struct exynos_muxed_weint_data *muxed_data;
+ struct exynos_irq_chip *irq_chip;
unsigned int muxed_banks = 0;
unsigned int i;
int idx, irq;
for_each_child_of_node(dev->of_node, np) {
- if (of_match_node(exynos_wkup_irq_ids, np)) {
+ const struct of_device_id *match;
+
+ match = of_match_node(exynos_wkup_irq_ids, np);
+ if (match) {
+ irq_chip = kmemdup(match->data,
+ sizeof(*irq_chip), GFP_KERNEL);
wkup_np = np;
break;
}
@@ -498,18 +506,20 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (!wkup_np)
return -ENODEV;
- bank = d->ctrl->pin_banks;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
if (bank->eint_type != EINT_TYPE_WKUP)
continue;
bank->irq_domain = irq_domain_add_linear(bank->of_node,
- bank->nr_pins, &exynos_wkup_irqd_ops, bank);
+ bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
return -ENXIO;
}
+ bank->irq_chip = irq_chip;
+
if (!of_find_property(bank->of_node, "interrupts", NULL)) {
bank->eint_type = EINT_TYPE_WKUP_MUX;
++muxed_banks;
@@ -556,9 +566,9 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
irq_set_chained_handler(irq, exynos_irq_demux_eint16_31);
irq_set_handler_data(irq, muxed_data);
- bank = d->ctrl->pin_banks;
+ bank = d->pin_banks;
idx = 0;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
if (bank->eint_type != EINT_TYPE_WKUP_MUX)
continue;
@@ -590,11 +600,10 @@ static void exynos_pinctrl_suspend_bank(
static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
- struct samsung_pin_bank *bank = ctrl->pin_banks;
+ struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
if (bank->eint_type == EINT_TYPE_GPIO)
exynos_pinctrl_suspend_bank(drvdata, bank);
}
@@ -626,17 +635,16 @@ static void exynos_pinctrl_resume_bank(
static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
- struct samsung_pin_bank *bank = ctrl->pin_banks;
+ struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
if (bank->eint_type == EINT_TYPE_GPIO)
exynos_pinctrl_resume_bank(drvdata, bank);
}
/* pin banks of s5pv210 pin-controller */
-static struct samsung_pin_bank s5pv210_pin_bank[] = {
+static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -673,7 +681,7 @@ static struct samsung_pin_bank s5pv210_pin_bank[] = {
EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
};
-struct samsung_pin_ctrl s5pv210_pin_ctrl[] = {
+const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = s5pv210_pin_bank,
@@ -682,12 +690,11 @@ struct samsung_pin_ctrl s5pv210_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "s5pv210-gpio-ctrl0",
},
};
/* pin banks of exynos3250 pin-controller 0 */
-static struct samsung_pin_bank exynos3250_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -698,7 +705,7 @@ static struct samsung_pin_bank exynos3250_pin_banks0[] = {
};
/* pin banks of exynos3250 pin-controller 1 */
-static struct samsung_pin_bank exynos3250_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
@@ -721,7 +728,7 @@ static struct samsung_pin_bank exynos3250_pin_banks1[] = {
* Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
* two gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos3250_pin_banks0,
@@ -729,7 +736,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos3250-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos3250_pin_banks1,
@@ -738,12 +744,11 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos3250-gpio-ctrl1",
},
};
/* pin banks of exynos4210 pin-controller 0 */
-static struct samsung_pin_bank exynos4210_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -763,7 +768,7 @@ static struct samsung_pin_bank exynos4210_pin_banks0[] = {
};
/* pin banks of exynos4210 pin-controller 1 */
-static struct samsung_pin_bank exynos4210_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
@@ -787,7 +792,7 @@ static struct samsung_pin_bank exynos4210_pin_banks1[] = {
};
/* pin banks of exynos4210 pin-controller 2 */
-static struct samsung_pin_bank exynos4210_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
};
@@ -795,7 +800,7 @@ static struct samsung_pin_bank exynos4210_pin_banks2[] = {
* Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
* three gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos4210_pin_banks0,
@@ -803,7 +808,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4210-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4210_pin_banks1,
@@ -812,17 +816,15 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4210-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4210_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
- .label = "exynos4210-gpio-ctrl2",
},
};
/* pin banks of exynos4x12 pin-controller 0 */
-static struct samsung_pin_bank exynos4x12_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -839,7 +841,7 @@ static struct samsung_pin_bank exynos4x12_pin_banks0[] = {
};
/* pin banks of exynos4x12 pin-controller 1 */
-static struct samsung_pin_bank exynos4x12_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
@@ -866,12 +868,12 @@ static struct samsung_pin_bank exynos4x12_pin_banks1[] = {
};
/* pin banks of exynos4x12 pin-controller 2 */
-static struct samsung_pin_bank exynos4x12_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
/* pin banks of exynos4x12 pin-controller 3 */
-static struct samsung_pin_bank exynos4x12_pin_banks3[] = {
+static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
@@ -883,7 +885,7 @@ static struct samsung_pin_bank exynos4x12_pin_banks3[] = {
* Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
* four gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos4x12_pin_banks0,
@@ -891,7 +893,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4x12-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4x12_pin_banks1,
@@ -900,7 +901,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4x12-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4x12_pin_banks2,
@@ -908,7 +908,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4x12-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos4x12_pin_banks3,
@@ -916,12 +915,86 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos4x12-gpio-ctrl3",
+ },
+};
+
+/* pin banks of exynos4415 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos4415_pin_banks0[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(1, 0x1C0, "gpf2", 0x38),
+};
+
+/* pin banks of exynos4415 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos4415_pin_banks1[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpl0", 0x18),
+ EXYNOS_PIN_BANK_EINTN(6, 0x120, "mp00"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x140, "mp01"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x160, "mp02"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x180, "mp03"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "mp04"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "mp05"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "mp06"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos4415 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos4415_pin_banks2[] = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+ EXYNOS_PIN_BANK_EINTN(2, 0x000, "etc1"),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos4415 SoC. Exynos4415 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos4415_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos4415_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos4415_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos4415_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos4415_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos4415_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos4415_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
},
};
/* pin banks of exynos5250 pin-controller 0 */
-static struct samsung_pin_bank exynos5250_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -950,7 +1023,7 @@ static struct samsung_pin_bank exynos5250_pin_banks0[] = {
};
/* pin banks of exynos5250 pin-controller 1 */
-static struct samsung_pin_bank exynos5250_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
@@ -963,7 +1036,7 @@ static struct samsung_pin_bank exynos5250_pin_banks1[] = {
};
/* pin banks of exynos5250 pin-controller 2 */
-static struct samsung_pin_bank exynos5250_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -972,7 +1045,7 @@ static struct samsung_pin_bank exynos5250_pin_banks2[] = {
};
/* pin banks of exynos5250 pin-controller 3 */
-static struct samsung_pin_bank exynos5250_pin_banks3[] = {
+static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -980,7 +1053,7 @@ static struct samsung_pin_bank exynos5250_pin_banks3[] = {
* Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
* four gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos5250_pin_banks0,
@@ -989,7 +1062,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos5250-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5250_pin_banks1,
@@ -997,7 +1069,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos5250-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5250_pin_banks2,
@@ -1005,7 +1076,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos5250-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5250_pin_banks3,
@@ -1013,12 +1083,11 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
- .label = "exynos5250-gpio-ctrl3",
},
};
/* pin banks of exynos5260 pin-controller 0 */
-static struct samsung_pin_bank exynos5260_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -1043,7 +1112,7 @@ static struct samsung_pin_bank exynos5260_pin_banks0[] = {
};
/* pin banks of exynos5260 pin-controller 1 */
-static struct samsung_pin_bank exynos5260_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -1052,7 +1121,7 @@ static struct samsung_pin_bank exynos5260_pin_banks1[] = {
};
/* pin banks of exynos5260 pin-controller 2 */
-static struct samsung_pin_bank exynos5260_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
@@ -1061,31 +1130,28 @@ static struct samsung_pin_bank exynos5260_pin_banks2[] = {
* Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
* three gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos5260_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
- .label = "exynos5260-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5260_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5260-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5260_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5260-gpio-ctrl2",
},
};
/* pin banks of exynos5420 pin-controller 0 */
-static struct samsung_pin_bank exynos5420_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
@@ -1094,7 +1160,7 @@ static struct samsung_pin_bank exynos5420_pin_banks0[] = {
};
/* pin banks of exynos5420 pin-controller 1 */
-static struct samsung_pin_bank exynos5420_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -1111,7 +1177,7 @@ static struct samsung_pin_bank exynos5420_pin_banks1[] = {
};
/* pin banks of exynos5420 pin-controller 2 */
-static struct samsung_pin_bank exynos5420_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
@@ -1123,7 +1189,7 @@ static struct samsung_pin_bank exynos5420_pin_banks2[] = {
};
/* pin banks of exynos5420 pin-controller 3 */
-static struct samsung_pin_bank exynos5420_pin_banks3[] = {
+static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -1136,7 +1202,7 @@ static struct samsung_pin_bank exynos5420_pin_banks3[] = {
};
/* pin banks of exynos5420 pin-controller 4 */
-static struct samsung_pin_bank exynos5420_pin_banks4[] = {
+static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -1144,37 +1210,137 @@ static struct samsung_pin_bank exynos5420_pin_banks4[] = {
* Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
* four gpio/pin-mux/pinconfig controllers.
*/
-struct samsung_pin_ctrl exynos5420_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos5420_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
- .label = "exynos5420-gpio-ctrl0",
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5420-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5420-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5420-gpio-ctrl3",
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
- .label = "exynos5420-gpio-ctrl4",
+ },
+};
+
+/* pin banks of exynos7 pin-controller - ALIVE */
+static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+};
+
+/* pin banks of exynos7 pin-controller - BUS0 */
+static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
+ EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
+ EXYNOS_PIN_BANK_EINTG(6, 0x060, "gpc2", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpc3", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpd2", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpd4", 0x20),
+ EXYNOS_PIN_BANK_EINTG(4, 0x120, "gpd5", 0x24),
+ EXYNOS_PIN_BANK_EINTG(6, 0x140, "gpd6", 0x28),
+ EXYNOS_PIN_BANK_EINTG(3, 0x160, "gpd7", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x180, "gpd8", 0x30),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1a0, "gpg0", 0x34),
+ EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpg3", 0x38),
+};
+
+/* pin banks of exynos7 pin-controller - NFC */
+static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - TOUCH */
+static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FF */
+static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - ESE */
+static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FSYS0 */
+static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FSYS1 */
+static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr3", 0x0c),
+};
+
+const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 Alive data */
+ .pin_banks = exynos7_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ }, {
+ /* pin-controller instance 1 BUS0 data */
+ .pin_banks = exynos7_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 2 NFC data */
+ .pin_banks = exynos7_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 3 TOUCH data */
+ .pin_banks = exynos7_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 4 FF data */
+ .pin_banks = exynos7_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 5 ESE data */
+ .pin_banks = exynos7_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 6 FSYS0 data */
+ .pin_banks = exynos7_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 7 FSYS1 data */
+ .pin_banks = exynos7_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
},
};
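
The conversions above all follow one pattern: the per-SoC bank and controller tables become const and __initconst because the driver no longer keeps pointers into them at runtime; samsung_pinctrl_get_soc_data() (further down, in pinctrl-samsung.c) copies each samsung_pin_bank_data entry into a devm-allocated samsung_pin_bank, so the tables can be discarded after init, and the debug-only .label strings are dropped since nothing reads them. The following is a self-contained user-space sketch of that init-data/runtime-data split; every name in it is made up for illustration and it is not kernel code.

	#include <stdio.h>
	#include <stdlib.h>

	/* read-only description, analogous to samsung_pin_bank_data */
	struct bank_data {
		unsigned int offset;
		unsigned int nr_pins;
		const char  *name;
	};

	/* mutable runtime state, analogous to samsung_pin_bank */
	struct bank {
		unsigned int offset;
		unsigned int nr_pins;
		unsigned int pin_base;	/* computed at "probe" time */
		const char  *name;
	};

	static const struct bank_data init_banks[] = {
		{ 0x000, 8, "gpa0" },
		{ 0x020, 6, "gpa1" },
	};

	int main(void)
	{
		size_t n = sizeof(init_banks) / sizeof(init_banks[0]);
		struct bank *banks = calloc(n, sizeof(*banks));
		unsigned int pin_base = 0;

		if (!banks)
			return 1;

		/* copy the static description into runtime objects, much like
		 * samsung_pinctrl_get_soc_data() does with devm_kcalloc() */
		for (size_t i = 0; i < n; i++) {
			banks[i].offset   = init_banks[i].offset;
			banks[i].nr_pins  = init_banks[i].nr_pins;
			banks[i].name     = init_banks[i].name;
			banks[i].pin_base = pin_base;
			pin_base += banks[i].nr_pins;
		}

		for (size_t i = 0; i < n; i++)
			printf("%s: pins %u..%u\n", banks[i].name,
			       banks[i].pin_base,
			       banks[i].pin_base + banks[i].nr_pins - 1);

		free(banks);
		return 0;
	}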
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 3c91c357792f..0f0f7cedb2dc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -25,6 +25,9 @@
#define EXYNOS_WKUP_ECON_OFFSET 0xE00
#define EXYNOS_WKUP_EMASK_OFFSET 0xF00
#define EXYNOS_WKUP_EPEND_OFFSET 0xF40
+#define EXYNOS7_WKUP_ECON_OFFSET 0x700
+#define EXYNOS7_WKUP_EMASK_OFFSET 0x900
+#define EXYNOS7_WKUP_EPEND_OFFSET 0xA00
#define EXYNOS_SVC_OFFSET 0xB08
#define EXYNOS_EINT_FUNC 0xF
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 88acfc0efd54..86192be3b679 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -1036,7 +1036,6 @@ static struct platform_driver exynos5440_pinctrl_driver = {
.probe = exynos5440_pinctrl_probe,
.driver = {
.name = "exynos5440-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = exynos5440_pinctrl_dt_match,
},
};
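
This is the first of many identical hunks in this series dropping ".owner = THIS_MODULE" from platform drivers. The assignment is redundant because platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register(), which records it in driver.owner itself. The mock below only illustrates that mechanism; the type and function names mirror the kernel API, but the bodies are simplified stand-ins, not the kernel implementation.

	#include <stdio.h>

	struct module { const char *name; };
	struct device_driver { const char *name; struct module *owner; };
	struct platform_driver { struct device_driver driver; };

	static struct module this_module = { "example" };
	#define THIS_MODULE (&this_module)

	static int __platform_driver_register(struct platform_driver *drv,
					      struct module *owner)
	{
		drv->driver.owner = owner;	/* the core records the owner */
		return 0;
	}

	/* the real helper is a macro of this shape, so every caller's module
	 * is captured without an explicit .owner initializer */
	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)

	int main(void)
	{
		struct platform_driver pd = {
			.driver = { .name = "example-pinctrl" },
		};

		platform_driver_register(&pd);
		printf("%s owner: %s\n", pd.driver.name, pd.driver.owner->name);
		return 0;
	}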
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index ad3eaad17001..f1993f42114c 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -44,12 +44,12 @@
#define EINT_EDGE_BOTH 6
#define EINT_MASK 0xf
-static struct samsung_pin_bank_type bank_type_1bit = {
+static const struct samsung_pin_bank_type bank_type_1bit = {
.fld_width = { 1, 1, },
.reg_offset = { 0x00, 0x04, },
};
-static struct samsung_pin_bank_type bank_type_2bit = {
+static const struct samsung_pin_bank_type bank_type_2bit = {
.fld_width = { 2, 1, 2, },
.reg_offset = { 0x00, 0x04, 0x08, },
};
@@ -143,7 +143,7 @@ static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
struct samsung_pin_bank *bank, int pin)
{
- struct samsung_pin_bank_type *bank_type = bank->type;
+ const struct samsung_pin_bank_type *bank_type = bank->type;
unsigned long flags;
void __iomem *reg;
u8 shift;
@@ -518,8 +518,8 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq_set_handler_data(irq, eint_data);
}
- bank = d->ctrl->pin_banks;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
struct s3c24xx_eint_domain_data *ddata;
unsigned int mask;
unsigned int irq;
@@ -561,7 +561,7 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static struct samsung_pin_bank s3c2412_pin_banks[] = {
+static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {
PIN_BANK_A(23, 0x000, "gpa"),
PIN_BANK_2BIT(11, 0x010, "gpb"),
PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -573,16 +573,15 @@ static struct samsung_pin_bank s3c2412_pin_banks[] = {
PIN_BANK_2BIT(13, 0x080, "gpj"),
};
-struct samsung_pin_ctrl s3c2412_pin_ctrl[] = {
+const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
{
.pin_banks = s3c2412_pin_banks,
.nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
.eint_wkup_init = s3c24xx_eint_init,
- .label = "S3C2412-GPIO",
},
};
-static struct samsung_pin_bank s3c2416_pin_banks[] = {
+static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
PIN_BANK_A(27, 0x000, "gpa"),
PIN_BANK_2BIT(11, 0x010, "gpb"),
PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -596,16 +595,15 @@ static struct samsung_pin_bank s3c2416_pin_banks[] = {
PIN_BANK_2BIT(2, 0x100, "gpm"),
};
-struct samsung_pin_ctrl s3c2416_pin_ctrl[] = {
+const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
{
.pin_banks = s3c2416_pin_banks,
.nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
.eint_wkup_init = s3c24xx_eint_init,
- .label = "S3C2416-GPIO",
},
};
-static struct samsung_pin_bank s3c2440_pin_banks[] = {
+static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
PIN_BANK_A(25, 0x000, "gpa"),
PIN_BANK_2BIT(11, 0x010, "gpb"),
PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -617,16 +615,15 @@ static struct samsung_pin_bank s3c2440_pin_banks[] = {
PIN_BANK_2BIT(13, 0x0d0, "gpj"),
};
-struct samsung_pin_ctrl s3c2440_pin_ctrl[] = {
+const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
{
.pin_banks = s3c2440_pin_banks,
.nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
.eint_wkup_init = s3c24xx_eint_init,
- .label = "S3C2440-GPIO",
},
};
-static struct samsung_pin_bank s3c2450_pin_banks[] = {
+static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
PIN_BANK_A(28, 0x000, "gpa"),
PIN_BANK_2BIT(11, 0x010, "gpb"),
PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -641,11 +638,10 @@ static struct samsung_pin_bank s3c2450_pin_banks[] = {
PIN_BANK_2BIT(2, 0x100, "gpm"),
};
-struct samsung_pin_ctrl s3c2450_pin_ctrl[] = {
+const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {
{
.pin_banks = s3c2450_pin_banks,
.nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
.eint_wkup_init = s3c24xx_eint_init,
- .label = "S3C2450-GPIO",
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 89143c903000..7756c1e9e763 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -68,32 +68,32 @@
#define EINT_CON_MASK 0xF
#define EINT_CON_LEN 4
-static struct samsung_pin_bank_type bank_type_4bit_off = {
+static const struct samsung_pin_bank_type bank_type_4bit_off = {
.fld_width = { 4, 1, 2, 0, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
};
-static struct samsung_pin_bank_type bank_type_4bit_alive = {
+static const struct samsung_pin_bank_type bank_type_4bit_alive = {
.fld_width = { 4, 1, 2, },
.reg_offset = { 0x00, 0x04, 0x08, },
};
-static struct samsung_pin_bank_type bank_type_4bit2_off = {
+static const struct samsung_pin_bank_type bank_type_4bit2_off = {
.fld_width = { 4, 1, 2, 0, 2, 2, },
.reg_offset = { 0x00, 0x08, 0x0c, 0, 0x10, 0x14, },
};
-static struct samsung_pin_bank_type bank_type_4bit2_alive = {
+static const struct samsung_pin_bank_type bank_type_4bit2_alive = {
.fld_width = { 4, 1, 2, },
.reg_offset = { 0x00, 0x08, 0x0c, },
};
-static struct samsung_pin_bank_type bank_type_2bit_off = {
+static const struct samsung_pin_bank_type bank_type_2bit_off = {
.fld_width = { 2, 1, 2, 0, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
};
-static struct samsung_pin_bank_type bank_type_2bit_alive = {
+static const struct samsung_pin_bank_type bank_type_2bit_alive = {
.fld_width = { 2, 1, 2, },
.reg_offset = { 0x00, 0x04, 0x08, },
};
@@ -272,7 +272,7 @@ static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
struct samsung_pin_bank *bank, int pin)
{
- struct samsung_pin_bank_type *bank_type = bank->type;
+ const struct samsung_pin_bank_type *bank_type = bank->type;
unsigned long flags;
void __iomem *reg;
u8 shift;
@@ -468,8 +468,8 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
}
nr_domains = 0;
- bank = d->ctrl->pin_banks;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
unsigned int nr_eints;
unsigned int mask;
@@ -497,9 +497,9 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
}
data->drvdata = d;
- bank = d->ctrl->pin_banks;
+ bank = d->pin_banks;
nr_domains = 0;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
if (bank->eint_type != EINT_TYPE_GPIO)
continue;
@@ -735,8 +735,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq_set_handler_data(irq, data);
}
- bank = d->ctrl->pin_banks;
- for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
struct s3c64xx_eint0_domain_data *ddata;
unsigned int nr_eints;
unsigned int mask;
@@ -780,7 +780,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
}
/* pin banks of s3c64xx pin-controller 0 */
-static struct samsung_pin_bank s3c64xx_pin_banks0[] = {
+static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = {
PIN_BANK_4BIT_EINTG(8, 0x000, "gpa", 0),
PIN_BANK_4BIT_EINTG(7, 0x020, "gpb", 8),
PIN_BANK_4BIT_EINTG(8, 0x040, "gpc", 16),
@@ -804,13 +804,12 @@ static struct samsung_pin_bank s3c64xx_pin_banks0[] = {
* Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes
* one gpio/pin-mux/pinconfig controller.
*/
-struct samsung_pin_ctrl s3c64xx_pin_ctrl[] = {
+const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
{
/* pin-controller instance 1 data */
.pin_banks = s3c64xx_pin_banks0,
.nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0),
.eint_gpio_init = s3c64xx_eint_gpio_init,
.eint_wkup_init = s3c64xx_eint_eint0_init,
- .label = "S3C64xx-GPIO",
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 2d37c8f49f3c..ec580af35856 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -349,7 +349,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
{
struct samsung_pin_bank *b;
- b = drvdata->ctrl->pin_banks;
+ b = drvdata->pin_banks;
while ((pin >= b->pin_base) &&
((b->pin_base + b->nr_pins - 1) < pin))
@@ -366,7 +366,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group, bool enable)
{
struct samsung_pinctrl_drv_data *drvdata;
- struct samsung_pin_bank_type *type;
+ const struct samsung_pin_bank_type *type;
struct samsung_pin_bank *bank;
void __iomem *reg;
u32 mask, shift, data, pin_offset;
@@ -378,7 +378,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
func = &drvdata->pmx_functions[selector];
grp = &drvdata->pin_groups[group];
- pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->ctrl->base,
+ pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->pin_base,
&reg, &pin_offset, &bank);
type = bank->type;
mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
@@ -422,7 +422,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config, bool set)
{
struct samsung_pinctrl_drv_data *drvdata;
- struct samsung_pin_bank_type *type;
+ const struct samsung_pin_bank_type *type;
struct samsung_pin_bank *bank;
void __iomem *reg_base;
enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config);
@@ -431,7 +431,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long flags;
drvdata = pinctrl_dev_get_drvdata(pctldev);
- pin_to_reg_bank(drvdata, pin - drvdata->ctrl->base, &reg_base,
+ pin_to_reg_bank(drvdata, pin - drvdata->pin_base, &reg_base,
&pin_offset, &bank);
type = bank->type;
@@ -528,7 +528,7 @@ static const struct pinconf_ops samsung_pinconf_ops = {
static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
- struct samsung_pin_bank_type *type = bank->type;
+ const struct samsung_pin_bank_type *type = bank->type;
unsigned long flags;
void __iomem *reg;
u32 data;
@@ -552,7 +552,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
void __iomem *reg;
u32 data;
struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
- struct samsung_pin_bank_type *type = bank->type;
+ const struct samsung_pin_bank_type *type = bank->type;
reg = bank->drvdata->virt_base + bank->pctl_offset;
@@ -569,7 +569,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
static int samsung_gpio_set_direction(struct gpio_chip *gc,
unsigned offset, bool input)
{
- struct samsung_pin_bank_type *type;
+ const struct samsung_pin_bank_type *type;
struct samsung_pin_bank *bank;
struct samsung_pinctrl_drv_data *drvdata;
void __iomem *reg;
@@ -834,32 +834,32 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
ctrldesc->confops = &samsung_pinconf_ops;
pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
- drvdata->ctrl->nr_pins, GFP_KERNEL);
+ drvdata->nr_pins, GFP_KERNEL);
if (!pindesc) {
dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
return -ENOMEM;
}
ctrldesc->pins = pindesc;
- ctrldesc->npins = drvdata->ctrl->nr_pins;
+ ctrldesc->npins = drvdata->nr_pins;
/* dynamically populate the pin number and pin name for pindesc */
for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
- pdesc->number = pin + drvdata->ctrl->base;
+ pdesc->number = pin + drvdata->pin_base;
/*
* allocate space for storing the dynamically generated names for all
* the pins which belong to this pin-controller.
*/
pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
- drvdata->ctrl->nr_pins, GFP_KERNEL);
+ drvdata->nr_pins, GFP_KERNEL);
if (!pin_names) {
dev_err(&pdev->dev, "mem alloc for pin names failed\n");
return -ENOMEM;
}
/* for each pin, the name of the pin is pin-bank name + pin number */
- for (bank = 0; bank < drvdata->ctrl->nr_banks; bank++) {
- pin_bank = &drvdata->ctrl->pin_banks[bank];
+ for (bank = 0; bank < drvdata->nr_banks; bank++) {
+ pin_bank = &drvdata->pin_banks[bank];
for (pin = 0; pin < pin_bank->nr_pins; pin++) {
sprintf(pin_names, "%s-%d", pin_bank->name, pin);
pdesc = pindesc + pin_bank->pin_base + pin;
@@ -878,11 +878,11 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
return -EINVAL;
}
- for (bank = 0; bank < drvdata->ctrl->nr_banks; ++bank) {
- pin_bank = &drvdata->ctrl->pin_banks[bank];
+ for (bank = 0; bank < drvdata->nr_banks; ++bank) {
+ pin_bank = &drvdata->pin_banks[bank];
pin_bank->grange.name = pin_bank->name;
pin_bank->grange.id = bank;
- pin_bank->grange.pin_base = drvdata->ctrl->base
+ pin_bank->grange.pin_base = drvdata->pin_base
+ pin_bank->pin_base;
pin_bank->grange.base = pin_bank->gpio_chip.base;
pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
@@ -918,17 +918,16 @@ static const struct gpio_chip samsung_gpiolib_chip = {
static int samsung_gpiolib_register(struct platform_device *pdev,
struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
- struct samsung_pin_bank *bank = ctrl->pin_banks;
+ struct samsung_pin_bank *bank = drvdata->pin_banks;
struct gpio_chip *gc;
int ret;
int i;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
bank->gpio_chip = samsung_gpiolib_chip;
gc = &bank->gpio_chip;
- gc->base = ctrl->base + bank->pin_base;
+ gc->base = drvdata->pin_base + bank->pin_base;
gc->ngpio = bank->nr_pins;
gc->dev = &pdev->dev;
gc->of_node = bank->of_node;
@@ -954,51 +953,70 @@ fail:
static int samsung_gpiolib_unregister(struct platform_device *pdev,
struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
- struct samsung_pin_bank *bank = ctrl->pin_banks;
+ struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
gpiochip_remove(&bank->gpio_chip);
+
return 0;
}
static const struct of_device_id samsung_pinctrl_dt_match[];
/* retrieve the soc specific data */
-static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
- struct samsung_pinctrl_drv_data *d,
- struct platform_device *pdev)
+static const struct samsung_pin_ctrl *
+samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
+ struct platform_device *pdev)
{
int id;
const struct of_device_id *match;
struct device_node *node = pdev->dev.of_node;
struct device_node *np;
- struct samsung_pin_ctrl *ctrl;
+ const struct samsung_pin_bank_data *bdata;
+ const struct samsung_pin_ctrl *ctrl;
struct samsung_pin_bank *bank;
int i;
id = of_alias_get_id(node, "pinctrl");
if (id < 0) {
dev_err(&pdev->dev, "failed to get alias id\n");
- return NULL;
+ return ERR_PTR(-ENOENT);
}
match = of_match_node(samsung_pinctrl_dt_match, node);
ctrl = (struct samsung_pin_ctrl *)match->data + id;
- bank = ctrl->pin_banks;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+ d->suspend = ctrl->suspend;
+ d->resume = ctrl->resume;
+ d->nr_banks = ctrl->nr_banks;
+ d->pin_banks = devm_kcalloc(&pdev->dev, d->nr_banks,
+ sizeof(*d->pin_banks), GFP_KERNEL);
+ if (!d->pin_banks)
+ return ERR_PTR(-ENOMEM);
+
+ bank = d->pin_banks;
+ bdata = ctrl->pin_banks;
+ for (i = 0; i < ctrl->nr_banks; ++i, ++bdata, ++bank) {
+ bank->type = bdata->type;
+ bank->pctl_offset = bdata->pctl_offset;
+ bank->nr_pins = bdata->nr_pins;
+ bank->eint_func = bdata->eint_func;
+ bank->eint_type = bdata->eint_type;
+ bank->eint_mask = bdata->eint_mask;
+ bank->eint_offset = bdata->eint_offset;
+ bank->name = bdata->name;
+
spin_lock_init(&bank->slock);
bank->drvdata = d;
- bank->pin_base = ctrl->nr_pins;
- ctrl->nr_pins += bank->nr_pins;
+ bank->pin_base = d->nr_pins;
+ d->nr_pins += bank->nr_pins;
}
for_each_child_of_node(node, np) {
if (!of_find_property(np, "gpio-controller", NULL))
continue;
- bank = ctrl->pin_banks;
- for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank) {
if (!strcmp(bank->name, np->name)) {
bank->of_node = np;
break;
@@ -1006,8 +1024,8 @@ static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
}
}
- ctrl->base = pin_base;
- pin_base += ctrl->nr_pins;
+ d->pin_base = pin_base;
+ pin_base += d->nr_pins;
return ctrl;
}
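
Besides copying the bank data, the reworked samsung_pinctrl_get_soc_data() stops returning NULL on failure and returns ERR_PTR(-ENOENT) or ERR_PTR(-ENOMEM) instead; the probe hunk below picks that up with IS_ERR()/PTR_ERR() so the real error code reaches the caller. The snippet that follows is a self-contained user-space mock of that convention; it mirrors include/linux/err.h in spirit but is not the kernel implementation.

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)(intptr_t)error;	/* encode errno in the pointer */
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)(intptr_t)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
	}

	/* stand-in for samsung_pinctrl_get_soc_data(): the failure reason is
	 * carried in the returned pointer instead of being flattened to NULL */
	static const char *get_soc_data(int have_alias)
	{
		static const char data[] = "soc-data";

		if (!have_alias)
			return ERR_PTR(-ENOENT);
		return data;
	}

	int main(void)
	{
		const char *ctrl = get_soc_data(0);

		if (IS_ERR(ctrl)) {	/* probe: was "if (!ctrl) return -EINVAL;" */
			printf("probe fails with %ld\n", PTR_ERR(ctrl));
			return 1;
		}
		printf("got %s\n", ctrl);
		return 0;
	}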
@@ -1015,8 +1033,8 @@ static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
static int samsung_pinctrl_probe(struct platform_device *pdev)
{
struct samsung_pinctrl_drv_data *drvdata;
+ const struct samsung_pin_ctrl *ctrl;
struct device *dev = &pdev->dev;
- struct samsung_pin_ctrl *ctrl;
struct resource *res;
int ret;
@@ -1033,11 +1051,10 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
ctrl = samsung_pinctrl_get_soc_data(drvdata, pdev);
- if (!ctrl) {
+ if (IS_ERR(ctrl)) {
dev_err(&pdev->dev, "driver data not available\n");
- return -EINVAL;
+ return PTR_ERR(ctrl);
}
- drvdata->ctrl = ctrl;
drvdata->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1082,16 +1099,14 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
static void samsung_pinctrl_suspend_dev(
struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
void __iomem *virt_base = drvdata->virt_base;
int i;
- for (i = 0; i < ctrl->nr_banks; i++) {
- struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
void __iomem *reg = virt_base + bank->pctl_offset;
-
- u8 *offs = bank->type->reg_offset;
- u8 *widths = bank->type->fld_width;
+ const u8 *offs = bank->type->reg_offset;
+ const u8 *widths = bank->type->fld_width;
enum pincfg_type type;
/* Registers without a powerdown config aren't lost */
@@ -1116,8 +1131,8 @@ static void samsung_pinctrl_suspend_dev(
}
}
- if (ctrl->suspend)
- ctrl->suspend(drvdata);
+ if (drvdata->suspend)
+ drvdata->suspend(drvdata);
}
/**
@@ -1130,19 +1145,17 @@ static void samsung_pinctrl_suspend_dev(
*/
static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
{
- struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
void __iomem *virt_base = drvdata->virt_base;
int i;
- if (ctrl->resume)
- ctrl->resume(drvdata);
+ if (drvdata->resume)
+ drvdata->resume(drvdata);
- for (i = 0; i < ctrl->nr_banks; i++) {
- struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
void __iomem *reg = virt_base + bank->pctl_offset;
-
- u8 *offs = bank->type->reg_offset;
- u8 *widths = bank->type->fld_width;
+ const u8 *offs = bank->type->reg_offset;
+ const u8 *widths = bank->type->fld_width;
enum pincfg_type type;
/* Registers without a powerdown config aren't lost */
@@ -1218,6 +1231,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
+ { .compatible = "samsung,exynos4415-pinctrl",
+ .data = (void *)exynos4415_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5260-pinctrl",
@@ -1226,6 +1241,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos5420_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
.data = (void *)s5pv210_pin_ctrl },
+ { .compatible = "samsung,exynos7-pinctrl",
+ .data = (void *)exynos7_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
@@ -1249,7 +1266,6 @@ static struct platform_driver samsung_pinctrl_driver = {
.probe = samsung_pinctrl_probe,
.driver = {
.name = "samsung-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = samsung_pinctrl_dt_match,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 5cedc9d26390..1b8c0139d604 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -113,39 +113,66 @@ struct samsung_pin_bank_type {
};
/**
+ * struct samsung_pin_bank_data: represent a controller pin-bank (init data).
+ * @type: type of the bank (register offsets and bitfield widths)
+ * @pctl_offset: starting offset of the pin-bank registers.
+ * @nr_pins: number of pins included in this bank.
+ * @eint_func: function to set in CON register to configure pin as EINT.
+ * @eint_type: type of the external interrupt supported by the bank.
+ * @eint_mask: bit mask of pins which support EINT function.
+ * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @name: name to be prefixed for each pin in this pin bank.
+ */
+struct samsung_pin_bank_data {
+ const struct samsung_pin_bank_type *type;
+ u32 pctl_offset;
+ u8 nr_pins;
+ u8 eint_func;
+ enum eint_type eint_type;
+ u32 eint_mask;
+ u32 eint_offset;
+ const char *name;
+};
+
+/**
* struct samsung_pin_bank: represent a controller pin-bank.
* @type: type of the bank (register offsets and bitfield widths)
* @pctl_offset: starting offset of the pin-bank registers.
- * @pin_base: starting pin number of the bank.
* @nr_pins: number of pins included in this bank.
* @eint_func: function to set in CON register to configure pin as EINT.
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
+ * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
* @name: name to be prefixed for each pin in this pin bank.
+ * @pin_base: starting pin number of the bank.
+ * @soc_priv: per-bank private data for SoC-specific code.
* @of_node: OF node of the bank.
* @drvdata: link to controller driver data
* @irq_domain: IRQ domain of the bank.
* @gpio_chip: GPIO chip of the bank.
* @grange: linux gpio pin range supported by this bank.
+ * @irq_chip: link to irq chip for external gpio and wakeup interrupts.
* @slock: spinlock protecting bank registers
* @pm_save: saved register values during suspend
*/
struct samsung_pin_bank {
- struct samsung_pin_bank_type *type;
+ const struct samsung_pin_bank_type *type;
u32 pctl_offset;
- u32 pin_base;
u8 nr_pins;
u8 eint_func;
enum eint_type eint_type;
u32 eint_mask;
u32 eint_offset;
- char *name;
+ const char *name;
+
+ u32 pin_base;
void *soc_priv;
struct device_node *of_node;
struct samsung_pinctrl_drv_data *drvdata;
struct irq_domain *irq_domain;
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range grange;
+ struct exynos_irq_chip *irq_chip;
spinlock_t slock;
u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/
@@ -155,27 +182,19 @@ struct samsung_pin_bank {
* struct samsung_pin_ctrl: represent a pin controller.
* @pin_banks: list of pin banks included in this controller.
* @nr_banks: number of pin banks.
- * @base: starting system wide pin number.
- * @nr_pins: number of pins supported by the controller.
* @eint_gpio_init: platform specific callback to setup the external gpio
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
* interrupts for the controller.
- * @label: for debug information.
*/
struct samsung_pin_ctrl {
- struct samsung_pin_bank *pin_banks;
+ const struct samsung_pin_bank_data *pin_banks;
u32 nr_banks;
- u32 base;
- u32 nr_pins;
-
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
void (*suspend)(struct samsung_pinctrl_drv_data *);
void (*resume)(struct samsung_pinctrl_drv_data *);
-
- char *label;
};
/**
@@ -191,6 +210,8 @@ struct samsung_pin_ctrl {
* @nr_groups: number of such pin groups.
* @pmx_functions: list of pin functions available to the driver.
* @nr_function: number of such pin functions.
+ * @pin_base: starting system wide pin number.
+ * @nr_pins: number of pins supported by the controller.
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
@@ -198,7 +219,6 @@ struct samsung_pinctrl_drv_data {
struct device *dev;
int irq;
- struct samsung_pin_ctrl *ctrl;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
@@ -206,6 +226,14 @@ struct samsung_pinctrl_drv_data {
unsigned int nr_groups;
const struct samsung_pmx_func *pmx_functions;
unsigned int nr_functions;
+
+ struct samsung_pin_bank *pin_banks;
+ u32 nr_banks;
+ unsigned int pin_base;
+ unsigned int nr_pins;
+
+ void (*suspend)(struct samsung_pinctrl_drv_data *);
+ void (*resume)(struct samsung_pinctrl_drv_data *);
};
/**
@@ -236,17 +264,19 @@ struct samsung_pmx_func {
};
/* list of all exported SoC specific data */
-extern struct samsung_pin_ctrl exynos3250_pin_ctrl[];
-extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
-extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
-extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
-extern struct samsung_pin_ctrl exynos5260_pin_ctrl[];
-extern struct samsung_pin_ctrl exynos5420_pin_ctrl[];
-extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
-extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
-extern struct samsung_pin_ctrl s3c2416_pin_ctrl[];
-extern struct samsung_pin_ctrl s3c2440_pin_ctrl[];
-extern struct samsung_pin_ctrl s3c2450_pin_ctrl[];
-extern struct samsung_pin_ctrl s5pv210_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[];
+extern const struct samsung_pin_ctrl exynos7_pin_ctrl[];
+extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
+extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[];
+extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[];
+extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[];
+extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[];
+extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[];
#endif /* __PINCTRL_SAMSUNG_H */
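
The header changes tie the series together: samsung_pin_bank_data carries only the per-SoC, probe-time description (and is what the __initconst tables now hold), samsung_pin_bank keeps the runtime state plus the new soc_priv/irq_chip fields, and the bookkeeping that used to live in samsung_pin_ctrl (pin_base, nr_pins, the bank array, suspend/resume) moves into samsung_pinctrl_drv_data. That is why every drvdata->ctrl->... access earlier in the patch became drvdata->..., and it also means probe no longer writes into the static SoC tables. Roughly, the accesses change shape like this (illustrative fragment only, taken from the hunks above):

	/* before: runtime code reached through the shared SoC descriptor */
	bank  = &drvdata->ctrl->pin_banks[i];
	npins = drvdata->ctrl->nr_pins;

	/* after: runtime code owns its own per-instance copy */
	bank  = &drvdata->pin_banks[i];
	npins = drvdata->nr_pins;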
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 6572c233f73d..66dc62d2156c 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -650,7 +650,6 @@ static struct platform_driver sh_pfc_driver = {
.id_table = sh_pfc_id_table,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sh_pfc_of_table),
},
};
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 4c831fdfcc2f..4871647c7f85 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -406,7 +406,6 @@ static const struct dev_pm_ops sirfsoc_pinmux_pm_ops = {
static struct platform_driver sirfsoc_pinmux_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = pinmux_ids,
#ifdef CONFIG_PM_SLEEP
.pm = &sirfsoc_pinmux_pm_ops,
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index bddb79105d67..ae8f29fb5536 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -710,7 +710,6 @@ MODULE_DEVICE_TABLE(of, plgpio_of_match);
static struct platform_driver plgpio_driver = {
.probe = plgpio_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "spear-plgpio",
.pm = &plgpio_dev_pm_ops,
.of_match_table = plgpio_of_match,
@@ -724,5 +723,5 @@ static int __init plgpio_init(void)
subsys_initcall(plgpio_init);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("ST Microlectronics SPEAr PLGPIO driver");
+MODULE_DESCRIPTION("STMicroelectronics SPEAr PLGPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index 6d57d43ab640..a7bdc537efa7 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2712,7 +2712,6 @@ static int spear1310_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver spear1310_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spear1310_pinctrl_of_match,
},
.probe = spear1310_pinctrl_probe,
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index d243e43e7f6d..f43ec85a0328 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2028,7 +2028,6 @@ static int spear1340_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver spear1340_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spear1340_pinctrl_of_match,
},
.probe = spear1340_pinctrl_probe,
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9db83e9ee18c..da8990a8eeef 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -685,7 +685,6 @@ static int spear300_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver spear300_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spear300_pinctrl_of_match,
},
.probe = spear300_pinctrl_probe,
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index db775a414b7a..31ede51e819b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -408,7 +408,6 @@ static int spear310_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver spear310_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spear310_pinctrl_of_match,
},
.probe = spear310_pinctrl_probe,
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 80fbd68e17bc..506e40b641e0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3449,7 +3449,6 @@ static int spear320_pinctrl_remove(struct platform_device *pdev)
static struct platform_driver spear320_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spear320_pinctrl_of_match,
},
.probe = spear320_pinctrl_probe,
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index a5e10f777ed2..230a952608cb 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -39,4 +39,8 @@ config PINCTRL_SUN8I_A23_R
depends on RESET_CONTROLLER
select PINCTRL_SUNXI_COMMON
+config PINCTRL_SUN9I_A80
+ def_bool MACH_SUN9I
+ select PINCTRL_SUNXI_COMMON
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index e797efb02901..c7d92e4673b5 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
+obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 86b608bedca6..24c5d88f943f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1029,7 +1029,6 @@ static struct platform_driver sun4i_a10_pinctrl_driver = {
.probe = sun4i_a10_pinctrl_probe,
.driver = {
.name = "sun4i-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun4i_a10_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
index 2fa7430cabaf..45a351affa59 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
@@ -680,7 +680,6 @@ static struct platform_driver sun5i_a10s_pinctrl_driver = {
.probe = sun5i_a10s_pinctrl_probe,
.driver = {
.name = "sun5i-a10s-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun5i_a10s_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
index e47c33dbae3a..4bd23471412c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
@@ -398,7 +398,6 @@ static struct platform_driver sun5i_a13_pinctrl_driver = {
.probe = sun5i_a13_pinctrl_probe,
.driver = {
.name = "sun5i-a13-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun5i_a13_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 9a2517b65113..02174fa57997 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -130,7 +130,6 @@ static struct platform_driver sun6i_a31_r_pinctrl_driver = {
.probe = sun6i_a31_r_pinctrl_probe,
.driver = {
.name = "sun6i-a31-r-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun6i_a31_r_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index a2b4b85c5ad5..f42858eaca28 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -927,7 +927,6 @@ static struct platform_driver sun6i_a31_pinctrl_driver = {
.probe = sun6i_a31_pinctrl_probe,
.driver = {
.name = "sun6i-a31-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun6i_a31_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index dac99e02bfdb..6af6cc8547b0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -1055,7 +1055,6 @@ static struct platform_driver sun7i_a20_pinctrl_driver = {
.probe = sun7i_a20_pinctrl_probe,
.driver = {
.name = "sun7i-a20-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun7i_a20_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 90f3b3a7c51e..327e03ff7c4d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -129,7 +129,6 @@ static struct platform_driver sun8i_a23_r_pinctrl_driver = {
.probe = sun8i_a23_r_pinctrl_probe,
.driver = {
.name = "sun8i-a23-r-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun8i_a23_r_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index ac71e8c5901b..62695c9a92c2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -581,7 +581,6 @@ static struct platform_driver sun8i_a23_pinctrl_driver = {
.probe = sun8i_a23_pinctrl_probe,
.driver = {
.name = "sun8i-a23-pinctrl",
- .owner = THIS_MODULE,
.of_match_table = sun8i_a23_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
new file mode 100644
index 000000000000..adb29422efc9
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -0,0 +1,749 @@
+/*
+ * Allwinner A80 SoCs pinctrl driver.
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun9i_a80_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXCK */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXCTL */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RING */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
+ SUNXI_FUNCTION(0x4, "eclk"), /* IN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
+ SUNXI_FUNCTION(0x4, "eclk"), /* IN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
+ SUNXI_FUNCTION(0x4, "clk_out_a"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MII-CRS */
+ SUNXI_FUNCTION(0x4, "clk_out_b"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXCK */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* PWM_P */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RGMII-TXCK / GMII-TXEN */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* PWM_N */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MII-TXERR */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RGMII-CLKIN / MII-COL */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* EMDC */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* EMDIO */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
+ SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
+ SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* RE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE2 */
+ SUNXI_FUNCTION(0x3, "nand0_b")), /* RE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE3 */
+ SUNXI_FUNCTION(0x3, "nand0_b")), /* DQS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "ts"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "ts"), /* ERR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "ts"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "ts"), /* DVLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CS0 */
+ SUNXI_FUNCTION(0x4, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x4, "uart5"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x4, "uart5"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D7 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2c4"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)), /* PE_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2c4"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)), /* PE_EINT17 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0")),
+
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "pwm1"), /* Positive */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 8)), /* PH_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "pwm1"), /* Negative */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 9)), /* PH_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "pwm2"), /* Positive */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 10)), /* PH_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "pwm2"), /* Negative */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 11)), /* PH_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 12)), /* PH_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 13)), /* PH_EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi3"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 14)), /* PH_EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi3"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 15)), /* PH_EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi3"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 16)), /* PH_EINT16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi3"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 17)), /* PH_EINT17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi3"), /* CS1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 18)), /* PH_EINT18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "hdmi")), /* SCL */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "hdmi")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "hdmi")), /* CEC */
+};
+
+static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
+ .pins = sun9i_a80_pins,
+ .npins = ARRAY_SIZE(sun9i_a80_pins),
+ .irq_banks = 5,
+};
+
+static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun9i_a80_pinctrl_data);
+}
+
+static struct of_device_id sun9i_a80_pinctrl_match[] = {
+ { .compatible = "allwinner,sun9i-a80-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun9i_a80_pinctrl_match);
+
+static struct platform_driver sun9i_a80_pinctrl_driver = {
+ .probe = sun9i_a80_pinctrl_probe,
+ .driver = {
+ .name = "sun9i-a80-pinctrl",
+ .of_match_table = sun9i_a80_pinctrl_match,
+ },
+};
+module_platform_driver(sun9i_a80_pinctrl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A80 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index ef9d804e55de..3d0744337736 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -553,7 +553,7 @@ static int sunxi_pinctrl_irq_request_resources(struct irq_data *d)
if (!func)
return -EINVAL;
- ret = gpio_lock_as_irq(pctl->chip,
+ ret = gpiochip_lock_as_irq(pctl->chip,
pctl->irq_array[d->hwirq] - pctl->desc->pin_base);
if (ret) {
dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
@@ -571,8 +571,8 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- gpio_unlock_as_irq(pctl->chip,
- pctl->irq_array[d->hwirq] - pctl->desc->pin_base);
+ gpiochip_unlock_as_irq(pctl->chip,
+ pctl->irq_array[d->hwirq] - pctl->desc->pin_base);
}
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 4245b96c7996..5a51523a3459 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -27,6 +27,7 @@
#define PI_BASE 256
#define PL_BASE 352
#define PM_BASE 384
+#define PN_BASE 416
#define SUNXI_PINCTRL_PIN(bank, pin) \
PINCTRL_PIN(P ## bank ## _BASE + (pin), "P" #bank #pin)
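For reference, the new PN_BASE constant plugs straight into the existing SUNXI_PINCTRL_PIN() macro shown in the context above. The fragment below is purely illustrative and not part of the patch; it only spells out the expansion, assuming the stock PINCTRL_PIN() helper from <linux/pinctrl/pinctrl.h>:

/*
 * Illustration only: with PN_BASE defined as 416, SUNXI_PINCTRL_PIN(N, 3)
 * expands via token pasting and stringification to
 * PINCTRL_PIN(PN_BASE + (3), "PN3"), i.e. a pinctrl_pin_desc with
 * .number = 419 and .name = "PN3".
 */
#include <linux/pinctrl/pinctrl.h>

#define PN_BASE	416
#define SUNXI_PINCTRL_PIN(bank, pin)					\
	PINCTRL_PIN(P ## bank ## _BASE + (pin), "P" #bank #pin)

static const struct pinctrl_pin_desc example_pn3 = SUNXI_PINCTRL_PIN(N, 3);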
diff --git a/drivers/pinctrl/vt8500/pinctrl-vt8500.c b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
index f2fe9f85cfa6..cf8bbc946ff7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-vt8500.c
+++ b/drivers/pinctrl/vt8500/pinctrl-vt8500.c
@@ -488,7 +488,6 @@ static struct platform_driver wmt_pinctrl_driver = {
.remove = vt8500_pinctrl_remove,
.driver = {
.name = "pinctrl-vt8500",
- .owner = THIS_MODULE,
.of_match_table = wmt_pinctrl_of_match,
},
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8505.c b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
index 483ba732694e..3f9c32dcb3d0 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8505.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8505.c
@@ -519,7 +519,6 @@ static struct platform_driver wmt_pinctrl_driver = {
.remove = wm8505_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8505",
- .owner = THIS_MODULE,
.of_match_table = wmt_pinctrl_of_match,
},
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8650.c b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
index 7de57f063153..4e80f98c2ba7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8650.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8650.c
@@ -357,7 +357,6 @@ static struct platform_driver wmt_pinctrl_driver = {
.remove = wm8650_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8650",
- .owner = THIS_MODULE,
.of_match_table = wmt_pinctrl_of_match,
},
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
index de43262398db..47b52a7cacac 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
@@ -396,7 +396,6 @@ static struct platform_driver wmt_pinctrl_driver = {
.remove = wm8750_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8750",
- .owner = THIS_MODULE,
.of_match_table = wmt_pinctrl_of_match,
},
};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8850.c b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
index ecadce9c91d5..8bbb38c931f6 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8850.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8850.c
@@ -375,7 +375,6 @@ static struct platform_driver wmt_pinctrl_driver = {
.remove = wm8850_pinctrl_remove,
.driver = {
.name = "pinctrl-wm8850",
- .owner = THIS_MODULE,
.of_match_table = wmt_pinctrl_of_match,
},
};
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index d866db80b4fd..b84fdd6b629b 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -571,7 +571,6 @@ static struct platform_device *cros_platform_device;
static struct platform_driver cros_platform_driver = {
.driver = {
.name = "chromeos_laptop",
- .owner = THIS_MODULE,
},
.probe = chromeos_laptop_probe,
};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a2eabe6ff9ad..638e797037da 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -38,7 +38,8 @@ config ACER_WMI
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on THERMAL && ACPI
+ depends on ACPI && THERMAL
+ select THERMAL_GOV_BANG_BANG
---help---
This is a driver for Acer Aspire One netbooks. It allows to access
the temperature sensor and to control the fan.
@@ -128,10 +129,10 @@ config DELL_WMI_AIO
be called dell-wmi-aio.
config DELL_SMO8800
- tristate "Dell Latitude freefall driver (ACPI SMO8800/SMO8810)"
+ tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
depends on ACPI
---help---
- Say Y here if you want to support SMO8800/SMO8810 freefall device
+ Say Y here if you want to support SMO88XX freefall devices
on Dell Latitude laptops.
To compile this driver as a module, choose M here: the module will
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 26c4fd1394da..3ac29a1e8f92 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -2117,7 +2117,6 @@ static void acer_platform_shutdown(struct platform_device *device)
static struct platform_driver acer_platform_driver = {
.driver = {
.name = "acer-wmi",
- .owner = THIS_MODULE,
.pm = &acer_pm,
},
.probe = acer_platform_probe,
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index f94467c05225..594c918b553d 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.26"
+#define DRV_VER "0.7.0"
/*
* According to the Atom N270 datasheet,
@@ -119,116 +119,152 @@ struct fancmd {
u8 cmd_auto;
};
+struct manualcmd {
+ u8 mreg;
+ u8 moff;
+};
+
+/* default register and command to disable fan in manual mode */
+static const struct manualcmd mcmd = {
+ .mreg = 0x94,
+ .moff = 0xff,
+};
+
/* BIOS settings */
-struct bios_settings_t {
+struct bios_settings {
const char *vendor;
const char *product;
const char *version;
- unsigned char fanreg;
- unsigned char tempreg;
+ u8 fanreg;
+ u8 tempreg;
struct fancmd cmd;
+ int mcmd_enable;
};
/* Register addresses and values for different BIOS versions */
-static const struct bios_settings_t bios_tbl[] = {
+static const struct bios_settings bios_tbl[] = {
/* AOA110 */
- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00}, 0},
/* AOA150 */
- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
/* LT1005u */
- {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer 1410 */
- {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Acer 1810xx */
- {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer 5755G */
+ {"Acer", "Aspire 5755G", "V1.20", 0xab, 0xb4, {0x00, 0x08}, 0},
+ {"Acer", "Aspire 5755G", "V1.21", 0xab, 0xb3, {0x00, 0x08}, 0},
+ /* Acer 521 */
+ {"Acer", "AO521", "V1.11", 0x55, 0x58, {0x1f, 0x00}, 0},
/* Acer 531 */
- {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer 751 */
- {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AO751h", "V0.3206", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00}, 0},
+ /* Acer 753 */
+ {"Acer", "Aspire One 753", "V1.24", 0x93, 0xac, {0x14, 0x04}, 1},
/* Acer 1825 */
- {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
- {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Acer Extensa 5420 */
+ {"Acer", "Extensa 5420", "V1.17", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5315 */
+ {"Acer", "Aspire 5315", "V1.19", 0x93, 0xac, {0x14, 0x04}, 1},
+ /* Acer Aspire 5739 */
+ {"Acer", "Aspire 5739G", "V1.3311", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer TravelMate 7730 */
- {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00}, 0},
+ /* Acer TravelMate TM8573T */
+ {"Acer", "TM8573T", "V1.13", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Gateway */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
- {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Packard Bell */
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00}, 0},
/* pewpew-terminator */
- {"", "", "", 0, 0, {0, 0} }
+ {"", "", "", 0, 0, {0, 0}, 0}
};
-static const struct bios_settings_t *bios_cfg __read_mostly;
+static const struct bios_settings *bios_cfg __read_mostly;
+
+/*
+ * this struct is used to instruct thermal layer to use bang_bang instead of
+ * default governor for acerhdf
+ */
+static struct thermal_zone_params acerhdf_zone_params = {
+ .governor_name = "bang_bang",
+};
static int acerhdf_get_temp(int *temp)
{
@@ -275,6 +311,12 @@ static void acerhdf_change_fanstate(int state)
fanstate = state;
ec_write(bios_cfg->fanreg, cmd);
+
+ if (bios_cfg->mcmd_enable && state == ACERHDF_FAN_OFF) {
+ if (verbose)
+ pr_notice("turning off fan manually\n");
+ ec_write(mcmd.mreg, mcmd.moff);
+ }
}
static void acerhdf_check_param(struct thermal_zone_device *thermal)
@@ -401,6 +443,21 @@ static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
{
if (trip == 0)
*type = THERMAL_TRIP_ACTIVE;
+ else if (trip == 1)
+ *type = THERMAL_TRIP_CRITICAL;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ if (trip != 0)
+ return -EINVAL;
+
+ *temp = fanon - fanoff;
return 0;
}
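Taken together with the get_trip_temp change in the next hunk, the new hysteresis callback is what lets the bang_bang governor drive the fan like a thermostat: trip 0 reports fanon as the trip temperature and fanon - fanoff as the hysteresis. The sketch below only illustrates the resulting behaviour; the helper name and signature are invented for the example and are not part of the patch:

/*
 * Illustrative sketch only: effectively the decision the bang_bang governor
 * makes for trip 0, given the values returned by acerhdf_get_trip_temp()
 * and acerhdf_get_trip_hyst().
 */
static bool fan_should_run(unsigned long temp, unsigned long trip_temp,
			   unsigned long hyst, bool currently_on)
{
	if (temp >= trip_temp)		/* at or above fanon: run the fan */
		return true;
	if (temp < trip_temp - hyst)	/* below fanoff: stop the fan */
		return false;
	return currently_on;		/* inside the hysteresis band: keep state */
}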
@@ -410,6 +467,10 @@ static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
{
if (trip == 0)
*temp = fanon;
+ else if (trip == 1)
+ *temp = ACERHDF_TEMP_CRIT;
+ else
+ return -EINVAL;
return 0;
}
@@ -429,6 +490,7 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = {
.get_mode = acerhdf_get_mode,
.set_mode = acerhdf_set_mode,
.get_trip_type = acerhdf_get_trip_type,
+ .get_trip_hyst = acerhdf_get_trip_hyst,
.get_trip_temp = acerhdf_get_trip_temp,
.get_crit_temp = acerhdf_get_crit_temp,
};
@@ -481,9 +543,7 @@ static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
}
if (state == 0) {
- /* turn fan off only if below fanoff temperature */
- if ((cur_state == ACERHDF_FAN_AUTO) &&
- (cur_temp < fanoff))
+ if (cur_state == ACERHDF_FAN_AUTO)
acerhdf_change_fanstate(ACERHDF_FAN_OFF);
} else {
if (cur_state == ACERHDF_FAN_OFF)
@@ -533,7 +593,6 @@ static const struct dev_pm_ops acerhdf_pm_ops = {
static struct platform_driver acerhdf_driver = {
.driver = {
.name = "acerhdf",
- .owner = THIS_MODULE,
.pm = &acerhdf_pm_ops,
},
.probe = acerhdf_probe,
@@ -559,7 +618,7 @@ static int str_starts_with(const char *str, const char *start)
static int acerhdf_check_hardware(void)
{
char const *vendor, *version, *product;
- const struct bios_settings_t *bt = NULL;
+ const struct bios_settings *bt = NULL;
/* get BIOS data */
vendor = dmi_get_system_info(DMI_SYS_VENDOR);
@@ -661,12 +720,20 @@ static int acerhdf_register_thermal(void)
if (IS_ERR(cl_dev))
return -EINVAL;
- thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL,
- &acerhdf_dev_ops, NULL, 0,
+ thz_dev = thermal_zone_device_register("acerhdf", 2, 0, NULL,
+ &acerhdf_dev_ops,
+ &acerhdf_zone_params, 0,
(kernelmode) ? interval*1000 : 0);
if (IS_ERR(thz_dev))
return -EINVAL;
+ if (strcmp(thz_dev->governor->name,
+ acerhdf_zone_params.governor_name)) {
+ pr_err("Didn't get thermal governor %s, perhaps not compiled into thermal subsystem.\n",
+ acerhdf_zone_params.governor_name);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -723,9 +790,15 @@ MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5755G:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO521*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5739G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*One*753:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5315:");
MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
+MODULE_ALIAS("dmi:*:*Acer*:TM8573T:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
@@ -734,6 +807,7 @@ MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnExtensa 5420*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index c5af23b64438..1e1e59423889 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -157,7 +157,6 @@ static struct platform_zone *zone_data;
static struct platform_driver platform_driver = {
.driver = {
.name = "alienware-wmi",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index da36b5e824d4..0157625cb918 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -138,7 +138,6 @@ static int amilo_rfkill_remove(struct platform_device *device)
static struct platform_driver amilo_rfkill_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = amilo_rfkill_probe,
.remove = amilo_rfkill_remove,
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 7f4dc6f51f8a..f71700e0d132 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -843,8 +843,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
static void asus_backlight_exit(struct asus_laptop *asus)
{
- if (asus->backlight_device)
- backlight_device_unregister(asus->backlight_device);
+ backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
@@ -1699,7 +1698,6 @@ static void asus_platform_exit(struct asus_laptop *asus)
static struct platform_driver platform_driver = {
.driver = {
.name = ASUS_LAPTOP_FILE,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index c1a6cd66af42..abdaed34c728 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -191,6 +191,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X551CA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X55A",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 21fc932da3a1..7543a56e0f45 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1308,8 +1308,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
static void asus_wmi_backlight_exit(struct asus_wmi *asus)
{
- if (asus->backlight_device)
- backlight_device_unregister(asus->backlight_device);
+ backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 26bfd7bb5c13..15c0fab2bfa1 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -710,7 +710,6 @@ static int compal_remove(struct platform_device *);
static struct platform_driver compal_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = compal_probe,
.remove = compal_remove,
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 233d2ee598a6..9411eae39a4e 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,9 +2,11 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
- * Inc.
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -62,6 +71,13 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
+
+ int needs_kbd_timeouts;
+ /*
+ * Ordered list of timeouts expressed in seconds.
+ * The list must end with -1
+ */
+ int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -76,6 +92,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
+/*
+ * These values come from a Windows utility provided by Dell. If any other
+ * value is used, the BIOS silently sets the timeout to 0 without any error
+ * message.
+ */
+static struct quirk_entry quirk_dell_xps13_9333 = {
+ .needs_kbd_timeouts = 1,
+ .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
+};
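On machines carrying this quirk, a timeout written through sysfs is rounded up to the next entry of the list before it reaches the BIOS; the real logic sits inline in kbd_led_timeout_store() further down this patch. A standalone sketch of that rounding, with an invented helper name, purely for illustration:

/*
 * Illustration only: round a requested timeout (in seconds) up to the next
 * value the BIOS accepts, using a -1 terminated list such as
 * quirk_dell_xps13_9333.kbd_timeouts above. A request of 30 seconds would
 * come back as 60.
 */
static int kbd_round_up_timeout(const int *timeouts, int seconds)
{
	int i;

	for (i = 0; timeouts[i] != -1; i++)
		if (seconds <= timeouts[i])
			return timeouts[i];

	return seconds;	/* larger than every entry: left as requested */
}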
+
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -84,7 +109,6 @@ static struct calling_interface_token *da_tokens;
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
- .owner = THIS_MODULE,
}
};
@@ -268,6 +292,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ .driver_data = &quirk_dell_xps13_9333,
+ },
{ }
};
@@ -332,17 +365,29 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_location(int tokenid)
+static int find_token_id(int tokenid)
{
int i;
+
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return da_tokens[i].location;
+ return i;
}
return -1;
}
+static int find_token_location(int tokenid)
+{
+ int id;
+
+ id = find_token_id(tokenid);
+ if (id == -1)
+ return -1;
+
+ return da_tokens[id].location;
+}
+
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -363,6 +408,20 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
+static inline int dell_smi_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -564,7 +623,7 @@ static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
{
static bool extended;
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
if (unlikely(data == 0xe0)) {
@@ -717,7 +776,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
-out:
+ out:
release_buffer();
return ret;
}
@@ -741,7 +800,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
-out:
+ out:
release_buffer();
return ret;
}
@@ -790,6 +849,984 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
+/*
+ * Derived from information in smbios-keyboard-ctl:
+ *
+ * cbClass 4
+ * cbSelect 11
+ * Keyboard illumination
+ * cbArg1 determines the function to be performed
+ *
+ * cbArg1 0x0 = Get Feature Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of user-selectable modes
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * cbRES2, byte2 Reserved for future use
+ * cbRES2, byte3 Keyboard illumination type
+ * 0 Reserved
+ * 1 Tasklight
+ * 2 Backlight
+ * 3-255 Reserved for future use
+ * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte1 Supported timeout unit bitmap
+ * bit 0 Seconds
+ * bit 1 Minutes
+ * bit 2 Hours
+ * bit 3 Days
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte2 Number of keyboard light brightness levels
+ * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
+ * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
+ * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
+ * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
+ *
+ * cbArg1 0x1 = Get Current State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbRES2, byte2 Currently active auto keyboard illumination triggers.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES2, byte3 Current Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
+ * are set upon return from the [Get feature information] call.
+ * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
+ * cbRES3, byte1 Current ALS reading
+ * cbRES3, byte2 Current keyboard light level.
+ *
+ * cbArg1 0x2 = Set New State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbArg2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
+ * keyboard to turn off automatically.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbArg2, byte3 Desired Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
+ * cbArg3, byte2 Desired keyboard light level.
+ */
+
+
+enum kbd_timeout_unit {
+ KBD_TIMEOUT_SECONDS = 0,
+ KBD_TIMEOUT_MINUTES,
+ KBD_TIMEOUT_HOURS,
+ KBD_TIMEOUT_DAYS,
+};
+
+enum kbd_mode_bit {
+ KBD_MODE_BIT_OFF = 0,
+ KBD_MODE_BIT_ON,
+ KBD_MODE_BIT_ALS,
+ KBD_MODE_BIT_TRIGGER_ALS,
+ KBD_MODE_BIT_TRIGGER,
+ KBD_MODE_BIT_TRIGGER_25,
+ KBD_MODE_BIT_TRIGGER_50,
+ KBD_MODE_BIT_TRIGGER_75,
+ KBD_MODE_BIT_TRIGGER_100,
+};
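The timeout byte described in the comment block above (cbRES2/cbArg2, byte3) packs a unit selector into bits 7:6 and the value into bits 5:0. A one-line illustration of that encoding using the enum just defined; the variable is made up for the example and is not part of the patch:

/*
 * Illustration only: encoding a timeout of "10 minutes" into the byte
 * layout documented above (bits 7:6 = unit, 01b = minutes;
 * bits 5:0 = value, 0-63).
 */
u8 timeout_byte = (KBD_TIMEOUT_MINUTES << 6) | (10 & 0x3F);	/* == 0x4A */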
+
+#define kbd_is_als_mode_bit(bit) \
+ ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
+#define kbd_is_trigger_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+#define kbd_is_level_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+
+struct kbd_info {
+ u16 modes;
+ u8 type;
+ u8 triggers;
+ u8 levels;
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 days;
+};
+
+struct kbd_state {
+ u8 mode_bit;
+ u8 triggers;
+ u8 timeout_value;
+ u8 timeout_unit;
+ u8 als_setting;
+ u8 als_value;
+ u8 level;
+};
+
+static const int kbd_tokens[] = {
+ KBD_LED_OFF_TOKEN,
+ KBD_LED_AUTO_25_TOKEN,
+ KBD_LED_AUTO_50_TOKEN,
+ KBD_LED_AUTO_75_TOKEN,
+ KBD_LED_AUTO_100_TOKEN,
+ KBD_LED_ON_TOKEN,
+};
+
+static u16 kbd_token_bits;
+
+static struct kbd_info kbd_info;
+static bool kbd_als_supported;
+static bool kbd_triggers_supported;
+
+static u8 kbd_mode_levels[16];
+static int kbd_mode_levels_count;
+
+static u8 kbd_previous_level;
+static u8 kbd_previous_mode_bit;
+
+static bool kbd_led_present;
+
+/*
+ * NOTE: there are three ways to set the keyboard backlight level.
+ * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
+ * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
+ * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
+ *
+ * There are laptops which support only one of these methods. If we want to
+ * support as many machines as possible we need to implement all three methods.
+ * The first two methods use the kbd_state structure. The third uses SMBIOS
+ * tokens. If kbd_info.levels == 0, the machine does not support setting the
+ * keyboard backlight level via kbd_state.level.
+ */
+
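Restating the note above as a decision sketch: which mechanism a machine ends up with follows from kbd_info.levels, the discovered mode-level bits, and the SMBIOS token bitmap. The helper and enum below are hypothetical, written only to summarize that precedence, and are not part of the patch:

/*
 * Hypothetical summary (illustration only): how the three mechanisms
 * described above map onto the state discovered at init time.
 */
enum kbd_level_method {
	KBD_VIA_STATE_LEVEL,	/* kbd_state.level, when kbd_info.levels != 0 */
	KBD_VIA_MODE_BIT,	/* KBD_MODE_BIT_TRIGGER_25..100 mode bits */
	KBD_VIA_TOKENS,		/* SMBIOS KBD_LED_* tokens */
	KBD_NO_LEVELS,
};

static enum kbd_level_method kbd_pick_level_method(void)
{
	if (kbd_info.levels != 0)
		return KBD_VIA_STATE_LEVEL;
	if (kbd_mode_levels_count > 0)
		return KBD_VIA_MODE_BIT;
	if (kbd_token_bits != 0)
		return KBD_VIA_TOKENS;
	return KBD_NO_LEVELS;
}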
+static int kbd_get_info(struct kbd_info *info)
+{
+ u8 units;
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x0;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ info->modes = buffer->output[1] & 0xFFFF;
+ info->type = (buffer->output[1] >> 24) & 0xFF;
+ info->triggers = buffer->output[2] & 0xFF;
+ units = (buffer->output[2] >> 8) & 0xFF;
+ info->levels = (buffer->output[2] >> 16) & 0xFF;
+
+ if (units & BIT(0))
+ info->seconds = (buffer->output[3] >> 0) & 0xFF;
+ if (units & BIT(1))
+ info->minutes = (buffer->output[3] >> 8) & 0xFF;
+ if (units & BIT(2))
+ info->hours = (buffer->output[3] >> 16) & 0xFF;
+ if (units & BIT(3))
+ info->days = (buffer->output[3] >> 24) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static unsigned int kbd_get_max_level(void)
+{
+ if (kbd_info.levels != 0)
+ return kbd_info.levels;
+ if (kbd_mode_levels_count > 0)
+ return kbd_mode_levels_count - 1;
+ return 0;
+}
+
+static int kbd_get_level(struct kbd_state *state)
+{
+ int i;
+
+ if (kbd_info.levels != 0)
+ return state->level;
+
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < kbd_mode_levels_count; ++i)
+ if (kbd_mode_levels[i] == state->mode_bit)
+ return i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_set_level(struct kbd_state *state, u8 level)
+{
+ if (kbd_info.levels != 0) {
+ if (level != 0)
+ kbd_previous_level = level;
+ if (state->level == level)
+ return 0;
+ state->level = level;
+ if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
+ state->mode_bit = kbd_previous_mode_bit;
+ else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit = state->mode_bit;
+ state->mode_bit = KBD_MODE_BIT_OFF;
+ }
+ return 0;
+ }
+
+ if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
+ if (level != 0)
+ kbd_previous_level = level;
+ state->mode_bit = kbd_mode_levels[level];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_get_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x1;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
+ if (state->mode_bit != 0)
+ state->mode_bit--;
+
+ state->triggers = (buffer->output[1] >> 16) & 0xFF;
+ state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
+ state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
+ state->als_setting = buffer->output[2] & 0xFF;
+ state->als_value = (buffer->output[2] >> 8) & 0xFF;
+ state->level = (buffer->output[2] >> 16) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static int kbd_set_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+ buffer->input[0] = 0x2;
+ buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
+ buffer->input[1] |= (state->triggers & 0xFF) << 16;
+ buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
+ buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
+ buffer->input[2] = state->als_setting & 0xFF;
+ buffer->input[2] |= (state->level & 0xFF) << 16;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+{
+ int ret;
+
+ ret = kbd_set_state(state);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * When setting the new state fails, try to restore the previous one.
+ * This is needed on some machines where BIOS sets a default state when
+ * setting a new state fails. This default state could be all off.
+ */
+
+ if (kbd_set_state(old))
+ pr_err("Setting old previous keyboard state failed\n");
+
+ return ret;
+}
+
+static int kbd_set_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ buffer->input[1] = da_tokens[id].value;
+ dell_send_request(buffer, 1, 0);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_get_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+ int val;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ dell_send_request(buffer, 0, 0);
+ ret = buffer->output[0];
+ val = buffer->output[1];
+ release_buffer();
+
+ if (ret)
+ return dell_smi_error(ret);
+
+ return (val == da_tokens[id].value);
+}
+
+static int kbd_get_first_active_token_bit(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
+ ret = kbd_get_token_bit(i);
+ if (ret == 1)
+ return i;
+ }
+
+ return ret;
+}
+
+static int kbd_get_valid_token_counts(void)
+{
+ return hweight16(kbd_token_bits);
+}
+
+static inline int kbd_init_info(void)
+{
+ struct kbd_state state;
+ int ret;
+ int i;
+
+ ret = kbd_get_info(&kbd_info);
+ if (ret)
+ return ret;
+
+ kbd_get_state(&state);
+
+ /* NOTE: timeout value is stored in 6 bits so max value is 63 */
+ if (kbd_info.seconds > 63)
+ kbd_info.seconds = 63;
+ if (kbd_info.minutes > 63)
+ kbd_info.minutes = 63;
+ if (kbd_info.hours > 63)
+ kbd_info.hours = 63;
+ if (kbd_info.days > 63)
+ kbd_info.days = 63;
+
+ /* NOTE: On tested machines ON mode did not work and caused
+ * problems (turned the backlight off), so do not use it
+ */
+ kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
+
+ kbd_previous_level = kbd_get_level(&state);
+ kbd_previous_mode_bit = state.mode_bit;
+
+ if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
+ kbd_previous_level = 1;
+
+ if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit =
+ ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
+ if (kbd_previous_mode_bit != 0)
+ kbd_previous_mode_bit--;
+ }
+
+ if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
+ BIT(KBD_MODE_BIT_TRIGGER_ALS)))
+ kbd_als_supported = true;
+
+ if (kbd_info.modes & (
+ BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
+ BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
+ BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
+ ))
+ kbd_triggers_supported = true;
+
+ /* kbd_mode_levels[0] is reserved, see below */
+ for (i = 0; i < 16; ++i)
+ if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
+ kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
+
+ /*
+ * Find the first supported mode and assign to kbd_mode_levels[0].
+ * This should be 0 (off), but we cannot depend on the BIOS to
+ * support 0.
+ */
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < 16; ++i) {
+ if (BIT(i) & kbd_info.modes) {
+ kbd_mode_levels[0] = i;
+ break;
+ }
+ }
+ kbd_mode_levels_count++;
+ }
+
+ return 0;
+}
+
+static inline void kbd_init_tokens(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
+ if (find_token_id(kbd_tokens[i]) != -1)
+ kbd_token_bits |= BIT(i);
+}
+
+static void kbd_init(void)
+{
+ int ret;
+
+ ret = kbd_init_info();
+ kbd_init_tokens();
+
+ if (kbd_token_bits != 0 || ret == 0)
+ kbd_led_present = true;
+}
+
+static ssize_t kbd_led_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool convert;
+ int value;
+ int ret;
+ char ch;
+ u8 unit;
+ int i;
+
+ ret = sscanf(buf, "%d %c", &value, &ch);
+ if (ret < 1)
+ return -EINVAL;
+ else if (ret == 1)
+ ch = 's';
+
+ if (value < 0)
+ return -EINVAL;
+
+ convert = false;
+
+ switch (ch) {
+ case 's':
+ if (value > kbd_info.seconds)
+ convert = true;
+ unit = KBD_TIMEOUT_SECONDS;
+ break;
+ case 'm':
+ if (value > kbd_info.minutes)
+ convert = true;
+ unit = KBD_TIMEOUT_MINUTES;
+ break;
+ case 'h':
+ if (value > kbd_info.hours)
+ convert = true;
+ unit = KBD_TIMEOUT_HOURS;
+ break;
+ case 'd':
+ if (value > kbd_info.days)
+ convert = true;
+ unit = KBD_TIMEOUT_DAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts)
+ convert = true;
+
+ if (convert) {
+ /* Convert value from current units to seconds */
+ switch (unit) {
+ case KBD_TIMEOUT_DAYS:
+ value *= 24;
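+ /* fall through */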
+ case KBD_TIMEOUT_HOURS:
+ value *= 60;
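+ /* fall through */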
+ case KBD_TIMEOUT_MINUTES:
+ value *= 60;
+ unit = KBD_TIMEOUT_SECONDS;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts) {
+ for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
+ if (value <= quirks->kbd_timeouts[i]) {
+ value = quirks->kbd_timeouts[i];
+ break;
+ }
+ }
+ }
+
+ if (value <= kbd_info.seconds && kbd_info.seconds) {
+ unit = KBD_TIMEOUT_SECONDS;
+ } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
+ value /= 60;
+ unit = KBD_TIMEOUT_MINUTES;
+ } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
+ value /= (60 * 60);
+ unit = KBD_TIMEOUT_HOURS;
+ } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
+ value /= (60 * 60 * 24);
+ unit = KBD_TIMEOUT_DAYS;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
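/*
 * Minimal userspace sketch for the attribute implemented above. The
 * sysfs path is an assumption based on the "dell::kbd_backlight" LED
 * name registered further down; the driver itself does not dictate it.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/leds/dell::kbd_backlight/stop_timeout", "w");

	if (!f) {
		perror("stop_timeout");
		return 1;
	}
	/* kbd_led_timeout_store() parses "<value> <unit>" with unit s, m, h or d */
	fprintf(f, "2 m\n");
	return fclose(f) ? 1 : 0;
}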
+
+static ssize_t kbd_led_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+ int len;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = sprintf(buf, "%d", state.timeout_value);
+
+ switch (state.timeout_unit) {
+ case KBD_TIMEOUT_SECONDS:
+ return len + sprintf(buf+len, "s\n");
+ case KBD_TIMEOUT_MINUTES:
+ return len + sprintf(buf+len, "m\n");
+ case KBD_TIMEOUT_HOURS:
+ return len + sprintf(buf+len, "h\n");
+ case KBD_TIMEOUT_DAYS:
+ return len + sprintf(buf+len, "d\n");
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
+ kbd_led_timeout_show, kbd_led_timeout_store);
+
+static const char * const kbd_led_triggers[] = {
+ "keyboard",
+ "touchpad",
+ /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
+ "mouse",
+};
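/*
 * The index in this array doubles as the bit position used in
 * kbd_state.triggers and kbd_info.triggers: 0 = keyboard, 1 = touchpad,
 * 2 = trackstick (alias of touchpad, hence the NULL), 3 = mouse.
 */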
+
+static ssize_t kbd_led_triggers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ bool als_enabled = false;
+ bool disable_als = false;
+ bool enable_als = false;
+ int trigger_bit = -1;
+ char trigger[21];
+ int i, ret;
+
+ ret = sscanf(buf, "%20s", trigger);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (trigger[0] != '+' && trigger[0] != '-')
+ return -EINVAL;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_als_supported)
+ als_enabled = kbd_is_als_mode_bit(state.mode_bit);
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (kbd_als_supported) {
+ if (strcmp(trigger, "+als") == 0) {
+ if (als_enabled)
+ return count;
+ enable_als = true;
+ } else if (strcmp(trigger, "-als") == 0) {
+ if (!als_enabled)
+ return count;
+ disable_als = true;
+ }
+ }
+
+ if (enable_als || disable_als) {
+ new_state = state;
+ if (enable_als) {
+ if (triggers_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ } else {
+ if (triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else {
+ new_state.mode_bit = KBD_MODE_BIT_ON;
+ }
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ if (kbd_triggers_supported) {
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
+ continue;
+ if (trigger[0] == '+' &&
+ triggers_enabled && (state.triggers & BIT(i)))
+ return count;
+ if (trigger[0] == '-' &&
+ (!triggers_enabled || !(state.triggers & BIT(i))))
+ return count;
+ trigger_bit = i;
+ break;
+ }
+ }
+
+ if (trigger_bit != -1) {
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /* NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers)
+ return -EINVAL;
+ if (new_state.triggers && !triggers_enabled) {
+ if (als_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ }
+ } else if (new_state.triggers == 0) {
+ if (als_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ else
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ return -EINVAL;
+}
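/*
 * Input accepted by the store handler above, one token per write
 * (derived from the parsing code, not an exhaustive specification):
 *
 *   "+als" / "-als"              enable/disable ambient-light mode
 *   "+<trigger>" / "-<trigger>"  enable/disable one entry of
 *                                kbd_led_triggers[], e.g. "+keyboard"
 *                                or "-touchpad"
 */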
+
+static ssize_t kbd_led_triggers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ bool triggers_enabled;
+ int level, i, ret;
+ int len = 0;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = 0;
+
+ if (kbd_triggers_supported) {
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+ level = kbd_get_level(&state);
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if ((triggers_enabled || level <= 0) &&
+ (state.triggers & BIT(i)))
+ buf[len++] = '+';
+ else
+ buf[len++] = '-';
+ len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
+ }
+ }
+
+ if (kbd_als_supported) {
+ if (kbd_is_als_mode_bit(state.mode_bit))
+ len += sprintf(buf+len, "+als ");
+ else
+ len += sprintf(buf+len, "-als ");
+ }
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
+ kbd_led_triggers_show, kbd_led_triggers_store);
+
+static ssize_t kbd_led_als_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u8 setting;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &setting);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.als_setting = setting;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", state.als_setting);
+}
+
+static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
+ kbd_led_als_show, kbd_led_als_store);
+
+static struct attribute *kbd_led_attrs[] = {
+ &dev_attr_stop_timeout.attr,
+ &dev_attr_start_triggers.attr,
+ &dev_attr_als_setting.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(kbd_led);
+
+static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
+{
+ int ret;
+ u16 num;
+ struct kbd_state state;
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ return 0;
+ ret = kbd_get_level(&state);
+ if (ret < 0)
+ return 0;
+ return ret;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ ret = kbd_get_first_active_token_bit();
+ if (ret < 0)
+ return 0;
+ for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return 0;
+ return ffs(num) - 1;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+ return 0;
+}
+
+static void kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u16 num;
+
+ if (kbd_get_max_level()) {
+ if (kbd_get_state(&state))
+ return;
+ new_state = state;
+ if (kbd_set_level(&new_state, value))
+ return;
+ kbd_set_state_safe(&new_state, &state);
+ return;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ for (num = kbd_token_bits; num != 0 && value > 0; --value)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return;
+ kbd_set_token_bit(ffs(num) - 1);
+ return;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+}
+
+static struct led_classdev kbd_led = {
+ .name = "dell::kbd_backlight",
+ .brightness_set = kbd_led_level_set,
+ .brightness_get = kbd_led_level_get,
+ .groups = kbd_led_groups,
+};
+
+static int __init kbd_led_init(struct device *dev)
+{
+ kbd_init();
+ if (!kbd_led_present)
+ return -ENODEV;
+ kbd_led.max_brightness = kbd_get_max_level();
+ if (!kbd_led.max_brightness) {
+ kbd_led.max_brightness = kbd_get_valid_token_counts();
+ if (kbd_led.max_brightness)
+ kbd_led.max_brightness--;
+ }
+ return led_classdev_register(dev, &kbd_led);
+}
+
+static void brightness_set_exit(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Don't change backlight level on exit */
+}
+
+static void kbd_led_exit(void)
+{
+ if (!kbd_led_present)
+ return;
+ kbd_led.brightness_set = brightness_set_exit;
+ led_classdev_unregister(&kbd_led);
+}
+
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -842,6 +1879,8 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
+ kbd_led_init(&platform_device->dev);
+
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -909,6 +1948,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
+ kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -925,5 +1965,7 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index a653716055d1..0aec4fd4c48e 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -1,5 +1,5 @@
/*
- * dell-smo8800.c - Dell Latitude ACPI SMO8800/SMO8810 freefall sensor driver
+ * dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
*
* Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
* Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
@@ -209,7 +209,13 @@ static int smo8800_remove(struct acpi_device *device)
static const struct acpi_device_id smo8800_ids[] = {
{ "SMO8800", 0 },
+ { "SMO8801", 0 },
{ "SMO8810", 0 },
+ { "SMO8811", 0 },
+ { "SMO8820", 0 },
+ { "SMO8821", 0 },
+ { "SMO8830", 0 },
+ { "SMO8831", 0 },
{ "", 0 },
};
@@ -228,6 +234,6 @@ static struct acpi_driver smo8800_driver = {
module_acpi_driver(smo8800_driver);
-MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO8800/SMO8810)");
+MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO88XX)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonal Santan, Pali Rohár");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 25721bf20092..6512a06bc053 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -65,10 +65,8 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
/* Battery health status button */
{ KE_KEY, 0xe007, { KEY_BATTERY } },
- /* This is actually for all radios. Although physically a
- * switch, the notification does not provide an indication of
- * state and so it should be reported as a key */
- { KE_KEY, 0xe008, { KEY_WLAN } },
+ /* Radio devices state change */
+ { KE_IGNORE, 0xe008, { KEY_RFKILL } },
/* The next device is at offset 6, the active devices are at
offset 8 and the attached devices at offset 10 */
@@ -145,57 +143,154 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
static struct input_dev *dell_wmi_input_dev;
+static void dell_wmi_process_key(int reported_key)
+{
+ const struct key_entry *key;
+
+ key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ reported_key);
+ if (!key) {
+ pr_info("Unknown key %x pressed\n", reported_key);
+ return;
+ }
+
+ pr_debug("Key %x pressed\n", reported_key);
+
+ /* Don't report brightness notifications that will also come via ACPI */
+ if ((key->keycode == KEY_BRIGHTNESSUP ||
+ key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video)
+ return;
+
+ sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
+}
+
static void dell_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
+ acpi_size buffer_size;
+ u16 *buffer_entry, *buffer_end;
+ int len, i;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
+ pr_warn("bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
+ if (!obj) {
+ pr_warn("no response\n");
+ return;
+ }
- if (obj && obj->type == ACPI_TYPE_BUFFER) {
- const struct key_entry *key;
- int reported_key;
- u16 *buffer_entry = (u16 *)obj->buffer.pointer;
- int buffer_size = obj->buffer.length/2;
-
- if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
- pr_info("Received unknown WMI event (0x%x)\n",
- buffer_entry[1]);
- kfree(obj);
- return;
- }
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ pr_warn("bad response type %x\n", obj->type);
+ kfree(obj);
+ return;
+ }
- if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
- reported_key = (int)buffer_entry[2];
+ pr_debug("Received WMI event (%*ph)\n",
+ obj->buffer.length, obj->buffer.pointer);
+
+ buffer_entry = (u16 *)obj->buffer.pointer;
+ buffer_size = obj->buffer.length/2;
+
+ if (!dell_new_hk_type) {
+ if (buffer_size >= 3 && buffer_entry[1] == 0x0)
+ dell_wmi_process_key(buffer_entry[2]);
else if (buffer_size >= 2)
- reported_key = (int)buffer_entry[1] & 0xffff;
- else {
+ dell_wmi_process_key(buffer_entry[1]);
+ else
pr_info("Received unknown WMI event\n");
- kfree(obj);
- return;
+ kfree(obj);
+ return;
+ }
+
+ buffer_end = buffer_entry + buffer_size;
+
+ while (buffer_entry < buffer_end) {
+
+ len = buffer_entry[0];
+ if (len == 0)
+ break;
+
+ len++;
+
+ if (buffer_entry + len > buffer_end) {
+ pr_warn("Invalid length of WMI event\n");
+ break;
}
- key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
- reported_key);
- if (!key) {
- pr_info("Unknown key %x pressed\n", reported_key);
- } else if ((key->keycode == KEY_BRIGHTNESSUP ||
- key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
- /* Don't report brightness notifications that will also
- * come via ACPI */
- ;
- } else {
- sparse_keymap_report_entry(dell_wmi_input_dev, key,
- 1, true);
+ pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);
+
+ switch (buffer_entry[1]) {
+ case 0x00:
+ for (i = 2; i < len; ++i) {
+ switch (buffer_entry[i]) {
+ case 0xe043:
+ /* NIC Link is Up */
+ pr_debug("NIC Link is Up\n");
+ break;
+ case 0xe044:
+ /* NIC Link is Down */
+ pr_debug("NIC Link is Down\n");
+ break;
+ case 0xe045:
+ /* Unknown event but defined in DSDT */
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x00: "
+ "0x%x\n", (int)buffer_entry[i]);
+ break;
+ }
+ }
+ break;
+ case 0x10:
+ /* Keys pressed */
+ for (i = 2; i < len; ++i)
+ dell_wmi_process_key(buffer_entry[i]);
+ break;
+ case 0x11:
+ for (i = 2; i < len; ++i) {
+ switch (buffer_entry[i]) {
+ case 0xfff0:
+ /* Battery unplugged */
+ pr_debug("Battery unplugged\n");
+ break;
+ case 0xfff1:
+ /* Battery inserted */
+ pr_debug("Battery inserted\n");
+ break;
+ case 0x01e1:
+ case 0x02ea:
+ case 0x02eb:
+ case 0x02ec:
+ case 0x02f6:
+ /* Keyboard backlight level changed */
+ pr_debug("Keyboard backlight level "
+ "changed\n");
+ break;
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x11: "
+ "0x%x\n", (int)buffer_entry[i]);
+ break;
+ }
+ }
+ break;
+ default:
+ /* Unknown event */
+ pr_info("Unknown WMI event type 0x%x\n",
+ (int)buffer_entry[1]);
+ break;
}
+
+ buffer_entry += len;
+
}
+
kfree(obj);
}
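/*
 * New-type (dell_new_hk_type) event buffer consumed by the loop above,
 * expressed as 16-bit words (layout reconstructed from the parsing
 * code):
 *
 *   word 0       number of words that follow in this entry
 *   word 1       entry type: 0x00 misc events, 0x10 key presses,
 *                0x11 battery/keyboard-backlight status
 *   words 2..    the event codes (word 0 minus one of them)
 *
 * Entries repeat until a zero length word or the end of the buffer.
 */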
@@ -213,11 +308,16 @@ static const struct key_entry * __init dell_wmi_prepare_new_keymap(void)
for (i = 0; i < hotkey_num; i++) {
const struct dell_bios_keymap_entry *bios_entry =
&dell_bios_hotkey_table->keymap[i];
- keymap[i].type = KE_KEY;
- keymap[i].code = bios_entry->scancode;
- keymap[i].keycode = bios_entry->keycode < 256 ?
+ u16 keycode = bios_entry->keycode < 256 ?
bios_to_linux_keycode[bios_entry->keycode] :
KEY_RESERVED;
+
+ if (keycode == KEY_KBDILLUMTOGGLE)
+ keymap[i].type = KE_IGNORE;
+ else
+ keymap[i].type = KE_KEY;
+ keymap[i].code = bios_entry->scancode;
+ keymap[i].keycode = keycode;
}
keymap[hotkey_num].type = KE_END;
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index db79902c4a8e..844c2096bde9 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -417,8 +417,7 @@ static ssize_t cpufv_disabled_store(struct device *dev,
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
- pr_warn("cpufv enabled (not officially supported "
- "on this model)\n");
+ pr_warn("cpufv enabled (not officially supported on this model)\n");
eeepc->cpufv_disabled = false;
return count;
case 1:
@@ -580,59 +579,58 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
- if (eeepc->hotplug_slot) {
- port = acpi_get_pci_dev(handle);
- if (!port) {
- pr_warning("Unable to find port\n");
- goto out_unlock;
- }
+ if (!eeepc->hotplug_slot)
+ goto out_unlock;
- bus = port->subordinate;
+ port = acpi_get_pci_dev(handle);
+ if (!port) {
+ pr_warning("Unable to find port\n");
+ goto out_unlock;
+ }
- if (!bus) {
- pr_warn("Unable to find PCI bus 1?\n");
- goto out_put_dev;
- }
+ bus = port->subordinate;
- if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
- pr_err("Unable to read PCI config space?\n");
- goto out_put_dev;
- }
+ if (!bus) {
+ pr_warn("Unable to find PCI bus 1?\n");
+ goto out_put_dev;
+ }
+
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_put_dev;
+ }
- absent = (l == 0xffffffff);
+ absent = (l == 0xffffffff);
- if (blocked != absent) {
- pr_warn("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
- blocked ? "blocked" : "unblocked",
- absent ? "absent" : "present");
- pr_warn("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ if (blocked != absent) {
+ pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
+ goto out_put_dev;
+ }
+
+ if (!blocked) {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ /* Device already present */
+ pci_dev_put(dev);
goto out_put_dev;
}
-
- if (!blocked) {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- /* Device already present */
- pci_dev_put(dev);
- goto out_put_dev;
- }
- dev = pci_scan_single_device(bus, 0);
- if (dev) {
- pci_bus_assign_resources(bus);
- pci_bus_add_device(dev);
- }
- } else {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
+ dev = pci_scan_single_device(bus, 0);
+ if (dev) {
+ pci_bus_assign_resources(bus);
+ pci_bus_add_device(dev);
+ }
+ } else {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
}
-out_put_dev:
- pci_dev_put(port);
}
+out_put_dev:
+ pci_dev_put(port);
out_unlock:
pci_unlock_rescan_remove();
@@ -821,11 +819,15 @@ static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
return 0;
}
+static char EEEPC_RFKILL_NODE_1[] = "\\_SB.PCI0.P0P5";
+static char EEEPC_RFKILL_NODE_2[] = "\\_SB.PCI0.P0P6";
+static char EEEPC_RFKILL_NODE_3[] = "\\_SB.PCI0.P0P7";
+
static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
{
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
if (eeepc->wlan_rfkill) {
rfkill_unregister(eeepc->wlan_rfkill);
rfkill_destroy(eeepc->wlan_rfkill);
@@ -897,9 +899,9 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
if (result == -EBUSY)
result = 0;
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
exit:
if (result && result != -ENODEV)
@@ -915,7 +917,7 @@ static int eeepc_hotk_thaw(struct device *device)
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
if (eeepc->wlan_rfkill) {
- bool wlan;
+ int wlan;
/*
* Work around bios bug - acpi _PTS turns off the wireless led
@@ -923,7 +925,8 @@ static int eeepc_hotk_thaw(struct device *device)
* we should kick it ourselves in case hibernation is aborted.
*/
wlan = get_acpi(eeepc, CM_ASL_WLAN);
- set_acpi(eeepc, CM_ASL_WLAN, wlan);
+ if (wlan >= 0)
+ set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
@@ -935,9 +938,9 @@ static int eeepc_hotk_restore(struct device *device)
/* Refresh both wlan rfkill state and pci hotplug */
if (eeepc->wlan_rfkill) {
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
- eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_1);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_2);
+ eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_3);
}
if (eeepc->bluetooth_rfkill)
@@ -961,7 +964,6 @@ static const struct dev_pm_ops eeepc_pm_ops = {
static struct platform_driver platform_driver = {
.driver = {
.name = EEEPC_LAPTOP_FILE,
- .owner = THIS_MODULE,
.pm = &eeepc_pm_ops,
}
};
@@ -978,18 +980,28 @@ static struct platform_driver platform_driver = {
#define EEEPC_EC_SFB0 0xD0
#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
+static inline int eeepc_pwm_to_lmsensors(int value)
+{
+ return value * 255 / 100;
+}
+
+static inline int eeepc_lmsensors_to_pwm(int value)
+{
+ value = clamp_val(value, 0, 255);
+ return value * 100 / 255;
+}
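/*
 * For example, an lm-sensors pwm value of 128 maps to
 * 128 * 100 / 255 = 50 on the EC's 0-100 scale, and an EC reading of 50
 * maps back to 50 * 255 / 100 = 127.
 */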
+
static int eeepc_get_fan_pwm(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_PWM, &value);
- return value * 255 / 100;
+ return eeepc_pwm_to_lmsensors(value);
}
static void eeepc_set_fan_pwm(int value)
{
- value = clamp_val(value, 0, 255);
- value = value * 100 / 255;
+ value = eeepc_lmsensors_to_pwm(value);
ec_write(EEEPC_EC_FAN_PWM, value);
}
@@ -1003,15 +1015,19 @@ static int eeepc_get_fan_rpm(void)
return high << 8 | low;
}
+#define EEEPC_EC_FAN_CTRL_BIT 0x02
+#define EEEPC_FAN_CTRL_MANUAL 1
+#define EEEPC_FAN_CTRL_AUTO 2
+
static int eeepc_get_fan_ctrl(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
- if (value & 0x02)
- return 1; /* manual */
+ if (value & EEEPC_EC_FAN_CTRL_BIT)
+ return EEEPC_FAN_CTRL_MANUAL;
else
- return 2; /* automatic */
+ return EEEPC_FAN_CTRL_AUTO;
}
static void eeepc_set_fan_ctrl(int manual)
@@ -1019,10 +1035,10 @@ static void eeepc_set_fan_ctrl(int manual)
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
- if (manual == 1)
- value |= 0x02;
+ if (manual == EEEPC_FAN_CTRL_MANUAL)
+ value |= EEEPC_EC_FAN_CTRL_BIT;
else
- value &= ~0x02;
+ value &= ~EEEPC_EC_FAN_CTRL_BIT;
ec_write(EEEPC_EC_FAN_CTRL, value);
}
@@ -1157,8 +1173,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc->backlight_device)
- backlight_device_unregister(eeepc->backlight_device);
+ backlight_device_unregister(eeepc->backlight_device);
eeepc->backlight_device = NULL;
}
@@ -1217,7 +1232,7 @@ static void eeepc_input_exit(struct eeepc_laptop *eeepc)
static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
{
if (!eeepc->inputdev)
- return ;
+ return;
if (!sparse_keymap_report_event(eeepc->inputdev, event, 1, true))
pr_info("Unknown key %x pressed\n", event);
}
@@ -1225,6 +1240,7 @@ static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
+ int old_brightness, new_brightness;
u16 count;
if (event > ACPI_MAX_SYS_NOTIFY)
@@ -1235,34 +1251,32 @@ static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
count);
/* Brightness events are special */
- if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) {
-
- /* Ignore them completely if the acpi video driver is used */
- if (eeepc->backlight_device != NULL) {
- int old_brightness, new_brightness;
-
- /* Update the backlight device. */
- old_brightness = eeepc_backlight_notify(eeepc);
-
- /* Convert event to keypress (obsolescent hack) */
- new_brightness = event - NOTIFY_BRN_MIN;
-
- if (new_brightness < old_brightness) {
- event = NOTIFY_BRN_MIN; /* brightness down */
- } else if (new_brightness > old_brightness) {
- event = NOTIFY_BRN_MAX; /* brightness up */
- } else {
- /*
- * no change in brightness - already at min/max,
- * event will be desired value (or else ignored)
- */
- }
- eeepc_input_notify(eeepc, event);
- }
- } else {
- /* Everything else is a bona-fide keypress event */
+ if (event < NOTIFY_BRN_MIN || event > NOTIFY_BRN_MAX) {
eeepc_input_notify(eeepc, event);
+ return;
+ }
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (!eeepc->backlight_device)
+ return;
+
+ /* Update the backlight device. */
+ old_brightness = eeepc_backlight_notify(eeepc);
+
+ /* Convert event to keypress (obsolescent hack) */
+ new_brightness = event - NOTIFY_BRN_MIN;
+
+ if (new_brightness < old_brightness) {
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ } else if (new_brightness > old_brightness) {
+ event = NOTIFY_BRN_MAX; /* brightness up */
+ } else {
+ /*
+ * no change in brightness - already at min/max,
+ * event will be desired value (or else ignored)
+ */
}
+ eeepc_input_notify(eeepc, event);
}
static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
@@ -1294,8 +1308,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
*/
if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
eeepc->cpufv_disabled = true;
- pr_info("model %s does not officially support setting cpu "
- "speed\n", model);
+ pr_info("model %s does not officially support setting cpu speed\n",
+ model);
pr_info("cpufv disabled to avoid instability\n");
}
@@ -1321,8 +1335,8 @@ static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
Check if cm_getv[cm] works and, if yes, assume cm should be set. */
if (!(eeepc->cm_supported & (1 << cm))
&& !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
- pr_info("%s (%x) not reported by BIOS,"
- " enabling anyway\n", name, 1 << cm);
+ pr_info("%s (%x) not reported by BIOS, enabling anyway\n",
+ name, 1 << cm);
eeepc->cm_supported |= 1 << cm;
}
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 2655d4a988f3..7c21c1c44dfa 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -559,7 +559,6 @@ static struct attribute_group fujitsupf_attribute_group = {
static struct platform_driver fujitsupf_driver = {
.driver = {
.name = "fujitsu-laptop",
- .owner = THIS_MODULE,
}
};
@@ -1154,8 +1153,7 @@ fail_hotkey1:
fail_hotkey:
platform_driver_unregister(&fujitsupf_driver);
fail_backlight:
- if (fujitsu->bl_device)
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu->bl_device);
fail_sysfs_group:
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
@@ -1179,8 +1177,7 @@ static void __exit fujitsu_cleanup(void)
platform_driver_unregister(&fujitsupf_driver);
- if (fujitsu->bl_device)
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu->bl_device);
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 777c7e3dda51..458e6c948c11 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -318,7 +318,6 @@ static struct platform_driver hdaps_driver = {
.probe = hdaps_probe,
.driver = {
.name = "hdaps",
- .owner = THIS_MODULE,
.pm = &hdaps_pm,
},
};
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 415348fc1210..4e4cc8bd7557 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -85,6 +85,9 @@ static int hpwl_add(struct acpi_device *device)
int err;
err = hp_wireless_input_setup();
+ if (err)
+ pr_err("Failed to setup hp wireless hotkeys\n");
+
return err;
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 4c559640dcba..0ab2b377a778 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -1006,7 +1006,6 @@ static const struct dev_pm_ops hp_wmi_pm_ops = {
static struct platform_driver hp_wmi_driver = {
.driver = {
.name = "hp-wmi",
- .owner = THIS_MODULE,
.pm = &hp_wmi_pm_ops,
},
.remove = __exit_p(hp_wmi_bios_remove),
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 6bec745b6b92..10ce6cba4455 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -246,6 +246,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
+ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ed494f37c40f..b3d419a84723 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -729,8 +729,7 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
static void ideapad_backlight_exit(struct ideapad_private *priv)
{
- if (priv->blightdev)
- backlight_device_unregister(priv->blightdev);
+ backlight_device_unregister(priv->blightdev);
priv->blightdev = NULL;
}
@@ -966,7 +965,6 @@ static struct platform_driver ideapad_acpi_driver = {
.remove = ideapad_acpi_remove,
.driver = {
.name = "ideapad_acpi",
- .owner = THIS_MODULE,
.pm = &ideapad_pm,
.acpi_match_table = ACPI_PTR(ideapad_device_ids),
},
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index c0242ed13d9e..e2065e06a3f3 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -33,7 +33,7 @@
* performance by allocating more power or thermal budget to the CPU or GPU
* based on available headroom and activity.
*
- * The basic algorithm is driven by a 5s moving average of tempurature. If
+ * The basic algorithm is driven by a 5s moving average of temperature. If
* thermal headroom is available, the CPU and/or GPU power clamps may be
* adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
* we scale back the clamp. Aside from trigger events (when we're critically
@@ -593,7 +593,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
return;
if (!ips->gpu_turbo_disable())
- dev_err(&ips->dev->dev, "failed to disable graphis turbo\n");
+ dev_err(&ips->dev->dev, "failed to disable graphics turbo\n");
else
ips->__gpu_turbo_on = false;
}
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 8d6775266d66..22606d6b2af3 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -133,7 +133,6 @@ static int mfld_pb_remove(struct platform_device *pdev)
static struct platform_driver mfld_pb_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = mfld_pb_probe,
.remove = mfld_pb_remove,
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index ab7860a21a22..0944e834af8d 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -555,7 +555,6 @@ static const struct platform_device_id therm_id_table[] = {
static struct platform_driver mid_thermal_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &mid_thermal_pm,
},
.probe = mid_thermal_probe,
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 4bc960416785..a4a4258f6134 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -271,8 +271,7 @@ static int oaktrail_backlight_init(void)
static void oaktrail_backlight_exit(void)
{
- if (oaktrail_bl_device)
- backlight_device_unregister(oaktrail_bl_device);
+ backlight_device_unregister(oaktrail_bl_device);
}
static int oaktrail_probe(struct platform_device *pdev)
@@ -288,7 +287,6 @@ static int oaktrail_remove(struct platform_device *pdev)
static struct platform_driver oaktrail_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = oaktrail_probe,
.remove = oaktrail_remove,
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 04fed00b88e9..709f0afdafa8 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -314,7 +314,6 @@ err2:
static struct platform_driver platform_pmic_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = platform_pmic_gpio_probe,
};
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 62f8030b9e77..085987730aab 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -573,7 +573,6 @@ static struct attribute_group msipf_old_attribute_group = {
static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
- .owner = THIS_MODULE,
.pm = &msi_laptop_pm,
},
};
@@ -821,7 +820,7 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
{
static bool extended;
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
/* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 70222f265f68..6d2bac0c463c 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -354,8 +354,7 @@ static void __exit msi_wmi_exit(void)
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
}
- if (backlight)
- backlight_device_unregister(backlight);
+ backlight_device_unregister(backlight);
}
module_init(msi_wmi_init);
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 28d12bda3ac1..e6aac725a0af 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -82,7 +82,6 @@ static int samsungq10_remove(struct platform_device *pdev)
static struct platform_driver samsungq10_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = samsungq10_probe,
.remove = samsungq10_remove,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 26ad9ff12ac5..6dd1c0e7dcd9 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -581,7 +581,6 @@ static atomic_t sony_pf_users = ATOMIC_INIT(0);
static struct platform_driver sony_pf_driver = {
.driver = {
.name = "sony-laptop",
- .owner = THIS_MODULE,
}
};
static struct platform_device *sony_pf_device;
@@ -3141,8 +3140,7 @@ static void sony_nc_backlight_setup(void)
static void sony_nc_backlight_cleanup(void)
{
- if (sony_bl_props.dev)
- backlight_device_unregister(sony_bl_props.dev);
+ backlight_device_unregister(sony_bl_props.dev);
}
static int sony_nc_add(struct acpi_device *device)
@@ -3717,8 +3715,7 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
dev->event_types = type2_events;
out:
- if (pcidev)
- pci_dev_put(pcidev);
+ pci_dev_put(pcidev);
pr_info("detected Type%d model\n",
dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 6a6ea28a7e51..e36542564131 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -234,7 +234,6 @@ static const struct dev_pm_ops tc1100_pm_ops = {
static struct platform_driver tc1100_driver = {
.driver = {
.name = "tc1100-wmi",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &tc1100_pm_ops,
#endif
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index cf0f89364d44..c3d11fabc46f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -972,7 +972,6 @@ static void tpacpi_shutdown_handler(struct platform_device *pdev)
static struct platform_driver tpacpi_pdriver = {
.driver = {
.name = TPACPI_DRVR_NAME,
- .owner = THIS_MODULE,
.pm = &tpacpi_pm,
},
.shutdown = tpacpi_shutdown_handler,
@@ -981,7 +980,6 @@ static struct platform_driver tpacpi_pdriver = {
static struct platform_driver tpacpi_hwmon_pdriver = {
.driver = {
.name = TPACPI_HWMON_DRVR_NAME,
- .owner = THIS_MODULE,
},
};
@@ -6559,6 +6557,17 @@ static struct ibm_struct brightness_driver_data = {
* bits 3-0 (volume). Other bits in NVRAM may have other functions,
* such as bit 7 which is used to detect repeated presses of MUTE,
* and we leave them unchanged.
+ *
+ * On newer Lenovo ThinkPads, the EC can automatically change the volume
+ * in response to user input. Unfortunately, this rarely works well.
+ * The laptop changes the state of its internal MUTE gate and, on some
+ * models, sends KEY_MUTE, causing any user code that responds to the
+ * mute button to get confused. The hardware MUTE gate is also
+ * unnecessary, since user code can handle the mute button without
+ * kernel or EC help.
+ *
+ * To avoid confusing userspace, we simply disable all EC-based mute
+ * and volume controls when possible.
*/
#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
@@ -6613,11 +6622,21 @@ enum tpacpi_volume_capabilities {
TPACPI_VOL_CAP_MAX
};
+enum tpacpi_mute_btn_mode {
+ TP_EC_MUTE_BTN_LATCH = 0, /* Mute mutes; up/down unmutes */
+ /* We don't know what mode 1 is. */
+ TP_EC_MUTE_BTN_NONE = 2, /* Mute and up/down are just keys */
+ TP_EC_MUTE_BTN_TOGGLE = 3, /* Mute toggles; up/down unmutes */
+};
+
static enum tpacpi_volume_access_mode volume_mode =
TPACPI_VOL_MODE_MAX;
static enum tpacpi_volume_capabilities volume_capabilities;
static bool volume_control_allowed;
+static bool software_mute_requested = true;
+static bool software_mute_active;
+static int software_mute_orig_mode;
/*
* Used to syncronize writers to TP_EC_AUDIO and
@@ -6635,6 +6654,8 @@ static void tpacpi_volume_checkpoint_nvram(void)
return;
if (!volume_control_allowed)
return;
+ if (software_mute_active)
+ return;
vdbg_printk(TPACPI_DBG_MIXER,
"trying to checkpoint mixer state to NVRAM...\n");
@@ -6696,6 +6717,12 @@ static int volume_set_status_ec(const u8 status)
dbg_printk(TPACPI_DBG_MIXER, "set EC mixer to 0x%02x\n", status);
+ /*
+ * On X200s, and possibly on others, it can take a while for
+ * reads to become correct.
+ */
+ msleep(1);
+
return 0;
}
@@ -6778,6 +6805,57 @@ unlock:
return rc;
}
+static int volume_set_software_mute(bool startup)
+{
+ int result;
+
+ if (!tpacpi_is_lenovo())
+ return -ENODEV;
+
+ if (startup) {
+ if (!acpi_evalf(ec_handle, &software_mute_orig_mode,
+ "HAUM", "qd"))
+ return -EIO;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "Initial HAUM setting was %d\n",
+ software_mute_orig_mode);
+ }
+
+ if (!acpi_evalf(ec_handle, &result, "SAUM", "qdd",
+ (int)TP_EC_MUTE_BTN_NONE))
+ return -EIO;
+
+ if (result != TP_EC_MUTE_BTN_NONE)
+ pr_warn("Unexpected SAUM result %d\n",
+ result);
+
+ /*
+ * In software mute mode, the standard codec controls take
+ * precedence, so we unmute the ThinkPad HW switch at
+ * startup. Just in case there are SAUM-capable ThinkPads
+ * with level controls, set max HW volume as well.
+ */
+ if (tp_features.mixer_no_level_control)
+ result = volume_set_mute(false);
+ else
+ result = volume_set_status(TP_EC_VOLUME_MAX);
+
+ if (result != 0)
+ pr_warn("Failed to unmute the HW mute switch\n");
+
+ return 0;
+}
+
+static void volume_exit_software_mute(void)
+{
+ int r;
+
+ if (!acpi_evalf(ec_handle, &r, "SAUM", "qdd", software_mute_orig_mode)
+ || r != software_mute_orig_mode)
+ pr_warn("Failed to restore mute mode\n");
+}
+
static int volume_alsa_set_volume(const u8 vol)
{
dbg_printk(TPACPI_DBG_MIXER,
@@ -6885,7 +6963,12 @@ static void volume_suspend(void)
static void volume_resume(void)
{
- volume_alsa_notify_change();
+ if (software_mute_active) {
+ if (volume_set_software_mute(false) < 0)
+ pr_warn("Failed to restore software mute\n");
+ } else {
+ volume_alsa_notify_change();
+ }
}
static void volume_shutdown(void)
@@ -6901,6 +6984,9 @@ static void volume_exit(void)
}
tpacpi_volume_checkpoint_nvram();
+
+ if (software_mute_active)
+ volume_exit_software_mute();
}
static int __init volume_create_alsa_mixer(void)
@@ -7085,16 +7171,20 @@ static int __init volume_init(struct ibm_init_struct *iibm)
"mute is supported, volume control is %s\n",
str_supported(!tp_features.mixer_no_level_control));
- rc = volume_create_alsa_mixer();
- if (rc) {
- pr_err("Could not create the ALSA mixer interface\n");
- return rc;
- }
+ if (software_mute_requested && volume_set_software_mute(true) == 0) {
+ software_mute_active = true;
+ } else {
+ rc = volume_create_alsa_mixer();
+ if (rc) {
+ pr_err("Could not create the ALSA mixer interface\n");
+ return rc;
+ }
- pr_info("Console audio control enabled, mode: %s\n",
- (volume_control_allowed) ?
- "override (read/write)" :
- "monitor (read only)");
+ pr_info("Console audio control enabled, mode: %s\n",
+ (volume_control_allowed) ?
+ "override (read/write)" :
+ "monitor (read only)");
+ }
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
"registering volume hotkeys as change notification\n");
@@ -9091,6 +9181,10 @@ MODULE_PARM_DESC(volume_control,
"Enables software override for the console audio "
"control when true");
+module_param_named(software_mute, software_mute_requested, bool, 0444);
+MODULE_PARM_DESC(software_mute,
+ "Request full software mute control");
+
/* ALSA module API parameters */
module_param_named(index, alsa_index, int, 0444);
MODULE_PARM_DESC(index, "ALSA index for the ACPI EC Mixer");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ab6151f05420..fc34a71866ed 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -186,6 +186,7 @@ static struct toshiba_acpi_dev *toshiba_acpi;
static const struct acpi_device_id toshiba_device_ids[] = {
{"TOS6200", 0},
+ {"TOS6207", 0},
{"TOS6208", 0},
{"TOS1900", 0},
{"", 0},
@@ -928,9 +929,7 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
- u32 in[TCI_WORDS] = { HCI_SET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 };
- u32 out[TCI_WORDS];
- acpi_status status;
+ u32 hci_result;
if (dev->tr_backlight_supported) {
bool enable = !value;
@@ -941,20 +940,9 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
value--;
}
- in[2] = value << HCI_LCD_BRIGHTNESS_SHIFT;
- status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to set brightness failed");
- return -EIO;
- }
- /* Extra check for "incomplete" backlight method, where the AML code
- * doesn't check for HCI_SET or HCI_GET and returns TOS_SUCCESS,
- * the actual brightness, and in some cases the max brightness.
- */
- if (out[2] > 0 || out[3] == 0xE000)
- return -ENODEV;
-
- return out[0] == TOS_SUCCESS ? 0 : -EIO;
+ value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+ hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static int set_lcd_status(struct backlight_device *bd)
@@ -1406,12 +1394,6 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
if (ret)
return ret;
- /* Update sysfs entries on successful mode change*/
- ret = sysfs_update_group(&toshiba->acpi_dev->dev.kobj,
- &toshiba_attr_group);
- if (ret)
- return ret;
-
toshiba->kbd_mode = mode;
}
@@ -1586,10 +1568,32 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
return exists ? attr->mode : 0;
}
+/*
+ * Hotkeys
+ */
+static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ acpi_status status;
+ u32 result;
+
+ status = acpi_evaluate_object(dev->acpi_dev->handle,
+ "ENAB", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
+ if (result == TOS_FAILURE)
+ return -EIO;
+ else if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return 0;
+}
+
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
- if (str & 0x20)
+ if (str & I8042_STR_AUXDATA)
return false;
if (unlikely(data == 0xe0))
@@ -1648,9 +1652,45 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
pr_info("Unknown key %x\n", scancode);
}
+static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
+{
+ u32 hci_result, value;
+ int retries = 3;
+ int scancode;
+
+ if (dev->info_supported) {
+ scancode = toshiba_acpi_query_hotkey(dev);
+ if (scancode < 0)
+ pr_err("Failed to query hotkey event\n");
+ else if (scancode != 0)
+ toshiba_acpi_report_hotkey(dev, scancode);
+ } else if (dev->system_event_supported) {
+ do {
+ hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
+ switch (hci_result) {
+ case TOS_SUCCESS:
+ toshiba_acpi_report_hotkey(dev, (int)value);
+ break;
+ case TOS_NOT_SUPPORTED:
+ /*
+ * This is a workaround for an unresolved
+ * issue on some machines where system events
+ * sporadically become disabled.
+ */
+ hci_result =
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+ pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
+ }
+ } while (retries && hci_result != TOS_FIFO_EMPTY);
+ }
+}
+
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
- acpi_status status;
acpi_handle ec_handle;
int error;
u32 hci_result;
@@ -1677,7 +1717,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
* supported, so if it's present set up an i8042 key filter
* for this purpose.
*/
- status = AE_ERROR;
ec_handle = ec_get_handle();
if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
@@ -1708,10 +1747,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", NULL, NULL);
- if (ACPI_FAILURE(status)) {
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error) {
pr_info("Unable to enable hotkeys\n");
- error = -ENODEV;
goto err_remove_filter;
}
@@ -1721,7 +1759,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- hci_result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
return 0;
err_remove_filter:
@@ -1810,8 +1847,7 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
rfkill_destroy(dev->bt_rfk);
}
- if (dev->backlight_dev)
- backlight_device_unregister(dev->backlight_dev);
+ backlight_device_unregister(dev->backlight_dev);
if (dev->illumination_supported)
led_classdev_unregister(&dev->led_dev);
@@ -1967,41 +2003,29 @@ error:
static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
- u32 hci_result, value;
- int retries = 3;
- int scancode;
-
- if (event != 0x80)
- return;
+ int ret;
- if (dev->info_supported) {
- scancode = toshiba_acpi_query_hotkey(dev);
- if (scancode < 0)
- pr_err("Failed to query hotkey event\n");
- else if (scancode != 0)
- toshiba_acpi_report_hotkey(dev, scancode);
- } else if (dev->system_event_supported) {
- do {
- hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
- switch (hci_result) {
- case TOS_SUCCESS:
- toshiba_acpi_report_hotkey(dev, (int)value);
- break;
- case TOS_NOT_SUPPORTED:
- /*
- * This is a workaround for an unresolved
- * issue on some machines where system events
- * sporadically become disabled.
- */
- hci_result =
- hci_write1(dev, HCI_SYSTEM_EVENT, 1);
- pr_notice("Re-enabled hotkeys\n");
- /* fall through */
- default:
- retries--;
- break;
- }
- } while (retries && hci_result != TOS_FIFO_EMPTY);
+ switch (event) {
+ case 0x80: /* Hotkeys and some system events */
+ toshiba_acpi_process_hotkeys(dev);
+ break;
+ case 0x92: /* Keyboard backlight mode changed */
+ /* Update sysfs entries */
+ ret = sysfs_update_group(&acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+ if (ret)
+ pr_err("Unable to update sysfs entries\n");
+ break;
+ case 0x81: /* Unknown */
+ case 0x82: /* Unknown */
+ case 0x83: /* Unknown */
+ case 0x8c: /* Unknown */
+ case 0x8e: /* Unknown */
+ case 0x8f: /* Unknown */
+ case 0x90: /* Unknown */
+ default:
+ pr_info("Unknown event received %x\n", event);
+ break;
}
}
@@ -2020,16 +2044,12 @@ static int toshiba_acpi_suspend(struct device *device)
static int toshiba_acpi_resume(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
- u32 result;
- acpi_status status;
+ int error;
if (dev->hotkey_dev) {
- status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB",
- NULL, NULL);
- if (ACPI_FAILURE(status))
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error)
pr_info("Unable to re-enable hotkeys\n");
-
- result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
}
return 0;
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 4bd17248dfc6..e46fa9cebc7d 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -71,7 +71,6 @@ static int xo1_rfkill_remove(struct platform_device *pdev)
static struct platform_driver xo1_rfkill_driver = {
.driver = {
.name = "xo1-rfkill",
- .owner = THIS_MODULE,
},
.probe = xo1_rfkill_probe,
.remove = xo1_rfkill_remove,
diff --git a/drivers/power/88pm860x_battery.c b/drivers/power/88pm860x_battery.c
index dfcda3a49403..bd3c997f4fee 100644
--- a/drivers/power/88pm860x_battery.c
+++ b/drivers/power/88pm860x_battery.c
@@ -1023,7 +1023,6 @@ static SIMPLE_DEV_PM_OPS(pm860x_battery_pm_ops,
static struct platform_driver pm860x_battery_driver = {
.driver = {
.name = "88pm860x-battery",
- .owner = THIS_MODULE,
.pm = &pm860x_battery_pm_ops,
},
.probe = pm860x_battery_probe,
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
index de029bbc1cc1..650930e4fa79 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/88pm860x_charger.c
@@ -732,7 +732,6 @@ static int pm860x_charger_remove(struct platform_device *pdev)
static struct platform_driver pm860x_charger_driver = {
.driver = {
.name = "88pm860x-charger",
- .owner = THIS_MODULE,
},
.probe = pm860x_charger_probe,
.remove = pm860x_charger_remove,
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 7f9a4547dccd..4ebf7b0819f7 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -1195,7 +1195,6 @@ static struct platform_driver ab8500_btemp_driver = {
.resume = ab8500_btemp_resume,
.driver = {
.name = "ab8500-btemp",
- .owner = THIS_MODULE,
.of_match_table = ab8500_btemp_match,
},
};
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 19110aa613a1..8c8d170ff0f8 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -3733,7 +3733,6 @@ static struct platform_driver ab8500_charger_driver = {
.resume = ab8500_charger_resume,
.driver = {
.name = "ab8500-charger",
- .owner = THIS_MODULE,
.of_match_table = ab8500_charger_match,
},
};
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 99a78d365ceb..69b80bcaa9e7 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -3281,7 +3281,6 @@ static struct platform_driver ab8500_fg_driver = {
.resume = ab8500_fg_resume,
.driver = {
.name = "ab8500-fg",
- .owner = THIS_MODULE,
.of_match_table = ab8500_fg_match,
},
};
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 6d2723664a01..ab54b8dea670 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -2156,7 +2156,6 @@ static struct platform_driver abx500_chargalg_driver = {
.resume = abx500_chargalg_resume,
.driver = {
.name = "ab8500-chargalg",
- .owner = THIS_MODULE,
.of_match_table = ab8500_chargalg_match,
},
};
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index e3bacfe3bcd0..a78ac201828e 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -1014,7 +1014,6 @@ static struct platform_driver bq27000_battery_driver = {
.remove = bq27000_battery_remove,
.driver = {
.name = "bq27000-battery",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index ef8094a61f1e..649052e1f2d9 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -2086,7 +2086,6 @@ static const struct dev_pm_ops charger_manager_pm = {
static struct platform_driver charger_manager_driver = {
.driver = {
.name = "charger-manager",
- .owner = THIS_MODULE,
.pm = &charger_manager_pm,
.of_match_table = charger_manager_match,
},
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index ae6c41835ee6..78cd5d66144b 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -579,7 +579,6 @@ static int da9030_battery_remove(struct platform_device *dev)
static struct platform_driver da903x_battery_driver = {
.driver = {
.name = "da903x-battery",
- .owner = THIS_MODULE,
},
.probe = da9030_battery_probe,
.remove = da9030_battery_remove,
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index f8f4c0f7c17d..d17250f745c2 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -656,7 +656,6 @@ static struct platform_driver da9052_bat_driver = {
.remove = da9052_bat_remove,
.driver = {
.name = "da9052-bat",
- .owner = THIS_MODULE,
},
};
module_platform_driver(da9052_bat_driver);
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 041f9b638d28..39694883d3bf 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -351,13 +351,9 @@ static int ds278x_resume(struct device *dev)
schedule_delayed_work(&info->bat_work, DS278x_DELAY);
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(ds278x_battery_pm_ops, ds278x_suspend, ds278x_resume);
-#define DS278X_BATTERY_PM_OPS (&ds278x_battery_pm_ops)
-
-#else
-#define DS278X_BATTERY_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
enum ds278x_num_id {
DS2782 = 0,
@@ -460,7 +456,7 @@ MODULE_DEVICE_TABLE(i2c, ds278x_id);
static struct i2c_driver ds278x_battery_driver = {
.driver = {
.name = "ds2782-battery",
- .pm = DS278X_BATTERY_PM_OPS,
+ .pm = &ds278x_battery_pm_ops,
},
.probe = ds278x_battery_probe,
.remove = ds278x_battery_remove,
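
The ds2782 hunks above work because SIMPLE_DEV_PM_OPS() always emits a dev_pm_ops object, and the SET_SYSTEM_SLEEP_PM_OPS() initializer inside it expands to nothing when CONFIG_PM_SLEEP is disabled; .pm can therefore point at the ops unconditionally and the old NULL-vs-pointer helper macro is redundant. A minimal sketch of that pattern with hypothetical bar_suspend()/bar_resume() callbacks:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int bar_suspend(struct device *dev)
{
        return 0;
}

static int bar_resume(struct device *dev)
{
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume);

/* and in the driver definition: .driver = { .pm = &bar_pm_ops, ... } */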
diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/generic-adc-battery.c
index 59a1421f9288..d72733e4f93a 100644
--- a/drivers/power/generic-adc-battery.c
+++ b/drivers/power/generic-adc-battery.c
@@ -414,7 +414,6 @@ static const struct dev_pm_ops gab_pm_ops = {
static struct platform_driver gab_driver = {
.driver = {
.name = "generic-adc-battery",
- .owner = THIS_MODULE,
.pm = GAB_PM_OPS
},
.probe = gab_probe,
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 7536933d0ab9..aef74bdf7ab3 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/power/gpio-charger.h>
@@ -69,6 +71,59 @@ static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
};
+static
+struct gpio_charger_platform_data *gpio_charger_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct gpio_charger_platform_data *pdata;
+ const char *chargetype;
+ enum of_gpio_flags flags;
+ int ret;
+
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->name = np->name;
+
+ pdata->gpio = of_get_gpio_flags(np, 0, &flags);
+ if (pdata->gpio < 0) {
+ if (pdata->gpio != -EPROBE_DEFER)
+ dev_err(dev, "could not get charger gpio\n");
+ return ERR_PTR(pdata->gpio);
+ }
+
+ pdata->gpio_active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
+
+ pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
+ ret = of_property_read_string(np, "charger-type", &chargetype);
+ if (ret >= 0) {
+ if (!strncmp("unknown", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_UNKNOWN;
+ else if (!strncmp("battery", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_BATTERY;
+ else if (!strncmp("ups", chargetype, 3))
+ pdata->type = POWER_SUPPLY_TYPE_UPS;
+ else if (!strncmp("mains", chargetype, 5))
+ pdata->type = POWER_SUPPLY_TYPE_MAINS;
+ else if (!strncmp("usb-sdp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB;
+ else if (!strncmp("usb-dcp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_DCP;
+ else if (!strncmp("usb-cdp", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_CDP;
+ else if (!strncmp("usb-aca", chargetype, 7))
+ pdata->type = POWER_SUPPLY_TYPE_USB_ACA;
+ else
+ dev_warn(dev, "unknown charger type %s\n", chargetype);
+ }
+
+ return pdata;
+}
+
static int gpio_charger_probe(struct platform_device *pdev)
{
const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
@@ -78,8 +133,13 @@ static int gpio_charger_probe(struct platform_device *pdev)
int irq;
if (!pdata) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
+ pdata = gpio_charger_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "No platform data\n");
+ return ret;
+ }
}
if (!gpio_is_valid(pdata->gpio)) {
@@ -103,6 +163,7 @@ static int gpio_charger_probe(struct platform_device *pdev)
charger->get_property = gpio_charger_get_property;
charger->supplied_to = pdata->supplied_to;
charger->num_supplicants = pdata->num_supplicants;
+ charger->of_node = pdev->dev.of_node;
ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
if (ret) {
@@ -189,13 +250,19 @@ static int gpio_charger_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops,
gpio_charger_suspend, gpio_charger_resume);
+static const struct of_device_id gpio_charger_match[] = {
+ { .compatible = "gpio-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_charger_match);
+
static struct platform_driver gpio_charger_driver = {
.probe = gpio_charger_probe,
.remove = gpio_charger_remove,
.driver = {
.name = "gpio-charger",
- .owner = THIS_MODULE,
.pm = &gpio_charger_pm_ops,
+ .of_match_table = gpio_charger_match,
},
};
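
For reference, the "charger-type" strings accepted by gpio_charger_parse_dt() above map onto power-supply types as below; this lookup table only restates the strncmp() chain from the patch and is not part of it:

#include <linux/power_supply.h>

static const struct {
        const char *name;
        enum power_supply_type type;
} gpio_charger_types[] = {
        { "unknown", POWER_SUPPLY_TYPE_UNKNOWN },
        { "battery", POWER_SUPPLY_TYPE_BATTERY },
        { "ups",     POWER_SUPPLY_TYPE_UPS },
        { "mains",   POWER_SUPPLY_TYPE_MAINS },
        { "usb-sdp", POWER_SUPPLY_TYPE_USB },
        { "usb-dcp", POWER_SUPPLY_TYPE_USB_DCP },
        { "usb-cdp", POWER_SUPPLY_TYPE_USB_CDP },
        { "usb-aca", POWER_SUPPLY_TYPE_USB_ACA },
};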
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 4520811168ad..de3f39e6fa8e 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -773,7 +773,6 @@ static int platform_pmic_battery_remove(struct platform_device *pdev)
static struct platform_driver platform_pmic_battery_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = platform_pmic_battery_probe,
.remove = platform_pmic_battery_remove,
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 6c8931d4ad62..9cd391d61819 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -406,7 +406,6 @@ static struct platform_driver jz_battery_driver = {
.remove = jz_battery_remove,
.driver = {
.name = "jz4740-battery",
- .owner = THIS_MODULE,
.pm = JZ_BATTERY_PM_OPS,
},
};
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index ed49b50b220b..21fc233c7d61 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -740,7 +740,6 @@ static struct platform_driver lp8788_charger_driver = {
.remove = lp8788_charger_remove,
.driver = {
.name = LP8788_DEV_CHARGER,
- .owner = THIS_MODULE,
},
};
module_platform_driver(lp8788_charger_driver);
diff --git a/drivers/power/max14577_charger.c b/drivers/power/max14577_charger.c
index 0a2bc7277026..ef4103ee6021 100644
--- a/drivers/power/max14577_charger.c
+++ b/drivers/power/max14577_charger.c
@@ -599,7 +599,6 @@ MODULE_DEVICE_TABLE(platform, max14577_charger_id);
static struct platform_driver max14577_charger_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "max14577-charger",
},
.probe = max14577_charger_probe,
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 08f0d7909b6b..99e3cdcd3e11 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -368,7 +368,6 @@ static struct platform_driver max8903_driver = {
.remove = max8903_remove,
.driver = {
.name = "max8903-charger",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index 4bdedfed936d..aefa0c9a3007 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -181,7 +181,6 @@ static const struct platform_device_id max8997_battery_id[] = {
static struct platform_driver max8997_battery_driver = {
.driver = {
.name = "max8997-battery",
- .owner = THIS_MODULE,
},
.probe = max8997_battery_probe,
.remove = max8997_battery_remove,
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index 5017470c2fc9..08694c7a9f38 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -195,7 +195,6 @@ static const struct platform_device_id max8998_battery_id[] = {
static struct platform_driver max8998_battery_driver = {
.driver = {
.name = "max8998-battery",
- .owner = THIS_MODULE,
},
.probe = max8998_battery_probe,
.remove = max8998_battery_remove,
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 1ec810ada5ed..ad9cde705de1 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -677,7 +677,6 @@ MODULE_DEVICE_TABLE(of, olpc_battery_ids);
static struct platform_driver olpc_battery_driver = {
.driver = {
.name = "olpc-battery",
- .owner = THIS_MODULE,
.of_match_table = olpc_battery_ids,
},
.probe = olpc_battery_probe,
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index 684971199bd3..60d0295fffb1 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -82,7 +82,6 @@ static int as3722_poweroff_remove(struct platform_device *pdev)
static struct platform_driver as3722_poweroff_driver = {
.driver = {
.name = "as3722-power-off",
- .owner = THIS_MODULE,
},
.probe = as3722_poweroff_probe,
.remove = as3722_poweroff_remove,
diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c
index 3b1f8d601784..4e4cd1c8fe50 100644
--- a/drivers/power/reset/axxia-reset.c
+++ b/drivers/power/reset/axxia-reset.c
@@ -19,14 +19,12 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
-#include <asm/system_misc.h>
-
-
#define SC_CRIT_WRITE_KEY 0x1000
#define SC_LATCH_ON_RESET 0x1004
#define SC_RESET_CONTROL 0x1008
@@ -39,7 +37,8 @@
static struct regmap *syscon;
-static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int axxia_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
/* Access Key (0xab) */
regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab);
@@ -50,11 +49,19 @@ static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd)
/* Assert chip reset */
regmap_update_bits(syscon, SC_RESET_CONTROL,
RSTCTL_RST_CHIP, RSTCTL_RST_CHIP);
+
+ return NOTIFY_DONE;
}
+static struct notifier_block axxia_restart_nb = {
+ .notifier_call = axxia_restart_handler,
+ .priority = 128,
+};
+
static int axxia_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ int err;
syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(syscon)) {
@@ -62,9 +69,11 @@ static int axxia_reset_probe(struct platform_device *pdev)
return PTR_ERR(syscon);
}
- arm_pm_restart = do_axxia_restart;
+ err = register_restart_handler(&axxia_restart_nb);
+ if (err)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return 0;
+ return err;
}
static const struct of_device_id of_axxia_reset_match[] = {
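
The axxia conversion above, and the brcmstb, hisi, keystone, vexpress and xgene conversions that follow, all replace the ARM-specific arm_pm_restart hook with the generic restart-handler notifier chain from <linux/reboot.h>. A minimal sketch of that interface with hypothetical "demo" names; priority 128 is the conventional default level, and syscon-reboot below raises its handler to 192 so it is chosen ahead of default-priority handlers:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int demo_restart_handler(struct notifier_block *this,
                                unsigned long mode, void *cmd)
{
        /* poke the board's reset hardware here */
        return NOTIFY_DONE;
}

static struct notifier_block demo_restart_nb = {
        .notifier_call = demo_restart_handler,
        .priority = 128,
};

static int demo_register(void)
{
        return register_restart_handler(&demo_restart_nb);
}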
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
index 3f236924742a..100606f9b3dc 100644
--- a/drivers/power/reset/brcmstb-reboot.c
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -26,8 +27,6 @@
#include <linux/smp.h>
#include <linux/mfd/syscon.h>
-#include <asm/system_misc.h>
-
#define RESET_SOURCE_ENABLE_REG 1
#define SW_MASTER_RESET_REG 2
@@ -35,7 +34,8 @@ static struct regmap *regmap;
static u32 rst_src_en;
static u32 sw_mstr_rst;
-static void brcmstb_reboot(enum reboot_mode mode, const char *cmd)
+static int brcmstb_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
int rc;
u32 tmp;
@@ -43,31 +43,38 @@ static void brcmstb_reboot(enum reboot_mode mode, const char *cmd)
rc = regmap_write(regmap, rst_src_en, 1);
if (rc) {
pr_err("failed to write rst_src_en (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_read(regmap, rst_src_en, &tmp);
if (rc) {
pr_err("failed to read rst_src_en (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_write(regmap, sw_mstr_rst, 1);
if (rc) {
pr_err("failed to write sw_mstr_rst (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
rc = regmap_read(regmap, sw_mstr_rst, &tmp);
if (rc) {
pr_err("failed to read sw_mstr_rst (%d)\n", rc);
- return;
+ return NOTIFY_DONE;
}
while (1)
;
+
+ return NOTIFY_DONE;
}
+static struct notifier_block brcmstb_restart_nb = {
+ .notifier_call = brcmstb_restart_handler,
+ .priority = 128,
+};
+
static int brcmstb_reboot_probe(struct platform_device *pdev)
{
int rc;
@@ -93,9 +100,12 @@ static int brcmstb_reboot_probe(struct platform_device *pdev)
return -EINVAL;
}
- arm_pm_restart = brcmstb_reboot;
+ rc = register_restart_handler(&brcmstb_restart_nb);
+ if (rc)
+ dev_err(&pdev->dev,
+ "cannot register restart handler (err=%d)\n", rc);
- return 0;
+ return rc;
}
static const struct of_device_id of_match[] = {
@@ -107,7 +117,6 @@ static struct platform_driver brcmstb_reboot_driver = {
.probe = brcmstb_reboot_probe,
.driver = {
.name = "brcmstb-reboot",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
};
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index ce849bc9b269..e5332f1db8a7 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -99,7 +99,6 @@ static struct platform_driver gpio_poweroff_driver = {
.remove = gpio_poweroff_remove,
.driver = {
.name = "poweroff-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_gpio_poweroff_match,
},
};
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index a76829b3f1cd..edb327efee8b 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -137,7 +137,6 @@ static struct platform_driver gpio_restart_driver = {
.remove = gpio_restart_remove,
.driver = {
.name = "restart-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_gpio_restart_match,
},
};
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
index 0c91d0231d36..5385460e23bb 100644
--- a/drivers/power/reset/hisi-reboot.c
+++ b/drivers/power/reset/hisi-reboot.c
@@ -14,27 +14,36 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <asm/proc-fns.h>
-#include <asm/system_misc.h>
static void __iomem *base;
static u32 reboot_offset;
-static void hisi_restart(enum reboot_mode mode, const char *cmd)
+static int hisi_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
writel_relaxed(0xdeadbeef, base + reboot_offset);
while (1)
cpu_do_idle();
+
+ return NOTIFY_DONE;
}
+static struct notifier_block hisi_restart_nb = {
+ .notifier_call = hisi_restart_handler,
+ .priority = 128,
+};
+
static int hisi_reboot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ int err;
base = of_iomap(np, 0);
if (!base) {
@@ -47,9 +56,12 @@ static int hisi_reboot_probe(struct platform_device *pdev)
return -EINVAL;
}
- arm_pm_restart = hisi_restart;
+ err = register_restart_handler(&hisi_restart_nb);
+ if (err)
+ dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
+ err);
- return 0;
+ return err;
}
static struct of_device_id hisi_reboot_of_match[] = {
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index 408a18fd91cb..faedf16c8111 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -12,9 +12,9 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
-#include <asm/system_misc.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
@@ -52,7 +52,8 @@ static inline int rsctrl_enable_rspll_write(void)
RSCTRL_KEY_MASK, RSCTRL_KEY);
}
-static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
+static int rsctrl_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
/* enable write access to RSTCTRL */
rsctrl_enable_rspll_write();
@@ -60,8 +61,15 @@ static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
/* reset the SOC */
regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
RSCTRL_RESET_MASK, 0);
+
+ return NOTIFY_DONE;
}
+static struct notifier_block rsctrl_restart_nb = {
+ .notifier_call = rsctrl_restart_handler,
+ .priority = 128,
+};
+
static struct of_device_id rsctrl_of_match[] = {
{.compatible = "ti,keystone-reset", },
{},
@@ -114,8 +122,6 @@ static int rsctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- arm_pm_restart = rsctrl_restart;
-
/* disable a reset isolation for all module clocks */
ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
if (ret)
@@ -147,13 +153,16 @@ static int rsctrl_probe(struct platform_device *pdev)
return ret;
}
- return 0;
+ ret = register_restart_handler(&rsctrl_restart_nb);
+ if (ret)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", ret);
+
+ return ret;
}
static struct platform_driver rsctrl_driver = {
.probe = rsctrl_probe,
.driver = {
- .owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.of_match_table = rsctrl_of_match,
},
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 116a1cef8f7b..34f38a3dc3ff 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -342,7 +342,6 @@ static struct platform_driver ltc2952_poweroff_driver = {
.remove = ltc2952_poweroff_remove,
.driver = {
.name = "ltc2952-poweroff",
- .owner = THIS_MODULE,
.of_match_table = of_ltc2952_poweroff_match,
},
.suspend = ltc2952_poweroff_suspend,
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index a75db7f8a92f..2789a61cec68 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -129,7 +129,6 @@ static struct platform_driver qnap_power_off_driver = {
.probe = qnap_power_off_probe,
.remove = qnap_power_off_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "qnap_power_off",
.of_match_table = of_match_ptr(qnap_power_off_of_match_table),
},
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index edd707ee7281..f46f2c2e4648 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -55,7 +55,6 @@ static struct platform_driver restart_poweroff_driver = {
.remove = restart_poweroff_remove,
.driver = {
.name = "poweroff-restart",
- .owner = THIS_MODULE,
.of_match_table = of_restart_poweroff_match,
},
};
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 815b901822cf..c4049f45663f 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -68,7 +68,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
return -EINVAL;
ctx->restart_handler.notifier_call = syscon_restart_handle;
- ctx->restart_handler.priority = 128;
+ ctx->restart_handler.priority = 192;
err = register_restart_handler(&ctx->restart_handler);
if (err)
dev_err(dev, "can't register restart notifier (err=%d)\n", err);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 4dc102e2b230..9dfc9cee3232 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -12,14 +12,14 @@
*/
#include <linux/delay.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/stat.h>
#include <linux/vexpress.h>
-#include <asm/system_misc.h>
-
static void vexpress_reset_do(struct device *dev, const char *what)
{
int err = -ENOENT;
@@ -43,11 +43,19 @@ static void vexpress_power_off(void)
static struct device *vexpress_restart_device;
-static void vexpress_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int vexpress_restart(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
vexpress_reset_do(vexpress_restart_device, "restart");
+
+ return NOTIFY_DONE;
}
+static struct notifier_block vexpress_restart_nb = {
+ .notifier_call = vexpress_restart,
+ .priority = 128,
+};
+
static ssize_t vexpress_reset_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -86,12 +94,28 @@ static struct of_device_id vexpress_reset_of_match[] = {
{}
};
+static int _vexpress_register_restart_handler(struct device *dev)
+{
+ int err;
+
+ vexpress_restart_device = dev;
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ return err;
+ }
+ device_create_file(dev, &dev_attr_active);
+
+ return 0;
+}
+
static int vexpress_reset_probe(struct platform_device *pdev)
{
enum vexpress_reset_func func;
const struct of_device_id *match =
of_match_device(vexpress_reset_of_match, &pdev->dev);
struct regmap *regmap;
+ int ret = 0;
if (match)
func = (enum vexpress_reset_func)match->data;
@@ -110,18 +134,14 @@ static int vexpress_reset_probe(struct platform_device *pdev)
break;
case FUNC_RESET:
if (!vexpress_restart_device)
- vexpress_restart_device = &pdev->dev;
- arm_pm_restart = vexpress_restart;
- device_create_file(&pdev->dev, &dev_attr_active);
+ ret = _vexpress_register_restart_handler(&pdev->dev);
break;
case FUNC_REBOOT:
- vexpress_restart_device = &pdev->dev;
- arm_pm_restart = vexpress_restart;
- device_create_file(&pdev->dev, &dev_attr_active);
+ ret = _vexpress_register_restart_handler(&pdev->dev);
break;
};
- return 0;
+ return ret;
}
static const struct platform_device_id vexpress_reset_id_table[] = {
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index 6b49be6867ab..b0e5002f8deb 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -24,63 +24,67 @@
* For system shutdown, this is board specific. If a board designer
* implements GPIO shutdown, use the gpio-poweroff.c driver.
*/
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/notifier.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/stat.h>
#include <linux/slab.h>
-#include <asm/system_misc.h>
struct xgene_reboot_context {
- struct platform_device *pdev;
+ struct device *dev;
void *csr;
u32 mask;
+ struct notifier_block restart_handler;
};
-static struct xgene_reboot_context *xgene_restart_ctx;
-
-static void xgene_restart(enum reboot_mode mode, const char *cmd)
+static int xgene_restart_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
- struct xgene_reboot_context *ctx = xgene_restart_ctx;
- unsigned long timeout;
+ struct xgene_reboot_context *ctx =
+ container_of(this, struct xgene_reboot_context,
+ restart_handler);
/* Issue the reboot */
- if (ctx)
- writel(ctx->mask, ctx->csr);
+ writel(ctx->mask, ctx->csr);
+
+ mdelay(1000);
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout))
- cpu_relax();
+ dev_emerg(ctx->dev, "Unable to restart system\n");
- dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+ return NOTIFY_DONE;
}
static int xgene_reboot_probe(struct platform_device *pdev)
{
struct xgene_reboot_context *ctx;
+ struct device *dev = &pdev->dev;
+ int err;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- dev_err(&pdev->dev, "out of memory for context\n");
- return -ENODEV;
- }
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- ctx->csr = of_iomap(pdev->dev.of_node, 0);
+ ctx->csr = of_iomap(dev->of_node, 0);
if (!ctx->csr) {
- devm_kfree(&pdev->dev, ctx);
- dev_err(&pdev->dev, "can not map resource\n");
+ dev_err(dev, "can not map resource\n");
return -ENODEV;
}
- if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ if (of_property_read_u32(dev->of_node, "mask", &ctx->mask))
ctx->mask = 0xFFFFFFFF;
- ctx->pdev = pdev;
- arm_pm_restart = xgene_restart;
- xgene_restart_ctx = ctx;
+ ctx->dev = dev;
+ ctx->restart_handler.notifier_call = xgene_restart_handler;
+ ctx->restart_handler.priority = 128;
+ err = register_restart_handler(&ctx->restart_handler);
+ if (err)
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return 0;
+ return err;
}
static struct of_device_id xgene_reboot_of_match[] = {
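
The X-Gene rewrite above also drops the file-scope xgene_restart_ctx pointer: the notifier_block is embedded in the driver context and recovered with container_of() inside the handler. A stripped-down sketch of that pattern with hypothetical "demo" names:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct demo_ctx {
        void __iomem *csr;
        u32 mask;
        struct notifier_block restart_handler;
};

static int demo_restart(struct notifier_block *this, unsigned long mode,
                        void *cmd)
{
        struct demo_ctx *ctx = container_of(this, struct demo_ctx,
                                            restart_handler);

        writel(ctx->mask, ctx->csr);
        return NOTIFY_DONE;
}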
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index d5a2acfb8821..a01aacb32f59 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -281,7 +281,6 @@ static struct platform_driver rx51_battery_driver = {
.remove = rx51_battery_remove,
.driver = {
.name = "rx51-battery",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(n900_battery_of_match),
},
};
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
index 3e8ba97c8169..0f4e5971dff5 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/tps65090-charger.c
@@ -353,7 +353,6 @@ static struct platform_driver tps65090_charger_driver = {
.driver = {
.name = "tps65090-charger",
.of_match_table = of_tps65090_charger_match,
- .owner = THIS_MODULE,
},
.probe = tps65090_charger_probe,
.remove = tps65090_charger_remove,
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 2598c588006e..d35b83e635b5 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -716,7 +716,6 @@ MODULE_DEVICE_TABLE(of, twl_bci_of_match);
static struct platform_driver twl4030_bci_driver = {
.driver = {
.name = "twl4030_bci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl_bci_of_match),
},
.remove = __exit_p(twl4030_bci_remove),
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 58f7348e6c22..a8e6203673ad 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -281,7 +281,6 @@ static int wm97xx_bat_remove(struct platform_device *dev)
static struct platform_driver wm97xx_bat_driver = {
.driver = {
.name = "wm97xx-battery",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &wm97xx_bat_pm_ops,
#endif
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index f41bacfdc3dc..333ad7d5b45b 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -200,7 +200,6 @@ static struct platform_driver pps_gpio_driver = {
.remove = pps_gpio_remove,
.driver = {
.name = PPS_GPIO_NAME,
- .owner = THIS_MODULE,
.of_match_table = pps_gpio_dt_ids,
},
};
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index ef2dd2e4754b..a3ecf5809634 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -50,6 +50,17 @@ config PWM_ATMEL
To compile this driver as a module, choose M here: the module
will be called pwm-atmel.
+config PWM_ATMEL_HLCDC_PWM
+ tristate "Atmel HLCDC PWM support"
+ depends on MFD_ATMEL_HLCDC
+ help
+ Generic PWM framework driver for the PWM output of the HLCDC
+ (Atmel High-end LCD Controller). This PWM output is mainly used
+ to control the LCD backlight.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-atmel-hlcdc.
+
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
depends on ATMEL_TCLIB && OF
@@ -71,6 +82,15 @@ config PWM_BCM_KONA
To compile this driver as a module, choose M here: the module
will be called pwm-bcm-kona.
+config PWM_BCM2835
+ tristate "BCM2835 PWM support"
+ depends on ARCH_BCM2835
+ help
+ PWM framework driver for BCM2835 controller (Raspberry Pi)
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm2835.
+
config PWM_BFIN
tristate "Blackfin PWM support"
depends on BFIN_GPTIMERS
@@ -235,7 +255,7 @@ config PWM_ROCKCHIP
config PWM_SAMSUNG
tristate "Samsung PWM support"
- depends on PLAT_SAMSUNG
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS
help
Generic PWM framework driver for Samsung.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index c458606c3755..65259ac1e8de 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -2,8 +2,10 @@ obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
+obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
+obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 4c07a8420b37..f39399273426 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -131,7 +131,6 @@ static int ab8500_pwm_remove(struct platform_device *pdev)
static struct platform_driver ab8500_pwm_driver = {
.driver = {
.name = "ab8500-pwm",
- .owner = THIS_MODULE,
},
.probe = ab8500_pwm_probe,
.remove = ab8500_pwm_remove,
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
new file mode 100644
index 000000000000..e7a785fadcdf
--- /dev/null
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/atmel-hlcdc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define ATMEL_HLCDC_PWMCVAL_MASK GENMASK(15, 8)
+#define ATMEL_HLCDC_PWMCVAL(x) (((x) << 8) & ATMEL_HLCDC_PWMCVAL_MASK)
+#define ATMEL_HLCDC_PWMPOL BIT(4)
+#define ATMEL_HLCDC_PWMPS_MASK GENMASK(2, 0)
+#define ATMEL_HLCDC_PWMPS_MAX 0x6
+#define ATMEL_HLCDC_PWMPS(x) ((x) & ATMEL_HLCDC_PWMPS_MASK)
+
+struct atmel_hlcdc_pwm_errata {
+ bool slow_clk_erratum;
+ bool div1_clk_erratum;
+};
+
+struct atmel_hlcdc_pwm {
+ struct pwm_chip chip;
+ struct atmel_hlcdc *hlcdc;
+ struct clk *cur_clk;
+ const struct atmel_hlcdc_pwm_errata *errata;
+};
+
+static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct atmel_hlcdc_pwm, chip);
+}
+
+static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
+ struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ struct clk *new_clk = hlcdc->slow_clk;
+ u64 pwmcval = duty_ns * 256;
+ unsigned long clk_freq;
+ u64 clk_period_ns;
+ u32 pwmcfg;
+ int pres;
+
+ if (!chip->errata || !chip->errata->slow_clk_erratum) {
+ clk_freq = clk_get_rate(new_clk);
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ /* Errata: cannot use slow clk on some IP revisions */
+ if ((chip->errata && chip->errata->slow_clk_erratum) ||
+ clk_period_ns > period_ns) {
+ new_clk = hlcdc->sys_clk;
+ clk_freq = clk_get_rate(new_clk);
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
+ /* Errata: cannot divide by 1 on some IP revisions */
+ if (!pres && chip->errata && chip->errata->div1_clk_erratum)
+ continue;
+
+ if ((clk_period_ns << pres) >= period_ns)
+ break;
+ }
+
+ if (pres > ATMEL_HLCDC_PWMPS_MAX)
+ return -EINVAL;
+
+ pwmcfg = ATMEL_HLCDC_PWMPS(pres);
+
+ if (new_clk != chip->cur_clk) {
+ u32 gencfg = 0;
+ int ret;
+
+ ret = clk_prepare_enable(new_clk);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(chip->cur_clk);
+ chip->cur_clk = new_clk;
+
+ if (new_clk == hlcdc->sys_clk)
+ gencfg = ATMEL_HLCDC_CLKPWMSEL;
+
+ ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(0),
+ ATMEL_HLCDC_CLKPWMSEL, gencfg);
+ if (ret)
+ return ret;
+ }
+
+ do_div(pwmcval, period_ns);
+
+ /*
+ * The PWM duty cycle is configurable from 0/256 to 255/256 of the
+ * period cycle. Hence we can't set a duty cycle occupying the
+ * whole period cycle if we're asked to.
+ * Set it to 255 if pwmcval is greater than 256.
+ */
+ if (pwmcval > 255)
+ pwmcval = 255;
+
+ pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval);
+
+ return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
+ ATMEL_HLCDC_PWMCVAL_MASK |
+ ATMEL_HLCDC_PWMPS_MASK,
+ pwmcfg);
+}
+
+static int atmel_hlcdc_pwm_set_polarity(struct pwm_chip *c,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 cfg = 0;
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ cfg = ATMEL_HLCDC_PWMPOL;
+
+ return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
+ ATMEL_HLCDC_PWMPOL, cfg);
+}
+
+static int atmel_hlcdc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 status;
+ int ret;
+
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PWM);
+ if (ret)
+ return ret;
+
+ while (true) {
+ ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
+ if (ret)
+ return ret;
+
+ if ((status & ATMEL_HLCDC_PWM) != 0)
+ break;
+
+ usleep_range(1, 10);
+ }
+
+ return 0;
+}
+
+static void atmel_hlcdc_pwm_disable(struct pwm_chip *c,
+ struct pwm_device *pwm)
+{
+ struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+ struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ u32 status;
+ int ret;
+
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PWM);
+ if (ret)
+ return;
+
+ while (true) {
+ ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
+ if (ret)
+ return;
+
+ if ((status & ATMEL_HLCDC_PWM) == 0)
+ break;
+
+ usleep_range(1, 10);
+ }
+}
+
+static const struct pwm_ops atmel_hlcdc_pwm_ops = {
+ .config = atmel_hlcdc_pwm_config,
+ .set_polarity = atmel_hlcdc_pwm_set_polarity,
+ .enable = atmel_hlcdc_pwm_enable,
+ .disable = atmel_hlcdc_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
+ .slow_clk_erratum = true,
+};
+
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
+ .div1_clk_erratum = true,
+};
+
+static const struct of_device_id atmel_hlcdc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9x5-hlcdc",
+ .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
+ },
+ {
+ .compatible = "atmel,sama5d3-hlcdc",
+ .data = &atmel_hlcdc_pwm_sama5d3_errata,
+ },
+ { /* sentinel */ },
+};
+
+static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct atmel_hlcdc_pwm *chip;
+ struct atmel_hlcdc *hlcdc;
+ int ret;
+
+ hlcdc = dev_get_drvdata(dev->parent);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ ret = clk_prepare_enable(hlcdc->periph_clk);
+ if (ret)
+ return ret;
+
+ match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
+ if (match)
+ chip->errata = match->data;
+
+ chip->hlcdc = hlcdc;
+ chip->chip.ops = &atmel_hlcdc_pwm_ops;
+ chip->chip.dev = dev;
+ chip->chip.base = -1;
+ chip->chip.npwm = 1;
+ chip->chip.of_xlate = of_pwm_xlate_with_flags;
+ chip->chip.of_pwm_n_cells = 3;
+ chip->chip.can_sleep = 1;
+
+ ret = pwmchip_add(&chip->chip);
+ if (ret) {
+ clk_disable_unprepare(hlcdc->periph_clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, chip);
+
+ return 0;
+}
+
+static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
+{
+ struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&chip->chip);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(chip->hlcdc->periph_clk);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
+ { .compatible = "atmel,hlcdc-pwm" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver atmel_hlcdc_pwm_driver = {
+ .driver = {
+ .name = "atmel-hlcdc-pwm",
+ .of_match_table = atmel_hlcdc_pwm_dt_ids,
+ },
+ .probe = atmel_hlcdc_pwm_probe,
+ .remove = atmel_hlcdc_pwm_remove,
+};
+module_platform_driver(atmel_hlcdc_pwm_driver);
+
+MODULE_ALIAS("platform:atmel-hlcdc-pwm");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Atmel HLCDC PWM driver");
+MODULE_LICENSE("GPL v2");
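
As a sanity check on atmel_hlcdc_pwm_config() above, here is its prescaler and duty arithmetic worked through for an assumed 32.768 kHz slow clock and a 20 ms / 50% request; the numbers are illustrative only and do not come from the patch:

/*
 * clk_period_ns = (NSEC_PER_SEC * 256) / 32768      ~= 7812500 ns
 * pres = 0:  7812500 ns  < 20000000 ns              -> keep searching
 * pres = 1: 15625000 ns  < 20000000 ns              -> keep searching
 * pres = 2: 31250000 ns >= 20000000 ns              -> PWMPS = 2
 *
 * pwmcval = duty_ns * 256 / period_ns
 *         = 10000000 * 256 / 20000000 = 128         -> ~50% duty cycle
 */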
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index d56e5b717431..d14e0677c92d 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -436,7 +436,6 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
- .owner = THIS_MODULE,
.of_match_table = atmel_tcb_pwm_dt_ids,
},
.probe = atmel_tcb_pwm_probe,
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
new file mode 100644
index 000000000000..b4c7f956b6fa
--- /dev/null
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2014 Bart Tanghe <bart.tanghe@thomasmore.be>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define PWM_CONTROL 0x000
+#define PWM_CONTROL_SHIFT(x) ((x) * 8)
+#define PWM_CONTROL_MASK 0xff
+#define PWM_MODE 0x80 /* set timer in PWM mode */
+#define PWM_ENABLE (1 << 0)
+#define PWM_POLARITY (1 << 4)
+
+#define PERIOD(x) (((x) * 0x10) + 0x10)
+#define DUTY(x) (((x) * 0x10) + 0x14)
+
+#define MIN_PERIOD 108 /* 9.2 MHz max. PWM clock */
+
+struct bcm2835_pwm {
+ struct pwm_chip chip;
+ struct device *dev;
+ unsigned long scaler;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct bcm2835_pwm, chip);
+}
+
+static int bcm2835_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_CONTROL_MASK << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ value |= (PWM_MODE << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static void bcm2835_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_CONTROL_MASK << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+}
+
+static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+
+ if (period_ns <= MIN_PERIOD) {
+ dev_err(pc->dev, "period %d not supported, minimum %d\n",
+ period_ns, MIN_PERIOD);
+ return -EINVAL;
+ }
+
+ writel(duty_ns / pc->scaler, pc->base + DUTY(pwm->hwpwm));
+ writel(period_ns / pc->scaler, pc->base + PERIOD(pwm->hwpwm));
+
+ return 0;
+}
+
+static int bcm2835_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static void bcm2835_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+ value &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ writel(value, pc->base + PWM_CONTROL);
+}
+
+static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
+ u32 value;
+
+ value = readl(pc->base + PWM_CONTROL);
+
+ if (polarity == PWM_POLARITY_NORMAL)
+ value &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ else
+ value |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
+
+ writel(value, pc->base + PWM_CONTROL);
+
+ return 0;
+}
+
+static const struct pwm_ops bcm2835_pwm_ops = {
+ .request = bcm2835_pwm_request,
+ .free = bcm2835_pwm_free,
+ .config = bcm2835_pwm_config,
+ .enable = bcm2835_pwm_enable,
+ .disable = bcm2835_pwm_disable,
+ .set_polarity = bcm2835_set_polarity,
+ .owner = THIS_MODULE,
+};
+
+static int bcm2835_pwm_probe(struct platform_device *pdev)
+{
+ struct bcm2835_pwm *pc;
+ struct resource *res;
+ int ret;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ pc->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
+
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk)) {
+ dev_err(&pdev->dev, "clock not found: %ld\n", PTR_ERR(pc->clk));
+ return PTR_ERR(pc->clk);
+ }
+
+ ret = clk_prepare_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ pc->scaler = NSEC_PER_SEC / clk_get_rate(pc->clk);
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.npwm = 2;
+
+ platform_set_drvdata(pdev, pc);
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0)
+ goto add_fail;
+
+ return 0;
+
+add_fail:
+ clk_disable_unprepare(pc->clk);
+ return ret;
+}
+
+static int bcm2835_pwm_remove(struct platform_device *pdev)
+{
+ struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(pc->clk);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id bcm2835_pwm_of_match[] = {
+ { .compatible = "brcm,bcm2835-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pwm_of_match);
+
+static struct platform_driver bcm2835_pwm_driver = {
+ .driver = {
+ .name = "bcm2835-pwm",
+ .of_match_table = bcm2835_pwm_of_match,
+ },
+ .probe = bcm2835_pwm_probe,
+ .remove = bcm2835_pwm_remove,
+};
+module_platform_driver(bcm2835_pwm_driver);
+
+MODULE_AUTHOR("Bart Tanghe <bart.tanghe@thomasmore.be");
+MODULE_DESCRIPTION("Broadcom BCM2835 PWM driver");
+MODULE_LICENSE("GPL v2");
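
The bcm2835_pwm_config() scaling above, worked through for an assumed 10 MHz PWM clock (the rate is illustrative only):

/*
 * scaler = NSEC_PER_SEC / clk_get_rate(clk) = 1000000000 / 10000000
 *        = 100 ns per counter tick
 * PERIOD = 1000000 ns / 100 = 10000 ticks            (1 ms period)
 * DUTY   =  250000 ns / 100 =  2500 ticks            (25% duty cycle)
 *
 * period_ns must stay above MIN_PERIOD (108 ns), which matches the
 * "9.2 MHz max. PWM clock" note next to the register definitions.
 */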
diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c
index 9985d830e554..7631ef194de7 100644
--- a/drivers/pwm/pwm-bfin.c
+++ b/drivers/pwm/pwm-bfin.c
@@ -149,7 +149,6 @@ static int bfin_pwm_remove(struct platform_device *pdev)
static struct platform_driver bfin_pwm_driver = {
.driver = {
.name = "bfin-pwm",
- .owner = THIS_MODULE,
},
.probe = bfin_pwm_probe,
.remove = bfin_pwm_remove,
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index fafb6a0111b0..a80c10803636 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -163,7 +163,6 @@ MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
static struct platform_driver clps711x_pwm_driver = {
.driver = {
.name = "clps711x-pwm",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(clps711x_pwm_dt_ids),
},
.probe = clps711x_pwm_probe,
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 0f2cc7ef7784..f9dfc8b6407a 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -299,7 +300,7 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
{
int ret;
- if (fpc->use_count != 0)
+ if (fpc->use_count++ != 0)
return 0;
/* select counter clock source */
@@ -316,8 +317,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
return ret;
}
- fpc->use_count++;
-
return 0;
}
@@ -399,12 +398,23 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
return 0;
}
+static bool fsl_pwm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case FTM_CNT:
+ return true;
+ }
+ return false;
+}
+
static const struct regmap_config fsl_pwm_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = FTM_PWMLOAD,
+ .volatile_reg = fsl_pwm_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
};
static int fsl_pwm_probe(struct platform_device *pdev)
@@ -427,7 +437,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "ftm_sys", base,
&fsl_pwm_regmap_config);
if (IS_ERR(fpc->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
@@ -478,6 +488,51 @@ static int fsl_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&fpc->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int fsl_pwm_suspend(struct device *dev)
+{
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ u32 val;
+
+ regcache_cache_only(fpc->regmap, true);
+ regcache_mark_dirty(fpc->regmap);
+
+ /* read from cache */
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if ((val & 0xFF) != 0xFF) {
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
+ }
+
+ return 0;
+}
+
+static int fsl_pwm_resume(struct device *dev)
+{
+ struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ u32 val;
+
+ /* read from cache */
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if ((val & 0xFF) != 0xFF) {
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
+ clk_prepare_enable(fpc->clk[fpc->cnt_select]);
+ clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ }
+
+ /* restore all registers from cache */
+ regcache_cache_only(fpc->regmap, false);
+ regcache_sync(fpc->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_pwm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_pwm_suspend, fsl_pwm_resume)
+};
+
static const struct of_device_id fsl_pwm_dt_ids[] = {
{ .compatible = "fsl,vf610-ftm-pwm", },
{ /* sentinel */ }
@@ -488,6 +543,7 @@ static struct platform_driver fsl_pwm_driver = {
.driver = {
.name = "fsl-ftm-pwm",
.of_match_table = fsl_pwm_dt_ids,
+ .pm = &fsl_pwm_pm_ops,
},
.probe = fsl_pwm_probe,
.remove = fsl_pwm_remove,
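
The FTM suspend/resume callbacks above lean on the newly enabled regmap cache (REGCACHE_RBTREE plus the FTM_CNT volatile_reg hook): register contents are kept in the cache across the power-down and replayed on resume. A minimal sketch of that regcache idiom, detached from the FTM specifics:

#include <linux/regmap.h>

static void demo_suspend(struct regmap *map)
{
        regcache_cache_only(map, true);
        regcache_mark_dirty(map);
}

static int demo_resume(struct regmap *map)
{
        regcache_cache_only(map, false);
        return regcache_sync(map);
}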
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index f8b5f109c1ab..66d6f0c5c421 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -336,7 +336,6 @@ static int imx_pwm_remove(struct platform_device *pdev)
static struct platform_driver imx_pwm_driver = {
.driver = {
.name = "imx-pwm",
- .owner = THIS_MODULE,
.of_match_table = imx_pwm_dt_ids,
},
.probe = imx_pwm_probe,
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 9c46209e1d02..76d13150283f 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -194,7 +194,6 @@ static int jz4740_pwm_remove(struct platform_device *pdev)
static struct platform_driver jz4740_pwm_driver = {
.driver = {
.name = "jz4740-pwm",
- .owner = THIS_MODULE,
},
.probe = jz4740_pwm_probe,
.remove = jz4740_pwm_remove,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 2c39b0e50fa4..872ea76a4f19 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -305,7 +305,6 @@ static struct platform_driver lp3943_pwm_driver = {
.remove = lp3943_pwm_remove,
.driver = {
.name = "lp3943-pwm",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lp3943_pwm_of_match),
},
};
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 9dc0f9d42bfa..9fde60ce8e7b 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -168,7 +168,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_pwm_dt_ids);
static struct platform_driver lpc32xx_pwm_driver = {
.driver = {
.name = "lpc32xx-pwm",
- .owner = THIS_MODULE,
.of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 4f1bb4e0a426..f75ecb09d97d 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -189,7 +189,6 @@ MODULE_DEVICE_TABLE(of, mxs_pwm_dt_ids);
static struct platform_driver mxs_pwm_driver = {
.driver = {
.name = "mxs-pwm",
- .owner = THIS_MODULE,
.of_match_table = mxs_pwm_dt_ids,
},
.probe = mxs_pwm_probe,
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index a9a28083f245..ed6007b27585 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -146,7 +146,6 @@ static int pwm_remove(struct platform_device *pdev)
static struct platform_driver puv3_pwm_driver = {
.driver = {
.name = "PKUnity-v3-PWM",
- .owner = THIS_MODULE,
},
.probe = pwm_probe,
.remove = pwm_remove,
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 0b312ec420b6..cb2f7024cf68 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -225,7 +225,6 @@ static int pwm_remove(struct platform_device *pdev)
static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
- .owner = THIS_MODULE,
.of_match_table = pwm_of_match,
},
.probe = pwm_probe,
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 3b71b42e89d5..ee63f9e9d0fb 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -468,7 +468,6 @@ static struct platform_driver tpu_driver = {
.remove = tpu_remove,
.driver = {
.name = "renesas-tpu-pwm",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(tpu_of_table),
}
};
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index ba6b650cf8dc..3e9b5835a4af 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -601,7 +601,6 @@ static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, pwm_samsung_suspend,
static struct platform_driver pwm_samsung_driver = {
.driver = {
.name = "samsung-pwm",
- .owner = THIS_MODULE,
.pm = &pwm_samsung_pm_ops,
.of_match_table = of_match_ptr(samsung_pwm_matches),
},
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 6fd93e6a4122..6c6b44fd3f43 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -252,7 +252,6 @@ MODULE_DEVICE_TABLE(of, spear_pwm_of_match);
static struct platform_driver spear_pwm_driver = {
.driver = {
.name = "spear-pwm",
- .owner = THIS_MODULE,
.of_match_table = spear_pwm_of_match,
},
.probe = spear_pwm_probe,
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 61d86b9498ca..5b97cae5423a 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -237,7 +237,6 @@ MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
static struct platform_driver tegra_pwm_driver = {
.driver = {
.name = "tegra-pwm",
- .owner = THIS_MODULE,
.of_match_table = tegra_pwm_of_match,
},
.probe = tegra_pwm_probe,
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 74efbe7f20c3..e557befdf4e6 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -331,7 +331,6 @@ static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
static struct platform_driver ecap_pwm_driver = {
.driver = {
.name = "ecap",
- .owner = THIS_MODULE,
.of_match_table = ecap_of_match,
.pm = &ecap_pwm_pm_ops,
},
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index cb75133085a8..694b3cf7694b 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -599,7 +599,6 @@ static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
.name = "ehrpwm",
- .owner = THIS_MODULE,
.of_match_table = ehrpwm_of_match,
.pm = &ehrpwm_pwm_pm_ops,
},
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 67481dc6da3f..5cf65a15d021 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -119,7 +119,6 @@ static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
static struct platform_driver pwmss_driver = {
.driver = {
.name = "pwmss",
- .owner = THIS_MODULE,
.pm = &pwmss_pm_ops,
.of_match_table = pwmss_of_match,
},
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 652e6b5b859b..cdb58fd4619d 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -266,7 +266,6 @@ static struct platform_driver vt8500_pwm_driver = {
.remove = vt8500_pwm_remove,
.driver = {
.name = "vt8500-pwm",
- .owner = THIS_MODULE,
.of_match_table = vt8500_pwm_dt_ids,
},
};
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 4e6c8c611905..832932bdc977 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -362,7 +362,6 @@ static int pm800_regulator_remove(struct platform_device *pdev)
static struct platform_driver pm800_regulator_driver = {
.driver = {
.name = "88pm80x-regulator",
- .owner = THIS_MODULE,
},
.probe = pm800_regulator_probe,
.remove = pm800_regulator_remove,
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 3fe47bd66153..c3d15427adc7 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -419,7 +419,6 @@ MODULE_DEVICE_TABLE(platform, pm8607_regulator_driver_ids);
static struct platform_driver pm8607_regulator_driver = {
.driver = {
.name = "88pm860x-regulator",
- .owner = THIS_MODULE,
},
.probe = pm8607_regulator_probe,
.id_table = pm8607_regulator_driver_ids,
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index c873ee0082cf..9dfabda8f478 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -191,7 +191,6 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
static struct platform_driver aat2870_regulator_driver = {
.driver = {
.name = "aat2870-regulator",
- .owner = THIS_MODULE,
},
.probe = aat2870_regulator_probe,
};
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index e10febe9ec34..de2644490f0d 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -721,7 +721,6 @@ static int ab3100_regulators_probe(struct platform_device *pdev)
static struct platform_driver ab3100_regulators_driver = {
.driver = {
.name = "ab3100-regulators",
- .owner = THIS_MODULE,
},
.probe = ab3100_regulators_probe,
.remove = ab3100_regulators_remove,
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 29c0faaf8eba..84c1ee39ddae 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -433,7 +433,6 @@ static struct platform_driver ab8500_ext_regulator_driver = {
.probe = ab8500_ext_regulator_probe,
.driver = {
.name = "ab8500-ext-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 1fda14e12ea8..0f97514e3474 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -3075,7 +3075,6 @@ static struct platform_driver ab8500_regulator_driver = {
.probe = ab8500_regulator_probe,
.driver = {
.name = "ab8500-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3586571814b2..738adfa5332b 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -322,7 +322,6 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = {
static struct platform_driver anatop_regulator_driver = {
.driver = {
.name = "anatop_regulator",
- .owner = THIS_MODULE,
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index d071b2119a60..8169165904c0 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -300,7 +300,6 @@ static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
.driver = {
.name = "arizona-ldo1",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index c313ef4c3a2f..20079006459a 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -302,7 +302,6 @@ static struct platform_driver arizona_micsupp_driver = {
.probe = arizona_micsupp_probe,
.driver = {
.name = "arizona-micsupp",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 8459b0b648cd..c0e93b1332f7 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -267,7 +267,6 @@ static int as3711_regulator_probe(struct platform_device *pdev)
static struct platform_driver as3711_regulator_driver = {
.driver = {
.name = "as3711-regulator",
- .owner = THIS_MODULE,
},
.probe = as3711_regulator_probe,
};
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index b68f05f38537..8b046eec6ae0 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -916,7 +916,6 @@ MODULE_DEVICE_TABLE(of, of_as3722_regulator_match);
static struct platform_driver as3722_regulator_driver = {
.driver = {
.name = "as3722-regulator",
- .owner = THIS_MODULE,
.of_match_table = of_as3722_regulator_match,
},
.probe = as3722_regulator_probe,
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 2e1010a34ddc..f23d7e1f2ee7 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -275,7 +275,6 @@ static struct platform_driver axp20x_regulator_driver = {
.probe = axp20x_regulator_probe,
.driver = {
.name = "axp20x-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index fe6ac69549a6..628430bdc312 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -453,7 +453,6 @@ static int bcm590xx_probe(struct platform_device *pdev)
static struct platform_driver bcm590xx_regulator_driver = {
.driver = {
.name = "bcm590xx-vregs",
- .owner = THIS_MODULE,
},
.probe = bcm590xx_probe,
};
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index b431ae357fcd..affa1b191314 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -475,7 +475,6 @@ static int da903x_regulator_probe(struct platform_device *pdev)
static struct platform_driver da903x_regulator_driver = {
.driver = {
.name = "da903x-regulator",
- .owner = THIS_MODULE,
},
.probe = da903x_regulator_probe,
};
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 3945f1006d23..8a4df7a1f2ee 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -464,7 +464,6 @@ static struct platform_driver da9052_regulator_driver = {
.probe = da9052_regulator_probe,
.driver = {
.name = "da9052-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 9516317e1a9f..cafdafbffcaf 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -665,7 +665,6 @@ static struct platform_driver da9055_regulator_driver = {
.probe = da9055_regulator_probe,
.driver = {
.name = "da9055-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 37dd42759ca9..31c2c593ae0b 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -893,7 +893,6 @@ static int da9063_regulator_remove(struct platform_device *pdev)
static struct platform_driver da9063_regulator_driver = {
.driver = {
.name = DA9063_DRVNAME_REGULATORS,
- .owner = THIS_MODULE,
},
.probe = da9063_regulator_probe,
.remove = da9063_regulator_remove,
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 617c1adca816..7cec535cf0bc 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -534,7 +534,6 @@ static int db8500_regulator_remove(struct platform_device *pdev)
static struct platform_driver db8500_regulator_driver = {
.driver = {
.name = "db8500-prcmu-regulators",
- .owner = THIS_MODULE,
},
.probe = db8500_regulator_probe,
.remove = db8500_regulator_remove,
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 7aef9e4c6fbf..cde749778774 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -63,7 +63,6 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index d21da27c0eb6..ff62d69ba0be 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -213,7 +213,6 @@ static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
.driver = {
.name = "reg-fixed-voltage",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(fixed_of_match),
},
};
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index c888a9a9482c..464018de7e97 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -401,7 +401,6 @@ static struct platform_driver gpio_regulator_driver = {
.remove = gpio_regulator_remove,
.driver = {
.name = "gpio-regulator",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(regulator_gpio_of_match),
},
};
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 156d0d1a55f1..42dc5fb8c899 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -623,7 +623,6 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
static struct platform_driver hi6421_regulator_driver = {
.driver = {
.name = "hi6421-regulator",
- .owner = THIS_MODULE,
},
.probe = hi6421_regulator_probe,
};
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index 948afc249e29..a97bed90d39b 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -533,7 +533,6 @@ static struct platform_driver lp8788_buck_driver = {
.probe = lp8788_buck_probe,
.driver = {
.name = LP8788_DEV_BUCK,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index b9a29a29933f..9f22d079c8cc 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -561,7 +561,6 @@ static struct platform_driver lp8788_dldo_driver = {
.probe = lp8788_dldo_probe,
.driver = {
.name = LP8788_DEV_DLDO,
- .owner = THIS_MODULE,
},
};
@@ -611,7 +610,6 @@ static struct platform_driver lp8788_aldo_driver = {
.probe = lp8788_aldo_probe,
.driver = {
.name = LP8788_DEV_ALDO,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index 0ff5a20ac958..bf9a44c5fdd2 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -371,7 +371,6 @@ MODULE_DEVICE_TABLE(platform, max14577_regulator_id);
static struct platform_driver max14577_regulator_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "max14577-regulator",
},
.probe = max14577_regulator_probe,
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 871b96bcd2d0..10d206266ac2 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -488,7 +488,6 @@ MODULE_DEVICE_TABLE(platform, max77686_pmic_id);
static struct platform_driver max77686_pmic_driver = {
.driver = {
.name = "max77686-pmic",
- .owner = THIS_MODULE,
},
.probe = max77686_pmic_probe,
.id_table = max77686_pmic_id,
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 7b9755a6c3b5..07b313e51b21 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -267,7 +267,6 @@ MODULE_DEVICE_TABLE(platform, max77693_pmic_id);
static struct platform_driver max77693_pmic_driver = {
.driver = {
.name = "max77693-pmic",
- .owner = THIS_MODULE,
},
.probe = max77693_pmic_probe,
.id_table = max77693_pmic_id,
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c
index 0766615c60bc..6af41abccacb 100644
--- a/drivers/regulator/max77802.c
+++ b/drivers/regulator/max77802.c
@@ -597,7 +597,6 @@ MODULE_DEVICE_TABLE(platform, max77802_pmic_id);
static struct platform_driver max77802_pmic_driver = {
.driver = {
.name = "max77802-pmic",
- .owner = THIS_MODULE,
},
.probe = max77802_pmic_probe,
.id_table = max77802_pmic_id,
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 3426be89c9f6..5e941db5ccaf 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -366,7 +366,6 @@ static int max8907_regulator_probe(struct platform_device *pdev)
static struct platform_driver max8907_regulator_driver = {
.driver = {
.name = "max8907-regulator",
- .owner = THIS_MODULE,
},
.probe = max8907_regulator_probe,
};
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 7770777befc4..c802f0239dc7 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -324,7 +324,6 @@ static int max8925_regulator_probe(struct platform_device *pdev)
static struct platform_driver max8925_regulator_driver = {
.driver = {
.name = "max8925-regulator",
- .owner = THIS_MODULE,
},
.probe = max8925_regulator_probe,
};
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 726fde1d883e..ea0196d4496b 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1219,7 +1219,6 @@ MODULE_DEVICE_TABLE(platform, max8997_pmic_id);
static struct platform_driver max8997_pmic_driver = {
.driver = {
.name = "max8997-pmic",
- .owner = THIS_MODULE,
},
.probe = max8997_pmic_probe,
.id_table = max8997_pmic_id,
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 59e34a05a4a2..3027e7ce100b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -895,7 +895,6 @@ MODULE_DEVICE_TABLE(platform, max8998_pmic_id);
static struct platform_driver max8998_pmic_driver = {
.driver = {
.name = "max8998-pmic",
- .owner = THIS_MODULE,
},
.probe = max8998_pmic_probe,
.id_table = max8998_pmic_id,
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 7f4a67edf780..fe4c7d677f9c 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -459,7 +459,6 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
static struct platform_driver mc13783_regulator_driver = {
.driver = {
.name = "mc13783-regulator",
- .owner = THIS_MODULE,
},
.probe = mc13783_regulator_probe,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 793b662a1967..0d17c9206816 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -633,7 +633,6 @@ err_unlock:
static struct platform_driver mc13892_regulator_driver = {
.driver = {
.name = "mc13892-regulator",
- .owner = THIS_MODULE,
},
.probe = mc13892_regulator_probe,
};
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 1878e5b567ef..9205f433573c 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1610,7 +1610,6 @@ static struct platform_driver palmas_driver = {
.driver = {
.name = "palmas-pmic",
.of_match_table = of_palmas_match_tbl,
- .owner = THIS_MODULE,
},
.probe = palmas_regulators_probe,
};
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 6d02d68dfb46..bd2b75c0d1d1 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -185,7 +185,6 @@ static struct platform_driver pbias_regulator_driver = {
.probe = pbias_regulator_probe,
.driver = {
.name = "pbias-regulator",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pbias_of_match),
},
};
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 3727b7d0e9ac..9b16e6158f15 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -259,7 +259,6 @@ static int pcap_regulator_probe(struct platform_device *pdev)
static struct platform_driver pcap_regulator_driver = {
.driver = {
.name = "pcap-regulator",
- .owner = THIS_MODULE,
},
.probe = pcap_regulator_probe,
};
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 91f34ca3a9ac..253833ae35f3 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -184,7 +184,6 @@ MODULE_DEVICE_TABLE(of, pwm_of_match);
static struct platform_driver pwm_regulator_driver = {
.driver = {
.name = "pwm-regulator",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pwm_of_match),
},
.probe = pwm_regulator_probe,
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 183598b111f9..8364ff331a81 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -797,7 +797,6 @@ static struct platform_driver rpm_reg_driver = {
.probe = rpm_reg_probe,
.driver = {
.name = "qcom_rpm_reg",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rpm_of_match),
},
};
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 4c414ae109ae..d2e67c512195 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -179,7 +179,6 @@ skip_ext_pwr_config:
static struct platform_driver rc5t583_regulator_driver = {
.driver = {
.name = "rc5t583-regulator",
- .owner = THIS_MODULE,
},
.probe = rc5t583_regulator_probe,
};
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index ea9d05eabd0a..c94a3e0f3b91 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -419,7 +419,6 @@ static struct platform_driver rk808_regulator_driver = {
.probe = rk808_regulator_probe,
.driver = {
.name = "rk808-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 5db4e12a7e04..92f88753bfed 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -405,7 +405,6 @@ MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);
static struct platform_driver s2mpa01_pmic_driver = {
.driver = {
.name = "s2mpa01-pmic",
- .owner = THIS_MODULE,
},
.probe = s2mpa01_pmic_probe,
.id_table = s2mpa01_pmic_id,
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index b345cf51225a..c1444c3d84c2 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1033,7 +1033,6 @@ MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
static struct platform_driver s2mps11_pmic_driver = {
.driver = {
.name = "s2mps11-pmic",
- .owner = THIS_MODULE,
},
.probe = s2mps11_pmic_probe,
.id_table = s2mps11_pmic_id,
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index dc1328c0c71c..58f5d3b8e981 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -988,7 +988,6 @@ MODULE_DEVICE_TABLE(platform, s5m8767_pmic_id);
static struct platform_driver s5m8767_pmic_driver = {
.driver = {
.name = "s5m8767-pmic",
- .owner = THIS_MODULE,
},
.probe = s5m8767_pmic_probe,
.id_table = s5m8767_pmic_id,
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index b4f1696456a7..89025f560259 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -95,7 +95,6 @@ static const struct of_device_id stw481x_vmmc_match[] = {
static struct platform_driver stw481x_vmmc_regulator_driver = {
.driver = {
.name = "stw481x-vmmc-regulator",
- .owner = THIS_MODULE,
.of_match_table = stw481x_vmmc_match,
},
.probe = stw481x_vmmc_regulator_probe,
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 1ef5aba96f17..d2f994298753 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -892,7 +892,6 @@ static struct platform_driver ti_abb_driver = {
.probe = ti_abb_probe,
.driver = {
.name = "ti_abb",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ti_abb_of_match),
},
};
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index c1e33a3d397b..3510b3e7330a 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -163,7 +163,6 @@ static int tps6105x_regulator_probe(struct platform_device *pdev)
static struct platform_driver tps6105x_regulator_driver = {
.driver = {
.name = "tps6105x-regulator",
- .owner = THIS_MODULE,
},
.probe = tps6105x_regulator_probe,
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 98e66ce26723..dad0bac09ecf 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -519,7 +519,6 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
static struct platform_driver tps6507x_pmic_driver = {
.driver = {
.name = "tps6507x-pmic",
- .owner = THIS_MODULE,
},
.probe = tps6507x_pmic_probe,
};
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index f1df4423d361..395f35dc8cdb 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -511,7 +511,6 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
static struct platform_driver tps65090_regulator_driver = {
.driver = {
.name = "tps65090-pmic",
- .owner = THIS_MODULE,
},
.probe = tps65090_regulator_probe,
};
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 263cc85d6202..7f97223f95c5 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -256,7 +256,6 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
static struct platform_driver tps65218_regulator_driver = {
.driver = {
.name = "tps65218-pmic",
- .owner = THIS_MODULE,
.of_match_table = tps65218_of_match,
},
.probe = tps65218_regulator_probe,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index ccbb9f150b4e..2852de05d64d 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -522,7 +522,6 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
.name = "tps6586x-regulator",
- .owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
};
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 18fc991175bc..e2cffe01b807 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1245,7 +1245,6 @@ static void tps65910_shutdown(struct platform_device *pdev)
static struct platform_driver tps65910_driver = {
.driver = {
.name = "tps65910-pmic",
- .owner = THIS_MODULE,
},
.probe = tps65910_probe,
.shutdown = tps65910_shutdown,
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 9cafaa0f9455..9503d5481a52 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -519,7 +519,6 @@ static int tps65912_probe(struct platform_device *pdev)
static struct platform_driver tps65912_driver = {
.driver = {
.name = "tps65912-pmic",
- .owner = THIS_MODULE,
},
.probe = tps65912_probe,
};
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 26aa6d9c308f..d4cc60ad18ae 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -746,7 +746,6 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
static struct platform_driver tps80031_regulator_driver = {
.driver = {
.name = "tps80031-pmic",
- .owner = THIS_MODULE,
},
.probe = tps80031_regulator_probe,
};
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index dd727bca1983..955a6fb1355c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1221,7 +1221,6 @@ static struct platform_driver twlreg_driver = {
*/
.driver = {
.name = "twl_reg",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl_of_match),
},
};
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index 5e7c789023a9..bed9d3ee4198 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -108,7 +108,6 @@ static struct platform_driver vexpress_regulator_driver = {
.probe = vexpress_regulator_probe,
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.of_match_table = vexpress_regulator_of_match,
},
};
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 6ff95b045984..a6f1c7a9914f 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -338,7 +338,6 @@ static struct platform_driver regulator_virtual_consumer_driver = {
.remove = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 0d88a82ab2a2..0d7e164a5e76 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -562,7 +562,6 @@ static struct platform_driver wm831x_buckv_driver = {
.probe = wm831x_buckv_probe,
.driver = {
.name = "wm831x-buckv",
- .owner = THIS_MODULE,
},
};
@@ -689,7 +688,6 @@ static struct platform_driver wm831x_buckp_driver = {
.probe = wm831x_buckp_probe,
.driver = {
.name = "wm831x-buckp",
- .owner = THIS_MODULE,
},
};
@@ -804,7 +802,6 @@ static struct platform_driver wm831x_boostp_driver = {
.probe = wm831x_boostp_probe,
.driver = {
.name = "wm831x-boostp",
- .owner = THIS_MODULE,
},
};
@@ -880,7 +877,6 @@ static struct platform_driver wm831x_epe_driver = {
.probe = wm831x_epe_probe,
.driver = {
.name = "wm831x-epe",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 72e385e76a9d..1e88391a1628 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -224,7 +224,6 @@ static struct platform_driver wm831x_isink_driver = {
.probe = wm831x_isink_probe,
.driver = {
.name = "wm831x-isink",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index eca0eeb78acd..7ae2dc82f636 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -307,7 +307,6 @@ static struct platform_driver wm831x_gp_ldo_driver = {
.probe = wm831x_gp_ldo_probe,
.driver = {
.name = "wm831x-ldo",
- .owner = THIS_MODULE,
},
};
@@ -516,7 +515,6 @@ static struct platform_driver wm831x_aldo_driver = {
.probe = wm831x_aldo_probe,
.driver = {
.name = "wm831x-aldo",
- .owner = THIS_MODULE,
},
};
@@ -650,7 +648,6 @@ static struct platform_driver wm831x_alive_ldo_driver = {
.probe = wm831x_alive_ldo_probe,
.driver = {
.name = "wm831x-alive-ldo",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 88f5064e412b..750e0bd61f9f 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -187,7 +187,6 @@ static struct platform_driver wm8994_ldo_driver = {
.probe = wm8994_ldo_probe,
.driver = {
.name = "wm8994-ldo",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 3841b9813109..89fd057e5f1d 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -300,7 +300,6 @@ static struct platform_driver da8xx_rproc_driver = {
.remove = da8xx_rproc_remove,
.driver = {
.name = "davinci-rproc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 51689721ea7a..e85f30370760 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/remoteproc.h>
+#include <linux/mailbox_client.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_data/remoteproc-omap.h>
@@ -36,20 +37,19 @@
/**
* struct omap_rproc - omap remote processor state
- * @mbox: omap mailbox handle
- * @nb: notifier block that will be invoked on inbound mailbox messages
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
* @rproc: rproc handle
*/
struct omap_rproc {
- struct omap_mbox *mbox;
- struct notifier_block nb;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
struct rproc *rproc;
};
/**
* omap_rproc_mbox_callback() - inbound mailbox message handler
- * @this: notifier block
- * @index: unused
+ * @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
*
* This handler is invoked by omap's mailbox driver whenever a mailbox
@@ -61,13 +61,13 @@ struct omap_rproc {
* that indicates different events. Those values are deliberately very
* big so they don't coincide with virtqueue indices.
*/
-static int omap_rproc_mbox_callback(struct notifier_block *this,
- unsigned long index, void *data)
+static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
{
- mbox_msg_t msg = (mbox_msg_t) data;
- struct omap_rproc *oproc = container_of(this, struct omap_rproc, nb);
+ struct omap_rproc *oproc = container_of(client, struct omap_rproc,
+ client);
struct device *dev = oproc->rproc->dev.parent;
const char *name = oproc->rproc->name;
+ u32 msg = (u32)data;
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
@@ -84,8 +84,6 @@ static int omap_rproc_mbox_callback(struct notifier_block *this,
if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
-
- return NOTIFY_DONE;
}
/* kick a virtqueue */
@@ -96,8 +94,8 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
- ret = omap_mbox_msg_send(oproc->mbox, vqid);
- if (ret)
+ ret = mbox_send_message(oproc->mbox, (void *)vqid);
+ if (ret < 0)
dev_err(dev, "omap_mbox_msg_send failed: %d\n", ret);
}
@@ -115,17 +113,22 @@ static int omap_rproc_start(struct rproc *rproc)
struct platform_device *pdev = to_platform_device(dev);
struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
int ret;
+ struct mbox_client *client = &oproc->client;
if (pdata->set_bootaddr)
pdata->set_bootaddr(rproc->bootaddr);
- oproc->nb.notifier_call = omap_rproc_mbox_callback;
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = omap_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
- /* every omap rproc is assigned a mailbox instance for messaging */
- oproc->mbox = omap_mbox_get(pdata->mbox_name, &oproc->nb);
+ oproc->mbox = omap_mbox_request_channel(client, pdata->mbox_name);
if (IS_ERR(oproc->mbox)) {
- ret = PTR_ERR(oproc->mbox);
- dev_err(dev, "omap_mbox_get failed: %d\n", ret);
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(oproc->mbox));
return ret;
}
@@ -136,9 +139,9 @@ static int omap_rproc_start(struct rproc *rproc)
* Note that the reply will _not_ arrive immediately: this message
* will wait in the mailbox fifo until the remote processor is booted.
*/
- ret = omap_mbox_msg_send(oproc->mbox, RP_MBOX_ECHO_REQUEST);
- if (ret) {
- dev_err(dev, "omap_mbox_get failed: %d\n", ret);
+ ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
goto put_mbox;
}
@@ -151,7 +154,7 @@ static int omap_rproc_start(struct rproc *rproc)
return 0;
put_mbox:
- omap_mbox_put(oproc->mbox, &oproc->nb);
+ mbox_free_channel(oproc->mbox);
return ret;
}
@@ -168,7 +171,7 @@ static int omap_rproc_stop(struct rproc *rproc)
if (ret)
return ret;
- omap_mbox_put(oproc->mbox, &oproc->nb);
+ mbox_free_channel(oproc->mbox);
return 0;
}
@@ -228,7 +231,6 @@ static struct platform_driver omap_rproc_driver = {
.remove = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index a34b50690b4e..e1a10232a943 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -207,7 +207,7 @@ static void rproc_virtio_reset(struct virtio_device *vdev)
}
/* provide the vdev features as retrieved from the firmware */
-static u32 rproc_virtio_get_features(struct virtio_device *vdev)
+static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
@@ -217,7 +217,7 @@ static u32 rproc_virtio_get_features(struct virtio_device *vdev)
return rsc->dfeatures;
}
-static void rproc_virtio_finalize_features(struct virtio_device *vdev)
+static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct fw_rsc_vdev *rsc;
@@ -227,11 +227,16 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features */
vring_transport_features(vdev);
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
/*
* Remember the finalized features of our vdev, and provide it
* to the remote processor once it is powered on.
*/
- rsc->gfeatures = vdev->features[0];
+ rsc->gfeatures = vdev->features;
+
+ return 0;
}
static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset,
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c
index c4ac9104dd8e..16b7b7bd805b 100644
--- a/drivers/remoteproc/ste_modem_rproc.c
+++ b/drivers/remoteproc/ste_modem_rproc.c
@@ -331,7 +331,6 @@ free_rproc:
static struct platform_driver sproc_driver = {
.driver = {
.name = SPROC_MODEM_NAME,
- .owner = THIS_MODULE,
},
.probe = sproc_probe,
.remove = sproc_drv_remove,
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 40582089474a..0a8def35ea2e 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -150,7 +150,6 @@ static struct platform_driver socfpga_reset_driver = {
.remove = socfpga_reset_remove,
.driver = {
.name = "socfpga-reset",
- .owner = THIS_MODULE,
.of_match_table = socfpga_reset_dt_ids,
},
};
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index a94e7a7820b4..eebc52cb6984 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -179,7 +179,6 @@ static struct platform_driver sunxi_reset_driver = {
.remove = sunxi_reset_remove,
.driver = {
.name = "sunxi-reset",
- .owner = THIS_MODULE,
.of_match_table = sunxi_reset_dt_ids,
},
};
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
index c93fd260447e..8dad603d863c 100644
--- a/drivers/reset/sti/reset-stih415.c
+++ b/drivers/reset/sti/reset-stih415.c
@@ -101,7 +101,6 @@ static struct platform_driver stih415_reset_driver = {
.probe = syscfg_reset_probe,
.driver = {
.name = "reset-stih415",
- .owner = THIS_MODULE,
.of_match_table = stih415_reset_match,
},
};
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
index 5fc987076a90..79aed70a26c0 100644
--- a/drivers/reset/sti/reset-stih416.c
+++ b/drivers/reset/sti/reset-stih416.c
@@ -132,7 +132,6 @@ static struct platform_driver stih416_reset_driver = {
.probe = syscfg_reset_probe,
.driver = {
.name = "reset-stih416",
- .owner = THIS_MODULE,
.of_match_table = stih416_reset_match,
},
};
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4d54eb..92f6af6da699 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -41,6 +41,7 @@
* @svq: tx virtqueue
* @rbufs: kernel address of rx buffers
* @sbufs: kernel address of tx buffers
+ * @num_bufs: total number of buffers for rx and tx
* @last_sbuf: index of last tx buffer used
* @bufs_dma: dma base addr of the buffers
* @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
@@ -60,6 +61,7 @@ struct virtproc_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq;
void *rbufs, *sbufs;
+ unsigned int num_bufs;
int last_sbuf;
dma_addr_t bufs_dma;
struct mutex tx_lock;
@@ -86,13 +88,14 @@ struct rpmsg_channel_info {
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
/*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers will be computed from the number of buffers supported
+ * by the vring, up to a maximum of 512 buffers (256 in each direction).
*
* Each buffer will have 16 bytes for the msg header and 496 bytes for
* the payload.
*
- * This will require a total space of 256KB for the buffers.
+ * This will utilize a maximum total space of 256KB for the buffers.
*
* We might also want to add support for user-provided buffers in time.
* This will allow bigger buffer size flexibility, and can also be used
@@ -102,9 +105,8 @@ struct rpmsg_channel_info {
* can change this without changing anything in the firmware of the remote
* processor.
*/
-#define RPMSG_NUM_BUFS (512)
+#define MAX_RPMSG_NUM_BUFS (512)
#define RPMSG_BUF_SIZE (512)
-#define RPMSG_TOTAL_BUF_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
/*
* Local addresses are dynamically allocated on-demand.
@@ -579,7 +581,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
* either pick the next unused tx buffer
* (half of our buffers are used for sending messages)
*/
- if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+ if (vrp->last_sbuf < vrp->num_bufs / 2)
ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
/* or recycle a used one */
else
@@ -948,6 +950,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
struct virtproc_info *vrp;
void *bufs_va;
int err = 0, i;
+ size_t total_buf_space;
vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
if (!vrp)
@@ -968,10 +971,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
vrp->rvq = vqs[0];
vrp->svq = vqs[1];
+ /* we expect symmetric tx/rx vrings */
+ WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
+ virtqueue_get_vring_size(vrp->svq));
+
+ /* we need fewer buffers if vrings are small */
+ if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
+ vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+ else
+ vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
+
+ total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
/* allocate coherent memory for the buffers */
bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
- RPMSG_TOTAL_BUF_SPACE,
- &vrp->bufs_dma, GFP_KERNEL);
+ total_buf_space, &vrp->bufs_dma,
+ GFP_KERNEL);
if (!bufs_va) {
err = -ENOMEM;
goto vqs_del;
@@ -984,10 +999,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
vrp->rbufs = bufs_va;
/* and half is dedicated for TX */
- vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+ vrp->sbufs = bufs_va + total_buf_space / 2;
/* set up the receive buffers */
- for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+ for (i = 0; i < vrp->num_bufs / 2; i++) {
struct scatterlist sg;
void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
@@ -1023,8 +1038,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
return 0;
free_coherent:
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- bufs_va, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
free_vrp:
@@ -1042,6 +1057,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
static void rpmsg_remove(struct virtio_device *vdev)
{
struct virtproc_info *vrp = vdev->priv;
+ size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
int ret;
vdev->config->reset(vdev);
@@ -1057,8 +1073,8 @@ static void rpmsg_remove(struct virtio_device *vdev)
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- vrp->rbufs, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4511ddc1ac31..f15cddfeb897 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -987,6 +987,17 @@ config RTC_DRV_NUC900
If you say yes here you get support for the RTC subsystem of the
NUC910/NUC920 used in embedded systems.
+config RTC_DRV_OPAL
+ tristate "IBM OPAL RTC driver"
+ depends on PPC_POWERNV
+ default y
+ help
+ If you say yes here you get support for the PowerNV platform RTC
+ driver based on OPAL interfaces.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-opal.
+
comment "on-CPU RTC drivers"
config RTC_DRV_DAVINCI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b188323c096a..c8ef3e1e6ccd 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
+obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 0916089c7c3e..7df0579d9852 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -352,7 +352,6 @@ static int pm80x_rtc_remove(struct platform_device *pdev)
static struct platform_driver pm80x_rtc_driver = {
.driver = {
.name = "88pm80x-rtc",
- .owner = THIS_MODULE,
.pm = &pm80x_rtc_pm_ops,
},
.probe = pm80x_rtc_probe,
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 0c6add1a38dc..19e53b3b8e00 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -448,7 +448,6 @@ static SIMPLE_DEV_PM_OPS(pm860x_rtc_pm_ops, pm860x_rtc_suspend, pm860x_rtc_resum
static struct platform_driver pm860x_rtc_driver = {
.driver = {
.name = "88pm860x-rtc",
- .owner = THIS_MODULE,
.pm = &pm860x_rtc_pm_ops,
},
.probe = pm860x_rtc_probe,
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index ff435343ba9f..1d0340fdb820 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -243,7 +243,6 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
static struct platform_driver ab3100_rtc_driver = {
.driver = {
.name = "ab3100-rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 866e0ef5122d..6856f0a3a3d5 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -519,7 +519,6 @@ static int ab8500_rtc_remove(struct platform_device *pdev)
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
- .owner = THIS_MODULE,
},
.probe = ab8500_rtc_probe,
.remove = ab8500_rtc_remove,
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index aee3387fb099..d618d6c7ef93 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -277,7 +277,6 @@ static struct platform_driver at32_rtc_driver = {
.remove = __exit_p(at32_rtc_remove),
.driver = {
.name = "at32ap700x_rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 44fe83ee9bee..70a5d94cc766 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -491,7 +491,6 @@ static struct platform_driver at91_rtc_driver = {
.shutdown = at91_rtc_shutdown,
.driver = {
.name = "at91_rtc",
- .owner = THIS_MODULE,
.pm = &at91_rtc_pm_ops,
.of_match_table = of_match_ptr(at91_rtc_dt_ids),
},
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index abac38abd38e..6b9aaf1afc72 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -525,7 +525,6 @@ static struct platform_driver at91_rtc_driver = {
.shutdown = at91_rtc_shutdown,
.driver = {
.name = "rtc-at91sam9",
- .owner = THIS_MODULE,
.pm = &at91_rtc_pm_ops,
.of_match_table = of_match_ptr(at91_rtc_dt_ids),
},
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index fd25e2374d4e..84d6e026784d 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -117,7 +117,6 @@ out_err:
static struct platform_driver au1xrtc_driver = {
.driver = {
.name = "rtc-au1xxx",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index fe4bdb06a55a..3d44b11721ea 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -434,7 +434,6 @@ static SIMPLE_DEV_PM_OPS(bfin_rtc_pm_ops, bfin_rtc_suspend, bfin_rtc_resume);
static struct platform_driver bfin_rtc_driver = {
.driver = {
.name = "rtc-bfin",
- .owner = THIS_MODULE,
.pm = &bfin_rtc_pm_ops,
},
.probe = bfin_rtc_probe,
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index fc0ff87aa5df..bd170cb3361c 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -192,7 +192,6 @@ MODULE_ALIAS("platform:rtc-bq4802");
static struct platform_driver bq4802_driver = {
.driver = {
.name = "rtc-bq4802",
- .owner = THIS_MODULE,
},
.probe = bq4802_probe,
};
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 869cae273799..56343b2fbc68 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -267,7 +267,6 @@ static const struct of_device_id coh901331_dt_match[] = {
static struct platform_driver coh901331_driver = {
.driver = {
.name = "rtc-coh901331",
- .owner = THIS_MODULE,
.pm = &coh901331_pm_ops,
.of_match_table = coh901331_dt_match,
},
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index e5c9486cf452..613c43b7e9ae 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -265,7 +265,6 @@ static struct platform_driver da9052_rtc_driver = {
.probe = da9052_rtc_probe,
.driver = {
.name = "da9052-rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index a825491331c8..7ec0872d5e3b 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -391,7 +391,6 @@ static struct platform_driver da9055_rtc_driver = {
.probe = da9055_rtc_probe,
.driver = {
.name = "da9055-rtc",
- .owner = THIS_MODULE,
.pm = &da9055_rtc_pm_ops,
},
};
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 731ed1a97f59..7ffc5707f8b9 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -341,7 +341,6 @@ static struct platform_driver da9063_rtc_driver = {
.probe = da9063_rtc_probe,
.driver = {
.name = DA9063_DRVNAME_RTC,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index c0a3b59f65a2..c84f46168a52 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -550,7 +550,6 @@ static struct platform_driver davinci_rtc_driver = {
.remove = __exit_p(davinci_rtc_remove),
.driver = {
.name = "rtc_davinci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 1aca08394c47..94067f8eeb10 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -146,7 +146,6 @@ static int dm355evm_rtc_probe(struct platform_device *pdev)
static struct platform_driver rtc_dm355evm_driver = {
.probe = dm355evm_rtc_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "rtc-dm355evm",
},
};
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index 9c04fd2bc209..d16f550897b8 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -170,7 +170,6 @@ static int __init ds1216_rtc_probe(struct platform_device *pdev)
static struct platform_driver ds1216_rtc_platform_driver = {
.driver = {
.name = "rtc-ds1216",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 50e109b78252..2fe537f4e2bd 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -356,7 +356,6 @@ static int ds1286_probe(struct platform_device *pdev)
static struct platform_driver ds1286_platform_driver = {
.driver = {
.name = "rtc-ds1286",
- .owner = THIS_MODULE,
},
.probe = ds1286_probe,
};
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 07e8d79b4a09..6bef7a5233c4 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -244,7 +244,6 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev)
static struct platform_driver ds1302_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index b13d1399b81a..7415c2b4d6e8 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -561,7 +561,6 @@ static struct platform_driver ds1511_rtc_driver = {
.remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ab56893aac73..a24e091bcb41 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -351,7 +351,6 @@ static struct platform_driver ds1553_rtc_driver = {
.remove = ds1553_rtc_remove,
.driver = {
.name = "rtc-ds1553",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 9822715db8ba..0f8d8ace1515 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -230,7 +230,6 @@ static struct platform_driver ds1742_rtc_driver = {
.remove = ds1742_rtc_remove,
.driver = {
.name = "rtc-ds1742",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ds1742_rtc_of_match),
},
};
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index fc209dc4e245..7885edd3d507 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -274,7 +274,6 @@ static struct platform_driver rtc_device_driver = {
.remove = rtc_remove,
.driver = {
.name = "ds2404",
- .owner = THIS_MODULE,
},
};
module_platform_driver(rtc_device_driver);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 53b589dc34eb..b37b0c80bd5a 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -226,7 +226,6 @@ static int __init efi_rtc_probe(struct platform_device *dev)
static struct platform_driver efi_rtc_driver = {
.driver = {
.name = "rtc-efi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 5e4f5dc40ba5..de325d68c7e4 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -174,7 +174,6 @@ static int ep93xx_rtc_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
- .owner = THIS_MODULE,
},
.probe = ep93xx_rtc_probe,
.remove = ep93xx_rtc_remove,
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c
index 9b6725ebbfb2..e782ebd719b2 100644
--- a/drivers/rtc/rtc-generic.c
+++ b/drivers/rtc/rtc-generic.c
@@ -51,7 +51,6 @@ static int __init generic_rtc_probe(struct platform_device *dev)
static struct platform_driver generic_rtc_driver = {
.driver = {
.name = "rtc-generic",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 965a9da70867..ae7c2ba440cf 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -331,7 +331,6 @@ static struct platform_driver hid_time_platform_driver = {
.id_table = hid_time_ids,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = hid_time_probe,
.remove = hid_time_remove,
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index cd741c77e085..42f5570f42f8 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -499,7 +499,6 @@ MODULE_DEVICE_TABLE(of, dryice_dt_ids);
static struct platform_driver dryice_rtc_driver = {
.driver = {
.name = "imxdi_rtc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dryice_dt_ids),
},
.remove = __exit_p(dryice_rtc_remove),
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 08f5160fb6d4..b2bcfc0bf2e5 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -299,7 +299,6 @@ static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
- .owner = THIS_MODULE,
.pm = JZ4740_RTC_PM_OPS,
},
};
diff --git a/drivers/rtc/rtc-lp8788.c b/drivers/rtc/rtc-lp8788.c
index 4ff6c73253b3..e20e7bd822e0 100644
--- a/drivers/rtc/rtc-lp8788.c
+++ b/drivers/rtc/rtc-lp8788.c
@@ -316,7 +316,6 @@ static struct platform_driver lp8788_rtc_driver = {
.probe = lp8788_rtc_probe,
.driver = {
.name = LP8788_DEV_RTC,
- .owner = THIS_MODULE,
},
};
module_platform_driver(lp8788_rtc_driver);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index f130c08c98f8..f923f7324788 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -379,7 +379,6 @@ static struct platform_driver lpc32xx_rtc_driver = {
.remove = lpc32xx_rtc_remove,
.driver = {
.name = RTC_NAME,
- .owner = THIS_MODULE,
.pm = LPC32XX_RTC_PM_OPS,
.of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
index 682ecb094839..8445e564094a 100644
--- a/drivers/rtc/rtc-ls1x.c
+++ b/drivers/rtc/rtc-ls1x.c
@@ -188,7 +188,6 @@ err:
static struct platform_driver ls1x_rtc_driver = {
.driver = {
.name = "ls1x-rtc",
- .owner = THIS_MODULE,
},
.probe = ls1x_rtc_probe,
};
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 411adb3f86a1..c62b51217ecf 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -181,7 +181,6 @@ static int m48t35_probe(struct platform_device *pdev)
static struct platform_driver m48t35_platform_driver = {
.driver = {
.name = "rtc-m48t35",
- .owner = THIS_MODULE,
},
.probe = m48t35_probe,
};
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 11880c1e9dac..90abb5bd589c 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -504,7 +504,6 @@ MODULE_ALIAS("platform:rtc-m48t59");
static struct platform_driver m48t59_rtc_driver = {
.driver = {
.name = "rtc-m48t59",
- .owner = THIS_MODULE,
},
.probe = m48t59_rtc_probe,
.remove = m48t59_rtc_remove,
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 32f64c942621..a17b7a3ceece 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -169,7 +169,6 @@ static int m48t86_rtc_probe(struct platform_device *dev)
static struct platform_driver m48t86_rtc_platform_driver = {
.driver = {
.name = "rtc-m48t86",
- .owner = THIS_MODULE,
},
.probe = m48t86_rtc_probe,
};
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index cf73e969c8cc..9d71328e59b9 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -513,7 +513,6 @@ static const struct platform_device_id rtc_id[] = {
static struct platform_driver max77686_rtc_driver = {
.driver = {
.name = "max77686-rtc",
- .owner = THIS_MODULE,
.pm = &max77686_rtc_pm_ops,
},
.probe = max77686_rtc_probe,
diff --git a/drivers/rtc/rtc-max77802.c b/drivers/rtc/rtc-max77802.c
index 566471335b33..7f8adf8d6feb 100644
--- a/drivers/rtc/rtc-max77802.c
+++ b/drivers/rtc/rtc-max77802.c
@@ -488,7 +488,6 @@ static const struct platform_device_id rtc_id[] = {
static struct platform_driver max77802_rtc_driver = {
.driver = {
.name = "max77802-rtc",
- .owner = THIS_MODULE,
.pm = &max77802_rtc_pm_ops,
},
.probe = max77802_rtc_probe,
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index 3032178bd9e6..19c29b72598d 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -215,7 +215,6 @@ static int max8907_rtc_probe(struct platform_device *pdev)
static struct platform_driver max8907_rtc_driver = {
.driver = {
.name = "max8907-rtc",
- .owner = THIS_MODULE,
},
.probe = max8907_rtc_probe,
};
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 951d1a78e190..16d129a0bb3b 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -314,7 +314,6 @@ static SIMPLE_DEV_PM_OPS(max8925_rtc_pm_ops, max8925_rtc_suspend, max8925_rtc_re
static struct platform_driver max8925_rtc_driver = {
.driver = {
.name = "max8925-rtc",
- .owner = THIS_MODULE,
.pm = &max8925_rtc_pm_ops,
},
.probe = max8925_rtc_probe,
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index 0777c01b58e0..67fbe559d535 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -523,7 +523,6 @@ static const struct platform_device_id rtc_id[] = {
static struct platform_driver max8997_rtc_driver = {
.driver = {
.name = "max8997-rtc",
- .owner = THIS_MODULE,
},
.probe = max8997_rtc_probe,
.shutdown = max8997_rtc_shutdown,
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index f098ad8382de..5726ef7bd56e 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -313,7 +313,6 @@ static const struct platform_device_id max8998_rtc_id[] = {
static struct platform_driver max8998_rtc_driver = {
.driver = {
.name = "max8998-rtc",
- .owner = THIS_MODULE,
},
.probe = max8998_rtc_probe,
.id_table = max8998_rtc_id,
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 0765606a2d14..5bce904b7ee6 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -370,7 +370,6 @@ static struct platform_driver mc13xxx_rtc_driver = {
.remove = mc13xxx_rtc_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index c31846238871..73759c9a4527 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -317,7 +317,6 @@ static struct platform_driver moxart_rtc_driver = {
.probe = moxart_rtc_probe,
.driver = {
.name = "moxart-rtc",
- .owner = THIS_MODULE,
.of_match_table = moxart_rtc_match,
},
};
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 3b965ad6f4d5..1767e18d5bd4 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -411,7 +411,6 @@ static const struct of_device_id mpc5121_rtc_match[] = {
static struct platform_driver mpc5121_rtc_driver = {
.driver = {
.name = "mpc5121-rtc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mpc5121_rtc_match),
},
.probe = mpc5121_rtc_probe,
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 426cb5189daa..9bf877bdf836 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -225,7 +225,6 @@ static int __init msm6242_rtc_probe(struct platform_device *pdev)
static struct platform_driver msm6242_rtc_driver = {
.driver = {
.name = "rtc-msm6242",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 6aaec2fc7c0d..423762241042 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -329,7 +329,6 @@ static struct platform_driver mv_rtc_driver = {
.remove = __exit_p(mv_rtc_remove),
.driver = {
.name = "rtc-mv",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rtc_mv_of_match_table),
},
};
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 419874fefa4b..3c3f8d10ab43 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -489,7 +489,6 @@ static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
.pm = &mxc_rtc_pm_ops,
- .owner = THIS_MODULE,
},
.id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index a53da0958e95..09fc1c19f0df 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -262,7 +262,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev)
static struct platform_driver nuc900_rtc_driver = {
.driver = {
.name = "nuc900-rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 4f1c6ca97211..8e5851aa4369 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -719,7 +719,6 @@ static struct platform_driver omap_rtc_driver = {
.shutdown = omap_rtc_shutdown,
.driver = {
.name = "omap_rtc",
- .owner = THIS_MODULE,
.pm = &omap_rtc_pm_ops,
.of_match_table = omap_rtc_of_match,
},
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
new file mode 100644
index 000000000000..95f652165fe9
--- /dev/null
+++ b/drivers/rtc/rtc-opal.c
@@ -0,0 +1,261 @@
+/*
+ * IBM OPAL RTC driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#define DRVNAME "rtc-opal"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+{
+ tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
+ bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
+ tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
+ tm->tm_mday = bcd2bin(y_m_d & 0xff);
+ tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
+ tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
+ tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
+
+ GregorianDay(tm);
+}
+
+static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+{
+ *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
+ *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
+ *y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
+ *y_m_d |= ((u32)bin2bcd(tm->tm_mday));
+
+ *h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
+ *h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
+ *h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
+}
+
+static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+ long rc = OPAL_BUSY;
+ u32 y_m_d;
+ u64 h_m_s_ms;
+ __be32 __y_m_d;
+ __be64 __h_m_s_ms;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ msleep(10);
+ }
+
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ y_m_d = be32_to_cpu(__y_m_d);
+ h_m_s_ms = be64_to_cpu(__h_m_s_ms);
+ opal_to_tm(y_m_d, h_m_s_ms, tm);
+
+ return 0;
+}
+
+static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+ long rc = OPAL_BUSY;
+ u32 y_m_d = 0;
+ u64 h_m_s_ms = 0;
+
+ tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_rtc_write(y_m_d, h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ msleep(10);
+ }
+
+ return rc == OPAL_SUCCESS ? 0 : -EIO;
+}
+
+/*
+ * TPO Timed Power-On
+ *
+ * The TPO get/set OPAL calls only care about the hour and minute. To keep
+ * this consistent with the rtc utility time conversion functions, we use a
+ * 'u64' to store the value and shift it by 32 bits before use.
+ */
+static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ __be32 __y_m_d, __h_m;
+ struct opal_msg msg;
+ int rc, token;
+ u64 h_m_s_ms;
+ u32 y_m_d;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("Failed to get the async token\n");
+
+ return token;
+ }
+
+ rc = opal_tpo_read(token, &__y_m_d, &__h_m);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc != OPAL_SUCCESS) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ y_m_d = be32_to_cpu(__y_m_d);
+ h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+ opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
+
+exit:
+ opal_async_release_token(token);
+ return rc;
+}
+
+/* Set Timed Power-On */
+static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ u64 h_m_s_ms = 0;
+ struct opal_msg msg;
+ u32 y_m_d = 0;
+ int rc, token;
+
+ tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("Failed to get the async token\n");
+
+ return token;
+ }
+
+ /* TPO, we care about hour and minute */
+ rc = opal_tpo_write(token, y_m_d,
+ (u32)((h_m_s_ms >> 32) & 0xffff0000));
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc != OPAL_SUCCESS)
+ rc = -EIO;
+
+exit:
+ opal_async_release_token(token);
+ return rc;
+}
+
+static const struct rtc_class_ops opal_rtc_ops = {
+ .read_time = opal_get_rtc_time,
+ .set_time = opal_set_rtc_time,
+ .read_alarm = opal_get_tpo_time,
+ .set_alarm = opal_set_tpo_time,
+};
+
+static int opal_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+
+ if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
+ NULL))
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ rtc->uie_unsupported = 1;
+
+ return 0;
+}
+
+static const struct of_device_id opal_rtc_match[] = {
+ {
+ .compatible = "ibm,opal-rtc",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, opal_rtc_match);
+
+static const struct platform_device_id opal_rtc_driver_ids[] = {
+ {
+ .name = "opal-rtc",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids);
+
+static struct platform_driver opal_rtc_driver = {
+ .probe = opal_rtc_probe,
+ .id_table = opal_rtc_driver_ids,
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = opal_rtc_match,
+ },
+};
+
+static int __init opal_rtc_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ return platform_driver_register(&opal_rtc_driver);
+}
+
+static void __exit opal_rtc_exit(void)
+{
+ platform_driver_unregister(&opal_rtc_driver);
+}
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(opal_rtc_init);
+module_exit(opal_rtc_exit);
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 4dfe2d793fa3..3b01d567496d 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -363,7 +363,6 @@ static struct platform_driver palmas_rtc_driver = {
.probe = palmas_rtc_probe,
.remove = palmas_rtc_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "palmas-rtc",
.pm = &palmas_rtc_pm_ops,
.of_match_table = of_match_ptr(of_palmas_rtc_match),
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 40b5c630bc7d..c4433240d8a9 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -184,7 +184,6 @@ static struct platform_driver pcap_rtc_driver = {
.remove = __exit_p(pcap_rtc_remove),
.driver = {
.name = "pcap-rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 5adcf111fc14..795fcbd02ea3 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -531,7 +531,6 @@ static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
.driver = {
.name = "rtc-pm8xxx",
- .owner = THIS_MODULE,
.pm = &pm8xxx_rtc_pm_ops,
.of_match_table = pm8xxx_id_table,
},
diff --git a/drivers/rtc/rtc-ps3.c b/drivers/rtc/rtc-ps3.c
index 554ada5e9b76..6a8f5d758eac 100644
--- a/drivers/rtc/rtc-ps3.c
+++ b/drivers/rtc/rtc-ps3.c
@@ -74,7 +74,6 @@ static int __init ps3_rtc_probe(struct platform_device *dev)
static struct platform_driver ps3_rtc_driver = {
.driver = {
.name = "rtc-ps3",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 1cff2a21db67..c0a6e638c672 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -328,7 +328,6 @@ static struct platform_driver puv3_rtc_driver = {
.remove = puv3_rtc_remove,
.driver = {
.name = "PKUnity-v3-RTC",
- .owner = THIS_MODULE,
.pm = &puv3_rtc_pm_ops,
}
};
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index e53e9b1c69b3..f28d57788951 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -310,7 +310,6 @@ static struct platform_driver rc5t583_rtc_driver = {
.probe = rc5t583_rtc_probe,
.remove = rc5t583_rtc_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rtc-rc5t583",
.pm = &rc5t583_rtc_pm_ops,
},
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 89d073679267..b548551f385c 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -273,7 +273,6 @@ static int __exit rp5c01_rtc_remove(struct platform_device *dev)
static struct platform_driver rp5c01_rtc_driver = {
.driver = {
.name = "rtc-rp5c01",
- .owner = THIS_MODULE,
},
.remove = __exit_p(rp5c01_rtc_remove),
};
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index 68f7856422f1..5f48167c802a 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -381,7 +381,6 @@ static int rs5c313_rtc_probe(struct platform_device *pdev)
static struct platform_driver rs5c313_rtc_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = rs5c313_rtc_probe,
};
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 806072238c00..4241eeab3386 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -896,7 +896,6 @@ static struct platform_driver s3c_rtc_driver = {
.remove = s3c_rtc_remove,
.driver = {
.name = "s3c-rtc",
- .owner = THIS_MODULE,
.pm = &s3c_rtc_pm_ops,
.of_match_table = of_match_ptr(s3c_rtc_dt_match),
},
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 8754c33361e8..b5e7c4670205 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -837,7 +837,6 @@ static const struct platform_device_id s5m_rtc_id[] = {
static struct platform_driver s5m_rtc_driver = {
.driver = {
.name = "s5m-rtc",
- .owner = THIS_MODULE,
.pm = &s5m_rtc_pm_ops,
},
.probe = s5m_rtc_probe,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d0d2b047658b..2b81dd4baf17 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -782,7 +782,6 @@ static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);
static struct platform_driver sh_rtc_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &sh_rtc_pm_ops,
},
.remove = __exit_p(sh_rtc_remove),
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index d2ac6688e5c7..edc3b43282d4 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -444,7 +444,6 @@ static SIMPLE_DEV_PM_OPS(sirfsoc_rtc_pm_ops,
static struct platform_driver sirfsoc_rtc_driver = {
.driver = {
.name = "sirfsoc-rtc",
- .owner = THIS_MODULE,
.pm = &sirfsoc_rtc_pm_ops,
.of_match_table = sirfsoc_rtc_of_match,
},
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 2cd8ffe5c698..0479e807a776 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -344,13 +344,20 @@ static int snvs_rtc_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops snvs_rtc_pm_ops = {
.suspend_noirq = snvs_rtc_suspend,
.resume_noirq = snvs_rtc_resume,
};
+#define SNVS_RTC_PM_OPS (&snvs_rtc_pm_ops)
+
+#else
+
+#define SNVS_RTC_PM_OPS NULL
+
+#endif
+
static const struct of_device_id snvs_dt_ids[] = {
{ .compatible = "fsl,sec-v4.0-mon-rtc-lp", },
{ /* sentinel */ }
@@ -360,8 +367,7 @@ MODULE_DEVICE_TABLE(of, snvs_dt_ids);
static struct platform_driver snvs_rtc_driver = {
.driver = {
.name = "snvs_rtc",
- .owner = THIS_MODULE,
- .pm = &snvs_rtc_pm_ops,
+ .pm = SNVS_RTC_PM_OPS,
.of_match_table = snvs_dt_ids,
},
.probe = snvs_rtc_probe,
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index f7d8a6db8078..83a057a03060 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -54,7 +54,6 @@ static int __init starfire_rtc_probe(struct platform_device *pdev)
static struct platform_driver starfire_rtc_driver = {
.driver = {
.name = "rtc-starfire",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 35ed49ea1f81..0e93b714ee41 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -358,7 +358,6 @@ static struct platform_driver stk17ta8_rtc_driver = {
.remove = stk17ta8_rtc_remove,
.driver = {
.name = "stk17ta8",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index ea96492357b0..2939cdcb2688 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -341,7 +341,6 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.remove = stmp3xxx_rtc_remove,
.driver = {
.name = "stmp3xxx-rtc",
- .owner = THIS_MODULE,
.pm = &stmp3xxx_rtc_pm_ops,
.of_match_table = rtc_dt_ids,
},
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index bc97ff91341d..7c696c12f28f 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -95,7 +95,6 @@ static int __init sun4v_rtc_probe(struct platform_device *pdev)
static struct platform_driver sun4v_rtc_driver = {
.driver = {
.name = "rtc-sun4v",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index b6f21f73d508..6e678fa4dfaf 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -511,7 +511,6 @@ static struct platform_driver sunxi_rtc_driver = {
.remove = sunxi_rtc_remove,
.driver = {
.name = "sunxi-rtc",
- .owner = THIS_MODULE,
.of_match_table = sunxi_rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 76af92ad5a8a..d948277057d8 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -420,7 +420,6 @@ static struct platform_driver tegra_rtc_driver = {
.shutdown = tegra_rtc_shutdown,
.driver = {
.name = "tegra_rtc",
- .owner = THIS_MODULE,
.of_match_table = tegra_rtc_dt_match,
.pm = &tegra_rtc_pm_ops,
},
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 6599c20bc454..8f86fa91de1a 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -129,7 +129,6 @@ static struct platform_driver test_driver = {
.remove = test_remove,
.driver = {
.name = "rtc-test",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
index ff9632eb79f2..0b60867d8390 100644
--- a/drivers/rtc/rtc-tile.c
+++ b/drivers/rtc/rtc-tile.c
@@ -94,7 +94,6 @@ static int tile_rtc_probe(struct platform_device *dev)
static struct platform_driver tile_rtc_platform_driver = {
.driver = {
.name = "rtc-tile",
- .owner = THIS_MODULE,
},
.probe = tile_rtc_probe,
};
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 426901cef14f..3b6ce80a769c 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -337,7 +337,6 @@ static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_rtc_suspend,
static struct platform_driver tps6586x_rtc_driver = {
.driver = {
.name = "tps6586x-rtc",
- .owner = THIS_MODULE,
.pm = &tps6586x_pm_ops,
},
.probe = tps6586x_rtc_probe,
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2583349fbde5..f42aa2b2dcba 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -326,7 +326,6 @@ static struct platform_driver tps65910_rtc_driver = {
.probe = tps65910_rtc_probe,
.remove = tps65910_rtc_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "tps65910-rtc",
.pm = &tps65910_rtc_pm_ops,
},
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
index 3e400dce2d06..27e254cde715 100644
--- a/drivers/rtc/rtc-tps80031.c
+++ b/drivers/rtc/rtc-tps80031.c
@@ -324,7 +324,6 @@ static SIMPLE_DEV_PM_OPS(tps80031_pm_ops, tps80031_rtc_suspend,
static struct platform_driver tps80031_rtc_driver = {
.driver = {
.name = "tps80031-rtc",
- .owner = THIS_MODULE,
.pm = &tps80031_pm_ops,
},
.probe = tps80031_rtc_probe,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 1915464e4cd6..5baea3f54926 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -608,7 +608,6 @@ static struct platform_driver twl4030rtc_driver = {
.remove = twl_rtc_remove,
.shutdown = twl_rtc_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "twl_rtc",
.pm = &twl_rtc_pm_ops,
.of_match_table = of_match_ptr(twl_rtc_of_match),
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 2e678c681b13..cb7f94ede516 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -287,7 +287,6 @@ static struct platform_driver tx4939_rtc_driver = {
.remove = __exit_p(tx4939_rtc_remove),
.driver = {
.name = "tx4939rtc",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 25222cdccdc6..bfbfa7ed7bbf 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -382,7 +382,6 @@ static struct platform_driver rtc_device_driver = {
.remove = rtc_remove,
.driver = {
.name = "v3020",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 88c9c92e89fd..f64c282275b3 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -375,7 +375,6 @@ static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
.driver = {
.name = rtc_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index 051da968da6d..a58b6d17e6f0 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -277,7 +277,6 @@ static struct platform_driver vt8500_rtc_driver = {
.remove = vt8500_rtc_remove,
.driver = {
.name = "vt8500-rtc",
- .owner = THIS_MODULE,
.of_match_table = wmt_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 14129cc85bdb..65b432a096fe 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -264,7 +264,6 @@ static struct platform_driver xgene_rtc_driver = {
.probe = xgene_rtc_probe,
.remove = xgene_rtc_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "xgene-rtc",
.pm = &xgene_rtc_pm_ops,
.of_match_table = of_match_ptr(xgene_rtc_of_match),
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 329db997ee66..4abf11965484 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1377,6 +1377,20 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
"I/O error, retry");
break;
case -EINVAL:
+ /*
+ * device is not valid, so no I/O can be running;
+ * handle the CQR as successfully terminated
+ */
+ cqr->status = DASD_CQR_CLEARED;
+ cqr->stopclk = get_tod_clock();
+ cqr->starttime = 0;
+ /* no retries for invalid devices */
+ cqr->retries = -1;
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "EINVAL, handle as terminated");
+ /* fake rc to success */
+ rc = 0;
+ break;
case -EBUSY:
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"device busy, retry later");
@@ -1683,11 +1697,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED;
- if (cqr->callback_data == DASD_SLEEPON_START_TAG)
- cqr->callback_data = DASD_SLEEPON_END_TAG;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
- wake_up(&generic_waitq);
dasd_schedule_device_bh(device);
return;
}
@@ -2326,21 +2337,11 @@ retry:
return -EAGAIN;
/* normal recovery for basedev IO */
- if (__dasd_sleep_on_erp(cqr)) {
+ if (__dasd_sleep_on_erp(cqr))
+ /* handle erp first */
goto retry;
- /* remember that ERP was needed */
- rc = 1;
- /* skip processing for active cqr */
- if (cqr->status != DASD_CQR_TERMINATED &&
- cqr->status != DASD_CQR_NEED_ERP)
- break;
- }
}
- /* start ERP requests in upper loop */
- if (rc)
- goto retry;
-
return 0;
}
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f224d59c4b6b..90f39f79f5d7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,15 +99,37 @@ void dasd_gendisk_free(struct dasd_block *block)
int dasd_scan_partitions(struct dasd_block *block)
{
struct block_device *bdev;
+ int retry, rc;
+ retry = 5;
bdev = bdget_disk(block->gdp, 0);
- if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
+ if (!bdev) {
+ DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
+ "scan partitions error, bdget returned NULL");
return -ENODEV;
+ }
+
+ rc = blkdev_get(bdev, FMODE_READ, NULL);
+ if (rc < 0) {
+ DBF_DEV_EVENT(DBF_ERR, block->base,
+ "scan partitions error, blkdev_get returned %d",
+ rc);
+ return -ENODEV;
+ }
/*
* See fs/partition/check.c:register_disk,rescan_partitions
* Can't call rescan_partitions directly. Use ioctl.
*/
- ioctl_by_bdev(bdev, BLKRRPART, 0);
+ rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+ while (rc == -EBUSY && retry > 0) {
+ schedule();
+ rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+ retry--;
+ DBF_DEV_EVENT(DBF_ERR, block->base,
+ "scan partitions error, retry %d rc %d",
+ retry, rc);
+ }
+
/*
* Since the matching blkdev_put call to the blkdev_get in
* this function is not called before dasd_destroy_partitions
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 0f471750327e..b550c8c8d010 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1003,7 +1003,6 @@ static const struct dev_pm_ops dcssblk_pm_ops = {
static struct platform_driver dcssblk_pdrv = {
.driver = {
.name = "dcssblk",
- .owner = THIS_MODULE,
.pm = &dcssblk_pm_ops,
},
};
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab39629..75d9896deccb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
@@ -20,13 +21,18 @@
debug_info_t *scm_debug;
static int scm_major;
+static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
@@ -36,8 +42,8 @@ static void __scm_free_rq(struct scm_request *scmrq)
struct aob_rq_header *aobrq = to_aobrq(scmrq);
free_page((unsigned long) scmrq->aob);
- free_page((unsigned long) scmrq->aidaw);
__scm_free_rq_cluster(scmrq);
+ kfree(scmrq->request);
kfree(aobrq);
}
@@ -53,6 +59,8 @@ static void scm_free_rqs(void)
__scm_free_rq(scmrq);
}
spin_unlock_irq(&list_lock);
+
+ mempool_destroy(aidaw_pool);
}
static int __scm_alloc_rq(void)
@@ -65,17 +73,17 @@ static int __scm_alloc_rq(void)
return -ENOMEM;
scmrq = (void *) aobrq->data;
- scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
- if (!scmrq->aob || !scmrq->aidaw) {
- __scm_free_rq(scmrq);
- return -ENOMEM;
- }
+ if (!scmrq->aob)
+ goto free;
- if (__scm_alloc_rq_cluster(scmrq)) {
- __scm_free_rq(scmrq);
- return -ENOMEM;
- }
+ scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+ GFP_KERNEL);
+ if (!scmrq->request)
+ goto free;
+
+ if (__scm_alloc_rq_cluster(scmrq))
+ goto free;
INIT_LIST_HEAD(&scmrq->list);
spin_lock_irq(&list_lock);
@@ -83,12 +91,19 @@ static int __scm_alloc_rq(void)
spin_unlock_irq(&list_lock);
return 0;
+free:
+ __scm_free_rq(scmrq);
+ return -ENOMEM;
}
static int scm_alloc_rqs(unsigned int nrqs)
{
int ret = 0;
+ aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+ if (!aidaw_pool)
+ return -ENOMEM;
+
while (nrqs-- && !ret)
ret = __scm_alloc_rq();
@@ -112,6 +127,18 @@ out:
static void scm_request_done(struct scm_request *scmrq)
{
unsigned long flags;
+ struct msb *msb;
+ u64 aidaw;
+ int i;
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ msb = &scmrq->aob->msb[i];
+ aidaw = msb->data_addr;
+
+ if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+ IS_ALIGNED(aidaw, PAGE_SIZE))
+ mempool_free(virt_to_page(aidaw), aidaw_pool);
+ }
spin_lock_irqsave(&list_lock, flags);
list_add(&scmrq->list, &inactive_requests);
@@ -123,48 +150,90 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}
-static void scm_request_prepare(struct scm_request *scmrq)
+static inline struct aidaw *scm_aidaw_alloc(void)
+{
+ struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+ return page ? page_address(page) : NULL;
+}
+
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+ unsigned long _aidaw = (unsigned long) aidaw;
+ unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+ return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+ struct aidaw *aidaw;
+
+ if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+ return scmrq->next_aidaw;
+
+ aidaw = scm_aidaw_alloc();
+ if (aidaw)
+ memset(aidaw, 0, PAGE_SIZE);
+ return aidaw;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_device *scmdev = bdev->gendisk->private_data;
- struct aidaw *aidaw = scmrq->aidaw;
- struct msb *msb = &scmrq->aob->msb[0];
+ int pos = scmrq->aob->request.msb_count;
+ struct msb *msb = &scmrq->aob->msb[pos];
+ struct request *req = scmrq->request[pos];
struct req_iterator iter;
+ struct aidaw *aidaw;
struct bio_vec bv;
+ aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+ if (!aidaw)
+ return -ENOMEM;
+
msb->bs = MSB_BS_4K;
- scmrq->aob->request.msb_count = 1;
- msb->scm_addr = scmdev->address +
- ((u64) blk_rq_pos(scmrq->request) << 9);
- msb->oc = (rq_data_dir(scmrq->request) == READ) ?
- MSB_OC_READ : MSB_OC_WRITE;
+ scmrq->aob->request.msb_count++;
+ msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
msb->data_addr = (u64) aidaw;
- rq_for_each_segment(bv, scmrq->request, iter) {
+ rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
}
+
+ scmrq->next_aidaw = aidaw;
+ return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+ struct request *req)
+{
+ scmrq->request[scmrq->aob->request.msb_count] = req;
}
static inline void scm_request_init(struct scm_blk_dev *bdev,
- struct scm_request *scmrq,
- struct request *req)
+ struct scm_request *scmrq)
{
struct aob_rq_header *aobrq = to_aobrq(scmrq);
struct aob *aob = scmrq->aob;
+ memset(scmrq->request, 0,
+ nr_requests_per_io * sizeof(scmrq->request[0]));
memset(aob, 0, sizeof(*aob));
- memset(scmrq->aidaw, 0, PAGE_SIZE);
aobrq->scmdev = bdev->scmdev;
aob->request.cmd_code = ARQB_CMD_MOVE;
aob->request.data = (u64) aobrq;
- scmrq->request = req;
scmrq->bdev = bdev;
scmrq->retries = 4;
scmrq->error = 0;
+ /* We don't use all msbs - place aidaws at the end of the aob page. */
+ scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
scm_request_cluster_init(scmrq);
}
@@ -180,9 +249,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
void scm_request_requeue(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
+ int i;
scm_release_cluster(scmrq);
- blk_requeue_request(bdev->rq, scmrq->request);
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+ blk_requeue_request(bdev->rq, scmrq->request[i]);
+
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
scm_ensure_queue_restart(bdev);
@@ -191,20 +263,41 @@ void scm_request_requeue(struct scm_request *scmrq)
void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
+ int i;
scm_release_cluster(scmrq);
- blk_end_request_all(scmrq->request, scmrq->error);
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+ blk_end_request_all(scmrq->request[i], scmrq->error);
+
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
}
+static int scm_request_start(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ int ret;
+
+ atomic_inc(&bdev->queued_reqs);
+ if (!scmrq->aob->request.msb_count) {
+ scm_request_requeue(scmrq);
+ return -EINVAL;
+ }
+
+ ret = eadm_start_aob(scmrq->aob);
+ if (ret) {
+ SCM_LOG(5, "no subchannel");
+ scm_request_requeue(scmrq);
+ }
+ return ret;
+}
+
static void scm_blk_request(struct request_queue *rq)
{
struct scm_device *scmdev = rq->queuedata;
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
- struct scm_request *scmrq;
+ struct scm_request *scmrq = NULL;
struct request *req;
- int ret;
while ((req = blk_peek_request(rq))) {
if (req->cmd_type != REQ_TYPE_FS) {
@@ -214,39 +307,64 @@ static void scm_blk_request(struct request_queue *rq)
continue;
}
- if (!scm_permit_request(bdev, req)) {
- scm_ensure_queue_restart(bdev);
- return;
- }
- scmrq = scm_request_fetch();
+ if (!scm_permit_request(bdev, req))
+ goto out;
+
if (!scmrq) {
- SCM_LOG(5, "no request");
- scm_ensure_queue_restart(bdev);
- return;
+ scmrq = scm_request_fetch();
+ if (!scmrq) {
+ SCM_LOG(5, "no request");
+ goto out;
+ }
+ scm_request_init(bdev, scmrq);
}
- scm_request_init(bdev, scmrq, req);
+ scm_request_set(scmrq, req);
+
if (!scm_reserve_cluster(scmrq)) {
SCM_LOG(5, "cluster busy");
+ scm_request_set(scmrq, NULL);
+ if (scmrq->aob->request.msb_count)
+ goto out;
+
scm_request_done(scmrq);
return;
}
+
if (scm_need_cluster_request(scmrq)) {
- atomic_inc(&bdev->queued_reqs);
- blk_start_request(req);
- scm_initiate_cluster_request(scmrq);
- return;
+ if (scmrq->aob->request.msb_count) {
+ /* Start cluster requests separately. */
+ scm_request_set(scmrq, NULL);
+ if (scm_request_start(scmrq))
+ return;
+ } else {
+ atomic_inc(&bdev->queued_reqs);
+ blk_start_request(req);
+ scm_initiate_cluster_request(scmrq);
+ }
+ scmrq = NULL;
+ continue;
+ }
+
+ if (scm_request_prepare(scmrq)) {
+ SCM_LOG(5, "aidaw alloc failed");
+ scm_request_set(scmrq, NULL);
+ goto out;
}
- scm_request_prepare(scmrq);
- atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
- ret = eadm_start_aob(scmrq->aob);
- if (ret) {
- SCM_LOG(5, "no subchannel");
- scm_request_requeue(scmrq);
+ if (scmrq->aob->request.msb_count < nr_requests_per_io)
+ continue;
+
+ if (scm_request_start(scmrq))
return;
- }
+
+ scmrq = NULL;
}
+out:
+ if (scmrq)
+ scm_request_start(scmrq);
+ else
+ scm_ensure_queue_restart(bdev);
}
static void __scmrq_log_error(struct scm_request *scmrq)
@@ -443,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
spin_unlock_irqrestore(&bdev->lock, flags);
}
+static bool __init scm_blk_params_valid(void)
+{
+ if (!nr_requests_per_io || nr_requests_per_io > 64)
+ return false;
+
+ return scm_cluster_size_valid();
+}
+
static int __init scm_blk_init(void)
{
int ret = -EINVAL;
- if (!scm_cluster_size_valid())
+ if (!scm_blk_params_valid())
goto out;
ret = register_blkdev(0, "scm");
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5..09218cdc5129 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -30,8 +30,8 @@ struct scm_blk_dev {
struct scm_request {
struct scm_blk_dev *bdev;
- struct request *request;
- struct aidaw *aidaw;
+ struct aidaw *next_aidaw;
+ struct request **request;
struct aob *aob;
struct list_head list;
u8 retries;
@@ -55,6 +55,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
+
int scm_drv_init(void);
void scm_drv_cleanup(void);
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
scmrq->cluster.state = CLUSTER_NONE;
}
-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
{
unsigned long firstA, lastA, firstB, lastB;
- firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
- lastA = (((u64) blk_rq_pos(A->request) << 9) +
- blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+ firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+ lastA = (((u64) blk_rq_pos(A) << 9) +
+ blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
- firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
- lastB = (((u64) blk_rq_pos(B->request) << 9) +
- blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+ firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+ lastB = (((u64) blk_rq_pos(B) << 9) +
+ blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
return (firstB <= lastA && firstA <= lastB);
}
bool scm_reserve_cluster(struct scm_request *scmrq)
{
+ struct request *req = scmrq->request[scmrq->aob->request.msb_count];
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_request *iter;
+ int pos, add = 1;
if (write_cluster_size == 0)
return true;
spin_lock(&bdev->lock);
list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
- if (clusters_intersect(scmrq, iter) &&
- (rq_data_dir(scmrq->request) == WRITE ||
- rq_data_dir(iter->request) == WRITE)) {
- spin_unlock(&bdev->lock);
- return false;
+ if (iter == scmrq) {
+ /*
+ * We don't have to use clusters_intersect here, since
+ * cluster requests are always started separately.
+ */
+ add = 0;
+ continue;
+ }
+ for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+ if (clusters_intersect(req, iter->request[pos]) &&
+ (rq_data_dir(req) == WRITE ||
+ rq_data_dir(iter->request[pos]) == WRITE)) {
+ spin_unlock(&bdev->lock);
+ return false;
+ }
}
}
- list_add(&scmrq->cluster.list, &bdev->cluster_list);
+ if (add)
+ list_add(&scmrq->cluster.list, &bdev->cluster_list);
spin_unlock(&bdev->lock);
return true;
@@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_device *scmdev = bdev->gendisk->private_data;
- struct request *req = scmrq->request;
- struct aidaw *aidaw = scmrq->aidaw;
+ struct request *req = scmrq->request[0];
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
+ struct aidaw *aidaw;
struct bio_vec bv;
int i = 0;
u64 addr;
@@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
scmrq->cluster.state = CLUSTER_READ;
/* fall through */
case CLUSTER_READ:
- scmrq->aob->request.msb_count = 1;
msb->bs = MSB_BS_4K;
msb->oc = MSB_OC_READ;
msb->flags = MSB_FLAG_IDA;
- msb->data_addr = (u64) aidaw;
msb->blk_count = write_cluster_size;
addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
CLUSTER_SIZE))
msb->blk_count = 2 * write_cluster_size;
+ aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+ if (!aidaw)
+ return -ENOMEM;
+
+ scmrq->aob->request.msb_count = 1;
+ msb->data_addr = (u64) aidaw;
for (i = 0; i < msb->blk_count; i++) {
aidaw->data_addr = (u64) scmrq->cluster.buf[i];
aidaw++;
@@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
break;
case CLUSTER_WRITE:
+ aidaw = (void *) msb->data_addr;
msb->oc = MSB_OC_WRITE;
for (addr = msb->scm_addr;
@@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
}
break;
}
+ return 0;
}
bool scm_need_cluster_request(struct scm_request *scmrq)
{
- if (rq_data_dir(scmrq->request) == READ)
+ int pos = scmrq->aob->request.msb_count;
+
+ if (rq_data_dir(scmrq->request[pos]) == READ)
return false;
- return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+ return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}
/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
- scm_prepare_cluster_request(scmrq);
+ if (scm_prepare_cluster_request(scmrq))
+ goto requeue;
if (eadm_start_aob(scmrq->aob))
- scm_request_requeue(scmrq);
+ goto requeue;
+ return;
+requeue:
+ scm_request_requeue(scmrq);
}
bool scm_test_cluster_request(struct scm_request *scmrq)
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 9e0de9c9a6fc..7d4e9397ac31 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -417,7 +417,6 @@ static const struct dev_pm_ops xpram_pm_ops = {
static struct platform_driver xpram_pdrv = {
.driver = {
.name = XPRAM_NAME,
- .owner = THIS_MODULE,
.pm = &xpram_pm_ops,
},
};
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index db2cb1f8a1b5..a5c6f7e157aa 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,16 @@ config SCLP_ASYNC
want for inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
+config SCLP_ASYNC_ID
+ string "Component ID for Call Home"
+ depends on SCLP_ASYNC
+ default "000000000"
+ help
+ The Component ID for Call Home is used to identify the correct
+ problem reporting queue that the call home records should be sent to.
+
+ If you are unsure, please use the default value "000000000".
+
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 668b32b0dc1d..9b5d1138b2e2 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -334,7 +334,6 @@ static const struct dev_pm_ops monwriter_pm_ops = {
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
- .owner = THIS_MODULE,
.pm = &monwriter_pm_ops,
},
};
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index c316051d9bda..41ba56d2e752 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1167,7 +1167,6 @@ static const struct attribute_group *sclp_drv_attr_groups[] = {
static struct platform_driver sclp_pdrv = {
.driver = {
.name = "sclp",
- .owner = THIS_MODULE,
.pm = &sclp_pm_ops,
.groups = sclp_drv_attr_groups,
},
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 5f9f929e891c..19c25427f27f 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -137,7 +137,8 @@ static int sclp_async_send_wait(char *message)
* Retain Queue
* e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
*/
- strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+ strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
+ sizeof(sccb->evbuf.comp_id));
sccb->evbuf.header.length = sizeof(sccb->evbuf);
sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
sccb->header.function_code = SCLP_NORMAL_WRITE;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 327cb19ad0b0..d3d1936057b4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1090,7 +1090,7 @@ tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
"channel path 0x%x on CU",
sense->fmt.f71.md[1]);
else
- snprintf(service, BUFSIZE, "Repair will disable cannel"
+ snprintf(service, BUFSIZE, "Repair will disable channel"
" paths (0x%x-0x%x) on CU",
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
@@ -1481,7 +1481,7 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
}
if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
- DBF_EVENT(2, "cannel end\n");
+ DBF_EVENT(2, "channel end\n");
return TAPE_IO_PENDING;
}
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 37f0834300ea..bee8c11cd086 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -31,7 +31,7 @@
MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");
-#define EADM_TIMEOUT (5 * HZ)
+#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 99485415dcc2..91e97ec01418 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -44,6 +44,7 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>
+#include <linux/crypto.h>
#include "ap_bus.h"
@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
"Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("z90crypt");
+MODULE_ALIAS_CRYPTO("z90crypt");
/*
* Module parameter
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 643129070c51..dd65c8b4c7fe 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -80,7 +80,7 @@ static unsigned desc_size(const struct kvm_device_desc *desc)
}
/* This gets the device's feature bits. */
-static u32 kvm_get_features(struct virtio_device *vdev)
+static u64 kvm_get_features(struct virtio_device *vdev)
{
unsigned int i;
u32 features = 0;
@@ -93,7 +93,7 @@ static u32 kvm_get_features(struct virtio_device *vdev)
return features;
}
-static void kvm_finalize_features(struct virtio_device *vdev)
+static int kvm_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
@@ -103,12 +103,17 @@ static void kvm_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
memset(out_features, 0, desc->feature_len);
bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++) {
- if (test_bit(i, vdev->features))
+ if (__virtio_test_bit(vdev, i))
out_features[i / 8] |= (1 << (i % 8));
}
+
+ return 0;
}
/*
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index bda52f18e967..71d7802aa8b4 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -55,6 +55,7 @@ struct virtio_ccw_device {
struct ccw_device *cdev;
__u32 curr_io;
int err;
+ unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
struct list_head virtqueues;
@@ -67,13 +68,22 @@ struct virtio_ccw_device {
void *airq_info;
};
-struct vq_info_block {
+struct vq_info_block_legacy {
__u64 queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
+struct vq_info_block {
+ __u64 desc;
+ __u32 res0;
+ __u16 index;
+ __u16 num;
+ __u64 avail;
+ __u64 used;
+} __packed;
+
struct virtio_feature_desc {
__u32 features;
__u8 index;
@@ -86,11 +96,23 @@ struct virtio_thinint_area {
u8 isc;
} __packed;
+struct virtio_rev_info {
+ __u16 revision;
+ __u16 length;
+ __u8 data[];
+};
+
+/* the highest virtio-ccw revision we support */
+#define VIRTIO_CCW_REV_MAX 1
+
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
void *queue;
- struct vq_info_block *info_block;
+ union {
+ struct vq_info_block s;
+ struct vq_info_block_legacy l;
+ } *info_block;
int bit_nr;
struct list_head node;
long cookie;
@@ -122,6 +144,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73
+#define CCW_CMD_SET_VIRTIO_REV 0x83
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
@@ -134,6 +157,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
+#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -399,13 +423,22 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
spin_unlock_irqrestore(&vcdev->lock, flags);
/* Release from host. */
- info->info_block->queue = 0;
- info->info_block->align = 0;
- info->info_block->index = index;
- info->info_block->num = 0;
+ if (vcdev->revision == 0) {
+ info->info_block->l.queue = 0;
+ info->info_block->l.align = 0;
+ info->info_block->l.index = index;
+ info->info_block->l.num = 0;
+ ccw->count = sizeof(info->info_block->l);
+ } else {
+ info->info_block->s.desc = 0;
+ info->info_block->s.index = index;
+ info->info_block->s.num = 0;
+ info->info_block->s.avail = 0;
+ info->info_block->s.used = 0;
+ ccw->count = sizeof(info->info_block->s);
+ }
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->count = sizeof(*info->info_block);
ccw->cda = (__u32)(unsigned long)(info->info_block);
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
@@ -488,13 +521,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
}
/* Register it with the host. */
- info->info_block->queue = (__u64)info->queue;
- info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
- info->info_block->index = i;
- info->info_block->num = info->num;
+ if (vcdev->revision == 0) {
+ info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
+ info->info_block->l.index = i;
+ info->info_block->l.num = info->num;
+ ccw->count = sizeof(info->info_block->l);
+ } else {
+ info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.index = i;
+ info->info_block->s.num = info->num;
+ info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ ccw->count = sizeof(info->info_block->s);
+ }
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->count = sizeof(*info->info_block);
ccw->cda = (__u32)(unsigned long)(info->info_block);
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
@@ -660,11 +702,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
kfree(ccw);
}
-static u32 virtio_ccw_get_features(struct virtio_device *vdev)
+static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
- int ret, rc;
+ int ret;
+ u64 rc;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -677,7 +720,6 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
goto out_free;
}
/* Read the feature bits from the host. */
- /* TODO: Features > 32 bits */
features->index = 0;
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
@@ -691,46 +733,79 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
rc = le32_to_cpu(features->features);
+ if (vcdev->revision == 0)
+ goto out_free;
+
+ /* Read second half of the feature bits from the host. */
+ features->index = 1;
+ ccw->cmd_code = CCW_CMD_READ_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+ if (ret == 0)
+ rc |= (u64)le32_to_cpu(features->features) << 32;
+
out_free:
kfree(features);
kfree(ccw);
return rc;
}
-static void virtio_ccw_finalize_features(struct virtio_device *vdev)
+static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct virtio_feature_desc *features;
- int i;
struct ccw1 *ccw;
+ int ret;
+
+ if (vcdev->revision >= 1 &&
+ !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+ dev_err(&vdev->dev, "virtio: device uses revision 1 "
+ "but does not have VIRTIO_F_VERSION_1\n");
+ return -EINVAL;
+ }
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
- return;
+ return -ENOMEM;
features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
- if (!features)
+ if (!features) {
+ ret = -ENOMEM;
goto out_free;
-
+ }
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
- i++) {
- int highbits = i % 2 ? 32 : 0;
- features->index = i;
- features->features = cpu_to_le32(vdev->features[i / 2]
- >> highbits);
- /* Write the feature bits to the host. */
- ccw->cmd_code = CCW_CMD_WRITE_FEAT;
- ccw->flags = 0;
- ccw->count = sizeof(*features);
- ccw->cda = (__u32)(unsigned long)features;
- ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
- }
+ features->index = 0;
+ features->features = cpu_to_le32((u32)vdev->features);
+ /* Write the first half of the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+ if (ret)
+ goto out_free;
+
+ if (vcdev->revision == 0)
+ goto out_free;
+
+ features->index = 1;
+ features->features = cpu_to_le32(vdev->features >> 32);
+ /* Write the second half of the feature bits to the host. */
+ ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+ ccw->flags = 0;
+ ccw->count = sizeof(*features);
+ ccw->cda = (__u32)(unsigned long)features;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+
out_free:
kfree(features);
kfree(ccw);
+
+ return ret;
}
static void virtio_ccw_get_config(struct virtio_device *vdev,
@@ -806,7 +881,9 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ u8 old_status = *vcdev->status;
struct ccw1 *ccw;
+ int ret;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -818,7 +895,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->flags = 0;
ccw->count = sizeof(status);
ccw->cda = (__u32)(unsigned long)vcdev->status;
- ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ /* Write failed? We assume status is unchanged. */
+ if (ret)
+ *vcdev->status = old_status;
kfree(ccw);
}
@@ -919,6 +999,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
case VIRTIO_CCW_DOING_RESET:
case VIRTIO_CCW_DOING_READ_VQ_CONF:
case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+ case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
vcdev->curr_io &= ~activity;
wake_up(&vcdev->wait_q);
break;
@@ -1034,6 +1115,51 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
return 0;
}
+static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
+{
+ struct virtio_rev_info *rev;
+ struct ccw1 *ccw;
+ int ret;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return -ENOMEM;
+ rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ if (!rev) {
+ kfree(ccw);
+ return -ENOMEM;
+ }
+
+ /* Set transport revision */
+ ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
+ ccw->flags = 0;
+ ccw->count = sizeof(*rev);
+ ccw->cda = (__u32)(unsigned long)rev;
+
+ vcdev->revision = VIRTIO_CCW_REV_MAX;
+ do {
+ rev->revision = vcdev->revision;
+ /* none of our supported revisions carry payload */
+ rev->length = 0;
+ ret = ccw_io_helper(vcdev, ccw,
+ VIRTIO_CCW_DOING_SET_VIRTIO_REV);
+ if (ret == -EOPNOTSUPP) {
+ if (vcdev->revision == 0)
+ /*
+ * The host device does not support setting
+ * the revision: let's operate it in legacy
+ * mode.
+ */
+ ret = 0;
+ else
+ vcdev->revision--;
+ }
+ } while (ret == -EOPNOTSUPP);
+
+ kfree(ccw);
+ kfree(rev);
+ return ret;
+}
static int virtio_ccw_online(struct ccw_device *cdev)
{
@@ -1074,6 +1200,15 @@ static int virtio_ccw_online(struct ccw_device *cdev)
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
+
+ if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
+ vcdev->revision = 0;
+ } else {
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ goto out_free;
+ }
+
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 8b3f55991805..f1b5111bbaba 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -71,7 +71,7 @@ config CLAW
config QETH
def_tristate y
prompt "Gigabit Ethernet device support"
- depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
+ depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
help
This driver supports the IBM System z OSA Express adapters
in QDIO mode (all media types), HiperSockets interfaces and z/VM
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 6bcfbbb20f04..47773c4d235a 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -44,8 +44,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
return -ENODEV;
}
- rc = sscanf(buf, "%u", &bs1);
- if (rc != 1)
+ rc = kstrtouint(buf, 0, &bs1);
+ if (rc)
goto einval;
if (bs1 > CTCM_BUFSIZE_LIMIT)
goto einval;
@@ -151,8 +151,8 @@ static ssize_t ctcm_proto_store(struct device *dev,
if (!priv)
return -ENODEV;
- rc = sscanf(buf, "%d", &value);
- if ((rc != 1) ||
+ rc = kstrtoint(buf, 0, &value);
+ if (rc ||
!((value == CTCM_PROTO_S390) ||
(value == CTCM_PROTO_LINUX) ||
(value == CTCM_PROTO_MPC) ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0a7d87c372b8..92190aa20b9f 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1943,15 +1943,16 @@ static ssize_t
lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct lcs_card *card;
- int value, rc;
+ int rc;
+ s16 value;
card = dev_get_drvdata(dev);
if (!card)
return 0;
- rc = sscanf(buf, "%d", &value);
- if (rc != 1)
+ rc = kstrtos16(buf, 0, &value);
+ if (rc)
return -EINVAL;
/* TODO: sanity checks */
card->portno = value;
@@ -2007,8 +2008,8 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
if (!card)
return 0;
- rc = sscanf(buf, "%u", &value);
- if (rc != 1)
+ rc = kstrtouint(buf, 0, &value);
+ if (rc)
return -EINVAL;
/* TODO: sanity checks */
card->lancmd_timeout = value;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7646ce3d659..7a8bb9f78e76 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -380,11 +380,6 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
-static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
-{
- return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
-}
-
enum qeth_qdio_buffer_states {
/*
* inbound: read out by driver; owned by hardware in order to be filled
@@ -843,13 +838,6 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
-static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
-{
- struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
- dev_get_drvdata(&cdev->dev))->dev);
- return card;
-}
-
static inline int qeth_get_micros(void)
{
return (int) (get_tod_clock() >> 12);
@@ -894,7 +882,6 @@ const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
-void qeth_buffer_reclaim_work(struct work_struct *);
/* exports for qeth discipline device drivers */
extern struct qeth_card_list_struct qeth_core_card_list;
@@ -913,7 +900,6 @@ int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_startlan(struct qeth_card *);
-int qeth_send_stoplan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -954,8 +940,6 @@ int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
-int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fd22c811cbe1..f407e3763432 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -718,6 +718,13 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
+static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
+ dev_get_drvdata(&cdev->dev))->dev);
+ return card;
+}
+
static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
__u32 len)
{
@@ -1431,6 +1438,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
}
}
+static void qeth_buffer_reclaim_work(struct work_struct *);
static int qeth_setup_card(struct qeth_card *card)
{
@@ -3232,7 +3240,7 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
-void qeth_buffer_reclaim_work(struct work_struct *work)
+static void qeth_buffer_reclaim_work(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
buffer_reclaim_work.work);
@@ -4126,7 +4134,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
@@ -4493,13 +4501,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -4717,7 +4725,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
return 0;
}
-int qeth_query_card_info(struct qeth_card *card,
+static int qeth_query_card_info(struct qeth_card *card,
struct carrier_info *carrier_info)
{
struct qeth_cmd_buffer *iob;
@@ -4730,7 +4738,6 @@ int qeth_query_card_info(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
(void *)carrier_info);
}
-EXPORT_SYMBOL_GPL(qeth_query_card_info);
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
@@ -5113,6 +5120,11 @@ static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
return 0;
}
+static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+ return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
+}
+
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qeth_qdio_buffer *qethbuffer,
struct qdio_buffer_element **__element, int *__offset,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c2679bfe7f66..d02cd1a67943 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1512,7 +1512,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "brstchng");
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
- QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length);
+ QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
}
extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 29c1c00e3a0f..551a4b4c03fd 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -42,10 +42,6 @@ struct qeth_ipato_entry {
};
-void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
-int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
-void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
-int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
int qeth_l3_create_device_attributes(struct device *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index afebb9709763..625227ad16ee 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -55,12 +55,12 @@ static int qeth_l3_isxdigit(char *buf)
return 1;
}
-void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
+static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
}
-int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
+static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
{
int count = 0, rc = 0;
unsigned int in[4];
@@ -78,12 +78,12 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
return 0;
}
-void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
+static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%pI6", addr);
}
-int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
+static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
{
const char *end, *end_tmp, *start;
__u16 *in;
@@ -2502,7 +2502,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
rc = -EFAULT;
goto free_and_out;
}
- QETH_CARD_TEXT_(card, 4, "qacts");
+ QETH_CARD_TEXT(card, 4, "qacts");
}
free_and_out:
kfree(qinfo.udata);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 812b5f0361b6..129967ad345d 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -413,7 +413,6 @@ MODULE_DEVICE_TABLE(of, bbc_i2c_match);
static struct platform_driver bbc_i2c_driver = {
.driver = {
.name = "bbc_i2c",
- .owner = THIS_MODULE,
.of_match_table = bbc_i2c_match,
},
.probe = bbc_i2c_probe,
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 2b0ce7c350ee..33fbe8249fd5 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -263,7 +263,6 @@ MODULE_DEVICE_TABLE(of, d7s_match);
static struct platform_driver d7s_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = d7s_match,
},
.probe = d7s_probe,
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index af15a2fdab5e..e244cf3d9ec8 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1130,7 +1130,6 @@ MODULE_DEVICE_TABLE(of, envctrl_match);
static struct platform_driver envctrl_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = envctrl_match,
},
.probe = envctrl_probe,
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 25c738e9ef19..206ef4232adf 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -207,7 +207,6 @@ MODULE_DEVICE_TABLE(of, flash_match);
static struct platform_driver flash_driver = {
.driver = {
.name = "flash",
- .owner = THIS_MODULE,
.of_match_table = flash_match,
},
.probe = flash_probe,
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index b7acafc85099..57696fc0b482 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -425,7 +425,6 @@ MODULE_DEVICE_TABLE(of, uctrl_match);
static struct platform_driver uctrl_driver = {
.driver = {
.name = "uctrl",
- .owner = THIS_MODULE,
.of_match_table = uctrl_match,
},
.probe = uctrl_probe,
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index dd5b64726ddc..e6375b4de79e 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -276,7 +276,6 @@ static struct platform_driver amiga_a3000_scsi_driver = {
.remove = __exit_p(amiga_a3000_scsi_remove),
.driver = {
.name = "amiga-a3000-scsi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index f5a2ab41543b..66c573093901 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -112,7 +112,6 @@ static struct platform_driver amiga_a4000t_scsi_driver = {
.remove = __exit_p(amiga_a4000t_scsi_remove),
.driver = {
.name = "amiga-a4000t-scsi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 5ff1ce7ba1f4..cdd4ab683be9 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -373,10 +373,10 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
if (unlikely(task->ata_task.device_control_reg_update))
scb->header.opcode = CONTROL_ATA_DEV;
- else if (dev->sata_dev.command_set == ATA_COMMAND_SET)
- scb->header.opcode = INITIATE_ATA_TASK;
- else
+ else if (dev->sata_dev.class == ATA_DEV_ATAPI)
scb->header.opcode = INITIATE_ATAPI_TASK;
+ else
+ scb->header.opcode = INITIATE_ATA_TASK;
scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
if (dev->port->oob_mode == SAS_OOB_MODE)
@@ -387,7 +387,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
if (likely(!task->ata_task.device_control_reg_update))
scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
16);
scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
@@ -399,7 +399,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
if (task->ata_task.dma_xfer)
flags |= DATA_XFER_MODE_DMA;
if (task->ata_task.use_ncq &&
- dev->sata_dev.command_set != ATAPI_COMMAND_SET)
+ dev->sata_dev.class != ATA_DEV_ATAPI)
flags |= ATA_Q_TYPE_NCQ;
flags |= data_dir_flags[task->data_dir];
scb->ata_task.ata_flags = flags;
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index 1e3f96adf9da..0f846ae2f918 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -105,7 +105,6 @@ bvme6000_device_remove(struct platform_device *dev)
static struct platform_driver bvme6000_scsi_driver = {
.driver = {
.name = "bvme6000-scsi",
- .owner = THIS_MODULE,
},
.probe = bvme6000_probe,
.remove = bvme6000_device_remove,
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 065a87ace623..2d1c4ebd40f9 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -451,9 +451,9 @@ csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
- if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
- FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
goto out_free;
}
@@ -526,9 +526,9 @@ csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
/* Process Mbox response of VNP command */
rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
- if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
- FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
ret = -EINVAL;
}
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 0eaec4748957..9ab997e18b20 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -47,7 +47,6 @@
#include "csio_lnode.h"
#include "csio_rnode.h"
-int csio_force_master;
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;
@@ -650,10 +649,10 @@ static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
csio_info(hw, "%s: %u.%u.%u.%u\n", str,
- FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
- FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
- FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
- FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+ FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}
/*
@@ -706,9 +705,9 @@ csio_hw_check_fw_version(struct csio_hw *hw)
if (ret)
return ret;
- major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
- minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
- micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
+ major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
+ minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
+ micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
csio_err(hw, "card FW has major version %u, driver wants %u\n",
@@ -889,7 +888,6 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
struct csio_mb *mbp;
int rv = 0;
- enum csio_dev_master master;
enum fw_retval retval;
uint8_t mpfn;
char state_str[16];
@@ -904,11 +902,9 @@ csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
goto out;
}
- master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;
-
retry:
csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
- hw->pfn, master, NULL);
+ hw->pfn, CSIO_MASTER_MAY, NULL);
rv = csio_mb_issue(hw, mbp);
if (rv) {
@@ -1170,7 +1166,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
}
csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
- PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
+ PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
NULL);
if (csio_mb_issue(hw, mbp)) {
@@ -1370,13 +1366,13 @@ csio_hw_fw_config_file(struct csio_hw *hw,
caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
caps_cmd->op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
caps_cmd->cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
FW_LEN16(*caps_cmd));
if (csio_mb_issue(hw, mbp)) {
@@ -1407,9 +1403,9 @@ csio_hw_fw_config_file(struct csio_hw *hw,
* And now tell the firmware to use the configuration we just loaded.
*/
caps_cmd->op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
if (csio_mb_issue(hw, mbp)) {
@@ -1678,7 +1674,7 @@ csio_get_fcoe_resinfo(struct csio_hw *hw)
}
rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
- retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
if (retval != FW_SUCCESS) {
csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
retval);
@@ -1723,8 +1719,8 @@ csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
* Find out whether we're dealing with a version of
* the firmware which has configuration file support.
*/
- _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ _param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
ARRAY_SIZE(_param), _param, NULL, false, NULL);
@@ -1781,8 +1777,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
goto leave;
}
- mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
- maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+ mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
ret = csio_memory_write(hw, mtype, maddr,
cf->size + value_to_add, cfg_data);
@@ -1871,8 +1867,8 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
goto bye;
}
} else {
- mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
- maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+ mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
using_flash = 0;
}
@@ -1998,13 +1994,13 @@ csio_hw_flash_fw(struct csio_hw *hw)
hdr = (const struct fw_hdr *)fw->data;
fw_ver = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
+ if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
return -EINVAL; /* wrong major version, won't do */
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
+ if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
fw_ver > hw->fwrev) {
ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
/*force=*/false);
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 5db2d85195b1..68248da1b9af 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -110,7 +110,6 @@ struct csio_scsi_cpu_info {
};
extern int csio_dbg_level;
-extern int csio_force_master;
extern unsigned int csio_port_mask;
extern int csio_msi;
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index bca0de61ae80..4752fed476df 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -36,60 +36,13 @@
#include "csio_defs.h"
-/* FCoE device IDs for T4 */
-#define CSIO_DEVID_T440DBG_FCOE 0x4600
-#define CSIO_DEVID_T420CR_FCOE 0x4601
-#define CSIO_DEVID_T422CR_FCOE 0x4602
-#define CSIO_DEVID_T440CR_FCOE 0x4603
-#define CSIO_DEVID_T420BCH_FCOE 0x4604
-#define CSIO_DEVID_T440BCH_FCOE 0x4605
-#define CSIO_DEVID_T440CH_FCOE 0x4606
-#define CSIO_DEVID_T420SO_FCOE 0x4607
-#define CSIO_DEVID_T420CX_FCOE 0x4608
-#define CSIO_DEVID_T420BT_FCOE 0x4609
-#define CSIO_DEVID_T404BT_FCOE 0x460A
-#define CSIO_DEVID_B420_FCOE 0x460B
-#define CSIO_DEVID_B404_FCOE 0x460C
-#define CSIO_DEVID_T480CR_FCOE 0x460D
-#define CSIO_DEVID_T440LPCR_FCOE 0x460E
-#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
-#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
-#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
-#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
-#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
-#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
-#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
-#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
-#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
-
-/* FCoE device IDs for T5 */
-#define CSIO_DEVID_T580DBG_FCOE 0x5600
-#define CSIO_DEVID_T520CR_FCOE 0x5601
-#define CSIO_DEVID_T522CR_FCOE 0x5602
-#define CSIO_DEVID_T540CR_FCOE 0x5603
-#define CSIO_DEVID_T520BCH_FCOE 0x5604
-#define CSIO_DEVID_T540BCH_FCOE 0x5605
-#define CSIO_DEVID_T540CH_FCOE 0x5606
-#define CSIO_DEVID_T520SO_FCOE 0x5607
-#define CSIO_DEVID_T520CX_FCOE 0x5608
-#define CSIO_DEVID_T520BT_FCOE 0x5609
-#define CSIO_DEVID_T504BT_FCOE 0x560A
-#define CSIO_DEVID_B520_FCOE 0x560B
-#define CSIO_DEVID_B504_FCOE 0x560C
-#define CSIO_DEVID_T580CR2_FCOE 0x560D
-#define CSIO_DEVID_T540LPCR_FCOE 0x560E
-#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
-#define CSIO_DEVID_T580LPCR_FCOE 0x5610
-#define CSIO_DEVID_T520LLCR_FCOE 0x5611
-#define CSIO_DEVID_T560CR_FCOE 0x5612
-#define CSIO_DEVID_T580CR_FCOE 0x5613
-
/* Define MACRO values */
#define CSIO_HW_T4 0x4000
#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_HW_T5 0x5000
#define CSIO_T5_FCOE_ASIC 0x5600
#define CSIO_HW_CHIP_MASK 0xF000
+
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)
#define FW_FNAME_T4 "cxgb4/t4fw.bin"
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
index 89ecbac5478f..95d831857640 100644
--- a/drivers/scsi/csiostor/csio_hw_t4.c
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -307,12 +307,12 @@ csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* MEM_EDC1 = 1
* MEM_MC = 2 -- T4
*/
- edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
- mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
- MA_EXT_MEMORY_BAR));
+ mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
@@ -383,11 +383,12 @@ static void
csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
{
u32 size;
- int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
- if (i & EXT_MEM_ENABLE) {
- size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+
+ if (i & EXT_MEM_ENABLE_F) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
csio_add_debugfs_mem(hw, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(size));
+ EXT_MEM_SIZE_G(size));
}
}
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 27745c170c24..66e180a58718 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -298,12 +298,12 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
* MEM_MC0 = 2 -- For T5
* MEM_MC1 = 3 -- For T5
*/
- edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
- mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
- MA_EXT_MEMORY_BAR));
+ mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
@@ -372,16 +372,17 @@ static void
csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
{
u32 size;
- int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
- if (i & EXT_MEM_ENABLE) {
- size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+
+ if (i & EXT_MEM_ENABLE_F) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
- EXT_MEM_SIZE_GET(size));
+ EXT_MEM_SIZE_G(size));
}
- if (i & EXT_MEM1_ENABLE) {
- size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
+ if (i & EXT_MEM1_ENABLE_F) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A);
csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
- EXT_MEM_SIZE_GET(size));
+ EXT_MEM_SIZE_G(size));
}
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 17794add855c..34d20cc3e110 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -128,10 +128,10 @@ static int csio_setup_debugfs(struct csio_hw *hw)
if (IS_ERR_OR_NULL(hw->debugfs_root))
return -1;
- i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
+ i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
+ if (i & EDRAM0_ENABLE_F)
csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
+ if (i & EDRAM1_ENABLE_F)
csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
hw->chip_ops->chip_dfs_create_ext_mem(hw);
@@ -955,6 +955,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct csio_hw *hw;
struct csio_lnode *ln;
+ /* probe only T5 cards */
+ if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+ return -ENODEV;
+
rv = csio_pci_init(pdev, &bars);
if (rv)
goto err;
@@ -974,10 +978,10 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
- FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
- FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
- FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+ FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_G(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_G(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_G(hw->fwrev));
for (i = 0; i < hw->num_pports; i++) {
ln = csio_shost_init(hw, &pdev->dev, true, NULL);
@@ -1167,53 +1171,21 @@ static struct pci_error_handlers csio_err_handler = {
.resume = csio_pci_resume,
};
-static const struct pci_device_id csio_pci_tbl[] = {
- CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */
- CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */
- CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */
- CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */
- CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */
- CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */
- CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
- CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */
- CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */
- CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */
- CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */
- CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */
- CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
- CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */
- CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
- CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */
- CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */
- CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */
- CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */
- CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */
- CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */
- { 0, 0, 0, 0, 0, 0, 0 }
-};
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct pci_device_id csio_pci_tbl[] = {
+/* Define for iSCSI uses PF5, FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION 0x5
+#define CH_PCI_DEVICE_ID_FUNCTION2 0x6
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+#include "t4_pci_id_tbl.h"
static struct pci_driver csio_pci_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index ffe9be04dc39..87f9280d9b43 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -603,7 +603,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
enum fw_retval retval;
__be32 nport_id;
- retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (retval != FW_SUCCESS) {
csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
mempool_free(mbp, hw->mb_mempool);
@@ -770,7 +770,7 @@ csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
(struct fw_fcoe_fcf_cmd *)(mbp->mb);
enum fw_retval retval;
- retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
if (retval != FW_SUCCESS) {
csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
retval);
@@ -1506,7 +1506,7 @@ csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
}
} else if (cpl_op == CPL_FW6_PLD) {
wr = (struct fw_wr_hdr *) (cmd + 4);
- if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
+ if (FW_WR_OP_G(be32_to_cpu(wr->hi))
== FW_RDEV_WR) {
rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
@@ -1574,17 +1574,17 @@ out_pld:
return;
} else {
csio_warn(hw, "unexpected WR op(0x%x) recv\n",
- FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ FW_WR_OP_G(be32_to_cpu((wr->hi))));
CSIO_INC_STATS(hw, n_cpl_unexp);
}
} else if (cpl_op == CPL_FW6_MSG) {
wr = (struct fw_wr_hdr *) (cmd);
- if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+ if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
csio_ln_mgmt_wr_handler(hw, wr,
sizeof(struct fw_fcoe_els_ct_wr));
} else {
csio_warn(hw, "unexpected WR op(0x%x) recv\n",
- FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ FW_WR_OP_G(be32_to_cpu((wr->hi))));
CSIO_INC_STATS(hw, n_cpl_unexp);
}
} else {
@@ -1668,12 +1668,12 @@ csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
__be32 port_id;
wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
- wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
wr_len = DIV_ROUND_UP(wr_len, 16);
- wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
- FW_WR_LEN16(wr_len));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
+ FW_WR_LEN16_V(wr_len));
wr->els_ct_type = sub_op;
wr->ctl_pri = 0;
wr->cp_en_class = 0;
@@ -1757,7 +1757,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
else {
/* Program DSGL to dma payload */
- dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_MORE | ULPTX_NSGE(1));
dsgl.len0 = cpu_to_be32(pld_len);
dsgl.addr0 = cpu_to_be64(pld->paddr);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 15b635142546..08c265c0f353 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -59,7 +59,7 @@ csio_mb_fw_retval(struct csio_mb *mbp)
hdr = (struct fw_cmd_hdr *)(mbp->mb);
- return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
+ return FW_CMD_RETVAL_G(ntohl(hdr->lo));
}
/*
@@ -81,17 +81,17 @@ csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->err_to_clearinit = htonl(
- FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
- FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
- m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
- FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
- FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
- FW_HELLO_CMD_CLEARINIT);
+ FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
+ FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT_F);
}
@@ -112,17 +112,17 @@ csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
uint32_t value;
- *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
if (*retval == FW_SUCCESS) {
hw->fwrev = ntohl(rsp->fwrev);
value = ntohl(rsp->err_to_clearinit);
- *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
+ *mpfn = FW_HELLO_CMD_MBMASTER_G(value);
- if (value & FW_HELLO_CMD_INIT)
+ if (value & FW_HELLO_CMD_INIT_F)
*state = CSIO_DEV_STATE_INIT;
- else if (value & FW_HELLO_CMD_ERR)
+ else if (value & FW_HELLO_CMD_ERR_F)
*state = CSIO_DEV_STATE_ERR;
else
*state = CSIO_DEV_STATE_UNINIT;
@@ -144,9 +144,9 @@ csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
}
@@ -167,9 +167,9 @@ csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->val = htonl(reset);
cmdp->halt_pkd = htonl(halt);
@@ -202,12 +202,12 @@ csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- (wr ? FW_CMD_WRITE : FW_CMD_READ) |
- FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
/* Write Params */
if (wr) {
@@ -245,7 +245,7 @@ csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
uint32_t i;
__be32 *p = &rsp->param[0].val;
- *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
if (*retval == FW_SUCCESS)
for (i = 0; i < nparams; i++, p += 2)
@@ -271,14 +271,14 @@ csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
* specified PCI-E Configuration Space register.
*/
ldst_cmd->op_to_addrspace =
- htonl(FW_CMD_OP(FW_LDST_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
- ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
ldst_cmd->u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
ldst_cmd->u.pcie.r = (uint8_t)reg;
}
@@ -306,10 +306,10 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
- cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- (wr ? FW_CMD_WRITE : FW_CMD_READ));
- cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
/* Read config */
if (!wr)
@@ -347,25 +347,25 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
- unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+ unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST |
- (wr ? FW_CMD_EXEC : FW_CMD_READ) |
- FW_PORT_CMD_PORTID(portid));
+ cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
+ FW_PORT_CMD_PORTID_V(portid));
if (!wr) {
cmdp->action_to_len16 = htonl(
- FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
return;
}
/* Set port */
cmdp->action_to_len16 = htonl(
- FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
if (fc & PAUSE_RX)
lfc |= FW_PORT_CAP_FC_RX;
@@ -393,7 +393,7 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
{
struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
- *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
if (*retval == FW_SUCCESS)
*caps = ntohs(rsp->u.info.pcap);
@@ -415,9 +415,9 @@ csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
- cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
}
@@ -443,18 +443,18 @@ csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST | FW_CMD_EXEC |
- FW_IQ_CMD_PFN(iq_params->pfn) |
- FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
- cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->type_to_iqandstindex = htonl(
- FW_IQ_CMD_VIID(iq_params->viid) |
- FW_IQ_CMD_TYPE(iq_params->type) |
- FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
+ FW_IQ_CMD_VIID_V(iq_params->viid) |
+ FW_IQ_CMD_TYPE_V(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));
cmdp->fl0size = htons(iq_params->fl0size);
cmdp->fl0size = htons(iq_params->fl1size);
@@ -488,8 +488,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
uint32_t iq_start_stop = (iq_params->iq_start) ?
- FW_IQ_CMD_IQSTART(1) :
- FW_IQ_CMD_IQSTOP(1);
+ FW_IQ_CMD_IQSTART_F :
+ FW_IQ_CMD_IQSTOP_F;
/*
* If this IQ write is cascaded with IQ alloc request, do not
@@ -499,51 +499,51 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
if (!cascaded_req)
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_IQ_CMD_PFN(iq_params->pfn) |
- FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
cmdp->alloc_to_len16 |= htonl(iq_start_stop |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->iqid |= htons(iq_params->iqid);
cmdp->fl0id |= htons(iq_params->fl0id);
cmdp->fl1id |= htons(iq_params->fl1id);
cmdp->type_to_iqandstindex |= htonl(
- FW_IQ_CMD_IQANDST(iq_params->iqandst) |
- FW_IQ_CMD_IQANUS(iq_params->iqanus) |
- FW_IQ_CMD_IQANUD(iq_params->iqanud) |
- FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
+ FW_IQ_CMD_IQANDST_V(iq_params->iqandst) |
+ FW_IQ_CMD_IQANUS_V(iq_params->iqanus) |
+ FW_IQ_CMD_IQANUD_V(iq_params->iqanud) |
+ FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex));
cmdp->iqdroprss_to_iqesize |= htons(
- FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
- FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
- FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
- FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
- FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
- FW_IQ_CMD_IQESIZE(iq_params->iqesize));
+ FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) |
+ FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) |
+ FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) |
+ FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) |
+ FW_IQ_CMD_IQESIZE_V(iq_params->iqesize));
cmdp->iqsize |= htons(iq_params->iqsize);
cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
if (iq_params->type == 0) {
cmdp->iqns_to_fl0congen |= htonl(
- FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
- FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
+ FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)|
+ FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen));
}
if (iq_params->fl0size && iq_params->fl0addr &&
(iq_params->fl0id != 0xFFFF)) {
cmdp->iqns_to_fl0congen |= htonl(
- FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
- FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
- FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
- FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
+ FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
+ FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
+ FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
- FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
- FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
- FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
- FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
- FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
+ FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) |
+ FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) |
+ FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) |
+ FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) |
+ FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh));
cmdp->fl0size |= htons(iq_params->fl0size);
cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
}
@@ -588,7 +588,7 @@ csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
{
struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
- *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (*ret_val == FW_SUCCESS) {
iq_params->physiqid = ntohs(rsp->physiqid);
iq_params->iqid = ntohs(rsp->iqid);
@@ -622,13 +622,13 @@ csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST | FW_CMD_EXEC |
- FW_IQ_CMD_PFN(iq_params->pfn) |
- FW_IQ_CMD_VFN(iq_params->vfn));
- cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
- cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));
cmdp->iqid = htons(iq_params->iqid);
cmdp->fl0id = htons(iq_params->fl0id);
@@ -657,12 +657,12 @@ csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
- FW_CMD_REQUEST | FW_CMD_EXEC |
- FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
- FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
- cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
} /* csio_mb_eq_ofld_alloc */
@@ -694,7 +694,8 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
- FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
+ FW_EQ_OFLD_CMD_EQSTART_F :
+ FW_EQ_OFLD_CMD_EQSTOP_F;
/*
* If this EQ write is cascaded with EQ alloc request, do not
@@ -704,29 +705,29 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
if (!cascaded_req)
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
- FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
cmdp->alloc_to_len16 |= htonl(eq_start_stop |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
- cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
cmdp->fetchszm_to_iqid |= htonl(
- FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
- FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
- FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
- FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
+ FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));
cmdp->dcaen_to_eqsize |= htonl(
- FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
- FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
- FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
- FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
- FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
- FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
+ FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));
cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
@@ -773,12 +774,12 @@ csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
{
struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
- *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (*ret_val == FW_SUCCESS) {
- eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
ntohl(rsp->eqid_pkd));
- eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
ntohl(rsp->physeqid_pkd));
} else
eq_ofld_params->eqid = 0;
@@ -807,13 +808,13 @@ csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
- cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
- FW_CMD_REQUEST | FW_CMD_EXEC |
- FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
- FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
- cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
- cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
} /* csio_mb_eq_ofld_free */
@@ -840,15 +841,15 @@ csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
cmdp->op_to_portid = htonl((
- FW_CMD_OP(FW_FCOE_LINK_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
+ FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
FW_FCOE_LINK_CMD_PORTID(port_id)));
cmdp->sub_opcode_fcfi = htonl(
FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
FW_FCOE_LINK_CMD_FCFI(fcfi));
cmdp->lstatus = link_status;
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
} /* csio_write_fcoe_link_cond_init_mb */
@@ -873,11 +874,11 @@ csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
- cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ));
+ cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F));
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
} /* csio_fcoe_read_res_info_init_mb */
@@ -908,13 +909,13 @@ csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
- cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
FW_FCOE_VNP_CMD_FCFI(fcfi)));
cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
@@ -948,11 +949,11 @@ csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
(struct fw_fcoe_vnp_cmd *)(mbp->mb);
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
- cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
FW_FCOE_VNP_CMD_FCFI(fcfi));
- cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}
@@ -978,12 +979,12 @@ csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
- cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
FW_FCOE_VNP_CMD_FCFI(fcfi));
cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
- FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}
@@ -1009,11 +1010,11 @@ csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
- cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
FW_FCOE_FCF_CMD_FCFI(fcfi));
- cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
} /* csio_fcoe_read_fcf_init_mb */
@@ -1029,9 +1030,9 @@ csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
mbp->mb_size = 64;
- cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
- cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
+ cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));
cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
FW_FCOE_STATS_CMD_PORT(portparams->portid);
@@ -1053,7 +1054,7 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
uint8_t *src;
uint8_t *dst;
- *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));
memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
@@ -1125,7 +1126,7 @@ csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
{
struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
- if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
+ if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) {
csio_info(hw, "FW print message:\n");
csio_info(hw, "\tdebug->dprtstridx = %d\n",
ntohs(dbg->u.prt.dprtstridx));
@@ -1305,7 +1306,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
fw_hdr = (struct fw_cmd_hdr *)&hdr;
- switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
case FW_DEBUG_CMD:
csio_mb_debug_cmd_handler(hw);
continue;
@@ -1406,9 +1407,9 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
if (opcode == FW_PORT_CMD) {
pcmd = (struct fw_port_cmd *)cmd;
- port_id = FW_PORT_CMD_PORTID_GET(
+ port_id = FW_PORT_CMD_PORTID_G(
ntohl(pcmd->op_to_portid));
- action = FW_PORT_CMD_ACTION_GET(
+ action = FW_PORT_CMD_ACTION_G(
ntohl(pcmd->action_to_len16));
if (action != FW_PORT_ACTION_GET_PORT_INFO) {
csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
@@ -1417,15 +1418,15 @@ csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
}
link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
- mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
+ mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
hw->pport[port_id].link_status =
- FW_PORT_CMD_LSTATUS_GET(link_status);
+ FW_PORT_CMD_LSTATUS_G(link_status);
hw->pport[port_id].link_speed =
- FW_PORT_CMD_LSPEED_GET(link_status);
+ FW_PORT_CMD_LSPEED_G(link_status);
csio_info(hw, "Port:%x - LINK %s\n", port_id,
- FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
+ FW_PORT_CMD_LSTATUS_G(link_status) ? "UP" : "DOWN");
if (mod_type != hw->pport[port_id].mod_type) {
hw->pport[port_id].mod_type = mod_type;
@@ -1498,7 +1499,7 @@ csio_mb_isr_handler(struct csio_hw *hw)
hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
fw_hdr = (struct fw_cmd_hdr *)&hdr;
- switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
case FW_DEBUG_CMD:
csio_mb_debug_cmd_handler(hw);
return -EINVAL;
@@ -1571,11 +1572,11 @@ csio_mb_tmo_handler(struct csio_hw *hw)
fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
- FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
+ FW_CMD_OP_G(ntohl(fw_hdr->hi)));
mbm->mcurrent = NULL;
CSIO_INC_STATS(mbm, n_tmo);
- fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
+ fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));
return mbp;
}
@@ -1624,10 +1625,10 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
hdr = (struct fw_cmd_hdr *)(mbp->mb);
csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
- hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
+ hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
CSIO_INC_STATS(mbm, n_cancel);
- hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
+ hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
}
}
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
index a84179e54ab9..1bc82d0bc260 100644
--- a/drivers/scsi/csiostor/csio_mb.h
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -79,14 +79,14 @@ enum csio_dev_state {
};
#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y(0) | \
- FW_PARAMS_PARAM_Z(0))
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
enum {
PAUSE_RX = 1 << 0,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 4d0b6ce55f20..51ea5dc5f084 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -209,10 +209,10 @@ csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
struct csio_dma_buf *dma_buf;
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
- wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
FW_SCSI_CMD_WR_IMMDLEN(imm));
- wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
- FW_WR_LEN16(
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(
DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t) req;
@@ -301,7 +301,7 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
struct csio_dma_buf *dma_buf;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
ULPTX_NSGE(req->nsge));
/* Now add the data SGLs */
if (likely(!req->dcopy)) {
@@ -370,10 +370,10 @@ csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
- wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
FW_SCSI_READ_WR_IMMDLEN(imm));
- wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
- FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t)req;
wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t)(req->tmo);
@@ -423,10 +423,10 @@ csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
- wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
FW_SCSI_WRITE_WR_IMMDLEN(imm));
- wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
- FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t)req;
wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
wr->tmo_val = (uint8_t)(req->tmo);
@@ -653,9 +653,9 @@ csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
struct csio_rnode *rn = req->rnode;
struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
- wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
- wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
- FW_WR_LEN16(
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
+ FW_WR_LEN16_V(
DIV_ROUND_UP(size, 16)));
wr->cookie = (uintptr_t) req;
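
Several of these work-request hunks encode lengths in 16-byte units via FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)). A trivial standalone check of that rounding; the macro below mirrors the kernel's DIV_ROUND_UP:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* 1..16 bytes -> 1 unit, 17..32 bytes -> 2 units, and so on. */
                for (unsigned int size = 1; size <= 48; size += 15)
                        printf("size=%2u -> len16=%u\n", size, DIV_ROUND_UP(size, 16));
                return 0;
        }
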
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
index 8d30e7ac1f5e..0c0dd9a658cc 100644
--- a/drivers/scsi/csiostor/csio_wr.h
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -101,7 +101,7 @@
/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr) \
- (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
+ (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
struct csio_hw;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e6c3f55d9d36..a83d2ceded83 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -75,6 +75,7 @@ typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
@@ -157,12 +158,6 @@ static struct scsi_transport_template *cxgb4i_stt;
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 128
-static inline void set_queue(struct sk_buff *skb, unsigned int queue,
- const struct cxgbi_sock *csk)
-{
- skb->queue_mapping = queue;
-}
-
static int push_tx_frames(struct cxgbi_sock *, int);
/*
@@ -172,10 +167,14 @@ static int push_tx_frames(struct cxgbi_sock *, int);
* Returns true if a packet can be sent as an offload WR with immediate
* data. We currently use the same limit as for Ethernet packets.
*/
-static inline int is_ofld_imm(const struct sk_buff *skb)
+static inline bool is_ofld_imm(const struct sk_buff *skb)
{
- return skb->len <= (MAX_IMM_TX_PKT_LEN -
- sizeof(struct fw_ofld_tx_data_wr));
+ int len = skb->len;
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ len += sizeof(struct fw_ofld_tx_data_wr);
+
+ return len <= MAX_IMM_TX_PKT_LEN;
}
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -189,18 +188,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
unsigned int qid_atid = ((unsigned int)csk->atid) |
(((unsigned int)csk->rss_qid) << 14);
- opt0 = KEEP_ALIVE(1) |
- WND_SCALE(wscale) |
- MSS_IDX(csk->mss_idx) |
- L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
- TX_CHAN(csk->tx_chan) |
- SMAC_SEL(csk->smac_idx) |
- ULP_MODE(ULP_MODE_ISCSI) |
- RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID |
- (1 << 20) |
- RSS_QUEUE(csk->rss_qid);
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ (RX_FC_DISABLE_F) |
+ RSS_QUEUE_V(csk->rss_qid);
if (is_t4(lldi->adapter_type)) {
struct cpl_act_open_req *req =
@@ -217,7 +216,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->params = cpu_to_be32(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t));
- opt2 |= 1 << 22;
+ opt2 |= RX_FC_VALID_F;
req->opt2 = cpu_to_be32(opt2);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -237,7 +236,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
req->local_ip = csk->saddr.sin_addr.s_addr;
req->peer_ip = csk->daddr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be64(V_FILTER_TUPLE(
+ req->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
@@ -272,19 +271,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
unsigned int qid_atid = ((unsigned int)csk->atid) |
(((unsigned int)csk->rss_qid) << 14);
- opt0 = KEEP_ALIVE(1) |
- WND_SCALE(wscale) |
- MSS_IDX(csk->mss_idx) |
- L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
- TX_CHAN(csk->tx_chan) |
- SMAC_SEL(csk->smac_idx) |
- ULP_MODE(ULP_MODE_ISCSI) |
- RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+ opt0 = KEEP_ALIVE_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(csk->mss_idx) |
+ L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID |
- RX_FC_DISABLE |
- RSS_QUEUE(csk->rss_qid);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F |
+ RX_FC_DISABLE_F |
+ RSS_QUEUE_V(csk->rss_qid);
if (t4) {
struct cpl_act_open_req6 *req =
@@ -305,7 +304,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
req->opt0 = cpu_to_be64(opt0);
- opt2 |= RX_FC_VALID;
+ opt2 |= RX_FC_VALID_F;
req->opt2 = cpu_to_be32(opt2);
req->params = cpu_to_be32(cxgb4_select_ntuple(
@@ -328,10 +327,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
8);
req->opt0 = cpu_to_be64(opt0);
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
req->opt2 = cpu_to_be32(opt2);
- req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+ req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
csk->cdev->ports[csk->port_id],
csk->l2t)));
}
@@ -388,13 +387,19 @@ static void send_abort_req(struct cxgbi_sock *csk)
if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
return;
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ send_tx_flowc_wr(csk);
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
cxgbi_sock_set_state(csk, CTP_ABORTING);
cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
cxgbi_sock_purge_write_queue(csk);
csk->cpl_abort_req = NULL;
req = (struct cpl_abort_req *)skb->head;
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
req->cmd = CPL_ABORT_SEND_RST;
t4_set_arp_err_handler(skb, csk, abort_arp_failure);
INIT_TP_WR(req, csk->tid);
@@ -420,7 +425,7 @@ static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
csk, csk->state, csk->flags, csk->tid, rst_status);
csk->cpl_abort_rpl = NULL;
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
INIT_TP_WR(rpl, csk->tid);
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
rpl->cmd = rst_status;
@@ -452,7 +457,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
csk->tid));
- req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
+ req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
+ | RX_FORCE_ACK_F);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
return credits;
}
@@ -490,20 +496,40 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
+#define FLOWC_WR_NPARAMS_MIN 9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+ int nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the FlowC request.
+ * Pass back the nparams and actual FlowC length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+
+ return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
struct sk_buff *skb;
struct fw_flowc_wr *flowc;
- int flowclen, i;
+ int nparams, flowclen16, flowclen;
- flowclen = 80;
+ flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
flowc->op_to_nparams =
- htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
+ htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
flowc->flowid_len16 =
- htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
- FW_WR_FLOWID(csk->tid));
+ htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
@@ -522,12 +548,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[7].val = htonl(csk->advmss);
flowc->mnemval[8].mnemonic = 0;
flowc->mnemval[8].val = 0;
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
- }
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[8].val = 16384;
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
@@ -536,6 +560,8 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
csk->advmss);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+ return flowclen16;
}
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -543,30 +569,32 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
{
struct fw_ofld_tx_data_wr *req;
unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
- unsigned int wr_ulp_mode = 0;
+ unsigned int wr_ulp_mode = 0, val;
+ bool imm = is_ofld_imm(skb);
req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
- if (is_ofld_imm(skb)) {
- req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(dlen));
- req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
- FW_WR_LEN16(credits));
+ if (imm) {
+ req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(dlen));
+ req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
} else {
req->op_to_immdlen =
- cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(0));
+ cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(0));
req->flowid_len16 =
- cpu_to_be32(FW_WR_FLOWID(csk->tid) |
- FW_WR_LEN16(credits));
+ cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
}
if (submode)
- wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
- FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+ val = skb_peek(&csk->write_queue) ? 0 : 1;
req->tunnel_to_proxy = htonl(wr_ulp_mode |
- FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
+ FW_OFLD_TX_DATA_WR_SHOVE_V(val));
req->plen = htonl(len);
if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
@@ -595,16 +623,32 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
int dlen = skb->len;
int len = skb->len;
unsigned int credits_needed;
+ int flowclen16 = 0;
skb_reset_transport_header(skb);
if (is_ofld_imm(skb))
- credits_needed = DIV_ROUND_UP(dlen +
- sizeof(struct fw_ofld_tx_data_wr), 16);
+ credits_needed = DIV_ROUND_UP(dlen, 16);
else
- credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
- + sizeof(struct fw_ofld_tx_data_wr),
+ credits_needed = DIV_ROUND_UP(
+ 8 * calc_tx_flits_ofld(skb),
+ 16);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr),
16);
+ /*
+ * Assumes the initial credits is large enough to support
+ * fw_flowc_wr plus largest possible first payload
+ */
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ flowclen16 = send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
if (csk->wr_cred < credits_needed) {
log_debug(1 << CXGBI_DBG_PDU_TX,
"csk 0x%p, skb %u/%u, wr %d < %u.\n",
@@ -613,8 +657,8 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
break;
}
__skb_unlink(skb, &csk->write_queue);
- set_queue(skb, CPL_PRIORITY_DATA, csk);
- skb->csum = credits_needed;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ skb->csum = credits_needed + flowclen16;
csk->wr_cred -= credits_needed;
csk->wr_una_cred += credits_needed;
cxgbi_sock_enqueue_wr(csk, skb);
@@ -625,12 +669,6 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
csk->wr_cred, csk->wr_una_cred);
if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
- if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
- send_tx_flowc_wr(csk);
- skb->csum += 5;
- csk->wr_cred -= 5;
- csk->wr_una_cred += 5;
- }
len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
make_tx_data_wr(csk, skb, dlen, len, credits_needed,
req_completion);
@@ -805,6 +843,13 @@ static void csk_act_open_retry_timer(unsigned long data)
}
+static inline bool is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -826,7 +871,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
"csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
atid, tid, status, csk, csk->state, csk->flags);
- if (status == CPL_ERR_RTX_NEG_ADVICE)
+ if (is_neg_adv(status))
goto rel_skb;
module_put(THIS_MODULE);
@@ -932,8 +977,7 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
csk, csk->state, csk->flags, csk->tid, req->status);
- if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
- req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ if (is_neg_adv(req->status))
goto rel_skb;
cxgbi_sock_get(csk);
@@ -987,6 +1031,27 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (!csk) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ } else {
+ /* not expecting this, reset the connection. */
+ pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
+ spin_lock_bh(&csk->lock);
+ send_abort_req(csk);
+ spin_unlock_bh(&csk->lock);
+ }
+ __kfree_skb(skb);
+}
+
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1406,6 +1471,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_DATA] = do_rx_data,
};
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1445,16 +1511,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (is_t4(lldi->adapter_type))
- req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
- (ULP_MEMIO_ORDER(1)));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (ULP_MEMIO_ORDER_F));
else
- req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
- (V_T5_ULP_MEMIO_IMM(1)));
- req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
- req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ (T5_ULP_MEMIO_IMM_F));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
- idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
idata->len = htonl(dlen);
}
@@ -1483,7 +1549,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
return -ENOMEM;
}
req = (struct ulp_mem_io *)skb->head;
- set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
idata = (struct ulptx_idata *)(req + 1);
@@ -1678,7 +1744,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
cdev->itp = &cxgb4i_iscsi_transport;
- cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
+ cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+ << FW_VIID_PFN_S;
pr_info("cdev 0x%p,%s, pfvf %u.\n",
cdev, lldi->ports[0]->name, cdev->pfvf);
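
Two related changes run through this cxgb4i.c diff. First, is_ofld_imm() and the credit math in push_tx_frames() only add the fw_ofld_tx_data_wr header when the skb actually needs one (SKCBF_TX_NEED_HDR). Second, the FlowC work request is no longer sized with the hardcoded 80/72/8 values: tx_flowc_wr_credits() derives the byte length from the parameter count and rounds it up to 16-byte credits, send_tx_flowc_wr() returns that credit count, and push_tx_frames()/send_abort_req() charge it against wr_cred and fold it into skb->csum instead of the old fixed 5. A standalone sketch of the size arithmetic; the structs are simplified stand-ins for fw_flowc_wr / fw_flowc_mnemval (only their sizes matter here):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
        #define FLOWC_WR_NPARAMS_MIN    9

        struct ex_mnemval {
                uint8_t  mnemonic;
                uint8_t  r4[3];
                uint32_t val;
        };

        struct ex_flowc_wr {
                uint32_t op_to_nparams;
                uint32_t flowid_len16;
                struct ex_mnemval mnemval[0];
        };

        int main(void)
        {
                size_t flowclen = offsetof(struct ex_flowc_wr,
                                           mnemval[FLOWC_WR_NPARAMS_MIN]);
                size_t flowclen16 = DIV_ROUND_UP(flowclen, 16);

                /* 8 + 9 * 8 = 80 bytes -> 5 sixteen-byte credits, 80 bytes sent. */
                printf("flowclen=%zu credits=%zu padded=%zu\n",
                       flowclen, flowclen16, flowclen16 * 16);
                return 0;
        }

With nine parameters this works out to the same five credits as before, but the calculation now stays correct if the parameter list grows (the ninth slot now carries FW_FLOWC_MNEM_TXDATAPLEN_MAX instead of being zeroed).
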
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 7da59c38a69e..eb58afcfb73b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2294,10 +2294,12 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return err;
}
- kfree_skb(skb);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
+
+ kfree_skb(skb);
+
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 2c7cb1c0c453..aba1af720df6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -317,8 +317,8 @@ static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}
-static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
- enum cxgbi_skcb_flags flag)
+static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
+ enum cxgbi_skcb_flags flag)
{
return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 308a016fdaea..cd00a6cdf55b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1671,10 +1671,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
/* must set skb->dev before calling vlan_put_tag */
skb->dev = fcoe->realdev;
- skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_dev_vlan_id(fcoe->netdev));
- if (!skb)
- return -ENOMEM;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_dev_vlan_id(fcoe->netdev));
} else
skb->dev = fcoe->netdev;
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 56e38096f0c4..cfd0084f1cd2 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -694,7 +694,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
}
/* ATAPI */
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+ if (dev->sata_dev.class == ATA_DEV_ATAPI &&
task->ata_task.fis.command == ATA_CMD_PACKET) {
sci_atapi_construct(ireq);
return SCI_SUCCESS;
@@ -2980,7 +2980,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
state = SCI_REQ_SMP_WAIT_RESP;
} else if (task && sas_protocol_ata(task->task_proto) &&
!task->ata_task.use_ncq) {
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+ if (dev->sata_dev.class == ATA_DEV_ATAPI &&
task->ata_task.fis.command == ATA_CMD_PACKET) {
state = SCI_REQ_ATAPI_WAIT_H2D;
} else if (task->data_dir == DMA_NONE) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 3f63c6318b0d..6dcaed0c1fc8 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -588,7 +588,7 @@ int isci_task_abort_task(struct sas_task *task)
ret = TMF_RESP_FUNC_COMPLETE;
} else {
- /* Fill in the tmf stucture */
+ /* Fill in the tmf structure */
isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
old_request);
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 69efbf12b299..9aaa74e349cc 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -226,7 +226,6 @@ static struct platform_driver esp_jazz_driver = {
.remove = esp_jazz_remove,
.driver = {
.name = "jazz_esp",
- .owner = THIS_MODULE,
},
};
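
This hunk, and the matching .owner removals further down (mac_esp, mvme16x, qlogicpti, sgiwd93, sni_53c710, sun3x_esp, sun_esp, ufshcd-pltfrm, the soc and spi drivers), drop an initializer the platform core already provides: platform_driver_register() is a macro that passes THIS_MODULE from the call site into __platform_driver_register(), which sets driver.owner itself. A simplified, self-contained mock of that mechanism (names prefixed to make clear this is not the real driver-core code):

        #include <stdio.h>

        struct module { const char *name; };
        static struct module this_module = { "caller" };
        #define THIS_MODULE (&this_module)

        struct device_driver { const char *name; struct module *owner; };
        struct platform_driver { struct device_driver driver; };

        static int __platform_driver_register(struct platform_driver *drv,
                                              struct module *owner)
        {
                drv->driver.owner = owner;      /* owner filled in here... */
                printf("registered %s, owner=%s\n", drv->driver.name, owner->name);
                return 0;
        }
        /* ...so the macro, not the driver, supplies THIS_MODULE. */
        #define platform_driver_register(drv) __platform_driver_register(drv, THIS_MODULE)

        static struct platform_driver demo_driver = {
                .driver = { .name = "demo" },   /* no .owner needed */
        };

        int main(void)
        {
                return platform_driver_register(&demo_driver);
        }
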
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 577770fdee86..932d9cc98d2f 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -138,7 +138,7 @@ static void sas_ata_task_done(struct sas_task *task)
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
- dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
+ dev->sata_dev.class == ATA_DEV_ATAPI))) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
@@ -272,7 +272,7 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
return to_sas_internal(dev->port->ha->core.shost->transportt);
}
-static void sas_get_ata_command_set(struct domain_device *dev);
+static int sas_get_ata_command_set(struct domain_device *dev);
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
@@ -297,8 +297,7 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
}
memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
- /* TODO switch to ata_dev_classify() */
- sas_get_ata_command_set(dev);
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
}
return 0;
}
@@ -419,18 +418,7 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
- /* XXX: if the class changes during the reset the upper layer
- * should be informed, if the device has gone away we assume
- * libsas will eventually delete it
- */
- switch (dev->sata_dev.command_set) {
- case ATA_COMMAND_SET:
- *class = ATA_DEV_ATA;
- break;
- case ATAPI_COMMAND_SET:
- *class = ATA_DEV_ATAPI;
- break;
- }
+ *class = dev->sata_dev.class;
ap->cbl = ATA_CBL_SATA;
return ret;
@@ -619,50 +607,18 @@ void sas_ata_task_abort(struct sas_task *task)
complete(waiting);
}
-static void sas_get_ata_command_set(struct domain_device *dev)
+static int sas_get_ata_command_set(struct domain_device *dev)
{
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
+ struct ata_taskfile tf;
if (dev->dev_type == SAS_SATA_PENDING)
- return;
+ return ATA_DEV_UNKNOWN;
+
+ ata_tf_from_fis((const u8 *)fis, &tf);
- if ((fis->sector_count == 1 && /* ATA */
- fis->lbal == 1 &&
- fis->lbam == 0 &&
- fis->lbah == 0 &&
- fis->device == 0)
- ||
- (fis->sector_count == 0 && /* CE-ATA (mATA) */
- fis->lbal == 0 &&
- fis->lbam == 0xCE &&
- fis->lbah == 0xAA &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATA_COMMAND_SET;
-
- else if ((fis->interrupt_reason == 1 && /* ATAPI */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x14 &&
- fis->byte_count_high == 0xEB &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
-
- else if ((fis->sector_count == 1 && /* SEMB */
- fis->lbal == 1 &&
- fis->lbam == 0x3C &&
- fis->lbah == 0xC3 &&
- fis->device == 0)
- ||
- (fis->interrupt_reason == 1 && /* SATA PM */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x69 &&
- fis->byte_count_high == 0x96 &&
- (fis->device & ~0x10) == 0))
-
- /* Treat it as a superset? */
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
+ return ata_dev_classify(&tf);
}
void sas_probe_sata(struct asd_sas_port *port)
@@ -768,7 +724,7 @@ int sas_discover_sata(struct domain_device *dev)
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
- sas_get_ata_command_set(dev);
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
res = sas_notify_lldd_dev_found(dev);
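
The sas_ata.c change replaces dev->sata_dev.command_set (ATA_COMMAND_SET / ATAPI_COMMAND_SET) with dev->sata_dev.class holding the standard ATA_DEV_* values, which is what the isci, mvsas, pm8001 and pm80xx hunks in this series consume. The hand-rolled FIS pattern matching is delegated to ata_tf_from_fis() plus ata_dev_classify(). An illustrative standalone re-statement of the signature test those helpers perform on the LBA mid/high registers (the real ata_dev_classify() also recognizes SEMB and port-multiplier signatures and returns ATA_DEV_* constants):

        #include <stdint.h>
        #include <stdio.h>

        enum ex_dev_class { EX_DEV_UNKNOWN, EX_DEV_ATA, EX_DEV_ATAPI };

        static enum ex_dev_class ex_classify(uint8_t lbam, uint8_t lbah)
        {
                if (lbam == 0x00 && lbah == 0x00)
                        return EX_DEV_ATA;      /* ATA device signature */
                if (lbam == 0x14 && lbah == 0xeb)
                        return EX_DEV_ATAPI;    /* ATAPI device signature */
                return EX_DEV_UNKNOWN;
        }

        int main(void)
        {
                printf("%d %d\n", ex_classify(0x00, 0x00), ex_classify(0x14, 0xeb));
                return 0;
        }
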
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 994fc5caf036..14c0334f41e4 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -617,7 +617,6 @@ static struct platform_driver esp_mac_driver = {
.remove = esp_mac_remove,
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 953fd9b953c7..1e85c07e3b62 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -166,7 +166,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
XXX: Since bus errors in the PDMA routines never happen on my
computer, the bus error code is untested.
If the code works as intended, a bus error results in Pseudo-DMA
- beeing disabled, meaning that the driver switches to slow handshake.
+ being disabled, meaning that the driver switches to slow handshake.
If bus errors are NOT extremely rare, this has to be changed.
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f05580e693d0..ff283d23788a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6783,7 +6783,7 @@ static int __init megasas_init(void)
rval = pci_register_driver(&megasas_pci_driver);
if (rval) {
- printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n");
+ printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
goto err_pcidrv;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index fa0567c96050..7f842c88abd2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -224,7 +224,7 @@ struct mpt2_ioctl_eventreport {
};
/**
- * struct mpt2_ioctl_command - generic mpt firmware passthru ioclt
+ * struct mpt2_ioctl_command - generic mpt firmware passthru ioctl
* @hdr - generic header
* @timeout - command timeout in seconds. (if zero then use driver default
* value).
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index 8fbb97a8bfd3..050c8c39d7ed 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -126,7 +126,6 @@ static int mvme16x_device_remove(struct platform_device *dev)
static struct platform_driver mvme16x_scsi_driver = {
.driver = {
.name = "mvme16x-scsi",
- .owner = THIS_MODULE,
},
.probe = mvme16x_probe,
.remove = mvme16x_device_remove,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 85d86a5cdb60..2d5ab6d969ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -479,7 +479,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
if (task->ata_task.use_ncq)
flags |= MCH_FPDMA;
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+ if (dev->sata_dev.class == ATA_DEV_ATAPI) {
if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
flags |= MCH_ATAPI;
}
@@ -546,7 +546,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS and ATAPI CDB */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
memcpy(buf_cmd + STP_ATAPI_CMD,
task->ata_task.atapi_packet, 16);
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 933f21471951..96dcc097a463 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4367,7 +4367,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
}
if (task->ata_task.use_ncq &&
- dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
ATAP = 0x07; /* FPDMA */
PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
}
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index b06443a0db2d..05cce463ab01 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -4077,7 +4077,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
}
if (task->ata_task.use_ncq &&
- dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
ATAP = 0x07; /* FPDMA */
PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
}
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index b3b48b5a984c..5298def33733 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -383,6 +383,7 @@ static int ps3rom_probe(struct ps3_system_bus_device *_dev)
if (!host) {
dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
__func__, __LINE__);
+ error = -ENOMEM;
goto fail_teardown;
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 740ae495aa77..fe122700cad8 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1455,7 +1455,6 @@ MODULE_DEVICE_TABLE(of, qpti_match);
static struct platform_driver qpti_sbus_driver = {
.driver = {
.name = "qpti",
- .owner = THIS_MODULE,
.of_match_table = qpti_match,
},
.probe = qpti_sbus_probe,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7e3d954c9cac..43318d556cbc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1947,9 +1947,10 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
- bool last)
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request *req = bd->rq;
struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
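
The scsi_lib.c hunk adapts to the new blk-mq dispatch signature: .queue_rq no longer takes the request and a bool directly but a queue-data descriptor, from which the request is taken as bd->rq. A simplified standalone mock of the adapted callback shape; the real struct blk_mq_queue_data carries the request plus batching hints such as the last flag:

        #include <stdbool.h>
        #include <stdio.h>

        struct ex_request { int tag; };
        struct ex_queue_data {
                struct ex_request *rq;
                bool last;
        };

        static int ex_queue_rq(const struct ex_queue_data *bd)
        {
                struct ex_request *req = bd->rq;        /* was a separate argument */

                printf("dispatch tag %d, last=%d\n", req->tag, bd->last);
                return 0;
        }

        int main(void)
        {
                struct ex_request r = { .tag = 7 };
                struct ex_queue_data bd = { .rq = &r, .last = true };

                return ex_queue_rq(&bd);
        }
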
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index a464d959f66e..6d215e2fb46d 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -315,7 +315,6 @@ static struct platform_driver sgiwd93_driver = {
.remove = sgiwd93_remove,
.driver = {
.name = "sgiwd93",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 52d54e7425db..76278072147e 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -137,7 +137,6 @@ static struct platform_driver snirm710_driver = {
.remove = snirm710_driver_remove,
.driver = {
.name = "snirm_53c710",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 534eb96fc3a7..e26e81de7c45 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -295,7 +295,6 @@ static struct platform_driver esp_sun3x_driver = {
.remove = esp_sun3x_remove,
.driver = {
.name = "sun3x_esp",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index f2e68459f7ea..7b6d4c2087d7 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -633,7 +633,6 @@ MODULE_DEVICE_TABLE(of, esp_match);
static struct platform_driver esp_sbus_driver = {
.driver = {
.name = "esp",
- .owner = THIS_MODULE,
.of_match_table = esp_match,
},
.probe = esp_sbus_probe,
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 76d39b617db7..7db9564f507d 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -390,7 +390,6 @@ static struct platform_driver ufshcd_pltfrm_driver = {
.shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "ufshcd",
- .owner = THIS_MODULE,
.pm = &ufshcd_dev_pm_ops,
.of_match_table = ufs_of_match,
},
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 22e70126425b..c52bb5dfaedb 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -158,7 +158,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
sc, resp->response, resp->status, resp->sense_len);
sc->result = resp->status;
- virtscsi_compute_resid(sc, resp->resid);
+ virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
switch (resp->response) {
case VIRTIO_SCSI_S_OK:
set_host_byte(sc, DID_OK);
@@ -196,10 +196,13 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
break;
}
- WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
+ WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
+ VIRTIO_SCSI_SENSE_SIZE);
if (sc->sense_buffer) {
memcpy(sc->sense_buffer, resp->sense,
- min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
+ min_t(u32,
+ virtio32_to_cpu(vscsi->vdev, resp->sense_len),
+ VIRTIO_SCSI_SENSE_SIZE));
if (resp->sense_len)
set_driver_byte(sc, DRIVER_SENSE);
}
@@ -323,7 +326,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- switch (event->reason) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
scsi_add_device(shost, 0, target, lun);
break;
@@ -349,8 +352,8 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- u8 asc = event->reason & 255;
- u8 ascq = event->reason >> 8;
+ u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
+ u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
sdev = scsi_device_lookup(shost, 0, target, lun);
if (!sdev) {
@@ -374,12 +377,14 @@ static void virtscsi_handle_event(struct work_struct *work)
struct virtio_scsi *vscsi = event_node->vscsi;
struct virtio_scsi_event *event = &event_node->event;
- if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
- event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
+ if (event->event &
+ cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
+ event->event &= ~cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_EVENTS_MISSED);
scsi_scan_host(virtio_scsi_host(vscsi->vdev));
}
- switch (event->event) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
case VIRTIO_SCSI_T_NO_EVENT:
break;
case VIRTIO_SCSI_T_TRANSPORT_RESET:
@@ -482,26 +487,28 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
return err;
}
-static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+static void virtio_scsi_init_hdr(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req *cmd,
struct scsi_cmnd *sc)
{
cmd->lun[0] = 1;
cmd->lun[1] = sc->device->id;
cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
cmd->lun[3] = sc->device->lun & 0xff;
- cmd->tag = (unsigned long)sc;
+ cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
cmd->prio = 0;
cmd->crn = 0;
}
-static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
struct request *rq = sc->request;
struct blk_integrity *bi;
- virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+ virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
if (!rq || !scsi_prot_sg_count(sc))
return;
@@ -509,9 +516,13 @@ static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
bi = blk_get_integrity(rq->rq_disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
- cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+ cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
- cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+ cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
}
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
@@ -536,11 +547,11 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
- virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+ virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd_pi);
} else {
- virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+ virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd);
}
@@ -669,7 +680,8 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
- .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
+ .subtype = cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
.lun[0] = 1,
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
@@ -710,7 +722,7 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
.lun[3] = sc->device->lun & 0xff,
- .tag = (unsigned long)sc,
+ .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
};
return virtscsi_tmf(vscsi, cmd);
}
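
The virtio_scsi.c hunks wrap every device-endian field (resid, sense_len, event, reason, tag, pi_bytesout/pi_bytesin) in the per-device conversion helpers, so the driver works both with virtio 1.0 devices, which are defined as little-endian, and with legacy devices, which use the guest's native byte order. A standalone mock of what such a helper does; the real virtio32_to_cpu() keys off the VIRTIO_F_VERSION_1 feature bit rather than a struct flag:

        #include <stdint.h>
        #include <stdio.h>

        struct ex_virtio_device { int is_version_1; };

        static uint32_t ex_bswap32(uint32_t v)
        {
                return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8) |
                       ((v & 0x00ff0000U) >> 8)  | ((v & 0xff000000U) >> 24);
        }

        static int ex_host_is_big_endian(void)
        {
                const uint16_t probe = 1;

                return *(const uint8_t *)&probe == 0;
        }

        static uint32_t ex_virtio32_to_cpu(const struct ex_virtio_device *vdev,
                                           uint32_t v)
        {
                if (vdev->is_version_1 && ex_host_is_big_endian())
                        return ex_bswap32(v);   /* device data is LE, host is BE */
                return v;                       /* already in host order */
        }

        int main(void)
        {
                struct ex_virtio_device vdev = { .is_version_1 = 1 };

                printf("0x%08x\n", ex_virtio32_to_cpu(&vdev, 0x12345678U));
                return 0;
        }
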
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 7e1f120f2b32..729425ddfd3e 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -91,7 +91,6 @@ MODULE_DEVICE_TABLE(of, gsbi_dt_match);
static struct platform_driver gsbi_driver = {
.driver = {
.name = "gsbi",
- .owner = THIS_MODULE,
.of_match_table = gsbi_dt_match,
},
.probe = gsbi_probe,
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 11a5043959dc..011a3363c265 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -31,6 +31,7 @@
static u32 (*fuse_readl)(const unsigned int offset);
static int fuse_size;
struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
[TEGRA_REVISION_UNKNOWN] = "unknown",
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 7cb63ab6aac2..5eff6f097f98 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -158,7 +158,6 @@ static struct platform_driver tegra20_fuse_driver = {
.probe = tegra20_fuse_probe,
.driver = {
.name = "tegra20_fuse",
- .owner = THIS_MODULE,
.of_match_table = tegra20_fuse_of_match,
}
};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 5999cf34ab70..8646fa920d8d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -130,7 +130,6 @@ static struct platform_driver tegra30_fuse_driver = {
.probe = tegra30_fuse_probe,
.driver = {
.name = "tegra_fuse",
- .owner = THIS_MODULE,
.of_match_table = tegra30_fuse_of_match,
}
};
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 17264275f32b..bc1b80ec6afe 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -803,7 +803,6 @@ static struct platform_driver knav_dma_driver = {
.remove = knav_dma_remove,
.driver = {
.name = "keystone-navigator-dma",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
};
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 9b8dd6732681..8e6a95d91d33 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1805,7 +1805,6 @@ static struct platform_driver keystone_qmss_driver = {
.remove = knav_queue_remove,
.driver = {
.name = "keystone-navigator-qmss",
- .owner = THIS_MODULE,
.of_match_table = keystone_qmss_of_match,
},
};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 84e7c9e6ccef..99829985c1a1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -225,6 +225,13 @@ config SPI_GPIO
GPIO operations, you should be able to leverage that for better
speed with a custom version of this driver; see the source code.
+config SPI_IMG_SPFI
+ tristate "IMG SPFI controller"
+ depends on MIPS || COMPILE_TEST
+ help
+ This enables support for the SPFI master controller found on
+ IMG SoCs.
+
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC || COMPILE_TEST
@@ -301,6 +308,14 @@ config SPI_FSL_ESPI
From MPC8536, 85xx platform uses the controller, and all P10xx,
P20xx, P30xx,P40xx, P50xx uses this controller.
+config SPI_MESON_SPIFC
+ tristate "Amlogic Meson SPIFC controller"
+ depends on ARCH_MESON || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This enables master mode support for the SPIFC (SPI flash
+ controller) available in Amlogic Meson SoCs.
+
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
depends on GPIOLIB
@@ -444,7 +459,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on PLAT_SAMSUNG
+ depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
select S3C64XX_PL080 if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 78f24ca36fcf..6b9d2ac629cc 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -40,8 +40,10 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
diff --git a/drivers/spi/spi-adi-v3.c b/drivers/spi/spi-adi-v3.c
index 19ea8fb78cc7..a16b25dcd1e6 100644
--- a/drivers/spi/spi-adi-v3.c
+++ b/drivers/spi/spi-adi-v3.c
@@ -972,7 +972,6 @@ MODULE_ALIAS("platform:adi-spi3");
static struct platform_driver adi_spi_driver = {
.driver = {
.name = "adi-spi3",
- .owner = THIS_MODULE,
.pm = &adi_spi_pm_ops,
},
.remove = adi_spi_remove,
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 5b5709a5c957..b95010e72452 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -282,7 +282,6 @@ static struct platform_driver altera_spi_driver = {
.remove = altera_spi_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = NULL,
.of_match_table = of_match_ptr(altera_spi_match),
},
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 058db0fe8dc7..b02eb4ac0218 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -307,7 +307,6 @@ static struct platform_driver ath79_spi_driver = {
.shutdown = ath79_spi_shutdown,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(ath79_spi_driver);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 113c83f44b5c..23d8f5f56579 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -191,6 +192,8 @@
#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define AUTOSUSPEND_TIMEOUT 2000
+
struct atmel_spi_dma {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
@@ -414,23 +417,6 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
return err;
}
-static bool filter(struct dma_chan *chan, void *pdata)
-{
- struct atmel_spi_dma *sl_pdata = pdata;
- struct at_dma_slave *sl;
-
- if (!sl_pdata)
- return false;
-
- sl = &sl_pdata->dma_slave;
- if (sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
static int atmel_spi_configure_dma(struct atmel_spi *as)
{
struct dma_slave_config slave_config;
@@ -441,19 +427,24 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
- &as->dma,
- dev, "tx");
- if (!as->dma.chan_tx) {
+ as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(as->dma.chan_tx)) {
+ err = PTR_ERR(as->dma.chan_tx);
+ if (err == -EPROBE_DEFER) {
+ dev_warn(dev, "no DMA channel available at the moment\n");
+ return err;
+ }
dev_err(dev,
"DMA TX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
goto error;
}
- as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
- &as->dma,
- dev, "rx");
+ /*
+ * No reason to check EPROBE_DEFER here since we have already requested
+ * tx channel. If it fails here, it's for another reason.
+ */
+ as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
if (!as->dma.chan_rx) {
dev_err(dev,
@@ -474,7 +465,7 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
error:
if (as->dma.chan_rx)
dma_release_channel(as->dma.chan_rx);
- if (as->dma.chan_tx)
+ if (!IS_ERR(as->dma.chan_tx))
dma_release_channel(as->dma.chan_tx);
return err;
}
@@ -482,11 +473,9 @@ error:
static void atmel_spi_stop_dma(struct atmel_spi *as)
{
if (as->dma.chan_rx)
- as->dma.chan_rx->device->device_control(as->dma.chan_rx,
- DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(as->dma.chan_rx);
if (as->dma.chan_tx)
- as->dma.chan_tx->device->device_control(as->dma.chan_tx,
- DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(as->dma.chan_tx);
}
static void atmel_spi_release_dma(struct atmel_spi *as)
@@ -1315,6 +1304,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->setup = atmel_spi_setup;
master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
+ master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
as = spi_master_get_devdata(master);
@@ -1347,8 +1337,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
- if (atmel_spi_configure_dma(as) == 0)
+ ret = atmel_spi_configure_dma(as);
+ if (ret == 0)
as->use_dma = true;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
} else {
as->use_pdc = true;
}
@@ -1387,6 +1380,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_free_dma;
@@ -1394,6 +1392,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
return 0;
out_free_dma:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
if (as->use_dma)
atmel_spi_release_dma(as);
@@ -1415,6 +1416,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ pm_runtime_get_sync(&pdev->dev);
+
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
if (as->use_dma) {
@@ -1432,14 +1435,37 @@ static int atmel_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(as->clk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+static int atmel_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(as->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int atmel_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(as->clk);
+}
+
static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_master *master = dev_get_drvdata(dev);
int ret;
/* Stop the queue running */
@@ -1449,22 +1475,22 @@ static int atmel_spi_suspend(struct device *dev)
return ret;
}
- clk_disable_unprepare(as->clk);
-
- pinctrl_pm_select_sleep_state(dev);
+ if (!pm_runtime_suspended(dev))
+ atmel_spi_runtime_suspend(dev);
return 0;
}
static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_master *master = dev_get_drvdata(dev);
int ret;
- pinctrl_pm_select_default_state(dev);
-
- clk_prepare_enable(as->clk);
+ if (!pm_runtime_suspended(dev)) {
+ ret = atmel_spi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
/* Start the queue running */
ret = spi_master_resume(master);
@@ -1474,8 +1500,11 @@ static int atmel_spi_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
-
+static const struct dev_pm_ops atmel_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
+ SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
+ atmel_spi_runtime_resume, NULL)
+};
#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
#else
#define ATMEL_SPI_PM_OPS NULL
@@ -1493,7 +1522,6 @@ MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
- .owner = THIS_MODULE,
.pm = ATMEL_SPI_PM_OPS,
.of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
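
Besides moving to runtime PM with a 2 s autosuspend window, the spi-atmel.c probe path now requests the TX DMA channel through the _reason variant, which returns an ERR_PTR instead of NULL, so a missing provider can be reported as -EPROBE_DEFER and probe retried later instead of silently dropping back. A minimal, hypothetical mock of that flow (names and the numeric constant are stand-ins; 517 matches the kernel's EPROBE_DEFER):

        #include <stdio.h>

        #define EX_EPROBE_DEFER 517

        struct ex_chan { int id; };

        static long ex_request_chan(int provider_ready, struct ex_chan **out)
        {
                static struct ex_chan chan = { .id = 1 };

                if (!provider_ready)
                        return -EX_EPROBE_DEFER;        /* provider not probed yet */
                *out = &chan;
                return 0;
        }

        static int ex_probe(int provider_ready)
        {
                struct ex_chan *tx = NULL;
                long err = ex_request_chan(provider_ready, &tx);

                if (err == -EX_EPROBE_DEFER) {
                        printf("DMA not ready, deferring probe\n");
                        return (int)err;        /* driver core retries probe later */
                }
                printf("using DMA channel %d\n", tx->id);
                return 0;
        }

        int main(void)
        {
                ex_probe(0);            /* -> defer */
                return ex_probe(1);     /* -> DMA */
        }
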
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index f40b34cdf2fc..326f47973684 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -965,7 +965,6 @@ static struct platform_driver au1550_spi_drv = {
.remove = au1550_spi_remove,
.driver = {
.name = "au1550-spi",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 69167456ec1e..98aab457b24d 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -395,7 +395,6 @@ MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
static struct platform_driver bcm2835_spi_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = bcm2835_spi_match,
},
.probe = bcm2835_spi_probe,
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 86f5a98aa7a2..f5ca6dc3a157 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -459,7 +459,6 @@ static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
static struct platform_driver bcm63xx_hsspi_driver = {
.driver = {
.name = "bcm63xx-hsspi",
- .owner = THIS_MODULE,
.pm = &bcm63xx_hsspi_pm_ops,
},
.probe = bcm63xx_hsspi_probe,
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 8510400e7867..c20530982e26 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -469,7 +469,6 @@ static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
static struct platform_driver bcm63xx_spi_driver = {
.driver = {
.name = "bcm63xx-spi",
- .owner = THIS_MODULE,
.pm = &bcm63xx_spi_pm_ops,
},
.probe = bcm63xx_spi_probe,
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index f515c5e9db57..a78693189f45 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -921,7 +921,6 @@ static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
static struct platform_driver bfin_sport_spi_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = BFIN_SPORT_SPI_PM_OPS,
},
.probe = bfin_sport_spi_probe,
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index ebf720b88a2a..37079937d2f7 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -1455,7 +1455,6 @@ MODULE_ALIAS("platform:bfin-spi");
static struct platform_driver bfin_spi_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = BFIN_SPI_PM_OPS,
},
.probe = bfin_spi_probe,
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7b811e38c7ad..5a6749881ff9 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -47,6 +47,7 @@
#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_PERI_SEL_MASK 0x00000200 /* Peripheral Select Decode */
#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
@@ -148,6 +149,11 @@ static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
*/
static void cdns_spi_init_hw(struct cdns_spi *xspi)
{
+ u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK;
+
+ if (xspi->is_decoded_cs)
+ ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK;
+
cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
CDNS_SPI_ER_DISABLE_MASK);
cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
@@ -160,8 +166,7 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
CDNS_SPI_IXR_ALL_MASK);
- cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET,
- CDNS_SPI_CR_DEFAULT_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
CDNS_SPI_ER_ENABLE_MASK);
}
@@ -516,6 +521,17 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
+ &xspi->is_decoded_cs);
+ if (ret < 0)
+ xspi->is_decoded_cs = 0;
+
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
@@ -534,19 +550,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto remove_master;
}
- ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
-
- if (ret < 0)
- master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
- else
- master->num_chipselect = num_cs;
-
- ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
- &xspi->is_decoded_cs);
-
- if (ret < 0)
- xspi->is_decoded_cs = 0;
-
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
master->prepare_message = cdns_prepare_message;
master->transfer_one = cdns_transfer_one;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 181cf2262006..8c30de0315e7 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -203,7 +203,6 @@ err_out:
static struct platform_driver clps711x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = spi_clps711x_probe,
};
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 63700ab7bd9f..b3707badb1e5 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -1119,7 +1119,6 @@ static int davinci_spi_remove(struct platform_device *pdev)
static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 46c6d58e1fda..7281316a5ecb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -26,6 +26,9 @@
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>
+#define RX_BUSY 0
+#define TX_BUSY 1
+
struct mid_dma {
struct intel_mid_dma_slave dmas_tx;
struct intel_mid_dma_slave dmas_rx;
@@ -98,41 +101,26 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
}
/*
- * dws->dma_chan_done is cleared before the dma transfer starts,
- * callback for rx/tx channel will each increment it by 1.
- * Reaching 2 means the whole spi transaction is done.
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
+ * channel will clear a corresponding bit.
*/
-static void dw_spi_dma_done(void *arg)
+static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (++dws->dma_chan_done != 2)
+ if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
return;
dw_spi_xfer_done(dws);
}
-static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
- struct dma_async_tx_descriptor *txdesc, *rxdesc;
- struct dma_slave_config txconf, rxconf;
- u16 dma_ctrl = 0;
-
- /* 1. setup DMA related registers */
- if (cs_change) {
- spi_enable_chip(dws, 0);
- dw_writew(dws, DW_SPI_DMARDLR, 0xf);
- dw_writew(dws, DW_SPI_DMATDLR, 0x10);
- if (dws->tx_dma)
- dma_ctrl |= SPI_DMA_TDMAE;
- if (dws->rx_dma)
- dma_ctrl |= SPI_DMA_RDMAE;
- dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
- spi_enable_chip(dws, 1);
- }
+ struct dma_slave_config txconf;
+ struct dma_async_tx_descriptor *txdesc;
- dws->dma_chan_done = 0;
+ if (!dws->tx_dma)
+ return NULL;
- /* 2. Prepare the TX dma transfer */
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
@@ -151,10 +139,33 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
1,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- txdesc->callback = dw_spi_dma_done;
+ txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
- /* 3. Prepare the RX dma transfer */
+ return txdesc;
+}
+
+/*
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
+ * channel will clear a corresponding bit.
+ */
+static void dw_spi_dma_rx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+ return;
+ dw_spi_xfer_done(dws);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
+{
+ struct dma_slave_config rxconf;
+ struct dma_async_tx_descriptor *rxdesc;
+
+ if (!dws->rx_dma)
+ return NULL;
+
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
@@ -173,15 +184,56 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- rxdesc->callback = dw_spi_dma_done;
+ rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
+ return rxdesc;
+}
+
+static void dw_spi_dma_setup(struct dw_spi *dws)
+{
+ u16 dma_ctrl = 0;
+
+ spi_enable_chip(dws, 0);
+
+ dw_writew(dws, DW_SPI_DMARDLR, 0xf);
+ dw_writew(dws, DW_SPI_DMATDLR, 0x10);
+
+ if (dws->tx_dma)
+ dma_ctrl |= SPI_DMA_TDMAE;
+ if (dws->rx_dma)
+ dma_ctrl |= SPI_DMA_RDMAE;
+ dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
+
+ spi_enable_chip(dws, 1);
+}
+
+static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+{
+ struct dma_async_tx_descriptor *txdesc, *rxdesc;
+
+ /* 1. setup DMA related registers */
+ if (cs_change)
+ dw_spi_dma_setup(dws);
+
+ /* 2. Prepare the TX dma transfer */
+ txdesc = dw_spi_dma_prepare_tx(dws);
+
+ /* 3. Prepare the RX dma transfer */
+ rxdesc = dw_spi_dma_prepare_rx(dws);
+
/* rx must be started before tx due to spi instinct */
- dmaengine_submit(rxdesc);
- dma_async_issue_pending(dws->rxchan);
+ if (rxdesc) {
+ set_bit(RX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dws->rxchan);
+ }
- dmaengine_submit(txdesc);
- dma_async_issue_pending(dws->txchan);
+ if (txdesc) {
+ set_bit(TX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+ }
return 0;
}
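
A minimal user-space sketch of the completion-tracking pattern the spi-dw-mid.c change above introduces: RX and TX each own one bit of a shared busy mask, every DMA callback clears only its own bit, and whichever callback finds the other bit already clear reports the whole transfer as finished. The names, bit values and C11 atomics below are illustrative stand-ins for the kernel's set_bit()/test_and_clear_bit() helpers, not the driver's code.

#include <stdatomic.h>
#include <stdio.h>

#define RX_BUSY (1u << 0)
#define TX_BUSY (1u << 1)

static atomic_uint dma_chan_busy;

/* Stand-in for dw_spi_xfer_done(): runs once per SPI transfer. */
static void xfer_done(void)
{
        puts("whole SPI transfer done");
}

/* Called for each direction before its DMA descriptor is submitted. */
static void mark_busy(unsigned int bit)
{
        atomic_fetch_or(&dma_chan_busy, bit);
}

/* DMA completion callback for one direction. */
static void channel_done(unsigned int bit)
{
        /* The old value tells us whether the other channel is still busy. */
        unsigned int old = atomic_fetch_and(&dma_chan_busy, ~bit);

        if (old & ~bit)
                return;         /* the other direction is still running */
        xfer_done();
}

int main(void)
{
        mark_busy(RX_BUSY);
        mark_busy(TX_BUSY);
        channel_done(TX_BUSY);  /* nothing yet: RX still pending */
        channel_done(RX_BUSY);  /* prints "whole SPI transfer done" */
        return 0;
}
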
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 21ce0e36fa00..eb03e1215195 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -135,7 +135,6 @@ static struct platform_driver dw_spi_mmio_driver = {
.remove = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = dw_spi_mmio_of_match,
},
};
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 83a103a76481..3d32be68c142 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -139,7 +139,7 @@ struct dw_spi {
struct scatterlist tx_sgl;
struct dma_chan *rxchan;
struct scatterlist rx_sgl;
- int dma_chan_done;
+ unsigned long dma_chan_busy;
struct device *dma_dev;
dma_addr_t dma_addr; /* phy address of the Data register */
struct dw_spi_dma_ops *dma_ops;
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index 6caeb1cac0f3..065fe8744989 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -494,7 +494,6 @@ static struct platform_driver efm32_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = efm32_spi_dt_ids,
},
};
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index bf9728773247..bb00be8d1851 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -964,7 +964,6 @@ static int ep93xx_spi_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_spi_driver = {
.driver = {
.name = "ep93xx-spi",
- .owner = THIS_MODULE,
},
.probe = ep93xx_spi_probe,
.remove = ep93xx_spi_remove,
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index f73b3004d6d3..912b9037e9cf 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -441,7 +441,6 @@ static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = falcon_sflash_match,
}
};
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index c5dd20beee22..e85ab1cb17a2 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -56,12 +56,15 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
- cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
if (mspi->flags & SPI_CPM1) {
+ out_be32(&mspi->pram->rstate, 0);
out_be16(&mspi->pram->rbptr,
in_be16(&mspi->pram->rbase));
+ out_be32(&mspi->pram->tstate, 0);
out_be16(&mspi->pram->tbptr,
in_be16(&mspi->pram->tbase));
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
}
}
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 831ceb4a91f6..4cda994d3f40 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -438,7 +438,7 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
-static struct regmap_config dspi_regmap_config = {
+static const struct regmap_config dspi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -492,7 +492,6 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
- dspi_regmap_config.lock_arg = dspi;
dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
&dspi_regmap_config);
if (IS_ERR(dspi->regmap)) {
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index a7f94b6a9e70..d0a73a09a9bd 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -411,7 +411,8 @@ static void fsl_espi_rw_trans(struct spi_message *m,
kfree(local_buf);
}
-static void fsl_espi_do_one_msg(struct spi_message *m)
+static int fsl_espi_do_one_msg(struct spi_master *master,
+ struct spi_message *m)
{
struct spi_transfer *t;
u8 *rx_buf = NULL;
@@ -441,8 +442,8 @@ static void fsl_espi_do_one_msg(struct spi_message *m)
m->actual_length = espi_trans.actual_length;
m->status = espi_trans.status;
- if (m->complete)
- m->complete(m->context);
+ spi_finalize_current_message(master);
+ return 0;
}
static int fsl_espi_setup(struct spi_device *spi)
@@ -587,6 +588,38 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
iounmap(mspi->reg_base);
}
+static int fsl_espi_suspend(struct spi_master *master)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval &= ~SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static int fsl_espi_resume(struct spi_master *master)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval |= SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
@@ -607,16 +640,16 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
dev_set_drvdata(dev, master);
- ret = mpc8xxx_spi_probe(dev, mem, irq);
- if (ret)
- goto err_probe;
+ mpc8xxx_spi_probe(dev, mem, irq);
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->setup = fsl_espi_setup;
master->cleanup = fsl_espi_cleanup;
+ master->transfer_one_message = fsl_espi_do_one_msg;
+ master->prepare_transfer_hardware = fsl_espi_resume;
+ master->unprepare_transfer_hardware = fsl_espi_suspend;
mpc8xxx_spi = spi_master_get_devdata(master);
- mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_espi_remove;
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
@@ -762,25 +795,15 @@ static int of_fsl_espi_remove(struct platform_device *dev)
static int of_fsl_espi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_espi_reg *reg_base;
- u32 regval;
int ret;
- mpc8xxx_spi = spi_master_get_devdata(master);
- reg_base = mpc8xxx_spi->reg_base;
-
ret = spi_master_suspend(master);
if (ret) {
dev_warn(dev, "cannot suspend master\n");
return ret;
}
- regval = mpc8xxx_spi_read_reg(&reg_base->mode);
- regval &= ~SPMODE_ENABLE;
- mpc8xxx_spi_write_reg(&reg_base->mode, regval);
-
- return 0;
+ return fsl_espi_suspend(master);
}
static int of_fsl_espi_resume(struct device *dev)
@@ -827,7 +850,6 @@ MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
static struct platform_driver fsl_espi_driver = {
.driver = {
.name = "fsl_espi",
- .owner = THIS_MODULE,
.of_match_table = of_fsl_espi_match,
.pm = &espi_pm,
},
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 5ddb5b098e4e..446b737e1532 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -61,44 +61,6 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
}
-static void mpc8xxx_spi_work(struct work_struct *work)
-{
- struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
- work);
-
- spin_lock_irq(&mpc8xxx_spi->lock);
- while (!list_empty(&mpc8xxx_spi->queue)) {
- struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
- struct spi_message, queue);
-
- list_del_init(&m->queue);
- spin_unlock_irq(&mpc8xxx_spi->lock);
-
- if (mpc8xxx_spi->spi_do_one_msg)
- mpc8xxx_spi->spi_do_one_msg(m);
-
- spin_lock_irq(&mpc8xxx_spi->lock);
- }
- spin_unlock_irq(&mpc8xxx_spi->lock);
-}
-
-int mpc8xxx_spi_transfer(struct spi_device *spi,
- struct spi_message *m)
-{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
- list_add_tail(&m->queue, &mpc8xxx_spi->queue);
- queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
- spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
-
- return 0;
-}
-
const char *mpc8xxx_spi_strmode(unsigned int flags)
{
if (flags & SPI_QE_CPU_MODE) {
@@ -114,13 +76,12 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
return "CPU";
}
-int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
- int ret = 0;
master = dev_get_drvdata(dev);
@@ -128,7 +89,6 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
- master->transfer = mpc8xxx_spi_transfer;
master->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_master_get_devdata(master);
@@ -147,22 +107,7 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
- spin_lock_init(&mpc8xxx_spi->lock);
init_completion(&mpc8xxx_spi->done);
- INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
- INIT_LIST_HEAD(&mpc8xxx_spi->queue);
-
- mpc8xxx_spi->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (mpc8xxx_spi->workqueue == NULL) {
- ret = -EBUSY;
- goto err;
- }
-
- return 0;
-
-err:
- return ret;
}
int mpc8xxx_spi_remove(struct device *dev)
@@ -173,8 +118,6 @@ int mpc8xxx_spi_remove(struct device *dev)
master = dev_get_drvdata(dev);
mpc8xxx_spi = spi_master_get_devdata(master);
- flush_workqueue(mpc8xxx_spi->workqueue);
- destroy_workqueue(mpc8xxx_spi->workqueue);
spi_unregister_master(master);
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 2fcbfd01d109..b4ed04e8862f 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -55,7 +55,6 @@ struct mpc8xxx_spi {
u32(*get_tx) (struct mpc8xxx_spi *);
/* hooks for different controller driver */
- void (*spi_do_one_msg) (struct spi_message *m);
void (*spi_remove) (struct mpc8xxx_spi *mspi);
unsigned int count;
@@ -78,12 +77,6 @@ struct mpc8xxx_spi {
int bits_per_word, int msb_first);
#endif
- struct workqueue_struct *workqueue;
- struct work_struct work;
-
- struct list_head queue;
- spinlock_t lock;
-
struct completion done;
};
@@ -123,9 +116,8 @@ extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
struct fsl_spi_platform_data *pdata);
extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len);
-extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
extern const char *mpc8xxx_spi_strmode(unsigned int flags);
-extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+extern void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq);
extern int mpc8xxx_spi_remove(struct device *dev);
extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index ed792880c9d6..60c590790854 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -353,7 +353,8 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
return mpc8xxx_spi->count;
}
-static void fsl_spi_do_one_msg(struct spi_message *m)
+static int fsl_spi_do_one_msg(struct spi_master *master,
+ struct spi_message *m)
{
struct spi_device *spi = m->spi;
struct spi_transfer *t, *first;
@@ -367,10 +368,9 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((first->bits_per_word != t->bits_per_word) ||
(first->speed_hz != t->speed_hz)) {
- status = -EINVAL;
dev_err(&spi->dev,
"bits_per_word/speed_hz should be same for the same SPI transfer\n");
- return;
+ return -EINVAL;
}
}
@@ -408,8 +408,7 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
}
m->status = status;
- if (m->complete)
- m->complete(m->context);
+ spi_finalize_current_message(master);
if (status || !cs_change) {
ndelay(nsecs);
@@ -417,6 +416,7 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
}
fsl_spi_setup_transfer(spi, NULL);
+ return 0;
}
static int fsl_spi_setup(struct spi_device *spi)
@@ -624,15 +624,13 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
dev_set_drvdata(dev, master);
- ret = mpc8xxx_spi_probe(dev, mem, irq);
- if (ret)
- goto err_probe;
+ mpc8xxx_spi_probe(dev, mem, irq);
master->setup = fsl_spi_setup;
master->cleanup = fsl_spi_cleanup;
+ master->transfer_one_message = fsl_spi_do_one_msg;
mpc8xxx_spi = spi_master_get_devdata(master);
- mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_spi_remove;
mpc8xxx_spi->max_bits_per_word = 32;
mpc8xxx_spi->type = fsl_spi_get_type(dev);
@@ -704,7 +702,6 @@ free_irq:
err_ioremap:
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
-err_probe:
spi_master_put(master);
err:
return ERR_PTR(ret);
@@ -882,7 +879,6 @@ static int of_fsl_spi_remove(struct platform_device *ofdev)
static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
- .owner = THIS_MODULE,
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
@@ -929,7 +925,6 @@ static struct platform_driver mpc8xxx_spi_driver = {
.remove = plat_mpc8xxx_spi_remove,
.driver = {
.name = "mpc8xxx_spi",
- .owner = THIS_MODULE,
},
};
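
The fsl-espi, fsl-spi and fsl-lib changes above remove the drivers' private workqueue and message list and let the SPI core queue messages instead: each driver now only supplies a transfer_one_message()-style handler and reports completion through spi_finalize_current_message(). A rough, self-contained model of that contract follows; the queue, handler and finalize names are illustrative and are not the SPI core's real API.

#include <stdio.h>

struct message {
        const char *payload;
        int status;
};

/* Stand-in for spi_finalize_current_message(). */
static void finalize_message(struct message *m)
{
        printf("message \"%s\" finalized, status %d\n", m->payload, m->status);
}

/* Driver hook: perform the transfer, record the status, then finalize. */
static int do_one_message(struct message *m)
{
        m->status = 0;          /* pretend the transfer succeeded */
        finalize_message(m);
        return 0;
}

/*
 * Stand-in for the core's message pump, replacing the per-driver
 * workqueue that spi-fsl-lib.c used to run.
 */
static void pump_messages(struct message *msgs, int n,
                          int (*handler)(struct message *))
{
        for (int i = 0; i < n; i++)
                handler(&msgs[i]);
}

int main(void)
{
        struct message msgs[] = { { "read id", -1 }, { "write page", -1 } };

        pump_messages(msgs, 2, do_one_message);
        return 0;
}
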
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 9f595535cf27..aee4e7589568 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -48,7 +48,7 @@ struct spi_gpio {
struct spi_bitbang bitbang;
struct spi_gpio_platform_data pdata;
struct platform_device *pdev;
- int cs_gpios[0];
+ unsigned long cs_gpios[0];
};
/*----------------------------------------------------------------------*/
@@ -220,7 +220,7 @@ static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
- unsigned int cs = spi_gpio->cs_gpios[spi->chip_select];
+ unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
/* set initial clock polarity */
if (is_active)
@@ -234,7 +234,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
static int spi_gpio_setup(struct spi_device *spi)
{
- unsigned int cs;
+ unsigned long cs;
int status = 0;
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
struct device_node *np = spi->master->dev.of_node;
@@ -249,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* ... otherwise, take it from spi->controller_data
*/
- cs = (unsigned int)(uintptr_t) spi->controller_data;
+ cs = (uintptr_t) spi->controller_data;
}
if (!spi->controller_state) {
@@ -277,7 +277,7 @@ static int spi_gpio_setup(struct spi_device *spi)
static void spi_gpio_cleanup(struct spi_device *spi)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
- unsigned int cs = spi_gpio->cs_gpios[spi->chip_select];
+ unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
if (cs != SPI_GPIO_NO_CHIPSELECT)
gpio_free(cs);
@@ -413,6 +413,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
struct spi_gpio_platform_data *pdata;
u16 master_flags = 0;
bool use_of = 0;
+ int num_devices;
status = spi_gpio_probe_dt(pdev);
if (status < 0)
@@ -422,16 +423,21 @@ static int spi_gpio_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
#ifdef GENERIC_BITBANG
- if (!pdata || !pdata->num_chipselect)
+ if (!pdata || (!use_of && !pdata->num_chipselect))
return -ENODEV;
#endif
+ if (use_of && !SPI_N_CHIPSEL)
+ num_devices = 1;
+ else
+ num_devices = SPI_N_CHIPSEL;
+
status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags);
if (status < 0)
return status;
master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio) +
- (sizeof(int) * SPI_N_CHIPSEL));
+ (sizeof(unsigned long) * num_devices));
if (!master) {
status = -ENOMEM;
goto gpio_free;
@@ -446,7 +452,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = master_flags;
master->bus_num = pdev->id;
- master->num_chipselect = SPI_N_CHIPSEL;
+ master->num_chipselect = num_devices;
master->setup = spi_gpio_setup;
master->cleanup = spi_gpio_cleanup;
#ifdef CONFIG_OF
@@ -461,9 +467,18 @@ static int spi_gpio_probe(struct platform_device *pdev)
* property of the node.
*/
- for (i = 0; i < SPI_N_CHIPSEL; i++)
- spi_gpio->cs_gpios[i] =
- of_get_named_gpio(np, "cs-gpios", i);
+ if (!SPI_N_CHIPSEL)
+ spi_gpio->cs_gpios[0] = SPI_GPIO_NO_CHIPSELECT;
+ else
+ for (i = 0; i < SPI_N_CHIPSEL; i++) {
+ status = of_get_named_gpio(np, "cs-gpios", i);
+ if (status < 0) {
+ dev_err(&pdev->dev,
+ "invalid cs-gpios property\n");
+ goto gpio_free;
+ }
+ spi_gpio->cs_gpios[i] = status;
+ }
}
#endif
@@ -524,7 +539,6 @@ MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver spi_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
new file mode 100644
index 000000000000..43781c9fe521
--- /dev/null
+++ b/drivers/spi/spi-img-spfi.c
@@ -0,0 +1,746 @@
+/*
+ * IMG SPFI controller driver
+ *
+ * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+#define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
+#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
+#define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
+#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
+#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
+#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
+
+#define SPFI_CONTROL 0x14
+#define SPFI_CONTROL_CONTINUE BIT(12)
+#define SPFI_CONTROL_SOFT_RESET BIT(11)
+#define SPFI_CONTROL_SEND_DMA BIT(10)
+#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_TMODE_SHIFT 5
+#define SPFI_CONTROL_TMODE_MASK 0x7
+#define SPFI_CONTROL_TMODE_SINGLE 0
+#define SPFI_CONTROL_TMODE_DUAL 1
+#define SPFI_CONTROL_TMODE_QUAD 2
+#define SPFI_CONTROL_SPFI_EN BIT(0)
+
+#define SPFI_TRANSACTION 0x18
+#define SPFI_TRANSACTION_TSIZE_SHIFT 16
+#define SPFI_TRANSACTION_TSIZE_MASK 0xffff
+
+#define SPFI_PORT_STATE 0x1c
+#define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
+#define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
+#define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
+#define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
+
+#define SPFI_TX_32BIT_VALID_DATA 0x20
+#define SPFI_TX_8BIT_VALID_DATA 0x24
+#define SPFI_RX_32BIT_VALID_DATA 0x28
+#define SPFI_RX_8BIT_VALID_DATA 0x2c
+
+#define SPFI_INTERRUPT_STATUS 0x30
+#define SPFI_INTERRUPT_ENABLE 0x34
+#define SPFI_INTERRUPT_CLEAR 0x38
+#define SPFI_INTERRUPT_IACCESS BIT(12)
+#define SPFI_INTERRUPT_GDEX8BIT BIT(11)
+#define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
+#define SPFI_INTERRUPT_GDFUL BIT(8)
+#define SPFI_INTERRUPT_GDHF BIT(7)
+#define SPFI_INTERRUPT_GDEX32BIT BIT(6)
+#define SPFI_INTERRUPT_GDTRIG BIT(5)
+#define SPFI_INTERRUPT_SDFUL BIT(3)
+#define SPFI_INTERRUPT_SDHF BIT(2)
+#define SPFI_INTERRUPT_SDE BIT(1)
+#define SPFI_INTERRUPT_SDTRIG BIT(0)
+
+/*
+ * There are four parallel FIFOs of 16 bytes each. The word buffer
+ * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
+ * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
+ * accesses only a single FIFO, resulting in an effective FIFO size of
+ * 16 bytes.
+ */
+#define SPFI_32BIT_FIFO_SIZE 64
+#define SPFI_8BIT_FIFO_SIZE 16
+
+struct img_spfi {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ void __iomem *regs;
+ phys_addr_t phys;
+ int irq;
+ struct clk *spfi_clk;
+ struct clk *sys_clk;
+
+ struct dma_chan *rx_ch;
+ struct dma_chan *tx_ch;
+ bool tx_dma_busy;
+ bool rx_dma_busy;
+};
+
+static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
+{
+ return readl(spfi->regs + reg);
+}
+
+static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
+{
+ writel(val, spfi->regs + reg);
+}
+
+static inline void spfi_start(struct img_spfi *spfi)
+{
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val |= SPFI_CONTROL_SPFI_EN;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static inline void spfi_stop(struct img_spfi *spfi)
+{
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val &= ~SPFI_CONTROL_SPFI_EN;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static inline void spfi_reset(struct img_spfi *spfi)
+{
+ spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
+ udelay(1);
+ spfi_writel(spfi, 0, SPFI_CONTROL);
+}
+
+static void spfi_flush_tx_fifo(struct img_spfi *spfi)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR);
+ while (time_before(jiffies, timeout)) {
+ if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) &
+ SPFI_INTERRUPT_SDE)
+ return;
+ cpu_relax();
+ }
+
+ dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n");
+ spfi_reset(spfi);
+}
+
+static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
+ count += 4;
+ }
+
+ return count;
+}
+
+static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX32BIT))
+ break;
+ buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+ count += 4;
+ }
+
+ return count;
+}
+
+static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX8BIT))
+ break;
+ buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static int img_spfi_start_pio(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ unsigned int tx_bytes = 0, rx_bytes = 0;
+ const void *tx_buf = xfer->tx_buf;
+ void *rx_buf = xfer->rx_buf;
+ unsigned long timeout;
+
+ if (tx_buf)
+ tx_bytes = xfer->len;
+ if (rx_buf)
+ rx_bytes = xfer->len;
+
+ spfi_start(spfi);
+
+ timeout = jiffies +
+ msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
+ while ((tx_bytes > 0 || rx_bytes > 0) &&
+ time_before(jiffies, timeout)) {
+ unsigned int tx_count, rx_count;
+
+ switch (xfer->bits_per_word) {
+ case 32:
+ tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
+ rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+ break;
+ case 8:
+ default:
+ tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+ rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
+ break;
+ }
+
+ tx_buf += tx_count;
+ rx_buf += rx_count;
+ tx_bytes -= tx_count;
+ rx_bytes -= rx_count;
+
+ cpu_relax();
+ }
+
+ if (rx_bytes > 0 || tx_bytes > 0) {
+ dev_err(spfi->dev, "PIO transfer timed out\n");
+ spfi_reset(spfi);
+ return -ETIMEDOUT;
+ }
+
+ if (tx_buf)
+ spfi_flush_tx_fifo(spfi);
+ spfi_stop(spfi);
+
+ return 0;
+}
+
+static void img_spfi_dma_rx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&spfi->lock, flags);
+
+ spfi->rx_dma_busy = false;
+ if (!spfi->tx_dma_busy) {
+ spfi_stop(spfi);
+ spi_finalize_current_transfer(spfi->master);
+ }
+
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static void img_spfi_dma_tx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spfi_flush_tx_fifo(spfi);
+
+ spin_lock_irqsave(&spfi->lock, flags);
+
+ spfi->tx_dma_busy = false;
+ if (!spfi->rx_dma_busy) {
+ spfi_stop(spfi);
+ spi_finalize_current_transfer(spfi->master);
+ }
+
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_start_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ struct dma_slave_config rxconf, txconf;
+
+ spfi->rx_dma_busy = false;
+ spfi->tx_dma_busy = false;
+
+ if (xfer->rx_buf) {
+ rxconf.direction = DMA_DEV_TO_MEM;
+ switch (xfer->bits_per_word) {
+ case 32:
+ rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
+ rxconf.src_addr_width = 4;
+ rxconf.src_maxburst = 4;
+ break;
+ case 8:
+ default:
+ rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
+ rxconf.src_addr_width = 1;
+ rxconf.src_maxburst = 1;
+ }
+ dmaengine_slave_config(spfi->rx_ch, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ goto stop_dma;
+
+ rxdesc->callback = img_spfi_dma_rx_cb;
+ rxdesc->callback_param = spfi;
+ }
+
+ if (xfer->tx_buf) {
+ txconf.direction = DMA_MEM_TO_DEV;
+ switch (xfer->bits_per_word) {
+ case 32:
+ txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
+ txconf.dst_addr_width = 4;
+ txconf.dst_maxburst = 4;
+ break;
+ case 8:
+ default:
+ txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
+ txconf.dst_addr_width = 1;
+ txconf.dst_maxburst = 1;
+ break;
+ }
+ dmaengine_slave_config(spfi->tx_ch, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!txdesc)
+ goto stop_dma;
+
+ txdesc->callback = img_spfi_dma_tx_cb;
+ txdesc->callback_param = spfi;
+ }
+
+ if (xfer->rx_buf) {
+ spfi->rx_dma_busy = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(spfi->rx_ch);
+ }
+
+ if (xfer->tx_buf) {
+ spfi->tx_dma_busy = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(spfi->tx_ch);
+ }
+
+ spfi_start(spfi);
+
+ return 1;
+
+stop_dma:
+ dmaengine_terminate_all(spfi->rx_ch);
+ dmaengine_terminate_all(spfi->tx_ch);
+ return -EIO;
+}
+
+static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ u32 val, div;
+
+ /*
+ * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
+ * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
+ */
+ div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
+ div = clamp(512 / (1 << get_count_order(div)), 1, 255);
+
+ val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
+ val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
+ SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
+ val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
+ spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
+ if (xfer->tx_buf)
+ val |= SPFI_CONTROL_SEND_DMA;
+ if (xfer->rx_buf)
+ val |= SPFI_CONTROL_GET_DMA;
+ val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
+ if (xfer->tx_nbits == SPI_NBITS_DUAL &&
+ xfer->rx_nbits == SPI_NBITS_DUAL)
+ val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
+ else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
+ xfer->rx_nbits == SPI_NBITS_QUAD)
+ val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ val &= ~SPFI_CONTROL_CONTINUE;
+ if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
+ &master->cur_msg->transfers))
+ val |= SPFI_CONTROL_CONTINUE;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+
+ val = spfi_readl(spfi, SPFI_PORT_STATE);
+ if (spi->mode & SPI_CPHA)
+ val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
+ if (spi->mode & SPI_CPOL)
+ val |= SPFI_PORT_STATE_CK_POL(spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select);
+ spfi_writel(spfi, val, SPFI_PORT_STATE);
+
+ spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
+ SPFI_TRANSACTION);
+}
+
+static int img_spfi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ bool dma_reset = false;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Stop all DMA and reset the controller if the previous transaction
+ * timed out and never completed its DMA.
+ */
+ spin_lock_irqsave(&spfi->lock, flags);
+ if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
+ dev_err(spfi->dev, "SPI DMA still busy\n");
+ dma_reset = true;
+ }
+ spin_unlock_irqrestore(&spfi->lock, flags);
+
+ if (dma_reset) {
+ dmaengine_terminate_all(spfi->tx_ch);
+ dmaengine_terminate_all(spfi->rx_ch);
+ spfi_reset(spfi);
+ }
+
+ img_spfi_config(master, spi, xfer);
+ if (master->can_dma && master->can_dma(master, spi, xfer))
+ ret = img_spfi_start_dma(master, spi, xfer);
+ else
+ ret = img_spfi_start_pio(master, spi, xfer);
+
+ return ret;
+}
+
+static void img_spfi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_PORT_STATE);
+ val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT);
+ val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
+ spfi_writel(spfi, val, SPFI_PORT_STATE);
+}
+
+static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
+ return true;
+ if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
+ return true;
+ return false;
+}
+
+static irqreturn_t img_spfi_irq(int irq, void *dev_id)
+{
+ struct img_spfi *spfi = (struct img_spfi *)dev_id;
+ u32 status;
+
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_IACCESS) {
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
+ dev_err(spfi->dev, "Illegal access interrupt");
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int img_spfi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct img_spfi *spfi;
+ struct resource *res;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
+ if (!master)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
+ spfi = spi_master_get_devdata(master);
+ spfi->dev = &pdev->dev;
+ spfi->master = master;
+ spin_lock_init(&spfi->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spfi->regs = devm_ioremap_resource(spfi->dev, res);
+ if (IS_ERR(spfi->regs)) {
+ ret = PTR_ERR(spfi->regs);
+ goto put_spi;
+ }
+ spfi->phys = res->start;
+
+ spfi->irq = platform_get_irq(pdev, 0);
+ if (spfi->irq < 0) {
+ ret = spfi->irq;
+ goto put_spi;
+ }
+ ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
+ if (ret)
+ goto put_spi;
+
+ spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
+ if (IS_ERR(spfi->sys_clk)) {
+ ret = PTR_ERR(spfi->sys_clk);
+ goto put_spi;
+ }
+ spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
+ if (IS_ERR(spfi->spfi_clk)) {
+ ret = PTR_ERR(spfi->spfi_clk);
+ goto put_spi;
+ }
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ goto put_spi;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret)
+ goto disable_pclk;
+
+ spfi_reset(spfi);
+ /*
+ * Only enable the error (IACCESS) interrupt. In PIO mode we'll
+ * poll the status of the FIFOs.
+ */
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
+
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
+ if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ master->num_chipselect = 5;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
+ master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
+ master->min_speed_hz = master->max_speed_hz / 512;
+
+ master->set_cs = img_spfi_set_cs;
+ master->transfer_one = img_spfi_transfer_one;
+
+ spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
+ spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ if (!spfi->tx_ch || !spfi->rx_ch) {
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
+ } else {
+ master->dma_tx = spfi->tx_ch;
+ master->dma_rx = spfi->rx_ch;
+ master->can_dma = img_spfi_can_dma;
+ }
+
+ pm_runtime_set_active(spfi->dev);
+ pm_runtime_enable(spfi->dev);
+
+ ret = devm_spi_register_master(spfi->dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(spfi->dev);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ clk_disable_unprepare(spfi->spfi_clk);
+disable_pclk:
+ clk_disable_unprepare(spfi->sys_clk);
+put_spi:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int img_spfi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+
+ pm_runtime_disable(spfi->dev);
+ if (!pm_runtime_status_suspended(spfi->dev)) {
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+ }
+
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int img_spfi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+
+ return 0;
+}
+
+static int img_spfi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret) {
+ clk_disable_unprepare(spfi->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spfi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int img_spfi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret)
+ return ret;
+ spfi_reset(spfi);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_spfi_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
+};
+
+static const struct of_device_id img_spfi_of_match[] = {
+ { .compatible = "img,spfi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, img_spfi_of_match);
+
+static struct platform_driver img_spfi_driver = {
+ .driver = {
+ .name = "img-spfi",
+ .pm = &img_spfi_pm_ops,
+ .of_match_table = of_match_ptr(img_spfi_of_match),
+ },
+ .probe = img_spfi_probe,
+ .remove = img_spfi_remove,
+};
+module_platform_driver(img_spfi_driver);
+
+MODULE_DESCRIPTION("IMG SPFI controller driver");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 3637847b5370..961b97d43b43 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1231,7 +1231,6 @@ static int spi_imx_remove(struct platform_device *pdev)
static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spi_imx_dt_ids,
},
.id_table = spi_imx_devtype,
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
new file mode 100644
index 000000000000..0e48f8c2037d
--- /dev/null
+++ b/drivers/spi/spi-meson-spifc.c
@@ -0,0 +1,462 @@
+/*
+ * Driver for Amlogic Meson SPI flash controller (SPIFC)
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* register map */
+#define REG_CMD 0x00
+#define REG_ADDR 0x04
+#define REG_CTRL 0x08
+#define REG_CTRL1 0x0c
+#define REG_STATUS 0x10
+#define REG_CTRL2 0x14
+#define REG_CLOCK 0x18
+#define REG_USER 0x1c
+#define REG_USER1 0x20
+#define REG_USER2 0x24
+#define REG_USER3 0x28
+#define REG_USER4 0x2c
+#define REG_SLAVE 0x30
+#define REG_SLAVE1 0x34
+#define REG_SLAVE2 0x38
+#define REG_SLAVE3 0x3c
+#define REG_C0 0x40
+#define REG_B8 0x60
+#define REG_MAX 0x7c
+
+/* register fields */
+#define CMD_USER BIT(18)
+#define CTRL_ENABLE_AHB BIT(17)
+#define CLOCK_SOURCE BIT(31)
+#define CLOCK_DIV_SHIFT 12
+#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
+#define CLOCK_CNT_HIGH_SHIFT 6
+#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
+#define CLOCK_CNT_LOW_SHIFT 0
+#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
+#define USER_DIN_EN_MS BIT(0)
+#define USER_CMP_MODE BIT(2)
+#define USER_UC_DOUT_SEL BIT(27)
+#define USER_UC_DIN_SEL BIT(28)
+#define USER_UC_MASK ((BIT(5) - 1) << 27)
+#define USER1_BN_UC_DOUT_SHIFT 17
+#define USER1_BN_UC_DOUT_MASK (0xff << 16)
+#define USER1_BN_UC_DIN_SHIFT 8
+#define USER1_BN_UC_DIN_MASK (0xff << 8)
+#define USER4_CS_ACT BIT(30)
+#define SLAVE_TRST_DONE BIT(4)
+#define SLAVE_OP_MODE BIT(30)
+#define SLAVE_SW_RST BIT(31)
+
+#define SPIFC_BUFFER_SIZE 64
+
+/**
+ * struct meson_spifc
+ * @master: the SPI master
+ * @regmap: regmap for device registers
+ * @clk: input clock of the built-in baud rate generator
+ * @dev: the device structure
+ */
+struct meson_spifc {
+ struct spi_master *master;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct device *dev;
+};
+
+static struct regmap_config spifc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_MAX,
+};
+
+/**
+ * meson_spifc_wait_ready() - wait for the current operation to terminate
+ * @spifc: the Meson SPI device
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_wait_ready(struct meson_spifc *spifc)
+{
+ unsigned long deadline = jiffies + msecs_to_jiffies(5);
+ u32 data;
+
+ do {
+ regmap_read(spifc->regmap, REG_SLAVE, &data);
+ if (data & SLAVE_TRST_DONE)
+ return 0;
+ cond_resched();
+ } while (!time_after(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * meson_spifc_drain_buffer() - copy data from device buffer to memory
+ * @spifc: the Meson SPI device
+ * @buf: the destination buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_drain_buffer(struct meson_spifc *spifc, u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ regmap_read(spifc->regmap, REG_C0 + i, &data);
+
+ if (len - i >= 4) {
+ *((u32 *)buf) = data;
+ buf += 4;
+ } else {
+ memcpy(buf, &data, len - i);
+ break;
+ }
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_fill_buffer() - copy data from memory to device buffer
+ * @spifc: the Meson SPI device
+ * @buf: the source buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_fill_buffer(struct meson_spifc *spifc, const u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ if (len - i >= 4)
+ data = *(u32 *)buf;
+ else
+ memcpy(&data, buf, len - i);
+
+ regmap_write(spifc->regmap, REG_C0 + i, data);
+
+ buf += 4;
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_setup_speed() - program the clock divider
+ * @spifc: the Meson SPI device
+ * @speed: desired speed in Hz
+ */
+static void meson_spifc_setup_speed(struct meson_spifc *spifc, u32 speed)
+{
+ unsigned long parent, value;
+ int n;
+
+ parent = clk_get_rate(spifc->clk);
+ n = max_t(int, parent / speed - 1, 1);
+
+ dev_dbg(spifc->dev, "parent %lu, speed %u, n %d\n", parent,
+ speed, n);
+
+ value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
+ value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
+ value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
+ CLOCK_CNT_HIGH_MASK;
+
+ regmap_write(spifc->regmap, REG_CLOCK, value);
+}
+
+/**
+ * meson_spifc_txrx() - transfer a chunk of data
+ * @spifc: the Meson SPI device
+ * @xfer: the current SPI transfer
+ * @offset: offset of the data to transfer
+ * @len: length of the data to transfer
+ * @last_xfer: whether this is the last transfer of the message
+ * @last_chunk: whether this is the last chunk of the transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_txrx(struct meson_spifc *spifc,
+ struct spi_transfer *xfer,
+ int offset, int len, bool last_xfer,
+ bool last_chunk)
+{
+ bool keep_cs = true;
+ int ret;
+
+ if (xfer->tx_buf)
+ meson_spifc_fill_buffer(spifc, xfer->tx_buf + offset, len);
+
+ /* enable DOUT stage */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
+ USER_UC_DOUT_SEL);
+ regmap_write(spifc->regmap, REG_USER1,
+ (8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
+
+ /* enable data input during DOUT */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
+ USER_DIN_EN_MS);
+
+ if (last_chunk) {
+ if (last_xfer)
+ keep_cs = xfer->cs_change;
+ else
+ keep_cs = !xfer->cs_change;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
+ keep_cs ? USER4_CS_ACT : 0);
+
+ /* clear transition done bit */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
+ /* start transfer */
+ regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
+
+ ret = meson_spifc_wait_ready(spifc);
+
+ if (!ret && xfer->rx_buf)
+ meson_spifc_drain_buffer(spifc, xfer->rx_buf + offset, len);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_transfer_one() - perform a single transfer
+ * @master: the SPI master
+ * @spi: the SPI device
+ * @xfer: the current SPI transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int len, done = 0, ret = 0;
+
+ meson_spifc_setup_speed(spifc, xfer->speed_hz);
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
+
+ while (done < xfer->len && !ret) {
+ len = min_t(int, xfer->len - done, SPIFC_BUFFER_SIZE);
+ ret = meson_spifc_txrx(spifc, xfer, done, len,
+ spi_transfer_is_last(master, xfer),
+ done + len >= xfer->len);
+ done += len;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
+ CTRL_ENABLE_AHB);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_hw_init() - reset and initialize the SPI controller
+ * @spifc: the Meson SPI device
+ */
+static void meson_spifc_hw_init(struct meson_spifc *spifc)
+{
+ /* reset device */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
+ SLAVE_SW_RST);
+ /* disable compatible mode */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
+ /* set master mode */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
+}
+
+static int meson_spifc_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct meson_spifc *spifc;
+ struct resource *res;
+ void __iomem *base;
+ unsigned int rate;
+ int ret = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct meson_spifc));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ spifc = spi_master_get_devdata(master);
+ spifc->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(spifc->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_err;
+ }
+
+ spifc->regmap = devm_regmap_init_mmio(spifc->dev, base,
+ &spifc_regmap_config);
+ if (IS_ERR(spifc->regmap)) {
+ ret = PTR_ERR(spifc->regmap);
+ goto out_err;
+ }
+
+ spifc->clk = devm_clk_get(spifc->dev, NULL);
+ if (IS_ERR(spifc->clk)) {
+ dev_err(spifc->dev, "missing clock\n");
+ ret = PTR_ERR(spifc->clk);
+ goto out_err;
+ }
+
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret) {
+ dev_err(spifc->dev, "can't prepare clock\n");
+ goto out_err;
+ }
+
+ rate = clk_get_rate(spifc->clk);
+
+ master->num_chipselect = 1;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+ master->transfer_one = meson_spifc_transfer_one;
+ master->min_speed_hz = rate >> 6;
+ master->max_speed_hz = rate >> 1;
+
+ meson_spifc_hw_init(spifc);
+
+ pm_runtime_set_active(spifc->dev);
+ pm_runtime_enable(spifc->dev);
+
+ ret = devm_spi_register_master(spifc->dev, master);
+ if (ret) {
+ dev_err(spifc->dev, "failed to register spi master\n");
+ goto out_clk;
+ }
+
+ return 0;
+out_clk:
+ clk_disable_unprepare(spifc->clk);
+out_err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int meson_spifc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int meson_spifc_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret)
+ return ret;
+ }
+
+ meson_spifc_hw_init(spifc);
+
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spifc->clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int meson_spifc_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(spifc->clk);
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops meson_spifc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
+ SET_RUNTIME_PM_OPS(meson_spifc_runtime_suspend,
+ meson_spifc_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id meson_spifc_dt_match[] = {
+ { .compatible = "amlogic,meson6-spifc", },
+ { },
+};
+
+static struct platform_driver meson_spifc_driver = {
+ .probe = meson_spifc_probe,
+ .remove = meson_spifc_remove,
+ .driver = {
+ .name = "meson-spifc",
+ .of_match_table = of_match_ptr(meson_spifc_dt_match),
+ .pm = &meson_spifc_pm_ops,
+ },
+};
+
+module_platform_driver(meson_spifc_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson SPIFC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 577d23a12763..ecae0d4e2945 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -600,7 +600,6 @@ static struct platform_driver mpc512x_psc_spi_of_driver = {
.remove = mpc512x_psc_spi_of_remove,
.driver = {
.name = "mpc512x-psc-spi",
- .owner = THIS_MODULE,
.of_match_table = mpc512x_psc_spi_of_match,
},
};
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index de532aa11d34..72d11ebefb28 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -508,7 +508,6 @@ static struct platform_driver mpc52xx_psc_spi_of_driver = {
.remove = mpc52xx_psc_spi_of_remove,
.driver = {
.name = "mpc52xx-psc-spi",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_psc_spi_of_match,
},
};
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index b07db4b62d80..c36002110c30 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -543,7 +543,6 @@ MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
static struct platform_driver mpc52xx_spi_of_driver = {
.driver = {
.name = "mpc52xx-spi",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_spi_match,
},
.probe = mpc52xx_spi_probe,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 51460878af04..4045a1e580e1 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -182,7 +182,6 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
int min, ret;
u32 ctrl0;
struct page *vm_page;
- void *sg_buf;
struct {
u32 pio[4];
struct scatterlist sg;
@@ -232,13 +231,14 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
ret = -ENOMEM;
goto err_vmalloc;
}
- sg_buf = page_address(vm_page) +
- ((size_t)buf & ~PAGE_MASK);
+
+ sg_init_table(&dma_xfer[sg_count].sg, 1);
+ sg_set_page(&dma_xfer[sg_count].sg, vm_page,
+ min, offset_in_page(buf));
} else {
- sg_buf = buf;
+ sg_init_one(&dma_xfer[sg_count].sg, buf, min);
}
- sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -511,7 +511,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
init_completion(&spi->c);
ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
- DRIVER_NAME, ssp);
+ dev_name(&pdev->dev), ssp);
if (ret)
goto out_master_free;
@@ -572,7 +572,6 @@ static struct platform_driver mxs_spi_driver = {
.remove = mxs_spi_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mxs_spi_dt_ids,
},
};
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 73e91d5a43df..f51a058e7678 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -423,7 +423,6 @@ static struct platform_driver nuc900_spi_driver = {
.remove = nuc900_spi_remove,
.driver = {
.name = "nuc900-spi",
- .owner = THIS_MODULE,
},
};
module_platform_driver(nuc900_spi_driver);
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 8998d11c7238..76656a77ec12 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -351,7 +351,6 @@ static struct platform_driver tiny_spi_driver = {
.remove = tiny_spi_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = NULL,
.of_match_table = of_match_ptr(tiny_spi_match),
},
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index c5e2f718eebd..b283d537d16a 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -247,7 +247,6 @@ MODULE_DEVICE_TABLE(of, octeon_spi_match);
static struct platform_driver octeon_spi_driver = {
.driver = {
.name = "spi-octeon",
- .owner = THIS_MODULE,
.of_match_table = octeon_spi_match,
},
.probe = octeon_spi_probe,
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index fb522765ce5a..79399ae9c84c 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -453,7 +453,6 @@ err:
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
- .owner = THIS_MODULE,
},
.probe = omap1_spi100k_probe,
};
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 8bca90a19dd1..daf1ada5cd11 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -527,7 +527,6 @@ MODULE_ALIAS("platform:omap_uwire");
static struct platform_driver uwire_driver = {
.driver = {
.name = "omap_uwire",
- .owner = THIS_MODULE,
},
.probe = uwire_probe,
.remove = uwire_remove,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 352eed7463ac..3bc3cbabbbc0 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1521,7 +1521,6 @@ static const struct dev_pm_ops omap2_mcspi_pm_ops = {
static struct platform_driver omap2_mcspi_driver = {
.driver = {
.name = "omap2_mcspi",
- .owner = THIS_MODULE,
.pm = &omap2_mcspi_pm_ops,
.of_match_table = omap_mcspi_of_match,
},
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 9c217abeb65d..3dec9e0b99b8 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -551,7 +551,6 @@ static const struct dev_pm_ops orion_spi_pm_ops = {
static struct platform_driver orion_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &orion_spi_pm_ops,
.of_match_table = of_match_ptr(orion_spi_of_match_table),
},
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 80b8408ac3e3..54fb984a3e17 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -575,7 +575,6 @@ static struct platform_driver spi_ppc4xx_of_driver = {
.remove = spi_ppc4xx_of_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = spi_ppc4xx_of_match,
},
};
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 6beee8ce2d68..fa7399e84bbb 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -19,6 +19,7 @@ enum {
PORT_BSW0,
PORT_BSW1,
PORT_BSW2,
+ PORT_QUARK_X1000,
};
struct pxa_spi_info {
@@ -92,6 +93,12 @@ static struct pxa_spi_info spi_info_configs[] = {
.tx_param = &bsw2_tx_param,
.rx_param = &bsw2_rx_param,
},
+ [PORT_QUARK_X1000] = {
+ .type = QUARK_X1000_SSP,
+ .port_id = -1,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ },
};
static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
@@ -191,6 +198,7 @@ static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
{ PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
{ PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
{ PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 429ff7eb1414..05c623cfb078 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -63,10 +63,64 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF \
+ | QUARK_X1000_SSCR1_EFWR \
+ | QUARK_X1000_SSCR1_RFT \
+ | QUARK_X1000_SSCR1_TFT \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
#define LPSS_RX_THRESH_DFLT 64
#define LPSS_TX_LOTHRESH_DFLT 160
#define LPSS_TX_HITHRESH_DFLT 224
+struct quark_spi_rate {
+ u32 bitrate;
+ u32 dds_clk_rate;
+ u32 clk_div;
+};
+
+/*
+ * 'rate', 'dds', 'clk_div' lookup table, which is defined in
+ * the Quark SPI datasheet.
+ */
+static const struct quark_spi_rate quark_spi_rate_table[] = {
+/* bitrate, dds_clk_rate, clk_div */
+ {50000000, 0x800000, 0},
+ {40000000, 0x666666, 0},
+ {25000000, 0x400000, 0},
+ {20000000, 0x666666, 1},
+ {16667000, 0x800000, 2},
+ {13333000, 0x666666, 2},
+ {12500000, 0x200000, 0},
+ {10000000, 0x800000, 4},
+ {8000000, 0x666666, 4},
+ {6250000, 0x400000, 3},
+ {5000000, 0x400000, 4},
+ {4000000, 0x666666, 9},
+ {3125000, 0x80000, 0},
+ {2500000, 0x400000, 9},
+ {2000000, 0x666666, 19},
+ {1563000, 0x40000, 0},
+ {1250000, 0x200000, 9},
+ {1000000, 0x400000, 24},
+ {800000, 0x666666, 49},
+ {781250, 0x20000, 0},
+ {625000, 0x200000, 19},
+ {500000, 0x400000, 49},
+ {400000, 0x666666, 99},
+ {390625, 0x10000, 0},
+ {250000, 0x400000, 99},
+ {200000, 0x666666, 199},
+ {195313, 0x8000, 0},
+ {125000, 0x100000, 49},
+ {100000, 0x200000, 124},
+ {50000, 0x100000, 124},
+ {25000, 0x80000, 124},
+ {10016, 0x20000, 77},
+ {5040, 0x20000, 154},
+ {1002, 0x8000, 194},
+};
+
/* Offset from drv_data->lpss_base */
#define GENERAL_REG 0x08
#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
@@ -80,6 +134,96 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == LPSS_SSP;
}
+static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == QUARK_X1000_SSP;
+}
+
+static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return QUARK_X1000_SSCR1_CHANGE_MASK;
+ default:
+ return SSCR1_CHANGE_MASK;
+ }
+}
+
+static u32
+pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return RX_THRESH_QUARK_X1000_DFLT;
+ default:
+ return RX_THRESH_DFLT;
+ }
+}
+
+static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
+{
+ void __iomem *reg = drv_data->ioaddr;
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSSR_TFL_MASK;
+ break;
+ default:
+ mask = SSSR_TFL_MASK;
+ break;
+ }
+
+ return (read_SSSR(reg) & mask) == mask;
+}
+
+static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg)
+{
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSCR1_RFT;
+ break;
+ default:
+ mask = SSCR1_RFT;
+ break;
+ }
+ *sccr1_reg &= ~mask;
+}
+
+static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg, u32 threshold)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ *sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
+ break;
+ default:
+ *sccr1_reg |= SSCR1_RxTresh(threshold);
+ break;
+ }
+}
+
+static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
+ u32 clk_div, u8 bits)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return clk_div
+ | QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
+ | SSCR0_SSE;
+ default:
+ return clk_div
+ | SSCR0_Motorola
+ | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
+ | SSCR0_SSE
+ | (bits > 16 ? SSCR0_EDSS : 0);
+ }
+}
+
/*
* Read and write LPSS SSP private registers. Caller must first check that
* is_lpss_ssp() returns true before these can be called.
@@ -234,7 +378,7 @@ static int null_writer(struct driver_data *drv_data)
void __iomem *reg = drv_data->ioaddr;
u8 n_bytes = drv_data->n_bytes;
- if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
@@ -262,7 +406,7 @@ static int u8_writer(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
- if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
@@ -289,7 +433,7 @@ static int u16_writer(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
- if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
@@ -316,7 +460,7 @@ static int u32_writer(struct driver_data *drv_data)
{
void __iomem *reg = drv_data->ioaddr;
- if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+ if (pxa2xx_spi_txfifo_full(drv_data)
|| (drv_data->tx == drv_data->tx_end))
return 0;
@@ -508,8 +652,9 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
* remaining RX bytes.
*/
if (pxa25x_ssp_comp(drv_data)) {
+ u32 rx_thre;
- sccr1_reg &= ~SSCR1_RFT;
+ pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
bytes_left = drv_data->rx_end - drv_data->rx;
switch (drv_data->n_bytes) {
@@ -519,10 +664,11 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
bytes_left >>= 1;
}
- if (bytes_left > RX_THRESH_DFLT)
- bytes_left = RX_THRESH_DFLT;
+ rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
+ if (rx_thre > bytes_left)
+ rx_thre = bytes_left;
- sccr1_reg |= SSCR1_RxTresh(bytes_left);
+ pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
}
write_SSCR1(sccr1_reg, reg);
}
@@ -585,6 +731,28 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
return drv_data->transfer_handler(drv_data);
}
+/*
+ * The Quark SPI datasheet provides a lookup table: for a given 'rate',
+ * the matching 'dds' and 'clk_div' values are read from it.
+ */
+static u32 quark_x1000_set_clk_regvals(u32 rate, u32 *dds, u32 *clk_div)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(quark_spi_rate_table); i++) {
+ if (rate >= quark_spi_rate_table[i].bitrate) {
+ *dds = quark_spi_rate_table[i].dds_clk_rate;
+ *clk_div = quark_spi_rate_table[i].clk_div;
+ return quark_spi_rate_table[i].bitrate;
+ }
+ }
+
+ *dds = quark_spi_rate_table[i-1].dds_clk_rate;
+ *clk_div = quark_spi_rate_table[i-1].clk_div;
+
+ return quark_spi_rate_table[i-1].bitrate;
+}
+
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
unsigned long ssp_clk = drv_data->max_clk_rate;
@@ -598,6 +766,20 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
return ((ssp_clk / rate - 1) & 0xfff) << 8;
}
+static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
+ struct chip_data *chip, int rate)
+{
+ u32 clk_div;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ quark_x1000_set_clk_regvals(rate, &chip->dds_rate, &clk_div);
+ return clk_div << 8;
+ default:
+ return ssp_get_clk_div(drv_data, rate);
+ }
+}
+
static void pump_transfers(unsigned long data)
{
struct driver_data *drv_data = (struct driver_data *)data;
@@ -613,6 +795,7 @@ static void pump_transfers(unsigned long data)
u32 cr1;
u32 dma_thresh = drv_data->cur_chip->dma_threshold;
u32 dma_burst = drv_data->cur_chip->dma_burst_size;
+ u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
/* Get current state information */
message = drv_data->cur_msg;
@@ -699,7 +882,7 @@ static void pump_transfers(unsigned long data)
if (transfer->bits_per_word)
bits = transfer->bits_per_word;
- clk_div = ssp_get_clk_div(drv_data, speed);
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
if (bits <= 8) {
drv_data->n_bytes = 1;
@@ -731,11 +914,7 @@ static void pump_transfers(unsigned long data)
"pump_transfers: DMA burst size reduced to match bits_per_word\n");
}
- cr0 = clk_div
- | SSCR0_Motorola
- | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
- | SSCR0_SSE
- | (bits > 16 ? SSCR0_EDSS : 0);
+ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
}
message->state = RUNNING_STATE;
@@ -771,17 +950,20 @@ static void pump_transfers(unsigned long data)
write_SSITF(chip->lpss_tx_threshold, reg);
}
+ if (is_quark_x1000_ssp(drv_data) &&
+ (read_DDS_RATE(reg) != chip->dds_rate))
+ write_DDS_RATE(chip->dds_rate, reg);
+
/* see if we need to reload the config registers */
- if ((read_SSCR0(reg) != cr0)
- || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
- (cr1 & SSCR1_CHANGE_MASK)) {
+ if ((read_SSCR0(reg) != cr0) ||
+ (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
write_SSCR0(cr0 & ~SSCR0_SSE, reg);
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(chip->timeout, reg);
/* first set CR1 without interrupt and service enables */
- write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
+ write_SSCR1(cr1 & change_mask, reg);
/* restart the SSP */
write_SSCR0(cr0, reg);
@@ -875,14 +1057,22 @@ static int setup(struct spi_device *spi)
unsigned int clk_div;
uint tx_thres, tx_hi_thres, rx_thres;
- if (is_lpss_ssp(drv_data)) {
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ tx_thres = TX_THRESH_QUARK_X1000_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_QUARK_X1000_DFLT;
+ break;
+ case LPSS_SSP:
tx_thres = LPSS_TX_LOTHRESH_DFLT;
tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
rx_thres = LPSS_RX_THRESH_DFLT;
- } else {
+ break;
+ default:
tx_thres = TX_THRESH_DFLT;
tx_hi_thres = 0;
rx_thres = RX_THRESH_DFLT;
+ break;
}
/* Only alloc on first setup */
@@ -935,9 +1125,6 @@ static int setup(struct spi_device *spi)
chip->enable_dma = drv_data->master_info->enable_dma;
}
- chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
- (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
-
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
| SSITF_TxHiThresh(tx_hi_thres);
@@ -956,15 +1143,24 @@ static int setup(struct spi_device *spi)
}
}
- clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
chip->speed_hz = spi->max_speed_hz;
- chip->cr0 = clk_div
- | SSCR0_Motorola
- | SSCR0_DataSize(spi->bits_per_word > 16 ?
- spi->bits_per_word - 16 : spi->bits_per_word)
- | SSCR0_SSE
- | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+ chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
+ spi->bits_per_word);
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
+ & QUARK_X1000_SSCR1_RFT)
+ | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
+ & QUARK_X1000_SSCR1_TFT);
+ break;
+ default:
+ chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
+ (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+ break;
+ }
+
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
@@ -993,7 +1189,8 @@ static int setup(struct spi_device *spi)
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
- chip->cr0 |= SSCR0_EDSS;
+ if (!is_quark_x1000_ssp(drv_data))
+ chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
chip->read = u32_reader;
chip->write = u32_writer;
@@ -1144,7 +1341,15 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ioaddr = ssp->mmio_base;
drv_data->ssdr_physical = ssp->phys_base + SSDR;
if (pxa25x_ssp_comp(drv_data)) {
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ break;
+ default:
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ break;
+ }
+
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
drv_data->dma_cr1 = 0;
drv_data->clear_sr = SSSR_ROR;
@@ -1182,16 +1387,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Load default SSP configuration */
write_SSCR0(0, drv_data->ioaddr);
- write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
- SSCR1_TxTresh(TX_THRESH_DFLT),
- drv_data->ioaddr);
- write_SSCR0(SSCR0_SCR(2)
- | SSCR0_Motorola
- | SSCR0_DataSize(8),
- drv_data->ioaddr);
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ write_SSCR1(QUARK_X1000_SSCR1_RxTresh(
+ RX_THRESH_QUARK_X1000_DFLT) |
+ QUARK_X1000_SSCR1_TxTresh(
+ TX_THRESH_QUARK_X1000_DFLT),
+ drv_data->ioaddr);
+
+ /* use the Motorola SPI protocol and an 8-bit frame size */
+ write_SSCR0(QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(8),
+ drv_data->ioaddr);
+ break;
+ default:
+ write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
+ SSCR1_TxTresh(TX_THRESH_DFLT),
+ drv_data->ioaddr);
+ write_SSCR0(SSCR0_SCR(2)
+ | SSCR0_Motorola
+ | SSCR0_DataSize(8),
+ drv_data->ioaddr);
+ break;
+ }
+
if (!pxa25x_ssp_comp(drv_data))
write_SSTO(0, drv_data->ioaddr);
- write_SSPSP(0, drv_data->ioaddr);
+
+ if (!is_quark_x1000_ssp(drv_data))
+ write_SSPSP(0, drv_data->ioaddr);
lpss_ssp_setup(drv_data);
@@ -1334,7 +1558,6 @@ static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
static struct platform_driver driver = {
.driver = {
.name = "pxa2xx-spi",
- .owner = THIS_MODULE,
.pm = &pxa2xx_spi_pm_ops,
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
},
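For context on the Quark X1000 clock selection added above: quark_x1000_set_clk_regvals() walks quark_spi_rate_table from fastest to slowest and picks the first row whose bitrate does not exceed the requested rate, falling back to the slowest row when nothing matches. The standalone sketch below is illustrative only; it is not part of the patch and copies just three rows of the table to show the same selection rule in isolation.

/*
 * Standalone sketch (not kernel code) of the rate-selection rule used by
 * quark_x1000_set_clk_regvals(): pick the first table row whose bitrate
 * does not exceed the request, else fall back to the slowest row.
 * Only three rows of the full table are copied here for brevity.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct quark_spi_rate {
	uint32_t bitrate;
	uint32_t dds_clk_rate;
	uint32_t clk_div;
};

static const struct quark_spi_rate table[] = {
	{ 50000000, 0x800000,   0 },
	{ 13333000, 0x666666,   2 },
	{     1002, 0x008000, 194 },
};

static uint32_t pick_rate(uint32_t rate, uint32_t *dds, uint32_t *clk_div)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (rate >= table[i].bitrate) {
			*dds = table[i].dds_clk_rate;
			*clk_div = table[i].clk_div;
			return table[i].bitrate;
		}
	}

	/* Request slower than the slowest row: use the last (slowest) entry. */
	*dds = table[i - 1].dds_clk_rate;
	*clk_div = table[i - 1].clk_div;
	return table[i - 1].bitrate;
}

int main(void)
{
	uint32_t dds, div;

	/* 14 MHz is not an exact row; the first row not above it is 13.333 MHz. */
	assert(pick_rate(14000000, &dds, &div) == 13333000);
	assert(dds == 0x666666 && div == 2);

	/* Anything below 1002 Hz falls through to the slowest row. */
	assert(pick_rate(500, &dds, &div) == 1002);
	return 0;
}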
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 5adc2a11c7bc..6bec59c90cd4 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -93,6 +93,7 @@ struct driver_data {
struct chip_data {
u32 cr0;
u32 cr1;
+ u32 dds_rate;
u32 psp;
u32 timeout;
u8 n_bytes;
@@ -126,6 +127,7 @@ DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)
DEFINE_SSP_REG(SSITF, SSITF)
@@ -141,18 +143,22 @@ DEFINE_SSP_REG(SSIRF, SSIRF)
static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
{
- if (drv_data->ssp_type == PXA25x_SSP)
+ switch (drv_data->ssp_type) {
+ case PXA25x_SSP:
+ case CE4100_SSP:
+ case QUARK_X1000_SSP:
return 1;
- if (drv_data->ssp_type == CE4100_SSP)
- return 1;
- return 0;
+ default:
+ return 0;
+ }
}
static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
void __iomem *reg = drv_data->ioaddr;
- if (drv_data->ssp_type == CE4100_SSP)
+ if (drv_data->ssp_type == CE4100_SSP ||
+ drv_data->ssp_type == QUARK_X1000_SSP)
val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
write_SSSR(val, reg);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 3d0ab0a00466..e7fb5a0d2e8d 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -756,7 +756,6 @@ static const struct dev_pm_ops spi_qup_dev_pm_ops = {
static struct platform_driver spi_qup_driver = {
.driver = {
.name = "spi_qup",
- .owner = THIS_MODULE,
.pm = &spi_qup_dev_pm_ops,
.of_match_table = spi_qup_dt_match,
},
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 29ab9a106a5f..daabbabd26b0 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -749,8 +749,6 @@ static int rockchip_spi_remove(struct platform_device *pdev)
if (rs->dma_rx.ch)
dma_release_channel(rs->dma_rx.ch);
- spi_master_put(master);
-
return 0;
}
@@ -848,7 +846,6 @@ MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
static struct platform_driver rockchip_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &rockchip_spi_pm,
.of_match_table = of_match_ptr(rockchip_spi_dt_match),
},
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 54bb0faec155..2071f788c6fb 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1211,7 +1211,6 @@ static struct platform_driver rspi_driver = {
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index e713737d784f..f747ca269986 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -663,7 +663,6 @@ static struct platform_driver s3c24xx_spi_driver = {
.remove = s3c24xx_spi_remove,
.driver = {
.name = "s3c2410-spi",
- .owner = THIS_MODULE,
.pm = S3C24XX_SPI_PMOPS,
},
};
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ec74e01e8ee1..37b19836f5cb 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -33,8 +33,9 @@
#include <linux/platform_data/spi-s3c64xx.h>
-#define MAX_SPI_PORTS 3
+#define MAX_SPI_PORTS 6
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
+#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
/* Registers and bit-fields */
@@ -78,6 +79,7 @@
#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
+#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
@@ -344,16 +346,8 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
spi->dma_tx = sdd->tx_dma.ch;
}
- ret = pm_runtime_get_sync(&sdd->pdev->dev);
- if (ret < 0) {
- dev_err(dev, "Failed to enable device: %d\n", ret);
- goto out_tx;
- }
-
return 0;
-out_tx:
- dma_release_channel(sdd->tx_dma.ch);
out_rx:
dma_release_channel(sdd->rx_dma.ch);
out:
@@ -370,7 +364,6 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
dma_release_channel(sdd->tx_dma.ch);
}
- pm_runtime_put(&sdd->pdev->dev);
return 0;
}
@@ -717,7 +710,12 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
enable_datapath(sdd, spi, xfer, use_dma);
/* Start the signals */
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ else
+ writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
+ | S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
+ sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
@@ -866,13 +864,15 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
pm_runtime_put(&sdd->pdev->dev);
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
@@ -946,7 +946,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
sdd->cur_speed = 0;
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
@@ -1341,6 +1342,15 @@ static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_POLL,
};
+static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1374,6 +1384,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos5440-spi",
.data = (void *)&exynos5440_spi_port_config,
},
+ { .compatible = "samsung,exynos7-spi",
+ .data = (void *)&exynos7_spi_port_config,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
@@ -1381,7 +1394,6 @@ MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
- .owner = THIS_MODULE,
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 94b5faed21e2..fc29233d0650 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -315,7 +315,6 @@ static struct platform_driver hspi_driver = {
.remove = hspi_remove,
.driver = {
.name = "sh-hspi",
- .owner = THIS_MODULE,
.of_match_table = hspi_of_match,
},
};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 3f365402fcc0..239be7cbe5a8 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1235,7 +1235,6 @@ static struct platform_driver sh_msiof_spi_drv = {
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index b83dd733684c..a9beeeed812c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -187,7 +187,6 @@ static struct platform_driver sh_sci_spi_drv = {
.remove = sh_sci_spi_remove,
.driver = {
.name = "spi_sh_sci",
- .owner = THIS_MODULE,
},
};
module_platform_driver(sh_sci_spi_drv);
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 8e171a76049f..1cfc906dd174 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -532,7 +532,6 @@ static struct platform_driver spi_sh_driver = {
.remove = spi_sh_remove,
.driver = {
.name = "sh_spi",
- .owner = THIS_MODULE,
},
};
module_platform_driver(spi_sh_driver);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index f63de781c729..d075191476f0 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -23,6 +23,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/reset.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -134,6 +135,7 @@
ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
#define SIRFSOC_MAX_CMD_BYTES 4
+#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
struct sirfsoc_spi {
struct spi_bitbang bitbang;
@@ -629,9 +631,6 @@ static int spi_sirfsoc_setup(struct spi_device *spi)
{
struct sirfsoc_spi *sspi;
- if (!spi->max_speed_hz)
- return -EINVAL;
-
sspi = spi_master_get_devdata(spi->master);
if (spi->cs_gpio == -ENOENT)
@@ -649,6 +648,12 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
int irq;
int i, ret;
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "SPI reset failed!\n");
+ return ret;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -683,6 +688,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+ master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
@@ -820,7 +826,6 @@ MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
static struct platform_driver spi_sirfsoc_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &spi_sirfsoc_pm_ops,
.of_match_table = spi_sirfsoc_of_match,
},
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 85204c93f3d3..fbb0a4d74e91 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -464,7 +464,6 @@ static struct platform_driver sun4i_spi_driver = {
.remove = sun4i_spi_remove,
.driver = {
.name = "sun4i-spi",
- .owner = THIS_MODULE,
.of_match_table = sun4i_spi_match,
.pm = &sun4i_spi_pm_ops,
},
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index bd24093f4038..ac48f59705a8 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -470,7 +470,6 @@ static struct platform_driver sun6i_spi_driver = {
.remove = sun6i_spi_remove,
.driver = {
.name = "sun6i-spi",
- .owner = THIS_MODULE,
.of_match_table = sun6i_spi_match,
.pm = &sun6i_spi_pm_ops,
},
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 795bcbc0131b..73779cecc3bb 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1223,7 +1223,6 @@ static const struct dev_pm_ops tegra_spi_pm_ops = {
static struct platform_driver tegra_spi_driver = {
.driver = {
.name = "spi-tegra114",
- .owner = THIS_MODULE,
.pm = &tegra_spi_pm_ops,
.of_match_table = tegra_spi_of_match,
},
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index cd66fe7b78a9..b6558bb6f9df 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -608,7 +608,6 @@ static const struct dev_pm_ops slink_pm_ops = {
static struct platform_driver tegra_sflash_driver = {
.driver = {
.name = "spi-tegra-sflash",
- .owner = THIS_MODULE,
.pm = &slink_pm_ops,
.of_match_table = tegra_sflash_of_match,
},
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 0b9e32e9f493..85c91f58b42f 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1224,7 +1224,6 @@ static const struct dev_pm_ops slink_pm_ops = {
static struct platform_driver tegra_slink_driver = {
.driver = {
.name = "spi-tegra-slink",
- .owner = THIS_MODULE,
.pm = &slink_pm_ops,
.of_match_table = tegra_slink_of_match,
},
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6c211d1910b0..6146c4cd6583 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -567,7 +567,6 @@ static struct platform_driver ti_qspi_driver = {
.remove = ti_qspi_remove,
.driver = {
.name = "ti-qspi",
- .owner = THIS_MODULE,
.pm = &ti_qspi_pm_ops,
.of_match_table = ti_qspi_match,
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 0a87ec39f619..be692ad50442 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1561,7 +1561,6 @@ static int pch_spi_pd_resume(struct platform_device *pd_dev)
static struct platform_driver pch_spi_pd_driver = {
.driver = {
.name = "pch-spi",
- .owner = THIS_MODULE,
},
.probe = pch_spi_pd_probe,
.remove = pch_spi_pd_remove,
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 2501a8373e89..9190124b6d90 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -402,8 +402,7 @@ exit_busy:
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
- if (c->clk)
- clk_disable(c->clk);
+ clk_disable(c->clk);
spi_master_put(master);
return ret;
}
@@ -426,7 +425,6 @@ static struct platform_driver txx9spi_driver = {
.remove = txx9spi_remove,
.driver = {
.name = "spi_txx9",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 0dc5df5233a9..2e32ea2f194f 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -160,7 +160,6 @@ static struct platform_driver xtfpga_spi_driver = {
.remove = xtfpga_spi_remove,
.driver = {
.name = XTFPGA_SPI_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(xtfpga_spi_of_match),
},
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 50f20f243981..66a70e9bc743 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1001,7 +1001,7 @@ static int spi_init_queue(struct spi_master *master)
dev_name(&master->dev));
if (IS_ERR(master->kworker_task)) {
dev_err(&master->dev, "failed to create message pump task\n");
- return -ENOMEM;
+ return PTR_ERR(master->kworker_task);
}
init_kthread_work(&master->pump_messages, spi_pump_messages);
@@ -1220,6 +1220,121 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+ struct spi_device *spi;
+ int rc;
+ u32 value;
+
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Select device driver */
+ rc = of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias));
+ if (rc < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ /* Device address */
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
+ goto err_out;
+ }
+ spi->chip_select = value;
+
+ /* Mode (clock phase/polarity/etc.) */
+ if (of_find_property(nc, "spi-cpha", NULL))
+ spi->mode |= SPI_CPHA;
+ if (of_find_property(nc, "spi-cpol", NULL))
+ spi->mode |= SPI_CPOL;
+ if (of_find_property(nc, "spi-cs-high", NULL))
+ spi->mode |= SPI_CS_HIGH;
+ if (of_find_property(nc, "spi-3wire", NULL))
+ spi->mode |= SPI_3WIRE;
+ if (of_find_property(nc, "spi-lsb-first", NULL))
+ spi->mode |= SPI_LSB_FIRST;
+
+ /* Device DUAL/QUAD mode */
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-tx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-rx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ /* Device speed */
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ nc->full_name, rc);
+ goto err_out;
+ }
+ spi->max_speed_hz = value;
+
+ /* IRQ */
+ spi->irq = irq_of_parse_and_map(nc, 0);
+
+ /* Store a pointer to the node in the device structure */
+ of_node_get(nc);
+ spi->dev.of_node = nc;
+
+ /* Register the new device */
+ request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
+ rc = spi_add_device(spi);
+ if (rc) {
+ dev_err(&master->dev, "spi_device register error %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ return spi;
+
+err_out:
+ spi_dev_put(spi);
+ return ERR_PTR(rc);
+}
+
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
@@ -1231,116 +1346,15 @@ static void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
- int rc;
- u32 value;
if (!master->dev.of_node)
return;
for_each_available_child_of_node(master->dev.of_node, nc) {
- /* Alloc an spi_device */
- spi = spi_alloc_device(master);
- if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Select device driver */
- if (of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias)) < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
- dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
- nc->full_name, rc);
- spi_dev_put(spi);
- continue;
- }
- spi->chip_select = value;
-
- /* Mode (clock phase/polarity/etc.) */
- if (of_find_property(nc, "spi-cpha", NULL))
- spi->mode |= SPI_CPHA;
- if (of_find_property(nc, "spi-cpol", NULL))
- spi->mode |= SPI_CPOL;
- if (of_find_property(nc, "spi-cs-high", NULL))
- spi->mode |= SPI_CS_HIGH;
- if (of_find_property(nc, "spi-3wire", NULL))
- spi->mode |= SPI_3WIRE;
- if (of_find_property(nc, "spi-lsb-first", NULL))
- spi->mode |= SPI_LSB_FIRST;
-
- /* Device DUAL/QUAD mode */
- if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
- switch (value) {
- case 1:
- break;
- case 2:
- spi->mode |= SPI_TX_DUAL;
- break;
- case 4:
- spi->mode |= SPI_TX_QUAD;
- break;
- default:
- dev_warn(&master->dev,
- "spi-tx-bus-width %d not supported\n",
- value);
- break;
- }
- }
-
- if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
- switch (value) {
- case 1:
- break;
- case 2:
- spi->mode |= SPI_RX_DUAL;
- break;
- case 4:
- spi->mode |= SPI_RX_QUAD;
- break;
- default:
- dev_warn(&master->dev,
- "spi-rx-bus-width %d not supported\n",
- value);
- break;
- }
- }
-
- /* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
- nc->full_name, rc);
- spi_dev_put(spi);
- continue;
- }
- spi->max_speed_hz = value;
-
- /* IRQ */
- spi->irq = irq_of_parse_and_map(nc, 0);
-
- /* Store a pointer to the node in the device structure */
- of_node_get(nc);
- spi->dev.of_node = nc;
-
- /* Register the new device */
- request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
- rc = spi_add_device(spi);
- if (rc) {
- dev_err(&master->dev, "spi_device register error %s\n",
+ spi = of_register_spi_device(master, nc);
+ if (IS_ERR(spi))
+ dev_warn(&master->dev, "Failed to create SPI device for %s\n",
nc->full_name);
- spi_dev_put(spi);
- }
-
}
}
#else
@@ -2303,6 +2317,86 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int __spi_of_device_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/* must call put_device() when done with the returned spi_device */
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+{
+ struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
+ __spi_of_device_match);
+ return dev ? to_spi_device(dev) : NULL;
+}
+
+static int __spi_of_master_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/* SPI masters do not sit on spi_bus, so look them up another way */
+static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(&spi_master_class, NULL, node,
+ __spi_of_master_match);
+ if (!dev)
+ return NULL;
+
+ /* reference taken by class_find_device */
+ return container_of(dev, struct spi_master, dev);
+}
+
+static int of_spi_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct spi_master *master;
+ struct spi_device *spi;
+
+ switch (of_reconfig_get_state_change(action, arg)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ master = of_find_spi_master_by_node(rd->dn->parent);
+ if (master == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ spi = of_register_spi_device(master, rd->dn);
+ put_device(&master->dev);
+
+ if (IS_ERR(spi)) {
+ pr_err("%s: failed to create for '%s'\n",
+ __func__, rd->dn->full_name);
+ return notifier_from_errno(PTR_ERR(spi));
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* find our device by node */
+ spi = of_find_spi_device_by_node(rd->dn);
+ if (spi == NULL)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ spi_unregister_device(spi);
+
+ /* and drop the reference taken by the find */
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_of_notifier = {
+ .notifier_call = of_spi_notify,
+};
+#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+extern struct notifier_block spi_of_notifier;
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
static int __init spi_init(void)
{
int status;
@@ -2320,6 +2414,10 @@ static int __init spi_init(void)
status = class_register(&spi_master_class);
if (status < 0)
goto err2;
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+
return 0;
err2:
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index e50039fb1474..6941e04afb8c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -87,6 +87,7 @@ struct spidev_data {
unsigned users;
u8 *tx_buffer;
u8 *rx_buffer;
+ u32 speed_hz;
};
static LIST_HEAD(device_list);
@@ -138,6 +139,7 @@ spidev_sync_write(struct spidev_data *spidev, size_t len)
struct spi_transfer t = {
.tx_buf = spidev->tx_buffer,
.len = len,
+ .speed_hz = spidev->speed_hz,
};
struct spi_message m;
@@ -152,6 +154,7 @@ spidev_sync_read(struct spidev_data *spidev, size_t len)
struct spi_transfer t = {
.rx_buf = spidev->rx_buffer,
.len = len,
+ .speed_hz = spidev->speed_hz,
};
struct spi_message m;
@@ -274,6 +277,8 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
+ if (!k_tmp->speed_hz)
+ k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %zd %s%s%s%dbits %u usec %uHz\n",
@@ -377,7 +382,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
break;
case SPI_IOC_RD_MAX_SPEED_HZ:
- retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
+ retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
break;
/* write requests */
@@ -441,10 +446,11 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
spi->max_speed_hz = tmp;
retval = spi_setup(spi);
- if (retval < 0)
- spi->max_speed_hz = save;
+ if (retval >= 0)
+ spidev->speed_hz = tmp;
else
dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+ spi->max_speed_hz = save;
}
break;
@@ -570,6 +576,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
+ spidev->speed_hz = spidev->spi->max_speed_hz;
+
/* ... after we unbound from the underlying device? */
spin_lock_irq(&spidev->spi_lock);
dofree = (spidev->spi == NULL);
@@ -650,6 +658,8 @@ static int spidev_probe(struct spi_device *spi)
}
mutex_unlock(&device_list_lock);
+ spidev->speed_hz = spi->max_speed_hz;
+
if (status == 0)
spi_set_drvdata(spi, spidev);
else
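The spidev changes above make SPI_IOC_WR_MAX_SPEED_HZ set a per-open-file default (spidev->speed_hz) rather than permanently rewriting spi->max_speed_hz, and any transfer whose speed_hz is zero now falls back to that default. The userspace sketch below is illustrative only and is not part of the patch; the device node /dev/spidev0.0 and the 1 MHz rate are assumptions.

/* Illustrative only: exercises the per-fd spidev speed default described above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	int fd = open("/dev/spidev0.0", O_RDWR);	/* assumed device node */
	uint32_t speed = 1000000;			/* 1 MHz, arbitrary example */
	uint8_t tx[2] = { 0x9f, 0x00 }, rx[2] = { 0 };
	struct spi_ioc_transfer xfer;

	if (fd < 0)
		return 1;

	/* Sets the per-fd default (spidev->speed_hz) after this patch;
	 * the controller's spi->max_speed_hz itself is restored by the driver. */
	if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) < 0)
		return 1;

	/* Reads back the per-fd value rather than the controller maximum. */
	ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed);
	printf("effective default: %u Hz\n", speed);

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);
	xfer.speed_hz = 0;	/* 0 => fall back to the per-fd default */
	ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);

	close(fd);
	return 0;
}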
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 246e03a18c94..20559ab3466d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -768,7 +768,6 @@ static struct platform_driver spmi_pmic_arb_driver = {
.remove = spmi_pmic_arb_remove,
.driver = {
.name = "spmi_pmic_arb",
- .owner = THIS_MODULE,
.of_match_table = spmi_pmic_arb_match_table,
},
};
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 09077067b0c8..7b986f9f213f 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -15,6 +15,9 @@
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
+#ifdef CONFIG_BCM47XX
+#include <bcm47xx_nvram.h>
+#endif
#include "ssb_private.h"
@@ -210,6 +213,7 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus = mcore->dev->bus;
+ struct ssb_sflash *sflash = &mcore->sflash;
struct ssb_pflash *pflash = &mcore->pflash;
/* When there is no chipcommon on the bus there is 4MB flash */
@@ -242,7 +246,15 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
}
ssb_pflash:
- if (pflash->present) {
+ if (sflash->present) {
+#ifdef CONFIG_BCM47XX
+ bcm47xx_nvram_init_from_mem(sflash->window, sflash->size);
+#endif
+ } else if (pflash->present) {
+#ifdef CONFIG_BCM47XX
+ bcm47xx_nvram_init_from_mem(pflash->window, pflash->window_size);
+#endif
+
ssb_pflash_data.width = pflash->buswidth;
ssb_pflash_resource.start = pflash->window;
ssb_pflash_resource.end = pflash->window + pflash->window_size;
diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c
index 69161bbc4d0b..410215c16920 100644
--- a/drivers/ssb/pcihost_wrapper.c
+++ b/drivers/ssb/pcihost_wrapper.c
@@ -11,15 +11,17 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/ssb/ssb.h>
-#ifdef CONFIG_PM
-static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int ssb_pcihost_suspend(struct device *d)
{
+ struct pci_dev *dev = to_pci_dev(d);
struct ssb_bus *ssb = pci_get_drvdata(dev);
int err;
@@ -28,17 +30,23 @@ static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state)
return err;
pci_save_state(dev);
pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ /* If there is a wakeup-enabled child device on the ssb bus,
+ enable the PCI wakeup capability. */
+ device_set_wakeup_enable(d, d->power.wakeup_path);
+
+ pci_prepare_to_sleep(dev);
return 0;
}
-static int ssb_pcihost_resume(struct pci_dev *dev)
+static int ssb_pcihost_resume(struct device *d)
{
+ struct pci_dev *dev = to_pci_dev(d);
struct ssb_bus *ssb = pci_get_drvdata(dev);
int err;
- pci_set_power_state(dev, PCI_D0);
+ pci_back_from_sleep(dev);
err = pci_enable_device(dev);
if (err)
return err;
@@ -49,10 +57,12 @@ static int ssb_pcihost_resume(struct pci_dev *dev)
return 0;
}
-#else /* CONFIG_PM */
-# define ssb_pcihost_suspend NULL
-# define ssb_pcihost_resume NULL
-#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ssb_pcihost_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ssb_pcihost_suspend, ssb_pcihost_resume)
+};
+
+#endif /* CONFIG_PM_SLEEP */
static int ssb_pcihost_probe(struct pci_dev *dev,
const struct pci_device_id *id)
@@ -115,8 +125,9 @@ int ssb_pcihost_register(struct pci_driver *driver)
{
driver->probe = ssb_pcihost_probe;
driver->remove = ssb_pcihost_remove;
- driver->suspend = ssb_pcihost_suspend;
- driver->resume = ssb_pcihost_resume;
+#ifdef CONFIG_PM_SLEEP
+ driver->driver.pm = &ssb_pcihost_pm_ops;
+#endif
return pci_register_driver(driver);
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4690ae9a267f..815de379a130 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -62,8 +62,6 @@ source "drivers/staging/xgifb/Kconfig"
source "drivers/staging/emxx_udc/Kconfig"
-source "drivers/staging/bcm/Kconfig"
-
source "drivers/staging/ft1000/Kconfig"
source "drivers/staging/speakup/Kconfig"
@@ -86,8 +84,6 @@ source "drivers/staging/gdm72xx/Kconfig"
source "drivers/staging/gdm724x/Kconfig"
-source "drivers/staging/imx-drm/Kconfig"
-
source "drivers/staging/fwserial/Kconfig"
source "drivers/staging/goldfish/Kconfig"
@@ -108,4 +104,6 @@ source "drivers/staging/skein/Kconfig"
source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/clocking-wizard/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c780a0e70e15..33c640b49566 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
-obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
@@ -36,7 +35,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
-obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_LUSTRE_FS) += lustre/
@@ -46,3 +44,4 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_CRYPTO_SKEIN) += skein/
obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 7a0e28852965..7e012f37792b 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -1,37 +1,7 @@
menu "Android"
-config ANDROID
- bool "Android Drivers"
- ---help---
- Enable support for various drivers needed on the Android platform
-
if ANDROID
-config ANDROID_BINDER_IPC
- bool "Android Binder IPC Driver"
- depends on MMU
- default n
- ---help---
- Binder is used in Android for both communication between processes,
- and remote method invocation.
-
- This means one Android process can call a method/routine in another
- Android process, using Binder to identify, invoke and pass arguments
- between said processes.
-
-config ANDROID_BINDER_IPC_32BIT
- bool
- depends on !64BIT && ANDROID_BINDER_IPC
- default y
- ---help---
- The Binder API has been changed to support both 32 and 64bit
- applications in a mixed environment.
-
- Enable this to support an old 32-bit Android user-space (v4.4 and
- earlier).
-
- Note that enabling this will break newer Android user-space.
-
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 517ad5ffa429..479b2b86f8c8 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -2,7 +2,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index b15fb0d6b152..06954cdf3dba 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,6 +5,13 @@ TODO:
- make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
+ - kuid_t should never be exposed to user space, as it is a
+ kernel-internal type. The data structure for kuid_t is:
+ typedef struct {
+ uid_t val;
+ } kuid_t;
+ - This bug was introduced by Xiong Zhou in patch bd471258f2e09
+ - ("staging: android: logger: use kuid_t instead of uid_t")
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ad4f5790a76f..8c7852742f4b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -418,7 +418,7 @@ out:
}
/*
- * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
*
* 'nr_to_scan' is the number of objects to scan for freeing.
*
@@ -446,7 +446,7 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
- do_fallocate(range->asma->file,
+ vfs_fallocate(range->asma->file,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, end - start);
range->purged = ASHMEM_WAS_PURGED;
@@ -785,7 +785,6 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
.nr_to_scan = LONG_MAX,
};
ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
- nodes_setall(sc.nodes_to_scan);
ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 56604f41ec48..296d347660fc 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -250,7 +250,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
our systems the only dma_address space is physical addresses.
Additionally, we can't afford the overhead of invalidating every
allocation via dma_map_sg. The implicit contract here is that
- memory comming from the heaps is ready for dma, ie if it has a
+ memory coming from the heaps is ready for dma, ie if it has a
cached mapping that mapping has been invalidated */
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
@@ -263,8 +263,7 @@ err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
err1:
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
err2:
kfree(buffer);
return ERR_PTR(ret);
@@ -276,8 +275,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
kfree(buffer);
}
@@ -902,7 +900,7 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page,
sg_set_page(&sg, page, size, 0);
/*
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the the targeted device, but this works on the currently targeted
+ * for the targeted device, but this works on the currently targeted
* hardware.
*/
sg_dma_address(&sg) = page_to_phys(page);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index d305bb7e9a74..443db8459a9e 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -76,7 +76,7 @@ struct ion_platform_data {
* size
*
* Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specfic sizes not
+ * located at specific memory addresses or of specific sizes not
* managed by the kernel
*/
void ion_reserve(struct ion_platform_data *data);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index f3ea1c31e533..5678870bff48 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -112,10 +112,8 @@ static int __init ion_dummy_init(void)
}
return 0;
err:
- for (i = 0; i < dummy_ion_pdata.nr; i++) {
- if (heaps[i])
- ion_heap_destroy(heaps[i]);
- }
+ for (i = 0; i < dummy_ion_pdata.nr; ++i)
+ ion_heap_destroy(heaps[i]);
kfree(heaps);
if (carveout_ptr) {
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 5864f3dfcbc6..4b88f11e52d3 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -120,7 +120,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
bool high;
if (current_is_kswapd())
- high = 1;
+ high = true;
else
high = !!(gfp_mask & __GFP_HIGHMEM);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index c8f01757abfa..18a5f93e13b7 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -345,7 +345,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant peformance benefit on
+ * invalidated from the cache, provides a significant performance benefit on
* many systems */
/**
@@ -362,7 +362,7 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant peformance benefit
+ * been invalidated from the cache, provides a significant performance benefit
* on many systems
*/
struct ion_page_pool {
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 11c7cceb3c7d..5b8ef0e66010 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -54,10 +54,8 @@ static int tegra_ion_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, idev);
return 0;
err:
- for (i = 0; i < num_heaps; i++) {
- if (heaps[i])
- ion_heap_destroy(heaps[i]);
- }
+ for (i = 0; i < num_heaps; ++i)
+ ion_heap_destroy(heaps[i]);
return err;
}
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 257fc91bf02b..1532a86404be 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
+#include <linux/time64.h>
#include "sync.h"
#ifdef CONFIG_DEBUG_FS
@@ -95,9 +96,9 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
sync_status_str(status));
if (status <= 0) {
- struct timeval tv = ktime_to_timeval(pt->base.timestamp);
+ struct timespec64 ts64 = ktime_to_timespec64(pt->base.timestamp);
- seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+ seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
if (parent->ops->timeline_value_str &&
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index 8fa4758517c0..938a35cd99bb 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -20,6 +20,7 @@
#include <linux/hrtimer.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/ktime.h>
#include "timed_output.h"
#include "timed_gpio.h"
@@ -46,16 +47,16 @@ static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
static int gpio_get_time(struct timed_output_dev *dev)
{
struct timed_gpio_data *data;
- struct timeval t;
+ ktime_t t;
data = container_of(dev, struct timed_gpio_data, dev);
if (!hrtimer_active(&data->timer))
return 0;
- t = ktime_to_timeval(hrtimer_get_remaining(&data->timer));
+ t = hrtimer_get_remaining(&data->timer);
- return t.tv_sec * 1000 + t.tv_usec / 1000;
+ return ktime_to_ms(t);
}
static void gpio_enable(struct timed_output_dev *dev, int value)
@@ -157,7 +158,6 @@ static struct platform_driver timed_gpio_driver = {
.remove = timed_gpio_remove,
.driver = {
.name = TIMED_GPIO_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
deleted file mode 100644
index 940c852e17b7..000000000000
--- a/drivers/staging/bcm/Adapter.h
+++ /dev/null
@@ -1,474 +0,0 @@
-/***********************************
-* Adapter.h
-************************************/
-#ifndef __ADAPTER_H__
-#define __ADAPTER_H__
-
-#define MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES 256
-#include "Debug.h"
-
-struct bcm_leader {
- USHORT Vcid;
- USHORT PLength;
- UCHAR Status;
- UCHAR Unused[3];
-} __packed;
-
-struct bcm_packettosend {
- struct bcm_leader Leader;
- UCHAR ucPayload;
-} __packed;
-
-struct bcm_control_packet {
- PVOID ControlBuff;
- UINT ControlBuffLen;
- struct bcm_control_packet *next;
-} __packed;
-
-struct bcm_link_request {
- struct bcm_leader Leader;
- UCHAR szData[4];
-} __packed;
-
-#define MAX_IP_RANGE_LENGTH 4
-#define MAX_PORT_RANGE 4
-#define MAX_PROTOCOL_LENGTH 32
-#define IPV6_ADDRESS_SIZEINBYTES 0x10
-
-union u_ip_address {
- struct {
- /* Source Ip Address Range */
- ULONG ulIpv4Addr[MAX_IP_RANGE_LENGTH];
- /* Source Ip Mask Address Range */
- ULONG ulIpv4Mask[MAX_IP_RANGE_LENGTH];
- };
- struct {
- /* Source Ip Address Range */
- ULONG ulIpv6Addr[MAX_IP_RANGE_LENGTH * 4];
- /* Source Ip Mask Address Range */
- ULONG ulIpv6Mask[MAX_IP_RANGE_LENGTH * 4];
- };
- struct {
- UCHAR ucIpv4Address[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS];
- UCHAR ucIpv4Mask[MAX_IP_RANGE_LENGTH * IP_LENGTH_OF_ADDRESS];
- };
- struct {
- UCHAR ucIpv6Address[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
- UCHAR ucIpv6Mask[MAX_IP_RANGE_LENGTH * IPV6_ADDRESS_SIZEINBYTES];
- };
-};
-
-struct bcm_hdr_suppression_contextinfo {
- /* Intermediate buffer to accumulate pkt Header for PHS */
- UCHAR ucaHdrSuppressionInBuf[MAX_PHS_LENGTHS];
- /* Intermediate buffer containing pkt Header after PHS */
- UCHAR ucaHdrSuppressionOutBuf[MAX_PHS_LENGTHS + PHSI_LEN];
-};
-
-struct bcm_classifier_rule {
- ULONG ulSFID;
- UCHAR ucReserved[2];
- B_UINT16 uiClassifierRuleIndex;
- bool bUsed;
- USHORT usVCID_Value;
- /* This field determines the Classifier Priority */
- B_UINT8 u8ClassifierRulePriority;
- union u_ip_address stSrcIpAddress;
- UCHAR ucIPSourceAddressLength; /* Ip Source Address Length */
-
- union u_ip_address stDestIpAddress;
- /* Ip Destination Address Length */
- UCHAR ucIPDestinationAddressLength;
- UCHAR ucIPTypeOfServiceLength; /* Type of service Length */
- UCHAR ucTosLow; /* Tos Low */
- UCHAR ucTosHigh; /* Tos High */
- UCHAR ucTosMask; /* Tos Mask */
-
- UCHAR ucProtocolLength; /* protocol Length */
- UCHAR ucProtocol[MAX_PROTOCOL_LENGTH]; /* protocol value */
- USHORT usSrcPortRangeLo[MAX_PORT_RANGE];
- USHORT usSrcPortRangeHi[MAX_PORT_RANGE];
- UCHAR ucSrcPortRangeLength;
-
- USHORT usDestPortRangeLo[MAX_PORT_RANGE];
- USHORT usDestPortRangeHi[MAX_PORT_RANGE];
- UCHAR ucDestPortRangeLength;
-
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
-
- /* For IPv6 Addressing */
- UCHAR ucDirection;
- bool bIpv6Protocol;
- UINT32 u32PHSRuleID;
- struct bcm_phs_rule sPhsRule;
- UCHAR u8AssociatedPHSI;
-
- /* Classification fields for ETH CS */
- UCHAR ucEthCSSrcMACLen;
- UCHAR au8EThCSSrcMAC[MAC_ADDRESS_SIZE];
- UCHAR au8EThCSSrcMACMask[MAC_ADDRESS_SIZE];
- UCHAR ucEthCSDestMACLen;
- UCHAR au8EThCSDestMAC[MAC_ADDRESS_SIZE];
- UCHAR au8EThCSDestMACMask[MAC_ADDRESS_SIZE];
- UCHAR ucEtherTypeLen;
- UCHAR au8EthCSEtherType[NUM_ETHERTYPE_BYTES];
- UCHAR usUserPriority[2];
- USHORT usVLANID;
- USHORT usValidityBitMap;
-};
-
-struct bcm_fragmented_packet_info {
- bool bUsed;
- ULONG ulSrcIpAddress;
- USHORT usIpIdentification;
- struct bcm_classifier_rule *pstMatchedClassifierEntry;
- bool bOutOfOrderFragment;
-};
-
-struct bcm_packet_info {
- /* classification extension Rule */
- ULONG ulSFID;
- USHORT usVCID_Value;
- UINT uiThreshold;
- /* This field determines the priority of the SF Queues */
- B_UINT8 u8TrafficPriority;
-
- bool bValid;
- bool bActive;
- bool bActivateRequestSent;
-
- B_UINT8 u8QueueType; /* BE or rtPS */
-
- /* maximum size of the bucket for the queue */
- UINT uiMaxBucketSize;
- UINT uiCurrentQueueDepthOnTarget;
- UINT uiCurrentBytesOnHost;
- UINT uiCurrentPacketsOnHost;
- UINT uiDroppedCountBytes;
- UINT uiDroppedCountPackets;
- UINT uiSentBytes;
- UINT uiSentPackets;
- UINT uiCurrentDrainRate;
- UINT uiThisPeriodSentBytes;
- LARGE_INTEGER liDrainCalculated;
- UINT uiCurrentTokenCount;
- LARGE_INTEGER liLastUpdateTokenAt;
- UINT uiMaxAllowedRate;
- UINT NumOfPacketsSent;
- UCHAR ucDirection;
- USHORT usCID;
- struct bcm_mibs_parameters stMibsExtServiceFlowTable;
- UINT uiCurrentRxRate;
- UINT uiThisPeriodRxBytes;
- UINT uiTotalRxBytes;
- UINT uiTotalTxBytes;
- UINT uiPendedLast;
- UCHAR ucIpVersion;
-
- union {
- struct {
- struct sk_buff *FirstTxQueue;
- struct sk_buff *LastTxQueue;
- };
- struct {
- struct sk_buff *ControlHead;
- struct sk_buff *ControlTail;
- };
- };
-
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
-
- bool bActiveSet;
- bool bAdmittedSet;
- bool bAuthorizedSet;
- bool bClassifierPriority;
- UCHAR ucServiceClassName[MAX_CLASS_NAME_LENGTH];
- bool bHeaderSuppressionEnabled;
- spinlock_t SFQueueLock;
- void *pstSFIndication;
- struct timeval stLastUpdateTokenAt;
- atomic_t uiPerSFTxResourceCount;
- UINT uiMaxLatency;
- UCHAR bIPCSSupport;
- UCHAR bEthCSSupport;
-};
-
-struct bcm_tarang_data {
- struct bcm_tarang_data *next;
- struct bcm_mini_adapter *Adapter;
- struct sk_buff *RxAppControlHead;
- struct sk_buff *RxAppControlTail;
- int AppCtrlQueueLen;
- bool MacTracingEnabled;
- bool bApplicationToExit;
- struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs;
- ULONG RxCntrlMsgBitMask;
-};
-
-struct bcm_targetdsx_buffer {
- ULONG ulTargetDsxBuffer;
- B_UINT16 tid;
- bool valid;
-};
-
-typedef int (*FP_FLASH_WRITE)(struct bcm_mini_adapter *, UINT, PVOID);
-
-typedef int (*FP_FLASH_WRITE_STATUS)(struct bcm_mini_adapter *, UINT, PVOID);
-
-/*
- * Driver adapter data structure
- */
-struct bcm_mini_adapter {
- struct bcm_mini_adapter *next;
- struct net_device *dev;
- u32 msg_enable;
- CHAR *caDsxReqResp;
- atomic_t ApplicationRunning;
- bool AppCtrlQueueOverFlow;
- atomic_t CurrentApplicationCount;
- atomic_t RegisteredApplicationCount;
- bool LinkUpStatus;
- bool TimerActive;
- u32 StatisticsPointer;
- struct sk_buff *RxControlHead;
- struct sk_buff *RxControlTail;
- struct semaphore RxAppControlQueuelock;
- struct semaphore fw_download_sema;
- struct bcm_tarang_data *pTarangs;
- spinlock_t control_queue_lock;
- wait_queue_head_t process_read_wait_queue;
-
- /* the pointer to the first packet we have queued in send
- * deserialized miniport support variables
- */
- atomic_t TotalPacketCount;
- atomic_t TxPktAvail;
-
- /* this is to keep track of the Tx and Rx MailBox Registers. */
- atomic_t CurrNumFreeTxDesc;
- /* to keep track of the number of bytes received */
- USHORT PrevNumRecvDescs;
- USHORT CurrNumRecvDescs;
- UINT u32TotalDSD;
- struct bcm_packet_info PackInfo[NO_OF_QUEUES];
- struct bcm_classifier_rule astClassifierTable[MAX_CLASSIFIERS];
- bool TransferMode;
-
- /*************** qos ******************/
- bool bETHCSEnabled;
- ULONG BEBucketSize;
- ULONG rtPSBucketSize;
- UCHAR LinkStatus;
- bool AutoLinkUp;
- bool AutoSyncup;
-
- int major;
- int minor;
- wait_queue_head_t tx_packet_wait_queue;
- wait_queue_head_t process_rx_cntrlpkt;
- atomic_t process_waiting;
- bool fw_download_done;
-
- char *txctlpacket[MAX_CNTRL_PKTS];
- atomic_t cntrlpktCnt;
- atomic_t index_app_read_cntrlpkt;
- atomic_t index_wr_txcntrlpkt;
- atomic_t index_rd_txcntrlpkt;
- UINT index_datpkt;
- struct semaphore rdmwrmsync;
-
- struct bcm_targetdsx_buffer astTargetDsxBuffer[MAX_TARGET_DSX_BUFFERS];
- ULONG ulFreeTargetBufferCnt;
- ULONG ulCurrentTargetBuffer;
- ULONG ulTotalTargetBuffersAvailable;
- unsigned long chip_id;
- wait_queue_head_t lowpower_mode_wait_queue;
- bool bFlashBoot;
- bool bBinDownloaded;
- bool bCfgDownloaded;
- bool bSyncUpRequestSent;
- USHORT usBestEffortQueueIndex;
- wait_queue_head_t ioctl_fw_dnld_wait_queue;
- bool waiting_to_fw_download_done;
- pid_t fw_download_process_pid;
- struct bcm_target_params *pstargetparams;
- bool device_removed;
- bool DeviceAccess;
- bool bIsAutoCorrectEnabled;
- bool bDDRInitDone;
- int DDRSetting;
- ULONG ulPowerSaveMode;
- spinlock_t txtransmitlock;
- B_UINT8 txtransmit_running;
- /* Thread for control packet handling */
- struct task_struct *control_packet_handler;
- /* thread for transmitting packets. */
- struct task_struct *transmit_packet_thread;
-
- /* LED Related Structures */
- struct bcm_led_info LEDInfo;
-
- /* Driver State for LED Blinking */
- enum bcm_led_events DriverState;
- /* Interface Specific */
- PVOID pvInterfaceAdapter;
- int (*bcm_file_download)(PVOID,
- struct file *,
- unsigned int);
- int (*bcm_file_readback_from_chip)(PVOID,
- struct file *,
- unsigned int);
- int (*interface_rdm)(PVOID,
- UINT,
- PVOID,
- int);
- int (*interface_wrm)(PVOID,
- UINT,
- PVOID,
- int);
- int (*interface_transmit)(PVOID, PVOID , UINT);
- bool IdleMode;
- bool bDregRequestSentInIdleMode;
- bool bTriedToWakeUpFromlowPowerMode;
- bool bShutStatus;
- bool bWakeUpDevice;
- unsigned int usIdleModePattern;
- /* BOOLEAN bTriedToWakeUpFromShutdown; */
- bool bLinkDownRequested;
- int downloadDDR;
- struct bcm_phs_extension stBCMPhsContext;
- struct bcm_hdr_suppression_contextinfo stPhsTxContextInfo;
- uint8_t ucaPHSPktRestoreBuf[2048];
- uint8_t bPHSEnabled;
- bool AutoFirmDld;
- bool bMipsConfig;
- bool bDPLLConfig;
- UINT32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- UINT32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- struct bcm_fragmented_packet_info
- astFragmentedPktClassifierTable[MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES];
- atomic_t uiMBupdate;
- UINT32 PmuMode;
- enum bcm_nvm_type eNVMType;
- UINT uiSectorSize;
- UINT uiSectorSizeInCFG;
- bool bSectorSizeOverride;
- bool bStatusWrite;
- UINT uiNVMDSDSize;
- UINT uiVendorExtnFlag;
- /* it always represents the chosen DSD at any point in time.
- * Generally it is the Active DSD, but in case of NVM RD/WR it
- * might be different.
- */
- UINT ulFlashCalStart;
- ULONG ulFlashControlSectionStart;
- ULONG ulFlashWriteSize;
- ULONG ulFlashID;
- FP_FLASH_WRITE fpFlashWrite;
- FP_FLASH_WRITE_STATUS fpFlashWriteWithStatusCheck;
-
- struct semaphore NVMRdmWrmLock;
- struct device *pstCreatedClassDevice;
-
- /* BOOLEAN InterfaceUpStatus; */
- struct bcm_flash2x_cs_info *psFlash2xCSInfo;
- struct bcm_flash_cs_info *psFlashCSInfo;
- struct bcm_flash2x_vendor_info *psFlash2xVendorInfo;
- UINT uiFlashBaseAdd; /* Flash start address */
- /* Active ISO offset chosen before f/w download */
- UINT uiActiveISOOffset;
- enum bcm_flash2x_section_val eActiveISO; /* Active ISO section val */
- /* Active DSD val chosen before f/w download */
- enum bcm_flash2x_section_val eActiveDSD;
- /* For accessing Active DSD chosen before f/w download */
- UINT uiActiveDSDOffsetAtFwDld;
- UINT uiFlashLayoutMajorVersion;
- UINT uiFlashLayoutMinorVersion;
- bool bAllDSDWriteAllow;
- bool bSigCorrupted;
- /* this should be set by whoever wants to change the Headers;
- * after the Write it should be reset immediately.
- */
- bool bHeaderChangeAllowed;
- int SelectedChip;
- bool bEndPointHalted;
- /* while bFlashRawRead is true, the Driver
- * ignores the map layout and treats the flash as if it had no map.
- */
- bool bFlashRawRead;
- bool bPreparingForLowPowerMode;
- bool bDoSuspend;
- UINT syscfgBefFwDld;
- bool StopAllXaction;
- /* Used to Support extended CAPI requirements from */
- UINT32 liTimeSinceLastNetEntry;
- struct semaphore LowPowerModeSync;
- ULONG liDrainCalculated;
- UINT gpioBitMap;
- struct bcm_debug_state stDebugState;
-};
-
-#define GET_BCM_ADAPTER(net_dev) netdev_priv(net_dev)
-
-struct bcm_eth_header {
- UCHAR au8DestinationAddress[6];
- UCHAR au8SourceAddress[6];
- USHORT u16Etype;
-} __packed;
-
-struct bcm_firmware_info {
- void __user *pvMappedFirmwareAddress;
- ULONG u32FirmwareLength;
- ULONG u32StartingAddress;
-} __packed;
-
-/* holds the pointer to the net_device structure */
-extern struct net_device *gblpnetdev;
-
-struct bcm_ddr_setting {
- UINT ulRegAddress;
- UINT ulRegValue;
-};
-int InitAdapter(struct bcm_mini_adapter *psAdapter);
-
-/* =====================================================================
- * Beceem vendor request codes for EP0
- * =====================================================================
- */
-
-#define BCM_REQUEST_READ 0x2
-#define BCM_REQUEST_WRITE 0x1
-#define EP2_MPS_REG 0x0F0110A0
-#define EP2_MPS 0x40
-
-#define EP2_CFG_REG 0x0F0110A8
-#define EP2_CFG_INT 0x27
-#define EP2_CFG_BULK 0x25
-
-#define EP4_MPS_REG 0x0F0110F0
-#define EP4_MPS 0x8C
-
-#define EP4_CFG_REG 0x0F0110F8
-
-#define ISO_MPS_REG 0x0F0110C8
-#define ISO_MPS 0x00000000
-
-#define EP1 0
-#define EP2 1
-#define EP3 2
-#define EP4 3
-#define EP5 4
-#define EP6 5
-
-enum bcm_einterface_setting {
- DEFAULT_SETTING_0 = 0,
- ALTERNATE_SETTING_1 = 1,
-};
-
-#endif /* __ADAPTER_H__ */
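GET_BCM_ADAPTER() above is just netdev_priv(): the adapter state lives in the private area allocated together with the net_device. A sketch of the allocation side of that pattern, with an invented private struct:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    struct demo_priv {              /* invented private state */
            int link_up;
    };

    /* Sketch: alloc_etherdev() reserves room for demo_priv right behind
     * the net_device, and netdev_priv() hands that region back, which is
     * all GET_BCM_ADAPTER(net_dev) expands to. */
    static struct net_device *demo_alloc(void)
    {
            struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
            struct demo_priv *priv;

            if (!dev)
                    return NULL;

            priv = netdev_priv(dev);
            priv->link_up = 0;
            return dev;
    }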
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
deleted file mode 100644
index 88ce2da531c6..000000000000
--- a/drivers/staging/bcm/Bcmchar.c
+++ /dev/null
@@ -1,2652 +0,0 @@
-#include <linux/fs.h>
-
-#include "headers.h"
-
-static int bcm_handle_nvm_read_cmd(struct bcm_mini_adapter *ad,
- PUCHAR read_data,
- struct bcm_nvm_readwrite *nvm_rw)
-{
- INT status = STATUS_FAILURE;
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) || (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return -EACCES;
- }
-
- status = BeceemNVMRead(ad, (PUINT)read_data,
- nvm_rw->uiOffset,
- nvm_rw->uiNumBytes);
- up(&ad->NVMRdmWrmLock);
-
- if (status != STATUS_SUCCESS) {
- kfree(read_data);
- return status;
- }
-
- if (copy_to_user(nvm_rw->pBuffer, read_data, nvm_rw->uiNumBytes)) {
- kfree(read_data);
- return -EFAULT;
- }
-
- return STATUS_SUCCESS;
-}
-
-static int handle_flash2x_adapter(struct bcm_mini_adapter *ad,
- PUCHAR read_data,
- struct bcm_nvm_readwrite *nvm_rw)
-{
- /*
- * New requirement: updating the DSD section is allowed in two cases:
- * 1. if the DSD sig is present in the DSD header, the dongle
- * is OK and the update is worthwhile;
- * 2. if point 1 fails, the user buffer must carry the DSD sig.
- * This ensures that if the dongle is corrupted, the user space
- * program first rewrites the DSD header with a valid DSD sig so
- * that this write, as well as further writes, can succeed.
- *
- * This restriction assumes that if the DSD sig is corrupted,
- * the DSD data won't be considered valid.
- */
- INT status;
- ULONG dsd_magic_num_in_usr_buff = 0;
-
- status = BcmFlash2xCorruptSig(ad, ad->eActiveDSD);
- if (status == STATUS_SUCCESS)
- return STATUS_SUCCESS;
-
- if (((nvm_rw->uiOffset + nvm_rw->uiNumBytes) !=
- ad->uiNVMDSDSize) ||
- (nvm_rw->uiNumBytes < SIGNATURE_SIZE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "DSD Sig is present neither in Flash nor User provided Input..");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return status;
- }
-
- dsd_magic_num_in_usr_buff =
- ntohl(*(PUINT)(read_data + nvm_rw->uiNumBytes -
- SIGNATURE_SIZE));
- if (dsd_magic_num_in_usr_buff != DSD_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "DSD Sig is present neither in Flash nor User provided Input..");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return status;
- }
-
- return STATUS_SUCCESS;
-}
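handle_flash2x_adapter() implements the fallback described in its comment: when the on-flash DSD signature is already gone, the write is accepted only if it ends exactly at the DSD boundary and the last SIGNATURE_SIZE bytes of the user buffer decode (big-endian) to the DSD magic. A stand-alone sketch of that check; the 4-byte signature size, the magic value and the helper name are assumptions, not the driver's real constants:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SIG_SIZE  4                     /* assumed signature size */
    #define DSD_MAGIC 0xBECE0301u           /* placeholder magic value */

    /* Accept the write only if it ends exactly at the DSD boundary and
     * its last SIG_SIZE bytes decode (big-endian) to the magic. */
    static int buffer_carries_sig(const uint8_t *buf, size_t nbytes,
                                  size_t offset, size_t dsd_size)
    {
            uint32_t raw;

            if (offset + nbytes != dsd_size || nbytes < SIG_SIZE)
                    return 0;       /* write does not cover the signature */

            memcpy(&raw, buf + nbytes - SIG_SIZE, SIG_SIZE);
            return ntohl(raw) == DSD_MAGIC;
    }

    int main(void)
    {
            uint8_t buf[16] = { 0 };
            uint32_t sig = htonl(DSD_MAGIC);

            memcpy(buf + sizeof(buf) - SIG_SIZE, &sig, SIG_SIZE);
            printf("%d\n", buffer_carries_sig(buf, sizeof(buf), 0, sizeof(buf)));
            return 0;       /* prints 1 */
    }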
-
-/***************************************************************
-* Function - bcm_char_open()
-*
-* Description - This is the "open" entry point for the character
-* driver.
-*
-* Parameters - inode: Pointer to the Inode structure of char device
-* filp : File pointer of the char device
-*
-* Returns - Zero(Success)
-****************************************************************/
-
-static int bcm_char_open(struct inode *inode, struct file *filp)
-{
- struct bcm_mini_adapter *ad = NULL;
- struct bcm_tarang_data *tarang = NULL;
-
- ad = GET_BCM_ADAPTER(gblpnetdev);
- tarang = kzalloc(sizeof(struct bcm_tarang_data), GFP_KERNEL);
- if (!tarang)
- return -ENOMEM;
-
- tarang->Adapter = ad;
- tarang->RxCntrlMsgBitMask = 0xFFFFFFFF & ~(1 << 0xB);
-
- down(&ad->RxAppControlQueuelock);
- tarang->next = ad->pTarangs;
- ad->pTarangs = tarang;
- up(&ad->RxAppControlQueuelock);
-
- /* Store the Adapter structure */
- filp->private_data = tarang;
-
- /* Start Queuing the control response Packets */
- atomic_inc(&ad->ApplicationRunning);
-
- nonseekable_open(inode, filp);
- return 0;
-}
-
-static int bcm_char_release(struct inode *inode, struct file *filp)
-{
- struct bcm_tarang_data *tarang, *tmp, *ptmp;
- struct bcm_mini_adapter *ad = NULL;
- struct sk_buff *pkt, *npkt;
-
- tarang = (struct bcm_tarang_data *)filp->private_data;
-
- if (tarang == NULL)
- return 0;
-
- ad = tarang->Adapter;
-
- down(&ad->RxAppControlQueuelock);
-
- tmp = ad->pTarangs;
- for (ptmp = NULL; tmp; ptmp = tmp, tmp = tmp->next) {
- if (tmp == tarang)
- break;
- }
-
- if (tmp) {
- if (!ptmp)
- ad->pTarangs = tmp->next;
- else
- ptmp->next = tmp->next;
- } else {
- up(&ad->RxAppControlQueuelock);
- return 0;
- }
-
- pkt = tarang->RxAppControlHead;
- while (pkt) {
- npkt = pkt->next;
- kfree_skb(pkt);
- pkt = npkt;
- }
-
- up(&ad->RxAppControlQueuelock);
-
- /* Stop Queuing the control response Packets */
- atomic_dec(&ad->ApplicationRunning);
-
- kfree(tarang);
-
- /* remove this filp from the asynchronously notified filp's */
- filp->private_data = NULL;
- return 0;
-}
-
-static ssize_t bcm_char_read(struct file *filp,
- char __user *buf,
- size_t size,
- loff_t *f_pos)
-{
- struct bcm_tarang_data *tarang = filp->private_data;
- struct bcm_mini_adapter *ad = tarang->Adapter;
- struct sk_buff *packet = NULL;
- ssize_t pkt_len = 0;
- int wait_ret_val = 0;
- unsigned long ret = 0;
-
- wait_ret_val = wait_event_interruptible(
- ad->process_read_wait_queue,
- (tarang->RxAppControlHead ||
- ad->device_removed));
-
- if ((wait_ret_val == -ERESTARTSYS)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Exiting as i've been asked to exit!!!\n");
- return wait_ret_val;
- }
-
- if (ad->device_removed) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device Removed... Killing the Apps...\n");
- return -ENODEV;
- }
-
- if (false == ad->fw_download_done)
- return -EACCES;
-
- down(&ad->RxAppControlQueuelock);
-
- if (tarang->RxAppControlHead) {
- packet = tarang->RxAppControlHead;
- DEQUEUEPACKET(tarang->RxAppControlHead,
- tarang->RxAppControlTail);
- tarang->AppCtrlQueueLen--;
- }
-
- up(&ad->RxAppControlQueuelock);
-
- if (packet) {
- pkt_len = packet->len;
- ret = copy_to_user(buf, packet->data,
- min_t(size_t, pkt_len, size));
- if (ret) {
- dev_kfree_skb(packet);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Returning from copy to user failure\n");
- return -EFAULT;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Read %zd Bytes From Adapter packet = %p by process %d!\n",
- pkt_len, packet, current->pid);
- dev_kfree_skb(packet);
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<\n");
- return pkt_len;
-}
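bcm_char_read() above follows the usual blocking char-device read shape: sleep until a control packet is queued or the device disappears, detach one packet under the queue lock, then copy it out. A compact sketch of the same control flow with invented names (this is not the bcm code; it uses an skb queue with its own internal locking):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <linux/uaccess.h>
    #include <linux/wait.h>

    struct demo_dev {                       /* invented per-open state */
            wait_queue_head_t readq;
            struct sk_buff_head rxq;
            bool removed;
    };

    static ssize_t demo_read(struct file *filp, char __user *buf,
                             size_t size, loff_t *pos)
    {
            struct demo_dev *dd = filp->private_data;
            struct sk_buff *skb;
            ssize_t count;

            if (wait_event_interruptible(dd->readq,
                                         !skb_queue_empty(&dd->rxq) ||
                                         dd->removed))
                    return -ERESTARTSYS;
            if (dd->removed)
                    return -ENODEV;

            skb = skb_dequeue(&dd->rxq);    /* locks the queue internally */
            if (!skb)
                    return 0;

            count = min_t(size_t, skb->len, size);
            if (copy_to_user(buf, skb->data, count))
                    count = -EFAULT;

            kfree_skb(skb);
            return count;
    }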
-
-static int bcm_char_ioctl_reg_read_private(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_rdm_buffer rdm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- PCHAR temp_buff;
- INT status = STATUS_FAILURE;
- UINT buff_len;
- u16 temp_value;
- int bytes;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(rdm_buff))
- return -EINVAL;
-
- if (copy_from_user(&rdm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (io_buff.OutputLength > USHRT_MAX ||
- io_buff.OutputLength == 0) {
- return -EINVAL;
- }
-
- buff_len = io_buff.OutputLength;
- temp_value = 4 - (buff_len % 4);
- buff_len += temp_value % 4;
-
- temp_buff = kmalloc(buff_len, GFP_KERNEL);
- if (!temp_buff)
- return -ENOMEM;
-
- bytes = rdmalt(ad, (UINT)rdm_buff.Register,
- (PUINT)temp_buff, buff_len);
- if (bytes > 0) {
- status = STATUS_SUCCESS;
- if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- status = bytes;
- }
-
- kfree(temp_buff);
- return status;
-}
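Before allocating the bounce buffer, the handler rounds the requested output length up to the next multiple of 4, presumably because the register read path moves whole 32-bit words. The padding arithmetic in isolation (the helper name is invented):

    #include <stdio.h>

    /* Round len up to the next multiple of 4, leaving exact multiples
     * untouched. */
    static unsigned int round_up4(unsigned int len)
    {
            unsigned int pad = 4 - (len % 4);

            return len + (pad % 4);         /* pad % 4 is 0 when aligned */
    }

    int main(void)
    {
            unsigned int v;

            for (v = 1; v <= 8; v++)
                    printf("%u -> %u\n", v, round_up4(v));
            return 0;       /* 1..4 map to 4, 5..8 map to 8 */
    }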
-
-static int bcm_char_ioctl_reg_write_private(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_wrm_buffer wrm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status;
-
- /* Copy Ioctl Buffer structure */
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(wrm_buff))
- return -EINVAL;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(&wrm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- tmp = wrm_buff.Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4))) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- status = wrmalt(ad, (UINT)wrm_buff.Register,
- (PUINT)wrm_buff.Data, sizeof(ULONG));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- status = -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_eeprom_reg_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_rdm_buffer rdm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- PCHAR temp_buff = NULL;
- UINT tmp = 0;
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Rdms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(rdm_buff))
- return -EINVAL;
-
- if (copy_from_user(&rdm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (io_buff.OutputLength > USHRT_MAX ||
- io_buff.OutputLength == 0) {
- return -EINVAL;
- }
-
- temp_buff = kmalloc(io_buff.OutputLength, GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
-
- if ((((ULONG)rdm_buff.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)rdm_buff.Register & 0x3)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM Done On invalid Address : %x Access Denied.\n",
- (int)rdm_buff.Register);
-
- kfree(temp_buff);
- return -EINVAL;
- }
-
- tmp = rdm_buff.Register & EEPROM_REJECT_MASK;
- bytes = rdmaltWithLock(ad, (UINT)rdm_buff.Register,
- (PUINT)temp_buff, io_buff.OutputLength);
-
- if (bytes > 0) {
- status = STATUS_SUCCESS;
- if (copy_to_user(io_buff.OutputBuffer, temp_buff, bytes)) {
- kfree(temp_buff);
- return -EFAULT;
- }
- } else {
- status = bytes;
- }
-
- kfree(temp_buff);
- return status;
-}
-
-static int bcm_char_ioctl_eeprom_reg_write(void __user *argp,
- struct bcm_mini_adapter *ad,
- UINT cmd)
-{
- struct bcm_wrm_buffer wrm_buff = {0};
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle Mode, Blocking Wrms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(wrm_buff))
- return -EINVAL;
-
- /* Get WrmBuffer structure */
- if (copy_from_user(&wrm_buff, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if ((((ULONG)wrm_buff.Register & 0x0F000000) != 0x0F000000) ||
- ((ULONG)wrm_buff.Register & 0x3)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)wrm_buff.Register);
- return -EINVAL;
- }
-
- tmp = wrm_buff.Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize) & VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- status = wrmaltWithLock(ad, (UINT)wrm_buff.Register,
- (PUINT)wrm_buff.Data,
- wrm_buff.Length);
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG,
- DBG_LVL_ALL, "WRM Done\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "WRM Failed\n");
- status = -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_gpio_set_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_info gpio_info = {0};
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- UINT value = 0;
- UINT bit = 0;
- UINT operation = 0;
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- bit = gpio_info.uiGpioNumber;
- operation = gpio_info.uiGpioValue;
- value = (1<<bit);
-
- if (IsReqGpioIsLedInNVM(ad, value) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!",
- value);
- return -EINVAL;
- }
-
- /* Set - setting 1 */
- if (operation) {
- /* Set the gpio output register */
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)(&value), sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to set the %dth GPIO\n",
- bit);
- return status;
- }
- } else {
- /* Set the gpio output register */
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_CLR_REG,
- (PUINT)(&value), sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Set the GPIO bit\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Failed to clear the %dth GPIO\n",
- bit);
- return status;
- }
- }
-
- bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "GPIO_MODE_REGISTER read failed");
- return status;
- }
- status = STATUS_SUCCESS;
-
- /* Set the gpio mode register to output */
- *(UINT *)reset_val |= (1<<bit);
- status = wrmaltWithLock(ad, GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Set the GPIO to output Mode\n");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Failed to put GPIO in Output Mode\n");
- }
-
- return status;
-}
-
-static int bcm_char_ioctl_led_thread_state_change_req(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_user_thread_req thread_req = {0};
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "User made LED thread InActive");
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "GPIO Can't be set/clear in Low power Mode");
- return -EACCES;
- }
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(thread_req))
- return -EINVAL;
-
- if (copy_from_user(&thread_req, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- /* if the LED thread is running (actively or inactively),
- * set its state to inactive
- */
- if (ad->LEDInfo.led_thread_running) {
- if (thread_req.ThreadState == LED_THREAD_ACTIVATION_REQ) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "Activating thread req");
- ad->DriverState = LED_THREAD_ACTIVE;
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL,
- "DeActivating Thread req.....");
- ad->DriverState = LED_THREAD_INACTIVE;
- }
-
- /* signal thread. */
- wake_up(&ad->LEDInfo.notify_led_event);
- }
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_gpio_status_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_info gpio_info = {0};
- struct bcm_ioctl_buffer io_buff;
- ULONG bit = 0;
- UCHAR read[4];
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EACCES;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_info))
- return -EINVAL;
-
- if (copy_from_user(&gpio_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- bit = gpio_info.uiGpioNumber;
-
- /* Set the gpio output register */
- bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)read, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM Failed\n");
- return status;
- }
- status = STATUS_SUCCESS;
- return status;
-}
-
-static int bcm_char_ioctl_gpio_multi_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_multi_info gpio_multi_info[MAX_IDX];
- struct bcm_gpio_multi_info *pgpio_multi_info =
- (struct bcm_gpio_multi_info *)gpio_multi_info;
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- INT status = STATUS_FAILURE;
- int bytes;
-
- memset(pgpio_multi_info, 0,
- MAX_IDX * sizeof(struct bcm_gpio_multi_info));
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_multi_info))
- return -EINVAL;
- if (io_buff.OutputLength > sizeof(gpio_multi_info))
- io_buff.OutputLength = sizeof(gpio_multi_info);
-
- if (copy_from_user(&gpio_multi_info, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- if (IsReqGpioIsLedInNVM(ad, pgpio_multi_info[WIMAX_IDX].uiGPIOMask)
- == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask,
- ad->gpioBitMap);
- return -EINVAL;
- }
-
- /* Set the gpio output register */
- if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) &
- (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) {
- /* Set 1's in GPIO OUTPUT REGISTER */
- *(UINT *)reset_val = pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue;
-
- if (*(UINT *) reset_val)
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_SET_REG,
- (PUINT)reset_val, sizeof(ULONG));
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_SET_REG Failed.");
- return status;
- }
-
- /* Clear to 0's in GPIO OUTPUT REGISTER */
- *(UINT *)reset_val =
- (pgpio_multi_info[WIMAX_IDX].uiGPIOMask &
- pgpio_multi_info[WIMAX_IDX].uiGPIOCommand &
- (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue)));
-
- if (*(UINT *) reset_val)
- status = wrmaltWithLock(ad,
- BCM_GPIO_OUTPUT_CLR_REG, (PUINT)reset_val,
- sizeof(ULONG));
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed.");
- return status;
- }
- }
-
- if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- bytes = rdmaltWithLock(ad, (UINT)GPIO_PIN_STATE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "RDM to GPIO_PIN_STATE_REGISTER Failed.");
- return status;
- }
- status = STATUS_SUCCESS;
-
- pgpio_multi_info[WIMAX_IDX].uiGPIOValue =
- (*(UINT *)reset_val &
- pgpio_multi_info[WIMAX_IDX].uiGPIOMask);
- }
-
- status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_info,
- io_buff.OutputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d",
- status);
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_gpio_mode_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_gpio_multi_mode gpio_multi_mode[MAX_IDX];
- struct bcm_gpio_multi_mode *pgpio_multi_mode =
- (struct bcm_gpio_multi_mode *)gpio_multi_mode;
- struct bcm_ioctl_buffer io_buff;
- UCHAR reset_val[4];
- INT status;
- int bytes;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE))
- return -EINVAL;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength > sizeof(gpio_multi_mode))
- return -EINVAL;
- if (io_buff.OutputLength > sizeof(gpio_multi_mode))
- io_buff.OutputLength = sizeof(gpio_multi_mode);
-
- if (copy_from_user(&gpio_multi_mode, io_buff.InputBuffer,
- io_buff.InputLength))
- return -EFAULT;
-
- bytes = rdmaltWithLock(ad, (UINT)GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(UINT));
-
- if (bytes < 0) {
- status = bytes;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Read of GPIO_MODE_REGISTER failed");
- return status;
- }
- status = STATUS_SUCCESS;
-
- /* Validating the request */
- if (IsReqGpioIsLedInNVM(ad, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask)
- == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask,
- ad->gpioBitMap);
- return -EINVAL;
- }
-
- if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) {
- /* write all OUT's (1's) */
- *(UINT *) reset_val |=
- (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
-
- /* write all IN's (0's) */
- *(UINT *) reset_val &=
- ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) &
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMask);
-
- /* Currently implemented to return the modes of all GPIOs;
- * otherwise it would need a bitwise AND with the mask
- */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val;
-
- status = wrmaltWithLock(ad, GPIO_MODE_REGISTER,
- (PUINT)reset_val, sizeof(ULONG));
- if (status == STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "WRM to GPIO_MODE_REGISTER Done");
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM to GPIO_MODE_REGISTER Failed");
- return -EFAULT;
- }
- } else {
- /* if uiGPIOMask is 0 then return mode register configuration */
- pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)reset_val;
- }
-
- status = copy_to_user(io_buff.OutputBuffer, &gpio_multi_mode,
- io_buff.OutputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Failed while copying Content to IOBufer for user space err:%d",
- status);
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_misc_request(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- PVOID buff = NULL;
- INT status;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength < sizeof(struct bcm_link_request))
- return -EINVAL;
-
- if (io_buff.InputLength > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
-
- buff = memdup_user(io_buff.InputBuffer,
- io_buff.InputLength);
- if (IS_ERR(buff))
- return PTR_ERR(buff);
-
- down(&ad->LowPowerModeSync);
- status = wait_event_interruptible_timeout(
- ad->lowpower_mode_wait_queue,
- !ad->bPreparingForLowPowerMode,
- (1 * HZ));
-
- if (status == -ERESTARTSYS)
- goto cntrlEnd;
-
- if (ad->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Preparing Idle Mode is still True - Hence Rejecting control message\n");
- status = STATUS_FAILURE;
- goto cntrlEnd;
- }
- status = CopyBufferToControlPacket(ad, (PVOID)buff);
-
-cntrlEnd:
- up(&ad->LowPowerModeSync);
- kfree(buff);
- return status;
-}
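The control-message path copies the whole user payload with memdup_user(), which combines the allocation, the copy_from_user() and the error unwinding into one call. A sketch of that idiom with an assumed payload handler:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* Sketch of the memdup_user() idiom: one call allocates a kernel
     * copy of the user buffer and returns an ERR_PTR on failure. */
    static int handle_payload(const void __user *ubuf, size_t len)
    {
            void *kbuf = memdup_user(ubuf, len);
            int ret = 0;

            if (IS_ERR(kbuf))
                    return PTR_ERR(kbuf);   /* -EFAULT or -ENOMEM */

            /* ... validate and queue kbuf here ... */

            kfree(kbuf);
            return ret;
    }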
-
-static int bcm_char_ioctl_buffer_download_start(
- struct bcm_mini_adapter *ad)
-{
- INT status;
-
- if (down_trylock(&ad->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n",
- current->pid);
-
- if (down_trylock(&ad->fw_download_sema))
- return -EBUSY;
-
- ad->bBinDownloaded = false;
- ad->fw_download_process_pid = current->pid;
- ad->bCfgDownloaded = false;
- ad->fw_download_done = false;
- netif_carrier_off(ad->dev);
- netif_stop_queue(ad->dev);
- status = reset_card_proc(ad);
- if (status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", ad->dev->name);
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
- }
- mdelay(10);
-
- up(&ad->NVMRdmWrmLock);
- return status;
-}
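Download-start grabs both the NVM lock and the firmware-download semaphore with down_trylock(), so a concurrent EEPROM access or a second downloader is refused immediately instead of blocking. A sketch of the same fail-fast shape, written so the first lock is backed out when the second one is busy; the semaphores are assumed to be initialised elsewhere and the function is not the bcm code:

    #include <linux/errno.h>
    #include <linux/semaphore.h>

    static int try_start(struct semaphore *nvm_lock,
                         struct semaphore *dl_sema)
    {
            if (down_trylock(nvm_lock))
                    return -EACCES;         /* NVM read/write in progress */

            if (down_trylock(dl_sema)) {
                    up(nvm_lock);
                    return -EBUSY;          /* a download is already running */
            }

            /* ... reset the card; dl_sema stays held until "stop" ... */
            up(nvm_lock);
            return 0;
    }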
-
-static int bcm_char_ioctl_buffer_download(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_firmware_info *fw_info = NULL;
- struct bcm_ioctl_buffer io_buff;
- INT status;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Starting the firmware download PID =0x%x!!!!\n", current->pid);
-
- if (!down_trylock(&ad->fw_download_sema)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- up(&ad->fw_download_sema);
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", io_buff.InputLength);
-
- if (io_buff.InputLength > sizeof(struct bcm_firmware_info)) {
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- fw_info = kmalloc(sizeof(*fw_info), GFP_KERNEL);
- if (!fw_info) {
- up(&ad->fw_download_sema);
- return -ENOMEM;
- }
-
- if (copy_from_user(fw_info, io_buff.InputBuffer,
- io_buff.InputLength)) {
- up(&ad->fw_download_sema);
- kfree(fw_info);
- return -EFAULT;
- }
-
- if (!fw_info->pvMappedFirmwareAddress ||
- (fw_info->u32FirmwareLength == 0)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Something else is wrong %lu\n",
- fw_info->u32FirmwareLength);
- up(&ad->fw_download_sema);
- kfree(fw_info);
- status = -EINVAL;
- return status;
- }
-
- status = bcm_ioctl_fw_download(ad, fw_info);
-
- if (status != STATUS_SUCCESS) {
- if (fw_info->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL: Firmware File Upload Failed\n");
-
- /* up(&ad->fw_download_sema); */
-
- if (ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ad->DriverState = DRIVER_INIT;
- ad->LEDInfo.bLedInitDone = false;
- wake_up(&ad->LEDInfo.notify_led_event);
- }
- }
-
- if (status != STATUS_SUCCESS)
- up(&ad->fw_download_sema);
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL: Firmware File Uploaded\n");
- kfree(fw_info);
- return status;
-}
-
-static int bcm_char_ioctl_buffer_download_stop(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- INT status;
- int timeout = 0;
-
- if (!down_trylock(&ad->fw_download_sema)) {
- up(&ad->fw_download_sema);
- return -EINVAL;
- }
-
- if (down_trylock(&ad->NVMRdmWrmLock)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "FW download blocked as EEPROM Read/Write is in progress\n");
- up(&ad->fw_download_sema);
- return -EACCES;
- }
-
- ad->bBinDownloaded = TRUE;
- ad->bCfgDownloaded = TRUE;
- atomic_set(&ad->CurrNumFreeTxDesc, 0);
- ad->CurrNumRecvDescs = 0;
- ad->downloadDDR = 0;
-
- /* setting the Mips to Run */
- status = run_card_proc(ad);
-
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Firm Download Failed\n");
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
-
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((struct bcm_interface_adapter *)ad->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- ad->waiting_to_fw_download_done = false;
- wait_event_timeout(ad->ioctl_fw_dnld_wait_queue,
- ad->waiting_to_fw_download_done, timeout);
- ad->fw_download_process_pid = INVALID_PID;
- ad->fw_download_done = TRUE;
- atomic_set(&ad->CurrNumFreeTxDesc, 0);
- ad->CurrNumRecvDescs = 0;
- ad->PrevNumRecvDescs = 0;
- atomic_set(&ad->cntrlpktCnt, 0);
- ad->LinkUpStatus = 0;
- ad->LinkStatus = 0;
-
- if (ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ad->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&ad->LEDInfo.notify_led_event);
- }
-
- if (!timeout)
- status = -ENODEV;
-
- up(&ad->fw_download_sema);
- up(&ad->NVMRdmWrmLock);
- return status;
-}
-
-static int bcm_char_ioctl_chip_reset(struct bcm_mini_adapter *ad)
-{
- INT status;
- INT nvm_access;
-
- nvm_access = down_trylock(&ad->NVMRdmWrmLock);
- if (nvm_access) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
- return -EACCES;
- }
-
- down(&ad->RxAppControlQueuelock);
- status = reset_card_proc(ad);
- flushAllAppQ();
- up(&ad->RxAppControlQueuelock);
- up(&ad->NVMRdmWrmLock);
- ResetCounters(ad);
- return status;
-}
-
-static int bcm_char_ioctl_qos_threshold(ULONG arg,
- struct bcm_mini_adapter *ad)
-{
- USHORT i;
-
- for (i = 0; i < NO_OF_QUEUES; i++) {
- if (get_user(ad->PackInfo[i].uiThreshold,
- (unsigned long __user *)arg)) {
- return -EFAULT;
- }
- }
- return 0;
-}
-
-static int bcm_char_ioctl_switch_transfer_mode(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- UINT data = 0;
-
- if (copy_from_user(&data, argp, sizeof(UINT)))
- return -EFAULT;
-
- if (data) {
- /* Allow All Packets */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
- ad->TransferMode = ETH_PACKET_TUNNELING_MODE;
- } else {
- /* Allow IP only Packets */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n");
- ad->TransferMode = IP_PACKET_ONLY_MODE;
- }
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_driver_version(void __user *argp)
-{
- struct bcm_ioctl_buffer io_buff;
- ulong len;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- len = min_t(ulong, io_buff.OutputLength, strlen(DRV_VERSION) + 1);
-
- if (copy_to_user(io_buff.OutputBuffer, DRV_VERSION, len))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_current_status(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_link_state link_state;
- struct bcm_ioctl_buffer io_buff;
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "copy_from_user failed..\n");
- return -EFAULT;
- }
-
- if (io_buff.OutputLength != sizeof(link_state))
- return -EINVAL;
-
- memset(&link_state, 0, sizeof(link_state));
- link_state.bIdleMode = ad->IdleMode;
- link_state.bShutdownMode = ad->bShutStatus;
- link_state.ucLinkStatus = ad->LinkStatus;
-
- if (copy_to_user(io_buff.OutputBuffer, &link_state, min_t(size_t,
- sizeof(link_state), io_buff.OutputLength))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy_to_user Failed..\n");
- return -EFAULT;
- }
- return STATUS_SUCCESS;
-}
-
-
-static int bcm_char_ioctl_set_mac_tracing(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- UINT tracing_flag;
-
- /* copy ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&tracing_flag, io_buff.InputBuffer, sizeof(UINT)))
- return -EFAULT;
-
- if (tracing_flag)
- ad->pTarangs->MacTracingEnabled = TRUE;
- else
- ad->pTarangs->MacTracingEnabled = false;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_dsx_indication(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- ULONG sf_id = 0;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(struct bcm_add_indication_alt)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Mismatch req: %lx needed is =0x%zx!!!",
- io_buff.OutputLength,
- sizeof(struct bcm_add_indication_alt));
- return -EINVAL;
- }
-
- if (copy_from_user(&sf_id, io_buff.InputBuffer, sizeof(sf_id)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Get DSX Data SF ID is =%lx\n", sf_id);
- get_dsx_sf_data_to_application(ad, sf_id, io_buff.OutputBuffer);
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_get_host_mibs(void __user *argp,
- struct bcm_mini_adapter *ad,
- struct bcm_tarang_data *tarang)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- PVOID temp_buff;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength != sizeof(struct bcm_host_stats_mibs)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Length Check failed %lu %zd\n", io_buff.OutputLength,
- sizeof(struct bcm_host_stats_mibs));
- return -EINVAL;
- }
-
- /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
- temp_buff = kzalloc(sizeof(struct bcm_host_stats_mibs), GFP_KERNEL);
- if (!temp_buff)
- return STATUS_FAILURE;
-
- status = ProcessGetHostMibs(ad, temp_buff);
- GetDroppedAppCntrlPktMibs(temp_buff, tarang);
-
- if (status != STATUS_FAILURE) {
- if (copy_to_user(io_buff.OutputBuffer, temp_buff,
- sizeof(struct bcm_host_stats_mibs))) {
- kfree(temp_buff);
- return -EFAULT;
- }
- }
-
- kfree(temp_buff);
- return status;
-}
-
-static int bcm_char_ioctl_bulk_wrm(void __user *argp,
- struct bcm_mini_adapter *ad, UINT cmd)
-{
- struct bcm_bulk_wrm_buffer *bulk_buff;
- struct bcm_ioctl_buffer io_buff;
- UINT tmp = 0;
- INT status = STATUS_FAILURE;
- PCHAR buff = NULL;
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "Device in Idle/Shutdown Mode, Blocking Wrms\n");
- return -EACCES;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.InputLength < sizeof(ULONG) * 2)
- return -EINVAL;
-
- buff = memdup_user(io_buff.InputBuffer,
- io_buff.InputLength);
- if (IS_ERR(buff))
- return PTR_ERR(buff);
-
- bulk_buff = (struct bcm_bulk_wrm_buffer *)buff;
-
- if (((ULONG)bulk_buff->Register & 0x0F000000) != 0x0F000000 ||
- ((ULONG)bulk_buff->Register & 0x3)) {
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "WRM Done On invalid Address : %x Access Denied.\n",
- (int)bulk_buff->Register);
- kfree(buff);
- return -EINVAL;
- }
-
- tmp = bulk_buff->Register & EEPROM_REJECT_MASK;
- if (!((ad->pstargetparams->m_u32Customize)&VSG_MODE) &&
- ((tmp == EEPROM_REJECT_REG_1) ||
- (tmp == EEPROM_REJECT_REG_2) ||
- (tmp == EEPROM_REJECT_REG_3) ||
- (tmp == EEPROM_REJECT_REG_4)) &&
- (cmd == IOCTL_BCM_REGISTER_WRITE)) {
-
- kfree(buff);
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "EEPROM Access Denied, not in VSG Mode\n");
- return -EFAULT;
- }
-
- if (bulk_buff->SwapEndian == false)
- status = wrmWithLock(ad, (UINT)bulk_buff->Register,
- (PCHAR)bulk_buff->Values,
- io_buff.InputLength - 2*sizeof(ULONG));
- else
- status = wrmaltWithLock(ad, (UINT)bulk_buff->Register,
- (PUINT)bulk_buff->Values,
- io_buff.InputLength - 2*sizeof(ULONG));
-
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
-
- kfree(buff);
- return status;
-}
-
-static int bcm_char_ioctl_get_nvm_size(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (ad->eNVMType == NVM_EEPROM || ad->eNVMType == NVM_FLASH) {
- if (copy_to_user(io_buff.OutputBuffer, &ad->uiNVMDSDSize,
- sizeof(UINT)))
- return -EFAULT;
- }
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_cal_init(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- UINT sector_size = 0;
- INT status = STATUS_FAILURE;
-
- if (ad->eNVMType == NVM_FLASH) {
- if (copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&sector_size, io_buff.InputBuffer,
- sizeof(UINT)))
- return -EFAULT;
-
- if ((sector_size < MIN_SECTOR_SIZE) ||
- (sector_size > MAX_SECTOR_SIZE)) {
- if (copy_to_user(io_buff.OutputBuffer,
- &ad->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if (IsFlash2x(ad)) {
- if (copy_to_user(io_buff.OutputBuffer,
- &ad->uiSectorSize, sizeof(UINT)))
- return -EFAULT;
- } else {
- if ((TRUE == ad->bShutStatus) ||
- (TRUE == ad->IdleMode)) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_PRINTK, 0, 0,
- "Device is in Idle/Shutdown Mode\n");
- return -EACCES;
- }
-
- ad->uiSectorSize = sector_size;
- BcmUpdateSectorSize(ad,
- ad->uiSectorSize);
- }
- }
- status = STATUS_SUCCESS;
- } else {
- status = STATUS_FAILURE;
- }
- return status;
-}
-
-static int bcm_char_ioctl_set_debug(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
-#ifdef DEBUG
- struct bcm_ioctl_buffer io_buff;
- struct bcm_user_debug_state user_debug_state;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "In SET_DEBUG ioctl\n");
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&user_debug_state, io_buff.InputBuffer,
- sizeof(struct bcm_user_debug_state)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
- user_debug_state.OnOff, user_debug_state.Type);
- /* user_debug_state.Subtype <<= 1; */
- user_debug_state.Subtype = 1 << user_debug_state.Subtype;
- BCM_DEBUG_PRINT (ad, DBG_TYPE_PRINTK, 0, 0,
- "actual Subtype=0x%x\n", user_debug_state.Subtype);
-
- /* Update new 'DebugState' in the ad */
- ad->stDebugState.type |= user_debug_state.Type;
- /* Subtype: A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array: 1,2,4,8
- * corresponding to valid Type values. Hence we can use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- if (user_debug_state.OnOff)
- ad->stDebugState.subtype[user_debug_state.Type] |=
- user_debug_state.Subtype;
- else
- ad->stDebugState.subtype[user_debug_state.Type] &=
- ~user_debug_state.Subtype;
-
- BCM_SHOW_DEBUG_BITMAP(ad);
-#endif
- return STATUS_SUCCESS;
-}
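SET_DEBUG keeps one 32-bit subtype mask per debug type: the user-supplied Subtype index is turned into a single bit with a left shift, then ORed in to enable or AND-NOT-ed out to disable. The bit arithmetic on its own:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mask = 0;
            unsigned int sub = 3;           /* user passes an index ... */
            uint32_t bit = 1u << sub;       /* ... which becomes one bit */

            mask |= bit;                    /* OnOff != 0: enable */
            printf("enabled:  0x%08x\n", (unsigned int)mask);

            mask &= ~bit;                   /* OnOff == 0: disable */
            printf("disabled: 0x%08x\n", (unsigned int)mask);
            return 0;                       /* 0x00000008 then 0x00000000 */
    }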
-
-static int bcm_char_ioctl_nvm_rw(void __user *argp,
- struct bcm_mini_adapter *ad, UINT cmd)
-{
- struct bcm_nvm_readwrite nvm_rw;
- struct timeval tv0, tv1;
- struct bcm_ioctl_buffer io_buff;
- PUCHAR read_data = NULL;
- INT status = STATUS_FAILURE;
-
- memset(&tv0, 0, sizeof(struct timeval));
- memset(&tv1, 0, sizeof(struct timeval));
- if ((ad->eNVMType == NVM_FLASH) &&
- (ad->uiFlashLayoutMajorVersion == 0)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- return -EFAULT;
- }
-
- if (IsFlash2x(ad)) {
- if ((ad->eActiveDSD != DSD0) &&
- (ad->eActiveDSD != DSD1) &&
- (ad->eActiveDSD != DSD2)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE;
- }
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (copy_from_user(&nvm_rw,
- (IOCTL_BCM_NVM_READ == cmd) ?
- io_buff.OutputBuffer : io_buff.InputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
-
- /*
- * Deny the access if the offset crosses the cal area limit.
- */
- if (nvm_rw.uiNumBytes > ad->uiNVMDSDSize)
- return STATUS_FAILURE;
-
- if (nvm_rw.uiOffset >
- ad->uiNVMDSDSize - nvm_rw.uiNumBytes)
- return STATUS_FAILURE;
-
- read_data = memdup_user(nvm_rw.pBuffer,
- nvm_rw.uiNumBytes);
- if (IS_ERR(read_data))
- return PTR_ERR(read_data);
-
- do_gettimeofday(&tv0);
- if (IOCTL_BCM_NVM_READ == cmd) {
- int ret = bcm_handle_nvm_read_cmd(ad, read_data,
- &nvm_rw);
- if (ret != STATUS_SUCCESS)
- return ret;
- } else {
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_data);
- return -EACCES;
- }
-
- ad->bHeaderChangeAllowed = TRUE;
- if (IsFlash2x(ad)) {
- int ret = handle_flash2x_adapter(ad,
- read_data,
- &nvm_rw);
- if (ret != STATUS_SUCCESS)
- return ret;
- }
-
- status = BeceemNVMWrite(ad, (PUINT)read_data,
- nvm_rw.uiOffset, nvm_rw.uiNumBytes,
- nvm_rw.bVerify);
- if (IsFlash2x(ad))
- BcmFlash2xWriteSig(ad, ad->eActiveDSD);
-
- ad->bHeaderChangeAllowed = false;
-
- up(&ad->NVMRdmWrmLock);
-
- if (status != STATUS_SUCCESS) {
- kfree(read_data);
- return status;
- }
- }
-
- do_gettimeofday(&tv1);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- " timetaken by Write/read :%ld msec\n",
- (tv1.tv_sec - tv0.tv_sec)*1000 +
- (tv1.tv_usec - tv0.tv_usec)/1000);
-
- kfree(read_data);
- return STATUS_SUCCESS;
-}
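The NVM path timestamps the operation with do_gettimeofday() before and after, and reports the difference as (seconds delta) * 1000 + (microseconds delta) / 1000 milliseconds. The same millisecond arithmetic applied to two fixed timestamps:

    #include <stdio.h>
    #include <sys/time.h>

    static long elapsed_ms(struct timeval t0, struct timeval t1)
    {
            return (t1.tv_sec - t0.tv_sec) * 1000 +
                   (t1.tv_usec - t0.tv_usec) / 1000;
    }

    int main(void)
    {
            struct timeval a = { .tv_sec = 10, .tv_usec = 250000 };
            struct timeval b = { .tv_sec = 12, .tv_usec = 100000 };

            printf("%ld ms\n", elapsed_ms(a, b));   /* 2000 - 150 = 1850 */
            return 0;
    }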
-
-static int bcm_char_ioctl_flash2x_section_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_readwrite flash_2x_read = {0};
- struct bcm_ioctl_buffer io_buff;
- PUCHAR read_buff = NULL;
- UINT nob = 0;
- UINT buff_size = 0;
- UINT read_bytes = 0;
- UINT read_offset = 0;
- INT status = STATUS_FAILURE;
- void __user *OutPutBuff;
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&flash_2x_read, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.Section :%x",
- flash_2x_read.Section);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.offset :%x",
- flash_2x_read.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.numOfBytes :%x",
- flash_2x_read.numOfBytes);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nflash_2x_read.bVerify :%x\n",
- flash_2x_read.bVerify);
-
- /* This was internal to the driver for raw read;
- * now it has been exposed to the user space app.
- */
- if (validateFlash2xReadWrite(ad, &flash_2x_read) == false)
- return STATUS_FAILURE;
-
- nob = flash_2x_read.numOfBytes;
- if (nob > ad->uiSectorSize)
- buff_size = ad->uiSectorSize;
- else
- buff_size = nob;
-
- read_offset = flash_2x_read.offset;
- OutPutBuff = io_buff.OutputBuffer;
- read_buff = kzalloc(buff_size , GFP_KERNEL);
-
- if (read_buff == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Memory allocation failed for Flash 2.x Read Structure");
- return -ENOMEM;
- }
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EACCES;
- }
-
- while (nob) {
- if (nob > ad->uiSectorSize)
- read_bytes = ad->uiSectorSize;
- else
- read_bytes = nob;
-
- /* Reading the data from Flash 2.x */
- status = BcmFlash2xBulkRead(ad, (PUINT)read_buff,
- flash_2x_read.Section, read_offset, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Flash 2x read err with status :%d",
- status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, read_buff, read_bytes);
-
- status = copy_to_user(OutPutBuff, read_buff, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Copy to use failed with status :%d", status);
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EFAULT;
- }
- nob = nob - read_bytes;
- if (nob) {
- read_offset = read_offset + read_bytes;
- OutPutBuff = OutPutBuff + read_bytes;
- }
- }
-
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return status;
-}
-
-static int bcm_char_ioctl_flash2x_section_write(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_readwrite sFlash2xWrite = {0};
- struct bcm_ioctl_buffer io_buff;
- PUCHAR write_buff;
- void __user *input_addr;
- UINT nob = 0;
- UINT buff_size = 0;
- UINT write_off = 0;
- UINT write_bytes = 0;
- INT status = STATUS_FAILURE;
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- /* First make this False so that we can enable the Sector
- * Permission Check in BeceemFlashBulkWrite
- */
- ad->bAllDSDWriteAllow = false;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- /* Reading FLASH 2.x READ structure */
- if (copy_from_user(&sFlash2xWrite, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_readwrite)))
- return -EFAULT;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.Section :%x", sFlash2xWrite.Section);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.offset :%d", sFlash2xWrite.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.numOfBytes :%x", sFlash2xWrite.numOfBytes);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\nsFlash2xWrite.bVerify :%x\n", sFlash2xWrite.bVerify);
-
- if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1)
- && (sFlash2xWrite.Section != VSA2)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Only VSA write is allowed");
- return -EINVAL;
- }
-
- if (validateFlash2xReadWrite(ad, &sFlash2xWrite) == false)
- return STATUS_FAILURE;
-
- input_addr = sFlash2xWrite.pDataBuff;
- write_off = sFlash2xWrite.offset;
- nob = sFlash2xWrite.numOfBytes;
-
- if (nob > ad->uiSectorSize)
- buff_size = ad->uiSectorSize;
- else
- buff_size = nob;
-
- write_buff = kmalloc(buff_size, GFP_KERNEL);
-
- if (write_buff == NULL)
- return -ENOMEM;
-
- /* extracting the remainder of the given offset. */
- write_bytes = ad->uiSectorSize;
- if (write_off % ad->uiSectorSize) {
- write_bytes = ad->uiSectorSize -
- (write_off % ad->uiSectorSize);
- }
-
- if (nob < write_bytes)
- write_bytes = nob;
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return -EACCES;
- }
-
- BcmFlash2xCorruptSig(ad, sFlash2xWrite.Section);
- do {
- status = copy_from_user(write_buff, input_addr, write_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Copy from user failed with status :%d", status);
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return -EFAULT;
- }
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS,
- OSAL_DBG, DBG_LVL_ALL, write_buff, write_bytes);
-
- /* Writing the data from Flash 2.x */
- status = BcmFlash2xBulkWrite(ad, (PUINT)write_buff,
- sFlash2xWrite.Section,
- write_off,
- write_bytes,
- sFlash2xWrite.bVerify);
-
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Flash 2x write err with status :%d", status);
- break;
- }
-
- nob = nob - write_bytes;
- if (nob) {
- write_off = write_off + write_bytes;
- input_addr = input_addr + write_bytes;
- if (nob > ad->uiSectorSize)
- write_bytes = ad->uiSectorSize;
- else
- write_bytes = nob;
- }
- } while (nob > 0);
-
- BcmFlash2xWriteSig(ad, sFlash2xWrite.Section);
- up(&ad->NVMRdmWrmLock);
- kfree(write_buff);
- return status;
-}
-
-static int bcm_char_ioctl_flash2x_section_bitmap(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_bitmap *flash_2x_bit_map;
- struct bcm_ioctl_buffer io_buff;
-
-	BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength != sizeof(struct bcm_flash2x_bitmap))
- return -EINVAL;
-
- flash_2x_bit_map = kzalloc(sizeof(struct bcm_flash2x_bitmap),
- GFP_KERNEL);
-
- if (flash_2x_bit_map == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Memory is not available");
- return -ENOMEM;
- }
-
-	/* Reading the Flash Section Bit map */
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- kfree(flash_2x_bit_map);
- return -EACCES;
- }
-
- BcmGetFlash2xSectionalBitMap(ad, flash_2x_bit_map);
- up(&ad->NVMRdmWrmLock);
- if (copy_to_user(io_buff.OutputBuffer, flash_2x_bit_map,
- sizeof(struct bcm_flash2x_bitmap))) {
- kfree(flash_2x_bit_map);
- return -EFAULT;
- }
-
- kfree(flash_2x_bit_map);
-	return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_set_active_section(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- enum bcm_flash2x_section_val flash_2x_section_val = 0;
- INT status = STATUS_FAILURE;
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SET_ACTIVE_SECTION Called");
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
-
- status = copy_from_user(&flash_2x_section_val,
- io_buff.InputBuffer, sizeof(INT));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of flash section val failed");
- return -EFAULT;
- }
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- status = BcmSetActiveSection(ad, flash_2x_section_val);
- if (status)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Failed to make its priority highest. status %d",
- status);
-
- up(&ad->NVMRdmWrmLock);
-
- return status;
-}
-
-static int bcm_char_ioctl_copy_section(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_flash2x_copy_section copy_sect_strut = {0};
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_COPY_SECTION Called");
-
- ad->bAllDSDWriteAllow = false;
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed status :%d",
- status);
- return -EFAULT;
- }
-
- status = copy_from_user(&copy_sect_strut, io_buff.InputBuffer,
- sizeof(struct bcm_flash2x_copy_section));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of Copy_Section_Struct failed with status :%d",
- status);
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"Source Section :%x", copy_sect_strut.SrcSection);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"Destination Section :%x", copy_sect_strut.DstSection);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "offset :%x", copy_sect_strut.offset);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "nob :%x", copy_sect_strut.numOfBytes);
-
- if (IsSectionExistInFlash(ad, copy_sect_strut.SrcSection) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Source Section<%x> does not exist in Flash ",
- copy_sect_strut.SrcSection);
- return -EINVAL;
- }
-
- if (IsSectionExistInFlash(ad, copy_sect_strut.DstSection) == false) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Destination Section<%x> does not exist in Flash ",
- copy_sect_strut.DstSection);
- return -EINVAL;
- }
-
- if (copy_sect_strut.SrcSection == copy_sect_strut.DstSection) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Source and Destination section should be different");
- return -EINVAL;
- }
-
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- if (copy_sect_strut.SrcSection == ISO_IMAGE1 ||
- copy_sect_strut.SrcSection == ISO_IMAGE2) {
- if (IsNonCDLessDevice(ad)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Device is Non-CDLess hence won't have ISO !!");
- status = -EINVAL;
- } else if (copy_sect_strut.numOfBytes == 0) {
- status = BcmCopyISO(ad, copy_sect_strut);
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Partial Copy of ISO section is not Allowed..");
- status = STATUS_FAILURE;
- }
- up(&ad->NVMRdmWrmLock);
- return status;
- }
-
- status = BcmCopySection(ad, copy_sect_strut.SrcSection,
- copy_sect_strut.DstSection,
- copy_sect_strut.offset,
- copy_sect_strut.numOfBytes);
- up(&ad->NVMRdmWrmLock);
- return status;
-}
-
-static int bcm_char_ioctl_get_flash_cs_info(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"IOCTL_BCM_GET_FLASH_CS_INFO Called");
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
-
- if (ad->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Connected device does not have flash");
- return -EINVAL;
- }
-
- if (IsFlash2x(ad) == TRUE) {
- if (io_buff.OutputLength < sizeof(struct bcm_flash2x_cs_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer,
- ad->psFlash2xCSInfo,
- sizeof(struct bcm_flash2x_cs_info)))
- return -EFAULT;
- } else {
- if (io_buff.OutputLength < sizeof(struct bcm_flash_cs_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer, ad->psFlashCSInfo,
- sizeof(struct bcm_flash_cs_info)))
- return -EFAULT;
- }
- return status;
-}
-
-static int bcm_char_ioctl_select_dsd(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- UINT sect_offset = 0;
- enum bcm_flash2x_section_val flash_2x_section_val;
-
- flash_2x_section_val = NO_SECTION_VAL;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_SELECT_DSD Called");
-
- if (IsFlash2x(ad) != TRUE) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash Does not have 2.x map");
- return -EINVAL;
- }
-
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
- status = copy_from_user(&flash_2x_section_val, io_buff.InputBuffer,
- sizeof(INT));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Copy of flash section val failed");
- return -EFAULT;
- }
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Read Section :%d", flash_2x_section_val);
- if ((flash_2x_section_val != DSD0) &&
- (flash_2x_section_val != DSD1) &&
- (flash_2x_section_val != DSD2)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Passed section<%x> is not DSD section",
- flash_2x_section_val);
- return STATUS_FAILURE;
- }
-
- sect_offset = BcmGetSectionValStartOffset(ad, flash_2x_section_val);
- if (sect_offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Provided Section val <%d> does not exist in Flash 2.x",
- flash_2x_section_val);
- return -EINVAL;
- }
-
- ad->bAllDSDWriteAllow = TRUE;
- ad->ulFlashCalStart = sect_offset;
- ad->eActiveDSD = flash_2x_section_val;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_nvm_raw_read(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_nvm_readwrite nvm_read;
- struct bcm_ioctl_buffer io_buff;
- unsigned int nob;
- INT buff_size;
- INT read_offset = 0;
- UINT read_bytes = 0;
- PUCHAR read_buff;
- void __user *OutPutBuff;
- INT status = STATUS_FAILURE;
-
- if (ad->eNVMType != NVM_FLASH) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "NVM TYPE is not Flash");
- return -EINVAL;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer))) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "copy_from_user 1 failed\n");
- return -EFAULT;
- }
-
- if (copy_from_user(&nvm_read, io_buff.OutputBuffer,
- sizeof(struct bcm_nvm_readwrite)))
- return -EFAULT;
-
- nob = nvm_read.uiNumBytes;
- /* In Raw-Read max Buff size : 64MB */
-
- if (nob > DEFAULT_BUFF_SIZE)
- buff_size = DEFAULT_BUFF_SIZE;
- else
- buff_size = nob;
-
- read_offset = nvm_read.uiOffset;
- OutPutBuff = nvm_read.pBuffer;
-
-	read_buff = kzalloc(buff_size, GFP_KERNEL);
- if (read_buff == NULL) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-			"Memory allocation failed for NVM Raw Read buffer");
- return -ENOMEM;
- }
- down(&ad->NVMRdmWrmLock);
-
- if ((ad->IdleMode == TRUE) ||
- (ad->bShutStatus == TRUE) ||
- (ad->bPreparingForLowPowerMode == TRUE)) {
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Device is in Idle/Shutdown Mode\n");
- kfree(read_buff);
- up(&ad->NVMRdmWrmLock);
- return -EACCES;
- }
-
- ad->bFlashRawRead = TRUE;
-
- while (nob) {
- if (nob > DEFAULT_BUFF_SIZE)
- read_bytes = DEFAULT_BUFF_SIZE;
- else
- read_bytes = nob;
-
- /* Reading the data from Flash 2.x */
- status = BeceemNVMRead(ad, (PUINT)read_buff,
- read_offset, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
- "Flash 2x read err with status :%d",
- status);
- break;
- }
-
- BCM_DEBUG_PRINT_BUFFER(ad, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, read_buff, read_bytes);
-
- status = copy_to_user(OutPutBuff, read_buff, read_bytes);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_PRINTK, 0, 0,
-				"Copy to user failed with status :%d",
- status);
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return -EFAULT;
- }
- nob = nob - read_bytes;
- if (nob) {
- read_offset = read_offset + read_bytes;
- OutPutBuff = OutPutBuff + read_bytes;
- }
- }
- ad->bFlashRawRead = false;
- up(&ad->NVMRdmWrmLock);
- kfree(read_buff);
- return status;
-}
-
-static int bcm_char_ioctl_cntrlmsg_mask(void __user *argp,
- struct bcm_mini_adapter *ad,
- struct bcm_tarang_data *tarang)
-{
- struct bcm_ioctl_buffer io_buff;
- INT status = STATUS_FAILURE;
- ULONG rx_cntrl_msg_bit_mask = 0;
-
- /* Copy Ioctl Buffer structure */
- status = copy_from_user(&io_buff, argp,
- sizeof(struct bcm_ioctl_buffer));
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
-			"copy of Ioctl buffer from user space failed");
- return -EFAULT;
- }
-
- if (io_buff.InputLength != sizeof(unsigned long))
- return -EINVAL;
-
- status = copy_from_user(&rx_cntrl_msg_bit_mask, io_buff.InputBuffer,
- io_buff.InputLength);
- if (status) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "copy of control bit mask failed from user space");
- return -EFAULT;
- }
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "\n Got user defined cntrl msg bit mask :%lx",
- rx_cntrl_msg_bit_mask);
- tarang->RxCntrlMsgBitMask = rx_cntrl_msg_bit_mask;
-
- return status;
-}
-
-static int bcm_char_ioctl_get_device_driver_info(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_driver_info dev_info;
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
-
- memset(&dev_info, 0, sizeof(dev_info));
- dev_info.MaxRDMBufferSize = BUFFER_4K;
- dev_info.u32DSDStartOffset = EEPROM_CALPARAM_START;
- dev_info.u32RxAlignmentCorrection = 0;
- dev_info.u32NVMType = ad->eNVMType;
- dev_info.u32InterfaceType = BCM_USB;
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(dev_info))
- return -EINVAL;
-
- if (copy_to_user(io_buff.OutputBuffer, &dev_info, sizeof(dev_info)))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-static int bcm_char_ioctl_time_since_net_entry(void __user *argp,
- struct bcm_mini_adapter *ad)
-{
- struct bcm_time_elapsed time_elapsed_since_net_entry = {0};
- struct bcm_ioctl_buffer io_buff;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
-
- if (copy_from_user(&io_buff, argp, sizeof(struct bcm_ioctl_buffer)))
- return -EFAULT;
-
- if (io_buff.OutputLength < sizeof(struct bcm_time_elapsed))
- return -EINVAL;
-
- time_elapsed_since_net_entry.ul64TimeElapsedSinceNetEntry =
- get_seconds() - ad->liTimeSinceLastNetEntry;
-
- if (copy_to_user(io_buff.OutputBuffer, &time_elapsed_since_net_entry,
- sizeof(struct bcm_time_elapsed)))
- return -EFAULT;
-
- return STATUS_SUCCESS;
-}
-
-
-static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
-{
- struct bcm_tarang_data *tarang = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct bcm_mini_adapter *ad = tarang->Adapter;
- INT status = STATUS_FAILURE;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX",
- cmd, arg);
-
- if (_IOC_TYPE(cmd) != BCM_IOCTL)
- return -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_READ)
- status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) & _IOC_WRITE)
- status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE))
- status = STATUS_SUCCESS;
-
- if (status)
- return -EFAULT;
-
- if (ad->device_removed)
- return -EFAULT;
-
- if (false == ad->fw_download_done) {
- switch (cmd) {
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- case IOCTL_BCM_GPIO_SET_REQUEST:
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- return -EACCES;
- default:
- break;
- }
- }
-
- status = vendorextnIoctl(ad, cmd, arg);
- if (status != CONTINUE_COMMON_PATH)
- return status;
-
- switch (cmd) {
- /* Rdms for Swin Idle... */
- case IOCTL_BCM_REGISTER_READ_PRIVATE:
- status = bcm_char_ioctl_reg_read_private(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
- status = bcm_char_ioctl_reg_write_private(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_READ:
- case IOCTL_BCM_EEPROM_REGISTER_READ:
- status = bcm_char_ioctl_eeprom_reg_read(argp, ad);
- return status;
-
- case IOCTL_BCM_REGISTER_WRITE:
- case IOCTL_BCM_EEPROM_REGISTER_WRITE:
- status = bcm_char_ioctl_eeprom_reg_write(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_GPIO_SET_REQUEST:
- status = bcm_char_ioctl_gpio_set_request(argp, ad);
- return status;
-
- case BCM_LED_THREAD_STATE_CHANGE_REQ:
- status = bcm_char_ioctl_led_thread_state_change_req(argp,
- ad);
- return status;
-
- case IOCTL_BCM_GPIO_STATUS_REQUEST:
- status = bcm_char_ioctl_gpio_status_request(argp, ad);
- return status;
-
- case IOCTL_BCM_GPIO_MULTI_REQUEST:
- status = bcm_char_ioctl_gpio_multi_request(argp, ad);
- return status;
-
- case IOCTL_BCM_GPIO_MODE_REQUEST:
- status = bcm_char_ioctl_gpio_mode_request(argp, ad);
- return status;
-
- case IOCTL_MAC_ADDR_REQ:
- case IOCTL_LINK_REQ:
- case IOCTL_CM_REQUEST:
- case IOCTL_SS_INFO_REQ:
- case IOCTL_SEND_CONTROL_MESSAGE:
- case IOCTL_IDLE_REQ:
- status = bcm_char_ioctl_misc_request(argp, ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD_START:
- status = bcm_char_ioctl_buffer_download_start(ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD:
- status = bcm_char_ioctl_buffer_download(argp, ad);
- return status;
-
- case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
- status = bcm_char_ioctl_buffer_download_stop(argp, ad);
- return status;
-
-
- case IOCTL_BE_BUCKET_SIZE:
- status = 0;
- if (get_user(ad->BEBucketSize,
- (unsigned long __user *)arg))
- status = -EFAULT;
- break;
-
- case IOCTL_RTPS_BUCKET_SIZE:
- status = 0;
- if (get_user(ad->rtPSBucketSize,
- (unsigned long __user *)arg))
- status = -EFAULT;
- break;
-
- case IOCTL_CHIP_RESET:
- status = bcm_char_ioctl_chip_reset(ad);
- return status;
-
- case IOCTL_QOS_THRESHOLD:
- status = bcm_char_ioctl_qos_threshold(arg, ad);
- return status;
-
- case IOCTL_DUMP_PACKET_INFO:
- DumpPackInfo(ad);
- DumpPhsRules(&ad->stBCMPhsContext);
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_GET_PACK_INFO:
- if (copy_to_user(argp, &ad->PackInfo,
- sizeof(struct bcm_packet_info)*NO_OF_QUEUES))
- return -EFAULT;
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_SWITCH_TRANSFER_MODE:
- status = bcm_char_ioctl_switch_transfer_mode(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_DRIVER_VERSION:
- status = bcm_char_ioctl_get_driver_version(argp);
- return status;
-
- case IOCTL_BCM_GET_CURRENT_STATUS:
- status = bcm_char_ioctl_get_current_status(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_MAC_TRACING:
- status = bcm_char_ioctl_set_mac_tracing(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_DSX_INDICATION:
- status = bcm_char_ioctl_get_dsx_indication(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_HOST_MIBS:
- status = bcm_char_ioctl_get_host_mibs(argp, ad, tarang);
- return status;
-
- case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE:
- if ((false == ad->bTriedToWakeUpFromlowPowerMode) &&
- (TRUE == ad->IdleMode)) {
- ad->usIdleModePattern = ABORT_IDLE_MODE;
- ad->bWakeUpDevice = TRUE;
- wake_up(&ad->process_rx_cntrlpkt);
- }
-
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_BULK_WRM:
- status = bcm_char_ioctl_bulk_wrm(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_GET_NVM_SIZE:
- status = bcm_char_ioctl_get_nvm_size(argp, ad);
- return status;
-
- case IOCTL_BCM_CAL_INIT:
- status = bcm_char_ioctl_cal_init(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_DEBUG:
- status = bcm_char_ioctl_set_debug(argp, ad);
- return status;
-
- case IOCTL_BCM_NVM_READ:
- case IOCTL_BCM_NVM_WRITE:
- status = bcm_char_ioctl_nvm_rw(argp, ad, cmd);
- return status;
-
- case IOCTL_BCM_FLASH2X_SECTION_READ:
- status = bcm_char_ioctl_flash2x_section_read(argp, ad);
- return status;
-
- case IOCTL_BCM_FLASH2X_SECTION_WRITE:
- status = bcm_char_ioctl_flash2x_section_write(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP:
- status = bcm_char_ioctl_flash2x_section_bitmap(argp, ad);
- return status;
-
- case IOCTL_BCM_SET_ACTIVE_SECTION:
- status = bcm_char_ioctl_set_active_section(argp, ad);
- return status;
-
- case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION:
- /* Right Now we are taking care of only DSD */
- ad->bAllDSDWriteAllow = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
- status = STATUS_SUCCESS;
- break;
-
- case IOCTL_BCM_COPY_SECTION:
- status = bcm_char_ioctl_copy_section(argp, ad);
- return status;
-
- case IOCTL_BCM_GET_FLASH_CS_INFO:
- status = bcm_char_ioctl_get_flash_cs_info(argp, ad);
- return status;
-
- case IOCTL_BCM_SELECT_DSD:
- status = bcm_char_ioctl_select_dsd(argp, ad);
- return status;
-
- case IOCTL_BCM_NVM_RAW_READ:
- status = bcm_char_ioctl_nvm_raw_read(argp, ad);
- return status;
-
- case IOCTL_BCM_CNTRLMSG_MASK:
- status = bcm_char_ioctl_cntrlmsg_mask(argp, ad, tarang);
- return status;
-
- case IOCTL_BCM_GET_DEVICE_DRIVER_INFO:
- status = bcm_char_ioctl_get_device_driver_info(argp, ad);
- return status;
-
- case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
- status = bcm_char_ioctl_time_since_net_entry(argp, ad);
- return status;
-
- case IOCTL_CLOSE_NOTIFICATION:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
- "IOCTL_CLOSE_NOTIFICATION");
- break;
-
- default:
- pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
- status = STATUS_FAILURE;
- break;
- }
- return status;
-}
-
-
-static const struct file_operations bcm_fops = {
- .owner = THIS_MODULE,
- .open = bcm_char_open,
- .release = bcm_char_release,
- .read = bcm_char_read,
- .unlocked_ioctl = bcm_char_ioctl,
- .llseek = no_llseek,
-};
-
-int register_control_device_interface(struct bcm_mini_adapter *ad)
-{
-
- if (ad->major > 0)
- return ad->major;
-
- ad->major = register_chrdev(0, DEV_NAME, &bcm_fops);
- if (ad->major < 0) {
-		pr_err(DRV_NAME ": could not create character device\n");
- return ad->major;
- }
-
- ad->pstCreatedClassDevice = device_create(bcm_class, NULL,
- MKDEV(ad->major, 0),
- ad, DEV_NAME);
-
- if (IS_ERR(ad->pstCreatedClassDevice)) {
- pr_err(DRV_NAME ": class device create failed\n");
- unregister_chrdev(ad->major, DEV_NAME);
- return PTR_ERR(ad->pstCreatedClassDevice);
- }
-
- return 0;
-}
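
register_control_device_interface() returns the (positive) major number when the device is already registered, a negative errno when register_chrdev() or device_create() fails, and 0 on first-time success, so callers only need to test for a negative value. A hedged usage sketch, with hypothetical probe/remove wrappers that are not part of this driver:

/* Hypothetical caller of the registration pair above. */
static int example_probe(struct bcm_mini_adapter *ad)
{
	int ret;

	ret = register_control_device_interface(ad);
	if (ret < 0)	/* negative errno from chrdev/class setup */
		return ret;

	/* ... rest of adapter bring-up ... */
	return 0;
}

static void example_remove(struct bcm_mini_adapter *ad)
{
	/* tears down the class device and the chrdev */
	unregister_control_device_interface(ad);
}
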
-
-void unregister_control_device_interface(struct bcm_mini_adapter *ad)
-{
- if (ad->major > 0) {
- device_destroy(bcm_class, MKDEV(ad->major, 0));
- unregister_chrdev(ad->major, DEV_NAME);
- }
-}
-
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
deleted file mode 100644
index e57767684cee..000000000000
--- a/drivers/staging/bcm/Bcmnet.c
+++ /dev/null
@@ -1,240 +0,0 @@
-#include "headers.h"
-
-struct net_device *gblpnetdev;
-
-static INT bcm_open(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- if (ad->fw_download_done == false) {
- pr_notice(PFX "%s: link up failed (download in progress)\n",
- dev->name);
- return -EBUSY;
- }
-
- if (netif_msg_ifup(ad))
- pr_info(PFX "%s: enabling interface\n", dev->name);
-
- if (ad->LinkUpStatus) {
- if (netif_msg_link(ad))
- pr_info(PFX "%s: link up\n", dev->name);
-
- netif_carrier_on(ad->dev);
- netif_start_queue(ad->dev);
- }
-
- return 0;
-}
-
-static INT bcm_close(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- if (netif_msg_ifdown(ad))
- pr_info(PFX "%s: disabling interface\n", dev->name);
-
- netif_carrier_off(dev);
- netif_stop_queue(dev);
-
- return 0;
-}
-
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- return ClassifyPacket(netdev_priv(dev), skb);
-}
-
-/*******************************************************************
-* Function - bcm_transmit()
-*
-* Description - This is the main transmit function for our virtual
-*               interface (eth0). It enqueues the packet on a
-*               suitable service-flow queue and then wakes the
-*               transmit thread to send it out.
-*
-* Parameter - skb - Pointer to the socket buffer structure
-* dev - Pointer to the virtual net device structure
-*
-*********************************************************************/
-
-static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
- u16 qindex = skb_get_queue_mapping(skb);
-
-
- if (ad->device_removed || !ad->LinkUpStatus)
- goto drop;
-
- if (ad->TransferMode != IP_PACKET_ONLY_MODE)
- goto drop;
-
- if (INVALID_QUEUE_INDEX == qindex)
- goto drop;
-
- if (ad->PackInfo[qindex].uiCurrentPacketsOnHost >=
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
- return NETDEV_TX_BUSY;
-
- /* Now Enqueue the packet */
- if (netif_msg_tx_queued(ad))
- pr_info(PFX "%s: enqueueing packet to queue %d\n",
- dev->name, qindex);
-
- spin_lock(&ad->PackInfo[qindex].SFQueueLock);
- ad->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
- ad->PackInfo[qindex].uiCurrentPacketsOnHost++;
-
- *((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies;
- ENQUEUEPACKET(ad->PackInfo[qindex].FirstTxQueue,
- ad->PackInfo[qindex].LastTxQueue, skb);
- atomic_inc(&ad->TotalPacketCount);
- spin_unlock(&ad->PackInfo[qindex].SFQueueLock);
-
- /* FIXME - this is racy and incorrect, replace with work queue */
- if (!atomic_read(&ad->TxPktAvail)) {
- atomic_set(&ad->TxPktAvail, 1);
- wake_up(&ad->tx_packet_wait_queue);
- }
- return NETDEV_TX_OK;
-
- drop:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
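
The FIXME in bcm_transmit() flags the TxPktAvail atomic_read()/atomic_set() pair as a racy wake-up and suggests a work queue instead. Purely as a sketch of what that could look like, under the assumption of a tx_work member added to the adapter (tx_work and bcm_tx_worker are hypothetical names, not part of this driver):

#include <linux/workqueue.h>

/* Hypothetical work item embedded in struct bcm_mini_adapter, replacing
 * the TxPktAvail flag + tx_packet_wait_queue pair used above.
 */
static void bcm_tx_worker(struct work_struct *work)
{
	struct bcm_mini_adapter *ad =
		container_of(work, struct bcm_mini_adapter, tx_work);

	/* drain the per-service-flow queues here, as the tx thread does today */
	(void)ad;
}

/* In bcm_transmit(), after enqueueing the skb:
 *	schedule_work(&ad->tx_work);
 * and during adapter init:
 *	INIT_WORK(&ad->tx_work, bcm_tx_worker);
 */
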
-
-
-
-/**
-@ingroup init_functions
-Register other driver entry points with the kernel
-*/
-static const struct net_device_ops bcmNetDevOps = {
- .ndo_open = bcm_open,
- .ndo_stop = bcm_close,
- .ndo_start_xmit = bcm_transmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_select_queue = bcm_select_queue,
-};
-
-static struct device_type wimax_type = {
- .name = "wimax",
-};
-
-static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- cmd->supported = 0;
- cmd->advertising = 0;
- cmd->speed = SPEED_10000;
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- return 0;
-}
-
-static void bcm_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_device *udev = interface_to_usbdev(intf_ad->interface);
-
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u",
- ad->uiFlashLayoutMajorVersion,
- ad->uiFlashLayoutMinorVersion);
-
- usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
-}
-
-static u32 bcm_get_link(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- return ad->LinkUpStatus;
-}
-
-static u32 bcm_get_msglevel(struct net_device *dev)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- return ad->msg_enable;
-}
-
-static void bcm_set_msglevel(struct net_device *dev, u32 level)
-{
- struct bcm_mini_adapter *ad = GET_BCM_ADAPTER(dev);
-
- ad->msg_enable = level;
-}
-
-static const struct ethtool_ops bcm_ethtool_ops = {
- .get_settings = bcm_get_settings,
- .get_drvinfo = bcm_get_drvinfo,
- .get_link = bcm_get_link,
- .get_msglevel = bcm_get_msglevel,
- .set_msglevel = bcm_set_msglevel,
-};
-
-int register_networkdev(struct bcm_mini_adapter *ad)
-{
- struct net_device *net = ad->dev;
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_interface *udev = intf_ad->interface;
- struct usb_device *xdev = intf_ad->udev;
-
- int result;
-
- net->netdev_ops = &bcmNetDevOps;
- net->ethtool_ops = &bcm_ethtool_ops;
- net->mtu = MTU_SIZE; /* 1400 Bytes */
- net->tx_queue_len = TX_QLEN;
- net->flags |= IFF_NOARP;
-
- netif_carrier_off(net);
-
- SET_NETDEV_DEVTYPE(net, &wimax_type);
-
- /* Read the MAC Address from EEPROM */
- result = ReadMacAddressFromNVM(ad);
- if (result != STATUS_SUCCESS) {
- dev_err(&udev->dev,
- PFX "Error in Reading the mac Address: %d", result);
- return -EIO;
- }
-
- result = register_netdev(net);
- if (result)
- return result;
-
- gblpnetdev = ad->dev;
-
- if (netif_msg_probe(ad))
- dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n",
- net->name, xdev->bus->bus_name, xdev->devpath,
- net->dev_addr);
-
- return 0;
-}
-
-void unregister_networkdev(struct bcm_mini_adapter *ad)
-{
- struct net_device *net = ad->dev;
- struct bcm_interface_adapter *intf_ad = ad->pvInterfaceAdapter;
- struct usb_interface *udev = intf_ad->interface;
- struct usb_device *xdev = intf_ad->udev;
-
- if (netif_msg_probe(ad))
- dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n",
- net->name, xdev->bus->bus_name, xdev->devpath);
-
- unregister_netdev(ad->dev);
-}
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
deleted file mode 100644
index adca0ce4d05f..000000000000
--- a/drivers/staging/bcm/CmHost.c
+++ /dev/null
@@ -1,2254 +0,0 @@
-/************************************************************
- * CMHOST.C
- * This file contains the routines for handling Connection
- * Management.
- ************************************************************/
-
-#include "headers.h"
-
-enum E_CLASSIFIER_ACTION {
- eInvalidClassifierAction,
- eAddClassifier,
- eReplaceClassifier,
- eDeleteClassifier
-};
-
-static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter,
- B_UINT16 tid);
-static void restore_endianess_of_pstClassifierEntry(
- struct bcm_classifier_rule *pstClassifierEntry,
- enum bcm_ipaddr_context eIpAddrContext);
-
-static void apply_phs_rule_to_all_classifiers(
- register struct bcm_mini_adapter *Adapter,
- register UINT uiSearchRuleIndex,
- USHORT uVCID,
- struct bcm_phs_rule *sPhsRule,
- struct bcm_phs_rules *cPhsRule,
- struct bcm_add_indication_alt *pstAddIndication);
-
-/************************************************************
- * Function - SearchSfid
- *
- * Description - This routine searches the QoS queues for the
- *               SFID given as the input parameter.
- *
- * Parameters - Adapter: Pointer to the Adapter structure
- * uiSfid : Given SFID for matching
- *
- * Returns - Queue index for this SFID (if matched),
- *           else an invalid queue index (if not matched).
- ************************************************************/
-int SearchSfid(struct bcm_mini_adapter *Adapter, UINT uiSfid)
-{
- int i;
-
- for (i = (NO_OF_QUEUES-1); i >= 0; i--)
- if (Adapter->PackInfo[i].ulSFID == uiSfid)
- return i;
-
- return NO_OF_QUEUES+1;
-}
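
Note the return convention: SearchSfid() (and SearchFreeSfid() below) signal a miss by returning NO_OF_QUEUES + 1 rather than a negative value, so callers must compare against the table size. A small usage sketch under that assumption; example_lookup() and the errno choice are illustrative only:

static int example_lookup(struct bcm_mini_adapter *Adapter, UINT sfid)
{
	int idx = SearchSfid(Adapter, sfid);

	if (idx >= NO_OF_QUEUES)	/* miss: the helper returned NO_OF_QUEUES + 1 */
		return -ENOENT;		/* illustrative error code */

	return idx;			/* valid index into Adapter->PackInfo[] */
}
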
-
-/***************************************************************
- * Function -SearchFreeSfid
- *
- * Description - This routine searches for a free, available SFID.
- *
- * Parameter - Adapter: Pointer to the Adapter structure
- *
- * Returns - Queue index for the free SFID
- * Else returns Invalid Index.
- ****************************************************************/
-static int SearchFreeSfid(struct bcm_mini_adapter *Adapter)
-{
- int i;
-
- for (i = 0; i < (NO_OF_QUEUES-1); i++)
- if (Adapter->PackInfo[i].ulSFID == 0)
- return i;
-
- return NO_OF_QUEUES+1;
-}
-
-/*
- * Function: SearchClsid
- * Description: This routine searches for the classifier having the specified ClassifierID as the input parameter
- * Input parameters: struct bcm_mini_adapter *Adapter - Adapter Context
- *                   unsigned int uiSfid - The SF in which the classifier is to be searched
- *                   B_UINT16 uiClassifierID - The classifier ID to be searched
- * Return: int: Classifier table index of the matching entry
- */
-static int SearchClsid(struct bcm_mini_adapter *Adapter,
- ULONG ulSFID,
- B_UINT16 uiClassifierID)
-{
- int i;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if ((Adapter->astClassifierTable[i].bUsed) &&
- (Adapter->astClassifierTable[i].uiClassifierRuleIndex
- == uiClassifierID) &&
- (Adapter->astClassifierTable[i].ulSFID == ulSFID))
- return i;
- }
-
- return MAX_CLASSIFIERS+1;
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * This routine searches for a free, available classifier entry in the classifier table.
- * @return free classifier entry index in the classifier table for the specified SF
- */
-static int SearchFreeClsid(struct bcm_mini_adapter *Adapter /**Adapter Context*/)
-{
- int i;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if (!Adapter->astClassifierTable[i].bUsed)
- return i;
- }
-
- return MAX_CLASSIFIERS+1;
-}
-
-static VOID deleteSFBySfid(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex)
-{
- /* deleting all the packet held in the SF */
- flush_queue(Adapter, uiSearchRuleIndex);
-
-	/* Deleting all the classifiers for this SF */
- DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
-
- /* Resetting only MIBS related entries in the SF */
- memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0,
- sizeof(struct bcm_mibs_table));
-}
-
-static inline VOID
-CopyIpAddrToClassifier(struct bcm_classifier_rule *pstClassifierEntry,
- B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
- bool bIpVersion6, enum bcm_ipaddr_context eIpAddrContext)
-{
- int i = 0;
- UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
- UCHAR *ptrClassifierIpAddress = NULL;
- UCHAR *ptrClassifierIpMask = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (bIpVersion6)
- nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
-
- /* Destination Ip Address */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Address Range Length:0x%X ", u8IpAddressLen);
- if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
- (TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
-
- union u_ip_address *st_dest_ip =
- &pstClassifierEntry->stDestIpAddress;
-
- union u_ip_address *st_src_ip =
- &pstClassifierEntry->stSrcIpAddress;
-
- /*
-		 * The mask and the address are checked together in classification,
-		 * so the length will be: TotalLengthInBytes / (nSizeOfIPAddressInBytes * 2)
-		 * (nSizeOfIPAddressInBytes for the address and nSizeOfIPAddressInBytes for the mask)
- */
- if (eIpAddrContext == eDestIpAddress) {
- pstClassifierEntry->ucIPDestinationAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if (bIpVersion6) {
- ptrClassifierIpAddress =
- st_dest_ip->ucIpv6Address;
- ptrClassifierIpMask =
- st_dest_ip->ucIpv6Mask;
- } else {
- ptrClassifierIpAddress =
- st_dest_ip->ucIpv4Address;
- ptrClassifierIpMask =
- st_dest_ip->ucIpv4Mask;
- }
- } else if (eIpAddrContext == eSrcIpAddress) {
- pstClassifierEntry->ucIPSourceAddressLength =
- u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
- if (bIpVersion6) {
- ptrClassifierIpAddress =
- st_src_ip->ucIpv6Address;
- ptrClassifierIpMask = st_src_ip->ucIpv6Mask;
- } else {
- ptrClassifierIpAddress =
- st_src_ip->ucIpv4Address;
- ptrClassifierIpMask = st_src_ip->ucIpv4Mask;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Address Length:0x%X\n",
- pstClassifierEntry->ucIPDestinationAddressLength);
- while ((u8IpAddressLen >= nSizeOfIPAddressInBytes)
- && (i < MAX_IP_RANGE_LENGTH)) {
- memcpy(ptrClassifierIpAddress +
- (i * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc
- + (i * nSizeOfIPAddressInBytes * 2)),
- nSizeOfIPAddressInBytes);
-
- if (!bIpVersion6) {
- if (eIpAddrContext == eSrcIpAddress) {
- st_src_ip->ulIpv4Addr[i] =
- ntohl(st_src_ip->ulIpv4Addr[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Src Ip Address:0x%luX ",
- st_src_ip->ulIpv4Addr[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- st_dest_ip->ulIpv4Addr[i] =
- ntohl(st_dest_ip->ulIpv4Addr[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Dest Ip Address:0x%luX ",
- st_dest_ip->ulIpv4Addr[i]);
- }
- }
- u8IpAddressLen -= nSizeOfIPAddressInBytes;
- if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
- memcpy(ptrClassifierIpMask +
- (i * nSizeOfIPAddressInBytes),
- (pu8IpAddressMaskSrc
- + nSizeOfIPAddressInBytes
- + (i * nSizeOfIPAddressInBytes * 2)),
- nSizeOfIPAddressInBytes);
-
- if (!bIpVersion6) {
- if (eIpAddrContext == eSrcIpAddress) {
- st_src_ip->ulIpv4Mask[i] =
- ntohl(st_src_ip->ulIpv4Mask[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Src Ip Mask Address:0x%luX ",
- st_src_ip->ulIpv4Mask[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- st_dest_ip->ulIpv4Mask[i] =
- ntohl(st_dest_ip->ulIpv4Mask[i]);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Dest Ip Mask Address:0x%luX ",
- st_dest_ip->ulIpv4Mask[i]);
- }
- }
- u8IpAddressLen -= nSizeOfIPAddressInBytes;
- }
- if (u8IpAddressLen == 0)
- pstClassifierEntry->bDestIpValid = TRUE;
-
- i++;
- }
- if (bIpVersion6) {
- /* Restore EndianNess of Struct */
- restore_endianess_of_pstClassifierEntry(
- pstClassifierEntry,
- eIpAddrContext
- );
- }
- }
-}
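
The length arithmetic inside CopyIpAddrToClassifier() follows from the TLV layout: each range carries an address immediately followed by its mask, so the number of ranges is u8IpAddressLen / (nSizeOfIPAddressInBytes * 2); for IPv4 a 16-byte TLV encodes two ranges, for IPv6 a 32-byte TLV encodes one. A tiny sketch of the same calculation (the helper name and macro are illustrative, not driver symbols):

#define EXAMPLE_IPV4_BYTES 4	/* illustrative; the driver uses IP_LENGTH_OF_ADDRESS */

/* Each range = 4 address bytes followed by 4 mask bytes. */
static unsigned int example_num_ipv4_ranges(unsigned char tlv_len)
{
	return tlv_len / (EXAMPLE_IPV4_BYTES * 2);	/* e.g. 16 -> 2 ranges */
}
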
-
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll)
-{
- int i;
- struct bcm_targetdsx_buffer *curr_buf;
-
- for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
- curr_buf = &Adapter->astTargetDsxBuffer[i];
-
- if (curr_buf->valid)
- continue;
-
- if ((bFreeAll) || (curr_buf->tid == TID)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
- TID, curr_buf->ulTargetDsxBuffer);
- curr_buf->valid = 1;
- curr_buf->tid = 0;
- Adapter->ulFreeTargetBufferCnt++;
- }
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * copy classifier rule into the specified SF index
- */
-static inline VOID CopyClassifierRuleToSF(struct bcm_mini_adapter *Adapter,
- struct bcm_convergence_types *psfCSType,
- UINT uiSearchRuleIndex,
- UINT nClassifierIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- /* VOID *pvPhsContext = NULL; */
- int i;
- /* UCHAR ucProtocolLength=0; */
- /* ULONG ulPhsStatus; */
-
- struct bcm_packet_class_rules *pack_class_rule =
- &psfCSType->cCPacketClassificationRule;
-
- if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
- nClassifierIndex > (MAX_CLASSIFIERS-1))
- return;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Storing Classifier Rule Index : %X",
- ntohs(pack_class_rule->u16PacketClassificationRuleIndex));
-
- if (nClassifierIndex > MAX_CLASSIFIERS-1)
- return;
-
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if (pstClassifierEntry) {
- /* Store if Ipv6 */
- pstClassifierEntry->bIpv6Protocol =
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false;
-
-		/* Destination Port */
- pstClassifierEntry->ucDestPortRangeLength =
- pack_class_rule->u8ProtocolDestPortRangeLength / 4;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Destination Port Range Length:0x%X ",
- pstClassifierEntry->ucDestPortRangeLength);
-
- if (pack_class_rule->u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
- for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
- pstClassifierEntry->usDestPortRangeLo[i] =
- *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+i));
- pstClassifierEntry->usDestPortRangeHi[i] =
- *((PUSHORT)(pack_class_rule->u8ProtocolDestPortRange+2+i));
- pstClassifierEntry->usDestPortRangeLo[i] =
- ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Destination Port Range Lo:0x%X ",
- pstClassifierEntry->usDestPortRangeLo[i]);
- pstClassifierEntry->usDestPortRangeHi[i] =
- ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
- }
- } else {
- pstClassifierEntry->ucDestPortRangeLength = 0;
- }
-
- /* Source Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Source Port Range Length:0x%X ",
- pack_class_rule->u8ProtocolSourcePortRangeLength);
- if (pack_class_rule->u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
- pstClassifierEntry->ucSrcPortRangeLength =
- pack_class_rule->u8ProtocolSourcePortRangeLength/4;
- for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
- pstClassifierEntry->usSrcPortRangeLo[i] =
- *((PUSHORT)(pack_class_rule->
- u8ProtocolSourcePortRange+i));
- pstClassifierEntry->usSrcPortRangeHi[i] =
- *((PUSHORT)(pack_class_rule->
- u8ProtocolSourcePortRange+2+i));
- pstClassifierEntry->usSrcPortRangeLo[i] =
- ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Source Port Range Lo:0x%X ",
- pstClassifierEntry->usSrcPortRangeLo[i]);
- pstClassifierEntry->usSrcPortRangeHi[i] =
- ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
- }
- }
- /* Destination Ip Address and Mask */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Destination Parameters : ");
- CopyIpAddrToClassifier(pstClassifierEntry,
- pack_class_rule->u8IPDestinationAddressLength,
- pack_class_rule->u8IPDestinationAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
- TRUE : false, eDestIpAddress);
-
- /* Source Ip Address and Mask */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Ip Source Parameters : ");
-
- CopyIpAddrToClassifier(pstClassifierEntry,
- pack_class_rule->u8IPMaskedSourceAddressLength,
- pack_class_rule->u8IPMaskedSourceAddress,
- (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : false,
- eSrcIpAddress);
-
- /* TOS */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "TOS Length:0x%X ",
- pack_class_rule->u8IPTypeOfServiceLength);
- if (pack_class_rule->u8IPTypeOfServiceLength == 3) {
- pstClassifierEntry->ucIPTypeOfServiceLength =
- pack_class_rule->u8IPTypeOfServiceLength;
- pstClassifierEntry->ucTosLow =
- pack_class_rule->u8IPTypeOfService[0];
- pstClassifierEntry->ucTosHigh =
- pack_class_rule->u8IPTypeOfService[1];
- pstClassifierEntry->ucTosMask =
- pack_class_rule->u8IPTypeOfService[2];
- pstClassifierEntry->bTOSValid = TRUE;
- }
- if (pack_class_rule->u8Protocol == 0) {
- /* we didn't get protocol field filled in by the BS */
- pstClassifierEntry->ucProtocolLength = 0;
- } else {
- pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
- }
-
- pstClassifierEntry->ucProtocol[0] = pack_class_rule->u8Protocol;
- pstClassifierEntry->u8ClassifierRulePriority =
- pack_class_rule->u8ClassifierRulePriority;
-
- /* store the classifier rule ID and set this classifier entry as valid */
- pstClassifierEntry->ucDirection =
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
- pstClassifierEntry->uiClassifierRuleIndex =
- ntohs(pack_class_rule->u16PacketClassificationRuleIndex);
- pstClassifierEntry->usVCID_Value =
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
- pstClassifierEntry->ulSFID =
- Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
- uiSearchRuleIndex,
- pstClassifierEntry->ucDirection,
- pstClassifierEntry->uiClassifierRuleIndex,
- pstClassifierEntry->usVCID_Value);
-
- if (pack_class_rule->u8AssociatedPHSI)
- pstClassifierEntry->u8AssociatedPHSI =
- pack_class_rule->u8AssociatedPHSI;
-
- /* Copy ETH CS Parameters */
- pstClassifierEntry->ucEthCSSrcMACLen =
- (pack_class_rule->u8EthernetSourceMACAddressLength);
- memcpy(pstClassifierEntry->au8EThCSSrcMAC,
- pack_class_rule->u8EthernetSourceMACAddress,
- MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSSrcMACMask,
- pack_class_rule->u8EthernetSourceMACAddress
- + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
- pstClassifierEntry->ucEthCSDestMACLen =
- (pack_class_rule->u8EthernetDestMacAddressLength);
- memcpy(pstClassifierEntry->au8EThCSDestMAC,
- pack_class_rule->u8EthernetDestMacAddress,
- MAC_ADDRESS_SIZE);
- memcpy(pstClassifierEntry->au8EThCSDestMACMask,
- pack_class_rule->u8EthernetDestMacAddress
- + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
- pstClassifierEntry->ucEtherTypeLen =
- (pack_class_rule->u8EthertypeLength);
- memcpy(pstClassifierEntry->au8EthCSEtherType,
- pack_class_rule->u8Ethertype,
- NUM_ETHERTYPE_BYTES);
- memcpy(pstClassifierEntry->usUserPriority,
- &pack_class_rule->u16UserPriority, 2);
- pstClassifierEntry->usVLANID =
- ntohs(pack_class_rule->u16VLANID);
- pstClassifierEntry->usValidityBitMap =
- ntohs(pack_class_rule->u16ValidityBitMap);
-
- pstClassifierEntry->bUsed = TRUE;
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- */
-static inline VOID DeleteClassifierRuleFromSF(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex, UINT nClassifierIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- B_UINT16 u16PacketClassificationRuleIndex;
- USHORT usVCID;
- /* VOID *pvPhsContext = NULL; */
- /*ULONG ulPhsStatus; */
-
- usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
-
- if (nClassifierIndex > MAX_CLASSIFIERS-1)
- return;
-
- if (usVCID == 0)
- return;
-
- u16PacketClassificationRuleIndex =
- Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
- pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
- if (pstClassifierEntry) {
- pstClassifierEntry->bUsed = false;
- pstClassifierEntry->uiClassifierRuleIndex = 0;
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_classifier_rule));
-
- /* Delete the PHS Rule for this classifier */
- PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID,
- u16PacketClassificationRuleIndex);
- }
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- */
-VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter,
- UINT uiSearchRuleIndex)
-{
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
- int i;
- /* B_UINT16 u16PacketClassificationRuleIndex; */
- USHORT ulVCID;
- /* VOID *pvPhsContext = NULL; */
- /* ULONG ulPhsStatus; */
-
- ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
-
- if (ulVCID == 0)
- return;
-
- for (i = 0; i < MAX_CLASSIFIERS; i++) {
- if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
- pstClassifierEntry = &Adapter->astClassifierTable[i];
-
- if (pstClassifierEntry->bUsed)
- DeleteClassifierRuleFromSF(Adapter,
- uiSearchRuleIndex, i);
- }
- }
-
- /* Delete All Phs Rules Associated with this SF */
- PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
-}
-
-/*
- * This routine copies the Connection Management
- * related data into the Adapter structure.
- * @ingroup ctrl_pkt_functions
- */
-static VOID CopyToAdapter(register struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- register struct bcm_connect_mgr_params *psfLocalSet, /* Pointer to the connection manager parameters structure */
- register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
- register UCHAR ucDsxType,
- struct bcm_add_indication_alt *pstAddIndication) {
-
- /* UCHAR ucProtocolLength = 0; */
- ULONG ulSFID;
- UINT nClassifierIndex = 0;
- enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
- B_UINT16 u16PacketClassificationRuleIndex = 0;
- int i;
- struct bcm_convergence_types *psfCSType = NULL;
- struct bcm_phs_rule sPhsRule;
- struct bcm_packet_info *curr_packinfo =
- &Adapter->PackInfo[uiSearchRuleIndex];
- USHORT uVCID = curr_packinfo->usVCID_Value;
- UINT UGIValue = 0;
-
- curr_packinfo->bValid = TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Search Rule Index = %d\n", uiSearchRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Updating Queue %d", uiSearchRuleIndex);
-
- ulSFID = ntohl(psfLocalSet->u32SFID);
- /* Store IP Version used */
- /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
-
- curr_packinfo->bIPCSSupport = 0;
- curr_packinfo->bEthCSSupport = 0;
-
- /* Enable IP/ETh CS Support As Required */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "CopyToAdapter : u8CSSpecification : %X\n",
- psfLocalSet->u8CSSpecification);
- switch (psfLocalSet->u8CSSpecification) {
- case eCSPacketIPV4:
- curr_packinfo->bIPCSSupport = IPV4_CS;
- break;
- case eCSPacketIPV6:
- curr_packinfo->bIPCSSupport = IPV6_CS;
- break;
- case eCS802_3PacketEthernet:
- case eCS802_1QPacketVLAN:
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- case eCSPacketIPV4Over802_1QVLAN:
- case eCSPacketIPV4Over802_3Ethernet:
- curr_packinfo->bIPCSSupport = IPV4_CS;
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- case eCSPacketIPV6Over802_1QVLAN:
- case eCSPacketIPV6Over802_3Ethernet:
- curr_packinfo->bIPCSSupport = IPV6_CS;
- curr_packinfo->bEthCSSupport = ETH_CS_802_3;
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Error in value of CS Classification.. setting default to IP CS\n");
- curr_packinfo->bIPCSSupport = IPV4_CS;
- break;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
- uiSearchRuleIndex,
- curr_packinfo->bEthCSSupport,
- curr_packinfo->bIPCSSupport);
-
- /* Store IP Version used */
- /* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
- if (curr_packinfo->bIPCSSupport == IPV6_CS)
- curr_packinfo->ucIpVersion = IPV6;
- else
- curr_packinfo->ucIpVersion = IPV4;
-
-	/* Ensure that the ETH CS code doesn't get executed if the BS doesn't support ETH CS */
- if (!Adapter->bETHCSEnabled)
- curr_packinfo->bEthCSSupport = 0;
-
- if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
- memcpy(curr_packinfo->ucServiceClassName,
- psfLocalSet->u8ServiceClassName,
- psfLocalSet->u8ServiceClassNameLength);
-
- curr_packinfo->u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
-
- if (curr_packinfo->u8QueueType == BE && curr_packinfo->ucDirection)
- Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
-
- curr_packinfo->ulSFID = ntohl(psfLocalSet->u32SFID);
-
- curr_packinfo->u8TrafficPriority = psfLocalSet->u8TrafficPriority;
-
- /* copy all the classifier in the Service Flow param structure */
- for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Classifier index =%d", i);
- psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Classifier index =%d", i);
-
- if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
- curr_packinfo->bClassifierPriority = TRUE;
-
- if (ucDsxType == DSA_ACK) {
- eClassifierAction = eAddClassifier;
- } else if (ucDsxType == DSC_ACK) {
- switch (psfCSType->u8ClassfierDSCAction) {
- case 0: /* DSC Add Classifier */
- eClassifierAction = eAddClassifier;
- break;
- case 1: /* DSC Replace Classifier */
- eClassifierAction = eReplaceClassifier;
- break;
- case 2: /* DSC Delete Classifier */
- eClassifierAction = eDeleteClassifier;
- break;
- default:
- eClassifierAction = eInvalidClassifierAction;
- }
- }
-
- u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- switch (eClassifierAction) {
- case eAddClassifier:
- /* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */
- /* Contained in this message */
- nClassifierIndex = SearchClsid(Adapter,
- ulSFID,
- u16PacketClassificationRuleIndex);
-
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- nClassifierIndex = SearchFreeClsid(Adapter);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To get a free Entry */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "Error Failed To get a free Classifier Entry");
- break;
- }
- /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
- CopyClassifierRuleToSF(Adapter, psfCSType,
- uiSearchRuleIndex,
- nClassifierIndex);
- } else {
- /* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG,
- DBG_LVL_ALL,
- "CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
- u16PacketClassificationRuleIndex);
- }
- break;
- case eReplaceClassifier:
- /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
- /* with the new classifier Contained in this message */
- nClassifierIndex = SearchClsid(Adapter, ulSFID,
- u16PacketClassificationRuleIndex);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To search the classifier */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Error Search for Classifier To be replaced failed");
- break;
- }
- /* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
- CopyClassifierRuleToSF(Adapter, psfCSType,
- uiSearchRuleIndex, nClassifierIndex);
- break;
- case eDeleteClassifier:
- /* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
- /* with the new classifier Contained in this message */
- nClassifierIndex = SearchClsid(Adapter, ulSFID,
- u16PacketClassificationRuleIndex);
- if (nClassifierIndex > MAX_CLASSIFIERS) {
- /* Failed To search the classifier */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CONN_MSG, DBG_LVL_ALL,
- "Error Search for Classifier To be deleted failed");
- break;
- }
-
- /* Delete This classifier */
- DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex,
- nClassifierIndex);
- break;
- default:
- /* Invalid Action for classifier */
- break;
- }
- }
-
- /* Repeat parsing Classification Entries to process PHS Rules */
- for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
- psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "psfCSType->u8PhsDSCAction : 0x%x\n",
- psfCSType->u8PhsDSCAction);
-
- switch (psfCSType->u8PhsDSCAction) {
- case eDeleteAllPHSRules:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "Deleting All PHS Rules For VCID: 0x%X\n",
- uVCID);
-
- /* Delete All the PHS rules for this Service flow */
- PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
- break;
- case eDeletePHSRule:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "PHS DSC Action = Delete PHS Rule\n");
-
- if (psfCSType->cPhsRule.u8PHSI)
- PhsDeletePHSRule(&Adapter->stBCMPhsContext,
- uVCID,
- psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
-
- break;
- default:
- if (ucDsxType == DSC_ACK) {
- /* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
- break; /* FOr DSC ACK Case PHS DSC Action must be in valid set */
- }
- /* Proceed To Add PHS rule for DSA_ACK case even if PHS DSC action is unspecified */
-			/* No break here. Intentional fall-through! */
-
- case eAddPHSRule:
- case eSetPHSRule:
- if (psfCSType->cPhsRule.u8PHSI) {
- /* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
- apply_phs_rule_to_all_classifiers(Adapter,
- uiSearchRuleIndex,
- uVCID,
- &sPhsRule,
- &psfCSType->cPhsRule,
- pstAddIndication);
- }
- break;
- }
- }
-
- if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
-		/* No rate limit: set Max Sustained Traffic Rate to the maximum */
- curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
- } else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
-		/* Allowed rate too large; limiting to the WiMAX maximum allowed rate */
- curr_packinfo->uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
- } else {
- curr_packinfo->uiMaxAllowedRate =
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
- }
-
- curr_packinfo->uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
- if (curr_packinfo->uiMaxLatency == 0) /* 0 should be treated as infinite */
- curr_packinfo->uiMaxLatency = MAX_LATENCY_ALLOWED;
-
- if ((curr_packinfo->u8QueueType == ERTPS ||
- curr_packinfo->u8QueueType == UGS))
- UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
-
- if (UGIValue == 0)
- UGIValue = DEFAULT_UG_INTERVAL;
-
-	/*
-	 * For UGI-based connections, DEFAULT_UGI_FACTOR * UGIValue worth of data is the
-	 * maximum token count at the host. The extra tokens ensure that a large amount
-	 * of jitter does not cause a loss in throughput. For non-UGI-based connections,
-	 * 200 frames worth of data is the maximum token count at the host.
-	 */
- curr_packinfo->uiMaxBucketSize =
- (DEFAULT_UGI_FACTOR*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000;
-
- if (curr_packinfo->uiMaxBucketSize < WIMAX_MAX_MTU*8) {
- UINT UGIFactor = 0;
-		/* Special handling to ensure the largest packet can go out from host to FW:
-		 * 1. Packets from host to FW can go out in different sizes.
-		 * 2. If the bucket count is smaller than the MTU, packets larger than the token count would get dropped.
-		 * 3. So allow max-size packets from host to FW; the FW can send them out as multiple SDUs via fragmentation at the WiMAX layer.
-		 */
- UGIFactor = (curr_packinfo->uiMaxLatency/UGIValue + 1);
-
- if (UGIFactor > DEFAULT_UGI_FACTOR)
- curr_packinfo->uiMaxBucketSize =
- (UGIFactor*curr_packinfo->uiMaxAllowedRate*UGIValue)/1000;
-
- if (curr_packinfo->uiMaxBucketSize > WIMAX_MAX_MTU*8)
- curr_packinfo->uiMaxBucketSize = WIMAX_MAX_MTU*8;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "LAT: %d, UGI: %d\n", curr_packinfo->uiMaxLatency,
- UGIValue);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
- curr_packinfo->uiMaxAllowedRate,
- ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
- curr_packinfo->uiMaxBucketSize);
-
- /* copy the extended SF Parameters to Support MIBS */
- CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
-
- /* store header suppression enabled flag per SF */
- curr_packinfo->bHeaderSuppressionEnabled =
- !(psfLocalSet->u8RequesttransmissionPolicy &
- MASK_DISABLE_HEADER_SUPPRESSION);
-
- kfree(curr_packinfo->pstSFIndication);
- curr_packinfo->pstSFIndication = pstAddIndication;
-
-	/* Re-sort the SF list in PackInfo according to traffic priority */
- SortPackInfo(Adapter);
-
-	/* Re-sort the Classifier Rules table and re-arrange it
-	 * according to classifier rule priority
-	 */
- SortClassifiers(Adapter);
- DumpPhsRules(&Adapter->stBCMPhsContext);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s <=====", __func__);
-}
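The token-bucket sizing performed near the end of CopyToAdapter boils down to a small piece of arithmetic: clamp the sustained rate, treat a zero latency as infinite, default the grant interval, then size the bucket from DEFAULT_UGI_FACTOR (and, if that bucket is too shallow, from the latency budget). The standalone sketch below illustrates that logic only; it is not the driver code. Byte-order conversion (ntohl/ntohs) and the PackInfo bookkeeping are omitted, and the constant values here are hypothetical stand-ins for WIMAX_MAX_ALLOWED_RATE, MAX_LATENCY_ALLOWED, DEFAULT_UG_INTERVAL, DEFAULT_UGI_FACTOR and WIMAX_MAX_MTU, which the real driver takes from its headers.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's header constants. */
#define WIMAX_MAX_ALLOWED_RATE	(1024 * 1024 * 100)
#define MAX_LATENCY_ALLOWED	0xFFFFFFFF	/* "infinite" latency */
#define DEFAULT_UG_INTERVAL	250		/* ms */
#define DEFAULT_UGI_FACTOR	4
#define WIMAX_MAX_MTU		1664		/* bytes */

struct bucket_params {
	uint32_t max_allowed_rate;	/* clamped sustained rate */
	uint32_t max_latency;		/* 0 is treated as infinite */
	uint32_t max_bucket_size;	/* token bucket depth */
};

static struct bucket_params size_bucket(uint32_t sustained_rate,
					uint32_t max_latency,
					uint32_t ugi_ms)
{
	struct bucket_params p;

	/* Clamp the advertised sustained rate; 0 means "no limit". */
	if (sustained_rate == 0 || sustained_rate > WIMAX_MAX_ALLOWED_RATE)
		p.max_allowed_rate = WIMAX_MAX_ALLOWED_RATE;
	else
		p.max_allowed_rate = sustained_rate;

	/* A latency of 0 is treated as infinite. */
	p.max_latency = max_latency ? max_latency : MAX_LATENCY_ALLOWED;

	if (ugi_ms == 0)
		ugi_ms = DEFAULT_UG_INTERVAL;

	/* DEFAULT_UGI_FACTOR * UGI worth of data is the bucket depth. */
	p.max_bucket_size =
		(DEFAULT_UGI_FACTOR * p.max_allowed_rate * ugi_ms) / 1000;

	/* If the bucket cannot hold WIMAX_MAX_MTU * 8 tokens, try to widen it
	 * using the latency budget, capping the result at WIMAX_MAX_MTU * 8.
	 */
	if (p.max_bucket_size < WIMAX_MAX_MTU * 8) {
		uint32_t factor = p.max_latency / ugi_ms + 1;

		if (factor > DEFAULT_UGI_FACTOR)
			p.max_bucket_size =
				(factor * p.max_allowed_rate * ugi_ms) / 1000;
		if (p.max_bucket_size > WIMAX_MAX_MTU * 8)
			p.max_bucket_size = WIMAX_MAX_MTU * 8;
	}
	return p;
}

int main(void)
{
	struct bucket_params p = size_bucket(4096, 100, 20);

	printf("rate=%u latency=%u bucket=%u\n",
	       p.max_allowed_rate, p.max_latency, p.max_bucket_size);
	return 0;
}

Called as size_bucket(4096, 100, 20), the sketch first computes a 327-token bucket, finds it smaller than WIMAX_MAX_MTU * 8, and widens it to 491 tokens using the latency-derived factor.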
-
-/***********************************************************************
- * Function - DumpCmControlPacket
- *
- * Description - This routine dumps the contents of the AddIndication
- * Structure in the Connection Management Control Packet
- *
- * Parameter - pvBuffer: Pointer to the buffer containing the
- * AddIndication data.
- *
- * Returns - None
- *************************************************************************/
-static VOID DumpCmControlPacket(PVOID pvBuffer)
-{
- int uiLoopIndex;
- int nIndex;
- struct bcm_add_indication_alt *pstAddIndication;
- UINT nCurClassifierCnt;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- pstAddIndication = pvBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[3],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
- pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
- pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
- pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
- &pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
- pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
- *(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
- *(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
- *(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
- pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
- if (!pstAddIndication->sfAuthorizedSet.bValid)
- pstAddIndication->sfAuthorizedSet.bValid = 1;
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
-
- psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
- psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetDestMacAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
-				psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetSourceMACAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
- psfCSType->cCPacketClassificationRule.u8Ethertype[0],
- psfCSType->cCPacketClassificationRule.u8Ethertype[1],
- psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ",
- 6, psfCSType->cCPacketClassificationRule.
- u8IPv6FlowLable);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
- pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- "u8ServiceClassName: 0x%*ph",
- 6, pstAddIndication->sfAdmittedSet.u8ServiceClassName);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
- pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
-
- psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
- psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%*ph",
- 3, psfCSType->cCPacketClassificationRule.
- u8IPTypeOfService);
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%*ph ",
- 4, psfCSType->cCPacketClassificationRule.
- u8ProtocolSourcePortRange);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%*ph ",
- 4, psfCSType->cCPacketClassificationRule.
- u8ProtocolDestPortRange);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetDestMacAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
-				psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: %pM",
- psfCSType->cCPacketClassificationRule.
- u8EthernetSourceMACAddress);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8Ethertype[3]: 0x%*ph",
- 3, psfCSType->cCPacketClassificationRule.
- u8Ethertype);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
- psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
- psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
- psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x%*ph ",
- 6, psfCSType->cCPacketClassificationRule.
- u8IPv6FlowLable);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- "u8ServiceClassName: 0x%*ph",
- 6, pstAddIndication->sfActiveSet.u8ServiceClassName);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
- pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
- pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
- pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
- pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
- pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
- pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
-
- nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
- if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
- nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
-
- for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
- struct bcm_convergence_types *psfCSType = NULL;
- struct bcm_packet_class_rules *clsRule = NULL;
-
- psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
- clsRule = &psfCSType->cCPacketClassificationRule;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
- clsRule->u8ClassifierRulePriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
- clsRule->u8IPTypeOfServiceLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
- clsRule->u8IPTypeOfService[0],
- clsRule->u8IPTypeOfService[1],
- clsRule->u8IPTypeOfService[2]);
-
- for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8Protocol: 0x%X ",
- clsRule->u8Protocol);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPMaskedSourceAddressLength: 0x%X ",
- clsRule->u8IPMaskedSourceAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPMaskedSourceAddress[32]: 0x%X ",
- clsRule->u8IPMaskedSourceAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8IPDestinationAddressLength: 0x%02X ",
- clsRule->u8IPDestinationAddressLength);
-
- for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPDestinationAddress[32]:0x%X ",
- clsRule->u8IPDestinationAddress[uiLoopIndex]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolSourcePortRangeLength: 0x%X ",
- clsRule->u8ProtocolSourcePortRangeLength);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8ProtocolSourcePortRange[0],
- clsRule->u8ProtocolSourcePortRange[1],
- clsRule->u8ProtocolSourcePortRange[2],
- clsRule->u8ProtocolSourcePortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolDestPortRangeLength: 0x%X ",
- clsRule->u8ProtocolDestPortRangeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8ProtocolDestPortRange[0],
- clsRule->u8ProtocolDestPortRange[1],
- clsRule->u8ProtocolDestPortRange[2],
- clsRule->u8ProtocolDestPortRange[3]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetDestMacAddressLength: 0x%X ",
- clsRule->u8EthernetDestMacAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- clsRule->u8EthernetDestMacAddress[0],
- clsRule->u8EthernetDestMacAddress[1],
- clsRule->u8EthernetDestMacAddress[2],
- clsRule->u8EthernetDestMacAddress[3],
- clsRule->u8EthernetDestMacAddress[4],
- clsRule->u8EthernetDestMacAddress[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8EthernetSourceMACAddressLength: 0x%X ",
-				clsRule->u8EthernetSourceMACAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
- clsRule->u8EthernetSourceMACAddress[0],
- clsRule->u8EthernetSourceMACAddress[1],
- clsRule->u8EthernetSourceMACAddress[2],
- clsRule->u8EthernetSourceMACAddress[3],
- clsRule->u8EthernetSourceMACAddress[4],
- clsRule->u8EthernetSourceMACAddress[5]);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
- clsRule->u8EthertypeLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
- clsRule->u8Ethertype[0],
- clsRule->u8Ethertype[1],
- clsRule->u8Ethertype[2]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u16UserPriority: 0x%X ",
- clsRule->u16UserPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u16VLANID: 0x%X ",
- clsRule->u16VLANID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ",
- clsRule->u8AssociatedPHSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u16PacketClassificationRuleIndex:0x%X ",
- clsRule->u16PacketClassificationRuleIndex);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8VendorSpecificClassifierParamLength:0x%X ",
- clsRule->u8VendorSpecificClassifierParamLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8VendorSpecificClassifierParam[1]:0x%X ",
- clsRule->u8VendorSpecificClassifierParam[0]);
-#ifdef VERSION_D5
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
- clsRule->u8IPv6FlowLableLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL,
- DBG_LVL_ALL,
- " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
- clsRule->u8IPv6FlowLable[0],
- clsRule->u8IPv6FlowLable[1],
- clsRule->u8IPv6FlowLable[2],
- clsRule->u8IPv6FlowLable[3],
- clsRule->u8IPv6FlowLable[4],
- clsRule->u8IPv6FlowLable[5]);
-#endif
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,
- " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
-}
-
-static inline ULONG RestoreSFParam(struct bcm_mini_adapter *Adapter,
- ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
-{
- UINT nBytesToRead = sizeof(struct bcm_connect_mgr_params);
-
- if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Got Param address as 0!!");
- return 0;
- }
- ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
-
- /* Read out the SF Param Set At the indicated Location */
- if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
- return STATUS_FAILURE;
-
- return 1;
-}
-
-static ULONG StoreSFParam(struct bcm_mini_adapter *Adapter, PUCHAR pucSrcBuffer,
- ULONG ulAddrSFParamSet)
-{
- UINT nBytesToWrite = sizeof(struct bcm_connect_mgr_params);
- int ret = 0;
-
- if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
- return 0;
-
- ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
- if (ret < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "%s:%d WRM failed", __func__, __LINE__);
- return ret;
- }
- return 1;
-}
-
-ULONG StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter,
- PVOID pvBuffer, UINT *puBufferLength)
-{
- struct bcm_add_indication_alt *pstAddIndicationAlt = NULL;
- struct bcm_add_indication *pstAddIndication = NULL;
- struct bcm_del_request *pstDeletionRequest;
- UINT uiSearchRuleIndex;
- ULONG ulSFID;
-
- pstAddIndicationAlt = pvBuffer;
-
- /*
-	 * In case of a DSD request by the MS, we should immediately delete this SF so
-	 * that we stop any further classification of packets onto this SF.
- */
- if (pstAddIndicationAlt->u8Type == DSD_REQ) {
- pstDeletionRequest = pvBuffer;
-
- ulSFID = ntohl(pstDeletionRequest->u32SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
- return 1;
- }
-
- if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
- (pstAddIndicationAlt->u8Type == DSD_ACK)) {
- /* No Special handling send the message as it is */
- return 1;
- }
-	/* For DSA_REQ, only the fields up to the psfAuthorizedSet parameter should be accessed by the driver! */
-
- pstAddIndication = kmalloc(sizeof(struct bcm_add_indication),
- GFP_KERNEL);
- if (pstAddIndication == NULL)
- return 0;
-
- /* AUTHORIZED SET */
- pstAddIndication->psfAuthorizedSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfAuthorizedSet) {
- kfree(pstAddIndication);
- return 0;
- }
-
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
- (ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- /* this can't possibly be right */
- pstAddIndication->psfAuthorizedSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG)pstAddIndication->psfAuthorizedSet);
-
- if (pstAddIndicationAlt->u8Type == DSA_REQ) {
- struct bcm_add_request AddRequest;
-
- AddRequest.u8Type = pstAddIndicationAlt->u8Type;
- AddRequest.eConnectionDir = pstAddIndicationAlt->u8Direction;
- AddRequest.u16TID = pstAddIndicationAlt->u16TID;
- AddRequest.u16CID = pstAddIndicationAlt->u16CID;
- AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
- AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
- (*puBufferLength) = sizeof(struct bcm_add_request);
- memcpy(pvBuffer, &AddRequest, sizeof(struct bcm_add_request));
- kfree(pstAddIndication);
- return 1;
- }
-
-	/* Since it's not DSA_REQ, we can access all fields in pstAddIndicationAlt */
- /* We need to extract the structure from the buffer and pack it differently */
-
- pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
- pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
- pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
- pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
- pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
- pstAddIndication->u8CC = pstAddIndicationAlt->u8CC;
-
- /* ADMITTED SET */
- pstAddIndication->psfAdmittedSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfAdmittedSet) {
- kfree(pstAddIndication);
- return 0;
- }
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet,
- (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- pstAddIndication->psfAdmittedSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG) pstAddIndication->psfAdmittedSet);
-
- /* ACTIVE SET */
- pstAddIndication->psfActiveSet = (struct bcm_connect_mgr_params *)
- GetNextTargetBufferLocation(Adapter,
- pstAddIndicationAlt->u16TID);
- if (!pstAddIndication->psfActiveSet) {
- kfree(pstAddIndication);
- return 0;
- }
- if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet,
- (ULONG)pstAddIndication->psfActiveSet) != 1) {
- kfree(pstAddIndication);
- return 0;
- }
-
- pstAddIndication->psfActiveSet =
- (struct bcm_connect_mgr_params *) ntohl(
- (ULONG)pstAddIndication->psfActiveSet);
-
- (*puBufferLength) = sizeof(struct bcm_add_indication);
- *(struct bcm_add_indication *)pvBuffer = *pstAddIndication;
- kfree(pstAddIndication);
- return 1;
-}
-
-static inline struct bcm_add_indication_alt
-*RestoreCmControlResponseMessage(register struct bcm_mini_adapter *Adapter,
- register PVOID pvBuffer)
-{
- ULONG ulStatus = 0;
- struct bcm_add_indication *pstAddIndication = NULL;
- struct bcm_add_indication_alt *pstAddIndicationDest = NULL;
-
- pstAddIndication = pvBuffer;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "=====>");
- if ((pstAddIndication->u8Type == DSD_REQ) ||
- (pstAddIndication->u8Type == DSD_RSP) ||
- (pstAddIndication->u8Type == DSD_ACK))
- return pvBuffer;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Inside RestoreCmControlResponseMessage ");
- /*
- * Need to Allocate memory to contain the SUPER Large structures
- * Our driver can't create these structures on Stack :(
- */
- pstAddIndicationDest = kmalloc(sizeof(struct bcm_add_indication_alt),
- GFP_KERNEL);
-
- if (pstAddIndicationDest) {
- memset(pstAddIndicationDest, 0,
- sizeof(struct bcm_add_indication_alt));
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "Failed to allocate memory for SF Add Indication Structure ");
- return NULL;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8Type : 0x%X",
- pstAddIndication->u8Type);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8Direction : 0x%X",
- pstAddIndication->eConnectionDir);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8TID : 0x%X",
- ntohs(pstAddIndication->u16TID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u8CID : 0x%X",
- ntohs(pstAddIndication->u16CID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-u16VCID : 0x%X",
- ntohs(pstAddIndication->u16VCID));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
-			"AddIndication-authorized set loc : %p",
- pstAddIndication->psfAuthorizedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-admitted set loc : %p",
- pstAddIndication->psfAdmittedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "AddIndication-Active set loc : %p",
- pstAddIndication->psfActiveSet);
-
- pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
- pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
- pstAddIndicationDest->u16TID = pstAddIndication->u16TID;
- pstAddIndicationDest->u16CID = pstAddIndication->u16CID;
- pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
- pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Active Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfActiveSet,
- (PUCHAR)&pstAddIndicationDest->sfActiveSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfActiveSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Admitted Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfAdmittedSet,
- (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Restoring Authorized Set ");
- ulStatus = RestoreSFParam(Adapter,
- (ULONG)pstAddIndication->psfAuthorizedSet,
- (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
- if (ulStatus != 1)
- goto failed_restore_sf_param;
-
- if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
- pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers =
- MAX_CLASSIFIERS_IN_SF;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Dumping the whole raw packet");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "============================================================");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- " pstAddIndicationDest->sfActiveSet size %zx %p",
- sizeof(*pstAddIndicationDest), pstAddIndicationDest);
- /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG,
- * DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest,
- * sizeof(*pstAddIndicationDest));
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "============================================================");
- return pstAddIndicationDest;
-failed_restore_sf_param:
- kfree(pstAddIndicationDest);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "<=====");
- return NULL;
-}
-
-ULONG SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter)
-{
- ULONG ulTargetDsxBuffersBase = 0;
- ULONG ulCntTargetBuffers;
- ULONG i;
- int Status;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Adapter was NULL!!!");
- return 0;
- }
-
- if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
- return 1;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Size of Each DSX Buffer(Also size of connection manager parameters): %zx ",
- sizeof(struct bcm_connect_mgr_params));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Reading DSX buffer From Target location %x ",
- DSX_MESSAGE_EXCHANGE_BUFFER);
-
- Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER,
- (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
- if (Status < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "RDM failed!!");
- return 0;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Base Address Of DSX Target Buffer : 0x%lx",
- ulTargetDsxBuffersBase);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
- ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE /
- sizeof(struct bcm_connect_mgr_params);
-
- Adapter->ulTotalTargetBuffersAvailable =
- ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
- MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- " Total Target DSX Buffer setup %lx ",
- Adapter->ulTotalTargetBuffersAvailable);
-
- for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
- Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
- Adapter->astTargetDsxBuffer[i].valid = 1;
- Adapter->astTargetDsxBuffer[i].tid = 0;
- ulTargetDsxBuffersBase += sizeof(struct bcm_connect_mgr_params);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
- i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
- }
- Adapter->ulCurrentTargetBuffer = 0;
- Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
- return 1;
-}
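SetUpTargetDsxBuffers above carves one contiguous target-memory window into fixed-size DSX slots, one per in-flight DSx transaction: it divides the window size by sizeof(struct bcm_connect_mgr_params), caps the count at MAX_TARGET_DSX_BUFFERS, and places each slot one structure-size after the previous one. A minimal sketch of that carve-out follows, using a hypothetical base address and hypothetical sizes; in the driver the base comes from rdmalt() and the slots are recorded in Adapter->astTargetDsxBuffer.

#include <stdio.h>

#define DSX_WINDOW_SIZE		(4 * 1024)	/* hypothetical exchange-buffer size */
#define MAX_DSX_SLOTS		24		/* hypothetical cap on slot count */
#define CONNECT_MGR_PARAMS_SIZE	144		/* stand-in for sizeof(struct bcm_connect_mgr_params) */

int main(void)
{
	unsigned long base = 0xBF60ACACUL;	/* would come from rdmalt() in the driver */
	unsigned long count = DSX_WINDOW_SIZE / CONNECT_MGR_PARAMS_SIZE;
	unsigned long i;

	if (count > MAX_DSX_SLOTS)
		count = MAX_DSX_SLOTS;

	/* Each slot starts one connect-manager-params structure after the previous one. */
	for (i = 0; i < count; i++)
		printf("slot %lu at 0x%lx\n", i, base + i * CONNECT_MGR_PARAMS_SIZE);

	return 0;
}

With these stand-in numbers the window would hold 28 structures, so the slot count is capped at 24.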
-
-static ULONG GetNextTargetBufferLocation(struct bcm_mini_adapter *Adapter,
- B_UINT16 tid)
-{
- ULONG dsx_buf;
- ULONG idx, max_try;
-
- if ((Adapter->ulTotalTargetBuffersAvailable == 0)
- || (Adapter->ulFreeTargetBufferCnt == 0)) {
- ClearTargetDSXBuffer(Adapter, tid, false);
- return 0;
- }
-
- idx = Adapter->ulCurrentTargetBuffer;
- max_try = Adapter->ulTotalTargetBuffersAvailable;
- while ((max_try) && (Adapter->astTargetDsxBuffer[idx].valid != 1)) {
- idx = (idx+1) % Adapter->ulTotalTargetBuffersAvailable;
- max_try--;
- }
-
- if (max_try == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ",
- Adapter->ulFreeTargetBufferCnt);
- ClearTargetDSXBuffer(Adapter, tid, false);
- return 0;
- }
-
- dsx_buf = Adapter->astTargetDsxBuffer[idx].ulTargetDsxBuffer;
- Adapter->astTargetDsxBuffer[idx].valid = 0;
- Adapter->astTargetDsxBuffer[idx].tid = tid;
- Adapter->ulFreeTargetBufferCnt--;
- idx = (idx+1)%Adapter->ulTotalTargetBuffersAvailable;
- Adapter->ulCurrentTargetBuffer = idx;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "GetNextTargetBufferLocation :Returning address %lx tid %d\n",
- dsx_buf, tid);
-
- return dsx_buf;
-}
-
-int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter)
-{
- /*
- * Need to Allocate memory to contain the SUPER Large structures
- * Our driver can't create these structures on Stack
- */
- Adapter->caDsxReqResp = kmalloc(sizeof(struct bcm_add_indication_alt)
- + LEADER_SIZE, GFP_KERNEL);
- if (!Adapter->caDsxReqResp)
- return -ENOMEM;
-
- return 0;
-}
-
-int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter)
-{
- kfree(Adapter->caDsxReqResp);
- return 0;
-}
-
-/*
- * @ingroup ctrl_pkt_functions
- * This routine processes the control responses
- * for Connection Management.
- * @return - false if the DSx message could not be processed, true otherwise.
- */
-bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
-{
- struct bcm_connect_mgr_params *psfLocalSet = NULL;
- struct bcm_add_indication_alt *pstAddIndication = NULL;
- struct bcm_change_indication *pstChangeIndication = NULL;
- struct bcm_leader *pLeader = NULL;
- INT uiSearchRuleIndex = 0;
- ULONG ulSFID;
-
- /*
-	 * For DSx messages other than DSD, the message contains a target address from
-	 * which we need to read out the rest of the service flow parameter structure
- */
- pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
- if (pstAddIndication == NULL) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication *)pvBuffer)->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
- return false;
- }
-
- DumpCmControlPacket(pstAddIndication);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
- pLeader = (struct bcm_leader *)Adapter->caDsxReqResp;
-
- pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
- pLeader->Vcid = 0;
-
- ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
- switch (pstAddIndication->u8Type) {
- case DSA_REQ:
- pLeader->PLength = sizeof(struct bcm_add_indication_alt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
- *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- break;
- case DSA_RSP:
- pLeader->PLength = sizeof(struct bcm_add_indication_alt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
- pLeader->PLength);
- *((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
- = *pstAddIndication;
- ((struct bcm_add_indication_alt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
- /* FALLTHROUGH */
- case DSA_ACK:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
- ntohs(pstAddIndication->u16VCID));
- uiSearchRuleIndex = SearchFreeSfid(Adapter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
- uiSearchRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
- pstAddIndication->u8Direction);
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
- pstAddIndication->u8Direction;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
- pstAddIndication->sfActiveSet.bValid);
- if (pstAddIndication->sfActiveSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
-
- if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
-
- if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
-
- if (pstAddIndication->sfActiveSet.bValid == false) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
- if (pstAddIndication->sfAdmittedSet.bValid)
- psfLocalSet = &pstAddIndication->sfAdmittedSet;
- else if (pstAddIndication->sfAuthorizedSet.bValid)
- psfLocalSet = &pstAddIndication->sfAuthorizedSet;
- } else {
- psfLocalSet = &pstAddIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
- }
-
- if (!psfLocalSet) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- } else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
- Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
-
- if (UPLINK_DIR == pstAddIndication->u8Direction)
- atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
-
- CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
- /* don't free pstAddIndication */
-
-				/* Inside CopyToAdapter, sorting of all the SFs takes place.
-				 * Hence any access to the newly added SF through uiSearchRuleIndex
-				 * is invalid and must be strictly avoided.
- */
- /* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
- memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
-
- if (pstAddIndication->sfActiveSet.bValid == TRUE) {
- if (UPLINK_DIR == pstAddIndication->u8Direction) {
- if (!Adapter->LinkUpStatus) {
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- Adapter->LinkUpStatus = 1;
- if (netif_msg_link(Adapter))
- pr_info(PFX "%s: link up\n", Adapter->dev->name);
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = get_seconds();
- }
- }
- }
- } else {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
- kfree(pstAddIndication);
- return false;
- }
- break;
- case DSC_REQ:
- pLeader->PLength = sizeof(struct bcm_change_indication);
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
-
- *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
-
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- kfree(pstAddIndication);
- break;
- case DSC_RSP:
- pLeader->PLength = sizeof(struct bcm_change_indication);
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
- *((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
- ((struct bcm_change_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
- /* FALLTHROUGH */
- case DSC_ACK:
- pstChangeIndication = (struct bcm_change_indication *)pstAddIndication;
- uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
- if (uiSearchRuleIndex > NO_OF_QUEUES-1)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
- if (pstChangeIndication->sfActiveSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
-
- if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
-
- if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
- Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
-
- if (pstChangeIndication->sfActiveSet.bValid == false) {
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = false;
-
- if (pstChangeIndication->sfAdmittedSet.bValid)
- psfLocalSet = &pstChangeIndication->sfAdmittedSet;
- else if (pstChangeIndication->sfAuthorizedSet.bValid)
- psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
- } else {
- psfLocalSet = &pstChangeIndication->sfActiveSet;
- Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
- }
-
- if (!psfLocalSet) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
- Adapter->PackInfo[uiSearchRuleIndex].bActive = false;
- Adapter->PackInfo[uiSearchRuleIndex].bValid = false;
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
- kfree(pstAddIndication);
- } else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
- Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
- pstChangeIndication->u8CC, psfLocalSet->bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
- Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
- CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
-
- *(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
- } else if (pstChangeIndication->u8CC == 6) {
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- kfree(pstAddIndication);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
- kfree(pstAddIndication);
- return false;
- }
- break;
- case DSD_REQ:
- pLeader->PLength = sizeof(struct bcm_del_indication);
- *((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((struct bcm_del_indication *)pstAddIndication);
-
- ulSFID = ntohl(((struct bcm_del_indication *)pstAddIndication)->u32SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
-
- if (uiSearchRuleIndex < NO_OF_QUEUES) {
- /* Delete All Classifiers Associated with this SFID */
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- Adapter->u32TotalDSD++;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
- ((struct bcm_del_indication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
- CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
- /* FALLTHROUGH */
- case DSD_RSP:
-		/* Do nothing, as the SF has already been deleted */
- break;
- case DSD_ACK:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
- break;
- default:
- kfree(pstAddIndication);
- return false;
- }
- return TRUE;
-}
-
-int get_dsx_sf_data_to_application(struct bcm_mini_adapter *Adapter,
- UINT uiSFId, void __user *user_buffer)
-{
- int status = 0;
- struct bcm_packet_info *psSfInfo = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "status =%d", status);
- status = SearchSfid(Adapter, uiSFId);
- if (status >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "SFID %d not present in queue !!!", uiSFId);
- return -EINVAL;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "status =%d", status);
- psSfInfo = &Adapter->PackInfo[status];
- if (psSfInfo->pstSFIndication
- && copy_to_user(user_buffer, psSfInfo->pstSFIndication,
- sizeof(struct bcm_add_indication_alt))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "copy to user failed SFID %d, present in queue !!!",
- uiSFId);
- status = -EFAULT;
- return status;
- }
- return STATUS_SUCCESS;
-}
-
-VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter,
- PUINT puiBuffer)
-{
- B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
- struct bcm_stim_sfhostnotify *pHostInfo = NULL;
- UINT uiSearchRuleIndex = 0;
- ULONG ulSFID = 0;
-
- puiBuffer += 2;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
-
- while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
- u32NumofSFsinMsg--;
- pHostInfo = (struct bcm_stim_sfhostnotify *)puiBuffer;
- puiBuffer = (PUINT)(pHostInfo + 1);
-
- ulSFID = ntohl(pHostInfo->SFID);
- uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "SFID: 0x%lx\n", ulSFID);
-
- if (uiSearchRuleIndex >= NO_OF_QUEUES
- || uiSearchRuleIndex == HiPriority) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "The SFID <%lx> doesn't exist in host entry or is Invalid\n",
- ulSFID);
- continue;
- }
-
- if (pHostInfo->RetainSF == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "Going to Delete SF");
- deleteSFBySfid(Adapter, uiSearchRuleIndex);
- } else {
- struct bcm_packet_info *packinfo =
- &Adapter->PackInfo[uiSearchRuleIndex];
-
- packinfo->usVCID_Value = ntohs(pHostInfo->VCID);
- packinfo->usCID = ntohs(pHostInfo->newCID);
- packinfo->bActive = false;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL,
- "pHostInfo->QoSParamSet: 0x%x\n",
- pHostInfo->QoSParamSet);
-
- if (pHostInfo->QoSParamSet & 0x1)
- packinfo->bAuthorizedSet = TRUE;
- if (pHostInfo->QoSParamSet & 0x2)
- packinfo->bAdmittedSet = TRUE;
- if (pHostInfo->QoSParamSet & 0x4) {
- packinfo->bActiveSet = TRUE;
- packinfo->bActive = TRUE;
- }
- }
- }
-}
-
-static void restore_endianess_of_pstClassifierEntry(
- struct bcm_classifier_rule *pstClassifierEntry,
- enum bcm_ipaddr_context eIpAddrContext)
-{
- int i;
- union u_ip_address *stSrc = &pstClassifierEntry->stSrcIpAddress;
- union u_ip_address *stDest = &pstClassifierEntry->stDestIpAddress;
-
- for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
- if (eIpAddrContext == eSrcIpAddress) {
- stSrc->ulIpv6Addr[i] = ntohl(stSrc->ulIpv6Addr[i]);
- stSrc->ulIpv6Mask[i] = ntohl(stSrc->ulIpv6Mask[i]);
- } else if (eIpAddrContext == eDestIpAddress) {
- stDest->ulIpv6Addr[i] = ntohl(stDest->ulIpv6Addr[i]);
- stDest->ulIpv6Mask[i] = ntohl(stDest->ulIpv6Mask[i]);
- }
- }
-}
-
-static void apply_phs_rule_to_all_classifiers(
- register struct bcm_mini_adapter *Adapter, /* <Pointer to the Adapter structure */
- register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
- USHORT uVCID,
- struct bcm_phs_rule *sPhsRule,
- struct bcm_phs_rules *cPhsRule,
- struct bcm_add_indication_alt *pstAddIndication)
-{
- unsigned int uiClassifierIndex = 0;
- struct bcm_classifier_rule *curr_classifier = NULL;
-
- if (pstAddIndication->u8Direction == UPLINK_DIR) {
- for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
- curr_classifier =
- &Adapter->astClassifierTable[uiClassifierIndex];
- if ((curr_classifier->bUsed) &&
- (curr_classifier->ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
- (curr_classifier->u8AssociatedPHSI == cPhsRule->u8PHSI)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
- "Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
- curr_classifier->uiClassifierRuleIndex,
- cPhsRule->u8PHSI);
-				/* Update the PHS Rule for this classifier, as the associated PHSI is defined */
-
- /* Copy the PHS Rule */
- sPhsRule->u8PHSI = cPhsRule->u8PHSI;
- sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength;
- sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength;
- sPhsRule->u8PHSS = cPhsRule->u8PHSS;
- sPhsRule->u8PHSV = cPhsRule->u8PHSV;
- memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS);
- memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS);
- sPhsRule->u8RefCnt = 0;
- sPhsRule->bUnclassifiedPHSRule = false;
- sPhsRule->PHSModifiedBytes = 0;
- sPhsRule->PHSModifiedNumPackets = 0;
- sPhsRule->PHSErrorNumPackets = 0;
-
- /* bPHSRuleAssociated = TRUE; */
- /* Store The PHS Rule for this classifier */
-
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- curr_classifier->uiClassifierRuleIndex,
- sPhsRule,
- curr_classifier->u8AssociatedPHSI);
-
- /* Update PHS Rule For the Classifier */
- if (sPhsRule->u8PHSI) {
- curr_classifier->u32PHSRuleID = sPhsRule->u8PHSI;
- memcpy(&curr_classifier->sPhsRule, sPhsRule, sizeof(struct bcm_phs_rule));
- }
- }
- }
- } else {
- /* Error PHS Rule specified in signaling could not be applied to any classifier */
-
- /* Copy the PHS Rule */
- sPhsRule->u8PHSI = cPhsRule->u8PHSI;
- sPhsRule->u8PHSFLength = cPhsRule->u8PHSFLength;
- sPhsRule->u8PHSMLength = cPhsRule->u8PHSMLength;
- sPhsRule->u8PHSS = cPhsRule->u8PHSS;
- sPhsRule->u8PHSV = cPhsRule->u8PHSV;
- memcpy(sPhsRule->u8PHSF, cPhsRule->u8PHSF, MAX_PHS_LENGTHS);
- memcpy(sPhsRule->u8PHSM, cPhsRule->u8PHSM, MAX_PHS_LENGTHS);
- sPhsRule->u8RefCnt = 0;
- sPhsRule->bUnclassifiedPHSRule = TRUE;
- sPhsRule->PHSModifiedBytes = 0;
- sPhsRule->PHSModifiedNumPackets = 0;
- sPhsRule->PHSErrorNumPackets = 0;
- /* Store The PHS Rule for this classifier */
-
- /*
-		 * Passing the argument u8PHSI instead of clsid, because for DL with no
-		 * classifier rule, clsid will be zero and hence we can't have multiple
-		 * PHS rules for the same SF. To support multiple PHS rules, pass u8PHSI.
- */
- PhsUpdateClassifierRule(
- &Adapter->stBCMPhsContext,
- uVCID,
- sPhsRule->u8PHSI,
- sPhsRule,
- sPhsRule->u8PHSI);
- }
-}
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
deleted file mode 100644
index 0887d3f49e2f..000000000000
--- a/drivers/staging/bcm/CmHost.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***************************************************************************
- * (c) Beceem Communications Inc.
- * All Rights Reserved
- *
- * file : CmHost.h
- * author: Rajeev Tirumala
- * date : September 8 , 2006
- * brief : Definitions for Connection Management Requests structure
- *         which we will use to set up our connection structures. It's high
- *         time we had a header file for CmHost.cpp to isolate the way
- * f/w sends DSx messages and the way we interpret them in code.
- * Revision History
- *
- * Date Author Version Description
- * 08-Sep-06 Rajeev 0.1 Created
- ***************************************************************************/
-#ifndef _CM_HOST_H
-#define _CM_HOST_H
-
-#pragma once
-#pragma pack(push, 4)
-
-#define DSX_MESSAGE_EXCHANGE_BUFFER 0xBF60AC84 /* This contains the pointer */
-#define DSX_MESSAGE_EXCHANGE_BUFFER_SIZE 72000 /* ~70 KB */
-
-struct bcm_add_indication_alt {
- u8 u8Type;
- u8 u8Direction;
- u16 u16TID;
- u16 u16CID;
- u16 u16VCID;
- struct bcm_connect_mgr_params sfAuthorizedSet;
- struct bcm_connect_mgr_params sfAdmittedSet;
- struct bcm_connect_mgr_params sfActiveSet;
- u8 u8CC; /* < Confirmation Code */
- u8 u8Padd;
- u16 u16Padd;
-};
-
-struct bcm_change_indication {
- u8 u8Type;
- u8 u8Direction;
- u16 u16TID;
- u16 u16CID;
- u16 u16VCID;
- struct bcm_connect_mgr_params sfAuthorizedSet;
- struct bcm_connect_mgr_params sfAdmittedSet;
- struct bcm_connect_mgr_params sfActiveSet;
- u8 u8CC; /* < Confirmation Code */
- u8 u8Padd;
- u16 u16Padd;
-};
-
-unsigned long StoreCmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer, unsigned int *puBufferLength);
-int AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-int FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-unsigned long SetUpTargetDsxBuffers(struct bcm_mini_adapter *Adapter);
-bool CmControlResponseMessage(struct bcm_mini_adapter *Adapter, void *pvBuffer);
-
-#pragma pack(pop)
-
-#endif
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
deleted file mode 100644
index 4226c931cd45..000000000000
--- a/drivers/staging/bcm/DDRInit.c
+++ /dev/null
@@ -1,1355 +0,0 @@
-#include "headers.h"
-
-
-
-#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
-#define MIPS_CLOCK_REG 0x0f000820
-
-/* DDR INIT-133Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 12 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0F000800, 0x00007212},
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00000F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a04C, 0x0000000C},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020202},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x110a0200},
- {0x0F00703C, 0x02101010},
- {0x0F007040, 0x45751200},
- {0x0F007044, 0x110a0d00},
- {0x0F007048, 0x081b0306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010246c},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-/* 80Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 10 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07f1ffff},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a000, 0x00000016},
- {0x0F00a04C, 0x0000000C},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01000000},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x02020201},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x04000000},
- {0x0F007034, 0x00000002},
- {0x0F007038, 0x1F060200},
- {0x0F00703C, 0x1C22221F},
- {0x0F007040, 0x8A006600},
- {0x0F007044, 0x221a0800},
- {0x0F007048, 0x02690204},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x000A15D6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00004000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-/* 100Mhz */
-#define T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 13 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0F000800, 0x00007008},
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13E3F},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a04C, 0x0000000C},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020201},
- {0x0F00702c, 0x0204040A},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x20080200},
- {0x0F00703C, 0x02030320},
- {0x0F007040, 0x6E7F1200},
- {0x0F007044, 0x01190A00},
- {0x0F007048, 0x06120305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001C},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-/* Net T3B DDR Settings
- * DDR INIT-133Mhz
- */
-static struct bcm_ddr_setting asDPLL_266MHZ[] = {
- {0x0F000800, 0x00007212},
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00000F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF1B00},
- {0x0f000870, 0x00000002}
-};
-
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000810, 0x00000F95},
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13652},
- {0x0f000840, 0x0FFF0800},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020202},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x130a0200},
- {0x0F00703C, 0x02101012},
- {0x0F007040, 0x457D1200},
- {0x0F007044, 0x11130d00},
- {0x0F007048, 0x040D0306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0000001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010246c},
- {0x0F007064, 0x00000012},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000},
- };
-
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F13FFF},
- {0x0f000840, 0x0FFF1F00},
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
-
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00a000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01000000},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x02020201},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x04000000},
- {0x0F007034, 0x02000002},
- {0x0F007038, 0x1F060202},
- {0x0F00703C, 0x1C22221F},
- {0x0F007040, 0x8A006600},
- {0x0F007044, 0x221a0800},
- {0x0F007048, 0x02690204},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x000A15D6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00004000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-/* 100Mhz */
-#define T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3B_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00000F95},
- {0x0f000820, 0x07F1369B},
- {0x0f000840, 0x0FFF0800},
- {0x0f000880, 0x000003DD},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020201},
- {0x0F00702c, 0x0204040A},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x02000004},
- {0x0F007038, 0x20080200},
- {0x0F00703C, 0x02030320},
- {0x0F007040, 0x6E7F1200},
- {0x0F007044, 0x01190A00},
- {0x0F007048, 0x06120305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001C},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00000104},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F1365B},
- {0x0f000810, 0x00002F95},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020200},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x200a0200},
- {0x0F00703C, 0x02101020},
- {0x0F007040, 0x45711200},
- {0x0F007044, 0x110D0D00},
- {0x0F007048, 0x04080306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010245F},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00040000},
- {0x0F007098, 0x00000000},
- {0x0F0070c8, 0x00000104},
- /* Enable 2 ports within X-bar */
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 11 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00002F95},
- {0x0f000820, 0x03F1369B},
- {0x0f000840, 0x0fff0000},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020200},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x1F080200},
- {0x0F00703C, 0x0203031F},
- {0x0F007040, 0x6e001200},
- {0x0F007044, 0x011a0a00},
- {0x0F007048, 0x03000305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- {0x0F007094, 0x00010000},
- {0x0F007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 9 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LP_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00002F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF1F00},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0F00a084, 0x1Cffffff},
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000000},
- {0x0f007008, 0x01000001},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000000},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04020107},
- {0x0f007024, 0x00000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0204040a},
- {0x0f007030, 0x04000000},
- {0x0f007034, 0x00000002},
- {0x0f007038, 0x1d060200},
- {0x0f00703c, 0x1c22221d},
- {0x0f007040, 0x8A116600},
- {0x0f007044, 0x222d0800},
- {0x0f007048, 0x02690204},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0100001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x000A15D6},
- {0x0f007064, 0x0000000A},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00004000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00010000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-
-
-/* T3 LP-B (UMA-B) */
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting160MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F137DB},
- {0x0f000810, 0x01842795},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF0400},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
-	{0x0f003050, 0x00000021}, /* this is the flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
-	{0x0F00a084, 0x1Cffffff}, /* Now dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000001},
- {0x0f007008, 0x01000101},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000100},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04030107},
- {0x0f007024, 0x02000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0206060a},
- {0x0f007030, 0x050d0d00},
- {0x0f007034, 0x00000003},
- {0x0f007038, 0x170a0200},
- {0x0f00703c, 0x02101012},
- {0x0f007040, 0x45161200},
- {0x0f007044, 0x11250c00},
- {0x0f007048, 0x04da0307},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0000001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x00142bb6},
- {0x0f007064, 0x20430014},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00009000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00040000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting133MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x03F1365B},
- {0x0f000810, 0x00002F95},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0f000860, 0x00000000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020001},
- {0x0F007020, 0x04030107},
- {0x0F007024, 0x02000007},
- {0x0F007028, 0x02020200},
- {0x0F00702c, 0x0206060a},
- {0x0F007030, 0x05000000},
- {0x0F007034, 0x00000003},
- {0x0F007038, 0x190a0200},
- {0x0F00703C, 0x02101017},
- {0x0F007040, 0x45171200},
- {0x0F007044, 0x11290D00},
- {0x0F007048, 0x04080306},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x0010245F},
- {0x0F007064, 0x00000010},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00007000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- /* Enable BW improvement within memory controller */
- {0x0F007094, 0x00040000},
- {0x0F007098, 0x00000000},
- {0x0F0070c8, 0x00000104},
- /* Enable 2 ports within X-bar */
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ 8 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting100MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000810, 0x00002F95},
- {0x0f000820, 0x03F1369B},
- {0x0f000840, 0x0fff0000},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- /* Changed source for X-bar and MIPS clock to APLL */
- {0x0f000840, 0x0FFF0000},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor which
-				   * sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- /* Memcontroller Default values */
- {0x0F007000, 0x00010001},
- {0x0F007004, 0x01010100},
- {0x0F007008, 0x01000001},
- {0x0F00700c, 0x00000000},
- {0x0F007010, 0x01000000},
- {0x0F007014, 0x01000100},
- {0x0F007018, 0x01000000},
- {0x0F00701c, 0x01020000},
- {0x0F007020, 0x04020107},
- {0x0F007024, 0x00000007},
- {0x0F007028, 0x01020200},
- {0x0F00702c, 0x0204040a},
- {0x0F007030, 0x06000000},
- {0x0F007034, 0x00000004},
- {0x0F007038, 0x1F080200},
- {0x0F00703C, 0x0203031F},
- {0x0F007040, 0x6e001200},
- {0x0F007044, 0x011a0a00},
- {0x0F007048, 0x03000305},
- {0x0F00704c, 0x00000000},
- {0x0F007050, 0x0100001c},
- {0x0F007054, 0x00000000},
- {0x0F007058, 0x00000000},
- {0x0F00705c, 0x00000000},
- {0x0F007060, 0x00082ED6},
- {0x0F007064, 0x0000000A},
- {0x0F007068, 0x00000000},
- {0x0F00706c, 0x00000001},
- {0x0F007070, 0x00005000},
- {0x0F007074, 0x00000000},
- {0x0F007078, 0x00000000},
- {0x0F00707C, 0x00000000},
- {0x0F007080, 0x00000000},
- {0x0F007084, 0x00000000},
- {0x0F007088, 0x01000001},
- {0x0F00708c, 0x00000101},
- {0x0F007090, 0x00000000},
- {0x0F007094, 0x00010000},
- {0x0F007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- /* Enable 2 ports within X-bar */
- {0x0F00A000, 0x00000016},
- /* Enable start bit within memory controller */
- {0x0F007018, 0x01010000}
-};
-
-#define T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ 7 /* index for 0x0F007000 */
-static struct bcm_ddr_setting asT3LPB_DDRSetting80MHz[] = {
- /* DPLL Clock Setting */
- {0x0f000820, 0x07F13FFF},
- {0x0f000810, 0x00002F95},
- {0x0f000860, 0x00000000},
- {0x0f000880, 0x000003DD},
- {0x0f000840, 0x0FFF1F00},
- {0x0F00a044, 0x1fffffff},
- {0x0F00a040, 0x1f000000},
- {0x0f003050, 0x00000021}, /* flash/eeprom clock divisor
-				   * which sets the flash clock to 20 MHz */
- {0x0F00a084, 0x1Cffffff}, /* dump from here in internal memory */
- {0x0F00a080, 0x1C000000},
- {0x0F00A000, 0x00000016},
- {0x0f007000, 0x00010001},
- {0x0f007004, 0x01000000},
- {0x0f007008, 0x01000001},
- {0x0f00700c, 0x00000000},
- {0x0f007010, 0x01000000},
- {0x0f007014, 0x01000100},
- {0x0f007018, 0x01000000},
- {0x0f00701c, 0x01020000},
- {0x0f007020, 0x04020107},
- {0x0f007024, 0x00000007},
- {0x0f007028, 0x02020200},
- {0x0f00702c, 0x0204040a},
- {0x0f007030, 0x04000000},
- {0x0f007034, 0x00000002},
- {0x0f007038, 0x1d060200},
- {0x0f00703c, 0x1c22221d},
- {0x0f007040, 0x8A116600},
- {0x0f007044, 0x222d0800},
- {0x0f007048, 0x02690204},
- {0x0f00704c, 0x00000000},
- {0x0f007050, 0x0100001c},
- {0x0f007054, 0x00000000},
- {0x0f007058, 0x00000000},
- {0x0f00705c, 0x00000000},
- {0x0f007060, 0x000A15D6},
- {0x0f007064, 0x0000000A},
- {0x0f007068, 0x00000000},
- {0x0f00706c, 0x00000001},
- {0x0f007070, 0x00004000},
- {0x0f007074, 0x00000000},
- {0x0f007078, 0x00000000},
- {0x0f00707c, 0x00000000},
- {0x0f007080, 0x00000000},
- {0x0f007084, 0x00000000},
- {0x0f007088, 0x01000001},
- {0x0f00708c, 0x00000101},
- {0x0f007090, 0x00000000},
- {0x0f007094, 0x00010000},
- {0x0f007098, 0x00000000},
- {0x0F0070C8, 0x00000104},
- {0x0F007018, 0x01010000}
-};
-
-
-int ddr_init(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_ddr_setting *psDDRSetting = NULL;
- ULONG RegCount = 0;
- UINT value = 0;
- UINT uiResetValue = 0;
- UINT uiClockSetting = 0;
- int retval = STATUS_SUCCESS;
-
- switch (Adapter->chip_id) {
- case 0xbece3200:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = (sizeof(asT3LP_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = (sizeof(asT3LP_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = (sizeof(asT3LP_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F13652;
- else
- uiClockSetting = 0x03F1365B;
- break;
- default:
- return -EINVAL;
- }
-
- break;
- case T3LPB:
- case BCS220_2:
- case BCS220_2BC:
- case BCS250_BC:
- case BCS220_3:
- /* Set bit 2 and bit 6 to 1 for BBIC 2mA drive
- * (please check current value and additionally set these bits)
- */
- if ((Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3)) {
- retval = rdmalt(Adapter, (UINT)0x0f000830, &uiResetValue,
- sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__, __LINE__);
- return retval;
- }
- uiResetValue |= 0x44;
- retval = wrmalt(Adapter, (UINT)0x0f000830, &uiResetValue,
- sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__, __LINE__);
- return retval;
- }
- }
- switch (Adapter->DDRSetting) {
-
-
-
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount = (sizeof(asT3B_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
- RegCount = (sizeof(asT3B_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount = (sizeof(asT3B_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
-
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F13652;
- else
- uiClockSetting = 0x03F1365B;
- break;
-
- case DDR_160_MHZ:
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = sizeof(asT3LPB_DDRSetting160MHz) /
- sizeof(struct bcm_ddr_setting);
-
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x03F137D2;
- else
- uiClockSetting = 0x03F137DB;
- }
- break;
-
- case 0xbece0110:
- case 0xbece0120:
- case 0xbece0121:
- case 0xbece0130:
- case 0xbece0300:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "DDR Setting: %x\n", Adapter->DDRSetting);
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = (sizeof(asT3_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = (sizeof(asT3_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = (sizeof(asT3_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- default:
- return -EINVAL;
- }
- case 0xbece0310:
- {
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = (sizeof(asT3B_DDRSetting80MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = (sizeof(asT3B_DDRSetting100MHz) /
- sizeof(struct bcm_ddr_setting));
- break;
- case DDR_133_MHZ:
-
- /* 266Mhz PLL selected. */
- if (Adapter->bDPLLConfig == PLL_266_MHZ) {
- memcpy(asT3B_DDRSetting133MHz, asDPLL_266MHZ,
- sizeof(asDPLL_266MHZ));
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = (sizeof(asT3B_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- } else {
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = (sizeof(asT3B_DDRSetting133MHz) /
- sizeof(struct bcm_ddr_setting));
- if (Adapter->bMipsConfig == MIPS_200_MHZ)
- uiClockSetting = 0x07F13652;
- else
- uiClockSetting = 0x07F1365B;
- }
- break;
- default:
- return -EINVAL;
- }
- break;
-
- }
- default:
- return -EINVAL;
- }
-
- value = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Register Count is =%lu\n", RegCount);
- while (RegCount && !retval) {
- if (uiClockSetting
- && psDDRSetting->ulRegAddress == MIPS_CLOCK_REG)
- value = uiClockSetting;
- else
- value = psDDRSetting->ulRegValue;
- retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value,
- sizeof(value));
- if (STATUS_SUCCESS != retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- break;
- }
-
- RegCount--;
- psDDRSetting++;
- }
-
- if (Adapter->chip_id >= 0xbece3300) {
-
- mdelay(3);
- if ((Adapter->chip_id != BCS220_2) &&
- (Adapter->chip_id != BCS220_2BC) &&
- (Adapter->chip_id != BCS220_3)) {
- /* drive MDDR to half in case of UMA-B: */
- uiResetValue = 0x01010001;
- retval = wrmalt(Adapter, (UINT)0x0F007018,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x00040020;
- retval = wrmalt(Adapter, (UINT)0x0F007094,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x01020101;
- retval = wrmalt(Adapter, (UINT)0x0F00701c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x01010000;
- retval = wrmalt(Adapter, (UINT)0x0F007018,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- }
- mdelay(3);
-
- /* DC/DC standby change...
- * This is to be done only for Hybrid PMU mode.
-		 * With the current h/w there is no way to detect this,
-		 * and since we don't have an internal PMU, let's do it under
-		 * the UMA-B chip id. We will change this when we have an
-		 * internal PMU.
- */
- if (Adapter->PmuMode == HYBRID_MODE_7C) {
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x1322a8;
- retval = wrmalt(Adapter, (UINT)0x0f000d1c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x132296;
- retval = wrmalt(Adapter, (UINT)0x0f000d14,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- } else if (Adapter->PmuMode == HYBRID_MODE_6) {
-
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x6003229a;
- retval = wrmalt(Adapter, (UINT)0x0f000d14,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- retval = rdmalt(Adapter, (UINT)0x0f000c00,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- uiResetValue = 0x1322a8;
- retval = wrmalt(Adapter, (UINT)0x0f000d1c,
- &uiResetValue, sizeof(uiResetValue));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, CMHOST, RDM,
- DBG_LVL_ALL,
- "%s:%d RDM failed\n",
- __func__,
- __LINE__);
- return retval;
- }
- }
-
- }
- Adapter->bDDRInitDone = TRUE;
- return retval;
-}
-
-int download_ddr_settings(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_ddr_setting *psDDRSetting = NULL;
- ULONG RegCount = 0;
- unsigned long ul_ddr_setting_load_addr =
- DDR_DUMP_INTERNAL_DEVICE_MEMORY;
- UINT value = 0;
- int retval = STATUS_SUCCESS;
- bool bOverrideSelfRefresh = false;
-
- switch (Adapter->chip_id) {
- case 0xbece3200:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LP_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting80MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LP_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting100MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LP_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LP_DDRSetting133MHz);
- RegCount -= T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3LP_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- default:
- return -EINVAL;
- }
- break;
-
- case T3LPB:
- case BCS220_2:
- case BCS220_2BC:
- case BCS250_BC:
- case BCS220_3:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3LPB_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting80MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3LPB_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting100MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting133MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
-
- case DDR_160_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3LPB_DDRSetting160MHz;
- RegCount = ARRAY_SIZE(asT3LPB_DDRSetting160MHz);
- RegCount -= T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
- psDDRSetting += T3LPB_SKIP_CLOCK_PROGRAM_DUMP_160MHZ;
-
- break;
- default:
- return -EINVAL;
- }
- break;
- case 0xbece0300:
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting80MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting100MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- psDDRSetting = asT3_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3_DDRSetting133MHz);
- RegCount -= T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- default:
- return -EINVAL;
- }
- break;
- case 0xbece0310:
- {
- switch (Adapter->DDRSetting) {
- case DDR_80_MHZ:
- psDDRSetting = asT3B_DDRSetting80MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting80MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_80MHZ;
- break;
- case DDR_100_MHZ:
- psDDRSetting = asT3B_DDRSetting100MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting100MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_100MHZ;
- break;
- case DDR_133_MHZ:
- bOverrideSelfRefresh = TRUE;
- psDDRSetting = asT3B_DDRSetting133MHz;
- RegCount = ARRAY_SIZE(asT3B_DDRSetting133MHz);
- RegCount -= T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- psDDRSetting += T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ;
- break;
- }
- break;
- }
- default:
- return -EINVAL;
- }
-	/* total number of registers that have to be dumped */
- value = RegCount;
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
-
- return retval;
- }
- ul_ddr_setting_load_addr += sizeof(ULONG);
- /* signature */
- value = (0x1d1e0dd0);
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- if (retval) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- return retval;
- }
-
- ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount *= (sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
-
- while (RegCount && !retval) {
- value = psDDRSetting->ulRegAddress;
- retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value,
- sizeof(value));
- ul_ddr_setting_load_addr += sizeof(ULONG);
- if (!retval) {
- if (bOverrideSelfRefresh
- && (psDDRSetting->ulRegAddress
- == 0x0F007018))
- value = (psDDRSetting->ulRegValue | (1<<8));
- else
- value = psDDRSetting->ulRegValue;
-
- if (STATUS_SUCCESS != wrmalt(Adapter,
- ul_ddr_setting_load_addr,
- &value,
- sizeof(value))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "%s:%d\n", __func__, __LINE__);
- break;
- }
- }
- ul_ddr_setting_load_addr += sizeof(ULONG);
- RegCount--;
- psDDRSetting++;
- }
- return retval;
-}
diff --git a/drivers/staging/bcm/DDRInit.h b/drivers/staging/bcm/DDRInit.h
deleted file mode 100644
index b0196fce9255..000000000000
--- a/drivers/staging/bcm/DDRInit.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _DDR_INIT_H_
-#define _DDR_INIT_H_
-
-
-
-int ddr_init(struct bcm_mini_adapter *psAdapter);
-int download_ddr_settings(struct bcm_mini_adapter *psAdapter);
-
-#endif
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
deleted file mode 100644
index 7b331215c1ac..000000000000
--- a/drivers/staging/bcm/Debug.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Debug.h
- *
- * Dynamic (runtime) debug framework implementation.
- * -kaiwan.
- */
-#ifndef _DEBUG_H
-#define _DEBUG_H
-#include <linux/string.h>
-#define NONE 0xFFFF
-
-/* TYPE and SUBTYPE
- * Define valid TYPE (or category or code-path, however you like to think of it)
- * and SUBTYPEs.
- * Type and SubType are treated as bitmasks.
- */
-#define DBG_TYPE_INITEXIT (1 << 0) /* 1 */
-#define DBG_TYPE_TX (1 << 1) /* 2 */
-#define DBG_TYPE_RX (1 << 2) /* 4 */
-#define DBG_TYPE_OTHERS (1 << 3) /* 8 */
-#define NUMTYPES 4
-
-/* -SUBTYPEs for TX : TYPE is DBG_TYPE_TX -----//
- * Transmit.c, Arp.c, LeakyBucket.c, and Qos.c
- * total 17 macros
- */
-/* Transmit.c */
-#define TX 1
-#define MP_SEND (TX << 0)
-#define NEXT_SEND (TX << 1)
-#define TX_FIFO (TX << 2)
-#define TX_CONTROL (TX << 3)
-
-/* Arp.c */
-#define IP_ADDR (TX << 4)
-#define ARP_REQ (TX << 5)
-#define ARP_RESP (TX << 6)
-
-/* Leakybucket.c */
-#define TOKEN_COUNTS (TX << 8)
-#define CHECK_TOKENS (TX << 9)
-#define TX_PACKETS (TX << 10)
-#define TIMER (TX << 11)
-
-/* Qos.c */
-#define QOS TX
-#define QUEUE_INDEX (QOS << 12)
-#define IPV4_DBG (QOS << 13)
-#define IPV6_DBG (QOS << 14)
-#define PRUNE_QUEUE (QOS << 15)
-#define SEND_QUEUE (QOS << 16)
-
-/* TX_Misc */
-#define TX_OSAL_DBG (TX << 17)
-
-/* --SUBTYPEs for ------INIT & EXIT---------------------
- * ------------ TYPE is DBG_TYPE_INITEXIT -----//
- * DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c
- */
-#define MP 1
-#define DRV_ENTRY (MP << 0)
-#define MP_INIT (MP << 1)
-#define READ_REG (MP << 3)
-#define DISPATCH (MP << 2)
-#define CLAIM_ADAP (MP << 4)
-#define REG_IO_PORT (MP << 5)
-#define INIT_DISP (MP << 6)
-#define RX_INIT (MP << 7)
-
-/* -SUBTYPEs for --RX----------------------------------
- * ------------RX : TYPE is DBG_TYPE_RX -----//
- * Receive.c
- */
-#define RX 1
-#define RX_DPC (RX << 0)
-#define RX_CTRL (RX << 3)
-#define RX_DATA (RX << 4)
-#define MP_RETURN (RX << 1)
-#define LINK_MSG (RX << 2)
-
-/* -SUBTYPEs for ----OTHER ROUTINES------------------
- * ------------OTHERS : TYPE is DBG_TYPE_OTHER -----//
- * HaltnReset,CheckForHang,PnP,Misc,CmHost
- * total 12 macros
- */
-#define OTHERS 1
-#define ISR OTHERS
-#define MP_DPC (ISR << 0)
-
-/* HaltnReset.c */
-#define HALT OTHERS
-#define MP_HALT (HALT << 1)
-#define CHECK_HANG (HALT << 2)
-#define MP_RESET (HALT << 3)
-#define MP_SHUTDOWN (HALT << 4)
-
-/* pnp.c */
-#define PNP OTHERS
-#define MP_PNP (PNP << 5)
-
-/* Misc.c */
-#define MISC OTHERS
-#define DUMP_INFO (MISC << 6)
-#define CLASSIFY (MISC << 7)
-#define LINK_UP_MSG (MISC << 8)
-#define CP_CTRL_PKT (MISC << 9)
-#define DUMP_CONTROL (MISC << 10)
-#define LED_DUMP_INFO (MISC << 11)
-
-/* CmHost.c */
-#define CMHOST OTHERS
-#define SERIAL (OTHERS << 12)
-#define IDLE_MODE (OTHERS << 13)
-#define WRM (OTHERS << 14)
-#define RDM (OTHERS << 15)
-
-/* TODO - put PHS_SEND in Tx PHS_RECEIVE in Rx path ? */
-#define PHS_SEND (OTHERS << 16)
-#define PHS_RECEIVE (OTHERS << 17)
-#define PHS_MODULE (OTHERS << 18)
-
-#define INTF_INIT (OTHERS << 19)
-#define INTF_ERR (OTHERS << 20)
-#define INTF_WARN (OTHERS << 21)
-#define INTF_NORM (OTHERS << 22)
-
-#define IRP_COMPLETION (OTHERS << 23)
-#define SF_DESCRIPTOR_CNTS (OTHERS << 24)
-#define PHS_DISPATCH (OTHERS << 25)
-#define OSAL_DBG (OTHERS << 26)
-#define NVM_RW (OTHERS << 27)
-
-#define HOST_MIBS (OTHERS << 28)
-#define CONN_MSG (CMHOST << 29)
-
-/* Debug level
- * We have 8 debug levels, in (numerically) increasing order of verbosity.
- * IMP: Currently implementing ONLY DBG_LVL_ALL, i.e., all debug prints will
- * appear (of course, iff the global debug flag is ON and we match the Type and SubType).
- * Finer-granularity debug levels are currently not in use, although the feature exists.
- *
- * Another way to say this:
- * All the debug prints currently have 'debug_level' set to DBG_LVL_ALL.
- * You can change that at compile time to any of the below, if you wish to. However, as of now,
- * there is no dynamic facility to have the userspace 'TestApp' set debug_level. Slated for future expansion.
- */
-#define BCM_ALL 7
-#define BCM_LOW 6
-#define BCM_PRINT 5
-#define BCM_NORMAL 4
-#define BCM_MEDIUM 3
-#define BCM_SCREAM 2
-#define BCM_ERR 1
-/* Not meant for developers to use in debug prints.
- * To be used to disable all prints by setting DBG_LVL_CURR to this value.
- */
-#define BCM_NONE 0
-
-/* The current driver logging level.
- * Everything at this level and (numerically) lower (meaning higher prio)
- * is logged.
- * Replace 'BCM_ALL' in the DBG_LVL_CURR macro with the logging level desired.
- * For example, to set the logging level to 'errors only' use:
- * #define DBG_LVL_CURR (BCM_ERR)
- */
-
-#define DBG_LVL_CURR (BCM_ALL)
-#define DBG_LVL_ALL BCM_ALL
-
-/* ---Userspace mapping of Debug State.
- * Deliberately matches that of the Windows driver.
- * The TestApp's ioctl passes this struct to us.
- */
-struct bcm_user_debug_state {
- unsigned int Subtype, Type;
- unsigned int OnOff;
-/* unsigned int debug_level; future expansion */
-} __packed;
-
-/* ---Kernel-space mapping of Debug State */
-struct bcm_debug_state {
- unsigned int type;
- /* A bitmap of 32 bits for Subtype per Type.
- * Valid indexes in 'subtype' array are *only* 1,2,4 and 8,
- * corresponding to valid Type values. Hence we use the 'Type' field
- * as the index value, ignoring the array entries 0,3,5,6,7 !
- */
- unsigned int subtype[(NUMTYPES*2)+1];
- unsigned int debug_level;
-};
-/* Instantiated in the Adapter structure
- * We'll reuse the debug level parameter to include a bit (the MSB) to indicate whether or not
- * we want the function's name printed.
- */
-#define DBG_NO_FUNC_PRINT (1 << 31)
-#define DBG_LVL_BITMASK 0xFF
-
-/* --- Only for direct printk's; "hidden" to API. */
-#define DBG_TYPE_PRINTK 3
-
-#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \
- do { \
- if (DBG_TYPE_PRINTK == Type) \
- pr_info("%s:" string, __func__, ##args); \
- else if (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type])) { \
- if (dbg_level & DBG_NO_FUNC_PRINT) \
- pr_debug("%s:\n", string); \
- else \
- pr_debug("%s:\n" string, __func__, ##args); \
- } \
- } while (0)
-
-#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) \
- do { \
- if (DBG_TYPE_PRINTK == Type || \
- (Adapter && \
- (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
- (Type & Adapter->stDebugState.type) && \
- (SubType & Adapter->stDebugState.subtype[Type]))) { \
- pr_debug("%s:\n", __func__); \
- print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \
- 16, 1, buffer, bufferlen, false); \
- } \
- } while (0)
-
-#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \
- int i; \
- for (i = 0; i < (NUMTYPES * 2) + 1; i++) { \
- if ((i == 1) || (i == 2) || (i == 4) || (i == 8)) { \
- /* CAUTION! Forcefully turn on ALL debug paths and subpaths! \
- * Adapter->stDebugState.subtype[i] = 0xffffffff; \
- */ \
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "subtype[%d] = 0x%08x\n", \
- i, Adapter->stDebugState.subtype[i]); \
- } \
- } \
-} while (0)
-
-#endif
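
The BCM_DEBUG_PRINT macro above gates each print on three conditions: the message level must not exceed the adapter's debug_level, the Type bit must be enabled, and the SubType bit must be set in the bitmap indexed by that Type. A stand-alone sketch of just that filtering decision, with simplified names (the struct and helper below are illustrative, not the driver's):

#include <stdio.h>

#define DBG_LVL_BITMASK 0xFF

struct debug_state {
	unsigned int type;        /* bitmap of enabled categories    */
	unsigned int subtype[9];  /* one subtype bitmap per category */
	unsigned int debug_level; /* prints at or below this level   */
};

/* Return nonzero if a message with the given type/subtype/level
 * should be logged under the current debug state. */
static int debug_allowed(const struct debug_state *st,
			 unsigned int type, unsigned int subtype,
			 unsigned int level)
{
	return (level & DBG_LVL_BITMASK) <= st->debug_level &&
	       (type & st->type) &&
	       (subtype & st->subtype[type]);
}

int main(void)
{
	struct debug_state st = { .debug_level = 7 };

	st.type = 1 << 1;          /* enable the TX category (bit 1, index 2) */
	st.subtype[1 << 1] = ~0u;  /* ...and all of its subtypes */

	printf("%d\n", debug_allowed(&st, 1 << 1, 1 << 3, 4)); /* 1: TX enabled */
	printf("%d\n", debug_allowed(&st, 1 << 2, 1 << 3, 4)); /* 0: RX not enabled */
	return 0;
}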
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
deleted file mode 100644
index dd5d138a6528..000000000000
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * @file HandleControlPacket.c
- * This file contains the routines to deal with
- * sending and receiving of control packets.
- */
-#include "headers.h"
-
-/**
- * When a control packet is received, analyze the
- * "status" and call appropriate response function.
- * Enqueue the control packet for Application.
- * @return None
- */
-static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb)
-{
- struct bcm_tarang_data *pTarang = NULL;
- bool HighPriorityMessage = false;
- struct sk_buff *newPacket = NULL;
- CHAR cntrl_msg_mask_bit = 0;
- bool drop_pkt_flag = TRUE;
- USHORT usStatus = *(PUSHORT)(skb->data);
-
- if (netif_msg_pktdata(Adapter))
- print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, skb->len, 0);
-
- switch (usStatus) {
- case CM_RESPONSES: /* 0xA0 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL,
- "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
- HighPriorityMessage = TRUE;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- HighPriorityMessage = TRUE;
- if (Adapter->LinkStatus == LINKUP_DONE)
- CmControlResponseMessage(Adapter,
- (skb->data + sizeof(USHORT)));
- break;
- case LINK_CONTROL_RESP: /* 0xA2 */
- case STATUS_RSP: /* 0xA1 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "LINK_CONTROL_RESP");
- HighPriorityMessage = TRUE;
- LinkControlResponseMessage(Adapter,
- (skb->data + sizeof(USHORT)));
- break;
- case STATS_POINTER_RESP: /* 0xA6 */
- HighPriorityMessage = TRUE;
- StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
- break;
- case IDLE_MODE_STATUS: /* 0xA3 */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL,
- "IDLE_MODE_STATUS Type Message Got from F/W");
- InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
- sizeof(USHORT)));
- HighPriorityMessage = TRUE;
- break;
-
- case AUTH_SS_HOST_MSG:
- HighPriorityMessage = TRUE;
- break;
-
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "Got Default Response");
- /* Let the Application Deal with This Packet */
- break;
- }
-
- /* Queue The Control Packet to The Application Queues */
- down(&Adapter->RxAppControlQueuelock);
-
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
- if (Adapter->device_removed)
- break;
-
- drop_pkt_flag = TRUE;
- /*
-		 * Control messages range from A0 to AC and are mapped to bits
-		 * 0 to C of the control mask.
-		 * By default, AD to BF are mapped to the remaining bits, which
-		 * are ON by default.
-		 * If the mask bit is enabled for a particular packet status,
-		 * send it out to the app; otherwise drop it.
- */
- cntrl_msg_mask_bit = (usStatus & 0x1F);
- /*
- * printk("\ninew msg mask bit which is disable in mask:%X",
- * cntrl_msg_mask_bit);
- */
- if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
- drop_pkt_flag = false;
-
- if ((drop_pkt_flag == TRUE) ||
- (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
- || ((pTarang->AppCtrlQueueLen >
- MAX_APP_QUEUE_LEN / 2) &&
- (HighPriorityMessage == false))) {
- /*
-			 * Assumptions:
-			 * 1. Every tarang manages its own dropped-packet
-			 * statistics.
-			 * 2. The total packets dropped per tarang will be equal to
-			 * the sum of all types of packets dropped by that
-			 * tarang only.
- */
- struct bcm_mibs_dropped_cntrl_msg *msg =
- &pTarang->stDroppedAppCntrlMsgs;
- switch (*(PUSHORT)skb->data) {
- case CM_RESPONSES:
- msg->cm_responses++;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- msg->cm_control_newdsx_multiclassifier_resp++;
- break;
- case LINK_CONTROL_RESP:
- msg->link_control_resp++;
- break;
- case STATUS_RSP:
- msg->status_rsp++;
- break;
- case STATS_POINTER_RESP:
- msg->stats_pointer_resp++;
- break;
- case IDLE_MODE_STATUS:
- msg->idle_mode_status++;
- break;
- case AUTH_SS_HOST_MSG:
- msg->auth_ss_host_msg++;
- break;
- default:
- msg->low_priority_message++;
- break;
- }
-
- continue;
- }
-
- newPacket = skb_clone(skb, GFP_KERNEL);
- if (!newPacket)
- break;
- ENQUEUEPACKET(pTarang->RxAppControlHead,
- pTarang->RxAppControlTail, newPacket);
- pTarang->AppCtrlQueueLen++;
- }
- up(&Adapter->RxAppControlQueuelock);
- wake_up(&Adapter->process_read_wait_queue);
- dev_kfree_skb(skb);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
- "After wake_up_interruptible");
-}
-
-/**
- * @ingroup ctrl_pkt_functions
- * Thread to handle control pkt reception
- */
-
-/* pointer to adapter object*/
-int control_packet_handler(struct bcm_mini_adapter *Adapter)
-{
- struct sk_buff *ctrl_packet = NULL;
- unsigned long flags = 0;
- /* struct timeval tv; */
- /* int *puiBuffer = NULL; */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
- "Entering to make thread wait on control packet event!");
- while (1) {
- wait_event_interruptible(Adapter->process_rx_cntrlpkt,
- atomic_read(&Adapter->cntrlpktCnt) ||
- Adapter->bWakeUpDevice ||
- kthread_should_stop());
-
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
- DBG_LVL_ALL, "Exiting\n");
- return 0;
- }
- if (TRUE == Adapter->bWakeUpDevice) {
- Adapter->bWakeUpDevice = false;
- if ((false == Adapter->bTriedToWakeUpFromlowPowerMode)
- && ((TRUE == Adapter->IdleMode) ||
- (TRUE == Adapter->bShutStatus))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- CP_CTRL_PKT, DBG_LVL_ALL,
- "Calling InterfaceAbortIdlemode\n");
- /*
- * Adapter->bTriedToWakeUpFromlowPowerMode
- * = TRUE;
- */
- InterfaceIdleModeWakeup(Adapter);
- }
- continue;
- }
-
- while (atomic_read(&Adapter->cntrlpktCnt)) {
- spin_lock_irqsave(&Adapter->control_queue_lock, flags);
- ctrl_packet = Adapter->RxControlHead;
- if (ctrl_packet) {
- DEQUEUEPACKET(Adapter->RxControlHead,
- Adapter->RxControlTail);
- /* Adapter->RxControlHead=ctrl_packet->next; */
- }
-
- spin_unlock_irqrestore(&Adapter->control_queue_lock,
- flags);
- handle_rx_control_packet(Adapter, ctrl_packet);
- atomic_dec(&Adapter->cntrlpktCnt);
- }
-
- SetUpTargetDsxBuffers(Adapter);
- }
- return STATUS_SUCCESS;
-}
-
-INT flushAllAppQ(void)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_tarang_data *pTarang = NULL;
- struct sk_buff *PacketToDrop = NULL;
-
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
- while (pTarang->RxAppControlHead != NULL) {
- PacketToDrop = pTarang->RxAppControlHead;
- DEQUEUEPACKET(pTarang->RxAppControlHead,
- pTarang->RxAppControlTail);
- dev_kfree_skb(PacketToDrop);
- }
- pTarang->AppCtrlQueueLen = 0;
-		/* dropped control packet statistics should also be reset. */
- memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0,
- sizeof(struct bcm_mibs_dropped_cntrl_msg));
-
- }
- return STATUS_SUCCESS;
-}
-
-
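
The per-reader delivery check above maps control status codes 0xA0-0xAC onto bits 0-0xC of each reader's RxCntrlMsgBitMask by taking the low five bits of the status word. A small, self-contained sketch of that check; the function and variable names here are illustrative:

#include <stdio.h>

/* Map a control-message status code (0xA0..0xBF) to its mask bit and
 * report whether the reader has that message class enabled. */
static int should_deliver(unsigned short status, unsigned int rx_mask)
{
	unsigned int bit = status & 0x1F;   /* 0xA0 -> 0, 0xA1 -> 1, ... 0xAC -> 0xC */

	return (rx_mask & (1u << bit)) != 0;
}

int main(void)
{
	unsigned int mask = 0x1FFF;                  /* bits 0..0xC: deliver A0..AC */

	printf("%d\n", should_deliver(0xA1, mask));  /* 1: status responses pass */
	printf("%d\n", should_deliver(0xAD, mask));  /* 0: AD..BF filtered out   */
	return 0;
}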
diff --git a/drivers/staging/bcm/HostMIBSInterface.h b/drivers/staging/bcm/HostMIBSInterface.h
deleted file mode 100644
index f922ac49b70e..000000000000
--- a/drivers/staging/bcm/HostMIBSInterface.h
+++ /dev/null
@@ -1,192 +0,0 @@
-#ifndef _HOST_MIBSINTERFACE_H
-#define _HOST_MIBSINTERFACE_H
-
-/*
- * Copyright (c) 2007 Beceem Communications Pvt. Ltd
- * File Name: HostMIBSInterface.h
- * Abstract: This file contains the data structures used by the Host to
- * update the Host statistics used for the MIBS.
- */
-
-#define MIBS_MAX_CLASSIFIERS 100
-#define MIBS_MAX_PHSRULES 100
-#define MIBS_MAX_SERVICEFLOWS 17
-#define MIBS_MAX_IP_RANGE_LENGTH 4
-#define MIBS_MAX_PORT_RANGE 4
-#define MIBS_MAX_PROTOCOL_LENGTH 32
-#define MIBS_MAX_PHS_LENGTHS 255
-#define MIBS_IPV6_ADDRESS_SIZEINBYTES 0x10
-#define MIBS_IP_LENGTH_OF_ADDRESS 4
-#define MIBS_MAX_HIST_ENTRIES 12
-#define MIBS_PKTSIZEHIST_RANGE 128
-
-union bcm_mibs_ip_addr {
- struct {
- /* Source Ip Address Range */
- unsigned long ulIpv4Addr[MIBS_MAX_IP_RANGE_LENGTH];
- /* Source Ip Mask Address Range */
- unsigned long ulIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH];
- };
- struct {
- /* Source Ip Address Range */
- unsigned long ulIpv6Addr[MIBS_MAX_IP_RANGE_LENGTH * 4];
- /* Source Ip Mask Address Range */
- unsigned long ulIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * 4];
- };
- struct {
- unsigned char ucIpv4Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS];
- unsigned char ucIpv4Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IP_LENGTH_OF_ADDRESS];
- };
- struct {
- unsigned char ucIpv6Address[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES];
- unsigned char ucIpv6Mask[MIBS_MAX_IP_RANGE_LENGTH * MIBS_IPV6_ADDRESS_SIZEINBYTES];
- };
-};
-
-struct bcm_mibs_host_info {
- u64 GoodTransmits;
- u64 GoodReceives;
-	/* This is to keep track of the Tx and Rx MailBox Registers. */
- unsigned long NumDesUsed;
- unsigned long CurrNumFreeDesc;
- unsigned long PrevNumFreeDesc;
-	/* to keep track of the no. of bytes received */
- unsigned long PrevNumRcevBytes;
- unsigned long CurrNumRcevBytes;
- /* QOS Related */
- unsigned long BEBucketSize;
- unsigned long rtPSBucketSize;
- unsigned long LastTxQueueIndex;
- bool TxOutofDescriptors;
- bool TimerActive;
- u32 u32TotalDSD;
- u32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
- u32 aRxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
-};
-
-struct bcm_mibs_classifier_rule {
- unsigned long ulSFID;
- unsigned char ucReserved[2];
- u16 uiClassifierRuleIndex;
- bool bUsed;
- unsigned short usVCID_Value;
- u8 u8ClassifierRulePriority;
- union bcm_mibs_ip_addr stSrcIpAddress;
- /* IP Source Address Length */
- unsigned char ucIPSourceAddressLength;
- union bcm_mibs_ip_addr stDestIpAddress;
- /* IP Destination Address Length */
- unsigned char ucIPDestinationAddressLength;
- unsigned char ucIPTypeOfServiceLength;
- unsigned char ucTosLow;
- unsigned char ucTosHigh;
- unsigned char ucTosMask;
- unsigned char ucProtocolLength;
- unsigned char ucProtocol[MIBS_MAX_PROTOCOL_LENGTH];
- unsigned short usSrcPortRangeLo[MIBS_MAX_PORT_RANGE];
- unsigned short usSrcPortRangeHi[MIBS_MAX_PORT_RANGE];
- unsigned char ucSrcPortRangeLength;
- unsigned short usDestPortRangeLo[MIBS_MAX_PORT_RANGE];
- unsigned short usDestPortRangeHi[MIBS_MAX_PORT_RANGE];
- unsigned char ucDestPortRangeLength;
- bool bProtocolValid;
- bool bTOSValid;
- bool bDestIpValid;
- bool bSrcIpValid;
- unsigned char ucDirection;
- bool bIpv6Protocol;
- u32 u32PHSRuleID;
-};
-
-struct bcm_mibs_phs_rule {
- unsigned long ulSFID;
- u8 u8PHSI;
- u8 u8PHSFLength;
- u8 u8PHSF[MIBS_MAX_PHS_LENGTHS];
- u8 u8PHSMLength;
- u8 u8PHSM[MIBS_MAX_PHS_LENGTHS];
- u8 u8PHSS;
- u8 u8PHSV;
- u8 reserved[5];
- long PHSModifiedBytes;
- unsigned long PHSModifiedNumPackets;
- unsigned long PHSErrorNumPackets;
-};
-
-struct bcm_mibs_parameters {
- u32 wmanIfSfid;
- u32 wmanIfCmnCpsSfState;
- u32 wmanIfCmnCpsMaxSustainedRate;
- u32 wmanIfCmnCpsMaxTrafficBurst;
- u32 wmanIfCmnCpsMinReservedRate;
- u32 wmanIfCmnCpsToleratedJitter;
- u32 wmanIfCmnCpsMaxLatency;
- u32 wmanIfCmnCpsFixedVsVariableSduInd;
- u32 wmanIfCmnCpsSduSize;
- u32 wmanIfCmnCpsSfSchedulingType;
- u32 wmanIfCmnCpsArqEnable;
- u32 wmanIfCmnCpsArqWindowSize;
- u32 wmanIfCmnCpsArqBlockLifetime;
- u32 wmanIfCmnCpsArqSyncLossTimeout;
- u32 wmanIfCmnCpsArqDeliverInOrder;
- u32 wmanIfCmnCpsArqRxPurgeTimeout;
- u32 wmanIfCmnCpsArqBlockSize;
- u32 wmanIfCmnCpsMinRsvdTolerableRate;
- u32 wmanIfCmnCpsReqTxPolicy;
- u32 wmanIfCmnSfCsSpecification;
- u32 wmanIfCmnCpsTargetSaid;
-};
-
-struct bcm_mibs_table {
- unsigned long ulSFID;
- unsigned short usVCID_Value;
- unsigned int uiThreshold;
- u8 u8TrafficPriority;
- bool bValid;
- bool bActive;
- bool bActivateRequestSent;
- u8 u8QueueType;
- unsigned int uiMaxBucketSize;
- unsigned int uiCurrentQueueDepthOnTarget;
- unsigned int uiCurrentBytesOnHost;
- unsigned int uiCurrentPacketsOnHost;
- unsigned int uiDroppedCountBytes;
- unsigned int uiDroppedCountPackets;
- unsigned int uiSentBytes;
- unsigned int uiSentPackets;
- unsigned int uiCurrentDrainRate;
- unsigned int uiThisPeriodSentBytes;
- u64 liDrainCalculated;
- unsigned int uiCurrentTokenCount;
- u64 liLastUpdateTokenAt;
- unsigned int uiMaxAllowedRate;
- unsigned int NumOfPacketsSent;
- unsigned char ucDirection;
- unsigned short usCID;
- struct bcm_mibs_parameters stMibsExtServiceFlowTable;
- unsigned int uiCurrentRxRate;
- unsigned int uiThisPeriodRxBytes;
- unsigned int uiTotalRxBytes;
- unsigned int uiTotalTxBytes;
-};
-
-struct bcm_mibs_dropped_cntrl_msg {
- unsigned long cm_responses;
- unsigned long cm_control_newdsx_multiclassifier_resp;
- unsigned long link_control_resp;
- unsigned long status_rsp;
- unsigned long stats_pointer_resp;
- unsigned long idle_mode_status;
- unsigned long auth_ss_host_msg;
- unsigned long low_priority_message;
-};
-
-struct bcm_host_stats_mibs {
- struct bcm_mibs_host_info stHostInfo;
- struct bcm_mibs_classifier_rule astClassifierTable[MIBS_MAX_CLASSIFIERS];
- struct bcm_mibs_table astSFtable[MIBS_MAX_SERVICEFLOWS];
- struct bcm_mibs_phs_rule astPhsRulesTable[MIBS_MAX_PHSRULES];
- struct bcm_mibs_dropped_cntrl_msg stDroppedAppCntrlMsgs;
-};
-
-#endif
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
deleted file mode 100644
index 27f3f416f184..000000000000
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ /dev/null
@@ -1,476 +0,0 @@
-#include "headers.h"
-
-static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header);
-static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header);
-static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header);
-
-static UCHAR *GetNextIPV6ChainedHeader(UCHAR **ppucPayload,
- UCHAR *pucNextHeader, bool *bParseDone, USHORT *pusPayloadLength)
-{
- UCHAR *pucRetHeaderPtr = NULL;
- UCHAR *pucPayloadPtr = NULL;
- USHORT usNextHeaderOffset = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if ((ppucPayload == NULL) || (*pusPayloadLength == 0) ||
- (*bParseDone)) {
- *bParseDone = TRUE;
- return NULL;
- }
-
- pucRetHeaderPtr = *ppucPayload;
- pucPayloadPtr = *ppucPayload;
-
- if (!pucRetHeaderPtr || !pucPayloadPtr) {
- *bParseDone = TRUE;
- return NULL;
- }
-
-	/* Get the Next Header Type */
- *bParseDone = false;
-
-
- switch (*pucNextHeader) {
- case IPV6HDR_TYPE_HOPBYHOP:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 HopByHop Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_options_hdr);
- break;
-
- case IPV6HDR_TYPE_ROUTING:
- {
- struct bcm_ipv6_routing_hdr *pstIpv6RoutingHeader;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Routing Header");
- pstIpv6RoutingHeader =
- (struct bcm_ipv6_routing_hdr *)pucPayloadPtr;
- usNextHeaderOffset += sizeof(struct bcm_ipv6_routing_hdr);
- usNextHeaderOffset += pstIpv6RoutingHeader->ucNumAddresses *
- IPV6_ADDRESS_SIZEINBYTES;
- }
- break;
-
- case IPV6HDR_TYPE_FRAGMENTATION:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Fragmentation Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_fragment_hdr);
- break;
-
- case IPV6HDR_TYPE_DESTOPTS:
- {
- struct bcm_ipv6_dest_options_hdr *pstIpv6DestOptsHdr =
- (struct bcm_ipv6_dest_options_hdr *)pucPayloadPtr;
- int nTotalOptions = pstIpv6DestOptsHdr->ucHdrExtLen;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
-				"\nIPv6 DestOpts Header");
- usNextHeaderOffset += sizeof(struct bcm_ipv6_dest_options_hdr);
- usNextHeaderOffset += nTotalOptions *
- IPV6_DESTOPTS_HDR_OPTIONSIZE;
- }
- break;
-
-
- case IPV6HDR_TYPE_AUTHENTICATION:
- {
- struct bcm_ipv6_authentication_hdr *pstIpv6AuthHdr =
- (struct bcm_ipv6_authentication_hdr *)pucPayloadPtr;
- int nHdrLen = pstIpv6AuthHdr->ucLength;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Authentication Header");
- usNextHeaderOffset += nHdrLen * 4;
- }
- break;
-
- case IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Encrypted Security Payload Header");
- *bParseDone = TRUE;
- break;
-
- case IPV6_ICMP_HDR_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nICMP Header");
- *bParseDone = TRUE;
- break;
-
- case TCP_HEADER_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nTCP Header");
- *bParseDone = TRUE;
- break;
-
- case UDP_HEADER_TYPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nUDP Header");
- *bParseDone = TRUE;
- break;
-
- default:
- *bParseDone = TRUE;
- break;
- }
-
- if (*bParseDone == false) {
- if (*pusPayloadLength <= usNextHeaderOffset) {
- *bParseDone = TRUE;
- } else {
- *pucNextHeader = *pucPayloadPtr;
- pucPayloadPtr += usNextHeaderOffset;
- (*pusPayloadLength) -= usNextHeaderOffset;
- }
-
- }
-
- *ppucPayload = pucPayloadPtr;
- return pucRetHeaderPtr;
-}
-
-
-static UCHAR GetIpv6ProtocolPorts(UCHAR *pucPayload, USHORT *pusSrcPort,
- USHORT *pusDestPort, USHORT usPayloadLength, UCHAR ucNextHeader)
-{
- UCHAR *pIpv6HdrScanContext = pucPayload;
- bool bDone = false;
- UCHAR ucHeaderType = 0;
- UCHAR *pucNextHeader = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (!pucPayload || (usPayloadLength == 0))
- return 0;
-
- *pusSrcPort = *pusDestPort = 0;
- ucHeaderType = ucNextHeader;
- while (!bDone) {
- pucNextHeader = GetNextIPV6ChainedHeader(&pIpv6HdrScanContext,
- &ucHeaderType,
- &bDone,
- &usPayloadLength);
- if (bDone) {
- if ((ucHeaderType == TCP_HEADER_TYPE) ||
- (ucHeaderType == UDP_HEADER_TYPE)) {
- *pusSrcPort = *((PUSHORT)(pucNextHeader));
- *pusDestPort = *((PUSHORT)(pucNextHeader+2));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nProtocol Ports - Src Port :0x%x Dest Port : 0x%x",
- ntohs(*pusSrcPort),
- ntohs(*pusDestPort));
- }
- break;
-
- }
- }
- return ucHeaderType;
-}
-
-
-/*
- * Arg 1 struct bcm_mini_adapter *Adapter is a pointer to the driver control
- * structure
- * Arg 2 PVOID pcIpHeader is a pointer to the IP header of the packet
- */
-USHORT IpVersion6(struct bcm_mini_adapter *Adapter, PVOID pcIpHeader,
- struct bcm_classifier_rule *pstClassifierRule)
-{
- USHORT ushDestPort = 0;
- USHORT ushSrcPort = 0;
- UCHAR ucNextProtocolAboveIP = 0;
- struct bcm_ipv6_hdr *pstIpv6Header = NULL;
- bool bClassificationSucceed = false;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "IpVersion6 ==========>\n");
-
- pstIpv6Header = pcIpHeader;
-
- DumpIpv6Header(pstIpv6Header);
-
- /*
- * Try to get the next higher layer protocol
-	 * and the port numbers if TCP or UDP
- */
- ucNextProtocolAboveIP = GetIpv6ProtocolPorts((UCHAR *)(pcIpHeader +
- sizeof(struct bcm_ipv6_hdr)),
- &ushSrcPort,
- &ushDestPort,
- pstIpv6Header->usPayloadLength,
- pstIpv6Header->ucNextHeader);
-
- do {
- if (pstClassifierRule->ucDirection == 0) {
- /*
-			 * cannot be processed for classification;
-			 * it is a downlink connection
- */
- break;
- }
-
- if (!pstClassifierRule->bIpv6Protocol) {
- /*
- * We are looking for Ipv6 Classifiers
-			 * Let's ignore this classifier and try the next one
- */
- break;
- }
-
- bClassificationSucceed = MatchSrcIpv6Address(pstClassifierRule,
- pstIpv6Header);
- if (!bClassificationSucceed)
- break;
-
- bClassificationSucceed = MatchDestIpv6Address(pstClassifierRule,
- pstIpv6Header);
- if (!bClassificationSucceed)
- break;
-
- /*
- * Match the protocol type.
- * For IPv6 the next protocol at end of
- * Chain of IPv6 prot headers
- */
- bClassificationSucceed = MatchProtocol(pstClassifierRule,
- ucNextProtocolAboveIP);
- if (!bClassificationSucceed)
- break;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Protocol Matched");
-
- if ((ucNextProtocolAboveIP == TCP_HEADER_TYPE) ||
- (ucNextProtocolAboveIP == UDP_HEADER_TYPE)) {
- /* Match Src Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Source Port:%x\n",
- ntohs(ushSrcPort));
- bClassificationSucceed = MatchSrcPort(pstClassifierRule,
- ntohs(ushSrcPort));
- if (!bClassificationSucceed)
- break;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL, "\nIPv6 Src Port Matched");
-
- /* Match Dest Port */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Destination Port:%x\n",
- ntohs(ushDestPort));
- bClassificationSucceed = MatchDestPort(pstClassifierRule,
- ntohs(ushDestPort));
- if (!bClassificationSucceed)
- break;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "\nIPv6 Dest Port Matched");
- }
- } while (0);
-
- if (bClassificationSucceed == TRUE) {
- INT iMatchedSFQueueIndex = 0;
-
- iMatchedSFQueueIndex = SearchSfid(Adapter,
- pstClassifierRule->ulSFID);
- if ((iMatchedSFQueueIndex >= NO_OF_QUEUES) ||
- (Adapter->PackInfo[iMatchedSFQueueIndex].bActive == false))
- bClassificationSucceed = false;
- }
-
- return bClassificationSucceed;
-}
-
-
-static bool MatchSrcIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UINT uiLoopIndex = 0;
- UINT uiIpv6AddIndex = 0;
- UINT uiIpv6AddrNoLongWords = 4;
- ULONG aulSrcIP[4];
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *src_addr = &pstClassifierRule->stSrcIpAddress;
-
- /*
- * This is the no. of Src Addresses ie Range of IP Addresses contained
- * in the classifier rule for which we need to match
- */
- UINT uiCountIPSrcAddresses =
- (UINT)pstClassifierRule->ucIPSourceAddressLength;
-
-
- if (uiCountIPSrcAddresses == 0)
- return TRUE;
-
-
- /* First Convert the Ip Address in the packet to Host Endian order */
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++)
- aulSrcIP[uiIpv6AddIndex] =
- ntohl(pstIpv6Header->ulSrcIpAddress[uiIpv6AddIndex]);
-
- for (uiLoopIndex = 0;
- uiLoopIndex < uiCountIPSrcAddresses;
- uiLoopIndex += uiIpv6AddrNoLongWords) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Address In Received Packet :\n ");
- DumpIpv6Address(aulSrcIP);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Mask In Classifier Rule:\n");
- DumpIpv6Address(&src_addr->ulIpv6Mask[uiLoopIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Src Ipv6 Address In Classifier Rule :\n");
- DumpIpv6Address(&src_addr->ulIpv6Addr[uiLoopIndex]);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- if ((src_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] &
- aulSrcIP[uiIpv6AddIndex]) !=
- src_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) {
- /*
- * Match failed for current Ipv6 Address
- * Try next Ipv6 Address
- */
- break;
- }
-
- if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) {
- /* Match Found */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "Ipv6 Src Ip Address Matched\n");
- return TRUE;
- }
- }
- }
- return false;
-}
-
-static bool MatchDestIpv6Address(struct bcm_classifier_rule *pstClassifierRule,
- struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UINT uiLoopIndex = 0;
- UINT uiIpv6AddIndex = 0;
- UINT uiIpv6AddrNoLongWords = 4;
- ULONG aulDestIP[4];
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress;
-
- /*
- * This is the no. of Destination Addresses
- * ie Range of IP Addresses contained in the classifier rule
- * for which we need to match
- */
- UINT uiCountIPDestinationAddresses =
- (UINT)pstClassifierRule->ucIPDestinationAddressLength;
-
- if (uiCountIPDestinationAddresses == 0)
- return TRUE;
-
-
- /* First Convert the Ip Address in the packet to Host Endian order */
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++)
- aulDestIP[uiIpv6AddIndex] =
- ntohl(pstIpv6Header->ulDestIpAddress[uiIpv6AddIndex]);
-
- for (uiLoopIndex = 0;
- uiLoopIndex < uiCountIPDestinationAddresses;
- uiLoopIndex += uiIpv6AddrNoLongWords) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Address In Received Packet :\n ");
- DumpIpv6Address(aulDestIP);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Mask In Classifier Rule :\n");
- DumpIpv6Address(&dest_addr->ulIpv6Mask[uiLoopIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "\n Destination Ipv6 Address In Classifier Rule :\n");
- DumpIpv6Address(&dest_addr->ulIpv6Addr[uiLoopIndex]);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- if ((dest_addr->ulIpv6Mask[uiLoopIndex+uiIpv6AddIndex] &
- aulDestIP[uiIpv6AddIndex]) !=
- dest_addr->ulIpv6Addr[uiLoopIndex+uiIpv6AddIndex]) {
- /*
- * Match failed for current Ipv6 Address.
- * Try next Ipv6 Address
- */
- break;
- }
-
- if (uiIpv6AddIndex == uiIpv6AddrNoLongWords-1) {
- /* Match Found */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG,
- DBG_LVL_ALL,
- "Ipv6 Destination Ip Address Matched\n");
- return TRUE;
- }
- }
- }
- return false;
-
-}
-
-VOID DumpIpv6Address(ULONG *puIpv6Address)
-{
- UINT uiIpv6AddrNoLongWords = 4;
- UINT uiIpv6AddIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- for (uiIpv6AddIndex = 0;
- uiIpv6AddIndex < uiIpv6AddrNoLongWords;
- uiIpv6AddIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- ":%lx", puIpv6Address[uiIpv6AddIndex]);
- }
-
-}
-
-static VOID DumpIpv6Header(struct bcm_ipv6_hdr *pstIpv6Header)
-{
- UCHAR ucVersion;
- UCHAR ucPrio;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "----Ipv6 Header---");
- ucVersion = pstIpv6Header->ucVersionPrio & 0xf0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Version : %x\n", ucVersion);
- ucPrio = pstIpv6Header->ucVersionPrio & 0x0f;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Priority : %x\n", ucPrio);
- /*
- * BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- * "Flow Label : %x\n",(pstIpv6Header->ucVersionPrio &0xf0);
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Payload Length : %x\n",
- ntohs(pstIpv6Header->usPayloadLength));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Next Header : %x\n", pstIpv6Header->ucNextHeader);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Hop Limit : %x\n", pstIpv6Header->ucHopLimit);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Src Address :\n");
- DumpIpv6Address(pstIpv6Header->ulSrcIpAddress);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "Dest Address :\n");
- DumpIpv6Address(pstIpv6Header->ulDestIpAddress);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV6_DBG, DBG_LVL_ALL,
- "----Ipv6 Header End---");
-
-
-}
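
GetNextIPV6ChainedHeader() above walks the chain of IPv6 extension headers until it reaches a terminal header such as TCP, UDP or ICMPv6, or runs out of payload. A simplified, self-contained sketch of the same walk over a raw byte buffer; it only handles the generic extension-header layout (a next-header byte plus a length byte in 8-octet units) and leaves out the fragment/authentication special cases the driver handles:

#include <stdint.h>
#include <stddef.h>

#define NEXTHDR_HOP     0   /* hop-by-hop options  */
#define NEXTHDR_ROUTING 43  /* routing header      */
#define NEXTHDR_DEST    60  /* destination options */

static int is_ext_hdr(uint8_t nh)
{
	return nh == NEXTHDR_HOP || nh == NEXTHDR_ROUTING || nh == NEXTHDR_DEST;
}

/* Skip chained IPv6 extension headers in buf and return the next-header
 * value of the first non-extension header (TCP, UDP, ICMPv6, ...), or -1
 * if the chain is truncated. On success *off is its offset inside buf. */
int ipv6_upper_proto(const uint8_t *buf, size_t len,
		     uint8_t first_nexthdr, size_t *off)
{
	uint8_t nh = first_nexthdr;
	size_t pos = 0;

	while (is_ext_hdr(nh)) {
		size_t hdrlen;

		if (pos + 2 > len)
			return -1;
		/* Generic layout: [next header][length in 8-octet units,
		 * excluding the first 8 octets][options...] */
		hdrlen = ((size_t)buf[pos + 1] + 1) * 8;
		if (pos + hdrlen > len)
			return -1;
		nh = buf[pos];
		pos += hdrlen;
	}
	*off = pos;
	return nh;
}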
diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h
deleted file mode 100644
index 96b36a579af2..000000000000
--- a/drivers/staging/bcm/IPv6ProtocolHdr.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _IPV6_PROTOCOL_DEFINES_
-#define _IPV6_PROTOCOL_DEFINES_
-
-#define IPV6HDR_TYPE_HOPBYHOP 0x0
-#define IPV6HDR_TYPE_ROUTING 0x2B
-#define IPV6HDR_TYPE_FRAGMENTATION 0x2C
-#define IPV6HDR_TYPE_DESTOPTS 0x3c
-#define IPV6HDR_TYPE_AUTHENTICATION 0x33
-#define IPV6HDR_TYPE_ENCRYPTEDSECURITYPAYLOAD 0x34
-#define MASK_IPV6_CS_SPEC 0x2
-
-#define TCP_HEADER_TYPE 0x6
-#define UDP_HEADER_TYPE 0x11
-#define IPV6_ICMP_HDR_TYPE 0x2
-#define IPV6_FLOWLABEL_BITOFFSET 9
-
-#define IPV6_MAX_CHAINEDHDR_BUFFBYTES 0x64
-/*
- * Size of Dest Options field of Destinations Options Header
- * in bytes.
- */
-#define IPV6_DESTOPTS_HDR_OPTIONSIZE 0x8
-
-struct bcm_ipv6_hdr {
- unsigned char ucVersionPrio;
- unsigned char aucFlowLabel[3];
- unsigned short usPayloadLength;
- unsigned char ucNextHeader;
- unsigned char ucHopLimit;
- unsigned long ulSrcIpAddress[4];
- unsigned long ulDestIpAddress[4];
-};
-
-struct bcm_ipv6_routing_hdr {
- unsigned char ucNextHeader;
- unsigned char ucRoutingType;
- unsigned char ucNumAddresses;
- unsigned char ucNextAddress;
- unsigned long ulReserved;
-};
-
-struct bcm_ipv6_fragment_hdr {
- unsigned char ucNextHeader;
- unsigned char ucReserved;
- unsigned short usFragmentOffset;
- unsigned long ulIdentification;
-};
-
-struct bcm_ipv6_dest_options_hdr {
- unsigned char ucNextHeader;
- unsigned char ucHdrExtLen;
- unsigned char ucDestOptions[6];
-};
-
-struct bcm_ipv6_options_hdr {
- unsigned char ucNextHeader;
- unsigned char ucMisc[3];
- unsigned long ulJumboPayloadLen;
-};
-
-struct bcm_ipv6_authentication_hdr {
- unsigned char ucNextHeader;
- unsigned char ucLength;
- unsigned short usReserved;
- unsigned long ulSecurityParametersIndex;
-};
-
-enum bcm_ipaddr_context {
- eSrcIpAddress,
- eDestIpAddress
-};
-
-/* Function Prototypes */
-
-unsigned short IpVersion6(struct bcm_mini_adapter *Adapter, /* < Pointer to the driver control structure */
- void *pcIpHeader, /* <Pointer to the IP Hdr of the packet */
- struct bcm_classifier_rule *pstClassifierRule);
-
-void DumpIpv6Address(unsigned long *puIpv6Address);
-
-extern bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
-extern bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, unsigned short ushSrcPort);
-extern bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, unsigned char ucProtocol);
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceAdapter.h b/drivers/staging/bcm/InterfaceAdapter.h
deleted file mode 100644
index 06a6b18bca48..000000000000
--- a/drivers/staging/bcm/InterfaceAdapter.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _INTERFACE_ADAPTER_H
-#define _INTERFACE_ADAPTER_H
-
-struct bcm_bulk_endpoint_in {
- char *bulk_in_buffer;
- size_t bulk_in_size;
- unsigned char bulk_in_endpointAddr;
- unsigned int bulk_in_pipe;
-};
-
-struct bcm_bulk_endpoint_out {
- unsigned char bulk_out_buffer;
- size_t bulk_out_size;
- unsigned char bulk_out_endpointAddr;
- unsigned int bulk_out_pipe;
-	/* this is used when the int out endpoint is used as a bulk out endpoint */
- unsigned char int_out_interval;
-};
-
-struct bcm_intr_endpoint_in {
- char *int_in_buffer;
- size_t int_in_size;
- unsigned char int_in_endpointAddr;
- unsigned char int_in_interval;
- unsigned int int_in_pipe;
-};
-
-struct bcm_intr_endpoint_out {
- char *int_out_buffer;
- size_t int_out_size;
- unsigned char int_out_endpointAddr;
- unsigned char int_out_interval;
- unsigned int int_out_pipe;
-};
-
-struct bcm_usb_tcb {
- struct urb *urb;
- void *psIntfAdapter;
- bool bUsed;
-};
-
-struct bcm_usb_rcb {
- struct urb *urb;
- void *psIntfAdapter;
- bool bUsed;
-};
-
-/*
- * This is the interface specific Sub-Adapter
- * Structure.
- */
-struct bcm_interface_adapter {
- struct usb_device *udev;
- struct usb_interface *interface;
- /* Bulk endpoint in info */
- struct bcm_bulk_endpoint_in sBulkIn;
- /* Bulk endpoint out info */
- struct bcm_bulk_endpoint_out sBulkOut;
- /* Interrupt endpoint in info */
- struct bcm_intr_endpoint_in sIntrIn;
- /* Interrupt endpoint out info */
- struct bcm_intr_endpoint_out sIntrOut;
- unsigned long ulInterruptData[2];
- struct urb *psInterruptUrb;
- struct bcm_usb_tcb asUsbTcb[MAXIMUM_USB_TCB];
- struct bcm_usb_rcb asUsbRcb[MAXIMUM_USB_RCB];
- atomic_t uNumTcbUsed;
- atomic_t uCurrTcb;
- atomic_t uNumRcbUsed;
- atomic_t uCurrRcb;
- struct bcm_mini_adapter *psAdapter;
- bool bFlashBoot;
- bool bHighSpeedDevice;
- bool bSuspended;
- bool bPreparingForBusSuspend;
- struct work_struct usbSuspendWork;
-};
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
deleted file mode 100644
index abc7a7ab782a..000000000000
--- a/drivers/staging/bcm/InterfaceDld.c
+++ /dev/null
@@ -1,317 +0,0 @@
-#include "headers.h"
-
-int InterfaceFileDownload(PVOID arg, struct file *flp, unsigned int on_chip_loc)
-{
- /* unsigned int reg = 0; */
- mm_segment_t oldfs = {0};
- int errno = 0, len = 0; /* ,is_config_file = 0 */
- loff_t pos = 0;
- struct bcm_interface_adapter *psIntfAdapter = arg;
- /* struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; */
- char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-
- if (!buff)
- return -ENOMEM;
-
- while (1) {
- oldfs = get_fs();
- set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff,
- MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- if (len <= 0) {
- if (len < 0)
- errno = len;
- else
- errno = 0;
- break;
- }
- /* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
- * DBG_LVL_ALL, buff,
- * MAX_TRANSFER_CTRL_BYTE_USB);
- */
- errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
- if (errno)
- break;
- on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
- }
-
- kfree(buff);
- return errno;
-}
-
-int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp,
- unsigned int on_chip_loc)
-{
- char *buff, *buff_readback;
- unsigned int reg = 0;
- mm_segment_t oldfs = {0};
- int errno = 0, len = 0, is_config_file = 0;
- loff_t pos = 0;
- static int fw_down;
- INT Status = STATUS_SUCCESS;
- struct bcm_interface_adapter *psIntfAdapter = arg;
- int bytes;
-
- buff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
- buff_readback = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
- if (!buff || !buff_readback) {
- kfree(buff);
- kfree(buff_readback);
-
- return -ENOMEM;
- }
-
- is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 1 : 0;
-
- while (1) {
- oldfs = get_fs();
- set_fs(get_ds());
- len = vfs_read(flp, (void __force __user *)buff,
- MAX_TRANSFER_CTRL_BYTE_USB, &pos);
- set_fs(oldfs);
- fw_down++;
-
- if (len <= 0) {
- if (len < 0)
- errno = len;
- else
- errno = 0;
- break;
- }
-
- bytes = InterfaceRDM(psIntfAdapter, on_chip_loc,
- buff_readback, len);
- if (bytes < 0) {
- Status = bytes;
- goto exit;
- }
- reg++;
- if ((len-sizeof(unsigned int)) < 4) {
- if (memcmp(buff_readback, buff, len)) {
- Status = -EIO;
- goto exit;
- }
- } else {
- len -= 4;
-
- while (len) {
- if (*(unsigned int *)&buff_readback[len] !=
- *(unsigned int *)&buff[len]) {
- Status = -EIO;
- goto exit;
- }
- len -= 4;
- }
- }
- on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
- } /* End of while(1) */
-
-exit:
- kfree(buff);
- kfree(buff_readback);
- return Status;
-}
-
-static int bcm_download_config_file(struct bcm_mini_adapter *Adapter,
- struct bcm_firmware_info *psFwInfo)
-{
- int retval = STATUS_SUCCESS;
- B_UINT32 value = 0;
-
- if (Adapter->pstargetparams == NULL) {
- Adapter->pstargetparams =
- kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
- if (Adapter->pstargetparams == NULL)
- return -ENOMEM;
- }
-
- if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
- return -EIO;
-
- retval = copy_from_user(Adapter->pstargetparams,
- psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength);
- if (retval) {
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -EFAULT;
- }
-
- /* Parse the structure and then Download the Firmware */
- beceem_parse_target_struct(Adapter);
-
- /* Initializing the NVM. */
- BcmInitNVM(Adapter);
- retval = InitLedSettings(Adapter);
-
- if (retval)
- return retval;
-
- if (Adapter->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->LEDInfo.bLedInitDone = false;
- Adapter->DriverState = DRIVER_INIT;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- if (Adapter->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- /* Initialize the DDR Controller */
- retval = ddr_init(Adapter);
- if (retval)
- return retval;
-
- value = 0;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4,
- &value, sizeof(value));
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8,
- &value, sizeof(value));
-
- if (Adapter->eNVMType == NVM_FLASH) {
- retval = PropagateCalParamsFromFlashToMemory(Adapter);
- if (retval)
- return retval;
- }
-
- retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams,
- sizeof(struct bcm_target_params), CONFIG_BEGIN_ADDR);
-
- if (retval)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
- MP_INIT, DBG_LVL_ALL,
- "configuration file not downloaded properly");
- else
- Adapter->bCfgDownloaded = TRUE;
-
- return retval;
-}
-
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter,
- struct bcm_firmware_info *psFwInfo)
-{
- int retval = STATUS_SUCCESS;
- PUCHAR buff = NULL;
-
- /* Config File is needed for the Driver to download the Config file and
- * Firmware. Check for the Config file to be first to be sent from the
- * Application
- */
- atomic_set(&Adapter->uiMBupdate, false);
- if (!Adapter->bCfgDownloaded &&
- psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
- /* Can't Download Firmware. */
- return -EINVAL;
- }
-
- /* If Config File, Finish the DDR Settings and then Download CFG File */
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) {
- retval = bcm_download_config_file(Adapter, psFwInfo);
- } else {
- buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
- if (buff == NULL)
- return -ENOMEM;
-
- retval = copy_from_user(buff,
- psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength);
- if (retval != STATUS_SUCCESS) {
- retval = -EFAULT;
- goto error;
- }
-
- retval = buffDnldVerify(Adapter,
- buff,
- psFwInfo->u32FirmwareLength,
- psFwInfo->u32StartingAddress);
-
- if (retval != STATUS_SUCCESS)
- goto error;
- }
-
-error:
- kfree(buff);
- return retval;
-}
-
-static INT buffDnld(struct bcm_mini_adapter *Adapter,
- PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
-{
- unsigned int len = 0;
- int retval = STATUS_SUCCESS;
-
- len = u32FirmwareLength;
-
- while (u32FirmwareLength) {
- len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
-
- if (retval)
- break;
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer += len;
- }
- return retval;
-}
-
-static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter,
- PUCHAR mappedbuffer, UINT u32FirmwareLength,
- ULONG u32StartingAddress)
-{
- UINT len = u32FirmwareLength;
- INT retval = STATUS_SUCCESS;
- PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
- int bytes;
-
- if (NULL == readbackbuff)
- return -ENOMEM;
-
- while (u32FirmwareLength && !retval) {
- len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
-
- if (bytes < 0) {
- retval = bytes;
- break;
- }
-
- if (memcmp(readbackbuff, mappedbuffer, len) != 0) {
- pr_err("%s() failed. The firmware doesn't match what was written",
- __func__);
- retval = -EIO;
- }
-
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer += len;
-
- } /* end of while (u32FirmwareLength && !retval) */
- kfree(readbackbuff);
- return retval;
-}
-
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter,
- unsigned char *mappedbuffer,
- unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress)
-{
- INT status = STATUS_SUCCESS;
-
- status = buffDnld(Adapter, mappedbuffer,
- u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS)
- goto error;
-
- status = buffRdbkVerify(Adapter, mappedbuffer,
- u32FirmwareLength, u32StartingAddress);
- if (status != STATUS_SUCCESS)
- goto error;
-error:
- return status;
-}
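
buffDnldVerify() above is a write-then-read-back pattern: the image is pushed to the device in MAX_TRANSFER_CTRL_BYTE_USB-sized chunks, then read back chunk by chunk and memcmp'd against the source. The driver does the full download first and a separate read-back pass afterwards; the sketch below folds both into one per-chunk loop for brevity, with hypothetical read/write callbacks standing in for wrm()/rdm():

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>

#define CHUNK 2048  /* stand-in for MAX_TRANSFER_CTRL_BYTE_USB */

typedef int (*xfer_fn)(uint32_t addr, uint8_t *buf, size_t len);

/* Write 'len' bytes at 'addr' in CHUNK-sized pieces, then read each
 * chunk back and compare it to the source image. Returns 0 on success,
 * -EIO on a mismatch, or the callback's error code. */
int download_and_verify(xfer_fn write_cb, xfer_fn read_cb,
			uint32_t addr, uint8_t *img, size_t len)
{
	uint8_t readback[CHUNK];

	while (len) {
		size_t n = len < CHUNK ? len : CHUNK;
		int ret;

		ret = write_cb(addr, img, n);
		if (ret)
			return ret;
		ret = read_cb(addr, readback, n);
		if (ret)
			return ret;
		if (memcmp(readback, img, n))
			return -EIO;	/* device contents do not match image */

		addr += n;
		img  += n;
		len  -= n;
	}
	return 0;
}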
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
deleted file mode 100644
index 612c89fba341..000000000000
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ /dev/null
@@ -1,274 +0,0 @@
-#include "headers.h"
-
-/*
-Function: InterfaceIdleModeWakeup
-
-Description: This is the hardware specific Function for
- waking up HW device from Idle mode.
- A software abort pattern is written to the
- device to wake it and necessary power state
- transitions from host are performed here.
-
-Input parameters: IN struct bcm_mini_adapter *Adapter
- - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Wakeup of the HW Interface
- was successful.
- Other - If an error occurred.
-*/
-
-/*
-Function: InterfaceIdleModeRespond
-
-Description: This is the hardware specific Function for
- responding to Idle mode request from target.
- Necessary power state transitions from host for
- idle mode or other device specific initializations
- are performed here.
-
-Input parameters: IN struct bcm_mini_adapter * Adapter
- - Miniport Adapter Context
-
-Return: BCM_STATUS_SUCCESS - If Idle mode response related
- HW configuration was successful.
- Other - If an error occurred.
-*/
-
-/*
-"dmem bfc02f00 100" tells how many time device went in Idle mode.
-this value will be at address bfc02fa4.just before value d0ea1dle.
-
-Set time value by writing at bfc02f98 7d0
-
-checking the Ack timer expire on kannon by running command
-d qcslog .. if it shows e means host has not send response
-to f/w with in 200 ms. Response should be
-send to f/w with in 200 ms after the Idle/Shutdown req issued
-
-*/
-
-
-int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
- unsigned int *puiBuffer)
-{
- int status = STATUS_SUCCESS;
- unsigned int uiRegRead = 0;
- int bytes;
-
- if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) {
- if (ntohl(*(puiBuffer+1)) == 0) {
-
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- uiRegRead = 0x00000000;
- status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
- }
-			/* The register below should not be read in case of
-			 * Manual and Protocol Idle mode */
- else if (Adapter->ulPowerSaveMode !=
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- /* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
- /* clear on read Register */
- bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
- }
-
- /* Set Idle Mode Flag to False and
- * Clear IdleMode reg. */
- Adapter->IdleMode = false;
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
-
- wake_up(&Adapter->lowpower_mode_wait_queue);
-
- } else {
- if (TRUE == Adapter->IdleMode)
- return status;
-
- uiRegRead = 0;
-
- if (Adapter->chip_id == BCS220_2 ||
- Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id == BCS250_BC ||
- Adapter->chip_id == BCS220_3) {
-
- bytes = rdmalt(Adapter, HPM_CONFIG_MSW,
- &uiRegRead, sizeof(uiRegRead));
- if (bytes < 0) {
- status = bytes;
- return status;
- }
-
-
- uiRegRead |= (1<<17);
-
- status = wrmalt(Adapter, HPM_CONFIG_MSW,
- &uiRegRead, sizeof(uiRegRead));
- if (status)
- return status;
- }
- SendIdleModeResponse(Adapter);
- }
- } else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) {
- OverrideServiceFlowParams(Adapter, puiBuffer);
- }
- return status;
-}
-
-static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter,
- unsigned int Pattern)
-{
- int status = STATUS_SUCCESS;
- unsigned int value;
- unsigned int chip_id;
- unsigned long timeout = 0, itr = 0;
-
- int lenwritten = 0;
- unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF};
- struct bcm_interface_adapter *psInterfaceAdapter =
- Adapter->pvInterfaceAdapter;
-
- /* Abort Bus suspend if its already suspended */
- if ((TRUE == psInterfaceAdapter->bSuspended) &&
- (TRUE == Adapter->bDoSuspend))
- status = usb_autopm_get_interface(
- psInterfaceAdapter->interface);
-
- if ((Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) ||
- (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
- /* write the SW abort pattern. */
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &Pattern, sizeof(Pattern));
- if (status)
- return status;
- }
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- value = 0x80000000;
- status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &value, sizeof(value));
- if (status)
- return status;
- } else if (Adapter->ulPowerSaveMode !=
- DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- /*
- * Get a Interrupt Out URB and send 8 Bytes Down
- * To be Done in Thread Context.
- * Not using Asynchronous Mechanism.
- */
- status = usb_interrupt_msg(psInterfaceAdapter->udev,
- usb_sndintpipe(psInterfaceAdapter->udev,
- psInterfaceAdapter->sIntrOut.int_out_endpointAddr),
- aucAbortPattern,
- 8,
- &lenwritten,
- 5000);
- if (status)
- return status;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "NOB Sent down :%d", lenwritten);
-
- /* mdelay(25); */
-
- timeout = jiffies + msecs_to_jiffies(50);
- while (time_after(timeout, jiffies)) {
- itr++;
- rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT));
- if (0xbece3200 == (chip_id&~(0xF0)))
- chip_id = chip_id&~(0xF0);
- if (chip_id == Adapter->chip_id)
- break;
- }
- if (time_before(timeout, jiffies))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Not able to read chip-id even after 25 msec");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Number of completed iteration to read chip-id :%lu", itr);
-
- status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC,
- &Pattern, sizeof(status));
- if (status)
- return status;
- }
- return status;
-}
-int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->bTriedToWakeUpFromlowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- IDLE_MODE, DBG_LVL_ALL,
- "Wake up already attempted.. ignoring\n");
- } else {
- Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern);
-
- }
- return 0;
-}
-
-void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiRegVal = 0;
- INT Status = 0;
- int bytes;
-
- if (Adapter->ulPowerSaveMode ==
- DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
- /* clear idlemode interrupt. */
- uiRegVal = 0;
- Status = wrmalt(Adapter,
- DEBUG_INTERRUPT_GENERATOR_REGISTOR,
- &uiRegVal, sizeof(uiRegVal));
- if (Status)
- return;
- }
-
- else {
-
-/* clear Interrupt EP registers. */
- bytes = rdmalt(Adapter,
- DEVICE_INT_OUT_EP_REG0,
- &uiRegVal, sizeof(uiRegVal));
- if (bytes < 0) {
- Status = bytes;
- return;
- }
-
- bytes = rdmalt(Adapter,
- DEVICE_INT_OUT_EP_REG1,
- &uiRegVal, sizeof(uiRegVal));
- if (bytes < 0) {
- Status = bytes;
- return;
- }
- }
-}
-
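
InterfaceAbortIdlemode() above writes the software abort pattern and then polls the chip-ID register under a jiffies deadline to confirm the device has come back. A generic userspace sketch of that bounded-poll idiom; read_chip_id() is a mock standing in for rdmalt() on CHIP_ID_REG, and this sketch masks the revision nibble unconditionally where the driver only does so for one chip family:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Mock stand-in for reading CHIP_ID_REG over the control pipe. */
static uint32_t read_chip_id(void)
{
	static int calls;

	return ++calls < 3 ? 0 : 0xbece3300;	/* "wakes up" on the third read */
}

/* Poll the chip-ID register until it matches 'expected' or 'timeout_ms'
 * elapses; mirrors the driver's time_after(timeout, jiffies) loop. */
int wait_for_chip(uint32_t expected, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		long long elapsed_ms;

		/* Compare with the revision bits masked out. */
		if ((read_chip_id() & ~0xF0u) == (expected & ~0xF0u))
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000LL +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= (long long)timeout_ms)
			return -1;	/* device did not respond in time */
	}
}

int main(void)
{
	printf("%d\n", wait_for_chip(0xbece3300, 50));	/* prints 0 */
	return 0;
}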
diff --git a/drivers/staging/bcm/InterfaceIdleMode.h b/drivers/staging/bcm/InterfaceIdleMode.h
deleted file mode 100644
index 2ef64003aa89..000000000000
--- a/drivers/staging/bcm/InterfaceIdleMode.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _INTERFACE_IDLEMODE_H
-#define _INTERFACE_IDLEMODE_H
-
-INT InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter);
-
-INT InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter,
- unsigned int *puiBuffer);
-
-VOID InterfaceWriteIdleModeWakePattern(struct bcm_mini_adapter *Adapter);
-
-INT InterfaceWakeUp(struct bcm_mini_adapter *Adapter);
-
-VOID InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter);
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
deleted file mode 100644
index bb61d34886b3..000000000000
--- a/drivers/staging/bcm/InterfaceInit.c
+++ /dev/null
@@ -1,729 +0,0 @@
-#include "headers.h"
-#include <linux/usb/ch9.h>
-static struct usb_device_id InterfaceUsbtable[] = {
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
-
-static int debug = -1;
-module_param(debug, uint, 0600);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
- | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int InterfaceAdapterInit(struct bcm_interface_adapter *Adapter);
-
-static void InterfaceAdapterFree(struct bcm_interface_adapter *psIntfAdapter)
-{
- int i = 0;
- struct bcm_mini_adapter *ps_ad = psIntfAdapter->psAdapter;
-
- /* Wake up the wait_queue... */
- if (ps_ad->LEDInfo.led_thread_running &
- BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_ad->DriverState = DRIVER_HALT;
- wake_up(&ps_ad->LEDInfo.notify_led_event);
- }
- reset_card_proc(ps_ad);
-
- /*
-	 * The worst-case time taken by the RDM/WRM will be 5 sec. We will check
-	 * every 100 ms to ascertain that the device is not being accessed. After
-	 * this, no RDM/WRM should be made.
- */
- while (ps_ad->DeviceAccess) {
- BCM_DEBUG_PRINT(ps_ad, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "Device is being accessed.\n");
- msleep(100);
- }
- /* Free interrupt URB */
- /* ps_ad->device_removed = TRUE; */
- usb_free_urb(psIntfAdapter->psInterruptUrb);
-
- /* Free transmit URBs */
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- if (psIntfAdapter->asUsbTcb[i].urb != NULL) {
- usb_free_urb(psIntfAdapter->asUsbTcb[i].urb);
- psIntfAdapter->asUsbTcb[i].urb = NULL;
- }
- }
- /* Free receive URB and buffers */
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- if (psIntfAdapter->asUsbRcb[i].urb != NULL) {
- kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
- usb_free_urb(psIntfAdapter->asUsbRcb[i].urb);
- psIntfAdapter->asUsbRcb[i].urb = NULL;
- }
- }
- AdapterFree(ps_ad);
-}
-
-static void ConfigureEndPointTypesThroughEEPROM(
- struct bcm_mini_adapter *Adapter)
-{
- u32 ulReg;
- int bytes;
- struct bcm_interface_adapter *interfaceAdapter;
-
- /* Program EP2 MAX_PKT_SIZE */
- ulReg = ntohl(EP2_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x128, 4, TRUE);
- ulReg = ntohl(EP2_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x12C, 4, TRUE);
-
- ulReg = ntohl(EP2_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
- interfaceAdapter =
- (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
- if (interfaceAdapter->bHighSpeedDevice) {
- ulReg = ntohl(EP2_CFG_INT);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
- } else {
- /* USE BULK EP as TX in FS mode. */
- ulReg = ntohl(EP2_CFG_BULK);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
- }
-
- /* Program EP4 MAX_PKT_SIZE. */
- ulReg = ntohl(EP4_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x13C, 4, TRUE);
- ulReg = ntohl(EP4_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
-
- /* Program TX EP as interrupt(Alternate Setting) */
- bytes = rdmalt(Adapter, 0x0F0110F8, &ulReg, sizeof(u32));
- if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "reading of Tx EP failed\n");
- return;
- }
- ulReg |= 0x6;
-
- ulReg = ntohl(ulReg);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1CC, 4, TRUE);
-
- ulReg = ntohl(EP4_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C8, 4, TRUE);
- /* Program ISOCHRONOUS EP size to zero. */
- ulReg = ntohl(ISO_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D2, 4, TRUE);
- ulReg = ntohl(ISO_MPS);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D6, 4, TRUE);
-
- /*
- * Update EEPROM Version.
- * Read 4 bytes from 508 and modify 511 and 510.
- */
- ReadBeceemEEPROM(Adapter, 0x1FC, &ulReg);
- ulReg &= 0x0101FFFF;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
-
- /*
- * Update length field if required.
-	 * Also make the string NULL-terminated.
- */
-
- ReadBeceemEEPROM(Adapter, 0xA8, &ulReg);
- if ((ulReg&0x00FF0000)>>16 > 0x30) {
- ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
- }
- ReadBeceemEEPROM(Adapter, 0x148, &ulReg);
- if ((ulReg&0x00FF0000)>>16 > 0x30) {
- ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
- }
- ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x122, 4, TRUE);
- ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE);
-}
-
-static int usbbcm_device_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- int retval;
- struct bcm_mini_adapter *psAdapter;
- struct bcm_interface_adapter *psIntfAdapter;
- struct net_device *ndev;
-
- /* Reserve one extra queue for the bit-bucket */
- ndev = alloc_etherdev_mq(sizeof(struct bcm_mini_adapter),
- NO_OF_QUEUES + 1);
- if (ndev == NULL) {
- dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(ndev, &intf->dev);
-
- psAdapter = netdev_priv(ndev);
- psAdapter->dev = ndev;
- psAdapter->msg_enable = netif_msg_init(debug, default_msg);
-
- /* Init default driver debug state */
-
- psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
- psAdapter->stDebugState.type = DBG_TYPE_INITEXIT;
-
- /*
- * Technically, one can start using BCM_DEBUG_PRINT after this point.
- * However, realize that by default the Type/Subtype bitmaps are all
- * zero now; so no prints will actually appear until the TestApp turns
- * on debug paths via the ioctl(); so practically speaking, in early
- * init, no logging happens.
- *
- * A solution (used below): we explicitly set the bitmaps to 1 for
-	 * Type=DBG_TYPE_INITEXIT and ALL subtypes of the same. Now all bcm
- * debug statements get logged, enabling debug during early init.
- * Further, we turn this OFF once init_module() completes.
- */
-
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
- BCM_SHOW_DEBUG_BITMAP(psAdapter);
-
- retval = InitAdapter(psAdapter);
- if (retval) {
- dev_err(&udev->dev, DRV_NAME ": InitAdapter Failed\n");
- AdapterFree(psAdapter);
- return retval;
- }
-
- /* Allocate interface adapter structure */
- psIntfAdapter = kzalloc(sizeof(struct bcm_interface_adapter),
- GFP_KERNEL);
- if (psIntfAdapter == NULL) {
- AdapterFree(psAdapter);
- return -ENOMEM;
- }
-
- psAdapter->pvInterfaceAdapter = psIntfAdapter;
- psIntfAdapter->psAdapter = psAdapter;
-
- /* Store usb interface in Interface Adapter */
- psIntfAdapter->interface = intf;
- usb_set_intfdata(intf, psIntfAdapter);
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "psIntfAdapter 0x%p\n", psIntfAdapter);
- retval = InterfaceAdapterInit(psIntfAdapter);
- if (retval) {
- /* If the Firmware/Cfg File is not present
- * then return success, let the application
- * download the files.
- */
- if (-ENOENT == retval) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "File Not Found. Use app to download.\n");
- return STATUS_SUCCESS;
- }
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL, "InterfaceAdapterInit failed.\n");
- usb_set_intfdata(intf, NULL);
- udev = interface_to_usbdev(intf);
- usb_put_dev(udev);
- InterfaceAdapterFree(psIntfAdapter);
- return retval;
- }
- if (psAdapter->chip_id > T3) {
- uint32_t uiNackZeroLengthInt = 4;
-
- retval =
- wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT,
- &uiNackZeroLengthInt,
- sizeof(uiNackZeroLengthInt));
- if (retval)
- return retval;
- }
-
- /* Check whether the USB-Device Supports remote Wake-Up */
- if (USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes) {
- /* If Suspend then only support dynamic suspend */
- if (psAdapter->bDoSuspend) {
-#ifdef CONFIG_PM
- pm_runtime_set_autosuspend_delay(&udev->dev, 0);
- intf->needs_remote_wakeup = 1;
- usb_enable_autosuspend(udev);
- device_init_wakeup(&intf->dev, 1);
- INIT_WORK(&psIntfAdapter->usbSuspendWork,
- putUsbSuspend);
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Enabling USB Auto-Suspend\n");
-#endif
- } else {
- intf->needs_remote_wakeup = 0;
- usb_disable_autosuspend(udev);
- }
- }
-
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
- return retval;
-}
-
-static void usbbcm_disconnect(struct usb_interface *intf)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
- struct bcm_mini_adapter *psAdapter;
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (psIntfAdapter == NULL)
- return;
-
- psAdapter = psIntfAdapter->psAdapter;
- netif_device_detach(psAdapter->dev);
-
- if (psAdapter->bDoSuspend)
- intf->needs_remote_wakeup = 0;
-
- psAdapter->device_removed = TRUE;
- usb_set_intfdata(intf, NULL);
- InterfaceAdapterFree(psIntfAdapter);
- usb_put_dev(udev);
-}
-
-static int AllocUsbCb(struct bcm_interface_adapter *psIntfAdapter)
-{
- int i = 0;
-
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- psIntfAdapter->asUsbTcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbTcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Tx urb for index %d\n",
- i);
- return -ENOMEM;
- }
- }
-
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- psIntfAdapter->asUsbRcb[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbRcb[i].urb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx urb for index %d\n",
- i);
- return -ENOMEM;
- }
-
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
- kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL);
-
- if (psIntfAdapter->asUsbRcb[i].urb->transfer_buffer == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Can't allocate Rx buffer for index %d\n",
- i);
- return -ENOMEM;
- }
- psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length =
- MAX_DATA_BUFFER_SIZE;
- }
- return 0;
-}
-
-static int device_run(struct bcm_interface_adapter *psIntfAdapter)
-{
- int value = 0;
- UINT status = STATUS_SUCCESS;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
- status = InitCardAndDownloadFirmware(psAd);
- if (status != STATUS_SUCCESS) {
- pr_err(DRV_NAME "InitCardAndDownloadFirmware failed.\n");
- return status;
- }
- if (psAd->fw_download_done) {
- if (StartInterruptUrb(psIntfAdapter)) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Cannot send interrupt in URB\n");
- }
-
- /*
-		 * Now register the control interface. After downloading the
-		 * f/w, wait up to 5 sec for the mailbox interrupt.
- */
- psAd->waiting_to_fw_download_done = false;
- value = wait_event_timeout(psAd->ioctl_fw_dnld_wait_queue,
- psAd->waiting_to_fw_download_done,
- 5 * HZ);
-
- if (value == 0)
- pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
-
- if (register_control_device_interface(psAd) < 0) {
- pr_err(DRV_NAME ": Register Control Device failed.\n");
- return -EIO;
- }
- }
- return 0;
-}
-
-static int select_alternate_setting_for_highspeed_modem(
- struct bcm_interface_adapter *psIntfAdapter,
- struct usb_endpoint_descriptor **endpoint,
- const struct usb_host_interface *iface_desc,
- int *usedIntOutForBulkTransfer)
-{
- int retval = 0;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
-	/* Select alternate setting one as the default setting
-	 * for a High Speed modem. */
- if (psIntfAdapter->bHighSpeedDevice)
- retval = usb_set_interface(psIntfAdapter->udev,
- DEFAULT_SETTING_0,
- ALTERNATE_SETTING_1);
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "BCM16 is applicable on this dongle\n");
- if (retval || !psIntfAdapter->bHighSpeedDevice) {
- *usedIntOutForBulkTransfer = EP2;
- *endpoint = &iface_desc->endpoint[EP2].desc;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
- /*
-		 * If the modem is a high speed device, EP2 should be
-		 * an INT OUT end point.
-		 *
-		 * If the modem is FS, then EP2 should be a bulk end
-		 * point.
- */
- if ((psIntfAdapter->bHighSpeedDevice &&
- !usb_endpoint_is_int_out(*endpoint)) ||
- (!psIntfAdapter->bHighSpeedDevice &&
- !usb_endpoint_is_bulk_out(*endpoint))) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Configuring the EEPROM\n");
- /* change the EP2, EP4 to INT OUT end point */
- ConfigureEndPointTypesThroughEEPROM(
- psAd);
-
- /*
-			 * Reset the device; if anything has
-			 * changed in the USB descriptors, the
-			 * reset will fail and the device will
-			 * re-enumerate.
- */
- retval = usb_reset_device(psIntfAdapter->udev);
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT,
- DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval;
- }
-
- }
- if (!psIntfAdapter->bHighSpeedDevice &&
- usb_endpoint_is_bulk_out(*endpoint)) {
- /*
-			 * Once BULK has been selected in FS mode,
-			 * revert it back to INT;
-			 * otherwise USB_IF will fail.
- */
- UINT _uiData = ntohl(EP2_CFG_INT);
-
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Reverting Bulk to INT as it is in Full Speed mode.\n");
- BeceemEEPROMBulkWrite(psAd, (PUCHAR) & _uiData, 0x136,
- 4, TRUE);
- }
- } else {
- *usedIntOutForBulkTransfer = EP4;
- *endpoint = &iface_desc->endpoint[EP4].desc;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
- "Choosing AltSetting as a default setting.\n");
- if (!usb_endpoint_is_int_out(*endpoint)) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Dongle does not have BCM16 Fix.\n");
- /*
- * change the EP2, EP4 to INT OUT end point and use EP4
- * in altsetting
- */
- ConfigureEndPointTypesThroughEEPROM(psAd);
-
- /*
-			 * Reset the device; if anything has
-			 * changed in the USB descriptors, the
-			 * reset will fail and the device will
-			 * re-enumerate.
- */
- retval = usb_reset_device(psIntfAdapter->udev);
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_INITEXIT,
- DRV_ENTRY, DBG_LVL_ALL,
- "reset failed. Re-enumerating the device.\n");
- return retval;
- }
- }
- }
-
- return 0;
-}
-
-static int InterfaceAdapterInit(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t buffer_size;
- unsigned long value;
- int retval = 0;
- int usedIntOutForBulkTransfer = 0;
- bool bBcm16 = false;
- UINT uiData = 0;
- int bytes;
- struct bcm_mini_adapter *psAd = psIntfAdapter->psAdapter;
-
- /* Store the usb dev into interface adapter */
- psIntfAdapter->udev =
- usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
-
- psIntfAdapter->bHighSpeedDevice =
- (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
- psAd->interface_rdm = BcmRDM;
- psAd->interface_wrm = BcmWRM;
-
- bytes = rdmalt(psAd, CHIP_ID_REG, (u32 *) &(psAd->chip_id),
- sizeof(u32));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0,
- "CHIP ID Read Failed\n");
- return retval;
- }
-
- if (0xbece3200 == (psAd->chip_id & ~(0xF0)))
- psAd->chip_id &= ~0xF0;
-
- dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
- psAd->chip_id);
-
- iface_desc = psIntfAdapter->interface->cur_altsetting;
-
- if (psAd->chip_id == T3B) {
-		/* A T3B device will have an EEPROM; check whether the EEPROM
-		 * is valid and BCM16 can be done or not. */
- BeceemEEPROMBulkRead(psAd, &uiData, 0x0, 4);
- if (uiData == BECM)
- bBcm16 = TRUE;
-
- dev_info(&psIntfAdapter->udev->dev,
- "number of alternate setting %d\n",
- psIntfAdapter->interface->num_altsetting);
-
- if (bBcm16 == TRUE) {
- retval = select_alternate_setting_for_highspeed_modem(
- psIntfAdapter, &endpoint, iface_desc,
- &usedIntOutForBulkTransfer);
- if (retval)
- return retval;
- }
- }
-
- iface_desc = psIntfAdapter->interface->cur_altsetting;
-
- for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
- endpoint = &iface_desc->endpoint[value].desc;
-
- if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr &&
- usb_endpoint_is_bulk_in(endpoint)) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkIn.bulk_in_pipe = usb_rcvbulkpipe(
- psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
- }
-
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- usb_endpoint_is_bulk_out(endpoint)) {
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndbulkpipe(
- psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- }
-
- if (!psIntfAdapter->sIntrIn.int_in_endpointAddr &&
- usb_endpoint_is_int_in(endpoint)) {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrIn.int_in_size = buffer_size;
- psIntfAdapter->sIntrIn.int_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrIn.int_in_interval =
- endpoint->bInterval;
- psIntfAdapter->sIntrIn.int_in_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrIn.int_in_buffer)
- return -EINVAL;
- }
-
- if (!psIntfAdapter->sIntrOut.int_out_endpointAddr &&
- usb_endpoint_is_int_out(endpoint)) {
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- (psAd->chip_id == T3B) &&
- (value == usedIntOutForBulkTransfer)) {
- /*
- * use first intout end point as a bulk out end
- * point
- */
- buffer_size =
- le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkOut.bulk_out_size =
- buffer_size;
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndintpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut
- .bulk_out_endpointAddr);
- psIntfAdapter->sBulkOut.int_out_interval =
- endpoint->bInterval;
- } else if (value == EP6) {
- buffer_size =
- le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrOut.int_out_size =
- buffer_size;
- psIntfAdapter->sIntrOut.int_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrOut.int_out_interval =
- endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer =
- kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrOut.int_out_buffer)
- return -EINVAL;
- }
- }
- }
-
- usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
-
- psAd->bcm_file_download = InterfaceFileDownload;
- psAd->bcm_file_readback_from_chip = InterfaceFileReadbackFromChip;
- psAd->interface_transmit = InterfaceTransmitPacket;
-
- retval = CreateInterruptUrb(psIntfAdapter);
-
- if (retval) {
- BCM_DEBUG_PRINT(psAd, DBG_TYPE_PRINTK, 0, 0,
- "Cannot create interrupt urb\n");
- return retval;
- }
-
- retval = AllocUsbCb(psIntfAdapter);
- if (retval)
- return retval;
-
- return device_run(psIntfAdapter);
-}
-
-static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
-
- psIntfAdapter->bSuspended = TRUE;
-
- if (psIntfAdapter->bPreparingForBusSuspend) {
- psIntfAdapter->bPreparingForBusSuspend = false;
-
- if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
- psIntfAdapter->psAdapter->IdleMode = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Host Entered in PMU Idle Mode.\n");
- } else {
- psIntfAdapter->psAdapter->bShutStatus = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_INITEXIT, DRV_ENTRY,
- DBG_LVL_ALL,
- "Host Entered in PMU Shutdown Mode.\n");
- }
- }
- psIntfAdapter->psAdapter->bPreparingForLowPowerMode = false;
-
- /* Signaling the control pkt path */
- wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue);
-
- return 0;
-}
-
-static int InterfaceResume(struct usb_interface *intf)
-{
- struct bcm_interface_adapter *psIntfAdapter = usb_get_intfdata(intf);
-
- mdelay(100);
- psIntfAdapter->bSuspended = false;
-
- StartInterruptUrb(psIntfAdapter);
- InterfaceRx(psIntfAdapter);
- return 0;
-}
-
-static struct usb_driver usbbcm_driver = {
- .name = "usbbcm",
- .probe = usbbcm_device_probe,
- .disconnect = usbbcm_disconnect,
- .suspend = InterfaceSuspend,
- .resume = InterfaceResume,
- .id_table = InterfaceUsbtable,
- .supports_autosuspend = 1,
-};
-
-struct class *bcm_class;
-
-static __init int bcm_init(void)
-{
- int retval;
-
- pr_info("%s: %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
- pr_info("%s\n", DRV_COPYRIGHT);
-
- bcm_class = class_create(THIS_MODULE, DRV_NAME);
- if (IS_ERR(bcm_class)) {
- pr_err(DRV_NAME ": could not create class\n");
- return PTR_ERR(bcm_class);
- }
-
- retval = usb_register(&usbbcm_driver);
- if (retval < 0) {
- pr_err(DRV_NAME ": could not register usb driver\n");
- class_destroy(bcm_class);
- return retval;
- }
- return 0;
-}
-
-static __exit void bcm_exit(void)
-{
- usb_deregister(&usbbcm_driver);
- class_destroy(bcm_class);
-}
-
-module_init(bcm_init);
-module_exit(bcm_exit);
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
deleted file mode 100644
index ffa6e9667ec4..000000000000
--- a/drivers/staging/bcm/InterfaceInit.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _INTERFACE_INIT_H
-#define _INTERFACE_INIT_H
-
-#define BCM_USB_VENDOR_ID_T3 0x198f
-#define BCM_USB_VENDOR_ID_FOXCONN 0x0489
-#define BCM_USB_VENDOR_ID_ZTE 0x19d2
-
-#define BCM_USB_PRODUCT_ID_T3 0x0300
-#define BCM_USB_PRODUCT_ID_T3B 0x0210
-#define BCM_USB_PRODUCT_ID_T3L 0x0220
-#define BCM_USB_PRODUCT_ID_SYM 0x15E
-#define BCM_USB_PRODUCT_ID_1901 0xe017
-#define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */
-#define BCM_USB_PRODUCT_ID_ZTE_226 0x172
-#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */
-#define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
-
-#define BCM_USB_MINOR_BASE 192
-
-int InterfaceInitialize(void);
-
-int InterfaceExit(void);
-
-int usbbcm_worker_thread(struct bcm_interface_adapter *psIntfAdapter);
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
deleted file mode 100644
index b9f8a7aa24fe..000000000000
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ /dev/null
@@ -1,190 +0,0 @@
-#include "headers.h"
-
-
-static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
-{
- int status = urb->status;
- struct bcm_interface_adapter *psIntfAdapter =
- (struct bcm_interface_adapter *)urb->context;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
-
- if (netif_msg_intr(Adapter))
- pr_info(PFX "%s: interrupt status %d\n",
- Adapter->dev->name, status);
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Device has Got Removed.");
- return;
- }
-
- if ((Adapter->bPreparingForLowPowerMode && Adapter->bDoSuspend) ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
-				"Interrupt callback is called while suspending the device");
- return;
- }
-
- switch (status) {
- /* success */
- case STATUS_SUCCESS:
- if (urb->actual_length) {
-
- if (psIntfAdapter->ulInterruptData[1] & 0xFF) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "Got USIM interrupt");
- }
-
- if (psIntfAdapter->ulInterruptData[1] & 0xFF00) {
- atomic_set(&Adapter->CurrNumFreeTxDesc,
- (psIntfAdapter->ulInterruptData[1] &
- 0xFF00) >> 8);
- atomic_set(&Adapter->uiMBupdate, TRUE);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "TX mailbox contains %d",
- atomic_read(&Adapter->CurrNumFreeTxDesc));
- }
- if (psIntfAdapter->ulInterruptData[1] >> 16) {
- Adapter->CurrNumRecvDescs =
- (psIntfAdapter->ulInterruptData[1] >> 16);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "RX mailbox contains %d",
- Adapter->CurrNumRecvDescs);
- InterfaceRx(psIntfAdapter);
- }
- if (Adapter->fw_download_done &&
- !Adapter->downloadDDR &&
- atomic_read(&Adapter->CurrNumFreeTxDesc)) {
-
- psIntfAdapter->psAdapter->downloadDDR += 1;
- wake_up(&Adapter->tx_packet_wait_queue);
- }
- if (!Adapter->waiting_to_fw_download_done) {
- Adapter->waiting_to_fw_download_done = TRUE;
- wake_up(&Adapter->ioctl_fw_dnld_wait_queue);
- }
- if (!atomic_read(&Adapter->TxPktAvail)) {
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Firing interrupt in URB");
- }
- break;
- case -ENOENT:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "URB has got disconnected....");
- return;
- case -EINPROGRESS:
- /*
-		 * This situation may happen when usb_unlink_urb is used. For
-		 * details, check the usb_unlink_urb documentation.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
-				"Impossible condition has occurred... something very bad is going on");
- break;
- /* return; */
- case -EPIPE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
-				"Interrupt IN endpoint has been halted/stalled... need to clear this");
- Adapter->bEndPointHalted = TRUE;
- wake_up(&Adapter->tx_packet_wait_queue);
- urb->status = STATUS_SUCCESS;
- return;
- /* software-driven interface shutdown */
- case -ECONNRESET: /* URB got unlinked */
-	case -ESHUTDOWN:	/* hardware gone; this is a serious problem */
- /*
- * Occurs only when something happens with the
- * host controller device
- */
- case -ENODEV: /* Device got removed */
- case -EINVAL:
- /*
-		 * Something very bad happened with the URB. No
- * description is available.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "interrupt urb error %d", status);
- urb->status = STATUS_SUCCESS;
- break;
- /* return; */
- default:
- /*
-		 * This is required to check what the default conditions
-		 * are when this occurs.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "GOT DEFAULT INTERRUPT URB STATUS :%d..Please Analyze it...",
- status);
- break;
- }
-
- StartInterruptUrb(psIntfAdapter);
-
-
-}
-
-int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
-{
- psIntfAdapter->psInterruptUrb = usb_alloc_urb(0, GFP_KERNEL);
- if (!psIntfAdapter->psInterruptUrb) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS,
- INTF_INIT, DBG_LVL_ALL,
- "Cannot allocate interrupt urb");
- return -ENOMEM;
- }
- psIntfAdapter->psInterruptUrb->transfer_buffer =
- psIntfAdapter->ulInterruptData;
- psIntfAdapter->psInterruptUrb->transfer_buffer_length =
- sizeof(psIntfAdapter->ulInterruptData);
-
- psIntfAdapter->sIntrIn.int_in_pipe = usb_rcvintpipe(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_endpointAddr);
-
- usb_fill_int_urb(psIntfAdapter->psInterruptUrb, psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe,
- psIntfAdapter->psInterruptUrb->transfer_buffer,
- psIntfAdapter->psInterruptUrb->transfer_buffer_length,
- read_int_callback, psIntfAdapter,
- psIntfAdapter->sIntrIn.int_in_interval);
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL, "Interrupt Interval: %d\n",
- psIntfAdapter->sIntrIn.int_in_interval);
- return 0;
-}
-
-
-INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter)
-{
- INT status = 0;
-
- if (!(psIntfAdapter->psAdapter->device_removed ||
- psIntfAdapter->psAdapter->bEndPointHalted ||
- psIntfAdapter->bSuspended ||
- psIntfAdapter->bPreparingForBusSuspend ||
- psIntfAdapter->psAdapter->StopAllXaction)) {
- status =
- usb_submit_urb(psIntfAdapter->psInterruptUrb, GFP_ATOMIC);
- if (status) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,
- "Cannot send inturb %d\n", status);
- if (status == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted =
- TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- }
- }
- return status;
-}
-
diff --git a/drivers/staging/bcm/InterfaceIsr.h b/drivers/staging/bcm/InterfaceIsr.h
deleted file mode 100644
index 3073bd71cfeb..000000000000
--- a/drivers/staging/bcm/InterfaceIsr.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _INTERFACE_ISR_H
-#define _INTERFACE_ISR_H
-
-int CreateInterruptUrb(struct bcm_interface_adapter *psIntfAdapter);
-
-
-INT StartInterruptUrb(struct bcm_interface_adapter *psIntfAdapter);
-
-
-VOID InterfaceEnableInterrupt(struct bcm_mini_adapter *Adapter);
-
-VOID InterfaceDisableInterrupt(struct bcm_mini_adapter *Adapter);
-
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceMacros.h b/drivers/staging/bcm/InterfaceMacros.h
deleted file mode 100644
index fedb79437f33..000000000000
--- a/drivers/staging/bcm/InterfaceMacros.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _INTERFACE_MACROS_H
-#define _INTERFACE_MACROS_H
-
-#define BCM_USB_MAX_READ_LENGTH 2048
-
-#define MAXIMUM_USB_TCB 128
-#define MAXIMUM_USB_RCB 128
-
-#define MAX_BUFFERS_PER_QUEUE 256
-
-#define MAX_DATA_BUFFER_SIZE 2048
-
-/* Num of Asynchronous reads pending */
-#define NUM_RX_DESC 64
-
-#define SYS_CFG 0x0F000C00
-
-#endif
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
deleted file mode 100644
index e5bcfec2a6cf..000000000000
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#include "headers.h"
-
-static int adapter_err_occurred(const struct bcm_interface_adapter *ad)
-{
- if (ad->psAdapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Device got removed");
- return -ENODEV;
- }
-
- if ((ad->psAdapter->StopAllXaction == TRUE) &&
- (ad->psAdapter->chip_id >= T3LPB)) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL,
- "Currently Xaction is not allowed on the bus");
- return -EACCES;
- }
-
- if (ad->bSuspended == TRUE || ad->bPreparingForBusSuspend == TRUE) {
- BCM_DEBUG_PRINT(ad->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL,
-				"Bus is in a suspended state, hence RDM is not allowed");
- return -EACCES;
- }
-
- return 0;
-}
-
-int InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter,
- unsigned int addr,
- void *buff,
- int len)
-{
- int bytes;
- int err = 0;
-
- if (!psIntfAdapter)
- return -EINVAL;
-
- err = adapter_err_occurred(psIntfAdapter);
- if (err)
- return err;
-
- psIntfAdapter->psAdapter->DeviceAccess = TRUE;
-
- bytes = usb_control_msg(psIntfAdapter->udev,
- usb_rcvctrlpipe(psIntfAdapter->udev, 0),
- 0x02,
- 0xC2,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- if (-ENODEV == bytes)
- psIntfAdapter->psAdapter->device_removed = TRUE;
-
- if (bytes < 0)
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL, "RDM failed status :%d", bytes);
- else
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM,
- DBG_LVL_ALL, "RDM sent %d", bytes);
-
- psIntfAdapter->psAdapter->DeviceAccess = false;
- return bytes;
-}
-
-int InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter,
- unsigned int addr,
- void *buff,
- int len)
-{
- int retval = 0;
- int err = 0;
-
- if (!psIntfAdapter)
- return -EINVAL;
-
- err = adapter_err_occurred(psIntfAdapter);
- if (err)
- return err;
-
- psIntfAdapter->psAdapter->DeviceAccess = TRUE;
-
- retval = usb_control_msg(psIntfAdapter->udev,
- usb_sndctrlpipe(psIntfAdapter->udev, 0),
- 0x01,
- 0x42,
- (addr & 0xFFFF),
- ((addr >> 16) & 0xFFFF),
- buff,
- len,
- 5000);
-
- if (-ENODEV == retval)
- psIntfAdapter->psAdapter->device_removed = TRUE;
-
- if (retval < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM,
- DBG_LVL_ALL, "WRM failed status :%d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = false;
- return retval;
- } else {
- psIntfAdapter->psAdapter->DeviceAccess = false;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, WRM,
- DBG_LVL_ALL, "WRM sent %d", retval);
- return STATUS_SUCCESS;
- }
-}
-
-int BcmRDM(void *arg,
- unsigned int addr,
- void *buff,
- int len)
-{
- return InterfaceRDM((struct bcm_interface_adapter *)arg, addr, buff,
- len);
-}
-
-int BcmWRM(void *arg,
- unsigned int addr,
- void *buff,
- int len)
-{
- return InterfaceWRM((struct bcm_interface_adapter *)arg, addr, buff,
- len);
-}
-
-int Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_interface_adapter *psIntfAdapter =
- (struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter);
- int status = STATUS_SUCCESS;
-
- /*
- * usb_clear_halt - tells device to clear endpoint halt/stall condition
- * @dev: device whose endpoint is halted
- * @pipe: endpoint "pipe" being cleared
- * @ Context: !in_interrupt ()
- *
-	 * usb_clear_halt is a synchronous call and returns 0 on success,
-	 * else it returns an error code.
-	 * This is used to clear halt conditions for bulk and interrupt
-	 * endpoints only.
-	 * Control and isochronous endpoints never halt.
- *
- * Any URBs queued for such an endpoint should normally be unlinked by
- * the driver before clearing the halt condition.
- *
- */
-
- /* Killing all the submitted urbs to different end points. */
- Bcm_kill_all_URBs(psIntfAdapter);
-
- /* clear the halted/stalled state for every end point */
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sIntrIn.int_in_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Interrupt IN end point. :%d ",
- status);
-
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Bulk IN end point. :%d ",
- status);
-
- status = usb_clear_halt(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe);
- if (status != STATUS_SUCCESS)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, INTF_INIT,
- DBG_LVL_ALL,
- "Unable to Clear Halt of Bulk OUT end point. :%d ",
- status);
-
- return status;
-}
-
-void Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct urb *tempUrb = NULL;
- unsigned int i;
-
- /*
- * usb_kill_urb - cancel a transfer request and wait for it to finish
-	 * @urb: pointer to URB describing a previously submitted request;
-	 * returns nothing as it is a void-returning API.
- *
- * This routine cancels an in-progress request. It is guaranteed that
- * upon return all completion handlers will have finished and the URB
- * will be totally idle and available for reuse
- *
- * This routine may not be used in an interrupt context (such as a
- * bottom half or a completion handler), or when holding a spinlock, or
- * in other situations where the caller can't schedule().
- *
- */
-
- /* Cancel submitted Interrupt-URB's */
- if (psIntfAdapter->psInterruptUrb) {
- if (psIntfAdapter->psInterruptUrb->status == -EINPROGRESS)
- usb_kill_urb(psIntfAdapter->psInterruptUrb);
- }
-
- /* Cancel All submitted TX URB's */
- for (i = 0; i < MAXIMUM_USB_TCB; i++) {
- tempUrb = psIntfAdapter->asUsbTcb[i].urb;
- if (tempUrb) {
- if (tempUrb->status == -EINPROGRESS)
- usb_kill_urb(tempUrb);
- }
- }
-
- for (i = 0; i < MAXIMUM_USB_RCB; i++) {
- tempUrb = psIntfAdapter->asUsbRcb[i].urb;
- if (tempUrb) {
- if (tempUrb->status == -EINPROGRESS)
- usb_kill_urb(tempUrb);
- }
- }
-
- atomic_set(&psIntfAdapter->uNumTcbUsed, 0);
- atomic_set(&psIntfAdapter->uCurrTcb, 0);
-
- atomic_set(&psIntfAdapter->uNumRcbUsed, 0);
- atomic_set(&psIntfAdapter->uCurrRcb, 0);
-}
-
-void putUsbSuspend(struct work_struct *work)
-{
- struct bcm_interface_adapter *psIntfAdapter = NULL;
- struct usb_interface *intf = NULL;
-
- psIntfAdapter = container_of(work, struct bcm_interface_adapter,
- usbSuspendWork);
- intf = psIntfAdapter->interface;
-
- if (psIntfAdapter->bSuspended == false)
- usb_autopm_put_interface(intf);
-}
-
diff --git a/drivers/staging/bcm/InterfaceMisc.h b/drivers/staging/bcm/InterfaceMisc.h
deleted file mode 100644
index 0e5e38b26329..000000000000
--- a/drivers/staging/bcm/InterfaceMisc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __INTERFACE_MISC_H
-#define __INTERFACE_MISC_H
-
-INT
-InterfaceRDM(struct bcm_interface_adapter *psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len);
-
-INT
-InterfaceWRM(struct bcm_interface_adapter *psIntfAdapter,
- UINT addr,
- PVOID buff,
- INT len);
-
-
-int InterfaceFileDownload(PVOID psIntfAdapter,
- struct file *flp,
- unsigned int on_chip_loc);
-
-int InterfaceFileReadbackFromChip(PVOID psIntfAdapter,
- struct file *flp,
- unsigned int on_chip_loc);
-
-
-int BcmRDM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len);
-
-int BcmWRM(PVOID arg,
- UINT addr,
- PVOID buff,
- INT len);
-
-INT Bcm_clear_halt_of_endpoints(struct bcm_mini_adapter *Adapter);
-
-VOID Bcm_kill_all_URBs(struct bcm_interface_adapter *psIntfAdapter);
-
-#define DISABLE_USB_ZERO_LEN_INT 0x0F011878
-
-#endif /* __INTERFACE_MISC_H */
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
deleted file mode 100644
index 0f179b9382d3..000000000000
--- a/drivers/staging/bcm/InterfaceRx.c
+++ /dev/null
@@ -1,289 +0,0 @@
-#include "headers.h"
-
-static void handle_control_packet(struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ad,
- struct bcm_leader *leader,
- struct sk_buff *skb,
- struct urb *urb)
-{
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL,
- "Received control pkt...");
- *(PUSHORT)skb->data = leader->Status;
- memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
- (sizeof(struct bcm_leader)), leader->PLength);
- skb->len = leader->PLength + sizeof(USHORT);
-
- spin_lock(&ad->control_queue_lock);
- ENQUEUEPACKET(ad->RxControlHead, ad->RxControlTail, skb);
- spin_unlock(&ad->control_queue_lock);
-
- atomic_inc(&ad->cntrlpktCnt);
- wake_up(&ad->process_rx_cntrlpkt);
-}
-
-static void format_eth_hdr_to_stack(struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ad,
- struct bcm_leader *p_leader,
- struct sk_buff *skb,
- struct urb *urb,
- UINT ui_index,
- int queue_index,
- bool b_header_supression_endabled)
-{
- /*
- * Data Packet, Format a proper Ethernet Header
- * and give it to the stack
- */
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA,
- DBG_LVL_ALL, "Received Data pkt...");
- skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
- memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer +
- sizeof(struct bcm_leader), p_leader->PLength);
- skb->dev = ad->dev;
-
- /* currently skb->len has extra ETH_HLEN bytes in the beginning */
- skb_put(skb, p_leader->PLength + ETH_HLEN);
- ad->PackInfo[queue_index].uiTotalRxBytes += p_leader->PLength;
- ad->PackInfo[queue_index].uiThisPeriodRxBytes += p_leader->PLength;
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX, RX_DATA,
- DBG_LVL_ALL, "Received Data pkt of len :0x%X",
- p_leader->PLength);
-
- if (netif_running(ad->dev)) {
- /* Moving ahead by ETH_HLEN to the data ptr as received from FW */
- skb_pull(skb, ETH_HLEN);
- PHSReceive(ad, p_leader->Vcid, skb, &skb->len,
- NULL, b_header_supression_endabled);
-
- if (!ad->PackInfo[queue_index].bEthCSSupport) {
- skb_push(skb, ETH_HLEN);
-
- memcpy(skb->data, skb->dev->dev_addr, 6);
- memcpy(skb->data+6, skb->dev->dev_addr, 6);
- (*(skb->data+11))++;
- *(skb->data+12) = 0x08;
- *(skb->data+13) = 0x00;
- p_leader->PLength += ETH_HLEN;
- }
-
- skb->protocol = eth_type_trans(skb, ad->dev);
- netif_rx(skb);
- } else {
- BCM_DEBUG_PRINT(interface->psAdapter, DBG_TYPE_RX,
- RX_DATA, DBG_LVL_ALL,
-				"i/f not up hence freeing SKB...");
- dev_kfree_skb(skb);
- }
-
- ++ad->dev->stats.rx_packets;
- ad->dev->stats.rx_bytes += p_leader->PLength;
-
- for (ui_index = 0; ui_index < MIBS_MAX_HIST_ENTRIES; ui_index++) {
- if ((p_leader->PLength <=
- MIBS_PKTSIZEHIST_RANGE*(ui_index+1)) &&
- (p_leader->PLength > MIBS_PKTSIZEHIST_RANGE*(ui_index)))
-
- ad->aRxPktSizeHist[ui_index]++;
- }
-}
-
-static int SearchVcid(struct bcm_mini_adapter *Adapter, unsigned short usVcid)
-{
- int iIndex = 0;
-
- for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--)
- if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
- return iIndex;
- return NO_OF_QUEUES+1;
-
-}
-
-
-static struct bcm_usb_rcb *
-GetBulkInRcb(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct bcm_usb_rcb *pRcb = NULL;
- UINT index = 0;
-
- if ((atomic_read(&psIntfAdapter->uNumRcbUsed) < MAXIMUM_USB_RCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false)) {
- index = atomic_read(&psIntfAdapter->uCurrRcb);
- pRcb = &psIntfAdapter->asUsbRcb[index];
- pRcb->bUsed = TRUE;
- pRcb->psIntfAdapter = psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX, RX_DPC,
- DBG_LVL_ALL, "Got Rx desc %d used %d", index,
- atomic_read(&psIntfAdapter->uNumRcbUsed));
- index = (index + 1) % MAXIMUM_USB_RCB;
- atomic_set(&psIntfAdapter->uCurrRcb, index);
- atomic_inc(&psIntfAdapter->uNumRcbUsed);
- }
- return pRcb;
-}
-
-/* This is the receive callback - called when a pkt is available to receive (BULK IN end point) */
-static void read_bulk_callback(struct urb *urb)
-{
- struct sk_buff *skb = NULL;
- bool bHeaderSupressionEnabled = false;
- int QueueIndex = NO_OF_QUEUES + 1;
- UINT uiIndex = 0;
- struct bcm_usb_rcb *pRcb = (struct bcm_usb_rcb *)urb->context;
- struct bcm_interface_adapter *psIntfAdapter = pRcb->psIntfAdapter;
- struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter;
- struct bcm_leader *pLeader = urb->transfer_buffer;
-
- if (unlikely(netif_msg_rx_status(Adapter)))
- pr_info(PFX "%s: rx urb status %d length %d\n",
- Adapter->dev->name, urb->status, urb->actual_length);
-
- if ((Adapter->device_removed == TRUE) ||
- (TRUE == Adapter->bEndPointHalted) ||
- (0 == urb->actual_length)) {
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
-
- if (urb->status != STATUS_SUCCESS) {
- if (urb->status == -EPIPE) {
- Adapter->bEndPointHalted = TRUE;
- wake_up(&Adapter->tx_packet_wait_queue);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC,
- DBG_LVL_ALL,
- "Rx URB has got cancelled. status :%d",
- urb->status);
- }
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- urb->status = STATUS_SUCCESS;
- return;
- }
-
- if (Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
-			"device is going into low power mode while PMU option is selected, hence rx packet should not be processed");
- return;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Read back done len %d\n", pLeader->PLength);
- if (!pLeader->PLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Leader Length 0");
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,
- "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX",
- pLeader->Status, pLeader->PLength, pLeader->Vcid);
- if (MAX_CNTL_PKT_SIZE < pLeader->PLength) {
- if (netif_msg_rx_err(Adapter))
- pr_info(PFX "%s: corrupted leader length...%d\n",
- Adapter->dev->name, pLeader->PLength);
- ++Adapter->dev->stats.rx_dropped;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
-
- QueueIndex = SearchVcid(Adapter, pLeader->Vcid);
- if (QueueIndex < NO_OF_QUEUES) {
- bHeaderSupressionEnabled =
- Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
- bHeaderSupressionEnabled =
- bHeaderSupressionEnabled & Adapter->bPHSEnabled;
- }
-
- skb = dev_alloc_skb(pLeader->PLength + SKB_RESERVE_PHS_BYTES +
- SKB_RESERVE_ETHERNET_HEADER);
- if (!skb) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "NO SKBUFF!!! Dropping the Packet");
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
- return;
- }
-	/* If it is a control packet, then call handle_control_packet() */
- if ((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
- (!(pLeader->Status >= 0x20 && pLeader->Status <= 0x3F))) {
- handle_control_packet(psIntfAdapter, Adapter, pLeader, skb,
- urb);
- } else {
- format_eth_hdr_to_stack(psIntfAdapter, Adapter, pLeader, skb,
- urb, uiIndex, QueueIndex,
- bHeaderSupressionEnabled);
- }
- Adapter->PrevNumRecvDescs++;
- pRcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumRcbUsed);
-}
-
-static int ReceiveRcb(struct bcm_interface_adapter *psIntfAdapter,
- struct bcm_usb_rcb *pRcb)
-{
- struct urb *urb = pRcb->urb;
- int retval = 0;
-
- usb_fill_bulk_urb(urb, psIntfAdapter->udev,
- usb_rcvbulkpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr),
- urb->transfer_buffer,
- BCM_USB_MAX_READ_LENGTH,
- read_bulk_callback, pRcb);
-
- if (false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend) {
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_RX,
- RX_DPC, DBG_LVL_ALL,
- "failed submitting read urb, error %d",
- retval);
-			/* If this return value is because of a pipe halt, we need to clear it. */
- if (retval == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
-
- }
- }
- return retval;
-}
-
-/*
- * Function: InterfaceRx
- *
- * Description: This is the hardware specific function for receiving
- *		data packets/control packets from the device.
- *
- * Input parameter: struct bcm_interface_adapter *psIntfAdapter - interface adapter context
- *
- * Return: TRUE  - if Rx was successful.
- *	   Other - if an error occurred.
- */
-
-bool InterfaceRx(struct bcm_interface_adapter *psIntfAdapter)
-{
- USHORT RxDescCount = NUM_RX_DESC -
- atomic_read(&psIntfAdapter->uNumRcbUsed);
-
- struct bcm_usb_rcb *pRcb = NULL;
-
- while (RxDescCount) {
- pRcb = GetBulkInRcb(psIntfAdapter);
- if (pRcb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
- DBG_TYPE_PRINTK, 0, 0,
- "Unable to get Rcb pointer");
- return false;
- }
- ReceiveRcb(psIntfAdapter, pRcb);
- RxDescCount--;
- }
- return TRUE;
-}
-
diff --git a/drivers/staging/bcm/InterfaceRx.h b/drivers/staging/bcm/InterfaceRx.h
deleted file mode 100644
index b4e858bcda34..000000000000
--- a/drivers/staging/bcm/InterfaceRx.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _INTERFACE_RX_H
-#define _INTERFACE_RX_H
-
-bool InterfaceRx(struct bcm_interface_adapter *Adapter);
-
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
deleted file mode 100644
index 9b3f64b821ed..000000000000
--- a/drivers/staging/bcm/InterfaceTx.c
+++ /dev/null
@@ -1,213 +0,0 @@
-#include "headers.h"
-
-static void prepare_low_power_mode(struct urb *urb,
- struct bcm_interface_adapter *interface,
- struct bcm_mini_adapter *ps_adapter,
- struct bcm_mini_adapter *ad,
- struct bcm_link_request *p_control_msg,
- bool *b_power_down_msg)
-{
- if (((p_control_msg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
- (p_control_msg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE))) {
-
- *b_power_down_msg = TRUE;
- /*
-		 * This covers a bus error while the Idle Request msg
-		 * was being sent down.
- */
- if (urb->status != STATUS_SUCCESS) {
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Idle Mode Request msg failed to reach to Modem");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- StartInterruptUrb(interface);
- return;
- }
-
- if (ps_adapter->bDoSuspend == false) {
- ps_adapter->IdleMode = TRUE;
-			/* Entering Idle mode has completed, hence clear this flag */
- ps_adapter->bPreparingForLowPowerMode = false;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Host Entered in Idle Mode State...");
- /* Signalling the cntrl pkt path in Ioctl*/
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- }
-
- } else if ((p_control_msg->Leader.Status == LINK_UP_CONTROL_REQ) &&
- (p_control_msg->szData[0] == LINK_UP_ACK) &&
- (p_control_msg->szData[1] == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) &&
- (p_control_msg->szData[2] == SHUTDOWN_ACK_FROM_DRIVER)) {
- /*
-		 * This covers a bus error while the shutdown Request
-		 * msg was being sent down.
- */
- if (urb->status != STATUS_SUCCESS) {
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Shutdown Request Msg failed to reach to Modem");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- StartInterruptUrb(interface);
- return;
- }
-
- *b_power_down_msg = TRUE;
- if (ps_adapter->bDoSuspend == false) {
- ps_adapter->bShutStatus = TRUE;
- /*
-			 * Entering shutdown mode has completed, hence
-			 * clear this flag
- */
- ps_adapter->bPreparingForLowPowerMode = false;
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Host Entered in shutdown Mode State...");
- /* Signalling the cntrl pkt path in Ioctl */
- wake_up(&ps_adapter->lowpower_mode_wait_queue);
- }
- }
-
- if (ps_adapter->bDoSuspend && *b_power_down_msg) {
- /* issuing bus suspend request */
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "Issuing the Bus suspend request to USB stack");
- interface->bPreparingForBusSuspend = TRUE;
- schedule_work(&interface->usbSuspendWork);
- }
-}
-
-/* This is the transmit callback (BULK OUT) */
-static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
-{
- struct bcm_usb_tcb *pTcb = (struct bcm_usb_tcb *)urb->context;
- struct bcm_interface_adapter *psIntfAdapter = pTcb->psIntfAdapter;
- struct bcm_link_request *pControlMsg =
- (struct bcm_link_request *)urb->transfer_buffer;
- struct bcm_mini_adapter *psAdapter = psIntfAdapter->psAdapter;
- bool bpowerDownMsg = false;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (unlikely(netif_msg_tx_done(Adapter)))
- pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name,
- urb->status);
-
- if (urb->status != STATUS_SUCCESS) {
- if (urb->status == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "Tx URB has got cancelled. status :%d",
- urb->status);
- }
- }
-
- pTcb->bUsed = false;
- atomic_dec(&psIntfAdapter->uNumTcbUsed);
-
- if (TRUE == psAdapter->bPreparingForLowPowerMode) {
- prepare_low_power_mode(urb, psIntfAdapter, psAdapter, Adapter,
- pControlMsg, &bpowerDownMsg);
- }
-
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
-}
-
-
-static struct bcm_usb_tcb *GetBulkOutTcb(struct bcm_interface_adapter *psIntfAdapter)
-{
- struct bcm_usb_tcb *pTcb = NULL;
- UINT index = 0;
-
- if ((atomic_read(&psIntfAdapter->uNumTcbUsed) < MAXIMUM_USB_TCB) &&
- (psIntfAdapter->psAdapter->StopAllXaction == false)) {
- index = atomic_read(&psIntfAdapter->uCurrTcb);
- pTcb = &psIntfAdapter->asUsbTcb[index];
- pTcb->bUsed = TRUE;
- pTcb->psIntfAdapter = psIntfAdapter;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX,
- NEXT_SEND, DBG_LVL_ALL,
- "Got Tx desc %d used %d",
- index,
- atomic_read(&psIntfAdapter->uNumTcbUsed));
- index = (index + 1) % MAXIMUM_USB_TCB;
- atomic_set(&psIntfAdapter->uCurrTcb, index);
- atomic_inc(&psIntfAdapter->uNumTcbUsed);
- }
- return pTcb;
-}
-
-static int TransmitTcb(struct bcm_interface_adapter *psIntfAdapter,
- struct bcm_usb_tcb *pTcb, PVOID data, int len)
-{
-
- struct urb *urb = pTcb->urb;
- int retval = 0;
-
- urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
- GFP_ATOMIC, &urb->transfer_dma);
- if (!urb->transfer_buffer) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "Error allocating memory\n");
- return -ENOMEM;
- }
- memcpy(urb->transfer_buffer, data, len);
- urb->transfer_buffer_length = len;
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL, "Sending Bulk out packet\n");
-	/* For T3B, the INT OUT end point will be used as the bulk out end point */
- if ((psIntfAdapter->psAdapter->chip_id == T3B) &&
- (psIntfAdapter->bHighSpeedDevice == TRUE)) {
- usb_fill_int_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
- urb->transfer_buffer, len, write_bulk_callback, pTcb,
- psIntfAdapter->sBulkOut.int_out_interval);
- } else {
- usb_fill_bulk_urb(urb, psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_pipe,
- urb->transfer_buffer, len, write_bulk_callback, pTcb);
- }
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* For DMA transfer */
-
- if (false == psIntfAdapter->psAdapter->device_removed &&
- false == psIntfAdapter->psAdapter->bEndPointHalted &&
- false == psIntfAdapter->bSuspended &&
- false == psIntfAdapter->bPreparingForBusSuspend) {
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_TX,
- NEXT_SEND, DBG_LVL_ALL,
- "failed submitting write urb, error %d",
- retval);
- if (retval == -EPIPE) {
- psIntfAdapter->psAdapter->bEndPointHalted = TRUE;
- wake_up(&psIntfAdapter->psAdapter->tx_packet_wait_queue);
- }
- }
- }
- return retval;
-}
-
-int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
-{
- struct bcm_usb_tcb *pTcb = NULL;
- struct bcm_interface_adapter *psIntfAdapter = arg;
-
- pTcb = GetBulkOutTcb(psIntfAdapter);
- if (pTcb == NULL) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
- "No URB to transmit packet, dropping packet");
- return -EFAULT;
- }
- return TransmitTcb(psIntfAdapter, pTcb, data, len);
-}
-
diff --git a/drivers/staging/bcm/InterfaceTx.h b/drivers/staging/bcm/InterfaceTx.h
deleted file mode 100644
index 273147577c17..000000000000
--- a/drivers/staging/bcm/InterfaceTx.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _INTERFACE_TX_H
-#define _INTERFACE_TX_H
-
-INT InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len);
-
-#endif
-
diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h
deleted file mode 100644
index fa5f8671612e..000000000000
--- a/drivers/staging/bcm/Ioctl.h
+++ /dev/null
@@ -1,226 +0,0 @@
-#ifndef _IOCTL_H_
-#define _IOCTL_H_
-
-struct bcm_rdm_buffer {
- unsigned long Register;
- unsigned long Length;
-} __packed;
-
-struct bcm_wrm_buffer {
- unsigned long Register;
- unsigned long Length;
- unsigned char Data[4];
-} __packed;
-
-struct bcm_ioctl_buffer {
- void __user *InputBuffer;
- unsigned long InputLength;
- void __user *OutputBuffer;
- unsigned long OutputLength;
-} __packed;
-
-struct bcm_gpio_info {
- unsigned int uiGpioNumber; /* valid numbers 0-15 */
- unsigned int uiGpioValue; /* 1 set ; 0 not set */
-} __packed;
-
-struct bcm_user_thread_req {
- /* 0->Inactivate LED thread. */
- /* 1->Activate the LED thread */
- unsigned int ThreadState;
-} __packed;
-
-#define LED_THREAD_ACTIVATION_REQ 1
-#define BCM_IOCTL 'k'
-#define IOCTL_SEND_CONTROL_MESSAGE _IOW(BCM_IOCTL, 0x801, int)
-#define IOCTL_BCM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x802, int)
-#define IOCTL_BCM_REGISTER_READ _IOR(BCM_IOCTL, 0x803, int)
-#define IOCTL_BCM_COMMON_MEMORY_WRITE _IOW(BCM_IOCTL, 0x804, int)
-#define IOCTL_BCM_COMMON_MEMORY_READ _IOR(BCM_IOCTL, 0x805, int)
-#define IOCTL_GET_CONTROL_MESSAGE _IOR(BCM_IOCTL, 0x806, int)
-#define IOCTL_BCM_FIRMWARE_DOWNLOAD _IOW(BCM_IOCTL, 0x807, int)
-#define IOCTL_BCM_SET_SEND_VCID _IOW(BCM_IOCTL, 0x808, int)
-#define IOCTL_BCM_SWITCH_TRANSFER_MODE _IOW(BCM_IOCTL, 0x809, int)
-#define IOCTL_LINK_REQ _IOW(BCM_IOCTL, 0x80A, int)
-#define IOCTL_RSSI_LEVEL_REQ _IOW(BCM_IOCTL, 0x80B, int)
-#define IOCTL_IDLE_REQ _IOW(BCM_IOCTL, 0x80C, int)
-#define IOCTL_SS_INFO_REQ _IOW(BCM_IOCTL, 0x80D, int)
-#define IOCTL_GET_STATISTICS_POINTER _IOW(BCM_IOCTL, 0x80E, int)
-#define IOCTL_CM_REQUEST _IOW(BCM_IOCTL, 0x80F, int)
-#define IOCTL_INIT_PARAM_REQ _IOW(BCM_IOCTL, 0x810, int)
-#define IOCTL_MAC_ADDR_REQ _IOW(BCM_IOCTL, 0x811, int)
-#define IOCTL_MAC_ADDR_RESP _IOWR(BCM_IOCTL, 0x812, int)
-#define IOCTL_CLASSIFICATION_RULE _IOW(BCM_IOCTL, 0x813, char)
-#define IOCTL_CLOSE_NOTIFICATION _IO(BCM_IOCTL, 0x814)
-#define IOCTL_LINK_UP _IO(BCM_IOCTL, 0x815)
-#define IOCTL_LINK_DOWN _IO(BCM_IOCTL, 0x816, struct bcm_ioctl_buffer)
-#define IOCTL_CHIP_RESET _IO(BCM_IOCTL, 0x816)
-#define IOCTL_CINR_LEVEL_REQ _IOW(BCM_IOCTL, 0x817, char)
-#define IOCTL_WTM_CONTROL_REQ _IOW(BCM_IOCTL, 0x817, char)
-#define IOCTL_BE_BUCKET_SIZE _IOW(BCM_IOCTL, 0x818, unsigned long)
-#define IOCTL_RTPS_BUCKET_SIZE _IOW(BCM_IOCTL, 0x819, unsigned long)
-#define IOCTL_QOS_THRESHOLD _IOW(BCM_IOCTL, 0x820, unsigned long)
-#define IOCTL_DUMP_PACKET_INFO _IO(BCM_IOCTL, 0x821)
-#define IOCTL_GET_PACK_INFO _IOR(BCM_IOCTL, 0x823, int)
-#define IOCTL_BCM_GET_DRIVER_VERSION _IOR(BCM_IOCTL, 0x829, int)
-#define IOCTL_BCM_GET_CURRENT_STATUS _IOW(BCM_IOCTL, 0x828, int)
-#define IOCTL_BCM_GPIO_SET_REQUEST _IOW(BCM_IOCTL, 0x82A, int)
-#define IOCTL_BCM_GPIO_STATUS_REQUEST _IOW(BCM_IOCTL, 0x82b, int)
-#define IOCTL_BCM_GET_DSX_INDICATION _IOR(BCM_IOCTL, 0x854, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD_START _IOW(BCM_IOCTL, 0x855, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD _IOW(BCM_IOCTL, 0x856, int)
-#define IOCTL_BCM_BUFFER_DOWNLOAD_STOP _IOW(BCM_IOCTL, 0x857, int)
-#define IOCTL_BCM_REGISTER_WRITE_PRIVATE _IOW(BCM_IOCTL, 0x826, char)
-#define IOCTL_BCM_REGISTER_READ_PRIVATE _IOW(BCM_IOCTL, 0x827, char)
-#define IOCTL_BCM_SET_DEBUG _IOW(BCM_IOCTL, 0x824, struct bcm_ioctl_buffer)
-#define IOCTL_BCM_EEPROM_REGISTER_WRITE _IOW(BCM_IOCTL, 0x858, int)
-#define IOCTL_BCM_EEPROM_REGISTER_READ _IOR(BCM_IOCTL, 0x859, int)
-#define IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE _IOR(BCM_IOCTL, 0x860, int)
-#define IOCTL_BCM_SET_MAC_TRACING _IOW(BCM_IOCTL, 0x82c, int)
-#define IOCTL_BCM_GET_HOST_MIBS _IOW(BCM_IOCTL, 0x853, int)
-#define IOCTL_BCM_NVM_READ _IOR(BCM_IOCTL, 0x861, int)
-#define IOCTL_BCM_NVM_WRITE _IOW(BCM_IOCTL, 0x862, int)
-#define IOCTL_BCM_GET_NVM_SIZE _IOR(BCM_IOCTL, 0x863, int)
-#define IOCTL_BCM_CAL_INIT _IOR(BCM_IOCTL, 0x864, int)
-#define IOCTL_BCM_BULK_WRM _IOW(BCM_IOCTL, 0x90B, int)
-#define IOCTL_BCM_FLASH2X_SECTION_READ _IOR(BCM_IOCTL, 0x865, int)
-#define IOCTL_BCM_FLASH2X_SECTION_WRITE _IOW(BCM_IOCTL, 0x866, int)
-#define IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP _IOR(BCM_IOCTL, 0x867, int)
-#define IOCTL_BCM_SET_ACTIVE_SECTION _IOW(BCM_IOCTL, 0x868, int)
-#define IOCTL_BCM_IDENTIFY_ACTIVE_SECTION _IO(BCM_IOCTL, 0x869)
-#define IOCTL_BCM_COPY_SECTION _IOW(BCM_IOCTL, 0x870, int)
-#define IOCTL_BCM_GET_FLASH_CS_INFO _IOR(BCM_IOCTL, 0x871, int)
-#define IOCTL_BCM_SELECT_DSD _IOW(BCM_IOCTL, 0x872, int)
-#define IOCTL_BCM_NVM_RAW_READ _IOR(BCM_IOCTL, 0x875, int)
-#define IOCTL_BCM_CNTRLMSG_MASK _IOW(BCM_IOCTL, 0x874, int)
-#define IOCTL_BCM_GET_DEVICE_DRIVER_INFO _IOR(BCM_IOCTL, 0x877, int)
-#define IOCTL_BCM_TIME_SINCE_NET_ENTRY _IOR(BCM_IOCTL, 0x876, int)
-#define BCM_LED_THREAD_STATE_CHANGE_REQ _IOW(BCM_IOCTL, 0x878, int)
-#define IOCTL_BCM_GPIO_MULTI_REQUEST _IOW(BCM_IOCTL, 0x82D, struct bcm_ioctl_buffer)
-#define IOCTL_BCM_GPIO_MODE_REQUEST _IOW(BCM_IOCTL, 0x82E, struct bcm_ioctl_buffer)
-
-enum bcm_interface_type {
- BCM_MII,
- BCM_CARDBUS,
- BCM_USB,
- BCM_SDIO,
- BCM_PCMCIA
-};
-
-struct bcm_driver_info {
- enum bcm_nvm_type u32NVMType;
- unsigned int MaxRDMBufferSize;
- enum bcm_interface_type u32InterfaceType;
- unsigned int u32DSDStartOffset;
- unsigned int u32RxAlignmentCorrection;
- unsigned int u32Reserved[10];
-};
-
-struct bcm_nvm_readwrite {
- void __user *pBuffer;
- uint32_t uiOffset;
- uint32_t uiNumBytes;
- bool bVerify;
-};
-
-struct bcm_bulk_wrm_buffer {
- unsigned long Register;
- unsigned long SwapEndian;
- unsigned long Values[1];
-};
-
-enum bcm_flash2x_section_val {
- NO_SECTION_VAL = 0, /* no section chosen when absolute offset is given for RD/WR */
- ISO_IMAGE1,
- ISO_IMAGE2,
- DSD0,
- DSD1,
- DSD2,
- VSA0,
- VSA1,
- VSA2,
- SCSI,
- CONTROL_SECTION,
- ISO_IMAGE1_PART2,
- ISO_IMAGE1_PART3,
- ISO_IMAGE2_PART2,
- ISO_IMAGE2_PART3,
- TOTAL_SECTIONS
-};
-
-/*
- * Structure used for READ/WRITE Flash Map2.x
- */
-struct bcm_flash2x_readwrite {
- enum bcm_flash2x_section_val Section; /* section to be read/written */
- u32 offset; /* offset within section. */
- u32 numOfBytes; /* number of bytes from the offset */
- u32 bVerify;
- void __user *pDataBuff; /* buffer for reading/writing */
-};
-
-/*
- * This structure is used for copying one section to another.
- * There are two ways to copy one section to another:
- * if NOB == 0, the complete section will be copied to the other;
- * if NOB != 0, only NOB bytes will be copied from the given offset.
- */
-
-struct bcm_flash2x_copy_section {
- enum bcm_flash2x_section_val SrcSection;
- enum bcm_flash2x_section_val DstSection;
- u32 offset;
- u32 numOfBytes;
-};
-
-/*
- * This structure provides the complete bitmap of the Flash; using this
- * map, the library/application issues read/write commands.
- * Each per-section byte is defined as:
- * Bit [0] = section is present   // 1: present, 0: not present
- * Bit [1] = section is valid     // 1: valid, 0: not valid
- * Bit [2] = section is R/W       // 0: RW, 1: RO
- * Bit [3] = section is active    // 1: active, 0: inactive
- * Bit [7...4] = reserved
- */
-
-struct bcm_flash2x_bitmap {
- unsigned char ISO_IMAGE1;
- unsigned char ISO_IMAGE2;
- unsigned char DSD0;
- unsigned char DSD1;
- unsigned char DSD2;
- unsigned char VSA0;
- unsigned char VSA1;
- unsigned char VSA2;
- unsigned char SCSI;
- unsigned char CONTROL_SECTION;
- /* Reserved for future use */
- unsigned char Reserved0;
- unsigned char Reserved1;
- unsigned char Reserved2;
-};
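(Illustrative aside, not driver code: one way userspace could decode a per-section byte of struct bcm_flash2x_bitmap according to the bit layout documented above; the names bcm_section_status and decode_section_byte are hypothetical.)

#include <stdbool.h>

struct bcm_section_status {
	bool present;	/* bit 0: section present in flash      */
	bool valid;	/* bit 1: section contents are valid    */
	bool read_only;	/* bit 2: 1 = read-only, 0 = read/write */
	bool active;	/* bit 3: section is the active one     */
};

static struct bcm_section_status decode_section_byte(unsigned char b)
{
	struct bcm_section_status s = {
		.present   = (b & 0x01) != 0,
		.valid     = (b & 0x02) != 0,
		.read_only = (b & 0x04) != 0,
		.active    = (b & 0x08) != 0,
	};

	return s;
}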
-
-struct bcm_time_elapsed {
- u64 ul64TimeElapsedSinceNetEntry;
- u32 uiReserved[4];
-};
-
-enum {
- WIMAX_IDX = 0, /* To access WiMAX chip GPIO's for GPIO_MULTI_INFO or GPIO_MULTI_MODE */
- HOST_IDX, /* To access Host chip GPIO's for GPIO_MULTI_INFO or GPIO_MULTI_MODE */
- MAX_IDX
-};
-
-struct bcm_gpio_multi_info {
- unsigned int uiGPIOCommand; /* 1 for set and 0 for get */
- unsigned int uiGPIOMask; /* set the corresponding bit to 1 to access GPIO */
- unsigned int uiGPIOValue; /* 0 or 1; value to be set when command is 1. */
-} __packed;
-
-struct bcm_gpio_multi_mode {
- unsigned int uiGPIOMode; /* 1 for OUT mode, 0 for IN mode */
- unsigned int uiGPIOMask; /* GPIO mask to set mode */
-} __packed;
-
-#endif
diff --git a/drivers/staging/bcm/Kconfig b/drivers/staging/bcm/Kconfig
deleted file mode 100644
index 8acf4b24a7c9..000000000000
--- a/drivers/staging/bcm/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config BCM_WIMAX
- tristate "Beceem BCS200/BCS220-3 and BCSM250 wimax support"
- depends on USB && NET
- help
- This is an experimental driver for the Beceem WIMAX chipset used
- by Sprint 4G.
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
deleted file mode 100644
index d6b55f993b57..000000000000
--- a/drivers/staging/bcm/LeakyBucket.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/**********************************************************************
-* LEAKYBUCKET.C
-* This file contains the routines related to the Leaky Bucket Algorithm.
-***********************************************************************/
-#include "headers.h"
-
-/**
- * UpdateTokenCount() - Calculates the token count for each channel
- * and updates it in the Adapter structure
- * @Adapter: Pointer to the Adapter structure.
- *
- * Return: None
- */
-static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
-{
- ULONG liCurrentTime;
- INT i = 0;
- struct timeval tv;
- struct bcm_packet_info *curr_pi;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "=====>\n");
- if (NULL == Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
- DBG_LVL_ALL, "Adapter found NULL!\n");
- return;
- }
-
- do_gettimeofday(&tv);
- for (i = 0; i < NO_OF_QUEUES; i++) {
- curr_pi = &Adapter->PackInfo[i];
-
- if (TRUE == curr_pi->bValid && (1 == curr_pi->ucDirection)) {
- liCurrentTime = ((tv.tv_sec -
- curr_pi->stLastUpdateTokenAt.tv_sec)*1000 +
- (tv.tv_usec - curr_pi->stLastUpdateTokenAt.tv_usec) /
- 1000);
- if (0 != liCurrentTime) {
- curr_pi->uiCurrentTokenCount += (ULONG)
- ((curr_pi->uiMaxAllowedRate) *
- ((ULONG)((liCurrentTime)))/1000);
- memcpy(&curr_pi->stLastUpdateTokenAt, &tv,
- sizeof(struct timeval));
- curr_pi->liLastUpdateTokenAt = liCurrentTime;
- if (curr_pi->uiCurrentTokenCount >=
- curr_pi->uiMaxBucketSize) {
- curr_pi->uiCurrentTokenCount =
- curr_pi->uiMaxBucketSize;
- }
- }
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "<=====\n");
-}
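(Illustrative aside, not driver code: the refill above reduces to "tokens += rate * elapsed_ms / 1000", clamped to the bucket size. The standalone sketch below uses simplified stand-ins, struct leaky_bucket and refill(), for the uiCurrentTokenCount, uiMaxAllowedRate and uiMaxBucketSize fields of struct bcm_packet_info.)

struct leaky_bucket {
	unsigned long tokens;		/* current token count           */
	unsigned long rate;		/* allowed fill rate, per second */
	unsigned long bucket_size;	/* token ceiling (maximum burst) */
};

static void refill(struct leaky_bucket *lb, unsigned long elapsed_ms)
{
	lb->tokens += lb->rate * elapsed_ms / 1000;
	if (lb->tokens > lb->bucket_size)
		lb->tokens = lb->bucket_size;
}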
-
-
-/**
- * GetSFTokenCount() - Checks whether packets from the given queue can
- * be allowed for transmission, based on the available token count.
- * @Adapter: Pointer to the Adapter structure.
- * @psSF: Pointer to the packet info (queue) being checked.
- *
- * Returns: The number of bytes allowed for transmission, or 0 if the
- * queue has no tokens or is not valid.
- */
-static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IsPacketAllowedForFlow ===>");
-
- /* Validate the parameters */
-	if (NULL == Adapter || psSF < Adapter->PackInfo ||
-	    (uintptr_t)psSF > (uintptr_t)&Adapter->PackInfo[HiPriority]) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n",
- Adapter, (psSF-Adapter->PackInfo));
- return 0;
- }
-
- if (false != psSF->bValid && psSF->ucDirection) {
- if (0 != psSF->uiCurrentTokenCount) {
- return psSF->uiCurrentTokenCount;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
- DBG_LVL_ALL,
- "Not enough tokens in queue %zd Available %u\n",
- psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
- psSF->uiPendedLast = 1;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IPAFF: Queue %zd not valid\n",
- psSF-Adapter->PackInfo);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
- "IsPacketAllowedForFlow <===");
- return 0;
-}
-
-/**
-@ingroup tx_functions
-This function dispatches a packet from the specified queue.
-@return Zero (success) or a negative value (failure)
-*/
-static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
- struct bcm_packet_info *psSF, /**<Queue identifier*/
- struct sk_buff *Packet) /**<Pointer to the packet to be sent*/
-{
- INT Status = STATUS_FAILURE;
- UINT uiIndex = 0, PktLen = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "=====>");
- if (!Adapter || !Packet || !psSF) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "Got NULL Adapter or Packet");
- return -EINVAL;
- }
-
- if (psSF->liDrainCalculated == 0)
- psSF->liDrainCalculated = jiffies;
- /* send the packet to the fifo.. */
- PktLen = Packet->len;
- Status = SetupNextSend(Adapter, Packet, psSF->usVCID_Value);
- if (Status == 0) {
- for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
- if ((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) &&
- (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
- Adapter->aTxPktSizeHist[uiIndex]++;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL,
- "<=====");
- return Status;
-}
-
-static void get_data_packet(struct bcm_mini_adapter *ad,
- struct bcm_packet_info *ps_sf)
-{
- int packet_len;
- struct sk_buff *qpacket;
-
- if (!ps_sf->ucDirection)
- return;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "UpdateTokenCount ");
- if (ad->IdleMode || ad->bPreparingForLowPowerMode)
- return; /* in idle mode */
-
- /* Check for Free Descriptors */
- if (atomic_read(&ad->CurrNumFreeTxDesc) <=
- MINIMUM_PENDING_DESCRIPTORS) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- " No Free Tx Descriptor(%d) is available for Data pkt..",
- atomic_read(&ad->CurrNumFreeTxDesc));
- return;
- }
-
- spin_lock_bh(&ps_sf->SFQueueLock);
- qpacket = ps_sf->FirstTxQueue;
-
- if (qpacket) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Dequeuing Data Packet");
-
- if (ps_sf->bEthCSSupport)
- packet_len = qpacket->len;
- else
- packet_len = qpacket->len - ETH_HLEN;
-
- packet_len <<= 3;
- if (packet_len <= GetSFTokenCount(ad, ps_sf)) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "Allowed bytes %d",
- (packet_len >> 3));
-
- DEQUEUEPACKET(ps_sf->FirstTxQueue, ps_sf->LastTxQueue);
- ps_sf->uiCurrentBytesOnHost -= (qpacket->len);
- ps_sf->uiCurrentPacketsOnHost--;
- atomic_dec(&ad->TotalPacketCount);
- spin_unlock_bh(&ps_sf->SFQueueLock);
-
- SendPacketFromQueue(ad, ps_sf, qpacket);
- ps_sf->uiPendedLast = false;
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "For Queue: %zd\n",
- ps_sf - ad->PackInfo);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "\nAvailable Tokens = %d required = %d\n",
- ps_sf->uiCurrentTokenCount,
- packet_len);
- /*
-			 * Because no tokens are available, the packet has
-			 * not been sent out; set the pending flag so the
-			 * host sends it out first in the next iteration.
-			 */
- ps_sf->uiPendedLast = TRUE;
- spin_unlock_bh(&ps_sf->SFQueueLock);
- }
- } else {
- spin_unlock_bh(&ps_sf->SFQueueLock);
- }
-}
-
-static void send_control_packet(struct bcm_mini_adapter *ad,
- struct bcm_packet_info *ps_sf)
-{
- char *ctrl_packet = NULL;
- INT status = 0;
-
- if ((atomic_read(&ad->CurrNumFreeTxDesc) > 0) &&
- (atomic_read(&ad->index_rd_txcntrlpkt) !=
- atomic_read(&ad->index_wr_txcntrlpkt))) {
- ctrl_packet = ad->txctlpacket
- [(atomic_read(&ad->index_rd_txcntrlpkt)%MAX_CNTRL_PKTS)];
- if (ctrl_packet) {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "Sending Control packet");
- status = SendControlPacket(ad, ctrl_packet);
- if (STATUS_SUCCESS == status) {
- spin_lock_bh(&ps_sf->SFQueueLock);
- ps_sf->NumOfPacketsSent++;
- ps_sf->uiSentBytes += ((struct bcm_leader *)ctrl_packet)->PLength;
- ps_sf->uiSentPackets++;
- atomic_dec(&ad->TotalPacketCount);
- ps_sf->uiCurrentBytesOnHost -= ((struct bcm_leader *)ctrl_packet)->PLength;
- ps_sf->uiCurrentPacketsOnHost--;
- atomic_inc(&ad->index_rd_txcntrlpkt);
- spin_unlock_bh(&ps_sf->SFQueueLock);
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "SendControlPacket Failed\n");
- }
- } else {
- BCM_DEBUG_PRINT(ad, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- " Control Pkt is not available, Indexing is wrong....");
- }
- }
-}
-
-/**
- * CheckAndSendPacketFromIndex() - This function dequeues the
- * data/control packet from the specified queue for transmission.
- * @Adapter: Pointer to the driver control structure.
- * @iQIndex: The queue Identifier.
- *
- * Returns: None.
- */
-static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter,
- struct bcm_packet_info *psSF)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "%zd ====>", (psSF-Adapter->PackInfo));
- if ((psSF != &Adapter->PackInfo[HiPriority]) &&
- Adapter->LinkUpStatus &&
- atomic_read(&psSF->uiPerSFTxResourceCount)) { /* Get data packet */
-
- get_data_packet(Adapter, psSF);
- } else {
- send_control_packet(Adapter, psSF);
- }
-}
-
-
-/**
- * transmit_packets() - This function transmits the packets from
- * different queues, if free descriptors are available on target.
- * @Adapter: Pointer to the Adapter structure.
- *
- * Returns: None.
- */
-VOID transmit_packets(struct bcm_mini_adapter *Adapter)
-{
- UINT uiPrevTotalCount = 0;
- int iIndex = 0;
-
- bool exit_flag = TRUE;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "=====>");
-
- if (NULL == Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Got NULL Adapter");
- return;
- }
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Device removed");
- return;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "\nUpdateTokenCount ====>\n");
-
- UpdateTokenCount(Adapter);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "\nPruneQueueAllSF ====>\n");
-
- PruneQueueAllSF(Adapter);
-
- uiPrevTotalCount = atomic_read(&Adapter->TotalPacketCount);
-
- for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
- if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
- break;
-
- if (Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiPendedLast &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL,
- "Calling CheckAndSendPacketFromIndex..");
- CheckAndSendPacketFromIndex(Adapter,
- &Adapter->PackInfo[iIndex]);
- uiPrevTotalCount--;
- }
- }
-
- while (uiPrevTotalCount > 0 && !Adapter->device_removed) {
- exit_flag = TRUE;
- /* second iteration to parse non-pending queues */
- for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
- if (!uiPrevTotalCount ||
- (TRUE == Adapter->device_removed))
- break;
-
- if (Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
- !Adapter->PackInfo[iIndex].uiPendedLast) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX,
- TX_PACKETS, DBG_LVL_ALL,
- "Calling CheckAndSendPacketFromIndex..");
- CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
- uiPrevTotalCount--;
- exit_flag = false;
- }
- }
-
- if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "In Idle Mode\n");
- break;
- }
- if (exit_flag == TRUE)
- break;
- } /* end of inner while loop */
-
- update_per_cid_rx(Adapter);
- Adapter->txtransmit_running = 0;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "<======");
-}
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
deleted file mode 100644
index dc01e3016d4f..000000000000
--- a/drivers/staging/bcm/Macros.h
+++ /dev/null
@@ -1,352 +0,0 @@
-/*************************************
-* Macros.h
-**************************************/
-#ifndef __MACROS_H__
-#define __MACROS_H__
-
-#define TX_TIMER_PERIOD 10 /*10 msec*/
-#define MAX_CLASSIFIERS 100
-#define MAX_TARGET_DSX_BUFFERS 24
-
-#define MAX_CNTRL_PKTS 100
-#define MAX_DATA_PKTS 200
-#define MAX_ETH_SIZE 1536
-#define MAX_CNTL_PKT_SIZE 2048
-
-#define MTU_SIZE 1400
-#define TX_QLEN 5
-
-#define MAC_ADDR_REGISTER 0xbf60d000
-
-
-/* Quality of Service */
-#define NO_OF_QUEUES 17
-#define HiPriority (NO_OF_QUEUES-1)
-#define LowPriority 0
-#define BE 2
-#define rtPS 4
-#define ERTPS 5
-#define UGS 6
-
-#define BE_BUCKET_SIZE (1024*1024*100) /* 100 MB */
-#define rtPS_BUCKET_SIZE (1024*1024*100) /* 100 MB */
-#define MAX_ALLOWED_RATE (1024*1024*100)
-#define TX_PACKET_THRESHOLD 10
-#define XSECONDS (1*HZ)
-#define DSC_ACTIVATE_REQUEST 248
-#define QUEUE_DEPTH_OFFSET 0x1fc01000
-#define MAX_DEVICE_DESC_SIZE 2040
-#define MAX_CTRL_QUEUE_LEN 100
-#define MAX_APP_QUEUE_LEN 200
-#define MAX_LATENCY_ALLOWED 0xFFFFFFFF
-#define DEFAULT_UG_INTERVAL 250
-#define DEFAULT_UGI_FACTOR 4
-
-#define DEFAULT_PERSFCOUNT 60
-#define MAX_CONNECTIONS 10
-#define MAX_CLASS_NAME_LENGTH 32
-
-#define ETH_LENGTH_OF_ADDRESS 6
-#define MAX_MULTICAST_ADDRESSES 32
-#define IP_LENGTH_OF_ADDRESS 4
-
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
-
-/* Link Request */
-#define SET_MAC_ADDRESS_REQUEST 0
-#define SYNC_UP_REQUEST 1
-#define SYNCED_UP 2
-#define LINK_UP_REQUEST 3
-#define LINK_CONNECTED 4
-#define SYNC_UP_NOTIFICATION 2
-#define LINK_UP_NOTIFICATION 4
-
-
-#define LINK_NET_ENTRY 0x0002
-#define HMC_STATUS 0x0004
-#define LINK_UP_CONTROL_REQ 0x83
-
-#define STATS_POINTER_REQ_STATUS 0x86
-#define NETWORK_ENTRY_REQ_PAYLOAD 198
-#define LINK_DOWN_REQ_PAYLOAD 226
-#define SYNC_UP_REQ_PAYLOAD 228
-#define STATISTICS_POINTER_REQ 237
-#define LINK_UP_REQ_PAYLOAD 245
-#define LINK_UP_ACK 246
-
-#define STATS_MSG_SIZE 4
-#define INDEX_TO_DATA 4
-
-#define GO_TO_IDLE_MODE_PAYLOAD 210
-#define COME_UP_FROM_IDLE_MODE_PAYLOAD 211
-#define IDLE_MODE_SF_UPDATE_MSG 187
-
-#define SKB_RESERVE_ETHERNET_HEADER 16
-#define SKB_RESERVE_PHS_BYTES 32
-
-#define IP_PACKET_ONLY_MODE 0
-#define ETH_PACKET_TUNNELING_MODE 1
-
-#define ETH_CS_802_3 1
-#define ETH_CS_802_1Q_VLAN 3
-#define IPV4_CS 1
-#define IPV6_CS 2
-#define ETH_CS_MASK 0x3f
-
-/** \brief Validity bit maps for TLVs in packet classification rule */
-
-#define PKT_CLASSIFICATION_USER_PRIORITY_VALID 0
-#define PKT_CLASSIFICATION_VLANID_VALID 1
-
-#ifndef MIN
-#define MIN(_a, _b) ((_a) < (_b) ? (_a) : (_b))
-#endif
-
-
-/*Leader related terms */
-#define LEADER_STATUS 0x00
-#define LEADER_STATUS_TCP_ACK 0x1
-#define LEADER_SIZE sizeof(struct bcm_leader)
-#define MAC_ADDR_REQ_SIZE sizeof(struct bcm_packettosend)
-#define SS_INFO_REQ_SIZE sizeof(struct bcm_packettosend)
-#define CM_REQUEST_SIZE (LEADER_SIZE + sizeof(stLocalSFChangeRequest))
-#define IDLE_REQ_SIZE sizeof(struct bcm_packettosend)
-
-
-#define MAX_TRANSFER_CTRL_BYTE_USB (2*1024)
-
-#define GET_MAILBOX1_REG_REQUEST 0x87
-#define GET_MAILBOX1_REG_RESPONSE 0x67
-#define VCID_CONTROL_PACKET 0x00
-
-#define TRANSMIT_NETWORK_DATA 0x00
-#define RECEIVED_NETWORK_DATA 0x20
-
-#define CM_RESPONSES 0xA0
-#define STATUS_RSP 0xA1
-#define LINK_CONTROL_RESP 0xA2
-#define IDLE_MODE_STATUS 0xA3
-#define STATS_POINTER_RESP 0xA6
-#define MGMT_MSG_INFO_SW_STATUS 0xA7
-#define AUTH_SS_HOST_MSG 0xA8
-
-#define CM_DSA_ACK_PAYLOAD 247
-#define CM_DSC_ACK_PAYLOAD 248
-#define CM_DSD_ACK_PAYLOAD 249
-#define CM_DSDEACTVATE 250
-#define TOTAL_MASKED_ADDRESS_IN_BYTES 32
-
-#define MAC_REQ 0
-#define LINK_RESP 1
-#define RSSI_INDICATION 2
-
-#define SS_INFO 4
-#define STATISTICS_INFO 5
-#define CM_INDICATION 6
-#define PARAM_RESP 7
-#define BUFFER_1K 1024
-#define BUFFER_2K (BUFFER_1K*2)
-#define BUFFER_4K (BUFFER_2K*2)
-#define BUFFER_8K (BUFFER_4K*2)
-#define BUFFER_16K (BUFFER_8K*2)
-#define DOWNLINK_DIR 0
-#define UPLINK_DIR 1
-
-#define BCM_SIGNATURE "BECEEM"
-
-
-#define GPIO_OUTPUT_REGISTER 0x0F00003C
-#define BCM_GPIO_OUTPUT_SET_REG 0x0F000040
-#define BCM_GPIO_OUTPUT_CLR_REG 0x0F000044
-#define GPIO_MODE_REGISTER 0x0F000034
-#define GPIO_PIN_STATE_REGISTER 0x0F000038
-
-struct bcm_link_state {
- unsigned char ucLinkStatus;
- unsigned char bIdleMode;
- unsigned char bShutdownMode;
-};
-
-enum enLinkStatus {
- WAIT_FOR_SYNC = 1,
- PHY_SYNC_ACHIVED = 2,
- LINKUP_IN_PROGRESS = 3,
- LINKUP_DONE = 4,
- DREG_RECEIVED = 5,
- LINK_STATUS_RESET_RECEIVED = 6,
- PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7,
- LINK_SHUTDOWN_REQ_FROM_FIRMWARE = 8,
- COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW = 9
-};
-
-enum bcm_phs_dsc_action {
- eAddPHSRule = 0,
- eSetPHSRule,
- eDeletePHSRule,
- eDeleteAllPHSRules
-};
-
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ 0x89 /* Host to Mac */
-#define CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP 0xA9 /* Mac to Host */
-#define MASK_DISABLE_HEADER_SUPPRESSION 0x10 /* 0b000010000 */
-#define MINIMUM_PENDING_DESCRIPTORS 5
-
-#define SHUTDOWN_HOSTINITIATED_REQUESTPAYLOAD 0xCC
-#define SHUTDOWN_ACK_FROM_DRIVER 0x1
-#define SHUTDOWN_NACK_FROM_DRIVER 0x2
-
-#define LINK_SYNC_UP_SUBTYPE 0x0001
-#define LINK_SYNC_DOWN_SUBTYPE 0x0001
-
-
-
-#define CONT_MODE 1
-#define SINGLE_DESCRIPTOR 1
-
-
-#define DESCRIPTOR_LENGTH 0x30
-#define FIRMWARE_DESCS_ADDRESS 0x1F100000
-
-
-#define CLOCK_RESET_CNTRL_REG_1 0x0F00000C
-#define CLOCK_RESET_CNTRL_REG_2 0x0F000840
-
-
-
-#define TX_DESCRIPTOR_HEAD_REGISTER 0x0F010034
-#define RX_DESCRIPTOR_HEAD_REGISTER 0x0F010094
-
-#define STATISTICS_BEGIN_ADDR 0xbf60f02c
-
-#define MAX_PENDING_CTRL_PACKET (MAX_CTRL_QUEUE_LEN-10)
-
-#define WIMAX_MAX_MTU (MTU_SIZE + ETH_HLEN)
-#define AUTO_LINKUP_ENABLE 0x2
-#define AUTO_SYNC_DISABLE 0x1
-#define AUTO_FIRM_DOWNLOAD 0x1
-#define SETTLE_DOWN_TIME 50
-
-#define HOST_BUS_SUSPEND_BIT 16
-
-#define IDLE_MESSAGE 0x81
-
-#define MIPS_CLOCK_133MHz 1
-
-#define TARGET_CAN_GO_TO_IDLE_MODE 2
-#define TARGET_CAN_NOT_GO_TO_IDLE_MODE 3
-#define IDLE_MODE_PAYLOAD_LENGTH 8
-
-#define IP_HEADER(Buffer) ((IPHeaderFormat *)(Buffer))
-#define IPV4 4
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
-
-#define SET_MAC_ADDRESS 193
-#define SET_MAC_ADDRESS_RESPONSE 236
-
-#define IDLE_MODE_WAKEUP_PATTERN 0xd0ea1d1e
-#define IDLE_MODE_WAKEUP_NOTIFIER_ADDRESS 0x1FC02FA8
-#define IDLE_MODE_MAX_RETRY_COUNT 1000
-
-#define CONFIG_BEGIN_ADDR 0xBF60B000
-
-#define FIRMWARE_BEGIN_ADDR 0xBFC00000
-
-#define INVALID_QUEUE_INDEX NO_OF_QUEUES
-
-#define INVALID_PID ((pid_t)-1)
-#define DDR_80_MHZ 0
-#define DDR_100_MHZ 1
-#define DDR_120_MHZ 2 /* Additional Frequency for T3LP */
-#define DDR_133_MHZ 3
-#define DDR_140_MHZ 4 /* Not Used (Reserved for future) */
-#define DDR_160_MHZ 5 /* Additional Frequency for T3LP */
-#define DDR_180_MHZ 6 /* Not Used (Reserved for future) */
-#define DDR_200_MHZ 7 /* Not Used (Reserved for future) */
-
-#define MIPS_200_MHZ 0
-#define MIPS_160_MHZ 1
-
-#define PLL_800_MHZ 0
-#define PLL_266_MHZ 1
-
-#define DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING 0
-#define DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING 1
-#define DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN 2
-#define DEVICE_POWERSAVE_MODE_AS_RESERVED 3
-#define DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE 4
-
-
-#define EEPROM_REJECT_REG_1 0x0f003018
-#define EEPROM_REJECT_REG_2 0x0f00301c
-#define EEPROM_REJECT_REG_3 0x0f003008
-#define EEPROM_REJECT_REG_4 0x0f003020
-#define EEPROM_REJECT_MASK 0x0fffffff
-#define VSG_MODE 0x3
-
-/* Idle Mode Related Registers */
-#define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C
-#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC
-
-#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e
-#define DEVICE_INT_OUT_EP_REG0 0x0F011870
-#define DEVICE_INT_OUT_EP_REG1 0x0F011874
-
-#define BIN_FILE "/lib/firmware/macxvi200.bin"
-#define CFG_FILE "/lib/firmware/macxvi.cfg"
-#define SF_MAX_ALLOWED_PACKETS_TO_BACKUP 128
-#define MIN_VAL(x, y) ((x) < (y) ? (x) : (y))
-#define MAC_ADDRESS_SIZE 6
-#define EEPROM_COMMAND_Q_REG 0x0F003018
-#define EEPROM_READ_DATA_Q_REG 0x0F003020
-#define CHIP_ID_REG 0x0F000000
-#define GPIO_MODE_REG 0x0F000034
-#define GPIO_OUTPUT_REG 0x0F00003C
-#define WIMAX_MAX_ALLOWED_RATE (1024*1024*50)
-
-#define T3 0xbece0300
-#define TARGET_SFID_TXDESC_MAP_LOC 0xBFFFF400
-
-#define RWM_READ 0
-#define RWM_WRITE 1
-
-#define T3LPB 0xbece3300
-#define BCS220_2 0xbece3311
-#define BCS220_2BC 0xBECE3310
-#define BCS250_BC 0xbece3301
-#define BCS220_3 0xbece3321
-
-
-#define HPM_CONFIG_LDO145 0x0F000D54
-#define HPM_CONFIG_MSW 0x0F000D58
-
-#define T3B 0xbece0310
-enum bcm_nvm_type {
- NVM_AUTODETECT = 0,
- NVM_EEPROM,
- NVM_FLASH,
- NVM_UNKNOWN
-};
-
-enum bcm_pmu_modes {
- HYBRID_MODE_7C = 0,
- INTERNAL_MODE_6 = 1,
- HYBRID_MODE_6 = 2
-};
-
-#define MAX_RDM_WRM_RETIRES 1
-
-enum eAbortPattern {
- ABORT_SHUTDOWN_MODE = 1,
- ABORT_IDLE_REG = 1,
- ABORT_IDLE_MODE = 2,
- ABORT_IDLE_SYNCDOWN = 3
-};
-
-
-/* Offsets used by driver in skb cb variable */
-#define SKB_CB_CLASSIFICATION_OFFSET 0
-#define SKB_CB_LATENCY_OFFSET 1
-#define SKB_CB_TCPACK_OFFSET 2
-
-#endif /* __MACROS_H__ */
diff --git a/drivers/staging/bcm/Makefile b/drivers/staging/bcm/Makefile
deleted file mode 100644
index 652b7f87737c..000000000000
--- a/drivers/staging/bcm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for Beceem USB Wimax card
-#
-
-obj-$(CONFIG_BCM_WIMAX) += bcm_wimax.o
-
-bcm_wimax-y := InterfaceDld.o InterfaceIdleMode.o InterfaceInit.o InterfaceRx.o \
- InterfaceIsr.o InterfaceMisc.o InterfaceTx.o \
- CmHost.o IPv6Protocol.o Qos.o Transmit.o\
- Bcmnet.o DDRInit.o HandleControlPacket.o\
- LeakyBucket.o Misc.o sort.o Bcmchar.o hostmibs.o PHSModule.o\
- led_control.o nvm.o vendorspecificextn.o
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
deleted file mode 100644
index 883f7394dee6..000000000000
--- a/drivers/staging/bcm/Misc.c
+++ /dev/null
@@ -1,1587 +0,0 @@
-#include "headers.h"
-
-static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc);
-static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter);
-static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer);
-static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter);
-static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter);
-
-static void default_wimax_protocol_initialize(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiLoopIndex;
-
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES-1; uiLoopIndex++) {
- Adapter->PackInfo[uiLoopIndex].uiThreshold = TX_PACKET_THRESHOLD;
- Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate = MAX_ALLOWED_RATE;
- Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize = 20*1024*1024;
- }
-
- Adapter->BEBucketSize = BE_BUCKET_SIZE;
- Adapter->rtPSBucketSize = rtPS_BUCKET_SIZE;
- Adapter->LinkStatus = SYNC_UP_REQUEST;
- Adapter->TransferMode = IP_PACKET_ONLY_MODE;
- Adapter->usBestEffortQueueIndex = -1;
-}
-
-int InitAdapter(struct bcm_mini_adapter *psAdapter)
-{
- int i = 0;
- int Status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Initialising Adapter = %p", psAdapter);
-
- if (psAdapter == NULL) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter is NULL");
- return -EINVAL;
- }
-
- sema_init(&psAdapter->NVMRdmWrmLock, 1);
- sema_init(&psAdapter->rdmwrmsync, 1);
- spin_lock_init(&psAdapter->control_queue_lock);
- spin_lock_init(&psAdapter->txtransmitlock);
- sema_init(&psAdapter->RxAppControlQueuelock, 1);
- sema_init(&psAdapter->fw_download_sema, 1);
- sema_init(&psAdapter->LowPowerModeSync, 1);
-
- for (i = 0; i < NO_OF_QUEUES; i++)
- spin_lock_init(&psAdapter->PackInfo[i].SFQueueLock);
- i = 0;
-
- init_waitqueue_head(&psAdapter->process_rx_cntrlpkt);
- init_waitqueue_head(&psAdapter->tx_packet_wait_queue);
- init_waitqueue_head(&psAdapter->process_read_wait_queue);
- init_waitqueue_head(&psAdapter->ioctl_fw_dnld_wait_queue);
- init_waitqueue_head(&psAdapter->lowpower_mode_wait_queue);
- psAdapter->waiting_to_fw_download_done = TRUE;
- psAdapter->fw_download_done = false;
-
- default_wimax_protocol_initialize(psAdapter);
- for (i = 0; i < MAX_CNTRL_PKTS; i++) {
- psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL);
- if (!psAdapter->txctlpacket[i]) {
-			BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Could not allocate more control packets, allocated only %d", i);
- return -ENOMEM;
- }
- }
-
- if (AllocAdapterDsxBuffer(psAdapter)) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to allocate DSX buffers");
- return -EINVAL;
- }
-
- /* Initialize PHS interface */
- if (phs_init(&psAdapter->stBCMPhsContext, psAdapter) != 0) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%s:%d:Error PHS Init Failed=====>\n", __FILE__, __func__, __LINE__);
- return -ENOMEM;
- }
-
- Status = BcmAllocFlashCSStructure(psAdapter);
- if (Status) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Memory Allocation for Flash structure failed");
- return Status;
- }
-
- Status = vendorextnInit(psAdapter);
-
- if (STATUS_SUCCESS != Status) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Vendor Init Failed");
- return Status;
- }
-
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Adapter initialised");
-
- return STATUS_SUCCESS;
-}
-
-void AdapterFree(struct bcm_mini_adapter *Adapter)
-{
- int count;
-
- beceem_protocol_reset(Adapter);
- vendorextnExit(Adapter);
-
- if (Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
- kthread_stop(Adapter->control_packet_handler);
-
- if (Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
- kthread_stop(Adapter->transmit_packet_thread);
-
- wake_up(&Adapter->process_read_wait_queue);
-
- if (Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
- kthread_stop(Adapter->LEDInfo.led_cntrl_threadid);
-
- unregister_networkdev(Adapter);
-
- /* FIXME: use proper wait_event and refcounting */
- while (atomic_read(&Adapter->ApplicationRunning)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n", atomic_read(&Adapter->ApplicationRunning));
- msleep(100);
- }
- unregister_control_device_interface(Adapter);
- kfree(Adapter->pstargetparams);
-
- for (count = 0; count < MAX_CNTRL_PKTS; count++)
- kfree(Adapter->txctlpacket[count]);
-
- FreeAdapterDsxBuffer(Adapter);
- kfree(Adapter->pvInterfaceAdapter);
-
- /* Free the PHS Interface */
- PhsCleanup(&Adapter->stBCMPhsContext);
-
- BcmDeAllocFlashCSStructure(Adapter);
-
- free_netdev(Adapter->dev);
-}
-
-static int create_worker_threads(struct bcm_mini_adapter *psAdapter)
-{
- /* Rx Control Packets Processing */
- psAdapter->control_packet_handler = kthread_run((int (*)(void *))
- control_packet_handler, psAdapter, "%s-rx", DRV_NAME);
- if (IS_ERR(psAdapter->control_packet_handler)) {
- pr_notice(DRV_NAME ": could not create control thread\n");
- return PTR_ERR(psAdapter->control_packet_handler);
- }
-
- /* Tx Thread */
- psAdapter->transmit_packet_thread = kthread_run((int (*)(void *))
- tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME);
- if (IS_ERR(psAdapter->transmit_packet_thread)) {
-		pr_notice(DRV_NAME ": could not create transmit thread\n");
- kthread_stop(psAdapter->control_packet_handler);
- return PTR_ERR(psAdapter->transmit_packet_thread);
- }
- return 0;
-}
-
-static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path)
-{
- struct file *flp = filp_open(path, O_RDONLY, S_IRWXU);
-
- if (IS_ERR(flp)) {
-		pr_err(DRV_NAME ": Unable To Open File %s, err %ld\n", path, PTR_ERR(flp));
- flp = NULL;
- }
-
- if (Adapter->device_removed)
- flp = NULL;
-
- return flp;
-}
-
-/* Arguments:
- * Logical Adapter
- * Path to image file
- * Download Address on the chip
- */
-static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, unsigned int loc)
-{
- int errorno = 0;
- struct file *flp = NULL;
- struct timeval tv = {0};
-
- flp = open_firmware_file(Adapter, path);
- if (!flp) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
- return -ENOENT;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc);
- do_gettimeofday(&tv);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
- if (Adapter->bcm_file_download(Adapter->pvInterfaceAdapter, flp, loc)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to download the firmware with error %x!!!", -EIO);
- errorno = -EIO;
- goto exit_download;
- }
- vfs_llseek(flp, 0, 0);
- if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!");
- errorno = -EIO;
- goto exit_download;
- }
-
-exit_download:
- filp_close(flp, NULL);
- return errorno;
-}
-
-/**
- * @ingroup ctrl_pkt_functions
- * This function copies the contents of given buffer
- * to the control packet and queues it for transmission.
- * @note Do not acquire the spinlock, as it is already acquired.
- * @return SUCCESS/FAILURE.
- * Arguments:
- * Logical Adapter
- * Control Packet Buffer
- */
-int CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter, void *ioBuffer)
-{
- struct bcm_leader *pLeader = NULL;
- int Status = 0;
- unsigned char *ctrl_buff;
- unsigned int pktlen = 0;
- struct bcm_link_request *pLinkReq = NULL;
- PUCHAR pucAddIndication = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "======>");
- if (!ioBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Got Null Buffer\n");
- return -EINVAL;
- }
-
- pLinkReq = (struct bcm_link_request *)ioBuffer;
- pLeader = (struct bcm_leader *)ioBuffer; /* ioBuffer Contains sw_Status and Payload */
-
- if (Adapter->bShutStatus == TRUE &&
- pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD &&
- pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE) {
-
-		/* Got sync down in SHUTDOWN; we cannot process this. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC DOWN Request in Shut Down Mode..\n");
- return STATUS_FAILURE;
- }
-
- if ((pLeader->Status == LINK_UP_CONTROL_REQ) &&
- ((pLinkReq->szData[0] == LINK_UP_REQ_PAYLOAD &&
- (pLinkReq->szData[1] == LINK_SYNC_UP_SUBTYPE)) || /* Sync Up Command */
- pLinkReq->szData[0] == NETWORK_ENTRY_REQ_PAYLOAD)) /* Net Entry Command */ {
-
- if (Adapter->LinkStatus > PHY_SYNC_ACHIVED) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "LinkStatus is Greater than PHY_SYN_ACHIEVED");
- return STATUS_FAILURE;
- }
-
- if (Adapter->bShutStatus == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "SYNC UP IN SHUTDOWN..Device WakeUp\n");
- if (Adapter->bTriedToWakeUpFromlowPowerMode == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Waking up for the First Time..\n");
- Adapter->usIdleModePattern = ABORT_SHUTDOWN_MODE; /* change it to 1 for current support. */
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->bShutStatus, (5 * HZ));
-
- if (Status == -ERESTARTSYS)
- return Status;
-
- if (Adapter->bShutStatus) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shutdown Mode Wake up Failed - No Wake Up Received\n");
- return STATUS_FAILURE;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Wakeup has been tried already...\n");
- }
- }
- }
-
- if (Adapter->IdleMode == TRUE) {
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle mode ... hence\n"); */
- if (pLeader->Status == LINK_UP_CONTROL_REQ || pLeader->Status == 0x80 ||
- pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ) {
-
- if ((pLeader->Status == LINK_UP_CONTROL_REQ) && (pLinkReq->szData[0] == LINK_DOWN_REQ_PAYLOAD)) {
- if (pLinkReq->szData[1] == LINK_SYNC_DOWN_SUBTYPE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Link Down Sent in Idle Mode\n");
- Adapter->usIdleModePattern = ABORT_IDLE_SYNCDOWN; /* LINK DOWN sent in Idle Mode */
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
- Adapter->usIdleModePattern = ABORT_IDLE_REG;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "ABORT_IDLE_MODE pattern is being written\n");
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- }
-
-			/* Set bIdleMode_tx_from_host to TRUE to indicate to the
-			 * LED control thread that the wake-up from idle mode
-			 * was initiated by the host.
-			 */
- /* Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE; */
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
-
- /* We should not send DREG message down while in idlemode. */
- if (LINK_DOWN_REQ_PAYLOAD == pLinkReq->szData[0])
- return STATUS_SUCCESS;
-
- Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->IdleMode, (5 * HZ));
-
- if (Status == -ERESTARTSYS)
- return Status;
-
- if (Adapter->IdleMode) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Wake up Failed - No Wake Up Received\n");
- return STATUS_FAILURE;
- }
- } else {
- return STATUS_SUCCESS;
- }
- }
-
- /* The Driver has to send control messages with a particular VCID */
- pLeader->Vcid = VCID_CONTROL_PACKET; /* VCID for control packet. */
-
- /* Allocate skb for Control Packet */
- pktlen = pLeader->PLength;
- ctrl_buff = (char *)Adapter->txctlpacket[atomic_read(&Adapter->index_wr_txcntrlpkt)%MAX_CNTRL_PKTS];
-
- if (!ctrl_buff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "mem allocation Failed");
- return -ENOMEM;
- }
-
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Control packet to be taken =%d and address is =%p, incoming address is =%p and packet len=%x",
- atomic_read(&Adapter->index_wr_txcntrlpkt), ctrl_buff, ioBuffer, pktlen);
-
- if (pLeader) {
- if ((pLeader->Status == 0x80) ||
- (pLeader->Status == CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ)) {
- /*
- * Restructure the DSX message to handle Multiple classifier Support
- * Write the Service Flow param Structures directly to the target
- * and embed the pointers in the DSX messages sent to target.
- */
- /* Lets store the current length of the control packet we are transmitting */
- pucAddIndication = (PUCHAR)ioBuffer + LEADER_SIZE;
- pktlen = pLeader->PLength;
- Status = StoreCmControlResponseMessage(Adapter, pucAddIndication, &pktlen);
- if (Status != 1) {
- ClearTargetDSXBuffer(Adapter, ((struct bcm_add_indication_alt *)pucAddIndication)->u16TID, false);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, " Error Restoring The DSX Control Packet. Dsx Buffers on Target may not be Setup Properly ");
- return STATUS_FAILURE;
- }
- /*
- * update the leader to use the new length
- * The length of the control packet is length of message being sent + Leader length
- */
- pLeader->PLength = pktlen;
- }
- }
-
- if (pktlen + LEADER_SIZE > MAX_CNTL_PKT_SIZE)
- return -EINVAL;
-
- memset(ctrl_buff, 0, pktlen+LEADER_SIZE);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Copying the Control Packet Buffer with length=%d\n", pLeader->PLength);
- *(struct bcm_leader *)ctrl_buff = *pLeader;
- memcpy(ctrl_buff + LEADER_SIZE, ((PUCHAR)ioBuffer + LEADER_SIZE), pLeader->PLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Enqueuing the Control Packet");
-
- /* Update the statistics counters */
- spin_lock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost += pLeader->PLength;
- Adapter->PackInfo[HiPriority].uiCurrentPacketsOnHost++;
- atomic_inc(&Adapter->TotalPacketCount);
- spin_unlock_bh(&Adapter->PackInfo[HiPriority].SFQueueLock);
- Adapter->PackInfo[HiPriority].bValid = TRUE;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "CurrBytesOnHost: %x bValid: %x",
- Adapter->PackInfo[HiPriority].uiCurrentBytesOnHost,
- Adapter->PackInfo[HiPriority].bValid);
- Status = STATUS_SUCCESS;
- /*Queue the packet for transmission */
- atomic_inc(&Adapter->index_wr_txcntrlpkt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Calling transmit_packets");
- atomic_set(&Adapter->TxPktAvail, 1);
- wake_up(&Adapter->tx_packet_wait_queue);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<====");
- return Status;
-}
-
-/******************************************************************
-* Function - LinkMessage()
-*
-* Description - This function builds the Sync-up and Link-up request
-* packet messages depending on the device Link status.
-*
-* Parameters - Adapter: Pointer to the Adapter structure.
-*
-* Returns - None.
-*******************************************************************/
-void LinkMessage(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_link_request *pstLinkRequest = NULL;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
- if (Adapter->LinkStatus == SYNC_UP_REQUEST && Adapter->AutoSyncup) {
- pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC);
- if (!pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
- return;
- }
- /* sync up request... */
- Adapter->LinkStatus = WAIT_FOR_SYNC; /* current link status */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For SyncUp...");
- pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1] = LINK_SYNC_UP_SUBTYPE;
- pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength = sizeof(ULONG);
- Adapter->bSyncUpRequestSent = TRUE;
-
- } else if (Adapter->LinkStatus == PHY_SYNC_ACHIVED && Adapter->AutoLinkUp) {
- pstLinkRequest = kzalloc(sizeof(struct bcm_link_request), GFP_ATOMIC);
- if (!pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Can not allocate memory for Link request!");
- return;
- }
- /* LINK_UP_REQUEST */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Requesting For LinkUp...");
- pstLinkRequest->szData[0] = LINK_UP_REQ_PAYLOAD;
- pstLinkRequest->szData[1] = LINK_NET_ENTRY;
- pstLinkRequest->Leader.Status = LINK_UP_CONTROL_REQ;
- pstLinkRequest->Leader.PLength = sizeof(ULONG);
- }
- if (pstLinkRequest) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
- CopyBufferToControlPacket(Adapter, pstLinkRequest);
- kfree(pstLinkRequest);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
- return;
-}
-
-/**********************************************************************
-* Function - StatisticsResponse()
-*
-* Description - This function handles the Statistics response packet.
-*
-* Parameters - Adapter : Pointer to the Adapter structure.
-* - pvBuffer: Starting address of Statistic response data.
-*
-* Returns - None.
-************************************************************************/
-void StatisticsResponse(struct bcm_mini_adapter *Adapter, void *pvBuffer)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>", __func__);
- Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (unsigned int)Adapter->StatisticsPointer);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====", __func__);
-}
-
-/**********************************************************************
-* Function - LinkControlResponseMessage()
-*
-* Description - This function handles the Link response packets.
-*
-* Parameters - Adapter : Pointer to the Adapter structure.
-* - pucBuffer: Starting address of Link response data.
-*
-* Returns - None.
-***********************************************************************/
-void LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "=====>");
-
- if (*pucBuffer == LINK_UP_ACK) {
- switch (*(pucBuffer+1)) {
- case PHY_SYNC_ACHIVED: /* SYNCed UP */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHY_SYNC_ACHIVED");
-
- if (Adapter->LinkStatus == LINKUP_DONE)
- beceem_protocol_reset(Adapter);
-
- Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
- Adapter->LinkStatus = PHY_SYNC_ACHIVED;
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- LinkMessage(Adapter);
- break;
-
- case LINKUP_DONE:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LINKUP_DONE");
- Adapter->LinkStatus = LINKUP_DONE;
- Adapter->bPHSEnabled = *(pucBuffer+3);
- Adapter->bETHCSEnabled = *(pucBuffer+4) & ETH_CS_MASK;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "PHS Support Status Received In LinkUp Ack : %x\n", Adapter->bPHSEnabled);
-
- if ((false == Adapter->bShutStatus) && (false == Adapter->IdleMode)) {
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NORMAL_OPERATION;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- LinkMessage(Adapter);
- break;
-
- case WAIT_FOR_SYNC:
- /*
- * Driver to ignore the DREG_RECEIVED
- * WiMAX Application should handle this Message
- */
- /* Adapter->liTimeSinceLastNetEntry = 0; */
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
- Adapter->usBestEffortQueueIndex = INVALID_QUEUE_INDEX;
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
- Adapter->IdleMode = false;
- beceem_protocol_reset(Adapter);
-
- break;
- case LINK_SHUTDOWN_REQ_FROM_FIRMWARE:
- case COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW:
- {
- HandleShutDownModeRequest(Adapter, pucBuffer);
- }
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "default case:LinkResponse %x", *(pucBuffer + 1));
- break;
- }
- } else if (SET_MAC_ADDRESS_RESPONSE == *pucBuffer) {
- PUCHAR puMacAddr = (pucBuffer + 1);
-
- Adapter->LinkStatus = SYNC_UP_REQUEST;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "MAC address response, sending SYNC_UP");
- LinkMessage(Adapter);
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "%s <=====", __func__);
-}
-
-void SendIdleModeResponse(struct bcm_mini_adapter *Adapter)
-{
- int status = 0, NVMAccess = 0, lowPwrAbortMsg = 0;
- struct timeval tv;
- struct bcm_link_request stIdleResponse = {{0} };
-
- memset(&tv, 0, sizeof(tv));
- stIdleResponse.Leader.Status = IDLE_MESSAGE;
- stIdleResponse.Leader.PLength = IDLE_MODE_PAYLOAD_LENGTH;
- stIdleResponse.szData[0] = GO_TO_IDLE_MODE_PAYLOAD;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, " ============>");
-
- /*********************************
- *down_trylock -
- * if [ semaphore is available ]
-	 * acquire the semaphore and return value 0;
- * else
- * return non-zero value ;
- *
- ***********************************/
-
- NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
-
-
- if ((NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) &&
- (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
-
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "HOST IS NACKING Idle mode To F/W!!!!!!!!");
- Adapter->bPreparingForLowPowerMode = false;
- } else {
- stIdleResponse.szData[1] = TARGET_CAN_GO_TO_IDLE_MODE; /* 2; Idle ACK */
- Adapter->StatisticsPointer = 0;
-
- /* Wait for the LED to TURN OFF before sending ACK response */
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- int iRetVal = 0;
-
- /* Wake the LED Thread with IDLEMODE_ENTER State */
- Adapter->DriverState = LOWPOWER_MODE_ENTER;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld", jiffies);
- wake_up(&Adapter->LEDInfo.notify_led_event);
-
- /* Wait for 1 SEC for LED to OFF */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
-
- /* If Timed Out to Sync IDLE MODE Enter, do IDLE mode Exit and Send NACK to device */
- if (iRetVal <= 0) {
- stIdleResponse.szData[1] = TARGET_CAN_NOT_GO_TO_IDLE_MODE; /* NACK- device access is going on. */
- Adapter->DriverState = NORMAL_OPERATION;
- wake_up(&Adapter->LEDInfo.notify_led_event);
-				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "NACKING Idle mode as a timeout happened on the LED side!!!!!!!!");
- }
- }
-
- if (stIdleResponse.szData[1] == TARGET_CAN_GO_TO_IDLE_MODE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "ACKING IDLE MODE !!!!!!!!!");
- down(&Adapter->rdmwrmsync);
- Adapter->bPreparingForLowPowerMode = TRUE;
- up(&Adapter->rdmwrmsync);
- /* Killing all URBS. */
- if (Adapter->bDoSuspend == TRUE)
- Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- } else {
- Adapter->bPreparingForLowPowerMode = false;
- }
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
- }
-
- status = CopyBufferToControlPacket(Adapter, &stIdleResponse);
- if (status != STATUS_SUCCESS) {
-		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "failed to send the Idle mode Request\n");
- Adapter->bPreparingForLowPowerMode = false;
- StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- }
- do_gettimeofday(&tv);
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitted to Q :%ld ms", tv.tv_sec * 1000 + tv.tv_usec / 1000);
-}
-
-/******************************************************************
-* Function - DumpPackInfo()
-*
-* Description - This function dumps the all Queue(PackInfo[]) details.
-*
-* Parameters - Adapter: Pointer to the Adapter structure.
-*
-* Returns - None.
-*******************************************************************/
-void DumpPackInfo(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiLoopIndex = 0;
- unsigned int uiIndex = 0;
- unsigned int uiClsfrIndex = 0;
- struct bcm_classifier_rule *pstClassifierEntry = NULL;
-
- for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "*********** Showing Details Of Queue %d***** ******", uiLoopIndex);
- if (false == Adapter->PackInfo[uiLoopIndex].bValid) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid is false for %X index\n", uiLoopIndex);
- continue;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " Dumping SF Rule Entry For SFID %lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, " ucDirection %X\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
-
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv6 Service Flow\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Ipv4 Service Flow\n");
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SF Traffic Priority %X\n", Adapter->PackInfo[uiLoopIndex].u8TrafficPriority);
-
- for (uiClsfrIndex = 0; uiClsfrIndex < MAX_CLASSIFIERS; uiClsfrIndex++) {
- pstClassifierEntry = &Adapter->astClassifierTable[uiClsfrIndex];
- if (!pstClassifierEntry->bUsed)
- continue;
-
- if (pstClassifierEntry->ulSFID != Adapter->PackInfo[uiLoopIndex].ulSFID)
- continue;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X Classifier Rule ID : %X\n", uiClsfrIndex, pstClassifierEntry->uiClassifierRuleIndex);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X usVCID_Value : %X\n", uiClsfrIndex, pstClassifierEntry->usVCID_Value);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bProtocolValid : %X\n", uiClsfrIndex, pstClassifierEntry->bProtocolValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bTOSValid : %X\n", uiClsfrIndex, pstClassifierEntry->bTOSValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bDestIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bDestIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tDumping Classifier Rule Entry For Index: %X bSrcIpValid : %X\n", uiClsfrIndex, pstClassifierEntry->bSrcIpValid);
-
- for (uiIndex = 0; uiIndex < MAX_PORT_RANGE; uiIndex++) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeLo:%X\n", pstClassifierEntry->usSrcPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusSrcPortRangeHi:%X\n", pstClassifierEntry->usSrcPortRangeHi[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeLo:%X\n", pstClassifierEntry->usDestPortRangeLo[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tusDestPortRangeHi:%X\n", pstClassifierEntry->usDestPortRangeHi[uiIndex]);
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPSourceAddressLength : 0x%x\n", pstClassifierEntry->ucIPSourceAddressLength);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucIPDestinationAddressLength : 0x%x\n", pstClassifierEntry->ucIPDestinationAddressLength);
- for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPSourceAddressLength; uiIndex++) {
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpAddr :\n");
- DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulSrcIpMask :\n");
- DumpIpv6Address(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpAddr:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulSrcIpMask:%lX\n", pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[uiIndex]);
- }
- }
-
- for (uiIndex = 0; uiIndex < pstClassifierEntry->ucIPDestinationAddressLength; uiIndex++) {
- if (Adapter->PackInfo[uiLoopIndex].ucIpVersion == IPV6) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpAddr :\n");
- DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Addr);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tIpv6 ulDestIpMask :\n");
- DumpIpv6Address(pstClassifierEntry->stDestIpAddress.ulIpv6Mask);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpAddr:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Addr[uiIndex]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tulDestIpMask:%lX\n", pstClassifierEntry->stDestIpAddress.ulIpv4Mask[uiIndex]);
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tucProtocol:0x%X\n", pstClassifierEntry->ucProtocol[0]);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\tu8ClassifierRulePriority:%X\n", pstClassifierEntry->u8ClassifierRulePriority);
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ulSFID:%lX\n", Adapter->PackInfo[uiLoopIndex].ulSFID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "usVCID_Value:%X\n", Adapter->PackInfo[uiLoopIndex].usVCID_Value);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "PhsEnabled: 0x%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThreshold:%X\n", Adapter->PackInfo[uiLoopIndex].uiThreshold);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bValid:%X\n", Adapter->PackInfo[uiLoopIndex].bValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bActive:%X\n", Adapter->PackInfo[uiLoopIndex].bActive);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActivateReqSent: %x", Adapter->PackInfo[uiLoopIndex].bActivateRequestSent);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "u8QueueType:%X\n", Adapter->PackInfo[uiLoopIndex].u8QueueType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxBucketSize:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxBucketSize);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPerSFTxResourceCount:%X\n", atomic_read(&Adapter->PackInfo[uiLoopIndex].uiPerSFTxResourceCount));
- /* DumpDebug(DUMP_INFO,("bCSSupport:%X\n",Adapter->PackInfo[uiLoopIndex].bCSSupport)); */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CurrQueueDepthOnTarget: %x\n", Adapter->PackInfo[uiLoopIndex].uiCurrentQueueDepthOnTarget);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentBytesOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentBytesOnHost);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentPacketsOnHost:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentPacketsOnHost);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiDroppedCountPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiDroppedCountPackets);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiSentPackets:%X\n", Adapter->PackInfo[uiLoopIndex].uiSentPackets);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentDrainRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentDrainRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiThisPeriodSentBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiThisPeriodSentBytes);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liDrainCalculated:%llX\n", Adapter->PackInfo[uiLoopIndex].liDrainCalculated);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiCurrentTokenCount:%X\n", Adapter->PackInfo[uiLoopIndex].uiCurrentTokenCount);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "liLastUpdateTokenAt:%llX\n", Adapter->PackInfo[uiLoopIndex].liLastUpdateTokenAt);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxAllowedRate:%X\n", Adapter->PackInfo[uiLoopIndex].uiMaxAllowedRate);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiPendedLast:%X\n", Adapter->PackInfo[uiLoopIndex].uiPendedLast);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "NumOfPacketsSent:%X\n", Adapter->PackInfo[uiLoopIndex].NumOfPacketsSent);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Direction: %x\n", Adapter->PackInfo[uiLoopIndex].ucDirection);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "CID: %x\n", Adapter->PackInfo[uiLoopIndex].usCID);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ProtocolValid: %x\n", Adapter->PackInfo[uiLoopIndex].bProtocolValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "TOSValid: %x\n", Adapter->PackInfo[uiLoopIndex].bTOSValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "DestIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bDestIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "SrcIpValid: %x\n", Adapter->PackInfo[uiLoopIndex].bSrcIpValid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ActiveSet: %x\n", Adapter->PackInfo[uiLoopIndex].bActiveSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AdmittedSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAdmittedSet);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "AuthzSet: %x\n", Adapter->PackInfo[uiLoopIndex].bAuthorizedSet);
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "ClassifierPriority: %x\n", Adapter->PackInfo[uiLoopIndex].bClassifierPriority);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiMaxLatency: %x\n", Adapter->PackInfo[uiLoopIndex].uiMaxLatency);
-	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,
-			"ServiceClassName: %*ph\n", 4,
-			Adapter->PackInfo[uiLoopIndex].ucServiceClassName);
-/* BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "bHeaderSuppressionEnabled :%X\n", Adapter->PackInfo[uiLoopIndex].bHeaderSuppressionEnabled);
- * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalTxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalTxBytes);
- * BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "uiTotalRxBytes:%X\n", Adapter->PackInfo[uiLoopIndex].uiTotalRxBytes);
- * DumpDebug(DUMP_INFO,(" uiRanOutOfResCount:%X\n",Adapter->PackInfo[uiLoopIndex].uiRanOutOfResCount));
- */
- }
-
- for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aRxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aRxPktSizeHist[uiLoopIndex]);
-
- for (uiLoopIndex = 0; uiLoopIndex < MIBS_MAX_HIST_ENTRIES; uiLoopIndex++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Adapter->aTxPktSizeHist[%x] = %x\n", uiLoopIndex, Adapter->aTxPktSizeHist[uiLoopIndex]);
-}
-
-int reset_card_proc(struct bcm_mini_adapter *ps_adapter)
-{
- int retval = STATUS_SUCCESS;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_interface_adapter *psIntfAdapter = NULL;
- unsigned int value = 0, uiResetValue = 0;
- int bytes;
-
- psIntfAdapter = ((struct bcm_interface_adapter *)(ps_adapter->pvInterfaceAdapter));
- ps_adapter->bDDRInitDone = false;
-
- if (ps_adapter->chip_id >= T3LPB) {
-		/* The SYS_CFG register is write protected; it must be read twice before its value can be modified. */
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
-
-		/* Make bits [6..5] the same as they were before the f/w download. */
-		/* This setting forces the h/w to re-populate the SP RAM area with the string descriptor. */
- value = value | (ps_adapter->syscfgBefFwDld & 0x00000060);
- wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- }
-
- /* killing all submitted URBs. */
- psIntfAdapter->psAdapter->StopAllXaction = TRUE;
- Bcm_kill_all_URBs(psIntfAdapter);
- /* Reset the UMA-B Device */
- if (ps_adapter->chip_id >= T3LPB) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Resetting UMA-B\n");
- retval = usb_reset_device(psIntfAdapter->udev);
- psIntfAdapter->psAdapter->StopAllXaction = false;
-
- if (retval != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reset failed with ret value :%d", retval);
- goto err_exit;
- }
-
- if (ps_adapter->chip_id == BCS220_2 ||
- ps_adapter->chip_id == BCS220_2BC ||
- ps_adapter->chip_id == BCS250_BC ||
- ps_adapter->chip_id == BCS220_3) {
-
- bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
- goto err_exit;
- }
- /* setting 0th bit */
- value |= (1<<0);
- retval = wrmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- }
- } else {
- bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (bytes < 0) {
- retval = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
- goto err_exit;
- }
- value &= (~(1<<16));
- retval = wrmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
-
- /* Toggling the GPIO 8, 9 */
- value = 0;
- retval = wrmalt(ps_adapter, GPIO_OUTPUT_REGISTER, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- value = 0x300;
- retval = wrmalt(ps_adapter, GPIO_MODE_REGISTER, &value, sizeof(value));
- if (retval < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "write failed with status :%d", retval);
- goto err_exit;
- }
- mdelay(50);
- }
-
- /* ps_adapter->downloadDDR = false; */
- if (ps_adapter->bFlashBoot) {
-		/* In flash boot mode the MIPS state register has reverse polarity,
-		 * so just OR in bit 30 to put the MIPS into the reset state.
-		 */
- rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
- uiResetValue |= (1<<30);
- wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &uiResetValue, sizeof(uiResetValue));
- }
-
- if (ps_adapter->chip_id >= T3LPB) {
- uiResetValue = 0;
-		/*
-		 * Workaround for the SYSCFG issue:
-		 * read SYSCFG twice to make it writable.
-		 */
- rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
- if (uiResetValue & (1<<4)) {
- uiResetValue = 0;
- rdmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue)); /* 2nd read to make it writable. */
- uiResetValue &= (~(1<<4));
- wrmalt(ps_adapter, SYS_CFG, &uiResetValue, sizeof(uiResetValue));
- }
- }
- uiResetValue = 0;
- wrmalt(ps_adapter, 0x0f01186c, &uiResetValue, sizeof(uiResetValue));
-
-err_exit:
- psIntfAdapter->psAdapter->StopAllXaction = false;
- return retval;
-}
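reset_card_proc() above relies on the SYS_CFG unlock quirk noted in its comments: read the register twice, then write. A minimal sketch of that access pattern, using the driver's own rdmalt()/wrmalt() helpers and the SYS_CFG constant (the helper name is hypothetical and error handling is omitted for brevity):

/* Hypothetical helper: unlock-and-update pattern for the write-protected
 * SYS_CFG register.
 */
static void sys_cfg_set_bits(struct bcm_mini_adapter *ad, unsigned int mask)
{
	unsigned int value = 0;

	rdmalt(ad, SYS_CFG, &value, sizeof(value));	/* 1st read */
	rdmalt(ad, SYS_CFG, &value, sizeof(value));	/* 2nd read makes SYS_CFG writable */
	value |= mask;					/* e.g. the saved bits [6..5] */
	wrmalt(ad, SYS_CFG, &value, sizeof(value));
}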
-
-int run_card_proc(struct bcm_mini_adapter *ps_adapter)
-{
- int status = STATUS_SUCCESS;
- int bytes;
-
- unsigned int value = 0;
-
-	bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
-	if (bytes < 0) {
-		status = bytes;
-		BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
-		return status;
-	}
-
-	if (ps_adapter->bFlashBoot)
-		value &= (~(1<<30));
-	else
-		value |= (1<<30);
-
-	if (wrmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
-		BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
-		return STATUS_FAILURE;
-	}
- return status;
-}
-
-int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter)
-{
- int status;
- unsigned int value = 0;
-	/*
-	 * Create the worker threads first, then download the
-	 * firmware/DDR settings.
-	 */
- status = create_worker_threads(ps_adapter);
- if (status < 0)
- return status;
-
- status = bcm_parse_target_params(ps_adapter);
- if (status)
- return status;
-
- if (ps_adapter->chip_id >= T3LPB) {
- rdmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
- ps_adapter->syscfgBefFwDld = value;
-
- if ((value & 0x60) == 0)
- ps_adapter->bFlashBoot = TRUE;
- }
-
- reset_card_proc(ps_adapter);
-
- /* Initializing the NVM. */
- BcmInitNVM(ps_adapter);
- status = ddr_init(ps_adapter);
- if (status) {
- pr_err(DRV_NAME "ddr_init Failed\n");
- return status;
- }
-
- /* Download cfg file */
- status = buffDnldVerify(ps_adapter,
- (PUCHAR)ps_adapter->pstargetparams,
- sizeof(struct bcm_target_params),
- CONFIG_BEGIN_ADDR);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
- goto OUT;
- }
-
- if (register_networkdev(ps_adapter)) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
- return -EIO;
- }
-
- if (false == ps_adapter->AutoFirmDld) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoFirmDld Disabled in CFG File..\n");
-		/* If auto f/w download is disabled, register the control */
-		/* interface here, after the mailbox. */
- if (register_control_device_interface(ps_adapter) < 0) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
- return -EIO;
- }
- return STATUS_SUCCESS;
- }
-
- /*
- * Do the LED Settings here. It will be used by the Firmware Download
- * Thread.
- */
-
-	/*
-	 * 1. If the LED settings fail, do not stop; continue with the firmware download.
-	 * 2. This init happens only if the cfg file is present; otherwise it is
-	 *    called from the ioctl context.
-	 */
-
- status = InitLedSettings(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "INIT LED FAILED\n");
- return status;
- }
-
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = DRIVER_INIT;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = FW_DOWNLOAD;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- value = 0;
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
- if (ps_adapter->eNVMType == NVM_FLASH) {
- status = PropagateCalParamsFromFlashToMemory(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Propagation of Cal param failed ..");
- goto OUT;
- }
- }
-
-	/* Download Firmware */
- status = BcmFileDownload(ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR);
- if (status != 0) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Firmware File is present...\n");
- goto OUT;
- }
-
- status = run_card_proc(ps_adapter);
- if (status) {
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "run_card_proc Failed\n");
- goto OUT;
- }
-
- ps_adapter->fw_download_done = TRUE;
- mdelay(10);
-
-OUT:
- if (ps_adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- ps_adapter->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&ps_adapter->LEDInfo.notify_led_event);
- }
-
- return status;
-}
-
-static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
-{
- struct file *flp = NULL;
- char *buff;
- int len = 0;
-
- buff = kmalloc(BUFFER_1K, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- Adapter->pstargetparams = kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
- if (Adapter->pstargetparams == NULL) {
- kfree(buff);
- return -ENOMEM;
- }
-
- flp = open_firmware_file(Adapter, CFG_FILE);
- if (!flp) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE\n", CFG_FILE);
- kfree(buff);
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -ENOENT;
- }
- len = kernel_read(flp, 0, buff, BUFFER_1K);
- filp_close(flp, NULL);
-
- if (len != sizeof(struct bcm_target_params)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n");
- kfree(buff);
- kfree(Adapter->pstargetparams);
- Adapter->pstargetparams = NULL;
- return -ENOENT;
- }
-
- /* Check for autolink in config params */
- /*
- * Values in Adapter->pstargetparams are in network byte order
- */
- memcpy(Adapter->pstargetparams, buff, sizeof(struct bcm_target_params));
- kfree(buff);
- beceem_parse_target_struct(Adapter);
- return STATUS_SUCCESS;
-}
-
-void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiHostDrvrCfg6 = 0, uiEEPROMFlag = 0;
-
- if (ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE) {
- pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
- Adapter->AutoSyncup = false;
- } else {
- pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
- Adapter->AutoSyncup = TRUE;
- }
-
- if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE) {
- pr_info(DRV_NAME ": Enabling autolink up");
- Adapter->AutoLinkUp = TRUE;
- } else {
- pr_info(DRV_NAME ": Disabling autolink up");
- Adapter->AutoLinkUp = false;
- }
-	/* Set the DDR setting. */
- Adapter->DDRSetting = (ntohl(Adapter->pstargetparams->HostDrvrConfig6) >> 8)&0x0F;
- Adapter->ulPowerSaveMode = (ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
- pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting);
- pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode);
- if (ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD) {
- pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = TRUE;
- } else {
- pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
- Adapter->AutoFirmDld = false;
- }
- uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
- Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
- pr_info(DRV_NAME ": MIPSConfig : 0x%X\n", Adapter->bMipsConfig);
- /* used for backward compatibility. */
- Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01;
- Adapter->PmuMode = (uiHostDrvrCfg6 >> 24) & 0x03;
- pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode);
-
- if ((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT) & (0x01)) {
- Adapter->bDoSuspend = TRUE;
- pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
- }
-
- uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
- pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n", uiEEPROMFlag);
- Adapter->eNVMType = (enum bcm_nvm_type)((uiEEPROMFlag>>4)&0x3);
- Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
- Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
- Adapter->bSectorSizeOverride = (bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
-
- if (ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x01)
- Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE;
-
- if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
- doPowerAutoCorrection(Adapter);
-}
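For reference, the HostDrvrConfig6 decoding performed by beceem_parse_target_struct() can be collected into one place. This is only an illustrative sketch: the masks and shift positions are taken from the code above, the helper itself is hypothetical, and (unlike the original, which only sets bDoSuspend when the bit is 1) it assigns every field unconditionally.

/* Hypothetical summary of the HostDrvrConfig6 decode above.
 * cfg must already be in host byte order (ntohl applied).
 */
static void decode_host_drvr_cfg6(struct bcm_mini_adapter *ad, unsigned int cfg)
{
	ad->AutoLinkUp      = !!(cfg & AUTO_LINKUP_ENABLE);
	ad->AutoFirmDld     = !!(cfg & AUTO_FIRM_DOWNLOAD);
	ad->DDRSetting      = (cfg >> 8) & 0x0F;	/* bits [11..8]  */
	ad->ulPowerSaveMode = (cfg >> 12) & 0x0F;	/* bits [15..12] */
	ad->bDPLLConfig     = (cfg >> 19) & 0x01;	/* bit 19        */
	ad->bMipsConfig     = (cfg >> 20) & 0x01;	/* bit 20        */
	ad->PmuMode         = (cfg >> 24) & 0x03;	/* bits [25..24] */
	ad->bDoSuspend      = (cfg >> HOST_BUS_SUSPEND_BIT) & 0x01;
}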
-
-static void doPowerAutoCorrection(struct bcm_mini_adapter *psAdapter)
-{
- unsigned int reporting_mode;
-
- reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) & 0x02;
- psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1);
-
- if (reporting_mode) {
-		BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "can't do suspend/resume as reporting mode is enabled");
- psAdapter->bDoSuspend = false;
- }
-
- if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB)) {
-		/* If reporting mode is enabled, switch the PMU to PMC. */
-		psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
-		psAdapter->bDoSuspend = false;
-
-		/* clear bits [15..12] */
- psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl((0xF << 12)));
- /* placing the power save mode option */
- psAdapter->pstargetparams->HostDrvrConfig6 |= htonl((psAdapter->ulPowerSaveMode << 12));
- } else if (psAdapter->bIsAutoCorrectEnabled == false) {
- /* remove the autocorrect disable bit set before dumping. */
- psAdapter->ulPowerSaveMode &= ~(1 << 3);
- psAdapter->pstargetparams->HostDrvrConfig6 &= ~(htonl(1 << 15));
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Using Forced User Choice: %lx\n", psAdapter->ulPowerSaveMode);
- }
-}
-
-static void convertEndian(unsigned char rwFlag, unsigned int *puiBuffer, unsigned int uiByteCount)
-{
- unsigned int uiIndex = 0;
-
- if (RWM_WRITE == rwFlag) {
- for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++)
- puiBuffer[uiIndex] = htonl(puiBuffer[uiIndex]);
- } else {
- for (uiIndex = 0; uiIndex < (uiByteCount/sizeof(unsigned int)); uiIndex++)
- puiBuffer[uiIndex] = ntohl(puiBuffer[uiIndex]);
- }
-}
-
-int rdm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- return Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
-}
-
-int wrm(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- int iRetVal;
-
- iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter,
- uiAddress, pucBuff, sSize);
- return iRetVal;
-}
-
-int wrmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- convertEndian(RWM_WRITE, pucBuff, size);
- return wrm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
-}
-
-int rdmalt(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int uiRetVal = 0;
-
- uiRetVal = rdm(Adapter, uiAddress, (PUCHAR)pucBuff, size);
- convertEndian(RWM_READ, (unsigned int *)pucBuff, size);
-
- return uiRetVal;
-}
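rdmalt()/wrmalt() wrap rdm()/wrm() and convert each 32-bit word to or from network byte order, so callers always work with host-order values. A small usage sketch of the resulting read-modify-write idiom (the register address and bit number are placeholders, not real hardware definitions):

/* Usage sketch only: read-modify-write a 32-bit device register through
 * the host-order wrappers above. 'reg' and 'bit' are hypothetical.
 */
static int example_set_register_bit(struct bcm_mini_adapter *ad,
				    unsigned int reg, unsigned int bit)
{
	unsigned int value = 0;
	int ret;

	ret = rdmalt(ad, reg, &value, sizeof(value));
	if (ret < 0)
		return ret;

	value |= (1U << bit);
	return wrmalt(ad, reg, &value, sizeof(value));
}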
-
-int wrmWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, PCHAR pucBuff, size_t sSize)
-{
- int status = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- status = -EACCES;
- goto exit;
- }
-
- status = wrm(Adapter, uiAddress, pucBuff, sSize);
-exit:
- up(&Adapter->rdmwrmsync);
- return status;
-}
-
-int wrmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int iRetVal = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
-
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- iRetVal = -EACCES;
- goto exit;
- }
-
- iRetVal = wrmalt(Adapter, uiAddress, pucBuff, size);
-exit:
- up(&Adapter->rdmwrmsync);
- return iRetVal;
-}
-
-int rdmaltWithLock(struct bcm_mini_adapter *Adapter, unsigned int uiAddress, unsigned int *pucBuff, size_t size)
-{
- int uiRetVal = STATUS_SUCCESS;
-
- down(&Adapter->rdmwrmsync);
- if ((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus == TRUE) ||
- (Adapter->bPreparingForLowPowerMode == TRUE)) {
-
- uiRetVal = -EACCES;
- goto exit;
- }
-
- uiRetVal = rdmalt(Adapter, uiAddress, pucBuff, size);
-exit:
- up(&Adapter->rdmwrmsync);
- return uiRetVal;
-}
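The three *WithLock variants above repeat one pattern: take rdmwrmsync, refuse the access with -EACCES while the device is idle, shut down, or preparing for low power, and release the semaphore afterwards. A hypothetical consolidation, shown only to make the shared guard explicit:

static bool bcm_device_accessible(struct bcm_mini_adapter *ad)
{
	/* Register access is refused in (or on the way to) low-power states. */
	return !(ad->IdleMode == TRUE ||
		 ad->bShutStatus == TRUE ||
		 ad->bPreparingForLowPowerMode == TRUE);
}

static int rdmalt_guarded(struct bcm_mini_adapter *ad, unsigned int addr,
			  unsigned int *buf, size_t size)
{
	int ret;

	down(&ad->rdmwrmsync);
	ret = bcm_device_accessible(ad) ? rdmalt(ad, addr, buf, size) : -EACCES;
	up(&ad->rdmwrmsync);
	return ret;
}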
-
-static void HandleShutDownModeWakeup(struct bcm_mini_adapter *Adapter)
-{
- int clear_abort_pattern = 0, Status = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
- /* target has woken up From Shut Down */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Clearing Shut Down Software abort pattern\n");
- Status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, (unsigned int *)&clear_abort_pattern, sizeof(clear_abort_pattern));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "WRM to SW_ABORT_IDLEMODE_LOC failed with err:%d", Status);
- return;
- }
-
- if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
- msleep(100);
- InterfaceHandleShutdownModeWakeup(Adapter);
- msleep(100);
- }
-
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- Adapter->bTriedToWakeUpFromlowPowerMode = false;
- Adapter->bShutStatus = false;
- wake_up(&Adapter->lowpower_mode_wait_queue);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
-}
-
-static void SendShutModeResponse(struct bcm_mini_adapter *Adapter)
-{
- struct bcm_link_request stShutdownResponse;
- unsigned int NVMAccess = 0, lowPwrAbortMsg = 0;
- unsigned int Status = 0;
-
- memset(&stShutdownResponse, 0, sizeof(struct bcm_link_request));
- stShutdownResponse.Leader.Status = LINK_UP_CONTROL_REQ;
- stShutdownResponse.Leader.PLength = 8; /* 8 bytes; */
- stShutdownResponse.szData[0] = LINK_UP_ACK;
- stShutdownResponse.szData[1] = LINK_SHUTDOWN_REQ_FROM_FIRMWARE;
-
-	/*********************************
-	 * down_trylock -
-	 * if [ semaphore is available ]
-	 *   acquire semaphore and return 0;
-	 * else
-	 *   return a non-zero value;
-	 *
-	 ***********************************/
-
- NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- lowPwrAbortMsg = down_trylock(&Adapter->LowPowerModeSync);
-
- if (NVMAccess || lowPwrAbortMsg || atomic_read(&Adapter->TotalPacketCount)) {
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
-
-		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Device access is in progress, NACKing the Shut Down MODE\n");
- stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK- device access is going on. */
- Adapter->bPreparingForLowPowerMode = false;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "Sending SHUTDOWN MODE ACK\n");
- stShutdownResponse.szData[2] = SHUTDOWN_ACK_FROM_DRIVER; /* ShutDown ACK */
-
- /* Wait for the LED to TURN OFF before sending ACK response */
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- int iRetVal = 0;
-
- /* Wake the LED Thread with LOWPOWER_MODE_ENTER State */
- Adapter->DriverState = LOWPOWER_MODE_ENTER;
- wake_up(&Adapter->LEDInfo.notify_led_event);
-
-			/* Wait up to 1 second for the LED to turn off */
- iRetVal = wait_event_timeout(Adapter->LEDInfo.idleModeSyncEvent, Adapter->LEDInfo.bIdle_led_off, msecs_to_jiffies(1000));
-
-			/* If waiting for the LED timed out, go back to NO_NETWORK_ENTRY and NACK the device */
- if (iRetVal <= 0) {
-				stShutdownResponse.szData[2] = SHUTDOWN_NACK_FROM_DRIVER; /* NACK - LED sync timed out. */
- Adapter->DriverState = NO_NETWORK_ENTRY;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
-
- if (stShutdownResponse.szData[2] == SHUTDOWN_ACK_FROM_DRIVER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ACKING SHUTDOWN MODE !!!!!!!!!");
- down(&Adapter->rdmwrmsync);
- Adapter->bPreparingForLowPowerMode = TRUE;
- up(&Adapter->rdmwrmsync);
- /* Killing all URBS. */
- if (Adapter->bDoSuspend == TRUE)
- Bcm_kill_all_URBs((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- } else {
- Adapter->bPreparingForLowPowerMode = false;
- }
-
- if (!NVMAccess)
- up(&Adapter->NVMRdmWrmLock);
-
- if (!lowPwrAbortMsg)
- up(&Adapter->LowPowerModeSync);
- }
-
- Status = CopyBufferToControlPacket(Adapter, &stShutdownResponse);
- if (Status != STATUS_SUCCESS) {
-		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "failed to send the shutdown mode response\n");
- Adapter->bPreparingForLowPowerMode = false;
- StartInterruptUrb((struct bcm_interface_adapter *)(Adapter->pvInterfaceAdapter));
- }
-}
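The heart of SendShutModeResponse() is the try-lock decision spelled out in the down_trylock comment above: ACK the shutdown only if both semaphores could be taken and no traffic is queued; otherwise release whatever was acquired and NACK. A stripped-down sketch of just that decision (hypothetical helper; on success the caller still has to release both semaphores after sending the ACK, as the original code does):

static bool can_ack_shutdown(struct bcm_mini_adapter *ad)
{
	int nvm_busy = down_trylock(&ad->NVMRdmWrmLock);	/* 0 == acquired */
	int pwr_busy = down_trylock(&ad->LowPowerModeSync);	/* 0 == acquired */
	bool ack = !nvm_busy && !pwr_busy &&
		   !atomic_read(&ad->TotalPacketCount);

	if (!ack) {
		/* Release only the semaphores we actually acquired. */
		if (!nvm_busy)
			up(&ad->NVMRdmWrmLock);
		if (!pwr_busy)
			up(&ad->LowPowerModeSync);
	}
	return ack;
}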
-
-static void HandleShutDownModeRequest(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer)
-{
- unsigned int uiResetValue = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "====>\n");
-
- if (*(pucBuffer+1) == COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW) {
- HandleShutDownModeWakeup(Adapter);
- } else if (*(pucBuffer+1) == LINK_SHUTDOWN_REQ_FROM_FIRMWARE) {
- /* Target wants to go to Shut Down Mode */
- /* InterfacePrepareForShutdown(Adapter); */
- if (Adapter->chip_id == BCS220_2 ||
- Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id == BCS250_BC ||
- Adapter->chip_id == BCS220_3) {
-
- rdmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
- uiResetValue |= (1<<17);
- wrmalt(Adapter, HPM_CONFIG_MSW, &uiResetValue, 4);
- }
-
- SendShutModeResponse(Adapter);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "ShutDownModeResponse:Notification received: Sending the response(Ack/Nack)\n");
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL, "<====\n");
-}
-
-void ResetCounters(struct bcm_mini_adapter *Adapter)
-{
- beceem_protocol_reset(Adapter);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- atomic_set(&Adapter->TotalPacketCount, 0);
- Adapter->fw_download_done = false;
- Adapter->LinkStatus = 0;
- Adapter->AutoLinkUp = false;
- Adapter->IdleMode = false;
- Adapter->bShutStatus = false;
-}
-
-struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIP) &&
- !Adapter->astFragmentedPktClassifierTable[uiIndex].bOutOfOrderFragment)
-
- return Adapter->astFragmentedPktClassifierTable[uiIndex].pstMatchedClassifierEntry;
- }
- return NULL;
-}
-
-void AddFragIPClsEntry(struct bcm_mini_adapter *Adapter, struct bcm_fragmented_packet_info *psFragPktInfo)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if (!Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) {
- memcpy(&Adapter->astFragmentedPktClassifierTable[uiIndex], psFragPktInfo, sizeof(struct bcm_fragmented_packet_info));
- break;
- }
- }
-}
-
-void DelFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIp)
-{
- unsigned int uiIndex = 0;
-
- for (uiIndex = 0; uiIndex < MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES; uiIndex++) {
- if ((Adapter->astFragmentedPktClassifierTable[uiIndex].bUsed) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].usIpIdentification == usIpIdentification) &&
- (Adapter->astFragmentedPktClassifierTable[uiIndex].ulSrcIpAddress == SrcIp))
-
- memset(&Adapter->astFragmentedPktClassifierTable[uiIndex], 0, sizeof(struct bcm_fragmented_packet_info));
- }
-}
-
-void update_per_cid_rx(struct bcm_mini_adapter *Adapter)
-{
- unsigned int qindex = 0;
-
- if ((jiffies - Adapter->liDrainCalculated) < XSECONDS)
- return;
-
- for (qindex = 0; qindex < HiPriority; qindex++) {
- if (Adapter->PackInfo[qindex].ucDirection == 0) {
- Adapter->PackInfo[qindex].uiCurrentRxRate =
- (Adapter->PackInfo[qindex].uiCurrentRxRate +
- Adapter->PackInfo[qindex].uiThisPeriodRxBytes) / 2;
-
- Adapter->PackInfo[qindex].uiThisPeriodRxBytes = 0;
- } else {
- Adapter->PackInfo[qindex].uiCurrentDrainRate =
- (Adapter->PackInfo[qindex].uiCurrentDrainRate +
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes) / 2;
- Adapter->PackInfo[qindex].uiThisPeriodSentBytes = 0;
- }
- }
- Adapter->liDrainCalculated = jiffies;
-}
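The averaging in update_per_cid_rx() is a simple one-tap smoothing: new_rate = (old_rate + bytes_this_period) / 2, after which the per-period counter is reset. For example, with uiCurrentDrainRate = 4000 and uiThisPeriodSentBytes = 6000, the updated drain rate is (4000 + 6000) / 2 = 5000 and uiThisPeriodSentBytes returns to 0.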
-
-void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter)
-{
- int iIndex = 0;
- u32 uibuff[MAX_TARGET_DSX_BUFFERS];
- int bytes;
-
- if (!atomic_read(&Adapter->uiMBupdate))
- return;
-
- bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (unsigned int *)uibuff, sizeof(unsigned int) * MAX_TARGET_DSX_BUFFERS);
- if (bytes < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
- return;
- }
-
- for (iIndex = 0; iIndex < HiPriority; iIndex++) {
- if (Adapter->PackInfo[iIndex].bValid && Adapter->PackInfo[iIndex].ucDirection) {
- if (Adapter->PackInfo[iIndex].usVCID_Value < MAX_TARGET_DSX_BUFFERS)
- atomic_set(&Adapter->PackInfo[iIndex].uiPerSFTxResourceCount, uibuff[Adapter->PackInfo[iIndex].usVCID_Value]);
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid VCID : %x\n", Adapter->PackInfo[iIndex].usVCID_Value);
- }
- }
- atomic_set(&Adapter->uiMBupdate, false);
-}
-
-void flush_queue(struct bcm_mini_adapter *Adapter, unsigned int iQIndex)
-{
- struct sk_buff *PacketToDrop = NULL;
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
-
- while (Adapter->PackInfo[iQIndex].FirstTxQueue && atomic_read(&Adapter->TotalPacketCount)) {
- PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue;
- if (PacketToDrop && PacketToDrop->len) {
- netstats->tx_dropped++;
- DEQUEUEPACKET(Adapter->PackInfo[iQIndex].FirstTxQueue, Adapter->PackInfo[iQIndex].LastTxQueue);
- Adapter->PackInfo[iQIndex].uiCurrentPacketsOnHost--;
- Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= PacketToDrop->len;
-
- /* Adding dropped statistics */
- Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len;
- Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
- dev_kfree_skb(PacketToDrop);
- atomic_dec(&Adapter->TotalPacketCount);
- }
- }
- spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
-}
-
-static void beceem_protocol_reset(struct bcm_mini_adapter *Adapter)
-{
- int i;
-
- if (netif_msg_link(Adapter))
- pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name);
-
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
-
- Adapter->IdleMode = false;
- Adapter->LinkUpStatus = false;
- ClearTargetDSXBuffer(Adapter, 0, TRUE);
- /* Delete All Classifier Rules */
-
- for (i = 0; i < HiPriority; i++)
- DeleteAllClassifiersForSF(Adapter, i);
-
- flush_all_queues(Adapter);
-
- if (Adapter->TimerActive == TRUE)
- Adapter->TimerActive = false;
-
- memset(Adapter->astFragmentedPktClassifierTable, 0, sizeof(struct bcm_fragmented_packet_info) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
-
- for (i = 0; i < HiPriority; i++) {
- /* resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF. */
- /* It is same between MIBs and SF. */
- memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable, 0, sizeof(struct bcm_mibs_parameters));
- }
-}
diff --git a/drivers/staging/bcm/PHSDefines.h b/drivers/staging/bcm/PHSDefines.h
deleted file mode 100644
index cd78ee4ffa22..000000000000
--- a/drivers/staging/bcm/PHSDefines.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef BCM_PHS_DEFINES_H
-#define BCM_PHS_DEFINES_H
-
-#define PHS_INVALID_TABLE_INDEX 0xffffffff
-#define PHS_MEM_TAG "_SHP"
-
-/* PHS Defines */
-#define STATUS_PHS_COMPRESSED 0xa1
-#define STATUS_PHS_NOCOMPRESSION 0xa2
-#define APPLY_PHS 1
-#define MAX_NO_BIT 7
-#define ZERO_PHSI 0
-#define VERIFY 0
-#define SIZE_MULTIPLE_32 4
-#define UNCOMPRESSED_PACKET 0
-#define DYNAMIC 0
-#define SUPPRESS 0x80
-#define NO_CLASSIFIER_MATCH 0
-#define SEND_PACKET_UNCOMPRESSED 0
-#define PHSI_IS_ZERO 0
-#define PHSI_LEN 1
-#define ERROR_LEN 0
-#define PHS_BUFFER_SIZE 1532
-#define MAX_PHSRULE_PER_SF 20
-#define MAX_SERVICEFLOWS 17
-
-/* PHS Error Defines */
-#define PHS_SUCCESS 0
-#define ERR_PHS_INVALID_DEVICE_EXETENSION 0x800
-#define ERR_PHS_INVALID_PHS_RULE 0x801
-#define ERR_PHS_RULE_ALREADY_EXISTS 0x802
-#define ERR_SF_MATCH_FAIL 0x803
-#define ERR_INVALID_CLASSIFIERTABLE_FOR_SF 0x804
-#define ERR_SFTABLE_FULL 0x805
-#define ERR_CLSASSIFIER_TABLE_FULL 0x806
-#define ERR_PHSRULE_MEMALLOC_FAIL 0x807
-#define ERR_CLSID_MATCH_FAIL 0x808
-#define ERR_PHSRULE_MATCH_FAIL 0x809
-
-struct bcm_phs_rule {
- u8 u8PHSI;
- u8 u8PHSFLength;
- u8 u8PHSF[MAX_PHS_LENGTHS];
- u8 u8PHSMLength;
- u8 u8PHSM[MAX_PHS_LENGTHS];
- u8 u8PHSS;
- u8 u8PHSV;
- u8 u8RefCnt;
- u8 bUnclassifiedPHSRule;
- u8 u8Reserved[3];
- long PHSModifiedBytes;
- unsigned long PHSModifiedNumPackets;
- unsigned long PHSErrorNumPackets;
-};
-
-enum bcm_phs_classifier_context {
- eActiveClassifierRuleContext,
- eOldClassifierRuleContext
-};
-
-struct bcm_phs_classifier_entry {
- u8 bUsed;
- u16 uiClassifierRuleId;
- u8 u8PHSI;
- struct bcm_phs_rule *pstPhsRule;
- u8 bUnclassifiedPHSRule;
-};
-
-struct bcm_phs_classifier_table {
- u16 uiTotalClassifiers;
- struct bcm_phs_classifier_entry stActivePhsRulesList[MAX_PHSRULE_PER_SF];
- struct bcm_phs_classifier_entry stOldPhsRulesList[MAX_PHSRULE_PER_SF];
- u16 uiOldestPhsRuleIndex;
-};
-
-struct bcm_phs_entry {
- u8 bUsed;
- u16 uiVcid;
- struct bcm_phs_classifier_table *pstClassifierTable;
-};
-
-struct bcm_phs_table {
- u16 uiTotalServiceFlows;
- struct bcm_phs_entry stSFList[MAX_SERVICEFLOWS];
-};
-
-struct bcm_phs_extension {
- /* PHS Specific data */
- struct bcm_phs_table *pstServiceFlowPhsRulesTable;
- void *CompressedTxBuffer;
- void *UnCompressedRxBuffer;
-};
-
-#endif
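The structures above form a two-level table: bcm_phs_table holds one bcm_phs_entry per service flow (keyed by VCID), each used entry owns a bcm_phs_classifier_table, and its classifier entries point at reference-counted bcm_phs_rule objects. A minimal lookup sketch over these definitions (a hypothetical helper, not the driver's GetServiceFlowEntry()/GetClassifierEntry()):

/* Hypothetical lookup over the tables defined above: find the active PHS
 * rule for a (VCID, classifier id) pair, or NULL if none is installed.
 */
static struct bcm_phs_rule *find_phs_rule(struct bcm_phs_table *tbl,
					  u16 vcid, u16 cls_id)
{
	int i, j;

	for (i = 0; i < MAX_SERVICEFLOWS; i++) {
		struct bcm_phs_entry *sf = &tbl->stSFList[i];
		struct bcm_phs_classifier_table *ct;

		if (!sf->bUsed || sf->uiVcid != vcid || !sf->pstClassifierTable)
			continue;

		ct = sf->pstClassifierTable;
		for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
			struct bcm_phs_classifier_entry *e =
				&ct->stActivePhsRulesList[j];

			if (e->bUsed && e->uiClassifierRuleId == cls_id)
				return e->pstPhsRule;
		}
	}
	return NULL;
}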
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
deleted file mode 100644
index 5f4e503d54ec..000000000000
--- a/drivers/staging/bcm/PHSModule.c
+++ /dev/null
@@ -1,1703 +0,0 @@
-#include "headers.h"
-
-static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_table *psServiceFlowTable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_entry *pstServiceFlowEntry,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT CreateClassifierPHSRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- enum bcm_phs_classifier_context eClsContext,
- B_UINT8 u8AssociatedPHSI);
-
-static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_entry *pstClassifierEntry,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-static bool ValidatePHSRuleComplete(const struct bcm_phs_rule *psPhsRule);
-
-static bool DerefPhsRule(B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *pstPhsRule);
-
-static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable,
- B_UINT32 uiClsid,
- enum bcm_phs_classifier_context eClsContext,
- struct bcm_phs_classifier_entry **ppstClassifierEntry);
-
-static UINT GetPhsRuleEntry(struct bcm_phs_classifier_table *pstClassifierTable,
- B_UINT32 uiPHSI,
- enum bcm_phs_classifier_context eClsContext,
- struct bcm_phs_rule **ppstPhsRule);
-
-static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable);
-
-static int phs_compress(struct bcm_phs_rule *phs_members,
- unsigned char *in_buf,
- unsigned char *out_buf,
- unsigned int *header_size,
- UINT *new_header_size);
-
-static int verify_suppress_phsf(unsigned char *in_buffer,
- unsigned char *out_buffer,
- unsigned char *phsf,
- unsigned char *phsm,
- unsigned int phss,
- unsigned int phsv,
- UINT *new_header_size);
-
-static int phs_decompress(unsigned char *in_buf,
- unsigned char *out_buf,
- struct bcm_phs_rule *phs_rules,
- UINT *header_size);
-
-static ULONG PhsCompress(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pOldHeaderSize,
- UINT *pNewHeaderSize);
-
-static ULONG PhsDeCompress(void *pvContext,
- B_UINT16 uiVcid,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pInHeaderSize,
- UINT *pOutHeaderSize);
-
-#define IN
-#define OUT
-
-/*
- * Function: PHSTransmit
- * Description: This routine handles PHS (Payload Header Suppression) for the Tx path.
- * It extracts the fragment of the NDIS_PACKET containing the header
- * to be suppressed, suppresses it by invoking the exported PHS compress routine,
- * and copies the suppressed header data back into the NDIS_PACKET.
- *
- * Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context
- * IN Packet - NDIS packet containing data to be transmitted
- * IN USHORT Vcid - VCID of the connection on which the packet is being sent. Used to
- * identify the PHS rule to be applied.
- * B_UINT16 uiClassifierRuleID - Classifier Rule ID
- * BOOLEAN bHeaderSuppressionEnabled - indicates if header suppression is enabled for the SF.
- *
- * Return: STATUS_SUCCESS - If the send was successful.
- * Other - If an error occurred.
- */
-
-int PHSTransmit(struct bcm_mini_adapter *Adapter,
- struct sk_buff **pPacket,
- USHORT Vcid,
- B_UINT16 uiClassifierRuleID,
- bool bHeaderSuppressionEnabled,
- UINT *PacketLen,
- UCHAR bEthCSSupport)
-{
-	/* PHS Specific */
- UINT unPHSPktHdrBytesCopied = 0;
- UINT unPhsOldHdrSize = 0;
- UINT unPHSNewPktHeaderLen = 0;
- /* Pointer to PHS IN Hdr Buffer */
- PUCHAR pucPHSPktHdrInBuf =
- Adapter->stPhsTxContextInfo.ucaHdrSuppressionInBuf;
- /* Pointer to PHS OUT Hdr Buffer */
- PUCHAR pucPHSPktHdrOutBuf =
- Adapter->stPhsTxContextInfo.ucaHdrSuppressionOutBuf;
- UINT usPacketType;
- UINT BytesToRemove = 0;
- bool bPHSI = 0;
- LONG ulPhsStatus = 0;
- UINT numBytesCompressed = 0;
- struct sk_buff *newPacket = NULL;
- struct sk_buff *Packet = *pPacket;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "In PHSTransmit");
-
- if (!bEthCSSupport)
- BytesToRemove = ETH_HLEN;
-	/*
-	 * Accumulate the header from the NDIS packet, up to the maximum
-	 * size for which we support suppression.
-	 */
-
- usPacketType = ((struct ethhdr *)(Packet->data))->h_proto;
-
- pucPHSPktHdrInBuf = Packet->data + BytesToRemove;
- /* considering data after ethernet header */
- if ((*PacketLen - BytesToRemove) < MAX_PHS_LENGTHS)
- unPHSPktHdrBytesCopied = (*PacketLen - BytesToRemove);
- else
- unPHSPktHdrBytesCopied = MAX_PHS_LENGTHS;
-
- if ((unPHSPktHdrBytesCopied > 0) &&
- (unPHSPktHdrBytesCopied <= MAX_PHS_LENGTHS)) {
-
- /*
- * Step 2 Suppress Header using PHS and fill into intermediate
- * ucaPHSPktHdrOutBuf.
- * Suppress only if IP Header and PHS Enabled For the
- * Service Flow
- */
- if (((usPacketType == ETHERNET_FRAMETYPE_IPV4) ||
- (usPacketType == ETHERNET_FRAMETYPE_IPV6)) &&
- (bHeaderSuppressionEnabled)) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND,
- DBG_LVL_ALL,
- "\nTrying to PHS Compress Using Classifier rule 0x%X",
- uiClassifierRuleID);
- unPHSNewPktHeaderLen = unPHSPktHdrBytesCopied;
- ulPhsStatus = PhsCompress(&Adapter->stBCMPhsContext,
- Vcid,
- uiClassifierRuleID,
- pucPHSPktHdrInBuf,
- pucPHSPktHdrOutBuf,
- &unPhsOldHdrSize,
- &unPHSNewPktHeaderLen);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND,
- DBG_LVL_ALL,
- "\nPHS Old header Size : %d New Header Size %d\n",
- unPhsOldHdrSize, unPHSNewPktHeaderLen);
-
- if (unPHSNewPktHeaderLen == unPhsOldHdrSize) {
-
- if (ulPhsStatus == STATUS_PHS_COMPRESSED)
- bPHSI = *pucPHSPktHdrOutBuf;
-
- ulPhsStatus = STATUS_PHS_NOCOMPRESSION;
- }
-
- if (ulPhsStatus == STATUS_PHS_COMPRESSED) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- PHS_SEND, DBG_LVL_ALL,
- "PHS Sending packet Compressed");
-
- if (skb_cloned(Packet)) {
- newPacket =
- skb_copy(Packet, GFP_ATOMIC);
-
- if (newPacket == NULL)
- return STATUS_FAILURE;
-
- dev_kfree_skb(Packet);
- *pPacket = Packet = newPacket;
- pucPHSPktHdrInBuf =
- Packet->data + BytesToRemove;
- }
-
- numBytesCompressed = unPhsOldHdrSize -
- (unPHSNewPktHeaderLen + PHSI_LEN);
-
- memcpy(pucPHSPktHdrInBuf + numBytesCompressed,
- pucPHSPktHdrOutBuf,
- unPHSNewPktHeaderLen + PHSI_LEN);
- memcpy(Packet->data + numBytesCompressed,
- Packet->data, BytesToRemove);
- skb_pull(Packet, numBytesCompressed);
-
- return STATUS_SUCCESS;
- } else {
- /* if one byte headroom is not available,
- * increase it through skb_cow
- */
- if (!(skb_headroom(Packet) > 0)) {
-
- if (skb_cow(Packet, 1)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_PRINTK,
- 0, 0,
- "SKB Cow Failed\n");
- return STATUS_FAILURE;
- }
- }
- skb_push(Packet, 1);
-
-				/*
-				 * CAUTION: the MAC header gets corrupted here
-				 * for IP CS. It could be preserved by copying
-				 * 14 bytes, but that is not needed, so we let
-				 * it be corrupted.
-				 */
- *(Packet->data + BytesToRemove) = bPHSI;
- return STATUS_SUCCESS;
- }
- } else {
-
- if (!bHeaderSuppressionEnabled)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- PHS_SEND, DBG_LVL_ALL,
- "\nHeader Suppression Disabled For SF: No PHS\n");
-
- return STATUS_SUCCESS;
- }
- }
-
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- * "PHSTransmit : Dumping data packet After PHS"); */
- return STATUS_SUCCESS;
-}
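To visualize the in-place shift PHSTransmit() performs when compression succeeds (sizes illustrative; the 14-byte Ethernet header is only handled this way when Ethernet CS is not in use, i.e. BytesToRemove == ETH_HLEN):

    before the copies: | ETH (14) | old header (unPhsOldHdrSize)              | payload |
    after the copies:  | gap (numBytesCompressed) | ETH (14) | PHSI | new hdr  | payload |

skb_pull(Packet, numBytesCompressed) then drops the leading gap, leaving the PHSI byte plus the unPHSNewPktHeaderLen-byte compressed header immediately after the Ethernet header and flush against the payload.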
-
-int PHSReceive(struct bcm_mini_adapter *Adapter,
- USHORT usVcid,
- struct sk_buff *packet,
- UINT *punPacketLen,
- UCHAR *pucEthernetHdr,
- UINT bHeaderSuppressionEnabled)
-{
- u32 nStandardPktHdrLen = 0;
- u32 nTotalsuppressedPktHdrBytes = 0;
- int ulPhsStatus = 0;
- PUCHAR pucInBuff = NULL;
- UINT TotalBytesAdded = 0;
-
- if (!bHeaderSuppressionEnabled) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nPhs Disabled for incoming packet");
- return ulPhsStatus;
- }
-
- pucInBuff = packet->data;
-
- /* Restore PHS suppressed header */
- nStandardPktHdrLen = packet->len;
- ulPhsStatus = PhsDeCompress(&Adapter->stBCMPhsContext,
- usVcid,
- pucInBuff,
- Adapter->ucaPHSPktRestoreBuf,
- &nTotalsuppressedPktHdrBytes,
- &nStandardPktHdrLen);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "\nSuppressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x",
- nTotalsuppressedPktHdrBytes, nStandardPktHdrLen);
-
- if (ulPhsStatus != STATUS_PHS_COMPRESSED) {
- skb_pull(packet, 1);
- return STATUS_SUCCESS;
- } else {
- TotalBytesAdded = nStandardPktHdrLen -
- nTotalsuppressedPktHdrBytes - PHSI_LEN;
-
- if (TotalBytesAdded) {
- if (skb_headroom(packet) >= (SKB_RESERVE_ETHERNET_HEADER + TotalBytesAdded))
- skb_push(packet, TotalBytesAdded);
- else {
- if (skb_cow(packet, skb_headroom(packet) + TotalBytesAdded)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_PRINTK, 0, 0,
- "cow failed in receive\n");
- return STATUS_FAILURE;
- }
-
- skb_push(packet, TotalBytesAdded);
- }
- }
-
- memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf,
- nStandardPktHdrLen);
- }
-
- return STATUS_SUCCESS;
-}
-
-void DumpFullPacket(UCHAR *pBuf, UINT nPktLen)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,
- "Dumping Data Packet");
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,
- pBuf, nPktLen);
-}
-
-/*
- * Procedure: phs_init
- *
- * Description: This routine is responsible for allocating memory for classifier
- * and PHS rules.
- *
- * Arguments:
- * pPhsdeviceExtension - ptr to the device extension containing the PHS classifier
- * rules, PHS rules, RX/TX buffers, etc.
- *
- * Returns:
- * STATUS_SUCCESS (0) - if memory allocation was successful.
- * A negative errno   - if memory allocation fails.
- */
-int phs_init(struct bcm_phs_extension *pPhsdeviceExtension,
- struct bcm_mini_adapter *Adapter)
-{
- int i;
- struct bcm_phs_table *pstServiceFlowTable;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "\nPHS:phs_init function");
-
- if (pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
- return -EINVAL;
-
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable =
- kzalloc(sizeof(struct bcm_phs_table), GFP_KERNEL);
-
- if (!pPhsdeviceExtension->pstServiceFlowPhsRulesTable) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nAllocation ServiceFlowPhsRulesTable failed");
- return -ENOMEM;
- }
-
- pstServiceFlowTable = pPhsdeviceExtension->pstServiceFlowPhsRulesTable;
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
-		/* Operate on the table slot itself, not on a local copy,
-		 * so the allocated classifier table is actually kept.
-		 */
-		struct bcm_phs_entry *sServiceFlow =
-			&pstServiceFlowTable->stSFList[i];
-
-		sServiceFlow->pstClassifierTable =
-			kzalloc(sizeof(struct bcm_phs_classifier_table),
-				GFP_KERNEL);
-		if (!sServiceFlow->pstClassifierTable) {
-			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
-					DBG_LVL_ALL, "\nAllocation failed");
-			free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
-			pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
-			return -ENOMEM;
-		}
-	}
-
- pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
- if (pPhsdeviceExtension->CompressedTxBuffer == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nAllocation failed");
- free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
- return -ENOMEM;
- }
-
- pPhsdeviceExtension->UnCompressedRxBuffer =
- kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
- if (pPhsdeviceExtension->UnCompressedRxBuffer == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nAllocation failed");
- kfree(pPhsdeviceExtension->CompressedTxBuffer);
- free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
- pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
- return -ENOMEM;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "\n phs_init Successful");
- return STATUS_SUCCESS;
-}
-
-int PhsCleanup(IN struct bcm_phs_extension *pPHSDeviceExt)
-{
- if (pPHSDeviceExt->pstServiceFlowPhsRulesTable) {
- free_phs_serviceflow_rules(pPHSDeviceExt->pstServiceFlowPhsRulesTable);
- pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL;
- }
-
- kfree(pPHSDeviceExt->CompressedTxBuffer);
- pPHSDeviceExt->CompressedTxBuffer = NULL;
-
- kfree(pPHSDeviceExt->UnCompressedRxBuffer);
- pPHSDeviceExt->UnCompressedRxBuffer = NULL;
-
- return 0;
-}
-
-/*
- * PHS functions
- * PhsUpdateClassifierRule
- *
- * Routine Description:
- * Exported function to add or modify a PHS Rule.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
- * IN struct bcm_phs_rule *psPhsRule - The PHS Rule structure to be added to the PHS Rule table.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsUpdateClassifierRule(IN void *pvContext,
- IN B_UINT16 uiVcid ,
- IN B_UINT16 uiClsId ,
- IN struct bcm_phs_rule *psPhsRule,
- IN B_UINT8 u8AssociatedPHSI)
-{
- ULONG lStatus = 0;
- UINT nSFIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "PHS With Corr2 Changes\n");
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return ERR_PHS_INVALID_DEVICE_EXETENSION;
- }
-
- if (u8AssociatedPHSI == 0)
- return ERR_PHS_INVALID_PHS_RULE;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
-
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- /* This is a new SF. Create a mapping entry for this */
- lStatus = CreateSFToClassifierRuleMapping(uiVcid, uiClsId,
- pDeviceExtension->pstServiceFlowPhsRulesTable,
- psPhsRule,
- u8AssociatedPHSI);
- return lStatus;
- }
-
-	/* The SF already exists; add the PHS rule to the existing SF. */
- lStatus = CreateClassiferToPHSRuleMapping(uiVcid, uiClsId,
- pstServiceFlowEntry,
- psPhsRule,
- u8AssociatedPHSI);
-
- return lStatus;
-}
-
-/*
- * PhsDeletePHSRule
- *
- * Routine Description:
- * Deletes the specified phs Rule within Vcid
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT8 u8PHSI - the PHS Index identifying PHS rule to be deleted.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeletePHSRule(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT8 u8PHSI)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
- struct bcm_phs_classifier_entry *curr_entry;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "======>\n");
-
- if (pDeviceExtension) {
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
-
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
- if (pstClassifierRulesTable) {
- for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
- curr_entry = &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex];
- if (curr_entry->bUsed &&
- curr_entry->pstPhsRule &&
- (curr_entry->pstPhsRule->u8PHSI == u8PHSI)) {
-
- if (curr_entry->pstPhsRule->u8RefCnt)
- curr_entry->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_entry->pstPhsRule->u8RefCnt)
- kfree(curr_entry->pstPhsRule);
-
- memset(curr_entry,
- 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
- }
- }
- }
- return 0;
-}
-
-/*
- * PhsDeleteClassifierRule
- *
- * Routine Description:
- * Exported function to Delete a PHS Rule for the SFID,CLSID Pair.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
- * IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeleteClassifierRule(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- if (!pDeviceExtension)
- goto out;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- nClsidIndex =
- GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) &&
- (!pstClassifierEntry->bUnclassifiedPHSRule)) {
- if (pstClassifierEntry->pstPhsRule) {
- if (pstClassifierEntry->pstPhsRule->u8RefCnt)
- pstClassifierEntry->pstPhsRule->u8RefCnt--;
-
- if (0 == pstClassifierEntry->pstPhsRule->u8RefCnt)
- kfree(pstClassifierEntry->pstPhsRule);
- }
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
-
- nClsidIndex =
- GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eOldClassifierRuleContext,
- &pstClassifierEntry);
-
- if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) &&
- (!pstClassifierEntry->bUnclassifiedPHSRule)) {
- kfree(pstClassifierEntry->pstPhsRule);
- memset(pstClassifierEntry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
-
-out:
- return 0;
-}
-
-/*
- * PhsDeleteSFRules
- *
- * Routine Description:
- * Exported function to delete all PHS Rules for the SFID.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context
- * IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rules need to be deleted
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
- struct bcm_phs_classifier_entry *curr_clsf_entry;
- struct bcm_phs_classifier_entry *curr_rules_list;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "====>\n");
-
- if (!pDeviceExtension)
- goto out;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "SFID Match Failed\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
- if (pstClassifierRulesTable) {
- for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
- curr_clsf_entry =
- &pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex];
-
- curr_rules_list =
- &pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex];
-
- if (curr_clsf_entry->pstPhsRule) {
-
- if (curr_clsf_entry->pstPhsRule->u8RefCnt)
- curr_clsf_entry->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_clsf_entry->pstPhsRule->u8RefCnt)
- kfree(curr_clsf_entry->pstPhsRule);
-
- curr_clsf_entry->pstPhsRule = NULL;
- }
- memset(curr_clsf_entry, 0,
- sizeof(struct bcm_phs_classifier_entry));
- if (curr_rules_list->pstPhsRule) {
-
- if (curr_rules_list->pstPhsRule->u8RefCnt)
- curr_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_rules_list->pstPhsRule);
-
- curr_rules_list->pstPhsRule = NULL;
- }
- memset(curr_rules_list, 0,
- sizeof(struct bcm_phs_classifier_entry));
- }
- }
- pstServiceFlowEntry->bUsed = false;
- pstServiceFlowEntry->uiVcid = 0;
-
-out:
- return 0;
-}
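PhsDeletePHSRule(), PhsDeleteClassifierRule() and PhsDeleteSFRules() all repeat the same release idiom on a classifier entry; a hypothetical helper that captures it:

/* Hypothetical helper for the repeated "drop one reference, free on the
 * last one, then clear the entry" idiom used by the deletion paths above.
 */
static void phs_rule_put(struct bcm_phs_classifier_entry *entry)
{
	struct bcm_phs_rule *rule = entry->pstPhsRule;

	if (rule) {
		if (rule->u8RefCnt)
			rule->u8RefCnt--;
		if (rule->u8RefCnt == 0)
			kfree(rule);
	}
	memset(entry, 0, sizeof(*entry));
}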
-
-/*
- * PhsCompress
- *
- * Routine Description:
- * Exported function to compress the data using PHS.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context.
- * IN B_UINT16 uiVcid - The Service Flow ID to which current
- * packet header compression applies.
- * IN UINT uiClsId - The Classifier ID to which current packet
- * header compression applies.
- * IN void *pvInputBuffer - The Input buffer containing packet header
- * data
- * IN void *pvOutputBuffer - The output buffer returned by this
- * function after PHS
- * IN UINT *pOldHeaderSize - The actual size of the header before PHS
- * IN UINT *pNewHeaderSize - The new size of the header after applying
- * PHS
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-static ULONG PhsCompress(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN void *pvInputBuffer,
- OUT void *pvOutputBuffer,
- OUT UINT *pOldHeaderSize,
- OUT UINT *pNewHeaderSize)
-{
- UINT nSFIndex = 0, nClsidIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- ULONG lStatus = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "Invalid Device Extension\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "Suppressing header\n");
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "SFID Match Failed\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
- uiClsId, eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if (nClsidIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "No PHS Rule Defined For Classifier\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- /* get rule from SF id,Cls ID pair and proceed */
- pstPhsRule = pstClassifierEntry->pstPhsRule;
- if (!ValidatePHSRuleComplete(pstPhsRule)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "PHS Rule Defined For Classifier But Not Complete\n");
- lStatus = STATUS_PHS_NOCOMPRESSION;
- return lStatus;
- }
-
- /* Compress Packet */
- lStatus = phs_compress(pstPhsRule,
- (PUCHAR)pvInputBuffer,
- (PUCHAR)pvOutputBuffer,
- pOldHeaderSize,
- pNewHeaderSize);
-
- if (lStatus == STATUS_PHS_COMPRESSED) {
- pstPhsRule->PHSModifiedBytes +=
- *pOldHeaderSize - *pNewHeaderSize - 1;
- pstPhsRule->PHSModifiedNumPackets++;
- } else {
- pstPhsRule->PHSErrorNumPackets++;
- }
-
- return lStatus;
-}
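phs_compress() itself appears later in this file; as a conceptual aid, header suppression under the rule fields defined in PHSDefines.h works roughly as sketched below. This is an assumption-laden illustration, not the driver's implementation: in particular the bit order used to index u8PHSM is assumed LSB-first here.

/* Conceptual sketch of suppression under u8PHSF/u8PHSM/u8PHSS/u8PHSV.
 * Bytes whose mask bit is set are dropped (optionally verified against
 * PHSF first); the rest are emitted after the PHSI index byte.
 */
static int sketch_phs_suppress(const struct bcm_phs_rule *rule,
			       const unsigned char *in, unsigned char *out,
			       unsigned int *out_len)
{
	unsigned int i, n = 0;

	out[n++] = rule->u8PHSI;	/* index byte sent on the wire */
	for (i = 0; i < rule->u8PHSS; i++) {
		int suppress = (rule->u8PHSM[i / 8] >> (i % 8)) & 1;

		if (suppress) {
			if (rule->u8PHSV && in[i] != rule->u8PHSF[i])
				return STATUS_PHS_NOCOMPRESSION; /* verify failed */
			continue;	/* receiver restores this byte from PHSF */
		}
		out[n++] = in[i];
	}
	*out_len = n;
	return STATUS_PHS_COMPRESSED;
}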
-
-/*
- * PhsDeCompress
- *
- * Routine Description:
- * Exported function to restore the packet header in Rx path.
- *
- * Arguments:
- * IN void* pvContext - PHS Driver Specific Context.
- * IN B_UINT16 uiVcid - The Service Flow ID to which current
- * packet header restoration applies.
- * IN void *pvInputBuffer - The input buffer containing the suppressed
- * packet header data
- * OUT void *pvOutputBuffer - The output buffer returned by this
- * function after restoration
- * OUT UINT *pInHeaderSize - The number of suppressed-header bytes
- * consumed from the input buffer.
- * OUT UINT *pOutHeaderSize - The packet header size after restoration.
- *
- * Return Value:
- *
- * 0 if successful,
- * >0 Error.
- */
-static ULONG PhsDeCompress(IN void *pvContext,
- IN B_UINT16 uiVcid,
- IN void *pvInputBuffer,
- OUT void *pvOutputBuffer,
- OUT UINT *pInHeaderSize,
- OUT UINT *pOutHeaderSize)
-{
- UINT nSFIndex = 0, nPhsRuleIndex = 0;
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- UINT phsi;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_extension *pDeviceExtension =
- (struct bcm_phs_extension *)pvContext;
-
- *pInHeaderSize = 0;
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return ERR_PHS_INVALID_DEVICE_EXETENSION;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "Restoring header\n");
-
- phsi = *((unsigned char *)(pvInputBuffer));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "PHSI To Be Used For restore : %x\n", phsi);
- if (phsi == UNCOMPRESSED_PACKET)
- return STATUS_PHS_NOCOMPRESSION;
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- uiVcid, &pstServiceFlowEntry);
- if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "SFID Match Failed During Lookup\n");
- return ERR_SF_MATCH_FAIL;
- }
-
- nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable,
- phsi,
- eActiveClassifierRuleContext,
- &pstPhsRule);
- if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX) {
-		/* The PHS rule does not exist in the active rules table.
-		 * Let's try the old rules table. */
- nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable,
- phsi,
- eOldClassifierRuleContext,
- &pstPhsRule);
- if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX)
- return ERR_PHSRULE_MATCH_FAIL;
- }
-
- *pInHeaderSize = phs_decompress((PUCHAR)pvInputBuffer,
- (PUCHAR)pvOutputBuffer,
- pstPhsRule,
- pOutHeaderSize);
-
- pstPhsRule->PHSModifiedBytes += *pOutHeaderSize - *pInHeaderSize - 1;
-
- pstPhsRule->PHSModifiedNumPackets++;
- return STATUS_PHS_COMPRESSED;
-}
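On the receive side the dispatch hinges on the first payload byte: it carries the PHSI, and an uncompressed packet is passed through unchanged. A small standalone sketch of that check follows, assuming (as the transmit path's ZERO_PHSI suggests) that UNCOMPRESSED_PACKET is the zero PHSI; the helper name is hypothetical and not from the original driver.

/* Returns 1 when the payload must be run through header restoration. */
static int rx_needs_restore(const unsigned char *payload,
			    unsigned char *phsi_out)
{
	*phsi_out = payload[0];		/* the PHSI travels as the first byte */
	return *phsi_out != 0;		/* 0: uncompressed, pass through */
}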
-
-/*
- * Procedure: free_phs_serviceflow_rules
- *
- * Description: This routine is responsible for freeing memory allocated for
- * PHS rules.
- *
- * Arguments:
- * psServiceFlowRulesTable - ptr to the bcm_phs_table structure.
- *
- * Returns:
- * Does not return any value.
- */
-static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable)
-{
- int i, j;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- struct bcm_phs_classifier_entry *curr_act_rules_list;
- struct bcm_phs_classifier_entry *curr_old_rules_list;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "=======>\n");
-
- if (!psServiceFlowRulesTable)
- goto out;
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
- struct bcm_phs_entry stServiceFlowEntry =
- psServiceFlowRulesTable->stSFList[i];
- struct bcm_phs_classifier_table *pstClassifierRulesTable =
- stServiceFlowEntry.pstClassifierTable;
-
- if (pstClassifierRulesTable) {
- for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
- curr_act_rules_list =
- &pstClassifierRulesTable->stActivePhsRulesList[j];
-
- curr_old_rules_list =
- &pstClassifierRulesTable->stOldPhsRulesList[j];
-
- if (curr_act_rules_list->pstPhsRule) {
-
- if (curr_act_rules_list->pstPhsRule->u8RefCnt)
- curr_act_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_act_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_act_rules_list->pstPhsRule);
-
- curr_act_rules_list->pstPhsRule = NULL;
- }
-
- if (curr_old_rules_list->pstPhsRule) {
-
- if (curr_old_rules_list->pstPhsRule->u8RefCnt)
- curr_old_rules_list->pstPhsRule->u8RefCnt--;
-
- if (0 == curr_old_rules_list->pstPhsRule->u8RefCnt)
- kfree(curr_old_rules_list->pstPhsRule);
-
- curr_old_rules_list->pstPhsRule = NULL;
- }
- }
- kfree(pstClassifierRulesTable);
- stServiceFlowEntry.pstClassifierTable =
- pstClassifierRulesTable = NULL;
- }
- }
-
-out:
-
- kfree(psServiceFlowRulesTable);
- psServiceFlowRulesTable = NULL;
-}
-
-static bool ValidatePHSRuleComplete(IN const struct bcm_phs_rule *psPhsRule)
-{
- return (psPhsRule &&
- psPhsRule->u8PHSI &&
- psPhsRule->u8PHSS &&
- psPhsRule->u8PHSFLength);
-}
-
-UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
- IN B_UINT16 uiVcid,
- struct bcm_phs_entry **ppstServiceFlowEntry)
-{
- int i;
- struct bcm_phs_entry *curr_sf_list;
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
- curr_sf_list = &psServiceFlowTable->stSFList[i];
- if (curr_sf_list->bUsed && (curr_sf_list->uiVcid == uiVcid)) {
- *ppstServiceFlowEntry = curr_sf_list;
- return i;
- }
- }
-
- *ppstServiceFlowEntry = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
- IN B_UINT32 uiClsid,
- enum bcm_phs_classifier_context eClsContext,
- OUT struct bcm_phs_classifier_entry **ppstClassifierEntry)
-{
- int i;
- struct bcm_phs_classifier_entry *psClassifierRules = NULL;
-
- for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
-
- if (eClsContext == eActiveClassifierRuleContext)
- psClassifierRules =
- &pstClassifierTable->stActivePhsRulesList[i];
- else
- psClassifierRules =
- &pstClassifierTable->stOldPhsRulesList[i];
-
- if (psClassifierRules->bUsed &&
- (psClassifierRules->uiClassifierRuleId == uiClsid)) {
- *ppstClassifierEntry = psClassifierRules;
- return i;
- }
- }
-
- *ppstClassifierEntry = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
- IN B_UINT32 uiPHSI,
- enum bcm_phs_classifier_context eClsContext,
- OUT struct bcm_phs_rule **ppstPhsRule)
-{
- int i;
- struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
-
- for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
- if (eClsContext == eActiveClassifierRuleContext)
- pstClassifierRule =
- &pstClassifierTable->stActivePhsRulesList[i];
- else
- pstClassifierRule =
- &pstClassifierTable->stOldPhsRulesList[i];
-
- if (pstClassifierRule->bUsed &&
- (pstClassifierRule->u8PHSI == uiPHSI)) {
- *ppstPhsRule = pstClassifierRule->pstPhsRule;
- return i;
- }
- }
-
- *ppstPhsRule = NULL;
- return PHS_INVALID_TABLE_INDEX;
-}
-
-static UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN struct bcm_phs_table *psServiceFlowTable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_classifier_table *psaClassifiertable = NULL;
- UINT uiStatus = 0;
- int iSfIndex;
- bool bFreeEntryFound = false;
- struct bcm_phs_entry *curr_list;
-
- /* Check for a free entry in SFID table */
- for (iSfIndex = 0; iSfIndex < MAX_SERVICEFLOWS; iSfIndex++) {
- curr_list = &psServiceFlowTable->stSFList[iSfIndex];
- if (!curr_list->bUsed) {
- bFreeEntryFound = TRUE;
- break;
- }
- }
-
- if (!bFreeEntryFound)
- return ERR_SFTABLE_FULL;
-
- psaClassifiertable = curr_list->pstClassifierTable;
- uiStatus = CreateClassifierPHSRule(uiClsId,
- psaClassifiertable,
- psPhsRule,
- eActiveClassifierRuleContext,
- u8AssociatedPHSI);
- if (uiStatus == PHS_SUCCESS) {
- /* Add entry at free index to the SF */
- curr_list->bUsed = TRUE;
- curr_list->uiVcid = uiVcid;
- }
-
- return uiStatus;
-}
-
-static UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
- IN B_UINT16 uiClsId,
- IN struct bcm_phs_entry *pstServiceFlowEntry,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
- UINT uiStatus = PHS_SUCCESS;
- UINT nClassifierIndex = 0;
- struct bcm_phs_classifier_table *psaClassifiertable = NULL;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- psaClassifiertable = pstServiceFlowEntry->pstClassifierTable;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "==>");
-
- /* Check if the supplied Classifier already exists */
- nClassifierIndex = GetClassifierEntry(
- pstServiceFlowEntry->pstClassifierTable,
- uiClsId,
- eActiveClassifierRuleContext,
- &pstClassifierEntry);
-
- if (nClassifierIndex == PHS_INVALID_TABLE_INDEX) {
- /*
-		 * The classifier doesn't exist, so it's a new classifier being
-		 * added.
-		 * Add a new entry to associate the PHS rule with the classifier.
- */
-
- uiStatus = CreateClassifierPHSRule(uiClsId, psaClassifiertable,
- psPhsRule,
- eActiveClassifierRuleContext,
- u8AssociatedPHSI);
- return uiStatus;
- }
-
- /*
-	 * The classifier exists. The PHS rule for this classifier
-	 * is being modified.
- */
-
- if (pstClassifierEntry->u8PHSI == psPhsRule->u8PHSI) {
- if (pstClassifierEntry->pstPhsRule == NULL)
- return ERR_PHS_INVALID_PHS_RULE;
-
- /*
-		 * This rule already exists; if any fields have changed for this
-		 * PHS rule, update them.
- */
- /* If any part of PHSF is valid then we update PHSF */
- if (psPhsRule->u8PHSFLength) {
- /* update PHSF */
- memcpy(pstClassifierEntry->pstPhsRule->u8PHSF,
- psPhsRule->u8PHSF,
- MAX_PHS_LENGTHS);
- }
-
- if (psPhsRule->u8PHSFLength) {
- /* update PHSFLen */
- pstClassifierEntry->pstPhsRule->u8PHSFLength =
- psPhsRule->u8PHSFLength;
- }
-
- if (psPhsRule->u8PHSMLength) {
- /* update PHSM */
- memcpy(pstClassifierEntry->pstPhsRule->u8PHSM,
- psPhsRule->u8PHSM,
- MAX_PHS_LENGTHS);
- }
-
- if (psPhsRule->u8PHSMLength) {
- /* update PHSM Len */
- pstClassifierEntry->pstPhsRule->u8PHSMLength =
- psPhsRule->u8PHSMLength;
- }
-
- if (psPhsRule->u8PHSS) {
- /* update PHSS */
- pstClassifierEntry->pstPhsRule->u8PHSS =
- psPhsRule->u8PHSS;
- }
-
- /* update PHSV */
- pstClassifierEntry->pstPhsRule->u8PHSV = psPhsRule->u8PHSV;
- } else {
- /* A new rule is being set for this classifier. */
- uiStatus = UpdateClassifierPHSRule(uiClsId,
- pstClassifierEntry,
- psaClassifiertable,
- psPhsRule,
- u8AssociatedPHSI);
- }
-
- return uiStatus;
-}
-
-static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- enum bcm_phs_classifier_context eClsContext,
- B_UINT8 u8AssociatedPHSI)
-{
- UINT iClassifierIndex = 0;
- bool bFreeEntryFound = false;
- struct bcm_phs_classifier_entry *psClassifierRules = NULL;
- UINT nStatus = PHS_SUCCESS;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL,
- "Inside CreateClassifierPHSRule");
-
- if (psaClassifiertable == NULL)
- return ERR_INVALID_CLASSIFIERTABLE_FOR_SF;
-
- if (eClsContext == eOldClassifierRuleContext) {
- /*
-		 * If an old entry for this classifier ID already exists in the
-		 * old rules table, replace it.
- */
-
- iClassifierIndex = GetClassifierEntry(psaClassifiertable,
- uiClsId,
- eClsContext,
- &psClassifierRules);
-
- if (iClassifierIndex != PHS_INVALID_TABLE_INDEX) {
- /*
-			 * The classifier already exists in the old rules table.
-			 * Let's replace the old classifier with the new one.
- */
- bFreeEntryFound = TRUE;
- }
- }
-
- if (!bFreeEntryFound) {
- /* Continue to search for a free location to add the rule */
- for (iClassifierIndex = 0; iClassifierIndex <
- MAX_PHSRULE_PER_SF; iClassifierIndex++) {
- if (eClsContext == eActiveClassifierRuleContext)
- psClassifierRules = &psaClassifiertable->stActivePhsRulesList[iClassifierIndex];
- else
- psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
-
- if (!psClassifierRules->bUsed) {
- bFreeEntryFound = TRUE;
- break;
- }
- }
- }
-
- if (!bFreeEntryFound) {
-
- if (eClsContext == eActiveClassifierRuleContext)
- return ERR_CLSASSIFIER_TABLE_FULL;
- else {
-			/* Let's replace the oldest rule if we are looking in the
-			 * old rules table */
- if (psaClassifiertable->uiOldestPhsRuleIndex >= MAX_PHSRULE_PER_SF)
- psaClassifiertable->uiOldestPhsRuleIndex = 0;
-
- iClassifierIndex =
- psaClassifiertable->uiOldestPhsRuleIndex;
- psClassifierRules =
- &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
-
- (psaClassifiertable->uiOldestPhsRuleIndex)++;
- }
- }
-
- if (eClsContext == eOldClassifierRuleContext) {
-
- if (psClassifierRules->pstPhsRule == NULL) {
-
- psClassifierRules->pstPhsRule =
- kmalloc(sizeof(struct bcm_phs_rule),
- GFP_KERNEL);
-
- if (NULL == psClassifierRules->pstPhsRule)
- return ERR_PHSRULE_MEMALLOC_FAIL;
- }
-
- psClassifierRules->bUsed = TRUE;
- psClassifierRules->uiClassifierRuleId = uiClsId;
- psClassifierRules->u8PHSI = psPhsRule->u8PHSI;
- psClassifierRules->bUnclassifiedPHSRule =
- psPhsRule->bUnclassifiedPHSRule;
-
- /* Update The PHS rule */
- memcpy(psClassifierRules->pstPhsRule, psPhsRule,
- sizeof(struct bcm_phs_rule));
- } else
- nStatus = UpdateClassifierPHSRule(uiClsId,
- psClassifierRules,
- psaClassifiertable,
- psPhsRule,
- u8AssociatedPHSI);
-
- return nStatus;
-}
-
-static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
- IN struct bcm_phs_classifier_entry *pstClassifierEntry,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI)
-{
- struct bcm_phs_rule *pstAddPhsRule = NULL;
- UINT nPhsRuleIndex = 0;
- bool bPHSRuleOrphaned = false;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- psPhsRule->u8RefCnt = 0;
-
-	/* Step 1: Deref any existing PHS rule in this classifier entry */
- bPHSRuleOrphaned = DerefPhsRule(uiClsId, psaClassifiertable,
- pstClassifierEntry->pstPhsRule);
-
-	/* Step 2: Search for a PHS rule with u8AssociatedPHSI in the
-	 * classifier table for this SF */
- nPhsRuleIndex = GetPhsRuleEntry(psaClassifiertable, u8AssociatedPHSI,
- eActiveClassifierRuleContext,
- &pstAddPhsRule);
- if (PHS_INVALID_TABLE_INDEX == nPhsRuleIndex) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nAdding New PHSRuleEntry For Classifier");
-
- if (psPhsRule->u8PHSI == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL, "\nError PHSI is Zero\n");
- return ERR_PHS_INVALID_PHS_RULE;
- }
-
-		/* Step 2.a: The PHS rule does not exist. Create a new PHS rule
-		 * for uiClsId */
- if (false == bPHSRuleOrphaned) {
-
- pstClassifierEntry->pstPhsRule =
- kmalloc(sizeof(struct bcm_phs_rule),
- GFP_KERNEL);
- if (NULL == pstClassifierEntry->pstPhsRule)
- return ERR_PHSRULE_MEMALLOC_FAIL;
- }
- memcpy(pstClassifierEntry->pstPhsRule, psPhsRule,
- sizeof(struct bcm_phs_rule));
- } else {
-		/* Step 2.b: The PHS rule exists. Tie uiClsId to the existing
-		 * PHS rule */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH,
- DBG_LVL_ALL,
- "\nTying Classifier to Existing PHS Rule");
- if (bPHSRuleOrphaned) {
- kfree(pstClassifierEntry->pstPhsRule);
- pstClassifierEntry->pstPhsRule = NULL;
- }
- pstClassifierEntry->pstPhsRule = pstAddPhsRule;
- }
-
- pstClassifierEntry->bUsed = TRUE;
- pstClassifierEntry->u8PHSI = pstClassifierEntry->pstPhsRule->u8PHSI;
- pstClassifierEntry->uiClassifierRuleId = uiClsId;
- pstClassifierEntry->pstPhsRule->u8RefCnt++;
- pstClassifierEntry->bUnclassifiedPHSRule =
- pstClassifierEntry->pstPhsRule->bUnclassifiedPHSRule;
-
- return PHS_SUCCESS;
-}
-
-static bool DerefPhsRule(IN B_UINT16 uiClsId,
- struct bcm_phs_classifier_table *psaClassifiertable,
- struct bcm_phs_rule *pstPhsRule)
-{
- if (pstPhsRule == NULL)
- return false;
-
- if (pstPhsRule->u8RefCnt)
- pstPhsRule->u8RefCnt--;
-
- return (0 == pstPhsRule->u8RefCnt);
-}
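Several classifier entries may point at the same bcm_phs_rule, and u8RefCnt is what keeps that sharing safe: DerefPhsRule() only reports whether the caller just dropped the last reference, leaving the free-or-reuse decision to callers such as UpdateClassifierPHSRule() and the teardown paths. Below is a minimal standalone sketch of that lifecycle, added for illustration; the names are hypothetical and not from the original file.

#include <stdlib.h>

struct phs_rule_sketch {
	unsigned char refcnt;
	/* suppressed-field data would live here */
};

/* Mirrors DerefPhsRule(): returns 1 when the last reference was dropped. */
static int deref_rule(struct phs_rule_sketch *rule)
{
	if (!rule)
		return 0;
	if (rule->refcnt)
		rule->refcnt--;
	return rule->refcnt == 0;
}

static void put_rule(struct phs_rule_sketch *rule)
{
	if (deref_rule(rule))
		free(rule);	/* last user releases the shared rule */
}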
-
-static void dbg_print_st_cls_entry(struct bcm_mini_adapter *ad,
- struct bcm_phs_entry *st_serv_flow_entry,
- struct bcm_phs_classifier_entry *st_cls_entry)
-{
- int k;
-
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n VCID : %#X", st_serv_flow_entry->uiVcid);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n ClassifierID : %#X", st_cls_entry->uiClassifierRuleId);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSRuleID : %#X", st_cls_entry->u8PHSI);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n****************PHS Rule********************\n");
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSI : %#X", st_cls_entry->pstPhsRule->u8PHSI);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSFLength : %#X ", st_cls_entry->pstPhsRule->u8PHSFLength);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSF : ");
-
- for (k = 0 ; k < st_cls_entry->pstPhsRule->u8PHSFLength; k++)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSF[k]);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSMLength : %#X", st_cls_entry->pstPhsRule->u8PHSMLength);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSM :");
-
- for (k = 0; k < st_cls_entry->pstPhsRule->u8PHSMLength; k++)
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", st_cls_entry->pstPhsRule->u8PHSM[k]);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSS : %#X ", st_cls_entry->pstPhsRule->u8PHSS);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSV : %#X", st_cls_entry->pstPhsRule->u8PHSV);
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n********************************************\n");
-}
-
-static void phsrules_per_sf_dbg_print(struct bcm_mini_adapter *ad,
- struct bcm_phs_entry *st_serv_flow_entry)
-{
- int j, l;
- struct bcm_phs_classifier_entry st_cls_entry;
-
- for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
-
- for (l = 0; l < 2; l++) {
-
- if (l == 0) {
- st_cls_entry = st_serv_flow_entry->pstClassifierTable->stActivePhsRulesList[j];
- if (st_cls_entry.bUsed)
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- (DBG_LVL_ALL | DBG_NO_FUNC_PRINT),
- "\n Active PHS Rule :\n");
- } else {
- st_cls_entry = st_serv_flow_entry->pstClassifierTable->stOldPhsRulesList[j];
- if (st_cls_entry.bUsed)
- BCM_DEBUG_PRINT(ad,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- (DBG_LVL_ALL | DBG_NO_FUNC_PRINT),
- "\n Old PHS Rule :\n");
- }
-
- if (st_cls_entry.bUsed) {
- dbg_print_st_cls_entry(ad,
- st_serv_flow_entry,
- &st_cls_entry);
- }
- }
- }
-}
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
-{
- int i;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL,
- "\n Dumping PHS Rules :\n");
-
- for (i = 0; i < MAX_SERVICEFLOWS; i++) {
-
- struct bcm_phs_entry stServFlowEntry =
- pDeviceExtension->pstServiceFlowPhsRulesTable->stSFList[i];
-
- if (!stServFlowEntry.bUsed)
- continue;
-
- phsrules_per_sf_dbg_print(Adapter, &stServFlowEntry);
- }
-}
-
-/*
- * Procedure: phs_decompress
- *
- * Description: This routine restores the static fields within the packet.
- *
- * Arguments:
- * in_buf - ptr to incoming packet buffer.
- * out_buf - ptr to output buffer where the suppressed
- * header is copied.
- * decomp_phs_rules - ptr to PHS rule.
- * header_size - ptr to field which holds the phss or
- * phsf_length.
- *
- * Returns:
- * size - The number of bytes of dynamic fields present within the
- * incoming packet header.
- * 0 - If the PHS rule is NULL, or if the PHSI is 0, indicating an
- * uncompressed packet.
- */
-static int phs_decompress(unsigned char *in_buf,
- unsigned char *out_buf,
- struct bcm_phs_rule *decomp_phs_rules,
- UINT *header_size)
-{
- int phss, size = 0;
- struct bcm_phs_rule *tmp_memb;
- int bit, i = 0;
- unsigned char *phsf, *phsm;
- int in_buf_len = *header_size - 1;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- in_buf++;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL,
- "====>\n");
- *header_size = 0;
-
- if (decomp_phs_rules == NULL)
- return 0;
-
- tmp_memb = decomp_phs_rules;
- /*
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,
- * "\nDECOMP:In phs_decompress PHSI 1 %d",phsi));
- * header_size = tmp_memb->u8PHSFLength;
- */
- phss = tmp_memb->u8PHSS;
- phsf = tmp_memb->u8PHSF;
- phsm = tmp_memb->u8PHSM;
-
- if (phss > MAX_PHS_LENGTHS)
- phss = MAX_PHS_LENGTHS;
-
- /*
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,
- * "\nDECOMP:
- * In phs_decompress PHSI %d phss %d index %d",phsi,phss,index));
- */
- while ((phss > 0) && (size < in_buf_len)) {
- bit = ((*phsm << i) & SUPPRESS);
-
- if (bit == SUPPRESS) {
- *out_buf = *phsf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nDECOMP:In phss %d phsf %d output %d",
- phss, *phsf, *out_buf);
- } else {
- *out_buf = *in_buf;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE,
- DBG_LVL_ALL,
- "\nDECOMP:In phss %d input %d output %d",
- phss, *in_buf, *out_buf);
- in_buf++;
- size++;
- }
- out_buf++;
- phsf++;
- phss--;
- i++;
- *header_size = *header_size + 1;
-
- if (i > MAX_NO_BIT) {
- i = 0;
- phsm++;
- }
- }
-
- return size;
-}
-
-/*
- * Procedure: phs_compress
- *
- * Description: This routine suppresses the static fields within the packet.
- * Before doing so it verifies the fields to be suppressed against the
- * corresponding fields in the phsf; verification is enforced only when the
- * phsv field of the PHS rule is set. If verification succeeds the field is
- * suppressed. If any static field differs, none of the static fields are
- * suppressed and the packet is sent uncompressed with phsi=0.
- *
- * Arguments:
- * phs_rule - ptr to PHS rule.
- * in_buf - ptr to incoming packet buffer.
- * out_buf - ptr to output buffer where the suppressed header is
- * copied.
- * header_size - ptr to field which holds the phss.
- *
- * Returns:
- * STATUS_PHS_COMPRESSED - Compression was applied; the number of
- * dynamic bytes copied into the output buffer is returned through
- * new_header_size.
- * STATUS_PHS_NOCOMPRESSION - The PHS rule is NULL or the verification
- * failed; the packet is sent uncompressed.
- */
-static int phs_compress(struct bcm_phs_rule *phs_rule,
- unsigned char *in_buf,
- unsigned char *out_buf,
- UINT *header_size,
- UINT *new_header_size)
-{
- unsigned char *old_addr = out_buf;
- int suppress = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (phs_rule == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nphs_compress(): phs_rule null!");
- *out_buf = ZERO_PHSI;
- return STATUS_PHS_NOCOMPRESSION;
- }
-
- if (phs_rule->u8PHSS <= *new_header_size)
- *header_size = phs_rule->u8PHSS;
- else
- *header_size = *new_header_size;
-
-	/* Reserve the first output byte for the PHSI */
- out_buf++;
- suppress = verify_suppress_phsf(in_buf, out_buf, phs_rule->u8PHSF,
- phs_rule->u8PHSM, phs_rule->u8PHSS,
- phs_rule->u8PHSV, new_header_size);
-
- if (suppress == STATUS_PHS_COMPRESSED) {
- *old_addr = (unsigned char)phs_rule->u8PHSI;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In phs_compress phsi %d",
- phs_rule->u8PHSI);
- } else {
- *old_addr = ZERO_PHSI;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In phs_compress PHSV Verification failed");
- }
-
- return suppress;
-}
-
-/*
- * Procedure: verify_suppress_phsf
- *
- * Description: This routine verifies the fields of the packet; if all the
- * static fields match, the PHSI of that PHS rule is used. If any static
- * field differs it won't suppress any field.
- *
- * Arguments:
- * in_buffer - ptr to incoming packet buffer.
- * out_buffer - ptr to output buffer where the suppressed header is copied.
- * phsf - ptr to phsf.
- * phsm - ptr to phsm.
- * phss - variable holding phss.
- * phsv - variable holding phsv.
- * new_header_size - ptr to the new (suppressed) header size.
- *
- * Returns:
- * STATUS_PHS_COMPRESSED - Verification passed; the number of dynamic
- * bytes copied is returned through new_header_size.
- * STATUS_PHS_NOCOMPRESSION - The packet failed the verification.
- */
-static int verify_suppress_phsf(unsigned char *in_buffer,
- unsigned char *out_buffer,
- unsigned char *phsf,
- unsigned char *phsm,
- unsigned int phss,
- unsigned int phsv,
- UINT *new_header_size)
-{
- unsigned int size = 0;
- int bit, i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In verify_phsf PHSM - 0x%X", *phsm);
-
- if (phss > (*new_header_size))
- phss = *new_header_size;
-
- while (phss > 0) {
- bit = ((*phsm << i) & SUPPRESS);
- if (bit == SUPPRESS) {
- if (*in_buffer != *phsf) {
- if (phsv == VERIFY) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In verify_phsf failed for field %d buf %d phsf %d",
- phss,
- *in_buffer,
- *phsf);
- return STATUS_PHS_NOCOMPRESSION;
- }
- } else
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In verify_phsf success for field %d buf %d phsf %d",
- phss,
- *in_buffer,
- *phsf);
- } else {
- *out_buffer = *in_buffer;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- PHS_SEND,
- DBG_LVL_ALL,
- "\nCOMP:In copying_header input %d out %d",
- *in_buffer,
- *out_buffer);
- out_buffer++;
- size++;
- }
-
- in_buffer++;
- phsf++;
- phss--;
- i++;
-
- if (i > MAX_NO_BIT) {
- i = 0;
- phsm++;
- }
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,
- "\nCOMP:In verify_phsf success");
- *new_header_size = size;
- return STATUS_PHS_COMPRESSED;
-}
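Both verify_suppress_phsf() and phs_decompress() walk the same PHSM bitmask: a set bit marks a static byte (suppressed on transmit, restored from the PHSF on receive), a clear bit marks a dynamic byte that is copied through. The standalone round-trip sketch below illustrates that convention; it is not part of the original file and assumes the driver's SUPPRESS mask is 0x80 with eight bits consumed MSB-first per PHSM byte, which is what the shift-and-mask pattern above implies.

#include <stdio.h>
#include <string.h>

#define SKETCH_SUPPRESS 0x80	/* assumed value of the driver's SUPPRESS */

/* Transmit side: emit only the bytes whose PHSM bit is clear (dynamic). */
static unsigned int sketch_suppress(const unsigned char *hdr,
				    const unsigned char *phsm,
				    unsigned int phss, unsigned char *out)
{
	unsigned int n = 0, k;

	for (k = 0; k < phss; k++)
		if (!((phsm[k / 8] << (k % 8)) & SKETCH_SUPPRESS))
			out[n++] = hdr[k];
	return n;		/* number of dynamic bytes put on the wire */
}

/* Receive side: static bytes come from the PHSF, dynamic bytes from the wire. */
static void sketch_restore(const unsigned char *in, const unsigned char *phsf,
			   const unsigned char *phsm, unsigned int phss,
			   unsigned char *hdr)
{
	unsigned int k;

	for (k = 0; k < phss; k++) {
		if ((phsm[k / 8] << (k % 8)) & SKETCH_SUPPRESS)
			hdr[k] = phsf[k];
		else
			hdr[k] = *in++;
	}
}

int main(void)
{
	unsigned char hdr[4]  = { 0x45, 0x00, 0x12, 0x34 };
	unsigned char phsf[4] = { 0x45, 0x00, 0x00, 0x00 };
	unsigned char phsm[1] = { 0xC0 };	/* first two bytes static */
	unsigned char wire[4], back[4];
	unsigned int n = sketch_suppress(hdr, phsm, 4, wire);

	sketch_restore(wire, phsf, phsm, 4, back);
	printf("sent %u dynamic bytes, round-trip %s\n", n,
	       memcmp(hdr, back, 4) ? "failed" : "ok");
	return 0;
}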
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
deleted file mode 100644
index d84d60ba48f9..000000000000
--- a/drivers/staging/bcm/PHSModule.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef BCM_MINIPORT_PHSMODULE_H
-#define BCM_MINIPORT_PHSMODULE_H
-
-int PHSTransmit(struct bcm_mini_adapter *Adapter,
- struct sk_buff **pPacket,
- USHORT Vcid,
- B_UINT16 uiClassifierRuleID,
- bool bHeaderSuppressionEnabled,
- PUINT PacketLen,
- UCHAR bEthCSSupport);
-
-int PHSReceive(struct bcm_mini_adapter *Adapter,
- USHORT usVcid,
- struct sk_buff *packet,
- UINT *punPacketLen,
- UCHAR *pucEthernetHdr,
- UINT
- );
-
-
-void DumpDataPacketHeader(PUCHAR pPkt);
-
-void DumpFullPacket(UCHAR *pBuf, UINT nPktLen);
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
-
-
-int phs_init(struct bcm_phs_extension *pPhsdeviceExtension,
- struct bcm_mini_adapter *Adapter);
-
-int PhsCleanup(struct bcm_phs_extension *pPHSDeviceExt);
-
-/* Utility Functions */
-ULONG PhsUpdateClassifierRule(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- struct bcm_phs_rule *psPhsRule,
- B_UINT8 u8AssociatedPHSI);
-
-ULONG PhsDeletePHSRule(void *pvContext, B_UINT16 uiVcid, B_UINT8 u8PHSI);
-
-ULONG PhsDeleteClassifierRule(void *pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId);
-
-ULONG PhsDeleteSFRules(void *pvContext, B_UINT16 uiVcid);
-
-
-bool ValidatePHSRule(struct bcm_phs_rule *psPhsRule);
-
-UINT GetServiceFlowEntry(struct bcm_phs_table *psServiceFlowTable,
- B_UINT16 uiVcid,
- struct bcm_phs_entry **ppstServiceFlowEntry);
-
-
-void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension);
-
-
-#endif
diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h
deleted file mode 100644
index 9818128d9320..000000000000
--- a/drivers/staging/bcm/Protocol.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/************************************
-* Protocol.h
-*************************************/
-#ifndef __PROTOCOL_H__
-#define __PROTOCOL_H__
-
-#define IPV4 4
-#define IPV6 6
-
-struct ArpHeader {
- struct arphdr arp;
- unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
- unsigned char ar_sip[4]; /* sender IP address */
- unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
- unsigned char ar_tip[4]; /* target IP address */
-};
-
-struct bcm_transport_header {
- union {
- struct udphdr uhdr;
- struct tcphdr thdr;
- };
-} __packed;
-
-enum bcm_ip_frame_type {
- eNonIPPacket,
- eIPv4Packet,
- eIPv6Packet
-};
-
-enum bcm_eth_frame_type {
- eEthUnsupportedFrame,
- eEth802LLCFrame,
- eEth802LLCSNAPFrame,
- eEth802QVLANFrame,
- eEthOtherFrame
-};
-
-struct bcm_eth_packet_info {
- enum bcm_ip_frame_type eNwpktIPFrameType;
- enum bcm_eth_frame_type eNwpktEthFrameType;
- unsigned short usEtherType;
- unsigned char ucDSAP;
-};
-
-struct bcm_eth_q_frame {
- struct bcm_eth_header EThHdr;
- unsigned short UserPriority:3;
- unsigned short CFI:1;
- unsigned short VLANID:12;
- unsigned short EthType;
-} __packed;
-
-struct bcm_eth_llc_frame {
- struct bcm_eth_header EThHdr;
- unsigned char DSAP;
- unsigned char SSAP;
- unsigned char Control;
-} __packed;
-
-struct bcm_eth_llc_snap_frame {
- struct bcm_eth_header EThHdr;
- unsigned char DSAP;
- unsigned char SSAP;
- unsigned char Control;
- unsigned char OUI[3];
- unsigned short usEtherType;
-} __packed;
-
-struct bcm_ethernet2_frame {
- struct bcm_eth_header EThHdr;
-} __packed;
-
-#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
-#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
-#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100)
-
-/* Per SF CS Specification Encodings */
-enum bcm_spec_encoding {
- eCSSpecUnspecified = 0,
- eCSPacketIPV4,
- eCSPacketIPV6,
- eCS802_3PacketEthernet,
- eCS802_1QPacketVLAN,
- eCSPacketIPV4Over802_3Ethernet,
- eCSPacketIPV6Over802_3Ethernet,
- eCSPacketIPV4Over802_1QVLAN,
- eCSPacketIPV6Over802_1QVLAN,
- eCSPacketUnsupported
-};
-
-#define IP6_HEADER_LEN 40
-#define IP_VERSION(byte) (((byte&0xF0)>>4))
-
-#define MAC_ADDRESS_SIZE 6
-#define ETH_AND_IP_HEADER_LEN (14 + 20)
-#define L4_SRC_PORT_LEN 2
-#define L4_DEST_PORT_LEN 2
-#define CTRL_PKT_LEN (8 + ETH_AND_IP_HEADER_LEN)
-
-#define ETH_ARP_FRAME 0x806
-#define ETH_IPV4_FRAME 0x800
-#define ETH_IPV6_FRAME 0x86DD
-#define UDP 0x11
-#define TCP 0x06
-
-#define ARP_OP_REQUEST 0x01
-#define ARP_OP_REPLY 0x02
-#define ARP_PKT_SIZE 60
-
-/* This is the format for the TCP packet header */
-struct bcm_tcp_header {
- unsigned short usSrcPort;
- unsigned short usDestPort;
- unsigned long ulSeqNumber;
- unsigned long ulAckNumber;
- unsigned char HeaderLength;
- unsigned char ucFlags;
- unsigned short usWindowsSize;
- unsigned short usChkSum;
- unsigned short usUrgetPtr;
-};
-
-#define TCP_HEADER_LEN sizeof(struct bcm_tcp_header)
-#define TCP_ACK 0x10 /* Bit 4 in tcpflags field. */
-#define GET_TCP_HEADER_LEN(byte) ((byte&0xF0)>>4)
-
-#endif /* __PROTOCOL_H__ */
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
deleted file mode 100644
index 1ddc8b2539f6..000000000000
--- a/drivers/staging/bcm/Prototypes.h
+++ /dev/null
@@ -1,217 +0,0 @@
-#ifndef _PROTOTYPES_H_
-#define _PROTOTYPES_H_
-
-VOID LinkControlResponseMessage(struct bcm_mini_adapter *Adapter, PUCHAR pucBuffer);
-
-VOID StatisticsResponse(struct bcm_mini_adapter *Adapter, PVOID pvBuffer);
-
-VOID IdleModeResponse(struct bcm_mini_adapter *Adapter, PUINT puiBuffer);
-
-int control_packet_handler(struct bcm_mini_adapter *Adapter);
-
-VOID DeleteAllClassifiersForSF(struct bcm_mini_adapter *Adapter, UINT uiSearchRuleIndex);
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter);
-
-int register_control_device_interface(struct bcm_mini_adapter *ps_adapter);
-
-void unregister_control_device_interface(struct bcm_mini_adapter *Adapter);
-
-INT CopyBufferToControlPacket(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
- PVOID ioBuffer/**<Control Packet Buffer*/
- );
-
-VOID SortPackInfo(struct bcm_mini_adapter *Adapter);
-
-VOID SortClassifiers(struct bcm_mini_adapter *Adapter);
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter);
-
-VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter);
-
-INT SearchSfid(struct bcm_mini_adapter *Adapter, UINT uiSfid);
-
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb);
-
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort);
-
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort);
-
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucProtocol);
-
-INT SetupNextSend(struct bcm_mini_adapter *Adapter, /**<Logical Adapter*/
- struct sk_buff *Packet, /**<data buffer*/
- USHORT Vcid);
-
-VOID LinkMessage(struct bcm_mini_adapter *Adapter);
-
-VOID transmit_packets(struct bcm_mini_adapter *Adapter);
-
-INT SendControlPacket(struct bcm_mini_adapter *Adapter, /**<Logical Adapter*/
- char *pControlPacket/**<Control Packet*/
- );
-
-int register_networkdev(struct bcm_mini_adapter *Adapter);
-
-void unregister_networkdev(struct bcm_mini_adapter *Adapter);
-
-INT AllocAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-
-VOID AdapterFree(struct bcm_mini_adapter *Adapter);
-
-INT FreeAdapterDsxBuffer(struct bcm_mini_adapter *Adapter);
-
-int tx_pkt_handler(struct bcm_mini_adapter *Adapter);
-
-int reset_card_proc(struct bcm_mini_adapter *Adapter);
-
-int run_card_proc(struct bcm_mini_adapter *Adapter);
-
-int InitCardAndDownloadFirmware(struct bcm_mini_adapter *ps_adapter);
-
-INT ReadMacAddressFromNVM(struct bcm_mini_adapter *Adapter);
-
-int register_control_device_interface(struct bcm_mini_adapter *ps_adapter);
-
-void DumpPackInfo(struct bcm_mini_adapter *Adapter);
-
-int rdm(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-int wrm(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-int wrmalt(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int rdmalt(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int get_dsx_sf_data_to_application(struct bcm_mini_adapter *Adapter, UINT uiSFId, void __user *user_buffer);
-
-void SendIdleModeResponse(struct bcm_mini_adapter *Adapter);
-
-int ProcessGetHostMibs(struct bcm_mini_adapter *Adapter, struct bcm_host_stats_mibs *buf);
-
-void GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *ioBuffer, struct bcm_tarang_data *pTarang);
-
-void beceem_parse_target_struct(struct bcm_mini_adapter *Adapter);
-
-int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter, struct bcm_firmware_info *psFwInfo);
-
-void CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter,
- struct bcm_connect_mgr_params *psfLocalSet, UINT uiSearchRuleIndex);
-
-VOID ResetCounters(struct bcm_mini_adapter *Adapter);
-
-int InitLedSettings(struct bcm_mini_adapter *Adapter);
-
-struct bcm_classifier_rule *GetFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIP);
-
-void AddFragIPClsEntry(struct bcm_mini_adapter *Adapter, struct bcm_fragmented_packet_info *psFragPktInfo);
-
-void DelFragIPClsEntry(struct bcm_mini_adapter *Adapter, USHORT usIpIdentification, ULONG SrcIp);
-
-void update_per_cid_rx(struct bcm_mini_adapter *Adapter);
-
-void update_per_sf_desc_cnts(struct bcm_mini_adapter *Adapter);
-
-void ClearTargetDSXBuffer(struct bcm_mini_adapter *Adapter, B_UINT16 TID, bool bFreeAll);
-
-void flush_queue(struct bcm_mini_adapter *Adapter, UINT iQIndex);
-
-INT flushAllAppQ(VOID);
-
-INT BeceemEEPROMBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-INT WriteBeceemEEPROM(struct bcm_mini_adapter *Adapter, UINT uiEEPROMOffset, UINT uiData);
-
-INT PropagateCalParamsFromFlashToMemory(struct bcm_mini_adapter *Adapter);
-
-INT BeceemEEPROMBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUCHAR pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- bool bVerify);
-
-INT ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter, UINT dwAddress, UINT *pdwData);
-
-INT BeceemNVMRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-INT BeceemNVMWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- bool bVerify);
-
-INT BcmInitNVM(struct bcm_mini_adapter *Adapter);
-
-INT BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, UINT uiSectorSize);
-
-bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section);
-
-INT BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap);
-
-INT BcmFlash2xBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlashSectionVal,
- UINT uiOffset,
- UINT uiNumBytes,
- UINT bVerify);
-
-INT BcmFlash2xBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlashSectionVal,
- UINT uiOffsetWithinSectionVal,
- UINT uiNumBytes);
-
-INT BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal);
-
-INT BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectVal);
-
-INT BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter);
-
-INT BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter);
-
-INT BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section sCopySectStrut);
-
-INT BcmFlash2xCorruptSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal);
-
-INT BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal);
-
-INT validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite);
-
-INT IsFlash2x(struct bcm_mini_adapter *Adapter);
-
-INT BcmCopySection(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val SrcSection,
- enum bcm_flash2x_section_val DstSection,
- UINT offset,
- UINT numOfBytes);
-
-bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter);
-
-VOID OverrideServiceFlowParams(struct bcm_mini_adapter *Adapter, PUINT puiBuffer);
-
-int wrmaltWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int rdmaltWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, unsigned int *pucBuff, size_t sSize);
-
-int wrmWithLock(struct bcm_mini_adapter *Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
-
-INT buffDnldVerify(struct bcm_mini_adapter *Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress);
-
-VOID putUsbSuspend(struct work_struct *work);
-
-bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios);
-
-#endif
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
deleted file mode 100644
index b3ac614cd35f..000000000000
--- a/drivers/staging/bcm/Qos.c
+++ /dev/null
@@ -1,1200 +0,0 @@
-/**
- * @file Qos.c
- * This file contains the routines related to Quality of Service.
-*/
-#include "headers.h"
-
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,
- PVOID pvEthPayload,
- struct bcm_eth_packet_info *pstEthCsPktInfo);
-
-static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo,
- struct bcm_classifier_rule *pstClassifierRule,
- B_UINT8 EthCSCupport);
-
-static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule);
-
-static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
-
-
-/*******************************************************************
-* Function - MatchSrcIpAddress()
-*
-* Description - Checks whether the source IP address from the packet
-* matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-* - ulSrcIP : Source IP address from the packet.
-*
-* Returns - TRUE if the address matches, else false.
-*********************************************************************/
-static bool MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,
- ULONG ulSrcIP)
-{
- UCHAR ucLoopIndex = 0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *src_addr;
-
- ulSrcIP = ntohl(ulSrcIP);
- if (0 == pstClassifierRule->ucIPSourceAddressLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength);
- ucLoopIndex++) {
- src_addr = &pstClassifierRule->stSrcIpAddress;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x",
- (UINT)src_addr->ulIpv4Mask[ucLoopIndex],
- (UINT)ulSrcIP,
- (UINT)src_addr->ulIpv6Addr[ucLoopIndex]);
-
- if ((src_addr->ulIpv4Mask[ucLoopIndex] & ulSrcIP) ==
- (src_addr->ulIpv4Addr[ucLoopIndex] &
- src_addr->ulIpv4Mask[ucLoopIndex]))
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Ip Address Not Matched");
- return false;
-}
-
-
-/*******************************************************************
-* Function - MatchDestIpAddress()
-*
-* Description - Checks whether the destination IP address from the packet
-* matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-* - ulDestIP : Destination IP address from the packet.
-*
-* Returns - TRUE if the address matches, else false.
-*********************************************************************/
-static bool MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- union u_ip_address *dest_addr = &pstClassifierRule->stDestIpAddress;
-
- ulDestIP = ntohl(ulDestIP);
- if (0 == pstClassifierRule->ucIPDestinationAddressLength)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Ip Address 0x%x 0x%x 0x%x ",
- (UINT)ulDestIP,
- (UINT)dest_addr->ulIpv4Mask[ucLoopIndex],
- (UINT)dest_addr->ulIpv4Addr[ucLoopIndex]);
-
- for (ucLoopIndex = 0;
- ucLoopIndex < (pstClassifierRule->ucIPDestinationAddressLength);
- ucLoopIndex++) {
- if ((dest_addr->ulIpv4Mask[ucLoopIndex] & ulDestIP) ==
- (dest_addr->ulIpv4Addr[ucLoopIndex] &
- dest_addr->ulIpv4Mask[ucLoopIndex]))
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Ip Address Not Matched");
- return false;
-}
-
-
-/************************************************************************
-* Function - MatchTos()
-*
-* Description - Checks whether the TOS from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule : Pointer to the classifier rule.
-* - ucTypeOfService: TOS from the packet.
-*
-* Returns - TRUE if the TOS matches, else false.
-**************************************************************************/
-static bool MatchTos(struct bcm_classifier_rule *pstClassifierRule,
- UCHAR ucTypeOfService)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (3 != pstClassifierRule->ucIPTypeOfServiceLength)
- return TRUE;
-
- if (((pstClassifierRule->ucTosMask & ucTypeOfService) <=
- pstClassifierRule->ucTosHigh) &&
- ((pstClassifierRule->ucTosMask & ucTypeOfService) >=
- pstClassifierRule->ucTosLow))
- return TRUE;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Type Of Service Not Matched");
- return false;
-}
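The TOS test above is a masked range check: the packet's TOS byte is ANDed with the rule's ucTosMask and the result must fall inside [ucTosLow, ucTosHigh]; rules that do not carry a 3-byte TOS spec match unconditionally. A minimal standalone sketch of that predicate, with hypothetical names and added only for illustration:

static int tos_in_range(unsigned char tos, unsigned char mask,
			unsigned char low, unsigned char high)
{
	unsigned char masked = tos & mask;	/* compare only the masked bits */

	return masked >= low && masked <= high;
}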
-
-
-/***************************************************************************
-* Function - MatchProtocol()
-*
-* Description - Checks whether the protocol from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-* - ucProtocol : Protocol from the packet.
-*
-* Returns - TRUE if the protocol matches, else false.
-****************************************************************************/
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,
- UCHAR ucProtocol)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (0 == pstClassifierRule->ucProtocolLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucProtocolLength;
- ucLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol:0x%X Classification Protocol:0x%X",
- ucProtocol,
- pstClassifierRule->ucProtocol[ucLoopIndex]);
- if (pstClassifierRule->ucProtocol[ucLoopIndex] == ucProtocol)
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol Not Matched");
- return false;
-}
-
-
-/***********************************************************************
-* Function - MatchSrcPort()
-*
-* Description - Checks whether the source port from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-* - ushSrcPort : Source port from the packet.
-*
-* Returns - TRUE if the port matches, else false.
-***************************************************************************/
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,
- USHORT ushSrcPort)
-{
- UCHAR ucLoopIndex = 0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
-
- if (0 == pstClassifierRule->ucSrcPortRangeLength)
- return TRUE;
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucSrcPortRangeLength;
- ucLoopIndex++) {
- if (ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
- ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Port: %x Not Matched ",
- ushSrcPort);
- return false;
-}
-
-
-/***********************************************************************
-* Function - MatchDestPort()
-*
-* Description - Checks whether the destination port from the packet matches that of the queue.
-*
-* Parameters - pstClassifierRule: Pointer to the classifier rule.
-* - ushDestPort : Destination port from the packet.
-*
-* Returns - TRUE if the port matches, else false.
-***************************************************************************/
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,
- USHORT ushDestPort)
-{
- UCHAR ucLoopIndex = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (0 == pstClassifierRule->ucDestPortRangeLength)
- return TRUE;
-
- for (ucLoopIndex = 0;
- ucLoopIndex < pstClassifierRule->ucDestPortRangeLength;
- ucLoopIndex++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Matching Port:0x%X 0x%X 0x%X",
- ushDestPort,
- pstClassifierRule->usDestPortRangeLo[ucLoopIndex],
- pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
-
- if (ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
- ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Dest Port: %x Not Matched",
- ushDestPort);
- return false;
-}
-/**
- * @ingroup tx_functions
- * Compares the IPv4 addresses, TOS, protocol and port numbers against
- * the classifier rule.
- * @return TRUE if the packet matches the classifier rule, else false.
-*/
-static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
- struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule)
-{
- struct bcm_transport_header *xprt_hdr = NULL;
- bool bClassificationSucceed = false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "========>");
-
- xprt_hdr = (struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Trying to see Direction = %d %d",
- pstClassifierRule->ucDirection,
- pstClassifierRule->usVCID_Value);
-
- /* Checking classifier validity */
- if (!pstClassifierRule->bUsed ||
- pstClassifierRule->ucDirection == DOWNLINK_DIR)
- goto out;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "is IPv6 check!");
- if (pstClassifierRule->bIpv6Protocol)
- goto out;
-
- /* Checking IP header parameter */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Trying to match Source IP Address");
- if (!MatchSrcIpAddress(pstClassifierRule, iphd->saddr))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Source IP Address Matched");
-
- if (!MatchDestIpAddress(pstClassifierRule, iphd->daddr))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination IP Address Matched");
-
- if (!MatchTos(pstClassifierRule, iphd->tos)) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "TOS Match failed\n");
- goto out;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "TOS Matched");
-
- if (!MatchProtocol(pstClassifierRule, iphd->protocol))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Protocol Matched");
-
- /*
-	 * If the protocol is not TCP or UDP, there is no need to
-	 * compare the source and destination ports
- */
- if (iphd->protocol != TCP && iphd->protocol != UDP) {
- bClassificationSucceed = TRUE;
- goto out;
- }
- /* Checking Transport Layer Header field if present */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Source Port %04x",
- (iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
-
- if (!MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.source : xprt_hdr->thdr.source)))
- goto out;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Src Port Matched");
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Destination Port %04x",
- (iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
- xprt_hdr->thdr.dest);
-
- if (!MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP) ?
- xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest)))
- goto out;
- bClassificationSucceed = TRUE;
-
-out:
- if (TRUE == bClassificationSucceed) {
- INT iMatchedSFQueueIndex = 0;
-
- iMatchedSFQueueIndex =
- SearchSfid(Adapter, pstClassifierRule->ulSFID);
- if (iMatchedSFQueueIndex >= NO_OF_QUEUES)
- bClassificationSucceed = false;
- else if (false == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
- bClassificationSucceed = false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "IpVersion4 <==========");
-
- return bClassificationSucceed;
-}
-
-VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter)
-{
- UINT iIndex = 0;
-
- for (iIndex = 0; iIndex < HiPriority; iIndex++) {
- if (!Adapter->PackInfo[iIndex].bValid)
- continue;
-
- PruneQueue(Adapter, iIndex);
- }
-}
-
-
-/**
- * @ingroup tx_functions
- * This function checks whether the number of bytes in a queue exceeds
- * the queue's maximum size. If so, it drops packets from the head
- * until the number of bytes is less than or equal to the maximum
- * queue size for the queue.
- */
-static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
-{
- struct sk_buff *PacketToDrop = NULL;
- struct net_device_stats *netstats;
- struct bcm_packet_info *curr_pack_info = &Adapter->PackInfo[iIndex];
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "=====> Index %d",
- iIndex);
-
- if (iIndex == HiPriority)
- return;
-
- if (!Adapter || (iIndex < 0) || (iIndex > HiPriority))
- return;
-
- /* To Store the netdevice statistic */
- netstats = &Adapter->dev->stats;
-
- spin_lock_bh(&curr_pack_info->SFQueueLock);
-
- while (1) {
-/* while((UINT)curr_pack_info->uiCurrentPacketsOnHost >
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP) { */
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
- curr_pack_info->uiCurrentBytesOnHost,
- curr_pack_info->uiMaxBucketSize);
-
- PacketToDrop = curr_pack_info->FirstTxQueue;
-
- if (PacketToDrop == NULL)
- break;
- if ((curr_pack_info->uiCurrentPacketsOnHost <
- SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
- ((1000*(jiffies - *((B_UINT32 *)(PacketToDrop->cb) +
- SKB_CB_LATENCY_OFFSET))/HZ) <=
- curr_pack_info->uiMaxLatency))
- break;
-
- if (PacketToDrop) {
- if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: tx queue %d overlimit\n",
- Adapter->dev->name, iIndex);
-
- netstats->tx_dropped++;
-
- DEQUEUEPACKET(curr_pack_info->FirstTxQueue,
- curr_pack_info->LastTxQueue);
- /* update current bytes and packets count */
- curr_pack_info->uiCurrentBytesOnHost -=
- PacketToDrop->len;
- curr_pack_info->uiCurrentPacketsOnHost--;
- /* update dropped bytes and packets counts */
- curr_pack_info->uiDroppedCountBytes += PacketToDrop->len;
- curr_pack_info->uiDroppedCountPackets++;
- dev_kfree_skb(PacketToDrop);
-
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "Dropped Bytes:%x Dropped Packets:%x",
- curr_pack_info->uiDroppedCountBytes,
- curr_pack_info->uiDroppedCountPackets);
-
- atomic_dec(&Adapter->TotalPacketCount);
- }
-
- spin_unlock_bh(&curr_pack_info->SFQueueLock);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "TotalPacketCount:%x",
- atomic_read(&Adapter->TotalPacketCount));
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- PRUNE_QUEUE,
- DBG_LVL_ALL,
- "<=====");
-}
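The drop policy above keeps the head packet only while two conditions hold: the per-SF backlog is below SF_MAX_ALLOWED_PACKETS_TO_BACKUP and the packet's queueing age, converted from jiffies to milliseconds with 1000*(now - enqueue)/HZ, is within uiMaxLatency. A standalone sketch of that keep-or-drop test follows; the parameter names are hypothetical and it is not part of the original file.

static int keep_head_packet(unsigned long now_jiffies,
			    unsigned long enqueue_jiffies,
			    unsigned int pkts_on_host,
			    unsigned int max_backlog_pkts,
			    unsigned int max_latency_ms, unsigned int hz)
{
	/* Same jiffies-to-milliseconds conversion the loop above uses. */
	unsigned long age_ms = 1000UL * (now_jiffies - enqueue_jiffies) / hz;

	return pkts_on_host < max_backlog_pkts && age_ms <= max_latency_ms;
}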
-
-VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
-{
- INT iQIndex;
- UINT uiTotalPacketLength;
- struct sk_buff *PacketToDrop = NULL;
- struct bcm_packet_info *curr_packet_info;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "=====>");
-
- /* down(&Adapter->data_packet_queue_lock); */
- for (iQIndex = LowPriority; iQIndex < HiPriority; iQIndex++) {
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- curr_packet_info = &Adapter->PackInfo[iQIndex];
-
- spin_lock_bh(&curr_packet_info->SFQueueLock);
- while (curr_packet_info->FirstTxQueue) {
- PacketToDrop = curr_packet_info->FirstTxQueue;
- if (PacketToDrop) {
- uiTotalPacketLength = PacketToDrop->len;
- netstats->tx_dropped++;
- } else
- uiTotalPacketLength = 0;
-
- DEQUEUEPACKET(curr_packet_info->FirstTxQueue,
- curr_packet_info->LastTxQueue);
-
- /* Free the skb */
- dev_kfree_skb(PacketToDrop);
-
- /* update current bytes and packets count */
- curr_packet_info->uiCurrentBytesOnHost -= uiTotalPacketLength;
- curr_packet_info->uiCurrentPacketsOnHost--;
-
- /* update dropped bytes and packets counts */
- curr_packet_info->uiDroppedCountBytes += uiTotalPacketLength;
- curr_packet_info->uiDroppedCountPackets++;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "Dropped Bytes:%x Dropped Packets:%x",
- curr_packet_info->uiDroppedCountBytes,
- curr_packet_info->uiDroppedCountPackets);
- atomic_dec(&Adapter->TotalPacketCount);
- }
- spin_unlock_bh(&curr_packet_info->SFQueueLock);
- }
- /* up(&Adapter->data_packet_queue_lock); */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_OTHERS,
- DUMP_INFO,
- DBG_LVL_ALL,
- "<=====");
-}
-
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb)
-{
- INT uiLoopIndex = 0;
- struct bcm_classifier_rule *pstClassifierRule = NULL;
- struct bcm_eth_packet_info stEthCsPktInfo;
- PVOID pvEThPayload = NULL;
- struct iphdr *pIpHeader = NULL;
- INT uiSfIndex = 0;
- USHORT usIndex = Adapter->usBestEffortQueueIndex;
- bool bFragmentedPkt = false, bClassificationSucceed = false;
- USHORT usCurrFragment = 0;
-
- struct bcm_tcp_header *pTcpHeader;
- UCHAR IpHeaderLength;
- UCHAR TcpHeaderLength;
-
- pvEThPayload = skb->data;
- *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) = 0;
- EThCSGetPktInfo(Adapter, pvEThPayload, &stEthCsPktInfo);
-
- switch (stEthCsPktInfo.eNwpktEthFrameType) {
- case eEth802LLCFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802LLCFrame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_eth_llc_frame);
- break;
- case eEth802LLCSNAPFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802LLC SNAP Frame\n");
- pIpHeader = pvEThPayload +
- sizeof(struct bcm_eth_llc_snap_frame);
- break;
- case eEth802QVLANFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : 802.1Q VLANFrame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_eth_q_frame);
- break;
- case eEthOtherFrame:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : ETH Other Frame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
- break;
- default:
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : Unrecognized ETH Frame\n");
- pIpHeader = pvEThPayload + sizeof(struct bcm_ethernet2_frame);
- break;
- }
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet) {
- usCurrFragment = (ntohs(pIpHeader->frag_off) & IP_OFFSET);
- if ((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
- bFragmentedPkt = TRUE;
-
- if (bFragmentedPkt) {
- /* Fragmented Packet. Get Frag Classifier Entry. */
- pstClassifierRule = GetFragIPClsEntry(Adapter,
- pIpHeader->id,
- pIpHeader->saddr);
- if (pstClassifierRule) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "It is next Fragmented pkt");
- bClassificationSucceed = TRUE;
- }
- if (!(ntohs(pIpHeader->frag_off) & IP_MF)) {
- /* Last fragment. Remove Frag Classifier Entry */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "This is the last fragmented Pkt");
- DelFragIPClsEntry(Adapter,
- pIpHeader->id,
- pIpHeader->saddr);
- }
- }
- }
-
- for (uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--) {
- if (bClassificationSucceed)
- break;
- /*
- * Iterate through all classifiers which are already in order of priority
- * to classify the packet until a match is found
- */
- if (false == Adapter->astClassifierTable[uiLoopIndex].bUsed) {
- bClassificationSucceed = false;
- continue;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Adapter->PackInfo[%d].bvalid=True\n",
- uiLoopIndex);
-
- if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection) {
- bClassificationSucceed = false; /* cannot be processed for classification. */
- continue; /* it is a downlink connection */
- }
-
- pstClassifierRule = &Adapter->astClassifierTable[uiLoopIndex];
-
- uiSfIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- if (uiSfIndex >= NO_OF_QUEUES) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Queue Not Valid. SearchSfid for this classifier Failed\n");
- continue;
- }
-
- if (Adapter->PackInfo[uiSfIndex].bEthCSSupport) {
-
- if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n");
- bClassificationSucceed = false;
- continue;
- }
-
-
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n",
- pstClassifierRule->uiClassifierRuleIndex,
- Adapter->PackInfo[uiSfIndex].ulSFID);
- bClassificationSucceed = EThCSClassifyPkt(Adapter,
- skb,
- &stEthCsPktInfo,
- pstClassifierRule,
- Adapter->PackInfo[uiSfIndex].bEthCSSupport);
-
- if (!bClassificationSucceed) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ClassifyPacket : Ethernet CS Classification Failed\n");
- continue;
- }
- } else { /* No ETH Supported on this SF */
- if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF\n");
- bClassificationSucceed = false;
- continue;
- }
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Proceeding to IP CS Classification");
-
- if (Adapter->PackInfo[uiSfIndex].bIPCSSupport) {
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- " ClassifyPacket : Packet is Not an IP Packet\n");
- bClassificationSucceed = false;
- continue;
- }
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "Dump IP Header :\n");
- DumpFullPacket((PUCHAR)pIpHeader, 20);
-
- if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
- bClassificationSucceed = IpVersion4(Adapter,
- pIpHeader,
- pstClassifierRule);
- else if (stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
- bClassificationSucceed = IpVersion6(Adapter,
- pIpHeader,
- pstClassifierRule);
- }
- }
-
- if (bClassificationSucceed == TRUE) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "CF id : %d, SF ID is =%lu",
- pstClassifierRule->uiClassifierRuleIndex,
- pstClassifierRule->ulSFID);
-
- /* Store The matched Classifier in SKB */
- *((UINT32 *)(skb->cb)+SKB_CB_CLASSIFICATION_OFFSET) =
- pstClassifierRule->uiClassifierRuleIndex;
- if ((TCP == pIpHeader->protocol) && !bFragmentedPkt &&
- (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <=
- skb->len)) {
- IpHeaderLength = pIpHeader->ihl;
- pTcpHeader =
- (struct bcm_tcp_header *)(((PUCHAR)pIpHeader) +
- (IpHeaderLength*4));
- TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength);
-
- if ((pTcpHeader->ucFlags & TCP_ACK) &&
- (ntohs(pIpHeader->tot_len) ==
- (IpHeaderLength*4)+(TcpHeaderLength*4)))
- *((UINT32 *) (skb->cb) + SKB_CB_TCPACK_OFFSET) =
- TCP_ACK;
- }
-
- usIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "index is =%d",
- usIndex);
-
- /*
- * If this is the first fragment of a fragmented pkt,
- * add this CF. Only this CF should be used for all other
- * fragments of this pkt.
- */
- if (bFragmentedPkt && (usCurrFragment == 0)) {
- /*
- * First Fragment of Fragmented Packet.
- * Create Frag CLS Entry
- */
- struct bcm_fragmented_packet_info stFragPktInfo;
-
- stFragPktInfo.bUsed = TRUE;
- stFragPktInfo.ulSrcIpAddress = pIpHeader->saddr;
- stFragPktInfo.usIpIdentification = pIpHeader->id;
- stFragPktInfo.pstMatchedClassifierEntry =
- pstClassifierRule;
- stFragPktInfo.bOutOfOrderFragment = false;
- AddFragIPClsEntry(Adapter, &stFragPktInfo);
- }
-
-
- }
-
- return bClassificationSucceed ? usIndex : INVALID_QUEUE_INDEX;
-}
-
-static bool EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule,
- PUCHAR Mac)
-{
- UINT i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (pstClassifierRule->ucEthCSSrcMACLen == 0)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s\n", __func__);
- for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",
- i,
- Mac[i],
- pstClassifierRule->au8EThCSSrcMAC[i],
- pstClassifierRule->au8EThCSSrcMACMask[i]);
- if ((pstClassifierRule->au8EThCSSrcMAC[i] &
- pstClassifierRule->au8EThCSSrcMACMask[i]) !=
- (Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i]))
- return false;
- }
- return TRUE;
-}
-
-static bool EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule,
- PUCHAR Mac)
-{
- UINT i = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if (pstClassifierRule->ucEthCSDestMACLen == 0)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s\n",
- __func__);
- for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "DEST MAC[%x] = %x ClassifierRuleDestMAC = %x Mask : %x\n",
- i,
- Mac[i],
- pstClassifierRule->au8EThCSDestMAC[i],
- pstClassifierRule->au8EThCSDestMACMask[i]);
- if ((pstClassifierRule->au8EThCSDestMAC[i] &
- pstClassifierRule->au8EThCSDestMACMask[i]) !=
- (Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i]))
- return false;
- }
- return TRUE;
-}
-
-static bool EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- if ((pstClassifierRule->ucEtherTypeLen == 0) ||
- (pstClassifierRule->au8EthCSEtherType[0] == 0))
- return TRUE;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s SrcEtherType:%x CLS EtherType[0]:%x\n",
- __func__,
- pstEthCsPktInfo->usEtherType,
- pstClassifierRule->au8EthCSEtherType[0]);
- if (pstClassifierRule->au8EthCSEtherType[0] == 1) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s CLS EtherType[1]:%x EtherType[2]:%x\n",
- __func__,
- pstClassifierRule->au8EthCSEtherType[1],
- pstClassifierRule->au8EthCSEtherType[2]);
-
- if (memcmp(&pstEthCsPktInfo->usEtherType,
- &pstClassifierRule->au8EthCSEtherType[1],
- 2) == 0)
- return TRUE;
- else
- return false;
- }
-
- if (pstClassifierRule->au8EthCSEtherType[0] == 2) {
- if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
- return false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s EthCS DSAP:%x EtherType[2]:%x\n",
- __func__,
- pstEthCsPktInfo->ucDSAP,
- pstClassifierRule->au8EthCSEtherType[2]);
- if (pstEthCsPktInfo->ucDSAP ==
- pstClassifierRule->au8EthCSEtherType[2])
- return TRUE;
- else
- return false;
-
- }
-
- return false;
-
-}
-
-static bool EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- bool bClassificationSucceed = false;
- USHORT usVLANID;
- B_UINT8 uPriority = 0;
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s CLS UserPrio:%x CLS VLANID:%x\n",
- __func__,
- ntohs(*((USHORT *)pstClassifierRule->usUserPriority)),
- pstClassifierRule->usVLANID);
-
- /*
- * In case FW didn't receive the TLV,
- * the priority field should be ignored
- */
- if (pstClassifierRule->usValidityBitMap &
- (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID)) {
- if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return false;
-
- uPriority = (ntohs(*(USHORT *)(skb->data +
- sizeof(struct bcm_eth_header))) &
- 0xF000) >> 13;
-
- if ((uPriority >= pstClassifierRule->usUserPriority[0]) &&
- (uPriority <=
- pstClassifierRule->usUserPriority[1]))
- bClassificationSucceed = TRUE;
-
- if (!bClassificationSucceed)
- return false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 D User Priority Rule Matched\n");
-
- bClassificationSucceed = false;
-
- if (pstClassifierRule->usValidityBitMap &
- (1<<PKT_CLASSIFICATION_VLANID_VALID)) {
- if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
- return false;
-
- usVLANID = ntohs(*(USHORT *)(skb->data +
- sizeof(struct bcm_eth_header))) & 0xFFF;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "%s Pkt VLANID %x Priority: %d\n",
- __func__,
- usVLANID,
- uPriority);
-
- if (usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
- bClassificationSucceed = TRUE;
-
- if (!bClassificationSucceed)
- return false;
- }
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 Q VLAN ID Rule Matched\n");
-
- return TRUE;
-}
-
-
-static bool EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,
- struct sk_buff *skb,
- struct bcm_eth_packet_info *pstEthCsPktInfo,
- struct bcm_classifier_rule *pstClassifierRule,
- B_UINT8 EthCSSupport)
-{
- bool bClassificationSucceed = false;
-
- bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,
- ((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS SrcMAC Matched\n");
-
- bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule,
- ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS DestMAC Matched\n");
-
- /* classify on ETHType/802.2SAP TLV */
- bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule,
- skb,
- pstEthCsPktInfo);
- if (!bClassificationSucceed)
- return false;
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS EthType/802.2SAP Matched\n");
-
- /* classify on 802.1VLAN Header Parameters */
- bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule,
- skb,
- pstEthCsPktInfo);
- if (!bClassificationSucceed)
- return false;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "ETH CS 802.1 VLAN Rules Matched\n");
-
- return bClassificationSucceed;
-}
-
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,
- PVOID pvEthPayload,
- struct bcm_eth_packet_info *pstEthCsPktInfo)
-{
- USHORT u16Etype = ntohs(
- ((struct bcm_eth_header *)pvEthPayload)->u16Etype);
-
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCSGetPktInfo : Eth Hdr Type : %X\n",
- u16Etype);
- if (u16Etype > 0x5dc) {
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCSGetPktInfo : ETH2 Frame\n");
- /* ETH2 Frame */
- if (u16Etype == ETHERNET_FRAMETYPE_802QVLAN) {
- /* 802.1Q VLAN Header */
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame;
- u16Etype = ((struct bcm_eth_q_frame *)pvEthPayload)->EthType;
- /* ((ETH_CS_802_Q_FRAME*)pvEthPayload)->UserPriority */
- } else {
- pstEthCsPktInfo->eNwpktEthFrameType = eEthOtherFrame;
- u16Etype = ntohs(u16Etype);
- }
- } else {
- /* 802.2 LLC */
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "802.2 LLC Frame\n");
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame;
- pstEthCsPktInfo->ucDSAP =
- ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP;
- if (pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA) {
- /* SNAP Frame */
- pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame;
- u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType;
- }
- }
- if (u16Etype == ETHERNET_FRAMETYPE_IPV4)
- pstEthCsPktInfo->eNwpktIPFrameType = eIPv4Packet;
- else if (u16Etype == ETHERNET_FRAMETYPE_IPV6)
- pstEthCsPktInfo->eNwpktIPFrameType = eIPv6Packet;
- else
- pstEthCsPktInfo->eNwpktIPFrameType = eNonIPPacket;
-
- pstEthCsPktInfo->usEtherType = ((struct bcm_eth_header *)pvEthPayload)->u16Etype;
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->eNwpktIPFrameType : %x\n",
- pstEthCsPktInfo->eNwpktIPFrameType);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->eNwpktEthFrameType : %x\n",
- pstEthCsPktInfo->eNwpktEthFrameType);
- BCM_DEBUG_PRINT(Adapter,
- DBG_TYPE_TX,
- IPV4_DBG,
- DBG_LVL_ALL,
- "EthCsPktInfo->usEtherType : %x\n",
- pstEthCsPktInfo->usEtherType);
-}
-
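
One detail of ClassifyPacket() above deserves a standalone illustration: a packet is tagged as a pure TCP ACK only when the ACK flag is set and the IP total length equals the IP header length plus the TCP header length, i.e. the segment carries no payload. A minimal sketch of that test, assuming the caller already has a pointer to the IPv4 header and using the generic struct iphdr/struct tcphdr rather than the driver's bcm_tcp_header:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>

/* Sketch only: true when the IPv4 packet is a bare TCP ACK, i.e. the ACK
 * flag is set and the IP total length equals the sum of the IP and TCP
 * header lengths, so the segment carries no payload. */
static bool is_pure_tcp_ack(const struct iphdr *iph)
{
	const struct tcphdr *tcph;
	unsigned int ip_hlen, tcp_hlen;

	if (iph->protocol != IPPROTO_TCP)
		return false;

	ip_hlen = iph->ihl * 4;
	tcph = (const struct tcphdr *)((const u8 *)iph + ip_hlen);
	tcp_hlen = tcph->doff * 4;

	return tcph->ack && ntohs(iph->tot_len) == ip_hlen + tcp_hlen;
}
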
diff --git a/drivers/staging/bcm/Queue.h b/drivers/staging/bcm/Queue.h
deleted file mode 100644
index 460c0aee67f6..000000000000
--- a/drivers/staging/bcm/Queue.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*************************************
-* Queue.h
-**************************************/
-#ifndef __QUEUE_H__
-#define __QUEUE_H__
-
-
-
-#define ENQUEUEPACKET(_Head, _Tail, _Packet) \
-do { \
- if (!_Head) { \
- _Head = _Packet; \
- } \
- else { \
- (_Tail)->next = _Packet; \
- } \
- (_Packet)->next = NULL; \
- _Tail = _Packet; \
-} while (0)
-#define DEQUEUEPACKET(Head, Tail) \
-do { \
- if (Head) { \
- if (!Head->next) { \
- Tail = NULL; \
- } \
- Head = Head->next; \
- } \
-} while (0)
-#endif /* __QUEUE_H__ */
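
A minimal usage sketch for the two macros above, assuming a single queue threaded through skb->next with hypothetical txq_head/txq_tail pointers (the driver keeps the equivalent FirstTxQueue/LastTxQueue pair per service flow, protected by SFQueueLock):

#include <linux/skbuff.h>
#include "Queue.h"

/* Hypothetical queue: a singly linked list threaded through skb->next */
static struct sk_buff *txq_head, *txq_tail;

static void txq_enqueue(struct sk_buff *skb)
{
	/* appends at the tail and NULL-terminates skb->next */
	ENQUEUEPACKET(txq_head, txq_tail, skb);
}

static struct sk_buff *txq_dequeue(void)
{
	struct sk_buff *skb = txq_head;

	if (skb)
		/* advances the head; clears the tail when the list empties */
		DEQUEUEPACKET(txq_head, txq_tail);
	return skb;
}
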
diff --git a/drivers/staging/bcm/TODO b/drivers/staging/bcm/TODO
deleted file mode 100644
index 8467f45d08a6..000000000000
--- a/drivers/staging/bcm/TODO
+++ /dev/null
@@ -1,26 +0,0 @@
-This driver is barely functional in its current state.
-
-Kevin McKinney(klmckinney1@gmail.com) and Matthias Beyer(mail@beyermatthias.de)
-are currently maintaining/cleaning up this driver. Please copy us on all
-patches. More maintainers are always welcome.
-
-BIG:
- - the existing API (/dev/tarang) should be replaced
- Is it possible to use the same API as the Intel WiMAX stack and
- have the same user-level components?
- - the QoS and queue model is non-standard and inflexible.
- Use the existing TC QoS?
-
-TODO:
- - support more than one board - eliminate global variables
- - remove developer debug BCM_DEBUG() macros
- add a limited number of messages through netif_msg()
- - fix non-standard kernel style
- - fix checkpatch warnings
- - use request_firmware()
- - replace the file I/O used to load the config with a better API
- - merge some files together?
- - cleanup/eliminate debug messages
-
-
-
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
deleted file mode 100644
index 622a482e9826..000000000000
--- a/drivers/staging/bcm/Transmit.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * @file Transmit.c
- * @defgroup tx_functions Transmission
- * @section Queueing
- * @dot
- * digraph transmit1 {
- * node[shape=box]
- * edge[weight=5;color=red]
- *
- * bcm_transmit->GetPacketQueueIndex[label="IP Packet"]
- * GetPacketQueueIndex->IpVersion4[label="IPV4"]
- * GetPacketQueueIndex->IpVersion6[label="IPV6"]
- * }
- *
- * @enddot
- *
- * @section De-Queueing
- * @dot
- * digraph transmit2 {
- * node[shape=box]
- * edge[weight=5;color=red]
- * interrupt_service_thread->transmit_packets
- * tx_pkt_hdler->transmit_packets
- * transmit_packets->CheckAndSendPacketFromIndex
- * transmit_packets->UpdateTokenCount
- * CheckAndSendPacketFromIndex->PruneQueue
- * CheckAndSendPacketFromIndex->IsPacketAllowedForFlow
- * CheckAndSendPacketFromIndex->SendControlPacket[label="control pkt"]
- * SendControlPacket->bcm_cmd53
- * CheckAndSendPacketFromIndex->SendPacketFromQueue[label="data pkt"]
- * SendPacketFromQueue->SetupNextSend->bcm_cmd53
- * }
- * @enddot
- */
-
-#include "headers.h"
-
-/**
- * @ingroup ctrl_pkt_functions
- * This function dispatches control packet to the h/w interface
- * @return zero(success) or -ve value(failure)
- */
-int SendControlPacket(struct bcm_mini_adapter *Adapter, char *pControlPacket)
-{
- struct bcm_leader *PLeader = (struct bcm_leader *)pControlPacket;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx");
- if (!pControlPacket || !Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Got NULL Control Packet or Adapter");
- return STATUS_FAILURE;
- }
- if ((atomic_read(&Adapter->CurrNumFreeTxDesc) <
- ((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "NO FREE DESCRIPTORS TO SEND CONTROL PACKET");
- return STATUS_FAILURE;
- }
-
- /* Update the netdevice statistics */
- /* Dump Packet */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader Status: %x", PLeader->Status);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader VCID: %x", PLeader->Vcid);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "Leader Length: %x", PLeader->PLength);
- if (Adapter->device_removed)
- return 0;
-
- if (netif_msg_pktdata(Adapter))
- print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE,
- 16, 1, pControlPacket,
- PLeader->PLength + LEADER_SIZE, 0);
-
- Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
- pControlPacket,
- (PLeader->PLength + LEADER_SIZE));
-
- atomic_dec(&Adapter->CurrNumFreeTxDesc);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
- "<=========");
- return STATUS_SUCCESS;
-}
-
-/**
- * @ingroup tx_functions
- * This function dispatches the IP packets with the given VCID
- * to the target via the host h/w interface.
- * @return zero(success) or -ve value(failure)
- */
-int SetupNextSend(struct bcm_mini_adapter *Adapter,
- struct sk_buff *Packet, USHORT Vcid)
-{
- int status = 0;
- bool bHeaderSupressionEnabled = false;
- B_UINT16 uiClassifierRuleID;
- u16 QueueIndex = skb_get_queue_mapping(Packet);
- struct bcm_packet_info *curr_packet_info =
- &Adapter->PackInfo[QueueIndex];
- struct bcm_leader Leader = {0};
-
- if (Packet->len > MAX_DEVICE_DESC_SIZE) {
- status = STATUS_FAILURE;
- goto errExit;
- }
-
- /* Get the Classifier Rule ID */
- uiClassifierRuleID = *((UINT32 *) (Packet->cb) +
- SKB_CB_CLASSIFICATION_OFFSET);
-
- bHeaderSupressionEnabled = curr_packet_info->bHeaderSuppressionEnabled &
- Adapter->bPHSEnabled;
-
- if (Adapter->device_removed) {
- status = STATUS_FAILURE;
- goto errExit;
- }
-
- status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID,
- bHeaderSupressionEnabled,
- (UINT *)&Packet->len,
- curr_packet_info->bEthCSSupport);
-
- if (status != STATUS_SUCCESS) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
- "PHS Transmit failed..\n");
- goto errExit;
- }
-
- Leader.Vcid = Vcid;
-
- if (TCP_ACK == *((UINT32 *) (Packet->cb) + SKB_CB_TCPACK_OFFSET))
- Leader.Status = LEADER_STATUS_TCP_ACK;
- else
- Leader.Status = LEADER_STATUS;
-
- if (curr_packet_info->bEthCSSupport) {
- Leader.PLength = Packet->len;
- if (skb_headroom(Packet) < LEADER_SIZE) {
- status = skb_cow(Packet, LEADER_SIZE);
- if (status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND,
- DBG_LVL_ALL,
- "bcm_transmit : Failed To Increase headRoom\n");
- goto errExit;
- }
- }
- skb_push(Packet, LEADER_SIZE);
- memcpy(Packet->data, &Leader, LEADER_SIZE);
- } else {
- Leader.PLength = Packet->len - ETH_HLEN;
- memcpy((struct bcm_leader *)skb_pull(Packet,
- (ETH_HLEN - LEADER_SIZE)),
- &Leader,
- LEADER_SIZE);
- }
-
- status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
- Packet->data,
- (Leader.PLength + LEADER_SIZE));
- if (status) {
- ++Adapter->dev->stats.tx_errors;
- if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: transmit error %d\n",
- Adapter->dev->name,
- status);
- } else {
- struct net_device_stats *netstats = &Adapter->dev->stats;
-
- curr_packet_info->uiTotalTxBytes += Leader.PLength;
-
- netstats->tx_bytes += Leader.PLength;
- ++netstats->tx_packets;
-
- curr_packet_info->uiCurrentTokenCount -= Leader.PLength << 3;
- curr_packet_info->uiSentBytes += (Packet->len);
- curr_packet_info->uiSentPackets++;
- curr_packet_info->NumOfPacketsSent++;
-
- atomic_dec(&curr_packet_info->uiPerSFTxResourceCount);
- curr_packet_info->uiThisPeriodSentBytes += Leader.PLength;
- }
-
- atomic_dec(&Adapter->CurrNumFreeTxDesc);
-
-errExit:
- dev_kfree_skb(Packet);
- return status;
-}
-
-static int tx_pending(struct bcm_mini_adapter *Adapter)
-{
- return (atomic_read(&Adapter->TxPktAvail)
- && MINIMUM_PENDING_DESCRIPTORS <
- atomic_read(&Adapter->CurrNumFreeTxDesc))
- || Adapter->device_removed || (1 == Adapter->downloadDDR);
-}
-
-/**
- * @ingroup tx_functions
- * Transmit thread
- */
-int tx_pkt_handler(struct bcm_mini_adapter *Adapter)
-{
- int status = 0;
-
- while (!kthread_should_stop()) {
- /* FIXME - the timeout looks like workaround
- * for racy usage of TxPktAvail
- */
- if (Adapter->LinkUpStatus)
- wait_event_timeout(Adapter->tx_packet_wait_queue,
- tx_pending(Adapter),
- msecs_to_jiffies(10));
- else
- wait_event_interruptible(Adapter->tx_packet_wait_queue,
- tx_pending(Adapter));
-
- if (Adapter->device_removed)
- break;
-
- if (Adapter->downloadDDR == 1) {
- Adapter->downloadDDR += 1;
- status = download_ddr_settings(Adapter);
- if (status)
- pr_err(PFX "DDR DOWNLOAD FAILED! %d\n", status);
- continue;
- }
-
- /* Check end point for halt/stall. */
- if (Adapter->bEndPointHalted == TRUE) {
- Bcm_clear_halt_of_endpoints(Adapter);
- Adapter->bEndPointHalted = false;
- StartInterruptUrb((struct bcm_interface_adapter *)
- (Adapter->pvInterfaceAdapter));
- }
-
- if (Adapter->LinkUpStatus && !Adapter->IdleMode) {
- if (atomic_read(&Adapter->TotalPacketCount))
- update_per_sf_desc_cnts(Adapter);
- }
-
- if (atomic_read(&Adapter->CurrNumFreeTxDesc) &&
- Adapter->LinkStatus == SYNC_UP_REQUEST &&
- !Adapter->bSyncUpRequestSent) {
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS,
- DBG_LVL_ALL, "Calling LinkMessage");
- LinkMessage(Adapter);
- }
-
- if ((Adapter->IdleMode || Adapter->bShutStatus) &&
- atomic_read(&Adapter->TotalPacketCount)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX,
- TX_PACKETS, DBG_LVL_ALL,
- "Device in Low Power mode...waking up");
- Adapter->usIdleModePattern = ABORT_IDLE_MODE;
- Adapter->bWakeUpDevice = TRUE;
- wake_up(&Adapter->process_rx_cntrlpkt);
- }
-
- transmit_packets(Adapter);
- atomic_set(&Adapter->TxPktAvail, 0);
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL,
- "Exiting the tx thread..\n");
- Adapter->transmit_packet_thread = NULL;
- return 0;
-}
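
The leader-prepend step in SetupNextSend() above is the part most easily lost in the surrounding bookkeeping: on Ethernet-CS flows the whole frame is kept and a struct bcm_leader is copied in front of it, growing the headroom first when necessary. A minimal sketch of just that step, assuming the bcm_leader/LEADER_SIZE definitions from the driver headers (a hypothetical helper, not the driver's exact code):

#include <linux/skbuff.h>
#include <linux/string.h>
#include "headers.h"	/* struct bcm_leader, LEADER_SIZE (driver definitions) */

/* Sketch: make room for the leader and copy it in front of the payload.
 * Returns 0 on success or the skb_cow() error. */
static int prepend_leader(struct sk_buff *skb, const struct bcm_leader *leader)
{
	if (skb_headroom(skb) < LEADER_SIZE) {
		int ret = skb_cow(skb, LEADER_SIZE);	/* reallocate headroom */

		if (ret)
			return ret;
	}
	memcpy(skb_push(skb, LEADER_SIZE), leader, LEADER_SIZE);
	return 0;
}
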
diff --git a/drivers/staging/bcm/Typedefs.h b/drivers/staging/bcm/Typedefs.h
deleted file mode 100644
index 90b3b25dd606..000000000000
--- a/drivers/staging/bcm/Typedefs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/****************************
-* Typedefs.h
-****************************/
-#ifndef __TYPEDEFS_H__
-#define __TYPEDEFS_H__
-#define STATUS_SUCCESS 0
-#define STATUS_FAILURE -1
-
-
-#define TRUE 1
-
-
-typedef char CHAR;
-typedef int INT;
-typedef short SHORT;
-typedef long LONG;
-typedef void VOID;
-
-typedef unsigned char UCHAR;
-typedef unsigned char B_UINT8;
-typedef unsigned short USHORT;
-typedef unsigned short B_UINT16;
-typedef unsigned int UINT;
-typedef unsigned int B_UINT32;
-typedef unsigned long ULONG;
-typedef unsigned long DWORD;
-
-typedef char *PCHAR;
-typedef short *PSHORT;
-typedef int *PINT;
-typedef long *PLONG;
-typedef void *PVOID;
-
-typedef unsigned char *PUCHAR;
-typedef unsigned short *PUSHORT;
-typedef unsigned int *PUINT;
-typedef unsigned long *PULONG;
-typedef unsigned long long ULONG64;
-typedef unsigned long long LARGE_INTEGER;
-typedef unsigned int UINT32;
-#ifndef NULL
-#define NULL 0
-#endif
-
-
-#endif /* __TYPEDEFS_H__ */
-
diff --git a/drivers/staging/bcm/cntrl_SignalingInterface.h b/drivers/staging/bcm/cntrl_SignalingInterface.h
deleted file mode 100644
index 8683c2d4276e..000000000000
--- a/drivers/staging/bcm/cntrl_SignalingInterface.h
+++ /dev/null
@@ -1,311 +0,0 @@
-#ifndef CNTRL_SIGNALING_INTERFACE_
-#define CNTRL_SIGNALING_INTERFACE_
-
-#define DSA_REQ 11
-#define DSA_RSP 12
-#define DSA_ACK 13
-#define DSC_REQ 14
-#define DSC_RSP 15
-#define DSC_ACK 16
-#define DSD_REQ 17
-#define DSD_RSP 18
-#define DSD_ACK 19
-#define MAX_CLASSIFIERS_IN_SF 4
-
-#define MAX_STRING_LEN 20
-#define MAX_PHS_LENGTHS 255
-#define VENDOR_PHS_PARAM_LENGTH 10
-#define MAX_NUM_ACTIVE_BS 10
-#define AUTH_TOKEN_LENGTH 10
-#define NUM_HARQ_CHANNELS 16 /* Changed from 10 to 16 to accommodate all HARQ channels */
-#define VENDOR_CLASSIFIER_PARAM_LENGTH 1 /* Changed the size to 1 byte since we don't use it */
-#define VENDOR_SPECIF_QOS_PARAM 1
-#define VENDOR_PHS_PARAM_LENGTH 10
-#define MBS_CONTENTS_ID_LENGTH 10
-#define GLOBAL_SF_CLASSNAME_LENGTH 6
-
-#define TYPE_OF_SERVICE_LENGTH 3
-#define IP_MASKED_SRC_ADDRESS_LENGTH 32
-#define IP_MASKED_DEST_ADDRESS_LENGTH 32
-#define PROTOCOL_SRC_PORT_RANGE_LENGTH 4
-#define PROTOCOL_DEST_PORT_RANGE_LENGTH 4
-#define ETHERNET_DEST_MAC_ADDR_LENGTH 12
-#define ETHERNET_SRC_MAC_ADDR_LENGTH 12
-#define NUM_ETHERTYPE_BYTES 3
-#define NUM_IPV6_FLOWLABLE_BYTES 3
-
-struct bcm_packet_class_rules {
- /* 16bit UserPriority Of The Service Flow */
- u16 u16UserPriority;
- /* 16bit VLANID Of The Service Flow */
- u16 u16VLANID;
- /* 16bit Packet Classification RuleIndex Of The Service Flow */
- u16 u16PacketClassificationRuleIndex;
- /* 8bit Classifier Rule Priority Of The Service Flow */
- u8 u8ClassifierRulePriority;
- /* Length of IP TypeOfService field */
- u8 u8IPTypeOfServiceLength;
- /* 3bytes IP TypeOfService */
- u8 u8IPTypeOfService[TYPE_OF_SERVICE_LENGTH];
- /* Protocol used in classification of Service Flow */
- u8 u8Protocol;
- /* Length of IP Masked Source Address */
- u8 u8IPMaskedSourceAddressLength;
- /* IP Masked Source Address used in classification for the Service Flow */
- u8 u8IPMaskedSourceAddress[IP_MASKED_SRC_ADDRESS_LENGTH];
- /* Length of IP Destination Address */
- u8 u8IPDestinationAddressLength;
- /* IP Destination Address used in classification for the Service Flow */
- u8 u8IPDestinationAddress[IP_MASKED_DEST_ADDRESS_LENGTH];
- /* Length of Protocol Source Port Range */
- u8 u8ProtocolSourcePortRangeLength;
- /* Protocol Source Port Range used in the Service Flow */
- u8 u8ProtocolSourcePortRange[PROTOCOL_SRC_PORT_RANGE_LENGTH];
- /* Length of Protocol Dest Port Range */
- u8 u8ProtocolDestPortRangeLength;
- /* Protocol Dest Port Range used in the Service Flow */
- u8 u8ProtocolDestPortRange[PROTOCOL_DEST_PORT_RANGE_LENGTH];
- /* Length of Ethernet Destination MAC Address */
- u8 u8EthernetDestMacAddressLength;
- /* Ethernet Destination MAC Address used in classification of the Service Flow */
- u8 u8EthernetDestMacAddress[ETHERNET_DEST_MAC_ADDR_LENGTH];
- /* Length of Ethernet Source MAC Address */
- u8 u8EthernetSourceMACAddressLength;
- /* Ethernet Source MAC Address used in classification of the Service Flow */
- u8 u8EthernetSourceMACAddress[ETHERNET_SRC_MAC_ADDR_LENGTH];
- /* Length of Ethertype */
- u8 u8EthertypeLength;
- /* 3bytes Ethertype Of The Service Flow */
- u8 u8Ethertype[NUM_ETHERTYPE_BYTES];
- /* 8bit Associated PHSI Of The Service Flow */
- u8 u8AssociatedPHSI;
- /* Length of Vendor Specific Classifier Param length Of The Service Flow */
- u8 u8VendorSpecificClassifierParamLength;
- /* Vendor Specific Classifier Param Of The Service Flow */
- u8 u8VendorSpecificClassifierParam[VENDOR_CLASSIFIER_PARAM_LENGTH];
- /* Length Of IPv6 Flow Label of the Service Flow */
- u8 u8IPv6FlowLableLength;
- /* IPv6 Flow Label Of The Service Flow */
- u8 u8IPv6FlowLable[NUM_IPV6_FLOWLABLE_BYTES];
- /* Action associated with the classifier rule */
- u8 u8ClassifierActionRule;
- u16 u16ValidityBitMap;
-};
-
-struct bcm_phs_rules {
- /* 8bit PHS Index Of The Service Flow */
- u8 u8PHSI;
- /* PHSF Length Of The Service Flow */
- u8 u8PHSFLength;
- /* String of bytes containing header information to be suppressed by the sending CS and reconstructed by the receiving CS */
- u8 u8PHSF[MAX_PHS_LENGTHS];
- /* PHSM Length Of The Service Flow */
- u8 u8PHSMLength;
- /* PHS Mask for the SF */
- u8 u8PHSM[MAX_PHS_LENGTHS];
- /* 8bit Total number of bytes to be suppressed for the Service Flow */
- u8 u8PHSS;
- /* 8bit Indicates whether or not Packet Header contents need to be verified prior to suppression */
- u8 u8PHSV;
- /* Vendor Specific PHS param Length Of The Service Flow */
- u8 u8VendorSpecificPHSParamsLength;
- /* Vendor Specific PHS param Of The Service Flow */
- u8 u8VendorSpecificPHSParams[VENDOR_PHS_PARAM_LENGTH];
- u8 u8Padding[2];
-};
-
-struct bcm_convergence_types {
- /* 8bit Phs Classifier Action Of The Service Flow */
- u8 u8ClassfierDSCAction;
- /* 8bit Phs DSC Action Of The Service Flow */
- u8 u8PhsDSCAction;
- /* 16bit Padding */
- u8 u8Padding[2];
- /* Packet classification rules structure */
- struct bcm_packet_class_rules cCPacketClassificationRule;
- /* Payload header suppression rules structure */
- struct bcm_phs_rules cPhsRule;
-};
-
-struct bcm_connect_mgr_params {
- /* 32bitSFID Of The Service Flow */
- u32 u32SFID;
- /* 32bit Maximum Sustained Traffic Rate of the Service Flow */
- u32 u32MaxSustainedTrafficRate;
- /* 32bit Maximum Traffic Burst allowed for the Service Flow */
- u32 u32MaxTrafficBurst;
- /* 32bit Minimum Reserved Traffic Rate of the Service Flow */
- u32 u32MinReservedTrafficRate;
- /* 32bit Tolerated Jitter of the Service Flow */
- u32 u32ToleratedJitter;
- /* 32bit Maximum Latency of the Service Flow */
- u32 u32MaximumLatency;
- /* 16bitCID Of The Service Flow */
- u16 u16CID;
- /* 16bit SAID on which the service flow being set up shall be mapped */
- u16 u16TargetSAID;
- /* 16bit ARQ window size negotiated */
- u16 u16ARQWindowSize;
- /* 16bit Total Tx delay incl sending, receiving & processing delays */
- u16 u16ARQRetryTxTimeOut;
- /* 16bit Total Rx delay incl sending, receiving & processing delays */
- u16 u16ARQRetryRxTimeOut;
- /* 16bit ARQ block lifetime */
- u16 u16ARQBlockLifeTime;
- /* 16bit ARQ Sync loss timeout */
- u16 u16ARQSyncLossTimeOut;
- /* 16bit ARQ Purge timeout */
- u16 u16ARQRxPurgeTimeOut;
- /* TODO::Remove this once we move to a new CORR2 driver
- * brief Size of an ARQ block
- */
- u16 u16ARQBlockSize;
- /* #endif */
- /* 16bit Nominal interval b/w consecutive SDU arrivals at MAC SAP */
- u16 u16SDUInterArrivalTime;
- /* 16bit Specifies the time base for rate measurement */
- u16 u16TimeBase;
- /* 16bit Interval b/w Successive Grant opportunities */
- u16 u16UnsolicitedGrantInterval;
- /* 16bit Interval b/w Successive Polling grant opportunities */
- u16 u16UnsolicitedPollingInterval;
- /* internal var to get the overhead */
- u16 u16MacOverhead;
- /* MBS contents Identifier */
- u16 u16MBSContentsID[MBS_CONTENTS_ID_LENGTH];
- /* MBS contents Identifier length */
- u8 u8MBSContentsIDLength;
- /* ServiceClassName Length Of The Service Flow */
- u8 u8ServiceClassNameLength;
- /* 32bytes ServiceClassName Of The Service Flow */
- u8 u8ServiceClassName[32];
- /* 8bit Indicates whether or not MBS service is requested for this Service Flow */
- u8 u8MBSService;
- /* 8bit QOS Parameter Set specifies proper application of QoS parameters to Provisioned, Admitted and Active sets */
- u8 u8QosParamSet;
- /* 8bit Traffic Priority Of the Service Flow */
- u8 u8TrafficPriority;
- /* 8bit Uplink Grant Scheduling Type of The Service Flow */
- u8 u8ServiceFlowSchedulingType;
- /* 8bit Request transmission Policy of the Service Flow */
- u8 u8RequesttransmissionPolicy;
- /* 8bit Specifies whether SDUs for this Service flow are of FixedLength or Variable length */
- u8 u8FixedLengthVSVariableLengthSDUIndicator;
- /* 8bit Length of the SDU for a fixed length SDU service flow */
- u8 u8SDUSize;
- /* 8bit Indicates whether or not ARQ is requested for this connection */
- u8 u8ARQEnable;
- /* < 8bit Indicates whether or not data has to be delivered in order to the higher layer */
- u8 u8ARQDeliverInOrder;
- /* 8bit Receiver ARQ ACK processing time */
- u8 u8RxARQAckProcessingTime;
- /* 8bit Convergence Sublayer Specification Of The Service Flow */
- u8 u8CSSpecification;
- /* 8 bit Type of data delivery service */
- u8 u8TypeOfDataDeliveryService;
- /* 8bit Specifies whether a service flow may generate Paging */
- u8 u8PagingPreference;
- /* 8bit Indicates the MBS Zone through which the connection or virtual connection is valid */
- u8 u8MBSZoneIdentifierassignment;
- /* 8bit Specifies whether traffic on SF should generate MOB_TRF_IND to MS in sleep mode */
- u8 u8TrafficIndicationPreference;
- /* 8bit Specifies the length of predefined Global QoS parameter set encoding for this SF */
- u8 u8GlobalServicesClassNameLength;
- /* 6 byte Specifies the predefined Global QoS parameter set encoding for this SF */
- u8 u8GlobalServicesClassName[GLOBAL_SF_CLASSNAME_LENGTH];
- /* 8bit Indicates whether or not SN feedback is enabled for the conn */
- u8 u8SNFeedbackEnabled;
- /* Indicates the size of the Fragment Sequence Number for the connection */
- u8 u8FSNSize;
- /* 8bit Number of CIDs in active BS list */
- u8 u8CIDAllocation4activeBSsLength;
- /* CIDs of BS in the active list */
- u8 u8CIDAllocation4activeBSs[MAX_NUM_ACTIVE_BS];
- /* Specifies if PDU extended subheader should be applied on every PDU on this conn */
- u8 u8PDUSNExtendedSubheader4HarqReordering;
- /* 8bit Specifies whether the connection uses HARQ or not */
- u8 u8HARQServiceFlows;
- /* Specifies the length of Authorization token */
- u8 u8AuthTokenLength;
- /* Specifies the Authorization token */
- u8 u8AuthToken[AUTH_TOKEN_LENGTH];
- /* specifies the length of the HARQ channel mapping used to carry data */
- u8 u8HarqChannelMappingLength;
- /* specifies HARQ channels used to carry data */
- u8 u8HARQChannelMapping[NUM_HARQ_CHANNELS];
- /* 8bit Length of Vendor Specific QoS Params */
- u8 u8VendorSpecificQoSParamLength;
- /* 1byte Vendor Specific QoS Param Of The Service Flow */
- u8 u8VendorSpecificQoSParam[VENDOR_SPECIF_QOS_PARAM];
- /* indicates total classifiers in the SF */
- u8 u8TotalClassifiers; /* < Total number of valid classifiers */
- u8 bValid; /* < Validity flag */
- u8 u8Padding; /* < Padding byte */
- /*
- * Structure for Convergence SubLayer Types with a maximum of 4 classifiers
- */
- struct bcm_convergence_types cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-};
-
-struct bcm_add_request {
- u8 u8Type; /* < Type */
- u8 eConnectionDir; /* < Connection direction */
- /* brief 16 bit TID */
- u16 u16TID; /* < 16bit TID */
- /* brief 16bitCID */
- u16 u16CID; /* < 16bit CID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < 16bit VCID */
- struct bcm_connect_mgr_params *psfParameterSet; /* < connection manager parameters */
-};
-
-struct bcm_add_indication {
- u8 u8Type; /* < Type */
- u8 eConnectionDir; /* < Connection Direction */
- /* brief 16 bit TID */
- u16 u16TID; /* < TID */
- /* brief 16bitCID */
- u16 u16CID; /* < 16bitCID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < 16bitVCID */
- struct bcm_connect_mgr_params *psfAuthorizedSet; /* Authorized set of connection manager parameters */
- struct bcm_connect_mgr_params *psfAdmittedSet; /* Admitted set of connection manager parameters */
- struct bcm_connect_mgr_params *psfActiveSet; /* Active set of connection manager parameters */
- u8 u8CC; /* <Confirmation Code */
- u8 u8Padd; /* < 8-bit Padding */
- u16 u16Padd; /* < 16 bit Padding */
-};
-
-struct bcm_del_request {
- u8 u8Type; /* < Type */
- u8 u8Padding; /* < Padding byte */
- u16 u16TID; /* < TID */
- /* brief 32bitSFID */
- u32 u32SFID; /* < SFID */
-};
-
-struct bcm_del_indication {
- u8 u8Type; /* < Type */
- u8 u8Padding; /* < Padding */
- u16 u16TID; /* < TID */
- /* brief 16bitCID */
- u16 u16CID; /* < CID */
- /* brief 16bitVCID */
- u16 u16VCID; /* < VCID */
- /* brief 32bitSFID */
- u32 u32SFID; /* < SFID */
- /* brief 8bit Confirmation code */
- u8 u8ConfirmationCode; /* < Confirmation code */
- u8 u8Padding1[3]; /* < 3 byte Padding */
-};
-
-struct bcm_stim_sfhostnotify {
- u32 SFID; /* SFID of the service flow */
- u16 newCID; /* the new/changed CID */
- u16 VCID; /* New VCID if the flow was made active in the CID update TLV but was inactive earlier; otherwise the original VCID */
- u8 RetainSF; /* Indication to Host if the SF is to be retained or deleted; if TRUE-retain else delete */
- u8 QoSParamSet; /* QoS paramset of the retained SF */
- u16 u16reserved; /* For byte alignment */
-};
-
-#endif
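
The DSA/DSC/DSD codes at the top of this header form three consecutive request/response/ack triples. A hypothetical helper (not part of the driver) that maps an incoming type code to a printable name is enough to show how they are meant to be consumed:

#include <linux/types.h>
#include "cntrl_SignalingInterface.h"

/* Hypothetical helper: map a DSx message type code to a printable name */
static const char *dsx_msg_name(u8 type)
{
	switch (type) {
	case DSA_REQ: return "DSA-REQ";
	case DSA_RSP: return "DSA-RSP";
	case DSA_ACK: return "DSA-ACK";
	case DSC_REQ: return "DSC-REQ";
	case DSC_RSP: return "DSC-RSP";
	case DSC_ACK: return "DSC-ACK";
	case DSD_REQ: return "DSD-REQ";
	case DSD_RSP: return "DSD-RSP";
	case DSD_ACK: return "DSD-ACK";
	default:      return "unknown DSx type";
	}
}
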
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
deleted file mode 100644
index a7d4af5ea9a7..000000000000
--- a/drivers/staging/bcm/headers.h
+++ /dev/null
@@ -1,78 +0,0 @@
-
-/*******************************************************************
-* Headers.h
-*******************************************************************/
-#ifndef __HEADERS_H__
-#define __HEADERS_H__
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-#include <linux/wait.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/stddef.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/unistd.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/kthread.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/usb.h>
-#include <linux/uaccess.h>
-#include <net/ip.h>
-
-#include "Typedefs.h"
-#include "Macros.h"
-#include "HostMIBSInterface.h"
-#include "cntrl_SignalingInterface.h"
-#include "PHSDefines.h"
-#include "led_control.h"
-#include "Ioctl.h"
-#include "nvm.h"
-#include "target_params.h"
-#include "Adapter.h"
-#include "CmHost.h"
-#include "DDRInit.h"
-#include "Debug.h"
-#include "IPv6ProtocolHdr.h"
-#include "PHSModule.h"
-#include "Protocol.h"
-#include "Prototypes.h"
-#include "Queue.h"
-#include "vendorspecificextn.h"
-
-#include "InterfaceMacros.h"
-#include "InterfaceAdapter.h"
-#include "InterfaceIsr.h"
-#include "InterfaceMisc.h"
-#include "InterfaceRx.h"
-#include "InterfaceTx.h"
-#include "InterfaceIdleMode.h"
-#include "InterfaceInit.h"
-
-#define DRV_NAME "beceem"
-#define DEV_NAME "tarang"
-#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
-#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc"
-#define DRV_VERSION "5.2.45"
-#define PFX DRV_NAME " "
-
-extern struct class *bcm_class;
-
-#endif
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
deleted file mode 100644
index f9b08a5d8ce8..000000000000
--- a/drivers/staging/bcm/hostmibs.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * File Name: hostmibs.c
- *
- * Author: Beceem Communications Pvt. Ltd
- *
- * Abstract: This file contains the routines to copy the statistics used by
- * the driver to the Host MIBS structure and to provide them to the application.
- */
-
-#include "headers.h"
-
-INT ProcessGetHostMibs(struct bcm_mini_adapter *Adapter,
- struct bcm_host_stats_mibs *pstHostMibs)
-{
- struct bcm_phs_entry *pstServiceFlowEntry = NULL;
- struct bcm_phs_rule *pstPhsRule = NULL;
- struct bcm_phs_classifier_table *pstClassifierTable = NULL;
- struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
- struct bcm_phs_extension *pDeviceExtension = &Adapter->stBCMPhsContext;
- struct bcm_mibs_host_info *host_info;
- UINT nClassifierIndex = 0;
- UINT nPhsTableIndex = 0;
- UINT nSfIndex = 0;
- UINT uiIndex = 0;
-
- if (pDeviceExtension == NULL) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS,
- DBG_LVL_ALL, "Invalid Device Extension\n");
- return STATUS_FAILURE;
- }
-
- /* Copy the classifier Table */
- for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS;
- nClassifierIndex++) {
- if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy(&pstHostMibs->astClassifierTable[nClassifierIndex],
- &Adapter->astClassifierTable[nClassifierIndex],
- sizeof(struct bcm_mibs_classifier_rule));
- }
-
- /* Copy the SF Table */
- for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
- if (Adapter->PackInfo[nSfIndex].bValid) {
- memcpy(&pstHostMibs->astSFtable[nSfIndex],
- &Adapter->PackInfo[nSfIndex],
- sizeof(struct bcm_mibs_table));
- } else {
- /* If the index is not valid,
- * don't process this for the PHS table.
- * Go For the next entry.
- */
- continue;
- }
-
- /* Retrieve the SFID Entry Index for requested Service Flow */
- if (PHS_INVALID_TABLE_INDEX ==
- GetServiceFlowEntry(pDeviceExtension->
- pstServiceFlowPhsRulesTable,
- Adapter->PackInfo[nSfIndex].
- usVCID_Value, &pstServiceFlowEntry))
-
- continue;
-
- pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
-
- for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
- pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
-
- if (pstClassifierRule->bUsed) {
- pstPhsRule = pstClassifierRule->pstPhsRule;
-
- pstHostMibs->astPhsRulesTable[nPhsTableIndex].
- ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
-
- memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI,
- sizeof(struct bcm_phs_rule));
- nPhsTableIndex++;
-
- }
-
- }
-
- }
-
- /* Copy other Host Statistics parameters */
- host_info = &pstHostMibs->stHostInfo;
- host_info->GoodTransmits = Adapter->dev->stats.tx_packets;
- host_info->GoodReceives = Adapter->dev->stats.rx_packets;
- host_info->CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
- host_info->BEBucketSize = Adapter->BEBucketSize;
- host_info->rtPSBucketSize = Adapter->rtPSBucketSize;
- host_info->TimerActive = Adapter->TimerActive;
- host_info->u32TotalDSD = Adapter->u32TotalDSD;
-
- memcpy(host_info->aTxPktSizeHist, Adapter->aTxPktSizeHist,
- sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
- memcpy(host_info->aRxPktSizeHist, Adapter->aRxPktSizeHist,
- sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
-
- return STATUS_SUCCESS;
-}
-
-VOID GetDroppedAppCntrlPktMibs(struct bcm_host_stats_mibs *pstHostMibs,
- struct bcm_tarang_data *pTarang)
-{
- memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
- &(pTarang->stDroppedAppCntrlMsgs),
- sizeof(struct bcm_mibs_dropped_cntrl_msg));
-}
-
-VOID CopyMIBSExtendedSFParameters(struct bcm_mini_adapter *Adapter,
- struct bcm_connect_mgr_params *psfLocalSet,
- UINT uiSearchRuleIndex)
-{
- struct bcm_mibs_parameters *t =
- &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
-
- t->wmanIfSfid = psfLocalSet->u32SFID;
- t->wmanIfCmnCpsMaxSustainedRate =
- psfLocalSet->u32MaxSustainedTrafficRate;
- t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
- t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
- t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
- t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
- t->wmanIfCmnCpsFixedVsVariableSduInd =
- psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
- t->wmanIfCmnCpsFixedVsVariableSduInd =
- ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
- t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
- t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
- t->wmanIfCmnCpsSfSchedulingType =
- psfLocalSet->u8ServiceFlowSchedulingType;
- t->wmanIfCmnCpsSfSchedulingType =
- ntohl(t->wmanIfCmnCpsSfSchedulingType);
- t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
- t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
- t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
- t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
- t->wmanIfCmnCpsArqBlockLifetime =
- ntohs(psfLocalSet->u16ARQBlockLifeTime);
- t->wmanIfCmnCpsArqBlockLifetime =
- ntohl(t->wmanIfCmnCpsArqBlockLifetime);
- t->wmanIfCmnCpsArqSyncLossTimeout =
- ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
- t->wmanIfCmnCpsArqSyncLossTimeout =
- ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
- t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
- t->wmanIfCmnCpsArqDeliverInOrder =
- ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
- t->wmanIfCmnCpsArqRxPurgeTimeout =
- ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
- t->wmanIfCmnCpsArqRxPurgeTimeout =
- ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
- t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
- t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
- t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
- t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
- t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
- t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
- t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
- t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
-
-}
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
deleted file mode 100644
index 074fc39ed678..000000000000
--- a/drivers/staging/bcm/led_control.c
+++ /dev/null
@@ -1,952 +0,0 @@
-#include "headers.h"
-
-#define STATUS_IMAGE_CHECKSUM_MISMATCH -199
-#define EVENT_SIGNALED 1
-
-static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
-{
- B_UINT16 u16CheckSum = 0;
-
- while (u32Size--) {
- u16CheckSum += (B_UINT8)~(*pu8Buffer);
- pu8Buffer++;
- }
- return u16CheckSum;
-}
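
A quick worked example of the complement-sum above, assuming a made-up two-byte buffer: each byte is complemented as an 8-bit value and the results are accumulated into a 16-bit sum.

u8  buf[2]   = { 0x01, 0x02 };
u16 checksum = (u8)~buf[0] + (u8)~buf[1];	/* 0xFE + 0xFD = 0x01FB */
/* CFG_CalculateChecksum(buf, 2) returns 0x01FB for this buffer */
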
-
-bool IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
-{
- INT Status;
-
- Status = (Adapter->gpioBitMap & gpios) ^ gpios;
- if (Status)
- return false;
- else
- return TRUE;
-}
-
-static INT LED_Blink(struct bcm_mini_adapter *Adapter,
- UINT GPIO_Num,
- UCHAR uiLedIndex,
- ULONG timeout,
- INT num_of_time,
- enum bcm_led_events currdriverstate)
-{
- int Status = STATUS_SUCCESS;
- bool bInfinite = false;
-
- /* Check if num_of_time is -ve. If yes, blink led in infinite loop */
- if (num_of_time < 0) {
- bInfinite = TRUE;
- num_of_time = 1;
- }
- while (num_of_time) {
- if (currdriverstate == Adapter->DriverState)
- TURN_ON_LED(Adapter, GPIO_Num, uiLedIndex);
-
- /* Wait for timeout after setting on the LED */
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState ||
- kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = EVENT_SIGNALED;
- break;
- }
- if (Status) {
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = EVENT_SIGNALED;
- break;
- }
-
- TURN_OFF_LED(Adapter, GPIO_Num, uiLedIndex);
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState ||
- kthread_should_stop(),
- msecs_to_jiffies(timeout));
- if (bInfinite == false)
- num_of_time--;
- }
- return Status;
-}
-
-static INT ScaleRateofTransfer(ULONG rate)
-{
- if (rate <= 3)
- return rate;
- else if ((rate > 3) && (rate <= 100))
- return 5;
- else if ((rate > 100) && (rate <= 200))
- return 6;
- else if ((rate > 200) && (rate <= 300))
- return 7;
- else if ((rate > 300) && (rate <= 400))
- return 8;
- else if ((rate > 400) && (rate <= 500))
- return 9;
- else if ((rate > 500) && (rate <= 600))
- return 10;
- else
- return MAX_NUM_OF_BLINKS;
-}
-
-static INT blink_in_normal_bandwidth(struct bcm_mini_adapter *ad,
- INT *time,
- INT *time_tx,
- INT *time_rx,
- UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex,
- UCHAR GPIO_Num_rx,
- UCHAR uiRxLedIndex,
- enum bcm_led_events currdriverstate,
- ulong *timeout)
-{
- /*
- * Assign minimum number of blinks of
- * either Tx or Rx.
- */
- *time = (*time_tx > *time_rx ? *time_rx : *time_tx);
-
- if (*time > 0) {
- /* Blink both Tx and Rx LEDs */
- if ((LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout,
- *time, currdriverstate) == EVENT_SIGNALED) ||
- (LED_Blink(ad, 1 << GPIO_Num_rx, uiRxLedIndex, *timeout,
- *time, currdriverstate) == EVENT_SIGNALED))
- return EVENT_SIGNALED;
- }
-
- if (*time == *time_tx) {
- /* Blink pending rate of Rx */
- if (LED_Blink(ad, (1 << GPIO_Num_rx), uiRxLedIndex, *timeout,
- *time_rx - *time,
- currdriverstate) == EVENT_SIGNALED)
- return EVENT_SIGNALED;
-
- *time = *time_rx;
- } else {
- /* Blink pending rate of Tx */
- if (LED_Blink(ad, 1 << GPIO_Num_tx, uiTxLedIndex, *timeout,
- *time_tx - *time,
- currdriverstate) == EVENT_SIGNALED)
- return EVENT_SIGNALED;
-
- *time = *time_tx;
- }
-
- return 0;
-}
-
-static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter,
- UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex,
- UCHAR GPIO_Num_rx,
- UCHAR uiRxLedIndex,
- enum bcm_led_events currdriverstate)
-{
- /* Initial values of TX and RX packets */
- ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
- /* values of TX and RX packets after 1 sec */
- ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
- /* Rate of transfer of Tx and Rx in 1 sec */
- ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
- int Status = STATUS_SUCCESS;
- INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
- UINT remDelay = 0;
- /* UINT GPIO_num = DISABLE_GPIO_NUM; */
- ulong timeout = 0;
-
- /* Read initial value of packets sent/received */
- Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
- Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
-
- /* Scale the rate of transfer to no of blinks. */
- num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
-
- while ((Adapter->device_removed == false)) {
- timeout = 50;
-
- if (EVENT_SIGNALED == blink_in_normal_bandwidth(Adapter,
- &num_of_time,
- &num_of_time_tx,
- &num_of_time_rx,
- GPIO_Num_tx,
- uiTxLedIndex,
- GPIO_Num_rx,
- uiRxLedIndex,
- currdriverstate,
- &timeout))
- return EVENT_SIGNALED;
-
-
- /*
- * If Tx/Rx rate is less than maximum blinks per second,
- * wait till delay completes to 1 second
- */
- remDelay = MAX_NUM_OF_BLINKS - num_of_time;
- if (remDelay > 0) {
- timeout = 100 * remDelay;
- Status = wait_event_interruptible_timeout(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState
- || kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if (kthread_should_stop()) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
- LED_DUMP_INFO, DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- return EVENT_SIGNALED;
- }
- if (Status)
- return EVENT_SIGNALED;
- }
-
- /* Turn off both Tx and Rx LEDs before next second */
- TURN_OFF_LED(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex);
- TURN_OFF_LED(Adapter, 1 << GPIO_Num_rx, uiTxLedIndex);
-
- /*
- * Read the Tx & Rx packets transmission after 1 second and
- * calculate rate of transfer
- */
- Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
- Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
-
- rate_of_transfer_tx = Final_num_of_packts_tx -
- Initial_num_of_packts_tx;
- rate_of_transfer_rx = Final_num_of_packts_rx -
- Initial_num_of_packts_rx;
-
- /* Read initial value of packets sent/received */
- Initial_num_of_packts_tx = Final_num_of_packts_tx;
- Initial_num_of_packts_rx = Final_num_of_packts_rx;
-
- /* Scale the rate of transfer to no of blinks. */
- num_of_time_tx =
- ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx =
- ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
-
- }
- return Status;
-}
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: ValidateDSDParamsChecksum
- *
- * Description: Reads DSD Params and validates checksum.
- *
- * Arguments:
- * Adapter - Pointer to Adapter structure.
- * ulParamOffset - Start offset of the DSD parameter to be read and
- * validated.
- * usParamLen - Length of the DSD Parameter.
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- * -----------------------------------------------------------------------------
- */
-static INT ValidateDSDParamsChecksum(struct bcm_mini_adapter *Adapter,
- ULONG ulParamOffset,
- USHORT usParamLen)
-{
- INT Status = STATUS_SUCCESS;
- PUCHAR puBuffer = NULL;
- USHORT usChksmOrg = 0;
- USHORT usChecksumCalculated = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
- ulParamOffset, usParamLen);
-
- puBuffer = kmalloc(usParamLen, GFP_KERNEL);
- if (!puBuffer) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum Allocation failed");
- return -ENOMEM;
-
- }
-
- /* Read the DSD data from the parameter offset. */
- if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
- ulParamOffset, usParamLen)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
-
- /* Calculate the checksum of the data read from the DSD parameter. */
- usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: usCheckSumCalculated = 0x%x\n",
- usChecksumCalculated);
-
- /*
- * The end of the DSD parameter section stores a two-byte checksum.
- * Read it and compare with the calculated Checksum.
- */
- if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
- ulParamOffset+usParamLen, 2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
- usChksmOrg = ntohs(usChksmOrg);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: usChksmOrg = 0x%x", usChksmOrg);
-
- /*
- * Compare the checksum calculated with the checksum read
- * from DSD section
- */
- if (usChecksumCalculated ^ usChksmOrg) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
- Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
- goto exit;
- }
-
-exit:
- kfree(puBuffer);
- return Status;
-}
-
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: ValidateHWParmStructure
- *
- * Description: Validates HW Parameters.
- *
- * Arguments:
- * Adapter - Pointer to Adapter structure.
- * ulHwParamOffset - Start offset of the HW parameter Section to be read
- * and validated.
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- * -----------------------------------------------------------------------------
- */
-static INT ValidateHWParmStructure(struct bcm_mini_adapter *Adapter,
- ULONG ulHwParamOffset)
-{
-
- INT Status = STATUS_SUCCESS;
- USHORT HwParamLen = 0;
- /*
- * Add DSD start offset to the hwParamOffset to get
- * the actual address.
- */
- ulHwParamOffset += DSD_START_OFFSET;
-
- /* Read the Length of HW_PARAM structure */
- BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
- HwParamLen = ntohs(HwParamLen);
- if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
- return STATUS_IMAGE_CHECKSUM_MISMATCH;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread:HwParamLen = 0x%x", HwParamLen);
- Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
- HwParamLen);
- return Status;
-} /* ValidateHWParmStructure() */
-
-static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter,
- UCHAR GPIO_Array[])
-{
- int Status = STATUS_SUCCESS;
-
- ULONG dwReadValue = 0;
- USHORT usHwParamData = 0;
- USHORT usEEPROMVersion = 0;
- UCHAR ucIndex = 0;
- UCHAR ucGPIOInfo[32] = {0};
-
- BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
- EEPROM_VERSION_OFFSET, 2);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "usEEPROMVersion: Minor:0x%X Major:0x%x",
- usEEPROMVersion & 0xFF,
- ((usEEPROMVersion >> 8) & 0xFF));
-
-
- if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
- BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
- EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
- usHwParamData = ntohs(usHwParamData);
- dwReadValue = usHwParamData;
- } else {
- /*
- * Validate Compatibility section and then read HW param
- * if compatibility section is valid.
- */
- Status = ValidateDSDParamsChecksum(Adapter,
- DSD_START_OFFSET,
- COMPATIBILITY_SECTION_LENGTH_MAP5);
-
- if (Status != STATUS_SUCCESS)
- return Status;
-
- BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
- EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
- dwReadValue = ntohl(dwReadValue);
- }
-
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: Start address of HW_PARAM structure = 0x%lx",
- dwReadValue);
-
- /*
- * Validate that the address read out is within the DSD.
- * Adapter->uiNVMDSDSize gives the whole DSD size, inclusive of Autoinit.
- * The lower limit must be above DSD_START_OFFSET and the
- * upper limit below (Adapter->uiNVMDSDSize-DSD_START_OFFSET).
- */
- if (dwReadValue < DSD_START_OFFSET ||
- dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
- return STATUS_IMAGE_CHECKSUM_MISMATCH;
-
- Status = ValidateHWParmStructure(Adapter, dwReadValue);
- if (Status)
- return Status;
-
- /*
- * Add DSD_START_OFFSET to the offset read from the EEPROM.
- * This gives the actual start address of the HW Parameters section.
- * To read the GPIO section, add the GPIO offset on top of that.
- */
-
- dwReadValue += DSD_START_OFFSET;
- /* = start address of hw param section. */
- dwReadValue += GPIO_SECTION_START_OFFSET;
- /* = GPIO start offset within HW Param section. */
-
- /*
- * Read the values of all 32 GPIOs from the EEPROM and map each LED
- * function number to its GPIO pin number in GPIO_Array.
- */
- BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
- for (ucIndex = 0; ucIndex < 32; ucIndex++) {
-
- switch (ucGPIOInfo[ucIndex]) {
- case RED_LED:
- GPIO_Array[RED_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case BLUE_LED:
- GPIO_Array[BLUE_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case YELLOW_LED:
- GPIO_Array[YELLOW_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- case GREEN_LED:
- GPIO_Array[GREEN_LED] = ucIndex;
- Adapter->gpioBitMap |= (1 << ucIndex);
- break;
- default:
- break;
- }
-
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "GPIO's bit map correspond to LED :0x%X",
- Adapter->gpioBitMap);
- return Status;
-}
-
-
-static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
- bool *bEnableThread)
-{
- int Status = STATUS_SUCCESS;
- /* Array to store GPIO numbers from EEPROM */
- UCHAR GPIO_Array[NUM_OF_LEDS+1];
- UINT uiIndex = 0;
- UINT uiNum_of_LED_Type = 0;
- PUCHAR puCFGData = NULL;
- UCHAR bData = 0;
- struct bcm_led_state_info *curr_led_state;
-
- memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
-
- if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "Target Params not Avail.\n");
- return -ENOENT;
- }
-
- /* Populate GPIO_Array with GPIO numbers for LED functions */
- /* Read the GPIO numbers from EEPROM */
- Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
- if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
- *bEnableThread = false;
- return STATUS_SUCCESS;
- } else if (Status) {
- *bEnableThread = false;
- return Status;
- }
-
- /* LED information read successfully from the EEPROM. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
- "LED Thread: Config file read successfully\n");
- puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
-
- /*
- * puCFGData points at HostDrvrConfig1, HostDrvrConfig2 and HostDrvrConfig3,
- * which hold, for each LED, the LED type, the LED-on state for the
- * different driver states and the LED blink state.
- */
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- bData = *puCFGData;
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- /*
- * Check bit 8 (mask 0x80) for polarity. If it is set,
- * the LED uses reverse polarity.
- */
- if (bData & 0x80) {
- curr_led_state->BitPolarity = 0;
- /* unset the bit 8 */
- bData = bData & 0x7f;
- }
-
- curr_led_state->LED_Type = bData;
- if (bData <= NUM_OF_LEDS)
- curr_led_state->GPIO_Num = GPIO_Array[bData];
- else
- curr_led_state->GPIO_Num = DISABLE_GPIO_NUM;
-
- puCFGData++;
- bData = *puCFGData;
- curr_led_state->LED_On_State = bData;
- puCFGData++;
- bData = *puCFGData;
- curr_led_state->LED_Blink_State = bData;
- puCFGData++;
- }
-
- /*
- * Check if all the LED settings are disabled. If so,
- * don't launch the LED control thread.
- */
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if ((curr_led_state->LED_Type == DISABLE_GPIO_NUM) ||
- (curr_led_state->LED_Type == 0x7f) ||
- (curr_led_state->LED_Type == 0))
- uiNum_of_LED_Type++;
- }
- if (uiNum_of_LED_Type >= NUM_OF_LEDS)
- *bEnableThread = false;
-
- return Status;
-}
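As a reading aid, here is a minimal sketch (not part of the driver) of how the loop above decodes one three-byte HostDrvrConfig record per LED. The struct and function names are illustrative; NUM_OF_LEDS and DISABLE_GPIO_NUM are the driver's own constants from led_control.h.

/* Illustrative decoded form of one three-byte LED config record. */
struct led_record {
	unsigned char gpio;         /* GPIO pin looked up from the EEPROM table */
	unsigned char polarity;     /* 1 = normal, 0 = reverse */
	unsigned char on_states;    /* bitmask of bcm_led_events for "LED on" */
	unsigned char blink_states; /* bitmask of bcm_led_events for "LED blinking" */
};

static void decode_led_record(const unsigned char cfg[3],
			      const unsigned char gpio_table[],
			      struct led_record *out)
{
	unsigned char type = cfg[0] & 0x7f;        /* low 7 bits: LED type */

	out->polarity = (cfg[0] & 0x80) ? 0 : 1;   /* bit 8: reverse polarity */
	out->gpio = (type <= NUM_OF_LEDS) ? gpio_table[type] : DISABLE_GPIO_NUM;
	out->on_states = cfg[1];
	out->blink_states = cfg[2];
}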
-
-/*
- * -----------------------------------------------------------------------------
- * Procedure: LedGpioInit
- *
- * Description: Initializes the LED GPIOs: configures them as outputs and
- *              sets their initial state to OFF.
- *
- * Arguments:
- * Adapter - Pointer to MINI_ADAPTER structure.
- *
- * Returns: VOID
- *
- * -----------------------------------------------------------------------------
- */
-static VOID LedGpioInit(struct bcm_mini_adapter *Adapter)
-{
- UINT uiResetValue = 0;
- UINT uiIndex = 0;
- struct bcm_led_state_info *curr_led_state;
-
- /* Set all LED GPIO Mode to output mode */
- if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
- sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "LED Thread: RDM Failed\n");
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- curr_led_state = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if (curr_led_state->GPIO_Num != DISABLE_GPIO_NUM)
- uiResetValue |= (1 << curr_led_state->GPIO_Num);
-
- TURN_OFF_LED(Adapter, 1 << curr_led_state->GPIO_Num, uiIndex);
-
- }
- if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
- sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "LED Thread: WRM Failed\n");
-
- Adapter->LEDInfo.bIdle_led_off = false;
-}
-
-static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter,
- UCHAR *GPIO_num_tx,
- UCHAR *GPIO_num_rx,
- UCHAR *uiLedTxIndex,
- UCHAR *uiLedRxIndex,
- enum bcm_led_events currdriverstate)
-{
- UINT uiIndex = 0;
- struct bcm_led_state_info *led_state_info;
-
- *GPIO_num_tx = DISABLE_GPIO_NUM;
- *GPIO_num_rx = DISABLE_GPIO_NUM;
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- led_state_info = &Adapter->LEDInfo.LEDState[uiIndex];
-
- if (((currdriverstate == NORMAL_OPERATION) ||
- (currdriverstate == IDLEMODE_EXIT) ||
- (currdriverstate == FW_DOWNLOAD)) &&
- (led_state_info->LED_Blink_State & currdriverstate) &&
- (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) {
- if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
- *GPIO_num_tx = led_state_info->GPIO_Num;
- *uiLedTxIndex = uiIndex;
- } else {
- *GPIO_num_rx = led_state_info->GPIO_Num;
- *uiLedRxIndex = uiIndex;
- }
- } else {
- if ((led_state_info->LED_On_State & currdriverstate) &&
- (led_state_info->GPIO_Num != DISABLE_GPIO_NUM)) {
- *GPIO_num_tx = led_state_info->GPIO_Num;
- *uiLedTxIndex = uiIndex;
- }
- }
- }
- return STATUS_SUCCESS;
-}
-
-static void handle_adapter_driver_state(struct bcm_mini_adapter *ad,
- enum bcm_led_events currdriverstate,
- UCHAR GPIO_num,
- UCHAR dummyGPIONum,
- UCHAR uiLedIndex,
- UCHAR dummyIndex,
- ulong timeout,
- UINT uiResetValue,
- UINT uiIndex)
-{
- switch (ad->DriverState) {
- case DRIVER_INIT:
- currdriverstate = DRIVER_INIT;
- /* ad->DriverState; */
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex,
- currdriverstate);
-
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
-
- break;
- case FW_DOWNLOAD:
- /*
- * BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS,
- * LED_DUMP_INFO, DBG_LVL_ALL,
- * "LED Thread: FW_DN_DONE called\n");
- */
- currdriverstate = FW_DOWNLOAD;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex,
- currdriverstate);
-
- if (GPIO_num != DISABLE_GPIO_NUM) {
- timeout = 50;
- LED_Blink(ad, 1 << GPIO_num, uiLedIndex, timeout,
- -1, currdriverstate);
- }
- break;
- case FW_DOWNLOAD_DONE:
- currdriverstate = FW_DOWNLOAD_DONE;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex, currdriverstate);
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
- break;
-
- case SHUTDOWN_EXIT:
- /*
- * Intentional fall-through: SHUTDOWN_EXIT is handled the
- * same way as NO_NETWORK_ENTRY.
- */
- case NO_NETWORK_ENTRY:
- currdriverstate = NO_NETWORK_ENTRY;
- BcmGetGPIOPinInfo(ad, &GPIO_num, &dummyGPIONum,
- &uiLedIndex, &dummyIndex, currdriverstate);
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_ON_LED(ad, 1 << GPIO_num, uiLedIndex);
- break;
- case NORMAL_OPERATION:
- {
- UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
- UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
- UCHAR uiLEDTx = 0;
- UCHAR uiLEDRx = 0;
-
- currdriverstate = NORMAL_OPERATION;
- ad->LEDInfo.bIdle_led_off = false;
-
- BcmGetGPIOPinInfo(ad, &GPIO_num_tx, &GPIO_num_rx,
- &uiLEDTx, &uiLEDRx, currdriverstate);
- if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
- (GPIO_num_rx == DISABLE_GPIO_NUM)) {
- GPIO_num = DISABLE_GPIO_NUM;
- } else {
- /*
- * If only a single LED is configured, use it
- * for both Tx and Rx.
- */
- if (GPIO_num_tx == DISABLE_GPIO_NUM) {
- GPIO_num_tx = GPIO_num_rx;
- uiLEDTx = uiLEDRx;
- } else if (GPIO_num_rx == DISABLE_GPIO_NUM) {
- GPIO_num_rx = GPIO_num_tx;
- uiLEDRx = uiLEDTx;
- }
- /*
- * Blink the LEDs in proportion
- * to the Tx and Rx traffic.
- */
- LED_Proportional_Blink(ad,
- GPIO_num_tx, uiLEDTx,
- GPIO_num_rx, uiLEDRx,
- currdriverstate);
- }
- }
- break;
- case LOWPOWER_MODE_ENTER:
- currdriverstate = LOWPOWER_MODE_ENTER;
- if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
- ad->ulPowerSaveMode) {
- /* Turn OFF all the LEDs */
- uiResetValue = 0;
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
-
- }
- /* Turn off the LEDs and wake up the waiter so the IDLE mode ACK can be sent */
- ad->LEDInfo.bLedInitDone = false;
- ad->LEDInfo.bIdle_led_off = TRUE;
- wake_up(&ad->LEDInfo.idleModeSyncEvent);
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- case IDLEMODE_CONTINUE:
- currdriverstate = IDLEMODE_CONTINUE;
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- case IDLEMODE_EXIT:
- break;
- case DRIVER_HALT:
- currdriverstate = DRIVER_HALT;
- GPIO_num = DISABLE_GPIO_NUM;
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
- /* ad->DriverState = DRIVER_INIT; */
- break;
- case LED_THREAD_INACTIVE:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "InActivating LED thread...");
- currdriverstate = LED_THREAD_INACTIVE;
- ad->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_INACTIVELY;
- ad->LEDInfo.bLedInitDone = false;
- /* disable ALL LED */
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if (ad->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED(ad,
- (1 << ad->LEDInfo.LEDState[uiIndex].GPIO_Num),
- uiIndex);
- }
- break;
- case LED_THREAD_ACTIVE:
- BCM_DEBUG_PRINT(ad, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL, "Activating LED thread again...");
- if (ad->LinkUpStatus == false)
- ad->DriverState = NO_NETWORK_ENTRY;
- else
- ad->DriverState = NORMAL_OPERATION;
-
- ad->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_ACTIVELY;
- break;
- /* return; */
- default:
- break;
- }
-}
-
-static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
-{
- UINT uiIndex = 0;
- UCHAR GPIO_num = 0;
- UCHAR uiLedIndex = 0;
- UINT uiResetValue = 0;
- enum bcm_led_events currdriverstate = 0;
- ulong timeout = 0;
-
- INT Status = 0;
-
- UCHAR dummyGPIONum = 0;
- UCHAR dummyIndex = 0;
-
- /* currdriverstate = Adapter->DriverState; */
- Adapter->LEDInfo.bIdleMode_tx_from_host = false;
-
- /*
- * Wait till event is triggered
- *
- * wait_event(Adapter->LEDInfo.notify_led_event,
- * currdriverstate!= Adapter->DriverState);
- */
-
- GPIO_num = DISABLE_GPIO_NUM;
-
- while (TRUE) {
- /* Wait till event is triggered */
- if ((GPIO_num == DISABLE_GPIO_NUM)
- ||
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != LOWPOWER_MODE_ENTER))
- ||
- (currdriverstate == LED_THREAD_INACTIVE))
- Status = wait_event_interruptible(
- Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState
- || kthread_should_stop());
-
- if (kthread_should_stop() || Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex);
- return; /* STATUS_FAILURE; */
- }
-
- if (GPIO_num != DISABLE_GPIO_NUM)
- TURN_OFF_LED(Adapter, 1 << GPIO_num, uiLedIndex);
-
- if (Adapter->LEDInfo.bLedInitDone == false) {
- LedGpioInit(Adapter);
- Adapter->LEDInfo.bLedInitDone = TRUE;
- }
-
- handle_adapter_driver_state(Adapter,
- currdriverstate,
- GPIO_num,
- dummyGPIONum,
- uiLedIndex,
- dummyIndex,
- timeout,
- uiResetValue,
- uiIndex
- );
- }
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
-}
-
-int InitLedSettings(struct bcm_mini_adapter *Adapter)
-{
- int Status = STATUS_SUCCESS;
- bool bEnableThread = TRUE;
- UCHAR uiIndex = 0;
-
- /*
- * Initially set BitPolarity to normal polarity. The bit 8 of LED type
- * is used to change the polarity of the LED.
- */
-
- for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
-
- /*
- * Read the LED settings of CONFIG file and map it
- * to GPIO numbers in EEPROM
- */
- Status = ReadConfigFileStructure(Adapter, &bEnableThread);
- if (STATUS_SUCCESS != Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "LED Thread: FAILED in ReadConfigFileStructure\n");
- return Status;
- }
-
- if (Adapter->LEDInfo.led_thread_running) {
- if (!bEnableThread) {
- Adapter->DriverState = DRIVER_HALT;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- }
-
- } else if (bEnableThread) {
- /* Create secondary thread to handle the LEDs */
- init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
- init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = false;
- Adapter->LEDInfo.led_cntrl_threadid =
- kthread_run((int (*)(void *)) LEDControlThread,
- Adapter, "led_control_thread");
- if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
- DBG_LVL_ALL,
- "Not able to spawn Kernel Thread\n");
- Adapter->LEDInfo.led_thread_running =
- BCM_LED_THREAD_DISABLED;
- return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
- }
- }
- return Status;
-}
diff --git a/drivers/staging/bcm/led_control.h b/drivers/staging/bcm/led_control.h
deleted file mode 100644
index 1b24bf4658af..000000000000
--- a/drivers/staging/bcm/led_control.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef _LED_CONTROL_H
-#define _LED_CONTROL_H
-
-#define NUM_OF_LEDS 4
-#define DSD_START_OFFSET 0x0200
-#define EEPROM_VERSION_OFFSET 0x020E
-#define EEPROM_HW_PARAM_POINTER_ADDRESS 0x0218
-#define EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5 0x0220
-#define GPIO_SECTION_START_OFFSET 0x03
-#define COMPATIBILITY_SECTION_LENGTH 42
-#define COMPATIBILITY_SECTION_LENGTH_MAP5 84
-#define EEPROM_MAP5_MAJORVERSION 5
-#define EEPROM_MAP5_MINORVERSION 0
-#define MAX_NUM_OF_BLINKS 10
-#define NUM_OF_GPIO_PINS 16
-#define DISABLE_GPIO_NUM 0xFF
-#define EVENT_SIGNALED 1
-#define MAX_FILE_NAME_BUFFER_SIZE 100
-
-#define TURN_ON_LED(ad, GPIO, index) do { \
- unsigned int gpio_val = GPIO; \
- (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0)
-
-#define TURN_OFF_LED(ad, GPIO, index) do { \
- unsigned int gpio_val = GPIO; \
- (ad->LEDInfo.LEDState[index].BitPolarity == 1) ? \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_CLR_REG, &gpio_val, sizeof(gpio_val)) : \
- wrmaltWithLock(ad, BCM_GPIO_OUTPUT_SET_REG, &gpio_val, sizeof(gpio_val)); \
- } while (0)
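An illustrative wrapper (not in the original header) showing how these polarity-aware macros are meant to be used: the caller passes the GPIO bit mask and the LED index, and the macro picks the SET or CLR register according to BitPolarity.

/* Hypothetical convenience helper around the macros above. */
static inline void bcm_led_set(struct bcm_mini_adapter *ad,
			       unsigned int index, int on)
{
	unsigned int mask = 1 << ad->LEDInfo.LEDState[index].GPIO_Num;

	if (on)
		TURN_ON_LED(ad, mask, index);
	else
		TURN_OFF_LED(ad, mask, index);
}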
-
-enum bcm_led_colors {
- RED_LED = 1,
- BLUE_LED = 2,
- YELLOW_LED = 3,
- GREEN_LED = 4
-};
-
-enum bcm_led_events {
- SHUTDOWN_EXIT = 0x00,
- DRIVER_INIT = 0x1,
- FW_DOWNLOAD = 0x2,
- FW_DOWNLOAD_DONE = 0x4,
- NO_NETWORK_ENTRY = 0x8,
- NORMAL_OPERATION = 0x10,
- LOWPOWER_MODE_ENTER = 0x20,
- IDLEMODE_CONTINUE = 0x40,
- IDLEMODE_EXIT = 0x80,
- LED_THREAD_INACTIVE = 0x100, /* Makes the LED thread inactive; equivalent to putting the thread on hold. */
- LED_THREAD_ACTIVE = 0x200, /* Makes the LED thread active again. */
- DRIVER_HALT = 0xff
-}; /* Enumerated values of different driver states */
-
-/*
- * Structure which stores the information of different LED types
- * and corresponding LED state information of driver states
- */
-struct bcm_led_state_info {
- unsigned char LED_Type; /* specify GPIO number - use 0xFF if not used */
- unsigned char LED_On_State; /* Bits set or reset for different states */
- unsigned char LED_Blink_State; /* Bits set or reset for blinking LEDs for different states */
- unsigned char GPIO_Num;
- unsigned char BitPolarity; /* To represent whether H/W is normal polarity or reverse polarity */
-};
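Purely as illustration (not part of the original header): the bcm_led_events values double as bitmasks tested against the per-LED state bytes above, which is exactly how BcmGetGPIOPinInfo() uses them. A hypothetical pair of helpers:

static inline int led_on_in_state(const struct bcm_led_state_info *led,
				  enum bcm_led_events state)
{
	return (led->LED_On_State & state) != 0;
}

static inline int led_blinks_in_state(const struct bcm_led_state_info *led,
				      enum bcm_led_events state)
{
	return (led->LED_Blink_State & state) != 0;
}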
-
-struct bcm_led_info {
- struct bcm_led_state_info LEDState[NUM_OF_LEDS];
- bool bIdleMode_tx_from_host; /* Variable to notify whether driver came out from idlemode due to Host or target */
- bool bIdle_led_off;
- wait_queue_head_t notify_led_event;
- wait_queue_head_t idleModeSyncEvent;
- struct task_struct *led_cntrl_threadid;
- int led_thread_running;
- bool bLedInitDone;
-};
-
-/* LED Thread state. */
-#define BCM_LED_THREAD_DISABLED 0 /* LED Thread is not running. */
-#define BCM_LED_THREAD_RUNNING_ACTIVELY 1 /* LED thread is running. */
-#define BCM_LED_THREAD_RUNNING_INACTIVELY 2 /* LED thread has been put on hold */
-
-#endif
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
deleted file mode 100644
index ce09473fbb1f..000000000000
--- a/drivers/staging/bcm/nvm.c
+++ /dev/null
@@ -1,4661 +0,0 @@
-#include "headers.h"
-
-#define DWORD unsigned int
-
-static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter,
- unsigned int offset);
-static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter);
-static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter);
-static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter,
- unsigned int FlashSectorSizeSig,
- unsigned int FlashSectorSize);
-
-static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter);
-static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter);
-static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter);
-static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter);
-
-static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-
-static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset);
-static int IsSectionWritable(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val Section);
-static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val section);
-
-static int ReadDSDPriority(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val dsd);
-static int ReadDSDSignature(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val dsd);
-static int ReadISOPriority(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val iso);
-static int ReadISOSignature(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val iso);
-
-static int CorruptDSDSig(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-static int CorruptISOSig(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val eFlash2xSectionVal);
-static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter,
- PUCHAR pBuff,
- unsigned int uiSectAlignAddr);
-static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
- PUINT pBuff,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes);
-static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter);
-static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter);
-
-static int BeceemFlashBulkRead(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes);
-
-static int BeceemFlashBulkWrite(
- struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify);
-
-static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter);
-
-static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter, unsigned int dwAddress, unsigned int *pdwData, unsigned int dwNumData);
-
-/* Procedure: ReadEEPROMStatusRegister
- *
- * Description: Reads the standard EEPROM Status Register.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- */
-static UCHAR ReadEEPROMStatusRegister(struct bcm_mini_adapter *Adapter)
-{
- UCHAR uiData = 0;
- DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- unsigned int value = 0;
- unsigned int value1 = 0;
-
- /* Read the EEPROM status register */
- value = EEPROM_READ_STATUS_REGISTER;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- while (dwRetries != 0) {
- value = 0;
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got removed hence exiting....");
- break;
- }
-
- /* Wait for Avail bit to be set. */
- if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) {
- /* Clear the Avail/Full bits - which ever is set. */
- value = uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
- uiData = (UCHAR)value;
-
- break;
- }
-
- dwRetries--;
- if (dwRetries == 0) {
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x3004 = %x 0x3008 = %x, retries = %d failed.\n", value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return uiData;
- }
- if (!(dwRetries%RETRIES_PER_DELAY))
- udelay(1000);
- uiStatus = 0;
- }
- return uiData;
-} /* ReadEEPROMStatusRegister */
-
-/*
- * Procedure: ReadBeceemEEPROMBulk
- *
- * Description: This routine reads 16 bytes of data from the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * dwAddress - EEPROM Offset to read the data from.
- * pdwData - Pointer to double words where the data is stored.
- * dwNumWords - Number of double words to read; valid values are 1 and 4.
- *
- * Returns:
- * OSAL_STATUS_CODE:
- */
-
-static int ReadBeceemEEPROMBulk(struct bcm_mini_adapter *Adapter,
- DWORD dwAddress,
- DWORD *pdwData,
- DWORD dwNumWords)
-{
- DWORD dwIndex = 0;
- DWORD dwRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- unsigned int value = 0;
- unsigned int value1 = 0;
- UCHAR *pvalue;
-
- /* Flush the read and cmd queue. */
- value = (EEPROM_READ_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH);
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Clear the Avail/Full bits. */
- value = (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- value = dwAddress | ((dwNumWords == 4) ? EEPROM_16_BYTE_PAGE_READ : EEPROM_4_BYTE_PAGE_READ);
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- while (dwRetries != 0) {
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem has got Removed.hence exiting from loop...");
- return -ENODEV;
- }
-
- /* If we are reading 16 bytes we want to be sure that the queue
- * is full before we read. In the other cases we are ok if the
- * queue has data available
- */
- if (dwNumWords == 4) {
- if ((uiStatus & EEPROM_READ_DATA_FULL) != 0) {
- /* Clear the Avail/Full bits - which ever is set. */
- value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- break;
- }
- } else if (dwNumWords == 1) {
- if ((uiStatus & EEPROM_READ_DATA_AVAIL) != 0) {
- /* We just got Avail and we have to read 32bits so we
- * need this sleep for Cardbus kind of devices.
- */
- if (Adapter->chip_id == 0xBECE0210)
- udelay(800);
-
- /* Clear the Avail/Full bits - which ever is set. */
- value = (uiStatus & (EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- break;
- }
- }
-
- uiStatus = 0;
-
- dwRetries--;
- if (dwRetries == 0) {
- value = 0;
- value1 = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n",
- dwNumWords, value, value1, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
-
- if (!(dwRetries%RETRIES_PER_DELAY))
- udelay(1000);
- }
-
- for (dwIndex = 0; dwIndex < dwNumWords; dwIndex++) {
- /* We get only a byte at a time - from LSB to MSB. We shift it into an integer. */
- pvalue = (PUCHAR)(pdwData + dwIndex);
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[0] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[1] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[2] = value;
-
- value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
-
- pvalue[3] = value;
- }
-
- return STATUS_SUCCESS;
-} /* ReadBeceemEEPROMBulk() */
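For reference, a small sketch (not from the original source) of what the per-byte stores in the loop above amount to on a little-endian host: the first byte popped from EEPROM_READ_DATAQ_REG becomes the least significant byte of the word.

static unsigned int assemble_word_lsb_first(const unsigned char b[4])
{
	/* b[0] is read first from the data queue and is the LSB. */
	return (unsigned int)b[0] |
	       ((unsigned int)b[1] << 8) |
	       ((unsigned int)b[2] << 16) |
	       ((unsigned int)b[3] << 24);
}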
-
-/*
- * Procedure: ReadBeceemEEPROM
- *
- * Description: This routine reads 4 bytes of data from the EEPROM. It uses 1 or 2 page
- * reads to do this operation.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - EEPROM Offset to read the data from.
- * pBuffer - Pointer to word where data needs to be stored in.
- *
- * Returns:
- * OSAL_STATUS_CODE:
- */
-
-int ReadBeceemEEPROM(struct bcm_mini_adapter *Adapter,
- DWORD uiOffset,
- DWORD *pBuffer)
-{
- unsigned int uiData[8] = {0};
- unsigned int uiByteOffset = 0;
- unsigned int uiTempOffset = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ====> ");
-
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiByteOffset = uiOffset - uiTempOffset;
-
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4);
-
- /* A word can span at most two pages. In that case we read the
- * next page too.
- */
- if (uiByteOffset > 12)
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset + MAX_RW_SIZE, (PUINT)&uiData[4], 4);
-
- memcpy((PUCHAR)pBuffer, (((PUCHAR)&uiData[0]) + uiByteOffset), 4);
-
- return STATUS_SUCCESS;
-} /* ReadBeceemEEPROM() */
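A quick worked example of the alignment arithmetic above, assuming MAX_RW_SIZE is the 16-byte EEPROM page size that the bulk reads suggest:

static void eeprom_read_alignment_example(void)
{
	unsigned int offset      = 0x1E;                    /* requested 4-byte read */
	unsigned int page_start  = offset - (offset % 16);  /* 0x10 */
	unsigned int byte_offset = offset - page_start;     /* 0x0E */

	/*
	 * byte_offset > 12, so the 4-byte word spills into the next page and
	 * a second page read at page_start + 16 is needed, as done above.
	 */
	(void)byte_offset;
}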
-
-int ReadMacAddressFromNVM(struct bcm_mini_adapter *Adapter)
-{
- int Status;
- unsigned char puMacAddr[6];
-
- Status = BeceemNVMRead(Adapter,
- (PUINT)&puMacAddr[0],
- INIT_PARAMS_1_MACADDRESS_ADDRESS,
- MAC_ADDRESS_SIZE);
-
- if (Status == STATUS_SUCCESS)
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
-
- return Status;
-}
-
-/*
- * Procedure: BeceemEEPROMBulkRead
- *
- * Description: Reads the EEPROM and returns the Data.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from EEPROM
- * uiOffset - Offset of EEPROM from where data should be read
- * uiNumBytes - Number of bytes to be read from the EEPROM.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if EEPROM read is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemEEPROMBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiData[4] = {0};
- /* unsigned int uiAddress = 0; */
- unsigned int uiBytesRemaining = uiNumBytes;
- unsigned int uiIndex = 0;
- unsigned int uiTempOffset = 0;
- unsigned int uiExtraBytes = 0;
- unsigned int uiFailureRetries = 0;
- PUCHAR pcBuff = (PUCHAR)pBuffer;
-
- if (uiOffset % MAX_RW_SIZE && uiBytesRemaining) {
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiExtraBytes = uiOffset - uiTempOffset;
- ReadBeceemEEPROMBulk(Adapter, uiTempOffset, (PUINT)&uiData[0], 4);
- if (uiBytesRemaining >= (MAX_RW_SIZE - uiExtraBytes)) {
- memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), MAX_RW_SIZE - uiExtraBytes);
- uiBytesRemaining -= (MAX_RW_SIZE - uiExtraBytes);
- uiIndex += (MAX_RW_SIZE - uiExtraBytes);
- uiOffset += (MAX_RW_SIZE - uiExtraBytes);
- } else {
- memcpy(pBuffer, (((PUCHAR)&uiData[0]) + uiExtraBytes), uiBytesRemaining);
- uiIndex += uiBytesRemaining;
- uiOffset += uiBytesRemaining;
- uiBytesRemaining = 0;
- }
- }
-
- while (uiBytesRemaining && uiFailureRetries != 128) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiBytesRemaining >= MAX_RW_SIZE) {
- /* For requests of 16 bytes or more, use the bulk
- * read function to make the access faster.
- * We read 4 dwords of data.
- */
- if (ReadBeceemEEPROMBulk(Adapter, uiOffset, &uiData[0], 4) == 0) {
- memcpy(pcBuff + uiIndex, &uiData[0], MAX_RW_SIZE);
- uiOffset += MAX_RW_SIZE;
- uiBytesRemaining -= MAX_RW_SIZE;
- uiIndex += MAX_RW_SIZE;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- } else if (uiBytesRemaining >= 4) {
- if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) {
- memcpy(pcBuff + uiIndex, &uiData[0], 4);
- uiOffset += 4;
- uiBytesRemaining -= 4;
- uiIndex += 4;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- } else {
- /* Handle the reads less than 4 bytes... */
- PUCHAR pCharBuff = (PUCHAR)pBuffer;
-
- pCharBuff += uiIndex;
- if (ReadBeceemEEPROM(Adapter, uiOffset, &uiData[0]) == 0) {
- memcpy(pCharBuff, &uiData[0], uiBytesRemaining); /* copy only bytes requested. */
- uiBytesRemaining = 0;
- } else {
- uiFailureRetries++;
- mdelay(3); /* sleep for a while before retry... */
- }
- }
- }
-
- return 0;
-}
-
-/*
- * Procedure: BeceemFlashBulkRead
- *
- * Description: Reads the FLASH and returns the Data.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from FLASH
- * uiOffset - Offset of FLASH from where data should be read
- * uiNumBytes - Number of bytes to be read from the FLASH.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if FLASH read is successful.
- * <FAILURE> - if failed.
- */
-
-static int BeceemFlashBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiIndex = 0;
- unsigned int uiBytesToRead = uiNumBytes;
- int Status = 0;
- unsigned int uiPartOffset = 0;
- int bytes;
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device Got Removed");
- return -ENODEV;
- }
-
- /* Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_read((uiOffset/FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- return Status;
- #endif
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
-
- if (uiOffset % MAX_RW_SIZE) {
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- uiBytesToRead = MAX_RW_SIZE - (uiOffset % MAX_RW_SIZE);
- uiBytesToRead = MIN(uiNumBytes, uiBytesToRead);
-
- bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead);
- if (bytes < 0) {
- Status = bytes;
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
- }
-
- uiIndex += uiBytesToRead;
- uiOffset += uiBytesToRead;
- uiNumBytes -= uiBytesToRead;
- }
-
- while (uiNumBytes) {
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- uiBytesToRead = MIN(uiNumBytes, MAX_RW_SIZE);
-
- bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer + uiIndex, uiBytesToRead);
- if (bytes < 0) {
- Status = bytes;
- break;
- }
-
- uiIndex += uiBytesToRead;
- uiOffset += uiBytesToRead;
- uiNumBytes -= uiBytesToRead;
- }
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: BcmGetFlashSize
- *
- * Description: Finds the size of FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - size of the FLASH Storage.
- *
- */
-
-static unsigned int BcmGetFlashSize(struct bcm_mini_adapter *Adapter)
-{
- if (IsFlash2x(Adapter))
- return Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
- else
- return 32 * 1024;
-}
-
-/*
- * Procedure: BcmGetEEPROMSize
- *
- * Description: Finds the size of EEPROM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - size of the EEPROM Storage.
- *
- */
-
-static unsigned int BcmGetEEPROMSize(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiData = 0;
- unsigned int uiIndex = 0;
-
- /*
- * If the EEPROM is present and already calibrated, it will have the
- * 'BECM' string at offset 0.
- * To find the EEPROM size, read at the possible EEPROM boundaries
- * (4K, 8K, etc.). Accessing the EEPROM beyond its size wraps around,
- * so when we cross the end of the EEPROM we read back the 'BECM'
- * string that is actually stored at offset 0.
- */
- BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4);
- if (uiData == BECM) {
- for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4);
- if (uiData == BECM)
- return uiIndex * 1024;
- }
- } else {
- /*
- * EEPROM may not be present or not programmed
- */
- uiData = 0xBABEFACE;
- if (BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&uiData, 0, 4, TRUE) == 0) {
- uiData = 0;
- for (uiIndex = 2; uiIndex <= 256; uiIndex *= 2) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiIndex * 1024, 4);
- if (uiData == 0xBABEFACE)
- return uiIndex * 1024;
- }
- }
- }
- return 0;
-}
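An illustrative sketch (not from the original source) of the wrap-around probe the routine above performs: reads past the physical end of the EEPROM alias back to offset 0, so the first power-of-two boundary that returns the marker written at offset 0 is the device size. read32() is a hypothetical stand-in for BeceemEEPROMBulkRead().

static unsigned int probe_eeprom_size(unsigned int (*read32)(unsigned int offset),
				      unsigned int marker)
{
	unsigned int kb;

	for (kb = 2; kb <= 256; kb *= 2) {
		/* Reads beyond the device size wrap, returning offset 0 again. */
		if (read32(kb * 1024) == marker)
			return kb * 1024;
	}
	return 0;
}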
-
-/*
- * Procedure: FlashSectorErase
- *
- * Description: Erases the given number of FLASH sectors starting at addr.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * addr - sector start address
- * numOfSectors - number of sectors to be erased.
- *
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int FlashSectorErase(struct bcm_mini_adapter *Adapter,
- unsigned int addr,
- unsigned int numOfSectors)
-{
- unsigned int iIndex = 0, iRetries = 0;
- unsigned int uiStatus = 0;
- unsigned int value;
- int bytes;
-
- for (iIndex = 0; iIndex < numOfSectors; iIndex++) {
- value = 0x06000000;
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- value = (0xd8000000 | (addr & 0xFFFFFF));
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- iRetries = 0;
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries++;
- /* After every try, free the CPU for 10 ms. The sector erase
- * cycle generally takes 500 ms to 40000 ms, so sleeping for
- * 10 ms won't hamper performance in any case.
- */
- mdelay(10);
- } while ((uiStatus & 0x1) && (iRetries < 400));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "iRetries crossed the retry limit of 400\n");
- return STATUS_FAILURE;
- }
-
- addr += Adapter->uiSectorSize;
- }
- return 0;
-}
-/*
- * Procedure: flashByteWrite
- *
- * Description: Performs Byte by Byte write to flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashByteWrite(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- unsigned int value;
- ULONG ulData = *(PUCHAR)pData;
- int bytes;
- /*
- * There is no need to write 0xFF: a write requires an erase first,
- * and the erase already sets the whole sector to 0xFF.
- */
-
- if (0xFF == ulData)
- return STATUS_SUCCESS;
-
- /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails");
- return STATUS_FAILURE;
- }
- value = (0x02000000 | (uiOffset & 0xFFFFFF));
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- /* __udelay(950); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries--;
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: flashWrite
- *
- * Description: Performs write to flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashWrite(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- /* unsigned int uiStatus = 0;
- * int iRetries = 0;
- * unsigned int uiReadBack = 0;
- */
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- unsigned int value;
- unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- int bytes;
- /*
- * There is no need to write 0xFFFFFFFF: a write requires an erase first,
- * and the erase already sets the whole sector to 0xFFFFFFFF.
- */
- if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
- return 0;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
-
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails...");
- return STATUS_FAILURE;
- }
-
- /* __udelay(950); */
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
-
- iRetries--;
- /* This ensures that there will be no change in the current code path.
- * Currently one rdm/wrm takes about 125 us, so
- * 125 * 2 * FLASH_PER_RETRIES_DELAY > 3 ms (worst-case delay).
- * Hence the current implementation introduces no delay in this path.
- */
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*-----------------------------------------------------------------------------
- * Procedure: flashByteWriteStatus
- *
- * Description: Performs byte by byte write to flash with write done status check
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of the Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-static int flashByteWriteStatus(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- ULONG ulData = *(PUCHAR)pData;
- unsigned int value;
- int bytes;
-
- /*
- * There is no need to write 0xFF: a write requires an erase first,
- * and the erase already sets the whole sector to 0xFF.
- */
-
- if (0xFF == ulData)
- return STATUS_SUCCESS;
-
- /* DumpDebug(NVM_RW,("flashWrite ====>\n")); */
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write enable in FLASH_SPI_CMDQ_REG register fails");
- return STATUS_FAILURE;
- }
- if (wrm(Adapter, FLASH_SPI_WRITEQ_REG, (PCHAR)&ulData, 4) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DATA Write on FLASH_SPI_WRITEQ_REG fails");
- return STATUS_FAILURE;
- }
- value = (0x02000000 | (uiOffset & 0xFFFFFF));
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programming of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- /* msleep(1); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
-
- iRetries--;
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-/*
- * Procedure: flashWriteStatus
- *
- * Description: Performs write to flash with write done status check
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to.
- * pData - Address of the Data to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int flashWriteStatus(struct bcm_mini_adapter *Adapter,
- unsigned int uiOffset,
- PVOID pData)
-{
- unsigned int uiStatus = 0;
- int iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; /* 3 */
- /* unsigned int uiReadBack = 0; */
- unsigned int value;
- unsigned int uiErasePattern[4] = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
- int bytes;
-
- /*
- * There is no need to write 0xFFFFFFFF: a write requires an erase first,
- * and the erase already sets the whole sector to 0xFFFFFFFF.
- */
- if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
- return 0;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write Enable of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
-
- if (wrm(Adapter, uiOffset, (PCHAR)pData, MAX_RW_SIZE) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Data write fails...");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
-
- do {
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- if (wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value)) < 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Programing of FLASH_SPI_CMDQ_REG fails");
- return STATUS_FAILURE;
- }
- /* __udelay(1); */
- bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
- if (bytes < 0) {
- uiStatus = bytes;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
- return uiStatus;
- }
- iRetries--;
- /* This ensures that there will be no change in the current code path.
- * Currently one rdm/wrm takes about 125 us, so
- * 125 * 2 * FLASH_PER_RETRIES_DELAY > 3 ms (worst-case delay).
- * Hence the current implementation introduces no delay in this path.
- */
- if (iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
- udelay(1000);
-
- } while ((uiStatus & 0x1) && (iRetries > 0));
-
- if (uiStatus & 0x1) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write fails even after checking status for 200 times.");
- return STATUS_FAILURE;
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BcmRestoreBlockProtectStatus
- *
- * Description: Restores the original block protection status.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * ulWriteStatus -Original status
- * Returns:
- * <VOID>
- *
- */
-
-static VOID BcmRestoreBlockProtectStatus(struct bcm_mini_adapter *Adapter, ULONG ulWriteStatus)
-{
- unsigned int value;
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- udelay(20);
- value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
-}
-
-/*
- * Procedure: BcmFlashUnProtectBlock
- *
- * Description: UnProtects appropriate blocks for writing.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiOffset - Offset of the flash where data needs to be written to. This should be Sector aligned.
- * Returns:
- * ULONG - Status value before UnProtect.
- *
- */
-
-static ULONG BcmFlashUnProtectBlock(struct bcm_mini_adapter *Adapter, unsigned int uiOffset, unsigned int uiLength)
-{
- ULONG ulStatus = 0;
- ULONG ulWriteStatus = 0;
- unsigned int value;
-
- uiOffset = uiOffset&0x000FFFFF;
- /*
- * Implemented only for 1MB Flash parts.
- */
- if (FLASH_PART_SST25VF080B == Adapter->ulFlashID) {
- /*
- * Get Current BP status.
- */
- value = (FLASH_CMD_STATUS_REG_READ << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(10);
- /*
- * Read status will be WWXXYYZZ. We have to take only WW.
- */
- rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulStatus, sizeof(ulStatus));
- ulStatus >>= 24;
- ulWriteStatus = ulStatus;
- /*
- * Bits [5-2] give current block level protection status.
- * Bit5: BP3 - DONT CARE
- * BP2-BP0: 0 - NO PROTECTION, 1 - UPPER 1/16, 2 - UPPER 1/8, 3 - UPPER 1/4
- * 4 - UPPER 1/2. 5 to 7 - ALL BLOCKS
- */
-
- if (ulStatus) {
- if ((uiOffset+uiLength) <= 0x80000) {
- /*
- * Offset comes in lower half of 1MB. Protect the upper half.
- * Clear BP1 and BP0 and set BP2.
- */
- ulWriteStatus |= (0x4<<2);
- ulWriteStatus &= ~(0x3<<2);
- } else if ((uiOffset + uiLength) <= 0xC0000) {
- /*
- * Offset comes below Upper 1/4. Upper 1/4 can be protected.
- * Clear BP2 and set BP1 and BP0.
- */
- ulWriteStatus |= (0x3<<2);
- ulWriteStatus &= ~(0x1<<4);
- } else if ((uiOffset + uiLength) <= 0xE0000) {
- /*
- * Offset comes below Upper 1/8. Upper 1/8 can be protected.
- * Clear BP2 and BP0 and set BP1
- */
- ulWriteStatus |= (0x1<<3);
- ulWriteStatus &= ~(0x5<<2);
- } else if ((uiOffset + uiLength) <= 0xF0000) {
- /*
- * Offset comes below Upper 1/16. Only upper 1/16 can be protected.
- * Set BP0 and Clear BP2,BP1.
- */
- ulWriteStatus |= (0x1<<2);
- ulWriteStatus &= ~(0x3<<3);
- } else {
- /*
- * Unblock all.
- * Clear BP2,BP1 and BP0.
- */
- ulWriteStatus &= ~(0x7<<2);
- }
-
- value = (FLASH_CMD_WRITE_ENABLE << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
- value = (FLASH_CMD_STATUS_REG_WRITE << 24) | (ulWriteStatus << 16);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
- udelay(20);
- }
- }
- return ulStatus;
-}
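For clarity, a hypothetical helper (not in the driver) capturing the BP2..BP0 choice made above for the 1 MB SST25VF080B: protect the largest "upper fraction" of the part that still lies entirely above the region being written. The return value is the 3-bit BP field placed in status-register bits [4:2].

static unsigned int bp_bits_for_write_end(unsigned int write_end)
{
	if (write_end <= 0x80000)
		return 4;	/* protect upper 1/2  (0x80000 - 0xFFFFF) */
	if (write_end <= 0xC0000)
		return 3;	/* protect upper 1/4  (0xC0000 - 0xFFFFF) */
	if (write_end <= 0xE0000)
		return 2;	/* protect upper 1/8  (0xE0000 - 0xFFFFF) */
	if (write_end <= 0xF0000)
		return 1;	/* protect upper 1/16 (0xF0000 - 0xFFFFF) */
	return 0;		/* write reaches the top: unprotect everything */
}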
-
-static int bulk_read_complete_sector(struct bcm_mini_adapter *ad,
- UCHAR read_bk[],
- PCHAR tmpbuff,
- unsigned int offset,
- unsigned int partoff)
-{
- unsigned int i;
- int j;
- int bulk_read_stat;
- FP_FLASH_WRITE_STATUS writef =
- ad->fpFlashWriteWithStatusCheck;
-
- for (i = 0; i < ad->uiSectorSize; i += MAX_RW_SIZE) {
- bulk_read_stat = BeceemFlashBulkRead(ad,
- (PUINT)read_bk,
- offset + i,
- MAX_RW_SIZE);
-
- if (bulk_read_stat != STATUS_SUCCESS)
- continue;
-
- if (ad->ulFlashWriteSize == 1) {
- for (j = 0; j < 16; j++) {
- if ((read_bk[j] != tmpbuff[i + j]) &&
- (STATUS_SUCCESS != (*writef)(ad, partoff + i + j, &tmpbuff[i + j]))) {
- return STATUS_FAILURE;
- }
- }
- } else {
- if ((memcmp(read_bk, &tmpbuff[i], MAX_RW_SIZE)) &&
- (STATUS_SUCCESS != (*writef)(ad, partoff + i, &tmpbuff[i]))) {
- return STATUS_FAILURE;
- }
- }
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BeceemFlashBulkWrite
- *
- * Description: Performs write to the flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemFlashBulkWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- PCHAR pTempBuff = NULL;
- PUCHAR pcBuffer = (PUCHAR)pBuffer;
- unsigned int uiIndex = 0;
- unsigned int uiOffsetFromSectStart = 0;
- unsigned int uiSectAlignAddr = 0;
- unsigned int uiCurrSectOffsetAddr = 0;
- unsigned int uiSectBoundary = 0;
- unsigned int uiNumSectTobeRead = 0;
- UCHAR ucReadBk[16] = {0};
- ULONG ulStatus = 0;
- int Status = STATUS_SUCCESS;
- unsigned int uiTemp = 0;
- unsigned int index = 0;
- unsigned int uiPartOffset = 0;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_write((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- return Status;
- #endif
-
- uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- /* Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
-
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
- uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
-
- pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
- if (!pTempBuff)
- goto BeceemFlashBulkWrite_EXIT;
- /*
- * check whether the data to be written spans more than one sector
- */
- if (uiOffset+uiNumBytes < uiSectBoundary) {
- uiNumSectTobeRead = 1;
- } else {
- /* Number of sectors spanned by the write (rounded up below). */
- uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize;
- if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize)
- uiNumSectTobeRead++;
- }
- /* For a flash2x write, check whether the requested sectors are writable.
- * If the write call is for DSD calibration, allow it without checking the sector permission.
- */
-
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
- index = 0;
- uiTemp = uiNumSectTobeRead;
- while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%X> is not writable",
- (uiOffsetFromSectStart + index * Adapter->uiSectorSize));
- Status = SECTOR_IS_NOT_WRITABLE;
- goto BeceemFlashBulkWrite_EXIT;
- }
- uiTemp = uiTemp - 1;
- index = index + 1;
- }
- }
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- while (uiNumSectTobeRead) {
- /* do_gettimeofday(&tv1);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\nTime In start of write :%ld ms\n",(tv1.tv_sec *1000 + tv1.tv_usec /1000));
- */
- uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- BcmDoChipSelect(Adapter, uiSectAlignAddr);
-
- if (0 != BeceemFlashBulkRead(Adapter,
- (PUINT)pTempBuff,
- uiOffsetFromSectStart,
- Adapter->uiSectorSize)) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- /* do_gettimeofday(&tr);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Read :%ld ms\n", (tr.tv_sec *1000 + tr.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000));
- */
- ulStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize);
-
- if (uiNumSectTobeRead > 1) {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- pcBuffer += ((uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)));
- uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- } else {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes);
- }
-
- if (IsFlash2x(Adapter))
- SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart);
-
- FlashSectorErase(Adapter, uiPartOffset, 1);
- /* do_gettimeofday(&te);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by Erase :%ld ms\n", (te.tv_sec *1000 + te.tv_usec/1000) - (tr.tv_sec *1000 + tr.tv_usec/1000));
- */
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) {
- if (Adapter->device_removed) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- if (STATUS_SUCCESS != (*Adapter->fpFlashWrite)(Adapter, uiPartOffset + uiIndex, (&pTempBuff[uiIndex]))) {
- Status = -1;
- goto BeceemFlashBulkWrite_EXIT;
- }
- }
-
- /* do_gettimeofday(&tw);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash :%ld ms\n", (tw.tv_sec *1000 + tw.tv_usec/1000) - (te.tv_sec *1000 + te.tv_usec/1000));
- */
-
- if (STATUS_FAILURE == bulk_read_complete_sector(Adapter,
- ucReadBk,
- pTempBuff,
- uiOffsetFromSectStart,
- uiPartOffset)) {
- Status = STATUS_FAILURE;
- goto BeceemFlashBulkWrite_EXIT;
- }
-
- /* do_gettimeofday(&twv);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken in Write to Flash verification :%ld ms\n", (twv.tv_sec *1000 + twv.tv_usec/1000) - (tw.tv_sec *1000 + tw.tv_usec/1000));
- */
- if (ulStatus) {
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
- ulStatus = 0;
- }
-
- uiCurrSectOffsetAddr = 0;
- uiSectAlignAddr = uiSectBoundary;
- uiSectBoundary += Adapter->uiSectorSize;
- uiOffsetFromSectStart += Adapter->uiSectorSize;
- uiNumSectTobeRead--;
- }
- /* do_gettimeofday(&tv2);
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Time after Write :%ld ms\n",(tv2.tv_sec *1000 + tv2.tv_usec/1000));
- * BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Total time taken by in Write is :%ld ms\n", (tv2.tv_sec *1000 + tv2.tv_usec/1000) - (tv1.tv_sec *1000 + tv1.tv_usec/1000));
- *
- * Cleanup.
- */
-BeceemFlashBulkWrite_EXIT:
- if (ulStatus)
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
-
- kfree(pTempBuff);
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: BeceemFlashBulkWriteStatus
- *
- * Description: Writes to Flash. Checks the SPI status after each write.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemFlashBulkWriteStatus(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- PCHAR pTempBuff = NULL;
- PUCHAR pcBuffer = (PUCHAR)pBuffer;
- unsigned int uiIndex = 0;
- unsigned int uiOffsetFromSectStart = 0;
- unsigned int uiSectAlignAddr = 0;
- unsigned int uiCurrSectOffsetAddr = 0;
- unsigned int uiSectBoundary = 0;
- unsigned int uiNumSectTobeRead = 0;
- UCHAR ucReadBk[16] = {0};
- ULONG ulStatus = 0;
- unsigned int Status = STATUS_SUCCESS;
- unsigned int uiTemp = 0;
- unsigned int index = 0;
- unsigned int uiPartOffset = 0;
-
- uiOffsetFromSectStart = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- /* uiOffset += Adapter->ulFlashCalStart;
- * Adding flash Base address
- * uiOffset = uiOffset + GetFlashBaseAddr(Adapter);
- */
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
- uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
-
- pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
- if (!pTempBuff) {
- Status = -ENOMEM;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
-
- /*
- * check if the data to be written is overlapped across sectors
- */
- if (uiOffset+uiNumBytes < uiSectBoundary) {
- uiNumSectTobeRead = 1;
- } else {
- /* Number of sectors spanned = ceil((offset within the first sector + number of bytes) / sector size) */
- uiNumSectTobeRead = (uiCurrSectOffsetAddr + uiNumBytes) / Adapter->uiSectorSize;
- if ((uiCurrSectOffsetAddr + uiNumBytes)%Adapter->uiSectorSize)
- uiNumSectTobeRead++;
- }
-
- if (IsFlash2x(Adapter) && (Adapter->bAllDSDWriteAllow == false)) {
- index = 0;
- uiTemp = uiNumSectTobeRead;
- while (uiTemp) {
- if (IsOffsetWritable(Adapter, uiOffsetFromSectStart + index * Adapter->uiSectorSize) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Sector Starting at offset <0X%x> is not writable",
- (uiOffsetFromSectStart + index * Adapter->uiSectorSize));
- Status = SECTOR_IS_NOT_WRITABLE;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- uiTemp = uiTemp - 1;
- index = index + 1;
- }
- }
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- while (uiNumSectTobeRead) {
- uiPartOffset = (uiSectAlignAddr & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- BcmDoChipSelect(Adapter, uiSectAlignAddr);
- if (0 != BeceemFlashBulkRead(Adapter,
- (PUINT)pTempBuff,
- uiOffsetFromSectStart,
- Adapter->uiSectorSize)) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
-
- ulStatus = BcmFlashUnProtectBlock(Adapter, uiOffsetFromSectStart, Adapter->uiSectorSize);
-
- if (uiNumSectTobeRead > 1) {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- pcBuffer += ((uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr)));
- uiNumBytes -= (uiSectBoundary - (uiSectAlignAddr + uiCurrSectOffsetAddr));
- } else {
- memcpy(&pTempBuff[uiCurrSectOffsetAddr], pcBuffer, uiNumBytes);
- }
-
- if (IsFlash2x(Adapter))
- SaveHeaderIfPresent(Adapter, (PUCHAR)pTempBuff, uiOffsetFromSectStart);
-
- FlashSectorErase(Adapter, uiPartOffset, 1);
-
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += Adapter->ulFlashWriteSize) {
- if (Adapter->device_removed) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
-
- if (STATUS_SUCCESS != (*Adapter->fpFlashWriteWithStatusCheck)(Adapter, uiPartOffset+uiIndex, &pTempBuff[uiIndex])) {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- }
-
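- /* Optional verification: read the sector back in MAX_RW_SIZE chunks and compare against what was just written. */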
- if (bVerify) {
- for (uiIndex = 0; uiIndex < Adapter->uiSectorSize; uiIndex += MAX_RW_SIZE) {
- if (STATUS_SUCCESS == BeceemFlashBulkRead(Adapter, (PUINT)ucReadBk, uiOffsetFromSectStart + uiIndex, MAX_RW_SIZE)) {
- if (memcmp(ucReadBk, &pTempBuff[uiIndex], MAX_RW_SIZE)) {
- Status = STATUS_FAILURE;
- goto BeceemFlashBulkWriteStatus_EXIT;
- }
- }
- }
- }
-
- if (ulStatus) {
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
- ulStatus = 0;
- }
-
- uiCurrSectOffsetAddr = 0;
- uiSectAlignAddr = uiSectBoundary;
- uiSectBoundary += Adapter->uiSectorSize;
- uiOffsetFromSectStart += Adapter->uiSectorSize;
- uiNumSectTobeRead--;
- }
-/*
- * Cleanup.
- */
-BeceemFlashBulkWriteStatus_EXIT:
- if (ulStatus)
- BcmRestoreBlockProtectStatus(Adapter, ulStatus);
-
- kfree(pTempBuff);
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- return Status;
-}
-
-/*
- * Procedure: PropagateCalParamsFromFlashToMemory
- *
- * Description: Dumps the calibration section of EEPROM to DDR.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int PropagateCalParamsFromFlashToMemory(struct bcm_mini_adapter *Adapter)
-{
- PCHAR pBuff, pPtr;
- unsigned int uiEepromSize = 0;
- unsigned int uiBytesToCopy = 0;
- /* unsigned int uiIndex = 0; */
- unsigned int uiCalStartAddr = EEPROM_CALPARAM_START;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int value;
- int Status = 0;
-
- /*
- * Write the signature first. This will ensure firmware does not access EEPROM.
- */
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- value = 0xbeadbead;
- wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
- if (0 != BeceemNVMRead(Adapter, &uiEepromSize, EEPROM_SIZE_OFFSET, 4))
- return -1;
-
- uiEepromSize = ntohl(uiEepromSize);
- uiEepromSize >>= 16;
-
- /*
- * subtract the auto init section size
- */
- uiEepromSize -= EEPROM_CALPARAM_START;
-
- if (uiEepromSize > 1024 * 1024)
- return -1;
-
- pBuff = kmalloc(uiEepromSize, GFP_KERNEL);
- if (pBuff == NULL)
- return -ENOMEM;
-
- if (0 != BeceemNVMRead(Adapter, (PUINT)pBuff, uiCalStartAddr, uiEepromSize)) {
- kfree(pBuff);
- return -1;
- }
-
- pPtr = pBuff;
-
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
-
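- /* Copy the calibration data into DDR in chunks of at most BUFFER_4K bytes. */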
- while (uiBytesToCopy) {
- Status = wrm(Adapter, uiMemoryLoc, (PCHAR)pPtr, uiBytesToCopy);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed with status :%d", Status);
- break;
- }
-
- pPtr += uiBytesToCopy;
- uiEepromSize -= uiBytesToCopy;
- uiMemoryLoc += uiBytesToCopy;
- uiBytesToCopy = MIN(BUFFER_4K, uiEepromSize);
- }
-
- kfree(pBuff);
- return Status;
-}
-
-/*
- * Procedure: BeceemEEPROMReadBackandVerify
- *
- * Description: Reads back the data written and verifies it.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the flash where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemEEPROMReadBackandVerify(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- unsigned int uiRdbk = 0;
- unsigned int uiIndex = 0;
- unsigned int uiData = 0;
- unsigned int auiData[4] = {0};
-
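- /* Verify in the largest possible units: 16-byte bulk reads, then 4-byte reads,
-  * then a final compare of any trailing bytes. A mismatch in the first two cases
-  * triggers one rewrite and re-check.
-  */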
- while (uiNumBytes) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiNumBytes >= MAX_RW_SIZE) {
- /* For requests of MAX_RW_SIZE bytes or more, use the bulk read function to make the access faster. */
- BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE);
-
- if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE)) {
- /* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, MAX_RW_SIZE, false);
- mdelay(3);
- BeceemEEPROMBulkRead(Adapter, &auiData[0], uiOffset, MAX_RW_SIZE);
-
- if (memcmp(&pBuffer[uiIndex], &auiData[0], MAX_RW_SIZE))
- return -1;
- }
- uiOffset += MAX_RW_SIZE;
- uiNumBytes -= MAX_RW_SIZE;
- uiIndex += 4;
- } else if (uiNumBytes >= 4) {
- BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
- if (uiData != pBuffer[uiIndex]) {
- /* re-write */
- BeceemEEPROMBulkWrite(Adapter, (PUCHAR)(pBuffer + uiIndex), uiOffset, 4, false);
- mdelay(3);
- BeceemEEPROMBulkRead(Adapter, &uiData, uiOffset, 4);
- if (uiData != pBuffer[uiIndex])
- return -1;
- }
- uiOffset += 4;
- uiNumBytes -= 4;
- uiIndex++;
- } else {
- /* Handle reads of fewer than 4 bytes. */
- uiData = 0;
- memcpy(&uiData, ((PUCHAR)pBuffer) + (uiIndex * sizeof(unsigned int)), uiNumBytes);
- BeceemEEPROMBulkRead(Adapter, &uiRdbk, uiOffset, 4);
-
- if (memcmp(&uiData, &uiRdbk, uiNumBytes))
- return -1;
-
- uiNumBytes = 0;
- }
- }
-
- return 0;
-}
-
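-/*
- * BcmSwapWord - reverses the byte order of the 32-bit word pointed to by ptr1,
- * in place. Used to byte-swap data before it is queued to the EEPROM SPI write
- * data queue.
- */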
-static VOID BcmSwapWord(unsigned int *ptr1)
-{
- unsigned int tempval = (unsigned int)*ptr1;
- char *ptr2 = (char *)&tempval;
- char *ptr = (char *)ptr1;
-
- ptr[0] = ptr2[3];
- ptr[1] = ptr2[2];
- ptr[2] = ptr2[1];
- ptr[3] = ptr2[0];
-}
-
-/*
- * Procedure: BeceemEEPROMWritePage
- *
- * Description: Performs a page write (16 bytes) to the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiData - Data to be written.
- * uiOffset - Offset of the EEPROM where data needs to be written to.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-static int BeceemEEPROMWritePage(struct bcm_mini_adapter *Adapter, unsigned int uiData[], unsigned int uiOffset)
-{
- unsigned int uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- unsigned int uiStatus = 0;
- UCHAR uiEpromStatus = 0;
- unsigned int value = 0;
-
- /* Flush the Write/Read/Cmd queues. */
- value = (EEPROM_WRITE_QUEUE_FLUSH | EEPROM_CMD_QUEUE_FLUSH | EEPROM_READ_QUEUE_FLUSH);
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Clear the Empty/Avail/Full bits. It has been confirmed, by reading the
-  * register back, that the bits are cleared after this. See NOTE below.
-  * We also clear the Read queues because we do an EEPROM status register
-  * read later.
- */
- value = (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL | EEPROM_READ_DATA_AVAIL | EEPROM_READ_DATA_FULL);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- /* Enable write */
- value = EEPROM_WRITE_ENABLE;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- /* We can write 16 bytes (8 bits x 16) back to back into the queue, and
-  * since we have checked that the queue is empty we can write in a burst.
- */
-
- value = uiData[0];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[1];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[2];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- value = uiData[3];
- BcmSwapWord(&value);
- wrm(Adapter, EEPROM_WRITE_DATAQ_REG, (PUCHAR)&value, 4);
-
- /* NOTE: After this write, a readback of EEPROM_SPI_Q_STATUS1_REG shows a
-  * value of 7 for the EEPROM data write, which means the queue reports
-  * full, space available and empty. These states may occur in sequence.
- */
- value = EEPROM_16_BYTE_PAGE_WRITE | uiOffset;
- wrmalt(Adapter, EEPROM_CMDQ_SPI_REG, &value, sizeof(value));
-
- /* Ideally we would loop here without a retry limit and eventually succeed.
-  * What we are checking is whether the previous write has completed, and
-  * this may take time; we wait until the Empty bit is set.
- */
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- while ((uiStatus & EEPROM_WRITE_QUEUE_EMPTY) == 0) {
- uiRetries--;
- if (uiRetries == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, %d retries failed.\n", uiStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
-
- if (!(uiRetries%RETRIES_PER_DELAY))
- udelay(1000);
-
- uiStatus = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem was removed, exiting from loop...");
- return -ENODEV;
- }
- }
-
- if (uiRetries != 0) {
- /* Clear the ones that are set - either, Empty/Full/Avail bits */
- value = (uiStatus & (EEPROM_WRITE_QUEUE_EMPTY | EEPROM_WRITE_QUEUE_AVAIL | EEPROM_WRITE_QUEUE_FULL));
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- }
-
- /* Check that the EEPROM status register is correct before proceeding.
-  * Bit 0 of the EEPROM status register must be 0 before we go further;
-  * a 1 at bit 0 indicates that the EEPROM is still busy with the previous
-  * write. Once this read shows the busy bit cleared, the previous write
-  * to the EEPROM has completed.
- */
- uiRetries = MAX_EEPROM_RETRIES * RETRIES_PER_DELAY;
- uiEpromStatus = 0;
- while (uiRetries != 0) {
- uiEpromStatus = ReadEEPROMStatusRegister(Adapter);
- if (Adapter->device_removed == TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Modem was removed, exiting from loop...");
- return -ENODEV;
- }
- if ((EEPROM_STATUS_REG_WRITE_BUSY & uiEpromStatus) == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM status register = %x tries = %d\n", uiEpromStatus, (MAX_EEPROM_RETRIES * RETRIES_PER_DELAY - uiRetries));
- return STATUS_SUCCESS;
- }
- uiRetries--;
- if (uiRetries == 0) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "0x0f003004 = %x, for EEPROM status read %d retries failed.\n", uiEpromStatus, MAX_EEPROM_RETRIES * RETRIES_PER_DELAY);
- return STATUS_FAILURE;
- }
- uiEpromStatus = 0;
- if (!(uiRetries%RETRIES_PER_DELAY))
- udelay(1000);
- }
-
- return STATUS_SUCCESS;
-} /* BeceemEEPROMWritePage */
-
-/*
- * Procedure: BeceemEEPROMBulkWrite
- *
- * Description: Performs a write to the EEPROM
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Data to be written.
- * uiOffset - Offset of the EEPROM where data needs to be written to.
- * uiNumBytes - Number of bytes to be written.
- * bVerify - read verify flag.
- * Returns:
- * OSAL_STATUS_CODE
- *
- */
-
-int BeceemEEPROMBulkWrite(struct bcm_mini_adapter *Adapter,
- PUCHAR pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- unsigned int uiBytesToCopy = uiNumBytes;
- /* unsigned int uiRdbk = 0; */
- unsigned int uiData[4] = {0};
- unsigned int uiIndex = 0;
- unsigned int uiTempOffset = 0;
- unsigned int uiExtraBytes = 0;
- /* PUINT puiBuffer = (PUINT)pBuffer;
- * int value;
- */
-
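- /* If the write does not start on a 16-byte page boundary, read the containing
-  * page, merge the new bytes at the correct offset and write the page back.
-  */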
- if (uiOffset % MAX_RW_SIZE && uiBytesToCopy) {
- uiTempOffset = uiOffset - (uiOffset % MAX_RW_SIZE);
- uiExtraBytes = uiOffset - uiTempOffset;
-
- BeceemEEPROMBulkRead(Adapter, &uiData[0], uiTempOffset, MAX_RW_SIZE);
-
- if (uiBytesToCopy >= (16 - uiExtraBytes)) {
- memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, MAX_RW_SIZE - uiExtraBytes);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset))
- return STATUS_FAILURE;
-
- uiBytesToCopy -= (MAX_RW_SIZE - uiExtraBytes);
- uiIndex += (MAX_RW_SIZE - uiExtraBytes);
- uiOffset += (MAX_RW_SIZE - uiExtraBytes);
- } else {
- memcpy((((PUCHAR)&uiData[0]) + uiExtraBytes), pBuffer, uiBytesToCopy);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiTempOffset))
- return STATUS_FAILURE;
-
- uiIndex += uiBytesToCopy;
- uiOffset += uiBytesToCopy;
- uiBytesToCopy = 0;
- }
- }
-
- while (uiBytesToCopy) {
- if (Adapter->device_removed)
- return -1;
-
- if (uiBytesToCopy >= MAX_RW_SIZE) {
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, (PUINT) &pBuffer[uiIndex], uiOffset))
- return STATUS_FAILURE;
-
- uiIndex += MAX_RW_SIZE;
- uiOffset += MAX_RW_SIZE;
- uiBytesToCopy -= MAX_RW_SIZE;
- } else {
- /*
- * To program data that is not 16-byte aligned, read 16 bytes and then update.
- */
- BeceemEEPROMBulkRead(Adapter, &uiData[0], uiOffset, 16);
- memcpy(&uiData[0], pBuffer + uiIndex, uiBytesToCopy);
-
- if (STATUS_FAILURE == BeceemEEPROMWritePage(Adapter, uiData, uiOffset))
- return STATUS_FAILURE;
-
- uiBytesToCopy = 0;
- }
- }
-
- return 0;
-}
-
-/*
- * Procedure: BeceemNVMRead
- *
- * Description: Reads n bytes from NVM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer to store the data read from NVM
- * uiOffset - Offset of NVM from where data should be read
- * uiNumBytes - Number of bytes to be read from the NVM.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM read is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemNVMRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- int Status = 0;
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int uiTemp = 0, value;
- #endif
-
- if (Adapter->eNVMType == NVM_FLASH) {
- if (Adapter->bFlashRawRead == false) {
- if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD))
- return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes);
-
- uiOffset = uiOffset + Adapter->ulFlashCalStart;
- }
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_read((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- #else
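- /* Save register 0x0f000C80, clear it for the duration of the flash access and restore it afterwards. */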
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
- Status = BeceemFlashBulkRead(Adapter,
- pBuffer,
- uiOffset,
- uiNumBytes);
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- #endif
- } else if (Adapter->eNVMType == NVM_EEPROM) {
- Status = BeceemEEPROMBulkRead(Adapter,
- pBuffer,
- uiOffset,
- uiNumBytes);
- } else {
- Status = -1;
- }
-
- return Status;
-}
-
-/*
- * Procedure: BeceemNVMWrite
- *
- * Description: Writes n bytes to NVM.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * pBuffer - Buffer contains the data to be written.
- * uiOffset - Offset of NVM where data to be written to.
- * uiNumBytes - Number of bytes to be written.
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM write is successful.
- * <FAILURE> - if failed.
- */
-
-int BeceemNVMWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- bool bVerify)
-{
- int Status = 0;
- unsigned int uiTemp = 0;
- unsigned int uiMemoryLoc = EEPROM_CAL_DATA_INTERNAL_LOC;
- unsigned int uiIndex = 0;
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int value;
- #endif
-
- unsigned int uiFlashOffset = 0;
-
- if (Adapter->eNVMType == NVM_FLASH) {
- if (IsSectionExistInVendorInfo(Adapter, Adapter->eActiveDSD))
- Status = vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, Adapter->eActiveDSD, uiOffset, uiNumBytes, bVerify);
- else {
- uiFlashOffset = uiOffset + Adapter->ulFlashCalStart;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_write((uiFlashOffset / FLASH_PART_SIZE), (uiFlashOffset % FLASH_PART_SIZE), (unsigned char *)pBuffer, uiNumBytes);
- #else
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- if (Adapter->bStatusWrite == TRUE)
- Status = BeceemFlashBulkWriteStatus(Adapter,
- pBuffer,
- uiFlashOffset,
- uiNumBytes,
- bVerify);
- else
- Status = BeceemFlashBulkWrite(Adapter,
- pBuffer,
- uiFlashOffset,
- uiNumBytes,
- bVerify);
- #endif
- }
-
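- /* Bytes that land inside the calibration area are also copied to the internal
-  * copy at EEPROM_CAL_DATA_INTERNAL_LOC so that it stays in sync with the NVM.
-  */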
- if (uiOffset >= EEPROM_CALPARAM_START) {
- uiMemoryLoc += (uiOffset - EEPROM_CALPARAM_START);
- while (uiNumBytes) {
- if (uiNumBytes > BUFFER_4K) {
- wrm(Adapter, (uiMemoryLoc+uiIndex), (PCHAR)(pBuffer + (uiIndex / 4)), BUFFER_4K);
- uiNumBytes -= BUFFER_4K;
- uiIndex += BUFFER_4K;
- } else {
- wrm(Adapter, uiMemoryLoc+uiIndex, (PCHAR)(pBuffer + (uiIndex / 4)), uiNumBytes);
- uiNumBytes = 0;
- break;
- }
- }
- } else {
- if ((uiOffset + uiNumBytes) > EEPROM_CALPARAM_START) {
- ULONG ulBytesTobeSkipped = 0;
- PUCHAR pcBuffer = (PUCHAR)pBuffer; /* char pointer to take care of odd byte cases. */
-
- uiNumBytes -= (EEPROM_CALPARAM_START - uiOffset);
- ulBytesTobeSkipped += (EEPROM_CALPARAM_START - uiOffset);
- uiOffset += (EEPROM_CALPARAM_START - uiOffset);
- while (uiNumBytes) {
- if (uiNumBytes > BUFFER_4K) {
- wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], BUFFER_4K);
- uiNumBytes -= BUFFER_4K;
- uiIndex += BUFFER_4K;
- } else {
- wrm(Adapter, uiMemoryLoc + uiIndex, (PCHAR)&pcBuffer[ulBytesTobeSkipped + uiIndex], uiNumBytes);
- uiNumBytes = 0;
- break;
- }
- }
- }
- }
- /* restore the values. */
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- } else if (Adapter->eNVMType == NVM_EEPROM) {
- Status = BeceemEEPROMBulkWrite(Adapter,
- (PUCHAR)pBuffer,
- uiOffset,
- uiNumBytes,
- bVerify);
- if (bVerify)
- Status = BeceemEEPROMReadBackandVerify(Adapter, (PUINT)pBuffer, uiOffset, uiNumBytes);
- } else {
- Status = -1;
- }
- return Status;
-}
-
-/*
- * Procedure: BcmUpdateSectorSize
- *
- * Description: Updates the sector size stored in FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- * uiSectorSize - sector size
- *
- * Returns:
- * OSAL_STATUS_SUCCESS - if NVM write is successful.
- * <FAILURE> - if failed.
- */
-
-int BcmUpdateSectorSize(struct bcm_mini_adapter *Adapter, unsigned int uiSectorSize)
-{
- int Status = -1;
- struct bcm_flash_cs_info sFlashCsInfo = {0};
- unsigned int uiTemp = 0;
- unsigned int uiSectorSig = 0;
- unsigned int uiCurrentSectorSize = 0;
- unsigned int value;
-
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- /*
- * Before updating the sector size in the reserved area, check whether it is already present.
- */
- BeceemFlashBulkRead(Adapter, (PUINT)&sFlashCsInfo, Adapter->ulFlashControlSectionStart, sizeof(sFlashCsInfo));
- uiSectorSig = ntohl(sFlashCsInfo.FlashSectorSizeSig);
- uiCurrentSectorSize = ntohl(sFlashCsInfo.FlashSectorSize);
-
- if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) {
- if ((uiCurrentSectorSize <= MAX_SECTOR_SIZE) && (uiCurrentSectorSize >= MIN_SECTOR_SIZE)) {
- if (uiSectorSize == uiCurrentSectorSize) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Provided sector size is the same as that programmed in Flash");
- Status = STATUS_SUCCESS;
- goto Restore;
- }
- }
- }
-
- if ((uiSectorSize <= MAX_SECTOR_SIZE) && (uiSectorSize >= MIN_SECTOR_SIZE)) {
- sFlashCsInfo.FlashSectorSize = htonl(uiSectorSize);
- sFlashCsInfo.FlashSectorSizeSig = htonl(FLASH_SECTOR_SIZE_SIG);
-
- Status = BeceemFlashBulkWrite(Adapter,
- (PUINT)&sFlashCsInfo,
- Adapter->ulFlashControlSectionStart,
- sizeof(sFlashCsInfo),
- TRUE);
- }
-
-Restore:
- /* restore the values. */
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
-
- return Status;
-}
-
-/*
- * Procedure: BcmGetFlashSectorSize
- *
- * Description: Finds the sector size of the FLASH.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * unsigned int - sector size.
- *
- */
-
-static unsigned int BcmGetFlashSectorSize(struct bcm_mini_adapter *Adapter, unsigned int FlashSectorSizeSig, unsigned int FlashSectorSize)
-{
- unsigned int uiSectorSize = 0;
- unsigned int uiSectorSig = 0;
-
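- /* Precedence: an explicit override from the CFG file, then a valid size stored
-  * in flash, then a valid size from the CFG file, then DEFAULT_SECTOR_SIZE.
-  */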
- if (Adapter->bSectorSizeOverride &&
- (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE)) {
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- } else {
- uiSectorSig = FlashSectorSizeSig;
-
- if (uiSectorSig == FLASH_SECTOR_SIZE_SIG) {
- uiSectorSize = FlashSectorSize;
- /*
- * If the sector size stored in the FLASH makes sense then use it.
- */
- if (uiSectorSize <= MAX_SECTOR_SIZE && uiSectorSize >= MIN_SECTOR_SIZE) {
- Adapter->uiSectorSize = uiSectorSize;
- } else if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE) {
- /* No valid size in FLASH, check if Config file has it. */
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- } else {
- /* Init to Default, if none of the above works. */
- Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE;
- }
- } else {
- if (Adapter->uiSectorSizeInCFG <= MAX_SECTOR_SIZE &&
- Adapter->uiSectorSizeInCFG >= MIN_SECTOR_SIZE)
- Adapter->uiSectorSize = Adapter->uiSectorSizeInCFG;
- else
- Adapter->uiSectorSize = DEFAULT_SECTOR_SIZE;
- }
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector size :%x\n", Adapter->uiSectorSize);
-
- return Adapter->uiSectorSize;
-}
-
-/*
- * Procedure: BcmInitEEPROMQueues
- *
- * Description: Initialization of EEPROM queues.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- */
-
-static int BcmInitEEPROMQueues(struct bcm_mini_adapter *Adapter)
-{
- unsigned int value = 0;
- /* CHIP Bug : Clear the Avail bits on the Read queue. The default
- * value on this register is supposed to be 0x00001102.
- * But we get 0x00001122.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Fixing reset value on 0x0f003004 register\n");
- value = EEPROM_READ_DATA_AVAIL;
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- /* Flush all the EEPROM queues. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Flushing the queues\n");
- value = EEPROM_ALL_QUEUE_FLUSH;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- value = 0;
- wrmalt(Adapter, SPI_FLUSH_REG, &value, sizeof(value));
-
- /* Read the EEPROM Status Register. Just to see, no real purpose. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "EEPROM Status register value = %x\n", ReadEEPROMStatusRegister(Adapter));
-
- return STATUS_SUCCESS;
-} /* BcmInitEEPROMQueues() */
-
-/*
- * Procedure: BcmInitNVM
- *
- * Description: Initialization of NVM: EEPROM size, FLASH size, sector size, etc.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- */
-
-int BcmInitNVM(struct bcm_mini_adapter *ps_adapter)
-{
- BcmValidateNvmType(ps_adapter);
- BcmInitEEPROMQueues(ps_adapter);
-
- if (ps_adapter->eNVMType == NVM_AUTODETECT) {
- ps_adapter->eNVMType = BcmGetNvmType(ps_adapter);
- if (ps_adapter->eNVMType == NVM_UNKNOWN)
- BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_PRINTK, 0, 0, "NVM Type is unknown!!\n");
- } else if (ps_adapter->eNVMType == NVM_FLASH) {
- BcmGetFlashCSInfo(ps_adapter);
- }
-
- BcmGetNvmSize(ps_adapter);
-
- return STATUS_SUCCESS;
-}
-
-/* BcmGetNvmSize : sets the EEPROM or flash size in Adapter.
- *
- * Input Parameter:
- * Adapter data structure
- * Return Value :
- * 0 on success.
- */
-
-static int BcmGetNvmSize(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->eNVMType == NVM_EEPROM)
- Adapter->uiNVMDSDSize = BcmGetEEPROMSize(Adapter);
- else if (Adapter->eNVMType == NVM_FLASH)
- Adapter->uiNVMDSDSize = BcmGetFlashSize(Adapter);
-
- return 0;
-}
-
-/*
- * Procedure: BcmValidateNvmType
- *
- * Description: Validates the NVM Type option selected against the device
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <VOID>
- */
-
-static VOID BcmValidateNvmType(struct bcm_mini_adapter *Adapter)
-{
- /*
- * If the FLASH is forced through the CFG file, we should ensure the device really has a FLASH.
- * Accessing the FLASH address when no FLASH is present can cause a hang/freeze, etc.
- * So if NVM_FLASH is selected for older chipsets, change it to AUTODETECT, where EEPROM is the first choice.
- */
-
- if (Adapter->eNVMType == NVM_FLASH &&
- Adapter->chip_id < 0xBECE3300)
- Adapter->eNVMType = NVM_AUTODETECT;
-}
-
-/*
- * Procedure: BcmReadFlashRDID
- *
- * Description: Reads ID from Serial Flash
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * Flash ID
- */
-
-static ULONG BcmReadFlashRDID(struct bcm_mini_adapter *Adapter)
-{
- ULONG ulRDID = 0;
- unsigned int value;
-
- /*
- * Read ID Instruction.
- */
- value = (FLASH_CMD_READ_ID << 24);
- wrmalt(Adapter, FLASH_SPI_CMDQ_REG, &value, sizeof(value));
-
- /* Delay */
- udelay(10);
-
- /*
- * Read SPI READQ REG. The output will be WWXXYYZZ.
- * The ID is 3 bytes long (WWXXYY); ZZ is to be ignored.
- */
- rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID));
-
- return ulRDID >> 8;
-}
-
-int BcmAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
-{
- if (!psAdapter) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure pointer is NULL");
- return -EINVAL;
- }
- psAdapter->psFlashCSInfo = kzalloc(sizeof(struct bcm_flash_cs_info), GFP_KERNEL);
- if (psAdapter->psFlashCSInfo == NULL) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 1.x");
- return -ENOMEM;
- }
-
- psAdapter->psFlash2xCSInfo = kzalloc(sizeof(struct bcm_flash2x_cs_info), GFP_KERNEL);
- if (!psAdapter->psFlash2xCSInfo) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate memory for Flash 2.x");
- kfree(psAdapter->psFlashCSInfo);
- return -ENOMEM;
- }
-
- psAdapter->psFlash2xVendorInfo = kzalloc(sizeof(struct bcm_flash2x_vendor_info), GFP_KERNEL);
- if (!psAdapter->psFlash2xVendorInfo) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Can't Allocate Vendor Info Memory for Flash 2.x");
- kfree(psAdapter->psFlashCSInfo);
- kfree(psAdapter->psFlash2xCSInfo);
- return -ENOMEM;
- }
-
- return STATUS_SUCCESS;
-}
-
-int BcmDeAllocFlashCSStructure(struct bcm_mini_adapter *psAdapter)
-{
- if (!psAdapter) {
- BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure pointer is NULL");
- return -EINVAL;
- }
- kfree(psAdapter->psFlashCSInfo);
- kfree(psAdapter->psFlash2xCSInfo);
- kfree(psAdapter->psFlash2xVendorInfo);
- return STATUS_SUCCESS;
-}
-
-static int BcmDumpFlash2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo, struct bcm_mini_adapter *Adapter)
-{
- unsigned int Index = 0;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "**********************FLASH2X CS Structure *******************");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x", (psFlash2xCSInfo->MagicNumber));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Major Version :%d", MAJOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Minor Version :%d", MINOR_VERSION(psFlash2xCSInfo->FlashLayoutVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " ISOImageMajorVersion:0x%x", (psFlash2xCSInfo->ISOImageVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSIFirmwareMajorVersion :0x%x", (psFlash2xCSInfo->SCSIFirmwareVersion));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart1ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForScsiFirmware :0x%x", (psFlash2xCSInfo->OffsetFromZeroForScsiFirmware));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SizeOfScsiFirmware :0x%x", (psFlash2xCSInfo->SizeOfScsiFirmware));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForPart2ISOImage :0x%x", (psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSDEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSDEnd));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSAEnd :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSAEnd));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionStart :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionStart));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForControlSectionData :0x%x", (psFlash2xCSInfo->OffsetFromZeroForControlSectionData));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CDLessInactivityTimeout :0x%x", (psFlash2xCSInfo->CDLessInactivityTimeout));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "NewImageSignature :0x%x", (psFlash2xCSInfo->NewImageSignature));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSizeSig :0x%x", (psFlash2xCSInfo->FlashSectorSizeSig));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashSectorSize :0x%x", (psFlash2xCSInfo->FlashSectorSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashWriteSupportSize :0x%x", (psFlash2xCSInfo->FlashWriteSupportSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "TotalFlashSize :0x%X", (psFlash2xCSInfo->TotalFlashSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashBaseAddr :0x%x", (psFlash2xCSInfo->FlashBaseAddr));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FlashPartMaxSize :0x%x", (psFlash2xCSInfo->FlashPartMaxSize));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "IsCDLessDeviceBootSig :0x%x", (psFlash2xCSInfo->IsCDLessDeviceBootSig));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "MassStorageTimeout :0x%x", (psFlash2xCSInfo->MassStorageTimeout));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage1Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage1Part3End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part1End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part2End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3Start :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetISOImage2Part3End :0x%x", (psFlash2xCSInfo->OffsetISOImage2Part3End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromDSDStartForDSDHeader :0x%x", (psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForDSD2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForDSD2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA1End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA1End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2Start :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2Start));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "OffsetFromZeroForVSA2End :0x%x", (psFlash2xCSInfo->OffsetFromZeroForVSA2End));
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Sector Access Bit Map is Defined as :");
-
- for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectorAccessBitMap[%d] :0x%x", Index,
- (psFlash2xCSInfo->SectorAccessBitMap[Index]));
-
- return STATUS_SUCCESS;
-}
-
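-/*
- * The flash control structures are stored big-endian in flash; convert every
- * 32-bit field to host byte order after reading them.
- */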
-static int ConvertEndianOf2XCSStructure(struct bcm_flash2x_cs_info *psFlash2xCSInfo)
-{
- unsigned int Index = 0;
-
- psFlash2xCSInfo->MagicNumber = ntohl(psFlash2xCSInfo->MagicNumber);
- psFlash2xCSInfo->FlashLayoutVersion = ntohl(psFlash2xCSInfo->FlashLayoutVersion);
- /* psFlash2xCSInfo->FlashLayoutMinorVersion = ntohs(psFlash2xCSInfo->FlashLayoutMinorVersion); */
- psFlash2xCSInfo->ISOImageVersion = ntohl(psFlash2xCSInfo->ISOImageVersion);
- psFlash2xCSInfo->SCSIFirmwareVersion = ntohl(psFlash2xCSInfo->SCSIFirmwareVersion);
- psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart1ISOImage);
- psFlash2xCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlash2xCSInfo->OffsetFromZeroForScsiFirmware);
- psFlash2xCSInfo->SizeOfScsiFirmware = ntohl(psFlash2xCSInfo->SizeOfScsiFirmware);
- psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlash2xCSInfo->OffsetFromZeroForPart2ISOImage);
- psFlash2xCSInfo->OffsetFromZeroForDSDStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDStart);
- psFlash2xCSInfo->OffsetFromZeroForDSDEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSDEnd);
- psFlash2xCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAStart);
- psFlash2xCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSAEnd);
- psFlash2xCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionStart);
- psFlash2xCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlash2xCSInfo->OffsetFromZeroForControlSectionData);
- psFlash2xCSInfo->CDLessInactivityTimeout = ntohl(psFlash2xCSInfo->CDLessInactivityTimeout);
- psFlash2xCSInfo->NewImageSignature = ntohl(psFlash2xCSInfo->NewImageSignature);
- psFlash2xCSInfo->FlashSectorSizeSig = ntohl(psFlash2xCSInfo->FlashSectorSizeSig);
- psFlash2xCSInfo->FlashSectorSize = ntohl(psFlash2xCSInfo->FlashSectorSize);
- psFlash2xCSInfo->FlashWriteSupportSize = ntohl(psFlash2xCSInfo->FlashWriteSupportSize);
- psFlash2xCSInfo->TotalFlashSize = ntohl(psFlash2xCSInfo->TotalFlashSize);
- psFlash2xCSInfo->FlashBaseAddr = ntohl(psFlash2xCSInfo->FlashBaseAddr);
- psFlash2xCSInfo->FlashPartMaxSize = ntohl(psFlash2xCSInfo->FlashPartMaxSize);
- psFlash2xCSInfo->IsCDLessDeviceBootSig = ntohl(psFlash2xCSInfo->IsCDLessDeviceBootSig);
- psFlash2xCSInfo->MassStorageTimeout = ntohl(psFlash2xCSInfo->MassStorageTimeout);
- psFlash2xCSInfo->OffsetISOImage1Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1Start);
- psFlash2xCSInfo->OffsetISOImage1Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part1End);
- psFlash2xCSInfo->OffsetISOImage1Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2Start);
- psFlash2xCSInfo->OffsetISOImage1Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part2End);
- psFlash2xCSInfo->OffsetISOImage1Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3Start);
- psFlash2xCSInfo->OffsetISOImage1Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage1Part3End);
- psFlash2xCSInfo->OffsetISOImage2Part1Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1Start);
- psFlash2xCSInfo->OffsetISOImage2Part1End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part1End);
- psFlash2xCSInfo->OffsetISOImage2Part2Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2Start);
- psFlash2xCSInfo->OffsetISOImage2Part2End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part2End);
- psFlash2xCSInfo->OffsetISOImage2Part3Start = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3Start);
- psFlash2xCSInfo->OffsetISOImage2Part3End = ntohl(psFlash2xCSInfo->OffsetISOImage2Part3End);
- psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader = ntohl(psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader);
- psFlash2xCSInfo->OffsetFromZeroForDSD1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1Start);
- psFlash2xCSInfo->OffsetFromZeroForDSD1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD1End);
- psFlash2xCSInfo->OffsetFromZeroForDSD2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2Start);
- psFlash2xCSInfo->OffsetFromZeroForDSD2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForDSD2End);
- psFlash2xCSInfo->OffsetFromZeroForVSA1Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1Start);
- psFlash2xCSInfo->OffsetFromZeroForVSA1End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA1End);
- psFlash2xCSInfo->OffsetFromZeroForVSA2Start = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2Start);
- psFlash2xCSInfo->OffsetFromZeroForVSA2End = ntohl(psFlash2xCSInfo->OffsetFromZeroForVSA2End);
-
- for (Index = 0; Index < (FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)); Index++)
- psFlash2xCSInfo->SectorAccessBitMap[Index] = ntohl(psFlash2xCSInfo->SectorAccessBitMap[Index]);
-
- return STATUS_SUCCESS;
-}
-
-static int ConvertEndianOfCSStructure(struct bcm_flash_cs_info *psFlashCSInfo)
-{
- /* unsigned int Index = 0; */
- psFlashCSInfo->MagicNumber = ntohl(psFlashCSInfo->MagicNumber);
- psFlashCSInfo->FlashLayoutVersion = ntohl(psFlashCSInfo->FlashLayoutVersion);
- psFlashCSInfo->ISOImageVersion = ntohl(psFlashCSInfo->ISOImageVersion);
- /* Not converted, to preserve the old assumption about this field's byte order. */
- psFlashCSInfo->SCSIFirmwareVersion = (psFlashCSInfo->SCSIFirmwareVersion);
- psFlashCSInfo->OffsetFromZeroForPart1ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart1ISOImage);
- psFlashCSInfo->OffsetFromZeroForScsiFirmware = ntohl(psFlashCSInfo->OffsetFromZeroForScsiFirmware);
- psFlashCSInfo->SizeOfScsiFirmware = ntohl(psFlashCSInfo->SizeOfScsiFirmware);
- psFlashCSInfo->OffsetFromZeroForPart2ISOImage = ntohl(psFlashCSInfo->OffsetFromZeroForPart2ISOImage);
- psFlashCSInfo->OffsetFromZeroForCalibrationStart = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationStart);
- psFlashCSInfo->OffsetFromZeroForCalibrationEnd = ntohl(psFlashCSInfo->OffsetFromZeroForCalibrationEnd);
- psFlashCSInfo->OffsetFromZeroForVSAStart = ntohl(psFlashCSInfo->OffsetFromZeroForVSAStart);
- psFlashCSInfo->OffsetFromZeroForVSAEnd = ntohl(psFlashCSInfo->OffsetFromZeroForVSAEnd);
- psFlashCSInfo->OffsetFromZeroForControlSectionStart = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionStart);
- psFlashCSInfo->OffsetFromZeroForControlSectionData = ntohl(psFlashCSInfo->OffsetFromZeroForControlSectionData);
- psFlashCSInfo->CDLessInactivityTimeout = ntohl(psFlashCSInfo->CDLessInactivityTimeout);
- psFlashCSInfo->NewImageSignature = ntohl(psFlashCSInfo->NewImageSignature);
- psFlashCSInfo->FlashSectorSizeSig = ntohl(psFlashCSInfo->FlashSectorSizeSig);
- psFlashCSInfo->FlashSectorSize = ntohl(psFlashCSInfo->FlashSectorSize);
- psFlashCSInfo->FlashWriteSupportSize = ntohl(psFlashCSInfo->FlashWriteSupportSize);
- psFlashCSInfo->TotalFlashSize = ntohl(psFlashCSInfo->TotalFlashSize);
- psFlashCSInfo->FlashBaseAddr = ntohl(psFlashCSInfo->FlashBaseAddr);
- psFlashCSInfo->FlashPartMaxSize = ntohl(psFlashCSInfo->FlashPartMaxSize);
- psFlashCSInfo->IsCDLessDeviceBootSig = ntohl(psFlashCSInfo->IsCDLessDeviceBootSig);
- psFlashCSInfo->MassStorageTimeout = ntohl(psFlashCSInfo->MassStorageTimeout);
-
- return STATUS_SUCCESS;
-}
-
-static int IsSectionExistInVendorInfo(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
-{
- return (Adapter->uiVendorExtnFlag &&
- (Adapter->psFlash2xVendorInfo->VendorSection[section].AccessFlags & FLASH2X_SECTION_PRESENT) &&
- (Adapter->psFlash2xVendorInfo->VendorSection[section].OffsetFromZeroForSectionStart != UNINIT_PTR_IN_CS));
-}
-
-static VOID UpdateVendorInfo(struct bcm_mini_adapter *Adapter)
-{
- B_UINT32 i = 0;
- unsigned int uiSizeSection = 0;
-
- Adapter->uiVendorExtnFlag = false;
-
- for (i = 0; i < TOTAL_SECTIONS; i++)
- Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart = UNINIT_PTR_IN_CS;
-
- if (STATUS_SUCCESS != vendorextnGetSectionInfo(Adapter, Adapter->psFlash2xVendorInfo))
- return;
-
- i = 0;
- while (i < TOTAL_SECTIONS) {
- if (!(Adapter->psFlash2xVendorInfo->VendorSection[i].AccessFlags & FLASH2X_SECTION_PRESENT)) {
- i++;
- continue;
- }
-
- Adapter->uiVendorExtnFlag = TRUE;
- uiSizeSection = (Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionEnd -
- Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart);
-
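- /* Redirect the CS offsets of each section supplied through vendor info to
-  * VENDOR_PTR_IN_CS (DSD sections only if they are large enough to hold a DSD
-  * header); otherwise mark the offsets as uninitialised.
-  */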
- switch (i) {
- case DSD0:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd = UNINIT_PTR_IN_CS;
- break;
-
- case DSD1:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End = UNINIT_PTR_IN_CS;
- break;
-
- case DSD2:
- if ((uiSizeSection >= (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header))) &&
- (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart))
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End = UNINIT_PTR_IN_CS;
- break;
- case VSA0:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd = UNINIT_PTR_IN_CS;
- break;
-
- case VSA1:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End = UNINIT_PTR_IN_CS;
- break;
- case VSA2:
- if (UNINIT_PTR_IN_CS != Adapter->psFlash2xVendorInfo->VendorSection[i].OffsetFromZeroForSectionStart)
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = VENDOR_PTR_IN_CS;
- else
- Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start = Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End = UNINIT_PTR_IN_CS;
- break;
-
- default:
- break;
- }
- i++;
- }
-}
-
-/*
- * Procedure: BcmGetFlashCSInfo
- *
- * Description: Reads control structure and gets Cal section addresses.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * <OSAL_STATUS_CODE>
- */
-
-static int BcmGetFlashCSInfo(struct bcm_mini_adapter *Adapter)
-{
- /* struct bcm_flash_cs_info sFlashCsInfo = {0}; */
-
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int value;
- #endif
-
- unsigned int uiFlashLayoutMajorVersion;
-
- Adapter->uiFlashLayoutMinorVersion = 0;
- Adapter->uiFlashLayoutMajorVersion = 0;
- Adapter->ulFlashControlSectionStart = FLASH_CS_INFO_START_ADDR;
-
- Adapter->uiFlashBaseAdd = 0;
- Adapter->ulFlashCalStart = 0;
- memset(Adapter->psFlashCSInfo, 0, sizeof(struct bcm_flash_cs_info));
- memset(Adapter->psFlash2xCSInfo, 0, sizeof(struct bcm_flash2x_cs_info));
-
- if (!Adapter->bDDRInitDone) {
- value = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- wrmalt(Adapter, 0xAF00A080, &value, sizeof(value));
- }
-
- /* Read the first 8 bytes to get the flash layout:
-  * MagicNumber (4 bytes) + FlashLayoutMinorVersion (2 bytes) + FlashLayoutMajorVersion (2 bytes)
- */
- BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, 8);
-
- Adapter->psFlashCSInfo->FlashLayoutVersion = ntohl(Adapter->psFlashCSInfo->FlashLayoutVersion);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Version :%X", (Adapter->psFlashCSInfo->FlashLayoutVersion));
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Flash Layout Minor Version :%d\n", ntohs(sFlashCsInfo.FlashLayoutMinorVersion)); */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is :%x\n", ntohl(Adapter->psFlashCSInfo->MagicNumber));
-
- if (FLASH_CONTROL_STRUCT_SIGNATURE == ntohl(Adapter->psFlashCSInfo->MagicNumber)) {
- uiFlashLayoutMajorVersion = MAJOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion));
- Adapter->uiFlashLayoutMinorVersion = MINOR_VERSION((Adapter->psFlashCSInfo->FlashLayoutVersion));
- } else {
- Adapter->uiFlashLayoutMinorVersion = 0;
- uiFlashLayoutMajorVersion = 0;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "FLASH LAYOUT MAJOR VERSION :%X", uiFlashLayoutMajorVersion);
-
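- /* Flash layout 1.x uses the bcm_flash_cs_info structure; layout 2.x and later
-  * uses bcm_flash2x_cs_info plus vendor info, active DSD/ISO selection and a
-  * sector access bitmap.
-  */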
- if (uiFlashLayoutMajorVersion < FLASH_2X_MAJOR_NUMBER) {
- BeceemFlashBulkRead(Adapter, (PUINT)Adapter->psFlashCSInfo, Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash_cs_info));
- ConvertEndianOfCSStructure(Adapter->psFlashCSInfo);
- Adapter->ulFlashCalStart = (Adapter->psFlashCSInfo->OffsetFromZeroForCalibrationStart);
-
- if (!((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- Adapter->ulFlashControlSectionStart = Adapter->psFlashCSInfo->OffsetFromZeroForControlSectionStart;
-
- if ((FLASH_CONTROL_STRUCT_SIGNATURE == (Adapter->psFlashCSInfo->MagicNumber)) &&
- (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlashCSInfo->SCSIFirmwareVersion)) &&
- (FLASH_SECTOR_SIZE_SIG == (Adapter->psFlashCSInfo->FlashSectorSizeSig)) &&
- (BYTE_WRITE_SUPPORT == (Adapter->psFlashCSInfo->FlashWriteSupportSize))) {
- Adapter->ulFlashWriteSize = (Adapter->psFlashCSInfo->FlashWriteSupportSize);
- Adapter->fpFlashWrite = flashByteWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus;
- } else {
- Adapter->ulFlashWriteSize = MAX_RW_SIZE;
- Adapter->fpFlashWrite = flashWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus;
- }
-
- BcmGetFlashSectorSize(Adapter, (Adapter->psFlashCSInfo->FlashSectorSizeSig),
- (Adapter->psFlashCSInfo->FlashSectorSize));
- Adapter->uiFlashBaseAdd = Adapter->psFlashCSInfo->FlashBaseAddr & 0xFCFFFFFF;
- } else {
- if (BcmFlash2xBulkRead(Adapter, (PUINT)Adapter->psFlash2xCSInfo, NO_SECTION_VAL,
- Adapter->ulFlashControlSectionStart, sizeof(struct bcm_flash2x_cs_info))) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to read CS structure\n");
- return STATUS_FAILURE;
- }
-
- ConvertEndianOf2XCSStructure(Adapter->psFlash2xCSInfo);
- BcmDumpFlash2XCSStructure(Adapter->psFlash2xCSInfo, Adapter);
- if ((FLASH_CONTROL_STRUCT_SIGNATURE == Adapter->psFlash2xCSInfo->MagicNumber) &&
- (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlash2xCSInfo->SCSIFirmwareVersion)) &&
- (FLASH_SECTOR_SIZE_SIG == Adapter->psFlash2xCSInfo->FlashSectorSizeSig) &&
- (BYTE_WRITE_SUPPORT == Adapter->psFlash2xCSInfo->FlashWriteSupportSize)) {
- Adapter->ulFlashWriteSize = Adapter->psFlash2xCSInfo->FlashWriteSupportSize;
- Adapter->fpFlashWrite = flashByteWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashByteWriteStatus;
- } else {
- Adapter->ulFlashWriteSize = MAX_RW_SIZE;
- Adapter->fpFlashWrite = flashWrite;
- Adapter->fpFlashWriteWithStatusCheck = flashWriteStatus;
- }
-
- BcmGetFlashSectorSize(Adapter, Adapter->psFlash2xCSInfo->FlashSectorSizeSig,
- Adapter->psFlash2xCSInfo->FlashSectorSize);
-
- UpdateVendorInfo(Adapter);
-
- BcmGetActiveDSD(Adapter);
- BcmGetActiveISO(Adapter);
- Adapter->uiFlashBaseAdd = Adapter->psFlash2xCSInfo->FlashBaseAddr & 0xFCFFFFFF;
- Adapter->ulFlashControlSectionStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart;
- }
- /*
- * Open questions: what if the CS sector size does not match this sector size?
- * What does the AccessBitMap in the CS indicate for flash 2.x?
- */
- Adapter->ulFlashID = BcmReadFlashRDID(Adapter);
- Adapter->uiFlashLayoutMajorVersion = uiFlashLayoutMajorVersion;
-
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: BcmGetNvmType
- *
- * Description: Finds the type of NVM used.
- *
- * Arguments:
- * Adapter - ptr to Adapter object instance
- *
- * Returns:
- * NVM_TYPE
- *
- */
-
-static enum bcm_nvm_type BcmGetNvmType(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiData = 0;
-
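- /* Detection order: look for the BECM signature in EEPROM first, then in flash
-  * (after reading the CS info to locate the calibration start), and finally
-  * fall back to probing the EEPROM size.
-  */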
- BeceemEEPROMBulkRead(Adapter, &uiData, 0x0, 4);
- if (uiData == BECM)
- return NVM_EEPROM;
-
- /*
- * Read control struct and get cal addresses before accessing the flash
- */
- BcmGetFlashCSInfo(Adapter);
-
- BeceemFlashBulkRead(Adapter, &uiData, 0x0 + Adapter->ulFlashCalStart, 4);
- if (uiData == BECM)
- return NVM_FLASH;
-
- /*
- * Even if there is no valid signature on the EEPROM/FLASH, find out whether they really exist;
- * if one exists, select it.
- */
- if (BcmGetEEPROMSize(Adapter))
- return NVM_EEPROM;
-
- /* TBD for Flash. */
- return NVM_UNKNOWN;
-}
-
-/*
- * BcmGetSectionValStartOffset - calculates the section's starting offset for a given section value
- * @Adapter : Driver's private data structure
- * @eFlashSectionVal : Flash section value defined in enum bcm_flash2x_section_val
- *
- * Return value:
- * On success it returns the start offset of the provided section value.
- * On failure it returns INVALID_OFFSET.
- */
-
-int BcmGetSectionValStartOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal)
-{
- /*
- * Consider all the sections for which the start offset can be calculated or is directly
- * given in the CS structure. If no matching case exists, return INVALID_OFFSET, indicating
- * that the section start offset cannot be calculated and is not given in the CS structure.
- */
-
- int SectStartOffset = 0;
-
- SectStartOffset = INVALID_OFFSET;
-
- if (IsSectionExistInVendorInfo(Adapter, eFlashSectionVal))
- return Adapter->psFlash2xVendorInfo->VendorSection[eFlashSectionVal].OffsetFromZeroForSectionStart;
-
- switch (eFlashSectionVal) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start);
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start);
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart);
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start);
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start);
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart);
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start);
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start);
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware);
- break;
- case CONTROL_SECTION:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart);
- break;
- case ISO_IMAGE1_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start);
- break;
- case ISO_IMAGE1_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
- break;
- case ISO_IMAGE2_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start);
- break;
- case ISO_IMAGE2_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start != UNINIT_PTR_IN_CS)
- SectStartOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x");
- SectStartOffset = INVALID_OFFSET;
- }
-
- return SectStartOffset;
-}
-
-/*
- * BcmGetSectionValEndOffset - calculates the section's end offset for a given section val
- * @Adapter : Driver's private data structure
- * @eFlash2xSectionVal : Flash section value defined in enum bcm_flash2x_section_val
- *
- * Return value:-
- * On success, returns the end offset of the provided section val
- * On failure, returns STATUS_FAILURE
- */
-
-static int BcmGetSectionValEndOffset(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- int SectEndOffset = 0;
-
- SectEndOffset = INVALID_OFFSET;
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return Adapter->psFlash2xVendorInfo->VendorSection[eFlash2xSectionVal].OffsetFromZeroForSectionEnd;
-
- switch (eFlash2xSectionVal) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End);
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End);
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDEnd);
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1End);
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2End);
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAEnd);
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1End);
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2End);
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectEndOffset = ((Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) +
- (Adapter->psFlash2xCSInfo->SizeOfScsiFirmware));
- break;
- case CONTROL_SECTION:
- /* Not clear, so returning failure; confirm and fix it. */
- SectEndOffset = STATUS_FAILURE;
- break;
- case ISO_IMAGE1_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End);
- break;
- case ISO_IMAGE1_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End);
- break;
- case ISO_IMAGE2_PART2:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End);
- break;
- case ISO_IMAGE2_PART3:
- if (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End != UNINIT_PTR_IN_CS)
- SectEndOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End);
- break;
- default:
- SectEndOffset = INVALID_OFFSET;
- }
-
- return SectEndOffset;
-}
-
-/*
- * BcmFlash2xBulkRead:- Read API for Flash Map 2.x
- * @Adapter : Driver private data structure
- * @pBuffer : Buffer where the data has to be put after reading
- * @eFlash2xSectionVal : Flash section val defined in enum bcm_flash2x_section_val
- * @uiOffsetWithinSectionVal :- Offset within the provided section
- * @uiNumBytes : Number of bytes to read
- *
- * Return value:-
- * Returns STATUS_SUCCESS on success and a negative error code on failure.
- */
-
-int BcmFlash2xBulkRead(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffsetWithinSectionVal,
- unsigned int uiNumBytes)
-{
- int Status = STATUS_SUCCESS;
- int SectionStartOffset = 0;
- unsigned int uiAbsoluteOffset = 0;
- unsigned int uiTemp = 0, value = 0;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL");
- return -EINVAL;
- }
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed");
- return -ENODEV;
- }
-
- /* NO_SECTION_VAL means absolute offset is given. */
- if (eFlash2xSectionVal == NO_SECTION_VAL)
- SectionStartOffset = 0;
- else
- SectionStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
-
- if (SectionStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash 2.x Map ", eFlash2xSectionVal);
- return -EINVAL;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return vendorextnReadSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectionVal, uiOffsetWithinSectionVal, uiNumBytes);
-
- /* Calculate the absolute offset in flash. */
- uiAbsoluteOffset = uiOffsetWithinSectionVal + SectionStartOffset;
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
- Status = BeceemFlashBulkRead(Adapter, pBuffer, uiAbsoluteOffset, uiNumBytes);
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Read Failed with Status :%d", Status);
- return Status;
- }
-
- return Status;
-}
-
-/*
- * BcmFlash2xBulkWrite :- Write API for Flash Map 2.x
- * @Adapter : Driver private data structure
- * @pBuffer : Buffer from where data has to be taken for writing
- * @eFlash2xSectVal : Flash section val defined in enum bcm_flash2x_section_val
- * @uiOffset :- Offset within the provided section
- * @uiNumBytes : Number of bytes to write
- *
- * Return value:-
- * Returns STATUS_SUCCESS on success and a negative error code on failure.
- *
- */
-
-int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
- PUINT pBuffer,
- enum bcm_flash2x_section_val eFlash2xSectVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes,
- unsigned int bVerify)
-{
- int Status = STATUS_SUCCESS;
- unsigned int FlashSectValStartOffset = 0;
- unsigned int uiTemp = 0, value = 0;
-
- if (!Adapter) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Adapter structure is NULL");
- return -EINVAL;
- }
-
- if (Adapter->device_removed) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device has been removed");
- return -ENODEV;
- }
-
- /* NO_SECTION_VAL means absolute offset is given. */
- if (eFlash2xSectVal == NO_SECTION_VAL)
- FlashSectValStartOffset = 0;
- else
- FlashSectValStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectVal);
-
- if (FlashSectValStartOffset == STATUS_FAILURE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "This Section<%d> does not exist in Flash Map 2.x", eFlash2xSectVal);
- return -EINVAL;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectVal))
- return vendorextnWriteSection(Adapter, (PUCHAR)pBuffer, eFlash2xSectVal, uiOffset, uiNumBytes, bVerify);
-
- /* Calculate the absolute offset in flash. */
- uiOffset = uiOffset + FlashSectValStartOffset;
-
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- Status = BeceemFlashBulkWrite(Adapter, pBuffer, uiOffset, uiNumBytes, bVerify);
-
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Write failed with Status :%d", Status);
- return Status;
- }
-
- return Status;
-}
-
-/*
- * BcmGetActiveDSD : Set the active DSD, which has to be dumped in DDR, in the adapter structure
- * @Adapter :- Driver's private data structure
- *
- * Return Value:-
- * Returns STATUS_SUCCESS if the right DSD is set, else a negative error code
- *
- */
-
-static int BcmGetActiveDSD(struct bcm_mini_adapter *Adapter)
-{
- enum bcm_flash2x_section_val uiHighestPriDSD = 0;
-
- uiHighestPriDSD = getHighestPriDSD(Adapter);
- Adapter->eActiveDSD = uiHighestPriDSD;
-
- if (DSD0 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart;
- if (DSD1 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start;
- if (DSD2 == uiHighestPriDSD)
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start;
- if (Adapter->eActiveDSD)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active DSD :%d", Adapter->eActiveDSD);
- if (Adapter->eActiveDSD == 0) {
- /* If no DSD is active, make the DSD with write permission the active one */
- if (IsSectionWritable(Adapter, DSD2)) {
- Adapter->eActiveDSD = DSD2;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start;
- } else if (IsSectionWritable(Adapter, DSD1)) {
- Adapter->eActiveDSD = DSD1;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start;
- } else if (IsSectionWritable(Adapter, DSD0)) {
- Adapter->eActiveDSD = DSD0;
- Adapter->ulFlashCalStart = Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart;
- }
- }
-
- return STATUS_SUCCESS;
-}
-
-/*
- * BcmGetActiveISO :- Set the active ISO in the adapter data structure
- * @Adapter : Driver private data structure
- *
- * Return Value:-
- * Success:- STATUS_SUCCESS
- * Failure:- negative error code
- *
- */
-
-static int BcmGetActiveISO(struct bcm_mini_adapter *Adapter)
-{
- int HighestPriISO = 0;
-
- HighestPriISO = getHighestPriISO(Adapter);
-
- Adapter->eActiveISO = HighestPriISO;
- if (Adapter->eActiveISO == ISO_IMAGE2)
- Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start);
- else if (Adapter->eActiveISO == ISO_IMAGE1)
- Adapter->uiActiveISOOffset = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start);
-
- if (Adapter->eActiveISO)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Active ISO :%x", Adapter->eActiveISO);
-
- return STATUS_SUCCESS;
-}
-
-/*
- * IsOffsetWritable :- reports the access permission of the sector containing the passed offset
- * @Adapter : Driver's private data structure
- * @uiOffset : Offset provided in the flash
- *
- * Return Value:-
- * Success:- TRUE, offset is writable
- * Failure:- false, offset is read-only
- *
- */
-
-static B_UINT8 IsOffsetWritable(struct bcm_mini_adapter *Adapter, unsigned int uiOffset)
-{
- unsigned int uiSectorNum = 0;
- unsigned int uiWordOfSectorPermission = 0;
- unsigned int uiBitofSectorePermission = 0;
- B_UINT32 permissionBits = 0;
-
- uiSectorNum = uiOffset/Adapter->uiSectorSize;
-
- /* calculating the word having this Sector Access permission from SectorAccessBitMap Array */
- uiWordOfSectorPermission = Adapter->psFlash2xCSInfo->SectorAccessBitMap[uiSectorNum / 16];
-
- /* calculating the bit index inside the word for this sector */
- uiBitofSectorePermission = 2 * (15 - uiSectorNum % 16);
-
- /* Extract the access permission bits for this sector */
- permissionBits = uiWordOfSectorPermission & (0x3 << uiBitofSectorePermission);
- permissionBits = (permissionBits >> uiBitofSectorePermission) & 0x3;
- if (permissionBits == SECTOR_READWRITE_PERMISSION)
- return TRUE;
- else
- return false;
-}
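
The lookup above packs the access permission as two bits per sector, sixteen sectors per 32-bit word of SectorAccessBitMap, with sector 0 held in the two most significant bits of word 0. Below is a minimal standalone sketch of that decode; the bitmap contents and the SECTOR_READWRITE_PERMISSION value of 0 are assumptions for illustration, not taken from the driver headers.

#include <stdio.h>

/* Assumed value for illustration; the driver defines its own constant. */
#define SECTOR_READWRITE_PERMISSION 0x0

/*
 * Decode the 2-bit access field for 'sector' from a packed bitmap:
 * 16 sectors per 32-bit word, with the lowest-numbered sector of each
 * word occupying the two most significant bits.
 */
static int sector_is_writable(const unsigned int *bitmap, unsigned int sector)
{
	unsigned int word = bitmap[sector / 16];
	unsigned int shift = 2 * (15 - sector % 16);

	return ((word >> shift) & 0x3) == SECTOR_READWRITE_PERMISSION;
}

int main(void)
{
	/* Hypothetical map: sector 0 read-only (0x3), sector 1 read-write (0x0). */
	unsigned int bitmap[1] = { 0xC0000000 };

	printf("sector 0 writable: %d\n", sector_is_writable(bitmap, 0));
	printf("sector 1 writable: %d\n", sector_is_writable(bitmap, 1));
	return 0;
}
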
-
-static int BcmDumpFlash2xSectionBitMap(struct bcm_flash2x_bitmap *psFlash2xBitMap)
-{
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "***************Flash 2.x Section Bitmap***************");
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE1 :0X%x", psFlash2xBitMap->ISO_IMAGE1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO_IMAGE2 :0X%x", psFlash2xBitMap->ISO_IMAGE2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD0 :0X%x", psFlash2xBitMap->DSD0);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD1 :0X%x", psFlash2xBitMap->DSD1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD2 :0X%x", psFlash2xBitMap->DSD2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA0 :0X%x", psFlash2xBitMap->VSA0);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA1 :0X%x", psFlash2xBitMap->VSA1);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "VSA2 :0X%x", psFlash2xBitMap->VSA2);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SCSI :0X%x", psFlash2xBitMap->SCSI);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "CONTROL_SECTION :0X%x", psFlash2xBitMap->CONTROL_SECTION);
-
- return STATUS_SUCCESS;
-}
-
-/*
- * BcmGetFlash2xSectionalBitMap :- Provides the bitmap of all the sections present in flash.
- * 8 bits are assigned to every section:
- * bit[0] : section present or not
- * bit[1] : section is valid or not
- * bit[2] : section is read-only or also has write permission
- * bit[3] : active section
- * bit[7...4] : reserved
- *
- * @Adapter:- Driver private data structure
- *
- * Return value:-
- * Success:- STATUS_SUCCESS
- * Failure:- negative error code
- */
-
-int BcmGetFlash2xSectionalBitMap(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_bitmap *psFlash2xBitMap)
-{
- struct bcm_flash2x_cs_info *psFlash2xCSInfo = Adapter->psFlash2xCSInfo;
- enum bcm_flash2x_section_val uiHighestPriDSD = 0;
- enum bcm_flash2x_section_val uiHighestPriISO = 0;
- bool SetActiveDSDDone = false;
- bool SetActiveISODone = false;
-
- /* For a 1.x map, all sections except DSD0 will be shown as not present.
- * This part will be used by the calibration tool to detect the number of DSDs present in flash.
- */
- if (IsFlash2x(Adapter) == false) {
- psFlash2xBitMap->ISO_IMAGE2 = 0;
- psFlash2xBitMap->ISO_IMAGE1 = 0;
- psFlash2xBitMap->DSD0 = FLASH2X_SECTION_VALID | FLASH2X_SECTION_ACT | FLASH2X_SECTION_PRESENT; /* 0xB; 0000(Reserved)1(Active)0(RW)1(Valid)1(Present) */
- psFlash2xBitMap->DSD1 = 0;
- psFlash2xBitMap->DSD2 = 0;
- psFlash2xBitMap->VSA0 = 0;
- psFlash2xBitMap->VSA1 = 0;
- psFlash2xBitMap->VSA2 = 0;
- psFlash2xBitMap->CONTROL_SECTION = 0;
- psFlash2xBitMap->SCSI = 0;
- psFlash2xBitMap->Reserved0 = 0;
- psFlash2xBitMap->Reserved1 = 0;
- psFlash2xBitMap->Reserved2 = 0;
-
- return STATUS_SUCCESS;
- }
-
- uiHighestPriDSD = getHighestPriDSD(Adapter);
- uiHighestPriISO = getHighestPriISO(Adapter);
-
- /*
- * ISO IMAGE 2
- */
- if ((psFlash2xCSInfo->OffsetISOImage2Part1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->ISO_IMAGE2 = psFlash2xBitMap->ISO_IMAGE2 | FLASH2X_SECTION_PRESENT;
-
- if (ReadISOSignature(Adapter, ISO_IMAGE2) == ISO_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_VALID;
-
- /* Calculation for extracting the access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE2) == false)
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_RO;
-
- if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE2) {
- psFlash2xBitMap->ISO_IMAGE2 |= FLASH2X_SECTION_ACT;
- SetActiveISODone = TRUE;
- }
- }
-
- /*
- * ISO IMAGE 1
- */
- if ((psFlash2xCSInfo->OffsetISOImage1Part1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->ISO_IMAGE1 = psFlash2xBitMap->ISO_IMAGE1 | FLASH2X_SECTION_PRESENT;
-
- if (ReadISOSignature(Adapter, ISO_IMAGE1) == ISO_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_VALID;
-
- /* Calculation for extracting the access permission */
- if (IsSectionWritable(Adapter, ISO_IMAGE1) == false)
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_RO;
-
- if (SetActiveISODone == false && uiHighestPriISO == ISO_IMAGE1) {
- psFlash2xBitMap->ISO_IMAGE1 |= FLASH2X_SECTION_ACT;
- SetActiveISODone = TRUE;
- }
- }
-
- /*
- * DSD2
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSD2Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD2 = psFlash2xBitMap->DSD2 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD2) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_VALID;
-
- /* Calculation for extracting the access permission */
- if (IsSectionWritable(Adapter, DSD2) == false) {
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD2)) {
- psFlash2xBitMap->DSD2 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * DSD 1
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSD1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD1 = psFlash2xBitMap->DSD1 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD1) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_VALID;
-
- /* Calculation for extracting the access permission */
- if (IsSectionWritable(Adapter, DSD1) == false) {
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD1)) {
- psFlash2xBitMap->DSD1 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * For DSD 0
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForDSDStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->DSD0 = psFlash2xBitMap->DSD0 | FLASH2X_SECTION_PRESENT;
-
- if (ReadDSDSignature(Adapter, DSD0) == DSD_IMAGE_MAGIC_NUMBER)
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_VALID;
-
- /* Extracting the access permission */
- if (IsSectionWritable(Adapter, DSD0) == false) {
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_RO;
- } else {
- /* Means section is writable */
- if ((SetActiveDSDDone == false) && (uiHighestPriDSD == DSD0)) {
- psFlash2xBitMap->DSD0 |= FLASH2X_SECTION_ACT;
- SetActiveDSDDone = TRUE;
- }
- }
- }
-
- /*
- * VSA 0
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSAStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA0 = psFlash2xBitMap->VSA0 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the access bit. Map is not defined, hence always marking it valid */
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_VALID;
-
- /* Calculation for extracting the access permission */
- if (IsSectionWritable(Adapter, VSA0) == false)
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA0 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * VSA 1
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSA1Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA1 = psFlash2xBitMap->VSA1 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the access bit. Map is not defined, hence always marking it valid */
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA1) == false)
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA1 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * VSA 2
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForVSA2Start) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->VSA2 = psFlash2xBitMap->VSA2 | FLASH2X_SECTION_PRESENT;
-
- /* Setting the access bit. Map is not defined, hence always marking it valid */
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, VSA2) == false)
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->VSA2 |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * SCSI Section
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForScsiFirmware) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->SCSI = psFlash2xBitMap->SCSI | FLASH2X_SECTION_PRESENT;
-
- /* Setting the access bit. Map is not defined, hence always marking it valid */
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, SCSI) == false)
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->SCSI |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * Control Section
- */
- if ((psFlash2xCSInfo->OffsetFromZeroForControlSectionStart) != UNINIT_PTR_IN_CS) {
- /* Setting the 0th Bit representing the Section is present or not. */
- psFlash2xBitMap->CONTROL_SECTION = psFlash2xBitMap->CONTROL_SECTION | (FLASH2X_SECTION_PRESENT);
-
- /* Setting the access bit. Map is not defined, hence always marking it valid */
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_VALID;
-
- /* Checking For Access permission */
- if (IsSectionWritable(Adapter, CONTROL_SECTION) == false)
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_RO;
-
- /* By Default section is Active */
- psFlash2xBitMap->CONTROL_SECTION |= FLASH2X_SECTION_ACT;
- }
-
- /*
- * For Reserved Sections
- */
- psFlash2xBitMap->Reserved0 = 0;
- psFlash2xBitMap->Reserved1 = 0;
- psFlash2xBitMap->Reserved2 = 0;
- BcmDumpFlash2xSectionBitMap(psFlash2xBitMap);
-
- return STATUS_SUCCESS;
-}
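
Each per-section byte produced above is composed from the four flag bits documented in the function header (present, valid, read-only, active). A small illustrative sketch of composing and decoding such a status byte follows; the flag values are assumed from that documented bit layout (bit 0 present, bit 1 valid, bit 2 RO, bit 3 active) rather than taken from the driver headers.

#include <stdio.h>

/* Flag values assumed from the documented bit layout, not from the driver headers. */
#define SECTION_PRESENT (1 << 0)
#define SECTION_VALID   (1 << 1)
#define SECTION_RO      (1 << 2)
#define SECTION_ACT     (1 << 3)

/* Decode and print the four flag bits of one section's status byte. */
static void dump_section(const char *name, unsigned char flags)
{
	printf("%-4s present:%d valid:%d ro:%d active:%d\n", name,
	       !!(flags & SECTION_PRESENT), !!(flags & SECTION_VALID),
	       !!(flags & SECTION_RO), !!(flags & SECTION_ACT));
}

int main(void)
{
	/* A present, valid, writable, active DSD (as reported for a 1.x map). */
	unsigned char dsd0 = SECTION_PRESENT | SECTION_VALID | SECTION_ACT;
	/* A present, valid, but read-only and inactive ISO image. */
	unsigned char iso2 = SECTION_PRESENT | SECTION_VALID | SECTION_RO;

	dump_section("DSD0", dsd0);
	dump_section("ISO2", iso2);
	return 0;
}
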
-
-/*
- * BcmSetActiveSection :- Makes the priority field of the given section the highest among
- * sections of the same type.
- *
- * @Adapter :- Bcm driver private data structure
- * @eFlash2xSectionVal :- Flash section val whose priority has to be made highest.
- *
- * Return Value:- STATUS_SUCCESS if the priority was made highest, else an error code
- *
- */
-
-int BcmSetActiveSection(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectVal)
-{
- unsigned int SectImagePriority = 0;
- int Status = STATUS_SUCCESS;
-
- /* struct bcm_dsd_header sDSD = {0};
- * struct bcm_iso_header sISO = {0};
- */
- int HighestPriDSD = 0;
- int HighestPriISO = 0;
-
- Status = IsSectionWritable(Adapter, eFlash2xSectVal);
- if (Status != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section <%d> is not writable", eFlash2xSectVal);
- return STATUS_FAILURE;
- }
-
- Adapter->bHeaderChangeAllowed = TRUE;
- switch (eFlash2xSectVal) {
- case ISO_IMAGE1:
- case ISO_IMAGE2:
- if (ReadISOSignature(Adapter, eFlash2xSectVal) == ISO_IMAGE_MAGIC_NUMBER) {
- HighestPriISO = getHighestPriISO(Adapter);
-
- if (HighestPriISO == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = ReadISOPriority(Adapter, HighestPriISO) + 1;
-
- if ((SectImagePriority == 0) && IsSectionWritable(Adapter, HighestPriISO)) {
- /* This is a SPECIAL case which will only happen if the current highest-priority ISO has priority value 0x7FFFFFFF.
- * We write 1 to the current highest-priority ISO and then increase the priority of the ISO
- * requested by the user.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal);
- SectImagePriority = htonl(0x1);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriISO,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- Status = STATUS_FAILURE;
- break;
- }
-
- HighestPriISO = getHighestPriISO(Adapter);
-
- if (HighestPriISO == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given ISO<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = 2;
- }
-
- SectImagePriority = htonl(SectImagePriority);
-
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- eFlash2xSectVal,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is corrupted. Hence can't increase the priority");
- Status = STATUS_FAILURE;
- break;
- }
- break;
- case DSD0:
- case DSD1:
- case DSD2:
- if (ReadDSDSignature(Adapter, eFlash2xSectVal) == DSD_IMAGE_MAGIC_NUMBER) {
- HighestPriDSD = getHighestPriDSD(Adapter);
- if (HighestPriDSD == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given DSD<%x> already has highest priority", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = ReadDSDPriority(Adapter, HighestPriDSD) + 1;
- if (SectImagePriority == 0) {
- /* This is a SPECIAL case which will only happen if the current highest-priority DSD has priority value 0x7FFFFFFF.
- * We write 1 to the current highest-priority DSD and then increase the priority of the DSD
- * requested by the user.
- */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, NVM_RW, DBG_LVL_ALL, "SectImagePriority wraparound happened, eFlash2xSectVal: 0x%x\n", eFlash2xSectVal);
- SectImagePriority = htonl(0x1);
-
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
-
- HighestPriDSD = getHighestPriDSD(Adapter);
-
- if (HighestPriDSD == eFlash2xSectVal) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Made the DSD: %x highest by reducing priority of other\n", eFlash2xSectVal);
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = htonl(0x2);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- HighestPriDSD,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- break;
- }
-
- HighestPriDSD = getHighestPriDSD(Adapter);
- if (HighestPriDSD == eFlash2xSectVal) {
- Status = STATUS_SUCCESS;
- break;
- }
-
- SectImagePriority = 3;
- }
- SectImagePriority = htonl(SectImagePriority);
- Status = BcmFlash2xBulkWrite(Adapter,
- &SectImagePriority,
- eFlash2xSectVal,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- SIGNATURE_SIZE,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Priority has not been written properly");
- Status = STATUS_FAILURE;
- break;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Signature is corrupted. Hence can't increase the priority");
- Status = STATUS_FAILURE;
- break;
- }
- break;
- case VSA0:
- case VSA1:
- case VSA2:
- /* Has to be decided */
- break;
- default:
- Status = STATUS_FAILURE;
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- return Status;
-}
-
-/*
- * BcmCopyISO - Used only for copying the ISO section
- * @Adapter :- Bcm driver private data structure
- * @sCopySectStrut :- Section copy structure
- *
- * Return value:- SUCCESS if copied successfully, else a negative error code
- *
- */
-
-int BcmCopyISO(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_copy_section sCopySectStrut)
-{
- PCHAR Buff = NULL;
- enum bcm_flash2x_section_val eISOReadPart = 0, eISOWritePart = 0;
- unsigned int uiReadOffsetWithinPart = 0, uiWriteOffsetWithinPart = 0;
- unsigned int uiTotalDataToCopy = 0;
- bool IsThisHeaderSector = false;
- unsigned int sigOffset = 0;
- unsigned int ISOLength = 0;
- unsigned int Status = STATUS_SUCCESS;
- unsigned int SigBuff[MAX_RW_SIZE];
- unsigned int i = 0;
-
- if (ReadISOSignature(Adapter, sCopySectStrut.SrcSection) != ISO_IMAGE_MAGIC_NUMBER) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have valid signature");
- return STATUS_FAILURE;
- }
-
- Status = BcmFlash2xBulkRead(Adapter, &ISOLength,
- sCopySectStrut.SrcSection,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageSize),
- 4);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO\n");
- return Status;
- }
-
- ISOLength = htonl(ISOLength);
- if (ISOLength % Adapter->uiSectorSize)
- ISOLength = Adapter->uiSectorSize * (1 + ISOLength/Adapter->uiSectorSize);
-
- sigOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
-
- Buff = kzalloc(Adapter->uiSectorSize, GFP_KERNEL);
-
- if (!Buff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for section size");
- return -ENOMEM;
- }
-
- if (sCopySectStrut.SrcSection == ISO_IMAGE1 && sCopySectStrut.DstSection == ISO_IMAGE2) {
- eISOReadPart = ISO_IMAGE1;
- eISOWritePart = ISO_IMAGE2;
- uiReadOffsetWithinPart = 0;
- uiWriteOffsetWithinPart = 0;
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = ISOLength;
-
- CorruptISOSig(Adapter, ISO_IMAGE2);
- while (uiTotalDataToCopy) {
- if (uiTotalDataToCopy == Adapter->uiSectorSize) {
- /* Setting up the write of the first sector. The first sector is assumed to be written last. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector");
- eISOReadPart = ISO_IMAGE1;
- uiReadOffsetWithinPart = 0;
- eISOWritePart = ISO_IMAGE2;
- uiWriteOffsetWithinPart = 0;
- IsThisHeaderSector = TRUE;
- } else {
- uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize;
- uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize;
-
- if ((eISOReadPart == ISO_IMAGE1) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) {
- eISOReadPart = ISO_IMAGE1_PART2;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOReadPart == ISO_IMAGE1_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) {
- eISOReadPart = ISO_IMAGE1_PART3;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) {
- eISOWritePart = ISO_IMAGE2_PART2;
- uiWriteOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE2_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) {
- eISOWritePart = ISO_IMAGE2_PART3;
- uiWriteOffsetWithinPart = 0;
- }
- }
-
- Status = BcmFlash2xBulkRead(Adapter,
- (PUINT)Buff,
- eISOReadPart,
- uiReadOffsetWithinPart,
- Adapter->uiSectorSize);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart);
- break;
- }
-
- if (IsThisHeaderSector == TRUE) {
- /* If this is the header sector, write 0xFF over the signature area for now and write the real signature last */
- memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff));
-
- for (i = 0; i < MAX_RW_SIZE; i++)
- *(Buff + sigOffset + i) = 0xFF;
- }
- Adapter->bHeaderChangeAllowed = TRUE;
- Status = BcmFlash2xBulkWrite(Adapter,
- (PUINT)Buff,
- eISOWritePart,
- uiWriteOffsetWithinPart,
- Adapter->uiSectorSize,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart);
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- if (IsThisHeaderSector == TRUE) {
- WriteToFlashWithoutSectorErase(Adapter,
- SigBuff,
- eISOWritePart,
- sigOffset,
- MAX_RW_SIZE);
- IsThisHeaderSector = false;
- }
- /* subtracting the written Data */
- uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize;
- }
- }
-
- if (sCopySectStrut.SrcSection == ISO_IMAGE2 && sCopySectStrut.DstSection == ISO_IMAGE1) {
- eISOReadPart = ISO_IMAGE2;
- eISOWritePart = ISO_IMAGE1;
- uiReadOffsetWithinPart = 0;
- uiWriteOffsetWithinPart = 0;
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage2Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Source ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start) +
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3End) -
- (Adapter->psFlash2xCSInfo->OffsetISOImage1Part3Start);
-
- if (uiTotalDataToCopy < ISOLength) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "error as Dest ISO Section does not have enough section size");
- Status = STATUS_FAILURE;
- goto out;
- }
-
- uiTotalDataToCopy = ISOLength;
-
- CorruptISOSig(Adapter, ISO_IMAGE1);
-
- while (uiTotalDataToCopy) {
- if (uiTotalDataToCopy == Adapter->uiSectorSize) {
- /* Setting up the write of the first sector. The first sector is assumed to be written last. */
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Writing the signature sector");
- eISOReadPart = ISO_IMAGE2;
- uiReadOffsetWithinPart = 0;
- eISOWritePart = ISO_IMAGE1;
- uiWriteOffsetWithinPart = 0;
- IsThisHeaderSector = TRUE;
- } else {
- uiReadOffsetWithinPart = uiReadOffsetWithinPart + Adapter->uiSectorSize;
- uiWriteOffsetWithinPart = uiWriteOffsetWithinPart + Adapter->uiSectorSize;
-
- if ((eISOReadPart == ISO_IMAGE2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start))) {
- eISOReadPart = ISO_IMAGE2_PART2;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOReadPart == ISO_IMAGE2_PART2) && (uiReadOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage2Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage2Part2Start))) {
- eISOReadPart = ISO_IMAGE2_PART3;
- uiReadOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE1) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part1End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start))) {
- eISOWritePart = ISO_IMAGE1_PART2;
- uiWriteOffsetWithinPart = 0;
- }
-
- if ((eISOWritePart == ISO_IMAGE1_PART2) && (uiWriteOffsetWithinPart == (Adapter->psFlash2xCSInfo->OffsetISOImage1Part2End - Adapter->psFlash2xCSInfo->OffsetISOImage1Part2Start))) {
- eISOWritePart = ISO_IMAGE1_PART3;
- uiWriteOffsetWithinPart = 0;
- }
- }
-
- Status = BcmFlash2xBulkRead(Adapter,
- (PUINT)Buff,
- eISOReadPart,
- uiReadOffsetWithinPart,
- Adapter->uiSectorSize);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOReadPart, uiReadOffsetWithinPart);
- break;
- }
-
- if (IsThisHeaderSector == TRUE) {
- /* If this is the header sector, write 0xFF over the signature area for now and write the real signature last */
- memcpy(SigBuff, Buff + sigOffset, sizeof(SigBuff));
-
- for (i = 0; i < MAX_RW_SIZE; i++)
- *(Buff + sigOffset + i) = 0xFF;
- }
- Adapter->bHeaderChangeAllowed = TRUE;
- Status = BcmFlash2xBulkWrite(Adapter,
- (PUINT)Buff,
- eISOWritePart,
- uiWriteOffsetWithinPart,
- Adapter->uiSectorSize,
- TRUE);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed while copying ISO: Part: %x, OffsetWithinPart: %x\n", eISOWritePart, uiWriteOffsetWithinPart);
- break;
- }
-
- Adapter->bHeaderChangeAllowed = false;
- if (IsThisHeaderSector == TRUE) {
- WriteToFlashWithoutSectorErase(Adapter,
- SigBuff,
- eISOWritePart,
- sigOffset,
- MAX_RW_SIZE);
-
- IsThisHeaderSector = false;
- }
-
- /* subtracting the written Data */
- uiTotalDataToCopy = uiTotalDataToCopy - Adapter->uiSectorSize;
- }
- }
-out:
- kfree(Buff);
-
- return Status;
-}
-
-/*
- * BcmFlash2xCorruptSig : this API is used to corrupt the written sig in the Bcm header present in a flash section.
- * It will corrupt the sig, if the section is writable, by zeroing its first bytes.
- * @Adapter :- Bcm driver private data structure
- * @eFlash2xSectionVal :- Flash section val which has a header
- *
- * Return Value :-
- * Success :- If the section is present and writable, corrupt the sig and return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-int BcmFlash2xCorruptSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- int Status = STATUS_SUCCESS;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Value :%x\n", eFlash2xSectionVal);
-
- if ((eFlash2xSectionVal == DSD0) || (eFlash2xSectionVal == DSD1) || (eFlash2xSectionVal == DSD2)) {
- Status = CorruptDSDSig(Adapter, eFlash2xSectionVal);
- } else if (eFlash2xSectionVal == ISO_IMAGE1 || eFlash2xSectionVal == ISO_IMAGE2) {
- Status = CorruptISOSig(Adapter, eFlash2xSectionVal);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Given Section <%d> does not have Header", eFlash2xSectionVal);
- return STATUS_SUCCESS;
- }
- return Status;
-}
-
-/*
- * BcmFlash2xWriteSig :- this API is used to write the sig if the requested section has a
- * header and write permission.
- * @Adapter :- Bcm driver private data structure
- * @eFlashSectionVal :- Flash section val which has a header
- *
- * Return Value :-
- * Success :- If the section is present and writable, write the sig and return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-int BcmFlash2xWriteSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlashSectionVal)
-{
- unsigned int uiSignature = 0;
- unsigned int uiOffset = 0;
-
- /* struct bcm_dsd_header dsdHeader = {0}; */
- if (Adapter->bSigCorrupted == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Signature is not corrupted by driver, hence not restoring\n");
- return STATUS_SUCCESS;
- }
-
- if (Adapter->bAllDSDWriteAllow == false) {
- if (IsSectionWritable(Adapter, eFlashSectionVal) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Write signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
- }
-
- if ((eFlashSectionVal == DSD0) || (eFlashSectionVal == DSD1) || (eFlashSectionVal == DSD2)) {
- uiSignature = htonl(DSD_IMAGE_MAGIC_NUMBER);
- uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader;
-
- uiOffset += FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber);
-
- if ((ReadDSDSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Corrupted Pattern is not there. Hence won't write sig");
- return STATUS_FAILURE;
- }
- } else if ((eFlashSectionVal == ISO_IMAGE1) || (eFlashSectionVal == ISO_IMAGE2)) {
- uiSignature = htonl(ISO_IMAGE_MAGIC_NUMBER);
- /* uiOffset = 0; */
- uiOffset = FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber);
- if ((ReadISOSignature(Adapter, eFlashSectionVal) & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Corrupted Pattern is not there. Hence won't write sig");
- return STATUS_FAILURE;
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GIVEN SECTION< %d > IS NOT VALID FOR SIG WRITE...", eFlashSectionVal);
- return STATUS_FAILURE;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature");
-
- Adapter->bHeaderChangeAllowed = TRUE;
- Adapter->bSigCorrupted = false;
- BcmFlash2xBulkWrite(Adapter, &uiSignature, eFlashSectionVal, uiOffset, SIGNATURE_SIZE, TRUE);
- Adapter->bHeaderChangeAllowed = false;
-
- return STATUS_SUCCESS;
-}
-
-/*
- * validateFlash2xReadWrite :- This API is used to validate a user request for read/write.
- * If the requested bytes go beyond the requested section, it reports an error.
- * @Adapter :- Bcm driver private data structure
- * @psFlash2xReadWrite :- Flash2x read/write structure pointer
- *
- * Return values:- Returns TRUE if the request is valid, else false.
- */
-
-int validateFlash2xReadWrite(struct bcm_mini_adapter *Adapter, struct bcm_flash2x_readwrite *psFlash2xReadWrite)
-{
- unsigned int uiNumOfBytes = 0;
- unsigned int uiSectStartOffset = 0;
- unsigned int uiSectEndOffset = 0;
-
- uiNumOfBytes = psFlash2xReadWrite->numOfBytes;
-
- if (IsSectionExistInFlash(Adapter, psFlash2xReadWrite->Section) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%x> does not exist in Flash", psFlash2xReadWrite->Section);
- return false;
- }
- uiSectStartOffset = BcmGetSectionValStartOffset(Adapter, psFlash2xReadWrite->Section);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Start offset :%x ,section :%d\n", uiSectStartOffset, psFlash2xReadWrite->Section);
- if ((psFlash2xReadWrite->Section == ISO_IMAGE1) || (psFlash2xReadWrite->Section == ISO_IMAGE2)) {
- if (psFlash2xReadWrite->Section == ISO_IMAGE1) {
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE1_PART3) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1_PART3);
- } else if (psFlash2xReadWrite->Section == ISO_IMAGE2) {
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART2) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART2) +
- BcmGetSectionValEndOffset(Adapter, ISO_IMAGE2_PART3) -
- BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2_PART3);
- }
-
- /* Since uiSectEndOffset here is the size of the ISO image, it has to be added to the start offset
- * to get the virtual end offset, so that the boundary check at the end of this function stays valid.
- */
- uiSectEndOffset = uiSectStartOffset + uiSectEndOffset;
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Total size of the ISO Image :%x", uiSectEndOffset);
- } else
- uiSectEndOffset = BcmGetSectionValEndOffset(Adapter, psFlash2xReadWrite->Section);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "End offset :%x\n", uiSectEndOffset);
-
- /* psFlash2xReadWrite->offset and uiNumOfBytes are user controlled and can lead to integer overflows */
- if (psFlash2xReadWrite->offset > uiSectEndOffset) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
- if (uiNumOfBytes > uiSectEndOffset) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
- /* Checking the boundary condition */
- if ((uiSectStartOffset + psFlash2xReadWrite->offset + uiNumOfBytes) <= uiSectEndOffset)
- return TRUE;
- else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid Request....");
- return false;
- }
-}
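
The final boundary check above adds three unsigned values before comparing, and the preceding comment notes that offset and numOfBytes are user-controlled, so the sum can wrap. The sketch below is a hedged, equivalent rearrangement of that check (subtracting from the known end instead of adding user input); the helper name and the sample values are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

/*
 * Overflow-safe variant of "start + offset + num_bytes <= end":
 * work with the section size and subtract before comparing, so
 * user-supplied values can never wrap the arithmetic.
 */
static bool request_in_bounds(unsigned int sect_start, unsigned int sect_end,
			      unsigned int offset, unsigned int num_bytes)
{
	if (sect_end < sect_start)
		return false;
	if (offset > sect_end - sect_start)
		return false;
	return num_bytes <= (sect_end - sect_start) - offset;
}

int main(void)
{
	/* Hypothetical section of 0x10000 bytes starting at 0x20000. */
	printf("%d\n", request_in_bounds(0x20000, 0x30000, 0x0FF00, 0x100));    /* 1: fits exactly */
	printf("%d\n", request_in_bounds(0x20000, 0x30000, 0x0FF00, 0x101));    /* 0: one byte over */
	printf("%d\n", request_in_bounds(0x20000, 0x30000, 0xFFFFFFF0u, 0x20)); /* 0: would wrap if added */
	return 0;
}
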
-
-/*
- * IsFlash2x :- check for Flash 2.x
- * @Adapter :- Bcm driver private data structure
- *
- * Return value:-
- * Returns TRUE for flash 2.x or a higher version, else returns false.
- */
-
-int IsFlash2x(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->uiFlashLayoutMajorVersion >= FLASH_2X_MAJOR_NUMBER)
- return TRUE;
- else
- return false;
-}
-
-/*
- * GetFlashBaseAddr :- Calculate the Flash Base address
- * @Adapter :- Bcm driver private data structure
- *
- * Return Value:-
- * Success :- Base Address of the Flash
- */
-
-static int GetFlashBaseAddr(struct bcm_mini_adapter *Adapter)
-{
- unsigned int uiBaseAddr = 0;
-
- if (Adapter->bDDRInitDone) {
- /*
- * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
- * In case of Raw Read... use the default value
- */
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
- !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- uiBaseAddr = Adapter->uiFlashBaseAdd;
- else
- uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT;
- } else {
- /*
- * For All Valid Flash Versions... except 1.1, take the value from FlashBaseAddr
- * In case of Raw Read... use the default value
- */
- if (Adapter->uiFlashLayoutMajorVersion && (Adapter->bFlashRawRead == false) &&
- !((Adapter->uiFlashLayoutMajorVersion == 1) && (Adapter->uiFlashLayoutMinorVersion == 1)))
- uiBaseAddr = Adapter->uiFlashBaseAdd | FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- else
- uiBaseAddr = FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT;
- }
-
- return uiBaseAddr;
-}
-
-/*
- * BcmCopySection :- This API is used to copy one section into another. Both sections should
- * be contiguous and of the same size; hence this is not applicable for copying an ISO.
- *
- * @Adapter :- Bcm driver private data structure
- * @SrcSection :- Source section from where data has to be copied
- * @DstSection :- Destination section to which data has to be copied
- * @offset :- Offset from/to where data has to be copied from one section to another.
- * @numOfBytes :- Number of bytes to be copied from one section to another at the given offset.
- * If numOfBytes is zero, the complete section will be copied.
- * Return Values-
- * Success : Return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-int BcmCopySection(struct bcm_mini_adapter *Adapter,
- enum bcm_flash2x_section_val SrcSection,
- enum bcm_flash2x_section_val DstSection,
- unsigned int offset,
- unsigned int numOfBytes)
-{
- unsigned int BuffSize = 0;
- unsigned int BytesToBeCopied = 0;
- PUCHAR pBuff = NULL;
- int Status = STATUS_SUCCESS;
-
- if (SrcSection == DstSection) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source and Destination should be different ...try again");
- return -EINVAL;
- }
-
- if ((SrcSection != DSD0) && (SrcSection != DSD1) && (SrcSection != DSD2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Source should be DSD subsection");
- return -EINVAL;
- }
-
- if ((DstSection != DSD0) && (DstSection != DSD1) && (DstSection != DSD2)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destination should be DSD subsection");
- return -EINVAL;
- }
-
- /* numOfBytes of zero means the complete section has to be copied */
- if (numOfBytes == 0) {
- numOfBytes = BcmGetSectionValEndOffset(Adapter, SrcSection)
- - BcmGetSectionValStartOffset(Adapter, SrcSection);
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Section Size :0x%x", numOfBytes);
- }
-
- if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, SrcSection)
- - BcmGetSectionValStartOffset(Adapter, SrcSection)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " Input parameters going beyond the section offS: %x numB: %x of Source Section\n",
- offset, numOfBytes);
- return -EINVAL;
- }
-
- if ((offset + numOfBytes) > BcmGetSectionValEndOffset(Adapter, DstSection)
- - BcmGetSectionValStartOffset(Adapter, DstSection)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Input parameters going beyond the section offS: %x numB: %x of Destination Section\n",
- offset, numOfBytes);
- return -EINVAL;
- }
-
- if (numOfBytes > Adapter->uiSectorSize)
- BuffSize = Adapter->uiSectorSize;
- else
- BuffSize = numOfBytes;
-
- pBuff = kzalloc(BuffSize, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed.. ");
- return -ENOMEM;
- }
-
- BytesToBeCopied = Adapter->uiSectorSize;
- if (offset % Adapter->uiSectorSize)
- BytesToBeCopied = Adapter->uiSectorSize - (offset % Adapter->uiSectorSize);
- if (BytesToBeCopied > numOfBytes)
- BytesToBeCopied = numOfBytes;
-
- Adapter->bHeaderChangeAllowed = TRUE;
-
- do {
- Status = BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, SrcSection , offset, BytesToBeCopied);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read failed at offset :%d for NOB :%d", SrcSection, BytesToBeCopied);
- break;
- }
- Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pBuff, DstSection, offset, BytesToBeCopied, false);
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Write failed at offset :%d for NOB :%d", DstSection, BytesToBeCopied);
- break;
- }
- offset = offset + BytesToBeCopied;
- numOfBytes = numOfBytes - BytesToBeCopied;
- if (numOfBytes) {
- if (numOfBytes > Adapter->uiSectorSize)
- BytesToBeCopied = Adapter->uiSectorSize;
- else
- BytesToBeCopied = numOfBytes;
- }
- } while (numOfBytes > 0);
-
- kfree(pBuff);
- Adapter->bHeaderChangeAllowed = false;
-
- return Status;
-}
-
-/*
- * SaveHeaderIfPresent :- This API is used to protect the header in case of a header-sector write
- * @Adapter :- Bcm driver private data structure
- * @pBuff :- Data buffer that has to be written in the sector having the header map.
- * @uiOffset :- Flash offset that has to be written.
- *
- * Return value :-
- * Success :- On success return STATUS_SUCCESS
- * Failure :- Return a negative error code
- */
-
-static int SaveHeaderIfPresent(struct bcm_mini_adapter *Adapter, PUCHAR pBuff, unsigned int uiOffset)
-{
- unsigned int offsetToProtect = 0, HeaderSizeToProtect = 0;
- bool bHasHeader = false;
- PUCHAR pTempBuff = NULL;
- unsigned int uiSectAlignAddr = 0;
- unsigned int sig = 0;
-
- /* making the offset sector aligned */
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
-
- if ((uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD2) - Adapter->uiSectorSize) ||
- (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD1) - Adapter->uiSectorSize) ||
- (uiSectAlignAddr == BcmGetSectionValEndOffset(Adapter, DSD0) - Adapter->uiSectorSize)) {
- /* offset from the sector boundary having the header map */
- offsetToProtect = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader % Adapter->uiSectorSize;
- HeaderSizeToProtect = sizeof(struct bcm_dsd_header);
- bHasHeader = TRUE;
- }
-
- if (uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE1) ||
- uiSectAlignAddr == BcmGetSectionValStartOffset(Adapter, ISO_IMAGE2)) {
- offsetToProtect = 0;
- HeaderSizeToProtect = sizeof(struct bcm_iso_header);
- bHasHeader = TRUE;
- }
- /* If a header is present, overwrite the passed buffer with it */
- if (bHasHeader && (Adapter->bHeaderChangeAllowed == false)) {
- pTempBuff = kzalloc(HeaderSizeToProtect, GFP_KERNEL);
- if (!pTempBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed");
- return -ENOMEM;
- }
- /* Read header */
- BeceemFlashBulkRead(Adapter, (PUINT)pTempBuff, (uiSectAlignAddr + offsetToProtect), HeaderSizeToProtect);
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pTempBuff, HeaderSizeToProtect);
- /* Replace Buffer content with Header */
- memcpy(pBuff + offsetToProtect, pTempBuff, HeaderSizeToProtect);
-
- kfree(pTempBuff);
- }
- if (bHasHeader && Adapter->bSigCorrupted) {
- sig = *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber)));
- sig = ntohl(sig);
- if ((sig & 0xFF000000) != CORRUPTED_PATTERN) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Desired pattern is not at sig offset. Hence won't restore");
- Adapter->bSigCorrupted = false;
- return STATUS_SUCCESS;
- }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, " Corrupted sig is :%X", sig);
- *((PUINT)(pBuff + offsetToProtect + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber))) = htonl(DSD_IMAGE_MAGIC_NUMBER);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Restoring the signature in Header Write only");
- Adapter->bSigCorrupted = false;
- }
-
- return STATUS_SUCCESS;
-}
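
The header protection above relies on rounding an arbitrary flash offset down to its sector boundary with uiOffset & ~(uiSectorSize - 1), which is only valid when the sector size is a power of two. A tiny illustrative sketch of that alignment step, with a hypothetical 64 KiB sector size:

#include <stdio.h>

/* Round 'offset' down to the start of its sector; sector_size must be a power of two. */
static unsigned int sector_align(unsigned int offset, unsigned int sector_size)
{
	return offset & ~(sector_size - 1);
}

int main(void)
{
	unsigned int sector_size = 0x10000; /* hypothetical 64 KiB flash sector */

	printf("0x%x\n", sector_align(0x0002345C, sector_size)); /* 0x20000 */
	printf("0x%x\n", sector_align(0x0002FFFF, sector_size)); /* 0x20000 */
	printf("0x%x\n", sector_align(0x00030000, sector_size)); /* 0x30000 */
	return 0;
}
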
-
-/*
- * BcmDoChipSelect : This will select the appropriate chip for writing.
- * @Adapter :- Bcm driver private data structure
- *
- * Output:-
- * Selects the appropriate chip and returns STATUS_SUCCESS
- */
-static int BcmDoChipSelect(struct bcm_mini_adapter *Adapter, unsigned int offset)
-{
- unsigned int FlashConfig = 0;
- int ChipNum = 0;
- unsigned int GPIOConfig = 0;
- unsigned int PartNum = 0;
-
- ChipNum = offset / FLASH_PART_SIZE;
-
- /*
- * Chip select mapping to enable flash 0.
- * To select flash 0 we would have to OR with (0 << 12);
- * ORing with 0 has no effect, so that part is skipped.
- * If in future the chip select value changes from 0 to non-zero,
- * that needs to be handled with backward compatibility in mind. No worries for now.
- */
-
- /*
- * The SelectedChip variable is the selection that the host is 100% sure matches what the register holds. This can only be ensured
- * if the chip does not go to low-power mode while a flash operation is in progress (NVMRdmWrmLock is taken).
- * Before every new flash write operation we reset the variable, because after any wake-up from
- * power-down modes (idle mode/shutdown mode) the values in the register may be different.
- */
-
- if (Adapter->SelectedChip == ChipNum)
- return STATUS_SUCCESS;
-
- /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Selected Chip :%x", ChipNum); */
- Adapter->SelectedChip = ChipNum;
-
- /* bit[13..12] will select the appropriate chip */
- rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
- rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- {
- switch (ChipNum) {
- case 0:
- PartNum = 0;
- break;
- case 1:
- PartNum = 3;
- GPIOConfig |= (0x4 << CHIP_SELECT_BIT12);
- break;
- case 2:
- PartNum = 1;
- GPIOConfig |= (0x1 << CHIP_SELECT_BIT12);
- break;
- case 3:
- PartNum = 2;
- GPIOConfig |= (0x2 << CHIP_SELECT_BIT12);
- break;
- }
- }
- /* In case the bits already written in FLASH_CONFIG_REG are the same as what the user desired,
- * there is nothing to do and we can return immediately.
- * ASSUMPTION: FLASH_GPIO_CONFIG_REG will be in sync with FLASH_CONFIG_REG.
- * Even if the chip goes to low power mode, it should wake with values in each register in sync with each other.
- * These values are not written by host other than during CHIP_SELECT.
- */
- if (PartNum == ((FlashConfig >> CHIP_SELECT_BIT12) & 0x3))
- return STATUS_SUCCESS;
-
- /* clearing the bit[13..12] */
- FlashConfig &= 0xFFFFCFFF;
- FlashConfig = (FlashConfig | (PartNum<<CHIP_SELECT_BIT12)); /* 00 */
-
- wrmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- udelay(100);
-
- wrmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
- udelay(100);
-
- return STATUS_SUCCESS;
-}
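
The chip select above derives the chip number from the absolute flash offset and then maps it to the 2-bit part value written into bits [13:12] of FLASH_CONFIG_REG (chip 0 -> part 0, 1 -> 3, 2 -> 1, 3 -> 2, per the switch above). A minimal sketch of just that mapping; the FLASH_PART_SIZE value and the sample register value are illustrative assumptions, not the driver's definitions.

#include <stdio.h>

/* Illustrative values only; the driver defines its own constants. */
#define FLASH_PART_SIZE   (16 * 1024 * 1024)  /* assumed per-chip flash size */
#define CHIP_SELECT_BIT12 12

/* Map a chip number to the part value placed in bits [13:12], per the driver's switch. */
static unsigned int chip_to_part(int chip)
{
	static const unsigned int part[4] = { 0, 3, 1, 2 };

	return part[chip & 0x3];
}

int main(void)
{
	unsigned int offset = 0x2400000;        /* some absolute flash offset */
	int chip = offset / FLASH_PART_SIZE;    /* which chip holds this offset */
	unsigned int flash_config = 0x0000C000; /* pretend current register value */

	/* Clear bits [13:12] and insert the part value for the selected chip. */
	flash_config &= ~(0x3u << CHIP_SELECT_BIT12);
	flash_config |= chip_to_part(chip) << CHIP_SELECT_BIT12;

	printf("chip %d -> part %u, FLASH_CONFIG 0x%08x\n",
	       chip, chip_to_part(chip), flash_config);
	return 0;
}
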
-
-static int ReadDSDSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
-{
- unsigned int uiDSDsig = 0;
- /* unsigned int sigoffsetInMap = 0;
- * struct bcm_dsd_header dsdHeader = {0};
- */
-
- /* sigoffsetInMap =(PUCHAR)&(dsdHeader.DSDImageMagicNumber) -(PUCHAR)&dsdHeader; */
-
- if (dsd != DSD0 && dsd != DSD1 && dsd != DSD2) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "passed section value is not for DSDs");
- return STATUS_FAILURE;
- }
- BcmFlash2xBulkRead(Adapter,
- &uiDSDsig,
- dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImageMagicNumber),
- SIGNATURE_SIZE);
-
- uiDSDsig = ntohl(uiDSDsig);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD SIG :%x", uiDSDsig);
-
- return uiDSDsig;
-}
-
-static int ReadDSDPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val dsd)
-{
- /* unsigned int priOffsetInMap = 0 ; */
- unsigned int uiDSDPri = STATUS_FAILURE;
- /* struct bcm_dsd_header dsdHeader = {0};
- * priOffsetInMap = (PUCHAR)&(dsdHeader.DSDImagePriority) -(PUCHAR)&dsdHeader;
- */
- if (IsSectionWritable(Adapter, dsd)) {
- if (ReadDSDSignature(Adapter, dsd) == DSD_IMAGE_MAGIC_NUMBER) {
- BcmFlash2xBulkRead(Adapter,
- &uiDSDPri,
- dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + FIELD_OFFSET_IN_HEADER(struct bcm_dsd_header *, DSDImagePriority),
- 4);
-
- uiDSDPri = ntohl(uiDSDPri);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSD<%x> Priority :%x", dsd, uiDSDPri);
- }
- }
-
- return uiDSDPri;
-}
-
-static enum bcm_flash2x_section_val getHighestPriDSD(struct bcm_mini_adapter *Adapter)
-{
- int DSDHighestPri = STATUS_FAILURE;
- int DsdPri = 0;
- enum bcm_flash2x_section_val HighestPriDSD = 0;
-
- if (IsSectionWritable(Adapter, DSD2)) {
- DSDHighestPri = ReadDSDPriority(Adapter, DSD2);
- HighestPriDSD = DSD2;
- }
-
- if (IsSectionWritable(Adapter, DSD1)) {
- DsdPri = ReadDSDPriority(Adapter, DSD1);
- if (DSDHighestPri < DsdPri) {
- DSDHighestPri = DsdPri;
- HighestPriDSD = DSD1;
- }
- }
-
- if (IsSectionWritable(Adapter, DSD0)) {
- DsdPri = ReadDSDPriority(Adapter, DSD0);
- if (DSDHighestPri < DsdPri) {
- DSDHighestPri = DsdPri;
- HighestPriDSD = DSD0;
- }
- }
- if (HighestPriDSD)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest DSD :%x , and its Pri :%x", HighestPriDSD, DSDHighestPri);
-
- return HighestPriDSD;
-}
-
-static int ReadISOSignature(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
-{
- unsigned int uiISOsig = 0;
- /* unsigned int sigoffsetInMap = 0;
- * struct bcm_iso_header ISOHeader = {0};
- * sigoffsetInMap =(PUCHAR)&(ISOHeader.ISOImageMagicNumber) -(PUCHAR)&ISOHeader;
- */
- if (iso != ISO_IMAGE1 && iso != ISO_IMAGE2) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "passed section value is not for ISOs");
- return STATUS_FAILURE;
- }
- BcmFlash2xBulkRead(Adapter,
- &uiISOsig,
- iso,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImageMagicNumber),
- SIGNATURE_SIZE);
-
- uiISOsig = ntohl(uiISOsig);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO SIG :%x", uiISOsig);
-
- return uiISOsig;
-}
-
-static int ReadISOPriority(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val iso)
-{
- unsigned int ISOPri = STATUS_FAILURE;
-
- if (IsSectionWritable(Adapter, iso)) {
- if (ReadISOSignature(Adapter, iso) == ISO_IMAGE_MAGIC_NUMBER) {
- BcmFlash2xBulkRead(Adapter,
- &ISOPri,
- iso,
- 0 + FIELD_OFFSET_IN_HEADER(struct bcm_iso_header *, ISOImagePriority),
- 4);
-
- ISOPri = ntohl(ISOPri);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISO<%x> Priority :%x", iso, ISOPri);
- }
- }
-
- return ISOPri;
-}
-
-static enum bcm_flash2x_section_val getHighestPriISO(struct bcm_mini_adapter *Adapter)
-{
- int ISOHighestPri = STATUS_FAILURE;
- int ISOPri = 0;
- enum bcm_flash2x_section_val HighestPriISO = NO_SECTION_VAL;
-
- if (IsSectionWritable(Adapter, ISO_IMAGE2)) {
- ISOHighestPri = ReadISOPriority(Adapter, ISO_IMAGE2);
- HighestPriISO = ISO_IMAGE2;
- }
-
- if (IsSectionWritable(Adapter, ISO_IMAGE1)) {
- ISOPri = ReadISOPriority(Adapter, ISO_IMAGE1);
- if (ISOHighestPri < ISOPri) {
- ISOHighestPri = ISOPri;
- HighestPriISO = ISO_IMAGE1;
- }
- }
- if (HighestPriISO)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Highest ISO :%x and its Pri :%x", HighestPriISO, ISOHighestPri);
-
- return HighestPriISO;
-}
-
-static int WriteToFlashWithoutSectorErase(struct bcm_mini_adapter *Adapter,
- PUINT pBuff,
- enum bcm_flash2x_section_val eFlash2xSectionVal,
- unsigned int uiOffset,
- unsigned int uiNumBytes)
-{
- #if !defined(BCM_SHM_INTERFACE) || defined(FLASH_DIRECT_ACCESS)
- unsigned int uiTemp = 0, value = 0;
- unsigned int i = 0;
- unsigned int uiPartOffset = 0;
- #endif
- unsigned int uiStartOffset = 0;
- /* Adding section start address */
- int Status = STATUS_SUCCESS;
- PUCHAR pcBuff = (PUCHAR)pBuff;
-
- if (uiNumBytes % Adapter->ulFlashWriteSize) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Writing without Sector Erase for non-FlashWriteSize number of bytes 0x%x\n", uiNumBytes);
- return STATUS_FAILURE;
- }
-
- uiStartOffset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal);
-
- if (IsSectionExistInVendorInfo(Adapter, eFlash2xSectionVal))
- return vendorextnWriteSectionWithoutErase(Adapter, pcBuff, eFlash2xSectionVal, uiOffset, uiNumBytes);
-
- uiOffset = uiOffset + uiStartOffset;
-
- #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
- Status = bcmflash_raw_writenoerase((uiOffset / FLASH_PART_SIZE), (uiOffset % FLASH_PART_SIZE), pcBuff, uiNumBytes);
- #else
- rdmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- value = 0;
- wrmalt(Adapter, 0x0f000C80, &value, sizeof(value));
-
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- BcmDoChipSelect(Adapter, uiOffset);
- uiPartOffset = (uiOffset & (FLASH_PART_SIZE - 1)) + GetFlashBaseAddr(Adapter);
-
- for (i = 0; i < uiNumBytes; i += Adapter->ulFlashWriteSize) {
- if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT)
- Status = flashByteWrite(Adapter, uiPartOffset, pcBuff);
- else
- Status = flashWrite(Adapter, uiPartOffset, pcBuff);
-
- if (Status != STATUS_SUCCESS)
- break;
-
- pcBuff = pcBuff + Adapter->ulFlashWriteSize;
- uiPartOffset = uiPartOffset + Adapter->ulFlashWriteSize;
- }
- wrmalt(Adapter, 0x0f000C80, &uiTemp, sizeof(uiTemp));
- Adapter->SelectedChip = RESET_CHIP_SELECT;
- #endif
-
- return Status;
-}
-
-bool IsSectionExistInFlash(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val section)
-{
- bool SectionPresent = false;
-
- switch (section) {
- case ISO_IMAGE1:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage1Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectionPresent = TRUE;
- break;
- case ISO_IMAGE2:
- if ((Adapter->psFlash2xCSInfo->OffsetISOImage2Part1Start != UNINIT_PTR_IN_CS) &&
- (IsNonCDLessDevice(Adapter) == false))
- SectionPresent = TRUE;
- break;
- case DSD0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSDStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case DSD1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD1Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case DSD2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForDSD2Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA0:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSAStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA1:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA1Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case VSA2:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForVSA2Start != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case SCSI:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForScsiFirmware != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- case CONTROL_SECTION:
- if (Adapter->psFlash2xCSInfo->OffsetFromZeroForControlSectionStart != UNINIT_PTR_IN_CS)
- SectionPresent = TRUE;
- break;
- default:
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section Does not exist in Flash 2.x");
- SectionPresent = false;
- }
-
- return SectionPresent;
-}
-
-static int IsSectionWritable(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val Section)
-{
- int offset = STATUS_FAILURE;
- int Status = false;
-
- if (IsSectionExistInFlash(Adapter, Section) == false) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section <%d> does not exist", Section);
- return false;
- }
-
- offset = BcmGetSectionValStartOffset(Adapter, Section);
- if (offset == INVALID_OFFSET) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section<%d> does not exist", Section);
- return false;
- }
-
- if (IsSectionExistInVendorInfo(Adapter, Section))
- return !(Adapter->psFlash2xVendorInfo->VendorSection[Section].AccessFlags & FLASH2X_SECTION_RO);
-
- Status = IsOffsetWritable(Adapter, offset);
- return Status;
-}
-
-static int CorruptDSDSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- PUCHAR pBuff = NULL;
- unsigned int sig = 0;
- unsigned int uiOffset = 0;
- unsigned int BlockStatus = 0;
- unsigned int uiSectAlignAddr = 0;
-
- Adapter->bSigCorrupted = false;
- if (Adapter->bAllDSDWriteAllow == false) {
- if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
- }
-
- pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memory");
- return -ENOMEM;
- }
-
- uiOffset = Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(struct bcm_dsd_header);
- uiOffset -= MAX_RW_SIZE;
-
- BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE);
-
- sig = *((PUINT)(pBuff + 12));
- sig = ntohl(sig);
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE);
- /* Now corrupting the sig by corrupting 4th last Byte. */
- *(pBuff + 12) = 0;
-
- if (sig == DSD_IMAGE_MAGIC_NUMBER) {
- Adapter->bSigCorrupted = TRUE;
- if (Adapter->ulFlashWriteSize == BYTE_WRITE_SUPPORT) {
- uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
- BlockStatus = BcmFlashUnProtectBlock(Adapter, uiSectAlignAddr, Adapter->uiSectorSize);
-
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)(pBuff + 12), eFlash2xSectionVal,
- (uiOffset + 12), BYTE_WRITE_SUPPORT);
- if (BlockStatus) {
- BcmRestoreBlockProtectStatus(Adapter, BlockStatus);
- BlockStatus = 0;
- }
- } else {
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal,
- uiOffset, MAX_RW_SIZE);
- }
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header");
- kfree(pBuff);
-
- return STATUS_FAILURE;
- }
-
- kfree(pBuff);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature");
-
- return STATUS_SUCCESS;
-}
-
-static int CorruptISOSig(struct bcm_mini_adapter *Adapter, enum bcm_flash2x_section_val eFlash2xSectionVal)
-{
- PUCHAR pBuff = NULL;
- unsigned int sig = 0;
- unsigned int uiOffset = 0;
-
- Adapter->bSigCorrupted = false;
-
- if (IsSectionWritable(Adapter, eFlash2xSectionVal) != TRUE) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Section is not Writable...Hence can't Corrupt signature");
- return SECTOR_IS_NOT_WRITABLE;
- }
-
- pBuff = kzalloc(MAX_RW_SIZE, GFP_KERNEL);
- if (!pBuff) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Can't allocate memory");
- return -ENOMEM;
- }
-
- uiOffset = 0;
-
- BcmFlash2xBulkRead(Adapter, (PUINT)pBuff, eFlash2xSectionVal, uiOffset, MAX_RW_SIZE);
-
- sig = *((PUINT)pBuff);
- sig = ntohl(sig);
-
- /* corrupt signature */
- *pBuff = 0;
-
- if (sig == ISO_IMAGE_MAGIC_NUMBER) {
- Adapter->bSigCorrupted = TRUE;
- WriteToFlashWithoutSectorErase(Adapter, (PUINT)pBuff, eFlash2xSectionVal,
- uiOffset, Adapter->ulFlashWriteSize);
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "BCM Signature is not present in header");
- kfree(pBuff);
-
- return STATUS_FAILURE;
- }
-
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Corrupted the signature");
- BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, pBuff, MAX_RW_SIZE);
-
- kfree(pBuff);
- return STATUS_SUCCESS;
-}
-
-bool IsNonCDLessDevice(struct bcm_mini_adapter *Adapter)
-{
- if (Adapter->psFlash2xCSInfo->IsCDLessDeviceBootSig == NON_CDLESS_DEVICE_BOOT_SIG)
- return TRUE;
- else
- return false;
-}
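
The chip-select path in BcmDoChipSelect() above clears bits [13:12] of FLASH_CONFIG_REG with the literal mask 0xFFFFCFFF before writing the two-bit part number. As a standalone illustration only (not part of the driver), the same two-bit field update can be written generically:

    #include <stdio.h>

    #define CHIP_SELECT_BIT12 12

    /* Clear bits [13:12], then set them to the two-bit part number. */
    static unsigned int set_flash_part(unsigned int flash_config, unsigned int part)
    {
            flash_config &= ~(0x3u << CHIP_SELECT_BIT12);   /* same effect as & 0xFFFFCFFF */
            flash_config |= (part & 0x3u) << CHIP_SELECT_BIT12;
            return flash_config;
    }

    int main(void)
    {
            printf("0x%08X\n", set_flash_part(0xFFFFFFFFu, 2));    /* prints 0xFFFFEFFF */
            return 0;
    }
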
diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h
deleted file mode 100644
index e765cca5d966..000000000000
--- a/drivers/staging/bcm/nvm.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/***************************************************************************************
- *
- * Copyright (c) Beceem Communications Inc.
- *
- * Module Name:
- * NVM.h
- *
- * Abstract:
- * This file has the prototypes, preprocessors and definitions of the various NVM libraries.
- *
- *
- * Revision History:
- * Who When What
- * -------- -------- ----------------------------------------------
- * Name Date Created/reviewed/modified
- *
- * Notes:
- *
- ****************************************************************************************/
-
-#ifndef _NVM_H_
-#define _NVM_H_
-
-struct bcm_flash_cs_info {
- u32 MagicNumber;
- /* let the magic number be 0xBECE-F1A5 - F1A5 for "flas-h" */
- u32 FlashLayoutVersion;
- u32 ISOImageVersion;
- u32 SCSIFirmwareVersion;
- u32 OffsetFromZeroForPart1ISOImage;
- u32 OffsetFromZeroForScsiFirmware;
- u32 SizeOfScsiFirmware;
- u32 OffsetFromZeroForPart2ISOImage;
- u32 OffsetFromZeroForCalibrationStart;
- u32 OffsetFromZeroForCalibrationEnd;
- u32 OffsetFromZeroForVSAStart;
- u32 OffsetFromZeroForVSAEnd;
- u32 OffsetFromZeroForControlSectionStart;
- u32 OffsetFromZeroForControlSectionData;
- u32 CDLessInactivityTimeout;
- u32 NewImageSignature;
- u32 FlashSectorSizeSig;
- u32 FlashSectorSize;
- u32 FlashWriteSupportSize;
- u32 TotalFlashSize;
- u32 FlashBaseAddr;
- u32 FlashPartMaxSize;
- u32 IsCDLessDeviceBootSig;
- /* MSC Timeout after reset to switch from MSC to NW Mode */
- u32 MassStorageTimeout;
-};
-
-#define FLASH2X_TOTAL_SIZE (64 * 1024 * 1024)
-#define DEFAULT_SECTOR_SIZE (64 * 1024)
-
-struct bcm_flash2x_cs_info {
- /* magic number as 0xBECE-F1A5 - F1A5 for "flas-h" */
- u32 MagicNumber;
- u32 FlashLayoutVersion;
- u32 ISOImageVersion;
- u32 SCSIFirmwareVersion;
- u32 OffsetFromZeroForPart1ISOImage;
- u32 OffsetFromZeroForScsiFirmware;
- u32 SizeOfScsiFirmware;
- u32 OffsetFromZeroForPart2ISOImage;
- u32 OffsetFromZeroForDSDStart;
- u32 OffsetFromZeroForDSDEnd;
- u32 OffsetFromZeroForVSAStart;
- u32 OffsetFromZeroForVSAEnd;
- u32 OffsetFromZeroForControlSectionStart;
- u32 OffsetFromZeroForControlSectionData;
- /* NO Data Activity timeout to switch from MSC to NW Mode */
- u32 CDLessInactivityTimeout;
- u32 NewImageSignature;
- u32 FlashSectorSizeSig;
- u32 FlashSectorSize;
- u32 FlashWriteSupportSize;
- u32 TotalFlashSize;
- u32 FlashBaseAddr;
- u32 FlashPartMaxSize;
- u32 IsCDLessDeviceBootSig;
- /* MSC Timeout after reset to switch from MSC to NW Mode */
- u32 MassStorageTimeout;
- /* Flash Map 2.0 Field */
- u32 OffsetISOImage1Part1Start;
- u32 OffsetISOImage1Part1End;
- u32 OffsetISOImage1Part2Start;
- u32 OffsetISOImage1Part2End;
- u32 OffsetISOImage1Part3Start;
- u32 OffsetISOImage1Part3End;
- u32 OffsetISOImage2Part1Start;
- u32 OffsetISOImage2Part1End;
- u32 OffsetISOImage2Part2Start;
- u32 OffsetISOImage2Part2End;
- u32 OffsetISOImage2Part3Start;
- u32 OffsetISOImage2Part3End;
- /* DSD Header offset from start of DSD */
- u32 OffsetFromDSDStartForDSDHeader;
- u32 OffsetFromZeroForDSD1Start;
- u32 OffsetFromZeroForDSD1End;
- u32 OffsetFromZeroForDSD2Start;
- u32 OffsetFromZeroForDSD2End;
- u32 OffsetFromZeroForVSA1Start;
- u32 OffsetFromZeroForVSA1End;
- u32 OffsetFromZeroForVSA2Start;
- u32 OffsetFromZeroForVSA2End;
- /*
- * ACCESS_BITS_PER_SECTOR 2
- * ACCESS_RW 0
- * ACCESS_RO 1
- * ACCESS_RESVD 2
- * ACCESS_RESVD 3
- */
- u32 SectorAccessBitMap[FLASH2X_TOTAL_SIZE / (DEFAULT_SECTOR_SIZE * 16)];
- /* All expansions to the control data structure should add here */
-};
-
-struct bcm_vendor_section_info {
- u32 OffsetFromZeroForSectionStart;
- u32 OffsetFromZeroForSectionEnd;
- u32 AccessFlags;
- u32 Reserved[16];
-};
-
-struct bcm_flash2x_vendor_info {
- struct bcm_vendor_section_info VendorSection[TOTAL_SECTIONS];
- u32 Reserved[16];
-};
-
-struct bcm_dsd_header {
- u32 DSDImageSize;
- u32 DSDImageCRC;
- u32 DSDImagePriority;
- /* We should not consider this right now; reading the reserved area is pointless. */
- u32 Reserved[252]; /* Resvd for DSD Header */
- u32 DSDImageMagicNumber;
-};
-
-struct bcm_iso_header {
- u32 ISOImageMagicNumber;
- u32 ISOImageSize;
- u32 ISOImageCRC;
- u32 ISOImagePriority;
- /* We should not consider this right now; reading the reserved area is pointless. */
- u32 Reserved[60]; /* Resvd for ISO Header extension */
-};
-
-#define EEPROM_BEGIN_CIS (0)
-#define EEPROM_BEGIN_NON_CIS (0x200)
-#define EEPROM_END (0x2000)
-#define INIT_PARAMS_SIGNATURE (0x95a7a597)
-#define MAX_INIT_PARAMS_LENGTH (2048)
-#define MAC_ADDRESS_OFFSET 0x200
-
-#define INIT_PARAMS_1_SIGNATURE_ADDRESS EEPROM_BEGIN_NON_CIS
-#define INIT_PARAMS_1_DATA_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+16)
-#define INIT_PARAMS_1_MACADDRESS_ADDRESS (MAC_ADDRESS_OFFSET)
-#define INIT_PARAMS_1_LENGTH_ADDRESS (INIT_PARAMS_1_SIGNATURE_ADDRESS+4)
-
-#define INIT_PARAMS_2_SIGNATURE_ADDRESS (EEPROM_BEGIN_NON_CIS + 2048 + 16)
-#define INIT_PARAMS_2_DATA_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 16)
-#define INIT_PARAMS_2_MACADDRESS_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 8)
-#define INIT_PARAMS_2_LENGTH_ADDRESS (INIT_PARAMS_2_SIGNATURE_ADDRESS + 4)
-
-#define EEPROM_SPI_DEV_CONFIG_REG 0x0F003000
-#define EEPROM_SPI_Q_STATUS1_REG 0x0F003004
-#define EEPROM_SPI_Q_STATUS1_MASK_REG 0x0F00300C
-
-#define EEPROM_SPI_Q_STATUS_REG 0x0F003008
-#define EEPROM_CMDQ_SPI_REG 0x0F003018
-#define EEPROM_WRITE_DATAQ_REG 0x0F00301C
-#define EEPROM_READ_DATAQ_REG 0x0F003020
-#define SPI_FLUSH_REG 0x0F00304C
-
-#define EEPROM_WRITE_ENABLE 0x06000000
-#define EEPROM_READ_STATUS_REGISTER 0x05000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_WRITE_QUEUE_EMPTY 0x00001000
-#define EEPROM_WRITE_QUEUE_AVAIL 0x00002000
-#define EEPROM_WRITE_QUEUE_FULL 0x00004000
-#define EEPROM_16_BYTE_PAGE_READ 0xFB000000
-#define EEPROM_4_BYTE_PAGE_READ 0x3B000000
-
-#define EEPROM_CMD_QUEUE_FLUSH 0x00000001
-#define EEPROM_WRITE_QUEUE_FLUSH 0x00000002
-#define EEPROM_READ_QUEUE_FLUSH 0x00000004
-#define EEPROM_ETH_QUEUE_FLUSH 0x00000008
-#define EEPROM_ALL_QUEUE_FLUSH 0x0000000f
-#define EEPROM_READ_ENABLE 0x06000000
-#define EEPROM_16_BYTE_PAGE_WRITE 0xFA000000
-#define EEPROM_READ_DATA_FULL 0x00000010
-#define EEPROM_READ_DATA_AVAIL 0x00000020
-#define EEPROM_READ_QUEUE_EMPTY 0x00000002
-#define EEPROM_CMD_QUEUE_EMPTY 0x00000100
-#define EEPROM_CMD_QUEUE_AVAIL 0x00000200
-#define EEPROM_CMD_QUEUE_FULL 0x00000400
-
-/* In most EEPROMs, status register bit 0 indicates that the EEPROM is busy
- * with a write when set to 1. See the details of the EEPROM Status Register
- * in the EEPROM data sheet.
- */
-#define EEPROM_STATUS_REG_WRITE_BUSY 0x00000001
-
-/* Each RETRIES_PER_DELAY count adds 1 ms of delay, with at most MAX_EEPROM_RETRIES attempts,
- * giving a minimum total delay of 80 ms.
- */
-#define MAX_EEPROM_RETRIES 80
-#define RETRIES_PER_DELAY 64
-#define MAX_RW_SIZE 0x10
-#define MAX_READ_SIZE 0x10
-#define MAX_SECTOR_SIZE (512 * 1024)
-#define MIN_SECTOR_SIZE (1024)
-#define FLASH_SECTOR_SIZE_OFFSET 0xEFFFC
-#define FLASH_SECTOR_SIZE_SIG_OFFSET 0xEFFF8
-#define FLASH_SECTOR_SIZE_SIG 0xCAFEBABE
-#define FLASH_CS_INFO_START_ADDR 0xFF0000
-#define FLASH_CONTROL_STRUCT_SIGNATURE 0xBECEF1A5
-#define SCSI_FIRMWARE_MAJOR_VERSION 0x1
-#define SCSI_FIRMWARE_MINOR_VERSION 0x5
-#define BYTE_WRITE_SUPPORT 0x1
-#define FLASH_AUTO_INIT_BASE_ADDR 0xF00000
-#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000
-#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000
-#define FLASH_CONTIGIOUS_START_ADDR_BCS350 0x08000000
-#define FLASH_CONTIGIOUS_END_ADDR_BCS350 0x08FFFFFF
-#define FLASH_SIZE_ADDR 0xFFFFEC
-#define FLASH_SPI_CMDQ_REG 0xAF003040
-#define FLASH_SPI_WRITEQ_REG 0xAF003044
-#define FLASH_SPI_READQ_REG 0xAF003048
-#define FLASH_CONFIG_REG 0xAF003050
-#define FLASH_GPIO_CONFIG_REG 0xAF000030
-#define FLASH_CMD_WRITE_ENABLE 0x06
-#define FLASH_CMD_READ_ENABLE 0x03
-#define FLASH_CMD_RESET_WRITE_ENABLE 0x04
-#define FLASH_CMD_STATUS_REG_READ 0x05
-#define FLASH_CMD_STATUS_REG_WRITE 0x01
-#define FLASH_CMD_READ_ID 0x9F
-#define PAD_SELECT_REGISTER 0xAF000410
-#define FLASH_PART_SST25VF080B 0xBF258E
-#define EEPROM_CAL_DATA_INTERNAL_LOC 0xbFB00008
-#define EEPROM_CALPARAM_START 0x200
-#define EEPROM_SIZE_OFFSET 524
-
-/* Read/Write time varies from 1.5 to 3.0 ms, so ignoring the rdm/wrm time
- * (which depends on many factors such as the interface), the delay calculated
- * here covers the worst case of 3.0 ms.
- */
-#define MAX_FLASH_RETRIES 4
-#define FLASH_PER_RETRIES_DELAY 16
-#define EEPROM_MAX_CAL_AREA_SIZE 0xF0000
-#define BECM ntohl(0x4245434d)
-#define FLASH_2X_MAJOR_NUMBER 0x2
-#define DSD_IMAGE_MAGIC_NUMBER 0xBECE0D5D
-#define ISO_IMAGE_MAGIC_NUMBER 0xBECE0150
-#define NON_CDLESS_DEVICE_BOOT_SIG 0xBECEB007
-
-#define MINOR_VERSION(x) ((x >> 16) & 0xFFFF)
-#define MAJOR_VERSION(x) (x & 0xFFFF)
-
-#define CORRUPTED_PATTERN 0x0
-#define UNINIT_PTR_IN_CS 0xBBBBDDDD
-#define VENDOR_PTR_IN_CS 0xAAAACCCC
-#define FLASH2X_SECTION_PRESENT (1 << 0)
-#define FLASH2X_SECTION_VALID (1 << 1)
-#define FLASH2X_SECTION_RO (1 << 2)
-#define FLASH2X_SECTION_ACT (1 << 3)
-#define SECTOR_IS_NOT_WRITABLE STATUS_FAILURE
-#define INVALID_OFFSET STATUS_FAILURE
-#define INVALID_SECTION STATUS_FAILURE
-#define SECTOR_1K 1024
-#define SECTOR_64K (64 * SECTOR_1K)
-#define SECTOR_128K (2 * SECTOR_64K)
-#define SECTOR_256k (2 * SECTOR_128K)
-#define SECTOR_512K (2 * SECTOR_256k)
-#define FLASH_PART_SIZE (16 * 1024 * 1024)
-#define RESET_CHIP_SELECT -1
-#define CHIP_SELECT_BIT12 12
-#define SECTOR_READWRITE_PERMISSION 0
-#define SECTOR_READONLY 1
-#define SIGNATURE_SIZE 4
-#define DEFAULT_BUFF_SIZE 0x10000
-
-#define FIELD_OFFSET_IN_HEADER(HeaderPointer, Field) ((u8 *)&((HeaderPointer)(NULL))->Field - (u8 *)(NULL))
-
-#endif
-
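
The FIELD_OFFSET_IN_HEADER macro above computes a member offset by pointer arithmetic on a NULL base, which is exactly what the standard offsetof() macro provides. A minimal standalone sketch (illustration only, using a reduced copy of the DSD header layout):

    #include <stddef.h>
    #include <stdio.h>

    struct demo_dsd_header {
            unsigned int DSDImageSize;
            unsigned int DSDImageCRC;
            unsigned int DSDImagePriority;
            unsigned int Reserved[252];
            unsigned int DSDImageMagicNumber;
    };

    int main(void)
    {
            /* With 4-byte ints this prints 1020: the magic number sits at the end of the header. */
            printf("%zu\n", offsetof(struct demo_dsd_header, DSDImageMagicNumber));
            return 0;
    }
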
diff --git a/drivers/staging/bcm/sort.c b/drivers/staging/bcm/sort.c
deleted file mode 100644
index ca0b17991512..000000000000
--- a/drivers/staging/bcm/sort.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include "headers.h"
-#include <linux/sort.h>
-
-/*
- * File Name: sort.c
- *
- * Author: Beceem Communications Pvt. Ltd
- *
- * Abstract: This file contains the routines for sorting the classification rules.
- *
- * Copyright (c) 2007 Beceem Communications Pvt. Ltd
- */
-
-static int compare_packet_info(void const *a, void const *b)
-{
- struct bcm_packet_info const *pa = a;
- struct bcm_packet_info const *pb = b;
-
- if (!pa->bValid || !pb->bValid)
- return 0;
-
- return pa->u8TrafficPriority - pb->u8TrafficPriority;
-}
-
-VOID SortPackInfo(struct bcm_mini_adapter *Adapter)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "<=======");
-
- sort(Adapter->PackInfo, NO_OF_QUEUES, sizeof(struct bcm_packet_info),
- compare_packet_info, NULL);
-}
-
-static int compare_classifiers(void const *a, void const *b)
-{
- struct bcm_classifier_rule const *pa = a;
- struct bcm_classifier_rule const *pb = b;
-
- if (!pa->bUsed || !pb->bUsed)
- return 0;
-
- return pa->u8ClassifierRulePriority - pb->u8ClassifierRulePriority;
-}
-
-VOID SortClassifiers(struct bcm_mini_adapter *Adapter)
-{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
- DBG_LVL_ALL, "<=======");
-
- sort(Adapter->astClassifierTable, MAX_CLASSIFIERS,
- sizeof(struct bcm_classifier_rule), compare_classifiers, NULL);
-}
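
The two comparators above return 0 for unused entries, so only valid entries are ordered by ascending priority. A userspace analogue of the same pattern (illustration only, using qsort() from <stdlib.h> rather than the kernel's sort()):

    #include <stdlib.h>

    struct demo_rule {
            int used;
            unsigned char priority;
    };

    static int compare_rules(const void *a, const void *b)
    {
            const struct demo_rule *pa = a;
            const struct demo_rule *pb = b;

            if (!pa->used || !pb->used)
                    return 0;                       /* leave unused slots alone */

            return pa->priority - pb->priority;     /* ascending priority */
    }

    /* usage: qsort(rules, nrules, sizeof(rules[0]), compare_rules); */
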
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
deleted file mode 100644
index dc45f9ab854d..000000000000
--- a/drivers/staging/bcm/target_params.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef TARGET_PARAMS_H
-#define TARGET_PARAMS_H
-
-struct bcm_target_params {
- u32 m_u32CfgVersion;
- u32 m_u32CenterFrequency;
- u32 m_u32BandAScan;
- u32 m_u32BandBScan;
- u32 m_u32BandCScan;
- u32 m_u32ErtpsOptions;
- u32 m_u32PHSEnable;
- u32 m_u32HoEnable;
- u32 m_u32HoReserved1;
- u32 m_u32HoReserved2;
- u32 m_u32MimoEnable;
- u32 m_u32SecurityEnable;
- u32 m_u32PowerSavingModesEnable; /* bit 1: 1 Idlemode enable; bit2: 1 Sleepmode Enable */
- /* PowerSaving Mode Options:
- * bit 0 = 1: CPE mode - to keep pcmcia if alive;
- * bit 1 = 1: CINR reporting in Idlemode Msg
- * bit 2 = 1: Default PSC Enable in sleepmode
- */
- u32 m_u32PowerSavingModeOptions;
- u32 m_u32ArqEnable;
- /* From Version #3, the HARQ section renamed as general */
- u32 m_u32HarqEnable;
- u32 m_u32EEPROMFlag;
- /* BINARY TYPE - 4th MSByte: Interface Type - 3rd MSByte: Vendor Type - 2nd MSByte
- * Unused - LSByte
- */
- u32 m_u32Customize;
- u32 m_u32ConfigBW; /* In Hz */
- u32 m_u32ShutDownInitThresholdTimer;
- u32 m_u32RadioParameter;
- u32 m_u32PhyParameter1;
- u32 m_u32PhyParameter2;
- u32 m_u32PhyParameter3;
- u32 m_u32TestOptions; /* in eval mode only; lower 16 bits = basic cid for testing; bit 16 is test cqich, bit 17 test initial ranging, bit 18 test periodic ranging and bit 19 test harq ack/nack */
- u32 m_u32MaxMACDataperDLFrame;
- u32 m_u32MaxMACDataperULFrame;
- u32 m_u32Corr2MacFlags;
- u32 HostDrvrConfig1;
- u32 HostDrvrConfig2;
- u32 HostDrvrConfig3;
- u32 HostDrvrConfig4;
- u32 HostDrvrConfig5;
- u32 HostDrvrConfig6;
- u32 m_u32SegmentedPUSCenable;
- /* removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
- * BAMC Related Parameters
- * Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
- * bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
- */
- u32 m_u32BandAMCEnable;
-};
-
-#endif
diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c
deleted file mode 100644
index 1d9bef6e4273..000000000000
--- a/drivers/staging/bcm/vendorspecificextn.c
+++ /dev/null
@@ -1,145 +0,0 @@
-#include "headers.h"
-/*
- * Procedure: vendorextnGetSectionInfo
- *
- * Description: Gets the vendor section info.
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pVendorInfo - ptr to the vendor section info.
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- */
-INT vendorextnGetSectionInfo(PVOID pContext,
- struct bcm_flash2x_vendor_info *pVendorInfo)
-{
- return STATUS_FAILURE;
-}
-
-/*
- * Procedure: vendorextnInit
- *
- * Description: Initializing the vendor extension NVM interface
- *
- * Arguments:
- * Adapter - Pointer to MINI Adapter Structure
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- *
- */
-INT vendorextnInit(struct bcm_mini_adapter *Adapter)
-{
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: vendorextnExit
- *
- * Description: Free the resource associated with vendor extension NVM interface
- *
- * Arguments:
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- *
- *
- */
-INT vendorextnExit(struct bcm_mini_adapter *Adapter)
-{
- return STATUS_SUCCESS;
-}
-
-/*
- * Procedure: vendorextnIoctl
- *
- * Description: execute the vendor extension specific ioctl
- *
- * Arguments:
- * Adapter -Beceem private Adapter Structure
- * cmd -vendor extension specific Ioctl command
- * arg -input parameter sent by vendor
- *
- * Returns:
- * CONTINUE_COMMON_PATH in case it is not meant to be processed
- * by vendor ioctls
- * STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value
- */
-
-INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg)
-{
- return CONTINUE_COMMON_PATH;
-}
-
-
-
-/*
- * Procedure: vendorextnReadSection
- *
- * Description: Reads from a section of NVM
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Read the data from Vendor Area to this buffer
- * SectionVal - Value of type of Section
- * Offset - Read from the Offset of the Vendor Section.
- * numOfBytes - Read numOfBytes from the Vendor section to Buffer
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-
-INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes)
-{
- return STATUS_FAILURE;
-}
-
-
-
-/*
- * Procedure: vendorextnWriteSection
- *
- * Description: Write to a Section of NVM
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Write the data provided in the buffer
- * SectionVal - Value of type of Section
- * Offset - Writes to the Offset of the Vendor Section.
- * numOfBytes - Write num Bytes after reading from pBuffer.
- * bVerify - the Buffer Written should be verified.
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset,
- UINT numOfBytes, bool bVerify)
-{
- return STATUS_FAILURE;
-}
-
-
-
-/*
- * Procedure: vendorextnWriteSectionWithoutErase
- *
- * Description: Write to a Section of NVM without erasing the sector
- *
- * Arguments:
- * pContext - ptr to Adapter object instance
- * pBuffer - Write the data provided in the buffer
- * SectionVal - Value of type of Section
- * Offset - Writes to the Offset of the Vendor Section.
- * numOfBytes - Write num Bytes after reading from pBuffer.
- *
- * Returns:
- * STATUS_SUCCESS/STATUS_FAILURE
- */
-INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer,
- enum bcm_flash2x_section_val SectionVal, UINT offset, UINT numOfBytes)
-{
- return STATUS_FAILURE;
-}
diff --git a/drivers/staging/bcm/vendorspecificextn.h b/drivers/staging/bcm/vendorspecificextn.h
deleted file mode 100644
index ff57f0570451..000000000000
--- a/drivers/staging/bcm/vendorspecificextn.h
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#ifndef __VENDOR_EXTN_NVM_H__
-#define __VENDOR_EXTN_NVM_H__
-
-#define CONTINUE_COMMON_PATH 0xFFFF
-
-INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo);
-INT vendorextnExit(struct bcm_mini_adapter *Adapter);
-INT vendorextnInit(struct bcm_mini_adapter *Adapter);
-INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg);
-INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes);
-INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes, bool bVerify);
-INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
- UINT offset, UINT numOfBytes);
-
-#endif /* */
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
new file mode 100644
index 000000000000..357af02c562c
--- /dev/null
+++ b/drivers/staging/clocking-wizard/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx Clocking Wizard Driver
+#
+
+config COMMON_CLK_XLNX_CLKWZRD
+ tristate "Xilinx Clocking Wizard"
+ depends on COMMON_CLK && OF
+ ---help---
+ Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
new file mode 100644
index 000000000000..5ad352f521fe
--- /dev/null
+++ b/drivers/staging/clocking-wizard/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
new file mode 100644
index 000000000000..ebe99db7d153
--- /dev/null
+++ b/drivers/staging/clocking-wizard/TODO
@@ -0,0 +1,12 @@
+TODO:
+ - support for fractional multiplier
+ - support for fractional divider (output 0 only)
+ - support for set_rate() operations (may benefit from Stephen Boyd's
+ refactoring of the clk primitives: https://lkml.org/lkml/2014/9/5/766)
+ - review arithmetic
+ - overflow after multiplication?
+ - maximize accuracy before divisions
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
new file mode 100644
index 000000000000..471d0877f382
--- /dev/null
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -0,0 +1,341 @@
+/*
+ * Xilinx 'Clocking Wizard' driver
+ *
+ * Copyright (C) 2013 - 2014 Xilinx
+ *
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#define WZRD_NUM_OUTPUTS 7
+#define WZRD_ACLK_MAX_FREQ 250000000UL
+
+#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
+
+#define WZRD_CLkOUT0_FRAC_EN BIT(18)
+#define WZRD_CLkFBOUT_FRAC_EN BIT(26)
+
+#define WZRD_CLKFBOUT_MULT_SHIFT 8
+#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_DIVCLK_DIVIDE_SHIFT 0
+#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+
+enum clk_wzrd_int_clks {
+ wzrd_clk_mul,
+ wzrd_clk_mul_div,
+ wzrd_clk_int_max
+};
+
+/**
+ * struct clk_wzrd:
+ * @clk_data: Clock data
+ * @nb: Notifier block
+ * @base: Memory base
+ * @clk_in1: Handle to input clock 'clk_in1'
+ * @axi_clk: Handle to input clock 's_axi_aclk'
+ * @clks_internal: Internal clocks
+ * @clkout: Output clocks
+ * @speed_grade: Speed grade of the device
+ * @suspended: Flag indicating power state of the device
+ */
+struct clk_wzrd {
+ struct clk_onecell_data clk_data;
+ struct notifier_block nb;
+ void __iomem *base;
+ struct clk *clk_in1;
+ struct clk *axi_clk;
+ struct clk *clks_internal[wzrd_clk_int_max];
+ struct clk *clkout[WZRD_NUM_OUTPUTS];
+ int speed_grade;
+ bool suspended;
+};
+#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
+
+/* maximum frequencies for input/output clocks per speed grade */
+static const unsigned long clk_wzrd_max_freq[] = {
+ 800000000UL,
+ 933000000UL,
+ 1066000000UL
+};
+
+static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ unsigned long max;
+ struct clk_notifier_data *ndata = data;
+ struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
+
+ if (clk_wzrd->suspended)
+ return NOTIFY_OK;
+
+ if (ndata->clk == clk_wzrd->clk_in1)
+ max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
+ if (ndata->clk == clk_wzrd->axi_clk)
+ max = WZRD_ACLK_MAX_FREQ;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (ndata->new_rate > max)
+ return NOTIFY_BAD;
+ return NOTIFY_OK;
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int __maybe_unused clk_wzrd_suspend(struct device *dev)
+{
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+ clk_wzrd->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused clk_wzrd_resume(struct device *dev)
+{
+ int ret;
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(clk_wzrd->axi_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable s_axi_aclk\n");
+ return ret;
+ }
+
+ clk_wzrd->suspended = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
+ clk_wzrd_resume);
+
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ u32 reg;
+ unsigned long rate;
+ const char *clk_name;
+ struct clk_wzrd *clk_wzrd;
+ struct resource *mem;
+ struct device_node *np = pdev->dev.of_node;
+
+ clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
+ if (!clk_wzrd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_wzrd);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(clk_wzrd->base))
+ return PTR_ERR(clk_wzrd->base);
+
+ ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
+ if (!ret) {
+ if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
+ dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
+ clk_wzrd->speed_grade);
+ clk_wzrd->speed_grade = 0;
+ }
+ }
+
+ clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+ if (IS_ERR(clk_wzrd->clk_in1)) {
+ if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "clk_in1 not found\n");
+ return PTR_ERR(clk_wzrd->clk_in1);
+ }
+
+ clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clk_wzrd->axi_clk)) {
+ if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "s_axi_aclk not found\n");
+ return PTR_ERR(clk_wzrd->axi_clk);
+ }
+ ret = clk_prepare_enable(clk_wzrd->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
+ return ret;
+ }
+ rate = clk_get_rate(clk_wzrd->axi_clk);
+ if (rate > WZRD_ACLK_MAX_FREQ) {
+ dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
+ rate);
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ /* we don't support fractional div/mul yet */
+ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLkFBOUT_FRAC_EN;
+ reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
+ WZRD_CLkOUT0_FRAC_EN;
+ if (reg)
+ dev_warn(&pdev->dev, "fractional div/mul not supported\n");
+
+ /* register multiplier */
+ reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+ clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
+ if (!clk_name) {
+ ret = -ENOMEM;
+ goto err_disable_clk;
+ }
+ clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor(
+ &pdev->dev, clk_name,
+ __clk_get_name(clk_wzrd->clk_in1),
+ 0, reg, 1);
+ kfree(clk_name);
+ if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
+ dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
+ ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
+ goto err_disable_clk;
+ }
+
+ /* register div */
+ reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_DIVCLK_DIVIDE_MASK) >> WZRD_DIVCLK_DIVIDE_SHIFT;
+ clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
+ if (!clk_name) {
+ ret = -ENOMEM;
+ goto err_rm_int_clk;
+ }
+
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor(
+ &pdev->dev, clk_name,
+ __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+ 0, 1, reg);
+ if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
+ dev_err(&pdev->dev, "unable to register divider clock\n");
+ ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
+ goto err_rm_int_clk;
+ }
+
+ /* register div per output */
+ for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
+ const char *clkout_name;
+ if (of_property_read_string_index(np, "clock-output-names", i,
+ &clkout_name)) {
+ dev_err(&pdev->dev,
+ "clock output name not specified\n");
+ ret = -EINVAL;
+ goto err_rm_int_clks;
+ }
+ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
+ reg &= WZRD_CLKOUT_DIVIDE_MASK;
+ reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
+ clk_wzrd->clkout[i] = clk_register_fixed_factor(&pdev->dev,
+ clkout_name, clk_name, 0, 1, reg);
+ if (IS_ERR(clk_wzrd->clkout[i])) {
+ int j;
+
+ for (j = i + 1; j < WZRD_NUM_OUTPUTS; j++)
+ clk_unregister(clk_wzrd->clkout[j]);
+ dev_err(&pdev->dev,
+ "unable to register divider clock\n");
+ ret = PTR_ERR(clk_wzrd->clkout[i]);
+ goto err_rm_int_clks;
+ }
+ }
+
+ kfree(clk_name);
+
+ clk_wzrd->clk_data.clks = clk_wzrd->clkout;
+ clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+
+ if (clk_wzrd->speed_grade) {
+ clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
+
+ ret = clk_notifier_register(clk_wzrd->clk_in1,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+
+ ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+ }
+
+ return 0;
+
+err_rm_int_clks:
+ clk_unregister(clk_wzrd->clks_internal[1]);
+err_rm_int_clk:
+ kfree(clk_name);
+ clk_unregister(clk_wzrd->clks_internal[0]);
+err_disable_clk:
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+
+ return ret;
+}
+
+static int clk_wzrd_remove(struct platform_device *pdev)
+{
+ int i;
+ struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
+ clk_unregister(clk_wzrd->clkout[i]);
+ for (i = 0; i < wzrd_clk_int_max; i++)
+ clk_unregister(clk_wzrd->clks_internal[i]);
+
+ if (clk_wzrd->speed_grade) {
+ clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
+ clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
+ }
+
+ clk_disable_unprepare(clk_wzrd->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id clk_wzrd_ids[] = {
+ { .compatible = "xlnx,clocking-wizard" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
+
+static struct platform_driver clk_wzrd_driver = {
+ .driver = {
+ .name = "clk-wizard",
+ .of_match_table = clk_wzrd_ids,
+ .pm = &clk_wzrd_dev_pm_ops,
+ },
+ .probe = clk_wzrd_probe,
+ .remove = clk_wzrd_remove,
+};
+module_platform_driver(clk_wzrd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
new file mode 100644
index 000000000000..723271e93316
--- /dev/null
+++ b/drivers/staging/clocking-wizard/dt-binding.txt
@@ -0,0 +1,30 @@
+Binding for Xilinx Clocking Wizard IP Core
+
+This binding uses the common clock binding[1]. Details about the devices can be
+found in the product guide[2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Clocking Wizard Product Guide
+http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-clk-wiz.pdf
+
+Required properties:
+ - compatible: Must be 'xlnx,clocking-wizard'
+ - reg: Base and size of the core's register space
+ - clocks: Handle to input clock
+ - clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
+ - clock-output-names: Names for the output clocks
+
+Optional properties:
+ - speed-grade: Speed grade of the device (valid values are 1..3)
+
+Example:
+ clock-generator@40040000 {
+ reg = <0x40040000 0x1000>;
+ compatible = "xlnx,clocking-wizard";
+ speed-grade = <1>;
+ clock-names = "clk_in1", "s_axi_aclk";
+ clocks = <&clkc 15>, <&clkc 15>;
+ clock-output-names = "clk_out0", "clk_out1", "clk_out2",
+ "clk_out3", "clk_out4", "clk_out5",
+ "clk_out6", "clk_out7";
+ };
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 152f4c12ea43..a8201fe87512 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -384,6 +384,7 @@ config COMEDI_DT282X
config COMEDI_DMM32AT
tristate "Diamond Systems MM-32-AT PC/104 board support"
+ select COMEDI_8255
---help---
Enable support for Diamond Systems MM-32-AT PC/104 boards
@@ -564,11 +565,14 @@ config COMEDI_S526
endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
- bool "Comedi PCI drivers"
+ tristate "Comedi PCI drivers"
depends on PCI
---help---
Enable support for comedi PCI drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_pci.
+
if COMEDI_PCI_DRIVERS
config COMEDI_8255_PCI
@@ -595,14 +599,6 @@ config COMEDI_ADDI_WATCHDOG
boards. This module will be automatically selected when needed. The
module will be called addi_watchdog.
-config COMEDI_ADDI_APCI_035
- tristate "ADDI-DATA APCI_035 support"
- ---help---
- Enable support for ADDI-DATA APCI_035 cards
-
- To compile this driver as a module, choose M here: the module will be
- called addi_apci_035.
-
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
---help---
@@ -939,11 +935,11 @@ config COMEDI_CB_PCIDDA
called cb_pcidda.
config COMEDI_CB_PCIMDAS
- tristate "MeasurementComputing PCIM-DAS1602/16 support"
+ tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
select COMEDI_8255
---help---
Enable support for ComputerBoards/MeasurementComputing PCI Migration
- series PCIM-DAS1602/16
+ series PCIM-DAS1602/16 and PCIe-DAS1602/16.
To compile this driver as a module, choose M here: the module will be
called cb_pcimdas.
@@ -1084,11 +1080,14 @@ config COMEDI_NI_TIOCMD
endif # COMEDI_PCI_DRIVERS
menuconfig COMEDI_PCMCIA_DRIVERS
- bool "Comedi PCMCIA drivers"
+ tristate "Comedi PCMCIA drivers"
depends on PCMCIA
---help---
Enable support for comedi PCMCIA drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_pcmcia.
+
if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
@@ -1160,11 +1159,14 @@ config COMEDI_QUATECH_DAQP_CS
endif # COMEDI_PCMCIA_DRIVERS
menuconfig COMEDI_USB_DRIVERS
- bool "Comedi USB drivers"
+ tristate "Comedi USB drivers"
depends on USB
---help---
Enable support for comedi USB drivers.
+ To compile this support as a module, choose M here: the module will
+ be called comedi_usb.
+
if COMEDI_USB_DRIVERS
config COMEDI_DT9812
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index fae2d9090006..7f9dfb3923ab 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -2,12 +2,13 @@ ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
comedi-y := comedi_fops.o range.o drivers.o \
comedi_buf.o
-comedi-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
-comedi-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
-comedi-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
comedi-$(CONFIG_PROC_FS) += proc.o
comedi-$(CONFIG_COMPAT) += comedi_compat32.o
+obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_pci.o
+obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += comedi_pcmcia.o
+obj-$(CONFIG_COMEDI_USB_DRIVERS) += comedi_usb.o
+
obj-$(CONFIG_COMEDI) += comedi.o
obj-$(CONFIG_COMEDI) += kcomedilib/
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index c8c99e65423b..745574077352 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -367,6 +367,8 @@ enum comedi_support_level {
#define COMEDI_BUFCONFIG _IOR(CIO, 13, struct comedi_bufconfig)
#define COMEDI_BUFINFO _IOWR(CIO, 14, struct comedi_bufinfo)
#define COMEDI_POLL _IO(CIO, 15)
+#define COMEDI_SETRSUBD _IO(CIO, 16)
+#define COMEDI_SETWSUBD _IO(CIO, 17)
/* structures */
@@ -514,17 +516,6 @@ struct comedi_bufinfo {
#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff)
-/* callback stuff */
-/* only relevant to kernel modules. */
-
-#define COMEDI_CB_EOS 1 /* end of scan */
-#define COMEDI_CB_EOA 2 /* end of acquisition/output */
-#define COMEDI_CB_BLOCK 4 /* data has arrived:
- * wakes up read() / write() */
-#define COMEDI_CB_EOBUF 8 /* DEPRECATED: end of buffer */
-#define COMEDI_CB_ERROR 16 /* card error during acquisition */
-#define COMEDI_CB_OVERFLOW 32 /* buffer overflow/underflow */
-
/**********************************************************/
/* everything after this line is ALPHA */
/**********************************************************/
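
The new COMEDI_SETRSUBD and COMEDI_SETWSUBD ioctls give each open file its own current "read" and "write" subdevice (see the struct comedi_file added in comedi_fops.c below). A hedged userspace sketch, assuming the ioctl argument is the subdevice index:

    #include <sys/ioctl.h>
    /* COMEDI_SETRSUBD / COMEDI_SETWSUBD as defined in comedi.h above */

    /* Point read() on this file descriptor at subdevice 'subd'. */
    static int demo_set_read_subdevice(int fd, unsigned int subd)
    {
            return ioctl(fd, COMEDI_SETRSUBD, subd);
    }
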
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index c60a45ad12b9..19e7b229d15e 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -236,6 +236,7 @@ void comedi_buf_reset(struct comedi_subdevice *s)
async->buf_read_ptr = 0;
async->cur_chan = 0;
+ async->scans_done = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
@@ -252,15 +253,15 @@ static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
return free_end - async->buf_write_alloc_count;
}
-static unsigned int __comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes,
- int strict)
+/* allocates chunk for the writer from free buffer space */
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int available = comedi_buf_write_n_available(s);
if (nbytes > available)
- nbytes = strict ? 0 : available;
+ nbytes = available;
async->buf_write_alloc_count += nbytes;
@@ -272,13 +273,6 @@ static unsigned int __comedi_buf_write_alloc(struct comedi_subdevice *s,
return nbytes;
}
-
-/* allocates chunk for the writer from free buffer space */
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
-{
- return __comedi_buf_write_alloc(s, nbytes, 0);
-}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
@@ -290,7 +284,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
unsigned int count = 0;
- const unsigned num_sample_bytes = bytes_per_sample(s);
+ const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
@@ -427,43 +421,11 @@ unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
-int comedi_buf_put(struct comedi_subdevice *s, unsigned short x)
-{
- struct comedi_async *async = s->async;
- unsigned int n = __comedi_buf_write_alloc(s, sizeof(short), 1);
-
- if (n < sizeof(short)) {
- async->events |= COMEDI_CB_ERROR;
- return 0;
- }
- *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
- comedi_buf_write_free(s, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_put);
-
-int comedi_buf_get(struct comedi_subdevice *s, unsigned short *x)
+static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
+ const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
- unsigned int n = comedi_buf_read_n_available(s);
-
- if (n < sizeof(short))
- return 0;
- comedi_buf_read_alloc(s, sizeof(short));
- *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
- comedi_buf_read_free(s, sizeof(short));
- return 1;
-}
-EXPORT_SYMBOL_GPL(comedi_buf_get);
-
-void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
- const void *data, unsigned int num_bytes)
-{
- struct comedi_async *async = s->async;
- unsigned int write_ptr = async->buf_write_ptr + offset;
-
- if (write_ptr >= async->prealloc_bufsz)
- write_ptr %= async->prealloc_bufsz;
+ unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
unsigned int block_size;
@@ -481,17 +443,13 @@ void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
write_ptr = 0;
}
}
-EXPORT_SYMBOL_GPL(comedi_buf_memcpy_to);
-void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
- void *dest, unsigned int nbytes)
+static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
+ void *dest, unsigned int nbytes)
{
void *src;
struct comedi_async *async = s->async;
- unsigned int read_ptr = async->buf_read_ptr + offset;
-
- if (read_ptr >= async->prealloc_bufsz)
- read_ptr %= async->prealloc_bufsz;
+ unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
unsigned int block_size;
@@ -509,69 +467,84 @@ void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
read_ptr = 0;
}
}
-EXPORT_SYMBOL_GPL(comedi_buf_memcpy_from);
/**
- * comedi_write_array_to_buffer - write data to comedi buffer
+ * comedi_buf_write_samples - write sample data to comedi buffer
* @s: comedi_subdevice struct
- * @data: destination
- * @num_bytes: number of bytes to write
+ * @data: samples
+ * @nsamples: number of samples
*
- * Writes up to num_bytes bytes of data to the comedi buffer associated with
- * the subdevice, marks it as written and updates the acquisition scan
- * progress.
+ * Writes nsamples to the comedi buffer associated with the subdevice, marks
+ * it as written and updates the acquisition scan progress.
*
* Returns the amount of data written in bytes.
*/
-unsigned int comedi_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes)
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples)
{
- struct comedi_async *async = s->async;
- unsigned int retval;
-
- if (num_bytes == 0)
- return 0;
+ unsigned int max_samples;
+ unsigned int nbytes;
- retval = comedi_buf_write_alloc(s, num_bytes);
- if (retval != num_bytes) {
+ /*
+ * Make sure there is enough room in the buffer for all the samples.
+ * If not, clamp the nsamples to the number that will fit, flag the
+ * buffer overrun and add the samples that fit.
+ */
+ max_samples = comedi_bytes_to_samples(s,
+ comedi_buf_write_n_available(s));
+ if (nsamples > max_samples) {
dev_warn(s->device->class_dev, "buffer overrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return 0;
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ nsamples = max_samples;
}
- comedi_buf_memcpy_to(s, 0, data, num_bytes);
- comedi_buf_write_free(s, num_bytes);
- comedi_inc_scan_progress(s, num_bytes);
- async->events |= COMEDI_CB_BLOCK;
+ if (nsamples == 0)
+ return 0;
- return num_bytes;
+ nbytes = comedi_buf_write_alloc(s,
+ comedi_samples_to_bytes(s, nsamples));
+ comedi_buf_memcpy_to(s, data, nbytes);
+ comedi_buf_write_free(s, nbytes);
+ comedi_inc_scan_progress(s, nbytes);
+ s->async->events |= COMEDI_CB_BLOCK;
+
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_write_array_to_buffer);
+EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
/**
- * comedi_read_array_from_buffer - read data from comedi buffer
+ * comedi_buf_read_samples - read sample data from comedi buffer
* @s: comedi_subdevice struct
* @data: destination
- * @num_bytes: number of bytes to read
+ * @nsamples: maximum number of samples to read
*
- * Reads up to num_bytes bytes of data from the comedi buffer associated with
- * the subdevice, marks it as read and updates the acquisition scan progress.
+ * Reads up to nsamples from the comedi buffer associated with the subdevice,
+ * marks it as read and updates the acquisition scan progress.
*
* Returns the amount of data read in bytes.
*/
-unsigned int comedi_read_array_from_buffer(struct comedi_subdevice *s,
- void *data, unsigned int num_bytes)
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples)
{
- if (num_bytes == 0)
+ unsigned int max_samples;
+ unsigned int nbytes;
+
+ /* clamp nsamples to the number of full samples available */
+ max_samples = comedi_bytes_to_samples(s,
+ comedi_buf_read_n_available(s));
+ if (nsamples > max_samples)
+ nsamples = max_samples;
+
+ if (nsamples == 0)
return 0;
- num_bytes = comedi_buf_read_alloc(s, num_bytes);
- comedi_buf_memcpy_from(s, 0, data, num_bytes);
- comedi_buf_read_free(s, num_bytes);
- comedi_inc_scan_progress(s, num_bytes);
+ nbytes = comedi_buf_read_alloc(s,
+ comedi_samples_to_bytes(s, nsamples));
+ comedi_buf_memcpy_from(s, data, nbytes);
+ comedi_buf_read_free(s, nbytes);
+ comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
- return num_bytes;
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_read_array_from_buffer);
+EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
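
comedi_buf_write_samples() takes over the overrun check and scan-progress bookkeeping that drivers previously had to wrap around comedi_write_array_to_buffer(). A minimal sketch of how a driver's interrupt path might use it (driver-specific names are assumed; the end-of-acquisition check relies on the scans_done counter reset added above):

    static void demo_handle_samples(struct comedi_device *dev,
                                    struct comedi_subdevice *s,
                                    unsigned short *buf,
                                    unsigned int nsamples)
    {
            struct comedi_cmd *cmd = &s->async->cmd;

            /* clamps to free space and flags COMEDI_CB_OVERFLOW on overrun */
            comedi_buf_write_samples(s, buf, nsamples);

            if (cmd->stop_src == TRIG_COUNT &&
                s->async->scans_done >= cmd->stop_arg)
                    s->async->events |= COMEDI_CB_EOA;
    }
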
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 9b6f96f1591c..5a4c74f703b3 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -416,6 +416,8 @@ static inline int raw_ioctl(struct file *file, unsigned int cmd,
case COMEDI_UNLOCK:
case COMEDI_CANCEL:
case COMEDI_POLL:
+ case COMEDI_SETRSUBD:
+ case COMEDI_SETWSUBD:
/* No translation needed. */
rc = translated_ioctl(file, cmd, arg);
break;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9c32f0276009..f143cb64d69e 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -43,6 +43,22 @@
#include "comedi_internal.h"
+/**
+ * struct comedi_file - per-file private data for comedi device
+ * @dev: comedi_device struct
+ * @read_subdev: current "read" subdevice
+ * @write_subdev: current "write" subdevice
+ * @last_detach_count: last known detach count
+ * @last_attached: last known attached/detached state
+ */
+struct comedi_file {
+ struct comedi_device *dev;
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+ unsigned int last_detach_count;
+ bool last_attached:1;
+};
+
#define COMEDI_NUM_MINORS 0x100
#define COMEDI_NUM_SUBDEVICE_MINORS \
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
@@ -239,6 +255,54 @@ comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor)
return dev->write_subdev;
}
+static void comedi_file_reset(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+ struct comedi_subdevice *s, *read_s, *write_s;
+ unsigned int minor = iminor(file_inode(file));
+
+ read_s = dev->read_subdev;
+ write_s = dev->write_subdev;
+ if (minor >= COMEDI_NUM_BOARD_MINORS) {
+ s = comedi_subdevice_from_minor(dev, minor);
+ if (s == NULL || s->subdev_flags & SDF_CMD_READ)
+ read_s = s;
+ if (s == NULL || s->subdev_flags & SDF_CMD_WRITE)
+ write_s = s;
+ }
+ cfp->last_attached = dev->attached;
+ cfp->last_detach_count = dev->detach_count;
+ ACCESS_ONCE(cfp->read_subdev) = read_s;
+ ACCESS_ONCE(cfp->write_subdev) = write_s;
+}
+
+static void comedi_file_check(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
+
+ if (cfp->last_attached != dev->attached ||
+ cfp->last_detach_count != dev->detach_count)
+ comedi_file_reset(file);
+}
+
+static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+
+ comedi_file_check(file);
+ return ACCESS_ONCE(cfp->read_subdev);
+}
+
+static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+
+ comedi_file_check(file);
+ return ACCESS_ONCE(cfp->write_subdev);
+}
+
static int resize_async_buffer(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned new_size)
{
@@ -776,7 +840,6 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
struct comedi_devinfo __user *arg,
struct file *file)
{
- const unsigned minor = iminor(file_inode(file));
struct comedi_subdevice *s;
struct comedi_devinfo devinfo;
@@ -788,13 +851,13 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (s)
devinfo.read_subdevice = s->index;
else
devinfo.read_subdevice = -1;
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (s)
devinfo.write_subdevice = s->index;
else
@@ -991,7 +1054,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (s->busy != file)
return -EACCES;
- if (bi.bytes_read && (s->subdev_flags & SDF_CMD_READ)) {
+ if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) {
bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read);
comedi_buf_read_free(s, bi.bytes_read);
@@ -1001,7 +1064,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
}
}
- if (bi.bytes_written && (s->subdev_flags & SDF_CMD_WRITE)) {
+ if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) {
bi.bytes_written =
comedi_buf_write_alloc(s, bi.bytes_written);
comedi_buf_write_free(s, bi.bytes_written);
@@ -1451,6 +1514,21 @@ static int __comedi_get_user_cmd(struct comedi_device *dev,
return -EINVAL;
}
+ /*
+ * Set the CMDF_WRITE flag to the correct state if the subdevice
+ * supports only "read" commands or only "write" commands.
+ */
+ switch (s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) {
+ case SDF_CMD_READ:
+ cmd->flags &= ~CMDF_WRITE;
+ break;
+ case SDF_CMD_WRITE:
+ cmd->flags |= CMDF_WRITE;
+ break;
+ default:
+ break;
+ }
+
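A hedged summary of the normalization above (editorial note, not part of the patch):

/* Illustrative effect of the switch above on cmd->flags:
 *   subdevice supports SDF_CMD_READ only   -> CMDF_WRITE is cleared
 *   subdevice supports SDF_CMD_WRITE only  -> CMDF_WRITE is set
 *   both (or neither) supported            -> user-supplied flag kept as-is
 */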
return 0;
}
@@ -1552,9 +1630,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
comedi_buf_reset(s);
- async->cb_mask =
- COMEDI_CB_EOA | COMEDI_CB_BLOCK | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW;
+ async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK;
if (async->cmd.flags & CMDF_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
@@ -1720,7 +1796,6 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
void *file)
{
struct comedi_subdevice *s;
- int ret;
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1734,9 +1809,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
if (s->busy != file)
return -EBUSY;
- ret = do_cancel(dev, s);
-
- return ret;
+ return do_cancel(dev, s);
}
/*
@@ -1774,11 +1847,96 @@ static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
return -EINVAL;
}
+/*
+ * COMEDI_SETRSUBD ioctl
+ * sets the current "read" subdevice on a per-file basis
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
+static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
+ struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_subdevice *s_old, *s_new;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+
+ s_new = &dev->subdevices[arg];
+ s_old = comedi_file_read_subdevice(file);
+ if (s_old == s_new)
+ return 0; /* no change */
+
+ if (!(s_new->subdev_flags & SDF_CMD_READ))
+ return -EINVAL;
+
+ /*
+ * Check the file isn't still busy handling a "read" command on the
+ * old subdevice (if any).
+ */
+ if (s_old && s_old->busy == file && s_old->async &&
+ !(s_old->async->cmd.flags & CMDF_WRITE))
+ return -EBUSY;
+
+ ACCESS_ONCE(cfp->read_subdev) = s_new;
+ return 0;
+}
+
+/*
+ * COMEDI_SETWSUBD ioctl
+ * sets the current "write" subdevice on a per-file basis
+ *
+ * arg:
+ * subdevice number
+ *
+ * reads:
+ * nothing
+ *
+ * writes:
+ * nothing
+ */
+static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
+ struct file *file)
+{
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_subdevice *s_old, *s_new;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+
+ s_new = &dev->subdevices[arg];
+ s_old = comedi_file_write_subdevice(file);
+ if (s_old == s_new)
+ return 0; /* no change */
+
+ if (!(s_new->subdev_flags & SDF_CMD_WRITE))
+ return -EINVAL;
+
+ /*
+ * Check the file isn't still busy handling a "write" command on the
+ * old subdevice (if any).
+ */
+ if (s_old && s_old->busy == file && s_old->async &&
+ (s_old->async->cmd.flags & CMDF_WRITE))
+ return -EBUSY;
+
+ ACCESS_ONCE(cfp->write_subdev) = s_new;
+ return 0;
+}
+
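For context, a hedged user-space sketch of how the two new per-file ioctls could be exercised; the subdevice numbers are arbitrary and the comedi.h include is assumed to provide the new ioctl numbers.

/* Illustrative user-space usage only. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <comedi.h>	/* assumed to define COMEDI_SETRSUBD / COMEDI_SETWSUBD */

static int select_subdevices(int fd)
{
	if (ioctl(fd, COMEDI_SETRSUBD, 0) < 0)	/* read() now uses subdevice 0 */
		return -1;
	if (ioctl(fd, COMEDI_SETWSUBD, 1) < 0)	/* write() now uses subdevice 1 */
		return -1;
	return 0;
}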
static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ unsigned minor = iminor(file_inode(file));
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
int rc;
mutex_lock(&dev->mutex);
@@ -1867,6 +2025,12 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
case COMEDI_POLL:
rc = do_poll_ioctl(dev, arg, file);
break;
+ case COMEDI_SETRSUBD:
+ rc = do_setrsubd_ioctl(dev, arg, file);
+ break;
+ case COMEDI_SETWSUBD:
+ rc = do_setwsubd_ioctl(dev, arg, file);
+ break;
default:
rc = -ENOTTY;
break;
@@ -1900,8 +2064,8 @@ static struct vm_operations_struct comedi_vm_ops = {
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s;
struct comedi_async *async;
struct comedi_buf_map *bm = NULL;
@@ -1927,9 +2091,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
if (vma->vm_flags & VM_WRITE)
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
else
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (!s) {
retval = -EINVAL;
goto done;
@@ -1992,8 +2156,8 @@ done:
static unsigned int comedi_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s;
mutex_lock(&dev->mutex);
@@ -2003,21 +2167,23 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
goto done;
}
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (s && s->async) {
poll_wait(file, &s->async->wait_head, wait);
if (!s->busy || !comedi_is_subdevice_running(s) ||
+ (s->async->cmd.flags & CMDF_WRITE) ||
comedi_buf_read_n_available(s) > 0)
mask |= POLLIN | POLLRDNORM;
}
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (s && s->async) {
- unsigned int bps = bytes_per_sample(s);
+ unsigned int bps = comedi_bytes_per_sample(s);
poll_wait(file, &s->async->wait_head, wait);
comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
if (!s->busy || !comedi_is_subdevice_running(s) ||
+ !(s->async->cmd.flags & CMDF_WRITE) ||
comedi_buf_write_n_allocated(s) >= bps)
mask |= POLLOUT | POLLWRNORM;
}
@@ -2034,8 +2200,8 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
struct comedi_async *async;
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
bool on_wait_queue = false;
bool attach_locked;
unsigned int old_detach_count;
@@ -2051,7 +2217,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
goto out;
}
- s = comedi_write_subdevice(dev, minor);
+ s = comedi_file_write_subdevice(file);
if (!s || !s->async) {
retval = -EIO;
goto out;
@@ -2065,6 +2231,10 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EACCES;
goto out;
}
+ if (!(async->cmd.flags & CMDF_WRITE)) {
+ retval = -EINVAL;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
on_wait_queue = true;
@@ -2099,7 +2269,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
* meantime!), but check the subdevice pointer
* as well just in case.
*/
- new_s = comedi_write_subdevice(dev, minor);
+ new_s = comedi_file_write_subdevice(file);
if (dev->attached &&
old_detach_count == dev->detach_count &&
s == new_s && new_s->async == async)
@@ -2136,6 +2306,10 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EACCES;
break;
}
+ if (!(async->cmd.flags & CMDF_WRITE)) {
+ retval = -EINVAL;
+ break;
+ }
continue;
}
@@ -2170,8 +2344,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
struct comedi_async *async;
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
- const unsigned minor = iminor(file_inode(file));
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
unsigned int old_detach_count;
bool become_nonbusy = false;
bool attach_locked;
@@ -2187,7 +2361,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
goto out;
}
- s = comedi_read_subdevice(dev, minor);
+ s = comedi_file_read_subdevice(file);
if (!s || !s->async) {
retval = -EIO;
goto out;
@@ -2200,6 +2374,10 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EACCES;
goto out;
}
+ if (async->cmd.flags & CMDF_WRITE) {
+ retval = -EINVAL;
+ goto out;
+ }
add_wait_queue(&async->wait_head, &wait);
while (nbytes > 0 && !retval) {
@@ -2239,6 +2417,10 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EACCES;
break;
}
+ if (async->cmd.flags & CMDF_WRITE) {
+ retval = -EINVAL;
+ break;
+ }
continue;
}
m = copy_to_user(buf, async->prealloc_buf +
@@ -2276,7 +2458,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
* meantime!), but check the subdevice pointer as well just in
* case.
*/
- new_s = comedi_read_subdevice(dev, minor);
+ new_s = comedi_file_read_subdevice(file);
if (dev->attached && old_detach_count == dev->detach_count &&
s == new_s && new_s->async == async) {
if (become_nonbusy || comedi_buf_n_bytes_ready(s) == 0)
@@ -2294,6 +2476,7 @@ out:
static int comedi_open(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
+ struct comedi_file *cfp;
struct comedi_device *dev = comedi_dev_get_from_minor(minor);
int rc;
@@ -2302,6 +2485,12 @@ static int comedi_open(struct inode *inode, struct file *file)
return -ENODEV;
}
+ cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
+ if (!cfp)
+ return -ENOMEM;
+
+ cfp->dev = dev;
+
mutex_lock(&dev->mutex);
if (!dev->attached && !capable(CAP_NET_ADMIN)) {
dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
@@ -2323,26 +2512,31 @@ static int comedi_open(struct inode *inode, struct file *file)
}
dev->use_count++;
- file->private_data = dev;
+ file->private_data = cfp;
+ comedi_file_reset(file);
rc = 0;
out:
mutex_unlock(&dev->mutex);
- if (rc)
+ if (rc) {
comedi_dev_put(dev);
+ kfree(cfp);
+ }
return rc;
}
static int comedi_fasync(int fd, struct file *file, int on)
{
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
return fasync_helper(fd, file, on, &dev->async_queue);
}
static int comedi_close(struct inode *inode, struct file *file)
{
- struct comedi_device *dev = file->private_data;
+ struct comedi_file *cfp = file->private_data;
+ struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s = NULL;
int i;
@@ -2368,6 +2562,7 @@ static int comedi_close(struct inode *inode, struct file *file)
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
+ kfree(cfp);
return 0;
}
@@ -2395,14 +2590,14 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
if (!comedi_is_subdevice_running(s))
return;
- if (s->
- async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
+ if (s->async->events & COMEDI_CB_CANCEL_MASK)
runflags_mask |= SRF_RUNNING;
- }
- /* remember if an error event has occurred, so an error
- * can be returned the next time the user does a read() */
- if (s->async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
+
+ /*
+ * Remember if an error event has occurred, so an error
+ * can be returned the next time the user does a read().
+ */
+ if (s->async->events & COMEDI_CB_ERROR_MASK) {
runflags_mask |= SRF_ERROR;
runflags |= SRF_ERROR;
}
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
index aa0795a2660e..6ba59c977006 100644
--- a/drivers/staging/comedi/comedi_pci.c
+++ b/drivers/staging/comedi/comedi_pci.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -168,3 +169,18 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
+
+static int __init comedi_pci_init(void)
+{
+ return 0;
+}
+module_init(comedi_pci_init);
+
+static void __exit comedi_pci_exit(void)
+{
+}
+module_exit(comedi_pci_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi PCI interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
index 9d49d5d01ad9..0529bae8e5ac 100644
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <pcmcia/cistpl.h>
@@ -154,3 +155,18 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_pcmcia_driver_unregister);
+
+static int __init comedi_pcmcia_init(void)
+{
+ return 0;
+}
+module_init(comedi_pcmcia_init);
+
+static void __exit comedi_pcmcia_exit(void)
+{
+}
+module_exit(comedi_pcmcia_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi PCMCIA interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
index 13f18bef6091..0b862a64c049 100644
--- a/drivers/staging/comedi/comedi_usb.c
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/usb.h>
#include "comedidev.h"
@@ -114,3 +115,18 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+static int __init comedi_usb_init(void)
+{
+ return 0;
+}
+module_init(comedi_usb_init);
+
+static void __exit comedi_usb_exit(void)
+{
+}
+module_exit(comedi_usb_exit);
+
+MODULE_AUTHOR("http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi USB interface module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 1b2bbd56f6ef..77be191988ca 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -121,6 +121,7 @@ struct comedi_buf_map {
* @buf_read_ptr: buffer position for reader
* @cur_chan: current position in chanlist for scan (for those
* drivers that use it)
+ * @scans_done: the number of scans completed (COMEDI_CB_EOS)
* @scan_progress: amount received or sent for current scan (in bytes)
* @munge_chan: current position in chanlist for "munging"
* @munge_count: "munge" count (in bytes, modulo 2**32)
@@ -201,6 +202,7 @@ struct comedi_async {
unsigned int buf_write_ptr;
unsigned int buf_read_ptr;
unsigned int cur_chan;
+ unsigned int scans_done;
unsigned int scan_progress;
unsigned int munge_chan;
unsigned int munge_count;
@@ -213,6 +215,28 @@ struct comedi_async {
unsigned int x);
};
+/**
+ * comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ *
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+#define COMEDI_CB_EOS (1 << 0)
+#define COMEDI_CB_EOA (1 << 1)
+#define COMEDI_CB_BLOCK (1 << 2)
+#define COMEDI_CB_EOBUF (1 << 3)
+#define COMEDI_CB_ERROR (1 << 4)
+#define COMEDI_CB_OVERFLOW (1 << 5)
+
+#define COMEDI_CB_ERROR_MASK (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)
+#define COMEDI_CB_CANCEL_MASK (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
+
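As a hedged illustration, not part of the patch, a driver error path only needs to set a single event bit and let the core apply the masks:

/* Illustrative only: flag an overflow and let the core cancel the command. */
static void example_flag_overflow(struct comedi_device *dev,
				  struct comedi_subdevice *s)
{
	/*
	 * COMEDI_CB_OVERFLOW is part of COMEDI_CB_ERROR_MASK and therefore of
	 * COMEDI_CB_CANCEL_MASK, so comedi_handle_events() will cancel the
	 * running command and the error is reported on the next read()/write().
	 */
	s->async->events |= COMEDI_CB_OVERFLOW;
	comedi_handle_events(dev, s);
}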
struct comedi_driver {
struct comedi_driver *next;
@@ -391,12 +415,61 @@ static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
return val ^ s->maxdata ^ (s->maxdata >> 1);
}
-static inline unsigned int bytes_per_sample(const struct comedi_subdevice *subd)
+/**
+ * comedi_bytes_per_sample - determine subdevice sample size
+ * @s: comedi_subdevice struct
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Returns the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift - determine log2 of subdevice sample size
+ * @s: comedi_subdevice struct
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Returns log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
{
- if (subd->subdev_flags & SDF_LSAMPL)
- return sizeof(unsigned int);
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
- return sizeof(short);
+/**
+ * comedi_bytes_to_samples - converts a number of bytes to a number of samples
+ * @s: comedi_subdevice struct
+ * @nbytes: number of bytes
+ *
+ * Returns the number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes - converts a number of samples to a number of bytes
+ * @s: comedi_subdevice struct
+ * @nsamples: number of samples
+ *
+ * Returns the number of samples multiplied by the subdevice sample size.
+ * Does not check for arithmetic overflow.
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
}
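A quick worked illustration of the conversion helpers above (editorial, not part of the patch):

/* Illustrative values only:
 *   comedi_bytes_per_sample(s)      -> 4 with SDF_LSAMPL set, else 2
 *   comedi_sample_shift(s)          -> 2 with SDF_LSAMPL set, else 1
 *   comedi_bytes_to_samples(s, 64)  -> 16 (SDF_LSAMPL) or 32 (default)
 *   comedi_samples_to_bytes(s, 16)  -> 64 (SDF_LSAMPL) or 32 (default)
 */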
/*
@@ -419,18 +492,10 @@ unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
-int comedi_buf_put(struct comedi_subdevice *s, unsigned short x);
-int comedi_buf_get(struct comedi_subdevice *s, unsigned short *x);
-
-void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
- const void *source, unsigned int num_bytes);
-void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
- void *destination, unsigned int num_bytes);
-unsigned int comedi_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes);
-unsigned int comedi_read_array_from_buffer(struct comedi_subdevice *s,
- void *data, unsigned int num_bytes);
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
/* drivers.c - general comedi driver functions */
@@ -451,6 +516,10 @@ int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
unsigned int comedi_dio_update_state(struct comedi_subdevice *,
unsigned int *data);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
void comedi_inc_scan_progress(struct comedi_subdevice *s,
unsigned int num_bytes);
@@ -493,8 +562,6 @@ void comedi_driver_unregister(struct comedi_driver *);
module_driver(__comedi_driver, comedi_driver_register, \
comedi_driver_unregister)
-#ifdef CONFIG_COMEDI_PCI_DRIVERS
-
/* comedi_pci.c - comedi PCI driver specific functions */
/*
@@ -538,36 +605,6 @@ void comedi_pci_driver_unregister(struct comedi_driver *, struct pci_driver *);
module_driver(__comedi_driver, comedi_pci_driver_register, \
comedi_pci_driver_unregister, &(__pci_driver))
-#else
-
-/*
- * Some of the comedi mixed ISA/PCI drivers call the PCI specific
- * functions. Provide some dummy functions if CONFIG_COMEDI_PCI_DRIVERS
- * is not enabled.
- */
-
-static inline struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev)
-{
- return NULL;
-}
-
-static inline int comedi_pci_enable(struct comedi_device *dev)
-{
- return -ENOSYS;
-}
-
-static inline void comedi_pci_disable(struct comedi_device *dev)
-{
-}
-
-static inline void comedi_pci_detach(struct comedi_device *dev)
-{
-}
-
-#endif /* CONFIG_COMEDI_PCI_DRIVERS */
-
-#ifdef CONFIG_COMEDI_PCMCIA_DRIVERS
-
/* comedi_pcmcia.c - comedi PCMCIA driver specific functions */
struct pcmcia_driver;
@@ -601,10 +638,6 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *,
module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
-#endif /* CONFIG_COMEDI_PCMCIA_DRIVERS */
-
-#ifdef CONFIG_COMEDI_USB_DRIVERS
-
/* comedi_usb.c - comedi USB driver specific functions */
struct usb_driver;
@@ -634,6 +667,4 @@ void comedi_usb_driver_unregister(struct comedi_driver *, struct usb_driver *);
module_driver(__comedi_driver, comedi_usb_driver_register, \
comedi_usb_driver_unregister, &(__usb_driver))
-#endif /* CONFIG_COMEDI_USB_DRIVERS */
-
#endif /* _COMEDIDEV_H */
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 3e5bccbc9c39..61802d7947ae 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -109,6 +109,10 @@ int comedi_alloc_subdev_readback(struct comedi_subdevice *s)
s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL);
if (!s->readback)
return -ENOMEM;
+
+ if (!s->insn_read)
+ s->insn_read = comedi_readback_insn_read;
+
return 0;
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback);
@@ -316,19 +320,91 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
case COMEDI_SUBD_DI:
case COMEDI_SUBD_DO:
case COMEDI_SUBD_DIO:
- bits_per_sample = 8 * bytes_per_sample(s);
- num_samples = (cmd->chanlist_len + bits_per_sample - 1) /
- bits_per_sample;
+ bits_per_sample = 8 * comedi_bytes_per_sample(s);
+ num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample);
break;
default:
- num_samples = cmd->chanlist_len;
+ num_samples = cmd->scan_end_arg;
break;
}
- return num_samples * bytes_per_sample(s);
+ return comedi_samples_to_bytes(s, num_samples);
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
/**
+ * comedi_nscans_left - return the number of scans left in the command
+ * @s: comedi_subdevice struct
+ * @nscans: the expected number of scans
+ *
+ * If nscans is 0, the number of scans available in the async buffer will be
+ * used. Otherwise the expected number of scans will be used.
+ *
+ * If the async command has a stop_src of TRIG_COUNT, @nscans is checked
+ * against the number of scans remaining in the command.
+ *
+ * The return value will then be either the expected number of scans or the
+ * number of scans remaining in the command.
+ */
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans)
+{
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+
+ if (nscans == 0) {
+ unsigned int nbytes = comedi_buf_read_n_available(s);
+
+ nscans = nbytes / comedi_bytes_per_scan(s);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ unsigned int scans_left = 0;
+
+ if (async->scans_done < cmd->stop_arg)
+ scans_left = cmd->stop_arg - async->scans_done;
+
+ if (nscans > scans_left)
+ nscans = scans_left;
+ }
+ return nscans;
+}
+EXPORT_SYMBOL_GPL(comedi_nscans_left);
+
+/**
+ * comedi_nsamples_left - return the number of samples left in the command
+ * @s: comedi_subdevice struct
+ * @nsamples: the expected number of samples
+ *
+ * Returns the expected number of samples or the number of samples remaining
+ * in the command.
+ */
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* +1 to force comedi_nscans_left() to return the scans left */
+ unsigned int nscans = (nsamples / cmd->scan_end_arg) + 1;
+ unsigned int scans_left = comedi_nscans_left(s, nscans);
+ unsigned int scan_pos =
+ comedi_bytes_to_samples(s, async->scan_progress);
+ unsigned long long samples_left = 0;
+
+ if (scans_left) {
+ samples_left = ((unsigned long long)scans_left *
+ cmd->scan_end_arg) - scan_pos;
+ }
+
+ if (samples_left < nsamples)
+ nsamples = samples_left;
+ }
+ return nsamples;
+}
+EXPORT_SYMBOL_GPL(comedi_nsamples_left);
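A hedged sketch of a block transfer that clamps a FIFO drain with comedi_nsamples_left() before queuing the data; fifo_buf and fifo_samples are assumptions for illustration only.

/* Illustrative only: write no more samples than the command still needs. */
static void example_drain_fifo(struct comedi_subdevice *s,
			       const unsigned short *fifo_buf,
			       unsigned int fifo_samples)
{
	unsigned int nsamples = comedi_nsamples_left(s, fifo_samples);

	/* comedi_buf_write_samples() also clamps to the free buffer space
	 * and raises COMEDI_CB_OVERFLOW if the data does not fit; the caller
	 * is expected to run comedi_handle_events() afterwards.
	 */
	comedi_buf_write_samples(s, fifo_buf, nsamples);
}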
+
+/**
* comedi_inc_scan_progress - update scan progress in asynchronous command
* @s: comedi_subdevice struct
* @num_bytes: amount of data in bytes to increment scan progress
@@ -342,10 +418,24 @@ void comedi_inc_scan_progress(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned int scan_length = comedi_bytes_per_scan(s);
+ /* track the 'cur_chan' for non-SDF_PACKED subdevices */
+ if (!(s->subdev_flags & SDF_PACKED)) {
+ async->cur_chan += comedi_bytes_to_samples(s, num_bytes);
+ async->cur_chan %= cmd->chanlist_len;
+ }
+
async->scan_progress += num_bytes;
if (async->scan_progress >= scan_length) {
+ unsigned int nscans = async->scan_progress / scan_length;
+
+ if (async->scans_done < (UINT_MAX - nscans))
+ async->scans_done += nscans;
+ else
+ async->scans_done = UINT_MAX;
+
async->scan_progress %= scan_length;
async->events |= COMEDI_CB_EOS;
}
@@ -376,7 +466,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
if (events == 0)
return events;
- if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
+ if (events & COMEDI_CB_CANCEL_MASK)
s->cancel(dev, s);
comedi_event(dev, s);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 6bc9ef3b25b3..84fdf20ca986 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -60,7 +60,6 @@ obj-$(CONFIG_COMEDI_S526) += s526.o
# Comedi PCI drivers
obj-$(CONFIG_COMEDI_8255_PCI) += 8255_pci.o
obj-$(CONFIG_COMEDI_ADDI_WATCHDOG) += addi_watchdog.o
-obj-$(CONFIG_COMEDI_ADDI_APCI_035) += addi_apci_035.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1032) += addi_apci_1032.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1500) += addi_apci_1500.o
obj-$(CONFIG_COMEDI_ADDI_APCI_1516) += addi_apci_1516.o
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
deleted file mode 100644
index 2e7fb218340f..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; either version 2 of the License, or (at your option) any later
-version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : ADDI DATA | Compiler : GCC |
- | Modulname : addi_common.c | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Author : | Date : |
- +-----------------------------------------------------------------------+
- | Description : ADDI COMMON Main Module |
- +-----------------------------------------------------------------------+
-*/
-
-static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned short w_Address = CR_CHAN(insn->chanspec);
- unsigned short w_Data;
-
- w_Data = addi_eeprom_readw(devpriv->i_IobaseAmcc,
- this_board->pc_EepromChip, 2 * w_Address);
- data[0] = w_Data;
-
- return insn->n;
-}
-
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- const struct addi_board *this_board = dev->board_ptr;
-
- this_board->interrupt(irq, d);
- return IRQ_RETVAL(1);
-}
-
-static int i_ADDI_Reset(struct comedi_device *dev)
-{
- const struct addi_board *this_board = dev->board_ptr;
-
- this_board->reset(dev);
- return 0;
-}
-
-static int addi_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv;
- struct comedi_subdevice *s;
- int ret, n_subdevices;
- unsigned int dw_Dummy;
-
- dev->board_name = this_board->pc_DriverName;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
-
- if (this_board->i_IorangeBase1)
- dev->iobase = pci_resource_start(pcidev, 1);
- else
- dev->iobase = pci_resource_start(pcidev, 0);
-
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
-
- /* Initialize parameters that can be overridden in EEPROM */
- devpriv->s_EeParameters.i_NbrAiChannel = this_board->i_NbrAiChannel;
- devpriv->s_EeParameters.i_NbrAoChannel = this_board->i_NbrAoChannel;
- devpriv->s_EeParameters.i_AiMaxdata = this_board->i_AiMaxdata;
- devpriv->s_EeParameters.i_AoMaxdata = this_board->i_AoMaxdata;
- devpriv->s_EeParameters.i_NbrDiChannel = this_board->i_NbrDiChannel;
- devpriv->s_EeParameters.i_NbrDoChannel = this_board->i_NbrDoChannel;
- devpriv->s_EeParameters.i_DoMaxdata = this_board->i_DoMaxdata;
- devpriv->s_EeParameters.i_Timer = this_board->i_Timer;
- devpriv->s_EeParameters.ui_MinAcquisitiontimeNs =
- this_board->ui_MinAcquisitiontimeNs;
- devpriv->s_EeParameters.ui_MinDelaytimeNs =
- this_board->ui_MinDelaytimeNs;
-
- /* ## */
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- /* Read eepeom and fill addi_board Structure */
-
- if (this_board->i_PCIEeprom) {
- if (!(strcmp(this_board->pc_EepromChip, "S5920"))) {
- /* Set 3 wait stait */
- if (!(strcmp(dev->board_name, "apci035")))
- outl(0x80808082, devpriv->i_IobaseAmcc + 0x60);
- else
- outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
- }
- addi_eeprom_read_info(dev, pci_resource_start(pcidev, 0));
- }
-
- n_subdevices = 7;
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return ret;
-
- /* Allocate and Initialise AI Subdevice Structures */
- s = &dev->subdevices[0];
- if ((devpriv->s_EeParameters.i_NbrAiChannel)
- || (this_board->i_NbrAiChannelDiff)) {
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags =
- SDF_READABLE | SDF_COMMON | SDF_GROUND
- | SDF_DIFF;
- if (devpriv->s_EeParameters.i_NbrAiChannel)
- s->n_chan = devpriv->s_EeParameters.i_NbrAiChannel;
- else
- s->n_chan = this_board->i_NbrAiChannelDiff;
- s->maxdata = devpriv->s_EeParameters.i_AiMaxdata;
- s->len_chanlist = this_board->i_AiChannelList;
- s->range_table = this_board->pr_AiRangelist;
-
- s->insn_config = this_board->ai_config;
- s->insn_read = this_board->ai_read;
- s->insn_write = this_board->ai_write;
- s->insn_bits = this_board->ai_bits;
- s->do_cmdtest = this_board->ai_cmdtest;
- s->do_cmd = this_board->ai_cmd;
- s->cancel = this_board->ai_cancel;
-
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise AO Subdevice Structures */
- s = &dev->subdevices[1];
- if (devpriv->s_EeParameters.i_NbrAoChannel) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrAoChannel;
- s->maxdata = devpriv->s_EeParameters.i_AoMaxdata;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrAoChannel;
- s->insn_write = this_board->ao_write;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- /* Allocate and Initialise DI Subdevice Structures */
- s = &dev->subdevices[2];
- if (devpriv->s_EeParameters.i_NbrDiChannel) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrDiChannel;
- s->maxdata = 1;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrDiChannel;
- s->range_table = &range_digital;
- s->insn_config = this_board->di_config;
- s->insn_read = this_board->di_read;
- s->insn_write = this_board->di_write;
- s->insn_bits = this_board->di_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
- /* Allocate and Initialise DO Subdevice Structures */
- s = &dev->subdevices[3];
- if (devpriv->s_EeParameters.i_NbrDoChannel) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = devpriv->s_EeParameters.i_NbrDoChannel;
- s->maxdata = devpriv->s_EeParameters.i_DoMaxdata;
- s->len_chanlist =
- devpriv->s_EeParameters.i_NbrDoChannel;
- s->range_table = &range_digital;
-
- /* insn_config - for digital output memory */
- s->insn_config = this_board->do_config;
- s->insn_write = this_board->do_write;
- s->insn_bits = this_board->do_bits;
- s->insn_read = this_board->do_read;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[4];
- if (devpriv->s_EeParameters.i_Timer) {
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
-
- s->insn_write = this_board->timer_write;
- s->insn_read = this_board->timer_read;
- s->insn_config = this_board->timer_config;
- s->insn_bits = this_board->timer_bits;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- /* Allocate and Initialise TTL */
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_UNUSED;
-
- /* EEPROM */
- s = &dev->subdevices[6];
- if (this_board->i_PCIEeprom) {
- s->type = COMEDI_SUBD_MEMORY;
- s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
- s->n_chan = 256;
- s->maxdata = 0xffff;
- s->insn_read = i_ADDIDATA_InsnReadEeprom;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
-
- i_ADDI_Reset(dev);
- return 0;
-}
-
-static void i_ADDI_Detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- i_ADDI_Reset(dev);
- comedi_pci_detach(dev);
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
deleted file mode 100644
index e2a3ffeee5cf..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-
-struct addi_board {
- const char *pc_DriverName; /* driver name */
- int i_IorangeBase1;
- int i_PCIEeprom; /* eeprom present or not */
- char *pc_EepromChip; /* type of chip */
- int i_NbrAiChannel; /* num of A/D chans */
- int i_NbrAiChannelDiff; /* num of A/D chans in diff mode */
- int i_AiChannelList; /* len of chanlist */
- int i_NbrAoChannel; /* num of D/A chans */
- int i_AiMaxdata; /* resolution of A/D */
- int i_AoMaxdata; /* resolution of D/A */
- const struct comedi_lrange *pr_AiRangelist; /* rangelist for A/D */
-
- int i_NbrDiChannel; /* Number of DI channels */
- int i_NbrDoChannel; /* Number of DO channels */
- int i_DoMaxdata; /* data to set all channels high */
-
- int i_Timer; /* timer subdevice present or not */
- unsigned int ui_MinAcquisitiontimeNs; /* Minimum Acquisition in Nano secs */
- unsigned int ui_MinDelaytimeNs; /* Minimum Delay in Nano secs */
-
- /* interrupt and reset */
- void (*interrupt)(int irq, void *d);
- int (*reset)(struct comedi_device *);
-
- /* Subdevice functions */
-
- /* ANALOG INPUT */
- int (*ai_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ai_cmdtest)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_cmd *);
- int (*ai_cmd)(struct comedi_device *, struct comedi_subdevice *);
- int (*ai_cancel)(struct comedi_device *, struct comedi_subdevice *);
-
- /* Analog Output */
- int (*ao_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* Digital Input */
- int (*di_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*di_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* Digital Output */
- int (*do_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*do_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-
- /* TIMER */
- int (*timer_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*timer_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-};
-
-struct addi_private {
- int iobase;
- int i_IobaseAmcc; /* base+size for AMCC chip */
- int i_IobaseAddon; /* addon base address */
- int i_IobaseReserved;
- unsigned int ui_AiActualScan; /* how many scans we finished */
- unsigned int ui_AiNbrofChannels; /* how many channels is measured */
- unsigned int ui_AiChannelList[32]; /* actual chanlist */
- unsigned int ui_AiReadData[32];
- unsigned short us_UseDma; /* To use Dma or not */
- unsigned char b_DmaDoubleBuffer; /* we can use double buffering */
- unsigned int ui_DmaActualBuffer; /* which buffer is used now */
- unsigned short *ul_DmaBufferVirtual[2]; /* pointers to DMA buffer */
- dma_addr_t ul_DmaBufferHw[2]; /* hw address of DMA buff */
- unsigned int ui_DmaBufferSize[2]; /* size of dma buffer in bytes */
- unsigned int ui_DmaBufferUsesize[2]; /* which size we may now used for transfer */
- unsigned char b_DigitalOutputRegister; /* Digital Output Register */
- unsigned char b_OutputMemoryStatus;
- unsigned char b_TimerSelectMode; /* Contain data written at iobase + 0C */
- unsigned char b_ModeSelectRegister; /* Contain data written at iobase + 0E */
- unsigned short us_OutputRegister; /* Contain data written at iobase + 0 */
- unsigned char b_Timer2Mode; /* Specify the timer 2 mode */
- unsigned char b_Timer2Interrupt; /* Timer2 interrupt enable or disable */
- unsigned int ai_running:1;
- unsigned char b_InterruptMode; /* eoc eos or dma */
- unsigned char b_EocEosInterrupt; /* Enable disable eoc eos interrupt */
- unsigned int ui_EocEosConversionTime;
- unsigned char b_ExttrigEnable; /* To enable or disable external trigger */
-
- /* Pointer to the current process */
- struct task_struct *tsk_Current;
-
- /* Parameters read from EEPROM overriding static board info */
- struct {
- int i_NbrAiChannel; /* num of A/D chans */
- int i_NbrAoChannel; /* num of D/A chans */
- int i_AiMaxdata; /* resolution of A/D */
- int i_AoMaxdata; /* resolution of D/A */
- int i_NbrDiChannel; /* Number of DI channels */
- int i_NbrDoChannel; /* Number of DO channels */
- int i_DoMaxdata; /* data to set all channels high */
- int i_Timer; /* timer subdevice present or not */
- unsigned int ui_MinAcquisitiontimeNs;
- /* Minimum Acquisition in Nano secs */
- unsigned int ui_MinDelaytimeNs;
- /* Minimum Delay in Nano secs */
- } s_EeParameters;
-};
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
deleted file mode 100644
index b731856c27da..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * addi_eeprom.c - ADDI EEPROM Module
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- * Project manager: Eric Stolz
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
-
-#include <linux/delay.h>
-
-#define NVRAM_USER_DATA_START 0x100
-
-#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
-#define NVCMD_LOAD_LOW (0x4 << 5) /* nvRam load low command */
-#define NVCMD_LOAD_HIGH (0x5 << 5) /* nvRam load high command */
-
-#define EE93C76_CLK_BIT (1 << 0)
-#define EE93C76_CS_BIT (1 << 1)
-#define EE93C76_DOUT_BIT (1 << 2)
-#define EE93C76_DIN_BIT (1 << 3)
-#define EE93C76_READ_CMD (0x0180 << 4)
-#define EE93C76_CMD_LEN 13
-
-#define EEPROM_DIGITALINPUT 0
-#define EEPROM_DIGITALOUTPUT 1
-#define EEPROM_ANALOGINPUT 2
-#define EEPROM_ANALOGOUTPUT 3
-#define EEPROM_TIMER 4
-#define EEPROM_WATCHDOG 5
-#define EEPROM_TIMER_WATCHDOG_COUNTER 10
-
-static void addi_eeprom_clk_93c76(unsigned long iobase, unsigned int val)
-{
- outl(val & ~EE93C76_CLK_BIT, iobase);
- udelay(100);
-
- outl(val | EE93C76_CLK_BIT, iobase);
- udelay(100);
-}
-
-static unsigned int addi_eeprom_cmd_93c76(unsigned long iobase,
- unsigned int cmd,
- unsigned char len)
-{
- unsigned int val = EE93C76_CS_BIT;
- int i;
-
- /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */
- outl(val, iobase);
- udelay(100);
-
- /* Send EEPROM command - one bit at a time */
- for (i = (len - 1); i >= 0; i--) {
- if (cmd & (1 << i))
- val |= EE93C76_DOUT_BIT;
- else
- val &= ~EE93C76_DOUT_BIT;
-
- /* Write the command */
- outl(val, iobase);
- udelay(100);
-
- addi_eeprom_clk_93c76(iobase, val);
- }
- return val;
-}
-
-static unsigned short addi_eeprom_readw_93c76(unsigned long iobase,
- unsigned short addr)
-{
- unsigned short val = 0;
- unsigned int cmd;
- unsigned int tmp;
- int i;
-
- /* Send EEPROM read command and offset to EEPROM */
- cmd = EE93C76_READ_CMD | (addr / 2);
- cmd = addi_eeprom_cmd_93c76(iobase, cmd, EE93C76_CMD_LEN);
-
- /* Get the 16-bit value */
- for (i = 0; i < 16; i++) {
- addi_eeprom_clk_93c76(iobase, cmd);
-
- tmp = inl(iobase);
- udelay(100);
-
- val <<= 1;
- if (tmp & EE93C76_DIN_BIT)
- val |= 0x1;
- }
-
- /* Toggle EEPROM's Chip select to get it out of Shift Register Mode */
- outl(0, iobase);
- udelay(100);
-
- return val;
-}
-
-static void addi_eeprom_nvram_wait(unsigned long iobase)
-{
- unsigned char val;
-
- do {
- val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
- } while (val & 0x80);
-}
-
-static unsigned short addi_eeprom_readw_nvram(unsigned long iobase,
- unsigned short addr)
-{
- unsigned short val = 0;
- unsigned char tmp;
- unsigned char i;
-
- for (i = 0; i < 2; i++) {
- /* Load the low 8 bit address */
- outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- /* Load the high 8 bit address */
- outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- outb(((addr + i) >> 8) & 0xff,
- iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- /* Read the eeprom data byte */
- outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
- addi_eeprom_nvram_wait(iobase);
- tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
- addi_eeprom_nvram_wait(iobase);
-
- if (i == 0)
- val |= tmp;
- else
- val |= (tmp << 8);
- }
-
- return val;
-}
-
-static unsigned short addi_eeprom_readw(unsigned long iobase,
- char *type,
- unsigned short addr)
-{
- unsigned short val = 0;
-
- /* Add the offset to the start of the user data */
- addr += NVRAM_USER_DATA_START;
-
- if (!strcmp(type, "S5920") || !strcmp(type, "S5933"))
- val = addi_eeprom_readw_nvram(iobase, addr);
-
- if (!strcmp(type, "93C76"))
- val = addi_eeprom_readw_93c76(iobase, addr);
-
- return val;
-}
-
-static void addi_eeprom_read_di_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* Number of channels */
- tmp = addi_eeprom_readw(iobase, type, addr + 6);
- devpriv->s_EeParameters.i_NbrDiChannel = tmp;
-
- /* Interruptible or not */
- tmp = addi_eeprom_readw(iobase, type, addr + 8);
- tmp = (tmp >> 7) & 0x01;
-
- /* How many interruptible logic */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
-}
-
-static void addi_eeprom_read_do_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* Number of channels */
- tmp = addi_eeprom_readw(iobase, type, addr + 6);
- devpriv->s_EeParameters.i_NbrDoChannel = tmp;
-
- devpriv->s_EeParameters.i_DoMaxdata = 0xffffffff >> (32 - tmp);
-}
-
-static void addi_eeprom_read_timer_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- struct addi_private *devpriv = dev->private;
-#if 0
- const struct addi_board *this_board = dev->board_ptr;
- char *type = this_board->pc_EepromChip;
- unsigned short offset = 0;
- unsigned short ntimers;
- unsigned short tmp;
- int i;
-
- /* Number of Timers */
- ntimers = addi_eeprom_readw(iobase, type, addr + 6);
-
- /* Read header size */
- for (i = 0; i < ntimers; i++) {
- unsigned short size;
- unsigned short res;
- unsigned short mode;
- unsigned short min_timing;
- unsigned short timebase;
-
- size = addi_eeprom_readw(iobase, type, addr + 8 + offset + 0);
-
- /* Resolution / Mode */
- tmp = addi_eeprom_readw(iobase, type, addr + 8 + offset + 2);
- res = (tmp >> 10) & 0x3f;
- mode = (tmp >> 4) & 0x3f;
-
- /* MinTiming / Timebase */
- tmp = addi_eeprom_readw(iobase, type, addr + 8 + offset + 4);
- min_timing = (tmp >> 6) & 0x3ff;
- Timebase = tmp & 0x3f;
-
- offset += size;
- }
-#endif
- /* Timer subdevice present */
- devpriv->s_EeParameters.i_Timer = 1;
-}
-
-static void addi_eeprom_read_ao_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short tmp;
-
- /* No of channels for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
- devpriv->s_EeParameters.i_NbrAoChannel = (tmp >> 4) & 0x3ff;
-
- /* Resolution for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 16);
- tmp = (tmp >> 8) & 0xff;
- devpriv->s_EeParameters.i_AoMaxdata = 0xfff >> (16 - tmp);
-}
-
-static void addi_eeprom_read_ai_info(struct comedi_device *dev,
- unsigned long iobase,
- unsigned short addr)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- char *type = this_board->pc_EepromChip;
- unsigned short offset;
- unsigned short tmp;
-
- /* No of channels for 1st hard component */
- tmp = addi_eeprom_readw(iobase, type, addr + 10);
- devpriv->s_EeParameters.i_NbrAiChannel = (tmp >> 4) & 0x3ff;
- if (!strcmp(this_board->pc_DriverName, "apci3200"))
- devpriv->s_EeParameters.i_NbrAiChannel *= 4;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 16);
- devpriv->s_EeParameters.ui_MinAcquisitiontimeNs = tmp * 1000;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 30);
- devpriv->s_EeParameters.ui_MinDelaytimeNs = tmp * 1000;
-
- tmp = addi_eeprom_readw(iobase, type, addr + 20);
- /* dma = (tmp >> 13) & 0x01; */
-
- tmp = addi_eeprom_readw(iobase, type, addr + 72) & 0xff;
- if (tmp) { /* > 0 */
- /* offset of first analog input single header */
- offset = 74 + (2 * tmp) + (10 * (1 + (tmp / 16)));
- } else { /* = 0 */
- offset = 74;
- }
-
- /* Resolution */
- tmp = addi_eeprom_readw(iobase, type, addr + offset + 2) & 0x1f;
- devpriv->s_EeParameters.i_AiMaxdata = 0xffff >> (16 - tmp);
-}
-
-static void addi_eeprom_read_info(struct comedi_device *dev,
- unsigned long iobase)
-{
- const struct addi_board *this_board = dev->board_ptr;
- char *type = this_board->pc_EepromChip;
- unsigned short size;
- unsigned char nfuncs;
- int i;
-
- size = addi_eeprom_readw(iobase, type, 8);
- nfuncs = addi_eeprom_readw(iobase, type, 10) & 0xff;
-
- /* Read functionality details */
- for (i = 0; i < nfuncs; i++) {
- unsigned short offset = i * 4;
- unsigned short addr;
- unsigned char func;
-
- func = addi_eeprom_readw(iobase, type, 12 + offset) & 0x3f;
- addr = addi_eeprom_readw(iobase, type, 14 + offset);
-
- switch (func) {
- case EEPROM_DIGITALINPUT:
- addi_eeprom_read_di_info(dev, iobase, addr);
- break;
-
- case EEPROM_DIGITALOUTPUT:
- addi_eeprom_read_do_info(dev, iobase, addr);
- break;
-
- case EEPROM_ANALOGINPUT:
- addi_eeprom_read_ai_info(dev, iobase, addr);
- break;
-
- case EEPROM_ANALOGOUTPUT:
- addi_eeprom_read_ao_info(dev, iobase, addr);
- break;
-
- case EEPROM_TIMER:
- case EEPROM_WATCHDOG:
- case EEPROM_TIMER_WATCHDOG_COUNTER:
- addi_eeprom_read_timer_info(dev, iobase, addr);
- break;
- }
- }
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
deleted file mode 100644
index 53bb51bd77b5..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- */
-
-/* Card Specific information */
-#define APCI035_ADDRESS_RANGE 255
-
-/* Timer / Watchdog Related Defines */
-#define APCI035_TCW_SYNC_ENABLEDISABLE 0
-#define APCI035_TCW_RELOAD_VALUE 4
-#define APCI035_TCW_TIMEBASE 8
-#define APCI035_TCW_PROG 12
-#define APCI035_TCW_TRIG_STATUS 16
-#define APCI035_TCW_IRQ 20
-#define APCI035_TCW_WARN_TIMEVAL 24
-#define APCI035_TCW_WARN_TIMEBASE 28
-
-#define ADDIDATA_TIMER 0
-/* #define ADDIDATA_WATCHDOG 1 */
-
-#define APCI035_TW1 0
-#define APCI035_TW2 32
-#define APCI035_TW3 64
-#define APCI035_TW4 96
-
-#define APCI035_AI_OFFSET 0
-#define APCI035_TEMP 128
-#define APCI035_ALR_SEQ 4
-#define APCI035_START_STOP_INDEX 8
-#define APCI035_ALR_START_STOP 12
-#define APCI035_ALR_IRQ 16
-#define APCI035_EOS 20
-#define APCI035_CHAN_NO 24
-#define APCI035_CHAN_VAL 28
-#define APCI035_CONV_TIME_TIME_BASE 36
-#define APCI035_RELOAD_CONV_TIME_VAL 32
-#define APCI035_DELAY_TIME_TIME_BASE 44
-#define APCI035_RELOAD_DELAY_TIME_VAL 40
-#define ENABLE_EXT_TRIG 1
-#define ENABLE_EXT_GATE 2
-#define ENABLE_EXT_TRIG_GATE 3
-
-#define ANALOG_INPUT 0
-#define TEMPERATURE 1
-#define RESISTANCE 2
-
-#define ADDIDATA_GREATER_THAN_TEST 0
-#define ADDIDATA_LESS_THAN_TEST 1
-
-#define APCI035_MAXVOLT 2.5
-
-#define ADDIDATA_UNIPOLAR 1
-#define ADDIDATA_BIPOLAR 2
-
-/* ANALOG INPUT RANGE */
-static struct comedi_lrange range_apci035_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static int i_WatchdogNbr;
-static int i_Temp;
-static int i_Flag = 1;
-
-/*
- * Configures The Timer, Counter or Watchdog
- *
- * data[0] 0 = Configure As Timer, 1 = Configure As Watchdog
- * data[1] Watchdog number
- * data[2] Time base Unit
- * data[3] Reload Value
- * data[4] External Trigger, 1 = Enable, 0 = Disable
- * data[5] External Trigger Level
- * 00 = Trigger Disabled
- * 01 = Trigger Enabled (Low level)
- * 10 = Trigger Enabled (High Level)
- * 11 = Trigger Enabled (High/Low level)
- * data[6] External Gate, 1 = Enable, 0 = Disable
- * data[7] External Gate level
- * 00 = Gate Disabled
- * 01 = Gate Enabled (Low level)
- * 10 = Gate Enabled (High Level)
- * data[8] Warning Relay, 1 = Enable, 0 = Disable
- * data[9] Warning Delay available
- * data[10] Warning Relay Time unit
- * data[11] Warning Relay Time Reload value
- * data[12] Reset Relay, 1 = Enable, 0 = Disable
- * data[13] Interrupt, 1 = Enable, 0 = Disable
- */
-static int apci035_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Status;
- unsigned int ui_Command;
- unsigned int ui_Mode;
-
- i_Temp = 0;
- devpriv->tsk_Current = current;
- devpriv->b_TimerSelectMode = data[0];
- i_WatchdogNbr = data[1];
- if (data[0] == 0)
- ui_Mode = 2;
- else
- ui_Mode = 0;
-
- ui_Command = 0;
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the reload value */
- outl(data[3], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 4);
-
- /* Set the time unit */
- outl(data[2], devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 8);
- if (data[0] == ADDIDATA_TIMER) {
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Enable the timer mode */
- /* - Set the timer mode */
-
- ui_Command =
- (ui_Command & 0xFFF719E2UL) | ui_Mode << 13UL | 0x10UL;
-
- } else if (data[0] == ADDIDATA_WATCHDOG) {
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
-
- ui_Command = ui_Command & 0xFFF819E2UL;
-
- } else {
- dev_err(dev->class_dev, "The parameter for Timer/watchdog selection is in error\n");
- return -EINVAL;
- }
-
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware trigger */
- ui_Command = ui_Command & 0xFFFFF89FUL;
- if (data[4] == 1) {
- /* Set the hardware trigger level */
- ui_Command = ui_Command | (data[5] << 5);
- }
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware gate */
- ui_Command = ui_Command & 0xFFFFF87FUL;
- if (data[6] == 1) {
- /* Set the hardware gate level */
- ui_Command = ui_Command | (data[7] << 7);
- }
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware output */
- ui_Command = ui_Command & 0xFFFFF9FBUL;
-
- /* Set the hardware output level */
- ui_Command = ui_Command | (data[8] << 2);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- if (data[9] == 1) {
- /* Set the reload value */
- outl(data[11],
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 24);
-
- /* Set the time unit */
- outl(data[10],
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 28);
- }
-
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Disable the hardware output */
- ui_Command = ui_Command & 0xFFFFF9F7UL;
-
- /* Set the hardware output level */
- ui_Command = ui_Command | (data[12] << 3);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Enable the watchdog interrupt */
- ui_Command = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the interrupt selection */
- ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
-
- ui_Command = (ui_Command & 0xFFFFF9FDUL) | (data[13] << 1);
- outl(ui_Command, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- return insn->n;
-}
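
For reference while reviewing this removal, the data[] layout documented above maps onto a single comedi INSN_CONFIG instruction, assuming this function is wired up as the timer/watchdog subdevice's insn_config handler. A minimal user-space sketch with comedilib; the subdevice index and the concrete values are illustrative assumptions, not part of the patch:

#include <comedilib.h>

/* Illustrative only -- not part of this patch. */
static int apci035_configure_watchdog(comedi_t *dev)
{
	lsampl_t data[14] = {
		1,		/* data[0]:  configure as watchdog */
		1,		/* data[1]:  watchdog number 1 */
		2,		/* data[2]:  time base unit */
		500,		/* data[3]:  reload value */
		0, 0,		/* data[4..5]:  external trigger disabled */
		0, 0,		/* data[6..7]:  external gate disabled */
		1,		/* data[8]:  warning relay enabled */
		0, 0, 0,	/* data[9..11]: no warning delay */
		1,		/* data[12]: reset relay enabled */
		1,		/* data[13]: interrupt enabled */
	};
	comedi_insn insn = {
		.insn   = INSN_CONFIG,
		.n      = 14,
		.data   = data,
		.subdev = 2,	/* assumed timer/watchdog subdevice index */
	};

	return comedi_do_insn(dev, &insn);
}
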
-
-/*
- * Start / Stop The Selected Timer or Watchdog
- *
- * data[0]
- * 0 - Stop Selected Timer/Watchdog
- * 1 - Start Selected Timer/Watchdog
- * 2 - Trigger Selected Timer/Watchdog
- * 3 - Stop All Timer/Watchdog
- * 4 - Start All Timer/Watchdog
- * 5 - Trigger All Timer/Watchdog
- */
-static int apci035_timer_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Command;
- int i_Count;
-
- if (data[0] == 1) {
- ui_Command =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Start the hardware */
- ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x1UL;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
- if (data[0] == 2) {
- ui_Command =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
-
- /* Set the trigger command */
- ui_Command = (ui_Command & 0xFFFFF9FFUL) | 0x200UL;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
-
- if (data[0] == 0) {
- /* Stop The Watchdog */
- ui_Command = 0;
- /*
- * ui_Command = inl(devpriv->iobase+((i_WatchdogNbr-1)*32)+12);
- * ui_Command = ui_Command & 0xFFFFF9FEUL;
- */
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 12);
- }
- if (data[0] == 3) {
- /* stop all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x2UL;
- else
- ui_Command = 0x10UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
-
- }
- if (data[0] == 4) {
- /* start all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x1UL;
- else
- ui_Command = 0x8UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
- }
- if (data[0] == 5) {
- /* trigger all Watchdogs */
- ui_Command = 0;
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG)
- ui_Command = 0x4UL;
- else
- ui_Command = 0x20UL;
-
- i_WatchdogNbr = i_Count;
- outl(ui_Command,
- devpriv->iobase + ((i_WatchdogNbr - 1) * 32) +
- 0);
- }
- i_Temp = 1;
- }
- return insn->n;
-}
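
The data[0] command codes listed above can be issued the same way, assuming this function is the subdevice's insn_write handler; a short illustrative sketch:

#include <comedilib.h>

/* Illustrative only -- not part of this patch. */
static int apci035_start_selected_watchdog(comedi_t *dev, unsigned int subdev)
{
	lsampl_t cmd = 1;	/* 1 = start the selected timer/watchdog */
	comedi_insn insn = {
		.insn   = INSN_WRITE,
		.n      = 1,
		.data   = &cmd,
		.subdev = subdev,
	};

	return comedi_do_insn(dev, &insn);
}
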
-
-/*
- * Read The Selected Timer, Counter or Watchdog
- *
- * data[0] software trigger status
- * data[1] hardware trigger status
- * data[2] Software clear status
- * data[3] Overflow status
- * data[4] Timer actual value
- */
-static int apci035_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Status; /* Status register */
-
- i_WatchdogNbr = insn->unused[0];
-
- /* Get the status */
- ui_Status = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 16);
-
- /* Get the software trigger status */
- data[0] = ((ui_Status >> 1) & 1);
-
- /* Get the hardware trigger status */
- data[1] = ((ui_Status >> 2) & 1);
-
- /* Get the software clear status */
- data[2] = ((ui_Status >> 3) & 1);
-
- /* Get the overflow status */
- data[3] = ((ui_Status >> 0) & 1);
- if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER)
- data[4] = inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
-
- return insn->n;
-}
-
-/*
- * Configures The Analog Input Subdevice
- *
- * data[0] Warning delay value
- */
-static int apci035_ai_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->tsk_Current = current;
- outl(0x200 | 0, devpriv->iobase + 128 + 0x4);
- outl(0, devpriv->iobase + 128 + 0);
-
- /* Initialise the warning value */
- outl(0x300 | 0, devpriv->iobase + 128 + 0x4);
- outl((data[0] << 8), devpriv->iobase + 128 + 0);
- outl(0x200000UL, devpriv->iobase + 128 + 12);
-
- return insn->n;
-}
-
-/*
- * Read value of the selected channel
- *
- * data[0] Digital Value Of Input
- */
-static int apci035_ai_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_CommandRegister;
-
- /* Set the start */
- ui_CommandRegister = 0x80000;
-
- /* Write the command register */
- outl(ui_CommandRegister, devpriv->iobase + 128 + 8);
-
- /* Read the digital value of the input */
- data[0] = inl(devpriv->iobase + 128 + 28);
- return insn->n;
-}
-
-static int apci035_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int i_Count;
-
- for (i_Count = 1; i_Count <= 4; i_Count++) {
- i_WatchdogNbr = i_Count;
-
- /* stop all timers */
- outl(0x0, devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 0);
- }
- outl(0x0, devpriv->iobase + 128 + 12); /* Disable the warning delay */
-
- return 0;
-}
-
-static void apci035_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister1;
- unsigned int ui_StatusRegister2;
- unsigned int ui_ReadCommand;
- unsigned int ui_ChannelNumber;
- unsigned int ui_DigitalTemperature;
-
- if (i_Temp == 1) {
- i_WatchdogNbr = i_Flag;
- i_Flag = i_Flag + 1;
- }
-
- /* Read the interrupt status register of temperature Warning */
- ui_StatusRegister1 = inl(devpriv->iobase + 128 + 16);
-
- /* Read the interrupt status register for Watchdog/timer */
- ui_StatusRegister2 =
- inl(devpriv->iobase + ((i_WatchdogNbr - 1) * 32) + 20);
-
- /* Test if warning relay interrupt */
- if ((((ui_StatusRegister1) & 0x8) == 0x8)) {
-
- /* Disable the temperature warning */
- ui_ReadCommand = inl(devpriv->iobase + 128 + 12);
- ui_ReadCommand = ui_ReadCommand & 0xFFDF0000UL;
- outl(ui_ReadCommand, devpriv->iobase + 128 + 12);
-
- /* Read the channel number */
- ui_ChannelNumber = inl(devpriv->iobase + 128 + 60);
-
- /* Read the digital temperature value */
- ui_DigitalTemperature = inl(devpriv->iobase + 128 + 60);
-
- /* send a SIGIO signal to the user process */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } else if ((ui_StatusRegister2 & 0x1) == 0x1) {
- /* send a SIGIO signal to the user process */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- }
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 0ea081e1e119..bfa9228c833f 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -158,7 +158,7 @@ static int apci1500_di_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
int i_MaxChannel = 0, i_Count = 0, i_EventMask = 0;
int i_PatternTransitionCount = 0, i_RegValue;
@@ -466,7 +466,7 @@ static int apci1500_di_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
0, i_RegValue;
@@ -653,7 +653,7 @@ static int apci1500_di_read(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_DummyRead = 0;
/* Software reset */
@@ -789,7 +789,7 @@ static int apci1500_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
data[1] = inw(devpriv->i_IobaseAddon + APCI1500_DIGITAL_IP);
@@ -807,7 +807,7 @@ static int apci1500_do_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
devpriv->b_OutputMemoryStatus = data[0];
return insn->n;
@@ -821,7 +821,7 @@ static int apci1500_do_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
static unsigned int ui_Temp;
unsigned int ui_Temp1;
unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */
@@ -981,7 +981,7 @@ static int apci1500_timer_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_TimerCounterMode, i_MasterConfiguration;
devpriv->tsk_Current = current;
@@ -1471,7 +1471,7 @@ static int apci1500_timer_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_CommandAndStatusValue;
switch (data[0]) {
@@ -1731,7 +1731,7 @@ static int apci1500_timer_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_CommandAndStatusValue;
switch (data[0]) {
@@ -1895,7 +1895,7 @@ static int apci1500_do_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
unsigned int ui_Status;
int i_RegValue;
int i_Constant;
@@ -2011,11 +2011,11 @@ static int apci1500_do_bits(struct comedi_device *dev,
return insn->n;
}
-static void apci1500_interrupt(int irq, void *d)
+static irqreturn_t apci1500_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
unsigned int ui_InterruptStatus = 0;
int i_RegValue = 0;
@@ -2180,11 +2180,13 @@ static void apci1500_interrupt(int irq, void *d)
"Interrupt from unknown source\n");
}
+
+ return IRQ_HANDLED;
}
static int apci1500_reset(struct comedi_device *dev)
{
- struct addi_private *devpriv = dev->private;
+ struct apci1500_private *devpriv = dev->private;
int i_DummyRead = 0;
i_TimerCounter1Init = 0;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index 98de96953a29..fa99c8ca4f95 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -16,244 +16,189 @@
#define ADDIDATA_TIMER 0
#define ADDIDATA_COUNTER 1
#define ADDIDATA_WATCHDOG 2
-#define APCI1564_COUNTER1 0
-#define APCI1564_COUNTER2 1
-#define APCI1564_COUNTER3 2
-#define APCI1564_COUNTER4 3
-
-/*
- * devpriv->amcc_iobase Register Map
- */
-#define APCI1564_DI_REG 0x04
-#define APCI1564_DI_INT_MODE1_REG 0x08
-#define APCI1564_DI_INT_MODE2_REG 0x0c
-#define APCI1564_DI_INT_STATUS_REG 0x10
-#define APCI1564_DI_IRQ_REG 0x14
-#define APCI1564_DO_REG 0x18
-#define APCI1564_DO_INT_CTRL_REG 0x1c
-#define APCI1564_DO_INT_STATUS_REG 0x20
-#define APCI1564_DO_IRQ_REG 0x24
-#define APCI1564_WDOG_REG 0x28
-#define APCI1564_WDOG_RELOAD_REG 0x2c
-#define APCI1564_WDOG_TIMEBASE_REG 0x30
-#define APCI1564_WDOG_CTRL_REG 0x34
-#define APCI1564_WDOG_STATUS_REG 0x38
-#define APCI1564_WDOG_IRQ_REG 0x3c
-#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x40
-#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x44
-#define APCI1564_TIMER_REG 0x48
-#define APCI1564_TIMER_RELOAD_REG 0x4c
-#define APCI1564_TIMER_TIMEBASE_REG 0x50
-#define APCI1564_TIMER_CTRL_REG 0x54
-#define APCI1564_TIMER_STATUS_REG 0x58
-#define APCI1564_TIMER_IRQ_REG 0x5c
-#define APCI1564_TIMER_WARN_TIMEVAL_REG 0x60
-#define APCI1564_TIMER_WARN_TIMEBASE_REG 0x64
-
-/*
- * dev->iobase Register Map
- */
-#define APCI1564_COUNTER_REG(x) (0x00 + ((x) * 0x20))
-#define APCI1564_COUNTER_RELOAD_REG(x) (0x04 + ((x) * 0x20))
-#define APCI1564_COUNTER_TIMEBASE_REG(x) (0x08 + ((x) * 0x20))
-#define APCI1564_COUNTER_CTRL_REG(x) (0x0c + ((x) * 0x20))
-#define APCI1564_COUNTER_STATUS_REG(x) (0x10 + ((x) * 0x20))
-#define APCI1564_COUNTER_IRQ_REG(x) (0x14 + ((x) * 0x20))
-#define APCI1564_COUNTER_WARN_TIMEVAL_REG(x) (0x18 + ((x) * 0x20))
-#define APCI1564_COUNTER_WARN_TIMEBASE_REG(x) (0x1c + ((x) * 0x20))
-
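
Worked example of the per-counter register layout being removed here: each counter occupies a 0x20-byte block relative to dev->iobase. The replacement code reaches the same registers through devpriv->counters plus the generic ADDI_TCW_*_REG offsets and APCI1564_COUNTER(chan), which is assumed to encode the same 0x20 stride.

/*
 * Old scheme (being removed), relative to dev->iobase:
 *
 *   APCI1564_COUNTER_CTRL_REG(0)   = 0x0c + 0 * 0x20 = 0x0c
 *   APCI1564_COUNTER_CTRL_REG(1)   = 0x0c + 1 * 0x20 = 0x2c
 *   APCI1564_COUNTER_STATUS_REG(3) = 0x10 + 3 * 0x20 = 0x70
 *
 * New scheme (assumed equivalent):
 *
 *   devpriv->counters + APCI1564_COUNTER(chan) + ADDI_TCW_CTRL_REG
 */
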
-/*
- * Configures The Timer or Counter
- *
- * data[0] Configure as: 0 = Timer, 1 = Counter
- * data[1] 1 = Enable Interrupt, 0 = Disable Interrupt
- * data[2] Time Unit
- * data[3] Reload Value
- * data[4] Timer Mode
- * data[5] Timer Counter Watchdog Number
- * data[6] Counter Direction
- */
-static int apci1564_timer_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+
+static int apci1564_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
+ unsigned int ctrl;
devpriv->tsk_current = current;
- if (data[0] == ADDIDATA_TIMER) {
- /* First Stop The Timer */
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- /* Stop The Timer */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
-
- devpriv->timer_select_mode = ADDIDATA_TIMER;
- if (data[1] == 1) {
- /* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
- outl(0x02, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_IRQ_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_WDOG_IRQ_REG);
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER1));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER2));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER3));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_IRQ_REG(APCI1564_COUNTER4));
- } else {
- /* disable Timer interrupt */
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- }
- /* Loading Timebase */
- outl(data[2], devpriv->amcc_iobase + APCI1564_TIMER_TIMEBASE_REG);
-
- /* Loading the Reload value */
- outl(data[3], devpriv->amcc_iobase + APCI1564_TIMER_RELOAD_REG);
-
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
- /* mode 2 */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- } else if (data[0] == ADDIDATA_COUNTER) {
- devpriv->timer_select_mode = ADDIDATA_COUNTER;
- devpriv->mode_select_register = data[5];
-
- /* First Stop The Counter */
- ul_Command1 = inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- /* Stop The Timer */
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Set the reload value */
- outl(data[3], dev->iobase +
- APCI1564_COUNTER_RELOAD_REG(data[5] - 1));
-
- /* Set the mode : */
- /* - Disable the hardware */
- /* - Disable the counter mode */
- /* - Disable the warning */
- /* - Disable the reset */
- /* - Disable the timer mode */
- /* - Enable the counter mode */
-
- ul_Command1 =
- (ul_Command1 & 0xFFFC19E2UL) | 0x80000UL |
- (unsigned int) ((unsigned int) data[4] << 16UL);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Enable or Disable Interrupt */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FD) | (data[1] << 1);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
-
- /* Set the Up/Down selection */
- ul_Command1 = (ul_Command1 & 0xFFFBF9FFUL) | (data[6] << 18);
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(data[5] - 1));
+ /* First Stop The Timer */
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfffff9fe;
+ /* Stop The Timer */
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
+
+ if (data[1] == 1) {
+ /* Enable timer int & disable all the other int sources */
+ outl(0x02, devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
+ outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
+ if (devpriv->counters) {
+ unsigned long iobase;
+
+ iobase = devpriv->counters + ADDI_TCW_IRQ_REG;
+ outl(0x0, iobase + APCI1564_COUNTER(0));
+ outl(0x0, iobase + APCI1564_COUNTER(1));
+ outl(0x0, iobase + APCI1564_COUNTER(2));
+ }
} else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
+ /* disable Timer interrupt */
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
}
+
+ /* Loading Timebase */
+ outl(data[2], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+
+ /* Loading the Reload value */
+ outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
+
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfff719e2;
+ ctrl |= (2 << 13) | 0x10;
+ /* mode 2 */
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
+
return insn->n;
}
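
For reference, the control-register update at the end of the new insn_config works out numerically as shown below; the bit meanings are taken from the comments in the code being removed (disable the hardware, counter mode, warning and reset; enable timer mode; select mode 2).

/*
 * Worked example: starting from ctrl == 0xffffffff,
 *   ctrl &= 0xfff719e2;        ->  0xfff719e2
 *   ctrl |= (2 << 13) | 0x10;  ->  0xfff759f2   ((2 << 13) == 0x4000)
 */
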
-/*
- * Start / Stop The Selected Timer or Counter
- *
- * data[0] Configure as: 0 = Timer, 1 = Counter
- * data[1] 0 = Stop, 1 = Start, 2 = Trigger Clear (Only Counter)
- */
-static int apci1564_timer_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci1564_timer_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
+ unsigned int ctrl;
+
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ switch (data[1]) {
+ case 0: /* Stop The Timer */
+ ctrl &= 0xfffff9fe;
+ break;
+ case 1: /* Enable the Timer */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x1;
+ break;
+ }
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
- if (devpriv->timer_select_mode == ADDIDATA_TIMER) {
- if (data[1] == 1) {
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
+ return insn->n;
+}
- /* Enable the Timer */
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- } else if (data[1] == 0) {
- /* Stop The Timer */
+static int apci1564_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+
+ /* Stores the status of the Timer */
+ data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) & 0x1;
+
+ /* Stores the Actual value of the Timer */
+ data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
- ul_Command1 = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
- outl(ul_Command1, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- }
- } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) {
- ul_Command1 =
- inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1));
- if (data[1] == 1) {
- /* Start the Counter subdevice */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
- } else if (data[1] == 0) {
- /* Stops the Counter subdevice */
- ul_Command1 = 0;
-
- } else if (data[1] == 2) {
- /* Clears the Counter subdevice */
- ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x400;
- }
- outl(ul_Command1, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(devpriv->mode_select_register - 1));
- } else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
- }
return insn->n;
}
-/*
- * Read The Selected Timer or Counter
- */
-static int apci1564_timer_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int apci1564_counter_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct apci1564_private *devpriv = dev->private;
- unsigned int ul_Command1 = 0;
-
- if (devpriv->timer_select_mode == ADDIDATA_TIMER) {
- /* Stores the status of the Timer */
- data[0] = inl(devpriv->amcc_iobase + APCI1564_TIMER_STATUS_REG) & 0x1;
-
- /* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->amcc_iobase + APCI1564_TIMER_REG);
- } else if (devpriv->timer_select_mode == ADDIDATA_COUNTER) {
- /* Read the Counter Actual Value. */
- data[0] =
- inl(dev->iobase +
- APCI1564_COUNTER_REG(devpriv->mode_select_register - 1));
- ul_Command1 =
- inl(dev->iobase +
- APCI1564_COUNTER_STATUS_REG(devpriv->mode_select_register - 1));
-
- /* Get the software trigger status */
- data[1] = (unsigned char) ((ul_Command1 >> 1) & 1);
-
- /* Get the hardware trigger status */
- data[2] = (unsigned char) ((ul_Command1 >> 2) & 1);
-
- /* Get the software clear status */
- data[3] = (unsigned char) ((ul_Command1 >> 3) & 1);
-
- /* Get the overflow status */
- data[4] = (unsigned char) ((ul_Command1 >> 0) & 1);
- } else {
- dev_err(dev->class_dev, "Invalid subdevice.\n");
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int ctrl;
+
+ devpriv->tsk_current = current;
+
+ /* First Stop The Counter */
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ ctrl &= 0xfffff9fe;
+ /* Stop the counter */
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Set the reload value */
+ outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
+
+ /* Set the mode : */
+ /* - Disable the hardware */
+ /* - Disable the counter mode */
+ /* - Disable the warning */
+ /* - Disable the reset */
+ /* - Disable the timer mode */
+ /* - Enable the counter mode */
+
+ ctrl &= 0xfffc19e2;
+ ctrl |= 0x80000 | (data[4] << 16);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Enable or Disable Interrupt */
+ ctrl &= 0xfffff9fd;
+ ctrl |= (data[1] << 1);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Set the Up/Down selection */
+ ctrl &= 0xfffbf9ff;
+ ctrl |= (data[6] << 18);
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int ctrl;
+
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ switch (data[1]) {
+ case 0: /* Stops the Counter subdevice */
+ ctrl = 0;
+ break;
+ case 1: /* Start the Counter subdevice */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x1;
+ break;
+ case 2: /* Clears the Counter subdevice */
+ ctrl &= 0xfffff9ff;
+ ctrl |= 0x400;
+ break;
}
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int status;
+
+ /* Read the Counter Actual Value. */
+ data[0] = inl(iobase + ADDI_TCW_VAL_REG);
+
+ status = inl(iobase + ADDI_TCW_STATUS_REG);
+ data[1] = (status >> 1) & 1; /* software trigger status */
+ data[2] = (status >> 2) & 1; /* hardware trigger status */
+ data[3] = (status >> 3) & 1; /* software clear status */
+ data[4] = (status >> 0) & 1; /* overflow status */
+
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
deleted file mode 100644
index 2950815b65f4..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ /dev/null
@@ -1,2050 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : APCI-3120 | Compiler : GCC |
- | Module name : hwdrv_apci3120.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description :APCI3120 Module. Hardware abstraction Layer for APCI3120|
- +-----------------------------------------------------------------------+
- | UPDATE'S |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-#include <linux/delay.h>
-
-/*
- * ADDON RELATED ADDITIONS
- */
-/* Constant */
-#define APCI3120_ENABLE_TRANSFER_ADD_ON_LOW 0x00
-#define APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH 0x1200
-#define APCI3120_A2P_FIFO_MANAGEMENT 0x04000400L
-#define APCI3120_AMWEN_ENABLE 0x02
-#define APCI3120_A2P_FIFO_WRITE_ENABLE 0x01
-#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 0x20000000L
-#define APCI3120_ENABLE_WRITE_TC_INT 0x00004000L
-#define APCI3120_CLEAR_WRITE_TC_INT 0x00040000L
-#define APCI3120_DISABLE_AMWEN_AND_A2P_FIFO_WRITE 0x0
-#define APCI3120_DISABLE_BUS_MASTER_ADD_ON 0x0
-#define APCI3120_DISABLE_BUS_MASTER_PCI 0x0
-
-/* ADD_ON ::: this is needed since the APCI supports a 16-bit interface to the add-on */
-#define APCI3120_ADD_ON_AGCSTS_LOW 0x3C
-#define APCI3120_ADD_ON_AGCSTS_HIGH (APCI3120_ADD_ON_AGCSTS_LOW + 2)
-#define APCI3120_ADD_ON_MWAR_LOW 0x24
-#define APCI3120_ADD_ON_MWAR_HIGH (APCI3120_ADD_ON_MWAR_LOW + 2)
-#define APCI3120_ADD_ON_MWTC_LOW 0x058
-#define APCI3120_ADD_ON_MWTC_HIGH (APCI3120_ADD_ON_MWTC_LOW + 2)
-
-/* AMCC */
-#define APCI3120_AMCC_OP_MCSR 0x3C
-#define APCI3120_AMCC_OP_REG_INTCSR 0x38
-
-/* for transfer count enable bit */
-#define AGCSTS_TC_ENABLE 0x10000000
-
-/* used for test on mixture of BIP/UNI ranges */
-#define APCI3120_BIPOLAR_RANGES 4
-
-#define APCI3120_ADDRESS_RANGE 16
-
-#define APCI3120_DISABLE 0
-#define APCI3120_ENABLE 1
-
-#define APCI3120_START 1
-#define APCI3120_STOP 0
-
-#define APCI3120_EOC_MODE 1
-#define APCI3120_EOS_MODE 2
-#define APCI3120_DMA_MODE 3
-
-/* DIGITAL INPUT-OUTPUT DEFINE */
-
-#define APCI3120_DIGITAL_OUTPUT 0x0d
-#define APCI3120_RD_STATUS 0x02
-#define APCI3120_RD_FIFO 0x00
-
-/* digital output insn_write ON /OFF selection */
-#define APCI3120_SET4DIGITALOUTPUTON 1
-#define APCI3120_SET4DIGITALOUTPUTOFF 0
-
-/* analog output SELECT BIT */
-#define APCI3120_ANALOG_OP_CHANNEL_1 0x0000
-#define APCI3120_ANALOG_OP_CHANNEL_2 0x4000
-#define APCI3120_ANALOG_OP_CHANNEL_3 0x8000
-#define APCI3120_ANALOG_OP_CHANNEL_4 0xc000
-#define APCI3120_ANALOG_OP_CHANNEL_5 0x0000
-#define APCI3120_ANALOG_OP_CHANNEL_6 0x4000
-#define APCI3120_ANALOG_OP_CHANNEL_7 0x8000
-#define APCI3120_ANALOG_OP_CHANNEL_8 0xc000
-
-/* Enable external trigger bit in nWrAddress */
-#define APCI3120_ENABLE_EXT_TRIGGER 0x8000
-
-/* ANALOG OUTPUT AND INPUT DEFINE */
-#define APCI3120_UNIPOLAR 0x80
-#define APCI3120_BIPOLAR 0x00
-#define APCI3120_ANALOG_OUTPUT_1 0x08
-#define APCI3120_ANALOG_OUTPUT_2 0x0a
-#define APCI3120_1_GAIN 0x00
-#define APCI3120_2_GAIN 0x10
-#define APCI3120_5_GAIN 0x20
-#define APCI3120_10_GAIN 0x30
-#define APCI3120_SEQ_RAM_ADDRESS 0x06
-#define APCI3120_RESET_FIFO 0x0c
-#define APCI3120_TIMER_0_MODE_2 0x01
-#define APCI3120_TIMER_0_MODE_4 0x2
-#define APCI3120_SELECT_TIMER_0_WORD 0x00
-#define APCI3120_ENABLE_TIMER0 0x1000
-#define APCI3120_CLEAR_PR 0xf0ff
-#define APCI3120_CLEAR_PA 0xfff0
-#define APCI3120_CLEAR_PA_PR (APCI3120_CLEAR_PR & APCI3120_CLEAR_PA)
-
-/* nWrMode_Select */
-#define APCI3120_ENABLE_SCAN 0x8
-#define APCI3120_DISABLE_SCAN (~APCI3120_ENABLE_SCAN)
-#define APCI3120_ENABLE_EOS_INT 0x2
-
-#define APCI3120_DISABLE_EOS_INT (~APCI3120_ENABLE_EOS_INT)
-#define APCI3120_ENABLE_EOC_INT 0x1
-#define APCI3120_DISABLE_EOC_INT (~APCI3120_ENABLE_EOC_INT)
-#define APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER \
- (APCI3120_DISABLE_EOS_INT & APCI3120_DISABLE_EOC_INT)
-#define APCI3120_DISABLE_ALL_INTERRUPT \
- (APCI3120_DISABLE_TIMER_INT & APCI3120_DISABLE_EOS_INT & APCI3120_DISABLE_EOC_INT)
-
-/* status register bits */
-#define APCI3120_EOC 0x8000
-#define APCI3120_EOS 0x2000
-
-/* software trigger dummy register */
-#define APCI3120_START_CONVERSION 0x02
-
-/* TIMER DEFINE */
-#define APCI3120_QUARTZ_A 70
-#define APCI3120_QUARTZ_B 50
-#define APCI3120_TIMER 1
-#define APCI3120_WATCHDOG 2
-#define APCI3120_TIMER_DISABLE 0
-#define APCI3120_TIMER_ENABLE 1
-#define APCI3120_ENABLE_TIMER2 0x4000
-#define APCI3120_DISABLE_TIMER2 (~APCI3120_ENABLE_TIMER2)
-#define APCI3120_ENABLE_TIMER_INT 0x04
-#define APCI3120_DISABLE_TIMER_INT (~APCI3120_ENABLE_TIMER_INT)
-#define APCI3120_WRITE_MODE_SELECT 0x0e
-#define APCI3120_SELECT_TIMER_0_WORD 0x00
-#define APCI3120_SELECT_TIMER_1_WORD 0x01
-#define APCI3120_TIMER_1_MODE_2 0x4
-
-/* $$ BIT FOR MODE IN nCsTimerCtr1 */
-#define APCI3120_TIMER_2_MODE_0 0x0
-#define APCI3120_TIMER_2_MODE_2 0x10
-#define APCI3120_TIMER_2_MODE_5 0x30
-
-/* $$ BIT FOR MODE IN nCsTimerCtr0 */
-#define APCI3120_SELECT_TIMER_2_LOW_WORD 0x02
-#define APCI3120_SELECT_TIMER_2_HIGH_WORD 0x03
-
-#define APCI3120_TIMER_CRT0 0x0d
-#define APCI3120_TIMER_CRT1 0x0c
-
-#define APCI3120_TIMER_VALUE 0x04
-#define APCI3120_TIMER_STATUS_REGISTER 0x0d
-#define APCI3120_RD_STATUS 0x02
-#define APCI3120_WR_ADDRESS 0x00
-#define APCI3120_ENABLE_WATCHDOG 0x20
-#define APCI3120_DISABLE_WATCHDOG (~APCI3120_ENABLE_WATCHDOG)
-#define APCI3120_ENABLE_TIMER_COUNTER 0x10
-#define APCI3120_DISABLE_TIMER_COUNTER (~APCI3120_ENABLE_TIMER_COUNTER)
-#define APCI3120_FC_TIMER 0x1000
-#define APCI3120_ENABLE_TIMER0 0x1000
-#define APCI3120_ENABLE_TIMER1 0x2000
-#define APCI3120_ENABLE_TIMER2 0x4000
-#define APCI3120_DISABLE_TIMER0 (~APCI3120_ENABLE_TIMER0)
-#define APCI3120_DISABLE_TIMER1 (~APCI3120_ENABLE_TIMER1)
-#define APCI3120_DISABLE_TIMER2 (~APCI3120_ENABLE_TIMER2)
-
-#define APCI3120_TIMER2_SELECT_EOS 0xc0
-#define APCI3120_COUNTER 3
-#define APCI3120_DISABLE_ALL_TIMER (APCI3120_DISABLE_TIMER0 & \
- APCI3120_DISABLE_TIMER1 & \
- APCI3120_DISABLE_TIMER2)
-
-#define MAX_ANALOGINPUT_CHANNELS 32
-
-struct str_AnalogReadInformation {
- /* EOC or EOS */
- unsigned char b_Type;
- /* Interrupt use or not */
- unsigned char b_InterruptFlag;
- /* Selection of the conversion time */
- unsigned int ui_ConvertTiming;
- /* Number of channel to read */
- unsigned char b_NbrOfChannel;
- /* Number of the channel to be read */
- unsigned int ui_ChannelList[MAX_ANALOGINPUT_CHANNELS];
- /* Gain of each channel */
- unsigned int ui_RangeList[MAX_ANALOGINPUT_CHANNELS];
-};
-
-/* ANALOG INPUT RANGE */
-static const struct comedi_lrange range_apci3120_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-/* ANALOG OUTPUT RANGE */
-static const struct comedi_lrange range_apci3120_ao = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-
-/* FUNCTION DEFINITIONS */
-static int apci3120_ai_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int i;
-
- if ((data[0] != APCI3120_EOC_MODE) && (data[0] != APCI3120_EOS_MODE))
- return -1;
-
- /* Check for Conversion time to be added */
- devpriv->ui_EocEosConversionTime = data[2];
-
- if (data[0] == APCI3120_EOS_MODE) {
-
- /* Test the number of the channel */
- for (i = 0; i < data[3]; i++) {
-
- if (CR_CHAN(data[4 + i]) >=
- this_board->i_NbrAiChannel) {
- dev_err(dev->class_dev, "bad channel list\n");
- return -2;
- }
- }
-
- devpriv->b_InterruptMode = APCI3120_EOS_MODE;
-
- if (data[1])
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
- else
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- /* Copy channel list and Range List to devpriv */
- devpriv->ui_AiNbrofChannels = data[3];
- for (i = 0; i < devpriv->ui_AiNbrofChannels; i++)
- devpriv->ui_AiChannelList[i] = data[4 + i];
-
- } else { /* EOC */
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- if (data[1])
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
- else
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- }
-
- return insn->n;
-}
-
-/*
- * This function first checks whether the channel list is valid and then
- * initializes the sequence RAM with the polarity, gain and channel number.
- * If the last argument ("check") is 1, it only validates the channel list
- * without programming the hardware.
- */
-static int apci3120_setup_chan_list(struct comedi_device *dev,
- struct comedi_subdevice *s,
- int n_chan,
- unsigned int *chanlist,
- char check)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int i;
- unsigned int gain;
- unsigned short us_TmpValue;
-
- /* channel and range numbers are checked by comedi/range.c itself */
- if (n_chan < 1) {
- if (!check)
- dev_err(dev->class_dev,
- "range/channel list is empty!\n");
- return 0;
- }
- /* All is ok, so we can setup channel/range list */
- if (check)
- return 1;
-
- /* Set the PA and PR... here it sets PA to 0 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_CLEAR_PA_PR;
- devpriv->us_OutputRegister = ((n_chan - 1) & 0xf) << 8;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
- for (i = 0; i < n_chan; i++) {
- /* store range list to card */
- us_TmpValue = CR_CHAN(chanlist[i]); /* get channel number */
-
- if (CR_RANGE(chanlist[i]) < APCI3120_BIPOLAR_RANGES)
- us_TmpValue &= ((~APCI3120_UNIPOLAR) & 0xff); /* set bipolar */
- else
- us_TmpValue |= APCI3120_UNIPOLAR; /* enable unipolar */
-
- gain = CR_RANGE(chanlist[i]); /* get gain number */
- us_TmpValue |= ((gain & 0x03) << 4); /* <<4 for G0 and G1 bit in RAM */
- us_TmpValue |= i << 8; /* To select the RAM LOCATION */
- outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);
- }
- return 1; /* we can serve this with scan logic */
-}
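
The chanlist entries this function unpacks with CR_CHAN() and CR_RANGE() are normally built on the user side with comedi's CR_PACK() macro. A minimal sketch against the range table above (range index 1 is +/-5 V bipolar, index 4 is 0-10 V unipolar); AREF_GROUND is an assumption about the wiring, and <comedilib.h> is assumed to provide the macros:

/* Illustrative only -- not part of this patch. */
unsigned int chanlist[2] = {
	CR_PACK(0, 1, AREF_GROUND),	/* channel 0, +/-5 V: range < APCI3120_BIPOLAR_RANGES */
	CR_PACK(3, 4, AREF_GROUND),	/* channel 3, 0-10 V: APCI3120_UNIPOLAR bit gets set */
};
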
-
-/*
- * Reads the analog input in synchronous mode. EOC or EOS mode is selected
- * as configured. If no conversion time is set, the default conversion
- * time of 10 microseconds is used.
- */
-static int apci3120_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned short us_ConvertTiming, us_TmpValue, i;
- unsigned char b_Tmp;
-
- /* fix conversion time to 10 us */
- if (!devpriv->ui_EocEosConversionTime)
- us_ConvertTiming = 10;
- else
- us_ConvertTiming = (unsigned short) (devpriv->ui_EocEosConversionTime / 1000); /* nano to useconds */
-
- /* Clear software registers */
- devpriv->b_TimerSelectMode = 0;
- devpriv->b_ModeSelectRegister = 0;
- devpriv->us_OutputRegister = 0;
-
- if (insn->unused[0] == 222) { /* second insn read */
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ui_AiReadData[i];
- } else {
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- /*
- * Test whether the board has the new Quartz and calculate the time value
- * to set in the timer
- */
- us_TmpValue =
- (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
- /* EL250804: Test whether the APCI3120 board has the new Quartz or if it is an APCI3001 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- us_ConvertTiming = (us_ConvertTiming * 2) - 2;
- } else {
- us_ConvertTiming =
- ((us_ConvertTiming * 12926) / 10000) - 1;
- }
-
- us_TmpValue = (unsigned short) devpriv->b_InterruptMode;
-
- switch (us_TmpValue) {
-
- case APCI3120_EOC_MODE:
-
- /*
- * Test the interrupt flag, set the EOC bit and clear the FIFO
- */
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
-
- /* Initialize the sequence array */
- if (!apci3120_setup_chan_list(dev, s, 1,
- &insn->chanspec, 0))
- return -EINVAL;
-
- /* Initialize Timer 0 mode 4 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) |
- APCI3120_TIMER_0_MODE_4;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /* Reset the scan bit and Disables the EOS, DMA, EOC interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_SCAN;
-
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
-
- /* Disables the EOS,DMA and enables the EOC interrupt */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_EOS_INT) |
- APCI3120_ENABLE_EOC_INT;
- inw(devpriv->iobase);
-
- } else {
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER;
- }
-
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Sets gate 0 */
- devpriv->us_OutputRegister =
- (devpriv->
- us_OutputRegister & APCI3120_CLEAR_PA_PR) |
- APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(us_ConvertTiming,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- us_TmpValue =
- (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS);
-
- if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) {
-
- do {
- /* Waiting for the end of conversion */
- us_TmpValue =
- inw(devpriv->iobase +
- APCI3120_RD_STATUS);
- } while ((us_TmpValue & APCI3120_EOC) ==
- APCI3120_EOC);
-
- /* Read the result in FIFO and put it in insn data pointer */
- us_TmpValue = inw(devpriv->iobase + 0);
- *data = us_TmpValue;
-
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
- }
-
- break;
-
- case APCI3120_EOS_MODE:
-
- inw(devpriv->iobase);
- /* Clears the FIFO */
- inw(devpriv->iobase + APCI3120_RESET_FIFO);
- /* clear PA PR and disable timer 0 */
-
- devpriv->us_OutputRegister =
- (devpriv->
- us_OutputRegister & APCI3120_CLEAR_PA_PR) |
- APCI3120_DISABLE_TIMER0;
-
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- if (!apci3120_setup_chan_list(dev, s,
- devpriv->ui_AiNbrofChannels,
- devpriv->ui_AiChannelList, 0))
- return -EINVAL;
-
- /* Initialize Timer 0 mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) |
- APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(us_ConvertTiming,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Set the scan bit */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister | APCI3120_ENABLE_SCAN;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* If the interrupt handler is installed */
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
- /* Disables the EOC,DMA and enables the EOS interrupt */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_EOC_INT) |
- APCI3120_ENABLE_EOS_INT;
- inw(devpriv->iobase);
-
- } else
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_ALL_INTERRUPT_WITHOUT_TIMER;
-
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- inw(devpriv->iobase + APCI3120_RD_STATUS);
-
- /* Sets gate 0 */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Start conversion */
- outw(0, devpriv->iobase + APCI3120_START_CONVERSION);
-
- /* Wait for the end of conversion if no interrupt is installed */
- if (devpriv->b_EocEosInterrupt == APCI3120_DISABLE) {
- /* Wait for the end of conversion */
- do {
- us_TmpValue =
- inw(devpriv->iobase +
- APCI3120_RD_STATUS);
- } while ((us_TmpValue & APCI3120_EOS) !=
- APCI3120_EOS);
-
- for (i = 0; i < devpriv->ui_AiNbrofChannels;
- i++) {
- /* Read the result in FIFO and write them in shared memory */
- us_TmpValue = inw(devpriv->iobase);
- data[i] = (unsigned int) us_TmpValue;
- }
-
- devpriv->b_InterruptMode = APCI3120_EOC_MODE; /* Restore defaults */
- }
- break;
-
- default:
- dev_err(dev->class_dev, "inputs wrong\n");
-
- }
- devpriv->ui_EocEosConversionTime = 0; /* re initializing the variable */
- }
-
- return insn->n;
-
-}
-
-static int apci3120_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int i;
- unsigned short us_TmpValue;
-
- devpriv->ai_running = 0;
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- devpriv->ui_EocEosConversionTime = 0; /* set eoc eos conv time to 0 */
-
- /* variables used in timer subdevice */
- devpriv->b_Timer2Mode = 0;
- devpriv->b_Timer2Interrupt = 0;
- devpriv->b_ExttrigEnable = 0; /* Disable ext trigger */
-
- /* Disable all interrupts and the watchdog for the analog output */
- devpriv->b_ModeSelectRegister = 0;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Disables all counters, ext trigger and clears PA, PR */
- devpriv->us_OutputRegister = 0;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
- /*
- * Set all analog output channels to 0 V. 8191 is the decimal
- * value for zero volts in bipolar mode (the default).
- */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_1, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 1 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_2, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 2 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_3, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 3 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_4, dev->iobase + APCI3120_ANALOG_OUTPUT_1); /* channel 4 */
-
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_5, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 5 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_6, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 6 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_7, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 7 */
- outw(8191 | APCI3120_ANALOG_OP_CHANNEL_8, dev->iobase + APCI3120_ANALOG_OUTPUT_2); /* channel 8 */
-
- udelay(10);
-
- inw(dev->iobase + 0); /* make a dummy read */
- inb(dev->iobase + APCI3120_RESET_FIFO); /* flush FIFO */
- inw(dev->iobase + APCI3120_RD_STATUS); /* flush A/D status register */
-
- /* code to reset the RAM sequence */
- for (i = 0; i < 16; i++) {
- us_TmpValue = i << 8; /* select the location */
- outw(us_TmpValue, dev->iobase + APCI3120_SEQ_RAM_ADDRESS);
- }
- return 0;
-}
-
-static int apci3120_exttrig_enable(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->us_OutputRegister |= APCI3120_ENABLE_EXT_TRIGGER;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
- return 0;
-}
-
-static int apci3120_exttrig_disable(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- devpriv->us_OutputRegister &= ~APCI3120_ENABLE_EXT_TRIGGER;
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
- return 0;
-}
-
-static int apci3120_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
-
- /* Disable A2P Fifo write and AMWEN signal */
- outw(0, devpriv->i_IobaseAddon + 4);
-
- /* Disable Bus Master ADD ON */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(0, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(0, devpriv->i_IobaseAddon + 2);
-
- /* Disable BUS Master PCI */
- outl(0, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
-
- /* Disable ext trigger */
- apci3120_exttrig_disable(dev);
-
- devpriv->us_OutputRegister = 0;
- /* stop counters */
- outw(devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1, dev->iobase + APCI3120_WR_ADDRESS);
-
- outw(APCI3120_DISABLE_ALL_TIMER, dev->iobase + APCI3120_WR_ADDRESS);
-
- /* DISABLE_ALL_INTERRUPT */
- outb(APCI3120_DISABLE_ALL_INTERRUPT,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- /* Flush FIFO */
- inb(dev->iobase + APCI3120_RESET_FIFO);
- inw(dev->iobase + APCI3120_RD_STATUS);
- devpriv->ui_AiActualScan = 0;
- s->async->cur_chan = 0;
- devpriv->ui_DmaActualBuffer = 0;
-
- devpriv->ai_running = 0;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- apci3120_reset(dev);
- return 0;
-}
-
-static int apci3120_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
- int err = 0;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (err)
- return 1;
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(cmd->start_src);
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err)
- return 2;
-
- /* Step 3: check if arguments are trivially valid */
-
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-
- if (cmd->scan_begin_src == TRIG_TIMER) /* Test Delay timing */
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 100000);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- if (cmd->convert_arg)
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
- 10000);
- } else {
- err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
- }
-
- err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- else /* TRIG_NONE */
- err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
-
- if (err)
- return 3;
-
- /* step 4: fix up any arguments */
-
- if (cmd->scan_begin_src == TRIG_TIMER &&
- cmd->scan_begin_arg < cmd->convert_arg * cmd->scan_end_arg) {
- cmd->scan_begin_arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= -EINVAL;
- }
-
- if (err)
- return 4;
-
- return 0;
-}
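
A comedi_cmd that satisfies the checks above, as a user-space sketch (subdevice index, channel list and timings are illustrative assumptions; <comedilib.h> is assumed): start_arg must be 0, scan_begin_arg >= 100000 ns, convert_arg >= 10000 ns, scan_end_arg equal to chanlist_len, and scan_begin_arg >= convert_arg * scan_end_arg.

/* Illustrative only -- not part of this patch. */
unsigned int chanlist[2] = {
	CR_PACK(0, 0, AREF_GROUND),
	CR_PACK(1, 0, AREF_GROUND),
};

comedi_cmd cmd = {
	.subdev         = 0,		/* assumed analog input subdevice */
	.start_src      = TRIG_NOW,
	.start_arg      = 0,
	.scan_begin_src = TRIG_TIMER,
	.scan_begin_arg = 200000,	/* 200 us per scan, >= 100000 ns minimum */
	.convert_src    = TRIG_TIMER,
	.convert_arg    = 20000,	/* 20 us per conversion, >= 10000 ns minimum */
	.scan_end_src   = TRIG_COUNT,
	.scan_end_arg   = 2,		/* == chanlist_len */
	.stop_src       = TRIG_COUNT,
	.stop_arg       = 100,		/* 100 scans */
	.chanlist       = chanlist,
	.chanlist_len   = 2,
};
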
-
-/*
- * This is used for analog input cyclic acquisition.
- * It performs the command operations.
- * If DMA is configured, it initializes the DMA transfer; otherwise it
- * performs the acquisition with the EOS interrupt.
- */
-static int apci3120_cyclic_ai(int mode,
- struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned char b_Tmp;
- unsigned int ui_Tmp, ui_DelayTiming = 0, ui_TimerValue1 = 0, dmalen0 =
- 0, dmalen1 = 0, ui_TimerValue2 =
- 0, ui_TimerValue0, ui_ConvertTiming;
- unsigned short us_TmpValue;
-
- /* Resets the FIFO */
- inb(dev->iobase + APCI3120_RESET_FIFO);
-
- devpriv->ai_running = 1;
-
- /* clear software registers */
- devpriv->b_TimerSelectMode = 0;
- devpriv->us_OutputRegister = 0;
- devpriv->b_ModeSelectRegister = 0;
-
- /* Clear Timer Write TC int */
- outl(APCI3120_CLEAR_WRITE_TC_INT,
- devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_REG_INTCSR);
-
- /* Disables All Timer */
- /* Sets PR and PA to 0 */
- devpriv->us_OutputRegister = devpriv->us_OutputRegister &
- APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1 & APCI3120_CLEAR_PA_PR;
-
- outw(devpriv->us_OutputRegister, dev->iobase + APCI3120_WR_ADDRESS);
-
- /* Resets the FIFO */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- inb(devpriv->iobase + APCI3120_RESET_FIFO);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- devpriv->ui_AiActualScan = 0;
- s->async->cur_chan = 0;
- devpriv->ui_DmaActualBuffer = 0;
-
- /* the timer2 value must be reduced by 2 */
- ui_TimerValue2 = cmd->stop_arg - 2;
- ui_ConvertTiming = cmd->convert_arg;
-
- if (mode == 2)
- ui_DelayTiming = cmd->scan_begin_arg;
-
- /* Initializes the sequence array */
- if (!apci3120_setup_chan_list(dev, s, devpriv->ui_AiNbrofChannels,
- cmd->chanlist, 0))
- return -EINVAL;
-
- us_TmpValue = (unsigned short) inw(dev->iobase + APCI3120_RD_STATUS);
-
- /* EL241003 Begin: add this section to replace floating-point calculations with integer calculations */
- /* EL250804: Test whether the APCI3120 board has the new Quartz or if it is an APCI3001 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- ui_TimerValue0 = ui_ConvertTiming * 2 - 2000;
- ui_TimerValue0 = ui_TimerValue0 / 1000;
-
- if (mode == 2) {
- ui_DelayTiming = ui_DelayTiming / 1000;
- ui_TimerValue1 = ui_DelayTiming * 2 - 200;
- ui_TimerValue1 = ui_TimerValue1 / 100;
- }
- } else {
- ui_ConvertTiming = ui_ConvertTiming / 1000;
- ui_TimerValue0 = ui_ConvertTiming * 12926 - 10000;
- ui_TimerValue0 = ui_TimerValue0 / 10000;
-
- if (mode == 2) {
- ui_DelayTiming = ui_DelayTiming / 1000;
- ui_TimerValue1 = ui_DelayTiming * 12926 - 1;
- ui_TimerValue1 = ui_TimerValue1 / 1000000;
- }
- }
- /* EL241003 End */
-
- if (devpriv->b_ExttrigEnable == APCI3120_ENABLE)
- apci3120_exttrig_enable(dev); /* activate EXT trigger */
- switch (mode) {
- case 1:
- /* init timer0 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue0),
- dev->iobase + APCI3120_TIMER_VALUE);
- break;
-
- case 2:
- /* init timer1 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xF3) | APCI3120_TIMER_1_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 1 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_1_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue1),
- dev->iobase + APCI3120_TIMER_VALUE);
-
- /* init timer0 in mode 2 */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0xFC) | APCI3120_TIMER_0_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Select Timer 0 */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_0_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
-
- /* Set the conversion time */
- outw(((unsigned short) ui_TimerValue0),
- dev->iobase + APCI3120_TIMER_VALUE);
- break;
-
- }
- /* common for all modes */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister &
- APCI3120_DISABLE_SCAN;
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* If DMA is disabled */
- if (devpriv->us_UseDma == APCI3120_DISABLE) {
- /* disable EOC and enable EOS */
- devpriv->b_InterruptMode = APCI3120_EOS_MODE;
- devpriv->b_EocEosInterrupt = APCI3120_ENABLE;
-
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT) |
- APCI3120_ENABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- if (cmd->stop_src == TRIG_COUNT) {
-			/*
-			 * Configure timer 2 for counting EOS. Reset gate 2 of
-			 * timer 2 to disable it (set bit D14 to 0).
-			 */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
-
-			/* Disable the timer interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT & 0xEF;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* (1) Init timer 2 in mode 0 and write timer value */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) |
- APCI3120_TIMER_2_MODE_0;
- outb(devpriv->b_TimerSelectMode,
- dev->iobase + APCI3120_TIMER_CRT1);
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- outw(ui_TimerValue2 & 0xffff,
- dev->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, dev->iobase + APCI3120_TIMER_CRT0);
- outw((ui_TimerValue2 >> 16) & 0xffff,
- dev->iobase + APCI3120_TIMER_VALUE);
-
-			/* (2) Reset the FC_TIMER bit by clearing the timer status register */
- inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- /* enable timer counter and disable watch dog */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_COUNTER) &
- APCI3120_DISABLE_WATCHDOG;
- /* select EOS clock input for timer 2 */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_TIMER2_SELECT_EOS;
- /* Enable timer2 interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- devpriv->b_Timer2Mode = APCI3120_COUNTER;
- devpriv->b_Timer2Interrupt = APCI3120_ENABLE;
- }
- } else {
- /* If DMA Enabled */
- unsigned int scan_bytes = cmd->scan_end_arg * sizeof(short);
-
- devpriv->b_InterruptMode = APCI3120_DMA_MODE;
-
- /* Disables the EOC, EOS interrupt */
- devpriv->b_ModeSelectRegister = devpriv->b_ModeSelectRegister &
- APCI3120_DISABLE_EOC_INT & APCI3120_DISABLE_EOS_INT;
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- dmalen0 = devpriv->ui_DmaBufferSize[0];
- dmalen1 = devpriv->ui_DmaBufferSize[1];
-
- if (cmd->stop_src == TRIG_COUNT) {
-			/*
-			 * Must we completely fill the first buffer? And must we
-			 * completely fill the second buffer once the first one
-			 * is filled?
-			 */
- if (dmalen0 > (cmd->stop_arg * scan_bytes)) {
- dmalen0 = cmd->stop_arg * scan_bytes;
- } else if (dmalen1 > (cmd->stop_arg * scan_bytes -
- dmalen0))
- dmalen1 = cmd->stop_arg * scan_bytes -
- dmalen0;
- }
-
- if (cmd->flags & CMDF_WAKE_EOS) {
-			/* do we want to wake up on every scan? */
- if (dmalen0 > scan_bytes) {
- dmalen0 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen0 += 2;
- }
- if (dmalen1 > scan_bytes) {
- dmalen1 = scan_bytes;
- if (cmd->scan_end_arg & 1)
- dmalen1 -= 2;
- if (dmalen1 < 4)
- dmalen1 = 4;
- }
-		} else {	/* is the output buffer smaller than our DMA buffer? */
- if (dmalen0 > s->async->prealloc_bufsz)
- dmalen0 = s->async->prealloc_bufsz;
- if (dmalen1 > s->async->prealloc_bufsz)
- dmalen1 = s->async->prealloc_bufsz;
- }
- devpriv->ui_DmaBufferUsesize[0] = dmalen0;
- devpriv->ui_DmaBufferUsesize[1] = dmalen1;
-
- /* Initialize DMA */
-
- /*
- * Set Transfer count enable bit and A2P_fifo reset bit in AGCSTS
- * register 1
- */
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAmcc + AMCC_OP_REG_AGCSTS);
-
-		/* changed because the add-on has a 16-bit interface */
- /* ENABLE BUS MASTER */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
-
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH,
- devpriv->i_IobaseAddon + 2);
-
- /*
-		 * TO BE VERIFIED. BEGIN JK 07.05.04: Comparison between WIN32 and Linux
- * driver
- */
- outw(0x1000, devpriv->i_IobaseAddon + 2);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /* 2 No change */
- /* A2P FIFO MANAGEMENT */
- /* A2P fifo reset & transfer control enable */
- outl(APCI3120_A2P_FIFO_MANAGEMENT, devpriv->i_IobaseAmcc +
- APCI3120_AMCC_OP_MCSR);
-
-		/*
-		 * 3
-		 * Beginning address of the DMA buffer. The 32-bit DMA buffer
-		 * address is split into two 16-bit halves. This could be done
-		 * in _attach and stored in an array; the array may be used
-		 * for different pages.
-		 */
-
- /* DMA Start Address Low */
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ul_DmaBufferHw[0] & 0xFFFF),
- devpriv->i_IobaseAddon + 2);
-
- /* DMA Start Address High */
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ul_DmaBufferHw[0] / 65536),
- devpriv->i_IobaseAddon + 2);
-
-		/*
-		 * 4
-		 * Number of bytes to transfer: set the transfer count using
-		 * the add-on MWTC register.
-		 */
-
- /* Nbr of acquisition LOW */
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ui_DmaBufferUsesize[0] & 0xFFFF),
- devpriv->i_IobaseAddon + 2);
-
- /* Nbr of acquisition HIGH */
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw((devpriv->ui_DmaBufferUsesize[0] / 65536),
- devpriv->i_IobaseAddon + 2);
-
-		/*
-		 * 5
-		 * To configure the A2P FIFO (testing):
-		 * outl(FIFO_ADVANCE_ON_BYTE_2, devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
-		 */
-
- /* A2P FIFO RESET */
- /*
- * TO VERIFY BEGIN JK 07.05.04: Comparison between WIN32 and Linux
- * driver
- */
- outl(0x04000000UL, devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /*
- * 6
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN AMWEN_ENABLE |
- * A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
-
- /*
- * 7
- * initialise end of dma interrupt AINT_WRITE_COMPL =
- * ENABLE_WRITE_TC_INT(ADDI)
- */
-		/* Configure the A2P FIFO and initialize the end-of-DMA interrupt */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
-
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- /* ENABLE A2P FIFO WRITE AND ENABLE AMWEN */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
-
- /* A2P FIFO RESET */
- /* BEGIN JK 07.05.04: Comparison between WIN32 and Linux driver */
- outl(0x04000000UL,
- devpriv->i_IobaseAmcc + APCI3120_AMCC_OP_MCSR);
- /* END JK 07.05.04: Comparison between WIN32 and Linux driver */
- }
-
- if (devpriv->us_UseDma == APCI3120_DISABLE &&
- cmd->stop_src == TRIG_COUNT) {
- /* set gate 2 to start conversion */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- }
-
- switch (mode) {
- case 1:
- /* set gate 0 to start conversion */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- break;
- case 2:
- /* set gate 0 and gate 1 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER1;
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister | APCI3120_ENABLE_TIMER0;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
- break;
-
- }
-
- return 0;
-
-}
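A minimal sketch of the timer 0 divisor computed above, assuming cmd->convert_arg is given in nanoseconds as the code implies; the helper name and standalone form are editorial, only the two formulas are taken from the branches above:

#include <stdbool.h>

/* Sketch: timer 0 divisor as computed above. new_quartz corresponds to the
 * "new quartz or APCI-3001" test on the status register. */
static unsigned int sketch_apci3120_timer0(unsigned int convert_ns, bool new_quartz)
{
	if (new_quartz)
		return (convert_ns * 2 - 2000) / 1000;

	/* old quartz: the driver first converts to microseconds */
	return ((convert_ns / 1000) * 12926 - 10000) / 10000;
}

For example, convert_arg = 10000 ns gives (20000 - 2000) / 1000 = 18 on the new-quartz branch.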
-
-/*
- * Does asynchronous acquisition.
- * Determines the mode 1 or 2.
- */
-static int apci3120_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- /* loading private structure with cmd structure inputs */
- devpriv->ui_AiNbrofChannels = cmd->chanlist_len;
-
- if (cmd->start_src == TRIG_EXT)
- devpriv->b_ExttrigEnable = APCI3120_ENABLE;
- else
- devpriv->b_ExttrigEnable = APCI3120_DISABLE;
-
- if (cmd->scan_begin_src == TRIG_FOLLOW)
- return apci3120_cyclic_ai(1, dev, s);
- /* TRIG_TIMER */
- return apci3120_cyclic_ai(2, dev, s);
-}
-
-/*
- * This function copies the data from DMA buffer to the Comedi buffer.
- */
-static void v_APCI3120_InterruptDmaMoveBlock16bit(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
-
- devpriv->ui_AiActualScan +=
- (s->async->cur_chan + num_samples) / cmd->scan_end_arg;
- s->async->cur_chan += num_samples;
- s->async->cur_chan %= cmd->scan_end_arg;
-
- cfc_write_array_to_buffer(s, dma_buffer, num_samples * sizeof(short));
-}
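The scan bookkeeping above fits in two lines of arithmetic; the sketch below restates it with hypothetical plain variables (scans_done, cur_chan) rather than the driver's private structures:

/* Sketch: update the completed-scan count and the position inside the
 * current scan after copying num_samples samples of a scan_len-channel scan. */
static void sketch_advance_scan_counters(unsigned int *scans_done,
					 unsigned int *cur_chan,
					 unsigned int num_samples,
					 unsigned int scan_len)
{
	*scans_done += (*cur_chan + num_samples) / scan_len;
	*cur_chan = (*cur_chan + num_samples) % scan_len;
}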
-
-/*
- * This is a handler for the DMA interrupt.
- * This function copies the data to Comedi Buffer.
- * For continuous DMA it reinitializes the DMA operation.
- * For single-mode DMA it stops the acquisition.
- */
-static void apci3120_interrupt_dma(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int next_dma_buf, samplesinbuf;
- unsigned long low_word, high_word, var;
- unsigned int ui_Tmp;
-
- samplesinbuf =
- devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer] -
- inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_MWTC);
-
- if (samplesinbuf <
- devpriv->ui_DmaBufferUsesize[devpriv->ui_DmaActualBuffer]) {
- dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
- }
- if (samplesinbuf & 1) {
- dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
- apci3120_cancel(dev, s);
- return;
- }
- samplesinbuf = samplesinbuf >> 1; /* number of received samples */
- if (devpriv->b_DmaDoubleBuffer) {
-		/* switch DMA buffers if double buffering is used */
- next_dma_buf = 1 - devpriv->ui_DmaActualBuffer;
-
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);
-
-		/* changed because the add-on has a 16-bit interface */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
-		outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2);	/* 0x1000 is output in the Windows driver */
-
- var = devpriv->ul_DmaBufferHw[next_dma_buf];
- low_word = var & 0xffff;
- var = devpriv->ul_DmaBufferHw[next_dma_buf];
- high_word = var / 65536;
-
- /* DMA Start Address Low */
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
-
- /* DMA Start Address High */
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- var = devpriv->ui_DmaBufferUsesize[next_dma_buf];
- low_word = var & 0xffff;
- var = devpriv->ui_DmaBufferUsesize[next_dma_buf];
- high_word = var / 65536;
-
- /* Nbr of acquisition LOW */
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
-
- /* Nbr of acquisition HIGH */
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- /*
- * To configure A2P FIFO
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN
- * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* initialise end of dma interrupt AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT(ADDI) */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
-
- }
- if (samplesinbuf) {
- v_APCI3120_InterruptDmaMoveBlock16bit(dev, s,
- devpriv->ul_DmaBufferVirtual[devpriv->
- ui_DmaActualBuffer], samplesinbuf);
-
- if (!(cmd->flags & CMDF_WAKE_EOS)) {
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
- }
- }
- if (cmd->stop_src == TRIG_COUNT)
- if (devpriv->ui_AiActualScan >= cmd->stop_arg) {
- /* all data sampled */
- apci3120_cancel(dev, s);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
-
- if (devpriv->b_DmaDoubleBuffer) { /* switch dma buffers */
- devpriv->ui_DmaActualBuffer = 1 - devpriv->ui_DmaActualBuffer;
- } else {
- /*
-		 * restart DMA if double buffering is not used
- * ADDED REINITIALISE THE DMA
- */
- ui_Tmp = AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO;
- outl(ui_Tmp, devpriv->i_IobaseAddon + AMCC_OP_REG_AGCSTS);
-
-		/* changed because the add-on has a 16-bit interface */
- outw(APCI3120_ADD_ON_AGCSTS_LOW, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_LOW,
- devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_AGCSTS_HIGH, devpriv->i_IobaseAddon + 0);
- outw(APCI3120_ENABLE_TRANSFER_ADD_ON_HIGH, devpriv->i_IobaseAddon + 2);
- /*
- * A2P FIFO MANAGEMENT
- * A2P fifo reset & transfer control enable
- */
- outl(APCI3120_A2P_FIFO_MANAGEMENT,
- devpriv->i_IobaseAmcc + AMCC_OP_REG_MCSR);
-
- var = devpriv->ul_DmaBufferHw[0];
- low_word = var & 0xffff;
- var = devpriv->ul_DmaBufferHw[0];
- high_word = var / 65536;
- outw(APCI3120_ADD_ON_MWAR_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_MWAR_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- var = devpriv->ui_DmaBufferUsesize[0];
- low_word = var & 0xffff; /* changed */
- var = devpriv->ui_DmaBufferUsesize[0];
- high_word = var / 65536;
- outw(APCI3120_ADD_ON_MWTC_LOW, devpriv->i_IobaseAddon + 0);
- outw(low_word, devpriv->i_IobaseAddon + 2);
- outw(APCI3120_ADD_ON_MWTC_HIGH, devpriv->i_IobaseAddon + 0);
- outw(high_word, devpriv->i_IobaseAddon + 2);
-
- /*
- * To configure A2P FIFO
- * ENABLE A2P FIFO WRITE AND ENABLE AMWEN
- * AMWEN_ENABLE | A2P_FIFO_WRITE_ENABLE (0x01|0x02)=0x03
- */
- outw(3, devpriv->i_IobaseAddon + 4);
- /* initialise end of dma interrupt AINT_WRITE_COMPL = ENABLE_WRITE_TC_INT(ADDI) */
- outl((APCI3120_FIFO_ADVANCE_ON_BYTE_2 |
- APCI3120_ENABLE_WRITE_TC_INT),
- devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR);
- }
-}
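Both the setup path and this handler split a 32-bit DMA address into the two 16-bit words the add-on expects, using & 0xffff and / 65536. A small sketch of the same split in the more usual shift form (the helper name is editorial); the division by 65536 on an unsigned value is the same as a right shift by 16:

#include <stdint.h>

/* Sketch: split a 32-bit bus address into low and high 16-bit words. */
static void sketch_split_dma_address(uint32_t addr, uint16_t *low, uint16_t *high)
{
	*low = addr & 0xffff;
	*high = (addr >> 16) & 0xffff;
}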
-
-/*
- * This function handles EOS interrupt.
- * This function copies the acquired data (from the FIFO) to the Comedi buffer.
- */
-static int apci3120_interrupt_handle_eos(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- int n_chan, i;
- int err = 1;
-
- n_chan = devpriv->ui_AiNbrofChannels;
-
- for (i = 0; i < n_chan; i++)
- err &= comedi_buf_put(s, inw(dev->iobase + 0));
-
- s->async->events |= COMEDI_CB_EOS;
-
- if (err == 0)
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_event(dev, s);
-
- return 0;
-}
-
-static void apci3120_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned short int_daq;
- unsigned int int_amcc, ui_Check, i;
- unsigned short us_TmpValue;
- unsigned char b_DummyRead;
-
- ui_Check = 1;
-
- int_daq = inw(dev->iobase + APCI3120_RD_STATUS) & 0xf000; /* get IRQ reasons */
- int_amcc = inl(devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR); /* get AMCC int register */
-
- if ((!int_daq) && (!(int_amcc & ANY_S593X_INT))) {
- dev_err(dev->class_dev, "IRQ from unknown source\n");
- return;
- }
-
- outl(int_amcc | 0x00ff0000, devpriv->i_IobaseAmcc + AMCC_OP_REG_INTCSR); /* shutdown IRQ reasons in AMCC */
-
- int_daq = (int_daq >> 12) & 0xF;
-
- if (devpriv->b_ExttrigEnable == APCI3120_ENABLE) {
- /* Disable ext trigger */
- apci3120_exttrig_disable(dev);
- devpriv->b_ExttrigEnable = APCI3120_DISABLE;
- }
- /* clear the timer 2 interrupt */
- inb(devpriv->i_IobaseAmcc + APCI3120_TIMER_STATUS_REGISTER);
-
- if (int_amcc & MASTER_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
- if (int_amcc & TARGET_ABORT_INT)
- dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
-
-	/* Check if EOC interrupt */
- if (((int_daq & 0x8) == 0)
- && (devpriv->b_InterruptMode == APCI3120_EOC_MODE)) {
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) {
-
- /* Read the AI Value */
- devpriv->ui_AiReadData[0] =
- (unsigned int) inw(devpriv->iobase + 0);
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
-			send_sig(SIGIO, devpriv->tsk_Current, 0);	/* send a signal to the user task */
- } else {
- /* Disable EOC Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- }
- }
-
- /* Check If EOS interrupt */
- if ((int_daq & 0x2) && (devpriv->b_InterruptMode == APCI3120_EOS_MODE)) {
-
- if (devpriv->b_EocEosInterrupt == APCI3120_ENABLE) { /* enable this in without DMA ??? */
-
- if (devpriv->ai_running) {
- ui_Check = 0;
- apci3120_interrupt_handle_eos(dev);
- devpriv->ui_AiActualScan++;
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase +
- APCI3120_WRITE_MODE_SELECT);
- } else {
- ui_Check = 0;
- for (i = 0; i < devpriv->ui_AiNbrofChannels;
- i++) {
- us_TmpValue = inw(devpriv->iobase + 0);
- devpriv->ui_AiReadData[i] =
- (unsigned int) us_TmpValue;
- }
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE;
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
-
-				send_sig(SIGIO, devpriv->tsk_Current, 0);	/* send a signal to the user task */
-
- }
-
- } else {
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
- devpriv->b_EocEosInterrupt = APCI3120_DISABLE; /* Default settings */
- devpriv->b_InterruptMode = APCI3120_EOC_MODE;
- }
-
- }
- /* Timer2 interrupt */
- if (int_daq & 0x1) {
-
- switch (devpriv->b_Timer2Mode) {
- case APCI3120_COUNTER:
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* stop timer 2 */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister & APCI3120_DISABLE_ALL_TIMER;
- outw(devpriv->us_OutputRegister,
- dev->iobase + APCI3120_WR_ADDRESS);
-
- /* stop timer 0 and timer 1 */
- apci3120_cancel(dev, s);
-
- /* UPDATE-0.7.57->0.7.68comedi_done(dev,s); */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
-
- break;
-
- case APCI3120_TIMER:
-
-			/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- break;
-
- case APCI3120_WATCHDOG:
-
-			/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- break;
-
- default:
-
- /* disable Timer Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT;
-
- outb(devpriv->b_ModeSelectRegister,
- dev->iobase + APCI3120_WRITE_MODE_SELECT);
-
- }
-
- b_DummyRead = inb(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
-
- }
-
- if ((int_daq & 0x4) && (devpriv->b_InterruptMode == APCI3120_DMA_MODE)) {
- if (devpriv->ai_running) {
-
- /* Clear Timer Write TC int */
- outl(APCI3120_CLEAR_WRITE_TC_INT,
- devpriv->i_IobaseAmcc +
- APCI3120_AMCC_OP_REG_INTCSR);
-
- /* Clears the timer status register */
- inw(dev->iobase + APCI3120_TIMER_STATUS_REGISTER);
- /* do some data transfer */
- apci3120_interrupt_dma(irq, d);
- } else {
- /* Stops the Timer */
- outw(devpriv->
- us_OutputRegister & APCI3120_DISABLE_TIMER0 &
- APCI3120_DISABLE_TIMER1,
- dev->iobase + APCI3120_WR_ADDRESS);
- }
-
- }
-}
-
-/*
- * Configure Timer 2
- *
- * data[0] = TIMER configure as timer
- * = WATCHDOG configure as watchdog
- * data[1] = Timer constant
- * data[2] = Timer2 interrupt (1)enable or(0) disable
- */
-static int apci3120_config_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Timervalue2;
- unsigned short us_TmpValue;
- unsigned char b_Tmp;
-
- if (!data[1])
- dev_err(dev->class_dev, "No timer constant!\n");
-
- devpriv->b_Timer2Interrupt = (unsigned char) data[2]; /* save info whether to enable or disable interrupt */
-
-	ui_Timervalue2 = data[1] / 1000;	/* convert nanoseconds to microseconds */
-
- us_TmpValue = (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
-	/*
-	 * EL250804: Test whether the APCI3120 board has the new quartz or
-	 * whether it is an APCI3001, and calculate the value to set in
-	 * the timer
-	 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 50;
- } else {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 70;
- }
-
- /* Reset gate 2 of Timer 2 to disable it (Set Bit D14 to 0) */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER2;
- outw(devpriv->us_OutputRegister, devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Disable TIMER Interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT & 0xEF;
-
- /* Disable Eoc and Eos Interrupts */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_EOC_INT &
- APCI3120_DISABLE_EOS_INT;
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
- if (data[0] == APCI3120_TIMER) { /* initialize timer */
- /* Set the Timer 2 in mode 2(Timer) */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_2;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
-		/*
-		 * Configure timer 2 for writing the LOW word of the delay
-		 * value. b_Tmp must be combined with b_DigitalOutputRegister
-		 * because the register at Address_1 + APCI3120_TIMER_CRT0
-		 * both sets the digital outputs and configures timer 2;
-		 * otherwise the digital outputs are erased (set to 0).
-		 */
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
- /* timer2 in Timer mode enabled */
- devpriv->b_Timer2Mode = APCI3120_TIMER;
-
- } else { /* Initialize Watch dog */
-
- /* Set the Timer 2 in mode 5(Watchdog) */
- devpriv->b_TimerSelectMode =
- (devpriv->
- b_TimerSelectMode & 0x0F) | APCI3120_TIMER_2_MODE_5;
- outb(devpriv->b_TimerSelectMode,
- devpriv->iobase + APCI3120_TIMER_CRT1);
-
-		/*
-		 * Configure timer 2 for writing the LOW word of the delay
-		 * value. b_Tmp must be combined with b_DigitalOutputRegister
-		 * because the register at Address_1 + APCI3120_TIMER_CRT0
-		 * both sets the digital outputs and configures timer 2;
-		 * otherwise the digital outputs are erased (set to 0).
-		 */
-
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
- /* watchdog enabled */
- devpriv->b_Timer2Mode = APCI3120_WATCHDOG;
-
- }
-
- return insn->n;
-
-}
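A minimal sketch of the timer-constant conversion performed above; the divisors 50 and 70 are copied from the two branches, and their hardware meaning is not documented in this source:

/* Sketch: convert the timer constant data[1] (nanoseconds) into the value
 * loaded into timer 2. new_quartz mirrors the quartz/APCI-3001 test above. */
static unsigned int sketch_timer2_reload(unsigned int timer_ns, int new_quartz)
{
	unsigned int us = timer_ns / 1000;	/* nanoseconds -> microseconds */

	return new_quartz ? us / 50 : us / 70;
}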
-
-/*
- * To start and stop the timer
- *
- * data[0] = 1 (start)
- * = 0 (stop)
- * = 2 (write new value)
- * data[1] = new value
- *
- * devpriv->b_Timer2Mode = 0 DISABLE
- * = 1 Timer
- * = 2 Watch dog
- */
-static int apci3120_write_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = dev->board_ptr;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Timervalue2 = 0;
- unsigned short us_TmpValue;
- unsigned char b_Tmp;
-
- if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG)
- && (devpriv->b_Timer2Mode != APCI3120_TIMER)) {
- dev_err(dev->class_dev, "timer2 not configured\n");
- return -EINVAL;
- }
-
- if (data[0] == 2) { /* write new value */
- if (devpriv->b_Timer2Mode != APCI3120_TIMER) {
- dev_err(dev->class_dev,
- "timer2 not configured in TIMER MODE\n");
- return -EINVAL;
- }
-
- if (data[1])
- ui_Timervalue2 = data[1];
- else
- ui_Timervalue2 = 0;
- }
-
- switch (data[0]) {
- case APCI3120_START:
-
- /* Reset FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */
- /* Enable Timer */
- devpriv->b_ModeSelectRegister =
- devpriv->b_ModeSelectRegister & 0x0B;
- } else { /* start watch dog */
- /* Enable WatchDog */
- devpriv->b_ModeSelectRegister =
- (devpriv->
- b_ModeSelectRegister & 0x0B) |
- APCI3120_ENABLE_WATCHDOG;
- }
-
-		/* enable or disable the interrupt */
- if ((devpriv->b_Timer2Interrupt) == APCI3120_ENABLE) {
-
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister |
- APCI3120_ENABLE_TIMER_INT;
- /* save the task structure to pass info to user */
- devpriv->tsk_Current = current;
- } else {
-
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_INT;
- }
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) { /* start timer */
-			/* In timer mode, gate 2 must be activated for the timer to start */
- devpriv->us_OutputRegister =
- devpriv->
- us_OutputRegister | APCI3120_ENABLE_TIMER2;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
- }
-
- break;
-
- case APCI3120_STOP:
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) {
- /* Disable timer */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_TIMER_COUNTER;
- } else {
- /* Disable WatchDog */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister &
- APCI3120_DISABLE_WATCHDOG;
- }
- /* Disable timer interrupt */
- devpriv->b_ModeSelectRegister =
- devpriv->
- b_ModeSelectRegister & APCI3120_DISABLE_TIMER_INT;
-
- /* Write above states to register */
- outb(devpriv->b_ModeSelectRegister,
- devpriv->iobase + APCI3120_WRITE_MODE_SELECT);
-
- /* Reset Gate 2 */
- devpriv->us_OutputRegister =
- devpriv->us_OutputRegister & APCI3120_DISABLE_TIMER_INT;
- outw(devpriv->us_OutputRegister,
- devpriv->iobase + APCI3120_WR_ADDRESS);
-
- /* Reset FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
-
- break;
-
- case 2: /* write new value to Timer */
- if (devpriv->b_Timer2Mode != APCI3120_TIMER) {
- dev_err(dev->class_dev,
- "timer2 not configured in TIMER MODE\n");
- return -EINVAL;
- }
- us_TmpValue =
- (unsigned short) inw(devpriv->iobase + APCI3120_RD_STATUS);
-
-		/*
-		 * EL250804: Test whether the APCI3120 board has the new quartz
-		 * or whether it is an APCI3001, and calculate the value to set
-		 * in the timer
-		 */
- if ((us_TmpValue & 0x00B0) == 0x00B0
- || !strcmp(this_board->pc_DriverName, "apci3001")) {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 50;
- } else {
- /* Calculate the time value to set in the timer */
- ui_Timervalue2 = ui_Timervalue2 / 70;
- }
- /* Writing LOW unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw(ui_Timervalue2 & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Writing HIGH unsigned short */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- outw((ui_Timervalue2 >> 16) & 0xffff,
- devpriv->iobase + APCI3120_TIMER_VALUE);
-
- break;
- default:
- return -EINVAL; /* Not a valid input */
- }
-
- return insn->n;
-}
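For reference, the data[] layouts this instruction accepts, restated from the comment block above; the array names and the reload value are editorial examples:

/* Sketch: example data[] buffers for the instruction handled above. */
unsigned int timer2_start[2]  = { 1, 0 };	/* data[0] = 1: start timer/watchdog */
unsigned int timer2_stop[2]   = { 0, 0 };	/* data[0] = 0: stop timer/watchdog */
unsigned int timer2_reload[2] = { 2, 50000 };	/* data[0] = 2: write new value, data[1] = new value (hypothetical) */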
-
-/*
- * Read the Timer value
- *
- * for Timer: data[0]= Timer constant
- *
- * for watchdog: data[0] = 0 (still running)
- * = 1 (run down)
- */
-static int apci3120_read_insn_timer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_Tmp;
- unsigned short us_TmpValue, us_TmpValue_2, us_StatusValue;
-
- if ((devpriv->b_Timer2Mode != APCI3120_WATCHDOG)
- && (devpriv->b_Timer2Mode != APCI3120_TIMER)) {
- dev_err(dev->class_dev, "timer2 not configured\n");
- }
- if (devpriv->b_Timer2Mode == APCI3120_TIMER) {
-
- /* Read the LOW unsigned short of Timer 2 register */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_LOW_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- us_TmpValue = inw(devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* Read the HIGH unsigned short of Timer 2 register */
- b_Tmp = ((devpriv->
- b_DigitalOutputRegister) & 0xF0) |
- APCI3120_SELECT_TIMER_2_HIGH_WORD;
- outb(b_Tmp, devpriv->iobase + APCI3120_TIMER_CRT0);
-
- us_TmpValue_2 = inw(devpriv->iobase + APCI3120_TIMER_VALUE);
-
- /* combining both words */
- data[0] = (unsigned int) ((us_TmpValue) | ((us_TmpValue_2) << 16));
-
- } else { /* Read watch dog status */
-
- us_StatusValue = inw(devpriv->iobase + APCI3120_RD_STATUS);
- us_StatusValue =
- ((us_StatusValue & APCI3120_FC_TIMER) >> 12) & 1;
- if (us_StatusValue == 1) {
- /* RESET FC_TIMER BIT */
- inb(devpriv->iobase + APCI3120_TIMER_STATUS_REGISTER);
- }
-		data[0] = us_StatusValue;	/* data[0] = 1 means the watchdog has run down */
- }
- return insn->n;
-}
-
-static int apci3120_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int val;
-
- /* the input channels are bits 11:8 of the status reg */
- val = inw(devpriv->iobase + APCI3120_RD_STATUS);
- data[1] = (val >> 8) & 0xf;
-
- return insn->n;
-}
-
-static int apci3120_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- if (comedi_dio_update_state(s, data)) {
- /* The do channels are bits 7:4 of the do register */
- devpriv->b_DigitalOutputRegister = s->state << 4;
-
- outb(devpriv->b_DigitalOutputRegister,
- devpriv->iobase + APCI3120_DIGITAL_OUTPUT);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int apci3120_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Range, ui_Channel;
- unsigned short us_TmpValue;
-
- ui_Range = CR_RANGE(insn->chanspec);
- ui_Channel = CR_CHAN(insn->chanspec);
-
- if (ui_Range) { /* if 1 then unipolar */
-
- if (data[0] != 0)
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 <<
- 13) | (data[0] + 8191));
- else
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (1 <<
- 13) | 8192);
-
- } else { /* if 0 then bipolar */
- data[0] =
- ((((ui_Channel & 0x03) << 14) & 0xC000) | (0 << 13) |
- data[0]);
-
- }
-
-	do {	/* Wait for the DA_READY bit */
- us_TmpValue =
- ((unsigned short) inw(devpriv->iobase +
- APCI3120_RD_STATUS)) & 0x0001;
- } while (us_TmpValue != 0x0001);
-
- if (ui_Channel <= 3)
-		/*
-		 * Channels 0-3 are written to register 1 (wrDac1-4); data[0]
-		 * is cast to unsigned short since a word write is done.
-		 */
- outw((unsigned short) data[0],
- devpriv->iobase + APCI3120_ANALOG_OUTPUT_1);
- else
-		/*
-		 * Channels 4-7 are written to register 2 (wrDac5-8); data[0]
-		 * is cast to unsigned short since a word write is done.
-		 */
- outw((unsigned short) data[0],
- devpriv->iobase + APCI3120_ANALOG_OUTPUT_2);
-
- return insn->n;
-}
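A sketch of the 16-bit DAC word assembled above: bits 15:14 carry the channel (modulo 4), bit 13 selects unipolar mode, and the low bits carry the sample (offset by 8191, or set to 8192 when zero, in the unipolar case). The helper is editorial; the field layout is read off the expressions above:

#include <stdint.h>

/* Sketch: pack channel, polarity and value the way the code above does. */
static uint16_t sketch_apci3120_ao_word(unsigned int chan, int unipolar, uint16_t val)
{
	uint16_t word = (chan & 0x3) << 14;

	if (unipolar)
		word |= (1 << 13) | (val ? val + 8191 : 8192);
	else
		word |= val;

	return word;
}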
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
deleted file mode 100644
index 5e321f91172f..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ /dev/null
@@ -1,3003 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-3200 | Compiler : GCC |
- | Module name : hwdrv_apci3200.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-3200 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 02.07.04 | J. Krauth | Modification from the driver in order to |
- | | | correct some errors when using several boards. |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
- | 26.10.04 | J. Krauth | - Update for COMEDI 0.7.68 |
- | | | - Read eeprom value |
- | | | - Append APCI-3300 |
- +----------+-----------+------------------------------------------------+
-*/
-
-/* Card Specific information */
-/* #define APCI3200_ADDRESS_RANGE 264 */
-
-/* Analog Input related Defines */
-#define APCI3200_AI_OFFSET_GAIN 0
-#define APCI3200_AI_SC_TEST 4
-#define APCI3200_AI_IRQ 8
-#define APCI3200_AI_AUTOCAL 12
-#define APCI3200_RELOAD_CONV_TIME_VAL 32
-#define APCI3200_CONV_TIME_TIME_BASE 36
-#define APCI3200_RELOAD_DELAY_TIME_VAL 40
-#define APCI3200_DELAY_TIME_TIME_BASE 44
-#define APCI3200_AI_MODULE1 0
-#define APCI3200_AI_MODULE2 64
-#define APCI3200_AI_MODULE3 128
-#define APCI3200_AI_MODULE4 192
-#define TRUE 1
-#define FALSE 0
-#define APCI3200_AI_EOSIRQ 16
-#define APCI3200_AI_EOS 20
-#define APCI3200_AI_CHAN_ID 24
-#define APCI3200_AI_CHAN_VAL 28
-#define ANALOG_INPUT 0
-#define TEMPERATURE 1
-#define RESISTANCE 2
-
-#define ENABLE_EXT_TRIG 1
-#define ENABLE_EXT_GATE 2
-#define ENABLE_EXT_TRIG_GATE 3
-
-#define APCI3200_MAXVOLT 2.5
-#define ADDIDATA_GREATER_THAN_TEST 0
-#define ADDIDATA_LESS_THAN_TEST 1
-
-#define ADDIDATA_UNIPOLAR 1
-#define ADDIDATA_BIPOLAR 2
-
-#define MAX_MODULE 4
-
-/* ANALOG INPUT RANGE */
-static const struct comedi_lrange range_apci3200_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci3300_ai = {
- 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-int MODULE_NO;
-struct {
- int i_Gain;
- int i_Polarity;
- int i_OffsetRange;
- int i_Coupling;
- int i_SingleDiff;
- int i_AutoCalibration;
- unsigned int ui_ReloadValue;
- unsigned int ui_TimeUnitReloadVal;
- int i_Interrupt;
- int i_ModuleSelection;
-} Config_Parameters_Module1, Config_Parameters_Module2,
- Config_Parameters_Module3, Config_Parameters_Module4;
-
-
-struct str_ADDIDATA_RTDStruct {
- unsigned int ul_NumberOfValue;
- unsigned int *pul_ResistanceValue;
- unsigned int *pul_TemperatureValue;
-};
-
-struct str_Module {
- unsigned long ul_CurrentSourceCJC;
- unsigned long ul_CurrentSource[5];
- unsigned long ul_GainFactor[8]; /* Gain Factor */
- unsigned int w_GainValue[10];
-};
-
-struct str_BoardInfos {
-
- int i_CJCAvailable;
- int i_CJCPolarity;
- int i_CJCGain;
- int i_InterruptFlag;
- int i_ADDIDATAPolarity;
- int i_ADDIDATAGain;
- int i_AutoCalibration;
- int i_ADDIDATAConversionTime;
- int i_ADDIDATAConversionTimeUnit;
- int i_ADDIDATAType;
- int i_ChannelNo;
- int i_ChannelCount;
- int i_ScanType;
- int i_FirstChannel;
- int i_LastChannel;
- int i_Sum;
- int i_Offset;
- unsigned int ui_Channel_num;
- int i_Count;
- int i_Initialised;
- unsigned int ui_InterruptChannelValue[144]; /* Buffer */
- unsigned char b_StructInitialized;
- /* 7 is the maximal number of channels */
- unsigned int ui_ScanValueArray[7 + 12];
-
- int i_ConnectionType;
- int i_NbrOfModule;
- struct str_Module s_Module[MAX_MODULE];
-};
-
-/* BEGIN JK 06.07.04: Management of several boards */
-/*
- int i_CJCAvailable=1;
- int i_CJCPolarity=0;
- int i_CJCGain=2;/* changed from 0 to 2 */
- int i_InterruptFlag=0;
- int i_ADDIDATAPolarity;
- int i_ADDIDATAGain;
- int i_AutoCalibration=0; /* : auto calibration */
- int i_ADDIDATAConversionTime;
- int i_ADDIDATAConversionTimeUnit;
- int i_ADDIDATAType;
- int i_ChannelNo;
- int i_ChannelCount=0;
- int i_ScanType;
- int i_FirstChannel;
- int i_LastChannel;
- int i_Sum=0;
- int i_Offset;
- unsigned int ui_Channel_num=0;
- static int i_Count=0;
- int i_Initialised=0;
- unsigned int ui_InterruptChannelValue[96]; /* Buffer */
-*/
-struct str_BoardInfos s_BoardInfos[100]; /* 100 will be the max number of boards to be used */
-/* END JK 06.07.04: Management of several boards */
-
-#define AMCC_OP_REG_MCSR 0x3c
-#define EEPROM_BUSY 0x80000000
-#define NVCMD_LOAD_LOW (0x4 << 5) /* nvRam load low command */
-#define NVCMD_LOAD_HIGH (0x5 << 5) /* nvRam load high command */
-#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
-#define NVCMD_BEGIN_WRITE (0x6 << 5) /* EEPROM begin write command */
-
-static int i_AddiHeaderRW_ReadEeprom(int i_NbOfWordsToRead,
- unsigned int dw_PCIBoardEepromAddress,
- unsigned short w_EepromStartAddress,
- unsigned short *pw_DataRead)
-{
- unsigned int dw_eeprom_busy = 0;
- int i_Counter = 0;
- int i_WordCounter;
- int i;
- unsigned char pb_ReadByte[1];
- unsigned char b_ReadLowByte = 0;
- unsigned char b_ReadHighByte = 0;
- unsigned char b_SelectedAddressLow = 0;
- unsigned char b_SelectedAddressHigh = 0;
- unsigned short w_ReadWord = 0;
-
- for (i_WordCounter = 0; i_WordCounter < i_NbOfWordsToRead;
- i_WordCounter++) {
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- for (i_Counter = 0; i_Counter < 2; i_Counter++) {
- b_SelectedAddressLow = (w_EepromStartAddress + i_Counter) % 256; /* Read the low 8 bit part */
- b_SelectedAddressHigh = (w_EepromStartAddress + i_Counter) / 256; /* Read the high 8 bit part */
-
- /* Select the load low address mode */
- outb(NVCMD_LOAD_LOW,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Load the low address */
- outb(b_SelectedAddressLow,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Select the load high address mode */
- outb(NVCMD_LOAD_HIGH,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Load the high address */
- outb(b_SelectedAddressHigh,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
- /* Select the READ mode */
- outb(NVCMD_BEGIN_READ,
- dw_PCIBoardEepromAddress + AMCC_OP_REG_MCSR +
- 3);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
-			/* Read the data byte from the EEPROM */
- *pb_ReadByte =
- inb(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR + 2);
-
- /* Wait on busy */
- do {
- dw_eeprom_busy =
- inl(dw_PCIBoardEepromAddress +
- AMCC_OP_REG_MCSR);
- dw_eeprom_busy = dw_eeprom_busy & EEPROM_BUSY;
- } while (dw_eeprom_busy == EEPROM_BUSY);
-
-			/* Store the byte read: low part first, then high part */
- if (i_Counter == 0)
- b_ReadLowByte = pb_ReadByte[0];
- else
- b_ReadHighByte = pb_ReadByte[0];
-
-
- /* Sleep */
- msleep(1);
-
- }
- w_ReadWord =
- (b_ReadLowByte | (((unsigned short)b_ReadHighByte) *
- 256));
-
- pw_DataRead[i_WordCounter] = w_ReadWord;
-
- w_EepromStartAddress += 2; /* to read the next word */
-
- } /* for (...) i_NbOfWordsToRead */
- return 0;
-}
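The routine above repeats the same busy-wait before and after every command byte. A sketch of how that wait could be factored into a helper, using the AMCC_OP_REG_MCSR offset and EEPROM_BUSY bit defined above (the helper itself is editorial and assumes the same driver context):

/* Sketch: spin until the EEPROM busy flag in the AMCC MCSR clears. */
static void sketch_wait_eeprom_ready(unsigned long pci_eeprom_base)
{
	while (inl(pci_eeprom_base + AMCC_OP_REG_MCSR) & EEPROM_BUSY)
		;	/* busy-wait, exactly as the loops above do */
}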
-
-static void v_GetAPCI3200EepromCalibrationValue(unsigned int dw_PCIBoardEepromAddress,
- struct str_BoardInfos *BoardInformations)
-{
- unsigned short w_AnalogInputMainHeaderAddress;
- unsigned short w_AnalogInputComponentAddress;
- unsigned short w_NumberOfModuls = 0;
- unsigned short w_CurrentSources[2];
- unsigned short w_ModulCounter = 0;
- unsigned short w_FirstHeaderSize = 0;
- unsigned short w_NumberOfInputs = 0;
- unsigned short w_CJCFlag = 0;
- unsigned short w_NumberOfGainValue = 0;
- unsigned short w_SingleHeaderAddress = 0;
- unsigned short w_SingleHeaderSize = 0;
- unsigned short w_Input = 0;
- unsigned short w_GainFactorAddress = 0;
- unsigned short w_GainFactorValue[2];
- unsigned short w_GainIndex = 0;
- unsigned short w_GainValue = 0;
-
- /*****************************************/
- /** Get the Analog input header address **/
- /*****************************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, 0x116, /* w_EepromStartAddress: Analog input header address */
- &w_AnalogInputMainHeaderAddress);
-
- /*******************************************/
- /** Compute the real analog input address **/
- /*******************************************/
- w_AnalogInputMainHeaderAddress = w_AnalogInputMainHeaderAddress + 0x100;
-
- /******************************/
-	/** Get the number of modules **/
- /******************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
-		dw_PCIBoardEepromAddress, w_AnalogInputMainHeaderAddress + 0x02,	/* w_EepromStartAddress: Number of components */
- &w_NumberOfModuls);
-
- for (w_ModulCounter = 0; w_ModulCounter < w_NumberOfModuls;
- w_ModulCounter++) {
- /***********************************/
- /** Compute the component address **/
- /***********************************/
- w_AnalogInputComponentAddress =
- w_AnalogInputMainHeaderAddress +
- (w_FirstHeaderSize * w_ModulCounter) + 0x04;
-
- /****************************/
- /** Read first header size **/
- /****************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress, /* Address of the first header */
- &w_FirstHeaderSize);
-
- w_FirstHeaderSize = w_FirstHeaderSize >> 4;
-
- /***************************/
- /** Read number of inputs **/
- /***************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
-			dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x06,	/* Number of inputs for the first module */
- &w_NumberOfInputs);
-
- w_NumberOfInputs = w_NumberOfInputs >> 4;
-
- /***********************/
- /** Read the CJC flag **/
- /***********************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x08, /* CJC flag */
- &w_CJCFlag);
-
- w_CJCFlag = (w_CJCFlag >> 3) & 0x1; /* Get only the CJC flag */
-
- /*******************************/
- /** Read number of gain value **/
- /*******************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 0x44, /* Number of gain value */
- &w_NumberOfGainValue);
-
- w_NumberOfGainValue = w_NumberOfGainValue & 0xFF;
-
- /***********************************/
- /** Compute single header address **/
- /***********************************/
- w_SingleHeaderAddress =
- w_AnalogInputComponentAddress + 0x46 +
- (((w_NumberOfGainValue / 16) + 1) * 2) +
- (6 * w_NumberOfGainValue) +
- (4 * (((w_NumberOfGainValue / 16) + 1) * 2));
-
- /********************************************/
-		/** Read the single header size **/
- /********************************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_SingleHeaderAddress, /* w_EepromStartAddress: Single header address */
- &w_SingleHeaderSize);
-
- w_SingleHeaderSize = w_SingleHeaderSize >> 4;
-
- /*************************************/
- /** Read gain factor for the module **/
- /*************************************/
- w_GainFactorAddress = w_AnalogInputComponentAddress;
-
- for (w_GainIndex = 0; w_GainIndex < w_NumberOfGainValue;
- w_GainIndex++) {
- /************************************/
- /** Read gain value for the module **/
- /************************************/
- i_AddiHeaderRW_ReadEeprom(1, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 70 + (2 * (1 + (w_NumberOfGainValue / 16))) + (0x02 * w_GainIndex), /* Gain value */
- &w_GainValue);
-
- BoardInformations->s_Module[w_ModulCounter].
- w_GainValue[w_GainIndex] = w_GainValue;
-
- /*************************************/
- /** Read gain factor for the module **/
- /*************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress, w_AnalogInputComponentAddress + 70 + ((2 * w_NumberOfGainValue) + (2 * (1 + (w_NumberOfGainValue / 16)))) + (0x04 * w_GainIndex), /* Gain factor */
- w_GainFactorValue);
-
- BoardInformations->s_Module[w_ModulCounter].
- ul_GainFactor[w_GainIndex] =
- (w_GainFactorValue[1] << 16) +
- w_GainFactorValue[0];
- }
-
- /***************************************************************/
- /** Read current source value for each channels of the module **/
- /***************************************************************/
- for (w_Input = 0; w_Input < w_NumberOfInputs; w_Input++) {
- /********************************************/
- /** Read current sources value for input 1 **/
- /********************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress,
- (w_Input * w_SingleHeaderSize) +
- w_SingleHeaderAddress + 0x0C, w_CurrentSources);
-
- /************************************/
- /** Save the current sources value **/
- /************************************/
- BoardInformations->s_Module[w_ModulCounter].
- ul_CurrentSource[w_Input] =
- (w_CurrentSources[0] +
- ((w_CurrentSources[1] & 0xFFF) << 16));
- }
-
- /***************************************/
- /** Read the CJC current source value **/
- /***************************************/
- i_AddiHeaderRW_ReadEeprom(2, /* i_NbOfWordsToRead */
- dw_PCIBoardEepromAddress,
- (w_Input * w_SingleHeaderSize) + w_SingleHeaderAddress +
- 0x0C, w_CurrentSources);
-
- /************************************/
- /** Save the current sources value **/
- /************************************/
- BoardInformations->s_Module[w_ModulCounter].
- ul_CurrentSourceCJC =
- (w_CurrentSources[0] +
- ((w_CurrentSources[1] & 0xFFF) << 16));
- }
-}
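For reference, the EEPROM offsets that the walk above reads, collected in one place; the macro names are editorial, and the offsets with their post-processing are copied from the calls above:

/* Sketch: EEPROM layout as read above (names are editorial). */
#define SK_EE_AI_HEADER_PTR	0x116	/* pointer to the AI main header; add 0x100 */
#define SK_EE_NBR_OF_MODULES	0x02	/* offset from the AI main header */
#define SK_EE_HEADER_SIZE	0x00	/* offset from a module component, value >> 4 */
#define SK_EE_NBR_OF_INPUTS	0x06	/* offset from a module component, value >> 4 */
#define SK_EE_CJC_FLAG		0x08	/* offset from a module component, bit 3 */
#define SK_EE_NBR_OF_GAINS	0x44	/* offset from a module component, low byte */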
-
-static int i_APCI3200_GetChannelCalibrationValue(struct comedi_device *dev,
- unsigned int ui_Channel_num,
- unsigned int *CJCCurrentSource,
- unsigned int *ChannelCurrentSource,
- unsigned int *ChannelGainFactor)
-{
- int i_DiffChannel = 0;
- int i_Module = 0;
-
- /* Test if single or differential mode */
- if (s_BoardInfos[dev->minor].i_ConnectionType == 1) {
- /* if diff */
-
- if (ui_Channel_num <= 1)
- i_DiffChannel = ui_Channel_num, i_Module = 0;
- else if ((ui_Channel_num >= 2) && (ui_Channel_num <= 3))
- i_DiffChannel = ui_Channel_num - 2, i_Module = 1;
- else if ((ui_Channel_num >= 4) && (ui_Channel_num <= 5))
- i_DiffChannel = ui_Channel_num - 4, i_Module = 2;
- else if ((ui_Channel_num >= 6) && (ui_Channel_num <= 7))
- i_DiffChannel = ui_Channel_num - 6, i_Module = 3;
-
- } else {
- /* if single */
- if ((ui_Channel_num == 0) || (ui_Channel_num == 1))
- i_DiffChannel = 0, i_Module = 0;
- else if ((ui_Channel_num == 2) || (ui_Channel_num == 3))
- i_DiffChannel = 1, i_Module = 0;
- else if ((ui_Channel_num == 4) || (ui_Channel_num == 5))
- i_DiffChannel = 0, i_Module = 1;
- else if ((ui_Channel_num == 6) || (ui_Channel_num == 7))
- i_DiffChannel = 1, i_Module = 1;
- else if ((ui_Channel_num == 8) || (ui_Channel_num == 9))
- i_DiffChannel = 0, i_Module = 2;
- else if ((ui_Channel_num == 10) || (ui_Channel_num == 11))
- i_DiffChannel = 1, i_Module = 2;
- else if ((ui_Channel_num == 12) || (ui_Channel_num == 13))
- i_DiffChannel = 0, i_Module = 3;
- else if ((ui_Channel_num == 14) || (ui_Channel_num == 15))
- i_DiffChannel = 1, i_Module = 3;
- }
-
- /* Test if thermocouple or RTD mode */
- *CJCCurrentSource =
- s_BoardInfos[dev->minor].s_Module[i_Module].ul_CurrentSourceCJC;
-
- *ChannelCurrentSource =
- s_BoardInfos[dev->minor].s_Module[i_Module].
- ul_CurrentSource[i_DiffChannel];
- /* } */
- /* } */
-
-	/* Channel gain factor */
- *ChannelGainFactor =
- s_BoardInfos[dev->minor].s_Module[i_Module].
- ul_GainFactor[s_BoardInfos[dev->minor].i_ADDIDATAGain];
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- return 0;
-}
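The channel-to-module ladder above reduces to integer division and modulo. The sketch below is not the original code, but it yields the same (module, channel) pairs for channels 0-7 in differential mode and 0-15 in single-ended mode:

/* Sketch: equivalent arithmetic form of the mapping above. */
static void sketch_apci3200_chan_map(unsigned int chan, int differential,
				     int *module, int *chan_in_module)
{
	if (differential) {		/* 2 differential channels per module */
		*module = chan / 2;
		*chan_in_module = chan % 2;
	} else {			/* 4 single-ended inputs per module, paired */
		*module = chan / 4;
		*chan_in_module = (chan / 2) % 2;
	}
}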
-
-static int apci3200_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->i_IobaseReserved) & 0xf;
-
- return insn->n;
-}
-
-static int apci3200_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- s->state = inl(devpriv->i_IobaseAddon) & 0xf;
-
- if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->i_IobaseAddon);
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int i_APCI3200_Read1AnalogInputChannel(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- unsigned int ui_ChannelNo = 0;
- unsigned int ui_CommandRegister = 0;
-
-	/* BEGIN JK 06.07.04: Management of several boards */
- /* ui_ChannelNo=i_ChannelNo; */
- ui_ChannelNo = s_BoardInfos[dev->minor].i_ChannelNo;
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: Bad channel value is used when using differential mode */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- outl(0 | s_BoardInfos[dev->minor].i_ChannelNo,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x4);
- /* End JK 20.10.2004: Bad channel value is used when using differential mode */
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
-
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /**************************************************************************/
-	/* Set the start and stop index to the selected channel and set the start */
- /**************************************************************************/
-
- ui_CommandRegister = ui_ChannelNo | (ui_ChannelNo << 8) | 0x80000;
-
-	/*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /* Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /******************************/
- /* Write the command register */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(ui_CommandRegister, devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
-	/*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*************************/
- /*Read the EOC Status bit */
- /*************************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /***************************************/
- /* Read the digital value of the input */
- /***************************************/
-
- /* data[0] = inl (devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-		/* END JK 06.07.04: Management of several boards */
-
- }
- return 0;
-}
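The repeated while loops above all wait for the same "ready" bit. A sketch of that wait factored into a helper; the helper name and parameter names are editorial, while the register offset (12) and bit position (19) are taken from the loops above:

/* Sketch: wait until bit 19 of the module status register (offset 12) is set. */
static void sketch_wait_module_ready(unsigned long iobase, unsigned int module_offset)
{
	while (!((inl(iobase + module_offset + 12) >> 19) & 1))
		;	/* busy-wait on the ready bit */
}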
-
-static int i_APCI3200_ReadCalibrationOffsetValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Temp = 0, ui_EOC = 0;
- unsigned int ui_CommandRegister = 0;
-
-	/* BEGIN JK 06.07.04: Management of several boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: This seems not necessary ! */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- /* End JK 20.10.2004: This seems not necessary ! */
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /*****************************/
- /*Read the calibration offset */
- /*****************************/
- /* ui_Temp = inl(devpriv->iobase+i_Offset + 12); */
- ui_Temp = inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*********************************/
- /*Configure the Offset Conversion */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Temp | 0x00020000), devpriv->iobase+i_Offset + 12); */
- outl((ui_Temp | 0x00020000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
-
- ui_CommandRegister = 0;
-
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
-
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister, devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /*Test if interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl (devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /**************************************************/
- /*Read the digital value of the calibration Offset */
- /**************************************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset+ 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCalibrationGainValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* Begin JK 20.10.2004: This seems not necessary ! */
- /* outl(0 | ui_Channel_num , devpriv->iobase+i_Offset + 0x4); */
- /* outl(0 | s_BoardInfos [dev->minor].ui_Channel_num , devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 0x4); */
- /* End JK 20.10.2004: This seems not necessary ! */
-
- /***************************/
- /*Read the calibration gain */
- /***************************/
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /*******************************/
- /*Configure the Gain Conversion */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00040000 , devpriv->iobase+i_Offset + 12); */
- outl(0x00040000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
-
- ui_CommandRegister = 0;
-
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
-
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister , devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /*Test if interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
-
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /************************************************/
- /*Read the digital value of the calibration Gain */
- /************************************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCValue(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /******************************/
- /*Set the converting time unit */
- /******************************/
-
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl( 0x00000400 , devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************************/
- /*Initialise dw_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
-
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
-
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister , devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /*Test if interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
-
- /*******************/
- /*Read the EOC flag */
- /*******************/
-
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
-
- } while (ui_EOC != 1);
-
- /***********************************/
- /*Read the digital value of the CJC */
- /***********************************/
-
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCCalOffset(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /*******************************************/
- /*Read calibration offset value for the CJC */
- /*******************************************/
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00000400 , devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*********************************/
- /*Configure the Offset Conversion */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00020000, devpriv->iobase+i_Offset + 12); */
- outl(0x00020000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /*******************************/
- /*Initialise ui_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
-
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister,devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
- } while (ui_EOC != 1);
-
- /**************************************************/
- /*Read the digital value of the calibration Offset */
- /**************************************************/
- /* data[0] = inl(devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int i_APCI3200_ReadCJCCalGain(struct comedi_device *dev,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_EOC = 0;
- int ui_CommandRegister = 0;
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTimeUnit , devpriv->iobase+i_Offset + 36); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /**************************/
- /* Set the convert timing */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(i_ADDIDATAConversionTime , devpriv->iobase+i_Offset + 32); */
- outl(s_BoardInfos[dev->minor].i_ADDIDATAConversionTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
- /******************************/
- /*Configure the CJC Conversion */
- /******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00000400,devpriv->iobase+i_Offset + 4); */
- outl(0x00000400,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************************/
- /*Configure the Gain Conversion */
- /*******************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(0x00040000,devpriv->iobase+i_Offset + 12); */
- outl(0x00040000,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
-
- /*******************************/
- /*Initialise dw_CommandRegister */
- /*******************************/
- ui_CommandRegister = 0;
- /*Test if the interrupt is enabled */
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
- /*Enable the interrupt */
- ui_CommandRegister = ui_CommandRegister | 0x00100000;
- }
- /**********************/
- /*Start the conversion */
- /**********************/
- ui_CommandRegister = ui_CommandRegister | 0x00080000;
- /***************************/
- /*Write the command register */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_CommandRegister ,devpriv->iobase+i_Offset + 8); */
- outl(ui_CommandRegister,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0) {
- do {
- /*******************/
- /*Read the EOC flag */
- /*******************/
- /* ui_EOC = inl(devpriv->iobase+i_Offset + 20) & 1; */
- ui_EOC = inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 20) & 1;
- } while (ui_EOC != 1);
- /************************************************/
- /*Read the digital value of the calibration Gain */
- /************************************************/
- /* data[0] = inl (devpriv->iobase+i_Offset + 28); */
- data[0] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- }
- return 0;
-}
-
-static int apci3200_reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int i_Temp;
- unsigned int dw_Dummy;
-
- /* i_InterruptFlag=0; */
- /* i_Initialised==0; */
- /* i_Count=0; */
- /* i_Sum=0; */
-
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- s_BoardInfos[dev->minor].b_StructInitialized = 0;
-
- outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
- outl(0, devpriv->i_IobaseAddon); /* Resets the output */
- /********************/
- /* Empty the buffer */
- /********************/
- for (i_Temp = 0; i_Temp <= 95; i_Temp++) {
- /* ui_InterruptChannelValue[i_Temp]=0; */
- s_BoardInfos[dev->minor].ui_InterruptChannelValue[i_Temp] = 0;
- } /* for(i_Temp=0;i_Temp<=95;i_Temp++) */
- /*****************************/
- /*Reset the START and IRQ bit */
- /*****************************/
- for (i_Temp = 0; i_Temp <= 192;) {
- while (((inl(devpriv->iobase + i_Temp + 12) >> 19) & 1) != 1) ;
- outl(0, devpriv->iobase + i_Temp + 8);
- i_Temp = i_Temp + 64;
- } /* for(i_Temp=0;i_Temp<=192;i_Temp+64) */
- return 0;
-}
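
The reset loop above walks the register banks of the four on-board modules at byte offsets 0, 64, 128 and 192, which are exactly the i_Offset values that apci3200_ai_config assigns later (four single-ended channels per module). A small sketch of that channel-to-offset mapping, assuming the helper and macro names and covering only the single-ended case:

/* 64 bytes of registers per module, four single-ended channels per module:
 * channels 0-3 -> offset 0, 4-7 -> 64, 8-11 -> 128, 12-15 -> 192,
 * matching the if-ladder in apci3200_ai_config. */
#define APCI3200_MODULE_REG_SIZE	64
#define APCI3200_SE_CHANS_PER_MODULE	4

static unsigned int apci3200_se_channel_to_offset(unsigned int chan)
{
	return (chan / APCI3200_SE_CHANS_PER_MODULE) * APCI3200_MODULE_REG_SIZE;
}
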
-
-/*
- * Read value of the selected channel
- *
- * data[0] : Digital Value Of Input
- * data[1] : Calibration Offset Value
- * data[2] : Calibration Gain Value
- * data[3] : CJC value
- * data[4] : CJC offset value
- * data[5] : CJC gain value
- * data[6] : CJC current source from eeprom
- * data[7] : Channel current source from eeprom
- * data[8] : Channel gain factor from eeprom
- */
-static int apci3200_ai_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned int ui_DummyValue = 0;
- int i_ConvertCJCCalibration;
- int i = 0;
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_Initialised==0) */
- if (s_BoardInfos[dev->minor].i_Initialised == 0)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(i_Initialised==0); */
-
- switch (insn->unused[0]) {
- case 0:
-
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+0]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 0] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* Begin JK 25.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- i_APCI3200_GetChannelCalibrationValue(dev,
- s_BoardInfos[dev->minor].ui_Channel_num,
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 6],
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 7],
- &s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->minor].
- i_Count + 8]);
- /* End JK 25.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE) && (i_CJCAvailable==1)) */
- if ((s_BoardInfos[dev->minor].i_ADDIDATAType == 2)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE)
- && (s_BoardInfos[dev->minor].i_CJCAvailable == 1))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_APCI3200_ReadCJCValue(dev, &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 3]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 3] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 3]=0; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 3] = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* elseif((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE) && (i_CJCAvailable==1)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if (( i_AutoCalibration == FALSE) && (i_InterruptFlag == FALSE)) */
- if ((s_BoardInfos[dev->minor].i_AutoCalibration == FALSE)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_APCI3200_ReadCalibrationOffsetValue(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 1]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 1] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count + 2]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos[dev->
- minor].i_Count + 2] = ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if (( i_AutoCalibration == FALSE) && (i_InterruptFlag == FALSE)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)&& (i_CJCAvailable==1)) */
- if ((s_BoardInfos[dev->minor].i_ADDIDATAType == 2)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == FALSE)
- && (s_BoardInfos[dev->minor].i_CJCAvailable == 1))
- /* END JK 06.07.04: Management of sevrals boards */
- {
- /**********************************************************/
- /*Test if the Calibration channel must be read for the CJC */
- /**********************************************************/
- /**********************************/
- /*Test if the polarity is the same */
- /**********************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (s_BoardInfos[dev->minor].i_CJCPolarity !=
- s_BoardInfos[dev->minor].i_ADDIDATAPolarity)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_ConvertCJCCalibration = 1;
- } /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_CJCGain==i_ADDIDATAGain) */
- if (s_BoardInfos[dev->minor].i_CJCGain ==
- s_BoardInfos[dev->minor].i_ADDIDATAGain)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- i_ConvertCJCCalibration = 0;
- } /* if(i_CJCGain==i_ADDIDATAGain) */
- else {
- i_ConvertCJCCalibration = 1;
- } /* elseif(i_CJCGain==i_ADDIDATAGain) */
- } /* elseif(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (i_ConvertCJCCalibration == 1) {
- i_APCI3200_ReadCJCCalOffset(dev,
- &ui_DummyValue);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+4]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 4] =
- ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
-
- i_APCI3200_ReadCJCCalGain(dev, &ui_DummyValue);
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+5]=ui_DummyValue; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 5] =
- ui_DummyValue;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(i_ConvertCJCCalibration==1) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ui_InterruptChannelValue[i_Count+4]=0; */
- /* ui_InterruptChannelValue[i_Count+5]=0; */
-
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 4] = 0;
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[s_BoardInfos
- [dev->minor].i_Count + 5] = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* elseif(i_ConvertCJCCalibration==1) */
- } /* if((i_ADDIDATAType==2) && (i_InterruptFlag == FALSE)) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].i_ScanType != 1) {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].i_Count = 0;
- } /* if(i_ScanType!=1) */
- else {
- /* i_Count=i_Count +6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count +6; */
- s_BoardInfos[dev->minor].i_Count =
- s_BoardInfos[dev->minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* else if(i_ScanType!=1) */
-
- /* if((i_ScanType==1) &&(i_InterruptFlag==1)) */
- if ((s_BoardInfos[dev->minor].i_ScanType == 1)
- && (s_BoardInfos[dev->minor].i_InterruptFlag == 1)) {
- /* i_Count=i_Count-6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count-6; */
- s_BoardInfos[dev->minor].i_Count =
- s_BoardInfos[dev->minor].i_Count - 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
- /* if(i_ScanType==0) */
- if (s_BoardInfos[dev->minor].i_ScanType == 0) {
- /*
- data[0]= ui_InterruptChannelValue[0];
- data[1]= ui_InterruptChannelValue[1];
- data[2]= ui_InterruptChannelValue[2];
- data[3]= ui_InterruptChannelValue[3];
- data[4]= ui_InterruptChannelValue[4];
- data[5]= ui_InterruptChannelValue[5];
- */
- data[0] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[0];
- data[1] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[1];
- data[2] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[2];
- data[3] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[3];
- data[4] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[4];
- data[5] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[5];
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- i_APCI3200_GetChannelCalibrationValue(dev,
- s_BoardInfos[dev->minor].ui_Channel_num,
- &data[6], &data[7], &data[8]);
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
- break;
- case 1:
-
- for (i = 0; i < insn->n; i++) {
- /* data[i]=ui_InterruptChannelValue[i]; */
- data[i] =
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue[i];
- }
-
- /* i_Count=0; */
- /* i_Sum=0; */
- /* if(i_ScanType==1) */
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- if (s_BoardInfos[dev->minor].i_ScanType == 1) {
- /* i_Initialised=0; */
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- }
- break;
- default:
- printk("\nThe parameters passed are in error\n");
- apci3200_reset(dev);
- return -EINVAL;
- } /* switch(insn->unused[0]) */
-
- return insn->n;
-}
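
The nine entries stored per acquisition in ui_InterruptChannelValue[] follow the data[0]..data[8] layout documented above apci3200_ai_read, which is also why i_Count is advanced and rewound in steps of 9 in scan mode. A purely illustrative view of one such group; the struct and its field names are invented, the driver itself only ever uses a flat unsigned int array.

/* One per-acquisition record as laid out in ui_InterruptChannelValue[]. */
struct apci3200_sample_record {
	unsigned int value;		/* data[0]: digital value of the input         */
	unsigned int cal_offset;	/* data[1]: calibration offset value           */
	unsigned int cal_gain;		/* data[2]: calibration gain value             */
	unsigned int cjc_value;		/* data[3]: CJC value                          */
	unsigned int cjc_cal_offset;	/* data[4]: CJC offset value                   */
	unsigned int cjc_cal_gain;	/* data[5]: CJC gain value                     */
	unsigned int cjc_current_src;	/* data[6]: CJC current source from EEPROM     */
	unsigned int chan_current_src;	/* data[7]: channel current source from EEPROM */
	unsigned int chan_gain_factor;	/* data[8]: channel gain factor from EEPROM    */
};
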
-
-/*
- * Configures The Analog Input Subdevice
- *
- * data[0] = 0 Normal AI
- * = 1 RTD
- * = 2 THERMOCOUPLE
- * data[1] = Gain To Use
- * data[2] = 0 Bipolar
- * = 1 Unipolar
- * data[3] = Offset Range
- * data[4] = 0 DC Coupling
- * = 1 AC Coupling
- * data[5] = 0 Single
- * = 1 Differential
- * data[6] = TimerReloadValue
- * data[7] = ConvertingTimeUnit
- * data[8] = 0 Analog voltage measurement
- * = 1 Resistance measurement
- * = 2 Temperature measurement
- * data[9] = 0 Interrupt Disable
- * = 1 Interrupt Enable
- * data[10] = Type of Thermocouple
- * data[11] = single channel Module Number
- * data[12] = 0 Single Read
- * = 1 Read more channels
- * = 2 Single scan
- * = 3 Continuous Scan
- * data[13] = Number of channels to read
- * data[14] = 0 RTD not used
- * = 1 RTD 2 wire connection
- * = 2 RTD 3 wire connection
- * = 3 RTD 4 wire connection
- */
-static int apci3200_ai_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ul_Config = 0, ul_Temp = 0;
- unsigned int ui_ChannelNo = 0;
- unsigned int ui_Dummy = 0;
- int i_err = 0;
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* Initialize the structure */
- if (s_BoardInfos[dev->minor].b_StructInitialized != 1) {
- s_BoardInfos[dev->minor].i_CJCAvailable = 1;
- s_BoardInfos[dev->minor].i_CJCPolarity = 0;
- s_BoardInfos[dev->minor].i_CJCGain = 2; /* changed from 0 to 2 */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_AutoCalibration = 0; /* : auto calibration */
- s_BoardInfos[dev->minor].i_ChannelCount = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- s_BoardInfos[dev->minor].ui_Channel_num = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].b_StructInitialized = 1;
-
- /* Begin JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].i_ConnectionType = 0;
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* Begin JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- memset(s_BoardInfos[dev->minor].s_Module, 0,
- sizeof(s_BoardInfos[dev->minor].s_Module[MAX_MODULE]));
-
- v_GetAPCI3200EepromCalibrationValue(devpriv->i_IobaseAmcc,
- &s_BoardInfos[dev->minor]);
- /* End JK 21.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- }
-
- if (data[0] != 0 && data[0] != 1 && data[0] != 2) {
- printk("\nThe selection of acquisition type is in error\n");
- i_err++;
- } /* if(data[0]!=0 && data[0]!=1 && data[0]!=2) */
- if (data[0] == 1) {
- if (data[14] != 0 && data[14] != 1 && data[14] != 2
- && data[14] != 4) {
- printk("\n Error in selection of RTD connection type\n");
- i_err++;
- } /* if(data[14]!=0 && data[14]!=1 && data[14]!=2 && data[14]!=4) */
- } /* if(data[0]==1 ) */
- if (data[1] < 0 || data[1] > 7) {
- printk("\nThe selection of gain is in error\n");
- i_err++;
- } /* if(data[1]<0 || data[1]>7) */
- if (data[2] != 0 && data[2] != 1) {
- printk("\nThe selection of polarity is in error\n");
- i_err++;
- } /* if(data[2]!=0 && data[2]!=1) */
- if (data[3] != 0) {
- printk("\nThe selection of offset range is in error\n");
- i_err++;
- } /* if(data[3]!=0) */
- if (data[4] != 0 && data[4] != 1) {
- printk("\nThe selection of coupling is in error\n");
- i_err++;
- } /* if(data[4]!=0 && data[4]!=1) */
- if (data[5] != 0 && data[5] != 1) {
- printk("\nThe selection of single/differential mode is in error\n");
- i_err++;
- } /* if(data[5]!=0 && data[5]!=1) */
- if (data[8] != 0 && data[8] != 1 && data[2] != 2) {
- printk("\nError in selection of functionality\n");
- } /* if(data[8]!=0 && data[8]!=1 && data[2]!=2) */
- if (data[12] == 0 || data[12] == 1) {
- if (data[6] != 20 && data[6] != 40 && data[6] != 80
- && data[6] != 160) {
- printk("\nThe selection of conversion time reload value is in error\n");
- i_err++;
- } /* if (data[6]!=20 && data[6]!=40 && data[6]!=80 && data[6]!=160 ) */
- if (data[7] != 2) {
- printk("\nThe selection of conversion time unit is in error\n");
- i_err++;
- } /* if(data[7]!=2) */
- }
- if (data[9] != 0 && data[9] != 1) {
- printk("\nThe selection of interrupt enable is in error\n");
- i_err++;
- } /* if(data[9]!=0 && data[9]!=1) */
- if (data[11] < 0 || data[11] > 4) {
- printk("\nThe selection of module is in error\n");
- i_err++;
- } /* if(data[11] <0 || data[11]>1) */
- if (data[12] < 0 || data[12] > 3) {
- printk("\nThe selection of singlechannel/scan selection is in error\n");
- i_err++;
- } /* if(data[12] < 0 || data[12]> 3) */
- if (data[13] < 0 || data[13] > 16) {
- printk("\nThe selection of number of channels is in error\n");
- i_err++;
- } /* if(data[13] <0 ||data[13] >15) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /*
- i_ChannelCount=data[13];
- i_ScanType=data[12];
- i_ADDIDATAPolarity = data[2];
- i_ADDIDATAGain=data[1];
- i_ADDIDATAConversionTime=data[6];
- i_ADDIDATAConversionTimeUnit=data[7];
- i_ADDIDATAType=data[0];
- */
-
- /* Save acquisition configuration for the actual board */
- s_BoardInfos[dev->minor].i_ChannelCount = data[13];
- s_BoardInfos[dev->minor].i_ScanType = data[12];
- s_BoardInfos[dev->minor].i_ADDIDATAPolarity = data[2];
- s_BoardInfos[dev->minor].i_ADDIDATAGain = data[1];
- s_BoardInfos[dev->minor].i_ADDIDATAConversionTime = data[6];
- s_BoardInfos[dev->minor].i_ADDIDATAConversionTimeUnit = data[7];
- s_BoardInfos[dev->minor].i_ADDIDATAType = data[0];
- /* Begin JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- s_BoardInfos[dev->minor].i_ConnectionType = data[5];
- /* End JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* Begin JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- memset(s_BoardInfos[dev->minor].ui_ScanValueArray, 0, (7 + 12) * sizeof(unsigned int)); /* 7 is the maximal number of channels */
- /* End JK 19.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* BEGIN JK 02.07.04 : This while can't be done, it blocks the process when using several boards */
- /* while(i_InterruptFlag==1) */
- while (s_BoardInfos[dev->minor].i_InterruptFlag == 1) {
-#ifndef MSXBOX
- udelay(1);
-#else
- /* In the case where the driver is compiled for the MSX-Box */
- /* we used a printk to have a little delay because udelay */
- /* seems to be broken under the MSX-Box. */
- /* This solution has to be studied. */
- printk("");
-#endif
- }
- /* END JK 02.07.04 : This while can't be done, it blocks the process when using several boards */
-
- ui_ChannelNo = CR_CHAN(insn->chanspec); /* get the channel */
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=ui_ChannelNo; */
- /* ui_Channel_num =ui_ChannelNo; */
-
- s_BoardInfos[dev->minor].i_ChannelNo = ui_ChannelNo;
- s_BoardInfos[dev->minor].ui_Channel_num = ui_ChannelNo;
-
- /* END JK 06.07.04: Management of sevrals boards */
-
- if (data[5] == 0) {
- if (ui_ChannelNo > 15) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>15) */
- } /* if(data[5]==0) */
- else {
- if (data[14] == 2) {
- if (ui_ChannelNo > 3) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>3) */
- } /* if(data[14]==2) */
- else {
- if (ui_ChannelNo > 7) {
- printk("\nThe Selection of the channel is in error\n");
- i_err++;
- } /* if(ui_ChannelNo>7) */
- } /* elseif(data[14]==2) */
- } /* elseif(data[5]==0) */
- if (data[12] == 0 || data[12] == 1) {
- switch (data[5]) {
- case 0:
- if (ui_ChannelNo <= 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo <=3) */
- if (ui_ChannelNo >= 4 && ui_ChannelNo <= 7) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=4 && ui_ChannelNo <=7) */
- if (ui_ChannelNo >= 8 && ui_ChannelNo <= 11) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=8 && ui_ChannelNo <=11) */
- if (ui_ChannelNo >= 12 && ui_ChannelNo <= 15) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo >=12 && ui_ChannelNo <=15) */
- break;
- case 1:
- if (data[14] == 2) {
- if (ui_ChannelNo == 0) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==0 ) */
- if (ui_ChannelNo == 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==1) */
- if (ui_ChannelNo == 2) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==2 ) */
- if (ui_ChannelNo == 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo ==3) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=0; */
- s_BoardInfos[dev->minor].i_ChannelNo = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = 0;
- break;
- } /* if(data[14]==2) */
- if (ui_ChannelNo <= 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(ui_ChannelNo <=1) */
- if (ui_ChannelNo >= 2 && ui_ChannelNo <= 3) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-2; */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 2;
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 2;
- } /* if(ui_ChannelNo >=2 && ui_ChannelNo <=3) */
- if (ui_ChannelNo >= 4 && ui_ChannelNo <= 5) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-4; */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 4;
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 4;
- } /* if(ui_ChannelNo >=4 && ui_ChannelNo <=5) */
- if (ui_ChannelNo >= 6 && ui_ChannelNo <= 7) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_ChannelNo=i_ChannelNo-6; */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_ChannelNo =
- s_BoardInfos[dev->minor].i_ChannelNo -
- 6;
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- ui_ChannelNo = ui_ChannelNo - 6;
- } /* if(ui_ChannelNo >=6 && ui_ChannelNo <=7) */
- break;
-
- default:
- printk("\n This selection of polarity does not exist\n");
- i_err++;
- } /* switch(data[2]) */
- } /* if(data[12]==0 || data[12]==1) */
- else {
- switch (data[11]) {
- case 1:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=0; */
- s_BoardInfos[dev->minor].i_Offset = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 2:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=64; */
- s_BoardInfos[dev->minor].i_Offset = 64;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 3:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=128; */
- s_BoardInfos[dev->minor].i_Offset = 128;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- case 4:
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Offset=192; */
- s_BoardInfos[dev->minor].i_Offset = 192;
- /* END JK 06.07.04: Management of sevrals boards */
- break;
- default:
- printk("\nError in module selection\n");
- i_err++;
- } /* switch(data[11]) */
- } /* elseif(data[12]==0 || data[12]==1) */
- if (i_err) {
- apci3200_reset(dev);
- return -EINVAL;
- }
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].i_ScanType != 1) {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Count=0; */
- /* i_Sum=0; */
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(i_ScanType!=1) */
-
- ul_Config =
- data[1] | (data[2] << 6) | (data[5] << 7) | (data[3] << 8) |
- (data[4] << 9);
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
- /*********************************/
- /* Write the channel to configure */
- /*********************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(0 | ui_ChannelNo , devpriv->iobase+i_Offset + 0x4); */
- outl(0 | ui_ChannelNo,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x4);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
- /**************************/
- /* Reset the configuration */
- /**************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(0 , devpriv->iobase+i_Offset + 0x0); */
- outl(0, devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x0);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /***************************/
- /* Write the configuration */
- /***************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl(ul_Config , devpriv->iobase+i_Offset + 0x0); */
- outl(ul_Config,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x0);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /***************************/
- /*Reset the calibration bit */
- /***************************/
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* ul_Temp = inl(devpriv->iobase+i_Offset + 12); */
- ul_Temp = inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* outl((ul_Temp & 0xFFF9FFFF) , devpriv->iobase+.i_Offset + 12); */
- outl((ul_Temp & 0xFFF9FFFF),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* END JK 06.07.04: Management of sevrals boards */
-
- if (data[9] == 1) {
- devpriv->tsk_Current = current;
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_InterruptFlag=1; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 1;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* if(data[9]==1) */
- else {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- /* END JK 06.07.04: Management of sevrals boards */
- } /* else if(data[9]==1) */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Initialised=1; */
- s_BoardInfos[dev->minor].i_Initialised = 1;
- /* END JK 06.07.04: Management of sevrals boards */
-
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].i_ScanType == 1)
- /* END JK 06.07.04: Management of sevrals boards */
- {
- /* BEGIN JK 06.07.04: Management of sevrals boards */
- /* i_Sum=i_Sum+1; */
- s_BoardInfos[dev->minor].i_Sum =
- s_BoardInfos[dev->minor].i_Sum + 1;
- /* END JK 06.07.04: Management of sevrals boards */
-
- insn->unused[0] = 0;
- apci3200_ai_read(dev, s, insn, &ui_Dummy);
- }
-
- return insn->n;
-}
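
ul_Config above packs five of the insn data words into one configuration register value: the gain in the low bits, the polarity at bit 6, single/differential mode at bit 7, the offset range at bit 8 and the coupling at bit 9. The same packing with the shifts named; the macro names are assumptions, and the values are the ones already range-checked earlier in the function.

/* ul_Config = data[1] | (data[2] << 6) | (data[5] << 7) | (data[3] << 8) | (data[4] << 9) */
#define APCI3200_CFG_GAIN(g)		(g)		/* data[1]: 0..7                     */
#define APCI3200_CFG_POLARITY(p)	((p) << 6)	/* data[2]: 0 bipolar, 1 unipolar    */
#define APCI3200_CFG_DIFF(d)		((d) << 7)	/* data[5]: 0 single, 1 differential */
#define APCI3200_CFG_OFFSET_RANGE(o)	((o) << 8)	/* data[3]: offset range             */
#define APCI3200_CFG_COUPLING(c)	((c) << 9)	/* data[4]: 0 DC, 1 AC               */

static unsigned int apci3200_pack_ai_config(const unsigned int *data)
{
	return APCI3200_CFG_GAIN(data[1]) |
	       APCI3200_CFG_POLARITY(data[2]) |
	       APCI3200_CFG_DIFF(data[5]) |
	       APCI3200_CFG_OFFSET_RANGE(data[3]) |
	       APCI3200_CFG_COUPLING(data[4]);
}
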
-
-/*
- * Tests the Selected Analog Input Channel
- *
- * data[0] = 0 TestAnalogInputShortCircuit
- * = 1 TestAnalogInputConnection
- *
- * data[0] : Digital value obtained
- * data[1] : calibration offset
- * data[2] : calibration gain
- */
-static int apci3200_ai_bits_test(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Configuration = 0;
- int i_Temp; /* ,i_TimeUnit; */
-
- /* if(i_Initialised==0) */
-
- if (s_BoardInfos[dev->minor].i_Initialised == 0) {
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(i_Initialised==0); */
- if (data[0] != 0 && data[0] != 1) {
- printk("\nError in selection of functionality\n");
- apci3200_reset(dev);
- return -EINVAL;
- } /* if(data[0]!=0 && data[0]!=1) */
-
- if (data[0] == 1) /* Perform Short Circuit TEST */
- {
- /**************************/
- /*Set the short-circuit bit */
- /**************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset + 12) >> 19) & 1) !=
- 1) ;
- /* outl((0x00001000 |i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00001000 | s_BoardInfos[dev->minor].i_ChannelNo),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 4);
- /*************************/
- /*Set the time unit to ns */
- /*************************/
- /* i_TimeUnit= i_ADDIDATAConversionTimeUnit;
- i_ADDIDATAConversionTimeUnit= 1; */
- /* i_Temp= i_InterruptFlag ; */
- i_Temp = s_BoardInfos[dev->minor].i_InterruptFlag;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn, data);
- /* if(i_AutoCalibration == FALSE) */
- if (s_BoardInfos[dev->minor].i_AutoCalibration == FALSE) {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl((0x00001000 |i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00001000 | s_BoardInfos[dev->minor].
- i_ChannelNo),
- devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 4);
- data++;
- i_APCI3200_ReadCalibrationOffsetValue(dev, data);
- data++;
- i_APCI3200_ReadCalibrationGainValue(dev, data);
- }
- } else {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset + 12) >> 19) & 1) !=
- 1) ;
- /* outl((0x00000800|i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00000800 | s_BoardInfos[dev->minor].i_ChannelNo),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 4);
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 0); */
- ui_Configuration =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 0);
- /*************************/
- /*Set the time unit to ns */
- /*************************/
- /* i_TimeUnit= i_ADDIDATAConversionTimeUnit;
- i_ADDIDATAConversionTimeUnit= 1; */
- /* i_Temp= i_InterruptFlag ; */
- i_Temp = s_BoardInfos[dev->minor].i_InterruptFlag;
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- i_APCI3200_Read1AnalogInputChannel(dev, s, insn, data);
- /* if(i_AutoCalibration == FALSE) */
- if (s_BoardInfos[dev->minor].i_AutoCalibration == FALSE) {
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].
- i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((0x00000800|i_ChannelNo) , devpriv->iobase+i_Offset + 4); */
- outl((0x00000800 | s_BoardInfos[dev->minor].
- i_ChannelNo),
- devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 4);
- data++;
- i_APCI3200_ReadCalibrationOffsetValue(dev, data);
- data++;
- i_APCI3200_ReadCalibrationGainValue(dev, data);
- }
- }
- /* i_InterruptFlag=i_Temp ; */
- s_BoardInfos[dev->minor].i_InterruptFlag = i_Temp;
- return insn->n;
-}
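
The two test modes above differ only in the flag OR-ed into the channel-select word at offset 4: 0x00001000 selects the short-circuit test (data[0] == 1) and 0x00000800 the connection test. A sketch with those bits named; the names are illustrative only.

#define APCI3200_CHANSEL_REG		4		/* channel select / test mode word */
#define APCI3200_TEST_SHORT_CIRCUIT	(1u << 12)	/* 0x00001000, data[0] == 1 path   */
#define APCI3200_TEST_CONNECTION	(1u << 11)	/* 0x00000800, data[0] == 0 path   */

static unsigned int apci3200_test_chansel(unsigned int chan, int short_circuit)
{
	return chan | (short_circuit ? APCI3200_TEST_SHORT_CIRCUIT
				     : APCI3200_TEST_CONNECTION);
}
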
-
-static int apci3200_ai_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- apci3200_reset(dev);
- return insn->n;
-}
-
-static int apci3200_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
-{
-
- int err = 0;
- unsigned int ui_ConvertTime = 0;
- unsigned int ui_ConvertTimeBase = 0;
- unsigned int ui_DelayTime = 0;
- unsigned int ui_DelayTimeBase = 0;
- int i_NbrOfChannel = 0;
- int i_Cpt = 0;
- double d_ConversionTimeForAllChannels = 0.0;
- double d_SCANTimeNewUnit = 0.0;
- unsigned int arg;
-
- /* Step 1 : check if triggers are trivially valid */
-
- err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER | TRIG_FOLLOW);
- err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
- err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
- err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
-
- if (s_BoardInfos[dev->minor].i_InterruptFlag == 0)
- err |= -EINVAL;
-
- if (err) {
- apci3200_reset(dev);
- return 1;
- }
-
- /* Step 2a : make sure trigger sources are unique */
-
- err |= cfc_check_trigger_is_unique(&cmd->start_src);
- err |= cfc_check_trigger_is_unique(&cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(&cmd->stop_src);
-
- /* Step 2b : and mutually compatible */
-
- if (err) {
- apci3200_reset(dev);
- return 2;
- }
-
- /* Step 3: check if arguments are trivially valid */
-
- switch (cmd->start_src) {
- case TRIG_NOW:
- err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
- break;
- case TRIG_EXT:
- /* validate the trigger edge selection */
- arg = cmd->start_arg & 0xffff;
- if (arg < 1 || arg > 3) {
- cmd->start_arg &= ~0xffff;
- cmd->start_arg |= 1;
- err |= -EINVAL;
- }
- /* validate the trigger mode selection */
- arg = cmd->start_arg >> 16;
- if (arg != 2) {
- cmd->start_arg &= ~(0xffff << 16);
- cmd->start_arg |= (2 << 16);
- err |= -EINVAL;
- }
- break;
- }
-
- err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
-
- /* i_FirstChannel=cmd->chanlist[0]; */
- s_BoardInfos[dev->minor].i_FirstChannel = cmd->chanlist[0];
- /* i_LastChannel=cmd->chanlist[1]; */
- s_BoardInfos[dev->minor].i_LastChannel = cmd->chanlist[1];
-
- if (cmd->convert_src == TRIG_TIMER) {
- ui_ConvertTime = cmd->convert_arg & 0xFFFF;
- ui_ConvertTimeBase = cmd->convert_arg >> 16;
- if (ui_ConvertTime != 20 && ui_ConvertTime != 40
- && ui_ConvertTime != 80 && ui_ConvertTime != 160)
- {
- printk("\nThe selection of conversion time reload value is in error\n");
- err++;
- } /* if (ui_ConvertTime!=20 && ui_ConvertTime!=40 && ui_ConvertTime!=80 && ui_ConvertTime!=160 ) */
- if (ui_ConvertTimeBase != 2) {
- printk("\nThe selection of conversion time unit is in error\n");
- err++;
- } /* if(ui_ConvertTimeBase!=2) */
- } else {
- ui_ConvertTime = 0;
- ui_ConvertTimeBase = 0;
- }
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- ui_DelayTime = 0;
- ui_DelayTimeBase = 0;
- } /* if(cmd->scan_begin_src==TRIG_FOLLOW) */
- else {
- ui_DelayTime = cmd->scan_begin_arg & 0xFFFF;
- ui_DelayTimeBase = cmd->scan_begin_arg >> 16;
- if (ui_DelayTimeBase != 2 && ui_DelayTimeBase != 3) {
- err++;
- printk("\nThe Delay time base selection is in error\n");
- }
- if (ui_DelayTime < 1 || ui_DelayTime > 1023) {
- err++;
- printk("\nThe Delay time value is in error\n");
- }
- if (err) {
- apci3200_reset(dev);
- return 3;
- }
- fpu_begin();
- d_SCANTimeNewUnit = (double)ui_DelayTime;
- /* i_NbrOfChannel= i_LastChannel-i_FirstChannel + 4; */
- i_NbrOfChannel =
- s_BoardInfos[dev->minor].i_LastChannel -
- s_BoardInfos[dev->minor].i_FirstChannel + 4;
- /**********************************************************/
- /*calculate the total conversion time for all the channels */
- /**********************************************************/
- d_ConversionTimeForAllChannels =
- (double)((double)ui_ConvertTime /
- (double)i_NbrOfChannel);
-
- /*******************************/
- /*Convert the frequency into time */
- /*******************************/
- d_ConversionTimeForAllChannels =
- (double)1.0 / d_ConversionTimeForAllChannels;
- ui_ConvertTimeBase = 3;
- /***********************************/
- /*Test if the time unit is the same */
- /***********************************/
-
- if (ui_DelayTimeBase <= ui_ConvertTimeBase) {
-
- for (i_Cpt = 0;
- i_Cpt < (ui_ConvertTimeBase - ui_DelayTimeBase);
- i_Cpt++) {
-
- d_ConversionTimeForAllChannels =
- d_ConversionTimeForAllChannels * 1000;
- d_ConversionTimeForAllChannels =
- d_ConversionTimeForAllChannels + 1;
- }
- } else {
- for (i_Cpt = 0;
- i_Cpt < (ui_DelayTimeBase - ui_ConvertTimeBase);
- i_Cpt++) {
- d_SCANTimeNewUnit = d_SCANTimeNewUnit * 1000;
-
- }
- }
-
- if (d_ConversionTimeForAllChannels >= d_SCANTimeNewUnit) {
-
- printk("\nSCAN Delay value cannot be used\n");
- /*********************************/
- /*SCAN Delay value cannot be used */
- /*********************************/
- err++;
- }
- fpu_end();
- } /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
-
- if (err) {
- apci3200_reset(dev);
- return 4;
- }
-
- return 0;
-}
-
-static int apci3200_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_Configuration = 0;
-
- /* i_InterruptFlag=0; */
- /* i_Initialised=0; */
- /* i_Count=0; */
- /* i_Sum=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- s_BoardInfos[dev->minor].i_Initialised = 0;
- s_BoardInfos[dev->minor].i_Count = 0;
- s_BoardInfos[dev->minor].i_Sum = 0;
-
- /*******************/
- /*Read the register */
- /*******************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 8); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- /*****************************/
- /*Reset the START and IRQ bit */
- /*****************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration & 0xFFE7FFFF),devpriv->iobase+i_Offset + 8); */
- outl((ui_Configuration & 0xFFE7FFFF),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- return 0;
-}
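
The 0xFFE7FFFF mask used in apci3200_cancel is simply the complement of the two command bits set elsewhere in this file: 0x00080000 (bit 19) starts a conversion and 0x00100000 (bit 20) enables the end-of-conversion interrupt. Spelled out with named bits; this is a sketch, and the names are not from the vendor documentation.

#define APCI3200_CMD_START	(1u << 19)	/* 0x00080000: start conversion        */
#define APCI3200_CMD_IRQ_ENA	(1u << 20)	/* 0x00100000: enable end-of-conv. IRQ */

/* 0xFFE7FFFF == ~(APCI3200_CMD_START | APCI3200_CMD_IRQ_ENA) */
static unsigned int apci3200_cancel_command(unsigned int cmd)
{
	return cmd & ~(APCI3200_CMD_START | APCI3200_CMD_IRQ_ENA);
}
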
-
-/*
- * Does asynchronous acquisition
- * Determines the mode 1 or 2.
- */
-static int apci3200_ai_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int ui_Configuration = 0;
- /* INT i_CurrentSource = 0; */
- unsigned int ui_Trigger = 0;
- unsigned int ui_TriggerEdge = 0;
- unsigned int ui_Triggermode = 0;
- unsigned int ui_ScanMode = 0;
- unsigned int ui_ConvertTime = 0;
- unsigned int ui_ConvertTimeBase = 0;
- unsigned int ui_DelayTime = 0;
- unsigned int ui_DelayTimeBase = 0;
- unsigned int ui_DelayMode = 0;
-
- /* i_FirstChannel=cmd->chanlist[0]; */
- /* i_LastChannel=cmd->chanlist[1]; */
- s_BoardInfos[dev->minor].i_FirstChannel = cmd->chanlist[0];
- s_BoardInfos[dev->minor].i_LastChannel = cmd->chanlist[1];
- if (cmd->start_src == TRIG_EXT) {
- ui_Trigger = 1;
- ui_TriggerEdge = cmd->start_arg & 0xFFFF;
- ui_Triggermode = cmd->start_arg >> 16;
- } /* if(cmd->start_src==TRIG_EXT) */
- else {
- ui_Trigger = 0;
- } /* elseif(cmd->start_src==TRIG_EXT) */
-
- if (cmd->stop_src == TRIG_COUNT) {
- ui_ScanMode = 0;
- } /* if (cmd->stop_src==TRIG_COUNT) */
- else {
- ui_ScanMode = 2;
- } /* else if (cmd->stop_src==TRIG_COUNT) */
-
- if (cmd->scan_begin_src == TRIG_FOLLOW) {
- ui_DelayTime = 0;
- ui_DelayTimeBase = 0;
- ui_DelayMode = 0;
- } /* if(cmd->scan_begin_src==TRIG_FOLLOW) */
- else {
- ui_DelayTime = cmd->scan_begin_arg & 0xFFFF;
- ui_DelayTimeBase = cmd->scan_begin_arg >> 16;
- ui_DelayMode = 1;
- } /* else if(cmd->scan_begin_src==TRIG_FOLLOW) */
- if (cmd->convert_src == TRIG_TIMER) {
- ui_ConvertTime = cmd->convert_arg & 0xFFFF;
- ui_ConvertTimeBase = cmd->convert_arg >> 16;
- } else {
- ui_ConvertTime = 0;
- ui_ConvertTimeBase = 0;
- }
-
- /* if(i_ADDIDATAType ==1 || ((i_ADDIDATAType==2))) */
- /* { */
- /**************************************************/
- /*Read the old configuration of the current source */
- /**************************************************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 12); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /***********************************************/
- /*Write the configuration of the current source */
- /***********************************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration & 0xFFC00000 ), devpriv->iobase+i_Offset +12); */
- outl((ui_Configuration & 0xFFC00000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 12);
- /* } */
- ui_Configuration = 0;
-
- /* ui_Configuration = i_FirstChannel |(i_LastChannel << 8)| 0x00100000 | */
- ui_Configuration =
- s_BoardInfos[dev->minor].i_FirstChannel | (s_BoardInfos[dev->
- minor].
- i_LastChannel << 8) | 0x00100000 | (ui_Trigger << 24) |
- (ui_TriggerEdge << 25) | (ui_Triggermode << 27) | (ui_DelayMode
- << 18) | (ui_ScanMode << 16);
-
- /*************************/
- /*Write the Configuration */
- /*************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl( ui_Configuration, devpriv->iobase+i_Offset + 0x8); */
- outl(ui_Configuration,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 0x8);
- /***********************/
- /*Write the Delay Value */
- /***********************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_DelayTime,devpriv->iobase+i_Offset + 40); */
- outl(ui_DelayTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 40);
- /***************************/
- /*Write the Delay time base */
- /***************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_DelayTimeBase,devpriv->iobase+i_Offset + 44); */
- outl(ui_DelayTimeBase,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 44);
- /*********************************/
- /*Write the conversion time value */
- /*********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_ConvertTime,devpriv->iobase+i_Offset + 32); */
- outl(ui_ConvertTime,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 32);
-
- /********************************/
- /*Write the conversion time base */
- /********************************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl(ui_ConvertTimeBase,devpriv->iobase+i_Offset + 36); */
- outl(ui_ConvertTimeBase,
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 36);
- /*******************/
- /*Read the register */
- /*******************/
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 4); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /******************/
- /*Set the SCAN bit */
- /******************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
-
- /* outl(((ui_Configuration & 0x1E0FF) | 0x00002000),devpriv->iobase+i_Offset + 4); */
- outl(((ui_Configuration & 0x1E0FF) | 0x00002000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 4);
- /*******************/
- /*Read the register */
- /*******************/
- ui_Configuration = 0;
- /* ui_Configuration = inl(devpriv->iobase+i_Offset + 8); */
- ui_Configuration =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
-
- /*******************/
- /*Set the START bit */
- /*******************/
- /* while (((inl(devpriv->iobase+i_Offset+12)>>19) & 1) != 1); */
- while (((inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset +
- 12) >> 19) & 1) != 1) ;
- /* outl((ui_Configuration | 0x00080000),devpriv->iobase+i_Offset + 8); */
- outl((ui_Configuration | 0x00080000),
- devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 8);
- return 0;
-}
-
-/*
- * This function copies the acquired data(from FIFO) to Comedi buffer.
- */
-static int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- unsigned int ui_StatusRegister = 0;
-
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* comedi_async *async = s->async; */
- /* UINT *data; */
- /* data=async->data+async->buf_int_ptr;//new samples added from here onwards */
- int n = 0, i = 0;
- /* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /************************************/
- /*Read the interrupt status register */
- /************************************/
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase + s_BoardInfos[dev->minor].i_Offset + 16);
-
- /*************************/
- /*Test if interrupt occur */
- /*************************/
-
- if ((ui_StatusRegister & 0x2) == 0x2) {
- /*************************/
- /*Read the channel number */
- /*************************/
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* This value is not used */
- /* ui_ChannelNumber = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 24); */
- /* END JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /*************************************/
- /*Read the digital Analog Input value */
- /*************************************/
-
- /* data[i_Count] = inl(devpriv->iobase+i_Offset + 28); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* data[s_BoardInfos [dev->minor].i_Count] = inl(devpriv->iobase+s_BoardInfos [dev->minor].i_Offset + 28); */
- s_BoardInfos[dev->minor].ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count] =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* if((i_Count == (i_LastChannel-i_FirstChannel+3))) */
- if ((s_BoardInfos[dev->minor].i_Count ==
- (s_BoardInfos[dev->minor].i_LastChannel -
- s_BoardInfos[dev->minor].
- i_FirstChannel + 3))) {
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].i_Count++;
-
- for (i = s_BoardInfos[dev->minor].i_FirstChannel;
- i <= s_BoardInfos[dev->minor].i_LastChannel;
- i++) {
- i_APCI3200_GetChannelCalibrationValue(dev, i,
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3)],
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3) + 1],
- &s_BoardInfos[dev->minor].
- ui_ScanValueArray[s_BoardInfos[dev->
- minor].i_Count + ((i -
- s_BoardInfos
- [dev->minor].
- i_FirstChannel)
- * 3) + 2]);
- }
-
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /* i_Count=-1; */
-
- s_BoardInfos[dev->minor].i_Count = -1;
-
- /* async->buf_int_count+=(i_LastChannel-i_FirstChannel+4)*sizeof(unsigned int); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_count+=(s_BoardInfos [dev->minor].i_LastChannel-s_BoardInfos [dev->minor].i_FirstChannel+4)*sizeof(unsigned int); */
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_ptr+=(i_LastChannel-i_FirstChannel+4)*sizeof(unsigned int); */
- /* Begin JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* async->buf_int_ptr+=(s_BoardInfos [dev->minor].i_LastChannel-s_BoardInfos [dev->minor].i_FirstChannel+4)*sizeof(unsigned int); */
- /* comedi_eos(dev,s); */
-
- /* Set the event type (Comedi Buffer End Of Scan) */
- s->async->events |= COMEDI_CB_EOS;
-
- /* Test if enougth memory is available and allocate it for 7 values */
- n = comedi_buf_write_alloc(s,
- (7 + 12) * sizeof(unsigned int));
-
- /* If not enough memory available, event is set to Comedi Buffer Error */
- if (n > ((7 + 12) * sizeof(unsigned int))) {
- printk("\ncomedi_buf_write_alloc n = %i", n);
- s->async->events |= COMEDI_CB_ERROR;
- }
- /* Write all 7 scan values in the comedi buffer */
- comedi_buf_memcpy_to(s, 0,
- (unsigned int *) s_BoardInfos[dev->minor].
- ui_ScanValueArray, (7 + 12) * sizeof(unsigned int));
-
- /* Update comedi buffer pinters indexes */
- comedi_buf_write_free(s,
- (7 + 12) * sizeof(unsigned int));
-
- /* Send events */
- comedi_event(dev, s);
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
-
- /* BEGIN JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- /* */
- /* if (s->async->buf_int_ptr>=s->async->data_len) // for buffer rool over */
- /* { */
- /* /* buffer rollover */ */
- /* s->async->buf_int_ptr=0; */
- /* comedi_eobuf(dev,s); */
- /* } */
- /* End JK 18.10.2004: APCI-3200 Driver update 0.7.57 -> 0.7.68 */
- }
- /* i_Count++; */
- s_BoardInfos[dev->minor].i_Count++;
- }
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].i_InterruptFlag = 0;
- return 0;
-}
-
-static void apci3200_interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned int ui_StatusRegister = 0;
- unsigned int ui_ChannelNumber = 0;
- int i_CalibrationFlag = 0;
- int i_CJCFlag = 0;
- unsigned int ui_DummyValue = 0;
- unsigned int ui_DigitalTemperature = 0;
- unsigned int ui_DigitalInput = 0;
- int i_ConvertCJCCalibration;
- /* BEGIN JK TEST */
- int i_ReturnValue = 0;
- /* END JK TEST */
-
- /* switch(i_ScanType) */
- switch (s_BoardInfos[dev->minor].i_ScanType) {
- case 0:
- case 1:
- /* switch(i_ADDIDATAType) */
- switch (s_BoardInfos[dev->minor].i_ADDIDATAType) {
- case 0:
- case 1:
-
- /************************************/
- /*Read the interrupt status register */
- /************************************/
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 16);
- if ((ui_StatusRegister & 0x2) == 0x2) {
- /* i_CalibrationFlag = ((inl(devpriv->iobase+i_Offset + 12) & 0x00060000) >> 17); */
- i_CalibrationFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 12) & 0x00060000) >>
- 17);
- /*************************/
- /*Read the channel number */
- /*************************/
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
-
- /*************************************/
- /*Read the digital analog input value */
- /*************************************/
- /* ui_DigitalInput = inl(devpriv->iobase+i_Offset + 28); */
- ui_DigitalInput =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- /***********************************************/
- /* Test if the value read is the channel value */
- /***********************************************/
- if (i_CalibrationFlag == 0) {
- /* ui_InterruptChannelValue[i_Count + 0] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 0] = ui_DigitalInput;
-
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /*
- i_APCI3200_GetChannelCalibrationValue (dev, s_BoardInfos [dev->minor].ui_Channel_num,
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 6],
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 7],
- &s_BoardInfos [dev->minor].ui_InterruptChannelValue[s_BoardInfos [dev->minor].i_Count + 8]);
- */
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /******************************************************/
- /*Start the conversion of the calibration offset value */
- /******************************************************/
- i_APCI3200_ReadCalibrationOffsetValue
- (dev, &ui_DummyValue);
- } /* if (i_CalibrationFlag == 0) */
- /**********************************************************/
- /* Test if the value read is the calibration offset value */
- /**********************************************************/
-
- if (i_CalibrationFlag == 1) {
-
- /******************/
- /* Save the value */
- /******************/
-
- /* ui_InterruptChannelValue[i_Count + 1] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 1] = ui_DigitalInput;
-
- /******************************************************/
- /* Start the conversion of the calibration gain value */
- /******************************************************/
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
- } /* if (i_CalibrationFlag == 1) */
- /******************************************************/
- /*Test if the value read is the calibration gain value */
- /******************************************************/
-
- if (i_CalibrationFlag == 2) {
-
- /****************/
- /*Save the value */
- /****************/
- /* ui_InterruptChannelValue[i_Count + 2] = ui_DigitalInput; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 2] = ui_DigitalInput;
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType == 1) {
-
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].
- i_InterruptFlag = 0;
- /* i_Count=i_Count + 6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count + 6; */
- s_BoardInfos[dev->minor].
- i_Count =
- s_BoardInfos[dev->
- minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* if(i_ScanType==1) */
- else {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].
- i_Count = 0;
- } /* elseif(i_ScanType==1) */
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType != 1) {
- i_ReturnValue = send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if(i_ScanType!=1) */
- else {
- /* if(i_ChannelCount==i_Sum) */
- if (s_BoardInfos[dev->minor].
- i_ChannelCount ==
- s_BoardInfos[dev->
- minor].i_Sum) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- }
- } /* if(i_ScanType!=1) */
- } /* if (i_CalibrationFlag == 2) */
- } /* if ((ui_StatusRegister & 0x2) == 0x2) */
-
- break;
-
- case 2:
- /************************************/
- /*Read the interrupt status register */
- /************************************/
-
- /* ui_StatusRegister = inl(devpriv->iobase+i_Offset + 16); */
- ui_StatusRegister =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 16);
- /*************************/
- /*Test if interrupt occur */
- /*************************/
-
- if ((ui_StatusRegister & 0x2) == 0x2) {
-
- /* i_CJCFlag = ((inl(devpriv->iobase+i_Offset + 4) & 0x00000400) >> 10); */
- i_CJCFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 4) & 0x00000400) >> 10);
-
- /* i_CalibrationFlag = ((inl(devpriv->iobase+i_Offset + 12) & 0x00060000) >> 17); */
- i_CalibrationFlag =
- ((inl(devpriv->iobase +
- s_BoardInfos[dev->
- minor].
- i_Offset +
- 12) & 0x00060000) >>
- 17);
-
- /*************************/
- /*Read the channel number */
- /*************************/
-
- /* ui_ChannelNumber = inl(devpriv->iobase+i_Offset + 24); */
- ui_ChannelNumber =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 24);
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- s_BoardInfos[dev->minor].ui_Channel_num =
- ui_ChannelNumber;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
-
- /************************************/
- /*Read the digital temperature value */
- /************************************/
- /* ui_DigitalTemperature = inl(devpriv->iobase+i_Offset + 28); */
- ui_DigitalTemperature =
- inl(devpriv->iobase +
- s_BoardInfos[dev->minor].i_Offset + 28);
-
- /*********************************************/
- /*Test if the value read is the channel value */
- /*********************************************/
-
- if ((i_CalibrationFlag == 0)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 0]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 0] =
- ui_DigitalTemperature;
-
- /*********************************/
- /*Start the conversion of the CJC */
- /*********************************/
- i_APCI3200_ReadCJCValue(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 0) && (i_CJCFlag == 0)) */
-
- /*****************************************/
- /*Test if the value read is the CJC value */
- /*****************************************/
-
- if ((i_CJCFlag == 1)
- && (i_CalibrationFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 3]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 3] =
- ui_DigitalTemperature;
-
- /******************************************************/
- /*Start the conversion of the calibration offset value */
- /******************************************************/
- i_APCI3200_ReadCalibrationOffsetValue
- (dev, &ui_DummyValue);
- } /* if ((i_CJCFlag == 1) && (i_CalibrationFlag == 0)) */
-
- /********************************************************/
- /*Test if the value read is the calibration offset value */
- /********************************************************/
-
- if ((i_CalibrationFlag == 1)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 1]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 1] =
- ui_DigitalTemperature;
-
- /****************************************************/
- /*Start the conversion of the calibration gain value */
- /****************************************************/
- i_APCI3200_ReadCalibrationGainValue(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 1) && (i_CJCFlag == 0)) */
-
- /******************************************************/
- /*Test if the value read is the calibration gain value */
- /******************************************************/
-
- if ((i_CalibrationFlag == 2)
- && (i_CJCFlag == 0)) {
- /* ui_InterruptChannelValue[i_Count + 2]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 2] =
- ui_DigitalTemperature;
-
- /**********************************************************/
- /*Test if the Calibration channel must be read for the CJC */
- /**********************************************************/
-
- /*Test if the polarity is the same */
- /**********************************/
- /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (s_BoardInfos[dev->minor].
- i_CJCPolarity !=
- s_BoardInfos[dev->minor].
- i_ADDIDATAPolarity) {
- i_ConvertCJCCalibration = 1;
- } /* if(i_CJCPolarity!=i_ADDIDATAPolarity) */
- else {
- /* if(i_CJCGain==i_ADDIDATAGain) */
- if (s_BoardInfos[dev->minor].
- i_CJCGain ==
- s_BoardInfos[dev->
- minor].
- i_ADDIDATAGain) {
- i_ConvertCJCCalibration
- = 0;
- } /* if(i_CJCGain==i_ADDIDATAGain) */
- else {
- i_ConvertCJCCalibration
- = 1;
- } /* elseif(i_CJCGain==i_ADDIDATAGain) */
- } /* elseif(i_CJCPolarity!=i_ADDIDATAPolarity) */
- if (i_ConvertCJCCalibration == 1) {
- /****************************************************************/
- /*Start the conversion of the calibration gain value for the CJC */
- /****************************************************************/
- i_APCI3200_ReadCJCCalOffset(dev,
- &ui_DummyValue);
-
- } /* if(i_ConvertCJCCalibration==1) */
- else {
- /* ui_InterruptChannelValue[i_Count + 4]=0; */
- /* ui_InterruptChannelValue[i_Count + 5]=0; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->
- minor].i_Count +
- 4] = 0;
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->
- minor].i_Count +
- 5] = 0;
- } /* elseif(i_ConvertCJCCalibration==1) */
- } /* else if ((i_CalibrationFlag == 2) && (i_CJCFlag == 0)) */
-
- /********************************************************************/
- /*Test if the value read is the calibration offset value for the CJC */
- /********************************************************************/
-
- if ((i_CalibrationFlag == 1)
- && (i_CJCFlag == 1)) {
- /* ui_InterruptChannelValue[i_Count + 4]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 4] =
- ui_DigitalTemperature;
-
- /****************************************************************/
- /*Start the conversion of the calibration gain value for the CJC */
- /****************************************************************/
- i_APCI3200_ReadCJCCalGain(dev,
- &ui_DummyValue);
-
- } /* if ((i_CalibrationFlag == 1) && (i_CJCFlag == 1)) */
-
- /******************************************************************/
- /*Test if the value read is the calibration gain value for the CJC */
- /******************************************************************/
-
- if ((i_CalibrationFlag == 2)
- && (i_CJCFlag == 1)) {
- /* ui_InterruptChannelValue[i_Count + 5]=ui_DigitalTemperature; */
- s_BoardInfos[dev->minor].
- ui_InterruptChannelValue
- [s_BoardInfos[dev->minor].
- i_Count + 5] =
- ui_DigitalTemperature;
-
- /* if(i_ScanType==1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType == 1) {
-
- /* i_InterruptFlag=0; */
- s_BoardInfos[dev->minor].
- i_InterruptFlag = 0;
- /* i_Count=i_Count + 6; */
- /* Begin JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- /* s_BoardInfos [dev->minor].i_Count=s_BoardInfos [dev->minor].i_Count + 6; */
- s_BoardInfos[dev->minor].
- i_Count =
- s_BoardInfos[dev->
- minor].i_Count + 9;
- /* End JK 22.10.2004: APCI-3200 / APCI-3300 Reading of EEPROM values */
- } /* if(i_ScanType==1) */
- else {
- /* i_Count=0; */
- s_BoardInfos[dev->minor].
- i_Count = 0;
- } /* elseif(i_ScanType==1) */
-
- /* if(i_ScanType!=1) */
- if (s_BoardInfos[dev->minor].
- i_ScanType != 1) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
- } /* if(i_ScanType!=1) */
- else {
- /* if(i_ChannelCount==i_Sum) */
- if (s_BoardInfos[dev->minor].
- i_ChannelCount ==
- s_BoardInfos[dev->
- minor].i_Sum) {
- send_sig(SIGIO, devpriv->tsk_Current, 0); /* send signal to the sample */
-
- } /* if(i_ChannelCount==i_Sum) */
- } /* else if(i_ScanType!=1) */
- } /* if ((i_CalibrationFlag == 2) && (i_CJCFlag == 1)) */
-
- } /* else if ((ui_StatusRegister & 0x2) == 0x2) */
- break;
- } /* switch(i_ADDIDATAType) */
- break;
- case 2:
- case 3:
- i_APCI3200_InterruptHandleEos(dev);
- break;
- } /* switch(i_ScanType) */
- return;
-}
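
The deleted apci3200_ai_cmd() above decodes driver-specific fields packed into the generic comedi command arguments: the low 16 bits of start_arg select the external-trigger edge and the high 16 bits the trigger mode, while scan_begin_arg and convert_arg are split the same way into a time value and a time base. A minimal sketch of that packing convention as a hypothetical user-space helper (the helper name is illustrative, not part of the comedi API):

#include <stdint.h>

/*
 * Illustrative only: pack a 16-bit value and a 16-bit base/mode word
 * into one 32-bit comedi command argument, mirroring the unpacking in
 * the deleted apci3200_ai_cmd() (arg & 0xFFFF and arg >> 16).
 */
static uint32_t apci3200_pack_arg(uint16_t value, uint16_t base_or_mode)
{
	return (uint32_t)value | ((uint32_t)base_or_mode << 16);
}

/* e.g.: cmd.convert_arg = apci3200_pack_arg(convert_time, convert_time_base); */
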
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
deleted file mode 100644
index af70c8401880..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-#define ADDIDATA_WATCHDOG 2 /* Or shold it be something else */
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci035.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci035_boardtypes[] = {
- {
- .pc_DriverName = "apci035",
- .i_IorangeBase1 = APCI035_ADDRESS_RANGE,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0xff,
- .pr_AiRangelist = &range_apci035_ai,
- .i_Timer = 1,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci035_interrupt,
- .reset = apci035_reset,
- .ai_config = apci035_ai_config,
- .ai_read = apci035_ai_read,
- .timer_config = apci035_timer_config,
- .timer_write = apci035_timer_write,
- .timer_read = apci035_timer_read,
- },
-};
-
-static int apci035_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- dev->board_ptr = &apci035_boardtypes[0];
-
- return addi_auto_attach(dev, context);
-}
-
-static struct comedi_driver apci035_driver = {
- .driver_name = "addi_apci_035",
- .module = THIS_MODULE,
- .auto_attach = apci035_auto_attach,
- .detach = i_ADDI_Detach,
-};
-
-static int apci035_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci035_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci035_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x0300) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci035_pci_table);
-
-static struct pci_driver apci035_pci_driver = {
- .name = "addi_apci_035",
- .id_table = apci035_pci_table,
- .probe = apci035_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci035_driver, apci035_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 840cb289507a..bf14165297b7 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -258,9 +258,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
- comedi_buf_put(s, s->state);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
/* enable the interrupt */
outl(ctrl, dev->iobase + APCI1032_CTRL_REG);
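
This hunk, like several that follow, replaces the old comedi_buf_put()/comedi_event() pair with comedi_buf_write_samples() and comedi_handle_events(). A minimal sketch of the resulting interrupt-handler pattern, assuming the usual comedi driver includes; EXAMPLE_STATUS_REG is a placeholder, not a real register define:

static irqreturn_t example_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;

	/* latch the input state and queue it as a single sample */
	s->state = inl(dev->iobase + EXAMPLE_STATUS_REG) & 0xffff;
	comedi_buf_write_samples(s, &s->state, 1);

	/* let the comedi core propagate any pending async events */
	comedi_handle_events(dev, s);

	return IRQ_HANDLED;
}
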
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index b7a284ac6649..30b132c3d092 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,54 +1,109 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+struct apci1500_private {
+ int iobase;
+ int i_IobaseAmcc;
+ int i_IobaseAddon;
+ int i_IobaseReserved;
+ unsigned char b_OutputMemoryStatus;
+ struct task_struct *tsk_Current;
+};
-#include "addi-data/addi_eeprom.c"
#include "addi-data/hwdrv_apci1500.c"
-#include "addi-data/addi_common.c"
-
-static const struct addi_board apci1500_boardtypes[] = {
- {
- .pc_DriverName = "apci1500",
- .i_IorangeBase1 = APCI1500_ADDRESS_RANGE,
- .i_PCIEeprom = 0,
- .i_NbrDiChannel = 16,
- .i_NbrDoChannel = 16,
- .i_DoMaxdata = 0xffff,
- .i_Timer = 1,
- .interrupt = apci1500_interrupt,
- .reset = apci1500_reset,
- .di_config = apci1500_di_config,
- .di_read = apci1500_di_read,
- .di_write = apci1500_di_write,
- .di_bits = apci1500_di_insn_bits,
- .do_config = apci1500_do_config,
- .do_write = apci1500_do_write,
- .do_bits = apci1500_do_bits,
- .timer_config = apci1500_timer_config,
- .timer_write = apci1500_timer_write,
- .timer_read = apci1500_timer_read,
- .timer_bits = apci1500_timer_bits,
- },
-};
static int apci1500_auto_attach(struct comedi_device *dev,
unsigned long context)
{
- dev->board_ptr = &apci1500_boardtypes[0];
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct apci1500_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 1);
+ devpriv->iobase = dev->iobase;
+ devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+ devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
+ devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
+
+ if (pcidev->irq > 0) {
+ ret = request_irq(pcidev->irq, apci1500_interrupt, IRQF_SHARED,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 3);
+ if (ret)
+ return ret;
- return addi_auto_attach(dev, context);
+ /* Allocate and Initialise DI Subdevice Structures */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = apci1500_di_config;
+ s->insn_read = apci1500_di_read;
+ s->insn_write = apci1500_di_write;
+ s->insn_bits = apci1500_di_insn_bits;
+
+ /* Allocate and Initialise DO Subdevice Structures */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_config = apci1500_do_config;
+ s->insn_write = apci1500_do_write;
+ s->insn_bits = apci1500_do_bits;
+
+ /* Allocate and Initialise Timer Subdevice Structures */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 1;
+ s->maxdata = 0;
+ s->len_chanlist = 1;
+ s->range_table = &range_digital;
+ s->insn_write = apci1500_timer_write;
+ s->insn_read = apci1500_timer_read;
+ s->insn_config = apci1500_timer_config;
+ s->insn_bits = apci1500_timer_bits;
+
+ apci1500_reset(dev);
+
+ return 0;
+}
+
+static void apci1500_detach(struct comedi_device *dev)
+{
+ if (dev->iobase)
+ apci1500_reset(dev);
+ comedi_pci_detach(dev);
}
static struct comedi_driver apci1500_driver = {
.driver_name = "addi_apci_1500",
.module = THIS_MODULE,
.auto_attach = apci1500_auto_attach,
- .detach = i_ADDI_Detach,
+ .detach = apci1500_detach,
};
static int apci1500_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index 55d00fd94c91..d8410415cc90 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -163,7 +163,7 @@ static int apci1516_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (this_board->do_nchan) {
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->do_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 688b015a834e..6872b69da5db 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -28,16 +28,91 @@
#include "../comedidev.h"
#include "comedi_fc.h"
-#include "amcc_s5933.h"
+#include "addi_tcw.h"
#include "addi_watchdog.h"
+/*
+ * PCI BAR 0
+ *
+ * PLD Revision 1.0 I/O Mapping
+ * 0x00 93C76 EEPROM
+ * 0x04 - 0x18 Timer 12-Bit
+ *
+ * PLD Revision 2.x I/O Mapping
+ * 0x00 93C76 EEPROM
+ * 0x04 - 0x14 Digital Input
+ * 0x18 - 0x25 Digital Output
+ * 0x28 - 0x44 Watchdog 8-Bit
+ * 0x48 - 0x64 Timer 12-Bit
+ */
+#define APCI1564_EEPROM_REG 0x00
+#define APCI1564_EEPROM_VCC_STATUS (1 << 8)
+#define APCI1564_EEPROM_TO_REV(x) (((x) >> 4) & 0xf)
+#define APCI1564_EEPROM_DI (1 << 3)
+#define APCI1564_EEPROM_DO (1 << 2)
+#define APCI1564_EEPROM_CS (1 << 1)
+#define APCI1564_EEPROM_CLK (1 << 0)
+#define APCI1564_REV1_TIMER_IOBASE 0x04
+#define APCI1564_REV2_MAIN_IOBASE 0x04
+#define APCI1564_REV2_TIMER_IOBASE 0x48
+
+/*
+ * PCI BAR 1
+ *
+ * PLD Revision 1.0 I/O Mapping
+ * 0x00 - 0x10 Digital Input
+ * 0x14 - 0x20 Digital Output
+ * 0x24 - 0x3c Watchdog 8-Bit
+ *
+ * PLD Revision 2.x I/O Mapping
+ * 0x00 Counter_0
+ * 0x20 Counter_1
+ * 0x30 Counter_3
+ */
+#define APCI1564_REV1_MAIN_IOBASE 0x00
+
+/*
+ * dev->iobase Register Map
+ * PLD Revision 1.0 - PCI BAR 1 + 0x00
+ * PLD Revision 2.x - PCI BAR 0 + 0x04
+ */
+#define APCI1564_DI_REG 0x00
+#define APCI1564_DI_INT_MODE1_REG 0x04
+#define APCI1564_DI_INT_MODE2_REG 0x08
+#define APCI1564_DI_INT_STATUS_REG 0x0c
+#define APCI1564_DI_IRQ_REG 0x10
+#define APCI1564_DO_REG 0x14
+#define APCI1564_DO_INT_CTRL_REG 0x18
+#define APCI1564_DO_INT_STATUS_REG 0x1c
+#define APCI1564_DO_IRQ_REG 0x20
+#define APCI1564_WDOG_REG 0x24
+#define APCI1564_WDOG_RELOAD_REG 0x28
+#define APCI1564_WDOG_TIMEBASE_REG 0x2c
+#define APCI1564_WDOG_CTRL_REG 0x30
+#define APCI1564_WDOG_STATUS_REG 0x34
+#define APCI1564_WDOG_IRQ_REG 0x38
+#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x3c
+#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x40
+
+/*
+ * devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
+ * PLD Revision 1.0 - PCI BAR 0 + 0x04
+ * PLD Revision 2.x - PCI BAR 0 + 0x48
+ */
+
+/*
+ * devpriv->counters Register Map (see addi_tcw.h for register/bit defines)
+ * PLD Revision 2.x - PCI BAR 1 + 0x00
+ */
+#define APCI1564_COUNTER(x) ((x) * 0x20)
+
struct apci1564_private {
- unsigned int amcc_iobase; /* base of AMCC I/O registers */
+ unsigned long eeprom; /* base address of EEPROM register */
+ unsigned long timer; /* base address of 12-bit timer */
+ unsigned long counters; /* base address of 32-bit counters */
 unsigned int mode1; /* rising-edge/high level channels */
unsigned int mode2; /* falling-edge/low level channels */
unsigned int ctrl; /* interrupt mode OR (edge) . AND (level) */
- unsigned char timer_select_mode;
- unsigned char mode_select_register;
struct task_struct *tsk_current;
};
@@ -48,27 +123,30 @@ static int apci1564_reset(struct comedi_device *dev)
struct apci1564_private *devpriv = dev->private;
/* Disable the input interrupts and reset status register */
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
/* Reset the output channels and disable interrupts */
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DO_INT_CTRL_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_REG);
+ outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
/* Reset the watchdog registers */
- addi_watchdog_reset(devpriv->amcc_iobase + APCI1564_WDOG_REG);
+ addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
/* Reset the timer registers */
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_RELOAD_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_RELOAD_REG);
+
+ if (devpriv->counters) {
+ unsigned long iobase = devpriv->counters + ADDI_TCW_CTRL_REG;
- /* Reset the counter registers */
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER1));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER2));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER3));
- outl(0x0, dev->iobase + APCI1564_COUNTER_CTRL_REG(APCI1564_COUNTER4));
+ /* Reset the counter registers */
+ outl(0x0, iobase + APCI1564_COUNTER(0));
+ outl(0x0, iobase + APCI1564_COUNTER(1));
+ outl(0x0, iobase + APCI1564_COUNTER(2));
+ }
return 0;
}
@@ -82,55 +160,52 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
unsigned int ctrl;
unsigned int chan;
- /* check interrupt is from this device */
- if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
- INTCSR_INTR_ASSERTED) == 0)
- return IRQ_NONE;
-
- status = inl(devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
if (status & APCI1564_DI_INT_ENABLE) {
/* disable the interrupt */
outl(status & APCI1564_DI_INT_DISABLE,
- devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ dev->iobase + APCI1564_DI_IRQ_REG);
- s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG)
- & 0xffff;
- comedi_buf_put(s, s->state);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
+ 0xffff;
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
/* enable the interrupt */
- outl(status, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
}
- status = inl(devpriv->amcc_iobase + APCI1564_TIMER_IRQ_REG);
+ status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
if (status & 0x01) {
/* Disable Timer Interrupt */
- ctrl = inl(devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
+ ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
/* Send a signal to from kernel to user space */
send_sig(SIGIO, devpriv->tsk_current, 0);
/* Enable Timer Interrupt */
- outl(ctrl, devpriv->amcc_iobase + APCI1564_TIMER_CTRL_REG);
+ outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
}
- for (chan = 0; chan < 4; chan++) {
- status = inl(dev->iobase + APCI1564_COUNTER_IRQ_REG(chan));
- if (status & 0x01) {
- /* Disable Counter Interrupt */
- ctrl = inl(dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
- outl(0x0, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
-
- /* Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Counter Interrupt */
- outl(ctrl, dev->iobase +
- APCI1564_COUNTER_CTRL_REG(chan));
+ if (devpriv->counters) {
+ for (chan = 0; chan < 4; chan++) {
+ unsigned long iobase;
+
+ iobase = devpriv->counters + APCI1564_COUNTER(chan);
+
+ status = inl(iobase + ADDI_TCW_IRQ_REG);
+ if (status & 0x01) {
+ /* Disable Counter Interrupt */
+ ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+ outl(0x0, iobase + ADDI_TCW_CTRL_REG);
+
+ /* Send a signal from kernel to user space */
+ send_sig(SIGIO, devpriv->tsk_current, 0);
+
+ /* Enable Counter Interrupt */
+ outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
+ }
}
}
@@ -142,9 +217,7 @@ static int apci1564_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->amcc_iobase + APCI1564_DI_REG);
+ data[1] = inl(dev->iobase + APCI1564_DI_REG);
return insn->n;
}
@@ -154,12 +227,10 @@ static int apci1564_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- s->state = inl(devpriv->amcc_iobase + APCI1564_DO_REG);
+ s->state = inl(dev->iobase + APCI1564_DO_REG);
if (comedi_dio_update_state(s, data))
- outl(s->state, devpriv->amcc_iobase + APCI1564_DO_REG);
+ outl(s->state, dev->iobase + APCI1564_DO_REG);
data[1] = s->state;
@@ -171,9 +242,7 @@ static int apci1564_diag_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct apci1564_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->amcc_iobase + APCI1564_DO_INT_STATUS_REG) & 3;
+ data[1] = inl(dev->iobase + APCI1564_DO_INT_STATUS_REG) & 3;
return insn->n;
}
@@ -227,10 +296,10 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
devpriv->ctrl = 0;
devpriv->mode1 = 0;
devpriv->mode2 = 0;
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
break;
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
@@ -342,9 +411,9 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
return -EINVAL;
}
- outl(devpriv->mode1, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(devpriv->mode2, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
- outl(devpriv->ctrl, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
+ outl(devpriv->mode1, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(devpriv->mode2, dev->iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(devpriv->ctrl, dev->iobase + APCI1564_DI_IRQ_REG);
return 0;
}
@@ -352,12 +421,10 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
static int apci1564_cos_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct apci1564_private *devpriv = dev->private;
-
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_IRQ_REG);
- inl(devpriv->amcc_iobase + APCI1564_DI_INT_STATUS_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE1_REG);
- outl(0x0, devpriv->amcc_iobase + APCI1564_DI_INT_MODE2_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
+ inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE1_REG);
+ outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
return 0;
}
@@ -368,6 +435,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct apci1564_private *devpriv;
struct comedi_subdevice *s;
+ unsigned int val;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
@@ -378,8 +446,20 @@ static int apci1564_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->amcc_iobase = pci_resource_start(pcidev, 0);
+ /* read the EEPROM register and check the I/O map revision */
+ devpriv->eeprom = pci_resource_start(pcidev, 0);
+ val = inl(devpriv->eeprom + APCI1564_EEPROM_REG);
+ if (APCI1564_EEPROM_TO_REV(val) == 0) {
+ /* PLD Revision 1.0 I/O Mapping */
+ dev->iobase = pci_resource_start(pcidev, 1) +
+ APCI1564_REV1_MAIN_IOBASE;
+ devpriv->timer = devpriv->eeprom + APCI1564_REV1_TIMER_IOBASE;
+ } else {
+ /* PLD Revision 2.x I/O Mapping */
+ dev->iobase = devpriv->eeprom + APCI1564_REV2_MAIN_IOBASE;
+ devpriv->timer = devpriv->eeprom + APCI1564_REV2_TIMER_IOBASE;
+ devpriv->counters = pci_resource_start(pcidev, 1);
+ }
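	/*
	 * Illustrative example (not part of the patch): if the EEPROM
	 * register reads 0x8025, APCI1564_EEPROM_TO_REV() extracts
	 * (0x8025 >> 4) & 0xf = 2, so the PLD Revision 2.x mapping is
	 * used; a revision field of 0 selects the Revision 1.0 mapping.
	 */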
apci1564_reset(dev);
@@ -390,7 +470,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
dev->irq = pcidev->irq;
}
- ret = comedi_alloc_subdevices(dev, 6);
+ ret = comedi_alloc_subdevices(dev, 7);
if (ret)
return ret;
@@ -406,7 +486,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
/* Allocate and Initialise DO Subdevice Structures */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -431,26 +511,40 @@ static int apci1564_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- /* Allocate and Initialise Timer Subdevice Structures */
+ /* Timer subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
+ s->maxdata = 0x0fff;
s->range_table = &range_digital;
- s->insn_write = apci1564_timer_write;
- s->insn_read = apci1564_timer_read;
- s->insn_config = apci1564_timer_config;
+ s->insn_config = apci1564_timer_insn_config;
+ s->insn_write = apci1564_timer_insn_write;
+ s->insn_read = apci1564_timer_insn_read;
- /* Initialize the watchdog subdevice */
+ /* Counter subdevice */
s = &dev->subdevices[4];
- ret = addi_watchdog_init(s, devpriv->amcc_iobase + APCI1564_WDOG_REG);
+ if (devpriv->counters) {
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE | SDF_LSAMPL;
+ s->n_chan = 3;
+ s->maxdata = 0xffffffff;
+ s->range_table = &range_digital;
+ s->insn_config = apci1564_counter_insn_config;
+ s->insn_write = apci1564_counter_insn_write;
+ s->insn_read = apci1564_counter_insn_read;
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
+
+ /* Initialize the watchdog subdevice */
+ s = &dev->subdevices[5];
+ ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
if (ret)
return ret;
/* Initialize the diagnostic status subdevice */
- s = &dev->subdevices[5];
+ s = &dev->subdevices[6];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 2;
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 4162e2dc2860..a1248dab369f 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -140,7 +140,7 @@ static int apci16xx_auto_attach(struct comedi_device *dev,
for (i = 0; i < n_subdevs; i++) {
s = &dev->subdevices[i];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_WRITEABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = ((i * 32) < board->n_chan) ? 32 : last;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index aea3da325359..eebf4f151b39 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -47,7 +47,6 @@
struct apci2032_int_private {
spinlock_t spinlock;
- unsigned int stop_count;
bool active;
unsigned char enabled_isns;
};
@@ -148,7 +147,6 @@ static int apci2032_int_cmd(struct comedi_device *dev,
spin_lock_irqsave(&subpriv->spinlock, flags);
subpriv->enabled_isns = enabled_isns;
- subpriv->stop_count = cmd->stop_arg;
subpriv->active = true;
outl(enabled_isns, dev->iobase + APCI2032_INT_CTRL_REG);
@@ -178,7 +176,6 @@ static irqreturn_t apci2032_interrupt(int irq, void *d)
struct comedi_cmd *cmd = &s->async->cmd;
struct apci2032_int_private *subpriv;
unsigned int val;
- bool do_event = false;
if (!dev->attached)
return IRQ_NONE;
@@ -212,27 +209,16 @@ static irqreturn_t apci2032_interrupt(int irq, void *d)
bits |= (1 << i);
}
- if (comedi_buf_put(s, bits)) {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT &&
- subpriv->stop_count > 0) {
- subpriv->stop_count--;
- if (subpriv->stop_count == 0) {
- /* end of acquisition */
- s->async->events |= COMEDI_CB_EOA;
- apci2032_int_stop(dev, s);
- }
- }
- } else {
- apci2032_int_stop(dev, s);
- s->async->events |= COMEDI_CB_OVERFLOW;
- }
- do_event = true;
+ comedi_buf_write_samples(s, &bits, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
spin_unlock(&subpriv->spinlock);
- if (do_event)
- comedi_event(dev, s);
+
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -274,7 +260,7 @@ static int apci2032_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -303,7 +289,7 @@ static int apci2032_auto_attach(struct comedi_device *dev,
return -ENOMEM;
spin_lock_init(&subpriv->spinlock);
s->private = subpriv;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
s->len_chanlist = 2;
s->do_cmdtest = apci2032_int_cmdtest;
s->do_cmd = apci2032_int_cmd;
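
With comedi_buf_write_samples() queueing the samples, the comedi core keeps s->async->scans_done up to date, so the driver no longer needs its private stop_count; comparing scans_done against cmd->stop_arg and raising COMEDI_CB_EOA is enough, and comedi_handle_events() takes care of stopping the command. A minimal sketch of that end-of-acquisition pattern (out of context, variable names as in the hunk above):

	comedi_buf_write_samples(s, &bits, 1);

	if (cmd->stop_src == TRIG_COUNT &&
	    s->async->scans_done >= cmd->stop_arg)
		s->async->events |= COMEDI_CB_EOA;	/* end of acquisition */

	comedi_handle_events(dev, s);	/* cancels the command once EOA is set */
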
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 51ab1f937bae..1f9d13661ac9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -98,7 +98,7 @@ static int apci2200_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index ba71e24a56fd..c65f9407fd06 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,70 +1,993 @@
+/*
+ * addi_apci_3120.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+/*
+ * PCI BAR 0 register map (devpriv->amcc)
+ * see amcc_s5933.h for register and bit defines
+ */
+#define APCI3120_FIFO_ADVANCE_ON_BYTE_2 (1 << 29)
+
+/*
+ * PCI BAR 1 register map (dev->iobase)
+ */
+#define APCI3120_AI_FIFO_REG 0x00
+#define APCI3120_CTRL_REG 0x00
+#define APCI3120_CTRL_EXT_TRIG (1 << 15)
+#define APCI3120_CTRL_GATE(x) (1 << (12 + (x)))
+#define APCI3120_CTRL_PR(x) (((x) & 0xf) << 8)
+#define APCI3120_CTRL_PA(x) (((x) & 0xf) << 0)
+#define APCI3120_AI_SOFTTRIG_REG 0x02
+#define APCI3120_STATUS_REG 0x02
+#define APCI3120_STATUS_EOC_INT (1 << 15)
+#define APCI3120_STATUS_AMCC_INT (1 << 14)
+#define APCI3120_STATUS_EOS_INT (1 << 13)
+#define APCI3120_STATUS_TIMER2_INT (1 << 12)
+#define APCI3120_STATUS_INT_MASK (0xf << 12)
+#define APCI3120_STATUS_TO_DI_BITS(x) (((x) >> 8) & 0xf)
+#define APCI3120_STATUS_TO_VERSION(x) (((x) >> 4) & 0xf)
+#define APCI3120_STATUS_FIFO_FULL (1 << 2)
+#define APCI3120_STATUS_FIFO_EMPTY (1 << 1)
+#define APCI3120_STATUS_DA_READY (1 << 0)
+#define APCI3120_TIMER_REG 0x04
+#define APCI3120_CHANLIST_REG 0x06
+#define APCI3120_CHANLIST_INDEX(x) (((x) & 0xf) << 8)
+#define APCI3120_CHANLIST_UNIPOLAR (1 << 7)
+#define APCI3120_CHANLIST_GAIN(x) (((x) & 0x3) << 4)
+#define APCI3120_CHANLIST_MUX(x) (((x) & 0xf) << 0)
+#define APCI3120_AO_REG(x) (0x08 + (((x) / 4) * 2))
+#define APCI3120_AO_MUX(x) (((x) & 0x3) << 14)
+#define APCI3120_AO_DATA(x) ((x) << 0)
+#define APCI3120_TIMER_MODE_REG 0x0c
+#define APCI3120_TIMER_MODE(_t, _m) ((_m) << ((_t) * 2))
+#define APCI3120_TIMER_MODE0 0 /* I8254_MODE0 */
+#define APCI3120_TIMER_MODE2 1 /* I8254_MODE2 */
+#define APCI3120_TIMER_MODE4 2 /* I8254_MODE4 */
+#define APCI3120_TIMER_MODE5 3 /* I8254_MODE5 */
+#define APCI3120_TIMER_MODE_MASK(_t) (3 << ((_t) * 2))
+#define APCI3120_CTR0_REG 0x0d
+#define APCI3120_CTR0_DO_BITS(x) ((x) << 4)
+#define APCI3120_CTR0_TIMER_SEL(x) ((x) << 0)
+#define APCI3120_MODE_REG 0x0e
+#define APCI3120_MODE_TIMER2_CLK_OSC (0 << 6)
+#define APCI3120_MODE_TIMER2_CLK_OUT1 (1 << 6)
+#define APCI3120_MODE_TIMER2_CLK_EOC (2 << 6)
+#define APCI3120_MODE_TIMER2_CLK_EOS (3 << 6)
+#define APCI3120_MODE_TIMER2_CLK_MASK (3 << 6)
+#define APCI3120_MODE_TIMER2_AS_TIMER (0 << 4)
+#define APCI3120_MODE_TIMER2_AS_COUNTER (1 << 4)
+#define APCI3120_MODE_TIMER2_AS_WDOG (2 << 4)
+#define APCI3120_MODE_TIMER2_AS_MASK (3 << 4) /* sets AS_TIMER */
+#define APCI3120_MODE_SCAN_ENA (1 << 3)
+#define APCI3120_MODE_TIMER2_IRQ_ENA (1 << 2)
+#define APCI3120_MODE_EOS_IRQ_ENA (1 << 1)
+#define APCI3120_MODE_EOC_IRQ_ENA (1 << 0)
+
+/*
+ * PCI BAR 2 register map (devpriv->addon)
+ */
+#define APCI3120_ADDON_ADDR_REG 0x00
+#define APCI3120_ADDON_DATA_REG 0x02
+#define APCI3120_ADDON_CTRL_REG 0x04
+#define APCI3120_ADDON_CTRL_AMWEN_ENA (1 << 1)
+#define APCI3120_ADDON_CTRL_A2P_FIFO_ENA (1 << 0)
-#include "addi-data/hwdrv_apci3120.c"
+/*
+ * Board revisions
+ */
+#define APCI3120_REVA 0xa
+#define APCI3120_REVB 0xb
+#define APCI3120_REVA_OSC_BASE 70 /* 70ns = 14.29MHz */
+#define APCI3120_REVB_OSC_BASE 50 /* 50ns = 20MHz */
+
+static const struct comedi_lrange apci3120_ai_range = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1)
+ }
+};
enum apci3120_boardid {
BOARD_APCI3120,
BOARD_APCI3001,
};
-static const struct addi_board apci3120_boardtypes[] = {
+struct apci3120_board {
+ const char *name;
+ unsigned int ai_is_16bit:1;
+ unsigned int has_ao:1;
+};
+
+static const struct apci3120_board apci3120_boardtypes[] = {
[BOARD_APCI3120] = {
- .pc_DriverName = "apci3120",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 8,
- .i_AiMaxdata = 0xffff,
- .i_AoMaxdata = 0x3fff,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 0x0f,
- .interrupt = apci3120_interrupt,
+ .name = "apci3120",
+ .ai_is_16bit = 1,
+ .has_ao = 1,
},
[BOARD_APCI3001] = {
- .pc_DriverName = "apci3001",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0xfff,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 0x0f,
- .interrupt = apci3120_interrupt,
+ .name = "apci3001",
},
};
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
+struct apci3120_dmabuf {
+ unsigned short *virt;
+ dma_addr_t hw;
+ unsigned int size;
+ unsigned int use_size;
+};
+
+struct apci3120_private {
+ unsigned long amcc;
+ unsigned long addon;
+ unsigned int osc_base;
+ unsigned int use_dma:1;
+ unsigned int use_double_buffer:1;
+ unsigned int cur_dmabuf:1;
+ struct apci3120_dmabuf dmabuf[2];
+ unsigned char do_bits;
+ unsigned char timer_mode;
+ unsigned char mode;
+ unsigned short ctrl;
+};
+
+static void apci3120_addon_write(struct comedi_device *dev,
+ unsigned int val, unsigned int reg)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* 16-bit interface for AMCC add-on registers */
+
+ outw(reg, devpriv->addon + APCI3120_ADDON_ADDR_REG);
+ outw(val & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
+
+ outw(reg + 2, devpriv->addon + APCI3120_ADDON_ADDR_REG);
+ outw((val >> 16) & 0xffff, devpriv->addon + APCI3120_ADDON_DATA_REG);
+}
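/*
 * Illustrative example (not part of the patch): writing 0x12345678 to
 * AMCC_OP_REG_AMWAR through this 16-bit interface results in two
 * address/data pairs on the add-on bus: 0x5678 at the register offset
 * and 0x1234 at offset + 2.
 */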
+
+static void apci3120_init_dma(struct comedi_device *dev,
+ struct apci3120_dmabuf *dmabuf)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* AMCC - enable transfer count and reset A2P FIFO */
+ outl(AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
+ devpriv->amcc + AMCC_OP_REG_AGCSTS);
+
+ /* Add-On - enable transfer count and reset A2P FIFO */
+ apci3120_addon_write(dev, AGCSTS_TC_ENABLE | AGCSTS_RESET_A2P_FIFO,
+ AMCC_OP_REG_AGCSTS);
+
+ /* AMCC - enable transfers and reset A2P flags */
+ outl(RESET_A2P_FLAGS | EN_A2P_TRANSFERS,
+ devpriv->amcc + AMCC_OP_REG_MCSR);
+
+ /* Add-On - DMA start address */
+ apci3120_addon_write(dev, dmabuf->hw, AMCC_OP_REG_AMWAR);
+
+ /* Add-On - Number of acquisitions */
+ apci3120_addon_write(dev, dmabuf->use_size, AMCC_OP_REG_AMWTC);
+
+ /* AMCC - enable write complete (DMA) and set FIFO advance */
+ outl(APCI3120_FIFO_ADVANCE_ON_BYTE_2 | AINT_WRITE_COMPL,
+ devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ /* Add-On - enable DMA */
+ outw(APCI3120_ADDON_CTRL_AMWEN_ENA | APCI3120_ADDON_CTRL_A2P_FIFO_ENA,
+ devpriv->addon + APCI3120_ADDON_CTRL_REG);
+}
+
+static void apci3120_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ struct apci3120_dmabuf *dmabuf0 = &devpriv->dmabuf[0];
+ struct apci3120_dmabuf *dmabuf1 = &devpriv->dmabuf[1];
+ unsigned int dmalen0 = dmabuf0->size;
+ unsigned int dmalen1 = dmabuf1->size;
+ unsigned int scan_bytes;
+
+ scan_bytes = comedi_samples_to_bytes(s, cmd->scan_end_arg);
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /*
+ * Do we need to fill the first buffer completely? And must the
+ * second buffer be filled completely once the first one is full?
+ */
+ if (dmalen0 > (cmd->stop_arg * scan_bytes))
+ dmalen0 = cmd->stop_arg * scan_bytes;
+ else if (dmalen1 > (cmd->stop_arg * scan_bytes - dmalen0))
+ dmalen1 = cmd->stop_arg * scan_bytes - dmalen0;
+ }
+
+ if (cmd->flags & CMDF_WAKE_EOS) {
+ /* don't we want to wake up on every scan? */
+ if (dmalen0 > scan_bytes) {
+ dmalen0 = scan_bytes;
+ if (cmd->scan_end_arg & 1)
+ dmalen0 += 2;
+ }
+ if (dmalen1 > scan_bytes) {
+ dmalen1 = scan_bytes;
+ if (cmd->scan_end_arg & 1)
+ dmalen1 -= 2;
+ if (dmalen1 < 4)
+ dmalen1 = 4;
+ }
+ } else {
+ /* isn't the output buffer smaller than our DMA buffer? */
+ if (dmalen0 > s->async->prealloc_bufsz)
+ dmalen0 = s->async->prealloc_bufsz;
+ if (dmalen1 > s->async->prealloc_bufsz)
+ dmalen1 = s->async->prealloc_bufsz;
+ }
+ dmabuf0->use_size = dmalen0;
+ dmabuf1->use_size = dmalen1;
+
+ apci3120_init_dma(dev, dmabuf0);
+}
+
+/*
+ * There are three timers on the board. They all use the same base
+ * clock with a fixed prescaler for each timer. The base clock used
+ * depends on the board version and type.
+ *
+ * APCI-3120 Rev A boards OSC = 14.29MHz base clock (~70ns)
+ * APCI-3120 Rev B boards OSC = 20MHz base clock (50ns)
+ * APCI-3001 boards OSC = 20MHz base clock (50ns)
+ *
+ * The prescalers for each timer are:
+ * Timer 0 CLK = OSC/10
+ * Timer 1 CLK = OSC/1000
+ * Timer 2 CLK = OSC/1000
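+ *
+ * For example, assuming osc_base holds the base clock period in
+ * nanoseconds (50 on a Rev B board), a 10us conversion time on
+ * timer 0 gives a divisor of 10000 / (50 * 10) = 20.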
+ */
+static unsigned int apci3120_ns_to_timer(struct comedi_device *dev,
+ unsigned int timer,
+ unsigned int ns,
+ unsigned int flags)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int prescale = (timer == 0) ? 10 : 1000;
+ unsigned int timer_base = devpriv->osc_base * prescale;
+ unsigned int divisor;
+
+ switch (flags & CMDF_ROUND_MASK) {
+ case CMDF_ROUND_UP:
+ divisor = DIV_ROUND_UP(ns, timer_base);
+ break;
+ case CMDF_ROUND_DOWN:
+ divisor = ns / timer_base;
+ break;
+ case CMDF_ROUND_NEAREST:
+ default:
+ divisor = DIV_ROUND_CLOSEST(ns, timer_base);
+ break;
+ }
+
+ if (timer == 2) {
+ /* timer 2 is a 24-bit counter */
+ if (divisor > 0x00ffffff)
+ divisor = 0x00ffffff;
+ } else {
+ /* timers 0 and 1 are 16-bit counters */
+ if (divisor > 0xffff)
+ divisor = 0xffff;
+ }
+ /* the timers require a minimum divisor of 2 */
+ if (divisor < 2)
+ divisor = 2;
+
+ return divisor;
+}
+
+static void apci3120_clr_timer2_interrupt(struct comedi_device *dev)
+{
+ /* a dummy read of APCI3120_CTR0_REG clears the timer 2 interrupt */
+ inb(dev->iobase + APCI3120_CTR0_REG);
+}
+
+static void apci3120_timer_write(struct comedi_device *dev,
+ unsigned int timer, unsigned int val)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* write 16-bit value to timer (lower 16-bits of timer 2) */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer),
+ dev->iobase + APCI3120_CTR0_REG);
+ outw(val & 0xffff, dev->iobase + APCI3120_TIMER_REG);
+
+ if (timer == 2) {
+ /* write upper 16-bits to timer 2 */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer + 1),
+ dev->iobase + APCI3120_CTR0_REG);
+ outw((val >> 16) & 0xffff, dev->iobase + APCI3120_TIMER_REG);
+ }
+}
+
+static unsigned int apci3120_timer_read(struct comedi_device *dev,
+ unsigned int timer)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int val;
+
+ /* read 16-bit value from timer (lower 16-bits of timer 2) */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer),
+ dev->iobase + APCI3120_CTR0_REG);
+ val = inw(dev->iobase + APCI3120_TIMER_REG);
+
+ if (timer == 2) {
+ /* read upper 16-bits from timer 2 */
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits) |
+ APCI3120_CTR0_TIMER_SEL(timer + 1),
+ dev->iobase + APCI3120_CTR0_REG);
+ val |= (inw(dev->iobase + APCI3120_TIMER_REG) << 16);
+ }
+
+ return val;
+}
+
+static void apci3120_timer_set_mode(struct comedi_device *dev,
+ unsigned int timer, unsigned int mode)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ devpriv->timer_mode &= ~APCI3120_TIMER_MODE_MASK(timer);
+ devpriv->timer_mode |= APCI3120_TIMER_MODE(timer, mode);
+ outb(devpriv->timer_mode, dev->iobase + APCI3120_TIMER_MODE_REG);
+}
+
+static void apci3120_timer_enable(struct comedi_device *dev,
+ unsigned int timer, bool enable)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ if (enable)
+ devpriv->ctrl |= APCI3120_CTRL_GATE(timer);
+ else
+ devpriv->ctrl &= ~APCI3120_CTRL_GATE(timer);
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+}
+
+static void apci3120_exttrig_enable(struct comedi_device *dev, bool enable)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ if (enable)
+ devpriv->ctrl |= APCI3120_CTRL_EXT_TRIG;
+ else
+ devpriv->ctrl &= ~APCI3120_CTRL_EXT_TRIG;
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+}
+
+static void apci3120_set_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int n_chan, unsigned int *chanlist)
+{
+ struct apci3120_private *devpriv = dev->private;
+ int i;
+
+ /* set chanlist for scan */
+ for (i = 0; i < n_chan; i++) {
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned int val;
+
+ val = APCI3120_CHANLIST_MUX(chan) |
+ APCI3120_CHANLIST_GAIN(range) |
+ APCI3120_CHANLIST_INDEX(i);
+
+ if (comedi_range_is_unipolar(s, range))
+ val |= APCI3120_CHANLIST_UNIPOLAR;
+
+ outw(val, dev->iobase + APCI3120_CHANLIST_REG);
+ }
+
+ /* a dummy read of APCI3120_TIMER_MODE_REG resets the ai FIFO */
+ inw(dev->iobase + APCI3120_TIMER_MODE_REG);
+
+ /* set scan length (PR) and scan start (PA) */
+ devpriv->ctrl = APCI3120_CTRL_PR(n_chan - 1) | APCI3120_CTRL_PA(0);
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+
+ /* enable chanlist scanning if necessary */
+ if (n_chan > 1)
+ devpriv->mode |= APCI3120_MODE_SCAN_ENA;
+}
+
+static void apci3120_interrupt_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ struct apci3120_dmabuf *dmabuf;
+ unsigned int nbytes;
+ unsigned int nsamples;
+
+ dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
+
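+ /*
+ * The transfer count register counts down as data is transferred;
+ * the difference from the programmed size gives the number of
+ * bytes actually written to the DMA buffer.
+ */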
+ nbytes = dmabuf->use_size - inl(devpriv->amcc + AMCC_OP_REG_MWTC);
+
+ if (nbytes < dmabuf->use_size)
+ dev_err(dev->class_dev, "Interrupted DMA transfer!\n");
+ if (nbytes & 1) {
+ dev_err(dev->class_dev, "Odd count of bytes in DMA ring!\n");
+ async->events |= COMEDI_CB_ERROR;
+ return;
+ }
+
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples) {
+ comedi_buf_write_samples(s, dmabuf->virt, nsamples);
+
+ if (!(cmd->flags & CMDF_WAKE_EOS))
+ async->events |= COMEDI_CB_EOS;
+ }
+
+ if ((async->events & COMEDI_CB_CANCEL_MASK) ||
+ (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg))
+ return;
+
+ if (devpriv->use_double_buffer) {
+ /* switch DMA buffers for next interrupt */
+ devpriv->cur_dmabuf = !devpriv->cur_dmabuf;
+ dmabuf = &devpriv->dmabuf[devpriv->cur_dmabuf];
+ apci3120_init_dma(dev, dmabuf);
+ } else {
+ /* restart DMA if not using double buffering */
+ apci3120_init_dma(dev, dmabuf);
+ }
+}
+
+static irqreturn_t apci3120_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- const struct addi_board *this_board = dev->board_ptr;
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
+ unsigned int int_amcc;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ int_amcc = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ if (!(status & APCI3120_STATUS_INT_MASK) &&
+ !(int_amcc & ANY_S593X_INT)) {
+ dev_err(dev->class_dev, "IRQ from unknown source\n");
+ return IRQ_NONE;
+ }
+
+ outl(int_amcc | AINT_INT_MASK, devpriv->amcc + AMCC_OP_REG_INTCSR);
- this_board->interrupt(irq, d);
- return IRQ_RETVAL(1);
+ if (devpriv->ctrl & APCI3120_CTRL_EXT_TRIG)
+ apci3120_exttrig_enable(dev, false);
+
+ if (int_amcc & MASTER_ABORT_INT)
+ dev_err(dev->class_dev, "AMCC IRQ - MASTER DMA ABORT!\n");
+ if (int_amcc & TARGET_ABORT_INT)
+ dev_err(dev->class_dev, "AMCC IRQ - TARGET DMA ABORT!\n");
+
+ if ((status & APCI3120_STATUS_EOC_INT) == 0 &&
+ (devpriv->mode & APCI3120_MODE_EOC_IRQ_ENA)) {
+ /* nothing to do... EOC mode is not currently used */
+ }
+
+ if ((status & APCI3120_STATUS_EOS_INT) &&
+ (devpriv->mode & APCI3120_MODE_EOS_IRQ_ENA)) {
+ unsigned short val;
+ int i;
+
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ val = inw(dev->iobase + APCI3120_AI_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
+ }
+
+ devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+ }
+
+ if (status & APCI3120_STATUS_TIMER2_INT) {
+ /*
+ * For safety, clear the interrupt even though timer 2
+ * interrupts are not enabled by this driver.
+ */
+ apci3120_clr_timer2_interrupt(dev);
+ }
+
+ if (status & APCI3120_STATUS_AMCC_INT) {
+ /* AMCC - clear the write complete (DMA) interrupt */
+ outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ /* do some data transfer */
+ apci3120_interrupt_dma(dev, s);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static int apci3120_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int divisor;
+
+ /* set default mode bits */
+ devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
+ APCI3120_MODE_TIMER2_AS_TIMER;
+
+ /* AMCC - clear the write complete (DMA) interrupt */
+ outl(AINT_WT_COMPLETE, devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ devpriv->cur_dmabuf = 0;
+
+ /* load chanlist for command scan */
+ apci3120_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist);
+
+ if (cmd->start_src == TRIG_EXT)
+ apci3120_exttrig_enable(dev, true);
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ /*
+ * Timer 1 is used in MODE2 (rate generator) to set the
+ * start time for each scan.
+ */
+ divisor = apci3120_ns_to_timer(dev, 1, cmd->scan_begin_arg,
+ cmd->flags);
+ apci3120_timer_set_mode(dev, 1, APCI3120_TIMER_MODE2);
+ apci3120_timer_write(dev, 1, divisor);
+ }
+
+ /*
+ * Timer 0 is used in MODE2 (rate generator) to set the conversion
+ * time for each acquisition.
+ */
+ divisor = apci3120_ns_to_timer(dev, 0, cmd->convert_arg, cmd->flags);
+ apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE2);
+ apci3120_timer_write(dev, 0, divisor);
+
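+ /* use DMA if available, otherwise interrupt at each end-of-scan */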
+ if (devpriv->use_dma)
+ apci3120_setup_dma(dev, s);
+ else
+ devpriv->mode |= APCI3120_MODE_EOS_IRQ_ENA;
+
+ /* set mode to enable acquisition */
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
+ if (cmd->scan_begin_src == TRIG_TIMER)
+ apci3120_timer_enable(dev, 1, true);
+ apci3120_timer_enable(dev, 0, true);
+
+ return 0;
+}
+
+static int apci3120_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ unsigned int arg;
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src,
+ TRIG_TIMER | TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->start_src);
+ err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+
+ if (cmd->scan_begin_src == TRIG_TIMER) /* minimum scan begin time is 100 us */
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 100000);
+
+ /* minimum conversion time per sample is 10us */
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
+
+ err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ /* scan begin time must be at least the time to convert all channels */
+ arg = cmd->convert_arg * cmd->scan_end_arg;
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
+
+static int apci3120_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ /* Add-On - disable DMA */
+ outw(0, devpriv->addon + 4);
+
+ /* Add-On - disable bus master */
+ apci3120_addon_write(dev, 0, AMCC_OP_REG_AGCSTS);
+
+ /* AMCC - disable bus master */
+ outl(0, devpriv->amcc + AMCC_OP_REG_MCSR);
+
+ /* disable all counters, ext trigger, and reset scan */
+ devpriv->ctrl = 0;
+ outw(devpriv->ctrl, dev->iobase + APCI3120_CTRL_REG);
+
+ /* disable all interrupt sources */
+ devpriv->mode = 0;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
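+ /* a dummy read of the status register clears any pending interrupt */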
+ inw(dev->iobase + APCI3120_STATUS_REG);
+ devpriv->cur_dmabuf = 0;
+
+ return 0;
+}
+
+static int apci3120_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if ((status & APCI3120_STATUS_EOC_INT) == 0)
+ return 0;
+ return -EBUSY;
+}
+
+static int apci3120_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int divisor;
+ int ret;
+ int i;
+
+ /* set the mode for software-triggered A/D conversions using timer 0 */
+ devpriv->mode = APCI3120_MODE_TIMER2_CLK_OSC |
+ APCI3120_MODE_TIMER2_AS_TIMER;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+
+ /* load chanlist for single channel scan */
+ apci3120_set_chanlist(dev, s, 1, &insn->chanspec);
+
+ /*
+ * Timer 0 is used in MODE4 (software triggered strobe) to set the
+ * conversion time for each acquisition. Each conversion is triggered
+ * when the divisor is written to the timer. The conversion is done
+ * when the EOC bit in the status register is '0'.
+ */
+ apci3120_timer_set_mode(dev, 0, APCI3120_TIMER_MODE4);
+ apci3120_timer_enable(dev, 0, true);
+
+ /* fixed conversion time of 10 us */
+ divisor = apci3120_ns_to_timer(dev, 0, 10000, CMDF_ROUND_NEAREST);
+
+ for (i = 0; i < insn->n; i++) {
+ /* trigger conversion */
+ apci3120_timer_write(dev, 0, divisor);
+
+ ret = comedi_timeout(dev, s, insn, apci3120_ai_eoc, 0);
+ if (ret)
+ return ret;
+
+ data[i] = inw(dev->iobase + APCI3120_AI_FIFO_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci3120_ao_ready(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (status & APCI3120_STATUS_DA_READY)
+ return 0;
+ return -EBUSY;
+}
+
+static int apci3120_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
+ int ret;
+
+ ret = comedi_timeout(dev, s, insn, apci3120_ao_ready, 0);
+ if (ret)
+ return ret;
+
+ outw(APCI3120_AO_MUX(chan) | APCI3120_AO_DATA(val),
+ dev->iobase + APCI3120_AO_REG(chan));
+
+ s->readback[chan] = val;
+ }
+
+ return insn->n;
+}
+
+static int apci3120_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int status;
+
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ data[1] = APCI3120_STATUS_TO_DI_BITS(status);
+
+ return insn->n;
+}
+
+static int apci3120_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+
+ if (comedi_dio_update_state(s, data)) {
+ devpriv->do_bits = s->state;
+ outb(APCI3120_CTR0_DO_BITS(devpriv->do_bits),
+ dev->iobase + APCI3120_CTR0_REG);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int apci3120_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3120_private *devpriv = dev->private;
+ unsigned int divisor;
+ unsigned int status;
+ unsigned int mode;
+ unsigned int timer_mode;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ apci3120_clr_timer2_interrupt(dev);
+ divisor = apci3120_ns_to_timer(dev, 2, data[1],
+ CMDF_ROUND_DOWN);
+ apci3120_timer_write(dev, 2, divisor);
+ apci3120_timer_enable(dev, 2, true);
+ break;
+
+ case INSN_CONFIG_DISARM:
+ apci3120_timer_enable(dev, 2, false);
+ apci3120_clr_timer2_interrupt(dev);
+ break;
+
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
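+ /* data[2] reports which status bits are supported */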
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+
+ if (devpriv->ctrl & APCI3120_CTRL_GATE(2)) {
+ data[1] |= COMEDI_COUNTER_ARMED;
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ }
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (status & APCI3120_STATUS_TIMER2_INT) {
+ data[1] &= ~COMEDI_COUNTER_COUNTING;
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ }
+ break;
+
+ case INSN_CONFIG_SET_COUNTER_MODE:
+ switch (data[1]) {
+ case I8254_MODE0:
+ mode = APCI3120_MODE_TIMER2_AS_COUNTER;
+ timer_mode = APCI3120_TIMER_MODE0;
+ break;
+ case I8254_MODE2:
+ mode = APCI3120_MODE_TIMER2_AS_TIMER;
+ timer_mode = APCI3120_TIMER_MODE2;
+ break;
+ case I8254_MODE4:
+ mode = APCI3120_MODE_TIMER2_AS_TIMER;
+ timer_mode = APCI3120_TIMER_MODE4;
+ break;
+ case I8254_MODE5:
+ mode = APCI3120_MODE_TIMER2_AS_WDOG;
+ timer_mode = APCI3120_TIMER_MODE5;
+ break;
+ default:
+ return -EINVAL;
+ }
+ apci3120_timer_enable(dev, 2, false);
+ apci3120_clr_timer2_interrupt(dev);
+ apci3120_timer_set_mode(dev, 2, timer_mode);
+ devpriv->mode &= ~APCI3120_MODE_TIMER2_AS_MASK;
+ devpriv->mode |= mode;
+ outb(devpriv->mode, dev->iobase + APCI3120_MODE_REG);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci3120_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ int i;
+
+ for (i = 0; i < insn->n; i++)
+ data[i] = apci3120_timer_read(dev, 2);
+
+ return insn->n;
+}
+
+static void apci3120_dma_alloc(struct comedi_device *dev)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct apci3120_dmabuf *dmabuf;
+ int order;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ dmabuf = &devpriv->dmabuf[i];
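+ /*
+ * Try a 4-page buffer first, falling back to smaller
+ * power-of-two sizes if the allocation fails.
+ */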
+ for (order = 2; order >= 0; order--) {
+ dmabuf->virt = dma_alloc_coherent(dev->hw_dev,
+ PAGE_SIZE << order,
+ &dmabuf->hw,
+ GFP_KERNEL);
+ if (dmabuf->virt)
+ break;
+ }
+ if (!dmabuf->virt)
+ break;
+ dmabuf->size = PAGE_SIZE << order;
+
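+ /* DMA is usable with one buffer; double buffering needs both */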
+ if (i == 0)
+ devpriv->use_dma = 1;
+ if (i == 1)
+ devpriv->use_double_buffer = 1;
+ }
+}
+
+static void apci3120_dma_free(struct comedi_device *dev)
+{
+ struct apci3120_private *devpriv = dev->private;
+ struct apci3120_dmabuf *dmabuf;
+ int i;
+
+ if (!devpriv)
+ return;
+
+ for (i = 0; i < 2; i++) {
+ dmabuf = &devpriv->dmabuf[i];
+ if (dmabuf->virt) {
+ dma_free_coherent(dev->hw_dev, dmabuf->size,
+ dmabuf->virt, dmabuf->hw);
+ }
+ }
+}
+
+static void apci3120_reset(struct comedi_device *dev)
+{
+ /* disable all interrupt sources */
+ outb(0, dev->iobase + APCI3120_MODE_REG);
+
+ /* disable all counters, ext trigger, and reset scan */
+ outw(0, dev->iobase + APCI3120_CTRL_REG);
+
+ /* clear interrupt status */
+ inw(dev->iobase + APCI3120_STATUS_REG);
}
static int apci3120_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct addi_board *this_board = NULL;
- struct addi_private *devpriv;
+ const struct apci3120_board *this_board = NULL;
+ struct apci3120_private *devpriv;
struct comedi_subdevice *s;
- int ret, order, i;
+ unsigned int status;
+ int ret;
if (context < ARRAY_SIZE(apci3120_boardtypes))
this_board = &apci3120_boardtypes[context];
if (!this_board)
return -ENODEV;
dev->board_ptr = this_board;
- dev->board_name = this_board->pc_DriverName;
+ dev->board_name = this_board->name;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -76,136 +999,100 @@ static int apci3120_auto_attach(struct comedi_device *dev,
pci_set_master(pcidev);
dev->iobase = pci_resource_start(pcidev, 1);
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
+ devpriv->amcc = pci_resource_start(pcidev, 0);
+ devpriv->addon = pci_resource_start(pcidev, 2);
+
+ apci3120_reset(dev);
if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
+ ret = request_irq(pcidev->irq, apci3120_interrupt, IRQF_SHARED,
dev->board_name, dev);
- if (ret == 0)
+ if (ret == 0) {
dev->irq = pcidev->irq;
- }
- /* Allocate DMA buffers */
- for (i = 0; i < 2; i++) {
- for (order = 2; order >= 0; order--) {
- devpriv->ul_DmaBufferVirtual[i] =
- dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order,
- &devpriv->ul_DmaBufferHw[i],
- GFP_KERNEL);
-
- if (devpriv->ul_DmaBufferVirtual[i])
- break;
+ apci3120_dma_alloc(dev);
}
- if (!devpriv->ul_DmaBufferVirtual[i])
- break;
- devpriv->ui_DmaBufferSize[i] = PAGE_SIZE << order;
}
- if (devpriv->ul_DmaBufferVirtual[0])
- devpriv->us_UseDma = 1;
- if (devpriv->ul_DmaBufferVirtual[1])
- devpriv->b_DmaDoubleBuffer = 1;
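+ /*
+ * The board revision determines the base clock used by the
+ * timers (see the comment above apci3120_ns_to_timer()).
+ */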
+ status = inw(dev->iobase + APCI3120_STATUS_REG);
+ if (APCI3120_STATUS_TO_VERSION(status) == APCI3120_REVB ||
+ context == BOARD_APCI3001)
+ devpriv->osc_base = APCI3120_REVB_OSC_BASE;
+ else
+ devpriv->osc_base = APCI3120_REVA_OSC_BASE;
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
- /* Allocate and Initialise AI Subdevice Structures */
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags =
- SDF_READABLE | SDF_COMMON | SDF_GROUND
- | SDF_DIFF;
- if (this_board->i_NbrAiChannel)
- s->n_chan = this_board->i_NbrAiChannel;
- else
- s->n_chan = this_board->i_NbrAiChannelDiff;
- s->maxdata = this_board->i_AiMaxdata;
- s->len_chanlist = this_board->i_AiChannelList;
- s->range_table = &range_apci3120_ai;
-
- s->insn_config = apci3120_ai_insn_config;
- s->insn_read = apci3120_ai_insn_read;
- s->do_cmdtest = apci3120_ai_cmdtest;
- s->do_cmd = apci3120_ai_cmd;
- s->cancel = apci3120_cancel;
-
- /* Allocate and Initialise AO Subdevice Structures */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 16;
+ s->maxdata = this_board->ai_is_16bit ? 0xffff : 0x0fff;
+ s->range_table = &apci3120_ai_range;
+ s->insn_read = apci3120_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmdtest = apci3120_ai_cmdtest;
+ s->do_cmd = apci3120_ai_cmd;
+ s->cancel = apci3120_cancel;
+ }
+
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (this_board->i_NbrAoChannel) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrAoChannel;
- s->maxdata = this_board->i_AoMaxdata;
- s->len_chanlist = this_board->i_NbrAoChannel;
- s->range_table = &range_apci3120_ao;
- s->insn_write = apci3120_ao_insn_write;
+ if (this_board->has_ao) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 8;
+ s->maxdata = 0x3fff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = apci3120_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /* Allocate and Initialise DI Subdevice Structures */
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrDiChannel;
- s->maxdata = 1;
- s->len_chanlist = this_board->i_NbrDiChannel;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_di_insn_bits;
-
- /* Allocate and Initialise DO Subdevice Structures */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3120_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags =
- SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrDoChannel;
- s->maxdata = this_board->i_DoMaxdata;
- s->len_chanlist = this_board->i_NbrDoChannel;
- s->range_table = &range_digital;
- s->insn_bits = apci3120_do_insn_bits;
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 1;
- s->maxdata = 0;
- s->len_chanlist = 1;
- s->range_table = &range_digital;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3120_do_insn_bits;
- s->insn_write = apci3120_write_insn_timer;
- s->insn_read = apci3120_read_insn_timer;
- s->insn_config = apci3120_config_insn_timer;
+ /* Timer subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_TIMER;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 1;
+ s->maxdata = 0x00ffffff;
+ s->insn_config = apci3120_timer_insn_config;
+ s->insn_read = apci3120_timer_insn_read;
- apci3120_reset(dev);
return 0;
}
static void apci3120_detach(struct comedi_device *dev)
{
- struct addi_private *devpriv = dev->private;
-
- if (dev->iobase)
- apci3120_reset(dev);
comedi_pci_detach(dev);
- if (devpriv) {
- unsigned int i;
-
- for (i = 0; i < 2; i++) {
- if (devpriv->ul_DmaBufferVirtual[i]) {
- dma_free_coherent(dev->hw_dev,
- devpriv->ui_DmaBufferSize[i],
- devpriv->
- ul_DmaBufferVirtual[i],
- devpriv->ul_DmaBufferHw[i]);
- }
- }
- }
+ apci3120_dma_free(dev);
}
static struct comedi_driver apci3120_driver = {
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
deleted file mode 100644
index fe6897eff3db..000000000000
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ /dev/null
@@ -1,125 +0,0 @@
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/i387.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-static void fpu_begin(void)
-{
- kernel_fpu_begin();
-}
-
-static void fpu_end(void)
-{
- kernel_fpu_end();
-}
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci3200.c"
-#include "addi-data/addi_common.c"
-
-enum apci3200_boardid {
- BOARD_APCI3200,
- BOARD_APCI3300,
-};
-
-static const struct addi_board apci3200_boardtypes[] = {
- [BOARD_APCI3200] = {
- .pc_DriverName = "apci3200",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 0x3ffff,
- .pr_AiRangelist = &range_apci3200_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci3200_interrupt,
- .reset = apci3200_reset,
- .ai_config = apci3200_ai_config,
- .ai_read = apci3200_ai_read,
- .ai_write = apci3200_ai_write,
- .ai_bits = apci3200_ai_bits_test,
- .ai_cmdtest = apci3200_ai_cmdtest,
- .ai_cmd = apci3200_ai_cmd,
- .ai_cancel = apci3200_cancel,
- .di_bits = apci3200_di_insn_bits,
- .do_bits = apci3200_do_insn_bits,
- },
- [BOARD_APCI3300] = {
- .pc_DriverName = "apci3300",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = 1,
- .pc_EepromChip = "S5920",
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 0x3ffff,
- .pr_AiRangelist = &range_apci3300_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .ui_MinAcquisitiontimeNs = 10000,
- .ui_MinDelaytimeNs = 100000,
- .interrupt = apci3200_interrupt,
- .reset = apci3200_reset,
- .ai_config = apci3200_ai_config,
- .ai_read = apci3200_ai_read,
- .ai_write = apci3200_ai_write,
- .ai_bits = apci3200_ai_bits_test,
- .ai_cmdtest = apci3200_ai_cmdtest,
- .ai_cmd = apci3200_ai_cmd,
- .ai_cancel = apci3200_cancel,
- .di_bits = apci3200_di_insn_bits,
- .do_bits = apci3200_do_insn_bits,
- },
-};
-
-static int apci3200_auto_attach(struct comedi_device *dev,
- unsigned long context)
-{
- const struct addi_board *board = NULL;
-
- if (context < ARRAY_SIZE(apci3200_boardtypes))
- board = &apci3200_boardtypes[context];
- if (!board)
- return -ENODEV;
- dev->board_ptr = board;
-
- return addi_auto_attach(dev, context);
-}
-
-static struct comedi_driver apci3200_driver = {
- .driver_name = "addi_apci_3200",
- .module = THIS_MODULE,
- .auto_attach = apci3200_auto_attach,
- .detach = i_ADDI_Detach,
-};
-
-static int apci3200_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci3200_driver, id->driver_data);
-}
-
-static const struct pci_device_id apci3200_pci_table[] = {
- { PCI_VDEVICE(ADDIDATA, 0x3000), BOARD_APCI3200 },
- { PCI_VDEVICE(ADDIDATA, 0x3007), BOARD_APCI3300 },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci3200_pci_table);
-
-static struct pci_driver apci3200_pci_driver = {
- .name = "addi_apci_3200",
- .id_table = apci3200_pci_table,
- .probe = apci3200_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci3200_driver, apci3200_pci_driver);
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 010efa3fed6c..a726efcea6a5 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -357,12 +357,11 @@ static int apci3501_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
if (ao_n_chan) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = ao_n_chan;
s->maxdata = 0x3fff;
s->range_table = &apci3501_ao_range;
s->insn_write = apci3501_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -383,7 +382,7 @@ static int apci3501_auto_attach(struct comedi_device *dev,
/* Initialize the digital output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -392,7 +391,7 @@ static int apci3501_auto_attach(struct comedi_device *dev,
/* Initialize the timer/watchdog subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0;
s->len_chanlist = 1;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index a296bd5b2c0c..c173810a3b5b 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -371,10 +371,10 @@ static irqreturn_t apci3xxx_irq_handler(int irq, void *d)
writel(status, dev->mmio + 16);
val = readl(dev->mmio + 28);
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -849,12 +849,11 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_ao) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 4;
s->maxdata = 0x0fff;
s->range_table = &apci3xxx_ao_range;
s->insn_write = apci3xxx_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -880,7 +879,7 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_dig_out) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -893,7 +892,7 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
if (board->has_ttl_io) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
s->maxdata = 1;
s->io_bits = 0xff; /* channels 0-7 are always outputs */
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
new file mode 100644
index 000000000000..8794d4cbbfb0
--- /dev/null
+++ b/drivers/staging/comedi/drivers/addi_tcw.h
@@ -0,0 +1,56 @@
+#ifndef _ADDI_TCW_H
+#define _ADDI_TCW_H
+
+/*
+ * Following are the generic definitions for the ADDI-DATA timer/counter/
+ * watchdog (TCW) registers and bits. Some of the registers are not used
+ * depending on the use of the TCW.
+ */
+
+#define ADDI_TCW_VAL_REG 0x00
+
+#define ADDI_TCW_SYNC_REG 0x00
+#define ADDI_TCW_SYNC_CTR_TRIG (1 << 8)
+#define ADDI_TCW_SYNC_CTR_DIS (1 << 7)
+#define ADDI_TCW_SYNC_CTR_ENA (1 << 6)
+#define ADDI_TCW_SYNC_TIMER_TRIG (1 << 5)
+#define ADDI_TCW_SYNC_TIMER_DIS (1 << 4)
+#define ADDI_TCW_SYNC_TIMER_ENA (1 << 3)
+#define ADDI_TCW_SYNC_WDOG_TRIG (1 << 2)
+#define ADDI_TCW_SYNC_WDOG_DIS (1 << 1)
+#define ADDI_TCW_SYNC_WDOG_ENA (1 << 0)
+
+#define ADDI_TCW_RELOAD_REG 0x04
+
+#define ADDI_TCW_TIMEBASE_REG 0x08
+
+#define ADDI_TCW_CTRL_REG 0x0c
+#define ADDI_TCW_CTRL_EXT_CLK_STATUS (1 << 21)
+#define ADDI_TCW_CTRL_CASCADE (1 << 20)
+#define ADDI_TCW_CTRL_CNTR_ENA (1 << 19)
+#define ADDI_TCW_CTRL_CNT_UP (1 << 18)
+#define ADDI_TCW_CTRL_EXT_CLK(x) ((x) << 16)
+#define ADDI_TCW_CTRL_OUT(x) ((x) << 11)
+#define ADDI_TCW_CTRL_GATE (1 << 10)
+#define ADDI_TCW_CTRL_TRIG (1 << 9)
+#define ADDI_TCW_CTRL_EXT_GATE(x) ((x) << 7)
+#define ADDI_TCW_CTRL_EXT_TRIG(x) ((x) << 5)
+#define ADDI_TCW_CTRL_TIMER_ENA (1 << 4)
+#define ADDI_TCW_CTRL_RESET_ENA (1 << 3)
+#define ADDI_TCW_CTRL_WARN_ENA (1 << 2)
+#define ADDI_TCW_CTRL_IRQ_ENA (1 << 1)
+#define ADDI_TCW_CTRL_ENA (1 << 0)
+
+#define ADDI_TCW_STATUS_REG 0x10
+#define ADDI_TCW_STATUS_SOFT_CLR (1 << 3)
+#define ADDI_TCW_STATUS_SOFT_TRIG (1 << 1)
+#define ADDI_TCW_STATUS_OVERFLOW (1 << 0)
+
+#define ADDI_TCW_IRQ_REG 0x14
+#define ADDI_TCW_IRQ (1 << 0)
+
+#define ADDI_TCW_WARN_TIMEVAL_REG 0x18
+
+#define ADDI_TCW_WARN_TIMEBASE_REG 0x1c
+
+#endif
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
index 23031feaa095..c5b082d4e51e 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -20,21 +20,9 @@
#include <linux/module.h>
#include "../comedidev.h"
+#include "addi_tcw.h"
#include "addi_watchdog.h"
-/*
- * Register offsets/defines for the addi-data watchdog
- */
-#define ADDI_WDOG_REG 0x00
-#define ADDI_WDOG_RELOAD_REG 0x04
-#define ADDI_WDOG_TIMEBASE 0x08
-#define ADDI_WDOG_CTRL_REG 0x0c
-#define ADDI_WDOG_CTRL_ENABLE (1 << 0)
-#define ADDI_WDOG_CTRL_SW_TRIG (1 << 9)
-#define ADDI_WDOG_STATUS_REG 0x10
-#define ADDI_WDOG_STATUS_ENABLED (1 << 0)
-#define ADDI_WDOG_STATUS_SW_TRIG (1 << 1)
-
struct addi_watchdog_private {
unsigned long iobase;
unsigned int wdog_ctrl;
@@ -60,9 +48,9 @@ static int addi_watchdog_insn_config(struct comedi_device *dev,
switch (data[0]) {
case INSN_CONFIG_ARM:
- spriv->wdog_ctrl = ADDI_WDOG_CTRL_ENABLE;
+ spriv->wdog_ctrl = ADDI_TCW_CTRL_ENA;
reload = data[1] & s->maxdata;
- outl(reload, spriv->iobase + ADDI_WDOG_RELOAD_REG);
+ outl(reload, spriv->iobase + ADDI_TCW_RELOAD_REG);
/* Time base is 20ms, let the user know the timeout */
dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n",
@@ -75,7 +63,7 @@ static int addi_watchdog_insn_config(struct comedi_device *dev,
return -EINVAL;
}
- outl(spriv->wdog_ctrl, spriv->iobase + ADDI_WDOG_CTRL_REG);
+ outl(spriv->wdog_ctrl, spriv->iobase + ADDI_TCW_CTRL_REG);
return insn->n;
}
@@ -89,7 +77,7 @@ static int addi_watchdog_insn_read(struct comedi_device *dev,
int i;
for (i = 0; i < insn->n; i++)
- data[i] = inl(spriv->iobase + ADDI_WDOG_STATUS_REG);
+ data[i] = inl(spriv->iobase + ADDI_TCW_STATUS_REG);
return insn->n;
}
@@ -109,8 +97,8 @@ static int addi_watchdog_insn_write(struct comedi_device *dev,
/* "ping" the watchdog */
for (i = 0; i < insn->n; i++) {
- outl(spriv->wdog_ctrl | ADDI_WDOG_CTRL_SW_TRIG,
- spriv->iobase + ADDI_WDOG_CTRL_REG);
+ outl(spriv->wdog_ctrl | ADDI_TCW_CTRL_TRIG,
+ spriv->iobase + ADDI_TCW_CTRL_REG);
}
return insn->n;
@@ -118,8 +106,8 @@ static int addi_watchdog_insn_write(struct comedi_device *dev,
void addi_watchdog_reset(unsigned long iobase)
{
- outl(0x0, iobase + ADDI_WDOG_CTRL_REG);
- outl(0x0, iobase + ADDI_WDOG_RELOAD_REG);
+ outl(0x0, iobase + ADDI_TCW_CTRL_REG);
+ outl(0x0, iobase + ADDI_TCW_RELOAD_REG);
}
EXPORT_SYMBOL_GPL(addi_watchdog_reset);
@@ -134,7 +122,7 @@ int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
spriv->iobase = iobase;
s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 0xff;
s->insn_config = addi_watchdog_insn_config;
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 0ad46fe492c9..528f15c25dae 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -169,7 +169,6 @@ static int pci6208_auto_attach(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = pci6208_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index d18d8f21af23..47f6c0e9f014 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -133,8 +133,6 @@ static const struct comedi_lrange pci9111_ai_range = {
struct pci9111_private_data {
unsigned long lcr_io_base;
- int stop_counter;
-
unsigned int scan_delay;
unsigned int chunk_counter;
unsigned int chunk_num_samples;
@@ -404,12 +402,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
outb(CR_RANGE(cmd->chanlist[0]) & PCI9111_AI_RANGE_MASK,
dev->iobase + PCI9111_AI_RANGE_STAT_REG);
- /* Set counter */
- if (cmd->stop_src == TRIG_COUNT)
- dev_private->stop_counter = cmd->stop_arg * cmd->chanlist_len;
- else /* TRIG_NONE */
- dev_private->stop_counter = 0;
-
/* Set timer pacer */
dev_private->scan_delay = 0;
if (cmd->convert_src == TRIG_TIMER) {
@@ -435,7 +427,6 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
}
outb(trig, dev->iobase + PCI9111_AI_TRIG_CTRL_REG);
- dev_private->stop_counter *= (1 + dev_private->scan_delay);
dev_private->chunk_counter = 0;
dev_private->chunk_num_samples = cmd->chanlist_len *
(1 + dev_private->scan_delay);
@@ -452,7 +443,7 @@ static void pci9111_ai_munge(struct comedi_device *dev,
unsigned int maxdata = s->maxdata;
unsigned int invert = (maxdata + 1) >> 1;
unsigned int shift = (maxdata == 0xffff) ? 0 : 4;
- unsigned int num_samples = num_bytes / sizeof(short);
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
for (i = 0; i < num_samples; i++)
@@ -464,22 +455,14 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
{
struct pci9111_private_data *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int total = 0;
unsigned int samples;
- if (cmd->stop_src == TRIG_COUNT &&
- PCI9111_FIFO_HALF_SIZE > devpriv->stop_counter)
- samples = devpriv->stop_counter;
- else
- samples = PCI9111_FIFO_HALF_SIZE;
-
+ samples = comedi_nsamples_left(s, PCI9111_FIFO_HALF_SIZE);
insw(dev->iobase + PCI9111_AI_FIFO_REG,
devpriv->ai_bounce_buffer, samples);
if (devpriv->scan_delay < 1) {
- total = cfc_write_array_to_buffer(s,
- devpriv->ai_bounce_buffer,
- samples * sizeof(short));
+ comedi_buf_write_samples(s, devpriv->ai_bounce_buffer, samples);
} else {
unsigned int pos = 0;
unsigned int to_read;
@@ -492,17 +475,15 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
if (to_read > samples - pos)
to_read = samples - pos;
- total += cfc_write_array_to_buffer(s,
+ comedi_buf_write_samples(s,
devpriv->ai_bounce_buffer + pos,
- to_read * sizeof(short));
+ to_read);
} else {
to_read = devpriv->chunk_num_samples -
devpriv->chunk_counter;
if (to_read > samples - pos)
to_read = samples - pos;
-
- total += to_read * sizeof(short);
}
pos += to_read;
@@ -513,8 +494,6 @@ static void pci9111_handle_fifo_half_full(struct comedi_device *dev,
devpriv->chunk_counter = 0;
}
}
-
- devpriv->stop_counter -= total / sizeof(short);
}
static irqreturn_t pci9111_interrupt(int irq, void *p_device)
@@ -561,7 +540,7 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
dev_dbg(dev->class_dev, "fifo overflow\n");
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -571,14 +550,14 @@ static irqreturn_t pci9111_interrupt(int irq, void *p_device)
pci9111_handle_fifo_half_full(dev, s);
}
- if (cmd->stop_src == TRIG_COUNT && dev_private->stop_counter == 0)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
outb(0, dev->iobase + PCI9111_INT_CLR_REG);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -752,7 +731,6 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_bipolar10;
s->insn_write = pci9111_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -768,7 +746,7 @@ static int pci9111_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index e18fd9569a2b..26603582e71a 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -221,7 +221,6 @@ struct pci9118_private {
unsigned char int_ctrl;
unsigned char ai_cfg;
unsigned int ai_do; /* what do AI? 0=nothing, 1 to 4 mode */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned int ai_n_realscanlen; /*
* what we must transfer for one
* outgoing scan include front/back adds
@@ -447,51 +446,112 @@ static void interrupt_pci9118_ai_mode4_switch(struct comedi_device *dev,
outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
}
-static unsigned int defragment_dma_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
+static unsigned int valid_samples_in_act_dma_buf(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int n_raw_samples)
{
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int i = 0, j = 0;
- unsigned int start_pos = devpriv->ai_add_front,
- stop_pos = devpriv->ai_add_front + cmd->chanlist_len;
- unsigned int raw_scanlen = devpriv->ai_add_front + cmd->chanlist_len +
- devpriv->ai_add_back;
+ unsigned int start_pos = devpriv->ai_add_front;
+ unsigned int stop_pos = start_pos + cmd->chanlist_len;
+ unsigned int span_len = stop_pos + devpriv->ai_add_back;
+ unsigned int dma_pos = devpriv->ai_act_dmapos;
+ unsigned int whole_spans, n_samples, x;
- for (i = 0; i < num_samples; i++) {
- if (devpriv->ai_act_dmapos >= start_pos &&
- devpriv->ai_act_dmapos < stop_pos) {
- dma_buffer[j++] = dma_buffer[i];
+ if (span_len == cmd->chanlist_len)
+ return n_raw_samples; /* use all samples */
+
+ /*
+ * Not all samples are to be used. Buffer contents consist of a
+ * possibly non-whole number of spans and a region of each span
+ * is to be used.
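+ * (A span is the optional front-add samples, the channel list
+ * samples and the optional back-add samples; only the channel
+ * list portion is used.)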
+ *
+ * Account for samples in whole number of spans.
+ */
+ whole_spans = n_raw_samples / span_len;
+ n_samples = whole_spans * cmd->chanlist_len;
+ n_raw_samples -= whole_spans * span_len;
+
+ /*
+ * Deal with remaining samples which could overlap up to two spans.
+ */
+ while (n_raw_samples) {
+ if (dma_pos < start_pos) {
+ /* Skip samples before start position. */
+ x = start_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ dma_pos += x;
+ n_raw_samples -= x;
+ if (!n_raw_samples)
+ break;
}
- devpriv->ai_act_dmapos++;
- devpriv->ai_act_dmapos %= raw_scanlen;
+ if (dma_pos < stop_pos) {
+ /* Include samples before stop position. */
+ x = stop_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ n_samples += x;
+ dma_pos += x;
+ n_raw_samples -= x;
+ }
+ /* Advance to next span. */
+ start_pos += span_len;
+ stop_pos += span_len;
}
-
- return j;
+ return n_samples;
}
-static int move_block_from_dma(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned short *dma_buffer,
- unsigned int num_samples)
+static void move_block_from_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short *dma_buffer,
+ unsigned int n_raw_samples)
{
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int num_bytes;
-
- num_samples = defragment_dma_buffer(dev, s, dma_buffer, num_samples);
- devpriv->ai_act_scan +=
- (s->async->cur_chan + num_samples) / cmd->scan_end_arg;
- s->async->cur_chan += num_samples;
- s->async->cur_chan %= cmd->scan_end_arg;
- num_bytes =
- cfc_write_array_to_buffer(s, dma_buffer,
- num_samples * sizeof(short));
- if (num_bytes < num_samples * sizeof(short))
- return -1;
- return 0;
+ unsigned int start_pos = devpriv->ai_add_front;
+ unsigned int stop_pos = start_pos + cmd->chanlist_len;
+ unsigned int span_len = stop_pos + devpriv->ai_add_back;
+ unsigned int dma_pos = devpriv->ai_act_dmapos;
+ unsigned int x;
+
+ if (span_len == cmd->chanlist_len) {
+ /* All samples are to be copied. */
+ comedi_buf_write_samples(s, dma_buffer, n_raw_samples);
+ dma_pos += n_raw_samples;
+ } else {
+ /*
+ * Not all samples are to be copied. Buffer contents consist
+ * of a possibly non-whole number of spans and a region of
+ * each span is to be copied.
+ */
+ while (n_raw_samples) {
+ if (dma_pos < start_pos) {
+ /* Skip samples before start position. */
+ x = start_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ dma_pos += x;
+ n_raw_samples -= x;
+ if (!n_raw_samples)
+ break;
+ }
+ if (dma_pos < stop_pos) {
+ /* Copy samples before stop position. */
+ x = stop_pos - dma_pos;
+ if (x > n_raw_samples)
+ x = n_raw_samples;
+ comedi_buf_write_samples(s, dma_buffer, x);
+ dma_pos += x;
+ n_raw_samples -= x;
+ }
+ /* Advance to next span. */
+ start_pos += span_len;
+ stop_pos += span_len;
+ }
+ }
+ /* Update position in span for next time. */
+ devpriv->ai_act_dmapos = dma_pos % span_len;
}
static void pci9118_exttrg_enable(struct comedi_device *dev, bool enable)
@@ -578,9 +638,7 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
devpriv->ai_do = 0;
devpriv->usedma = 0;
- devpriv->ai_act_scan = 0;
devpriv->ai_act_dmapos = 0;
- s->async->cur_chan = 0;
s->async->inttrig = NULL;
devpriv->ai_neverending = 0;
devpriv->dma_actbuf = 0;
@@ -594,8 +652,9 @@ static void pci9118_ai_munge(struct comedi_device *dev,
unsigned int start_chan_index)
{
struct pci9118_private *devpriv = dev->private;
- unsigned int i, num_samples = num_bytes / sizeof(short);
unsigned short *array = data;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
+ unsigned int i;
for (i = 0; i < num_samples; i++) {
if (devpriv->usedma)
@@ -617,17 +676,11 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
sampl = inl(dev->iobase + PCI9118_AI_FIFO_REG);
- cfc_write_to_buffer(s, sampl);
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->scan_end_arg) {
- /* one scan done */
- s->async->cur_chan %= cmd->scan_end_arg;
- devpriv->ai_act_scan++;
- if (!devpriv->ai_neverending) {
- /* all data sampled? */
- if (devpriv->ai_act_scan >= cmd->stop_arg)
- s->async->events |= COMEDI_CB_EOA;
- }
+ comedi_buf_write_samples(s, &sampl, 1);
+
+ if (!devpriv->ai_neverending) {
+ if (s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
}
@@ -637,39 +690,37 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
struct pci9118_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
struct pci9118_dmabuf *dmabuf = &devpriv->dmabuf[devpriv->dma_actbuf];
- unsigned int next_dma_buf, samplesinbuf, sampls, m;
+ unsigned int n_all = comedi_bytes_to_samples(s, dmabuf->use_size);
+ unsigned int n_valid;
+ bool more_dma;
- samplesinbuf = dmabuf->use_size >> 1; /* number of received samples */
+ /* determine whether more DMA buffers to do after this one */
+ n_valid = valid_samples_in_act_dma_buf(dev, s, n_all);
+ more_dma = n_valid < comedi_nsamples_left(s, n_valid + 1);
- if (devpriv->dma_doublebuf) { /*
- * switch DMA buffers if is used
- * double buffering
- */
- next_dma_buf = 1 - devpriv->dma_actbuf;
- pci9118_amcc_setup_dma(dev, next_dma_buf);
- if (devpriv->ai_do == 4)
- interrupt_pci9118_ai_mode4_switch(dev, next_dma_buf);
+ /* switch DMA buffers and restart DMA if double buffering */
+ if (more_dma && devpriv->dma_doublebuf) {
+ devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
+ pci9118_amcc_setup_dma(dev, devpriv->dma_actbuf);
+ if (devpriv->ai_do == 4) {
+ interrupt_pci9118_ai_mode4_switch(dev,
+ devpriv->dma_actbuf);
+ }
}
- if (samplesinbuf) {
- /* how many samples is to end of buffer */
- m = s->async->prealloc_bufsz >> 1;
- sampls = m;
- move_block_from_dma(dev, s, dmabuf->virt, samplesinbuf);
- m = m - sampls; /* m=how many samples was transferred */
- }
+ if (n_all)
+ move_block_from_dma(dev, s, dmabuf->virt, n_all);
if (!devpriv->ai_neverending) {
- /* all data sampled? */
- if (devpriv->ai_act_scan >= cmd->stop_arg)
+ if (s->async->scans_done >= cmd->stop_arg)
s->async->events |= COMEDI_CB_EOA;
}
- if (devpriv->dma_doublebuf) {
- /* switch dma buffers */
- devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
- } else {
- /* restart DMA if is not used double buffering */
+ if (s->async->events & COMEDI_CB_CANCEL_MASK)
+ more_dma = false;
+
+ /* restart DMA if not double buffering */
+ if (more_dma && !devpriv->dma_doublebuf) {
pci9118_amcc_setup_dma(dev, 0);
if (devpriv->ai_do == 4)
interrupt_pci9118_ai_mode4_switch(dev, 0);
@@ -766,7 +817,7 @@ static irqreturn_t pci9118_interrupt(int irq, void *d)
interrupt_pci9118_ai_onesample(dev, s);
interrupt_exit:
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1123,9 +1174,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
inl(dev->iobase + PCI9118_AI_STATUS_REG);
inl(dev->iobase + PCI9118_INT_CTRL_REG);
- devpriv->ai_act_scan = 0;
devpriv->ai_act_dmapos = 0;
- s->async->cur_chan = 0;
if (devpriv->usedma) {
Compute_and_setup_dma(dev, s);
@@ -1624,7 +1673,6 @@ static int pci9118_common_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &range_bipolar10;
s->insn_write = pci9118_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 5539bd294862..d02df7d0c629 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -298,7 +298,6 @@ static const struct boardtype boardtypes[] = {
struct pci1710_private {
unsigned int CntrlReg; /* Control register */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned char ai_et;
unsigned int ai_et_CntrlReg;
unsigned int ai_et_MuxVal;
@@ -730,16 +729,12 @@ static int pci171x_ai_cancel(struct comedi_device *dev,
break;
}
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
-
return 0;
}
static void pci1710_handle_every_sample(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pci1710_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int status;
unsigned int val;
@@ -749,14 +744,14 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
if (status & Status_FE) {
dev_dbg(dev->class_dev, "A/D FIFO empty (%4x)\n", status);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (status & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", status);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
@@ -770,27 +765,19 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
break;
}
- comedi_buf_put(s, val & s->maxdata);
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len)
- s->async->cur_chan = 0;
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
-
- if (s->async->cur_chan == 0) { /* one scan done */
- devpriv->ai_act_scan++;
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ s->async->events |= COMEDI_CB_EOA;
+ break;
}
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
/*
@@ -799,8 +786,6 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
static int move_block_from_fifo(struct comedi_device *dev,
struct comedi_subdevice *s, int n, int turn)
{
- struct pci1710_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int val;
int ret;
int i;
@@ -814,13 +799,8 @@ static int move_block_from_fifo(struct comedi_device *dev,
return ret;
}
- comedi_buf_put(s, val & s->maxdata);
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- }
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
}
return 0;
}
@@ -829,48 +809,47 @@ static void pci1710_handle_fifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct boardtype *this_board = dev->board_ptr;
- struct pci1710_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int m, samplesinbuf;
+ unsigned int nsamples;
+ unsigned int m;
m = inw(dev->iobase + PCI171x_STATUS);
if (!(m & Status_FH)) {
dev_dbg(dev->class_dev, "A/D FIFO not half full! (%4x)\n", m);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (m & Status_FF) {
dev_dbg(dev->class_dev,
"A/D FIFO Full status (Fatal Error!) (%4x)\n", m);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
- samplesinbuf = this_board->fifo_half_size;
- if (samplesinbuf * sizeof(short) >= s->async->prealloc_bufsz) {
- m = s->async->prealloc_bufsz / sizeof(short);
+ nsamples = this_board->fifo_half_size;
+ if (comedi_samples_to_bytes(s, nsamples) >= s->async->prealloc_bufsz) {
+ m = comedi_bytes_to_samples(s, s->async->prealloc_bufsz);
if (move_block_from_fifo(dev, s, m, 0))
return;
- samplesinbuf -= m;
+ nsamples -= m;
}
- if (samplesinbuf) {
- if (move_block_from_fifo(dev, s, samplesinbuf, 1))
+ if (nsamples) {
+ if (move_block_from_fifo(dev, s, nsamples, 1))
return;
}
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
outb(0, dev->iobase + PCI171x_CLRINT); /* clear our INT request */
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
/*
@@ -928,9 +907,6 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outb(0, dev->iobase + PCI171x_CLRFIFO);
outb(0, dev->iobase + PCI171x_CLRINT);
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
-
devpriv->CntrlReg &= Control_CNT0;
if ((cmd->flags & CMDF_WAKE_EOS) == 0)
devpriv->CntrlReg |= Control_ONEFH;
@@ -1208,7 +1184,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_dichan) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = this_board->n_dichan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dichan;
@@ -1220,7 +1196,7 @@ static int pci1710_auto_attach(struct comedi_device *dev,
if (this_board->n_dochan) {
s = &dev->subdevices[subdev];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->n_dochan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 1610e2b406f3..65f854e1eb66 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -1,205 +1,124 @@
/*
- comedi/drivers/pci1723.c
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * adv_pci1723.c
+ * Comedi driver for the Advantech PCI-1723 card.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: adv_pci1723
-Description: Advantech PCI-1723
-Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
-Devices: [Advantech] PCI-1723 (adv_pci1723)
-Updated: Mon, 14 Apr 2008 15:12:56 +0100
-Status: works
-
-Configuration Options:
- [0] - PCI bus of device (optional)
- [1] - PCI slot of device (optional)
-
- If bus/slot is not specified, the first supported
- PCI device found will be used.
-
-Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
-
-Subdevice 1 is 16-channel DIO. The channels are configurable as input or
-output in 2 groups (0 to 7, 8 to 15). Configuring any channel implicitly
-configures all channels in the same group.
-
-TODO:
-
-1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA, 4 to 20 mA).
-2. Read the initial ranges and values of the AO subdevice at start-up instead
- of reinitializing them.
-3. Implement calibration.
-*/
+ * Driver: adv_pci1723
+ * Description: Advantech PCI-1723
+ * Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
+ * Devices: (Advantech) PCI-1723 [adv_pci1723]
+ * Updated: Mon, 14 Apr 2008 15:12:56 +0100
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
+ *
+ * Subdevice 1 is 16-channel DIO. The channels are configurable as
+ * input or output in 2 groups (0 to 7, 8 to 15). Configuring any
+ * channel implicitly configures all channels in the same group.
+ *
+ * TODO:
+ * 1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA,
+ * 4 to 20 mA).
+ * 2. Read the initial ranges and values of the AO subdevice at
+ * start-up instead of reinitializing them.
+ * 3. Implement calibration.
+ */
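Reviewer note: to make the group behaviour described above concrete, here is a hedged userspace sketch using comedilib. It assumes the board is attached as /dev/comedi0 with the DIO as subdevice 1; both are assumptions for illustration, not guaranteed by this patch.

	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");

		if (!dev)
			return 1;

		/* configuring channel 3 as input switches the whole 0-7 group */
		comedi_dio_config(dev, 1, 3, COMEDI_INPUT);
		/* configuring channel 12 as output switches the whole 8-15 group */
		comedi_dio_config(dev, 1, 12, COMEDI_OUTPUT);

		comedi_close(dev);
		return 0;
	}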
#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
-/* all the registers for the pci1723 board */
-#define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */
-
-#define PCI1723_SYN_SET 0x12 /* synchronized set register */
-#define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12
- /* synchronized status register */
-
-#define PCI1723_RANGE_CALIBRATION_MODE 0x14
- /* range and calibration mode */
-#define PCI1723_RANGE_CALIBRATION_STATUS 0x14
- /* range and calibration status */
-
-#define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16
- /*
- * SADC control command for
- * calibration function
- */
-#define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16
- /*
- * SADC control status for
- * calibration function
- */
-
-#define PCI1723_CALIBRATION_PARA_STROBE 0x18
- /* Calibration parameter strobe */
-
-#define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */
-#define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */
-
-#define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C
- /* Write digital output command */
-#define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */
-
-#define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */
-#define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */
-
-#define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */
-
-#define PCI1723_RESET_ALL_CHN_STROBE 0x22
- /* Reset all D/A channels strobe */
-
-#define PCI1723_RESET_CAL_CONTROL_STROBE 0x24
- /*
- * Reset the calibration
- * controller strobe
- */
-
-#define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26
- /*
- * Change D/A channels output
- * type strobe
- */
-
-#define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */
-
-struct pci1723_private {
- unsigned char da_range[8]; /* D/A output range for each channel */
- unsigned short ao_data[8]; /* data output buffer */
-};
-
/*
- * The pci1723 card reset;
+ * PCI Bar 2 I/O Register map (dev->iobase)
*/
-static int pci1723_reset(struct comedi_device *dev)
+#define PCI1723_AO_REG(x) (0x00 + ((x) * 2))
+#define PCI1723_BOARD_ID_REG 0x10
+#define PCI1723_BOARD_ID_MASK (0xf << 0)
+#define PCI1723_SYNC_CTRL_REG 0x12
+#define PCI1723_SYNC_CTRL_ASYNC (0 << 0)
+#define PCI1723_SYNC_CTRL_SYNC (1 << 0)
+#define PCI1723_CTRL_REG 0x14
+#define PCI1723_CTRL_BUSY (1 << 15)
+#define PCI1723_CTRL_INIT (1 << 14)
+#define PCI1723_CTRL_SELF (1 << 8)
+#define PCI1723_CTRL_IDX(x) (((x) & 0x3) << 6)
+#define PCI1723_CTRL_RANGE(x) (((x) & 0x3) << 4)
+#define PCI1723_CTRL_GAIN (0 << 3)
+#define PCI1723_CTRL_OFFSET (1 << 3)
+#define PCI1723_CTRL_CHAN(x) (((x) & 0x7) << 0)
+#define PCI1723_CALIB_CTRL_REG 0x16
+#define PCI1723_CALIB_CTRL_CS (1 << 2)
+#define PCI1723_CALIB_CTRL_DAT (1 << 1)
+#define PCI1723_CALIB_CTRL_CLK (1 << 0)
+#define PCI1723_CALIB_STROBE_REG 0x18
+#define PCI1723_DIO_CTRL_REG 0x1a
+#define PCI1723_DIO_CTRL_HDIO (1 << 1)
+#define PCI1723_DIO_CTRL_LDIO (1 << 0)
+#define PCI1723_DIO_DATA_REG 0x1c
+#define PCI1723_CALIB_DATA_REG 0x1e
+#define PCI1723_SYNC_STROBE_REG 0x20
+#define PCI1723_RESET_AO_STROBE_REG 0x22
+#define PCI1723_RESET_CALIB_STROBE_REG 0x24
+#define PCI1723_RANGE_STROBE_REG 0x26
+#define PCI1723_VREF_REG 0x28
+#define PCI1723_VREF_NEG10V (0 << 0)
+#define PCI1723_VREF_0V (1 << 0)
+#define PCI1723_VREF_POS10V (3 << 0)
+
+static int pci1723_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pci1723_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- outw(0x01, dev->iobase + PCI1723_SYN_SET);
- /* set synchronous output mode */
-
- for (i = 0; i < 8; i++) {
- /* set all outputs to 0V */
- devpriv->ao_data[i] = 0x8000;
- outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i));
- /* set all ranges to +/- 10V */
- devpriv->da_range[i] = 0;
- outw(((devpriv->da_range[i] << 4) | i),
- PCI1723_RANGE_CALIBRATION_MODE);
- }
-
- outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE);
- /* update ranges */
- outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */
-
- /* set asynchronous output mode */
- outw(0, dev->iobase + PCI1723_SYN_SET);
-
- return 0;
-}
-
-static int pci1723_insn_read_ao(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci1723_private *devpriv = dev->private;
- int n, chan;
-
- chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++)
- data[n] = devpriv->ao_data[chan];
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
- return n;
-}
-
-/*
- analog data output;
-*/
-static int pci1723_ao_write_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pci1723_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int n;
-
- for (n = 0; n < insn->n; n++) {
- devpriv->ao_data[chan] = data[n];
- outw(data[n], dev->iobase + PCI1723_DA(chan));
+ outw(val, dev->iobase + PCI1723_AO_REG(chan));
+ s->readback[chan] = val;
}
- return n;
+ return insn->n;
}
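Reviewer note: the removed pci1723_insn_read_ao() is no longer needed because comedi_alloc_subdev_readback() (called later in this patch) installs the core's comedi_readback_insn_read() for the subdevice; the driver only has to keep s->readback[chan] current in its insn_write handler, as done above. A sketch of the attach-time pairing, using only names from this patch:

	s->insn_write = pci1723_ao_insn_write;	/* updates s->readback[chan] */

	ret = comedi_alloc_subdev_readback(s);	/* core supplies the insn_read */
	if (ret)
		return ret;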
-/*
- digital i/o config/query
-*/
static int pci1723_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned short mode;
+ unsigned int mask = (chan < 8) ? 0x00ff : 0xff00;
+ unsigned short mode = 0x0000; /* assume output */
int ret;
- if (chan < 8)
- mask = 0x00ff;
- else
- mask = 0xff00;
-
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
if (ret)
return ret;
- /* update hardware DIO mode */
- mode = 0x0000; /* assume output */
if (!(s->io_bits & 0x00ff))
- mode |= 0x0001; /* low byte input */
+ mode |= PCI1723_DIO_CTRL_LDIO; /* low byte input */
if (!(s->io_bits & 0xff00))
- mode |= 0x0002; /* high byte input */
- outw(mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
+ mode |= PCI1723_DIO_CTRL_HDIO; /* high byte input */
+ outw(mode, dev->iobase + PCI1723_DIO_CTRL_REG);
return insn->n;
}
@@ -210,24 +129,21 @@ static int pci1723_dio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD);
+ outw(s->state, dev->iobase + PCI1723_DIO_DATA_REG);
- data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
+ data[1] = inw(dev->iobase + PCI1723_DIO_DATA_REG);
return insn->n;
}
static int pci1723_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+ unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct pci1723_private *devpriv;
struct comedi_subdevice *s;
+ unsigned int val;
int ret;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
+ int i;
ret = comedi_pci_enable(dev);
if (ret)
@@ -239,61 +155,57 @@ static int pci1723_auto_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- dev->write_subdev = s;
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 8;
s->maxdata = 0xffff;
- s->len_chanlist = 8;
s->range_table = &range_bipolar10;
- s->insn_write = pci1723_ao_write_winsn;
- s->insn_read = pci1723_insn_read_ao;
+ s->insn_write = pci1723_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* synchronously reset all analog outputs to 0V, +/-10V range */
+ outw(PCI1723_SYNC_CTRL_SYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
+ for (i = 0; i < s->n_chan; i++) {
+ outw(PCI1723_CTRL_RANGE(0) | PCI1723_CTRL_CHAN(i),
+ dev->iobase + PCI1723_CTRL_REG);
+ outw(0, dev->iobase + PCI1723_RANGE_STROBE_REG);
+
+ outw(0x8000, dev->iobase + PCI1723_AO_REG(i));
+ s->readback[i] = 0x8000;
+ }
+ outw(0, dev->iobase + PCI1723_SYNC_STROBE_REG);
+
+ /* disable synchronous control */
+ outw(PCI1723_SYNC_CTRL_ASYNC, dev->iobase + PCI1723_SYNC_CTRL_REG);
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
- s->len_chanlist = 16;
s->range_table = &range_digital;
s->insn_config = pci1723_dio_insn_config;
s->insn_bits = pci1723_dio_insn_bits;
- /* read DIO config */
- switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE) & 0x03) {
- case 0x00: /* low byte output, high byte output */
- s->io_bits = 0xFFFF;
- break;
- case 0x01: /* low byte input, high byte output */
- s->io_bits = 0xFF00;
- break;
- case 0x02: /* low byte output, high byte input */
- s->io_bits = 0x00FF;
- break;
- case 0x03: /* low byte input, high byte input */
- s->io_bits = 0x0000;
- break;
- }
- /* read DIO port state */
- s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
-
- pci1723_reset(dev);
+ /* get initial DIO direction and state */
+ val = inw(dev->iobase + PCI1723_DIO_CTRL_REG);
+ if (!(val & PCI1723_DIO_CTRL_LDIO))
+ s->io_bits |= 0x00ff; /* low byte output */
+ if (!(val & PCI1723_DIO_CTRL_HDIO))
+ s->io_bits |= 0xff00; /* high byte output */
+ s->state = inw(dev->iobase + PCI1723_DIO_DATA_REG);
return 0;
}
-static void pci1723_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- pci1723_reset(dev);
- comedi_pci_detach(dev);
-}
-
static struct comedi_driver adv_pci1723_driver = {
.driver_name = "adv_pci1723",
.module = THIS_MODULE,
.auto_attach = pci1723_auto_attach,
- .detach = pci1723_detach,
+ .detach = comedi_pci_detach,
};
static int adv_pci1723_pci_probe(struct pci_dev *dev,
@@ -318,5 +230,5 @@ static struct pci_driver adv_pci1723_pci_driver = {
module_comedi_pci_driver(adv_pci1723_driver, adv_pci1723_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Advantech PCI-1723 Comedi driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index 2697758b1ed9..a8d28403262e 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -1,122 +1,76 @@
/*
- comedi/drivers/adv_pci1724.c
- This is a driver for the Advantech PCI-1724U card.
-
- Author: Frank Mori Hess <fmh6jj@gmail.com>
- Copyright (C) 2013 GnuBIO Inc
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * adv_pci1724.c
+ * Comedi driver for the Advantech PCI-1724U card.
+ *
+ * Author: Frank Mori Hess <fmh6jj@gmail.com>
+ * Copyright (C) 2013 GnuBIO Inc
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-
-Driver: adv_1724
-Description: Advantech PCI-1724U
-Author: Frank Mori Hess <fmh6jj@gmail.com>
-Status: works
-Updated: 2013-02-09
-Devices: [Advantech] PCI-1724U (adv_pci1724)
-
-Subdevice 0 is the analog output.
-Subdevice 1 is the offset calibration for the analog output.
-Subdevice 2 is the gain calibration for the analog output.
-
-The calibration offset and gains have quite a large effect
-on the analog output, so it is possible to adjust the analog output to
-have an output range significantly different from the board's
-nominal output ranges. For a calibrated +/- 10V range, the analog
-output's offset will be set somewhere near mid-range (0x2000) and its
-gain will be near maximum (0x3fff).
-
-There is really no difference between the board's documented 0-20mA
-versus 4-20mA output ranges. To pick one or the other is simply a matter
-of adjusting the offset and gain calibration until the board outputs in
-the desired range.
-
-Configuration options:
- None
-
-Manual configuration of comedi devices is not supported by this driver;
-supported PCI devices are configured as comedi devices automatically.
-
-*/
+ * Driver: adv_pci1724
+ * Description: Advantech PCI-1724U
+ * Devices: (Advantech) PCI-1724U [adv_pci1724]
+ * Author: Frank Mori Hess <fmh6jj@gmail.com>
+ * Updated: 2013-02-09
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * Subdevice 0 is the analog output.
+ * Subdevice 1 is the offset calibration for the analog output.
+ * Subdevice 2 is the gain calibration for the analog output.
+ *
+ * The calibration offset and gains have quite a large effect on the
+ * analog output, so it is possible to adjust the analog output to
+ * have an output range significantly different from the board's
+ * nominal output ranges. For a calibrated +/-10V range, the analog
+ * output's offset will be set somewhere near mid-range (0x2000) and
+ * its gain will be near maximum (0x3fff).
+ *
+ * There is really no difference between the board's documented 0-20mA
+ * versus 4-20mA output ranges. To pick one or the other is simply a
+ * matter of adjusting the offset and gain calibration until the board
+ * outputs in the desired range.
+ */
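Reviewer note: a hedged userspace sketch of the calibration flow described above, using comedilib. The subdevice numbering follows the comment (0 = AO, 1 = offset cal, 2 = gain cal); the /dev path and the 0x2000/0x3fff starting values are illustrative assumptions only.

	#include <comedilib.h>

	/* park the offset near mid-range and the gain near maximum, then trim
	 * until the analog output matches the desired range */
	static int seed_calibration(comedi_t *dev, unsigned int chan)
	{
		if (comedi_data_write(dev, 1, chan, 0, AREF_GROUND, 0x2000) < 0)
			return -1;
		if (comedi_data_write(dev, 2, chan, 0, AREF_GROUND, 0x3fff) < 0)
			return -1;
		return 0;
	}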
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/pci.h>
#include "../comedidev.h"
-#define PCI_VENDOR_ID_ADVANTECH 0x13fe
-
-#define NUM_AO_CHANNELS 32
-
-/* register offsets */
-enum board_registers {
- DAC_CONTROL_REG = 0x0,
- SYNC_OUTPUT_REG = 0x4,
- EEPROM_CONTROL_REG = 0x8,
- SYNC_OUTPUT_TRIGGER_REG = 0xc,
- BOARD_ID_REG = 0x10
-};
-
-/* bit definitions for registers */
-enum dac_control_contents {
- DAC_DATA_MASK = 0x3fff,
- DAC_DESTINATION_MASK = 0xc000,
- DAC_NORMAL_MODE = 0xc000,
- DAC_OFFSET_MODE = 0x8000,
- DAC_GAIN_MODE = 0x4000,
- DAC_CHANNEL_SELECT_MASK = 0xf0000,
- DAC_GROUP_SELECT_MASK = 0xf00000
-};
-
-static uint32_t dac_data_bits(uint16_t dac_data)
-{
- return dac_data & DAC_DATA_MASK;
-}
-
-static uint32_t dac_channel_select_bits(unsigned channel)
-{
- return (channel << 16) & DAC_CHANNEL_SELECT_MASK;
-}
-
-static uint32_t dac_group_select_bits(unsigned group)
-{
- return (1 << (20 + group)) & DAC_GROUP_SELECT_MASK;
-}
-
-static uint32_t dac_channel_and_group_select_bits(unsigned comedi_channel)
-{
- return dac_channel_select_bits(comedi_channel % 8) |
- dac_group_select_bits(comedi_channel / 8);
-}
-
-enum sync_output_contents {
- SYNC_MODE = 0x1,
- DAC_BUSY = 0x2, /* dac state machine is not ready */
-};
-
-enum sync_output_trigger_contents {
- SYNC_TRIGGER_BITS = 0x0 /* any value works */
-};
-
-enum board_id_contents {
- BOARD_ID_MASK = 0xf
-};
-
-static const struct comedi_lrange ao_ranges_1724 = {
+/*
+ * PCI bar 2 Register I/O map (dev->iobase)
+ */
+#define PCI1724_DAC_CTRL_REG 0x00
+#define PCI1724_DAC_CTRL_GX(x) (1 << (20 + ((x) / 8)))
+#define PCI1724_DAC_CTRL_CX(x) (((x) % 8) << 16)
+#define PCI1724_DAC_CTRL_MODE_GAIN (1 << 14)
+#define PCI1724_DAC_CTRL_MODE_OFFSET (2 << 14)
+#define PCI1724_DAC_CTRL_MODE_NORMAL (3 << 14)
+#define PCI1724_DAC_CTRL_MODE_MASK (3 << 14)
+#define PCI1724_DAC_CTRL_DATA(x) (((x) & 0x3fff) << 0)
+#define PCI1724_SYNC_CTRL_REG 0x04
+#define PCI1724_SYNC_CTRL_DACSTAT (1 << 1)
+#define PCI1724_SYNC_CTRL_SYN (1 << 0)
+#define PCI1724_EEPROM_CTRL_REG 0x08
+#define PCI1724_SYNC_TRIG_REG 0x0c /* any value works */
+#define PCI1724_BOARD_ID_REG 0x10
+#define PCI1724_BOARD_ID_MASK (0xf << 0)
+
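Reviewer note: the GX()/CX() macros split the flat comedi channel number into a group-select bit and a per-group DAC channel. A worked example for comedi channel 10 (purely illustrative):

	/* channel 10 -> group 10 / 8 = 1, DAC channel 10 % 8 = 2 */
	unsigned int ctrl = PCI1724_DAC_CTRL_GX(10) |	/* 1 << 21 */
			    PCI1724_DAC_CTRL_CX(10) |	/* 2 << 16 */
			    PCI1724_DAC_CTRL_MODE_NORMAL |
			    PCI1724_DAC_CTRL_DATA(0x2000);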
+static const struct comedi_lrange adv_pci1724_ao_ranges = {
4, {
BIP_RANGE(10),
RANGE_mA(0, 20),
@@ -125,254 +79,120 @@ static const struct comedi_lrange ao_ranges_1724 = {
}
};
-/* this structure is for data unique to this hardware driver. */
-struct adv_pci1724_private {
- int ao_value[NUM_AO_CHANNELS];
- int offset_value[NUM_AO_CHANNELS];
- int gain_value[NUM_AO_CHANNELS];
-};
-
-static int wait_for_dac_idle(struct comedi_device *dev)
-{
- static const int timeout = 10000;
- int i;
-
- for (i = 0; i < timeout; ++i) {
- if ((inl(dev->iobase + SYNC_OUTPUT_REG) & DAC_BUSY) == 0)
- break;
- udelay(1);
- }
- if (i == timeout) {
- dev_err(dev->class_dev,
- "Timed out waiting for dac to become idle\n");
- return -EIO;
- }
- return 0;
-}
-
-static int set_dac(struct comedi_device *dev, unsigned mode, unsigned channel,
- unsigned data)
-{
- int retval;
- unsigned control_bits;
-
- retval = wait_for_dac_idle(dev);
- if (retval < 0)
- return retval;
-
- control_bits = mode;
- control_bits |= dac_channel_and_group_select_bits(channel);
- control_bits |= dac_data_bits(data);
- outl(control_bits, dev->iobase + DAC_CONTROL_REG);
- return 0;
-}
-
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int adv_pci1724_dac_idle(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
+ unsigned int status;
- /* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
-
- for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_NORMAL_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->ao_value[channel] = data[i];
- }
- return insn->n;
+ status = inl(dev->iobase + PCI1724_SYNC_CTRL_REG);
+ if ((status & PCI1724_SYNC_CTRL_DACSTAT) == 0)
+ return 0;
+ return -EBUSY;
}
-static int ao_readback_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int adv_pci1724_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int i;
-
- if (devpriv->ao_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
- }
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->ao_value[channel];
-
- return insn->n;
-}
-
-static int offset_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
-
- /* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
-
- for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_OFFSET_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->offset_value[channel] = data[i];
- }
-
- return insn->n;
-}
-
-static int offset_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned long mode = (unsigned long)s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int ctrl;
+ int ret;
int i;
- if (devpriv->offset_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
- }
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->offset_value[channel];
-
- return insn->n;
-}
-
-static int gain_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
- int retval;
- int i;
+ ctrl = PCI1724_DAC_CTRL_GX(chan) | PCI1724_DAC_CTRL_CX(chan) | mode;
/* turn off synchronous mode */
- outl(0, dev->iobase + SYNC_OUTPUT_REG);
+ outl(0, dev->iobase + PCI1724_SYNC_CTRL_REG);
for (i = 0; i < insn->n; ++i) {
- retval = set_dac(dev, DAC_GAIN_MODE, channel, data[i]);
- if (retval < 0)
- return retval;
- devpriv->gain_value[channel] = data[i];
- }
+ unsigned int val = data[i];
- return insn->n;
-}
+ ret = comedi_timeout(dev, s, insn, adv_pci1724_dac_idle, 0);
+ if (ret)
+ return ret;
-static int gain_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct adv_pci1724_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int i;
+ outl(ctrl | PCI1724_DAC_CTRL_DATA(val),
+ dev->iobase + PCI1724_DAC_CTRL_REG);
- if (devpriv->gain_value[channel] < 0) {
- dev_err(dev->class_dev,
- "Cannot read back channels which have not yet been written to\n");
- return -EIO;
+ s->readback[chan] = val;
}
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->gain_value[channel];
return insn->n;
}
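Reviewer note: all three subdevices now share adv_pci1724_insn_write(); the only per-subdevice difference is the DAC control mode, which the attach routine stashes in s->private and the handler recovers with a cast. A brief sketch of that pairing, taken from the lines in this patch:

	/* at attach time, per subdevice: */
	s->insn_write = adv_pci1724_insn_write;
	s->private = (void *)PCI1724_DAC_CTRL_MODE_OFFSET;

	/* in the shared handler: */
	unsigned long mode = (unsigned long)s->private;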
-/* Allocate and initialize the subdevice structures.
- */
-static int setup_subdevices(struct comedi_device *dev)
+static int adv_pci1724_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
+ unsigned int board_id;
int ret;
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 2);
+ board_id = inl(dev->iobase + PCI1724_BOARD_ID_REG);
+ dev_info(dev->class_dev, "board id: %d\n",
+ board_id & PCI1724_BOARD_ID_MASK);
+
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
- /* analog output subdevice */
+ /* Analog Output subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHANNELS;
- s->maxdata = 0x3fff;
- s->range_table = &ao_ranges_1724;
- s->insn_read = ao_readback_insn;
- s->insn_write = ao_winsn;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->range_table = &adv_pci1724_ao_ranges;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_NORMAL;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- /* offset calibration */
+ /* Offset Calibration subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_AO_CHANNELS;
- s->insn_read = offset_read_insn;
- s->insn_write = offset_write_insn;
- s->maxdata = 0x3fff;
+ s->type = COMEDI_SUBD_CALIB;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_OFFSET;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- /* gain calibration */
+ /* Gain Calibration subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_CALIB;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = NUM_AO_CHANNELS;
- s->insn_read = gain_read_insn;
- s->insn_write = gain_write_insn;
- s->maxdata = 0x3fff;
-
- return 0;
-}
-
-static int adv_pci1724_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct adv_pci1724_private *devpriv;
- int i;
- int retval;
- unsigned int board_id;
-
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- /* init software copies of output values to indicate we don't know
- * what the output value is since it has never been written. */
- for (i = 0; i < NUM_AO_CHANNELS; ++i) {
- devpriv->ao_value[i] = -1;
- devpriv->offset_value[i] = -1;
- devpriv->gain_value[i] = -1;
- }
-
- retval = comedi_pci_enable(dev);
- if (retval)
- return retval;
-
- dev->iobase = pci_resource_start(pcidev, 2);
- board_id = inl(dev->iobase + BOARD_ID_REG) & BOARD_ID_MASK;
- dev_info(dev->class_dev, "board id: %d\n", board_id);
-
- retval = setup_subdevices(dev);
- if (retval < 0)
- return retval;
+ s->type = COMEDI_SUBD_CALIB;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ s->n_chan = 32;
+ s->maxdata = 0x3fff;
+ s->insn_write = adv_pci1724_insn_write;
+ s->private = (void *)PCI1724_DAC_CTRL_MODE_GAIN;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- dev_info(dev->class_dev, "%s (pci %s) attached, board id: %u\n",
- dev->board_name, pci_name(pcidev), board_id);
return 0;
}
static struct comedi_driver adv_pci1724_driver = {
- .driver_name = "adv_pci1724",
- .module = THIS_MODULE,
- .auto_attach = adv_pci1724_auto_attach,
- .detach = comedi_pci_detach,
+ .driver_name = "adv_pci1724",
+ .module = THIS_MODULE,
+ .auto_attach = adv_pci1724_auto_attach,
+ .detach = comedi_pci_detach,
};
static int adv_pci1724_pci_probe(struct pci_dev *dev,
@@ -389,12 +209,11 @@ static const struct pci_device_id adv_pci1724_pci_table[] = {
MODULE_DEVICE_TABLE(pci, adv_pci1724_pci_table);
static struct pci_driver adv_pci1724_pci_driver = {
- .name = "adv_pci1724",
- .id_table = adv_pci1724_pci_table,
- .probe = adv_pci1724_pci_probe,
- .remove = comedi_pci_auto_unconfig,
+ .name = "adv_pci1724",
+ .id_table = adv_pci1724_pci_table,
+ .probe = adv_pci1724_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
};
-
module_comedi_pci_driver(adv_pci1724_driver, adv_pci1724_pci_driver);
MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index f2e2d7e163bf..09609d6d02da 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -930,7 +930,7 @@ static int pci1760_attach(struct comedi_device *dev)
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -939,7 +939,7 @@ static int pci1760_attach(struct comedi_device *dev)
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -978,7 +978,7 @@ static int pci_dio_add_di(struct comedi_device *dev,
const struct dio_boardtype *this_board = dev->board_ptr;
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON | d->specflags;
+ s->subdev_flags = SDF_READABLE | d->specflags;
if (d->chans > 16)
s->subdev_flags |= SDF_LSAMPL;
s->n_chan = d->chans;
@@ -1008,7 +1008,7 @@ static int pci_dio_add_do(struct comedi_device *dev,
const struct dio_boardtype *this_board = dev->board_ptr;
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
if (d->chans > 16)
s->subdev_flags |= SDF_LSAMPL;
s->n_chan = d->chans;
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 538277a691b2..fbc3e5aa94cb 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -212,7 +212,6 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &range_aio_aio12_8;
s->insn_write = aio_aio12_8_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index cf6a497b092c..d4b8c0195bd3 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -110,6 +110,8 @@
#define AGCSTS_TCZERO_MASK 0x000000c0
#define AGCSTS_FIFO_ST_MASK 0x0000003f
+#define AGCSTS_TC_ENABLE 0x10000000
+
#define AGCSTS_RESET_MBFLAGS 0x08000000
#define AGCSTS_RESET_P2A_FIFO 0x04000000
#define AGCSTS_RESET_A2P_FIFO 0x02000000
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 2c1bfb09601d..26aad705aad3 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -120,7 +120,6 @@ struct dio200_subdev_intr {
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
- unsigned int stopcount;
bool active:1;
};
@@ -256,7 +255,6 @@ static void dio200_read_scan_intr(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int triggered)
{
- struct dio200_subdev_intr *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short val;
unsigned int n, ch;
@@ -267,26 +265,12 @@ static void dio200_read_scan_intr(struct comedi_device *dev,
if (triggered & (1U << ch))
val |= (1U << n);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Error! Stop acquisition. */
- dio200_stop_intr(dev, s);
- s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- dev_err(dev->class_dev, "buffer overflow\n");
- }
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT) {
- if (subpriv->stopcount > 0) {
- subpriv->stopcount--;
- if (subpriv->stopcount == 0) {
- s->async->events |= COMEDI_CB_EOA;
- dio200_stop_intr(dev, s);
- }
- }
- }
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
}
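Reviewer note: the private stopcount field can go away because the core now counts completed scans in s->async->scans_done, so end-of-acquisition for a TRIG_COUNT command reduces to the two-line test above. The pattern, with identifiers that all appear in this patch:

	comedi_buf_write_samples(s, &val, 1);

	if (cmd->stop_src == TRIG_COUNT &&
	    s->async->scans_done >= cmd->stop_arg)
		s->async->events |= COMEDI_CB_EOA;

comedi_handle_events() (called from the interrupt path below) then performs the actual shutdown and wakes any reader.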
static int dio200_handle_read_intr(struct comedi_device *dev,
@@ -297,13 +281,11 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
unsigned triggered;
unsigned intstat;
unsigned cur_enabled;
- unsigned int oldevents;
unsigned long flags;
triggered = 0;
spin_lock_irqsave(&subpriv->spinlock, flags);
- oldevents = s->async->events;
if (board->has_int_sce) {
/*
* Collect interrupt sources that have triggered and disable
@@ -356,8 +338,7 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
}
spin_unlock_irqrestore(&subpriv->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return (triggered != 0);
}
@@ -436,7 +417,6 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
spin_lock_irqsave(&subpriv->spinlock, flags);
subpriv->active = true;
- subpriv->stopcount = cmd->stop_arg;
if (cmd->start_src == TRIG_INT)
s->async->inttrig = dio200_inttrig_start_intr;
@@ -469,7 +449,7 @@ static int dio200_subdev_intr_init(struct comedi_device *dev,
dio200_write8(dev, subpriv->ofs, 0);
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_PACKED;
if (board->has_int_sce) {
s->n_chan = DIO200_MAX_ISNS;
s->len_chanlist = DIO200_MAX_ISNS;
diff --git a/drivers/staging/comedi/drivers/amplc_pc236_common.c b/drivers/staging/comedi/drivers/amplc_pc236_common.c
index 963c5d868b81..be87172d1f3f 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236_common.c
@@ -135,9 +135,8 @@ static irqreturn_t pc236_interrupt(int irq, void *d)
handled = pc236_intr_check(dev);
if (dev->attached && handled) {
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
return IRQ_RETVAL(handled);
}
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index f8e551d8fd9e..b1946ce6ecc1 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -83,7 +83,7 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s = &dev->subdevices[0];
/* digital output subdevice */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 3bbbb57f19d6..924c8298c7a0 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -381,7 +381,6 @@ struct pci224_private {
unsigned short daccon;
unsigned int cached_div1;
unsigned int cached_div2;
- unsigned int ao_stop_count;
unsigned short ao_enab; /* max 16 channels so 'short' will do */
unsigned char intsce;
};
@@ -514,30 +513,21 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
{
struct pci224_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int bytes_per_scan = cfc_bytes_per_scan(s);
- unsigned int num_scans;
+ unsigned int num_scans = comedi_nscans_left(s, 0);
unsigned int room;
unsigned short dacstat;
unsigned int i, n;
- /* Determine number of scans available in buffer. */
- num_scans = comedi_buf_read_n_available(s) / bytes_per_scan;
- if (cmd->stop_src == TRIG_COUNT) {
- /* Fixed number of scans. */
- if (num_scans > devpriv->ao_stop_count)
- num_scans = devpriv->ao_stop_count;
- }
-
/* Determine how much room is in the FIFO (in samples). */
dacstat = inw(dev->iobase + PCI224_DACCON);
switch (dacstat & PCI224_DACCON_FIFOFL_MASK) {
case PCI224_DACCON_FIFOFL_EMPTY:
room = PCI224_FIFO_ROOM_EMPTY;
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ao_stop_count == 0) {
+ s->async->scans_done >= cmd->stop_arg) {
/* FIFO empty at end of counted acquisition. */
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
break;
@@ -568,25 +558,23 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
/* Process scans. */
for (n = 0; n < num_scans; n++) {
- cfc_read_array_from_buffer(s, &devpriv->ao_scan_vals[0],
- bytes_per_scan);
+ comedi_buf_read_samples(s, &devpriv->ao_scan_vals[0],
+ cmd->chanlist_len);
for (i = 0; i < cmd->chanlist_len; i++) {
outw(devpriv->ao_scan_vals[devpriv->ao_scan_order[i]],
dev->iobase + PCI224_DACDATA);
}
}
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_stop_count -= num_scans;
- if (devpriv->ao_stop_count == 0) {
- /*
- * Change FIFO interrupt trigger level to wait
- * until FIFO is empty.
- */
- devpriv->daccon = COMBINE(devpriv->daccon,
- PCI224_DACCON_FIFOINTR_EMPTY,
- PCI224_DACCON_FIFOINTR_MASK);
- outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ /*
+ * Change FIFO interrupt trigger level to wait
+ * until FIFO is empty.
+ */
+ devpriv->daccon = COMBINE(devpriv->daccon,
+ PCI224_DACCON_FIFOINTR_EMPTY,
+ PCI224_DACCON_FIFOINTR_MASK);
+ outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
if ((devpriv->daccon & PCI224_DACCON_TRIG_MASK) ==
PCI224_DACCON_TRIG_NONE) {
@@ -618,7 +606,7 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int pci224_ao_inttrig_start(struct comedi_device *dev,
@@ -908,14 +896,6 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->scan_begin_src == TRIG_TIMER)
pci224_ao_start_pacer(dev, s);
- /*
- * Sort out end of acquisition.
- */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_stop_count = cmd->stop_arg;
- else /* TRIG_EXT | TRIG_NONE */
- devpriv->ao_stop_count = 0;
-
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
if (cmd->start_src == TRIG_INT) {
s->async->inttrig = pci224_ao_inttrig_start;
@@ -1095,7 +1075,6 @@ pci224_auto_attach(struct comedi_device *dev, unsigned long context_model)
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = thisboard->ao_range;
s->insn_write = pci224_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->len_chanlist = s->n_chan;
dev->write_subdev = s;
s->do_cmd = pci224_ao_cmd;
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 01796cd28e5b..49806a5e514c 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -35,7 +35,7 @@
* automatically.
*
* The PCI230+ and PCI260+ have the same PCI device IDs as the PCI230 and
- * PCI260, but can be distinguished by the the size of the PCI regions. A
+ * PCI260, but can be distinguished by the size of the PCI regions. A
* card will be configured as a "+" model if detected as such.
*
* Subdevices:
@@ -490,9 +490,6 @@ struct pci230_private {
spinlock_t ai_stop_spinlock; /* Spin lock for stopping AI command */
spinlock_t ao_stop_spinlock; /* Spin lock for stopping AO command */
unsigned long daqio; /* PCI230's DAQ I/O space */
- unsigned int ai_scan_count; /* Number of AI scans remaining */
- unsigned int ai_scan_pos; /* Current position within AI scan */
- unsigned int ao_scan_count; /* Number of AO scans remaining. */
int intr_cpuid; /* ID of CPU running ISR */
unsigned short hwver; /* Hardware version (for '+' models) */
unsigned short adccon; /* ADCCON register value */
@@ -1074,37 +1071,27 @@ static void pci230_ao_stop(struct comedi_device *dev,
static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pci230_private *devpriv = dev->private;
- unsigned short data;
- int i, ret;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
+ unsigned short data;
+ int i;
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_scan_count == 0)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
return;
+
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- /* Read sample from Comedi's circular buffer. */
- ret = comedi_buf_get(s, &data);
- if (ret == 0) {
- s->async->events |= COMEDI_CB_OVERFLOW;
- pci230_ao_stop(dev, s);
- dev_err(dev->class_dev, "AO buffer underrun\n");
+ if (!comedi_buf_read_samples(s, &data, 1)) {
+ async->events |= COMEDI_CB_OVERFLOW;
return;
}
pci230_ao_write_nofifo(dev, data, chan);
s->readback[chan] = data;
}
- async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_scan_count--;
- if (devpriv->ao_scan_count == 0) {
- /* End of acquisition. */
- async->events |= COMEDI_CB_EOA;
- pci230_ao_stop(dev, s);
- }
- }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
}
/*
@@ -1117,26 +1104,18 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
struct pci230_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int num_scans;
+ unsigned int num_scans = comedi_nscans_left(s, 0);
unsigned int room;
unsigned short dacstat;
unsigned int i, n;
unsigned int events = 0;
- bool running;
/* Get DAC FIFO status. */
dacstat = inw(devpriv->daqio + PCI230_DACCON);
- /* Determine number of scans available in buffer. */
- num_scans = comedi_buf_read_n_available(s) / cfc_bytes_per_scan(s);
- if (cmd->stop_src == TRIG_COUNT) {
- /* Fixed number of scans. */
- if (num_scans > devpriv->ao_scan_count)
- num_scans = devpriv->ao_scan_count;
- if (devpriv->ao_scan_count == 0) {
- /* End of acquisition. */
- events |= COMEDI_CB_EOA;
- }
- }
+
+ if (cmd->stop_src == TRIG_COUNT && num_scans == 0)
+ events |= COMEDI_CB_EOA;
+
if (events == 0) {
/* Check for FIFO underrun. */
if (dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) {
@@ -1175,27 +1154,22 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short datum;
- comedi_buf_get(s, &datum);
+ comedi_buf_read_samples(s, &datum, 1);
pci230_ao_write_fifo(dev, datum, chan);
s->readback[chan] = datum;
}
}
- events |= COMEDI_CB_EOS | COMEDI_CB_BLOCK;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ao_scan_count -= num_scans;
- if (devpriv->ao_scan_count == 0) {
- /*
- * All data for the command has been written
- * to FIFO. Set FIFO interrupt trigger level
- * to 'empty'.
- */
- devpriv->daccon =
- (devpriv->daccon &
- ~PCI230P2_DAC_INT_FIFO_MASK) |
- PCI230P2_DAC_INT_FIFO_EMPTY;
- outw(devpriv->daccon,
- devpriv->daqio + PCI230_DACCON);
- }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ /*
+ * All data for the command has been written
+ * to FIFO. Set FIFO interrupt trigger level
+ * to 'empty'.
+ */
+ devpriv->daccon &= ~PCI230P2_DAC_INT_FIFO_MASK;
+ devpriv->daccon |= PCI230P2_DAC_INT_FIFO_EMPTY;
+ outw(devpriv->daccon, devpriv->daqio + PCI230_DACCON);
}
/* Check if FIFO underrun occurred while writing to FIFO. */
dacstat = inw(devpriv->daqio + PCI230_DACCON);
@@ -1204,15 +1178,8 @@ static bool pci230_handle_ao_fifo(struct comedi_device *dev,
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
}
}
- if (events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW)) {
- /* Stopping AO due to completion or error. */
- pci230_ao_stop(dev, s);
- running = false;
- } else {
- running = true;
- }
async->events |= events;
- return running;
+ return !(async->events & COMEDI_CB_CANCEL_MASK);
}
static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
@@ -1235,7 +1202,7 @@ static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
/* Not using DAC FIFO. */
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
pci230_handle_ao_nofifo(dev, s);
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
} else {
/* Using DAC FIFO. */
/* Read DACSWTRIG register to trigger conversion. */
@@ -1265,7 +1232,7 @@ static void pci230_ao_start(struct comedi_device *dev,
/* Preload FIFO data. */
run = pci230_handle_ao_fifo(dev, s);
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
if (!run) {
/* Stopped. */
return;
@@ -1354,8 +1321,6 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return -EBUSY;
}
- devpriv->ao_scan_count = cmd->stop_arg;
-
/*
* Set range - see analogue output range table; 0 => unipolar 10V,
* 1 => bipolar +/-10V range scale
@@ -1754,19 +1719,15 @@ static void pci230_ai_update_fifo_trigger_level(struct comedi_device *dev,
{
struct pci230_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int scanlen = cmd->scan_end_arg;
unsigned int wake;
unsigned short triglev;
unsigned short adccon;
if (cmd->flags & CMDF_WAKE_EOS)
- wake = scanlen - devpriv->ai_scan_pos;
- else if (cmd->stop_src != TRIG_COUNT ||
- devpriv->ai_scan_count >= PCI230_ADC_FIFOLEVEL_HALFFULL ||
- scanlen >= PCI230_ADC_FIFOLEVEL_HALFFULL)
- wake = PCI230_ADC_FIFOLEVEL_HALFFULL;
+ wake = cmd->scan_end_arg - s->async->cur_chan;
else
- wake = devpriv->ai_scan_count * scanlen - devpriv->ai_scan_pos;
+ wake = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
+
if (wake >= PCI230_ADC_FIFOLEVEL_HALFFULL) {
triglev = PCI230_ADC_INT_FIFO_HALF;
} else if (wake > 1 && devpriv->hwver > 0) {
@@ -2059,28 +2020,17 @@ static void pci230_handle_ai(struct comedi_device *dev,
struct pci230_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int scanlen = cmd->scan_end_arg;
- unsigned int events = 0;
unsigned int status_fifo;
unsigned int i;
unsigned int todo;
unsigned int fifoamount;
+ unsigned short val;
/* Determine number of samples to read. */
- if (cmd->stop_src != TRIG_COUNT) {
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- } else if (devpriv->ai_scan_count == 0) {
- todo = 0;
- } else if (devpriv->ai_scan_count > PCI230_ADC_FIFOLEVEL_HALFFULL ||
- scanlen > PCI230_ADC_FIFOLEVEL_HALFFULL) {
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- } else {
- todo = devpriv->ai_scan_count * scanlen - devpriv->ai_scan_pos;
- if (todo > PCI230_ADC_FIFOLEVEL_HALFFULL)
- todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- }
+ todo = comedi_nsamples_left(s, PCI230_ADC_FIFOLEVEL_HALFFULL);
if (todo == 0)
return;
+
fifoamount = 0;
for (i = 0; i < todo; i++) {
if (fifoamount == 0) {
@@ -2092,7 +2042,7 @@ static void pci230_handle_ai(struct comedi_device *dev,
* unnoticed by the caller.
*/
dev_err(dev->class_dev, "AI FIFO overrun\n");
- events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
+ async->events |= COMEDI_CB_ERROR;
break;
} else if (status_fifo & PCI230_ADC_FIFO_EMPTY) {
/* FIFO empty. */
@@ -2111,37 +2061,23 @@ static void pci230_handle_ai(struct comedi_device *dev,
fifoamount = 1;
}
}
- /* Read sample and store in Comedi's circular buffer. */
- if (comedi_buf_put(s, pci230_ai_read(dev)) == 0) {
- events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
- dev_err(dev->class_dev, "AI buffer overflow\n");
+
+ val = pci230_ai_read(dev);
+ if (!comedi_buf_write_samples(s, &val, 1))
break;
- }
+
fifoamount--;
- devpriv->ai_scan_pos++;
- if (devpriv->ai_scan_pos == scanlen) {
- /* End of scan. */
- devpriv->ai_scan_pos = 0;
- devpriv->ai_scan_count--;
- async->events |= COMEDI_CB_EOS;
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ break;
}
}
- if (cmd->stop_src == TRIG_COUNT && devpriv->ai_scan_count == 0) {
- /* End of acquisition. */
- events |= COMEDI_CB_EOA;
- } else {
- /* More samples required, tell Comedi to block. */
- events |= COMEDI_CB_BLOCK;
- }
- async->events |= events;
- if (async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
- COMEDI_CB_OVERFLOW)) {
- /* disable hardware conversions */
- pci230_ai_stop(dev, s);
- } else {
- /* update FIFO interrupt trigger level */
+
+ /* update FIFO interrupt trigger level if still running */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK))
pci230_ai_update_fifo_trigger_level(dev, s);
- }
}
static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -2177,9 +2113,6 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (!pci230_claim_shared(dev, res_mask, OWNER_AICMD))
return -EBUSY;
- devpriv->ai_scan_count = cmd->stop_arg;
- devpriv->ai_scan_pos = 0; /* Position within scan. */
-
/*
* Steps:
* - Set channel scan list.
@@ -2355,7 +2288,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
unsigned char status_int, valid_status_int, temp_ier;
struct comedi_device *dev = (struct comedi_device *)d;
struct pci230_private *devpriv = dev->private;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
+ struct comedi_subdevice *s_ai = dev->read_subdev;
unsigned long irqflags;
/* Read interrupt status/enable register. */
@@ -2385,23 +2319,14 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
* two.
*/
- if (valid_status_int & PCI230_INT_ZCLK_CT1) {
- s = dev->write_subdev;
- pci230_handle_ao_nofifo(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230_INT_ZCLK_CT1)
+ pci230_handle_ao_nofifo(dev, s_ao);
- if (valid_status_int & PCI230P2_INT_DAC) {
- s = dev->write_subdev;
- pci230_handle_ao_fifo(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230P2_INT_DAC)
+ pci230_handle_ao_fifo(dev, s_ao);
- if (valid_status_int & PCI230_INT_ADC) {
- s = dev->read_subdev;
- pci230_handle_ai(dev, s);
- comedi_event(dev, s);
- }
+ if (valid_status_int & PCI230_INT_ADC)
+ pci230_handle_ai(dev, s_ai);
/* Reenable interrupts. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
@@ -2410,6 +2335,9 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
devpriv->intr_running = false;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
+ comedi_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s_ai);
+
return IRQ_HANDLED;
}
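Reviewer note: rather than calling comedi_event() after each sub-handler, the ISR now lets the handlers accumulate bits in async->events and runs a single comedi_handle_events() per subdevice at the end; comedi_handle_events() returns early when no event bits are set, so the unconditional calls are harmless. A condensed sketch of the resulting ISR shape (an outline of the hunk above, not new API):

	static irqreturn_t pci230_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s_ao = dev->write_subdev;
		struct comedi_subdevice *s_ai = dev->read_subdev;

		/* decode the interrupt status and run the AO/AI sub-handlers,
		 * which only set bits in s->async->events */

		comedi_handle_events(dev, s_ao);
		comedi_handle_events(dev, s_ai);

		return IRQ_HANDLED;
	}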
@@ -2583,7 +2511,6 @@ static int pci230_auto_attach(struct comedi_device *dev,
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = &pci230_ao_range;
s->insn_write = pci230_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->len_chanlist = 2;
if (dev->irq) {
dev->write_subdev = s;
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 2259bee98d48..0d2224b832ac 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -71,7 +71,7 @@ static int pci263_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* digital output subdevice */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index e03dd6e71415..e7cb7032a910 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -265,7 +265,7 @@ static int c6xdigio_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
/* pwm output subdevice */
s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 500;
s->range_table = &range_unknown;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index f88880aea6da..0a48d2a961d5 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -305,7 +305,6 @@ static int das16cs_auto_attach(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = &das16cs_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 1ec363b7505c..669b1703eb99 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -346,8 +346,6 @@ struct cb_pcidas_private {
/* divisors of master clock for analog input pacing */
unsigned int divisor1;
unsigned int divisor2;
- /* number of analog input samples remaining */
- unsigned int count;
/* bits to write to registers */
unsigned int adc_fifo_bits;
unsigned int s5933_intcsr_bits;
@@ -358,11 +356,6 @@ struct cb_pcidas_private {
/* divisors of master clock for analog output pacing */
unsigned int ao_divisor1;
unsigned int ao_divisor2;
- /* number of analog output samples remaining */
- unsigned int ao_count;
- unsigned int caldac_value[NUM_CHANNELS_8800];
- unsigned int trimpot_value[NUM_CHANNELS_8402];
- unsigned int dac08_value;
unsigned int calibration_source;
};
@@ -596,25 +589,14 @@ static void write_calibration_bitstream(struct comedi_device *dev,
}
}
-static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
- uint8_t value)
+static void caldac_8800_write(struct comedi_device *dev,
+ unsigned int chan, uint8_t val)
{
struct cb_pcidas_private *devpriv = dev->private;
- static const int num_caldac_channels = 8;
static const int bitstream_length = 11;
- unsigned int bitstream = ((address & 0x7) << 8) | value;
+ unsigned int bitstream = ((chan & 0x7) << 8) | val;
static const int caldac_8800_udelay = 1;
- if (address >= num_caldac_channels) {
- dev_err(dev->class_dev, "illegal caldac channel\n");
- return -1;
- }
-
- if (value == devpriv->caldac_value[address])
- return 1;
-
- devpriv->caldac_value[address] = value;
-
write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream,
bitstream_length);
@@ -623,75 +605,62 @@ static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
devpriv->control_status + CALIBRATION_REG);
udelay(caldac_8800_udelay);
outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);
-
- return 1;
}
-static int caldac_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_caldac_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- return caldac_8800_write(dev, channel, data[0]);
-}
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
-static int caldac_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
-
- data[0] = devpriv->caldac_value[CR_CHAN(insn->chanspec)];
+ if (s->readback[chan] != val) {
+ caldac_8800_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
/* 1602/16 pregain offset */
static void dac08_write(struct comedi_device *dev, unsigned int value)
{
struct cb_pcidas_private *devpriv = dev->private;
- unsigned long cal_reg;
-
- if (devpriv->dac08_value != value) {
- devpriv->dac08_value = value;
-
- cal_reg = devpriv->control_status + CALIBRATION_REG;
- value &= 0xff;
- value |= cal_enable_bits(dev);
+ value &= 0xff;
+ value |= cal_enable_bits(dev);
- /* latch the new value into the caldac */
- outw(value, cal_reg);
- udelay(1);
- outw(value | SELECT_DAC08_BIT, cal_reg);
- udelay(1);
- outw(value, cal_reg);
- udelay(1);
- }
+ /* latch the new value into the caldac */
+ outw(value, devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
+ outw(value | SELECT_DAC08_BIT,
+ devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
+ outw(value, devpriv->control_status + CALIBRATION_REG);
+ udelay(1);
}
-static int dac08_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_dac08_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i;
-
- for (i = 0; i < insn->n; i++)
- dac08_write(dev, data[i]);
-
- return insn->n;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int dac08_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->dac08_value;
+ if (s->readback[chan] != val) {
+ dac08_write(dev, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static int trimpot_7376_write(struct comedi_device *dev, uint8_t value)
@@ -740,50 +709,41 @@ static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel,
return 0;
}
-static int cb_pcidas_trimpot_write(struct comedi_device *dev,
- unsigned int channel, unsigned int value)
+static void cb_pcidas_trimpot_write(struct comedi_device *dev,
+ unsigned int chan, unsigned int val)
{
const struct cb_pcidas_board *thisboard = dev->board_ptr;
- struct cb_pcidas_private *devpriv = dev->private;
-
- if (devpriv->trimpot_value[channel] == value)
- return 1;
- devpriv->trimpot_value[channel] = value;
switch (thisboard->trimpot) {
case AD7376:
- trimpot_7376_write(dev, value);
+ trimpot_7376_write(dev, val);
break;
case AD8402:
- trimpot_8402_write(dev, channel, value);
+ trimpot_8402_write(dev, chan, val);
break;
default:
dev_err(dev->class_dev, "driver bug?\n");
- return -1;
+ break;
}
-
- return 1;
}
-static int trimpot_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas_trimpot_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- return cb_pcidas_trimpot_write(dev, channel, data[0]);
-}
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
-static int trimpot_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct cb_pcidas_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
-
- data[0] = devpriv->trimpot_value[channel];
+ if (s->readback[chan] != val) {
+ cb_pcidas_trimpot_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static int cb_pcidas_ai_check_chanlist(struct comedi_device *dev,
@@ -976,9 +936,6 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER || cmd->convert_src == TRIG_TIMER)
cb_pcidas_ai_load_counters(dev);
- /* set number of conversions */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->chanlist_len * cmd->stop_arg;
/* enable interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->adc_fifo_bits |= INTE;
@@ -1134,32 +1091,34 @@ static int cb_pcidas_cancel(struct comedi_device *dev,
return 0;
}
+static void cb_pcidas_ao_load_fifo(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ struct cb_pcidas_private *devpriv = dev->private;
+ unsigned int nbytes;
+
+ nsamples = comedi_nsamples_left(s, nsamples);
+ nbytes = comedi_buf_read_samples(s, devpriv->ao_buffer, nsamples);
+
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, nsamples);
+}
+
static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
const struct cb_pcidas_board *thisboard = dev->board_ptr;
struct cb_pcidas_private *devpriv = dev->private;
- unsigned int num_bytes, num_points = thisboard->fifo_size;
struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned long flags;
if (trig_num != cmd->start_arg)
return -EINVAL;
- /* load up fifo */
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points)
- num_points = devpriv->ao_count;
-
- num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer,
- num_points * sizeof(short));
- num_points = num_bytes / sizeof(short);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count -= num_points;
- /* write data to board's fifo */
- outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_bytes);
+ cb_pcidas_ao_load_fifo(dev, s, thisboard->fifo_size);
/* enable dac half-full and empty interrupts */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -1224,9 +1183,6 @@ static int cb_pcidas_ao_cmd(struct comedi_device *dev,
if (cmd->scan_begin_src == TRIG_TIMER)
cb_pcidas_ao_load_counters(dev);
- /* set number of conversions */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count = cmd->chanlist_len * cmd->stop_arg;
/* set pacer source */
spin_lock_irqsave(&dev->spinlock, flags);
switch (cmd->scan_begin_src) {
@@ -1275,8 +1231,6 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int half_fifo = thisboard->fifo_size / 2;
- unsigned int num_points;
unsigned long flags;
if (status & DAEMI) {
@@ -1286,32 +1240,17 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
devpriv->control_status + INT_ADCFIFO);
spin_unlock_irqrestore(&dev->spinlock, flags);
if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) {
- if (cmd->stop_src == TRIG_NONE ||
- (cmd->stop_src == TRIG_COUNT
- && devpriv->ao_count)) {
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ } else {
dev_err(dev->class_dev, "dac fifo underflow\n");
async->events |= COMEDI_CB_ERROR;
}
- async->events |= COMEDI_CB_EOA;
}
} else if (status & DAHFI) {
- unsigned int num_bytes;
+ cb_pcidas_ao_load_fifo(dev, s, thisboard->fifo_size / 2);
- /* figure out how many points we are writing to fifo */
- num_points = half_fifo;
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ao_count < num_points)
- num_points = devpriv->ao_count;
- num_bytes =
- cfc_read_array_from_buffer(s, devpriv->ao_buffer,
- num_points * sizeof(short));
- num_points = num_bytes / sizeof(short);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ao_count -= num_points;
- /* write data to board's fifo */
- outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer,
- num_points);
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | DAHFI,
@@ -1319,7 +1258,7 @@ static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status)
spin_unlock_irqrestore(&dev->spinlock, flags);
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
@@ -1362,18 +1301,15 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
/* if fifo half-full */
if (status & ADHFI) {
/* read data */
- num_samples = half_fifo;
- if (cmd->stop_src == TRIG_COUNT &&
- num_samples > devpriv->count) {
- num_samples = devpriv->count;
- }
+ num_samples = comedi_nsamples_left(s, half_fifo);
insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer,
num_samples);
- cfc_write_array_to_buffer(s, devpriv->ai_buffer,
- num_samples * sizeof(short));
- devpriv->count -= num_samples;
- if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0)
+ comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
+
/* clear half-full interrupt latch */
spin_lock_irqsave(&dev->spinlock, flags);
outw(devpriv->adc_fifo_bits | INT,
@@ -1382,14 +1318,17 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
/* else if fifo not empty */
} else if (status & (ADNEI | EOBI)) {
for (i = 0; i < timeout; i++) {
+ unsigned short val;
+
/* break if fifo is empty */
if ((ADNE & inw(devpriv->control_status +
INT_ADCFIFO)) == 0)
break;
- cfc_write_to_buffer(s, inw(devpriv->adc_fifo));
+ val = inw(devpriv->adc_fifo);
+ comedi_buf_write_samples(s, &val, 1);
+
if (cmd->stop_src == TRIG_COUNT &&
- --devpriv->count == 0) {
- /* end of acquisition */
+ async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
break;
}
@@ -1419,7 +1358,7 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1503,7 +1442,6 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->range_table = &cb_pcidas_ao_ranges;
/* default to no fifo (*insn_write) */
s->insn_write = cb_pcidas_ao_nofifo_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -1542,10 +1480,16 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = NUM_CHANNELS_8800;
s->maxdata = 0xff;
- s->insn_read = caldac_read_insn;
- s->insn_write = caldac_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas_caldac_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
caldac_8800_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* trim potentiometer */
s = &dev->subdevices[5];
@@ -1558,10 +1502,16 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->n_chan = NUM_CHANNELS_8402;
s->maxdata = 0xff;
}
- s->insn_read = trimpot_read_insn;
- s->insn_write = trimpot_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas_trimpot_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
cb_pcidas_trimpot_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* dac08 caldac */
s = &dev->subdevices[6];
@@ -1569,10 +1519,17 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = NUM_CHANNELS_DAC08;
- s->insn_read = dac08_read_insn;
- s->insn_write = dac08_write_insn;
s->maxdata = 0xff;
- dac08_write(dev, s->maxdata / 2);
+ s->insn_write = cb_pcidas_dac08_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
+ dac08_write(dev, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 3b6bffc66918..eddb7ace43df 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1065,8 +1065,6 @@ struct pcidas64_private {
/* local address (used by dma controller) */
uint32_t local0_iobase;
uint32_t local1_iobase;
- /* number of analog input samples remaining */
- unsigned int ai_count;
/* dma buffers for analog input */
uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT];
/* physical addresses of ai dma buffers */
@@ -1087,8 +1085,6 @@ struct pcidas64_private {
dma_addr_t ao_dma_desc_bus_addr;
/* keeps track of buffer where the next ao sample should go */
unsigned int ao_dma_index;
- /* number of analog output samples remaining */
- unsigned long ao_count;
unsigned int hw_revision; /* stc chip hardware revision number */
/* last bits sent to INTR_ENABLE_REG register */
unsigned int intr_enable_bits;
@@ -1109,9 +1105,6 @@ struct pcidas64_private {
uint8_t i2c_cal_range_bits;
/* configure digital triggers to trigger on falling edge */
unsigned int ext_trig_falling;
- /* states of various devices stored to enable read-back */
- unsigned int ad8402_state[2];
- unsigned int caldac_state[8];
short ai_cmd_running;
unsigned int ai_fifo_segment_length;
struct ext_clock_info ext_clock;
@@ -2199,10 +2192,6 @@ static void setup_sample_counters(struct comedi_device *dev,
{
struct pcidas64_private *devpriv = dev->private;
- if (cmd->stop_src == TRIG_COUNT) {
- /* set software count */
- devpriv->ai_count = cmd->stop_arg * cmd->chanlist_len;
- }
/* load hardware conversion counter */
if (use_hw_sample_counter(cmd)) {
writew(cmd->stop_arg & 0xffff,
@@ -2642,8 +2631,6 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
{
struct pcidas64_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
unsigned int i;
uint16_t prepost_bits;
int read_segment, read_index, write_segment, write_index;
@@ -2672,26 +2659,21 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
devpriv->ai_fifo_segment_length - read_index;
else
num_samples = write_index - read_index;
-
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->ai_count == 0)
- break;
- if (num_samples > devpriv->ai_count)
- num_samples = devpriv->ai_count;
-
- devpriv->ai_count -= num_samples;
- }
-
if (num_samples < 0) {
dev_err(dev->class_dev,
"cb_pcidas64: bug! num_samples < 0\n");
break;
}
+ num_samples = comedi_nsamples_left(s, num_samples);
+ if (num_samples == 0)
+ break;
+
for (i = 0; i < num_samples; i++) {
- cfc_write_to_buffer(s,
- readw(devpriv->main_iobase +
- ADC_FIFO_REG));
+ unsigned short val;
+
+ val = readw(devpriv->main_iobase + ADC_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
}
} while (read_segment != write_segment);
@@ -2706,33 +2688,30 @@ static void pio_drain_ai_fifo_32(struct comedi_device *dev)
{
struct pcidas64_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
+ unsigned int nsamples;
unsigned int i;
- unsigned int max_transfer = 100000;
uint32_t fifo_data;
int write_code =
readw(devpriv->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff;
int read_code =
readw(devpriv->main_iobase + ADC_READ_PNTR_REG) & 0x7fff;
- if (cmd->stop_src == TRIG_COUNT) {
- if (max_transfer > devpriv->ai_count)
- max_transfer = devpriv->ai_count;
+ nsamples = comedi_nsamples_left(s, 100000);
+ for (i = 0; read_code != write_code && i < nsamples;) {
+ unsigned short val;
- }
- for (i = 0; read_code != write_code && i < max_transfer;) {
fifo_data = readl(dev->mmio + ADC_FIFO_REG);
- cfc_write_to_buffer(s, fifo_data & 0xffff);
+ val = fifo_data & 0xffff;
+ comedi_buf_write_samples(s, &val, 1);
i++;
- if (i < max_transfer) {
- cfc_write_to_buffer(s, (fifo_data >> 16) & 0xffff);
+ if (i < nsamples) {
+ val = (fifo_data >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &val, 1);
i++;
}
read_code = readw(devpriv->main_iobase + ADC_READ_PNTR_REG) &
0x7fff;
}
- devpriv->ai_count -= i;
}
/* empty fifo */
@@ -2750,8 +2729,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
{
const struct pcidas64_board *thisboard = dev->board_ptr;
struct pcidas64_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
- struct comedi_cmd *cmd = &async->cmd;
+ struct comedi_subdevice *s = dev->read_subdev;
uint32_t next_transfer_addr;
int j;
int num_samples = 0;
@@ -2772,16 +2750,10 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
devpriv->ai_buffer_bus_addr[devpriv->ai_dma_index] +
DMA_BUFFER_SIZE) && j < ai_dma_ring_count(thisboard); j++) {
/* transfer data from dma buffer to comedi buffer */
- num_samples = dma_transfer_size(dev);
- if (cmd->stop_src == TRIG_COUNT) {
- if (num_samples > devpriv->ai_count)
- num_samples = devpriv->ai_count;
- devpriv->ai_count -= num_samples;
- }
- cfc_write_array_to_buffer(dev->read_subdev,
- devpriv->ai_buffer[devpriv->
- ai_dma_index],
- num_samples * sizeof(uint16_t));
+ num_samples = comedi_nsamples_left(s, dma_transfer_size(dev));
+ comedi_buf_write_samples(s,
+ devpriv->ai_buffer[devpriv->ai_dma_index],
+ num_samples);
devpriv->ai_dma_index = (devpriv->ai_dma_index + 1) %
ai_dma_ring_count(thisboard);
}
@@ -2831,12 +2803,12 @@ static void handle_ai_interrupt(struct comedi_device *dev,
spin_unlock_irqrestore(&dev->spinlock, flags);
}
/* if we have all the data, then quit */
- if ((cmd->stop_src == TRIG_COUNT && (int)devpriv->ai_count <= 0) ||
- (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT))) {
+ if ((cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) ||
+ (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT)))
async->events |= COMEDI_CB_EOA;
- }
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static inline unsigned int prev_ao_dma_index(struct comedi_device *dev)
@@ -2871,22 +2843,6 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
return 1;
}
-static int ao_stopped_by_error(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct pcidas64_private *devpriv = dev->private;
-
- if (cmd->stop_src == TRIG_NONE)
- return 1;
- if (cmd->stop_src == TRIG_COUNT) {
- if (devpriv->ao_count)
- return 1;
- if (last_ao_dma_load_completed(dev) == 0)
- return 1;
- }
- return 0;
-}
-
static inline int ao_dma_needs_restart(struct comedi_device *dev,
unsigned short dma_status)
{
@@ -2912,32 +2868,39 @@ static void restart_ao_dma(struct comedi_device *dev)
dma_start_sync(dev, 0);
}
+static unsigned int cb_pcidas64_ao_fill_buffer(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short *dest,
+ unsigned int max_bytes)
+{
+ unsigned int nsamples = comedi_bytes_to_samples(s, max_bytes);
+ unsigned int actual_bytes;
+
+ nsamples = comedi_nsamples_left(s, nsamples);
+ actual_bytes = comedi_buf_read_samples(s, dest, nsamples);
+
+ return comedi_bytes_to_samples(s, actual_bytes);
+}
+
static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int num_bytes, buffer_index, prev_buffer_index;
+ struct comedi_subdevice *s = dev->write_subdev;
+ unsigned int buffer_index = devpriv->ao_dma_index;
+ unsigned int prev_buffer_index = prev_ao_dma_index(dev);
+ unsigned int nsamples;
+ unsigned int nbytes;
unsigned int next_bits;
- buffer_index = devpriv->ao_dma_index;
- prev_buffer_index = prev_ao_dma_index(dev);
-
- num_bytes = comedi_buf_read_n_available(dev->write_subdev);
- if (num_bytes > DMA_BUFFER_SIZE)
- num_bytes = DMA_BUFFER_SIZE;
- if (cmd->stop_src == TRIG_COUNT && num_bytes > devpriv->ao_count)
- num_bytes = devpriv->ao_count;
- num_bytes -= num_bytes % bytes_in_sample;
-
- if (num_bytes == 0)
+ nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
+ devpriv->ao_buffer[buffer_index],
+ DMA_BUFFER_SIZE);
+ if (nsamples == 0)
return 0;
- num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
- devpriv->
- ao_buffer[buffer_index],
- num_bytes);
- devpriv->ao_dma_desc[buffer_index].transfer_size =
- cpu_to_le32(num_bytes);
+ nbytes = comedi_samples_to_bytes(s, nsamples);
+ devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
/* set end of chain bit so we catch underruns */
next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
next_bits |= PLX_END_OF_CHAIN_BIT;
@@ -2949,9 +2912,8 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
- devpriv->ao_count -= num_bytes;
- return num_bytes;
+ return nbytes;
}
static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
@@ -3016,11 +2978,14 @@ static void handle_ao_interrupt(struct comedi_device *dev,
}
if ((status & DAC_DONE_BIT)) {
- async->events |= COMEDI_CB_EOA;
- if (ao_stopped_by_error(dev, cmd))
+ if ((cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) ||
+ last_ao_dma_load_completed(dev))
+ async->events |= COMEDI_CB_EOA;
+ else
async->events |= COMEDI_CB_ERROR;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t handle_interrupt(int irq, void *d)
@@ -3191,7 +3156,9 @@ static void set_dac_interval_regs(struct comedi_device *dev,
static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int num_bytes;
+ struct comedi_subdevice *s = dev->write_subdev;
+ unsigned int nsamples;
+ unsigned int nbytes;
int i;
/* clear queue pointer too, since external queue has
@@ -3199,22 +3166,23 @@ static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
writew(0, devpriv->main_iobase + ADC_QUEUE_CLEAR_REG);
writew(0, devpriv->main_iobase + DAC_BUFFER_CLEAR_REG);
- num_bytes = (DAC_FIFO_SIZE / 2) * bytes_in_sample;
- if (cmd->stop_src == TRIG_COUNT &&
- num_bytes / bytes_in_sample > devpriv->ao_count)
- num_bytes = devpriv->ao_count * bytes_in_sample;
- num_bytes = cfc_read_array_from_buffer(dev->write_subdev,
- devpriv->ao_bounce_buffer,
- num_bytes);
- for (i = 0; i < num_bytes / bytes_in_sample; i++) {
+ nsamples = cb_pcidas64_ao_fill_buffer(dev, s,
+ devpriv->ao_bounce_buffer,
+ DAC_FIFO_SIZE);
+ if (nsamples == 0)
+ return -1;
+
+ for (i = 0; i < nsamples; i++) {
writew(devpriv->ao_bounce_buffer[i],
devpriv->main_iobase + DAC_FIFO_REG);
}
- devpriv->ao_count -= num_bytes / bytes_in_sample;
- if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count == 0)
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
return 0;
- num_bytes = load_ao_dma_buffer(dev, cmd);
- if (num_bytes == 0)
+
+ nbytes = load_ao_dma_buffer(dev, cmd);
+ if (nbytes == 0)
return -1;
load_ao_dma(dev, cmd);
@@ -3275,7 +3243,6 @@ static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
writew(0x0, devpriv->main_iobase + DAC_CONTROL0_REG);
devpriv->ao_dma_index = 0;
- devpriv->ao_count = cmd->stop_arg * cmd->chanlist_len;
set_dac_select_reg(dev, cmd);
set_dac_interval_regs(dev, cmd);
@@ -3580,9 +3547,6 @@ static void caldac_write(struct comedi_device *dev, unsigned int channel,
unsigned int value)
{
const struct pcidas64_board *thisboard = dev->board_ptr;
- struct pcidas64_private *devpriv = dev->private;
-
- devpriv->caldac_state[channel] = value;
switch (thisboard->layout) {
case LAYOUT_60XX:
@@ -3597,33 +3561,27 @@ static void caldac_write(struct comedi_device *dev, unsigned int channel,
}
}
-static int calib_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas64_calib_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcidas64_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
-
- /* return immediately if setting hasn't changed, since
- * programming these things is slow */
- if (devpriv->caldac_state[channel] == data[0])
- return 1;
-
- caldac_write(dev, channel, data[0]);
-
- return 1;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int calib_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ /*
+ * Programming the calib device is slow. Only write the
+ * last data value if the value has changed.
+ */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->caldac_state[channel];
+ if (s->readback[chan] != val) {
+ caldac_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static void ad8402_write(struct comedi_device *dev, unsigned int channel,
@@ -3635,8 +3593,6 @@ static void ad8402_write(struct comedi_device *dev, unsigned int channel,
unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
static const int ad8402_udelay = 1;
- devpriv->ad8402_state[channel] = value;
-
register_bits = SELECT_8402_64XX_BIT;
udelay(ad8402_udelay);
writew(register_bits, devpriv->main_iobase + CALIBRATION_REG);
@@ -3658,35 +3614,27 @@ static void ad8402_write(struct comedi_device *dev, unsigned int channel,
}
/* for pci-das6402/16, channel 0 is analog input gain and channel 1 is offset */
-static int ad8402_write_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int cb_pcidas64_ad8402_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pcidas64_private *devpriv = dev->private;
- int channel = CR_CHAN(insn->chanspec);
-
- /* return immediately if setting hasn't changed, since
- * programming these things is slow */
- if (devpriv->ad8402_state[channel] == data[0])
- return 1;
-
- devpriv->ad8402_state[channel] = data[0];
-
- ad8402_write(dev, channel, data[0]);
-
- return 1;
-}
+ unsigned int chan = CR_CHAN(insn->chanspec);
-static int ad8402_read_insn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct pcidas64_private *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
+ /*
+ * Programming the calib device is slow. Only write the
+ * last data value if the value has changed.
+ */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
- data[0] = devpriv->ad8402_state[channel];
+ if (s->readback[chan] != val) {
+ ad8402_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
- return 1;
+ return insn->n;
}
static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
@@ -3816,7 +3764,6 @@ static int setup_subdevices(struct comedi_device *dev)
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = thisboard->ao_range_table;
s->insn_write = ao_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -3849,7 +3796,7 @@ static int setup_subdevices(struct comedi_device *dev)
if (thisboard->layout == LAYOUT_64XX) {
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -3895,10 +3842,16 @@ static int setup_subdevices(struct comedi_device *dev)
s->maxdata = 0xfff;
else
s->maxdata = 0xff;
- s->insn_read = calib_read_insn;
- s->insn_write = calib_write_insn;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas64_calib_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
caldac_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
/* 2 channel ad8402 potentiometer */
s = &dev->subdevices[7];
@@ -3906,11 +3859,17 @@ static int setup_subdevices(struct comedi_device *dev)
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 2;
- s->insn_read = ad8402_read_insn;
- s->insn_write = ad8402_write_insn;
s->maxdata = 0xff;
- for (i = 0; i < s->n_chan; i++)
+ s->insn_write = cb_pcidas64_ad8402_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
ad8402_write(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index fe4d2544f3dc..70dd2c9eecdb 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -1,40 +1,45 @@
/*
- comedi/drivers/cb_pcimdas.c
- Comedi driver for Computer Boards PCIM-DAS1602/16
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/cb_pcimdas.c
+ * Comedi driver for Computer Boards PCIM-DAS1602/16 and PCIe-DAS1602/16
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: cb_pcimdas
-Description: Measurement Computing PCI Migration series boards
-Devices: [ComputerBoards] PCIM-DAS1602/16 (cb_pcimdas)
-Author: Richard Bytheway
-Updated: Wed, 13 Nov 2002 12:34:56 +0000
-Status: experimental
-
-Written to support the PCIM-DAS1602/16 on a 2.4 series kernel.
-
-Configuration Options:
- [0] - PCI bus number
- [1] - PCI slot number
-
-Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
-Only supports DIO, AO and simple AI in it's present form.
-No interrupts, multi channel or FIFO AI,
-although the card looks like it could support this.
-See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
-*/
+ * Driver: cb_pcimdas
+ * Description: Measurement Computing PCI Migration series boards
+ * Devices: [ComputerBoards] PCIM-DAS1602/16 (cb_pcimdas), PCIe-DAS1602/16
+ * Author: Richard Bytheway
+ * Updated: Mon, 13 Oct 2014 11:57:39 +0000
+ * Status: experimental
+ *
+ * Written to support the PCIM-DAS1602/16 and PCIe-DAS1602/16.
+ *
+ * Configuration Options:
+ * none
+ *
+ * Manual configuration of PCI(e) cards is not supported; they are configured
+ * automatically.
+ *
+ * Developed from cb_pcidas and skel by Richard Bytheway (mocelet@sucs.org).
+ * Only supports DIO, AO and simple AI in its present form.
+ * No interrupts, multi-channel or FIFO AI,
+ * although the card looks like it could support this.
+ *
+ * http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf
+ * http://www.mccdaq.com/PDFs/Manuals/pcie-das1602-16.pdf
+ */
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,7 +50,7 @@ See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
#include "plx9052.h"
#include "8255.h"
-/* Registers for the PCIM-DAS1602/16 */
+/* Registers for the PCIM-DAS1602/16 and PCIe-DAS1602/16 */
/* DAC Offsets */
#define ADC_TRIG 0
@@ -221,7 +226,6 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
/* ranges are hardware settable, but not software readable. */
s->range_table = &range_unknown;
s->insn_write = cb_pcimdas_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -251,7 +255,8 @@ static int cb_pcimdas_pci_probe(struct pci_dev *dev,
}
static const struct pci_device_id cb_pcimdas_pci_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0056) }, /* PCIM-DAS1602/16 */
+ { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0115) }, /* PCIe-DAS1602/16 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, cb_pcimdas_pci_table);
@@ -265,5 +270,5 @@ static struct pci_driver cb_pcimdas_pci_driver = {
module_comedi_pci_driver(cb_pcimdas_driver, cb_pcimdas_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for PCIM-DAS1602/16 and PCIe-DAS1602/16");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 8450c99af8b0..85b2f4ab1ba4 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -61,8 +61,7 @@ struct bonded_device {
};
struct comedi_bond_private {
-# define MAX_BOARD_NAME 256
- char name[MAX_BOARD_NAME];
+ char name[256];
struct bonded_device **devs;
unsigned ndevs;
unsigned nchans;
@@ -262,12 +261,10 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
/* Append dev:subdev to devpriv->name */
char buf[20];
- int left =
- MAX_BOARD_NAME - strlen(devpriv->name) - 1;
snprintf(buf, sizeof(buf), "%u:%u ",
bdev->minor, bdev->subdev);
- buf[sizeof(buf) - 1] = 0;
- strncat(devpriv->name, buf, left);
+ strlcat(devpriv->name, buf,
+ sizeof(devpriv->name));
}
}
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index ce2835972507..756be931c1a4 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -23,49 +23,6 @@
#include "../comedidev.h"
-static inline unsigned int cfc_bytes_per_scan(struct comedi_subdevice *s)
-{
- return comedi_bytes_per_scan(s);
-}
-
-static inline void cfc_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes)
-{
- comedi_inc_scan_progress(s, num_bytes);
-}
-
-static inline unsigned int cfc_write_array_to_buffer(struct comedi_subdevice *s,
- const void *data,
- unsigned int num_bytes)
-{
- return comedi_write_array_to_buffer(s, data, num_bytes);
-}
-
-static inline unsigned int cfc_write_to_buffer(struct comedi_subdevice *s,
- unsigned short data)
-{
- return comedi_write_array_to_buffer(s, &data, sizeof(data));
-};
-
-static inline unsigned int cfc_write_long_to_buffer(struct comedi_subdevice *s,
- unsigned int data)
-{
- return comedi_write_array_to_buffer(s, &data, sizeof(data));
-};
-
-static inline unsigned int
-cfc_read_array_from_buffer(struct comedi_subdevice *s, void *data,
- unsigned int num_bytes)
-{
- return comedi_read_array_from_buffer(s, data, num_bytes);
-}
-
-static inline unsigned int cfc_handle_events(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- return comedi_handle_events(dev, s);
-}
-
/**
* cfc_check_trigger_src() - trivially validate a comedi_cmd trigger source
* @src: pointer to the trigger source to validate
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index bf002988192d..3bac903c8627 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -225,10 +225,9 @@ static irqreturn_t parport_interrupt(int irq, void *d)
if (!(ctrl & PARPORT_CTRL_IRQ_ENA))
return IRQ_NONE;
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 00c03df72523..e56525a1c8f3 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -52,18 +52,23 @@ zero volts).
#include "comedi_fc.h"
#include <linux/timer.h>
+#include <linux/ktime.h>
#define N_CHANS 8
+enum waveform_state_bits {
+ WAVEFORM_AI_RUNNING = 0
+};
+
/* Data unique to this driver */
struct waveform_private {
struct timer_list timer;
- struct timeval last; /* time last timer interrupt occurred */
+ ktime_t last; /* time last timer interrupt occurred */
unsigned int uvolt_amplitude; /* waveform amplitude in microvolts */
unsigned long usec_period; /* waveform period in microseconds */
unsigned long usec_current; /* current time (mod waveform period) */
unsigned long usec_remainder; /* usec since last scan */
- unsigned long ai_count; /* number of conversions remaining */
+ unsigned long state_bits;
unsigned int scan_period; /* scan period in usec */
unsigned int convert_period; /* conversion period in usec */
unsigned int ao_loopbacks[N_CHANS];
@@ -164,36 +169,29 @@ static void waveform_ai_interrupt(unsigned long arg)
{
struct comedi_device *dev = (struct comedi_device *)arg;
struct waveform_private *devpriv = dev->private;
- struct comedi_async *async = dev->read_subdev->async;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int i, j;
/* all times in microsec */
unsigned long elapsed_time;
unsigned int num_scans;
- struct timeval now;
- bool stopping = false;
+ ktime_t now;
+
+ /* check command is still active */
+ if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
+ return;
- do_gettimeofday(&now);
+ now = ktime_get();
- elapsed_time =
- 1000000 * (now.tv_sec - devpriv->last.tv_sec) + now.tv_usec -
- devpriv->last.tv_usec;
+ elapsed_time = ktime_to_us(ktime_sub(now, devpriv->last));
devpriv->last = now;
num_scans =
(devpriv->usec_remainder + elapsed_time) / devpriv->scan_period;
devpriv->usec_remainder =
(devpriv->usec_remainder + elapsed_time) % devpriv->scan_period;
- if (cmd->stop_src == TRIG_COUNT) {
- unsigned int remaining = cmd->stop_arg - devpriv->ai_count;
-
- if (num_scans >= remaining) {
- /* about to finish */
- num_scans = remaining;
- stopping = true;
- }
- }
-
+ num_scans = comedi_nscans_left(s, num_scans);
for (i = 0; i < num_scans; i++) {
for (j = 0; j < cmd->chanlist_len; j++) {
unsigned short sample;
@@ -203,20 +201,19 @@ static void waveform_ai_interrupt(unsigned long arg)
devpriv->usec_current +
i * devpriv->scan_period +
j * devpriv->convert_period);
- cfc_write_to_buffer(dev->read_subdev, sample);
+ comedi_buf_write_samples(s, &sample, 1);
}
}
- devpriv->ai_count += i;
devpriv->usec_current += elapsed_time;
devpriv->usec_current %= devpriv->usec_period;
- if (stopping)
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
async->events |= COMEDI_CB_EOA;
else
mod_timer(&devpriv->timer, jiffies + 1);
- comedi_event(dev, dev->read_subdev);
+ comedi_handle_events(dev, s);
}
static int waveform_ai_cmdtest(struct comedi_device *dev,
@@ -308,7 +305,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
return -1;
}
- devpriv->ai_count = 0;
devpriv->scan_period = cmd->scan_begin_arg / nano_per_micro;
if (cmd->convert_src == TRIG_NOW)
@@ -316,11 +312,16 @@ static int waveform_ai_cmd(struct comedi_device *dev,
else /* TRIG_TIMER */
devpriv->convert_period = cmd->convert_arg / nano_per_micro;
- do_gettimeofday(&devpriv->last);
- devpriv->usec_current = devpriv->last.tv_usec % devpriv->usec_period;
+ devpriv->last = ktime_get();
+ devpriv->usec_current =
+ ((u32)ktime_to_us(devpriv->last)) % devpriv->usec_period;
devpriv->usec_remainder = 0;
devpriv->timer.expires = jiffies + 1;
+ /* mark command as active */
+ smp_mb__before_atomic();
+ set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+ smp_mb__after_atomic();
add_timer(&devpriv->timer);
return 0;
}
@@ -330,7 +331,11 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- del_timer_sync(&devpriv->timer);
+ /* mark command as no longer active */
+ clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+ smp_mb__after_atomic();
+ /* cannot call del_timer_sync() as this may be called from the timer routine */
+ del_timer(&devpriv->timer);
return 0;
}
@@ -405,7 +410,7 @@ static int waveform_attach(struct comedi_device *dev,
dev->write_subdev = s;
/* analog output subdevice (loopback) */
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = N_CHANS;
s->maxdata = 0xffff;
s->range_table = &waveform_ai_ranges;
@@ -432,7 +437,7 @@ static void waveform_detach(struct comedi_device *dev)
struct waveform_private *devpriv = dev->private;
if (devpriv)
- waveform_ai_cancel(dev, dev->read_subdev);
+ del_timer_sync(&devpriv->timer);
}
static struct comedi_driver waveform_driver = {
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index 34cbe83f0ce7..beb36c8dd00a 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -129,7 +129,6 @@ static int dac02_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = &das02_ao_ranges;
s->insn_write = dac02_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index e5b5a8133b34..96697fbb5239 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -707,7 +707,6 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
s->n_chan = 2;
s->maxdata = 0xffff;
s->insn_write = daqboard2000_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
s->range_table = &range_bipolar10;
result = comedi_alloc_subdev_readback(s);
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index bdb671a66e22..20a9f0eb72b5 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -474,7 +474,6 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
s->maxdata = (1 << thisboard->ao_nbits) - 1;
s->range_table = &range_bipolar5;
s->insn_write = das08_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -507,7 +506,7 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
/* do */
if (thisboard->do_nchan) {
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = thisboard->do_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 2d8e86cec47a..2436057304a3 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -541,6 +541,7 @@ static void das16_interrupt(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned long spin_flags;
unsigned long dma_flags;
+ unsigned int nsamples;
int num_bytes, residue;
int buffer_index;
@@ -583,21 +584,25 @@ static void das16_interrupt(struct comedi_device *dev)
spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- cfc_write_array_to_buffer(s,
- devpriv->dma_buffer[buffer_index], num_bytes);
+ nsamples = comedi_bytes_to_samples(s, num_bytes);
+ comedi_buf_write_samples(s, devpriv->dma_buffer[buffer_index],
+ nsamples);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void das16_timer_interrupt(unsigned long arg)
{
struct comedi_device *dev = (struct comedi_device *)arg;
struct das16_private_struct *devpriv = dev->private;
+ unsigned long flags;
das16_interrupt(dev);
+ spin_lock_irqsave(&dev->spinlock, flags);
if (devpriv->timer_running)
mod_timer(&devpriv->timer, jiffies + timer_period());
+ spin_unlock_irqrestore(&dev->spinlock, flags);
}
static int das16_ai_check_chanlist(struct comedi_device *dev,
@@ -764,7 +769,7 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
return -1;
}
- devpriv->adc_byte_count = cmd->stop_arg * cfc_bytes_per_scan(s);
+ devpriv->adc_byte_count = cmd->stop_arg * comedi_bytes_per_scan(s);
if (devpriv->can_burst)
outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
@@ -814,7 +819,8 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
enable_dma(devpriv->dma_chan);
release_dma_lock(flags);
- /* set up interrupt */
+ /* set up timer */
+ spin_lock_irqsave(&dev->spinlock, flags);
devpriv->timer_running = 1;
devpriv->timer.expires = jiffies + timer_period();
add_timer(&devpriv->timer);
@@ -823,6 +829,7 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->can_burst)
outb(0, dev->iobase + DAS1600_CONV_REG);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
@@ -856,8 +863,9 @@ static void das16_ai_munge(struct comedi_device *dev,
unsigned int num_bytes,
unsigned int start_chan_index)
{
- unsigned int i, num_samples = num_bytes / sizeof(short);
unsigned short *data = array;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
+ unsigned int i;
for (i = 0; i < num_samples; i++) {
data[i] = le16_to_cpu(data[i]);
@@ -1167,7 +1175,6 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = devpriv->user_ao_range_table;
s->insn_write = das16_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -1226,6 +1233,8 @@ static void das16_detach(struct comedi_device *dev)
int i;
if (devpriv) {
+ if (devpriv->timer.data)
+ del_timer_sync(&devpriv->timer);
if (dev->iobase)
das16_reset(dev);
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 24b63c452f51..80f41b7e8273 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -442,8 +442,7 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
num_samples = FIFO_SIZE;
insw(dev->iobase, devpriv->ai_buffer, num_samples);
munge_sample_array(devpriv->ai_buffer, num_samples);
- cfc_write_array_to_buffer(s, devpriv->ai_buffer,
- num_samples * sizeof(short));
+ comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
devpriv->adc_count += num_samples;
if (cmd->stop_src == TRIG_COUNT) {
@@ -460,7 +459,7 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
dev_err(dev->class_dev, "fifo overflow\n");
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -598,7 +597,7 @@ static int das16m1_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
/* do */
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index a53d87ce9b14..be825d21a185 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -421,7 +421,6 @@ static const struct das1800_board das1800_boards[] = {
};
struct das1800_private {
- unsigned int count; /* number of data points left to be taken */
unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */
unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */
int irq_dma_bits; /* bits for control register b */
@@ -479,42 +478,33 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
- int numPoints = 0; /* number of points to read */
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
- numPoints = FIFO_SIZE / 2;
- /* if we only need some of the points */
- if (cmd->stop_src == TRIG_COUNT && devpriv->count < numPoints)
- numPoints = devpriv->count;
- insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, numPoints);
- munge_data(dev, devpriv->ai_buf0, numPoints);
- cfc_write_array_to_buffer(s, devpriv->ai_buf0,
- numPoints * sizeof(devpriv->ai_buf0[0]));
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count -= numPoints;
+ insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, nsamples);
+ munge_data(dev, devpriv->ai_buf0, nsamples);
+ comedi_buf_write_samples(s, devpriv->ai_buf0, nsamples);
}
static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct das1800_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
unsigned short dpnt;
int unipolar;
- struct comedi_cmd *cmd = &s->async->cmd;
unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
- if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0)
- break;
dpnt = inw(dev->iobase + DAS1800_FIFO);
/* convert to unsigned type if we are in a bipolar mode */
if (!unipolar)
;
dpnt = munge_bipolar_sample(dev, dpnt);
- cfc_write_to_buffer(s, dpnt);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count--;
+ comedi_buf_write_samples(s, &dpnt, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ break;
}
}
@@ -525,8 +515,8 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
unsigned int channel, uint16_t *buffer)
{
struct das1800_private *devpriv = dev->private;
- unsigned int num_bytes, num_samples;
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int nbytes;
+ unsigned int nsamples;
disable_dma(channel);
@@ -535,17 +525,12 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
clear_dma_ff(channel);
/* figure out how many points to read */
- num_bytes = devpriv->dma_transfer_size - get_dma_residue(channel);
- num_samples = num_bytes / sizeof(short);
-
- /* if we only need some of the points */
- if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_samples)
- num_samples = devpriv->count;
+ nbytes = devpriv->dma_transfer_size - get_dma_residue(channel);
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, buffer, num_samples);
- cfc_write_array_to_buffer(s, buffer, num_bytes);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count -= num_samples;
+ munge_data(dev, buffer, nsamples);
+ comedi_buf_write_samples(s, buffer, nsamples);
}
/* flushes remaining data from board when external trigger has stopped acquisition
@@ -649,14 +634,13 @@ static void das1800_ai_handler(struct comedi_device *dev)
das1800_handle_fifo_not_empty(dev, s);
}
- async->events |= COMEDI_CB_BLOCK;
/* if the card's fifo has overflowed */
if (status & OVF) {
/* clear OVF interrupt bit */
outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS);
dev_err(dev->class_dev, "FIFO overflow\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
/* stop taking data if appropriate */
@@ -670,11 +654,12 @@ static void das1800_ai_handler(struct comedi_device *dev)
else
das1800_handle_fifo_not_empty(dev, s);
async->events |= COMEDI_CB_EOA;
- } else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */
+ } else if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int das1800_ai_poll(struct comedi_device *dev,
@@ -1102,9 +1087,6 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
/* interrupt fifo half full */
devpriv->irq_dma_bits |= FIMD;
}
- /* determine how many conversions we need */
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
das1800_cancel(dev, s);
@@ -1515,7 +1497,7 @@ static int das1800_attach(struct comedi_device *dev,
/* do */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = thisboard->do_n_chan;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index ab6e40608885..780f4f646ea0 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include "../comedidev.h"
+#include "comedi_fc.h"
#include "8253.h"
/*
@@ -192,26 +193,197 @@ static void das6402_enable_counter(struct comedi_device *dev, bool load)
}
}
+static unsigned int das6402_ai_read_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
+
+ val = inw(dev->iobase + DAS6402_AI_DATA_REG);
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+ return val;
+}
+
static irqreturn_t das6402_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
+
+ status = inb(dev->iobase + DAS6402_STATUS_REG);
+ if ((status & DAS6402_STATUS_INT) == 0)
+ return IRQ_NONE;
+
+ if (status & DAS6402_STATUS_FFULL) {
+ async->events |= COMEDI_CB_OVERFLOW;
+ } else if (status & DAS6402_STATUS_FFNE) {
+ unsigned int val;
+
+ val = das6402_ai_read_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
das6402_clear_all_interrupts(dev);
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
+static void das6402_ai_set_mode(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec,
+ unsigned int mode)
+{
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+
+ mode |= DAS6402_MODE_RANGE(range);
+ if (aref == AREF_GROUND)
+ mode |= DAS6402_MODE_SE;
+ if (comedi_range_is_unipolar(s, range))
+ mode |= DAS6402_MODE_UNI;
+
+ das6402_set_mode(dev, mode);
+}
+
static int das6402_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- return -EINVAL;
+ struct das6402_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan_lo = CR_CHAN(cmd->chanlist[0]);
+ unsigned int chan_hi = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]);
+
+ das6402_ai_set_mode(dev, s, cmd->chanlist[0], DAS6402_MODE_FIFONEPTY);
+
+ /* load the mux for chanlist conversion */
+ outw(DAS6402_AI_MUX_HI(chan_hi) | DAS6402_AI_MUX_LO(chan_lo),
+ dev->iobase + DAS6402_AI_MUX_REG);
+
+ das6402_enable_counter(dev, true);
+
+ /* enable interrupt and pacer trigger */
+ outb(DAS6402_CTRL_INTE |
+ DAS6402_CTRL_IRQ(devpriv->irq) |
+ DAS6402_CTRL_PACER_TRIG, dev->iobase + DAS6402_CTRL_REG);
+
+ return 0;
+}
+
+static int das6402_ai_check_chanlist(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ unsigned int chan0 = CR_CHAN(cmd->chanlist[0]);
+ unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
+ unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
+ int i;
+
+ for (i = 1; i < cmd->chanlist_len; i++) {
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
+ unsigned int aref = CR_AREF(cmd->chanlist[i]);
+
+ if (chan != chan0 + i) {
+ dev_dbg(dev->class_dev,
+ "chanlist must be consecutive\n");
+ return -EINVAL;
+ }
+
+ if (range != range0) {
+ dev_dbg(dev->class_dev,
+ "chanlist must have the same range\n");
+ return -EINVAL;
+ }
+
+ if (aref != aref0) {
+ dev_dbg(dev->class_dev,
+ "chanlist must have the same reference\n");
+ return -EINVAL;
+ }
+
+ if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
+ dev_dbg(dev->class_dev,
+ "chanlist differential channel to large\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int das6402_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- return -EINVAL;
+ struct das6402_private *devpriv = dev->private;
+ int err = 0;
+ unsigned int arg;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg, 10000);
+ err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ if (cmd->convert_src == TRIG_TIMER) {
+ arg = cmd->convert_arg;
+ i8253_cascade_ns_to_timer(I8254_OSC_BASE_10MHZ,
+ &devpriv->divider1,
+ &devpriv->divider2,
+ &arg, cmd->flags);
+ err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+ if (cmd->chanlist && cmd->chanlist_len > 0)
+ err |= das6402_ai_check_chanlist(dev, s, cmd);
+
+ if (err)
+ return 5;
+
+ return 0;
}
static int das6402_ai_cancel(struct comedi_device *dev,
@@ -246,26 +418,17 @@ static int das6402_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int range = CR_RANGE(insn->chanspec);
unsigned int aref = CR_AREF(insn->chanspec);
- unsigned int val;
int ret;
int i;
- val = DAS6402_MODE_RANGE(range) | DAS6402_MODE_POLLED;
- if (aref == AREF_DIFF) {
- if (chan > s->n_chan / 2)
- return -EINVAL;
- } else {
- val |= DAS6402_MODE_SE;
- }
- if (comedi_range_is_unipolar(s, range))
- val |= DAS6402_MODE_UNI;
+ if (aref == AREF_DIFF && chan > (s->n_chan / 2))
+ return -EINVAL;
/* enable software conversion trigger */
outb(DAS6402_CTRL_SOFT_TRIG, dev->iobase + DAS6402_CTRL_REG);
- das6402_set_mode(dev, val);
+ das6402_ai_set_mode(dev, s, insn->chanspec, DAS6402_MODE_POLLED);
/* load the mux for single channel conversion */
outw(DAS6402_AI_MUX_HI(chan) | DAS6402_AI_MUX_LO(chan),
@@ -279,12 +442,7 @@ static int das6402_ai_insn_read(struct comedi_device *dev,
if (ret)
break;
- val = inw(dev->iobase + DAS6402_AI_DATA_REG);
-
- if (s->maxdata == 0x0fff)
- val >>= 4;
-
- data[i] = val;
+ data[i] = das6402_ai_read_sample(dev, s);
}
das6402_ai_clear_eoc(dev);
@@ -497,7 +655,7 @@ static int das6402_attach(struct comedi_device *dev,
/* Analog Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = board->maxdata;
s->range_table = &das6402_ao_ranges;
@@ -520,7 +678,7 @@ static int das6402_attach(struct comedi_device *dev,
/* Digital Input subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index d75e5528258c..e5bdc2423445 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -219,7 +219,6 @@ static const struct das800_board das800_boards[] = {
};
struct das800_private {
- unsigned int count; /* number of data points left to be taken */
unsigned int divisor1; /* counter 1 value for timed conversions */
unsigned int divisor2; /* counter 2 value for timed conversions */
unsigned int do_bits; /* digital output bits */
@@ -286,9 +285,6 @@ static void das800_set_frequency(struct comedi_device *dev)
static int das800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct das800_private *devpriv = dev->private;
-
- devpriv->count = 0;
das800_disable(dev);
return 0;
}
@@ -399,7 +395,6 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct das800_board *thisboard = dev->board_ptr;
- struct das800_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int gain = CR_RANGE(cmd->chanlist[0]);
@@ -422,11 +417,6 @@ static int das800_ai_do_cmd(struct comedi_device *dev,
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->count = cmd->stop_arg * cmd->chanlist_len;
- else /* TRIG_NONE */
- devpriv->count = 0;
-
/* enable auto channel scan, send interrupts on end of conversion
* and set clock source to internal or external
*/
@@ -509,25 +499,28 @@ static irqreturn_t das800_interrupt(int irq, void *d)
if (s->maxdata == 0x0fff)
val >>= 4; /* 12-bit sample */
- /* if there are more data points to collect */
- if (cmd->stop_src == TRIG_NONE || devpriv->count > 0) {
- /* write data point to buffer */
- cfc_write_to_buffer(s, val & s->maxdata);
- devpriv->count--;
+ val &= s->maxdata;
+ comedi_buf_write_samples(s, &val, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ break;
}
}
- async->events |= COMEDI_CB_BLOCK;
if (fifo_overflow) {
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
- if (cmd->stop_src == TRIG_NONE || devpriv->count > 0) {
- /* Re-enable card's interrupt.
- * We already have spinlock, so indirect addressing is safe */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ /*
+ * Re-enable card's interrupt.
+ * We already have spinlock, so indirect addressing is safe
+ */
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -535,9 +528,8 @@ static irqreturn_t das800_interrupt(int irq, void *d)
/* otherwise, stop taking data */
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
das800_disable(dev);
- async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -738,7 +730,7 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Digital Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 7215e09305cf..6df298a99cc6 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -1,125 +1,139 @@
/*
- comedi/drivers/dmm32at.c
- Diamond Systems mm32at code for a Comedi driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * dmm32at.c
+ * Diamond Systems Diamond-MM-32-AT Comedi driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: dmm32at
-Description: Diamond Systems mm32at driver.
-Devices:
-Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
-Updated: Fri Jun 4 09:13:24 CDT 2004
-Status: experimental
-
-This driver is for the Diamond Systems MM-32-AT board
-http://www.diamondsystems.com/products/diamondmm32at It is being used
-on serveral projects inside NASA, without problems so far. For analog
-input commands, TRIG_EXT is not yet supported at all..
-
-Configuration Options:
- comedi_config /dev/comedi0 dmm32at baseaddr,irq
-*/
+ * Driver: dmm32at
+ * Description: Diamond Systems Diamond-MM-32-AT
+ * Devices: (Diamond Systems) Diamond-MM-32-AT [dmm32at]
+ * Author: Perry J. Piplani <perry.j.piplani@nasa.gov>
+ * Updated: Fri Jun 4 09:13:24 CDT 2004
+ * Status: experimental
+ *
+ * Configuration Options:
+ * comedi_config /dev/comedi0 dmm32at baseaddr,irq
+ *
+ * This driver is for the Diamond Systems MM-32-AT board
+ * http://www.diamondsystems.com/products/diamondmm32at
+ *
+ * It is being used on several projects inside NASA, without
+ * problems so far. For analog input commands, TRIG_EXT is not
+ * yet supported.
+ */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
+#include "8255.h"
#include "comedi_fc.h"
/* Board register addresses */
-#define DMM32AT_CONV 0x00
-#define DMM32AT_AILSB 0x00
-#define DMM32AT_AUXDOUT 0x01
-#define DMM32AT_AIMSB 0x01
-#define DMM32AT_AILOW 0x02
-#define DMM32AT_AIHIGH 0x03
-
-#define DMM32AT_DACSTAT 0x04
-#define DMM32AT_DACLSB_REG 0x04
-#define DMM32AT_DACMSB_REG 0x05
-#define DMM32AT_DACMSB_CHAN(x) ((x) << 6)
-
-#define DMM32AT_FIFOCNTRL 0x07
-#define DMM32AT_FIFOSTAT 0x07
-
-#define DMM32AT_CNTRL 0x08
-#define DMM32AT_AISTAT 0x08
-
-#define DMM32AT_INTCLOCK 0x09
-
-#define DMM32AT_CNTRDIO 0x0a
-
-#define DMM32AT_AICONF 0x0b
-#define DMM32AT_AIRBACK 0x0b
+#define DMM32AT_AI_START_CONV_REG 0x00
+#define DMM32AT_AI_LSB_REG 0x00
+#define DMM32AT_AUX_DOUT_REG 0x01
+#define DMM32AT_AUX_DOUT2 (1 << 2) /* J3.42 - OUT2 (OUT2EN) */
+#define DMM32AT_AUX_DOUT1 (1 << 1) /* J3.43 */
+#define DMM32AT_AUX_DOUT0 (1 << 0) /* J3.44 - OUT0 (OUT0EN) */
+#define DMM32AT_AI_MSB_REG 0x01
+#define DMM32AT_AI_LO_CHAN_REG 0x02
+#define DMM32AT_AI_HI_CHAN_REG 0x03
+#define DMM32AT_AUX_DI_REG 0x04
+#define DMM32AT_AUX_DI_DACBUSY (1 << 7)
+#define DMM32AT_AUX_DI_CALBUSY (1 << 6)
+#define DMM32AT_AUX_DI3 (1 << 3) /* J3.45 - ADCLK (CLKSEL) */
+#define DMM32AT_AUX_DI2 (1 << 2) /* J3.46 - GATE12 (GT12EN) */
+#define DMM32AT_AUX_DI1 (1 << 1) /* J3.47 - GATE0 (GT0EN) */
+#define DMM32AT_AUX_DI0 (1 << 0) /* J3.48 - CLK0 (SRC0) */
+#define DMM32AT_AO_LSB_REG 0x04
+#define DMM32AT_AO_MSB_REG 0x05
+#define DMM32AT_AO_MSB_DACH(x) ((x) << 6)
+#define DMM32AT_FIFO_DEPTH_REG 0x06
+#define DMM32AT_FIFO_CTRL_REG 0x07
+#define DMM32AT_FIFO_CTRL_FIFOEN (1 << 3)
+#define DMM32AT_FIFO_CTRL_SCANEN (1 << 2)
+#define DMM32AT_FIFO_CTRL_FIFORST (1 << 1)
+#define DMM32AT_FIFO_STATUS_REG 0x07
+#define DMM32AT_FIFO_STATUS_EF (1 << 7)
+#define DMM32AT_FIFO_STATUS_HF (1 << 6)
+#define DMM32AT_FIFO_STATUS_FF (1 << 5)
+#define DMM32AT_FIFO_STATUS_OVF (1 << 4)
+#define DMM32AT_FIFO_STATUS_FIFOEN (1 << 3)
+#define DMM32AT_FIFO_STATUS_SCANEN (1 << 2)
+#define DMM32AT_FIFO_STATUS_PAGE_MASK (3 << 0)
+#define DMM32AT_CTRL_REG 0x08
+#define DMM32AT_CTRL_RESETA (1 << 5)
+#define DMM32AT_CTRL_RESETD (1 << 4)
+#define DMM32AT_CTRL_INTRST (1 << 3)
+#define DMM32AT_CTRL_PAGE_8254 (0 << 0)
+#define DMM32AT_CTRL_PAGE_8255 (1 << 0)
+#define DMM32AT_CTRL_PAGE_CALIB (3 << 0)
+#define DMM32AT_AI_STATUS_REG 0x08
+#define DMM32AT_AI_STATUS_STS (1 << 7)
+#define DMM32AT_AI_STATUS_SD1 (1 << 6)
+#define DMM32AT_AI_STATUS_SD0 (1 << 5)
+#define DMM32AT_AI_STATUS_ADCH_MASK (0x1f << 0)
+#define DMM32AT_INTCLK_REG 0x09
+#define DMM32AT_INTCLK_ADINT (1 << 7)
+#define DMM32AT_INTCLK_DINT (1 << 6)
+#define DMM32AT_INTCLK_TINT (1 << 5)
+#define DMM32AT_INTCLK_CLKEN (1 << 1) /* 1=see below 0=software */
+#define DMM32AT_INTCLK_CLKSEL (1 << 0) /* 1=OUT2 0=EXTCLK */
+#define DMM32AT_CTRDIO_CFG_REG 0x0a
+#define DMM32AT_CTRDIO_CFG_FREQ12 (1 << 7) /* CLK12 1=100KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_FREQ0 (1 << 6) /* CLK0 1=10KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_OUT2EN (1 << 5) /* J3.42 1=OUT2 is DOUT2 */
+#define DMM32AT_CTRDIO_CFG_OUT0EN (1 << 4) /* J3.44 1=OUT0 is DOUT0 */
+#define DMM32AT_CTRDIO_CFG_GT0EN (1 << 2) /* J3.47 1=DIN1 is GATE0 */
+#define DMM32AT_CTRDIO_CFG_SRC0 (1 << 1) /* CLK0 is 0=FREQ0 1=J3.48 */
+#define DMM32AT_CTRDIO_CFG_GT12EN (1 << 0) /* J3.46 1=DIN2 is GATE12 */
+#define DMM32AT_AI_CFG_REG 0x0b
+#define DMM32AT_AI_CFG_SCINT_20US (0 << 4)
+#define DMM32AT_AI_CFG_SCINT_15US (1 << 4)
+#define DMM32AT_AI_CFG_SCINT_10US (2 << 4)
+#define DMM32AT_AI_CFG_SCINT_5US (3 << 4)
+#define DMM32AT_AI_CFG_RANGE (1 << 3) /* 0=5V 1=10V */
+#define DMM32AT_AI_CFG_ADBU (1 << 2) /* 0=bipolar 1=unipolar */
+#define DMM32AT_AI_CFG_GAIN(x) ((x) << 0)
+#define DMM32AT_AI_READBACK_REG 0x0b
+#define DMM32AT_AI_READBACK_WAIT (1 << 7) /* DMM32AT_AI_STATUS_STS */
+#define DMM32AT_AI_READBACK_RANGE (1 << 3)
+#define DMM32AT_AI_READBACK_ADBU (1 << 2)
+#define DMM32AT_AI_READBACK_GAIN_MASK (3 << 0)
#define DMM32AT_CLK1 0x0d
#define DMM32AT_CLK2 0x0e
#define DMM32AT_CLKCT 0x0f
-#define DMM32AT_DIOA 0x0c
-#define DMM32AT_DIOB 0x0d
-#define DMM32AT_DIOC 0x0e
-#define DMM32AT_DIOCONF 0x0f
+#define DMM32AT_8255_IOBASE 0x0c /* Page 1 registers */
/* Board register values. */
-/* DMM32AT_DACSTAT 0x04 */
-#define DMM32AT_DACBUSY 0x80
-
-/* DMM32AT_FIFOCNTRL 0x07 */
-#define DMM32AT_FIFORESET 0x02
-#define DMM32AT_SCANENABLE 0x04
-
-/* DMM32AT_CNTRL 0x08 */
-#define DMM32AT_RESET 0x20
-#define DMM32AT_INTRESET 0x08
-#define DMM32AT_CLKACC 0x00
-#define DMM32AT_DIOACC 0x01
-
-/* DMM32AT_AISTAT 0x08 */
-#define DMM32AT_STATUS 0x80
-
-/* DMM32AT_INTCLOCK 0x09 */
-#define DMM32AT_ADINT 0x80
-#define DMM32AT_CLKSEL 0x03
-
-/* DMM32AT_CNTRDIO 0x0a */
-#define DMM32AT_FREQ12 0x80
-
-/* DMM32AT_AICONF 0x0b */
+/* DMM32AT_AI_CFG_REG 0x0b */
#define DMM32AT_RANGE_U10 0x0c
#define DMM32AT_RANGE_U5 0x0d
#define DMM32AT_RANGE_B10 0x08
#define DMM32AT_RANGE_B5 0x00
-#define DMM32AT_SCINT_20 0x00
-#define DMM32AT_SCINT_15 0x10
-#define DMM32AT_SCINT_10 0x20
-#define DMM32AT_SCINT_5 0x30
/* DMM32AT_CLKCT 0x0f */
#define DMM32AT_CLKCT1 0x56 /* mode3 counter 1 - write low byte only */
#define DMM32AT_CLKCT2 0xb6 /* mode3 counter 2 - write high and low byte */
-/* DMM32AT_DIOCONF 0x0f */
-#define DMM32AT_DIENABLE 0x80
-#define DMM32AT_DIRA 0x10
-#define DMM32AT_DIRB 0x02
-#define DMM32AT_DIRCL 0x01
-#define DMM32AT_DIRCH 0x08
-
/* board AI ranges in comedi structure */
static const struct comedi_lrange dmm32at_airanges = {
4, {
@@ -150,12 +164,36 @@ static const struct comedi_lrange dmm32at_aoranges = {
}
};
-struct dmm32at_private {
- int data;
- int ai_inuse;
- unsigned int ai_scans_left;
- unsigned char dio_config;
-};
+static void dmm32at_ai_set_chanspec(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec, int nchan)
+{
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int last_chan = (chan + nchan - 1) % s->n_chan;
+
+ outb(DMM32AT_FIFO_CTRL_FIFORST, dev->iobase + DMM32AT_FIFO_CTRL_REG);
+
+ if (nchan > 1)
+ outb(DMM32AT_FIFO_CTRL_SCANEN,
+ dev->iobase + DMM32AT_FIFO_CTRL_REG);
+
+ outb(chan, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ outb(last_chan, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
+ outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AI_CFG_REG);
+}
+
+static unsigned int dmm32at_ai_get_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
+
+ val = inb(dev->iobase + DMM32AT_AI_LSB_REG);
+ val |= (inb(dev->iobase + DMM32AT_AI_MSB_REG) << 8);
+
+ /* munge two's complement value to offset binary */
+ return comedi_offset_munge(s, val);
+}
static int dmm32at_ai_status(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -165,75 +203,39 @@ static int dmm32at_ai_status(struct comedi_device *dev,
unsigned char status;
status = inb(dev->iobase + context);
- if ((status & DMM32AT_STATUS) == 0)
+ if ((status & DMM32AT_AI_STATUS_STS) == 0)
return 0;
return -EBUSY;
}
-static int dmm32at_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int dmm32at_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int n;
- unsigned int d;
- unsigned short msb, lsb;
- unsigned char chan;
- int range;
int ret;
+ int i;
- /* get the channel and range number */
-
- chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1);
- range = CR_RANGE(insn->chanspec);
-
- /* zero scan and fifo control and reset fifo */
- outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* write the ai channel range regs */
- outb(chan, dev->iobase + DMM32AT_AILOW);
- outb(chan, dev->iobase + DMM32AT_AIHIGH);
- /* set the range bits */
- outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
+ dmm32at_ai_set_chanspec(dev, s, insn->chanspec, 1);
/* wait for circuit to settle */
- ret = comedi_timeout(dev, s, insn, dmm32at_ai_status, DMM32AT_AIRBACK);
+ ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
+ DMM32AT_AI_READBACK_REG);
if (ret)
return ret;
- /* convert n samples */
- for (n = 0; n < insn->n; n++) {
- /* trigger conversion */
- outb(0xff, dev->iobase + DMM32AT_CONV);
+ for (i = 0; i < insn->n; i++) {
+ outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
- /* wait for conversion to end */
ret = comedi_timeout(dev, s, insn, dmm32at_ai_status,
- DMM32AT_AISTAT);
+ DMM32AT_AI_STATUS_REG);
if (ret)
return ret;
- /* read data */
- lsb = inb(dev->iobase + DMM32AT_AILSB);
- msb = inb(dev->iobase + DMM32AT_AIMSB);
-
- /* invert sign bit to make range unsigned, this is an
- idiosyncrasy of the diamond board, it return
- conversions as a signed value, i.e. -32768 to
- 32767, flipping the bit and interpreting it as
- signed gives you a range of 0 to 65535 which is
- used by comedi */
- d = ((msb ^ 0x0080) << 8) + lsb;
-
- data[n] = d;
+ data[i] = dmm32at_ai_get_sample(dev, s);
}
- /* return the number of samples read/written */
- return n;
-}
-
-static int dmm32at_ns_to_timer(unsigned int *ns, unsigned int flags)
-{
- /* trivial timer */
- return *ns;
+ return insn->n;
}
static int dmm32at_ai_check_chanlist(struct comedi_device *dev,
@@ -273,10 +275,8 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
- err |= cfc_check_trigger_src(&cmd->scan_begin_src,
- TRIG_TIMER /*| TRIG_EXT */);
- err |= cfc_check_trigger_src(&cmd->convert_src,
- TRIG_TIMER /*| TRIG_EXT */);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -285,8 +285,6 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
- err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
- err |= cfc_check_trigger_is_unique(cmd->convert_src);
err |= cfc_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
@@ -298,67 +296,32 @@ static int dmm32at_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
-#define MAX_SCAN_SPEED 1000000 /* in nanoseconds */
-#define MIN_SCAN_SPEED 1000000000 /* in nanoseconds */
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- MAX_SCAN_SPEED);
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
- MIN_SCAN_SPEED);
- } else {
- /* external trigger */
- /* should be level/edge, hi/lo specification here */
- /* should specify multiple external triggers */
- err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 9);
- }
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
+ err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
- if (cmd->convert_src == TRIG_TIMER) {
- if (cmd->convert_arg >= 17500)
- cmd->convert_arg = 20000;
- else if (cmd->convert_arg >= 12500)
- cmd->convert_arg = 15000;
- else if (cmd->convert_arg >= 7500)
- cmd->convert_arg = 10000;
- else
- cmd->convert_arg = 5000;
- } else {
- /* external trigger */
- /* see above */
- err |= cfc_check_trigger_arg_max(&cmd->convert_arg, 9);
- }
+ if (cmd->convert_arg >= 17500)
+ cmd->convert_arg = 20000;
+ else if (cmd->convert_arg >= 12500)
+ cmd->convert_arg = 15000;
+ else if (cmd->convert_arg >= 7500)
+ cmd->convert_arg = 10000;
+ else
+ cmd->convert_arg = 5000;
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
- if (cmd->stop_src == TRIG_COUNT) {
- err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0xfffffff0);
+ if (cmd->stop_src == TRIG_COUNT)
err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
- } else {
- /* TRIG_NONE */
+ else /* TRIG_NONE */
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
- }
if (err)
return 3;
- /* step 4: fix up any arguments */
+ /* Step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->scan_begin_arg;
- dmm32at_ns_to_timer(&arg, cmd->flags);
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
- }
- if (cmd->convert_src == TRIG_TIMER) {
- arg = cmd->convert_arg;
- dmm32at_ns_to_timer(&arg, cmd->flags);
- err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->scan_end_arg;
- err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- arg);
- }
- }
+ arg = cmd->convert_arg * cmd->scan_end_arg;
+ err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
if (err)
return 4;
@@ -384,11 +347,11 @@ static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
hi2 = (both2 & 0xff00) >> 8;
lo2 = both2 & 0x00ff;
- /* set the counter frequency to 10mhz */
- outb(0, dev->iobase + DMM32AT_CNTRDIO);
+ /* set counter clocks to 10MHz, disable all aux dio */
+ outb(0, dev->iobase + DMM32AT_CTRDIO_CFG_REG);
/* get access to the clock regs */
- outb(DMM32AT_CLKACC, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_PAGE_8254, dev->iobase + DMM32AT_CTRL_REG);
/* write the counter 1 control word and low byte to counter */
outb(DMM32AT_CLKCT1, dev->iobase + DMM32AT_CLKCT);
@@ -400,65 +363,37 @@ static void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec)
outb(hi2, dev->iobase + DMM32AT_CLK2);
/* enable the ai conversion interrupt and the clock to start scans */
- outb(DMM32AT_ADINT | DMM32AT_CLKSEL, dev->iobase + DMM32AT_INTCLOCK);
+ outb(DMM32AT_INTCLK_ADINT |
+ DMM32AT_INTCLK_CLKEN | DMM32AT_INTCLK_CLKSEL,
+ dev->iobase + DMM32AT_INTCLK_REG);
}
static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct dmm32at_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int range;
- unsigned char chanlo, chanhi;
int ret;
- if (!cmd->chanlist)
- return -EINVAL;
-
- /* get the channel list and range */
- chanlo = CR_CHAN(cmd->chanlist[0]) & (s->n_chan - 1);
- chanhi = chanlo + cmd->chanlist_len - 1;
- if (chanhi >= s->n_chan)
- return -EINVAL;
- range = CR_RANGE(cmd->chanlist[0]);
-
- /* reset fifo */
- outb(DMM32AT_FIFORESET, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* set scan enable */
- outb(DMM32AT_SCANENABLE, dev->iobase + DMM32AT_FIFOCNTRL);
-
- /* write the ai channel range regs */
- outb(chanlo, dev->iobase + DMM32AT_AILOW);
- outb(chanhi, dev->iobase + DMM32AT_AIHIGH);
-
- /* set the range bits */
- outb(dmm32at_rangebits[range], dev->iobase + DMM32AT_AICONF);
+ dmm32at_ai_set_chanspec(dev, s, cmd->chanlist[0], cmd->chanlist_len);
/* reset the interrupt just in case */
- outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);
-
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_scans_left = cmd->stop_arg;
- else { /* TRIG_NONE */
- devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to
- * isr */
- }
+ outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
/*
* wait for circuit to settle
* we don't have the 'insn' here but it's not needed
*/
- ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status, DMM32AT_AIRBACK);
+ ret = comedi_timeout(dev, s, NULL, dmm32at_ai_status,
+ DMM32AT_AI_READBACK_REG);
if (ret)
return ret;
- if (devpriv->ai_scans_left > 1) {
+ if (cmd->stop_src == TRIG_NONE || cmd->stop_arg > 1) {
/* start the clock and enable the interrupts */
dmm32at_setaitimer(dev, cmd->scan_begin_arg);
} else {
/* start the interrupts and initiate a single scan */
- outb(DMM32AT_ADINT, dev->iobase + DMM32AT_INTCLOCK);
- outb(0xff, dev->iobase + DMM32AT_CONV);
+ outb(DMM32AT_INTCLK_ADINT, dev->iobase + DMM32AT_INTCLK_REG);
+ outb(0xff, dev->iobase + DMM32AT_AI_START_CONV_REG);
}
return 0;
@@ -468,19 +403,16 @@ static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int dmm32at_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct dmm32at_private *devpriv = dev->private;
-
- devpriv->ai_scans_left = 1;
+ /* disable further interrupts and clocks */
+ outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
return 0;
}
static irqreturn_t dmm32at_isr(int irq, void *d)
{
struct comedi_device *dev = d;
- struct dmm32at_private *devpriv = dev->private;
unsigned char intstat;
- unsigned int samp;
- unsigned short msb, lsb;
+ unsigned int val;
int i;
if (!dev->attached) {
@@ -488,38 +420,26 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
return IRQ_HANDLED;
}
- intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
+ intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
- if (intstat & DMM32AT_ADINT) {
+ if (intstat & DMM32AT_INTCLK_ADINT) {
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
for (i = 0; i < cmd->chanlist_len; i++) {
- /* read data */
- lsb = inb(dev->iobase + DMM32AT_AILSB);
- msb = inb(dev->iobase + DMM32AT_AIMSB);
-
- /* invert sign bit to make range unsigned */
- samp = ((msb ^ 0x0080) << 8) + lsb;
- comedi_buf_put(s, samp);
+ val = dmm32at_ai_get_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
}
- if (devpriv->ai_scans_left != 0xffffffff) { /* TRIG_COUNT */
- devpriv->ai_scans_left--;
- if (devpriv->ai_scans_left == 0) {
- /* disable further interrupts and clocks */
- outb(0x0, dev->iobase + DMM32AT_INTCLOCK);
- /* set the buffer to be flushed with an EOF */
- s->async->events |= COMEDI_CB_EOA;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
- }
- /* flush the buffer */
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
/* reset the interrupt */
- outb(DMM32AT_INTRESET, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_INTRST, dev->iobase + DMM32AT_CTRL_REG);
return IRQ_HANDLED;
}
@@ -530,8 +450,8 @@ static int dmm32at_ao_eoc(struct comedi_device *dev,
{
unsigned char status;
- status = inb(dev->iobase + DMM32AT_DACSTAT);
- if ((status & DMM32AT_DACBUSY) == 0)
+ status = inb(dev->iobase + DMM32AT_AUX_DI_REG);
+ if ((status & DMM32AT_AUX_DI_DACBUSY) == 0)
return 0;
return -EBUSY;
}
@@ -549,9 +469,9 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
int ret;
/* write LSB then MSB + chan to load DAC */
- outb(val & 0xff, dev->iobase + DMM32AT_DACLSB_REG);
- outb((val >> 8) | DMM32AT_DACMSB_CHAN(chan),
- dev->iobase + DMM32AT_DACMSB_REG);
+ outb(val & 0xff, dev->iobase + DMM32AT_AO_LSB_REG);
+ outb((val >> 8) | DMM32AT_AO_MSB_DACH(chan),
+ dev->iobase + DMM32AT_AO_MSB_REG);
/* wait for circuit to settle */
ret = comedi_timeout(dev, s, insn, dmm32at_ao_eoc, 0);
@@ -559,7 +479,7 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
return ret;
/* dummy read to update DAC */
- inb(dev->iobase + DMM32AT_DACMSB_REG);
+ inb(dev->iobase + DMM32AT_AO_MSB_REG);
s->readback[chan] = val;
}
@@ -567,136 +487,82 @@ static int dmm32at_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static int dmm32at_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct dmm32at_private *devpriv = dev->private;
- unsigned int mask;
- unsigned int val;
-
- mask = comedi_dio_update_state(s, data);
- if (mask) {
- /* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
-
- /* if either part of dio is set for output */
- if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) ||
- ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) {
- val = (s->state & 0x00ff0000) >> 16;
- outb(val, dev->iobase + DMM32AT_DIOC);
- }
- if ((devpriv->dio_config & DMM32AT_DIRB) == 0) {
- val = (s->state & 0x0000ff00) >> 8;
- outb(val, dev->iobase + DMM32AT_DIOB);
- }
- if ((devpriv->dio_config & DMM32AT_DIRA) == 0) {
- val = (s->state & 0x000000ff);
- outb(val, dev->iobase + DMM32AT_DIOA);
- }
- }
-
- val = inb(dev->iobase + DMM32AT_DIOA);
- val |= inb(dev->iobase + DMM32AT_DIOB) << 8;
- val |= inb(dev->iobase + DMM32AT_DIOC) << 16;
- s->state = val;
-
- data[1] = val;
-
- return insn->n;
-}
-
-static int dmm32at_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int dmm32at_8255_io(struct comedi_device *dev,
+ int dir, int port, int data, unsigned long regbase)
{
- struct dmm32at_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask;
- unsigned char chanbit;
- int ret;
-
- if (chan < 8) {
- mask = 0x0000ff;
- chanbit = DMM32AT_DIRA;
- } else if (chan < 16) {
- mask = 0x00ff00;
- chanbit = DMM32AT_DIRB;
- } else if (chan < 20) {
- mask = 0x0f0000;
- chanbit = DMM32AT_DIRCL;
- } else {
- mask = 0xf00000;
- chanbit = DMM32AT_DIRCH;
- }
-
- ret = comedi_dio_insn_config(dev, s, insn, data, mask);
- if (ret)
- return ret;
-
- if (data[0] == INSN_CONFIG_DIO_OUTPUT)
- devpriv->dio_config &= ~chanbit;
- else
- devpriv->dio_config |= chanbit;
/* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
- /* set the DIO's to the new configuration setting */
- outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
+ outb(DMM32AT_CTRL_PAGE_8255, dev->iobase + DMM32AT_CTRL_REG);
- return insn->n;
+ if (dir) {
+ outb(data, dev->iobase + regbase + port);
+ return 0;
+ }
+ return inb(dev->iobase + regbase + port);
}
-static int dmm32at_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+/* Make sure the board is there and put it to a known state */
+static int dmm32at_reset(struct comedi_device *dev)
{
- struct dmm32at_private *devpriv;
- int ret;
- struct comedi_subdevice *s;
unsigned char aihi, ailo, fifostat, aistat, intstat, airback;
- ret = comedi_request_region(dev, it->options[0], 0x10);
- if (ret)
- return ret;
-
- /* the following just makes sure the board is there and gets
- it to a known state */
-
/* reset the board */
- outb(DMM32AT_RESET, dev->iobase + DMM32AT_CNTRL);
+ outb(DMM32AT_CTRL_RESETA, dev->iobase + DMM32AT_CTRL_REG);
/* allow a millisecond to reset */
udelay(1000);
/* zero scan and fifo control */
- outb(0x0, dev->iobase + DMM32AT_FIFOCNTRL);
+ outb(0x0, dev->iobase + DMM32AT_FIFO_CTRL_REG);
/* zero interrupt and clock control */
- outb(0x0, dev->iobase + DMM32AT_INTCLOCK);
+ outb(0x0, dev->iobase + DMM32AT_INTCLK_REG);
/* write a test channel range, the high 3 bits should drop */
- outb(0x80, dev->iobase + DMM32AT_AILOW);
- outb(0xff, dev->iobase + DMM32AT_AIHIGH);
+ outb(0x80, dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ outb(0xff, dev->iobase + DMM32AT_AI_HI_CHAN_REG);
/* set the range at 10v unipolar */
- outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AICONF);
+ outb(DMM32AT_RANGE_U10, dev->iobase + DMM32AT_AI_CFG_REG);
/* should take 10 us to settle, here's a hundred */
udelay(100);
/* read back the values */
- ailo = inb(dev->iobase + DMM32AT_AILOW);
- aihi = inb(dev->iobase + DMM32AT_AIHIGH);
- fifostat = inb(dev->iobase + DMM32AT_FIFOSTAT);
- aistat = inb(dev->iobase + DMM32AT_AISTAT);
- intstat = inb(dev->iobase + DMM32AT_INTCLOCK);
- airback = inb(dev->iobase + DMM32AT_AIRBACK);
-
- if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) ||
- (aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) {
- dev_err(dev->class_dev, "board detection failed\n");
+ ailo = inb(dev->iobase + DMM32AT_AI_LO_CHAN_REG);
+ aihi = inb(dev->iobase + DMM32AT_AI_HI_CHAN_REG);
+ fifostat = inb(dev->iobase + DMM32AT_FIFO_STATUS_REG);
+ aistat = inb(dev->iobase + DMM32AT_AI_STATUS_REG);
+ intstat = inb(dev->iobase + DMM32AT_INTCLK_REG);
+ airback = inb(dev->iobase + DMM32AT_AI_READBACK_REG);
+
+ /*
+ * NOTE: The (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0)
+ * test makes this driver only work if the board is configured
+ * with all A/D channels set for single-ended operation.
+ */
+ if (ailo != 0x00 || aihi != 0x1f ||
+ fifostat != DMM32AT_FIFO_STATUS_EF ||
+ aistat != (DMM32AT_AI_STATUS_SD1 | DMM32AT_AI_STATUS_SD0) ||
+ intstat != 0x00 || airback != 0x0c)
return -EIO;
+
+ return 0;
+}
+
+static int dmm32at_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct comedi_subdevice *s;
+ int ret;
+
+ ret = comedi_request_region(dev, it->options[0], 0x10);
+ if (ret)
+ return ret;
+
+ ret = dmm32at_reset(dev);
+ if (ret) {
+ dev_err(dev->class_dev, "board detection failed\n");
+ return ret;
}
if (it->options[1]) {
@@ -706,65 +572,45 @@ static int dmm32at_attach(struct comedi_device *dev,
dev->irq = it->options[1];
}
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* analog input subdevice */
- s->type = COMEDI_SUBD_AI;
- /* we support single-ended (ground) and differential */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
- s->n_chan = 32;
- s->maxdata = 0xffff;
- s->range_table = &dmm32at_airanges;
- s->insn_read = dmm32at_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = 32;
+ s->maxdata = 0xffff;
+ s->range_table = &dmm32at_airanges;
+ s->insn_read = dmm32at_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 32;
- s->do_cmd = dmm32at_ai_cmd;
- s->do_cmdtest = dmm32at_ai_cmdtest;
- s->cancel = dmm32at_ai_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = dmm32at_ai_cmd;
+ s->do_cmdtest = dmm32at_ai_cmdtest;
+ s->cancel = dmm32at_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* analog output subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 0x0fff;
- s->range_table = &dmm32at_aoranges;
- s->insn_write = dmm32at_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 0x0fff;
+ s->range_table = &dmm32at_aoranges;
+ s->insn_write = dmm32at_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
+ /* Digital I/O subdevice */
s = &dev->subdevices[2];
- /* digital i/o subdevice */
-
- /* get access to the DIO regs */
- outb(DMM32AT_DIOACC, dev->iobase + DMM32AT_CNTRL);
- /* set the DIO's to the defualt input setting */
- devpriv->dio_config = DMM32AT_DIRA | DMM32AT_DIRB |
- DMM32AT_DIRCL | DMM32AT_DIRCH | DMM32AT_DIENABLE;
- outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
-
- /* set up the subdevice */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->state = 0;
- s->range_table = &range_digital;
- s->insn_bits = dmm32at_dio_insn_bits;
- s->insn_config = dmm32at_dio_insn_config;
+ ret = subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
+ if (ret)
+ return ret;
return 0;
}
@@ -778,5 +624,5 @@ static struct comedi_driver dmm32at_driver = {
module_comedi_driver(dmm32at_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi: Diamond Systems Diamond-MM-32-AT");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index e97386343a0e..b96e60ffad73 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -597,7 +597,6 @@ havetype:
devpriv->dac_range_types[0] = dac_range_lkup(it->options[4]);
devpriv->dac_range_types[1] = dac_range_lkup(it->options[5]);
s->insn_write = dt2801_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 1736e397ad2c..d660f277487e 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -418,7 +418,6 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
s->insn_write = dt2811_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 9216c35c414e..9805be13005a 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -230,7 +230,7 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
s->async->events |= COMEDI_CB_EOA;
}
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index cc974a5e5cf6..2be98bb9a809 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -449,13 +449,29 @@ static void dt282x_munge(struct comedi_device *dev,
}
}
+static unsigned int dt282x_ao_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int cur_dma)
+{
+ struct dt282x_private *devpriv = dev->private;
+ void *ptr = devpriv->dma[cur_dma].buf;
+ unsigned int nsamples = comedi_bytes_to_samples(s, devpriv->dma_maxsize);
+ unsigned int nbytes;
+
+ nbytes = comedi_buf_read_samples(s, ptr, nsamples);
+ if (nbytes)
+ dt282x_prep_ao_dma(dev, cur_dma, nbytes);
+ else
+ dev_err(dev->class_dev, "AO underrun\n");
+
+ return nbytes;
+}
+
static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct dt282x_private *devpriv = dev->private;
int cur_dma = devpriv->current_dma_index;
- void *ptr = devpriv->dma[cur_dma].buf;
- int size;
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
dev->iobase + DT2821_SUPCSR_REG);
@@ -464,13 +480,8 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev,
devpriv->current_dma_index = 1 - cur_dma;
- size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, cur_dma))
s->async->events |= COMEDI_CB_OVERFLOW;
- } else {
- dt282x_prep_ao_dma(dev, cur_dma, size);
- }
}
static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
@@ -480,6 +491,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
int cur_dma = devpriv->current_dma_index;
void *ptr = devpriv->dma[cur_dma].buf;
int size = devpriv->dma[cur_dma].size;
+ unsigned int nsamples = comedi_bytes_to_samples(s, size);
int ret;
outw(devpriv->supcsr | DT2821_SUPCSR_CLRDMADNE,
@@ -490,13 +502,11 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev,
devpriv->current_dma_index = 1 - cur_dma;
dt282x_munge(dev, s, ptr, size);
- ret = cfc_write_array_to_buffer(s, ptr, size);
- if (ret != size) {
- s->async->events |= COMEDI_CB_OVERFLOW;
+ ret = comedi_buf_write_samples(s, ptr, nsamples);
+ if (ret != size)
return;
- }
- devpriv->nread -= size / 2;
+ devpriv->nread -= nsamples;
if (devpriv->nread < 0) {
dev_info(dev->class_dev, "nread off by one\n");
devpriv->nread = 0;
@@ -555,7 +565,6 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#if 0
if (adcsr & DT2821_ADCSR_ADDONE) {
- int ret;
unsigned short data;
data = inw(dev->iobase + DT2821_ADDAT_REG);
@@ -563,10 +572,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
if (devpriv->ad_2scomp)
data = comedi_offset_munge(s, data);
- ret = comedi_buf_put(s, data);
-
- if (ret == 0)
- s->async->events |= COMEDI_CB_OVERFLOW;
+ comedi_buf_write_samples(s, &data, 1);
devpriv->nread--;
if (!devpriv->nread) {
@@ -579,8 +585,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
handled = 1;
}
#endif
- cfc_handle_events(dev, s);
- cfc_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s);
+ comedi_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
@@ -916,26 +922,15 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
{
struct dt282x_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int size;
if (trig_num != cmd->start_src)
return -EINVAL;
- size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf,
- devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, 0))
return -EPIPE;
- }
- dt282x_prep_ao_dma(dev, 0, size);
- size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf,
- devpriv->dma_maxsize);
- if (size == 0) {
- dev_err(dev->class_dev, "AO underrun\n");
+ if (!dt282x_ao_setup_dma(dev, s, 1))
return -EPIPE;
- }
- dt282x_prep_ao_dma(dev, 1, size);
outw(devpriv->supcsr | DT2821_SUPCSR_STRIG,
dev->iobase + DT2821_SUPCSR_REG);
@@ -1236,7 +1231,6 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* ranges are per-channel, set by jumpers on the board */
s->range_table = &dt282x_ao_range;
s->insn_write = dt282x_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
if (dev->irq) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 825561046b6f..1d9a7a63e06f 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -315,7 +315,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
for (i = 0; i < count; i++) {
data = readw(dev->mmio + DPR_ADC_buffer + rear);
- comedi_buf_put(s, data);
+ comedi_buf_write_samples(s, &data, 1);
rear++;
if (rear >= AI_FIFO_DEPTH)
rear = 0;
@@ -351,10 +351,8 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
status = readw(dev->mmio + DPR_Intr_Flag);
- if (status & DT3000_ADFULL) {
+ if (status & DT3000_ADFULL)
dt3k_ai_empty_fifo(dev, s);
- s->async->events |= COMEDI_CB_BLOCK;
- }
if (status & (DT3000_ADSWERR | DT3000_ADHWERR))
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
@@ -363,7 +361,7 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
if (debug_n_ints >= 10)
s->async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -699,7 +697,6 @@ static int dt3000_auto_attach(struct comedi_device *dev,
s->len_chanlist = 1;
s->range_table = &range_bipolar10;
s->insn_write = dt3k_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 77bb89fee327..06c601d8fdff 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -804,7 +804,7 @@ static int dt9812_auto_attach(struct comedi_device *dev,
/* Digital Output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -822,7 +822,7 @@ static int dt9812_auto_attach(struct comedi_device *dev,
/* Analog Output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index 608aee0c3a15..1b6324c6eb29 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -218,7 +218,7 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
/* digital input */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -228,7 +228,7 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
/* digital output */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index 5a1e3c8fc01c..e1f493241cd6 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -135,7 +135,6 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = &range_fl512;
s->insn_write = fl512_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index b8975a4606ea..0979f536ed39 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -196,8 +196,8 @@ static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
size = devpriv->dio_count;
devpriv->dio_count -= size;
}
- cfc_write_array_to_buffer(s, devpriv->desc_dio_buffer[idx],
- size * sizeof(uint32_t));
+ comedi_buf_write_samples(s, devpriv->desc_dio_buffer[idx],
+ size);
idx++;
idx %= devpriv->num_dma_descriptors;
start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
@@ -272,7 +272,7 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
if (devpriv->dio_count == 0)
async->events |= COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -689,7 +689,7 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[0];
dev->read_subdev = s;
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL |
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL |
SDF_CMD_READ;
s->n_chan = 32;
s->len_chanlist = 32;
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index f4e1c1cf4178..1ea168620103 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -53,7 +53,7 @@ Configuration options: not applicable, uses PCI auto config
#define ICP_MULTI_AI 2 /* R: Analogue input data */
#define ICP_MULTI_DAC_CSR 4 /* R/W: DAC command/status register */
#define ICP_MULTI_AO 6 /* R/W: Analogue output data */
-#define ICP_MULTI_DI 8 /* R/W: Digital inouts */
+#define ICP_MULTI_DI 8 /* R/W: Digital inputs */
#define ICP_MULTI_DO 0x0A /* R/W: Digital outputs */
#define ICP_MULTI_INT_EN 0x0C /* R/W: Interrupt enable register */
#define ICP_MULTI_INT_STAT 0x0E /* R/W: Interrupt status register */
@@ -319,7 +319,7 @@ static int icp_multi_insn_bits_do(struct comedi_device *dev,
if (comedi_dio_update_state(s, data))
writew(s->state, dev->mmio + ICP_MULTI_DO);
- data[1] = readw(dev->mmio + ICP_MULTI_DI);
+ data[1] = s->state;
return insn->n;
}
@@ -495,7 +495,6 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s->len_chanlist = 4;
s->range_table = &range_analog;
s->insn_write = icp_multi_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -512,7 +511,7 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->len_chanlist = 8;
@@ -521,7 +520,7 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 4;
s->maxdata = 0xffff;
s->len_chanlist = 4;
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index cc5fd75b8bc0..1085d66935fe 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -392,7 +392,6 @@ static int ii20k_init_module(struct comedi_device *dev,
s->maxdata = 0xffff;
s->range_table = &ii20k_ao_ranges;
s->insn_write = ii20k_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 6561b00bea59..915685c1c85c 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -44,8 +44,6 @@ broken.
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
#include "../comedidev.h"
@@ -53,10 +51,7 @@ broken.
#include "8253.h"
#include "plx9052.h"
-#if 0
-/* file removed due to GPL incompatibility */
-#include "me4000_fw.h"
-#endif
+#define ME4000_FIRMWARE "me4000_firmware.bin"
/*
* ME4000 Register map and bit defines
@@ -333,27 +328,20 @@ static const struct comedi_lrange me4000_ai_range = {
}
};
-#define FIRMWARE_NOT_AVAILABLE 1
-#if FIRMWARE_NOT_AVAILABLE
-extern unsigned char *xilinx_firm;
-#endif
-
-static int xilinx_download(struct comedi_device *dev)
+static int me4000_xilinx_download(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct me4000_info *info = dev->private;
unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
- u32 value = 0;
- wait_queue_head_t queue;
- int idx = 0;
- int size = 0;
- unsigned int intcsr;
+ unsigned int file_length;
+ unsigned int val;
+ unsigned int i;
if (!xilinx_iobase)
return -ENODEV;
- init_waitqueue_head(&queue);
-
/*
* Set PLX local interrupt 2 polarity to high.
* Interrupt is thrown by init pin of xilinx.
@@ -361,61 +349,58 @@ static int xilinx_download(struct comedi_device *dev)
outl(PLX9052_INTCSR_LI2POL, info->plx_regbase + PLX9052_INTCSR);
/* Set /CS and /WRITE of the Xilinx */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value |= PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val |= PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
/* Init Xilinx with CS1 */
inb(xilinx_iobase + 0xC8);
/* Wait until /INIT pin is set */
udelay(20);
- intcsr = inl(info->plx_regbase + PLX9052_INTCSR);
- if (!(intcsr & PLX9052_INTCSR_LI2STAT)) {
+ val = inl(info->plx_regbase + PLX9052_INTCSR);
+ if (!(val & PLX9052_INTCSR_LI2STAT)) {
dev_err(dev->class_dev, "Can't init Xilinx\n");
return -EIO;
}
/* Reset /CS and /WRITE of the Xilinx */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value &= ~PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
- if (FIRMWARE_NOT_AVAILABLE) {
- dev_err(dev->class_dev,
- "xilinx firmware unavailable due to licensing, aborting");
- return -EIO;
- } else {
- /* Download Xilinx firmware */
- size = (xilinx_firm[0] << 24) + (xilinx_firm[1] << 16) +
- (xilinx_firm[2] << 8) + xilinx_firm[3];
- udelay(10);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val &= ~PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
- for (idx = 0; idx < size; idx++) {
- outb(xilinx_firm[16 + idx], xilinx_iobase);
- udelay(10);
+ /* Download Xilinx firmware */
+ file_length = (((unsigned int)data[0] & 0xff) << 24) +
+ (((unsigned int)data[1] & 0xff) << 16) +
+ (((unsigned int)data[2] & 0xff) << 8) +
+ ((unsigned int)data[3] & 0xff);
+ udelay(10);
- /* Check if BUSY flag is low */
- if (inl(info->plx_regbase + PLX9052_CNTRL) & PLX9052_CNTRL_UIO1_DATA) {
- dev_err(dev->class_dev,
- "Xilinx is still busy (idx = %d)\n",
- idx);
- return -EIO;
- }
+ for (i = 0; i < file_length; i++) {
+ outb(data[16 + i], xilinx_iobase);
+ udelay(10);
+
+ /* Check if BUSY flag is low */
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ if (val & PLX9052_CNTRL_UIO1_DATA) {
+ dev_err(dev->class_dev,
+ "Xilinx is still busy (i = %d)\n", i);
+ return -EIO;
}
}
/* If done flag is high download was successful */
- if (inl(info->plx_regbase + PLX9052_CNTRL) & PLX9052_CNTRL_UIO0_DATA) {
- } else {
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
dev_err(dev->class_dev, "DONE flag is not set\n");
dev_err(dev->class_dev, "Download not successful\n");
return -EIO;
}
/* Set /CS and /WRITE */
- value = inl(info->plx_regbase + PLX9052_CNTRL);
- value |= PLX9052_CNTRL_UIO2_DATA;
- outl(value, info->plx_regbase + PLX9052_CNTRL);
+ val = inl(info->plx_regbase + PLX9052_CNTRL);
+ val |= PLX9052_CNTRL_UIO2_DATA;
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
return 0;
}
@@ -431,7 +416,7 @@ static void me4000_reset(struct comedi_device *dev)
val |= PLX9052_CNTRL_PCI_RESET;
outl(val, info->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_PCI_RESET;
- outl(val , info->plx_regbase + PLX9052_CNTRL);
+ outl(val, info->plx_regbase + PLX9052_CNTRL);
/* 0x8000 to the DACs means an output voltage of 0V */
for (chan = 0; chan < 4; chan++)
@@ -848,9 +833,6 @@ static int me4000_ai_do_cmd_test(struct comedi_device *dev,
unsigned int scan_ticks;
int err = 0;
- /* Only rounding flags are implemented */
- cmd->flags &= CMDF_ROUND_NEAREST | CMDF_ROUND_UP | CMDF_ROUND_DOWN;
-
/* Round the timer arguments */
ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
@@ -1092,8 +1074,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
&& !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
&& (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
- s->async->events |= COMEDI_CB_BLOCK;
-
c = ME4000_AI_FIFO_COUNT / 2;
} else {
dev_err(dev->class_dev,
@@ -1119,7 +1099,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
- if (!comedi_buf_put(s, lval)) {
+ if (!comedi_buf_write_samples(s, &lval, 1)) {
/*
* Buffer overflow, so stop conversion
* and disable all interrupts
@@ -1128,11 +1108,6 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
- s->async->events |= COMEDI_CB_OVERFLOW;
-
- dev_err(dev->class_dev, "Buffer overflow\n");
-
break;
}
}
@@ -1146,7 +1121,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_SC) {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOA;
+ s->async->events |= COMEDI_CB_EOA;
/*
* Acquisition is complete, so stop
@@ -1164,11 +1139,8 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
- if (!comedi_buf_put(s, lval)) {
- dev_err(dev->class_dev, "Buffer overflow\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
+ if (!comedi_buf_write_samples(s, &lval, 1))
break;
- }
}
/* Work is done, so reset the interrupt */
@@ -1178,8 +1150,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
- if (s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1397,8 +1368,9 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (!info->plx_regbase || !dev->iobase || !info->timer_regbase)
return -ENODEV;
- result = xilinx_download(dev);
- if (result)
+ result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
+ me4000_xilinx_download, 0);
+ if (result < 0)
return result;
me4000_reset(dev);
@@ -1449,12 +1421,11 @@ static int me4000_auto_attach(struct comedi_device *dev,
if (thisboard->ao_nchan) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_COMMON | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
s->n_chan = thisboard->ao_nchan;
s->maxdata = 0xFFFF; /* 16 bit DAC */
s->range_table = &range_bipolar10;
s->insn_write = me4000_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
result = comedi_alloc_subdev_readback(s);
if (result)
@@ -1561,3 +1532,4 @@ module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(ME4000_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 00eaaf8ac148..b5278c11e622 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -511,13 +511,12 @@ static int me_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_COMMON;
+ s->subdev_flags = SDF_WRITABLE | SDF_COMMON;
s->n_chan = 4;
s->maxdata = 0x0fff;
s->len_chanlist = 4;
s->range_table = &me_ao_range;
s->insn_write = me_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -528,7 +527,7 @@ static int me_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 32;
s->maxdata = 1;
s->len_chanlist = 32;
diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
index c8d3a22c5896..af21bc180c46 100644
--- a/drivers/staging/comedi/drivers/mf6x4.c
+++ b/drivers/staging/comedi/drivers/mf6x4.c
@@ -259,7 +259,6 @@ static int mf6x4_auto_attach(struct comedi_device *dev, unsigned long context)
s->maxdata = 0x3fff; /* 14 bits DAC */
s->range_table = &range_bipolar10;
s->insn_write = mf6x4_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 4f7829010a99..ffc9e61d6cdd 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -494,13 +494,10 @@ EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
struct mite_struct *mite = mite_chan->mite;
- int tcr;
int lkar;
lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
- tcr = readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
-
- return tcr;
+ return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);
@@ -542,7 +539,7 @@ int mite_sync_input_dma(struct mite_channel *mite_chan,
return 0;
comedi_buf_write_free(s, count);
- cfc_inc_scan_progress(s, count);
+ comedi_inc_scan_progress(s, count);
async->events |= COMEDI_CB_BLOCK;
return 0;
}
@@ -553,7 +550,7 @@ int mite_sync_output_dma(struct mite_channel *mite_chan,
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * cfc_bytes_per_scan(s);
+ u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
unsigned int old_alloc_count = async->buf_read_alloc_count;
u32 nbytes_ub, nbytes_lb;
int count;
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index f710c8e81320..8471219210b6 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -238,7 +238,6 @@ static int multiq3_attach(struct comedi_device *dev,
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s->insn_write = multiq3_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 45fb601e4080..f99847f3999f 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -208,9 +208,8 @@ static irqreturn_t ni6527_interrupt(int irq, void *d)
return IRQ_NONE;
if (status & NI6527_STATUS_EDGE) {
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
writeb(NI6527_CLR_IRQS, dev->mmio + NI6527_CLR_REG);
@@ -238,9 +237,6 @@ static int ni6527_intr_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3b642861eb36..bcb326e31562 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -508,9 +508,9 @@ static irqreturn_t ni_65xx_interrupt(int irq, void *d)
writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
dev->mmio + NI_65XX_CLR_REG);
- comedi_buf_put(s, 0);
- s->async->events |= COMEDI_CB_EOS;
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -534,9 +534,6 @@ static int ni_65xx_intr_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 5b6794c8232e..1e4dd82b12ea 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -780,7 +780,7 @@ static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
ni_tio_handle_interrupt(counter, s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t ni_660x_interrupt(int irq, void *d)
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 54721deb80cc..c42a81c0bfa1 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -228,7 +228,6 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
s->range_table = &range_bipolar10;
}
s->insn_write = ni_670x_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 72ec857d073e..69e543a0bf22 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -168,7 +168,6 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
struct comedi_cmd *cmd;
unsigned int max_points, num_points, residue, leftover;
unsigned short dpnt;
- static const int sample_size = sizeof(devpriv->dma_buffer[0]);
if (!dev->attached) {
dev_err(dev->class_dev, "premature interrupt\n");
@@ -188,14 +187,14 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
if (status & OVFL_BIT) {
dev_err(dev->class_dev, "fifo overflow\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
if ((status & DMA_TC_BIT) == 0) {
dev_err(dev->class_dev,
"caught non-dma interrupt? Aborting.\n");
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -206,12 +205,12 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
clear_dma_ff(devpriv->dma);
/* figure out how many points to read */
- max_points = devpriv->dma_transfer_size / sample_size;
+ max_points = comedi_bytes_to_samples(s, devpriv->dma_transfer_size);
/* residue is the number of points left to be done on the dma
* transfer. It should always be zero at this point unless
* the stop_src is set to external triggering.
*/
- residue = get_dma_residue(devpriv->dma) / sample_size;
+ residue = comedi_bytes_to_samples(s, get_dma_residue(devpriv->dma));
num_points = max_points - residue;
if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT)
num_points = devpriv->count;
@@ -219,7 +218,8 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
/* figure out how many points will be stored next time */
leftover = 0;
if (cmd->stop_src == TRIG_NONE) {
- leftover = devpriv->dma_transfer_size / sample_size;
+ leftover = comedi_bytes_to_samples(s,
+ devpriv->dma_transfer_size);
} else if (devpriv->count > max_points) {
leftover = devpriv->count - max_points;
if (leftover > max_points)
@@ -237,7 +237,7 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
dpnt = devpriv->dma_buffer[i];
/* convert from 2's complement to unsigned coding */
dpnt ^= 0x8000;
- cfc_write_to_buffer(s, dpnt);
+ comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT) {
if (--devpriv->count == 0) { /* end of acquisition */
async->events |= COMEDI_CB_EOA;
@@ -248,14 +248,13 @@ static irqreturn_t a2150_interrupt(int irq, void *d)
/* re-enable dma */
if (leftover) {
set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer));
- set_dma_count(devpriv->dma, leftover * sample_size);
+ set_dma_count(devpriv->dma,
+ comedi_samples_to_bytes(s, leftover));
enable_dma(devpriv->dma);
}
release_dma_lock(flags);
- async->events |= COMEDI_CB_BLOCK;
-
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
/* clear interrupt */
outw(0x00, dev->iobase + DMA_TC_CLEAR_REG);
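The a2150 hunks drop the local sample_size constant in favour of the generic unit helpers, which scale by the subdevice sample size (2 bytes, or 4 when SDF_LSAMPL is set). A small sketch of the conversion in isolation (dma_bytes and residue_bytes are hypothetical byte counts):

static unsigned int mydev_dma_samples_done(struct comedi_subdevice *s,
					   unsigned int dma_bytes,
					   unsigned int residue_bytes)
{
	unsigned int max_points = comedi_bytes_to_samples(s, dma_bytes);
	unsigned int residue = comedi_bytes_to_samples(s, residue_bytes);

	/* samples actually transferred by the DMA engine so far */
	return max_points - residue;
}

Going the other way, comedi_samples_to_bytes(s, leftover) produces the byte count needed when the DMA transfer length is reprogrammed.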
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 3e1ce5866147..05370a4a74a5 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -244,47 +244,31 @@ static int atao_calib_insn_write(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct atao_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int bitstring;
- unsigned int val;
- int bit;
- if (insn->n == 0)
- return 0;
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+ unsigned int bitstring = ((chan & 0x7) << 8) | val;
+ unsigned int bits;
+ int bit;
- devpriv->caldac[chan] = data[insn->n - 1] & s->maxdata;
+ /* write the channel and last data value to the caldac */
+ /* clock the bitstring to the caldac; MSB -> LSB */
+ for (bit = 1 << 10; bit; bit >>= 1) {
+ bits = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
- /* write the channel and last data value to the caldac */
- bitstring = ((chan & 0x7) << 8) | devpriv->caldac[chan];
+ outw(bits, dev->iobase + ATAO_CFG2_REG);
+ outw(bits | ATAO_CFG2_SCLK,
+ dev->iobase + ATAO_CFG2_REG);
+ }
- /* clock the bitstring to the caldac; MSB -> LSB */
- for (bit = 1 << 10; bit; bit >>= 1) {
- val = (bit & bitstring) ? ATAO_CFG2_SDATA : 0;
+ /* strobe the caldac to load the value */
+ outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
+ outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
- outw(val, dev->iobase + ATAO_CFG2_REG);
- outw(val | ATAO_CFG2_SCLK, dev->iobase + ATAO_CFG2_REG);
+ s->readback[chan] = val;
}
- /* strobe the caldac to load the value */
- outw(ATAO_CFG2_CALLD(chan), dev->iobase + ATAO_CFG2_REG);
- outw(ATAO_CFG2_CALLD_NOP, dev->iobase + ATAO_CFG2_REG);
-
- return insn->n;
-}
-
-static int atao_calib_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct atao_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->caldac[chan];
-
return insn->n;
}
@@ -344,7 +328,6 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0x0fff;
s->range_table = it->options[3] ? &range_unipolar10 : &range_bipolar10;
s->insn_write = atao_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -366,9 +349,12 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = (board->n_ao_chans * 2) + 1;
s->maxdata = 0xff;
- s->insn_read = atao_calib_insn_read;
s->insn_write = atao_calib_insn_write;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
/* EEPROM subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_UNUSED;
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index fc3c19de7005..c484c89c94b5 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -217,10 +217,12 @@ static irqreturn_t atmio16d_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
+ unsigned short val;
- comedi_buf_put(s, inw(dev->iobase + AD_FIFO_REG));
+ val = inw(dev->iobase + AD_FIFO_REG);
+ comedi_buf_write_samples(s, &val, 1);
+ comedi_handle_events(dev, s);
- comedi_event(dev, s);
return IRQ_HANDLED;
}
@@ -298,7 +300,6 @@ static int atmio16d_ai_cmd(struct comedi_device *dev,
* It is still uber-experimental */
reset_counters(dev);
- s->async->cur_chan = 0;
/* check if scanning multiple channels */
if (cmd->chanlist_len < 2) {
@@ -691,7 +692,6 @@ static int atmio16d_attach(struct comedi_device *dev,
break;
}
s->insn_write = atmio16d_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index f6e5cd15a409..ac2c01f9dfdc 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -37,8 +37,6 @@ struct labpc_boardinfo {
struct labpc_private {
/* number of data points left to be taken */
unsigned long long count;
- /* software copy of analog output values */
- unsigned int ao_value[NUM_AO_CHAN];
/* software copies of bits written to command registers */
unsigned int cmd1;
unsigned int cmd2;
@@ -70,10 +68,6 @@ struct labpc_private {
unsigned int dma_transfer_size;
/* we are using dma/fifo-half-full/etc. */
enum transfer_type current_transfer;
- /* stores contents of board's eeprom */
- unsigned int eeprom_data[EEPROM_SIZE];
- /* stores settings of calibration dacs */
- unsigned int caldac[16];
/*
* function pointers so we can use inb/outb or readb/writeb as
* appropriate
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 35bc2c25ddfb..d89d5852aeea 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -818,7 +818,7 @@ static int labpc_drain_fifo(struct comedi_device *dev)
devpriv->count--;
}
data = labpc_read_adc_fifo(dev);
- cfc_write_to_buffer(dev->read_subdev, data);
+ comedi_buf_write_samples(dev->read_subdev, &data, 1);
devpriv->stat1 = devpriv->read_byte(dev, STAT1_REG);
}
if (i == timeout) {
@@ -876,7 +876,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overrun\n");
return IRQ_HANDLED;
}
@@ -896,7 +896,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* clear error interrupt */
devpriv->write_byte(dev, 0x1, ADC_FIFO_CLEAR_REG);
async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
dev_err(dev->class_dev, "overflow\n");
return IRQ_HANDLED;
}
@@ -914,10 +914,22 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
async->events |= COMEDI_CB_EOA;
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
+static void labpc_ao_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan, unsigned int val)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ devpriv->write_byte(dev, val & 0xff, DAC_LSB_REG(chan));
+ devpriv->write_byte(dev, (val >> 8) & 0xff, DAC_MSB_REG(chan));
+
+ s->readback[chan] = val;
+}
+
static int labpc_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -927,7 +939,6 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
struct labpc_private *devpriv = dev->private;
int channel, range;
unsigned long flags;
- int lsb, msb;
channel = CR_CHAN(insn->chanspec);
@@ -950,25 +961,7 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
devpriv->write_byte(dev, devpriv->cmd6, CMD6_REG);
}
/* send data */
- lsb = data[0] & 0xff;
- msb = (data[0] >> 8) & 0xff;
- devpriv->write_byte(dev, lsb, DAC_LSB_REG(channel));
- devpriv->write_byte(dev, msb, DAC_MSB_REG(channel));
-
- /* remember value for readback */
- devpriv->ao_value[channel] = data[0];
-
- return 1;
-}
-
-static int labpc_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct labpc_private *devpriv = dev->private;
-
- data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)];
+ labpc_ao_write(dev, s, channel, data[0]);
return 1;
}
@@ -1085,29 +1078,13 @@ static unsigned int labpc_eeprom_read_status(struct comedi_device *dev)
return value;
}
-static int labpc_eeprom_write(struct comedi_device *dev,
- unsigned int address, unsigned int value)
+static void labpc_eeprom_write(struct comedi_device *dev,
+ unsigned int address, unsigned int value)
{
struct labpc_private *devpriv = dev->private;
const int write_enable_instruction = 0x6;
const int write_instruction = 0x2;
const int write_length = 8; /* 8 bit write lengths to eeprom */
- const int write_in_progress_bit = 0x1;
- const int timeout = 10000;
- int i;
-
- /* make sure there isn't already a write in progress */
- for (i = 0; i < timeout; i++) {
- if ((labpc_eeprom_read_status(dev) & write_in_progress_bit) ==
- 0)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "eeprom write timed out\n");
- return -ETIME;
- }
- /* update software copy of eeprom */
- devpriv->eeprom_data[address] = value;
/* enable read/write to eeprom */
devpriv->cmd5 &= ~CMD5_EEPROMCS;
@@ -1140,8 +1117,6 @@ static int labpc_eeprom_write(struct comedi_device *dev,
devpriv->cmd5 &= ~(CMD5_EEPROMCS | CMD5_WRTPRT);
udelay(1);
devpriv->write_byte(dev, devpriv->cmd5, CMD5_REG);
-
- return 0;
}
/* writes to 8 bit calibration dacs */
@@ -1150,10 +1125,6 @@ static void write_caldac(struct comedi_device *dev, unsigned int channel,
{
struct labpc_private *devpriv = dev->private;
- if (value == devpriv->caldac[channel])
- return;
- devpriv->caldac[channel] = value;
-
/* clear caldac load bit and make sure we don't write to eeprom */
devpriv->cmd5 &= ~(CMD5_CALDACLD | CMD5_EEPROMCS | CMD5_WRTPRT);
udelay(1);
@@ -1184,25 +1155,30 @@ static int labpc_calib_insn_write(struct comedi_device *dev,
* Only write the last data value to the caldac. Preceding
* data would be overwritten anyway.
*/
- if (insn->n > 0)
- write_caldac(dev, chan, data[insn->n - 1]);
+ if (insn->n > 0) {
+ unsigned int val = data[insn->n - 1];
+
+ if (s->readback[chan] != val) {
+ write_caldac(dev, chan, val);
+ s->readback[chan] = val;
+ }
+ }
return insn->n;
}
-static int labpc_calib_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int labpc_eeprom_ready(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- struct labpc_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
-
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->caldac[chan];
+ unsigned int status;
- return insn->n;
+ /* make sure there isn't already a write in progress */
+ status = labpc_eeprom_read_status(dev);
+ if ((status & 0x1) == 0)
+ return 0;
+ return -EBUSY;
}
static int labpc_eeprom_insn_write(struct comedi_device *dev,
@@ -1222,25 +1198,15 @@ static int labpc_eeprom_insn_write(struct comedi_device *dev,
* data would be overwritten anyway.
*/
if (insn->n > 0) {
- ret = labpc_eeprom_write(dev, chan, data[insn->n - 1]);
+ unsigned int val = data[insn->n - 1];
+
+ ret = comedi_timeout(dev, s, insn, labpc_eeprom_ready, 0);
if (ret)
return ret;
- }
-
- return insn->n;
-}
-
-static int labpc_eeprom_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct labpc_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- int i;
- for (i = 0; i < insn->n; i++)
- data[i] = devpriv->eeprom_data[chan];
+ labpc_eeprom_write(dev, chan, val);
+ s->readback[chan] = val;
+ }
return insn->n;
}
@@ -1309,19 +1275,15 @@ int labpc_common_attach(struct comedi_device *dev,
s->n_chan = NUM_AO_CHAN;
s->maxdata = 0x0fff;
s->range_table = &range_labpc_ao;
- s->insn_read = labpc_ao_insn_read;
s->insn_write = labpc_ao_insn_write;
- /* initialize analog outputs to a known value */
- for (i = 0; i < s->n_chan; i++) {
- short lsb, msb;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
- devpriv->ao_value[i] = s->maxdata / 2;
- lsb = devpriv->ao_value[i] & 0xff;
- msb = (devpriv->ao_value[i] >> 8) & 0xff;
- devpriv->write_byte(dev, lsb, DAC_LSB_REG(i));
- devpriv->write_byte(dev, msb, DAC_MSB_REG(i));
- }
+ /* initialize analog outputs to a known value */
+ for (i = 0; i < s->n_chan; i++)
+ labpc_ao_write(dev, s, i, s->maxdata / 2);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1342,11 +1304,16 @@ int labpc_common_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 16;
s->maxdata = 0xff;
- s->insn_read = labpc_calib_insn_read;
s->insn_write = labpc_calib_insn_write;
- for (i = 0; i < s->n_chan; i++)
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < s->n_chan; i++) {
write_caldac(dev, i, s->maxdata / 2);
+ s->readback[i] = s->maxdata / 2;
+ }
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -1358,11 +1325,14 @@ int labpc_common_attach(struct comedi_device *dev,
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = EEPROM_SIZE;
s->maxdata = 0xff;
- s->insn_read = labpc_eeprom_insn_read;
s->insn_write = labpc_eeprom_insn_write;
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
for (i = 0; i < s->n_chan; i++)
- devpriv->eeprom_data[i] = labpc_eeprom_read(dev, i);
+ s->readback[i] = labpc_eeprom_read(dev, i);
} else {
s->type = COMEDI_SUBD_UNUSED;
}
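The reworked EEPROM write above leans on comedi_timeout(), which polls a ready-callback until it returns 0 or the core gives up with -ETIMEDOUT. A sketch of the callback/caller pairing (mydev_read_status() and MYDEV_EEPROM_BUSY are placeholders for the driver's own status read):

static int mydev_eeprom_ready(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn,
			      unsigned long context)
{
	if ((mydev_read_status(dev) & MYDEV_EEPROM_BUSY) == 0)
		return 0;	/* ready, stop polling */
	return -EBUSY;		/* not yet, poll again */
}

	/* in the (*insn_write) handler, before touching the EEPROM: */
	ret = comedi_timeout(dev, s, insn, mydev_eeprom_ready, 0);
	if (ret)
		return ret;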
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
index 967202e0635e..6d386050e59d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -91,7 +91,6 @@ void labpc_drain_dma(struct comedi_device *dev)
int status;
unsigned long flags;
unsigned int max_points, num_points, residue, leftover;
- int i;
status = devpriv->stat1;
@@ -122,9 +121,7 @@ void labpc_drain_dma(struct comedi_device *dev)
leftover = max_points;
}
- /* write data to comedi buffer */
- for (i = 0; i < num_points; i++)
- cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
+ comedi_buf_write_samples(s, devpriv->dma_buffer, num_points);
if (cmd->stop_src == TRIG_COUNT)
devpriv->count -= num_points;
@@ -133,8 +130,6 @@ void labpc_drain_dma(struct comedi_device *dev)
set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
set_dma_count(devpriv->dma_chan, leftover * sample_size);
release_dma_lock(flags);
-
- async->events |= COMEDI_CB_BLOCK;
}
EXPORT_SYMBOL_GPL(labpc_drain_dma);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 320b080149b6..11e70173712d 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -686,13 +686,12 @@ static inline void ni_set_ai_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
- if (channel >= 0) {
+ if (channel >= 0)
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
}
@@ -701,13 +700,12 @@ static inline void ni_set_ao_dma_channel(struct comedi_device *dev, int channel)
{
unsigned bitfield;
- if (channel >= 0) {
+ if (channel >= 0)
bitfield =
(ni_stc_dma_channel_select_bitfield(channel) <<
AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
- } else {
+ else
bitfield = 0;
- }
ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
}
@@ -1127,31 +1125,18 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct comedi_subdevice *s, int n)
{
struct ni_private *devpriv = dev->private;
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- int chan;
int i;
unsigned short d;
u32 packed_data;
- int range;
- int err = 1;
- chan = async->cur_chan;
for (i = 0; i < n; i++) {
- err &= comedi_buf_get(s, &d);
- if (err == 0)
- break;
-
- range = CR_RANGE(cmd->chanlist[chan]);
+ comedi_buf_read_samples(s, &d, 1);
if (devpriv->is_6xxx) {
packed_data = d & 0xffff;
/* 6711 only has 16 bit wide ao fifo */
if (!devpriv->is_6711) {
- err &= comedi_buf_get(s, &d);
- if (err == 0)
- break;
- chan++;
+ comedi_buf_read_samples(s, &d, 1);
i++;
packed_data |= (d << 16) & 0xffff0000;
}
@@ -1159,12 +1144,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
} else {
ni_writew(dev, d, DAC_FIFO_Data);
}
- chan++;
- chan %= cmd->chanlist_len;
}
- async->cur_chan = chan;
- if (err == 0)
- async->events |= COMEDI_CB_OVERFLOW;
}
/*
@@ -1187,21 +1167,20 @@ static int ni_ao_fifo_half_empty(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct ni_board_struct *board = dev->board_ptr;
- int n;
+ unsigned int nbytes;
+ unsigned int nsamples;
- n = comedi_buf_read_n_available(s);
- if (n == 0) {
+ nbytes = comedi_buf_read_n_available(s);
+ if (nbytes == 0) {
s->async->events |= COMEDI_CB_OVERFLOW;
return 0;
}
- n /= sizeof(short);
- if (n > board->ao_fifo_depth / 2)
- n = board->ao_fifo_depth / 2;
-
- ni_ao_fifo_load(dev, s, n);
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples > board->ao_fifo_depth / 2)
+ nsamples = board->ao_fifo_depth / 2;
- s->async->events |= COMEDI_CB_BLOCK;
+ ni_ao_fifo_load(dev, s, nsamples);
return 1;
}
@@ -1211,7 +1190,8 @@ static int ni_ao_prep_fifo(struct comedi_device *dev,
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
- int n;
+ unsigned int nbytes;
+ unsigned int nsamples;
/* reset fifo */
ni_stc_writew(dev, 1, DAC_FIFO_Clear);
@@ -1219,17 +1199,17 @@ static int ni_ao_prep_fifo(struct comedi_device *dev,
ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
/* load some data */
- n = comedi_buf_read_n_available(s);
- if (n == 0)
+ nbytes = comedi_buf_read_n_available(s);
+ if (nbytes == 0)
return 0;
- n /= sizeof(short);
- if (n > board->ao_fifo_depth)
- n = board->ao_fifo_depth;
+ nsamples = comedi_bytes_to_samples(s, nbytes);
+ if (nsamples > board->ao_fifo_depth)
+ nsamples = board->ao_fifo_depth;
- ni_ao_fifo_load(dev, s, n);
+ ni_ao_fifo_load(dev, s, nsamples);
- return n;
+ return nsamples;
}
static void ni_ai_fifo_read(struct comedi_device *dev,
@@ -1237,44 +1217,42 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
+ u32 dl;
+ unsigned short data;
int i;
if (devpriv->is_611x) {
- unsigned short data[2];
- u32 dl;
-
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16) & 0xffff;
- data[1] = dl & 0xffff;
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
/* Check if there's a single sample stuck in the FIFO */
if (n % 2) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
- data[0] = dl & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
- unsigned short data[2];
- u32 dl;
-
/* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- data[1] = dl & 0xffff;
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
if (n % 2) {
/* Assume there is a single sample stuck in the FIFO */
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01, AIFIFO_Control_6143);
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else {
if (n > sizeof(devpriv->ai_fifo_buffer) /
@@ -1288,9 +1266,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, ADC_FIFO_Data_Register);
}
- cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
- n *
- sizeof(devpriv->ai_fifo_buffer[0]));
+ comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, n);
}
}
@@ -1313,8 +1289,8 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- unsigned short data[2];
u32 dl;
+ unsigned short data;
unsigned short fifo_empty;
int i;
@@ -1324,9 +1300,10 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
dl = ni_readl(dev, ADC_FIFO_Data_611x);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16);
- data[1] = (dl & 0xffff);
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = dl >> 16;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
i = 0;
@@ -1334,9 +1311,10 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
dl = ni_readl(dev, AIFIFO_Data_6143);
/* This may get the hi/lo data in the wrong order */
- data[0] = (dl >> 16);
- data[1] = (dl & 0xffff);
- cfc_write_array_to_buffer(s, data, sizeof(data));
+ data = dl >> 16;
+ comedi_buf_write_samples(s, &data, 1);
+ data = dl & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
i += 2;
}
/* Check if stranded sample is present */
@@ -1344,8 +1322,8 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
/* Get stranded sample into FIFO */
ni_writel(dev, 0x01, AIFIFO_Control_6143);
dl = ni_readl(dev, AIFIFO_Data_6143);
- data[0] = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data[0]);
+ data = (dl >> 16) & 0xffff;
+ comedi_buf_write_samples(s, &data, 1);
}
} else {
@@ -1364,10 +1342,7 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, ADC_FIFO_Data_Register);
}
- cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer,
- i *
- sizeof(devpriv->
- ai_fifo_buffer[0]));
+ comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, i);
}
}
}
@@ -1386,7 +1361,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
if (ni_readb(dev, XXX_Status) & 0x80) {
dl = ni_readl(dev, ADC_FIFO_Data_611x);
data = (dl & 0xffff);
- cfc_write_to_buffer(s, data);
+ comedi_buf_write_samples(s, &data, 1);
}
}
@@ -1408,7 +1383,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
/* This may get the hi/lo data in the wrong order */
data = (dl >> 16) & 0xffff;
- cfc_write_to_buffer(s, data);
+ comedi_buf_write_samples(s, &data, 1);
}
}
@@ -1462,7 +1437,7 @@ static void handle_gpct_interrupt(struct comedi_device *dev,
ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index],
s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
#endif
}
@@ -1518,7 +1493,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (comedi_is_subdevice_running(s)) {
s->async->events |=
COMEDI_CB_ERROR | COMEDI_CB_EOA;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
return;
}
@@ -1533,7 +1508,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & (AI_Overrun_St | AI_Overflow_St))
s->async->events |= COMEDI_CB_OVERFLOW;
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return;
}
if (status & AI_SC_TC_St) {
@@ -1559,7 +1534,7 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if ((status & AI_STOP_St))
ni_handle_eos(dev, s);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1635,7 +1610,7 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
#endif
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1645,12 +1620,12 @@ static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int length = num_bytes / bytes_per_sample(s);
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned short *array = data;
unsigned int *larray = data;
unsigned int i;
- for (i = 0; i < length; i++) {
+ for (i = 0; i < nsamples; i++) {
#ifdef PCIDMA
if (s->subdev_flags & SDF_LSAMPL)
larray[i] = le32_to_cpu(larray[i]);
@@ -2253,9 +2228,6 @@ static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
/* Step 1 : check if triggers are trivially valid */
- if ((cmd->flags & CMDF_WRITE))
- cmd->flags &= ~CMDF_WRITE;
-
err |= cfc_check_trigger_src(&cmd->start_src,
TRIG_NOW | TRIG_INT | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
@@ -2762,11 +2734,11 @@ static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int chan_index)
{
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int length = num_bytes / bytes_per_sample(s);
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned short *array = data;
unsigned int i;
- for (i = 0; i < length; i++) {
+ for (i = 0; i < nsamples; i++) {
unsigned int range = CR_RANGE(cmd->chanlist[chan_index]);
unsigned short val = array[i];
@@ -2981,12 +2953,15 @@ static int ni_ao_insn_config(struct comedi_device *dev,
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
+ unsigned int nbytes;
switch (data[0]) {
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
switch (data[1]) {
case COMEDI_OUTPUT:
- data[2] = 1 + board->ao_fifo_depth * sizeof(short);
+ nbytes = comedi_samples_to_bytes(s,
+ board->ao_fifo_depth);
+ data[2] = 1 + nbytes;
if (devpriv->mite)
data[2] += devpriv->mite->fifo_size;
break;
@@ -3288,9 +3263,6 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
/* Step 1 : check if triggers are trivially valid */
- if ((cmd->flags & CMDF_WRITE) == 0)
- cmd->flags |= CMDF_WRITE;
-
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
@@ -3515,9 +3487,6 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
- if (err)
- return 2;
-
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
@@ -3693,7 +3662,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
M_Offset_CDIO_Command);
/* s->async->events |= COMEDI_CB_EOA; */
}
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
@@ -3976,7 +3945,7 @@ static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg)
stc_register = Interrupt_B_Enable_Register;
break;
default:
- printk("%s: unhandled register 0x%x in switch.\n",
+ pr_err("%s: unhandled register 0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -5472,7 +5441,6 @@ static int ni_E_init(struct comedi_device *dev,
s->range_table = board->ao_range_table;
s->insn_config = ni_ao_insn_config;
s->insn_write = ni_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
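The ni_mio_common AO changes above measure the buffer in bytes, convert once, and let comedi_buf_read_samples() do the per-sample bookkeeping. A reduced sketch of that FIFO-load loop (MYDEV_AO_FIFO_REG and max_fifo_samples are placeholders):

static void mydev_ao_fifo_load(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int max_fifo_samples)
{
	unsigned int nbytes = comedi_buf_read_n_available(s);
	unsigned int nsamples = comedi_bytes_to_samples(s, nbytes);
	unsigned short d;
	unsigned int i;

	if (nsamples > max_fifo_samples)
		nsamples = max_fifo_samples;

	for (i = 0; i < nsamples; i++) {
		/* reads one sample and marks it consumed in the buffer */
		if (!comedi_buf_read_samples(s, &d, 1))
			break;
		outw(d, dev->iobase + MYDEV_AO_FIFO_REG);
	}
}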
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 5252cba82e5e..db7e8aac67b5 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -384,11 +384,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct mite_struct *mite = devpriv->mite;
-
- /* int i, j; */
- unsigned int auxdata = 0;
- unsigned short data1 = 0;
- unsigned short data2 = 0;
+ unsigned int auxdata;
int flags;
int status;
int work = 0;
@@ -451,13 +447,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
goto out;
}
auxdata = readl(dev->mmio + Group_1_FIFO);
- data1 = auxdata & 0xffff;
- data2 = (auxdata & 0xffff0000) >> 16;
- comedi_buf_put(s, data1);
- comedi_buf_put(s, data2);
+ comedi_buf_write_samples(s, &auxdata, 1);
flags = readb(dev->mmio + Group_1_Flags);
}
- async->events |= COMEDI_CB_BLOCK;
}
if (flags & CountExpired) {
@@ -485,7 +477,7 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
out:
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
#if 0
if (!tag)
writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 29efce30eb7f..bd69c3f0acdc 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -334,7 +334,7 @@ static inline unsigned RTSI_Output_Bit(unsigned channel, int is_mseries)
max_channel = 6;
}
if (channel > max_channel) {
- printk("%s: bug, invalid RTSI_channel=%i\n", __func__, channel);
+ pr_err("%s: bug, invalid RTSI_channel=%i\n", __func__, channel);
return 0;
}
return 1 << (base_bit_shift + channel);
@@ -1090,7 +1090,7 @@ static inline int M_Offset_Static_AI_Control(int i)
0x263,
};
if (((unsigned)i) >= ARRAY_SIZE(offset)) {
- printk("%s: invalid channel=%i\n", __func__, i);
+ pr_err("%s: invalid channel=%i\n", __func__, i);
return offset[0];
}
return offset[i];
@@ -1105,7 +1105,7 @@ static inline int M_Offset_AO_Reference_Attenuation(int channel)
0x267
};
if (((unsigned)channel) >= ARRAY_SIZE(offset)) {
- printk("%s: invalid channel=%i\n", __func__, channel);
+ pr_err("%s: invalid channel=%i\n", __func__, channel);
return offset[0];
}
return offset[channel];
@@ -1114,7 +1114,7 @@ static inline int M_Offset_AO_Reference_Attenuation(int channel)
static inline unsigned M_Offset_PFI_Output_Select(unsigned n)
{
if (n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS) {
- printk("%s: invalid pfi output select register=%i\n",
+ pr_err("%s: invalid pfi output select register=%i\n",
__func__, n);
return M_Offset_PFI_Output_Select_1;
}
@@ -1171,7 +1171,7 @@ static inline unsigned MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned
RTSI_channel)
{
if (RTSI_channel > 7) {
- printk("%s: bug, invalid RTSI_channel=%i\n", __func__,
+ pr_err("%s: bug, invalid RTSI_channel=%i\n", __func__,
RTSI_channel);
return 0;
}
@@ -1192,7 +1192,7 @@ static inline unsigned MSeries_PLL_Divisor_Bits(unsigned divisor)
{
static const unsigned max_divisor = 0x10;
if (divisor < 1 || divisor > max_divisor) {
- printk("%s: bug, invalid divisor=%i\n", __func__, divisor);
+ pr_err("%s: bug, invalid divisor=%i\n", __func__, divisor);
return 0;
}
return (divisor & 0xf) << 8;
@@ -1202,7 +1202,7 @@ static inline unsigned MSeries_PLL_Multiplier_Bits(unsigned multiplier)
{
static const unsigned max_multiplier = 0x100;
if (multiplier < 1 || multiplier > max_multiplier) {
- printk("%s: bug, invalid multiplier=%i\n", __func__,
+ pr_err("%s: bug, invalid multiplier=%i\n", __func__,
multiplier);
return 0;
}
@@ -1464,7 +1464,7 @@ struct ni_private {
unsigned short ai_fifo_buffer[0x2000];
uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
- uint32_t serial_number;
+ __be32 serial_number;
struct mite_struct *mite;
struct mite_channel *ai_mite_chan;
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 26e7291c4a51..6037bec77ef1 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -371,9 +371,8 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
of gate interrupt via dma read/write
and report bogus gate errors */
if (counter->counter_dev->variant !=
- ni_gpct_variant_660x) {
+ ni_gpct_variant_660x)
*gate_error = 1;
- }
}
}
if (gxx_status & GI_TC_ERROR(cidx)) {
@@ -450,11 +449,10 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
return;
}
gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC) {
+ if (gpct_mite_status & CHSR_LINKC)
writel(CHOR_CLRLC,
counter->mite_chan->mite->mite_io_addr +
MITE_CHOR(counter->mite_chan->channel));
- }
mite_sync_input_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&counter->lock, flags);
}
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index df7ada8611f4..3b5a1b90366d 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -561,7 +561,7 @@ static int ni6501_auto_attach(struct comedi_device *dev,
/* Counter subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_LSAMPL;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
s->n_chan = 1;
s->maxdata = 0xffffffff;
s->insn_read = ni6501_cnt_insn_read;
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 47f4887108a7..938aebc8e0ea 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -156,7 +156,6 @@ static const struct pcl711_board boardtypes[] = {
};
struct pcl711_private {
- unsigned int ntrig;
unsigned int divisor1;
unsigned int divisor2;
};
@@ -199,7 +198,6 @@ static int pcl711_ai_cancel(struct comedi_device *dev,
static irqreturn_t pcl711_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
- struct pcl711_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int data;
@@ -213,16 +211,14 @@ static irqreturn_t pcl711_interrupt(int irq, void *d)
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
- if (comedi_buf_put(s, data) == 0) {
- s->async->events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
- } else {
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- if (cmd->stop_src == TRIG_COUNT && !(--devpriv->ntrig)) {
- pcl711_ai_set_mode(dev, PCL711_MODE_SOFTTRIG);
- s->async->events |= COMEDI_CB_EOA;
- }
- }
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &data, 1);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -373,14 +369,10 @@ static void pcl711_ai_load_counters(struct comedi_device *dev)
static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct pcl711_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
pcl711_set_changain(dev, s, cmd->chanlist[0]);
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ntrig = cmd->stop_arg;
-
if (cmd->scan_begin_src == TRIG_TIMER) {
pcl711_ai_load_counters(dev);
outb(PCL711_INT_STAT_CLR, dev->iobase + PCL711_INT_STAT_REG);
@@ -505,7 +497,6 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s->insn_write = pcl711_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index dc179bd02dfd..86f713fdf1d0 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -235,9 +235,8 @@ static irqreturn_t pcl726_interrupt(int irq, void *d)
if (devpriv->cmd_running) {
pcl726_intr_cancel(dev, s);
- comedi_buf_put(s, 0);
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- comedi_event(dev, s);
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
}
return IRQ_HANDLED;
@@ -377,7 +376,6 @@ static int pcl726_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table_list = devpriv->rangelist;
s->insn_write = pcl726_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index fd5ea6e01619..ac243ca5e0f8 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -512,7 +512,6 @@ struct pcl812_private {
unsigned int last_ai_chanspec;
unsigned char mode_reg_int; /* there is stored INT number for some card */
unsigned int ai_poll_ptr; /* how many samples to transfer on poll */
- unsigned int ai_act_scan; /* how many scans we finished */
unsigned int dmapages;
unsigned int hwdmasize;
unsigned long dmabuf[2]; /* PTR to DMA buf */
@@ -556,8 +555,8 @@ static void pcl812_ai_setup_dma(struct comedi_device *dev,
/* we use EOS, so adapt DMA buffer to one scan */
if (devpriv->ai_eos) {
- devpriv->dmabytestomove[0] = cfc_bytes_per_scan(s);
- devpriv->dmabytestomove[1] = cfc_bytes_per_scan(s);
+ devpriv->dmabytestomove[0] = comedi_bytes_per_scan(s);
+ devpriv->dmabytestomove[1] = comedi_bytes_per_scan(s);
devpriv->dma_runs_to_end = 1;
} else {
devpriv->dmabytestomove[0] = devpriv->hwdmasize;
@@ -572,7 +571,7 @@ static void pcl812_ai_setup_dma(struct comedi_device *dev,
devpriv->dma_runs_to_end = 1;
} else {
/* how many samples we must transfer? */
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
/* how many DMA pages we must fill */
devpriv->dma_runs_to_end =
@@ -807,9 +806,7 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_dma = 0;
}
- devpriv->ai_act_scan = 0;
devpriv->ai_poll_ptr = 0;
- s->async->cur_chan = 0;
/* don't we want wake up every scan? */
if (cmd->flags & CMDF_WAKE_EOS) {
@@ -841,21 +838,10 @@ static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static bool pcl812_ai_next_chan(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcl812_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- s->async->events |= COMEDI_CB_EOS;
- }
-
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -867,7 +853,9 @@ static void pcl812_handle_eoc(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int chan = s->async->cur_chan;
unsigned int next_chan;
+ unsigned short val;
if (pcl812_ai_eoc(dev, s, NULL, 0)) {
dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
@@ -875,13 +863,12 @@ static void pcl812_handle_eoc(struct comedi_device *dev,
return;
}
- comedi_buf_put(s, pcl812_ai_get_sample(dev, s));
+ val = pcl812_ai_get_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
- next_chan = s->async->cur_chan + 1;
- if (next_chan >= cmd->chanlist_len)
- next_chan = 0;
- if (cmd->chanlist[s->async->cur_chan] != cmd->chanlist[next_chan])
+ next_chan = s->async->cur_chan;
+ if (cmd->chanlist[chan] != cmd->chanlist[next_chan])
pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);
pcl812_ai_next_chan(dev, s);
@@ -893,9 +880,11 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
unsigned int bufptr, unsigned int len)
{
unsigned int i;
+ unsigned short val;
for (i = len; i; i--) {
- comedi_buf_put(s, ptr[bufptr++]);
+ val = ptr[bufptr++];
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl812_ai_next_chan(dev, s))
break;
@@ -939,7 +928,7 @@ static irqreturn_t pcl812_interrupt(int irq, void *d)
pcl812_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -1335,7 +1324,6 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
break;
}
s->insn_write = pcl812_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index aa6487132017..73deb4bd5c93 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -122,7 +122,6 @@ struct pcl816_private {
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to the end of record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
- int ai_act_scan; /* how many scans we finished */
unsigned int ai_poll_ptr; /* how many samples to transfer on poll */
unsigned int divisor1;
unsigned int divisor2;
@@ -160,7 +159,7 @@ static void pcl816_ai_setup_dma(struct comedi_device *dev,
bytes = devpriv->hwdmasize;
if (cmd->stop_src == TRIG_COUNT) {
/* how many */
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
/* how many DMA pages we must fill */
devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
@@ -286,21 +285,10 @@ static int pcl816_ai_eoc(struct comedi_device *dev,
static bool pcl816_ai_next_chan(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcl816_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan++;
- s->async->events |= COMEDI_CB_EOS;
- }
-
if (cmd->stop_src == TRIG_COUNT &&
- devpriv->ai_act_scan >= cmd->stop_arg) {
- /* all data sampled */
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -313,10 +301,12 @@ static void transfer_from_dma_buf(struct comedi_device *dev,
unsigned short *ptr,
unsigned int bufptr, unsigned int len)
{
+ unsigned short val;
int i;
for (i = 0; i < len; i++) {
- comedi_buf_put(s, ptr[bufptr++]);
+ val = ptr[bufptr++];
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl816_ai_next_chan(dev, s))
return;
@@ -355,7 +345,7 @@ static irqreturn_t pcl816_interrupt(int irq, void *d)
pcl816_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -508,8 +498,6 @@ static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
pcl816_ai_setup_chanlist(dev, cmd->chanlist, seglen);
udelay(1);
- devpriv->ai_act_scan = 0;
- s->async->cur_chan = 0;
devpriv->ai_cmd_running = 1;
devpriv->ai_poll_ptr = 0;
devpriv->ai_cmd_canceled = 0;
@@ -566,7 +554,7 @@ static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_poll_ptr = top1; /* new buffer position */
spin_unlock_irqrestore(&dev->spinlock, flags);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return comedi_buf_n_bytes_ready(s);
}
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index ac19e83ce62a..8edea35532a9 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -313,8 +313,6 @@ struct pcl818_private {
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
unsigned int ns_min; /* minimal allowed delay between samples (in us) for actual card */
int i8253_osc_base; /* 1/frequency of on-board oscillator in ns */
- int ai_act_scan; /* how many scans we finished */
- int ai_act_chan; /* actual position in actual scan */
unsigned int act_chanlist[16]; /* MUX setting for actual AI operations */
unsigned int act_chanlist_len; /* how long is actual MUX list */
unsigned int act_chanlist_pos; /* actual position in MUX list */
@@ -352,7 +350,7 @@ static void pcl818_ai_setup_dma(struct comedi_device *dev,
disable_dma(devpriv->dma); /* disable dma */
bytes = devpriv->hwdmasize;
if (cmd->stop_src == TRIG_COUNT) {
- bytes = cmd->stop_arg * cfc_bytes_per_scan(s);
+ bytes = cmd->stop_arg * comedi_bytes_per_scan(s);
devpriv->dma_runs_to_end = bytes / devpriv->hwdmasize;
devpriv->last_dma_run = bytes % devpriv->hwdmasize;
devpriv->dma_runs_to_end--;
@@ -521,21 +519,12 @@ static bool pcl818_ai_next_chan(struct comedi_device *dev,
struct pcl818_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- s->async->events |= COMEDI_CB_BLOCK;
-
devpriv->act_chanlist_pos++;
if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- s->async->cur_chan++;
- if (s->async->cur_chan >= cmd->chanlist_len) {
- s->async->cur_chan = 0;
- devpriv->ai_act_scan--;
- s->async->events |= COMEDI_CB_EOS;
- }
-
- if (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan == 0) {
- /* all data sampled */
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
s->async->events |= COMEDI_CB_EOA;
return false;
}
@@ -560,7 +549,7 @@ static void pcl818_handle_eoc(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
return;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
pcl818_ai_next_chan(dev, s);
}
@@ -589,7 +578,7 @@ static void pcl818_handle_dma(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
break;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl818_ai_next_chan(dev, s))
break;
@@ -630,7 +619,7 @@ static void pcl818_handle_fifo(struct comedi_device *dev,
if (pcl818_ai_dropout(dev, s, chan))
break;
- comedi_buf_put(s, val);
+ comedi_buf_write_samples(s, &val, 1);
if (!pcl818_ai_next_chan(dev, s))
break;
@@ -642,6 +631,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct pcl818_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
if (!dev->attached || !devpriv->ai_cmd_running) {
pcl818_ai_clear_eoc(dev);
@@ -655,7 +645,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
* being reprogrammed while a DMA transfer is in
* progress.
*/
- devpriv->ai_act_scan = 0;
+ s->async->scans_done = cmd->stop_arg;
s->cancel(dev, s);
return IRQ_HANDLED;
}
@@ -669,7 +659,7 @@ static irqreturn_t pcl818_interrupt(int irq, void *d)
pcl818_ai_clear_eoc(dev);
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
@@ -829,8 +819,6 @@ static int pcl818_ai_cmd(struct comedi_device *dev,
pcl818_ai_setup_chanlist(dev, cmd->chanlist, seglen);
devpriv->ai_data_len = s->async->prealloc_bufsz;
- devpriv->ai_act_scan = cmd->stop_arg;
- devpriv->ai_act_chan = 0;
devpriv->ai_cmd_running = 1;
devpriv->ai_cmd_canceled = 0;
devpriv->act_chanlist_pos = 0;
@@ -873,7 +861,8 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
if (devpriv->dma) {
if (cmd->stop_src == TRIG_NONE ||
- (cmd->stop_src == TRIG_COUNT && devpriv->ai_act_scan > 0)) {
+ (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done < cmd->stop_arg)) {
if (!devpriv->ai_cmd_canceled) {
/*
* Wait for running dma transfer to end,
@@ -1169,7 +1158,6 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->range_table = &range_unknown;
}
s->insn_write = pcl818_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index fc40ee2b34e9..f0059e935da0 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -190,7 +190,6 @@ struct pcmmio_private {
spinlock_t pagelock; /* protects the page registers */
spinlock_t spinlock; /* protects the member variables */
unsigned int enabled_mask;
- unsigned int stop_count;
unsigned int active:1;
};
@@ -337,7 +336,6 @@ static void pcmmio_handle_dio_intr(struct comedi_device *dev,
{
struct pcmmio_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
int i;
@@ -357,31 +355,16 @@ static void pcmmio_handle_dio_intr(struct comedi_device *dev,
val |= (1 << i);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val) &&
- comedi_buf_put(s, val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr(dev, s);
- }
+ comedi_buf_write_samples(s, &val, 1);
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT && devpriv->stop_count > 0) {
- devpriv->stop_count--;
- if (devpriv->stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmmio_stop_intr(dev, s);
- }
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
done:
spin_unlock_irqrestore(&devpriv->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
static irqreturn_t interrupt_pcmmio(int irq, void *d)
@@ -481,8 +464,6 @@ static int pcmmio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&devpriv->spinlock, flags);
devpriv->active = 1;
- devpriv->stop_count = cmd->stop_arg;
-
/* Set up start of acquisition. */
if (cmd->start_src == TRIG_INT)
s->async->inttrig = pcmmio_inttrig_start_intr;
@@ -751,7 +732,6 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xffff;
s->range_table = &pcmmio_ao_ranges;
s->insn_write = pcmmio_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -774,7 +754,7 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->insn_config = pcmmio_dio_insn_config;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
+ s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL | SDF_PACKED;
s->len_chanlist = s->n_chan;
s->cancel = pcmmio_cancel;
s->do_cmd = pcmmio_cmd;
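In pcmmio (and pcmuio below) the two 16-bit comedi_buf_put() calls become a single comedi_buf_write_samples() of one 32-bit sample, which is why the subdevice now also advertises SDF_LSAMPL | SDF_PACKED: each scan is one long sample whose bits are the packed channel states. A stand-alone illustration of that packing, with a made-up helper name:

```c
#include <stdio.h>

/* stand-in for the "val |= (1 << i)" loop in the interrupt handlers */
static unsigned int pack_dio_scan(const int *levels, int n_chan)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < n_chan; i++)
		if (levels[i])
			val |= 1u << i;
	return val;
}

int main(void)
{
	int levels[24] = { 1, 0, 1, [23] = 1 };	/* channels 0, 2 and 23 high */

	/* one packed 32-bit sample per scan: 0x00800005 */
	printf("sample = 0x%08x\n", pack_dio_scan(levels, 24));
	return 0;
}
```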
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index d4fe2ec25ecf..0f5483b6147f 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -130,7 +130,6 @@ struct pcmuio_asic {
spinlock_t pagelock; /* protects the page registers */
spinlock_t spinlock; /* protects member variables */
unsigned int enabled_mask;
- unsigned int stop_count;
unsigned int active:1;
};
@@ -317,7 +316,6 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
int asic = pcmuio_subdevice_to_asic(s);
struct pcmuio_asic *chip = &devpriv->asics[asic];
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned oldevents = s->async->events;
unsigned int val = 0;
unsigned long flags;
unsigned int i;
@@ -337,33 +335,16 @@ static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
val |= (1 << i);
}
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s, val) &&
- comedi_buf_put(s, val >> 16)) {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr(dev, s);
- }
+ comedi_buf_write_samples(s, &val, 1);
- /* Check for end of acquisition. */
- if (cmd->stop_src == TRIG_COUNT) {
- if (chip->stop_count > 0) {
- chip->stop_count--;
- if (chip->stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr(dev, s);
- }
- }
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg)
+ s->async->events |= COMEDI_CB_EOA;
done:
spin_unlock_irqrestore(&chip->spinlock, flags);
- if (oldevents != s->async->events)
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
}
static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
@@ -487,8 +468,6 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&chip->spinlock, flags);
chip->active = 1;
- chip->stop_count = cmd->stop_arg;
-
/* Set up start of acquisition. */
if (cmd->start_src == TRIG_INT)
s->async->inttrig = pcmuio_inttrig_start_intr;
@@ -614,7 +593,8 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if ((i == 0 && dev->irq) || (i == 2 && devpriv->irq2)) {
/* setup the interrupt subdevice */
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
+ s->subdev_flags |= SDF_CMD_READ | SDF_LSAMPL |
+ SDF_PACKED;
s->len_chanlist = s->n_chan;
s->cancel = pcmuio_cancel;
s->do_cmd = pcmuio_cmd;
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 6407df0404f0..96098110b0b3 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -65,8 +65,6 @@ struct daqp_private {
enum { semaphore, buffer } interrupt_mode;
struct completion eos;
-
- int count;
};
/* The DAQP communicates with the system through a 16 byte I/O window. */
@@ -194,6 +192,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
struct comedi_device *dev = dev_id;
struct daqp_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_cmd *cmd = &s->async->cmd;
int loop_limit = 10000;
int status;
@@ -221,18 +220,16 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
data |= inb(dev->iobase + DAQP_FIFO) << 8;
data ^= 0x8000;
- comedi_buf_put(s, data);
+ comedi_buf_write_samples(s, &data, 1);
/* If there's a limit, decrement it
* and stop conversion if zero
*/
- if (devpriv->count > 0) {
- devpriv->count--;
- if (devpriv->count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- break;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ s->async->scans_done >= cmd->stop_arg) {
+ s->async->events |= COMEDI_CB_EOA;
+ break;
}
if ((loop_limit--) <= 0)
@@ -245,9 +242,7 @@ static enum irqreturn daqp_interrupt(int irq, void *dev_id)
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
}
- s->async->events |= COMEDI_CB_BLOCK;
-
- cfc_handle_events(dev, s);
+ comedi_handle_events(dev, s);
}
return IRQ_HANDLED;
}
@@ -575,12 +570,16 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*/
if (cmd->stop_src == TRIG_COUNT) {
- devpriv->count = cmd->stop_arg * cmd->scan_end_arg;
- threshold = 2 * devpriv->count;
- while (threshold > DAQP_FIFO_SIZE * 3 / 4)
- threshold /= 2;
+ unsigned long long nsamples;
+ unsigned long long nbytes;
+
+ nsamples = (unsigned long long)cmd->stop_arg *
+ cmd->scan_end_arg;
+ nbytes = nsamples * comedi_bytes_per_sample(s);
+ while (nbytes > DAQP_FIFO_SIZE * 3 / 4)
+ nbytes /= 2;
+ threshold = nbytes;
} else {
- devpriv->count = -1;
threshold = DAQP_FIFO_SIZE / 2;
}
@@ -736,12 +735,11 @@ static int daqp_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &range_bipolar5;
s->insn_write = daqp_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
@@ -756,7 +754,7 @@ static int daqp_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 1;
s->maxdata = 1;
s->insn_bits = daqp_do_insn_bits;
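The quatech_daqp_cs change computes the FIFO threshold from the total byte count of a TRIG_COUNT run, using 64-bit arithmetic so stop_arg * scan_end_arg cannot overflow, then halves it until it fits in three quarters of the FIFO. A user-space sketch of that calculation; the FIFO size and the 2-byte sample size are assumptions for the demo only:

```c
#include <stdio.h>

#define DAQP_FIFO_SIZE	4096	/* assumed FIFO size, in bytes */

static unsigned int daqp_threshold(unsigned int stop_arg,
				   unsigned int scan_end_arg,
				   unsigned int bytes_per_sample)
{
	unsigned long long nbytes;

	/* 64-bit product so a large stop_arg cannot overflow */
	nbytes = (unsigned long long)stop_arg * scan_end_arg * bytes_per_sample;
	while (nbytes > DAQP_FIFO_SIZE * 3 / 4)
		nbytes /= 2;
	return (unsigned int)nbytes;
}

int main(void)
{
	/* 1,000,000 scans of 4 channels, 2 bytes per sample */
	printf("threshold = %u bytes\n", daqp_threshold(1000000, 4, 2));
	return 0;
}
```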
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 7d4cb140959c..581aa58d9c0a 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -379,7 +379,6 @@ struct rtd_private {
long ai_count; /* total transfer size (samples) */
int xfer_count; /* # to transfer data. 0->1/2FIFO */
int flags; /* flag event modes */
- DECLARE_BITMAP(chan_is_bipolar, RTD_MAX_CHANLIST);
unsigned fifosz;
};
@@ -438,7 +437,6 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
unsigned int chanspec, int index)
{
const struct rtd_boardinfo *board = dev->board_ptr;
- struct rtd_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(chanspec);
unsigned int range = CR_RANGE(chanspec);
unsigned int aref = CR_AREF(chanspec);
@@ -451,17 +449,14 @@ static unsigned short rtd_convert_chan_gain(struct comedi_device *dev,
/* +-5 range */
r |= 0x000;
r |= (range & 0x7) << 4;
- __set_bit(index, devpriv->chan_is_bipolar);
} else if (range < board->range_uni10) {
/* +-10 range */
r |= 0x100;
r |= ((range - board->range_bip10) & 0x7) << 4;
- __set_bit(index, devpriv->chan_is_bipolar);
} else {
/* +10 range */
r |= 0x200;
r |= ((range - board->range_uni10) & 0x7) << 4;
- __clear_bit(index, devpriv->chan_is_bipolar);
}
switch (aref) {
@@ -561,6 +556,7 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
+ unsigned int range = CR_RANGE(insn->chanspec);
int ret;
int n;
@@ -586,9 +582,11 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/* read data */
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(0, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
+
+ /* convert bipolar data to comedi unsigned data */
+ if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
+
data[n] = d & s->maxdata;
}
@@ -606,9 +604,12 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
int count)
{
struct rtd_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
int ii;
for (ii = 0; ii < count; ii++) {
+ unsigned int range = CR_RANGE(cmd->chanlist[async->cur_chan]);
unsigned short d;
if (0 == devpriv->ai_count) { /* done */
@@ -618,41 +619,13 @@ static int ai_read_n(struct comedi_device *dev, struct comedi_subdevice *s,
d = readw(devpriv->las1 + LAS1_ADC_FIFO);
d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
- d = comedi_offset_munge(s, d);
- d &= s->maxdata;
-
- if (!comedi_buf_put(s, d))
- return -1;
-
- if (devpriv->ai_count > 0) /* < 0, means read forever */
- devpriv->ai_count--;
- }
- return 0;
-}
-
-/*
- unknown amout of data is waiting in fifo.
-*/
-static int ai_read_dregs(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct rtd_private *devpriv = dev->private;
-
- while (readl(dev->mmio + LAS0_ADC) & FS_ADC_NOT_EMPTY) {
- unsigned short d = readw(devpriv->las1 + LAS1_ADC_FIFO);
-
- if (0 == devpriv->ai_count) { /* done */
- continue; /* read rest */
- }
- d = d >> 3; /* low 3 bits are marker lines */
- if (test_bit(s->async->cur_chan, devpriv->chan_is_bipolar))
- /* convert to comedi unsigned data */
+ /* convert bipolar data to comedi unsigned data */
+ if (comedi_range_is_bipolar(s, range))
d = comedi_offset_munge(s, d);
d &= s->maxdata;
- if (!comedi_buf_put(s, d))
+ if (!comedi_buf_write_samples(s, &d, 1))
return -1;
if (devpriv->ai_count > 0) /* < 0, means read forever */
@@ -703,8 +676,6 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
if (0 == devpriv->ai_count)
goto xfer_done;
-
- comedi_event(dev, s);
} else if (devpriv->xfer_count > 0) {
if (fifo_status & FS_ADC_NOT_EMPTY) {
/* FIFO not empty */
@@ -713,8 +684,6 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
if (0 == devpriv->ai_count)
goto xfer_done;
-
- comedi_event(dev, s);
}
}
}
@@ -726,28 +695,16 @@ static irqreturn_t rtd_interrupt(int irq, void *d)
/* clear the interrupt */
writew(status, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
+
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
xfer_abort:
- writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
s->async->events |= COMEDI_CB_ERROR;
- devpriv->ai_count = 0; /* stop and don't transfer any more */
- /* fall into xfer_done */
xfer_done:
- /* pacer stop source: SOFTWARE */
- writel(0, dev->mmio + LAS0_PACER_STOP);
- writel(0, dev->mmio + LAS0_PACER); /* stop pacer */
- writel(0, dev->mmio + LAS0_ADC_CONVERSION);
- writew(0, dev->mmio + LAS0_IT);
-
- if (devpriv->ai_count > 0) { /* there shouldn't be anything left */
- fifo_status = readl(dev->mmio + LAS0_ADC);
- ai_read_dregs(dev, s); /* read anything left in FIFO */
- }
-
- s->async->events |= COMEDI_CB_EOA; /* signal end to comedi */
- comedi_event(dev, s);
+ s->async->events |= COMEDI_CB_EOA;
/* clear the interrupt */
status = readw(dev->mmio + LAS0_IT);
@@ -757,6 +714,8 @@ xfer_done:
fifo_status = readl(dev->mmio + LAS0_ADC);
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
+ comedi_handle_events(dev, s);
+
return IRQ_HANDLED;
}
@@ -1083,6 +1042,7 @@ static int rtd_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_count = 0; /* stop and don't transfer any more */
status = readw(dev->mmio + LAS0_IT);
overrun = readl(dev->mmio + LAS0_OVERRUN) & 0xffff;
+ writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
return 0;
}
@@ -1303,7 +1263,6 @@ static int rtd_auto_attach(struct comedi_device *dev,
s->maxdata = 0x0fff;
s->range_table = &rtd_ao_range;
s->insn_write = rtd_ao_winsn;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
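rtd520 no longer tracks bipolar channels in a private bitmap; it asks comedi_range_is_bipolar() per chanspec and converts with comedi_offset_munge(). The conversion is a sign-bit flip from two's complement to offset binary, the same XOR the usbdux handler further down spells out explicitly. A hedged user-space version, folding in the "d &= s->maxdata" masking the driver does afterwards:

```c
#include <stdio.h>

/* sign-bit flip plus masking, stand-in for comedi_offset_munge() + "d &= s->maxdata" */
static unsigned int offset_munge(unsigned int val, unsigned int maxdata)
{
	return (val ^ ((maxdata + 1) >> 1)) & maxdata;
}

int main(void)
{
	unsigned int maxdata = 0xfff;	/* 12-bit converter */

	/* two's-complement -1 lands just below mid-scale, 0 lands at mid-scale */
	printf("0x%03x -> 0x%03x\n", 0xfffu, offset_munge(0xfff, maxdata));
	printf("0x%03x -> 0x%03x\n", 0x000u, offset_munge(0x000, maxdata));
	return 0;
}
```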
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index e3d9f44cefb9..67b4b378bd01 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -313,7 +313,6 @@ static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
? rti800_ao_ranges[it->options[7]]
: &range_unknown;
s->insn_write = rti800_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index c81b01c40f12..96c3974207ae 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -100,7 +100,6 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xfff;
s->n_chan = 8;
s->insn_write = rti802_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 75872c6aec2a..6f3e8a08e75c 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -583,7 +583,6 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 0xffff;
s->range_table = &range_bipolar10;
s->insn_write = s526_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 0e7621e890c3..14932c5f3798 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -78,7 +78,6 @@ struct s626_buffer_dma {
struct s626_private {
uint8_t ai_cmd_running; /* ai_cmd is running */
- int ai_sample_count; /* number of samples to acquire */
unsigned int ai_sample_timer; /* time between samples in
* units of the timer */
int ai_convert_count; /* conversion counter */
@@ -1480,7 +1479,6 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
* from the final ADC of the previous poll list scan.
*/
uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
- bool finished = false;
int i;
/* get the data and hand it over to comedi */
@@ -1494,36 +1492,21 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
tempdata = s626_ai_reg_to_uint(*readaddr);
readaddr++;
- /* put data into read buffer */
- cfc_write_to_buffer(s, tempdata);
+ comedi_buf_write_samples(s, &tempdata, 1);
}
- /* end of scan occurs */
- async->events |= COMEDI_CB_EOS;
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
- if (cmd->stop_src == TRIG_COUNT) {
- devpriv->ai_sample_count--;
- if (devpriv->ai_sample_count <= 0) {
- devpriv->ai_cmd_running = 0;
-
- /* Stop RPS program */
- s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
-
- /* send end of acquisition */
- async->events |= COMEDI_CB_EOA;
-
- /* disable master interrupt */
- finished = true;
- }
- }
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ devpriv->ai_cmd_running = 0;
if (devpriv->ai_cmd_running && cmd->scan_begin_src == TRIG_EXT)
s626_dio_set_irq(dev, cmd->scan_begin_arg);
- /* tell comedi that data is there */
- comedi_event(dev, s);
+ comedi_handle_events(dev, s);
- return finished;
+ return !devpriv->ai_cmd_running;
}
static irqreturn_t s626_irq_handler(int irq, void *d)
@@ -1970,13 +1953,13 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (*nanosec + base / 2) / base;
+ divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec + base - 1) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
@@ -2102,8 +2085,6 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
break;
}
- devpriv->ai_sample_count = cmd->stop_arg;
-
s626_reset_adc(dev, ppl);
switch (cmd->start_src) {
@@ -2820,7 +2801,6 @@ static int s626_auto_attach(struct comedi_device *dev,
s->maxdata = 0x3fff;
s->range_table = &range_bipolar10;
s->insn_write = s626_ao_insn_write;
- s->insn_read = comedi_readback_insn_read;
ret = comedi_alloc_subdev_readback(s);
if (ret)
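s626_ns_to_timer() now uses the kernel's DIV_ROUND_CLOSEST()/DIV_ROUND_UP() instead of the open-coded rounding. The snippet below checks the equivalence in user space; the macro bodies are simplified stand-ins for the kernel versions (the real DIV_ROUND_CLOSEST also handles signed operands), and the timer base is an assumption:

```c
#include <assert.h>
#include <stdio.h>

/* simplified stand-ins for the kernel macros */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int base = 500;	/* assumed ns per timer tick */
	unsigned int ns;

	for (ns = 1; ns < 100000; ns++) {
		assert(DIV_ROUND_CLOSEST(ns, base) == (ns + base / 2) / base);
		assert(DIV_ROUND_UP(ns, base) == (ns + base - 1) / base);
	}
	printf("rounding helpers match the open-coded expressions\n");
	return 0;
}
```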
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 167f82418cb4..71226ee9064e 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -742,7 +742,7 @@ static int serial2002_attach(struct comedi_device *dev,
/* digital output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -760,7 +760,7 @@ static int serial2002_attach(struct comedi_device *dev,
/* analog output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = NULL;
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 5adbfedf780f..4737dbf8e01d 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,37 +1,34 @@
/*
- comedi/drivers/usbdux.c
- Copyright (C) 2003-2007 Bernd Porr, Bernd.Porr@f2s.com
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * usbdux.c
+ * Copyright (C) 2003-2014 Bernd Porr, mail@berndporr.me.uk
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: usbdux
-Description: University of Stirling USB DAQ & INCITE Technology Limited
-Devices: [ITL] USB-DUX (usbdux.o)
-Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 8 Dec 2008
-Status: Stable
-Configuration options:
- You have to upload firmware with the -i option. The
- firmware is usually installed under /usr/share/usb or
- /usr/local/share/usb or /lib/firmware.
-
-Connection scheme for the counter at the digital port:
- 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
- The sampling rate of the counter is approximately 500Hz.
-
-Please note that under USB2.0 the length of the channel list determines
-the max sampling rate. If you sample only one channel you get 8kHz
-sampling rate. If you sample two channels you get 4kHz and so on.
-*/
+ * Driver: usbdux
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited
+ * Devices: (ITL) USB-DUX [usbdux]
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: Stable
+ * Connection scheme for the counter at the digital port:
+ * 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
+ * The sampling rate of the counter is approximately 500Hz.
+ *
+ * Note that under USB2.0 the length of the channel list determines
+ * the max sampling rate. If you sample only one channel you get 8kHz
+ * sampling rate. If you sample two channels you get 4kHz and so on.
+ */
+
/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
@@ -205,9 +202,6 @@ struct usbdux_private {
unsigned int ao_cmd_running:1;
unsigned int pwm_cmd_running:1;
- /* number of samples to acquire */
- int ai_sample_count;
- int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
@@ -253,128 +247,107 @@ static int usbdux_ai_cancel(struct comedi_device *dev,
return 0;
}
-/* analogue IN - interrupt service routine */
+static void usbduxsub_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
+{
+ struct usbdux_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ int ret;
+ int i;
+
+ devpriv->ai_counter--;
+ if (devpriv->ai_counter == 0) {
+ devpriv->ai_counter = devpriv->ai_timer;
+
+ /* get the data from the USB bus and hand it over to comedi */
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
+ uint16_t val = le16_to_cpu(devpriv->in_buf[i]);
+
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
+ /* transfer data */
+ if (!comedi_buf_write_samples(s, &val, 1))
+ return;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "urb resubmit failed in int-context! err=%d\n",
+ ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler!\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
static void usbduxsub_ai_isoc_irq(struct urb *urb)
{
struct comedi_device *dev = urb->context;
struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
- int i, err;
- /* first we test if something unusual has just happened */
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
+ return;
+
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
+ usbduxsub_ai_handle_urb(dev, s, urb);
break;
+
case -EILSEQ:
- /* error in the ISOchronous data */
- /* we don't copy the data into the transfer buffer */
- /* and recycle the last data byte */
+ /*
+ * error in the ISOchronous data
+ * we don't copy the data into the transfer buffer
+ * and recycle the last data byte
+ */
dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
+ usbduxsub_ai_handle_urb(dev, s, urb);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /* happens after an unlink command */
- if (devpriv->ai_cmd_running) {
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* stop the transfer w/o unlink */
- usbdux_ai_stop(dev, 0);
- }
- return;
+ /* after an unlink command, unplug, ... etc */
+ async->events |= COMEDI_CB_ERROR;
+ break;
default:
- /* a real error on the bus */
- /* pass error to comedi if we are really running a command */
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "Non-zero urb status received in ai intr context: %d\n",
- urb->status);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
- usbdux_ai_stop(dev, 0);
- }
- return;
+ /* a real error */
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ai intr context: %d\n",
+ urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
}
/*
- * at this point we are reasonably sure that nothing dodgy has happened
- * are we running a command?
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
*/
- if (unlikely(!devpriv->ai_cmd_running)) {
- /*
- * not running a command, do not continue execution if no
- * asynchronous command is running in particular not resubmit
- */
- return;
- }
-
- urb->dev = comedi_to_usb_dev(dev);
-
- /* resubmit the urb */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err < 0)) {
- dev_err(dev->class_dev,
- "urb resubmit failed in int-context! err=%d\n", err);
- if (err == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler!\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
usbdux_ai_stop(dev, 0);
- return;
- }
-
- devpriv->ai_counter--;
- if (likely(devpriv->ai_counter > 0))
- return;
-
- /* timer zero, transfer measurements to comedi */
- devpriv->ai_counter = devpriv->ai_timer;
-
- /* test, if we transmit only a fixed number of samples */
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ai_sample_count--;
- /* all samples received? */
- if (devpriv->ai_sample_count < 0) {
- /* prevent a resubmit next time */
- usbdux_ai_stop(dev, 0);
- /* say comedi that the acquistion is over */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
- }
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- unsigned int range = CR_RANGE(cmd->chanlist[i]);
- uint16_t val = le16_to_cpu(devpriv->in_buf[i]);
-
- /* bipolar data is two's-complement */
- if (comedi_range_is_bipolar(s, range))
- val ^= ((s->maxdata + 1) >> 1);
- /* transfer data */
- err = comedi_buf_put(s, val);
- if (unlikely(err == 0)) {
- /* buffer overflow */
- usbdux_ai_stop(dev, 0);
- return;
- }
- }
- /* tell comedi that data is there */
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(dev, s);
}
@@ -402,71 +375,25 @@ static int usbdux_ao_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsub_ao_isoc_irq(struct urb *urb)
+static void usbduxsub_ao_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
- struct comedi_subdevice *s = dev->write_subdev;
struct usbdux_private *devpriv = dev->private;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
uint8_t *datap;
int ret;
int i;
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- /* no unlink needed here. Already shutting down. */
- if (devpriv->ao_cmd_running) {
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- usbdux_ao_stop(dev, 0);
- }
- return;
-
- default:
- /* a real error */
- if (devpriv->ao_cmd_running) {
- dev_err(dev->class_dev,
- "Non-zero urb status received in ao intr context: %d\n",
- urb->status);
- s->async->events |= COMEDI_CB_ERROR;
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- /* we do an unlink if we are in the high speed mode */
- usbdux_ao_stop(dev, 0);
- }
- return;
- }
-
- /* are we actually running? */
- if (!devpriv->ao_cmd_running)
- return;
-
- /* normal operation: executing a command in this subdevice */
devpriv->ao_counter--;
- if ((int)devpriv->ao_counter <= 0) {
- /* timer zero */
+ if (devpriv->ao_counter == 0) {
devpriv->ao_counter = devpriv->ao_timer;
- /* handle non continous acquisition */
- if (cmd->stop_src == TRIG_COUNT) {
- /* fixed number of samples */
- devpriv->ao_sample_count--;
- if (devpriv->ao_sample_count < 0) {
- /* all samples transmitted */
- usbdux_ao_stop(dev, 0);
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- /* no resubmit of the urb */
- return;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ return;
}
/* transmit data to the USB bus */
@@ -476,26 +403,25 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short val;
- ret = comedi_buf_get(s, &val);
- if (ret < 0) {
+ if (!comedi_buf_read_samples(s, &val, 1)) {
dev_err(dev->class_dev, "buffer underflow\n");
- s->async->events |= (COMEDI_CB_EOA |
- COMEDI_CB_OVERFLOW);
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
}
+
/* pointer to the DA */
*datap++ = val & 0xff;
*datap++ = (val >> 8) & 0xff;
*datap++ = chan << 6;
s->readback[chan] = val;
-
- s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
}
}
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->ao_cmd_running) {
+
+ /* if command is still running, resubmit urb for BULK transfer */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
if (devpriv->high_speed)
urb->interval = 8; /* uframes */
else
@@ -512,16 +438,54 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
if (ret == -EL2NSYNC)
dev_err(dev->class_dev,
"buggy USB host controller or bug in IRQ handling!\n");
-
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* don't do an unlink here */
- usbdux_ao_stop(dev, 0);
+ async->events |= COMEDI_CB_ERROR;
}
}
}
+static void usbduxsub_ao_isoc_irq(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct comedi_async *async = s->async;
+ struct usbdux_private *devpriv = dev->private;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ao_cmd_running)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ usbduxsub_ao_handle_urb(dev, s, urb);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* after an unlink command, unplug, ... etc */
+ async->events |= COMEDI_CB_ERROR;
+ break;
+
+ default:
+ /* a real error */
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ao intr context: %d\n",
+ urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
+ }
+
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbdux_ao_stop(dev, 0);
+
+ comedi_event(dev, s);
+}
+
static int usbdux_submit_urbs(struct comedi_device *dev,
struct urb **urbs, int num_urbs,
int input_urb)
@@ -725,9 +689,6 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ai_cmd_running)
goto ai_cmd_exit;
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
devpriv->dux_commands[1] = len;
for (i = 0; i < len; ++i) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
@@ -765,14 +726,6 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_counter = devpriv->ai_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* data arrives as one packet */
- devpriv->ai_sample_count = cmd->stop_arg;
- } else {
- /* continous acquisition */
- devpriv->ai_sample_count = 0;
- }
-
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
devpriv->ai_cmd_running = 1;
@@ -1023,9 +976,6 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ao_cmd_running)
goto ao_cmd_exit;
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
if (0) { /* (devpriv->high_speed) */
@@ -1044,24 +994,6 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_counter = devpriv->ao_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous */
- /* counter */
- /* high speed also scans everything at once */
- if (0) { /* (devpriv->high_speed) */
- devpriv->ao_sample_count = cmd->stop_arg *
- cmd->scan_end_arg;
- } else {
- /* there's no scan as the scan has been */
- /* perf inside the FX2 */
- /* data arrives as one packet */
- devpriv->ao_sample_count = cmd->stop_arg;
- }
- } else {
- /* continous acquisition */
- devpriv->ao_sample_count = 0;
- }
-
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
devpriv->ao_cmd_running = 1;
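The usbdux rework splits every URB completion into a thin status switch plus a handle_urb() helper and resubmits only while no cancel-condition event (error or EOA) is pending; comedi_handle_events() is avoided because its (*cancel) path would unlink the urb. A toy model of that control flow; the constants and types are local stand-ins, not the kernel definitions:

```c
#include <stdio.h>
#include <stdbool.h>

/* local stand-ins for the comedi event flags */
#define CB_EOA		(1u << 0)
#define CB_ERROR	(1u << 1)
#define CB_CANCEL_MASK	(CB_EOA | CB_ERROR)

struct fake_urb { int status; bool resubmitted; };

static void handle_urb(unsigned int *events, struct fake_urb *urb,
		       unsigned int scans_done, unsigned int stop_arg)
{
	if (scans_done >= stop_arg)
		*events |= CB_EOA;

	/* resubmit only while the command should keep running */
	if (!(*events & CB_CANCEL_MASK))
		urb->resubmitted = true;
}

static void urb_complete(unsigned int *events, struct fake_urb *urb,
			 unsigned int scans_done, unsigned int stop_arg)
{
	switch (urb->status) {
	case 0:
		handle_urb(events, urb, scans_done, stop_arg);
		break;
	default:	/* unlink, unplug or a real bus error */
		*events |= CB_ERROR;
		break;
	}
}

int main(void)
{
	struct fake_urb ok = { 0, false };
	struct fake_urb dead = { -1, false };	/* any non-zero status */
	unsigned int ev_ok = 0, ev_dead = 0;

	urb_complete(&ev_ok, &ok, 2, 10);	/* keeps streaming */
	urb_complete(&ev_dead, &dead, 2, 10);	/* flags an error, no resubmit */
	printf("ok: resubmit=%d, dead: resubmit=%d events=0x%x\n",
	       ok.resubmitted, dead.resubmitted, ev_dead);
	return 0;
}
```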
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index f85818dd5e11..ddc4cb9d5ed4 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004 Bernd Porr, Bernd.Porr@f2s.com
+ * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +13,15 @@
*/
/*
+ * Driver: usbduxfast
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited
+ * Devices: (ITL) USB-DUX [usbduxfast]
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: stable
+ */
+
+/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
* driver. I also must give credits to David Brownell
@@ -152,7 +161,6 @@ struct usbduxfast_private {
uint8_t *duxbuf;
int8_t *inbuf;
short int ai_cmd_running; /* asynchronous command is running */
- long int ai_sample_count; /* number of samples to acquire */
int ignore; /* counter which ignores the first
buffers */
struct semaphore sem;
@@ -227,114 +235,82 @@ static int usbduxfast_ai_cancel(struct comedi_device *dev,
return ret;
}
-/*
- * analogue IN
- * interrupt service routine
- */
+static void usbduxfast_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
+{
+ struct usbduxfast_private *devpriv = dev->private;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ int ret;
+
+ if (devpriv->ignore) {
+ devpriv->ignore--;
+ } else {
+ unsigned int nsamples;
+
+ nsamples = comedi_bytes_to_samples(s, urb->actual_length);
+ nsamples = comedi_nsamples_left(s, nsamples);
+ comedi_buf_write_samples(s, urb->transfer_buffer, nsamples);
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb for BULK transfer */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "urb resubm failed: %d", ret);
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
static void usbduxfast_ai_interrupt(struct urb *urb)
{
struct comedi_device *dev = urb->context;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- struct usb_device *usb = comedi_to_usb_dev(dev);
struct usbduxfast_private *devpriv = dev->private;
- int n, err;
- /* are we running a command? */
- if (unlikely(!devpriv->ai_cmd_running)) {
- /*
- * not running a command
- * do not continue execution if no asynchronous command
- * is running in particular not resubmit
- */
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
return;
- }
- /* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
+ usbduxfast_ai_handle_urb(dev, s, urb);
break;
- /*
- * happens after an unlink command or when the device
- * is plugged out
- */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /* tell this comedi */
- async->events |= COMEDI_CB_EOA;
+ /* after an unlink command, unplug, ... etc */
async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- /* stop the transfer w/o unlink */
- usbduxfast_ai_stop(dev, 0);
- return;
+ break;
default:
+ /* a real error */
dev_err(dev->class_dev,
"non-zero urb status received in ai intr context: %d\n",
urb->status);
- async->events |= COMEDI_CB_EOA;
async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
- usbduxfast_ai_stop(dev, 0);
- return;
- }
-
- if (!devpriv->ignore) {
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- n = urb->actual_length / sizeof(uint16_t);
- if (unlikely(devpriv->ai_sample_count < n)) {
- unsigned int num_bytes;
-
- /* partial sample received */
- num_bytes = devpriv->ai_sample_count *
- sizeof(uint16_t);
- cfc_write_array_to_buffer(s,
- urb->transfer_buffer,
- num_bytes);
- usbduxfast_ai_stop(dev, 0);
- /* tell comedi that the acquistion is over */
- async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
- devpriv->ai_sample_count -= n;
- }
- /* write the full buffer to comedi */
- err = cfc_write_array_to_buffer(s, urb->transfer_buffer,
- urb->actual_length);
- if (unlikely(err == 0)) {
- /* buffer overflow */
- usbduxfast_ai_stop(dev, 0);
- return;
- }
-
- /* tell comedi that data is there */
- comedi_event(dev, s);
- } else {
- /* ignore this packet */
- devpriv->ignore--;
+ break;
}
/*
- * command is still running
- * resubmit urb for BULK transfer
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
*/
- urb->dev = usb;
- urb->status = 0;
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- dev_err(dev->class_dev,
- "urb resubm failed: %d", err);
- async->events |= COMEDI_CB_EOA;
- async->events |= COMEDI_CB_ERROR;
- comedi_event(dev, s);
+ if (async->events & COMEDI_CB_CANCEL_MASK)
usbduxfast_ai_stop(dev, 0);
- }
+
+ comedi_event(dev, s);
}
static int usbduxfast_submit_urb(struct comedi_device *dev)
@@ -499,8 +475,6 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
up(&devpriv->sem);
return -EBUSY;
}
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
/*
* ignore the first buffers from the device if there
@@ -810,11 +784,6 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
return result;
}
- if (cmd->stop_src == TRIG_COUNT)
- devpriv->ai_sample_count = cmd->stop_arg * cmd->scan_end_arg;
- else /* TRIG_NONE */
- devpriv->ai_sample_count = 0;
-
if ((cmd->start_src == TRIG_NOW) || (cmd->start_src == TRIG_EXT)) {
/* enable this acquisition operation */
devpriv->ai_cmd_running = 1;
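usbduxfast now writes whole URB buffers with comedi_buf_write_samples(), clamped by comedi_nsamples_left() so a TRIG_COUNT acquisition never stores more samples than cmd->stop_arg scans require. A simplified user-space model of that clamp (it ignores the partial-scan bookkeeping the real helper also does):

```c
#include <stdio.h>

/* simplified clamp, ignoring the partial-scan position the real helper tracks */
static unsigned int nsamples_left(unsigned int scans_done, unsigned int stop_arg,
				  unsigned int scan_len, unsigned int nsamples)
{
	unsigned long long scans_left =
		(scans_done < stop_arg) ? stop_arg - scans_done : 0;
	unsigned long long samples_left = scans_left * scan_len;

	return (nsamples > samples_left) ? (unsigned int)samples_left : nsamples;
}

int main(void)
{
	/* 1-channel scans, 10 requested, 7 already done, urb carries 512 samples */
	printf("write %u of 512 samples\n", nsamples_left(7, 10, 1, 512));
	return 0;
}
```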
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index ebd68e365bac..dc19435b6520 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,6 +1,6 @@
/*
* usbduxsigma.c
- * Copyright (C) 2011 Bernd Porr, Bernd.Porr@f2s.com
+ * Copyright (C) 2011-2014 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,9 +17,9 @@
* Driver: usbduxsigma
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: (ITL) USB-DUX [usbduxsigma]
- * Author: Bernd Porr <BerndPorr@f2s.com>
- * Updated: 8 Nov 2011
- * Status: testing
+ * Author: Bernd Porr <mail@berndporr.me.uk>
+ * Updated: 10 Oct 2014
+ * Status: stable
*/
/*
@@ -164,9 +164,6 @@ struct usbduxsigma_private {
unsigned ao_cmd_running:1;
unsigned pwm_cmd_running:1;
- /* number of samples to acquire */
- int ai_sample_count;
- int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
@@ -211,23 +208,75 @@ static int usbduxsigma_ai_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsigma_ai_urb_complete(struct urb *urb)
+static void usbduxsigma_ai_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
unsigned int dio_state;
uint32_t val;
int ret;
int i;
- /* first we test if something unusual has just happened */
+ devpriv->ai_counter--;
+ if (devpriv->ai_counter == 0) {
+ devpriv->ai_counter = devpriv->ai_timer;
+
+ /* get the state of the dio pins to allow external trigger */
+ dio_state = be32_to_cpu(devpriv->in_buf[0]);
+
+ /* get the data from the USB bus and hand it over to comedi */
+ for (i = 0; i < cmd->chanlist_len; i++) {
+ /* transfer data, note first byte is the DIO state */
+ val = be32_to_cpu(devpriv->in_buf[i+1]);
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
+
+ if (!comedi_buf_write_samples(s, &val, 1))
+ return;
+ }
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->dev = comedi_to_usb_dev(dev);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
+ }
+}
+
+static void usbduxsigma_ai_urb_complete(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ai_cmd_running)
+ return;
+
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
+ usbduxsigma_ai_handle_urb(dev, s, urb);
break;
+
case -EILSEQ:
/*
* error in the ISOchronous data
@@ -235,7 +284,7 @@ static void usbduxsigma_ai_urb_complete(struct urb *urb)
* and recycle the last data byte
*/
dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
-
+ usbduxsigma_ai_handle_urb(dev, s, urb);
break;
case -ECONNRESET:
@@ -243,86 +292,24 @@ static void usbduxsigma_ai_urb_complete(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
- if (devpriv->ai_cmd_running) {
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- /* we are still running a command, tell comedi */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- }
- return;
+ async->events |= COMEDI_CB_ERROR;
+ break;
default:
- /*
- * a real error on the bus
- * pass error to comedi if we are really running a command
- */
- if (devpriv->ai_cmd_running) {
- dev_err(dev->class_dev,
- "%s: non-zero urb status (%d)\n",
- __func__, urb->status);
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- }
- return;
- }
-
- if (unlikely(!devpriv->ai_cmd_running))
- return;
-
- urb->dev = comedi_to_usb_dev(dev);
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(ret < 0)) {
- dev_err(dev->class_dev, "%s: urb resubmit failed (%d)\n",
- __func__, ret);
- if (ret == -EL2NSYNC)
- dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
- return;
- }
-
- /* get the state of the dio pins to allow external trigger */
- dio_state = be32_to_cpu(devpriv->in_buf[0]);
-
- devpriv->ai_counter--;
- if (likely(devpriv->ai_counter > 0))
- return;
-
- /* timer zero, transfer measurements to comedi */
- devpriv->ai_counter = devpriv->ai_timer;
-
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ai_sample_count--;
- if (devpriv->ai_sample_count < 0) {
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- /* acquistion is over, tell comedi */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
+ /* a real error */
+ dev_err(dev->class_dev, "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
}
- /* get the data from the USB bus and hand it over to comedi */
- for (i = 0; i < cmd->chanlist_len; i++) {
- /* transfer data, note first byte is the DIO state */
- val = be32_to_cpu(devpriv->in_buf[i+1]);
- val &= 0x00ffffff; /* strip status byte */
- val ^= 0x00800000; /* convert to unsigned */
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbduxsigma_ai_stop(dev, 0);
- ret = cfc_write_array_to_buffer(s, &val, sizeof(uint32_t));
- if (unlikely(ret == 0)) {
- /* buffer overflow */
- usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
- return;
- }
- }
- /* tell comedi that data is there */
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
comedi_event(dev, s);
}
@@ -349,64 +336,25 @@ static int usbduxsigma_ao_cancel(struct comedi_device *dev,
return 0;
}
-static void usbduxsigma_ao_urb_complete(struct urb *urb)
+static void usbduxsigma_ao_handle_urb(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct urb *urb)
{
- struct comedi_device *dev = urb->context;
struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- struct comedi_cmd *cmd = &s->async->cmd;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
uint8_t *datap;
int ret;
int i;
- switch (urb->status) {
- case 0:
- /* success */
- break;
-
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNABORTED:
- /* happens after an unlink command */
- if (devpriv->ao_cmd_running) {
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- }
- return;
-
- default:
- /* a real error */
- if (devpriv->ao_cmd_running) {
- dev_err(dev->class_dev,
- "%s: non-zero urb status (%d)\n",
- __func__, urb->status);
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_ERROR | COMEDI_CB_EOA);
- comedi_event(dev, s);
- }
- return;
- }
-
- if (!devpriv->ao_cmd_running)
- return;
-
devpriv->ao_counter--;
- if ((int)devpriv->ao_counter <= 0) {
- /* timer zero, transfer from comedi */
+ if (devpriv->ao_counter == 0) {
devpriv->ao_counter = devpriv->ao_timer;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, fixed number of samples */
- devpriv->ao_sample_count--;
- if (devpriv->ao_sample_count < 0) {
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- /* acquistion is over, tell comedi */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(dev, s);
- return;
- }
+ if (cmd->stop_src == TRIG_COUNT &&
+ async->scans_done >= cmd->stop_arg) {
+ async->events |= COMEDI_CB_EOA;
+ return;
}
/* transmit data to the USB bus */
@@ -416,46 +364,86 @@ static void usbduxsigma_ao_urb_complete(struct urb *urb)
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned short val;
- ret = comedi_buf_get(s, &val);
- if (ret < 0) {
+ if (!comedi_buf_read_samples(s, &val, 1)) {
dev_err(dev->class_dev, "buffer underflow\n");
- s->async->events |= (COMEDI_CB_EOA |
- COMEDI_CB_OVERFLOW);
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
}
+
*datap++ = val;
*datap++ = chan;
s->readback[chan] = val;
-
- s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(dev, s);
}
}
- urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = comedi_to_usb_dev(dev);
- urb->status = 0;
- if (devpriv->high_speed)
- urb->interval = 8; /* uframes */
- else
- urb->interval = 1; /* frames */
- urb->number_of_packets = 1;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->iso_frame_desc[0].status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "%s: urb resubmit failed (%d)\n",
- __func__, ret);
- if (ret == -EL2NSYNC)
+ /* if command is still running, resubmit urb */
+ if (!(async->events & COMEDI_CB_CANCEL_MASK)) {
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->dev = comedi_to_usb_dev(dev);
+ urb->status = 0;
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
+ urb->number_of_packets = 1;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
dev_err(dev->class_dev,
- "buggy USB host controller or bug in IRQ handler\n");
- usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
- s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
- comedi_event(dev, s);
+ "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ async->events |= COMEDI_CB_ERROR;
+ }
}
}
+static void usbduxsigma_ao_urb_complete(struct urb *urb)
+{
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct comedi_async *async = s->async;
+
+ /* exit if not running a command, do not resubmit urb */
+ if (!devpriv->ao_cmd_running)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ usbduxsigma_ao_handle_urb(dev, s, urb);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -ECONNABORTED:
+ /* happens after an unlink command */
+ async->events |= COMEDI_CB_ERROR;
+ break;
+
+ default:
+ /* a real error */
+ dev_err(dev->class_dev, "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ async->events |= COMEDI_CB_ERROR;
+ break;
+ }
+
+ /*
+ * comedi_handle_events() cannot be used in this driver. The (*cancel)
+ * operation would unlink the urb.
+ */
+ if (async->events & COMEDI_CB_CANCEL_MASK)
+ usbduxsigma_ao_stop(dev, 0);
+
+ comedi_event(dev, s);
+}
+
static int usbduxsigma_submit_urbs(struct comedi_device *dev,
struct urb **urbs, int num_urbs,
int input_urb)
@@ -584,14 +572,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
if (devpriv->ai_timer < 1)
err |= -EINVAL;
- if (cmd->stop_src == TRIG_COUNT) {
- /* data arrives as one packet */
- devpriv->ai_sample_count = cmd->stop_arg;
- } else {
- /* continuous acquisition */
- devpriv->ai_sample_count = 0;
- }
-
if (err)
return 4;
@@ -692,8 +672,6 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
down(&devpriv->sem);
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
for (i = 0; i < len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
@@ -957,25 +935,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
if (devpriv->ao_timer < 1)
err |= -EINVAL;
- if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous, use counter */
- if (high_speed) {
- /* high speed also scans everything at once */
- devpriv->ao_sample_count = cmd->stop_arg *
- cmd->scan_end_arg;
- } else {
- /*
- * There's no scan as the scan has been
- * handled inside the FX2. Data arrives as
- * one packet.
- */
- devpriv->ao_sample_count = cmd->stop_arg;
- }
- } else {
- /* continuous acquisition */
- devpriv->ao_sample_count = 0;
- }
-
if (err)
return 4;
@@ -991,9 +950,6 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
down(&devpriv->sem);
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
-
devpriv->ao_counter = devpriv->ao_timer;
if (cmd->start_src == TRIG_NOW) {
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 71003416edcf..a19a56ee0eef 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -797,7 +797,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
/* Analog output subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = boardinfo->ao_nchans;
s->maxdata = 0x00ff;
s->range_table = boardinfo->range;
@@ -819,7 +819,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
/* Digital output subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
+ s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -834,7 +834,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
s->insn_read = vmk80xx_cnt_insn_read;
s->insn_config = vmk80xx_cnt_insn_config;
if (devpriv->model == VMK8055_MODEL) {
- s->subdev_flags |= SDF_WRITEABLE;
+ s->subdev_flags |= SDF_WRITABLE;
s->insn_write = vmk80xx_cnt_insn_write;
}
@@ -842,7 +842,7 @@ static int vmk80xx_init_subdevices(struct comedi_device *dev)
if (devpriv->model == VMK8061_MODEL) {
s = &dev->subdevices[5];
s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = boardinfo->pwm_nchans;
s->maxdata = boardinfo->pwm_maxdata;
s->insn_read = vmk80xx_pwm_insn_read;
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index b6849545b810..9a1dc56f21d1 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -97,39 +97,6 @@ int do_rangeinfo_ioctl(struct comedi_device *dev,
return 0;
}
-static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
-{
- unsigned int aref;
-
- /* disable reporting invalid arefs... maybe someday */
- return 0;
-
- aref = CR_AREF(chanspec);
- switch (aref) {
- case AREF_DIFF:
- if (s->subdev_flags & SDF_DIFF)
- return 0;
- break;
- case AREF_COMMON:
- if (s->subdev_flags & SDF_COMMON)
- return 0;
- break;
- case AREF_GROUND:
- if (s->subdev_flags & SDF_GROUND)
- return 0;
- break;
- case AREF_OTHER:
- if (s->subdev_flags & SDF_OTHER)
- return 0;
- break;
- default:
- break;
- }
- dev_dbg(s->device->class_dev, "subdevice does not support aref %i",
- aref);
- return 1;
-}
-
/**
* comedi_check_chanlist() - Validate each element in a chanlist.
* @s: comedi_subdevice struct
@@ -153,8 +120,7 @@ int comedi_check_chanlist(struct comedi_subdevice *s, int n,
else
range_len = 0;
if (chan >= s->n_chan ||
- CR_RANGE(chanspec) >= range_len ||
- aref_invalid(s, chanspec)) {
+ CR_RANGE(chanspec) >= range_len) {
dev_warn(dev->class_dev,
"bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
i, chanspec, chan, range_len);
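The range.c hunk deletes aref_invalid(), which was dead code: the unconditional "return 0" at its top meant the aref check never ran, so comedi_check_chanlist() effectively only ever validated the channel and range indices, as it now does openly. A small demo of that remaining check; the CR_* macros are re-declared locally just for the example:

```c
#include <stdio.h>

/* chanspec layout re-declared locally for the demo */
#define CR_PACK(chan, rng, aref) (((aref) << 24) | ((rng) << 16) | (chan))
#define CR_CHAN(a)	((a) & 0xffff)
#define CR_RANGE(a)	(((a) >> 16) & 0xff)

static int chanlist_entry_ok(unsigned int chanspec,
			     unsigned int n_chan, unsigned int range_len)
{
	return CR_CHAN(chanspec) < n_chan && CR_RANGE(chanspec) < range_len;
}

int main(void)
{
	printf("%d %d\n",
	       chanlist_entry_ok(CR_PACK(3, 1, 0), 8, 4),	/* 1: valid */
	       chanlist_entry_ok(CR_PACK(9, 1, 0), 8, 4));	/* 0: channel out of range */
	return 0;
}
```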
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
index edf9ff2ea25b..7f265ce0dd13 100644
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -278,7 +278,7 @@ static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
{
- struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
+ struct cp_tm1217_device *ts = handle;
u8 req[2];
int retval;
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
index d0be1cebf5a9..bdb5317e3d9d 100644
--- a/drivers/staging/dgap/dgap.c
+++ b/drivers/staging/dgap/dgap.c
@@ -65,144 +65,6 @@
#include "dgap.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
-
-static int dgap_start(void);
-static void dgap_init_globals(void);
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum);
-static void dgap_cleanup_board(struct board_t *brd);
-static void dgap_poll_handler(ulong dummy);
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void dgap_remove_one(struct pci_dev *dev);
-static int dgap_do_remap(struct board_t *brd);
-static void dgap_release_remap(struct board_t *brd);
-static irqreturn_t dgap_intr(int irq, void *voidbrd);
-
-static int dgap_tty_open(struct tty_struct *tty, struct file *file);
-static void dgap_tty_close(struct tty_struct *tty, struct file *file);
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
- struct channel_t *ch);
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg);
-static int dgap_tty_digigeta(struct channel_t *ch,
- struct digi_t __user *retinfo);
-static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, struct digi_t __user *new_info);
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
-static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info);
-static int dgap_tty_write_room(struct tty_struct *tty);
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty);
-static void dgap_tty_start(struct tty_struct *tty);
-static void dgap_tty_stop(struct tty_struct *tty);
-static void dgap_tty_throttle(struct tty_struct *tty);
-static void dgap_tty_unthrottle(struct tty_struct *tty);
-static void dgap_tty_flush_chars(struct tty_struct *tty);
-static void dgap_tty_flush_buffer(struct tty_struct *tty);
-static void dgap_tty_hangup(struct tty_struct *tty);
-static int dgap_wait_for_drain(struct tty_struct *tty);
-static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, unsigned int command,
- unsigned int __user *value);
-static int dgap_get_modem_info(struct channel_t *ch,
- unsigned int __user *value);
-static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info);
-static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
- int __user *retinfo);
-static int dgap_tty_tiocmget(struct tty_struct *tty);
-static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set,
- unsigned int clear);
-static int dgap_tty_send_break(struct tty_struct *tty, int msec);
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count);
-static void dgap_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios);
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
-static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
-
-static int dgap_tty_register(struct board_t *brd);
-static void dgap_tty_unregister(struct board_t *brd);
-static int dgap_tty_init(struct board_t *);
-static void dgap_tty_free(struct board_t *);
-static void dgap_cleanup_tty(struct board_t *);
-static void dgap_carrier(struct channel_t *ch);
-static void dgap_input(struct channel_t *ch);
-
-/*
- * Our function prototypes from dgap_fep5
- */
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
-static int dgap_event(struct board_t *bd);
-
-static void dgap_poll_tasklet(unsigned long data);
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds);
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds);
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type);
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len);
-static uint dgap_get_custom_baud(struct channel_t *ch);
-static void dgap_firmware_reset_port(struct channel_t *ch);
-
-/*
- * Function prototypes from dgap_parse.c.
- */
-static int dgap_gettok(char **in);
-static char *dgap_getword(char **in);
-static int dgap_checknode(struct cnode *p);
-
-/*
- * Function prototypes from dgap_sysfs.h
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd);
-static void dgap_remove_ports_sysfiles(struct board_t *bd);
-
-static int dgap_create_driver_sysfiles(struct pci_driver *);
-static void dgap_remove_driver_sysfiles(struct pci_driver *);
-
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
-static void dgap_remove_tty_sysfs(struct device *c);
-
-/*
- * Function prototypes from dgap_parse.h
- */
-static int dgap_parsefile(char **in);
-static struct cnode *dgap_find_config(int type, int bus, int slot);
-static uint dgap_config_get_num_prts(struct board_t *bd);
-static char *dgap_create_config_string(struct board_t *bd, char *string);
-static uint dgap_config_get_useintr(struct board_t *bd);
-static uint dgap_config_get_altpin(struct board_t *bd);
-
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len);
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len);
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len);
-#endif
-static int dgap_alloc_flipbuf(struct board_t *brd);
-static void dgap_free_flipbuf(struct board_t *brd);
-static int dgap_request_irq(struct board_t *brd);
-static void dgap_free_irq(struct board_t *brd);
-
-static void dgap_get_vpd(struct board_t *brd);
-static void dgap_do_reset_board(struct board_t *brd);
-static int dgap_test_bios(struct board_t *brd);
-static int dgap_test_fep(struct board_t *brd);
-static int dgap_tty_register_ports(struct board_t *brd);
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd);
-static void dgap_cleanup_nodes(void);
-
-static void dgap_cleanup_module(void);
-
-module_exit(dgap_cleanup_module);
-
/*
* File operations permitted on Control/Management major.
*/
@@ -218,7 +80,6 @@ static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
static struct class *dgap_class;
-static struct board_t *dgap_boards_by_major[256];
static uint dgap_count = 500;
/*
@@ -297,13 +158,6 @@ static struct board_id dgap_ids[] = {
{0,} /* 0 terminated list. */
};
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
struct firmware_info {
u8 *conf_name; /* dgap.conf */
u8 *bios_name; /* BIOS filename */
@@ -366,29 +220,6 @@ static struct ktermios dgap_default_termios = {
.c_line = 0,
};
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
/*
* Our needed internal static variables from dgap_parse.c
*/
@@ -456,1078 +287,1226 @@ static struct toklist dgap_tlist[] = {
{ 0, NULL }
};
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
/*
- * init_module()
- *
- * Module load. This is where it all starts.
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group, and returns that position. If the first character is a ^, then
+ * this will match the first occurrence not in that group.
*/
-static int dgap_init_module(void)
+static char *dgap_sindex(char *string, char *group)
{
- int rc;
+ char *ptr;
- pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
+ if (!string || !group)
+ return NULL;
- rc = dgap_start();
- if (rc)
- return rc;
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ } else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
- rc = pci_register_driver(&dgap_driver);
- if (rc)
- goto err_cleanup;
+ return NULL;
+}
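A minimal userspace sketch of the two matching modes above, using the standard strcspn()/strspn() pair as stand-ins (dgap_sindex() returns a pointer into the string rather than a count):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "board PPCM\tio 0x104\n";

	/* group without '^': first character that IS in the group */
	printf("%zu\n", strcspn(buf, " \t\n"));	/* 5: the space after "board" */

	/* group with '^': first character that is NOT in the group */
	printf("%zu\n", strspn(buf, " \t\n"));	/* 0: 'b' is already outside it */

	return 0;
}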
- rc = dgap_create_driver_sysfiles(&dgap_driver);
- if (rc)
- goto err_cleanup;
+/*
+ * Get a word from the input stream; words are separated by whitespace.
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
- dgap_driver_state = DRIVER_READY;
+ char *ptr = dgap_sindex(*in, " \t\n");
- return 0;
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
-err_cleanup:
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
- dgap_cleanup_module();
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') ||
+ (**in == '\t') ||
+ (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
- return rc;
+ return ret_ptr;
}
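The in-place splitting above behaves much like repeated strtok() over " \t\n"; a self-contained sketch of the same tokenisation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char conf[] = "config_begin\nboard\tPPCM\nconfig_end\n";
	char *word;

	/*
	 * Each call NUL-terminates the current word and advances the
	 * cursor, much as dgap_getword() does through its **in argument.
	 */
	for (word = strtok(conf, " \t\n"); word; word = strtok(NULL, " \t\n"))
		printf("word: %s\n", word);

	return 0;
}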
-module_init(dgap_init_module);
+
/*
- * Start of driver.
+ * Get a token from the input file; return 0 if end of file is reached
*/
-static int dgap_start(void)
+static int dgap_gettok(char **in)
{
- int rc;
- unsigned long flags;
- struct device *device;
-
- /*
- * make sure that the globals are
- * init'd before we do anything else
- */
- dgap_init_globals();
-
- dgap_numboards = 0;
+ char *w;
+ struct toklist *t;
- pr_info("For the tools package please visit http://www.digi.com\n");
+ if (strstr(dgap_cword, "board")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_brdtype; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ } else {
+ while ((w = dgap_getword(in))) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if (!strcmp(w, t->string))
+ return t->token;
+ }
+ }
+ }
- /*
- * Register our base character device into the kernel.
- */
+ return 0;
+}
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
- if (rc < 0)
- return rc;
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ pr_err("line speed not specified");
+ return 1;
+ }
+ return 0;
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- if (IS_ERR(dgap_class)) {
- rc = PTR_ERR(dgap_class);
- goto failed_class;
- }
+ case CNODE:
+ if (p->u.conc.v_speed == 0) {
+ pr_err("concentrator line speed not specified");
+ return 1;
+ }
+ if (p->u.conc.v_nport == 0) {
+ pr_err("number of ports on concentrator not specified");
+ return 1;
+ }
+ if (p->u.conc.v_id == 0) {
+ pr_err("concentrator id letter not specified");
+ return 1;
+ }
+ return 0;
- device = device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- if (IS_ERR(device)) {
- rc = PTR_ERR(device);
- goto failed_device;
+ case MNODE:
+ if (p->u.module.v_nport == 0) {
+ pr_err("number of ports on EBI module not specified");
+ return 1;
+ }
+ if (p->u.module.v_id == 0) {
+ pr_err("EBI module id letter not specified");
+ return 1;
+ }
+ return 0;
}
+ return 0;
+}
- /* Start the poller */
- spin_lock_irqsave(&dgap_poll_lock, flags);
- init_timer(&dgap_poll_timer);
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, flags);
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+static uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p;
- add_timer(&dgap_poll_timer);
+ if (!bd)
+ return 0;
- return rc;
+ for (p = bd->bd_config; p; p = p->next) {
+ if (p->type == INTRNODE) {
+ /*
+ * check for pcxr types.
+ */
+ return p->u.useintr;
+ }
+ }
-failed_device:
- class_destroy(dgap_class);
-failed_class:
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- return rc;
+ /* If not found, then don't turn on interrupts. */
+ return 0;
}
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+static uint dgap_config_get_altpin(struct board_t *bd)
{
- int rc;
- struct board_t *brd;
+ struct cnode *p;
- if (dgap_numboards >= MAXBOARDS)
- return -EPERM;
+ if (!bd)
+ return 0;
- rc = pci_enable_device(pdev);
- if (rc)
- return -EIO;
+ for (p = bd->bd_config; p; p = p->next) {
+ if (p->type == ANODE) {
+ /*
+ * check for pcxr types.
+ */
+ return p->u.altpin;
+ }
+ }
- brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
+ /* If not found, then don't turn on altpin. */
+ return 0;
+}
- rc = dgap_firmware_load(pdev, ent->driver_data, brd);
- if (rc)
- goto cleanup_brd;
+/*
+ * Given a specific type of board, if found, detaches that board's config
+ * chain from the list and returns its first node.
+ */
+static struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev, *prev2, *found;
- rc = dgap_alloc_flipbuf(brd);
- if (rc)
- goto cleanup_brd;
+ p = &dgap_head;
- rc = dgap_tty_register(brd);
- if (rc)
- goto free_flipbuf;
+ while (p->next) {
+ prev = p;
+ p = p->next;
- rc = dgap_request_irq(brd);
- if (rc)
- goto unregister_tty;
+ if (p->type != BNODE)
+ continue;
- /*
- * Do tty device initialization.
- */
- rc = dgap_tty_init(brd);
- if (rc < 0)
- goto free_irq;
+ if (p->u.board.type != type)
+ continue;
- rc = dgap_tty_register_ports(brd);
- if (rc)
- goto tty_free;
+ if (p->u.board.v_pcibus &&
+ p->u.board.pcibus != bus)
+ continue;
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
+ if (p->u.board.v_pcislot &&
+ p->u.board.pcislot != slot)
+ continue;
- dgap_board[dgap_numboards++] = brd;
+ found = p;
+ /*
+ * Keep walking thru the list till we
+ * find the next board.
+ */
+ while (p->next) {
+ prev2 = p;
+ p = p->next;
- return 0;
+ if (p->type != BNODE)
+ continue;
-tty_free:
- dgap_tty_free(brd);
-free_irq:
- dgap_free_irq(brd);
-unregister_tty:
- dgap_tty_unregister(brd);
-free_flipbuf:
- dgap_free_flipbuf(brd);
-cleanup_brd:
- dgap_cleanup_nodes();
- dgap_release_remap(brd);
- kfree(brd);
+ /*
+ * Mark the end of our 1 board
+ * chain of configs.
+ */
+ prev2->next = NULL;
- return rc;
-}
+ /*
+ * Link the "next" board to the
+ * previous board, effectively
+ * "unlinking" our board from
+ * the main config.
+ */
+ prev->next = p;
-static void dgap_remove_one(struct pci_dev *dev)
-{
- /* Do Nothing */
+ return found;
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ return NULL;
}
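A sketch of the pointer surgery above, with hypothetical node names, showing the list before and after a successful lookup:

/*
 * before:   dgap_head -> ... -> prev -> B1 -> line -> conc -> B2 -> ...
 *
 * after:    dgap_head -> ... -> prev -> B2 -> ...      (main config list)
 * returned: B1 -> line -> conc -> NULL                 (chain for B1 only)
 *
 * prev2->next = NULL caps the detached chain just before the next BNODE,
 * and prev->next = p splices the rest of the list around the hole.
 */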
/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
+ * Given a board pointer, walks the config link, counting up
+ * all ports the user specified should be on the board.
+ * (This does NOT mean they are all actually present right now.)
*/
-static void dgap_cleanup_module(void)
+static uint dgap_config_get_num_prts(struct board_t *bd)
{
- int i;
- ulong lock_flags;
-
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- /* Turn off poller right away. */
- del_timer_sync(&dgap_poll_timer);
+ int count = 0;
+ struct cnode *p;
- dgap_remove_driver_sysfiles(&dgap_driver);
+ if (!bd)
+ return 0;
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ for (p = bd->bd_config; p; p = p->next) {
- for (i = 0; i < dgap_numboards; ++i) {
- dgap_remove_ports_sysfiles(dgap_board[i]);
- dgap_cleanup_tty(dgap_board[i]);
- dgap_cleanup_board(dgap_board[i]);
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
}
-
- dgap_cleanup_nodes();
-
- if (dgap_numboards)
- pci_unregister_driver(&dgap_driver);
+ return count;
}
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
+static char *dgap_create_config_string(struct board_t *bd, char *string)
{
- int i;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- dgap_free_irq(brd);
-
- tasklet_kill(&brd->helper_tasklet);
+ char *ptr = string;
+ struct cnode *p;
+ struct cnode *q;
+ int speed;
- dgap_release_remap(brd);
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++)
- kfree(brd->channels[i]);
+ for (p = bd->bd_config; p; p = p->next) {
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the
+ * list and keep adding the number of ports on each EM
+ * to the config. UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if (q && (q->type == MNODE)) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while (q->next && (q->next->type) == MNODE) {
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
- dgap_board[brd->boardnum] = NULL;
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
- kfree(brd);
+ *ptr = 0xff;
+ return string;
}
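A worked example of the byte string built above, with made-up numbers: one sync line at speed 14, then one concentrator with 8 local ports and speed 10, carrying EM modules of 16 and 8 ports:

unsigned char cfg[] = {
	0x00, 14,	/* LNODE: zero marker, then the line speed            */
	8 | 0x80,	/* CNODE port count; high bit says EM modules follow  */
	16 | 0x80,	/* first EM module; high bit says more follow         */
	8,		/* last EM module, plain port count                   */
	10,		/* concentrator speed, written after the modules      */
	0xff,		/* terminator appended for the whole string           */
};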
/*
- * dgap_found_board()
- *
- * A board has been found, init it.
+ * Parse a configuration file read into memory as a string.
*/
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum)
+static int dgap_parsefile(char **in)
{
- struct board_t *brd;
- unsigned int pci_irq;
- int i;
- int ret;
-
- /* get the board structure and prep it */
- brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd)
- return ERR_PTR(-ENOMEM);
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s;
+ int linecnt = 0;
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = boardnum;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_ids[id].name;
- brd->maxports = dgap_ids[id].maxports;
- brd->type = dgap_ids[id].config_type;
- brd->dpatype = dgap_ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
+ p = &dgap_head;
+ brd = line = conc = NULL;
- spin_lock_init(&brd->bd_lock);
+ /* perhaps we are adding to an existing list? */
+ while (p->next)
+ p = p->next;
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
+ /* file must start with a BEGIN */
+ while ((rc = dgap_gettok(in)) != BEGIN) {
+ if (rc == 0) {
+ pr_err("unexpected EOF");
+ return -1;
+ }
+ }
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
+ for (; ;) {
+ int board_type = 0;
+ int conc_type = 0;
+ int module_type = 0;
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+ rc = dgap_gettok(in);
+ if (rc == 0) {
+ pr_err("unexpected EOF");
+ return -1;
+ }
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
+ switch (rc) {
+ case BEGIN: /* should only be 1 begin */
+ pr_err("unexpected config_begin\n");
+ return -1;
- /* get the PCI Base Address Registers */
+ case END:
+ return 0;
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return -1;
- if (!brd->membase) {
- ret = -ENODEV;
- goto free_brd;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
+ p = p->next;
- /*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE;
+ p->type = BNODE;
+ p->u.board.status = kstrdup("No", GFP_KERNEL);
+ line = conc = NULL;
+ brd = p;
+ linecnt = -1;
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
- unsigned short cmd;
+ board_type = dgap_gettok(in);
+ if (board_type == 0) {
+ pr_err("board !!type not specified");
+ return -1;
+ }
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
+ p->u.board.type = board_type;
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
+ break;
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ pr_err("IO port only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.portstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtol(s, 0, &p->u.board.port)) {
+ pr_err("bad number for IO port");
+ return -1;
+ }
+ p->u.board.v_port = 1;
+ break;
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
- (unsigned long) brd);
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ pr_err("memory address only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.addr)) {
+ pr_err("bad number for memory address");
+ return -1;
+ }
+ p->u.board.v_addr = 1;
+ break;
- ret = dgap_do_remap(brd);
- if (ret)
- goto free_brd;
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ pr_err("memory address only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.pcibus)) {
+ pr_err("bad number for pci bus");
+ return -1;
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
+ if (kstrtoul(s, 0, &p->u.board.pcislot)) {
+ pr_err("bad number for pci slot");
+ return -1;
+ }
+ p->u.board.v_pcislot = 1;
+ break;
- pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
- boardnum, brd->name, brd->rev, brd->irq);
+ case METHOD:
+ if (p->type != BNODE) {
+ pr_err("install method only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.method = kstrdup(s, GFP_KERNEL);
+ p->u.board.v_method = 1;
+ break;
- return brd;
+ case STATUS:
+ if (p->type != BNODE) {
+ pr_err("config status only valid for boards");
+ return -1;
+ }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = kstrdup(s, GFP_KERNEL);
+ break;
-free_brd:
- kfree(brd);
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.nport)) {
+ pr_err("bad number for number of ports");
+ return -1;
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ pr_err("nports only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- return ERR_PTR(ret);
-}
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.board.status = kstrdup(s, GFP_KERNEL);
-static int dgap_request_irq(struct board_t *brd)
-{
- int rc;
+ if (p->type == CNODE) {
+ p->u.conc.id = kstrdup(s, GFP_KERNEL);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = kstrdup(s, GFP_KERNEL);
+ p->u.module.v_id = 1;
+ } else {
+ pr_err("id only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.board.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.module.start)) {
+ pr_err("bad number for start of tty count");
+ return -1;
+ }
+ p->u.module.v_start = 1;
+ } else {
+ pr_err("start only valid for concentrators or modules");
+ return -1;
+ }
+ break;
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (dgap_config_get_useintr(brd) && brd->irq) {
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return -1;
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (!rc)
- brd->intr_used = 1;
- }
- return 0;
-}
+ p = p->next;
+ p->type = TNODE;
-static void dgap_free_irq(struct board_t *brd)
-{
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-}
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.ttyname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.ttyname)
+ return -1;
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd)
-{
- const struct firmware *fw;
- char *tmp_ptr;
- int ret;
- char *dgap_config_buf;
+ break;
- dgap_get_vpd(brd);
- dgap_do_reset_board(brd);
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return -1;
- if (fw_info[card_type].conf_name) {
- ret = request_firmware(&fw, fw_info[card_type].conf_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "config file %s not found\n",
- fw_info[card_type].conf_name);
- return ret;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
- if (!dgap_config_buf) {
- release_firmware(fw);
- return -ENOMEM;
- }
+ p = p->next;
+ p->type = CUNODE;
- memcpy(dgap_config_buf, fw->data, fw->size);
- release_firmware(fw);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.cuname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.cuname)
+ return -1;
- /*
- * preserve dgap_config_buf
- * as dgap_parsefile would
- * otherwise alter it.
- */
- tmp_ptr = dgap_config_buf;
+ break;
- if (dgap_parsefile(&tmp_ptr) != 0) {
- kfree(dgap_config_buf);
- return -EINVAL;
- }
- kfree(dgap_config_buf);
- }
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return -1;
+ if (!brd) {
+ pr_err("must specify board before line info");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ pr_err("line not valid for PC/em");
+ return -1;
+ }
- /*
- * Match this board to a config the user created for us.
- */
- brd->bd_config =
- dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (brd->type == PAPORT8 && !brd->bd_config)
- brd->bd_config =
- dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
+ p = p->next;
+ p->type = LNODE;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ break;
- if (!brd->bd_config) {
- dev_err(&pdev->dev, "No valid configuration found\n");
- return -EINVAL;
- }
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return -1;
+ if (!line) {
+ pr_err("must specify line info before concentrator");
+ return -1;
+ }
- if (fw_info[card_type].bios_name) {
- ret = request_firmware(&fw, fw_info[card_type].bios_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "bios file %s not found\n",
- fw_info[card_type].bios_name);
- return ret;
- }
- dgap_do_bios_load(brd, fw->data, fw->size);
- release_firmware(fw);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /* Wait for BIOS to test board... */
- ret = dgap_test_bios(brd);
- if (ret)
- return ret;
- }
+ p = p->next;
+ p->type = CNODE;
+ conc = p;
- if (fw_info[card_type].fep_name) {
- ret = request_firmware(&fw, fw_info[card_type].fep_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "dgap: fep file %s not found\n",
- fw_info[card_type].fep_name);
- return ret;
- }
- dgap_do_fep_load(brd, fw->data, fw->size);
- release_firmware(fw);
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
- /* Wait for FEP to load on board... */
- ret = dgap_test_fep(brd);
- if (ret)
- return ret;
- }
+ conc_type = dgap_gettok(in);
+ if (conc_type != CX && conc_type != EPC) {
+ pr_err("failed to set a type of concentratros");
+ return -1;
+ }
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
- /*
- * If this is a CX or EPCX, we need to see if the firmware
- * is requesting a concentrator image from us.
- */
- if ((bd->type == PCX) || (bd->type == PEPC)) {
- chk_addr = (u16 *) (vaddr + DOWNREQ);
- /* Nonzero if FEP is requesting concentrator image. */
- check = readw(chk_addr);
- vaddr = brd->re_map_membase;
- }
+ p->u.conc.type = conc_type;
- if (fw_info[card_type].con_name && check && vaddr) {
- ret = request_firmware(&fw, fw_info[card_type].con_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "conc file %s not found\n",
- fw_info[card_type].con_name);
- return ret;
- }
- /* Put concentrator firmware loading code here */
- offset = readw((u16 *) (vaddr + DOWNREQ));
- memcpy_toio(offset, fw->data, fw->size);
+ break;
- dgap_do_conc_load(brd, (char *)fw->data, fw->size)
- release_firmware(fw);
- }
-#endif
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return -1;
+ if (!brd) {
+ pr_err("must specify board info before EBI modules");
+ return -1;
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (!conc) {
+ pr_err("must specify concentrator info before EBI module");
+ return -1;
+ }
+ }
- return 0;
-}
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
-/*
- * Remap PCI memory.
- */
-static int dgap_do_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
+ p = p->next;
+ p->type = MNODE;
- if (!request_mem_region(brd->membase, 0x200000, "dgap"))
- return -ENOMEM;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
- "dgap")) {
- release_mem_region(brd->membase, 0x200000);
- return -ENOMEM;
- }
+ module_type = dgap_gettok(in);
+ if (module_type != PORTS && module_type != MODEM) {
+ pr_err("failed to set a type of module");
+ return -1;
+ }
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- return -ENOMEM;
- }
+ p->u.module.type = module_type;
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port) {
- release_mem_region(brd->membase, 0x200000);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_membase);
- return -ENOMEM;
- }
+ break;
- return 0;
-}
+ case CABLE:
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.line.cable = kstrdup(s, GFP_KERNEL);
+ p->u.line.v_cable = 1;
+ }
+ break;
-static void dgap_release_remap(struct board_t *brd)
-{
- if (brd->re_map_membase) {
- release_mem_region(brd->membase, 0x200000);
- iounmap(brd->re_map_membase);
- }
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.line.speed)) {
+ pr_err("bad number for line speed");
+ return -1;
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.conc.speed)) {
+ pr_err("bad number for line speed");
+ return -1;
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ pr_err("speed valid only for lines or concentrators.");
+ return -1;
+ }
+ break;
- if (brd->re_map_port) {
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- iounmap(brd->re_map_port);
- }
-}
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
+ case CONNECT:
+ if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ p->u.conc.connect = kstrdup(s, GFP_KERNEL);
+ p->u.conc.v_connect = 1;
+ }
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return -1;
-static void dgap_poll_handler(ulong dummy)
-{
- int i;
- struct board_t *brd;
- unsigned long lock_flags;
- ulong new_time;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_poll_counter++;
+ p = p->next;
+ p->type = PNODE;
- /*
- * Do not start the board state machine until
- * driver tells us its up and running, and has
- * everything it needs.
- */
- if (dgap_driver_state != DRIVER_READY)
- goto schedule_poller;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpeced end of file");
+ return -1;
+ }
+ p->u.printname = kstrdup(s, GFP_KERNEL);
+ if (!p->u.printname)
+ return -1;
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
- for (i = 0; i < dgap_numboards; i++) {
+ break;
- brd = dgap_board[i];
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return -1;
- if (brd->state == BOARD_FAILED)
- continue;
- if (!brd->intr_running)
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long) brd);
- }
- } else {
- /*
- * Go thru each board, kicking off a
- * tasklet for each if needed
- */
- for (i = 0; i < dgap_numboards; i++) {
- brd = dgap_board[i];
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll
- * will get it. Basically, I just really don't want
- * to spin in here, because I want to kick off my
- * tasklets as fast as I can, and then get out the
- * poller.
- */
- if (!spin_trylock(&brd->bd_lock))
- continue;
+ p = p->next;
+ p->type = JNODE;
- /*
- * If board is in a failed state, don't bother
- * scheduling a tasklet
- */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
}
+ if (kstrtol(s, 0, &p->u.majornumber)) {
+ pr_err("bad number for major number");
+ return -1;
+ }
+ break;
- /* Schedule a poll helper task */
- if (!brd->intr_running)
- tasklet_schedule(&brd->helper_tasklet);
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return -1;
-schedule_poller:
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+ p = p->next;
+ p->type = ANODE;
- new_time = dgap_poll_time - jiffies;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.altpin)) {
+ pr_err("bad number for altpin");
+ return -1;
+ }
+ break;
- if ((ulong) new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time =
- jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return -1;
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
+ p = p->next;
+ p->type = INTRNODE;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.useintr)) {
+ pr_err("bad number for useintr");
+ return -1;
+ }
+ break;
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = (struct board_t *) voidbrd;
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return -1;
- if (!brd)
- return IRQ_NONE;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- /*
- * Check to make sure its for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC)
- return IRQ_NONE;
+ p = p->next;
+ p->type = TSNODE;
- brd->intr_count++;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.ttysize)) {
+ pr_err("bad number for ttysize");
+ return -1;
+ }
+ break;
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return -1;
-/*
- * dgap_init_globals()
- *
- * This is where we initialize the globals from the static insmod
- * configuration variables. These are declared near the head of
- * this file.
- */
-static void dgap_init_globals(void)
-{
- int i;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- for (i = 0; i < MAXBOARDS; i++)
- dgap_board[i] = NULL;
+ p = p->next;
+ p->type = CSNODE;
- init_timer(&dgap_poll_timer);
-}
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.chsize)) {
+ pr_err("bad number for chsize");
+ return -1;
+ }
+ break;
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return -1;
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-static int dgap_tty_register(struct board_t *brd)
-{
- int rc;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- brd->serial_driver = tty_alloc_driver(MAXPORTS, 0);
- if (IS_ERR(brd->serial_driver))
- return PTR_ERR(brd->serial_driver);
+ p = p->next;
+ p->type = BSNODE;
- snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
- brd->boardnum);
- brd->serial_driver->name = brd->serial_name;
- brd->serial_driver->name_base = 0;
- brd->serial_driver->major = 0;
- brd->serial_driver->minor_start = 0;
- brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = dgap_default_termios;
- brd->serial_driver->driver_name = DRVSTR;
- brd->serial_driver->flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->serial_driver->ttys =
- kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->serial_driver->ttys) {
- rc = -ENOMEM;
- goto free_serial_drv;
- }
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.bssize)) {
+ pr_err("bad number for bssize");
+ return -1;
+ }
+ break;
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->serial_driver, &dgap_tty_ops);
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return -1;
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->print_driver = tty_alloc_driver(MAXPORTS, 0);
- if (IS_ERR(brd->print_driver)) {
- rc = PTR_ERR(brd->print_driver);
- goto free_serial_drv;
- }
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
- brd->boardnum);
- brd->print_driver->name = brd->print_name;
- brd->print_driver->name_base = 0;
- brd->print_driver->major = 0;
- brd->print_driver->minor_start = 0;
- brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = dgap_default_termios;
- brd->print_driver->driver_name = DRVSTR;
- brd->print_driver->flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
-
- /* The kernel wants space to store pointers to tty_structs */
- brd->print_driver->ttys =
- kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
- if (!brd->print_driver->ttys) {
- rc = -ENOMEM;
- goto free_print_drv;
- }
+ p = p->next;
+ p->type = USNODE;
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->print_driver, &dgap_tty_ops);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.unsize)) {
+ pr_err("bad number for schedsize");
+ return -1;
+ }
+ break;
- /* Register tty devices */
- rc = tty_register_driver(brd->serial_driver);
- if (rc < 0)
- goto free_print_drv;
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return -1;
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->print_driver);
- if (rc < 0)
- goto unregister_serial_drv;
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- dgap_boards_by_major[brd->serial_driver->major] = brd;
- brd->dgap_serial_major = brd->serial_driver->major;
+ p = p->next;
+ p->type = FSNODE;
- dgap_boards_by_major[brd->print_driver->major] = brd;
- brd->dgap_transparent_print_major = brd->print_driver->major;
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.f2size)) {
+ pr_err("bad number for f2200size");
+ return -1;
+ }
+ break;
- return 0;
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return -1;
-unregister_serial_drv:
- tty_unregister_driver(brd->serial_driver);
-free_print_drv:
- put_tty_driver(brd->print_driver);
-free_serial_drv:
- put_tty_driver(brd->serial_driver);
+ p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
+ if (!p->next)
+ return -1;
- return rc;
-}
+ p = p->next;
+ p->type = VSNODE;
-static void dgap_tty_unregister(struct board_t *brd)
-{
- tty_unregister_driver(brd->print_driver);
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->print_driver);
- put_tty_driver(brd->serial_driver);
+ s = dgap_getword(in);
+ if (!s) {
+ pr_err("unexpected end of file");
+ return -1;
+ }
+ if (kstrtol(s, 0, &p->u.vpixsize)) {
+ pr_err("bad number for vpixsize");
+ return -1;
+ }
+ break;
+ }
+ }
}
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
- */
-static int dgap_tty_init(struct board_t *brd)
+static void dgap_cleanup_nodes(void)
{
- int i;
- int tlw;
- uint true_count;
- u8 __iomem *vaddr;
- u8 modem;
- struct channel_t *ch;
- struct bs_t __iomem *bs;
- struct cm_t __iomem *cm;
- int ret;
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_num_prts(brd);
-
- if (!brd->nasync)
- brd->nasync = brd->maxports;
+ struct cnode *p;
- if (brd->nasync > brd->maxports)
- brd->nasync = brd->maxports;
+ p = &dgap_head;
- if (true_count != brd->nasync) {
- dev_warn(&brd->pdev->dev,
- "%s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count);
+ while (p) {
+ struct cnode *tmp = p->next;
- if ((brd->type == PPCM) &&
- (true_count == 64 || true_count == 0)) {
- dev_warn(&brd->pdev->dev,
- "Please make SURE the EBI cable running from the card\n");
- dev_warn(&brd->pdev->dev,
- "to each EM module is plugged into EBI IN!\n");
+ if (p->type == NULLNODE) {
+ p = tmp;
+ continue;
}
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return -EIO;
+ switch (p->type) {
+ case BNODE:
+ kfree(p->u.board.portstr);
+ kfree(p->u.board.addrstr);
+ kfree(p->u.board.pcibusstr);
+ kfree(p->u.board.pcislotstr);
+ kfree(p->u.board.method);
+ break;
+ case CNODE:
+ kfree(p->u.conc.id);
+ kfree(p->u.conc.connect);
+ break;
+ case MNODE:
+ kfree(p->u.module.id);
+ break;
+ case TNODE:
+ kfree(p->u.ttyname);
+ break;
+ case CUNODE:
+ kfree(p->u.cuname);
+ break;
+ case LNODE:
+ kfree(p->u.line.cable);
+ break;
+ case PNODE:
+ kfree(p->u.printname);
+ break;
}
- }
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- brd->channels[i] =
- kzalloc(sizeof(struct channel_t), GFP_KERNEL);
- if (!brd->channels[i]) {
- ret = -ENOMEM;
- goto free_chan;
- }
+ kfree(p->u.board.status);
+ kfree(p);
+ p = tmp;
}
+}
- ch = brd->channels[0];
- vaddr = brd->re_map_membase;
-
- bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
- cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
-
- spin_lock_init(&ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
+/*
+ * Retrieves the current custom baud rate from FEP memory
+ * and returns it to the user.
+ * Returns 0 on error.
+ */
+static uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ u8 __iomem *vaddr;
+ ulong offset;
+ uint value;
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return 0;
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- } else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
+ return 0;
- ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
- ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
- ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return 0;
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
- ch->ch_tsize / 2;
- ch->ch_tlw = tlw;
+ vaddr = ch->ch_bd->re_map_membase;
- dgap_cmdw(ch, STLOW, tlw, 0);
+ if (!vaddr)
+ return 0;
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+ /*
+ * Read from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
+ + LINE_SPEED;
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+ value = readw(vaddr + offset);
+ return value;
+}
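With made-up numbers, the offset arithmetic above works out as follows (the segment value, the port number and the LINE_SPEED offset are hypothetical; the shift and the 0x28 per-port stride come from the code above):

#include <stdio.h>

int main(void)
{
	unsigned int ecs_seg    = 0x0f00;	/* pretend ioread16(vaddr + ECS_SEG) */
	unsigned int portnum    = 3;		/* pretend ch->ch_portnum            */
	unsigned int line_speed = 0x10;		/* pretend LINE_SPEED field offset   */

	unsigned long offset = ((unsigned long)ecs_seg << 4)
			       + portnum * 0x28 + line_speed;

	/* 0xf000 + 0x78 + 0x10 = 0xf088: the word then read back with readw() */
	printf("custom baud word at membase + 0x%lx\n", offset);
	return 0;
}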
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+/*
+ * Remap PCI memory.
+ */
+static int dgap_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -EIO;
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ if (!request_mem_region(brd->membase, 0x200000, "dgap"))
+ return -ENOMEM;
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &(ch->ch_bs->m_int));
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
+ "dgap")) {
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &(ch->ch_bs->edelay));
- else
- writew(100, &(ch->ch_bs->edelay));
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
- writeb(1, &(ch->ch_bs->idata));
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ return -ENOMEM;
}
return 0;
-
-free_chan:
- while (--i >= 0) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- return ret;
}
-/*
- * dgap_tty_free()
- *
- * Free the channles which are allocated in dgap_tty_init().
- */
-static void dgap_tty_free(struct board_t *brd)
+static void dgap_unmap(struct board_t *brd)
{
- int i;
-
- for (i = 0; i < brd->nasync; i++)
- kfree(brd->channels[i]);
+ iounmap(brd->re_map_port);
+ iounmap(brd->re_map_membase);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ release_mem_region(brd->membase, 0x200000);
}
+
/*
- * dgap_cleanup_tty()
+ * dgap_parity_scan()
*
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
*/
-static void dgap_cleanup_tty(struct board_t *brd)
+static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
+ unsigned char *fbuf, int *len)
{
- struct device *dev;
- int i;
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
- dgap_boards_by_major[brd->serial_driver->major] = NULL;
- brd->dgap_serial_major = 0;
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- dev = brd->channels[i]->ch_tun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->serial_driver, i);
- }
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->serial_driver);
- kfree(brd->serial_ports);
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
- dgap_boards_by_major[brd->print_driver->major] = NULL;
- brd->dgap_transparent_print_major = 0;
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->printer_ports[i]);
- dev = brd->channels[i]->ch_pun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->print_driver, i);
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377')
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save value for examination in the next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ } else {
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
}
- tty_unregister_driver(brd->print_driver);
- put_tty_driver(brd->print_driver);
- kfree(brd->printer_ports);
+ *len = count;
}
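A worked example of the escape decoding above, showing what a few hypothetical FEP bytes turn into in cbuf and fbuf:

/*
 *   FEP bytes in       char out   flag out
 *   0x41 ('A')      -> 0x41       TTY_NORMAL
 *   0xff 0xff       -> 0xff       TTY_NORMAL   (doubled FF is a literal FF)
 *   0xff 0x00 0x00  -> 0x00       TTY_BREAK    (break indication)
 *   0xff 0x00 0x42  -> 0x42       TTY_PARITY   (parity error on 'B')
 */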
/*=======================================================================
@@ -1738,6 +1717,33 @@ static void dgap_input(struct channel_t *ch)
}
+static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
+ struct un_t *un, u32 mask,
+ unsigned long *irq_flags1,
+ unsigned long *irq_flags2)
+{
+ if (!(un->un_flags & mask))
+ return;
+
+ un->un_flags &= ~mask;
+
+ if (!(un->un_flags & UN_ISOPEN))
+ return;
+
+ if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ un->un_tty->ldisc->ops->write_wakeup) {
+ spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
+
+ (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
+
+ spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
+ spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
+ }
+ wake_up_interruptible(&un->un_tty->write_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+}
+
/************************************************************************
* Determines when CARRIER changes state and takes appropriate
* action.
@@ -1852,163 +1858,1207 @@ static void dgap_carrier(struct channel_t *ch)
ch->ch_flags &= ~CH_CD;
}
-/************************************************************************
+/*=======================================================================
*
- * TTY Entry points and helper functions
+ * dgap_event - FEP to host event processing routine.
*
- ************************************************************************/
-
-/*
- * dgap_tty_open()
+ * bd - Board of current event.
*
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
{
- struct board_t *brd;
struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- uint major;
- uint minor;
- int rc;
ulong lock_flags;
ulong lock_flags2;
- u16 head;
+ struct bs_t __iomem *bs;
+ u8 __iomem *event;
+ u8 __iomem *vaddr;
+ struct ev_t __iomem *eaddr;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return -EIO;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
- if (major > 255)
+ if (!vaddr) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
return -EIO;
+ }
- /* Get board pointer from our array of majors we have allocated */
- brd = dgap_boards_by_major[major];
- if (!brd)
+ eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ /* Let go of board lock */
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
return -EIO;
+ }
/*
- * If board is not yet up to a state of READY, go to
- * sleep waiting for it to happen or they cancel the open.
+ * Loop to process all the events in the buffer.
*/
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
+ while (tail != head) {
- if (rc)
- return rc;
+ /*
+ * Get interrupt information.
+ */
- spin_lock_irqsave(&brd->bd_lock, lock_flags);
+ event = bd->re_map_membase + tail + EVSTART;
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ port = ioread8(event);
+ reason = ioread8(event + 1);
+ modem = ioread8(event + 2);
+ b1 = ioread8(event + 3);
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if (port >= bd->nasync)
+ goto next;
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
+ goto next;
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ goto next;
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible
+ (&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room
+ (ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port,
+ 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
+ &lock_flags, &lock_flags2);
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+
+next:
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
}
- /* If opened device is greater than our number of ports, bail. */
- if (MINOR(tty_devnum(tty)) > brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ writew(tail, &(eaddr->ev_tail));
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+
+ return 0;
+}
+
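Editor's note: the loop above drains a ring of fixed-size 4-byte event records kept between EVSTART and EVMAX; the FEP advances the head, the host advances the tail, and wrap-around is done with (tail + 4) & (EVMAX - EVSTART - 4). The following is an illustrative user-space sketch of that drain pattern, not part of the patch; EV_SIZE, REC_LEN and the plain memory accesses stand in for the real board constants and the readw()/writew() accessors.

#include <stdio.h>
#include <stdint.h>

#define EV_SIZE 1024u   /* stand-in for (EVMAX - EVSTART); power of two */
#define REC_LEN 4u      /* each FEP event record is 4 bytes */

struct ev_ring {
	uint8_t  buf[EV_SIZE];
	uint16_t head;          /* advanced by the producer (FEP) */
	uint16_t tail;          /* advanced by the consumer (host) */
};

static void drain_events(struct ev_ring *r)
{
	uint16_t head = r->head;
	uint16_t tail = r->tail;

	while (tail != head) {
		const uint8_t *rec = &r->buf[tail];

		/* rec[0]=port, rec[1]=reason, rec[2]=modem, rec[3]=spare */
		printf("port %u reason 0x%02x modem 0x%02x\n",
		       (unsigned)rec[0], (unsigned)rec[1], (unsigned)rec[2]);

		/* advance and wrap, as in (tail + 4) & (EVMAX - EVSTART - 4) */
		tail = (tail + REC_LEN) & (EV_SIZE - REC_LEN);
	}
	r->tail = tail;         /* publish the new tail back to the producer */
}

int main(void)
{
	static struct ev_ring r = { .head = 8 };	/* two queued records */

	r.buf[0] = 1; r.buf[1] = 0x01;			/* record 0: port 1, reason bits */
	r.buf[4] = 2; r.buf[5] = 0x20;			/* record 1: port 2, reason bits */
	drain_events(&r);
	return 0;
}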
+/*
+ * Our board poller function.
+ */
+static void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ char __iomem *vaddr;
+ u16 head, tail;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
+ return;
+
+ if (bd->inhibit_poller)
+ return;
+
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t __iomem *eaddr;
+
+ if (!bd->re_map_membase) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync)
+ goto out;
+
+ eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+	 * If there is an event pending, go service it.
+ */
+ if (head != tail) {
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running)
+ readb(bd->re_map_port + 2);
+
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ return;
}
- ch = brd->channels[minor];
- if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+}
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
+ int boardnum)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i;
+ int ret;
+
+ /* get the board structure and prep it */
+ brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd)
+ return ERR_PTR(-ENOMEM);
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = boardnum;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_ids[id].name;
+ brd->maxports = dgap_ids[id].maxports;
+ brd->type = dgap_ids[id].config_type;
+ brd->dpatype = dgap_ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ spin_lock_init(&brd->bd_lock);
+
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++)
+ brd->channels[i] = NULL;
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
}
- /* Grab channel lock */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ if (!brd->membase) {
+ ret = -ENODEV;
+ goto free_brd;
+ }
- /* Figure out our type */
- if (major == brd->dgap_serial_major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- } else if (major == brd->dgap_transparent_print_major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+	 * On the PCI boards, there is no I/O space allocated.
+	 * The I/O registers will be in the first 3 bytes of the
+	 * upper 2MB of the 4MB memory space. The board memory
+	 * will be mapped into the low 2MB of the 4MB memory space.
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
+ (unsigned long) brd);
+
+ ret = dgap_remap(brd);
+ if (ret)
+ goto free_brd;
+
+ pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
+ boardnum, brd->name, brd->rev, brd->irq);
+
+ return brd;
+
+free_brd:
+ kfree(brd);
+
+ return ERR_PTR(ret);
+}
+
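Editor's note: dgap_found_board() reports failure through the kernel's ERR_PTR convention, so a single return value carries either a valid board pointer or a negative errno. The snippet below is an illustrative user-space sketch of that convention, with simplified reimplementations of ERR_PTR/IS_ERR/PTR_ERR and a hypothetical caller; it is not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified user-space versions of the kernel helpers: errors are
 * encoded in the top MAX_ERRNO values of the pointer space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct board { int boardnum; };

/* Hypothetical probe routine mirroring the ERR_PTR(-ENOMEM) return. */
static struct board *probe_board(int boardnum)
{
	struct board *brd = malloc(sizeof(*brd));

	if (!brd)
		return ERR_PTR(-ENOMEM);
	brd->boardnum = boardnum;
	return brd;
}

int main(void)
{
	struct board *brd = probe_board(0);

	if (IS_ERR(brd)) {
		fprintf(stderr, "probe failed: %ld\n", PTR_ERR(brd));
		return 1;
	}
	printf("board %d found\n", brd->boardnum);
	free(brd);
	return 0;
}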
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = voidbrd;
+
+ if (!brd)
+ return IRQ_NONE;
+
+ /*
+	 * Check to make sure it's for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC)
+ return IRQ_NONE;
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
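Editor's note: dgap_intr() follows the usual top-half/bottom-half split; the hard interrupt handler only bumps a counter and schedules the helper tasklet, and the tasklet does the slow queue processing later in softirq context. Below is a minimal kernel-style sketch of that split using the same tasklet_init()/tasklet_schedule() interface this driver uses. The my_dev/my_isr/my_bh names are hypothetical and the fragment is illustrative rather than part of the patch.

#include <linux/interrupt.h>

struct my_dev {
	unsigned long intr_count;
	struct tasklet_struct bh;
};

/* Bottom half: runs later in softirq context and does the heavy work
 * (queue draining, wakeups), analogous to dgap_poll_tasklet(). */
static void my_bh(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	(void)dev;
}

/* Top half: keep it short, just count and defer. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	dev->intr_count++;
	tasklet_schedule(&dev->bh);
	return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_init(&dev->bh, my_bh, (unsigned long)dev);
}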
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ unsigned int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+ /*
+	 * Do not start the board state machine until the
+	 * driver tells us it's up and running and has
+	 * everything it needs.
+ */
+ if (dgap_driver_state != DRIVER_READY)
+ goto schedule_poller;
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
+ for (i = 0; i < dgap_numboards; i++) {
+
+ brd = dgap_board[i];
+
+ if (brd->state == BOARD_FAILED)
+ continue;
+ if (!brd->intr_running)
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
} else {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ /*
+		 * Go through each board, kicking off a
+		 * tasklet for each if needed.
+ */
+ for (i = 0; i < dgap_numboards; i++) {
+ brd = dgap_board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+			 * If we can't get it, no big deal, the next poll
+			 * will get it. Basically, I just really don't want
+			 * to spin in here, because I want to kick off my
+			 * tasklets as fast as I can, and then get out of
+			 * the poller.
+ */
+ if (!spin_trylock(&brd->bd_lock))
+ continue;
+
+ /*
+ * If board is in a failed state, don't bother
+ * scheduling a tasklet
+ */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running)
+ tasklet_schedule(&brd->helper_tasklet);
+
+ /*
+ * Can't do DGAP_UNLOCK here, as we don't have
+ * lock_flags because we did a trylock above.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
}
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
+schedule_poller:
/*
- * Error if channel info pointer is NULL.
+ * Schedule ourself back at the nominal wakeup interval.
*/
- bs = ch->ch_bs;
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time =
+ jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
+
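Editor's note: dgap_poll_handler() reschedules itself by adding one nominal tick to dgap_poll_time, and it resynchronises to jiffies plus one tick whenever it has drifted two or more ticks behind rather than trying to catch up. The user-space sketch below models just that arithmetic; the function name and values are illustrative and rely on unsigned wraparound exactly as the jiffies math does.

#include <stdio.h>

static unsigned long next_expiry(unsigned long prev_expiry,
				 unsigned long now,
				 unsigned long tick)
{
	unsigned long expiry = prev_expiry + tick;

	/* unsigned subtraction wraps, just like "dgap_poll_time - jiffies" */
	if (expiry - now >= 2 * tick)
		expiry = now + tick;

	return expiry;
}

int main(void)
{
	unsigned long tick = 20;	/* nominal poll period, illustrative */

	printf("%lu\n", next_expiry(100, 105, tick));	/* on time: 120 */
	printf("%lu\n", next_expiry(100, 180, tick));	/* lagging: resync to 200 */
	return 0;
}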
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
+ u8 byte2, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct __iomem cm_t *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
}
/*
- * Initialize tty's
+ * Put the data in the circular command buffer.
*/
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
+ writeb(cmd, (vaddr + head + CMDSTART + 0));
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (vaddr + head + CMDSTART + 3));
- /* Maybe do something here to the TTY struct as well? */
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
}
+}
+
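Editor's note: dgap_cmdb() and the dgap_cmdw*() variants that follow write 4-byte commands into a circular buffer and then spin, bounded by dgap_count, until the FEP has drained the queue down to ncmds outstanding commands. The occupancy test is the masked difference between head and tail. The user-space sketch below shows that computation with an illustrative ring size; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define CMD_SIZE 0x200u		/* stand-in for (CMDMAX - CMDSTART); power of two */
#define CMD_LEN  4u		/* each command slot is 4 bytes */

/* Bytes currently queued, tolerant of head having wrapped past tail,
 * mirroring (head - tail) & (CMDMAX - CMDSTART - 4). */
static unsigned int cmd_bytes_queued(uint16_t head, uint16_t tail)
{
	return (head - tail) & (CMD_SIZE - CMD_LEN);
}

int main(void)
{
	printf("%u\n", cmd_bytes_queued(0x20, 0x10));	/* 16 bytes = 4 commands */
	printf("%u\n", cmd_bytes_queued(0x04, 0x1f8));	/* wrapped: 12 bytes */
	return 0;
}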
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct __iomem cm_t *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
/*
- * Initialize if neither terminal or printer is open.
+ * Check if board is still alive.
*/
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (vaddr + head + CMDSTART + 0));
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (vaddr + head + CMDSTART + 2));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char __iomem *vaddr;
+ struct __iomem cm_t *cm_addr;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED)
+ return;
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));
+
+ writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
+ writew((u16) word, (vaddr + CMDSTART));
+ else
+ writew((u16) word, (vaddr + head + CMDSTART + 4));
+
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char __iomem *taddr;
+ struct bs_t __iomem *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) ||
+ (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
+ return;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
+
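Editor's note: dgap_wmove() handles a write that crosses the top of the circular transmit buffer by splitting it into two copies, first up to the wrap point and then the remainder from the start of the buffer. Below is a user-space sketch of that split with an illustrative 8-byte ring in ordinary memory instead of memcpy_toio() on board memory; it is not part of the patch.

#include <stdio.h>
#include <string.h>

#define TX_SIZE 8u	/* illustrative ring size */

static unsigned int tx_write(char *ring, unsigned int head,
			     const char *buf, unsigned int cnt)
{
	unsigned int n = TX_SIZE - head;	/* room before the wrap point */

	if (cnt > TX_SIZE)			/* caller guarantees it fits, as dgap_wmove checks */
		return head;

	if (cnt >= n) {
		/* first part: up to the wrap, then reset to the start */
		memcpy(ring + head, buf, n);
		buf += n;
		cnt -= n;
		head = 0;
	}
	/* second part (or the whole write if it did not wrap) */
	memcpy(ring + head, buf, cnt);
	return head + cnt;			/* new head */
}

int main(void)
{
	char ring[TX_SIZE] = "........";
	unsigned int head = tx_write(ring, 6, "ABCDE", 5);

	/* "AB" lands at the end, "CDE" at the start; head is now 3 */
	printf("%.*s head=%u\n", (int)TX_SIZE, ring, head);
	return 0;
}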
+/*
+ * Calls the firmware to reset this channel.
+ */
+static void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * struct tty_struct * - TTY for port.
+ *
+ *=======================================================================*/
+static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
+{
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ u8 mval;
+ u8 hflow;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
/*
- * Flush input queue.
+		 * Tell the FEP to do the command.
*/
- head = readw(&(bs->rx_head));
- writew(head, &(bs->rx_tail));
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
+ /*
+		 * Now read back from FEP memory what the FEP
+		 * believes the custom baud rate is.
+ */
+ ch->ch_custom_speed = dgap_get_custom_baud(ch);
+ ch->ch_baud_info = ch->ch_custom_speed;
- /* TODO: flush our TTY struct here? */
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /*
+ * Only use the TXPrint baud rate if the
+ * terminal unit is NOT open
+ */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ un_type == DGAP_PRINT)
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
+ baud = bauds[iindex][jindex];
+ else
+ baud = 0;
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+ /*
+ * CBAUD has bit position 0x1000 set these days to
+ * indicate Linux baud rate remap.
+ * We use a different bit assignment for high speed.
+ * Clear this bit out while grabbing the parts of
+ * "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
+ CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
+ (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+ if ((ch->ch_c_cflag & CBAUDEX) &&
+ !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The below code is trying to guarantee that only
+ * baud rates 115200, 230400, 460800, 921600 are
+ * remapped. We use exclusive or because the various
+ * baud rates share common bit positions and therefore
+ * can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /*
+ * Map high speed requests to index
+ * into FEP's baud table
+ */
+ switch (tcflag) {
+ case B57600:
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800:
+ baudpart = 2;
+ break;
+#endif
+ case B115200:
+ baudpart = 3;
+ break;
+ case B230400:
+ baudpart = 9;
+ break;
+ case B460800:
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600:
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /*
+ * Okay to have channel and board
+ * locks held calling this
+ */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
}
- dgap_carrier(ch);
/*
- * Run param in case we changed anything
+ * Get input flags.
*/
- dgap_param(ch, brd, un->un_type);
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
+ INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
/*
- * follow protocol for opening port
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEV_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
*/
+ hflow = 0;
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ if (ch->ch_c_cflag & CRTSCTS)
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
- rc = dgap_block_til_ready(tty, file, ch);
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
- if (!un->un_tty)
- return -ENODEV;
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
+ }
- /* No going back now, increment our unit and channel counters */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ /*
+ * Set RTS and/or DTR Toggle if needed,
+ * but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
- return rc;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+ hflow2 |= (D_RTS(ch));
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+ hflow2 |= (D_DTR(ch));
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc ||
+ ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc ||
+ ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ return 0;
}
/*
@@ -2143,15 +3193,18 @@ static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
}
/*
- * dgap_tty_hangup()
+ * dgap_tty_flush_buffer()
*
- * Hangup the port. Like a close, but don't wait for output to drain.
+ * Flush Tx buffer (make in == out)
*/
-static void dgap_tty_hangup(struct tty_struct *tty)
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -2168,21 +3221,39 @@ static void dgap_tty_hangup(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
+ spin_lock_irqsave(&bd->bd_lock, lock_flags);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
}
/*
- * dgap_tty_close()
+ * dgap_tty_hangup()
*
+ * Hangup the port. Like a close, but don't wait for output to drain.
*/
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+static void dgap_tty_hangup(struct tty_struct *tty)
{
- struct ktermios *ts;
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
- ulong lock_flags;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -2199,107 +3270,8 @@ static void dgap_tty_close(struct tty_struct *tty, struct file *file)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- ts = &tty->termios;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /*
- * Determine if this is the last close or not - and if we agree about
- * which type of close it is with the Line Discipline
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0)
- un->un_open_count = 0;
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return;
- }
-
- /* OK, its the last close on the unit */
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) &&
- !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- dgap_wait_for_drain(tty);
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL) {
- ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
- dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
-
- /* .25 second delay for dropping RTS/DTR */
- schedule_timeout_interruptible(msecs_to_jiffies(250));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
-
- }
-
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int) ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
}
/*
@@ -2599,22 +3571,6 @@ static int dgap_tty_write_room(struct tty_struct *tty)
}
/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call tty_write.
- */
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-/*
* dgap_tty_write()
*
* Take data from the user or kernel and send it out to the FEP.
@@ -2629,7 +3585,6 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
char __iomem *vaddr;
u16 head, tail, tmask, remain;
int bufcount, n;
- int orig_count;
ulong lock_flags;
if (!tty)
@@ -2650,13 +3605,6 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
if (!count)
return 0;
- /*
- * Store original amount of characters passed in.
- * This helps to figure out if we should ask the FEP
- * to send us an event when it has more space available.
- */
- orig_count = count;
-
spin_lock_irqsave(&ch->ch_lock, lock_flags);
/* Get our space available for the channel from the board */
@@ -2784,6 +3732,22 @@ static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
}
/*
+ * dgap_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+/*
* Return modem signals to ld.
*/
static int dgap_tty_tiocmget(struct tty_struct *tty)
@@ -3444,13 +4408,189 @@ static void dgap_tty_unthrottle(struct tty_struct *tty)
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
-static void dgap_tty_start(struct tty_struct *tty)
+static struct board_t *find_board_by_major(unsigned int major)
{
- struct board_t *bd;
+ unsigned int i;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ struct board_t *brd = dgap_board[i];
+
+ if (!brd)
+ return NULL;
+ if (major == brd->serial_driver->major ||
+ major == brd->print_driver->major)
+ return brd;
+ }
+
+ return NULL;
+}
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
struct channel_t *ch;
struct un_t *un;
+ struct bs_t __iomem *bs;
+ uint major;
+ uint minor;
+ int rc;
ulong lock_flags;
ulong lock_flags2;
+ u16 head;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ brd = find_board_by_major(major);
+ if (!brd)
+ return -EIO;
+
+ /*
+ * If board is not yet up to a state of READY, go to
+ * sleep waiting for it to happen or they cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&brd->bd_lock, lock_flags);
+
+ /* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* If opened device is greater than our number of ports, bail. */
+ if (MINOR(tty_devnum(tty)) > brd->nasync) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* Grab channel lock */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->serial_driver->major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ } else if (major == brd->print_driver->major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ } else {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ /*
+ * Error if channel info pointer is NULL.
+ */
+ bs = ch->ch_bs;
+ if (!bs) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+ return -EIO;
+ }
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+ /*
+	 * Initialize if neither terminal nor printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(ch, brd, un->un_type);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty)
+ return -ENODEV;
+
+ /* No going back now, increment our unit and channel counters */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ return rc;
+}
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -3467,16 +4607,110 @@ static void dgap_tty_start(struct tty_struct *tty)
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return;
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ ts = &tty->termios;
- dgap_cmdw(ch, RESUMETX, 0, 0);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0)
+ un->un_open_count = 0;
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+	/* OK, it's the last close on the unit */
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) &&
+ !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ dgap_wait_for_drain(tty);
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ spin_unlock_irqrestore(&ch->ch_lock,
+ lock_flags);
+
+ /* .25 second delay for dropping RTS/DTR */
+ schedule_timeout_interruptible(msecs_to_jiffies(250));
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+ /*
+	 * Turn off the print device when closing the print device.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
}
-static void dgap_tty_stop(struct tty_struct *tty)
+static void dgap_tty_start(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
@@ -3502,26 +4736,13 @@ static void dgap_tty_stop(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- dgap_cmdw(ch, PAUSETX, 0, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cook buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
+static void dgap_tty_stop(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
@@ -3547,25 +4768,32 @@ static void dgap_tty_flush_chars(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- /* TODO: Do something here */
+ dgap_cmdw(ch, PAUSETX, 0, 0);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
- * dgap_tty_flush_buffer()
+ * dgap_tty_flush_chars()
*
- * Flush Tx buffer (make in == out)
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
*/
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
+static void dgap_tty_flush_chars(struct tty_struct *tty)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
ulong lock_flags;
ulong lock_flags2;
- u16 head;
if (!tty || tty->magic != TTY_MAGIC)
return;
@@ -3585,24 +4813,10 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
spin_lock_irqsave(&bd->bd_lock, lock_flags);
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- ch->ch_flags &= ~CH_STOP;
- head = readw(&(ch->ch_bs->tx_head));
- dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
+ /* TODO: Do something here */
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
}
/*****************************************************************************
@@ -3996,1614 +5210,151 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
}
-static int dgap_alloc_flipbuf(struct board_t *brd)
-{
- /*
- * allocate flip buffer for board.
- */
- brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipbuf)
- return -ENOMEM;
-
- brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipflagbuf) {
- kfree(brd->flipbuf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dgap_free_flipbuf(struct board_t *brd)
-{
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-}
-
-/*
- * Create pr and tty device entries
- */
-static int dgap_tty_register_ports(struct board_t *brd)
-{
- struct channel_t *ch;
- int i;
- int ret;
-
- brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
- GFP_KERNEL);
- if (!brd->serial_ports)
- return -ENOMEM;
-
- brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
- GFP_KERNEL);
- if (!brd->printer_ports) {
- ret = -ENOMEM;
- goto free_serial_ports;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_init(&brd->serial_ports[i]);
- tty_port_init(&brd->printer_ports[i]);
- }
-
- ch = brd->channels[0];
- for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
-
- struct device *classp;
-
- classp = tty_port_register_device(&brd->serial_ports[i],
- brd->serial_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_tun, classp);
- ch->ch_tun.un_sysfs = classp;
-
- classp = tty_port_register_device(&brd->printer_ports[i],
- brd->print_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_pun, classp);
- ch->ch_pun.un_sysfs = classp;
- }
- dgap_create_ports_sysfiles(brd);
-
- return 0;
-
-unregister_ttys:
- while (i >= 0) {
- ch = brd->channels[i];
- if (ch->ch_tun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
- tty_unregister_device(brd->serial_driver, i);
- }
-
- if (ch->ch_pun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
- tty_unregister_device(brd->print_driver, i);
- }
- i--;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- tty_port_destroy(&brd->printer_ports[i]);
- }
-
- kfree(brd->printer_ports);
- brd->printer_ports = NULL;
-
-free_serial_ports:
- kfree(brd->serial_ports);
- brd->serial_ports = NULL;
-
- return ret;
-}
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
-{
- u8 __iomem *addr;
- uint offset;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ubios, len);
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
-}
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static int dgap_test_bios(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /*
- * It can take 5-6 seconds for a board to
- * pass the bios self test and post results.
- * Give it 10 seconds.
- */
- brd->wait_for_bios = 0;
- while (brd->wait_for_bios < 1000) {
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *) "GD")
- return 0;
- msleep_interruptible(10);
- brd->wait_for_bios++;
- word = readw(addr + POSTAREA);
- }
-
- /* Gave up on board after too long of time taken */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOBIOS;
-
- return -EIO;
-}
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
-{
- u8 __iomem *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ufep, len);
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- u8 string[100];
- u8 __iomem *config;
- u8 *xconfig;
- int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-
-}
-
-/*
- * Waits for the FEP to report thats its ready for us to use.
- */
-static int dgap_test_fep(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + FEPSTAT);
-
- /*
- * It can take 2-3 seconds for the FEP to
- * be up and running. Give it 5 secs.
- */
- brd->wait_for_fep = 0;
- while (brd->wait_for_fep < 500) {
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *) "OS") {
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *) "5A")
- brd->bd_flags |= BD_FEP5PLUS;
-
- return 0;
- }
- msleep_interruptible(10);
- brd->wait_for_fep++;
- word = readw(addr + FEPSTAT);
- }
-
- /* Gave up on board after too long of time taken */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev,
- "FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
-
- return -EIO;
-}
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- u8 check;
- u32 check1;
- u32 check2;
- int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
- !brd->re_map_membase || !brd->re_map_port)
- return;
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
-
- }
- if (i > 1000) {
- dev_warn(&brd->pdev->dev,
- "dgap: Board not resetting... Failing board.\n");
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- dev_warn(&brd->pdev->dev,
- "No memory at %p for board.\n",
- brd->re_map_membase);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-}
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
-{
- char __iomem *vaddr;
- u16 offset;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *) (vaddr + DOWNREQ));
- to_dp = (struct downld_t *) (vaddr + (int) offset);
- memcpy_toio(to_dp, uaddr, len);
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-#endif
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- u8 byte1;
- u8 byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
-
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
-
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data
- * Structure.
- *
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
- rom_offset = readw(brd->re_map_membase +
- base_offset + 0x18);
- image_length = readw(brd->re_map_membase +
- rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase +
- rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++) {
- brd->vpd[i] = readb(brd->re_map_membase +
- vpd_offset + i);
- }
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
-
-/*
- * Our board poller function.
- */
-static void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *) data;
- ulong lock_flags;
- char __iomem *vaddr;
- u16 head, tail;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
- return;
-
- if (bd->inhibit_poller)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
-
- struct ev_t __iomem *eaddr;
-
- if (!bd->re_map_membase) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync)
- goto out;
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * If there is an event pending. Go service it.
- */
- if (head != tail) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_event(bd);
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd && bd->intr_running)
- readb(bd->re_map_port + 2);
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2 byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writeb(byte1, (vaddr + head + CMDSTART + 2));
- writeb(byte2, (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
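dgap_cmdb(), dgap_cmdw() and dgap_cmdw_ext() all share the same circular command-buffer bookkeeping: the head index is advanced and wrapped with a mask, and the number of outstanding bytes is derived from (head - tail) under the same mask. The sketch below is a self-contained userspace illustration of that arithmetic; CMD_RING_SIZE is an assumed stand-in for CMDMAX - CMDSTART, not the driver's real constant.

/*
 * Command-ring arithmetic sketch: 4-byte slots, power-of-two ring, so
 * masking with (CMD_RING_SIZE - 4) both wraps the index and keeps it
 * 4-byte aligned.
 */
#include <stdio.h>
#include <stdint.h>

#define CMD_RING_SIZE  0x400u		/* assumed ring size */
#define CMD_SLOT_SIZE  4u

static uint16_t ring_advance(uint16_t head)
{
	return (head + CMD_SLOT_SIZE) & (CMD_RING_SIZE - CMD_SLOT_SIZE);
}

static unsigned int ring_outstanding(uint16_t head, uint16_t tail)
{
	/* bytes queued but not yet consumed by the FEP */
	return (uint16_t)(head - tail) & (CMD_RING_SIZE - CMD_SLOT_SIZE);
}

int main(void)
{
	uint16_t head = CMD_RING_SIZE - CMD_SLOT_SIZE;	/* about to wrap */
	uint16_t tail = 0x010;

	printf("outstanding before write: %u bytes\n",
	       ring_outstanding(head, tail));
	head = ring_advance(head);
	printf("head after wrap: 0x%03x\n", head);
	return 0;
}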
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1 word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) word, (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends a extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
- head = readw(&(cm_addr->cm_head));
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));
-
- writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16) cmd, (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
- writew((u16) word, (vaddr + CMDSTART));
- else
- writew((u16) word, (vaddr + head + CMDSTART + 4));
-
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &(cm_addr->cm_head));
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
-
- head = readw(&(cm_addr->cm_head));
- tail = readw(&(cm_addr->cm_tail));
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
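The new dgap_tty_ops table gathers all entry points into one static structure that is later handed to both the serial and the transparent-print driver via tty_set_operations(). A minimal sketch of that function-pointer-table idea is below; the struct and names are illustrative only, not the kernel's struct tty_operations.

/*
 * One shared operations table, used by several driver instances.
 */
#include <stdio.h>

struct demo_ops {
	int  (*open)(int port);
	void (*close)(int port);
};

static int demo_open(int port)
{
	printf("open port %d\n", port);
	return 0;
}

static void demo_close(int port)
{
	printf("close port %d\n", port);
}

/* One table, many users -- mirrors tty_set_operations() being called twice. */
static const struct demo_ops demo_tty_ops = {
	.open  = demo_open,
	.close = demo_close,
};

int main(void)
{
	demo_tty_ops.open(0);
	demo_tty_ops.close(0);
	return 0;
}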
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
+/************************************************************************
*
- * ch - Pointer to channel structure.
- * buf - Poiter to characters to be moved.
- * cnt - Number of characters to move.
+ * TTY Initialization/Cleanup Functions
*
- *=======================================================================*/
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char __iomem *taddr;
- struct bs_t __iomem *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&(bs->tx_head));
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) ||
- (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
- return;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &(bs->tx_head));
-}
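dgap_wmove() performs a split copy into the FEP transmit ring: if the write would run past the end of the buffer, it copies up to the wrap point first, then continues from the start. The self-contained sketch below mirrors that logic with plain memory standing in for the memcpy_toio() window; the tiny ring size is an assumption for illustration.

/*
 * Wrapping write into a circular transmit buffer.
 */
#include <stdio.h>
#include <string.h>

#define TX_SIZE 8u			/* assumed tiny ring for illustration */

static unsigned int tx_write(char *ring, unsigned int head,
			     const char *buf, unsigned int cnt)
{
	unsigned int n = TX_SIZE - head;	/* room before the wrap point */

	if (cnt >= n) {
		memcpy(ring + head, buf, n);	/* copy up to the wrap */
		buf += n;
		cnt -= n;
		head = 0;			/* reset to the bottom */
	}
	memcpy(ring + head, buf, cnt);		/* move the rest */
	return head + cnt;			/* new head */
}

int main(void)
{
	char ring[TX_SIZE + 1] = "........";
	unsigned int head = 6;

	head = tx_write(ring, head, "ABCD", 4);
	printf("ring=%s new head=%u\n", ring, head);	/* CD....AB, head=2 */
	return 0;
}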
-
-/*
- * Retrives the current custom baud rate from FEP memory,
- * and returns it back to the user.
- * Returns 0 on error.
- */
-static uint dgap_get_custom_baud(struct channel_t *ch)
-{
- u8 __iomem *vaddr;
- ulong offset;
- uint value;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
- * Go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
- + LINE_SPEED;
-
- value = readw(vaddr + offset);
- return value;
-}
+ ************************************************************************/
/*
- * Calls the firmware to reset this channel.
- */
-static void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * struct tty_struct * - TTY for port.
+ * dgap_tty_register()
*
- *=======================================================================*/
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
+ * Init the tty subsystem for this board.
+ */
+static int dgap_tty_register(struct board_t *brd)
{
- u16 head;
- u16 cflag;
- u16 iflag;
- u8 mval;
- u8 hflow;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
-
- /* flush rx */
- head = readw(&(ch->ch_bs->rx_head));
- writew(head, &(ch->ch_bs->rx_tail));
-
- /* flush tx */
- head = readw(&(ch->ch_bs->tx_head));
- writew(head, &(ch->ch_bs->tx_tail));
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- ch->ch_custom_speed = dgap_get_custom_baud(ch);
- ch->ch_baud_info = ch->ch_custom_speed;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the
- * terminal unit is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- un_type == DGAP_PRINT)
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) &&
- (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
- /*
- * CBAUD has bit position 0x1000 set these days to
- * indicate Linux baud rate remap.
- * We use a different bit assignment for high speed.
- * Clear this bit out while grabbing the parts of
- * "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
- CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
- (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
- if ((ch->ch_c_cflag & CBAUDEX) &&
- !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only
- * baud rates 115200, 230400, 460800, 921600 are
- * remapped. We use exclusive or because the various
- * baud rates share common bit positions and therefore
- * can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /*
- * Map high speed requests to index
- * into FEP's baud table
- */
- switch (tcflag) {
- case B57600:
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800:
- baudpart = 2;
- break;
-#endif
- case B115200:
- baudpart = 3;
- break;
- case B230400:
- baudpart = 9;
- break;
- case B460800:
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600:
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16) (cflag & 0xffff);
-
- /*
- * Okay to have channel and board
- * locks held calling this
- */
- dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
- INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEV_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS)
- hflow |= (D_RTS(ch) | D_CTS(ch));
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
+ int rc;
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
+ brd->serial_driver = tty_alloc_driver(MAXPORTS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->serial_driver))
+ return PTR_ERR(brd->serial_driver);
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
- }
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
+ brd->boardnum);
+ brd->serial_driver->name = brd->serial_name;
+ brd->serial_driver->name_base = 0;
+ brd->serial_driver->major = 0;
+ brd->serial_driver->minor_start = 0;
+ brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->serial_driver->init_termios = dgap_default_termios;
+ brd->serial_driver->driver_name = DRVSTR;
/*
- * Set RTS and/or DTR Toggle if needed,
- * but only if product is FEP5+ based.
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
*/
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
-
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
- hflow2 |= (D_RTS(ch));
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
- hflow2 |= (D_DTR(ch));
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
+ tty_set_operations(brd->serial_driver, &dgap_tty_ops);
/*
- * Set modem control lines.
+ * If we're doing transparent print, we have to do all of the above
+	 * again, separately, so the line discipline is not confused about
+	 * which major number we are using once we get into the
+	 * dgap_tty_open() routine.
*/
-
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
+ brd->print_driver = tty_alloc_driver(MAXPORTS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->print_driver)) {
+ rc = PTR_ERR(brd->print_driver);
+ goto free_serial_drv;
}
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
- dgap_carrier(ch);
+ snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
+ brd->boardnum);
+ brd->print_driver->name = brd->print_name;
+ brd->print_driver->name_base = 0;
+ brd->print_driver->major = 0;
+ brd->print_driver->minor_start = 0;
+ brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->print_driver->init_termios = dgap_default_termios;
+ brd->print_driver->driver_name = DRVSTR;
/*
- * Set the start and stop characters.
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
*/
- if (ch->ch_startc != ch->ch_fepstartc ||
- ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
+ tty_set_operations(brd->print_driver, &dgap_tty_ops);
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc ||
- ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
+ /* Register tty devices */
+ rc = tty_register_driver(brd->serial_driver);
+ if (rc < 0)
+ goto free_print_drv;
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->print_driver);
+ if (rc < 0)
+ goto unregister_serial_drv;
return 0;
-}
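The heart of the removed dgap_param() is a table lookup: the low bits of c_cflag pick a column, while CBAUDEX and DIGI_FAST select one of four rows in the bauds[4][16] table, with 9600 as the fallback. The sketch below reproduces that indexing in a self-contained form; the table is truncated and the flag values are local stand-ins, not the termios constants.

/*
 * Baud-table lookup sketch (truncated table, illustrative flags).
 */
#include <stdio.h>

#define DEMO_CBAUDEX  0x1
#define DEMO_FAST     0x2

static unsigned long lookup_baud(unsigned int code, unsigned int flags)
{
	static const unsigned long bauds[4][16] = {
		[0] = { 0, 50, 75, 110, 134, 150, 200, 300,
			600, 1200, 1800, 2400, 4800, 9600, 19200, 38400 },
		[1] = { 0, 57600, 115200, 230400, 460800, 150, 200, 921600,
			600, 1200, 1800, 2400, 4800, 9600, 19200, 38400 },
	};
	unsigned int row = 0;

	if (flags & DEMO_CBAUDEX)
		row += 1;
	if (flags & DEMO_FAST)
		row += 2;

	if (row >= 4 || code >= 16 || !bauds[row][code])
		return 9600;			/* driver's fallback rate */
	return bauds[row][code];
}

int main(void)
{
	printf("%lu\n", lookup_baud(13, 0));		/* 9600   */
	printf("%lu\n", lookup_baud(2, DEMO_CBAUDEX));	/* 115200 */
	return 0;
}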
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char) '\377')
- /* delete this character from stream */
- ch->pscan_state = 1;
- else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char) '\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
- /* save value examination in next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
-
- if (c == 0x0) {
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- } else {
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
+unregister_serial_drv:
+ tty_unregister_driver(brd->serial_driver);
+free_print_drv:
+ put_tty_driver(brd->print_driver);
+free_serial_drv:
+ put_tty_driver(brd->serial_driver);
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
+ return rc;
}
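The rewritten dgap_tty_register() unwinds on failure through labelled exits (unregister_serial_drv, free_print_drv, free_serial_drv): resources are acquired in order and released in reverse order. Below is a hedged, self-contained sketch of that goto-unwind pattern, with plain malloc() buffers standing in for the two tty drivers; names are illustrative.

/*
 * Acquire in order, roll back in reverse order via labelled exits.
 */
#include <stdlib.h>

static int register_two_drivers(void **serial, void **print)
{
	int rc;

	*serial = malloc(64);		/* stands in for the serial driver */
	if (!*serial)
		return -1;

	*print = malloc(64);		/* stands in for the print driver */
	if (!*print) {
		rc = -1;
		goto free_serial;
	}

	/* ... further registration steps would go here ... */

	return 0;

free_serial:
	free(*serial);
	*serial = NULL;
	return rc;
}

int main(void)
{
	void *s, *p;

	return register_two_drivers(&s, &p) ? 1 : (free(p), free(s), 0);
}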
-static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
- struct un_t *un, u32 mask,
- unsigned long *irq_flags1,
- unsigned long *irq_flags2)
+static void dgap_tty_unregister(struct board_t *brd)
{
- if (!(un->un_flags & mask))
- return;
-
- un->un_flags &= ~mask;
-
- if (!(un->un_flags & UN_ISOPEN))
- return;
-
- if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- un->un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
- (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
- spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
- spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
- }
- wake_up_interruptible(&un->un_tty->write_wait);
- wake_up_interruptible(&un->un_flags_wait);
+ tty_unregister_driver(brd->print_driver);
+ tty_unregister_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
+ put_tty_driver(brd->serial_driver);
}
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
+static int dgap_alloc_flipbuf(struct board_t *brd)
{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t __iomem *bs;
- u8 __iomem *event;
- u8 __iomem *vaddr;
- struct ev_t __iomem *eaddr;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
- int b1;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&(eaddr->ev_head));
- tail = readw(&(eaddr->ev_tail));
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- /* Let go of board lock */
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
/*
- * Loop to process all the events in the buffer.
+ * allocate flip buffer for board.
*/
- while (tail != head) {
-
- /*
- * Get interrupt information.
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = ioread8(event);
- reason = ioread8(event + 1);
- modem = ioread8(event + 2);
- b1 = ioread8(event + 3);
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
- goto next;
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- goto next;
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
-
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &(bs->idata));
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible
- (&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
-
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room
- (ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port,
- 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+ brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
+ if (!brd->flipbuf)
+ return -ENOMEM;
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
+ if (!brd->flipflagbuf) {
+ kfree(brd->flipbuf);
+ return -ENOMEM;
}
- writew(tail, &(eaddr->ev_tail));
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
return 0;
}
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
- dgap_driver_pollrate_store);
-
-static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
-
- return rc;
-}
-
-static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+static void dgap_free_flipbuf(struct board_t *brd)
{
- struct device_driver *driverfs = &dgap_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
}
static struct board_t *dgap_verify_board(struct device *p)
@@ -5626,7 +5377,7 @@ static ssize_t dgap_ports_state_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5647,7 +5398,7 @@ static ssize_t dgap_ports_baud_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5668,7 +5419,7 @@ static ssize_t dgap_ports_msignals_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5705,7 +5456,7 @@ static ssize_t dgap_ports_iflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5725,7 +5476,7 @@ static ssize_t dgap_ports_cflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5745,7 +5496,7 @@ static ssize_t dgap_ports_oflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5765,7 +5516,7 @@ static ssize_t dgap_ports_lflag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5785,7 +5536,7 @@ static ssize_t dgap_ports_digi_flag_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5805,7 +5556,7 @@ static ssize_t dgap_ports_rxcount_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5825,7 +5576,7 @@ static ssize_t dgap_ports_txcount_show(struct device *p,
{
struct board_t *bd;
int count = 0;
- int i;
+ unsigned int i;
bd = dgap_verify_board(p);
if (!bd)
@@ -5839,39 +5590,6 @@ static ssize_t dgap_ports_txcount_show(struct device *p,
}
static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-/* this function creates the sys files that will export each signal status
- * to sysfs each value will be put in a separate filename
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- dev_set_drvdata(&bd->pdev->dev, bd);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
-/* removes all the sys files created for that port */
-static void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
- device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
-}
-
static ssize_t dgap_tty_state_show(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -6242,7 +5960,6 @@ static ssize_t dgap_tty_name_show(struct device *d,
}
ncount += cptr->u.module.nport;
-
}
}
@@ -6266,1086 +5983,1211 @@ static struct attribute *dgap_sysfs_tty_entries[] = {
NULL
};
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+/* this function creates the sys files that will export each signal status
+ * to sysfs each value will be put in a separate filename
+ */
+static void dgap_create_ports_sysfiles(struct board_t *bd)
{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret)
- return;
-
- dev_set_drvdata(c, un);
-
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
-static void dgap_remove_tty_sysfs(struct device *c)
+/* removes all the sys files created for that port */
+static void dgap_remove_ports_sysfiles(struct board_t *bd)
{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
-static void dgap_cleanup_nodes(void)
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
{
- struct cnode *p;
+ u8 __iomem *addr;
+ uint offset;
+ unsigned int i;
- p = &dgap_head;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- while (p) {
- struct cnode *tmp = p->next;
+ addr = brd->re_map_membase;
- if (p->type == NULLNODE) {
- p = tmp;
- continue;
- }
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
- switch (p->type) {
- case BNODE:
- kfree(p->u.board.portstr);
- kfree(p->u.board.addrstr);
- kfree(p->u.board.pcibusstr);
- kfree(p->u.board.pcislotstr);
- kfree(p->u.board.method);
- break;
- case CNODE:
- kfree(p->u.conc.id);
- kfree(p->u.conc.connect);
- break;
- case MNODE:
- kfree(p->u.module.id);
- break;
- case TNODE:
- kfree(p->u.ttyname);
- break;
- case CUNODE:
- kfree(p->u.cuname);
- break;
- case LNODE:
- kfree(p->u.line.cable);
- break;
- case PNODE:
- kfree(p->u.printname);
- break;
- }
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ubios, len);
- kfree(p->u.board.status);
- kfree(p);
- p = tmp;
- }
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
}
+
/*
- * Parse a configuration file read into memory as a string.
+ * Checks to see if the BIOS completed running on the card.
*/
-static int dgap_parsefile(char **in)
+static int dgap_test_bios(struct board_t *brd)
{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s;
- int linecnt = 0;
+ u8 __iomem *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
- p = &dgap_head;
- brd = line = conc = NULL;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return -EINVAL;
- /* perhaps we are adding to an existing list? */
- while (p->next)
- p = p->next;
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
- /* file must start with a BEGIN */
- while ((rc = dgap_gettok(in)) != BEGIN) {
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
+ /*
+ * It can take 5-6 seconds for a board to
+ * pass the bios self test and post results.
+ * Give it 10 seconds.
+ */
+ brd->wait_for_bios = 0;
+ while (brd->wait_for_bios < 1000) {
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD")
+ return 0;
+ msleep_interruptible(10);
+ brd->wait_for_bios++;
+ word = readw(addr + POSTAREA);
}
- for (; ;) {
- int board_type = 0;
- int conc_type = 0;
- int module_type = 0;
+	/* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOBIOS;
- rc = dgap_gettok(in);
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
+ return -EIO;
+}
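dgap_test_bios() and dgap_test_fep() both poll a status word until it spells a two-character tag ("GD", "OS"), giving up after a fixed budget. The sketch below shows that poll-until-marker pattern in self-contained form; an ordinary variable replaces the board memory, a loop count replaces the 10-second budget, and the expected value is built from the same string because reading a 2-byte tag through a u16 is endian-dependent.

/*
 * Poll a status word until it matches a two-character tag, or time out.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint16_t post_area;			/* stands in for POSTAREA */

static int wait_for_tag(const char tag[2], unsigned int max_polls)
{
	uint16_t want;
	unsigned int i;

	memcpy(&want, tag, sizeof(want));	/* same trick as *(u16 *)"GD" */

	for (i = 0; i < max_polls; i++) {
		if (post_area == want)
			return 0;		/* board reported success */
		/* the driver sleeps ~10 ms here via msleep_interruptible() */
	}
	return -1;				/* diagnostics never finished */
}

int main(void)
{
	memcpy(&post_area, "GD", 2);		/* pretend the BIOS passed */
	printf("BIOS check: %s\n", wait_for_tag("GD", 1000) ? "failed" : "ok");
	return 0;
}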
- switch (rc) {
- case BEGIN: /* should only be 1 begin */
- pr_err("unexpected config_begin\n");
- return -1;
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
+{
+ u8 __iomem *addr;
+ uint offset;
- case END:
- return 0;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return -1;
+ addr = brd->re_map_membase;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+ memcpy_toio(addr + offset, ufep, len);
- p = p->next;
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ u8 string[100];
+ u8 __iomem *config;
+ u8 *xconfig;
+ unsigned int i = 0;
- p->type = BNODE;
- p->u.board.status = kstrdup("No", GFP_KERNEL);
- line = conc = NULL;
- brd = p;
- linecnt = -1;
+ xconfig = dgap_create_config_string(brd, string);
- board_type = dgap_gettok(in);
- if (board_type == 0) {
- pr_err("board !!type not specified");
- return -1;
- }
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
- p->u.board.type = board_type;
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
- break;
+}
- case IO: /* i/o port */
- if (p->type != BNODE) {
- pr_err("IO port only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.portstr = kstrdup(s, GFP_KERNEL);
- if (kstrtol(s, 0, &p->u.board.port)) {
- pr_err("bad number for IO port");
- return -1;
- }
- p->u.board.v_port = 1;
- break;
+/*
+ * Waits for the FEP to report that it is ready for us to use.
+ */
+static int dgap_test_fep(struct board_t *brd)
+{
+ u8 __iomem *addr;
+ u16 word;
+ u16 err1;
+ u16 err2;
- case MEM: /* memory address */
- if (p->type != BNODE) {
- pr_err("memory address only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.addr)) {
- pr_err("bad number for memory address");
- return -1;
- }
- p->u.board.v_addr = 1;
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return -EINVAL;
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
- pr_err("memory address only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcibus)) {
- pr_err("bad number for pci bus");
- return -1;
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcislot)) {
- pr_err("bad number for pci slot");
- return -1;
- }
- p->u.board.v_pcislot = 1;
- break;
+ addr = brd->re_map_membase;
+ word = readw(addr + FEPSTAT);
- case METHOD:
- if (p->type != BNODE) {
- pr_err("install method only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.method = kstrdup(s, GFP_KERNEL);
- p->u.board.v_method = 1;
- break;
+ /*
+ * It can take 2-3 seconds for the FEP to
+ * be up and running. Give it 5 secs.
+ */
+ brd->wait_for_fep = 0;
+ while (brd->wait_for_fep < 500) {
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A")
+ brd->bd_flags |= BD_FEP5PLUS;
- case STATUS:
- if (p->type != BNODE) {
- pr_err("config status only vaild for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.board.status = kstrdup(s, GFP_KERNEL);
- break;
+ return 0;
+ }
+ msleep_interruptible(10);
+ brd->wait_for_fep++;
+ word = readw(addr + FEPSTAT);
+ }
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.module.v_nport = 1;
- } else {
- pr_err("nports only valid for concentrators or modules");
- return -1;
- }
- break;
+	/* Gave up on the board after waiting too long */
+ err1 = readw(addr + SEQUENCE);
+ err2 = readw(addr + ERROR);
+ dev_warn(&brd->pdev->dev,
+ "FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
+ return -EIO;
+}
- p->u.board.status = kstrdup(s, GFP_KERNEL);
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ u8 check;
+ u32 check1;
+ u32 check2;
+ unsigned int i;
- if (p->type == CNODE) {
- p->u.conc.id = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- p->u.module.id = kstrdup(s, GFP_KERNEL);
- p->u.module.v_id = 1;
- } else {
- pr_err("id only valid for concentrators or modules");
- return -1;
- }
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
+ !brd->re_map_membase || !brd->re_map_port)
+ return;
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.module.v_start = 1;
- } else {
- pr_err("start only valid for concentrators or modules");
- return -1;
- }
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
break;
+ udelay(10);
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return -1;
+ }
+ if (i > 1000) {
+ dev_warn(&brd->pdev->dev,
+ "dgap: Board not resetting... Failing board.\n");
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
- p = p->next;
- p->type = TNODE;
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ dev_warn(&brd->pdev->dev,
+ "No memory at %p for board.\n",
+ brd->re_map_membase);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+}
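The tail end of dgap_do_reset_board() is a write/read-back probe: two distinctive patterns are written at separate offsets and read back to confirm the mapped window really is backed by memory. The self-contained sketch below reproduces that check with a heap buffer standing in for re_map_membase; the offsets are assumptions, only the patterns come from the driver.

/*
 * Write/read-back memory probe.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#define LOW_OFF   0x10u
#define HIGH_OFF  0x20u

static int probe_memory(uint8_t *base)
{
	uint32_t pat1 = 0xa55a3cc3, pat2 = 0x5aa5c33c;
	uint32_t chk1, chk2;

	memcpy(base + LOW_OFF,  &pat1, sizeof(pat1));
	memcpy(base + HIGH_OFF, &pat2, sizeof(pat2));
	memcpy(&chk1, base + LOW_OFF,  sizeof(chk1));
	memcpy(&chk2, base + HIGH_OFF, sizeof(chk2));

	return (chk1 == pat1 && chk2 == pat2) ? 0 : -1;
}

int main(void)
{
	uint8_t *win = calloc(1, 0x40);

	if (!win)
		return 1;
	printf("memory probe: %s\n", probe_memory(win) ? "failed" : "ok");
	free(win);
	return 0;
}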
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.ttyname = kstrdup(s, GFP_KERNEL);
- if (!p->u.ttyname)
- return -1;
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
+{
+ char __iomem *vaddr;
+ u16 offset;
+ struct downld_t *to_dp;
- break;
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return -1;
+ vaddr = brd->re_map_membase;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+ memcpy_toio(to_dp, uaddr, len);
- p = p->next;
- p->type = CUNODE;
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.cuname = kstrdup(s, GFP_KERNEL);
- if (!p->u.cuname)
- return -1;
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+#endif
- break;
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
- case LINE: /* line information */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board before line info");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- pr_err("line not vaild for PC/em");
- return -1;
- }
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ u8 byte1;
+ u8 byte2;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
- p = p->next;
- p->type = LNODE;
- conc = NULL;
- line = p;
- linecnt++;
- break;
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return -1;
- if (!line) {
- pr_err("must specify line info before concentrator");
- return -1;
- }
+ /*
+	 * To get to the OTPROM memory, we have to send the board's base
+ * address or'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
- p = p->next;
- p->type = CNODE;
- conc = p;
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
+ base_offset = 0;
- conc_type = dgap_gettok(in);
- if (conc_type == 0 || conc_type != CX ||
- conc_type != EPC) {
- pr_err("failed to set a type of concentratros");
- return -1;
- }
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
- p->u.conc.type = conc_type;
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data
+ * Structure.
+ *
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
+ rom_offset = readw(brd->re_map_membase +
+ base_offset + 0x18);
+ image_length = readw(brd->re_map_membase +
+ rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase +
+ rom_offset + 0x08);
- break;
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board info before EBI modules");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
break;
- default:
- if (!conc) {
- pr_err("must specify concentrator info before EBI module");
- return -1;
- }
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++) {
+ brd->vpd[i] = readb(brd->re_map_membase +
+ vpd_offset + i);
}
+ }
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
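dgap_get_vpd() walks the expansion ROM image by image: each image starts with the 0x55 0xAA signature, a pointer at +0x18 leads to its ROM Data Structure, which holds the image length (in 512-byte units) at +0x10 and, when present, the VPD pointer at +0x08. The sketch below reproduces that walk over a hand-built byte array standing in for the OTPROM window; the offsets mirror the driver, the sample contents are invented.

/*
 * Expansion-ROM walk looking for a VPD pointer (illustrative data).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t rd16(const uint8_t *rom, size_t off)
{
	uint16_t v;

	memcpy(&v, rom + off, sizeof(v));
	return v;
}

static uint16_t find_vpd(const uint8_t *rom, size_t rom_size)
{
	size_t base = 0;

	while (base + 2 <= rom_size && rom[base] == 0x55 && rom[base + 1] == 0xAA) {
		uint16_t rds = rd16(rom, base + 0x18);	/* ROM Data Structure */
		uint16_t vpd = rd16(rom, rds + 0x08);
		uint16_t len = rd16(rom, rds + 0x10) * 512;

		if (vpd)
			return vpd;			/* found it */
		base += len;				/* hop to next image */
	}
	return 0;					/* no VPD in any image */
}

int main(void)
{
	uint8_t rom[1024] = { 0x55, 0xAA };
	uint16_t rds = 0x40, vpd = 0x200;

	memcpy(rom + 0x18, &rds, 2);		/* image -> data structure */
	memcpy(rom + rds + 0x08, &vpd, 2);	/* data structure -> VPD   */
	printf("VPD offset: 0x%x\n", find_vpd(rom, sizeof(rom)));
	return 0;
}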
- p = p->next;
- p->type = MNODE;
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
- module_type = dgap_gettok(in);
- if (module_type == 0 || module_type != PORTS ||
- module_type != MODEM) {
- pr_err("failed to set a type of module");
- return -1;
- }
- p->u.module.type = module_type;
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
- break;
- case CABLE:
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.line.cable = kstrdup(s, GFP_KERNEL);
- p->u.line.v_cable = 1;
- }
- break;
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.line.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.conc.v_speed = 1;
- } else {
- pr_err("speed valid only for lines or concentrators.");
- return -1;
- }
- break;
- case CONNECT:
- if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- p->u.conc.connect = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_connect = 1;
- }
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return -1;
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
- p = p->next;
- p->type = PNODE;
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
+ return -EINVAL;
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
+ dgap_driver_pollrate_store);
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.printname = kstrdup(s, GFP_KERNEL);
- if (!p->u.printname)
- return -1;
- break;
+static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return -1;
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ return rc;
+}
- p = p->next;
- p->type = JNODE;
+static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.majornumber)) {
- pr_err("bad number for major number");
- return -1;
- }
- break;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+}
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return -1;
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
- p = p->next;
- p->type = ANODE;
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret)
+ return;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.altpin)) {
- pr_err("bad number for altpin");
- return -1;
- }
- break;
+ dev_set_drvdata(c, un);
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return -1;
+}
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
- p = p->next;
- p->type = INTRNODE;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.useintr)) {
- pr_err("bad number for useintr");
- return -1;
- }
- break;
+/*
+ * Create pr and tty device entries
+ */
+static int dgap_tty_register_ports(struct board_t *brd)
+{
+ struct channel_t *ch;
+ int i;
+ int ret;
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return -1;
+ brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
+ GFP_KERNEL);
+ if (!brd->serial_ports)
+ return -ENOMEM;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
+ GFP_KERNEL);
+ if (!brd->printer_ports) {
+ ret = -ENOMEM;
+ goto free_serial_ports;
+ }
- p = p->next;
- p->type = TSNODE;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_init(&brd->serial_ports[i]);
+ tty_port_init(&brd->printer_ports[i]);
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.ttysize)) {
- pr_err("bad number for ttysize");
- return -1;
- }
- break;
+ ch = brd->channels[0];
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return -1;
+ struct device *classp;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ classp = tty_port_register_device(&brd->serial_ports[i],
+ brd->serial_driver,
+ i, NULL);
- p = p->next;
- p->type = CSNODE;
+ if (IS_ERR(classp)) {
+ ret = PTR_ERR(classp);
+ goto unregister_ttys;
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.chsize)) {
- pr_err("bad number for chsize");
- return -1;
- }
- break;
+ dgap_create_tty_sysfs(&ch->ch_tun, classp);
+ ch->ch_tun.un_sysfs = classp;
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return -1;
+ classp = tty_port_register_device(&brd->printer_ports[i],
+ brd->print_driver,
+ i, NULL);
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ if (IS_ERR(classp)) {
+ ret = PTR_ERR(classp);
+ goto unregister_ttys;
+ }
- p = p->next;
- p->type = BSNODE;
+ dgap_create_tty_sysfs(&ch->ch_pun, classp);
+ ch->ch_pun.un_sysfs = classp;
+ }
+ dgap_create_ports_sysfiles(brd);
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.bssize)) {
- pr_err("bad number for bssize");
- return -1;
- }
- break;
+ return 0;
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return -1;
+unregister_ttys:
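+	/* Unwind: remove the sysfs entries and tty devices registered so far. */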
+ while (i >= 0) {
+ ch = brd->channels[i];
+ if (ch->ch_tun.un_sysfs) {
+ dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
+ tty_unregister_device(brd->serial_driver, i);
+ }
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ if (ch->ch_pun.un_sysfs) {
+ dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
+ tty_unregister_device(brd->print_driver, i);
+ }
+ i--;
+ }
- p = p->next;
- p->type = USNODE;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->serial_ports[i]);
+ tty_port_destroy(&brd->printer_ports[i]);
+ }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.unsize)) {
- pr_err("bad number for schedsize");
- return -1;
- }
- break;
+ kfree(brd->printer_ports);
+ brd->printer_ports = NULL;
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return -1;
+free_serial_ports:
+ kfree(brd->serial_ports);
+ brd->serial_ports = NULL;
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+ return ret;
+}
- p = p->next;
- p->type = FSNODE;
+/*
+ * dgap_cleanup_tty()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+static void dgap_cleanup_tty(struct board_t *brd)
+{
+ struct device *dev;
+ unsigned int i;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.f2size)) {
- pr_err("bad number for f2200size");
- return -1;
- }
- break;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->serial_ports[i]);
+ dev = brd->channels[i]->ch_tun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->serial_driver, i);
+ }
+ tty_unregister_driver(brd->serial_driver);
+ put_tty_driver(brd->serial_driver);
+ kfree(brd->serial_ports);
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return -1;
+ for (i = 0; i < brd->nasync; i++) {
+ tty_port_destroy(&brd->printer_ports[i]);
+ dev = brd->channels[i]->ch_pun.un_sysfs;
+ dgap_remove_tty_sysfs(dev);
+ tty_unregister_device(brd->print_driver, i);
+ }
+ tty_unregister_driver(brd->print_driver);
+ put_tty_driver(brd->print_driver);
+ kfree(brd->printer_ports);
+}
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -1;
+static int dgap_request_irq(struct board_t *brd)
+{
+ int rc;
- p = p->next;
- p->type = VSNODE;
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENODEV;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.vpixsize)) {
- pr_err("bad number for vpixsize");
- return -1;
- }
- break;
- }
+ /*
+	 * Set up our interrupt handler if the configuration says to use interrupts.
+ */
+ if (dgap_config_get_useintr(brd) && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
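+	/* Remember that the IRQ is ours so dgap_free_irq() can release it. */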
+ if (!rc)
+ brd->intr_used = 1;
}
+ return 0;
}
-/*
- * dgap_sindex: much like index(), but it looks for a match of any character in
- * the group, and returns that position. If the first character is a ^, then
- * this will match the first occurrence not in that group.
- */
-static char *dgap_sindex(char *string, char *group)
+static void dgap_free_irq(struct board_t *brd)
{
- char *ptr;
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+}
- if (!string || !group)
- return (char *) NULL;
+static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
+ struct board_t *brd)
+{
+ const struct firmware *fw;
+ char *tmp_ptr;
+ int ret;
+ char *dgap_config_buf;
- if (*group == '^') {
- group++;
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- break;
- }
- if (*ptr == '\0')
- return string;
+ dgap_get_vpd(brd);
+ dgap_do_reset_board(brd);
+
+ if (fw_info[card_type].conf_name) {
+ ret = request_firmware(&fw, fw_info[card_type].conf_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "config file %s not found\n",
+ fw_info[card_type].conf_name);
+ return ret;
}
- } else {
- for (; *string; string++) {
- for (ptr = group; *ptr; ptr++) {
- if (*ptr == *string)
- return string;
- }
+
+ dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
+ if (!dgap_config_buf) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(dgap_config_buf, fw->data, fw->size);
+ release_firmware(fw);
+
+		/*
+		 * Preserve dgap_config_buf, as dgap_parsefile()
+		 * would otherwise alter it.
+		 */
+ tmp_ptr = dgap_config_buf;
+
+ if (dgap_parsefile(&tmp_ptr) != 0) {
+ kfree(dgap_config_buf);
+ return -EINVAL;
}
+ kfree(dgap_config_buf);
}
- return (char *) NULL;
-}
+ /*
+ * Match this board to a config the user created for us.
+ */
+ brd->bd_config =
+ dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in)
-{
- char *w;
- struct toklist *t;
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+	 * back and this is a PAPORT8 board, retry the
+	 * lookup as a PAPORT4 board as well.
+ */
+ if (brd->type == PAPORT8 && !brd->bd_config)
+ brd->bd_config =
+ dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
- if (strstr(dgap_cword, "board")) {
- w = dgap_getword(in);
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_brdtype; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
+ if (!brd->bd_config) {
+ dev_err(&pdev->dev, "No valid configuration found\n");
+ return -EINVAL;
+ }
+
+ if (fw_info[card_type].bios_name) {
+ ret = request_firmware(&fw, fw_info[card_type].bios_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "bios file %s not found\n",
+ fw_info[card_type].bios_name);
+ return ret;
}
- } else {
- while ((w = dgap_getword(in))) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
+ dgap_do_bios_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for BIOS to test board... */
+ ret = dgap_test_bios(brd);
+ if (ret)
+ return ret;
+ }
+
+ if (fw_info[card_type].fep_name) {
+ ret = request_firmware(&fw, fw_info[card_type].fep_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "dgap: fep file %s not found\n",
+ fw_info[card_type].fep_name);
+ return ret;
+ }
+ dgap_do_fep_load(brd, fw->data, fw->size);
+ release_firmware(fw);
+
+ /* Wait for FEP to load on board... */
+ ret = dgap_test_fep(brd);
+ if (ret)
+ return ret;
+ }
+
+#ifdef DIGI_CONCENTRATORS_SUPPORTED
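+	/*
+	 * Note: this block is compiled only when DIGI_CONCENTRATORS_SUPPORTED
+	 * is defined and assumes chk_addr, check, offset and vaddr are
+	 * declared locally.
+	 */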
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+	if ((brd->type == PCX) || (brd->type == PEPC)) {
+		vaddr = brd->re_map_membase;
+		chk_addr = (u16 *) (vaddr + DOWNREQ);
+		/* Nonzero if FEP is requesting concentrator image. */
+		check = readw(chk_addr);
+	}
+
+ if (fw_info[card_type].con_name && check && vaddr) {
+ ret = request_firmware(&fw, fw_info[card_type].con_name,
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "conc file %s not found\n",
+ fw_info[card_type].con_name);
+ return ret;
}
+ /* Put concentrator firmware loading code here */
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ memcpy_toio(offset, fw->data, fw->size);
+
+		dgap_do_conc_load(brd, (char *)fw->data, fw->size);
+ release_firmware(fw);
}
+#endif
return 0;
}
/*
- * get a word from the input stream, also keep track of current line number.
- * words are separated by whitespace.
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
*/
-static char *dgap_getword(char **in)
+static int dgap_tty_init(struct board_t *brd)
{
- char *ret_ptr = *in;
+ int i;
+ int tlw;
+ uint true_count;
+ u8 __iomem *vaddr;
+ u8 modem;
+ struct channel_t *ch;
+ struct bs_t __iomem *bs;
+ struct cm_t __iomem *cm;
+ int ret;
- char *ptr = dgap_sindex(*in, " \t\n");
+ /*
+ * Initialize board structure elements.
+ */
- /* If no word found, return null */
- if (!ptr)
- return NULL;
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
+ brd->nasync = dgap_config_get_num_prts(brd);
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') ||
- (**in == '\t') ||
- (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
+ if (!brd->nasync)
+ brd->nasync = brd->maxports;
- return ret_ptr;
-}
+ if (brd->nasync > brd->maxports)
+ brd->nasync = brd->maxports;
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case LNODE:
- if (p->u.line.v_speed == 0) {
- pr_err("line speed not specified");
- return 1;
- }
- return 0;
+ if (true_count != brd->nasync) {
+ dev_warn(&brd->pdev->dev,
+ "%s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count);
- case CNODE:
- if (p->u.conc.v_speed == 0) {
- pr_err("concentrator line speed not specified");
- return 1;
- }
- if (p->u.conc.v_nport == 0) {
- pr_err("number of ports on concentrator not specified");
- return 1;
+ if ((brd->type == PPCM) &&
+ (true_count == 64 || true_count == 0)) {
+ dev_warn(&brd->pdev->dev,
+ "Please make SURE the EBI cable running from the card\n");
+ dev_warn(&brd->pdev->dev,
+ "to each EM module is plugged into EBI IN!\n");
}
- if (p->u.conc.v_id == 0) {
- pr_err("concentrator id letter not specified");
- return 1;
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return -EIO;
}
- return 0;
+ }
- case MNODE:
- if (p->u.module.v_nport == 0) {
- pr_err("number of ports on EBI module not specified");
- return 1;
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ brd->channels[i] =
+ kzalloc(sizeof(struct channel_t), GFP_KERNEL);
+ if (!brd->channels[i]) {
+ ret = -ENOMEM;
+ goto free_chan;
}
- if (p->u.module.v_id == 0) {
- pr_err("EBI module id letter not specified");
- return 1;
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
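+	/* CHANBUF and CMDBUF locate the channel buffer and command structures in the mapped board memory. */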
+ bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
+
+ spin_lock_init(&ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ } else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
}
- return 0;
+
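+		/* tx_seg/rx_seg hold 16-byte paragraph values; convert them to byte offsets in the mapped board memory. */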
+ ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
+ ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
+ ch->ch_tsize / 2;
+ ch->ch_tlw = tlw;
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
}
+
return 0;
+
+free_chan:
+ while (--i >= 0) {
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ return ret;
}
/*
- * Given a board pointer, returns whether we should use interrupts or not.
+ * dgap_tty_free()
+ *
+ * Free the channels allocated in dgap_tty_init().
*/
-static uint dgap_config_get_useintr(struct board_t *bd)
+static void dgap_tty_free(struct board_t *brd)
{
- struct cnode *p;
+ int i;
- if (!bd)
- return 0;
+ for (i = 0; i < brd->nasync; i++)
+ kfree(brd->channels[i]);
+}
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == INTRNODE) {
- /*
- * check for pcxr types.
- */
- return p->u.useintr;
- }
- }
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct board_t *brd;
+
+ if (dgap_numboards >= MAXBOARDS)
+ return -EPERM;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return -EIO;
+
+ brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
+
+ rc = dgap_firmware_load(pdev, ent->driver_data, brd);
+ if (rc)
+ goto cleanup_brd;
+
+ rc = dgap_alloc_flipbuf(brd);
+ if (rc)
+ goto cleanup_brd;
+
+ rc = dgap_tty_register(brd);
+ if (rc)
+ goto free_flipbuf;
+
+ rc = dgap_request_irq(brd);
+ if (rc)
+ goto unregister_tty;
+
+ /*
+ * Do tty device initialization.
+ */
+ rc = dgap_tty_init(brd);
+ if (rc < 0)
+ goto free_irq;
+
+ rc = dgap_tty_register_ports(brd);
+ if (rc)
+ goto tty_free;
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgap_board[dgap_numboards++] = brd;
- /* If not found, then don't turn on interrupts. */
return 0;
+
+tty_free:
+ dgap_tty_free(brd);
+free_irq:
+ dgap_free_irq(brd);
+unregister_tty:
+ dgap_tty_unregister(brd);
+free_flipbuf:
+ dgap_free_flipbuf(brd);
+cleanup_brd:
+ dgap_cleanup_nodes();
+ dgap_unmap(brd);
+ kfree(brd);
+
+ return rc;
}
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
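+	/* Per-board cleanup is done at module unload in dgap_cleanup_module(). */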
+}
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
/*
- * Given a board pointer, returns whether we turn on altpin or not.
+ * Start of driver.
*/
-static uint dgap_config_get_altpin(struct board_t *bd)
+static int dgap_start(void)
{
- struct cnode *p;
+ int rc;
+ unsigned long flags;
+ struct device *device;
- if (!bd)
- return 0;
+ dgap_numboards = 0;
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == ANODE) {
- /*
- * check for pcxr types.
- */
- return p->u.altpin;
- }
+ pr_info("For the tools package please visit http://www.digi.com\n");
+
+	/*
+	 * Register our base character device into the kernel and
+	 * create the management/dpa device.
+	 */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
+ if (rc < 0)
+ return rc;
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ if (IS_ERR(dgap_class)) {
+ rc = PTR_ERR(dgap_class);
+ goto failed_class;
}
- /* If not found, then don't turn on interrupts. */
- return 0;
+ device = device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto failed_device;
+ }
+
+ /* Start the poller */
+ spin_lock_irqsave(&dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ spin_unlock_irqrestore(&dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ return rc;
+
+failed_device:
+ class_destroy(dgap_class);
+failed_class:
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ return rc;
}
-/*
- * Given a specific type of board, if found, detached link and
- * returns the first occurrence in the list.
- */
-static struct cnode *dgap_find_config(int type, int bus, int slot)
+static void dgap_stop(void)
{
- struct cnode *p, *prev, *prev2, *found;
+ unsigned long lock_flags;
- p = &dgap_head;
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
- while (p->next) {
- prev = p;
- p = p->next;
+ del_timer_sync(&dgap_poll_timer);
- if (p->type != BNODE)
- continue;
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+}
- if (p->u.board.type != type)
- continue;
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ unsigned int i;
- if (p->u.board.v_pcibus &&
- p->u.board.pcibus != bus)
- continue;
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
- if (p->u.board.v_pcislot &&
- p->u.board.pcislot != slot)
- continue;
+ dgap_free_irq(brd);
- found = p;
- /*
- * Keep walking thru the list till we
- * find the next board.
- */
- while (p->next) {
- prev2 = p;
- p = p->next;
+ tasklet_kill(&brd->helper_tasklet);
- if (p->type != BNODE)
- continue;
+ dgap_unmap(brd);
- /*
- * Mark the end of our 1 board
- * chain of configs.
- */
- prev2->next = NULL;
+ /* Free all allocated channels structs */
+	for (i = 0; i < MAXPORTS; i++)
+ kfree(brd->channels[i]);
- /*
- * Link the "next" board to the
- * previous board, effectively
- * "unlinking" our board from
- * the main config.
- */
- prev->next = p;
+ kfree(brd->flipbuf);
+ kfree(brd->flipflagbuf);
- return found;
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- return NULL;
+ dgap_board[brd->boardnum] = NULL;
+
+ kfree(brd);
}
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
/*
- * Given a board pointer, walks the config link, counting up
- * all ports user specified should be on the board.
- * (This does NOT mean they are all actually present right now tho)
+ * init_module()
+ *
+ * Module load. This is where it all starts.
*/
-static uint dgap_config_get_num_prts(struct board_t *bd)
+static int dgap_init_module(void)
{
- int count = 0;
- struct cnode *p;
+ int rc;
- if (!bd)
- return 0;
+ pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
- for (p = bd->bd_config; p; p = p->next) {
+ rc = dgap_start();
+ if (rc)
+ return rc;
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return count;
+ rc = pci_register_driver(&dgap_driver);
+ if (rc)
+ goto err_stop;
+
+ rc = dgap_create_driver_sysfiles(&dgap_driver);
+ if (rc)
+ goto err_unregister;
+
+ dgap_driver_state = DRIVER_READY;
+
+ return 0;
+
+err_unregister:
+ pci_unregister_driver(&dgap_driver);
+err_stop:
+ dgap_stop();
+
+ return rc;
}
-static char *dgap_create_config_string(struct board_t *bd, char *string)
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void dgap_cleanup_module(void)
{
- char *ptr = string;
- struct cnode *p;
- struct cnode *q;
- int speed;
+ unsigned int i;
+ ulong lock_flags;
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
+ spin_lock_irqsave(&dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
- for (p = bd->bd_config; p; p = p->next) {
+ /* Turn off poller right away. */
+ del_timer_sync(&dgap_poll_timer);
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the
- * list and keep adding the number of ports on each EM
- * to the config. UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if (q && (q->type == MNODE)) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while (q->next && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
+ dgap_remove_driver_sysfiles(&dgap_driver);
- *ptr = speed;
- ptr++;
- break;
- }
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+
+ for (i = 0; i < dgap_numboards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_board[i]);
+ dgap_cleanup_tty(dgap_board[i]);
+ dgap_cleanup_board(dgap_board[i]);
}
- *ptr = 0xff;
- return string;
+ dgap_cleanup_nodes();
+
+ if (dgap_numboards)
+ pci_unregister_driver(&dgap_driver);
}
+
+module_init(dgap_init_module);
+module_exit(dgap_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
index 14e2ed0fe39b..684033156e8c 100644
--- a/drivers/staging/dgap/dgap.h
+++ b/drivers/staging/dgap/dgap.h
@@ -584,9 +584,6 @@ struct board_t {
struct tty_port *printer_ports;
char print_name[200];
- u32 dgap_serial_major;
- u32 dgap_transparent_print_major;
-
struct bs_t __iomem *bd_bs; /* Base structure pointer */
char *flipbuf; /* Our flip buffer, alloced if */
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index a17f4f6095c8..bedc5221b6fc 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -724,10 +724,8 @@ static void cls_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
- APR(("poll_tasklet() - NULL or bad bd.\n"));
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- }
/* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
@@ -794,25 +792,17 @@ static void cls_tasklet(unsigned long data)
*/
static irqreturn_t cls_intr(int irq, void *voidbrd)
{
- struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
+ struct dgnc_board *brd = voidbrd;
uint i = 0;
unsigned char poll_reg;
unsigned long flags;
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n",
- irq));
- return IRQ_NONE;
- }
-
/*
- * Check to make sure its for us.
+	 * Check that we were not passed a null board pointer
+	 * or a board pointer that wasn't ours.
*/
- if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n",
- irq));
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return IRQ_NONE;
- }
spin_lock_irqsave(&brd->bd_intr_lock, flags);
@@ -928,8 +918,6 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
| UART_LSR_FE);
ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART READ",
- ch->ch_rqueue + head, 1);
qleft--;
@@ -964,7 +952,6 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
unsigned long flags;
struct channel_t *ch;
struct un_t *un;
- int rc = 0;
if (!tty || tty->magic != TTY_MAGIC)
return -ENXIO;
@@ -984,12 +971,11 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
/*
* NOTE: Do something with time passed in.
*/
- rc = wait_event_interruptible(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0));
/* If ret is non-zero, user ctrl-c'ed us */
- return rc;
+ return wait_event_interruptible(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0));
}
@@ -1098,8 +1084,6 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE",
- ch->ch_wqueue + ch->ch_w_tail, 1);
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
ch->ch_txcount++;
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 21546659ff07..ba98ff348112 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -49,16 +49,6 @@ MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-/*
- * insmod command line overrideable parameters
- *
- * NOTE: we use a set of macros to create the variables, which allows
- * us to specify the variable type, name, initial value, and description.
- */
-PARM_INT(debug, 0x00, 0644, "Driver debugging level");
-PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
-PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
-
/**************************************************************************
*
* protos for this file
@@ -207,8 +197,6 @@ static int __init dgnc_init_module(void)
{
int rc = 0;
- APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
-
/*
* Initialize global stuff
*/
@@ -254,8 +242,6 @@ static int dgnc_start(void)
/* make sure that the globals are init'd before we do anything else */
dgnc_init_globals();
- APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
-
/*
* Register our base character device into the kernel.
* This allows the download daemon to connect to the downld device
@@ -265,7 +251,7 @@ static int dgnc_start(void)
*/
rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
if (rc <= 0) {
- APR(("Can't register dgnc driver device (%d)\n", rc));
+ pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
return -ENXIO;
}
dgnc_Major = rc;
@@ -281,7 +267,7 @@ static int dgnc_start(void)
rc = dgnc_tty_preinit();
if (rc < 0) {
- APR(("tty preinit - not enough memory (%d)\n", rc));
+ pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
return rc;
}
@@ -468,7 +454,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
brd->membase = pci_resource_start(pdev, 4);
if (!brd->membase) {
- APR(("card has no PCI IO resources, failing board.\n"));
+ dev_err(&brd->pdev->dev,
+ "Card has no PCI IO resources, failing.\n");
return -ENODEV;
}
@@ -555,7 +542,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
break;
default:
- APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
+ dev_err(&brd->pdev->dev,
+ "Didn't find any compatible Neo/Classic PCI boards.\n");
return -ENXIO;
}
@@ -567,7 +555,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_tty_register(brd);
if (rc < 0) {
dgnc_tty_uninit(brd);
- APR(("Can't register tty devices (%d)\n", rc));
+ pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
goto failed;
@@ -575,7 +563,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_finalize_board_init(brd);
if (rc < 0) {
- APR(("Can't finalize board init (%d)\n", rc));
+ pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
@@ -585,7 +573,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
rc = dgnc_tty_init(brd);
if (rc < 0) {
dgnc_tty_uninit(brd);
- APR(("Can't init tty devices (%d)\n", rc));
+ pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
@@ -744,9 +732,6 @@ static void dgnc_init_globals(void)
{
int i = 0;
- dgnc_rawreadok = rawreadok;
- dgnc_trcbuf_size = trcbuf_size;
- dgnc_debug = debug;
dgnc_NumBoards = 0;
for (i = 0; i < MAXBOARDS; i++)
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index f901957c757f..a8157eba28da 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -42,53 +42,13 @@
*
*************************************************************************/
-/*
- * Driver identification, error and debugging statments
- *
- * In theory, you can change all occurrences of "digi" in the next
- * three lines, and the driver printk's will all automagically change.
- *
- * APR((fmt, args, ...)); Always prints message
- */
+/* Driver identification and error statements */
#define PROCSTR "dgnc" /* /proc entries */
#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
-#define DRVSTR "dgnc" /* Driver name string
- * displayed by APR */
-#define APR(args) do { printk(DRVSTR": "); printk args; \
- } while (0)
-#define RAPR(args) do { printk args; } while (0)
+#define DRVSTR "dgnc" /* Driver name string */
#define TRC_TO_CONSOLE 1
-/*
- * Debugging levels can be set using debug insmod variable
- * They can also be compiled out completely.
- */
-
-#define DBG_INIT (dgnc_debug & 0x01)
-#define DBG_BASIC (dgnc_debug & 0x02)
-#define DBG_CORE (dgnc_debug & 0x04)
-
-#define DBG_OPEN (dgnc_debug & 0x08)
-#define DBG_CLOSE (dgnc_debug & 0x10)
-#define DBG_READ (dgnc_debug & 0x20)
-#define DBG_WRITE (dgnc_debug & 0x40)
-
-#define DBG_IOCTL (dgnc_debug & 0x80)
-
-#define DBG_PROC (dgnc_debug & 0x100)
-#define DBG_PARAM (dgnc_debug & 0x200)
-#define DBG_PSCAN (dgnc_debug & 0x400)
-#define DBG_EVENT (dgnc_debug & 0x800)
-
-#define DBG_DRAIN (dgnc_debug & 0x1000)
-#define DBG_MSIGS (dgnc_debug & 0x2000)
-
-#define DBG_MGMT (dgnc_debug & 0x4000)
-#define DBG_INTR (dgnc_debug & 0x8000)
-
-#define DBG_CARR (dgnc_debug & 0x10000)
-
/* Number of boards we support at once. */
#define MAXBOARDS 20
#define MAXPORTS 8
@@ -134,8 +94,6 @@
#define _POSIX_VDISABLE '\0'
#endif
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
/*
* All the possible states the driver can be while being loaded.
@@ -342,13 +300,6 @@ struct un_t {
#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -442,21 +393,13 @@ struct channel_t {
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_channel_table;
- uint ch_sniff_in;
- uint ch_sniff_out;
- char *ch_sniff_buf; /* Sniff buffer for proc */
- ulong ch_sniff_flags; /* Channel flags */
- wait_queue_head_t ch_sniff_wait;
};
/*
* Our Global Variables.
*/
extern uint dgnc_Major; /* Our driver/mgmt major */
-extern int dgnc_debug; /* Debug variable */
-extern int dgnc_rawreadok; /* Set if user wants rawreads */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
-extern int dgnc_trcbuf_size; /* Size of the ringbuffer */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern uint dgnc_NumBoards; /* Total number of boards */
extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board structs */
diff --git a/drivers/staging/dgnc/dgnc_kcompat.h b/drivers/staging/dgnc/dgnc_kcompat.h
index eaec7e6a28e1..566cad0d33e7 100644
--- a/drivers/staging/dgnc/dgnc_kcompat.h
+++ b/drivers/staging/dgnc/dgnc_kcompat.h
@@ -43,22 +43,4 @@
# endif
-# define PARM_STR(VAR, INIT, PERM, DESC) \
- static char *VAR = INIT; \
- char *dgnc_##VAR; \
- module_param(VAR, charp, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_INT(VAR, INIT, PERM, DESC) \
- static int VAR = INIT; \
- int dgnc_##VAR; \
- module_param(VAR, int, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
-# define PARM_ULONG(VAR, INIT, PERM, DESC) \
- static ulong VAR = INIT; \
- ulong dgnc_##VAR; \
- module_param(VAR, long, PERM); \
- MODULE_PARM_DESC(VAR, DESC);
-
#endif /* ! __DGNC_KCOMPAT_H */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index a5bd08fef270..c9a8a9825cfb 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -530,10 +530,11 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
int linestatus;
unsigned long flags;
- if (!brd)
- return;
-
- if (brd->magic != DGNC_BOARD_MAGIC)
+ /*
+	 * Check that we were not passed a null board pointer
+	 * or a board pointer that wasn't ours.
+ */
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return;
if (port > brd->maxports)
@@ -869,10 +870,8 @@ static void neo_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
- APR(("poll_tasklet() - NULL or bad bd.\n"));
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return;
- }
/* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
@@ -945,7 +944,7 @@ static void neo_tasklet(unsigned long data)
*/
static irqreturn_t neo_intr(int irq, void *voidbrd)
{
- struct dgnc_board *brd = (struct dgnc_board *) voidbrd;
+ struct dgnc_board *brd = voidbrd;
struct channel_t *ch;
int port = 0;
int type = 0;
@@ -955,18 +954,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
unsigned long flags;
unsigned long flags2;
- if (!brd) {
- APR(("Received interrupt (%d) with null board associated\n", irq));
- return IRQ_NONE;
- }
-
/*
+	 * Check that we were not passed a null board pointer
+	 * or a board pointer that wasn't ours.
+ * associated or a board pointer that wasn't ours.
*/
- if (brd->magic != DGNC_BOARD_MAGIC) {
- APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
return IRQ_NONE;
- }
brd->intr_count++;
@@ -1224,7 +1217,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
/* Copy data from uart to the queue */
memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, n);
/*
* Since RX_FIFO_DATA_ERROR was 0, we are guarenteed
@@ -1310,7 +1302,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char) linestatus;
- dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
/* Ditch any remaining linestatus value. */
linestatus = 0;
@@ -1563,7 +1554,6 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
}
memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
- dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + tail, s);
/* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 6c3b387622e9..2fd34ca70c59 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -63,39 +63,6 @@ static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
-static ssize_t dgnc_driver_debug_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_debug);
-}
-
-static ssize_t dgnc_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- int ret;
-
- ret = sscanf(buf, "0x%x\n", &dgnc_debug);
- if (ret != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgnc_driver_debug_show, dgnc_driver_debug_store);
-
-
-static ssize_t dgnc_driver_rawreadok_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_rawreadok);
-}
-
-static ssize_t dgnc_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
-{
- int ret;
-
- ret = sscanf(buf, "0x%x\n", &dgnc_rawreadok);
- if (ret != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgnc_driver_rawreadok_show, dgnc_driver_rawreadok_store);
-
static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
{
@@ -122,8 +89,6 @@ void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
rc |= driver_create_file(driverfs, &driver_attr_version);
rc |= driver_create_file(driverfs, &driver_attr_boards);
rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_debug);
- rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
rc |= driver_create_file(driverfs, &driver_attr_pollrate);
if (rc)
printk(KERN_ERR "DGNC: sysfs driver_create_file failed!\n");
@@ -137,8 +102,6 @@ void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
driver_remove_file(driverfs, &driver_attr_version);
driver_remove_file(driverfs, &driver_attr_boards);
driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_debug);
- driver_remove_file(driverfs, &driver_attr_rawreadok);
driver_remove_file(driverfs, &driver_attr_pollrate);
}
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 03c15069731f..f81a375f8bc1 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -49,7 +49,6 @@
#include <linux/delay.h> /* For udelay */
#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
#include <linux/pci.h>
-
#include "dgnc_driver.h"
#include "dgnc_tty.h"
#include "dgnc_types.h"
@@ -233,7 +232,8 @@ int dgnc_tty_register(struct dgnc_board *brd)
/* Register tty devices */
rc = tty_register_driver(&brd->SerialDriver);
if (rc < 0) {
- APR(("Can't register tty device (%d)\n", rc));
+ dev_dbg(&brd->pdev->dev,
+ "Can't register tty device (%d)\n", rc);
return rc;
}
brd->dgnc_Major_Serial_Registered = TRUE;
@@ -281,7 +281,9 @@ int dgnc_tty_register(struct dgnc_board *brd)
/* Register Transparent Print devices */
rc = tty_register_driver(&brd->PrintDriver);
if (rc < 0) {
- APR(("Can't register Transparent Print device (%d)\n", rc));
+ dev_dbg(&brd->pdev->dev,
+ "Can't register Transparent Print device(%d)\n",
+ rc);
return rc;
}
brd->dgnc_Major_TransparentPrint_Registered = TRUE;
@@ -371,7 +373,6 @@ int dgnc_tty_init(struct dgnc_board *brd)
init_waitqueue_head(&ch->ch_flags_wait);
init_waitqueue_head(&ch->ch_tun.un_flags_wait);
init_waitqueue_head(&ch->ch_pun.un_flags_wait);
- init_waitqueue_head(&ch->ch_sniff_wait);
{
struct device *classp;
@@ -446,127 +447,6 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
#define TMPBUFLEN (1024)
-/*
- * dgnc_sniff - Dump data out to the "sniff" buffer if the
- * proc sniff file is opened...
- */
-void dgnc_sniff_nowait_nolock(struct channel_t *ch, unsigned char *text, unsigned char *buf, int len)
-{
- struct timeval tv;
- int n;
- int r;
- int nbuf;
- int i;
- int tmpbuflen;
- char *tmpbuf;
- char *p;
- int too_much_data;
-
- tmpbuf = kzalloc(TMPBUFLEN, GFP_ATOMIC);
- if (!tmpbuf)
- return;
- p = tmpbuf;
-
- /* Leave if sniff not open */
- if (!(ch->ch_sniff_flags & SNIFF_OPEN))
- goto exit;
-
- do_gettimeofday(&tv);
-
- /* Create our header for data dump */
- p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
- tmpbuflen = p - tmpbuf;
-
- do {
- too_much_data = 0;
-
- for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
- p += sprintf(p, "%02x ", *buf);
- buf++;
- tmpbuflen = p - tmpbuf;
- }
-
- if (tmpbuflen < (TMPBUFLEN - 4)) {
- if (i > 0)
- p += sprintf(p - 1, "%s\n", ">");
- else
- p += sprintf(p, "%s\n", ">");
- } else {
- too_much_data = 1;
- len -= i;
- }
-
- nbuf = strlen(tmpbuf);
- p = tmpbuf;
-
- /*
- * Loop while data remains.
- */
- while (nbuf > 0 && ch->ch_sniff_buf) {
- /*
- * Determine the amount of available space left in the
- * buffer. If there's none, wait until some appears.
- */
- n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
-
- /*
- * If there is no space left to write to in our sniff buffer,
- * we have no choice but to drop the data.
- * We *cannot* sleep here waiting for space, because this
- * function was probably called by the interrupt/timer routines!
- */
- if (n == 0)
- goto exit;
-
- /*
- * Copy as much data as will fit.
- */
-
- if (n > nbuf)
- n = nbuf;
-
- r = SNIFF_MAX - ch->ch_sniff_in;
-
- if (r <= n) {
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
-
- n -= r;
- ch->ch_sniff_in = 0;
- p += r;
- nbuf -= r;
- }
-
- memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
-
- ch->ch_sniff_in += n;
- p += n;
- nbuf -= n;
-
- /*
- * Wakeup any thread waiting for data
- */
- if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
- ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
- wake_up_interruptible(&ch->ch_sniff_wait);
- }
- }
-
- /*
- * If the user sent us too much data to push into our tmpbuf,
- * we need to keep looping around on all the data.
- */
- if (too_much_data) {
- p = tmpbuf;
- tmpbuflen = 0;
- }
-
- } while (too_much_data);
-
-exit:
- kfree(tmpbuf);
-}
-
-
/*=======================================================================
*
* dgnc_wmove - Write data to transmit queue.
@@ -781,8 +661,6 @@ void dgnc_input(struct channel_t *ch)
tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
}
- dgnc_sniff_nowait_nolock(ch, "USER READ", ch->ch_rqueue + tail, s);
-
tail += s;
n -= s;
/* Flip queue if needed */
@@ -1546,14 +1424,18 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
- APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ dev_dbg(tty->dev,
+ "tty->count is 1, un open count is %d\n",
+ un->un_open_count);
un->un_open_count = 1;
}
if (un->un_open_count)
un->un_open_count--;
else
- APR(("bad serial port open count of %d\n", un->un_open_count));
+ dev_dbg(tty->dev,
+ "bad serial port open count of %d\n",
+ un->un_open_count);
ch->ch_open_count--;
@@ -1974,7 +1856,6 @@ static int dgnc_tty_write(struct tty_struct *tty,
if (n >= remain) {
n -= remain;
memcpy(ch->ch_wqueue + head, buf, remain);
- dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
head = 0;
buf += remain;
}
@@ -1985,7 +1866,6 @@ static int dgnc_tty_write(struct tty_struct *tty,
*/
remain = n;
memcpy(ch->ch_wqueue + head, buf, remain);
- dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
head += remain;
}
@@ -2325,8 +2205,6 @@ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, uns
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return ret;
- ret = 0;
-
ret = get_user(arg, value);
if (ret)
return ret;
@@ -3089,7 +2967,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
struct digi_getcounter buf;
buf.norun = ch->ch_err_overrun;
- buf.noflow = 0; /* The driver doesn't keep this stat */
+ buf.noflow = 0; /* The driver doesn't keep this stat */
buf.nframe = ch->ch_err_frame;
buf.nparity = ch->ch_err_parity;
buf.nbreak = ch->ch_err_break;
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 58eef257c2ec..3975f0407143 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -37,6 +37,4 @@ void dgnc_carrier(struct channel_t *ch);
void dgnc_wakeup_writes(struct channel_t *ch);
void dgnc_check_queue_flow_control(struct channel_t *ch);
-void dgnc_sniff_nowait_nolock(struct channel_t *ch, unsigned char *text, unsigned char *buf, int nbuf);
-
#endif
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 1ae0013fb118..eb178fcb7954 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2622,7 +2622,7 @@ static int nbu2ss_ep_enable(
return -EINVAL;
}
- ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep_type = usb_endpoint_type(desc);
if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
|| (ep_type == USB_ENDPOINT_XFER_ISOC)) {
@@ -2644,7 +2644,7 @@ static int nbu2ss_ep_enable(
spin_lock_irqsave(&udc->lock, flags);
ep->desc = desc;
- ep->epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ ep->epnum = usb_endpoint_num(desc);
ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
ep->ep_type = ep_type;
ep->wedged = 0;
@@ -2722,8 +2722,7 @@ static void nbu2ss_ep_free_request(
if (_req != NULL) {
req = container_of(_req, struct nbu2ss_req, req);
- if (req != NULL)
- kfree(req);
+ kfree(req);
}
}
@@ -3491,7 +3490,6 @@ static struct platform_driver udc_driver = {
.suspend = nbu2ss_drv_suspend,
.resume = nbu2ss_drv_resume,
.driver = {
- .owner = THIS_MODULE,
.name = driver_name,
},
};
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/boot.h b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
index 60c015c1c28a..e4a698528520 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/boot.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/boot.h
@@ -1,28 +1,28 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
---------------------------------------------------------------------------
- File: boot.h
+ File: boot.h
- Description: boatloader
+   Description: bootloader
- History:
- 1/11/05 Whc Ported to Linux.
+ History:
+ 1/11/05 Whc Ported to Linux.
----------------------------------------------------------------------------*/
+ ---------------------------------------------------------------------------*/
#ifndef _BOOTH_
#define _BOOTH_
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 1d52738fff49..5992670f7747 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -1,21 +1,21 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
----------------------------------------------------------------------------
- Description: Common structures and defines
----------------------------------------------------------------------------*/
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ ---------------------------------------------------------------------------
+ Description: Common structures and defines
+ ---------------------------------------------------------------------------*/
#ifndef _FT1000H_
#define _FT1000H_
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
index 1f8b3ca35c69..922478e1cb57 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
@@ -1,30 +1,30 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
+ FT1000 driver for Flarion Flash OFDM NIC Device
- Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
+ Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
+ Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
- This file was modified to support the Flarion Flash OFDM NIC Device
- by Wai Chan (w.chan@flarion.com).
+ This file was modified to support the Flarion Flash OFDM NIC Device
+ by Wai Chan (w.chan@flarion.com).
- Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
+ Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+ -----------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -80,11 +80,11 @@ static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
/*======================================================================
- ft1000_config() is scheduled to run after a CARD_INSERTION event
- is received, to configure the PCMCIA socket, and to make the
- device available to the system.
+ ft1000_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
-======================================================================*/
+ ======================================================================*/
static int ft1000_config(struct pcmcia_device *link)
{
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index c1856f7d1e26..06b0e9cfb9b1 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -1,24 +1,26 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
--------------------------------------------------------------------------
- Description: This module will handshake with the DSP bootloader to
- download the DSP runtime image.
+ Description: This module will handshake with the DSP bootloader to
+ download the DSP runtime image.
----------------------------------------------------------------------------*/
+ ---------------------------------------------------------------------------*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define __KERNEL_SYSCALLS__
@@ -99,7 +101,7 @@ struct dsp_file_hdr {
u32 version_data_offset; /* Offset were scrambled version data begins. */
u32 version_data_size; /* Size, in words, of scrambled version data. */
u32 nDspImages; /* Number of DSP images in file. */
-} __attribute__ ((packed));
+} __packed;
struct dsp_image_info {
u32 coff_date; /* Date/time when DSP Coff image was built. */
@@ -110,11 +112,11 @@ struct dsp_image_info {
u32 version; /* Embedded version # of DSP code. */
unsigned short checksum; /* Dsp File checksum */
unsigned short pad1;
-} __attribute__ ((packed));
+} __packed;
void card_bootload(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
unsigned long flags;
u32 *pdata;
u32 size;
@@ -123,7 +125,7 @@ void card_bootload(struct net_device *dev)
netdev_dbg(dev, "card_bootload is called\n");
- pdata = (u32 *) bootimage;
+ pdata = (u32 *)bootimage;
size = sizeof(bootimage);
/* check for odd word */
@@ -146,7 +148,7 @@ void card_bootload(struct net_device *dev)
u16 get_handshake(struct net_device *dev, u16 expected_value)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 handshake;
u32 tempx;
int loopcnt;
@@ -161,12 +163,12 @@ u16 get_handshake(struct net_device *dev, u16 expected_value)
} else {
tempx =
ntohl(ft1000_read_dpram_mag_32
- (dev, DWNLD_MAG_HANDSHAKE_LOC));
- handshake = (u16) tempx;
+ (dev, DWNLD_MAG_HANDSHAKE_LOC));
+ handshake = (u16)tempx;
}
if ((handshake == expected_value)
- || (handshake == HANDSHAKE_RESET_VALUE)) {
+ || (handshake == HANDSHAKE_RESET_VALUE)) {
return handshake;
}
loopcnt++;
@@ -180,7 +182,7 @@ u16 get_handshake(struct net_device *dev, u16 expected_value)
void put_handshake(struct net_device *dev, u16 handshake_value)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u32 tempx;
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -188,7 +190,7 @@ void put_handshake(struct net_device *dev, u16 handshake_value)
DWNLD_HANDSHAKE_LOC);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, handshake_value); /* Handshake */
} else {
- tempx = (u32) handshake_value;
+ tempx = (u32)handshake_value;
tempx = ntohl(tempx);
ft1000_write_dpram_mag_32(dev, DWNLD_MAG_HANDSHAKE_LOC, tempx); /* Handshake */
}
@@ -196,7 +198,7 @@ void put_handshake(struct net_device *dev, u16 handshake_value)
u16 get_request_type(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 request_type;
u32 tempx;
@@ -206,7 +208,7 @@ u16 get_request_type(struct net_device *dev)
} else {
tempx = ft1000_read_dpram_mag_32(dev, DWNLD_MAG_TYPE_LOC);
tempx = ntohl(tempx);
- request_type = (u16) tempx;
+ request_type = (u16)tempx;
}
return request_type;
@@ -215,7 +217,7 @@ u16 get_request_type(struct net_device *dev)
long get_request_value(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
long value;
u16 w_val;
@@ -244,7 +246,7 @@ long get_request_value(struct net_device *dev)
void put_request_value(struct net_device *dev, long lvalue)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
u16 size;
u32 tempx;
@@ -271,11 +273,11 @@ void put_request_value(struct net_device *dev, long lvalue)
u16 hdr_checksum(struct pseudo_hdr *pHdr)
{
- u16 *usPtr = (u16 *) pHdr;
+ u16 *usPtr = (u16 *)pHdr;
u16 chksum;
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
+ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
return chksum;
}
@@ -283,7 +285,7 @@ u16 hdr_checksum(struct pseudo_hdr *pHdr)
int card_download(struct net_device *dev, const u8 *pFileStart,
size_t FileLength)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *)netdev_priv(dev);
int Status = SUCCESS;
u32 uiState;
u16 handshake;
@@ -316,13 +318,13 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
file_version = *(long *)pFileStart;
if (file_version != 6) {
- printk(KERN_ERR "ft1000: unsupported firmware version %ld\n", file_version);
+ pr_err("unsupported firmware version %ld\n", file_version);
Status = FAILURE;
}
uiState = STATE_START_DWNLD;
- pFileHdr5 = (struct dsp_file_hdr *) pFileStart;
+ pFileHdr5 = (struct dsp_file_hdr *)pFileStart;
pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->loader_offset);
pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->loader_offset);
@@ -376,7 +378,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
}
if ((word_length * 2 + (long)pUcFile) >
- (long)pBootEnd) {
+ (long)pBootEnd) {
/*
* Error, beyond boot code range.
*/
@@ -390,8 +392,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -402,12 +404,12 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(*pUsFile++ << 16);
pUcFile += 4;
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
default:
Status = FAILURE;
@@ -430,7 +432,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
switch (request) {
case REQUEST_FILE_CHECKSUM:
netdev_dbg(dev,
- "ft1000_dnld: REQUEST_FOR_CHECKSUM\n");
+ "ft1000_dnld: REQUEST_FOR_CHECKSUM\n");
put_request_value(dev, image_chksum);
break;
case REQUEST_RUN_ADDRESS:
@@ -468,7 +470,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
}
if ((word_length * 2 + (long)pUcFile) >
- (long)pCodeEnd) {
+ (long)pCodeEnd) {
/*
* Error, beyond boot code range.
*/
@@ -479,8 +481,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -491,8 +493,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(*pUsFile++ << 16);
pUcFile += 4;
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
break;
@@ -502,9 +504,9 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(long)(info->DSPInfoBlklen + 1) / 2;
put_request_value(dev, word_length);
pMailBoxData =
- (struct drv_msg *) &info->DSPInfoBlk[0];
+ (struct drv_msg *)&info->DSPInfoBlk[0];
pUsData =
- (u16 *) &pMailBoxData->data[0];
+ (u16 *)&pMailBoxData->data[0];
/* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
@@ -528,8 +530,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
@@ -540,13 +542,13 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(*pUsData++ << 16);
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
case REQUEST_VERSION_INFO:
@@ -555,8 +557,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
put_request_value(dev, word_length);
pUsFile =
(u16 *) ((long)pFileStart +
- pFileHdr5->
- version_data_offset);
+ pFileHdr5->
+ version_data_offset);
/* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
@@ -564,8 +566,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
* Position ASIC DPRAM auto-increment pointer.
*/
outw(DWNLD_MAG_PS_HDR_LOC,
- dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
if (word_length & 0x01)
word_length++;
word_length = word_length / 2;
@@ -578,12 +580,12 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(temp << 16);
outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ dev->base_addr +
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
- dpram_lock,
- flags);
+ dpram_lock,
+ flags);
break;
case REQUEST_CODE_BY_VERSION:
@@ -592,14 +594,14 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
get_request_value(dev);
pDspImageInfoV6 =
(struct dsp_image_info *) ((long)
- pFileStart
- +
- sizeof
- (struct dsp_file_hdr));
+ pFileStart
+ +
+ sizeof
+ (struct dsp_file_hdr));
for (imageN = 0;
- imageN <
- pFileHdr5->nDspImages;
- imageN++) {
+ imageN <
+ pFileHdr5->nDspImages;
+ imageN++) {
temp = (u16)
(pDspImageInfoV6->
version);
@@ -610,30 +612,30 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
templong |=
(temp << 16);
if (templong ==
- requested_version) {
+ requested_version) {
bGoodVersion =
true;
pUsFile =
(u16
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ begin_offset);
pUcFile =
(u8
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ begin_offset);
pCodeEnd =
(u8
*) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- end_offset);
+ pFileStart
+ +
+ pDspImageInfoV6->
+ end_offset);
run_address =
pDspImageInfoV6->
run_address;
@@ -645,10 +647,10 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
pDspImageInfoV6->
checksum;
netdev_dbg(dev,
- "ft1000_dnld: image_chksum = 0x%8x\n",
- (unsigned
- int)
- image_chksum);
+ "ft1000_dnld: image_chksum = 0x%8x\n",
+ (unsigned
+ int)
+ image_chksum);
break;
}
pDspImageInfoV6++;
@@ -674,25 +676,25 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
case STATE_DONE_DWNLD:
- if (((unsigned long) (pUcFile) - (unsigned long) pFileStart) >=
- (unsigned long) FileLength) {
+ if (((unsigned long)(pUcFile) - (unsigned long) pFileStart) >=
+ (unsigned long)FileLength) {
uiState = STATE_DONE_FILE;
break;
}
- pHdr = (struct pseudo_hdr *) pUsFile;
+ pHdr = (struct pseudo_hdr *)pUsFile;
if (pHdr->portdest == 0x80 /* DspOAM */
- && (pHdr->portsrc == 0x00 /* Driver */
+ && (pHdr->portsrc == 0x00 /* Driver */
|| pHdr->portsrc == 0x10 /* FMM */)) {
uiState = STATE_SECTION_PROV;
} else {
netdev_dbg(dev,
- "FT1000:download:Download error: Bad Port IDs in Pseudo Record\n");
+ "Download error: Bad Port IDs in Pseudo Record\n");
netdev_dbg(dev, "\t Port Source = 0x%2.2x\n",
- pHdr->portsrc);
+ pHdr->portsrc);
netdev_dbg(dev, "\t Port Destination = 0x%2.2x\n",
- pHdr->portdest);
+ pHdr->portdest);
Status = FAILURE;
}
@@ -700,7 +702,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
case STATE_SECTION_PROV:
- pHdr = (struct pseudo_hdr *) pUcFile;
+ pHdr = (struct pseudo_hdr *)pUcFile;
if (pHdr->checksum == hdr_checksum(pHdr)) {
if (pHdr->portdest != 0x80 /* Dsp OAM */) {
@@ -715,8 +717,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
GFP_ATOMIC);
if (pbuffer) {
memcpy(pbuffer, (void *)pUcFile,
- (u32) (usHdrLength +
- sizeof(struct pseudo_hdr)));
+ (u32) (usHdrLength +
+ sizeof(struct pseudo_hdr)));
/* link provisioning data */
pprov_record =
kmalloc(sizeof(struct prov_record),
@@ -725,15 +727,15 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
pprov_record->pprov_data =
pbuffer;
list_add_tail(&pprov_record->
- list,
- &info->prov_list);
+ list,
+ &info->prov_list);
/* Move to next entry if available */
pUcFile =
- (u8 *) ((unsigned long) pUcFile +
- (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+ (u8 *)((unsigned long) pUcFile +
+ (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
if ((unsigned long) (pUcFile) -
- (unsigned long) (pFileStart) >=
- (unsigned long) FileLength) {
+ (unsigned long) (pFileStart) >=
+ (unsigned long)FileLength) {
uiState =
STATE_DONE_FILE;
}
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 44575c78cf0c..d5475b7270a8 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -1,22 +1,24 @@
/*---------------------------------------------------------------------------
- FT1000 driver for Flarion Flash OFDM NIC Device
-
- Copyright (C) 2002 Flarion Technologies, All rights reserved.
- Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
- Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option) any
- later version. This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details. You should have received a copy of the GNU General Public
- License along with this program; if not, write to the
- Free Software Foundation, Inc., 59 Temple Place -
- Suite 330, Boston, MA 02111-1307, USA.
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
+ Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
-------------------------------------------------------------------------*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -44,12 +46,6 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#ifdef FT_DEBUG
-#define DEBUG(n, args...) printk(KERN_DEBUG args);
-#else
-#define DEBUG(n, args...)
-#endif
-
#include <linux/delay.h>
#include "ft1000.h"
@@ -57,7 +53,7 @@ static const struct firmware *fw_entry;
static void ft1000_hbchk(u_long data);
static struct timer_list poll_timer = {
- .function = ft1000_hbchk
+ .function = ft1000_hbchk
};
static u16 cmdbuffer[1024];
@@ -72,7 +68,7 @@ static void ft1000_disable_interrupts(struct net_device *dev);
/* new kernel */
MODULE_AUTHOR("");
MODULE_DESCRIPTION
- ("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs.");
+("Support for Flarion Flash OFDM NIC Device. Support for PCMCIA when used with ft1000_cs.");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("FT1000");
@@ -80,15 +76,15 @@ MODULE_SUPPORTED_DEVICE("FT1000");
/*---------------------------------------------------------------------------
- Function: ft1000_read_fifo_len
- Description: This function will read the ASIC Uplink FIFO status register
- which will return the number of bytes remaining in the Uplink FIFO.
- Sixteen bytes are subtracted to make sure that the ASIC does not
- reach its threshold.
- Input:
- dev - network device structure
- Output:
- value - number of bytes available in the ASIC Uplink FIFO.
+ Function: ft1000_read_fifo_len
+ Description: This function will read the ASIC Uplink FIFO status register
+ which will return the number of bytes remaining in the Uplink FIFO.
+ Sixteen bytes are subtracted to make sure that the ASIC does not
+ reach its threshold.
+ Input:
+ dev - network device structure
+ Output:
+ value - number of bytes available in the ASIC Uplink FIFO.
-------------------------------------------------------------------------*/
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
@@ -103,14 +99,14 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram
- Description: This function will read the specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram
+ Description: This function will read a specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u16 ft1000_read_dpram(struct net_device *dev, int offset)
@@ -130,19 +126,19 @@ u16 ft1000_read_dpram(struct net_device *dev, int offset)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram
- Description: This function will write to a specific area of dpram
- (Electrabuzz ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram
+ Description: This function will write to a specific area of dpram
+ (Electrabuzz ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
static inline void ft1000_write_dpram(struct net_device *dev,
- int offset, u16 value)
+ int offset, u16 value)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
@@ -156,14 +152,14 @@ static inline void ft1000_write_dpram(struct net_device *dev,
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram_mag_16
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram_mag_16
+ Description: This function will read a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
@@ -188,19 +184,19 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram_mag_16
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram_mag_16
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
- int offset, u16 value, int Index)
+ int offset, u16 value, int Index)
{
struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
@@ -218,14 +214,14 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
/*---------------------------------------------------------------------------
- Function: ft1000_read_dpram_mag_32
- Description: This function will read the specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- Output:
- value - value of dpram
+ Function: ft1000_read_dpram_mag_32
+ Description: This function will read a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ Output:
+ value - value of dpram
-------------------------------------------------------------------------*/
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
@@ -245,15 +241,15 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
/*---------------------------------------------------------------------------
- Function: ft1000_write_dpram_mag_32
- Description: This function will write to a specific area of dpram
- (Magnemite ASIC only)
- Input:
- dev - device structure
- offset - index of dpram
- value - value to write
- Output:
- none.
+ Function: ft1000_write_dpram_mag_32
+ Description: This function will write to a specific area of dpram
+ (Magnemite ASIC only)
+ Input:
+ dev - device structure
+ offset - index of dpram
+ value - value to write
+ Output:
+ none.
-------------------------------------------------------------------------*/
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
@@ -270,57 +266,51 @@ void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
/*---------------------------------------------------------------------------
- Function: ft1000_enable_interrupts
- Description: This function will enable interrupts base on the current interrupt mask.
- Input:
- dev - device structure
- Output:
- None.
+ Function: ft1000_enable_interrupts
+ Description: This function will enable interrupts based on the current interrupt mask.
+ Input:
+ dev - device structure
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_enable_interrupts(struct net_device *dev)
{
u16 tempword;
- DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_DEFAULT_MASK);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- DEBUG(1,
- "ft1000_hw:ft1000_enable_interrupts:current interrupt enable mask = 0x%x\n",
- tempword);
+ pr_debug("current interrupt enable mask = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_disable_interrupts
- Description: This function will disable all interrupts.
- Input:
- dev - device structure
- Output:
- None.
+ Function: ft1000_disable_interrupts
+ Description: This function will disable all interrupts.
+ Input:
+ dev - device structure
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_disable_interrupts(struct net_device *dev)
{
u16 tempword;
- DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_MASK_ALL);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
- DEBUG(1,
- "ft1000_hw:ft1000_disable_interrupts:current interrupt enable mask = 0x%x\n",
- tempword);
+ pr_debug("current interrupt enable mask = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_reset_asic
- Description: This function will call the Card Service function to reset the
- ASIC.
- Input:
- dev - device structure
- Output:
- none
+ Function: ft1000_reset_asic
+ Description: This function will call the Card Service function to reset the
+ ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_reset_asic(struct net_device *dev)
@@ -329,8 +319,6 @@ static void ft1000_reset_asic(struct net_device *dev)
struct ft1000_pcmcia *pcmcia = info->priv;
u16 tempword;
- DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
-
(*info->ft1000_reset) (pcmcia->link);
/*
@@ -351,22 +339,22 @@ static void ft1000_reset_asic(struct net_device *dev)
}
/* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
}
/*---------------------------------------------------------------------------
- Function: ft1000_reset_card
- Description: This function will reset the card
- Input:
- dev - device structure
- Output:
- status - false (card reset fail)
- true (card reset successful)
+ Function: ft1000_reset_card
+ Description: This function will reset the card
+ Input:
+ dev - device structure
+ Output:
+ status - false (card reset fail)
+ true (card reset successful)
-------------------------------------------------------------------------*/
static int ft1000_reset_card(struct net_device *dev)
@@ -377,8 +365,6 @@ static int ft1000_reset_card(struct net_device *dev)
unsigned long flags;
struct prov_record *ptr;
- DEBUG(1, "ft1000_hw:ft1000_reset_card called.....\n");
-
info->CardReady = 0;
info->ProgConStat = 0;
info->squeseqnum = 0;
@@ -388,8 +374,7 @@ static int ft1000_reset_card(struct net_device *dev)
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
+ pr_debug("deleting provisioning record\n");
ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
@@ -397,11 +382,10 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (info->AsicID == ELECTRABUZZ_ID) {
- DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting DSP\n");
+ pr_debug("resetting DSP\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
} else {
- DEBUG(1,
- "ft1000_hw:ft1000_reset_card:resetting ASIC and DSP\n");
+ pr_debug("resetting ASIC and DSP\n");
ft1000_write_reg(dev, FT1000_REG_RESET,
(DSP_RESET_BIT | ASIC_RESET_BIT));
}
@@ -428,17 +412,16 @@ static int ft1000_reset_card(struct net_device *dev)
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
- DEBUG(1, "ft1000_hw:ft1000_reset_card:resetting ASIC\n");
+ pr_debug("resetting ASIC\n");
mdelay(10);
/* reset ASIC */
ft1000_reset_asic(dev);
- DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");
+ pr_debug("downloading dsp image\n");
if (info->AsicID == MAGNEMITE_ID) {
/* Put dsp in reset and take ASIC out of reset */
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:Put DSP in reset and take ASIC out of reset\n");
+ pr_debug("Put DSP in reset and take ASIC out of reset\n");
ft1000_write_reg(dev, FT1000_REG_RESET, DSP_RESET_BIT);
/* Setting MAGNEMITE ASIC to big endian mode */
@@ -450,7 +433,7 @@ static int ft1000_reset_card(struct net_device *dev)
ft1000_write_reg(dev, FT1000_REG_RESET, 0);
/* FLARION_DSP_ACTIVE; */
mdelay(10);
- DEBUG(0, "ft1000_hw:ft1000_reset_card:Take DSP out of reset\n");
+ pr_debug("Take DSP out of reset\n");
/* Wait for 0xfefe indicating dsp ready before starting download */
for (i = 0; i < 50; i++) {
@@ -464,8 +447,7 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (i == 50) {
- DEBUG(0,
- "ft1000_hw:ft1000_reset_card:No FEFE detected from DSP\n");
+ pr_debug("No FEFE detected from DSP\n");
return false;
}
@@ -476,10 +458,10 @@ static int ft1000_reset_card(struct net_device *dev)
}
if (card_download(dev, fw_entry->data, fw_entry->size)) {
- DEBUG(1, "card download unsuccessful\n");
+ pr_debug("card download unsuccessful\n");
return false;
} else {
- DEBUG(1, "card download successful\n");
+ pr_debug("card download successful\n");
}
mdelay(10);
@@ -494,8 +476,7 @@ static int ft1000_reset_card(struct net_device *dev)
/* Initialize DSP heartbeat area to ho */
ft1000_write_dpram(dev, FT1000_HI_HO, ho);
tempword = ft1000_read_dpram(dev, FT1000_HI_HO);
- DEBUG(1, "ft1000_hw:ft1000_reset_asic:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
} else {
/* Initialize DSP heartbeat area to ho */
ft1000_write_dpram_mag_16(dev, FT1000_MAG_HI_HO, ho_mag,
@@ -503,8 +484,7 @@ static int ft1000_reset_card(struct net_device *dev)
tempword =
ft1000_read_dpram_mag_16(dev, FT1000_MAG_HI_HO,
FT1000_MAG_HI_HO_INDX);
- DEBUG(1, "ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
}
info->CardReady = 1;
@@ -521,14 +501,14 @@ static int ft1000_reset_card(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_chkcard
- Description: This function will check if the device is presently available on
- the system.
- Input:
- dev - device structure
- Output:
- status - false (device is not present)
- true (device is present)
+ Function: ft1000_chkcard
+ Description: This function will check if the device is presently available on
+ the system.
+ Input:
+ dev - device structure
+ Output:
+ status - false (device is not present)
+ true (device is present)
-------------------------------------------------------------------------*/
static int ft1000_chkcard(struct net_device *dev)
@@ -541,8 +521,7 @@ static int ft1000_chkcard(struct net_device *dev)
*/
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG(1,
- "ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ pr_debug("IMASK = 0 Card not detected\n");
return false;
}
/*
@@ -551,8 +530,7 @@ static int ft1000_chkcard(struct net_device *dev)
*/
tempword = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (tempword == 0xffff) {
- DEBUG(1,
- "ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ pr_debug("Version = 0xffff Card not detected\n");
return false;
}
return true;
@@ -561,13 +539,13 @@ static int ft1000_chkcard(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_hbchk
- Description: This function will perform the heart beat check of the DSP as
- well as the ASIC.
- Input:
- dev - device structure
- Output:
- none
+ Function: ft1000_hbchk
+ Description: This function will perform the heart beat check of the DSP as
+ well as the ASIC.
+ Input:
+ dev - device structure
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_hbchk(u_long data)
@@ -586,11 +564,10 @@ static void ft1000_hbchk(u_long data)
} else {
tempword =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
+ (dev, FT1000_MAG_HI_HO,
+ FT1000_MAG_HI_HO_INDX));
}
- DEBUG(1, "ft1000_hw:ft1000_hbchk:hi_ho value = 0x%x\n",
- tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
/* Let's perform another check if ho is not detected */
if (tempword != ho) {
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -601,8 +578,7 @@ static void ft1000_hbchk(u_long data)
}
}
if (tempword != ho) {
- printk(KERN_INFO
- "ft1000: heartbeat failed - no ho detected\n");
+ pr_info("heartbeat failed - no ho detected\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -632,8 +608,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -650,8 +625,7 @@ static void ft1000_hbchk(u_long data)
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
}
if (tempword & FT1000_DB_HB) {
- printk(KERN_INFO
- "ft1000: heartbeat doorbell not clear by firmware\n");
+ pr_info("heartbeat doorbell not clear by firmware\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -681,8 +655,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -708,8 +681,8 @@ static void ft1000_hbchk(u_long data)
} else {
tempword =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_HI_HO,
- FT1000_MAG_HI_HO_INDX));
+ (dev, FT1000_MAG_HI_HO,
+ FT1000_MAG_HI_HO_INDX));
}
/* Let's write hi again if fail */
if (tempword != hi) {
@@ -730,8 +703,7 @@ static void ft1000_hbchk(u_long data)
}
if (tempword != hi) {
- printk(KERN_INFO
- "ft1000: heartbeat failed - cannot write hi into DPRAM\n");
+ pr_info("heartbeat failed - cannot write hi into DPRAM\n");
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
ft1000_read_dpram(dev, FT1000_DSP_TIMER0);
@@ -761,8 +733,7 @@ static void ft1000_hbchk(u_long data)
}
info->DrvErrNum = DSP_HB_INFO;
if (ft1000_reset_card(dev) == 0) {
- printk(KERN_INFO
- "ft1000: Hardware Failure Detected - PC Card disabled\n");
+ pr_info("Hardware Failure Detected - PC Card disabled\n");
info->ProgConStat = 0xff;
return;
}
@@ -778,19 +749,19 @@ static void ft1000_hbchk(u_long data)
/* Schedule this module to run every 2 seconds */
poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long) dev;
+ poll_timer.data = (u_long)dev;
add_timer(&poll_timer);
}
/*---------------------------------------------------------------------------
- Function: ft1000_send_cmd
- Description:
- Input:
- Output:
+ Function: ft1000_send_cmd
+ Description:
+ Input:
+ Output:
-------------------------------------------------------------------------*/
-static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
+static void ft1000_send_cmd(struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
struct ft1000_info *info = netdev_priv(dev);
int i;
@@ -802,26 +773,26 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
if ((size & 0x0001)) {
size++;
}
- DEBUG(1, "FT1000:ft1000_send_cmd:total length = %d\n", size);
- DEBUG(1, "FT1000:ft1000_send_cmd:length = %d\n", ntohs(*ptempbuffer));
+ pr_debug("total length = %d\n", size);
+ pr_debug("length = %d\n", ntohs(*ptempbuffer));
/*
* put message into slow queue area
* All messages are in the form total_len + pseudo header + message body
*/
spin_lock_irqsave(&info->dpram_lock, flags);
- /* Make sure SLOWQ doorbell is clear */
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- i=0;
- while (tempword & FT1000_DB_DPRAM_TX) {
- mdelay(10);
- i++;
- if (i==10) {
- spin_unlock_irqrestore(&info->dpram_lock, flags);
- return;
- }
- tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- }
+ /* Make sure SLOWQ doorbell is clear */
+ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
+ i = 0;
+ while (tempword & FT1000_DB_DPRAM_TX) {
+ mdelay(10);
+ i++;
+ if (i == 10) {
+ spin_unlock_irqrestore(&info->dpram_lock, flags);
+ return;
+ }
+ tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
+ }
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -830,8 +801,7 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
/* Write pseudo header and message body */
for (i = 0; i < (size >> 1); i++) {
- DEBUG(1, "FT1000:ft1000_send_cmd:data %d = 0x%x\n", i,
- *ptempbuffer);
+ pr_debug("data %d = 0x%x\n", i, *ptempbuffer);
tempword = htons(*ptempbuffer++);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, tempword);
}
@@ -844,18 +814,16 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_TX_BASE + 1);
for (i = 0; i < (size >> 2); i++) {
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
- *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n",
- *ptempbuffer);
+ dev->base_addr + FT1000_REG_MAG_DPDATAL);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++,
- dev->base_addr + FT1000_REG_MAG_DPDATAH);
+ dev->base_addr + FT1000_REG_MAG_DPDATAH);
}
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "FT1000:ft1000_send_cmd:data = 0x%x\n", *ptempbuffer);
+ pr_debug("data = 0x%x\n", *ptempbuffer);
outw(*ptempbuffer++, dev->base_addr + FT1000_REG_MAG_DPDATAH);
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
@@ -866,19 +834,19 @@ static void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size,
/*---------------------------------------------------------------------------
- Function: ft1000_receive_cmd
- Description: This function will read a message from the dpram area.
- Input:
- dev - network device structure
- pbuffer - caller supply address to buffer
- pnxtph - pointer to next pseudo header
- Output:
- Status = 0 (unsuccessful)
- = 1 (successful)
+ Function: ft1000_receive_cmd
+ Description: This function will read a message from the dpram area.
+ Input:
+ dev - network device structure
+ pbuffer - caller-supplied address of buffer
+ pnxtph - pointer to next pseudo header
+ Output:
+ Status = 0 (unsuccessful)
+ = 1 (successful)
-------------------------------------------------------------------------*/
static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
- int maxsz, u16 *pnxtph)
+ int maxsz, u16 *pnxtph)
{
struct ft1000_info *info = netdev_priv(dev);
u16 size;
@@ -888,20 +856,18 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
unsigned long flags;
if (info->AsicID == ELECTRABUZZ_ID) {
- size = ( ft1000_read_dpram(dev, *pnxtph) ) + sizeof(struct pseudo_hdr);
+ size = (ft1000_read_dpram(dev, *pnxtph)) + sizeof(struct pseudo_hdr);
} else {
size =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_PH_LEN,
- FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
+ (dev, FT1000_MAG_PH_LEN,
+ FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
}
if (size > maxsz) {
- DEBUG(1,
- "FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
- size);
+ pr_debug("Invalid command length = %d\n", size);
return false;
} else {
- ppseudohdr = (u16 *) pbuffer;
+ ppseudohdr = (u16 *)pbuffer;
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -915,26 +881,26 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_RX_BASE);
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
FT1000_DPRAM_MAG_RX_BASE + 1);
for (i = 0; i <= (size >> 2); i++) {
*pbuffer =
inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
+ FT1000_REG_MAG_DPDATAL);
pbuffer++;
*pbuffer =
inw(dev->base_addr +
- FT1000_REG_MAG_DPDATAH);
+ FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
/* copy odd aligned word */
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAL);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
*pbuffer = inw(dev->base_addr + FT1000_REG_MAG_DPDATAH);
- DEBUG(1, "ft1000_hw:received data = 0x%x\n", *pbuffer);
+ pr_debug("received data = 0x%x\n", *pbuffer);
pbuffer++;
}
if (size & 0x0001) {
@@ -953,8 +919,7 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
tempword ^= *ppseudohdr++;
}
if ((tempword != *ppseudohdr)) {
- DEBUG(1,
- "FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n");
+ pr_debug("Pseudo header checksum mismatch\n");
/* Drop this message */
return false;
}
@@ -964,13 +929,13 @@ static bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer,
/*---------------------------------------------------------------------------
- Function: ft1000_proc_drvmsg
- Description: This function will process the various driver messages.
- Input:
- dev - device structure
- pnxtph - pointer to next pseudo header
- Output:
- none
+ Function: ft1000_proc_drvmsg
+ Description: This function will process the various driver messages.
+ Input:
+ dev - device structure
+ pnxtph - pointer to next pseudo header
+ Output:
+ none
-------------------------------------------------------------------------*/
static void ft1000_proc_drvmsg(struct net_device *dev)
@@ -992,25 +957,24 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
u16 wrd;
} convert;
- if (info->AsicID == ELECTRABUZZ_ID) {
- tempword = FT1000_DPRAM_RX_BASE+2;
- }
- else {
- tempword = FT1000_DPRAM_MAG_RX_BASE;
- }
- if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {
+ if (info->AsicID == ELECTRABUZZ_ID) {
+ tempword = FT1000_DPRAM_RX_BASE+2;
+ }
+ else {
+ tempword = FT1000_DPRAM_MAG_RX_BASE;
+ }
+ if (ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword)) {
/* Get the message type which is total_len + PSEUDO header + msgtype + message body */
- pdrvmsg = (struct drv_msg *) & cmdbuffer[0];
+ pdrvmsg = (struct drv_msg *)&cmdbuffer[0];
msgtype = ntohs(pdrvmsg->type);
- DEBUG(1, "Command message type = 0x%x\n", msgtype);
+ pr_debug("Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case DSP_PROVISION:
- DEBUG(0,
- "Got a provisioning request message from DSP\n");
+ pr_debug("Got a provisioning request message from DSP\n");
mdelay(25);
while (list_empty(&info->prov_list) == 0) {
- DEBUG(0, "Sending a provisioning message\n");
+ pr_debug("Sending a provisioning message\n");
/* Make sure SLOWQ doorbell is clear */
tempword =
ft1000_read_reg(dev, FT1000_REG_DOORBELL);
@@ -1025,25 +989,25 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
ptr =
list_entry(info->prov_list.next,
struct prov_record, list);
- len = *(u16 *) ptr->pprov_data;
+ len = *(u16 *)ptr->pprov_data;
len = htons(len);
- pmsg = (u16 *) ptr->pprov_data;
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)ptr->pprov_data;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
/* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- DEBUG(1, "checksum = 0x%x\n",
- ppseudo_hdr->checksum);
+ pr_debug("checksum = 0x%x\n",
+ ppseudo_hdr->checksum);
for (i = 1; i < 7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
- DEBUG(1, "checksum = 0x%x\n",
- ppseudo_hdr->checksum);
+ pr_debug("checksum = 0x%x\n",
+ ppseudo_hdr->checksum);
}
- ft1000_send_cmd (dev, (u16 *)ptr->pprov_data, len, SLOWQ_TYPE);
+ ft1000_send_cmd(dev, (u16 *)ptr->pprov_data, len, SLOWQ_TYPE);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
@@ -1055,19 +1019,29 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->CardReady = 1;
break;
case MEDIA_STATE:
- pmediamsg = (struct media_msg *) & cmdbuffer[0];
+ pmediamsg = (struct media_msg *)&cmdbuffer[0];
if (info->ProgConStat != 0xFF) {
- if (pmediamsg->state) {
- DEBUG(1, "Media is up\n");
- if (info->mediastate == 0) {
- netif_carrier_on(dev);
- netif_wake_queue(dev);
- info->mediastate = 1;
- do_gettimeofday(&tv);
- info->ConTm = tv.tv_sec;
+ if (pmediamsg->state) {
+ pr_debug("Media is up\n");
+ if (info->mediastate == 0) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ info->mediastate = 1;
+ do_gettimeofday(&tv);
+ info->ConTm = tv.tv_sec;
+ }
+ } else {
+ pr_debug("Media is down\n");
+ if (info->mediastate == 1) {
+ info->mediastate = 0;
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ info->ConTm = 0;
+ }
}
- } else {
- DEBUG(1, "Media is down\n");
+ }
+ else {
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
netif_carrier_off(dev);
@@ -1075,25 +1049,15 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->ConTm = 0;
}
}
- }
- else {
- DEBUG(1, "Media is down\n");
- if (info->mediastate == 1) {
- info->mediastate = 0;
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- info->ConTm = 0;
- }
- }
break;
case DSP_INIT_MSG:
- pdspinitmsg = (struct dsp_init_msg *) & cmdbuffer[0];
+ pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[0];
memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG(1, "DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
+ pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1],
+ info->DspVer[2], info->DspVer[3]);
memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
- HWSERNUMSZ);
+ HWSERNUMSZ);
memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
dev->dev_addr[0] = info->eui64[0];
@@ -1104,34 +1068,33 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
dev->dev_addr[5] = info->eui64[7];
if (ntohs(pdspinitmsg->length) ==
- (sizeof(struct dsp_init_msg) - 20)) {
+ (sizeof(struct dsp_init_msg) - 20)) {
memcpy(info->ProductMode,
- pdspinitmsg->ProductMode, MODESZ);
+ pdspinitmsg->ProductMode, MODESZ);
memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
- CALVERSZ);
+ CALVERSZ);
memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
- CALDATESZ);
- DEBUG(1, "RFCalVer = 0x%2x 0x%2x\n",
- info->RfCalVer[0], info->RfCalVer[1]);
+ CALDATESZ);
+ pr_debug("RFCalVer = 0x%2x 0x%2x\n",
+ info->RfCalVer[0], info->RfCalVer[1]);
}
- break ;
+ break;
case DSP_STORE_INFO:
- DEBUG(1, "FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ pr_debug("Got DSP_STORE_INFO\n");
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) & pdrvmsg->data[0];
+ pmsg = (u16 *)&pdrvmsg->data[0];
for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG(1,
- "FT1000:drivermsg:dsp info data = 0x%x\n",
- *pmsg);
+ pr_debug("dsp info data = 0x%x\n",
+ *pmsg);
info->DSPInfoBlk[i + 10] = *pmsg++;
}
}
break;
case DSP_GET_INFO:
- DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
+ pr_debug("Got DSP_GET_INFO\n");
/*
* copy dsp info block to dsp
* allow any outstanding ioctl to finish
@@ -1152,8 +1115,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
* Put message into Slow Queue
* Form Pseudo header
*/
- pmsg = (u16 *) info->DSPInfoBlk;
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)info->DSPInfoBlk;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length =
htons(info->DSPInfoBlklen + 4);
ppseudo_hdr->source = 0x10;
@@ -1177,12 +1140,12 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
info->DSPInfoBlk[8] = 0x7200;
info->DSPInfoBlk[9] =
htons(info->DSPInfoBlklen);
- ft1000_send_cmd (dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
+ ft1000_send_cmd(dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
}
break;
case GET_DRV_ERR_RPT_MSG:
- DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
/*
* copy driver error message to dsp
* allow any outstanding ioctl to finish
@@ -1203,8 +1166,8 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
* Put message into Slow Queue
* Form Pseudo header
*/
- pmsg = (u16 *) & tempbuffer[0];
- ppseudo_hdr = (struct pseudo_hdr *) pmsg;
+ pmsg = (u16 *)&tempbuffer[0];
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
@@ -1220,11 +1183,11 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
/* Insert application id */
ppseudo_hdr->portsrc = 0;
/* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- for (i=1; i<7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- }
- pmsg = (u16 *) & tempbuffer[16];
+ ppseudo_hdr->checksum = *pmsg++;
+ for (i = 1; i < 7; i++) {
+ ppseudo_hdr->checksum ^= *pmsg++;
+ }
+ pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -1239,7 +1202,7 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
*pmsg++ = convert.wrd;
*pmsg++ = htons(info->DrvErrNum);
- ft1000_send_cmd (dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
+ ft1000_send_cmd(dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
info->DrvErrNum = 0;
}
@@ -1252,14 +1215,14 @@ static void ft1000_proc_drvmsg(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_parse_dpram_msg
- Description: This function will parse the message received from the DSP
- via the DPRAM interface.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_parse_dpram_msg
+ Description: This function will parse the message received from the DSP
+ via the DPRAM interface.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
static int ft1000_parse_dpram_msg(struct net_device *dev)
@@ -1270,11 +1233,10 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
u16 nxtph;
u16 total_len;
int i = 0;
- int cnt;
unsigned long flags;
doorbell = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
- DEBUG(1, "Doorbell = 0x%x\n", doorbell);
+ pr_debug("Doorbell = 0x%x\n", doorbell);
if (doorbell & FT1000_ASIC_RESET_REQ) {
/* Copy DSP session record from info block */
@@ -1291,7 +1253,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_DPRAM_MAG_RX_BASE);
for (i = 0; i < MAX_DSP_SESS_REC / 2; i++) {
outl(info->DSPSess.MagRec[i],
- dev->base_addr + FT1000_REG_MAG_DPDATA);
+ dev->base_addr + FT1000_REG_MAG_DPDATA);
}
}
spin_unlock_irqrestore(&info->dpram_lock, flags);
@@ -1299,7 +1261,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/* clear ASIC RESET request */
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_REQ);
- DEBUG(1, "Got an ASIC RESET Request\n");
+ pr_debug("Got an ASIC RESET Request\n");
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_ASIC_RESET_DSP);
@@ -1311,8 +1273,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DSP_ASIC_RESET) {
- DEBUG(0,
- "FT1000:ft1000_parse_dpram_msg: Got a dsp ASIC reset message\n");
+ pr_debug("Got a dsp ASIC reset message\n");
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DSP_ASIC_RESET);
udelay(200);
@@ -1320,8 +1281,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
}
if (doorbell & FT1000_DB_DPRAM_RX) {
- DEBUG(1,
- "FT1000:ft1000_parse_dpram_msg: Got a slow queue message\n");
+ pr_debug("Got a slow queue message\n");
nxtph = FT1000_DPRAM_RX_BASE + 2;
if (info->AsicID == ELECTRABUZZ_ID) {
total_len =
@@ -1329,14 +1289,12 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
} else {
total_len =
ntohs(ft1000_read_dpram_mag_16
- (dev, FT1000_MAG_TOTAL_LEN,
- FT1000_MAG_TOTAL_LEN_INDX));
+ (dev, FT1000_MAG_TOTAL_LEN,
+ FT1000_MAG_TOTAL_LEN_INDX));
}
- DEBUG(1, "FT1000:ft1000_parse_dpram_msg:total length = %d\n",
- total_len);
+ pr_debug("total length = %d\n", total_len);
if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) {
- total_len += nxtph;
- cnt = 0;
+ total_len += nxtph;
/*
* ft1000_read_reg will return a value that needs to be byteswap
* in order to get DSP_QID_OFFSET.
@@ -1353,7 +1311,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
(dev, FT1000_MAG_PORT_ID,
FT1000_MAG_PORT_ID_INDX) & 0xff);
}
- DEBUG(1, "DSP_QID = 0x%x\n", portid);
+ pr_debug("DSP_QID = 0x%x\n", portid);
if (portid == DRIVERID) {
/* We are assuming one driver message from the DSP at a time. */
@@ -1389,7 +1347,7 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
FT1000_MAG_DSP_TIMER3_INDX);
}
info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG(1, "ft1000_hw:DSP conditional reset requested\n");
+ pr_debug("DSP conditional reset requested\n");
ft1000_reset_card(dev);
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DB_COND_RESET);
@@ -1397,9 +1355,9 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/* let's clear any unexpected doorbells from DSP */
doorbell =
doorbell & ~(FT1000_DB_DPRAM_RX | FT1000_ASIC_RESET_REQ |
- FT1000_DB_COND_RESET | 0xff00);
+ FT1000_DB_COND_RESET | 0xff00);
if (doorbell) {
- DEBUG(1, "Clearing unexpected doorbell = 0x%x\n", doorbell);
+ pr_debug("Clearing unexpected doorbell = 0x%x\n", doorbell);
ft1000_write_reg(dev, FT1000_REG_DOORBELL, doorbell);
}
@@ -1409,14 +1367,14 @@ static int ft1000_parse_dpram_msg(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_flush_fifo
- Description: This function will flush one packet from the downlink
- FIFO.
- Input:
- dev - device structure
- drv_err - driver error causing the flush fifo
- Output:
- None.
+ Function: ft1000_flush_fifo
+ Description: This function will flush one packet from the downlink
+ FIFO.
+ Input:
+ dev - device structure
+ drv_err - driver error causing the flush fifo
+ Output:
+ None.
-------------------------------------------------------------------------*/
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
@@ -1427,7 +1385,6 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
u32 templong;
u16 tempword;
- DEBUG(1, "ft1000:ft1000_hw:ft1000_flush_fifo called\n");
if (pcmcia->PktIntfErr > MAX_PH_ERR) {
if (info->AsicID == ELECTRABUZZ_ID) {
info->DSP_TIME[0] =
@@ -1514,7 +1471,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
*/
tempword =
inw(dev->base_addr +
- FT1000_REG_SUP_IMASK);
+ FT1000_REG_SUP_IMASK);
if (tempword == 0) {
/* This indicates that we can not communicate with the ASIC */
info->DrvErrNum =
@@ -1533,23 +1490,23 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
} while ((tempword & 0x03) != 0x03);
if (info->AsicID == ELECTRABUZZ_ID) {
i++;
- DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
+ pr_debug("Flushing FIFO complete = %x\n", tempword);
/* Flush last word in FIFO. */
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
/* Update FIFO counter for DSP */
i = i * 2;
- DEBUG(0, "Flush Data byte count to dsp = %d\n", i);
+ pr_debug("Flush Data byte count to dsp = %d\n", i);
info->fifo_cnt += i;
ft1000_write_dpram(dev, FT1000_FIFO_LEN,
info->fifo_cnt);
} else {
- DEBUG(0, "Flushing FIFO complete = %x\n", tempword);
+ pr_debug("Flushing FIFO complete = %x\n", tempword);
/* Flush last word in FIFO */
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
tempword = inw(dev->base_addr + FT1000_REG_SUP_STAT);
- DEBUG(0, "FT1000_REG_SUP_STAT = 0x%x\n", tempword);
+ pr_debug("FT1000_REG_SUP_STAT = 0x%x\n", tempword);
tempword = inw(dev->base_addr + FT1000_REG_MAG_DFSR);
- DEBUG(0, "FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
+ pr_debug("FT1000_REG_MAG_DFSR = 0x%x\n", tempword);
}
if (DrvErrNum) {
pcmcia->PktIntfErr++;
@@ -1559,15 +1516,15 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
/*---------------------------------------------------------------------------
- Function: ft1000_copy_up_pkt
- Description: This function will pull Flarion packets out of the Downlink
- FIFO and convert it to an ethernet packet. The ethernet packet will
- then be deliver to the TCP/IP stack.
- Input:
- dev - device structure
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_copy_up_pkt
+ Description: This function will pull Flarion packets out of the Downlink
+ FIFO and convert them to ethernet packets. The ethernet packets will
+ then be delivered to the TCP/IP stack.
+ Input:
+ dev - device structure
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
static int ft1000_copy_up_pkt(struct net_device *dev)
@@ -1583,7 +1540,6 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
u32 *ptemplong;
u32 templong;
- DEBUG(1, "ft1000_copy_up_pkt\n");
/* Read length */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
@@ -1593,10 +1549,10 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
len = ntohs(tempword);
}
chksum = tempword;
- DEBUG(1, "Number of Bytes in FIFO = %d\n", len);
+ pr_debug("Number of Bytes in FIFO = %d\n", len);
if (len > ENET_MAX_SIZE) {
- DEBUG(0, "size of ethernet packet invalid\n");
+ pr_debug("size of ethernet packet invalid\n");
if (info->AsicID == MAGNEMITE_ID) {
/* Read High word to complete 32 bit access */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1609,7 +1565,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
skb = dev_alloc_skb(len + 12 + 2);
if (skb == NULL) {
- DEBUG(0, "No Network buffers available\n");
+ pr_debug("No Network buffers available\n");
/* Read High word to complete 32 bit access */
if (info->AsicID == MAGNEMITE_ID) {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
@@ -1618,7 +1574,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
info->stats.rx_errors++;
return FAILURE;
}
- pbuffer = (u8 *) skb_put(skb, len + 12);
+ pbuffer = (u8 *)skb_put(skb, len + 12);
/* Pseudo header */
if (info->AsicID == ELECTRABUZZ_ID) {
@@ -1630,37 +1586,37 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
} else {
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRL);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
chksum ^= tempword;
/* read checksum value */
tempword = ft1000_read_reg(dev, FT1000_REG_MAG_DFRH);
- DEBUG(1, "Pseudo = 0x%x\n", tempword);
+ pr_debug("Pseudo = 0x%x\n", tempword);
}
if (chksum != tempword) {
- DEBUG(0, "Packet checksum mismatch 0x%x 0x%x\n", chksum,
- tempword);
+ pr_debug("Packet checksum mismatch 0x%x 0x%x\n",
+ chksum, tempword);
ft1000_flush_fifo(dev, DSP_PKTPHCKSUM_INFO);
info->stats.rx_errors++;
kfree_skb(skb);
@@ -1687,7 +1643,7 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
for (i = 0; i < len / 2; i++) {
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
*pbuffer++ = (u8) (tempword >> 8);
- *pbuffer++ = (u8) tempword;
+ *pbuffer++ = (u8)tempword;
if (ft1000_chkcard(dev) == false) {
kfree_skb(skb);
return FAILURE;
@@ -1700,25 +1656,25 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
*pbuffer++ = (u8) (tempword >> 8);
}
} else {
- ptemplong = (u32 *) pbuffer;
+ ptemplong = (u32 *)pbuffer;
for (i = 0; i < len / 4; i++) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- DEBUG(1, "Data = 0x%8x\n", templong);
+ pr_debug("Data = 0x%8x\n", templong);
*ptemplong++ = templong;
}
/* Need to read one more word if odd align. */
if (len & 0x0003) {
templong = inl(dev->base_addr + FT1000_REG_MAG_DFR);
- DEBUG(1, "Data = 0x%8x\n", templong);
+ pr_debug("Data = 0x%8x\n", templong);
*ptemplong++ = templong;
}
}
- DEBUG(1, "Data passed to Protocol layer:\n");
+ pr_debug("Data passed to Protocol layer:\n");
for (i = 0; i < len + 12; i++) {
- DEBUG(1, "Protocol Data: 0x%x\n ", *ptemp++);
+ pr_debug("Protocol Data: 0x%x\n", *ptemp++);
}
skb->dev = dev;
@@ -1745,20 +1701,20 @@ static int ft1000_copy_up_pkt(struct net_device *dev)
/*---------------------------------------------------------------------------
- Function: ft1000_copy_down_pkt
- Description: This function will take an ethernet packet and convert it to
- a Flarion packet prior to sending it to the ASIC Downlink
- FIFO.
- Input:
- dev - device structure
- packet - address of ethernet packet
- len - length of IP packet
- Output:
- status - FAILURE
- SUCCESS
+ Function: ft1000_copy_down_pkt
+ Description: This function will take an ethernet packet and convert it to
+ a Flarion packet prior to sending it to the ASIC Downlink
+ FIFO.
+ Input:
+ dev - device structure
+ packet - address of ethernet packet
+ len - length of IP packet
+ Output:
+ status - FAILURE
+ SUCCESS
-------------------------------------------------------------------------*/
-static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
+static int ft1000_copy_down_pkt(struct net_device *dev, u16 *packet, u16 len)
{
struct ft1000_info *info = netdev_priv(dev);
struct ft1000_pcmcia *pcmcia = info->priv;
@@ -1770,8 +1726,6 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
int i;
u32 *plong;
- DEBUG(1, "ft1000_hw: copy_down_pkt()\n");
-
/* Check if there is room on the FIFO */
if (len > ft1000_read_fifo_len(dev)) {
udelay(10);
@@ -1791,8 +1745,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
udelay(20);
}
if (len > ft1000_read_fifo_len(dev)) {
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:Transmit FIFO is fulli - pkt drop\n");
+ pr_debug("Transmit FIFO is full - pkt drop\n");
info->stats.tx_errors++;
return SUCCESS;
}
@@ -1823,39 +1776,30 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
if (info->AsicID == ELECTRABUZZ_ID) {
/* copy first word to UFIFO_BEG reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_BEG, pseudo.buff[0]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 0 BEG = 0x%04x\n",
- pseudo.buff[0]);
+ pr_debug("data 0 BEG = 0x%04x\n", pseudo.buff[0]);
/* copy subsequent words to UFIFO_MID reg */
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[1]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 1 MID = 0x%04x\n",
- pseudo.buff[1]);
+ pr_debug("data 1 MID = 0x%04x\n", pseudo.buff[1]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[2]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 2 MID = 0x%04x\n",
- pseudo.buff[2]);
+ pr_debug("data 2 MID = 0x%04x\n", pseudo.buff[2]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[3]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 3 MID = 0x%04x\n",
- pseudo.buff[3]);
+ pr_debug("data 3 MID = 0x%04x\n", pseudo.buff[3]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[4]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 4 MID = 0x%04x\n",
- pseudo.buff[4]);
+ pr_debug("data 4 MID = 0x%04x\n", pseudo.buff[4]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[5]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 5 MID = 0x%04x\n",
- pseudo.buff[5]);
+ pr_debug("data 5 MID = 0x%04x\n", pseudo.buff[5]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[6]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 6 MID = 0x%04x\n",
- pseudo.buff[6]);
+ pr_debug("data 6 MID = 0x%04x\n", pseudo.buff[6]);
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID, pseudo.buff[7]);
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:data 7 MID = 0x%04x\n",
- pseudo.buff[7]);
+ pr_debug("data 7 MID = 0x%04x\n", pseudo.buff[7]);
/* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 1) - 1; i++) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
packet++;
}
@@ -1863,41 +1807,33 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
if (len & 0x0001) {
ft1000_write_reg(dev, FT1000_REG_UFIFO_MID,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data MID = 0x%04x\n",
- htons(*packet));
+ pr_debug("data MID = 0x%04x\n", htons(*packet));
packet++;
ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
} else {
ft1000_write_reg(dev, FT1000_REG_UFIFO_END,
htons(*packet));
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data %d MID = 0x%04x\n",
- i + 8, htons(*packet));
+ pr_debug("data %d MID = 0x%04x\n",
+ i + 8, htons(*packet));
}
} else {
- outl(*(u32 *) & pseudo.buff[0],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[0]);
- outl(*(u32 *) & pseudo.buff[2],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[2]);
- outl(*(u32 *) & pseudo.buff[4],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[4]);
- outl(*(u32 *) & pseudo.buff[6],
- dev->base_addr + FT1000_REG_MAG_UFDR);
- DEBUG(1, "ft1000_copy_down_pkt: Pseudo = 0x%8x\n",
- *(u32 *) & pseudo.buff[6]);
-
- plong = (u32 *) packet;
+ outl(*(u32 *)&pseudo.buff[0],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[0]);
+ outl(*(u32 *)&pseudo.buff[2],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[2]);
+ outl(*(u32 *)&pseudo.buff[4],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[4]);
+ outl(*(u32 *)&pseudo.buff[6],
+ dev->base_addr + FT1000_REG_MAG_UFDR);
+ pr_debug("Pseudo = 0x%8x\n", *(u32 *)&pseudo.buff[6]);
+
+ plong = (u32 *)packet;
/* Write PPP type + IP Packet into Downlink FIFO */
for (i = 0; i < (len >> 2); i++) {
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
@@ -1905,9 +1841,7 @@ static int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
/* Check for odd alignment */
if (len & 0x0003) {
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:data = 0x%8x\n",
- *plong);
+ pr_debug("data = 0x%8x\n", *plong);
outl(*plong++, dev->base_addr + FT1000_REG_MAG_UFDR);
}
outl(1, dev->base_addr + FT1000_REG_MAG_UFER);
@@ -1928,19 +1862,14 @@ static struct net_device_stats *ft1000_stats(struct net_device *dev)
static int ft1000_open(struct net_device *dev)
{
-
- DEBUG(0, "ft1000_hw: ft1000_open is called\n");
-
ft1000_reset_card(dev);
- DEBUG(0, "ft1000_hw: ft1000_open is ended\n");
/* schedule ft1000_hbchk to perform periodic heartbeat checks on DSP and ASIC */
init_timer(&poll_timer);
poll_timer.expires = jiffies + (2 * HZ);
- poll_timer.data = (u_long) dev;
+ poll_timer.data = (u_long)dev;
add_timer(&poll_timer);
- DEBUG(0, "ft1000_hw: ft1000_open is ended2\n");
return 0;
}
@@ -1948,13 +1877,11 @@ static int ft1000_close(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
- DEBUG(0, "ft1000_hw: ft1000_close()\n");
-
info->CardReady = 0;
del_timer(&poll_timer);
if (ft1000_card_present == 1) {
- DEBUG(0, "Media is down\n");
+ pr_debug("Media is down\n");
netif_stop_queue(dev);
ft1000_disable_interrupts(dev);
@@ -1971,31 +1898,28 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ft1000_info *info = netdev_priv(dev);
u8 *pdata;
- DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n");
if (skb == NULL) {
- DEBUG(1, "ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
+ pr_debug("skb == NULL!!!\n");
return 0;
}
- DEBUG(1, "ft1000_hw: ft1000_start_xmit:length of packet = %d\n",
- skb->len);
+ pr_debug("length of packet = %d\n", skb->len);
- pdata = (u8 *) skb->data;
+ pdata = (u8 *)skb->data;
if (info->mediastate == 0) {
/* Drop packet is mediastate is down */
- DEBUG(1, "ft1000_hw:ft1000_copy_down_pkt:mediastate is down\n");
+ pr_debug("mediastate is down\n");
return SUCCESS;
}
if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
/* Drop packet which has invalid size */
- DEBUG(1,
- "ft1000_hw:ft1000_copy_down_pkt:invalid ethernet length\n");
+ pr_debug("invalid ethernet length\n");
return SUCCESS;
}
ft1000_copy_down_pkt(dev, (u16 *) (pdata + ENET_HEADER_SIZE - 2),
- skb->len - ENET_HEADER_SIZE + 2);
+ skb->len - ENET_HEADER_SIZE + 2);
dev_kfree_skb(skb);
@@ -2004,14 +1928,12 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)dev_id;
+ struct net_device *dev = dev_id;
struct ft1000_info *info = netdev_priv(dev);
u16 tempword;
u16 inttype;
int cnt;
- DEBUG(1, "ft1000_hw: ft1000_interrupt()\n");
-
if (info->CardReady == 0) {
ft1000_disable_interrupts(dev);
return IRQ_HANDLED;
@@ -2033,19 +1955,19 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
ft1000_parse_dpram_msg(dev);
if (inttype & ISR_RCV) {
- DEBUG(1, "Data in FIFO\n");
+ pr_debug("Data in FIFO\n");
cnt = 0;
do {
/* Check if we have packets in the Downlink FIFO */
if (info->AsicID == ELECTRABUZZ_ID) {
tempword =
- ft1000_read_reg(dev,
- FT1000_REG_DFIFO_STAT);
+ ft1000_read_reg(dev,
+ FT1000_REG_DFIFO_STAT);
} else {
tempword =
- ft1000_read_reg(dev,
- FT1000_REG_MAG_DFSR);
+ ft1000_read_reg(dev,
+ FT1000_REG_MAG_DFSR);
}
if (tempword & 0x1f) {
ft1000_copy_up_pkt(dev);
@@ -2058,12 +1980,13 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
}
/* clear interrupts */
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_reg(dev, FT1000_REG_SUP_ISR, tempword);
/* Read interrupt type */
- inttype = ft1000_read_reg (dev, FT1000_REG_SUP_ISR);
- DEBUG(1, "ft1000_hw: interrupt status register after clear = 0x%x\n", inttype);
+ inttype = ft1000_read_reg(dev, FT1000_REG_SUP_ISR);
+ pr_debug("interrupt status register after clear = 0x%x\n",
+ inttype);
}
ft1000_enable_interrupts(dev);
return IRQ_HANDLED;
@@ -2075,8 +1998,6 @@ void stop_ft1000_card(struct net_device *dev)
struct prov_record *ptr;
/* int cnt; */
- DEBUG(0, "ft1000_hw: stop_ft1000_card()\n");
-
info->CardReady = 0;
ft1000_card_present = 0;
netif_stop_queue(dev);
@@ -2105,7 +2026,7 @@ void stop_ft1000_card(struct net_device *dev)
}
static void ft1000_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
struct ft1000_info *ft_info;
ft_info = netdev_priv(dev);
@@ -2121,6 +2042,7 @@ static void ft1000_get_drvinfo(struct net_device *dev,
static u32 ft1000_get_link(struct net_device *dev)
{
struct ft1000_info *info;
+
info = netdev_priv(dev);
return info->mediastate;
}
@@ -2131,37 +2053,36 @@ static const struct ethtool_ops ops = {
};
struct net_device *init_ft1000_card(struct pcmcia_device *link,
- void *ft1000_reset)
+ void *ft1000_reset)
{
struct ft1000_info *info;
struct ft1000_pcmcia *pcmcia;
struct net_device *dev;
static const struct net_device_ops ft1000ops = /* Slavius 21.10.2009 due to kernel changes */
- {
- .ndo_open = &ft1000_open,
- .ndo_stop = &ft1000_close,
- .ndo_start_xmit = &ft1000_start_xmit,
- .ndo_get_stats = &ft1000_stats,
- };
+ {
+ .ndo_open = &ft1000_open,
+ .ndo_stop = &ft1000_close,
+ .ndo_start_xmit = &ft1000_start_xmit,
+ .ndo_get_stats = &ft1000_stats,
+ };
- DEBUG(1, "ft1000_hw: init_ft1000_card()\n");
- DEBUG(1, "ft1000_hw: irq = %d\n", link->irq);
- DEBUG(1, "ft1000_hw: port = 0x%04x\n", link->resource[0]->start);
+ pr_debug("irq = %d, port = 0x%04llx\n",
+ link->irq, (unsigned long long)link->resource[0]->start);
flarion_ft1000_cnt++;
if (flarion_ft1000_cnt > 1) {
flarion_ft1000_cnt--;
- printk(KERN_INFO
- "ft1000: This driver can not support more than one instance\n");
+ dev_info(&link->dev,
+ "This driver can not support more than one instance\n");
return NULL;
}
dev = alloc_etherdev(sizeof(struct ft1000_info));
if (!dev) {
- printk(KERN_ERR "ft1000: failed to allocate etherdev\n");
+ dev_err(&link->dev, "Failed to allocate etherdev\n");
return NULL;
}
@@ -2170,9 +2091,9 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
memset(info, 0, sizeof(struct ft1000_info));
- DEBUG(1, "address of dev = 0x%8x\n", (u32) dev);
- DEBUG(1, "address of dev info = 0x%8x\n", (u32) info);
- DEBUG(0, "device name = %s\n", dev->name);
+ pr_debug("address of dev = 0x%p\n", dev);
+ pr_debug("address of dev info = 0x%p\n", info);
+ pr_debug("device name = %s\n", dev->name);
memset(&info->stats, 0, sizeof(struct net_device_stats));
@@ -2204,41 +2125,41 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
dev->netdev_ops = &ft1000ops; /* Slavius 21.10.2009 due to kernel changes */
- DEBUG(0, "device name = %s\n", dev->name);
+ pr_debug("device name = %s\n", dev->name);
dev->irq = link->irq;
dev->base_addr = link->resource[0]->start;
if (pcmcia_get_mac_from_cis(link, dev)) {
- printk(KERN_ERR "ft1000: Could not read mac address\n");
+ netdev_err(dev, "Could not read mac address\n");
goto err_dev;
}
if (request_irq(dev->irq, ft1000_interrupt, IRQF_SHARED, dev->name, dev)) {
- printk(KERN_ERR "ft1000: Could not request_irq\n");
+ netdev_err(dev, "Could not request_irq\n");
goto err_dev;
}
if (request_region(dev->base_addr, 256, dev->name) == NULL) {
- printk(KERN_ERR "ft1000: Could not request_region\n");
+ netdev_err(dev, "Could not request_region\n");
goto err_irq;
}
if (register_netdev(dev) != 0) {
- DEBUG(0, "ft1000: Could not register netdev");
+ pr_debug("Could not register netdev\n");
goto err_reg;
}
info->AsicID = ft1000_read_reg(dev, FT1000_REG_ASIC_ID);
if (info->AsicID == ELECTRABUZZ_ID) {
- DEBUG(0, "ft1000_hw: ELECTRABUZZ ASIC\n");
+ pr_debug("ELECTRABUZZ ASIC\n");
if (request_firmware(&fw_entry, "ft1000.img", &link->dev) != 0) {
- printk(KERN_INFO "ft1000: Could not open ft1000.img\n");
+ pr_info("Could not open ft1000.img\n");
goto err_unreg;
}
} else {
- DEBUG(0, "ft1000_hw: MAGNEMITE ASIC\n");
+ pr_debug("MAGNEMITE ASIC\n");
if (request_firmware(&fw_entry, "ft2000.img", &link->dev) != 0) {
- printk(KERN_INFO "ft1000: Could not open ft2000.img\n");
+ pr_info("Could not open ft2000.img\n");
goto err_unreg;
}
}
@@ -2247,8 +2168,8 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
ft1000_card_present = 1;
dev->ethtool_ops = &ops;
- printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+ pr_info("%s: addr 0x%04lx irq %d, MAC addr %pM\n",
+ dev->name, dev->base_addr, dev->irq, dev->dev_addr);
return dev;
err_unreg:
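
The hunks above apply one pattern throughout the ft1000_hw code: the driver's private DEBUG() macro becomes pr_debug(), bare printk(KERN_*) calls become pr_info()/dev_err()/netdev_err(), and a pr_fmt() definition supplies the module-name prefix once instead of repeating it in every format string. As a rough, hypothetical stand-alone sketch of that pattern (a demo module with invented names, not part of the driver):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>

static int __init logdemo_init(void)
{
	/* Compiled in only with DEBUG or dynamic debug, and automatically
	 * prefixed with the module name through pr_fmt(). */
	pr_debug("%s: initialising\n", __func__);

	/* Unconditional, still carries the pr_fmt() prefix. */
	pr_info("loaded\n");
	return 0;
}

static void __exit logdemo_exit(void)
{
	pr_debug("%s: exiting\n", __func__);
}

module_init(logdemo_init);
module_exit(logdemo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("pr_fmt()/pr_debug() logging sketch");

With dynamic debug enabled, such pr_debug() calls can be switched on at run time (e.g. echo 'module logdemo +p' > /sys/kernel/debug/dynamic_debug/control), which is what makes dropping the old DEBUG(level, ...) scheme practical.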
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 0f347ab0d300..c8d278229940 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -1,31 +1,34 @@
/*
-*---------------------------------------------------------------------------
-* FT1000 driver for Flarion Flash OFDM NIC Device
-*
-* Copyright (C) 2006 Flarion Technologies, All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation; either version 2 of the License, or (at your option) any
-* later version. This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details. You should have received a copy of the GNU General Public
-* License along with this program; if not, write to the
-* Free Software Foundation, Inc., 59 Temple Place -
-* Suite 330, Boston, MA 02111-1307, USA.
-*---------------------------------------------------------------------------
-*
-* File: ft1000_chdev.c
-*
-* Description: Custom character device dispatch routines.
-*
-* History:
-* 8/29/02 Whc Ported to Linux.
-* 6/05/06 Whc Porting to Linux 2.6.9
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * FT1000 driver for Flarion Flash OFDM NIC Device
+ *
+ * Copyright (C) 2006 Flarion Technologies, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version. This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details. You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place -
+ * Suite 330, Boston, MA 02111-1307, USA.
+ *---------------------------------------------------------------------------
+ *
+ * File: ft1000_chdev.c
+ *
+ * Description: Custom character device dispatch routines.
+ *
+ * History:
+ * 8/29/02 Whc Ported to Linux.
+ * 6/05/06 Whc Porting to Linux 2.6.9
+ *
+ *---------------------------------------------------------------------------
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -38,12 +41,12 @@
#include <linux/debugfs.h>
#include "ft1000_usb.h"
-static int ft1000_flarion_cnt = 0;
+static int ft1000_flarion_cnt;
static int ft1000_open(struct inode *inode, struct file *file);
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument);
+ unsigned long argument);
static int ft1000_release(struct inode *inode, struct file *file);
/* List to free receive command buffer pool */
@@ -55,8 +58,8 @@ spinlock_t free_buff_lock;
int numofmsgbuf = 0;
/*
-* Table of entry-point routines for char device
-*/
+ * Table of entry-point routines for char device
+ */
static const struct file_operations ft1000fops = {
.unlocked_ioctl = ft1000_ioctl,
.poll = ft1000_poll_dev,
@@ -66,104 +69,104 @@ static const struct file_operations ft1000fops = {
};
/*
----------------------------------------------------------------------------
-* Function: ft1000_get_buffer
-*
-* Parameters:
-*
-* Returns:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ ---------------------------------------------------------------------------
+ * Function: ft1000_get_buffer
+ *
+ * Parameters:
+ *
+ * Returns:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
{
- unsigned long flags;
+ unsigned long flags;
struct dpram_blk *ptr;
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Check if buffer is available */
- if (list_empty(bufflist)) {
- DEBUG("ft1000_get_buffer: No more buffer - %d\n", numofmsgbuf);
- ptr = NULL;
- } else {
- numofmsgbuf--;
- ptr = list_entry(bufflist->next, struct dpram_blk, list);
- list_del(&ptr->list);
- /* DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
- }
- spin_unlock_irqrestore(&free_buff_lock, flags);
-
- return ptr;
+ spin_lock_irqsave(&free_buff_lock, flags);
+ /* Check if buffer is available */
+ if (list_empty(bufflist)) {
+ pr_debug("No more buffer - %d\n", numofmsgbuf);
+ ptr = NULL;
+ } else {
+ numofmsgbuf--;
+ ptr = list_entry(bufflist->next, struct dpram_blk, list);
+ list_del(&ptr->list);
+ /* pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
+ }
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+
+ return ptr;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_free_buffer
-*
-* Parameters:
-*
-* Returns:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_free_buffer
+ *
+ * Parameters:
+ *
+ * Returns:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
{
- unsigned long flags;
-
- spin_lock_irqsave(&free_buff_lock, flags);
- /* Put memory back to list */
- list_add_tail(&pdpram_blk->list, plist);
- numofmsgbuf++;
- /*DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_buff_lock, flags);
+ /* Put memory back to list */
+ list_add_tail(&pdpram_blk->list, plist);
+ numofmsgbuf++;
+ /*pr_debug("number of free msg buffers = %d\n", numofmsgbuf); */
+ spin_unlock_irqrestore(&free_buff_lock, flags);
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_CreateDevice
-*
-* Parameters: dev - pointer to adapter object
-*
-* Returns: 0 if successful
-*
-* Description: Creates a private char device.
-*
-* Notes: Only called by init_module().
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_CreateDevice
+ *
+ * Parameters: dev - pointer to adapter object
+ *
+ * Returns: 0 if successful
+ *
+ * Description: Creates a private char device.
+ *
+ * Notes: Only called by init_module().
+ *
+ *---------------------------------------------------------------------------
+ */
int ft1000_create_dev(struct ft1000_usb *dev)
{
- int result;
- int i;
+ int result;
+ int i;
struct dentry *dir, *file;
struct ft1000_debug_dirs *tmp;
- /* make a new device name */
- sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
+ /* make a new device name */
+ sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
- DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
- DEBUG("DeviceCreated = %x\n", dev->DeviceCreated);
+ pr_debug("number of instance = %d\n", ft1000_flarion_cnt);
+ pr_debug("DeviceCreated = %x\n", dev->DeviceCreated);
- if (dev->DeviceCreated) {
- DEBUG("%s: \"%s\" already registered\n", __func__, dev->DeviceName);
- return -EIO;
- }
+ if (dev->DeviceCreated) {
+ pr_debug("\"%s\" already registered\n", dev->DeviceName);
+ return -EIO;
+ }
- /* register the device */
- DEBUG("%s: \"%s\" debugfs device registration\n", __func__, dev->DeviceName);
+ /* register the device */
+ pr_debug("\"%s\" debugfs device registration\n", dev->DeviceName);
tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
if (tmp == NULL) {
@@ -178,7 +181,7 @@ int ft1000_create_dev(struct ft1000_usb *dev)
}
file = debugfs_create_file("device", S_IRUGO | S_IWUSR, dir,
- dev, &ft1000fops);
+ dev, &ft1000fops);
if (IS_ERR(file)) {
result = PTR_ERR(file);
goto debug_file_fail;
@@ -189,25 +192,25 @@ int ft1000_create_dev(struct ft1000_usb *dev)
tmp->int_number = dev->CardNumber;
list_add(&(tmp->list), &(dev->nodes.list));
- DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, dev->DeviceName);
-
- /* initialize application information */
- dev->appcnt = 0;
- for (i=0; i<MAX_NUM_APP; i++) {
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
- dev->app_info[i].fileobject = NULL;
- dev->app_info[i].app_id = i+1;
- dev->app_info[i].DspBCMsgFlag = 0;
- dev->app_info[i].NumOfMsg = 0;
- init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
- INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
- }
-
- dev->DeviceCreated = TRUE;
- ft1000_flarion_cnt++;
+ pr_debug("registered debugfs directory \"%s\"\n", dev->DeviceName);
+
+ /* initialize application information */
+ dev->appcnt = 0;
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
+ dev->app_info[i].fileobject = NULL;
+ dev->app_info[i].app_id = i+1;
+ dev->app_info[i].DspBCMsgFlag = 0;
+ dev->app_info[i].NumOfMsg = 0;
+ init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
+ INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
+ }
+
+ dev->DeviceCreated = TRUE;
+ ft1000_flarion_cnt++;
return 0;
@@ -220,33 +223,29 @@ fail:
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_DestroyDeviceDEBUG
-*
-* Parameters: dev - pointer to adapter object
-*
-* Description: Destroys a private char device.
-*
-* Notes: Only called by cleanup_module().
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_DestroyDeviceDEBUG
+ *
+ * Parameters: dev - pointer to adapter object
+ *
+ * Description: Destroys a private char device.
+ *
+ * Notes: Only called by cleanup_module().
+ *
+ *---------------------------------------------------------------------------
+ */
void ft1000_destroy_dev(struct net_device *netdev)
{
struct ft1000_info *info = netdev_priv(netdev);
struct ft1000_usb *dev = info->priv;
- int i;
+ int i;
struct dpram_blk *pdpram_blk;
struct dpram_blk *ptr;
struct list_head *pos, *q;
struct ft1000_debug_dirs *dir;
- DEBUG("%s called\n", __func__);
-
-
-
- if (dev->DeviceCreated) {
- ft1000_flarion_cnt--;
+ if (dev->DeviceCreated) {
+ ft1000_flarion_cnt--;
list_for_each_safe(pos, q, &dev->nodes.list) {
dir = list_entry(pos, struct ft1000_debug_dirs, list);
if (dir->int_number == dev->CardNumber) {
@@ -256,29 +255,28 @@ void ft1000_destroy_dev(struct net_device *netdev)
kfree(dir);
}
}
- DEBUG("%s: unregistered device \"%s\"\n", __func__,
- dev->DeviceName);
-
- /* Make sure we free any memory reserve for slow Queue */
- for (i=0; i<MAX_NUM_APP; i++) {
- while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
-
- }
- wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
- }
-
- /* Remove buffer allocated for receive command data */
- if (ft1000_flarion_cnt == 0) {
- while (list_empty(&freercvpool) == 0) {
- ptr = list_entry(freercvpool.next, struct dpram_blk, list);
- list_del(&ptr->list);
- kfree(ptr->pbuffer);
- kfree(ptr);
- }
- }
+ pr_debug("unregistered device \"%s\"\n", dev->DeviceName);
+
+ /* Make sure we free any memory reserved for the slow queue */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
+ pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+
+ }
+ wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
+ }
+
+ /* Remove buffer allocated for receive command data */
+ if (ft1000_flarion_cnt == 0) {
+ while (list_empty(&freercvpool) == 0) {
+ ptr = list_entry(freercvpool.next, struct dpram_blk, list);
+ list_del(&ptr->list);
+ kfree(ptr->pbuffer);
+ kfree(ptr);
+ }
+ }
dev->DeviceCreated = FALSE;
}
@@ -286,507 +284,499 @@ void ft1000_destroy_dev(struct net_device *netdev)
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_open
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_open
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static int ft1000_open(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct ft1000_usb *dev = (struct ft1000_usb *)inode->i_private;
- int i,num;
+ int i, num;
- DEBUG("%s called\n", __func__);
- num = (MINOR(inode->i_rdev) & 0xf);
- DEBUG("ft1000_open: minor number=%d\n", num);
+ num = (MINOR(inode->i_rdev) & 0xf);
+ pr_debug("minor number=%d\n", num);
info = file->private_data = netdev_priv(dev->net);
- DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), dev->appcnt);
-
- /* Check if maximum number of application exceeded */
- if (dev->appcnt > MAX_NUM_APP) {
- DEBUG("Maximum number of application exceeded\n");
- return -EACCES;
- }
-
- /* Search for available application info block */
- for (i=0; i<MAX_NUM_APP; i++) {
- if ((dev->app_info[i].fileobject == NULL)) {
- break;
- }
- }
-
- /* Fail due to lack of application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("Could not find an application info block\n");
- return -EACCES;
- }
-
- dev->appcnt++;
- dev->app_info[i].fileobject = &file->f_owner;
- dev->app_info[i].nTxMsg = 0;
- dev->app_info[i].nRxMsg = 0;
- dev->app_info[i].nTxMsgReject = 0;
- dev->app_info[i].nRxMsgMiss = 0;
+ pr_debug("f_owner = %p number of application = %d\n",
+ &file->f_owner, dev->appcnt);
+
+ /* Check if maximum number of application exceeded */
+ if (dev->appcnt > MAX_NUM_APP) {
+ pr_debug("Maximum number of application exceeded\n");
+ return -EACCES;
+ }
+
+ /* Search for available application info block */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if ((dev->app_info[i].fileobject == NULL)) {
+ break;
+ }
+ }
+
+ /* Fail due to lack of application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find an application info block\n");
+ return -EACCES;
+ }
+
+ dev->appcnt++;
+ dev->app_info[i].fileobject = &file->f_owner;
+ dev->app_info[i].nTxMsg = 0;
+ dev->app_info[i].nRxMsg = 0;
+ dev->app_info[i].nTxMsgReject = 0;
+ dev->app_info[i].nRxMsgMiss = 0;
nonseekable_open(inode, file);
- return 0;
+ return 0;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_poll_dev
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_poll_dev
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
{
- struct net_device *netdev = file->private_data;
+ struct net_device *netdev = file->private_data;
struct ft1000_info *info = netdev_priv(netdev);
struct ft1000_usb *dev = info->priv;
- int i;
-
- /* DEBUG("ft1000_poll_dev called\n"); */
- if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_poll_dev called when ft1000_flarion_cnt is zero\n");
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (dev->app_info[i].fileobject == &file->f_owner) {
- /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
- return (-EACCES);
- }
-
- if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
- DEBUG("FT1000:ft1000_poll_dev:Message detected in slow queue\n");
- return(POLLIN | POLLRDNORM | POLLPRI);
- }
-
- poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
- /* DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n"); */
+ int i;
+
+ if (ft1000_flarion_cnt == 0) {
+ pr_debug("called with ft1000_flarion_cnt value zero\n");
+ return -EBADF;
+ }
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (dev->app_info[i].fileobject == &file->f_owner) {
+ /* pr_debug("Message is for AppId = %d\n", dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ /* Could not find application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find application info block\n");
+ return -EACCES;
+ }
+
+ if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
+ pr_debug("Message detected in slow queue\n");
+ return POLLIN | POLLRDNORM | POLLPRI;
+ }
+
+ poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
+ /* pr_debug("Polling for data from DSP\n"); */
return 0;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_ioctl
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_ioctl
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static long ft1000_ioctl(struct file *file, unsigned int command,
- unsigned long argument)
+ unsigned long argument)
{
- void __user *argp = (void __user *)argument;
+ void __user *argp = (void __user *)argument;
struct ft1000_info *info;
- struct ft1000_usb *ft1000dev;
- int result=0;
- int cmd;
- int i;
- u16 tempword;
- unsigned long flags;
- struct timeval tv;
+ struct ft1000_usb *ft1000dev;
+ int result = 0;
+ int cmd;
+ int i;
+ u16 tempword;
+ unsigned long flags;
+ struct timeval tv;
struct IOCTL_GET_VER get_ver_data;
struct IOCTL_GET_DSP_STAT get_stat_data;
- u8 ConnectionMsg[] = {0x00,0x44,0x10,0x20,0x80,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x93,0x64,
- 0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x0a,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x02,0x37,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x7f,0x00,
- 0x00,0x01,0x00,0x00};
+ u8 ConnectionMsg[] = {0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x00, 0x00};
+
+ unsigned short ledStat = 0;
+ unsigned short conStat = 0;
+
+ if (ft1000_flarion_cnt == 0) {
+ pr_debug("called with ft1000_flarion_cnt of zero\n");
+ return -EBADF;
+ }
- unsigned short ledStat=0;
- unsigned short conStat=0;
+ /* pr_debug("command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
- /* DEBUG("ft1000_ioctl called\n"); */
+ info = file->private_data;
+ ft1000dev = info->priv;
+ cmd = _IOC_NR(command);
+ /* pr_debug("cmd = 0x%x\n", cmd); */
+
+ /* process the command */
+ switch (cmd) {
+ case IOCTL_REGISTER_CMD:
+ pr_debug("IOCTL_FT1000_REGISTER called\n");
+ result = get_user(tempword, (__u16 __user *)argp);
+ if (result) {
+ pr_debug("result = %d failed to get_user\n", result);
+ break;
+ }
+ if (tempword == DSPBCMSGID) {
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ ft1000dev->app_info[i].DspBCMsgFlag = 1;
+ pr_debug("Registered for broadcast messages\n");
+ break;
+ }
+ }
+ }
+ break;
- if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_ioctl called when ft1000_flarion_cnt is zero\n");
- return (-EBADF);
- }
+ case IOCTL_GET_VER_CMD:
+ pr_debug("IOCTL_FT1000_GET_VER called\n");
- /* DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
+ get_ver_data.drv_ver = FT1000_DRV_VER;
- info = file->private_data;
- ft1000dev = info->priv;
- cmd = _IOC_NR(command);
- /* DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd); */
-
- /* process the command */
- switch (cmd) {
- case IOCTL_REGISTER_CMD:
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_REGISTER called\n");
- result = get_user(tempword, (__u16 __user*)argp);
- if (result) {
- DEBUG("result = %d failed to get_user\n", result);
- break;
- }
- if (tempword == DSPBCMSGID) {
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- ft1000dev->app_info[i].DspBCMsgFlag = 1;
- DEBUG("FT1000:ft1000_ioctl:Registered for broadcast messages\n");
- break;
- }
- }
- }
- break;
-
- case IOCTL_GET_VER_CMD:
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_VER called\n");
-
- get_ver_data.drv_ver = FT1000_DRV_VER;
-
- if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
- break;
- }
-
- DEBUG("FT1000:ft1000_ioctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
-
- break;
- case IOCTL_CONNECT:
- /* Connect Message */
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_CONNECT\n");
- ConnectionMsg[79] = 0xfc;
- result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
-
- break;
- case IOCTL_DISCONNECT:
- /* Disconnect Message */
- DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_DISCONNECT\n");
- ConnectionMsg[79] = 0xfd;
- result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
- break;
- case IOCTL_GET_DSP_STAT_CMD:
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n"); */
- memset(&get_stat_data, 0, sizeof(get_stat_data));
- memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
- memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
- memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
- memcpy(get_stat_data.eui64, info->eui64, EUISZ);
-
- if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
- get_stat_data.LedStat = ntohs(ledStat);
- DEBUG("FT1000:ft1000_ioctl: LedStat = 0x%x\n", get_stat_data.LedStat);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
- get_stat_data.ConStat = ntohs(conStat);
- DEBUG("FT1000:ft1000_ioctl: ConStat = 0x%x\n", get_stat_data.ConStat);
- } else {
- get_stat_data.ConStat = 0x0f;
- }
-
-
- get_stat_data.nTxPkts = info->stats.tx_packets;
- get_stat_data.nRxPkts = info->stats.rx_packets;
- get_stat_data.nTxBytes = info->stats.tx_bytes;
- get_stat_data.nRxBytes = info->stats.rx_bytes;
- do_gettimeofday(&tv);
- get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
- DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
- if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
- break;
- }
- DEBUG("ft1000_chioctl: GET_DSP_STAT succeed\n");
- break;
- case IOCTL_SET_DPRAM_CMD:
- {
+ if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+
+ pr_debug("driver version = 0x%x\n",
+ (unsigned int)get_ver_data.drv_ver);
+
+ break;
+ case IOCTL_CONNECT:
+ /* Connect Message */
+ pr_debug("IOCTL_FT1000_CONNECT\n");
+ ConnectionMsg[79] = 0xfc;
+ result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+
+ break;
+ case IOCTL_DISCONNECT:
+ /* Disconnect Message */
+ pr_debug("IOCTL_FT1000_DISCONNECT\n");
+ ConnectionMsg[79] = 0xfd;
+ result = card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+ break;
+ case IOCTL_GET_DSP_STAT_CMD:
+ /* pr_debug("IOCTL_FT1000_GET_DSP_STAT\n"); */
+ memset(&get_stat_data, 0, sizeof(get_stat_data));
+ memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
+ memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
+ memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
+ memcpy(get_stat_data.eui64, info->eui64, EUISZ);
+
+ if (info->ProgConStat != 0xFF) {
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+ get_stat_data.LedStat = ntohs(ledStat);
+ pr_debug("LedStat = 0x%x\n", get_stat_data.LedStat);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+ get_stat_data.ConStat = ntohs(conStat);
+ pr_debug("ConStat = 0x%x\n", get_stat_data.ConStat);
+ } else {
+ get_stat_data.ConStat = 0x0f;
+ }
+
+
+ get_stat_data.nTxPkts = info->stats.tx_packets;
+ get_stat_data.nRxPkts = info->stats.rx_packets;
+ get_stat_data.nTxBytes = info->stats.tx_bytes;
+ get_stat_data.nRxBytes = info->stats.rx_bytes;
+ do_gettimeofday(&tv);
+ get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
+ pr_debug("Connection Time = %d\n", (int)get_stat_data.ConTm);
+ if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+ pr_debug("GET_DSP_STAT succeed\n");
+ break;
+ case IOCTL_SET_DPRAM_CMD:
+ {
struct IOCTL_DPRAM_BLK *dpram_data = NULL;
/* struct IOCTL_DPRAM_COMMAND dpram_command; */
- u16 qtype;
- u16 msgsz;
+ u16 qtype;
+ u16 msgsz;
struct pseudo_hdr *ppseudo_hdr;
- u16 *pmsg;
- u16 total_len;
- u16 app_index;
- u16 status;
+ u16 *pmsg;
+ u16 total_len;
+ u16 app_index;
+ u16 status;
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");*/
+ /* pr_debug("IOCTL_FT1000_SET_DPRAM called\n");*/
- if (ft1000_flarion_cnt == 0) {
- return (-EBADF);
- }
+ if (ft1000_flarion_cnt == 0)
+ return -EBADF;
- if (ft1000dev->DrvMsgPend) {
- return (-ENOTTY);
- }
+ if (ft1000dev->DrvMsgPend)
+ return -ENOTTY;
- if (ft1000dev->fProvComplete == 0) {
- return (-EACCES);
- }
+ if (ft1000dev->fProvComplete == 0)
+ return -EACCES;
- ft1000dev->fAppMsgPend = 1;
+ ft1000dev->fAppMsgPend = 1;
- if (info->CardReady) {
+ if (info->CardReady) {
- /* DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n"); */
+ /* pr_debug("try to SET_DPRAM\n"); */
- /* Get the length field to see how many bytes to copy */
- result = get_user(msgsz, (__u16 __user *)argp);
- if (result)
- break;
- msgsz = ntohs(msgsz);
- /* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */
-
- if (msgsz > MAX_CMD_SQSIZE) {
- DEBUG("FT1000:ft1000_ioctl: bad message length = %d\n", msgsz);
- result = -EINVAL;
- break;
- }
-
- result = -ENOMEM;
- dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
- if (!dpram_data)
- break;
+ /* Get the length field to see how many bytes to copy */
+ result = get_user(msgsz, (__u16 __user *)argp);
+ if (result)
+ break;
+ msgsz = ntohs(msgsz);
+ /* pr_debug("length of message = %d\n", msgsz); */
+
+ if (msgsz > MAX_CMD_SQSIZE) {
+ pr_debug("bad message length = %d\n", msgsz);
+ result = -EINVAL;
+ break;
+ }
- if (copy_from_user(dpram_data, argp, msgsz+2)) {
- DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
- result = -EFAULT;
- } else {
- /* Check if this message came from a registered application */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- break;
- }
- }
- if (i==MAX_NUM_APP) {
- DEBUG("FT1000:No matching application fileobject\n");
- result = -EINVAL;
- kfree(dpram_data);
- break;
- }
- app_index = i;
-
- /* Check message qtype type which is the lower byte within qos_class */
- qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
- /* DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype); */
- if (qtype) {
- } else {
- /* Put message into Slow Queue */
- /* Only put a message into the DPRAM if msg doorbell is available */
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- /* DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword); */
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 2ms and try again due to DSP doorbell busy */
- mdelay(2);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 1ms and try again due to DSP doorbell busy */
- mdelay(1);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- /* Suspend for 3ms and try again due to DSP doorbell busy */
- mdelay(3);
- status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- if (tempword & FT1000_DB_DPRAM_TX) {
- DEBUG("FT1000:ft1000_ioctl:Doorbell not available\n");
- result = -ENOTTY;
- kfree(dpram_data);
- break;
- }
- }
- }
- }
- }
-
- /*DEBUG("FT1000_ft1000_ioctl: finished reading register\n"); */
-
- /* Make sure we are within the limits of the slow queue memory limitation */
- if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
- /* Need to put sequence number plus new checksum for message */
- pmsg = (u16 *)&dpram_data->pseudohdr;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- total_len = msgsz+2;
- if (total_len & 0x1) {
- total_len++;
- }
-
- /* Insert slow queue sequence number */
- ppseudo_hdr->seq_num = info->squeseqnum++;
- ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
- /* Calculate new checksum */
- ppseudo_hdr->checksum = *pmsg++;
- /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- for (i=1; i<7; i++) {
- ppseudo_hdr->checksum ^= *pmsg++;
- /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
- }
- pmsg++;
- ppseudo_hdr = (struct pseudo_hdr *)pmsg;
- result = card_send_command(ft1000dev,(unsigned short*)dpram_data,total_len+2);
-
-
- ft1000dev->app_info[app_index].nTxMsg++;
- } else {
- result = -EINVAL;
- }
- }
- }
- } else {
- DEBUG("FT1000:ft1000_ioctl: Card not ready take messages\n");
- result = -EACCES;
- }
- kfree(dpram_data);
-
- }
- break;
- case IOCTL_GET_DPRAM_CMD:
- {
+ result = -ENOMEM;
+ dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
+ if (!dpram_data)
+ break;
+
+ if (copy_from_user(dpram_data, argp, msgsz+2)) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ } else {
+ /* Check if this message came from a registered application */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ break;
+ }
+ }
+ if (i == MAX_NUM_APP) {
+ pr_debug("No matching application fileobject\n");
+ result = -EINVAL;
+ kfree(dpram_data);
+ break;
+ }
+ app_index = i;
+
+ /* Check message qtype type which is the lower byte within qos_class */
+ qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
+ /* pr_debug("qtype = %d\n", qtype); */
+ if (qtype) {
+ } else {
+ /* Put message into Slow Queue */
+ /* Only put a message into the DPRAM if msg doorbell is available */
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ /* pr_debug("READ REGISTER tempword=%x\n", tempword); */
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 2ms and try again due to DSP doorbell busy */
+ mdelay(2);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 1ms and try again due to DSP doorbell busy */
+ mdelay(1);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ /* Suspend for 3ms and try again due to DSP doorbell busy */
+ mdelay(3);
+ status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+ if (tempword & FT1000_DB_DPRAM_TX) {
+ pr_debug("Doorbell not available\n");
+ result = -ENOTTY;
+ kfree(dpram_data);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /*pr_debug("finished reading register\n"); */
+
+ /* Make sure we are within the limits of the slow queue memory limitation */
+ if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
+ /* Need to put sequence number plus new checksum for message */
+ pmsg = (u16 *)&dpram_data->pseudohdr;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ total_len = msgsz+2;
+ if (total_len & 0x1) {
+ total_len++;
+ }
+
+ /* Insert slow queue sequence number */
+ ppseudo_hdr->seq_num = info->squeseqnum++;
+ ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
+ /* Calculate new checksum */
+ ppseudo_hdr->checksum = *pmsg++;
+ /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
+ for (i = 1; i < 7; i++) {
+ ppseudo_hdr->checksum ^= *pmsg++;
+ /* pr_debug("checksum = 0x%x\n", ppseudo_hdr->checksum); */
+ }
+ pmsg++;
+ ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+ result = card_send_command(ft1000dev, (unsigned short *)dpram_data, total_len+2);
+
+
+ ft1000dev->app_info[app_index].nTxMsg++;
+ } else {
+ result = -EINVAL;
+ }
+ }
+ }
+ } else {
+ pr_debug("Card not ready to take messages\n");
+ result = -EACCES;
+ }
+ kfree(dpram_data);
+
+ }
+ break;
+ case IOCTL_GET_DPRAM_CMD:
+ {
struct dpram_blk *pdpram_blk;
struct IOCTL_DPRAM_BLK __user *pioctl_dpram;
- int msglen;
-
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n"); */
-
- if (ft1000_flarion_cnt == 0) {
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /*DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- /* Could not find application info block */
- if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
- result = -EBADF;
- break;
- }
-
- result = 0;
- pioctl_dpram = argp;
- if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- /* DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n"); */
- spin_lock_irqsave(&free_buff_lock, flags);
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000dev->app_info[i].NumOfMsg--;
- /* DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
- spin_unlock_irqrestore(&free_buff_lock, flags);
- msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
- result = get_user(msglen, &pioctl_dpram->total_len);
- if (result)
- break;
- msglen = htons(msglen);
- /* DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen); */
- if (copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
- DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
+ int msglen;
+
+ /* pr_debug("IOCTL_FT1000_GET_DPRAM called\n"); */
+
+ if (ft1000_flarion_cnt == 0)
+ return -EBADF;
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ /*pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ /* Could not find application info block */
+ if (i == MAX_NUM_APP) {
+ pr_debug("Could not find application info block\n");
+ result = -EBADF;
break;
- }
+ }
+
+ result = 0;
+ pioctl_dpram = argp;
+ if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
+ /* pr_debug("Message detected in slow queue\n"); */
+ spin_lock_irqsave(&free_buff_lock, flags);
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000dev->app_info[i].NumOfMsg--;
+ /* pr_debug("NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
+ spin_unlock_irqrestore(&free_buff_lock, flags);
+ msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
+ result = get_user(msglen, &pioctl_dpram->total_len);
+ if (result)
+ break;
+ msglen = htons(msglen);
+ /* pr_debug("msg length = %x\n", msglen); */
+ if (copy_to_user(&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
+ pr_debug("copy fault occurred\n");
+ result = -EFAULT;
+ break;
+ }
+
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ result = msglen;
+ }
+ /* pr_debug("IOCTL_FT1000_GET_DPRAM no message\n"); */
+ }
+ break;
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- result = msglen;
- }
- /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n"); */
- }
- break;
-
- default:
- DEBUG("FT1000:ft1000_ioctl:unknown command: 0x%x\n", command);
- result = -ENOTTY;
- break;
- }
- ft1000dev->fAppMsgPend = 0;
- return result;
+ default:
+ pr_debug("unknown command: 0x%x\n", command);
+ result = -ENOTTY;
+ break;
+ }
+ ft1000dev->fAppMsgPend = 0;
+ return result;
}
/*
-*---------------------------------------------------------------------------
-* Function: ft1000_release
-*
-* Parameters:
-*
-* Description:
-*
-* Notes:
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * Function: ft1000_release
+ *
+ * Parameters:
+ *
+ * Description:
+ *
+ * Notes:
+ *
+ *---------------------------------------------------------------------------
+ */
static int ft1000_release(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
- struct net_device *dev;
- struct ft1000_usb *ft1000dev;
- int i;
+ struct net_device *dev;
+ struct ft1000_usb *ft1000dev;
+ int i;
struct dpram_blk *pdpram_blk;
- DEBUG("ft1000_release called\n");
-
- dev = file->private_data;
+ dev = file->private_data;
info = netdev_priv(dev);
ft1000dev = info->priv;
- if (ft1000_flarion_cnt == 0) {
- ft1000dev->appcnt--;
- return (-EBADF);
- }
-
- /* Search for matching file object */
- for (i=0; i<MAX_NUM_APP; i++) {
- if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
- break;
- }
- }
-
- if (i==MAX_NUM_APP)
- return 0;
-
- while (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- DEBUG("Remove and free memory queue up on slow queue\n");
- pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
- list_del(&pdpram_blk->list);
- ft1000_free_buffer(pdpram_blk, &freercvpool);
- }
-
- /* initialize application information */
- ft1000dev->appcnt--;
- DEBUG("ft1000_chdev:%s:appcnt = %d\n", __func__, ft1000dev->appcnt);
- ft1000dev->app_info[i].fileobject = NULL;
-
- return 0;
+ if (ft1000_flarion_cnt == 0) {
+ ft1000dev->appcnt--;
+ return -EBADF;
+ }
+
+ /* Search for matching file object */
+ for (i = 0; i < MAX_NUM_APP; i++) {
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ /* pr_debug("Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
+ break;
+ }
+ }
+
+ if (i == MAX_NUM_APP)
+ return 0;
+
+ while (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
+ pr_debug("Remove and free memory queue up on slow queue\n");
+ pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+ list_del(&pdpram_blk->list);
+ ft1000_free_buffer(pdpram_blk, &freercvpool);
+ }
+
+ /* initialize application information */
+ ft1000dev->appcnt--;
+ pr_debug("appcnt = %d\n", ft1000dev->appcnt);
+ ft1000dev->app_info[i].fileobject = NULL;
+
+ return 0;
}
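
The ft1000_chdev.c hunks above replace the driver's homegrown DEBUG() macro with pr_debug() and rely on a pr_fmt() definition to supply the module-name prefix that the old "FT1000:function:" strings used to carry by hand. A minimal, self-contained sketch of that convention follows; the module and function names in it are hypothetical and not part of the driver:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>

static int __init prfmt_demo_init(void)
{
	/* With dynamic debug enabled this prints the module name, then "loaded, value = 3" */
	pr_debug("loaded, value = %d\n", 3);
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
	pr_debug("unloaded\n");
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");

Because pr_fmt() is expanded inside the pr_*() macros at compile time, it must be defined before the first include that pulls in <linux/printk.h>, which is why each file touched by this patch adds the #define ahead of its #include block.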
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 37707da09e9c..e8126325877b 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -1,8 +1,10 @@
/*
-* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-*
-* This file is part of Express Card USB Driver
-*/
+ * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
+ *
+ * This file is part of Express Card USB Driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -117,17 +119,16 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
while (loopcnt < 10) {
status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- DEBUG("check_usb_db: read FT1000_REG_DOORBELL value is %x\n",
- temp);
+ FT1000_REG_DOORBELL);
+ pr_debug("read FT1000_REG_DOORBELL value is %x\n", temp);
if (temp & 0x0080) {
- DEBUG("FT1000:Got checkusb doorbell\n");
+ pr_debug("Got checkusb doorbell\n");
status = ft1000_write_register(ft1000dev, 0x0080,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(ft1000dev, 0x0100,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(ft1000dev, 0x8000,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
break;
}
loopcnt++;
@@ -138,13 +139,13 @@ static int check_usb_db(struct ft1000_usb *ft1000dev)
loopcnt = 0;
while (loopcnt < 20) {
status = ft1000_read_register(ft1000dev, &temp,
- FT1000_REG_DOORBELL);
- DEBUG("FT1000:check_usb_db:Doorbell = 0x%x\n", temp);
+ FT1000_REG_DOORBELL);
+ pr_debug("Doorbell = 0x%x\n", temp);
if (temp & 0x8000) {
loopcnt++;
msleep(10);
} else {
- DEBUG("check_usb_db: door bell is cleared, return 0\n");
+ pr_debug("door bell is cleared, return 0\n");
return 0;
}
}
@@ -164,23 +165,22 @@ static u16 get_handshake(struct ft1000_usb *ft1000dev, u16 expected_value)
while (loopcnt < 100) {
/* Need to clear downloader doorbell if Hartley ASIC */
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (ft1000dev->fcodeldr) {
- DEBUG(" get_handshake: fcodeldr is %d\n",
- ft1000dev->fcodeldr);
+ pr_debug("fcodeldr is %d\n", ft1000dev->fcodeldr);
ft1000dev->fcodeldr = 0;
status = check_usb_db(ft1000dev);
if (status != 0) {
- DEBUG("get_handshake: check_usb_db failed\n");
+ pr_debug("check_usb_db failed\n");
break;
}
status = ft1000_write_register(ft1000dev,
- FT1000_DB_DNLD_RX,
- FT1000_REG_DOORBELL);
+ FT1000_DB_DNLD_RX,
+ FT1000_REG_DOORBELL);
}
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
+ DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
handshake = ntohs(handshake);
if (status)
@@ -209,12 +209,12 @@ static void put_handshake(struct ft1000_usb *ft1000dev, u16 handshake_value)
tempword = (u16)(tempx & 0xffff);
status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 0);
+ tempword, 0);
tempword = (u16)(tempx >> 16);
status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC,
- tempword, 1);
+ tempword, 1);
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
}
static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
@@ -230,27 +230,27 @@ static u16 get_handshake_usb(struct ft1000_usb *ft1000dev, u16 expected_value)
while (loopcnt < 100) {
if (ft1000dev->usbboot == 2) {
status = ft1000_read_dpram32(ft1000dev, 0,
- (u8 *)&(ft1000dev->tempbuf[0]), 64);
+ (u8 *)&(ft1000dev->tempbuf[0]), 64);
for (temp = 0; temp < 16; temp++) {
- DEBUG("tempbuf %d = 0x%x\n", temp,
- ft1000dev->tempbuf[temp]);
+ pr_debug("tempbuf %d = 0x%x\n",
+ temp, ft1000dev->tempbuf[temp]);
}
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
- DEBUG("handshake from read_dpram16 = 0x%x\n",
- handshake);
+ DWNLD_MAG1_HANDSHAKE_LOC,
+ (u8 *)&handshake, 1);
+ pr_debug("handshake from read_dpram16 = 0x%x\n",
+ handshake);
if (ft1000dev->dspalive == ft1000dev->tempbuf[6]) {
handshake = 0;
} else {
handshake = ft1000dev->tempbuf[1];
ft1000dev->dspalive =
- ft1000dev->tempbuf[6];
+ ft1000dev->tempbuf[6];
}
} else {
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_HANDSHAKE_LOC,
- (u8 *)&handshake, 1);
+ DWNLD_MAG1_HANDSHAKE_LOC,
+ (u8 *)&handshake, 1);
}
loopcnt++;
@@ -281,12 +281,12 @@ static u16 get_request_type(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
tempx |= (tempword << 16);
tempx = ntohl(tempx);
}
@@ -304,7 +304,7 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+ DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
} else {
if (ft1000dev->usbboot == 2) {
@@ -313,8 +313,8 @@ static u16 get_request_type_usb(struct ft1000_usb *ft1000dev)
} else {
tempx = 0;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_TYPE_LOC,
- (u8 *)&tempword, 1);
+ DWNLD_MAG1_TYPE_LOC,
+ (u8 *)&tempword, 1);
}
tempx |= (tempword << 16);
tempx = ntohl(tempx);
@@ -332,14 +332,14 @@ static long get_request_value(struct ft1000_usb *ft1000dev)
if (ft1000dev->bootmode == 1) {
status = fix_ft1000_read_dpram32(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
value = ntohl(value);
} else {
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
value = tempword;
status = ft1000_read_dpram16(ft1000dev,
- DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
+ DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
value |= (tempword << 16);
value = ntohl(value);
}
@@ -369,7 +369,7 @@ static u16 hdr_checksum(struct pseudo_hdr *pHdr)
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
- usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
+ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
return chksum;
}
@@ -387,7 +387,7 @@ static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset)
}
static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
- u16 tempbuffer[], u16 dpram)
+ u16 tempbuffer[], u16 dpram)
{
int status;
u16 resultbuffer[64];
@@ -395,38 +395,38 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
for (i = 0; i < 10; i++) {
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
+ (u8 *)&tempbuffer[0], 64);
if (status == 0) {
/* Work around for ASIC bit stuffing problem. */
if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
status = ft1000_write_dpram32(ft1000dev,
- dpram+12, (u8 *)&tempbuffer[24],
- 64);
+ dpram+12, (u8 *)&tempbuffer[24],
+ 64);
}
/* Let's check the data written */
status = ft1000_read_dpram32(ft1000dev, dpram,
- (u8 *)&resultbuffer[0], 64);
+ (u8 *)&resultbuffer[0], 64);
if ((tempbuffer[31] & 0xfe00) == 0xfe00) {
if (check_buffers(tempbuffer, resultbuffer, 28,
- 0)) {
- DEBUG("FT1000:download:DPRAM write failed 1 during bootloading\n");
+ 0)) {
+ pr_debug("DPRAM write failed 1 during bootloading\n");
usleep_range(9000, 11000);
break;
}
status = ft1000_read_dpram32(ft1000dev,
- dpram+12,
- (u8 *)&resultbuffer[0], 64);
+ dpram+12,
+ (u8 *)&resultbuffer[0], 64);
if (check_buffers(tempbuffer, resultbuffer, 16,
- 24)) {
- DEBUG("FT1000:download:DPRAM write failed 2 during bootloading\n");
+ 24)) {
+ pr_debug("DPRAM write failed 2 during bootloading\n");
usleep_range(9000, 11000);
break;
}
} else {
if (check_buffers(tempbuffer, resultbuffer, 32,
- 0)) {
- DEBUG("FT1000:download:DPRAM write failed 3 during bootloading\n");
+ 0)) {
+ pr_debug("DPRAM write failed 3 during bootloading\n");
usleep_range(9000, 11000);
break;
}
@@ -445,7 +445,7 @@ static int write_dpram32_and_check(struct ft1000_usb *ft1000dev,
* long word_length - length of the buffer to be written to DPRAM
*/
static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
- long word_length)
+ long word_length)
{
int status = 0;
u16 dpram;
@@ -453,7 +453,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
u16 tempword;
u16 tempbuffer[64];
- /*DEBUG("FT1000:download:start word_length = %d\n",(int)word_length); */
+ /*pr_debug("start word_length = %d\n",(int)word_length); */
dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
tempword = *(*pUsFile);
(*pUsFile)++;
@@ -483,21 +483,22 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
}
}
- /*DEBUG("write_blk: loopcnt is %d\n", loopcnt); */
- /*DEBUG("write_blk: bootmode = %d\n", bootmode); */
- /*DEBUG("write_blk: dpram = %x\n", dpram); */
+ /*pr_debug("loopcnt is %d\n", loopcnt); */
+ /*pr_debug("write_blk: bootmode = %d\n", bootmode); */
+ /*pr_debug("write_blk: dpram = %x\n", dpram); */
if (ft1000dev->bootmode == 0) {
if (dpram >= 0x3F4)
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 8);
+ (u8 *)&tempbuffer[0], 8);
else
status = ft1000_write_dpram32(ft1000dev, dpram,
- (u8 *)&tempbuffer[0], 64);
+ (u8 *)&tempbuffer[0], 64);
} else {
status = write_dpram32_and_check(ft1000dev, tempbuffer,
- dpram);
+ dpram);
if (status != 0) {
- DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]);
+ pr_debug("Write failed tempbuffer[31] = 0x%x\n",
+ tempbuffer[31]);
break;
}
}
@@ -508,7 +509,7 @@ static int write_blk(struct ft1000_usb *ft1000dev, u16 **pUsFile, u8 **pUcFile,
static void usb_dnld_complete(struct urb *urb)
{
- /* DEBUG("****** usb_dnld_complete\n"); */
+ /* pr_debug("****** usb_dnld_complete\n"); */
}
/* writes a block of DSP image to DPRAM
@@ -548,22 +549,21 @@ static int write_blk_fifo(struct ft1000_usb *ft1000dev, u16 **pUsFile,
}
static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
- u32 *state)
+ u32 *state)
{
int status = 0;
- DEBUG("FT1000:STATE_START_DWNLD\n");
if (ft1000dev->usbboot)
*hshake = get_handshake_usb(ft1000dev, HANDSHAKE_DSP_BL_READY);
else
*hshake = get_handshake(ft1000dev, HANDSHAKE_DSP_BL_READY);
if (*hshake == HANDSHAKE_DSP_BL_READY) {
- DEBUG("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
+ pr_debug("handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n");
put_handshake(ft1000dev, HANDSHAKE_DRIVER_READY);
} else if (*hshake == HANDSHAKE_TIMEOUT_VALUE) {
status = -ETIMEDOUT;
} else {
- DEBUG("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -ENETRESET;
}
*state = STATE_BOOT_DWNLD;
@@ -571,22 +571,22 @@ static int scram_start_dwnld(struct ft1000_usb *ft1000dev, u16 *hshake,
}
static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
- u8 **c_file, const u8 *endpoint, bool boot_case)
+ u8 **c_file, const u8 *endpoint, bool boot_case)
{
long word_length;
int status = 0;
- /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/
word_length = get_request_value(ft1000dev);
- /*DEBUG("FT1000:word_length = 0x%x\n", (int)word_length); */
+ /*pr_debug("word_length = 0x%x\n", (int)word_length); */
/*NdisMSleep (100); */
if (word_length > MAX_LENGTH) {
- DEBUG("FT1000:download:Download error: Max length exceeded\n");
+ pr_debug("Download error: Max length exceeded\n");
return -1;
}
if ((word_length * 2 + (long)c_file) > (long)endpoint) {
/* Error, beyond boot code range.*/
- DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length);
+ pr_debug("Download error: Requested len=%d exceeds BOOT code boundary\n",
+ (int)word_length);
return -1;
}
if (word_length & 0x1)
@@ -595,14 +595,14 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file,
if (boot_case) {
status = write_blk(ft1000dev, s_file, c_file, word_length);
- /*DEBUG("write_blk returned %d\n", status); */
+ /*pr_debug("write_blk returned %d\n", status); */
} else {
status = write_blk_fifo(ft1000dev, s_file, c_file, word_length);
if (ft1000dev->usbboot == 0)
ft1000dev->usbboot++;
if (ft1000dev->usbboot == 1)
status |= ft1000_write_dpram16(ft1000dev,
- DWNLD_MAG1_PS_HDR_LOC, 0, 0);
+ DWNLD_MAG1_PS_HDR_LOC, 0, 0);
}
return status;
}
@@ -641,8 +641,6 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
struct prov_record *pprov_record;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
- DEBUG("Entered scram_dnldr...\n");
-
ft1000dev->fcodeldr = 0;
ft1000dev->usbboot = 0;
ft1000dev->dspalive = 0xffff;
@@ -653,7 +651,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
state = STATE_START_DWNLD;
- file_hdr = (struct dsp_file_hdr *)pFileStart;
+ file_hdr = pFileStart;
ft1000_write_register(ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);
@@ -674,7 +672,7 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
case STATE_BOOT_DWNLD:
- DEBUG("FT1000:STATE_BOOT_DWNLD\n");
+ pr_debug("STATE_BOOT_DWNLD\n");
ft1000dev->bootmode = 1;
handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST);
if (handshake == HANDSHAKE_REQUEST) {
@@ -684,35 +682,34 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
request = get_request_type(ft1000dev);
switch (request) {
case REQUEST_RUN_ADDRESS:
- DEBUG("FT1000:REQUEST_RUN_ADDRESS\n");
+ pr_debug("REQUEST_RUN_ADDRESS\n");
put_request_value(ft1000dev,
loader_code_address);
break;
case REQUEST_CODE_LENGTH:
- DEBUG("FT1000:REQUEST_CODE_LENGTH\n");
+ pr_debug("REQUEST_CODE_LENGTH\n");
put_request_value(ft1000dev,
loader_code_size);
break;
case REQUEST_DONE_BL:
- DEBUG("FT1000:REQUEST_DONE_BL\n");
+ pr_debug("REQUEST_DONE_BL\n");
/* Reposition ptrs to beginning of code section */
s_file = (u16 *) (boot_end);
c_file = (u8 *) (boot_end);
- /* DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); */
- /* DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
+ /* pr_debug("download:s_file = 0x%8x\n", (int)s_file); */
+ /* pr_debug("FT1000:download:c_file = 0x%8x\n", (int)c_file); */
state = STATE_CODE_DWNLD;
ft1000dev->fcodeldr = 1;
break;
case REQUEST_CODE_SEGMENT:
status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- (const u8 *)boot_end,
- true);
- break;
+ &s_file, &c_file,
+ (const u8 *)boot_end,
+ true);
+ break;
default:
- DEBUG
- ("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",
- request);
+ pr_debug("Download error: Bad request type=%d in BOOT download state\n",
+ request);
status = -1;
break;
}
@@ -723,68 +720,60 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
put_handshake(ft1000dev,
HANDSHAKE_RESPONSE);
} else {
- DEBUG
- ("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -1;
}
break;
case STATE_CODE_DWNLD:
- /* DEBUG("FT1000:STATE_CODE_DWNLD\n"); */
+ /* pr_debug("STATE_CODE_DWNLD\n"); */
ft1000dev->bootmode = 0;
if (ft1000dev->usbboot)
handshake =
- get_handshake_usb(ft1000dev,
- HANDSHAKE_REQUEST);
+ get_handshake_usb(ft1000dev,
+ HANDSHAKE_REQUEST);
else
handshake =
- get_handshake(ft1000dev, HANDSHAKE_REQUEST);
+ get_handshake(ft1000dev, HANDSHAKE_REQUEST);
if (handshake == HANDSHAKE_REQUEST) {
/*
* Get type associated with the request.
*/
if (ft1000dev->usbboot)
request =
- get_request_type_usb(ft1000dev);
+ get_request_type_usb(ft1000dev);
else
request = get_request_type(ft1000dev);
switch (request) {
case REQUEST_FILE_CHECKSUM:
- DEBUG
- ("FT1000:download:image_chksum = 0x%8x\n",
- image_chksum);
+ pr_debug("image_chksum = 0x%8x\n",
+ image_chksum);
put_request_value(ft1000dev,
image_chksum);
break;
case REQUEST_RUN_ADDRESS:
- DEBUG
- ("FT1000:download: REQUEST_RUN_ADDRESS\n");
+ pr_debug("REQUEST_RUN_ADDRESS\n");
if (correct_version) {
- DEBUG
- ("FT1000:download:run_address = 0x%8x\n",
- (int)run_address);
+ pr_debug("run_address = 0x%8x\n",
+ (int)run_address);
put_request_value(ft1000dev,
run_address);
} else {
- DEBUG
- ("FT1000:download:Download error: Got Run address request before image offset request.\n");
+ pr_debug("Download error: Got Run address request before image offset request\n");
status = -1;
break;
}
break;
case REQUEST_CODE_LENGTH:
- DEBUG
- ("FT1000:download:REQUEST_CODE_LENGTH\n");
+ pr_debug("REQUEST_CODE_LENGTH\n");
if (correct_version) {
- DEBUG
- ("FT1000:download:run_size = 0x%8x\n",
- (int)run_size);
+ pr_debug("run_size = 0x%8x\n",
+ (int)run_size);
put_request_value(ft1000dev,
run_size);
} else {
- DEBUG
- ("FT1000:download:Download error: Got Size request before image offset request.\n");
+ pr_debug("Download error: Got Size request before image offset request\n");
status = -1;
break;
}
@@ -793,69 +782,66 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
ft1000dev->usbboot = 3;
/* Reposition ptrs to beginning of provisioning section */
s_file =
- (u16 *) (pFileStart +
- file_hdr->commands_offset);
+ (u16 *) (pFileStart +
+ file_hdr->commands_offset);
c_file =
- (u8 *) (pFileStart +
- file_hdr->commands_offset);
+ (u8 *) (pFileStart +
+ file_hdr->commands_offset);
state = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
- /* DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); */
+ /* pr_debug("REQUEST_CODE_SEGMENT - CODELOADER\n"); */
if (!correct_version) {
- DEBUG
- ("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
+ pr_debug("Download error: Got Code Segment request before image offset request\n");
status = -1;
break;
}
status = request_code_segment(ft1000dev,
- &s_file, &c_file,
- (const u8 *)code_end,
- false);
+ &s_file, &c_file,
+ (const u8 *)code_end,
+ false);
break;
case REQUEST_MAILBOX_DATA:
- DEBUG
- ("FT1000:download: REQUEST_MAILBOX_DATA\n");
+ pr_debug("REQUEST_MAILBOX_DATA\n");
/* Convert length from byte count to word count. Make sure we round up. */
word_length =
- (long)(pft1000info->DSPInfoBlklen +
- 1) / 2;
+ (long)(pft1000info->DSPInfoBlklen +
+ 1) / 2;
put_request_value(ft1000dev,
word_length);
mailbox_data =
- (struct drv_msg *)&(pft1000info->
- DSPInfoBlk[0]);
+ (struct drv_msg *)&(pft1000info->
+ DSPInfoBlk[0]);
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- data = (u16 *) &mailbox_data->data[0];
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
+ data = (u16 *)&mailbox_data->data[0];
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
- word_length = (word_length / 2);
+ word_length = word_length / 2;
for (; word_length > 0; word_length--) { /* In words */
templong = *data++;
templong |= (*data++ << 16);
status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *) &templong);
+ fix_ft1000_write_dpram32
+ (ft1000dev, dpram++,
+ (u8 *)&templong);
}
break;
case REQUEST_VERSION_INFO:
- DEBUG
- ("FT1000:download:REQUEST_VERSION_INFO\n");
+ pr_debug("REQUEST_VERSION_INFO\n");
word_length =
- file_hdr->version_data_size;
+ file_hdr->version_data_size;
put_request_value(ft1000dev,
word_length);
/*
@@ -863,15 +849,15 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
*/
s_file =
- (u16 *) (pFileStart +
- file_hdr->
- version_data_offset);
+ (u16 *) (pFileStart +
+ file_hdr->
+ version_data_offset);
- dpram = (u16) DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
- word_length = (word_length / 2);
+ word_length = word_length / 2;
for (; word_length > 0; word_length--) { /* In words */
@@ -879,26 +865,25 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
temp = ntohs(*s_file++);
templong |= (temp << 16);
status =
- fix_ft1000_write_dpram32
- (ft1000dev, dpram++,
- (u8 *) &templong);
+ fix_ft1000_write_dpram32
+ (ft1000dev, dpram++,
+ (u8 *)&templong);
}
break;
case REQUEST_CODE_BY_VERSION:
- DEBUG
- ("FT1000:download:REQUEST_CODE_BY_VERSION\n");
+ pr_debug("REQUEST_CODE_BY_VERSION\n");
correct_version = false;
requested_version =
- get_request_value(ft1000dev);
+ get_request_value(ft1000dev);
dsp_img_info =
- (struct dsp_image_info *)(pFileStart
- +
- sizeof
- (struct
- dsp_file_hdr));
+ (struct dsp_image_info *)(pFileStart
+ +
+ sizeof
+ (struct
+ dsp_file_hdr));
for (image = 0;
image < file_hdr->nDspImages;
@@ -907,30 +892,29 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
if (dsp_img_info->version ==
requested_version) {
correct_version = true;
- DEBUG
- ("FT1000:download: correct_version is TRUE\n");
+ pr_debug("correct_version is TRUE\n");
s_file =
- (u16 *) (pFileStart
- +
- dsp_img_info->
- begin_offset);
+ (u16 *) (pFileStart
+ +
+ dsp_img_info->
+ begin_offset);
c_file =
- (u8 *) (pFileStart +
- dsp_img_info->
- begin_offset);
+ (u8 *) (pFileStart +
+ dsp_img_info->
+ begin_offset);
code_end =
- (u8 *) (pFileStart +
- dsp_img_info->
- end_offset);
+ (u8 *) (pFileStart +
+ dsp_img_info->
+ end_offset);
run_address =
- dsp_img_info->
- run_address;
+ dsp_img_info->
+ run_address;
run_size =
- dsp_img_info->
- image_size;
+ dsp_img_info->
+ image_size;
image_chksum =
- (u32) dsp_img_info->
- checksum;
+ (u32)dsp_img_info->
+ checksum;
break;
}
dsp_img_info++;
@@ -941,18 +925,16 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/*
* Error, beyond boot code range.
*/
- DEBUG
- ("FT1000:download:Download error: Bad Version Request = 0x%x.\n",
- (int)requested_version);
+ pr_debug("Download error: Bad Version Request = 0x%x.\n",
+ (int)requested_version);
status = -1;
break;
}
break;
default:
- DEBUG
- ("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",
- request);
+ pr_debug("Download error: Bad request type=%d in CODE download state.\n",
+ request);
status = -1;
break;
}
@@ -963,20 +945,19 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
put_handshake(ft1000dev,
HANDSHAKE_RESPONSE);
} else {
- DEBUG
- ("FT1000:download:Download error: Handshake failed\n");
+ pr_debug("Download error: Handshake failed\n");
status = -1;
}
break;
case STATE_DONE_DWNLD:
- DEBUG("FT1000:download:Code loader is done...\n");
+ pr_debug("Code loader is done...\n");
state = STATE_SECTION_PROV;
break;
case STATE_SECTION_PROV:
- DEBUG("FT1000:download:STATE_SECTION_PROV\n");
+ pr_debug("STATE_SECTION_PROV\n");
pseudo_header = (struct pseudo_hdr *)c_file;
if (pseudo_header->checksum ==
@@ -990,9 +971,9 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Get buffer for provisioning data */
pbuffer =
- kmalloc((pseudo_header_len +
- sizeof(struct pseudo_hdr)),
- GFP_ATOMIC);
+ kmalloc((pseudo_header_len +
+ sizeof(struct pseudo_hdr)),
+ GFP_ATOMIC);
if (pbuffer) {
memcpy(pbuffer, (void *)c_file,
(u32) (pseudo_header_len +
@@ -1000,20 +981,20 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
pseudo_hdr)));
/* link provisioning data */
pprov_record =
- kmalloc(sizeof(struct prov_record),
- GFP_ATOMIC);
+ kmalloc(sizeof(struct prov_record),
+ GFP_ATOMIC);
if (pprov_record) {
pprov_record->pprov_data =
- pbuffer;
+ pbuffer;
list_add_tail(&pprov_record->
list,
&pft1000info->
prov_list);
/* Move to next entry if available */
c_file =
- (u8 *) ((unsigned long)
- c_file +
- (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+ (u8 *) ((unsigned long)
+ c_file +
+ (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
if ((unsigned long)(c_file) -
(unsigned long)(pFileStart)
>=
@@ -1031,13 +1012,12 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
/* Checksum did not compute */
status = -1;
}
- DEBUG
- ("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n",
- state, status);
+ pr_debug("after STATE_SECTION_PROV, state = %d, status= %d\n",
+ state, status);
break;
case STATE_DONE_PROV:
- DEBUG("FT1000:download:STATE_DONE_PROV\n");
+ pr_debug("STATE_DONE_PROV\n");
state = STATE_DONE_FILE;
break;
@@ -1050,21 +1030,21 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart,
break;
/****
- // Check if Card is present
- status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
- break;
- }
-
- status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
- if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
- break;
- }
+ // Check if Card is present
+ status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
+ break;
+ }
+
+ status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
+ break;
+ }
****/
} /* End while */
- DEBUG("Download exiting with status = 0x%8x\n", status);
+ pr_debug("Download exiting with status = 0x%8x\n", status);
ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX,
FT1000_REG_DOORBELL);
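
Several of the download hunks above (check_usb_db(), get_handshake(), and the ASIC-reset paths) share one idiom: poll FT1000_REG_DOORBELL in a bounded loop, sleep between reads, and give up after a fixed number of attempts. A condensed sketch of that idiom, reusing the driver's own ft1000_read_register() helper; the wrapper name and timeout values below are illustrative and not taken from the patch:

/* Assumes the driver's ft1000_usb.h and <linux/delay.h> are in scope. */
static int ft1000_wait_doorbell_clear(struct ft1000_usb *ft1000dev,
				      u16 mask, int max_tries)
{
	u16 temp;
	int i, status;

	for (i = 0; i < max_tries; i++) {
		status = ft1000_read_register(ft1000dev, &temp,
					      FT1000_REG_DOORBELL);
		if (status)
			return status;
		if (!(temp & mask))
			return 0;	/* doorbell bit cleared by the DSP */
		msleep(10);
	}
	return -ETIMEDOUT;	/* DSP never acknowledged */
}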
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 2e13e7b7ec10..d12cfc9aa32a 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -1,8 +1,10 @@
/* CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved.
-*
-*
-* This file is part of Express Card USB Driver
-*/
+ *
+ *
+ * This file is part of Express Card USB Driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -35,16 +37,16 @@ static u8 tempbuffer[1600];
#define MAX_RCV_LOOP 100
/* send a control message via USB interface synchronously
-* Parameters: ft1000_usb - device structure
-* pipe - usb control message pipe
-* request - control request
-* requesttype - control message request type
-* value - value to be written or 0
-* index - register index
-* data - data buffer to hold the read/write values
-* size - data size
-* timeout - control message time out value
-*/
+ * Parameters: ft1000_usb - device structure
+ * pipe - usb control message pipe
+ * request - control request
+ * requesttype - control message request type
+ * value - value to be written or 0
+ * index - register index
+ * data - data buffer to hold the read/write values
+ * size - data size
+ * timeout - control message time out value
+ */
static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
u8 request, u8 requesttype, u16 value, u16 index,
void *data, u16 size, int timeout)
@@ -52,7 +54,7 @@ static int ft1000_control(struct ft1000_usb *ft1000dev, unsigned int pipe,
int ret;
if ((ft1000dev == NULL) || (ft1000dev->dev == NULL)) {
- DEBUG("ft1000dev or ft1000dev->dev == NULL, failure\n");
+ pr_debug("ft1000dev or ft1000dev->dev == NULL, failure\n");
return -ENODEV;
}
@@ -171,7 +173,7 @@ int ft1000_read_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer,
/* write into DPRAM a number of bytes */
int ft1000_write_dpram16(struct ft1000_usb *ft1000dev, u16 indx, u16 value,
- u8 highlow)
+ u8 highlow)
{
int ret = 0;
u8 request;
@@ -212,7 +214,7 @@ int fix_ft1000_read_dpram32(struct ft1000_usb *ft1000dev, u16 indx,
*buffer++ = buf[pos++];
*buffer++ = buf[pos++];
} else {
- DEBUG("fix_ft1000_read_dpram32: DPRAM32 Read failed\n");
+ pr_debug("DPRAM32 Read failed\n");
*buffer++ = 0;
*buffer++ = 0;
*buffer++ = 0;
@@ -246,7 +248,7 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
buf[pos2++] = *buffer++;
ret = ft1000_write_dpram32(ft1000dev, pos1, buf, 16);
} else {
- DEBUG("fix_ft1000_write_dpram32: DPRAM32 Read failed\n");
+ pr_debug("DPRAM32 Read failed\n");
return ret;
}
@@ -270,8 +272,7 @@ int fix_ft1000_write_dpram32(struct ft1000_usb *ft1000dev, u16 indx, u8 *buffer)
for (i = 0; i < 16; i++) {
if (tempbuffer[i] != resultbuffer[i]) {
ret = -1;
- DEBUG("%s Failed to write\n",
- __func__);
+ pr_debug("Failed to write\n");
}
}
}
@@ -287,19 +288,19 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
u16 tempword;
status = ft1000_write_register(ft1000dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
+ FT1000_REG_SUP_CTRL);
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_SUP_CTRL);
if (value) {
- DEBUG("Reset DSP\n");
+ pr_debug("Reset DSP\n");
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_RESET);
tempword |= DSP_RESET_BIT;
status = ft1000_write_register(ft1000dev, tempword,
FT1000_REG_RESET);
} else {
- DEBUG("Activate DSP\n");
+ pr_debug("Activate DSP\n");
status = ft1000_read_register(ft1000dev, &tempword,
FT1000_REG_RESET);
tempword |= DSP_ENCRYPTED;
@@ -318,18 +319,18 @@ static void card_reset_dsp(struct ft1000_usb *ft1000dev, bool value)
}
/* send a command to ASIC
-* Parameters: ft1000_usb - device structure
-* ptempbuffer - command buffer
-* size - command buffer size
-*/
+ * Parameters: ft1000_usb - device structure
+ * ptempbuffer - command buffer
+ * size - command buffer size
+ */
int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
- int size)
+ int size)
{
int ret;
unsigned short temp;
unsigned char *commandbuf;
- DEBUG("card_send_command: enter card_send_command... size=%d\n", size);
+ pr_debug("enter card_send_command... size=%d\n", size);
commandbuf = kmalloc(size + 2, GFP_KERNEL);
if (!commandbuf)
@@ -355,7 +356,7 @@ int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
return ret;
usleep_range(900, 1100);
ret = ft1000_write_register(ft1000dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (ret)
return ret;
usleep_range(900, 1100);
@@ -364,7 +365,7 @@ int card_send_command(struct ft1000_usb *ft1000dev, void *ptempbuffer,
#if 0
if ((temp & 0x0100) == 0)
- DEBUG("card_send_command: Message sent\n");
+ pr_debug("Message sent\n");
#endif
return ret;
}
@@ -390,7 +391,7 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
status = ft1000_write_register(ft1000dev, tempword, FT1000_REG_RESET);
msleep(1000);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_RESET);
- DEBUG("Reset Register = 0x%x\n", tempword);
+ pr_debug("Reset Register = 0x%x\n", tempword);
/* Toggle DSP reset */
card_reset_dsp(ft1000dev, 1);
@@ -399,13 +400,13 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
msleep(1000);
status =
- ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
+ ft1000_write_register(ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
/* Let's check for FEFE */
status =
- ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX,
- (u8 *) &templong, 4);
- DEBUG("templong (fefe) = 0x%8x\n", templong);
+ ft1000_read_dpram32(ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX,
+ (u8 *)&templong, 4);
+ pr_debug("templong (fefe) = 0x%8x\n", templong);
/* call codeloader */
status = scram_dnldr(ft1000dev, pFileStart, FileLength);
@@ -415,8 +416,6 @@ int dsp_reload(struct ft1000_usb *ft1000dev)
msleep(1000);
- DEBUG("dsp_reload returned\n");
-
return 0;
}
@@ -427,8 +426,6 @@ static void ft1000_reset_asic(struct net_device *dev)
struct ft1000_usb *ft1000dev = info->priv;
u16 tempword;
- DEBUG("ft1000_hw:ft1000_reset_asic called\n");
-
/* Let's use the register provided by the Magnemite ASIC to reset the
* ASIC and DSP.
*/
@@ -442,10 +439,10 @@ static void ft1000_reset_asic(struct net_device *dev)
/* clear interrupts */
ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
ft1000_write_register(ft1000dev, tempword, FT1000_REG_SUP_ISR);
ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_ISR);
- DEBUG("ft1000_hw: interrupt status register = 0x%x\n", tempword);
+ pr_debug("interrupt status register = 0x%x\n", tempword);
}
static int ft1000_reset_card(struct net_device *dev)
@@ -455,38 +452,36 @@ static int ft1000_reset_card(struct net_device *dev)
u16 tempword;
struct prov_record *ptr;
- DEBUG("ft1000_hw:ft1000_reset_card called.....\n");
-
ft1000dev->fCondResetPend = true;
info->CardReady = 0;
ft1000dev->fProvComplete = false;
/* Make sure we free any memory reserve for provisioning */
while (list_empty(&info->prov_list) == 0) {
- DEBUG("ft1000_reset_card:deleting provisioning record\n");
+ pr_debug("deleting provisioning record\n");
ptr =
- list_entry(info->prov_list.next, struct prov_record, list);
+ list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
}
- DEBUG("ft1000_hw:ft1000_reset_card: reset asic\n");
+ pr_debug("reset asic\n");
ft1000_reset_asic(dev);
- DEBUG("ft1000_hw:ft1000_reset_card: call dsp_reload\n");
+ pr_debug("call dsp_reload\n");
dsp_reload(ft1000dev);
- DEBUG("dsp reload successful\n");
+ pr_debug("dsp reload successful\n");
mdelay(10);
/* Initialize DSP heartbeat area */
ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag,
FT1000_MAG_HI_HO_INDX);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *) &tempword,
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *)&tempword,
FT1000_MAG_HI_HO_INDX);
- DEBUG("ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n", tempword);
+ pr_debug("hi_ho value = 0x%x\n", tempword);
info->CardReady = 1;
@@ -508,8 +503,8 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
}
/* take an ethernet packet and convert it to a Flarion
-* packet prior to sending it to the ASIC Downlink FIFO.
-*/
+ * packet prior to sending it to the ASIC Downlink FIFO.
+ */
static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
{
struct ft1000_info *pInfo = netdev_priv(netdev);
@@ -520,14 +515,13 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
struct pseudo_hdr hdr;
if (!pInfo->CardReady) {
- DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
+ pr_debug("Card Not Ready\n");
return -ENODEV;
}
count = sizeof(struct pseudo_hdr) + len;
if (count > MAX_BUF_SIZE) {
- DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
- DEBUG("size = %d\n", count);
+ pr_debug("Message Size Overflow! size = %d\n", count);
return -EINVAL;
}
@@ -545,7 +539,7 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
hdr.control = 0x00;
hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
- hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
+ hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^ hdr.control;
memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
@@ -559,12 +553,12 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
pFt1000Dev->tx_buf, count,
ft1000_usb_transmit_complete, (void *)pFt1000Dev);
- t = (u8 *) pFt1000Dev->tx_urb->transfer_buffer;
+ t = (u8 *)pFt1000Dev->tx_urb->transfer_buffer;
ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
if (ret) {
- DEBUG("ft1000 failed tx_urb %d\n", ret);
+ pr_debug("failed tx_urb %d\n", ret);
return ret;
}
pInfo->stats.tx_packets++;
@@ -574,9 +568,9 @@ static int ft1000_copy_down_pkt(struct net_device *netdev, u8 *packet, u16 len)
}
/* transmit an ethernet packet
-* Parameters: skb - socket buffer to be sent
-* dev - network device
-*/
+ * Parameters: skb - socket buffer to be sent
+ * dev - network device
+ */
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ft1000_info *pInfo = netdev_priv(dev);
@@ -585,30 +579,30 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
int maxlen, pipe;
if (skb == NULL) {
- DEBUG("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n");
+ pr_debug("skb == NULL!!!\n");
return NETDEV_TX_OK;
}
if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
goto err;
}
pipe =
- usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
+ usb_sndbulkpipe(pFt1000Dev->dev, pFt1000Dev->bulk_out_endpointAddr);
maxlen = usb_maxpacket(pFt1000Dev->dev, pipe, usb_pipeout(pipe));
- pdata = (u8 *) skb->data;
+ pdata = (u8 *)skb->data;
if (pInfo->mediastate == 0) {
/* Drop packet is mediastate is down */
- DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
+ pr_debug("mediastate is down\n");
goto err;
}
if ((skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE)) {
/* Drop packet which has invalid size */
- DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
+ pr_debug("invalid ethernet length\n");
goto err;
}
@@ -628,7 +622,7 @@ static int ft1000_open(struct net_device *dev)
struct ft1000_usb *pFt1000Dev = pInfo->priv;
struct timeval tv;
- DEBUG("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
+ pr_debug("ft1000_open is called for card %d\n", pFt1000Dev->CardNumber);
pInfo->stats.rx_bytes = 0;
pInfo->stats.tx_bytes = 0;
@@ -676,11 +670,9 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
char card_nr[2];
u8 gCardIndex = 0;
- DEBUG("Enter init_ft1000_netdev...\n");
-
netdev = alloc_etherdev(sizeof(struct ft1000_info));
if (!netdev) {
- DEBUG("init_ft1000_netdev: can not allocate network device\n");
+ pr_debug("can not allocate network device\n");
return -ENOMEM;
}
@@ -690,7 +682,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
dev_alloc_name(netdev, netdev->name);
- DEBUG("init_ft1000_netdev: network device name is %s\n", netdev->name);
+ pr_debug("network device name is %s\n", netdev->name);
if (strncmp(netdev->name, "eth", 3) == 0) {
card_nr[0] = netdev->name[3];
@@ -702,7 +694,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
}
ft1000dev->CardNumber = gCardIndex;
- DEBUG("card number = %d\n", ft1000dev->CardNumber);
+ pr_debug("card number = %d\n", ft1000dev->CardNumber);
} else {
netdev_err(ft1000dev->net, "ft1000: Invalid device name\n");
ret_val = -ENXIO;
@@ -738,7 +730,7 @@ int init_ft1000_netdev(struct ft1000_usb *ft1000dev)
ft1000dev->net = netdev;
- DEBUG("Initialize free_buff_lock and freercvpool\n");
+ pr_debug("Initialize free_buff_lock and freercvpool\n");
spin_lock_init(&free_buff_lock);
/* initialize a list of buffers to be use for queuing
@@ -790,7 +782,6 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
netdev = ft1000dev->net;
pInfo = netdev_priv(ft1000dev->net);
- DEBUG("Enter reg_ft1000_netdev...\n");
ft1000_read_register(ft1000dev, &pInfo->AsicID, FT1000_REG_ASIC_ID);
@@ -799,23 +790,21 @@ int reg_ft1000_netdev(struct ft1000_usb *ft1000dev,
rc = register_netdev(netdev);
if (rc) {
- DEBUG("reg_ft1000_netdev: could not register network device\n");
+ pr_debug("could not register network device\n");
free_netdev(netdev);
return rc;
}
ft1000_create_dev(ft1000dev);
- DEBUG("reg_ft1000_netdev returned\n");
-
pInfo->CardReady = 1;
return 0;
}
/* take a packet from the FIFO up link and
-* convert it into an ethernet packet and deliver it to the IP stack
-*/
+ * convert it into an ethernet packet and deliver it to the IP stack
+ */
static int ft1000_copy_up_pkt(struct urb *urb)
{
struct ft1000_info *info = urb->context;
@@ -832,14 +821,14 @@ static int ft1000_copy_up_pkt(struct urb *urb)
u16 *chksum;
if (ft1000dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
return 0;
}
/* Read length */
len = urb->transfer_buffer_length;
lena = urb->actual_length;
- chksum = (u16 *) ft1000dev->rx_buf;
+ chksum = (u16 *)ft1000dev->rx_buf;
tempword = *chksum++;
for (i = 1; i < 7; i++)
@@ -854,13 +843,13 @@ static int ft1000_copy_up_pkt(struct urb *urb)
skb = dev_alloc_skb(len + 12 + 2);
if (skb == NULL) {
- DEBUG("ft1000_copy_up_pkt: No Network buffers available\n");
+ pr_debug("No Network buffers available\n");
info->stats.rx_errors++;
ft1000_submit_rx_urb(info);
return -1;
}
- pbuffer = (u8 *) skb_put(skb, len + 12);
+ pbuffer = (u8 *)skb_put(skb, len + 12);
/* subtract the number of bytes read already */
ptemp = pbuffer;
@@ -905,7 +894,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
struct ft1000_usb *pFt1000Dev = info->priv;
if (pFt1000Dev->status & FT1000_STATUS_CLOSING) {
- DEBUG("network driver is closed, return\n");
+ pr_debug("network driver is closed, return\n");
return -ENODEV;
}
@@ -914,13 +903,12 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
usb_rcvbulkpipe(pFt1000Dev->dev,
pFt1000Dev->bulk_in_endpointAddr),
pFt1000Dev->rx_buf, MAX_BUF_SIZE,
- (usb_complete_t) ft1000_copy_up_pkt, info);
+ (usb_complete_t)ft1000_copy_up_pkt, info);
result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC);
if (result) {
- pr_err("ft1000_submit_rx_urb: submitting rx_urb %d failed\n",
- result);
+ pr_err("submitting rx_urb %d failed\n", result);
return result;
}
@@ -935,7 +923,7 @@ int ft1000_close(struct net_device *net)
ft1000dev->status |= FT1000_STATUS_CLOSING;
- DEBUG("ft1000_close: pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev);
+ pr_debug("pInfo=%p, ft1000dev=%p\n", pInfo, ft1000dev);
netif_carrier_off(net);
netif_stop_queue(net);
ft1000dev->status &= ~FT1000_STATUS_CLOSING;
@@ -952,7 +940,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
int status;
if (dev->fCondResetPend) {
- DEBUG("ft1000_hw:ft1000_chkcard:Card is being reset, return FALSE\n");
+ pr_debug("Card is being reset, return FALSE\n");
return TRUE;
}
/* Mask register is used to check for device presence since it is never
@@ -960,7 +948,7 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
*/
status = ft1000_read_register(dev, &tempword, FT1000_REG_SUP_IMASK);
if (tempword == 0) {
- DEBUG("ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
+ pr_debug("IMASK = 0 Card not detected\n");
return FALSE;
}
/* The system will return the value of 0xffff for the version register
@@ -969,17 +957,17 @@ static int ft1000_chkcard(struct ft1000_usb *dev)
status = ft1000_read_register(dev, &tempword, FT1000_REG_ASIC_ID);
if (tempword != 0x1b01) {
dev->status |= FT1000_STATUS_CLOSING;
- DEBUG("ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
+ pr_debug("Version = 0xffff Card not detected\n");
return FALSE;
}
return TRUE;
}
/* read a message from the dpram area.
-* Input:
-* dev - network device structure
-* pbuffer - caller supply address to buffer
-*/
+ * Input:
+ * dev - network device structure
+ * pbuffer - caller supply address to buffer
+ */
static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
int maxsz)
{
@@ -990,46 +978,45 @@ static bool ft1000_receive_cmd(struct ft1000_usb *dev, u16 *pbuffer,
u16 tempword;
ret =
- ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *) &size,
- FT1000_MAG_PH_LEN_INDX);
+ ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *)&size,
+ FT1000_MAG_PH_LEN_INDX);
size = ntohs(size) + PSEUDOSZ;
if (size > maxsz) {
- DEBUG("FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
- size);
+ pr_debug("Invalid command length = %d\n", size);
return FALSE;
}
- ppseudohdr = (u16 *) pbuffer;
+ ppseudohdr = (u16 *)pbuffer;
ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE,
FT1000_REG_DPRAM_ADDR);
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
pbuffer++;
ft1000_write_register(dev, FT1000_DPRAM_MAG_RX_BASE + 1,
FT1000_REG_DPRAM_ADDR);
for (i = 0; i <= (size >> 2); i++) {
ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAL);
+ ft1000_read_register(dev, pbuffer,
+ FT1000_REG_MAG_DPDATAL);
pbuffer++;
ret =
- ft1000_read_register(dev, pbuffer,
- FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer,
+ FT1000_REG_MAG_DPDATAH);
pbuffer++;
}
/* copy odd aligned word */
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAL);
pbuffer++;
ret =
- ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
+ ft1000_read_register(dev, pbuffer, FT1000_REG_MAG_DPDATAH);
pbuffer++;
if (size & 0x0001) {
/* copy odd byte from fifo */
ret =
- ft1000_read_register(dev, &tempword,
- FT1000_REG_DPRAM_DATA);
+ ft1000_read_register(dev, &tempword,
+ FT1000_REG_DPRAM_DATA);
*pbuffer = ntohs(tempword);
}
/* Check if pseudo header checksum is good
@@ -1058,17 +1045,15 @@ static int ft1000_dsp_prov(void *arg)
int status;
u16 TempShortBuf[256];
- DEBUG("*** DspProv Entered\n");
-
while (list_empty(&info->prov_list) == 0) {
- DEBUG("DSP Provisioning List Entry\n");
+ pr_debug("DSP Provisioning List Entry\n");
/* Check if doorbell is available */
- DEBUG("check if doorbell is cleared\n");
+ pr_debug("check if doorbell is cleared\n");
status =
- ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
+ ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
if (status) {
- DEBUG("ft1000_dsp_prov::ft1000_read_register error\n");
+ pr_debug("ft1000_read_register error\n");
break;
}
@@ -1076,7 +1061,7 @@ static int ft1000_dsp_prov(void *arg)
mdelay(10);
i++;
if (i == 10) {
- DEBUG("FT1000:ft1000_dsp_prov:message drop\n");
+ pr_debug("message drop\n");
return -1;
}
ft1000_read_register(dev, &tempword,
@@ -1084,17 +1069,17 @@ static int ft1000_dsp_prov(void *arg)
}
if (!(tempword & FT1000_DB_DPRAM_TX)) {
- DEBUG("*** Provision Data Sent to DSP\n");
+ pr_debug("*** Provision Data Sent to DSP\n");
/* Send provisioning data */
ptr =
- list_entry(info->prov_list.next, struct prov_record,
- list);
- len = *(u16 *) ptr->pprov_data;
+ list_entry(info->prov_list.next, struct prov_record,
+ list);
+ len = *(u16 *)ptr->pprov_data;
len = htons(len);
len += PSEUDOSZ;
- pmsg = (u16 *) ptr->pprov_data;
+ pmsg = (u16 *)ptr->pprov_data;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
/* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
@@ -1109,12 +1094,12 @@ static int ft1000_dsp_prov(void *arg)
memcpy(&TempShortBuf[2], ppseudo_hdr, len);
status =
- ft1000_write_dpram32(dev, 0,
- (u8 *) &TempShortBuf[0],
- (unsigned short)(len + 2));
+ ft1000_write_dpram32(dev, 0,
+ (u8 *)&TempShortBuf[0],
+ (unsigned short)(len + 2));
status =
- ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
+ FT1000_REG_DOORBELL);
list_del(&ptr->list);
kfree(ptr->pprov_data);
@@ -1123,7 +1108,7 @@ static int ft1000_dsp_prov(void *arg)
usleep_range(9000, 11000);
}
- DEBUG("DSP Provisioning List Entry finished\n");
+ pr_debug("DSP Provisioning List Entry finished\n");
msleep(100);
@@ -1158,37 +1143,26 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
status = ft1000_read_dpram32(dev, 0x200, cmdbuffer, size);
#ifdef JDEBUG
- DEBUG("ft1000_proc_drvmsg:cmdbuffer\n");
- for (i = 0; i < size; i += 5) {
- if ((i + 5) < size)
- DEBUG("0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", cmdbuffer[i],
- cmdbuffer[i + 1], cmdbuffer[i + 2],
- cmdbuffer[i + 3], cmdbuffer[i + 4]);
- else {
- for (j = i; j < size; j++)
- DEBUG("0x%x ", cmdbuffer[j]);
- DEBUG("\n");
- break;
- }
- }
+ print_hex_dump_debug("cmdbuffer: ", HEX_DUMP_OFFSET, 16, 1,
+ cmdbuffer, size, true);
#endif
pdrvmsg = (struct drv_msg *)&cmdbuffer[2];
msgtype = ntohs(pdrvmsg->type);
- DEBUG("ft1000_proc_drvmsg:Command message type = 0x%x\n", msgtype);
+ pr_debug("Command message type = 0x%x\n", msgtype);
switch (msgtype) {
case MEDIA_STATE:{
- DEBUG("ft1000_proc_drvmsg:Command message type = MEDIA_STATE");
+ pr_debug("Command message type = MEDIA_STATE\n");
pmediamsg = (struct media_msg *)&cmdbuffer[0];
if (info->ProgConStat != 0xFF) {
if (pmediamsg->state) {
- DEBUG("Media is up\n");
+ pr_debug("Media is up\n");
if (info->mediastate == 0) {
if (dev->NetDevRegDone)
netif_wake_queue(dev->net);
info->mediastate = 1;
}
} else {
- DEBUG("Media is down\n");
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
if (dev->NetDevRegDone)
@@ -1196,7 +1170,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
}
}
} else {
- DEBUG("Media is down\n");
+ pr_debug("Media is down\n");
if (info->mediastate == 1) {
info->mediastate = 0;
info->ConTm = 0;
@@ -1205,20 +1179,20 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
break;
}
case DSP_INIT_MSG:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_INIT_MSG");
+ pr_debug("Command message type = DSP_INIT_MSG\n");
pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[2];
memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
- DEBUG("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
- info->DspVer[0], info->DspVer[1], info->DspVer[2],
- info->DspVer[3]);
+ pr_debug("DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
+ info->DspVer[0], info->DspVer[1], info->DspVer[2],
+ info->DspVer[3]);
memcpy(info->HwSerNum, pdspinitmsg->HwSerNum,
HWSERNUMSZ);
memcpy(info->Sku, pdspinitmsg->Sku, SKUSZ);
memcpy(info->eui64, pdspinitmsg->eui64, EUISZ);
- DEBUG("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
- info->eui64[0], info->eui64[1], info->eui64[2],
- info->eui64[3], info->eui64[4], info->eui64[5],
- info->eui64[6], info->eui64[7]);
+ pr_debug("EUI64=%2x.%2x.%2x.%2x.%2x.%2x.%2x.%2x\n",
+ info->eui64[0], info->eui64[1], info->eui64[2],
+ info->eui64[3], info->eui64[4], info->eui64[5],
+ info->eui64[6], info->eui64[7]);
dev->net->dev_addr[0] = info->eui64[0];
dev->net->dev_addr[1] = info->eui64[1];
dev->net->dev_addr[2] = info->eui64[2];
@@ -1229,17 +1203,17 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
if (ntohs(pdspinitmsg->length) ==
(sizeof(struct dsp_init_msg) - 20)) {
memcpy(info->ProductMode, pdspinitmsg->ProductMode,
- MODESZ);
+ MODESZ);
memcpy(info->RfCalVer, pdspinitmsg->RfCalVer, CALVERSZ);
memcpy(info->RfCalDate, pdspinitmsg->RfCalDate,
CALDATESZ);
- DEBUG("RFCalVer = 0x%2x 0x%2x\n", info->RfCalVer[0],
- info->RfCalVer[1]);
+ pr_debug("RFCalVer = 0x%2x 0x%2x\n",
+ info->RfCalVer[0], info->RfCalVer[1]);
}
break;
}
case DSP_PROVISION:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_PROVISION\n");
+ pr_debug("Command message type = DSP_PROVISION\n");
/* kick off dspprov routine to start provisioning
* Send provisioning data to DSP
@@ -1252,21 +1226,20 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
} else {
dev->fProvComplete = true;
status = ft1000_write_register(dev, FT1000_DB_HB,
- FT1000_REG_DOORBELL);
- DEBUG("FT1000:drivermsg:No more DSP provisioning data in dsp image\n");
+ FT1000_REG_DOORBELL);
+ pr_debug("No more DSP provisioning data in dsp image\n");
}
- DEBUG("ft1000_proc_drvmsg:DSP PROVISION is done\n");
+ pr_debug("DSP PROVISION is done\n");
break;
}
case DSP_STORE_INFO:{
- DEBUG("ft1000_proc_drvmsg:Command message type = DSP_STORE_INFO");
- DEBUG("FT1000:drivermsg:Got DSP_STORE_INFO\n");
+ pr_debug("Command message type = DSP_STORE_INFO");
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (u16 *) &pdrvmsg->data[0];
+ pmsg = (u16 *)&pdrvmsg->data[0];
for (i = 0; i < ((tempword + 1) / 2); i++) {
- DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
+ pr_debug("dsp info data = 0x%x\n", *pmsg);
info->DSPInfoBlk[i + 10] = *pmsg++;
}
} else {
@@ -1275,33 +1248,33 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
break;
}
case DSP_GET_INFO:{
- DEBUG("FT1000:drivermsg:Got DSP_GET_INFO\n");
+ pr_debug("Got DSP_GET_INFO\n");
/* copy dsp info block to dsp */
dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX)
break;
}
}
/* Put message into Slow Queue Form Pseudo header */
- pmsg = (u16 *) info->DSPInfoBlk;
+ pmsg = (u16 *)info->DSPInfoBlk;
*pmsg++ = 0;
*pmsg++ = htons(info->DSPInfoBlklen + 20 + info->DSPInfoBlklen);
ppseudo_hdr =
- (struct pseudo_hdr *)(u16 *) &info->DSPInfoBlk[2];
+ (struct pseudo_hdr *)(u16 *)&info->DSPInfoBlk[2];
ppseudo_hdr->length = htons(info->DSPInfoBlklen + 4
- + info->DSPInfoBlklen);
+ + info->DSPInfoBlklen);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
ppseudo_hdr->portdest = 0;
@@ -1323,31 +1296,31 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
info->DSPInfoBlk[10] = 0x7200;
info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPInfoBlk[0],
- (unsigned short)(info->DSPInfoBlklen + 22));
+ (u8 *)&info->DSPInfoBlk[0],
+ (unsigned short)(info->DSPInfoBlklen + 22));
status = ft1000_write_register(dev, FT1000_DB_DPRAM_TX,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
dev->DrvMsgPend = 0;
break;
}
case GET_DRV_ERR_RPT_MSG:{
- DEBUG("FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
+ pr_debug("Got GET_DRV_ERR_RPT_MSG\n");
/* copy driver error message to dsp */
dev->DrvMsgPend = 1;
/* allow any outstanding ioctl to finish */
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
mdelay(10);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX)
mdelay(10);
}
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
/* Put message into Slow Queue Form Pseudo header */
- pmsg = (u16 *) &tempbuffer[0];
+ pmsg = (u16 *)&tempbuffer[0];
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
@@ -1368,7 +1341,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
for (i = 1; i < 7; i++)
ppseudo_hdr->checksum ^= *pmsg++;
- pmsg = (u16 *) &tempbuffer[16];
+ pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -1384,7 +1357,7 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
*pmsg++ = htons(info->DrvErrNum);
status = card_send_command(dev, (unsigned char *)&tempbuffer[0],
- (u16)(0x0012 + PSEUDOSZ));
+ (u16)(0x0012 + PSEUDOSZ));
if (status)
goto out;
info->DrvErrNum = 0;
@@ -1399,7 +1372,6 @@ static int ft1000_proc_drvmsg(struct ft1000_usb *dev, u16 size)
status = 0;
out:
kfree(cmdbuffer);
- DEBUG("return from ft1000_proc_drvmsg\n");
return status;
}
@@ -1412,32 +1384,32 @@ static int dsp_broadcast_msg_id(struct ft1000_usb *dev)
for (i = 0; i < MAX_NUM_APP; i++) {
if ((dev->app_info[i].DspBCMsgFlag)
- && (dev->app_info[i].fileobject)
- && (dev->app_info[i].NumOfMsg
- < MAX_MSG_LIMIT)) {
+ && (dev->app_info[i].fileobject)
+ && (dev->app_info[i].NumOfMsg
+ < MAX_MSG_LIMIT)) {
pdpram_blk = ft1000_get_buffer(&freercvpool);
if (pdpram_blk == NULL) {
- DEBUG("Out of memory in free receive command pool\n");
+ pr_debug("Out of memory in free receive command pool\n");
dev->app_info[i].nRxMsgMiss++;
return -1;
}
if (ft1000_receive_cmd(dev, pdpram_blk->pbuffer,
- MAX_CMD_SQSIZE)) {
+ MAX_CMD_SQSIZE)) {
/* Put message into the
* appropriate application block
*/
dev->app_info[i].nRxMsg++;
spin_lock_irqsave(&free_buff_lock, flags);
list_add_tail(&pdpram_blk->list,
- &dev->app_info[i] .app_sqlist);
+ &dev->app_info[i] .app_sqlist);
dev->app_info[i].NumOfMsg++;
spin_unlock_irqrestore(&free_buff_lock, flags);
wake_up_interruptible(&dev->app_info[i]
- .wait_dpram_msg);
+ .wait_dpram_msg);
} else {
dev->app_info[i].nRxMsgMiss++;
ft1000_free_buffer(pdpram_blk, &freercvpool);
- DEBUG("pdpram_blk::ft1000_get_buffer NULL\n");
+ pr_debug("ft1000_get_buffer NULL\n");
return -1;
}
}
@@ -1452,7 +1424,7 @@ static int handle_misc_portid(struct ft1000_usb *dev)
pdpram_blk = ft1000_get_buffer(&freercvpool);
if (pdpram_blk == NULL) {
- DEBUG("Out of memory in free receive command pool\n");
+ pr_debug("Out of memory in free receive command pool\n");
return -1;
}
if (!ft1000_receive_cmd(dev, pdpram_blk->pbuffer, MAX_CMD_SQSIZE))
@@ -1461,11 +1433,12 @@ static int handle_misc_portid(struct ft1000_usb *dev)
/* Search for correct application block */
for (i = 0; i < MAX_NUM_APP; i++) {
if (dev->app_info[i].app_id == ((struct pseudo_hdr *)
- pdpram_blk->pbuffer)->portdest)
+ pdpram_blk->pbuffer)->portdest)
break;
}
if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_parse_dpram_msg: No application matching id = %d\n", ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
+ pr_debug("No application matching id = %d\n",
+ ((struct pseudo_hdr *)pdpram_blk->pbuffer)->portdest);
goto exit_failure;
} else if (dev->app_info[i].NumOfMsg > MAX_MSG_LIMIT) {
goto exit_failure;
@@ -1495,26 +1468,26 @@ int ft1000_poll(void *dev_id)
u16 portid;
if (ft1000_chkcard(dev) == FALSE) {
- DEBUG("ft1000_poll::ft1000_chkcard: failed\n");
+ pr_debug("failed\n");
return -1;
}
status = ft1000_read_register(dev, &tempword, FT1000_REG_DOORBELL);
if (!status) {
if (tempword & FT1000_DB_DPRAM_RX) {
status = ft1000_read_dpram16(dev,
- 0x200, (u8 *)&data, 0);
+ 0x200, (u8 *)&data, 0);
size = ntohs(data) + 16 + 2;
if (size % 4) {
modulo = 4 - (size % 4);
size = size + modulo;
}
status = ft1000_read_dpram16(dev, 0x201,
- (u8 *)&portid, 1);
+ (u8 *)&portid, 1);
portid &= 0xff;
if (size < MAX_CMD_SQSIZE) {
switch (portid) {
case DRIVERID:
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid DRIVERID\n");
status = ft1000_proc_drvmsg(dev, size);
if (status != 0)
return status;
@@ -1527,87 +1500,88 @@ int ft1000_poll(void *dev_id)
break;
}
} else
- DEBUG("FT1000:dpc:Invalid total length for SlowQ = %d\n", size);
+ pr_debug("Invalid total length for SlowQ = %d\n",
+ size);
status = ft1000_write_register(dev,
- FT1000_DB_DPRAM_RX,
- FT1000_REG_DOORBELL);
+ FT1000_DB_DPRAM_RX,
+ FT1000_REG_DOORBELL);
} else if (tempword & FT1000_DSP_ASIC_RESET) {
/* Let's reset the ASIC from the Host side as well */
status = ft1000_write_register(dev, ASIC_RESET_BIT,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
i = 0;
while (tempword & ASIC_RESET_BIT) {
status = ft1000_read_register(dev, &tempword,
- FT1000_REG_RESET);
+ FT1000_REG_RESET);
usleep_range(9000, 11000);
i++;
if (i == 100)
break;
}
if (i == 100) {
- DEBUG("Unable to reset ASIC\n");
+ pr_debug("Unable to reset ASIC\n");
return 0;
}
usleep_range(9000, 11000);
/* Program WMARK register */
status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
+ FT1000_REG_MAG_WATERMARK);
/* clear ASIC reset doorbell */
status = ft1000_write_register(dev,
- FT1000_DSP_ASIC_RESET,
- FT1000_REG_DOORBELL);
+ FT1000_DSP_ASIC_RESET,
+ FT1000_REG_DOORBELL);
usleep_range(9000, 11000);
} else if (tempword & FT1000_ASIC_RESET_REQ) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_ASIC_RESET_REQ\n");
/* clear ASIC reset request from DSP */
status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_REQ,
- FT1000_REG_DOORBELL);
+ FT1000_ASIC_RESET_REQ,
+ FT1000_REG_DOORBELL);
status = ft1000_write_register(dev, HOST_INTF_BE,
- FT1000_REG_SUP_CTRL);
+ FT1000_REG_SUP_CTRL);
/* copy dsp session record from Adapter block */
status = ft1000_write_dpram32(dev, 0,
- (u8 *)&info->DSPSess.Rec[0], 1024);
+ (u8 *)&info->DSPSess.Rec[0], 1024);
status = ft1000_write_register(dev, 0x600,
- FT1000_REG_MAG_WATERMARK);
+ FT1000_REG_MAG_WATERMARK);
/* ring doorbell to tell DSP that
* ASIC is out of reset
* */
status = ft1000_write_register(dev,
- FT1000_ASIC_RESET_DSP,
- FT1000_REG_DOORBELL);
+ FT1000_ASIC_RESET_DSP,
+ FT1000_REG_DOORBELL);
} else if (tempword & FT1000_DB_COND_RESET) {
- DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
+ pr_debug("FT1000_REG_DOORBELL message type: FT1000_DB_COND_RESET\n");
if (!dev->fAppMsgPend) {
/* Reset ASIC and DSP */
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER0,
- (u8 *)&(info->DSP_TIME[0]),
- FT1000_MAG_DSP_TIMER0_INDX);
+ FT1000_MAG_DSP_TIMER0,
+ (u8 *)&(info->DSP_TIME[0]),
+ FT1000_MAG_DSP_TIMER0_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER1,
- (u8 *)&(info->DSP_TIME[1]),
- FT1000_MAG_DSP_TIMER1_INDX);
+ FT1000_MAG_DSP_TIMER1,
+ (u8 *)&(info->DSP_TIME[1]),
+ FT1000_MAG_DSP_TIMER1_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER2,
- (u8 *)&(info->DSP_TIME[2]),
- FT1000_MAG_DSP_TIMER2_INDX);
+ FT1000_MAG_DSP_TIMER2,
+ (u8 *)&(info->DSP_TIME[2]),
+ FT1000_MAG_DSP_TIMER2_INDX);
status = ft1000_read_dpram16(dev,
- FT1000_MAG_DSP_TIMER3,
- (u8 *)&(info->DSP_TIME[3]),
- FT1000_MAG_DSP_TIMER3_INDX);
+ FT1000_MAG_DSP_TIMER3,
+ (u8 *)&(info->DSP_TIME[3]),
+ FT1000_MAG_DSP_TIMER3_INDX);
info->CardReady = 0;
info->DrvErrNum = DSP_CONDRESET_INFO;
- DEBUG("ft1000_hw:DSP conditional reset requested\n");
+ pr_debug("DSP conditional reset requested\n");
info->ft1000_reset(dev->net);
} else {
dev->fProvComplete = false;
dev->fCondResetPend = true;
}
ft1000_write_register(dev, FT1000_DB_COND_RESET,
- FT1000_REG_DOORBELL);
+ FT1000_REG_DOORBELL);
}
}
return 0;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index cb644a58d9f3..e9472bebda0b 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -1,30 +1,30 @@
/*
-*---------------------------------------------------------------------------
-* FT1000 driver for Flarion Flash OFDM NIC Device
-*
-* Copyright (C) 2002 Flarion Technologies, All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation; either version 2 of the License, or (at your option) any
-* later version. This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-* more details. You should have received a copy of the GNU General Public
-* License along with this program; if not, write to the
-* Free Software Foundation, Inc., 59 Temple Place -
-* Suite 330, Boston, MA 02111-1307, USA.
-*---------------------------------------------------------------------------
-*
-* File: ft1000_ioctl.h
-*
-* Description: Common structures and defines relating to IOCTL
-*
-* History:
-* 11/5/02 Whc Created.
-*
-*---------------------------------------------------------------------------
-*/
+ *---------------------------------------------------------------------------
+ * FT1000 driver for Flarion Flash OFDM NIC Device
+ *
+ * Copyright (C) 2002 Flarion Technologies, All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version. This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details. You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place -
+ * Suite 330, Boston, MA 02111-1307, USA.
+ *---------------------------------------------------------------------------
+ *
+ * File: ft1000_ioctl.h
+ *
+ * Description: Common structures and defines relating to IOCTL
+ *
+ * History:
+ * 11/5/02 Whc Created.
+ *
+ *---------------------------------------------------------------------------
+ */
#ifndef _FT1000IOCTLH_
#define _FT1000IOCTLH_
@@ -94,8 +94,8 @@ struct IOCTL_DPRAM_COMMAND {
} __packed;
/*
-* Custom IOCTL command codes
-*/
+ * Custom IOCTL command codes
+ */
#define FT1000_MAGIC_CODE 'F'
#define IOCTL_REGISTER_CMD 0
@@ -106,8 +106,8 @@ struct IOCTL_DPRAM_COMMAND {
#define IOCTL_CONNECT 10
#define IOCTL_DISCONNECT 11
-#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, \
- IOCTL_GET_DSP_STAT_CMD, \
+#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, \
+ IOCTL_GET_DSP_STAT_CMD, \
struct IOCTL_GET_DSP_STAT)
#define IOCTL_FT1000_GET_VER _IOR(FT1000_MAGIC_CODE, IOCTL_GET_VER_CMD, \
struct IOCTL_GET_VER)
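The hunk above only re-indents the _IOR() continuation lines, but the macro is worth spelling out: it packs a direction (read, meaning the kernel copies data back to userspace), the driver's magic character, a command index and the size of the argument type into one ioctl number. A minimal sketch with made-up names (EXAMPLE_MAGIC, EXAMPLE_GET_STATE_CMD, struct example_state):

#include <linux/ioctl.h>
#include <linux/types.h>

struct example_state {
        __u32 flags;
        __u32 error_count;
};

#define EXAMPLE_MAGIC           'F'
#define EXAMPLE_GET_STATE_CMD   3
#define EXAMPLE_IOC_GET_STATE   _IOR(EXAMPLE_MAGIC, EXAMPLE_GET_STATE_CMD, \
                                     struct example_state)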
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 39be30c0eedf..a6b55f42c07c 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -7,6 +7,9 @@
* $Id:
*====================================================
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
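Defining pr_fmt() before the first include is what makes the pr_debug()/pr_err() conversion in this series work: every pr_*() call in the file picks up the module-name prefix automatically, so the hand-written "ft1000_probe:"-style prefixes can be dropped from the format strings. A minimal sketch of the pattern, with a placeholder function and message:

/* pr_fmt() must appear before the first header that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void example_probe_report(int num_configs)
{
        /* Emits "<modname>: number of configurations is N" when this debug
         * site is enabled (dynamic debug or -DDEBUG). */
        pr_debug("number of configurations is %d\n", num_configs);
}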
@@ -45,7 +48,7 @@ static int ft1000_poll_thread(void *arg)
if (!gPollingfailed) {
ret = ft1000_poll(arg);
if (ret != 0) {
- DEBUG("ft1000_poll_thread: polling failed\n");
+ pr_debug("polling failed\n");
gPollingfailed = true;
}
}
@@ -71,9 +74,8 @@ static int ft1000_probe(struct usb_interface *interface,
return -ENOMEM;
dev = interface_to_usbdev(interface);
- DEBUG("ft1000_probe: usb device descriptor info:\n");
- DEBUG("ft1000_probe: number of configuration is %d\n",
- dev->descriptor.bNumConfigurations);
+ pr_debug("usb device descriptor info - number of configuration is %d\n",
+ dev->descriptor.bNumConfigurations);
ft1000dev->dev = dev;
ft1000dev->status = 0;
@@ -85,60 +87,56 @@ static int ft1000_probe(struct usb_interface *interface,
goto err_fw;
}
- DEBUG("ft1000_probe is called\n");
numaltsetting = interface->num_altsetting;
- DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting);
+ pr_debug("number of alt settings is: %d\n", numaltsetting);
iface_desc = interface->cur_altsetting;
- DEBUG("ft1000_probe: number of endpoints is %d\n",
- iface_desc->desc.bNumEndpoints);
- DEBUG("ft1000_probe: descriptor type is %d\n",
- iface_desc->desc.bDescriptorType);
- DEBUG("ft1000_probe: interface number is %d\n",
- iface_desc->desc.bInterfaceNumber);
- DEBUG("ft1000_probe: alternatesetting is %d\n",
- iface_desc->desc.bAlternateSetting);
- DEBUG("ft1000_probe: interface class is %d\n",
- iface_desc->desc.bInterfaceClass);
- DEBUG("ft1000_probe: control endpoint info:\n");
- DEBUG("ft1000_probe: descriptor0 type -- %d\n",
- iface_desc->endpoint[0].desc.bmAttributes);
- DEBUG("ft1000_probe: descriptor1 type -- %d\n",
- iface_desc->endpoint[1].desc.bmAttributes);
- DEBUG("ft1000_probe: descriptor2 type -- %d\n",
- iface_desc->endpoint[2].desc.bmAttributes);
+ pr_debug("number of endpoints is: %d\n",
+ iface_desc->desc.bNumEndpoints);
+ pr_debug("descriptor type is: %d\n", iface_desc->desc.bDescriptorType);
+ pr_debug("interface number is: %d\n",
+ iface_desc->desc.bInterfaceNumber);
+ pr_debug("alternatesetting is: %d\n",
+ iface_desc->desc.bAlternateSetting);
+ pr_debug("interface class is: %d\n", iface_desc->desc.bInterfaceClass);
+ pr_debug("control endpoint info:\n");
+ pr_debug("descriptor0 type -- %d\n",
+ iface_desc->endpoint[0].desc.bmAttributes);
+ pr_debug("descriptor1 type -- %d\n",
+ iface_desc->endpoint[1].desc.bmAttributes);
+ pr_debug("descriptor2 type -- %d\n",
+ iface_desc->endpoint[2].desc.bmAttributes);
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint =
- (struct usb_endpoint_descriptor *)&iface_desc->
- endpoint[i].desc;
- DEBUG("endpoint %d\n", i);
- DEBUG("bEndpointAddress=%x, bmAttributes=%x\n",
- endpoint->bEndpointAddress, endpoint->bmAttributes);
+ (struct usb_endpoint_descriptor *)&iface_desc->
+ endpoint[i].desc;
+ pr_debug("endpoint %d\n", i);
+ pr_debug("bEndpointAddress=%x, bmAttributes=%x\n",
+ endpoint->bEndpointAddress, endpoint->bmAttributes);
if ((endpoint->bEndpointAddress & USB_DIR_IN)
&& ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
ft1000dev->bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- DEBUG("ft1000_probe: in: %d\n",
- endpoint->bEndpointAddress);
+ endpoint->bEndpointAddress;
+ pr_debug("in: %d\n", endpoint->bEndpointAddress);
}
if (!(endpoint->bEndpointAddress & USB_DIR_IN)
&& ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
ft1000dev->bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- DEBUG("ft1000_probe: out: %d\n",
- endpoint->bEndpointAddress);
+ endpoint->bEndpointAddress;
+ pr_debug("out: %d\n", endpoint->bEndpointAddress);
}
}
- DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr,
- ft1000dev->bulk_out_endpointAddr);
+ pr_debug("bulk_in=%d, bulk_out=%d\n",
+ ft1000dev->bulk_in_endpointAddr,
+ ft1000dev->bulk_out_endpointAddr);
ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev);
if (ret < 0) {
- pr_err("Error request_firmware().\n");
+ pr_err("Error request_firmware()\n");
goto err_fw;
}
@@ -155,7 +153,7 @@ static int ft1000_probe(struct usb_interface *interface,
FileLength = dsp_fw->size;
release_firmware(dsp_fw);
- DEBUG("ft1000_probe: start downloading dsp image...\n");
+ pr_debug("start downloading dsp image...\n");
ret = init_ft1000_netdev(ft1000dev);
if (ret)
@@ -163,7 +161,7 @@ static int ft1000_probe(struct usb_interface *interface,
pft1000info = netdev_priv(ft1000dev->net);
- DEBUG("In probe: pft1000info=%p\n", pft1000info);
+ pr_debug("pft1000info=%p\n", pft1000info);
ret = dsp_reload(ft1000dev);
if (ret) {
pr_err("Problem with DSP image loading\n");
@@ -172,7 +170,7 @@ static int ft1000_probe(struct usb_interface *interface,
gPollingfailed = false;
ft1000dev->pPollThread =
- kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
+ kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
if (IS_ERR(ft1000dev->pPollThread)) {
ret = PTR_ERR(ft1000dev->pPollThread);
@@ -187,10 +185,10 @@ static int ft1000_probe(struct usb_interface *interface,
goto err_thread;
}
msleep(100);
- DEBUG("ft1000_probe::Waiting for Card Ready\n");
+ pr_debug("Waiting for Card Ready\n");
}
- DEBUG("ft1000_probe::Card Ready!!!! Registering network device\n");
+ pr_debug("Card Ready!!!! Registering network device\n");
ret = reg_ft1000_netdev(ft1000dev, interface);
if (ret)
@@ -216,24 +214,21 @@ static void ft1000_disconnect(struct usb_interface *interface)
struct ft1000_info *pft1000info;
struct ft1000_usb *ft1000dev;
- DEBUG("ft1000_disconnect is called\n");
-
- pft1000info = (struct ft1000_info *) usb_get_intfdata(interface);
- DEBUG("In disconnect pft1000info=%p\n", pft1000info);
+ pft1000info = (struct ft1000_info *)usb_get_intfdata(interface);
+ pr_debug("In disconnect pft1000info=%p\n", pft1000info);
if (pft1000info) {
ft1000dev = pft1000info->priv;
if (ft1000dev->pPollThread)
kthread_stop(ft1000dev->pPollThread);
- DEBUG("ft1000_disconnect: threads are terminated\n");
+ pr_debug("threads are terminated\n");
if (ft1000dev->net) {
- DEBUG("ft1000_disconnect: destroy char driver\n");
+ pr_debug("destroy char driver\n");
ft1000_destroy_dev(ft1000dev->net);
unregister_netdev(ft1000dev->net);
- DEBUG
- ("ft1000_disconnect: network device unregistered\n");
+ pr_debug("network device unregistered\n");
free_netdev(ft1000dev->net);
}
@@ -241,7 +236,7 @@ static void ft1000_disconnect(struct usb_interface *interface)
usb_free_urb(ft1000dev->rx_urb);
usb_free_urb(ft1000dev->tx_urb);
- DEBUG("ft1000_disconnect: urb freed\n");
+ pr_debug("urb freed\n");
kfree(ft1000dev);
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index 8f7ccae57f31..fea60d5651a7 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -28,8 +28,6 @@ struct app_info_block {
*/
} __packed;
-#define DEBUG(args...) pr_info(args)
-
#define FALSE 0
#define TRUE 1
@@ -137,7 +135,7 @@ extern spinlock_t free_buff_lock;
int ft1000_create_dev(struct ft1000_usb *dev);
void ft1000_destroy_dev(struct net_device *dev);
extern int card_send_command(struct ft1000_usb *ft1000dev,
- void *ptempbuffer, int size);
+ void *ptempbuffer, int size);
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index af0c3878358c..73deae3cd9eb 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -278,7 +278,6 @@ static void fwtty_send_txn_async(struct fwtty_peer *peer,
len, fwtty_common_callback, txn);
}
-
static void __fwtty_restart_tx(struct fwtty_port *port)
{
int len, avail;
@@ -508,7 +507,6 @@ static void fwtty_do_hangup(struct work_struct *work)
tty_kref_put(tty);
}
-
static void fwtty_emit_breaks(struct work_struct *work)
{
struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
@@ -1622,7 +1620,6 @@ static inline int mgmt_pkt_expected_len(__be16 code)
case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
-
case FWSC_VIRT_CABLE_UNPLUG:
case FWSC_VIRT_CABLE_UNPLUG_RSP:
case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index c657639f884b..73eede163820 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -626,7 +626,7 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
void *addr = buf + sizeof(struct iphdr) +
sizeof(struct udphdr) +
offsetof(struct dhcp_packet, chaddr);
- memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
+ ether_addr_copy(nic->dest_mac_addr, addr);
}
}
@@ -639,7 +639,7 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
}
/* Format the data so that it can be put to skb */
- memcpy(mac_header_data, nic->dest_mac_addr, ETH_ALEN);
+ ether_addr_copy(mac_header_data, nic->dest_mac_addr);
memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
@@ -842,9 +842,9 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
{
/* Form the dev_addr */
if (!mac_address)
- memcpy(dev_addr, gdm_lte_macaddr, ETH_ALEN);
+ ether_addr_copy(dev_addr, gdm_lte_macaddr);
else
- memcpy(dev_addr, mac_address, ETH_ALEN);
+ ether_addr_copy(dev_addr, mac_address);
/* The last byte of the mac address
* should be less than or equal to 0xFC
@@ -858,7 +858,7 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
memcpy(nic_src, dev_addr, 3);
/* Copy the nic_dest from dev_addr*/
- memcpy(nic_dest, dev_addr, ETH_ALEN);
+ ether_addr_copy(nic_dest, dev_addr);
}
static void validate_mac_address(u8 *mac_address)
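ether_addr_copy() is the dedicated helper for 6-byte MAC addresses: it behaves like memcpy(dst, src, ETH_ALEN) but may use 16-bit loads and stores, so both buffers must be at least 16-bit aligned. A short sketch with a placeholder structure (example_nic):

#include <linux/etherdevice.h>

struct example_nic {
        u8 dest_mac_addr[ETH_ALEN];
        u8 src_mac_addr[ETH_ALEN];
};

static void example_set_peer(struct example_nic *nic, const u8 *peer_mac)
{
        /* peer_mac must also be 16-bit aligned. */
        ether_addr_copy(nic->dest_mac_addr, peer_mac);
}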
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
index 0163b243d3e0..3d50383c6ced 100644
--- a/drivers/staging/gdm724x/gdm_mux.h
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -35,10 +35,10 @@
#define RETRY_TIMER 30 /* msec */
struct mux_pkt_header {
- unsigned int start_flag;
- unsigned int seq_num;
- unsigned int payload_size;
- unsigned short packet_type;
+ __le32 start_flag;
+ __le32 seq_num;
+ __le32 payload_size;
+ __le16 packet_type;
unsigned char data[0];
};
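Switching these fields to __le32/__le16 records that the mux packet header is little-endian on the wire and lets sparse flag any direct arithmetic on the raw fields; values are converted at the boundary with le32_to_cpu()/cpu_to_le32(). A sketch of the read side, using an illustrative struct (example_pkt_header) that mirrors the layout above:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_pkt_header {
        __le32 start_flag;
        __le32 seq_num;
        __le32 payload_size;
        __le16 packet_type;
        unsigned char data[];
} __packed;

static u32 example_payload_size(const struct example_pkt_header *hdr)
{
        /* A plain load on little-endian CPUs, a byte swap on big-endian. */
        return le32_to_cpu(hdr->payload_size);
}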
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index f5a3378c3951..9cab54bfa6f4 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -115,7 +115,7 @@ static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
{
struct nic *nic = netdev_priv(dev);
- u8 *buf = (u8 *)msg;
+ u8 *buf = msg;
u16 hci_cmd = (buf[0]<<8) | buf[1];
u16 hci_len = (buf[2]<<8) | buf[3];
@@ -605,10 +605,8 @@ static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
int ret;
skb = dev_alloc_skb(len + 2);
- if (!skb) {
- netdev_err(dev, "%s: dev_alloc_skb failed!\n", __func__);
+ if (!skb)
return;
- }
skb_reserve(skb, 2);
dev->stats.rx_packets++;
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 6aa9d7c30139..6da72858d28c 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -46,27 +46,6 @@ static char *file = "xlinx_fpga_firmware.bit";
module_param(file, charp, S_IRUGO);
MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
-#ifdef DEBUG_FPGA
-static void datadump(char *msg, void *m, int n)
-{
- int i;
- unsigned char *c;
-
- pr_info("=== %s ===\n", msg);
-
- c = m;
-
- for (i = 0; i < n; i++) {
- if ((i&0xf) == 0)
- pr_info(KERN_INFO "\n 0x%4x: ", i);
-
- pr_info("%02X ", c[i]);
- }
-
- pr_info("\n");
-}
-#endif /* DEBUG_FPGA */
-
static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
{
memcpy(buf, bitdata + *offset, rdsize);
@@ -157,12 +136,10 @@ static void gs_print_header(struct fpgaimage *fimage)
static void gs_read_bitstream(struct fpgaimage *fimage)
{
char *bitdata;
- int size;
int offset;
offset = 0;
bitdata = (char *)fimage->fw_entry->data;
- size = fimage->fw_entry->size;
readmagic_bitstream(bitdata, &offset);
readinfo_bitstream(bitdata, fimage->filename, &offset);
@@ -195,15 +172,15 @@ static int gs_read_image(struct fpgaimage *fimage)
return 0;
}
-static int gs_load_image(struct fpgaimage *fimage, char *file)
+static int gs_load_image(struct fpgaimage *fimage, char *fw_file)
{
int err;
- pr_info("load fpgaimage %s\n", file);
+ pr_info("load fpgaimage %s\n", fw_file);
- err = request_firmware(&fimage->fw_entry, file, &firmware_pdev->dev);
+ err = request_firmware(&fimage->fw_entry, fw_file, &firmware_pdev->dev);
if (err != 0) {
- pr_err("firmware %s is missing, cannot continue.\n", file);
+ pr_err("firmware %s is missing, cannot continue.\n", fw_file);
return err;
}
@@ -220,9 +197,9 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
size = fimage->lendata;
#ifdef DEBUG_FPGA
- datadump("bitfile sample", bitdata, 0x100);
+ print_hex_dump_bytes("bitfile sample: ", DUMP_PREFIX_OFFSET,
+ bitdata, 0x100);
#endif /* DEBUG_FPGA */
-
if (!xl_supported_prog_bus_width(bus_bytes)) {
pr_err("unsupported program bus width %d\n",
bus_bytes);
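print_hex_dump_bytes() is the stock replacement for the hand-rolled datadump() helper deleted above: it prints at KERN_DEBUG level, 16 bytes per line, with the caller's prefix and an offset column. Minimal usage sketch (buffer and length are arbitrary):

#include <linux/printk.h>
#include <linux/types.h>

static void example_dump(const void *buf, size_t len)
{
        print_hex_dump_bytes("bitfile sample: ", DUMP_PREFIX_OFFSET,
                             buf, len);
}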
@@ -316,10 +293,8 @@ static int gs_fpgaboot(void)
struct fpgaimage *fimage;
fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
- if (fimage == NULL) {
- pr_err("No memory is available\n");
- goto err_out;
- }
+ if (!fimage)
+ return -ENOMEM;
err = gs_load_image(fimage, file);
if (err) {
@@ -361,50 +336,44 @@ err_out2:
err_out1:
kfree(fimage);
-err_out:
return -1;
}
static int __init gs_fpgaboot_init(void)
{
- int err, r;
-
- r = -1;
+ int err;
pr_info("FPGA DOWNLOAD --->\n");
pr_info("FPGA image file name: %s\n", file);
err = init_driver();
- if (err != 0) {
+ if (err) {
pr_err("FPGA DRIVER INIT FAIL!!\n");
- return r;
+ return err;
}
err = xl_init_io();
if (err) {
pr_err("GPIO INIT FAIL!!\n");
- r = -1;
goto errout;
}
err = gs_fpgaboot();
if (err) {
pr_err("FPGA DOWNLOAD FAIL!!\n");
- r = -1;
goto errout;
}
pr_info("FPGA DOWNLOAD DONE <---\n");
- r = 0;
- return r;
+ return 0;
errout:
finish_driver();
- return r;
+ return err;
}
static void __exit gs_fpgaboot_exit(void)
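The init-path rework above returns the code reported by each helper instead of a hard-coded -1, so a failed modprobe shows the real reason (for example -ENOENT for missing firmware). A compact sketch of the same shape; example_setup_hw(), example_load_firmware() and example_teardown_hw() are stubs invented for the sketch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_setup_hw(void)      { return 0; }    /* 0 or -errno */
static int example_load_firmware(void) { return 0; }    /* 0 or -errno */
static void example_teardown_hw(void)  { }

static int __init example_init(void)
{
        int err;

        err = example_setup_hw();
        if (err)
                return err;             /* pass the real error up */

        err = example_load_firmware();
        if (err)
                goto err_teardown;      /* ...instead of a bare -1 */

        return 0;

err_teardown:
        example_teardown_hw();
        return err;
}

static void __exit example_exit(void)
{
        example_teardown_hw();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");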
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index 044ea196aa6f..de4647e2495e 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -47,6 +47,7 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
int bytes = 0;
int i = 0;
+
while (i < num_channels) {
if (bytes % channels[i].bytes == 0)
channels[i].location = bytes;
@@ -74,12 +75,14 @@ void print2byte(int input, struct iio_channel_info *info)
input = input >> info->shift;
if (info->is_signed) {
int16_t val = input;
+
val &= (1 << info->bits_used) - 1;
val = (int16_t)(val << (16 - info->bits_used)) >>
(16 - info->bits_used);
printf("%05f ", ((float)val + info->offset)*info->scale);
} else {
uint16_t val = input;
+
val &= (1 << info->bits_used) - 1;
printf("%05f ", ((float)val + info->offset)*info->scale);
}
@@ -97,6 +100,7 @@ void process_scan(char *data,
int num_channels)
{
int k;
+
for (k = 0; k < num_channels; k++)
switch (channels[k].bytes) {
/* only a few cases implemented so far */
@@ -158,11 +162,12 @@ int main(int argc, char **argv)
char *buffer_access;
int scan_size;
int noevents = 0;
+ int notrigger = 0;
char *dummy;
struct iio_channel_info *channels;
- while ((c = getopt(argc, argv, "l:w:c:et:n:")) != -1) {
+ while ((c = getopt(argc, argv, "l:w:c:et:n:g")) != -1) {
switch (c) {
case 'n':
device_name = optarg;
@@ -183,6 +188,9 @@ int main(int argc, char **argv)
case 'l':
buf_len = strtoul(optarg, &dummy, 10);
break;
+ case 'g':
+ notrigger = 1;
+ break;
case '?':
return -1;
}
@@ -201,28 +209,32 @@ int main(int argc, char **argv)
printf("iio device number being used is %d\n", dev_num);
asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
- if (trigger_name == NULL) {
- /*
- * Build the trigger name. If it is device associated its
- * name is <device_name>_dev[n] where n matches the device
- * number found above
- */
- ret = asprintf(&trigger_name,
- "%s-dev%d", device_name, dev_num);
- if (ret < 0) {
- ret = -ENOMEM;
- goto error_ret;
+
+ if (!notrigger) {
+ if (trigger_name == NULL) {
+ /*
+ * Build the trigger name. If it is device associated
+ * its name is <device_name>_dev[n] where n matches
+ * the device number found above.
+ */
+ ret = asprintf(&trigger_name,
+ "%s-dev%d", device_name, dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
}
- }
- /* Verify the trigger exists */
- trig_num = find_type_by_name(trigger_name, "trigger");
- if (trig_num < 0) {
- printf("Failed to find the trigger %s\n", trigger_name);
- ret = -ENODEV;
- goto error_free_triggername;
- }
- printf("iio trigger number being used is %d\n", trig_num);
+ /* Verify the trigger exists */
+ trig_num = find_type_by_name(trigger_name, "trigger");
+ if (trig_num < 0) {
+ printf("Failed to find the trigger %s\n", trigger_name);
+ ret = -ENODEV;
+ goto error_free_triggername;
+ }
+ printf("iio trigger number being used is %d\n", trig_num);
+ } else
+ printf("trigger-less mode selected\n");
/*
* Parse the files in scan_elements to identify what channels are
@@ -246,14 +258,18 @@ int main(int argc, char **argv)
ret = -ENOMEM;
goto error_free_triggername;
}
- printf("%s %s\n", dev_dir_name, trigger_name);
- /* Set the device trigger to be the data ready trigger found above */
- ret = write_sysfs_string_and_verify("trigger/current_trigger",
- dev_dir_name,
- trigger_name);
- if (ret < 0) {
- printf("Failed to write current_trigger file\n");
- goto error_free_buf_dir_name;
+
+ if (!notrigger) {
+ printf("%s %s\n", dev_dir_name, trigger_name);
+ /* Set the device trigger to be the data ready trigger found
+ * above */
+ ret = write_sysfs_string_and_verify("trigger/current_trigger",
+ dev_dir_name,
+ trigger_name);
+ if (ret < 0) {
+ printf("Failed to write current_trigger file\n");
+ goto error_free_buf_dir_name;
+ }
}
/* Setup ring buffer parameters */
@@ -323,9 +339,10 @@ int main(int argc, char **argv)
if (ret < 0)
goto error_close_buffer_access;
- /* Disconnect the trigger - just write a dummy name. */
- write_sysfs_string("trigger/current_trigger",
- dev_dir_name, "NULL");
+ if (!notrigger)
+ /* Disconnect the trigger - just write a dummy name. */
+ write_sysfs_string("trigger/current_trigger",
+ dev_dir_name, "NULL");
error_close_buffer_access:
close(fp);
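The new 'g' entry has no trailing colon in the getopt() option string, so it is a plain boolean switch selecting trigger-less capture rather than an option that takes a value. A standalone userspace sketch of that handling (the names are placeholders):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int notrigger = 0;
        int c;

        while ((c = getopt(argc, argv, "g")) != -1) {
                switch (c) {
                case 'g':
                        notrigger = 1;
                        break;
                case '?':
                        return 1;
                }
        }

        puts(notrigger ? "trigger-less mode selected" : "triggered mode");
        return 0;
}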
diff --git a/drivers/staging/iio/Documentation/iio_event_monitor.c b/drivers/staging/iio/Documentation/iio_event_monitor.c
index 569d6f8face5..940ed2399e73 100644
--- a/drivers/staging/iio/Documentation/iio_event_monitor.c
+++ b/drivers/staging/iio/Documentation/iio_event_monitor.c
@@ -69,16 +69,29 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
+ [IIO_MOD_X_AND_Y] = "x&y",
+ [IIO_MOD_X_AND_Z] = "x&z",
+ [IIO_MOD_Y_AND_Z] = "y&z",
+ [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
+ [IIO_MOD_X_OR_Y] = "x|y",
+ [IIO_MOD_X_OR_Z] = "x|z",
+ [IIO_MOD_Y_OR_Z] = "y|z",
+ [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
[IIO_MOD_LIGHT_BOTH] = "both",
[IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
- [IIO_MOD_LIGHT_BOTH] = "both",
- [IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_LIGHT_CLEAR] = "clear",
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_QUATERNION] = "quaternion",
+ [IIO_MOD_TEMP_AMBIENT] = "ambient",
+ [IIO_MOD_TEMP_OBJECT] = "object",
+ [IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
+ [IIO_MOD_NORTH_TRUE] = "from_north_true",
+ [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
+ [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
};
static bool event_is_known(struct iio_event_data *event)
@@ -118,6 +131,14 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_X:
case IIO_MOD_Y:
case IIO_MOD_Z:
+ case IIO_MOD_X_AND_Y:
+ case IIO_MOD_X_AND_Z:
+ case IIO_MOD_Y_AND_Z:
+ case IIO_MOD_X_AND_Y_AND_Z:
+ case IIO_MOD_X_OR_Y:
+ case IIO_MOD_X_OR_Z:
+ case IIO_MOD_Y_OR_Z:
+ case IIO_MOD_X_OR_Y_OR_Z:
case IIO_MOD_LIGHT_BOTH:
case IIO_MOD_LIGHT_IR:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
@@ -126,6 +147,13 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_LIGHT_RED:
case IIO_MOD_LIGHT_GREEN:
case IIO_MOD_LIGHT_BLUE:
+ case IIO_MOD_QUATERNION:
+ case IIO_MOD_TEMP_AMBIENT:
+ case IIO_MOD_TEMP_OBJECT:
+ case IIO_MOD_NORTH_MAGN:
+ case IIO_MOD_NORTH_TRUE:
+ case IIO_MOD_NORTH_MAGN_TILT_COMP:
+ case IIO_MOD_NORTH_TRUE_TILT_COMP:
break;
default:
return false;
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 0973a092224a..568eff06f803 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -34,6 +34,7 @@ inline int iioutils_break_up_name(const char *full_name,
char *current;
char *w, *r;
char *working;
+
current = strdup(full_name);
working = strtok(current, "_\0");
w = working;
@@ -335,6 +336,7 @@ inline int build_channel_array(const char *device_dir,
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
int current_enabled = 0;
+
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -506,6 +508,7 @@ inline int _write_sysfs_int(char *filename, char *basedir, int val, int verify)
FILE *sysfsfp;
int test;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL)
return -ENOMEM;
sprintf(temp, "%s/%s", basedir, filename);
@@ -554,6 +557,7 @@ int _write_sysfs_string(char *filename, char *basedir, char *val, int verify)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed\n");
return -ENOMEM;
@@ -614,6 +618,7 @@ int read_sysfs_posint(char *filename, char *basedir)
int ret;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
@@ -636,6 +641,7 @@ int read_sysfs_float(char *filename, char *basedir, float *val)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
@@ -658,6 +664,7 @@ int read_sysfs_string(const char *filename, const char *basedir, char *str)
int ret = 0;
FILE *sysfsfp;
char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+
if (temp == NULL) {
printf("Memory allocation failed");
return -ENOMEM;
diff --git a/drivers/staging/iio/Documentation/lsiio.c b/drivers/staging/iio/Documentation/lsiio.c
index 24ae9694eb41..98a0de098130 100644
--- a/drivers/staging/iio/Documentation/lsiio.c
+++ b/drivers/staging/iio/Documentation/lsiio.c
@@ -46,6 +46,7 @@ static int dump_channels(const char *dev_dir_name)
{
DIR *dp;
const struct dirent *ent;
+
dp = opendir(dev_dir_name);
if (dp == NULL)
return -errno;
@@ -62,17 +63,17 @@ static int dump_one_device(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
+ int retval;
- sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
+ retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
"%i", &dev_idx);
+ if (retval != 1)
+ return -EINVAL;
read_sysfs_string("name", dev_dir_name, name);
printf("Device %03d: %s\n", dev_idx, name);
- if (verblevel >= VERBLEVEL_SENSORS) {
- int ret = dump_channels(dev_dir_name);
- if (ret)
- return ret;
- }
+ if (verblevel >= VERBLEVEL_SENSORS)
+ return dump_channels(dev_dir_name);
return 0;
}
@@ -80,9 +81,12 @@ static int dump_one_trigger(const char *dev_dir_name)
{
char name[IIO_MAX_NAME_LENGTH];
int dev_idx;
+ int retval;
- sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+ retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
"%i", &dev_idx);
+ if (retval != 1)
+ return -EINVAL;
read_sysfs_string("name", dev_dir_name, name);
printf("Trigger %03d: %s\n", dev_idx, name);
return 0;
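sscanf() returns the number of conversions that succeeded, so comparing against 1 catches directory names without a parsable index instead of leaving dev_idx uninitialized. A standalone sketch (parse_index() and the sample string are invented):

#include <errno.h>
#include <stdio.h>

static int parse_index(const char *name, int *idx)
{
        if (sscanf(name, "iio:device%i", idx) != 1)
                return -EINVAL;
        return 0;
}

int main(void)
{
        int idx;

        if (parse_index("iio:device3", &idx) == 0)
                printf("parsed index %d\n", idx);
        return 0;
}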
@@ -107,6 +111,7 @@ static void dump_devices(void)
while (ent = readdir(dp), ent != NULL) {
if (check_prefix(ent->d_name, type_device)) {
char *dev_dir_name;
+
asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
dump_one_device(dev_dir_name);
free(dev_dir_name);
@@ -118,6 +123,7 @@ static void dump_devices(void)
while (ent = readdir(dp), ent != NULL) {
if (check_prefix(ent->d_name, type_trigger)) {
char *dev_dir_name;
+
asprintf(&dev_dir_name, "%s%s", iio_dir, ent->d_name);
dump_one_trigger(dev_dir_name);
free(dev_dir_name);
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index ad45dfbdf417..07b7ffa00ab5 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -9,53 +9,71 @@ config ADIS16201
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16201 dual-axis
+ Say Y here to build support for Analog Devices adis16201 dual-axis
digital inclinometer and accelerometer.
+ To compile this driver as a module, say M here: the module will
+ be called adis16201.
+
config ADIS16203
tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16203 Programmable
+ Say Y here to build support for Analog Devices adis16203 Programmable
360 Degrees Inclinometer.
+ To compile this driver as a module, say M here: the module will be
+ called adis16203.
+
config ADIS16204
tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16204 Programmable
+ Say Y here to build support for Analog Devices adis16204 Programmable
High-g Digital Impact Sensor and Recorder.
+ To compile this driver as a module, say M here: the module will be
+ called adis16204.
+
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
and accelerometer.
+ To compile this driver as a module, say M here: the module will be
+ called adis16209.
+
config ADIS16220
tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
depends on SPI
select IIO_ADIS_LIB
help
- Say yes here to build support for Analog Devices adis16220 programmable
+ Say Y here to build support for Analog Devices adis16220 programmable
digital vibration sensor.
+ To compile this driver as a module, say M here: the module will be
+ called adis16220.
+
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
help
- Say yes here to build support for Analog Devices adis16240 programmable
+ Say Y here to build support for Analog Devices adis16240 programmable
impact Sensor and recorder.
+ To compile this driver as a module, say M here: the module will be
+ called adis16240.
+
config LIS3L02DQ
tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
depends on SPI
@@ -63,16 +81,21 @@ config LIS3L02DQ
depends on !IIO_BUFFER || IIO_KFIFO_BUF
depends on GPIOLIB
help
- Say yes here to build SPI support for the ST microelectronics
+ Say Y here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
and an event interface via a character device.
+ To compile this driver as a module, say M here: the module will be
+ called lis3l02dq.
+
config SCA3000
depends on IIO_BUFFER
depends on SPI
tristate "VTI SCA3000 series accelerometers"
help
- Say yes here to build support for the VTI SCA3000 series of SPI
+ Say Y here to build support for the VTI SCA3000 series of SPI
accelerometers. These devices use a hardware ring buffer.
+ To compile this driver as a module, say M here: the module will be
+ called sca3000.
endmenu
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 61f94221b8b7..9efc77b0ebdd 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -35,6 +35,7 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
iio_trigger_poll(st->trig);
return IRQ_HANDLED;
}
+
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index c110a255d4e8..f6526aa22e8a 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -223,7 +223,8 @@ static int ad7192_setup(struct ad7192_state *st,
id &= AD7192_ID_MASK;
if (id != st->devid)
- dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n", id);
+ dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
+ id);
switch (pdata->clock_source_sel) {
case AD7192_CLK_EXT_MCLK1_2:
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index d215edf66af2..4d4870787eed 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -188,7 +188,7 @@ static void ad7280_delay(struct ad7280_state *st)
if (st->readback_delay_us < 50)
udelay(st->readback_delay_us);
else
- msleep(1);
+ usleep_range(250, 500);
}
static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
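msleep(1) is jiffy-based and can oversleep considerably at low HZ, so delays in the tens-of-microseconds-to-millisecond range are better served by hrtimer-backed usleep_range(), while very short delays keep busy-waiting with udelay(). A sketch of that split with an invented threshold; the usleep_range() branch must run in process context since it sleeps:

#include <linux/delay.h>

static void example_settle(unsigned int delay_us)
{
        if (delay_us < 50)
                udelay(delay_us);       /* too short to schedule */
        else
                /* A min/max window lets the timer coalesce with others. */
                usleep_range(delay_us, delay_us * 2);
}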
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 7511839ba94e..9e24b4d4455f 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -141,7 +141,6 @@ static struct platform_driver ad7606_driver = {
.id_table = ad7606_driver_ids,
.driver = {
.name = "ad7606",
- .owner = THIS_MODULE,
.pm = AD7606_PAR_PM_OPS,
},
};
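The .owner assignments disappear throughout these drivers because platform_driver_register() is a macro that passes THIS_MODULE on the caller's behalf, so the driver core fills in drv->driver.owner itself. A minimal sketch of a platform driver without the field (all names are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
                .name = "example-adc",
                /* no .owner: platform_driver_register() sets it */
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");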
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 6a8ecd73a1a7..7303983e64a7 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -23,7 +23,7 @@ static int ad7606_spi_read_block(struct device *dev,
int i, ret;
unsigned short *data = buf;
- ret = spi_read(spi, (u8 *)buf, count * 2);
+ ret = spi_read(spi, buf, count * 2);
if (ret < 0) {
dev_err(&spi->dev, "SPI read error\n");
return ret;
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 734a7e4886a0..48b1c3740030 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -152,7 +152,8 @@ static ssize_t ad7816_show_available_modes(struct device *dev,
return sprintf(buf, "full\npower-save\n");
}
-static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes, NULL, 0);
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes,
+ NULL, 0);
static ssize_t ad7816_show_channel(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index a876ce755351..5331c442fcfc 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -116,7 +116,7 @@ static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
{
- struct lpc32xx_adc_info *info = (struct lpc32xx_adc_info *) dev_id;
+ struct lpc32xx_adc_info *info = dev_id;
/* Read value and clear irq */
info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
@@ -204,7 +204,6 @@ static struct platform_driver lpc32xx_adc_driver = {
.probe = lpc32xx_adc_probe,
.driver = {
.name = MOD_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lpc32xx_adc_match),
},
};
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 2a29b9baec0d..f053535385bf 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -455,7 +455,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
* SoC's delay unit and start the conversion later
* and automatically.
*/
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ mxs_lradc_reg_wrt(lradc,
+ LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
LRADC_DELAY_KICK |
LRADC_DELAY_DELAY(lradc->settling_delay),
@@ -513,7 +514,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
* SoC's delay unit and start the conversion later
* and automatically.
*/
- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
+ mxs_lradc_reg_wrt(lradc,
+ LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
LRADC_DELAY_KICK |
LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2));
@@ -1668,7 +1670,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
static struct platform_driver mxs_lradc_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mxs_lradc_dt_ids,
},
.probe = mxs_lradc_probe,
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index c5492ba50751..8ad71691fc09 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -98,7 +98,7 @@ static void spear_adc_set_clk(struct spear_adc_state *st, u32 val)
u32 clk_high, clk_low, count;
u32 apb_clk = clk_get_rate(st->clk);
- count = (apb_clk + val - 1) / val;
+ count = DIV_ROUND_UP(apb_clk, val);
clk_low = count / 2;
clk_high = count - clk_low;
st->current_clk = apb_clk / count;
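DIV_ROUND_UP(n, d) from linux/kernel.h is the named form of the open-coded (n + d - 1) / d ceiling division it replaces; rounding the divider up keeps the derived clock at or below the requested rate. A small sketch:

#include <linux/kernel.h>

static u32 example_clk_divider(u32 parent_hz, u32 target_hz)
{
        /* e.g. DIV_ROUND_UP(166000000, 2500000) == 67, not 66. */
        return DIV_ROUND_UP(parent_hz, target_hz);
}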
@@ -226,7 +226,7 @@ static const struct iio_chan_spec spear_adc_iio_channels[] = {
static irqreturn_t spear_adc_isr(int irq, void *dev_id)
{
- struct spear_adc_state *st = (struct spear_adc_state *)dev_id;
+ struct spear_adc_state *st = dev_id;
/* Read value to clear IRQ */
st->value = spear_adc_get_average(st);
@@ -389,7 +389,6 @@ static struct platform_driver spear_adc_driver = {
.remove = spear_adc_remove,
.driver = {
.name = SPEAR_ADC_MOD_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear_adc_dt_ids),
},
};
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index e6795e0bed1d..0ed7e13e2283 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -10,6 +10,9 @@ config ADT7316
Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316.
+
config ADT7316_SPI
tristate "support SPI bus connection"
depends on SPI && ADT7316
@@ -18,6 +21,9 @@ config ADT7316_SPI
Say yes here to build SPI bus support for Analog Devices ADT7316/7/8
and ADT7516/7/9.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316_spi.
+
config ADT7316_I2C
tristate "support I2C bus connection"
depends on I2C && ADT7316
@@ -25,4 +31,7 @@ config ADT7316_I2C
Say yes here to build I2C bus support for Analog Devices ADT7316/7/8
and ADT7516/7/9.
+ To compile this driver as a module, choose M here: the module will
+ be called adt7316_i2c.
+
endmenu
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
index ec50bf34628d..ec40fbb698a6 100644
--- a/drivers/staging/iio/addac/adt7316.h
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -30,6 +30,7 @@ extern const struct dev_pm_ops adt7316_pm_ops;
#else
#define ADT7316_PM_OPS NULL
#endif
-int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
+int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
+ const char *name);
#endif
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 88b199bb2926..f62f68fd6f3f 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -7,7 +7,10 @@ config ADIS16060
tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
depends on SPI
help
- Say yes here to build support for Analog Devices adis16060 wide bandwidth
+ Say Y (yes) here to build support for Analog Devices adis16060 wide bandwidth
yaw rate gyroscope with SPI.
+ To compile this driver as a module, say M here: the module will be
+ called adis16060. If unsure, say N.
+
endmenu
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index e0d88fa2a5b5..423f96bdf595 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1040,8 +1040,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
filter_delay = z * (chip->tsl2x7x_settings.persistence & 0x0F);
- y = (filter_delay / 1000);
- z = (filter_delay % 1000);
+ y = filter_delay / 1000;
+ z = filter_delay % 1000;
return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
}
@@ -1086,8 +1086,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
z = y * TSL2X7X_MIN_ITIME;
filter_delay = z * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
- y = (filter_delay / 1000);
- z = (filter_delay % 1000);
+ y = filter_delay / 1000;
+ z = filter_delay % 1000;
return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
}
@@ -1573,8 +1573,7 @@ static struct attribute *tsl2x7x_ALS_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1591,8 +1590,7 @@ static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
@@ -1611,8 +1609,7 @@ static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_in_illuminance0_calibscale_available.attr,
&dev_attr_in_illuminance0_integration_time.attr,
- &iio_const_attr_in_illuminance0_integration_time_available\
- .dev_attr.attr,
+ &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index e53274b64ae1..64cd3704ec6e 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -10,6 +10,9 @@ config ADE7753
Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
Metering IC with di/dt Sensor Interface.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7753.
+
config ADE7754
tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
depends on SPI
@@ -17,6 +20,9 @@ config ADE7754
Say yes here to build support for Analog Devices ADE7754 Polyphase
Multifunction Energy Metering IC Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7754.
+
config ADE7758
tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
depends on SPI
@@ -26,6 +32,9 @@ config ADE7758
Say yes here to build support for Analog Devices ADE7758 Polyphase
Multifunction Energy Metering IC with Per Phase Information Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7758.
+
config ADE7759
tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
depends on SPI
@@ -33,6 +42,9 @@ config ADE7759
Say yes here to build support for Analog Devices ADE7758 Active Energy
Metering IC with di/dt Sensor Interface.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7759.
+
config ADE7854
tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
depends on SPI || I2C
@@ -40,6 +52,9 @@ config ADE7854
Say yes here to build support for Analog Devices ADE7854/58/68/78 Polyphase
Multifunction Energy Metering IC Driver.
+ To compile this driver as a module, choose M here: the
+ module will be called ade7854.
+
config ADE7854_I2C
tristate "support I2C bus connection"
depends on ADE7854 && I2C
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 2fd18c60323d..710a2f3e787e 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -1,4 +1,4 @@
-#
+ #
# Industrial I/O standalone triggers
#
comment "Triggers - standalone"
@@ -12,6 +12,9 @@ config IIO_PERIODIC_RTC_TRIGGER
Provides support for using periodic capable real time
clocks as IIO triggers.
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-periodic-rtc.
+
config IIO_BFIN_TMR_TRIGGER
tristate "Blackfin TIMER trigger"
depends on BLACKFIN
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index a21b7c514776..2af8d677d4ed 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -279,7 +279,6 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
static struct platform_driver iio_bfin_tmr_trigger_driver = {
.driver = {
.name = "iio_bfin_tmr_trigger",
- .owner = THIS_MODULE,
},
.probe = iio_bfin_tmr_trigger_probe,
.remove = iio_bfin_tmr_trigger_remove,
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 8f0a2ffa7150..a24caf73ae0b 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -72,7 +72,8 @@ static ssize_t iio_trig_periodic_write_freq(struct device *dev,
if (val > 0) {
ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val);
if (ret == 0 && trig_info->state && trig_info->frequency == 0)
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 1);
+ ret = rtc_irq_set_state(trig_info->rtc,
+ &trig_info->task, 1);
} else if (val == 0) {
ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
} else
@@ -206,7 +207,6 @@ static struct platform_driver iio_trig_periodic_rtc_driver = {
.remove = iio_trig_periodic_rtc_remove,
.driver = {
.name = "iio_prtc_trigger",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
deleted file mode 100644
index 29636fb13959..000000000000
--- a/drivers/staging/imx-drm/TODO
+++ /dev/null
@@ -1,17 +0,0 @@
-TODO:
-- get DRM Maintainer review for this code
-- decide where to put the base driver. It is not specific to a subsystem
- and would be used by DRM/KMS and media/V4L2
-
-Missing features (not necessarily for moving out of staging):
-
-- Add support for IC (Image converter)
-- Add support for CSI (CMOS Sensor interface)
-- Add support for VDIC (Video Deinterlacer)
-
-Many work-in-progress patches for the above features exist. Contact
-Sascha Hauer <kernel@pengutronix.de> if you are interested in working
-on a specific feature.
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org> and
-Sascha Hauer <kernel@pengutronix.de>
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 8888b2756174..2e5a9e5965b1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -242,18 +242,6 @@ do { \
#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
-void libcfs_log_goto(struct libcfs_debug_msg_data *, const char *, long_ptr_t);
-#define GOTO(label, rc) \
-do { \
- if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
- } else { \
- (void)(rc); \
- } \
- goto label; \
-} while (0)
-
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 8f5cdd584f85..62b575deac3a 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -930,8 +930,7 @@ kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
- CDEBUG(D_NET, "Closing conn -> %s, "
- "version: %x, reason: %d\n",
+ CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_version, why);
@@ -958,8 +957,7 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer,
conn->ibc_incarnation == incarnation)
continue;
- CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:%#llx(%x, %#llx)\n",
+ CDEBUG(D_NET, "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_version, conn->ibc_incarnation,
version, incarnation);
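These lustre hunks rejoin format strings that had been wrapped mid-literal; checkpatch warns about split string literals because a single-line string stays greppable from the message seen in the log, even when it runs past 80 columns. Sketch with an invented message:

#include <linux/printk.h>

static void example_conn_report(const char *name, int version, int reason)
{
        /* Keep the literal whole; wrap the argument list instead. */
        pr_debug("closing connection to %s, version: %x, reason: %d\n",
                 name, version, reason);
}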
@@ -1599,8 +1597,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
if (fps->fps_increasing) {
spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET, "Another thread is allocating new "
- "FMR pool, waiting for her to complete\n");
+ CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
schedule();
goto again;
@@ -1801,8 +1798,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "Another thread is allocating new "
- "%s pool, waiting for her to complete\n",
+ CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
ps->ps_name);
schedule();
goto again;
@@ -2411,7 +2407,7 @@ kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
goto out;
}
- mr_size = (1ULL << hdev->ibh_mr_shift);
+ mr_size = 1ULL << hdev->ibh_mr_shift;
mm_size = (unsigned long)high_memory - PAGE_OFFSET;
hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
@@ -3081,7 +3077,7 @@ kiblnd_startup (lnet_ni_t *ni)
LIBCFS_ALLOC(net, sizeof(*net));
ni->ni_data = net;
if (net == NULL)
- goto failed;
+ goto net_failed;
do_gettimeofday(&tv);
net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
@@ -3147,6 +3143,7 @@ failed:
if (net->ibn_dev == NULL && ibdev != NULL)
kiblnd_destroy_dev(ibdev);
+net_failed:
kiblnd_shutdown(ni);
CDEBUG(D_NET, "kiblnd_startup failed\n");
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 14c9c8d18d02..b48d7edf5669 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1116,8 +1116,7 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
}
if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): "
- "%d/%d src %d/%d dst frags\n",
+ CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
IBLND_RDMA_FRAGS(conn->ibc_version),
srcidx, srcrd->rd_nfrags,
@@ -2254,8 +2253,8 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (ni == NULL || /* no matching net */
ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%pI4h): "
- "bad dst nid %s\n", libcfs_nid2str(nid),
+ CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ libcfs_nid2str(nid),
ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
@@ -2295,8 +2294,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): "
- "incompatible max_frags %d (%d wanted)\n",
+ CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
IBLND_RDMA_FRAGS(version));
@@ -2502,8 +2500,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
break;
}
- CNETERR("%s: retrying (%s), %x, %x, "
- "queue_dep: %d, max_frag: %d, msg_size: %d\n",
+ CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
libcfs_nid2str(peer->ibp_nid),
reason, IBLND_MSG_VERSION, version,
cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version),
@@ -2679,8 +2676,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
}
if (ver != msg->ibm_version) {
- CERROR("%s replied version %x is different with "
- "requested version %x\n",
+ CERROR("%s replied version %x is different with requested version %x\n",
libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
rc = -EPROTO;
goto failed;
@@ -2724,8 +2720,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (rc != 0) {
- CERROR("Bad connection reply from %s, rc = %d, "
- "version: %x max_frags: %d\n",
+ CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
libcfs_nid2str(peer->ibp_nid), rc,
msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
goto failed;
@@ -3060,8 +3055,7 @@ kiblnd_check_conns (int idx)
}
if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): "
- "c: %u, oc: %u, rc: %u\n",
+ CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer->ibp_nid),
cfs_duration_sec(cfs_time_current() -
peer->ibp_last_alive),
@@ -3334,10 +3328,8 @@ kiblnd_scheduler(void *arg)
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Failed to bind on CPT %d, please verify whether "
- "all CPUs are healthy and reload modules if necessary, "
- "otherwise your system might under risk of low "
- "performance\n", sched->ibs_cpt);
+ CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
+ sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
@@ -3369,8 +3361,7 @@ kiblnd_scheduler(void *arg)
rc = ib_req_notify_cq(conn->ibc_cq,
IB_CQ_NEXT_COMP);
if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
@@ -3383,8 +3374,7 @@ kiblnd_scheduler(void *arg)
}
if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
rc);
kiblnd_close_conn(conn, -EIO);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index cefdfb6b1bec..8b4a8e9a29b4 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -222,8 +222,7 @@ kiblnd_tunables_init (void)
*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
- CWARN("Concurrent sends %d is lower than message queue size: %d, "
- "performance may drop slightly.\n",
+ CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
*kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
}
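
The single most common change in these files is the one shown just above in kiblnd_tunables_init(): a user-visible format string that had been split across source lines to stay under 80 columns is rejoined into one literal, so the full message can be found with grep, and the arguments are wrapped instead. A small, stand-alone illustration of the before/after (plain fprintf stands in for the CDEBUG/CWARN/CERROR macros):

#include <stdio.h>

static void report(int sends, int queue)
{
	/*
	 * Before: the literal was split to stay under 80 columns, so
	 * grepping the source for the full message found nothing:
	 *
	 *	fprintf(stderr, "Concurrent sends %d is lower than message "
	 *			"queue size: %d, performance may drop slightly.\n",
	 *		sends, queue);
	 *
	 * After: one long literal, with the arguments wrapped instead.
	 */
	fprintf(stderr, "Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
		sends, queue);
}

int main(void)
{
	report(4, 8);
	return 0;
}
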
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 038854e8302f..9188b34e6948 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -337,8 +337,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
&route->ksnr_ipaddr,
&conn->ksnc_myipaddr);
} else {
- CDEBUG(D_NET, "Rebinding %s %pI4h from "
- "%pI4h to %pI4h\n",
+ CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
&route->ksnr_ipaddr,
&route->ksnr_myipaddr,
@@ -974,8 +973,7 @@ ksocknal_accept (lnet_ni_t *ni, struct socket *sock)
LIBCFS_ALLOC(cr, sizeof(*cr));
if (cr == NULL) {
- LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
- "%pI4h: memory exhausted\n",
+ LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
&peer_ip);
return -ENOMEM;
}
@@ -1288,8 +1286,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
* socket callbacks.
*/
- CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
- " incarnation:%lld sched[%d:%d]\n",
+ CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
conn->ksnc_port, incarnation, cpt,
@@ -1638,37 +1635,32 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
- CERROR("Completing partial receive from %s[%d]"
- ", ip %pI4h:%d, with error, wanted: %d, left: %d, "
- "last alive is %ld secs ago\n",
+ CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
- last_rcv)));
+ last_rcv)));
lnet_finalize (conn->ksnc_peer->ksnp_ni,
conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of lnet header from %s"
- ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_KSM_HEADER:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of ksock message from %s"
- ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_SLOP:
if (conn->ksnc_rx_started)
- CERROR("Incomplete receive of slops from %s"
- ", ip %pI4h:%d, with error\n",
+ CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr, conn->ksnc_port);
break;
@@ -2348,16 +2340,11 @@ ksocknal_base_shutdown(void)
static __u64
ksocknal_new_incarnation (void)
{
- struct timeval tv;
/* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal. Hopefully
- * we won't be able to reboot more frequently than 1MHz for the
- * foreseeable future :) */
-
- do_gettimeofday(&tv);
-
- return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ * identifies this particular instance of the socknal.
+ */
+ return ktime_get_ns();
}
static int
@@ -2516,22 +2503,21 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
ksock_route_t *route;
ksock_conn_t *conn;
- CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
- "closing %d, accepting %d, err %d, zcookie %llu, "
- "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
- peer->ksnp_sharecount, peer->ksnp_closing,
- peer->ksnp_accepting, peer->ksnp_error,
- peer->ksnp_zc_next_cookie,
- !list_empty(&peer->ksnp_tx_queue),
- !list_empty(&peer->ksnp_zc_req_list));
+ CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
+ libcfs_id2str(peer->ksnp_id),
+ atomic_read(&peer->ksnp_refcount),
+ peer->ksnp_sharecount, peer->ksnp_closing,
+ peer->ksnp_accepting, peer->ksnp_error,
+ peer->ksnp_zc_next_cookie,
+ !list_empty(&peer->ksnp_tx_queue),
+ !list_empty(&peer->ksnp_zc_req_list));
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
- CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
- "del %d\n", atomic_read(&route->ksnr_refcount),
- route->ksnr_scheduled, route->ksnr_connecting,
- route->ksnr_connected, route->ksnr_deleted);
+ CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
+ atomic_read(&route->ksnr_refcount),
+ route->ksnr_scheduled, route->ksnr_connecting,
+ route->ksnr_connected, route->ksnr_deleted);
}
list_for_each (tmp, &peer->ksnp_conns) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index d29f5f134b89..e6c1d3647952 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -551,19 +551,16 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (!conn->ksnc_closing) {
switch (rc) {
case -ECONNRESET:
- LCONSOLE_WARN("Host %pI4h reset our connection "
- "while we were sending data; it may have "
- "rebooted.\n",
+ LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
&conn->ksnc_ipaddr);
break;
default:
- LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %pI4h: %d.\n",
+ LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
&conn->ksnc_ipaddr, rc);
break;
}
- CDEBUG(D_NET, "[%p] Error %d on write to %s"
- " ip %pI4h:%d\n", conn, rc,
+ CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
+ conn, rc,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -799,8 +796,7 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
- "Too soon to retry route %pI4h "
- "(cnted %d, interval %ld, %ld secs later)\n",
+ "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
&route->ksnr_ipaddr,
route->ksnr_connected,
route->ksnr_retry_interval,
@@ -874,8 +870,8 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
- CERROR("Refusing to create a connection to "
- "userspace process %s\n", libcfs_id2str(id));
+ CERROR("Refusing to create a connection to userspace process %s\n",
+ libcfs_id2str(id));
return -EHOSTUNREACH;
}
@@ -1132,18 +1128,17 @@ ksocknal_process_receive (ksock_conn_t *conn)
LASSERT (rc != -EAGAIN);
if (rc == 0)
- CDEBUG(D_NET, "[%p] EOF from %s"
- " ip %pI4h:%d\n", conn,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
+ conn,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
else if (!conn->ksnc_closing)
- CERROR("[%p] Error %d on read from %s"
- " ip %pI4h:%d\n",
- conn, rc,
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
+ conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
/* it's not an error if conn is being closed */
ksocknal_close_conn_and_siblings (conn,
@@ -1724,10 +1719,10 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
- CERROR("Bad magic(1) %#08x (%#08x expected) from "
- "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
- LNET_PROTO_TCP_MAGIC,
- &conn->ksnc_ipaddr);
+ CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
+ __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1755,10 +1750,9 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR("Unknown protocol version (%d.x expected)"
- " from %pI4h\n",
- conn->ksnc_proto->pro_version,
- &conn->ksnc_ipaddr);
+ CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
+ conn->ksnc_proto->pro_version,
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1778,8 +1772,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
*incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
- CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %pI4h\n", &conn->ksnc_ipaddr);
+ CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1810,10 +1804,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
- LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %pI4h, but they claimed they were "
- "%s; please check your Lustre "
- "configuration.\n",
+ LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
libcfs_id2str(*peerid),
&conn->ksnc_ipaddr,
libcfs_id2str(recv_id));
@@ -2199,8 +2190,7 @@ ksocknal_connd (void *arg)
if (ksocknal_connect(route)) {
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
- CWARN("massive consecutive "
- "re-connecting to %pI4h\n",
+ CWARN("massive consecutive re-connecting to %pI4h\n",
&route->ksnr_ipaddr);
cons_retry = 0;
}
@@ -2264,25 +2254,20 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
switch (error) {
case ECONNRESET:
- CNETERR("A connection with %s "
- "(%pI4h:%d) was reset; "
- "it may have rebooted.\n",
+ CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
break;
case ETIMEDOUT:
- CNETERR("A connection with %s "
- "(%pI4h:%d) timed out; the "
- "network or node may be down.\n",
+ CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
break;
default:
- CNETERR("An unexpected network error %d "
- "occurred with %s "
- "(%pI4h:%d\n", error,
+ CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
+ error,
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -2297,8 +2282,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), "
- "state %d wanted %d left %d\n",
+ CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port,
@@ -2315,8 +2299,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
- CNETERR("Timeout sending data to %s (%pI4h:%d) "
- "the network or that node may be down.\n",
+ CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port);
@@ -2500,9 +2483,7 @@ ksocknal_check_peer_timeouts (int idx)
spin_unlock(&peer->ksnp_lock);
read_unlock(&ksocknal_data.ksnd_global_lock);
- CERROR("Total %d stale ZC_REQs for peer %s detected; the "
- "oldest(%p) timed out %ld secs ago, "
- "resid: %d, wmem: %d\n",
+ CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer->ksnp_id.nid), tx,
cfs_duration_sec(cfs_time_current() - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 9dde548070af..ea9d80f40cab 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -515,8 +515,8 @@ ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d)"
- " to %pI4h/%d\n", rc, hello->kshm_nips,
+ CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
+ rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
}
out:
@@ -560,8 +560,8 @@ ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO payload (%d)"
- " to %pI4h/%d\n", rc, hello->kshm_nips,
+ CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
+ rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
}
@@ -595,10 +595,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
/* ...and check we got what we expected */
if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
- CERROR("Expecting a HELLO hdr,"
- " but got type %d from %pI4h\n",
- le32_to_cpu (hdr->type),
- &conn->ksnc_ipaddr);
+ CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
+ le32_to_cpu(hdr->type),
+ &conn->ksnc_ipaddr);
rc = -EPROTO;
goto out;
}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 60bc2ae4fdf1..faceb9505d84 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
#include <linux/log2.h>
+#include <linux/ktime.h>
#define D_LNI D_CONSOLE
@@ -276,7 +277,7 @@ lnet_find_lnd_by_type(int type)
struct list_head *tmp;
/* holding lnd mutex */
- list_for_each (tmp, &the_lnet.ln_lnds) {
+ list_for_each(tmp, &the_lnet.ln_lnds) {
lnd = list_entry(tmp, lnd_t, lnd_list);
if ((int)lnd->lnd_type == type)
@@ -417,17 +418,9 @@ static __u64
lnet_create_interface_cookie(void)
{
/* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot. Initialisation time,
- * even if it's only implemented to millisecond resolution is probably
- * easily good enough. */
- struct timeval tv;
- __u64 cookie;
-
- do_gettimeofday(&tv);
- cookie = tv.tv_sec;
- cookie *= 1000000;
- cookie += tv.tv_usec;
- return cookie;
+ * replies and ACKs appearing valid after reboot.
+ */
+ return ktime_get_ns();
}
static char *
@@ -1652,7 +1645,6 @@ lnet_destroy_ping_info(void)
offsetof(lnet_ping_info_t,
pi_ni[the_lnet.ln_ping_info->pi_nnis]));
the_lnet.ln_ping_info = NULL;
- return;
}
int
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index e4d906a65635..3225c069637d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -218,7 +218,7 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
}
-int
+static int
lnet_md_validate(lnet_md_t *umd)
{
if (umd->start == NULL && umd->length != 0) {
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 4b9567d67f33..c8c1ed84fe5c 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -561,7 +561,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
}
EXPORT_SYMBOL(lnet_extract_kiov);
-void
+static void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
@@ -599,7 +599,7 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
lnet_finalize(ni, msg, rc);
}
-void
+static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
lnet_libmd_t *md = msg->msg_md;
@@ -639,7 +639,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
msg->msg_hdr.payload_length = cpu_to_le32(len);
}
-void
+static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
void *priv = msg->msg_private;
@@ -654,7 +654,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
lnet_finalize(ni, msg, rc);
}
-int
+static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
int rc;
@@ -668,8 +668,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
&msg->msg_private);
if (rc != 0) {
- CERROR("recv from %s / send to %s aborted: "
- "eager_recv failed %d\n",
+ CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
libcfs_nid2str(msg->msg_rxpeer->lp_nid),
libcfs_id2str(msg->msg_target), rc);
LASSERT(rc < 0); /* required by my callers */
@@ -679,7 +678,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
}
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
-void
+static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
unsigned long last_alive = 0;
@@ -731,7 +730,7 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
-int
+static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
unsigned long now = cfs_time_current();
@@ -753,8 +752,7 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
if (time_before(now, next_query)) {
if (lp->lp_alive)
- CWARN("Unexpected aliveness of peer %s: "
- "%d < %d (%d/%d)\n",
+ CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
libcfs_nid2str(lp->lp_nid),
(int)now, (int)next_query,
lnet_queryinterval,
@@ -817,8 +815,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
(msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
lnet_net_unlock(cpt);
- CNETERR("Aborting message for %s: LNetM[DE]Unlink() already "
- "called on the MD/ME.\n",
+ CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
libcfs_id2str(msg->msg_target));
if (do_send)
lnet_finalize(ni, msg, -ECANCELED);
@@ -871,7 +868,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
}
-lnet_rtrbufpool_t *
+static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
lnet_rtrbufpool_t *rbp;
@@ -891,7 +888,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
return rbp;
}
-int
+static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
/* lnet_parse is going to lnet_net_unlock immediately after this, so it
@@ -1220,8 +1217,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
src_ni = lnet_nid2ni_locked(src_nid, cpt);
if (src_ni == NULL) {
lnet_net_unlock(cpt);
- LCONSOLE_WARN("Can't send to %s: src %s is not a "
- "local nid\n", libcfs_nid2str(dst_nid),
+ LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
+ libcfs_nid2str(dst_nid),
libcfs_nid2str(src_nid));
return -EINVAL;
}
@@ -1283,8 +1280,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
lnet_ni_decref_locked(src_ni, cpt);
lnet_net_unlock(cpt);
- LCONSOLE_WARN("No route to %s via %s "
- "(all routers down)\n",
+ LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
libcfs_id2str(msg->msg_target),
libcfs_nid2str(src_nid));
return -EHOSTUNREACH;
@@ -1676,8 +1672,7 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_PUT:
- CWARN(" Ptl index %d, ack md %#llx.%#llx, "
- "match bits %llu\n",
+ CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
hdr->msg.put.ptl_index,
hdr->msg.put.ack_wmd.wh_interface_cookie,
hdr->msg.put.ack_wmd.wh_object_cookie,
@@ -1688,8 +1683,8 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_GET:
- CWARN(" Ptl index %d, return md %#llx.%#llx, "
- "match bits %llu\n", hdr->msg.get.ptl_index,
+ CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
+ hdr->msg.get.ptl_index,
hdr->msg.get.return_wmd.wh_interface_cookie,
hdr->msg.get.return_wmd.wh_object_cookie,
hdr->msg.get.match_bits);
@@ -1699,16 +1694,14 @@ lnet_print_hdr(lnet_hdr_t *hdr)
break;
case LNET_MSG_ACK:
- CWARN(" dst md %#llx.%#llx, "
- "manipulated length %d\n",
+ CWARN(" dst md %#llx.%#llx, manipulated length %d\n",
hdr->msg.ack.dst_wmd.wh_interface_cookie,
hdr->msg.ack.dst_wmd.wh_object_cookie,
hdr->msg.ack.mlength);
break;
case LNET_MSG_REPLY:
- CWARN(" dst md %#llx.%#llx, "
- "length %d\n",
+ CWARN(" dst md %#llx.%#llx, length %d\n",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie,
hdr->payload_length);
@@ -1757,8 +1750,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
case LNET_MSG_REPLY:
if (payload_length >
(__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
- CERROR("%s, src %s: bad %s payload %d "
- "(%d max expected)\n",
+ CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
lnet_msgtyp2str(type),
@@ -1794,40 +1786,36 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (!for_me) {
if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
/* should have gone direct */
- CERROR("%s, src %s: Bad dest nid %s "
- "(should have been sent direct)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (lnet_islocalnid(dest_nid)) {
/* dest is another local NI; sender should have used
* this node's NID on its own network */
- CERROR("%s, src %s: Bad dest nid %s "
- "(it's my nid but on a different network)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (rdma_req && type == LNET_MSG_GET) {
- CERROR("%s, src %s: Bad optimized GET for %s "
- "(final destination must be me)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
return -EPROTO;
}
if (!the_lnet.ln_routing) {
- CERROR("%s, src %s: Dropping message for %s "
- "(routing not enabled)\n",
- libcfs_nid2str(from_nid),
- libcfs_nid2str(src_nid),
- libcfs_nid2str(dest_nid));
+ CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
goto drop;
}
}
@@ -1882,8 +1870,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
if (rc != 0) {
lnet_net_unlock(cpt);
- CERROR("%s, src %s: Dropping %s "
- "(error %d looking up sender)\n",
+ CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type), rc);
lnet_msg_free(msg);
@@ -2003,12 +1990,11 @@ lnet_recv_delayed_msg_list(struct list_head *head)
LASSERT(msg->msg_rxpeer != NULL);
LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
- CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
- "match %llu offset %d length %d.\n",
- libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
- msg->msg_hdr.msg.put.match_bits,
- msg->msg_hdr.msg.put.offset,
- msg->msg_hdr.payload_length);
+ CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
+ libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
+ msg->msg_hdr.msg.put.match_bits,
+ msg->msg_hdr.msg.put.offset,
+ msg->msg_hdr.payload_length);
lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 720c73be4d3c..19ed696344fe 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -192,8 +192,7 @@ lnet_try_match_md(lnet_libmd_t *md,
}
/* Commit to this ME/MD */
- CDEBUG(D_NET, "Incoming %s index %x from %s of "
- "length %d/%d into md %#llx [%d] + %d\n",
+ CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n",
(info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
info->mi_portal, libcfs_id2str(info->mi_id), mlength,
info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index be31dfc5fa4b..17e1643fd675 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -35,7 +35,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
-int
+static int
lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
LASSERT(!lntmsg->msg_routing);
@@ -44,7 +44,7 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
}
-int
+static int
lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
struct iovec *iov, lnet_kiov_t *kiov,
@@ -86,7 +86,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
static int lolnd_instanced;
-void
+static void
lolnd_shutdown(lnet_ni_t *ni)
{
CDEBUG(D_NET, "shutdown\n");
@@ -95,7 +95,7 @@ lolnd_shutdown(lnet_ni_t *ni)
lolnd_instanced = 0;
}
-int
+static int
lolnd_startup(lnet_ni_t *ni)
{
LASSERT(ni->ni_lnd == &the_lolnd);
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index e84d59d23ae0..3c23677bc280 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(config_on_load, "configure network at module load");
static struct mutex lnet_config_mutex;
-int
+static int
lnet_configure(void *arg)
{
/* 'arg' only there so I can be passed to cfs_create_thread() */
@@ -63,7 +63,7 @@ lnet_configure(void *arg)
return rc;
}
-int
+static int
lnet_unconfigure(void)
{
int refcount;
@@ -83,7 +83,7 @@ lnet_unconfigure(void)
return (refcount == 0) ? 0 : -EBUSY;
}
-int
+static int
lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
int rc;
@@ -110,7 +110,7 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
-int
+static int __init
init_lnet(void)
{
int rc;
@@ -135,7 +135,7 @@ init_lnet(void)
return 0;
}
-void
+static void __exit
fini_lnet(void)
{
int rc;
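
The lnet/module.c hunks above are typical of the linkage cleanups in this series: helpers used only within one file gain static, and the module entry points gain __init/__exit so their text can be discarded once it is no longer reachable. A self-contained skeleton of the same pattern, with placeholder names rather than LNet symbols:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* file-local helper: static keeps it out of the global symbol table */
static int example_configure(void)
{
	pr_info("example: configured\n");
	return 0;
}

/* __init code can be freed after the module finishes loading */
static int __init example_init(void)
{
	return example_configure();
}

/* __exit code is only needed (or kept) when the module can be removed */
static void __exit example_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
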
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index b5b8fb576bfb..c667b5b76761 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -457,8 +457,7 @@ lnet_check_routes(void)
lnet_net_unlock(cpt);
- CERROR("Routes to %s via %s and %s not "
- "supported\n",
+ CERROR("Routes to %s via %s and %s not supported\n",
libcfs_net2str(net),
libcfs_nid2str(nid1),
libcfs_nid2str(nid2));
@@ -752,7 +751,7 @@ lnet_router_checker_event(lnet_event_t *event)
lnet_net_unlock(lp->lp_cpt);
}
-void
+static void
lnet_wait_known_routerstate(void)
{
lnet_peer_t *rtr;
@@ -784,7 +783,7 @@ lnet_wait_known_routerstate(void)
}
}
-void
+static void
lnet_update_ni_status_locked(void)
{
lnet_ni_t *ni;
@@ -824,7 +823,7 @@ lnet_update_ni_status_locked(void)
}
}
-void
+static void
lnet_destroy_rc_data(lnet_rc_data_t *rcd)
{
LASSERT(list_empty(&rcd->rcd_list));
@@ -845,7 +844,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
LIBCFS_FREE(rcd, sizeof(*rcd));
}
-lnet_rc_data_t *
+static lnet_rc_data_t *
lnet_create_rc_data_locked(lnet_peer_t *gateway)
{
lnet_rc_data_t *rcd = NULL;
@@ -959,8 +958,7 @@ lnet_ping_router_locked (lnet_peer_t *rtr)
secs = lnet_router_check_interval(rtr);
CDEBUG(D_NET,
- "rtr %s %d: deadline %lu ping_notsent %d alive %d "
- "alive_count %d lp_ping_timestamp %lu\n",
+ "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
libcfs_nid2str(rtr->lp_nid), secs,
rtr->lp_ping_deadline, rtr->lp_ping_notsent,
rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
@@ -1010,9 +1008,7 @@ lnet_router_checker_start(void)
if (check_routers_before_use &&
dead_router_check_interval <= 0) {
- LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
- " set if 'check_routers_before_use' is set"
- "\n");
+ LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
return -EINVAL;
}
@@ -1224,7 +1220,7 @@ rescan:
return 0;
}
-void
+static void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
@@ -1235,7 +1231,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
LIBCFS_FREE(rb, sz);
}
-lnet_rtrbuf_t *
+static lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
{
int npages = rbp->rbp_npages;
@@ -1270,7 +1266,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
return rb;
}
-void
+static void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
{
int npages = rbp->rbp_npages;
@@ -1299,7 +1295,7 @@ lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}
-int
+static int
lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
{
lnet_rtrbuf_t *rb;
@@ -1333,7 +1329,7 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
return 0;
}
-void
+static void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
INIT_LIST_HEAD(&rbp->rbp_msgs);
@@ -1370,8 +1366,8 @@ lnet_nrb_tiny_calculate(int npages)
if (tiny_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "tiny_router_buffers=%d invalid when "
- "routing enabled\n", tiny_router_buffers);
+ "tiny_router_buffers=%d invalid when routing enabled\n",
+ tiny_router_buffers);
return -1;
}
@@ -1389,8 +1385,8 @@ lnet_nrb_small_calculate(int npages)
if (small_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "small_router_buffers=%d invalid when "
- "routing enabled\n", small_router_buffers);
+ "small_router_buffers=%d invalid when routing enabled\n",
+ small_router_buffers);
return -1;
}
@@ -1408,8 +1404,8 @@ lnet_nrb_large_calculate(int npages)
if (large_router_buffers < 0) {
LCONSOLE_ERROR_MSG(0x10c,
- "large_router_buffers=%d invalid when "
- "routing enabled\n", large_router_buffers);
+ "large_router_buffers=%d invalid when routing enabled\n",
+ large_router_buffers);
return -1;
}
@@ -1442,8 +1438,7 @@ lnet_rtrpools_alloc(int im_a_router)
} else if (!strcmp(forwarding, "enabled")) {
/* explicitly enabled */
} else {
- LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
- "'enabled' or 'disabled'\n");
+ LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
return -EINVAL;
}
@@ -1520,11 +1515,10 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
/* can't do predictions... */
if (cfs_time_after(when, now)) {
- CWARN ("Ignoring prediction from %s of %s %s "
- "%ld seconds in the future\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down",
- cfs_duration_sec(cfs_time_sub(when, now)));
+ CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid), alive ? "up" : "down",
+ cfs_duration_sec(cfs_time_sub(when, now)));
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 6e8f7e2bbcfc..46cde7036f1d 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -164,8 +164,8 @@ static int proc_lnet_stats(struct ctl_table *table, int write,
__proc_lnet_stats);
}
-int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_routes(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
const int tmpsiz = 256;
char *tmpstr;
@@ -290,8 +290,8 @@ int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer,
return rc;
}
-int proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_routers(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int rc = 0;
char *tmpstr;
@@ -425,8 +425,8 @@ int proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer,
return rc;
}
-int proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_peers(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
const int tmpsiz = 256;
struct lnet_peer_table *ptable;
@@ -657,8 +657,8 @@ static int proc_lnet_buffers(struct ctl_table *table, int write,
__proc_lnet_buffers);
}
-int proc_lnet_nis(struct ctl_table *table, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_lnet_nis(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int tmpsiz = 128 * LNET_CPT_NUMBER;
int rc = 0;
@@ -791,20 +791,17 @@ static struct lnet_portal_rotors portal_rotors[] = {
{
.pr_value = LNET_PTL_ROTOR_ON,
.pr_name = "ON",
- .pr_desc = "round-robin dispatch all PUT messages for "
- "wildcard portals"
+ .pr_desc = "round-robin dispatch all PUT messages for wildcard portals"
},
{
.pr_value = LNET_PTL_ROTOR_RR_RT,
.pr_name = "RR_RT",
- .pr_desc = "round-robin dispatch routed PUT message for "
- "wildcard portals"
+ .pr_desc = "round-robin dispatch routed PUT message for wildcard portals"
},
{
.pr_value = LNET_PTL_ROTOR_HASH_RT,
.pr_name = "HASH_RT",
- .pr_desc = "dispatch routed PUT message by hashing source "
- "NID for wildcard portals"
+ .pr_desc = "dispatch routed PUT message by hashing source NID for wildcard portals"
},
{
.pr_value = -1,
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index a94f336d578c..463da076fa70 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -233,7 +233,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
}
-int
+static int
brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
@@ -358,7 +358,7 @@ out:
return;
}
-void
+static void
brw_server_rpc_done(srpc_server_rpc_t *rpc)
{
srpc_bulk_t *blk = rpc->srpc_bulk;
@@ -378,7 +378,7 @@ brw_server_rpc_done(srpc_server_rpc_t *rpc)
sfw_free_pages(rpc);
}
-int
+static int
brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
{
__u64 magic = BRW_MAGIC;
@@ -414,7 +414,7 @@ brw_bulk_ready(srpc_server_rpc_t *rpc, int status)
return 0;
}
-int
+static int
brw_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index ae7b0fcd818d..5bc615309e72 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -45,7 +45,7 @@
#include "../../include/linux/lnet/lnetst.h"
#include "console.h"
-int
+static int
lst_session_new_ioctl(lstio_session_new_args_t *args)
{
char *name;
@@ -82,7 +82,7 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
return rc;
}
-int
+static int
lst_session_end_ioctl(lstio_session_end_args_t *args)
{
if (args->lstio_ses_key != console_session.ses_key)
@@ -91,7 +91,7 @@ lst_session_end_ioctl(lstio_session_end_args_t *args)
return lstcon_session_end();
}
-int
+static int
lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
@@ -113,7 +113,7 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
args->lstio_ses_nmlen);
}
-int
+static int
lst_debug_ioctl(lstio_debug_args_t *args)
{
char *name = NULL;
@@ -194,7 +194,7 @@ out:
return rc;
}
-int
+static int
lst_group_add_ioctl(lstio_group_add_args_t *args)
{
char *name;
@@ -228,7 +228,7 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
return rc;
}
-int
+static int
lst_group_del_ioctl(lstio_group_del_args_t *args)
{
int rc;
@@ -262,7 +262,7 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
return rc;
}
-int
+static int
lst_group_update_ioctl(lstio_group_update_args_t *args)
{
int rc;
@@ -320,7 +320,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
return rc;
}
-int
+static int
lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
{
unsigned feats;
@@ -365,7 +365,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
return rc;
}
-int
+static int
lst_group_list_ioctl(lstio_group_list_args_t *args)
{
if (args->lstio_grp_key != console_session.ses_key)
@@ -382,7 +382,7 @@ lst_group_list_ioctl(lstio_group_list_args_t *args)
args->lstio_grp_namep);
}
-int
+static int
lst_group_info_ioctl(lstio_group_info_args_t *args)
{
char *name;
@@ -446,7 +446,7 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
return 0;
}
-int
+static int
lst_batch_add_ioctl(lstio_batch_add_args_t *args)
{
int rc;
@@ -480,7 +480,7 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args)
return rc;
}
-int
+static int
lst_batch_run_ioctl(lstio_batch_run_args_t *args)
{
int rc;
@@ -515,7 +515,7 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args)
return rc;
}
-int
+static int
lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
{
int rc;
@@ -551,7 +551,7 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
return rc;
}
-int
+static int
lst_batch_query_ioctl(lstio_batch_query_args_t *args)
{
char *name;
@@ -593,7 +593,7 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
return rc;
}
-int
+static int
lst_batch_list_ioctl(lstio_batch_list_args_t *args)
{
if (args->lstio_bat_key != console_session.ses_key)
@@ -610,7 +610,7 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args)
args->lstio_bat_namep);
}
-int
+static int
lst_batch_info_ioctl(lstio_batch_info_args_t *args)
{
char *name;
@@ -675,7 +675,7 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
return rc;
}
-int
+static int
lst_stat_query_ioctl(lstio_stat_args_t *args)
{
int rc;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index a3a60d6e9081..9999b0dc03e4 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -88,7 +88,7 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
spin_unlock(&rpc->crpc_lock);
}
-int
+static int
lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
{
@@ -113,7 +113,7 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
return 0;
}
-int
+static int
lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
{
@@ -182,7 +182,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
atomic_dec(&console_session.ses_rpc_counter);
}
-void
+static void
lstcon_rpc_post(lstcon_rpc_t *crpc)
{
lstcon_rpc_trans_t *trans = crpc->crp_trans;
@@ -383,7 +383,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
return rc;
}
-int
+static int
lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
{
lstcon_node_t *nd = crpc->crp_node;
@@ -718,7 +718,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
return &pid[idx % SFW_ID_PER_PAGE];
}
-int
+static int
lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
int dist, int span, int nkiov, lnet_kiov_t *kiov)
{
@@ -772,7 +772,7 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
return 0;
}
-int
+static int
lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
{
test_ping_req_t *prq = &req->tsr_u.ping;
@@ -783,7 +783,7 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
return 0;
}
-int
+static int
lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
@@ -795,7 +795,7 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
return 0;
}
-int
+static int
lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
@@ -915,7 +915,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
return rc;
}
-int
+static int
lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
lstcon_node_t *nd, srpc_msg_t *reply)
{
@@ -1162,7 +1162,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
return rc;
}
-void
+static void
lstcon_rpc_pinger(void *arg)
{
stt_timer_t *ptimer = (stt_timer_t *)arg;
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 5dad9f1f9462..49cb6543d538 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1208,8 +1208,7 @@ again:
lstcon_rpc_trans_destroy(trans);
/* return if any error */
- CDEBUG(D_NET, "Failed to add test %s, "
- "RPC error %d, framework error %d\n",
+ CDEBUG(D_NET, "Failed to add test %s, RPC error %d, framework error %d\n",
transop == LST_TRANS_TSBCLIADD ? "client" : "server",
lstcon_trans_stat()->trs_rpc_errno,
lstcon_trans_stat()->trs_fwk_errno);
@@ -1885,8 +1884,7 @@ lstcon_session_feats_check(unsigned feats)
spin_unlock(&console_session.ses_rpc_lock);
if (rc != 0) {
- CERROR("remote features %x do not match with "
- "session features %x of console\n",
+ CERROR("remote features %x do not match with session features %x of console\n",
feats, console_session.ses_features);
}
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index df04ab7de835..cc9d1826ae66 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -156,7 +156,7 @@ sfw_register_test (srpc_service_t *service, sfw_test_client_ops_t *cliops)
return 0;
}
-void
+static void
sfw_add_session_timer (void)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -176,7 +176,7 @@ sfw_add_session_timer (void)
return;
}
-int
+static int
sfw_del_session_timer (void)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -238,7 +238,7 @@ sfw_deactivate_session (void)
}
-void
+static void
sfw_session_expired (void *data)
{
sfw_session_t *sn = data;
@@ -284,15 +284,14 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
}
/* completion handler for incoming framework RPCs */
-void
+static void
sfw_server_rpc_done(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int status = rpc->srpc_status;
CDEBUG (D_NET,
- "Incoming framework RPC done: "
- "service %s, peer %s, status %s:%d\n",
+ "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
status);
@@ -302,7 +301,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
return;
}
-void
+static void
sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
{
LASSERT (rpc->crpc_bulk.bk_niov == 0);
@@ -310,8 +309,7 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
CDEBUG (D_NET,
- "Outgoing framework RPC done: "
- "service %d, peer %s, status %s:%d:%d\n",
+ "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
swi_state2str(rpc->crpc_wi.swi_state),
rpc->crpc_aborted, rpc->crpc_status);
@@ -325,7 +323,7 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
spin_unlock(&sfw_data.fw_lock);
}
-sfw_batch_t *
+static sfw_batch_t *
sfw_find_batch (lst_bid_t bid)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -341,7 +339,7 @@ sfw_find_batch (lst_bid_t bid)
return NULL;
}
-sfw_batch_t *
+static sfw_batch_t *
sfw_bid2batch (lst_bid_t bid)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -367,7 +365,7 @@ sfw_bid2batch (lst_bid_t bid)
return bat;
}
-int
+static int
sfw_get_stats (srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -479,7 +477,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
return 0;
}
-int
+static int
sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -511,7 +509,7 @@ sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
return 0;
}
-int
+static int
sfw_debug_session (srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -532,7 +530,7 @@ sfw_debug_session (srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
return 0;
}
-void
+static void
sfw_test_rpc_fini (srpc_client_rpc_t *rpc)
{
sfw_test_unit_t *tsu = rpc->crpc_priv;
@@ -554,7 +552,7 @@ sfw_test_buffers(sfw_test_instance_t *tsi)
return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
}
-int
+static int
sfw_load_test(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc;
@@ -575,8 +573,8 @@ sfw_load_test(struct sfw_test_instance *tsi)
rc = srpc_service_add_buffers(svc, nbuf);
if (rc != 0) {
- CWARN("Failed to reserve enough buffers: "
- "service %s, %d needed: %d\n", svc->sv_name, nbuf, rc);
+ CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
+ svc->sv_name, nbuf, rc);
/* NB: this error handler is not strictly correct, because
* it may release more buffers than already allocated,
* but it doesn't matter because request portal should
@@ -591,7 +589,7 @@ sfw_load_test(struct sfw_test_instance *tsi)
return 0;
}
-void
+static void
sfw_unload_test(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
@@ -609,7 +607,7 @@ sfw_unload_test(struct sfw_test_instance *tsi)
return;
}
-void
+static void
sfw_destroy_test_instance (sfw_test_instance_t *tsi)
{
srpc_client_rpc_t *rpc;
@@ -643,7 +641,7 @@ clean:
return;
}
-void
+static void
sfw_destroy_batch (sfw_batch_t *tsb)
{
sfw_test_instance_t *tsi;
@@ -682,7 +680,7 @@ sfw_destroy_session (sfw_session_t *sn)
return;
}
-void
+static void
sfw_unpack_addtest_req(srpc_msg_t *msg)
{
srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
@@ -727,7 +725,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
return;
}
-int
+static int
sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
{
srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
@@ -865,7 +863,7 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
return;
}
-void
+static void
sfw_test_rpc_done (srpc_client_rpc_t *rpc)
{
sfw_test_unit_t *tsu = rpc->crpc_priv;
@@ -944,7 +942,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
return 0;
}
-int
+static int
sfw_run_test (swi_workitem_t *wi)
{
sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
@@ -994,7 +992,7 @@ test_done:
return 1;
}
-int
+static int
sfw_run_batch (sfw_batch_t *tsb)
{
swi_workitem_t *wi;
@@ -1072,7 +1070,7 @@ sfw_stop_batch (sfw_batch_t *tsb, int force)
return 0;
}
-int
+static int
sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
{
sfw_test_instance_t *tsi;
@@ -1117,7 +1115,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
return 0;
}
-int
+static int
sfw_add_test (srpc_server_rpc_t *rpc)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -1187,7 +1185,7 @@ sfw_add_test (srpc_server_rpc_t *rpc)
return 0;
}
-int
+static int
sfw_control_batch (srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
@@ -1228,7 +1226,7 @@ sfw_control_batch (srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
return 0;
}
-int
+static int
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
@@ -1270,8 +1268,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
if (sn != NULL &&
sn->sn_features != request->msg_ses_feats) {
- CNETERR("Features of framework RPC don't match "
- "features of current session: %x/%x\n",
+ CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
request->msg_ses_feats, sn->sn_features);
reply->msg_body.reply.status = EPROTO;
reply->msg_body.reply.sid = sn->sn_id;
@@ -1334,7 +1331,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
return rc;
}
-int
+static int
sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
@@ -1348,8 +1345,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
spin_lock(&sfw_data.fw_lock);
if (status != 0) {
- CERROR("Bulk transfer failed for RPC: "
- "service %s, peer %s, status %d\n",
+ CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
spin_unlock(&sfw_data.fw_lock);
return -EIO;
@@ -1664,12 +1660,10 @@ sfw_startup (void)
}
if (session_timeout == 0)
- CWARN ("Zero session_timeout specified "
- "- test sessions never expire.\n");
+ CWARN("Zero session_timeout specified - test sessions never expire.\n");
if (rpc_timeout == 0)
- CWARN ("Zero rpc_timeout specified "
- "- test RPC never expire.\n");
+ CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
memset(&sfw_data, 0, sizeof(struct smoketest_framework));
@@ -1727,8 +1721,7 @@ sfw_startup (void)
rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
if (rc != 0) {
- CWARN("Failed to reserve enough buffers: "
- "service %s, %d needed: %d\n",
+ CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
sv->sv_name, sv->sv_wi_total, rc);
error = -ENOMEM;
}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 6dd4309dc5ea..c6ef5b0d8de1 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -55,7 +55,7 @@ static int lst_init_step = LST_INIT_NONE;
struct cfs_wi_sched *lst_sched_serial;
struct cfs_wi_sched **lst_sched_test;
-void
+static void
lnet_selftest_fini(void)
{
int i;
@@ -90,18 +90,7 @@ lnet_selftest_fini(void)
return;
}
-void
-lnet_selftest_structure_assertion(void)
-{
- CLASSERT(sizeof(srpc_msg_t) == 160);
- CLASSERT(sizeof(srpc_test_reqst_t) == 70);
- CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_concur) == 72);
- CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_ndest) == 78);
- CLASSERT(sizeof(srpc_stat_reply_t) == 136);
- CLASSERT(sizeof(srpc_stat_reqst_t) == 28);
-}
-
-int
+static int
lnet_selftest_init(void)
{
int nscheds;
@@ -130,8 +119,8 @@ lnet_selftest_init(void)
rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
nthrs, &lst_sched_test[i]);
if (rc != 0) {
- CERROR("Failed to create CPT affinity WI scheduler "
- "%d for LST\n", i);
+ CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
+ i);
goto error;
}
}
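
The removed lnet_selftest_structure_assertion() above appears to contain only CLASSERT() compile-time size and offset checks on the selftest wire structures, so deleting it has no run-time effect. A stand-alone C11 sketch of the same kind of wire-format guard, using static_assert and a made-up structure:

#include <assert.h>	/* static_assert */
#include <stdint.h>
#include <stdio.h>

/* Made-up wire header; the sizes are illustrative, not LNet's. */
struct demo_wire_hdr {
	uint32_t magic;
	uint32_t version;
	uint64_t matchbits;
};

/* Equivalent of CLASSERT(): fail the build if the layout drifts. */
static_assert(sizeof(struct demo_wire_hdr) == 16,
	      "demo_wire_hdr must stay 16 bytes on the wire");

int main(void)
{
	printf("sizeof(struct demo_wire_hdr) = %zu\n",
	       sizeof(struct demo_wire_hdr));
	return 0;
}
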
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 750cac4afbb2..d8c0df6e6852 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -44,7 +44,7 @@
#define LST_PING_TEST_MAGIC 0xbabeface
-int ping_srv_workitems = SFW_TEST_WI_MAX;
+static int ping_srv_workitems = SFW_TEST_WI_MAX;
module_param(ping_srv_workitems, int, 0644);
MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index a9f29d8833a9..f753add7bfb3 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -87,7 +87,7 @@ void srpc_set_counters (const srpc_counters_t *cnt)
spin_unlock(&srpc_data.rpc_glock);
}
-int
+static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
nob = min(nob, (int)PAGE_CACHE_SIZE);
@@ -170,7 +170,7 @@ srpc_next_id (void)
return id;
}
-void
+static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
struct srpc_service_cd *scd,
struct srpc_buffer *buffer)
@@ -351,7 +351,7 @@ srpc_remove_service (srpc_service_t *sv)
return 0;
}
-int
+static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -391,7 +391,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
return 0;
}
-int
+static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -443,7 +443,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
return 0;
}
-int
+static int
srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
{
@@ -452,7 +452,7 @@ srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
LNET_NID_ANY, mdh, ev);
}
-int
+static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
lnet_handle_md_t *mdh, srpc_event_t *ev)
{
@@ -466,7 +466,7 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
LNET_MD_OP_PUT, any, mdh, ev);
}
-int
+static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
{
struct srpc_service *sv = scd->scd_svc;
@@ -678,9 +678,7 @@ srpc_finish_service(struct srpc_service *sv)
rpc = list_entry(scd->scd_rpc_active.next,
struct srpc_server_rpc, srpc_list);
- CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
- "wi %s scheduled %d running %d, "
- "ev fired %d type %d status %d lnet %d\n",
+ CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
rpc->srpc_wi.swi_workitem.wi_scheduled,
@@ -697,7 +695,7 @@ srpc_finish_service(struct srpc_service *sv)
}
/* called with sv->sv_lock held */
-void
+static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -787,7 +785,7 @@ srpc_shutdown_service(srpc_service_t *sv)
}
}
-int
+static int
srpc_send_request (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_reqstev;
@@ -807,7 +805,7 @@ srpc_send_request (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_prepare_reply (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_replyev;
@@ -831,7 +829,7 @@ srpc_prepare_reply (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_prepare_bulk (srpc_client_rpc_t *rpc)
{
srpc_bulk_t *bk = &rpc->crpc_bulk;
@@ -863,7 +861,7 @@ srpc_prepare_bulk (srpc_client_rpc_t *rpc)
return rc;
}
-int
+static int
srpc_do_bulk (srpc_server_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->srpc_ev;
@@ -891,7 +889,7 @@ srpc_do_bulk (srpc_server_rpc_t *rpc)
}
/* only called from srpc_handle_rpc */
-void
+static void
srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1066,7 +1064,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
return 0;
}
-void
+static void
srpc_client_rpc_expired (void *data)
{
srpc_client_rpc_t *rpc = data;
@@ -1108,7 +1106,7 @@ srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
*
* Upon exit the RPC expiry timer is not queued and the handler is not
* running on any CPU. */
-void
+static void
srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
{
/* timer not planted or already exploded */
@@ -1129,7 +1127,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
}
}
-void
+static void
srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
{
swi_workitem_t *wi = &rpc->crpc_wi;
@@ -1236,20 +1234,18 @@ srpc_send_rpc (swi_workitem_t *wi)
if (reply->msg_type != type ||
(reply->msg_magic != SRPC_MSG_MAGIC &&
reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CWARN ("Bad message from %s: type %u (%d expected),"
- " magic %u (%d expected).\n",
- libcfs_id2str(rpc->crpc_dest),
- reply->msg_type, type,
- reply->msg_magic, SRPC_MSG_MAGIC);
+ CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
+ libcfs_id2str(rpc->crpc_dest),
+ reply->msg_type, type,
+ reply->msg_magic, SRPC_MSG_MAGIC);
rc = -EBADMSG;
break;
}
if (do_bulk && reply->msg_body.reply.status != 0) {
- CWARN ("Remote error %d at %s, unlink bulk buffer in "
- "case peer didn't initiate bulk transfer\n",
- reply->msg_body.reply.status,
- libcfs_id2str(rpc->crpc_dest));
+ CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
+ reply->msg_body.reply.status,
+ libcfs_id2str(rpc->crpc_dest));
LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
}
@@ -1393,7 +1389,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
}
/* when in kernel always called with LNET_LOCK() held, and in thread context */
-void
+static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
@@ -1504,11 +1500,10 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
msg->msg_type != __swab32(type)) ||
(msg->msg_magic != SRPC_MSG_MAGIC &&
msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
- CERROR ("Dropping RPC (%s) from %s: "
- "status %d mlength %d type %u magic %u.\n",
- sv->sv_name, libcfs_id2str(ev->initiator),
- ev->status, ev->mlength,
- msg->msg_type, msg->msg_magic);
+ CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
+ sv->sv_name, libcfs_id2str(ev->initiator),
+ ev->status, ev->mlength,
+ msg->msg_type, msg->msg_magic);
/* NB can't call srpc_service_recycle_buffer here since
* it may call LNetM[DE]Attach. The invalid magic tells
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 91d4caa4edb0..f8352c2a7d37 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -121,7 +121,7 @@ stt_del_timer(stt_timer_t *timer)
}
/* called with stt_data.stt_lock held */
-int
+static int
stt_expire_list(struct list_head *slot, unsigned long now)
{
int expired = 0;
@@ -145,7 +145,7 @@ stt_expire_list(struct list_head *slot, unsigned long now)
return expired;
}
-int
+static int
stt_check_timers(unsigned long *last)
{
int expired = 0;
@@ -168,7 +168,7 @@ stt_check_timers(unsigned long *last)
}
-int
+static int
stt_timer_main(void *arg)
{
cfs_block_allsigs();
@@ -187,7 +187,7 @@ stt_timer_main(void *arg)
return 0;
}
-int
+static int
stt_start_timer_thread(void)
{
struct task_struct *task;
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 4f65ba1158bf..6725467ef4d0 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -57,5 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS
config LUSTRE_LLITE_LLOOP
tristate "Lustre virtual block device"
depends on LUSTRE_FS && BLOCK
- depends on !PPC_64K_PAGES && !ARM64_64K_PAGES
+ depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
default m
diff --git a/drivers/staging/lustre/lustre/include/dt_object.h b/drivers/staging/lustre/lustre/include/dt_object.h
index 212ebaea8555..be4c7d95e788 100644
--- a/drivers/staging/lustre/lustre/include/dt_object.h
+++ b/drivers/staging/lustre/lustre/include/dt_object.h
@@ -617,7 +617,7 @@ struct dt_index_operations {
int (*load)(const struct lu_env *env,
const struct dt_it *di, __u64 hash);
int (*key_rec)(const struct lu_env *env,
- const struct dt_it *di, void* key_rec);
+ const struct dt_it *di, void *key_rec);
} dio_it;
};
@@ -667,7 +667,7 @@ static inline int lu_device_is_dt(const struct lu_device *d)
return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_DT);
}
-static inline struct dt_device * lu2dt_dev(struct lu_device *l)
+static inline struct dt_device *lu2dt_dev(struct lu_device *l)
{
LASSERT(lu_device_is_dt(l));
return container_of0(l, struct dt_device, dd_lu_dev);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index e94ab343ab25..8156b4c0f568 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -91,8 +91,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
# define inode_dio_read(i) atomic_inc(&(i)->i_dio_count)
/* inode_dio_done(i) use as-is for read unlock */
-#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
#ifndef FS_HAS_FIEMAP
#define FS_HAS_FIEMAP (0)
@@ -139,8 +137,7 @@ ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
);
path_put(&path);
return rc;
- }
- else
+ } else
return -ENOSYS;
}
@@ -149,8 +146,7 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
if (sb->s_qcop->quota_off) {
return sb->s_qcop->quota_off(sb, off
);
- }
- else
+ } else
return -ENOSYS;
}
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 9d7e28ace42d..9cd8683573ce 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -87,8 +87,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
if (task == NULL)
continue;
- LCONSOLE_WARN("%s:%d: lock %p was acquired"
- " by <%s:%d:%s:%d> for %lu seconds.\n",
+ LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
current->comm, current->pid,
lock, task->comm, task->pid,
lock->func, lock->line,
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index ccb6cd42a67d..cfe503b7df62 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -374,8 +374,8 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_PROCNAME_UID "procname_uid"
#define JOBSTATS_NODELOCAL "nodelocal"
-extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
- int *val, int mult);
+extern int lprocfs_write_frac_helper(const char __user *buffer,
+ unsigned long count, int *val, int mult);
extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
long val, int mult);
#if defined (CONFIG_PROC_FS)
@@ -557,7 +557,7 @@ extern void lprocfs_free_obd_stats(struct obd_device *obddev);
extern void lprocfs_free_md_stats(struct obd_device *obddev);
struct obd_export;
struct nid_stat;
-extern int lprocfs_add_clear_entry(struct obd_device * obd,
+extern int lprocfs_add_clear_entry(struct obd_device *obd,
struct proc_dir_entry *entry);
extern int lprocfs_exp_setup(struct obd_export *exp,
lnet_nid_t *peer_nid, int *newnid);
@@ -647,7 +647,7 @@ extern int lprocfs_rd_kbytesavail(struct seq_file *m, void *data);
extern int lprocfs_rd_filestotal(struct seq_file *m, void *data);
extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
-extern int lprocfs_write_helper(const char *buffer, unsigned long count,
+extern int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val);
extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
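
[Editorial example] The lprocfs_status.h hunk above changes the write helpers to take a const char __user * buffer, since the data arrives from a userspace write to /proc and cannot be dereferenced directly in kernel context. As an illustration only (the helper name below is hypothetical, not Lustre's lprocfs_write_helper()), the usual pattern is to copy the buffer in with copy_from_user() before parsing:

#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/kernel.h>	/* kstrtoint() */

/* Hypothetical sketch, not part of the patch above: parse an integer
 * written by userspace through a __user-annotated buffer. */
static int example_write_int(const char __user *buffer, unsigned long count,
			     int *val)
{
	char kbuf[16];

	if (count >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, buffer, count))	/* returns bytes NOT copied */
		return -EFAULT;
	kbuf[count] = '\0';

	return kstrtoint(kbuf, 10, val);	/* 0 on success, -errno otherwise */
}
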
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 6015ee5c4b64..2ddb2b054d8d 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1120,7 +1120,7 @@ struct lu_context_key {
};
#define LU_KEY_INIT(mod, type) \
- static void* mod##_key_init(const struct lu_context *ctx, \
+ static void *mod##_key_init(const struct lu_context *ctx, \
struct lu_context_key *key) \
{ \
type *value; \
@@ -1137,7 +1137,7 @@ struct lu_context_key {
#define LU_KEY_FINI(mod, type) \
static void mod##_key_fini(const struct lu_context *ctx, \
- struct lu_context_key *key, void* data) \
+ struct lu_context_key *key, void *data) \
{ \
type *info = data; \
\
diff --git a/drivers/staging/lustre/lustre/include/lustre_capa.h b/drivers/staging/lustre/lustre/include/lustre_capa.h
index ab6b9ea98a70..fe19534ebd8f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_capa.h
+++ b/drivers/staging/lustre/lustre/include/lustre_capa.h
@@ -154,7 +154,7 @@ static inline __u32 capa_expiry(struct lustre_capa *capa)
}
void _debug_capa(struct lustre_capa *, struct libcfs_debug_msg_data *,
- const char *fmt, ... );
+ const char *fmt, ...);
#define DEBUG_CAPA(level, capa, fmt, args...) \
do { \
if (((level) & D_CANTMASK) != 0 || \
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 515b835ce14d..9b2833131744 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -368,8 +368,7 @@ static inline void check_lcd(char *obd_name, int index,
if (strnlen((char*)lcd->lcd_uuid, length) == length) {
lcd->lcd_uuid[length - 1] = '\0';
- LCONSOLE_ERROR("the client UUID (%s) on %s for exports"
- "stored in last_rcvd(index = %d) is bad!\n",
+ LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
lcd->lcd_uuid, obd_name, index);
}
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 14ac46f45fd1..83bc0a9d7d4c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1446,7 +1446,7 @@ static inline void check_res_locked(struct ldlm_resource *res)
assert_spin_locked(&res->lr_lock);
}
-struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
/* ldlm_pool.c */
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
index b94f76a3301b..0f8f76c43ee1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -74,7 +74,7 @@ typedef struct {
extern ext_acl_xattr_header *
lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
posix_acl_xattr_header **out);
extern void
lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size);
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 12c7590e61fa..bf135630c39a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -85,7 +85,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
/* client.c */
-int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
+int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct md_open_data;
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 0a024d3cfeb7..36396d1c94dc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -2627,12 +2627,7 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
char *lustre_msg_get_jobid(struct lustre_msg *msg);
__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
-#endif
void lustre_msg_set_handle(struct lustre_msg *msg,
struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
@@ -2951,7 +2946,7 @@ void ptlrpcd_decref(void);
* procfs output related functions
* @{
*/
-const char* ll_opcode2str(__u32 opcode);
+const char *ll_opcode2str(__u32 opcode);
#if defined (CONFIG_PROC_FS)
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 882e40bd584c..4a29261c514d 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -414,7 +414,7 @@ do { \
#define EXP_MD_COUNTER_INCREMENT(exp, op)
#endif
-static inline int lprocfs_nid_ldlm_stats_init(struct nid_stat* tmp)
+static inline int lprocfs_nid_ldlm_stats_init(struct nid_stat *tmp)
{
/* Always add in ldlm_stats */
tmp->nid_ldlm_stats = lprocfs_alloc_stats(LDLM_LAST_OPC - LDLM_FIRST_OPC
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index a3d7a7292417..eab2bd60241b 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -73,6 +73,7 @@ static inline int extent_compare(struct interval_node_extent *e1,
struct interval_node_extent *e2)
{
int rc;
+
if (e1->start == e2->start) {
if (e1->end < e2->end)
rc = -1;
@@ -321,6 +322,7 @@ static void interval_insert_color(struct interval_node *node,
/* Parent is RED, so gparent must not be NULL */
if (node_is_left_child(parent)) {
struct interval_node *uncle;
+
uncle = gparent->in_right;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
@@ -340,6 +342,7 @@ static void interval_insert_color(struct interval_node *node,
__rotate_right(gparent, root);
} else {
struct interval_node *uncle;
+
uncle = gparent->in_left;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
@@ -427,6 +430,7 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_right)) {
struct interval_node *o_left;
+
o_left = tmp->in_left;
if (o_left)
o_left->in_color = INTERVAL_BLACK;
@@ -458,6 +462,7 @@ static void interval_erase_color(struct interval_node *node,
} else {
if (node_is_black_or_0(tmp->in_left)) {
struct interval_node *o_right;
+
o_right = tmp->in_right;
if (o_right)
o_right->in_color = INTERVAL_BLACK;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 0c09b611f4a6..a89eebaedabf 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -182,7 +182,9 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
root = &res->lr_itree[idx].lit_root;
found = interval_insert(&node->li_node, root);
if (found) { /* The policy group found. */
- struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+ struct ldlm_interval *tmp;
+
+ tmp = ldlm_interval_detach(lock);
LASSERT(tmp != NULL);
ldlm_interval_free(tmp);
ldlm_interval_attach(to_ldlm_interval(found), lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b798daa094bc..a4c252febfe4 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -260,7 +260,8 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
- CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
+ CDEBUG(D_DLMTRACE,
+ "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
*flags, new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
@@ -291,6 +292,7 @@ reprocess:
}
} else {
int reprocess_failed = 0;
+
lockmode_verify(mode);
/* This loop determines if there are existing locks
@@ -496,7 +498,8 @@ reprocess:
new->l_policy_data.l_flock.end + 1;
new2->l_conn_export = lock->l_conn_export;
if (lock->l_export != NULL) {
- new2->l_export = class_export_lock_get(lock->l_export, new2);
+ new2->l_export = class_export_lock_get(lock->l_export,
+ new2);
if (new2->l_export->exp_lock_hash &&
hlist_unhashed(&new2->l_exp_hash))
cfs_hash_add(new2->l_export->exp_lock_hash,
@@ -619,8 +622,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "sleeping");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
fwd.fwd_lock = lock;
obd = class_exp2obd(lock->l_conn_export);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index f997566ef11e..6c6c57ca91de 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -91,7 +91,8 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
}
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
+ ldlm_side_t);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
/* ldlm_request.c */
@@ -214,6 +215,7 @@ static inline struct ldlm_extent *
ldlm_interval_extent(struct ldlm_interval *node)
{
struct ldlm_lock *lock;
+
LASSERT(!list_empty(&node->li_group));
lock = list_entry(node->li_group.next, struct ldlm_lock,
@@ -244,7 +246,7 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
\
return lprocfs_rd_uint(m, &tmp); \
} \
- struct __##var##__dummy_read {;} /* semicolon catcher */
+ struct __##var##__dummy_read {; } /* semicolon catcher */
#define LDLM_POOL_PROC_WRITER(var, type) \
static int lprocfs_wr_##var(struct file *file, const char *buffer, \
@@ -266,7 +268,7 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
\
return rc; \
} \
- struct __##var##__dummy_write {;} /* semicolon catcher */
+ struct __##var##__dummy_write {; } /* semicolon catcher */
static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
{
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index c21e30a074b9..c5c86e73ca52 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -603,7 +603,8 @@ int client_disconnect_export(struct obd_export *exp)
/* obd_force == local only */
ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
obd->obd_force ? LCF_LOCAL : 0, NULL);
- ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
+ ldlm_namespace_free_prior(obd->obd_namespace, imp,
+ obd->obd_force);
}
/* There's no need to hold sem while disconnecting an import,
@@ -858,8 +859,8 @@ void ldlm_dump_export_locks(struct obd_export *exp)
if (!list_empty(&exp->exp_locks_list)) {
struct ldlm_lock *lock;
- CERROR("dumping locks for export %p,"
- "ignore if the unmount doesn't hang\n", exp);
+ CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
+ exp);
list_for_each_entry(lock, &exp->exp_locks_list,
l_exp_refs_link)
LDLM_ERROR(lock, "lock:");
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 6140130b6056..8191005464b1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -226,6 +226,7 @@ EXPORT_SYMBOL(ldlm_lock_put);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
+
if (!list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
@@ -575,7 +576,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
+ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -811,8 +812,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/* If we received a blocked AST and this was the last reference,
* run the callback. */
if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
- CERROR("FL_CBPENDING set on non-local lock--just a "
- "warning\n");
+ CERROR("FL_CBPENDING set on non-local lock--just a warning\n");
LDLM_DEBUG(lock, "final decref done on cbpending lock");
@@ -859,6 +859,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
+
LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -981,7 +982,6 @@ static void search_granted_lock(struct list_head *queue,
prev->res_link = queue->prev;
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
- return;
}
/**
@@ -1287,6 +1287,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
+
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
LDLM_FL_WAIT_NOREPROC,
@@ -1340,9 +1341,10 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
} else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res %llu/%llu (%llu %llu)",
- ns, type, mode, res_id->name[0], res_id->name[1],
+ ns, type, mode, res_id->name[0],
+ res_id->name[1],
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
- res_id->name[2] :policy->l_extent.start,
+ res_id->name[2] : policy->l_extent.start,
(type == LDLM_PLAIN || type == LDLM_IBITS) ?
res_id->name[3] : policy->l_extent.end);
}
@@ -1453,7 +1455,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
memcpy(data, lvb, size);
} else {
- LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
+ LDLM_ERROR(lock,
+ "Replied unexpected lquota LVB size %d",
size);
return -EINVAL;
}
@@ -1641,8 +1644,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
ldlm_grant_lock(lock, NULL);
goto out;
} else {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
@@ -1820,24 +1822,24 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
arg->list = rpc_list;
switch (ast_type) {
- case LDLM_WORK_BL_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_bl_ast_lock;
- break;
- case LDLM_WORK_CP_AST:
- arg->type = LDLM_CP_CALLBACK;
- work_ast_lock = ldlm_work_cp_ast_lock;
- break;
- case LDLM_WORK_REVOKE_AST:
- arg->type = LDLM_BL_CALLBACK;
- work_ast_lock = ldlm_work_revoke_ast_lock;
- break;
- case LDLM_WORK_GL_AST:
- arg->type = LDLM_GL_CALLBACK;
- work_ast_lock = ldlm_work_gl_ast_lock;
- break;
- default:
- LBUG();
+ case LDLM_WORK_BL_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_bl_ast_lock;
+ break;
+ case LDLM_WORK_CP_AST:
+ arg->type = LDLM_CP_CALLBACK;
+ work_ast_lock = ldlm_work_cp_ast_lock;
+ break;
+ case LDLM_WORK_REVOKE_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_revoke_ast_lock;
+ break;
+ case LDLM_WORK_GL_AST:
+ arg->type = LDLM_GL_CALLBACK;
+ work_ast_lock = ldlm_work_gl_ast_lock;
+ break;
+ default:
+ LBUG();
}
/* We create a ptlrpc request set with flow control extension.
@@ -1904,8 +1906,7 @@ void ldlm_reprocess_all(struct ldlm_resource *res)
LIST_HEAD(rpc_list);
if (!ns_is_client(ldlm_res_to_ns(res))) {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
}
@@ -2038,8 +2039,7 @@ int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
ecl->ecl_loop++;
if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
CDEBUG(D_INFO,
- "Cancel lock %p for export %p (loop %d), still have "
- "%d locks left on hash table.\n",
+ "Cancel lock %p for export %p (loop %d), still have %d locks left on hash table.\n",
lock, exp, ecl->ecl_loop,
atomic_read(&hs->hs_count));
}
@@ -2169,8 +2169,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
lock->l_completion_ast(lock, 0, NULL);
}
} else {
- CERROR("This is client-side-only module, cannot handle "
- "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
unlock_res_and_lock(lock);
@@ -2224,23 +2223,21 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
} else if (exp && exp->exp_obd != NULL) {
struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+
nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
}
if (resource == NULL) {
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu "
- "lvb_type: %d\n",
- lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
va_end(args);
return;
}
@@ -2248,90 +2245,78 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
switch (resource->lr_type) {
case LDLM_EXTENT:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] "
- "(req %llu->%llu) flags: %#llx nid: %s remote: "
- "%#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_extent.start,
- lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_extent.start,
+ lock->l_policy_data.l_extent.end,
+ lock->l_req_extent.start, lock->l_req_extent.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
case LDLM_FLOCK:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s pid: %d "
- "[%llu->%llu] flags: %#llx nid: %s "
- "remote: %#llx expref: %d pid: %u timeout: %lu\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.start,
- lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_flock.pid,
+ lock->l_policy_data.l_flock.start,
+ lock->l_policy_data.l_flock.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout);
break;
case LDLM_IBITS:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" bits %#llx rrc: %d type: %s "
- "flags: %#llx nid: %s remote: %#llx expref: %d "
- "pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- lock->l_policy_data.l_inodebits.bits,
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ lock->l_policy_data.l_inodebits.bits,
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
default:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
- "res: "DLDLMRES" rrc: %d type: %s flags: %#llx "
- "nid: %s remote: %#llx expref: %d pid: %u "
- "timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- PLDLMRES(resource),
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout,
- lock->l_lvb_type);
+ " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
}
va_end(args);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 91cf7ebae114..98fbd3f7e751 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -158,13 +158,15 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
unlock_res_and_lock(lock);
if (do_ast) {
- CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
- lock, lock->l_blocking_ast);
+ CDEBUG(D_DLMTRACE,
+ "Lock %p already unused, calling callback (%p)\n", lock,
+ lock->l_blocking_ast);
if (lock->l_blocking_ast != NULL)
lock->l_blocking_ast(lock, ld, lock->l_ast_data,
LDLM_CB_BLOCKING);
} else {
- CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
+ CDEBUG(D_DLMTRACE,
+ "Lock %p is referenced, will be cancelled later\n",
lock);
}
@@ -190,6 +192,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
+
while (to > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
@@ -210,9 +213,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
LASSERT(lock->l_lvb_data != NULL);
if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than "
- "expectation, expected = %d, "
- "replied = %d",
+ LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
lock->l_lvb_len, lvb_len);
rc = -EINVAL;
goto out;
@@ -639,8 +640,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
if (!lock) {
- CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock "
- "disappeared\n", dlm_req->lock_handle[0].cookie);
+ CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
+ dlm_req->lock_handle[0].cookie);
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
&dlm_req->lock_handle[0]);
@@ -663,8 +664,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
if (((lock->l_flags & LDLM_FL_CANCELING) &&
(lock->l_flags & LDLM_FL_BL_DONE)) ||
(lock->l_flags & LDLM_FL_FAILED)) {
- LDLM_DEBUG(lock, "callback on lock "
- "%#llx - lock disappeared\n",
+ LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
LDLM_LOCK_RELEASE(lock);
@@ -724,7 +724,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_work_item *blwi = NULL;
- static unsigned int num_bl = 0;
+ static unsigned int num_bl;
spin_lock(&blp->blp_lock);
/* process a request from the blp_list at least every blp_num_threads */
@@ -887,6 +887,7 @@ void ldlm_put_ref(void)
mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
+
if (rc)
CERROR("ldlm_cleanup failed: %d\n", rc);
else
@@ -969,6 +970,7 @@ static cfs_hash_ops_t ldlm_export_lock_ops = {
int ldlm_init_export(struct obd_export *exp)
{
int rc;
+
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
@@ -1049,7 +1051,7 @@ static int ldlm_setup(void)
.so_req_handler = ldlm_callback_handler,
},
};
- ldlm_state->ldlm_cb_service = \
+ ldlm_state->ldlm_cb_service =
ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
if (IS_ERR(ldlm_state->ldlm_cb_service)) {
CERROR("failed to start service\n");
@@ -1077,7 +1079,7 @@ static int ldlm_setup(void)
blp->blp_min_threads = LDLM_NTHRS_INIT;
blp->blp_max_threads = LDLM_NTHRS_MAX;
} else {
- blp->blp_min_threads = blp->blp_max_threads = \
+ blp->blp_min_threads = blp->blp_max_threads =
min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
ldlm_num_threads));
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 6054eee848d3..4c838f615a64 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -54,10 +54,10 @@
* calculated as the number of locks in LRU * lock live time in seconds. If
* CLV > SLV - lock is canceled.
*
- * Client has LVF, that is, lock volume factor which regulates how much sensitive
- * client should be about last SLV from server. The higher LVF is the more locks
- * will be canceled on client. Default value for it is 1. Setting LVF to 2 means
- * that client will cancel locks 2 times faster.
+ * Client has LVF, that is, lock volume factor which regulates how much
+ * sensitive client should be about last SLV from server. The higher LVF is the
+ * more locks will be canceled on client. Default value for it is 1. Setting LVF
+ * to 2 means that client will cancel locks 2 times faster.
*
* Locks on a client will be canceled more intensively in these cases:
* (1) if SLV is smaller, that is, load is higher on the server;
@@ -70,11 +70,12 @@
* if flow is getting thinner, more and more particles become outside of it and
* as particles are locks, they should be canceled.
*
- * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com). Andreas
- * Dilger (adilger@clusterfs.com) proposed few nice ideas like using LVF and many
- * cleanups. Flow definition to allow more easy understanding of the logic belongs
- * to Nikita Danilov (nikita@clusterfs.com) as well as many cleanups and fixes.
- * And design and implementation are done by Yury Umanets (umka@clusterfs.com).
+ * General idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
+ * Andreas Dilger (adilger@clusterfs.com) proposed few nice ideas like using
+ * LVF and many cleanups. Flow definition to allow more easy understanding of
+ * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many
+ * cleanups and fixes. And design and implementation are done by Yury Umanets
+ * (umka@clusterfs.com).
*
* Glossary for terms used:
*
@@ -265,16 +266,15 @@ static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
* SLV. And the opposite, the more grant plan is over-consumed
* (load time) the faster drops SLV.
*/
- slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
do_div(slv_factor, limit);
slv = slv * slv_factor;
slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
- if (slv > ldlm_pool_slv_max(limit)) {
+ if (slv > ldlm_pool_slv_max(limit))
slv = ldlm_pool_slv_max(limit);
- } else if (slv < ldlm_pool_slv_min(limit)) {
+ else if (slv < ldlm_pool_slv_min(limit))
slv = ldlm_pool_slv_min(limit);
- }
pl->pl_server_lock_volume = slv;
}
@@ -614,8 +614,8 @@ int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
lprocfs_counter_add(pl->pl_stats,
LDLM_POOL_SHRINK_FREED_STAT,
cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
- "shrunk %d\n", pl->pl_name, nr, cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
+ pl->pl_name, nr, cancel);
}
}
return cancel;
@@ -636,7 +636,7 @@ int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
}
EXPORT_SYMBOL(ldlm_pool_setup);
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
int granted, grant_rate, cancel_rate, grant_step;
@@ -696,8 +696,9 @@ LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
+ const char *buf, size_t len,
+ loff_t *off)
{
struct seq_file *seq = file->private_data;
@@ -943,6 +944,7 @@ EXPORT_SYMBOL(ldlm_pool_del);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
spin_unlock(&pl->pl_lock);
@@ -971,6 +973,7 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
spin_unlock(&pl->pl_lock);
@@ -1132,23 +1135,27 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
-static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}
-static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_count(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}
-static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
+ struct shrink_control *sc)
{
return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
sc->gfp_mask);
@@ -1194,10 +1201,8 @@ int ldlm_pools_recalc(ldlm_side_t client)
* of limit.
*/
if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
- CWARN("\"Modest\" pools eat out 2/3 of server locks "
- "limit (%d of %lu). This means that you have too "
- "many clients for this amount of server RAM. "
- "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
+ CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
+ nr_l, LDLM_POOL_HOST_L);
equal = 1;
}
@@ -1205,8 +1210,7 @@ int ldlm_pools_recalc(ldlm_side_t client)
* The rest is given to greedy namespaces.
*/
list_for_each_entry(ns, ldlm_namespace_list(client),
- ns_list_chain)
- {
+ ns_list_chain) {
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
@@ -1383,9 +1387,8 @@ static int ldlm_pools_thread_start(void)
static void ldlm_pools_thread_stop(void)
{
- if (ldlm_pools_thread == NULL) {
+ if (ldlm_pools_thread == NULL)
return;
- }
thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
wake_up(&ldlm_pools_thread->t_ctl_waitq);
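
[Editorial example] The rewrapped ldlm_pool.c comment above spells out the client-side rule: the client lock volume (CLV) is the number of locks in the LRU times the lock live time in seconds, the server advertises its SLV, and a lock is cancelled once CLV exceeds SLV, with the lock volume factor (LVF, default 1) making cancellation proportionally more aggressive. A standalone sketch of that threshold, with illustrative names and an assumed LVF scaling rather than Lustre's actual pool code:

#include <linux/types.h>	/* u64, bool */

/* Illustrative only: not Lustre's ldlm_pool implementation. */
static bool lru_lock_should_cancel(u64 nr_lru_locks, u64 lock_age_seconds,
				   u64 server_lock_volume,
				   unsigned int lock_volume_factor)
{
	/* CLV = locks in LRU * lock live time; scaling by LVF models
	 * "setting LVF to 2 cancels locks 2 times faster". */
	u64 clv = nr_lru_locks * lock_age_seconds * lock_volume_factor;

	return clv > server_lock_volume;
}

The sketch only captures the CLV-versus-SLV comparison the comment describes; in the driver the decision is folded into the LRU cancellation policies (see the ldlm_cancel_*_policy helpers in the ldlm_request.c diff below).
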
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 9ce437b18793..287da325d928 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -95,16 +95,14 @@ int ldlm_expired_completion_wait(void *data)
struct obd_device *obd;
if (lock->l_conn_export == NULL) {
- static unsigned long next_dump = 0, last_dump = 0;
+ static unsigned long next_dump, last_dump;
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
lock->l_last_activity,
cfs_time_sub(get_seconds(),
lock->l_last_activity));
- LDLM_DEBUG(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
- "server code, just going back to sleep",
+ LDLM_DEBUG(lock, "lock timed out (enqueued at " CFS_TIME_T ", " CFS_DURATION_T "s ago); not entering recovery in server code, just going back to sleep",
lock->l_last_activity,
cfs_time_sub(get_seconds(),
lock->l_last_activity));
@@ -137,6 +135,7 @@ EXPORT_SYMBOL(ldlm_expired_completion_wait);
int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
int timeout = at_get(ldlm_lock_to_ns_at(lock));
+
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
@@ -191,8 +190,7 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return ldlm_completion_tail(lock);
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "going forward");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
ldlm_reprocess_all(lock->l_resource);
return 0;
}
@@ -240,17 +238,15 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "sleeping");
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
noreproc:
obd = class_exp2obd(lock->l_conn_export);
/* if this is a local lock, then there is no import */
- if (obd != NULL) {
+ if (obd != NULL)
imp = obd->u.cli.cl_import;
- }
/* Wait a long time for enqueue - server may have to callback a
lock from another client. Server will evict the other client if it
@@ -324,8 +320,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
if (rc < 0)
CERROR("ldlm_cli_cancel: %d\n", rc);
} else {
- LDLM_DEBUG(lock, "Lock still has references, will be "
- "cancelled later");
+ LDLM_DEBUG(lock, "Lock still has references, will be cancelled later");
}
return 0;
}
@@ -483,8 +478,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
if (need_cancel)
LDLM_DEBUG(lock,
- "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
- "LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
else
LDLM_DEBUG(lock, "lock was granted or failed in race");
@@ -557,8 +551,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = size;
goto cleanup;
} else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than "
- "expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
lvb_len, size);
rc = -EINVAL;
goto cleanup;
@@ -608,6 +601,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
* again. */
if ((*flags) & LDLM_FL_LOCK_CHANGED) {
int newmode = reply->lock_desc.l_req_mode;
+
LASSERT(!is_replay);
if (newmode && newmode != lock->l_req_mode) {
LDLM_DEBUG(lock, "server returned different mode %s",
@@ -676,6 +670,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
if (lock->l_completion_ast != NULL) {
int err = lock->l_completion_ast(lock, *flags, NULL);
+
if (!rc)
rc = err;
if (rc)
@@ -725,6 +720,7 @@ static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
int off)
{
int size = req_capsule_msg_size(pill, loc);
+
return ldlm_req_handles_avail(size, off);
}
@@ -733,6 +729,7 @@ static inline int ldlm_format_handles_avail(struct obd_import *imp,
enum req_location loc, int off)
{
int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+
return ldlm_req_handles_avail(size, off);
}
@@ -1107,8 +1104,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
unlock_res_and_lock(lock);
if (local_only) {
- CDEBUG(D_DLMTRACE, "not sending request (at caller's "
- "instruction)\n");
+ CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
rc = LDLM_FL_LOCAL_ONLY;
}
ldlm_lock_cancel(lock);
@@ -1223,8 +1219,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
rc = ptlrpc_queue_wait(req);
}
if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE, "client/server (nid %s) "
- "out of sync -- not fatal\n",
+ CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
imp_connection->c_peer.nid));
rc = 0;
@@ -1235,8 +1230,8 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
} else if (rc != ELDLM_OK) {
/* -ESHUTDOWN is common on umount */
CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
- "Got rc %d from cancel RPC: "
- "canceling anyway\n", rc);
+ "Got rc %d from cancel RPC: canceling anyway\n",
+ rc);
break;
}
sent = count;
@@ -1279,7 +1274,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
* server-side namespace is not possible. */
if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
- DEBUG_REQ(D_HA, req, "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
+ DEBUG_REQ(D_HA, req,
+ "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
return 0;
@@ -1416,19 +1412,20 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
{
ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+
lock_res_and_lock(lock);
/* don't check added & count since we want to process all locks
* from unused list */
switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (cb && cb(lock))
- break;
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (cb && cb(lock))
break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock->l_flags |= LDLM_FL_SKIPPED;
+ break;
}
unlock_res_and_lock(lock);
@@ -1594,8 +1591,9 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* sending any RPCs or waiting for any
* outstanding RPC to complete.
*/
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
+ struct list_head *cancels, int count, int max,
+ int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
@@ -1730,6 +1728,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
int flags)
{
int added;
+
added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
if (added <= 0)
return added;
@@ -1918,7 +1917,8 @@ struct ldlm_cli_cancel_arg {
void *lc_opaque;
};
-static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
+ struct cfs_hash_bd *bd,
struct hlist_node *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
@@ -2014,6 +2014,7 @@ struct iter_helper_data {
static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
struct iter_helper_data *helper = closure;
+
return helper->iter(lock, helper->closure);
}
@@ -2080,7 +2081,8 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
/* we use l_pending_chain here, because it's unused on clients. */
LASSERTF(list_empty(&lock->l_pending_chain),
"lock %p next %p prev %p\n",
- lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
+ lock, &lock->l_pending_chain.next,
+ &lock->l_pending_chain.prev);
/* bug 9573: don't replay locks left after eviction, or
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
@@ -2114,8 +2116,7 @@ static int replay_lock_interpret(const struct lu_env *env,
lock = ldlm_handle2lock(&aa->lock_handle);
if (!lock) {
- CERROR("received replay ack for unknown local cookie %#llx"
- " remote cookie %#llx from server %s id %s\n",
+ CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
aa->lock_handle.cookie, reply->lock_handle.cookie,
req->rq_export->exp_client_uuid.uuid,
libcfs_id2str(req->rq_peer));
@@ -2243,9 +2244,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
int canceled;
LIST_HEAD(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
- "replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
* because the LDLM_CANCEL_NO_WAIT policy doesn't use the
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a785b7a7d1b3..1f150e46f50e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -71,7 +71,7 @@ extern unsigned int ldlm_cancel_unused_locks_before_replay;
* DDOS. */
unsigned int ldlm_dump_granted_max = 256;
-#if defined (CONFIG_PROC_FS)
+#if defined(CONFIG_PROC_FS)
static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
size_t count, loff_t *off)
{
@@ -93,7 +93,7 @@ int ldlm_proc_setup(void)
&ldlm_dump_granted_max },
{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
&ldlm_cancel_unused_locks_before_replay },
- { NULL }};
+ { NULL } };
LASSERT(ldlm_ns_proc_dir == NULL);
ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
@@ -215,8 +215,8 @@ static ssize_t lprocfs_lru_size_seq_write(struct file *file,
LDLM_CANCEL_PASSED);
if (canceled < unused) {
CDEBUG(D_DLMTRACE,
- "not all requested locks are canceled, "
- "requested: %d, canceled: %d\n", unused,
+ "not all requested locks are canceled, requested: %d, canceled: %d\n",
+ unused,
canceled);
return -EINVAL;
}
@@ -385,8 +385,8 @@ int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
#undef MAX_STRING_SIZE
#else /* CONFIG_PROC_FS */
-#define ldlm_namespace_proc_unregister(ns) ({;})
-#define ldlm_namespace_proc_register(ns) ({0;})
+#define ldlm_namespace_proc_unregister(ns) ({; })
+#define ldlm_namespace_proc_register(ns) ({0; })
#endif /* CONFIG_PROC_FS */
@@ -454,7 +454,8 @@ static void *ldlm_res_hop_object(struct hlist_node *hnode)
return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}
-static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
+ struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -462,7 +463,8 @@ static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnod
ldlm_resource_getref(res);
}
-static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
+ struct hlist_node *hnode)
{
struct ldlm_resource *res;
@@ -501,7 +503,7 @@ cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
.hs_put = ldlm_res_hop_put
};
-typedef struct {
+struct ldlm_ns_hash_def {
ldlm_ns_type_t nsd_type;
/** hash bucket bits */
unsigned nsd_bkt_bits;
@@ -509,9 +511,9 @@ typedef struct {
unsigned nsd_all_bits;
/** hash operations */
cfs_hash_ops_t *nsd_hops;
-} ldlm_ns_hash_def_t;
+};
-ldlm_ns_hash_def_t ldlm_ns_hash_defs[] = {
+struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
{
.nsd_type = LDLM_NS_TYPE_MDC,
.nsd_bkt_bits = 11,
@@ -563,7 +565,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
{
struct ldlm_namespace *ns = NULL;
struct ldlm_ns_bucket *nsb;
- ldlm_ns_hash_def_t *nsd;
+ struct ldlm_ns_hash_def *nsd;
struct cfs_hash_bd bd;
int idx;
int rc;
@@ -576,7 +578,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
return NULL;
}
- for (idx = 0;;idx++) {
+ for (idx = 0;; idx++) {
nsd = &ldlm_ns_hash_defs[idx];
if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
CERROR("Unknown type %d for ns %s\n", ns_type, name);
@@ -735,8 +737,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
} else {
ldlm_resource_unlink_lock(lock);
unlock_res(res);
- LDLM_DEBUG(lock, "Freeing a lock still held by a "
- "client node");
+ LDLM_DEBUG(lock, "Freeing a lock still held by a client node");
ldlm_lock_destroy(lock);
}
LDLM_LOCK_RELEASE(lock);
@@ -805,6 +806,7 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
if (atomic_read(&ns->ns_bref) > 0) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc;
+
CDEBUG(D_DLMTRACE,
"dlm namespace %s free waiting on refcount %d\n",
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
@@ -818,16 +820,14 @@ force_wait:
/* Forced cleanups should be able to reclaim all references,
* so it's safe to wait forever... we can't leak locks... */
if (force && rc == -ETIMEDOUT) {
- LCONSOLE_ERROR("Forced cleanup waiting for %s "
- "namespace with %d resources in use, "
- "(rc=%d)\n", ldlm_ns_name(ns),
+ LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
+ ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
goto force_wait;
}
if (atomic_read(&ns->ns_bref)) {
- LCONSOLE_ERROR("Cleanup waiting for %s namespace "
- "with %d resources in use, (rc=%d)\n",
+ LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
return ELDLM_NAMESPACE_EXISTS;
@@ -1335,6 +1335,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
+
ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
@@ -1404,8 +1405,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {
- CDEBUG(level, "only dump %d granted locks to "
- "avoid DDOS.\n", granted);
+ CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
+ granted);
break;
}
}
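
Note on the ldlm_resource.c hunks above: besides the message clean-ups, the patch drops the ldlm_ns_hash_def_t typedef in favour of a plain named struct, matching the kernel coding-style advice that typedefs should not be used to hide aggregate types. A minimal sketch of the pattern, using hypothetical names rather than the driver's real fields:

/* Discouraged: the typedef hides the fact that callers hold a struct. */
typedef struct {
	unsigned int	bkt_bits;
	unsigned int	all_bits;
} example_hash_def_t;

/* Preferred: keep the struct keyword visible at every declaration site. */
struct example_hash_def {
	unsigned int	bkt_bits;
	unsigned int	all_bits;
};

static struct example_hash_def example_defs[4];
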
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index ba43b3067fa3..a7a7ac626aaf 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -318,9 +318,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
if (t >= 1 && matched == n) {
/* don't print warning for lctl set_param debug=0 or -1 */
if (m != 0 && m != -1)
- CWARN("You are trying to use a numerical value for the "
- "mask - this will be deprecated in a future "
- "release.\n");
+ CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
*mask = m;
return 0;
}
@@ -375,8 +373,8 @@ void libcfs_debug_dumplog(void)
(void *)(long)current_pid(),
"libcfs_debug_dumper");
if (IS_ERR(dumper))
- pr_err("LustreError: cannot start log dump thread:"
- " %ld\n", PTR_ERR(dumper));
+ pr_err("LustreError: cannot start log dump thread: %ld\n",
+ PTR_ERR(dumper));
else
schedule();
@@ -412,7 +410,7 @@ int libcfs_debug_init(unsigned long bufsize)
max = TCD_MAX_PAGES;
} else {
max = (max / num_possible_cpus());
- max = (max << (20 - PAGE_CACHE_SHIFT));
+ max = max << (20 - PAGE_CACHE_SHIFT);
}
rc = cfs_tracefile_init(max);
@@ -460,11 +458,3 @@ void libcfs_debug_set_level(unsigned int debug_level)
}
EXPORT_SYMBOL(libcfs_debug_set_level);
-
-void libcfs_log_goto(struct libcfs_debug_msg_data *msgdata, const char *label,
- long_ptr_t rc)
-{
- libcfs_debug_msg(msgdata, "Process leaving via %s (rc=%lu : %ld : %#lx)\n",
- label, (ulong_ptr_t)rc, rc, rc);
-}
-EXPORT_SYMBOL(libcfs_log_goto);
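
Most hunks in this patch join user-visible format strings that had been split across several source lines. checkpatch.pl warns about quoted strings split across lines, and the coding-style guidance of the time was not to break user-visible strings even when the line then runs past 80 columns, so that a reported message can be found with a single grep. A small sketch of the pattern, not taken from the driver:

#include <linux/printk.h>

static void report_dump_failure(long err)
{
	/* Old style: pr_err("cannot start log dump thread:"
	 *		     " %ld\n", err);
	 * The text "cannot start log dump thread: %ld" never appears
	 * verbatim in the sources, so grepping for it finds nothing. */
	pr_err("cannot start log dump thread: %ld\n", err);
}
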
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index e73ca3df9734..92444b0fe2a3 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -103,18 +103,18 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
}
switch (set) {
-		case CFS_FAIL_LOC_NOSET:
-		case CFS_FAIL_LOC_VALUE:
-			break;
-		case CFS_FAIL_LOC_ORSET:
-			cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
-			break;
-		case CFS_FAIL_LOC_RESET:
-			cfs_fail_loc = value;
-			break;
-		default:
-			LASSERTF(0, "called with bad set %u\n", set);
-			break;
+	case CFS_FAIL_LOC_NOSET:
+	case CFS_FAIL_LOC_VALUE:
+		break;
+	case CFS_FAIL_LOC_ORSET:
+		cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
+		break;
+	case CFS_FAIL_LOC_RESET:
+		cfs_fail_loc = value;
+		break;
+	default:
+		LASSERTF(0, "called with bad set %u\n", set);
+		break;
}
return 1;
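
The fail.c hunk above is purely a re-indent: kernel style puts case labels at the same indentation level as the switch keyword rather than one level deeper. A standalone sketch of the expected layout (the constants and helper are illustrative, not the driver's):

#include <linux/bug.h>
#include <linux/types.h>

static void apply_fail_loc(u32 *loc, u32 value, int set)
{
	switch (set) {
	case 0:				/* leave the location untouched */
		break;
	case 1:				/* OR the new bits in */
		*loc |= value;
		break;
	case 2:				/* replace the value outright */
		*loc = value;
		break;
	default:
		WARN_ONCE(1, "called with bad set %d\n", set);
		break;
	}
}
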
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 3b67b7b6428c..2d1e6729e996 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -1121,8 +1121,7 @@ cfs_hash_destroy(struct cfs_hash *hs)
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
hlist_for_each_safe(hnode, pos, hhead) {
LASSERTF(!cfs_hash_with_assert_empty(hs),
- "hash %s bucket %u(%u) is not "
- " empty: %u items left\n",
+ "hash %s bucket %u(%u) is not empty: %u items left\n",
hs->hs_name, bd.bd_bucket->hsb_index,
bd.bd_offset, bd.bd_bucket->hsb_count);
/* can't assert key validate, because we
@@ -1371,8 +1370,7 @@ cfs_hash_lookup(struct cfs_hash *hs, const void *key)
EXPORT_SYMBOL(cfs_hash_lookup);
static void
-cfs_hash_for_each_enter(struct cfs_hash *hs)
-{
+cfs_hash_for_each_enter(struct cfs_hash *hs) {
LASSERT(!cfs_hash_is_exiting(hs));
if (!cfs_hash_with_rehash(hs))
@@ -1397,8 +1395,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
}
static void
-cfs_hash_for_each_exit(struct cfs_hash *hs)
-{
+cfs_hash_for_each_exit(struct cfs_hash *hs) {
int remained;
int bits;
@@ -1429,8 +1426,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
*/
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
- void *data, int remove_safe)
-{
+ void *data, int remove_safe) {
struct hlist_node *hnode;
struct hlist_node *pos;
struct cfs_hash_bd bd;
@@ -1523,8 +1519,7 @@ EXPORT_SYMBOL(cfs_hash_for_each);
void
cfs_hash_for_each_safe(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
@@ -1572,8 +1567,8 @@ EXPORT_SYMBOL(cfs_hash_size_get);
* two cases, so iteration has to be stopped on change.
*/
static int
-cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
-{
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
+ void *data) {
struct hlist_node *hnode;
struct hlist_node *tmp;
struct cfs_hash_bd bd;
@@ -1634,8 +1629,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *
int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs))
@@ -1667,8 +1661,7 @@ EXPORT_SYMBOL(cfs_hash_for_each_nolock);
*/
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
unsigned i = 0;
if (cfs_hash_with_no_lock(hs))
@@ -1726,8 +1719,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
*/
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
- cfs_hash_for_each_cb_t func, void *data)
-{
+ cfs_hash_for_each_cb_t func, void *data) {
struct hlist_node *hnode;
struct cfs_hash_bd bds[2];
unsigned i;
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index dbb81b6cc200..31a558115a96 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -38,7 +38,7 @@
#include "../../include/linux/libcfs/libcfs.h"
/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly = NULL;
+struct cfs_cpt_table *cfs_cpt_table __read_mostly;
EXPORT_SYMBOL(cfs_cpt_table);
#ifndef HAVE_LIBCFS_CPT
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 224c65b5ce4e..05f7595f18aa 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -333,8 +333,8 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
/* caller doesn't know the partition ID */
cpt = cptab->ctb_cpu2cpt[cpu];
if (cpt < 0) { /* not set in this CPT-table */
- CDEBUG(D_INFO, "Try to unset cpu %d which is "
- "not in CPT-table %p\n", cpt, cptab);
+ CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n",
+ cpt, cptab);
return;
}
@@ -384,8 +384,8 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
int i;
if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
- CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
- "for CPU partition %d\n", cpt);
+ CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
+ cpt);
return 0;
}
@@ -579,9 +579,8 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
}
if (any_online_cpu(*cpumask) == NR_CPUS) {
- CERROR("No online CPU found in CPU partition %d, did someone "
- "do CPU hotplug on system? You might need to reload "
- "Lustre modules to keep system working well.\n", cpt);
+ CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
+ cpt);
return -EINVAL;
}
@@ -737,16 +736,12 @@ cfs_cpt_table_create(int ncpt)
ncpt = rc;
if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
- CWARN("CPU partition number %d is larger than suggested "
- "value (%d), your system may have performance"
- "issue or run out of memory while under pressure\n",
+ CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",
ncpt, rc);
}
if (num_online_cpus() % ncpt != 0) {
- CERROR("CPU number %d is not multiple of cpu_npartition %d, "
- "please try different cpu_npartitions value or"
- "set pattern string by cpu_pattern=STRING\n",
+ CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
(int)num_online_cpus(), ncpt);
goto failed;
}
@@ -796,8 +791,7 @@ cfs_cpt_table_create(int ncpt)
if (cpt != ncpt ||
num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
- CERROR("Expect %d(%d) CPU partitions but got %d(%d), "
- "CPU hotplug/unplug while setting?\n",
+ CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
cptab->ctb_nparts, num, cpt,
cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
goto failed;
@@ -808,8 +802,7 @@ cfs_cpt_table_create(int ncpt)
return cptab;
failed:
- CERROR("Failed to setup CPU-partition-table with %d "
- "CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
+ CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
ncpt, num_online_nodes(), num_online_cpus());
if (mask != NULL)
@@ -975,9 +968,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
mutex_unlock(&cpt_data.cpt_mutex);
CDEBUG(warn ? D_WARNING : D_INFO,
- "Lustre: can't support CPU plug-out well now, "
- "performance and stability could be impacted "
- "[CPU %u action: %lx]\n", cpu, action);
+ "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
+ cpu, action);
}
return NOTIFY_OK;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
index 3298ddf6d3be..12005a71aa73 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -87,8 +87,7 @@ void libcfs_run_debug_log_upcall(char *file)
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET debug log upcall %s %s; "
- "check /proc/sys/lnet/debug_log_upcall\n",
+ CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n",
rc, argv[0], argv[1]);
} else {
CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
@@ -114,8 +113,7 @@ void libcfs_run_upcall(char **argv)
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; "
- "check /proc/sys/lnet/upcall\n",
+ CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n",
rc, argv[0], argv[1],
argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index bbe2c68c18a6..83d3f08a37b2 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -365,8 +365,8 @@ static int __proc_cpt_table(void *data, int write,
if (rc >= 0)
break;
- LIBCFS_FREE(buf, len);
if (rc == -EFBIG) {
+ LIBCFS_FREE(buf, len);
len <<= 1;
continue;
}
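
The linux-proc.c hunk narrows where the temporary buffer is freed: it is now released only in the -EFBIG branch, i.e. just before the buffer is reallocated at twice the size, while other error paths fall through to the existing cleanup with the pointer intact. A simplified sketch of the grow-and-retry shape, with the cleanup collapsed and format_table() standing in as a hypothetical formatter rather than a libcfs function:

#include <linux/errno.h>
#include <linux/slab.h>

static int format_table(char *buf, int len);	/* hypothetical: -EFBIG if it did not fit */

static int read_table(char **bufp)
{
	char *buf;
	int len = 128;
	int rc;

	for (;;) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		rc = format_table(buf, len);
		if (rc >= 0)
			break;			/* it fits: hand the buffer back */

		kfree(buf);			/* too small or failed: drop it */
		if (rc != -EFBIG)
			return rc;
		len <<= 1;			/* double the buffer and try again */
	}

	*bufp = buf;
	return rc;
}
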
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index 939b33dd6520..b91a1f95bbd0 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -279,8 +279,7 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
- CERROR("Can't set socket send timeout "
- "%ld.%06d: %d\n",
+ CERROR("Can't set socket send timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 7561030c96e6..5917c31c7ed6 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -196,8 +196,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
*/
if (printk_ratelimit())
- printk(KERN_WARNING "debug daemon buffer overflowed; "
- "discarding 10%% of pages (%d of %ld)\n",
+ printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
pgcount + 1, tcd->tcd_cur_pages);
INIT_LIST_HEAD(&pc.pc_pages);
@@ -357,8 +356,8 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
}
if (*(string_buf+needed-1) != '\n')
- printk(KERN_INFO "format at %s:%d:%s doesn't end in "
- "newline\n", file, msgdata->msg_line, msgdata->msg_fn);
+ printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
+ file, msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
debug_buf = (char *)page_address(tage->page) + tage->used;
@@ -715,8 +714,8 @@ int cfs_tracefile_dump_all_pages(char *filename)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote "
- "%d\n", tage->used, rc);
+ printk(KERN_WARNING "wanted to write %u but wrote %d\n",
+ tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
break;
@@ -875,8 +874,8 @@ int cfs_trace_daemon_command(char *str)
strcpy(cfs_tracefile, str);
printk(KERN_INFO
- "Lustre: debug daemon will attempt to start writing "
- "to %s (%lukB max)\n", cfs_tracefile,
+ "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
+ cfs_tracefile,
(long)(cfs_tracefile_size >> 10));
cfs_trace_start_thread();
@@ -914,15 +913,15 @@ int cfs_trace_set_debug_mb(int mb)
if (mb < num_possible_cpus()) {
printk(KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, "
- "setting it to %d MB.\n", mb, num_possible_cpus());
+ "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
+ mb, num_possible_cpus());
mb = num_possible_cpus();
}
if (mb > limit) {
printk(KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, "
- "setting it to %d MB.\n", mb, limit);
+ "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
+ mb, limit);
mb = limit;
}
@@ -1004,8 +1003,8 @@ static int tracefiled(void *arg)
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
filp = NULL;
- printk(KERN_WARNING "couldn't open %s: "
- "%d\n", cfs_tracefile, rc);
+ printk(KERN_WARNING "couldn't open %s: %d\n",
+ cfs_tracefile, rc);
}
}
cfs_tracefile_read_unlock();
@@ -1034,8 +1033,8 @@ static int tracefiled(void *arg)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u "
- "but wrote %d\n", tage->used, rc);
+ printk(KERN_WARNING "wanted to write %u but wrote %d\n",
+ tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
}
@@ -1047,8 +1046,7 @@ static int tracefiled(void *arg)
if (!list_empty(&pc.pc_pages)) {
int i;
- printk(KERN_ALERT "Lustre: trace pages aren't "
- " empty\n");
+ printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
printk(KERN_ERR "total cpus(%d): ",
num_possible_cpus());
for (i = 0; i < num_possible_cpus(); i++)
@@ -1061,8 +1059,8 @@ static int tracefiled(void *arg)
i = 0;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
linkage)
- printk(KERN_ERR "page %d belongs to cpu "
- "%d\n", ++i, tage->cpu);
+ printk(KERN_ERR "page %d belongs to cpu %d\n",
+ ++i, tage->cpu);
printk(KERN_ERR "There are %d pages unwritten\n",
i);
}
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index f692261e9b5c..5bb9c85cec81 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -259,8 +259,8 @@ void ll_invalidate_aliases(struct inode *inode)
ll_lock_dcache(inode);
ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p "
- "inode %p flags %d\n", dentry, dentry, dentry->d_parent,
+ CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
+ dentry, dentry, dentry->d_parent,
dentry->d_inode, dentry->d_flags);
if (unlikely(dentry == dentry->d_sb->s_root)) {
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index a79fd65ec4c6..407718a0026f 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -163,7 +163,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
- page_pool = kzalloc(sizeof(page) * max_pages, GFP_NOFS);
+ page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
if (page_pool) {
page_pool[0] = page0;
} else {
@@ -228,8 +228,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
if (ll_pagevec_add(&lru_pvec, page) == 0)
ll_pagevec_lru_add_file(&lru_pvec);
} else {
- CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
- " %d\n", offset, ret);
+ CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
+ offset, ret);
}
page_cache_release(page);
}
@@ -275,14 +275,14 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
struct page *page;
int found;
- TREE_READ_LOCK_IRQ(mapping);
+ spin_lock_irq(&mapping->tree_lock);
found = radix_tree_gang_lookup(&mapping->page_tree,
(void **)&page, offset, 1);
if (found > 0) {
struct lu_dirpage *dp;
page_cache_get(page);
- TREE_READ_UNLOCK_IRQ(mapping);
+ spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
* page cannot be truncated (while DLM lock is held) and,
@@ -326,7 +326,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
}
} else {
- TREE_READ_UNLOCK_IRQ(mapping);
+ spin_unlock_irq(&mapping->tree_lock);
page = NULL;
}
return page;
@@ -600,8 +600,8 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
int api32 = ll_need_32bit_api(sbi);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
- " 32bit_api %d\n", inode->i_ino, inode->i_generation,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
+ inode->i_ino, inode->i_generation,
inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
if (lfd->lfd_pos == MDS_DIR_END_OFF) {
@@ -715,10 +715,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
break;
}
default: {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
- " %#08x != %#08x nor %#08x\n",
- lump->lmm_magic, LOV_USER_MAGIC_V1,
- LOV_USER_MAGIC_V3);
+ CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ lump->lmm_magic, LOV_USER_MAGIC_V1,
+ LOV_USER_MAGIC_V3);
return -EINVAL;
}
}
@@ -814,8 +813,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode "
- "%lu/%u: rc %d\n", inode->i_ino,
+ CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
+ inode->i_ino,
inode->i_generation, rc);
goto out;
}
@@ -1013,8 +1012,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
copy->hc_hai.hai_action == HSMA_ARCHIVE);
iput(inode);
if (rc) {
- CDEBUG(D_HSM, "Could not read file data version. "
- "Request could not be confirmed.\n");
+ CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
if (hpk.hpk_errval == 0)
hpk.hpk_errval = -rc;
goto progress;
@@ -1028,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
* to check anyway. */
if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
(copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. "
- "File content was changed during archiving. "
+ CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
DFID", start:%#llx current:%#llx\n",
PFID(&copy->hc_hai.hai_fid),
copy->hc_data_version, data_version);
@@ -1384,7 +1381,7 @@ lmv_out_free:
if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
return -EFAULT;
- if ((lumv1->lmm_magic == LOV_USER_MAGIC_V3) ) {
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
return -EFAULT;
}
@@ -1509,8 +1506,7 @@ out_rmdir:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- }
- else
+ } else
goto out_req;
}
@@ -1694,64 +1690,6 @@ out_poll:
OBD_FREE_PTR(check);
return rc;
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- case LL_IOC_QUOTACTL_18: {
- /* copy the old 1.x quota struct for internal use, then copy
- * back into old format struct. For 1.8 compatibility. */
- struct if_quotactl_18 *qctl_18;
- struct if_quotactl *qctl_20;
-
- qctl_18 = kzalloc(sizeof(*qctl_18), GFP_NOFS);
- if (!qctl_18)
- return -ENOMEM;
-
- qctl_20 = kzalloc(sizeof(*qctl_20), GFP_NOFS);
- if (!qctl_20) {
- rc = -ENOMEM;
- goto out_quotactl_18;
- }
-
- if (copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18))) {
- rc = -ENOMEM;
- goto out_quotactl_20;
- }
-
- QCTL_COPY(qctl_20, qctl_18);
- qctl_20->qc_idx = 0;
-
- /* XXX: dqb_valid was borrowed as a flag to mark that
- * only mds quota is wanted */
- if (qctl_18->qc_cmd == Q_GETQUOTA &&
- qctl_18->qc_dqblk.dqb_valid) {
- qctl_20->qc_valid = QC_MDTIDX;
- qctl_20->qc_dqblk.dqb_valid = 0;
- } else if (qctl_18->obd_uuid.uuid[0] != '\0') {
- qctl_20->qc_valid = QC_UUID;
- qctl_20->obd_uuid = qctl_18->obd_uuid;
- } else {
- qctl_20->qc_valid = QC_GENERAL;
- }
-
- rc = quotactl_ioctl(sbi, qctl_20);
-
- if (rc == 0) {
- QCTL_COPY(qctl_18, qctl_20);
- qctl_18->obd_uuid = qctl_20->obd_uuid;
-
- if (copy_to_user((void *)arg, qctl_18,
- sizeof(*qctl_18)))
- rc = -EFAULT;
- }
-
-out_quotactl_20:
- OBD_FREE_PTR(qctl_20);
-out_quotactl_18:
- OBD_FREE_PTR(qctl_18);
- return rc;
- }
-#else
-#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
-#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
case LL_IOC_QUOTACTL: {
struct if_quotactl *qctl;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index a2ae9a68a9a0..35a2df01528c 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -170,8 +170,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
* OSTs and send setattr to back to MDS. */
rc = ll_som_update(inode, op_data);
if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: "
- "rc = %d\n", inode->i_ino, rc);
+ CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
+ inode->i_ino, rc);
rc = 0;
}
} else if (rc) {
@@ -247,7 +247,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
return 0;
}
- och=*och_p;
+ och = *och_p;
*och_p = NULL;
mutex_unlock(&lli->lli_och_mutex);
@@ -358,7 +358,7 @@ int ll_file_release(struct inode *inode, struct file *file)
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd != NULL);
- /* The last ref on @file, maybe not the the owner pid of statahead.
+ /* The last ref on @file, maybe not the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
* it is me that should stop the statahead thread. */
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
@@ -975,8 +975,8 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE, "objid "DOSTID" size %llu, blocks %llu,"
- " blksize %lu\n", POSTID(oi), i_size_read(inode),
+ CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
+ POSTID(oi), i_size_read(inode),
(unsigned long long)inode->i_blocks,
1UL << inode->i_blkbits);
}
@@ -1403,8 +1403,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed "
- "on %s: rc %d\n", filename, rc);
+ CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
+ filename, rc);
goto out;
}
@@ -2221,8 +2221,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cmd == LL_IOC_SETFLAGS) {
if ((flags & LL_FILE_IGNORE_LOCK) &&
!(file->f_flags & O_DIRECT)) {
- CERROR("%s: unable to disable locking on "
- "non-O_DIRECT file\n", current->comm);
+ CERROR("%s: unable to disable locking on non-O_DIRECT file\n",
+ current->comm);
return -EINVAL;
}
@@ -2848,7 +2848,7 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh, __u64 flags,
ldlm_mode_t mode)
{
- ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_policy_data_t policy = { .l_inodebits = {bits} };
struct lu_fid *fid;
ldlm_mode_t rc;
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
index b1e39ee412cd..aec9a44120c0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_capa.c
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -540,8 +540,7 @@ static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
if (rc == -EIO && !capa_is_expired(ocapa)) {
delay_capa_renew(ocapa, 120);
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
- "renewal failed: -EIO, "
- "retry in 2 mins");
+ "renewal failed: -EIO, retry in 2 mins");
ll_capa_renewal_retries++;
goto retry;
} else {
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 84e0003f2daf..21b4a5026776 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -90,8 +90,7 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n",
+ CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
inode->i_ino, inode->i_generation,
lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
@@ -124,8 +123,8 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
+ CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
+ inode->i_ino, inode->i_generation,
lli->lli_flags);
if (!cl_local_size(inode)) {
@@ -218,8 +217,8 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
+ CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
+ inode->i_ino, inode->i_generation,
lli->lli_flags);
OBDO_ALLOC(oa);
@@ -238,9 +237,8 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to "
- "send a Size-on-MDS attribute update "
- "for inode %lu/%u\n", rc, inode->i_ino,
+ CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
+ rc, inode->i_ino,
inode->i_generation);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 77d1c12704b4..37306e0c7aad 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1435,8 +1435,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* case the dcache being cleared */
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
- "(%lu/%u) for remote lock %#llx\n", inode,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
+ inode,
inode->i_ino, inode->i_generation,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
@@ -1444,8 +1444,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
handle.cookie = it->d.lustre.it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
- " for lock %#llx\n", inode, inode->i_ino,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
+ inode, inode->i_ino,
inode->i_generation, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
@@ -1489,8 +1489,8 @@ static inline void __d_lustre_invalidate(struct dentry *dentry)
*/
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
- CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p "
- "refc %d\n", dentry, dentry,
+ CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
+ dentry, dentry,
dentry->d_parent, dentry->d_inode, d_count(dentry));
spin_lock_nested(&dentry->d_lock,
@@ -1509,24 +1509,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-/* Compatibility for old (1.8) compiled userspace quota code */
-struct if_quotactl_18 {
- __u32 qc_cmd;
- __u32 qc_type;
- __u32 qc_id;
- __u32 qc_stat;
- struct obd_dqinfo qc_dqinfo;
- struct obd_dqblk qc_dqblk;
- char obd_type[16];
- struct obd_uuid obd_uuid;
-};
-#define LL_IOC_QUOTACTL_18 _IOWR('f', 162, struct if_quotactl_18 *)
-/* End compatibility for old (1.8) compiled userspace quota code */
-#else
-#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
-#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
-
enum {
LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 7b6b9e2e0102..6e423aa6a6e4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -250,12 +250,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
data->ocd_brw_size = MD_MAX_BRW_SIZE;
- err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
+ err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
+ data, NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to complete,"
- " abort, or time out.\n", md);
+ LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ md);
goto out;
} else if (err) {
CERROR("cannot connect to %s: rc = %d\n", md, err);
@@ -267,8 +266,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init metadata layer FID infrastructure, "
- "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name, err);
goto out_md;
}
@@ -296,10 +295,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
- "feature(s) needed for correct operation "
- "of this client (%s). Please upgrade "
- "server or downgrade client.\n",
+ LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
OBD_FREE(buf, PAGE_CACHE_SIZE);
err = -EPROTO;
@@ -325,8 +321,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
!(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
+ LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
@@ -351,8 +346,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
} else {
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client claims to be remote, but server "
- "rejected, forced to be local.\n");
+ LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
}
}
@@ -429,8 +423,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
- "ocd_grant: %d\n", data->ocd_connect_flags,
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
+ data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
obd->obd_upcall.onu_owner = &sbi->ll_lco;
@@ -441,10 +435,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to "
- "complete, abort, or time out.\n", dt);
+ LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ dt);
goto out_md;
} else if (err) {
CERROR("%s: Cannot connect to %s: rc = %d\n",
@@ -457,8 +449,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init data layer FID infrastructure, "
- "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, err);
goto out_dt;
}
@@ -698,9 +690,9 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
list_for_each(tmp, &dentry->d_subdirs)
subdirs++;
- CERROR("dentry %p dump: name=%pd parent=%p, inode=%p, count=%u,"
- " flags=0x%x, fsdata=%p, %d subdirs\n", dentry, dentry,
- dentry->d_parent, dentry->d_inode, d_count(dentry),
+ CERROR("dentry %p dump: name=%pd parent=%pd (%p), inode=%p, count=%u, flags=0x%x, fsdata=%p, %d subdirs\n",
+ dentry, dentry, dentry->d_parent, dentry->d_parent,
+ dentry->d_inode, d_count(dentry),
dentry->d_flags, dentry->d_fsdata, subdirs);
if (dentry->d_inode != NULL)
ll_dump_inode(dentry->d_inode);
@@ -710,6 +702,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
list_for_each(tmp, &dentry->d_subdirs) {
struct dentry *d = list_entry(tmp, struct dentry, d_child);
+
lustre_dump_dentry(d, recur - 1);
}
}
@@ -754,9 +747,9 @@ void ll_kill_super(struct super_block *sb)
return;
sbi = ll_s2sbi(sb);
- /* we need to restore s_dev from changed for clustered NFS before put_super
- * because new kernels have cached s_dev and change sb->s_dev in
- * put_super not affected real removing devices */
+ /* we need to restore s_dev from changed for clustered NFS before
+ * put_super because new kernels have cached s_dev and change sb->s_dev
+ * in put_super not affected real removing devices */
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
@@ -814,25 +807,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
- tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
- if (tmp) {
- /* Ignore deprecated mount option. The client will
- * always try to mount with ACL support, whether this
- * is used depends on whether server supports it. */
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'acl'.\n");
- goto next;
- }
- tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
- if (tmp) {
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'noacl'.\n");
- goto next;
- }
-#else
-#warning "{no}acl options have been deprecated since 1.8, please remove them"
-#endif
tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
if (tmp) {
*flags |= tmp;
@@ -1038,9 +1012,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
if (lprof == NULL) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
- " read from the MGS. Does that filesystem "
- "exist?\n", profilenm);
+ LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
+ profilenm);
err = -EINVAL;
goto out_free;
}
@@ -1119,9 +1092,8 @@ void ll_put_super(struct super_block *sb)
}
next = 0;
- while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
+ while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
class_manual_cleanup(obd);
- }
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
@@ -1150,14 +1122,14 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
lock_res_and_lock(lock);
if (lock->l_resource->lr_lvb_inode) {
struct ll_inode_info *lli;
+
lli = ll_i2info(lock->l_resource->lr_lvb_inode);
if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
inode = igrab(lock->l_resource->lr_lvb_inode);
} else {
inode = lock->l_resource->lr_lvb_inode;
LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is "
- "bogus: magic %08x",
+ D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
lock->l_resource->lr_lvb_inode,
lli->lli_inode_magic);
inode = NULL;
@@ -1730,11 +1702,11 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->valid & OBD_MD_FLTYPE)
inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
LASSERT(inode->i_mode != 0);
- if (S_ISREG(inode->i_mode)) {
- inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
- } else {
+ if (S_ISREG(inode->i_mode))
+ inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
+ LL_MAX_BLKSIZE_BITS);
+ else
inode->i_blkbits = inode->i_sb->s_blocksize_bits;
- }
if (body->valid & OBD_MD_FLUID)
inode->i_uid = make_kuid(&init_user_ns, body->uid);
if (body->valid & OBD_MD_FLGID)
@@ -1778,9 +1750,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has "
- "size authority! do not trust "
- "the size got from MDS\n",
+ CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
inode->i_ino, lli->lli_flags);
} else {
/* Use old size assignment to avoid
@@ -1848,6 +1818,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
if (S_ISREG(inode->i_mode)) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
+
inode->i_op = &ll_file_inode_operations;
inode->i_fop = sbi->ll_fop;
inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
@@ -1878,11 +1849,10 @@ void ll_delete_inode(struct inode *inode)
/* Workaround for LU-118 */
if (inode->i_data.nrpages) {
- TREE_READ_LOCK_IRQ(&inode->i_data);
- TREE_READ_UNLOCK_IRQ(&inode->i_data);
+ spin_lock_irq(&inode->i_data.tree_lock);
+ spin_unlock_irq(&inode->i_data.tree_lock);
LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see "
- "http://jira.whamcloud.com/browse/LU-118\n",
+ "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
inode->i_ino, inode->i_generation, inode,
inode->i_data.nrpages);
}
@@ -2164,7 +2134,13 @@ int ll_obd_statfs(struct inode *inode, void *arg)
__u32 flags;
int len = 0, rc;
- if (!inode || !(sbi = ll_i2sbi(inode))) {
+ if (!inode) {
+ rc = -EINVAL;
+ goto out_statfs;
+ }
+
+ sbi = ll_i2sbi(inode);
+ if (!sbi) {
rc = -EINVAL;
goto out_statfs;
}
@@ -2396,7 +2372,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
return buf;
}
-static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
+static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
{
char *path = NULL;
@@ -2426,8 +2402,8 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
}
CDEBUG(D_WARNING,
- "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
- "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
+ "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
+ ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
PFID(&obj->cob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
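
Both the ll_dir_page_locate() hunk earlier and the LU-118 workaround in ll_delete_inode() swap the old TREE_READ_LOCK_IRQ()/TREE_READ_UNLOCK_IRQ() wrappers for direct spin_lock_irq() calls on the address_space tree_lock, the lock that protected the page-cache radix tree in kernels of this vintage. A minimal sketch of that lookup pattern, assuming the pre-xarray struct address_space layout; the helper name is invented:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

static struct page *peek_cached_page(struct address_space *mapping,
				     pgoff_t index)
{
	struct page *page = NULL;

	spin_lock_irq(&mapping->tree_lock);
	if (radix_tree_gang_lookup(&mapping->page_tree,
				   (void **)&page, index, 1) > 0)
		page_cache_get(page);	/* take a reference while still locked */
	else
		page = NULL;
	spin_unlock_irq(&mapping->tree_lock);

	return page;
}
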
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ba1c047ae927..479bf428780c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -234,8 +234,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
*/
unlock_page(vmpage);
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
- "been written out, retry.\n",
+ CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
vmpage, vmpage->index);
*retry = true;
@@ -366,8 +365,7 @@ restart:
vmf->page = NULL;
if (!printed && ++count > 16) {
- CWARN("the page is under heavy contention,"
- "maybe your app(%s) needs revising :-)\n",
+ CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
current->comm);
printed = true;
}
@@ -393,8 +391,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy"
- " contention.\n",
+ CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
current->comm, vmf->pgoff,
file_inode(vma->vm_file)->i_ino);
printed = true;
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
index 586f49a374ec..f4da156f3874 100644
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
@@ -131,8 +131,8 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
spin_lock(&rct->rct_lock);
e = __rct_search(rct, key);
if (unlikely(e != NULL)) {
- CWARN("Unexpected stale rmtacl_entry found: "
- "[key: %d] [ops: %d]\n", (int)key, ops);
+ CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
+ (int)key, ops);
rce_free(e);
}
list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
@@ -263,8 +263,7 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
spin_lock(&et->et_lock);
e = __et_search_del(et, key, fid, type);
if (unlikely(e != NULL)) {
- CWARN("Unexpected stale eacl_entry found: "
- "[key: %d] [fid: "DFID"] [type: %d]\n",
+ CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
(int)key, PFID(fid), type);
ee_free(e);
}
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 9e31b789b790..031248840642 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -777,8 +777,8 @@ static int __init lloop_init(void)
if (max_loop < 1 || max_loop > 256) {
max_loop = MAX_LOOP_DEFAULT;
- CWARN("lloop: invalid max_loop (must be between"
- " 1 and 256), using default (%u)\n", max_loop);
+ CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
+ max_loop);
}
lloop_major = register_blkdev(0, "lloop");
@@ -792,11 +792,11 @@ static int __init lloop_init(void)
if (ll_iocontrol_magic == NULL)
goto out_mem1;
- loop_dev = kzalloc(max_loop * sizeof(*loop_dev), GFP_KERNEL);
+ loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
if (!loop_dev)
goto out_mem1;
- disks = kzalloc(max_loop * sizeof(*disks), GFP_KERNEL);
+ disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
if (!disks)
goto out_mem2;
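
lloop_init() here, like ll_dir_filler() earlier, switches array allocations from kzalloc(n * size, ...) to kcalloc(n, size, ...). Both return zeroed memory, but kcalloc() also checks the n * size multiplication for overflow and returns NULL instead of a short buffer. A small illustrative sketch (the element type is made up):

#include <linux/slab.h>

struct example_dev {
	int	minor;
	void	*priv;
};

static struct example_dev *alloc_dev_array(unsigned int n)
{
	/* Equivalent to kzalloc(n * sizeof(struct example_dev), GFP_KERNEL),
	 * except that an overflowing n * size yields NULL, not a short buffer. */
	return kcalloc(n, sizeof(struct example_dev), GFP_KERNEL);
}
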
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 3b3df9f03422..e6a909e6faf0 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -227,8 +227,9 @@ static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
return lprocfs_seq_read_frac_helper(m, pages_number, mult);
}
-static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_max_readahead_mb_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -269,7 +270,7 @@ static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
}
static ssize_t ll_max_readahead_per_file_mb_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
@@ -283,8 +284,7 @@ static ssize_t ll_max_readahead_per_file_mb_seq_write(struct file *file,
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages) {
- CERROR("can't set file readahead more than"
- "max_read_ahead_mb %lu MB\n",
+ CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
sbi->ll_ra_info.ra_max_pages);
return -ERANGE;
}
@@ -313,7 +313,7 @@ static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *unused)
}
static ssize_t ll_max_read_ahead_whole_mb_seq_write(struct file *file,
- const char *buffer,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
@@ -329,9 +329,8 @@ static ssize_t ll_max_read_ahead_whole_mb_seq_write(struct file *file,
* algorithm does this anyway so it's pointless to set it larger. */
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- CERROR("can't set max_read_ahead_whole_mb more than "
- "max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+ CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
+ sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
return -ERANGE;
}
@@ -469,8 +468,9 @@ static int ll_checksum_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
}
-static ssize_t ll_checksum_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_checksum_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -504,8 +504,9 @@ static int ll_max_rw_chunk_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%lu\n", ll_s2sbi(sb)->ll_max_rw_chunk);
}
-static ssize_t ll_max_rw_chunk_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_max_rw_chunk_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
int rc, val;
@@ -533,8 +534,8 @@ static int ll_rd_track_id(struct seq_file *m, enum stats_track_type type)
}
}
-static int ll_wr_track_id(const char *buffer, unsigned long count, void *data,
- enum stats_track_type type)
+static int ll_wr_track_id(const char __user *buffer, unsigned long count,
+ void *data, enum stats_track_type type)
{
struct super_block *sb = data;
int rc, pid;
@@ -556,8 +557,9 @@ static int ll_track_pid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_PID);
}
-static ssize_t ll_track_pid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_pid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PID);
@@ -569,8 +571,9 @@ static int ll_track_ppid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_PPID);
}
-static ssize_t ll_track_ppid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_ppid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PPID);
@@ -582,8 +585,9 @@ static int ll_track_gid_seq_show(struct seq_file *m, void *v)
return ll_rd_track_id(m, STATS_TRACK_GID);
}
-static ssize_t ll_track_gid_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_track_gid_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_GID);
@@ -598,8 +602,9 @@ static int ll_statahead_max_seq_show(struct seq_file *m, void *v)
return seq_printf(m, "%u\n", sbi->ll_sa_max);
}
-static ssize_t ll_statahead_max_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_statahead_max_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -612,8 +617,8 @@ static ssize_t ll_statahead_max_seq_write(struct file *file, const char *buffer,
if (val >= 0 && val <= LL_SA_RPC_MAX)
sbi->ll_sa_max = val;
else
- CERROR("Bad statahead_max value %d. Valid values are in the "
- "range [0, %d]\n", val, LL_SA_RPC_MAX);
+ CERROR("Bad statahead_max value %d. Valid values are in the range [0, %d]\n",
+ val, LL_SA_RPC_MAX);
return count;
}
@@ -628,8 +633,9 @@ static int ll_statahead_agl_seq_show(struct seq_file *m, void *v)
sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
}
-static ssize_t ll_statahead_agl_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_statahead_agl_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -672,8 +678,9 @@ static int ll_lazystatfs_seq_show(struct seq_file *m, void *v)
(sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
}
-static ssize_t ll_lazystatfs_seq_write(struct file *file, const char *buffer,
- size_t count, loff_t *off)
+static ssize_t ll_lazystatfs_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
@@ -761,8 +768,8 @@ static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
while (flags != 0) {
if (ARRAY_SIZE(str) <= i) {
- CERROR("%s: Revise array LL_SBI_FLAGS to match sbi "
- "flags please.\n", ll_get_fsname(sb, NULL, 0));
+ CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
+ ll_get_fsname(sb, NULL, 0));
return -EINVAL;
}
@@ -787,7 +794,8 @@ static int ll_xattr_cache_seq_show(struct seq_file *m, void *v)
return rc;
}
-static ssize_t ll_xattr_cache_seq_write(struct file *file, const char *buffer,
+static ssize_t ll_xattr_cache_seq_write(struct file *file,
+ const char __user *buffer,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
@@ -813,7 +821,7 @@ LPROC_SEQ_FOPS(ll_xattr_cache);
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "uuid", &ll_sb_uuid_fops, NULL, 0 },
- //{ "mntpt_path", ll_rd_path, 0, 0 },
+ /* { "mntpt_path", ll_rd_path, 0, 0 }, */
{ "fstype", &ll_fstype_fops, NULL, 0 },
{ "site", &ll_site_stats_fops, NULL, 0 },
{ "blocksize", &ll_blksize_fops, NULL, 0 },
@@ -823,7 +831,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
{ "filestotal", &ll_filestotal_fops, NULL, 0 },
{ "filesfree", &ll_filesfree_fops, NULL, 0 },
{ "client_type", &ll_client_type_fops, NULL, 0 },
- //{ "filegroups", lprocfs_rd_filegroups, 0, 0 },
+ /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_read_ahead_mb", &ll_max_readahead_mb_fops, NULL },
{ "max_read_ahead_per_file_mb", &ll_max_readahead_per_file_mb_fops,
NULL },
@@ -1133,8 +1141,8 @@ static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
read_cum += r;
write_cum += w;
end = 1 << (i + LL_HIST_START - units);
- seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | "
- "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
+ seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
+ start, *unitp, end, *unitp,
(i == LL_HIST_MAX - 1) ? '+' : ' ',
r, pct(r, read_tot), pct(read_cum, read_tot),
w, pct(w, write_tot), pct(write_cum, write_tot));
@@ -1160,8 +1168,7 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
@@ -1239,8 +1246,7 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
@@ -1418,8 +1424,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
if (!sbi->ll_rw_stats_on) {
seq_printf(seq, "disabled\n"
- "write anything in this file to activate, "
- "then 0 or \"[D/d]isabled\" to deactivate\n");
+ "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
spin_lock(&sbi->ll_process_lock);
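The write handlers above gain the __user annotation on their buffer argument because the data arrives from user space and must be copied in before parsing; the multi-fragment CERROR()/CDEBUG() strings are also joined so each message is greppable on one line. A minimal sketch of the handler pattern, with hypothetical names (this is not the Lustre code itself):

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static ssize_t example_seq_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *off)
{
	char kbuf[16];

	if (count >= sizeof(kbuf))
		return -EINVAL;
	/* The __user annotation lets sparse flag any direct dereference;
	 * the contents must be fetched with copy_from_user(). */
	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	kbuf[count] = '\0';

	/* ... parse kbuf and update the state reached via
	 * ((struct seq_file *)file->private_data)->private ... */
	return count;
}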
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 8e926b385a60..1bf891bd321a 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -83,8 +83,8 @@ static int ll_set_inode(struct inode *inode, void *opaque)
lli->lli_fid = body->fid1;
if (unlikely(!(body->valid & OBD_MD_FLTYPE))) {
- CERROR("Can not initialize inode "DFID" without object type: "
- "valid = %#llx\n", PFID(&lli->lli_fid), body->valid);
+ CERROR("Can not initialize inode " DFID " without object type: valid = %#llx\n",
+ PFID(&lli->lli_fid), body->valid);
return -EINVAL;
}
@@ -598,8 +598,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,"
- "open_flags %x,mode %x opened %d\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
dentry, dir->i_ino,
dir->i_generation, dir, file, open_flags, mode, *opened);
@@ -843,8 +842,7 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),"
- "flags=%u, excl=%d\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
dentry, dir->i_ino,
dir->i_generation, dir, mode, want_excl);
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index c05a9126cfe3..a58182600dae 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -194,7 +194,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
if (!lli->lli_remote_perms)
lli->lli_remote_perms = perm_hash;
- else if (perm_hash)
+ else
free_rmtperm_hash(perm_hash);
head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
@@ -209,8 +209,7 @@ again:
continue;
if (tmp->lrp_fsgid != perm->rp_fsgid)
continue;
- if (lrp)
- free_ll_remote_perm(lrp);
+ free_ll_remote_perm(lrp);
lrp = tmp;
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 1f53b9863385..10a0421366d0 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -121,8 +121,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
/* this is too bad. Someone is trying to write the
* page w/o holding inode mutex. This means we can
* add dirty pages into cache during truncate */
- CERROR("Proc %s is dirting page w/o inode lock, this"
- "will break truncate.\n", current->comm);
+ CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
+ current->comm);
dump_stack();
LBUG();
return ERR_PTR(-EIO);
@@ -145,7 +145,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
*/
io->ci_lockreq = CILR_NEVER;
- pos = (vmpage->index << PAGE_CACHE_SHIFT);
+ pos = vmpage->index << PAGE_CACHE_SHIFT;
/* Create a temp IO to serve write. */
result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
@@ -606,8 +606,8 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
else
pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu"
- "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
+ st_off, st_len, st_pgs, off, length, pg_count);
return pg_count;
}
@@ -667,10 +667,10 @@ static int ll_read_ahead_pages(const struct lu_env *env,
/* FIXME: This assertion only is valid when it is for
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented */
- LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu"
- "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
- ria->ria_start, ria->ria_end, ria->ria_stoff,
- ria->ria_length, ria->ria_pages);
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
+ page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
offset = page_idx - ria->ria_stoff;
offset = offset % (ria->ria_length);
if (offset > ria->ria_pages) {
@@ -927,8 +927,8 @@ static void ras_stride_increase_window(struct ll_readahead_state *ras,
LASSERT(ras->ras_stride_length > 0);
LASSERTF(ras->ras_window_start + ras->ras_window_len
- >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
- " stride_offset %lu\n", ras->ras_window_start,
+ >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
+ ras->ras_window_start,
ras->ras_window_len, ras->ras_stride_offset);
stride_len = ras->ras_window_start + ras->ras_window_len -
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 4c77ae8b9350..2f21304046aa 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -183,7 +183,7 @@ static int ll_set_page_dirty(struct page *vmpage)
return __set_page_dirty_nobuffers(vmpage);
}
-#define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
+#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
size_t size, struct page ***pages,
@@ -417,7 +417,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
if (likely(result > 0)) {
- int n = (result + offs + PAGE_SIZE - 1) / PAGE_SIZE;
+ int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
result, file_offset,
@@ -535,7 +535,7 @@ const struct address_space_operations ll_aops = {
#else
const struct address_space_operations_ext ll_aops = {
.orig_aops.readpage = ll_readpage,
-// .orig_aops.readpages = ll_readpages,
+/* .orig_aops.readpages = ll_readpages, */
.orig_aops.direct_IO = ll_direct_IO_26,
.orig_aops.writepage = ll_writepage,
.orig_aops.writepages = ll_writepages,
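Two of the rw26.c hunks above are worth spelling out: MAX_DIRECTIO_SIZE gains parentheses around its expansion, and the open-coded page rounding becomes DIV_ROUND_UP(). A short illustration of both, with hypothetical names and constants (a sketch, not the driver code):

#include <linux/kernel.h>
#include <asm/page.h>

#define BAD_SIZE   2*1024*1024*1024UL	/* unparenthesized expansion */
#define GOOD_SIZE  (2*1024*1024*1024UL)

static unsigned long example_chunks(unsigned long bytes)
{
	/* bytes / BAD_SIZE expands to (bytes / 2) * 1024 * 1024 * 1024UL,
	 * a multiplication instead of a division by 2 GiB; GOOD_SIZE
	 * behaves as intended at every use site. */
	return bytes / GOOD_SIZE;
}

static unsigned long example_pages(size_t result, size_t offs)
{
	/* DIV_ROUND_UP(n, d) is the idiomatic (n + d - 1) / d. */
	return DIV_ROUND_UP(result + offs, PAGE_SIZE);
}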
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 09d965e76842..6ad9dd0fe2b3 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -334,8 +334,7 @@ static void ll_sa_entry_put(struct ll_statahead_info *sai,
LASSERT(ll_sa_entry_unhashed(entry));
ll_sa_entry_cleanup(sai, entry);
- if (entry->se_inode)
- iput(entry->se_inode);
+ iput(entry->se_inode);
OBD_FREE(entry, entry->se_size);
atomic_dec(&sai->sai_cache_count);
@@ -915,7 +914,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
return rc;
}
-static void ll_statahead_one(struct dentry *parent, const char* entry_name,
+static void ll_statahead_one(struct dentry *parent, const char *entry_name,
int entry_name_len)
{
struct inode *dir = parent->d_inode;
@@ -1491,10 +1490,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
sai->sai_consecutive_miss++;
if (sa_low_hit(sai) && thread_is_running(thread)) {
atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir "DFID" hit "
- "ratio too low: hit/miss %llu/%llu"
- ", sent/replied %llu/%llu, stopping "
- "statahead thread\n",
+ CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied);
@@ -1612,8 +1608,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
} else if ((*dentryp)->d_inode != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, "
- "statahead inode %lu/%u\n",
+ "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
*dentryp,
(*dentryp)->d_inode->i_ino,
(*dentryp)->d_inode->i_generation,
@@ -1665,8 +1660,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
if (unlikely(sai->sai_inode != parent->d_inode)) {
struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
- CWARN("Race condition, someone changed %pd just now: "
- "old parent "DFID", new parent "DFID"\n",
+ CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
*dentryp,
PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
dput(parent);
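The statahead.c hunk that drops the NULL check before iput() relies on iput() simply returning when passed a NULL inode, the same convention kfree() and dput() follow, so the caller-side test adds nothing. A small sketch of the pattern, with a hypothetical entry type:

#include <linux/fs.h>

struct example_entry {
	struct inode *se_inode;	/* may be NULL if no inode was attached */
};

static void example_entry_release(struct example_entry *entry)
{
	/* iput(NULL) is a no-op, so no separate NULL test is needed
	 * before dropping the reference. */
	iput(entry->se_inode);
	entry->se_inode = NULL;
}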
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index e61dbed120a3..6aff155651cc 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -162,7 +162,7 @@ static int __init init_lustre_lite(void)
/* Nodes with small feet have little entropy
* the NID for this node gives the most entropy in the low bits */
- for (i=0; ; i++) {
+ for (i = 0; ; i++) {
if (LNetGetId(i, &lnet_id) == -ENOENT) {
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index eccd3a717a4d..686b6a574cc5 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -100,8 +100,8 @@ static int ll_readlink_internal(struct inode *inode,
if (*symname == NULL ||
strnlen(*symname, symlen) != symlen - 1) {
/* not full/NULL terminated */
- CERROR("inode %lu: symlink not NULL terminated string"
- "of length %d\n", inode->i_ino, symlen - 1);
+ CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
+ inode->i_ino, symlen - 1);
rc = -EPROTO;
goto failed;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index e540a6d286f8..930f6010203e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -709,7 +709,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
}
- if (fio->ft_mkwrite ) {
+ if (fio->ft_mkwrite) {
pgoff_t last_index;
/*
* Capture the size while holding the lli_trunc_sem from above
@@ -720,9 +720,8 @@ static int vvp_io_fault_start(const struct lu_env *env,
last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
- "llite: mkwrite and truncate race happened: "
- "%p: 0x%lx 0x%lx\n",
- vmpage->mapping, fio->ft_index, last_index);
+ "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
+ vmpage->mapping, fio->ft_index, last_index);
/*
* We need to return if we are
* passed the end of the file. This will propagate
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 4626346f6ee1..954ed08c6af2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -376,8 +376,7 @@ static int vvp_page_print(const struct lu_env *env,
struct ccc_page *vp = cl2ccc_page(slice);
struct page *vmpage = vp->cpg_page;
- (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
- "vm@%p ",
+ (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
vp->cpg_write_queued, vmpage);
if (vmpage != NULL) {
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 3151baf5585c..b439936b4524 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -201,8 +201,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
#endif
if (rc) {
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
+ LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
return rc;
@@ -234,6 +233,9 @@ int ll_setxattr(struct dentry *dentry, const char *name,
struct lov_user_md *lump = (struct lov_user_md *)value;
int rc = 0;
+ if (size != 0 && size < sizeof(struct lov_user_md))
+ return -EINVAL;
+
/* Attributes that are saved via getxattr will always have
* the stripe_offset as 0. Instead, the MDS should be
* allowed to pick the starting OST index. b=17846 */
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 627cbe242f22..e2badf17d95e 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -126,9 +126,7 @@ static int ll_xattr_cache_add(struct list_head *cache,
return -ENOMEM;
}
- xattr->xe_namelen = strlen(xattr_name) + 1;
-
- xattr->xe_name = kzalloc(xattr->xe_namelen, GFP_NOFS);
+ xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
if (!xattr->xe_name) {
CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
xattr->xe_namelen);
@@ -141,7 +139,6 @@ static int ll_xattr_cache_add(struct list_head *cache,
goto err_value;
}
- memcpy(xattr->xe_name, xattr_name, xattr->xe_namelen);
memcpy(xattr->xe_value, xattr_val, xattr_val_len);
xattr->xe_vallen = xattr_val_len;
list_add(&xattr->xe_list, cache);
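In the xattr_cache.c hunk above, the strlen() + kzalloc() + memcpy() sequence for copying the attribute name collapses into a single kstrdup() call, which allocates the right amount and copies the NUL terminator in one step. A before/after sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>

/* Open-coded duplication: three steps, each a chance to get the length wrong. */
static char *dup_name_open_coded(const char *name, gfp_t gfp)
{
	size_t len = strlen(name) + 1;
	char *copy = kzalloc(len, gfp);

	if (copy)
		memcpy(copy, name, len);
	return copy;
}

/* Equivalent using the helper; kstrdup() returns NULL on allocation failure. */
static char *dup_name(const char *name, gfp_t gfp)
{
	return kstrdup(name, gfp);
}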
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index e8421f04beda..ee235926f52b 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -75,8 +75,7 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
*mds, PFID(fid));
if (*mds >= lmv->desc.ld_tgt_count) {
- CERROR("FLD lookup got invalid mds #%x (max: %x) "
- "for fid="DFID"\n", *mds, lmv->desc.ld_tgt_count,
+ CERROR("FLD lookup got invalid mds #%x (max: %x) for fid=" DFID "\n", *mds, lmv->desc.ld_tgt_count,
PFID(fid));
rc = -EINVAL;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 5106124b7d92..d22d57b4ff38 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -186,8 +186,8 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- CDEBUG(D_INODE, "OPEN_INTENT with fid1="DFID", fid2="DFID","
- " name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
+ CDEBUG(D_INODE, "OPEN_INTENT with fid1=" DFID ", fid2=" DFID ", name='%s' -> mds #%d\n",
+ PFID(&op_data->op_fid1),
PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
@@ -226,8 +226,8 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
* this is normal situation, we should not print error here,
* only debug info.
*/
- CDEBUG(D_INODE, "Can't handle remote %s: dir "DFID"("DFID"):"
- "%*s: %d\n", LL_IT2STR(it), PFID(&op_data->op_fid2),
+ CDEBUG(D_INODE, "Can't handle remote %s: dir " DFID "(" DFID "):%*s: %d\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid2),
PFID(&op_data->op_fid1), op_data->op_namelen,
op_data->op_name, rc);
return rc;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index b911e7643874..852d78721ca9 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,8 +42,8 @@
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex);
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex);
+#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
+#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
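The lmv_init_lock()/lmv_init_unlock() change above drops the trailing semicolons from the macro bodies; a function-like macro that already ends in ';' expands to two statements, which silently breaks an if/else caller. A tiny sketch with hypothetical names:

#include <linux/mutex.h>
#include <linux/printk.h>

#define lock_bad(m)	mutex_lock(m);	/* trailing ';' inside the macro */
#define lock_good(m)	mutex_lock(m)

static void take_lock_maybe(struct mutex *m, bool need_lock)
{
	/* With lock_bad() this would not compile: the expansion ends in
	 * "...;;", and the stray empty statement orphans the else branch. */
	if (need_lock)
		lock_good(m);
	else
		pr_debug("lock not needed\n");
}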
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 1a5821289c39..9f3837412cdf 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -325,8 +325,8 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
- CERROR("%s: obd_init_ea_size() failed on MDT target %d:"
- " rc = %d.\n", obd->obd_name, i, rc);
+ CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n",
+ obd->obd_name, i, rc);
break;
}
}
@@ -427,8 +427,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
mdc_obd->obd_type->typ_name,
mdc_obd->obd_name);
if (mdc_symlink == NULL) {
- CERROR("Could not register LMV target "
- "/proc/fs/lustre/%s/%s/target_obds/%s.",
+ CERROR("Could not register LMV target /proc/fs/lustre/%s/%s/target_obds/%s.",
obd->obd_type->typ_name, obd->obd_name,
mdc_obd->obd_name);
lprocfs_remove(&lmv_proc_dir);
@@ -474,8 +473,8 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
tgt = lmv->tgts[index];
- CERROR("%s: UUID %s already assigned at LOV target index %d:"
- " rc = %d\n", obd->obd_name,
+ CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
+ obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
lmv_init_unlock(lmv);
return -EEXIST;
@@ -600,8 +599,7 @@ int lmv_check_connect(struct obd_device *obd)
--lmv->desc.ld_active_tgt_count;
rc2 = obd_disconnect(tgt->ltd_exp);
if (rc2) {
- CERROR("LMV target %s disconnect on "
- "MDC idx %d: error %d\n",
+ CERROR("LMV target %s disconnect on MDC idx %d: error %d\n",
tgt->ltd_uuid.uuid, i, rc2);
}
}
@@ -865,10 +863,9 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
if (err) {
if (lmv->tgts[i]->ltd_active) {
/* permanent error */
- CERROR("error: iocontrol MDC %s on MDT"
- "idx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
rc = err;
lk->lk_flags |= LK_FLG_STOP;
/* unregister from previous MDS */
@@ -925,7 +922,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
__u32 index;
memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
- if ((index >= count))
+ if (index >= count)
return -ENODEV;
if (lmv->tgts[index] == NULL ||
@@ -1147,10 +1144,9 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
return err;
} else if (err) {
if (lmv->tgts[i]->ltd_active) {
- CERROR("error: iocontrol MDC %s on MDT"
- "idx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
if (!rc)
rc = err;
}
@@ -1234,8 +1230,8 @@ static int lmv_placement_policy(struct obd_device *obd,
if (lum->lum_type == LMV_STRIPE_TYPE &&
lum->lum_stripe_offset != -1) {
if (lum->lum_stripe_offset >= lmv->desc.ld_tgt_count) {
- CERROR("%s: Stripe_offset %d > MDT count %d:"
- " rc = %d\n", obd->obd_name,
+ CERROR("%s: Stripe_offset %d > MDT count %d: rc = %d\n",
+ obd->obd_name,
lum->lum_stripe_offset,
lmv->desc.ld_tgt_count, -ERANGE);
return -ERANGE;
@@ -1298,8 +1294,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
rc = lmv_placement_policy(obd, op_data, &mds);
if (rc) {
- CERROR("Can't get target for allocating fid, "
- "rc %d\n", rc);
+ CERROR("Can't get target for allocating fid, rc %d\n",
+ rc);
return rc;
}
@@ -2310,7 +2306,6 @@ retry:
static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
struct lmv_obd *lmv = &obd->u.lmv;
- int rc = 0;
switch (stage) {
case OBD_CLEANUP_EARLY:
@@ -2324,7 +2319,7 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
default:
break;
}
- return rc;
+ return 0;
}
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 9e21e5efcdb6..e9ec39c5a6c2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -348,9 +348,8 @@ const struct lsm_operations lsm_v3_ops = {
void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
{
- CDEBUG(level, "lsm %p, objid "DOSTID", maxbytes %#llx, magic 0x%08X,"
- " stripe_size %u, stripe_count %u, refc: %d,"
- " layout_gen %u, pool ["LOV_POOLNAMEF"]\n", lsm,
+ CDEBUG(level, "lsm %p, objid " DOSTID ", maxbytes %#llx, magic 0x%08X, stripe_size %u, stripe_count %u, refc: %d, layout_gen %u, pool [" LOV_POOLNAMEF "]\n",
+ lsm,
POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
lsm->lsm_stripe_size, lsm->lsm_stripe_count,
atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 94dfd64bd283..ea503d2a19f8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -168,8 +168,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
if (imp->imp_invalid) {
- CDEBUG(D_CONFIG, "not connecting OSC %s; administratively "
- "disabled\n", obd_uuid2str(tgt_uuid));
+ CDEBUG(D_CONFIG, "not connecting OSC %s; administratively disabled\n",
+ obd_uuid2str(tgt_uuid));
return 0;
}
@@ -201,10 +201,9 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
osc_obd->obd_type->typ_name,
osc_obd->obd_name);
if (osc_symlink == NULL) {
- CERROR("could not register LOV target "
- "/proc/fs/lustre/%s/%s/target_obds/%s.",
- obd->obd_type->typ_name, obd->obd_name,
- osc_obd->obd_name);
+ CERROR("could not register LOV target /proc/fs/lustre/%s/%s/target_obds/%s.",
+ obd->obd_type->typ_name, obd->obd_name,
+ osc_obd->obd_name);
lprocfs_remove(&lov_proc_dir);
obd->obd_proc_private = NULL;
}
@@ -726,8 +725,7 @@ void lov_fix_desc_stripe_size(__u64 *val)
{
if (*val < LOV_MIN_STRIPE_SIZE) {
if (*val != 0)
- LCONSOLE_INFO("Increasing default stripe size to "
- "minimum %u\n",
+ LCONSOLE_INFO("Increasing default stripe size to minimum %u\n",
LOV_DESC_STRIPE_SIZE_DEFAULT);
*val = LOV_DESC_STRIPE_SIZE_DEFAULT;
} else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
@@ -847,7 +845,6 @@ out:
static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
- int rc = 0;
struct lov_obd *lov = &obd->u.lov;
switch (stage) {
@@ -865,7 +862,7 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
break;
}
- return rc;
+ return 0;
}
static int lov_cleanup(struct obd_device *obd)
@@ -900,8 +897,7 @@ static int lov_cleanup(struct obd_device *obd)
/* We should never get here - these
should have been removed in the
disconnect. */
- CERROR("lov tgt %d not cleaned!"
- " deathrow=%d, lovrc=%d\n",
+ CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
i, lov->lov_death_row,
atomic_read(&lov->lov_refcount));
lov_del_target(obd, i, NULL, 0);
@@ -1176,8 +1172,8 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
list_for_each(pos, &lovset->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
- CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
- "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
+ POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, rqset);
@@ -1256,8 +1252,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
- CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
- "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
+ POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
@@ -1568,8 +1564,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
D_IOCTL : D_WARNING,
- "iocontrol OSC %s on OST "
- "idx %d cmd %x: err = %d\n",
+ "iocontrol OSC %s on OST idx %d cmd %x: err = %d\n",
lov_uuid2str(lov, i),
i, cmd, err);
if (!rc)
@@ -2266,8 +2261,8 @@ static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
/* Skip quota check on the administratively disabled OSTs. */
if (!lov->lov_tgts[i]->ltd_activate) {
- CWARN("lov idx %d was administratively disabled, "
- "skip quotacheck on it.\n", i);
+ CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
+ i);
continue;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 5edd6a3a9c54..5356d5324176 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -438,8 +438,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
goto out_set;
- }
- else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ } else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3)) {
rc = -EINVAL;
goto out_set;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index e8732cc30ce2..4e59995e0042 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -194,10 +194,8 @@ static __u64 mds_pack_open_flags(__u64 flags, __u32 mode)
cr_flags |= MDS_OPEN_SYNC;
if (flags & O_DIRECTORY)
cr_flags |= MDS_OPEN_DIRECTORY;
-#ifdef FMODE_EXEC
- if (flags & FMODE_EXEC)
+ if (flags & __FMODE_EXEC)
cr_flags |= MDS_FMODE_EXEC;
-#endif
if (cl_is_lov_delay_create(flags))
cr_flags |= MDS_OPEN_DELAY_CREATE;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index b58147ee62b6..8c9b4f5494e9 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -296,10 +296,8 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
} else {
if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
mode = LCK_CW;
-#ifdef FMODE_EXEC
- else if (it->it_flags & FMODE_EXEC)
+ else if (it->it_flags & __FMODE_EXEC)
mode = LCK_PR;
-#endif
else
mode = LCK_CR;
}
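The mdc hunks above replace the #ifdef FMODE_EXEC guard with a plain test of __FMODE_EXEC. FMODE_EXEC is a bit in the sparse-annotated fmode_t, and <linux/fs.h> provides __FMODE_EXEC as its integer alias for mixing with ordinary open-flag words, so the symbol is always available and the conditional compilation can go. A hedged sketch with a hypothetical helper:

#include <linux/fs.h>

static bool open_flags_want_exec(unsigned int it_flags)
{
	/* __FMODE_EXEC is the plain-int form of FMODE_EXEC; using it
	 * avoids a sparse warning about mixing fmode_t bits with ints. */
	return it_flags & __FMODE_EXEC;
}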
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 14e1ba1675f6..3b0f245a8780 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -60,7 +60,7 @@ struct mdc_renew_capa_args {
static int mdc_cleanup(struct obd_device *obd);
-int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
+static int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
const struct req_msg_field *field, struct obd_capa **oc)
{
struct lustre_capa *capa;
@@ -147,7 +147,7 @@ out:
}
/* This should be mdc_get_info("rootfid") */
-int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
+static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
struct obd_capa **pc)
{
return send_getstatus(class_exp2cliimp(exp), rootfid, pc,
@@ -214,7 +214,7 @@ static int mdc_getattr_common(struct obd_export *exp,
return 0;
}
-int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
+static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
@@ -258,7 +258,7 @@ int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
-int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
+static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
@@ -441,7 +441,7 @@ static int mdc_xattr_common(struct obd_export *exp,
return rc;
}
-int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
+static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
struct obd_capa *oc, u64 valid, const char *xattr_name,
const char *input, int input_size, int output_size,
int flags, __u32 suppgid, struct ptlrpc_request **request)
@@ -452,7 +452,7 @@ int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
suppgid, request);
}
-int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
+static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
struct obd_capa *oc, u64 valid, const char *xattr_name,
const char *input, int input_size, int output_size,
int flags, struct ptlrpc_request **request)
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index bc263adf09d4..60d2b0f12693 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -510,8 +510,6 @@ static void do_requeue(struct config_llog_data *cld)
static int mgc_requeue_thread(void *data)
{
- int rc = 0;
-
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
@@ -592,7 +590,7 @@ static int mgc_requeue_thread(void *data)
complete(&rq_exit);
CDEBUG(D_MGC, "Ending requeue thread\n");
- return rc;
+ return 0;
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
@@ -736,8 +734,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
"ll_cfg_requeue"));
if (IS_ERR_VALUE(rc)) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
+ CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
obd->obd_name, rc);
goto err_cleanup;
}
@@ -1021,8 +1018,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
sptlrpc_flavor2name(&cli->cl_flvr_mgc,
str, sizeof(str));
- LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but "
- "currently %s is in use\n",
+ LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
(char *) val, str);
rc = -EPERM;
}
@@ -1055,8 +1051,6 @@ static int mgc_import_event(struct obd_device *obd,
struct obd_import *imp,
enum obd_import_event event)
{
- int rc = 0;
-
LASSERT(imp->imp_obd == obd);
CDEBUG(D_MGC, "import event %#x\n", event);
@@ -1090,7 +1084,7 @@ static int mgc_import_event(struct obd_device *obd,
CERROR("Unknown import event %#x\n", event);
LBUG();
}
- return rc;
+ return 0;
}
enum {
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index 2619bfeceb8b..9a69f6b35a0e 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -171,17 +171,17 @@ EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
/*
* Filter out the "nobody" entries in the posix ACL.
*/
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
posix_acl_xattr_header **out)
{
int count, i, j, rc = 0;
__u32 id;
posix_acl_xattr_header *new;
- if (unlikely(size < 0))
- return -EINVAL;
- else if (!size)
+ if (!size)
return 0;
+ if (size < sizeof(*new))
+ return -EINVAL;
OBD_ALLOC(new, size);
if (unlikely(new == NULL))
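Changing the size parameter above from int to size_t makes the old "size < 0" branch dead code, and the check that matters becomes whether the buffer is large enough to hold at least one header. A hedged sketch of the same validation shape, with hypothetical names:

#include <linux/types.h>

static int example_check_len(size_t size, size_t min_len)
{
	if (!size)
		return 0;	/* empty: nothing to filter */
	if (size < min_len)
		return -EINVAL;	/* shorter than one header: malformed */
	return 1;		/* worth parsing */
}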
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
index cd1abce378ea..d206b1046a18 100644
--- a/drivers/staging/lustre/lustre/obdclass/capa.c
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -272,8 +272,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
if (IS_ERR(tfm)) {
- CERROR("crypto_alloc_tfm failed, check whether your kernel"
- "has crypto support!\n");
+ CERROR("crypto_alloc_tfm failed, check whether your kernel has crypto support!\n");
return PTR_ERR(tfm);
}
keylen = alg->ha_keylen;
@@ -302,7 +301,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
@@ -355,7 +354,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
@@ -407,14 +406,13 @@ EXPORT_SYMBOL(capa_cpy);
void _debug_capa(struct lustre_capa *c,
struct libcfs_debug_msg_data *msgdata,
- const char *fmt, ... )
+ const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " capability@%p fid "DFID" opc %#llx uid %llu"
- " gid %llu flags %u alg %d keyid %u timeout %u "
- "expiry %u\n", c, PFID(capa_fid(c)), capa_opc(c),
+ " capability@%p fid " DFID " opc %#llx uid %llu gid %llu flags %u alg %d keyid %u timeout %u expiry %u\n",
+ c, PFID(capa_fid(c)), capa_opc(c),
capa_uid(c), capa_gid(c), capa_flags(c),
capa_alg(c), capa_keyid(c), capa_timeout(c),
capa_expiry(c));
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f2383a497cbe..3141b6043708 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -1622,8 +1622,7 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel "
- "%d remaining pages\n",
+ CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
rc, atomic_read(&anchor->csi_sync_nr));
(void)cl_io_cancel(env, io, queue);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index b204531ef710..b081167f9767 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -129,8 +129,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
- "(%p/%d/%d) at %s():%d\n",
+ CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
prefix, lock, atomic_read(&lock->cll_ref),
lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 7265ecbc6f9d..89a3fb2e56b2 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -144,13 +144,11 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
CERROR("%s%salloc of %s (%llu bytes) failed at %s:%d\n",
ptr ? "force " :"", type, name, (__u64)size, file,
line);
- CERROR("%llu total bytes and %llu total pages "
- "(%llu bytes) allocated by Lustre, "
- "%d total bytes by LNET\n",
+ CERROR("%llu total bytes and %llu total pages (%llu bytes) allocated by Lustre, %d total bytes by LNET\n",
obd_memory_sum(),
obd_pages_sum() << PAGE_CACHE_SHIFT,
obd_pages_sum(),
- atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
return 1;
}
return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index d0f8f875ddd6..9c934e6d2ea1 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,6 +40,7 @@
#define DEBUG_SUBSYSTEM D_OTHER
+#include <linux/unaligned/access_ok.h>
#include "../include/obd_support.h"
#include "../include/lustre_debug.h"
@@ -60,14 +61,11 @@ int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
{
LASSERT(addr);
- off = cpu_to_le64 (off);
- id = cpu_to_le64 (id);
- memcpy(addr, (char *)&off, LPDS);
- memcpy(addr + LPDS, (char *)&id, LPDS);
-
+ put_unaligned_le64(off, addr);
+ put_unaligned_le64(id, addr+LPDS);
addr += len - LPDS - LPDS;
- memcpy(addr, (char *)&off, LPDS);
- memcpy(addr + LPDS, (char *)&id, LPDS);
+ put_unaligned_le64(off, addr);
+ put_unaligned_le64(id, addr+LPDS);
return 0;
}
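block_debug_setup() above switches from cpu_to_le64() plus memcpy() to put_unaligned_le64(), which byte-swaps and stores in one step and tolerates an unaligned destination; the hunk pulls in linux/unaligned/access_ok.h for it, though <asm/unaligned.h> is the conventional include. A small sketch of the same stamping pattern, with hypothetical names:

#include <linux/types.h>
#include <asm/unaligned.h>

static void example_stamp(void *addr, int len, u64 off, u64 id)
{
	/* little-endian markers at the start and the end of the buffer */
	put_unaligned_le64(off, addr);
	put_unaligned_le64(id, addr + sizeof(u64));

	addr += len - 2 * sizeof(u64);
	put_unaligned_le64(off, addr);
	put_unaligned_le64(id, addr + sizeof(u64));
}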
diff --git a/drivers/staging/lustre/lustre/obdclass/dt_object.c b/drivers/staging/lustre/lustre/obdclass/dt_object.c
index 52256c26bf07..e7be26ec7521 100644
--- a/drivers/staging/lustre/lustre/obdclass/dt_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/dt_object.c
@@ -332,8 +332,7 @@ static struct dt_object *dt_reg_open(const struct lu_env *env,
result = dt_lookup_dir(env, p, name, fid);
if (result == 0){
o = dt_locate(env, dt, fid);
- }
- else
+ } else
o = ERR_PTR(result);
return o;
@@ -950,8 +949,8 @@ int lprocfs_dt_rd_blksize(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%u\n",
@@ -967,8 +966,8 @@ int lprocfs_dt_rd_kbytestotal(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_blocks;
@@ -989,8 +988,8 @@ int lprocfs_dt_rd_kbytesfree(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_bfree;
@@ -1011,8 +1010,8 @@ int lprocfs_dt_rd_kbytesavail(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
__u32 blk_size = osfs.os_bsize >> 10;
__u64 result = osfs.os_bavail;
@@ -1033,8 +1032,8 @@ int lprocfs_dt_rd_filestotal(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%llu\n", osfs.os_files);
@@ -1049,8 +1048,8 @@ int lprocfs_dt_rd_filesfree(char *page, char **start, off_t off,
{
struct dt_device *dt = data;
struct obd_statfs osfs;
-
int rc = dt_statfs(NULL, dt, &osfs);
+
if (rc == 0) {
*eof = 1;
rc = snprintf(page, count, "%llu\n", osfs.os_ffree);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index c314e9c2343e..736ca410aca3 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -317,7 +317,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
result->obd_minor, new_obd_minor);
obd_devs[result->obd_minor] = NULL;
- result->obd_name[0]='\0';
+ result->obd_name[0] = '\0';
}
result = ERR_PTR(-EEXIST);
break;
@@ -524,8 +524,8 @@ void class_obd_list(void)
/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
specified, then only the client with that uuid is returned,
otherwise any client connected to the tgt is returned. */
-struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char * typ_name,
+struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
+ const char *typ_name,
struct obd_uuid *grp_uuid)
{
int i;
@@ -557,7 +557,7 @@ EXPORT_SYMBOL(class_find_client_obd);
searching at *next, and if a device is found, the next index to look
at is saved in *next. If next is NULL, then the first matching device
will always be returned. */
-struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
+struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
{
int i;
@@ -1087,8 +1087,7 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr > 0);
if (lock->l_exp_refs_target != exp) {
- LCONSOLE_WARN("lock %p, "
- "mismatching export pointers: %p, %p\n",
+ LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n",
lock, lock->l_exp_refs_target, exp);
}
if (-- lock->l_exp_refs_nr == 0) {
@@ -1259,8 +1258,7 @@ static void class_disconnect_export_list(struct list_head *list,
}
class_export_get(exp);
- CDEBUG(D_HA, "%s: disconnecting export at %s (%p), "
- "last request at "CFS_TIME_T"\n",
+ CDEBUG(D_HA, "%s: disconnecting export at %s (%p), last request at " CFS_TIME_T "\n",
exp->exp_obd->obd_name, obd_export_nid2str(exp),
exp, exp->exp_last_request_time);
/* release one export reference anyway */
@@ -1284,8 +1282,8 @@ void class_disconnect_exports(struct obd_device *obd)
spin_unlock(&obd->obd_dev_lock);
if (!list_empty(&work_list)) {
- CDEBUG(D_HA, "OBD device %d (%p) has exports, "
- "disconnecting them\n", obd->obd_minor, obd);
+ CDEBUG(D_HA, "OBD device %d (%p) has exports, disconnecting them\n",
+ obd->obd_minor, obd);
class_disconnect_export_list(&work_list,
exp_flags_from_obd(obd));
} else
@@ -1422,8 +1420,8 @@ int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
LASSERTF(doomed_exp != obd->obd_self_export,
"self-export is hashed by NID?\n");
exports_evicted++;
- LCONSOLE_WARN("%s: evicting %s (at %s) by administrative "
- "request\n", obd->obd_name,
+ LCONSOLE_WARN("%s: evicting %s (at %s) by administrative request\n",
+ obd->obd_name,
obd_uuid2str(&doomed_exp->exp_client_uuid),
obd_export_nid2str(doomed_exp));
class_fail_export(doomed_exp);
@@ -1546,9 +1544,7 @@ void obd_exports_barrier(struct obd_device *obd)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
- LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
- "more than %d seconds. "
- "The obd refcount = %d. Is it stuck?\n",
+ LCONSOLE_WARN("%s is waiting for obd_unlinked_exports more than %d seconds. The obd refcount = %d. Is it stuck?\n",
obd->obd_name, waited,
atomic_read(&obd->obd_refcount));
dump_exports(obd, 1);
@@ -1783,7 +1779,7 @@ EXPORT_SYMBOL(kuc_len);
* @param p Pointer to payload area
* @returns Pointer to kuc header
*/
-struct kuc_hdr * kuc_ptr(void *p)
+struct kuc_hdr *kuc_ptr(void *p)
{
struct kuc_hdr *lh = ((struct kuc_hdr *)p) - 1;
LASSERT(lh->kuc_magic == KUC_MAGIC);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 7eaaaa648dfb..66ceab20c743 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -166,14 +166,14 @@ int obd_ioctl_popdata(void *arg, void *data, int len)
EXPORT_SYMBOL(obd_ioctl_popdata);
/* opening /dev/obd */
-static int obd_class_open(struct inode * inode, struct file * file)
+static int obd_class_open(struct inode *inode, struct file *file)
{
try_module_get(THIS_MODULE);
return 0;
}
/* closing /dev/obd */
-static int obd_class_release(struct inode * inode, struct file * file)
+static int obd_class_release(struct inode *inode, struct file *file)
{
module_put(THIS_MODULE);
return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 38a9b319355e..dd46e7358160 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -202,9 +202,8 @@ static int proc_max_dirty_pages_in_mb(struct ctl_table *table, int write,
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
- CERROR("Refusing to set max dirty pages to %u, which "
- "is more than 90%% of available RAM; setting "
- "to %lu\n", obd_max_dirty_pages,
+ CERROR("Refusing to set max dirty pages to %u, which is more than 90%% of available RAM; setting to %lu\n",
+ obd_max_dirty_pages,
((totalram_pages / 10) * 9));
obd_max_dirty_pages = ((totalram_pages / 10) * 9);
} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 3ab05292152c..114be4a78ccf 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -346,8 +346,8 @@ repeat:
}
if (rec->lrh_len == 0 ||
rec->lrh_len > LLOG_CHUNK_SIZE) {
- CWARN("invalid length %d in llog record for "
- "index %d/%d\n", rec->lrh_len,
+ CWARN("invalid length %d in llog record for index %d/%d\n",
+ rec->lrh_len,
rec->lrh_index, index);
rc = -EINVAL;
goto out;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 6e139cf372c4..4b850fc5f5d9 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -228,8 +228,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
(llh->llh_count == 1)) {
rc = llog_destroy(env, loghandle);
if (rc)
- CERROR("%s: failure destroying log during "
- "cleanup: rc = %d\n",
+ CERROR("%s: failure destroying log during cleanup: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name,
rc);
@@ -746,8 +745,7 @@ int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
llog_cat_set_first_idx(cathandle, index);
rc = llog_cancel_rec(env, cathandle, index);
if (rc == 0)
- CDEBUG(D_HA, "cancel plain log at index"
- " %u of catalog "DOSTID"\n",
+ CDEBUG(D_HA, "cancel plain log at index %u of catalog " DOSTID "\n",
index, POSTID(&cathandle->lgh_id.lgl_oi));
return rc;
}
@@ -810,8 +808,8 @@ int llog_cat_init_and_process(const struct lu_env *env,
rc = llog_process_or_fork(env, llh, cat_cancel_cb, NULL, NULL, false);
if (rc)
- CERROR("%s: llog_process() with cat_cancel_cb failed: rc = "
- "%d\n", llh->lgh_ctxt->loc_obd->obd_name, rc);
+ CERROR("%s: llog_process() with cat_cancel_cb failed: rc = %d\n",
+ llh->lgh_ctxt->loc_obd->obd_name, rc);
return 0;
}
EXPORT_SYMBOL(llog_cat_init_and_process);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index da769db0af77..978d886a1103 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -42,7 +42,7 @@
#include "llog_internal.h"
/* helper functions for calling the llog obd methods */
-static struct llog_ctxt* llog_new_ctxt(struct obd_device *obd)
+static struct llog_ctxt *llog_new_ctxt(struct obd_device *obd)
{
struct llog_ctxt *ctxt;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index bfac8387021e..d3ec90e85eb9 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -400,8 +400,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
}
marker->cm_createtime = createtime;
marker->cm_canceltime = canceltime;
- CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) "
- "for target %s, converting\n",
+ CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) for target %s, converting\n",
marker->cm_tgtname);
} else if (swab) {
__swab64s(&marker->cm_createtime);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 61e04af2464f..3b7dfc367722 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -177,7 +177,7 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
}
EXPORT_SYMBOL(lprocfs_read_frac_helper);
-int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
int *val, int mult)
{
char kernbuf[20], *end, *pbuf;
@@ -1400,8 +1400,8 @@ int lprocfs_alloc_obd_stats(struct obd_device *obd, unsigned num_private_stats)
* LPROCFS_OBD_OP_INIT(.., .., opname)
* is missing from the list above. */
LASSERTF(stats->ls_cnt_header[i].lc_name != NULL,
- "Missing obd_stat initializer obd_op "
- "operation at offset %d.\n", i - num_private_stats);
+ "Missing obd_stat initializer obd_op operation at offset %d.\n",
+ i - num_private_stats);
}
rc = lprocfs_register_stats(obd->obd_proc_entry, "stats", stats);
if (rc < 0) {
@@ -1486,8 +1486,7 @@ int lprocfs_alloc_md_stats(struct obd_device *obd,
for (i = num_private_stats; i < num_stats; i++) {
if (stats->ls_cnt_header[i].lc_name == NULL) {
- CERROR("Missing md_stat initializer md_op "
- "operation at offset %d. Aborting.\n",
+ CERROR("Missing md_stat initializer md_op operation at offset %d. Aborting.\n",
i - num_private_stats);
LBUG();
}
@@ -1607,8 +1606,7 @@ LPROC_SEQ_FOPS_RO(lproc_exp_hash);
int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data)
{
return seq_printf(m, "%s\n",
- "Write into this file to clear all nid stats and "
- "stale nid entries");
+ "Write into this file to clear all nid stats and stale nid entries");
}
EXPORT_SYMBOL(lprocfs_nid_stats_clear_read);
@@ -1819,7 +1817,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
}
EXPORT_SYMBOL(lprocfs_read_helper);
-int lprocfs_write_helper(const char *buffer, unsigned long count,
+int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val)
{
return lprocfs_write_frac_helper(buffer, count, val, 1);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 2fc037cfb62f..83bf168c2939 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -866,8 +866,7 @@ static int lu_htable_order(void)
/* clear off unreasonable cache setting. */
if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
- CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
- " the range of (0, %u]. Will use default value: %u.\n",
+ CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
lu_cache_percent, LU_CACHE_PERCENT_MAX,
LU_CACHE_PERCENT_DEFAULT);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5e7b3d7cc984..6ce9adc2f11c 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -835,7 +835,7 @@ int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
LIST_HEAD(lustre_profile_list);
-struct lustre_profile *class_get_profile(const char * prof)
+struct lustre_profile *class_get_profile(const char *prof)
{
struct lustre_profile *lprof;
@@ -1443,8 +1443,7 @@ int class_config_llog_handler(const struct lu_env *env,
if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
!(clli->cfg_flags & CFG_F_MARKER) &&
(lcfg->lcfg_command != LCFG_MARKER)) {
- CWARN("Config not inside markers, ignoring! "
- "(inst: %p, uuid: %s, flags: %#x)\n",
+ CWARN("Config not inside markers, ignoring! (inst: %p, uuid: %s, flags: %#x)\n",
clli->cfg_instance,
clli->cfg_uuid.uuid, clli->cfg_flags);
clli->cfg_flags |= CFG_F_SKIP;
@@ -1467,14 +1466,12 @@ int class_config_llog_handler(const struct lu_env *env,
if ((lcfg->lcfg_command == LCFG_ATTACH && typename &&
strcmp(typename, "mds") == 0)) {
- CWARN("For 1.8 interoperability, rename obd "
- "type from mds to mdt\n");
+ CWARN("For 1.8 interoperability, rename obd type from mds to mdt\n");
typename[2] = 't';
}
if ((lcfg->lcfg_command == LCFG_SETUP && index &&
strcmp(index, "type") == 0)) {
- CDEBUG(D_INFO, "For 1.8 interoperability, "
- "set this index to '0'\n");
+ CDEBUG(D_INFO, "For 1.8 interoperability, set this index to '0'\n");
index[0] = '0';
index[1] = 0;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 1260c8713bc6..4f39cdee1b5c 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -100,19 +100,12 @@ int lustre_process_log(struct super_block *sb, char *logname,
OBD_FREE_PTR(bufs);
if (rc == -EINVAL)
- LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s'"
- "failed from the MGS (%d). Make sure this "
- "client and the MGS are running compatible "
- "versions of Lustre.\n",
+ LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
mgc->obd_name, logname, rc);
if (rc)
- LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' "
- "failed (%d). This may be the result of "
- "communication errors between this node and "
- "the MGS, a bad configuration, or other "
- "errors. See the syslog for more "
- "information.\n", mgc->obd_name, logname,
+ LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
+ mgc->obd_name, logname,
rc);
/* class_obd_list(); */
@@ -297,11 +290,8 @@ int lustre_start_mgc(struct super_block *sb)
if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
/* LMD_FLG_NOIR is for test purpose only */
LCONSOLE_WARN(
- "Trying to mount a client with IR setting "
- "not compatible with current mgc. "
- "Force to use current mgc setting that is "
- "IR %s.\n",
- has_ir ? "enabled" : "disabled");
+ "Trying to mount a client with IR setting not compatible with current mgc. Force to use current mgc setting that is IR %s.\n",
+ has_ir ? "enabled" : "disabled");
if (has_ir)
*flags &= ~LMD_FLG_NOIR;
else
@@ -998,16 +988,14 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
LASSERT(lmd);
if (!options) {
- LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that "
- "/sbin/mount.lustre is installed.\n");
+ LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that /sbin/mount.lustre is installed.\n");
return -EINVAL;
}
/* Options should be a string - try to detect old lmd data */
if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) {
- LCONSOLE_ERROR_MSG(0x163, "You're using an old version of "
- "/sbin/mount.lustre. Please install "
- "version %s\n", LUSTRE_VERSION_STRING);
+ LCONSOLE_ERROR_MSG(0x163, "You're using an old version of /sbin/mount.lustre. Please install version %s\n",
+ LUSTRE_VERSION_STRING);
return -EINVAL;
}
lmd->lmd_magic = LMD_MAGIC;
@@ -1139,8 +1127,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
}
if (!devname) {
- LCONSOLE_ERROR_MSG(0x164, "Can't find the device name "
- "(need mount option 'device=...')\n");
+ LCONSOLE_ERROR_MSG(0x164, "Can't find the device name (need mount option 'device=...')\n");
goto invalid;
}
@@ -1232,9 +1219,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
if (client_fill_super == NULL)
request_module("lustre");
if (client_fill_super == NULL) {
- LCONSOLE_ERROR_MSG(0x165, "Nothing registered for "
- "client mount! Is the 'lustre' "
- "module loaded?\n");
+ LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
lustre_put_lsi(sb);
rc = -ENODEV;
} else {
@@ -1249,8 +1234,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
/* c_f_s will call lustre_common_put_super on failure */
}
} else {
- CERROR("This is client-side-only module, "
- "cannot handle server mount.\n");
+ CERROR("This is client-side-only module, cannot handle server mount.\n");
rc = -EINVAL;
}
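
The obd_mount.c hunks above are all one transformation: user-visible strings that had been split across several source lines are joined into a single literal so the messages can be found with grep, which is what checkpatch asks for even when the joined line runs past 80 columns. In the first hunk the join also repairs a glitch where the fragments "...log '%s'" and "failed from the MGS..." concatenated without a space. A minimal userspace sketch of the pattern, with LCONSOLE_ERROR_MSG() stood in by a plain printf wrapper (the real macro takes the same message number and format but routes through libcfs) and a made-up obd device name:

#include <stdio.h>

/* crude stand-in for the real libcfs console macro */
#define LCONSOLE_ERROR_MSG(errnum, fmt, ...) \
	printf("LustreError: %x: " fmt, (unsigned int)(errnum), ##__VA_ARGS__)

int main(void)
{
	const char *obd_name = "MGC192.168.0.1@tcp";	/* hypothetical device name */
	const char *logname = "lustre-client";
	int rc = -22;

	/*
	 * Before the patch this literal was split across several source
	 * lines, so grepping the tree for "failed from the MGS" found
	 * nothing.  After coalescing, the whole sentence is one literal and
	 * only the argument list wraps.
	 */
	LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
			   obd_name, logname, rc);
	return 0;
}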
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 98e4290919d0..5f6d9441bc44 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -698,14 +698,16 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
int cleanup = 0;
OBD_ALLOC_PTR(ed);
- if (ed == NULL)
- GOTO(out, rc = -ENOMEM);
+ if (ed == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
cleanup = 1;
cd = &ed->ed_cl;
rc = cl_device_init(cd, t);
if (rc)
- GOTO(out, rc);
+ goto out;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
cd->cd_ops = &echo_device_cl_ops;
@@ -719,24 +721,26 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (tgt == NULL) {
CERROR("Can not find tgt device %s\n",
lustre_cfg_string(cfg, 1));
- GOTO(out, rc = -ENODEV);
+ rc = -ENODEV;
+ goto out;
}
next = tgt->obd_lu_dev;
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
CERROR("echo MDT client must be run on server\n");
- GOTO(out, rc = -EOPNOTSUPP);
+ rc = -EOPNOTSUPP;
+ goto out;
}
rc = echo_site_init(env, ed);
if (rc)
- GOTO(out, rc);
+ goto out;
cleanup = 3;
rc = echo_client_setup(env, obd, cfg);
if (rc)
- GOTO(out, rc);
+ goto out;
ed->ed_ec = &obd->u.echo_client;
cleanup = 4;
@@ -749,15 +753,17 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
tgt_type_name = tgt->obd_type->typ_name;
if (next != NULL) {
LASSERT(next != NULL);
- if (next->ld_site != NULL)
- GOTO(out, rc = -EBUSY);
+ if (next->ld_site != NULL) {
+ rc = -EBUSY;
+ goto out;
+ }
next->ld_site = &ed->ed_site->cs_lu;
rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
next->ld_type->ldt_name,
NULL);
if (rc)
- GOTO(out, rc);
+ goto out;
/* Tricky case, I have to determine the obd type since
* CLIO uses the different parameters to initialize
@@ -865,8 +871,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
spin_lock(&ec->ec_lock);
while (!list_empty(&ec->ec_objects)) {
spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
+ CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
lu_site_purge(env, &ed->ed_site->cs_lu, -1);
@@ -968,15 +973,19 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
fid = &info->eti_fid;
rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
- if (rc != 0)
- GOTO(out, eco = ERR_PTR(rc));
+ if (rc != 0) {
+ eco = ERR_PTR(rc);
+ goto out;
+ }
/* In the function below, .hs_keycmp resolves to
* lu_obj_hop_keycmp() */
/* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
- if (IS_ERR(obj))
- GOTO(out, eco = (void *)obj);
+ if (IS_ERR(obj)) {
+ eco = (void *)obj;
+ goto out;
+ }
eco = cl2echo_obj(obj);
if (eco->eo_deleted) {
@@ -1076,7 +1085,7 @@ static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end,
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
if (result < 0)
- GOTO(out, result);
+ goto out;
LASSERT(result == 0);
result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
@@ -1182,7 +1191,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, obj);
if (rc < 0)
- GOTO(out, rc);
+ goto out;
LASSERT(rc == 0);
@@ -1191,7 +1200,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
- GOTO(error_lock, rc);
+ goto error_lock;
for (i = 0; i < npages; i++) {
LASSERT(pages[i]);
@@ -1318,7 +1327,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = echo_alloc_memmd(ed, &lsm);
if (rc < 0) {
CERROR("Cannot allocate md: rc = %d\n", rc);
- GOTO(failed, rc);
+ goto failed;
}
if (ulsm != NULL) {
@@ -1326,7 +1335,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = echo_copyin_lsm (ed, lsm, ulsm, ulsm_nob);
if (rc != 0)
- GOTO(failed, rc);
+ goto failed;
if (lsm->lsm_stripe_count == 0)
lsm->lsm_stripe_count = ec->ec_nstripes;
@@ -1363,7 +1372,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
if (rc != 0) {
CERROR("Cannot create objects: rc = %d\n", rc);
- GOTO(failed, rc);
+ goto failed;
}
created = 1;
}
@@ -1373,8 +1382,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
oa->o_valid |= OBD_MD_FLID;
eco = cl_echo_object_find(ed, &lsm);
- if (IS_ERR(eco))
- GOTO(failed, rc = PTR_ERR(eco));
+ if (IS_ERR(eco)) {
+ rc = PTR_ERR(eco);
+ goto failed;
+ }
cl_echo_object_put(eco);
CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
@@ -1642,8 +1653,10 @@ static int echo_client_prep_commit(const struct lu_env *env,
OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
- if (lnb == NULL || rnb == NULL)
- GOTO(out, ret = -ENOMEM);
+ if (lnb == NULL || rnb == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (rw == OBD_BRW_WRITE && async)
brw_flags |= OBD_BRW_ASYNC;
@@ -1671,7 +1684,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
lnb, oti, NULL);
if (ret != 0)
- GOTO(out, ret);
+ goto out;
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
@@ -1704,7 +1717,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
rnb, npages, lnb, oti, ret);
if (ret != 0)
- GOTO(out, ret);
+ goto out;
/* Reset oti otherwise it would confuse ldiskfs. */
memset(oti, 0, sizeof(*oti));
@@ -1862,21 +1875,27 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
return -ENOMEM;
rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc)
- GOTO(out, rc = -ENOMEM);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out;
+ }
switch (cmd) {
case OBD_IOC_CREATE: /* may create echo object */
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
data->ioc_plen1, &dummy_oti);
- GOTO(out, rc);
+ goto out;
case OBD_IOC_DESTROY:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
@@ -1886,7 +1905,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
eco->eo_deleted = 1;
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_GETATTR:
rc = echo_get_object(&eco, ed, oa);
@@ -1897,11 +1916,13 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = obd_getattr(env, ec->ec_exp, &oinfo);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_SETATTR:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
@@ -1912,17 +1933,19 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case OBD_IOC_BRW_WRITE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rw = OBD_BRW_WRITE;
/* fall through */
case OBD_IOC_BRW_READ:
rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
- GOTO(out, rc);
+ goto out;
case ECHO_IOC_GET_STRIPE:
rc = echo_get_object(&eco, ed, oa);
@@ -1931,11 +1954,13 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
data->ioc_plen1);
echo_put_object(eco);
}
- GOTO(out, rc);
+ goto out;
case ECHO_IOC_SET_STRIPE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
if (data->ioc_pbuf1 == NULL) { /* unset */
rc = echo_get_object(&eco, ed, oa);
@@ -1948,25 +1973,28 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
data->ioc_pbuf1,
data->ioc_plen1, &dummy_oti);
}
- GOTO (out, rc);
+ goto out;
case ECHO_IOC_ENQUEUE:
- if (!capable(CFS_CAP_SYS_ADMIN))
- GOTO (out, rc = -EPERM);
+ if (!capable(CFS_CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
rc = echo_client_enqueue(exp, oa,
data->ioc_conn1, /* lock mode */
data->ioc_offset,
data->ioc_count);/*extent*/
- GOTO (out, rc);
+ goto out;
case ECHO_IOC_CANCEL:
rc = echo_client_cancel(exp, oa);
- GOTO (out, rc);
+ goto out;
default:
CERROR ("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
- GOTO (out, rc = -ENOTTY);
+ rc = -ENOTTY;
+ goto out;
}
out:
@@ -2084,11 +2112,13 @@ static int echo_client_disconnect(struct obd_export *exp)
{
int rc;
- if (exp == NULL)
- GOTO(out, rc = -EINVAL);
+ if (exp == NULL) {
+ rc = -EINVAL;
+ goto out;
+ }
rc = class_disconnect(exp);
- GOTO(out, rc);
+ goto out;
out:
return rc;
}
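
The echo_client.c changes pair the string coalescing with a second mechanical cleanup: every GOTO(label, rc = val) or GOTO(label, rc) call is open-coded as an explicit assignment followed by a plain goto, so the jump and the return value are visible at the call site instead of hidden behind a Lustre-private wrapper. A compilable sketch of the two forms; the macro below is a simplified assumption (the real libcfs GOTO() also emitted a debug trace of the label and value before jumping):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* loose stand-in for the macro being removed above */
#define GOTO(label, rc_expr)		\
do {					\
	(void)(rc_expr);		\
	goto label;			\
} while (0)

/* old style: assignment and jump hidden inside the macro */
static int alloc_old_style(void **pp)
{
	int rc = 0;

	*pp = malloc(32);
	if (*pp == NULL)
		GOTO(out, rc = -ENOMEM);
out:
	return rc;
}

/* new style, as open-coded by the patch */
static int alloc_new_style(void **pp)
{
	int rc = 0;

	*pp = malloc(32);
	if (*pp == NULL) {
		rc = -ENOMEM;
		goto out;
	}
out:
	return rc;
}

int main(void)
{
	void *p = NULL;
	int rc;

	rc = alloc_old_style(&p);
	printf("old style: rc=%d\n", rc);
	free(p);

	p = NULL;
	rc = alloc_new_style(&p);
	printf("new style: rc=%d\n", rc);
	free(p);
	return 0;
}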
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 7734d666b7a1..370e6d4896c6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -178,76 +178,113 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
int page_count;
int rc = 0;
- if (!osc_object_is_locked(obj))
- GOTO(out, rc = 9);
+ if (!osc_object_is_locked(obj)) {
+ rc = 9;
+ goto out;
+ }
- if (ext->oe_state >= OES_STATE_MAX)
- GOTO(out, rc = 10);
+ if (ext->oe_state >= OES_STATE_MAX) {
+ rc = 10;
+ goto out;
+ }
- if (atomic_read(&ext->oe_refc) <= 0)
- GOTO(out, rc = 20);
+ if (atomic_read(&ext->oe_refc) <= 0) {
+ rc = 20;
+ goto out;
+ }
- if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
- GOTO(out, rc = 30);
+ if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
+ rc = 30;
+ goto out;
+ }
switch (ext->oe_state) {
case OES_INV:
if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
- GOTO(out, rc = 35);
- GOTO(out, rc = 0);
- break;
+ rc = 35;
+ else
+ rc = 0;
+ goto out;
case OES_ACTIVE:
- if (atomic_read(&ext->oe_users) == 0)
- GOTO(out, rc = 40);
- if (ext->oe_hp)
- GOTO(out, rc = 50);
- if (ext->oe_fsync_wait && !ext->oe_urgent)
- GOTO(out, rc = 55);
+ if (atomic_read(&ext->oe_users) == 0) {
+ rc = 40;
+ goto out;
+ }
+ if (ext->oe_hp) {
+ rc = 50;
+ goto out;
+ }
+ if (ext->oe_fsync_wait && !ext->oe_urgent) {
+ rc = 55;
+ goto out;
+ }
break;
case OES_CACHE:
- if (ext->oe_grants == 0)
- GOTO(out, rc = 60);
- if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
- GOTO(out, rc = 65);
+ if (ext->oe_grants == 0) {
+ rc = 60;
+ goto out;
+ }
+ if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
+ rc = 65;
+ goto out;
+ }
default:
- if (atomic_read(&ext->oe_users) > 0)
- GOTO(out, rc = 70);
+ if (atomic_read(&ext->oe_users) > 0) {
+ rc = 70;
+ goto out;
+ }
}
- if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start)
- GOTO(out, rc = 80);
+ if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
+ rc = 80;
+ goto out;
+ }
- if (ext->oe_osclock == NULL && ext->oe_grants > 0)
- GOTO(out, rc = 90);
+ if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
+ rc = 90;
+ goto out;
+ }
if (ext->oe_osclock) {
struct cl_lock_descr *descr;
descr = &ext->oe_osclock->cll_descr;
if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end))
- GOTO(out, rc = 100);
+ descr->cld_end >= ext->oe_max_end)) {
+ rc = 100;
+ goto out;
+ }
}
- if (ext->oe_nr_pages > ext->oe_mppr)
- GOTO(out, rc = 105);
+ if (ext->oe_nr_pages > ext->oe_mppr) {
+ rc = 105;
+ goto out;
+ }
/* Do not verify page list if extent is in RPC. This is because an
* in-RPC extent is supposed to be exclusively accessible w/o lock. */
- if (ext->oe_state > OES_CACHE)
- GOTO(out, rc = 0);
+ if (ext->oe_state > OES_CACHE) {
+ rc = 0;
+ goto out;
+ }
- if (!extent_debug)
- GOTO(out, rc = 0);
+ if (!extent_debug) {
+ rc = 0;
+ goto out;
+ }
page_count = 0;
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
pgoff_t index = oap2cl_page(oap)->cp_index;
++page_count;
- if (index > ext->oe_end || index < ext->oe_start)
- GOTO(out, rc = 110);
+ if (index > ext->oe_end || index < ext->oe_start) {
+ rc = 110;
+ goto out;
+ }
+ }
+ if (page_count != ext->oe_nr_pages) {
+ rc = 120;
+ goto out;
}
- if (page_count != ext->oe_nr_pages)
- GOTO(out, rc = 120);
out:
if (rc != 0)
@@ -536,10 +573,9 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
/**
* Drop user count of osc_extent, and unplug IO asynchronously.
*/
-int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
+void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
struct osc_object *obj = ext->oe_obj;
- int rc = 0;
LASSERT(atomic_read(&ext->oe_users) > 0);
LASSERT(sanity_check(ext) == 0);
@@ -571,7 +607,6 @@ int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
osc_io_unplug_async(env, osc_cli(obj), obj);
}
osc_extent_put(env, ext);
- return rc;
}
static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
@@ -776,8 +811,10 @@ restart:
rc = osc_extent_wait(env, conflict, OES_INV);
osc_extent_put(env, conflict);
conflict = NULL;
- if (rc < 0)
- GOTO(out, found = ERR_PTR(rc));
+ if (rc < 0) {
+ found = ERR_PTR(rc);
+ goto out;
+ }
goto restart;
}
@@ -934,7 +971,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
io->ci_obj = cl_object_top(osc2cl(obj));
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc < 0)
- GOTO(out, rc);
+ goto out;
/* discard all pages with index greater then trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
@@ -1114,21 +1151,27 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
osc_object_lock(obj);
LASSERT(sanity_check_nolock(ext) == 0);
end_chunk = ext->oe_end >> ppc_bits;
- if (chunk > end_chunk + 1)
- GOTO(out, rc = -ERANGE);
+ if (chunk > end_chunk + 1) {
+ rc = -ERANGE;
+ goto out;
+ }
- if (end_chunk >= chunk)
- GOTO(out, rc = 0);
+ if (end_chunk >= chunk) {
+ rc = 0;
+ goto out;
+ }
LASSERT(end_chunk + 1 == chunk);
/* try to expand this extent to cover @index */
end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
next = next_extent(ext);
- if (next != NULL && next->oe_start <= end_index)
+ if (next != NULL && next->oe_start <= end_index) {
/* complex mode - overlapped with the next extent,
* this case will be handled by osc_extent_find() */
- GOTO(out, rc = -EAGAIN);
+ rc = -EAGAIN;
+ goto out;
+ }
ext->oe_end = end_index;
ext->oe_grants += chunksize;
@@ -1497,12 +1540,16 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
cli->cl_dirty_max < PAGE_CACHE_SIZE ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
- GOTO(out, rc = -EDQUOT);
+ cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
+ rc = -EDQUOT;
+ goto out;
+ }
/* Hopefully normal case - cache space and write credits available */
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
+ if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ rc = 0;
+ goto out;
+ }
/* We can get here for two reasons: too many dirty pages in cache, or
* run out of grants. In both cases we should write dirty pages out.
@@ -1530,16 +1577,18 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
/* l_wait_event is interrupted by signal */
if (rc < 0) {
list_del_init(&ocw.ocw_entry);
- GOTO(out, rc);
+ goto out;
}
LASSERT(list_empty(&ocw.ocw_entry));
rc = ocw.ocw_rc;
if (rc != -EDQUOT)
- GOTO(out, rc);
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
+ goto out;
+ if (osc_enter_cache_try(cli, oap, bytes, 0)) {
+ rc = 0;
+ goto out;
+ }
}
out:
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -1562,8 +1611,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
(atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
- "osc max %ld, sys max %d\n", cli->cl_dirty,
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
+ cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
goto wakeup;
}
@@ -2401,14 +2450,15 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* one making the extent active, we could deadlock waiting for
* the page writeback to clear but it won't because the extent
* is active and won't be written out. */
- GOTO(out, rc = -EAGAIN);
+ rc = -EAGAIN;
+ goto out;
default:
break;
}
rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
if (rc)
- GOTO(out, rc);
+ goto out;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index ebbd95c0cea8..365b2787b3c8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -678,7 +678,7 @@ struct osc_extent {
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int sent, int rc);
-int osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
/** @} osc */
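
This one-line prototype change mirrors the osc_cache.c hunk earlier in the section: osc_extent_release() only ever returned 0, so the int return and the rc local were dead weight and callers have nothing to check. The shape of the cleanup as a tiny standalone example (the reference-count body is illustrative only, not the real extent logic):

#include <stdio.h>

/* was "static int ... { int rc = 0; ...; return rc; }" with rc never set */
static void extent_release(int *users)
{
	if (*users > 0)
		--*users;	/* drop one user reference; cannot fail */
}

int main(void)
{
	int users = 2;

	extent_release(&users);	/* no status left to check */
	extent_release(&users);
	printf("users now %d\n", users);
	return 0;
}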
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 8138856fda8c..a7f08bc48166 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -1067,14 +1067,12 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
* conflicts, we do not wait but return 0 so the
* request is send to the server
*/
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
- "with %p, no wait, send to server\n",
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
lock, conflict);
cl_lock_put(env, conflict);
rc = 0;
} else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
- "will wait\n",
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
lock, conflict);
LASSERT(lock->cll_conflict == NULL);
lu_ref_add(&conflict->cll_reference, "cancel-wait",
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 69000584619d..92c202f70395 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -140,8 +140,7 @@ static int osc_object_print(const struct lu_env *env, void *cookie,
struct lov_oinfo *oinfo = osc->oo_oinfo;
struct osc_async_rc *ar = &oinfo->loi_ar;
- (*p)(env, cookie, "id: "DOSTID" "
- "idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
+ (*p)(env, cookie, "id: " DOSTID " idx: %d gen: %d kms_valid: %u kms %llu rc: %d force_sync: %d min_xid: %llu ",
POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx,
oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms,
ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid);
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index fcd079b1af01..76ba58b09c5d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -369,12 +369,7 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
- "1< %#x %d %u %s %s > "
- "2< %llu %u %u %#x %#x | %p %p %p > "
- "3< %s %p %d %lu %d > "
- "4< %d %d %d %lu %s | %s %s %s %s > "
- "5< %s %s %s %s | %d %s | %d %s %s>\n",
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
opg,
/* 1 */
oap->oap_magic, oap->oap_cmd,
@@ -550,8 +545,8 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
LINVRNT(osc_page_protected(env, opg,
crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
- LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
- "magic 0x%x\n", oap, oap->oap_magic);
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
+ oap, oap->oap_magic);
LASSERT(oap->oap_async_flags & ASYNC_READY);
LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 44657a06b8a5..b9450b95f1c5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1078,9 +1078,9 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+ CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
+ cli->cl_import->imp_obd->obd_name,
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
list_empty(&cli->cl_grant_shrink_list))
@@ -1171,8 +1171,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
/* warn if we try to combine flags that we don't know to be
* safe to combine */
if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
- CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
- "report this at http://bugs.whamcloud.com/\n",
+ CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
p1->flag, p2->flag);
}
return 0;
@@ -1343,8 +1342,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
LASSERTF(i == 0 || pg->off > pg_prev->off,
- "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
- " prev_pg %p [pri %lu ind %lu] off %llu\n",
+ "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
i, page_count,
pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
pg_prev->pg, page_private(pg_prev->pg),
@@ -1467,16 +1465,16 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
cksum_type);
if (cksum_type != client_cksum_type)
- msg = "the server did not use the checksum type specified in "
- "the original request - likely a protocol problem";
+ msg = "the server did not use the checksum type specified in the original request - likely a protocol problem"
+ ;
else if (new_cksum == server_cksum)
- msg = "changed on the client after we checksummed it - "
- "likely false positive due to mmap IO (bug 11742)";
+ msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)"
+ ;
else if (new_cksum == client_cksum)
msg = "changed in transit before arrival at OST";
else
- msg = "changed in transit AND doesn't match the original - "
- "likely false positive due to mmap IO (bug 11742)";
+ msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
+ ;
LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
" object "DOSTID" extent [%llu-%llu]\n",
@@ -1486,8 +1484,8 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
POSTID(&oa->o_oi), pga[0]->off,
pga[page_count-1]->off + pga[page_count-1]->count - 1);
- CERROR("original client csum %x (type %x), server csum %x (type %x), "
- "client csum now %x\n", client_cksum, client_cksum_type,
+ CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
+ client_cksum, client_cksum_type,
server_cksum, cksum_type, new_cksum);
return 1;
}
@@ -1601,23 +1599,21 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
}
if (server_cksum != client_cksum) {
- LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
- "%s%s%s inode "DFID" object "DOSTID
- " extent [%llu-%llu]\n",
+ LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
req->rq_import->imp_obd->obd_name,
libcfs_nid2str(peer->nid),
via, router,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_seq : (__u64)0,
+ body->oa.o_parent_seq : (__u64)0,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_oid : 0,
+ body->oa.o_parent_oid : 0,
body->oa.o_valid & OBD_MD_FLFID ?
- body->oa.o_parent_ver : 0,
+ body->oa.o_parent_ver : 0,
POSTID(&body->oa.o_oi),
aa->aa_ppga[0]->off,
aa->aa_ppga[aa->aa_page_count-1]->off +
aa->aa_ppga[aa->aa_page_count-1]->count -
- 1);
+ 1);
CERROR("client %x, server %x, cksum_type %x\n",
client_cksum, server_cksum, cksum_type);
cksum_counter = 0;
@@ -1771,8 +1767,7 @@ static int brw_interpret(const struct lu_env *env,
if (osc_recoverable_error(rc)) {
if (req->rq_import_generation !=
req->rq_import->imp_generation) {
- CDEBUG(D_HA, "%s: resend cross eviction for object: "
- ""DOSTID", rc = %d.\n",
+ CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
req->rq_import->imp_obd->obd_name,
POSTID(&aa->aa_oa->o_oi), rc);
} else if (rc == -EINPROGRESS ||
@@ -3013,8 +3008,8 @@ static int osc_reconnect(const struct lu_env *env,
cli->cl_lost_grant = 0;
client_obd_list_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
- " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
+ data->ocd_connect_flags,
data->ocd_version, data->ocd_grant, lost_grant);
}
@@ -3217,8 +3212,6 @@ out_ptlrpcd:
static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
- int rc = 0;
-
switch (stage) {
case OBD_CLEANUP_EARLY: {
struct obd_import *imp;
@@ -3253,7 +3246,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
break;
}
}
- return rc;
+ return 0;
}
int osc_cleanup(struct obd_device *obd)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 38cc931a189c..dc9e406f3212 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -265,8 +265,7 @@ static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
so just keep minimal history here */
oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
if (oldse != 0)
- CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
- "has changed from %d to %d\n",
+ CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
req->rq_import->imp_obd->obd_name, req->rq_request_portal,
oldse, at_get(&at->iat_service_estimate[idx]));
}
@@ -297,8 +296,7 @@ static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
oldnl = at_measured(&at->iat_net_latency, nl);
if (oldnl != 0)
- CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
- "has changed from %d to %d\n",
+ CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) has changed from %d to %d\n",
req->rq_import->imp_obd->obd_name,
obd_uuid2str(
&req->rq_import->imp_connection->c_remote_uuid),
@@ -371,8 +369,8 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
+ "Early reply #%d, new deadline in " CFS_DURATION_T "s (" CFS_DURATION_T "s)",
+ req->rq_early_count,
cfs_time_sub(req->rq_deadline, get_seconds()),
cfs_time_sub(req->rq_deadline, olddl));
@@ -445,8 +443,8 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
LASSERTF(list_empty(&pool->prp_req_list) ||
size == pool->prp_rq_size,
- "Trying to change pool size with nonempty pool "
- "from %d to %d bytes\n", pool->prp_rq_size, size);
+ "Trying to change pool size with nonempty pool from %d to %d bytes\n",
+ pool->prp_rq_size, size);
spin_lock(&pool->prp_lock);
pool->prp_rq_size = size;
@@ -1146,11 +1144,10 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
struct obd_import *imp = req->rq_import;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
if (ptlrpc_console_allow(req))
- LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
- " operation %s failed with %d.\n",
+ LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s, operation %s failed with %d.\n",
imp->imp_obd->obd_name,
libcfs_nid2str(
- imp->imp_connection->c_peer.nid),
+ imp->imp_connection->c_peer.nid),
ll_opcode2str(opc), err);
return err < 0 ? err : -EINVAL;
}
@@ -1206,8 +1203,7 @@ static int after_reply(struct ptlrpc_request *req)
if (req->rq_reply_truncate) {
if (ptlrpc_no_resend(req)) {
- DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
- " expected: %d, actual size: %d",
+ DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
return -EOVERFLOW;
}
@@ -1259,8 +1255,7 @@ static int after_reply(struct ptlrpc_request *req)
/* new xid is already allocated for bulk in
* ptlrpc_check_set() */
req->rq_xid = ptlrpc_next_xid();
- DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
- "resend on EINPROGRESS");
+ DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
}
/* Readjust the timeout for current conditions */
@@ -1412,8 +1407,8 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
req->rq_waiting = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
+ lustre_msg_get_status(req->rq_reqmsg),
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
LASSERT(list_empty(&req->rq_list));
@@ -1450,8 +1445,8 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:%llu:%s:%d\n", current_comm(),
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
+ current_comm(),
imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
@@ -1828,12 +1823,11 @@ interpret:
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
@@ -2215,10 +2209,8 @@ EXPORT_SYMBOL(ptlrpc_set_wait);
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- if (request == NULL) {
+ if (request == NULL)
return;
- }
-
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
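
Besides the message coalescing, the __ptlrpc_free_req() hunk just above drops the braces around a single-statement if, per kernel CodingStyle. A trivial sketch; free_req() and its body are invented for the example:

#include <stdio.h>

static void free_req(void *request)
{
	if (request == NULL)
		return;	/* was: if (request == NULL) { return; } */

	/* a real implementation would release buffers and the request here */
	printf("freeing %p\n", request);
}

int main(void)
{
	int dummy = 0;

	free_req(NULL);		/* early return, prints nothing */
	free_req(&dummy);
	return 0;
}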
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 32dfffa76a5e..7f8644e01112 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -128,8 +128,8 @@ void reply_in_callback(lnet_event_t *ev)
((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
/* Early reply */
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply received: mlen=%u offset=%d replen=%d "
- "replied=%d unlinked=%d", ev->mlength, ev->offset,
+ "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
+ ev->mlength, ev->offset,
req->rq_replen, req->rq_replied, ev->unlinked);
req->rq_early_count++; /* number received, client side */
@@ -313,8 +313,7 @@ void request_in_callback(lnet_event_t *ev)
}
req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
if (req == NULL) {
- CERROR("Can't allocate incoming request descriptor: "
- "Dropping %s RPC from %s\n",
+ CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
service->srv_name,
libcfs_id2str(ev->initiator));
return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 2e7e7171ca63..4ceb90db02a3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -157,18 +157,14 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
&target_start, &target_len);
if (imp->imp_replayable) {
- LCONSOLE_WARN("%s: Connection to %.*s (at %s) was "
- "lost; in progress operations using this "
- "service will wait for recovery to complete\n",
- imp->imp_obd->obd_name, target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ LCONSOLE_WARN("%s: Connection to %.*s (at %s) was lost; in progress operations using this service will wait for recovery to complete\n",
+ imp->imp_obd->obd_name, target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
} else {
- LCONSOLE_ERROR_MSG(0x166, "%s: Connection to "
- "%.*s (at %s) was lost; in progress "
- "operations using this service will fail\n",
- imp->imp_obd->obd_name,
- target_len, target_start,
- libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ LCONSOLE_ERROR_MSG(0x166, "%s: Connection to %.*s (at %s) was lost; in progress operations using this service will fail\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
}
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
spin_unlock(&imp->imp_lock);
@@ -328,8 +324,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
* sluggish nets). Let's check this. If there
* is no inflight and unregistering != 0, this
* is bug. */
- LASSERTF(count == 0, "Some RPCs are still "
- "unregistering: %d\n", count);
+ LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
+ count);
/* Let's save one loop as soon as inflight have
* dropped to zero. No new inflights possible at
@@ -353,12 +349,11 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
"still on delayed list");
}
- CERROR("%s: RPCs in \"%s\" phase found (%d). "
- "Network is sluggish? Waiting them "
- "to error out.\n", cli_tgt,
+ CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting them to error out.\n",
+ cli_tgt,
ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
atomic_read(&imp->
- imp_unregistering));
+ imp_unregistering));
}
spin_unlock(&imp->imp_lock);
}
@@ -413,8 +408,7 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
if (ptlrpc_set_import_discon(imp, conn_cnt)) {
if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, "
- "auto-deactivating\n",
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_obd->obd_name);
@@ -541,8 +535,8 @@ static int import_select_connection(struct obd_import *imp)
at_reset(at, CONNECTION_SWITCH_MAX);
}
LASSERT(imp_conn->oic_last_attempt);
- CDEBUG(D_HA, "%s: tried all connections, increasing latency "
- "to %ds\n", imp->imp_obd->obd_name, at_get(at));
+ CDEBUG(D_HA, "%s: tried all connections, increasing latency to %ds\n",
+ imp->imp_obd->obd_name, at_get(at));
}
imp_conn->oic_last_attempt = cfs_time_current_64();
@@ -564,8 +558,7 @@ static int import_select_connection(struct obd_import *imp)
deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
&target_start, &target_len);
- CDEBUG(D_HA, "%s: Connection changing to"
- " %.*s (at %s)\n",
+ CDEBUG(D_HA, "%s: Connection changing to %.*s (at %s)\n",
imp->imp_obd->obd_name,
target_len, target_start,
libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
@@ -935,14 +928,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
lustre_msg_get_handle(
request->rq_repmsg)->cookie);
} else {
- LCONSOLE_WARN("Evicted from %s (at %s) "
- "after server handle changed from %#llx to %#llx\n",
+ LCONSOLE_WARN("Evicted from %s (at %s) after server handle changed from %#llx to %#llx\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection-> \
c_remote_uuid.uuid,
imp->imp_remote_handle.cookie,
lustre_msg_get_handle(
- request->rq_repmsg)->cookie);
+ request->rq_repmsg)->cookie);
}
@@ -962,8 +954,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
}
if (imp->imp_invalid) {
- CDEBUG(D_HA, "%s: reconnected but import is invalid; "
- "marking evicted\n", imp->imp_obd->obd_name);
+ CDEBUG(D_HA, "%s: reconnected but import is invalid; marking evicted\n",
+ imp->imp_obd->obd_name);
IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
} else if (MSG_CONNECT_RECOVERING & msg_flags) {
CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
@@ -985,8 +977,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
imp->imp_last_replay_transno = 0;
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
- DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
- " not set: %x)", imp->imp_obd->obd_name, msg_flags);
+ DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
+ imp->imp_obd->obd_name, msg_flags);
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
@@ -994,17 +986,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
/* Sanity checks for a reconnected import. */
if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
- CERROR("imp_replayable flag does not match server "
- "after reconnect. We should LBUG right here.\n");
+ CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
}
if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
lustre_msg_get_last_committed(request->rq_repmsg) <
aa->pcaa_peer_committed) {
- CERROR("%s went back in time (transno %lld"
- " was previously committed, server now claims %lld"
- ")! See https://bugzilla.lustre.org/show_bug.cgi?"
- "id=9646\n",
+ CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)! See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
lustre_msg_get_last_committed(request->rq_repmsg));
}
@@ -1013,8 +1001,7 @@ finish:
rc = ptlrpc_import_recovery_state_machine(imp);
if (rc != 0) {
if (rc == -ENOTCONN) {
- CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;"
- "invalidating and reconnecting\n",
+ CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
ptlrpc_connect_import(imp);
@@ -1034,9 +1021,7 @@ finish:
if ((imp->imp_connect_flags_orig & OBD_CONNECT_IBITS) &&
!(ocd->ocd_connect_flags & OBD_CONNECT_IBITS)) {
- LCONSOLE_WARN("%s: MDS %s does not support ibits "
- "lock, either very old or invalid: "
- "requested %llx, replied %llx\n",
+ LCONSOLE_WARN("%s: MDS %s does not support ibits lock, either very old or invalid: requested %llx, replied %llx\n",
imp->imp_obd->obd_name,
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_connect_flags_orig,
@@ -1052,13 +1037,12 @@ finish:
LUSTRE_VERSION_OFFSET_WARN)) {
/* Sigh, some compilers do not like #ifdef in the middle
of macro arguments */
- const char *older = "older. Consider upgrading server "
- "or downgrading client";
- const char *newer = "newer than client version. "
- "Consider upgrading client";
+ const char *older = "older. Consider upgrading server or downgrading client"
+ ;
+ const char *newer = "newer than client version. Consider upgrading client"
+ ;
- LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
- "is much %s (%s)\n",
+ LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
obd2cli_tgt(imp->imp_obd),
OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
OBD_OCD_VERSION_MINOR(ocd->ocd_version),
@@ -1095,10 +1079,7 @@ finish:
* the checksum types it doesn't support */
if ((ocd->ocd_cksum_types &
cksum_types_supported_client()) == 0) {
- LCONSOLE_WARN("The negotiation of the checksum "
- "algorithm to use with server %s "
- "failed (%x/%x), disabling "
- "checksums\n",
+ LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
obd2cli_tgt(imp->imp_obd),
ocd->ocd_cksum_types,
cksum_types_supported_client());
@@ -1191,17 +1172,13 @@ out:
* connection from liblustre clients, so we
* should never see this from VFS context
*/
- LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
- "(%d.%d.%d.%d)"
- " refused connection from this client "
- "with an incompatible version (%s). "
- "Client must be recompiled\n",
- obd2cli_tgt(imp->imp_obd),
- OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
- OBD_OCD_VERSION_MINOR(ocd->ocd_version),
- OBD_OCD_VERSION_PATCH(ocd->ocd_version),
- OBD_OCD_VERSION_FIX(ocd->ocd_version),
- LUSTRE_VERSION_STRING);
+ LCONSOLE_ERROR_MSG(0x16a, "Server %s version (%d.%d.%d.%d) refused connection from this client with an incompatible version (%s). Client must be recompiled\n",
+ obd2cli_tgt(imp->imp_obd),
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version),
+ LUSTRE_VERSION_STRING);
ptlrpc_deactivate_import(imp);
IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
}
@@ -1237,8 +1214,7 @@ static int completed_replay_interpret(const struct lu_env *env,
"%s: version recovery fails, reconnecting\n",
req->rq_import->imp_obd->obd_name);
} else {
- CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
- "reconnecting\n",
+ CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, reconnecting\n",
req->rq_import->imp_obd->obd_name,
req->rq_status);
}
@@ -1343,9 +1319,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
/* Don't care about MGC eviction */
if (strcmp(imp->imp_obd->obd_type->typ_name,
LUSTRE_MGC_NAME) != 0) {
- LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted "
- "by %.*s; in progress operations "
- "using this service will fail.\n",
+ LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted by %.*s; in progress operations using this service will fail.\n",
imp->imp_obd->obd_name, target_len,
target_start);
}
@@ -1455,8 +1429,7 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
break;
default:
rc = -EINVAL;
- CERROR("%s: don't know how to disconnect from %s "
- "(connect_op %d): rc = %d\n",
+ CERROR("%s: don't know how to disconnect from %s (connect_op %d): rc = %d\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
imp->imp_connect_op, rc);
return rc;
@@ -1607,8 +1580,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
at->at_current = max(at->at_current, at_min);
if (at->at_current != old)
- CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
- "(val=%u) hist %u %u %u %u\n", at,
+ CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d (val=%u) hist %u %u %u %u\n",
+ at,
old, at->at_current, at->at_current - old, val,
at->at_hist[0], at->at_hist[1], at->at_hist[2],
at->at_hist[3]);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b8337187b59..dc5ceb55d001 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -980,18 +980,7 @@ EXPORT_SYMBOL(RMF_CONN);
struct req_msg_field RMF_CONNECT_DATA =
DEFINE_MSGF("cdata",
RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */,
-#if LUSTRE_VERSION_CODE > OBD_OCD_VERSION(2, 7, 50, 0)
sizeof(struct obd_connect_data),
-#else
-/* For interoperability with 1.8 and 2.0 clients/servers.
- * The RPC verification code allows larger RPC buffers, but not
- * smaller buffers. Until we no longer need to keep compatibility
- * with older servers/clients we can only check that the buffer
- * size is at least as large as obd_connect_data_v1. That is not
- * not in itself harmful, since the chance of just corrupting this
- * field is low. See JIRA LU-16 for details. */
- sizeof(struct obd_connect_data_v1),
-#endif
lustre_swab_connect, NULL);
EXPORT_SYMBOL(RMF_CONNECT_DATA);
@@ -1897,8 +1886,8 @@ swabber_dumper_helper(struct req_capsule *pill,
swabber(value);
ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed field %s "
- "follows\n", field->rmf_name);
+ CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n",
+ field->rmf_name);
field->rmf_dumper(value);
}
@@ -1914,8 +1903,7 @@ swabber_dumper_helper(struct req_capsule *pill,
i < n;
i++, p += field->rmf_size) {
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, "
- "element %d follows\n",
+ CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, element %d follows\n",
do_swab ? "unswabbed " : "", field->rmf_name, i);
field->rmf_dumper(p);
}
@@ -1923,8 +1911,8 @@ swabber_dumper_helper(struct req_capsule *pill,
continue;
swabber(p);
if (dump) {
- CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, "
- "element %d follows\n", field->rmf_name, i);
+ CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, element %d follows\n",
+ field->rmf_name, i);
field->rmf_dumper(value);
}
}
@@ -1983,8 +1971,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
*/
len = lustre_msg_buflen(msg, offset);
if ((len % field->rmf_size) != 0) {
- CERROR("%s: array field size mismatch "
- "%d modulo %d != 0 (%d)\n",
+ CERROR("%s: array field size mismatch %d modulo %d != 0 (%d)\n",
field->rmf_name, len, field->rmf_size, loc);
return NULL;
}
@@ -1997,8 +1984,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
if (value == NULL) {
DEBUG_REQ(D_ERROR, pill->rc_req,
- "Wrong buffer for field `%s' (%d of %d) "
- "in format `%s': %d vs. %d (%s)\n",
+ "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
field->rmf_name, offset, lustre_msg_bufcount(msg),
fmt->rf_name, lustre_msg_buflen(msg, offset), len,
rcl_names[loc]);
@@ -2013,7 +1999,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
/**
* Dump a request and/or reply
*/
-void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
+static void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
{
const struct req_format *fmt;
const struct req_msg_field *field;
@@ -2031,8 +2017,8 @@ void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
* have a specific dumper
*/
len = req_capsule_get_size(pill, field, loc);
- CDEBUG(D_RPCTRACE, "Field %s has no dumper function;"
- "field size is %d\n", field->rmf_name, len);
+ CDEBUG(D_RPCTRACE, "Field %s has no dumper function; field size is %d\n",
+ field->rmf_name, len);
} else {
/* It's the dumping side-effect that we're interested in */
(void) __req_capsule_get(pill, field, loc, NULL, 1);
@@ -2184,8 +2170,7 @@ void req_capsule_set_size(struct req_capsule *pill,
(size > 0)) {
if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
(size % field->rmf_size != 0)) {
- CERROR("%s: array field size mismatch "
- "%d %% %d != 0 (%d)\n",
+ CERROR("%s: array field size mismatch %d %% %d != 0 (%d)\n",
field->rmf_name, size, field->rmf_size, loc);
LBUG();
} else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
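
The RMF_CONNECT_DATA hunk near the top of this file, like the lustre_msg_calc_cksum() hunk further down in pack_generic.c, deletes an #if/#else keyed on LUSTRE_VERSION_CODE crossing 2.7.50 and keeps only the post-2.7.50 side, dropping the 1.8/2.0 interoperability fallback described in the removed comment. A self-contained illustration of the pattern being removed; the version macro shape and the sizes are placeholders, not the real definitions or struct sizes:

#include <stdio.h>

/* stand-ins: the real macros and values come from the Lustre version headers */
#define OBD_OCD_VERSION(major, minor, patch, fix) \
	(((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))
#define LUSTRE_VERSION_CODE OBD_OCD_VERSION(2, 6, 99, 0)	/* assumed */

static unsigned int connect_data_min_size(void)
{
#if LUSTRE_VERSION_CODE > OBD_OCD_VERSION(2, 7, 50, 0)
	return 192;	/* pretend sizeof(struct obd_connect_data) */
#else
	/*
	 * Interop path for 1.8/2.0 peers: only require the smaller v1
	 * layout.  The patch deletes this branch and the surrounding #if,
	 * so the full structure size is always used.
	 */
	return 104;	/* pretend sizeof(struct obd_connect_data_v1) */
#endif
}

int main(void)
{
	printf("minimum cdata buffer: %u bytes\n", connect_data_min_size());
	return 0;
}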
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 56f825fbb17c..e9baf5bbee3a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -334,8 +334,7 @@ static int llog_client_read_header(const struct lu_env *env,
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
rc = -EIO;
} else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
- CERROR("incorrectly sized log header: %#x "
- "(expecting %#x)\n",
+ CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
CERROR("you may need to re-run lconf --write_conf.\n");
rc = -EIO;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 87b9764a4f19..4011e0050fcb 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -175,7 +175,7 @@ const char *ll_opcode2str(__u32 opcode)
return ll_rpc_opcode_table[offset].opname;
}
-const char* ll_eopcode2str(__u32 opcode)
+const char *ll_eopcode2str(__u32 opcode)
{
LASSERT(ll_eopcode_table[opcode].opcode == opcode);
return ll_eopcode_table[opcode].opname;
@@ -694,8 +694,7 @@ default_queue:
if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
rc = -ENODEV;
goto out;
- }
- else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
queue = PTLRPC_NRS_QUEUE_REG;
/**
@@ -746,8 +745,7 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
svcpt->scp_service->srv_name, svcpt->scp_cpt,
srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
- "%s:%d: seek offset %llu, request seq %llu, "
- "last culled %llu\n",
+ "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
svcpt->scp_service->srv_name, svcpt->scp_cpt,
seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
e = &srhi->srhi_req->rq_history_list;
@@ -814,8 +812,8 @@ ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
int i;
if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
- CWARN("Failed to read request history because size of loff_t "
- "%d can't match size of u64\n", (int)sizeof(loff_t));
+ CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
+ (int)sizeof(loff_t));
return NULL;
}
@@ -1298,14 +1296,12 @@ int lprocfs_wr_import(struct file *file, const char *buffer,
if (*endptr) {
CERROR("config: wrong instance # %s\n", ptr);
} else if (inst != imp->imp_connect_data.ocd_instance) {
- CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted "
- "target(%u/%u), reconnecting...\n",
+ CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
imp->imp_obd->obd_name,
imp->imp_connect_data.ocd_instance, inst);
do_reconn = 1;
} else {
- CDEBUG(D_INFO, "IR: %s has already been connecting to "
- "new target(%u)\n",
+ CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
imp->imp_obd->obd_name, inst);
}
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index c1e8aa4d5ec4..f715e9a8b996 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -224,8 +224,8 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
total_md - desc->bd_md_count);
spin_unlock(&desc->bd_lock);
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
- "xid x%#llx-%#llx, portal %u\n", desc->bd_md_count,
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
+ desc->bd_md_count,
desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
desc->bd_last_xid, req->rq_xid, desc->bd_portal);
@@ -337,8 +337,7 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
if (req->rq_reqmsg &&
!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
- "req_flags=%#x magic=%d:%x/%x len=%d\n",
+ CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%d:%x/%x len=%d\n",
flags, lustre_msg_get_flags(req->rq_reqmsg),
lustre_msg_is_v1(req->rq_reqmsg),
lustre_msg_get_magic(req->rq_reqmsg),
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 181301bd2083..d5fd7215c72f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -770,8 +770,8 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
if (tmp != NULL) {
- CERROR("NRS policy %s has been registered, can't register it "
- "for %s\n", policy->pol_desc->pd_name,
+ CERROR("NRS policy %s has been registered, can't register it for %s\n",
+ policy->pol_desc->pd_name,
svcpt->scp_service->srv_name);
nrs_policy_put_locked(tmp);
@@ -882,8 +882,7 @@ static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
if (nrs_policy_compatible(svc, desc)) {
rc = nrs_policy_register(nrs, desc);
if (rc != 0) {
- CERROR("Failed to register NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svc->srv_name, rc);
/**
@@ -1082,8 +1081,7 @@ again:
if (rc == -ENOENT) {
rc = 0;
} else if (rc != 0) {
- CERROR("Failed to unregister NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svcpt->scp_service->srv_name, rc);
return rc;
@@ -1145,18 +1143,15 @@ int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) &&
(conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK |
PTLRPC_NRS_FL_REG_START))) {
- CERROR("NRS: failing to register policy %s. Please check "
- "policy flags; external policies cannot act as fallback "
- "policies, or be started immediately upon registration "
- "without interaction with lprocfs\n", conf->nc_name);
+ CERROR("NRS: failing to register policy %s. Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n",
+ conf->nc_name);
return -EINVAL;
}
mutex_lock(&nrs_core.nrs_mutex);
if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
- CERROR("NRS: failing to register policy %s which has already "
- "been registered with NRS core!\n",
+ CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
conf->nc_name);
rc = -EEXIST;
goto fail;
@@ -1209,8 +1204,7 @@ again:
nrs = nrs_svcpt2nrs(svcpt, hp);
rc = nrs_policy_register(nrs, desc);
if (rc != 0) {
- CERROR("Failed to register NRS policy %s for "
- "partition %d of service %s: %d\n",
+ CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svcpt->scp_service->srv_name, rc);
@@ -1281,8 +1275,7 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
LASSERT(conf != NULL);
if (conf->nc_flags & PTLRPC_NRS_FL_FALLBACK) {
- CERROR("Unable to unregister a fallback policy, unless the "
- "PTLRPC service is stopping.\n");
+ CERROR("Unable to unregister a fallback policy, unless the PTLRPC service is stopping.\n");
return -EPERM;
}
@@ -1292,8 +1285,7 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
desc = nrs_policy_find_desc_locked(conf->nc_name);
if (desc == NULL) {
- CERROR("Failing to unregister NRS policy %s which has "
- "not been registered with NRS core!\n",
+ CERROR("Failing to unregister NRS policy %s which has not been registered with NRS core!\n",
conf->nc_name);
rc = -ENOENT;
goto not_exist;
@@ -1304,9 +1296,8 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
rc = nrs_policy_unregister_locked(desc);
if (rc < 0) {
if (rc == -EBUSY)
- CERROR("Please first stop policy %s on all service "
- "partitions and then retry to unregister the "
- "policy.\n", conf->nc_name);
+ CERROR("Please first stop policy %s on all service partitions and then retry to unregister the policy.\n",
+ conf->nc_name);
goto fail;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 50556db15f79..2f45f7657830 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -412,8 +412,8 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
buflen = m->lm_buflens[n];
if (unlikely(buflen < min_size)) {
- CERROR("msg %p buffer[%d] size %d too small "
- "(required %d, opc=%d)\n", m, n, buflen, min_size,
+ CERROR("msg %p buffer[%d] size %d too small (required %d, opc=%d)\n",
+ m, n, buflen, min_size,
n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
return NULL;
}
@@ -749,21 +749,19 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
slen = strnlen(str, blen);
if (slen == blen) { /* not NULL terminated */
- CERROR("can't unpack non-NULL terminated string in "
- "msg %p buffer[%d] len %d\n", m, index, blen);
+ CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
+ m, index, blen);
return NULL;
}
if (max_len == 0) {
if (slen != blen - 1) {
- CERROR("can't unpack short string in msg %p "
- "buffer[%d] len %d: strlen %d\n",
+ CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
m, index, blen, slen);
return NULL;
}
} else if (slen > max_len) {
- CERROR("can't unpack oversized string in msg %p "
- "buffer[%d] len %d strlen %d: max %d expected\n",
+ CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
m, index, blen, slen, max_len);
return NULL;
}
@@ -1313,43 +1311,17 @@ __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
}
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
-/*
- * In 1.6 and 1.8 the checksum was computed only on struct ptlrpc_body as
- * it was in 1.6 (88 bytes, smaller than the full size in 1.8). It makes
- * more sense to compute the checksum on the full ptlrpc_body, regardless
- * of what size it is, but in order to keep interoperability with 1.8 we
- * can optionally also checksum only the first 88 bytes (caller decides). */
-# define ptlrpc_body_cksum_size_compat18 88
-
-__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18)
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
-#endif
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2: {
struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- __u32 crc;
- unsigned int hsize = 4;
- __u32 len = compat18 ? ptlrpc_body_cksum_size_compat18 :
- lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
- LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
- len, NULL, 0, (unsigned char *)&crc,
- &hsize);
- return crc;
-#else
-# warning "remove checksum compatibility support for b1_8"
__u32 crc;
unsigned int hsize = 4;
cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
NULL, 0, (unsigned char *)&crc, &hsize);
return crc;
-#endif
}
default:
CERROR("incorrect message magic: %08x\n", msg->lm_magic);
@@ -2284,8 +2256,8 @@ void lustre_swab_quota_body(struct quota_body *b)
void dump_ioo(struct obd_ioobj *ioo)
{
CDEBUG(D_RPCTRACE,
- "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
- "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
+ "obd_ioobj: ioo_oid=" DOSTID ", ioo_max_brw=%#x, ioo_bufct=%d\n",
+ POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
ioo->ioo_bufcnt);
}
EXPORT_SYMBOL(dump_ioo);
@@ -2356,8 +2328,7 @@ void dump_obdo(struct obdo *oa)
CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
oa->o_handle.cookie);
if (valid & OBD_MD_FLCOOKIE)
- CDEBUG(D_RPCTRACE, "obdo: o_lcookie = "
- "(llog_cookie dumping not yet implemented)\n");
+ CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
}
EXPORT_SYMBOL(dump_obdo);
@@ -2421,17 +2392,15 @@ void _debug_req(struct ptlrpc_request *req,
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d"
- " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
- "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+ " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %d dl " CFS_TIME_T " ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
req, req->rq_xid, req->rq_transno,
req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
req->rq_import ?
- req->rq_import->imp_obd->obd_name :
- req->rq_export ?
- req->rq_export->exp_client_uuid.uuid :
- "<?>",
+ req->rq_import->imp_obd->obd_name :
+ req->rq_export ?
+ req->rq_export->exp_client_uuid.uuid :
+ "<?>",
libcfs_nid2str(nid),
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
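
The _debug_req() hunk shows how format-specifier macros such as CFS_TIME_T and REQ_FLAGS_FMT survive the consolidation: adjacent string literals are pasted together by the compiler, and the added spaces around each macro keep the tokens distinct. The same idiom with the standard PRIu64 macro, as a small self-contained sketch:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t xid = 42;	/* stands in for req->rq_xid */
	long deadline = 300;

	/* PRIu64 expands to a string literal; adjacent literals are
	 * concatenated, yielding one format string at compile time. */
	printf("req x%" PRIu64 " dl %ld\n", xid, deadline);
	return 0;
}
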
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 20341b27a06a..340d98a64137 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -206,8 +206,7 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
spin_unlock(&imp->imp_lock);
- CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u "
- "force %u force_next %u deactive %u pingable %u suppress %u\n",
+ CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u force %u force_next %u deactive %u pingable %u suppress %u\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(level), level, force, force_next,
imp->imp_deactive, imp->imp_pingable, suppress);
@@ -220,8 +219,7 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
} else if (level != LUSTRE_IMP_FULL ||
imp->imp_obd->obd_no_recov ||
imp_is_deactive(imp)) {
- CDEBUG(D_HA, "%s->%s: not pinging (in recovery "
- "or recovery disabled: %s)\n",
+ CDEBUG(D_HA, "%s->%s: not pinging (in recovery or recovery disabled: %s)\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(level));
if (force) {
@@ -334,11 +332,7 @@ int ptlrpc_start_pinger(void)
thread_is_running(&pinger_thread), &lwi);
if (suppress_pings)
- CWARN("Pings will be suppressed at the request of the "
- "administrator. The configuration shall meet the "
- "additional requirements described in the manual. "
- "(Search for the \"suppress_pings\" kernel module "
- "parameter.)\n");
+ CWARN("Pings will be suppressed at the request of the administrator. The configuration shall meet the additional requirements described in the manual. (Search for the \"suppress_pings\" kernel module parameter.)\n");
return 0;
}
@@ -428,7 +422,7 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import);
* Register a timeout callback to the pinger list, and the callback will
* be called when timeout happens.
*/
-struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
+struct timeout_item *ptlrpc_new_timeout(int time, enum timeout_event event,
timeout_cb_t cb, void *data)
{
struct timeout_item *ti;
@@ -448,7 +442,7 @@ struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
}
/**
- * Register timeout event on the the pinger thread.
+ * Register timeout event on the pinger thread.
* Note: the timeout list is an sorted list with increased timeout value.
*/
static struct timeout_item*
@@ -623,11 +617,7 @@ static int ping_evictor_main(void *arg)
if (expire_time > exp->exp_last_request_time) {
class_export_get(exp);
spin_unlock(&obd->obd_dev_lock);
- LCONSOLE_WARN("%s: haven't heard from client %s"
- " (at %s) in %ld seconds. I think"
- " it's dead, and I am evicting"
- " it. exp %p, cur %ld expire %ld"
- " last %ld\n",
+ LCONSOLE_WARN("%s: haven't heard from client %s (at %s) in %ld seconds. I think it's dead, and I am evicting it. exp %p, cur %ld expire %ld last %ld\n",
obd->obd_name,
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp),
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 357ea9f8bd57..cbcc541cac43 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -356,10 +356,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
if (atomic_read(&ps->set_new_count)) {
rc = ptlrpcd_steal_rqset(set, ps);
if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d"
- " async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
+ CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
+ rc, partner->pc_index,
+ pc->pc_index);
}
ptlrpc_reqset_put(ps);
} while (rc == 0 && pc->pc_cursor != first);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index e1bc77b83ffb..7b1d72947330 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -237,8 +237,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
if (ptlrpc_set_import_discon(imp,
lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
if (!imp->imp_replayable) {
- CDEBUG(D_HA, "import %s@%s for %s not replayable, "
- "auto-deactivating\n",
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid,
imp->imp_obd->obd_name);
@@ -274,8 +273,8 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
/* When deactivating, mark import invalid, and abort in-flight
* requests. */
if (!active) {
- LCONSOLE_WARN("setting import %s INACTIVE by administrator "
- "request\n", obd2cli_tgt(imp->imp_obd));
+ LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
+ obd2cli_tgt(imp->imp_obd));
/* set before invalidate to avoid messages about imp_inval
* set without imp_deactive in ptlrpc_import_delay_req */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 4ce76852e6ee..21e9dc9d5580 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
{
- snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
+ strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);
/*
* currently we don't support customized bulk specification for
@@ -220,10 +220,9 @@ char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
bspec[0] = '-';
sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
- strncat(buf, bspec, bufsize);
+ strlcat(buf, bspec, bufsize);
}
- buf[bufsize - 1] = '\0';
return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
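
The sptlrpc_flavor2name() hunk replaces snprintf()/strncat() with strlcpy()/strlcat(): strlcat() takes the total destination size and always NUL-terminates, whereas strncat()'s third argument is the number of bytes to append, so passing bufsize there can run past the end of the buffer, and the manual terminator write becomes unnecessary. strlcpy()/strlcat() are kernel/BSD helpers rather than ISO C, so the sketch below uses a local stand-in with the same semantics; names and sizes are illustrative:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's strlcat(): 'size' is the total
 * destination buffer size and the result is always NUL-terminated. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)
		return size + slen;	/* dst not terminated within size */
	if (slen < size - dlen) {
		memcpy(dst + dlen, src, slen + 1);
	} else {
		memcpy(dst + dlen, src, size - dlen - 1);
		dst[size - 1] = '\0';
	}
	return dlen + slen;
}

int main(void)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%s", "plain");	/* base flavor */
	my_strlcat(buf, "-bulkn", sizeof(buf));		/* bulk spec */
	puts(buf);					/* "plain-bulkn" */
	return 0;
}
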
@@ -457,8 +456,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
LASSERT(req->rq_reqlen);
LASSERT(req->rq_replen);
- CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
- "switch sec %p(%s) -> %p(%s)\n", req,
+ CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
+ req,
oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
@@ -1842,8 +1841,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
req->rq_svc_ctx,
&flavor);
} else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
- "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
+ exp, exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
spin_unlock(&exp->exp_lock);
@@ -1856,13 +1855,12 @@ int sptlrpc_target_export_check(struct obd_export *exp,
if (exp->exp_flvr_expire[0]) {
if (exp->exp_flvr_expire[0] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
- "middle one ("CFS_DURATION_T")\n", exp,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (" CFS_DURATION_T ")\n", exp,
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
- get_seconds());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
@@ -1881,13 +1879,13 @@ int sptlrpc_target_export_check(struct obd_export *exp,
if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
if (exp->exp_flvr_expire[1] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
- "oldest one ("CFS_DURATION_T")\n", exp,
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (" CFS_DURATION_T ")\n",
+ exp,
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
- get_seconds());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
@@ -1907,8 +1905,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
spin_unlock(&exp->exp_lock);
- CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
- "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
+ CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
exp, exp->exp_obd->obd_name,
req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index cc68a1cf24e3..0dabd83fd46f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -772,8 +772,7 @@ void sptlrpc_enc_pool_fini(void)
if (page_pools.epp_st_access > 0) {
CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, "
- "access %lu, missing %lu, max qlen %u, max wait "
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait "
CFS_TIME_T"/%d\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index 099cec3b669f..4e132435b450 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -101,16 +101,7 @@ int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
if (req->rq_early) {
cksums = lustre_msg_get_cksum(req->rq_repdata);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- if (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_CKSUM_INCOMPAT18)
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 0);
- else
- cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 1);
-#else
-# warning "remove checksum compatibility support for b1_8"
cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
-#endif
if (cksumc != cksums) {
CDEBUG(D_SEC,
"early reply checksum mismatch: %08x != %08x\n",
@@ -371,16 +362,7 @@ int null_authorize(struct ptlrpc_request *req)
} else {
__u32 cksum;
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
- if (lustre_msghdr_get_flags(req->rq_reqmsg) &
- MSGHDR_CKSUM_INCOMPAT18)
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 0);
- else
- cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 1);
-#else
-# warning "remove checksum compatibility support for b1_8"
cksum = lustre_msg_calc_cksum(rs->rs_repbuf);
-#endif
lustre_msg_set_cksum(rs->rs_repbuf, cksum);
req->rq_reply_off = 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 3d72b810c45c..a79cd53010a4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -938,8 +938,8 @@ int plain_svc_wrap_bulk(struct ptlrpc_request *req,
rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
tokenv);
if (rc) {
- CERROR("bulk read: server failed to compute "
- "checksum: %d\n", rc);
+ CERROR("bulk read: server failed to compute checksum: %d\n",
+ rc);
} else {
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
corrupt_bulk_data(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index a8df8a792333..635b12b22cef 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -580,8 +580,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
svc->srv_nthrs_cpt_init = init;
if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
- CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
- "than the given soft limit (%d)\n",
+ CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
svc->srv_name, nthrs * svc->srv_ncpts,
tc->tc_nthrs_max);
}
@@ -1251,8 +1250,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
/* deadline is when the client expects us to reply, margin is the
difference between clients' and servers' expectations */
DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for "
- "%d+%d", AT_OFF ? "AT off - not " : "",
+ "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
+ AT_OFF ? "AT off - not " : "",
olddl, olddl - at_get(&svcpt->scp_at_estimate),
at_get(&svcpt->scp_at_estimate), at_extra);
@@ -1260,17 +1259,15 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
return 0;
if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
- "not sending early reply. Consider increasing "
- "at_early_margin (%d)?", olddl, at_early_margin);
+ DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?",
+ olddl, at_early_margin);
/* Return an error so we're not re-added to the timed list. */
return -ETIMEDOUT;
}
if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
- "but no AT support");
+ DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support");
return -ENOSYS;
}
@@ -1296,8 +1293,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
* we may be past adaptive_max */
if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate)) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
- "(%ld/%ld), not sending early reply\n",
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%ld), not sending early reply\n",
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
get_seconds());
@@ -1329,8 +1325,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
LASSERT(atomic_read(&req->rq_refcount));
/** if it is last refcount then early reply isn't needed */
if (atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
- "abort sending early reply\n");
+ DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n");
rc = -EINVAL;
goto out;
}
@@ -1454,16 +1449,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
spin_unlock(&svcpt->scp_at_lock);
- CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
- "replies\n", first, at_extra, counter);
+ CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early replies\n",
+ first, at_extra, counter);
if (first < 0) {
/* We're already past request deadlines before we even get a
chance to send early replies */
- LCONSOLE_WARN("%s: This server is not able to keep up with "
- "request traffic (cpu-bound).\n",
+ LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
- "delay="CFS_DURATION_T"(jiff)\n",
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=" CFS_DURATION_T "(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
@@ -1825,8 +1818,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
if (rc == 0) {
rc = sptlrpc_target_export_check(req->rq_export, req);
if (rc)
- DEBUG_REQ(D_ERROR, req, "DROPPING req with "
- "illegal security flavor,");
+ DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,");
}
if (rc)
@@ -1942,18 +1934,17 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
if (get_seconds() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
- ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+ DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
libcfs_id2str(request->rq_peer),
cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
+ request->rq_arrival_time.tv_sec),
cfs_time_sub(get_seconds(),
- request->rq_deadline));
+ request->rq_deadline));
goto put_conn;
}
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x%llu:%s:%d\n", current_comm(),
+ CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d\n",
+ current_comm(),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
@@ -1986,26 +1977,24 @@ put_conn:
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x%llu:%s:%d Request processed in "
- "%ldus (%ldus total) trans %llu rc %d/%d\n",
- current_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg),
- request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg),
- timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
- (request->rq_repmsg ?
- lustre_msg_get_transno(request->rq_repmsg) :
- request->rq_transno),
- request->rq_status,
- (request->rq_repmsg ?
- lustre_msg_get_status(request->rq_repmsg) : -999));
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
+ current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg),
+ request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg),
+ timediff,
+ cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ (request->rq_repmsg ?
+ lustre_msg_get_transno(request->rq_repmsg) :
+ request->rq_transno),
+ request->rq_status,
+ (request->rq_repmsg ?
+ lustre_msg_get_status(request->rq_repmsg) : -999));
if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
int opc = opcode_offset(op);
@@ -2557,8 +2546,8 @@ static int ptlrpc_start_hr_threads(void)
if (!IS_ERR_VALUE(rc))
continue;
- CERROR("Reply handling thread %d:%d Failed on starting: "
- "rc = %d\n", i, j, rc);
+ CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n",
+ i, j, rc);
ptlrpc_stop_hr_threads();
return rc;
}
@@ -2920,8 +2909,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
rc = l_wait_event(svcpt->scp_waitq,
svcpt->scp_nrqbds_posted == 0, &lwi);
if (rc == -ETIMEDOUT) {
- CWARN("Service %s waiting for "
- "request buffers\n",
+ CWARN("Service %s waiting for request buffers\n",
svcpt->scp_service->srv_name);
}
spin_lock(&svcpt->scp_lock);
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 655cf5037b0b..2a054a99d433 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,10 +27,18 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/dt3155v4l/Kconfig"
-source "drivers/staging/media/omap24xx/Kconfig"
+source "drivers/staging/media/tlg2300/Kconfig"
+
+source "drivers/staging/media/mn88472/Kconfig"
+
+source "drivers/staging/media/mn88473/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/parport/Kconfig"
+
+source "drivers/staging/media/vino/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 6dbe578178cd..412b28408398 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,6 +4,9 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_VIDEO_OMAP2) += omap24xx/
-obj-$(CONFIG_VIDEO_TCM825X) += omap24xx/
+obj-$(CONFIG_DVB_MN88472) += mn88472/
+obj-$(CONFIG_DVB_MN88473) += mn88473/
+obj-y += parport/
+obj-$(CONFIG_VIDEO_TLG2300) += tlg2300/
+obj-y += vino/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 2bba370a47ca..60a57b2a8fb2 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2327,9 +2327,10 @@ static int bcm2048_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, BCM2048_DRIVER_CARD,
sizeof(capability->card));
snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
- capability->version = BCM2048_DRIVER_VERSION;
- capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ capability->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
V4L2_CAP_HW_FREQ_SEEK;
+ capability->capabilities = capability->device_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -2707,7 +2708,7 @@ static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
* bcm2048_i2c_driver - i2c driver interface
*/
static const struct i2c_device_id bcm2048_id[] = {
- { "bcm2048" , 0 },
+ { "bcm2048", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, bcm2048_id);
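
The bcm2048 querycap change follows the V4L2 rule that a driver fills device_caps with the capabilities of the opened node, advertises V4L2_CAP_DEVICE_CAPS in capabilities, and leaves the deprecated per-driver version field to the core. The split is visible from userspace via VIDIOC_QUERYCAP; a small sketch using the standard UAPI, with an illustrative device path:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/radio0", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
	    (cap.capabilities & V4L2_CAP_DEVICE_CAPS))
		printf("node caps: %#x (driver total: %#x)\n",
		       cap.device_caps, cap.capabilities);
	close(fd);
	return 0;
}
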
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 73e7b2c9e4a7..657ea480c6e7 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -527,7 +527,7 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
u8 val;
#endif
for (i = 0; i < 100; i++) {
- msleep(10);
+ usleep_range(10000, 11000);
#if 0
read_reg(ci, 0x06, &val);
dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
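
In cxd2099, replacing msleep(10) with usleep_range(10000, 11000) reflects the usual guidance for short waits: msleep() rounds up to jiffies and may oversleep considerably for values this small, while usleep_range() uses hrtimers and gives the scheduler a window in which to coalesce wakeups. A minimal out-of-tree module sketch of the call (assumes the usual Kbuild makefile; not tied to this driver):

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init sleep_demo_init(void)
{
	/* A 10 ms wait in process context: give the timer subsystem a
	 * 10-11 ms window instead of a hard jiffy-rounded msleep(10). */
	usleep_range(10000, 11000);
	return 0;
}

static void __exit sleep_demo_exit(void)
{
}

module_init(sleep_demo_init);
module_exit(sleep_demo_exit);
MODULE_LICENSE("GPL");
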
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index bdc7f005b3ba..704fa202ee18 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -37,15 +37,15 @@
/* ipipe input format's */
static const unsigned int ipipe_input_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_SGRBG12_1X12,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
};
/* ipipe output format's */
static const unsigned int ipipe_output_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
};
static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
@@ -1457,7 +1457,7 @@ ipipe_try_format(struct vpfe_ipipe_device *ipipe,
/* If not found, use SBGGR10 as default */
if (i >= ARRAY_SIZE(ipipe_input_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
} else if (pad == IPIPE_PAD_SOURCE) {
for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
if (fmt->code == ipipe_output_fmts[i])
@@ -1465,7 +1465,7 @@ ipipe_try_format(struct vpfe_ipipe_device *ipipe,
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(ipipe_output_fmts))
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
}
fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width);
@@ -1642,7 +1642,7 @@ ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPE_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
ipipe_set_format(sd, fh, &format);
@@ -1650,7 +1650,7 @@ ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPE_PAD_SOURCE;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
ipipe_set_format(sd, fh, &format);
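
The dm365_ipipe.c changes are the driver-side fallout of the V4L2_MBUS_FMT_* to MEDIA_BUS_FMT_* rename: media bus codes become plain u32 values from the media-bus-format UAPI header instead of members of enum v4l2_mbus_pixelcode, and the try-format fallback remains a simple table scan. A hedged userspace sketch of that lookup pattern follows; the two #define values are placeholders, not the real UAPI numbers:

#include <inttypes.h>
#include <stdio.h>

#define MBUS_FMT_UYVY8_2X8	0x1	/* placeholder, not the UAPI value */
#define MBUS_FMT_SGRBG12_1X12	0x2	/* placeholder, not the UAPI value */

static const uint32_t ipipe_input_fmts[] = {
	MBUS_FMT_UYVY8_2X8,
	MBUS_FMT_SGRBG12_1X12,
};

/* Return the requested bus code if supported, else fall back to a default. */
static uint32_t try_code(uint32_t code)
{
	size_t i;

	for (i = 0; i < sizeof(ipipe_input_fmts) / sizeof(ipipe_input_fmts[0]); i++)
		if (code == ipipe_input_fmts[i])
			return code;
	return MBUS_FMT_SGRBG12_1X12;
}

int main(void)
{
	printf("%#" PRIx32 "\n", try_code(0x77));	/* unknown -> default */
	return 0;
}
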
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
index cf4204603eb8..d81b29e19309 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
@@ -120,8 +120,8 @@ struct vpfe_ipipe_device {
enum ipipe_input_entity input;
unsigned int output;
struct v4l2_ctrl_handler ctrls;
- void *__iomem base_addr;
- void *__iomem isp5_base_addr;
+ void __iomem *base_addr;
+ void __iomem *isp5_base_addr;
struct ipipe_module_params config;
};
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index b2daf5e63f88..2a3a56b88de1 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -24,7 +24,7 @@
#define IPIPE_MODE_CONTINUOUS 0
#define IPIPE_MODE_SINGLE_SHOT 1
-static void ipipe_clock_enable(void *__iomem base_addr)
+static void ipipe_clock_enable(void __iomem *base_addr)
{
/* enable IPIPE MMR for register write access */
regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
@@ -34,7 +34,7 @@ static void ipipe_clock_enable(void *__iomem base_addr)
}
static void
-rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
+rsz_set_common_params(void __iomem *rsz_base, struct resizer_params *params)
{
struct rsz_common_params *rsz_common = &params->rsz_common;
u32 val;
@@ -66,7 +66,7 @@ rsz_set_common_params(void *__iomem rsz_base, struct resizer_params *params)
}
static void
-rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
+rsz_set_rsz_regs(void __iomem *rsz_base, unsigned int rsz_id,
struct resizer_params *params)
{
struct resizer_scale_param *rsc_params;
@@ -171,7 +171,7 @@ rsz_set_rsz_regs(void *__iomem rsz_base, unsigned int rsz_id,
/*set the registers of either RSZ0 or RSZ1 */
static void
-ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
+ipipe_setup_resizer(void __iomem *rsz_base, struct resizer_params *params)
{
/* enable MMR gate to write to Resizer */
regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
@@ -196,12 +196,12 @@ ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
rsz_set_rsz_regs(rsz_base, RSZ_B, params);
}
-static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix)
+static u32 ipipe_get_color_pat(u32 pix)
{
switch (pix) {
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
return ipipe_sgrbg_pattern;
default:
@@ -211,23 +211,23 @@ static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix)
static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
{
- enum v4l2_mbus_pixelcode temp_pix_fmt;
+ u32 temp_pix_fmt;
switch (ipipe->formats[IPIPE_PAD_SINK].code) {
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGRBG12_1X12:
- temp_pix_fmt = V4L2_MBUS_FMT_SGRBG12_1X12;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ temp_pix_fmt = MEDIA_BUS_FMT_SGRBG12_1X12;
break;
default:
- temp_pix_fmt = V4L2_MBUS_FMT_UYVY8_2X8;
+ temp_pix_fmt = MEDIA_BUS_FMT_UYVY8_2X8;
}
- if (temp_pix_fmt == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (temp_pix_fmt == MEDIA_BUS_FMT_SGRBG12_1X12) {
if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
- V4L2_MBUS_FMT_SGRBG12_1X12)
+ MEDIA_BUS_FMT_SGRBG12_1X12)
return IPIPE_RAW2RAW;
return IPIPE_RAW2YUV;
}
@@ -302,8 +302,8 @@ int config_rsz_hw(struct vpfe_resizer_device *resizer,
struct resizer_params *config)
{
struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- void *__iomem ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
- void *__iomem rsz_base = vpfe_dev->vpfe_resizer.base_addr;
+ void __iomem *ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
+ void __iomem *rsz_base = vpfe_dev->vpfe_resizer.base_addr;
/* enable VPSS clock */
vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
@@ -315,7 +315,7 @@ int config_rsz_hw(struct vpfe_resizer_device *resizer,
}
static void
-rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
+rsz_set_y_address(void __iomem *rsz_base, unsigned int address,
unsigned int offset)
{
u32 val;
@@ -330,7 +330,7 @@ rsz_set_y_address(void *__iomem rsz_base, unsigned int address,
}
static void
-rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
+rsz_set_c_address(void __iomem *rsz_base, unsigned int address,
unsigned int offset)
{
u32 val;
@@ -352,7 +352,7 @@ rsz_set_c_address(void *__iomem rsz_base, unsigned int address,
* @address: the address to set
*/
int
-resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
int resize_no, unsigned int address)
{
struct resizer_scale_param *rsc_param;
@@ -411,7 +411,7 @@ resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
}
void
-ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_lutdpc_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_lutdpc *dpc)
{
u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
@@ -446,7 +446,7 @@ ipipe_set_lutdpc_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
static void
-set_dpc_thresholds(void *__iomem base_addr,
+set_dpc_thresholds(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
{
regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
@@ -467,7 +467,7 @@ set_dpc_thresholds(void *__iomem base_addr,
DPC_OTF_2D_THR_B);
}
-void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+void ipipe_set_otfdpc_regs(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc *otfdpc)
{
struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
@@ -523,7 +523,7 @@ void ipipe_set_otfdpc_regs(void *__iomem base_addr,
/* 2D Noise filter */
void
-ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_nf *noise_filter)
{
@@ -571,7 +571,7 @@ ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
(((decimal & 0x1f) | ((integer & 0x7) << 5)))
/* Green Imbalance Correction */
-void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
+void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic)
{
u32 val;
@@ -609,7 +609,7 @@ void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic)
#define IPIPE_U13Q9(decimal, integer) \
(((decimal & 0x1ff) | ((integer & 0xf) << 9)))
/* White balance */
-void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
+void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb)
{
u32 val;
@@ -635,7 +635,7 @@ void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb)
}
/* CFA */
-void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
+void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa)
{
ipipe_clock_enable(base_addr);
@@ -671,7 +671,7 @@ void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa)
}
void
-ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_rgb2rgb *rgb)
{
u32 offset_mask = RGB2RGB_1_OFST_MASK;
@@ -724,7 +724,7 @@ ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
}
static void
-ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
+ipipe_update_gamma_tbl(void __iomem *isp5_base_addr,
struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
{
int count;
@@ -738,7 +738,7 @@ ipipe_update_gamma_tbl(void *__iomem isp5_base_addr,
}
void
-ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_gamma_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_gamma *gamma)
{
int table_size;
@@ -770,7 +770,7 @@ ipipe_set_gamma_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
void
-ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_3d_lut *lut_3d)
{
struct vpfe_ipipe_3d_lut_entry *tbl;
@@ -819,7 +819,7 @@ ipipe_set_3d_lut_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
/* Lumina adjustments */
void
-ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
+ipipe_set_lum_adj_regs(void __iomem *base_addr, struct ipipe_lum_adj *lum_adj)
{
u32 val;
@@ -834,7 +834,7 @@ ipipe_set_lum_adj_regs(void *__iomem base_addr, struct ipipe_lum_adj *lum_adj)
#define IPIPE_S12Q8(decimal, integer) \
(((decimal & 0xff) | ((integer & 0xf) << 8)))
-void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
struct vpfe_ipipe_rgb2yuv *yuv)
{
u32 val;
@@ -866,7 +866,7 @@ void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
/* YUV 422 conversion */
void
-ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
struct vpfe_ipipe_yuv422_conv *conv)
{
u32 val;
@@ -879,7 +879,7 @@ ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
}
void
-ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_gbce_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_gbce *gbce)
{
unsigned int count;
@@ -906,7 +906,7 @@ ipipe_set_gbce_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
void
-ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
+ipipe_set_ee_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
struct vpfe_ipipe_yee *ee)
{
unsigned int count;
@@ -950,7 +950,7 @@ ipipe_set_ee_regs(void *__iomem base_addr, void *__iomem isp5_base_addr,
}
/* Chromatic Artifact Correction. CAR */
-static void ipipe_set_mf(void *__iomem base_addr)
+static void ipipe_set_mf(void __iomem *base_addr)
{
/* typ to dynamic switch */
regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
@@ -959,7 +959,7 @@ static void ipipe_set_mf(void *__iomem base_addr)
}
static void
-ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+ipipe_set_gain_ctrl(void __iomem *base_addr, struct vpfe_ipipe_car *car)
{
regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
@@ -975,7 +975,7 @@ ipipe_set_gain_ctrl(void *__iomem base_addr, struct vpfe_ipipe_car *car)
CAR_GN2_MIN);
}
-void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
+void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car)
{
u32 val;
@@ -1010,7 +1010,7 @@ void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car)
}
/* Chromatic Gain Suppression */
-void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
+void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs)
{
ipipe_clock_enable(base_addr);
regw_ip(base_addr, cgs->en, CGS_EN);
@@ -1025,12 +1025,12 @@ void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs)
regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
}
-void rsz_src_enable(void *__iomem rsz_base, int enable)
+void rsz_src_enable(void __iomem *rsz_base, int enable)
{
regw_rsz(rsz_base, enable, RSZ_SRC_EN);
}
-int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable)
+int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable)
{
if (rsz_id == RSZ_A) {
regw_rsz(rsz_base, enable, RSZ_EN_A);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
index 81176fb9d164..2bf2f7a69173 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -490,29 +490,29 @@
#define RSZ_RGB_TYP_SHIFT 0
#define RSZ_RGB_ALPHA_MASK 0xff
-static inline u32 regr_ip(void *__iomem addr, u32 offset)
+static inline u32 regr_ip(void __iomem *addr, u32 offset)
{
return readl(addr + offset);
}
-static inline void regw_ip(void *__iomem addr, u32 val, u32 offset)
+static inline void regw_ip(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
}
-static inline u32 w_ip_table(void *__iomem addr, u32 val, u32 offset)
+static inline u32 w_ip_table(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
return val;
}
-static inline u32 regr_rsz(void *__iomem addr, u32 offset)
+static inline u32 regr_rsz(void __iomem *addr, u32 offset)
{
return readl(addr + offset);
}
-static inline u32 regw_rsz(void *__iomem addr, u32 val, u32 offset)
+static inline u32 regw_rsz(void __iomem *addr, u32 val, u32 offset)
{
writel(val, addr + offset);
@@ -520,39 +520,39 @@ static inline u32 regw_rsz(void *__iomem addr, u32 val, u32 offset)
}
int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
-int resizer_set_outaddr(void *__iomem rsz_base, struct resizer_params *params,
+int resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
int resize_no, unsigned int address);
-int rsz_enable(void *__iomem rsz_base, int rsz_id, int enable);
-void rsz_src_enable(void *__iomem rsz_base, int enable);
+int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable);
+void rsz_src_enable(void __iomem *rsz_base, int enable);
void rsz_set_in_pix_format(unsigned char y_c);
int config_rsz_hw(struct vpfe_resizer_device *resizer,
struct resizer_params *config);
-void ipipe_set_d2f_regs(void *__iomem base_addr, unsigned int id,
+void ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_nf *noise_filter);
-void ipipe_set_rgb2rgb_regs(void *__iomem base_addr, unsigned int id,
+void ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
struct vpfe_ipipe_rgb2rgb *rgb);
-void ipipe_set_yuv422_conv_regs(void *__iomem base_addr,
+void ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
struct vpfe_ipipe_yuv422_conv *conv);
-void ipipe_set_lum_adj_regs(void *__iomem base_addr,
+void ipipe_set_lum_adj_regs(void __iomem *base_addr,
struct ipipe_lum_adj *lum_adj);
-void ipipe_set_rgb2ycbcr_regs(void *__iomem base_addr,
+void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
struct vpfe_ipipe_rgb2yuv *yuv);
-void ipipe_set_lutdpc_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
-void ipipe_set_otfdpc_regs(void *__iomem base_addr,
+void ipipe_set_lutdpc_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
+void ipipe_set_otfdpc_regs(void __iomem *base_addr,
struct vpfe_ipipe_otfdpc *otfdpc);
-void ipipe_set_3d_lut_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
-void ipipe_set_gamma_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
-void ipipe_set_ee_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_yee *ee);
-void ipipe_set_gbce_regs(void *__iomem base_addr,
- void *__iomem isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
-void ipipe_set_gic_regs(void *__iomem base_addr, struct vpfe_ipipe_gic *gic);
-void ipipe_set_cfa_regs(void *__iomem base_addr, struct vpfe_ipipe_cfa *cfa);
-void ipipe_set_car_regs(void *__iomem base_addr, struct vpfe_ipipe_car *car);
-void ipipe_set_cgs_regs(void *__iomem base_addr, struct vpfe_ipipe_cgs *cgs);
-void ipipe_set_wb_regs(void *__iomem base_addr, struct vpfe_ipipe_wb *wb);
+void ipipe_set_3d_lut_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
+void ipipe_set_gamma_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
+void ipipe_set_ee_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_yee *ee);
+void ipipe_set_gbce_regs(void __iomem *base_addr,
+ void __iomem *isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
+void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic);
+void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa);
+void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car);
+void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs);
+void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb);
#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
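
The recurring fix in these davinci_vpfe files is the placement of the __iomem annotation: sparse treats it as a qualifier on the pointed-to memory, so the correct spelling is "void __iomem *base", while "void *__iomem base" marks the pointer variable itself as living in I/O space and triggers address-space warnings under "make C=1". A minimal module sketch of the annotated pattern; the mapped address is illustrative only and must be a valid MMIO region on real hardware:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/printk.h>

/* The qualifier describes what is pointed to, so it sits between the
 * base type and the '*'; "void *__iomem p" would instead claim that the
 * pointer variable itself lives in I/O space. */
static void __iomem *demo_base;

static int __init iomem_demo_init(void)
{
	demo_base = ioremap(0x01c70800, 0x100);	/* illustrative address */
	if (!demo_base)
		return -ENOMEM;
	/* __iomem pointers are only dereferenced via the MMIO accessors. */
	pr_info("reg0 = %#x\n", readl(demo_base));
	iounmap(demo_base);
	demo_base = NULL;
	return 0;
}

static void __exit iomem_demo_exit(void)
{
}

module_init(iomem_demo_init);
module_exit(iomem_demo_exit);
MODULE_LICENSE("GPL");
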
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 6d4893b44c1f..87d42e18377d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -23,42 +23,42 @@
#include "vpfe_mc_capture.h"
static const unsigned int ipipeif_input_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_SGRBG12_1X12,
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_UV8_1X8,
- V4L2_MBUS_FMT_YDYUYDYV8_1X16,
- V4L2_MBUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_UV8_1X8,
+ MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
};
static const unsigned int ipipeif_output_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_SGRBG12_1X12,
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_UV8_1X8,
- V4L2_MBUS_FMT_YDYUYDYV8_1X16,
- V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_UV8_1X8,
+ MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
};
static int
-ipipeif_get_pack_mode(enum v4l2_mbus_pixelcode in_pix_fmt)
+ipipeif_get_pack_mode(u32 in_pix_fmt)
{
switch (in_pix_fmt) {
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
return IPIPEIF_5_1_PACK_8_BIT;
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
return IPIPEIF_5_1_PACK_16_BIT;
- case V4L2_MBUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
return IPIPEIF_5_1_PACK_12_BIT;
default:
@@ -107,8 +107,8 @@ ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
- (informat->code == V4L2_MBUS_FMT_Y8_1X8 ||
- informat->code == V4L2_MBUS_FMT_UV8_1X8))
+ (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
+ informat->code == MEDIA_BUS_FMT_UV8_1X8))
return IPIPEIF_CCDC;
return IPIPEIF_SRC1_PARALLEL_PORT;
@@ -122,11 +122,11 @@ ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
switch (informat->code) {
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
return IPIPEIF_5_1_BITS11_0;
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
return IPIPEIF_5_1_BITS11_0;
default:
@@ -143,7 +143,7 @@ ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
if (ipipeif->input == IPIPEIF_INPUT_ISIF)
return IPIPEIF_CCDC;
- if (informat->code == V4L2_MBUS_FMT_UYVY8_2X8)
+ if (informat->code == MEDIA_BUS_FMT_UYVY8_2X8)
return IPIPEIF_SDRAM_YUV;
return IPIPEIF_SDRAM_RAW;
@@ -190,7 +190,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
struct v4l2_mbus_framefmt *informat, *outformat;
struct ipipeif_params params = ipipeif->config;
enum ipipeif_input_source ipipeif_source;
- enum v4l2_mbus_pixelcode isif_port_if;
+ u32 isif_port_if;
void *ipipeif_base_addr;
unsigned int val;
int data_shift;
@@ -268,16 +268,16 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
isif_port_if = informat->code;
- if (isif_port_if == V4L2_MBUS_FMT_Y8_1X8)
- isif_port_if = V4L2_MBUS_FMT_YUYV8_1X16;
- else if (isif_port_if == V4L2_MBUS_FMT_UV8_1X8)
- isif_port_if = V4L2_MBUS_FMT_SGRBG12_1X12;
+ if (isif_port_if == MEDIA_BUS_FMT_Y8_1X8)
+ isif_port_if = MEDIA_BUS_FMT_YUYV8_1X16;
+ else if (isif_port_if == MEDIA_BUS_FMT_UV8_1X8)
+ isif_port_if = MEDIA_BUS_FMT_SGRBG12_1X12;
/* Enable DPCM decompression */
switch (ipipeif_source) {
case IPIPEIF_SDRAM_RAW:
val = 0;
- if (outformat->code == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) {
+ if (outformat->code == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8) {
val = 1;
val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
IPIPEIF_DPCM_BITS_SHIFT;
@@ -296,9 +296,9 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
/* configure CFG2 */
val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
switch (isif_port_if) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
@@ -344,16 +344,16 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
switch (isif_port_if) {
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
@@ -421,7 +421,7 @@ static int
ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct ipipeif_params *config = arg;
struct device *dev = ipipeif->subdev.v4l2_dev->dev;
if (!arg) {
@@ -462,7 +462,7 @@ ipipeif_get_config(struct v4l2_subdev *sd, void __user *arg)
static long ipipeif_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
- struct ipipeif_params *config = (struct ipipeif_params *)arg;
+ struct ipipeif_params *config = arg;
int ret = -ENOIOCTLCMD;
switch (cmd) {
@@ -625,7 +625,7 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
/* If not found, use SBGGR10 as default */
if (i >= ARRAY_SIZE(ipipeif_input_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
} else if (pad == IPIPEIF_PAD_SOURCE) {
for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
if (fmt->code == ipipeif_output_fmts[i])
@@ -633,7 +633,7 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(ipipeif_output_fmts))
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
}
fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width);
@@ -770,7 +770,7 @@ ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
ipipeif_set_format(sd, fh, &format);
@@ -778,7 +778,7 @@ ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SOURCE;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
ipipeif_set_format(sd, fh, &format);
@@ -805,9 +805,9 @@ ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
return -EINVAL;
switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_UV8_1X8:
- case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
break;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
index 608701fc5fed..cea3d61335af 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
@@ -134,7 +134,7 @@ struct vpfe_ipipeif_device {
unsigned int output;
struct vpfe_video_device video_in;
struct v4l2_ctrl_handler ctrls;
- void *__iomem ipipeif_base_addr;
+ void __iomem *ipipeif_base_addr;
struct ipipeif_params config;
int dpcm_predictor;
int gain;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 0d535b062e4e..0ba0bf2c1cff 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -27,13 +27,13 @@
#define MAX_HEIGHT 4096
static const unsigned int isif_fmts[] = {
- V4L2_MBUS_FMT_YUYV8_2X8,
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_YUYV8_1X16,
- V4L2_MBUS_FMT_YUYV10_1X20,
- V4L2_MBUS_FMT_SGRBG12_1X12,
- V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
};
#define ISIF_COLPTN_R_Ye 0x0
@@ -70,17 +70,17 @@ static const u32 isif_srggb_pattern =
ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
-static inline u32 isif_read(void *__iomem base_addr, u32 offset)
+static inline u32 isif_read(void __iomem *base_addr, u32 offset)
{
return readl(base_addr + offset);
}
-static inline void isif_write(void *__iomem base_addr, u32 val, u32 offset)
+static inline void isif_write(void __iomem *base_addr, u32 val, u32 offset)
{
writel(val, base_addr + offset);
}
-static inline u32 isif_merge(void *__iomem base_addr, u32 mask, u32 val,
+static inline u32 isif_merge(void __iomem *base_addr, u32 mask, u32 val,
u32 offset)
{
u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
@@ -154,7 +154,7 @@ enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
static int
isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
{
- if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
if (pixfmt == V4L2_PIX_FMT_SBGGR16)
isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
@@ -184,7 +184,7 @@ static int
isif_set_frame_format(struct vpfe_isif_device *isif,
enum isif_frmfmt frm_fmt)
{
- if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
isif->isif_cfg.bayer.frm_fmt = frm_fmt;
else
isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
@@ -196,7 +196,7 @@ static int isif_set_image_window(struct vpfe_isif_device *isif)
{
struct v4l2_rect *win = &isif->crop;
- if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
isif->isif_cfg.bayer.win.top = win->top;
isif->isif_cfg.bayer.win.left = win->left;
isif->isif_cfg.bayer.win.width = win->width;
@@ -214,7 +214,7 @@ static int isif_set_image_window(struct vpfe_isif_device *isif)
static int
isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
{
- if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
isif->isif_cfg.bayer.buf_type = buf_type;
else
isif->isif_cfg.ycbcr.buf_type = buf_type;
@@ -296,7 +296,7 @@ isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
/* If not found, use YUYV8_2x8 as default */
if (i >= ARRAY_SIZE(isif_fmts))
- fmt->format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ fmt->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
/* Clamp the size. */
fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
@@ -429,7 +429,7 @@ static int isif_get_params(struct v4l2_subdev *sd, void *params)
struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
/* only raw module parameters can be set through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
return -EINVAL;
memcpy(params, &isif->isif_cfg.bayer.config_params,
sizeof(isif->isif_cfg.bayer.config_params));
@@ -604,7 +604,7 @@ static int isif_set_params(struct v4l2_subdev *sd, void *params)
int ret = -EINVAL;
/* only raw module parameters can be set through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
return ret;
memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
@@ -646,7 +646,7 @@ static void isif_config_gain_offset(struct vpfe_isif_device *isif)
{
struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
&isif->isif_cfg.bayer.config_params.gain_offset;
- void *__iomem base = isif->isif_cfg.base_addr;
+ void __iomem *base = isif->isif_cfg.base_addr;
u32 val;
val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
@@ -1041,19 +1041,19 @@ isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
static int isif_get_pix_fmt(u32 mbus_code)
{
switch (mbus_code) {
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
return ISIF_PIXFMT_RAW;
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_YUYV10_2X10:
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_Y8_1X8:
return ISIF_PIXFMT_YCBCR_8BIT;
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
return ISIF_PIXFMT_YCBCR_16BIT;
default:
@@ -1121,11 +1121,11 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
- /* currently only V4L2_MBUS_FMT_SGRBG12_1X12 is
+ /* currently only MEDIA_BUS_FMT_SGRBG12_1X12 is
* supported. shift appropriately depending on
* different MBUS fmt's added
*/
- if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
val |= ((VPFE_ISIF_NO_SHIFT &
ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
@@ -1154,7 +1154,7 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
/* Configure Gain & Offset */
isif_config_gain_offset(isif);
/* Configure Color pattern */
- if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
val = isif_sgrbg_pattern;
else
/* default set to rggb */
@@ -1254,8 +1254,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
(((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
/* pack the data to 8-bit CCDCCFG */
switch (format->code) {
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
pr_debug("Invalid pix_fmt(input mode)\n");
return -EINVAL;
@@ -1266,7 +1266,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
break;
- case V4L2_MBUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
pr_debug("Invalid pix_fmt(input mode)\n");
return -EINVAL;
@@ -1278,7 +1278,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
ISIF_BW656_ENABLE;
break;
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
pr_debug("Invalid pix_fmt(input mode)\n");
return -EINVAL;
@@ -1286,7 +1286,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
break;
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
ccdcfg |= ISIF_PACK_8BIT;
ccdcfg |= ISIF_YCINSWP_YCBCR;
if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
@@ -1295,7 +1295,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
}
break;
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
pr_debug("Invalid pix_fmt(input mode)\n");
return -EINVAL;
@@ -1313,8 +1313,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
ISIF_PIX_ORDER_SHIFT;
isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
/* configure video window */
- if (format->code == V4L2_MBUS_FMT_YUYV10_1X20 ||
- format->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ if (format->code == MEDIA_BUS_FMT_YUYV10_1X20 ||
+ format->code == MEDIA_BUS_FMT_YUYV8_1X16)
isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
else
isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
@@ -1345,17 +1345,17 @@ static int isif_configure(struct v4l2_subdev *sd, int mode)
format = &isif->formats[ISIF_PAD_SINK];
switch (format->code) {
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
return isif_config_raw(sd, mode);
- case V4L2_MBUS_FMT_YUYV8_2X8:
- case V4L2_MBUS_FMT_UYVY8_2X8:
- case V4L2_MBUS_FMT_YUYV10_2X10:
- case V4L2_MBUS_FMT_Y8_1X8:
- case V4L2_MBUS_FMT_YUYV8_1X16:
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
return isif_config_ycbcr(sd, mode);
default:
@@ -1602,6 +1602,7 @@ isif_pad_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_rect *rect;
+
rect = v4l2_subdev_get_try_crop(fh, ISIF_PAD_SINK);
memcpy(&crop->rect, rect, sizeof(*rect));
} else {
@@ -1630,7 +1631,7 @@ isif_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = ISIF_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = MAX_WIDTH;
format.format.height = MAX_HEIGHT;
isif_set_format(sd, fh, &format);
@@ -1638,7 +1639,7 @@ isif_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = ISIF_PAD_SOURCE;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
+ format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
format.format.width = MAX_WIDTH;
format.format.height = MAX_HEIGHT;
isif_set_format(sd, fh, &format);
@@ -1991,7 +1992,7 @@ int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
struct media_entity *me = &sd->entity;
static resource_size_t res_len;
struct resource *res;
- void *__iomem addr;
+ void __iomem *addr;
int status;
int i = 0;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
index 473fd2cfe350..89e814e9c0d7 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.h
@@ -159,9 +159,9 @@ struct isif_oper_config {
struct isif_params_raw bayer;
enum isif_data_pack data_pack;
struct isif_gain_values isif_gain_params;
- void *__iomem base_addr;
- void *__iomem linear_tbl0_addr;
- void *__iomem linear_tbl1_addr;
+ void __iomem *base_addr;
+ void __iomem *linear_tbl0_addr;
+ void __iomem *linear_tbl1_addr;
};
#define ISIF_PAD_SINK 0
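
The repeated `void *__iomem` to `void __iomem *` changes above fix the placement of the sparse address-space annotation: `__iomem` qualifies the memory pointed to, not the pointer variable, so sparse can warn when an MMIO pointer is dereferenced directly instead of going through the accessors. A minimal sketch of the intended usage, not part of the patch (`EXAMPLE_REG` is a made-up offset):

```c
#include <linux/io.h>

#define EXAMPLE_REG	0x10	/* hypothetical register offset */

static u32 example_read(void __iomem *base)
{
	/* readl()/writel() expect __iomem pointers; sparse checks this */
	return readl(base + EXAMPLE_REG);
}

static void example_write(void __iomem *base, u32 val)
{
	writel(val, base + EXAMPLE_REG);
}
```
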
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 8828d6c2aab1..75e70e14b724 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -35,18 +35,18 @@
#define MIN_OUT_HEIGHT 2
static const unsigned int resizer_input_formats[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_UV8_1X8,
- V4L2_MBUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_UV8_1X8,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
};
static const unsigned int resizer_output_formats[] = {
- V4L2_MBUS_FMT_UYVY8_2X8,
- V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_UV8_1X8,
- V4L2_MBUS_FMT_YDYUYDYV8_1X16,
- V4L2_MBUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_UV8_1X8,
+ MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
};
/* resizer_calculate_line_length() - This function calculates the line length of
@@ -54,17 +54,17 @@ static const unsigned int resizer_output_formats[] = {
* output.
*/
static void
-resizer_calculate_line_length(enum v4l2_mbus_pixelcode pix, int width,
- int height, int *line_len, int *line_len_c)
+resizer_calculate_line_length(u32 pix, int width, int height,
+ int *line_len, int *line_len_c)
{
*line_len = 0;
*line_len_c = 0;
- if (pix == V4L2_MBUS_FMT_UYVY8_2X8 ||
- pix == V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (pix == MEDIA_BUS_FMT_UYVY8_2X8 ||
+ pix == MEDIA_BUS_FMT_SGRBG12_1X12) {
*line_len = width << 1;
- } else if (pix == V4L2_MBUS_FMT_Y8_1X8 ||
- pix == V4L2_MBUS_FMT_UV8_1X8) {
+ } else if (pix == MEDIA_BUS_FMT_Y8_1X8 ||
+ pix == MEDIA_BUS_FMT_UV8_1X8) {
*line_len = width;
*line_len_c = width;
} else {
@@ -85,11 +85,11 @@ resizer_validate_output_image_format(struct device *dev,
struct v4l2_mbus_framefmt *format,
int *in_line_len, int *in_line_len_c)
{
- if (format->code != V4L2_MBUS_FMT_UYVY8_2X8 &&
- format->code != V4L2_MBUS_FMT_Y8_1X8 &&
- format->code != V4L2_MBUS_FMT_UV8_1X8 &&
- format->code != V4L2_MBUS_FMT_YDYUYDYV8_1X16 &&
- format->code != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (format->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ format->code != MEDIA_BUS_FMT_Y8_1X8 &&
+ format->code != MEDIA_BUS_FMT_UV8_1X8 &&
+ format->code != MEDIA_BUS_FMT_YDYUYDYV8_1X16 &&
+ format->code != MEDIA_BUS_FMT_SGRBG12_1X12) {
dev_err(dev, "Invalid Mbus format, %d\n", format->code);
return -EINVAL;
}
@@ -149,7 +149,7 @@ configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
param->rsz_en[index] = DISABLE;
return;
}
- output = (struct vpfe_rsz_output_spec *)output_spec;
+ output = output_spec;
param->rsz_en[index] = ENABLE;
if (partial) {
param->rsz_rsc_param[index].h_flip = output->h_flip;
@@ -281,7 +281,7 @@ resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
param->ext_mem_param[index].c_offset = 0;
param->ext_mem_param[index].flip_ofst_y = 0;
param->ext_mem_param[index].flip_ofst_c = 0;
- if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) {
+ if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16) {
/* YUV 420 */
yuv_420 = 1;
bytesperpixel = 1;
@@ -322,7 +322,7 @@ static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
output_specs.vst_y = param->user_config.vst;
- if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
output_specs.vst_c = param->user_config.vst;
configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
@@ -336,7 +336,7 @@ static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
if (param->rsz_en[RSZ_B])
resizer_calculate_resize_ratios(resizer, RSZ_B);
- if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
else
resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
@@ -447,26 +447,26 @@ resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
param->rsz_common.source = IPIPE_DATA;
switch (informat->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
param->rsz_common.src_img_fmt = RSZ_IMG_422;
param->rsz_common.raw_flip = 0;
break;
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
param->rsz_common.src_img_fmt = RSZ_IMG_420;
/* Select y */
param->rsz_common.y_c = 0;
param->rsz_common.raw_flip = 0;
break;
- case V4L2_MBUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
param->rsz_common.src_img_fmt = RSZ_IMG_420;
/* Select y */
param->rsz_common.y_c = 1;
param->rsz_common.raw_flip = 0;
break;
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
param->rsz_common.raw_flip = 1;
break;
@@ -519,7 +519,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
configure_resizer_out_params(resizer, RSZ_B,
&cont_config->output2, 0, 1);
- if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
resizer_enable_422_420_conversion(param,
RSZ_B, ENABLE);
else
@@ -540,15 +540,15 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
static inline int
resizer_validate_input_image_format(struct device *dev,
- enum v4l2_mbus_pixelcode pix,
+ u32 pix,
int width, int height, int *line_len)
{
int val;
- if (pix != V4L2_MBUS_FMT_UYVY8_2X8 &&
- pix != V4L2_MBUS_FMT_Y8_1X8 &&
- pix != V4L2_MBUS_FMT_UV8_1X8 &&
- pix != V4L2_MBUS_FMT_SGRBG12_1X12) {
+ if (pix != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ pix != MEDIA_BUS_FMT_Y8_1X8 &&
+ pix != MEDIA_BUS_FMT_UV8_1X8 &&
+ pix != MEDIA_BUS_FMT_SGRBG12_1X12) {
dev_err(dev,
"resizer validate output: pix format not supported, %d\n", pix);
return -EINVAL;
@@ -560,7 +560,7 @@ resizer_validate_input_image_format(struct device *dev,
return -EINVAL;
}
- if (pix == V4L2_MBUS_FMT_UV8_1X8)
+ if (pix == MEDIA_BUS_FMT_UV8_1X8)
resizer_calculate_line_length(pix, width,
height, &val, line_len);
else
@@ -633,7 +633,7 @@ resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
if (!(val % 2)) {
h1 = val;
} else {
- val = (input_width << 7);
+ val = input_width << 7;
val -= rsz >> 1;
val /= rsz << 1;
val <<= 1;
@@ -709,12 +709,12 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
configure_resizer_out_params(resizer, RSZ_A,
&param->user_config.output1, 0, 1);
- if (outformat1->code == V4L2_MBUS_FMT_SGRBG12_1X12)
+ if (outformat1->code == MEDIA_BUS_FMT_SGRBG12_1X12)
param->rsz_common.raw_flip = 1;
else
param->rsz_common.raw_flip = 0;
- if (outformat1->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ if (outformat1->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
resizer_enable_422_420_conversion(param,
RSZ_A, ENABLE);
else
@@ -732,7 +732,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
configure_resizer_out_params(resizer, RSZ_B,
&param->user_config.output2, 0, 1);
- if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16)
+ if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
resizer_enable_422_420_conversion(param,
RSZ_B, ENABLE);
else
@@ -745,7 +745,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
resizer_calculate_resize_ratios(resizer, RSZ_A);
resizer_calculate_sdram_offsets(resizer, RSZ_A);
/* Overriding resize ratio calculation */
- if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
param->rsz_rsc_param[RSZ_A].v_dif =
(((informat->height + 1) * 2) * 256) /
(param->rsz_rsc_param[RSZ_A].o_vsz + 1);
@@ -756,7 +756,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
resizer_calculate_resize_ratios(resizer, RSZ_B);
resizer_calculate_sdram_offsets(resizer, RSZ_B);
/* Overriding resize ratio calculation */
- if (informat->code == V4L2_MBUS_FMT_UV8_1X8) {
+ if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
param->rsz_rsc_param[RSZ_B].v_dif =
(((informat->height + 1) * 2) * 256) /
(param->rsz_rsc_param[RSZ_B].o_vsz + 1);
@@ -1218,12 +1218,12 @@ static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
switch (cmd) {
case VIDIOC_VPFE_RSZ_S_CONFIG:
- user_config = (struct vpfe_rsz_config *)arg;
+ user_config = arg;
ret = resizer_set_configuration(resizer, user_config);
break;
case VIDIOC_VPFE_RSZ_G_CONFIG:
- user_config = (struct vpfe_rsz_config *)arg;
+ user_config = arg;
if (!user_config->config) {
dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
return -EINVAL;
@@ -1340,7 +1340,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(resizer_input_formats))
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
MAX_IN_WIDTH);
@@ -1357,7 +1357,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
max_out_width);
@@ -1375,7 +1375,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
max_out_width);
@@ -1548,7 +1548,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_CROP_PAD_SINK;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
resizer_set_format(sd, fh, &format);
@@ -1556,7 +1556,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_CROP_PAD_SOURCE;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_WIDTH;
resizer_set_format(sd, fh, &format);
@@ -1564,7 +1564,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_CROP_PAD_SOURCE2;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_WIDTH;
resizer_set_format(sd, fh, &format);
@@ -1572,7 +1572,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
resizer_set_format(sd, fh, &format);
@@ -1580,7 +1580,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SOURCE;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
resizer_set_format(sd, fh, &format);
@@ -1588,7 +1588,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_YUYV8_2X8;
+ format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = MAX_IN_WIDTH;
format.format.height = MAX_IN_HEIGHT;
resizer_set_format(sd, fh, &format);
@@ -1596,7 +1596,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SOURCE;
format.which = which;
- format.format.code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
resizer_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
index 59a79422b914..93b0f44030aa 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -228,7 +228,7 @@ struct vpfe_resizer_device {
struct dm365_resizer_device resizer_a;
struct dm365_resizer_device resizer_b;
struct resizer_params config;
- void *__iomem base_addr;
+ void __iomem *base_addr;
};
int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
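
The bulk of the resizer and ISIF changes is the mechanical rename of the media bus codes from the V4L2_MBUS_FMT_* namespace to MEDIA_BUS_FMT_* (the numeric values are unchanged), with `enum v4l2_mbus_pixelcode` parameters becoming plain `u32`, as seen in resizer_calculate_line_length() and resizer_validate_input_image_format(). A hedged sketch of what a helper looks like after the conversion; `example_is_bayer()` is illustrative, not from the patch:

```c
#include <linux/types.h>
#include <linux/media-bus-format.h>

/* mbus codes are plain u32 values now; no enum type is required */
static bool example_is_bayer(u32 code)
{
	return code == MEDIA_BUS_FMT_SGRBG12_1X12 ||
	       code == MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 ||
	       code == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8;
}
```
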
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index a862b28092e4..a350a20955f1 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -99,47 +99,47 @@ void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
struct v4l2_pix_format *pix)
{
switch (mbus->code) {
- case V4L2_MBUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
pix->pixelformat = V4L2_PIX_FMT_UYVY;
pix->bytesperline = pix->width * 2;
break;
- case V4L2_MBUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
pix->pixelformat = V4L2_PIX_FMT_YUYV;
pix->bytesperline = pix->width * 2;
break;
- case V4L2_MBUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
pix->pixelformat = V4L2_PIX_FMT_UYVY;
pix->bytesperline = pix->width * 2;
break;
- case V4L2_MBUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
pix->bytesperline = pix->width * 2;
break;
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
pix->bytesperline = pix->width;
break;
- case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
pix->bytesperline = pix->width;
break;
- case V4L2_MBUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
pix->pixelformat = V4L2_PIX_FMT_NV12;
pix->bytesperline = pix->width;
break;
- case V4L2_MBUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
pix->pixelformat = V4L2_PIX_FMT_GREY;
pix->bytesperline = pix->width;
break;
- case V4L2_MBUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
pix->pixelformat = V4L2_PIX_FMT_UV8;
pix->bytesperline = pix->width;
break;
@@ -707,7 +707,6 @@ static int vpfe_remove(struct platform_device *pdev)
static struct platform_driver vpfe_driver = {
.driver = {
.name = CAPTURE_DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = vpfe_probe,
.remove = vpfe_remove,
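
Dropping `.owner = THIS_MODULE` here (and in the lirc platform drivers further down) is safe because the platform core fills in the owner at registration time: `platform_driver_register()` and `module_platform_driver()` pass THIS_MODULE on the caller's behalf. A minimal self-contained sketch, not tied to this driver:

```c
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		/* no .owner: platform_driver_register() sets it to THIS_MODULE */
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
```
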
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 6f9171c39bdc..06d48d5eb0a0 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -600,11 +600,11 @@ static int vpfe_querycap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- cap->device_caps = cap->capabilities;
- cap->version = VPFE_CAPTURE_VERSION_CODE;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 40580228a6c7..293ffda503e0 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -512,10 +512,9 @@ dt3155_ioc_querycap(struct file *filp, void *p, struct v4l2_capability *cap)
strcpy(cap->driver, DT3155_NAME);
strcpy(cap->card, DT3155_NAME " frame grabber");
sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev));
- cap->version =
- KERNEL_VERSION(DT3155_VER_MAJ, DT3155_VER_MIN, DT3155_VER_EXT);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
DT3155_CAPTURE_METHOD;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
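
Both querycap conversions above follow the same VIDIOC_QUERYCAP pattern: `device_caps` describes only the device node being queried, `capabilities` describes the whole device and must include V4L2_CAP_DEVICE_CAPS, and the deprecated `version` field is left for the V4L2 core to fill in. A generic sketch with placeholder driver/card strings, not taken from either driver:

```c
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/videodev2.h>

static int example_querycap(struct file *file, void *priv,
			    struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "example", sizeof(cap->driver));
	strlcpy(cap->card, "example card", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:example", sizeof(cap->bus_info));

	/* what this particular /dev/videoX node can do */
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	/* union of all nodes of the device, plus the DEVICE_CAPS flag */
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
```
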
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index e60a59fc252b..6879c4651b46 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -18,12 +18,6 @@ config LIRC_BT829
help
Driver for the IR interface on BT829-based hardware
-config LIRC_IGORPLUGUSB
- tristate "Igor Cesko's USB IR Receiver"
- depends on LIRC && USB
- help
- Driver for Igor Cesko's USB IR Receiver
-
config LIRC_IMON
tristate "Legacy SoundGraph iMON Receiver and Display"
depends on LIRC && USB
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index b90fcabddab6..5430adf0475d 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -4,7 +4,6 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
-obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o
obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 4c806ba41323..44f565547179 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -56,11 +56,6 @@ static unsigned char do_get_bits(void);
#define DRIVER_NAME "lirc_bt829"
static bool debug;
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG DRIVER_NAME ": "fmt, ## args); \
- } while (0)
static int atir_minor;
static phys_addr_t pci_addr_phys;
@@ -101,7 +96,7 @@ static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
status = poll_main();
key = (status >> 8) & 0xFF;
if (status & 0xFF) {
- dprintk("reading key %02X\n", key);
+ dev_dbg(atir_driver.dev, "reading key %02X\n", key);
lirc_buffer_write(buf, &key);
return 0;
}
@@ -110,13 +105,13 @@ static int atir_add_to_buf(void *data, struct lirc_buffer *buf)
static int atir_set_use_inc(void *data)
{
- dprintk("driver is opened\n");
+ dev_dbg(atir_driver.dev, "driver is opened\n");
return 0;
}
static void atir_set_use_dec(void *data)
{
- dprintk("driver is closed\n");
+ dev_dbg(atir_driver.dev, "driver is closed\n");
}
int init_module(void)
@@ -154,7 +149,8 @@ int init_module(void)
rc = atir_minor;
goto err_unmap;
}
- dprintk("driver is registered on minor %d\n", atir_minor);
+ dev_dbg(atir_driver.dev, "driver is registered on minor %d\n",
+ atir_minor);
return 0;
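
The dprintk removals in lirc_bt829 (and in lirc_sir and lirc_zilog below) replace driver-private "if (debug) printk" wrappers with the standard dev_dbg()/pr_debug() helpers, which compile away unless DEBUG is defined and can be toggled at run time with CONFIG_DYNAMIC_DEBUG. A hedged sketch of the new shape; the function and names are illustrative only:

```c
#include <linux/device.h>
#include <linux/types.h>

static void example_report_key(struct device *dev, u8 key)
{
	/* conditional output is handled by the core, not a local macro */
	dev_dbg(dev, "reading key %02X\n", key);
}
```
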
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
deleted file mode 100644
index 431d1e86ebf9..000000000000
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * lirc_igorplugusb - USB remote support for LIRC
- *
- * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
- * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
- *
- * The device can only record bursts of up to 36 pulses/spaces.
- * Works fine with RC5. Longer commands lead to device buffer overrun.
- * (Maybe a better firmware or a microcontroller with more ram can help?)
- *
- * Version 0.1 [beta status]
- *
- * Copyright (C) 2004 Jan M. Hochstein
- * <hochstein@algo.informatik.tu-darmstadt.de>
- *
- * This driver was derived from:
- * Paul Miller <pmiller9@users.sourceforge.net>
- * "lirc_atiusb" module
- * Vladimir Dergachev <volodya@minspring.com>'s 2002
- * "USB ATI Remote support" (input device)
- * Adrian Dewhurst <sailor-lk@sailorfrag.net>'s 2002
- * "USB StreamZap remote driver" (LIRC)
- * Artur Lipowski <alipowski@kki.net.pl>'s 2002
- * "lirc_dev" and "lirc_gpio" LIRC modules
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/usb.h>
-#include <linux/time.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-
-/* module identification */
-#define DRIVER_VERSION "0.2"
-#define DRIVER_AUTHOR \
- "Jan M. Hochstein <hochstein@algo.informatik.tu-darmstadt.de>"
-#define DRIVER_DESC "Igorplug USB remote driver for LIRC"
-#define DRIVER_NAME "lirc_igorplugusb"
-
-/* One mode2 pulse/space has 4 bytes. */
-#define CODE_LENGTH sizeof(int)
-
-/* Igor's firmware cannot record bursts longer than 36. */
-#define DEVICE_BUFLEN 36
-
-/*
- * Header at the beginning of the device's buffer:
- * unsigned char data_length
- * unsigned char data_start (!=0 means ring-buffer overrun)
- * unsigned char counter (incremented by each burst)
- */
-#define DEVICE_HEADERLEN 3
-
-/* This is for the gap */
-#define ADDITIONAL_LIRC_BYTES 2
-
-/* times to poll per second */
-#define SAMPLE_RATE 100
-static int sample_rate = SAMPLE_RATE;
-
-
-/**** Igor's USB Request Codes */
-
-#define SET_INFRABUFFER_EMPTY 1
-/**
- * Params: none
- * Answer: empty
- */
-
-#define GET_INFRACODE 2
-/**
- * Params:
- * wValue: offset to begin reading infra buffer
- *
- * Answer: infra data
- */
-
-#define SET_DATAPORT_DIRECTION 3
-/**
- * Params:
- * wValue: (byte) 1 bit for each data port pin (0=in, 1=out)
- *
- * Answer: empty
- */
-
-#define GET_DATAPORT_DIRECTION 4
-/**
- * Params: none
- *
- * Answer: (byte) 1 bit for each data port pin (0=in, 1=out)
- */
-
-#define SET_OUT_DATAPORT 5
-/**
- * Params:
- * wValue: byte to write to output data port
- *
- * Answer: empty
- */
-
-#define GET_OUT_DATAPORT 6
-/**
- * Params: none
- *
- * Answer: least significant 3 bits read from output data port
- */
-
-#define GET_IN_DATAPORT 7
-/**
- * Params: none
- *
- * Answer: least significant 3 bits read from input data port
- */
-
-#define READ_EEPROM 8
-/**
- * Params:
- * wValue: offset to begin reading EEPROM
- *
- * Answer: EEPROM bytes
- */
-
-#define WRITE_EEPROM 9
-/**
- * Params:
- * wValue: offset to EEPROM byte
- * wIndex: byte to write
- *
- * Answer: empty
- */
-
-#define SEND_RS232 10
-/**
- * Params:
- * wValue: byte to send
- *
- * Answer: empty
- */
-
-#define RECV_RS232 11
-/**
- * Params: none
- *
- * Answer: byte received
- */
-
-#define SET_RS232_BAUD 12
-/**
- * Params:
- * wValue: byte to write to UART bit rate register (UBRR)
- *
- * Answer: empty
- */
-
-#define GET_RS232_BAUD 13
-/**
- * Params: none
- *
- * Answer: byte read from UART bit rate register (UBRR)
- */
-
-
-/* data structure for each usb remote */
-struct igorplug {
-
- /* usb */
- struct usb_device *usbdev;
- int devnum;
-
- unsigned char *buf_in;
- unsigned int len_in;
- int in_space;
- struct timeval last_time;
-
- dma_addr_t dma_in;
-
- /* lirc */
- struct lirc_driver *d;
-
- /* handle sending (init strings) */
- int send_flags;
-};
-
-static int unregister_from_lirc(struct igorplug *ir)
-{
- struct lirc_driver *d;
- int devnum;
-
- devnum = ir->devnum;
- d = ir->d;
-
- if (!d) {
- dev_err(&ir->usbdev->dev,
- "%s: called with NULL lirc driver struct!\n", __func__);
- return -EINVAL;
- }
-
- dev_dbg(&ir->usbdev->dev, "calling lirc_unregister_driver\n");
- lirc_unregister_driver(d->minor);
-
- return devnum;
-}
-
-static int set_use_inc(void *data)
-{
- struct igorplug *ir = data;
-
- if (!ir) {
- printk(DRIVER_NAME "[?]: set_use_inc called with no context\n");
- return -EIO;
- }
-
- dev_dbg(&ir->usbdev->dev, "set use inc\n");
-
- if (!ir->usbdev)
- return -ENODEV;
-
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
- struct igorplug *ir = data;
-
- if (!ir) {
- printk(DRIVER_NAME "[?]: set_use_dec called with no context\n");
- return;
- }
-
- dev_dbg(&ir->usbdev->dev, "set use dec\n");
-}
-
-static void send_fragment(struct igorplug *ir, struct lirc_buffer *buf,
- int i, int max)
-{
- int code;
-
- /* MODE2: pulse/space (PULSE_BIT) in 1us units */
- while (i < max) {
- /* 1 Igor-tick = 85.333333 us */
- code = (unsigned int)ir->buf_in[i] * 85 +
- (unsigned int)ir->buf_in[i] / 3;
- ir->last_time.tv_usec += code;
- if (ir->in_space)
- code |= PULSE_BIT;
- lirc_buffer_write(buf, (unsigned char *)&code);
- /* 1 chunk = CODE_LENGTH bytes */
- ir->in_space ^= 1;
- ++i;
- }
-}
-
-/**
- * Called in user context.
- * return 0 if data was added to the buffer and
- * -ENODATA if none was available. This should add some number of bits
- * evenly divisible by code_length to the buffer
- */
-static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
-{
- int ret;
- struct igorplug *ir = (struct igorplug *)data;
-
- if (!ir || !ir->usbdev) /* Has the device been removed? */
- return -ENODEV;
-
- memset(ir->buf_in, 0, ir->len_in);
-
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- GET_INFRACODE, USB_TYPE_VENDOR | USB_DIR_IN,
- 0/* offset */, /*unused*/0,
- ir->buf_in, ir->len_in,
- /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
- if (ret > 0) {
- int code, timediff;
- struct timeval now;
-
- /* ACK packet has 1 byte --> ignore */
- if (ret < DEVICE_HEADERLEN)
- return -ENODATA;
-
- dev_dbg(&ir->usbdev->dev, "Got %d bytes. Header: %*ph\n",
- ret, 3, ir->buf_in);
-
- do_gettimeofday(&now);
- timediff = now.tv_sec - ir->last_time.tv_sec;
- if (timediff + 1 > PULSE_MASK / 1000000)
- timediff = PULSE_MASK;
- else {
- timediff *= 1000000;
- timediff += now.tv_usec - ir->last_time.tv_usec;
- }
- ir->last_time.tv_sec = now.tv_sec;
- ir->last_time.tv_usec = now.tv_usec;
-
- /* create leading gap */
- code = timediff;
- lirc_buffer_write(buf, (unsigned char *)&code);
- ir->in_space = 1; /* next comes a pulse */
-
- if (ir->buf_in[2] == 0)
- send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
- else {
- dev_warn(&ir->usbdev->dev,
- "[%d]: Device buffer overrun.\n", ir->devnum);
- /* HHHNNNNNNNNNNNOOOOOOOO H = header
- <---[2]---> N = newer
- <---------ret--------> O = older */
- ir->buf_in[2] %= ret - DEVICE_HEADERLEN; /* sanitize */
- /* keep even-ness to not desync pulse/pause */
- send_fragment(ir, buf, DEVICE_HEADERLEN +
- ir->buf_in[2] - (ir->buf_in[2] & 1), ret);
- send_fragment(ir, buf, DEVICE_HEADERLEN,
- DEVICE_HEADERLEN + ir->buf_in[2]);
- }
-
- ret = usb_control_msg(
- ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
- /*unused*/0, /*unused*/0,
- /*dummy*/ir->buf_in, /*dummy*/ir->len_in,
- /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
- if (ret < 0)
- printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
- ir->devnum, ret);
- return 0;
- } else if (ret < 0)
- printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n",
- ir->devnum, ret);
-
- return -ENODATA;
-}
-
-static int igorplugusb_remote_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev;
- struct usb_host_interface *idesc = NULL;
- struct usb_endpoint_descriptor *ep;
- struct igorplug *ir = NULL;
- struct lirc_driver *driver = NULL;
- int devnum, pipe, maxp;
- char buf[63], name[128] = "";
- int ret;
-
- dev_dbg(&intf->dev, "%s: usb probe called.\n", __func__);
-
- dev = interface_to_usbdev(intf);
-
- idesc = intf->cur_altsetting;
-
- if (idesc->desc.bNumEndpoints != 1)
- return -ENODEV;
-
- ep = &idesc->endpoint->desc;
- if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- != USB_DIR_IN)
- || (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- != USB_ENDPOINT_XFER_CONTROL)
- return -ENODEV;
-
- pipe = usb_rcvctrlpipe(dev, ep->bEndpointAddress);
- devnum = dev->devnum;
- maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
-
- dev_dbg(&intf->dev, "%s: bytes_in_key=%zu maxp=%d\n",
- __func__, CODE_LENGTH, maxp);
-
- ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
- if (!ir)
- return -ENOMEM;
-
- driver = devm_kzalloc(&intf->dev, sizeof(*driver), GFP_KERNEL);
- if (!driver)
- return -ENOMEM;
-
- ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
- GFP_ATOMIC, &ir->dma_in);
- if (!ir->buf_in)
- return -ENOMEM;
-
- strcpy(driver->name, DRIVER_NAME " ");
- driver->minor = -1;
- driver->code_length = CODE_LENGTH * 8; /* in bits */
- driver->features = LIRC_CAN_REC_MODE2;
- driver->data = ir;
- driver->chunk_size = CODE_LENGTH;
- driver->buffer_size = DEVICE_BUFLEN + ADDITIONAL_LIRC_BYTES;
- driver->set_use_inc = &set_use_inc;
- driver->set_use_dec = &set_use_dec;
- driver->sample_rate = sample_rate; /* per second */
- driver->add_to_buf = &igorplugusb_remote_poll;
- driver->dev = &intf->dev;
- driver->owner = THIS_MODULE;
-
- ret = lirc_register_driver(driver);
- if (ret < 0) {
- usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
- ir->buf_in, ir->dma_in);
- return ret;
- }
-
- driver->minor = ret;
- ir->d = driver;
- ir->devnum = devnum;
- ir->usbdev = dev;
- ir->len_in = DEVICE_BUFLEN + DEVICE_HEADERLEN;
- ir->in_space = 1; /* First mode2 event is a space. */
- do_gettimeofday(&ir->last_time);
-
- if (dev->descriptor.iManufacturer
- && usb_string(dev, dev->descriptor.iManufacturer,
- buf, sizeof(buf)) > 0)
- strlcpy(name, buf, sizeof(name));
- if (dev->descriptor.iProduct
- && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0)
- snprintf(name + strlen(name), sizeof(name) - strlen(name),
- " %s", buf);
- printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name,
- dev->bus->busnum, devnum);
-
- /* clear device buffer */
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
- /*unused*/0, /*unused*/0,
- /*dummy*/ir->buf_in, /*dummy*/ir->len_in,
- /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
- if (ret < 0)
- printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
- devnum, ret);
-
- usb_set_intfdata(intf, ir);
- return 0;
-}
-
-static void igorplugusb_remote_disconnect(struct usb_interface *intf)
-{
- struct usb_device *usbdev = interface_to_usbdev(intf);
- struct igorplug *ir = usb_get_intfdata(intf);
- struct device *dev = &intf->dev;
- int devnum;
-
- usb_set_intfdata(intf, NULL);
-
- if (!ir || !ir->d)
- return;
-
- ir->usbdev = NULL;
-
- usb_free_coherent(usbdev, ir->len_in, ir->buf_in, ir->dma_in);
-
- devnum = unregister_from_lirc(ir);
-
- dev_info(dev, DRIVER_NAME "[%d]: %s done\n", devnum, __func__);
-}
-
-static struct usb_device_id igorplugusb_remote_id_table[] = {
- /* Igor Plug USB (Atmel's Manufact. ID) */
- { USB_DEVICE(0x03eb, 0x0002) },
- /* Fit PC2 Infrared Adapter */
- { USB_DEVICE(0x03eb, 0x21fe) },
-
- /* Terminating entry */
- { }
-};
-
-static struct usb_driver igorplugusb_remote_driver = {
- .name = DRIVER_NAME,
- .probe = igorplugusb_remote_probe,
- .disconnect = igorplugusb_remote_disconnect,
- .id_table = igorplugusb_remote_id_table
-};
-
-module_usb_driver(igorplugusb_remote_driver);
-
-#include <linux/vermagic.h>
-MODULE_INFO(vermagic, VERMAGIC_STRING);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(usb, igorplugusb_remote_id_table);
-
-module_param(sample_rate, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)");
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 7aca44f28c5a..9ce7d9990e3e 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -495,7 +495,7 @@ static int ir_open(void *data)
/* prevent races with disconnect */
mutex_lock(&driver_lock);
- context = (struct imon_context *)data;
+ context = data;
/* initial IR protocol decode variables */
context->rx.count = 0;
@@ -516,7 +516,7 @@ static void ir_close(void *data)
{
struct imon_context *context;
- context = (struct imon_context *)data;
+ context = data;
if (!context) {
pr_err("%s: no context for device\n", __func__);
return;
@@ -572,29 +572,6 @@ static void submit_data(struct imon_context *context)
wake_up(&context->driver->rbuf->wait_poll);
}
-static inline int tv2int(const struct timeval *a, const struct timeval *b)
-{
- int usecs = 0;
- int sec = 0;
-
- if (b->tv_usec > a->tv_usec) {
- usecs = 1000000;
- sec--;
- }
-
- usecs += a->tv_usec - b->tv_usec;
-
- sec += a->tv_sec - b->tv_sec;
- sec *= 1000;
- usecs /= 1000;
- sec += usecs;
-
- if (sec < 0)
- sec = 1000;
-
- return sec;
-}
-
/**
* Process the incoming packet
*/
@@ -606,7 +583,6 @@ static void imon_incoming_packet(struct imon_context *context,
struct device *dev = context->driver->dev;
int octet, bit;
unsigned char mask;
- int i;
/*
* just bail out if no listening IR client
@@ -620,13 +596,8 @@ static void imon_incoming_packet(struct imon_context *context,
return;
}
- if (debug) {
- dev_info(dev, "raw packet: ");
- for (i = 0; i < len; ++i)
- printk("%02x ", buf[i]);
- printk("\n");
- }
-
+ if (debug)
+ dev_info(dev, "raw packet: %*ph\n", len, buf);
/*
* Translate received data to pulse and space lengths.
* Received data is active low, i.e. pulses are 0 and
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 5156c2181016..19c5c21babf5 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -605,7 +605,6 @@ static struct platform_driver lirc_parallel_driver = {
.resume = lirc_parallel_resume,
.driver = {
.name = LIRC_DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index c20ef56202bf..4a268200cbf5 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -488,7 +488,7 @@ static int ir_open(void *data)
/* prevent races with disconnect */
mutex_lock(&disconnect_lock);
- context = (struct sasem_context *) data;
+ context = data;
mutex_lock(&context->ctx_lock);
@@ -530,7 +530,7 @@ static void ir_close(void *data)
{
struct sasem_context *context;
- context = (struct sasem_context *)data;
+ context = data;
if (!context) {
pr_err("%s: no context for device\n", __func__);
return;
@@ -573,7 +573,6 @@ static void incoming_packet(struct sasem_context *context,
unsigned char *buf = urb->transfer_buffer;
long ms;
struct timeval tv;
- int i;
if (len != 8) {
dev_warn(&context->dev->dev,
@@ -582,13 +581,8 @@ static void incoming_packet(struct sasem_context *context,
return;
}
- if (debug) {
- printk(KERN_INFO "Incoming data: ");
- for (i = 0; i < 8; ++i)
- printk(KERN_CONT "%02x ", buf[i]);
- printk(KERN_CONT "\n");
- }
-
+ if (debug)
+ dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf);
/*
* Lirc could deal with the repeat code, but we really need to block it
* if it arrives too late. Otherwise we could repeat the wrong code.
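
The debug-dump conversions in lirc_imon and lirc_sasem use the kernel's "%*ph" printf extension, which prints a small buffer (up to 64 bytes) as a hex string in one call, replacing the hand-rolled per-byte "%02x " loops. A minimal sketch, assuming a valid `dev` and buffer:

```c
#include <linux/device.h>
#include <linux/types.h>

static void example_dump(struct device *dev, const u8 *buf, int len)
{
	/* %*ph: hex dump of 'len' bytes; len must not exceed 64 */
	dev_dbg(dev, "raw packet: %*ph\n", len, buf);
}
```
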
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 181b92bfe25d..eb4ccb8d2a93 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -1060,7 +1060,6 @@ static struct platform_driver lirc_serial_driver = {
.resume = lirc_serial_resume,
.driver = {
.name = "lirc_serial",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 2ee55eaf2a53..39f4733fb1ee 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -140,12 +140,6 @@ static int rx_buf[RBUF_LEN];
static unsigned int rx_tail, rx_head;
static bool debug;
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG LIRC_DRIVER_NAME ": " \
- fmt, ## args); \
- } while (0)
/* SECTION: Prototypes */
@@ -322,7 +316,7 @@ static void add_read_queue(int flag, unsigned long val)
unsigned int new_rx_tail;
int newval;
- dprintk("add flag %d with val %lu\n", flag, val);
+ pr_debug("add flag %d with val %lu\n", flag, val);
newval = val & PULSE_MASK;
@@ -342,7 +336,7 @@ static void add_read_queue(int flag, unsigned long val)
}
new_rx_tail = (rx_tail + 1) & (RBUF_LEN - 1);
if (new_rx_tail == rx_head) {
- dprintk("Buffer overrun.\n");
+ pr_debug("Buffer overrun.\n");
return;
}
rx_buf[rx_tail] = newval;
@@ -439,7 +433,8 @@ static void sir_timeout(unsigned long data)
outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
/* determine 'virtual' pulse end: */
pulse_end = delta(&last_tv, &last_intr_tv);
- dprintk("timeout add %d for %lu usec\n", last_value, pulse_end);
+ dev_dbg(driver.dev, "timeout add %d for %lu usec\n",
+ last_value, pulse_end);
add_read_queue(last_value, pulse_end);
last_value = 0;
last_tv = last_intr_tv;
@@ -479,14 +474,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
do_gettimeofday(&curr_tv);
deltv = delta(&last_tv, &curr_tv);
deltintrtv = delta(&last_intr_tv, &curr_tv);
- dprintk("t %lu, d %d\n", deltintrtv, (int)data);
+ dev_dbg(driver.dev, "t %lu, d %d\n",
+ deltintrtv, (int)data);
/*
* if nothing came in last X cycles,
* it was gap
*/
if (deltintrtv > TIME_CONST * threshold) {
if (last_value) {
- dprintk("GAP\n");
+ dev_dbg(driver.dev, "GAP\n");
/* simulate signal change */
add_read_queue(last_value,
deltv -
@@ -931,7 +927,6 @@ static struct platform_driver lirc_sir_driver = {
.remove = lirc_sir_remove,
.driver = {
.name = "lirc_sir",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 567feba0011c..cc872fb4ca68 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -152,23 +152,12 @@ struct tx_data_struct {
static struct tx_data_struct *tx_data;
static struct mutex tx_data_lock;
-#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
- ## args)
-#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static bool debug; /* debug output */
static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
-#define dprintk(fmt, args...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
- ## args); \
- } while (0)
-
/* struct IR reference counting */
static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
@@ -199,7 +188,7 @@ static void release_ir_device(struct kref *ref)
lirc_unregister_driver(ir->l.minor);
ir->l.minor = MAX_IRCTL_DEVICES;
}
- if (ir->rbuf.fifo_initialized)
+ if (kfifo_initialized(&ir->rbuf.fifo))
lirc_buffer_free(&ir->rbuf);
list_del(&ir->list);
kfree(ir);
@@ -333,7 +322,7 @@ static int add_to_buf(struct IR *ir)
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
- dprintk("buffer overflow\n");
+ dev_dbg(ir->l.dev, "buffer overflow\n");
return -EOVERFLOW;
}
@@ -379,16 +368,17 @@ static int add_to_buf(struct IR *ir)
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
+ ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- zilog_error("unable to read from the IR chip "
+ dev_err(ir->l.dev, "unable to read from the IR chip "
"after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- zilog_error("polling the IR receiver chip failed, "
+ dev_err(ir->l.dev, "polling the IR receiver chip failed, "
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -415,13 +405,14 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- zilog_error("i2c_master_recv failed with %d -- "
+ dev_err(ir->l.dev, "i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
+ dev_dbg(ir->l.dev, "key (0x%02x/0x%02x)\n",
+ rx->b[0], rx->b[1]);
}
/* key pressed ? */
@@ -472,7 +463,7 @@ static int lirc_thread(void *arg)
struct IR *ir = arg;
struct lirc_buffer *rbuf = ir->l.rbuf;
- dprintk("poll thread started\n");
+ dev_dbg(ir->l.dev, "poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -500,7 +491,7 @@ static int lirc_thread(void *arg)
wake_up_interruptible(&rbuf->wait_poll);
}
- dprintk("poll thread ended\n");
+ dev_dbg(ir->l.dev, "poll thread ended\n");
return 0;
}
@@ -644,7 +635,7 @@ static int get_key_data(unsigned char *buf,
return -EPROTO;
corrupt:
- zilog_error("firmware is corrupt\n");
+ pr_err("firmware is corrupt\n");
return -EFAULT;
}
@@ -662,10 +653,11 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
- dprintk("%*ph", 5, buf);
+ dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n",
+ ret);
return ret < 0 ? ret : -EFAULT;
}
i += tosend;
@@ -689,7 +681,7 @@ static int send_boot_data(struct IR_tx *tx)
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -706,21 +698,22 @@ static int send_boot_data(struct IR_tx *tx)
}
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
+ dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
+ buf[0]);
return 0;
}
- zilog_notify("Zilog/Hauppauge IR blaster firmware version "
+ dev_notice(tx->ir->l.dev, "Zilog/Hauppauge IR blaster firmware version "
"%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
return 0;
@@ -730,15 +723,13 @@ static int send_boot_data(struct IR_tx *tx)
static void fw_unload_locked(void)
{
if (tx_data) {
- if (tx_data->code_sets)
- vfree(tx_data->code_sets);
+ vfree(tx_data->code_sets);
- if (tx_data->datap)
- vfree(tx_data->datap);
+ vfree(tx_data->datap);
vfree(tx_data);
tx_data = NULL;
- dprintk("successfully unloaded IR blaster firmware\n");
+ pr_debug("successfully unloaded IR blaster firmware\n");
}
}
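
The NULL checks dropped around vfree() in fw_unload_locked() are redundant: like kfree(), vfree() is a no-op when passed NULL, so unconditional freeing is the idiomatic form. A minimal sketch with an illustrative struct, not the driver's real tx_data layout:

```c
#include <linux/vmalloc.h>

struct example_fw {
	void *datap;
	char **code_sets;
};

static void example_fw_free(struct example_fw *fw)
{
	if (!fw)
		return;
	/* vfree(NULL) is a no-op, so no per-field NULL checks are needed */
	vfree(fw->code_sets);
	vfree(fw->datap);
	vfree(fw);
}
```
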
@@ -768,17 +759,16 @@ static int fw_load(struct IR_tx *tx)
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
- zilog_error("firmware haup-ir-blaster.bin not available (%d)\n",
+ dev_err(tx->ir->l.dev, "firmware haup-ir-blaster.bin not available (%d)\n",
ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
- dprintk("firmware of size %zu loaded\n", fw_entry->size);
+ dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
if (tx_data == NULL) {
- zilog_error("out of memory\n");
release_firmware(fw_entry);
ret = -ENOMEM;
goto out;
@@ -788,7 +778,6 @@ static int fw_load(struct IR_tx *tx)
/* Copy the data so hotplug doesn't get confused and timeout */
tx_data->datap = vmalloc(fw_entry->size);
if (tx_data->datap == NULL) {
- zilog_error("out of memory\n");
release_firmware(fw_entry);
vfree(tx_data);
ret = -ENOMEM;
@@ -803,7 +792,7 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- zilog_error("unsupported code set file version (%u, expected"
+ dev_err(tx->ir->l.dev, "unsupported code set file version (%u, expected"
"1) -- please upgrade to a newer driver",
version);
fw_unload_locked();
@@ -820,7 +809,8 @@ static int fw_load(struct IR_tx *tx)
&tx_data->num_code_sets))
goto corrupt;
- dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);
+ dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
+ tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
@@ -884,7 +874,7 @@ static int fw_load(struct IR_tx *tx)
goto out;
corrupt:
- zilog_error("firmware is corrupt\n");
+ dev_err(tx->ir->l.dev, "firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
@@ -904,9 +894,9 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
- dprintk("read called\n");
+ dev_dbg(ir->l.dev, "read called\n");
if (n % rbuf->chunk_size) {
- dprintk("read result = -EINVAL\n");
+ dev_dbg(ir->l.dev, "read result = -EINVAL\n");
return -EINVAL;
}
@@ -950,7 +940,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- zilog_error("chunk_size is too big (%d)!\n",
+ dev_err(ir->l.dev, "chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
break;
@@ -964,7 +954,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
retries++;
}
if (retries >= 5) {
- zilog_error("Buffer read failed!\n");
+ dev_err(ir->l.dev, "Buffer read failed!\n");
ret = -EIO;
}
}
@@ -974,7 +964,8 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dprintk("read result = %d (%s)\n", ret, ret ? "Error" : "OK");
+ dev_dbg(ir->l.dev, "read result = %d (%s)\n",
+ ret, ret ? "Error" : "OK");
return ret ? ret : written;
}
@@ -990,7 +981,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- zilog_error("failed to get data for code %u, key %u -- check "
+ dev_err(tx->ir->l.dev, "failed to get data for code %u, key %u -- check "
"lircd.conf entries\n", code, key);
return ret;
} else if (ret != 0)
@@ -1006,7 +997,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1019,18 +1010,18 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (ret != 1) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
- zilog_error("unexpected IR TX response #1: %02x\n",
+ dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
@@ -1040,7 +1031,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- zilog_error("i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1050,7 +1041,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
- dprintk("sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1066,11 +1057,11 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dprintk("NAK expected: i2c_master_send "
+ dev_dbg(tx->ir->l.dev, "NAK expected: i2c_master_send "
"failed with %d (try %d)\n", ret, i+1);
}
if (ret != 1) {
- zilog_error("IR TX chip never got ready: last i2c_master_send "
+ dev_err(tx->ir->l.dev, "IR TX chip never got ready: last i2c_master_send "
"failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1078,16 +1069,17 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
- zilog_error("i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
- zilog_error("unexpected IR TX response #2: %02x\n", buf[0]);
+ dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
+ buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
- dprintk("sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1173,11 +1165,11 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- zilog_error("sending to the IR transmitter chip "
+ dev_err(tx->ir->l.dev, "sending to the IR transmitter chip "
"failed, trying reset\n");
if (failures >= 3) {
- zilog_error("unable to send to the IR chip "
+ dev_err(tx->ir->l.dev, "unable to send to the IR chip "
"after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
@@ -1212,7 +1204,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
struct lirc_buffer *rbuf = ir->l.rbuf;
unsigned int ret;
- dprintk("poll called\n");
+ dev_dbg(ir->l.dev, "poll called\n");
rx = get_ir_rx(ir);
if (rx == NULL) {
@@ -1220,7 +1212,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dprintk("poll result = POLLERR\n");
+ dev_dbg(ir->l.dev, "poll result = POLLERR\n");
return POLLERR;
}
@@ -1233,7 +1225,8 @@ static unsigned int poll(struct file *filep, poll_table *wait)
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
- dprintk("poll result = %s\n", ret ? "POLLIN|POLLRDNORM" : "none");
+ dev_dbg(ir->l.dev, "poll result = %s\n",
+ ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
@@ -1340,7 +1333,7 @@ static int close(struct inode *node, struct file *filep)
struct IR *ir = filep->private_data;
if (ir == NULL) {
- zilog_error("close: no private_data attached to the file!\n");
+ pr_err("close: no private_data attached to the file!\n");
return -ENODEV;
}
@@ -1452,7 +1445,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
int ret;
bool tx_probe = false;
- dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+ dev_dbg(&client->dev, "%s: %s on i2c-%d (%s), client addr=0x%02x\n",
__func__, id->name, adap->nr, adap->name, client->addr);
/*
@@ -1465,7 +1458,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
else if (tx_only) /* module option */
return -ENXIO;
- zilog_info("probing IR %s on %s (i2c-%d)\n",
+ pr_info("probing IR %s on %s (i2c-%d)\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_lock(&ir_devices_lock);
@@ -1547,7 +1540,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (rx == NULL && !tx_only) {
- zilog_info("probe of IR Tx on %s (i2c-%d) done. Waiting"
+ dev_info(tx->ir->l.dev, "probe of IR Tx on %s (i2c-%d) done. Waiting"
" on IR Rx.\n", adap->name, adap->nr);
goto out_ok;
}
@@ -1586,7 +1579,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- zilog_error("%s: could not start IR Rx polling thread"
+ dev_err(tx->ir->l.dev, "%s: could not start IR Rx polling thread"
"\n", __func__);
/* Failed kthread, so put back the ir ref */
put_ir_device(ir, true);
@@ -1599,7 +1592,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Tx client is also ready */
if (tx == NULL) {
- zilog_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
+ pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
" on IR Tx.\n", adap->name, adap->nr);
goto out_ok;
}
@@ -1609,12 +1602,12 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+ dev_err(tx->ir->l.dev, "%s: \"minor\" must be between 0 and %d (%d)!\n",
__func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
- zilog_info("IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
+ dev_info(ir->l.dev, "IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
adap->name, adap->nr, ir->l.minor);
out_ok:
@@ -1623,7 +1616,7 @@ out_ok:
if (tx != NULL)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- zilog_info("probe of IR %s on %s (i2c-%d) done\n",
+ dev_info(ir->l.dev, "probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
return 0;
@@ -1636,7 +1629,7 @@ out_put_xx:
out_put_ir:
put_ir_device(ir, true);
out_no_ir:
- zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+ dev_err(&client->dev, "%s: probing IR %s on %s (i2c-%d) failed with %d\n",
__func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
ret);
mutex_unlock(&ir_devices_lock);
@@ -1647,7 +1640,7 @@ static int __init zilog_init(void)
{
int ret;
- zilog_notify("Zilog/Hauppauge IR driver initializing\n");
+ pr_notice("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
@@ -1655,9 +1648,9 @@ static int __init zilog_init(void)
ret = i2c_add_driver(&driver);
if (ret)
- zilog_error("initialization failed\n");
+ pr_err("initialization failed\n");
else
- zilog_notify("initialization complete\n");
+ pr_notice("initialization complete\n");
return ret;
}
@@ -1667,7 +1660,7 @@ static void __exit zilog_exit(void)
i2c_del_driver(&driver);
/* if loaded */
fw_unload();
- zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
+ pr_notice("Zilog/Hauppauge IR driver unloaded\n");
}
module_init(zilog_init);
diff --git a/drivers/staging/media/mn88472/Kconfig b/drivers/staging/media/mn88472/Kconfig
new file mode 100644
index 000000000000..a85c90a60bce
--- /dev/null
+++ b/drivers/staging/media/mn88472/Kconfig
@@ -0,0 +1,7 @@
+config DVB_MN88472
+ tristate "Panasonic MN88472"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88472/Makefile b/drivers/staging/media/mn88472/Makefile
new file mode 100644
index 000000000000..5987b7e6d82a
--- /dev/null
+++ b/drivers/staging/media/mn88472/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_DVB_MN88472) += mn88472.o
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/dvb-frontends/
+ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88472/TODO b/drivers/staging/media/mn88472/TODO
new file mode 100644
index 000000000000..b90a14be3beb
--- /dev/null
+++ b/drivers/staging/media/mn88472/TODO
@@ -0,0 +1,21 @@
+The general driver quality is not yet good enough for mainline. Also, the
+other device drivers (USB bridge, tuner) needed for the Astrometa receiver
+in question could need some changes. However, if this driver is mainlined
+for some device other than the Astrometa, the unrelated TODOs could be
+skipped. In that case the rtl28xxu driver needs a module parameter to
+prevent this driver from loading.
+
+Required TODOs:
+* missing lock flags
+* I2C errors
+* tuner sensitivity
+
+*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
+checkpatch.pl tests. I don't want to waste my time reviewing this kind of
+trivial stuff. *Do not* add missing register I/O error checks. Those are
+missing because it is much easier to compare I2C data sniffs when there
+are fewer lines. Those error checks are about the last thing to be added.
+
+Patches should be submitted to:
+linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
+
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
new file mode 100644
index 000000000000..52de8f85d36c
--- /dev/null
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -0,0 +1,523 @@
+/*
+ * Panasonic MN88472 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mn88472_priv.h"
+
+static int mn88472_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 400;
+ return 0;
+}
+
+static int mn88472_set_frontend(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u32 if_frequency = 0;
+ u8 delivery_system_val, if_val[3], bw_val[7], bw_val2;
+
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d\n",
+ c->delivery_system, c->modulation,
+ c->frequency, c->symbol_rate, c->inversion);
+
+ if (!dev->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ delivery_system_val = 0x02;
+ break;
+ case SYS_DVBT2:
+ delivery_system_val = 0x03;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ delivery_system_val = 0x04;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ if (c->bandwidth_hz <= 6000000) {
+ /* IF 3570000 Hz, BW 6000000 Hz */
+ memcpy(if_val, "\x2c\x94\xdb", 3);
+ memcpy(bw_val, "\xbf\x55\x55\x15\x6b\x15\x6b", 7);
+ bw_val2 = 0x02;
+ } else if (c->bandwidth_hz <= 7000000) {
+ /* IF 4570000 Hz, BW 7000000 Hz */
+ memcpy(if_val, "\x39\x11\xbc", 3);
+ memcpy(bw_val, "\xa4\x00\x00\x0f\x2c\x0f\x2c", 7);
+ bw_val2 = 0x01;
+ } else if (c->bandwidth_hz <= 8000000) {
+ /* IF 4570000 Hz, BW 8000000 Hz */
+ memcpy(if_val, "\x39\x11\xbc", 3);
+ memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
+ bw_val2 = 0x00;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ /* IF 5070000 Hz, BW 8000000 Hz */
+ memcpy(if_val, "\x3f\x50\x2c", 3);
+ memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
+ bw_val2 = 0x00;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
+
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ if (ret)
+ goto err;
+
+ dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
+ }
+
+ switch (if_frequency) {
+ case 3570000:
+ case 4570000:
+ case 5070000:
+ break;
+ default:
+ dev_err(&client->dev, "IF frequency %d not supported\n",
+ if_frequency);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
+ ret = regmap_write(dev->regmap[2], 0xef, 0x13);
+ ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap[2], 0x00, 0x66);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x01, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x02, 0x01);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x04, bw_val2);
+ if (ret)
+ goto err;
+
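+ /*
+ * Program the 3-byte IF value (registers 0x10-0x12) and the 7-byte
+ * bandwidth filter table (registers 0x13-0x19) selected above.
+ */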
+ for (i = 0; i < sizeof(if_val); i++) {
+ ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < sizeof(bw_val); i++) {
+ ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
+ if (ret)
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ ret = regmap_write(dev->regmap[0], 0x07, 0x26);
+ ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
+ ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
+ ret = regmap_write(dev->regmap[0], 0xcd, 0x1f);
+ ret = regmap_write(dev->regmap[0], 0xd4, 0x0a);
+ ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
+ ret = regmap_write(dev->regmap[0], 0x00, 0xba);
+ ret = regmap_write(dev->regmap[0], 0x01, 0x13);
+ if (ret)
+ goto err;
+ break;
+ case SYS_DVBT2:
+ ret = regmap_write(dev->regmap[2], 0x2b, 0x13);
+ ret = regmap_write(dev->regmap[2], 0x4f, 0x05);
+ ret = regmap_write(dev->regmap[1], 0xf6, 0x05);
+ ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
+ ret = regmap_write(dev->regmap[0], 0xb4, 0xf6);
+ ret = regmap_write(dev->regmap[0], 0xcd, 0x01);
+ ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
+ ret = regmap_write(dev->regmap[0], 0xd6, 0x46);
+ ret = regmap_write(dev->regmap[2], 0x30, 0x80);
+ ret = regmap_write(dev->regmap[2], 0x32, 0x00);
+ if (ret)
+ goto err;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_write(dev->regmap[0], 0xb0, 0x0b);
+ ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
+ ret = regmap_write(dev->regmap[0], 0xcd, 0x17);
+ ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
+ ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
+ ret = regmap_write(dev->regmap[1], 0x00, 0xb0);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_write(dev->regmap[0], 0x46, 0x00);
+ ret = regmap_write(dev->regmap[0], 0xae, 0x00);
+ ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
+ ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
+ if (ret)
+ goto err;
+
+ dev->delivery_system = c->delivery_system;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ unsigned int utmp;
+
+ *status = 0;
+
+ if (!dev->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ /* FIXME: implement me */
+ utmp = 0x08; /* DVB-C lock value */
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_read(dev->regmap[1], 0x84, &utmp);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
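+ /* 0x08 indicates lock; for DVB-T/T2 it is faked above until implemented */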
+ if (utmp == 0x08)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_LOCK;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_init(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ int ret, len, remaining;
+ const struct firmware *fw = NULL;
+ u8 *fw_file = MN88472_FIRMWARE;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* set cold state by default */
+ dev->warm = false;
+
+ /* power on */
+ ret = regmap_write(dev->regmap[2], 0x05, 0x00);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap[2], 0x0b, "\x00\x00", 2);
+ if (ret)
+ goto err;
+
+ /* request the firmware; this call blocks and may time out */
+ ret = request_firmware(&fw, fw_file, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "firmare file '%s' not found\n",
+ fw_file);
+ goto err;
+ }
+
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_file);
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
+ if (ret)
+ goto err;
+
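+ /*
+ * Download the firmware to register 0xf6 in chunks of at most
+ * (i2c_wr_max - 1) bytes; the remaining byte of each I2C write
+ * carries the register address.
+ */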
+ for (remaining = fw->size; remaining > 0;
+ remaining -= (dev->i2c_wr_max - 1)) {
+ len = remaining;
+ if (len > (dev->i2c_wr_max - 1))
+ len = (dev->i2c_wr_max - 1);
+
+ ret = regmap_bulk_write(dev->regmap[0], 0xf6,
+ &fw->data[fw->size - remaining], len);
+ if (ret) {
+ dev_err(&client->dev,
+ "firmware download failed=%d\n", ret);
+ goto err;
+ }
+ }
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
+ if (ret)
+ goto err;
+
+ release_firmware(fw);
+ fw = NULL;
+
+ /* warm state */
+ dev->warm = true;
+
+ return 0;
+err:
+ if (fw)
+ release_firmware(fw);
+
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_sleep(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ int ret;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* power off */
+ ret = regmap_write(dev->regmap[2], 0x0b, 0x30);
+
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err;
+
+ dev->delivery_system = SYS_UNDEFINED;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static struct dvb_frontend_ops mn88472_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
+ .info = {
+ .name = "Panasonic MN88472",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
+ },
+
+ .get_tune_settings = mn88472_get_tune_settings,
+
+ .init = mn88472_init,
+ .sleep = mn88472_sleep,
+
+ .set_frontend = mn88472_set_frontend,
+
+ .read_status = mn88472_read_status,
+};
+
+static int mn88472_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mn88472_config *config = client->dev.platform_data;
+ struct mn88472_dev *dev;
+ int ret;
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ dev_dbg(&client->dev, "\n");
+
+ /* The caller must provide a pointer for the frontend we create. */
+ if (config->fe == NULL) {
+ dev_err(&client->dev, "frontend pointer not defined\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->i2c_wr_max = config->i2c_wr_max;
+ dev->client[0] = client;
+ dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
+ if (IS_ERR(dev->regmap[0])) {
+ ret = PTR_ERR(dev->regmap[0]);
+ goto err_kfree;
+ }
+
+ /* check demod answers to I2C */
+ ret = regmap_read(dev->regmap[0], 0x00, &utmp);
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+
+ /*
+ * Chip has three I2C addresses for different register pages. Used
+ * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
+ * 0x1a and 0x1c, so that each register page gets its own I2C client.
+ */
+ dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
+ if (dev->client[1] == NULL) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "I2C registration failed\n");
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+ }
+ dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
+ if (IS_ERR(dev->regmap[1])) {
+ ret = PTR_ERR(dev->regmap[1]);
+ goto err_client_1_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[1], dev);
+
+ dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
+ if (dev->client[2] == NULL) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "2nd I2C registration failed\n");
+ if (ret)
+ goto err_regmap_1_regmap_exit;
+ }
+ dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
+ if (IS_ERR(dev->regmap[2])) {
+ ret = PTR_ERR(dev->regmap[2]);
+ goto err_client_2_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[2], dev);
+
+ /* create dvb_frontend */
+ memcpy(&dev->fe.ops, &mn88472_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = client;
+ *config->fe = &dev->fe;
+ i2c_set_clientdata(client, dev);
+
+ dev_info(&client->dev, "Panasonic MN88472 successfully attached\n");
+ return 0;
+
+err_client_2_i2c_unregister_device:
+ i2c_unregister_device(dev->client[2]);
+err_regmap_1_regmap_exit:
+ regmap_exit(dev->regmap[1]);
+err_client_1_i2c_unregister_device:
+ i2c_unregister_device(dev->client[1]);
+err_regmap_0_regmap_exit:
+ regmap_exit(dev->regmap[0]);
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_remove(struct i2c_client *client)
+{
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ regmap_exit(dev->regmap[2]);
+ i2c_unregister_device(dev->client[2]);
+
+ regmap_exit(dev->regmap[1]);
+ i2c_unregister_device(dev->client[1]);
+
+ regmap_exit(dev->regmap[0]);
+
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id mn88472_id_table[] = {
+ {"mn88472", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
+
+static struct i2c_driver mn88472_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mn88472",
+ },
+ .probe = mn88472_probe,
+ .remove = mn88472_remove,
+ .id_table = mn88472_id_table,
+};
+
+module_i2c_driver(mn88472_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Panasonic MN88472 DVB-T/T2/C demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(MN88472_FIRMWARE);
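For reference, this demodulator is meant to be bound from a bridge driver as a
plain I2C client, with a struct mn88472_config (declared in mn88472.h) passed
as platform data so that mn88472_probe() above can return the created frontend
through config->fe. The sketch below is illustrative only: it assumes the
i2c_new_device()/request_module() interfaces of this kernel generation, a
hypothetical bridge state "struct my_bridge" holding the host I2C adapter, and
an i2c_wr_max value that depends on the bridge hardware.

/* Hypothetical attach helper -- not part of this patch. */
static struct dvb_frontend *my_bridge_attach_mn88472(struct my_bridge *d)
{
	struct dvb_frontend *fe = NULL;
	struct mn88472_config config = {
		.fe = &fe,		/* mn88472_probe() stores the frontend here */
		.i2c_wr_max = 22,	/* largest write the bridge can do in one go */
	};
	struct i2c_board_info info = {
		.type = "mn88472",	/* must match mn88472_id_table[] */
		.addr = 0x18,		/* main register page address */
		.platform_data = &config,
	};
	struct i2c_client *client;

	request_module("%s", info.type);
	client = i2c_new_device(d->adapter, &info);
	if (client == NULL || client->dev.driver == NULL)
		return NULL;		/* driver not loaded or probe failed */

	return fe;			/* set by mn88472_probe() via *config->fe */
}

In the tree this binding is expected to happen in the USB bridge driver
(rtl28xxu, per the TODO above) rather than in a standalone helper like this.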
diff --git a/drivers/staging/media/mn88472/mn88472_priv.h b/drivers/staging/media/mn88472/mn88472_priv.h
new file mode 100644
index 000000000000..1095949f040d
--- /dev/null
+++ b/drivers/staging/media/mn88472/mn88472_priv.h
@@ -0,0 +1,36 @@
+/*
+ * Panasonic MN88472 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MN88472_PRIV_H
+#define MN88472_PRIV_H
+
+#include "dvb_frontend.h"
+#include "mn88472.h"
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+
+#define MN88472_FIRMWARE "dvb-demod-mn88472-02.fw"
+
+struct mn88472_dev {
+ struct i2c_client *client[3];
+ struct regmap *regmap[3];
+ struct dvb_frontend fe;
+ u16 i2c_wr_max;
+ fe_delivery_system_t delivery_system;
+ bool warm; /* FW running */
+};
+
+#endif
diff --git a/drivers/staging/media/mn88473/Kconfig b/drivers/staging/media/mn88473/Kconfig
new file mode 100644
index 000000000000..6c9ebf51c2c7
--- /dev/null
+++ b/drivers/staging/media/mn88473/Kconfig
@@ -0,0 +1,7 @@
+config DVB_MN88473
+ tristate "Panasonic MN88473"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88473/Makefile b/drivers/staging/media/mn88473/Makefile
new file mode 100644
index 000000000000..fac55410ce55
--- /dev/null
+++ b/drivers/staging/media/mn88473/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_DVB_MN88473) += mn88473.o
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/dvb-frontends/
+ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88473/TODO b/drivers/staging/media/mn88473/TODO
new file mode 100644
index 000000000000..b90a14be3beb
--- /dev/null
+++ b/drivers/staging/media/mn88473/TODO
@@ -0,0 +1,21 @@
+The general driver quality is not yet good enough for mainline. Also, the
+other device drivers (USB bridge, tuner) needed for the Astrometa receiver
+in question could need some changes. However, if this driver is mainlined
+for some device other than the Astrometa, the unrelated TODOs could be
+skipped. In that case the rtl28xxu driver needs a module parameter to
+prevent this driver from loading.
+
+Required TODOs:
+* missing lock flags
+* I2C errors
+* tuner sensitivity
+
+*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
+checkpatch.pl tests. I don't want to waste my time reviewing this kind of
+trivial stuff. *Do not* add missing register I/O error checks. Those are
+missing because it is much easier to compare I2C data sniffs when there
+are fewer lines. Those error checks are about the last thing to be added.
+
+Patches should be submitted to:
+linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
+
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
new file mode 100644
index 000000000000..a333744b76b9
--- /dev/null
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -0,0 +1,464 @@
+/*
+ * Panasonic MN88473 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mn88473_priv.h"
+
+static int mn88473_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 1000;
+ return 0;
+}
+
+static int mn88473_set_frontend(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88473_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u32 if_frequency;
+ u8 delivery_system_val, if_val[3], bw_val[7];
+
+ dev_dbg(&client->dev,
+ "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
+ c->delivery_system, c->modulation,
+ c->frequency, c->bandwidth_hz, c->symbol_rate,
+ c->inversion, c->stream_id);
+
+ if (!dev->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ delivery_system_val = 0x02;
+ break;
+ case SYS_DVBT2:
+ delivery_system_val = 0x03;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ delivery_system_val = 0x04;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ if (c->bandwidth_hz <= 6000000) {
+ /* IF 3570000 Hz, BW 6000000 Hz */
+ memcpy(if_val, "\x24\x8e\x8a", 3);
+ memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
+ } else if (c->bandwidth_hz <= 7000000) {
+ /* IF 4570000 Hz, BW 7000000 Hz */
+ memcpy(if_val, "\x2e\xcb\xfb", 3);
+ memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
+ } else if (c->bandwidth_hz <= 8000000) {
+ /* IF 4570000 Hz, BW 8000000 Hz */
+ memcpy(if_val, "\x2e\xcb\xfb", 3);
+ memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ /* IF 5070000 Hz, BW 8000000 Hz */
+ memcpy(if_val, "\x33\xea\xb3", 3);
+ memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
+
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ if (ret)
+ goto err;
+
+ dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
+ } else {
+ if_frequency = 0;
+ }
+
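+ /*
+ * A tuner without get_if_frequency() leaves if_frequency at 0, which
+ * is rejected as unsupported below.
+ */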
+ switch (if_frequency) {
+ case 3570000:
+ case 4570000:
+ case 5070000:
+ break;
+ default:
+ dev_err(&client->dev, "IF frequency %d not supported\n",
+ if_frequency);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_write(dev->regmap[2], 0x05, 0x00);
+ ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
+ ret = regmap_write(dev->regmap[2], 0xef, 0x13);
+ ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
+ ret = regmap_write(dev->regmap[2], 0x00, 0x18);
+ ret = regmap_write(dev->regmap[2], 0x01, 0x01);
+ ret = regmap_write(dev->regmap[2], 0x02, 0x21);
+ ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
+ ret = regmap_write(dev->regmap[2], 0x0b, 0x00);
+
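+ /* Program the IF value (0x10-0x12) and bandwidth filter table (0x13-0x19) */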
+ for (i = 0; i < sizeof(if_val); i++) {
+ ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < sizeof(bw_val); i++) {
+ ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = regmap_write(dev->regmap[2], 0x2d, 0x3b);
+ ret = regmap_write(dev->regmap[2], 0x2e, 0x00);
+ ret = regmap_write(dev->regmap[2], 0x56, 0x0d);
+ ret = regmap_write(dev->regmap[0], 0x01, 0xba);
+ ret = regmap_write(dev->regmap[0], 0x02, 0x13);
+ ret = regmap_write(dev->regmap[0], 0x03, 0x80);
+ ret = regmap_write(dev->regmap[0], 0x04, 0xba);
+ ret = regmap_write(dev->regmap[0], 0x05, 0x91);
+ ret = regmap_write(dev->regmap[0], 0x07, 0xe7);
+ ret = regmap_write(dev->regmap[0], 0x08, 0x28);
+ ret = regmap_write(dev->regmap[0], 0x0a, 0x1a);
+ ret = regmap_write(dev->regmap[0], 0x13, 0x1f);
+ ret = regmap_write(dev->regmap[0], 0x19, 0x03);
+ ret = regmap_write(dev->regmap[0], 0x1d, 0xb0);
+ ret = regmap_write(dev->regmap[0], 0x2a, 0x72);
+ ret = regmap_write(dev->regmap[0], 0x2d, 0x00);
+ ret = regmap_write(dev->regmap[0], 0x3c, 0x00);
+ ret = regmap_write(dev->regmap[0], 0x3f, 0xf8);
+ ret = regmap_write(dev->regmap[0], 0x40, 0xf4);
+ ret = regmap_write(dev->regmap[0], 0x41, 0x08);
+ ret = regmap_write(dev->regmap[0], 0xd2, 0x29);
+ ret = regmap_write(dev->regmap[0], 0xd4, 0x55);
+ ret = regmap_write(dev->regmap[1], 0x10, 0x10);
+ ret = regmap_write(dev->regmap[1], 0x11, 0xab);
+ ret = regmap_write(dev->regmap[1], 0x12, 0x0d);
+ ret = regmap_write(dev->regmap[1], 0x13, 0xae);
+ ret = regmap_write(dev->regmap[1], 0x14, 0x1d);
+ ret = regmap_write(dev->regmap[1], 0x15, 0x9d);
+ ret = regmap_write(dev->regmap[1], 0xbe, 0x08);
+ ret = regmap_write(dev->regmap[2], 0x09, 0x08);
+ ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ ret = regmap_write(dev->regmap[0], 0xb2, 0x37);
+ ret = regmap_write(dev->regmap[0], 0xd7, 0x04);
+ ret = regmap_write(dev->regmap[2], 0x32, 0x80);
+ ret = regmap_write(dev->regmap[2], 0x36, 0x00);
+ ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
+ if (ret)
+ goto err;
+
+ dev->delivery_system = c->delivery_system;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88473_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88473_dev *dev = i2c_get_clientdata(client);
+ int ret;
+
+ *status = 0;
+
+ if (!dev->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_LOCK;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88473_init(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88473_dev *dev = i2c_get_clientdata(client);
+ int ret, len, remaining;
+ const struct firmware *fw = NULL;
+ u8 *fw_file = MN88473_FIRMWARE;
+
+ dev_dbg(&client->dev, "\n");
+
+ if (dev->warm)
+ return 0;
+
+ /* request the firmware; this call blocks and may time out */
+ ret = request_firmware(&fw, fw_file, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "firmare file '%s' not found\n", fw_file);
+ goto err_request_firmware;
+ }
+
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ fw_file);
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
+ if (ret)
+ goto err;
+
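+ /*
+ * The firmware is written to register 0xf6 in chunks of at most
+ * (i2c_wr_max - 1) bytes, leaving room for the register address byte.
+ */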
+ for (remaining = fw->size; remaining > 0;
+ remaining -= (dev->i2c_wr_max - 1)) {
+ len = remaining;
+ if (len > (dev->i2c_wr_max - 1))
+ len = (dev->i2c_wr_max - 1);
+
+ ret = regmap_bulk_write(dev->regmap[0], 0xf6,
+ &fw->data[fw->size - remaining], len);
+ if (ret) {
+ dev_err(&client->dev, "firmware download failed=%d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
+ if (ret)
+ goto err;
+
+ release_firmware(fw);
+ fw = NULL;
+
+ /* warm state */
+ dev->warm = true;
+
+ return 0;
+
+err:
+ release_firmware(fw);
+err_request_firmware:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88473_sleep(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88473_dev *dev = i2c_get_clientdata(client);
+ int ret;
+
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err;
+
+ dev->delivery_system = SYS_UNDEFINED;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static struct dvb_frontend_ops mn88473_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
+ .info = {
+ .name = "Panasonic MN88473",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
+ },
+
+ .get_tune_settings = mn88473_get_tune_settings,
+
+ .init = mn88473_init,
+ .sleep = mn88473_sleep,
+
+ .set_frontend = mn88473_set_frontend,
+
+ .read_status = mn88473_read_status,
+};
+
+static int mn88473_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mn88473_config *config = client->dev.platform_data;
+ struct mn88473_dev *dev;
+ int ret;
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ dev_dbg(&client->dev, "\n");
+
+ /* The caller must provide a pointer for the frontend we create. */
+ if (config->fe == NULL) {
+ dev_err(&client->dev, "frontend pointer not defined\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->i2c_wr_max = config->i2c_wr_max;
+ dev->client[0] = client;
+ dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
+ if (IS_ERR(dev->regmap[0])) {
+ ret = PTR_ERR(dev->regmap[0]);
+ goto err_kfree;
+ }
+
+ /* check demod answers to I2C */
+ ret = regmap_read(dev->regmap[0], 0x00, &utmp);
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+
+ /*
+ * Chip has three I2C addresses for different register pages. Used
+ * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
+ * 0x1a and 0x1c, so that each register page gets its own I2C client.
+ */
+ dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
+ if (dev->client[1] == NULL) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "I2C registration failed\n");
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+ }
+ dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
+ if (IS_ERR(dev->regmap[1])) {
+ ret = PTR_ERR(dev->regmap[1]);
+ goto err_client_1_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[1], dev);
+
+ dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
+ if (dev->client[2] == NULL) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "2nd I2C registration failed\n");
+ if (ret)
+ goto err_regmap_1_regmap_exit;
+ }
+ dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
+ if (IS_ERR(dev->regmap[2])) {
+ ret = PTR_ERR(dev->regmap[2]);
+ goto err_client_2_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[2], dev);
+
+ /* create dvb_frontend */
+ memcpy(&dev->fe.ops, &mn88473_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = client;
+ *config->fe = &dev->fe;
+ i2c_set_clientdata(client, dev);
+
+ dev_info(&dev->client[0]->dev, "Panasonic MN88473 successfully attached\n");
+ return 0;
+
+err_client_2_i2c_unregister_device:
+ i2c_unregister_device(dev->client[2]);
+err_regmap_1_regmap_exit:
+ regmap_exit(dev->regmap[1]);
+err_client_1_i2c_unregister_device:
+ i2c_unregister_device(dev->client[1]);
+err_regmap_0_regmap_exit:
+ regmap_exit(dev->regmap[0]);
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88473_remove(struct i2c_client *client)
+{
+ struct mn88473_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ regmap_exit(dev->regmap[2]);
+ i2c_unregister_device(dev->client[2]);
+
+ regmap_exit(dev->regmap[1]);
+ i2c_unregister_device(dev->client[1]);
+
+ regmap_exit(dev->regmap[0]);
+
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id mn88473_id_table[] = {
+ {"mn88473", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
+
+static struct i2c_driver mn88473_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mn88473",
+ },
+ .probe = mn88473_probe,
+ .remove = mn88473_remove,
+ .id_table = mn88473_id_table,
+};
+
+module_i2c_driver(mn88473_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Panasonic MN88473 DVB-T/T2/C demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(MN88473_FIRMWARE);
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/staging/media/mn88473/mn88473_priv.h
new file mode 100644
index 000000000000..78af112fb41d
--- /dev/null
+++ b/drivers/staging/media/mn88473/mn88473_priv.h
@@ -0,0 +1,36 @@
+/*
+ * Panasonic MN88473 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MN88473_PRIV_H
+#define MN88473_PRIV_H
+
+#include "dvb_frontend.h"
+#include "mn88473.h"
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+
+#define MN88473_FIRMWARE "dvb-demod-mn88473-01.fw"
+
+struct mn88473_dev {
+ struct i2c_client *client[3];
+ struct regmap *regmap[3];
+ struct dvb_frontend fe;
+ u16 i2c_wr_max;
+ fe_delivery_system_t delivery_system;
+ bool warm; /* FW running */
+};
+
+#endif
diff --git a/drivers/staging/media/omap24xx/Kconfig b/drivers/staging/media/omap24xx/Kconfig
deleted file mode 100644
index 82e569a21c46..000000000000
--- a/drivers/staging/media/omap24xx/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-config VIDEO_V4L2_INT_DEVICE
- tristate
-
-config VIDEO_OMAP2
- tristate "OMAP2 Camera Capture Interface driver (DEPRECATED)"
- depends on VIDEO_DEV && ARCH_OMAP2
- select VIDEOBUF_DMA_SG
- select VIDEO_V4L2_INT_DEVICE
- ---help---
- This is a v4l2 driver for the TI OMAP2 camera capture interface
-
- It uses the deprecated int-device API. Since this driver is no
- longer actively maintained and nobody is interested in converting
- it to the subdev API, this driver will be removed soon.
-
- If you do want to keep this driver in the kernel, and are willing
- to convert it to the subdev API, then please contact the linux-media
- mailinglist.
-
-config VIDEO_TCM825X
- tristate "TCM825x camera sensor support (DEPRECATED)"
- depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
- select VIDEO_V4L2_INT_DEVICE
- ---help---
- This is a driver for the Toshiba TCM825x VGA camera sensor.
- It is used for example in Nokia N800.
-
- It uses the deprecated int-device API. Since this driver is no
- longer actively maintained and nobody is interested in converting
- it to the subdev API, this driver will be removed soon.
-
- If you do want to keep this driver in the kernel, and are willing
- to convert it to the subdev API, then please contact the linux-media
- mailinglist.
diff --git a/drivers/staging/media/omap24xx/Makefile b/drivers/staging/media/omap24xx/Makefile
deleted file mode 100644
index c2e7175599c2..000000000000
--- a/drivers/staging/media/omap24xx/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-
-obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
-obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
-obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
diff --git a/drivers/staging/media/omap24xx/omap24xxcam-dma.c b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
deleted file mode 100644
index c427eb94ea66..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam-dma.c
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * drivers/media/platform/omap24xxcam-dma.c
- *
- * Copyright (C) 2004 MontaVista Software, Inc.
- * Copyright (C) 2004 Texas Instruments.
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * Based on code from Andy Lowe <source@mvista.com> and
- * David Cohen <david.cohen@indt.org.br>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/scatterlist.h>
-
-#include "omap24xxcam.h"
-
-/*
- *
- * DMA hardware.
- *
- */
-
-/* Ack all interrupt on CSR and IRQSTATUS_L0 */
-static void omap24xxcam_dmahw_ack_all(void __iomem *base)
-{
- u32 csr;
- int i;
-
- for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
- csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
- /* ack interrupt in CSR */
- omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
- }
- omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
-}
-
-/* Ack dmach on CSR and IRQSTATUS_L0 */
-static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
-{
- u32 csr;
-
- csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
- /* ack interrupt in CSR */
- omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
- /* ack interrupt in IRQSTATUS */
- omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));
-
- return csr;
-}
-
-static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
-{
- return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
-}
-
-static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
- dma_addr_t start, u32 len)
-{
- omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
- CAMDMA_CCR_SEL_SRC_DST_SYNC
- | CAMDMA_CCR_BS
- | CAMDMA_CCR_DST_AMODE_POST_INC
- | CAMDMA_CCR_SRC_AMODE_POST_INC
- | CAMDMA_CCR_FS
- | CAMDMA_CCR_WR_ACTIVE
- | CAMDMA_CCR_RD_ACTIVE
- | CAMDMA_CCR_SYNCHRO_CAMERA);
- omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
- omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
- omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
- omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
- CAMDMA_CSDP_WRITE_MODE_POSTED
- | CAMDMA_CSDP_DST_BURST_EN_32
- | CAMDMA_CSDP_DST_PACKED
- | CAMDMA_CSDP_SRC_BURST_EN_32
- | CAMDMA_CSDP_SRC_PACKED
- | CAMDMA_CSDP_DATA_TYPE_8BITS);
- omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
- omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
- omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
- omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
- omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
- omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
- omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
- CAMDMA_CSR_MISALIGNED_ERR
- | CAMDMA_CSR_SECURE_ERR
- | CAMDMA_CSR_TRANS_ERR
- | CAMDMA_CSR_BLOCK
- | CAMDMA_CSR_DROP);
- omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
- CAMDMA_CICR_MISALIGNED_ERR_IE
- | CAMDMA_CICR_SECURE_ERR_IE
- | CAMDMA_CICR_TRANS_ERR_IE
- | CAMDMA_CICR_BLOCK_IE
- | CAMDMA_CICR_DROP_IE);
-}
-
-static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
-{
- omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
- CAMDMA_CCR_SEL_SRC_DST_SYNC
- | CAMDMA_CCR_BS
- | CAMDMA_CCR_DST_AMODE_POST_INC
- | CAMDMA_CCR_SRC_AMODE_POST_INC
- | CAMDMA_CCR_ENABLE
- | CAMDMA_CCR_FS
- | CAMDMA_CCR_SYNCHRO_CAMERA);
-}
-
-static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
- int free_dmach)
-{
- int prev_dmach, ch;
-
- if (dmach == 0)
- prev_dmach = NUM_CAMDMA_CHANNELS - 1;
- else
- prev_dmach = dmach - 1;
- omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
- CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
- /* Did we chain the DMA transfer before the previous one
- * finished?
- */
- ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
- while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
- & CAMDMA_CCR_ENABLE)) {
- if (ch == dmach) {
- /* The previous transfer has ended and this one
- * hasn't started, so we must not have chained
- * to the previous one in time. We'll have to
- * start it now.
- */
- omap24xxcam_dmahw_transfer_start(base, dmach);
- break;
- }
- ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
- }
-}
-
-/* Abort all chained DMA transfers. After all transfers have been
- * aborted and the DMA controller is idle, the completion routines for
- * any aborted transfers will be called in sequence. The DMA
- * controller may not be idle after this routine completes, because
- * the completion routines might start new transfers.
- */
-static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
-{
- /* mask all interrupts from this channel */
- omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
- /* unlink this channel */
- omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
- CAMDMA_CLNK_CTRL_ENABLE_LNK);
- /* disable this channel */
- omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
-}
-
-static void omap24xxcam_dmahw_init(void __iomem *base)
-{
- omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
- CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
- | CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE
- | CAMDMA_OCP_SYSCONFIG_AUTOIDLE);
-
- omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,
- CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH);
-
- omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
-}
-
-/*
- *
- * Individual DMA channel handling.
- *
- */
-
-/* Start a DMA transfer from the camera to memory.
- * Returns zero if the transfer was successfully started, or non-zero if all
- * DMA channels are already in use or starting is currently inhibited.
- */
-static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
- u32 len, dma_callback_t callback, void *arg)
-{
- unsigned long flags;
- int dmach;
-
- spin_lock_irqsave(&dma->lock, flags);
-
- if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
- spin_unlock_irqrestore(&dma->lock, flags);
- return -EBUSY;
- }
-
- dmach = dma->next_dmach;
-
- dma->ch_state[dmach].callback = callback;
- dma->ch_state[dmach].arg = arg;
-
- omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);
-
- /* We're ready to start the DMA transfer. */
-
- if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
- /* A transfer is already in progress, so try to chain to it. */
- omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
- dma->free_dmach);
- } else {
- /* No transfer is in progress, so we'll just start this one
- * now.
- */
- omap24xxcam_dmahw_transfer_start(dma->base, dmach);
- }
-
- dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
- dma->free_dmach--;
-
- spin_unlock_irqrestore(&dma->lock, flags);
-
- return 0;
-}
-
-/* Abort all chained DMA transfers. After all transfers have been
- * aborted and the DMA controller is idle, the completion routines for
- * any aborted transfers will be called in sequence. The DMA
- * controller may not be idle after this routine completes, because
- * the completion routines might start new transfers.
- */
-static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
-{
- unsigned long flags;
- int dmach, i, free_dmach;
- dma_callback_t callback;
- void *arg;
-
- spin_lock_irqsave(&dma->lock, flags);
-
- /* stop any DMA transfers in progress */
- dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
- for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
- omap24xxcam_dmahw_abort_ch(dma->base, dmach);
- dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
- }
-
- /* We have to be careful here because the callback routine
- * might start a new DMA transfer, and we only want to abort
- * transfers that were started before this routine was called.
- */
- free_dmach = dma->free_dmach;
- while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
- (free_dmach < NUM_CAMDMA_CHANNELS)) {
- dmach = (dma->next_dmach + dma->free_dmach)
- % NUM_CAMDMA_CHANNELS;
- callback = dma->ch_state[dmach].callback;
- arg = dma->ch_state[dmach].arg;
- dma->free_dmach++;
- free_dmach++;
- if (callback) {
- /* leave interrupts disabled during callback */
- spin_unlock(&dma->lock);
- (*callback) (dma, csr, arg);
- spin_lock(&dma->lock);
- }
- }
-
- spin_unlock_irqrestore(&dma->lock, flags);
-}
-
-/* Abort all chained DMA transfers. After all transfers have been
- * aborted and the DMA controller is idle, the completion routines for
- * any aborted transfers will be called in sequence. If the completion
- * routines attempt to start a new DMA transfer it will fail, so the
- * DMA controller will be idle after this routine completes.
- */
-static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
-{
- atomic_inc(&dma->dma_stop);
- omap24xxcam_dma_abort(dma, csr);
- atomic_dec(&dma->dma_stop);
-}
-
-/* Camera DMA interrupt service routine. */
-void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma)
-{
- int dmach;
- dma_callback_t callback;
- void *arg;
- u32 csr;
- const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
- | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
- | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
-
- spin_lock(&dma->lock);
-
- if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
- /* A camera DMA interrupt occurred while all channels
- * are idle, so we'll acknowledge the interrupt in the
- * IRQSTATUS register and exit.
- */
- omap24xxcam_dmahw_ack_all(dma->base);
- spin_unlock(&dma->lock);
- return;
- }
-
- while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
- dmach = (dma->next_dmach + dma->free_dmach)
- % NUM_CAMDMA_CHANNELS;
- if (omap24xxcam_dmahw_running(dma->base, dmach)) {
- /* This buffer hasn't finished yet, so we're done. */
- break;
- }
- csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
- if (csr & csr_error) {
- /* A DMA error occurred, so stop all DMA
- * transfers in progress.
- */
- spin_unlock(&dma->lock);
- omap24xxcam_dma_stop(dma, csr);
- return;
- }
- callback = dma->ch_state[dmach].callback;
- arg = dma->ch_state[dmach].arg;
- dma->free_dmach++;
- if (callback) {
- spin_unlock(&dma->lock);
- (*callback) (dma, csr, arg);
- spin_lock(&dma->lock);
- }
- }
-
- spin_unlock(&dma->lock);
-
- omap24xxcam_sgdma_process(
- container_of(dma, struct omap24xxcam_sgdma, dma));
-}
-
-void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dma->lock, flags);
-
- omap24xxcam_dmahw_init(dma->base);
-
- spin_unlock_irqrestore(&dma->lock, flags);
-}
-
-static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
- void __iomem *base)
-{
- int ch;
-
- /* group all channels on DMA IRQ0 and unmask irq */
- spin_lock_init(&dma->lock);
- dma->base = base;
- dma->free_dmach = NUM_CAMDMA_CHANNELS;
- dma->next_dmach = 0;
- for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
- dma->ch_state[ch].callback = NULL;
- dma->ch_state[ch].arg = NULL;
- }
-}
-
-/*
- *
- * Scatter-gather DMA.
- *
- * High-level DMA construct for transferring whole picture frames to
- * memory that is discontinuous.
- *
- */
-
-/* DMA completion routine for the scatter-gather DMA fragments. */
-static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
- void *arg)
-{
- struct omap24xxcam_sgdma *sgdma =
- container_of(dma, struct omap24xxcam_sgdma, dma);
- int sgslot = (int)arg;
- struct sgdma_state *sg_state;
- const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
- | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
- | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
-
- spin_lock(&sgdma->lock);
-
- /* We got an interrupt, we can remove the timer */
- del_timer(&sgdma->reset_timer);
-
- sg_state = sgdma->sg_state + sgslot;
- if (!sg_state->queued_sglist) {
- spin_unlock(&sgdma->lock);
- printk(KERN_ERR "%s: sgdma completed when none queued!\n",
- __func__);
- return;
- }
-
- sg_state->csr |= csr;
- if (!--sg_state->queued_sglist) {
- /* Queue for this sglist is empty, so check to see if we're
- * done.
- */
- if ((sg_state->next_sglist == sg_state->sglen)
- || (sg_state->csr & csr_error)) {
- sgdma_callback_t callback = sg_state->callback;
- void *arg = sg_state->arg;
- u32 sg_csr = sg_state->csr;
- /* All done with this sglist */
- sgdma->free_sgdma++;
- if (callback) {
- spin_unlock(&sgdma->lock);
- (*callback) (sgdma, sg_csr, arg);
- return;
- }
- }
- }
-
- spin_unlock(&sgdma->lock);
-}
-
-/* Start queued scatter-gather DMA transfers. */
-void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma)
-{
- unsigned long flags;
- int queued_sgdma, sgslot;
- struct sgdma_state *sg_state;
- const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
- | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
- | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
-
- spin_lock_irqsave(&sgdma->lock, flags);
-
- queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
- sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
- while (queued_sgdma > 0) {
- sg_state = sgdma->sg_state + sgslot;
- while ((sg_state->next_sglist < sg_state->sglen) &&
- !(sg_state->csr & csr_error)) {
- const struct scatterlist *sglist;
- unsigned int len;
-
- sglist = sg_state->sglist + sg_state->next_sglist;
- /* try to start the next DMA transfer */
- if (sg_state->next_sglist + 1 == sg_state->sglen) {
- /*
- * On the last sg, we handle the case where
- * cam->img.pix.sizeimage % PAGE_ALIGN != 0
- */
- len = sg_state->len - sg_state->bytes_read;
- } else {
- len = sg_dma_len(sglist);
- }
-
- if (omap24xxcam_dma_start(&sgdma->dma,
- sg_dma_address(sglist),
- len,
- omap24xxcam_sgdma_callback,
- (void *)sgslot)) {
- /* DMA start failed */
- spin_unlock_irqrestore(&sgdma->lock, flags);
- return;
- }
- /* DMA start was successful */
- sg_state->next_sglist++;
- sg_state->bytes_read += len;
- sg_state->queued_sglist++;
-
- /* We start the reset timer */
- mod_timer(&sgdma->reset_timer, jiffies + HZ);
- }
- queued_sgdma--;
- sgslot = (sgslot + 1) % NUM_SG_DMA;
- }
-
- spin_unlock_irqrestore(&sgdma->lock, flags);
-}
-
-/*
- * Queue a scatter-gather DMA transfer from the camera to memory.
- * Returns zero if the transfer was successfully queued, or non-zero
- * if all of the scatter-gather slots are already in use.
- */
-int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
- const struct scatterlist *sglist, int sglen,
- int len, sgdma_callback_t callback, void *arg)
-{
- unsigned long flags;
- struct sgdma_state *sg_state;
-
- if ((sglen < 0) || ((sglen > 0) && !sglist))
- return -EINVAL;
-
- spin_lock_irqsave(&sgdma->lock, flags);
-
- if (!sgdma->free_sgdma) {
- spin_unlock_irqrestore(&sgdma->lock, flags);
- return -EBUSY;
- }
-
- sg_state = sgdma->sg_state + sgdma->next_sgdma;
-
- sg_state->sglist = sglist;
- sg_state->sglen = sglen;
- sg_state->next_sglist = 0;
- sg_state->bytes_read = 0;
- sg_state->len = len;
- sg_state->queued_sglist = 0;
- sg_state->csr = 0;
- sg_state->callback = callback;
- sg_state->arg = arg;
-
- sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
- sgdma->free_sgdma--;
-
- spin_unlock_irqrestore(&sgdma->lock, flags);
-
- omap24xxcam_sgdma_process(sgdma);
-
- return 0;
-}
-
-/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
- * Any queued scatter-gather DMA transactions that have not yet been started
- * will remain queued. The DMA controller will be idle after this routine
- * completes. When the scatter-gather queue is restarted, the next
- * scatter-gather DMA transfer will begin at the start of a new transaction.
- */
-void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
-{
- unsigned long flags;
- int sgslot;
- struct sgdma_state *sg_state;
- u32 csr = CAMDMA_CSR_TRANS_ERR;
-
- /* stop any DMA transfers in progress */
- omap24xxcam_dma_stop(&sgdma->dma, csr);
-
- spin_lock_irqsave(&sgdma->lock, flags);
-
- if (sgdma->free_sgdma < NUM_SG_DMA) {
- sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
- sg_state = sgdma->sg_state + sgslot;
- if (sg_state->next_sglist != 0) {
- /* This DMA transfer was in progress, so abort it. */
- sgdma_callback_t callback = sg_state->callback;
- void *arg = sg_state->arg;
-
- sgdma->free_sgdma++;
- if (callback) {
- /* leave interrupts masked */
- spin_unlock(&sgdma->lock);
- (*callback) (sgdma, csr, arg);
- spin_lock(&sgdma->lock);
- }
- }
- }
-
- spin_unlock_irqrestore(&sgdma->lock, flags);
-}
-
-void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- void __iomem *base,
- void (*reset_callback)(unsigned long data),
- unsigned long reset_callback_data)
-{
- int sg;
-
- spin_lock_init(&sgdma->lock);
- sgdma->free_sgdma = NUM_SG_DMA;
- sgdma->next_sgdma = 0;
- for (sg = 0; sg < NUM_SG_DMA; sg++) {
- sgdma->sg_state[sg].sglen = 0;
- sgdma->sg_state[sg].next_sglist = 0;
- sgdma->sg_state[sg].bytes_read = 0;
- sgdma->sg_state[sg].queued_sglist = 0;
- sgdma->sg_state[sg].csr = 0;
- sgdma->sg_state[sg].callback = NULL;
- sgdma->sg_state[sg].arg = NULL;
- }
-
- omap24xxcam_dma_init(&sgdma->dma, base);
- setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
-}
diff --git a/drivers/staging/media/omap24xx/omap24xxcam.c b/drivers/staging/media/omap24xx/omap24xxcam.c
deleted file mode 100644
index d590b3e8b70c..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam.c
+++ /dev/null
@@ -1,1882 +0,0 @@
-/*
- * drivers/media/platform/omap24xxcam.c
- *
- * OMAP 2 camera block driver.
- *
- * Copyright (C) 2004 MontaVista Software, Inc.
- * Copyright (C) 2004 Texas Instruments.
- * Copyright (C) 2007-2008 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * Based on code from Andy Lowe <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/videodev2.h>
-#include <linux/pci.h> /* needed for videobufs */
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-
-#include "omap24xxcam.h"
-
-#define OMAP24XXCAM_VERSION "0.0.1"
-
-#define RESET_TIMEOUT_NS 10000
-
-static void omap24xxcam_reset(struct omap24xxcam_device *cam);
-static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam);
-static void omap24xxcam_device_unregister(struct v4l2_int_device *s);
-static int omap24xxcam_remove(struct platform_device *pdev);
-
-/* module parameters */
-static int video_nr = -1; /* video device minor (-1 ==> auto assign) */
-/*
- * Maximum amount of memory to use for capture buffers.
- * Default is 4800KB, enough to double-buffer SXGA.
- */
-static int capture_mem = 1280 * 960 * 2 * 2;
-
-static struct v4l2_int_device omap24xxcam;
-
-/*
- *
- * Clocks.
- *
- */
-
-static void omap24xxcam_clock_put(struct omap24xxcam_device *cam)
-{
- if (cam->ick != NULL && !IS_ERR(cam->ick))
- clk_put(cam->ick);
- if (cam->fck != NULL && !IS_ERR(cam->fck))
- clk_put(cam->fck);
-
- cam->ick = cam->fck = NULL;
-}
-
-static int omap24xxcam_clock_get(struct omap24xxcam_device *cam)
-{
- int rval = 0;
-
- cam->fck = clk_get(cam->dev, "fck");
- if (IS_ERR(cam->fck)) {
- dev_err(cam->dev, "can't get camera fck");
- rval = PTR_ERR(cam->fck);
- omap24xxcam_clock_put(cam);
- return rval;
- }
-
- cam->ick = clk_get(cam->dev, "ick");
- if (IS_ERR(cam->ick)) {
- dev_err(cam->dev, "can't get camera ick");
- rval = PTR_ERR(cam->ick);
- omap24xxcam_clock_put(cam);
- }
-
- return rval;
-}
-
-static void omap24xxcam_clock_on(struct omap24xxcam_device *cam)
-{
- clk_enable(cam->fck);
- clk_enable(cam->ick);
-}
-
-static void omap24xxcam_clock_off(struct omap24xxcam_device *cam)
-{
- clk_disable(cam->fck);
- clk_disable(cam->ick);
-}
-
-/*
- *
- * Camera core
- *
- */
-
-/*
- * Set xclk.
- *
- * To disable xclk, use value zero.
- */
-static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam,
- u32 xclk)
-{
- if (xclk) {
- u32 divisor = CAM_MCLK / xclk;
-
- if (divisor == 1)
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
- CC_CTRL_XCLK,
- CC_CTRL_XCLK_DIV_BYPASS);
- else
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
- CC_CTRL_XCLK, divisor);
- } else
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
- CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW);
-}
-
-static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam)
-{
- /*
- * Setting the camera core AUTOIDLE bit causes problems with frame
- * synchronization, so we will clear the AUTOIDLE bit instead.
- */
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG,
- CC_SYSCONFIG_AUTOIDLE);
-
- /* program the camera interface DMA packet size */
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA,
- CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1));
-
- /* enable camera core error interrupts */
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE,
- CC_IRQENABLE_FW_ERR_IRQ
- | CC_IRQENABLE_FSC_ERR_IRQ
- | CC_IRQENABLE_SSC_ERR_IRQ
- | CC_IRQENABLE_FIFO_OF_IRQ);
-}
-
-/*
- * Enable the camera core.
- *
- * Data transfer to the camera DMA starts from the start of the next frame.
- */
-static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam)
-{
-
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
- cam->cc_ctrl);
-}
-
-/*
- * Disable camera core.
- *
- * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The
- * core internal state machines will be reset. Use
- * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current
- * frame completely.
- */
-static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam)
-{
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
- CC_CTRL_CC_RST);
-}
-
-/* Interrupt service routine for camera core interrupts. */
-static void omap24xxcam_core_isr(struct omap24xxcam_device *cam)
-{
- u32 cc_irqstatus;
- const u32 cc_irqstatus_err =
- CC_IRQSTATUS_FW_ERR_IRQ
- | CC_IRQSTATUS_FSC_ERR_IRQ
- | CC_IRQSTATUS_SSC_ERR_IRQ
- | CC_IRQSTATUS_FIFO_UF_IRQ
- | CC_IRQSTATUS_FIFO_OF_IRQ;
-
- cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET,
- CC_IRQSTATUS);
- omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS,
- cc_irqstatus);
-
- if (cc_irqstatus & cc_irqstatus_err
- && !atomic_read(&cam->in_reset)) {
- dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n",
- cc_irqstatus);
- omap24xxcam_reset(cam);
- }
-}
-
-/*
- *
- * videobuf_buffer handling.
- *
- * Memory for mmapped videobuf_buffers is not allocated
- * conventionally, but by several kmalloc allocations and then
- * creating the scatterlist on our own. User-space buffers are handled
- * normally.
- *
- */
-
-/*
- * Free the memory-mapped buffer memory allocated for a
- * videobuf_buffer and the associated scatterlist.
- */
-static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
-{
- struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
- size_t alloc_size;
- struct page *page;
- int i;
-
- if (dma->sglist == NULL)
- return;
-
- i = dma->sglen;
- while (i) {
- i--;
- alloc_size = sg_dma_len(&dma->sglist[i]);
- page = sg_page(&dma->sglist[i]);
- do {
- ClearPageReserved(page++);
- } while (alloc_size -= PAGE_SIZE);
- __free_pages(sg_page(&dma->sglist[i]),
- get_order(sg_dma_len(&dma->sglist[i])));
- }
-
- kfree(dma->sglist);
- dma->sglist = NULL;
-}
-
-/* Release all memory related to the videobuf_queue. */
-static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
-{
- int i;
-
- mutex_lock(&vbq->vb_lock);
-
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == vbq->bufs[i])
- continue;
- if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
- continue;
- vbq->ops->buf_release(vbq, vbq->bufs[i]);
- omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
- kfree(vbq->bufs[i]);
- vbq->bufs[i] = NULL;
- }
-
- mutex_unlock(&vbq->vb_lock);
-
- videobuf_mmap_free(vbq);
-}
-
-/*
- * Allocate physically as contiguous as possible buffer for video
- * frame and allocate and build DMA scatter-gather list for it.
- */
-static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
-{
- unsigned int order;
- size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
- struct page *page;
- int max_pages, err = 0, i = 0;
- struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
-
- /*
- * allocate maximum size scatter-gather list. Note this is
- * overhead. We may not use as many entries as we allocate
- */
- max_pages = vb->bsize >> PAGE_SHIFT;
- dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
- if (dma->sglist == NULL) {
- err = -ENOMEM;
- goto out;
- }
-
- while (size) {
- order = get_order(size);
- /*
- * do not over-allocate even if we would get larger
- * contiguous chunk that way
- */
- if ((PAGE_SIZE << order) > size)
- order--;
-
- /* try to allocate as many contiguous pages as possible */
- page = alloc_pages(GFP_KERNEL, order);
- /* if allocation fails, try to allocate smaller amount */
- while (page == NULL) {
- order--;
- page = alloc_pages(GFP_KERNEL, order);
- if (page == NULL && !order) {
- err = -ENOMEM;
- goto out;
- }
- }
- size -= (PAGE_SIZE << order);
-
- /* append allocated chunk of pages into scatter-gather list */
- sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
- dma->sglen++;
- i++;
-
- alloc_size = (PAGE_SIZE << order);
-
- /* clear pages before giving them to user space */
- memset(page_address(page), 0, alloc_size);
-
- /* mark allocated pages reserved */
- do {
- SetPageReserved(page++);
- } while (alloc_size -= PAGE_SIZE);
- }
- /*
- * REVISIT: not fully correct to assign nr_pages == sglen but
- * video-buf is passing nr_pages for e.g. unmap_sg calls
- */
- dma->nr_pages = dma->sglen;
- dma->direction = PCI_DMA_FROMDEVICE;
-
- return 0;
-
-out:
- omap24xxcam_vbq_free_mmap_buffer(vb);
- return err;
-}
-
-static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
- unsigned int count)
-{
- int i, err = 0;
- struct omap24xxcam_fh *fh =
- container_of(vbq, struct omap24xxcam_fh, vbq);
-
- mutex_lock(&vbq->vb_lock);
-
- for (i = 0; i < count; i++) {
- err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
- if (err)
- goto out;
- dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
- videobuf_to_dma(vbq->bufs[i])->sglen, i);
- }
-
- mutex_unlock(&vbq->vb_lock);
-
- return 0;
-out:
- while (i) {
- i--;
- omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
- }
-
- mutex_unlock(&vbq->vb_lock);
-
- return err;
-}
-
-/*
- * This routine is called from interrupt context when a scatter-gather DMA
- * transfer of a videobuf_buffer completes.
- */
-static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
- u32 csr, void *arg)
-{
- struct omap24xxcam_device *cam =
- container_of(sgdma, struct omap24xxcam_device, sgdma);
- struct omap24xxcam_fh *fh = cam->streaming->private_data;
- struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
- const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
- | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
- | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
- if (--cam->sgdma_in_queue == 0)
- omap24xxcam_core_disable(cam);
- spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
-
- v4l2_get_timestamp(&vb->ts);
- vb->field_count = atomic_add_return(2, &fh->field_count);
- if (csr & csr_error) {
- vb->state = VIDEOBUF_ERROR;
- if (!atomic_read(&fh->cam->in_reset)) {
- dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
- omap24xxcam_reset(cam);
- }
- } else
- vb->state = VIDEOBUF_DONE;
- wake_up(&vb->done);
-}
-
-static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
- struct videobuf_buffer *vb)
-{
- struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
-
- /* wait for buffer, especially to get out of the sgdma queue */
- videobuf_waiton(vbq, vb, 0, 0);
- if (vb->memory == V4L2_MEMORY_MMAP) {
- dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
- dma->direction);
- dma->direction = DMA_NONE;
- } else {
- videobuf_dma_unmap(vbq->dev, videobuf_to_dma(vb));
- videobuf_dma_free(videobuf_to_dma(vb));
- }
-
- vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
-/*
- * Limit the number of available kernel image capture buffers based on the
- * number requested, the currently selected image size, and the maximum
- * amount of memory permitted for kernel capture buffers.
- */
-static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
- unsigned int *size)
-{
- struct omap24xxcam_fh *fh = vbq->priv_data;
-
- if (*cnt <= 0)
- *cnt = VIDEO_MAX_FRAME; /* supply a default number of buffers */
-
- if (*cnt > VIDEO_MAX_FRAME)
- *cnt = VIDEO_MAX_FRAME;
-
- *size = fh->pix.sizeimage;
-
- /* accessing fh->cam->capture_mem is ok, it's constant */
- if (*size * *cnt > fh->cam->capture_mem)
- *cnt = fh->cam->capture_mem / *size;
-
- return 0;
-}
-
-static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
- struct videobuf_dmabuf *dma)
-{
- int err = 0;
-
- dma->direction = PCI_DMA_FROMDEVICE;
- if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
- kfree(dma->sglist);
- dma->sglist = NULL;
- dma->sglen = 0;
- err = -EIO;
- }
-
- return err;
-}
-
-static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct omap24xxcam_fh *fh = vbq->priv_data;
- int err = 0;
-
- /*
- * Accessing pix here is okay since it's constant while
- * streaming is on (and we only get called then).
- */
- if (vb->baddr) {
- /* This is a userspace buffer. */
- if (fh->pix.sizeimage > vb->bsize) {
- /* The buffer isn't big enough. */
- err = -EINVAL;
- } else
- vb->size = fh->pix.sizeimage;
- } else {
- if (vb->state != VIDEOBUF_NEEDS_INIT) {
- /*
- * We have a kernel bounce buffer that has
- * already been allocated.
- */
- if (fh->pix.sizeimage > vb->size) {
- /*
- * The image size has been changed to
- * a larger size since this buffer was
- * allocated, so we need to free and
- * reallocate it.
- */
- omap24xxcam_vbq_release(vbq, vb);
- vb->size = fh->pix.sizeimage;
- }
- } else {
- /* We need to allocate a new kernel bounce buffer. */
- vb->size = fh->pix.sizeimage;
- }
- }
-
- if (err)
- return err;
-
- vb->width = fh->pix.width;
- vb->height = fh->pix.height;
- vb->field = field;
-
- if (vb->state == VIDEOBUF_NEEDS_INIT) {
- if (vb->memory == V4L2_MEMORY_MMAP)
- /*
- * we have built the scatter-gather list ourselves, so
- * do the scatter-gather mapping as well
- */
- err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
- else
- err = videobuf_iolock(vbq, vb, NULL);
- }
-
- if (!err)
- vb->state = VIDEOBUF_PREPARED;
- else
- omap24xxcam_vbq_release(vbq, vb);
-
- return err;
-}
-
-static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
- struct videobuf_buffer *vb)
-{
- struct omap24xxcam_fh *fh = vbq->priv_data;
- struct omap24xxcam_device *cam = fh->cam;
- enum videobuf_state state = vb->state;
- unsigned long flags;
- int err;
-
- /*
- * FIXME: We're marking the buffer active since we have no
- * pretty way of marking it active exactly when the
- * scatter-gather transfer starts.
- */
- vb->state = VIDEOBUF_ACTIVE;
-
- err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
- videobuf_to_dma(vb)->sglist,
- videobuf_to_dma(vb)->sglen, vb->size,
- omap24xxcam_vbq_complete, vb);
-
- if (!err) {
- spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
- if (++cam->sgdma_in_queue == 1
- && !atomic_read(&cam->in_reset))
- omap24xxcam_core_enable(cam);
- spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
- } else {
- /*
- * Oops. We're not supposed to get any errors here.
- * The only way we could get an error is if we ran out
- * of scatter-gather DMA slots, but we are supposed to
- * have at least as many scatter-gather DMA slots as
- * video buffers so that can't happen.
- */
- dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
- dev_err(cam->dev, "likely a bug in the driver!\n");
- vb->state = state;
- }
-}
-
-static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
- .buf_setup = omap24xxcam_vbq_setup,
- .buf_prepare = omap24xxcam_vbq_prepare,
- .buf_queue = omap24xxcam_vbq_queue,
- .buf_release = omap24xxcam_vbq_release,
-};
-
-/*
- *
- * OMAP main camera system
- *
- */
-
-/*
- * Reset camera block to power-on state.
- */
-static void omap24xxcam_poweron_reset(struct omap24xxcam_device *cam)
-{
- int max_loop = RESET_TIMEOUT_NS;
-
- /* Reset whole camera subsystem */
- omap24xxcam_reg_out(cam->mmio_base,
- CAM_SYSCONFIG,
- CAM_SYSCONFIG_SOFTRESET);
-
- /* Wait till it's finished */
- while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
- & CAM_SYSSTATUS_RESETDONE)
- && --max_loop) {
- ndelay(1);
- }
-
- if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
- & CAM_SYSSTATUS_RESETDONE))
- dev_err(cam->dev, "camera soft reset timeout\n");
-}
-
-/*
- * (Re)initialise the camera block.
- */
-static void omap24xxcam_hwinit(struct omap24xxcam_device *cam)
-{
- omap24xxcam_poweron_reset(cam);
-
- /* set the camera subsystem autoidle bit */
- omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
- CAM_SYSCONFIG_AUTOIDLE);
-
- /* set the camera MMU autoidle bit */
- omap24xxcam_reg_out(cam->mmio_base,
- CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
- CAMMMU_SYSCONFIG_AUTOIDLE);
-
- omap24xxcam_core_hwinit(cam);
-
- omap24xxcam_dma_hwinit(&cam->sgdma.dma);
-}
-
-/*
- * Callback for dma transfer stalling.
- */
-static void omap24xxcam_stalled_dma_reset(unsigned long data)
-{
- struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;
-
- if (!atomic_read(&cam->in_reset)) {
- dev_dbg(cam->dev, "dma stalled, resetting camera\n");
- omap24xxcam_reset(cam);
- }
-}
-
-/*
- * Stop capture. Mark we're doing a reset, stop DMA transfers and
- * core. (No new scatter-gather transfers will be queued whilst
- * in_reset is non-zero.)
- *
- * If omap24xxcam_capture_stop is called from several places at
- * once, only the first call will have an effect. Similarly, only the
- * last call to omap24xxcam_capture_cont will have an effect.
- *
- * Serialisation is ensured by using cam->core_enable_disable_lock.
- */
-static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
-
- if (atomic_inc_return(&cam->in_reset) != 1) {
- spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
- return;
- }
-
- omap24xxcam_core_disable(cam);
-
- spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
-
- omap24xxcam_sgdma_sync(&cam->sgdma);
-}
-
-/*
- * Reset and continue streaming.
- *
- * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
- * register is supposed to be sufficient to recover from a camera
- * interface error, but it doesn't seem to be enough. If we only do
- * that then subsequent image captures are out of sync by either one
- * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
- * entire camera subsystem prevents the problem with frame
- * synchronization.
- */
-static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
-
- if (atomic_read(&cam->in_reset) != 1)
- goto out;
-
- omap24xxcam_hwinit(cam);
-
- omap24xxcam_sensor_if_enable(cam);
-
- omap24xxcam_sgdma_process(&cam->sgdma);
-
- if (cam->sgdma_in_queue)
- omap24xxcam_core_enable(cam);
-
-out:
- atomic_dec(&cam->in_reset);
- spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
-}
-
-static ssize_t
-omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct omap24xxcam_device *cam = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", cam->streaming ? "active" : "inactive");
-}
-static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);
-
-/*
- * Stop capture and restart it. I.e. reset the camera during use.
- */
-static void omap24xxcam_reset(struct omap24xxcam_device *cam)
-{
- omap24xxcam_capture_stop(cam);
- omap24xxcam_capture_cont(cam);
-}
-
-/*
- * The main interrupt handler.
- */
-static irqreturn_t omap24xxcam_isr(int irq, void *arg)
-{
- struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
- u32 irqstatus;
- unsigned int irqhandled = 0;
-
- irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);
-
- if (irqstatus &
- (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
- | CAM_IRQSTATUS_DMA_IRQ0)) {
- omap24xxcam_dma_isr(&cam->sgdma.dma);
- irqhandled = 1;
- }
- if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
- omap24xxcam_core_isr(cam);
- irqhandled = 1;
- }
- if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
- dev_err(cam->dev, "unhandled camera MMU interrupt!\n");
-
- return IRQ_RETVAL(irqhandled);
-}
-
-/*
- *
- * Sensor handling.
- *
- */
-
-/*
- * Enable the external sensor interface. Try to negotiate interface
- * parameters with the sensor and start using the new ones. The calls
- * to sensor_if_enable and sensor_if_disable need not be balanced.
- */
-static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
-{
- int rval;
- struct v4l2_ifparm p;
-
- rval = vidioc_int_g_ifparm(cam->sdev, &p);
- if (rval) {
- dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n", rval);
- return rval;
- }
-
- cam->if_type = p.if_type;
-
- cam->cc_ctrl = CC_CTRL_CC_EN;
-
- switch (p.if_type) {
- case V4L2_IF_TYPE_BT656:
- if (p.u.bt656.frame_start_on_rising_vs)
- cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
- if (p.u.bt656.bt_sync_correct)
- cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
- if (p.u.bt656.swap)
- cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
- if (p.u.bt656.latch_clk_inv)
- cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
- if (p.u.bt656.nobt_hs_inv)
- cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
- if (p.u.bt656.nobt_vs_inv)
- cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;
-
- switch (p.u.bt656.mode) {
- case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
- cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
- break;
- case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
- cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
- break;
- case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
- cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
- break;
- case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
- cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
- break;
- case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
- cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
- break;
- default:
- dev_err(cam->dev,
- "bt656 interface mode %d not supported\n",
- p.u.bt656.mode);
- return -EINVAL;
- }
- /*
- * The clock rate that the sensor wants has changed.
- * We have to adjust the xclk from OMAP 2 side to
- * match the sensor's wish as closely as possible.
- */
- if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
- u32 xclk = p.u.bt656.clock_curr;
- u32 divisor;
-
- if (xclk == 0)
- return -EINVAL;
-
- if (xclk > CAM_MCLK)
- xclk = CAM_MCLK;
-
- divisor = CAM_MCLK / xclk;
- if (divisor * xclk < CAM_MCLK)
- divisor++;
- if (CAM_MCLK / divisor < p.u.bt656.clock_min
- && divisor > 1)
- divisor--;
- if (divisor > 30)
- divisor = 30;
-
- xclk = CAM_MCLK / divisor;
-
- if (xclk < p.u.bt656.clock_min
- || xclk > p.u.bt656.clock_max)
- return -EINVAL;
-
- cam->if_u.bt656.xclk = xclk;
- }
- omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
- break;
- default:
- /* FIXME: how about other interfaces? */
- dev_err(cam->dev, "interface type %d not supported\n",
- p.if_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
-{
- switch (cam->if_type) {
- case V4L2_IF_TYPE_BT656:
- omap24xxcam_core_xclk_set(cam, 0);
- break;
- }
-}
-
-/*
- * Initialise the sensor hardware.
- */
-static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
-{
- int err = 0;
- struct v4l2_int_device *sdev = cam->sdev;
-
- omap24xxcam_clock_on(cam);
- err = omap24xxcam_sensor_if_enable(cam);
- if (err) {
- dev_err(cam->dev, "sensor interface could not be enabled at "
- "initialisation, %d\n", err);
- cam->sdev = NULL;
- goto out;
- }
-
- /* power up sensor during sensor initialization */
- vidioc_int_s_power(sdev, 1);
-
- err = vidioc_int_dev_init(sdev);
- if (err) {
- dev_err(cam->dev, "cannot initialize sensor, error %d\n", err);
- /* Sensor init failed --- it's nonexistent to us! */
- cam->sdev = NULL;
- goto out;
- }
-
- dev_info(cam->dev, "sensor is %s\n", sdev->name);
-
-out:
- omap24xxcam_sensor_if_disable(cam);
- omap24xxcam_clock_off(cam);
-
- vidioc_int_s_power(sdev, 0);
-
- return err;
-}
-
-static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
-{
- if (cam->sdev)
- vidioc_int_dev_exit(cam->sdev);
-}
-
-static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
-{
- omap24xxcam_sensor_if_disable(cam);
- omap24xxcam_clock_off(cam);
- vidioc_int_s_power(cam->sdev, 0);
-}
-
-/*
- * Power-up and configure camera sensor. It's ready for capturing now.
- */
-static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
-{
- int rval;
-
- omap24xxcam_clock_on(cam);
-
- omap24xxcam_sensor_if_enable(cam);
-
- rval = vidioc_int_s_power(cam->sdev, 1);
- if (rval)
- goto out;
-
- rval = vidioc_int_init(cam->sdev);
- if (rval)
- goto out;
-
- return 0;
-
-out:
- omap24xxcam_sensor_disable(cam);
-
- return rval;
-}
-
-static void omap24xxcam_sensor_reset_work(struct work_struct *work)
-{
- struct omap24xxcam_device *cam =
- container_of(work, struct omap24xxcam_device,
- sensor_reset_work);
-
- if (atomic_read(&cam->reset_disable))
- return;
-
- omap24xxcam_capture_stop(cam);
-
- if (vidioc_int_reset(cam->sdev) == 0) {
- vidioc_int_init(cam->sdev);
- } else {
- /* Can't reset it by vidioc_int_reset. */
- omap24xxcam_sensor_disable(cam);
- omap24xxcam_sensor_enable(cam);
- }
-
- omap24xxcam_capture_cont(cam);
-}
-
-/*
- *
- * IOCTL interface.
- *
- */
-
-static int vidioc_querycap(struct file *file, void *fh,
- struct v4l2_capability *cap)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
-
- strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
- strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
-
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_fmtdesc *f)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
-
- return vidioc_int_enum_fmt_cap(cam->sdev, f);
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- rval = vidioc_int_g_fmt_cap(cam->sdev, f);
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming) {
- rval = -EBUSY;
- goto out;
- }
-
- rval = vidioc_int_s_fmt_cap(cam->sdev, f);
-
-out:
- mutex_unlock(&cam->mutex);
-
- if (!rval) {
- mutex_lock(&ofh->vbq.vb_lock);
- ofh->pix = f->fmt.pix;
- mutex_unlock(&ofh->vbq.vb_lock);
- }
-
- memset(f, 0, sizeof(*f));
- vidioc_g_fmt_vid_cap(file, fh, f);
-
- return rval;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- rval = vidioc_int_try_fmt_cap(cam->sdev, f);
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *b)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming) {
- mutex_unlock(&cam->mutex);
- return -EBUSY;
- }
-
- omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
- mutex_unlock(&cam->mutex);
-
- rval = videobuf_reqbufs(&ofh->vbq, b);
-
- /*
- * Either videobuf_reqbufs failed or the buffers are not
- * memory-mapped (which would need special attention).
- */
- if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
- goto out;
-
- rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
- if (rval)
- omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
-
-out:
- return rval;
-}
-
-static int vidioc_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *b)
-{
- struct omap24xxcam_fh *ofh = fh;
-
- return videobuf_querybuf(&ofh->vbq, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct omap24xxcam_fh *ofh = fh;
-
- return videobuf_qbuf(&ofh->vbq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- struct videobuf_buffer *vb;
- int rval;
-
-videobuf_dqbuf_again:
- rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
- if (rval)
- goto out;
-
- vb = ofh->vbq.bufs[b->index];
-
- mutex_lock(&cam->mutex);
- /* _needs_reset returns -EIO if reset is required. */
- rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
- mutex_unlock(&cam->mutex);
- if (rval == -EIO)
- schedule_work(&cam->sensor_reset_work);
- else
- rval = 0;
-
-out:
- /*
- * This is a hack. We don't want to show -EIO to the user
- * space. Requeue the buffer and try again if we're not doing
- * this in non-blocking mode.
- */
- if (rval == -EIO) {
- videobuf_qbuf(&ofh->vbq, b);
- if (!(file->f_flags & O_NONBLOCK))
- goto videobuf_dqbuf_again;
- /*
- * We don't have a videobuf_buffer now --- maybe next
- * time...
- */
- rval = -EAGAIN;
- }
-
- return rval;
-}
-
-static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming) {
- rval = -EBUSY;
- goto out;
- }
-
- rval = omap24xxcam_sensor_if_enable(cam);
- if (rval) {
- dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
- goto out;
- }
-
- rval = videobuf_streamon(&ofh->vbq);
- if (!rval) {
- cam->streaming = file;
- sysfs_notify(&cam->dev->kobj, NULL, "streaming");
- }
-
-out:
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- struct videobuf_queue *q = &ofh->vbq;
- int rval;
-
- atomic_inc(&cam->reset_disable);
-
- flush_work(&cam->sensor_reset_work);
-
- rval = videobuf_streamoff(q);
- if (!rval) {
- mutex_lock(&cam->mutex);
- cam->streaming = NULL;
- mutex_unlock(&cam->mutex);
- sysfs_notify(&cam->dev->kobj, NULL, "streaming");
- }
-
- atomic_dec(&cam->reset_disable);
-
- return rval;
-}
-
-static int vidioc_enum_input(struct file *file, void *fh,
- struct v4l2_input *inp)
-{
- if (inp->index > 0)
- return -EINVAL;
-
- strlcpy(inp->name, "camera", sizeof(inp->name));
- inp->type = V4L2_INPUT_TYPE_CAMERA;
-
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
-{
- *i = 0;
-
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
-{
- if (i > 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *a)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
-
- return vidioc_int_queryctrl(cam->sdev, a);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh,
- struct v4l2_control *a)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- rval = vidioc_int_g_ctrl(cam->sdev, a);
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh,
- struct v4l2_control *a)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- rval = vidioc_int_s_ctrl(cam->sdev, a);
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_g_parm(struct file *file, void *fh,
- struct v4l2_streamparm *a) {
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- int rval;
-
- mutex_lock(&cam->mutex);
- rval = vidioc_int_g_parm(cam->sdev, a);
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-static int vidioc_s_parm(struct file *file, void *fh,
- struct v4l2_streamparm *a)
-{
- struct omap24xxcam_fh *ofh = fh;
- struct omap24xxcam_device *cam = ofh->cam;
- struct v4l2_streamparm old_streamparm;
- int rval;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming) {
- rval = -EBUSY;
- goto out;
- }
-
- old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
- if (rval)
- goto out;
-
- rval = vidioc_int_s_parm(cam->sdev, a);
- if (rval)
- goto out;
-
- rval = omap24xxcam_sensor_if_enable(cam);
- /*
- * Revert to old streaming parameters if enabling sensor
- * interface with the new ones failed.
- */
- if (rval)
- vidioc_int_s_parm(cam->sdev, &old_streamparm);
-
-out:
- mutex_unlock(&cam->mutex);
-
- return rval;
-}
-
-/*
- *
- * File operations.
- *
- */
-
-static unsigned int omap24xxcam_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct omap24xxcam_fh *fh = file->private_data;
- struct omap24xxcam_device *cam = fh->cam;
- struct videobuf_buffer *vb;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming != file) {
- mutex_unlock(&cam->mutex);
- return POLLERR;
- }
- mutex_unlock(&cam->mutex);
-
- mutex_lock(&fh->vbq.vb_lock);
- if (list_empty(&fh->vbq.stream)) {
- mutex_unlock(&fh->vbq.vb_lock);
- return POLLERR;
- }
- vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
- mutex_unlock(&fh->vbq.vb_lock);
-
- poll_wait(file, &vb->done, wait);
-
- if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static int omap24xxcam_mmap_buffers(struct file *file,
- struct vm_area_struct *vma)
-{
- struct omap24xxcam_fh *fh = file->private_data;
- struct omap24xxcam_device *cam = fh->cam;
- struct videobuf_queue *vbq = &fh->vbq;
- unsigned int first, last, size, i, j;
- int err = 0;
-
- mutex_lock(&cam->mutex);
- if (cam->streaming) {
- mutex_unlock(&cam->mutex);
- return -EBUSY;
- }
- mutex_unlock(&cam->mutex);
- mutex_lock(&vbq->vb_lock);
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == vbq->bufs[first])
- continue;
- if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
- continue;
- if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
- break;
- }
-
- /* look for last buffer to map */
- for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
- if (NULL == vbq->bufs[last])
- continue;
- if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
- continue;
- size += vbq->bufs[last]->bsize;
- if (size == (vma->vm_end - vma->vm_start))
- break;
- }
-
- size = 0;
- for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) {
- struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);
-
- for (j = 0; j < dma->sglen; j++) {
- err = remap_pfn_range(
- vma, vma->vm_start + size,
- page_to_pfn(sg_page(&dma->sglist[j])),
- sg_dma_len(&dma->sglist[j]), vma->vm_page_prot);
- if (err)
- goto out;
- size += sg_dma_len(&dma->sglist[j]);
- }
- }
-
-out:
- mutex_unlock(&vbq->vb_lock);
-
- return err;
-}
-
-static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct omap24xxcam_fh *fh = file->private_data;
- int rval;
-
- /* let the video-buf mapper check arguments and set-up structures */
- rval = videobuf_mmap_mapper(&fh->vbq, vma);
- if (rval)
- return rval;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* do mapping to our allocated buffers */
- rval = omap24xxcam_mmap_buffers(file, vma);
- /*
- * In case of error, free vma->vm_private_data allocated by
- * videobuf_mmap_mapper.
- */
- if (rval)
- kfree(vma->vm_private_data);
-
- return rval;
-}
-
-static int omap24xxcam_open(struct file *file)
-{
- struct omap24xxcam_device *cam = omap24xxcam.priv;
- struct omap24xxcam_fh *fh;
- struct v4l2_format format;
-
- if (!cam || !cam->vfd)
- return -ENODEV;
-
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (fh == NULL)
- return -ENOMEM;
-
- mutex_lock(&cam->mutex);
- if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
- mutex_unlock(&cam->mutex);
- goto out_try_module_get;
- }
-
- if (atomic_inc_return(&cam->users) == 1) {
- omap24xxcam_hwinit(cam);
- if (omap24xxcam_sensor_enable(cam)) {
- mutex_unlock(&cam->mutex);
- goto out_omap24xxcam_sensor_enable;
- }
- }
- mutex_unlock(&cam->mutex);
-
- fh->cam = cam;
- mutex_lock(&cam->mutex);
- vidioc_int_g_fmt_cap(cam->sdev, &format);
- mutex_unlock(&cam->mutex);
- /* FIXME: how about fh->pix when there are more users? */
- fh->pix = format.fmt.pix;
-
- file->private_data = fh;
-
- spin_lock_init(&fh->vbq_lock);
-
- videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
- &fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), fh, NULL);
-
- return 0;
-
-out_omap24xxcam_sensor_enable:
- omap24xxcam_poweron_reset(cam);
- module_put(cam->sdev->module);
-
-out_try_module_get:
- kfree(fh);
-
- return -ENODEV;
-}
-
-static int omap24xxcam_release(struct file *file)
-{
- struct omap24xxcam_fh *fh = file->private_data;
- struct omap24xxcam_device *cam = fh->cam;
-
- atomic_inc(&cam->reset_disable);
-
- flush_work(&cam->sensor_reset_work);
-
- /* stop streaming capture */
- videobuf_streamoff(&fh->vbq);
-
- mutex_lock(&cam->mutex);
- if (cam->streaming == file) {
- cam->streaming = NULL;
- mutex_unlock(&cam->mutex);
- sysfs_notify(&cam->dev->kobj, NULL, "streaming");
- } else {
- mutex_unlock(&cam->mutex);
- }
-
- atomic_dec(&cam->reset_disable);
-
- omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);
-
- /*
- * Make sure the reset work we might have scheduled is not
- * pending! It may be run *only* if we have users. (And it may
- * not be scheduled anymore since streaming is already
- * disabled.)
- */
- flush_work(&cam->sensor_reset_work);
-
- mutex_lock(&cam->mutex);
- if (atomic_dec_return(&cam->users) == 0) {
- omap24xxcam_sensor_disable(cam);
- omap24xxcam_poweron_reset(cam);
- }
- mutex_unlock(&cam->mutex);
-
- file->private_data = NULL;
-
- module_put(cam->sdev->module);
- kfree(fh);
-
- return 0;
-}
-
-static struct v4l2_file_operations omap24xxcam_fops = {
- .ioctl = video_ioctl2,
- .poll = omap24xxcam_poll,
- .mmap = omap24xxcam_mmap,
- .open = omap24xxcam_open,
- .release = omap24xxcam_release,
-};
-
-/*
- *
- * Power management.
- *
- */
-
-#ifdef CONFIG_PM
-static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
-
- if (atomic_read(&cam->users) == 0)
- return 0;
-
- if (!atomic_read(&cam->reset_disable))
- omap24xxcam_capture_stop(cam);
-
- omap24xxcam_sensor_disable(cam);
- omap24xxcam_poweron_reset(cam);
-
- return 0;
-}
-
-static int omap24xxcam_resume(struct platform_device *pdev)
-{
- struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
-
- if (atomic_read(&cam->users) == 0)
- return 0;
-
- omap24xxcam_hwinit(cam);
- omap24xxcam_sensor_enable(cam);
-
- if (!atomic_read(&cam->reset_disable))
- omap24xxcam_capture_cont(cam);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static const struct v4l2_ioctl_ops omap24xxcam_ioctl_fops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_parm = vidioc_g_parm,
- .vidioc_s_parm = vidioc_s_parm,
-};
-
-/*
- *
- * Camera device (i.e. /dev/video).
- *
- */
-
-static int omap24xxcam_device_register(struct v4l2_int_device *s)
-{
- struct omap24xxcam_device *cam = s->u.slave->master->priv;
- struct video_device *vfd;
- int rval;
-
- /* We already have a slave. */
- if (cam->sdev)
- return -EBUSY;
-
- cam->sdev = s;
-
- if (device_create_file(cam->dev, &dev_attr_streaming) != 0) {
- dev_err(cam->dev, "could not register sysfs entry\n");
- rval = -EBUSY;
- goto err;
- }
-
- /* initialize the video_device struct */
- vfd = cam->vfd = video_device_alloc();
- if (!vfd) {
- dev_err(cam->dev, "could not allocate video device struct\n");
- rval = -ENOMEM;
- goto err;
- }
- vfd->release = video_device_release;
-
- vfd->v4l2_dev = &cam->v4l2_dev;
-
- strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
- vfd->fops = &omap24xxcam_fops;
- vfd->ioctl_ops = &omap24xxcam_ioctl_fops;
-
- omap24xxcam_hwinit(cam);
-
- rval = omap24xxcam_sensor_init(cam);
- if (rval)
- goto err;
-
- if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
- dev_err(cam->dev, "could not register V4L device\n");
- rval = -EBUSY;
- goto err;
- }
-
- omap24xxcam_poweron_reset(cam);
-
- dev_info(cam->dev, "registered device %s\n",
- video_device_node_name(vfd));
-
- return 0;
-
-err:
- omap24xxcam_device_unregister(s);
-
- return rval;
-}
-
-static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
-{
- struct omap24xxcam_device *cam = s->u.slave->master->priv;
-
- omap24xxcam_sensor_exit(cam);
-
- if (cam->vfd) {
- if (!video_is_registered(cam->vfd)) {
- /*
- * The device was never registered, so release the
- * video_device struct directly.
- */
- video_device_release(cam->vfd);
- } else {
- /*
- * The unregister function will release the
- * video_device struct as well as
- * unregistering it.
- */
- video_unregister_device(cam->vfd);
- }
- cam->vfd = NULL;
- }
-
- device_remove_file(cam->dev, &dev_attr_streaming);
-
- cam->sdev = NULL;
-}
-
-static struct v4l2_int_master omap24xxcam_master = {
- .attach = omap24xxcam_device_register,
- .detach = omap24xxcam_device_unregister,
-};
-
-static struct v4l2_int_device omap24xxcam = {
- .module = THIS_MODULE,
- .name = CAM_NAME,
- .type = v4l2_int_type_master,
- .u = {
- .master = &omap24xxcam_master
- },
-};
-
-/*
- *
- * Driver initialisation and deinitialisation.
- *
- */
-
-static int omap24xxcam_probe(struct platform_device *pdev)
-{
- struct omap24xxcam_device *cam;
- struct resource *mem;
- int irq;
-
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
- if (!cam) {
- dev_err(&pdev->dev, "could not allocate memory\n");
- goto err;
- }
-
- platform_set_drvdata(pdev, cam);
-
- cam->dev = &pdev->dev;
-
- if (v4l2_device_register(&pdev->dev, &cam->v4l2_dev)) {
- dev_err(&pdev->dev, "v4l2_device_register failed\n");
- goto err;
- }
-
- /*
- * Impose a lower limit on the amount of memory allocated for
- * capture. We require at least enough memory to double-buffer
- * QVGA (300KB).
- */
- if (capture_mem < 320 * 240 * 2 * 2)
- capture_mem = 320 * 240 * 2 * 2;
- cam->capture_mem = capture_mem;
-
- /* request the mem region for the camera registers */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(cam->dev, "no mem resource?\n");
- goto err;
- }
- if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
- dev_err(cam->dev,
- "cannot reserve camera register I/O region\n");
- goto err;
- }
- cam->mmio_base_phys = mem->start;
- cam->mmio_size = resource_size(mem);
-
- /* map the region */
- cam->mmio_base = ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
- if (!cam->mmio_base) {
- dev_err(cam->dev, "cannot map camera register I/O region\n");
- goto err;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(cam->dev, "no irq for camera?\n");
- goto err;
- }
-
- /* install the interrupt service routine */
- if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) {
- dev_err(cam->dev,
- "could not install interrupt service routine\n");
- goto err;
- }
- cam->irq = irq;
-
- if (omap24xxcam_clock_get(cam))
- goto err;
-
- INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work);
-
- mutex_init(&cam->mutex);
- spin_lock_init(&cam->core_enable_disable_lock);
-
- omap24xxcam_sgdma_init(&cam->sgdma,
- cam->mmio_base + CAMDMA_REG_OFFSET,
- omap24xxcam_stalled_dma_reset,
- (unsigned long)cam);
-
- omap24xxcam.priv = cam;
-
- if (v4l2_int_device_register(&omap24xxcam))
- goto err;
-
- return 0;
-
-err:
- omap24xxcam_remove(pdev);
- return -ENODEV;
-}
-
-static int omap24xxcam_remove(struct platform_device *pdev)
-{
- struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
-
- if (!cam)
- return 0;
-
- if (omap24xxcam.priv != NULL)
- v4l2_int_device_unregister(&omap24xxcam);
- omap24xxcam.priv = NULL;
-
- omap24xxcam_clock_put(cam);
-
- if (cam->irq) {
- free_irq(cam->irq, cam);
- cam->irq = 0;
- }
-
- if (cam->mmio_base) {
- iounmap((void *)cam->mmio_base);
- cam->mmio_base = 0;
- }
-
- if (cam->mmio_base_phys) {
- release_mem_region(cam->mmio_base_phys, cam->mmio_size);
- cam->mmio_base_phys = 0;
- }
-
- v4l2_device_unregister(&cam->v4l2_dev);
-
- kfree(cam);
-
- return 0;
-}
-
-static struct platform_driver omap24xxcam_driver = {
- .probe = omap24xxcam_probe,
- .remove = omap24xxcam_remove,
-#ifdef CONFIG_PM
- .suspend = omap24xxcam_suspend,
- .resume = omap24xxcam_resume,
-#endif
- .driver = {
- .name = CAM_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(omap24xxcam_driver);
-
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
-MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(OMAP24XXCAM_VERSION);
-module_param(video_nr, int, 0);
-MODULE_PARM_DESC(video_nr,
- "Minor number for video device (-1 ==> auto assign)");
-module_param(capture_mem, int, 0);
-MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
- "buffers (default 4800kiB)");
diff --git a/drivers/staging/media/omap24xx/omap24xxcam.h b/drivers/staging/media/omap24xx/omap24xxcam.h
deleted file mode 100644
index 233bb40cfec3..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam.h
+++ /dev/null
@@ -1,596 +0,0 @@
-/*
- * drivers/media/platform/omap24xxcam.h
- *
- * Copyright (C) 2004 MontaVista Software, Inc.
- * Copyright (C) 2004 Texas Instruments.
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * Based on code from Andy Lowe <source@mvista.com>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef OMAP24XXCAM_H
-#define OMAP24XXCAM_H
-
-#include <media/videobuf-dma-sg.h>
-#include <media/v4l2-device.h>
-#include "v4l2-int-device.h"
-
-/*
- *
- * General driver related definitions.
- *
- */
-
-#define CAM_NAME "omap24xxcam"
-
-#define CAM_MCLK 96000000
-
-/* number of bytes transferred per DMA request */
-#define DMA_THRESHOLD 32
-
-/*
- * NUM_CAMDMA_CHANNELS is the number of logical channels provided by
- * the camera DMA controller.
- */
-#define NUM_CAMDMA_CHANNELS 4
-
-/*
- * NUM_SG_DMA is the number of scatter-gather DMA transfers that can
- * be queued. (We don't have any overlay sglists now.)
- */
-#define NUM_SG_DMA (VIDEO_MAX_FRAME)
-
-/*
- *
- * Register definitions.
- *
- */
-
-/* subsystem register block offsets */
-#define CC_REG_OFFSET 0x00000400
-#define CAMDMA_REG_OFFSET 0x00000800
-#define CAMMMU_REG_OFFSET 0x00000C00
-
-/* define camera subsystem register offsets */
-#define CAM_REVISION 0x000
-#define CAM_SYSCONFIG 0x010
-#define CAM_SYSSTATUS 0x014
-#define CAM_IRQSTATUS 0x018
-#define CAM_GPO 0x040
-#define CAM_GPI 0x050
-
-/* define camera core register offsets */
-#define CC_REVISION 0x000
-#define CC_SYSCONFIG 0x010
-#define CC_SYSSTATUS 0x014
-#define CC_IRQSTATUS 0x018
-#define CC_IRQENABLE 0x01C
-#define CC_CTRL 0x040
-#define CC_CTRL_DMA 0x044
-#define CC_CTRL_XCLK 0x048
-#define CC_FIFODATA 0x04C
-#define CC_TEST 0x050
-#define CC_GENPAR 0x054
-#define CC_CCPFSCR 0x058
-#define CC_CCPFECR 0x05C
-#define CC_CCPLSCR 0x060
-#define CC_CCPLECR 0x064
-#define CC_CCPDFR 0x068
-
-/* define camera dma register offsets */
-#define CAMDMA_REVISION 0x000
-#define CAMDMA_IRQSTATUS_L0 0x008
-#define CAMDMA_IRQSTATUS_L1 0x00C
-#define CAMDMA_IRQSTATUS_L2 0x010
-#define CAMDMA_IRQSTATUS_L3 0x014
-#define CAMDMA_IRQENABLE_L0 0x018
-#define CAMDMA_IRQENABLE_L1 0x01C
-#define CAMDMA_IRQENABLE_L2 0x020
-#define CAMDMA_IRQENABLE_L3 0x024
-#define CAMDMA_SYSSTATUS 0x028
-#define CAMDMA_OCP_SYSCONFIG 0x02C
-#define CAMDMA_CAPS_0 0x064
-#define CAMDMA_CAPS_2 0x06C
-#define CAMDMA_CAPS_3 0x070
-#define CAMDMA_CAPS_4 0x074
-#define CAMDMA_GCR 0x078
-#define CAMDMA_CCR(n) (0x080 + (n)*0x60)
-#define CAMDMA_CLNK_CTRL(n) (0x084 + (n)*0x60)
-#define CAMDMA_CICR(n) (0x088 + (n)*0x60)
-#define CAMDMA_CSR(n) (0x08C + (n)*0x60)
-#define CAMDMA_CSDP(n) (0x090 + (n)*0x60)
-#define CAMDMA_CEN(n) (0x094 + (n)*0x60)
-#define CAMDMA_CFN(n) (0x098 + (n)*0x60)
-#define CAMDMA_CSSA(n) (0x09C + (n)*0x60)
-#define CAMDMA_CDSA(n) (0x0A0 + (n)*0x60)
-#define CAMDMA_CSEI(n) (0x0A4 + (n)*0x60)
-#define CAMDMA_CSFI(n) (0x0A8 + (n)*0x60)
-#define CAMDMA_CDEI(n) (0x0AC + (n)*0x60)
-#define CAMDMA_CDFI(n) (0x0B0 + (n)*0x60)
-#define CAMDMA_CSAC(n) (0x0B4 + (n)*0x60)
-#define CAMDMA_CDAC(n) (0x0B8 + (n)*0x60)
-#define CAMDMA_CCEN(n) (0x0BC + (n)*0x60)
-#define CAMDMA_CCFN(n) (0x0C0 + (n)*0x60)
-#define CAMDMA_COLOR(n) (0x0C4 + (n)*0x60)
-
-/* define camera mmu register offsets */
-#define CAMMMU_REVISION 0x000
-#define CAMMMU_SYSCONFIG 0x010
-#define CAMMMU_SYSSTATUS 0x014
-#define CAMMMU_IRQSTATUS 0x018
-#define CAMMMU_IRQENABLE 0x01C
-#define CAMMMU_WALKING_ST 0x040
-#define CAMMMU_CNTL 0x044
-#define CAMMMU_FAULT_AD 0x048
-#define CAMMMU_TTB 0x04C
-#define CAMMMU_LOCK 0x050
-#define CAMMMU_LD_TLB 0x054
-#define CAMMMU_CAM 0x058
-#define CAMMMU_RAM 0x05C
-#define CAMMMU_GFLUSH 0x060
-#define CAMMMU_FLUSH_ENTRY 0x064
-#define CAMMMU_READ_CAM 0x068
-#define CAMMMU_READ_RAM 0x06C
-#define CAMMMU_EMU_FAULT_AD 0x070
-
-/* Define bit fields within selected registers */
-#define CAM_REVISION_MAJOR (15 << 4)
-#define CAM_REVISION_MAJOR_SHIFT 4
-#define CAM_REVISION_MINOR (15 << 0)
-#define CAM_REVISION_MINOR_SHIFT 0
-
-#define CAM_SYSCONFIG_SOFTRESET (1 << 1)
-#define CAM_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define CAM_SYSSTATUS_RESETDONE (1 << 0)
-
-#define CAM_IRQSTATUS_CC_IRQ (1 << 4)
-#define CAM_IRQSTATUS_MMU_IRQ (1 << 3)
-#define CAM_IRQSTATUS_DMA_IRQ2 (1 << 2)
-#define CAM_IRQSTATUS_DMA_IRQ1 (1 << 1)
-#define CAM_IRQSTATUS_DMA_IRQ0 (1 << 0)
-
-#define CAM_GPO_CAM_S_P_EN (1 << 1)
-#define CAM_GPO_CAM_CCP_MODE (1 << 0)
-
-#define CAM_GPI_CC_DMA_REQ1 (1 << 24)
-#define CAP_GPI_CC_DMA_REQ0 (1 << 23)
-#define CAP_GPI_CAM_MSTANDBY (1 << 21)
-#define CAP_GPI_CAM_WAIT (1 << 20)
-#define CAP_GPI_CAM_S_DATA (1 << 17)
-#define CAP_GPI_CAM_S_CLK (1 << 16)
-#define CAP_GPI_CAM_P_DATA (0xFFF << 3)
-#define CAP_GPI_CAM_P_DATA_SHIFT 3
-#define CAP_GPI_CAM_P_VS (1 << 2)
-#define CAP_GPI_CAM_P_HS (1 << 1)
-#define CAP_GPI_CAM_P_CLK (1 << 0)
-
-#define CC_REVISION_MAJOR (15 << 4)
-#define CC_REVISION_MAJOR_SHIFT 4
-#define CC_REVISION_MINOR (15 << 0)
-#define CC_REVISION_MINOR_SHIFT 0
-
-#define CC_SYSCONFIG_SIDLEMODE (3 << 3)
-#define CC_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
-#define CC_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
-#define CC_SYSCONFIG_SOFTRESET (1 << 1)
-#define CC_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define CC_SYSSTATUS_RESETDONE (1 << 0)
-
-#define CC_IRQSTATUS_FS_IRQ (1 << 19)
-#define CC_IRQSTATUS_LE_IRQ (1 << 18)
-#define CC_IRQSTATUS_LS_IRQ (1 << 17)
-#define CC_IRQSTATUS_FE_IRQ (1 << 16)
-#define CC_IRQSTATUS_FW_ERR_IRQ (1 << 10)
-#define CC_IRQSTATUS_FSC_ERR_IRQ (1 << 9)
-#define CC_IRQSTATUS_SSC_ERR_IRQ (1 << 8)
-#define CC_IRQSTATUS_FIFO_NOEMPTY_IRQ (1 << 4)
-#define CC_IRQSTATUS_FIFO_FULL_IRQ (1 << 3)
-#define CC_IRQSTATUS_FIFO_THR_IRQ (1 << 2)
-#define CC_IRQSTATUS_FIFO_OF_IRQ (1 << 1)
-#define CC_IRQSTATUS_FIFO_UF_IRQ (1 << 0)
-
-#define CC_IRQENABLE_FS_IRQ (1 << 19)
-#define CC_IRQENABLE_LE_IRQ (1 << 18)
-#define CC_IRQENABLE_LS_IRQ (1 << 17)
-#define CC_IRQENABLE_FE_IRQ (1 << 16)
-#define CC_IRQENABLE_FW_ERR_IRQ (1 << 10)
-#define CC_IRQENABLE_FSC_ERR_IRQ (1 << 9)
-#define CC_IRQENABLE_SSC_ERR_IRQ (1 << 8)
-#define CC_IRQENABLE_FIFO_NOEMPTY_IRQ (1 << 4)
-#define CC_IRQENABLE_FIFO_FULL_IRQ (1 << 3)
-#define CC_IRQENABLE_FIFO_THR_IRQ (1 << 2)
-#define CC_IRQENABLE_FIFO_OF_IRQ (1 << 1)
-#define CC_IRQENABLE_FIFO_UF_IRQ (1 << 0)
-
-#define CC_CTRL_CC_ONE_SHOT (1 << 20)
-#define CC_CTRL_CC_IF_SYNCHRO (1 << 19)
-#define CC_CTRL_CC_RST (1 << 18)
-#define CC_CTRL_CC_FRAME_TRIG (1 << 17)
-#define CC_CTRL_CC_EN (1 << 16)
-#define CC_CTRL_NOBT_SYNCHRO (1 << 13)
-#define CC_CTRL_BT_CORRECT (1 << 12)
-#define CC_CTRL_PAR_ORDERCAM (1 << 11)
-#define CC_CTRL_PAR_CLK_POL (1 << 10)
-#define CC_CTRL_NOBT_HS_POL (1 << 9)
-#define CC_CTRL_NOBT_VS_POL (1 << 8)
-#define CC_CTRL_PAR_MODE (7 << 1)
-#define CC_CTRL_PAR_MODE_SHIFT 1
-#define CC_CTRL_PAR_MODE_NOBT8 (0 << 1)
-#define CC_CTRL_PAR_MODE_NOBT10 (1 << 1)
-#define CC_CTRL_PAR_MODE_NOBT12 (2 << 1)
-#define CC_CTRL_PAR_MODE_BT8 (4 << 1)
-#define CC_CTRL_PAR_MODE_BT10 (5 << 1)
-#define CC_CTRL_PAR_MODE_FIFOTEST (7 << 1)
-#define CC_CTRL_CCP_MODE (1 << 0)
-
-#define CC_CTRL_DMA_EN (1 << 8)
-#define CC_CTRL_DMA_FIFO_THRESHOLD (0x7F << 0)
-#define CC_CTRL_DMA_FIFO_THRESHOLD_SHIFT 0
-
-#define CC_CTRL_XCLK_DIV (0x1F << 0)
-#define CC_CTRL_XCLK_DIV_SHIFT 0
-#define CC_CTRL_XCLK_DIV_STABLE_LOW (0 << 0)
-#define CC_CTRL_XCLK_DIV_STABLE_HIGH (1 << 0)
-#define CC_CTRL_XCLK_DIV_BYPASS (31 << 0)
-
-#define CC_TEST_FIFO_RD_POINTER (0xFF << 24)
-#define CC_TEST_FIFO_RD_POINTER_SHIFT 24
-#define CC_TEST_FIFO_WR_POINTER (0xFF << 16)
-#define CC_TEST_FIFO_WR_POINTER_SHIFT 16
-#define CC_TEST_FIFO_LEVEL (0xFF << 8)
-#define CC_TEST_FIFO_LEVEL_SHIFT 8
-#define CC_TEST_FIFO_LEVEL_PEAK (0xFF << 0)
-#define CC_TEST_FIFO_LEVEL_PEAK_SHIFT 0
-
-#define CC_GENPAR_FIFO_DEPTH (7 << 0)
-#define CC_GENPAR_FIFO_DEPTH_SHIFT 0
-
-#define CC_CCPDFR_ALPHA (0xFF << 8)
-#define CC_CCPDFR_ALPHA_SHIFT 8
-#define CC_CCPDFR_DATAFORMAT (15 << 0)
-#define CC_CCPDFR_DATAFORMAT_SHIFT 0
-#define CC_CCPDFR_DATAFORMAT_YUV422BE (0 << 0)
-#define CC_CCPDFR_DATAFORMAT_YUV422 (1 << 0)
-#define CC_CCPDFR_DATAFORMAT_YUV420 (2 << 0)
-#define CC_CCPDFR_DATAFORMAT_RGB444 (4 << 0)
-#define CC_CCPDFR_DATAFORMAT_RGB565 (5 << 0)
-#define CC_CCPDFR_DATAFORMAT_RGB888NDE (6 << 0)
-#define CC_CCPDFR_DATAFORMAT_RGB888 (7 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW8NDE (8 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW8 (9 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW10NDE (10 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW10 (11 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW12NDE (12 << 0)
-#define CC_CCPDFR_DATAFORMAT_RAW12 (13 << 0)
-#define CC_CCPDFR_DATAFORMAT_JPEG8 (15 << 0)
-
-#define CAMDMA_REVISION_MAJOR (15 << 4)
-#define CAMDMA_REVISION_MAJOR_SHIFT 4
-#define CAMDMA_REVISION_MINOR (15 << 0)
-#define CAMDMA_REVISION_MINOR_SHIFT 0
-
-#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE (3 << 12)
-#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY (0 << 12)
-#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_NSTANDBY (1 << 12)
-#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_SSTANDBY (2 << 12)
-#define CAMDMA_OCP_SYSCONFIG_FUNC_CLOCK (1 << 9)
-#define CAMDMA_OCP_SYSCONFIG_OCP_CLOCK (1 << 8)
-#define CAMDMA_OCP_SYSCONFIG_EMUFREE (1 << 5)
-#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE (3 << 3)
-#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
-#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
-#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_SIDLE (2 << 3)
-#define CAMDMA_OCP_SYSCONFIG_SOFTRESET (1 << 1)
-#define CAMDMA_OCP_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define CAMDMA_SYSSTATUS_RESETDONE (1 << 0)
-
-#define CAMDMA_GCR_ARBITRATION_RATE (0xFF << 16)
-#define CAMDMA_GCR_ARBITRATION_RATE_SHIFT 16
-#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH (0xFF << 0)
-#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH_SHIFT 0
-
-#define CAMDMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
-#define CAMDMA_CCR_PREFETCH (1 << 23)
-#define CAMDMA_CCR_SUPERVISOR (1 << 22)
-#define CAMDMA_CCR_SECURE (1 << 21)
-#define CAMDMA_CCR_BS (1 << 18)
-#define CAMDMA_CCR_TRANSPARENT_COPY_ENABLE (1 << 17)
-#define CAMDMA_CCR_CONSTANT_FILL_ENABLE (1 << 16)
-#define CAMDMA_CCR_DST_AMODE (3 << 14)
-#define CAMDMA_CCR_DST_AMODE_CONST_ADDR (0 << 14)
-#define CAMDMA_CCR_DST_AMODE_POST_INC (1 << 14)
-#define CAMDMA_CCR_DST_AMODE_SGL_IDX (2 << 14)
-#define CAMDMA_CCR_DST_AMODE_DBL_IDX (3 << 14)
-#define CAMDMA_CCR_SRC_AMODE (3 << 12)
-#define CAMDMA_CCR_SRC_AMODE_CONST_ADDR (0 << 12)
-#define CAMDMA_CCR_SRC_AMODE_POST_INC (1 << 12)
-#define CAMDMA_CCR_SRC_AMODE_SGL_IDX (2 << 12)
-#define CAMDMA_CCR_SRC_AMODE_DBL_IDX (3 << 12)
-#define CAMDMA_CCR_WR_ACTIVE (1 << 10)
-#define CAMDMA_CCR_RD_ACTIVE (1 << 9)
-#define CAMDMA_CCR_SUSPEND_SENSITIVE (1 << 8)
-#define CAMDMA_CCR_ENABLE (1 << 7)
-#define CAMDMA_CCR_PRIO (1 << 6)
-#define CAMDMA_CCR_FS (1 << 5)
-#define CAMDMA_CCR_SYNCHRO ((3 << 19) | (31 << 0))
-#define CAMDMA_CCR_SYNCHRO_CAMERA 0x01
-
-#define CAMDMA_CLNK_CTRL_ENABLE_LNK (1 << 15)
-#define CAMDMA_CLNK_CTRL_NEXTLCH_ID (0x1F << 0)
-#define CAMDMA_CLNK_CTRL_NEXTLCH_ID_SHIFT 0
-
-#define CAMDMA_CICR_MISALIGNED_ERR_IE (1 << 11)
-#define CAMDMA_CICR_SUPERVISOR_ERR_IE (1 << 10)
-#define CAMDMA_CICR_SECURE_ERR_IE (1 << 9)
-#define CAMDMA_CICR_TRANS_ERR_IE (1 << 8)
-#define CAMDMA_CICR_PACKET_IE (1 << 7)
-#define CAMDMA_CICR_BLOCK_IE (1 << 5)
-#define CAMDMA_CICR_LAST_IE (1 << 4)
-#define CAMDMA_CICR_FRAME_IE (1 << 3)
-#define CAMDMA_CICR_HALF_IE (1 << 2)
-#define CAMDMA_CICR_DROP_IE (1 << 1)
-
-#define CAMDMA_CSR_MISALIGNED_ERR (1 << 11)
-#define CAMDMA_CSR_SUPERVISOR_ERR (1 << 10)
-#define CAMDMA_CSR_SECURE_ERR (1 << 9)
-#define CAMDMA_CSR_TRANS_ERR (1 << 8)
-#define CAMDMA_CSR_PACKET (1 << 7)
-#define CAMDMA_CSR_SYNC (1 << 6)
-#define CAMDMA_CSR_BLOCK (1 << 5)
-#define CAMDMA_CSR_LAST (1 << 4)
-#define CAMDMA_CSR_FRAME (1 << 3)
-#define CAMDMA_CSR_HALF (1 << 2)
-#define CAMDMA_CSR_DROP (1 << 1)
-
-#define CAMDMA_CSDP_SRC_ENDIANNESS (1 << 21)
-#define CAMDMA_CSDP_SRC_ENDIANNESS_LOCK (1 << 20)
-#define CAMDMA_CSDP_DST_ENDIANNESS (1 << 19)
-#define CAMDMA_CSDP_DST_ENDIANNESS_LOCK (1 << 18)
-#define CAMDMA_CSDP_WRITE_MODE (3 << 16)
-#define CAMDMA_CSDP_WRITE_MODE_WRNP (0 << 16)
-#define CAMDMA_CSDP_WRITE_MODE_POSTED (1 << 16)
-#define CAMDMA_CSDP_WRITE_MODE_POSTED_LAST_WRNP (2 << 16)
-#define CAMDMA_CSDP_DST_BURST_EN (3 << 14)
-#define CAMDMA_CSDP_DST_BURST_EN_1 (0 << 14)
-#define CAMDMA_CSDP_DST_BURST_EN_16 (1 << 14)
-#define CAMDMA_CSDP_DST_BURST_EN_32 (2 << 14)
-#define CAMDMA_CSDP_DST_BURST_EN_64 (3 << 14)
-#define CAMDMA_CSDP_DST_PACKED (1 << 13)
-#define CAMDMA_CSDP_WR_ADD_TRSLT (15 << 9)
-#define CAMDMA_CSDP_WR_ADD_TRSLT_ENABLE_MREQADD (3 << 9)
-#define CAMDMA_CSDP_SRC_BURST_EN (3 << 7)
-#define CAMDMA_CSDP_SRC_BURST_EN_1 (0 << 7)
-#define CAMDMA_CSDP_SRC_BURST_EN_16 (1 << 7)
-#define CAMDMA_CSDP_SRC_BURST_EN_32 (2 << 7)
-#define CAMDMA_CSDP_SRC_BURST_EN_64 (3 << 7)
-#define CAMDMA_CSDP_SRC_PACKED (1 << 6)
-#define CAMDMA_CSDP_RD_ADD_TRSLT (15 << 2)
-#define CAMDMA_CSDP_RD_ADD_TRSLT_ENABLE_MREQADD (3 << 2)
-#define CAMDMA_CSDP_DATA_TYPE (3 << 0)
-#define CAMDMA_CSDP_DATA_TYPE_8BITS (0 << 0)
-#define CAMDMA_CSDP_DATA_TYPE_16BITS (1 << 0)
-#define CAMDMA_CSDP_DATA_TYPE_32BITS (2 << 0)
-
-#define CAMMMU_SYSCONFIG_AUTOIDLE (1 << 0)
-
-/*
- *
- * Declarations.
- *
- */
-
-/* forward declarations */
-struct omap24xxcam_sgdma;
-struct omap24xxcam_dma;
-
-typedef void (*sgdma_callback_t)(struct omap24xxcam_sgdma *cam,
- u32 status, void *arg);
-typedef void (*dma_callback_t)(struct omap24xxcam_dma *cam,
- u32 status, void *arg);
-
-struct channel_state {
- dma_callback_t callback;
- void *arg;
-};
-
-/* sgdma state for each of the possible videobuf_buffers + 2 overlays */
-struct sgdma_state {
- const struct scatterlist *sglist;
- int sglen; /* number of sglist entries */
- int next_sglist; /* index of next sglist entry to process */
- unsigned int bytes_read; /* number of bytes read */
- unsigned int len; /* total length of sglist (excluding
- * bytes due to page alignment) */
- int queued_sglist; /* number of sglist entries queued for DMA */
- u32 csr; /* DMA return code */
- sgdma_callback_t callback;
- void *arg;
-};
-
-/* physical DMA channel management */
-struct omap24xxcam_dma {
- spinlock_t lock; /* Lock for the whole structure. */
-
- void __iomem *base; /* base address for dma controller */
-
- /* While dma_stop!=0, an attempt to start a new DMA transfer will
- * fail.
- */
- atomic_t dma_stop;
- int free_dmach; /* number of dma channels free */
- int next_dmach; /* index of next dma channel to use */
- struct channel_state ch_state[NUM_CAMDMA_CHANNELS];
-};
-
-/* scatter-gather DMA (scatterlist stuff) management */
-struct omap24xxcam_sgdma {
- struct omap24xxcam_dma dma;
-
- spinlock_t lock; /* Lock for the fields below. */
- int free_sgdma; /* number of free sg dma slots */
- int next_sgdma; /* index of next sg dma slot to use */
- struct sgdma_state sg_state[NUM_SG_DMA];
-
- /* Reset timer data */
- struct timer_list reset_timer;
-};
-
-/* per-device data structure */
-struct omap24xxcam_device {
- /*** mutex ***/
- /*
- * mutex serialises access to this structure. Camera opening
- * and releasing are also synchronised by it.
- */
- struct mutex mutex;
-
- struct v4l2_device v4l2_dev;
-
- /*** general driver state information ***/
- atomic_t users;
- /*
- * Lock to serialise core enabling and disabling and access to
- * sgdma_in_queue.
- */
- spinlock_t core_enable_disable_lock;
- /*
- * Number of sgdma requests in scatter-gather queue, protected
- * by the lock above.
- */
- int sgdma_in_queue;
- /*
- * Sensor interface parameters: interface type, CC_CTRL
- * register value and interface specific data.
- */
- int if_type;
- union {
- struct parallel {
- u32 xclk;
- } bt656;
- } if_u;
- u32 cc_ctrl;
-
- /*** subsystem structures ***/
- struct omap24xxcam_sgdma sgdma;
-
- /*** hardware resources ***/
- unsigned int irq;
- void __iomem *mmio_base;
- unsigned long mmio_base_phys;
- unsigned long mmio_size;
-
- /*** interfaces and device ***/
- struct v4l2_int_device *sdev;
- struct device *dev;
- struct video_device *vfd;
-
- /*** camera and sensor reset related stuff ***/
- struct work_struct sensor_reset_work;
- /*
- * We're in the middle of a reset. Don't enable the core if
- * this is non-zero! This exists to help decision-making in
- * the case where videobuf_qbuf is called while we are in the
- * middle of a reset.
- */
- atomic_t in_reset;
- /*
- * Non-zero if we don't want any resets for now. Used to
- * prevent the reset work from running when we're about to
- * stop streaming.
- */
- atomic_t reset_disable;
-
- /*** video device parameters ***/
- int capture_mem;
-
- /*** camera module clocks ***/
- struct clk *fck;
- struct clk *ick;
-
- /*** capture data ***/
- /* file handle, if streaming is on */
- struct file *streaming;
-};
-
-/* Per-file handle data. */
-struct omap24xxcam_fh {
- spinlock_t vbq_lock; /* spinlock for the videobuf queue */
- struct videobuf_queue vbq;
- struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
- atomic_t field_count; /* field counter for videobuf_buffer */
- /* accessing cam here doesn't need serialisation: it's constant */
- struct omap24xxcam_device *cam;
-};
-
-/*
- *
- * Register I/O functions.
- *
- */
-
-static inline u32 omap24xxcam_reg_in(u32 __iomem *base, u32 offset)
-{
- return readl(base + offset);
-}
-
-static inline u32 omap24xxcam_reg_out(u32 __iomem *base, u32 offset,
- u32 val)
-{
- writel(val, base + offset);
- return val;
-}
-
-static inline u32 omap24xxcam_reg_merge(u32 __iomem *base, u32 offset,
- u32 val, u32 mask)
-{
- u32 __iomem *addr = base + offset;
- u32 new_val = (readl(addr) & ~mask) | (val & mask);
-
- writel(new_val, addr);
- return new_val;
-}
-
-/*
- *
- * Function prototypes.
- *
- */
-
-/* dma prototypes */
-
-void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma);
-void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma);
-
-/* sgdma prototypes */
-
-void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma);
-int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
- const struct scatterlist *sglist, int sglen,
- int len, sgdma_callback_t callback, void *arg);
-void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
-void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- void __iomem *base,
- void (*reset_callback)(unsigned long data),
- unsigned long reset_callback_data);
-void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
-
-#endif
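
The mask and _SHIFT pairs removed above were used together with the register I/O helpers for read-modify-write field updates. A minimal sketch of that pattern, not part of the patch; the control-register offset is passed in as a parameter here because its macro name lives in an earlier, unshown part of the header:

/*
 * Illustrative only: switch the camera core parallel interface to
 * 10-bit NoBT mode without disturbing the other control bits, using
 * omap24xxcam_reg_merge() and the CC_CTRL_PAR_MODE* masks above.
 * cc_ctrl_offset stands in for the register offset macro defined in
 * the earlier part of the header.
 */
static void example_set_par_mode_nobt10(u32 __iomem *base, u32 cc_ctrl_offset)
{
	omap24xxcam_reg_merge(base, cc_ctrl_offset,
			      CC_CTRL_PAR_MODE_NOBT10, CC_CTRL_PAR_MODE);
}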
diff --git a/drivers/staging/media/omap24xx/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
deleted file mode 100644
index 9d9ecf1fc4ae..000000000000
--- a/drivers/staging/media/omap24xx/tcm825x.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/*
- * drivers/media/i2c/tcm825x.c
- *
- * TCM825X camera sensor driver.
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * Based on code from David Cohen <david.cohen@indt.org.br>
- *
- * This driver was based on ov9640 sensor driver from MontaVista
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include "v4l2-int-device.h"
-
-#include "tcm825x.h"
-
-/*
- * The sensor has two fps modes: the lower one just gives half the fps
- * at the same xclk as the high one.
- */
-#define MAX_FPS 30
-#define MIN_FPS 8
-#define MAX_HALF_FPS (MAX_FPS / 2)
-#define HIGH_FPS_MODE_LOWER_LIMIT 14
-#define DEFAULT_FPS MAX_HALF_FPS
-
-struct tcm825x_sensor {
- const struct tcm825x_platform_data *platform_data;
- struct v4l2_int_device *v4l2_int_device;
- struct i2c_client *i2c_client;
- struct v4l2_pix_format pix;
- struct v4l2_fract timeperframe;
-};
-
-/* list of image formats supported by TCM825X sensor */
-static const struct v4l2_fmtdesc tcm825x_formats[] = {
- {
- .description = "YUYV (YUV 4:2:2), packed",
- .pixelformat = V4L2_PIX_FMT_UYVY,
- }, {
- /* Note: V4L2 defines RGB565 as:
- *
- * Byte 0 Byte 1
- * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
- *
- * We interpret RGB565 as:
- *
- * Byte 0 Byte 1
- * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
- */
- .description = "RGB565, le",
- .pixelformat = V4L2_PIX_FMT_RGB565,
- },
-};
-
-#define TCM825X_NUM_CAPTURE_FORMATS ARRAY_SIZE(tcm825x_formats)
-
-/*
- * TCM825X register configuration for all combinations of pixel format and
- * image size
- */
-static const struct tcm825x_reg subqcif = { 0x20, TCM825X_PICSIZ };
-static const struct tcm825x_reg qcif = { 0x18, TCM825X_PICSIZ };
-static const struct tcm825x_reg cif = { 0x14, TCM825X_PICSIZ };
-static const struct tcm825x_reg qqvga = { 0x0c, TCM825X_PICSIZ };
-static const struct tcm825x_reg qvga = { 0x04, TCM825X_PICSIZ };
-static const struct tcm825x_reg vga = { 0x00, TCM825X_PICSIZ };
-
-static const struct tcm825x_reg yuv422 = { 0x00, TCM825X_PICFMT };
-static const struct tcm825x_reg rgb565 = { 0x02, TCM825X_PICFMT };
-
-/* Our own specific controls */
-#define V4L2_CID_ALC V4L2_CID_PRIVATE_BASE
-#define V4L2_CID_H_EDGE_EN (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_V_EDGE_EN (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_LENS (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_MAX_EXPOSURE_TIME (V4L2_CID_PRIVATE_BASE + 4)
-#define V4L2_CID_LAST_PRIV V4L2_CID_MAX_EXPOSURE_TIME
-
-/* Video controls */
-static struct vcontrol {
- struct v4l2_queryctrl qc;
- u16 reg;
- u16 start_bit;
-} video_control[] = {
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- },
- .reg = TCM825X_AG,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- },
- .reg = TCM825X_MRG,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- },
- .reg = TCM825X_MBG,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 0,
- },
- .reg = TCM825X_AWBSW,
- .start_bit = 7,
- },
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure Time",
- .minimum = 0,
- .maximum = 0x1fff,
- .step = 1,
- },
- .reg = TCM825X_ESRSPD_U,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror Image",
- .minimum = 0,
- .maximum = 1,
- .step = 0,
- },
- .reg = TCM825X_H_INV,
- .start_bit = 6,
- },
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vertical Flip",
- .minimum = 0,
- .maximum = 1,
- .step = 0,
- },
- .reg = TCM825X_V_INV,
- .start_bit = 7,
- },
- /* Private controls */
- {
- {
- .id = V4L2_CID_ALC,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Luminance Control",
- .minimum = 0,
- .maximum = 1,
- .step = 0,
- },
- .reg = TCM825X_ALCSW,
- .start_bit = 7,
- },
- {
- {
- .id = V4L2_CID_H_EDGE_EN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Horizontal Edge Enhancement",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- },
- .reg = TCM825X_HDTG,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_V_EDGE_EN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Vertical Edge Enhancement",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- },
- .reg = TCM825X_VDTG,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_LENS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Lens Shading Compensation",
- .minimum = 0,
- .maximum = 0x3f,
- .step = 1,
- },
- .reg = TCM825X_LENS,
- .start_bit = 0,
- },
- {
- {
- .id = V4L2_CID_MAX_EXPOSURE_TIME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Maximum Exposure Time",
- .minimum = 0,
- .maximum = 0x3,
- .step = 1,
- },
- .reg = TCM825X_ESRLIM,
- .start_bit = 5,
- },
-};
-
-
-static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] = {
- &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
-
-static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] = {
- &yuv422, &rgb565 };
-
-/*
- * Read a value from a register in a TCM825X sensor device.
- * Returns the register value if successful, or a negative error code
- * otherwise.
- */
-static int tcm825x_read_reg(struct i2c_client *client, int reg)
-{
- int err;
- struct i2c_msg msg[2];
- u8 reg_buf, data_buf = 0;
-
- if (!client->adapter)
- return -ENODEV;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &reg_buf;
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &data_buf;
-
- reg_buf = reg;
-
- err = i2c_transfer(client->adapter, msg, 2);
- if (err < 0)
- return err;
- return data_buf;
-}
-
-/*
- * Write a value to a register in a TCM825X sensor device.
- * Returns zero if successful, or non-zero otherwise.
- */
-static int tcm825x_write_reg(struct i2c_client *client, u8 reg, u8 val)
-{
- int err;
- struct i2c_msg msg[1];
- unsigned char data[2];
-
- if (!client->adapter)
- return -ENODEV;
-
- msg->addr = client->addr;
- msg->flags = 0;
- msg->len = 2;
- msg->buf = data;
- data[0] = reg;
- data[1] = val;
- err = i2c_transfer(client->adapter, msg, 1);
- if (err >= 0)
- return 0;
- return err;
-}
-
-static int __tcm825x_write_reg_mask(struct i2c_client *client,
- u8 reg, u8 val, u8 mask)
-{
- int rc;
-
- /* need to do a read-modify-write */
- rc = tcm825x_read_reg(client, reg);
- if (rc < 0)
- return rc;
-
- rc &= (~mask); /* Clear the masked bits */
- val &= mask; /* Enforce mask on value */
- val |= rc;
-
- /* write the new value to the register */
- rc = tcm825x_write_reg(client, reg, val);
- if (rc)
- return rc;
-
- return 0;
-}
-
-#define tcm825x_write_reg_mask(client, regmask, val) \
- __tcm825x_write_reg_mask(client, TCM825X_ADDR((regmask)), val, \
- TCM825X_MASK((regmask)))
-
-
-/*
- * Initialize the TCM825X registers from a list of register/value pairs.
- * The list of registers is terminated by the pair of values
- * { TCM825X_REG_TERM, TCM825X_VAL_TERM }.
- * Returns zero if successful, or non-zero otherwise.
- */
-static int tcm825x_write_default_regs(struct i2c_client *client,
- const struct tcm825x_reg *reglist)
-{
- int err;
- const struct tcm825x_reg *next = reglist;
-
- while (!((next->reg == TCM825X_REG_TERM)
- && (next->val == TCM825X_VAL_TERM))) {
- err = tcm825x_write_reg(client, next->reg, next->val);
- if (err) {
- dev_err(&client->dev, "register writing failed\n");
- return err;
- }
- next++;
- }
-
- return 0;
-}
-
-static struct vcontrol *find_vctrl(int id)
-{
- int i;
-
- if (id < V4L2_CID_BASE)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(video_control); i++)
- if (video_control[i].qc.id == id)
- return &video_control[i];
-
- return NULL;
-}
-
-/*
- * Find the best match for a requested image capture size. The best match
- * is chosen as the nearest match that has the same number of pixels as,
- * or fewer than, the requested size; or the smallest image size if the
- * requested size has fewer pixels than the smallest image.
- */
-static enum image_size tcm825x_find_size(struct v4l2_int_device *s,
- unsigned int width,
- unsigned int height)
-{
- enum image_size isize;
- unsigned long pixels = width * height;
- struct tcm825x_sensor *sensor = s->priv;
-
- for (isize = subQCIF; isize < VGA; isize++) {
- if (tcm825x_sizes[isize + 1].height
- * tcm825x_sizes[isize + 1].width > pixels) {
- dev_dbg(&sensor->i2c_client->dev, "size %d\n", isize);
-
- return isize;
- }
- }
-
- dev_dbg(&sensor->i2c_client->dev, "format default VGA\n");
-
- return VGA;
-}
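
A few worked mappings of the selection rule above, using the size table from tcm825x.h further down in this patch (subQCIF 128x96 up to VGA 640x480); illustrative only:

/*
 * Illustrative only: how tcm825x_find_size() resolves requests.
 */
static void example_find_size(struct v4l2_int_device *s)
{
	tcm825x_find_size(s, 300, 200);	/* 60000 px -> QCIF (176x144) */
	tcm825x_find_size(s, 100, 50);	/* below subQCIF -> subQCIF (128x96) */
	tcm825x_find_size(s, 800, 600);	/* above VGA -> VGA (640x480) */
}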
-
-/*
- * Configure the TCM825X for the current image size, pixel format, and
- * frame period. fper is the frame period (in seconds) expressed as a
- * fraction, taken from the sensor state. Returns zero if successful,
- * or non-zero otherwise.
- */
-static int tcm825x_configure(struct v4l2_int_device *s)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct v4l2_pix_format *pix = &sensor->pix;
- enum image_size isize = tcm825x_find_size(s, pix->width, pix->height);
- struct v4l2_fract *fper = &sensor->timeperframe;
- enum pixel_format pfmt;
- int err;
- u32 tgt_fps;
- u8 val;
-
- /* common register initialization */
- err = tcm825x_write_default_regs(
- sensor->i2c_client, sensor->platform_data->default_regs());
- if (err)
- return err;
-
- /* configure image size */
- val = tcm825x_siz_reg[isize]->val;
- dev_dbg(&sensor->i2c_client->dev,
- "configuring image size %d\n", isize);
- err = tcm825x_write_reg_mask(sensor->i2c_client,
- tcm825x_siz_reg[isize]->reg, val);
- if (err)
- return err;
-
- /* configure pixel format */
- switch (pix->pixelformat) {
- default:
- case V4L2_PIX_FMT_RGB565:
- pfmt = RGB565;
- break;
- case V4L2_PIX_FMT_UYVY:
- pfmt = YUV422;
- break;
- }
-
- dev_dbg(&sensor->i2c_client->dev,
- "configuring pixel format %d\n", pfmt);
- val = tcm825x_fmt_reg[pfmt]->val;
-
- err = tcm825x_write_reg_mask(sensor->i2c_client,
- tcm825x_fmt_reg[pfmt]->reg, val);
- if (err)
- return err;
-
- /*
- * For frame rate < 15, the FPS reg (addr 0x02, bit 7) must be
- * set. The frame rate will be halved compared to the normal rate.
- */
- tgt_fps = fper->denominator / fper->numerator;
- if (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) {
- val = tcm825x_read_reg(sensor->i2c_client, 0x02);
- val |= 0x80;
- tcm825x_write_reg(sensor->i2c_client, 0x02, val);
- }
-
- return 0;
-}
-
-static int ioctl_queryctrl(struct v4l2_int_device *s,
- struct v4l2_queryctrl *qc)
-{
- struct vcontrol *control;
-
- control = find_vctrl(qc->id);
-
- if (control == NULL)
- return -EINVAL;
-
- *qc = control->qc;
-
- return 0;
-}
-
-static int ioctl_g_ctrl(struct v4l2_int_device *s,
- struct v4l2_control *vc)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct i2c_client *client = sensor->i2c_client;
- int val, r;
- struct vcontrol *lvc;
-
- /* exposure time is special, spread across 2 registers */
- if (vc->id == V4L2_CID_EXPOSURE) {
- int val_lower, val_upper;
-
- val_upper = tcm825x_read_reg(client,
- TCM825X_ADDR(TCM825X_ESRSPD_U));
- if (val_upper < 0)
- return val_upper;
- val_lower = tcm825x_read_reg(client,
- TCM825X_ADDR(TCM825X_ESRSPD_L));
- if (val_lower < 0)
- return val_lower;
-
- vc->value = ((val_upper & 0x1f) << 8) | (val_lower);
- return 0;
- }
-
- lvc = find_vctrl(vc->id);
- if (lvc == NULL)
- return -EINVAL;
-
- r = tcm825x_read_reg(client, TCM825X_ADDR(lvc->reg));
- if (r < 0)
- return r;
- val = r & TCM825X_MASK(lvc->reg);
- val >>= lvc->start_bit;
-
- if (val < 0)
- return val;
-
- if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
- val ^= sensor->platform_data->is_upside_down();
-
- vc->value = val;
- return 0;
-}
-
-static int ioctl_s_ctrl(struct v4l2_int_device *s,
- struct v4l2_control *vc)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct i2c_client *client = sensor->i2c_client;
- struct vcontrol *lvc;
- int val = vc->value;
-
- /* exposure time is special, spread across 2 registers */
- if (vc->id == V4L2_CID_EXPOSURE) {
- int val_lower, val_upper;
-
- val_lower = val & TCM825X_MASK(TCM825X_ESRSPD_L);
- val_upper = (val >> 8) & TCM825X_MASK(TCM825X_ESRSPD_U);
-
- if (tcm825x_write_reg_mask(client,
- TCM825X_ESRSPD_U, val_upper))
- return -EIO;
-
- if (tcm825x_write_reg_mask(client,
- TCM825X_ESRSPD_L, val_lower))
- return -EIO;
-
- return 0;
- }
-
- lvc = find_vctrl(vc->id);
- if (lvc == NULL)
- return -EINVAL;
-
- if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
- val ^= sensor->platform_data->is_upside_down();
-
- val = val << lvc->start_bit;
- if (tcm825x_write_reg_mask(client, lvc->reg, val))
- return -EIO;
-
- return 0;
-}
-
-static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
- struct v4l2_fmtdesc *fmt)
-{
- int index = fmt->index;
-
- switch (fmt->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (index >= TCM825X_NUM_CAPTURE_FORMATS)
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- fmt->flags = tcm825x_formats[index].flags;
- strlcpy(fmt->description, tcm825x_formats[index].description,
- sizeof(fmt->description));
- fmt->pixelformat = tcm825x_formats[index].pixelformat;
-
- return 0;
-}
-
-static int ioctl_try_fmt_cap(struct v4l2_int_device *s,
- struct v4l2_format *f)
-{
- struct tcm825x_sensor *sensor = s->priv;
- enum image_size isize;
- int ifmt;
- struct v4l2_pix_format *pix = &f->fmt.pix;
-
- isize = tcm825x_find_size(s, pix->width, pix->height);
- dev_dbg(&sensor->i2c_client->dev, "isize = %d num_capture = %lu\n",
- isize, (unsigned long)TCM825X_NUM_CAPTURE_FORMATS);
-
- pix->width = tcm825x_sizes[isize].width;
- pix->height = tcm825x_sizes[isize].height;
-
- for (ifmt = 0; ifmt < TCM825X_NUM_CAPTURE_FORMATS; ifmt++)
- if (pix->pixelformat == tcm825x_formats[ifmt].pixelformat)
- break;
-
- if (ifmt == TCM825X_NUM_CAPTURE_FORMATS)
- ifmt = 0; /* Default = YUV 4:2:2 */
-
- pix->pixelformat = tcm825x_formats[ifmt].pixelformat;
- pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = pix->width * TCM825X_BYTES_PER_PIXEL;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->priv = 0;
- dev_dbg(&sensor->i2c_client->dev, "format = 0x%08x\n",
- pix->pixelformat);
-
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_UYVY:
- default:
- pix->colorspace = V4L2_COLORSPACE_JPEG;
- break;
- case V4L2_PIX_FMT_RGB565:
- pix->colorspace = V4L2_COLORSPACE_SRGB;
- break;
- }
-
- return 0;
-}
-
-static int ioctl_s_fmt_cap(struct v4l2_int_device *s,
- struct v4l2_format *f)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- int rval;
-
- rval = ioctl_try_fmt_cap(s, f);
- if (rval)
- return rval;
-
- rval = tcm825x_configure(s);
-
- sensor->pix = *pix;
-
- return rval;
-}
-
-static int ioctl_g_fmt_cap(struct v4l2_int_device *s,
- struct v4l2_format *f)
-{
- struct tcm825x_sensor *sensor = s->priv;
-
- f->fmt.pix = sensor->pix;
-
- return 0;
-}
-
-static int ioctl_g_parm(struct v4l2_int_device *s,
- struct v4l2_streamparm *a)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct v4l2_captureparm *cparm = &a->parm.capture;
-
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- memset(a, 0, sizeof(*a));
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- cparm->capability = V4L2_CAP_TIMEPERFRAME;
- cparm->timeperframe = sensor->timeperframe;
-
- return 0;
-}
-
-static int ioctl_s_parm(struct v4l2_int_device *s,
- struct v4l2_streamparm *a)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
- u32 tgt_fps; /* target frames per second */
- int rval;
-
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if ((timeperframe->numerator == 0)
- || (timeperframe->denominator == 0)) {
- timeperframe->denominator = DEFAULT_FPS;
- timeperframe->numerator = 1;
- }
-
- tgt_fps = timeperframe->denominator / timeperframe->numerator;
-
- if (tgt_fps > MAX_FPS) {
- timeperframe->denominator = MAX_FPS;
- timeperframe->numerator = 1;
- } else if (tgt_fps < MIN_FPS) {
- timeperframe->denominator = MIN_FPS;
- timeperframe->numerator = 1;
- }
-
- sensor->timeperframe = *timeperframe;
-
- rval = tcm825x_configure(s);
-
- return rval;
-}
-
-static int ioctl_s_power(struct v4l2_int_device *s, int on)
-{
- struct tcm825x_sensor *sensor = s->priv;
-
- return sensor->platform_data->power_set(on);
-}
-
-/*
- * Given the image capture format in pix and the nominal frame period
- * in timeperframe, calculate the required xclk frequency.
- *
- * TCM825X input frequency characteristics are:
- * Minimum 11.9 MHz, Typical 24.57 MHz and maximum 25/27 MHz
- */
-
-static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
-{
- struct tcm825x_sensor *sensor = s->priv;
- struct v4l2_fract *timeperframe = &sensor->timeperframe;
- u32 tgt_xclk; /* target xclk */
- u32 tgt_fps; /* target frames per second */
- int rval;
-
- rval = sensor->platform_data->ifparm(p);
- if (rval)
- return rval;
-
- tgt_fps = timeperframe->denominator / timeperframe->numerator;
-
- tgt_xclk = (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) ?
- (2457 * tgt_fps) / MAX_HALF_FPS :
- (2457 * tgt_fps) / MAX_FPS;
- tgt_xclk *= 10000;
-
- tgt_xclk = min(tgt_xclk, (u32)TCM825X_XCLK_MAX);
- tgt_xclk = max(tgt_xclk, (u32)TCM825X_XCLK_MIN);
-
- p->u.bt656.clock_curr = tgt_xclk;
-
- return 0;
-}
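
A couple of worked values for the xclk calculation above (integer arithmetic, before the clamp to TCM825X_XCLK_MIN/TCM825X_XCLK_MAX); illustrative only:

/*
 * Illustrative values for ioctl_g_ifparm():
 *   30 fps (high-fps mode):  (2457 * 30) / 30 * 10000 = 24570000 Hz
 *    8 fps (half-rate mode): (2457 *  8) / 15 * 10000 = 13100000 Hz
 * Both fall inside the 11.9-25 MHz window, so the clamp is a no-op here.
 */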
-
-static int ioctl_g_needs_reset(struct v4l2_int_device *s, void *buf)
-{
- struct tcm825x_sensor *sensor = s->priv;
-
- return sensor->platform_data->needs_reset(s, buf, &sensor->pix);
-}
-
-static int ioctl_reset(struct v4l2_int_device *s)
-{
- return -EBUSY;
-}
-
-static int ioctl_init(struct v4l2_int_device *s)
-{
- return tcm825x_configure(s);
-}
-
-static int ioctl_dev_exit(struct v4l2_int_device *s)
-{
- return 0;
-}
-
-static int ioctl_dev_init(struct v4l2_int_device *s)
-{
- struct tcm825x_sensor *sensor = s->priv;
- int r;
-
- r = tcm825x_read_reg(sensor->i2c_client, 0x01);
- if (r < 0)
- return r;
- if (r == 0) {
- dev_err(&sensor->i2c_client->dev, "device not detected\n");
- return -EIO;
- }
- return 0;
-}
-
-static struct v4l2_int_ioctl_desc tcm825x_ioctl_desc[] = {
- { vidioc_int_dev_init_num,
- (v4l2_int_ioctl_func *)ioctl_dev_init },
- { vidioc_int_dev_exit_num,
- (v4l2_int_ioctl_func *)ioctl_dev_exit },
- { vidioc_int_s_power_num,
- (v4l2_int_ioctl_func *)ioctl_s_power },
- { vidioc_int_g_ifparm_num,
- (v4l2_int_ioctl_func *)ioctl_g_ifparm },
- { vidioc_int_g_needs_reset_num,
- (v4l2_int_ioctl_func *)ioctl_g_needs_reset },
- { vidioc_int_reset_num,
- (v4l2_int_ioctl_func *)ioctl_reset },
- { vidioc_int_init_num,
- (v4l2_int_ioctl_func *)ioctl_init },
- { vidioc_int_enum_fmt_cap_num,
- (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
- { vidioc_int_try_fmt_cap_num,
- (v4l2_int_ioctl_func *)ioctl_try_fmt_cap },
- { vidioc_int_g_fmt_cap_num,
- (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
- { vidioc_int_s_fmt_cap_num,
- (v4l2_int_ioctl_func *)ioctl_s_fmt_cap },
- { vidioc_int_g_parm_num,
- (v4l2_int_ioctl_func *)ioctl_g_parm },
- { vidioc_int_s_parm_num,
- (v4l2_int_ioctl_func *)ioctl_s_parm },
- { vidioc_int_queryctrl_num,
- (v4l2_int_ioctl_func *)ioctl_queryctrl },
- { vidioc_int_g_ctrl_num,
- (v4l2_int_ioctl_func *)ioctl_g_ctrl },
- { vidioc_int_s_ctrl_num,
- (v4l2_int_ioctl_func *)ioctl_s_ctrl },
-};
-
-static struct v4l2_int_slave tcm825x_slave = {
- .ioctls = tcm825x_ioctl_desc,
- .num_ioctls = ARRAY_SIZE(tcm825x_ioctl_desc),
-};
-
-static struct tcm825x_sensor tcm825x;
-
-static struct v4l2_int_device tcm825x_int_device = {
- .module = THIS_MODULE,
- .name = TCM825X_NAME,
- .priv = &tcm825x,
- .type = v4l2_int_type_slave,
- .u = {
- .slave = &tcm825x_slave,
- },
-};
-
-static int tcm825x_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-{
- struct tcm825x_sensor *sensor = &tcm825x;
-
- if (i2c_get_clientdata(client))
- return -EBUSY;
-
- sensor->platform_data = client->dev.platform_data;
-
- if (sensor->platform_data == NULL
- || !sensor->platform_data->is_okay())
- return -ENODEV;
-
- sensor->v4l2_int_device = &tcm825x_int_device;
-
- sensor->i2c_client = client;
- i2c_set_clientdata(client, sensor);
-
- /* Make the default capture format QVGA RGB565 */
- sensor->pix.width = tcm825x_sizes[QVGA].width;
- sensor->pix.height = tcm825x_sizes[QVGA].height;
- sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565;
-
- return v4l2_int_device_register(sensor->v4l2_int_device);
-}
-
-static int tcm825x_remove(struct i2c_client *client)
-{
- struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
-
- if (!client->adapter)
- return -ENODEV; /* our client isn't attached */
-
- v4l2_int_device_unregister(sensor->v4l2_int_device);
-
- return 0;
-}
-
-static const struct i2c_device_id tcm825x_id[] = {
- { "tcm825x", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tcm825x_id);
-
-static struct i2c_driver tcm825x_i2c_driver = {
- .driver = {
- .name = TCM825X_NAME,
- },
- .probe = tcm825x_probe,
- .remove = tcm825x_remove,
- .id_table = tcm825x_id,
-};
-
-static struct tcm825x_sensor tcm825x = {
- .timeperframe = {
- .numerator = 1,
- .denominator = DEFAULT_FPS,
- },
-};
-
-static int __init tcm825x_init(void)
-{
- int rval;
-
- rval = i2c_add_driver(&tcm825x_i2c_driver);
- if (rval)
- pr_info("%s: failed registering " TCM825X_NAME "\n",
- __func__);
-
- return rval;
-}
-
-static void __exit tcm825x_exit(void)
-{
- i2c_del_driver(&tcm825x_i2c_driver);
-}
-
-/*
- * FIXME: Menelaus isn't ready (?) at module_init stage, so use
- * late_initcall for now.
- */
-late_initcall(tcm825x_init);
-module_exit(tcm825x_exit);
-
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
-MODULE_DESCRIPTION("TCM825x camera sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/omap24xx/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
deleted file mode 100644
index 8a29636d1ad4..000000000000
--- a/drivers/staging/media/omap24xx/tcm825x.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * drivers/media/i2c/tcm825x.h
- *
- * Register definitions for the TCM825X CameraChip.
- *
- * Author: David Cohen (david.cohen@indt.org.br)
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- * This file was based on ov9640.h from MontaVista
- */
-
-#ifndef TCM825X_H
-#define TCM825X_H
-
-#include <linux/videodev2.h>
-
-#include "v4l2-int-device.h"
-
-#define TCM825X_NAME "tcm825x"
-
-#define TCM825X_MASK(x) (x & 0x00ff)
-#define TCM825X_ADDR(x) ((x & 0xff00) >> 8)
-
-/* The TCM825X I2C sensor chip has a fixed slave address of 0x3d. */
-#define TCM825X_I2C_ADDR 0x3d
-
-/*
- * define register offsets for the TCM825X sensor chip
- * OFFSET(8 bits) + MASK(8 bits)
- * MASK bits 4 and 3 are used when the register uses more than one address
- */
-#define TCM825X_FPS 0x0280
-#define TCM825X_ACF 0x0240
-#define TCM825X_DOUTBUF 0x020C
-#define TCM825X_DCLKP 0x0202
-#define TCM825X_ACFDET 0x0201
-#define TCM825X_DOUTSW 0x0380
-#define TCM825X_DATAHZ 0x0340
-#define TCM825X_PICSIZ 0x033c
-#define TCM825X_PICFMT 0x0302
-#define TCM825X_V_INV 0x0480
-#define TCM825X_H_INV 0x0440
-#define TCM825X_ESRLSW 0x0430
-#define TCM825X_V_LENGTH 0x040F
-#define TCM825X_ALCSW 0x0580
-#define TCM825X_ESRLIM 0x0560
-#define TCM825X_ESRSPD_U 0x051F
-#define TCM825X_ESRSPD_L 0x06FF
-#define TCM825X_AG 0x07FF
-#define TCM825X_ESRSPD2 0x06FF
-#define TCM825X_ALCMODE 0x0830
-#define TCM825X_ALCH 0x080F
-#define TCM825X_ALCL 0x09FF
-#define TCM825X_AWBSW 0x0A80
-#define TCM825X_MRG 0x0BFF
-#define TCM825X_MBG 0x0CFF
-#define TCM825X_GAMSW 0x0D80
-#define TCM825X_HDTG 0x0EFF
-#define TCM825X_VDTG 0x0FFF
-#define TCM825X_HDTCORE 0x10F0
-#define TCM825X_VDTCORE 0x100F
-#define TCM825X_CONT 0x11FF
-#define TCM825X_BRIGHT 0x12FF
-#define TCM825X_VHUE 0x137F
-#define TCM825X_UHUE 0x147F
-#define TCM825X_VGAIN 0x153F
-#define TCM825X_UGAIN 0x163F
-#define TCM825X_UVCORE 0x170F
-#define TCM825X_SATU 0x187F
-#define TCM825X_MHMODE 0x1980
-#define TCM825X_MHLPFSEL 0x1940
-#define TCM825X_YMODE 0x1930
-#define TCM825X_MIXHG 0x1907
-#define TCM825X_LENS 0x1A3F
-#define TCM825X_AGLIM 0x1BE0
-#define TCM825X_LENSRPOL 0x1B10
-#define TCM825X_LENSRGAIN 0x1B0F
-#define TCM825X_ES100S 0x1CFF
-#define TCM825X_ES120S 0x1DFF
-#define TCM825X_DMASK 0x1EC0
-#define TCM825X_CODESW 0x1E20
-#define TCM825X_CODESEL 0x1E10
-#define TCM825X_TESPIC 0x1E04
-#define TCM825X_PICSEL 0x1E03
-#define TCM825X_HNUM 0x20FF
-#define TCM825X_VOUTPH 0x287F
-#define TCM825X_ESROUT 0x327F
-#define TCM825X_ESROUT2 0x33FF
-#define TCM825X_AGOUT 0x34FF
-#define TCM825X_DGOUT 0x353F
-#define TCM825X_AGSLOW1 0x39C0
-#define TCM825X_FLLSMODE 0x3930
-#define TCM825X_FLLSLIM 0x390F
-#define TCM825X_DETSEL 0x3AF0
-#define TCM825X_ACDETNC 0x3A0F
-#define TCM825X_AGSLOW2 0x3BC0
-#define TCM825X_DG 0x3B3F
-#define TCM825X_REJHLEV 0x3CFF
-#define TCM825X_ALCLOCK 0x3D80
-#define TCM825X_FPSLNKSW 0x3D40
-#define TCM825X_ALCSPD 0x3D30
-#define TCM825X_REJH 0x3D03
-#define TCM825X_SHESRSW 0x3E80
-#define TCM825X_ESLIMSEL 0x3E40
-#define TCM825X_SHESRSPD 0x3E30
-#define TCM825X_ELSTEP 0x3E0C
-#define TCM825X_ELSTART 0x3E03
-#define TCM825X_AGMIN 0x3FFF
-#define TCM825X_PREGRG 0x423F
-#define TCM825X_PREGBG 0x433F
-#define TCM825X_PRERG 0x443F
-#define TCM825X_PREBG 0x453F
-#define TCM825X_MSKBR 0x477F
-#define TCM825X_MSKGR 0x487F
-#define TCM825X_MSKRB 0x497F
-#define TCM825X_MSKGB 0x4A7F
-#define TCM825X_MSKRG 0x4B7F
-#define TCM825X_MSKBG 0x4C7F
-#define TCM825X_HDTCSW 0x4D80
-#define TCM825X_VDTCSW 0x4D40
-#define TCM825X_DTCYL 0x4D3F
-#define TCM825X_HDTPSW 0x4E80
-#define TCM825X_VDTPSW 0x4E40
-#define TCM825X_DTCGAIN 0x4E3F
-#define TCM825X_DTLLIMSW 0x4F10
-#define TCM825X_DTLYLIM 0x4F0F
-#define TCM825X_YLCUTLMSK 0x5080
-#define TCM825X_YLCUTL 0x503F
-#define TCM825X_YLCUTHMSK 0x5180
-#define TCM825X_YLCUTH 0x513F
-#define TCM825X_UVSKNC 0x527F
-#define TCM825X_UVLJ 0x537F
-#define TCM825X_WBGMIN 0x54FF
-#define TCM825X_WBGMAX 0x55FF
-#define TCM825X_WBSPDUP 0x5603
-#define TCM825X_ALLAREA 0x5820
-#define TCM825X_WBLOCK 0x5810
-#define TCM825X_WB2SP 0x580F
-#define TCM825X_KIZUSW 0x5920
-#define TCM825X_PBRSW 0x5910
-#define TCM825X_ABCSW 0x5903
-#define TCM825X_PBDLV 0x5AFF
-#define TCM825X_PBC1LV 0x5BFF
-
-#define TCM825X_NUM_REGS (TCM825X_ADDR(TCM825X_PBC1LV) + 1)
-
-#define TCM825X_BYTES_PER_PIXEL 2
-
-#define TCM825X_REG_TERM 0xff /* terminating list entry for reg */
-#define TCM825X_VAL_TERM 0xff /* terminating list entry for val */
-
-/* define a structure for tcm825x register initialization values */
-struct tcm825x_reg {
- u8 val;
- u16 reg;
-};
-
-enum image_size { subQCIF = 0, QQVGA, QCIF, QVGA, CIF, VGA };
-enum pixel_format { YUV422 = 0, RGB565 };
-#define NUM_IMAGE_SIZES 6
-#define NUM_PIXEL_FORMATS 2
-
-#define TCM825X_XCLK_MIN 11900000
-#define TCM825X_XCLK_MAX 25000000
-
-struct capture_size {
- unsigned long width;
- unsigned long height;
-};
-
-struct tcm825x_platform_data {
- /* Is the sensor usable? Doesn't yet mean it's there, but you
- * can try! */
- int (*is_okay)(void);
- /* Set power state, zero is off, non-zero is on. */
- int (*power_set)(int power);
- /* Default registers written after power-on or reset. */
- const struct tcm825x_reg * (*default_regs)(void);
- int (*needs_reset)(struct v4l2_int_device *s, void *buf,
- struct v4l2_pix_format *fmt);
- int (*ifparm)(struct v4l2_ifparm *p);
- int (*is_upside_down)(void);
-};
-
-/* Array of image sizes supported by TCM825X. These must be ordered from
- * smallest image size to largest.
- */
-static const struct capture_size tcm825x_sizes[] = {
- { 128, 96 }, /* subQCIF */
- { 160, 120 }, /* QQVGA */
- { 176, 144 }, /* QCIF */
- { 320, 240 }, /* QVGA */
- { 352, 288 }, /* CIF */
- { 640, 480 }, /* VGA */
-};
-
-#endif /* ifndef TCM825X_H */
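
How the combined OFFSET+MASK constants in this header decompose through TCM825X_ADDR() and TCM825X_MASK(), shown for the exposure registers; a sketch only, not part of the patch:

/*
 * Illustrative only: TCM825X_ESRSPD_U is 0x051F and TCM825X_ESRSPD_L is
 * 0x06FF, so the exposure value spans two registers.
 */
static void example_decode_exposure_regs(void)
{
	u8 addr_u = TCM825X_ADDR(TCM825X_ESRSPD_U);	/* 0x05 */
	u8 mask_u = TCM825X_MASK(TCM825X_ESRSPD_U);	/* 0x1f: upper 5 bits */
	u8 addr_l = TCM825X_ADDR(TCM825X_ESRSPD_L);	/* 0x06 */
	u8 mask_l = TCM825X_MASK(TCM825X_ESRSPD_L);	/* 0xff: all 8 bits */

	/*
	 * ioctl_g_ctrl() in tcm825x.c reassembles the 13-bit exposure as
	 * ((upper & 0x1f) << 8) | lower, matching the 0x1fff control range.
	 */
	(void)addr_u; (void)mask_u; (void)addr_l; (void)mask_l;
}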
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.c b/drivers/staging/media/omap24xx/v4l2-int-device.c
deleted file mode 100644
index 427a89033a1d..000000000000
--- a/drivers/staging/media/omap24xx/v4l2-int-device.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * drivers/media/video/v4l2-int-device.c
- *
- * V4L2 internal ioctl interface.
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sort.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-#include "v4l2-int-device.h"
-
-static DEFINE_MUTEX(mutex);
-static LIST_HEAD(int_list);
-
-void v4l2_int_device_try_attach_all(void)
-{
- struct v4l2_int_device *m, *s;
-
- list_for_each_entry(m, &int_list, head) {
- if (m->type != v4l2_int_type_master)
- continue;
-
- list_for_each_entry(s, &int_list, head) {
- if (s->type != v4l2_int_type_slave)
- continue;
-
- /* Slave is connected? */
- if (s->u.slave->master)
- continue;
-
- /* Slave wants to attach to master? */
- if (s->u.slave->attach_to[0] != 0
- && strncmp(m->name, s->u.slave->attach_to,
- V4L2NAMESIZE))
- continue;
-
- if (!try_module_get(m->module))
- continue;
-
- s->u.slave->master = m;
- if (m->u.master->attach(s)) {
- s->u.slave->master = NULL;
- module_put(m->module);
- continue;
- }
- }
- }
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
-
-static int ioctl_sort_cmp(const void *a, const void *b)
-{
- const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;
-
- if (d1->num > d2->num)
- return 1;
-
- if (d1->num < d2->num)
- return -1;
-
- return 0;
-}
-
-int v4l2_int_device_register(struct v4l2_int_device *d)
-{
- if (d->type == v4l2_int_type_slave)
- sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
- sizeof(struct v4l2_int_ioctl_desc),
- &ioctl_sort_cmp, NULL);
- mutex_lock(&mutex);
- list_add(&d->head, &int_list);
- v4l2_int_device_try_attach_all();
- mutex_unlock(&mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_register);
-
-void v4l2_int_device_unregister(struct v4l2_int_device *d)
-{
- mutex_lock(&mutex);
- list_del(&d->head);
- if (d->type == v4l2_int_type_slave
- && d->u.slave->master != NULL) {
- d->u.slave->master->u.master->detach(d);
- module_put(d->u.slave->master->module);
- d->u.slave->master = NULL;
- }
- mutex_unlock(&mutex);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
-
-/* Adapted from search_extable in extable.c. */
-static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
- v4l2_int_ioctl_func *no_such_ioctl)
-{
- const struct v4l2_int_ioctl_desc *first = slave->ioctls;
- const struct v4l2_int_ioctl_desc *last =
- first + slave->num_ioctls - 1;
-
- while (first <= last) {
- const struct v4l2_int_ioctl_desc *mid;
-
- mid = (last - first) / 2 + first;
-
- if (mid->num < cmd)
- first = mid + 1;
- else if (mid->num > cmd)
- last = mid - 1;
- else
- return mid->func;
- }
-
- return no_such_ioctl;
-}
-
-static int no_such_ioctl_0(struct v4l2_int_device *d)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
-{
- return ((v4l2_int_ioctl_func_0 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
-
-static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
-{
- return ((v4l2_int_ioctl_func_1 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.h b/drivers/staging/media/omap24xx/v4l2-int-device.h
deleted file mode 100644
index 0286c95814ff..000000000000
--- a/drivers/staging/media/omap24xx/v4l2-int-device.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * include/media/v4l2-int-device.h
- *
- * V4L2 internal ioctl interface.
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef V4L2_INT_DEVICE_H
-#define V4L2_INT_DEVICE_H
-
-#include <media/v4l2-common.h>
-
-#define V4L2NAMESIZE 32
-
-/*
- *
- * The internal V4L2 device interface core.
- *
- */
-
-enum v4l2_int_type {
- v4l2_int_type_master = 1,
- v4l2_int_type_slave
-};
-
-struct module;
-
-struct v4l2_int_device;
-
-struct v4l2_int_master {
- int (*attach)(struct v4l2_int_device *slave);
- void (*detach)(struct v4l2_int_device *slave);
-};
-
-typedef int (v4l2_int_ioctl_func)(struct v4l2_int_device *);
-typedef int (v4l2_int_ioctl_func_0)(struct v4l2_int_device *);
-typedef int (v4l2_int_ioctl_func_1)(struct v4l2_int_device *, void *);
-
-struct v4l2_int_ioctl_desc {
- int num;
- v4l2_int_ioctl_func *func;
-};
-
-struct v4l2_int_slave {
- /* Don't touch master. */
- struct v4l2_int_device *master;
-
- char attach_to[V4L2NAMESIZE];
-
- int num_ioctls;
- struct v4l2_int_ioctl_desc *ioctls;
-};
-
-struct v4l2_int_device {
- /* Don't touch head. */
- struct list_head head;
-
- struct module *module;
-
- char name[V4L2NAMESIZE];
-
- enum v4l2_int_type type;
- union {
- struct v4l2_int_master *master;
- struct v4l2_int_slave *slave;
- } u;
-
- void *priv;
-};
-
-void v4l2_int_device_try_attach_all(void);
-
-int v4l2_int_device_register(struct v4l2_int_device *d);
-void v4l2_int_device_unregister(struct v4l2_int_device *d);
-
-int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd);
-int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg);
-
-/*
- *
- * Types and definitions for IOCTL commands.
- *
- */
-
-enum v4l2_power {
- V4L2_POWER_OFF = 0,
- V4L2_POWER_ON,
- V4L2_POWER_STANDBY,
-};
-
-/* Slave interface type. */
-enum v4l2_if_type {
- /*
- * Parallel 8-, 10- or 12-bit interface, used for example by
- * certain image sensors.
- */
- V4L2_IF_TYPE_BT656,
-};
-
-enum v4l2_if_type_bt656_mode {
- /*
- * Modes without Bt synchronisation codes. Separate
- * synchronisation signal lines are used.
- */
- V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT,
- V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT,
- V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT,
- /*
- * Use Bt synchronisation codes. The vertical and horizontal
- * synchronisation is done based on synchronisation codes.
- */
- V4L2_IF_TYPE_BT656_MODE_BT_8BIT,
- V4L2_IF_TYPE_BT656_MODE_BT_10BIT,
-};
-
-struct v4l2_if_type_bt656 {
- /*
- * 0: Frame begins when vsync is high.
- * 1: Frame begins when vsync changes from low to high.
- */
- unsigned frame_start_on_rising_vs:1;
- /* Use Bt synchronisation codes for sync correction. */
- unsigned bt_sync_correct:1;
- /* Swap every two adjacent image data elements. */
- unsigned swap:1;
- /* Inverted latch clock polarity from slave. */
- unsigned latch_clk_inv:1;
- /* Hs polarity. 0 is active high, 1 active low. */
- unsigned nobt_hs_inv:1;
- /* Vs polarity. 0 is active high, 1 active low. */
- unsigned nobt_vs_inv:1;
- enum v4l2_if_type_bt656_mode mode;
- /* Minimum accepted bus clock for slave (in Hz). */
- u32 clock_min;
- /* Maximum accepted bus clock for slave. */
- u32 clock_max;
- /*
- * Current wish of the slave. May only change in response to
- * ioctls that affect image capture.
- */
- u32 clock_curr;
-};
-
-struct v4l2_ifparm {
- enum v4l2_if_type if_type;
- union {
- struct v4l2_if_type_bt656 bt656;
- } u;
-};
-
-/* IOCTL command numbers. */
-enum v4l2_int_ioctl_num {
- /*
- *
- * "Proper" V4L ioctls, as in struct video_device.
- *
- */
- vidioc_int_enum_fmt_cap_num = 1,
- vidioc_int_g_fmt_cap_num,
- vidioc_int_s_fmt_cap_num,
- vidioc_int_try_fmt_cap_num,
- vidioc_int_queryctrl_num,
- vidioc_int_g_ctrl_num,
- vidioc_int_s_ctrl_num,
- vidioc_int_cropcap_num,
- vidioc_int_g_crop_num,
- vidioc_int_s_crop_num,
- vidioc_int_g_parm_num,
- vidioc_int_s_parm_num,
- vidioc_int_querystd_num,
- vidioc_int_s_std_num,
- vidioc_int_s_video_routing_num,
-
- /*
- *
- * Strictly internal ioctls.
- *
- */
- /* Initialise the device when slave attaches to the master. */
- vidioc_int_dev_init_num = 1000,
- /* Deinitialise the device at slave detach. */
- vidioc_int_dev_exit_num,
- /* Set device power state. */
- vidioc_int_s_power_num,
- /*
- * Get slave private data, e.g. platform-specific slave
- * configuration used by the master.
- */
- vidioc_int_g_priv_num,
- /* Get slave interface parameters. */
- vidioc_int_g_ifparm_num,
- /* Does the slave need to be reset after VIDIOC_DQBUF? */
- vidioc_int_g_needs_reset_num,
- vidioc_int_enum_framesizes_num,
- vidioc_int_enum_frameintervals_num,
-
- /*
- *
- * VIDIOC_INT_* ioctls.
- *
- */
- /* VIDIOC_INT_RESET */
- vidioc_int_reset_num,
- /* VIDIOC_INT_INIT */
- vidioc_int_init_num,
-
- /*
- *
- * Start of private ioctls.
- *
- */
- vidioc_int_priv_start_num = 2000,
-};
-
-/*
- *
- * IOCTL wrapper functions for better type checking.
- *
- */
-
-#define V4L2_INT_WRAPPER_0(name) \
- static inline int vidioc_int_##name(struct v4l2_int_device *d) \
- { \
- return v4l2_int_ioctl_0(d, vidioc_int_##name##_num); \
- } \
- \
- static inline struct v4l2_int_ioctl_desc \
- vidioc_int_##name##_cb(int (*func) \
- (struct v4l2_int_device *)) \
- { \
- struct v4l2_int_ioctl_desc desc; \
- \
- desc.num = vidioc_int_##name##_num; \
- desc.func = (v4l2_int_ioctl_func *)func; \
- \
- return desc; \
- }
-
-#define V4L2_INT_WRAPPER_1(name, arg_type, asterisk) \
- static inline int vidioc_int_##name(struct v4l2_int_device *d, \
- arg_type asterisk arg) \
- { \
- return v4l2_int_ioctl_1(d, vidioc_int_##name##_num, \
- (void *)(unsigned long)arg); \
- } \
- \
- static inline struct v4l2_int_ioctl_desc \
- vidioc_int_##name##_cb(int (*func) \
- (struct v4l2_int_device *, \
- arg_type asterisk)) \
- { \
- struct v4l2_int_ioctl_desc desc; \
- \
- desc.num = vidioc_int_##name##_num; \
- desc.func = (v4l2_int_ioctl_func *)func; \
- \
- return desc; \
- }
-
-V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *);
-V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *);
-V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *);
-V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
-V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
-V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
-V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
-V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
-V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
-V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
-V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
-V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
-V4L2_INT_WRAPPER_1(querystd, v4l2_std_id, *);
-V4L2_INT_WRAPPER_1(s_std, v4l2_std_id, *);
-V4L2_INT_WRAPPER_1(s_video_routing, struct v4l2_routing, *);
-
-V4L2_INT_WRAPPER_0(dev_init);
-V4L2_INT_WRAPPER_0(dev_exit);
-V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
-V4L2_INT_WRAPPER_1(g_priv, void, *);
-V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
-V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
-V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
-V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
-
-V4L2_INT_WRAPPER_0(reset);
-V4L2_INT_WRAPPER_0(init);
-
-#endif
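
The V4L2_INT_WRAPPER_* macros above generate typed call helpers (vidioc_int_g_ifparm() and friends) that dispatch through the slave's sorted ioctl table in v4l2-int-device.c. A minimal sketch of the master-side call, assuming `slave` is an already-attached device:

/*
 * Illustrative only: query interface parameters from an attached slave.
 * The generated wrapper resolves the handler by binary search over the
 * slave's ioctl table and returns -ENOIOCTLCMD if it is not implemented.
 */
static int example_query_ifparm(struct v4l2_int_device *slave,
				struct v4l2_ifparm *p)
{
	return vidioc_int_g_ifparm(slave, p);
}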
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index d548371db65a..cc1dfadd91eb 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1357,10 +1357,8 @@ static int iss_probe(struct platform_device *pdev)
return -EINVAL;
iss = devm_kzalloc(&pdev->dev, sizeof(*iss), GFP_KERNEL);
- if (!iss) {
- dev_err(&pdev->dev, "Could not allocate memory\n");
+ if (!iss)
return -ENOMEM;
- }
mutex_init(&iss->iss_mutex);
@@ -1476,7 +1474,6 @@ static struct platform_driver iss_driver = {
.remove = iss_remove,
.id_table = omap4iss_id_table,
.driver = {
- .owner = THIS_MODULE,
.name = "omap4iss",
},
};
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 92c2d5b743c7..21971c675b8c 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -93,20 +93,20 @@ static void csi2_recv_config(struct iss_csi2_device *csi2,
}
static const unsigned int csi2_input_fmts[] = {
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SGBRG8_1X8,
- V4L2_MBUS_FMT_SGRBG8_1X8,
- V4L2_MBUS_FMT_SRGGB8_1X8,
- V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
};
/* To set the format on the CSI2 requires a mapping function that takes
@@ -201,26 +201,26 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
int fmtidx, destidx;
switch (fmt->code) {
- case V4L2_MBUS_FMT_SGRBG10_1X10:
- case V4L2_MBUS_FMT_SRGGB10_1X10:
- case V4L2_MBUS_FMT_SBGGR10_1X10:
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
fmtidx = 0;
break;
- case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8:
- case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
fmtidx = 1;
break;
- case V4L2_MBUS_FMT_SBGGR8_1X8:
- case V4L2_MBUS_FMT_SGBRG8_1X8:
- case V4L2_MBUS_FMT_SGRBG8_1X8:
- case V4L2_MBUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
fmtidx = 2;
break;
- case V4L2_MBUS_FMT_UYVY8_1X16:
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
fmtidx = 3;
break;
default:
@@ -817,7 +817,7 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
- enum v4l2_mbus_pixelcode pixelcode;
+ u32 pixelcode;
struct v4l2_mbus_framefmt *format;
const struct iss_format_info *info;
unsigned int i;
@@ -832,7 +832,7 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(csi2_input_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1020,7 +1020,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = CSI2_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
csi2_set_format(sd, fh, &format);
@@ -1231,7 +1231,7 @@ static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
v4l2_subdev_init(sd, &csi2_ops);
sd->internal_ops = &csi2_internal_ops;
- sprintf(name, "CSI2%s", subname);
+ snprintf(name, sizeof(name), "CSI2%s", subname);
snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
sd->grp_id = 1 << 16; /* group ID for iss subdevs */
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index 54042008154c..a1a46ef8319b 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -28,10 +28,10 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ipipe_fmts[] = {
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
};
/*
@@ -211,7 +211,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(ipipe_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
/* Clamp the input size. */
fmt->width = clamp_t(u32, width, 1, 8192);
@@ -223,7 +223,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
- fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
fmt->width = clamp_t(u32, width, 32, fmt->width);
fmt->height = clamp_t(u32, height, 32, fmt->height);
fmt->colorspace = V4L2_COLORSPACE_JPEG;
@@ -257,7 +257,7 @@ static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- code->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
break;
default:
@@ -385,7 +385,7 @@ static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
memset(&format, 0, sizeof(format));
format.pad = IPIPE_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
ipipe_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 75f6a15ad202..32a748398ced 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -24,12 +24,12 @@
#include "iss_ipipeif.h"
static const unsigned int ipipeif_fmts[] = {
- V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
};
/*
@@ -140,8 +140,8 @@ static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
/* Select ISIF/IPIPEIF input format */
switch (format->code) {
- case V4L2_MBUS_FMT_UYVY8_1X16:
- case V4L2_MBUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
ISIF_MODESET_CCDW_MASK,
@@ -151,25 +151,25 @@ static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
break;
- case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
ISIF_CCOLP_CP1_F0_R |
ISIF_CCOLP_CP2_F0_B |
ISIF_CCOLP_CP3_F0_GB;
goto cont_raw;
- case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
isif_ccolp = ISIF_CCOLP_CP0_F0_R |
ISIF_CCOLP_CP1_F0_GR |
ISIF_CCOLP_CP2_F0_GB |
ISIF_CCOLP_CP3_F0_B;
goto cont_raw;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
isif_ccolp = ISIF_CCOLP_CP0_F0_B |
ISIF_CCOLP_CP1_F0_GB |
ISIF_CCOLP_CP2_F0_GR |
ISIF_CCOLP_CP3_F0_R;
goto cont_raw;
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
ISIF_CCOLP_CP1_F0_B |
ISIF_CCOLP_CP2_F0_R |
@@ -415,7 +415,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
/* If not found, use SGRBG10 as default */
if (i >= ARRAY_SIZE(ipipeif_fmts))
- fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
/* Clamp the input size. */
fmt->width = clamp_t(u32, width, 1, 8192);
@@ -625,7 +625,7 @@ static int ipipeif_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+ format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
ipipeif_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index a21e356cce3a..88522a8cdf56 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -24,8 +24,8 @@
#include "iss_resizer.h"
static const unsigned int resizer_fmts[] = {
- V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
};
/*
@@ -156,8 +156,8 @@ static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
addr & 0xffff);
/* Program UV buffer address... Hardcoded to be contiguous! */
- if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
- (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
+ (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
u32 c_addr = addr + (resizer->video_out.bpl_value *
(outformat->height - 1));
@@ -242,8 +242,8 @@ static void resizer_configure(struct iss_resizer_device *resizer)
resizer->video_out.bpl_value);
/* UYVY -> NV12 conversion */
- if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) &&
- (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) {
+ if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
+ (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
RSZ_420_CEN | RSZ_420_YEN);
@@ -457,7 +457,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
- enum v4l2_mbus_pixelcode pixelcode;
+ u32 pixelcode;
struct v4l2_mbus_framefmt *format;
unsigned int width = fmt->width;
unsigned int height = fmt->height;
@@ -472,7 +472,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
/* If not found, use UYVY as default */
if (i >= ARRAY_SIZE(resizer_fmts))
- fmt->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
/* Clamp the input size. */
fmt->width = clamp_t(u32, width, 1, 8192);
@@ -485,8 +485,8 @@ resizer_try_format(struct iss_resizer_device *resizer,
which);
memcpy(fmt, format, sizeof(*fmt));
- if ((pixelcode == V4L2_MBUS_FMT_YUYV8_1_5X8) &&
- (fmt->code == V4L2_MBUS_FMT_UYVY8_1X16))
+ if ((pixelcode == MEDIA_BUS_FMT_YUYV8_1_5X8) &&
+ (fmt->code == MEDIA_BUS_FMT_UYVY8_1X16))
fmt->code = pixelcode;
/* The data formatter truncates the number of horizontal output
@@ -537,9 +537,9 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
switch (format->code) {
- case V4L2_MBUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
if (code->index == 1)
- code->code = V4L2_MBUS_FMT_YUYV8_1_5X8;
+ code->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
else
return -EINVAL;
break;
@@ -680,7 +680,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- format.format.code = V4L2_MBUS_FMT_UYVY8_1X16;
+ format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
format.format.width = 4096;
format.format.height = 4096;
resizer_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 5d6250337fec..cdee5966cbca 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -34,67 +34,67 @@ MODULE_PARM_DESC(debug, "activates debug info");
*/
static struct iss_format_info formats[] = {
- { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
- V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
- { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
- { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
- V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
+ { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
- { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
- V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
- { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
- V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
- { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
- V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
- { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
- V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
- { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
- V4L2_MBUS_FMT_SGRBG10_1X10, 0,
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_1X10, 0,
V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
- { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
- { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
- { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
- { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
- { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
- V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
+ { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
- { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
- V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
+ { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
- { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
- V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
+ { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
- { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
- V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
+ { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
- { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
- V4L2_MBUS_FMT_UYVY8_1X16, 0,
+ { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_UYVY8_1X16, 0,
V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
- { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
- V4L2_MBUS_FMT_YUYV8_1X16, 0,
+ { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16, 0,
V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
- { V4L2_MBUS_FMT_YUYV8_1_5X8, V4L2_MBUS_FMT_YUYV8_1_5X8,
- V4L2_MBUS_FMT_YUYV8_1_5X8, 0,
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, MEDIA_BUS_FMT_YUYV8_1_5X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8, 0,
V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
};
const struct iss_format_info *
-omap4iss_video_format_info(enum v4l2_mbus_pixelcode code)
+omap4iss_video_format_info(u32 code)
{
unsigned int i;
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 9dccdb154e1a..f11fce2cb977 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -43,10 +43,10 @@ struct v4l2_pix_format;
* @description: Human-readable format description
*/
struct iss_format_info {
- enum v4l2_mbus_pixelcode code;
- enum v4l2_mbus_pixelcode truncated;
- enum v4l2_mbus_pixelcode uncompressed;
- enum v4l2_mbus_pixelcode flavor;
+ u32 code;
+ u32 truncated;
+ u32 uncompressed;
+ u32 flavor;
u32 pixelformat;
unsigned int bpp;
const char *description;
@@ -199,6 +199,6 @@ void omap4iss_video_cancel_stream(struct iss_video *video);
struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
const struct iss_format_info *
-omap4iss_video_format_info(enum v4l2_mbus_pixelcode code);
+omap4iss_video_format_info(u32 code);
#endif /* OMAP4_ISS_VIDEO_H */
diff --git a/drivers/media/parport/Kconfig b/drivers/staging/media/parport/Kconfig
index 948c981d9f05..15974efdba1d 100644
--- a/drivers/media/parport/Kconfig
+++ b/drivers/staging/media/parport/Kconfig
@@ -7,18 +7,22 @@ menuconfig MEDIA_PARPORT_SUPPORT
if MEDIA_PARPORT_SUPPORT
config VIDEO_BWQCAM
- tristate "Quickcam BW Video For Linux"
+ tristate "Quickcam BW Video For Linux (Deprecated)"
depends on PARPORT && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
help
Say Y if you have the black and white version of the QuickCam
camera. See the next option for the color version.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called bw-qcam.
config VIDEO_CQCAM
- tristate "QuickCam Colour Video For Linux"
+ tristate "QuickCam Colour Video For Linux (Deprecated)"
depends on PARPORT && VIDEO_V4L2
help
This is the video4linux driver for the colour version of the
@@ -28,18 +32,26 @@ config VIDEO_CQCAM
as a module (c-qcam).
Read <file:Documentation/video4linux/CQcam.txt> for more information.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
config VIDEO_PMS
- tristate "Mediavision Pro Movie Studio Video For Linux"
+ tristate "Mediavision Pro Movie Studio Video For Linux (Deprecated)"
depends on ISA && VIDEO_V4L2
help
Say Y if you have the ISA Mediavision Pro Movie Studio
capture card.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called pms.
config VIDEO_W9966
- tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
+ tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux (Deprecated)"
depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
help
Video4linux driver for Winbond's w9966 based Webcams.
@@ -50,4 +62,8 @@ config VIDEO_W9966
Check out <file:Documentation/video4linux/w9966.txt> for more
information.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
endif
diff --git a/drivers/media/parport/Makefile b/drivers/staging/media/parport/Makefile
index 4eea06d7af5b..4eea06d7af5b 100644
--- a/drivers/media/parport/Makefile
+++ b/drivers/staging/media/parport/Makefile
diff --git a/drivers/media/parport/bw-qcam.c b/drivers/staging/media/parport/bw-qcam.c
index 67b9da1dc43f..67b9da1dc43f 100644
--- a/drivers/media/parport/bw-qcam.c
+++ b/drivers/staging/media/parport/bw-qcam.c
diff --git a/drivers/media/parport/c-qcam.c b/drivers/staging/media/parport/c-qcam.c
index b9010bd3ed3e..b9010bd3ed3e 100644
--- a/drivers/media/parport/c-qcam.c
+++ b/drivers/staging/media/parport/c-qcam.c
diff --git a/drivers/media/parport/pms.c b/drivers/staging/media/parport/pms.c
index e6b497528cea..e6b497528cea 100644
--- a/drivers/media/parport/pms.c
+++ b/drivers/staging/media/parport/pms.c
diff --git a/drivers/media/parport/w9966.c b/drivers/staging/media/parport/w9966.c
index f7502f3a6a3c..f7502f3a6a3c 100644
--- a/drivers/media/parport/w9966.c
+++ b/drivers/staging/media/parport/w9966.c
diff --git a/drivers/media/usb/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 645d915267e6..81784c6f7b88 100644
--- a/drivers/media/usb/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,5 +1,5 @@
config VIDEO_TLG2300
- tristate "Telegent TLG2300 USB video capture support"
+ tristate "Telegent TLG2300 USB video capture support (Deprecated)"
depends on VIDEO_DEV && I2C && SND && DVB_CORE
select VIDEO_TUNER
select VIDEO_TVEEPROM
@@ -12,5 +12,9 @@ config VIDEO_TLG2300
This is a video4linux driver for Telegent tlg2300 based TV cards.
The driver supports V4L2, DVB-T and radio.
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
To compile this driver as a module, choose M here: the
module will be called poseidon
diff --git a/drivers/media/usb/tlg2300/Makefile b/drivers/staging/media/tlg2300/Makefile
index 137f8e38cdec..137f8e38cdec 100644
--- a/drivers/media/usb/tlg2300/Makefile
+++ b/drivers/staging/media/tlg2300/Makefile
diff --git a/drivers/media/usb/tlg2300/pd-alsa.c b/drivers/staging/media/tlg2300/pd-alsa.c
index dd8fe100590f..dd8fe100590f 100644
--- a/drivers/media/usb/tlg2300/pd-alsa.c
+++ b/drivers/staging/media/tlg2300/pd-alsa.c
diff --git a/drivers/media/usb/tlg2300/pd-common.h b/drivers/staging/media/tlg2300/pd-common.h
index 9e23ad32d2fe..9e23ad32d2fe 100644
--- a/drivers/media/usb/tlg2300/pd-common.h
+++ b/drivers/staging/media/tlg2300/pd-common.h
diff --git a/drivers/media/usb/tlg2300/pd-dvb.c b/drivers/staging/media/tlg2300/pd-dvb.c
index ca4994a5190c..ca4994a5190c 100644
--- a/drivers/media/usb/tlg2300/pd-dvb.c
+++ b/drivers/staging/media/tlg2300/pd-dvb.c
diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/staging/media/tlg2300/pd-main.c
index b31f4791b8ff..b31f4791b8ff 100644
--- a/drivers/media/usb/tlg2300/pd-main.c
+++ b/drivers/staging/media/tlg2300/pd-main.c
diff --git a/drivers/media/usb/tlg2300/pd-radio.c b/drivers/staging/media/tlg2300/pd-radio.c
index b391194a840c..b391194a840c 100644
--- a/drivers/media/usb/tlg2300/pd-radio.c
+++ b/drivers/staging/media/tlg2300/pd-radio.c
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/staging/media/tlg2300/pd-video.c
index 8cd7f02fcf9f..8cd7f02fcf9f 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/staging/media/tlg2300/pd-video.c
diff --git a/drivers/media/usb/tlg2300/vendorcmds.h b/drivers/staging/media/tlg2300/vendorcmds.h
index ba6f4ae3b2c2..ba6f4ae3b2c2 100644
--- a/drivers/media/usb/tlg2300/vendorcmds.h
+++ b/drivers/staging/media/tlg2300/vendorcmds.h
diff --git a/drivers/staging/media/vino/Kconfig b/drivers/staging/media/vino/Kconfig
new file mode 100644
index 000000000000..03700dadafd8
--- /dev/null
+++ b/drivers/staging/media/vino/Kconfig
@@ -0,0 +1,24 @@
+config VIDEO_VINO
+ tristate "SGI Vino Video For Linux (Deprecated)"
+ depends on I2C && SGI_IP22 && VIDEO_V4L2
+ select VIDEO_SAA7191 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to build in support for the Vino video input system found
+ on SGI Indy machines.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
+config VIDEO_SAA7191
+ tristate "Philips SAA7191 video decoder (Deprecated)"
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ Support for the Philips SAA7191 video decoder.
+
+ This driver is deprecated and will be removed soon. If you have
+ hardware for this and you want to work on this driver, then contact
+ the linux-media mailing list.
+
+ To compile this driver as a module, choose M here: the
+ module will be called saa7191.
diff --git a/drivers/staging/media/vino/Makefile b/drivers/staging/media/vino/Makefile
new file mode 100644
index 000000000000..914c2513687c
--- /dev/null
+++ b/drivers/staging/media/vino/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_VINO) += indycam.o
+obj-$(CONFIG_VIDEO_VINO) += vino.o
+obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
diff --git a/drivers/media/platform/indycam.c b/drivers/staging/media/vino/indycam.c
index f1d192bbcb4c..f1d192bbcb4c 100644
--- a/drivers/media/platform/indycam.c
+++ b/drivers/staging/media/vino/indycam.c
diff --git a/drivers/media/platform/indycam.h b/drivers/staging/media/vino/indycam.h
index 881f21c474c4..881f21c474c4 100644
--- a/drivers/media/platform/indycam.h
+++ b/drivers/staging/media/vino/indycam.h
diff --git a/drivers/media/i2c/saa7191.c b/drivers/staging/media/vino/saa7191.c
index 8e9699268a63..8e9699268a63 100644
--- a/drivers/media/i2c/saa7191.c
+++ b/drivers/staging/media/vino/saa7191.c
diff --git a/drivers/media/i2c/saa7191.h b/drivers/staging/media/vino/saa7191.h
index 803c74d6066f..803c74d6066f 100644
--- a/drivers/media/i2c/saa7191.h
+++ b/drivers/staging/media/vino/saa7191.h
diff --git a/drivers/media/platform/vino.c b/drivers/staging/media/vino/vino.c
index 91d44ea16f27..2c85357f774d 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/staging/media/vino/vino.c
@@ -2932,10 +2932,8 @@ static int vino_querycap(struct file *file, void *__fh,
strcpy(cap->driver, vino_driver_name);
strcpy(cap->card, vino_driver_description);
strcpy(cap->bus_info, vino_bus_name);
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING;
- // V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/vino.h b/drivers/staging/media/vino/vino.h
index de2d615ae7c9..de2d615ae7c9 100644
--- a/drivers/media/platform/vino.h
+++ b/drivers/staging/media/vino/vino.h
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index ad19ba9df3c7..5ecb3e6a5bb3 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -1133,7 +1133,6 @@ static struct platform_driver xlr_net_driver = {
.remove = xlr_net_remove,
.driver = {
.name = "xlr-net",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index a93208adbfcf..093535c6217b 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -973,7 +973,6 @@ static struct platform_driver nvec_device_driver = {
.remove = tegra_nvec_remove,
.driver = {
.name = "nvec",
- .owner = THIS_MODULE,
.pm = &nvec_pm_ops,
.of_match_table = nvidia_nvec_of_match,
}
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index c17a1c3eb3ca..e881e6b26a4c 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -181,7 +181,6 @@ static struct platform_driver nvec_kbd_driver = {
.remove = nvec_kbd_remove,
.driver = {
.name = "nvec-kbd",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c
index e2e675a6e95a..f0cea0e43c96 100644
--- a/drivers/staging/nvec/nvec_paz00.c
+++ b/drivers/staging/nvec/nvec_paz00.c
@@ -87,7 +87,6 @@ static struct platform_driver nvec_paz00_driver = {
.remove = nvec_paz00_remove,
.driver = {
.name = "nvec-paz00",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 6446e151866f..6a1459d4f8fb 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -432,7 +432,6 @@ static struct platform_driver nvec_power_driver = {
.remove = nvec_power_remove,
.driver = {
.name = "nvec-power",
- .owner = THIS_MODULE,
}
};
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index f56f1db15bad..4fd63c239454 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -177,7 +177,6 @@ static struct platform_driver nvec_mouse_driver = {
.remove = nvec_mouse_remove,
.driver = {
.name = "nvec-mouse",
- .owner = THIS_MODULE,
.pm = &nvec_mouse_pm_ops,
},
};
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 5f9db4cbb381..6b8b108c4e6d 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -456,6 +456,16 @@ struct octeon_temp_buffer {
u8 data[0];
};
+static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
+{
+ return container_of(p, struct octeon_hcd, usb);
+}
+
+static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+{
+ return container_of((void *)p, struct usb_hcd, hcd_priv);
+}
+
/**
* octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
* (if needed)
@@ -640,8 +650,7 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
{
if (pipe->pid_toggle)
return 2; /* Data1 */
- else
- return 0; /* Data0 */
+ return 0; /* Data0 */
}
/**
@@ -744,7 +753,7 @@ static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
* such that USB is as close as possible to 125Mhz
*/
{
- int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
+ int divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
/* Lower than 4 doesn't seem to work properly */
if (divisor < 4)
divisor = 4;
@@ -1328,7 +1337,8 @@ static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
/* Loop writing the FIFO data for this packet into memory */
while (bytes > 0) {
- *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ *ptr++ = __cvmx_usb_read_csr32(usb,
+ USB_FIFO_ADDRESS(channel, usb->index));
bytes -= 4;
}
CVMX_SYNCW;
@@ -1479,7 +1489,8 @@ static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
fifo = &usb->nonperiodic;
fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb,
+ CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
fifo->head++;
if (fifo->head > MAX_CHANNELS)
@@ -1501,6 +1512,9 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
int channel,
struct cvmx_usb_pipe *pipe)
{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction =
list_first_entry(&pipe->transactions, typeof(*transaction),
node);
@@ -1517,7 +1531,7 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
switch (transaction->stage) {
case CVMX_USB_STAGE_NON_CONTROL:
case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- cvmx_dprintf("%s: ERROR - Non control stage\n", __func__);
+ dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
break;
case CVMX_USB_STAGE_SETUP:
usbc_hctsiz.s.pid = 3; /* Setup */
@@ -1607,8 +1621,8 @@ static void __cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
* Calculate the number of packets to transfer. If the length is zero
* we still need to transfer one packet
*/
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) /
- pipe->max_packet;
+ packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
+ pipe->max_packet);
if (packets_to_transfer == 0)
packets_to_transfer = 1;
else if ((packets_to_transfer > 1) &&
@@ -1700,7 +1714,9 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
usbc_hcintmsk.s.stallmsk = 1;
usbc_hcintmsk.s.xfercomplmsk = 1;
}
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+ __cvmx_usb_write_csr32(usb,
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ usbc_hcintmsk.u32);
/* Enable the channel interrupt to propagate */
usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb,
@@ -1853,8 +1869,7 @@ static void __cvmx_usb_start_channel(struct cvmx_usb_state *usb,
* zero we still need to transfer one packet
*/
packets_to_transfer =
- (bytes_to_transfer + pipe->max_packet - 1) /
- pipe->max_packet;
+ DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
if (packets_to_transfer == 0)
packets_to_transfer = 1;
else if ((packets_to_transfer > 1) &&
@@ -2110,16 +2125,6 @@ done:
union cvmx_usbcx_gintmsk, sofmsk, need_sof);
}
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
-{
- return container_of(p, struct octeon_hcd, usb);
-}
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
enum cvmx_usb_complete status,
struct cvmx_usb_pipe *pipe,
@@ -2583,6 +2588,9 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
*/
static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct device *dev = hcd->self.controller;
union cvmx_usbcx_hcintx usbc_hcint;
union cvmx_usbcx_hctsizx usbc_hctsiz;
union cvmx_usbcx_hccharx usbc_hcchar;
@@ -2640,8 +2648,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* Channel halt isn't needed.
*/
} else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n",
- usb->index, channel);
+ dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
+ usb->index, channel);
return 0;
}
}
@@ -2881,9 +2889,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
else
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
@@ -2891,9 +2901,11 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
struct usb_ctrlrequest *header =
cvmx_phys_to_ptr(transaction->control_header);
if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
+ transaction->stage =
+ CVMX_USB_STAGE_DATA;
else
- transaction->stage = CVMX_USB_STAGE_STATUS;
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS;
}
break;
case CVMX_USB_STAGE_DATA:
@@ -3015,7 +3027,8 @@ static int __cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* is complete, the pipe sleeps until the next
* schedule interval
*/
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->transfer_dir ==
+ CVMX_USB_DIRECTION_OUT) {
/*
* If no space left or this wasn't a max
* size packet then this transfer is
@@ -3882,7 +3895,6 @@ static struct of_device_id octeon_usb_match[] = {
static struct platform_driver octeon_usb_driver = {
.driver = {
.name = "OcteonUSB",
- .owner = THIS_MODULE,
.of_match_table = octeon_usb_match,
},
.probe = octeon_usb_probe,
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b2b6c3cd2bed..fcbe836aa997 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -61,66 +61,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
-struct cvm_napi_wrapper {
- struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-
-struct cvm_oct_core_state {
- int baseline_cores;
- /*
- * The number of additional cores that could be processing
- * input packets.
- */
- atomic_t available_cores;
- cpumask_t cpu_state;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-
-static int cvm_irq_cpu;
-
-static void cvm_oct_enable_napi(void *_)
-{
- int cpu = smp_processor_id();
- napi_schedule(&cvm_oct_napi[cpu].napi);
-}
-
-static void cvm_oct_enable_one_cpu(void)
-{
- int v;
- int cpu;
-
- /* Check to see if more CPUs are available for receive processing... */
- v = atomic_sub_if_positive(1, &core_state.available_cores);
- if (v < 0)
- return;
-
- /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
- for_each_online_cpu(cpu) {
- if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
- v = smp_call_function_single(cpu, cvm_oct_enable_napi,
- NULL, 0);
- if (v)
- panic("Can't enable NAPI.");
- break;
- }
- }
-}
-
-static void cvm_oct_no_more_work(void)
-{
- int cpu = smp_processor_id();
-
- if (cpu == cvm_irq_cpu) {
- enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- return;
- }
-
- cpu_clear(cpu, core_state.cpu_state);
- atomic_add(1, &core_state.available_cores);
-}
+static struct napi_struct cvm_oct_napi;
/**
* cvm_oct_do_interrupt - interrupt handler.
@@ -132,8 +73,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- cvm_irq_cpu = smp_processor_id();
- cvm_oct_enable_napi(NULL);
+ napi_schedule(&cvm_oct_napi);
return IRQ_HANDLED;
}
@@ -186,13 +126,15 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
if (*ptr == 0xd5) {
/*
- printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0xd5 preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i + 1;
work->len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
/*
- printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0x?d preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i;
work->len -= i + 4;
@@ -278,28 +220,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
break;
}
- pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
+ pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ sizeof(void *));
prefetch(pskb);
if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
- cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
+ CVMX_POW_NO_WAIT);
did_work_request = 1;
}
-
- if (rx_count == 0) {
- /*
- * First time through, see if there is enough
- * work waiting to merit waking another
- * CPU.
- */
- union cvmx_pow_wq_int_cntx counts;
- int backlog;
- int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
- counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
- backlog = counts.s.iq_cnt + counts.s.ds_cnt;
- if (backlog > budget * cores_in_use && napi != NULL)
- cvm_oct_enable_one_cpu();
- }
rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
@@ -322,7 +251,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* buffer.
*/
if (likely(skb_in_hw)) {
- skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
+ skb->data = skb->head + work->packet_ptr.s.addr -
+ cvmx_ptr_to_phys(skb->head);
prefetch(skb->data);
skb->len = work->len;
skb_set_tail_pointer(skb, skb->len);
@@ -359,7 +289,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+ union cvmx_buf_ptr segment_ptr =
+ work->packet_ptr;
int len = work->len;
while (segments--) {
@@ -375,8 +306,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* one: int segment_size =
* segment_ptr.s.size;
*/
- int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
+ int segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
/*
* Don't copy more than what
* is left in the packet.
@@ -407,8 +341,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
- work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
+ if (unlikely(work->word2.s.not_IP ||
+ work->word2.s.IP_exc ||
+ work->word2.s.L4_error ||
+ !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -416,11 +352,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* Increment RX stats for virtual ports */
if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_packets);
+ atomic64_add(skb->len,
+ (atomic64_t *)&priv->stats.rx_bytes);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_packets);
+ atomic_add(skb->len,
+ (atomic_t *)&priv->stats.rx_bytes);
#endif
}
netif_receive_skb(skb);
@@ -431,9 +371,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
dev->name);
*/
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_dropped);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_dropped);
#endif
dev_kfree_skb_irq(skb);
}
@@ -476,7 +418,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget && napi != NULL) {
/* No more work */
napi_complete(napi);
- cvm_oct_no_more_work();
+ enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
}
return rx_count;
}
@@ -511,18 +453,10 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
- atomic_set(&core_state.available_cores, max_rx_cpus);
- else
- atomic_set(&core_state.available_cores, num_online_cpus());
- core_state.baseline_cores = atomic_read(&core_state.available_cores);
-
- core_state.cpu_state = CPU_MASK_NONE;
- for_each_possible_cpu(i) {
- netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
- napi_enable(&cvm_oct_napi[i].napi);
- }
+ netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
+ rx_napi_weight);
+ napi_enable(&cvm_oct_napi);
+
/* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
@@ -543,15 +477,11 @@ void cvm_oct_rx_initialize(void)
int_pc.s.pc_thr = 5;
cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-
- /* Scheduld NAPI now. This will indirectly enable interrupts. */
- cvm_oct_enable_one_cpu();
+ /* Schedule NAPI now. This will indirectly enable the interrupt. */
+ napi_schedule(&cvm_oct_napi);
}
void cvm_oct_rx_shutdown(void)
{
- int i;
- /* Shutdown all of the NAPIs */
- for_each_possible_cpu(i)
- netif_napi_del(&cvm_oct_napi[i].napi);
+ netif_napi_del(&cvm_oct_napi);
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 4e54d8540219..b7a7854d3f7e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -77,6 +77,7 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
+
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
if (undo > 0)
@@ -89,6 +90,7 @@ static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
static void cvm_oct_kick_tx_poll_watchdog(void)
{
union cvmx_ciu_timx ciu_timx;
+
ciu_timx.u64 = 0;
ciu_timx.s.one_shot = 1;
ciu_timx.s.len = cvm_oct_tx_poll_interval;
@@ -118,9 +120,11 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
total_freed += skb_to_free;
if (skb_to_free > 0) {
struct sk_buff *to_free_list = NULL;
+
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
struct sk_buff *t;
+
t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
@@ -131,6 +135,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -258,6 +263,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
if (gmx_prt_cfg.s.duplex == 0) {
int add_bytes = 64 - skb->len;
+
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
memset(__skb_put(skb, add_bytes), 0,
@@ -289,6 +295,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+
hw_buffer.s.addr = XKPHYS_TO_PHYS(
(u64)(page_address(fs->page.p) +
fs->page_offset));
@@ -495,6 +502,7 @@ skip_xmit:
while (skb_to_free > 0) {
struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
@@ -505,6 +513,7 @@ skip_xmit:
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -550,6 +559,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+
if (unlikely(work == NULL)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
@@ -713,6 +723,7 @@ static void cvm_oct_tx_do_cleanup(unsigned long arg)
for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
if (cvm_oct_device[port]) {
struct net_device *dev = cvm_oct_device[port];
+
cvm_oct_free_tx_skbs(dev);
}
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8f9e3fb4871d..ee321496dcdd 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -98,12 +98,6 @@ MODULE_PARM_DESC(pow_send_list, "\n"
"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
"\tusing the pow_send_group.");
-int max_rx_cpus = -1;
-module_param(max_rx_cpus, int, 0444);
-MODULE_PARM_DESC(max_rx_cpus, "\n"
- "\t\tThe maximum number of CPUs to use for packet reception.\n"
- "\t\tUse -1 to use all available CPUs.");
-
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
@@ -452,7 +446,7 @@ int cvm_oct_common_init(struct net_device *dev)
mac = of_get_mac_address(priv->of_node);
if (mac)
- memcpy(dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac);
else
eth_hw_addr_random(dev);
@@ -877,7 +871,6 @@ static struct platform_driver cvm_oct_driver = {
.probe = cvm_oct_probe,
.remove = cvm_oct_remove,
.driver = {
- .owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.of_match_table = cvm_oct_match,
},
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d0e321119914..f48dc766fada 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -99,7 +99,6 @@ extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
-extern int max_rx_cpus;
extern int rx_napi_weight;
#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index eb83b28b8cd1..6a9a8815477c 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -682,8 +682,7 @@ static int dcon_remove(struct i2c_client *client)
free_irq(DCON_IRQ, dcon);
- if (dcon->bl_dev)
- backlight_device_unregister(dcon->bl_dev);
+ backlight_device_unregister(dcon->bl_dev);
if (dcon_device != NULL)
platform_device_unregister(dcon_device);
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index e88045228607..8543bb29a138 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -224,7 +224,6 @@ static struct platform_driver g_oz_plat_drv = {
.resume = oz_plat_resume,
.driver = {
.name = OZ_PLAT_DEV_NAME,
- .owner = THIS_MODULE,
},
};
@@ -354,8 +353,7 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
}
spin_lock(&g_tasklet_lock);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
- if (cancel_urbl)
- oz_free_urb_link(cancel_urbl);
+ oz_free_urb_link(cancel_urbl);
}
/*
@@ -523,8 +521,7 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
}
}
spin_unlock_bh(&port->ozhcd->hcd_lock);
- if (urbl)
- oz_free_urb_link(urbl);
+ oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
@@ -730,7 +727,7 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
{
/* Cleanup the current configuration and report reset to the core.
*/
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
oz_dbg(ON, "PD Reset\n");
@@ -749,7 +746,7 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
int length, int offset, int total_size)
{
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct urb *urb;
int err = 0;
@@ -889,7 +886,7 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
int data_len)
{
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct urb *urb;
struct usb_ctrlrequest *setup;
struct usb_hcd *hcd = port->ozhcd->hcd;
@@ -1036,7 +1033,7 @@ static inline int oz_usb_get_frame_number(void)
int oz_hcd_heartbeat(void *hport)
{
int rc = 0;
- struct oz_port *port = (struct oz_port *)hport;
+ struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
struct oz_urb_link *urbl, *n;
LIST_HEAD(xfr_list);
@@ -1914,7 +1911,7 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
- desc->wHubCharacteristics = (__force __u16)cpu_to_le16(0x0001);
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
@@ -2032,11 +2029,11 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
- clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
+ clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
- clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
+ clear_bits = USB_PORT_STAT_C_ENABLE << 16;
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
@@ -2046,7 +2043,7 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
- clear_bits = (USB_PORT_FEAT_C_RESET << 16);
+ clear_bits = USB_PORT_FEAT_C_RESET << 16;
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index be7ee01c50ab..d434d8c6fff6 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -56,7 +56,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
u8 index, __le16 windex, int offset, int len)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_get_desc_req *body;
@@ -92,7 +92,7 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -115,7 +115,7 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -140,7 +140,7 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -166,7 +166,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
@@ -244,7 +244,7 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
{
- struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt_buf *eb;
int i;
diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO
index a4be749bcdfc..2db3f994b632 100644
--- a/drivers/staging/panel/TODO
+++ b/drivers/staging/panel/TODO
@@ -1,6 +1,5 @@
TODO:
- checkpatch.pl cleanups
- - Lindent
- review major/minor usages
- review userspace api
- see if all of this could be done more easily in userspace instead.
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 6d1a32097d3c..98325b7b4462 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -133,6 +133,8 @@
#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
+#define NOT_SET -1
+
/* macros to simplify use of the parallel port */
#define r_ctr(x) (parport_read_control((x)->port))
#define r_dtr(x) (parport_read_data((x)->port))
@@ -210,6 +212,10 @@ static pmask_t phys_prev;
static char inputs_stable;
/* these variables are specific to the keypad */
+static struct {
+ bool enabled;
+} keypad;
+
static char keypad_buffer[KEYPAD_BUFFER];
static int keypad_buflen;
static int keypad_start;
@@ -217,17 +223,50 @@ static char keypressed;
static wait_queue_head_t keypad_read_wait;
/* lcd-specific variables */
-
-/* contains the LCD config state */
-static unsigned long int lcd_flags;
-/* contains the LCD X offset */
-static unsigned long int lcd_addr_x;
-/* contains the LCD Y offset */
-static unsigned long int lcd_addr_y;
-/* current escape sequence, 0 terminated */
-static char lcd_escape[LCD_ESCAPE_LEN + 1];
-/* not in escape state. >=0 = escape cmd len */
-static int lcd_escape_len = -1;
+static struct {
+ bool enabled;
+ bool initialized;
+ bool must_clear;
+
+ /* TODO: use bool here? */
+ char left_shift;
+
+ int height;
+ int width;
+ int bwidth;
+ int hwidth;
+ int charset;
+ int proto;
+ int light_tempo;
+
+ /* TODO: use union here? */
+ struct {
+ int e;
+ int rs;
+ int rw;
+ int cl;
+ int da;
+ int bl;
+ } pins;
+
+ /* contains the LCD config state */
+ unsigned long int flags;
+
+ /* Contains the LCD X and Y offset */
+ struct {
+ unsigned long int x;
+ unsigned long int y;
+ } addr;
+
+ /* Current escape sequence and its length, or -1 if outside */
+ struct {
+ char buf[LCD_ESCAPE_LEN + 1];
+ int len;
+ } esc_seq;
+} lcd;
+
+/* Needed only for init */
+static int selected_lcd_type = NOT_SET;
/*
* Bit masks to convert LCD signals to parallel port outputs.
@@ -302,14 +341,15 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
/*
* Construct custom config from the kernel's configuration
*/
-#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
#define DEFAULT_PARPORT 0
-#define DEFAULT_LCD LCD_TYPE_OLD
-#define DEFAULT_KEYPAD KEYPAD_TYPE_OLD
+#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
+#define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD
+#define DEFAULT_LCD_TYPE LCD_TYPE_OLD
+#define DEFAULT_LCD_HEIGHT 2
#define DEFAULT_LCD_WIDTH 40
#define DEFAULT_LCD_BWIDTH 40
#define DEFAULT_LCD_HWIDTH 64
-#define DEFAULT_LCD_HEIGHT 2
+#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
#define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL
#define DEFAULT_LCD_PIN_E PIN_AUTOLF
@@ -318,27 +358,31 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_PIN_SCL PIN_STROBE
#define DEFAULT_LCD_PIN_SDA PIN_D0
#define DEFAULT_LCD_PIN_BL PIN_NOT_SET
-#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
-
-#ifdef CONFIG_PANEL_PROFILE
-#undef DEFAULT_PROFILE
-#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
-#endif
#ifdef CONFIG_PANEL_PARPORT
#undef DEFAULT_PARPORT
#define DEFAULT_PARPORT CONFIG_PANEL_PARPORT
#endif
+#ifdef CONFIG_PANEL_PROFILE
+#undef DEFAULT_PROFILE
+#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
+#endif
+
#if DEFAULT_PROFILE == 0 /* custom */
#ifdef CONFIG_PANEL_KEYPAD
-#undef DEFAULT_KEYPAD
-#define DEFAULT_KEYPAD CONFIG_PANEL_KEYPAD
+#undef DEFAULT_KEYPAD_TYPE
+#define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD
#endif
#ifdef CONFIG_PANEL_LCD
-#undef DEFAULT_LCD
-#define DEFAULT_LCD CONFIG_PANEL_LCD
+#undef DEFAULT_LCD_TYPE
+#define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD
+#endif
+
+#ifdef CONFIG_PANEL_LCD_HEIGHT
+#undef DEFAULT_LCD_HEIGHT
+#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
#endif
#ifdef CONFIG_PANEL_LCD_WIDTH
@@ -356,9 +400,9 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH
#endif
-#ifdef CONFIG_PANEL_LCD_HEIGHT
-#undef DEFAULT_LCD_HEIGHT
-#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
+#ifdef CONFIG_PANEL_LCD_CHARSET
+#undef DEFAULT_LCD_CHARSET
+#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
#endif
#ifdef CONFIG_PANEL_LCD_PROTO
@@ -396,25 +440,18 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL
#endif
-#ifdef CONFIG_PANEL_LCD_CHARSET
-#undef DEFAULT_LCD_CHARSET
-#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
-#endif
-
#endif /* DEFAULT_PROFILE == 0 */
/* global variables */
-static int keypad_open_cnt; /* #times opened */
-static int lcd_open_cnt; /* #times opened */
+
+/* Device single-open policy control */
+static atomic_t lcd_available = ATOMIC_INIT(1);
+static atomic_t keypad_available = ATOMIC_INIT(1);
+
static struct pardevice *pprt;
-static int lcd_initialized;
static int keypad_initialized;
-static int light_tempo;
-
-static char lcd_must_clear;
-static char lcd_left_shift;
static char init_in_progress;
static void (*lcd_write_cmd)(int);
@@ -426,59 +463,51 @@ static struct timer_list scan_timer;
MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver");
-static int parport = -1;
+static int parport = DEFAULT_PARPORT;
module_param(parport, int, 0000);
MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)");
-static int lcd_height = -1;
+static int profile = DEFAULT_PROFILE;
+module_param(profile, int, 0000);
+MODULE_PARM_DESC(profile,
+ "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
+ "4=16x2 nexcom; default=40x2, old kp");
+
+static int keypad_type = NOT_SET;
+module_param(keypad_type, int, 0000);
+MODULE_PARM_DESC(keypad_type,
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
+
+static int lcd_type = NOT_SET;
+module_param(lcd_type, int, 0000);
+MODULE_PARM_DESC(lcd_type,
+ "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+
+static int lcd_height = NOT_SET;
module_param(lcd_height, int, 0000);
MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD");
-static int lcd_width = -1;
+static int lcd_width = NOT_SET;
module_param(lcd_width, int, 0000);
MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD");
-static int lcd_bwidth = -1; /* internal buffer width (usually 40) */
+static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */
module_param(lcd_bwidth, int, 0000);
MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)");
-static int lcd_hwidth = -1; /* hardware buffer width (usually 64) */
+static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */
module_param(lcd_hwidth, int, 0000);
MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)");
-static int lcd_enabled = -1;
-module_param(lcd_enabled, int, 0000);
-MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
-
-static int keypad_enabled = -1;
-module_param(keypad_enabled, int, 0000);
-MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
-
-static int lcd_type = -1;
-module_param(lcd_type, int, 0000);
-MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+static int lcd_charset = NOT_SET;
+module_param(lcd_charset, int, 0000);
+MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-static int lcd_proto = -1;
+static int lcd_proto = NOT_SET;
module_param(lcd_proto, int, 0000);
MODULE_PARM_DESC(lcd_proto,
"LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
-static int lcd_charset = -1;
-module_param(lcd_charset, int, 0000);
-MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-
-static int keypad_type = -1;
-module_param(keypad_type, int, 0000);
-MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
-
-static int profile = DEFAULT_PROFILE;
-module_param(profile, int, 0000);
-MODULE_PARM_DESC(profile,
- "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
- "4=16x2 nexcom; default=40x2, old kp");
-
/*
* These are the parallel port pins the LCD control signals are connected to.
* Set this to 0 if the signal is not used. Set it to its opposite value
@@ -503,20 +532,31 @@ module_param(lcd_rw_pin, int, 0000);
MODULE_PARM_DESC(lcd_rw_pin,
"# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
-static int lcd_bl_pin = PIN_NOT_SET;
-module_param(lcd_bl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+static int lcd_cl_pin = PIN_NOT_SET;
+module_param(lcd_cl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_cl_pin,
+ "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
static int lcd_da_pin = PIN_NOT_SET;
module_param(lcd_da_pin, int, 0000);
MODULE_PARM_DESC(lcd_da_pin,
"# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
-static int lcd_cl_pin = PIN_NOT_SET;
-module_param(lcd_cl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
+static int lcd_bl_pin = PIN_NOT_SET;
+module_param(lcd_bl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_bl_pin,
+ "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+
+/* Deprecated module parameters - consider not using them anymore */
+
+static int lcd_enabled = NOT_SET;
+module_param(lcd_enabled, int, 0000);
+MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
+
+static int keypad_enabled = NOT_SET;
+module_param(keypad_enabled, int, 0000);
+MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
+
static const unsigned char *lcd_char_conv;
@@ -748,7 +788,7 @@ static void lcd_send_serial(int byte)
/* turn the backlight on or off */
static void lcd_backlight(int on)
{
- if (lcd_bl_pin == PIN_NONE)
+ if (lcd.pins.bl == PIN_NONE)
return;
/* The backlight is activated by setting the AUTOFEED line to +5V */
@@ -847,23 +887,23 @@ static void lcd_write_data_tilcd(int data)
static void lcd_gotoxy(void)
{
lcd_write_cmd(0x80 /* set DDRAM address */
- | (lcd_addr_y ? lcd_hwidth : 0)
+ | (lcd.addr.y ? lcd.hwidth : 0)
/* we force the cursor to stay at the end of the
line if it wants to go farther */
- | ((lcd_addr_x < lcd_bwidth) ? lcd_addr_x &
- (lcd_hwidth - 1) : lcd_bwidth - 1));
+ | ((lcd.addr.x < lcd.bwidth) ? lcd.addr.x &
+ (lcd.hwidth - 1) : lcd.bwidth - 1));
}
static void lcd_print(char c)
{
- if (lcd_addr_x < lcd_bwidth) {
+ if (lcd.addr.x < lcd.bwidth) {
if (lcd_char_conv != NULL)
c = lcd_char_conv[(unsigned char)c];
lcd_write_data(c);
- lcd_addr_x++;
+ lcd.addr.x++;
}
/* prevents the cursor from wrapping onto the next line */
- if (lcd_addr_x == lcd_bwidth)
+ if (lcd.addr.x == lcd.bwidth)
lcd_gotoxy();
}
@@ -872,12 +912,12 @@ static void lcd_clear_fast_s(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
@@ -885,8 +925,8 @@ static void lcd_clear_fast_s(void)
}
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -895,12 +935,12 @@ static void lcd_clear_fast_p8(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
@@ -923,8 +963,8 @@ static void lcd_clear_fast_p8(void)
}
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -933,12 +973,12 @@ static void lcd_clear_fast_tilcd(void)
{
int pos;
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
udelay(60);
@@ -946,8 +986,8 @@ static void lcd_clear_fast_tilcd(void)
spin_unlock_irq(&pprt_lock);
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
}
@@ -955,15 +995,15 @@ static void lcd_clear_fast_tilcd(void)
static void lcd_clear_display(void)
{
lcd_write_cmd(0x01); /* clear display */
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
/* we must wait a few milliseconds (15) */
long_sleep(15);
}
static void lcd_init_display(void)
{
- lcd_flags = ((lcd_height > 1) ? LCD_FLAG_N : 0)
+ lcd.flags = ((lcd.height > 1) ? LCD_FLAG_N : 0)
| LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B;
long_sleep(20); /* wait 20 ms after power-up for the paranoid */
@@ -976,8 +1016,8 @@ static void lcd_init_display(void)
long_sleep(10);
lcd_write_cmd(0x30 /* set font height and lines number */
- | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_N) ? 8 : 0)
+ | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_N) ? 8 : 0)
);
long_sleep(10);
@@ -985,12 +1025,12 @@ static void lcd_init_display(void)
long_sleep(10);
lcd_write_cmd(0x08 /* set display mode */
- | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd_flags & LCD_FLAG_B) ? 1 : 0)
+ | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
+ | ((lcd.flags & LCD_FLAG_B) ? 1 : 0)
);
- lcd_backlight((lcd_flags & LCD_FLAG_L) ? 1 : 0);
+ lcd_backlight((lcd.flags & LCD_FLAG_L) ? 1 : 0);
long_sleep(10);
@@ -1013,100 +1053,101 @@ static inline int handle_lcd_special_code(void)
int processed = 0;
- char *esc = lcd_escape + 2;
- int oldflags = lcd_flags;
+ char *esc = lcd.esc_seq.buf + 2;
+ int oldflags = lcd.flags;
/* check for display mode flags */
switch (*esc) {
case 'D': /* Display ON */
- lcd_flags |= LCD_FLAG_D;
+ lcd.flags |= LCD_FLAG_D;
processed = 1;
break;
case 'd': /* Display OFF */
- lcd_flags &= ~LCD_FLAG_D;
+ lcd.flags &= ~LCD_FLAG_D;
processed = 1;
break;
case 'C': /* Cursor ON */
- lcd_flags |= LCD_FLAG_C;
+ lcd.flags |= LCD_FLAG_C;
processed = 1;
break;
case 'c': /* Cursor OFF */
- lcd_flags &= ~LCD_FLAG_C;
+ lcd.flags &= ~LCD_FLAG_C;
processed = 1;
break;
case 'B': /* Blink ON */
- lcd_flags |= LCD_FLAG_B;
+ lcd.flags |= LCD_FLAG_B;
processed = 1;
break;
case 'b': /* Blink OFF */
- lcd_flags &= ~LCD_FLAG_B;
+ lcd.flags &= ~LCD_FLAG_B;
processed = 1;
break;
case '+': /* Back light ON */
- lcd_flags |= LCD_FLAG_L;
+ lcd.flags |= LCD_FLAG_L;
processed = 1;
break;
case '-': /* Back light OFF */
- lcd_flags &= ~LCD_FLAG_L;
+ lcd.flags &= ~LCD_FLAG_L;
processed = 1;
break;
case '*':
/* flash back light using the keypad timer */
if (scan_timer.function != NULL) {
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
- light_tempo = FLASH_LIGHT_TEMPO;
+ lcd.light_tempo = FLASH_LIGHT_TEMPO;
}
processed = 1;
break;
case 'f': /* Small Font */
- lcd_flags &= ~LCD_FLAG_F;
+ lcd.flags &= ~LCD_FLAG_F;
processed = 1;
break;
case 'F': /* Large Font */
- lcd_flags |= LCD_FLAG_F;
+ lcd.flags |= LCD_FLAG_F;
processed = 1;
break;
case 'n': /* One Line */
- lcd_flags &= ~LCD_FLAG_N;
+ lcd.flags &= ~LCD_FLAG_N;
processed = 1;
break;
case 'N': /* Two Lines */
- lcd_flags |= LCD_FLAG_N;
+ lcd.flags |= LCD_FLAG_N;
break;
case 'l': /* Shift Cursor Left */
- if (lcd_addr_x > 0) {
+ if (lcd.addr.x > 0) {
/* back one char if not at end of line */
- if (lcd_addr_x < lcd_bwidth)
+ if (lcd.addr.x < lcd.bwidth)
lcd_write_cmd(0x10);
- lcd_addr_x--;
+ lcd.addr.x--;
}
processed = 1;
break;
case 'r': /* shift cursor right */
- if (lcd_addr_x < lcd_width) {
+ if (lcd.addr.x < lcd.width) {
/* allow the cursor to pass the end of the line */
- if (lcd_addr_x <
- (lcd_bwidth - 1))
+ if (lcd.addr.x <
+ (lcd.bwidth - 1))
lcd_write_cmd(0x14);
- lcd_addr_x++;
+ lcd.addr.x++;
}
processed = 1;
break;
case 'L': /* shift display left */
- lcd_left_shift++;
+ lcd.left_shift++;
lcd_write_cmd(0x18);
processed = 1;
break;
case 'R': /* shift display right */
- lcd_left_shift--;
+ lcd.left_shift--;
lcd_write_cmd(0x1C);
processed = 1;
break;
case 'k': { /* kill end of line */
int x;
- for (x = lcd_addr_x; x < lcd_bwidth; x++)
+ for (x = lcd.addr.x; x < lcd.bwidth; x++)
lcd_write_data(' ');
/* restore cursor position */
@@ -1116,7 +1157,7 @@ static inline int handle_lcd_special_code(void)
}
case 'I': /* reinitialize display */
lcd_init_display();
- lcd_left_shift = 0;
+ lcd.left_shift = 0;
processed = 1;
break;
case 'G': {
@@ -1187,11 +1228,11 @@ static inline int handle_lcd_special_code(void)
while (*esc) {
if (*esc == 'x') {
esc++;
- if (kstrtoul(esc, 10, &lcd_addr_x) < 0)
+ if (kstrtoul(esc, 10, &lcd.addr.x) < 0)
break;
} else if (*esc == 'y') {
esc++;
- if (kstrtoul(esc, 10, &lcd_addr_y) < 0)
+ if (kstrtoul(esc, 10, &lcd.addr.y) < 0)
break;
} else {
break;
@@ -1204,25 +1245,25 @@ static inline int handle_lcd_special_code(void)
}
/* Check whether one flag was changed */
- if (oldflags != lcd_flags) {
+ if (oldflags != lcd.flags) {
/* check whether one of B,C,D flags were changed */
- if ((oldflags ^ lcd_flags) &
+ if ((oldflags ^ lcd.flags) &
(LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
/* set display mode */
lcd_write_cmd(0x08
- | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
- | ((lcd_flags & LCD_FLAG_B) ? 1 : 0));
+ | ((lcd.flags & LCD_FLAG_D) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_C) ? 2 : 0)
+ | ((lcd.flags & LCD_FLAG_B) ? 1 : 0));
/* check whether one of F,N flags was changed */
- else if ((oldflags ^ lcd_flags) & (LCD_FLAG_F | LCD_FLAG_N))
+ else if ((oldflags ^ lcd.flags) & (LCD_FLAG_F | LCD_FLAG_N))
lcd_write_cmd(0x30
- | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
- | ((lcd_flags & LCD_FLAG_N) ? 8 : 0));
+ | ((lcd.flags & LCD_FLAG_F) ? 4 : 0)
+ | ((lcd.flags & LCD_FLAG_N) ? 8 : 0));
/* check whether L flag was changed */
- else if ((oldflags ^ lcd_flags) & (LCD_FLAG_L)) {
- if (lcd_flags & (LCD_FLAG_L))
+ else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
+ if (lcd.flags & (LCD_FLAG_L))
lcd_backlight(1);
- else if (light_tempo == 0)
+ else if (lcd.light_tempo == 0)
/* switch off the light only when the tempo
lighting is gone */
lcd_backlight(0);
@@ -1235,29 +1276,29 @@ static inline int handle_lcd_special_code(void)
static void lcd_write_char(char c)
{
/* first, we'll test if we're in escape mode */
- if ((c != '\n') && lcd_escape_len >= 0) {
+ if ((c != '\n') && lcd.esc_seq.len >= 0) {
/* yes, let's add this char to the buffer */
- lcd_escape[lcd_escape_len++] = c;
- lcd_escape[lcd_escape_len] = 0;
+ lcd.esc_seq.buf[lcd.esc_seq.len++] = c;
+ lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
} else {
/* aborts any previous escape sequence */
- lcd_escape_len = -1;
+ lcd.esc_seq.len = -1;
switch (c) {
case LCD_ESCAPE_CHAR:
/* start of an escape sequence */
- lcd_escape_len = 0;
- lcd_escape[lcd_escape_len] = 0;
+ lcd.esc_seq.len = 0;
+ lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
break;
case '\b':
/* go back one char and clear it */
- if (lcd_addr_x > 0) {
+ if (lcd.addr.x > 0) {
/* check if we're not at the
end of the line */
- if (lcd_addr_x < lcd_bwidth)
+ if (lcd.addr.x < lcd.bwidth)
/* back one char */
lcd_write_cmd(0x10);
- lcd_addr_x--;
+ lcd.addr.x--;
}
/* replace with a space */
lcd_write_data(' ');
@@ -1271,15 +1312,15 @@ static void lcd_write_char(char c)
case '\n':
/* flush the remainder of the current line and
go to the beginning of the next line */
- for (; lcd_addr_x < lcd_bwidth; lcd_addr_x++)
+ for (; lcd.addr.x < lcd.bwidth; lcd.addr.x++)
lcd_write_data(' ');
- lcd_addr_x = 0;
- lcd_addr_y = (lcd_addr_y + 1) % lcd_height;
+ lcd.addr.x = 0;
+ lcd.addr.y = (lcd.addr.y + 1) % lcd.height;
lcd_gotoxy();
break;
case '\r':
/* go to the beginning of the same line */
- lcd_addr_x = 0;
+ lcd.addr.x = 0;
lcd_gotoxy();
break;
case '\t':
@@ -1295,32 +1336,32 @@ static void lcd_write_char(char c)
/* now we'll see if we're in an escape mode and if the current
escape sequence can be understood. */
- if (lcd_escape_len >= 2) {
+ if (lcd.esc_seq.len >= 2) {
int processed = 0;
- if (!strcmp(lcd_escape, "[2J")) {
+ if (!strcmp(lcd.esc_seq.buf, "[2J")) {
/* clear the display */
lcd_clear_fast();
processed = 1;
- } else if (!strcmp(lcd_escape, "[H")) {
+ } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
/* cursor to home */
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
lcd_gotoxy();
processed = 1;
}
/* codes starting with ^[[L */
- else if ((lcd_escape_len >= 3) &&
- (lcd_escape[0] == '[') &&
- (lcd_escape[1] == 'L')) {
+ else if ((lcd.esc_seq.len >= 3) &&
+ (lcd.esc_seq.buf[0] == '[') &&
+ (lcd.esc_seq.buf[1] == 'L')) {
processed = handle_lcd_special_code();
}
/* LCD special escape codes */
/* flush the escape sequence if it's been processed
or if it is getting too long. */
- if (processed || (lcd_escape_len >= LCD_ESCAPE_LEN))
- lcd_escape_len = -1;
+ if (processed || (lcd.esc_seq.len >= LCD_ESCAPE_LEN))
+ lcd.esc_seq.len = -1;
} /* escape codes */
}
@@ -1347,23 +1388,22 @@ static ssize_t lcd_write(struct file *file,
static int lcd_open(struct inode *inode, struct file *file)
{
- if (lcd_open_cnt)
+ if (!atomic_dec_and_test(&lcd_available))
return -EBUSY; /* open only once at a time */
if (file->f_mode & FMODE_READ) /* device is write-only */
return -EPERM;
- if (lcd_must_clear) {
+ if (lcd.must_clear) {
lcd_clear_display();
- lcd_must_clear = 0;
+ lcd.must_clear = false;
}
- lcd_open_cnt++;
return nonseekable_open(inode, file);
}
static int lcd_release(struct inode *inode, struct file *file)
{
- lcd_open_cnt--;
+ atomic_inc(&lcd_available);
return 0;
}
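Aside on the hunks above: the per-device open counters are replaced by an atomic single-open gate. A minimal sketch of that idiom, with illustrative names only (not the driver's code), looks like this:

/* Sketch of the single-open idiom -- illustrative, not part of the patch. */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/fs.h>

static atomic_t sketch_available = ATOMIC_INIT(1);	/* 1 = free, 0 = busy */

static int sketch_open(struct inode *inode, struct file *file)
{
	/* Only the 1 -> 0 transition returns true, so a second opener fails. */
	if (!atomic_dec_and_test(&sketch_available)) {
		atomic_inc(&sketch_available);	/* undo the failed attempt */
		return -EBUSY;
	}
	return 0;
}

static int sketch_release(struct inode *inode, struct file *file)
{
	atomic_inc(&sketch_available);	/* mark the device free again */
	return 0;
}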
@@ -1375,9 +1415,9 @@ static const struct file_operations lcd_fops = {
};
static struct miscdevice lcd_dev = {
- LCD_MINOR,
- "lcd",
- &lcd_fops
+ .minor = LCD_MINOR,
+ .name = "lcd",
+ .fops = &lcd_fops,
};
/* public function usable from the kernel for any purpose */
@@ -1386,7 +1426,7 @@ static void panel_lcd_print(const char *s)
const char *tmp = s;
int count = strlen(s);
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
for (; count-- > 0; tmp++) {
if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
/* let's be a little nice with other processes
@@ -1401,183 +1441,173 @@ static void panel_lcd_print(const char *s)
/* initialize the LCD driver */
static void lcd_init(void)
{
- switch (lcd_type) {
+ switch (selected_lcd_type) {
case LCD_TYPE_OLD:
/* parallel mode, 8 bits */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_STROBE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_AUTOLF;
-
- if (lcd_width < 0)
- lcd_width = 40;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_AUTOLF;
+
+ lcd.width = 40;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
case LCD_TYPE_KS0074:
/* serial mode, ks0074 */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_SERIAL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_KS0074;
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = PIN_AUTOLF;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = PIN_STROBE;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = PIN_D0;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 16;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_SERIAL;
+ lcd.charset = LCD_CHARSET_KS0074;
+ lcd.pins.bl = PIN_AUTOLF;
+ lcd.pins.cl = PIN_STROBE;
+ lcd.pins.da = PIN_D0;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 16;
+ lcd.height = 2;
break;
case LCD_TYPE_NEXCOM:
/* parallel mode, 8 bits, generic */
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_AUTOLF;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_SELECP;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = PIN_INITP;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_AUTOLF;
+ lcd.pins.rs = PIN_SELECP;
+ lcd.pins.rw = PIN_INITP;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
case LCD_TYPE_CUSTOM:
/* customer-defined */
- if (lcd_proto < 0)
- lcd_proto = DEFAULT_LCD_PROTO;
- if (lcd_charset < 0)
- lcd_charset = DEFAULT_LCD_CHARSET;
+ lcd.proto = DEFAULT_LCD_PROTO;
+ lcd.charset = DEFAULT_LCD_CHARSET;
/* default geometry will be set later */
break;
case LCD_TYPE_HANTRONIX:
/* parallel mode, 8 bits, hantronix-like */
default:
- if (lcd_proto < 0)
- lcd_proto = LCD_PROTO_PARALLEL;
- if (lcd_charset < 0)
- lcd_charset = LCD_CHARSET_NORMAL;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_STROBE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_SELECP;
-
- if (lcd_width < 0)
- lcd_width = 16;
- if (lcd_bwidth < 0)
- lcd_bwidth = 40;
- if (lcd_hwidth < 0)
- lcd_hwidth = 64;
- if (lcd_height < 0)
- lcd_height = 2;
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_SELECP;
+
+ lcd.width = 16;
+ lcd.bwidth = 40;
+ lcd.hwidth = 64;
+ lcd.height = 2;
break;
}
+ /* Override with module parameters set at load time */
+ if (lcd_height != NOT_SET)
+ lcd.height = lcd_height;
+ if (lcd_width != NOT_SET)
+ lcd.width = lcd_width;
+ if (lcd_bwidth != NOT_SET)
+ lcd.bwidth = lcd_bwidth;
+ if (lcd_hwidth != NOT_SET)
+ lcd.hwidth = lcd_hwidth;
+ if (lcd_charset != NOT_SET)
+ lcd.charset = lcd_charset;
+ if (lcd_proto != NOT_SET)
+ lcd.proto = lcd_proto;
+ if (lcd_e_pin != PIN_NOT_SET)
+ lcd.pins.e = lcd_e_pin;
+ if (lcd_rs_pin != PIN_NOT_SET)
+ lcd.pins.rs = lcd_rs_pin;
+ if (lcd_rw_pin != PIN_NOT_SET)
+ lcd.pins.rw = lcd_rw_pin;
+ if (lcd_cl_pin != PIN_NOT_SET)
+ lcd.pins.cl = lcd_cl_pin;
+ if (lcd_da_pin != PIN_NOT_SET)
+ lcd.pins.da = lcd_da_pin;
+ if (lcd_bl_pin != PIN_NOT_SET)
+ lcd.pins.bl = lcd_bl_pin;
+
/* this is used to catch wrong and default values */
- if (lcd_width <= 0)
- lcd_width = DEFAULT_LCD_WIDTH;
- if (lcd_bwidth <= 0)
- lcd_bwidth = DEFAULT_LCD_BWIDTH;
- if (lcd_hwidth <= 0)
- lcd_hwidth = DEFAULT_LCD_HWIDTH;
- if (lcd_height <= 0)
- lcd_height = DEFAULT_LCD_HEIGHT;
-
- if (lcd_proto == LCD_PROTO_SERIAL) { /* SERIAL */
+ if (lcd.width <= 0)
+ lcd.width = DEFAULT_LCD_WIDTH;
+ if (lcd.bwidth <= 0)
+ lcd.bwidth = DEFAULT_LCD_BWIDTH;
+ if (lcd.hwidth <= 0)
+ lcd.hwidth = DEFAULT_LCD_HWIDTH;
+ if (lcd.height <= 0)
+ lcd.height = DEFAULT_LCD_HEIGHT;
+
+ if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */
lcd_write_cmd = lcd_write_cmd_s;
lcd_write_data = lcd_write_data_s;
lcd_clear_fast = lcd_clear_fast_s;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = DEFAULT_LCD_PIN_SCL;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = DEFAULT_LCD_PIN_SDA;
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = DEFAULT_LCD_PIN_SDA;
- } else if (lcd_proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
+ } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
lcd_write_cmd = lcd_write_cmd_p8;
lcd_write_data = lcd_write_data_p8;
lcd_clear_fast = lcd_clear_fast_p8;
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = DEFAULT_LCD_PIN_E;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = DEFAULT_LCD_PIN_RS;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = DEFAULT_LCD_PIN_RW;
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = DEFAULT_LCD_PIN_E;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = DEFAULT_LCD_PIN_RS;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = DEFAULT_LCD_PIN_RW;
} else {
lcd_write_cmd = lcd_write_cmd_tilcd;
lcd_write_data = lcd_write_data_tilcd;
lcd_clear_fast = lcd_clear_fast_tilcd;
}
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = DEFAULT_LCD_PIN_BL;
-
- if (lcd_e_pin == PIN_NOT_SET)
- lcd_e_pin = PIN_NONE;
- if (lcd_rs_pin == PIN_NOT_SET)
- lcd_rs_pin = PIN_NONE;
- if (lcd_rw_pin == PIN_NOT_SET)
- lcd_rw_pin = PIN_NONE;
- if (lcd_bl_pin == PIN_NOT_SET)
- lcd_bl_pin = PIN_NONE;
- if (lcd_cl_pin == PIN_NOT_SET)
- lcd_cl_pin = PIN_NONE;
- if (lcd_da_pin == PIN_NOT_SET)
- lcd_da_pin = PIN_NONE;
-
- if (lcd_charset < 0)
- lcd_charset = DEFAULT_LCD_CHARSET;
-
- if (lcd_charset == LCD_CHARSET_KS0074)
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = DEFAULT_LCD_PIN_BL;
+
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = PIN_NONE;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = PIN_NONE;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = PIN_NONE;
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = PIN_NONE;
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = PIN_NONE;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = PIN_NONE;
+
+ if (lcd.charset == NOT_SET)
+ lcd.charset = DEFAULT_LCD_CHARSET;
+
+ if (lcd.charset == LCD_CHARSET_KS0074)
lcd_char_conv = lcd_char_conv_ks0074;
else
lcd_char_conv = NULL;
- if (lcd_bl_pin != PIN_NONE)
+ if (lcd.pins.bl != PIN_NONE)
init_scan_timer();
- pin_to_bits(lcd_e_pin, lcd_bits[LCD_PORT_D][LCD_BIT_E],
+ pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
lcd_bits[LCD_PORT_C][LCD_BIT_E]);
- pin_to_bits(lcd_rs_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
+ pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
- pin_to_bits(lcd_rw_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
+ pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
- pin_to_bits(lcd_bl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
+ pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
- pin_to_bits(lcd_cl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
+ pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
- pin_to_bits(lcd_da_pin, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
+ pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
lcd_bits[LCD_PORT_C][LCD_BIT_DA]);
/* before this line, we must NOT send anything to the display.
* Since lcd_init_display() needs to write data, we have to
* mark the LCD as initialized just before. */
- lcd_initialized = 1;
+ lcd.initialized = true;
lcd_init_display();
/* display a short message */
@@ -1589,10 +1619,10 @@ static void lcd_init(void)
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
PANEL_VERSION);
#endif
- lcd_addr_x = 0;
- lcd_addr_y = 0;
+ lcd.addr.x = 0;
+ lcd.addr.y = 0;
/* clear the display on the next device opening */
- lcd_must_clear = 1;
+ lcd.must_clear = true;
lcd_gotoxy();
}
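Aside: lcd_init() above resolves each setting in three steps: per-type defaults first, then any module parameter the user explicitly set (detected through the NOT_SET/PIN_NOT_SET sentinels), then the DEFAULT_* catch-alls. A stripped-down sketch of that precedence, with invented names and values:

/* Illustrative only; names and defaults are made up for the example. */
#define SKETCH_NOT_SET	-1

static int width_param = SKETCH_NOT_SET;	/* would come from module_param() */

static int resolve_width(int type_default)
{
	int width = type_default;		/* 1. per-type default */

	if (width_param != SKETCH_NOT_SET)	/* 2. explicit parameter wins */
		width = width_param;

	if (width <= 0)				/* 3. catch wrong or missing values */
		width = 40;

	return width;
}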
@@ -1627,20 +1657,19 @@ static ssize_t keypad_read(struct file *file,
static int keypad_open(struct inode *inode, struct file *file)
{
- if (keypad_open_cnt)
+ if (!atomic_dec_and_test(&keypad_available))
return -EBUSY; /* open only once at a time */
if (file->f_mode & FMODE_WRITE) /* device is read-only */
return -EPERM;
keypad_buflen = 0; /* flush the buffer on opening */
- keypad_open_cnt++;
return 0;
}
static int keypad_release(struct inode *inode, struct file *file)
{
- keypad_open_cnt--;
+ atomic_inc(&keypad_available);
return 0;
}
@@ -1652,9 +1681,9 @@ static const struct file_operations keypad_fops = {
};
static struct miscdevice keypad_dev = {
- KEYPAD_MINOR,
- "keypad",
- &keypad_fops
+ .minor = KEYPAD_MINOR,
+ .name = "keypad",
+ .fops = &keypad_fops,
};
static void keypad_send_key(const char *string, int max_len)
@@ -1663,7 +1692,7 @@ static void keypad_send_key(const char *string, int max_len)
return;
/* send the key to the device only if a process is attached to it. */
- if (keypad_open_cnt > 0) {
+ if (!atomic_read(&keypad_available)) {
while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) {
keypad_buffer[(keypad_start + keypad_buflen++) %
KEYPAD_BUFFER] = *string++;
@@ -1917,7 +1946,7 @@ static void panel_process_inputs(void)
static void panel_scan_timer(void)
{
- if (keypad_enabled && keypad_initialized) {
+ if (keypad.enabled && keypad_initialized) {
if (spin_trylock_irq(&pprt_lock)) {
phys_scan_contacts();
@@ -1929,14 +1958,16 @@ static void panel_scan_timer(void)
panel_process_inputs();
}
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
if (keypressed) {
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
- light_tempo = FLASH_LIGHT_TEMPO;
- } else if (light_tempo > 0) {
- light_tempo--;
- if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0))
+ lcd.light_tempo = FLASH_LIGHT_TEMPO;
+ } else if (lcd.light_tempo > 0) {
+ lcd.light_tempo--;
+ if (lcd.light_tempo == 0
+ && ((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(0);
}
}
@@ -2108,7 +2139,7 @@ static void keypad_init(void)
static int panel_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
switch (code) {
case SYS_DOWN:
panel_lcd_print
@@ -2164,13 +2195,13 @@ static void panel_attach(struct parport *port)
/* must init LCD first, just in case an IRQ from the keypad is
* generated at keypad init
*/
- if (lcd_enabled) {
+ if (lcd.enabled) {
lcd_init();
if (misc_register(&lcd_dev))
goto err_unreg_device;
}
- if (keypad_enabled) {
+ if (keypad.enabled) {
keypad_init();
if (misc_register(&keypad_dev))
goto err_lcd_unreg;
@@ -2178,7 +2209,7 @@ static void panel_attach(struct parport *port)
return;
err_lcd_unreg:
- if (lcd_enabled)
+ if (lcd.enabled)
misc_deregister(&lcd_dev);
err_unreg_device:
parport_unregister_device(pprt);
@@ -2196,14 +2227,14 @@ static void panel_detach(struct parport *port)
return;
}
- if (keypad_enabled && keypad_initialized) {
+ if (keypad.enabled && keypad_initialized) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}
- if (lcd_enabled && lcd_initialized) {
+ if (lcd.enabled && lcd.initialized) {
misc_deregister(&lcd_dev);
- lcd_initialized = 0;
+ lcd.initialized = false;
}
parport_release(pprt);
@@ -2218,72 +2249,89 @@ static struct parport_driver panel_driver = {
};
/* init function */
-static int panel_init(void)
+static int __init panel_init_module(void)
{
- /* for backwards compatibility */
- if (keypad_type < 0)
- keypad_type = keypad_enabled;
-
- if (lcd_type < 0)
- lcd_type = lcd_enabled;
-
- if (parport < 0)
- parport = DEFAULT_PARPORT;
+ int selected_keypad_type = NOT_SET;
/* take care of a possible profile */
switch (profile) {
case PANEL_PROFILE_CUSTOM:
/* custom profile */
- if (keypad_type < 0)
- keypad_type = DEFAULT_KEYPAD;
- if (lcd_type < 0)
- lcd_type = DEFAULT_LCD;
+ selected_keypad_type = DEFAULT_KEYPAD_TYPE;
+ selected_lcd_type = DEFAULT_LCD_TYPE;
break;
case PANEL_PROFILE_OLD:
/* 8 bits, 2*16, old keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_OLD;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_OLD;
- if (lcd_width < 0)
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
+
+ /* TODO: These two are a little hacky; sort it out later */
+ if (lcd_width == NOT_SET)
lcd_width = 16;
- if (lcd_hwidth < 0)
+ if (lcd_hwidth == NOT_SET)
lcd_hwidth = 16;
break;
case PANEL_PROFILE_NEW:
/* serial, 2*16, new keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NEW;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_KS0074;
+ selected_keypad_type = KEYPAD_TYPE_NEW;
+ selected_lcd_type = LCD_TYPE_KS0074;
break;
case PANEL_PROFILE_HANTRONIX:
/* 8 bits, 2*16 hantronix-like, no keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NONE;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_HANTRONIX;
+ selected_keypad_type = KEYPAD_TYPE_NONE;
+ selected_lcd_type = LCD_TYPE_HANTRONIX;
break;
case PANEL_PROFILE_NEXCOM:
/* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_NEXCOM;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_NEXCOM;
+ selected_keypad_type = KEYPAD_TYPE_NEXCOM;
+ selected_lcd_type = LCD_TYPE_NEXCOM;
break;
case PANEL_PROFILE_LARGE:
/* 8 bits, 2*40, old keypad */
- if (keypad_type < 0)
- keypad_type = KEYPAD_TYPE_OLD;
- if (lcd_type < 0)
- lcd_type = LCD_TYPE_OLD;
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
break;
}
- lcd_enabled = (lcd_type > 0);
- keypad_enabled = (keypad_type > 0);
+ /*
+ * Init lcd struct with load-time values to preserve exact current
+ * functionality (at least for now).
+ */
+ lcd.height = lcd_height;
+ lcd.width = lcd_width;
+ lcd.bwidth = lcd_bwidth;
+ lcd.hwidth = lcd_hwidth;
+ lcd.charset = lcd_charset;
+ lcd.proto = lcd_proto;
+ lcd.pins.e = lcd_e_pin;
+ lcd.pins.rs = lcd_rs_pin;
+ lcd.pins.rw = lcd_rw_pin;
+ lcd.pins.cl = lcd_cl_pin;
+ lcd.pins.da = lcd_da_pin;
+ lcd.pins.bl = lcd_bl_pin;
+
+ /* Leave it for now, just in case */
+ lcd.esc_seq.len = -1;
+
+ /*
+ * Overwrite selection with module param values (both keypad and lcd),
+ * where the deprecated params have lower prio.
+ */
+ if (keypad_enabled != NOT_SET)
+ selected_keypad_type = keypad_enabled;
+ if (keypad_type != NOT_SET)
+ selected_keypad_type = keypad_type;
- switch (keypad_type) {
+ keypad.enabled = (selected_keypad_type > 0);
+
+ if (lcd_enabled != NOT_SET)
+ selected_lcd_type = lcd_enabled;
+ if (lcd_type != NOT_SET)
+ selected_lcd_type = lcd_type;
+
+ lcd.enabled = (selected_lcd_type > 0);
+
+ switch (selected_keypad_type) {
case KEYPAD_TYPE_OLD:
keypad_profile = old_keypad_profile;
break;
@@ -2306,7 +2354,7 @@ static int panel_init(void)
return -EIO;
}
- if (!lcd_enabled && !keypad_enabled) {
+ if (!lcd.enabled && !keypad.enabled) {
/* no device enabled, let's release the parport */
if (pprt) {
parport_release(pprt);
@@ -2333,11 +2381,6 @@ static int panel_init(void)
return 0;
}
-static int __init panel_init_module(void)
-{
- return panel_init();
-}
-
static void __exit panel_cleanup_module(void)
{
unregister_reboot_notifier(&panel_notifier);
@@ -2346,16 +2389,16 @@ static void __exit panel_cleanup_module(void)
del_timer_sync(&scan_timer);
if (pprt != NULL) {
- if (keypad_enabled) {
+ if (keypad.enabled) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}
- if (lcd_enabled) {
+ if (lcd.enabled) {
panel_lcd_print("\x0cLCD driver " PANEL_VERSION
"\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
misc_deregister(&lcd_dev);
- lcd_initialized = 0;
+ lcd.initialized = false;
}
/* TODO: free all input signals */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 9224e029ef2b..d61842ed673e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -888,7 +888,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->Rssi = 0;
- memcpy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ ether_addr_copy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)));
/* beacon interval */
p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
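Aside on the memcpy() to ether_addr_copy() conversions in this file: ether_addr_copy() copies exactly ETH_ALEN (6) bytes and, per its kernel-doc, expects both buffers to be 16-bit aligned. A hedged sketch, not taken from this driver:

/* Illustrative only. */
#include <linux/etherdevice.h>

static void sketch_copy_mac(u8 *dst, const u8 *src)
{
	/* Equivalent to memcpy(dst, src, ETH_ALEN) when both pointers are
	 * 16-bit aligned; otherwise plain memcpy() remains the safe choice. */
	ether_addr_copy(dst, src);
}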
@@ -1164,7 +1164,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if (!paclnode->valid) {
INIT_LIST_HEAD(&paclnode->list);
- memcpy(paclnode->addr, addr, ETH_ALEN);
+ ether_addr_copy(paclnode->addr, addr);
paclnode->valid = true;
@@ -1186,7 +1186,6 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
{
struct list_head *plist, *phead;
- int ret = 0;
struct rtw_wlan_acl_node *paclnode;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -1217,7 +1216,7 @@ int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
spin_unlock_bh(&(pacl_node_q->lock));
DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
- return ret;
+ return 0;
}
static void update_bcn_fixed_ie(struct adapter *padapter)
@@ -1753,7 +1752,6 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1761,7 +1759,7 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
- return ret;
+ return 0;
DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
@@ -1782,13 +1780,12 @@ int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
- return ret;
+ return 0;
}
int rtw_sta_flush(struct adapter *padapter)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1798,7 +1795,7 @@ int rtw_sta_flush(struct adapter *padapter)
DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
- return ret;
+ return 0;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
@@ -1822,7 +1819,7 @@ int rtw_sta_flush(struct adapter *padapter)
associated_clients_update(padapter, true);
- return ret;
+ return 0;
}
/* called > TSR LEVEL for USB or SDIO Interface*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index eddef9cd2e16..4b4346244953 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -167,7 +167,7 @@ int rtw_cmd_thread(void *context)
struct cmd_obj *pcmd;
u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
- struct adapter *padapter = (struct adapter *)context;
+ struct adapter *padapter = context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
allow_signal(SIGTERM);
@@ -433,8 +433,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
- if (pcmd != NULL)
- kfree(pcmd);
+ kfree(pcmd);
res = _FAIL;
@@ -456,7 +455,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
@@ -638,7 +637,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
ether_addr_copy(psetstakey_para->addr, sta->hwaddr);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
else
GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 1f72f7d8097e..bc3fe10ff247 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -45,7 +45,7 @@ int proc_get_write_reg(char *page, char **start,
int proc_set_write_reg(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 addr, val, len;
@@ -577,7 +577,7 @@ int proc_get_rx_signal(char *page, char **start,
int proc_set_rx_signal(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 is_signal_dbg;
@@ -627,7 +627,7 @@ int proc_get_ht_enable(char *page, char **start,
int proc_set_ht_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -669,7 +669,7 @@ int proc_get_cbw40_enable(char *page, char **start,
int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -710,7 +710,7 @@ int proc_get_ampdu_enable(char *page, char **start,
int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -771,7 +771,7 @@ int proc_get_rx_stbc(char *page, char **start,
int proc_set_rx_stbc(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct registry_priv *pregpriv = &padapter->registrypriv;
char tmp[32];
@@ -800,7 +800,7 @@ int proc_get_rssi_disp(char *page, char **start,
int proc_set_rssi_disp(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
char tmp[32];
u32 enable = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 7006088d1ad0..8816d116a8b8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -106,13 +106,13 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
if (efuseTbl == NULL) {
DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
- goto exit;
+ return;
}
eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
if (eFuseWord == NULL) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
- goto exit;
+ goto eFuseWord_failed;
}
/* 0. Refresh efuse init map as all 0xFF. */
@@ -210,10 +210,10 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* */
exit:
- kfree(efuseTbl);
+ kfree(eFuseWord);
- if (eFuseWord)
- kfree(eFuseWord);
+eFuseWord_failed:
+ kfree(efuseTbl);
}
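Aside: the hunk above switches to the usual kernel error-unwinding shape, one cleanup label per allocation, freed in reverse order. A minimal sketch of the pattern with invented names:

/* Illustrative only. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static int sketch_two_buffers(void)
{
	u8 *map;
	u16 *words;
	int ret = 0;

	map = kzalloc(128, GFP_KERNEL);
	if (!map)
		return -ENOMEM;			/* nothing allocated yet */

	words = kzalloc(64 * sizeof(*words), GFP_KERNEL);
	if (!words) {
		ret = -ENOMEM;
		goto free_map;			/* free only what already exists */
	}

	/* ... work with map and words ... */

	kfree(words);
free_map:
	kfree(map);
	return ret;
}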
static void efuse_read_phymap_from_txpktbuf(
@@ -250,7 +250,7 @@ static void efuse_read_phymap_from_txpktbuf(
while (!(reg_0x143 = usb_read8(adapter, REG_TXPKTBUF_DBG)) &&
(passing_time = rtw_get_passing_time_ms(start)) < 1000) {
DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, usb_read8(adapter, 0x106));
- msleep(1);
+ usleep_range(1000, 2000);
}
lo32 = usb_read32(adapter, REG_PKTBUF_DBG_DATA_L);
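Aside on the msleep(1) to usleep_range(1000, 2000) change just above: for waits of only a few milliseconds, msleep() can sleep much longer than requested because it rounds up to jiffies, so the kernel timers documentation recommends usleep_range() for this range. Sketch, for illustration only:

/* Illustrative only. */
#include <linux/delay.h>

static void sketch_short_wait(void)
{
	/* Sleep between 1 ms and 2 ms; giving a range lets the kernel
	 * coalesce the wakeup with other pending timers. */
	usleep_range(1000, 2000);
}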
@@ -322,7 +322,6 @@ void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _si
iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
iol_mode_enable(Adapter, 0);
}
- return;
}
/* Do not support BT */
@@ -332,56 +331,56 @@ void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, v
case TYPE_EFUSE_MAX_SECTION:
{
u8 *pMax_section;
- pMax_section = (u8 *)pOut;
+ pMax_section = pOut;
*pMax_section = EFUSE_MAX_SECTION_88E;
}
break;
case TYPE_EFUSE_REAL_CONTENT_LEN:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
}
break;
case TYPE_EFUSE_CONTENT_LEN_BANK:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
}
break;
case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
case TYPE_EFUSE_MAP_LEN:
{
u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
+ pu2Tmp = pOut;
*pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
}
break;
case TYPE_EFUSE_PROTECT_BYTES_BANK:
{
u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
+ pu1Tmp = pOut;
*pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
}
break;
default:
{
u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
+ pu1Tmp = pOut;
*pu1Tmp = 0;
}
break;
@@ -638,10 +637,9 @@ static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuse
if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
return false;
- } else {
- efuse_addr++;
- continue;
}
+ efuse_addr++;
+ continue;
} else if (pg_header != tmp_header) { /* offset PG fail */
struct pgpkt fixPkt;
fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
@@ -708,14 +706,13 @@ static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u
if (badworden == 0x0F) {
/* write ok */
return true;
- } else {
- /* reorganize other pg packet */
- PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data);
- if (!PgWriteSuccess)
- return false;
- else
- return true;
}
+ /* reorganize other pg packet */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data);
+ if (!PgWriteSuccess)
+ return false;
+ else
+ return true;
}
static bool
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 755d3effd0a7..f2c3ca79c0c9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -159,7 +159,7 @@ u8 *rtw_set_ie
return pbuf + len + 2;
}
-inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
u8 new_ch, u8 ch_switch_cnt)
{
u8 ie_data[3];
@@ -870,7 +870,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
if (elen < 4) {
if (show_errors) {
DBG_88E("short vendor specific information element ignored (len=%lu)\n",
- (unsigned long) elen);
+ (unsigned long)elen);
}
return -1;
}
@@ -890,7 +890,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
if (elen < 5) {
DBG_88E("short WME information element ignored (len=%lu)\n",
- (unsigned long) elen);
+ (unsigned long)elen);
return -1;
}
switch (pos[4]) {
@@ -905,7 +905,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n",
- pos[4], (unsigned long) elen);
+ pos[4], (unsigned long)elen);
return -1;
}
break;
@@ -916,7 +916,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long) elen);
+ pos[3], (unsigned long)elen);
return -1;
}
break;
@@ -929,13 +929,13 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
break;
default:
DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n",
- pos[3], (unsigned long) elen);
+ pos[3], (unsigned long)elen);
return -1;
}
break;
default:
DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
- pos[0], pos[1], pos[2], (unsigned long) elen);
+ pos[0], pos[1], pos[2], (unsigned long)elen);
return -1;
}
return 0;
@@ -969,7 +969,7 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
if (elen > left) {
if (show_errors) {
DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n",
- id, elen, (unsigned long) left);
+ id, elen, (unsigned long)left);
}
return ParseFailed;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index fc280ce57d2c..2faf6b2e8129 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -98,7 +98,6 @@ u8 rtw_do_join(struct adapter *padapter)
pibss = padapter->registrypriv.dev_network.MacAddress;
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(padapter);
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 384be22052e5..1b8264b978da 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
@@ -28,7 +24,7 @@
/* */
void BlinkTimerCallback(void *data)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = data;
struct adapter *padapter = pLed->padapter;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
@@ -228,7 +224,8 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->bLedWPSBlinkInProgress = false;
} else {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ _set_timer(&(pLed->BlinkTimer),
+ LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA);
}
break;
default:
@@ -392,7 +389,8 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ _set_timer(&(pLed->BlinkTimer),
+ LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA);
} else {
pLed->BlinkingLedState = RTW_LED_ON;
_set_timer(&(pLed->BlinkTimer), 0);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 149c271e966d..d4632da50c1d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -674,7 +674,6 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1334,7 +1333,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
- memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1364,7 +1362,7 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
*/
void _rtw_join_timeout_handler (void *function_context)
{
- struct adapter *adapter = (struct adapter *)function_context;
+ struct adapter *adapter = function_context;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1406,7 +1404,7 @@ void _rtw_join_timeout_handler (void *function_context)
*/
void rtw_scan_timeout_handler (void *function_context)
{
- struct adapter *adapter = (struct adapter *)function_context;
+ struct adapter *adapter = function_context;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1437,7 +1435,7 @@ void rtw_dynamic_check_timer_handlder(void *function_context)
struct registry_priv *pregistrypriv = &adapter->registrypriv;
if (!adapter)
- goto exit;
+ return;
if (!adapter->hw_init_completed)
goto exit;
if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved))
@@ -2117,7 +2115,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
if (0 == issued) {
DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
- rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
+ rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
}
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 70b1bc3e0e63..e4b7ee4c99d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -227,7 +227,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
pmlmeext->cur_channel = padapter->registrypriv.channel;
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmlmeext->oper_channel = pmlmeext->cur_channel ;
+ pmlmeext->oper_channel = pmlmeext->cur_channel;
pmlmeext->oper_bwmode = pmlmeext->cur_bwmode;
pmlmeext->oper_ch_offset = pmlmeext->cur_ch_offset;
pmlmeext->retry = 0;
@@ -371,7 +371,6 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
int init_mlme_ext_priv(struct adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -397,7 +396,7 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->active_keep_alive_check = true;
- return res;
+ return _SUCCESS;
}
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -945,7 +944,7 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
}
pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (pstat == (struct sta_info *)NULL) {
+ if (pstat == NULL) {
status = _RSON_CLS2_;
goto asoc_class2_error;
}
@@ -1554,7 +1553,6 @@ unsigned int OnAtim(struct adapter *padapter, struct recv_frame *precv_frame)
unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_frame)
{
- unsigned int ret = _FAIL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
@@ -1587,7 +1585,7 @@ unsigned int on_action_spct(struct adapter *padapter, struct recv_frame *precv_f
}
exit:
- return ret;
+ return _FAIL;
}
unsigned int OnAction_qos(struct adapter *padapter, struct recv_frame *precv_frame)
@@ -2000,7 +1998,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
DBG_88E("%s, alloc mgnt frame fail\n", __func__);
return;
}
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
#endif /* if defined (CONFIG_88EU_AP_MODE) */
@@ -2027,7 +2025,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
SetFrameSubType(pframe, WIFI_BEACON);
pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
int len_diff;
@@ -2042,8 +2040,8 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
);
pframe += (cur_network->IELength+len_diff);
pattrib->pktlen += (cur_network->IELength+len_diff);
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof (struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
- pattrib->pktlen-sizeof (struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof(struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -2101,7 +2099,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* todo:HT for adhoc */
_issue_bcn:
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
pmlmepriv->update_bcn = false;
spin_unlock_bh(&pmlmepriv->bcn_update_lock);
@@ -2130,7 +2128,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
-#if defined (CONFIG_88EU_AP_MODE)
+#if defined(CONFIG_88EU_AP_MODE)
u8 *pwps_ie;
uint wps_ielen;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2323,8 +2321,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_PROBEREQ);
- pframe += sizeof (struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if (pssid)
pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
@@ -3209,7 +3207,7 @@ exit:
return ret;
}
-void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
+void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -3260,7 +3258,7 @@ void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
}
- pframe = rtw_set_ie_ch_switch (pframe, &(pattrib->pktlen), 0, new_ch, 0);
+ pframe = rtw_set_ie_ch_switch(pframe, &(pattrib->pktlen), 0, new_ch, 0);
pframe = rtw_set_ie_secondary_ch_offset(pframe, &(pattrib->pktlen),
hal_ch_offset_to_secondary_ch_offset(ch_offset));
@@ -4835,7 +4833,7 @@ void linked_status_chk(struct adapter *padapter)
void survey_timer_hdl(void *function_context)
{
- struct adapter *padapter = (struct adapter *)function_context;
+ struct adapter *padapter = function_context;
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -4912,7 +4910,7 @@ void link_timer_hdl(void *function_context)
void addba_timer_hdl(void *function_context)
{
- struct sta_info *psta = (struct sta_info *)function_context;
+ struct sta_info *psta = function_context;
struct ht_priv *phtpriv;
if (!psta)
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 27ed83cca193..df463a29b641 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -279,12 +279,11 @@ void rtw_ps_processor(struct adapter *padapter)
exit:
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(void *FunctionContext)
{
- struct adapter *padapter = (struct adapter *)FunctionContext;
+ struct adapter *padapter = FunctionContext;
rtw_ps_cmd(padapter);
}
@@ -527,7 +526,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->LpsIdleCount = 0;
if (padapter->registrypriv.mp_mode == 1)
- pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE ;
+ pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
@@ -577,7 +576,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
if (pwrpriv->ps_processing) {
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
- msleep(10);
+ usleep_range(1000, 3000);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 4d56dbad2a7d..bd79e9e7105a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -46,7 +46,7 @@ void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
- memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
+ memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
spin_lock_init(&psta_recvpriv->lock);
@@ -109,7 +109,7 @@ exit:
return res;
}
-void _rtw_free_recv_priv (struct recv_priv *precvpriv)
+void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -124,7 +124,7 @@ void _rtw_free_recv_priv (struct recv_priv *precvpriv)
}
-struct recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
struct recv_frame *hdr;
struct list_head *plist, *phead;
@@ -797,7 +797,7 @@ exit:
return ret;
}
-static int ap2sta_data_frame (
+static int ap2sta_data_frame(
struct adapter *adapter,
struct recv_frame *precv_frame,
struct sta_info **psta)
@@ -1266,7 +1266,7 @@ static int validate_recv_frame(struct adapter *adapter,
u8 bDumpRxPkt;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
u8 *ptr = precv_frame->rx_data;
- u8 ver = (unsigned char) (*ptr)&0x3;
+ u8 ver = (unsigned char)(*ptr)&0x3;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
@@ -1373,7 +1373,6 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
- int ret = _SUCCESS;
struct adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = precvframe->rx_data;
@@ -1428,7 +1427,7 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
memcpy(ptr+12, &be_tmp, 2);
}
- return ret;
+ return _SUCCESS;
}
/* perform defrag */
@@ -1624,7 +1623,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
- int ret = _SUCCESS;
nr_subframes = 0;
pattrib = &prframe->attrib;
@@ -1728,7 +1726,7 @@ exit:
prframe->len = 0;
rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
- return ret;
+ return _SUCCESS;
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
@@ -1949,7 +1947,7 @@ _err_exit:
void rtw_reordering_ctrl_timeout_handler(void *pcontext)
{
- struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ struct recv_reorder_ctrl *preorder_ctrl = pcontext;
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -1981,7 +1979,7 @@ static int process_recv_indicatepkts(struct adapter *padapter,
}
}
} else { /* B/G mode */
- retval = wlanhdr_to_ethhdr (prframe);
+ retval = wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
return retval;
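Several hunks in this file (and similar ones elsewhere in the series) drop casts such as `(struct recv_reorder_ctrl *)pcontext` when assigning from a `void *` callback argument. C converts `void *` to any object pointer implicitly, so the cast is pure noise and can even hide a later change of the parameter's type. A tiny self-contained illustration; the struct and handler names are made up:

struct example_ctx {
	int pending;
};

/* Timer/tasklet callbacks receive an opaque void *; recovering the typed
 * pointer needs no cast, which is all these hunks rely on. */
static void timeout_handler_example(void *context)
{
	struct example_ctx *ctx = context;	/* implicit void * conversion */

	ctx->pending = 0;
}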
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index f9096a512da5..bd8d60a230e9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -189,7 +189,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 4);
+ pframe = (u8 *)round_up((size_t)(pframe), 4);
}
}
}
@@ -258,7 +258,7 @@ static void secmicputuint32(u8 *p, u32 val)
{
long i;
for (i = 0; i < 4; i++) {
- *p++ = (u8) (val & 0xff);
+ *p++ = (u8)(val & 0xff);
val >>= 8;
}
}
@@ -621,14 +621,14 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload, payload, length);
arcfour_encrypt(&mycontext, payload+length, crc, 4);
} else {
- length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len ;
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
*((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
arcfour_init(&mycontext, rc4key, 16);
arcfour_encrypt(&mycontext, payload, payload, length);
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 4);
+ pframe = (u8 *)round_up((size_t)(pframe), 4);
}
}
} else {
@@ -953,8 +953,8 @@ static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
for (i = 8; i < 14; i++)
mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
- mic_iv[14] = (unsigned char) (payload_length / 256);
- mic_iv[15] = (unsigned char) (payload_length % 256);
+ mic_iv[14] = (unsigned char)(payload_length / 256);
+ mic_iv[15] = (unsigned char)(payload_length % 256);
}
/************************************************/
@@ -1045,8 +1045,8 @@ static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists,
ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
for (i = 8; i < 14; i++)
ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
- ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
- ctr_preload[15] = (unsigned char) (c % 256);
+ ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char)(c % 256);
}
/************************************/
@@ -1219,7 +1219,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
/* 4 start to encrypt each fragment */
- if ((pattrib->encrypt == _AES_)) {
+ if (pattrib->encrypt == _AES_) {
if (pattrib->psta)
stainfo = pattrib->psta;
else
@@ -1238,11 +1238,11 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
} else{
- length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len ;
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *) round_up((size_t)(pframe), 8);
+ pframe = (u8 *)round_up((size_t)(pframe), 8);
}
}
} else{
@@ -1460,7 +1460,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
u32 res = _SUCCESS;
pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
/* 4 start to encrypt each fragment */
- if ((prxattrib->encrypt == _AES_)) {
+ if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo != NULL) {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index e1dc8fa82d38..dc9d0ddf6b3a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -29,7 +29,7 @@
static void _rtw_init_stainfo(struct sta_info *psta)
{
- memset((u8 *)psta, 0, sizeof (struct sta_info));
+ memset((u8 *)psta, 0, sizeof(struct sta_info));
spin_lock_init(&psta->lock);
INIT_LIST_HEAD(&psta->list);
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index d300369977fa..324c1a7fd0bc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1394,7 +1394,6 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
(!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 639ace06a3d6..7a71df167464 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -37,7 +37,7 @@ static void _init_txservq(struct tx_servq *ptxservq)
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
- memset((unsigned char *)psta_xmitpriv, 0, sizeof (struct sta_xmit_priv));
+ memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
spin_lock_init(&psta_xmitpriv->lock);
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
@@ -223,7 +223,7 @@ exit:
return res;
}
-void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv)
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
{
int i;
struct adapter *padapter = pxmitpriv->adapter;
@@ -691,7 +691,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = (u8 *) round_up((size_t)(payload), 4);
+ payload = (u8 *)round_up((size_t)(payload), 4);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
curfragnum, *payload, *(payload+1),
@@ -772,7 +772,7 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
return _SUCCESS;
}
-s32 rtw_make_wlanhdr (struct adapter *padapter , u8 *hdr, struct pkt_attrib *pattrib)
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
{
u16 *qc;
@@ -1025,8 +1025,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
/* adding icv, if necessary... */
if (pattrib->iv_len) {
- if (psta != NULL) {
- switch (pattrib->encrypt) {
+ switch (pattrib->encrypt) {
case _WEP40_:
case _WEP104_:
WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
@@ -1043,7 +1042,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
else
AES_IV(pattrib->iv, psta->dot11txpn, 0);
break;
- }
}
memcpy(pframe, pattrib->iv, pattrib->iv_len);
@@ -1098,7 +1096,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
addr = (size_t)(pframe);
- mem_start = (unsigned char *) round_up(addr, 4) + hw_hdr_offset;
+ mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
}
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 80e8cc92c10a..1e963bf9e48b 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -173,7 +173,7 @@ static bool set_baseband_agc_config(struct adapter *adapt)
u32 v1 = array[i];
u32 v2 = array[i+1];
- if (v1 < 0xCDCDCDCD){
+ if (v1 < 0xCDCDCDCD) {
phy_set_bb_reg(adapt, v1, bMaskDWord, v2);
udelay(1);
}
@@ -552,7 +552,7 @@ static void store_pwrindex_offset(struct adapter *Adapter, u32 regaddr, u32 bitm
}
}
-static void rtl_addr_delay(struct adapter *adapt, u32 addr, u32 bit_mask ,u32 data)
+static void rtl_addr_delay(struct adapter *adapt, u32 addr, u32 bit_mask, u32 data)
{
if (addr == 0xfe) {
msleep(50);
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 17b7f3750547..3b2875481fc5 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -84,7 +84,7 @@ static void _rtl88e_fw_block_write(struct adapter *adapt,
static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
{
u32 fwlen = *pfwlen;
- u8 remain = (u8) (fwlen % 4);
+ u8 remain = (u8)(fwlen % 4);
remain = (remain == 0) ? 0 : (4 - remain);
@@ -101,7 +101,7 @@ static void _rtl88e_fw_page_write(struct adapter *adapt,
u32 page, const u8 *buffer, u32 size)
{
u8 value8;
- u8 u8page = (u8) (page & 0x07);
+ u8 u8page = (u8)(page & 0x07);
value8 = (usb_read8(adapt, REG_MCUFWDL + 2) & 0xF8) | u8page;
@@ -193,13 +193,13 @@ int rtl88eu_download_fw(struct adapter *adapt)
u32 fwsize;
int err;
- if (request_firmware(&fw, fw_name, device)){
+ if (request_firmware(&fw, fw_name, device)) {
dev_err(device, "Firmware %s not available\n", fw_name);
return -ENOENT;
}
if (fw->size > FW_8188E_SIZE) {
- dev_err(device,"Firmware size exceed 0x%X. Check it.\n",
+ dev_err(device, "Firmware size exceed 0x%X. Check it.\n",
FW_8188E_SIZE);
return -1;
}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 538a0f65d09c..4bdbed28774e 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -275,13 +275,6 @@ void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
bitmask, data);
}
-s32 rtw_hal_interrupt_handler(struct adapter *adapt)
-{
- if (adapt->HalFunc.interrupt_handler)
- return adapt->HalFunc.interrupt_handler(adapt);
- return _FAIL;
-}
-
void rtw_hal_set_bwmode(struct adapter *adapt,
enum ht_channel_width bandwidth, u8 offset)
{
@@ -329,15 +322,6 @@ void rtw_hal_sreset_init(struct adapter *adapt)
adapt->HalFunc.sreset_init_value(adapt);
}
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *adapt)
-{
- u8 status = 0;
-
- if (adapt->HalFunc.sreset_get_wifi_status)
- status = adapt->HalFunc.sreset_get_wifi_status(adapt);
- return status;
-}
-
void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
{
if (adapter->HalFunc.hal_notch_filter)
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index c0e7fa938059..febc83a5adb8 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -127,7 +127,7 @@ bool rtl88eu_phy_mac_config(struct adapter *adapt)
ptrarray = array_MAC_REG_8188E;
for (i = 0; i < arraylength; i = i + 2)
- usb_write8(adapt, ptrarray[i], (u8) ptrarray[i + 1]);
+ usb_write8(adapt, ptrarray[i], (u8)ptrarray[i + 1]);
usb_write8(adapt, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
return true;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index e4df83710ca6..9873998011d2 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -437,8 +437,8 @@ void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *adapter = pDM_Odm->Adapter;
- pDM_Odm->bCckHighPower = (bool) phy_query_bb_reg(adapter, 0x824, BIT9);
- pDM_Odm->RFPathRxEnable = (u8) phy_query_bb_reg(adapter, 0xc04, 0x0F);
+ pDM_Odm->bCckHighPower = (bool)phy_query_bb_reg(adapter, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8)phy_query_bb_reg(adapter, 0xc04, 0x0F);
ODM_InitDebugSetting(pDM_Odm);
}
@@ -529,7 +529,7 @@ void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
struct adapter *adapter = pDM_Odm->Adapter;
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- pDM_DigTable->CurIGValue = (u8) phy_query_bb_reg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
+ pDM_DigTable->CurIGValue = (u8)phy_query_bb_reg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
@@ -620,7 +620,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
} else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
/* 1 Lower Bound for 88E AntDiv */
if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
- DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
pDM_DigTable->AntDiv_RSSI_max));
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 4e4e21936e7c..29f87dffbad3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -118,7 +118,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
cck_highpwr = dm_odm->bCckHighPower;
- cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ;
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index 5342af778eb0..d3c6873925ba 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -278,7 +278,7 @@ static void rtl88eu_dm_hw_ant_div(struct odm_dm_struct *dm_odm)
struct rtw_dig *dig_table = &dm_odm->DM_DigTable;
struct sta_info *entry;
u32 i, min_rssi = 0xFF, ant_div_max_rssi = 0, max_rssi = 0;
- u32 local_min_rssi,local_max_rssi;
+ u32 local_min_rssi, local_max_rssi;
u32 main_rssi, aux_rssi;
u8 RxIdleAnt = 0, target_ant = 7;
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index c4f7f358a81c..3f663fe151ba 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -478,7 +478,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
/* 2.4G, decrease power */
{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
/* 2.4G, increase power */
- {0, 0, -1, -2, -3, -4,-4, -4, -4, -5, -7, -8,-9, -9, -10},
+ {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10},
};
u8 thermal_mapping[2][index_mapping_NUM_88E] = {
/* 2.4G, decrease power */
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index c2fac34c8132..eea4c8a6022b 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -131,7 +131,7 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
/* powerbase1 for HT MCS rates */
static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
u8 *pwr_level_bw20, u8 *pwr_level_bw40,
- u8 channel,u32 *ofdmbase, u32 *mcs_base)
+ u8 channel, u32 *ofdmbase, u32 *mcs_base)
{
struct hal_data_8188e *hal_data = GET_HAL_DATA(adapt);
u32 powerbase0, powerbase1;
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index ddc2f55fe13f..5dc11cae2ef9 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -164,7 +164,7 @@ do { \
#define B3WIREDATALENGTH 0x800
#define BRFSI_RFENV 0x10
-static void rtl_rfreg_delay(struct adapter *adapt, enum rf_radio_path rfpath,u32 addr, u32 mask, u32 data)
+static void rtl_rfreg_delay(struct adapter *adapt, enum rf_radio_path rfpath, u32 addr, u32 mask, u32 data)
{
if (addr == 0xfe) {
mdelay(50);
@@ -190,7 +190,7 @@ static void rtl8188e_config_rf_reg(struct adapter *adapt,
u32 content = 0x1000; /*RF Content: radio_a_txt*/
u32 maskforphyset = (u32)(content & 0xE000);
- rtl_rfreg_delay(adapt, RF90_PATH_A, addr| maskforphyset,
+ rtl_rfreg_delay(adapt, RF90_PATH_A, addr | maskforphyset,
RFREG_OFFSET_MASK,
data);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 023a3d84ee8b..7f30dea1b53b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -150,11 +150,9 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
if (haldata->fw_ractrl) {
- __le32 lmask;
memset(buf, 0, 3);
- lmask = cpu_to_le32(mask);
- memcpy(buf, &lmask, 3);
+ put_unaligned_le32(mask, buf);
FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
} else {
@@ -254,7 +252,7 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
{
u8 opmode, macid;
u16 mst_rpt = le16_to_cpu(mstatus_rpt);
- opmode = (u8) mst_rpt;
+ opmode = (u8)mst_rpt;
macid = (u8)(mst_rpt >> 8);
DBG_88E("### %s: MStatus=%x MACID=%d\n", __func__, opmode, macid);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index dab4c337a863..01566210bbd2 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -155,6 +155,8 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
bool fw_ps_awake = true;
u8 hw_init_completed = false;
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct mlme_priv *pmlmepriv = NULL;
+ u8 bLinked = false;
hw_init_completed = Adapter->hw_init_completed;
@@ -170,22 +172,20 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
fw_ps_awake = false;
/* ODM */
- if (hw_init_completed) {
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 bLinked = false;
-
- if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
- if (Adapter->stapriv.asoc_sta_count > 2)
- bLinked = true;
- } else {/* Station mode */
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- bLinked = true;
- }
-
- ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
- ODM_DMWatchdog(&hal_data->odmpriv);
+ pmlmepriv = &Adapter->mlmepriv;
+
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE |
+ WIFI_ADHOC_MASTER_STATE))) {
+ if (Adapter->stapriv.asoc_sta_count > 2)
+ bLinked = true;
+ } else {/* Station mode */
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ bLinked = true;
}
+
+ ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_DMWatchdog(&hal_data->odmpriv);
skip_dm:
/* Check GPIO to determine current RF on/off and Pbc status. */
/* Check Hardware Radio ON/OFF or not */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index d6fe5e6aa4f4..7d460eaafa35 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -181,7 +181,8 @@ static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable
switch (eVariable) {
case HAL_ODM_STA_INFO:
{
- struct sta_info *psta = (struct sta_info *)pValue1;
+ struct sta_info *psta = pValue1;
+
if (bSet) {
DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index 7a4f754d86df..a6ba53b488e3 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -25,7 +25,7 @@
void dump_txrpt_ccx_88e(void *buf)
{
- struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+ struct txrpt_ccx_88e *txrpt_ccx = buf;
DBG_88E("%s:\n"
"tag1:%u, pkt_num:%u, txdma_underflow:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index be9eede6931d..594c1da9db23 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -399,7 +399,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
mem_addr += w_sz;
- mem_addr = (u8 *) round_up((size_t)mem_addr, 4);
+ mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
}
rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index caf2ca3a47e7..14650e91c78a 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -1673,7 +1673,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
pRegToSet = RegToSet_Normal; /* 0xb972a841; */
FactorToSet = *((u8 *)val);
if (FactorToSet <= 3) {
- FactorToSet = (1<<(FactorToSet + 2));
+ FactorToSet = 1 << (FactorToSet + 2);
if (FactorToSet > 0xf)
FactorToSet = 0xf;
@@ -2012,7 +2012,7 @@ static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eV
u8 bRSSIDump = *((u8 *)pValue);
struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
if (bRSSIDump)
- dm_ocm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT ;
+ dm_ocm->DebugComponents = ODM_COMP_DIG|ODM_COMP_FA_CNT;
else
dm_ocm->DebugComponents = 0;
}
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 9191993dd3f5..3b476d80f64d 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -304,8 +304,6 @@ void rtw_hal_write_rfreg(struct adapter *padapter,
enum rf_radio_path eRFPath, u32 RegAddr,
u32 BitMask, u32 Data);
-s32 rtw_hal_interrupt_handler(struct adapter *padapter);
-
void rtw_hal_set_bwmode(struct adapter *padapter,
enum ht_channel_width Bandwidth, u8 Offset);
void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
@@ -317,7 +315,6 @@ void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
struct wlan_bssid_ex *src);
void rtw_hal_sreset_init(struct adapter *padapter);
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *padapter);
void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
void rtw_hal_reset_security_engine(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
index 1052d1817a97..15e53d380ad0 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211_ext.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
@@ -103,24 +103,24 @@ struct wme_parameter_element {
#define WPA_PUT_LE16(a, val) \
do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
+ (a)[1] = ((u16)(val)) >> 8; \
+ (a)[0] = ((u16)(val)) & 0xff; \
} while (0)
#define WPA_PUT_BE32(a, val) \
do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ (a)[0] = (u8)((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8)((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8)((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8)(((u32) (val)) & 0xff); \
} while (0)
#define WPA_PUT_LE32(a, val) \
do { \
- (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ (a)[3] = (u8)((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8)((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8)((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8)(((u32) (val)) & 0xff); \
} while (0)
#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index db7b44e16c48..914f831a5b77 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -83,9 +83,8 @@
#define ODM_COMP_INIT BIT31
/*------------------------Export Marco Definition---------------------------*/
-#define DbgPrint pr_info
#define RT_PRINTK(fmt, args...) \
- DbgPrint("%s(): " fmt, __func__, ## args);
+ pr_info("%s(): " fmt, __func__, ## args);
#ifndef ASSERT
#define ASSERT(expr)
@@ -94,40 +93,18 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if (((comp) & pDM_Odm->DebugComponents) && \
(level <= pDM_Odm->DebugLevel)) { \
- DbgPrint("[ODM-8188E] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
- if (((comp) & pDM_Odm->DebugComponents) && \
- (level <= pDM_Odm->DebugLevel)) { \
+ pr_info("[ODM-8188E] "); \
RT_PRINTK fmt; \
}
#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
if (!(expr)) { \
- DbgPrint("Assertion failed! %s at ......\n", #expr); \
- DbgPrint(" ......%s,%s,line=%d\n", __FILE__, \
+ pr_info("Assertion failed! %s at ......\n", #expr); \
+ pr_info(" ......%s,%s,line=%d\n", __FILE__, \
__func__, __LINE__); \
RT_PRINTK fmt; \
ASSERT(false); \
}
-#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
-#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
-#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
-
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
- if (((comp) & pDM_Odm->DebugComponents) && \
- (level <= pDM_Odm->DebugLevel)) { \
- int __i; \
- u8 *__ptr = (u8 *)ptr; \
- DbgPrint("[ODM] "); \
- DbgPrint(title_str); \
- DbgPrint(" "); \
- for (__i = 0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i == 5)?"":"-");\
- DbgPrint("\n"); \
- }
void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index fed9c86890b4..82f58f87656a 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -182,8 +182,8 @@ u64 rtw_modular64(u64 x, u64 y);
/* Macros for handling unaligned memory accesses */
-#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
- ((u32) (a)[2]))
+#define RTW_GET_BE24(a) ((((u32)(a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32)(a)[2]))
void rtw_buf_free(u8 **buf, u32 *buf_len);
void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index a38616e3cad2..971bf457f32d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -106,7 +106,7 @@ extern u32 GlobalDebugLevel;
u8 *ptr = (u8 *)_hexdata; \
pr_info("%s", DRIVER_PREFIX); \
pr_info(_titlestring); \
- for (__i = 0; __i < (int)_hexdatalen; __i++ ) { \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
pr_info("%02X%s", ptr[__i], \
(((__i + 1) % 4) == 0) ? \
" " : " "); \
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index c5194b620da4..23f0cfe312f3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
#ifndef __RTW_LED_H_
@@ -27,7 +23,7 @@
#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+#define LED_BLINK_WPS_SUCCESS_INTERVAL_ALPHA 5000
enum LED_CTL_MODE {
LED_CTL_POWER_ON,
@@ -92,7 +88,7 @@ struct LED_871x {
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
-struct led_priv{
+struct led_priv {
/* add for led control */
struct LED_871x SwLed0;
u8 bRegUseLed;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index d699ca19ef16..8d72ccf5f2a0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -448,7 +448,7 @@ struct mlme_ext_priv {
int init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
-void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
@@ -646,8 +646,8 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter,
void linked_status_chk(struct adapter *padapter);
-void survey_timer_hdl (void *function_context);
-void link_timer_hdl (void *funtion_context);
+void survey_timer_hdl(void *function_context);
+void link_timer_hdl(void *funtion_context);
void addba_timer_hdl(void *function_context);
#define set_survey_timer(mlmeext, ms) \
@@ -708,15 +708,15 @@ u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
#ifdef _RTW_CMD_C_
static struct cmd_hdl wlancmds[] = {
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), join_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), createbss_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm), sitesurvey_cmd_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl)
GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), tx_beacon_hdl)
@@ -787,7 +787,7 @@ static struct fwevent wlanevents[] = {
{0, NULL},
{0, NULL},
{0, &rtw_survey_event_callback}, /*8*/
- {sizeof (struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
+ {sizeof(struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
{0, &rtw_joinbss_event_callback}, /*10*/
{sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
{sizeof(struct stadel_event), &rtw_stadel_event_callback},
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index a88ebf41bba1..8dbdfafd52b5 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -766,27 +766,27 @@ enum ht_cap_ampdu_factor {
#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
#define OP_MODE_MIXED 3
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
-#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
-#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
-#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
-#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
-#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8)BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8)BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8)BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8)BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8)BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8)BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8)BIT(5))
#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
- ((u16) (0x0001 | 0x0002))
+ ((u16)(0x0001 | 0x0002))
#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
-#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
-#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
-#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
-
-#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
-#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
-#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
-#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
-#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
-#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8)BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8)BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8)BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16)BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16)BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16)BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16)BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16)BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16)BIT(11))
/* ===============WPS Section=============== */
/* For WPSv1.0 */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index d598fec4abbf..24a8f5ac96e5 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -361,7 +361,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
- if (param_len < (u32) ((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
+ if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
ret = -EINVAL;
goto exit;
}
@@ -512,14 +512,12 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
}
if (ielen) {
- buf = kzalloc(ielen, GFP_KERNEL);
+ buf = kmemdup(pie, ielen, GFP_KERNEL);
if (buf == NULL) {
ret = -ENOMEM;
goto exit;
}
- memcpy(buf, pie, ielen);
-
/* dump */
{
int i;
@@ -1136,7 +1134,8 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct iw_scan_req *req = (struct iw_scan_req *)extra;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
+ int len = min_t(int, req->essid_len,
+ IW_ESSID_MAX_SIZE);
memcpy(ssid[0].Ssid, req->essid, len);
ssid[0].SsidLength = len;
@@ -1417,7 +1416,7 @@ static int rtw_wx_set_rate(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- int i, ret = 0;
+ int i;
u8 datarates[NumRates];
u32 target_rate = wrqu->bitrate.value;
u32 fixed = wrqu->bitrate.fixed;
@@ -1490,7 +1489,7 @@ set_rate:
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
}
- return ret;
+ return 0;
}
static int rtw_wx_get_rate(struct net_device *dev,
@@ -2699,10 +2698,8 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_beacon_ie) {
- kfree(pmlmepriv->wps_beacon_ie);
- pmlmepriv->wps_beacon_ie = NULL;
- }
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
@@ -2736,10 +2733,8 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_probe_resp_ie) {
- kfree(pmlmepriv->wps_probe_resp_ie);
- pmlmepriv->wps_probe_resp_ie = NULL;
- }
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
@@ -2768,10 +2763,8 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
ie_len = len-12-2;/* 12 = param header, 2:no packed */
- if (pmlmepriv->wps_assoc_resp_ie) {
- kfree(pmlmepriv->wps_assoc_resp_ie);
- pmlmepriv->wps_assoc_resp_ie = NULL;
- }
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
if (ie_len > 0) {
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
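Two conversions in the ioctl_linux.c hunks above are stock kernel idioms: a kzalloc()+memcpy() pair collapses into kmemdup(), and min() with an open-coded cast becomes min_t(), which makes the comparison type explicit. A hedged sketch of both, assuming kernel context; the helper names are illustrative:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Duplicate a caller-supplied IE blob in one step; returns NULL on
 * allocation failure, just like the kzalloc() it replaces. */
static u8 *dup_ie_example(const u8 *pie, u32 ielen)
{
	return kmemdup(pie, ielen, GFP_KERNEL);
}

/* Clamp an unsigned length against a signed limit; min_t() converts both
 * operands to the named type before comparing. */
static int clamp_essid_len_example(size_t essid_len, int max_len)
{
	return min_t(int, essid_len, max_len);
}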
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 08a80f759b8d..88a909c9e457 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -512,7 +512,6 @@ void rtw_proc_remove_one(struct net_device *dev)
static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
{
- uint status = _SUCCESS;
struct registry_priv *registry_par = &padapter->registrypriv;
@@ -527,7 +526,7 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->channel = (u8)rtw_channel;
registry_par->wireless_mode = (u8)rtw_wireless_mode;
- registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
+ registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
registry_par->vcs_type = (u8)rtw_vcs_type;
registry_par->rts_thresh = (u16)rtw_rts_thresh;
registry_par->frag_thresh = (u16)rtw_frag_thresh;
@@ -582,7 +581,7 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
snprintf(registry_par->ifname, 16, "%s", ifname);
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
- return status;
+ return _SUCCESS;
}
static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
@@ -760,7 +759,6 @@ void rtw_stop_drv_threads(struct adapter *padapter)
static u8 rtw_init_default_value(struct adapter *padapter)
{
- u8 ret = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -803,12 +801,11 @@ static u8 rtw_init_default_value(struct adapter *padapter)
padapter->bWritePortCancel = false;
padapter->bRxRSSIDisplay = 0;
padapter->bNotifyChannelChange = 0;
- return ret;
+ return _SUCCESS;
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
@@ -833,7 +830,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
rtw_set_signal_stat_timer(&padapter->recvpriv);
- return ret8;
+ return _SUCCESS;
}
u8 rtw_init_drv_sw(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 8af4a8d24cce..abcb3a8589ef 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -51,7 +51,7 @@ void *rtw_malloc2d(int h, int w, int size)
{
int j;
- void **a = (void **)kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
+ void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
if (a == NULL) {
pr_info("%s: alloc memory fail!\n", __func__);
return NULL;
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index d9d55d12fd5f..99ce077007f4 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -148,36 +148,21 @@ static int rtw_android_set_block(struct net_device *net, char *command,
int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
{
int ret = 0;
- char *command = NULL;
+ char *command;
int cmd_num;
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- if (!ifr->ifr_data) {
- ret = -EINVAL;
- goto exit;
- }
- if (copy_from_user(&priv_cmd, ifr->ifr_data,
- sizeof(struct android_wifi_priv_cmd))) {
- ret = -EFAULT;
- goto exit;
- }
- command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
- if (!command) {
- DBG_88E("%s: failed to allocate memory\n", __func__);
- ret = -ENOMEM;
- goto exit;
- }
- if (!access_ok(VERIFY_READ, priv_cmd.buf, priv_cmd.total_len)) {
- DBG_88E("%s: failed to access memory\n", __func__);
- ret = -EFAULT;
- goto exit;
- }
- if (copy_from_user(command, (char __user *)priv_cmd.buf,
- priv_cmd.total_len)) {
- ret = -EFAULT;
- goto exit;
- }
+ if (!ifr->ifr_data)
+ return -EINVAL;
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(priv_cmd)))
+ return -EFAULT;
+ if (priv_cmd.total_len < 1)
+ return -EINVAL;
+ command = memdup_user(priv_cmd.buf, priv_cmd.total_len);
+ if (IS_ERR(command))
+ return PTR_ERR(command);
+ command[priv_cmd.total_len - 1] = 0;
DBG_88E("%s: Android private cmd \"%s\" on %s\n",
__func__, command, ifr->ifr_name);
cmd_num = rtw_android_cmdstr_to_num(command);
@@ -191,7 +176,7 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
DBG_88E("%s: Ignore private cmd \"%s\" - iface %s is down\n",
__func__, command, ifr->ifr_name);
ret = 0;
- goto exit;
+ goto free;
}
switch (cmd_num) {
case ANDROID_WIFI_CMD_STOP:
@@ -279,7 +264,7 @@ response:
} else {
ret = bytes_written;
}
-exit:
+free:
kfree(command);
return ret;
}
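The rtw_android.c rewrite above folds the open-coded kmalloc() + access_ok() + copy_from_user() sequence into memdup_user(), which allocates, copies, and reports errors in one call; note that it returns an ERR_PTR(), not NULL, on failure. A minimal sketch of the pattern, assuming kernel context; the function name is illustrative, and the length check mirrors the one added by the patch:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Copy a user-supplied command buffer of total_len bytes into kernel memory
 * and force NUL termination.  Callers check the result with IS_ERR(). */
static char *copy_user_cmd_example(const void __user *ubuf, int total_len)
{
	char *cmd;

	if (total_len < 1)
		return ERR_PTR(-EINVAL);

	cmd = memdup_user(ubuf, total_len);
	if (IS_ERR(cmd))
		return cmd;

	cmd[total_len - 1] = '\0';
	return cmd;
}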
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 2f87150a21b7..bee39c2278f1 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -63,7 +63,6 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_config_descriptor *pconf_desc;
struct usb_host_interface *phost_iface;
struct usb_interface_descriptor *piface_desc;
- struct usb_host_endpoint *phost_endp;
struct usb_endpoint_descriptor *pendp_desc;
struct usb_device *pusbd;
@@ -92,24 +91,22 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
int ep_num;
- phost_endp = phost_iface->endpoint + i;
-
- if (phost_endp) {
- pendp_desc = &phost_endp->desc;
- ep_num = usb_endpoint_num(pendp_desc);
-
- if (usb_endpoint_is_bulk_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
- pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] = ep_num;
- pdvobjpriv->RtNumOutPipes++;
- }
- pdvobjpriv->ep_num[i] = ep_num;
+ pendp_desc = &phost_iface->endpoint[i].desc;
+
+ ep_num = usb_endpoint_num(pendp_desc);
+
+ if (usb_endpoint_is_bulk_in(pendp_desc)) {
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_int_in(pendp_desc)) {
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
+ ep_num;
+ pdvobjpriv->RtNumOutPipes++;
}
+ pdvobjpriv->ep_num[i] = ep_num;
}
if (pusbd->speed == USB_SPEED_HIGH)
@@ -557,8 +554,6 @@ static void rtw_dev_remove(struct usb_interface *pusb_intf)
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
DBG_88E("-r871xu_dev_remove, done\n");
-
- return;
}
static struct usb_driver rtl8188e_usb_drv = {
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index ba1e178fb510..d2efa9dfc8c0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -160,10 +160,10 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
switch (haldata->UsbRxAggMode) {
case USB_RX_AGG_DMA:
case USB_RX_AGG_MIX:
- pkt_offset = (u16) round_up(pkt_offset, 128);
+ pkt_offset = (u16)round_up(pkt_offset, 128);
break;
case USB_RX_AGG_USB:
- pkt_offset = (u16) round_up(pkt_offset, 4);
+ pkt_offset = (u16)round_up(pkt_offset, 4);
break;
case USB_RX_AGG_DISABLE:
default:
@@ -843,7 +843,7 @@ void usb_write_port_cancel(struct adapter *padapter)
void rtl8188eu_recv_tasklet(void *priv)
{
struct sk_buff *pskb;
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = priv;
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -862,7 +862,7 @@ void rtl8188eu_recv_tasklet(void *priv)
void rtl8188eu_xmit_tasklet(void *priv)
{
int ret = false;
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = priv;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 0ce47b07ef86..5acf9a9dddeb 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -46,7 +46,7 @@ void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
}
-uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
@@ -66,13 +66,7 @@ uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
int rtw_endofpktfile(struct pkt_file *pfile)
{
-
- if (pfile->pkt_len == 0) {
- return true;
- }
-
-
- return false;
+ return pfile->pkt_len == 0;
}
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 0ffed2d06b58..552d943b1761 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1287,7 +1287,7 @@ void rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
struct tx_desc_cmd *entry,
- struct cb_desc *cb_desc, struct sk_buff* skb)
+ struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1302,7 +1302,7 @@ void rtl8192_tx_fill_cmd_desc(struct net_device *dev,
if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
entry->CmdInit = DESC_PACKET_TYPE_INIT;
} else {
- struct tx_desc * entry_tmp = (struct tx_desc *)entry;
+ struct tx_desc *entry_tmp = (struct tx_desc *)entry;
entry_tmp->CmdInit = DESC_PACKET_TYPE_NORMAL;
entry_tmp->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index b6ce8c3b2f8c..885315cac3a4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1449,13 +1449,11 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
@@ -1465,13 +1463,11 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = 0;
TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
@@ -1493,7 +1489,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
@@ -1501,7 +1496,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
@@ -1515,7 +1509,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
@@ -1523,7 +1516,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 6da57847a533..0415e02b4eff 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -16,6 +16,8 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "rtllib.h"
#include "rtl819x_BA.h"
@@ -79,7 +81,6 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
struct sk_buff *skb = NULL;
struct rtllib_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
RTLLIB_DEBUG(RTLLIB_DL_TRACE | RTLLIB_DL_BA, "========>%s(), frame(%d)"
@@ -115,15 +116,15 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
if (ACT_ADDBARSP == type) {
RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
- tmp = StatusCode;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(StatusCode, tag);
tag += 2;
}
- tmp = pBA->BaParamSet.shortData;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaParamSet.shortData, tag);
tag += 2;
- tmp = pBA->BaTimeoutValue;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(pBA->BaTimeoutValue, tag);
tag += 2;
if (ACT_ADDBAREQ == type) {
@@ -143,7 +144,6 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
struct sk_buff *skb = NULL;
struct rtllib_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
u16 len = 6 + ieee->tx_headroom;
if (net_ratelimit())
@@ -178,11 +178,11 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
*tag++ = ACT_CAT_BA;
*tag++ = ACT_DELBA;
- tmp = DelbaParamSet.shortData;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(DelbaParamSet.shortData, tag);
tag += 2;
- tmp = ReasonCode;
- memcpy(tag, (u8 *)&tmp, 2);
+
+ put_unaligned_le16(ReasonCode, tag);
tag += 2;
RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA|RTLLIB_DL_BA, skb->data, skb->len);
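The rtl819x_BAProc.c hunks above replace `tmp = val; memcpy(tag, &tmp, 2);` with put_unaligned_le16(), which handles the possibly unaligned store and, on big-endian hosts, also keeps the emitted bytes little-endian as 802.11 fields require (hence the new asm/unaligned.h and asm/byteorder.h includes). A short sketch of serialising two 16-bit action-frame fields; the function name and fields are illustrative:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Append status and timeout to a frame body as little-endian u16 fields,
 * independent of host endianness and of tag's alignment. */
static u8 *put_ba_fields_example(u8 *tag, u16 status, u16 timeout)
{
	put_unaligned_le16(status, tag);
	tag += 2;
	put_unaligned_le16(timeout, tag);
	tag += 2;
	return tag;
}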
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 2d82f8993ea1..cef2dc27103f 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2762,8 +2762,6 @@ extern void rtllib_stop_scan(struct rtllib_device *ieee);
extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
-extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
-extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
short pwr);
extern void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
@@ -2944,12 +2942,12 @@ void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
extern const long rtllib_wlan_frequencies[];
-extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
+static inline void rtllib_increment_scans(struct rtllib_device *ieee)
{
ieee->scans++;
}
-extern inline int rtllib_get_scans(struct rtllib_device *ieee)
+static inline int rtllib_get_scans(struct rtllib_device *ieee)
{
return ieee->scans;
}
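The rtllib.h change above converts `extern inline` header helpers to `static inline`. `extern inline` means different things under gnu89 and C99 (in one mode no out-of-line definition is ever emitted, in the other every includer emits one), so it is fragile across compiler modes; `static inline` behaves the same everywhere: each translation unit gets its own private copy, which is the form used throughout the kernel. A trivial self-contained illustration; the struct is made up:

struct scan_stats {
	int scans;
};

/* static inline: no external symbol is emitted, so the helper can live in a
 * header included by many .c files without multiple-definition trouble. */
static inline void scan_stats_increment(struct scan_stats *s)
{
	s->scans++;
}

static inline int scan_stats_get(const struct scan_stats *s)
{
	return s->scans;
}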
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 1c2014fd8d81..cf11b042b93a 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1415,10 +1415,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
return 1;
rx_dropped:
- if (rxb != NULL) {
- kfree(rxb);
- rxb = NULL;
- }
ieee->stats.rx_dropped++;
/* Returning 0 indicates to caller that we have not handled the SKB--
@@ -2691,7 +2687,7 @@ void rtllib_rx_mgt(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *stats)
{
- struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ;
+ struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
if ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) !=
RTLLIB_STYPE_PROBE_RESP) &&
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index abb6729ae279..d992a754e72d 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -193,7 +193,7 @@ MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
return QueryRate;
}
-u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
+static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
u8 rate;
@@ -343,7 +343,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
}
}
-inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
+static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
{
unsigned int len, rate_len;
u8 *tag;
@@ -1311,7 +1311,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
}
if (beacon->bCkipSupported) {
- static u8 AironetIeOui[] = {0x00, 0x01, 0x66};
+ static const u8 AironetIeOui[] = {0x00, 0x01, 0x66};
u8 CcxAironetBuf[30];
struct octet_string osCcxAironetIE;
@@ -1331,10 +1331,11 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
}
if (beacon->bCcxRmEnable) {
- static u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01, 0x00};
+ static const u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
+ 0x00};
struct octet_string osCcxRmCap;
- osCcxRmCap.Octet = CcxRmCapBuf;
+ osCcxRmCap.Octet = (u8 *) CcxRmCapBuf;
osCcxRmCap.Length = sizeof(CcxRmCapBuf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
@@ -3167,6 +3168,7 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_delayed_work(&ieee->associate_retry_wq);
destroy_workqueue(ieee->wq);
up(&ieee->wx_sem);
+ tasklet_kill(&ieee->ps_task);
}
/********************************************************
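
[editor's note] rtllib_softmac_free() now calls tasklet_kill(&ieee->ps_task) so the power-save tasklet cannot fire against freed state. A kernel-style sketch of the init/kill pairing, with hypothetical context and function names; the tasklet API shown (tasklet_init/tasklet_kill with an unsigned long cookie) is the one in use at the time of this patch.

#include <linux/interrupt.h>

struct demo_ctx {
	struct tasklet_struct ps_task;
};

static void demo_ps_func(unsigned long data)
{
	/* runs in softirq context and dereferences the cookie below */
}

static void demo_init(struct demo_ctx *ctx)
{
	tasklet_init(&ctx->ps_task, demo_ps_func, (unsigned long)ctx);
}

static void demo_free(struct demo_ctx *ctx)
{
	/* wait for a running instance and block rescheduling before the
	 * backing context is torn down -- the ordering the hunk adds */
	tasklet_kill(&ctx->ps_task);
}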
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 73de9e9669f6..d401dbf4c7c6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -713,8 +713,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
while(!list_empty(&pTS->RxPendingPktList)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__);
pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
- if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
+ if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
{
/* This protect buffer from overflow. */
if(index >= REORDER_WIN_SIZE) {
@@ -800,9 +800,8 @@ static u8 parse_subframe(struct sk_buff *skb,
// Null packet, don't indicate it to upper layer
ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/
- if( skb->len <= ChkLength ) {
+ if (skb->len <= ChkLength)
return 0;
- }
skb_pull(skb, LLCOffset);
@@ -1035,10 +1034,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
{
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__func__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
- if( (fc & (1<<11)) &&
- (frag == pRxTS->RxLastFragNum) &&
- (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum) )
- {
+ if ((fc & (1<<11)) &&
+ (frag == pRxTS->RxLastFragNum) &&
+ (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) {
goto rx_dropped;
}
else
@@ -2456,7 +2454,7 @@ static inline void ieee80211_process_probe_response(
// then wireless adapter should do active scan from ch1~11 and
// passive scan from ch12~14
- if( !IsLegalChannel(ieee, network.channel) )
+ if (!IsLegalChannel(ieee, network.channel))
return;
if(ieee->bGlobalDomain)
{
@@ -2465,8 +2463,7 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if( !IsLegalChannel(ieee, network.channel) )
- {
+ if (!IsLegalChannel(ieee, network.channel)) {
printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
return;
}
@@ -2487,8 +2484,7 @@ static inline void ieee80211_process_probe_response(
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
- if( !IsLegalChannel(ieee, network.channel) )
- {
+ if (!IsLegalChannel(ieee, network.channel)) {
printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel);
return;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index a85bb232be97..d1471877e19d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -411,7 +411,7 @@ static void ieee80211_send_probe(struct ieee80211_device *ieee)
}
}
-void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
+static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
{
if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){
ieee80211_send_probe(ieee);
@@ -517,7 +517,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
goto out;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
if(channel_map[ieee->current_network.channel] == 1)
- ieee80211_send_probe_requests(ieee);
+ ieee80211_send_probe_requests(ieee);
queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
@@ -804,12 +804,11 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
*(tag++) = ieee->current_network.channel;
if(atim_len){
- u16 val16;
*(tag++) = MFIE_TYPE_IBSS_SET;
*(tag++) = 2;
- //*((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
- val16 = cpu_to_le16(ieee->current_network.atim_window);
- memcpy((u8 *)tag, (u8 *)&val16, 2);
+
+ put_unaligned_le16(ieee->current_network.atim_window,
+ (u8 *)tag);
tag+=2;
}
@@ -1043,10 +1042,9 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
{
ccxrm_ie_len = 6+2;
}
- if( beacon->BssCcxVerNumber >= 2 )
- {
+ if (beacon->BssCcxVerNumber >= 2)
cxvernum_ie_len = 5+2;
- }
+
#ifdef THOMAS_TURBO
len = sizeof(struct ieee80211_assoc_request_frame)+ 2
+ beacon->ssid_len//essid tagged val
@@ -1103,7 +1101,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
if(ieee->short_slot)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
if (wmm_info_len) //QOS
- hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
+ hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_QOS);
hdr->listen_interval = 0xa; //FIXME
@@ -1118,8 +1116,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
// For CCX 1 S13, CKIP. Added by Annie, 2006-08-14.
- if( beacon->bCkipSupported )
- {
+ if (beacon->bCkipSupported) {
static u8 AironetIeOui[] = {0x00, 0x01, 0x66}; // "4500-client"
u8 CcxAironetBuf[30];
OCTET_STRING osCcxAironetIE;
@@ -1158,8 +1155,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
tag += osCcxRmCap.Length;
}
- if( beacon->BssCcxVerNumber >= 2 )
- {
+ if (beacon->BssCcxVerNumber >= 2) {
u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
OCTET_STRING osCcxVerNum;
CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
@@ -1533,7 +1529,7 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
break;
if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
- ieee80211_softmac_new_net(ieee, target);
+ ieee80211_softmac_new_net(ieee, target);
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 82ea533a0cf4..644368df6342 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -374,7 +374,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
goto out;
}
- if ( ieee->state == IEEE80211_LINKED){
+ if (ieee->state == IEEE80211_LINKED) {
queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
/* intentionally forget to up sem */
return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 57bef219687b..fca73c7c9fbe 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -529,8 +529,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
}
}
// For test , CTS replace with RTS
- if( 0 )
- {
+ if (0) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 68f5ede86633..ae1b3cf2866c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -172,7 +172,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 51552d42d66c..cd196cec0dd9 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -110,7 +110,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *BAReq = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
+ __le16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA, "========>%s(), frame(%d) sentd to:%pM, ieee->dev:%p\n", __func__, type, Dst, ieee->dev);
@@ -196,7 +196,7 @@ static struct sk_buff *ieee80211_DELBA(
struct sk_buff *skb = NULL;
struct ieee80211_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
- u16 tmp = 0;
+ __le16 tmp = 0;
//len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
@@ -342,8 +342,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
PSEQUENCE_CONTROL pBaStartSeqCtrl = NULL;
PRX_TS_RECORD pTS = NULL;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in BAREQ(%d / %zu)\n",
skb->len,
@@ -444,8 +443,7 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
PBA_PARAM_SET pBaParamSet = NULL;
u16 ReasonCode;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in BARSP(%d / %zu)\n",
skb->len,
@@ -463,10 +461,9 @@ int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb)
// Check the capability
// Since we can always receive A-MPDU, we just check if it is under HT mode.
- if( ieee->current_network.qos_data.active == 0 ||
- ieee->pHTInfo->bCurrentHTSupport == false ||
- ieee->pHTInfo->bCurrentAMPDUEnable == false )
- {
+ if (ieee->current_network.qos_data.active == 0 ||
+ ieee->pHTInfo->bCurrentHTSupport == false ||
+ ieee->pHTInfo->bCurrentAMPDUEnable == false) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
ReasonCode = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
@@ -577,8 +574,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
u16 *pReasonCode = NULL;
u8 *dst = NULL;
- if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
- {
+ if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 6) {
IEEE80211_DEBUG(IEEE80211_DL_ERR,
" Invalid skb len in DELBA(%d / %zu)\n",
skb->len,
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index 1ea2cd392670..e60d926a3973 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -602,8 +602,7 @@ void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u
// TODO: Nedd to take care of this part
IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
- if( IsEncrypt)
- {
+ if (IsEncrypt) {
pCapELE->MPDUDensity = 7; // 8us
pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
}
@@ -951,8 +950,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
- if( pHTInfo->bCurrentHTSupport == false )
- {
+ if (pHTInfo->bCurrentHTSupport == false) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
return;
}
@@ -1043,7 +1041,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
if (ieee->current_network.bssht.bdRT2RTAggregation)
{
- if( ieee->pairwise_key_type != KEY_TYPE_NA)
+ if (ieee->pairwise_key_type != KEY_TYPE_NA)
// Realtek may set 32k in security mode and 64k for others
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
@@ -1332,8 +1330,7 @@ u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
{
if(ieee->pHTInfo->bCurrentHTSupport)
{
- if( (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1)
- {
+ if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
return true;
}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 929ac29197cc..e031a253e2ae 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1791,8 +1791,8 @@ static void rtl8192_link_change(struct net_device *dev)
}
static struct ieee80211_qos_parameters def_qos_parameters = {
- {3, 3, 3, 3},/* cw_min */
- {7, 7, 7, 7},/* cw_max */
+ {cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
+ {cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},/* aifs */
{0, 0, 0, 0},/* flags */
{0, 0, 0, 0} /* tx_op_limit */
@@ -1821,8 +1821,11 @@ static void rtl8192_qos_activate(struct work_struct *work)
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->current_network.mode;
- u8 u1bAIFS;
+ u32 u1bAIFS;
u32 u4bAcParam;
+ u32 op_limit;
+ u32 cw_max;
+ u32 cw_min;
int i;
mutex_lock(&priv->mutex);
@@ -1835,11 +1838,14 @@ static void rtl8192_qos_activate(struct work_struct *work)
for (i = 0; i < QOS_QUEUE_NUM; i++) {
//Mode G/A: slotTimeTimer = 9; Mode B: 20
u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
- u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
-
+ u1bAIFS <<= AC_PARAM_AIFS_OFFSET;
+ op_limit = (u32)le16_to_cpu(qos_parameters->tx_op_limit[i]);
+ op_limit <<= AC_PARAM_TXOP_LIMIT_OFFSET;
+ cw_max = (u32)le16_to_cpu(qos_parameters->cw_max[i]);
+ cw_max <<= AC_PARAM_ECW_MAX_OFFSET;
+ cw_min = (u32)le16_to_cpu(qos_parameters->cw_min[i]);
+ cw_min <<= AC_PARAM_ECW_MIN_OFFSET;
+ u4bAcParam = op_limit | cw_max | cw_min | u1bAIFS;
write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
}
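
[editor's note] The r8192U_core.c hunk treats cw_min/cw_max/tx_op_limit as little-endian on-wire values and converts them with le16_to_cpu() before shifting them into the AC parameter dword. A minimal userspace sketch of that assembly; the DEMO_*_OFFSET values are stand-ins for the driver's AC_PARAM_*_OFFSET macros, chosen only to make the example self-contained.

#include <stdint.h>

#define DEMO_AIFS_OFFSET        0
#define DEMO_ECW_MIN_OFFSET     8
#define DEMO_ECW_MAX_OFFSET    12
#define DEMO_TXOP_LIMIT_OFFSET 16

/* Decode a stored little-endian 16-bit field into host order. */
static uint16_t demo_le16_to_cpu(const uint8_t le[2])
{
	return (uint16_t)(le[0] | (le[1] << 8));
}

/* Convert each __le16 QoS field first, then shift into the register
 * word -- the same order of operations as the hunk above. */
static uint32_t demo_build_ac_param(const uint8_t cw_min_le[2],
				    const uint8_t cw_max_le[2],
				    const uint8_t txop_le[2],
				    uint32_t aifs)
{
	uint32_t cw_min = demo_le16_to_cpu(cw_min_le);
	uint32_t cw_max = demo_le16_to_cpu(cw_max_le);
	uint32_t txop   = demo_le16_to_cpu(txop_le);

	return (txop << DEMO_TXOP_LIMIT_OFFSET) |
	       (cw_max << DEMO_ECW_MAX_OFFSET) |
	       (cw_min << DEMO_ECW_MIN_OFFSET) |
	       (aifs << DEMO_AIFS_OFFSET);
}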
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index b0b66fba563b..936565d46014 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -1476,14 +1476,12 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
@@ -1496,14 +1494,12 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8) ;
@@ -1528,7 +1524,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
@@ -1537,7 +1532,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
@@ -1556,7 +1550,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter1, TempVal);
//Write 0xa24 ~ 0xa27
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
(CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
@@ -1565,7 +1558,6 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
rCCK0_TxFilter2, TempVal);
//Write 0xa28 0xa29
- TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
@@ -1810,85 +1802,6 @@ void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
}
} /* DM_ChangeDynamicInitGainThresh */
-void
-dm_change_rxpath_selection_setting(
- struct net_device *dev,
- s32 DM_Type,
- s32 DM_Value)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- prate_adaptive pRA = (prate_adaptive)&(priv->rate_adaptive);
-
-
- if(DM_Type == 0)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- DM_RxPathSelTable.Enable = (u8)DM_Value;
- }
- else if(DM_Type == 1)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- DM_RxPathSelTable.DbgMode = (u8)DM_Value;
- }
- else if(DM_Type == 2)
- {
- if(DM_Value > 40)
- DM_Value = 40;
- DM_RxPathSelTable.SS_TH_low = (u8)DM_Value;
- }
- else if(DM_Type == 3)
- {
- if(DM_Value > 25)
- DM_Value = 25;
- DM_RxPathSelTable.diff_TH = (u8)DM_Value;
- }
- else if(DM_Type == 4)
- {
- if(DM_Value >= CCK_Rx_Version_MAX)
- DM_Value = CCK_Rx_Version_1;
- DM_RxPathSelTable.cck_method= (u8)DM_Value;
- }
- else if(DM_Type == 10)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[0] = (u8)DM_Value;
- }
- else if(DM_Type == 11)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[1] = (u8)DM_Value;
- }
- else if(DM_Type == 12)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[2] = (u8)DM_Value;
- }
- else if(DM_Type == 13)
- {
- if(DM_Value > 100)
- DM_Value = 50;
- DM_RxPathSelTable.rf_rssi[3] = (u8)DM_Value;
- }
- else if(DM_Type == 20)
- {
- if(DM_Value > 1)
- DM_Value = 1;
- pRA->ping_rssi_enable = (u8)DM_Value;
- }
- else if(DM_Type == 21)
- {
- if(DM_Value > 30)
- DM_Value = 30;
- pRA->ping_rssi_thresh_for_ra = DM_Value;
- }
-}
-
-
/*-----------------------------------------------------------------------------
* Function: dm_dig_init()
*
@@ -3554,7 +3467,6 @@ static void dm_check_txrateandretrycount(struct net_device *dev)
static void dm_send_rssi_tofw(struct net_device *dev)
{
- DCMD_TXCMD_T tx_cmd;
struct r8192_priv *priv = ieee80211_priv(dev);
// If we test chariot, we should stop the TX command ?
@@ -3562,9 +3474,6 @@ static void dm_send_rssi_tofw(struct net_device *dev)
// 0x1e0(byte) to notify driver.
write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
return;
- tx_cmd.Op = TXCMD_SET_RX_RSSI;
- tx_cmd.Length = 4;
- tx_cmd.Value = priv->undecorated_smoothed_pwdb;
}
/*---------------------------Define function prototype------------------------*/
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 28f60d2dbe5b..361d2d0c3df1 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -171,7 +171,6 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int *parms = (int *)extra;
int enable = (parms[0] > 0);
- short prev = priv->crcmon;
down(&priv->wx_sem);
@@ -183,11 +182,6 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
DMESG("bad CRC in monitor mode are %s",
priv->crcmon ? "accepted" : "rejected");
- if (prev != priv->crcmon && priv->up) {
- /* rtl8180_down(dev); */
- /* rtl8180_up(dev); */
- }
-
up(&priv->wx_sem);
return 0;
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index f66ad8a0dfe0..c230be290ab6 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -45,6 +45,7 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
unsigned char *seg_ptr;
cb_desc *tcb_desc;
u8 bLastIniPkt;
+ u8 index;
firmware_init_param(dev);
//Fragmentation might be required
@@ -78,18 +79,19 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
* Transform from little endian to big endian
* and pending zero
*/
- for (i=0; i < frag_length; i+=4) {
- *seg_ptr++ = ((i+0)<frag_length)?code_virtual_address[i+3]:0;
- *seg_ptr++ = ((i+1)<frag_length)?code_virtual_address[i+2]:0;
- *seg_ptr++ = ((i+2)<frag_length)?code_virtual_address[i+1]:0;
- *seg_ptr++ = ((i+3)<frag_length)?code_virtual_address[i+0]:0;
+ for (i = 0; i < frag_length; i += 4) {
+ *seg_ptr++ = ((i+0) < frag_length)?code_virtual_address[i+3] : 0;
+ *seg_ptr++ = ((i+1) < frag_length)?code_virtual_address[i+2] : 0;
+ *seg_ptr++ = ((i+2) < frag_length)?code_virtual_address[i+1] : 0;
+ *seg_ptr++ = ((i+3) < frag_length)?code_virtual_address[i+0] : 0;
}
- tcb_desc->txbuf_size= (u16)i;
+ tcb_desc->txbuf_size = (u16)i;
skb_put(skb, i);
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop)) {
+ index = tcb_desc->queue_index;
+ if (!priv->ieee80211->check_nic_enough_desc(dev, index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[index])) ||
+ (priv->ieee80211->queue_stop)) {
RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
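
[editor's note] The fw_download_code() hunk above only reflows the copy loop, but the loop itself is easy to misread: each group of four firmware bytes is written out in reversed order and the tail is zero-padded. A plain-C sketch of the same transform; it guards on the source index being read, which is slightly stricter than the driver's check, so the standalone version cannot read past the fragment.

#include <stddef.h>
#include <stdint.h>

/* Copy frag_len bytes, reversing byte order within each 4-byte group
 * and padding the final partial group with zeros. Returns the number
 * of bytes written (frag_len rounded up to a multiple of 4). */
static size_t demo_swap_and_pad(uint8_t *dst, const uint8_t *src,
				size_t frag_len)
{
	size_t i;

	for (i = 0; i < frag_len; i += 4) {
		*dst++ = (i + 3 < frag_len) ? src[i + 3] : 0;
		*dst++ = (i + 2 < frag_len) ? src[i + 2] : 0;
		*dst++ = (i + 1 < frag_len) ? src[i + 1] : 0;
		*dst++ = src[i + 0];
	}
	return i;
}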
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.h b/drivers/staging/rtl8192u/r819xU_firmware.h
index c48c884aa1af..cfa222350a9a 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware.h
@@ -12,15 +12,15 @@
#define GET_COMMAND_PACKET_FRAG_THRESHOLD(v) (4*(v/4) - 8 - USB_HWDESC_HEADER_LEN)
//#endif
-typedef enum _firmware_init_step{
+typedef enum _firmware_init_step {
FW_INIT_STEP0_BOOT = 0,
FW_INIT_STEP1_MAIN = 1,
FW_INIT_STEP2_DATA = 2,
-}firmware_init_step_e;
+} firmware_init_step_e;
-typedef enum _opt_rst_type{
+typedef enum _opt_rst_type {
OPT_SYSTEM_RESET = 0,
OPT_FIRMWARE_RESET = 1,
-}opt_rst_type_e;
+} opt_rst_type_e;
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index e9c15fe8ded5..058960251bac 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1463,6 +1463,7 @@ void rtl8192_SwChnl_WorkItem(struct net_device *dev)
u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+
RT_TRACE(COMP_CH, "%s(), SwChnlInProgress: %d\n", __func__,
priv->SwChnlInProgress);
if (!priv->up)
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 1d6ade05fa18..324da34383ea 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -86,7 +86,7 @@ static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
(int)padapter->fw->size);
return 0;
}
- *ppmappedfw = (u8 *)((*praw)->data);
+ *ppmappedfw = (*praw)->data;
return (*praw)->size;
}
@@ -136,15 +136,10 @@ static void update_fwhdr(struct fw_hdr *pfwhdr, const u8 *pmappedfw)
static u8 chk_fwhdr(struct fw_hdr *pfwhdr, u32 ulfilelength)
{
u32 fwhdrsz, fw_sz;
- u8 intf, rfconf;
/* check signature */
if ((pfwhdr->signature != 0x8712) && (pfwhdr->signature != 0x8192))
return _FAIL;
- /* check interface */
- intf = (u8)((pfwhdr->version&0x3000) >> 12);
- /* check rf_conf */
- rfconf = (u8)((pfwhdr->version&0xC000) >> 14);
/* check fw_priv_sze & sizeof(struct fw_priv) */
if (pfwhdr->fw_priv_sz != sizeof(struct fw_priv))
return _FAIL;
@@ -162,7 +157,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
sint i;
u8 tmp8, tmp8_a;
u16 tmp16;
- u32 maxlen = 0, tmp32; /* for compare usage */
+ u32 maxlen = 0; /* for compare usage */
uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */
struct fw_hdr fwhdr;
u32 ulfilelength; /* FW file size */
@@ -262,7 +257,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
if (tmp8_a != (tmp8|BIT(2)))
goto exit_fail;
- tmp32 = r8712_read32(padapter, TCR);
+ r8712_read32(padapter, TCR);
/* 4.polling IMEM Ready */
i = 100;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index fe9459e483c5..57868085ce58 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -124,11 +124,10 @@ u8 *r8712_get_ie(u8 *pbuf, sint index, sint *len, sint limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -237,10 +236,9 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit = limit - (pbuf - pie) - 2 - len;
if (limit <= 0)
@@ -370,13 +368,12 @@ int r8712_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher,
int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
u8 *wpa_ie, u16 *wpa_len)
{
- u8 authmode, sec_idx;
+ u8 authmode;
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint cnt;
/*Search required WPA or WPA2 IE and copy to sec_ie[ ]*/
cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
- sec_idx = 0;
while (cnt < in_len) {
authmode = in_ie[cnt];
if ((authmode == _WPA_IE_ID_) &&
@@ -414,7 +411,7 @@ int r8712_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt+1]+2;
match = true;
break;
- } else
+ }
cnt += in_ie[cnt+1]+2; /* goto next */
}
return match;
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index d7a357b8d2d1..5153ad9c2c75 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -88,17 +88,13 @@ static inline u32 _down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
return _FAIL;
- else
- return _SUCCESS;
+ return _SUCCESS;
}
static inline u32 end_of_queue_search(struct list_head *head,
struct list_head *plist)
{
- if (head == plist)
- return true;
- else
- return false;
+ return (head == plist);
}
static inline void sleep_schedulable(int ms)
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 495ee1205e02..0631f3638257 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -146,7 +146,7 @@ void r8712_os_read_port(struct _adapter *padapter, struct recv_buf *precvbuf)
dev_kfree_skb_any(precvbuf->pskb);
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- if (precvbuf->irp_pending == false)
+ if (!precvbuf->irp_pending)
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
(unsigned char *)precvbuf);
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 720e8a15db96..62e53cc1d8b9 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -157,10 +157,8 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct readBB_parm *prdbbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- prdbbparm = (struct readBB_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
@@ -174,10 +172,8 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct writeBB_parm *pwritebbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- pwritebbparm = (struct writeBB_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
@@ -190,10 +186,8 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct readRF_parm *prdrfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- prdrfparm = (struct readRF_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
@@ -207,10 +201,8 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct writeRF_parm *pwriterfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- pwriterfparm = (struct writeRF_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
@@ -222,9 +214,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 sys_suspend_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- struct usb_suspend_parm *psetusbsuspend;
- psetusbsuspend = (struct usb_suspend_parm *)pcmd->parmbuf;
r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
@@ -320,7 +310,7 @@ void r8712_fw_cmd_data(struct _adapter *pAdapter, u32 *value, u8 flag)
int r8712_cmd_thread(void *context)
{
struct cmd_obj *pcmd;
- unsigned int cmdsz, wr_sz, *pcmdbuf, *prspbuf;
+ unsigned int cmdsz, wr_sz, *pcmdbuf;
struct tx_desc *pdesc;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct _adapter *padapter = (struct _adapter *)context;
@@ -342,7 +332,6 @@ _next:
continue;
}
pcmdbuf = (unsigned int *)pcmdpriv->cmd_buf;
- prspbuf = (unsigned int *)pcmdpriv->rsp_buf;
pdesc = (struct tx_desc *)pcmdbuf;
memset(pdesc, 0, TXDESC_SIZE);
pcmd = cmd_hdl_filter(padapter, pcmd);
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index c9eeb4270ab9..d95716999498 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -219,13 +219,12 @@ u16 r8712_efuse_get_current_size(struct _adapter *padapter)
{
int bContinual = true;
u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
while (bContinual && efuse_one_byte_read(padapter, efuse_addr,
&efuse_data) && (efuse_addr < efuse_available_max_size)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = calculate_word_cnts(hworden);
/* read next header */
@@ -414,19 +413,18 @@ u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset,
bResult = false;
}
break;
- } else { /* write header fail */
- bResult = false;
- if (0xFF == efuse_data)
- return bResult; /* nothing damaged. */
- /* call rescue procedure */
- if (fix_header(padapter, efuse_data, efuse_addr) ==
- false)
- return false; /* rescue fail */
-
- if (++repeat_times > _REPEAT_THRESHOLD_) /* fail */
- break;
- /* otherwise, take another risk... */
}
+ /* write header fail */
+ bResult = false;
+ if (0xFF == efuse_data)
+ return bResult; /* nothing damaged. */
+ /* call rescue procedure */
+ if (!fix_header(padapter, efuse_data, efuse_addr))
+ return false; /* rescue fail */
+
+ if (++repeat_times > _REPEAT_THRESHOLD_) /* fail */
+ break;
+ /* otherwise, take another risk... */
}
return bResult;
}
@@ -541,15 +539,16 @@ u8 r8712_efuse_map_write(struct _adapter *padapter, u16 addr, u16 cnts,
}
idx++;
break;
- } else {
- if ((data[idx] != pktdata[i]) || (data[idx+1] !=
- pktdata[i+1])) {
- word_en &= ~BIT(i >> 1);
- newdata[j++] = data[idx];
- newdata[j++] = data[idx + 1];
- }
- idx += 2;
}
+
+ if ((data[idx] != pktdata[i]) || (data[idx+1] !=
+ pktdata[i+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[j++] = data[idx];
+ newdata[j++] = data[idx + 1];
+ }
+ idx += 2;
+
if (idx == cnts)
break;
}
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index b27806209268..cd8b444b255b 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -158,8 +158,6 @@ int r8712_free_recvframe(union recv_frame *precvframe,
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
struct recv_stat *prxstat)
{
- u32 *pphy_info;
- struct phy_stat *pphy_stat;
u16 drvinfo_sz = 0;
drvinfo_sz = (le32_to_cpu(prxstat->rxdw0)&0x000f0000)>>16;
@@ -189,10 +187,6 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
/*Offset 16*/
/*Offset 20*/
/*phy_info*/
- if (drvinfo_sz) {
- pphy_stat = (struct phy_stat *)(prxstat+1);
- pphy_info = (u32 *)prxstat+1;
- }
}
/*perform defrag*/
@@ -200,7 +194,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 *data, wlanhdr_offset;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -223,7 +217,6 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
curfragnum++;
plist = &defrag_q->queue;
plist = plist->next;
- data = get_recvframe_data(prframe);
while (end_of_queue_search(phead, plist) == false) {
pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
pnfhdr = &pnextrframe->u.hdr;
@@ -436,13 +429,11 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
{
uint voffset;
u8 *poffset;
- u16 pkt_len, cmd_len, drvinfo_sz;
- u8 eid, cmd_seq;
+ u16 cmd_len, drvinfo_sz;
struct recv_stat *prxstat;
poffset = (u8 *)prxcmdbuf;
voffset = *(uint *)poffset;
- pkt_len = le32_to_cpu(voffset) & 0x00003fff;
prxstat = (struct recv_stat *)prxcmdbuf;
drvinfo_sz = ((le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16);
drvinfo_sz = drvinfo_sz << 3;
@@ -450,8 +441,6 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
do {
voffset = *(uint *)poffset;
cmd_len = (u16)(le32_to_cpu(voffset) & 0xffff);
- cmd_seq = (u8)((le32_to_cpu(voffset) >> 24) & 0x7f);
- eid = (u8)((le32_to_cpu(voffset) >> 16) & 0xff);
r8712_event_handle(padapter, (uint *)poffset);
poffset += (cmd_len + 8);/*8 bytes alignment*/
} while (le32_to_cpu(voffset) & BIT(31));
@@ -533,11 +522,10 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
if (bforced == true) {
if (list_empty(phead))
return true;
- else {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
- pattrib = &prframe->u.hdr.attrib;
- preorder_ctrl->indicate_seq = pattrib->seq_num;
- }
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
* Check if there is any packet need indicate. */
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index d9c1561e3272..fe5e315319f7 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -205,12 +205,12 @@ void r8712_free_cmd_obj(struct cmd_obj *pcmd)
{
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_))
- kfree((unsigned char *)pcmd->parmbuf);
+ kfree(pcmd->parmbuf);
if (pcmd->rsp != NULL) {
if (pcmd->rspsz != 0)
- kfree((unsigned char *)pcmd->rsp);
+ kfree(pcmd->rsp);
}
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
}
/*
@@ -232,7 +232,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
return _FAIL;
psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
if (psurveyPara == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara,
@@ -264,7 +264,7 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
return _FAIL;
pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
if (pbsetdataratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara,
@@ -286,7 +286,7 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
return _FAIL;
psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
if (psetchplanpara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
@@ -307,7 +307,7 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
return _FAIL;
pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
if (pssetbasicratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara,
@@ -329,7 +329,7 @@ u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT));
@@ -349,7 +349,7 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetDIG));
@@ -369,7 +369,7 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
if (pwriteptmparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetRA));
@@ -389,7 +389,7 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
return _FAIL;
pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
if (pwriterfparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg));
@@ -410,7 +410,7 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
return _FAIL;
prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
if (prdrfparm == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
INIT_LIST_HEAD(&ph2c->list);
@@ -470,7 +470,6 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
{
- u8 *auth;
uint t_len = 0;
struct ndis_wlan_bssid_ex *psecnetwork;
struct cmd_obj *pcmd;
@@ -517,7 +516,6 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
return _FAIL;
}
memcpy(psecnetwork, &pnetwork->network, t_len);
- auth = &psecuritypriv->authenticator_ie[0];
psecuritypriv->authenticator_ie[0] = (unsigned char)
psecnetwork->IELength;
if ((psecnetwork->IELength-12) < (256 - 1))
@@ -527,7 +525,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
memcpy(&psecuritypriv->authenticator_ie[1],
&psecnetwork->IEs[12], (256-1));
psecnetwork->IELength = 0;
- /* If the the driver wants to use the bssid to create the connection.
+ /* If the driver wants to use the bssid to create the connection.
* If not, we copy the connecting AP's MAC address to it so that
* the driver just has the bssid information for PMKIDList searching.
*/
@@ -626,7 +624,7 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
return _FAIL;
pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
if (pdisconnect == NULL) {
- kfree((u8 *)pdisconnect_cmd);
+ kfree(pdisconnect_cmd);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect,
@@ -648,7 +646,7 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
return _FAIL;
psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
if (psetop == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
@@ -672,13 +670,13 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
return _FAIL;
psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
if (psetstakey_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
@@ -712,7 +710,7 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
return _FAIL;
psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
if (psetrfintfsparm == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetrfintfsparm,
@@ -734,7 +732,7 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
return _FAIL;
psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
if (psetrttblparm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm,
@@ -755,7 +753,7 @@ u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
return _FAIL;
prdtssiparm = kmalloc(sizeof(*prdtssiparm), GFP_ATOMIC);
if (prdtssiparm == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
INIT_LIST_HEAD(&ph2c->list);
@@ -781,7 +779,7 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
return _FAIL;
psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
if (psetMacAddr_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para,
@@ -803,13 +801,13 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
return _FAIL;
psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
if (psetassocsta_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
if (psetassocsta_rsp == NULL) {
- kfree((u8 *)ph2c);
- kfree((u8 *)psetassocsta_para);
+ kfree(ph2c);
+ kfree(psetassocsta_para);
return _FAIL;
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_);
@@ -831,7 +829,7 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
return _FAIL;
paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
if (paddbareq_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
paddbareq_parm->tid = tid;
@@ -852,7 +850,7 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
return _FAIL;
pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
if (pdrvintcmd_param == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
return _FAIL;
}
pdrvintcmd_param->i_cid = WDG_WK_CID;
@@ -1026,7 +1024,7 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
return _FAIL;
param = kzalloc(sizeof(*param), GFP_ATOMIC);
if (param == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
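
[editor's note] The long run of rtl871x_cmd.c hunks above all make the same change: dropping (u8 *)/(unsigned char *) casts from kfree(). kfree() takes a const void pointer, so the casts add nothing. A tiny hypothetical allocation/free pair for context (names and GFP_KERNEL context are illustrative; the driver's own paths use GFP_ATOMIC):

#include <linux/slab.h>

struct demo_parm {
	int value;
};

static int demo_alloc_and_free(void)
{
	struct demo_parm *parm;

	parm = kmalloc(sizeof(*parm), GFP_KERNEL);
	if (!parm)
		return -ENOMEM;

	/* ... fill in and hand off parm ... */

	kfree(parm);	/* no (u8 *) or (unsigned char *) cast needed */
	return 0;
}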
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index d7b63aedead7..e4e5b13cb927 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -93,7 +93,7 @@ static uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl)
pintfhdl->intf_option = 0;
pintfhdl->adapter = dev;
pintfhdl->intf_dev = (u8 *)&(adapter->dvobjpriv);
- if (_init_intf_hdl(adapter, pintfhdl) == false)
+ if (!_init_intf_hdl(adapter, pintfhdl))
goto register_intf_hdl_fail;
return _SUCCESS;
register_intf_hdl_fail:
@@ -142,7 +142,7 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
alloc_io_queue_fail:
if (pio_queue) {
kfree(pio_queue->pallocated_free_ioreqs_buf);
- kfree((u8 *)pio_queue);
+ kfree(pio_queue);
}
adapter->pio_queue = NULL;
return _FAIL;
@@ -156,6 +156,6 @@ void r8712_free_io_queue(struct _adapter *adapter)
kfree(pio_queue->pallocated_free_ioreqs_buf);
adapter->pio_queue = NULL;
unregister_intf_hdl(&pio_queue->intf);
- kfree((u8 *)pio_queue);
+ kfree(pio_queue);
}
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 8e42ce06e5d7..73b7d864ccbd 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -170,7 +170,7 @@ static inline char *translate_scan(struct _adapter *padapter,
s8 *p;
u32 i = 0, ht_ielen = 0;
u16 cap, ht_cap = false, mcs_rate;
- u8 rssi, bw_40MHz = 0, short_GI = 0;
+ u8 rssi;
if ((pnetwork->network.Configuration.DSConfig < 1) ||
(pnetwork->network.Configuration.DSConfig > 14)) {
@@ -197,10 +197,6 @@ static inline char *translate_scan(struct _adapter *padapter,
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
- bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH)
- ? 1 : 0;
- short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
@@ -287,12 +283,10 @@ static inline char *translate_scan(struct _adapter *padapter,
u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
int n;
- sint out_len = 0;
- out_len = r8712_get_sec_ie(pnetwork->network.IEs,
- pnetwork->network.
- IELength, rsn_ie, &rsn_len,
- wpa_ie, &wpa_len);
+ r8712_get_sec_ie(pnetwork->network.IEs,
+ pnetwork->network.IELength, rsn_ie, &rsn_len,
+ wpa_ie, &wpa_len);
if (wpa_len > 0) {
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
@@ -505,14 +499,14 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
}
exit:
- kfree((u8 *)pwep);
+ kfree(pwep);
return ret;
}
static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
unsigned short ielen)
{
- u8 *buf = NULL, *pos = NULL;
+ u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
@@ -522,7 +516,6 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
buf = kmemdup(pie, ielen, GFP_ATOMIC);
if (buf == NULL)
return -ENOMEM;
- pos = buf;
if (ielen < RSN_HEADER_LEN) {
ret = -EINVAL;
goto exit;
@@ -1133,13 +1126,11 @@ static int r871x_wx_set_mlme(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u16 reason;
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *) extra;
if (mlme == NULL)
return -1;
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
if (!r8712_set_802_11_disassociate(padapter))
@@ -2216,7 +2207,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
- kfree((u8 *)param);
+ kfree(param);
return ret;
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 9d47eb472837..6318a0e65e6e 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -77,7 +77,7 @@ static u8 do_join(struct _adapter *padapter)
/* when set_ssid/set_bssid for do_join(), but scanning queue
* is empty we try to issue sitesurvey firstly
*/
- if (pmlmepriv->sitesurveyctrl.traffic_busy == false)
+ if (!pmlmepriv->sitesurveyctrl.traffic_busy)
r8712_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid);
return true;
} else {
@@ -143,8 +143,7 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
_FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
ETH_ALEN)) {
- if (check_fwstate(pmlmepriv,
- WIFI_STATION_STATE) == false)
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
goto _Abort_Set_BSSID; /* driver is in
* WIFI_ADHOC_MASTER_STATE */
} else {
@@ -177,7 +176,7 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- if (padapter->hw_init_completed == false)
+ if (!padapter->hw_init_completed)
return;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
@@ -188,10 +187,9 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
(!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid,
ssid->SsidLength))) {
- if ((check_fwstate(pmlmepriv,
- WIFI_STATION_STATE) == false)) {
- if (r8712_is_same_ibss(padapter,
- pnetwork) == false) {
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ if (!r8712_is_same_ibss(padapter,
+ pnetwork)) {
/* if in WIFI_ADHOC_MASTER_STATE or
* WIFI_ADHOC_STATE, create bss or
* rejoin again
@@ -227,7 +225,7 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
}
if (padapter->securitypriv.btkip_countermeasure == true)
goto _Abort_Set_SSID;
- if (validate_ssid(ssid) == false)
+ if (!validate_ssid(ssid))
goto _Abort_Set_SSID;
memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
@@ -308,10 +306,10 @@ u8 r8712_set_802_11_bssid_list_scan(struct _adapter *padapter)
unsigned long irqL;
u8 ret = true;
- if (padapter == NULL)
+ if (!padapter)
return false;
pmlmepriv = &padapter->mlmepriv;
- if (padapter->hw_init_completed == false)
+ if (!padapter->hw_init_completed)
return false;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
@@ -345,13 +343,9 @@ u8 r8712_set_802_11_authentication_mode(struct _adapter *padapter,
u8 r8712_set_802_11_add_wep(struct _adapter *padapter,
struct NDIS_802_11_WEP *wep)
{
- u8 bdefaultkey;
- u8 btransmitkey;
sint keyid;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true;
- btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false;
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= WEP_KEYS)
return false;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 00f2e0fc4ac5..b7462e8145d6 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -923,7 +923,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ignore_joinbss_callback:
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
if (sizeof(struct list_head) == 4 * sizeof(u32))
- kfree((u8 *)pnetwork);
+ kfree(pnetwork);
}
void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf)
@@ -1218,7 +1218,7 @@ sint r8712_set_auth(struct _adapter *adapter,
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
if (psetauthparm == NULL) {
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
return _FAIL;
}
psetauthparm->mode = (u8)psecuritypriv->AuthAlgrthm;
@@ -1372,7 +1372,7 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
u8 *out_ie, uint in_len)
{
- u8 authmode = 0, securitytype, match;
+ u8 authmode = 0, match;
u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint ielength, cnt, remove_cnt;
@@ -1399,21 +1399,17 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
switch (ndissecuritytype) {
case Ndis802_11Encryption1Enabled:
case Ndis802_11Encryption1KeyAbsent:
- securitytype = _WEP40_;
uncst_oui[3] = 0x1;
break;
case Ndis802_11Encryption2Enabled:
case Ndis802_11Encryption2KeyAbsent:
- securitytype = _TKIP_;
uncst_oui[3] = 0x2;
break;
case Ndis802_11Encryption3Enabled:
case Ndis802_11Encryption3KeyAbsent:
- securitytype = _AES_;
uncst_oui[3] = 0x4;
break;
default:
- securitytype = _NO_PRIVACY_;
break;
}
/*Search required WPA or WPA2 IE and copy to sec_ie[] */
@@ -1705,7 +1701,7 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
u8 *out_ie, uint in_len, uint *pout_len)
{
u32 ielen, out_len;
- unsigned char *p, *pframe;
+ unsigned char *p;
struct ieee80211_ht_cap ht_capie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1717,10 +1713,8 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
if (p && (ielen > 0)) {
if (pqospriv->qos_option == 0) {
out_len = *pout_len;
- pframe = r8712_set_ie(out_ie+out_len,
- _VENDOR_SPECIFIC_IE_,
- _WMM_IE_Length_,
- WMM_IE, pout_len);
+ r8712_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
pqospriv->qos_option = 1;
}
out_len = *pout_len;
@@ -1733,9 +1727,9 @@ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie,
IEEE80211_HT_CAP_DSSSCCK40;
ht_capie.ampdu_params_info = (IEEE80211_HT_CAP_AMPDU_FACTOR &
0x03) | (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
- pframe = r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
- sizeof(struct ieee80211_ht_cap),
- (unsigned char *)&ht_capie, pout_len);
+ r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct ieee80211_ht_cap),
+ (unsigned char *)&ht_capie, pout_len);
phtpriv->ht_option = 1;
}
return phtpriv->ht_option;
@@ -1748,7 +1742,6 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
int i, len;
struct sta_info *bmc_sta, *psta;
struct ieee80211_ht_cap *pht_capie;
- struct ieee80211_ht_addt_info *pht_addtinfo;
struct recv_reorder_ctrl *preorder_ctrl;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
@@ -1801,8 +1794,6 @@ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len)
p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
_HT_ADD_INFO_IE_, &len,
ie_len-sizeof(struct NDIS_802_11_FIXED_IEs));
- if (p && len > 0)
- pht_addtinfo = (struct ieee80211_ht_addt_info *)(p + 2);
}
void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority)
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 2ec660ad78af..3d913b9701bb 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -56,7 +56,7 @@ static int init_mp_priv(struct mp_priv *pmp_priv)
pmp_priv->pallocated_mp_xmitframe_buf = kmalloc(NR_MP_XMITFRAME *
sizeof(struct mp_xmit_frame) + 4,
GFP_ATOMIC);
- if (pmp_priv->pallocated_mp_xmitframe_buf == NULL) {
+ if (!pmp_priv->pallocated_mp_xmitframe_buf) {
res = _FAIL;
goto _exit_init_mp_priv;
}
@@ -173,7 +173,7 @@ u8 r8712_bb_reg_write(struct _adapter *pAdapter, u16 offset, u32 value)
oldValue = r8712_bb_reg_read(pAdapter, iocmd.value);
oldValue &= (0xFFFFFFFF >> ((4 - shift) * 8));
value = oldValue | (newValue << (shift * 8));
- if (fw_iocmd_write(pAdapter, iocmd, value) == false)
+ if (!fw_iocmd_write(pAdapter, iocmd, value))
return false;
iocmd.value += 4;
oldValue = r8712_bb_reg_read(pAdapter, iocmd.value);
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 9827ff8143b2..a16f15e91992 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -1431,11 +1431,8 @@ unsigned int mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
/*-------------------------------------------------------------------------*/
uint oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
{
- u8 bpwrup;
-
if (poid_par_priv->type_of_oid != SET_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
- bpwrup = *(u8 *)poid_par_priv->information_buf;
/*CALL the power_down function*/
return RNDIS_STATUS_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 51dcf5555b62..ed2844d2b02a 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -156,11 +156,9 @@ static void rpwm_workitem_callback(struct work_struct *work)
struct pwrctrl_priv, rpwm_workitem);
struct _adapter *padapter = container_of(pwrpriv,
struct _adapter, pwrctrlpriv);
- u8 cpwm = pwrpriv->cpwm;
-
if (pwrpriv->cpwm != pwrpriv->rpwm) {
_enter_pwrlock(&pwrpriv->lock);
- cpwm = r8712_read8(padapter, SDIO_HCPWM);
+ r8712_read8(padapter, SDIO_HCPWM);
pwrpriv->rpwm_retry = 1;
r8712_set_rpwm(padapter, pwrpriv->rpwm);
up(&pwrpriv->lock);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 9b99a71670f3..06f15f81c4d8 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -601,7 +601,7 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
{
/*remove the wlanhdr and add the eth_hdr*/
sint rmv_len;
- u16 eth_type, len;
+ u16 len;
u8 bsnaphdr;
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
@@ -635,7 +635,6 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
ptr += rmv_len;
*ptr = 0x87;
*(ptr+1) = 0x12;
- eth_type = 0x8712;
/* append rx status for mp test packets */
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + 2) - 24);
@@ -658,27 +657,11 @@ s32 r8712_recv_entry(union recv_frame *precvframe)
{
struct _adapter *padapter;
struct recv_priv *precvpriv;
- struct mlme_priv *pmlmepriv;
- struct recv_stat *prxstat;
- struct dvobj_priv *pdev;
- u8 *phead, *pdata, *ptail, *pend;
- struct __queue *pfree_recv_queue, *ppending_recv_queue;
s32 ret = _SUCCESS;
- struct intf_hdl *pintfhdl;
padapter = precvframe->u.hdr.adapter;
- pintfhdl = &padapter->pio_queue->intf;
- pmlmepriv = &padapter->mlmepriv;
precvpriv = &(padapter->recvpriv);
- pdev = &padapter->dvobjpriv;
- pfree_recv_queue = &(precvpriv->free_recv_queue);
- ppending_recv_queue = &(precvpriv->recv_pending_queue);
- phead = precvframe->u.hdr.rx_head;
- pdata = precvframe->u.hdr.rx_data;
- ptail = precvframe->u.hdr.rx_tail;
- pend = precvframe->u.hdr.rx_end;
- prxstat = (struct recv_stat *)phead;
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_RX);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index 92ca8997e5bc..77487bb9d3c0 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -170,11 +170,8 @@ static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
/* used to append sz bytes from ptr to rx_tail, update rx_tail and
* return the updated rx_tail to the caller
* after putting, rx_tail must not be larger than rx_end. */
- unsigned char *prev_rx_tail;
-
if (precvframe == NULL)
return NULL;
- prev_rx_tail = precvframe->u.hdr.rx_tail;
precvframe->u.hdr.rx_tail += sz;
if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
precvframe->u.hdr.rx_tail -= sz;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 8faf22bb7c90..c653ad6854b4 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -578,7 +578,7 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
u8 ttkey[16];
u8 crc[4];
struct arc4context mycontext;
- u32 curfragnum, length, prwskeylen;
+ u32 curfragnum, length;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 txpn;
@@ -600,7 +600,6 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
&pattrib->ra[0]);
if (stainfo != NULL) {
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
iv = pframe + pattrib->hdrlen;
@@ -655,7 +654,7 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
u8 ttkey[16];
u8 crc[4];
struct arc4context mycontext;
- u32 length, prwskeylen;
+ u32 length;
u8 *pframe, *payload, *iv, *prwskey, idx = 0;
union pn48 txpn;
struct sta_info *stainfo;
@@ -683,7 +682,6 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
return _FAIL;
} else
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
GET_TKIP_PN(iv, txpn);
pnl = (u16)(txpn.val);
pnh = (u32)(txpn.val >> 16);
@@ -1154,7 +1152,6 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{ /* exclude ICV */
/* Intermediate Buffers */
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *prwskey;
struct sta_info *stainfo;
struct pkt_attrib *pattrib = &((struct xmit_frame *)
@@ -1174,7 +1171,6 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
&pattrib->ra[0]);
if (stainfo != NULL) {
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
if ((curfragnum + 1) == pattrib->nr_frags) {
@@ -1363,7 +1359,6 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
/* Intermediate Buffers */
sint length;
- u32 prwskeylen;
u8 *pframe, *prwskey, *iv, idx;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)
@@ -1387,7 +1382,6 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
} else
prwskey = &stainfo->x_UncstKey.skey[0];
- prwskeylen = 16;
length = ((union recv_frame *)precvframe)->
u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
aes_decipher(prwskey, prxattrib->hdrlen, pframe,
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e769bb5c5fb8..4c9b98e8210e 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -79,13 +79,11 @@ static void mfree_all_stainfo(struct sta_priv *pstapriv)
{
unsigned long irqL;
struct list_head *plist, *phead;
- struct sta_info *psta = NULL;
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
phead = &pstapriv->free_sta_queue.queue;
plist = phead->next;
while ((end_of_queue_search(phead, plist)) == false) {
- psta = LIST_CONTAINOR(plist, struct sta_info, list);
plist = plist->next;
}
@@ -109,7 +107,6 @@ u32 _r8712_free_sta_priv(struct sta_priv *pstapriv)
struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -127,7 +124,6 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
psta = LIST_CONTAINOR(pfree_sta_queue->queue.next,
struct sta_info, list);
list_del_init(&(psta->list));
- tmp_aid = psta->aid;
_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
@@ -267,15 +263,10 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
void r8712_init_bcmc_stainfo(struct _adapter *padapter)
{
- struct sta_info *psta;
- struct tx_servq *ptxservq;
unsigned char bcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
- psta = r8712_alloc_stainfo(pstapriv, bcast_addr);
- if (psta == NULL)
- return;
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ r8712_alloc_stainfo(pstapriv, bcast_addr);
}
struct sta_info *r8712_get_bcmc_stainfo(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index f49acaf04076..62a377e7fdc7 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -184,7 +184,6 @@ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
struct pkt_attrib *pattrib)
{
- uint i;
struct pkt_file pktfile;
struct sta_info *psta = NULL;
struct ethhdr etherhdr;
@@ -199,7 +198,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
_r8712_open_pktfile(pkt, &pktfile);
- i = _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+ _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
@@ -236,7 +235,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
/* for mp, store the txcmd per packet and
* update pattrib according to the info in the txcmd */
/* get MP_TXDESC_SIZE bytes of txcmd per packet */
- i = _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
+ _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
pattrib->pctrl = 1;
@@ -347,7 +346,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
static sint xmitframe_addmic(struct _adapter *padapter,
struct xmit_frame *pxmitframe)
{
- u32 curfragnum, length, datalen;
+ u32 curfragnum, length;
u8 *pframe, *payload, mic[8];
struct mic_data micdata;
struct sta_info *stainfo;
@@ -369,7 +368,6 @@ static sint xmitframe_addmic(struct _adapter *padapter,
u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0};
- datalen = pattrib->pktlen - pattrib->hdrlen;
pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
if (!memcmp(psecuritypriv->XGrptxmickey
@@ -486,7 +484,7 @@ static sint make_wlanhdr(struct _adapter *padapter , u8 *hdr,
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
SetToDs(fctrl);
memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv),
@@ -825,16 +823,13 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
unsigned long irqL;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct _adapter *padapter = pxmitpriv->adapter;
- struct sk_buff *pndis_pkt = NULL;
if (pxmitframe == NULL)
return;
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
list_del_init(&pxmitframe->list);
- if (pxmitframe->pkt) {
- pndis_pkt = pxmitframe->pkt;
+ if (pxmitframe->pkt)
pxmitframe->pkt = NULL;
- }
list_add_tail(&pxmitframe->list, &pfree_xmit_queue->queue);
pxmitpriv->free_xmitframe_cnt++;
spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a3d733b145eb..7d0d1719b136 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -255,9 +255,6 @@ static struct drv_priv drvpriv = {
static uint r8712_usb_dvobj_init(struct _adapter *padapter)
{
uint status = _SUCCESS;
- struct usb_device_descriptor *pdev_desc;
- struct usb_host_config *phost_conf;
- struct usb_config_descriptor *pconf_desc;
struct usb_host_interface *phost_iface;
struct usb_interface_descriptor *piface_desc;
struct dvobj_priv *pdvobjpriv = &padapter->dvobjpriv;
@@ -265,9 +262,6 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
pdvobjpriv->padapter = padapter;
padapter->EepromAddressSize = 6;
- pdev_desc = &pusbd->descriptor;
- phost_conf = pusbd->actconfig;
- pconf_desc = &phost_conf->desc;
phost_iface = &pintf->altsetting[0];
piface_desc = &phost_iface->desc;
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
@@ -292,13 +286,13 @@ static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
void rtl871x_intf_stop(struct _adapter *padapter)
{
/*disable_hw_interrupt*/
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
/*device still exists, so driver can do i/o operation
* TODO: */
}
/* cancel in irp */
- if (padapter->dvobjpriv.inirp_deinit != NULL)
+ if (padapter->dvobjpriv.inirp_deinit)
padapter->dvobjpriv.inirp_deinit(padapter);
/* cancel out irp */
r8712_usb_write_port_cancel(padapter);
@@ -318,7 +312,7 @@ void r871x_dev_unload(struct _adapter *padapter)
r8712_stop_drv_threads(padapter);
/*s5.*/
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
padapter->hw_init_completed = false;
rtl8712_hal_deinit(padapter);
}
@@ -402,7 +396,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
/* step 3.
* initialize the dvobj_priv
*/
- if (padapter->dvobj_init == NULL)
+ if (!padapter->dvobj_init)
goto error;
else {
status = padapter->dvobj_init(padapter);
@@ -568,7 +562,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
(mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (AutoloadFail == false)) {
+ (!AutoloadFail)) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index e89d2b07fcb9..c3a4e3f26b40 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -168,7 +168,6 @@ static void usb_write_mem_complete(struct urb *purb)
void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
{
unsigned int pipe;
- int status;
struct _adapter *padapter = (struct _adapter *)pintfhdl->adapter;
struct intf_priv *pintfpriv = pintfhdl->pintfpriv;
struct io_queue *pio_queue = (struct io_queue *)padapter->pio_queue;
@@ -186,7 +185,7 @@ void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
usb_fill_bulk_urb(piorw_urb, pusbd, pipe,
wmem, cnt, usb_write_mem_complete,
pio_queue);
- status = usb_submit_urb(piorw_urb, GFP_ATOMIC);
+ usb_submit_urb(piorw_urb, GFP_ATOMIC);
_down_sema(&pintfpriv->io_retevt);
}
@@ -267,7 +266,7 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
adapter->pwrctrlpriv.pnp_bstop_trx)
return _FAIL;
- if ((precvbuf->reuse == false) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
if (NULL != precvbuf->pskb)
precvbuf->reuse = true;
@@ -275,10 +274,10 @@ u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
if (precvbuf != NULL) {
r8712_init_recvbuf(adapter, precvbuf);
/* re-assign for linux based on skb */
- if ((precvbuf->reuse == false) || (precvbuf->pskb == NULL)) {
+ if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev,
MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
- if (precvbuf->pskb == NULL)
+ if (!precvbuf->pskb)
return _FAIL;
tmpaddr = (addr_t)precvbuf->pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 0ac9130faf6c..039b598152bc 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -79,7 +79,6 @@ sint r8712_endofpktfile(struct pkt_file *pfile)
void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
- int i;
struct ethhdr etherhdr;
struct iphdr ip_hdr;
u16 UserPriority = 0;
@@ -89,8 +88,7 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
/* get UserPriority from IP hdr*/
if (pattrib->ether_type == 0x0800) {
- i = _r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr,
- sizeof(ip_hdr));
+ _r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
/*UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
UserPriority = ip_hdr.tos >> 5;
} else {
diff --git a/drivers/staging/rtl8723au/Makefile b/drivers/staging/rtl8723au/Makefile
index a9aae2163639..3e8989018a88 100644
--- a/drivers/staging/rtl8723au/Makefile
+++ b/drivers/staging/rtl8723au/Makefile
@@ -2,7 +2,6 @@ r8723au-y := \
core/rtw_cmd.o \
core/rtw_efuse.o \
core/rtw_ieee80211.o \
- core/rtw_led.o \
core/rtw_mlme.o \
core/rtw_mlme_ext.o \
core/rtw_pwrctrl.o \
@@ -33,8 +32,6 @@ r8723au-y := \
hal/rtl8723a_rf6052.o \
hal/rtl8723a_rxdesc.o \
hal/rtl8723a_sreset.o \
- hal/rtl8723a_xmit.o \
- hal/rtl8723au_led.o \
hal/rtl8723au_recv.o \
hal/rtl8723au_xmit.o \
hal/usb_halinit.o \
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index 6b4092f05da5..e394d12c36b0 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -231,12 +231,10 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
psta->expire_to--;
}
- if (psta->expire_to <= 0)
- {
+ if (psta->expire_to <= 0) {
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- if (padapter->registrypriv.wifi_spec == 1)
- {
+ if (padapter->registrypriv.wifi_spec == 1) {
psta->expire_to = pstapriv->expire_to;
continue;
}
@@ -308,15 +306,12 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
ret = issue_nulldata23a(padapter, psta->hwaddr, 0, 3, 50);
psta->keep_alive_trycnt++;
- if (ret == _SUCCESS)
- {
+ if (ret == _SUCCESS) {
DBG_8723A("asoc check, sta(" MAC_FMT ") is alive\n", MAC_ARG(psta->hwaddr));
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
continue;
- }
- else if (psta->keep_alive_trycnt <= 3)
- {
+ } else if (psta->keep_alive_trycnt <= 3) {
DBG_8723A("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
psta->expire_to = 1;
continue;
@@ -363,8 +358,7 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
return;
/* b/g mode ra_bitmap */
- for (i = 0; i < sizeof(psta->bssrateset); i++)
- {
+ for (i = 0; i < sizeof(psta->bssrateset); i++) {
if (psta->bssrateset[i])
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
}
@@ -406,8 +400,7 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
raid = networktype_to_raid23a(sta_band);
init_rate = get_highest_rate_idx23a(tx_ra_bitmap&0x0fffffff)&0x3f;
- if (psta->aid < NUM_STA)
- {
+ if (psta->aid < NUM_STA) {
u8 arg = 0;
arg = psta->mac_id&0x1f;
@@ -436,11 +429,8 @@ void add_RATid23a(struct rtw_adapter *padapter, struct sta_info *psta, u8 rssi_l
psta->raid = raid;
psta->init_rate = init_rate;
- }
- else
- {
+ } else
DBG_8723A("station aid %d exceed the max number\n", psta->aid);
- }
}
static void update_bmc_sta(struct rtw_adapter *padapter)
@@ -453,8 +443,7 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
struct wlan_bssid_ex *pcur_network = &pmlmepriv->cur_network.network;
struct sta_info *psta = rtw_get_bcmc_stainfo23a(padapter);
- if (psta)
- {
+ if (psta) {
psta->aid = 0;/* default set to 0 */
psta->mac_id = psta->aid + 1;
@@ -474,8 +463,7 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
psta->bssratelen = supportRateNum;
/* b/g mode ra_bitmap */
- for (i = 0; i < supportRateNum; i++)
- {
+ for (i = 0; i < supportRateNum; i++) {
if (psta->bssrateset[i])
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value23a(psta->bssrateset[i]&0x7f);
}
@@ -522,11 +510,8 @@ static void update_bmc_sta(struct rtw_adapter *padapter)
psta->state = _FW_LINKED;
spin_unlock_bh(&psta->lock);
- }
- else
- {
+ } else
DBG_8723A("add_RATid23a_bmc_sta error!\n");
- }
}
/* notes: */
@@ -561,8 +546,7 @@ void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info
/* ERP */
VCS_update23a(padapter, psta);
/* HT related cap */
- if (phtpriv_sta->ht_option)
- {
+ if (phtpriv_sta->ht_option) {
/* check if sta supports rx ampdu */
phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
@@ -580,9 +564,7 @@ void update_sta_info23a_apmode23a(struct rtw_adapter *padapter, struct sta_info
psta->qos_option = true;
- }
- else
- {
+ } else {
phtpriv_sta->ampdu_enable = false;
phtpriv_sta->sgi = false;
@@ -654,7 +636,7 @@ static void start_bss_network(struct rtw_adapter *padapter, u8 *pbuf)
bcn_interval = (u16)pnetwork->beacon_interval;
cur_channel = pnetwork->DSConfig;
- cur_bwmode = HT_CHANNEL_WIDTH_20;;
+ cur_bwmode = HT_CHANNEL_WIDTH_20;
cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
/* check if there is wps ie, */
@@ -1122,7 +1104,6 @@ int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
- int ret = 0;
DBG_8723A("%s(acl_num =%d) = %pM\n", __func__, pacl_list->num, addr);
@@ -1148,7 +1129,7 @@ int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
- return ret;
+ return 0;
}
static void update_bcn_fixed_ie(struct rtw_adapter *padapter)
@@ -1217,8 +1198,6 @@ static void update_bcn_wmm_ie(struct rtw_adapter *padapter)
static void update_bcn_wps_ie(struct rtw_adapter *padapter)
{
DBG_8723A("%s\n", __func__);
-
- return;
}
static void update_bcn_p2p_ie(struct rtw_adapter *padapter)
@@ -1261,8 +1240,7 @@ void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
- switch (ie_id)
- {
+ switch (ie_id) {
case 0xFF:
/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
update_bcn_fixed_ie(padapter);
@@ -1389,8 +1367,7 @@ static int rtw_ht_operation_update(struct rtw_adapter *padapter)
void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated)
{
/* update associated stations cap. */
- if (updated == true)
- {
+ if (updated == true) {
struct list_head *phead, *plist, *ptmp;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1416,34 +1393,27 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE))
- {
- if (!psta->no_short_preamble_set)
- {
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
+ if (!psta->no_short_preamble_set) {
psta->no_short_preamble_set = 1;
pmlmepriv->num_sta_no_short_preamble++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 1))
- {
+ (pmlmepriv->num_sta_no_short_preamble == 1)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
- }
- else
- {
- if (psta->no_short_preamble_set)
- {
+ } else {
+ if (psta->no_short_preamble_set) {
psta->no_short_preamble_set = 0;
pmlmepriv->num_sta_no_short_preamble--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_preamble == 0))
- {
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1451,32 +1421,25 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
}
- if (psta->flags & WLAN_STA_NONERP)
- {
- if (!psta->nonerp_set)
- {
+ if (psta->flags & WLAN_STA_NONERP) {
+ if (!psta->nonerp_set) {
psta->nonerp_set = 1;
pmlmepriv->num_sta_non_erp++;
- if (pmlmepriv->num_sta_non_erp == 1)
- {
+ if (pmlmepriv->num_sta_non_erp == 1) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
}
}
- }
- else
- {
- if (psta->nonerp_set)
- {
+ } else {
+ if (psta->nonerp_set) {
psta->nonerp_set = 0;
pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0)
- {
+ if (pmlmepriv->num_sta_non_erp == 0) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO, NULL, true);
}
@@ -1484,42 +1447,34 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
- if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME))
- {
- if (!psta->no_short_slot_time_set)
- {
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
+ if (!psta->no_short_slot_time_set) {
psta->no_short_slot_time_set = 1;
pmlmepriv->num_sta_no_short_slot_time++;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 1))
- {
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
- }
- else
- {
- if (psta->no_short_slot_time_set)
- {
+ } else {
+ if (psta->no_short_slot_time_set) {
psta->no_short_slot_time_set = 0;
pmlmepriv->num_sta_no_short_slot_time--;
if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
- (pmlmepriv->num_sta_no_short_slot_time == 0))
- {
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
}
}
- if (psta->flags & WLAN_STA_HT)
- {
+ if (psta->flags & WLAN_STA_HT) {
u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
DBG_8723A("HT: STA " MAC_FMT " HT Capabilities "
@@ -1552,9 +1507,7 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
pmlmepriv->num_sta_ht_20mhz);
}
- }
- else
- {
+ } else {
if (!psta->no_ht_set) {
psta->no_ht_set = 1;
pmlmepriv->num_sta_no_ht++;
@@ -1567,8 +1520,7 @@ void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info
}
}
- if (rtw_ht_operation_update(padapter) > 0)
- {
+ if (rtw_ht_operation_update(padapter) > 0) {
update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
}
@@ -1592,8 +1544,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
psta->no_short_preamble_set = 0;
pmlmepriv->num_sta_no_short_preamble--;
if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_preamble == 0)
- {
+ && pmlmepriv->num_sta_no_short_preamble == 0) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1602,8 +1553,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
if (psta->nonerp_set) {
psta->nonerp_set = 0;
pmlmepriv->num_sta_non_erp--;
- if (pmlmepriv->num_sta_non_erp == 0)
- {
+ if (pmlmepriv->num_sta_non_erp == 0) {
beacon_updated = true;
update_beacon23a(padapter, WLAN_EID_ERP_INFO,
NULL, true);
@@ -1614,8 +1564,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
psta->no_short_slot_time_set = 0;
pmlmepriv->num_sta_no_short_slot_time--;
if (pmlmeext->cur_wireless_mode > WIRELESS_11B
- && pmlmepriv->num_sta_no_short_slot_time == 0)
- {
+ && pmlmepriv->num_sta_no_short_slot_time == 0) {
beacon_updated = true;
update_beacon23a(padapter, 0xFF, NULL, true);
}
@@ -1636,8 +1585,7 @@ u8 bss_cap_update_on_sta_leave23a(struct rtw_adapter *padapter, struct sta_info
pmlmepriv->num_sta_ht_20mhz--;
}
- if (rtw_ht_operation_update(padapter) > 0)
- {
+ if (rtw_ht_operation_update(padapter) > 0) {
update_beacon23a(padapter, WLAN_EID_HT_CAPABILITY, NULL, false);
update_beacon23a(padapter, WLAN_EID_HT_OPERATION, NULL, true);
}
@@ -1657,8 +1605,7 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti
if (!psta)
return beacon_updated;
- if (active == true)
- {
+ if (active) {
/* tear down Rx AMPDU */
send_delba23a(padapter, 0, psta->hwaddr);/* recipient */
@@ -1698,7 +1645,6 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti
int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset)
{
struct list_head *phead, *plist;
- int ret = 0;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1706,7 +1652,7 @@ int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_o
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state&0x03) != MSR_AP)
- return ret;
+ return 0;
DBG_8723A("%s(%s): with ch:%u, offset:%u\n", __func__,
padapter->pnetdev->name, new_ch, ch_offset);
@@ -1724,13 +1670,12 @@ int rtw_ap_inform_ch_switch23a (struct rtw_adapter *padapter, u8 new_ch, u8 ch_o
issue_action_spct_ch_switch23a (padapter, bc_addr, new_ch, ch_offset);
- return ret;
+ return 0;
}
int rtw_sta_flush23a(struct rtw_adapter *padapter)
{
struct list_head *phead, *plist, *ptmp;
- int ret = 0;
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1743,7 +1688,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter)
DBG_8723A("%s(%s)\n", __func__, padapter->pnetdev->name);
if ((pmlmeinfo->state&0x03) != MSR_AP)
- return ret;
+ return 0;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
@@ -1769,7 +1714,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter)
associated_clients_update23a(padapter, true);
- return ret;
+ return 0;
}
/* called > TSR LEVEL for USB or SDIO Interface*/
@@ -1788,13 +1733,10 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->qos_option = 0;
/* update 802.11n ht cap. */
- if (WLAN_STA_HT&flags)
- {
+ if (WLAN_STA_HT&flags) {
psta->htpriv.ht_option = true;
psta->qos_option = 1;
- }
- else
- {
+ } else {
psta->htpriv.ht_option = false;
}
@@ -1807,8 +1749,7 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
/* called >= TSR LEVEL for USB or SDIO Interface*/
void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
- if (psta->state & _FW_LINKED)
- {
+ if (psta->state & _FW_LINKED) {
/* add ratid */
add_RATid23a(padapter, psta, 0);/* DM_RATR_STA_INIT */
}
@@ -1819,7 +1760,7 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter)
{
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct sta_priv * pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct list_head *phead, *plist, *ptmp;
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
index 4eaa50297b95..60e0ded8ae02 100644
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -36,25 +36,25 @@ static struct cmd_hdl wlancmds[] = {
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
- GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct wlan_bssid_ex), createbss_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl23a) /*20*/
- GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl23a)
- GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct del_assocsta_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setstapwrstate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getbasicrate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getdatarate_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct setphyinfo_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphyinfo_parm), NULL) /*30*/
- GEN_MLME_EXT_HANDLER(sizeof (struct setphy_parm), NULL)
- GEN_MLME_EXT_HANDLER(sizeof (struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), join_cmd_hdl23a) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct wlan_bssid_ex), createbss_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl23a) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl23a) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl23a)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
GEN_MLME_EXT_HANDLER(0, NULL)
@@ -359,6 +359,7 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
/* prepare ssid list */
if (ssid) {
int i;
+
for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
if (ssid[i].ssid_len) {
memcpy(&psurveyPara->ssid[i], &ssid[i],
@@ -371,6 +372,7 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
/* prepare channel list */
if (ch) {
int i;
+
for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
if (ch[i].hw_value &&
!(ch[i].flags & IEEE80211_CHAN_DISABLED)) {
@@ -389,8 +391,6 @@ int rtw_sitesurvey_cmd23a(struct rtw_adapter *padapter,
mod_timer(&pmlmepriv->scan_to_timer, jiffies +
msecs_to_jiffies(SCANNING_TIMEOUT));
- rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
-
pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
} else
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
@@ -415,8 +415,6 @@ int rtw_createbss_cmd23a(struct rtw_adapter *padapter)
pdev_network = &padapter->registrypriv.dev_network;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
if (pmlmepriv->assoc_ssid.ssid_len == 0) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
(" createbss for Any SSid:%s\n",
@@ -465,8 +463,6 @@ int rtw_joinbss_cmd23a(struct rtw_adapter *padapter,
ifmode = pnetwork->network.ifmode;
- rtw_led_control(padapter, LED_CTL_START_TO_LINK);
-
if (pmlmepriv->assoc_ssid.ssid_len == 0) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
("+Join cmd: Any SSid\n"));
@@ -599,7 +595,7 @@ exit:
return res;
}
-int rtw_disassoc_cmd23a(struct rtw_adapter*padapter, u32 deauth_timeout_ms,
+int rtw_disassoc_cmd23a(struct rtw_adapter *padapter, u32 deauth_timeout_ms,
bool enqueue)
{
struct cmd_obj *cmdobj = NULL;
@@ -719,6 +715,7 @@ int rtw_setstakey_cmd23a(struct rtw_adapter *padapter, u8 *psta, u8 unicast_key)
memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
} else {
int idx = psecuritypriv->dot118021XGrpKeyid;
+
memcpy(&psetstakey_para->key,
&psecuritypriv->dot118021XGrpKey[idx].skey, 16);
}
@@ -786,7 +783,7 @@ exit:
return res;
}
-int rtw_addbareq_cmd23a(struct rtw_adapter*padapter, u8 tid, u8 *addr)
+int rtw_addbareq_cmd23a(struct rtw_adapter *padapter, u8 tid, u8 *addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct cmd_obj *ph2c;
@@ -822,7 +819,7 @@ exit:
return res;
}
-int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter*padapter)
+int rtw_dynamic_chk_wk_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -859,7 +856,7 @@ exit:
* This is only ever called from on_action_spct23a_ch_switch () which isn't
* called from anywhere itself
*/
-int rtw_set_ch_cmd23a(struct rtw_adapter*padapter, u8 ch, u8 bw, u8 ch_offset,
+int rtw_set_ch_cmd23a(struct rtw_adapter *padapter, u8 ch, u8 bw, u8 ch_offset,
u8 enqueue)
{
struct cmd_obj *pcmdobj;
@@ -919,34 +916,34 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
u8 bHigherBusyTxTraffic = false;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int BusyThreshold = 100;
+ struct rt_link_detect *ldi = &pmlmepriv->LinkDetectInfo;
+
/* */
/* Determine if our traffic is busy now */
/* */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
if (rtl8723a_BT_coexist(padapter))
BusyThreshold = 50;
- else if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ else if (ldi->bBusyTraffic)
BusyThreshold = 75;
/* if we raise bBusyTraffic in last watchdog, using
lower threshold. */
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
+ if (ldi->NumRxOkInPeriod > BusyThreshold ||
+ ldi->NumTxOkInPeriod > BusyThreshold) {
bBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
bRxBusyTraffic = true;
else
bTxBusyTraffic = true;
}
/* Higher Tx/Rx data. */
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ if (ldi->NumRxOkInPeriod > 4000 ||
+ ldi->NumTxOkInPeriod > 4000) {
bHigherBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (ldi->NumRxOkInPeriod > ldi->NumTxOkInPeriod)
bHigherBusyRxTraffic = true;
else
bHigherBusyTxTraffic = true;
@@ -955,9 +952,9 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
if (!rtl8723a_BT_coexist(padapter) ||
!rtl8723a_BT_using_antenna_1(padapter)) {
/* check traffic for powersaving. */
- if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod +
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
- pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod >2)
+ if (((ldi->NumRxUnicastOkInPeriod +
+ ldi->NumTxOkInPeriod) > 8) ||
+ ldi->NumRxUnicastOkInPeriod > 2)
bEnterPS = false;
else
bEnterPS = true;
@@ -971,15 +968,15 @@ static void traffic_status_watchdog(struct rtw_adapter *padapter)
} else
LPS_Leave23a(padapter);
- pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
- pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
- pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+ ldi->NumRxOkInPeriod = 0;
+ ldi->NumTxOkInPeriod = 0;
+ ldi->NumRxUnicastOkInPeriod = 0;
+ ldi->bBusyTraffic = bBusyTraffic;
+ ldi->bTxBusyTraffic = bTxBusyTraffic;
+ ldi->bRxBusyTraffic = bRxBusyTraffic;
+ ldi->bHigherBusyTraffic = bHigherBusyTraffic;
+ ldi->bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ ldi->bHigherBusyTxTraffic = bHigherBusyTxTraffic;
}
static void dynamic_chk_wk_hdl(struct rtw_adapter *padapter, u8 *pbuf, int sz)
@@ -1017,46 +1014,45 @@ static void lps_ctrl_wk_hdl(struct rtw_adapter *padapter, u8 lps_ctrl_type)
check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))
return;
- switch (lps_ctrl_type)
- {
- case LPS_CTRL_SCAN:
- rtl8723a_BT_wifiscan_notify(padapter, true);
- if (!rtl8723a_BT_using_antenna_1(padapter)) {
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- LPS_Leave23a(padapter);
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+ rtl8723a_BT_wifiscan_notify(padapter, true);
+ if (!rtl8723a_BT_using_antenna_1(padapter)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ LPS_Leave23a(padapter);
}
- break;
- case LPS_CTRL_JOINBSS:
+ break;
+ case LPS_CTRL_JOINBSS:
+ LPS_Leave23a(padapter);
+ break;
+ case LPS_CTRL_CONNECT:
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ padapter->pwrctrlpriv.LpsIdleCount = 0;
+ rtl8723a_set_FwJoinBssReport_cmd(padapter, 1);
+ rtl8723a_BT_mediastatus_notify(padapter, mstatus);
+ break;
+ case LPS_CTRL_DISCONNECT:
+ mstatus = 0;/* disconnect */
+ rtl8723a_BT_mediastatus_notify(padapter, mstatus);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_CONNECT:
- mstatus = 1;/* connect */
- /* Reset LPS Setting */
- padapter->pwrctrlpriv.LpsIdleCount = 0;
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 1);
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- break;
- case LPS_CTRL_DISCONNECT:
- mstatus = 0;/* disconnect */
- rtl8723a_BT_mediastatus_notify(padapter, mstatus);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- rtl8723a_set_FwJoinBssReport_cmd(padapter, 0);
- break;
- case LPS_CTRL_SPECIAL_PACKET:
- pwrpriv->DelayLPSLastTimeStamp = jiffies;
- rtl8723a_BT_specialpacket_notify(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
- case LPS_CTRL_LEAVE:
- rtl8723a_BT_lps_leave(padapter);
- if (!rtl8723a_BT_using_antenna_1(padapter))
- LPS_Leave23a(padapter);
- break;
+ rtl8723a_set_FwJoinBssReport_cmd(padapter, 0);
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
+ rtl8723a_BT_specialpacket_notify(padapter);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
+ LPS_Leave23a(padapter);
+ break;
+ case LPS_CTRL_LEAVE:
+ rtl8723a_BT_lps_leave(padapter);
+ if (!rtl8723a_BT_using_antenna_1(padapter))
+ LPS_Leave23a(padapter);
+ break;
- default:
- break;
+ default:
+ break;
}
}
@@ -1098,7 +1094,7 @@ exit:
return res;
}
-int rtw_ps_cmd23a(struct rtw_adapter*padapter)
+int rtw_ps_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ppscmd;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -1147,12 +1143,12 @@ static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
val = rtl8723a_chk_hi_queue_empty(padapter);
- while (val == false) {
+ while (!val) {
msleep(100);
cnt++;
- if (cnt>10)
+ if (cnt > 10)
break;
val = rtl8723a_chk_hi_queue_empty(padapter);
@@ -1168,7 +1164,7 @@ static void rtw_chk_hi_queue_hdl(struct rtw_adapter *padapter)
}
}
-int rtw_chk_hi_queue_cmd23a(struct rtw_adapter*padapter)
+int rtw_chk_hi_queue_cmd23a(struct rtw_adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
@@ -1305,8 +1301,7 @@ int rtw_drvextra_cmd_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
- switch (pdrvextra_cmd->ec_id)
- {
+ switch (pdrvextra_cmd->ec_id) {
case DYNAMIC_CHK_WK_CID:
dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf,
pdrvextra_cmd->type_size);
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
index 9f6ce7d071cd..81960e788f89 100644
--- a/drivers/staging/rtl8723au/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723au/core/rtw_efuse.c
@@ -117,12 +117,7 @@ Efuse_GetCurrentSize23a(struct rtw_adapter *pAdapter, u8 efuseType)
u8
Efuse_CalculateWordCnts23a(u8 word_en)
{
- u8 word_cnts = 0;
- if (!(word_en & BIT(0))) word_cnts++; /* 0 : write enable */
- if (!(word_en & BIT(1))) word_cnts++;
- if (!(word_en & BIT(2))) word_cnts++;
- if (!(word_en & BIT(3))) word_cnts++;
- return word_cnts;
+ return hweight8((~word_en) & 0xf);
}
/* */
@@ -181,7 +176,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
switch (type) {
case TYPE_EFUSE_MAX_SECTION:
- pMax_section = (u8 *) pOut;
+ pMax_section = pOut;
if (efuseType == EFUSE_WIFI)
*pMax_section = EFUSE_MAX_SECTION_8723A;
@@ -190,7 +185,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_REAL_CONTENT_LEN:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
@@ -199,7 +194,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
@@ -210,7 +205,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723A -
@@ -221,7 +216,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_MAP_LEN:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_MAP_LEN_8723A;
@@ -230,7 +225,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_PROTECT_BYTES_BANK:
- pu1Tmp = (u8 *) pOut;
+ pu1Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
@@ -239,7 +234,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
case TYPE_EFUSE_CONTENT_LEN_BANK:
- pu2Tmp = (u16 *) pOut;
+ pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723A;
@@ -248,7 +243,7 @@ EFUSE_GetEfuseDefinition23a(struct rtw_adapter *pAdapter, u8 efuseType,
break;
default:
- pu1Tmp = (u8 *) pOut;
+ pu1Tmp = pOut;
*pu1Tmp = 0;
break;
}
diff --git a/drivers/staging/rtl8723au/core/rtw_ieee80211.c b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
index 6274cb397c92..bbbcfc8257da 100644
--- a/drivers/staging/rtl8723au/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
@@ -69,6 +69,7 @@ int rtw_get_bit_value_from_ieee_value23a(u8 val)
{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
int i = 0;
+
while (dot11_rate_table[i] != 0) {
if (dot11_rate_table[i] == val)
return BIT(i);
@@ -301,8 +302,7 @@ void rtw_set_supported_rate23a(u8 *SupportedRates, uint mode)
memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
- switch (mode)
- {
+ switch (mode) {
case WIRELESS_11B:
memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
break;
diff --git a/drivers/staging/rtl8723au/core/rtw_led.c b/drivers/staging/rtl8723au/core/rtw_led.c
deleted file mode 100644
index 989cda29a57e..000000000000
--- a/drivers/staging/rtl8723au/core/rtw_led.c
+++ /dev/null
@@ -1,1893 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtl8723a_led.h>
-
-/* */
-/* Description: */
-/* Callback function of LED BlinkTimer, */
-/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl23a */
-/* */
-static void BlinkTimerCallback(unsigned long data)
-{
- struct led_8723a *pLed = (struct led_8723a *)data;
- struct rtw_adapter *padapter = pLed->padapter;
-
- /* DBG_8723A("%s\n", __func__); */
-
- if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
- {
- /* DBG_8723A("%s bSurpriseRemoved:%d, bDriverStopped:%d\n", __func__, padapter->bSurpriseRemoved, padapter->bDriverStopped); */
- return;
- }
- schedule_work(&pLed->BlinkWorkItem);
-}
-
-/* */
-/* Description: */
-/* Callback function of LED BlinkWorkItem. */
-/* We dispatch acture LED blink action according to LedStrategy. */
-/* */
-void BlinkWorkItemCallback23a(struct work_struct *work)
-{
- struct led_8723a *pLed = container_of(work, struct led_8723a, BlinkWorkItem);
- BlinkHandler23a(pLed);
-}
-
-/* */
-/* Description: */
-/* Reset status of led_8723a object. */
-/* */
-void ResetLedStatus23a(struct led_8723a * pLed) {
-
- pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
- pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
-
- pLed->bLedBlinkInProgress = false; /* true if it is blinking, false o.w.. */
- pLed->bLedWPSBlinkInProgress = false;
-
- pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
- pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
-
- pLed->bLedNoLinkBlinkInProgress = false;
- pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
- pLed->bLedScanBlinkInProgress = false;
-}
-
- /* */
-/* Description: */
-/* Initialize an led_8723a object. */
-/* */
-void
-InitLed871x23a(struct rtw_adapter *padapter, struct led_8723a *pLed, enum led_pin_8723a LedPin)
-{
- pLed->padapter = padapter;
- pLed->LedPin = LedPin;
-
- ResetLedStatus23a(pLed);
-
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback, (unsigned long)pLed);
-
- INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback23a);
-}
-
-/* */
-/* Description: */
-/* DeInitialize an led_8723a object. */
-/* */
-void
-DeInitLed871x23a(struct led_8723a *pLed)
-{
- cancel_work_sync(&pLed->BlinkWorkItem);
- del_timer_sync(&pLed->BlinkTimer);
- ResetLedStatus23a(pLed);
-}
-
-/* Description: */
-/* Implementation of LED blinking behavior. */
-/* It toggle off LED and schedule corresponding timer if necessary. */
-
-static void SwLedBlink(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- /* Determine if we shall change LED state again. */
- pLed->BlinkTimes--;
- switch (pLed->CurrLedState) {
-
- case LED_BLINK_NORMAL:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_StartToBlink:
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- bStopBlinking = true;
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- bStopBlinking = true;
- else if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_WPS:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- default:
- bStopBlinking = true;
- break;
- }
-
- if (bStopBlinking) {
- if ((check_fwstate(pmlmepriv, _FW_LINKED)) && !pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
- else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- pLed->BlinkTimes = 0;
- pLed->bLedBlinkInProgress = false;
- } else {
- /* Assign LED state to toggle. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- /* Schedule a timer to toggle LED state. */
- switch (pLed->CurrLedState) {
- case LED_BLINK_NORMAL:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- break;
- case LED_BLINK_SLOWLY:
- case LED_BLINK_StartToBlink:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- break;
- case LED_BLINK_WPS:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_LONG_INTERVAL));
- break;
- default:
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- break;
- }
- }
-}
-
-static void SwLedBlink1(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- unsigned long delay = 0;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- ResetLedStatus23a(pLed);
- return;
- }
- switch (pLed->CurrLedState) {
- case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
- case LED_BLINK_NORMAL:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- break;
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->BlinkTimes = 0;
- pLed->bLedBlinkInProgress = false;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
- case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- break;
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
- if (bStopBlinking) {
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- }
- break;
- default:
- break;
- }
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-}
-
-static void SwLedBlink2(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
- switch (pLed->CurrLedState) {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop scan blink CurrLedState %d\n",
- pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop scan blink CurrLedState %d\n",
- pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop CurrLedState %d\n", pLed->CurrLedState));
-
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
- ("stop CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- }
- break;
- default:
- break;
- }
-}
-
-static void SwLedBlink3(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- }
- else
- {
- if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- {
- bStopBlinking = true;
- }
-
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedScanBlinkInProgress = false;
- }
- else
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- {
- bStopBlinking = true;
- }
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- } else if (check_fwstate(pmlmepriv,
- _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
-
- if (!pLed->bLedOn)
- SwLedOn23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedBlinkInProgress = false;
- }
- else
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- }
- break;
-
- case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- break;
-
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
- bStopBlinking = false;
- } else {
- bStopBlinking = true;
- }
-
- if (bStopBlinking)
- {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on)
- {
- SwLedOff23a(padapter, pLed);
- }
- else
- {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- pLed->bLedWPSBlinkInProgress = false;
- }
- break;
-
- default:
- break;
- }
-}
-
-static void SwLedBlink4(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed1 = &ledpriv->SwLed1;
- u8 bStopBlinking = false;
- unsigned long delay = 0;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN)
- {
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
- SwLedOff23a(padapter, pLed1);
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
-
- case LED_BLINK_StartToBlink:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_SLOWLY_INTERVAL;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = false;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = false;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_WPS:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_SLOWLY_INTERVAL;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- case LED_BLINK_WPS_STOP: /* WPS authentication fail */
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- delay = LED_BLINK_NORMAL_INTERVAL;
- break;
-
- case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- if (pLed->bLedOn) {
- pLed->BlinkTimes = 1;
- } else {
- bStopBlinking = true;
- }
- }
-
- if (bStopBlinking) {
- pLed->BlinkTimes = 10;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- delay = LED_BLINK_NORMAL_INTERVAL;
- }
- break;
-
- default:
- break;
- }
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState));
-}
-
-static void SwLedBlink5(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- u8 bStopBlinking = false;
- unsigned long delay = 0;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
-
- switch (pLed->CurrLedState)
- {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
-
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- }
- break;
-
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- bStopBlinking = true;
- }
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
-
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff23a(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- }
- break;
-
- default:
- break;
- }
-
- if (delay)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState));
-}
-
-static void SwLedBlink6(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
- } else {
- SwLedOff23a(padapter, pLed);
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
- }
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("<==== blink6\n"));
-}
-
-/* ALPHA, added by chiyoko, 20090106 */
-static void
-SwLedControlMode1(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- long delay = -1;
-
- switch (LedAction)
- {
- case LED_CTL_POWER_ON:
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (pLed->bLedNoLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_LINK:
- if (pLed->bLedLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_LINK_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_STOP_WPS:
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- } else {
- pLed->bLedWPSBlinkInProgress = true;
- }
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
- break;
-
- case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_NO_LINK_INTERVAL_ALPHA;
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- SwLedOff23a(padapter, pLed);
- break;
-
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* Arcadyan/Sitecom , added by chiyoko, 20090216 */
-static void
-SwLedControlMode2(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- long delay = -1;
-
- switch (LedAction) {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
-
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
- case LED_CTL_LINK:
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- delay = 0;
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
- break;
- case LED_CTL_STOP_WPS:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- break;
- case LED_CTL_STOP_WPS_FAIL:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff23a(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
- }
- break;
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed))
- {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- }
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- delay = 0;
- break;
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-}
-
- /* COREGA, added by chiyoko, 20090316 */
-static void
-SwLedControlMode3(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- long delay = -1;
-
- switch (LedAction)
- {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false &&
- check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
-
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_FASTER_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_LINK:
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- delay = 0;
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = LED_BLINK_SCAN_INTERVAL_ALPHA;
- }
- break;
-
- case LED_CTL_STOP_WPS:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- pLed->bLedWPSBlinkInProgress = true;
- }
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA;
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- delay = 0;
- }
-
- break;
-
- case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- break;
-
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed))
- {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- delay = 0;
- }
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- delay = 0;
- break;
-
- default:
- break;
-
- }
-
- if (delay != -1)
- mod_timer(&pLed->BlinkTimer, jiffies + msecs_to_jiffies(delay));
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
-}
-
- /* Edimax-Belkin, added by chiyoko, 20090413 */
-static void
-SwLedControlMode4(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
- struct led_8723a *pLed1 = &ledpriv->SwLed1;
-
- switch (LedAction)
- {
- case LED_CTL_START_TO_LINK:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
-
- if (pLed->bLedStartToLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
-
- pLed->bLedStartToLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_StartToBlink;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer,
- jiffies + msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- }
- }
- break;
-
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- /* LED1 settings */
- if (LedAction == LED_CTL_LINK) {
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
- }
-
- if (pLed->bLedNoLinkBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN ||
- IS_LED_WPS_BLINKING(pLed)) {
- return;
- }
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- del_timer_sync(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- mod_timer(&pLed->BlinkTimer, jiffies);
- }
-
- if (pLed->bLedWPSBlinkInProgress == false) {
- if (pLed->bLedNoLinkBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress == true) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- {
- pLed->BlinkingLedState = RTW_LED_OFF;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SLOWLY_INTERVAL));
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
- }
- }
- break;
-
- case LED_CTL_STOP_WPS: /* WPS connect success */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
- break;
-
- case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
-
- pLed1->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
-
- break;
-
- case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- del_timer_sync(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
-
- pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
- pLed1->BlinkTimes = 10;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_NORMAL_INTERVAL));
-
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedNoLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedStartToLinkBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedStartToLinkBlinkInProgress = false;
- }
-
- if (pLed1->bLedWPSBlinkInProgress) {
- del_timer_sync(&pLed1->BlinkTimer);
- pLed1->bLedWPSBlinkInProgress = false;
- }
-
- pLed1->BlinkingLedState = LED_UNKNOWN;
- SwLedOff23a(padapter, pLed);
- SwLedOff23a(padapter, pLed1);
- break;
-
- default:
- break;
-
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* Sercomm-Belkin, added by chiyoko, 20090415 */
-static void
-SwLedControlMode5(struct rtw_adapter *padapter, enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct led_8723a *pLed = &ledpriv->SwLed0;
-
- switch (LedAction)
- {
- case LED_CTL_POWER_ON:
- case LED_CTL_NO_LINK:
- case LED_CTL_LINK: /* solid blue */
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
-
- mod_timer(&pLed->BlinkTimer, jiffies);
- break;
-
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic &&
- check_fwstate(pmlmepriv, _FW_LINKED))
- ;
- else if (pLed->bLedScanBlinkInProgress == false)
- {
- if (pLed->bLedBlinkInProgress == true)
- {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (pLed->bLedBlinkInProgress == false) {
- if (pLed->CurrLedState == LED_BLINK_SCAN) {
- return;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed->BlinkTimer, jiffies +
- msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
- }
- break;
-
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedBlinkInProgress) {
- del_timer_sync(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- SwLedOff23a(padapter, pLed);
- break;
-
- default:
- break;
-
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
-}
-
- /* WNC-Corega, added by chiyoko, 20090902 */
-static void SwLedControlMode6(struct rtw_adapter *padapter,
- enum led_ctl_mode LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct led_8723a *pLed0 = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_POWER_ON:
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- del_timer_sync(&pLed0->BlinkTimer);
- pLed0->CurrLedState = RTW_LED_ON;
- pLed0->BlinkingLedState = RTW_LED_ON;
- mod_timer(&pLed0->BlinkTimer, jiffies);
- break;
- case LED_CTL_POWER_OFF:
- SwLedOff23a(padapter, pLed0);
- break;
- default:
- break;
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("ledcontrol 6 Led %d\n", pLed0->CurrLedState));
-}
-
-/* */
-/* Description: */
-/* Handler function of LED Blinking. */
-/* We dispatch the actual LED blink action according to LedStrategy. */
-/* */
-void BlinkHandler23a(struct led_8723a *pLed)
-{
- struct rtw_adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- /* DBG_8723A("%s (%s:%d)\n", __func__, current->comm, current->pid); */
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- SwLedBlink(pLed);
- break;
- case SW_LED_MODE1:
- SwLedBlink1(pLed);
- break;
- case SW_LED_MODE2:
- SwLedBlink2(pLed);
- break;
- case SW_LED_MODE3:
- SwLedBlink3(pLed);
- break;
- case SW_LED_MODE4:
- SwLedBlink4(pLed);
- break;
- case SW_LED_MODE5:
- SwLedBlink5(pLed);
- break;
- case SW_LED_MODE6:
- SwLedBlink6(pLed);
- break;
- default:
- break;
- }
-}
-
-void
-LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction) {
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- if ((padapter->bSurpriseRemoved == true) ||
- (padapter->bDriverStopped == true) ||
- (padapter->hw_init_completed == false)) {
- return;
- }
-
- if (ledpriv->bRegUseLed == false)
- return;
-
- /* if (!priv->up) */
- /* return; */
-
- /* if (priv->bInHctTest) */
- /* return; */
-
- if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
- padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
- (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
- LedAction == LED_CTL_SITE_SURVEY ||
- LedAction == LED_CTL_LINK ||
- LedAction == LED_CTL_NO_LINK ||
- LedAction == LED_CTL_POWER_ON)) {
- return;
- }
-
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- break;
- case SW_LED_MODE1:
- SwLedControlMode1(padapter, LedAction);
- break;
- case SW_LED_MODE2:
- SwLedControlMode2(padapter, LedAction);
- break;
- case SW_LED_MODE3:
- SwLedControlMode3(padapter, LedAction);
- break;
- case SW_LED_MODE4:
- SwLedControlMode4(padapter, LedAction);
- break;
- case SW_LED_MODE5:
- SwLedControlMode5(padapter, LedAction);
- break;
- case SW_LED_MODE6:
- SwLedControlMode6(padapter, LedAction);
- break;
- default:
- break;
- }
-
- RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("LedStrategy:%d, LedAction %d\n", ledpriv->LedStrategy, LedAction));
-}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
index 1f6006439bbb..7299ef0a2e54 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme.c
@@ -50,7 +50,6 @@ static void rtw_init_mlme_timer(struct rtw_adapter *padapter)
int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- int res = _SUCCESS;
pmlmepriv->nic_hdl = padapter;
@@ -68,7 +67,7 @@ int rtw_init_mlme_priv23a(struct rtw_adapter *padapter)
rtw_clear_scan_deny(padapter);
rtw_init_mlme_timer(padapter);
- return res;
+ return _SUCCESS;
}
#ifdef CONFIG_8723AU_AP_MODE
@@ -110,7 +109,6 @@ struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv, gfp_t gfp)
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
- pnetwork->aid = 0;
pnetwork->join_res = 0;
}
@@ -218,8 +216,6 @@ void rtw_generate_random_ibss23a(u8 *pibss)
pibss[3] = curtime & 0xff;/* p[0]; */
pibss[4] = (curtime >> 8) & 0xff;/* p[1]; */
pibss[5] = (curtime >> 16) & 0xff;/* p[2]; */
-
- return;
}
void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming)
@@ -356,12 +352,12 @@ rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct rtw_adapter *padapter, bool update_ie)
{
- u8 ss_ori = dst->PhyInfo.SignalStrength;
- u8 sq_ori = dst->PhyInfo.SignalQuality;
+ u8 ss_ori = dst->SignalStrength;
+ u8 sq_ori = dst->SignalQuality;
long rssi_ori = dst->Rssi;
- u8 ss_smp = src->PhyInfo.SignalStrength;
- u8 sq_smp = src->PhyInfo.SignalQuality;
+ u8 ss_smp = src->SignalStrength;
+ u8 sq_smp = src->SignalQuality;
long rssi_smp = src->Rssi;
u8 ss_final;
@@ -389,16 +385,16 @@ void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
rssi_final = rssi_ori;
} else {
if (sq_smp != 101) { /* from the right channel */
- ss_final = ((u32)src->PhyInfo.SignalStrength +
- (u32)dst->PhyInfo.SignalStrength * 4) / 5;
- sq_final = ((u32)src->PhyInfo.SignalQuality +
- (u32)dst->PhyInfo.SignalQuality * 4) / 5;
+ ss_final = ((u32)src->SignalStrength +
+ (u32)dst->SignalStrength * 4) / 5;
+ sq_final = ((u32)src->SignalQuality +
+ (u32)dst->SignalQuality * 4) / 5;
rssi_final = src->Rssi+dst->Rssi * 4 / 5;
} else {
/* bss info not receiving from the right channel, use
the original RX signal infos */
- ss_final = dst->PhyInfo.SignalStrength;
- sq_final = dst->PhyInfo.SignalQuality;
+ ss_final = dst->SignalStrength;
+ sq_final = dst->SignalQuality;
rssi_final = dst->Rssi;
}
@@ -407,14 +403,13 @@ void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
if (update_ie)
memcpy(dst, src, get_wlan_bssid_ex_sz(src));
- dst->PhyInfo.SignalStrength = ss_final;
- dst->PhyInfo.SignalQuality = sq_final;
+ dst->SignalStrength = ss_final;
+ dst->SignalQuality = sq_final;
dst->Rssi = rssi_final;
DBG_8723A("%s %s(%pM), SignalStrength:%u, SignalQuality:%u, "
"RawRSSI:%ld\n", __func__, dst->Ssid.ssid, dst->MacAddress,
- dst->PhyInfo.SignalStrength,
- dst->PhyInfo.SignalQuality, dst->Rssi);
+ dst->SignalStrength, dst->SignalQuality, dst->Rssi);
}
static void update_current_network(struct rtw_adapter *adapter,
@@ -487,12 +482,11 @@ static void rtw_update_scanned_network(struct rtw_adapter *adapter,
pnetwork->last_scanned = jiffies;
pnetwork->network_type = 0;
- pnetwork->aid = 0;
pnetwork->join_res = 0;
/* bss info not receiving from the right channel */
- if (pnetwork->network.PhyInfo.SignalQuality == 101)
- pnetwork->network.PhyInfo.SignalQuality = 0;
+ if (pnetwork->network.SignalQuality == 101)
+ pnetwork->network.SignalQuality = 0;
} else {
/*
* we have an entry and we are going to update it. But
@@ -579,8 +573,6 @@ void rtw_atimdone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
{
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
("receive atimdone_evet\n"));
-
- return;
}
void rtw_survey_event_cb23a(struct rtw_adapter *adapter, const u8 *pbuf)
@@ -650,8 +642,6 @@ exit:
kfree(survey->bss);
survey->bss = NULL;
-
- return;
}
void
@@ -825,8 +815,6 @@ void rtw_indicate_connect23a(struct rtw_adapter *padapter)
if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
set_fwstate(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_LINK);
-
rtw_cfg80211_indicate_connect(padapter);
netif_carrier_on(padapter->pnetdev);
@@ -871,10 +859,7 @@ void rtw_indicate_disconnect23a(struct rtw_adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
rtw_clear_scan_deny(padapter);
-
}
rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 1);
@@ -1028,22 +1013,18 @@ rtw_joinbss_update_network23a(struct rtw_adapter *padapter,
cur_network->network.beacon_interval =
ptarget_wlan->network.beacon_interval;
cur_network->network.tsf = ptarget_wlan->network.tsf;
- cur_network->aid = pnetwork->join_res;
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength =
- ptarget_wlan->network.PhyInfo.SignalStrength;
- padapter->recvpriv.signal_qual =
- ptarget_wlan->network.PhyInfo.SignalQuality;
+ ptarget_wlan->network.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.SignalQuality;
/*
* the ptarget_wlan->network.Rssi is raw data, we use
- * ptarget_wlan->network.PhyInfo.SignalStrength instead (has scaled)
+ * ptarget_wlan->network.SignalStrength instead (has scaled)
*/
- padapter->recvpriv.rssi = translate_percentage_to_dbm(
- ptarget_wlan->network.PhyInfo.SignalStrength);
- DBG_8723A("%s signal_strength:%3u, rssi:%3d, signal_qual:%3u\n",
+ DBG_8723A("%s signal_strength:%3u, signal_qual:%3u\n",
__func__, padapter->recvpriv.signal_strength,
- padapter->recvpriv.rssi, padapter->recvpriv.signal_qual);
+ padapter->recvpriv.signal_qual);
rtw_set_signal_stat_timer(&padapter->recvpriv);
/* update fw_state will clr _FW_UNDER_LINKING here indirectly */
@@ -1132,7 +1113,7 @@ void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr == true) {
+ if (the_same_macaddr) {
ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
} else {
pcur_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
@@ -1515,18 +1496,21 @@ out:
inline bool rtw_is_scan_deny(struct rtw_adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
return (atomic_read(&mlmepriv->set_scan_deny) != 0) ? true : false;
}
void rtw_clear_scan_deny(struct rtw_adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
atomic_set(&mlmepriv->set_scan_deny, 0);
}
void rtw_set_scan_deny_timer_hdl(unsigned long data)
{
struct rtw_adapter *adapter = (struct rtw_adapter *)data;
+
rtw_clear_scan_deny(adapter);
}
@@ -1540,7 +1524,8 @@ void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms)
}
#if defined(IEEE80211_SCAN_RESULT_EXPIRE)
-#define RTW_SCAN_RESULT_EXPIRE IEEE80211_SCAN_RESULT_EXPIRE/HZ*1000 -1000 /* 3000 -1000 */
+#define RTW_SCAN_RESULT_EXPIRE \
+ ((IEEE80211_SCAN_RESULT_EXPIRE / (HZ*1000)) - 1000) /* 3000 -1000 */
#else
#define RTW_SCAN_RESULT_EXPIRE 2000
#endif
@@ -1766,7 +1751,7 @@ exit:
return ret;
}
-int rtw_set_auth23a(struct rtw_adapter * adapter,
+int rtw_set_auth23a(struct rtw_adapter *adapter,
struct security_priv *psecuritypriv)
{
struct cmd_obj *pcmd;
@@ -2151,6 +2136,7 @@ bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
if (p && p[1] > 0) {
u32 rx_packet_offset, max_recvbuf_sz;
+
if (pmlmepriv->qos_option == 0) {
out_len = *pout_len;
pframe = rtw_set_ie23a(out_ie + out_len,
@@ -2165,9 +2151,9 @@ bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie,
memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap));
- ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40;
+ IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40);
GetHalDefVar8192CUsb(padapter, HAL_DEF_RX_PACKET_OFFSET,
&rx_packet_offset);
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index 3eb77de17e3a..0e0f73c86e53 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -331,6 +331,7 @@ rtw_update_TSF(struct mlme_ext_priv *pmlmeext, struct ieee80211_mgmt *mgmt)
int rtw_ch_set_search_ch23a(struct rt_channel_info *ch_set, const u32 ch)
{
int i;
+
for (i = 0; ch_set[i]. ChannelNum != 0; i++) {
if (ch == ch_set[i].ChannelNum)
break;
@@ -566,7 +567,6 @@ static u8 init_channel_set(struct rtw_adapter *padapter, u8 cplan,
int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -593,7 +593,7 @@ int init_mlme_ext_priv23a(struct rtw_adapter *padapter)
pmlmeext->mlmeext_init = true;
pmlmeext->active_keep_alive_check = true;
- return res;
+ return _SUCCESS;
}
void free_mlme_ext_priv23a (struct mlme_ext_priv *pmlmeext)
@@ -680,8 +680,7 @@ void mgt_dispatcher23a(struct rtw_adapter *padapter,
}
#ifdef CONFIG_8723AU_AP_MODE
- switch (stype)
- {
+ switch (stype) {
case IEEE80211_STYPE_AUTH:
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
ptable->func = &OnAuth23a;
@@ -1572,6 +1571,7 @@ OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
pstat->uapsd_bk = 0;
if (pmlmepriv->qos_option) {
const u8 *end = pos + left;
+
p = pos;
for (;;) {
@@ -2335,12 +2335,9 @@ static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
__func__, hidden_ssid_mode, ssid_ie, ssid_len_ori); */
if (ssid_ie && ssid_len_ori > 0) {
- switch (hidden_ssid_mode)
- {
+ switch (hidden_ssid_mode) {
case 1:
next_ie = ssid_ie + 2 + ssid_len_ori;
- remain_len = 0;
-
remain_len = ies_len -(next_ie-ies);
ssid_ie[1] = 0;
@@ -2599,7 +2596,9 @@ static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da,
if (ssid_ie && cur_network->Ssid.ssid_len) {
uint remainder_ielen;
u8 *remainder_ie;
+
remainder_ie = ssid_ie + 2;
+
remainder_ielen = pframe - remainder_ie;
DBG_8723A_LEVEL(_drv_warning_, "%s(%s): "
@@ -2862,6 +2861,7 @@ static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta,
if (psta) { /* for AP mode */
#ifdef CONFIG_8723AU_AP_MODE
unsigned short val16;
+
ether_addr_copy(mgmt->da, psta->hwaddr);
ether_addr_copy(mgmt->sa, myid(&padapter->eeprompriv));
ether_addr_copy(mgmt->bssid, myid(&padapter->eeprompriv));
@@ -3306,6 +3306,7 @@ static void issue_assocreq(struct rtw_adapter *padapter)
!memcmp(p + 2, WMM_OUI23A, 4) ||
!memcmp(p + 2, WPS_OUI23A, 4)) {
u8 plen = p[1];
+
if (!padapter->registrypriv.wifi_spec) {
/* Commented by Kurt 20110629 */
/* In some older APs, WPS handshake */
@@ -3997,7 +3998,7 @@ int send_beacon23a(struct rtw_adapter *padapter)
yield();
bxmitok = rtl8723a_get_bcn_valid(padapter);
poll++;
- } while ((poll % 10) != 0 && bxmitok == false &&
+ } while ((poll % 10) != 0 && !bxmitok &&
!padapter->bSurpriseRemoved &&
!padapter->bDriverStopped);
@@ -4070,6 +4071,7 @@ static void rtw_site_survey(struct rtw_adapter *padapter)
if (ScanType == SCAN_ACTIVE) /* obey the channel plan setting... */
{
int i;
+
for (i = 0;i<RTW_SSID_SCAN_AMOUNT;i++) {
if (pmlmeext->sitesurvey_res.ssid[i].ssid_len) {
/* todo: to issue two probe req??? */
@@ -4197,9 +4199,9 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
/* get the signal strength */
/* in dBM.raw data */
bssid->Rssi = precv_frame->attrib.phy_info.RecvSignalPower;
- bssid->PhyInfo.SignalQuality =
+ bssid->SignalQuality =
precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
- bssid->PhyInfo.SignalStrength =
+ bssid->SignalStrength =
precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
/* checking SSID */
@@ -4293,6 +4295,7 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
bssid->IELength);
if (p && p[1] > 0) {
struct ieee80211_ht_cap *pHT_caps;
+
pHT_caps = (struct ieee80211_ht_cap *)(p + 2);
if (pHT_caps->cap_info &
@@ -4305,7 +4308,7 @@ static struct wlan_bssid_ex *collect_bss_info(struct rtw_adapter *padapter,
/* mark bss info receiving from nearby channel as SignalQuality 101 */
if (bssid->DSConfig != rtw_get_oper_ch23a(padapter))
- bssid->PhyInfo.SignalQuality = 101;
+ bssid->SignalQuality = 101;
return bssid;
fail:
@@ -4319,6 +4322,7 @@ static void start_create_ibss(struct rtw_adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
+
pmlmeext->cur_channel = (u8)pnetwork->DSConfig;
pmlmeinfo->bcn_interval = pnetwork->beacon_interval;
@@ -4354,9 +4358,7 @@ static void start_create_ibss(struct rtw_adapter *padapter)
report_join_res23a(padapter, 1);
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
}
- }
- else
- {
+ } else {
DBG_8723A("%s: invalid cap:%x\n", __func__, caps);
return;
}
@@ -4414,9 +4416,7 @@ static void start_clnt_join(struct rtw_adapter *padapter)
pmlmeinfo->state = MSR_ADHOC;
report_join_res23a(padapter, 1);
- }
- else
- {
+ } else {
/* DBG_8723A("marc: invalid cap:%x\n", caps); */
return;
}
@@ -4480,16 +4480,12 @@ int receive_disconnect23a(struct rtw_adapter *padapter,
DBG_8723A("%s\n", __func__);
- if ((pmlmeinfo->state&0x03) == MSR_INFRA)
- {
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- {
+ if ((pmlmeinfo->state&0x03) == MSR_INFRA) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
pmlmeinfo->state = MSR_NOLINK;
report_del_sta_event23a(padapter, MacAddr, reason);
- }
- else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE)
- {
+ } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
pmlmeinfo->state = MSR_NOLINK;
report_join_res23a(padapter, -2);
}
@@ -4866,7 +4862,7 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
memcpy((unsigned char *)&pjoinbss_evt->network.network,
&pmlmeinfo->network, sizeof(struct wlan_bssid_ex));
- pjoinbss_evt->network.join_res = pjoinbss_evt->network.aid = res;
+ pjoinbss_evt->network.join_res = res;
DBG_8723A("report_join_res23a(%d)\n", res);
@@ -4995,8 +4991,7 @@ void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
VCS_update23a(padapter, psta);
/* HT */
- if (pmlmepriv->htpriv.ht_option)
- {
+ if (pmlmepriv->htpriv.ht_option) {
psta->htpriv.ht_option = true;
psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
@@ -5006,9 +5001,7 @@ void update_sta_info23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->qos_option = true;
- }
- else
- {
+ } else {
psta->htpriv.ht_option = false;
psta->htpriv.ampdu_enable = false;
@@ -5050,12 +5043,10 @@ void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter,
goto exit_mlmeext_joinbss_event_callback23a;
}
- if ((pmlmeinfo->state&0x03) == MSR_ADHOC)
- {
+ if ((pmlmeinfo->state&0x03) == MSR_ADHOC) {
/* for bc/mc */
psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
- if (psta_bmc)
- {
+ if (psta_bmc) {
pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
update_bmc_sta_support_rate23a(padapter, psta_bmc->mac_id);
Update_RA_Entry23a(padapter, psta_bmc);
@@ -5086,8 +5077,7 @@ void mlmeext_joinbss_event_callback23a(struct rtw_adapter *padapter,
set_channel_bwmode23a(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
psta = rtw_get_stainfo23a(pstapriv, cur_network->MacAddress);
- if (psta) /* only for infra. mode */
- {
+ if (psta) { /* only for infra. mode */
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
/* DBG_8723A("set_sta_rate23a\n"); */
@@ -5123,8 +5113,7 @@ void mlmeext_sta_add_event_callback23a(struct rtw_adapter *padapter,
if ((pmlmeinfo->state & 0x03) == MSR_ADHOC) {
/* adhoc master or sta_count>1 */
- if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
- {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
/* nothing to do */
} else { /* adhoc client */
/* correcting TSF */
diff --git a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
index 1b2af7381d82..e2d51afe522c 100644
--- a/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723au/core/rtw_pwrctrl.c
@@ -159,7 +159,7 @@ void rtw_ps_processor23a(struct rtw_adapter *padapter)
if (pwrpriv->ips_mode_req == IPS_NONE)
goto exit;
- if (rtw_pwr_unassociated_idle(padapter) == false)
+ if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
if (pwrpriv->rf_pwrstate == rf_on &&
@@ -172,12 +172,12 @@ void rtw_ps_processor23a(struct rtw_adapter *padapter)
exit:
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(unsigned long data)
{
struct rtw_adapter *padapter = (struct rtw_adapter *)data;
+
rtw_ps_cmd23a(padapter);
}
@@ -338,8 +338,7 @@ s32 LPS_RF_ON_check23a(struct rtw_adapter *padapter, u32 delay_ms)
start_time = jiffies;
end_time = start_time + msecs_to_jiffies(delay_ms);
- while (1)
- {
+ while (1) {
bAwake = rtl8723a_get_fwlps_rf_on(padapter);
if (bAwake == true)
break;
@@ -470,6 +469,7 @@ void rtw_free_pwrctrl_priv(struct rtw_adapter *adapter)
inline void rtw_set_ips_deny23a(struct rtw_adapter *padapter, u32 ms)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 5bc7734d9a72..559dddee2648 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -215,6 +215,7 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
{
u32 cnt = 0;
struct recv_frame *pending_frame;
+
while ((pending_frame = rtw_alloc_recvframe23a(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe23a(pending_frame);
DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__);
@@ -239,6 +240,7 @@ int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *
int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
{
unsigned long irqL;
+
spin_lock_irqsave(&queue->lock, irqL);
list_del_init(&precvbuf->list);
@@ -364,6 +366,7 @@ int recvframe_chkmic(struct rtw_adapter *adapter,
if (bmic_err == true) {
int i;
+
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("\n *(pframemic-8)-*(pframemic-1) ="
"0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:"
@@ -483,6 +486,7 @@ struct recv_frame *decryptor(struct rtw_adapter *padapter,
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->pkt->data + prxattrib->hdrlen;
+
prxattrib->key_index = (((iv[3]) >> 6) & 0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -564,59 +568,27 @@ static struct recv_frame *portctrl(struct rtw_adapter *adapter,
("########portctrl:adapter->securitypriv.dot11AuthAlgrthm ="
"%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+ prtnframe = precv_frame;
+
if (auth_alg == dot11AuthAlgrthm_8021X) {
/* get ether_type */
ptr = pfhdr->pkt->data + pfhdr->attrib.hdrlen;
ether_type = (ptr[6] << 8) | ptr[7];
- if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ if (psta && psta->ieee8021x_blocked) {
/* blocked */
/* only accept EAPOL frame */
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("########portctrl:psta->ieee8021x_blocked =="
"1\n"));
- if (ether_type == eapol_type) {
- prtnframe = precv_frame;
- } else {
+ if (ether_type != eapol_type) {
/* free this frame */
rtw_free_recvframe23a(precv_frame);
prtnframe = NULL;
}
- } else {
- /* allowed */
- /* check decryption status, and decrypt the frame if needed */
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("########portctrl:psta->ieee8021x_blocked =="
- "0\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("portctrl:precv_frame->hdr.attrib.privacy ="
- "%x\n", precv_frame->attrib.privacy));
-
- if (pattrib->bdecrypted == 0) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("portctrl:prxstat->decrypted =%x\n",
- pattrib->bdecrypted));
- }
-
- prtnframe = precv_frame;
- /* check is the EAPOL frame or not (Rekey) */
- if (ether_type == eapol_type) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
- ("########portctrl:ether_type == "
- "0x888e\n"));
- /* check Rekey */
-
- prtnframe = precv_frame;
- } else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
- ("########portctrl:ether_type = 0x%04x"
- "\n", ether_type));
- }
}
- } else {
- prtnframe = precv_frame;
}
return prtnframe;
@@ -1066,6 +1038,7 @@ int sta2ap_data_frame(struct rtw_adapter *adapter,
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
+
if (!ether_addr_equal(pattrib->ra, myhwaddr)) {
ret = RTW_RX_HANDLED;
goto exit;
@@ -1405,8 +1378,7 @@ static int validate_recv_data_frame(struct rtw_adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
("\n pattrib->encrypt =%d\n", pattrib->encrypt));
- switch (pattrib->encrypt)
- {
+ switch (pattrib->encrypt) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
pattrib->iv_len = IEEE80211_WEP_IV_LEN;
@@ -1505,8 +1477,7 @@ static int validate_recv_frame(struct rtw_adapter *adapter,
if (unlikely(bDumpRxPkt == 1))
dump_rx_pkt(skb, type, bDumpRxPkt);
- switch (type)
- {
+ switch (type) {
case IEEE80211_FTYPE_MGMT:
retval = validate_recv_mgnt_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1524,7 +1495,6 @@ static int validate_recv_frame(struct rtw_adapter *adapter,
retval = _FAIL; /* only data frame return _SUCCESS */
break;
case IEEE80211_FTYPE_DATA:
- rtw_led_control(adapter, LED_CTL_RX);
pattrib->qos = (subtype & IEEE80211_STYPE_QOS_DATA) ? 1 : 0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
@@ -1551,8 +1521,6 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
u16 eth_type, len, hdrlen;
u8 bsnaphdr;
u8 *psnap;
-
- int ret = _SUCCESS;
struct rtw_adapter *adapter = precvframe->adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -1613,7 +1581,7 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
}
- return ret;
+ return _SUCCESS;
}
/* perform defrag */
@@ -1691,7 +1659,7 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
skb_put(skb, pnfhdr->pkt->len);
prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
- };
+ }
/* free the defrag_q queue and return the prframe */
rtw_free_recvframe23a_queue(defrag_q);
@@ -2177,8 +2145,7 @@ int process_recv_indicatepkts(struct rtw_adapter *padapter,
return retval;
}
}
- } else /* B/G mode */
- {
+ } else { /* B/G mode */
retval = wlanhdr_to_ethhdr(prframe);
if (retval != _SUCCESS) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -2238,8 +2205,6 @@ static int recv_func_posthandle(struct rtw_adapter *padapter,
struct recv_priv *precvpriv = &padapter->recvpriv;
/* DATA FRAME */
- rtw_led_control(padapter, LED_CTL_RX);
-
prframe = decryptor(padapter, prframe);
if (prframe == NULL) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
@@ -2349,66 +2314,52 @@ void rtw_signal_stat_timer_hdl23a(unsigned long data)
u8 _alpha = 3; /* this value is based on converging_constant = 5000 */
/* and sampling_interval = 1000 */
- if (adapter->recvpriv.is_signal_dbg) {
- /* update the user specific value, signal_strength_dbg, */
- /* to signal_strength, rssi */
- adapter->recvpriv.signal_strength =
- adapter->recvpriv.signal_strength_dbg;
- adapter->recvpriv.rssi =
- (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
- } else {
- if (recvpriv->signal_strength_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_strength =
- recvpriv->signal_strength_data.avg_val;
- num_signal_strength =
- recvpriv->signal_strength_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /* the signal values */
- recvpriv->signal_strength_data.update_req = 1;
- }
+ if (recvpriv->signal_strength_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_strength = recvpriv->signal_strength_data.avg_val;
+ num_signal_strength = recvpriv->signal_strength_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /* the signal values */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {
+ /* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+ num_signal_qual = recvpriv->signal_qual_data.total_num;
+ /* after avg_vals are acquired, we can re-stat */
+ /*the signal values */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
+ if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
+ tmp_s = (avg_signal_strength + (_alpha - 1) *
+ recvpriv->signal_strength);
+ if (tmp_s %_alpha)
+ tmp_s = tmp_s / _alpha + 1;
+ else
+ tmp_s = tmp_s / _alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
- if (recvpriv->signal_qual_data.update_req == 0) {
- /* update_req is clear, means we got rx */
- avg_signal_qual = recvpriv->signal_qual_data.avg_val;
- num_signal_qual = recvpriv->signal_qual_data.total_num;
- /* after avg_vals are acquired, we can re-stat */
- /*the signal values */
- recvpriv->signal_qual_data.update_req = 1;
- }
+ tmp_q = avg_signal_qual + (_alpha - 1) * recvpriv->signal_qual;
+ if (tmp_q %_alpha)
+ tmp_q = tmp_q / _alpha + 1;
+ else
+ tmp_q = tmp_q / _alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
- /* update value of signal_strength, rssi, signal_qual */
- if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) {
- tmp_s = (avg_signal_strength + (_alpha - 1) *
- recvpriv->signal_strength);
- if (tmp_s %_alpha)
- tmp_s = tmp_s / _alpha + 1;
- else
- tmp_s = tmp_s / _alpha;
- if (tmp_s > 100)
- tmp_s = 100;
-
- tmp_q = (avg_signal_qual + (_alpha - 1) *
- recvpriv->signal_qual);
- if (tmp_q %_alpha)
- tmp_q = tmp_q / _alpha + 1;
- else
- tmp_q = tmp_q / _alpha;
- if (tmp_q > 100)
- tmp_q = 100;
-
- recvpriv->signal_strength = tmp_s;
- recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
- recvpriv->signal_qual = tmp_q;
-
- DBG_8723A("%s signal_strength:%3u, rssi:%3d, "
- "signal_qual:%3u, num_signal_strength:%u, "
- "num_signal_qual:%u\n",
- __func__, recvpriv->signal_strength,
- recvpriv->rssi, recvpriv->signal_qual,
- num_signal_strength, num_signal_qual
- );
- }
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->signal_qual = tmp_q;
+
+ DBG_8723A("%s signal_strength:%3u, signal_qual:%3u, "
+ "num_signal_strength:%u, num_signal_qual:%u\n",
+ __func__, recvpriv->signal_strength,
+ recvpriv->signal_qual, num_signal_strength,
+ num_signal_qual);
}
+
rtw_set_signal_stat_timer(recvpriv);
}
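[note] rtw_signal_stat_timer_hdl23a() above keeps the driver's signal smoothing rule through the restructuring: each freshly averaged sample is blended with the previous value as (avg + (_alpha - 1) * prev) / _alpha, rounded up when there is a remainder and clamped to 100. A minimal standalone sketch of that update, assuming the same _alpha = 3 as in the hunk; smooth_percent() and the sample numbers are illustrative only, not driver API:

#include <stdio.h>

/* Weighted-average update used for signal_strength / signal_qual:
 * new = (sample + (alpha - 1) * prev) / alpha, rounded up on a
 * remainder and capped at 100 (the values are percentages). */
static unsigned int smooth_percent(unsigned int sample, unsigned int prev,
                                   unsigned int alpha)
{
        unsigned int tmp = sample + (alpha - 1) * prev;

        tmp = (tmp % alpha) ? tmp / alpha + 1 : tmp / alpha;
        return tmp > 100 ? 100 : tmp;
}

int main(void)
{
        /* e.g. previous strength 80%, fresh averaged sample 50%, alpha = 3 */
        printf("%u\n", smooth_percent(50, 80, 3));      /* prints 70 */
        return 0;
}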
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
index 76371ae69377..715a47414bdd 100644
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -23,19 +23,18 @@
#define CRC32_POLY 0x04c11db7
-struct arc4context
-{
+struct arc4context {
u32 x;
u32 y;
u8 state[256];
};
-static void arcfour_init(struct arc4context *parc4ctx, u8 * key, u32 key_len)
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
{
u32 t, u;
u32 keyindex;
u32 stateindex;
- u8 * state;
+ u8 *state;
u32 counter;
state = parc4ctx->state;
@@ -45,8 +44,7 @@ static void arcfour_init(struct arc4context *parc4ctx, u8 * key, u32 key_len)
state[counter] = (u8)counter;
keyindex = 0;
stateindex = 0;
- for (counter = 0; counter < 256; counter++)
- {
+ for (counter = 0; counter < 256; counter++) {
t = state[counter];
stateindex = (stateindex + key[keyindex] + t) & 0xff;
u = state[stateindex];
@@ -62,7 +60,7 @@ static u32 arcfour_byte( struct arc4context *parc4ctx)
u32 x;
u32 y;
u32 sx, sy;
- u8 * state;
+ u8 *state;
state = parc4ctx->state;
x = (parc4ctx->x + 1) & 0xff;
@@ -78,8 +76,8 @@ static u32 arcfour_byte( struct arc4context *parc4ctx)
}
static void arcfour_encrypt( struct arc4context *parc4ctx,
- u8 * dest,
- u8 * src,
+ u8 *dest,
+ u8 *src,
u32 len)
{
u32 i;
@@ -114,8 +112,7 @@ static void crc32_init(void)
c = 0x12340000;
- for (i = 0; i < 256; ++i)
- {
+ for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
@@ -221,7 +218,7 @@ void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
u8 keyindex;
struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff * skb = precvframe->pkt;
+ struct sk_buff *skb = precvframe->pkt;
pframe = skb->data;
@@ -260,33 +257,29 @@ void rtw_wep_decrypt23a(struct rtw_adapter *padapter,
crc[1], payload[length - 3],
crc[0], payload[length - 4]));
}
-
- return;
}
/* 3 ===== TKIP related ===== */
-static u32 secmicgetuint32(u8 * p)
+static u32 secmicgetuint32(u8 *p)
/* Convert from Byte[] to u32 in a portable way */
{
s32 i;
u32 res = 0;
- for (i = 0; i<4; i++)
- {
+ for (i = 0; i<4; i++) {
res |= ((u32)(*p++)) << (8*i);
}
return res;
}
-static void secmicputuint32(u8 * p, u32 val)
+static void secmicputuint32(u8 *p, u32 val)
/* Convert from long to Byte[] in a portable way */
{
long i;
- for (i = 0; i<4; i++)
- {
+ for (i = 0; i<4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
@@ -304,7 +297,7 @@ static void secmicclear(struct mic_data *pmicdata)
}
-void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 * key)
+void rtw_secmicsetkey23a(struct mic_data *pmicdata, u8 *key)
{
/* Set the key */
@@ -322,8 +315,7 @@ void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
pmicdata->nBytesInM++;
/* Process the word if it is full. */
- if (pmicdata->nBytesInM >= 4)
- {
+ if (pmicdata->nBytesInM >= 4) {
pmicdata->L ^= pmicdata->M;
pmicdata->R ^= ROL32(pmicdata->L, 17);
pmicdata->L += pmicdata->R;
@@ -340,19 +332,18 @@ void rtw_secmicappend23abyte23a(struct mic_data *pmicdata, u8 b)
}
-void rtw_secmicappend23a(struct mic_data *pmicdata, u8 * src, u32 nbytes)
+void rtw_secmicappend23a(struct mic_data *pmicdata, u8 *src, u32 nbytes)
{
/* This is simple */
- while(nbytes > 0)
- {
+ while(nbytes > 0) {
rtw_secmicappend23abyte23a(pmicdata, *src++);
nbytes--;
}
}
-void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
+void rtw_secgetmic23a(struct mic_data *pmicdata, u8 *dst)
{
/* Append the minimum padding */
@@ -362,8 +353,7 @@ void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
rtw_secmicappend23abyte23a(pmicdata, 0);
rtw_secmicappend23abyte23a(pmicdata, 0);
/* and then zeroes until the length is a multiple of 4 */
- while(pmicdata->nBytesInM != 0)
- {
+ while(pmicdata->nBytesInM != 0) {
rtw_secmicappend23abyte23a(pmicdata, 0);
}
/* The appendByte function has already computed the result. */
@@ -374,7 +364,8 @@ void rtw_secgetmic23a(struct mic_data *pmicdata, u8 * dst)
}
-void rtw_seccalctkipmic23a(u8 * key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
+void rtw_seccalctkipmic23a(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *mic_code, u8 pri)
{
struct mic_data micdata;
@@ -531,8 +522,8 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
/* Now compute an unbalanced Feistel cipher with 80-bit block */
/* size on the 80-bit block P1K[], using the 128-bit key TK[] */
- for (i = 0; i < PHASE1_LOOP_CNT ;i++)
- { /* Each add operation here is mod 2**16 */
+ for (i = 0; i < PHASE1_LOOP_CNT ;i++) {
+ /* Each add operation here is mod 2**16 */
p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
@@ -602,8 +593,7 @@ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
/* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
- for (i = 0;i<6;i++)
- {
+ for (i = 0;i<6;i++) {
rc4key[4+2*i] = Lo8(PPK[i]);
rc4key[5+2*i] = Hi8(PPK[i]);
}
@@ -649,8 +639,7 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
if (stainfo!= NULL) {
- if (!(stainfo->state &_FW_LINKED))
- {
+ if (!(stainfo->state &_FW_LINKED)) {
DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
return _FAIL;
}
@@ -728,7 +717,7 @@ int rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct sk_buff * skb = precvframe->pkt;
+ struct sk_buff *skb = precvframe->pkt;
int res = _SUCCESS;
pframe = skb->data;
@@ -887,8 +876,7 @@ static void byte_sub(u8 *in, u8 *out)
{
int i;
- for (i = 0; i< 16; i++)
- {
+ for (i = 0; i< 16; i++) {
out[i] = sbox(in[i]);
}
@@ -928,8 +916,7 @@ static void mix_column(u8 *in, u8 *out)
u8 temp[4];
u8 tempb[4];
- for (i = 0 ; i<4; i++)
- {
+ for (i = 0 ; i<4; i++) {
if ((in[i] & 0x80) == 0x80)
add1b[i] = 0x1b;
else
@@ -951,11 +938,9 @@ static void mix_column(u8 *in, u8 *out)
andf7[2] = in[2] & 0x7f;
andf7[3] = in[3] & 0x7f;
- for (i = 3; i>0; i--) /* logical shift left 1 bit */
- {
+ for (i = 3; i>0; i--) { /* logical shift left 1 bit */
andf7[i] = andf7[i] << 1;
- if ((andf7[i-1] & 0x80) == 0x80)
- {
+ if ((andf7[i-1] & 0x80) == 0x80) {
andf7[i] = (andf7[i] | 0x01);
}
}
@@ -988,21 +973,15 @@ static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
for (i = 0; i<16; i++) round_key[i] = key[i];
- for (round = 0; round < 11; round++)
- {
- if (round == 0)
- {
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
xor_128(round_key, data, ciphertext);
next_key(round_key, round);
- }
- else if (round == 10)
- {
+ } else if (round == 10) {
byte_sub(ciphertext, intermediatea);
shift_row(intermediatea, intermediateb);
xor_128(intermediateb, round_key, ciphertext);
- }
- else /* 1 - 9 */
- {
+ } else { /* 1 - 9 */
byte_sub(ciphertext, intermediatea);
shift_row(intermediatea, intermediateb);
mix_column(&intermediateb[0], &intermediatea[0]);
@@ -1088,20 +1067,17 @@ static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists,
mic_header2[6] = 0x00;
mic_header2[7] = 0x00; /* mpdu[23]; */
- if (!qc_exists && a4_exists)
- {
+ if (!qc_exists && a4_exists) {
for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
}
- if (qc_exists && !a4_exists)
- {
+ if (qc_exists && !a4_exists) {
mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
mic_header2[9] = mpdu[25] & 0x00;
}
- if (qc_exists && a4_exists)
- {
+ if (qc_exists && a4_exists) {
for (i = 0;i<6;i++) mic_header2[8+i] = mpdu[24+i]; /* A4 */
mic_header2[14] = mpdu[30] & 0x0f;
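[note] The arcfour_* helpers tidied up in rtw_security.c are the standard RC4 key schedule (arcfour_init) and output generator (arcfour_byte) used by the WEP/TKIP software ciphers. For reference, a standalone sketch of the same two steps; rc4_init()/rc4_byte() are illustrative names, not driver functions:

#include <stdint.h>
#include <stddef.h>

/* Key-scheduling algorithm, mirroring arcfour_init() */
static void rc4_init(uint8_t s[256], const uint8_t *key, size_t keylen)
{
        unsigned int i, j = 0;
        uint8_t t;

        for (i = 0; i < 256; i++)
                s[i] = (uint8_t)i;
        for (i = 0; i < 256; i++) {
                j = (j + s[i] + key[i % keylen]) & 0xff;
                t = s[i];
                s[i] = s[j];
                s[j] = t;
        }
}

/* Output generator, mirroring arcfour_byte(); x/y carry state between calls */
static uint8_t rc4_byte(uint8_t s[256], unsigned int *x, unsigned int *y)
{
        uint8_t t;

        *x = (*x + 1) & 0xff;
        *y = (*y + s[*x]) & 0xff;
        t = s[*x];
        s[*x] = s[*y];
        s[*y] = t;
        return s[(s[*x] + s[*y]) & 0xff];
}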
diff --git a/drivers/staging/rtl8723au/core/rtw_sreset.c b/drivers/staging/rtl8723au/core/rtw_sreset.c
index 58ed980795a6..29a29d92a6ac 100644
--- a/drivers/staging/rtl8723au/core/rtw_sreset.c
+++ b/drivers/staging/rtl8723au/core/rtw_sreset.c
@@ -107,7 +107,7 @@ static void sreset_restore_network_station(struct rtw_adapter *padapter)
mlmeext_joinbss_event_callback23a(padapter, 1);
/* restore Sequence No. */
- rtl8723au_write8(padapter, 0x4dc, padapter->xmitpriv.nqos_ssn);
+ rtl8723au_write8(padapter, REG_NQOS_SEQ, padapter->xmitpriv.nqos_ssn);
sreset_restore_security_station(padapter);
}
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index 09c44a55d4a6..69d9e0f17fd8 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -608,8 +608,6 @@ void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
DBG_8723A("wmm_para_seq(%d): %d\n", i,
pxmitpriv->wmm_para_seq[i]);
}
-
- return;
}
static void bwmode_update_check(struct rtw_adapter *padapter, const u8 *p)
@@ -750,7 +748,6 @@ void HT_caps_handler23a(struct rtw_adapter *padapter, const u8 *p)
else
cap->mcs.rx_mask[i] &= MCS_rate_2R23A[i];
}
- return;
}
void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
@@ -771,7 +768,6 @@ void HT_info_handler23a(struct rtw_adapter *padapter, const u8 *p)
pmlmeinfo->HT_info_enable = 1;
memcpy(&pmlmeinfo->HT_info, p + 2, p[1]);
- return;
}
void HTOnAssocRsp23a(struct rtw_adapter *padapter)
@@ -833,7 +829,7 @@ void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
psta->cts2self = 0;
break;
case 1: /* on */
- if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ if (pregpriv->vcs_type == RTS_CTS) {
psta->rtsen = 1;
psta->cts2self = 0;
} else {
@@ -844,7 +840,7 @@ void VCS_update23a(struct rtw_adapter *padapter, struct sta_info *psta)
case 2: /* auto */
default:
if (pmlmeinfo->ERP_enable && pmlmeinfo->ERP_IE & BIT(1)) {
- if (pregpriv->vcs_type == 1) {
+ if (pregpriv->vcs_type == RTS_CTS) {
psta->rtsen = 1;
psta->cts2self = 0;
} else {
@@ -870,7 +866,7 @@ int rtw_check_bcn_info23a(struct rtw_adapter *Adapter,
int pie_len, ssid_len, privacy;
const u8 *p, *ssid;
- if (is_client_associated_to_ap23a(Adapter) == false)
+ if (!is_client_associated_to_ap23a(Adapter))
return _SUCCESS;
if (unlikely(!ieee80211_is_beacon(mgmt->frame_control))) {
@@ -1080,7 +1076,7 @@ bool is_ap_in_tkip23a(struct rtw_adapter *padapter)
return false;
}
-bool should_forbid_n_rate23a(struct rtw_adapter * padapter)
+bool should_forbid_n_rate23a(struct rtw_adapter *padapter)
{
u32 i;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1153,6 +1149,7 @@ bool is_ap_in_wep23a(struct rtw_adapter *padapter)
static int wifirate2_ratetbl_inx23a(unsigned char rate)
{
int inx = 0;
+
rate = rate & 0x7f;
switch (rate) {
@@ -1311,6 +1308,7 @@ unsigned char check_assoc_AP23a(u8 *pframe, uint len)
u8 epigram_vendor_flag;
u8 ralink_vendor_flag;
const u8 *p;
+
epigram_vendor_flag = 0;
ralink_vendor_flag = 0;
@@ -1324,7 +1322,6 @@ unsigned char check_assoc_AP23a(u8 *pframe, uint len)
DBG_8723A("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if (!memcmp(p + 2, BROADCOM_OUI1, 3) ||
- !memcmp(p + 2, BROADCOM_OUI2, 3) ||
!memcmp(p + 2, BROADCOM_OUI2, 3)) {
DBG_8723A("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
index 7a8038156cea..7a5e6bf0d1ae 100644
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -22,9 +22,6 @@
#include <usb_ops.h>
#include <rtl8723a_xmit.h>
-static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-
static void _init_txservq(struct tx_servq *ptxservq)
{
@@ -180,16 +177,8 @@ int _rtw_init_xmit_priv23a(struct xmit_priv *pxmitpriv,
for (i = 0; i < 4; i ++)
pxmitpriv->wmm_para_seq[i] = i;
- pxmitpriv->txirp_cnt = 1;
-
sema_init(&pxmitpriv->tx_retevt, 0);
- /* per AC pending irp */
- pxmitpriv->beq_cnt = 0;
- pxmitpriv->bkq_cnt = 0;
- pxmitpriv->viq_cnt = 0;
- pxmitpriv->voq_cnt = 0;
-
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init23a(&pxmitpriv->ack_tx_ops, 0);
@@ -315,6 +304,7 @@ static void update_attrib_vcs_info(struct rtw_adapter *padapter, struct xmit_fra
/* check HT op mode */
if (pattrib->ht_en) {
u8 HTOpMode = pmlmeinfo->HT_protection;
+
if ((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
(!pmlmeext->cur_bwmode && HTOpMode == 3)) {
pattrib->vcs_mode = RTS_CTS;
@@ -464,6 +454,7 @@ static int update_attrib(struct rtw_adapter *padapter,
if (pattrib->pktlen > 282 + 24) {
if (pattrib->ether_type == ETH_P_IP) {/* IP header */
u8 *pframe = skb->data;
+
pframe += ETH_HLEN;
if ((pframe[21] == 68 && pframe[23] == 67) ||
@@ -1048,21 +1039,23 @@ s32 rtw_txframes_sta_ac_pending23a(struct rtw_adapter *padapter,
return ptxservq->qcnt;
}
-/*
- * Calculate wlan 802.11 packet MAX size from pkt_attrib
- * This function doesn't consider fragment case
+/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
+ * IEEE LLC/SNAP header contains 8 octets
+ * First 3 octets comprise the LLC portion
+ * SNAP portion, 5 octets, is divided into two fields:
+ * Organizationally Unique Identifier(OUI), 3 octets,
+ * type, defined by that organization, 2 octets.
*/
-u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib)
+static int rtw_put_snap(u8 *data, u16 h_proto)
{
- u32 len = 0;
-
- len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
- len += SNAP_SIZE + sizeof(u16); /* LLC */
- len += pattrib->pktlen;
- if (pattrib->encrypt == WLAN_CIPHER_SUITE_TKIP) len += 8; /* MIC */
- len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+ if (h_proto == ETH_P_IPX || h_proto == ETH_P_AARP)
+ ether_addr_copy(data, bridge_tunnel_header);
+ else
+ ether_addr_copy(data, rfc1042_header);
- return len;
+ data += ETH_ALEN;
+ put_unaligned_be16(h_proto, data);
+ return ETH_ALEN + sizeof(u16);
}
/*
@@ -1188,7 +1181,7 @@ int rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *skb,
mpdu_len -= pattrib->iv_len;
}
if (frg_inx == 0) {
- llc_sz = rtw_put_snap23a(pframe, pattrib->ether_type);
+ llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
pframe += llc_sz;
mpdu_len -= llc_sz;
}
@@ -1258,34 +1251,6 @@ exit:
return res;
}
-/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
- * IEEE LLC/SNAP header contains 8 octets
- * First 3 octets comprise the LLC portion
- * SNAP portion, 5 octets, is divided into two fields:
- * Organizationally Unique Identifier(OUI), 3 octets,
- * type, defined by that organization, 2 octets.
- */
-s32 rtw_put_snap23a(u8 *data, u16 h_proto)
-{
- struct ieee80211_snap_hdr *snap;
- u8 *oui;
-
- snap = (struct ieee80211_snap_hdr *)data;
- snap->dsap = 0xaa;
- snap->ssap = 0xaa;
- snap->ctrl = 0x03;
-
- if (h_proto == 0x8137 || h_proto == 0x80f3)
- oui = P802_1H_OUI;
- else
- oui = RFC1042_OUI;
- snap->oui[0] = oui[0];
- snap->oui[1] = oui[1];
- snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
- return SNAP_SIZE + sizeof(u16);
-}
-
void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
{
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1293,7 +1258,7 @@ void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len)
uint protection;
const u8 *p;
- switch (pxmitpriv->vcs_setting) {
+ switch (pregistrypriv->vrtl_carrier_sense) {
case DISABLE_VCS:
pxmitpriv->vcs = NONE_VCS;
break;
@@ -1326,7 +1291,7 @@ void rtw_count_tx_stats23a(struct rtw_adapter *padapter, struct xmit_frame *pxmi
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
pxmitpriv->tx_bytes += sz;
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
@@ -1893,18 +1858,6 @@ u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe)
return addr;
}
-static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("### do_queue_select priority =%d , qsel = %d\n",
- pattrib->priority, qsel));
-
- pattrib->qsel = qsel;
-}
-
/*
* The main transmit(tx) entry
*
@@ -1936,9 +1889,7 @@ int rtw_xmit23a(struct rtw_adapter *padapter, struct sk_buff *skb)
}
pxmitframe->pkt = skb;
- rtw_led_control(padapter, LED_CTL_TX);
-
- do_queue_select(padapter, &pxmitframe->attrib);
+ pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
#ifdef CONFIG_8723AU_AP_MODE
spin_lock_bh(&pxmitpriv->lock);
@@ -2411,11 +2362,6 @@ void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status)
}
}
-void rtw_sctx_done23a(struct submit_ctx **sctx)
-{
- rtw23a_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
-}
-
int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
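[note] As the comment block moved next to rtw_put_snap() explains, the LLC/SNAP header is eight octets: three LLC octets, a three-octet OUI and a two-octet type. The new rtw_put_snap() builds it from the kernel's rfc1042_header / bridge_tunnel_header arrays plus a big-endian ethertype. A userspace sketch of the same layout; put_snap() and the local arrays are illustrative, though their byte values follow RFC 1042 and 802.1H:

#include <stdint.h>
#include <string.h>

#define ETH_P_IPX       0x8137
#define ETH_P_AARP      0x80F3

/* LLC (AA AA 03) + OUI, matching the kernel's rfc1042_header /
 * bridge_tunnel_header contents */
static const uint8_t rfc1042_hdr[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Sketch of rtw_put_snap(): write the 6-byte LLC/OUI prefix followed by the
 * ethertype in network byte order; returns the 8 bytes consumed. */
static int put_snap(uint8_t *buf, uint16_t h_proto)
{
        const uint8_t *hdr = (h_proto == ETH_P_IPX || h_proto == ETH_P_AARP) ?
                             bridge_tunnel_hdr : rfc1042_hdr;

        memcpy(buf, hdr, 6);
        buf[6] = h_proto >> 8;          /* big endian, as put_unaligned_be16() does */
        buf[7] = h_proto & 0xff;
        return 8;
}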
diff --git a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
index 4b41bc4ce1e6..179a1ba03029 100644
--- a/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
+++ b/drivers/staging/rtl8723au/hal/HalDMOutSrc8723A_CE.c
@@ -612,7 +612,7 @@ static void _PHY_PathADDAOn(struct rtw_adapter *pAdapter, u32 *ADDAReg, bool isP
u32 i;
pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2T) {
+ if (!is2T) {
pathOn = 0x0bdb25a0;
PHY_SetBBReg(pAdapter, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
} else {
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
index bf919f6e4128..bf4cae20bd12 100644
--- a/drivers/staging/rtl8723au/hal/hal_com.c
+++ b/drivers/staging/rtl8723au/hal/hal_com.c
@@ -463,7 +463,7 @@ void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet)
MaxAggNum = 0xF;
if (FactorToSet <= 3) {
- FactorToSet = (1 << (FactorToSet + 2));
+ FactorToSet = 1 << (FactorToSet + 2);
if (FactorToSet > MaxAggNum)
FactorToSet = MaxAggNum;
@@ -727,7 +727,7 @@ void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter)
rtl8723au_write8(padapter, REG_TXPAUSE, 0xff);
/* keep sn */
- padapter->xmitpriv.nqos_ssn = rtl8723au_read16(padapter, REG_NQOS_SEQ);
+ padapter->xmitpriv.nqos_ssn = rtl8723au_read8(padapter, REG_NQOS_SEQ);
if (pwrpriv->bkeepfwalive != true) {
u32 v32;
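[note] rtl8723a_set_ampdu_factor() above maps an A-MPDU factor of 0..3 to a maximum aggregation count of 1 << (factor + 2) and clips it to MaxAggNum (0xF in the path shown), before programming the result into the hardware (not repeated here). A small sketch of just that mapping, for illustration only:

#include <stdio.h>

/* Factor 0..3 -> 1 << (factor + 2) MPDUs, clipped to max_agg_num. */
static unsigned int ampdu_factor_to_agg_num(unsigned int factor,
                                            unsigned int max_agg_num)
{
        unsigned int n = 1u << (factor + 2);

        return n > max_agg_num ? max_agg_num : n;
}

int main(void)
{
        unsigned int f;

        for (f = 0; f <= 3; f++)
                printf("factor %u -> %u\n", f, ampdu_factor_to_agg_num(f, 0xF));
        /* prints 4, 8, 15, 15 */
        return 0;
}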
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
index 29d844d66cae..fb3cc872f205 100644
--- a/drivers/staging/rtl8723au/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
@@ -391,20 +391,11 @@ static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm,
}
}
-/* Endianness before calling this API */
-static void ODM_PhyStatusQuery23a_92CSeries(struct dm_odm_t *pDM_Odm,
- struct phy_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_packet_info *pPktinfo)
+void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo,
+ u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
{
odm_RxPhyStatus92CSeries_Parsing(pDM_Odm, pPhyInfo,
pPhyStatus, pPktinfo);
odm_Process_RSSIForDM(pDM_Odm, pPhyInfo, pPktinfo);
}
-
-void ODM_PhyStatusQuery23a(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo,
- u8 *pPhyStatus, struct odm_packet_info *pPktinfo)
-{
- ODM_PhyStatusQuery23a_92CSeries(pDM_Odm, pPhyInfo, pPhyStatus, pPktinfo);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
index 9054a987f06b..86a83975f4f0 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -340,7 +340,7 @@ static u8 bthci_GetAssocInfo(struct rtw_adapter *padapter, u8 EntryNum)
tempBuf, TotalLen-BaseMemoryShift);
pAmpAsoc = (struct amp_assoc_structure *)tempBuf;
- pAmpAsoc->Length = le16_to_cpu(pAmpAsoc->Length);
+ le16_to_cpus(&pAmpAsoc->Length);
BaseMemoryShift += 3 + pAmpAsoc->Length;
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("TypeID = 0x%x, ", pAmpAsoc->TypeID));
@@ -1759,18 +1759,6 @@ static enum hci_status bthci_CmdReadConnectionAcceptTimeout(struct rtw_adapter *
return status;
}
-/* 7.3.3 */
-static enum hci_status
-bthci_CmdSetEventFilter(
- struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd
- )
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- return status;
-}
-
/* 7.3.14 */
static enum hci_status
bthci_CmdWriteConnectionAcceptTimeout(
@@ -2982,19 +2970,12 @@ bthci_CmdReadLinkQuality(
return status;
}
-static enum hci_status bthci_CmdReadRSSI(struct rtw_adapter *padapter)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
- return status;
-}
-
static enum hci_status
bthci_CmdCreateLogicalLink(
struct rtw_adapter *padapter,
struct packet_irp_hcicmd_data *pHciCmd
)
{
- enum hci_status status = HCI_STATUS_SUCCESS;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
@@ -3003,7 +2984,7 @@ bthci_CmdCreateLogicalLink(
bthci_BuildLogicalLink(padapter, pHciCmd,
HCI_CREATE_LOGICAL_LINK);
- return status;
+ return HCI_STATUS_SUCCESS;
}
static enum hci_status
@@ -3012,7 +2993,6 @@ bthci_CmdAcceptLogicalLink(
struct packet_irp_hcicmd_data *pHciCmd
)
{
- enum hci_status status = HCI_STATUS_SUCCESS;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
@@ -3021,7 +3001,7 @@ bthci_CmdAcceptLogicalLink(
bthci_BuildLogicalLink(padapter, pHciCmd,
HCI_ACCEPT_LOGICAL_LINK);
- return status;
+ return HCI_STATUS_SUCCESS;
}
static enum hci_status
@@ -4138,15 +4118,6 @@ bthci_CmdHostBufferSize(struct rtw_adapter *padapter,
}
static enum hci_status
-bthci_CmdHostNumberOfCompletedPackets(struct rtw_adapter *padapter,
- struct packet_irp_hcicmd_data *pHciCmd)
-{
- enum hci_status status = HCI_STATUS_SUCCESS;
-
- return status;
-}
-
-static enum hci_status
bthci_UnknownCMD(struct rtw_adapter *padapter, struct packet_irp_hcicmd_data *pHciCmd)
{
enum hci_status status = HCI_STATUS_UNKNOW_HCI_CMD;
@@ -4219,7 +4190,6 @@ bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
break;
case HCI_SET_EVENT_FILTER:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_SET_EVENT_FILTER\n"));
- status = bthci_CmdSetEventFilter(padapter, pHciCmd);
break;
case HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_WRITE_CONNECTION_ACCEPT_TIMEOUT\n"));
@@ -4235,7 +4205,6 @@ bthci_HandleOGFSetEventMaskCMD(struct rtw_adapter *padapter,
break;
case HCI_HOST_NUMBER_OF_COMPLETED_PACKETS:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_HOST_NUMBER_OF_COMPLETED_PACKETS\n"));
- status = bthci_CmdHostNumberOfCompletedPackets(padapter, pHciCmd);
break;
case HCI_READ_LINK_SUPERVISION_TIMEOUT:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LINK_SUPERVISION_TIMEOUT\n"));
@@ -4323,7 +4292,6 @@ bthci_HandleOGFStatusParameters(struct rtw_adapter *padapter,
break;
case HCI_READ_RSSI:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_RSSI\n"));
- status = bthci_CmdReadRSSI(padapter);
break;
case HCI_READ_LOCAL_AMP_INFO:
RTPRINT(FIOCTL, IOCTL_BT_HCICMD, ("HCI_READ_LOCAL_AMP_INFO\n"));
@@ -10671,7 +10639,7 @@ void BTDM_BBBackOffLevel(struct rtw_adapter *padapter, u8 type)
void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_FWCoexAllOff()\n"));
if (pHalData->bt_coexist.bFWCoexistAllOff)
@@ -10685,7 +10653,7 @@ void BTDM_FWCoexAllOff(struct rtw_adapter *padapter)
void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_SWCoexAllOff()\n"));
if (pHalData->bt_coexist.bSWCoexistAllOff)
@@ -10698,7 +10666,7 @@ void BTDM_SWCoexAllOff(struct rtw_adapter *padapter)
void BTDM_HWCoexAllOff(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
RTPRINT(FBT, BT_TRACE, ("BTDM_HWCoexAllOff()\n"));
if (pHalData->bt_coexist.bHWCoexistAllOff)
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
index 271c33d6ca5a..7b56411cc3c8 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
@@ -115,19 +115,16 @@ exit:
int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param)
{
- int res = _SUCCESS;
-
*((u32 *)param) = cpu_to_le32(*((u32 *)param));
FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param);
- return res;
+ return _SUCCESS;
}
int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
{
u8 buf[5];
- int res = _SUCCESS;
memset(buf, 0, 5);
mask = cpu_to_le32(mask);
@@ -136,7 +133,7 @@ int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg)
FillH2CCmd(padapter, MACID_CONFIG_EID, 5, buf);
- return res;
+ return _SUCCESS;
}
/* bitmap[0:27] = tx_rate_bitmap */
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 8523908d5e5f..a5eadd4e2580 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -47,29 +47,19 @@ static void _FWDownloadEnable(struct rtw_adapter *padapter, bool enable)
}
}
-static int _BlockWrite(struct rtw_adapter *padapter, void *buffer, u32 buffSize)
-{
- int ret;
-
- if (buffSize > MAX_PAGE_SIZE)
- return _FAIL;
-
- ret = rtl8723au_writeN(padapter, FW_8723A_START_ADDRESS,
- buffSize, buffer);
-
- return ret;
-}
-
static int
_PageWrite(struct rtw_adapter *padapter, u32 page, void *buffer, u32 size)
{
u8 value8;
u8 u8Page = (u8) (page & 0x07);
+ if (size > MAX_PAGE_SIZE)
+ return _FAIL;
+
value8 = (rtl8723au_read8(padapter, REG_MCUFWDL + 2) & 0xF8) | u8Page;
rtl8723au_write8(padapter, REG_MCUFWDL + 2, value8);
- return _BlockWrite(padapter, buffer, size);
+ return rtl8723au_writeN(padapter, FW_8723A_START_ADDRESS, size, buffer);
}
static int _WriteFW(struct rtw_adapter *padapter, void *buffer, u32 size)
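[note] The _PageWrite() hunk above folds the removed _BlockWrite() helper into the page write path: the size guard now lives in one place, the 3-bit page index is programmed into the low bits of REG_MCUFWDL + 2, and the chunk is pushed to FW_8723A_START_ADDRESS in a single block write. A hedged standalone sketch of that pattern; MAX_PAGE_SIZE's value and the io_* accessors below are placeholders standing in for the driver's real register helpers:

#include <stdint.h>

#define MAX_PAGE_SIZE   4096    /* placeholder value, for illustration only */

/* Stand-ins for rtl8723au_read8/write8/writeN; they only model the calls
 * and never touch hardware. */
static uint8_t io_read8(uint32_t reg) { (void)reg; return 0; }
static void io_write8(uint32_t reg, uint8_t val) { (void)reg; (void)val; }
static int io_writeN(uint32_t addr, uint32_t len, const void *buf)
{
        (void)addr; (void)len; (void)buf;
        return 0;       /* pretend the block write succeeded */
}

/* Sketch of the reworked _PageWrite(): reject oversized chunks, select the
 * 3-bit page index in the download-control register, then push the chunk
 * to the firmware start address in one block write. */
static int page_write(uint32_t ctrl_reg, uint32_t fw_addr, uint32_t page,
                      const void *buf, uint32_t size)
{
        uint8_t v;

        if (size > MAX_PAGE_SIZE)
                return -1;

        v = (io_read8(ctrl_reg + 2) & 0xF8) | (page & 0x07);
        io_write8(ctrl_reg + 2, v);
        return io_writeN(fw_addr, size, buf);
}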
@@ -743,84 +733,6 @@ u16 rtl8723a_EfuseGetCurrentSize_BT(struct rtw_adapter *padapter)
return retU2;
}
-bool
-rtl8723a_EfusePgPacketRead(struct rtw_adapter *padapter, u8 offset, u8 *data)
-{
- u8 efuse_data, word_cnts = 0;
- u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
- u8 i;
- u8 max_section = 0;
- s32 ret;
-
- if (data == NULL)
- return false;
-
- EFUSE_GetEfuseDefinition23a(padapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION,
- &max_section);
- if (offset > max_section) {
- DBG_8723A("%s: Packet offset(%d) is illegal(>%d)!\n",
- __func__, offset, max_section);
- return false;
- }
-
- memset(data, 0xFF, PGPKT_DATA_SIZE);
- ret = true;
-
- /* */
- /* <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the
- end of Efuse by CP. */
- /* Skip dummy parts to prevent unexpected data read from Efuse. */
- /* By pass right now. 2009.02.19. */
- /* */
- while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data) ==
- _FAIL) {
- ret = false;
- break;
- }
-
- if (efuse_data == 0xFF)
- break;
-
- if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
- efuse_OneByteRead23a(padapter, efuse_addr++, &efuse_data);
- if (ALL_WORDS_DISABLED(efuse_data)) {
- DBG_8723A("%s: Error!! All words disabled!\n",
- __func__);
- continue;
- }
-
- hoffset |= ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
-
- if (hoffset == offset) {
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(hworden & (0x01 << i))) {
- ReadEFuseByte23a(padapter, efuse_addr++,
- &efuse_data);
- data[i * 2] = efuse_data;
-
- ReadEFuseByte23a(padapter, efuse_addr++,
- &efuse_data);
- data[(i * 2) + 1] = efuse_data;
- }
- }
- } else {
- word_cnts = Efuse_CalculateWordCnts23a(hworden);
- efuse_addr += word_cnts * 2;
- }
- }
-
- return ret;
-}
-
void rtl8723a_read_chip_version(struct rtw_adapter *padapter)
{
u32 value32;
@@ -1126,6 +1038,21 @@ exit:
return ret;
}
+void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
+{
+ struct txrpt_ccx_8723a *txrpt_ccx = buf;
+ struct submit_ctx *pack_tx_ops = &adapter->xmitpriv.ack_tx_ops;
+
+ if (txrpt_ccx->int_ccx && adapter->xmitpriv.ack_tx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter)
{
u8 val;
@@ -1326,18 +1253,17 @@ c. APSD_CTRL 0x600[7:0] = 0x40
d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
***************************************/
- u8 eRFPath = 0, value8 = 0;
+ u8 value8;
rtl8723au_write8(padapter, REG_TXPAUSE, 0xFF);
- PHY_SetRFReg(padapter, (enum RF_RADIO_PATH) eRFPath, 0x0, bMaskByte0, 0x0);
+ PHY_SetRFReg(padapter, RF_PATH_A, 0x0, bMaskByte0, 0x0);
- value8 |= APSDOFF;
+ value8 = APSDOFF;
rtl8723au_write8(padapter, REG_APSD_CTRL, value8); /* 0x40 */
/* Set BB reset at first */
- value8 = 0;
- value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ value8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn;
rtl8723au_write8(padapter, REG_SYS_FUNC_EN, value8); /* 0x16 */
/* Set global reset. */
@@ -1350,11 +1276,6 @@ e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
/* RT_TRACE(COMP_INIT, DBG_LOUD, ("======> RF off and reset BB.\n")); */
}
-static void _DisableRFAFEAndResetBB(struct rtw_adapter *padapter)
-{
- _DisableRFAFEAndResetBB8192C(padapter);
-}
-
static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
bool bWithoutHWSM)
{
@@ -1368,18 +1289,18 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
i. SYS_FUNC_EN 0x02[10]= 1 enable MCU register,
(8051 enable)
******************************/
- u16 valu16 = 0;
+ u16 valu16;
rtl8723au_write8(padapter, REG_MCUFWDL, 0);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
/* reset MCU , 8051 */
rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 & (~FEN_CPUEN));
+ valu16 & ~FEN_CPUEN);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN) & 0x0FFF;
/* reset MAC */
rtl8723au_write16(padapter, REG_SYS_FUNC_EN,
- valu16 | (FEN_HWPDN | FEN_ELDR));
+ valu16 | FEN_HWPDN | FEN_ELDR);
valu16 = rtl8723au_read16(padapter, REG_SYS_FUNC_EN);
/* enable MCU , 8051 */
@@ -1387,43 +1308,41 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
valu16 | FEN_CPUEN);
} else {
u8 retry_cnts = 0;
+ u8 val8;
+
+ val8 = rtl8723au_read8(padapter, REG_MCUFWDL);
/* 2010/08/12 MH For USB SS, we can not stop 8051 when we
are trying to enter IPS/HW&SW radio off. For
S3/S4/S5/Disable, we can stop 8051 because */
/* we will init FW when power on again. */
/* If we want to SS mode, we can not reset 8051. */
- if (rtl8723au_read8(padapter, REG_MCUFWDL) & BIT(1)) {
+ if ((val8 & BIT(1)) && padapter->bFWReady) {
/* IF fw in RAM code, do reset */
- if (padapter->bFWReady) {
- /* 2010/08/25 MH Accordign to RD alfred's
- suggestion, we need to disable other */
- /* HRCV INT to influence 8051 reset. */
- rtl8723au_write8(padapter, REG_FWIMR, 0x20);
- /* 2011/02/15 MH According to Alex's
- suggestion, close mask to prevent
- incorrect FW write operation. */
- rtl8723au_write8(padapter, REG_FTIMR, 0x00);
- rtl8723au_write8(padapter, REG_FSIMR, 0x00);
-
- /* 8051 reset by self */
- rtl8723au_write8(padapter, REG_HMETFR + 3,
- 0x20);
-
- while ((retry_cnts++ < 100) &&
- (FEN_CPUEN &
- rtl8723au_read16(padapter,
- REG_SYS_FUNC_EN))) {
- udelay(50); /* us */
- }
+ /* 2010/08/25 MH Accordign to RD alfred's
+ suggestion, we need to disable other */
+ /* HRCV INT to influence 8051 reset. */
+ rtl8723au_write8(padapter, REG_FWIMR, 0x20);
+ /* 2011/02/15 MH According to Alex's
+ suggestion, close mask to prevent
+ incorrect FW write operation. */
+ rtl8723au_write8(padapter, REG_FTIMR, 0x00);
+ rtl8723au_write8(padapter, REG_FSIMR, 0x00);
+
+ /* 8051 reset by self */
+ rtl8723au_write8(padapter, REG_HMETFR + 3, 0x20);
+
+ while ((retry_cnts++ < 100) &&
+ (rtl8723au_read16(padapter, REG_SYS_FUNC_EN) &
+ FEN_CPUEN)) {
+ udelay(50); /* us */
+ }
- if (retry_cnts >= 100) {
- /* Reset MAC and Enable 8051 */
- rtl8723au_write8(padapter,
- REG_SYS_FUNC_EN + 1,
- 0x50);
- mdelay(10);
- }
+ if (retry_cnts >= 100) {
+ /* Reset MAC and Enable 8051 */
+ rtl8723au_write8(padapter,
+ REG_SYS_FUNC_EN + 1, 0x50);
+ mdelay(10);
}
}
/* Reset MAC and Enable 8051 */
@@ -1450,12 +1369,6 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
}
}
-static void _ResetDigitalProcedure1(struct rtw_adapter *padapter,
- bool bWithoutHWSM)
-{
- _ResetDigitalProcedure1_92C(padapter, bWithoutHWSM);
-}
-
static void _ResetDigitalProcedure2(struct rtw_adapter *padapter)
{
/*****************************
@@ -1472,8 +1385,8 @@ m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- u16 value16 = 0;
- u8 value8 = 0;
+ u16 value16;
+ u8 value8;
if (bWithoutHWSM) {
/*****************************
@@ -1487,7 +1400,7 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
/* rtl8723au_write8(padapter, REG_LDOV12D_CTRL, 0x54); */
value8 = rtl8723au_read8(padapter, REG_LDOV12D_CTRL);
- value8 &= (~LDV12_EN);
+ value8 &= ~LDV12_EN;
rtl8723au_write8(padapter, REG_LDOV12D_CTRL, value8);
/* RT_TRACE(COMP_INIT, DBG_LOUD,
(" REG_LDOV12D_CTRL Reg0x21:0x%02x.\n", value8)); */
@@ -1509,9 +1422,9 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
use HW to shut down 8051 automatically. */
/* Becasue suspend operatione need the asistance of 8051
to wait for 3ms. */
- value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
} else {
- value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ value16 = APDM_HOST | AFSM_HSUS | PFM_ALDN;
}
rtl8723au_write16(padapter, REG_APS_FSMCO, value16); /* 0x4802 */
@@ -1522,16 +1435,14 @@ static void _DisableAnalog(struct rtw_adapter *padapter, bool bWithoutHWSM)
/* HW Auto state machine */
int CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
{
- int rtStatus = _SUCCESS;
-
if (padapter->bSurpriseRemoved) {
- return rtStatus;
+ return _SUCCESS;
}
/* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(padapter);
+ _DisableRFAFEAndResetBB8192C(padapter);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(padapter, false);
+ _ResetDigitalProcedure1_92C(padapter, false);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
_DisableGPIO(padapter);
@@ -1542,25 +1453,21 @@ int CardDisableHWSM(struct rtw_adapter *padapter, u8 resetMCU)
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("======> Card disable finished.\n"));
- return rtStatus;
+ return _SUCCESS;
}
/* without HW Auto state machine */
int CardDisableWithoutHWSM(struct rtw_adapter *padapter)
{
- int rtStatus = _SUCCESS;
-
- /* RT_TRACE(COMP_INIT, DBG_LOUD,
- ("======> Card Disable Without HWSM .\n")); */
if (padapter->bSurpriseRemoved) {
- return rtStatus;
+ return _SUCCESS;
}
/* RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(padapter);
+ _DisableRFAFEAndResetBB8192C(padapter);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(padapter, true);
+ _ResetDigitalProcedure1_92C(padapter, true);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
_DisableGPIO(padapter);
@@ -1573,29 +1480,27 @@ int CardDisableWithoutHWSM(struct rtw_adapter *padapter)
/* RT_TRACE(COMP_INIT, DBG_LOUD,
("<====== Card Disable Without HWSM .\n")); */
- return rtStatus;
+ return _SUCCESS;
}
void Hal_InitPGData(struct rtw_adapter *padapter, u8 *PROMContent)
{
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
- if (false == pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
if (!pEEPROM->EepromOrEfuse) {
/* Read EFUSE real map to shadow. */
EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy((void *)PROMContent,
- (void *)pEEPROM->efuse_eeprom_data,
+ memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
HWSET_MAX_SIZE);
}
- } else { /* autoload fail */
+ } else {
RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
("AutoLoad Fail reported from CR9346!!\n"));
-/* pHalData->AutoloadFailFlag = true; */
/* update to default value 0xFF */
- if (false == pEEPROM->EepromOrEfuse)
+ if (!pEEPROM->EepromOrEfuse)
EFUSE_ShadowMapUpdate23a(padapter, EFUSE_WIFI);
- memcpy((void *)PROMContent, (void *)pEEPROM->efuse_eeprom_data,
+ memcpy(PROMContent, pEEPROM->efuse_eeprom_data,
HWSET_MAX_SIZE);
}
}
@@ -1945,13 +1850,13 @@ Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
/* */
/* ThermalMeter from EEPROM */
/* */
- if (AutoloadFail == false)
+ if (!AutoloadFail)
pHalData->EEPROMThermalMeter =
PROMContent[EEPROM_THERMAL_METER_8723A];
else
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- if ((pHalData->EEPROMThermalMeter == 0xff) || (AutoloadFail == true)) {
+ if ((pHalData->EEPROMThermalMeter == 0xff) || AutoloadFail) {
pHalData->bAPKThermalMeterIgnore = true;
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
}
@@ -1960,10 +1865,6 @@ Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter,
pHalData->EEPROMThermalMeter);
}
-void Hal_InitChannelPlan23a(struct rtw_adapter *padapter)
-{
-}
-
static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
u16 *usPtr = (u16 *) ptxdesc;
@@ -1981,254 +1882,6 @@ static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
}
-static void fill_txdesc_sectype(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
- switch (pattrib->encrypt) {
- /* SEC_TYPE */
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- ptxdesc->sectype = 1;
- break;
-
- case WLAN_CIPHER_SUITE_CCMP:
- ptxdesc->sectype = 3;
- break;
-
- case 0:
- default:
- break;
- }
- }
-}
-
-static void fill_txdesc_vcs(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- /* DBG_8723A("cvs_mode =%d\n", pattrib->vcs_mode); */
-
- switch (pattrib->vcs_mode) {
- case RTS_CTS:
- ptxdesc->rtsen = 1;
- break;
-
- case CTS_TO_SELF:
- ptxdesc->cts2self = 1;
- break;
-
- case NONE_VCS:
- default:
- break;
- }
-
- if (pattrib->vcs_mode) {
- ptxdesc->hw_rts_en = 1; /* ENABLE HW RTS */
-
- /* Set RTS BW */
- if (pattrib->ht_en) {
- if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
- ptxdesc->rts_bw = 1;
-
- switch (pattrib->ch_offset) {
- case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
- ptxdesc->rts_sc = 0;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_LOWER:
- ptxdesc->rts_sc = 1;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_UPPER:
- ptxdesc->rts_sc = 2;
- break;
-
- default:
- ptxdesc->rts_sc = 3; /* Duplicate */
- break;
- }
- }
- }
-}
-
-static void fill_txdesc_phy(struct pkt_attrib *pattrib,
- struct txdesc_8723a *ptxdesc)
-{
- if (pattrib->ht_en) {
- if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
- ptxdesc->data_bw = 1;
-
- switch (pattrib->ch_offset) {
- case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
- ptxdesc->data_sc = 0;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_LOWER:
- ptxdesc->data_sc = 1;
- break;
-
- case HAL_PRIME_CHNL_OFFSET_UPPER:
- ptxdesc->data_sc = 2;
- break;
-
- default:
- ptxdesc->data_sc = 3; /* Duplicate */
- break;
- }
- }
-}
-
-static void rtl8723a_fill_default_txdesc(struct xmit_frame *pxmitframe,
- u8 *pbuf)
-{
- struct rtw_adapter *padapter;
- struct hal_data_8723a *pHalData;
- struct dm_priv *pdmpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
- struct pkt_attrib *pattrib;
- struct txdesc_8723a *ptxdesc;
- s32 bmcst;
-
- padapter = pxmitframe->padapter;
- pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
-
- pattrib = &pxmitframe->attrib;
- bmcst = is_multicast_ether_addr(pattrib->ra);
-
- ptxdesc = (struct txdesc_8723a *)pbuf;
-
- if (pxmitframe->frame_tag == DATA_FRAMETAG) {
- ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
-
- if (pattrib->ampdu_en == true)
- ptxdesc->agg_en = 1; /* AGG EN */
- else
- ptxdesc->bk = 1; /* AGG BK */
-
- ptxdesc->qsel = pattrib->qsel;
- ptxdesc->rate_id = pattrib->raid;
-
- fill_txdesc_sectype(pattrib, ptxdesc);
-
- ptxdesc->seq = pattrib->seqnum;
-
- if ((pattrib->ether_type != 0x888e) &&
- (pattrib->ether_type != 0x0806) &&
- (pattrib->dhcp_pkt != 1)) {
- /* Non EAP & ARP & DHCP type data packet */
-
- fill_txdesc_vcs(pattrib, ptxdesc);
- fill_txdesc_phy(pattrib, ptxdesc);
-
- ptxdesc->rtsrate = 8; /* RTS Rate = 24M */
- ptxdesc->data_ratefb_lmt = 0x1F;
- ptxdesc->rts_ratefb_lmt = 0xF;
-
- /* use REG_INIDATA_RATE_SEL value */
- ptxdesc->datarate =
- pdmpriv->INIDATA_RATE[pattrib->mac_id];
-
- } else {
- /* EAP data packet and ARP packet. */
- /* Use the 1M data rate to send the EAP/ARP packet. */
- /* This will maybe make the handshake smooth. */
-
- ptxdesc->bk = 1; /* AGG BK */
- ptxdesc->userate = 1; /* driver uses rate */
- if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
- ptxdesc->data_short = 1;
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- }
- } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
-/* RT_TRACE(_module_hal_xmit_c_, _drv_notice_,
- ("%s: MGNT_FRAMETAG\n", __func__)); */
-
- ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
- ptxdesc->qsel = pattrib->qsel;
- ptxdesc->rate_id = pattrib->raid; /* Rate ID */
- ptxdesc->seq = pattrib->seqnum;
- ptxdesc->userate = 1; /* driver uses rate, 1M */
- ptxdesc->rty_lmt_en = 1; /* retry limit enable */
- ptxdesc->data_rt_lmt = 6; /* retry limit = 6 */
-
- /* CCX-TXRPT ack for xmit mgmt frames. */
- if (pxmitframe->ack_report)
- ptxdesc->ccx = 1;
-
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
- RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
- ("%s: TXAGG_FRAMETAG\n", __func__));
- } else {
- RT_TRACE(_module_hal_xmit_c_, _drv_warning_,
- ("%s: frame_tag = 0x%x\n", __func__,
- pxmitframe->frame_tag));
-
- ptxdesc->macid = 4; /* CAM_ID(MAC_ID) */
- ptxdesc->rate_id = 6; /* Rate ID */
- ptxdesc->seq = pattrib->seqnum;
- ptxdesc->userate = 1; /* driver uses rate */
- ptxdesc->datarate = MRateToHwRate23a(pmlmeext->tx_rate);
- }
-
- ptxdesc->pktlen = pattrib->last_txcmdsz;
- ptxdesc->offset = TXDESC_SIZE + OFFSET_SZ;
- if (bmcst)
- ptxdesc->bmc = 1;
- ptxdesc->ls = 1;
- ptxdesc->fs = 1;
- ptxdesc->own = 1;
-
- /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
- /* (1) The sequence number of each non-Qos frame / broadcast /
- * multicast / mgnt frame should be controled by Hw because Fw
- * will also send null data which we cannot control when Fw LPS enable.
- * --> default enable non-Qos data sequense number.
- 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet,
- * because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port
- * non-Qos packets. */
- /* 2010.06.23. Added by tynli. */
- if (!pattrib->qos_en) {
- /* Hw set sequence number */
- ptxdesc->hwseq_en = 1; /* HWSEQ_EN */
- ptxdesc->hwseq_sel = 0; /* HWSEQ_SEL */
- }
-}
-
-/*
- * Description:
- *
- * Parameters:
- * pxmitframe xmitframe
- * pbuf where to fill tx desc
- */
-void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pbuf)
-{
- struct tx_desc *pdesc;
-
- pdesc = (struct tx_desc *)pbuf;
- memset(pdesc, 0, sizeof(struct tx_desc));
-
- rtl8723a_fill_default_txdesc(pxmitframe, pbuf);
-
- pdesc->txdw0 = cpu_to_le32(pdesc->txdw0);
- pdesc->txdw1 = cpu_to_le32(pdesc->txdw1);
- pdesc->txdw2 = cpu_to_le32(pdesc->txdw2);
- pdesc->txdw3 = cpu_to_le32(pdesc->txdw3);
- pdesc->txdw4 = cpu_to_le32(pdesc->txdw4);
- pdesc->txdw5 = cpu_to_le32(pdesc->txdw5);
- pdesc->txdw6 = cpu_to_le32(pdesc->txdw6);
- pdesc->txdw7 = cpu_to_le32(pdesc->txdw7);
- rtl8723a_cal_txdesc_chksum(pdesc);
-}
-
/*
* Description: In normal chip, we should send some packet to Hw which
* will be used by Fw in FW LPS mode. The function is to fill the Tx
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
index 3d4d7ec27509..88e91cd8ebb9 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_phycfg.c
@@ -418,7 +418,6 @@ PHY_SetRFReg(struct rtw_adapter *Adapter, enum RF_RADIO_PATH eRFPath,
*---------------------------------------------------------------------------*/
int PHY_MACConfig8723A(struct rtw_adapter *Adapter)
{
- int rtStatus = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
bool is92C = IS_92C_SERIAL(pHalData->VersionID);
@@ -433,7 +432,7 @@ int PHY_MACConfig8723A(struct rtw_adapter *Adapter)
if (is92C && (BOARD_USB_DONGLE == pHalData->BoardType))
rtl8723au_write8(Adapter, 0x40, 0x04);
- return rtStatus;
+ return _SUCCESS;
}
/**
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index 2dc0886e5f90..1aad4384471c 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -42,21 +42,6 @@
#include <rtl8723a_hal.h>
#include <usb_ops_linux.h>
-/*---------------------------Define Local Constant---------------------------*/
-/* Define local structure for debug!!!!! */
-struct rf_shadow_compare_map {
- /* Shadow register value */
- u32 Value;
- /* Compare or not flag */
- u8 Compare;
- /* Record If it had ever modified unpredicted */
- u8 ErrorOrNot;
- /* Recorver Flag */
- u8 Recorver;
- /* */
- u8 Driver_Write;
-};
-
/*-----------------------------------------------------------------------------
* Function: PHY_RF6052SetBandwidth()
*
@@ -71,20 +56,23 @@ struct rf_shadow_compare_map {
*
* Note: For RF type 0222D
*---------------------------------------------------------------------------*/
-void rtl8723a_phy_rf6052set_bw(
- struct rtw_adapter *Adapter,
- enum ht_channel_width Bandwidth) /* 20M or 40M */
+void rtl8723a_phy_rf6052set_bw(struct rtw_adapter *Adapter,
+ enum ht_channel_width Bandwidth) /* 20M or 40M */
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
- pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400);
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ pHalData->RfRegChnlVal[0] =
+ (pHalData->RfRegChnlVal[0] & 0xfffff3ff) | 0x0400;
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
+ pHalData->RfRegChnlVal[0]);
break;
case HT_CHANNEL_WIDTH_40:
- pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ pHalData->RfRegChnlVal[0] =
+ (pHalData->RfRegChnlVal[0] & 0xfffff3ff);
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask,
+ pHalData->RfRegChnlVal[0]);
break;
default:
break;
@@ -108,7 +96,8 @@ void rtl8723a_phy_rf6052set_bw(
*
*---------------------------------------------------------------------------*/
-void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerlevel)
+void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter,
+ u8 *pPowerlevel)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
@@ -118,7 +107,8 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
u8 idx1, idx2;
u8 *ptr;
- /* According to SD3 eechou's suggestion, we need to disable turbo scan for RU. */
+ /* According to SD3 eechou's suggestion, we need to disable
+ turbo scan for RU. */
/* Otherwise, external PA will be broken if power index > 0x20. */
if (pHalData->EEPROMRegulatory != 0 || pHalData->ExternalPA)
TurboScanOff = true;
@@ -131,29 +121,37 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
if (TurboScanOff) {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
- (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
- /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ TxAGC[idx1] = pPowerlevel[idx1] |
+ (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) |
+ (pPowerlevel[idx1] << 24);
+ /* 2010/10/18 MH For external PA module.
+ We need to limit power index to be less
+ than 0x20. */
if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
TxAGC[idx1] = 0x20;
}
}
} else {
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx
+ * power. It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable driver dynamic tx power
+ * mechanism because it is referenced by BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other
+ * and maintained independently. Thanks for Lanhsin's reminder. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
TxAGC[RF_PATH_A] = 0x10101010;
TxAGC[RF_PATH_B] = 0x10101010;
- } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
+ } else if (pdmpriv->DynamicTxHighPowerLvl ==
+ TxHighPwrLevel_Level2) {
TxAGC[RF_PATH_A] = 0x00000000;
TxAGC[RF_PATH_B] = 0x00000000;
} else {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
- (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ TxAGC[idx1] = pPowerlevel[idx1] |
+ (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) |
+ (pPowerlevel[idx1] << 24);
}
if (pHalData->EEPROMRegulatory == 0) {
@@ -178,29 +176,24 @@ void rtl823a_phy_rf6052setccktxpower(struct rtw_adapter *Adapter, u8 *pPowerleve
}
/* rf-A cck tx power */
- tmpval = TxAGC[RF_PATH_A]&0xff;
+ tmpval = TxAGC[RF_PATH_A] & 0xff;
PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
- tmpval = TxAGC[RF_PATH_A]>>8;
+ tmpval = TxAGC[RF_PATH_A] >> 8;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
/* rf-B cck tx power */
- tmpval = TxAGC[RF_PATH_B]>>24;
+ tmpval = TxAGC[RF_PATH_B] >> 24;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
- tmpval = TxAGC[RF_PATH_B]&0x00ffffff;
+ tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
} /* PHY_RF6052SetCckTxPower */
/* powerbase0 for OFDM rates */
/* powerbase1 for HT MCS rates */
-static void getPowerBase(
- struct rtw_adapter *Adapter,
- u8 *pPowerLevel,
- u8 Channel,
- u32 *OfdmBase,
- u32 *MCSBase
- )
+static void getPowerBase(struct rtw_adapter *Adapter, u8 *pPowerLevel,
+ u8 Channel, u32 *OfdmBase, u32 *MCSBase)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
u32 powerBase0, powerBase1;
u8 Legacy_pwrdiff = 0;
s8 HT20_pwrdiff = 0;
@@ -211,8 +204,9 @@ static void getPowerBase(
Legacy_pwrdiff = pHalData->TxPwrLegacyHtDiff[i][Channel-1];
powerBase0 = powerlevel[i] + Legacy_pwrdiff;
- powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
- *(OfdmBase+i) = powerBase0;
+ powerBase0 = powerBase0 << 24 | powerBase0 << 16 |
+ powerBase0 << 8 | powerBase0;
+ *(OfdmBase + i) = powerBase0;
}
for (i = 0; i < 2; i++) {
@@ -222,36 +216,35 @@ static void getPowerBase(
powerlevel[i] += HT20_pwrdiff;
}
powerBase1 = powerlevel[i];
- powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
- *(MCSBase+i) = powerBase1;
+ powerBase1 = powerBase1 << 24 | powerBase1 << 16 |
+ powerBase1 << 8 | powerBase1;
+ *(MCSBase + i) = powerBase1;
}
}
-static void getTxPowerWriteValByRegulatory(
- struct rtw_adapter *Adapter,
- u8 Channel,
- u8 index,
- u32 *powerBase0,
- u32 *powerBase1,
- u32 *pOutWriteVal
- )
+static void
+getTxPowerWriteValByRegulatory(struct rtw_adapter *Adapter, u8 Channel,
+ u8 index, u32 *powerBase0, u32 *powerBase1,
+ u32 *pOutWriteVal)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &pHalData->dmpriv;
- u8 i, chnlGroup = 0, pwr_diff_limit[4];
- u32 writeVal, customer_limit, rf;
+ u8 i, chnlGroup = 0, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
/* Index 0 & 1 = legacy OFDM, 2-5 = HT_MCS rate */
for (rf = 0; rf < 2; rf++) {
switch (pHalData->EEPROMRegulatory) {
case 0: /* Realtek better performance */
- /* increase power diff defined by Realtek for large power */
+ /* increase power diff defined by Realtek for
+ * large power */
chnlGroup = 0;
writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
((index < 2) ? powerBase0[rf] : powerBase1[rf]);
break;
case 1: /* Realtek regulatory */
- /* increase power diff defined by Realtek for regulatory */
+ /* increase power diff defined by Realtek for
+ * regulatory */
if (pHalData->pwrGroupCnt == 1)
chnlGroup = 0;
if (pHalData->pwrGroupCnt >= 3) {
@@ -262,17 +255,20 @@ static void getTxPowerWriteValByRegulatory(
else if (Channel > 9)
chnlGroup = 2;
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ if (pHalData->CurrentChannelBW ==
+ HT_CHANNEL_WIDTH_20)
chnlGroup++;
else
chnlGroup += 4;
}
writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf?8:0)] +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
break;
case 2: /* Better regulatory */
- /* don't increase any power diff */
- writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ /* don't increase any power diff */
+ writeVal = ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
break;
case 3: /* Customer defined power diff. */
chnlGroup = 0;
@@ -299,28 +295,34 @@ static void getTxPowerWriteValByRegulatory(
break;
}
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independantly. Thanks for Lanhsin's reminder. */
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power.
+ It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable driver dynamic tx power mechanism
+ because it is referenced by BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other and
+ maintained independently. Thanks for Lanhsin's reminder. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
writeVal = 0x14141414;
- else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
+ else if (pdmpriv->DynamicTxHighPowerLvl ==
+ TxHighPwrLevel_Level2)
writeVal = 0x00000000;
- /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
- /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
+ /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
+ /* This mechanism is only applied when
+ Driver-Highpower-Mechanism is OFF. */
if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
writeVal = writeVal - 0x06060606;
else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
writeVal = writeVal;
- *(pOutWriteVal+rf) = writeVal;
+ *(pOutWriteVal + rf) = writeVal;
}
}
-static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue)
+static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index,
+ u32 *pValue)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
u16 RegOffset_A[6] = {
rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
@@ -338,12 +340,13 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
for (rf = 0; rf < 2; rf++) {
writeVal = pValue[rf];
for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((writeVal & (0x7f<<(i*8)))>>(i*8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = (u8)((writeVal &
+ (0x7f << (i * 8))) >> (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
pwr_val[i] = RF6052_MAX_TX_PWR;
}
- writeVal = (pwr_val[3]<<24) | (pwr_val[2]<<16) |
- (pwr_val[1]<<8) | pwr_val[0];
+ writeVal = pwr_val[3] << 24 | pwr_val[2] << 16 |
+ pwr_val[1] << 8 | pwr_val[0];
if (rf == 0)
RegOffset = RegOffset_A[index];
@@ -352,7 +355,8 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
PHY_SetBBReg(Adapter, RegOffset, bMaskDWord, writeVal);
- /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
+ /* 201005115 Joseph: Set Tx Power diff for Tx power
+ training mechanism. */
if (((pHalData->rf_type == RF_2T2R) &&
(RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
RegOffset == rTxAGC_B_Mcs15_Mcs12)) ||
@@ -360,15 +364,19 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
(RegOffset == rTxAGC_A_Mcs07_Mcs04 ||
RegOffset == rTxAGC_B_Mcs07_Mcs04))) {
writeVal = pwr_val[3];
- if (RegOffset == rTxAGC_A_Mcs15_Mcs12 || RegOffset == rTxAGC_A_Mcs07_Mcs04)
+ if (RegOffset == rTxAGC_A_Mcs15_Mcs12 ||
+ RegOffset == rTxAGC_A_Mcs07_Mcs04)
RegOffset = 0xc90;
- if (RegOffset == rTxAGC_B_Mcs15_Mcs12 || RegOffset == rTxAGC_B_Mcs07_Mcs04)
+ if (RegOffset == rTxAGC_B_Mcs15_Mcs12 ||
+ RegOffset == rTxAGC_B_Mcs07_Mcs04)
RegOffset = 0xc98;
for (i = 0; i < 3; i++) {
if (i != 2)
- writeVal = (writeVal > 8) ? (writeVal-8) : 0;
+ writeVal = (writeVal > 8) ?
+ (writeVal - 8) : 0;
else
- writeVal = (writeVal > 6) ? (writeVal-6) : 0;
+ writeVal = (writeVal > 6) ?
+ (writeVal - 6) : 0;
rtl8723au_write8(Adapter, RegOffset + i,
(u8)writeVal);
}
@@ -379,8 +387,9 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
* Function: PHY_RF6052SetOFDMTxPower
*
* Overview: For legacy and HY OFDM, we must read EEPROM TX power index for
- * different channel and read original value in TX power register area from
- * 0xe00. We increase offset and original value to be correct tx pwr.
+ * different channel and read original value in TX power
+ * register area from 0xe00. We increase offset and
+ * original value to be correct tx pwr.
*
* Input: NONE
*
@@ -389,20 +398,23 @@ static void writeOFDMPowerReg(struct rtw_adapter *Adapter, u8 index, u32 *pValue
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 11/05/2008 MHC Simulate 8192 series method.
- * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow dure to
- * A/B pwr difference or legacy/HT pwr diff.
- * 2. We concern with path B legacy/HT OFDM difference.
- * 01/22/2009 MHC Support new EPRO format from SD3.
+ * When Remark
+ * 11/05/2008 MHC Simulate 8192 series method.
+ * 01/06/2009 MHC 1. Prevent Path B tx power overflow or
+ * underflow due to A/B pwr difference or
+ * legacy/HT pwr diff.
+ * 2. We are concerned with path B legacy/HT OFDM difference.
+ * 01/22/2009 MHC Support new EPRO format from SD3.
*
*---------------------------------------------------------------------------*/
-void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter, u8 *pPowerLevel, u8 Channel)
+void rtl8723a_PHY_RF6052SetOFDMTxPower(struct rtw_adapter *Adapter,
+ u8 *pPowerLevel, u8 Channel)
{
u32 writeVal[2], powerBase0[2], powerBase1[2];
u8 index = 0;
- getPowerBase(Adapter, pPowerLevel, Channel, &powerBase0[0], &powerBase1[0]);
+ getPowerBase(Adapter, pPowerLevel, Channel,
+ &powerBase0[0], &powerBase1[0]);
for (index = 0; index < 6; index++) {
getTxPowerWriteValByRegulatory(Adapter, Channel, index,
@@ -416,7 +428,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
{
u32 u4RegValue = 0;
u8 eRFPath;
- struct bb_reg_define *pPhyReg;
+ struct bb_reg_define *pPhyReg;
int rtStatus = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
@@ -430,15 +442,17 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
/*----Store original RFENV control type----*/
switch (eRFPath) {
case RF_PATH_A:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV);
break;
case RF_PATH_B:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV << 16);
break;
}
/*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
@@ -446,10 +460,12 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength,
+ 0x0); /* Set 1 to 4 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength,
+ 0x0); /* Set 0 to 12 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
 /*----Initialize RF from configuration file----*/
@@ -464,15 +480,16 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
/*----Restore RFENV control type----*/;
switch (eRFPath) {
case RF_PATH_A:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV, u4RegValue);
break;
case RF_PATH_B:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
+ bRFSI_RFENV << 16, u4RegValue);
break;
}
if (rtStatus != _SUCCESS) {
- /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
goto phy_RF6052_Config_ParaFile_Fail;
}
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
deleted file mode 100644
index 6ea2f9efef64..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723a_xmit.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#define _RTL8723A_XMIT_C_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-#include <rtl8723a_hal.h>
-
-void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf)
-{
- struct txrpt_ccx_8723a *txrpt_ccx = buf;
-
- if (txrpt_ccx->int_ccx) {
- if (txrpt_ccx->pkt_ok)
- rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_SUCCESS);
- else
- rtw_ack_tx_done23a(&adapter->xmitpriv, RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_led.c b/drivers/staging/rtl8723au/hal/rtl8723au_led.c
deleted file mode 100644
index b946636af9b3..000000000000
--- a/drivers/staging/rtl8723au/hal/rtl8723au_led.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-
-#include "drv_types.h"
-#include "rtl8723a_hal.h"
-#include "rtl8723a_led.h"
-#include "usb_ops_linux.h"
-
-/* */
-/* LED object. */
-/* */
-
-/* */
-/* Prototype of protected function. */
-/* */
-
-/* */
-/* LED_819xUsb routines. */
-/* */
-
-/* Description: */
-/* Turn on LED according to LedPin specified. */
-void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
-{
- u8 LedCfg = 0;
-
- if ((padapter->bSurpriseRemoved == true) || (padapter->bDriverStopped == true))
- return;
- switch (pLed->LedPin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- /* SW control led0 on. */
- rtl8723au_write8(padapter, REG_LEDCFG0,
- (LedCfg&0xf0)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED1:
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG1, (LedCfg&0x00)|BIT(6));
- break;
- case LED_PIN_LED2:
- LedCfg = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG2, (LedCfg&0x80)|BIT(5));
- break;
- default:
- break;
- }
- pLed->bLedOn = true;
-}
-
-/* Description: */
-/* Turn off LED according to LedPin specified. */
-void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a *pLed)
-{
- u8 LedCfg = 0;
- /* struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); */
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- goto exit;
-
- switch (pLed->LedPin) {
- case LED_PIN_GPIO0:
- break;
- case LED_PIN_LED0:
- /* SW control led0 on. */
- rtl8723au_write8(padapter, REG_LEDCFG0,
- (LedCfg&0xf0)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED1:
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG1,
- (LedCfg&0x00)|BIT(5)|BIT(6));
- break;
- case LED_PIN_LED2:
- LedCfg = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* SW control led1 on. */
- rtl8723au_write8(padapter, REG_LEDCFG2,
- (LedCfg&0x80)|BIT(3)|BIT(5));
- break;
- default:
- break;
- }
-exit:
- pLed->bLedOn = false;
-}
-
-/* Interface to manipulate LED objects. */
-
-/* Description: */
-/* Initialize all LED_871x objects. */
-void
-rtl8723au_InitSwLeds(struct rtw_adapter *padapter)
-{
- struct led_priv *pledpriv = &padapter->ledpriv;
-
- pledpriv->LedControlHandler = LedControl871x23a;
- /* 8723as-vau wifi used led2 */
- InitLed871x23a(padapter, &pledpriv->SwLed0, LED_PIN_LED2);
-
-/* InitLed871x23a(padapter,&pledpriv->SwLed1, LED_PIN_LED2); */
-}
-
-/* Description: */
-/* DeInitialize all LED_819xUsb objects. */
-void
-rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- DeInitLed871x23a(&ledpriv->SwLed0);
-}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
index a67850fe6e5d..6070510bb470 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723au_xmit.c
@@ -21,18 +21,6 @@
/* include <rtl8192c_hal.h> */
#include <rtl8723a_hal.h>
-static void do_queue_select(struct rtw_adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("### do_queue_select priority =%d , qsel = %d\n",
- pattrib->priority, qsel));
-
- pattrib->qsel = qsel;
-}
-
static int urb_zero_packet_chk(struct rtw_adapter *padapter, int sz)
{
int blnSetTxDescOffset;
@@ -163,7 +151,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
memset(ptxdesc, 0, sizeof(struct tx_desc));
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
@@ -215,7 +203,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
}
- } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id&0x1f);
@@ -240,10 +228,11 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate23a(pmlmeext->tx_rate));
- } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
DBG_8723A("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
} else {
- DBG_8723A("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
+ DBG_8723A("pxmitframe->frame_tag = %d\n",
+ pxmitframe->frame_tag);
/* offset 4 */
ptxdesc->txdw1 |= cpu_to_le32((4)&0x1f);/* CAM_ID(MAC_ID) */
@@ -306,10 +295,10 @@ static int rtw_dump_xframe(struct rtw_adapter *padapter,
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
- (pxmitframe->attrib.ether_type != 0x0806) &&
- (pxmitframe->attrib.ether_type != 0x888e) &&
- (pxmitframe->attrib.dhcp_pkt != 1))
+ if (pxmitframe->frame_tag == DATA_FRAMETAG &&
+ pxmitframe->attrib.ether_type != ETH_P_ARP &&
+ pxmitframe->attrib.ether_type != ETH_P_PAE &&
+ pxmitframe->attrib.dhcp_pkt != 1)
rtw_issue_addbareq_cmd23a(padapter, pxmitframe);
mem_addr = pxmitframe->buf_addr;
@@ -392,7 +381,7 @@ bool rtl8723au_xmitframe_complete(struct rtw_adapter *padapter,
pxmitbuf->priv_data = pxmitframe;
- if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
if (pxmitframe->attrib.priority <= 15)/* TID0~15 */
res = rtw_xmitframe_coalesce23a(padapter, pxmitframe->pkt, pxmitframe);
@@ -440,7 +429,7 @@ bool rtl8723au_hal_xmit(struct rtw_adapter *padapter,
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- do_queue_select(padapter, pattrib);
+ pattrib->qsel = pattrib->priority;
spin_lock_bh(&pxmitpriv->lock);
#ifdef CONFIG_8723AU_AP_MODE
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
index a5de03d45aa9..febe5cedef8f 100644
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -21,11 +21,13 @@
#include <HalPwrSeqCmd.h>
#include <Hal8723PwrSeq.h>
#include <rtl8723a_hal.h>
-#include <rtl8723a_led.h>
#include <linux/ieee80211.h>
#include <usb_ops.h>
+static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
+ enum rt_rf_power_state eRFPowerState);
+
static void
_ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
{
@@ -61,42 +63,28 @@ _ConfigChipOutEP(struct rtw_adapter *pAdapter, u8 NumOutPipe)
(u32)NumOutPipe, (u32)pHalData->OutEpNumber)); */
}
-static bool rtl8723au_set_queue_pipe_mapping(struct rtw_adapter *pAdapter,
- u8 NumInPipe, u8 NumOutPipe)
+bool rtl8723au_chip_configure(struct rtw_adapter *padapter)
{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(pAdapter);
- bool result = false;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ u8 NumInPipe = pdvobjpriv->RtNumInPipes;
+ u8 NumOutPipe = pdvobjpriv->RtNumOutPipes;
- _ConfigChipOutEP(pAdapter, NumOutPipe);
+ _ConfigChipOutEP(padapter, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
if (pHalData->OutEpNumber == 1) {
if (NumInPipe != 1)
- return result;
+ return false;
}
- result = Hal_MappingOutPipe23a(pAdapter, NumOutPipe);
-
- return result;
-}
-
-void rtl8723au_chip_configure(struct rtw_adapter *padapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
-
- pHalData->interfaceIndex = pdvobjpriv->InterfaceNumber;
-
- rtl8723au_set_queue_pipe_mapping(padapter,
- pdvobjpriv->RtNumInPipes,
- pdvobjpriv->RtNumOutPipes);
+ return Hal_MappingOutPipe23a(padapter, NumOutPipe);
}
static int _InitPowerOn(struct rtw_adapter *padapter)
{
- int status = _SUCCESS;
- u16 value16 = 0;
- u8 value8 = 0;
+ u16 value16;
+ u8 value8;
/* RSV_CTRL 0x1C[7:0] = 0x00
unlock ISO/CLK/Power control register */
@@ -123,7 +111,7 @@ static int _InitPowerOn(struct rtw_adapter *padapter)
/* for Efuse PG, suggest by Jackie 2011.11.23 */
PHY_SetBBReg(padapter, REG_EFUSE_CTRL, BIT(28)|BIT(29)|BIT(30), 0x06);
- return status;
+ return _SUCCESS;
}
/* Shall USB interface init this? */
@@ -313,7 +301,7 @@ static void _InitNormalChipThreeOutEpPriority(struct rtw_adapter *Adapter)
_InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
}
-static void _InitNormalChipQueuePriority(struct rtw_adapter *Adapter)
+static void _InitQueuePriority(struct rtw_adapter *Adapter)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
@@ -333,11 +321,6 @@ static void _InitNormalChipQueuePriority(struct rtw_adapter *Adapter)
}
}
-static void _InitQueuePriority(struct rtw_adapter *Adapter)
-{
- _InitNormalChipQueuePriority(Adapter);
-}
-
static void _InitTransferPageSize(struct rtw_adapter *Adapter)
{
/* Tx page size is always 128. */
@@ -442,18 +425,6 @@ static void _InitEDCA(struct rtw_adapter *Adapter)
rtl8723au_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
}
-static void _InitHWLed(struct rtw_adapter *Adapter)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
-
- if (pledpriv->LedStrategy != HW_LED)
- return;
-
-/* HW led control */
-/* to do .... */
-/* must consider cases of antenna diversity/ commbo card/solo card/mini card */
-}
-
static void _InitRDGSetting(struct rtw_adapter *Adapter)
{
rtl8723au_write8(Adapter, REG_RD_CTRL, 0xFF);
@@ -480,7 +451,7 @@ static void _InitRFType(struct rtw_adapter *Adapter)
pHalData->rf_chip = RF_6052;
- if (is92CU == false) {
+ if (!is92CU) {
pHalData->rf_type = RF_1T1R;
DBG_8723A("Set RF Chip ID to RF_6052 and RF type to 1T1R.\n");
return;
@@ -527,23 +498,22 @@ enum rt_rf_power_state RfOnOffDetect23a(struct rtw_adapter *pAdapter)
return rfpowerstate;
}
-void _ps_open_RF23a(struct rtw_adapter *padapter);
-
int rtl8723au_hal_init(struct rtw_adapter *Adapter)
{
- u8 val8 = 0;
- u32 boundary;
- int status = _SUCCESS;
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u8 val8 = 0;
+ u32 boundary;
+ int status = _SUCCESS;
+ bool mac_on;
unsigned long init_start_time = jiffies;
Adapter->hw_init_completed = false;
if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_open_RF23a(Adapter);
+ phy_SsPwrSwitch92CU(Adapter, rf_on);
if (pHalData->bIQKInitialized) {
rtl8723a_phy_iq_calibrate(Adapter, true);
@@ -566,9 +536,9 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
/* 0x100 value of first mac is 0xEA while 0x100 value of secondary
is 0x00 */
if (val8 == 0xEA) {
- pHalData->bMACFuncEnable = false;
+ mac_on = false;
} else {
- pHalData->bMACFuncEnable = true;
+ mac_on = true;
RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
("%s: MAC has already power on\n", __func__));
}
@@ -587,7 +557,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
boundary = WMM_NORMAL_TX_PAGE_BOUNDARY;
}
- if (!pHalData->bMACFuncEnable) {
+ if (!mac_on) {
status = InitLLTTable23a(Adapter, boundary);
if (status == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
@@ -673,7 +643,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
pHalData->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)0, RF_CHNLBW, bRFRegOffsetMask);
pHalData->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum RF_RADIO_PATH)1, RF_CHNLBW, bRFRegOffsetMask);
- if (!pHalData->bMACFuncEnable) {
+ if (!mac_on) {
_InitQueueReservedPage(Adapter);
_InitTxBufferBoundary(Adapter);
}
@@ -694,8 +664,6 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
_InitRetryFunction(Adapter);
rtl8723a_InitBeaconParameters(Adapter);
- _InitHWLed(Adapter);
-
_BBTurnOnBlock(Adapter);
/* NicIFSetMacAddress(padapter, padapter->PermanentAddress); */
@@ -806,266 +774,108 @@ exit:
}
static void phy_SsPwrSwitch92CU(struct rtw_adapter *Adapter,
- enum rt_rf_power_state eRFPowerState,
- int bRegSSPwrLvl)
+ enum rt_rf_power_state eRFPowerState)
{
struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
- u8 value8;
- u8 bytetmp;
+ u8 sps0;
+
+ sps0 = rtl8723au_read8(Adapter, REG_SPS0_CTRL);
switch (eRFPowerState) {
case rf_on:
- if (bRegSSPwrLvl == 1) {
- /* 1. Enable MAC Clock. Can not be enabled now. */
- /* WriteXBYTE(REG_SYS_CLKR+1,
- ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
-
- /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL,
- rtl8723au_read8(Adapter, REG_SPS0_CTRL) |
- BIT(0) | BIT(3));
-
- /* 3. restore BB, AFE control register. */
- /* RF */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 1);
- else
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 1);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
-
- /* AFE */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x63DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x631B25A0);
-
- /* 4. issue 3-wire command that RF set to Rx idle
- mode. This is used to re-write the RX idle mode. */
- /* We can only prvide a usual value instead and then
- HW will modify the value by itself. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0,
- bRFRegOffsetMask, 0x32D95);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0x32D95);
- }
- } else { /* Level 2 or others. */
- /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
- disable AFE PLL */
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL, 0x81);
-
- /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
- gated AFE DIG_CLOCK */
- rtl8723au_write16(Adapter, REG_AFE_XTAL_CTRL, 0x800F);
- mdelay(1);
-
- /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL,
- rtl8723au_read8(Adapter, REG_SPS0_CTRL) |
- BIT(0) | BIT(3));
-
- /* 3. restore BB, AFE control register. */
- /* RF */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 1);
- else
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 1);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
-
- /* AFE */
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord, 0x63DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord, 0x631B25A0);
-
- /* 4. issue 3-wire command that RF set to Rx idle
- mode. This is used to re-write the RX idle mode. */
- /* We can only prvide a usual value instead and
- then HW will modify the value by itself. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0,
+ /* 1. Enable MAC Clock. Can not be enabled now. */
+ /* WriteXBYTE(REG_SYS_CLKR+1,
+ ReadXBYTE(REG_SYS_CLKR+1) | BIT(3)); */
+
+ /* 2. Force PWM, Enable SPS18_LDO_Marco_Block */
+ rtl8723au_write8(Adapter, REG_SPS0_CTRL,
+ sps0 | BIT(0) | BIT(3));
+
+ /* 3. restore BB, AFE control register. */
+ /* RF */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 1);
+ else
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x38, 1);
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 0);
+
+ /* AFE */
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x63DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x631B25A0);
+
+ /* 4. issue 3-wire command that RF set to Rx idle
+ mode. This is used to re-write the RX idle mode. */
+ /* We can only provide a usual value instead and then
+ HW will modify the value by itself. */
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0x32D95);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
bRFRegOffsetMask, 0x32D95);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0x32D95);
- }
-
- /* 5. gated MAC Clock */
- bytetmp = rtl8723au_read8(Adapter, REG_APSD_CTRL);
- rtl8723au_write8(Adapter, REG_APSD_CTRL,
- bytetmp & ~BIT(6));
-
- mdelay(10);
-
- /* Set BB reset at first */
- /* 0x16 */
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN, 0x17);
-
- /* Enable TX */
- rtl8723au_write8(Adapter, REG_TXPAUSE, 0x0);
}
break;
case rf_sleep:
case rf_off:
- value8 = rtl8723au_read8(Adapter, REG_SPS0_CTRL);
if (IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID))
- value8 &= ~BIT(0);
+ sps0 &= ~BIT(0);
else
- value8 &= ~(BIT(0) | BIT(3));
- if (bRegSSPwrLvl == 1) {
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL1\n"));
- /* Disable RF and BB only for SelectSuspend. */
-
- /* 1. Set BB/RF to shutdown. */
- /* (1) Reg878[5:3]= 0 RF rx_code for
- preamble power saving */
- /* (2)Reg878[21:19]= 0 Turn off RF-B */
- /* (3) RegC04[7:4]= 0 Turn off all paths
- for packet detection */
- /* (4) Reg800[1] = 1 enable preamble power
- saving */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
- PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
- PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
- PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 0);
- } else if (pHalData->rf_type == RF_1T1R) {
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 0);
- }
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
-
- /* 2 .AFE control register to power down. bit[30:22] */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
- PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x00DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x001B25A0);
-
- /* 3. issue 3-wire command that RF set to power down.*/
- PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0);
-
- /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL, value8);
- } else { /* Level 2 or others. */
- RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL2\n"));
- {
- u8 eRFPath = RF_PATH_A, value8 = 0;
- rtl8723au_write8(Adapter, REG_TXPAUSE, 0xFF);
- PHY_SetRFReg(Adapter,
- (enum RF_RADIO_PATH)eRFPath,
- 0x0, bMaskByte0, 0x0);
- value8 |= APSDOFF;
- /* 0x40 */
- rtl8723au_write8(Adapter, REG_APSD_CTRL,
- value8);
-
- /* After switch APSD, we need to delay
- for stability */
- mdelay(10);
-
- /* Set BB reset at first */
- value8 = 0;
- value8 |= (FEN_USBD | FEN_USBA |
- FEN_BB_GLB_RSTn);
- /* 0x16 */
- rtl8723au_write8(Adapter, REG_SYS_FUNC_EN,
- value8);
- }
-
- /* Disable RF and BB only for SelectSuspend. */
-
- /* 1. Set BB/RF to shutdown. */
- /* (1) Reg878[5:3]= 0 RF rx_code for
- preamble power saving */
- /* (2)Reg878[21:19]= 0 Turn off RF-B */
- /* (3) RegC04[7:4]= 0 Turn off all paths for
- packet detection */
- /* (4) Reg800[1] = 1 enable preamble power
- saving */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
- PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
- PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
- bMaskDWord);
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
- PHY_QueryBBReg(Adapter, rFPGA0_RFMOD,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x380038, 0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
- 0x38, 0);
- PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
-
- /* 2 .AFE control register to power down. bit[30:22] */
- Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
- PHY_QueryBBReg(Adapter, rRx_Wait_CCA,
- bMaskDWord);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x00DB25A0);
- else if (pHalData->rf_type == RF_1T1R)
- PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
- 0x001B25A0);
-
- /* 3. issue 3-wire command that RF set to power down. */
- PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
- if (pHalData->rf_type == RF_2T2R)
- PHY_SetRFReg(Adapter, RF_PATH_B, 0,
- bRFRegOffsetMask, 0);
-
- /* 4. Force PFM , disable SPS18_LDO_Marco_Block */
- rtl8723au_write8(Adapter, REG_SPS0_CTRL, value8);
-
- /* 2010/10/13 MH/Isaachsu exchange sequence. */
- /* h. AFE_PLL_CTRL 0x28[7:0] = 0x80
- disable AFE PLL */
- rtl8723au_write8(Adapter, REG_AFE_PLL_CTRL, 0x80);
- mdelay(1);
-
- /* i. AFE_XTAL_CTRL 0x24[15:0] = 0x880F
- gated AFE DIG_CLOCK */
- rtl8723au_write16(Adapter, REG_AFE_XTAL_CTRL, 0xA80F);
+ sps0 &= ~(BIT(0) | BIT(3));
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SS LVL1\n"));
+ /* Disable RF and BB only for SelectSuspend. */
+
+ /* 1. Set BB/RF to shutdown. */
+ /* (1) Reg878[5:3]= 0 RF rx_code for
+ preamble power saving */
+ /* (2)Reg878[21:19]= 0 Turn off RF-B */
+ /* (3) RegC04[7:4]= 0 Turn off all paths
+ for packet detection */
+ /* (4) Reg800[1] = 1 enable preamble power saving */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF0] =
+ PHY_QueryBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF1] =
+ PHY_QueryBBReg(Adapter, rOFDM0_TRxPathEnable,
+ bMaskDWord);
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_RF2] =
+ PHY_QueryBBReg(Adapter, rFPGA0_RFMOD, bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter,
+ 0x380038, 0);
+ } else if (pHalData->rf_type == RF_1T1R) {
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, 0x38, 0);
}
+ PHY_SetBBReg(Adapter, rOFDM0_TRxPathEnable, 0xf0, 0);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, BIT(1), 1);
+
+ /* 2 .AFE control register to power down. bit[30:22] */
+ Adapter->pwrctrlpriv.PS_BBRegBackup[PSBBREG_AFE0] =
+ PHY_QueryBBReg(Adapter, rRx_Wait_CCA, bMaskDWord);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x00DB25A0);
+ else if (pHalData->rf_type == RF_1T1R)
+ PHY_SetBBReg(Adapter, rRx_Wait_CCA, bMaskDWord,
+ 0x001B25A0);
+
+ /* 3. issue 3-wire command that RF set to power down.*/
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0, bRFRegOffsetMask, 0);
+ if (pHalData->rf_type == RF_2T2R)
+ PHY_SetRFReg(Adapter, RF_PATH_B, 0,
+ bRFRegOffsetMask, 0);
+
+ /* 4. Force PFM, disable SPS18_LDO_Marco_Block */
+ rtl8723au_write8(Adapter, REG_SPS0_CTRL, sps0);
break;
default:
break;
}
-
-} /* phy_PowerSwitch92CU */
-
-void _ps_open_RF23a(struct rtw_adapter *padapter)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- phy_SsPwrSwitch92CU(padapter, rf_on, 1);
}
static void CardDisableRTL8723U(struct rtw_adapter *Adapter)
@@ -1142,8 +952,7 @@ int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (rtl8723au_read_port(Adapter, RECV_BULK_IN_ADDR, 0,
- precvbuf) == _FAIL) {
+ if (rtl8723au_read_port(Adapter, 0, precvbuf) == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
("usb_rx_init: usb_read_port error\n"));
status = _FAIL;
@@ -1151,9 +960,9 @@ int rtl8723au_inirp_init(struct rtw_adapter *Adapter)
}
precvbuf++;
}
- if (rtl8723au_read_interrupt(Adapter, RECV_INT_IN_ADDR) == _FAIL) {
+ if (rtl8723au_read_interrupt(Adapter) == _FAIL) {
RT_TRACE(_module_hci_hal_init_c_, _drv_err_,
- ("usb_rx_init: usb_read_interrupt error\n"));
+ ("%s: usb_read_interrupt error\n", __func__));
status = _FAIL;
}
pHalData->IntrMask[0] = rtl8723au_read32(Adapter, REG_USB_HIMR);
@@ -1209,14 +1018,6 @@ static void _ReadBoardType(struct rtw_adapter *Adapter, u8 *PROMContent,
pHalData->ExternalPA = 1;
}
-static void _ReadLEDSetting(struct rtw_adapter *Adapter, u8 *PROMContent,
- bool AutoloadFail)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
-
- pledpriv->LedStrategy = HW_LED;
-}
-
static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter,
u8 *hwinfo, bool AutoLoadFail)
{
@@ -1263,7 +1064,6 @@ static void readAdapterInfo(struct rtw_adapter *padapter)
pEEPROM->bautoload_fail_flag);
Hal_EfuseParseThermalMeter_8723A(padapter, hwinfo,
pEEPROM->bautoload_fail_flag);
- _ReadLEDSetting(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
/* _ReadRFSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
/* _ReadPSSetting(Adapter, PROMContent, pEEPROM->bautoload_fail_flag); */
Hal_EfuseParseAntennaDiversity(padapter, hwinfo,
@@ -1276,10 +1076,6 @@ static void readAdapterInfo(struct rtw_adapter *padapter)
pEEPROM->bautoload_fail_flag);
Hal_EfuseParseXtal_8723A(padapter, hwinfo,
pEEPROM->bautoload_fail_flag);
- /* */
- /* The following part initialize some vars by PG info. */
- /* */
- Hal_InitChannelPlan23a(padapter);
/* hal_CustomizedBehavior_8723U(Adapter); */
@@ -1311,13 +1107,6 @@ static void _ReadRFType(struct rtw_adapter *Adapter)
pHalData->rf_chip = RF_6052;
}
-static void _ReadSilmComboMode(struct rtw_adapter *Adapter)
-{
- struct hal_data_8723a *pHalData = GET_HAL_DATA(Adapter);
-
- pHalData->SlimComboDbg = false; /* Default is not debug mode. */
-}
-
/* */
/* Description: */
/* We should set Efuse cell selection to WiFi cell in default. */
@@ -1350,10 +1139,6 @@ void rtl8723a_read_adapter_info(struct rtw_adapter *Adapter)
_ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
_ReadPROMContent(Adapter);
- /* 2010/10/25 MH THe function must be called after
- borad_type & IC-Version recognize. */
- _ReadSilmComboMode(Adapter);
-
/* MSG_8723A("%s()(done), rf_chip = 0x%x, rf_type = 0x%x\n",
__func__, pHalData->rf_chip, pHalData->rf_type); */
@@ -1417,17 +1202,17 @@ int GetHalDefVar8192CUsb(struct rtw_adapter *Adapter,
void rtl8723a_update_ramask(struct rtw_adapter *padapter,
u32 mac_id, u8 rssi_level)
{
- u8 init_rate = 0;
- u8 networkType, raid;
- u32 mask, rate_bitmap;
- u8 shortGIrate = false;
- int supportRateNum = 0;
struct sta_info *psta;
- struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct FW_Sta_Info *fw_sta;
+ struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
+ u8 init_rate, networkType, raid;
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ int supportRateNum;
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
@@ -1456,8 +1241,8 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
break;
case 1:/* for broadcast/multicast */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
+ supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
networkType = WIRELESS_11B;
else
@@ -1469,23 +1254,22 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
break;
default: /* for each sta in IBSS */
- supportRateNum = rtw_get_rateset_len23a(
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ fw_sta = &pmlmeinfo->FW_sta_info[mac_id];
+ supportRateNum = rtw_get_rateset_len23a(fw_sta->SupportedRates);
networkType = judge_network_type23a(padapter,
- pmlmeinfo->FW_sta_info[mac_id].SupportedRates,
- supportRateNum) & 0xf;
+ fw_sta->SupportedRates,
+ supportRateNum) & 0xf;
/* pmlmeext->cur_wireless_mode = networkType; */
raid = networktype_to_raid23a(networkType);
mask = update_supported_rate23a(cur_network->SupportedRates,
- supportRateNum);
+ supportRateNum);
/* todo: support HT in IBSS */
break;
}
/* mask &= 0x0fffffff; */
- rate_bitmap = 0x0fffffff;
rate_bitmap = ODM_Get_Rate_Bitmap23a(pHalData, mac_id, mask,
rssi_level);
DBG_8723A("%s => mac_id:%d, networkType:0x%02x, "
@@ -1493,15 +1277,14 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
__func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
mask &= rate_bitmap;
- mask |= ((raid<<28)&0xf0000000);
+ mask |= ((raid << 28) & 0xf0000000);
- init_rate = get_highest_rate_idx23a(mask)&0x3f;
+ init_rate = get_highest_rate_idx23a(mask) & 0x3f;
if (pHalData->fw_ractrl == true) {
u8 arg = 0;
- /* arg = (cam_idx-4)&0x1f;MACID */
- arg = mac_id&0x1f;/* MACID */
+ arg = mac_id & 0x1f;/* MACID */
arg |= BIT(7);
@@ -1516,7 +1299,7 @@ void rtl8723a_update_ramask(struct rtw_adapter *padapter,
if (shortGIrate == true)
init_rate |= BIT(6);
- rtl8723au_write8(padapter, (REG_INIDATA_RATE_SEL+mac_id),
+ rtl8723au_write8(padapter, (REG_INIDATA_RATE_SEL + mac_id),
init_rate);
}
diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
index c1b04c13c392..a6d16adce107 100644
--- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
@@ -317,7 +317,7 @@ urb_submit:
}
}
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter, u32 addr)
+int rtl8723au_read_interrupt(struct rtw_adapter *adapter)
{
int err;
unsigned int pipe;
@@ -545,8 +545,7 @@ static void usb_read_port_complete(struct urb *purb)
("usb_read_port_complete: (purb->actual_"
"length > MAX_RECVBUF_SZ) || (purb->actual_"
"length < RXDESC_SIZE)\n"));
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
DBG_8723A("%s()-%d: RX Warning!\n",
__func__, __LINE__);
} else {
@@ -561,8 +560,7 @@ static void usb_read_port_complete(struct urb *purb)
tasklet_schedule(&precvpriv->recv_tasklet);
precvbuf->pskb = NULL;
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
}
} else {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
@@ -596,8 +594,7 @@ static void usb_read_port_complete(struct urb *purb)
break;
case -EPROTO:
case -EOVERFLOW:
- rtl8723au_read_port(padapter, RECV_BULK_IN_ADDR, 0,
- precvbuf);
+ rtl8723au_read_port(padapter, 0, precvbuf);
break;
case -EINPROGRESS:
DBG_8723A("ERROR: URB IS IN PROGRESS!\n");
@@ -608,18 +605,18 @@ static void usb_read_port_complete(struct urb *purb)
}
}
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
struct recv_buf *precvbuf)
{
+ struct urb *purb;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
int err;
unsigned int pipe;
unsigned long tmpaddr;
unsigned long alignment;
int ret = _SUCCESS;
- struct urb *purb;
- struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
- struct recv_priv *precvpriv = &adapter->recvpriv;
- struct usb_device *pusbd = pdvobj->pusbdev;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
diff --git a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
index bbeaab48057a..c834b3a738d7 100644
--- a/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
+++ b/drivers/staging/rtl8723au/include/Hal8723UHWImg_CE.h
@@ -19,7 +19,7 @@ extern u8 Rtl8723UFwUMCBCutImgArrayWithoutBT[Rtl8723UUMCBCutImgArrayWithoutBTLen
extern const u8 Rtl8723SFwUMCBCutMPImgArray[Rtl8723SUMCBCutMPImgArrayLength];
#define Rtl8723EBTImgArrayLength 15276
-extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength] ;
+extern u8 Rtl8723EFwBTImgArray[Rtl8723EBTImgArrayLength];
#define Rtl8723UPHY_REG_Array_PGLength 336
extern u32 Rtl8723UPHY_REG_Array_PG[Rtl8723UPHY_REG_Array_PGLength];
diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
index 9870f87bdc70..e83463aeb9b1 100644
--- a/drivers/staging/rtl8723au/include/drv_types.h
+++ b/drivers/staging/rtl8723au/include/drv_types.h
@@ -51,7 +51,6 @@ enum _NIC_VERSION {
#include <rtw_debug.h>
#include <rtw_rf.h>
#include <rtw_event.h>
-#include <rtw_led.h>
#include <rtw_mlme_ext.h>
#include <rtw_ap.h>
@@ -228,7 +227,6 @@ struct rtw_adapter {
struct registry_priv registrypriv;
struct pwrctrl_priv pwrctrlpriv;
struct eeprom_priv eeprompriv;
- struct led_priv ledpriv;
u32 setband;
diff --git a/drivers/staging/rtl8723au/include/odm_debug.h b/drivers/staging/rtl8723au/include/odm_debug.h
index 4d935a33ccb3..83be5bab9e09 100644
--- a/drivers/staging/rtl8723au/include/odm_debug.h
+++ b/drivers/staging/rtl8723au/include/odm_debug.h
@@ -91,8 +91,7 @@
#define ODM_COMP_INIT BIT(31)
/*------------------------Export Macro Definition---------------------------*/
- #define DbgPrint printk
- #define RT_PRINTK(fmt, args...) DbgPrint("%s(): " fmt, __func__, ## args);
+ #define RT_PRINTK(fmt, args...) printk("%s(): " fmt, __func__, ## args);
#ifndef ASSERT
#define ASSERT(expr)
@@ -101,38 +100,17 @@
#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
{ \
- DbgPrint("[ODM-8723A] "); \
- RT_PRINTK fmt; \
- }
-
-#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
- if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel)) \
- { \
+ printk("[ODM-8723A] "); \
RT_PRINTK fmt; \
}
#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
if(!(expr)) { \
- DbgPrint("Assertion failed! %s at ......\n", #expr); \
- DbgPrint(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
+ printk("Assertion failed! %s at ......\n", #expr); \
+ printk(" ......%s,%s,line=%d\n", __FILE__, __func__, __LINE__);\
RT_PRINTK fmt; \
ASSERT(false); \
}
-#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
-#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
-#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
-
-#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
- if(((comp) & pDM_Odm->DebugComponents) && (level <= pDM_Odm->DebugLevel){ \
- int __i; \
- u8 * __ptr = (u8 *)ptr; \
- DbgPrint("[ODM] "); \
- DbgPrint(title_str); \
- DbgPrint(" "); \
- for (__i=0; __i < 6; __i++) \
- DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-"); \
- DbgPrint("\n"); \
- }
void ODM_InitDebugSetting23a(struct dm_odm_t *pDM_Odm);
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_dm.h b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
index 18112225e53f..bf236e8e47a2 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_dm.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_dm.h
@@ -37,8 +37,7 @@ enum{
#define IQK_BB_REG_NUM 9
#define HP_THERMAL_NUM 8
/* duplicate code,will move to ODM ######### */
-struct dm_priv
-{
+struct dm_priv {
u32 InitODMFlag;
/* Upper and Lower Signal threshold for Rate Adaptive*/
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_hal.h b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
index ee203a572cb7..e14633678b52 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_hal.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_hal.h
@@ -372,16 +372,9 @@ struct hal_data_8723a {
/* 2010/08/09 MH Add CU power down mode. */
u8 pwrdown;
- /* Add for dual MAC 0--Mac0 1--Mac1 */
- u32 interfaceIndex;
-
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut. HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
/* */
/* Add For EEPROM Efuse switch and Efuse Shadow map Setting */
/* */
@@ -405,8 +398,6 @@ struct hal_data_8723a {
* 2011/02/23 MH Add for 8723 mylti function definition. The define should be moved to an */
/* independent file in the future. */
- bool bMACFuncEnable;
-
/* Interrupt related register information. */
u32 IntArray[2];
u32 IntrMask[2];
@@ -518,8 +509,6 @@ void Hal_EfuseParseRateIndicationOption(struct rtw_adapter *padapter, u8 *hwinfo
void Hal_EfuseParseXtal_8723A(struct rtw_adapter *pAdapter, u8 *hwinfo, u8 AutoLoadFail);
void Hal_EfuseParseThermalMeter_8723A(struct rtw_adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
-void Hal_InitChannelPlan23a(struct rtw_adapter *padapter);
-
/* register */
void SetBcnCtrlReg23a(struct rtw_adapter *padapter, u8 SetBits, u8 ClearBits);
void rtl8723a_InitBeaconParameters(struct rtw_adapter *padapter);
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_led.h b/drivers/staging/rtl8723au/include/rtl8723a_led.h
deleted file mode 100644
index 1623d186feb4..000000000000
--- a/drivers/staging/rtl8723au/include/rtl8723a_led.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTL8723A_LED_H__
-#define __RTL8723A_LED_H__
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-
-/* */
-/* Interface to manipulate LED objects. */
-/* */
-void rtl8723au_InitSwLeds(struct rtw_adapter *padapter);
-void rtl8723au_DeInitSwLeds(struct rtw_adapter *padapter);
-void SwLedOn23a(struct rtw_adapter *padapter, struct led_8723a * pLed);
-void SwLedOff23a(struct rtw_adapter *padapter, struct led_8723a * pLed);
-
-#endif
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_recv.h b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
index 885d4d3859f8..0177bbc1c1cf 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_recv.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_recv.h
@@ -28,15 +28,11 @@
#define MAX_RECVBUF_SZ 15360 /* 15k < 16k */
-#define RECV_BULK_IN_ADDR 0x80
-#define RECV_INT_IN_ADDR 0x81
-
#define PHY_RSSI_SLID_WIN_MAX 100
#define PHY_LINKQUALITY_SLID_WIN_MAX 20
-struct phy_stat
-{
+struct phy_stat {
unsigned int phydw0;
unsigned int phydw1;
unsigned int phydw2;
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
index 815560c6e1d7..7db29f40ab70 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_xmit.h
@@ -212,7 +212,6 @@ struct txrpt_ccx_8723a {
#define txrpt_ccx_qtime_8723a(txrpt_ccx) ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
void handle_txrpt_ccx_8723a(struct rtw_adapter *adapter, void *buf);
-void rtl8723a_update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem);
void rtl8723a_fill_fake_txdesc(struct rtw_adapter *padapter, u8 *pDesc, u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
int rtl8723au_hal_xmitframe_enqueue(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe);
diff --git a/drivers/staging/rtl8723au/include/rtw_cmd.h b/drivers/staging/rtl8723au/include/rtw_cmd.h
index ef67068a5fe3..71044107d13b 100644
--- a/drivers/staging/rtl8723au/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723au/include/rtw_cmd.h
@@ -17,7 +17,6 @@
#include <wlan_bssdef.h>
#include <rtw_rf.h>
-#include <rtw_led.h>
#define C2H_MEM_SZ (16*1024)
@@ -450,8 +449,7 @@ struct getrfintfs_parm {
u8 rfintfs;
};
-struct Tx_Beacon_param
-{
+struct Tx_Beacon_param {
struct wlan_bssid_ex network;
};
diff --git a/drivers/staging/rtl8723au/include/rtw_ht.h b/drivers/staging/rtl8723au/include/rtw_ht.h
index cfc947daf08b..780eb8944118 100644
--- a/drivers/staging/rtl8723au/include/rtw_ht.h
+++ b/drivers/staging/rtl8723au/include/rtw_ht.h
@@ -19,8 +19,7 @@
#include "linux/ieee80211.h"
#include "wifi.h"
-struct ht_priv
-{
+struct ht_priv {
bool ht_option;
bool ampdu_enable;/* for enable Tx A-MPDU */
/* u8 baddbareq_issued[16]; */
diff --git a/drivers/staging/rtl8723au/include/rtw_led.h b/drivers/staging/rtl8723au/include/rtw_led.h
deleted file mode 100644
index c071da587efd..000000000000
--- a/drivers/staging/rtl8723au/include/rtw_led.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- ******************************************************************************/
-#ifndef __RTW_LED_H_
-#define __RTW_LED_H_
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
-
-#define LED_BLINK_NORMAL_INTERVAL 100
-#define LED_BLINK_SLOWLY_INTERVAL 200
-#define LED_BLINK_LONG_INTERVAL 400
-
-#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
-#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
-#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
-#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
-
-#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
-#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
-
-#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
-#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
-
-#define LED_BLINK_FAST_INTERVAL_BITLAND 30
-
-/* 060403, rcnjko: Customized for AzWave. */
-#define LED_CM2_BLINK_ON_INTERVAL 250
-#define LED_CM2_BLINK_OFF_INTERVAL 4750
-
-#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
-#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
-
-/* 080124, lanhsin: Customized for RunTop */
-#define LED_RunTop_BLINK_INTERVAL 300
-
-/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
-#define LED_CM3_BLINK_INTERVAL 1500
-
-enum led_ctl_mode {
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
- LED_CTL_START_WPS = 9,
- LED_CTL_STOP_WPS = 10,
- LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
- LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
- LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
- LED_CTL_CONNECTION_NO_TRANSFER = 14,
-};
-
-enum led_state_872x {
- LED_UNKNOWN = 0,
- RTW_LED_ON = 1,
- RTW_LED_OFF = 2,
- LED_BLINK_NORMAL = 3,
- LED_BLINK_SLOWLY = 4,
- LED_BLINK_POWER_ON = 5,
- LED_BLINK_SCAN = 6, /* LED is blinking during scanning period, the # of times to blink is depend on time for scanning. */
- LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
- LED_BLINK_StartToBlink = 8,/* Customzied for Sercomm Printer Server case */
- LED_BLINK_TXRX = 9,
- LED_BLINK_WPS = 10, /* LED is blinkg during WPS communication */
- LED_BLINK_WPS_STOP = 11, /* for ALPHA */
- LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
- LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
- LED_BLINK_CAMEO = 14,
- LED_BLINK_XAVI = 15,
- LED_BLINK_ALWAYS_ON = 16,
-};
-
-enum led_pin_8723a {
- LED_PIN_NULL = 0,
- LED_PIN_LED0 = 1,
- LED_PIN_LED1 = 2,
- LED_PIN_LED2 = 3,
- LED_PIN_GPIO0 = 4,
-};
-
-struct led_8723a {
- struct rtw_adapter *padapter;
-
- enum led_pin_8723a LedPin; /* Identify how to implement this SW led. */
- enum led_state_872x CurrLedState; /* Current LED state. */
- enum led_state_872x BlinkingLedState; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
-
- u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
-
- u8 bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
-
- u8 bLedWPSBlinkInProgress;
-
- u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
-
- struct timer_list BlinkTimer; /* Timer object for led blinking. */
-
- u8 bSWLedCtrl;
-
- /* ALPHA, added by chiyoko, 20090106 */
- u8 bLedNoLinkBlinkInProgress;
- u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
- u8 bLedScanBlinkInProgress;
-
- struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to manipulate H/W to blink LED. */
-};
-
-#define IS_LED_WPS_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS \
- || ((struct led_8723a *)_LED_871x)->CurrLedState==LED_BLINK_WPS_STOP \
- || ((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress)
-
-#define IS_LED_BLINKING(_LED_871x) (((struct led_8723a *)_LED_871x)->bLedWPSBlinkInProgress \
- ||((struct led_8723a *)_LED_871x)->bLedScanBlinkInProgress)
-
-/* */
-/* LED customization. */
-/* */
-
-enum led_strategy_8723a {
- SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option. */
- SW_LED_MODE1= 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
- SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave 8187 minicard. */
- SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm Printer Server case. */
- SW_LED_MODE4 = 4, /* for Edimax / Belkin */
- SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
- SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
- HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes, see MAC.CONFIG1 for details.) */
- LED_ST_NONE = 99,
-};
-
-void LedControl871x23a(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
-
-struct led_priv{
- /* add for led controll */
- struct led_8723a SwLed0;
- struct led_8723a SwLed1;
- enum led_strategy_8723a LedStrategy;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct rtw_adapter *padapter, enum led_ctl_mode LedAction);
- /* add for led controll */
-};
-
-#define rtw_led_control(adapter, LedAction)
-
-void BlinkWorkItemCallback23a(struct work_struct *work);
-
-void ResetLedStatus23a(struct led_8723a *pLed);
-
-void
-InitLed871x23a(
- struct rtw_adapter *padapter,
- struct led_8723a *pLed,
- enum led_pin_8723a LedPin
-);
-
-void
-DeInitLed871x23a(struct led_8723a *pLed);
-
-/* hal... */
-void BlinkHandler23a(struct led_8723a *pLed);
-
-#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme.h b/drivers/staging/rtl8723au/include/rtw_mlme.h
index 2ff01eb8fc0c..a6751f138336 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme.h
@@ -270,7 +270,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
{
spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, state) == true)
+ if (check_fwstate(pmlmepriv, state))
pmlmepriv->fw_state ^= state;
spin_unlock_bh(&pmlmepriv->lock);
}
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index 97c3c4478f29..51dba1fa4c5d 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -270,8 +270,7 @@ struct action_handler {
int (*func)(struct rtw_adapter *padapter, struct recv_frame *precv_frame);
};
-struct ss_res
-{
+struct ss_res {
int state;
int bss_cnt;
int channel_idx;
@@ -318,8 +317,7 @@ struct FW_Sta_Info {
* 5. ... and so on, till survey done.
*/
-struct mlme_ext_info
-{
+struct mlme_ext_info {
u32 state;
u32 reauth_count;
u32 reassoc_count;
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index f846bb5e7ab7..dc784be3ddd9 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -200,9 +200,6 @@ struct recv_priv {
u8 *precv_buf;
/* For display the phy informatiom */
- u8 is_signal_dbg; /* for debug */
- u8 signal_strength_dbg; /* for debug */
- s8 rssi;
s8 rxpwdb;
u8 signal_strength;
u8 signal_qual;
diff --git a/drivers/staging/rtl8723au/include/rtw_xmit.h b/drivers/staging/rtl8723au/include/rtw_xmit.h
index 32a844170327..2b7d6d08238b 100644
--- a/drivers/staging/rtl8723au/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723au/include/rtw_xmit.h
@@ -196,7 +196,6 @@ enum {
void rtw_sctx_init23a(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait23a(struct submit_ctx *sctx);
void rtw23a_sctx_done_err(struct submit_ctx **sctx, int status);
-void rtw_sctx_done23a(struct submit_ctx **sctx);
struct xmit_buf {
struct list_head list, list2;
@@ -295,10 +294,6 @@ struct xmit_priv {
struct rtw_adapter *adapter;
- u8 vcs_setting;
- u8 vcs;
- u8 vcs_type;
-
u64 tx_bytes;
u64 tx_pkts;
u64 tx_drop;
@@ -307,6 +302,8 @@ struct xmit_priv {
struct hw_xmit *hwxmits;
u8 hwxmit_entry;
+ u8 vcs;
+ u8 nqos_ssn;
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from
* large to small. it's value is 0->vo, 1->vi,
@@ -314,14 +311,8 @@ struct xmit_priv {
*/
struct semaphore tx_retevt;/* all tx return event; */
- u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
- /* per AC pending irp */
- int beq_cnt;
- int bkq_cnt;
- int viq_cnt;
- int voq_cnt;
struct rtw_queue free_xmitbuf_queue;
struct list_head xmitbuf_list; /* track buffers for cleanup */
@@ -332,7 +323,6 @@ struct xmit_priv {
struct list_head xmitextbuf_list; /* track buffers for cleanup */
uint free_xmit_extbuf_cnt;
- u16 nqos_ssn;
int ack_tx;
struct mutex ack_tx_mutex;
struct submit_ctx ack_tx_ops;
@@ -349,7 +339,6 @@ s32 rtw_free_xmitbuf23a(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
void rtw_count_tx_stats23a(struct rtw_adapter *padapter,
struct xmit_frame *pxmitframe, int sz);
void rtw_update_protection23a(struct rtw_adapter *padapter, u8 *ie, uint ie_len);
-s32 rtw_put_snap23a(u8 *data, u16 h_proto);
struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe23a_once(struct xmit_priv *pxmitpriv);
s32 rtw_free_xmitframe23a(struct xmit_priv *pxmitpriv,
@@ -363,8 +352,6 @@ struct xmit_frame *rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv,
struct hw_xmit *phwxmit_i, int entry);
s32 rtw_xmit23a_classifier(struct rtw_adapter *padapter,
struct xmit_frame *pxmitframe);
-u32 rtw_calculate_wlan_pkt_size_by_attribue23a(struct pkt_attrib *pattrib);
-#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue23a(&f->attrib)
s32 rtw_xmitframe_coalesce23a(struct rtw_adapter *padapter, struct sk_buff *pkt,
struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
@@ -391,7 +378,6 @@ void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
u8 qos_acm23a(u8 acm_mask, u8 priority);
u32 rtw_get_ff_hwaddr23a(struct xmit_frame *pxmitframe);
int rtw_ack_tx_wait23a(struct xmit_priv *pxmitpriv, u32 timeout_ms);
-void rtw_ack_tx_done23a(struct xmit_priv *pxmitpriv, int status);
/* include after declaring struct xmit_buf, in order to avoid warning */
#include <xmit_osdep.h>
diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
index ade8bc71572a..ff11e13b24a8 100644
--- a/drivers/staging/rtl8723au/include/usb_ops.h
+++ b/drivers/staging/rtl8723au/include/usb_ops.h
@@ -63,6 +63,6 @@ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
atomic_set(&dvobj->continual_urb_error, 0);
}
-void rtl8723au_chip_configure(struct rtw_adapter *padapter);
+bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
#endif /* __USB_OPS_H_ */
diff --git a/drivers/staging/rtl8723au/include/usb_ops_linux.h b/drivers/staging/rtl8723au/include/usb_ops_linux.h
index bf68bbb41f9c..af2f14b8b360 100644
--- a/drivers/staging/rtl8723au/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8723au/include/usb_ops_linux.h
@@ -21,13 +21,13 @@
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-int rtl8723au_read_port(struct rtw_adapter *adapter, u32 addr, u32 cnt,
+int rtl8723au_read_port(struct rtw_adapter *adapter, u32 cnt,
struct recv_buf *precvbuf);
void rtl8723au_read_port_cancel(struct rtw_adapter *padapter);
int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
struct xmit_buf *pxmitbuf);
void rtl8723au_write_port_cancel(struct rtw_adapter *padapter);
-int rtl8723au_read_interrupt(struct rtw_adapter *adapter, u32 addr);
+int rtl8723au_read_interrupt(struct rtw_adapter *adapter);
u8 rtl8723au_read8(struct rtw_adapter *padapter, u16 addr);
u16 rtl8723au_read16(struct rtw_adapter *padapter, u16 addr);
diff --git a/drivers/staging/rtl8723au/include/wlan_bssdef.h b/drivers/staging/rtl8723au/include/wlan_bssdef.h
index 96e8074a7c18..95b32e15a4d0 100644
--- a/drivers/staging/rtl8723au/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723au/include/wlan_bssdef.h
@@ -57,23 +57,6 @@ enum {
Ndis802_11Encryption3KeyAbsent,
};
-/* Key mapping keys require a BSSID */
-struct ndis_802_11_key {
- u32 Length; /* Length of this structure */
- u32 KeyIndex;
- u32 KeyLength; /* length of key in bytes */
- unsigned char BSSID[6];
- unsigned long long KeyRSC;
- u8 KeyMaterial[32]; /* variable length depending on above field */
-};
-
-struct wlan_phy_info {
- u8 SignalStrength;/* in percentage) */
- u8 SignalQuality;/* in percentage) */
- u8 Optimum_antenna; /* for Antenna diversity */
- u8 Reserved_0;
-};
-
struct wlan_bcn_info {
/* these infor get from rtw_get_encrypt_info when
* * translate scan to UI */
@@ -99,7 +82,8 @@ struct wlan_bssid_ex {
u32 DSConfig; /* Frequency, units are kHz */
enum nl80211_iftype ifmode;
unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
- struct wlan_phy_info PhyInfo;
+ u8 SignalStrength;/* in percentage */
+ u8 SignalQuality;/* in percentage */
u32 IELength;
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability info*/
} __packed;
@@ -115,7 +99,6 @@ struct wlan_network {
/* set to fixed when not to be removed as site-surveying */
int fixed;
unsigned long last_scanned; /* timestamp for the network */
- int aid; /* will only be valid when a BSS is joined. */
int join_res;
struct wlan_bssid_ex network; /* must be the last item */
struct wlan_bcn_info BcnInfo;
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index bd6953af0a03..82a8c06ab347 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -275,7 +275,8 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
&pnetwork->network)) {
notify_signal = 100 * translate_percentage_to_dbm(padapter->recvpriv.signal_strength); /* dbm */
} else {
- notify_signal = 100 * translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength); /* dbm */
+ notify_signal = 100 * translate_percentage_to_dbm(
+ pnetwork->network.SignalStrength); /* dbm */
}
bss = cfg80211_inform_bss(wiphy, notify_channel,
@@ -471,7 +472,6 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
int set_tx, const u8 *sta_addr,
struct key_params *keyparms)
{
- int ret = 0;
int key_len;
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct rtw_adapter *padapter = netdev_priv(dev);
@@ -708,7 +708,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
exit:
- return ret;
+ return 0;
}
#endif
@@ -850,7 +850,6 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index,
dot11PrivacyAlgrthm;
}
}
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* adhoc mode */
}
}
@@ -2364,7 +2363,6 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
ie_offset = offsetof(struct ieee80211_mgmt,
u.reassoc_req.variable);
- sinfo.filled = 0;
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
sinfo.assoc_req_ies = pmgmt_frame + ie_offset;
sinfo.assoc_req_ies_len = frame_len - ie_offset;
@@ -2432,20 +2430,16 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb,
@@ -2574,11 +2568,9 @@ fail:
static int
rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
{
- int ret = 0;
-
DBG_8723A("%s\n", __func__);
- return ret;
+ return 0;
}
static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
@@ -2856,8 +2848,10 @@ static int cfg80211_rtw_add_station(struct wiphy *wiphy,
}
static int cfg80211_rtw_del_station(struct wiphy *wiphy,
- struct net_device *ndev, const u8 *mac)
+ struct net_device *ndev,
+ struct station_del_parameters *params)
{
+ const u8 *mac = params->mac;
int ret = 0;
struct list_head *phead, *plist, *ptmp;
u8 updated = 0;
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index b34eaec9dd48..9966d16342b3 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -175,7 +175,6 @@ static int netdev_close(struct net_device *pnetdev);
static int loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
{
struct registry_priv *registry_par = &padapter->registrypriv;
- int status = _SUCCESS;
GlobalDebugLevel23A = rtw_debug;
registry_par->chip_version = (u8)rtw_chip_version;
@@ -234,7 +233,7 @@ static int loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev)
snprintf(registry_par->if2name, 16, "%s", if2name);
registry_par->notch_filter = (u8)rtw_notch_filter;
registry_par->regulatory_tid = (u8)rtw_regulatory_id;
- return status;
+ return _SUCCESS;
}
static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
@@ -384,12 +383,9 @@ static int rtw_init_default_value(struct rtw_adapter *padapter)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- int ret = _SUCCESS;
/* xmit_priv */
- pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
pxmitpriv->vcs = pregistrypriv->vcs_type;
- pxmitpriv->vcs_type = pregistrypriv->vcs_type;
/* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */
pxmitpriv->frag_len = pregistrypriv->frag_thresh;
@@ -425,7 +421,7 @@ static int rtw_init_default_value(struct rtw_adapter *padapter)
/* misc. */
padapter->bReadPortCancel = false;
padapter->bWritePortCancel = false;
- return ret;
+ return _SUCCESS;
}
int rtw_reset_drv_sw23a(struct rtw_adapter *padapter)
@@ -546,9 +542,6 @@ void rtw_cancel_all_timer23a(struct rtw_adapter *padapter)
RT_TRACE(_module_os_intfs_c_, _drv_info_,
("%s:cancel dynamic_chk_timer!\n", __func__));
- RT_TRACE(_module_os_intfs_c_, _drv_info_,
- ("%s:cancel DeInitSwLeds!\n", __func__));
-
del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer);
@@ -685,8 +678,6 @@ int netdev_open23a(struct net_device *pnetdev)
rtw_cfg80211_init_wiphy(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
padapter->bup = true;
}
padapter->net_closed = false;
@@ -768,8 +759,6 @@ int rtw_ips_pwr_up23a(struct rtw_adapter *padapter)
result = ips_netdrv_open(padapter);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n",
jiffies_to_msecs(jiffies - start_time));
return result;
@@ -784,8 +773,6 @@ void rtw_ips_pwr_down23a(struct rtw_adapter *padapter)
padapter->bCardDisableWOHSM = true;
padapter->net_closed = true;
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
-
rtw_ips_dev_unload23a(padapter);
padapter->bCardDisableWOHSM = false;
DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n",
@@ -844,8 +831,6 @@ static int netdev_close(struct net_device *pnetdev)
rtw_free_assoc_resources23a(padapter, 1);
/* s2-4. */
rtw_free_network_queue23a(padapter);
- /* Close LED */
- rtw_led_control(padapter, LED_CTL_POWER_OFF);
}
rtw_scan_abort23a(padapter);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 865743ecd855..373a617ace54 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -59,21 +59,6 @@ static struct usb_driver rtl8723a_usb_drv = {
static struct usb_driver *usb_drv = &rtl8723a_usb_drv;
-static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd);
-}
-
-static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd);
-}
-
-static inline int RT_usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
-{
- return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd);
-}
-
static int rtw_init_intf_priv(struct dvobj_priv *dvobj)
{
mutex_init(&dvobj->usb_vendor_req_mutex);
@@ -143,21 +128,21 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
le16_to_cpu(pendp_desc->wMaxPacketSize));
DBG_8723A("bInterval =%x\n", pendp_desc->bInterval);
- if (RT_usb_endpoint_is_bulk_in(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_bulk_in = %x\n",
+ if (usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_in = %x\n",
usb_endpoint_num(pendp_desc));
pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
usb_endpoint_num(pendp_desc);
pdvobjpriv->RtNumInPipes++;
- } else if (RT_usb_endpoint_is_int_in(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_int_in = %x, Interval = %x\n",
+ } else if (usb_endpoint_is_int_in(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_int_in = %x, Interval = %x\n",
usb_endpoint_num(pendp_desc),
pendp_desc->bInterval);
pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] =
usb_endpoint_num(pendp_desc);
pdvobjpriv->RtNumInPipes++;
- } else if (RT_usb_endpoint_is_bulk_out(pendp_desc)) {
- DBG_8723A("RT_usb_endpoint_is_bulk_out = %x\n",
+ } else if (usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_8723A("usb_endpoint_is_bulk_out = %x\n",
usb_endpoint_num(pendp_desc));
pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
usb_endpoint_num(pendp_desc);
@@ -257,6 +242,7 @@ void rtl8723a_usb_intf_stop(struct rtw_adapter *padapter)
static void rtw_dev_unload(struct rtw_adapter *padapter)
{
+ struct submit_ctx *pack_tx_ops = &padapter->xmitpriv.ack_tx_ops;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
if (padapter->bup) {
@@ -264,8 +250,8 @@ static void rtw_dev_unload(struct rtw_adapter *padapter)
padapter->bDriverStopped = true;
if (padapter->xmitpriv.ack_tx)
- rtw_ack_tx_done23a(&padapter->xmitpriv,
- RTW_SCTX_DONE_DRV_STOP);
+ rtw23a_sctx_done_err(&pack_tx_ops,
+ RTW_SCTX_DONE_DRV_STOP);
/* s3. */
rtl8723a_usb_intf_stop(padapter);
@@ -322,8 +308,6 @@ int rtw_hw_suspend23a(struct rtw_adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
_clr_fwstate_(pmlmepriv, _FW_LINKED);
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
rtw_os_indicate_disconnect23a(padapter);
/* donnot enqueue cmd */
@@ -546,7 +530,8 @@ static struct rtw_adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
rtl8723a_read_chip_version(padapter);
/* step usb endpoint mapping */
- rtl8723au_chip_configure(padapter);
+ if (!rtl8723au_chip_configure(padapter))
+ goto free_hal_data;
/* step read efuse/eeprom data and get mac_addr */
rtl8723a_read_adapter_info(padapter);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index a3349ac57bae..3e19b3b2c1c2 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -18,13 +18,6 @@
#include <usb_ops_linux.h>
#include <rtw_sreset.h>
-struct zero_bulkout_context {
- void *pbuf;
- void *purb;
- void *pirp;
- void *padapter;
-};
-
void rtl8723au_read_port_cancel(struct rtw_adapter *padapter)
{
struct recv_buf *precvbuf;
@@ -53,18 +46,6 @@ static void usb_write_port23a_complete(struct urb *purb)
unsigned long irqL;
switch (pxmitbuf->flags) {
- case VO_QUEUE_INX:
- pxmitpriv->voq_cnt--;
- break;
- case VI_QUEUE_INX:
- pxmitpriv->viq_cnt--;
- break;
- case BE_QUEUE_INX:
- pxmitpriv->beq_cnt--;
- break;
- case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt--;
- break;
case HIGH_QUEUE_INX:
#ifdef CONFIG_8723AU_AP_MODE
rtw_chk_hi_queue_cmd23a(padapter);
@@ -166,19 +147,15 @@ int rtl8723au_write_port(struct rtw_adapter *padapter, u32 addr, u32 cnt,
switch (addr) {
case VO_QUEUE_INX:
- pxmitpriv->voq_cnt++;
pxmitbuf->flags = VO_QUEUE_INX;
break;
case VI_QUEUE_INX:
- pxmitpriv->viq_cnt++;
pxmitbuf->flags = VI_QUEUE_INX;
break;
case BE_QUEUE_INX:
- pxmitpriv->beq_cnt++;
pxmitbuf->flags = BE_QUEUE_INX;
break;
case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt++;
pxmitbuf->flags = BK_QUEUE_INX;
break;
case HIGH_QUEUE_INX:
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 228e48339b9e..b4612fb615f6 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2599,9 +2599,9 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
if (count > sector_cnt) {
if (mode_2k)
- ms_card->seq_mode |= MODE_2K_SEQ;
+ ms_card->seq_mode = MODE_2K_SEQ;
else
- ms_card->seq_mode |= MODE_512_SEQ;
+ ms_card->seq_mode = MODE_512_SEQ;
}
} else {
count = sector_cnt;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 2d2527c3aea2..c74f1b8108f6 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -418,7 +418,7 @@ static void rtsx_shutdown(struct pci_dev *pci)
static int rtsx_control_thread(void *__dev)
{
- struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
@@ -527,7 +527,7 @@ SkipForAbort:
static int rtsx_polling_thread(void *__dev)
{
- struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
struct sd_info *sd_card = &(chip->sd_card);
struct xd_info *xd_card = &(chip->xd_card);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a7ade8b4e7f2..9593d8132938 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -126,10 +126,11 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
if (chip->ignore_sd && CHK_SDIO_EXIST(chip)) {
if (chip->asic_code) {
RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
- MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+ MS_INS_PU | SD_WP_PU |
+ SD_CD_PU | SD_CMD_PU);
} else {
RTSX_WRITE_REG(chip, FPGA_PULL_CTL, 0xFF,
- FPGA_SD_PULL_CTL_EN);
+ FPGA_SD_PULL_CTL_EN);
}
RTSX_WRITE_REG(chip, CARD_SHARE_MODE, 0xFF, CARD_SHARE_48_SD);
@@ -137,7 +138,7 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, 0xFF2C, 0x01, 0x01);
RTSX_WRITE_REG(chip, SDIO_CTRL, 0xFF,
- SDIO_BUS_CTRL | SDIO_CD_CTRL);
+ SDIO_BUS_CTRL | SDIO_CD_CTRL);
chip->sd_int = 1;
chip->sd_io = 1;
@@ -201,7 +202,7 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
TRACE_RET(chip, STATUS_FAIL);
} else {
RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
- FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+ FPGA_SD_PULL_CTL_BIT | 0x20, 0);
}
retval = card_share_mode(chip, SD_CARD);
if (retval != STATUS_SUCCESS)
@@ -226,6 +227,87 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
}
#endif
+static int rtsx_reset_aspm(struct rtsx_chip *chip)
+{
+ int ret;
+
+ if (chip->dynamic_aspm) {
+ if (!CHK_SDIO_EXIST(chip) || !CHECK_PID(chip, 0x5288))
+ return STATUS_SUCCESS;
+
+ ret = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF,
+ chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ return STATUS_SUCCESS;
+ }
+
+ if (CHECK_PID(chip, 0x5208))
+ RTSX_WRITE_REG(chip, ASPM_FORCE_CTL, 0xFF, 0x3F);
+ ret = rtsx_write_config_byte(chip, LCTLR, chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ chip->aspm_level[0] = chip->aspm_l0s_l1_en;
+ if (CHK_SDIO_EXIST(chip)) {
+ chip->aspm_level[1] = chip->aspm_l0s_l1_en;
+ ret = rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF, chip->aspm_l0s_l1_en);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ chip->aspm_enabled = 1;
+
+ return STATUS_SUCCESS;
+}
+
+static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
+{
+ int ret;
+
+ if (!chip->asic_code || !CHECK_PID(chip, 0x5208)) {
+ rtsx_enable_bus_int(chip);
+ return STATUS_SUCCESS;
+ }
+
+ if (chip->phy_debug_mode) {
+ RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
+ rtsx_disable_bus_int(chip);
+ } else {
+ rtsx_enable_bus_int(chip);
+ }
+
+ if (chip->ic_version >= IC_VER_D) {
+ u16 reg;
+
+ ret = rtsx_read_phy_register(chip, 0x00, &reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFE7F;
+ reg |= 0x80;
+ ret = rtsx_write_phy_register(chip, 0x00, reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ ret = rtsx_read_phy_register(chip, 0x1C, &reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+
+ reg &= 0xFFF7;
+ ret = rtsx_write_phy_register(chip, 0x1C, reg);
+ if (ret != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ }
+
+ if (chip->driver_first_load && (chip->ic_version < IC_VER_C))
+ rtsx_calibration(chip);
+
+ return STATUS_SUCCESS;
+}
+
int rtsx_reset_chip(struct rtsx_chip *chip)
{
int retval;
@@ -268,7 +350,7 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
#ifdef LED_AUTO_BLINK
RTSX_WRITE_REG(chip, CARD_AUTO_BLINK, 0xFF,
- LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
+ LED_BLINK_SPEED | BLINK_EN | LED_GPIO0);
#endif
if (chip->asic_code) {
@@ -288,39 +370,9 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
/* Enable ASPM */
if (chip->aspm_l0s_l1_en) {
- if (chip->dynamic_aspm) {
- if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288)) {
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
- }
- }
- } else {
- if (CHECK_PID(chip, 0x5208))
- RTSX_WRITE_REG(chip, ASPM_FORCE_CTL,
- 0xFF, 0x3F);
-
- retval = rtsx_write_config_byte(chip, LCTLR,
- chip->aspm_l0s_l1_en);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- chip->aspm_level[0] = chip->aspm_l0s_l1_en;
- if (CHK_SDIO_EXIST(chip)) {
- chip->aspm_level[1] = chip->aspm_l0s_l1_en;
- if (CHECK_PID(chip, 0x5288))
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
- else
- retval = rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF, chip->aspm_l0s_l1_en);
-
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- }
-
- chip->aspm_enabled = 1;
- }
+ retval = rtsx_reset_aspm(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
} else {
if (chip->asic_code && CHECK_PID(chip, 0x5208)) {
retval = rtsx_write_phy_register(chip, 0x07, 0x0129);
@@ -338,91 +390,38 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
TRACE_RET(chip, STATUS_FAIL);
if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288))
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0,
- 0xFF00, 0x0100);
- else
- retval = rtsx_write_cfg_dw(chip, 1, 0xC0,
- 0xFF00, 0x0100);
+ retval = rtsx_write_cfg_dw(chip,
+ CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF00, 0x0100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
-
}
- if (CHECK_PID(chip, 0x5288)) {
- if (!CHK_SDIO_EXIST(chip)) {
- retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF,
- 0x0103);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
+ if (CHECK_PID(chip, 0x5288) && !CHK_SDIO_EXIST(chip)) {
+ retval = rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFFFF, 0x0103);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
- }
+ retval = rtsx_write_cfg_dw(chip, 2, 0x84, 0xFF, 0x03);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
}
RTSX_WRITE_REG(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
RTSX_WRITE_REG(chip, PERST_GLITCH_WIDTH, 0xFF, 0x80);
- /* Enable PCIE interrupt */
- if (chip->asic_code) {
- if (CHECK_PID(chip, 0x5208)) {
- if (chip->phy_debug_mode) {
- RTSX_WRITE_REG(chip, CDRESUMECTL, 0x77, 0);
- rtsx_disable_bus_int(chip);
- } else {
- rtsx_enable_bus_int(chip);
- }
-
- if (chip->ic_version >= IC_VER_D) {
- u16 reg;
-
- retval = rtsx_read_phy_register(chip, 0x00,
- &reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- reg &= 0xFE7F;
- reg |= 0x80;
- retval = rtsx_write_phy_register(chip, 0x00,
- reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- retval = rtsx_read_phy_register(chip, 0x1C,
- &reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- reg &= 0xFFF7;
- retval = rtsx_write_phy_register(chip, 0x1C,
- reg);
- if (retval != STATUS_SUCCESS)
- TRACE_RET(chip, STATUS_FAIL);
-
- }
-
- if (chip->driver_first_load &&
- (chip->ic_version < IC_VER_C))
- rtsx_calibration(chip);
-
- } else {
- rtsx_enable_bus_int(chip);
- }
- } else {
- rtsx_enable_bus_int(chip);
- }
+ retval = rtsx_enable_pcie_intr(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
chip->need_reset = 0;
chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
if (chip->hw_bypass_sd)
- goto NextCard;
+ goto nextcard;
dev_dbg(rtsx_dev(chip), "In %s, chip->int_reg = 0x%x\n", __func__,
chip->int_reg);
if (chip->int_reg & SD_EXIST) {
@@ -443,10 +442,10 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
} else {
chip->sd_io = 0;
RTSX_WRITE_REG(chip, SDIO_CTRL, SDIO_BUS_CTRL | SDIO_CD_CTRL,
- 0);
+ 0);
}
-NextCard:
+nextcard:
if (chip->int_reg & XD_EXIST)
chip->need_reset |= XD_CARD;
if (chip->int_reg & MS_EXIST)
@@ -484,10 +483,10 @@ NextCard:
if (chip->ft2_fast_mode) {
RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
- MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+ MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
udelay(chip->pmos_pwr_on_interval);
RTSX_WRITE_REG(chip, CARD_PWR_CTL, 0xFF,
- MS_POWER_ON | SD_POWER_ON);
+ MS_POWER_ON | SD_POWER_ON);
wait_timeout(200);
}
@@ -540,10 +539,7 @@ static int rts5208_init(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
RTSX_READ_REG(chip, CLK_SEL, &val);
- if (val == 0)
- chip->asic_code = 1;
- else
- chip->asic_code = 0;
+ chip->asic_code = val == 0 ? 1 : 0;
if (chip->asic_code) {
retval = rtsx_read_phy_register(chip, 0x1C, &reg);
@@ -553,10 +549,7 @@ static int rts5208_init(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "Value of phy register 0x1C is 0x%x\n",
reg);
chip->ic_version = (reg >> 4) & 0x07;
- if (reg & PHY_DEBUG_MODE)
- chip->phy_debug_mode = 1;
- else
- chip->phy_debug_mode = 0;
+ chip->phy_debug_mode = reg & PHY_DEBUG_MODE ? 1 : 0;
} else {
RTSX_READ_REG(chip, 0xFE80, &val);
@@ -566,16 +559,10 @@ static int rts5208_init(struct rtsx_chip *chip)
RTSX_READ_REG(chip, PDINFO, &val);
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
- if (val & AUX_PWR_DETECTED)
- chip->aux_pwr_exist = 1;
- else
- chip->aux_pwr_exist = 0;
+ chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
RTSX_READ_REG(chip, 0xFE50, &val);
- if (val & 0x01)
- chip->hw_bypass_sd = 1;
- else
- chip->hw_bypass_sd = 0;
+ chip->hw_bypass_sd = val & 0x01 ? 1 : 0;
rtsx_read_config_byte(chip, 0x0E, &val);
if (val & 0x80)
@@ -585,10 +572,7 @@ static int rts5208_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
- if (val & 0x80)
- chip->auto_delink_en = 1;
- else
- chip->auto_delink_en = 0;
+ chip->auto_delink_en = val & 0x80 ? 1 : 0;
}
return STATUS_SUCCESS;
@@ -602,33 +586,21 @@ static int rts5288_init(struct rtsx_chip *chip)
RTSX_WRITE_REG(chip, CLK_SEL, 0x03, 0x03);
RTSX_READ_REG(chip, CLK_SEL, &val);
- if (val == 0)
- chip->asic_code = 1;
- else
- chip->asic_code = 0;
+ chip->asic_code = val == 0 ? 1 : 0;
chip->ic_version = 0;
chip->phy_debug_mode = 0;
RTSX_READ_REG(chip, PDINFO, &val);
dev_dbg(rtsx_dev(chip), "PDINFO: 0x%x\n", val);
- if (val & AUX_PWR_DETECTED)
- chip->aux_pwr_exist = 1;
- else
- chip->aux_pwr_exist = 0;
+ chip->aux_pwr_exist = val & AUX_PWR_DETECTED ? 1 : 0;
RTSX_READ_REG(chip, CARD_SHARE_MODE, &val);
dev_dbg(rtsx_dev(chip), "CARD_SHARE_MODE: 0x%x\n", val);
- if (val & 0x04)
- chip->baro_pkg = QFN;
- else
- chip->baro_pkg = LQFP;
+ chip->baro_pkg = val & 0x04 ? QFN : LQFP;
RTSX_READ_REG(chip, 0xFE5A, &val);
- if (val & 0x10)
- chip->hw_bypass_sd = 1;
- else
- chip->hw_bypass_sd = 0;
+ chip->hw_bypass_sd = val & 0x10 ? 1 : 0;
retval = rtsx_read_cfg_dw(chip, 0, 0x718, &lval);
if (retval != STATUS_SUCCESS)
@@ -643,16 +615,12 @@ static int rts5288_init(struct rtsx_chip *chip)
if (chip->use_hw_setting) {
RTSX_READ_REG(chip, CHANGE_LINK_STATE, &val);
- if (val & 0x80)
- chip->auto_delink_en = 1;
- else
- chip->auto_delink_en = 0;
+ chip->auto_delink_en = val & 0x80 ? 1 : 0;
if (CHECK_BARO_PKG(chip, LQFP))
chip->lun_mode = SD_MS_1LUN;
else
chip->lun_mode = DEFAULT_SINGLE;
-
}
return STATUS_SUCCESS;
@@ -660,9 +628,9 @@ static int rts5288_init(struct rtsx_chip *chip)
int rtsx_init_chip(struct rtsx_chip *chip)
{
- struct sd_info *sd_card = &(chip->sd_card);
- struct xd_info *xd_card = &(chip->xd_card);
- struct ms_info *ms_card = &(chip->ms_card);
+ struct sd_info *sd_card = &chip->sd_card;
+ struct xd_info *xd_card = &chip->xd_card;
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
unsigned int i;
@@ -740,7 +708,6 @@ int rtsx_init_chip(struct rtsx_chip *chip)
retval = rts5288_init(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
-
}
if (chip->ss_en == 2)
@@ -842,7 +809,6 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
} else {
if (reg0 & 0x03)
maybe_support_aspm = 1;
-
}
if (reg_changed) {
@@ -859,15 +825,15 @@ static void rtsx_monitor_aspm_config(struct rtsx_chip *chip)
chip->sdio_aspm = 0;
}
rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFF,
- 0x30 | chip->aspm_level[0] |
- (chip->aspm_level[1] << 2));
+ 0x30 | chip->aspm_level[0] |
+ (chip->aspm_level[1] << 2));
}
}
void rtsx_polling_func(struct rtsx_chip *chip)
{
#ifdef SUPPORT_SD_LOCK
- struct sd_info *sd_card = &(chip->sd_card);
+ struct sd_info *sd_card = &chip->sd_card;
#endif
int ss_allowed;
@@ -875,7 +841,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
return;
if (rtsx_chk_stat(chip, RTSX_STAT_DELINK))
- goto Delink_Stage;
+ goto delink_stage;
if (chip->polling_config) {
u8 val;
@@ -888,7 +854,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
if (chip->ocp_int) {
- rtsx_read_register(chip, OCPSTAT, &(chip->ocp_stat));
+ rtsx_read_register(chip, OCPSTAT, &chip->ocp_stat);
if (chip->card_exist & SD_CARD)
sd_power_off_card3v3(chip);
@@ -932,7 +898,6 @@ void rtsx_polling_func(struct rtsx_chip *chip)
rtsx_read_cfg_dw(chip, 1, 0x04, &val);
if (val & 0x07)
ss_allowed = 0;
-
}
}
} else {
@@ -958,7 +923,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
#ifdef SUPPORT_SDIO_ASPM
if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) &&
- chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
+ chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
if (chip->sd_io) {
dynamic_configure_sdio_aspm(chip);
} else {
@@ -966,7 +931,8 @@ void rtsx_polling_func(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
rtsx_write_register(chip,
ASPM_FORCE_CTL, 0xFC,
- 0x30 | (chip->aspm_level[1] << 2));
+ 0x30 |
+ (chip->aspm_level[1] << 2));
chip->sdio_aspm = 1;
}
}
@@ -988,9 +954,10 @@ void rtsx_polling_func(struct rtsx_chip *chip)
turn_off_led(chip, LED_GPIO);
- if (chip->auto_power_down && !chip->card_ready && !chip->sd_io)
- rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
-
+ if (chip->auto_power_down && !chip->card_ready &&
+ !chip->sd_io)
+ rtsx_force_power_down(chip,
+ SSC_PDCTL | OC_PDCTL);
}
}
@@ -1013,7 +980,6 @@ void rtsx_polling_func(struct rtsx_chip *chip)
break;
}
-
#ifdef SUPPORT_OCP
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
if (chip->ocp_stat &
@@ -1024,7 +990,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
if (chip->card_exist & SD_CARD) {
rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
- 0);
+ 0);
card_power_off(chip, SD_CARD);
chip->card_fail |= SD_CARD;
}
@@ -1032,7 +998,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)
if (chip->ocp_stat & (MS_OC_NOW | MS_OC_EVER)) {
if (chip->card_exist & MS_CARD) {
rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
- 0);
+ 0);
card_power_off(chip, MS_CARD);
chip->card_fail |= MS_CARD;
}
@@ -1043,15 +1009,15 @@ void rtsx_polling_func(struct rtsx_chip *chip)
chip->ocp_stat);
if (chip->card_exist & SD_CARD) {
rtsx_write_register(chip, CARD_OE, SD_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= SD_CARD;
} else if (chip->card_exist & MS_CARD) {
rtsx_write_register(chip, CARD_OE, MS_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= MS_CARD;
} else if (chip->card_exist & XD_CARD) {
rtsx_write_register(chip, CARD_OE, XD_OUTPUT_EN,
- 0);
+ 0);
chip->card_fail |= XD_CARD;
}
card_power_off(chip, SD_CARD);
@@ -1059,9 +1025,9 @@ void rtsx_polling_func(struct rtsx_chip *chip)
}
#endif
-Delink_Stage:
+delink_stage:
if (chip->auto_delink_en && chip->auto_delink_allowed &&
- !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
+ !chip->card_ready && !chip->card_ejected && !chip->sd_io) {
int enter_L1 = chip->auto_delink_in_L1 && (
chip->aspm_l0s_l1_en || chip->ss_en);
int delink_stage1_cnt = chip->delink_stage1_step;
@@ -1081,27 +1047,33 @@ Delink_Stage:
dev_dbg(rtsx_dev(chip), "False card inserted, do force delink\n");
if (enter_L1)
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+ rtsx_write_register(chip,
+ HOST_SLEEP_STATE,
+ 0x03, 1);
rtsx_write_register(chip,
- CHANGE_LINK_STATE, 0x0A,
- 0x0A);
+ CHANGE_LINK_STATE,
+ 0x0A, 0x0A);
if (enter_L1)
rtsx_enter_L1(chip);
- chip->auto_delink_cnt = delink_stage3_cnt + 1;
+ chip->auto_delink_cnt =
+ delink_stage3_cnt + 1;
} else {
dev_dbg(rtsx_dev(chip), "No card inserted, do delink\n");
if (enter_L1)
- rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 1);
+ rtsx_write_register(chip,
+ HOST_SLEEP_STATE,
+ 0x03, 1);
- rtsx_write_register(chip, CHANGE_LINK_STATE, 0x02, 0x02);
+ rtsx_write_register(chip,
+ CHANGE_LINK_STATE,
+ 0x02, 0x02);
if (enter_L1)
rtsx_enter_L1(chip);
-
}
}
@@ -1115,7 +1087,7 @@ Delink_Stage:
rtsx_set_phy_reg_bit(chip, 0x1C, 2);
rtsx_write_register(chip, CHANGE_LINK_STATE,
- 0x0A, 0x0A);
+ 0x0A, 0x0A);
}
chip->auto_delink_cnt++;
@@ -1219,7 +1191,7 @@ int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
}
int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
- u32 val)
+ u32 val)
{
u8 mode = 0, tmp;
int i;
@@ -1279,7 +1251,7 @@ int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val)
}
int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
- int len)
+ int len)
{
u32 *data, *mask;
u16 offset = addr % 4;
@@ -1324,7 +1296,7 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
for (i = 0; i < dw_len; i++) {
retval = rtsx_write_cfg_dw(chip, func, aligned_addr + i * 4,
- mask[i], data[i]);
+ mask[i], data[i]);
if (retval != STATUS_SUCCESS) {
vfree(data);
vfree(mask);
@@ -1339,7 +1311,7 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
}
int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
- int len)
+ int len)
{
u32 *data;
u16 offset = addr % 4;
@@ -1360,7 +1332,7 @@ int rtsx_read_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf,
for (i = 0; i < dw_len; i++) {
retval = rtsx_read_cfg_dw(chip, func, aligned_addr + i * 4,
- data + i);
+ data + i);
if (retval != STATUS_SUCCESS) {
vfree(data);
TRACE_RET(chip, STATUS_FAIL);
@@ -1522,7 +1494,7 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
- if (0 == (value & (1 << bit))) {
+ if ((value & (1 << bit)) == 0) {
value |= (1 << bit);
retval = rtsx_write_phy_register(chip, reg, value);
if (retval != STATUS_SUCCESS)
@@ -1595,12 +1567,9 @@ void rtsx_enter_ss(struct rtsx_chip *chip)
rtsx_force_power_down(chip, SSC_PDCTL | OC_PDCTL);
}
- if (CHK_SDIO_EXIST(chip)) {
- if (CHECK_PID(chip, 0x5288))
- rtsx_write_cfg_dw(chip, 2, 0xC0, 0xFF00, 0x0100);
- else
- rtsx_write_cfg_dw(chip, 1, 0xC0, 0xFF00, 0x0100);
- }
+ if (CHK_SDIO_EXIST(chip))
+ rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFF00, 0x0100);
if (chip->auto_delink_en) {
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x01, 0x01);
@@ -1666,7 +1635,7 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
if (((chip->int_reg & int_enable) == 0) ||
- (chip->int_reg == 0xFFFFFFFF))
+ (chip->int_reg == 0xFFFFFFFF))
return STATUS_FAIL;
status = chip->int_reg &= (int_enable | 0x7FFFFF);
@@ -1676,12 +1645,12 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
if (status & SD_INT) {
if (status & SD_EXIST) {
- set_bit(SD_NR, &(chip->need_reset));
+ set_bit(SD_NR, &chip->need_reset);
} else {
- set_bit(SD_NR, &(chip->need_release));
+ set_bit(SD_NR, &chip->need_release);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
- clear_bit(SD_NR, &(chip->need_reset));
+ clear_bit(SD_NR, &chip->need_reset);
}
} else {
/* If multi-luns, it's possible that
@@ -1691,35 +1660,35 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
all existed cards should be reset.
*/
if (exit_ss && (status & SD_EXIST))
- set_bit(SD_NR, &(chip->need_reinit));
+ set_bit(SD_NR, &chip->need_reinit);
}
if (!CHECK_PID(chip, 0x5288) || CHECK_BARO_PKG(chip, QFN)) {
if (status & XD_INT) {
if (status & XD_EXIST) {
- set_bit(XD_NR, &(chip->need_reset));
+ set_bit(XD_NR, &chip->need_reset);
} else {
- set_bit(XD_NR, &(chip->need_release));
+ set_bit(XD_NR, &chip->need_release);
chip->xd_reset_counter = 0;
chip->xd_show_cnt = 0;
- clear_bit(XD_NR, &(chip->need_reset));
+ clear_bit(XD_NR, &chip->need_reset);
}
} else {
if (exit_ss && (status & XD_EXIST))
- set_bit(XD_NR, &(chip->need_reinit));
+ set_bit(XD_NR, &chip->need_reinit);
}
}
if (status & MS_INT) {
if (status & MS_EXIST) {
- set_bit(MS_NR, &(chip->need_reset));
+ set_bit(MS_NR, &chip->need_reset);
} else {
- set_bit(MS_NR, &(chip->need_release));
+ set_bit(MS_NR, &chip->need_release);
chip->ms_reset_counter = 0;
chip->ms_show_cnt = 0;
- clear_bit(MS_NR, &(chip->need_reset));
+ clear_bit(MS_NR, &chip->need_reset);
}
} else {
if (exit_ss && (status & MS_EXIST))
- set_bit(MS_NR, &(chip->need_reinit));
+ set_bit(MS_NR, &chip->need_reinit);
}
}
@@ -1727,10 +1696,8 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
chip->ocp_int = ocp_int & status;
#endif
- if (chip->sd_io) {
- if (chip->int_reg & DATA_DONE_INT)
- chip->int_reg &= ~(u32)DATA_DONE_INT;
- }
+ if (chip->sd_io && (chip->int_reg & DATA_DONE_INT))
+ chip->int_reg &= ~(u32)DATA_DONE_INT;
return STATUS_SUCCESS;
}
@@ -1774,14 +1741,14 @@ void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
if (pm_stat == PM_S1) {
dev_dbg(rtsx_dev(chip), "Host enter S1\n");
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
- HOST_ENTER_S1);
+ HOST_ENTER_S1);
} else if (pm_stat == PM_S3) {
if (chip->s3_pwr_off_delay > 0)
wait_timeout(chip->s3_pwr_off_delay);
dev_dbg(rtsx_dev(chip), "Host enter S3\n");
rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03,
- HOST_ENTER_S3);
+ HOST_ENTER_S3);
}
if (chip->do_delink_before_power_down && chip->auto_delink_en)
@@ -1796,31 +1763,25 @@ void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
void rtsx_enable_aspm(struct rtsx_chip *chip)
{
- if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
- if (!chip->aspm_enabled) {
- dev_dbg(rtsx_dev(chip), "Try to enable ASPM\n");
- chip->aspm_enabled = 1;
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm && !chip->aspm_enabled) {
+ dev_dbg(rtsx_dev(chip), "Try to enable ASPM\n");
+ chip->aspm_enabled = 1;
- if (chip->asic_code && CHECK_PID(chip, 0x5208))
- rtsx_write_phy_register(chip, 0x07, 0);
- if (CHECK_PID(chip, 0x5208)) {
- rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
- 0x30 | chip->aspm_level[0]);
- } else {
- rtsx_write_config_byte(chip, LCTLR,
- chip->aspm_l0s_l1_en);
- }
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0);
+ if (CHECK_PID(chip, 0x5208)) {
+ rtsx_write_register(chip, ASPM_FORCE_CTL, 0xF3,
+ 0x30 | chip->aspm_level[0]);
+ } else {
+ rtsx_write_config_byte(chip, LCTLR,
+ chip->aspm_l0s_l1_en);
+ }
- if (CHK_SDIO_EXIST(chip)) {
- u16 val = chip->aspm_l0s_l1_en | 0x0100;
+ if (CHK_SDIO_EXIST(chip)) {
+ u16 val = chip->aspm_l0s_l1_en | 0x0100;
- if (CHECK_PID(chip, 0x5288))
- rtsx_write_cfg_dw(chip, 2, 0xC0,
- 0xFFFF, val);
- else
- rtsx_write_cfg_dw(chip, 1, 0xC0,
- 0xFFFF, val);
- }
+ rtsx_write_cfg_dw(chip, CHECK_PID(chip, 0x5288) ? 2 : 1,
+ 0xC0, 0xFFF, val);
}
}
}
@@ -1830,21 +1791,19 @@ void rtsx_disable_aspm(struct rtsx_chip *chip)
if (CHECK_PID(chip, 0x5208))
rtsx_monitor_aspm_config(chip);
- if (chip->aspm_l0s_l1_en && chip->dynamic_aspm) {
- if (chip->aspm_enabled) {
- dev_dbg(rtsx_dev(chip), "Try to disable ASPM\n");
- chip->aspm_enabled = 0;
+ if (chip->aspm_l0s_l1_en && chip->dynamic_aspm && chip->aspm_enabled) {
+ dev_dbg(rtsx_dev(chip), "Try to disable ASPM\n");
+ chip->aspm_enabled = 0;
- if (chip->asic_code && CHECK_PID(chip, 0x5208))
- rtsx_write_phy_register(chip, 0x07, 0x0129);
- if (CHECK_PID(chip, 0x5208))
- rtsx_write_register(chip, ASPM_FORCE_CTL,
- 0xF3, 0x30);
- else
- rtsx_write_config_byte(chip, LCTLR, 0x00);
+ if (chip->asic_code && CHECK_PID(chip, 0x5208))
+ rtsx_write_phy_register(chip, 0x07, 0x0129);
+ if (CHECK_PID(chip, 0x5208))
+ rtsx_write_register(chip, ASPM_FORCE_CTL,
+ 0xF3, 0x30);
+ else
+ rtsx_write_config_byte(chip, LCTLR, 0x00);
- wait_timeout(1);
- }
+ wait_timeout(1);
}
}
@@ -1907,7 +1866,7 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
for (j = 0; j < 256; j++) {
rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
- *ptr);
+ *ptr);
ptr++;
}
@@ -1921,7 +1880,7 @@ int rtsx_write_ppbuf(struct rtsx_chip *chip, u8 *buf, int buf_len)
for (j = 0; j < buf_len%256; j++) {
rtsx_add_cmd(chip, WRITE_REG_CMD, reg_addr++, 0xFF,
- *ptr);
+ *ptr);
ptr++;
}
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index bbbf7968a0b6..11610826acf1 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -584,9 +584,8 @@ static int start_stop_unit(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case MAKE_MEDIUM_READY:
case LOAD_MEDIUM:
- if (check_card_ready(chip, lun)) {
+ if (check_card_ready(chip, lun))
return TRANSPORT_GOOD;
- }
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, TRANSPORT_FAILED);
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 0a67dca72dff..756a9687c293 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -539,7 +539,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
else
- sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
+ sg_cnt = HOST_SG_TBL_BUF_LEN / 8;
chip->sgi = 0;
for (j = 0; j < sg_cnt; j++) {
@@ -728,15 +728,13 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
return -EIO;
- if (use_sg) {
+ if (use_sg)
err = rtsx_transfer_sglist_adma_partial(chip, card,
(struct scatterlist *)buf, use_sg,
index, offset, (int)len, dma_dir, timeout);
- } else {
+ else
err = rtsx_transfer_buf(chip, card,
buf, len, dma_dir, timeout);
- }
-
if (err < 0) {
if (RTSX_TST_DELINK(chip)) {
RTSX_CLR_DELINK(chip);
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index b4b112372776..899bc2079dbe 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -46,7 +46,7 @@ void rtsx_add_cmd(struct rtsx_chip *chip,
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
-extern inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
+static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
{
#ifdef CMD_USING_SG
return (u8 *)(chip->host_sg_tbl_ptr);
diff --git a/drivers/staging/skein/Kconfig b/drivers/staging/skein/Kconfig
index b9172bfcdc1b..012a8233376e 100644
--- a/drivers/staging/skein/Kconfig
+++ b/drivers/staging/skein/Kconfig
@@ -1,8 +1,8 @@
config CRYPTO_SKEIN
- bool "Skein digest algorithm"
+ tristate "Skein digest algorithm"
depends on (X86 || UML_X86) && 64BIT && CRYPTO
- select CRYPTO_THREEFISH
select CRYPTO_HASH
+ select CRYPTO_ALGAPI
help
Skein secure hash algorithm is one of 5 finalists from the NIST SHA3
competition.
@@ -12,21 +12,5 @@ config CRYPTO_SKEIN
http://www.skein-hash.info/sites/default/files/skein1.3.pdf
- for more information. This module depends on the threefish block
- cipher module.
-
-config CRYPTO_THREEFISH
- bool "Threefish tweakable block cipher"
- depends on (X86 || UML_X86) && 64BIT && CRYPTO
- select CRYPTO_ALGAPI
- help
- Threefish cipher algorithm is the tweakable block cipher underneath
- the Skein family of secure hash algorithms. Skein is one of 5
- finalists from the NIST SHA3 competition.
-
- Skein is optimized for modern, 64bit processors and is highly
- customizable. See:
-
- http://www.skein-hash.info/sites/default/files/skein1.3.pdf
-
- for more information.
+ for more information. This module also contains the threefish block
+ cipher algorithm.
diff --git a/drivers/staging/skein/Makefile b/drivers/staging/skein/Makefile
index a14aaddd829c..b7f947fb98f0 100644
--- a/drivers/staging/skein/Makefile
+++ b/drivers/staging/skein/Makefile
@@ -1,9 +1,10 @@
#
# Makefile for the skein secure hash algorithm
#
-obj-$(CONFIG_CRYPTO_SKEIN) += skein.o \
- skein_api.o \
- skein_block.o
-
-obj-$(CONFIG_CRYPTO_THREEFISH) += threefish_block.o \
- threefish_api.o
+obj-$(CONFIG_CRYPTO_SKEIN) += skein.o
+skein-y := skein_base.o \
+ skein_api.o \
+ skein_block.o \
+ threefish_block.o \
+ threefish_api.o \
+ skein_generic.o
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 6e700eefc00c..5bfce076f7c8 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -31,7 +31,7 @@ int skein_ctx_prepare(struct skein_ctx *ctx, enum skein_size size)
{
skein_assert_ret(ctx && size, SKEIN_FAIL);
- memset(ctx , 0, sizeof(struct skein_ctx));
+ memset(ctx, 0, sizeof(struct skein_ctx));
ctx->skein_size = size;
return SKEIN_SUCCESS;
diff --git a/drivers/staging/skein/skein_api.h b/drivers/staging/skein/skein_api.h
index e02fa19d9458..171b87549548 100644
--- a/drivers/staging/skein/skein_api.h
+++ b/drivers/staging/skein/skein_api.h
@@ -79,7 +79,7 @@ OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include "skein.h"
+#include "skein_base.h"
/**
* Which Skein size to use
diff --git a/drivers/staging/skein/skein.c b/drivers/staging/skein/skein_base.c
index 8cc83587b1f1..7e700a6b5788 100644
--- a/drivers/staging/skein/skein.c
+++ b/drivers/staging/skein/skein_base.c
@@ -8,10 +8,9 @@
**
************************************************************************/
-#define SKEIN_PORT_CODE /* instantiate any code in skein_port.h */
-
#include <linux/string.h> /* get the memcpy/memset functions */
-#include "skein.h" /* get the Skein API definitions */
+#include <linux/export.h>
+#include "skein_base.h" /* get the Skein API definitions */
#include "skein_iv.h" /* get precomputed IVs */
#include "skein_block.h"
@@ -125,8 +124,6 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(256, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -233,8 +230,6 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -354,8 +349,6 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(512, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -462,8 +455,6 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(512, &ctx->h, n,
- hash_val+i*SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -578,8 +569,6 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
cfg.w[2] = skein_swap64(tree_info);
- skein_show_key(1024, &ctx->h, key, key_bytes);
-
/* compute the initial chaining values from config block */
skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
@@ -686,8 +675,6 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(1024, &ctx->h, n,
- hash_val+i*SKEIN_1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -795,8 +782,6 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -834,8 +819,6 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -873,8 +856,6 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
/* "output" the ctr mode bytes */
skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
n);
- skein_show_final(256, &ctx->h, n,
- hash_val+i*SKEIN_1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
diff --git a/drivers/staging/skein/skein.h b/drivers/staging/skein/skein_base.h
index e6669f196e5d..3c7f8ad3627d 100644
--- a/drivers/staging/skein/skein.h
+++ b/drivers/staging/skein/skein_base.h
@@ -15,10 +15,6 @@
**
** The "default" note explains what happens when the switch is not defined.
**
-** SKEIN_DEBUG -- make callouts from inside Skein code
-** to examine/display intermediate values.
-** [default: no callouts (no overhead)]
-**
** SKEIN_ERR_CHECK -- how error checking is handled inside Skein
** code. If not defined, most error checking
** is disabled (for performance). Otherwise,
@@ -28,9 +24,10 @@
**
***************************************************************************/
-#ifndef rotl_64
-#define rotl_64(x, N) (((x) << (N)) | ((x) >> (64-(N))))
-#endif
+/* Skein digest sizes for crypto API */
+#define SKEIN256_DIGEST_BIT_SIZE 256
+#define SKEIN512_DIGEST_BIT_SIZE 512
+#define SKEIN1024_DIGEST_BIT_SIZE 1024
/* below two prototype assume we are handed aligned data */
#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
@@ -44,12 +41,12 @@ enum {
SKEIN_BAD_HASHLEN = 2
};
-#define SKEIN_MODIFIER_WORDS (2) /* number of modifier (tweak) words */
+#define SKEIN_MODIFIER_WORDS 2 /* number of modifier (tweak) words */
-#define SKEIN_256_STATE_WORDS (4)
-#define SKEIN_512_STATE_WORDS (8)
-#define SKEIN_1024_STATE_WORDS (16)
-#define SKEIN_MAX_STATE_WORDS (16)
+#define SKEIN_256_STATE_WORDS 4
+#define SKEIN_512_STATE_WORDS 8
+#define SKEIN_1024_STATE_WORDS 16
+#define SKEIN_MAX_STATE_WORDS 16
#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS)
#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS)
@@ -87,6 +84,11 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
};
+static inline u64 rotl_64(u64 x, u8 N)
+{
+ return (x << N) | (x >> (64 - N));
+}
+
/* Skein APIs for (incremental) "straight hashing" */
int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -273,19 +275,6 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
(hdr).tweak[1] |= SKEIN_T1_TREE_LEVEL(height); \
}
-/*****************************************************************
-** "Internal" Skein definitions for debugging and error checking
-******************************************************************/
-#ifdef SKEIN_DEBUG /* examine/display intermediate values? */
-#include "skein_debug.h"
-#else /* default is no callouts */
-#define skein_show_block(bits, ctx, x, blk_ptr, w_ptr, ks_event_ptr, ks_odd_ptr)
-#define skein_show_round(bits, ctx, r, x)
-#define skein_show_r_ptr(bits, ctx, r, x_ptr)
-#define skein_show_final(bits, ctx, cnt, out_ptr)
-#define skein_show_key(bits, ctx, key, key_bytes)
-#endif
-
/* ignore all asserts, for performance */
#define skein_assert_ret(x, ret_code)
#define skein_assert(x)
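
The rotl_64() helper introduced above replaces the old function-like macro with a typed static inline, so both operands are type-checked and evaluated exactly once. A minimal stand-alone sketch of the same rotation, outside the kernel tree and with a hypothetical test value:

    #include <stdint.h>
    #include <stdio.h>

    /* same body as the new inline helper: rotate a 64-bit word left by n bits */
    static inline uint64_t rotl_64(uint64_t x, uint8_t n)
    {
            return (x << n) | (x >> (64 - n));
    }

    int main(void)
    {
            uint64_t v = 0x0123456789abcdefULL;     /* hypothetical test value */

            /* rotating left by 8 wraps the top byte around to the low byte */
            printf("%016llx\n", (unsigned long long)rotl_64(v, 8));
            return 0;
    }

Like the original macro, the helper assumes 0 < n < 64 so that both shifts stay in range.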
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 616364faf92e..66261ab25c88 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,7 +15,7 @@
************************************************************************/
#include <linux/string.h>
-#include "skein.h"
+#include "skein_base.h"
#include "skein_block.h"
#ifndef SKEIN_USE_ASM
@@ -26,32 +26,27 @@
#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */
#endif
-#define BLK_BITS (WCNT*64) /* some useful definitions for code here */
+#define BLK_BITS (WCNT * 64) /* some useful definitions for code here */
#define KW_TWK_BASE (0)
#define KW_KEY_BASE (3)
#define ks (kw + KW_KEY_BASE)
#define ts (kw + KW_TWK_BASE)
#ifdef SKEIN_DEBUG
-#define debug_save_tweak(ctx) { \
- ctx->h.tweak[0] = ts[0]; ctx->h.tweak[1] = ts[1]; }
+#define debug_save_tweak(ctx) \
+{ \
+ ctx->h.tweak[0] = ts[0]; \
+ ctx->h.tweak[1] = ts[1]; \
+}
#else
#define debug_save_tweak(ctx)
#endif
-/***************************** SKEIN_256 ******************************/
#if !(SKEIN_USE_ASM & 256)
-void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
- size_t blk_cnt, size_t byte_cnt_add)
- { /* do it in C */
- enum {
- WCNT = SKEIN_256_STATE_WORDS
- };
#undef RCNT
-#define RCNT (SKEIN_256_ROUNDS_TOTAL/8)
-
+#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_256 (((SKEIN_LOOP)/100)%10)
+#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10)
#else
#define SKEIN_UNROLL_256 (0)
#endif
@@ -60,17 +55,329 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
#if (RCNT % SKEIN_UNROLL_256)
#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */
#endif
- size_t r;
- u64 kw[WCNT+4+RCNT*2]; /* key schedule: chaining vars + tweak + "rot"*/
+#endif
+#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+} while (0)
+
+#if SKEIN_UNROLL_256 == 0
+#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
+do { \
+ ROUND256(p0, p1, p2, p3, ROT, r_num); \
+} while (0)
+
+#define I256(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[((R) + 1) % 5]; \
+ X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \
+ X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \
+ X3 += ks[((R) + 4) % 5] + (R) + 1; \
+} while (0)
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+/* looping version */
+#define R256(p0, p1, p2, p3, ROT, r_num) \
+do { \
+ ROUND256(p0, p1, p2, p3, ROT, r_num); \
+} while (0)
+
+#define I256(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[r + (R) + 0]; \
+ X1 += ks[r + (R) + 1] + ts[r + (R) + 0]; \
+ X2 += ks[r + (R) + 2] + ts[r + (R) + 1]; \
+ X3 += ks[r + (R) + 3] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 4] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+#endif
+#define R256_8_ROUNDS(R) \
+do { \
+ R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
+ R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
+ R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
+ R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
+ I256(2 * (R)); \
+ R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
+ R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
+ R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
+ R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
+ I256(2 * (R) + 1); \
+} while (0)
+
+#define R256_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_256 == 0 && \
+ SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \
+ (SKEIN_UNROLL_256 > (NN)))
+
+#if (SKEIN_UNROLL_256 > 14)
+#error "need more unrolling in skein_256_process_block"
+#endif
+#endif
+
+#if !(SKEIN_USE_ASM & 512)
+#undef RCNT
+#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
+
+#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
+#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
+#else
+#define SKEIN_UNROLL_512 (0)
+#endif
+
+#if SKEIN_UNROLL_512
+#if (RCNT % SKEIN_UNROLL_512)
+#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
+#endif
+#endif
+#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+ X##p4 += X##p5; \
+ X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 ^= X##p4; \
+ X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 ^= X##p6; \
+} while (0)
+
+#if SKEIN_UNROLL_512 == 0
+#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
+do { \
+ ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
+} while (0)
+
+#define I512(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[((R) + 1) % 9]; \
+ X1 += ks[((R) + 2) % 9]; \
+ X2 += ks[((R) + 3) % 9]; \
+ X3 += ks[((R) + 4) % 9]; \
+ X4 += ks[((R) + 5) % 9]; \
+ X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
+ X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
+ X7 += ks[((R) + 8) % 9] + (R) + 1; \
+} while (0)
+
+#else /* looping version */
+#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
+do { \
+ ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
+} while (0)
+
+#define I512(R) \
+do { \
+ /* inject the key schedule value */ \
+ X0 += ks[r + (R) + 0]; \
+ X1 += ks[r + (R) + 1]; \
+ X2 += ks[r + (R) + 2]; \
+ X3 += ks[r + (R) + 3]; \
+ X4 += ks[r + (R) + 4]; \
+ X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
+ X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
+ X7 += ks[r + (R) + 7] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 8] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+#endif /* end of looped code definitions */
+#define R512_8_ROUNDS(R) /* do 8 full rounds */ \
+do { \
+ R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
+ R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
+ R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
+ R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
+ I512(2 * (R)); \
+ R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
+ R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
+ R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
+ R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
+ I512(2 * (R) + 1); /* and key injection */ \
+} while (0)
+#define R512_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_512 == 0 && \
+ SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
+ (SKEIN_UNROLL_512 > (NN)))
+
+#if (SKEIN_UNROLL_512 > 14)
+#error "need more unrolling in skein_512_process_block"
+#endif
+#endif
+
+#if !(SKEIN_USE_ASM & 1024)
+#undef RCNT
+#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
+#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
+#define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10)
+#else
+#define SKEIN_UNROLL_1024 (0)
+#endif
+
+#if (SKEIN_UNROLL_1024 != 0)
+#if (RCNT % SKEIN_UNROLL_1024)
+#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
+#endif
+#endif
+#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, r_num) \
+do { \
+ X##p0 += X##p1; \
+ X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 ^= X##p0; \
+ X##p2 += X##p3; \
+ X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 ^= X##p2; \
+ X##p4 += X##p5; \
+ X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 ^= X##p4; \
+ X##p6 += X##p7; \
+ X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 ^= X##p6; \
+ X##p8 += X##p9; \
+ X##p9 = rotl_64(X##p9, ROT##_4); \
+ X##p9 ^= X##p8; \
+ X##pA += X##pB; \
+ X##pB = rotl_64(X##pB, ROT##_5); \
+ X##pB ^= X##pA; \
+ X##pC += X##pD; \
+ X##pD = rotl_64(X##pD, ROT##_6); \
+ X##pD ^= X##pC; \
+ X##pE += X##pF; \
+ X##pF = rotl_64(X##pF, ROT##_7); \
+ X##pF ^= X##pE; \
+} while (0)
+
+#if SKEIN_UNROLL_1024 == 0
+#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
+ ROT, rn) \
+do { \
+ ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, rn); \
+} while (0)
+
+#define I1024(R) \
+do { \
+ /* inject the key schedule value */ \
+ X00 += ks[((R) + 1) % 17]; \
+ X01 += ks[((R) + 2) % 17]; \
+ X02 += ks[((R) + 3) % 17]; \
+ X03 += ks[((R) + 4) % 17]; \
+ X04 += ks[((R) + 5) % 17]; \
+ X05 += ks[((R) + 6) % 17]; \
+ X06 += ks[((R) + 7) % 17]; \
+ X07 += ks[((R) + 8) % 17]; \
+ X08 += ks[((R) + 9) % 17]; \
+ X09 += ks[((R) + 10) % 17]; \
+ X10 += ks[((R) + 11) % 17]; \
+ X11 += ks[((R) + 12) % 17]; \
+ X12 += ks[((R) + 13) % 17]; \
+ X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
+ X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
+ X15 += ks[((R) + 16) % 17] + (R) + 1; \
+} while (0)
+#else /* looping version */
+#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
+ ROT, rn) \
+do { \
+ ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
+ pF, ROT, rn); \
+} while (0)
+
+#define I1024(R) \
+do { \
+ /* inject the key schedule value */ \
+ X00 += ks[r + (R) + 0]; \
+ X01 += ks[r + (R) + 1]; \
+ X02 += ks[r + (R) + 2]; \
+ X03 += ks[r + (R) + 3]; \
+ X04 += ks[r + (R) + 4]; \
+ X05 += ks[r + (R) + 5]; \
+ X06 += ks[r + (R) + 6]; \
+ X07 += ks[r + (R) + 7]; \
+ X08 += ks[r + (R) + 8]; \
+ X09 += ks[r + (R) + 9]; \
+ X10 += ks[r + (R) + 10]; \
+ X11 += ks[r + (R) + 11]; \
+ X12 += ks[r + (R) + 12]; \
+ X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
+ X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
+ X15 += ks[r + (R) + 15] + r + (R); \
+ /* rotate key schedule */ \
+ ks[r + (R) + 16] = ks[r + (R) - 1]; \
+ ts[r + (R) + 2] = ts[r + (R) - 1]; \
+} while (0)
+
+#endif
+#define R1024_8_ROUNDS(R) \
+do { \
+ R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
+ R1024_0, 8*(R) + 1); \
+ R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
+ R1024_1, 8*(R) + 2); \
+ R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
+ R1024_2, 8*(R) + 3); \
+ R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
+ R1024_3, 8*(R) + 4); \
+ I1024(2*(R)); \
+ R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
+ R1024_4, 8*(R) + 5); \
+ R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
+ R1024_5, 8*(R) + 6); \
+ R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
+ R1024_6, 8*(R) + 7); \
+ R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
+ R1024_7, 8*(R) + 8); \
+ I1024(2*(R)+1); \
+} while (0)
+
+#define R1024_UNROLL_R(NN) \
+ ((SKEIN_UNROLL_1024 == 0 && \
+ SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
+ (SKEIN_UNROLL_1024 > (NN)))
+
+#if (SKEIN_UNROLL_1024 > 14)
+#error "need more unrolling in Skein_1024_Process_Block"
+#endif
+#endif
+
+/***************************** SKEIN_256 ******************************/
+#if !(SKEIN_USE_ASM & 256)
+void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
+ size_t blk_cnt, size_t byte_cnt_add)
+{ /* do it in C */
+ enum {
+ WCNT = SKEIN_256_STATE_WORDS
+ };
+ size_t r;
+#if SKEIN_UNROLL_256
+ /* key schedule: chaining vars + tweak + "rot"*/
+ u64 kw[WCNT+4+RCNT*2];
+#else
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT+4];
#endif
u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
u64 w[WCNT]; /* local copy of input block */
#ifdef SKEIN_DEBUG
const u64 *X_ptr[4]; /* use for debugging (help cc put Xn in regs) */
- X_ptr[0] = &X0; X_ptr[1] = &X1; X_ptr[2] = &X2; X_ptr[3] = &X3;
+ X_ptr[0] = &X0;
+ X_ptr[1] = &X1;
+ X_ptr[2] = &X2;
+ X_ptr[3] = &X3;
#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
ts[0] = ctx->h.tweak[0];
@@ -94,132 +401,62 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
- X0 = w[0] + ks[0]; /* do the first full key injection */
+ /* do the first full key injection */
+ X0 = w[0] + ks[0];
X1 = w[1] + ks[1] + ts[0];
X2 = w[2] + ks[2] + ts[1];
X3 = w[3] + ks[3];
- /* show starting state values */
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- x_ptr);
-
blk_ptr += SKEIN_256_BLOCK_BYTES;
/* run the rounds */
-
-#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
-} while (0)
-
-#if SKEIN_UNROLL_256 == 0
-#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \
-do { \
- ROUND256(p0, p1, p2, p3, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, r_num, X_ptr); \
-} while (0)
-
-#define I256(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[((R)+1) % 5]; \
- X1 += ks[((R)+2) % 5] + ts[((R)+1) % 3]; \
- X2 += ks[((R)+3) % 5] + ts[((R)+2) % 3]; \
- X3 += ks[((R)+4) % 5] + (R)+1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R256(p0, p1, p2, p3, ROT, r_num) \
-do { \
- ROUND256(p0, p1, p2, p3, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + r_num, X_ptr); \
-} while (0)
-
-#define I256(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[r+(R)+0]; \
- X1 += ks[r+(R)+1] + ts[r+(R)+0]; \
- X2 += ks[r+(R)+2] + ts[r+(R)+1]; \
- X3 += ks[r+(R)+3] + r+(R); \
- /* rotate key schedule */ \
- ks[r + (R) + 4] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_256)
+ for (r = 1;
+ r < (SKEIN_UNROLL_256 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_256 ? 2 * SKEIN_UNROLL_256 : 1)) {
+ R256_8_ROUNDS(0);
+#if R256_UNROLL_R(1)
+ R256_8_ROUNDS(1);
+#endif
+#if R256_UNROLL_R(2)
+ R256_8_ROUNDS(2);
+#endif
+#if R256_UNROLL_R(3)
+ R256_8_ROUNDS(3);
+#endif
+#if R256_UNROLL_R(4)
+ R256_8_ROUNDS(4);
+#endif
+#if R256_UNROLL_R(5)
+ R256_8_ROUNDS(5);
+#endif
+#if R256_UNROLL_R(6)
+ R256_8_ROUNDS(6);
+#endif
+#if R256_UNROLL_R(7)
+ R256_8_ROUNDS(7);
+#endif
+#if R256_UNROLL_R(8)
+ R256_8_ROUNDS(8);
+#endif
+#if R256_UNROLL_R(9)
+ R256_8_ROUNDS(9);
+#endif
+#if R256_UNROLL_R(10)
+ R256_8_ROUNDS(10);
+#endif
+#if R256_UNROLL_R(11)
+ R256_8_ROUNDS(11);
+#endif
+#if R256_UNROLL_R(12)
+ R256_8_ROUNDS(12);
+#endif
+#if R256_UNROLL_R(13)
+ R256_8_ROUNDS(13);
+#endif
+#if R256_UNROLL_R(14)
+ R256_8_ROUNDS(14);
#endif
- {
-#define R256_8_ROUNDS(R) \
-do { \
- R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \
- R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \
- R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \
- R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \
- I256(2 * (R)); \
- R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \
- R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \
- R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \
- R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \
- I256(2 * (R) + 1); \
-} while (0)
-
- R256_8_ROUNDS(0);
-
-#define R256_UNROLL_R(NN) \
- ((SKEIN_UNROLL_256 == 0 && \
- SKEIN_256_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_256 > (NN)))
-
- #if R256_UNROLL_R(1)
- R256_8_ROUNDS(1);
- #endif
- #if R256_UNROLL_R(2)
- R256_8_ROUNDS(2);
- #endif
- #if R256_UNROLL_R(3)
- R256_8_ROUNDS(3);
- #endif
- #if R256_UNROLL_R(4)
- R256_8_ROUNDS(4);
- #endif
- #if R256_UNROLL_R(5)
- R256_8_ROUNDS(5);
- #endif
- #if R256_UNROLL_R(6)
- R256_8_ROUNDS(6);
- #endif
- #if R256_UNROLL_R(7)
- R256_8_ROUNDS(7);
- #endif
- #if R256_UNROLL_R(8)
- R256_8_ROUNDS(8);
- #endif
- #if R256_UNROLL_R(9)
- R256_8_ROUNDS(9);
- #endif
- #if R256_UNROLL_R(10)
- R256_8_ROUNDS(10);
- #endif
- #if R256_UNROLL_R(11)
- R256_8_ROUNDS(11);
- #endif
- #if R256_UNROLL_R(12)
- R256_8_ROUNDS(12);
- #endif
- #if R256_UNROLL_R(13)
- R256_8_ROUNDS(13);
- #endif
- #if R256_UNROLL_R(14)
- R256_8_ROUNDS(14);
- #endif
- #if (SKEIN_UNROLL_256 > 14)
-#error "need more unrolling in skein_256_process_block"
- #endif
}
/* do the final "feedforward" xor, update context chaining */
ctx->x[0] = X0 ^ w[0];
@@ -227,8 +464,6 @@ do { \
ctx->x[2] = X2 ^ w[2];
ctx->x[3] = X3 ^ w[3];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
-
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
} while (--blk_cnt);
ctx->h.tweak[0] = ts[0];
@@ -256,20 +491,8 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
enum {
WCNT = SKEIN_512_STATE_WORDS
};
-#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
-
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
-#else
-#define SKEIN_UNROLL_512 (0)
-#endif
-
-#if SKEIN_UNROLL_512
-#if (RCNT % SKEIN_UNROLL_512)
-#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */
-#endif
size_t r;
+#if SKEIN_UNROLL_512
u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
#else
u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
@@ -279,8 +502,14 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
#ifdef SKEIN_DEBUG
const u64 *X_ptr[8]; /* use for debugging (help cc put Xn in regs) */
- X_ptr[0] = &X0; X_ptr[1] = &X1; X_ptr[2] = &X2; X_ptr[3] = &X3;
- X_ptr[4] = &X4; X_ptr[5] = &X5; X_ptr[6] = &X6; X_ptr[7] = &X7;
+ X_ptr[0] = &X0;
+ X_ptr[1] = &X1;
+ X_ptr[2] = &X2;
+ X_ptr[3] = &X3;
+ X_ptr[4] = &X4;
+ X_ptr[5] = &X5;
+ X_ptr[6] = &X6;
+ X_ptr[7] = &X7;
#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
@@ -310,143 +539,68 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
- X0 = w[0] + ks[0]; /* do the first full key injection */
- X1 = w[1] + ks[1];
- X2 = w[2] + ks[2];
- X3 = w[3] + ks[3];
- X4 = w[4] + ks[4];
- X5 = w[5] + ks[5] + ts[0];
- X6 = w[6] + ks[6] + ts[1];
- X7 = w[7] + ks[7];
+ /* do the first full key injection */
+ X0 = w[0] + ks[0];
+ X1 = w[1] + ks[1];
+ X2 = w[2] + ks[2];
+ X3 = w[3] + ks[3];
+ X4 = w[4] + ks[4];
+ X5 = w[5] + ks[5] + ts[0];
+ X6 = w[6] + ks[6] + ts[1];
+ X7 = w[7] + ks[7];
blk_ptr += SKEIN_512_BLOCK_BYTES;
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- X_ptr);
/* run the rounds */
-#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
- X##p4 += X##p5; X##p5 = rotl_64(X##p5, ROT##_2); X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); X##p7 ^= X##p6; \
-} while (0)
-
-#if SKEIN_UNROLL_512 == 0
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \
-do { \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
- skein_show_r_ptr(BLK_BITS, &ctx->h, r_num, X_ptr); \
-} while (0)
-
-#define I512(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[((R) + 1) % 9]; \
- X1 += ks[((R) + 2) % 9]; \
- X2 += ks[((R) + 3) % 9]; \
- X3 += ks[((R) + 4) % 9]; \
- X4 += ks[((R) + 5) % 9]; \
- X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \
- X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \
- X7 += ks[((R) + 8) % 9] + (R) + 1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
-do { \
- ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + r_num, X_ptr); \
-} while (0)
-
-#define I512(R) \
-do { \
- /* inject the key schedule value */ \
- X0 += ks[r + (R) + 0]; \
- X1 += ks[r + (R) + 1]; \
- X2 += ks[r + (R) + 2]; \
- X3 += ks[r + (R) + 3]; \
- X4 += ks[r + (R) + 4]; \
- X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \
- X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \
- X7 += ks[r + (R) + 7] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 8] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r < 2 * RCNT; r += 2 * SKEIN_UNROLL_512)
-#endif /* end of looped code definitions */
- {
-#define R512_8_ROUNDS(R) /* do 8 full rounds */ \
-do { \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \
- I512(2 * (R)); \
- R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \
- R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \
- R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \
- R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \
- I512(2 * (R) + 1); /* and key injection */ \
-} while (0)
+ for (r = 1;
+ r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
R512_8_ROUNDS(0);
-#define R512_UNROLL_R(NN) \
- ((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_512 > (NN)))
-
- #if R512_UNROLL_R(1)
+#if R512_UNROLL_R(1)
R512_8_ROUNDS(1);
- #endif
- #if R512_UNROLL_R(2)
+#endif
+#if R512_UNROLL_R(2)
R512_8_ROUNDS(2);
- #endif
- #if R512_UNROLL_R(3)
+#endif
+#if R512_UNROLL_R(3)
R512_8_ROUNDS(3);
- #endif
- #if R512_UNROLL_R(4)
+#endif
+#if R512_UNROLL_R(4)
R512_8_ROUNDS(4);
- #endif
- #if R512_UNROLL_R(5)
+#endif
+#if R512_UNROLL_R(5)
R512_8_ROUNDS(5);
- #endif
- #if R512_UNROLL_R(6)
+#endif
+#if R512_UNROLL_R(6)
R512_8_ROUNDS(6);
- #endif
- #if R512_UNROLL_R(7)
+#endif
+#if R512_UNROLL_R(7)
R512_8_ROUNDS(7);
- #endif
- #if R512_UNROLL_R(8)
+#endif
+#if R512_UNROLL_R(8)
R512_8_ROUNDS(8);
- #endif
- #if R512_UNROLL_R(9)
+#endif
+#if R512_UNROLL_R(9)
R512_8_ROUNDS(9);
- #endif
- #if R512_UNROLL_R(10)
+#endif
+#if R512_UNROLL_R(10)
R512_8_ROUNDS(10);
- #endif
- #if R512_UNROLL_R(11)
+#endif
+#if R512_UNROLL_R(11)
R512_8_ROUNDS(11);
- #endif
- #if R512_UNROLL_R(12)
+#endif
+#if R512_UNROLL_R(12)
R512_8_ROUNDS(12);
- #endif
- #if R512_UNROLL_R(13)
+#endif
+#if R512_UNROLL_R(13)
R512_8_ROUNDS(13);
- #endif
- #if R512_UNROLL_R(14)
+#endif
+#if R512_UNROLL_R(14)
R512_8_ROUNDS(14);
- #endif
- #if (SKEIN_UNROLL_512 > 14)
-#error "need more unrolling in skein_512_process_block"
- #endif
+#endif
}
/* do the final "feedforward" xor, update context chaining */
@@ -458,7 +612,6 @@ do { \
ctx->x[5] = X5 ^ w[5];
ctx->x[6] = X6 ^ w[6];
ctx->x[7] = X7 ^ w[7];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
} while (--blk_cnt);
@@ -487,20 +640,8 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
enum {
WCNT = SKEIN_1024_STATE_WORDS
};
-#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
-
-#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_1024 ((SKEIN_LOOP)%10)
-#else
-#define SKEIN_UNROLL_1024 (0)
-#endif
-
-#if (SKEIN_UNROLL_1024 != 0)
-#if (RCNT % SKEIN_UNROLL_1024)
-#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */
-#endif
size_t r;
+#if (SKEIN_UNROLL_1024 != 0)
u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
#else
u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
@@ -510,16 +651,6 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
u64 X00, X01, X02, X03, X04, X05, X06, X07,
X08, X09, X10, X11, X12, X13, X14, X15;
u64 w[WCNT]; /* local copy of input block */
-#ifdef SKEIN_DEBUG
- const u64 *X_ptr[16]; /* use for debugging (help cc put Xn in regs) */
-
- X_ptr[0] = &X00; X_ptr[1] = &X01; X_ptr[2] = &X02;
- X_ptr[3] = &X03; X_ptr[4] = &X04; X_ptr[5] = &X05;
- X_ptr[6] = &X06; X_ptr[7] = &X07; X_ptr[8] = &X08;
- X_ptr[9] = &X09; X_ptr[10] = &X10; X_ptr[11] = &X11;
- X_ptr[12] = &X12; X_ptr[13] = &X13; X_ptr[14] = &X14;
- X_ptr[15] = &X15;
-#endif
skein_assert(blk_cnt != 0); /* never call with blk_cnt == 0! */
ts[0] = ctx->h.tweak[0];
@@ -548,192 +679,81 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
ks[13] = ctx->x[13];
ks[14] = ctx->x[14];
ks[15] = ctx->x[15];
- ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
- ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
- ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
+ ks[16] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^
+ ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^
+ ks[8] ^ ks[9] ^ ks[10] ^ ks[11] ^
ks[12] ^ ks[13] ^ ks[14] ^ ks[15] ^ SKEIN_KS_PARITY;
- ts[2] = ts[0] ^ ts[1];
+ ts[2] = ts[0] ^ ts[1];
/* get input block in little-endian format */
skein_get64_lsb_first(w, blk_ptr, WCNT);
debug_save_tweak(ctx);
- skein_show_block(BLK_BITS, &ctx->h, ctx->x, blk_ptr, w, ks, ts);
-
- X00 = w[0] + ks[0]; /* do the first full key injection */
- X01 = w[1] + ks[1];
- X02 = w[2] + ks[2];
- X03 = w[3] + ks[3];
- X04 = w[4] + ks[4];
- X05 = w[5] + ks[5];
- X06 = w[6] + ks[6];
- X07 = w[7] + ks[7];
- X08 = w[8] + ks[8];
- X09 = w[9] + ks[9];
- X10 = w[10] + ks[10];
- X11 = w[11] + ks[11];
- X12 = w[12] + ks[12];
- X13 = w[13] + ks[13] + ts[0];
- X14 = w[14] + ks[14] + ts[1];
- X15 = w[15] + ks[15];
-
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INITIAL,
- X_ptr);
-
-#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, r_num) \
-do { \
- X##p0 += X##p1; X##p1 = rotl_64(X##p1, ROT##_0); X##p1 ^= X##p0; \
- X##p2 += X##p3; X##p3 = rotl_64(X##p3, ROT##_1); X##p3 ^= X##p2; \
- X##p4 += X##p5; X##p5 = rotl_64(X##p5, ROT##_2); X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3); X##p7 ^= X##p6; \
- X##p8 += X##p9; X##p9 = rotl_64(X##p9, ROT##_4); X##p9 ^= X##p8; \
- X##pA += X##pB; X##pB = rotl_64(X##pB, ROT##_5); X##pB ^= X##pA; \
- X##pC += X##pD; X##pD = rotl_64(X##pD, ROT##_6); X##pD ^= X##pC; \
- X##pE += X##pF; X##pF = rotl_64(X##pF, ROT##_7); X##pF ^= X##pE; \
-} while (0)
-
-#if SKEIN_UNROLL_1024 == 0
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
-do { \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, rn, X_ptr); \
-} while (0)
-
-#define I1024(R) \
-do { \
- /* inject the key schedule value */ \
- X00 += ks[((R) + 1) % 17]; \
- X01 += ks[((R) + 2) % 17]; \
- X02 += ks[((R) + 3) % 17]; \
- X03 += ks[((R) + 4) % 17]; \
- X04 += ks[((R) + 5) % 17]; \
- X05 += ks[((R) + 6) % 17]; \
- X06 += ks[((R) + 7) % 17]; \
- X07 += ks[((R) + 8) % 17]; \
- X08 += ks[((R) + 9) % 17]; \
- X09 += ks[((R) + 10) % 17]; \
- X10 += ks[((R) + 11) % 17]; \
- X11 += ks[((R) + 12) % 17]; \
- X12 += ks[((R) + 13) % 17]; \
- X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \
- X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \
- X15 += ks[((R) + 16) % 17] + (R) + 1; \
- skein_show_r_ptr(BLK_BITS, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-#else /* looping version */
-#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \
- ROT, rn) \
-do { \
- ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \
- pF, ROT, rn); \
- skein_show_r_ptr(BLK_BITS, &ctx->h, 4 * (r - 1) + rn, X_ptr); \
-} while (0)
-
-#define I1024(R) \
-do { \
- /* inject the key schedule value */ \
- X00 += ks[r + (R) + 0]; \
- X01 += ks[r + (R) + 1]; \
- X02 += ks[r + (R) + 2]; \
- X03 += ks[r + (R) + 3]; \
- X04 += ks[r + (R) + 4]; \
- X05 += ks[r + (R) + 5]; \
- X06 += ks[r + (R) + 6]; \
- X07 += ks[r + (R) + 7]; \
- X08 += ks[r + (R) + 8]; \
- X09 += ks[r + (R) + 9]; \
- X10 += ks[r + (R) + 10]; \
- X11 += ks[r + (R) + 11]; \
- X12 += ks[r + (R) + 12]; \
- X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \
- X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \
- X15 += ks[r + (R) + 15] + r + (R); \
- /* rotate key schedule */ \
- ks[r + (R) + 16] = ks[r + (R) - 1]; \
- ts[r + (R) + 2] = ts[r + (R) - 1]; \
- skein_show_r_ptr(BLK_BITSi, &ctx->h, SKEIN_RND_KEY_INJECT, X_ptr); \
-} while (0)
-
- for (r = 1; r <= 2 * RCNT; r += 2 * SKEIN_UNROLL_1024)
-#endif
- {
-#define R1024_8_ROUNDS(R) \
-do { \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
- R1024_0, 8*(R) + 1); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
- R1024_1, 8*(R) + 2); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
- R1024_2, 8*(R) + 3); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
- R1024_3, 8*(R) + 4); \
- I1024(2*(R)); \
- R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15, \
- R1024_4, 8*(R) + 5); \
- R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, 05, 08, 01, \
- R1024_5, 8*(R) + 6); \
- R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, 11, 10, 09, \
- R1024_6, 8*(R) + 7); \
- R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, 03, 12, 07, \
- R1024_7, 8*(R) + 8); \
- I1024(2*(R)+1); \
-} while (0)
+ /* do the first full key injection */
+ X00 = w[0] + ks[0];
+ X01 = w[1] + ks[1];
+ X02 = w[2] + ks[2];
+ X03 = w[3] + ks[3];
+ X04 = w[4] + ks[4];
+ X05 = w[5] + ks[5];
+ X06 = w[6] + ks[6];
+ X07 = w[7] + ks[7];
+ X08 = w[8] + ks[8];
+ X09 = w[9] + ks[9];
+ X10 = w[10] + ks[10];
+ X11 = w[11] + ks[11];
+ X12 = w[12] + ks[12];
+ X13 = w[13] + ks[13] + ts[0];
+ X14 = w[14] + ks[14] + ts[1];
+ X15 = w[15] + ks[15];
+
+ for (r = 1;
+ r < (SKEIN_UNROLL_1024 ? 2 * RCNT : 2);
+ r += (SKEIN_UNROLL_1024 ? 2 * SKEIN_UNROLL_1024 : 1)) {
R1024_8_ROUNDS(0);
-
-#define R1024_UNROLL_R(NN) \
- ((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
- (SKEIN_UNROLL_1024 > (NN)))
-
- #if R1024_UNROLL_R(1)
+#if R1024_UNROLL_R(1)
R1024_8_ROUNDS(1);
- #endif
- #if R1024_UNROLL_R(2)
+#endif
+#if R1024_UNROLL_R(2)
R1024_8_ROUNDS(2);
- #endif
- #if R1024_UNROLL_R(3)
+#endif
+#if R1024_UNROLL_R(3)
R1024_8_ROUNDS(3);
- #endif
- #if R1024_UNROLL_R(4)
+#endif
+#if R1024_UNROLL_R(4)
R1024_8_ROUNDS(4);
- #endif
- #if R1024_UNROLL_R(5)
+#endif
+#if R1024_UNROLL_R(5)
R1024_8_ROUNDS(5);
- #endif
- #if R1024_UNROLL_R(6)
+#endif
+#if R1024_UNROLL_R(6)
R1024_8_ROUNDS(6);
- #endif
- #if R1024_UNROLL_R(7)
+#endif
+#if R1024_UNROLL_R(7)
R1024_8_ROUNDS(7);
- #endif
- #if R1024_UNROLL_R(8)
+#endif
+#if R1024_UNROLL_R(8)
R1024_8_ROUNDS(8);
- #endif
- #if R1024_UNROLL_R(9)
+#endif
+#if R1024_UNROLL_R(9)
R1024_8_ROUNDS(9);
- #endif
- #if R1024_UNROLL_R(10)
+#endif
+#if R1024_UNROLL_R(10)
R1024_8_ROUNDS(10);
- #endif
- #if R1024_UNROLL_R(11)
+#endif
+#if R1024_UNROLL_R(11)
R1024_8_ROUNDS(11);
- #endif
- #if R1024_UNROLL_R(12)
+#endif
+#if R1024_UNROLL_R(12)
R1024_8_ROUNDS(12);
- #endif
- #if R1024_UNROLL_R(13)
+#endif
+#if R1024_UNROLL_R(13)
R1024_8_ROUNDS(13);
- #endif
- #if R1024_UNROLL_R(14)
+#endif
+#if R1024_UNROLL_R(14)
R1024_8_ROUNDS(14);
- #endif
-#if (SKEIN_UNROLL_1024 > 14)
-#error "need more unrolling in Skein_1024_Process_Block"
- #endif
+#endif
}
/* do the final "feedforward" xor, update context chaining */
@@ -754,8 +774,6 @@ do { \
ctx->x[14] = X14 ^ w[14];
ctx->x[15] = X15 ^ w[15];
- skein_show_round(BLK_BITS, &ctx->h, SKEIN_RND_FEED_FWD, ctx->x);
-
ts[1] &= ~SKEIN_T1_FLAG_FIRST;
blk_ptr += SKEIN_1024_BLOCK_BYTES;
} while (--blk_cnt);
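
The restructuring above hoists the ROUND/key-injection macros to file scope and folds the formerly separate unrolled and looped code paths into one for loop whose bounds collapse at compile time. A toy sketch of that pattern (RCNT and UNROLL here are hypothetical stand-ins, not the kernel symbols):

    #include <stddef.h>

    #define RCNT    9   /* hypothetical rounds/8 count for the toy */
    #define UNROLL  0   /* 0 selects the fully unrolled path, >0 the looped path */

    void process_rounds(void)
    {
            size_t r;

            for (r = 1;
                 r < (UNROLL ? 2 * RCNT : 2);
                 r += (UNROLL ? 2 * UNROLL : 1)) {
                    /* the eight-round body (R256_8_ROUNDS(0) and friends) runs here */
            }
    }

With UNROLL defined to 0 the conditions reduce to a single pass (r = 1; r < 2; r += 1) and the compiler removes the loop entirely, matching the behaviour of the old fully unrolled variant.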
diff --git a/drivers/staging/skein/skein_block.h b/drivers/staging/skein/skein_block.h
index bd7bdc35df29..9d40f4a5267b 100644
--- a/drivers/staging/skein/skein_block.h
+++ b/drivers/staging/skein/skein_block.h
@@ -10,7 +10,7 @@
#ifndef _SKEIN_BLOCK_H_
#define _SKEIN_BLOCK_H_
-#include "skein.h" /* get the Skein API definitions */
+#include "skein_base.h" /* get the Skein API definitions */
void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
size_t blk_cnt, size_t byte_cnt_add);
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
new file mode 100644
index 000000000000..85bd7d0168b0
--- /dev/null
+++ b/drivers/staging/skein/skein_generic.c
@@ -0,0 +1,216 @@
+/*
+ * Cryptographic API.
+ *
+ * Skein256 Hash Algorithm.
+ *
+ * Derived from cryptoapi implementation, adapted for in-place
+ * scatterlist interface.
+ *
+ * Copyright (c) Eric Rost <eric.rost@mybabylon.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <crypto/internal/hash.h>
+#include "skein_base.h"
+
+
+static int skein256_init(struct shash_desc *desc)
+{
+ return skein_256_init((struct skein_256_ctx *) shash_desc_ctx(desc),
+ SKEIN256_DIGEST_BIT_SIZE);
+}
+
+static int skein256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein256_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_256_final((struct skein_256_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein256_export(struct shash_desc *desc, void *out)
+{
+ struct skein_256_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein256_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_256_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static int skein512_init(struct shash_desc *desc)
+{
+ return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc),
+ SKEIN512_DIGEST_BIT_SIZE);
+}
+
+static int skein512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein512_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein512_export(struct shash_desc *desc, void *out)
+{
+ struct skein_512_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein512_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_512_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static int skein1024_init(struct shash_desc *desc)
+{
+ return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ SKEIN1024_DIGEST_BIT_SIZE);
+}
+
+static int skein1024_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ data, len);
+}
+
+static int skein1024_final(struct shash_desc *desc, u8 *out)
+{
+ return skein_1024_final((struct skein_1024_ctx *)shash_desc_ctx(desc),
+ out);
+}
+
+static int skein1024_export(struct shash_desc *desc, void *out)
+{
+ struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int skein1024_import(struct shash_desc *desc, const void *in)
+{
+ struct skein_1024_ctx *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg alg256 = {
+ .digestsize = (SKEIN256_DIGEST_BIT_SIZE / 8),
+ .init = skein256_init,
+ .update = skein256_update,
+ .final = skein256_final,
+ .export = skein256_export,
+ .import = skein256_import,
+ .descsize = sizeof(struct skein_256_ctx),
+ .statesize = sizeof(struct skein_256_ctx),
+ .base = {
+ .cra_name = "skein256",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_256_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg alg512 = {
+ .digestsize = (SKEIN512_DIGEST_BIT_SIZE / 8),
+ .init = skein512_init,
+ .update = skein512_update,
+ .final = skein512_final,
+ .export = skein512_export,
+ .import = skein512_import,
+ .descsize = sizeof(struct skein_512_ctx),
+ .statesize = sizeof(struct skein_512_ctx),
+ .base = {
+ .cra_name = "skein512",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_512_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg alg1024 = {
+ .digestsize = (SKEIN1024_DIGEST_BIT_SIZE / 8),
+ .init = skein1024_init,
+ .update = skein1024_update,
+ .final = skein1024_final,
+ .export = skein1024_export,
+ .import = skein1024_import,
+ .descsize = sizeof(struct skein_1024_ctx),
+ .statesize = sizeof(struct skein_1024_ctx),
+ .base = {
+ .cra_name = "skein1024",
+ .cra_driver_name = "skein",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SKEIN_1024_BLOCK_BYTES,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init skein_generic_init(void)
+{
+ if (crypto_register_shash(&alg256))
+ goto out;
+ if (crypto_register_shash(&alg512))
+ goto unreg256;
+ if (crypto_register_shash(&alg1024))
+ goto unreg512;
+
+ return 0;
+
+
+unreg512:
+ crypto_unregister_shash(&alg512);
+unreg256:
+ crypto_unregister_shash(&alg256);
+out:
+ return -1;
+}
+
+static void __exit skein_generic_fini(void)
+{
+ crypto_unregister_shash(&alg256);
+ crypto_unregister_shash(&alg512);
+ crypto_unregister_shash(&alg1024);
+}
+
+module_init(skein_generic_init);
+module_exit(skein_generic_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Skein Hash Algorithm");
+
+MODULE_ALIAS("skein");
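
With skein_generic.c registering the three shash_alg entries above, other kernel code can reach the hashes by name through the generic crypto API. The following one-shot digest helper is an illustrative sketch, not part of the patch; the function name and error handling are assumptions:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <crypto/hash.h>

    /* hash a buffer with the "skein256" shash registered above;
     * out must have room for the 32-byte digest
     */
    static int skein256_digest_example(const u8 *data, unsigned int len, u8 *out)
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int err;

            tfm = crypto_alloc_shash("skein256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* the descriptor carries descsize bytes of per-request state */
            desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
            if (!desc) {
                    crypto_free_shash(tfm);
                    return -ENOMEM;
            }
            desc->tfm = tfm;

            err = crypto_shash_digest(desc, data, len, out);

            kfree(desc);
            crypto_free_shash(tfm);
            return err;
    }

crypto_shash_digest() wraps the init/update/final sequence in one call; sizing the allocation with crypto_shash_descsize() is what makes room for the struct skein_256_ctx that alg256 declares as its descsize.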
diff --git a/drivers/staging/skein/skein_iv.h b/drivers/staging/skein/skein_iv.h
index d9dc1d5ed551..8a06314d0ed4 100644
--- a/drivers/staging/skein/skein_iv.h
+++ b/drivers/staging/skein/skein_iv.h
@@ -1,7 +1,7 @@
#ifndef _SKEIN_IV_H_
#define _SKEIN_IV_H_
-#include "skein.h" /* get Skein macros and types */
+#include "skein_base.h" /* get Skein macros and types */
/*
***************** Pre-computed Skein IVs *******************
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8d5ddf8b3a9b..8e0a0b77ecce 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -29,7 +29,7 @@
*/
#include <linux/types.h>
-#include "skein.h"
+#include "skein_base.h"
#define KEY_SCHEDULE_CONST 0x1BD11BDAA9FC1A22L
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 56ca3b6c1444..42d62ef56cb8 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -498,12 +498,14 @@ static int slic_card_download(struct adapter *adapter)
slic_reg32_write(&slic_regs->slic_wcs,
baseaddress + codeaddr, FLUSH);
/* Write out instruction to low addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wcs,
+ instruction, FLUSH);
instruction = *(u32 *)(fw->data + index);
index += 4;
/* Write out instruction to high addr */
- slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
+ slic_reg32_write(&slic_regs->slic_wcs,
+ instruction, FLUSH);
instruction = *(u32 *)(fw->data + index);
index += 4;
}
@@ -596,8 +598,7 @@ static void slic_mac_address_config(struct adapter *adapter)
u32 value2;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
- value = *(u32 *) &adapter->currmacaddr[2];
- value = ntohl(value);
+ value = ntohl(*(__be32 *) &adapter->currmacaddr[2]);
slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
@@ -1533,14 +1534,18 @@ retry_rcvqfill:
dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
__func__);
dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
+ dev_err(dev, " skbdata[%p]\n",
+ skb->data);
dev_err(dev, " skblen[%x]\n", skb->len);
dev_err(dev, " paddr[%p]\n", paddr);
dev_err(dev, " paddrl[%x]\n", paddrl);
dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
+ dev_err(dev, " rcvq->head[%p]\n",
+ rcvq->head);
+ dev_err(dev, " rcvq->tail[%p]\n",
+ rcvq->tail);
+ dev_err(dev, " rcvq->count[%x]\n",
+ rcvq->count);
dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
goto retry_rcvqfill;
}
@@ -1549,14 +1554,18 @@ retry_rcvqfill:
dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
__func__);
dev_err(dev, "skb[%p] PROBLEM\n", skb);
- dev_err(dev, " skbdata[%p]\n", skb->data);
+ dev_err(dev, " skbdata[%p]\n",
+ skb->data);
dev_err(dev, " skblen[%x]\n", skb->len);
dev_err(dev, " paddr[%p]\n", paddr);
dev_err(dev, " paddrl[%x]\n", paddrl);
dev_err(dev, " paddrh[%x]\n", paddrh);
- dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
- dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
- dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
+ dev_err(dev, " rcvq->head[%p]\n",
+ rcvq->head);
+ dev_err(dev, " rcvq->tail[%p]\n",
+ rcvq->tail);
+ dev_err(dev, " rcvq->count[%x]\n",
+ rcvq->count);
dev_err(dev, "GIVE TO CARD ANYWAY\n");
}
#endif
@@ -1612,7 +1621,7 @@ static int slic_rcvqueue_init(struct adapter *adapter)
rcvq->size = SLIC_RCVQ_ENTRIES;
rcvq->errors = 0;
rcvq->count = 0;
- i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES);
+ i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
count = 0;
while (i) {
count += slic_rcvqueue_fill(adapter);
@@ -1788,7 +1797,7 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
if (mcaddr == NULL)
return 1;
- memcpy(mcaddr->address, address, ETH_ALEN);
+ ether_addr_copy(mcaddr->address, address);
mcaddr->next = adapter->mcastaddrs;
adapter->mcastaddrs = mcaddr;
@@ -1885,7 +1894,8 @@ static void slic_xmit_fail(struct adapter *adapter,
break;
case XMIT_FAIL_HOSTCMD_FAIL:
dev_err(&adapter->netdev->dev,
- "xmit_start skb[%p] type[%x] No host commands available\n", skb, skb->pkt_type);
+ "xmit_start skb[%p] type[%x] No host commands available\n",
+ skb, skb->pkt_type);
break;
}
}
@@ -2097,7 +2107,8 @@ static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
}
} else if (isr & ISR_XDROP) {
dev_err(&dev->dev,
- "isr & ISR_ERR [%x] ISR_XDROP\n", isr);
+ "isr & ISR_ERR [%x] ISR_XDROP\n",
+ isr);
} else {
dev_err(&dev->dev,
"isr & ISR_ERR [%x]\n",
@@ -2341,7 +2352,8 @@ static int slic_if_init(struct adapter *adapter)
SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
#else
slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
- slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH);
+ slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr,
+ FLUSH);
#endif
spin_unlock_irqrestore(&adapter->bit64reglock.lock,
adapter->bit64reglock.flags);
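
The slic_mac_address_config() hunk above converts the MAC-address load to read the bytes through a __be32 and byte-swap once with ntohl(), which keeps sparse endianness checking happy. A hedged sketch of the same pattern, with a hypothetical helper name:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* return bytes 2..5 of a MAC address as a host-order 32-bit value */
    static u32 mac_low32(const u8 *mac)
    {
            __be32 be;

            /* copying avoids unaligned access and type-punning through a cast */
            memcpy(&be, mac + 2, sizeof(be));
            return ntohl(be);
    }

get_unaligned_be32() from <asm/unaligned.h> would be another common way to spell the same load.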
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index bcc7f62654f4..b12c76de60b0 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -81,7 +81,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
static void report_char_chartab_status(int reset, int received, int used,
int rejected, int do_characters)
{
- char *object_type[] = {
+ static char const *object_type[] = {
"character class entries",
"character descriptions",
};
@@ -809,10 +809,10 @@ static ssize_t message_store_helper(const char *buf, size_t count,
if (msg_stored == -ENOMEM)
reset = 1;
break;
- } else {
- used++;
}
+ used++;
+
cp = linefeed + 1;
}
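
Making the object_type table above (and delay_str in the main.c hunk that follows) static const means the array is built once at compile time rather than on the function's stack at every call, and the const on the pointed-to strings documents that the entries are never written. Adding a second const would also make the pointer array itself read-only; a sketch of that variant, which goes beyond what the patch does:

    static const char * const object_type[] = {
            "character class entries",
            "character descriptions",
    };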
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3f30a1b6e72c..e9f0c150d246 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -695,7 +695,7 @@ static void say_next_word(struct vc_data *vc)
static void spell_word(struct vc_data *vc)
{
- static char *delay_str[] = { "", ",", ".", ". .", ". . ." };
+ static char const *delay_str[] = { "", ",", ".", ". .", ". . ." };
char *cp = buf, *str_cap = spk_str_caps_stop;
char *cp1, *last_cap = spk_str_caps_stop;
u_char ch;
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index d7d515273896..4e059ea78d4c 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -231,7 +231,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = PROCSPEECH;
spk_out(ch);
- if ((jiffies >= jiff_max) && (ch == SPACE)) {
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spk_out(PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
delay_time_val = delay_time->u.n.value;
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 4ed38898a17a..cef20fdda646 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -229,7 +229,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = PROCSPEECH;
outb_p(ch, synth_port);
SWAIT;
- if ((jiffies >= jiff_max) && (ch == SPACE)) {
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
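
Both speakup hunks above switch the timeout test from a raw "jiffies >= jiff_max" comparison to time_after_eq(), which stays correct when the jiffies counter wraps. A minimal sketch of the idiom, with a hypothetical helper that assumes a sleepable context:

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    /* sleep until the given deadline (in jiffies) has passed */
    static void wait_until(unsigned long deadline)
    {
            while (!time_after_eq(jiffies, deadline))
                    msleep(1);
    }

    /* usage: wait_until(jiffies + msecs_to_jiffies(50)); */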
diff --git a/drivers/staging/unisys/channels/channel.c b/drivers/staging/unisys/channels/channel.c
index b4bdee4b575b..74cc4d6b515f 100644
--- a/drivers/staging/unisys/channels/channel.c
+++ b/drivers/staging/unisys/channels/channel.c
@@ -43,45 +43,45 @@
* Return value:
* 1 if the insertion succeeds, 0 if the queue was full.
*/
-unsigned char
-visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal)
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig)
{
void __iomem *psignal;
unsigned int head, tail, nof;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *)
- ((char __iomem *) pChannel + readq(&pChannel->oChannelSpace))
- + Queue;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)
+ ((char __iomem *)ch + readq(&ch->ch_space_offset))
+ + queue;
/* capture current head and tail */
- head = readl(&pqhdr->Head);
- tail = readl(&pqhdr->Tail);
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
/* queue is full if (head + 1) % n equals tail */
- if (((head + 1) % readl(&pqhdr->MaxSignalSlots)) == tail) {
- nof = readq(&pqhdr->NumOverflows) + 1;
- writeq(nof, &pqhdr->NumOverflows);
+ if (((head + 1) % readl(&pqhdr->max_slots)) == tail) {
+ nof = readq(&pqhdr->num_overflows) + 1;
+ writeq(nof, &pqhdr->num_overflows);
return 0;
}
/* increment the head index */
- head = (head + 1) % readl(&pqhdr->MaxSignalSlots);
+ head = (head + 1) % readl(&pqhdr->max_slots);
/* copy signal to the head location from the area pointed to
* by pSignal
*/
- psignal = (char __iomem *)pqhdr + readq(&pqhdr->oSignalBase) +
- (head * readl(&pqhdr->SignalSize));
- memcpy_toio(psignal, pSignal, readl(&pqhdr->SignalSize));
+ psignal = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (head * readl(&pqhdr->signal_size));
+ memcpy_toio(psignal, sig, readl(&pqhdr->signal_size));
mb(); /* channel synch */
- writel(head, &pqhdr->Head);
+ writel(head, &pqhdr->head);
- writeq(readq(&pqhdr->NumSignalsSent) + 1, &pqhdr->NumSignalsSent);
+ writeq(readq(&pqhdr->num_sent) + 1, &pqhdr->num_sent);
return 1;
}
-EXPORT_SYMBOL_GPL(visor_signal_insert);
+EXPORT_SYMBOL_GPL(spar_signal_insert);
/*
* Routine Description:
@@ -102,40 +102,40 @@ EXPORT_SYMBOL_GPL(visor_signal_insert);
* 1 if the removal succeeds, 0 if the queue was empty.
*/
unsigned char
-visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue, void *pSignal)
+spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig)
{
void __iomem *psource;
unsigned int head, tail;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
- readq(&pChannel->oChannelSpace)) + Queue;
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
/* capture current head and tail */
- head = readl(&pqhdr->Head);
- tail = readl(&pqhdr->Tail);
+ head = readl(&pqhdr->head);
+ tail = readl(&pqhdr->tail);
/* queue is empty if the head index equals the tail index */
if (head == tail) {
- writeq(readq(&pqhdr->NumEmptyCnt) + 1, &pqhdr->NumEmptyCnt);
+ writeq(readq(&pqhdr->num_empty) + 1, &pqhdr->num_empty);
return 0;
}
/* advance past the 'empty' front slot */
- tail = (tail + 1) % readl(&pqhdr->MaxSignalSlots);
+ tail = (tail + 1) % readl(&pqhdr->max_slots);
/* copy signal from tail location to the area pointed to by pSignal */
- psource = (char __iomem *) pqhdr + readq(&pqhdr->oSignalBase) +
- (tail * readl(&pqhdr->SignalSize));
- memcpy_fromio(pSignal, psource, readl(&pqhdr->SignalSize));
+ psource = (char __iomem *)pqhdr + readq(&pqhdr->sig_base_offset) +
+ (tail * readl(&pqhdr->signal_size));
+ memcpy_fromio(sig, psource, readl(&pqhdr->signal_size));
mb(); /* channel synch */
- writel(tail, &pqhdr->Tail);
+ writel(tail, &pqhdr->tail);
- writeq(readq(&pqhdr->NumSignalsReceived) + 1,
- &pqhdr->NumSignalsReceived);
+ writeq(readq(&pqhdr->num_received) + 1,
+ &pqhdr->num_received);
return 1;
}
-EXPORT_SYMBOL_GPL(visor_signal_remove);
+EXPORT_SYMBOL_GPL(spar_signal_remove);
/*
* Routine Description:
@@ -156,18 +156,18 @@ EXPORT_SYMBOL_GPL(visor_signal_remove);
* Return value:
* # of signals copied.
*/
-unsigned int
-SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig)
{
void *psource;
- unsigned int head, tail, signalCount = 0;
- pSIGNAL_QUEUE_HEADER pqhdr =
- (pSIGNAL_QUEUE_HEADER) ((char *) pChannel +
- pChannel->oChannelSpace) + Queue;
+ unsigned int head, tail, count = 0;
+ struct signal_queue_header *pqhdr =
+ (struct signal_queue_header *)((char *)ch +
+ ch->ch_space_offset) + queue;
/* capture current head and tail */
- head = pqhdr->Head;
- tail = pqhdr->Tail;
+ head = pqhdr->head;
+ tail = pqhdr->tail;
/* queue is empty if the head index equals the tail index */
if (head == tail)
@@ -175,25 +175,25 @@ SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
while (head != tail) {
/* advance past the 'empty' front slot */
- tail = (tail + 1) % pqhdr->MaxSignalSlots;
+ tail = (tail + 1) % pqhdr->max_slots;
/* copy signal from tail location to the area pointed
* to by pSignal
*/
psource =
- (char *) pqhdr + pqhdr->oSignalBase +
- (tail * pqhdr->SignalSize);
- memcpy((char *) pSignal + (pqhdr->SignalSize * signalCount),
- psource, pqhdr->SignalSize);
+ (char *)pqhdr + pqhdr->sig_base_offset +
+ (tail * pqhdr->signal_size);
+ memcpy((char *)sig + (pqhdr->signal_size * count),
+ psource, pqhdr->signal_size);
mb(); /* channel synch */
- pqhdr->Tail = tail;
+ pqhdr->tail = tail;
- signalCount++;
- pqhdr->NumSignalsReceived++;
+ count++;
+ pqhdr->num_received++;
}
- return signalCount;
+ return count;
}
/*
@@ -207,13 +207,13 @@ SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue, void *pSignal)
* Return value:
* 1 if the signal queue is empty, 0 otherwise.
*/
-unsigned char
-visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel, u32 Queue)
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue)
{
- SIGNAL_QUEUE_HEADER __iomem *pqhdr =
- (SIGNAL_QUEUE_HEADER __iomem *) ((char __iomem *) pChannel +
- readq(&pChannel->oChannelSpace)) + Queue;
- return readl(&pqhdr->Head) == readl(&pqhdr->Tail);
+ struct signal_queue_header __iomem *pqhdr =
+ (struct signal_queue_header __iomem *)((char __iomem *)ch +
+ readq(&ch->ch_space_offset)) + queue;
+ return readl(&pqhdr->head) == readl(&pqhdr->tail);
}
-EXPORT_SYMBOL_GPL(visor_signalqueue_empty);
+EXPORT_SYMBOL_GPL(spar_signalqueue_empty);
diff --git a/drivers/staging/unisys/channels/chanstub.c b/drivers/staging/unisys/channels/chanstub.c
index d54c5d635a94..b6fd126f16f1 100644
--- a/drivers/staging/unisys/channels/chanstub.c
+++ b/drivers/staging/unisys/channels/chanstub.c
@@ -42,26 +42,26 @@ channel_mod_exit(void)
}
unsigned char
-SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
+SignalInsert_withLock(struct channel_header __iomem *pChannel, u32 Queue,
void *pSignal, spinlock_t *lock)
{
unsigned char result;
unsigned long flags;
spin_lock_irqsave(lock, flags);
- result = visor_signal_insert(pChannel, Queue, pSignal);
+ result = spar_signal_insert(pChannel, Queue, pSignal);
spin_unlock_irqrestore(lock, flags);
return result;
}
unsigned char
-SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
+SignalRemove_withLock(struct channel_header __iomem *pChannel, u32 Queue,
void *pSignal, spinlock_t *lock)
{
unsigned char result;
spin_lock(lock);
- result = visor_signal_remove(pChannel, Queue, pSignal);
+ result = spar_signal_remove(pChannel, Queue, pSignal);
spin_unlock(lock);
return result;
}
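An illustrative (not in-tree) use of the locked wrappers, assuming one spinlock per channel; queue_signal and chan_lock are made-up names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(chan_lock);

static int queue_signal(struct channel_header __iomem *chan, u32 queue,
                        void *sig)
{
        /* serializes against other inserters; returns 1 on success, 0 if full */
        return SignalInsert_withLock(chan, queue, sig, &chan_lock);
}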
diff --git a/drivers/staging/unisys/channels/chanstub.h b/drivers/staging/unisys/channels/chanstub.h
index d08e2c69d2ad..1531759a1b31 100644
--- a/drivers/staging/unisys/channels/chanstub.h
+++ b/drivers/staging/unisys/channels/chanstub.h
@@ -15,9 +15,9 @@
#ifndef __CHANSTUB_H__
#define __CHANSTUB_H__
-unsigned char SignalInsert_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock);
-unsigned char SignalRemove_withLock(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal, spinlock_t *lock);
+unsigned char SignalInsert_withLock(struct channel_header __iomem *pChannel,
+ u32 Queue, void *pSignal, spinlock_t *lock);
+unsigned char SignalRemove_withLock(struct channel_header __iomem *pChannel,
+ u32 Queue, void *pSignal, spinlock_t *lock);
#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel.h b/drivers/staging/unisys/common-spar/include/channels/channel.h
index c25dfbf7f6bc..6fb6e5b3ddaf 100644
--- a/drivers/staging/unisys/common-spar/include/channels/channel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/channel.h
@@ -50,43 +50,12 @@
#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
-#define CHANNEL_GUID_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=%pUL actual=%pUL @%s:%d\n", \
- chName, &chType, field, \
- &expected, &actual, \
- fil, lin); \
- } while (0)
-#define CHANNEL_U32_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=0x%-8.8lx actual=0x%-8.8lx @%s:%d\n", \
- chName, &chType, field, \
- (unsigned long)expected, (unsigned long)actual, \
- fil, lin); \
- } while (0)
-
-#define CHANNEL_U64_MISMATCH(chType, chName, field, expected, actual, fil, \
- lin, logCtx) \
- do { \
- pr_err("Channel mismatch on channel=%s(%pUL) field=%s expected=0x%-8.8Lx actual=0x%-8.8Lx @%s:%d\n", \
- chName, &chType, field, \
- (unsigned long long)expected, \
- (unsigned long long)actual, \
- fil, lin); \
- } while (0)
-
-#define UltraLogEvent(logCtx, EventId, Severity, SubsystemMask, pFunctionName, \
- LineNumber, Str, args...) \
- pr_info(Str, ## args)
-
-typedef enum {
+enum channel_serverstate {
CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
CHANNELSRV_READY = 1 /* channel has been initialized by server */
-} CHANNEL_SERVERSTATE;
+};
-typedef enum {
+enum channel_clientstate {
CHANNELCLI_DETACHED = 0,
CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT
* allowed to use it unless given TBD
@@ -100,32 +69,32 @@ typedef enum {
* using channel */
CHANNELCLI_OWNED = 5 /* "no worries" state - client can
* access channel anytime */
-} CHANNEL_CLIENTSTATE;
+};
+
static inline const u8 *
ULTRA_CHANNELCLI_STRING(u32 v)
{
switch (v) {
case CHANNELCLI_DETACHED:
- return (const u8 *) ("DETACHED");
+ return (const u8 *)("DETACHED");
case CHANNELCLI_DISABLED:
- return (const u8 *) ("DISABLED");
+ return (const u8 *)("DISABLED");
case CHANNELCLI_ATTACHING:
- return (const u8 *) ("ATTACHING");
+ return (const u8 *)("ATTACHING");
case CHANNELCLI_ATTACHED:
- return (const u8 *) ("ATTACHED");
+ return (const u8 *)("ATTACHED");
case CHANNELCLI_BUSY:
- return (const u8 *) ("BUSY");
+ return (const u8 *)("BUSY");
case CHANNELCLI_OWNED:
- return (const u8 *) ("OWNED");
+ return (const u8 *)("OWNED");
default:
break;
}
- return (const u8 *) ("?");
+ return (const u8 *)("?");
}
-#define ULTRA_CHANNELSRV_IS_READY(x) ((x) == CHANNELSRV_READY)
-#define ULTRA_CHANNEL_SERVER_READY(pChannel) \
- (ULTRA_CHANNELSRV_IS_READY(readl(&(pChannel)->SrvState)))
+#define SPAR_CHANNEL_SERVER_READY(ch) \
+ (readl(&(ch)->srv_state) == CHANNELSRV_READY)
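A small sketch, not from the patch, of polling the new readiness macro before touching a channel; the retry count and sleep interval are arbitrary assumptions:

#include <linux/delay.h>

static int wait_for_server(struct channel_header __iomem *ch)
{
        int i;

        for (i = 0; i < 100; i++) {
                if (SPAR_CHANNEL_SERVER_READY(ch))      /* readl() of srv_state */
                        return 1;
                msleep(10);
        }
        return 0;
}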
#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
(((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
@@ -145,60 +114,41 @@ ULTRA_CHANNELCLI_STRING(u32 v)
(((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
? (1) : (0))
-#define ULTRA_CHANNEL_CLIENT_CHK_TRANSITION(old, new, chanId, logCtx, \
+#define SPAR_CHANNEL_CLIENT_CHK_TRANSITION(old, new, id, log, \
file, line) \
do { \
if (!ULTRA_VALID_CHANNELCLI_TRANSITION(old, new)) \
- UltraLogEvent(logCtx, \
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR, \
- CHANNELSTATE_DIAG_SEVERITY, \
- CHANNELSTATE_DIAG_SUBSYS, \
- __func__, __LINE__, \
- "%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
- chanId, "CliState<x>", \
- ULTRA_CHANNELCLI_STRING(old), \
- old, \
- ULTRA_CHANNELCLI_STRING(new), \
- new, \
- PathName_Last_N_Nodes((u8 *)file, 4), \
- line); \
+ pr_info("%s Channel StateTransition INVALID! (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ id, "CliState<x>", \
+ ULTRA_CHANNELCLI_STRING(old), \
+ old, \
+ ULTRA_CHANNELCLI_STRING(new), \
+ new, \
+ pathname_last_n_nodes((u8 *)file, 4), \
+ line); \
} while (0)
-#define ULTRA_CHANNEL_CLIENT_TRANSITION(pChan, chanId, \
- newstate, logCtx) \
+#define SPAR_CHANNEL_CLIENT_TRANSITION(ch, id, newstate, log) \
do { \
- ULTRA_CHANNEL_CLIENT_CHK_TRANSITION( \
- readl(&(((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS)), \
- newstate, \
- chanId, logCtx, __FILE__, __LINE__); \
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK, \
- CHANNELSTATE_DIAG_SEVERITY, \
- CHANNELSTATE_DIAG_SUBSYS, \
- __func__, __LINE__, \
- "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
- chanId, "CliStateOS", \
- ULTRA_CHANNELCLI_STRING( \
- readl(&((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS)), \
- readl(&((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS), \
- ULTRA_CHANNELCLI_STRING(newstate), \
- newstate, \
- PathName_Last_N_Nodes(__FILE__, 4), __LINE__); \
- writel(newstate, &((CHANNEL_HEADER __iomem *) \
- (pChan))->CliStateOS); \
+ SPAR_CHANNEL_CLIENT_CHK_TRANSITION( \
+ readl(&(((struct channel_header __iomem *)\
+ (ch))->cli_state_os)), \
+ newstate, id, log, __FILE__, __LINE__); \
+ pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n", \
+ id, "CliStateOS", \
+ ULTRA_CHANNELCLI_STRING( \
+ readl(&((struct channel_header __iomem *)\
+ (ch))->cli_state_os)), \
+ readl(&((struct channel_header __iomem *)\
+ (ch))->cli_state_os), \
+ ULTRA_CHANNELCLI_STRING(newstate), \
+ newstate, \
+ pathname_last_n_nodes(__FILE__, 4), __LINE__); \
+ writel(newstate, &((struct channel_header __iomem *)\
+ (ch))->cli_state_os); \
mb(); /* required for channel synch */ \
} while (0)
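A hedged one-liner showing what a caller of the renamed transition macro looks like; example_attach and the "mychan" label are placeholders:

static void example_attach(struct channel_header __iomem *ch)
{
        /* validates the transition, logs it, then writel()s cli_state_os */
        SPAR_CHANNEL_CLIENT_TRANSITION(ch, "mychan", CHANNELCLI_ATTACHED, NULL);
}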
-#define ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(pChan, chanId, logCtx) \
- ULTRA_channel_client_acquire_os(pChan, chanId, logCtx, \
- (char *)__FILE__, __LINE__, \
- (char *)__func__)
-#define ULTRA_CHANNEL_CLIENT_RELEASE_OS(pChan, chanId, logCtx) \
- ULTRA_channel_client_release_os(pChan, chanId, logCtx, \
- (char *)__FILE__, __LINE__, (char *)__func__)
-
/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */
/* throttling invalid boot channel statetransition error due to client
* disabled */
@@ -239,98 +189,98 @@ ULTRA_CHANNELCLI_STRING(u32 v)
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
/* Common Channel Header */
-typedef struct _CHANNEL_HEADER {
- u64 Signature; /* Signature */
- u32 LegacyState; /* DEPRECATED - being replaced by */
+struct channel_header {
+ u64 signature; /* Signature */
+ u32 legacy_state; /* DEPRECATED - being replaced by */
/* / SrvState, CliStateBoot, and CliStateOS below */
- u32 HeaderSize; /* sizeof(CHANNEL_HEADER) */
- u64 Size; /* Total size of this channel in bytes */
- u64 Features; /* Flags to modify behavior */
- uuid_le Type; /* Channel type: data, bus, control, etc. */
- u64 PartitionHandle; /* ID of guest partition */
- u64 Handle; /* Device number of this channel in client */
- u64 oChannelSpace; /* Offset in bytes to channel specific area */
- u32 VersionId; /* CHANNEL_HEADER Version ID */
- u32 PartitionIndex; /* Index of guest partition */
- uuid_le ZoneGuid; /* Guid of Channel's zone */
- u32 oClientString; /* offset from channel header to
+ u32 header_size; /* sizeof(struct channel_header) */
+ u64 size; /* Total size of this channel in bytes */
+ u64 features; /* Flags to modify behavior */
+ uuid_le chtype; /* Channel type: data, bus, control, etc. */
+ u64 partition_handle; /* ID of guest partition */
+ u64 handle; /* Device number of this channel in client */
+ u64 ch_space_offset; /* Offset in bytes to channel specific area */
+ u32 version_id; /* struct channel_header Version ID */
+ u32 partition_index; /* Index of guest partition */
+ uuid_le zone_uuid; /* Guid of Channel's zone */
+ u32 cli_str_offset; /* offset from channel header to
* nul-terminated ClientString (0 if
* ClientString not present) */
- u32 CliStateBoot; /* CHANNEL_CLIENTSTATE of pre-boot
+ u32 cli_state_boot; /* enum channel_clientstate of pre-boot
* EFI client of this channel */
- u32 CmdStateCli; /* CHANNEL_COMMANDSTATE (overloaded in
+ u32 cmd_state_cli; /* CHANNEL_COMMANDSTATE (overloaded in
* Windows drivers, see ServerStateUp,
* ServerStateDown, etc) */
- u32 CliStateOS; /* CHANNEL_CLIENTSTATE of Guest OS
+ u32 cli_state_os; /* CHANNEL_CLIENTSTATE of Guest OS
* client of this channel */
- u32 ChannelCharacteristics; /* CHANNEL_CHARACTERISTIC_<xxx> */
- u32 CmdStateSrv; /* CHANNEL_COMMANDSTATE (overloaded in
+ u32 ch_characteristic; /* CHANNEL_CHARACTERISTIC_<xxx> */
+ u32 cmd_state_srv; /* CHANNEL_COMMANDSTATE (overloaded in
* Windows drivers, see ServerStateUp,
* ServerStateDown, etc) */
- u32 SrvState; /* CHANNEL_SERVERSTATE */
- u8 CliErrorBoot; /* bits to indicate err states for
+ u32 srv_state; /* CHANNEL_SERVERSTATE */
+ u8 cli_error_boot; /* bits to indicate err states for
* boot clients, so err messages can
* be throttled */
- u8 CliErrorOS; /* bits to indicate err states for OS
+ u8 cli_error_os; /* bits to indicate err states for OS
* clients, so err messages can be
* throttled */
- u8 Filler[1]; /* Pad out to 128 byte cacheline */
+ u8 filler[1]; /* Pad out to 128 byte cacheline */
/* Please add all new single-byte values below here */
- u8 RecoverChannel;
-} CHANNEL_HEADER, *pCHANNEL_HEADER, ULTRA_CHANNEL_PROTOCOL;
+ u8 recover_channel;
+};
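As a sketch of how the renamed header fields are meant to be accessed on a mapped channel (channel_big_enough is a hypothetical helper, not part of this series):

#include <linux/io.h>
#include <linux/types.h>

static bool channel_big_enough(struct channel_header __iomem *regs,
                               u64 min_bytes)
{
        /* header fields are only touched through the io accessors */
        return readq(&regs->size) >= min_bytes;
}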
#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0)
/* Subheader for the Signal Type variation of the Common Channel */
-typedef struct _SIGNAL_QUEUE_HEADER {
+struct signal_queue_header {
/* 1st cache line */
- u32 VersionId; /* SIGNAL_QUEUE_HEADER Version ID */
- u32 Type; /* Queue type: storage, network */
- u64 Size; /* Total size of this queue in bytes */
- u64 oSignalBase; /* Offset to signal queue area */
- u64 FeatureFlags; /* Flags to modify behavior */
- u64 NumSignalsSent; /* Total # of signals placed in this queue */
- u64 NumOverflows; /* Total # of inserts failed due to
+ u32 version; /* struct signal_queue_header Version ID */
+ u32 chtype; /* Queue type: storage, network */
+ u64 size; /* Total size of this queue in bytes */
+ u64 sig_base_offset; /* Offset to signal queue area */
+ u64 features; /* Flags to modify behavior */
+ u64 num_sent; /* Total # of signals placed in this queue */
+ u64 num_overflows; /* Total # of inserts failed due to
* full queue */
- u32 SignalSize; /* Total size of a signal for this queue */
- u32 MaxSignalSlots; /* Max # of slots in queue, 1 slot is
+ u32 signal_size; /* Total size of a signal for this queue */
+ u32 max_slots; /* Max # of slots in queue, 1 slot is
* always empty */
- u32 MaxSignals; /* Max # of signals in queue
+ u32 max_signals; /* Max # of signals in queue
* (MaxSignalSlots-1) */
- u32 Head; /* Queue head signal # */
+ u32 head; /* Queue head signal # */
/* 2nd cache line */
- u64 NumSignalsReceived; /* Total # of signals removed from this queue */
- u32 Tail; /* Queue tail signal # (on separate
+ u64 num_received; /* Total # of signals removed from this queue */
+ u32 tail; /* Queue tail signal # (on separate
* cache line) */
- u32 Reserved1; /* Reserved field */
- u64 Reserved2; /* Resrved field */
- u64 ClientQueue;
- u64 NumInterruptsReceived; /* Total # of Interrupts received. This
+ u32 reserved1; /* Reserved field */
+ u64 reserved2; /* Reserved field */
+ u64 client_queue;
+ u64 num_irq_received; /* Total # of Interrupts received. This
* is incremented by the ISR in the
* guest windows driver */
- u64 NumEmptyCnt; /* Number of times that visor_signal_remove
+ u64 num_empty; /* Number of times that spar_signal_remove
* is called and returned Empty
* Status. */
- u32 ErrorFlags; /* Error bits set during SignalReinit
+ u32 errorflags; /* Error bits set during SignalReinit
* to denote trouble with client's
* fields */
- u8 Filler[12]; /* Pad out to 64 byte cacheline */
-} SIGNAL_QUEUE_HEADER, *pSIGNAL_QUEUE_HEADER;
+ u8 filler[12]; /* Pad out to 64 byte cacheline */
+};
#pragma pack(pop)
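For reference, the slot-addressing math the queue helpers above rely on, written out as a hypothetical local helper (a non-__iomem copy is assumed for brevity):

static void *slot_addr(struct signal_queue_header *q, u32 slot)
{
        /* slot N starts sig_base_offset + N * signal_size bytes past the header */
        return (char *)q + q->sig_base_offset + slot * q->signal_size;
}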
-#define SignalInit(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
+#define spar_signal_init(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
do { \
memset(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \
- chan->QHDRFLD.VersionId = ver; \
- chan->QHDRFLD.Type = typ; \
- chan->QHDRFLD.Size = sizeof(chan->QDATAFLD); \
- chan->QHDRFLD.SignalSize = sizeof(QDATATYPE); \
- chan->QHDRFLD.oSignalBase = (u64)(chan->QDATAFLD)- \
+ chan->QHDRFLD.version = ver; \
+ chan->QHDRFLD.chtype = typ; \
+ chan->QHDRFLD.size = sizeof(chan->QDATAFLD); \
+ chan->QHDRFLD.signal_size = sizeof(QDATATYPE); \
+ chan->QHDRFLD.sig_base_offset = (u64)(chan->QDATAFLD)- \
(u64)(&chan->QHDRFLD); \
- chan->QHDRFLD.MaxSignalSlots = \
+ chan->QHDRFLD.max_slots = \
sizeof(chan->QDATAFLD)/sizeof(QDATATYPE); \
- chan->QHDRFLD.MaxSignals = chan->QHDRFLD.MaxSignalSlots-1; \
+ chan->QHDRFLD.max_signals = chan->QHDRFLD.max_slots-1; \
} while (0)
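A sketch of what the renamed init macro expects from its channel argument; demo_signal and demo_channel are invented types, not structures defined by this patch:

struct demo_signal {
        u32 opcode;
        u32 arg;
};

struct demo_channel {
        struct channel_header header;
        struct signal_queue_header cmd_q;
        struct demo_signal cmd_slots[64];
};

static void demo_init(struct demo_channel *chan)
{
        /* fills version, chtype, size, signal_size, sig_base_offset, max_* */
        spar_signal_init(chan, cmd_q, cmd_slots, struct demo_signal, 1, 0);
}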
/* Generic function useful for validating any type of channel when it is
@@ -339,64 +289,62 @@ typedef struct _SIGNAL_QUEUE_HEADER {
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
static inline int
-ULTRA_check_channel_client(void __iomem *pChannel,
- uuid_le expectedTypeGuid,
- char *channelName,
- u64 expectedMinBytes,
- u32 expectedVersionId,
- u64 expectedSignature,
- char *fileName, int lineNumber, void *logCtx)
+spar_check_channel_client(void __iomem *ch,
+ uuid_le expected_uuid,
+ char *chname,
+ u64 expected_min_bytes,
+ u32 expected_version,
+ u64 expected_signature)
{
- if (uuid_le_cmp(expectedTypeGuid, NULL_UUID_LE) != 0) {
+ if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
uuid_le guid;
memcpy_fromio(&guid,
- &((CHANNEL_HEADER __iomem *)(pChannel))->Type,
+ &((struct channel_header __iomem *)(ch))->chtype,
sizeof(guid));
/* caller wants us to verify type GUID */
- if (uuid_le_cmp(guid, expectedTypeGuid) != 0) {
- CHANNEL_GUID_MISMATCH(expectedTypeGuid, channelName,
- "type", expectedTypeGuid,
- guid, fileName,
- lineNumber, logCtx);
+ if (uuid_le_cmp(guid, expected_uuid) != 0) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
+ chname, &expected_uuid,
+ &expected_uuid, &guid);
return 0;
}
}
- if (expectedMinBytes > 0) /* caller wants us to verify
+ if (expected_min_bytes > 0) { /* caller wants us to verify
* channel size */
- if (readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Size) < expectedMinBytes) {
- CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
- "size", expectedMinBytes,
- readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Size),
- fileName,
- lineNumber, logCtx);
+ unsigned long long bytes =
+ readq(&((struct channel_header __iomem *)
+ (ch))->size);
+ if (bytes < expected_min_bytes) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
+ chname, &expected_uuid,
+ (unsigned long long)expected_min_bytes, bytes);
return 0;
}
- if (expectedVersionId > 0) /* caller wants us to verify
+ }
+ if (expected_version > 0) { /* caller wants us to verify
* channel version */
- if (readl(&((CHANNEL_HEADER __iomem *) (pChannel))->VersionId)
- != expectedVersionId) {
- CHANNEL_U32_MISMATCH(expectedTypeGuid, channelName,
- "version", expectedVersionId,
- readl(&((CHANNEL_HEADER __iomem *)
- (pChannel))->VersionId),
- fileName, lineNumber, logCtx);
+ unsigned long ver = readl(&((struct channel_header __iomem *)
+ (ch))->version_id);
+ if (ver != expected_version) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8lx\n",
+ chname, &expected_uuid,
+ (unsigned long)expected_version, ver);
return 0;
}
- if (expectedSignature > 0) /* caller wants us to verify
+ }
+ if (expected_signature > 0) { /* caller wants us to verify
* channel signature */
- if (readq(&((CHANNEL_HEADER __iomem *) (pChannel))->Signature)
- != expectedSignature) {
- CHANNEL_U64_MISMATCH(expectedTypeGuid, channelName,
- "signature", expectedSignature,
- readq(&((CHANNEL_HEADER __iomem *)
- (pChannel))->Signature),
- fileName,
- lineNumber, logCtx);
+ unsigned long long sig =
+ readq(&((struct channel_header __iomem *)
+ (ch))->signature);
+ if (sig != expected_signature) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8llx actual=0x%-8.8llx\n",
+ chname, &expected_uuid,
+ expected_signature, sig);
return 0;
}
+ }
return 1;
}
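A hedged example of calling the simplified checker; the "vhba" label, the 4096-byte minimum and the version value 1 are placeholders, while the uuid and signature constants come from channel_guid.h and channel.h in this series:

static int validate_mapping(void __iomem *ch)
{
        /* returns 1 only if type uuid, size, version and signature all match */
        return spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid,
                                         "vhba", 4096, 1,
                                         ULTRA_CHANNEL_PROTOCOL_SIGNATURE);
}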
@@ -405,19 +353,16 @@ ULTRA_check_channel_client(void __iomem *pChannel,
* Note that <logCtx> is only needed for callers in the EFI environment, and
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
-static inline int
-ULTRA_check_channel_server(uuid_le typeGuid,
- char *channelName,
- u64 expectedMinBytes,
- u64 actualBytes,
- char *fileName, int lineNumber, void *logCtx)
+static inline int spar_check_channel_server(uuid_le typeuuid, char *name,
+ u64 expected_min_bytes,
+ u64 actual_bytes)
{
- if (expectedMinBytes > 0) /* caller wants us to verify
+ if (expected_min_bytes > 0) /* caller wants us to verify
* channel size */
- if (actualBytes < expectedMinBytes) {
- CHANNEL_U64_MISMATCH(typeGuid, channelName, "size",
- expectedMinBytes, actualBytes,
- fileName, lineNumber, logCtx);
+ if (actual_bytes < expected_min_bytes) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8llx actual=0x%-8.8llx\n",
+ name, &typeuuid, expected_min_bytes,
+ actual_bytes);
return 0;
}
return 1;
@@ -430,7 +375,7 @@ ULTRA_check_channel_server(uuid_le typeGuid,
* in it, the return pointer will be to the beginning of the string.
*/
static inline u8 *
-PathName_Last_N_Nodes(u8 *s, unsigned int n)
+pathname_last_n_nodes(u8 *s, unsigned int n)
{
u8 *p = s;
unsigned int node_count = 0;
@@ -455,59 +400,43 @@ PathName_Last_N_Nodes(u8 *s, unsigned int n)
}
static inline int
-ULTRA_channel_client_acquire_os(void __iomem *pChannel, u8 *chanId,
- void *logCtx, char *file, int line, char *func)
+spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
{
- CHANNEL_HEADER __iomem *pChan = pChannel;
+ struct channel_header __iomem *hdr = ch;
- if (readl(&pChan->CliStateOS) == CHANNELCLI_DISABLED) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_DISABLED) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_DISABLED) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_DISABLED,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED @%s:%d\n",
- chanId, PathName_Last_N_Nodes(
- (u8 *) file, 4), line);
+ pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client DISABLED\n",
+ id);
}
return 0;
}
- if ((readl(&pChan->CliStateOS) != CHANNELCLI_OWNED)
- && (readl(&pChan->CliStateBoot) == CHANNELCLI_DISABLED)) {
+ if ((readl(&hdr->cli_state_os) != CHANNELCLI_OWNED) &&
+ (readl(&hdr->cli_state_boot) == CHANNELCLI_DISABLED)) {
/* Our competitor is DISABLED, so we can transition to OWNED */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition (%s) %s(%d)-->%s(%d) @%s:%d\n",
- chanId, "CliStateOS",
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
- CHANNELCLI_OWNED,
- PathName_Last_N_Nodes((u8 *) file, 4), line);
- writel(CHANNELCLI_OWNED, &pChan->CliStateOS);
+ pr_info("%s Channel StateTransition (%s) %s(%d)-->%s(%d)\n",
+ id, "cli_state_os",
+ ULTRA_CHANNELCLI_STRING(readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os),
+ ULTRA_CHANNELCLI_STRING(CHANNELCLI_OWNED),
+ CHANNELCLI_OWNED);
+ writel(CHANNELCLI_OWNED, &hdr->cli_state_os);
mb(); /* required for channel synch */
}
- if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED) {
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state;
* come out of it */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client acquire now successful @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file,
- 4), line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client acquire now successful\n",
+ id);
+ writeb(0, &hdr->cli_error_os);
}
return 1;
}
@@ -515,95 +444,67 @@ ULTRA_channel_client_acquire_os(void __iomem *pChannel, u8 *chanId,
/* We have to do it the "hard way". We transition to BUSY,
* and can use the channel iff our competitor has not also
* transitioned to BUSY. */
- if (readl(&pChan->CliStateOS) != CHANNELCLI_ATTACHED) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_os) != CHANNELCLI_ATTACHED) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_NOTATTACHED,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d)) @%s:%d\n",
- chanId,
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- PathName_Last_N_Nodes((u8 *) file, 4),
- line);
+ pr_info("%s Channel StateTransition INVALID! - acquire failed because OS client NOT ATTACHED (state=%s(%d))\n",
+ id, ULTRA_CHANNELCLI_STRING(
+ readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os));
}
return 0;
}
- writel(CHANNELCLI_BUSY, &pChan->CliStateOS);
+ writel(CHANNELCLI_BUSY, &hdr->cli_state_os);
mb(); /* required for channel synch */
- if (readl(&pChan->CliStateBoot) == CHANNELCLI_BUSY) {
- if ((readb(&pChan->CliErrorOS)
+ if (readl(&hdr->cli_state_boot) == CHANNELCLI_BUSY) {
+ if ((readb(&hdr->cli_error_os)
& ULTRA_CLIERROROS_THROTTLEMSG_BUSY) == 0) {
/* we are NOT throttling this message */
- writeb(readb(&pChan->CliErrorOS) |
+ writeb(readb(&hdr->cli_error_os) |
ULTRA_CLIERROROS_THROTTLEMSG_BUSY,
- &pChan->CliErrorOS);
+ &hdr->cli_error_os);
/* throttle until acquire successful */
- UltraLogEvent(logCtx,
- CHANNELSTATE_DIAG_EVENTID_TRANSITBUSY,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition failed - host OS acquire failed because boot BUSY @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file,
- 4), line);
+ pr_info("%s Channel StateTransition failed - host OS acquire failed because boot BUSY\n",
+ id);
}
/* reset busy */
- writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS);
+ writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os);
mb(); /* required for channel synch */
return 0;
}
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state; come out of it */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client acquire now successful @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file, 4),
- line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client acquire now successful\n", id);
+ writeb(0, &hdr->cli_error_os);
}
return 1;
}
static inline void
-ULTRA_channel_client_release_os(void __iomem *pChannel, u8 *chanId,
- void *logCtx, char *file, int line, char *func)
+spar_channel_client_release_os(void __iomem *ch, u8 *id)
{
- CHANNEL_HEADER __iomem *pChan = pChannel;
+ struct channel_header __iomem *hdr = ch;
- if (readb(&pChan->CliErrorOS) != 0) {
+ if (readb(&hdr->cli_error_os) != 0) {
/* we are in an error msg throttling state; come out of it */
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITOK,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel OS client error state cleared @%s:%d\n",
- chanId, PathName_Last_N_Nodes((u8 *) file, 4),
- line);
- writeb(0, &pChan->CliErrorOS);
+ pr_info("%s Channel OS client error state cleared\n", id);
+ writeb(0, &hdr->cli_error_os);
}
- if (readl(&pChan->CliStateOS) == CHANNELCLI_OWNED)
+ if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED)
return;
- if (readl(&pChan->CliStateOS) != CHANNELCLI_BUSY) {
- UltraLogEvent(logCtx, CHANNELSTATE_DIAG_EVENTID_TRANSITERR,
- CHANNELSTATE_DIAG_SEVERITY,
- CHANNELSTATE_DIAG_SUBSYS, func, line,
- "%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d)) @%s:%d\n",
- chanId,
- ULTRA_CHANNELCLI_STRING(
- readl(&pChan->CliStateOS)),
- readl(&pChan->CliStateOS),
- PathName_Last_N_Nodes((u8 *) file, 4), line);
+ if (readl(&hdr->cli_state_os) != CHANNELCLI_BUSY) {
+ pr_info("%s Channel StateTransition INVALID! - release failed because OS client NOT BUSY (state=%s(%d))\n",
+ id, ULTRA_CHANNELCLI_STRING(
+ readl(&hdr->cli_state_os)),
+ readl(&hdr->cli_state_os));
/* return; */
}
- writel(CHANNELCLI_ATTACHED, &pChan->CliStateOS); /* release busy */
+ writel(CHANNELCLI_ATTACHED, &hdr->cli_state_os); /* release busy */
}
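A sketch of the expected acquire/release pairing for the OS client (with_channel and the -EAGAIN mapping are assumptions):

#include <linux/errno.h>

static int with_channel(struct channel_header __iomem *ch, u8 *id)
{
        if (!spar_channel_client_acquire_os(ch, id))
                return -EAGAIN;         /* boot client owns it or is busy */
        /* ... access channel contents here ... */
        spar_channel_client_release_os(ch, id);
        return 0;
}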
/*
@@ -625,8 +526,8 @@ ULTRA_channel_client_release_os(void __iomem *pChannel, u8 *chanId,
* full.
*/
-unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal);
+unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -647,8 +548,8 @@ unsigned char visor_signal_insert(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
* empty.
*/
-unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
- void *pSignal);
+unsigned char spar_signal_remove(struct channel_header __iomem *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -669,8 +570,8 @@ unsigned char visor_signal_remove(CHANNEL_HEADER __iomem *pChannel, u32 Queue,
* Return value:
* # of signals copied.
*/
-unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue,
- void *pSignal);
+unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
+ void *sig);
/*
* Routine Description:
@@ -683,7 +584,7 @@ unsigned int SignalRemoveAll(pCHANNEL_HEADER pChannel, u32 Queue,
* Return value:
* 1 if the signal queue is empty, 0 otherwise.
*/
-unsigned char visor_signalqueue_empty(CHANNEL_HEADER __iomem *pChannel,
- u32 Queue);
+unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
+ u32 queue);
#endif
diff --git a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
index 63c67ca4c9ec..706363fc3e9a 100644
--- a/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
+++ b/drivers/staging/unisys/common-spar/include/channels/channel_guid.h
@@ -20,45 +20,42 @@
/* Used in IOChannel
* {414815ed-c58c-11da-95a9-00e08161165f}
*/
-#define ULTRA_VHBA_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x414815ed, 0xc58c, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVhbaChannelProtocolGuid =
- ULTRA_VHBA_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vhba_channel_protocol_uuid =
+ SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
/* Used in IOChannel
* {8cd5994d-c58e-11da-95a9-00e08161165f}
*/
-#define ULTRA_VNIC_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVnicChannelProtocolGuid =
- ULTRA_VNIC_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vnic_channel_protocol_uuid =
+ SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
/* Used in IOChannel
* {72120008-4AAB-11DC-8530-444553544200}
*/
-#define ULTRA_SIOVM_GUID \
+#define SPAR_SIOVM_UUID \
UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-static const uuid_le UltraSIOVMGuid = ULTRA_SIOVM_GUID;
-
+static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
/* Used in visornoop/visornoop_main.c
* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
*/
-#define ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID \
+#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
-static const uuid_le UltraControlDirectorChannelProtocolGuid =
- ULTRA_CONTROLDIRECTOR_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_controldirector_channel_protocol_uuid =
+ SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
/* Used in visorchipset/visorchipset_main.c
* {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
*/
-#define ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID \
+#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
-
-
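An illustrative check of a channel's type against one of the renamed constants, assuming a local (non-__iomem) copy of the header; is_vhba_channel itself is not defined anywhere in this series:

#include <linux/uuid.h>

static bool is_vhba_channel(const struct channel_header *hdr)
{
        return uuid_le_cmp(hdr->chtype, spar_vhba_channel_protocol_uuid) == 0;
}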
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlframework.h b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
index fd4726e754ea..33d9caf337c8 100644
--- a/drivers/staging/unisys/common-spar/include/channels/controlframework.h
+++ b/drivers/staging/unisys/common-spar/include/channels/controlframework.h
@@ -28,50 +28,35 @@
#include <linux/types.h>
#include "channel.h"
-#define ULTRA_MEMORY_COUNT_Ki 1024
-
-/* Scale order 0 is one 32-bit (4-byte) word (in 64 or 128-bit
- * architecture potentially 64 or 128-bit word) */
-#define ULTRA_MEMORY_PAGE_WORD 4
-
-/* Define Ki scale page to be traditional 4KB page */
-#define ULTRA_MEMORY_PAGE_Ki (ULTRA_MEMORY_PAGE_WORD * ULTRA_MEMORY_COUNT_Ki)
-typedef struct _ULTRA_SEGMENT_STATE {
- u16 Enabled:1; /* Bit 0: May enter other states */
- u16 Active:1; /* Bit 1: Assigned to active partition */
- u16 Alive:1; /* Bit 2: Configure message sent to
+struct spar_segment_state {
+ u16 enabled:1; /* Bit 0: May enter other states */
+ u16 active:1; /* Bit 1: Assigned to active partition */
+ u16 alive:1; /* Bit 2: Configure message sent to
* service/server */
- u16 Revoked:1; /* Bit 3: similar to partition state
+ u16 revoked:1; /* Bit 3: similar to partition state
* ShuttingDown */
- u16 Allocated:1; /* Bit 4: memory (device/port number)
+ u16 allocated:1; /* Bit 4: memory (device/port number)
* has been selected by Command */
- u16 Known:1; /* Bit 5: has been introduced to the
+ u16 known:1; /* Bit 5: has been introduced to the
* service/guest partition */
- u16 Ready:1; /* Bit 6: service/Guest partition has
+ u16 ready:1; /* Bit 6: service/Guest partition has
* responded to introduction */
- u16 Operating:1; /* Bit 7: resource is configured and
+ u16 operating:1; /* Bit 7: resource is configured and
* operating */
/* Note: don't use high bit unless we need to switch to ushort
* which is non-compliant */
-} ULTRA_SEGMENT_STATE;
-static const ULTRA_SEGMENT_STATE SegmentStateRunning = {
+};
+
+static const struct spar_segment_state segment_state_running = {
1, 1, 1, 0, 1, 1, 1, 1
};
-static const ULTRA_SEGMENT_STATE SegmentStatePaused = {
+
+static const struct spar_segment_state segment_state_paused = {
1, 1, 1, 0, 1, 1, 1, 0
};
-static const ULTRA_SEGMENT_STATE SegmentStateStandby = {
+
+static const struct spar_segment_state segment_state_standby = {
1, 1, 0, 0, 1, 1, 1, 0
};
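A hedged helper, not from the patch, showing how the bit pattern of segment_state_running above would be tested field by field:

#include <linux/types.h>

static bool segment_is_running(struct spar_segment_state s)
{
        return s.enabled && s.active && s.alive && !s.revoked &&
               s.allocated && s.known && s.ready && s.operating;
}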
-typedef union {
- u64 Full;
- struct {
- u8 Major; /* will be 1 for the first release and
- * increment thereafter */
- u8 Minor;
- u16 Maintenance;
- u32 Revision; /* Subversion revision */
- } Part;
-} ULTRA_COMPONENT_VERSION;
#endif /* _CONTROL_FRAMEWORK_H_ not defined */
diff --git a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
index d08c198e0de3..a66db7968d6c 100644
--- a/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/controlvmchannel.h
@@ -27,12 +27,12 @@ enum { INVALID_GUEST_FIRMWARE, SAMPLE_GUEST_FIRMWARE,
};
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID \
+#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
-static const uuid_le UltraControlvmChannelProtocolGuid =
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_controlvm_channel_protocol_uuid =
+ SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
ULTRA_CHANNEL_PROTOCOL_SIGNATURE
@@ -45,19 +45,13 @@ static const uuid_le UltraControlvmChannelProtocolGuid =
* channel struct withOUT needing to increment this. */
#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_CONTROLVM_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraControlvmChannelProtocolGuid, \
+#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ spar_controlvm_channel_protocol_uuid, \
"controlvm", \
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
+ sizeof(struct spar_controlvm_channel_protocol), \
ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_CONTROLVM_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraControlvmChannelProtocolGuid, \
- "controlvm", \
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL), \
- actualBytes, __FILE__, __LINE__, logCtx))
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE)
#define MY_DEVICE_INDEX 0
#define MAX_MACDATA_LEN 8 /* number of bytes for MAC address in config packet */
@@ -88,7 +82,7 @@ static const uuid_le UltraControlvmChannelProtocolGuid =
* - issued on the EventQueue queue (q #2) in the ControlVm channel
* - responded to on the EventAckQueue queue (q #3) in the ControlVm channel
*/
-typedef enum {
+enum controlvm_id {
CONTROLVM_INVALID = 0,
/* SWITCH commands required Parameter: SwitchNumber */
/* BUS commands required Parameter: BusNumber */
@@ -117,9 +111,9 @@ typedef enum {
CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */
CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */
-} CONTROLVM_ID;
+};
-struct InterruptInfo {
+struct irq_info {
/**< specifies interrupt info. It is used to send interrupts
* for this channel. The peer at the end of this channel
* who has registered an interrupt (using recv fields
@@ -128,495 +122,390 @@ struct InterruptInfo {
* interrupt. Currently this is used by IOPart-SP to wake
* up GP when Data Channel transitions from empty to
* non-empty.*/
- u64 sendInterruptHandle;
+ u64 send_irq_handle;
/**< specifies interrupt handle. It is used to retrieve the
* corresponding interrupt pin from Monitor; and the
* interrupt pin is used to connect to the corresponding
- * intrrupt. Used by IOPart-GP only. */
- u64 recvInterruptHandle;
+ * interrupt. Used by IOPart-GP only. */
+ u64 recv_irq_handle;
/**< specifies interrupt vector. It, interrupt pin, and shared are
* used to connect to the corresponding interrupt. Used by
* IOPart-GP only. */
- u32 recvInterruptVector;
+ u32 recv_irq_vector;
/**< specifies if the recvInterrupt is shared. It, interrupt pin
* and vector are used to connect to 0 = not shared; 1 = shared.
* the corresponding interrupt. Used by IOPart-GP only. */
- u8 recvInterruptShared;
+ u8 recv_irq_shared;
u8 reserved[3]; /* Natural alignment purposes */
};
-struct PciId {
- u16 Domain;
- u8 Bus;
- u8 Slot;
- u8 Func;
- u8 Reserved[3]; /* Natural alignment purposes */
-};
-
-struct PciConfigHdr {
- u16 VendorId;
- u16 SubSysVendor;
- u16 DeviceId;
- u16 SubSysDevice;
- u32 ClassCode;
- u32 Reserved; /* Natural alignment purposes */
-};
-
-struct ScsiId {
- u32 Bus;
- u32 Target;
- u32 Lun;
- u32 Host; /* Command should ignore this for *
- * DiskArrival/RemovalEvents */
-};
-
-struct WWID {
- u32 wwid1;
- u32 wwid2;
-};
-
-struct virtDiskInfo {
- u32 switchNo; /* defined by SWITCH_CREATE */
- u32 externalPortNo; /* 0 for SAS RAID provided (external)
- * virtual disks, 1 for virtual disk
- * images, 2 for gold disk images */
- u16 VirtualDiskIndex; /* Index of disk descriptor in the
- * VirtualDisk segment associated with
- * externalPortNo */
- u16 Reserved1;
- u32 Reserved2;
+struct pci_id {
+ u16 domain;
+ u8 bus;
+ u8 slot;
+ u8 func;
+ u8 reserved[3]; /* Natural alignment purposes */
};
-typedef enum {
- CONTROLVM_ACTION_NONE = 0,
- CONTROLVM_ACTION_SET_RESTORE = 0x05E7,
- CONTROLVM_ACTION_CLEAR_RESTORE = 0x0C18,
- CONTROLVM_ACTION_RESTORING = 0x08E5,
- CONTROLVM_ACTION_RESTORE_BUSY = 0x0999,
- CONTROLVM_ACTION_CLEAR_NVRAM = 0xB01
-} CONTROLVM_ACTION;
-
-typedef enum _ULTRA_TOOL_ACTIONS {
- /* enumeration that defines intended action */
- ULTRA_TOOL_ACTION_NONE = 0, /* normal boot of boot disk */
- ULTRA_TOOL_ACTION_INSTALL = 1, /* install source disk(s) to boot
- * disk */
- ULTRA_TOOL_ACTION_CAPTURE = 2, /* capture boot disk to target disk(s)
- * as 'gold image' */
- ULTRA_TOOL_ACTION_REPAIR = 3, /* use source disk(s) to repair
- * installation on boot disk */
- ULTRA_TOOL_ACTION_CLEAN = 4, /* 'scrub' virtual disk before
- * releasing back to storage pool */
- ULTRA_TOOL_ACTION_UPGRADE = 5, /* upgrade to use content of images
- * referenced from newer blueprint */
- ULTRA_TOOL_ACTION_DIAG = 6, /* use tool to invoke diagnostic script
- * provided by blueprint */
- ULTRA_TOOL_ACTION_FAILED = 7, /* used when tool fails installation
- and cannot continue */
- ULTRA_TOOL_ACTION_COUNT = 8
-} ULTRA_TOOL_ACTIONS;
-
-typedef struct _ULTRA_EFI_SPAR_INDICATION {
- u64 BootToFirmwareUI:1; /* Bit 0: Stop in uefi ui */
- u64 ClearNvram:1; /* Bit 1: Clear NVRAM */
- u64 ClearCmos:1; /* Bit 2: Clear CMOS */
- u64 BootToTool:1; /* Bit 3: Run install tool */
+struct efi_spar_indication {
+ u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */
+ u64 clear_nvram:1; /* Bit 1: Clear NVRAM */
+ u64 clear_cmos:1; /* Bit 2: Clear CMOS */
+ u64 boot_to_tool:1; /* Bit 3: Run install tool */
/* remaining bits are available */
-} ULTRA_EFI_SPAR_INDICATION;
+};
-typedef enum {
+enum ultra_chipset_feature {
ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
ULTRA_CHIPSET_FEATURE_PCIVBUS = 0x00000004
-} ULTRA_CHIPSET_FEATURE;
+};
/** This is the common structure that is at the beginning of every
* ControlVm message (both commands and responses) in any ControlVm
* queue. Commands are easily distinguished from responses by
* looking at the flags.response field.
*/
-typedef struct _CONTROLVM_MESSAGE_HEADER {
- u32 Id; /* See CONTROLVM_ID. */
+struct controlvm_message_header {
+ u32 id; /* See enum controlvm_id. */
/* For requests, indicates the message type. */
/* For responses, indicates the type of message we are responding to. */
- u32 MessageSize; /* Includes size of this struct + size
+ u32 message_size; /* Includes size of this struct + size
* of message */
- u32 SegmentIndex; /* Index of segment containing Vm
+ u32 segment_index; /* Index of segment containing Vm
* message/information */
- u32 CompletionStatus; /* Error status code or result of
+ u32 completion_status; /* Error status code or result of
* message completion */
struct {
u32 failed:1; /**< =1 in a response to * signify
* failure */
- u32 responseExpected:1; /**< =1 in all messages that expect a
+ u32 response_expected:1; /**< =1 in all messages that expect a
* response (Control ignores this
* bit) */
u32 server:1; /**< =1 in all bus & device-related
* messages where the message
* receiver is to act as the bus or
* device server */
- u32 testMessage:1; /**< =1 for testing use only
+ u32 test_message:1; /**< =1 for testing use only
* (Control and Command ignore this
* bit) */
- u32 partialCompletion:1; /**< =1 if there are forthcoming
+ u32 partial_completion:1; /**< =1 if there are forthcoming
* responses/acks associated
* with this message */
u32 preserve:1; /**< =1 this is to let us know to
* preserve channel contents
* (for running guests)*/
- u32 writerInDiag:1; /**< =1 the DiagWriter is active in the
+ u32 writer_in_diag:1; /**< =1 the DiagWriter is active in the
* Diagnostic Partition*/
-
- /* remaining bits in this 32-bit word are available */
- } Flags;
- u32 Reserved; /* Natural alignment */
- u64 MessageHandle; /* Identifies the particular message instance,
+ } flags;
+ u32 reserved; /* Natural alignment */
+ u64 message_handle; /* Identifies the particular message instance,
* and is used to match particular */
/* request instances with the corresponding response instance. */
- u64 PayloadVmOffset; /* Offset of payload area from start of this
+ u64 payload_vm_offset; /* Offset of payload area from start of this
* instance of ControlVm segment */
- u32 PayloadMaxBytes; /* Maximum bytes allocated in payload
+ u32 payload_max_bytes; /* Maximum bytes allocated in payload
* area of ControlVm segment */
- u32 PayloadBytes; /* Actual number of bytes of payload
+ u32 payload_bytes; /* Actual number of bytes of payload
* area to copy between IO/Command; */
/* if non-zero, there is a payload to copy. */
-} CONTROLVM_MESSAGE_HEADER;
-
-typedef struct _CONTROLVM_PACKET_DEVICE_CREATE {
- u32 busNo; /**< bus # (0..n-1) from the msg receiver's
- * perspective */
+};
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /**< bus-relative (0..n-1) device number */
- u64 channelAddr; /**< Guest physical address of the channel, which
- * can be dereferenced by the receiver
- * of this ControlVm command */
- u64 channelBytes; /**< specifies size of the channel in bytes */
- uuid_le dataTypeGuid;/**< specifies format of data in channel */
- uuid_le devInstGuid; /**< instance guid for the device */
- struct InterruptInfo intr; /**< specifies interrupt information */
-} CONTROLVM_PACKET_DEVICE_CREATE; /* for CONTROLVM_DEVICE_CREATE */
-
-typedef struct _CONTROLVM_PACKET_DEVICE_CONFIGURE {
- u32 busNo; /**< bus # (0..n-1) from the msg
+struct controlvm_packet_device_create {
+ u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
+ u32 dev_no; /* bus-relative (0..n-1) device number */
+ u64 channel_addr; /* Guest physical address of the channel, which
+ * can be dereferenced by the receiver of this
+ * ControlVm command */
+ u64 channel_bytes; /* specifies size of the channel in bytes */
+ uuid_le data_type_uuid; /* specifies format of data in channel */
+ uuid_le dev_inst_uuid; /* instance guid for the device */
+ struct irq_info intr; /* specifies interrupt information */
+}; /* for CONTROLVM_DEVICE_CREATE */
+
+struct controlvm_packet_device_configure {
+ u32 bus_no; /**< bus # (0..n-1) from the msg
* receiver's perspective */
/* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /**< bus-relative (0..n-1) device number */
-} CONTROLVM_PACKET_DEVICE_CONFIGURE; /* for CONTROLVM_DEVICE_CONFIGURE */
+ u32 dev_no; /**< bus-relative (0..n-1) device number */
+}; /* for CONTROLVM_DEVICE_CONFIGURE */
-typedef struct _CONTROLVM_MESSAGE_DEVICE_CREATE {
- CONTROLVM_MESSAGE_HEADER Header;
- CONTROLVM_PACKET_DEVICE_CREATE Packet;
-} CONTROLVM_MESSAGE_DEVICE_CREATE; /* total 128 bytes */
+struct controlvm_message_device_create {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_create packet;
+}; /* total 128 bytes */
-typedef struct _CONTROLVM_MESSAGE_DEVICE_CONFIGURE {
- CONTROLVM_MESSAGE_HEADER Header;
- CONTROLVM_PACKET_DEVICE_CONFIGURE Packet;
-} CONTROLVM_MESSAGE_DEVICE_CONFIGURE; /* total 56 bytes */
+struct controlvm_message_device_configure {
+ struct controlvm_message_header header;
+ struct controlvm_packet_device_configure packet;
+}; /* total 56 bytes */
/* This is the format for a message in any ControlVm queue. */
-typedef struct _CONTROLVM_MESSAGE_PACKET {
+struct controlvm_message_packet {
union {
-
- /* BEGIN Request messages */
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 deviceCount; /*< indicates the max number of
- * devices on this bus */
- u64 channelAddr; /*< Guest physical address of the
- * channel, which can be
- * dereferenced by the receiver
- * of this ControlVm command */
- u64 channelBytes; /*< size of the channel in bytes */
- uuid_le busDataTypeGuid;/*< indicates format of data in
- bus channel */
- uuid_le busInstGuid; /*< instance guid for the bus */
- } createBus; /* for CONTROLVM_BUS_CREATE */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_count; /* indicates the max number of
+ * devices on this bus */
+ u64 channel_addr; /* Guest physical address of
+ * the channel, which can be
+ * dereferenced by the receiver
+ * of this ControlVm command */
+ u64 channel_bytes; /* size of the channel */
+ uuid_le bus_data_type_uuid; /* indicates format of
+ * data in bus channel*/
+ uuid_le bus_inst_uuid; /* instance uuid for the bus */
+ } create_bus; /* for CONTROLVM_BUS_CREATE */
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
u32 reserved; /* Natural alignment purposes */
- } destroyBus; /* for CONTROLVM_BUS_DESTROY */
+ } destroy_bus; /* for CONTROLVM_BUS_DESTROY */
struct {
- u32 busNo; /*< bus # (0..n-1) from the
- * msg receiver's
- * perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 reserved1; /* for alignment purposes */
- u64 guestHandle; /* This is used to convert
- * guest physical address to real
- * physical address for DMA, for ex. */
- u64 recvBusInterruptHandle;/*< specifies interrupt
- * info. It is used by SP to register
- * to receive interrupts from the CP.
- * This interrupt is used for bus
- * level notifications. The
- * corresponding
- * sendBusInterruptHandle is kept in
- * CP. */
- } configureBus; /* for CONTROLVM_BUS_CONFIGURE */
-
+ u32 bus_no; /* bus # (0..n-1) from the receiver's
+ * perspective */
+ u32 reserved1; /* for alignment purposes */
+ u64 guest_handle; /* This is used to convert
+ * guest physical address to
+ * physical address */
+ u64 recv_bus_irq_handle;
+ /* specifies interrupt info. It is used by SP
+ * to register to receive interrupts from the
+ * CP. This interrupt is used for bus level
+ * notifications. The corresponding
+ * sendBusInterruptHandle is kept in CP. */
+ } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
/* for CONTROLVM_DEVICE_CREATE */
- CONTROLVM_PACKET_DEVICE_CREATE createDevice;
+ struct controlvm_packet_device_create create_device;
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /*< bus-relative (0..n-1) device
- * number */
- } destroyDevice; /* for CONTROLVM_DEVICE_DESTROY */
-
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
/* for CONTROLVM_DEVICE_CONFIGURE */
- CONTROLVM_PACKET_DEVICE_CONFIGURE configureDevice;
+ struct controlvm_packet_device_configure configure_device;
struct {
- u32 busNo; /*< bus # (0..n-1) from the msg
- * receiver's perspective */
-
- /* Control uses header SegmentIndex field to access bus number... */
- u32 devNo; /*< bus-relative (0..n-1) device
- * number */
- } reconfigureDevice; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ u32 bus_no; /* bus # (0..n-1) from the msg
+ * receiver's perspective */
+ u32 dev_no; /* bus-relative (0..n-1) device # */
+ } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */
struct {
- u32 busNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ struct spar_segment_state state;
u8 reserved[2]; /* Natural alignment purposes */
- } busChangeState; /* for CONTROLVM_BUS_CHANGESTATE */
+ } bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
struct {
- u32 busNo;
- u32 devNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
struct {
- u32 physicalDevice:1; /* =1 if message is for
+ u32 phys_device:1; /* =1 if message is for
* a physical device */
- /* remaining bits in this 32-bit word are available */
} flags;
u8 reserved[2]; /* Natural alignment purposes */
- } deviceChangeState; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */
struct {
- u32 busNo;
- u32 devNo;
- ULTRA_SEGMENT_STATE state;
+ u32 bus_no;
+ u32 dev_no;
+ struct spar_segment_state state;
u8 reserved[6]; /* Natural alignment purposes */
- } deviceChangeStateEvent; /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
+ } device_change_state_event;
+ /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
struct {
- u32 busCount; /*< indicates the max number of busses */
- u32 switchCount; /*< indicates the max number of
- * switches (applicable for service
- * partition only) */
- ULTRA_CHIPSET_FEATURE features;
- u32 platformNumber; /* Platform Number */
- } initChipset; /* for CONTROLVM_CHIPSET_INIT */
+ u32 bus_count; /* indicates the max number of busses */
+ u32 switch_count; /* indicates the max number of
+ * switches if a service partition */
+ enum ultra_chipset_feature features;
+ u32 platform_number; /* Platform Number */
+ } init_chipset; /* for CONTROLVM_CHIPSET_INIT */
struct {
- u32 Options; /*< reserved */
- u32 Test; /*< bit 0 set to run embedded selftest */
- } chipsetSelftest; /* for CONTROLVM_CHIPSET_SELFTEST */
-
- /* END Request messages */
-
- /* BEGIN Response messages */
-
- /* END Response messages */
-
- /* BEGIN Event messages */
+ u32 options; /* reserved */
+ u32 test; /* bit 0 set to run embedded selftest */
+ } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+ u64 addr; /* a physical address of something, that can be
+ * dereferenced by the receiver of this
+ * ControlVm command (depends on command id) */
+ u64 handle; /* a handle of something (depends on command
+ * id) */
+ };
+};
- /* END Event messages */
+/* All messages in any ControlVm queue have this layout. */
+struct controlvm_message {
+ struct controlvm_message_header hdr;
+ struct controlvm_message_packet cmd;
+};
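A sketch of consuming one of these messages with the renamed fields; handle_response is hypothetical, and the printed formats are illustrative only:

#include <linux/printk.h>

static void handle_response(const struct controlvm_message *msg)
{
        if (msg->hdr.flags.failed)
                pr_err("controlvm cmd 0x%x failed, status %u\n",
                       msg->hdr.id, msg->hdr.completion_status);
        else if (msg->hdr.id == CONTROLVM_DEVICE_CREATE)
                pr_info("device %u:%u created\n",
                        msg->cmd.create_device.bus_no,
                        msg->cmd.create_device.dev_no);
}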
- /* BEGIN Ack messages */
+struct device_map {
+ GUEST_PHYSICAL_ADDRESS device_channel_address;
+ u64 device_channel_size;
+ u32 ca_index;
+ u32 reserved; /* natural alignment */
+ u64 reserved2; /* Align structure on 32-byte boundary */
+};
- /* END Ack messages */
- u64 addr; /*< a physical address of something, that
- * can be dereferenced by the receiver of
- * this ControlVm command (depends on
- * command id) */
- u64 handle; /*< a handle of something (depends on
- * command id) */
- };
-} CONTROLVM_MESSAGE_PACKET;
+struct guest_devices {
+ struct device_map video_channel;
+ struct device_map keyboard_channel;
+ struct device_map network_channel;
+ struct device_map storage_channel;
+ struct device_map console_channel;
+ u32 partition_index;
+ u32 pad;
+};
-/* All messages in any ControlVm queue have this layout. */
-typedef struct _CONTROLVM_MESSAGE {
- CONTROLVM_MESSAGE_HEADER hdr;
- CONTROLVM_MESSAGE_PACKET cmd;
-} CONTROLVM_MESSAGE;
-
-typedef struct _DEVICE_MAP {
- GUEST_PHYSICAL_ADDRESS DeviceChannelAddress;
- u64 DeviceChannelSize;
- u32 CA_Index;
- u32 Reserved; /* natural alignment */
- u64 Reserved2; /* Align structure on 32-byte boundary */
-} DEVICE_MAP;
-
-typedef struct _GUEST_DEVICES {
- DEVICE_MAP VideoChannel;
- DEVICE_MAP KeyboardChannel;
- DEVICE_MAP NetworkChannel;
- DEVICE_MAP StorageChannel;
- DEVICE_MAP ConsoleChannel;
- u32 PartitionIndex;
- u32 Pad;
-} GUEST_DEVICES;
-
-typedef struct _ULTRA_CONTROLVM_CHANNEL_PROTOCOL {
- CHANNEL_HEADER Header;
- GUEST_PHYSICAL_ADDRESS gpControlVm; /* guest physical address of
+struct spar_controlvm_channel_protocol {
+ struct channel_header header;
+ GUEST_PHYSICAL_ADDRESS gp_controlvm; /* guest physical address of
* this channel */
- GUEST_PHYSICAL_ADDRESS gpPartitionTables; /* guest physical address of
- * partition tables */
- GUEST_PHYSICAL_ADDRESS gpDiagGuest; /* guest physical address of
+ GUEST_PHYSICAL_ADDRESS gp_partition_tables;/* guest physical address of
+ * partition tables */
+ GUEST_PHYSICAL_ADDRESS gp_diag_guest; /* guest physical address of
* diagnostic channel */
- GUEST_PHYSICAL_ADDRESS gpBootRomDisk; /* guest phys addr of (read
+ GUEST_PHYSICAL_ADDRESS gp_boot_romdisk;/* guest phys addr of (read
* only) Boot ROM disk */
- GUEST_PHYSICAL_ADDRESS gpBootRamDisk; /* guest phys addr of writable
+ GUEST_PHYSICAL_ADDRESS gp_boot_ramdisk;/* guest phys addr of writable
* Boot RAM disk */
- GUEST_PHYSICAL_ADDRESS gpAcpiTable; /* guest phys addr of acpi
+ GUEST_PHYSICAL_ADDRESS gp_acpi_table; /* guest phys addr of acpi
* table */
- GUEST_PHYSICAL_ADDRESS gpControlChannel; /* guest phys addr of control
- * channel */
- GUEST_PHYSICAL_ADDRESS gpDiagRomDisk; /* guest phys addr of diagnostic
+ GUEST_PHYSICAL_ADDRESS gp_control_channel;/* guest phys addr of control
+ * channel */
+ GUEST_PHYSICAL_ADDRESS gp_diag_romdisk;/* guest phys addr of diagnostic
* ROM disk */
- GUEST_PHYSICAL_ADDRESS gpNvram; /* guest phys addr of NVRAM
+ GUEST_PHYSICAL_ADDRESS gp_nvram; /* guest phys addr of NVRAM
* channel */
- u64 RequestPayloadOffset; /* Offset to request payload area */
- u64 EventPayloadOffset; /* Offset to event payload area */
- u32 RequestPayloadBytes; /* Bytes available in request payload
+ u64 request_payload_offset; /* Offset to request payload area */
+ u64 event_payload_offset; /* Offset to event payload area */
+ u32 request_payload_bytes; /* Bytes available in request payload
* area */
- u32 EventPayloadBytes; /* Bytes available in event payload area */
- u32 ControlChannelBytes;
- u32 NvramChannelBytes; /* Bytes in PartitionNvram segment */
- u32 MessageBytes; /* sizeof(CONTROLVM_MESSAGE) */
- u32 MessageCount; /* CONTROLVM_MESSAGE_MAX */
- GUEST_PHYSICAL_ADDRESS gpSmbiosTable; /* guest phys addr of SMBIOS
+ u32 event_payload_bytes;/* Bytes available in event payload area */
+ u32 control_channel_bytes;
+ u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */
+ u32 message_bytes; /* sizeof(struct controlvm_message) */
+ u32 message_count; /* CONTROLVM_MESSAGE_MAX */
+ GUEST_PHYSICAL_ADDRESS gp_smbios_table;/* guest phys addr of SMBIOS
* tables */
- GUEST_PHYSICAL_ADDRESS gpPhysicalSmbiosTable; /* guest phys addr of
- * SMBIOS table */
+ GUEST_PHYSICAL_ADDRESS gp_physical_smbios_table;/* guest phys addr of
+ * SMBIOS table */
/* ULTRA_MAX_GUESTS_PER_SERVICE */
- GUEST_DEVICES gpObsoleteGuestDevices[16];
+ struct guest_devices gp_obsolete_guest_devices[16];
/* guest physical address of EFI firmware image base */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareImageBase;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_image_base;
/* guest physical address of EFI firmware entry point */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareEntryPoint;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_entry_point;
/* guest EFI firmware image size */
- u64 VirtualGuestFirmwareImageSize;
+ u64 virtual_guest_firmware_image_size;
/* GPA = 1MB where EFI firmware image is copied to */
- GUEST_PHYSICAL_ADDRESS VirtualGuestFirmwareBootBase;
- GUEST_PHYSICAL_ADDRESS VirtualGuestImageBase;
- GUEST_PHYSICAL_ADDRESS VirtualGuestImageSize;
- u64 PrototypeControlChannelOffset;
- GUEST_PHYSICAL_ADDRESS VirtualGuestPartitionHandle;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_firmware_boot_base;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_image_base;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_image_size;
+ u64 prototype_control_channel_offset;
+ GUEST_PHYSICAL_ADDRESS virtual_guest_partition_handle;
- u16 RestoreAction; /* Restore Action field to restore the guest
+ u16 restore_action; /* Restore Action field to restore the guest
* partition */
- u16 DumpAction; /* For Windows guests it shows if the visordisk
+ u16 dump_action; /* For Windows guests it shows if the visordisk
* is running in dump mode */
- u16 NvramFailCount;
- u16 SavedCrashMsgCount; /* = CONTROLVM_CRASHMSG_MAX */
- u32 SavedCrashMsgOffset; /* Offset to request payload area needed
+ u16 nvram_fail_count;
+ u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */
+ u32 saved_crash_message_offset; /* Offset to request payload area needed
* for crash dump */
- u32 InstallationError; /* Type of error encountered during
+ u32 installation_error; /* Type of error encountered during
* installation */
- u32 InstallationTextId; /* Id of string to display */
- u16 InstallationRemainingSteps; /* Number of remaining installation
- * steps (for progress bars) */
- u8 ToolAction; /* ULTRA_TOOL_ACTIONS Installation Action
+ u32 installation_text_id; /* Id of string to display */
+ u16 installation_remaining_steps;/* Number of remaining installation
+ * steps (for progress bars) */
+ u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action
* field */
- u8 Reserved; /* alignment */
- ULTRA_EFI_SPAR_INDICATION EfiSparIndication;
- ULTRA_EFI_SPAR_INDICATION EfiSparIndicationSupported;
- u32 SPReserved;
- u8 Reserved2[28]; /* Force signals to begin on 128-byte cache
+ u8 reserved; /* alignment */
+ struct efi_spar_indication efi_spar_ind;
+ struct efi_spar_indication efi_spar_ind_supported;
+ u32 sp_reserved;
+ u8 reserved2[28]; /* Force signals to begin on 128-byte cache
* line */
- SIGNAL_QUEUE_HEADER RequestQueue; /* Service or guest partition
- * uses this queue to send
- * requests to Control */
- SIGNAL_QUEUE_HEADER ResponseQueue; /* Control uses this queue to
- * respond to service or guest
- * partition requests */
- SIGNAL_QUEUE_HEADER EventQueue; /* Control uses this queue to
+ struct signal_queue_header request_queue;/* Service or guest partition
+ * uses this queue to send
+ * requests to Control */
+ struct signal_queue_header response_queue;/* Control uses this queue to
+ * respond to service or guest
+ * partition requests */
+ struct signal_queue_header event_queue; /* Control uses this queue to
* send events to service or
* guest partition */
- SIGNAL_QUEUE_HEADER EventAckQueue; /* Service or guest partition
- * uses this queue to ack
- * Control events */
+ struct signal_queue_header event_ack_queue;/* Service or guest partition
+ * uses this queue to ack
+ * Control events */
/* Request fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE RequestMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
/* Response fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE ResponseMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX];
/* Event fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE EventMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX];
/* Ack fixed-size message pool - does not include payload */
- CONTROLVM_MESSAGE EventAckMsg[CONTROLVM_MESSAGE_MAX];
+ struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX];
/* Message stored during IOVM creation to be reused after crash */
- CONTROLVM_MESSAGE SavedCrashMsg[CONTROLVM_CRASHMSG_MAX];
-} ULTRA_CONTROLVM_CHANNEL_PROTOCOL;
+ struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
+};
/* Offsets for VM channel attributes... */
#define VM_CH_REQ_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestQueue)
+ offsetof(struct spar_controlvm_channel_protocol, request_queue)
#define VM_CH_RESP_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseQueue)
+ offsetof(struct spar_controlvm_channel_protocol, response_queue)
#define VM_CH_EVENT_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventQueue)
+ offsetof(struct spar_controlvm_channel_protocol, event_queue)
#define VM_CH_ACK_QUEUE_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckQueue)
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_queue)
#define VM_CH_REQ_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, RequestMsg)
+ offsetof(struct spar_controlvm_channel_protocol, request_msg)
#define VM_CH_RESP_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ResponseMsg)
+ offsetof(struct spar_controlvm_channel_protocol, response_msg)
#define VM_CH_EVENT_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventMsg)
+ offsetof(struct spar_controlvm_channel_protocol, event_msg)
#define VM_CH_ACK_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, EventAckMsg)
+ offsetof(struct spar_controlvm_channel_protocol, event_ack_msg)
#define VM_CH_CRASH_MSG_OFFSET \
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, SavedCrashMsg)
+ offsetof(struct spar_controlvm_channel_protocol, saved_crash_msg)
/* The following header will be located at the beginning of PayloadVmOffset for
- * various ControlVm commands. The receiver of a ControlVm command with a
- * PayloadVmOffset will dereference this address and then use ConnectionOffset,
- * InitiatorOffset, and TargetOffset to get the location of UTF-8 formatted
- * strings that can be parsed to obtain command-specific information. The value
- * of TotalLength should equal PayloadBytes. The format of the strings at
- * PayloadVmOffset will take different forms depending on the message. See the
- * following Wiki page for more information:
- * https://ustr-linux-1.na.uis.unisys.com/spar/index.php/ControlVm_Parameters_Area
+ * various ControlVm commands. The receiver of a ControlVm command with a
+ * PayloadVmOffset will dereference this address and then use connection_offset,
+ * initiator_offset, and target_offset to get the location of UTF-8 formatted
+ * strings that can be parsed to obtain command-specific information. The value
+ * of total_length should equal PayloadBytes. The format of the strings at
+ * PayloadVmOffset will take different forms depending on the message.
*/
-typedef struct _ULTRA_CONTROLVM_PARAMETERS_HEADER {
- u32 TotalLength;
- u32 HeaderLength;
- u32 ConnectionOffset;
- u32 ConnectionLength;
- u32 InitiatorOffset;
- u32 InitiatorLength;
- u32 TargetOffset;
- u32 TargetLength;
- u32 ClientOffset;
- u32 ClientLength;
- u32 NameOffset;
- u32 NameLength;
- uuid_le Id;
- u32 Revision;
- u32 Reserved; /* Natural alignment */
-} ULTRA_CONTROLVM_PARAMETERS_HEADER;
+struct spar_controlvm_parameters_header {
+ u32 total_length;
+ u32 header_length;
+ u32 connection_offset;
+ u32 connection_length;
+ u32 initiator_offset;
+ u32 initiator_length;
+ u32 target_offset;
+ u32 target_length;
+ u32 client_offset;
+ u32 client_length;
+ u32 name_offset;
+ u32 name_length;
+ uuid_le id;
+ u32 revision;
+ u32 reserved; /* Natural alignment */
+};
#endif /* __CONTROLVMCHANNEL_H__ */
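For reference, a minimal sketch of how a receiver might pull one of the UTF-8 strings out of the parameters area described above. The helper name, the assumption that the offsets are relative to the start of the header, and the bounds checks are illustrative only; they are not part of this patch.

/* Sketch only: copy the string at (offset, length) within the parameters
 * area into a NUL-terminated buffer.  Assumes total_length has already
 * been checked against PayloadBytes, as required above. */
static char *controlvm_param_string(const struct spar_controlvm_parameters_header *hdr,
                                    u32 offset, u32 length,
                                    char *buf, size_t buflen)
{
        const char *area = (const char *)hdr;

        if (offset + length > hdr->total_length || length >= buflen)
                return NULL;            /* out of range or does not fit */
        memcpy(buf, area + offset, length);
        buf[length] = '\0';             /* strings are UTF-8, not NUL-padded */
        return buf;
}

A caller would pass connection_offset/connection_length (and likewise the initiator, target, client, and name pairs) to recover each command-specific string.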
diff --git a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
index 9912e51b89b5..e8fb8678a8e2 100644
--- a/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/diagchannel.h
@@ -37,12 +37,12 @@
#include "channel.h"
/* {EEA7A573-DB82-447c-8716-EFBEAAAE4858} */
-#define ULTRA_DIAG_CHANNEL_PROTOCOL_GUID \
+#define SPAR_DIAG_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xeea7a573, 0xdb82, 0x447c, \
0x87, 0x16, 0xef, 0xbe, 0xaa, 0xae, 0x48, 0x58)
-static const uuid_le UltraDiagChannelProtocolGuid =
- ULTRA_DIAG_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_diag_channel_protocol_uuid =
+ SPAR_DIAG_CHANNEL_PROTOCOL_UUID;
/* {E850F968-3263-4484-8CA5-2A35D087A5A8} */
#define ULTRA_DIAG_ROOT_CHANNEL_PROTOCOL_GUID \
@@ -58,19 +58,20 @@ static const uuid_le UltraDiagChannelProtocolGuid =
* increment this. */
#define ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID 2
-#define ULTRA_DIAG_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraDiagChannelProtocolGuid, \
- "diag", \
- sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
- ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_DIAG_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraDiagChannelProtocolGuid, \
- "diag", \
- sizeof(ULTRA_DIAG_CHANNEL_PROTOCOL), \
- actualBytes, __FILE__, __LINE__, logCtx))
+#define SPAR_DIAG_CHANNEL_OK_CLIENT(ch)\
+ (spar_check_channel_client(ch,\
+ spar_diag_channel_protocol_uuid,\
+ "diag",\
+ sizeof(struct spar_diag_channel_protocol),\
+ ULTRA_DIAG_CHANNEL_PROTOCOL_VERSIONID,\
+ ULTRA_DIAG_CHANNEL_PROTOCOL_SIGNATURE))
+
+#define SPAR_DIAG_CHANNEL_OK_SERVER(bytes)\
+ (spar_check_channel_server(spar_diag_channel_protocol_uuid,\
+ "diag",\
+ sizeof(struct spar_diag_channel_protocol),\
+ bytes))
+
#define MAX_MODULE_NAME_SIZE 128 /* Maximum length of module name... */
#define MAX_ADDITIONAL_INFO_SIZE 256 /* Maximum length of any additional info
* accompanying event... */
@@ -105,21 +106,21 @@ static const uuid_le UltraDiagChannelProtocolGuid =
/* Copied from EFI's EFI_TIME struct in efidef.h. EFI headers are not allowed
* in some of the Supervisor areas, such as Monitor, so it has been "ported" here
* for use in diagnostic event timestamps... */
-typedef struct _DIAG_EFI_TIME {
- u16 Year; /* 1998 - 20XX */
- u8 Month; /* 1 - 12 */
- u8 Day; /* 1 - 31 */
- u8 Hour; /* 0 - 23 */
- u8 Minute; /* 0 - 59 */
- u8 Second; /* 0 - 59 */
- u8 Pad1;
- u32 Nanosecond; /* 0 - 999, 999, 999 */
- s16 TimeZone; /* -1440 to 1440 or 2047 */
- u8 Daylight;
- u8 Pad2;
-} DIAG_EFI_TIME;
-
-typedef enum {
+struct diag_efi_time {
+ u16 year; /* 1998 - 20XX */
+ u8 month; /* 1 - 12 */
+ u8 day; /* 1 - 31 */
+ u8 hour; /* 0 - 23 */
+ u8 minute; /* 0 - 59 */
+ u8 second; /* 0 - 59 */
+ u8 pad1;
+ u32 nanosecond; /* 0 - 999, 999, 999 */
+ s16 timezone; /* -1440 to 1440 or 2047 */
+ u8 daylight;
+ u8 pad2;
+};
+
+enum spar_component_types {
ULTRA_COMPONENT_GUEST = 0,
ULTRA_COMPONENT_MONITOR = 0x01,
ULTRA_COMPONENT_CCM = 0x02, /* Common Control module */
@@ -144,9 +145,9 @@ typedef enum {
ULTRA_COMPONENT_PSERVICES = 0x17,
ULTRA_COMPONENT_PDIAG = 0x18
/* RESERVED 0x18 - 0x1F */
-} ULTRA_COMPONENT_TYPES;
+};
-/* Structure: DIAG_CHANNEL_EVENT Purpose: Contains attributes that make up an
+/* Structure: diag_channel_event Purpose: Contains attributes that make up an
* event to be written to the DIAG_CHANNEL memory. Attributes: EventId: Id of
* the diagnostic event to write to memory. Severity: Severity of the event
* (Error, Info, etc). ModuleName: Module/file name where event originated.
@@ -155,40 +156,40 @@ typedef enum {
* Reserved: Padding to align structure on a 64-byte cache line boundary.
* AdditionalInfo: Array of characters for additional event info (may be
* empty). */
-typedef struct _DIAG_CHANNEL_EVENT {
- u32 EventId;
- u32 Severity;
- u8 ModuleName[MAX_MODULE_NAME_SIZE];
- u32 LineNumber;
- DIAG_EFI_TIME Timestamp; /* Size = 16 bytes */
- u32 PartitionNumber; /* Filled in by Diag Switch as pool blocks are
+struct diag_channel_event {
+ u32 event_id;
+ u32 severity;
+ u8 module_name[MAX_MODULE_NAME_SIZE];
+ u32 line_number;
+ struct diag_efi_time timestamp; /* Size = 16 bytes */
+ u32 partition_number; /* Filled in by Diag Switch as pool blocks are
* filled */
- u16 VirtualProcessorNumber;
- u16 LogicalProcessorNumber;
- u8 ComponentType; /* ULTRA_COMPONENT_TYPES */
- u8 Subsystem;
- u16 Reserved0; /* pad to u64 alignment */
- u32 BlockNumber; /* filled in by DiagSwitch as pool blocks are
+ u16 vcpu_number;
+ u16 lcpu_number;
+ u8 component_type; /* ULTRA_COMPONENT_TYPES */
+ u8 subsystem;
+ u16 reserved0; /* pad to u64 alignment */
+ u32 block_no; /* filled in by DiagSwitch as pool blocks are
* filled */
- u32 BlockNumberHigh;
- u32 EventNumber; /* filled in by DiagSwitch as pool blocks are
+ u32 block_no_high;
+ u32 event_no; /* filled in by DiagSwitch as pool blocks are
* filled */
- u32 EventNumberHigh;
+ u32 event_no_high;
- /* The BlockNumber and EventNumber fields are set only by DiagSwitch
+ /* The block_no and event_no fields are set only by DiagSwitch
* and referenced only by WinDiagDisplay formatting tool as
* additional diagnostic information. Other tools including
* WinDiagDisplay currently ignore these 'Reserved' bytes. */
- u8 Reserved[8];
- u8 AdditionalInfo[MAX_ADDITIONAL_INFO_SIZE];
+ u8 reserved[8];
+ u8 additional_info[MAX_ADDITIONAL_INFO_SIZE];
- /* NOTE: Changesto DIAG_CHANNEL_EVENT generally need to be reflected in
+ /* NOTE: Changes to diag_channel_event generally need to be reflected in
* existing copies *
* - for AppOS at
* GuestLinux/visordiag_early/supervisor_diagchannel.h *
* - for WinDiagDisplay at
* EFI/Ultra/Tools/WinDiagDisplay/WinDiagDisplay/diagstruct.h */
-} DIAG_CHANNEL_EVENT;
+};
/* Levels of severity for diagnostic events, in order from lowest severity to
* highest (i.e. fatal errors are the most severe, and should always be logged,
@@ -201,7 +202,8 @@ typedef struct _DIAG_CHANNEL_EVENT {
* they are valid for controlling the amount of event data. This enum is also
* defined in DotNet\sParFramework\ControlFramework\ControlFramework.cs. If a
* change is made to this enum, they should also be reflected in that file. */
-typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
+enum diag_severity {
+ DIAG_SEVERITY_ENUM_BEGIN = 0,
DIAG_SEVERITY_OVERRIDE = DIAG_SEVERITY_ENUM_BEGIN,
DIAG_SEVERITY_VERBOSE = DIAG_SEVERITY_OVERRIDE, /* 0 */
DIAG_SEVERITY_INFO = DIAG_SEVERITY_VERBOSE + 1, /* 1 */
@@ -212,7 +214,7 @@ typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
DIAG_SEVERITY_ENUM_END = DIAG_SEVERITY_SHUTOFF, /* 5 */
DIAG_SEVERITY_NONFATAL_ERR = DIAG_SEVERITY_ERR,
DIAG_SEVERITY_FATAL_ERR = DIAG_SEVERITY_PRINT
-} DIAG_SEVERITY;
+};
/* Event Cause enums
*
@@ -233,26 +235,24 @@ typedef enum { DIAG_SEVERITY_ENUM_BEGIN = 0,
* If a change is made to this enum, they should also be reflected in that
* file. */
-
-
/* A cause value "DIAG_CAUSE_FILE_XFER" together with a severity value of
* "DIAG_SEVERITY_PRINT" (=4), is used for transferring text or binary file to
* the Diag partition. This cause-severity combination will be used by Logger
* DiagSwitch to segregate events into block types. The files are transferred in
-* 256 byte chunks maximum, in the AdditionalInfo field of the DIAG_CHANNEL_EVENT
+* 256 byte chunks maximum, in the AdditionalInfo field of the diag_channel_event
* structure. In the file transfer mode, some event fields will have different
* meaning: EventId specifies the file offset, severity specifies the block type,
* ModuleName specifies the filename, LineNumber specifies the number of valid
* data bytes in an event and AdditionalInfo contains up to 256 bytes of data. */
/* The Diag DiagWriter appends event blocks to events.raw as today, and for data
- * blocks uses DIAG_CHANNEL_EVENT
+ * blocks uses diag_channel_event
* PartitionNumber to extract and append 'AdditionalInfo' to filename (specified
* by ModuleName). */
/* The Dell PDiag uses this new mechanism to stash DSET .zip onto the
* 'diagnostic' virtual disk. */
-typedef enum {
+enum diag_cause {
DIAG_CAUSE_UNKNOWN = 0,
DIAG_CAUSE_UNKNOWN_DEBUG = DIAG_CAUSE_UNKNOWN + 1, /* 1 */
DIAG_CAUSE_DEBUG = DIAG_CAUSE_UNKNOWN_DEBUG + 1, /* 2 */
@@ -264,7 +264,7 @@ typedef enum {
DIAG_CAUSE_INTERNAL_ERROR = DIAG_CAUSE_INVALID_REQUEST + 1, /* 8 */
DIAG_CAUSE_FILE_XFER = DIAG_CAUSE_INTERNAL_ERROR + 1, /* 9 */
DIAG_CAUSE_ENUM_END = DIAG_CAUSE_FILE_XFER /* 9 */
-} DIAG_CAUSE;
+};
/* Event Cause category defined into the byte 2 of Severity */
#define CAUSE_DEBUG (DIAG_CAUSE_DEBUG << CAUSE_SHIFT_AMT)
@@ -344,7 +344,7 @@ typedef enum {
#define CAUSE_FILE_XFER_SEVERITY_PRINT \
(CAUSE_FILE_XFER | DIAG_SEVERITY_PRINT)
-/* Structure: DIAG_CHANNEL_PROTOCOL_HEADER
+/* Structure: diag_channel_protocol_header
*
* Purpose: Contains attributes that make up the header specific to the
* DIAG_CHANNEL area.
@@ -362,12 +362,12 @@ typedef enum {
* whether events are logged. Any event's severity for a
* particular subsystem below this level will be discarded.
*/
-typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
- volatile u32 DiagLock;
- u8 IsChannelInitialized;
- u8 Reserved[3];
- u8 SubsystemSeverityFilter[64];
-} DIAG_CHANNEL_PROTOCOL_HEADER;
+struct diag_channel_protocol_header {
+ u32 diag_lock;
+ u8 channel_initialized;
+ u8 reserved[3];
+ u8 subsystem_severity_filter[64];
+};
/* The Diagram for the Diagnostic Channel: */
/* ----------------------- */
@@ -375,19 +375,20 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
/* ----------------------- */
/* | Signal Queue Header | Defined by SIGNAL_QUEUE_HEADER */
/* ----------------------- */
-/* | DiagChannel Header | Defined by DIAG_CHANNEL_PROTOCOL_HEADER */
+/* | DiagChannel Header | Defined by diag_channel_protocol_header */
/* ----------------------- */
-/* | Channel Event Info | Defined by (DIAG_CHANNEL_EVENT * MAX_EVENTS) */
+/* | Channel Event Info | Defined by diag_channel_event*MAX_EVENTS */
/* ----------------------- */
/* | Reserved | Reserved (pad out to 4MB) */
/* ----------------------- */
/* Offsets/sizes for diagnostic channel attributes... */
-#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(ULTRA_CHANNEL_PROTOCOL))
-#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(SIGNAL_QUEUE_HEADER))
+#define DIAG_CH_QUEUE_HEADER_OFFSET (sizeof(struct channel_header))
+#define DIAG_CH_QUEUE_HEADER_SIZE (sizeof(struct signal_queue_header))
#define DIAG_CH_PROTOCOL_HEADER_OFFSET \
(DIAG_CH_QUEUE_HEADER_OFFSET + DIAG_CH_QUEUE_HEADER_SIZE)
-#define DIAG_CH_PROTOCOL_HEADER_SIZE (sizeof(DIAG_CHANNEL_PROTOCOL_HEADER))
+#define DIAG_CH_PROTOCOL_HEADER_SIZE \
+ (sizeof(struct diag_channel_protocol_header))
#define DIAG_CH_EVENT_OFFSET \
(DIAG_CH_PROTOCOL_HEADER_OFFSET + DIAG_CH_PROTOCOL_HEADER_SIZE)
#define DIAG_CH_SIZE (4096 * 1024)
@@ -397,7 +398,7 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
#define DIAG_CH_LRG_SIZE (2 * DIAG_CH_SIZE) /* 8 MB */
/*
- * Structure: ULTRA_DIAG_CHANNEL_PROTOCOL
+ * Structure: spar_diag_channel_protocol
*
* Purpose: Contains attributes that make up the DIAG_CHANNEL memory.
*
@@ -409,19 +410,18 @@ typedef struct _DIAG_CHANNEL_PROTOCOL_HEADER {
* store event.
*
* DiagChannelHeader: Diagnostic channel header info (see
- * DIAG_CHANNEL_PROTOCOL_HEADER comments).
+ * diag_channel_protocol_header comments).
*
* Events: Area where diagnostic events (up to MAX_EVENTS) are written.
*
*Reserved: Reserved area to allow for correct channel size padding.
*/
-typedef struct _ULTRA_DIAG_CHANNEL_PROTOCOL {
- ULTRA_CHANNEL_PROTOCOL CommonChannelHeader;
- SIGNAL_QUEUE_HEADER QueueHeader;
- DIAG_CHANNEL_PROTOCOL_HEADER DiagChannelHeader;
- DIAG_CHANNEL_EVENT Events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
- sizeof(DIAG_CHANNEL_EVENT)];
-}
-ULTRA_DIAG_CHANNEL_PROTOCOL;
+struct spar_diag_channel_protocol {
+ struct channel_header common_channel_header;
+ struct signal_queue_header queue_header;
+ struct diag_channel_protocol_header diag_channel_header;
+ struct diag_channel_event events[(DIAG_CH_SIZE - DIAG_CH_EVENT_OFFSET) /
+ sizeof(struct diag_channel_event)];
+};
#endif
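As a concrete illustration of the DIAG_CAUSE_FILE_XFER convention documented above, the sketch below fills one diag_channel_event for a single file chunk. The helper is hypothetical: it only shows how the repurposed fields line up (event_id carries the file offset, module_name the filename, line_number the count of valid data bytes) and it omits timestamping and the signal-queue insertion.

/* Sketch only: package up to 256 bytes of file data into one event. */
static void diag_fill_file_xfer_event(struct diag_channel_event *ev,
                                      const char *filename, u32 file_offset,
                                      const u8 *data, u32 len)
{
        if (len > MAX_ADDITIONAL_INFO_SIZE)
                len = MAX_ADDITIONAL_INFO_SIZE;         /* 256-byte chunks max */

        memset(ev, 0, sizeof(*ev));
        ev->event_id = file_offset;                     /* file offset, not an event id */
        ev->severity = CAUSE_FILE_XFER_SEVERITY_PRINT;  /* marks a data block */
        strncpy((char *)ev->module_name, filename,
                MAX_MODULE_NAME_SIZE - 1);              /* filename */
        ev->line_number = len;                          /* valid data bytes */
        memcpy(ev->additional_info, data, len);         /* the chunk itself */
}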
diff --git a/drivers/staging/unisys/common-spar/include/channels/iochannel.h b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
index b1dd73d1f42c..eb7efe484f6f 100644
--- a/drivers/staging/unisys/common-spar/include/channels/iochannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/iochannel.h
@@ -8,7 +8,6 @@
* this file. Note: Everything is OS-independent because this file is
* used by Windows, Linux and possible EFI drivers. */
-
/*
* Communication flow between the IOPart and GuestPart uses the channel headers
* channel state. The following states are currently being used:
@@ -60,42 +59,22 @@
#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_VHBA_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVhbaChannelProtocolGuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VHBA_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVhbaChannelProtocolGuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, actualBytes, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VNIC_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVnicChannelProtocolGuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VNIC_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVnicChannelProtocolGuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, actualBytes, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VSWITCH_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, UltraVswitchChannelProtocolGuid, \
- "vswitch", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-#define ULTRA_VSWITCH_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVswitchChannelProtocolGuid, \
- "vswitch", MIN_IO_CHANNEL_SIZE, \
- actualBytes, \
- __FILE__, __LINE__, logCtx))
+#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE))
+
+#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel_client(ch, spar_vnic_channel_protocol_uuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE))
+
/*
* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
* IO Partition is defined below. */
-
/*
* Defines and enums.
*/
@@ -172,8 +151,9 @@
* SCSI Host value */
/* various types of network packets that can be sent in cmdrsp */
-typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
- * incoming packet */
+enum net_types {
+ NET_RCV_POST = 0, /* submit buffer to hold receiving
+ * incoming packet */
/* virtnic -> uisnic */
NET_RCV, /* incoming packet received */
/* uisnic -> virtpci */
@@ -195,7 +175,7 @@ typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
* its MAC addr */
NET_MACADDR_ACK, /* MAC address */
-} NET_TYPES;
+};
#define ETH_HEADER_SIZE 14 /* size of ethernet header */
@@ -211,19 +191,23 @@ typedef enum { NET_RCV_POST = 0, /* submit buffer to hold receiving
#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */
#endif /* MAX_MACADDR_LEN */
-#define ETH_IS_LOCALLY_ADMINISTERED(Address) \
- (((u8 *) (Address))[0] & ((u8) 0x02))
+#define ETH_IS_LOCALLY_ADMINISTERED(address) \
+ (((u8 *)(address))[0] & ((u8)0x02))
#define NIC_VENDOR_ID 0x0008000B
/* various types of scsi task mgmt commands */
-typedef enum { TASK_MGMT_ABORT_TASK =
- 1, TASK_MGMT_BUS_RESET, TASK_MGMT_LUN_RESET,
- TASK_MGMT_TARGET_RESET,
-} TASK_MGMT_TYPES;
+enum task_mgmt_types {
+ TASK_MGMT_ABORT_TASK = 1,
+ TASK_MGMT_BUS_RESET,
+ TASK_MGMT_LUN_RESET,
+ TASK_MGMT_TARGET_RESET,
+};
/* various types of vdisk mgmt commands */
-typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
-} VDISK_MGMT_TYPES;
+enum vdisk_mgmt_types {
+ VDISK_MGMT_ACQUIRE = 1,
+ VDISK_MGMT_RELEASE,
+};
/* this is used in the vdest field */
#define VDEST_ALL 0xFFFF
@@ -242,7 +226,6 @@ typedef enum { VDISK_MGMT_ACQUIRE = 1, VDISK_MGMT_RELEASE,
/*
* structs with pragma pack */
-
/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
@@ -377,16 +360,16 @@ struct uiscmdrsp_scsi {
do { \
memset(buf, 0, \
MINNUM(len, \
- (unsigned int) NO_DISK_INQUIRY_RESULT_LEN)); \
- buf[2] = (u8) SCSI_SPC2_VER; \
+ (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
+ buf[2] = (u8)SCSI_SPC2_VER; \
if (lun == 0) { \
- buf[0] = (u8) lun0notpresent; \
- buf[3] = (u8) DEV_HISUPPORT; \
+ buf[0] = (u8)lun0notpresent; \
+ buf[3] = (u8)DEV_HISUPPORT; \
} else \
- buf[0] = (u8) notpresent; \
- buf[4] = (u8) ( \
+ buf[0] = (u8)notpresent; \
+ buf[4] = (u8)( \
MINNUM(len, \
- (unsigned int) NO_DISK_INQUIRY_RESULT_LEN) - 5); \
+ (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
buf[8] = 'D'; \
buf[9] = 'E'; \
@@ -410,12 +393,10 @@ struct uiscmdrsp_scsi {
} \
} while (0)
-
/*
* Struct & Defines to support sense information.
*/
-
/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
* initialized in exactly the manner that is recommended in Windows (hence the
* odd values).
@@ -429,21 +410,21 @@ struct uiscmdrsp_scsi {
 * AdditionalSenseLength will be sizeof(sense_data)-8=10.
*/
struct sense_data {
- u8 ErrorCode:7;
- u8 Valid:1;
- u8 SegmentNumber;
- u8 SenseKey:4;
- u8 Reserved:1;
- u8 IncorrectLength:1;
- u8 EndOfMedia:1;
- u8 FileMark:1;
- u8 Information[4];
- u8 AdditionalSenseLength;
- u8 CommandSpecificInformation[4];
- u8 AdditionalSenseCode;
- u8 AdditionalSenseCodeQualifier;
- u8 FieldReplaceableUnitCode;
- u8 SenseKeySpecific[3];
+ u8 errorcode:7;
+ u8 valid:1;
+ u8 segment_number;
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 incorrect_length:1;
+ u8 end_of_media:1;
+ u8 file_mark:1;
+ u8 information[4];
+ u8 additional_sense_length;
+ u8 command_specific_information[4];
+ u8 additional_sense_code;
+ u8 additional_sense_code_qualifier;
+ u8 fru_code;
+ u8 sense_key_specific[3];
};
/* some SCSI ADSENSE codes */
@@ -484,7 +465,6 @@ struct net_pkt_xmt {
* each fragment */
char ethhdr[ETH_HEADER_SIZE]; /* the ethernet header */
struct {
-
/* these are needed for csum at uisnic end */
u8 valid; /* 1 = rest of this struct is valid - else
* ignore */
@@ -528,13 +508,12 @@ struct net_pkt_rcvpost {
* to be describable */
struct phys_info frag; /* physical page information for the
* single fragment 2K rcv buf */
- u64 UniqueNum; /* This is used to make sure that
+ u64 unique_num; /* This is used to make sure that
* receive posts are returned to */
/* the Adapter which sent them originally. */
};
struct net_pkt_rcv {
-
/* the number of receive buffers that can be chained */
/* is based on max mtu and size of each rcv buf */
u32 rcv_done_len; /* length of received data */
@@ -544,8 +523,8 @@ struct net_pkt_rcv {
* that must be chained; */
/* each entry is a receive buffer provided by NET_RCV_POST. */
/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
- u64 UniqueNum;
- u32 RcvsDroppedDelta;
+ u64 unique_num;
+ u32 rcvs_dropped_delta;
};
struct net_pkt_enbdis {
@@ -560,7 +539,7 @@ struct net_pkt_macaddr {
/* cmd rsp packet used for VNIC network traffic */
struct uiscmdrsp_net {
- NET_TYPES type;
+ enum net_types type;
void *buf;
union {
struct net_pkt_xmt xmt; /* used for NET_XMIT */
@@ -576,7 +555,7 @@ struct uiscmdrsp_net {
};
struct uiscmdrsp_scsitaskmgmt {
- TASK_MGMT_TYPES tasktype;
+ enum task_mgmt_types tasktype;
/* the type of task */
struct uisscsi_dest vdest;
@@ -594,7 +573,7 @@ struct uiscmdrsp_scsitaskmgmt {
* For windows guests, this is a pointer to a location that a waiting
* thread is testing to see if the taskmgmt command has completed.
* When the rsp is received by guest, the thread receiving the
- * response uses this to notify the the thread waiting for taskmgmt
+ * response uses this to notify the thread waiting for taskmgmt
* command completion. Its value is preserved by iopart & returned
* as is in the task mgmt rsp. */
void *notifyresult;
@@ -615,7 +594,7 @@ struct uiscmdrsp_scsitaskmgmt {
/* Note that the vHba pointer is not used by the Client/Guest side. */
struct uiscmdrsp_disknotify {
u8 add; /* 0-remove, 1-add */
- void *vHba; /* Pointer to vhba_info for channel info to
+ void *v_hba; /* Pointer to vhba_info for channel info to
* route msg */
u32 channel, id, lun; /* SCSI Path of Disk to added or removed */
};
@@ -623,7 +602,7 @@ struct uiscmdrsp_disknotify {
/* The following is used by virthba/vSCSI to send the Acquire/Release commands
* to the IOVM. */
struct uiscmdrsp_vdiskmgmt {
- VDISK_MGMT_TYPES vdisktype;
+ enum vdisk_mgmt_types vdisktype;
/* the type of task */
struct uisscsi_dest vdest;
@@ -641,7 +620,7 @@ struct uiscmdrsp_vdiskmgmt {
* For windows guests, this is a pointer to a location that a waiting
* thread is testing to see if the taskmgmt command has completed.
* When the rsp is received by guest, the thread receiving the
- * response uses this to notify the the thread waiting for taskmgmt
+ * response uses this to notify the thread waiting for taskmgmt
* command completion. Its value is preserved by iopart & returned
* as is in the task mgmt rsp. */
void *notifyresult;
@@ -683,11 +662,11 @@ struct uiscmdrsp {
/* This is just the header of the IO channel. It is assumed that directly after
* this header there is a large region of memory which contains the command and
-* response queues as specified in cmdQ and rspQ SIGNAL_QUEUE_HEADERS. */
-typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
- CHANNEL_HEADER ChannelHeader;
- SIGNAL_QUEUE_HEADER cmdQ;
- SIGNAL_QUEUE_HEADER rspQ;
+* response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS. */
+struct spar_io_channel_protocol {
+ struct channel_header channel_header;
+ struct signal_queue_header cmd_q;
+ struct signal_queue_header rsp_q;
union {
struct {
struct vhba_wwnn wwnn; /* 8 bytes */
@@ -697,14 +676,14 @@ typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
u32 num_rcv_bufs; /* 4 */
u32 mtu; /* 4 */
- uuid_le zoneGuid; /* 16 */
+ uuid_le zone_uuid; /* 16 */
} vnic; /* total 30 */
};
#define MAX_CLIENTSTRING_LEN 1024
- u8 clientString[MAX_CLIENTSTRING_LEN]; /* NULL terminated - so holds
+ u8 client_string[MAX_CLIENTSTRING_LEN];/* NULL terminated - so holds
* max - 1 bytes */
-} ULTRA_IO_CHANNEL_PROTOCOL;
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -733,144 +712,17 @@ typedef struct _ULTRA_IO_CHANNEL_PROTOCOL {
* INLINE functions for initializing and accessing I/O data channels
*/
-
-#define NUMSIGNALS(x, q) (((ULTRA_IO_CHANNEL_PROTOCOL *)(x))->q.MaxSignalSlots)
-#define SIZEOF_PROTOCOL (COVER(sizeof(ULTRA_IO_CHANNEL_PROTOCOL), 64))
+#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
-#define IO_CHANNEL_SIZE(x) COVER(SIZEOF_PROTOCOL + \
- (NUMSIGNALS(x, cmdQ) + \
- NUMSIGNALS(x, rspQ)) * SIZEOF_CMDRSP, 4096)
#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-#ifdef __GNUC__
-/* These defines should only ever be used in service partitons */
-/* because they rely on the size of uiscmdrsp */
-#define QSLOTSFROMBYTES(bytes) (((bytes-SIZEOF_PROTOCOL)/2)/SIZEOF_CMDRSP)
-#define QSIZEFROMBYTES(bytes) (QSLOTSFROMBYTES(bytes)*SIZEOF_CMDRSP)
-#define SignalQInit(x) \
- do { \
- x->cmdQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
- x->cmdQ.oSignalBase = SIZEOF_PROTOCOL - \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
- x->cmdQ.SignalSize = SIZEOF_CMDRSP; \
- x->cmdQ.MaxSignalSlots = \
- QSLOTSFROMBYTES(x->ChannelHeader.Size); \
- x->cmdQ.MaxSignals = x->cmdQ.MaxSignalSlots - 1; \
- x->rspQ.Size = QSIZEFROMBYTES(x->ChannelHeader.Size); \
- x->rspQ.oSignalBase = \
- (SIZEOF_PROTOCOL + x->cmdQ.Size) - \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, rspQ); \
- x->rspQ.SignalSize = SIZEOF_CMDRSP; \
- x->rspQ.MaxSignalSlots = \
- QSLOTSFROMBYTES(x->ChannelHeader.Size); \
- x->rspQ.MaxSignals = x->rspQ.MaxSignalSlots - 1; \
- x->ChannelHeader.oChannelSpace = \
- offsetof(ULTRA_IO_CHANNEL_PROTOCOL, cmdQ); \
- } while (0)
-
-#define INIT_CLIENTSTRING(chan, type, clientStr, clientStrLen) \
- do { \
- if (clientStr) { \
- chan->ChannelHeader.oClientString = \
- offsetof(type, clientString); \
- memcpy(chan->clientString, clientStr, \
- MINNUM(clientStrLen, \
- (u32) (MAX_CLIENTSTRING_LEN - 1))); \
- chan->clientString[MINNUM(clientStrLen, \
- (u32) (MAX_CLIENTSTRING_LEN \
- - 1))] \
- = '\0'; \
- } \
- else \
- if (clientStrLen > 0) \
- return 0; \
- } while (0)
-
-
-#define ULTRA_IO_CHANNEL_SERVER_READY(x, chanId, logCtx) \
- ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, CHANNELSRV_READY, \
- logCtx)
-
-#define ULTRA_IO_CHANNEL_SERVER_NOTREADY(x, chanId, logCtx) \
- ULTRA_CHANNEL_SERVER_TRANSITION(x, chanId, SrvState, \
- CHANNELSRV_UNINITIALIZED, logCtx)
-
-static inline int ULTRA_VHBA_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
- struct vhba_wwnn *wwnn,
- struct vhba_config_max *max,
- unsigned char *clientStr,
- u32 clientStrLen, u64 bytes) {
- memset(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
- x->ChannelHeader.VersionId = ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID;
- x->ChannelHeader.Signature = ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE;
- x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
- x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
- x->ChannelHeader.Size = COVER(bytes, 4096);
- x->ChannelHeader.Type = UltraVhbaChannelProtocolGuid;
- x->ChannelHeader.ZoneGuid = NULL_UUID_LE;
- x->vhba.wwnn = *wwnn;
- x->vhba.max = *max;
- INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
- clientStrLen);
- SignalQInit(x);
- if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
- return 0;
- }
- if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
- return 0;
- }
- return 1;
-}
-
-static inline void ULTRA_VHBA_set_max(ULTRA_IO_CHANNEL_PROTOCOL *x,
- struct vhba_config_max *max) {
- x->vhba.max = *max;
-}
-
-static inline int ULTRA_VNIC_init_channel(ULTRA_IO_CHANNEL_PROTOCOL *x,
- unsigned char *macaddr,
- u32 num_rcv_bufs, u32 mtu,
- uuid_le zoneGuid,
- unsigned char *clientStr,
- u32 clientStrLen,
- u64 bytes) {
- memset(x, 0, sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
- x->ChannelHeader.VersionId = ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID;
- x->ChannelHeader.Signature = ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE;
- x->ChannelHeader.SrvState = CHANNELSRV_UNINITIALIZED;
- x->ChannelHeader.HeaderSize = sizeof(x->ChannelHeader);
- x->ChannelHeader.Size = COVER(bytes, 4096);
- x->ChannelHeader.Type = UltraVnicChannelProtocolGuid;
- x->ChannelHeader.ZoneGuid = NULL_UUID_LE;
- memcpy(x->vnic.macaddr, macaddr, MAX_MACADDR_LEN);
- x->vnic.num_rcv_bufs = num_rcv_bufs;
- x->vnic.mtu = mtu;
- x->vnic.zoneGuid = zoneGuid;
- INIT_CLIENTSTRING(x, ULTRA_IO_CHANNEL_PROTOCOL, clientStr,
- clientStrLen);
- SignalQInit(x);
- if ((x->cmdQ.MaxSignalSlots > MAX_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots > MAX_NUMSIGNALS)) {
- return 0;
- }
- if ((x->cmdQ.MaxSignalSlots < MIN_NUMSIGNALS) ||
- (x->rspQ.MaxSignalSlots < MIN_NUMSIGNALS)) {
- return 0;
- }
- return 1;
-}
-
-#endif /* __GNUC__ */
/*
* INLINE function for expanding a guest's pfn-off-size into multiple 4K page
 * pfn-off-size entries.
*/
-
/* we deal with 4K page sizes when it comes to passing page information
* between */
/* Guest and IOPartition. */
@@ -900,13 +752,12 @@ add_physinfo_entries(u32 inp_pfn, /* input - specifies the pfn to be used
firstlen = PI_PAGE_SIZE - inp_off;
if (inp_len <= firstlen) {
-
/* the input entry spans only one page - add as is */
if (index >= max_pi_arr_entries)
return 0;
pi_arr[index].pi_pfn = inp_pfn;
- pi_arr[index].pi_off = (u16) inp_off;
- pi_arr[index].pi_len = (u16) inp_len;
+ pi_arr[index].pi_off = (u16)inp_off;
+ pi_arr[index].pi_len = (u16)inp_len;
return index + 1;
}
@@ -924,9 +775,8 @@ add_physinfo_entries(u32 inp_pfn, /* input - specifies the pfn to be used
else {
pi_arr[index + i].pi_off = 0;
pi_arr[index + i].pi_len =
- (u16) MINNUM(len, (u32) PI_PAGE_SIZE);
+ (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
}
-
}
return index + i;
}
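For orientation, a minimal sketch of how a guest-side driver might use the simplified client check above before touching the queues. The probe wrapper and its error handling are hypothetical; only the macro and the struct name come from this patch.

/* Sketch only: refuse an I/O channel that fails the vhba
 * uuid/version/signature/size check. */
static int vhba_validate_channel(struct spar_io_channel_protocol *ch)
{
        if (!SPAR_VHBA_CHANNEL_OK_CLIENT(ch))
                return -EINVAL;         /* wrong type, version, signature or size */

        /* channel verified - cmd_q and rsp_q are now safe to use */
        return 0;
}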
diff --git a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
index 1231c454176f..2c42ce16e0cf 100644
--- a/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
+++ b/drivers/staging/unisys/common-spar/include/channels/vbuschannel.h
@@ -28,65 +28,61 @@
#include "channel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_GUID \
+#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \
UUID_LE(0x193b331b, 0xc58f, 0x11da, \
0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le UltraVbusChannelProtocolGuid =
- ULTRA_VBUS_CHANNEL_PROTOCOL_GUID;
+static const uuid_le spar_vbus_channel_protocol_uuid =
+ SPAR_VBUS_CHANNEL_PROTOCOL_UUID;
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
/* Must increment this whenever you insert or delete fields within this channel
* struct. Also increment whenever you change the meaning of fields within this
* channel struct so as to break pre-existing software. Note that you can
* usually add fields to the END of the channel struct withOUT needing to
* increment this. */
-#define ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
-#define ULTRA_VBUS_CHANNEL_OK_CLIENT(pChannel, logCtx) \
- (ULTRA_check_channel_client(pChannel, \
- UltraVbusChannelProtocolGuid, \
- "vbus", \
- sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
- ULTRA_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VBUS_CHANNEL_PROTOCOL_SIGNATURE, \
- __FILE__, __LINE__, logCtx))
-
-#define ULTRA_VBUS_CHANNEL_OK_SERVER(actualBytes, logCtx) \
- (ULTRA_check_channel_server(UltraVbusChannelProtocolGuid, \
- "vbus", \
- sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL), \
- actualBytes, \
- __FILE__, __LINE__, logCtx))
+#define SPAR_VBUS_CHANNEL_OK_CLIENT(ch) \
+ spar_check_channel_client(ch, \
+ spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct spar_vbus_channel_protocol),\
+ SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
+ SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
+#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes) \
+ (spar_check_channel_server(spar_vbus_channel_protocol_uuid, \
+ "vbus", \
+ sizeof(struct ultra_vbus_channel_protocol),\
+ actual_bytes))
#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
-typedef struct _ULTRA_VBUS_HEADERINFO {
- u32 structBytes; /* size of this struct in bytes */
- u32 deviceInfoStructBytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
- u32 devInfoCount; /* num of items in DevInfo member */
+struct spar_vbus_headerinfo {
+ u32 struct_bytes; /* size of this struct in bytes */
+ u32 device_info_struct_bytes; /* sizeof(ULTRA_VBUS_DEVICEINFO) */
+ u32 dev_info_count; /* num of items in DevInfo member */
/* (this is the allocated size) */
- u32 chpInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the ChpInfo struct (below) */
- u32 busInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the BusInfo struct (below) */
- u32 devInfoByteOffset; /* byte offset from beginning of this struct */
- /* to the the DevInfo array (below) */
+ u32 chp_info_offset; /* byte offset from beginning of this struct */
+ /* to the ChpInfo struct (below) */
+ u32 bus_info_offset; /* byte offset from beginning of this struct */
+ /* to the BusInfo struct (below) */
+ u32 dev_info_offset; /* byte offset from beginning of this struct */
+ /* to the DevInfo array (below) */
u8 reserved[104];
-} ULTRA_VBUS_HEADERINFO;
+};
-typedef struct _ULTRA_VBUS_CHANNEL_PROTOCOL {
- ULTRA_CHANNEL_PROTOCOL ChannelHeader; /* initialized by server */
- ULTRA_VBUS_HEADERINFO HdrInfo; /* initialized by server */
+struct spar_vbus_channel_protocol {
+ struct channel_header channel_header; /* initialized by server */
+ struct spar_vbus_headerinfo hdr_info; /* initialized by server */
/* the remainder of this channel is filled in by the client */
- ULTRA_VBUS_DEVICEINFO ChpInfo; /* describes client chipset device and
- * driver */
- ULTRA_VBUS_DEVICEINFO BusInfo; /* describes client bus device and
- * driver */
- ULTRA_VBUS_DEVICEINFO DevInfo[0]; /* describes client device and
- * driver for */
- /* each device on the bus */
-} ULTRA_VBUS_CHANNEL_PROTOCOL;
+ struct ultra_vbus_deviceinfo chp_info;
+ /* describes client chipset device and driver */
+ struct ultra_vbus_deviceinfo bus_info;
+ /* describes client bus device and driver */
+ struct ultra_vbus_deviceinfo dev_info[0];
+ /* describes client device and driver for each device on the bus */
+};
#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
(sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
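The header-info offsets above make the vbus channel self-describing, so a reader need not hard-code the C layout. Below is a minimal sketch of looking up the Nth client device entry that way; the helper name is invented, everything else uses the renamed fields from this hunk.

/* Sketch only: locate dev_info[devix] via the self-describing offsets
 * instead of the compile-time struct layout. */
static struct ultra_vbus_deviceinfo *
spar_vbus_dev_info(struct spar_vbus_channel_protocol *chan, u32 devix)
{
        struct spar_vbus_headerinfo *hdr = &chan->hdr_info;
        u8 *base = (u8 *)hdr;   /* offsets are from the start of hdr_info */

        if (devix >= hdr->dev_info_count)
                return NULL;
        return (struct ultra_vbus_deviceinfo *)
                (base + hdr->dev_info_offset +
                 devix * hdr->device_info_struct_bytes);
}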
diff --git a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
index 3bbdc2bb7ebf..9b6d3e69355c 100644
--- a/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/common-spar/include/vbusdeviceinfo.h
@@ -25,13 +25,13 @@
* It is filled in by the client side to provide info about the device
* and driver from the client's perspective.
*/
-typedef struct _ULTRA_VBUS_DEVICEINFO {
- u8 devType[16]; /* short string identifying the device type */
- u8 drvName[16]; /* driver .sys file name */
- u8 infoStrings[96]; /* sequence of tab-delimited id strings: */
+struct ultra_vbus_deviceinfo {
+ u8 devtype[16]; /* short string identifying the device type */
+ u8 drvname[16]; /* driver .sys file name */
+ u8 infostrs[96]; /* sequence of tab-delimited id strings: */
/* <DRIVER_REV> <DRIVER_VERTAG> <DRIVER_COMPILETIME> */
u8 reserved[128]; /* pad size to 256 bytes */
-} ULTRA_VBUS_DEVICEINFO;
+};
#pragma pack(pop)
@@ -63,8 +63,9 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL)
+ } else if (p == NULL) {
chars++;
+ }
nonprintable_streak = 0;
}
if (remain > 0) {
@@ -72,10 +73,12 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL)
+ } else if (p == NULL) {
chars++;
- } else
+ }
+ } else {
nonprintable_streak = 1;
+ }
src++;
srcmax--;
}
@@ -115,7 +118,7 @@ vbuschannel_itoa(char *p, int remain, int num)
}
/* form a backwards decimal ascii string in <s> */
while (num > 0) {
- if (digits >= (int) sizeof(s))
+ if (digits >= (int)sizeof(s))
return 0;
s[digits++] = (num % 10) + '0';
num = num / 10;
@@ -147,15 +150,15 @@ vbuschannel_itoa(char *p, int remain, int num)
* Returns the number of bytes written to <p>.
*/
static inline int
-vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
- char *p, int remain, int devix)
+vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
+ char *p, int remain, int devix)
{
char *psrc;
int nsrc, x, i, pad;
int chars = 0;
- psrc = &(devinfo->devType[0]);
- nsrc = sizeof(devinfo->devType);
+ psrc = &devinfo->devtype[0];
+ nsrc = sizeof(devinfo->devtype);
if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
return 0;
@@ -184,8 +187,8 @@ vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
/* emit driver name */
- psrc = &(devinfo->drvName[0]);
- nsrc = sizeof(devinfo->drvName);
+ psrc = &devinfo->drvname[0];
+ nsrc = sizeof(devinfo->drvname);
x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
p += x;
remain -= x;
@@ -196,8 +199,8 @@ vbuschannel_devinfo_to_string(ULTRA_VBUS_DEVICEINFO *devinfo,
VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
/* emit strings */
- psrc = &(devinfo->infoStrings[0]);
- nsrc = sizeof(devinfo->infoStrings);
+ psrc = &devinfo->infostrs[0];
+ nsrc = sizeof(devinfo->infostrs);
x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
p += x;
remain -= x;
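A short usage sketch for the reworked inline helper above; the caller, the buffer size, and the defensive NUL termination are illustrative (the helper only reports how many bytes it wrote).

/* Sketch only: render one device's info line and log it. */
static void example_log_devinfo(struct ultra_vbus_deviceinfo *devinfo, int devix)
{
        char line[256];
        int n;

        n = vbuschannel_devinfo_to_string(devinfo, line, sizeof(line) - 1, devix);
        line[n] = '\0';         /* terminate before printing */
        pr_info("%s\n", line);
}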
diff --git a/drivers/staging/unisys/common-spar/include/vmcallinterface.h b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
index 0b5b5626af5a..78333719c496 100644
--- a/drivers/staging/unisys/common-spar/include/vmcallinterface.h
+++ b/drivers/staging/unisys/common-spar/include/vmcallinterface.h
@@ -33,7 +33,7 @@
/* define subsystem number for AppOS, used in uislib driver */
#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
-typedef enum { /* VMCALL identification tuples */
+enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
/* Note: when a new VMCALL is added:
* - the 1st 2 hex digits correspond to one of the
* VMCALL_MONITOR_INTERFACE types and
@@ -66,7 +66,7 @@ typedef enum { /* VMCALL identification tuples */
* ULTRA_SERVICE_CAPABILITY_TIME
* capable guest to make
* VMCALL */
-} VMCALL_MONITOR_INTERFACE_METHOD_TUPLE;
+};
#define VMCALL_SUCCESS 0
#define VMCALL_SUCCESSFUL(result) (result == 0)
@@ -76,12 +76,12 @@ typedef enum { /* VMCALL identification tuples */
__unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
__unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
-#define ISSUE_IO_VMCALL(InterfaceMethod, param, result) \
- (result = unisys_vmcall(InterfaceMethod, (param) & 0xFFFFFFFF, \
+#define ISSUE_IO_VMCALL(method, param, result) \
+ (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
(param) >> 32))
-#define ISSUE_IO_EXTENDED_VMCALL(InterfaceMethod, param1, param2, \
+#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, \
param3, result) \
- (result = unisys_extended_vmcall(InterfaceMethod, param1, \
+ (result = unisys_extended_vmcall(method, param1, \
param2, param3))
/* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
@@ -107,21 +107,20 @@ struct phys_info {
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
-typedef struct phys_info IO_DATA_STRUCTURE;
/* ///////////// BEGIN PRAGMA PACK PUSH 1 ///////////////////////// */
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
-typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
+struct vmcall_io_controlvm_addr_params {
/* The Guest-relative physical address of the ControlVm channel.
* This VMCall fills this in with the appropriate address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
+ u64 address; /* contents provided by this VMCALL (OUT) */
/* the size of the ControlVm channel in bytes This VMCall fills this
* in with the appropriate address. */
- u32 ChannelBytes; /* contents provided by this VMCALL (OUT) */
- u8 Unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
-} VMCALL_IO_CONTROLVM_ADDR_PARAMS;
+ u32 channel_bytes; /* contents provided by this VMCALL (OUT) */
+ u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -130,11 +129,11 @@ typedef struct _VMCALL_IO_CONTROLVM_ADDR_PARAMS {
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_DIAG_ADDR interface */
-typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
+struct vmcall_io_diag_addr_params {
/* The Guest-relative physical address of the diagnostic channel.
* This VMCall fills this in with the appropriate address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
-} VMCALL_IO_DIAG_ADDR_PARAMS;
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
@@ -143,25 +142,25 @@ typedef struct _VMCALL_IO_DIAG_ADDR_PARAMS {
/* ///////////// ONLY STRUCT TYPE SHOULD BE BELOW */
#pragma pack(push, 1)
/* Parameters to VMCALL_IO_VISORSERIAL_ADDR interface */
-typedef struct _VMCALL_IO_VISORSERIAL_ADDR_PARAMS {
+struct vmcall_io_visorserial_addr_params {
/* The Guest-relative physical address of the serial console
* channel. This VMCall fills this in with the appropriate
* address. */
- u64 ChannelAddress; /* contents provided by this VMCALL (OUT) */
-} VMCALL_IO_VISORSERIAL_ADDR_PARAMS;
+ u64 address; /* contents provided by this VMCALL (OUT) */
+};
#pragma pack(pop)
/* ///////////// END PRAGMA PACK PUSH 1 /////////////////////////// */
/* Parameters to VMCALL_CHANNEL_MISMATCH interface */
-typedef struct _VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS {
- u8 ChannelName[32]; /* Null terminated string giving name of channel
+struct vmcall_channel_version_mismatch_params {
+ u8 chname[32]; /* Null terminated string giving name of channel
* (IN) */
- u8 ItemName[32]; /* Null terminated string giving name of
+ u8 item_name[32]; /* Null terminated string giving name of
* mismatched item (IN) */
- u32 SourceLineNumber; /* line# where invoked. (IN) */
- u8 SourceFileName[36]; /* source code where invoked - Null terminated
+ u32 line_no; /* line# where invoked. (IN) */
+ u8 file_name[36]; /* source code where invoked - Null terminated
* string (IN) */
-} VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS;
+};
#endif /* __IOMONINTF_H__ */
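To show how the renamed parameter structs pair with the ISSUE_IO_VMCALL macro above, here is a rough sketch of querying the ControlVm channel location. The tuple constant VMCALL_IO_CONTROLVM_ADDR and the virt_to_phys() plumbing are assumptions drawn from the surrounding comments, not from this hunk; a real caller may also need a DMA-safe buffer rather than a stack variable.

/* Sketch only: ask the hypervisor where the ControlVm channel lives. */
static int example_get_controlvm_channel(u64 *addr, u32 *bytes)
{
        struct vmcall_io_controlvm_addr_params params;
        int result = VMCALL_SUCCESS;

        memset(&params, 0, sizeof(params));
        ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, virt_to_phys(&params),
                        result);
        if (!VMCALL_SUCCESSFUL(result))
                return -EIO;

        *addr = params.address;         /* filled in by the VMCALL (OUT) */
        *bytes = params.channel_bytes;  /* likewise */
        return 0;
}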
diff --git a/drivers/staging/unisys/include/timskmod.h b/drivers/staging/unisys/include/timskmod.h
index b14494ff6c1b..cff7983dab85 100644
--- a/drivers/staging/unisys/include/timskmod.h
+++ b/drivers/staging/unisys/include/timskmod.h
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
@@ -71,7 +70,6 @@
/** Try to evaluate the provided expression, and do a RETINT(x) iff
* the expression evaluates to < 0.
- * @param x the expression to try
*/
#define ASSERT(cond) \
do { if (!(cond)) \
@@ -89,11 +87,6 @@
(void *)(p2) = SWAPPOINTERS_TEMP; \
} while (0)
-/**
- * @addtogroup driverlogging
- * @{
- */
-
#define PRINTKDRV(fmt, args...) LOGINF(fmt, ## args)
#define TBDDRV(fmt, args...) LOGERR(fmt, ## args)
#define HUHDRV(fmt, args...) LOGERR(fmt, ## args)
@@ -114,8 +107,6 @@
#define INFODEVX(devno, fmt, args...) LOGINFDEVX(devno, fmt, ## args)
#define DEBUGDEV(devname, fmt, args...) DBGINFDEV(devname, fmt, ## args)
-/* @} */
-
/** Verifies the consistency of your PRIVATEDEVICEDATA structure using
* conventional "signature" fields:
* <p>
@@ -139,7 +130,7 @@
((fd)->sig2 == fd))
/** Sleep for an indicated number of seconds (for use in kernel mode).
- * @param x the number of seconds to sleep.
+ * x - the number of seconds to sleep.
*/
#define SLEEP(x) \
do { current->state = TASK_INTERRUPTIBLE; \
@@ -147,17 +138,13 @@
} while (0)
/** Sleep for an indicated number of jiffies (for use in kernel mode).
- * @param x the number of jiffies to sleep.
+ * x - the number of jiffies to sleep.
*/
#define SLEEPJIFFIES(x) \
do { current->state = TASK_INTERRUPTIBLE; \
schedule_timeout(x); \
} while (0)
-#ifndef max
-#define max(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
static inline struct cdev *cdev_alloc_init(struct module *owner,
const struct file_operations *fops)
{
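For completeness, a one-line usage sketch of the jiffies-based sleep macro kept above; msecs_to_jiffies() is standard kernel API, and the wrapper function is invented purely for illustration.

/* Sketch only: an interruptible delay of roughly half a second. */
static void example_settle_delay(void)
{
        SLEEPJIFFIES(msecs_to_jiffies(500));
}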
diff --git a/drivers/staging/unisys/include/uisqueue.h b/drivers/staging/unisys/include/uisqueue.h
index 5178270b98d1..25b6181d78af 100644
--- a/drivers/staging/unisys/include/uisqueue.h
+++ b/drivers/staging/unisys/include/uisqueue.h
@@ -34,8 +34,7 @@
#include "controlvmcompletionstatus.h"
struct uisqueue_info {
-
- CHANNEL_HEADER __iomem *chan;
+ struct channel_header __iomem *chan;
/* channel containing queues in which scsi commands &
* responses are queued
*/
@@ -48,8 +47,8 @@ struct uisqueue_info {
u64 non_empty_wakeup_cnt;
struct {
- SIGNAL_QUEUE_HEADER reserved1; /* */
- SIGNAL_QUEUE_HEADER reserved2; /* */
+ struct signal_queue_header reserved1; /* */
+ struct signal_queue_header reserved2; /* */
} safe_uis_queue;
unsigned int (*send_int_if_needed)(struct uisqueue_info *info,
unsigned int whichcqueue,
@@ -119,7 +118,7 @@ struct extport_info {
*/
struct switch_info *swtch;
- struct PciId pci_id;
+ struct pci_id pci_id;
char name[MAX_NAME_SIZE_UISQUEUE];
union {
struct vhba_wwnn wwnn;
@@ -133,7 +132,7 @@ struct device_info {
u64 channel_bytes;
uuid_le channel_uuid;
uuid_le instance_uuid;
- struct InterruptInfo intr;
+ struct irq_info intr;
struct switch_info *swtch;
char devid[30]; /* "vbus<busno>:dev<devno>" */
u16 polling;
@@ -149,30 +148,27 @@ struct device_info {
unsigned long long last_on_list_cnt;
};
-typedef enum {
+enum switch_type {
RECOVERY_LAN = 1,
IB_LAN = 2
-} SWITCH_TYPE;
+};
struct bus_info {
- u32 busNo, deviceCount;
+ u32 bus_no, device_count;
struct device_info **device;
- u64 guestHandle, recvBusInterruptHandle;
- uuid_le busInstGuid;
- ULTRA_VBUS_CHANNEL_PROTOCOL __iomem *pBusChannel;
- int busChannelBytes;
+ u64 guest_handle, recv_bus_irq_handle;
+ uuid_le bus_inst_uuid;
+ struct ultra_vbus_channel_protocol __iomem *bus_channel;
+ int bus_channel_bytes;
struct proc_dir_entry *proc_dir; /* proc/uislib/vbus/<x> */
struct proc_dir_entry *proc_info; /* proc/uislib/vbus/<x>/info */
char name[25];
- char partitionName[99];
+ char partition_name[99];
struct bus_info *next;
- u8 localVnic; /* 1 if local vnic created internally
+ u8 local_vnic; /* 1 if local vnic created internally
* by IOVM; 0 otherwise... */
};
-#define DEDICATED_SWITCH(s) ((s->extPortCount == 1) && \
- (s->intPortCount == 1))
-
struct sn_list_entry {
struct uisscsi_dest pdest; /* scsi bus, target, lun for
* phys disk */
@@ -183,23 +179,12 @@ struct sn_list_entry {
struct sn_list_entry *next;
};
-struct network_policy {
- u32 promiscuous:1;
- u32 macassign:1;
- u32 peerforwarding:1;
- u32 nonotify:1;
- u32 standby:1;
- u32 callhome:2;
- char ip_addr[30];
-};
-
/*
* IO messages sent to UisnicControlChanFunc & UissdControlChanFunc by
* code that processes the ControlVm channel messages.
*/
-
-typedef enum {
+enum iopart_msg_type {
IOPART_ADD_VNIC,
IOPART_DEL_VNIC,
IOPART_DEL_ALL_VNICS,
@@ -219,7 +204,7 @@ typedef enum {
IOPART_RESUME_VDISK,
IOPART_ADD_DEVICE, /* add generic device */
IOPART_DEL_DEVICE, /* del generic device */
-} IOPART_MSG_TYPE;
+};
struct add_virt_iopart {
void *chanptr; /* pointer to data channel */
@@ -228,7 +213,7 @@ struct add_virt_iopart {
* for DMA, for ex. */
u64 recv_bus_irq_handle; /* used to register to receive
* bus level interrupts. */
- struct InterruptInfo intr; /* contains recv & send
+ struct irq_info intr; /* contains recv & send
* interrupt info */
/* recvInterruptHandle is used to register to receive
* interrupts on the data channel. Used by GuestLinux/Windows
@@ -259,21 +244,15 @@ struct add_vdisk_iopart {
struct uisscsi_dest pdest; /* scsi bus, target, lun for phys disk */
u8 sernum[MAX_SERIAL_NUM]; /* serial num of physical disk */
u32 serlen; /* length of serial num */
- u32 bus_no;
- u32 dev_no;
};
struct del_vdisk_iopart {
void *chanptr; /* pointer to data channel */
struct uisscsi_dest vdest; /* scsi bus, target, lun for virt disk */
- u32 bus_no;
- u32 dev_no;
};
struct del_virt_iopart {
void *chanptr; /* pointer to data channel */
- u32 bus_no;
- u32 dev_no;
};
struct det_virt_iopart { /* detach internal port */
@@ -297,8 +276,7 @@ struct del_switch_iopart { /* destroy switch */
};
struct io_msgs {
-
- IOPART_MSG_TYPE msgtype;
+ enum iopart_msg_type msgtype;
/* additional params needed by some messages */
union {
@@ -329,7 +307,7 @@ struct io_msgs {
* the ControlVm channel messages.
*/
-typedef enum {
+enum guestpart_msg_type {
GUEST_ADD_VBUS,
GUEST_ADD_VHBA,
GUEST_ADD_VNIC,
@@ -344,15 +322,15 @@ typedef enum {
GUEST_PAUSE_VNIC,
GUEST_RESUME_VHBA,
GUEST_RESUME_VNIC
-} GUESTPART_MSG_TYPE;
+};
struct add_vbus_guestpart {
void __iomem *chanptr; /* pointer to data channel for bus -
* NOT YET USED */
- u32 busNo; /* bus number to be created/deleted */
- u32 deviceCount; /* max num of devices on bus */
- uuid_le busTypeGuid; /* indicates type of bus */
- uuid_le busInstGuid; /* instance guid for device */
+ u32 bus_no; /* bus number to be created/deleted */
+ u32 dev_count; /* max num of devices on bus */
+ uuid_le bus_uuid; /* indicates type of bus */
+ uuid_le instance_uuid; /* instance guid for device */
};
struct del_vbus_guestpart {
@@ -367,7 +345,7 @@ struct add_virt_guestpart {
u32 bus_no; /* bus number for the operation */
u32 device_no; /* number of device on the bus */
uuid_le instance_uuid; /* instance guid for device */
- struct InterruptInfo intr; /* recv/send interrupt info */
+ struct irq_info intr; /* recv/send interrupt info */
/* recvInterruptHandle contains info needed in order to
* register to receive interrupts on the data channel.
* sendInterruptHandle contains handle which is provided to
@@ -394,8 +372,7 @@ struct init_chipset_guestpart {
};
struct guest_msgs {
-
- GUESTPART_MSG_TYPE msgtype;
+ enum guestpart_msg_type msgtype;
/* additional params needed by messages */
union {
diff --git a/drivers/staging/unisys/include/uisutils.h b/drivers/staging/unisys/include/uisutils.h
index 74e7cf65502c..7414220676d3 100644
--- a/drivers/staging/unisys/include/uisutils.h
+++ b/drivers/staging/unisys/include/uisutils.h
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/uuid.h>
+#include <linux/if_ether.h>
#include "vmcallinterface.h"
#include "channel.h"
@@ -43,39 +44,38 @@
/* global function pointers that act as callback functions into
* uisnicmod, uissdmod, and virtpcimod
*/
-extern int (*UisnicControlChanFunc)(struct io_msgs *);
-extern int (*UissdControlChanFunc)(struct io_msgs *);
-extern int (*VirtControlChanFunc)(struct guest_msgs *);
+extern int (*uisnic_control_chan_func)(struct io_msgs *);
+extern int (*uissd_control_chan_func)(struct io_msgs *);
+extern int (*virt_control_chan_func)(struct guest_msgs *);
/* Return values of above callback functions: */
#define CCF_ERROR 0 /* completed and failed */
#define CCF_OK 1 /* completed successfully */
#define CCF_PENDING 2 /* operation still pending */
-extern atomic_t UisUtils_Registered_Services;
+extern atomic_t uisutils_registered_services;
-typedef unsigned int MACARRAY[MAX_MACADDR_LEN];
-typedef struct ReqHandlerInfo_struct {
- uuid_le switchTypeGuid;
+struct req_handler_info {
+ uuid_le switch_uuid;
int (*controlfunc)(struct io_msgs *);
unsigned long min_channel_bytes;
- int (*Server_Channel_Ok)(unsigned long channelBytes);
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr, u32 clientStrLen, u64 bytes);
+ int (*server_channel_ok)(unsigned long channel_bytes);
+ int (*server_channel_init)(void *x, unsigned char *client_str,
+ u32 client_str_len, u64 bytes);
char switch_type_name[99];
struct list_head list_link; /* links into ReqHandlerInfo_list */
-} ReqHandlerInfo_t;
+};
-ReqHandlerInfo_t *ReqHandlerAdd(uuid_le switchTypeGuid,
+struct req_handler_info *req_handler_add(uuid_le switch_uuid,
const char *switch_type_name,
int (*controlfunc)(struct io_msgs *),
unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes));
-ReqHandlerInfo_t *ReqHandlerFind(uuid_le switchTypeGuid);
-int ReqHandlerDel(uuid_le switchTypeGuid);
+ int (*svr_channel_ok)(unsigned long
+ channel_bytes),
+ int (*svr_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len, u64 bytes));
+struct req_handler_info *req_handler_find(uuid_le switch_uuid);
+int req_handler_del(uuid_le switch_uuid);
#define uislib_ioremap_cache(addr, size) \
dbg_ioremap_cache(addr, size, __FILE__, __LINE__)
@@ -114,52 +114,49 @@ int uisutil_add_proc_line_ex(int *total, char **buffer, int *buffer_remaining,
char *format, ...);
int uisctrl_register_req_handler(int type, void *fptr,
- ULTRA_VBUS_DEVICEINFO *chipset_driver_info);
-int uisctrl_register_req_handler_ex(uuid_le switchTypeGuid,
- const char *switch_type_name,
- int (*fptr)(struct io_msgs *),
- unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes),
- ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo);
-
-int uisctrl_unregister_req_handler_ex(uuid_le switchTypeGuid);
+ struct ultra_vbus_deviceinfo *chipset_driver_info);
+int uisctrl_register_req_handler_ex(uuid_le switch_guid,
+ const char *switch_type_name,
+ int (*fptr)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*svr_channel_ok)(unsigned long
+ channel_bytes),
+ int (*svr_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len,
+ u64 bytes),
+ struct ultra_vbus_deviceinfo *chipset_driver_info);
+
+int uisctrl_unregister_req_handler_ex(uuid_le switch_uuid);
unsigned char *util_map_virt(struct phys_info *sg);
void util_unmap_virt(struct phys_info *sg);
unsigned char *util_map_virt_atomic(struct phys_info *sg);
void util_unmap_virt_atomic(void *buf);
-int uislib_server_inject_add_vnic(u32 switchNo, u32 BusNo, u32 numIntPorts,
- u32 numExtPorts, MACARRAY pmac[],
- pCHANNEL_HEADER **chan);
-void uislib_server_inject_del_vnic(u32 switchNo, u32 busNo, u32 numIntPorts,
- u32 numExtPorts);
-int uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
- u64 channelAddr, ulong nChannelBytes);
-int uislib_client_inject_del_bus(u32 busNo);
-
-int uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
+int uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
+ u64 channel_addr, ulong n_channel_bytes);
+int uislib_client_inject_del_bus(u32 bus_no);
+
+int uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr);
-int uislib_client_inject_pause_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_resume_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_del_vhba(u32 busNo, u32 devNo);
-int uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr);
+int uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no);
+int uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr);
-int uislib_client_inject_pause_vnic(u32 busNo, u32 devNo);
-int uislib_client_inject_resume_vnic(u32 busNo, u32 devNo);
-int uislib_client_inject_del_vnic(u32 busNo, u32 devNo);
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr);
+int uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no);
+int uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no);
+int uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no);
#ifdef STORAGE_CHANNEL
u64 uislib_storage_channel(int client_id);
#endif
int uislib_get_owned_pdest(struct uisscsi_dest *pdest);
-int uislib_send_event(CONTROLVM_ID id, CONTROLVM_MESSAGE_PACKET *event);
+int uislib_send_event(enum controlvm_id id,
+ struct controlvm_message_packet *event);
/* structure used by vhba & vnic to keep track of queue & thread info */
struct chaninfo {
@@ -182,11 +179,14 @@ struct chaninfo {
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout(msecs_to_jiffies(x)); \
}
+
#define UIS_THREAD_WAIT_USEC(x) { \
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout(usecs_to_jiffies(x)); \
}
+
#define UIS_THREAD_WAIT UIS_THREAD_WAIT_MSEC(5)
+
#define UIS_THREAD_WAIT_SEC(x) { \
set_current_state(TASK_INTERRUPTIBLE); \
schedule_timeout((x)*HZ); \
@@ -224,42 +224,42 @@ unsigned int uisutil_copy_fragsinfo_from_skb(unsigned char *calling_ctx,
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
- VMCALL_IO_CONTROLVM_ADDR_PARAMS params;
+ struct vmcall_io_controlvm_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result)) {
- *control_addr = params.ChannelAddress;
- *control_bytes = params.ChannelBytes;
+ *control_addr = params.address;
+ *control_bytes = params.channel_bytes;
}
return result;
}
static inline unsigned int issue_vmcall_io_diag_addr(u64 *diag_channel_addr)
{
- VMCALL_IO_DIAG_ADDR_PARAMS params;
+ struct vmcall_io_diag_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_DIAG_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result))
- *diag_channel_addr = params.ChannelAddress;
+ *diag_channel_addr = params.address;
return result;
}
static inline unsigned int issue_vmcall_io_visorserial_addr(u64 *channel_addr)
{
- VMCALL_IO_VISORSERIAL_ADDR_PARAMS params;
+ struct vmcall_io_visorserial_addr_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_IO_VISORSERIAL_ADDR, physaddr, result);
if (VMCALL_SUCCESSFUL(result))
- *channel_addr = params.ChannelAddress;
+ *channel_addr = params.address;
return result;
}
@@ -273,17 +273,8 @@ static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
return result;
}
-static inline s64 issue_vmcall_measurement_do_nothing(void)
-{
- u64 result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_MEASUREMENT_DO_NOTHING, physaddr, result);
- return result;
-}
-
struct log_info_t {
- volatile unsigned long long last_cycles;
+ unsigned long long last_cycles;
unsigned long long delta_sum[64];
unsigned long long delta_cnt[64];
unsigned long long max_delta[64];
@@ -302,44 +293,29 @@ static inline unsigned int issue_vmcall_channel_mismatch(const char *chname,
const char *item_name, u32 line_no,
const char *path_n_fn)
{
- VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS params;
+ struct vmcall_channel_version_mismatch_params params;
int result = VMCALL_SUCCESS;
u64 physaddr;
char *last_slash = NULL;
- strlcpy(params.ChannelName, chname,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ChannelName));
- strlcpy(params.ItemName, item_name,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS, ItemName));
- params.SourceLineNumber = line_no;
+ strlcpy(params.chname, chname, sizeof(params.chname));
+ strlcpy(params.item_name, item_name, sizeof(params.item_name));
+ params.line_no = line_no;
last_slash = strrchr(path_n_fn, '/');
if (last_slash != NULL) {
last_slash++;
- strlcpy(params.SourceFileName, last_slash,
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
- SourceFileName));
+ strlcpy(params.file_name, last_slash, sizeof(params.file_name));
} else
- strlcpy(params.SourceFileName,
+ strlcpy(params.file_name,
"Cannot determine source filename",
- lengthof(VMCALL_CHANNEL_VERSION_MISMATCH_PARAMS,
- SourceFileName));
+ sizeof(params.file_name));
physaddr = virt_to_phys(&params);
ISSUE_IO_VMCALL(VMCALL_CHANNEL_VERSION_MISMATCH, physaddr, result);
return result;
}
-static inline unsigned int issue_vmcall_fatal(void)
-{
- int result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_GENERIC_SURRENDER_QUANTUM_FOREVER, physaddr,
- result);
- return result;
-}
-
#define UIS_DAEMONIZE(nam)
void *uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln);
#define UISCACHEALLOC(cur_pool) uislib_cache_alloc(cur_pool, __FILE__, __LINE__)
diff --git a/drivers/staging/unisys/include/vbushelper.h b/drivers/staging/unisys/include/vbushelper.h
index 1bde549ec0df..84abe5f99f54 100644
--- a/drivers/staging/unisys/include/vbushelper.h
+++ b/drivers/staging/unisys/include/vbushelper.h
@@ -26,19 +26,19 @@
#define TARGET_HOSTNAME "linuxguest"
static inline void bus_device_info_init(
- ULTRA_VBUS_DEVICEINFO * bus_device_info_ptr,
+ struct ultra_vbus_deviceinfo *bus_device_info_ptr,
const char *dev_type, const char *drv_name,
const char *ver, const char *ver_tag)
{
- memset(bus_device_info_ptr, 0, sizeof(ULTRA_VBUS_DEVICEINFO));
- snprintf(bus_device_info_ptr->devType,
- sizeof(bus_device_info_ptr->devType),
+ memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
+ snprintf(bus_device_info_ptr->devtype,
+ sizeof(bus_device_info_ptr->devtype),
"%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvName,
- sizeof(bus_device_info_ptr->drvName),
+ snprintf(bus_device_info_ptr->drvname,
+ sizeof(bus_device_info_ptr->drvname),
"%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infoStrings,
- sizeof(bus_device_info_ptr->infoStrings), "%s\t%s\t%s",
+ snprintf(bus_device_info_ptr->infostrs,
+ sizeof(bus_device_info_ptr->infostrs), "%s\t%s\t%s",
(ver) ? ver : "unknownVer",
(ver_tag) ? ver_tag : "unknownVerTag",
TARGET_HOSTNAME);
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
index 706f1c0c2c6e..7c87452a9f14 100644
--- a/drivers/staging/unisys/uislib/uislib.c
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -57,7 +57,7 @@
#define __MYFILE__ "uislib.c"
/* global function pointers that act as callback functions into virtpcimod */
-int (*VirtControlChanFunc)(struct guest_msgs *);
+int (*virt_control_chan_func)(struct guest_msgs *);
static int ProcReadBufferValid;
static char *ProcReadBuffer; /* Note this MUST be global,
@@ -121,12 +121,12 @@ static const struct file_operations debugfs_info_fops = {
};
static void
-init_msg_header(CONTROLVM_MESSAGE *msg, u32 id, uint rsp, uint svr)
+init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
{
- memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
- msg->hdr.Id = id;
- msg->hdr.Flags.responseExpected = rsp;
- msg->hdr.Flags.server = svr;
+ memset(msg, 0, sizeof(struct controlvm_message));
+ msg->hdr.id = id;
+ msg->hdr.flags.response_expected = rsp;
+ msg->hdr.flags.server = svr;
}
static __iomem void *
@@ -142,7 +142,7 @@ init_vbus_channel(u64 channelAddr, u32 channelBytes)
rc = NULL;
goto Away;
}
- if (!ULTRA_VBUS_CHANNEL_OK_CLIENT(pChan, NULL)) {
+ if (!SPAR_VBUS_CHANNEL_OK_CLIENT(pChan)) {
ERRDRV("%s channel cannot be used", __func__);
uislib_iounmap(pChan);
rc = NULL;
@@ -154,7 +154,7 @@ Away:
}
static int
-create_bus(CONTROLVM_MESSAGE *msg, char *buf)
+create_bus(struct controlvm_message *msg, char *buf)
{
u32 busNo, deviceCount;
struct bus_info *tmp, *bus;
@@ -168,8 +168,8 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
return CONTROLVM_RESP_ERROR_MAX_BUSES;
}
- busNo = msg->cmd.createBus.busNo;
- deviceCount = msg->cmd.createBus.deviceCount;
+ busNo = msg->cmd.create_bus.bus_no;
+ deviceCount = msg->cmd.create_bus.dev_count;
POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, busNo, deviceCount,
POSTCODE_SEVERITY_INFO);
@@ -188,25 +188,25 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
/* Currently by default, the bus Number is the GuestHandle.
* Configure Bus message can override this.
*/
- if (msg->hdr.Flags.testMessage) {
+ if (msg->hdr.flags.test_message) {
/* This implies we're the IOVM so set guest handle to 0... */
- bus->guestHandle = 0;
- bus->busNo = busNo;
- bus->localVnic = 1;
+ bus->guest_handle = 0;
+ bus->bus_no = busNo;
+ bus->local_vnic = 1;
} else
- bus->busNo = bus->guestHandle = busNo;
- sprintf(bus->name, "%d", (int) bus->busNo);
- bus->deviceCount = deviceCount;
+ bus->bus_no = bus->guest_handle = busNo;
+ sprintf(bus->name, "%d", (int) bus->bus_no);
+ bus->device_count = deviceCount;
bus->device =
(struct device_info **) ((char *) bus + sizeof(struct bus_info));
- bus->busInstGuid = msg->cmd.createBus.busInstGuid;
- bus->busChannelBytes = 0;
- bus->pBusChannel = NULL;
+ bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ bus->bus_channel_bytes = 0;
+ bus->bus_channel = NULL;
/* add bus to our bus list - but check for duplicates first */
read_lock(&BusListLock);
for (tmp = BusListHead; tmp; tmp = tmp->next) {
- if (tmp->busNo == bus->busNo)
+ if (tmp->bus_no == bus->bus_no)
break;
}
read_unlock(&BusListLock);
@@ -215,39 +215,39 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
* reject add
*/
LOGERR("CONTROLVM_BUS_CREATE Failed: bus %d already exists.\n",
- bus->busNo);
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ bus->bus_no);
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return CONTROLVM_RESP_ERROR_ALREADY_DONE;
}
- if ((msg->cmd.createBus.channelAddr != 0)
- && (msg->cmd.createBus.channelBytes != 0)) {
- bus->busChannelBytes = msg->cmd.createBus.channelBytes;
- bus->pBusChannel =
- init_vbus_channel(msg->cmd.createBus.channelAddr,
- msg->cmd.createBus.channelBytes);
+ if ((msg->cmd.create_bus.channel_addr != 0)
+ && (msg->cmd.create_bus.channel_bytes != 0)) {
+ bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
+ bus->bus_channel =
+ init_vbus_channel(msg->cmd.create_bus.channel_addr,
+ msg->cmd.create_bus.channel_bytes);
}
/* the msg is bound for virtpci; send guest_msgs struct to callback */
- if (!msg->hdr.Flags.server) {
+ if (!msg->hdr.flags.server) {
struct guest_msgs cmd;
cmd.msgtype = GUEST_ADD_VBUS;
- cmd.add_vbus.busNo = busNo;
- cmd.add_vbus.chanptr = bus->pBusChannel;
- cmd.add_vbus.deviceCount = deviceCount;
- cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid;
- cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid;
- if (!VirtControlChanFunc) {
+ cmd.add_vbus.bus_no = busNo;
+ cmd.add_vbus.chanptr = bus->bus_channel;
+ cmd.add_vbus.dev_count = deviceCount;
+ cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
+ cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered.");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error.");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
POSTCODE_SEVERITY_ERR);
kfree(bus);
return
@@ -266,26 +266,26 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
BusListCount++;
write_unlock(&BusListLock);
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
POSTCODE_SEVERITY_INFO);
return CONTROLVM_RESP_SUCCESS;
}
static int
-destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
+destroy_bus(struct controlvm_message *msg, char *buf)
{
int i;
struct bus_info *bus, *prev = NULL;
struct guest_msgs cmd;
u32 busNo;
- busNo = msg->cmd.destroyBus.busNo;
+ busNo = msg->cmd.destroy_bus.bus_no;
read_lock(&BusListLock);
bus = BusListHead;
while (bus) {
- if (bus->busNo == busNo)
+ if (bus->bus_no == busNo)
break;
prev = bus;
bus = bus->next;
@@ -299,7 +299,7 @@ destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
}
/* verify that this bus has no devices. */
- for (i = 0; i < bus->deviceCount; i++) {
+ for (i = 0; i < bus->device_count; i++) {
if (bus->device[i] != NULL) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: device %i attached to bus %d.",
i, busNo);
@@ -309,18 +309,18 @@ destroy_bus(CONTROLVM_MESSAGE *msg, char *buf)
}
read_unlock(&BusListLock);
- if (msg->hdr.Flags.server)
+ if (msg->hdr.flags.server)
goto remove;
/* client messages require us to call the virtpci callback associated
with this bus. */
cmd.msgtype = GUEST_DEL_VBUS;
cmd.del_vbus.bus_no = busNo;
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_BUS_DESTROY Failed: virtpci GUEST_DEL_VBUS returned error.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
}
@@ -335,9 +335,9 @@ remove:
BusListCount--;
write_unlock(&BusListLock);
- if (bus->pBusChannel) {
- uislib_iounmap(bus->pBusChannel);
- bus->pBusChannel = NULL;
+ if (bus->bus_channel) {
+ uislib_iounmap(bus->bus_channel);
+ bus->bus_channel = NULL;
}
kfree(bus);
@@ -345,17 +345,17 @@ remove:
}
static int
-create_device(CONTROLVM_MESSAGE *msg, char *buf)
+create_device(struct controlvm_message *msg, char *buf)
{
struct device_info *dev;
struct bus_info *bus;
u32 busNo, devNo;
int result = CONTROLVM_RESP_SUCCESS;
u64 minSize = MIN_IO_CHANNEL_SIZE;
- ReqHandlerInfo_t *pReqHandler;
+ struct req_handler_info *pReqHandler;
- busNo = msg->cmd.createDevice.busNo;
- devNo = msg->cmd.createDevice.devNo;
+ busNo = msg->cmd.create_device.bus_no;
+ devNo = msg->cmd.create_device.dev_no;
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
@@ -368,26 +368,26 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
}
- dev->channel_uuid = msg->cmd.createDevice.dataTypeGuid;
- dev->intr = msg->cmd.createDevice.intr;
- dev->channel_addr = msg->cmd.createDevice.channelAddr;
+ dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
+ dev->intr = msg->cmd.create_device.intr;
+ dev->channel_addr = msg->cmd.create_device.channel_addr;
dev->bus_no = busNo;
dev->dev_no = devNo;
sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
sprintf(dev->devid, "vbus%u:dev%u", (unsigned) busNo, (unsigned) devNo);
/* map the channel memory for the device. */
- if (msg->hdr.Flags.testMessage)
+ if (msg->hdr.flags.test_message)
dev->chanptr = (void __iomem *)__va(dev->channel_addr);
else {
- pReqHandler = ReqHandlerFind(dev->channel_uuid);
+ pReqHandler = req_handler_find(dev->channel_uuid);
if (pReqHandler)
/* generic service handler registered for this
* channel
*/
minSize = pReqHandler->min_channel_bytes;
- if (minSize > msg->cmd.createDevice.channelBytes) {
+ if (minSize > msg->cmd.create_device.channel_bytes) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: channel size is too small, channel size:0x%lx, required size:0x%lx",
- (ulong) msg->cmd.createDevice.channelBytes,
+ (ulong) msg->cmd.create_device.channel_bytes,
(ulong) minSize);
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
POSTCODE_SEVERITY_ERR);
@@ -396,27 +396,27 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
}
dev->chanptr =
uislib_ioremap_cache(dev->channel_addr,
- msg->cmd.createDevice.channelBytes);
+ msg->cmd.create_device.channel_bytes);
if (!dev->chanptr) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: ioremap_cache of channelAddr:%Lx for channelBytes:%llu failed",
dev->channel_addr,
- msg->cmd.createDevice.channelBytes);
+ msg->cmd.create_device.channel_bytes);
result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
POSTCODE_SEVERITY_ERR);
goto Away;
}
}
- dev->instance_uuid = msg->cmd.createDevice.devInstGuid;
- dev->channel_bytes = msg->cmd.createDevice.channelBytes;
+ dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
+ dev->channel_bytes = msg->cmd.create_device.channel_bytes;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: device (%d) >= deviceCount (%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
devNo, busNo,
@@ -439,17 +439,18 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
/* the msg is bound for virtpci; send
* guest_msgs struct to callback
*/
- if (!msg->hdr.Flags.server) {
+ if (!msg->hdr.flags.server) {
struct guest_msgs cmd;
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
- wait_for_valid_guid(&((CHANNEL_HEADER
- __iomem *) (dev->
+ spar_vhba_channel_protocol_uuid)) {
+ wait_for_valid_guid(&((
+ struct channel_header
+ __iomem *) (dev->
chanptr))->
- Type);
- if (!ULTRA_VHBA_CHANNEL_OK_CLIENT
- (dev->chanptr, NULL)) {
+ chtype);
+ if (!SPAR_VHBA_CHANNEL_OK_CLIENT
+ (dev->chanptr)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed:[CLIENT]VHBA dev %d chan invalid.",
devNo);
POSTCODE_LINUX_4
@@ -468,13 +469,14 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
cmd.add_vhba.intr = dev->intr;
} else
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
- wait_for_valid_guid(&((CHANNEL_HEADER
- __iomem *) (dev->
+ spar_vnic_channel_protocol_uuid)) {
+ wait_for_valid_guid(&((
+ struct channel_header
+ __iomem *) (dev->
chanptr))->
- Type);
- if (!ULTRA_VNIC_CHANNEL_OK_CLIENT
- (dev->chanptr, NULL)) {
+ chtype);
+ if (!SPAR_VNIC_CHANNEL_OK_CLIENT
+ (dev->chanptr)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: VNIC[CLIENT] dev %d chan invalid.",
devNo);
POSTCODE_LINUX_4
@@ -500,7 +502,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
goto Away;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci callback not registered.");
POSTCODE_LINUX_4
(DEVICE_CREATE_FAILURE_PC, devNo,
@@ -509,7 +511,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
goto Away;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: virtpci GUEST_ADD_[VHBA||VNIC] returned error.");
POSTCODE_LINUX_4
(DEVICE_CREATE_FAILURE_PC, devNo,
@@ -532,7 +534,7 @@ create_device(CONTROLVM_MESSAGE *msg, char *buf)
result = CONTROLVM_RESP_ERROR_BUS_INVALID;
Away:
- if (!msg->hdr.Flags.testMessage) {
+ if (!msg->hdr.flags.test_message) {
uislib_iounmap(dev->chanptr);
dev->chanptr = NULL;
}
@@ -542,7 +544,7 @@ Away:
}
static int
-pause_device(CONTROLVM_MESSAGE *msg)
+pause_device(struct controlvm_message *msg)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -550,16 +552,16 @@ pause_device(CONTROLVM_MESSAGE *msg)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.deviceChangeState.busNo;
- devNo = msg->cmd.deviceChangeState.devNo;
+ busNo = msg->cmd.device_change_state.bus_no;
+ devNo = msg->cmd.device_change_state.dev_no;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -585,22 +587,22 @@ pause_device(CONTROLVM_MESSAGE *msg)
* guest_msgs struct to callback
*/
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_PAUSE_VHBA;
cmd.pause_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_PAUSE_VNIC;
cmd.pause_vnic.chanptr = dev->chanptr;
} else {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: unknown channelTypeGuid.\n");
return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:pause Failed: virtpci GUEST_PAUSE_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -610,7 +612,7 @@ pause_device(CONTROLVM_MESSAGE *msg)
}
static int
-resume_device(CONTROLVM_MESSAGE *msg)
+resume_device(struct controlvm_message *msg)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -618,16 +620,16 @@ resume_device(CONTROLVM_MESSAGE *msg)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.deviceChangeState.busNo;
- devNo = msg->cmd.deviceChangeState.devNo;
+ busNo = msg->cmd.device_change_state.bus_no;
+ devNo = msg->cmd.device_change_state.dev_no;
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -654,22 +656,22 @@ resume_device(CONTROLVM_MESSAGE *msg)
*/
if (retval == CONTROLVM_RESP_SUCCESS) {
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_RESUME_VHBA;
cmd.resume_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_RESUME_VNIC;
cmd.resume_vnic.chanptr = dev->chanptr;
} else {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: unknown channelTypeGuid.\n");
return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: virtpci callback not registered.");
return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_CHANGESTATE:resume Failed: virtpci GUEST_RESUME_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -679,7 +681,7 @@ resume_device(CONTROLVM_MESSAGE *msg)
}
static int
-destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
+destroy_device(struct controlvm_message *msg, char *buf)
{
u32 busNo, devNo;
struct bus_info *bus;
@@ -687,17 +689,17 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
struct guest_msgs cmd;
int retval = CONTROLVM_RESP_SUCCESS;
- busNo = msg->cmd.destroyDevice.busNo;
- devNo = msg->cmd.destroyDevice.devNo;
+ busNo = msg->cmd.destroy_device.bus_no;
+ devNo = msg->cmd.destroy_device.bus_no;
read_lock(&BusListLock);
LOGINF("destroy_device called for busNo=%u, devNo=%u", busNo, devNo);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("CONTROLVM_DEVICE_DESTORY Failed: device(%d) >= deviceCount(%d).",
- devNo, bus->deviceCount);
+ devNo, bus->device_count);
retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
} else {
/* make sure this device exists */
@@ -724,11 +726,11 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
* guest_msgs struct to callback
*/
if (!uuid_le_cmp(dev->channel_uuid,
- UltraVhbaChannelProtocolGuid)) {
+ spar_vhba_channel_protocol_uuid)) {
cmd.msgtype = GUEST_DEL_VHBA;
cmd.del_vhba.chanptr = dev->chanptr;
} else if (!uuid_le_cmp(dev->channel_uuid,
- UltraVnicChannelProtocolGuid)) {
+ spar_vnic_channel_protocol_uuid)) {
cmd.msgtype = GUEST_DEL_VNIC;
cmd.del_vnic.chanptr = dev->chanptr;
} else {
@@ -736,12 +738,12 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
return
CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
}
- if (!VirtControlChanFunc) {
+ if (!virt_control_chan_func) {
LOGERR("CONTROLVM_DEVICE_DESTORY Failed: virtpci callback not registered.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
}
- if (!VirtControlChanFunc(&cmd)) {
+ if (!virt_control_chan_func(&cmd)) {
LOGERR("CONTROLVM_DEVICE_DESTROY Failed: virtpci GUEST_DEL_[VHBA||VNIC] returned error.");
return
CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
@@ -756,7 +758,7 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
uislib_disable_channel_interrupts(busNo, devNo);
}
/* unmap the channel memory for the device. */
- if (!msg->hdr.Flags.testMessage) {
+ if (!msg->hdr.flags.test_message) {
LOGINF("destroy_device, doing iounmap");
uislib_iounmap(dev->chanptr);
}
@@ -767,23 +769,23 @@ destroy_device(CONTROLVM_MESSAGE *msg, char *buf)
}
static int
-init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
+init_chipset(struct controlvm_message *msg, char *buf)
{
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
- MaxBusCount = msg->cmd.initChipset.busCount;
- PlatformNumber = msg->cmd.initChipset.platformNumber;
+ MaxBusCount = msg->cmd.init_chipset.bus_count;
+ PlatformNumber = msg->cmd.init_chipset.platform_number;
PhysicalDataChan = 0;
/* We need to make sure we have our functions registered
* before processing messages. If we are a test vehicle the
- * testMessage for init_chipset will be set. We can ignore the
+ * test_message for init_chipset will be set. We can ignore the
* waits for the callbacks, since this will be manually entered
- * from a user. If no testMessage is set, we will wait for the
+ * from a user. If no test_message is set, we will wait for the
* functions.
*/
- if (!msg->hdr.Flags.testMessage)
- WAIT_ON_CALLBACK(VirtControlChanFunc);
+ if (!msg->hdr.flags.test_message)
+ WAIT_ON_CALLBACK(virt_control_chan_func);
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -794,10 +796,10 @@ init_chipset(CONTROLVM_MESSAGE *msg, char *buf)
static int
delete_bus_glue(u32 busNo)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.cmd.destroyBus.busNo = busNo;
+ msg.cmd.destroy_bus.bus_no = busNo;
if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("destroy_bus failed. busNo=0x%x\n", busNo);
return 0;
@@ -808,11 +810,11 @@ delete_bus_glue(u32 busNo)
static int
delete_device_glue(u32 busNo, u32 devNo)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
- msg.cmd.destroyDevice.busNo = busNo;
- msg.cmd.destroyDevice.devNo = devNo;
+ msg.cmd.destroy_device.bus_no = busNo;
+ msg.cmd.destroy_device.dev_no = devNo;
if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("destroy_device failed. busNo=0x%x devNo=0x%x\n", busNo,
devNo);
@@ -822,14 +824,14 @@ delete_device_glue(u32 busNo, u32 devNo)
}
int
-uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
- u64 channelAddr, ulong nChannelBytes)
+uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
+ u64 channel_addr, ulong n_channel_bytes)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF("enter busNo=0x%x\n", busNo);
+ LOGINF("enter busNo=0x%x\n", bus_no);
/* step 0: init the chipset */
- POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
if (!chipset_inited) {
/* step: initialize the chipset */
@@ -841,31 +843,32 @@ uislib_client_inject_add_bus(u32 busNo, uuid_le instGuid,
* after number 4, then the add_vnic will fail, and the
* ultraboot will fail.
*/
- msg.cmd.initChipset.busCount = 23;
- msg.cmd.initChipset.switchCount = 0;
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("init_chipset failed.\n");
return 0;
}
LOGINF("chipset initialized\n");
- POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, busNo,
+ POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
POSTCODE_SEVERITY_INFO);
}
/* step 1: create a bus */
- POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_WARNING);
+ POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
+ POSTCODE_SEVERITY_WARNING);
init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
- msg.cmd.createBus.busNo = busNo;
- msg.cmd.createBus.deviceCount = 23; /* devNo+1; */
- msg.cmd.createBus.channelAddr = channelAddr;
- msg.cmd.createBus.channelBytes = nChannelBytes;
+ msg.cmd.create_bus.bus_no = bus_no;
+ msg.cmd.create_bus.dev_count = 23; /* devNo+1; */
+ msg.cmd.create_bus.channel_addr = channel_addr;
+ msg.cmd.create_bus.channel_bytes = n_channel_bytes;
if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("create_bus failed.\n");
- POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
+ POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
return 1;
}
@@ -873,26 +876,26 @@ EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
int
-uislib_client_inject_del_bus(u32 busNo)
+uislib_client_inject_del_bus(u32 bus_no)
{
- return delete_bus_glue(busNo);
+ return delete_bus_glue(bus_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
int
-uislib_client_inject_pause_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
rc = pause_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA pause_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return rc;
}
return 0;
@@ -900,19 +903,19 @@ uislib_client_inject_pause_vhba(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
int
-uislib_client_inject_resume_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
rc = resume_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA resume_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return rc;
}
return 0;
@@ -921,19 +924,19 @@ uislib_client_inject_resume_vhba(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
int
-uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
+uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr)
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", bus_no, dev_no);
/* chipset init'ed with bus bus has been previously created -
* Verify it still exists step 2: create the VHBA device on the
* bus
*/
- POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
@@ -941,16 +944,16 @@ uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
/* signify that the physical channel address does NOT
* need to be ioremap()ed
*/
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = instGuid;
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
if (intr)
- msg.cmd.createDevice.intr = *intr;
+ msg.cmd.create_device.intr = *intr;
else
- memset(&msg.cmd.createDevice.intr, 0,
- sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
@@ -958,41 +961,41 @@ uislib_client_inject_add_vhba(u32 busNo, u32 devNo,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
}
- msg.cmd.createDevice.channelBytes = chan_bytes;
- msg.cmd.createDevice.dataTypeGuid = UltraVhbaChannelProtocolGuid;
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("VHBA create_device failed.\n");
- POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
int
-uislib_client_inject_del_vhba(u32 busNo, u32 devNo)
+uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
{
- return delete_device_glue(busNo, devNo);
+ return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
int
-uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
+uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
u64 phys_chan_addr, u32 chan_bytes,
- int is_test_addr, uuid_le instGuid,
- struct InterruptInfo *intr)
+ int is_test_addr, uuid_le inst_uuid,
+ struct irq_info *intr)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
- LOGINF(" enter busNo=0x%x devNo=0x%x\n", busNo, devNo);
+ LOGINF(" enter busNo=0x%x devNo=0x%x\n", bus_no, dev_no);
/* chipset init'ed with bus bus has been previously created -
* Verify it still exists step 2: create the VNIC device on the
* bus
*/
- POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
@@ -1000,16 +1003,16 @@ uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
/* signify that the physical channel address does NOT
* need to be ioremap()ed
*/
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = instGuid;
+ msg.hdr.flags.test_message = 1;
+ msg.cmd.create_device.bus_no = bus_no;
+ msg.cmd.create_device.dev_no = dev_no;
+ msg.cmd.create_device.dev_inst_uuid = inst_uuid;
if (intr)
- msg.cmd.createDevice.intr = *intr;
+ msg.cmd.create_device.intr = *intr;
else
- memset(&msg.cmd.createDevice.intr, 0,
- sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = phys_chan_addr;
+ memset(&msg.cmd.create_device.intr, 0,
+ sizeof(struct irq_info));
+ msg.cmd.create_device.channel_addr = phys_chan_addr;
if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
LOGERR("wrong channel size.chan_bytes = 0x%x IO_CHANNEL_SIZE= 0x%x\n",
chan_bytes, (unsigned int) MIN_IO_CHANNEL_SIZE);
@@ -1017,35 +1020,35 @@ uislib_client_inject_add_vnic(u32 busNo, u32 devNo,
MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
return 0;
}
- msg.cmd.createDevice.channelBytes = chan_bytes;
- msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
+ msg.cmd.create_device.channel_bytes = chan_bytes;
+ msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC create_device failed.\n");
- POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
return 0;
}
- POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, devNo, busNo,
+ POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
int
-uislib_client_inject_pause_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateStandby;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_standby;
rc = pause_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC pause_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return -1;
}
return 0;
@@ -1053,19 +1056,19 @@ uislib_client_inject_pause_vnic(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
int
-uislib_client_inject_resume_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
{
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
int rc;
init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
- msg.cmd.deviceChangeState.busNo = busNo;
- msg.cmd.deviceChangeState.devNo = devNo;
- msg.cmd.deviceChangeState.state = SegmentStateRunning;
+ msg.cmd.device_change_state.bus_no = bus_no;
+ msg.cmd.device_change_state.dev_no = dev_no;
+ msg.cmd.device_change_state.state = segment_state_running;
rc = resume_device(&msg);
if (rc != CONTROLVM_RESP_SUCCESS) {
LOGERR("VNIC resume_device failed. busNo=0x%x devNo=0x%x\n",
- busNo, devNo);
+ bus_no, dev_no);
return -1;
}
return 0;
@@ -1074,88 +1077,12 @@ uislib_client_inject_resume_vnic(u32 busNo, u32 devNo)
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
int
-uislib_client_inject_del_vnic(u32 busNo, u32 devNo)
+uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
{
- return delete_device_glue(busNo, devNo);
+ return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
-static int
-uislib_client_add_vnic(u32 busNo)
-{
- BOOL busCreated = FALSE;
- int devNo = 0; /* Default to 0, since only one device
- * will be created for this bus... */
- CONTROLVM_MESSAGE msg;
-
- init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createBus.busNo = busNo;
- msg.cmd.createBus.deviceCount = 4;
- msg.cmd.createBus.channelAddr = 0;
- msg.cmd.createBus.channelBytes = 0;
- if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("client create_bus failed");
- return 0;
- }
- busCreated = TRUE;
-
- init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.createDevice.busNo = busNo;
- msg.cmd.createDevice.devNo = devNo;
- msg.cmd.createDevice.devInstGuid = NULL_UUID_LE;
- memset(&msg.cmd.createDevice.intr, 0, sizeof(struct InterruptInfo));
- msg.cmd.createDevice.channelAddr = PhysicalDataChan;
- msg.cmd.createDevice.channelBytes = MIN_IO_CHANNEL_SIZE;
- msg.cmd.createDevice.dataTypeGuid = UltraVnicChannelProtocolGuid;
- if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- LOGERR("client create_device failed");
- goto AwayCleanup;
- }
-
- return 1;
-
-AwayCleanup:
- if (busCreated) {
- init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyBus.busNo = busNo;
- if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
- LOGERR("client destroy_bus failed.\n");
- }
-
- return 0;
-} /* end uislib_client_add_vnic */
-EXPORT_SYMBOL_GPL(uislib_client_add_vnic);
-
-static int
-uislib_client_delete_vnic(u32 busNo)
-{
- int devNo = 0; /* Default to 0, since only one device
- * will be created for this bus... */
- CONTROLVM_MESSAGE msg;
-
- init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyDevice.busNo = busNo;
- msg.cmd.destroyDevice.devNo = devNo;
- if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
- /* Don't error exit - try to see if bus can be destroyed... */
- LOGERR("client destroy_device failed.\n");
- }
-
- init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
- msg.hdr.Flags.testMessage = 1;
- msg.cmd.destroyBus.busNo = busNo;
- if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
- LOGERR("client destroy_bus failed.\n");
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(uislib_client_delete_vnic);
-/* end client_delete_vnic */
-
void *
uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
{
@@ -1206,17 +1133,17 @@ info_debugfs_read_helper(char **buff, int *buff_len)
for (bus = BusListHead; bus; bus = bus->next) {
if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
- bus, bus->busNo, bus->deviceCount) < 0)
+ bus, bus->bus_no, bus->device_count) < 0)
goto err_done_unlock;
if (PLINE(" Devices:\n") < 0)
goto err_done_unlock;
- for (i = 0; i < bus->deviceCount; i++) {
+ for (i = 0; i < bus->device_count; i++) {
if (bus->device[i]) {
if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
- bus->busNo, i, bus->device[i],
+ bus->bus_no, i, bus->device[i],
bus->device[i]->chanptr,
bus->device[i]->swtch) < 0)
goto err_done_unlock;
@@ -1232,7 +1159,7 @@ info_debugfs_read_helper(char **buff, int *buff_len)
read_unlock(&BusListLock);
if (PLINE("UisUtils_Registered_Services: %d\n",
- atomic_read(&UisUtils_Registered_Services)) < 0)
+ atomic_read(&uisutils_registered_services)) < 0)
goto err_done;
if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
cycles_before_wait, wait_cycles) < 0)
@@ -1294,9 +1221,9 @@ find_dev(u32 busNo, u32 devNo)
read_lock(&BusListLock);
for (bus = BusListHead; bus; bus = bus->next) {
- if (bus->busNo == busNo) {
+ if (bus->bus_no == busNo) {
/* make sure the device number is valid */
- if (devNo >= bus->deviceCount) {
+ if (devNo >= bus->device_count) {
LOGERR("%s bad busNo, devNo=%d,%d",
__func__,
(int) (busNo), (int) (devNo));
@@ -1560,13 +1487,13 @@ uislib_mod_init(void)
LOGINF("sizeof(uiscmdrsp_net):%lu\n",
(ulong) sizeof(struct uiscmdrsp_net));
LOGINF("sizeof(CONTROLVM_MESSAGE):%lu bytes\n",
- (ulong) sizeof(CONTROLVM_MESSAGE));
- LOGINF("sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL):%lu bytes\n",
- (ulong) sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL));
+ (ulong) sizeof(struct controlvm_message));
+ LOGINF("sizeof(struct spar_controlvm_channel_protocol):%lu bytes\n",
+ (ulong) sizeof(struct spar_controlvm_channel_protocol));
LOGINF("sizeof(CHANNEL_HEADER):%lu bytes\n",
- (ulong) sizeof(CHANNEL_HEADER));
- LOGINF("sizeof(ULTRA_IO_CHANNEL_PROTOCOL):%lu bytes\n",
- (ulong) sizeof(ULTRA_IO_CHANNEL_PROTOCOL));
+ (ulong) sizeof(struct channel_header));
+ LOGINF("sizeof(struct spar_io_channel_protocol):%lu bytes\n",
+ (ulong) sizeof(struct spar_io_channel_protocol));
LOGINF("SIZEOF_CMDRSP:%lu bytes\n", SIZEOF_CMDRSP);
LOGINF("SIZEOF_PROTOCOL:%lu bytes\n", SIZEOF_PROTOCOL);
@@ -1574,7 +1501,7 @@ uislib_mod_init(void)
BusListHead = NULL;
BusListCount = MaxBusCount = 0;
rwlock_init(&BusListLock);
- VirtControlChanFunc = NULL;
+ virt_control_chan_func = NULL;
/* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
* then map this physical address to a virtual address. */
diff --git a/drivers/staging/unisys/uislib/uisqueue.c b/drivers/staging/unisys/uislib/uisqueue.c
index 44208841bd5a..f9f8442d58c5 100644
--- a/drivers/staging/unisys/uislib/uisqueue.c
+++ b/drivers/staging/unisys/uislib/uisqueue.c
@@ -81,13 +81,13 @@ do_locked_client_insert(struct uisqueue_info *queueinfo,
u8 rc = 0;
spin_lock_irqsave(lock, flags);
- if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(queueinfo->chan, channelId, NULL))
+ if (!spar_channel_client_acquire_os(queueinfo->chan, channelId))
goto unlock;
- if (visor_signal_insert(queueinfo->chan, whichqueue, pSignal)) {
+ if (spar_signal_insert(queueinfo->chan, whichqueue, pSignal)) {
queueinfo->packets_sent++;
rc = 1;
}
- ULTRA_CHANNEL_CLIENT_RELEASE_OS(queueinfo->chan, channelId, NULL);
+ spar_channel_client_release_os(queueinfo->chan, channelId);
unlock:
spin_unlock_irqrestore((spinlock_t *)lock, flags);
return rc;
@@ -125,7 +125,7 @@ int
uisqueue_get_cmdrsp(struct uisqueue_info *queueinfo,
void *cmdrsp, unsigned int whichqueue)
{
- if (!visor_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
+ if (!spar_signal_remove(queueinfo->chan, whichqueue, cmdrsp))
return 0;
queueinfo->packets_received++;
diff --git a/drivers/staging/unisys/uislib/uisutils.c b/drivers/staging/unisys/uislib/uisutils.c
index 8ff6d26ff00b..4a5b86773927 100644
--- a/drivers/staging/unisys/uislib/uisutils.c
+++ b/drivers/staging/unisys/uislib/uisutils.c
@@ -25,9 +25,7 @@
#include "uisutils.h"
#include "version.h"
#include "vbushelper.h"
-#include <linux/uuid.h>
#include <linux/skbuff.h>
-#include <linux/uuid.h>
#ifdef CONFIG_HIGHMEM
#include <linux/highmem.h>
#endif
@@ -39,7 +37,7 @@
#define __MYFILE__ "uisutils.c"
/* exports */
-atomic_t UisUtils_Registered_Services = ATOMIC_INIT(0);
+atomic_t uisutils_registered_services = ATOMIC_INIT(0);
/* num registrations via
* uisctrl_register_req_handler() or
* uisctrl_register_req_handler_ex() */
@@ -75,20 +73,20 @@ EXPORT_SYMBOL_GPL(uisutil_add_proc_line_ex);
int
uisctrl_register_req_handler(int type, void *fptr,
- ULTRA_VBUS_DEVICEINFO *chipset_driver_info)
+ struct ultra_vbus_deviceinfo *chipset_driver_info)
{
LOGINF("type = %d, fptr = 0x%p.\n", type, fptr);
switch (type) {
case 2:
if (fptr) {
- if (!VirtControlChanFunc)
- atomic_inc(&UisUtils_Registered_Services);
- VirtControlChanFunc = fptr;
+ if (!virt_control_chan_func)
+ atomic_inc(&uisutils_registered_services);
+ virt_control_chan_func = fptr;
} else {
- if (VirtControlChanFunc)
- atomic_dec(&UisUtils_Registered_Services);
- VirtControlChanFunc = NULL;
+ if (virt_control_chan_func)
+ atomic_dec(&uisutils_registered_services);
+ virt_control_chan_func = NULL;
}
break;
@@ -105,76 +103,75 @@ uisctrl_register_req_handler(int type, void *fptr,
EXPORT_SYMBOL_GPL(uisctrl_register_req_handler);
int
-uisctrl_register_req_handler_ex(uuid_le switchTypeGuid,
- const char *switch_type_name,
- int (*controlfunc)(struct io_msgs *),
- unsigned long min_channel_bytes,
- int (*Server_Channel_Ok)(unsigned long
- channelBytes),
- int (*Server_Channel_Init)
- (void *x, unsigned char *clientStr,
- u32 clientStrLen, u64 bytes),
- ULTRA_VBUS_DEVICEINFO *chipset_DriverInfo)
+uisctrl_register_req_handler_ex(uuid_le switch_uuid,
+ const char *switch_type_name,
+ int (*controlfunc)(struct io_msgs *),
+ unsigned long min_channel_bytes,
+ int (*server_channel_ok)(unsigned long channel_bytes),
+ int (*server_channel_init)(void *x,
+ unsigned char *client_str,
+ u32 client_str_len, u64 bytes),
+ struct ultra_vbus_deviceinfo *chipset_driver_info)
{
- ReqHandlerInfo_t *pReqHandlerInfo;
+ struct req_handler_info *pReqHandlerInfo;
int rc = 0; /* assume failure */
LOGINF("type=%pUL, controlfunc=0x%p.\n",
- &switchTypeGuid, controlfunc);
+ &switch_uuid, controlfunc);
if (!controlfunc) {
- LOGERR("%pUL: controlfunc must be supplied\n", &switchTypeGuid);
+ LOGERR("%pUL: controlfunc must be supplied\n", &switch_uuid);
goto Away;
}
- if (!Server_Channel_Ok) {
+ if (!server_channel_ok) {
LOGERR("%pUL: Server_Channel_Ok must be supplied\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- if (!Server_Channel_Init) {
+ if (!server_channel_init) {
LOGERR("%pUL: Server_Channel_Init must be supplied\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- pReqHandlerInfo = ReqHandlerAdd(switchTypeGuid,
+ pReqHandlerInfo = req_handler_add(switch_uuid,
switch_type_name,
controlfunc,
min_channel_bytes,
- Server_Channel_Ok, Server_Channel_Init);
+ server_channel_ok, server_channel_init);
if (!pReqHandlerInfo) {
- LOGERR("failed to add %pUL to server list\n", &switchTypeGuid);
+ LOGERR("failed to add %pUL to server list\n", &switch_uuid);
goto Away;
}
- atomic_inc(&UisUtils_Registered_Services);
+ atomic_inc(&uisutils_registered_services);
rc = 1; /* success */
Away:
if (rc) {
- if (chipset_DriverInfo)
- bus_device_info_init(chipset_DriverInfo, "chipset",
+ if (chipset_driver_info)
+ bus_device_info_init(chipset_driver_info, "chipset",
"uislib", VERSION, NULL);
} else
- LOGERR("failed to register type %pUL.\n", &switchTypeGuid);
+ LOGERR("failed to register type %pUL.\n", &switch_uuid);
return rc;
}
EXPORT_SYMBOL_GPL(uisctrl_register_req_handler_ex);
int
-uisctrl_unregister_req_handler_ex(uuid_le switchTypeGuid)
+uisctrl_unregister_req_handler_ex(uuid_le switch_uuid)
{
int rc = 0; /* assume failure */
- LOGINF("type=%pUL.\n", &switchTypeGuid);
- if (ReqHandlerDel(switchTypeGuid) < 0) {
+ LOGINF("type=%pUL.\n", &switch_uuid);
+ if (req_handler_del(switch_uuid) < 0) {
LOGERR("failed to remove %pUL from server list\n",
- &switchTypeGuid);
+ &switch_uuid);
goto Away;
}
- atomic_dec(&UisUtils_Registered_Services);
+ atomic_dec(&uisutils_registered_services);
rc = 1; /* success */
Away:
if (!rc)
- LOGERR("failed to unregister type %pUL.\n", &switchTypeGuid);
+ LOGERR("failed to unregister type %pUL.\n", &switch_uuid);
return rc;
}
EXPORT_SYMBOL_GPL(uisctrl_unregister_req_handler_ex);
@@ -275,11 +272,11 @@ dolist: if (skb_shinfo(skb)->frag_list) {
}
EXPORT_SYMBOL_GPL(uisutil_copy_fragsinfo_from_skb);
-static LIST_HEAD(ReqHandlerInfo_list); /* list of ReqHandlerInfo_t */
+static LIST_HEAD(ReqHandlerInfo_list); /* list of struct req_handler_info */
static DEFINE_SPINLOCK(ReqHandlerInfo_list_lock);
-ReqHandlerInfo_t *
-ReqHandlerAdd(uuid_le switchTypeGuid,
+struct req_handler_info *
+req_handler_add(uuid_le switch_uuid,
const char *switch_type_name,
int (*controlfunc)(struct io_msgs *),
unsigned long min_channel_bytes,
@@ -287,16 +284,16 @@ ReqHandlerAdd(uuid_le switchTypeGuid,
int (*Server_Channel_Init)
(void *x, unsigned char *clientStr, u32 clientStrLen, u64 bytes))
{
- ReqHandlerInfo_t *rc = NULL;
+ struct req_handler_info *rc = NULL;
rc = kzalloc(sizeof(*rc), GFP_ATOMIC);
if (!rc)
return NULL;
- rc->switchTypeGuid = switchTypeGuid;
+ rc->switch_uuid = switch_uuid;
rc->controlfunc = controlfunc;
rc->min_channel_bytes = min_channel_bytes;
- rc->Server_Channel_Ok = Server_Channel_Ok;
- rc->Server_Channel_Init = Server_Channel_Init;
+ rc->server_channel_ok = Server_Channel_Ok;
+ rc->server_channel_init = Server_Channel_Init;
if (switch_type_name)
strncpy(rc->switch_type_name, switch_type_name,
sizeof(rc->switch_type_name) - 1);
@@ -307,16 +304,16 @@ ReqHandlerAdd(uuid_le switchTypeGuid,
return rc;
}
-ReqHandlerInfo_t *
-ReqHandlerFind(uuid_le switchTypeGuid)
+struct req_handler_info *
+req_handler_find(uuid_le switch_uuid)
{
struct list_head *lelt, *tmp;
- ReqHandlerInfo_t *entry = NULL;
+ struct req_handler_info *entry = NULL;
spin_lock(&ReqHandlerInfo_list_lock);
list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
- entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
- if (uuid_le_cmp(entry->switchTypeGuid, switchTypeGuid) == 0) {
+ entry = list_entry(lelt, struct req_handler_info, list_link);
+ if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
spin_unlock(&ReqHandlerInfo_list_lock);
return entry;
}
@@ -326,16 +323,16 @@ ReqHandlerFind(uuid_le switchTypeGuid)
}
int
-ReqHandlerDel(uuid_le switchTypeGuid)
+req_handler_del(uuid_le switch_uuid)
{
struct list_head *lelt, *tmp;
- ReqHandlerInfo_t *entry = NULL;
+ struct req_handler_info *entry = NULL;
int rc = -1;
spin_lock(&ReqHandlerInfo_list_lock);
list_for_each_safe(lelt, tmp, &ReqHandlerInfo_list) {
- entry = list_entry(lelt, ReqHandlerInfo_t, list_link);
- if (uuid_le_cmp(entry->switchTypeGuid, switchTypeGuid) == 0) {
+ entry = list_entry(lelt, struct req_handler_info, list_link);
+ if (uuid_le_cmp(entry->switch_uuid, switch_uuid) == 0) {
list_del(lelt);
kfree(entry);
rc++;
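The req_handler_add/req_handler_find/req_handler_del trio renamed above is a UUID-keyed handler registry guarded by a spinlock. A minimal user-space sketch of the same pattern, with hypothetical names, a pthread mutex standing in for the kernel spinlock and a raw 16-byte key standing in for uuid_le:

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	struct handler_info {
		unsigned char key[16];          /* stands in for uuid_le     */
		int (*control)(void *msg);      /* stands in for controlfunc */
		struct handler_info *next;
	};

	static struct handler_info *handler_list;
	static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Add a handler keyed by a 16-byte id; returns the new entry or NULL. */
	static struct handler_info *handler_add(const unsigned char key[16],
						int (*control)(void *msg))
	{
		struct handler_info *e = calloc(1, sizeof(*e));

		if (!e)
			return NULL;
		memcpy(e->key, key, 16);
		e->control = control;
		pthread_mutex_lock(&handler_lock);
		e->next = handler_list;     /* insert at the head */
		handler_list = e;
		pthread_mutex_unlock(&handler_lock);
		return e;
	}

	/* Remove the entry matching key; 0 on success, -1 if not found. */
	static int handler_del(const unsigned char key[16])
	{
		struct handler_info **pp, *e;

		pthread_mutex_lock(&handler_lock);
		for (pp = &handler_list; (e = *pp) != NULL; pp = &e->next) {
			if (memcmp(e->key, key, 16) == 0) {
				*pp = e->next;
				pthread_mutex_unlock(&handler_lock);
				free(e);
				return 0;
			}
		}
		pthread_mutex_unlock(&handler_lock);
		return -1;
	}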
diff --git a/drivers/staging/unisys/virthba/virthba.c b/drivers/staging/unisys/virthba/virthba.c
index 938e2c82c1ab..d7a629b5f111 100644
--- a/drivers/staging/unisys/virthba/virthba.c
+++ b/drivers/staging/unisys/virthba/virthba.c
@@ -101,7 +101,6 @@ static DEF_SCSI_QCMD(virthba_queue_command)
#define virthba_queue_command virthba_queue_command_lck
#endif
-
static int virthba_slave_alloc(struct scsi_device *scsidev);
static int virthba_slave_configure(struct scsi_device *scsidev);
static void virthba_slave_destroy(struct scsi_device *scsidev);
@@ -172,7 +171,7 @@ struct virthba_info {
struct virtpci_dev *virtpcidev;
struct list_head dev_info_list;
struct chaninfo chinfo;
- struct InterruptInfo intr; /* use recvInterrupt info to receive
+ struct irq_info intr; /* use recv_irq info to receive
interrupts when IOs complete */
int interrupt_vector;
struct scsipending pending[MAX_PENDING_REQUESTS]; /* Tracks the requests
@@ -420,8 +419,8 @@ static irqreturn_t
virthba_ISR(int irq, void *dev_id)
{
struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
- CHANNEL_HEADER __iomem *pChannelHeader;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ struct channel_header __iomem *pChannelHeader;
+ struct signal_queue_header __iomem *pqhdr;
u64 mask;
unsigned long long rc1;
@@ -429,24 +428,24 @@ virthba_ISR(int irq, void *dev_id)
return IRQ_NONE;
virthbainfo->interrupts_rcvd++;
pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
- if (((readq(&pChannelHeader->Features)
+ if (((readq(&pChannelHeader->features)
& ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
- && ((readq(&pChannelHeader->Features) &
+ && ((readq(&pChannelHeader->features) &
ULTRA_IO_DRIVER_DISABLES_INTS) !=
0)) {
virthbainfo->interrupts_disabled++;
mask = ~ULTRA_CHANNEL_ENABLE_INTS;
rc1 = uisqueue_interlocked_and(virthbainfo->flags_addr, mask);
}
- if (visor_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
+ if (spar_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
virthbainfo->interrupts_notme++;
return IRQ_NONE;
}
- pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ pqhdr = (struct signal_queue_header __iomem *)
((char __iomem *) pChannelHeader +
- readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
- writeq(readq(&pqhdr->NumInterruptsReceived) + 1,
- &pqhdr->NumInterruptsReceived);
+ readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ writeq(readq(&pqhdr->num_irq_received) + 1,
+ &pqhdr->num_irq_received);
atomic_set(&virthbainfo->interrupt_rcvd, 1);
wake_up_interruptible(&virthbainfo->rsp_queue);
return IRQ_HANDLED;
@@ -461,17 +460,17 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
int rsp;
int i;
irq_handler_t handler = virthba_ISR;
- CHANNEL_HEADER __iomem *pChannelHeader;
- SIGNAL_QUEUE_HEADER __iomem *pqhdr;
+ struct channel_header __iomem *pChannelHeader;
+ struct signal_queue_header __iomem *pqhdr;
u64 mask;
LOGVER("entering virthba_probe...\n");
- LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGVER("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
LOGINF("entering virthba_probe...\n");
- LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* call scsi_host_alloc to register a scsi host adapter
* instance - this virthba that has just been created is an
@@ -578,18 +577,18 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
INIT_WORK(&virthbainfo->serverdown_completion,
virthba_serverdown_complete);
- writeq(readq(&virthbainfo->chinfo.queueinfo->chan->Features) |
+ writeq(readq(&virthbainfo->chinfo.queueinfo->chan->features) |
ULTRA_IO_CHANNEL_IS_POLLING,
- &virthbainfo->chinfo.queueinfo->chan->Features);
+ &virthbainfo->chinfo.queueinfo->chan->features);
/* start thread that will receive scsicmnd responses */
DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
- pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
+ pqhdr = (struct signal_queue_header __iomem *)
((char __iomem *)pChannelHeader +
- readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
- virthbainfo->flags_addr = &pqhdr->FeatureFlags;
+ readq(&pChannelHeader->ch_space_offset)) + IOCHAN_FROM_IOPART;
+ virthbainfo->flags_addr = &pqhdr->features;
if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
process_incoming_rsps,
@@ -603,16 +602,16 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
return -ENODEV;
}
LOGINF("sendInterruptHandle=0x%16llX",
- virthbainfo->intr.sendInterruptHandle);
+ virthbainfo->intr.send_irq_handle);
LOGINF("recvInterruptHandle=0x%16llX",
- virthbainfo->intr.recvInterruptHandle);
+ virthbainfo->intr.recv_irq_handle);
LOGINF("recvInterruptVector=0x%8X",
- virthbainfo->intr.recvInterruptVector);
+ virthbainfo->intr.recv_irq_vector);
LOGINF("recvInterruptShared=0x%2X",
- virthbainfo->intr.recvInterruptShared);
+ virthbainfo->intr.recv_irq_shared);
LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
virthbainfo->interrupt_vector =
- virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK;
+ virthbainfo->intr.recv_irq_handle & INTERRUPT_VECTOR_MASK;
rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
scsihost->hostt->name, virthbainfo);
if (rsp != 0) {
@@ -622,7 +621,7 @@ virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
} else {
u64 __iomem *Features_addr =
- &virthbainfo->chinfo.queueinfo->chan->Features;
+ &virthbainfo->chinfo.queueinfo->chan->features;
LOGERR("request_irq(%d) uislib_virthba_ISR request succeeded\n",
virthbainfo->interrupt_vector);
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
@@ -649,8 +648,8 @@ virthba_remove(struct virtpci_dev *virtpcidev)
struct Scsi_Host *scsihost =
(struct Scsi_Host *) virtpcidev->scsi.scsihost;
- LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ LOGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
virthbainfo = (struct virthba_info *) scsihost->hostdata;
if (virthbainfo->interrupt_vector != -1)
free_irq(virthbainfo->interrupt_vector, virthbainfo);
@@ -674,7 +673,7 @@ virthba_remove(struct virtpci_dev *virtpcidev)
}
static int
-forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
+forward_vdiskmgmt_command(enum vdisk_mgmt_types vdiskcmdtype,
struct Scsi_Host *scsihost,
struct uisscsi_dest *vdest)
{
@@ -738,7 +737,8 @@ forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
/*****************************************************/
static int
-forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev)
+forward_taskmgmt_command(enum task_mgmt_types tasktype,
+ struct scsi_device *scsidev)
{
struct uiscmdrsp *cmdrsp;
struct virthba_info *virthbainfo =
@@ -1142,9 +1142,9 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsicmd, cmdrsp->scsi.cmnd[0],
scsidev->host->host_no, scsidev->id,
scsidev->channel, scsidev->lun,
- cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey,
- sd->AdditionalSenseCode,
- sd->AdditionalSenseCodeQualifier);
+ cmdrsp->scsi.linuxstat, sd->valid, sd->sense_key,
+ sd->additional_sense_code,
+ sd->additional_sense_code_qualifier);
if (atomic_read(&vdisk->error_count) ==
VIRTHBA_ERROR_COUNT) {
LOGERR("Throtling SCSICMD errors disk <%d:%d:%d:%llu>\n",
@@ -1276,8 +1276,8 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
while (1) {
spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
- if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan,
- "vhba", NULL)) {
+ if (!spar_channel_client_acquire_os(dc->queueinfo->chan,
+ "vhba")) {
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
flags);
virthbainfo->acquire_failed_cnt++;
@@ -1285,8 +1285,7 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
}
qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
IOCHAN_FROM_IOPART);
- ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan,
- "vhba", NULL);
+ spar_channel_client_release_os(dc->queueinfo->chan, "vhba");
spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
if (qrslt == 0)
break;
@@ -1310,7 +1309,7 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
* a Client/Guest Partition. Let's be
* safe and set it to NULL now. Do
* not use it here! */
- cmdrsp->disknotify.vHba = NULL;
+ cmdrsp->disknotify.v_hba = NULL;
process_disk_notify(shost, cmdrsp);
} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
if (!del_scsipending_entry(virthbainfo,
@@ -1323,7 +1322,6 @@ drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
}
}
-
/* main function for the thread that waits for scsi commands to arrive
* in a specified queue
*/
@@ -1453,7 +1451,7 @@ static ssize_t enable_ints_write(struct file *file,
if (VirtHbasOpen[i].virthbainfo != NULL) {
virthbainfo = VirtHbasOpen[i].virthbainfo;
Features_addr =
- &virthbainfo->chinfo.queueinfo->chan->Features;
+ &virthbainfo->chinfo.queueinfo->chan->features;
if (new_value == 1) {
mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
ULTRA_IO_DRIVER_DISABLES_INTS);
@@ -1482,8 +1480,8 @@ virthba_serverup(struct virtpci_dev *virtpcidev)
(struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
scsihost)->hostdata;
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
if (!virthbainfo->serverdown) {
DBGINF("Server up message received while server is already up.\n");
@@ -1498,9 +1496,9 @@ virthba_serverup(struct virtpci_dev *virtpcidev)
/* Must transition channel to ATTACHED state BEFORE we
* can start using the device again
*/
- ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
- dev_name(&virtpcidev->generic_dev),
- CHANNELCLI_ATTACHED, NULL);
+ SPAR_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
+ dev_name(&virtpcidev->generic_dev),
+ CHANNELCLI_ATTACHED, NULL);
/* Start Processing the IOVM Response Queue Again */
if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
@@ -1573,13 +1571,13 @@ virthba_serverdown_complete(struct work_struct *work)
virtpcidev = virthbainfo->virtpcidev;
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
virthbainfo->serverdown = true;
virthbainfo->serverchangingstate = false;
/* Return the ServerDown response to Command */
- visorchipset_device_pause_response(virtpcidev->busNo,
- virtpcidev->deviceNo, 0);
+ visorchipset_device_pause_response(virtpcidev->bus_no,
+ virtpcidev->device_no, 0);
}
/* As per VirtpciFunc, returns 1 for success and 0 for failure */
@@ -1591,8 +1589,8 @@ virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
scsihost)->hostdata;
DBGINF("virthba_serverdown");
- DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
- virtpcidev->deviceNo);
+ DBGINF("virtpcidev bus_no<<%d>>devNo<<%d>>", virtpcidev->bus_no,
+ virtpcidev->device_no);
if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
virthbainfo->serverchangingstate = true;
diff --git a/drivers/staging/unisys/virthba/virthba.h b/drivers/staging/unisys/virthba/virthba.h
index d4b809b0c7bc..59901668d4f4 100644
--- a/drivers/staging/unisys/virthba/virthba.h
+++ b/drivers/staging/unisys/virthba/virthba.h
@@ -19,13 +19,9 @@
* Unisys Virtual HBA driver header
*/
-
-
#ifndef __VIRTHBA_H__
#define __VIRTHBA_H__
-
#define VIRTHBA_VERSION "01.00"
-
#endif /* __VIRTHBA_H__ */
diff --git a/drivers/staging/unisys/virtpci/virtpci.c b/drivers/staging/unisys/virtpci/virtpci.c
index ee9f8260cd15..39b828dce503 100644
--- a/drivers/staging/unisys/virtpci/virtpci.c
+++ b/drivers/staging/unisys/virtpci/virtpci.c
@@ -50,6 +50,7 @@ struct driver_private {
struct module_kobject *mkobj;
struct device_driver *driver;
};
+
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
/* bus_id went away in 2.6.30 - the size was 20 bytes, so we'll define
@@ -109,7 +110,7 @@ static int virtpci_device_probe(struct device *dev);
static int virtpci_device_remove(struct device *dev);
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
+ size_t len, loff_t *offset);
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
@@ -137,7 +138,7 @@ static struct device virtpci_rootbus_device = {
};
/* filled in with info about parent chipset driver when we register with it */
-static ULTRA_VBUS_DEVICEINFO Chipset_DriverInfo;
+static struct ultra_vbus_deviceinfo chipset_driver_info;
static const struct sysfs_ops virtpci_driver_sysfs_ops = {
.show = virtpci_driver_attr_show,
@@ -148,11 +149,11 @@ static struct kobj_type virtpci_driver_kobj_type = {
.sysfs_ops = &virtpci_driver_sysfs_ops,
};
-static struct virtpci_dev *VpcidevListHead;
-static DEFINE_RWLOCK(VpcidevListLock);
+static struct virtpci_dev *vpcidev_list_head;
+static DEFINE_RWLOCK(vpcidev_list_lock);
/* filled in with info about this driver, wrt it servicing client busses */
-static ULTRA_VBUS_DEVICEINFO Bus_DriverInfo;
+static struct ultra_vbus_deviceinfo bus_driver_info;
/*****************************************************/
/* debugfs entries */
@@ -171,13 +172,12 @@ struct virtpci_busdev {
/*****************************************************/
static inline
-int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
+int WAIT_FOR_IO_CHANNEL(struct spar_io_channel_protocol __iomem *chanptr)
{
int count = 120;
while (count > 0) {
-
- if (ULTRA_CHANNEL_SERVER_READY(&chanptr->ChannelHeader))
+ if (SPAR_CHANNEL_SERVER_READY(&chanptr->channel_header))
return 1;
UIS_THREAD_WAIT_SEC(1);
count--;
@@ -186,8 +186,8 @@ int WAIT_FOR_IO_CHANNEL(ULTRA_IO_CHANNEL_PROTOCOL __iomem *chanptr)
}
/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.ChpInfo. */
-static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info)
+static int write_vbus_chp_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info)
{
int off;
@@ -195,18 +195,18 @@ static int write_vbus_chpInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
LOGERR("vbus channel not present");
return -1;
}
- off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.chpInfoByteOffset;
- if (chan->HdrInfo.chpInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because chpInfoByteOffset == 0");
+ off = sizeof(struct channel_header) + chan->hdr_info.chp_info_offset;
+ if (chan->hdr_info.chp_info_offset == 0) {
+ LOGERR("vbus channel not used, because chp_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
/* Write the contents of <info> to the ULTRA_VBUS_CHANNEL_PROTOCOL.BusInfo. */
-static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info)
+static int write_vbus_bus_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info)
{
int off;
@@ -214,12 +214,12 @@ static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
LOGERR("vbus channel not present");
return -1;
}
- off = sizeof(ULTRA_CHANNEL_PROTOCOL) + chan->HdrInfo.busInfoByteOffset;
- if (chan->HdrInfo.busInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because busInfoByteOffset == 0");
+ off = sizeof(struct channel_header) + chan->hdr_info.bus_info_offset;
+ if (chan->hdr_info.bus_info_offset == 0) {
+ LOGERR("vbus channel not used, because bus_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
@@ -227,8 +227,8 @@ static int write_vbus_busInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
* ULTRA_VBUS_CHANNEL_PROTOCOL.DevInfo[<devix>].
*/
static int
-write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
- ULTRA_VBUS_DEVICEINFO *info, int devix)
+write_vbus_dev_info(struct spar_vbus_channel_protocol *chan,
+ struct ultra_vbus_deviceinfo *info, int devix)
{
int off;
@@ -237,14 +237,14 @@ write_vbus_devInfo(ULTRA_VBUS_CHANNEL_PROTOCOL *chan,
return -1;
}
off =
- (sizeof(ULTRA_CHANNEL_PROTOCOL) +
- chan->HdrInfo.devInfoByteOffset) +
- (chan->HdrInfo.deviceInfoStructBytes * devix);
- if (chan->HdrInfo.devInfoByteOffset == 0) {
- LOGERR("vbus channel not used, because devInfoByteOffset == 0");
+ (sizeof(struct channel_header) +
+ chan->hdr_info.dev_info_offset) +
+ (chan->hdr_info.device_info_struct_bytes * devix);
+ if (chan->hdr_info.dev_info_offset == 0) {
+ LOGERR("vbus channel not used, because dev_info_offset == 0");
return -1;
}
- memcpy(((u8 *) (chan)) + off, info, sizeof(*info));
+ memcpy(((u8 *)(chan)) + off, info, sizeof(*info));
return 0;
}
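write_vbus_dev_info() above locates slot <devix> with strided arithmetic from the channel start: fixed header, then a per-device record table starting at dev_info_offset. A small self-contained sketch of that layout calculation, with hypothetical parameter names:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Byte offset of device-info slot 'devix' inside a channel image:
	 * fixed header, then a table of fixed-size per-device records that
	 * begins 'dev_info_offset' bytes past the header.
	 */
	static size_t dev_info_slot_offset(size_t header_bytes,
					   uint32_t dev_info_offset,
					   uint32_t record_bytes,
					   unsigned int devix)
	{
		return header_bytes + dev_info_offset +
		       (size_t)record_bytes * devix;
	}

	/* Copy one record into the mapped channel at the computed offset. */
	static void write_record(uint8_t *chan_base, size_t off,
				 const void *rec, size_t rec_bytes)
	{
		memcpy(chan_base + off, rec, rec_bytes);
	}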
@@ -256,13 +256,13 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
int ret;
struct device *vbus;
- vbus = kzalloc(sizeof(struct device), GFP_ATOMIC);
+ vbus = kzalloc(sizeof(*vbus), GFP_ATOMIC);
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!vbus)
return 0;
- dev_set_name(vbus, "vbus%d", addparams->busNo);
+ dev_set_name(vbus, "vbus%d", addparams->bus_no);
vbus->release = virtpci_bus_release;
vbus->parent = &virtpci_rootbus_device; /* root bus is parent */
vbus->bus = &virtpci_bus_type; /* bus type */
@@ -279,11 +279,12 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
}
- write_vbus_chpInfo(vbus->platform_data /* chanptr */ ,
- &Chipset_DriverInfo);
- write_vbus_busInfo(vbus->platform_data /* chanptr */ , &Bus_DriverInfo);
+ write_vbus_chp_info(vbus->platform_data /* chanptr */ ,
+ &chipset_driver_info);
+ write_vbus_bus_info(vbus->platform_data /* chanptr */ ,
+ &bus_driver_info);
LOGINF("Added vbus %d; device %s created successfully\n",
- addparams->busNo, BUS_ID(vbus));
+ addparams->bus_no, BUS_ID(vbus));
POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
return 1;
}
@@ -293,26 +294,15 @@ static int add_vbus(struct add_vbus_guestpart *addparams)
*/
#define GET_SCSIADAPINFO_FROM_CHANPTR(chanptr) { \
memcpy_fromio(&scsi.wwnn, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ &((struct spar_io_channel_protocol __iomem *) \
chanptr)->vhba.wwnn, \
sizeof(struct vhba_wwnn)); \
memcpy_fromio(&scsi.max, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
+ &((struct spar_io_channel_protocol __iomem *) \
chanptr)->vhba.max, \
sizeof(struct vhba_config_max)); \
}
-/* find bus device with the busid that matches - match_busid matches bus_id */
-#define GET_BUS_DEV(busno) { \
- sprintf(busid, "vbus%d", busno); \
- vbus = bus_find_device(&virtpci_bus_type, NULL, \
- (void *)busid, match_busid); \
- if (!vbus) { \
- LOGERR("**** FAILED to find vbus %s\n", busid); \
- return 0; \
- } \
-}
-
/* adds a vhba
* returns 0 failure, 1 success,
*/
@@ -325,7 +315,7 @@ static int add_vhba(struct add_virt_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!WAIT_FOR_IO_CHANNEL
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) {
LOGERR("Timed out. Channel not ready\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
@@ -333,7 +323,14 @@ static int add_vhba(struct add_virt_guestpart *addparams)
GET_SCSIADAPINFO_FROM_CHANPTR(addparams->chanptr);
- GET_BUS_DEV(addparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", addparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
LOGINF("Adding vhba wwnn:%x:%x config:%d-%d-%d-%d chanptr:%p\n",
scsi.wwnn.wwnn1, scsi.wwnn.wwnn2,
@@ -347,7 +344,6 @@ static int add_vhba(struct add_virt_guestpart *addparams)
POSTCODE_SEVERITY_INFO);
}
return i;
-
}
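The open-coded lookup above, and the identical copies in add_vnic, delete_vbus and delete_all_virt further down, could be collapsed into one helper. A hypothetical sketch, not part of this patch, reusing only calls already visible here:

	/* Hypothetical helper: look up the vbus device named "vbus<bus_no>";
	 * returns NULL (after logging) if it is not found.
	 */
	static struct device *find_vbus_device(u32 bus_no)
	{
		unsigned char busid[BUS_ID_SIZE];
		struct device *vbus;

		sprintf(busid, "vbus%d", bus_no);
		vbus = bus_find_device(&virtpci_bus_type, NULL,
				       (void *)busid, match_busid);
		if (!vbus)
			LOGERR("**** FAILED to find vbus %s\n", busid);
		return vbus;
	}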
/* for CHANSOCK macaddr is AUTO-GENERATED; for normal channels,
@@ -355,17 +351,17 @@ static int add_vhba(struct add_virt_guestpart *addparams)
*/
#define GET_NETADAPINFO_FROM_CHANPTR(chanptr) { \
memcpy_fromio(net.mac_addr, \
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.macaddr, \
+ ((struct spar_io_channel_protocol __iomem *) \
+ chanptr)->vnic.macaddr, \
MAX_MACADDR_LEN); \
net.num_rcv_bufs = \
- readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.num_rcv_bufs); \
- net.mtu = readl(&((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.mtu); \
- memcpy_fromio(&net.zoneGuid, \
- &((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) \
- chanptr)->vnic.zoneGuid, \
+ readl(&((struct spar_io_channel_protocol __iomem *)\
+ chanptr)->vnic.num_rcv_bufs); \
+ net.mtu = readl(&((struct spar_io_channel_protocol __iomem *) \
+ chanptr)->vnic.mtu); \
+ memcpy_fromio(&net.zone_uuid, \
+ &((struct spar_io_channel_protocol __iomem *)\
+ chanptr)->vnic.zone_uuid, \
sizeof(uuid_le)); \
}
@@ -382,7 +378,7 @@ add_vnic(struct add_virt_guestpart *addparams)
POSTCODE_LINUX_2(VPCI_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (!WAIT_FOR_IO_CHANNEL
- ((ULTRA_IO_CHANNEL_PROTOCOL __iomem *) addparams->chanptr)) {
+ ((struct spar_io_channel_protocol __iomem *)addparams->chanptr)) {
LOGERR("Timed out, channel not ready\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return 0;
@@ -390,12 +386,19 @@ add_vnic(struct add_virt_guestpart *addparams)
GET_NETADAPINFO_FROM_CHANPTR(addparams->chanptr);
- GET_BUS_DEV(addparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", addparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
LOGINF("Adding vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x rcvbufs:%d mtu:%d chanptr:%p%pUL\n",
- net.mac_addr[0], net.mac_addr[1], net.mac_addr[2], net.mac_addr[3],
- net.mac_addr[4], net.mac_addr[5], net.num_rcv_bufs, net.mtu,
- addparams->chanptr, &net.zoneGuid);
+ net.mac_addr[0], net.mac_addr[1], net.mac_addr[2],
+ net.mac_addr[3], net.mac_addr[4], net.mac_addr[5],
+ net.num_rcv_bufs, net.mtu, addparams->chanptr, &net.zone_uuid);
i = virtpci_device_add(vbus, VIRTNIC_TYPE, addparams, NULL, &net);
if (i) {
LOGINF("Added vnic macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -417,7 +420,15 @@ delete_vbus(struct del_vbus_guestpart *delparams)
struct device *vbus;
unsigned char busid[BUS_ID_SIZE];
- GET_BUS_DEV(delparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", delparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
+
/* ensure that bus has no devices? -- TBD */
LOGINF("Deleting %s\n", BUS_ID(vbus));
if (delete_vbus_device(vbus, NULL))
@@ -430,9 +441,9 @@ static int
delete_vbus_device(struct device *vbus, void *data)
{
int checkforroot = (data != NULL);
- struct device *pDev = &virtpci_rootbus_device;
+ struct device *dev = &virtpci_rootbus_device;
- if ((checkforroot) && match_busid(vbus, (void *) BUS_ID(pDev))) {
+ if ((checkforroot) && match_busid(vbus, (void *)BUS_ID(dev))) {
/* skip it - don't delete root bus */
LOGINF("skipping root bus\n");
return 0; /* pretend no error */
@@ -590,10 +601,10 @@ static void delete_all(void)
struct virtpci_dev *tmpvpcidev, *nextvpcidev;
/* delete the entire vhba/vnic list in one shot */
- write_lock_irqsave(&VpcidevListLock, flags);
- tmpvpcidev = VpcidevListHead;
- VpcidevListHead = NULL;
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ tmpvpcidev = vpcidev_list_head;
+ vpcidev_list_head = NULL;
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
/* delete one vhba/vnic at a time */
while (tmpvpcidev) {
@@ -607,24 +618,32 @@ static void delete_all(void)
/* now delete each vbus */
if (bus_for_each_dev
- (&virtpci_bus_type, NULL, (void *) 1, delete_vbus_device))
+ (&virtpci_bus_type, NULL, (void *)1, delete_vbus_device))
LOGERR("delete of all vbus failed\n");
}
/* deletes all vnics or vhbas
* returns 0 failure, 1 success,
*/
-static int delete_all_virt(VIRTPCI_DEV_TYPE devtype, struct del_vbus_guestpart *delparams)
+static int delete_all_virt(enum virtpci_dev_type devtype,
+ struct del_vbus_guestpart *delparams)
{
int i;
unsigned char busid[BUS_ID_SIZE];
struct device *vbus;
- GET_BUS_DEV(delparams->bus_no);
+ /* find bus device with the busid that matches match_busid */
+ sprintf(busid, "vbus%d", delparams->bus_no);
+ vbus = bus_find_device(&virtpci_bus_type, NULL,
+ (void *)busid, match_busid);
+ if (!vbus) {
+ LOGERR("**** FAILED to find vbus %s\n", busid);
+ return 0;
+ }
if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
LOGERR("**** FAILED to delete all devices; devtype:%d not vhba:%d or vnic:%d\n",
- devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
return 0;
}
@@ -694,10 +713,10 @@ virtpci_match_device(const struct pci_device_id *ids,
{
while (ids->vendor || ids->subvendor || ids->class_mask) {
DBGINF("ids->vendor:%x dev->vendor:%x ids->device:%x dev->device:%x\n",
- ids->vendor, dev->vendor, ids->device, dev->device);
+ ids->vendor, dev->vendor, ids->device, dev->device);
- if ((ids->vendor == dev->vendor)
- && (ids->device == dev->device))
+ if ((ids->vendor == dev->vendor) &&
+ (ids->device == dev->device))
return ids;
ids++;
@@ -752,15 +771,15 @@ static int virtpci_device_resume(struct device *dev)
/* For a child device just created on a client bus, fill in
* information about the driver that is controlling this device into
- * the the appropriate slot within the vbus channel of the bus
+ * the appropriate slot within the vbus channel of the bus
* instance.
*/
-static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
- struct virtpci_driver *virtpcidrv)
+static void fix_vbus_dev_info(struct device *dev, int dev_no, int dev_type,
+ struct virtpci_driver *virtpcidrv)
{
struct device *vbus;
- void *pChan;
- ULTRA_VBUS_DEVICEINFO devInfo;
+ void *chan;
+ struct ultra_vbus_deviceinfo dev_info;
const char *stype;
if (!dev) {
@@ -776,12 +795,12 @@ static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
LOGERR("%s dev has no parent bus", __func__);
return;
}
- pChan = vbus->platform_data;
- if (!pChan) {
+ chan = vbus->platform_data;
+ if (!chan) {
LOGERR("%s dev bus has no channel", __func__);
return;
}
- switch (devType) {
+ switch (dev_type) {
case PCI_DEVICE_ID_VIRTHBA:
stype = "vHBA";
break;
@@ -792,17 +811,17 @@ static void fix_vbus_devInfo(struct device *dev, int devNo, int devType,
stype = "unknown";
break;
}
- bus_device_info_init(&devInfo, stype,
- virtpcidrv->name,
- virtpcidrv->version,
- virtpcidrv->vertag);
- write_vbus_devInfo(pChan, &devInfo, devNo);
+ bus_device_info_init(&dev_info, stype,
+ virtpcidrv->name,
+ virtpcidrv->version,
+ virtpcidrv->vertag);
+ write_vbus_dev_info(chan, &dev_info, dev_no);
/* Re-write bus+chipset info, because it is possible that this
* was previously written by our good counterpart, visorbus.
*/
- write_vbus_chpInfo(pChan, &Chipset_DriverInfo);
- write_vbus_busInfo(pChan, &Bus_DriverInfo);
+ write_vbus_chp_info(chan, &chipset_driver_info);
+ write_vbus_bus_info(chan, &bus_driver_info);
}
/* This function is called to query the existence of a specific device
@@ -842,13 +861,14 @@ static int virtpci_device_probe(struct device *dev)
*/
error = virtpcidrv->probe(virtpcidev, id);
if (!error) {
- fix_vbus_devInfo(dev, virtpcidev->deviceNo,
- virtpcidev->device, virtpcidrv);
+ fix_vbus_dev_info(dev, virtpcidev->device_no,
+ virtpcidev->device, virtpcidrv);
virtpcidev->mydriver = virtpcidrv;
POSTCODE_LINUX_2(VPCI_PROBE_EXIT_PC,
POSTCODE_SEVERITY_INFO);
- } else
+ } else {
put_device(dev);
+ }
}
POSTCODE_LINUX_2(VPCI_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
return error; /* -ENODEV for probe failure */
@@ -896,17 +916,20 @@ static void virtpci_bus_release(struct device *dev)
/* Adapter functions */
/*****************************************************/
+/* scsi is expected to be NULL for VNIC add
+ * net is expected to be NULL for VHBA add
+ */
static int virtpci_device_add(struct device *parentbus, int devtype,
struct add_virt_guestpart *addparams,
- struct scsi_adap_info *scsi, /* NULL for VNIC add */
- struct net_adap_info *net /* NULL for VHBA add */)
+ struct scsi_adap_info *scsi,
+ struct net_adap_info *net)
{
struct virtpci_dev *virtpcidev = NULL;
struct virtpci_dev *tmpvpcidev = NULL, *prev;
unsigned long flags;
int ret;
- ULTRA_IO_CHANNEL_PROTOCOL __iomem *pIoChan = NULL;
- struct device *pDev;
+ struct spar_io_channel_protocol __iomem *io_chan = NULL;
+ struct device *dev;
LOGINF("virtpci_device_add parentbus:%p chanptr:%p\n", parentbus,
addparams->chanptr);
@@ -915,14 +938,14 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
if ((devtype != VIRTHBA_TYPE) && (devtype != VIRTNIC_TYPE)) {
LOGERR("**** FAILED to add device; devtype:%d not vhba:%d or vnic:%d\n",
- devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
+ devtype, VIRTHBA_TYPE, VIRTNIC_TYPE);
POSTCODE_LINUX_3(VPCI_CREATE_FAILURE_PC, devtype,
POSTCODE_SEVERITY_ERR);
return 0;
}
/* add a Virtual Device */
- virtpcidev = kzalloc(sizeof(struct virtpci_dev), GFP_ATOMIC);
+ virtpcidev = kzalloc(sizeof(*virtpcidev), GFP_ATOMIC);
if (virtpcidev == NULL) {
LOGERR("can't add device - malloc FALLED\n");
POSTCODE_LINUX_2(MALLOC_FAILURE_PC, POSTCODE_SEVERITY_ERR);
@@ -939,14 +962,14 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
virtpcidev->net = *net;
}
virtpcidev->vendor = PCI_VENDOR_ID_UNISYS;
- virtpcidev->busNo = addparams->bus_no;
- virtpcidev->deviceNo = addparams->device_no;
+ virtpcidev->bus_no = addparams->bus_no;
+ virtpcidev->device_no = addparams->device_no;
virtpcidev->queueinfo.chan = addparams->chanptr;
virtpcidev->queueinfo.send_int_if_needed = NULL;
/* Set up safe queue... */
- pIoChan = (ULTRA_IO_CHANNEL_PROTOCOL __iomem *)
+ io_chan = (struct spar_io_channel_protocol __iomem *)
virtpcidev->queueinfo.chan;
virtpcidev->intr = addparams->intr;
@@ -962,8 +985,8 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
/* add the vhba/vnic to virtpci device list - but check for
* duplicate wwnn/macaddr first
*/
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead; tmpvpcidev;
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head; tmpvpcidev;
tmpvpcidev = tmpvpcidev->next) {
if (devtype == VIRTHBA_TYPE) {
if ((tmpvpcidev->scsi.wwnn.wwnn1 == scsi->wwnn.wwnn1) &&
@@ -984,7 +1007,7 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
/* found a vhba/vnic already in the list with same
* wwnn or macaddr - reject add
*/
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
kfree(virtpcidev);
LOGERR("**** FAILED vhba/vnic already exists in the list\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
@@ -992,26 +1015,26 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
}
/* add it at the head */
- if (!VpcidevListHead)
- VpcidevListHead = virtpcidev;
- else {
+ if (!vpcidev_list_head) {
+ vpcidev_list_head = virtpcidev;
+ } else {
/* insert virtpcidev at the head of our linked list of
* vpcidevs
*/
- virtpcidev->next = VpcidevListHead;
- VpcidevListHead = virtpcidev;
+ virtpcidev->next = vpcidev_list_head;
+ vpcidev_list_head = virtpcidev;
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
/* Must transition channel to ATTACHED state BEFORE
* registering the device, because polling of the channel
* queues can begin at any time after device_register().
*/
- pDev = &virtpcidev->generic_dev;
- ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
- BUS_ID(pDev),
- CHANNELCLI_ATTACHED, NULL);
+ dev = &virtpcidev->generic_dev;
+ SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(dev),
+ CHANNELCLI_ATTACHED, NULL);
/* don't register until device has been added to
* list. Otherwise, a device_unregister from this function can
@@ -1031,24 +1054,24 @@ static int virtpci_device_add(struct device *parentbus, int devtype,
*/
if (ret) {
LOGERR("device_register returned %d\n", ret);
- pDev = &virtpcidev->generic_dev;
- ULTRA_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
- BUS_ID(pDev),
- CHANNELCLI_DETACHED, NULL);
+ dev = &virtpcidev->generic_dev;
+ SPAR_CHANNEL_CLIENT_TRANSITION(addparams->chanptr,
+ BUS_ID(dev),
+ CHANNELCLI_DETACHED, NULL);
/* remove virtpcidev, the one we just added, from the list */
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead, prev = NULL;
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head, prev = NULL;
tmpvpcidev;
prev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev == virtpcidev) {
if (prev)
prev->next = tmpvpcidev->next;
else
- VpcidevListHead = tmpvpcidev->next;
+ vpcidev_list_head = tmpvpcidev->next;
break;
}
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
kfree(virtpcidev);
return 0;
}
@@ -1080,9 +1103,9 @@ static int virtpci_device_serverdown(struct device *parentbus,
}
/* find the vhba or vnic in virtpci device list */
- write_lock_irqsave(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL;
(tmpvpcidev && !found);
prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev->devtype != devtype)
@@ -1110,7 +1133,7 @@ static int virtpci_device_serverdown(struct device *parentbus,
vpcidriver = tmpvpcidev->mydriver;
rc = vpcidriver->suspend(tmpvpcidev, 0);
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!found) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1139,9 +1162,9 @@ static int virtpci_device_serverup(struct device *parentbus,
}
/* find the vhba or vnic in virtpci device list */
- write_lock_irqsave(&VpcidevListLock, flags);
+ write_lock_irqsave(&vpcidev_list_lock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL;
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL;
(tmpvpcidev && !found);
prevvpcidev = tmpvpcidev, tmpvpcidev = tmpvpcidev->next) {
if (tmpvpcidev->devtype != devtype)
@@ -1172,12 +1195,13 @@ static int virtpci_device_serverup(struct device *parentbus,
* ever have a bus that contains NO devices, since we
* would never even get here in that case.
*/
- fix_vbus_devInfo(&tmpvpcidev->generic_dev, tmpvpcidev->deviceNo,
- tmpvpcidev->device, vpcidriver);
+ fix_vbus_dev_info(&tmpvpcidev->generic_dev,
+ tmpvpcidev->device_no,
+ tmpvpcidev->device, vpcidriver);
rc = vpcidriver->resume(tmpvpcidev);
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!found) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1218,8 +1242,8 @@ static int virtpci_device_del(struct device *parentbus,
* device_unregister after we release the lock; otherwise we
* encounter "schedule while atomic"
*/
- write_lock_irqsave(&VpcidevListLock, flags);
- for (tmpvpcidev = VpcidevListHead, prevvpcidev = NULL; tmpvpcidev;) {
+ write_lock_irqsave(&vpcidev_list_lock, flags);
+ for (tmpvpcidev = vpcidev_list_head, prevvpcidev = NULL; tmpvpcidev;) {
if (tmpvpcidev->devtype != devtype)
DEL_CONTINUE;
@@ -1253,7 +1277,7 @@ static int virtpci_device_del(struct device *parentbus,
/* not at head */
prevvpcidev->next = tmpvpcidev->next;
else
- VpcidevListHead = tmpvpcidev->next;
+ vpcidev_list_head = tmpvpcidev->next;
/* add it to our deletelist */
tmpvpcidev->next = dellist;
@@ -1268,9 +1292,9 @@ static int virtpci_device_del(struct device *parentbus,
if (prevvpcidev)
tmpvpcidev = prevvpcidev->next;
else
- tmpvpcidev = VpcidevListHead;
+ tmpvpcidev = vpcidev_list_head;
}
- write_unlock_irqrestore(&VpcidevListLock, flags);
+ write_unlock_irqrestore(&vpcidev_list_lock, flags);
if (!all && (count == 0)) {
LOGERR("**** FAILED to find vhba/vnic in the list\n");
@@ -1425,7 +1449,7 @@ static int print_vbus(struct device *vbus, void *data)
}
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
ssize_t bytes_read = 0;
int str_pos = 0;
@@ -1446,18 +1470,19 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
printparam.buf = vbuf;
printparam.len = &len;
if (bus_for_each_dev(&virtpci_bus_type, NULL,
- (void *) &printparam, print_vbus))
+ (void *)&printparam, print_vbus))
LOGERR("Failed to find bus\n");
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"\n Virtual PCI devices\n");
- read_lock_irqsave(&VpcidevListLock, flags);
- tmpvpcidev = VpcidevListHead;
+ read_lock_irqsave(&vpcidev_list_lock, flags);
+ tmpvpcidev = vpcidev_list_head;
while (tmpvpcidev) {
if (tmpvpcidev->devtype == VIRTHBA_TYPE) {
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"[%d:%d] VHba:%08x:%08x max-config:%d-%d-%d-%d",
- tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->bus_no,
+ tmpvpcidev->device_no,
tmpvpcidev->scsi.wwnn.wwnn1,
tmpvpcidev->scsi.wwnn.wwnn2,
tmpvpcidev->scsi.max.max_channel,
@@ -1467,7 +1492,8 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
} else {
str_pos += scnprintf(vbuf + str_pos, len - str_pos,
"[%d:%d] VNic:%02x:%02x:%02x:%02x:%02x:%02x num_rcv_bufs:%d mtu:%d",
- tmpvpcidev->busNo, tmpvpcidev->deviceNo,
+ tmpvpcidev->bus_no,
+ tmpvpcidev->device_no,
tmpvpcidev->net.mac_addr[0],
tmpvpcidev->net.mac_addr[1],
tmpvpcidev->net.mac_addr[2],
@@ -1482,7 +1508,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
tmpvpcidev->queueinfo.chan);
tmpvpcidev = tmpvpcidev->next;
}
- read_unlock_irqrestore(&VpcidevListLock, flags);
+ read_unlock_irqrestore(&vpcidev_list_lock, flags);
str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
@@ -1498,7 +1524,6 @@ static int __init virtpci_mod_init(void)
{
int ret;
-
if (!unisys_spar_platform)
return -ENODEV;
@@ -1515,8 +1540,8 @@ static int __init virtpci_mod_init(void)
return ret;
}
DBGINF("bus_register successful\n");
- bus_device_info_init(&Bus_DriverInfo, "clientbus", "virtpci",
- VERSION, NULL);
+ bus_device_info_init(&bus_driver_info, "clientbus", "virtpci",
+ VERSION, NULL);
/* create a root bus used to parent all the virtpci buses. */
ret = device_register(&virtpci_rootbus_device);
@@ -1529,8 +1554,8 @@ static int __init virtpci_mod_init(void)
}
DBGINF("device_register successful ret:%x\n", ret);
- if (!uisctrl_register_req_handler(2, (void *) &virtpci_ctrlchan_func,
- &Chipset_DriverInfo)) {
+ if (!uisctrl_register_req_handler(2, (void *)&virtpci_ctrlchan_func,
+ &chipset_driver_info)) {
LOGERR("uisctrl_register_req_handler ****FAILED.\n");
POSTCODE_LINUX_2(VPCI_CREATE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
device_unregister(&virtpci_rootbus_device);
@@ -1539,11 +1564,11 @@ static int __init virtpci_mod_init(void)
}
LOGINF("successfully registered virtpci_ctrlchan_func (0x%p) as callback.\n",
- (void *) &virtpci_ctrlchan_func);
+ (void *)&virtpci_ctrlchan_func);
/* create debugfs directory and info file inside. */
virtpci_debugfs_dir = debugfs_create_dir("virtpci", NULL);
debugfs_create_file("info", S_IRUSR, virtpci_debugfs_dir,
- NULL, &debugfs_info_fops);
+ NULL, &debugfs_info_fops);
LOGINF("Leaving\n");
POSTCODE_LINUX_2(VPCI_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
return 0;
@@ -1561,7 +1586,6 @@ static void __exit virtpci_mod_exit(void)
bus_unregister(&virtpci_bus_type);
debugfs_remove_recursive(virtpci_debugfs_dir);
LOGINF("Leaving\n");
-
}
module_init(virtpci_mod_init);
diff --git a/drivers/staging/unisys/virtpci/virtpci.h b/drivers/staging/unisys/virtpci/virtpci.h
index 6e26956c79f4..9d85f55e8169 100644
--- a/drivers/staging/unisys/virtpci/virtpci.h
+++ b/drivers/staging/unisys/virtpci/virtpci.h
@@ -42,25 +42,25 @@ struct net_adap_info {
u8 mac_addr[MAX_MACADDR_LEN];
int num_rcv_bufs;
unsigned mtu;
- uuid_le zoneGuid;
+ uuid_le zone_uuid;
};
-typedef enum {
+enum virtpci_dev_type {
VIRTHBA_TYPE = 0,
VIRTNIC_TYPE = 1,
VIRTBUS_TYPE = 6,
-} VIRTPCI_DEV_TYPE;
+};
struct virtpci_dev {
- VIRTPCI_DEV_TYPE devtype; /* indicates type of the
+ enum virtpci_dev_type devtype; /* indicates type of the
* virtual pci device */
struct virtpci_driver *mydriver; /* which driver has allocated
* this device */
unsigned short vendor; /* vendor id for device */
unsigned short device; /* device id for device */
- u32 busNo; /* number of bus on which device exists */
- u32 deviceNo; /* device's number on the bus */
- struct InterruptInfo intr; /* interrupt info */
+ u32 bus_no; /* number of bus on which device exists */
+ u32 device_no; /* device's number on the bus */
+ struct irq_info intr; /* interrupt info */
struct device generic_dev; /* generic device */
union {
struct scsi_adap_info scsi;
@@ -80,15 +80,15 @@ struct virtpci_driver {
const struct pci_device_id *id_table; /* must be non-NULL for probe
* to be called */
int (*probe)(struct virtpci_dev *dev,
- const struct pci_device_id *id); /* device inserted */
+ const struct pci_device_id *id); /* device inserted */
void (*remove)(struct virtpci_dev *dev); /* Device removed (NULL if
* not a hot-plug capable
* driver) */
int (*suspend)(struct virtpci_dev *dev,
- u32 state); /* Device suspended */
+ u32 state); /* Device suspended */
int (*resume)(struct virtpci_dev *dev); /* Device woken up */
int (*enable_wake)(struct virtpci_dev *dev,
- u32 state, int enable); /* Enable wake event */
+ u32 state, int enable); /* Enable wake event */
struct device_driver core_driver; /* VIRTPCI core fills this in */
};
diff --git a/drivers/staging/unisys/visorchannel/globals.h b/drivers/staging/unisys/visorchannel/globals.h
index 07653b8dea7b..581ed83fe6d0 100644
--- a/drivers/staging/unisys/visorchannel/globals.h
+++ b/drivers/staging/unisys/visorchannel/globals.h
@@ -25,5 +25,4 @@
#define MYDRVNAME "visorchannel"
-
#endif
diff --git a/drivers/staging/unisys/visorchannel/visorchannel.h b/drivers/staging/unisys/visorchannel/visorchannel.h
index 9a4d7b6755d1..5061edff959a 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel.h
+++ b/drivers/staging/unisys/visorchannel/visorchannel.h
@@ -66,7 +66,7 @@ char *visorchannel_id(VISORCHANNEL *channel, char *s);
char *visorchannel_zoneid(VISORCHANNEL *channel, char *s);
u64 visorchannel_get_clientpartition(VISORCHANNEL *channel);
uuid_le visorchannel_get_uuid(VISORCHANNEL *channel);
-MEMREGION *visorchannel_get_memregion(VISORCHANNEL *channel);
+struct memregion *visorchannel_get_memregion(VISORCHANNEL *channel);
char *visorchannel_uuid_id(uuid_le *guid, char *s);
void visorchannel_debug(VISORCHANNEL *channel, int nQueues,
struct seq_file *seq, u32 off);
diff --git a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
index 01a44c553500..36559d5fa673 100644
--- a/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
+++ b/drivers/staging/unisys/visorchannel/visorchannel_funcs.c
@@ -29,8 +29,8 @@
#define MYDRVNAME "visorchannel"
struct VISORCHANNEL_Tag {
- MEMREGION *memregion; /* from visor_memregion_create() */
- CHANNEL_HEADER chan_hdr;
+ struct memregion *memregion; /* from visor_memregion_create() */
+ struct channel_header chan_hdr;
uuid_le guid;
ulong size;
BOOL needs_lock;
@@ -38,10 +38,10 @@ struct VISORCHANNEL_Tag {
spinlock_t remove_lock;
struct {
- SIGNAL_QUEUE_HEADER req_queue;
- SIGNAL_QUEUE_HEADER rsp_queue;
- SIGNAL_QUEUE_HEADER event_queue;
- SIGNAL_QUEUE_HEADER ack_queue;
+ struct signal_queue_header req_queue;
+ struct signal_queue_header rsp_queue;
+ struct signal_queue_header event_queue;
+ struct signal_queue_header ack_queue;
} safe_uis_queue;
};
@@ -60,7 +60,7 @@ visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
if (p == NULL) {
ERRDRV("allocation failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
p->memregion = NULL;
p->needs_lock = needs_lock;
@@ -70,39 +70,39 @@ visorchannel_create_guts(HOSTADDRESS physaddr, ulong channelBytes,
/* prepare chan_hdr (abstraction to read/write channel memory) */
if (parent == NULL)
p->memregion =
- visor_memregion_create(physaddr, sizeof(CHANNEL_HEADER));
+ visor_memregion_create(physaddr,
+ sizeof(struct channel_header));
else
p->memregion =
visor_memregion_create_overlapped(parent->memregion,
- off,
- sizeof(CHANNEL_HEADER));
+ off, sizeof(struct channel_header));
if (p->memregion == NULL) {
ERRDRV("visor_memregion_create failed failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
if (visor_memregion_read(p->memregion, 0, &p->chan_hdr,
- sizeof(CHANNEL_HEADER)) < 0) {
+ sizeof(struct channel_header)) < 0) {
ERRDRV("visor_memregion_read failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
if (channelBytes == 0)
/* we had better be a CLIENT of this channel */
- channelBytes = (ulong) p->chan_hdr.Size;
+ channelBytes = (ulong)p->chan_hdr.size;
if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
/* we had better be a CLIENT of this channel */
- guid = p->chan_hdr.Type;
+ guid = p->chan_hdr.chtype;
if (visor_memregion_resize(p->memregion, channelBytes) < 0) {
ERRDRV("visor_memregion_resize failed: (status=0)\n");
rc = NULL;
- goto Away;
+ goto cleanup;
}
p->size = channelBytes;
p->guid = guid;
rc = p;
-Away:
+cleanup:
if (rc == NULL) {
if (p != NULL) {
@@ -194,14 +194,14 @@ EXPORT_SYMBOL_GPL(visorchannel_id);
char *
visorchannel_zoneid(VISORCHANNEL *channel, char *s)
{
- return visorchannel_uuid_id(&channel->chan_hdr.ZoneGuid, s);
+ return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s);
}
EXPORT_SYMBOL_GPL(visorchannel_zoneid);
HOSTADDRESS
visorchannel_get_clientpartition(VISORCHANNEL *channel)
{
- return channel->chan_hdr.PartitionHandle;
+ return channel->chan_hdr.partition_handle;
}
EXPORT_SYMBOL_GPL(visorchannel_get_clientpartition);
@@ -212,7 +212,7 @@ visorchannel_get_uuid(VISORCHANNEL *channel)
}
EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
-MEMREGION *
+struct memregion *
visorchannel_get_memregion(VISORCHANNEL *channel)
{
return channel->memregion;
@@ -225,8 +225,11 @@ visorchannel_read(VISORCHANNEL *channel, ulong offset,
{
int rc = visor_memregion_read(channel->memregion, offset,
local, nbytes);
- if ((rc >= 0) && (offset == 0) && (nbytes >= sizeof(CHANNEL_HEADER)))
- memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ if ((rc >= 0) && (offset == 0) &&
+ (nbytes >= sizeof(struct channel_header))) {
+ memcpy(&channel->chan_hdr, local,
+ sizeof(struct channel_header));
+ }
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_read);
@@ -235,8 +238,9 @@ int
visorchannel_write(VISORCHANNEL *channel, ulong offset,
void *local, ulong nbytes)
{
- if (offset == 0 && nbytes >= sizeof(CHANNEL_HEADER))
- memcpy(&channel->chan_hdr, local, sizeof(CHANNEL_HEADER));
+ if (offset == 0 && nbytes >= sizeof(struct channel_header))
+ memcpy(&channel->chan_hdr, local,
+ sizeof(struct channel_header));
return visor_memregion_write(channel->memregion, offset, local, nbytes);
}
EXPORT_SYMBOL_GPL(visorchannel_write);
@@ -251,7 +255,7 @@ visorchannel_clear(VISORCHANNEL *channel, ulong offset, u8 ch, ulong nbytes)
if (buf == NULL) {
ERRDRV("%s failed memory allocation", __func__);
- goto Away;
+ goto cleanup;
}
memset(buf, ch, bufsize);
while (nbytes > 0) {
@@ -264,14 +268,14 @@ visorchannel_clear(VISORCHANNEL *channel, ulong offset, u8 ch, ulong nbytes)
buf, thisbytes);
if (x < 0) {
rc = x;
- goto Away;
+ goto cleanup;
}
written += thisbytes;
nbytes -= thisbytes;
}
rc = 0;
-Away:
+cleanup:
if (buf != NULL) {
vfree(buf);
buf = NULL;
@@ -283,7 +287,7 @@ EXPORT_SYMBOL_GPL(visorchannel_clear);
void __iomem *
visorchannel_get_header(VISORCHANNEL *channel)
{
- return (void __iomem *) &(channel->chan_hdr);
+ return (void __iomem *)&channel->chan_hdr;
}
EXPORT_SYMBOL_GPL(visorchannel_get_header);
@@ -291,14 +295,15 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
* channel header
*/
#define SIG_QUEUE_OFFSET(chan_hdr, q) \
- ((chan_hdr)->oChannelSpace + ((q) * sizeof(SIGNAL_QUEUE_HEADER)))
+ ((chan_hdr)->ch_space_offset + \
+ ((q) * sizeof(struct signal_queue_header)))
/** Return offset of a specific queue entry (data) from the beginning of a
* channel header
*/
#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
- (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->oSignalBase + \
- ((slot) * (sig_hdr)->SignalSize))
+ (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
+ ((slot) * (sig_hdr)->signal_size))
/** Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
* into host memory
@@ -306,39 +311,42 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
(visor_memregion_write(channel->memregion, \
SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
- offsetof(SIGNAL_QUEUE_HEADER, FIELD), \
+ offsetof(struct signal_queue_header, FIELD),\
&((sig_hdr)->FIELD), \
sizeof((sig_hdr)->FIELD)) >= 0)
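SIG_WRITE_FIELD writes one header field back to host memory by adding offsetof() into the local copy to the queue's base offset. A minimal user-space sketch of the idea, with a hypothetical two-field header and a byte image standing in for the memregion:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct queue_header {		/* hypothetical, two fields only */
		uint32_t head;
		uint32_t tail;
	};

	/* Write just one field of a local header copy back into the byte
	 * image at queue_off + offsetof(field), leaving the rest untouched.
	 */
	#define WRITE_FIELD(image, queue_off, hdr, field)		       \
		memcpy((image) + (queue_off) +				       \
		       offsetof(struct queue_header, field),		       \
		       &(hdr)->field, sizeof((hdr)->field))

	static void example(uint8_t *image, size_t queue_off)
	{
		struct queue_header hdr = { .head = 3, .tail = 1 };

		WRITE_FIELD(image, queue_off, &hdr, tail); /* only tail */
	}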
static BOOL
sig_read_header(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr)
+ struct signal_queue_header *sig_hdr)
{
BOOL rc = FALSE;
- if (channel->chan_hdr.oChannelSpace < sizeof(CHANNEL_HEADER)) {
+ if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header)) {
ERRDRV("oChannelSpace too small: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
if (visor_memregion_read(channel->memregion,
SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
- sig_hdr, sizeof(SIGNAL_QUEUE_HEADER)) < 0) {
+ sig_hdr,
+ sizeof(struct signal_queue_header)) < 0) {
ERRDRV("queue=%d SIG_QUEUE_OFFSET=%d",
queue, (int)SIG_QUEUE_OFFSET(&channel->chan_hdr, queue));
- ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n", rc);
- goto Away;
+ ERRDRV("visor_memregion_read of signal queue failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
return rc;
}
static BOOL
sig_do_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data, BOOL is_write)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data,
+ BOOL is_write)
{
BOOL rc = FALSE;
int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue,
@@ -346,53 +354,55 @@ sig_do_data(VISORCHANNEL *channel, u32 queue,
if (is_write) {
if (visor_memregion_write(channel->memregion,
signal_data_offset,
- data, sig_hdr->SignalSize) < 0) {
- ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n", rc);
- goto Away;
+ data, sig_hdr->signal_size) < 0) {
+ ERRDRV("visor_memregion_write of signal data failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
} else {
if (visor_memregion_read(channel->memregion, signal_data_offset,
- data, sig_hdr->SignalSize) < 0) {
- ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n", rc);
- goto Away;
+ data, sig_hdr->signal_size) < 0) {
+ ERRDRV("visor_memregion_read of signal data failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
}
rc = TRUE;
-Away:
+cleanup:
return rc;
}
static inline BOOL
sig_read_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE);
}
static inline BOOL
sig_write_data(VISORCHANNEL *channel, u32 queue,
- SIGNAL_QUEUE_HEADER *sig_hdr, u32 slot, void *data)
+ struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE);
}
static inline unsigned char
-safe_sig_queue_validate(pSIGNAL_QUEUE_HEADER psafe_sqh,
- pSIGNAL_QUEUE_HEADER punsafe_sqh,
+safe_sig_queue_validate(struct signal_queue_header *psafe_sqh,
+ struct signal_queue_header *punsafe_sqh,
u32 *phead, u32 *ptail)
{
- if ((*phead >= psafe_sqh->MaxSignalSlots)
- || (*ptail >= psafe_sqh->MaxSignalSlots)) {
+ if ((*phead >= psafe_sqh->max_slots) ||
+ (*ptail >= psafe_sqh->max_slots)) {
/* Choose 0 or max, maybe based on current tail value */
*phead = 0;
*ptail = 0;
/* Sync with client as necessary */
- punsafe_sqh->Head = *phead;
- punsafe_sqh->Tail = *ptail;
+ punsafe_sqh->head = *phead;
+ punsafe_sqh->tail = *ptail;
ERRDRV("safe_sig_queue_validate: head = 0x%x, tail = 0x%x, MaxSlots = 0x%x",
- *phead, *ptail, psafe_sqh->MaxSignalSlots);
+ *phead, *ptail, psafe_sqh->max_slots);
return 0;
}
return 1;
@@ -402,41 +412,42 @@ BOOL
visorchannel_signalremove(VISORCHANNEL *channel, u32 queue, void *msg)
{
BOOL rc = FALSE;
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (channel->needs_lock)
spin_lock(&channel->remove_lock);
if (!sig_read_header(channel, queue, &sig_hdr)) {
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- if (sig_hdr.Head == sig_hdr.Tail) {
+ if (sig_hdr.head == sig_hdr.tail) {
rc = FALSE; /* no signals to remove */
- goto Away;
+ goto cleanup;
}
- sig_hdr.Tail = (sig_hdr.Tail + 1) % sig_hdr.MaxSignalSlots;
- if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.Tail, msg)) {
+ sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
+ if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) {
ERRDRV("sig_read_data failed: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
- sig_hdr.NumSignalsReceived++;
+ sig_hdr.num_received++;
/* For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Tail)) {
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) {
ERRDRV("visor_memregion_write of Tail failed: (status=%d)\n",
rc);
- goto Away;
+ goto cleanup;
}
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsReceived)) {
- ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n", rc);
- goto Away;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received)) {
+ ERRDRV("visor_memregion_write of NumSignalsReceived failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
if (channel->needs_lock)
spin_unlock(&channel->remove_lock);
@@ -448,48 +459,50 @@ BOOL
visorchannel_signalinsert(VISORCHANNEL *channel, u32 queue, void *msg)
{
BOOL rc = FALSE;
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (channel->needs_lock)
spin_lock(&channel->insert_lock);
if (!sig_read_header(channel, queue, &sig_hdr)) {
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- sig_hdr.Head = ((sig_hdr.Head + 1) % sig_hdr.MaxSignalSlots);
- if (sig_hdr.Head == sig_hdr.Tail) {
- sig_hdr.NumOverflows++;
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumOverflows)) {
- ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n", rc);
- goto Away;
+ sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
+ if (sig_hdr.head == sig_hdr.tail) {
+ sig_hdr.num_overflows++;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows)) {
+ ERRDRV("visor_memregion_write of NumOverflows failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = FALSE;
- goto Away;
+ goto cleanup;
}
- if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.Head, msg)) {
+ if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg)) {
ERRDRV("sig_write_data failed: (status=%d)\n", rc);
- goto Away;
+ goto cleanup;
}
- sig_hdr.NumSignalsSent++;
+ sig_hdr.num_sent++;
/* For each data field in SIGNAL_QUEUE_HEADER that was modified,
* update host memory.
*/
mb(); /* required for channel synch */
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, Head)) {
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head)) {
ERRDRV("visor_memregion_write of Head failed: (status=%d)\n",
rc);
- goto Away;
+ goto cleanup;
}
- if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, NumSignalsSent)) {
- ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n", rc);
- goto Away;
+ if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent)) {
+ ERRDRV("visor_memregion_write of NumSignalsSent failed: (status=%d)\n",
+ rc);
+ goto cleanup;
}
rc = TRUE;
-Away:
+cleanup:
if (channel->needs_lock)
spin_unlock(&channel->insert_lock);
@@ -497,59 +510,58 @@ Away:
}
EXPORT_SYMBOL_GPL(visorchannel_signalinsert);
-
int
visorchannel_signalqueue_slots_avail(VISORCHANNEL *channel, u32 queue)
{
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
u32 slots_avail, slots_used;
u32 head, tail;
if (!sig_read_header(channel, queue, &sig_hdr))
return 0;
- head = sig_hdr.Head;
- tail = sig_hdr.Tail;
+ head = sig_hdr.head;
+ tail = sig_hdr.tail;
if (head < tail)
- head = head + sig_hdr.MaxSignalSlots;
+ head = head + sig_hdr.max_slots;
slots_used = (head - tail);
- slots_avail = sig_hdr.MaxSignals - slots_used;
- return (int) slots_avail;
+ slots_avail = sig_hdr.max_signals - slots_used;
+ return (int)slots_avail;
}
EXPORT_SYMBOL_GPL(visorchannel_signalqueue_slots_avail);
int
visorchannel_signalqueue_max_slots(VISORCHANNEL *channel, u32 queue)
{
- SIGNAL_QUEUE_HEADER sig_hdr;
+ struct signal_queue_header sig_hdr;
if (!sig_read_header(channel, queue, &sig_hdr))
return 0;
- return (int) sig_hdr.MaxSignals;
+ return (int)sig_hdr.max_signals;
}
EXPORT_SYMBOL_GPL(visorchannel_signalqueue_max_slots);
static void
-sigqueue_debug(SIGNAL_QUEUE_HEADER *q, int which, struct seq_file *seq)
+sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq)
{
seq_printf(seq, "Signal Queue #%d\n", which);
- seq_printf(seq, " VersionId = %lu\n", (ulong) q->VersionId);
- seq_printf(seq, " Type = %lu\n", (ulong) q->Type);
+ seq_printf(seq, " VersionId = %lu\n", (ulong)q->version);
+ seq_printf(seq, " Type = %lu\n", (ulong)q->chtype);
seq_printf(seq, " oSignalBase = %llu\n",
- (long long) q->oSignalBase);
- seq_printf(seq, " SignalSize = %lu\n", (ulong) q->SignalSize);
+ (long long)q->sig_base_offset);
+ seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size);
seq_printf(seq, " MaxSignalSlots = %lu\n",
- (ulong) q->MaxSignalSlots);
- seq_printf(seq, " MaxSignals = %lu\n", (ulong) q->MaxSignals);
+ (ulong)q->max_slots);
+ seq_printf(seq, " MaxSignals = %lu\n", (ulong)q->max_signals);
seq_printf(seq, " FeatureFlags = %-16.16Lx\n",
- (long long) q->FeatureFlags);
+ (long long)q->features);
seq_printf(seq, " NumSignalsSent = %llu\n",
- (long long) q->NumSignalsSent);
+ (long long)q->num_sent);
seq_printf(seq, " NumSignalsReceived = %llu\n",
- (long long) q->NumSignalsReceived);
+ (long long)q->num_received);
seq_printf(seq, " NumOverflows = %llu\n",
- (long long) q->NumOverflows);
- seq_printf(seq, " Head = %lu\n", (ulong) q->Head);
- seq_printf(seq, " Tail = %lu\n", (ulong) q->Tail);
+ (long long)q->num_overflows);
+ seq_printf(seq, " Head = %lu\n", (ulong)q->head);
+ seq_printf(seq, " Tail = %lu\n", (ulong)q->tail);
}
void
@@ -558,9 +570,9 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
{
HOSTADDRESS addr = 0;
ulong nbytes = 0, nbytes_region = 0;
- MEMREGION *memregion = NULL;
- CHANNEL_HEADER hdr;
- CHANNEL_HEADER *phdr = &hdr;
+ struct memregion *memregion = NULL;
+ struct channel_header hdr;
+ struct channel_header *phdr = &hdr;
int i = 0;
int errcode = 0;
@@ -576,7 +588,7 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
addr = visor_memregion_get_physaddr(memregion);
nbytes_region = visor_memregion_get_nbytes(memregion);
errcode = visorchannel_read(channel, off,
- phdr, sizeof(CHANNEL_HEADER));
+ phdr, sizeof(struct channel_header));
if (errcode < 0) {
seq_printf(seq,
"Read of channel header failed with errcode=%d)\n",
@@ -584,39 +596,41 @@ visorchannel_debug(VISORCHANNEL *channel, int nQueues,
if (off == 0) {
phdr = &channel->chan_hdr;
seq_puts(seq, "(following data may be stale)\n");
- } else
+ } else {
return;
+ }
}
- nbytes = (ulong) (phdr->Size);
+ nbytes = (ulong)(phdr->size);
seq_printf(seq, "--- Begin channel @0x%-16.16Lx for 0x%lx bytes (region=0x%lx bytes) ---\n",
addr + off, nbytes, nbytes_region);
- seq_printf(seq, "Type = %pUL\n", &phdr->Type);
- seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->ZoneGuid);
+ seq_printf(seq, "Type = %pUL\n", &phdr->chtype);
+ seq_printf(seq, "ZoneGuid = %pUL\n", &phdr->zone_uuid);
seq_printf(seq, "Signature = 0x%-16.16Lx\n",
- (long long) phdr->Signature);
- seq_printf(seq, "LegacyState = %lu\n", (ulong) phdr->LegacyState);
- seq_printf(seq, "SrvState = %lu\n", (ulong) phdr->SrvState);
- seq_printf(seq, "CliStateBoot = %lu\n", (ulong) phdr->CliStateBoot);
- seq_printf(seq, "CliStateOS = %lu\n", (ulong) phdr->CliStateOS);
- seq_printf(seq, "HeaderSize = %lu\n", (ulong) phdr->HeaderSize);
- seq_printf(seq, "Size = %llu\n", (long long) phdr->Size);
+ (long long)phdr->signature);
+ seq_printf(seq, "LegacyState = %lu\n", (ulong)phdr->legacy_state);
+ seq_printf(seq, "SrvState = %lu\n", (ulong)phdr->srv_state);
+ seq_printf(seq, "CliStateBoot = %lu\n", (ulong)phdr->cli_state_boot);
+ seq_printf(seq, "CliStateOS = %lu\n", (ulong)phdr->cli_state_os);
+ seq_printf(seq, "HeaderSize = %lu\n", (ulong)phdr->header_size);
+ seq_printf(seq, "Size = %llu\n", (long long)phdr->size);
seq_printf(seq, "Features = 0x%-16.16llx\n",
- (long long) phdr->Features);
+ (long long)phdr->features);
seq_printf(seq, "PartitionHandle = 0x%-16.16llx\n",
- (long long) phdr->PartitionHandle);
+ (long long)phdr->partition_handle);
seq_printf(seq, "Handle = 0x%-16.16llx\n",
- (long long) phdr->Handle);
- seq_printf(seq, "VersionId = %lu\n", (ulong) phdr->VersionId);
+ (long long)phdr->handle);
+ seq_printf(seq, "VersionId = %lu\n", (ulong)phdr->version_id);
seq_printf(seq, "oChannelSpace = %llu\n",
- (long long) phdr->oChannelSpace);
- if ((phdr->oChannelSpace == 0) || (errcode < 0))
+ (long long)phdr->ch_space_offset);
+ if ((phdr->ch_space_offset == 0) || (errcode < 0))
;
else
for (i = 0; i < nQueues; i++) {
- SIGNAL_QUEUE_HEADER q;
+ struct signal_queue_header q;
errcode = visorchannel_read(channel,
- off + phdr->oChannelSpace +
+ off +
+ phdr->ch_space_offset +
(i * sizeof(q)),
&q, sizeof(q));
if (errcode < 0) {
@@ -643,15 +657,17 @@ visorchannel_dump_section(VISORCHANNEL *chan, char *s,
fmtbufsize = 100 * COVQ(len, 16);
buf = kmalloc(len, GFP_KERNEL|__GFP_NORETRY);
+ if (!buf)
+ return;
fmtbuf = kmalloc(fmtbufsize, GFP_KERNEL|__GFP_NORETRY);
- if (buf == NULL || fmtbuf == NULL)
- goto Away;
+ if (!fmtbuf)
+ goto fmt_failed;
errcode = visorchannel_read(chan, off, buf, len);
if (errcode < 0) {
ERRDRV("%s failed to read %s from channel errcode=%d",
s, __func__, errcode);
- goto Away;
+ goto read_failed;
}
seq_printf(seq, "channel %s:\n", s);
tbuf = buf;
@@ -663,14 +679,9 @@ visorchannel_dump_section(VISORCHANNEL *chan, char *s,
len -= 16;
}
-Away:
- if (buf != NULL) {
- kfree(buf);
- buf = NULL;
- }
- if (fmtbuf != NULL) {
- kfree(fmtbuf);
- fmtbuf = NULL;
- }
+read_failed:
+ kfree(fmtbuf);
+fmt_failed:
+ kfree(buf);
}
EXPORT_SYMBOL_GPL(visorchannel_dump_section);
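The signalinsert/signalremove paths above are a fixed-slot ring over shared channel memory: insert advances head and refuses to catch up with tail, remove advances tail only when the two differ. A minimal standalone sketch of that arithmetic follows; the demo_* names are invented, only the head/tail/max_slots/max_signals semantics are taken from struct signal_queue_header, and the real driver additionally re-writes the touched header fields to host memory after an mb().

/* Illustrative sketch, not part of the patch: the ring arithmetic used by
 * visorchannel_signalinsert()/signalremove() above, modeled on ordinary
 * memory instead of a visor memregion.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct demo_ring {
	unsigned int max_slots;    /* number of slots in the ring          */
	unsigned int max_signals;  /* usable capacity reported to callers  */
	unsigned int head;         /* slot of the last signal inserted     */
	unsigned int tail;         /* slot of the last signal removed      */
	unsigned int signal_size;  /* bytes per slot                       */
	unsigned char *slots;      /* max_slots * signal_size bytes        */
};

static bool demo_insert(struct demo_ring *r, const void *msg)
{
	unsigned int head = (r->head + 1) % r->max_slots;

	if (head == r->tail)       /* next slot would collide: overflow    */
		return false;
	memcpy(r->slots + (size_t)head * r->signal_size, msg, r->signal_size);
	r->head = head;            /* publish head only after the copy     */
	return true;
}

static bool demo_remove(struct demo_ring *r, void *msg)
{
	unsigned int tail;

	if (r->head == r->tail)    /* nothing queued                       */
		return false;
	tail = (r->tail + 1) % r->max_slots;
	memcpy(msg, r->slots + (size_t)tail * r->signal_size, r->signal_size);
	r->tail = tail;
	return true;
}

static unsigned int demo_slots_avail(const struct demo_ring *r)
{
	unsigned int head = r->head, tail = r->tail;

	if (head < tail)           /* unwrap so the subtraction is valid   */
		head += r->max_slots;
	return r->max_signals - (head - tail);
}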
diff --git a/drivers/staging/unisys/visorchipset/file.c b/drivers/staging/unisys/visorchipset/file.c
index 3321764069de..373fa36b7119 100644
--- a/drivers/staging/unisys/visorchipset/file.c
+++ b/drivers/staging/unisys/visorchipset/file.c
@@ -155,9 +155,9 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
return -ENXIO;
}
visorchannel_read(*PControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- gpControlChannel), &addr,
- sizeof(addr));
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
if (addr == 0) {
ERRDRV("%s control channel address is 0", __func__);
return -ENXIO;
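The hunk above reads a single field out of the controlvm channel by its offsetof() rather than mapping the whole structure; the sysfs handlers later in this patch use the same idiom. A rough standalone sketch of the idea, with a made-up layout and a memcpy stand-in for visorchannel_read():

/* Hypothetical field-at-offset read from an already-mapped channel. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_channel_protocol {
	uint32_t version;
	uint32_t tool_action;
	uint64_t gp_control_channel;   /* address of a secondary channel */
};

/* Stand-in for visorchannel_read(): copy nbytes starting at offset. */
static int demo_channel_read(const void *base, size_t offset,
			     void *dest, size_t nbytes)
{
	memcpy(dest, (const unsigned char *)base + offset, nbytes);
	return 0;
}

static uint64_t demo_get_control_channel_addr(const void *base)
{
	uint64_t addr = 0;

	demo_channel_read(base,
			  offsetof(struct demo_channel_protocol,
				   gp_control_channel),
			  &addr, sizeof(addr));
	return addr;
}

Reading one field at a time keeps callers independent of layout details they do not use, at the price of the channel offsets having to stay ABI-stable.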
diff --git a/drivers/staging/unisys/visorchipset/parser.c b/drivers/staging/unisys/visorchipset/parser.c
index 661aaae9b154..9edbd3bbd186 100644
--- a/drivers/staging/unisys/visorchipset/parser.c
+++ b/drivers/staging/unisys/visorchipset/parser.c
@@ -47,8 +47,8 @@ parser_init_guts(u64 addr, u32 bytes, BOOL isLocal,
int allocbytes = sizeof(PARSER_CONTEXT) + bytes;
PARSER_CONTEXT *rc = NULL;
PARSER_CONTEXT *ctx = NULL;
- MEMREGION *rgn = NULL;
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct memregion *rgn = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (tryAgain)
*tryAgain = FALSE;
@@ -110,27 +110,29 @@ parser_init_guts(u64 addr, u32 bytes, BOOL isLocal,
rc = ctx;
goto Away;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
- if (phdr->TotalLength != bytes) {
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ if (phdr->total_length != bytes) {
ERRDRV("%s - bad total length %lu (should be %lu)",
__func__,
- (ulong) (phdr->TotalLength), (ulong) (bytes));
+ (ulong) (phdr->total_length), (ulong) (bytes));
rc = NULL;
goto Away;
}
- if (phdr->TotalLength < phdr->HeaderLength) {
+ if (phdr->total_length < phdr->header_length) {
ERRDRV("%s - total length < header length (%lu < %lu)",
__func__,
- (ulong) (phdr->TotalLength),
- (ulong) (phdr->HeaderLength));
+ (ulong) (phdr->total_length),
+ (ulong) (phdr->header_length));
rc = NULL;
goto Away;
}
- if (phdr->HeaderLength < sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)) {
+ if (phdr->header_length <
+ sizeof(struct spar_controlvm_parameters_header)) {
ERRDRV("%s - header is too small (%lu < %lu)",
__func__,
- (ulong) (phdr->HeaderLength),
- (ulong) (sizeof(ULTRA_CONTROLVM_PARAMETERS_HEADER)));
+ (ulong) (phdr->header_length),
+ (ulong)(sizeof(
+ struct spar_controlvm_parameters_header)));
rc = NULL;
goto Away;
}
@@ -159,7 +161,7 @@ parser_init(u64 addr, u32 bytes, BOOL isLocal, BOOL *tryAgain)
}
/* Call this instead of parser_init() if the payload area consists of just
- * a sequence of bytes, rather than a ULTRA_CONTROLVM_PARAMETERS_HEADER
+ * a sequence of bytes, rather than a struct spar_controlvm_parameters_header
 * structure. Afterwards, you can call parser_simpleString_get() or
* parser_byteStream_get() to obtain the data.
*/
@@ -196,44 +198,44 @@ parser_byteStream_get(PARSER_CONTEXT *ctx, ulong *nbytes)
uuid_le
parser_id_get(PARSER_CONTEXT *ctx)
{
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (ctx == NULL) {
ERRDRV("%s (%s:%d) - no context",
__func__, __FILE__, __LINE__);
return NULL_UUID_LE;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
- return phdr->Id;
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+ return phdr->id;
}
void
parser_param_start(PARSER_CONTEXT *ctx, PARSER_WHICH_STRING which_string)
{
- ULTRA_CONTROLVM_PARAMETERS_HEADER *phdr = NULL;
+ struct spar_controlvm_parameters_header *phdr = NULL;
if (ctx == NULL) {
ERRDRV("%s (%s:%d) - no context",
__func__, __FILE__, __LINE__);
goto Away;
}
- phdr = (ULTRA_CONTROLVM_PARAMETERS_HEADER *) (ctx->data);
+ phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
switch (which_string) {
case PARSERSTRING_INITIATOR:
- ctx->curr = ctx->data + phdr->InitiatorOffset;
- ctx->bytes_remaining = phdr->InitiatorLength;
+ ctx->curr = ctx->data + phdr->initiator_offset;
+ ctx->bytes_remaining = phdr->initiator_length;
break;
case PARSERSTRING_TARGET:
- ctx->curr = ctx->data + phdr->TargetOffset;
- ctx->bytes_remaining = phdr->TargetLength;
+ ctx->curr = ctx->data + phdr->target_offset;
+ ctx->bytes_remaining = phdr->target_length;
break;
case PARSERSTRING_CONNECTION:
- ctx->curr = ctx->data + phdr->ConnectionOffset;
- ctx->bytes_remaining = phdr->ConnectionLength;
+ ctx->curr = ctx->data + phdr->connection_offset;
+ ctx->bytes_remaining = phdr->connection_length;
break;
case PARSERSTRING_NAME:
- ctx->curr = ctx->data + phdr->NameOffset;
- ctx->bytes_remaining = phdr->NameLength;
+ ctx->curr = ctx->data + phdr->name_offset;
+ ctx->bytes_remaining = phdr->name_length;
break;
default:
ERRDRV("%s - bad which_string %d", __func__, which_string);
diff --git a/drivers/staging/unisys/visorchipset/testing.h b/drivers/staging/unisys/visorchipset/testing.h
index 015d502cbb16..573aa8b5ba6a 100644
--- a/drivers/staging/unisys/visorchipset/testing.h
+++ b/drivers/staging/unisys/visorchipset/testing.h
@@ -23,8 +23,9 @@
#include "globals.h"
#include "controlvmchannel.h"
-void test_produce_test_message(CONTROLVM_MESSAGE *msg, int isLocalTestAddr);
-BOOL test_consume_test_message(CONTROLVM_MESSAGE *msg);
+void test_produce_test_message(struct controlvm_message *msg,
+ int isLocalTestAddr);
+BOOL test_consume_test_message(struct controlvm_message *msg);
void test_manufacture_vnic_client_add(void *p);
void test_manufacture_vnic_client_add_phys(HOSTADDRESS addr);
void test_manufacture_preamble_messages(void);
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
index 2bf2e2f368ef..46dad63fa2c8 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset.h
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -31,85 +31,85 @@
/** Describes the state from the perspective of which controlvm messages have
* been received for a bus or device.
*/
-typedef struct {
+struct visorchipset_state {
u32 created:1;
u32 attached:1;
u32 configured:1;
u32 running:1;
/* Add new fields above. */
/* Remaining bits in this 32-bit word are unused. */
-} VISORCHIPSET_STATE;
+};
-typedef enum {
+enum visorchipset_addresstype {
/** address is guest physical, but outside of the physical memory
* region that is controlled by the running OS (this is the normal
* address type for Supervisor channels)
*/
- ADDRTYPE_localPhysical,
+ ADDRTYPE_LOCALPHYSICAL,
/** address is guest physical, and withIN the confines of the
* physical memory controlled by the running OS.
*/
- ADDRTYPE_localTest,
-} VISORCHIPSET_ADDRESSTYPE;
+ ADDRTYPE_LOCALTEST,
+};
-typedef enum {
- CRASH_dev,
- CRASH_bus,
-} CRASH_OBJ_TYPE;
+enum crash_obj_type {
+ CRASH_DEV,
+ CRASH_BUS,
+};
/** Attributes for a particular Supervisor channel.
*/
-typedef struct {
- VISORCHIPSET_ADDRESSTYPE addrType;
- HOSTADDRESS channelAddr;
- struct InterruptInfo intr;
- u64 nChannelBytes;
- uuid_le channelTypeGuid;
- uuid_le channelInstGuid;
+struct visorchipset_channel_info {
+ enum visorchipset_addresstype addr_type;
+ HOSTADDRESS channel_addr;
+ struct irq_info intr;
+ u64 n_channel_bytes;
+ uuid_le channel_type_uuid;
+ uuid_le channel_inst_uuid;
-} VISORCHIPSET_CHANNEL_INFO;
+};
/** Attributes for a particular Supervisor device.
* Any visorchipset client can query these attributes using
* visorchipset_get_client_device_info() or
* visorchipset_get_server_device_info().
*/
-typedef struct {
+struct visorchipset_device_info {
struct list_head entry;
- u32 busNo;
- u32 devNo;
- uuid_le devInstGuid;
- VISORCHIPSET_STATE state;
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- u32 Reserved1; /* CONTROLVM_ID */
- u64 Reserved2;
- u32 switchNo; /* when devState.attached==1 */
- u32 internalPortNo; /* when devState.attached==1 */
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM_MESSAGE */
+ u32 bus_no;
+ u32 dev_no;
+ uuid_le dev_inst_uuid;
+ struct visorchipset_state state;
+ struct visorchipset_channel_info chan_info;
+ u32 reserved1; /* control_vm_id */
+ u64 reserved2;
+ u32 switch_no; /* when devState.attached==1 */
+ u32 internal_port_no; /* when devState.attached==1 */
+ struct controlvm_message_header pending_msg_hdr;/* CONTROLVM_MESSAGE */
/** For private use by the bus driver */
void *bus_driver_context;
-} VISORCHIPSET_DEVICE_INFO;
+};
-static inline VISORCHIPSET_DEVICE_INFO *
-finddevice(struct list_head *list, u32 busNo, u32 devNo)
+static inline struct visorchipset_device_info *finddevice(
+ struct list_head *list, u32 bus_no, u32 dev_no)
{
- VISORCHIPSET_DEVICE_INFO *p;
+ struct visorchipset_device_info *p;
list_for_each_entry(p, list, entry) {
- if (p->busNo == busNo && p->devNo == devNo)
+ if (p->bus_no == bus_no && p->dev_no == dev_no)
return p;
}
return NULL;
}
-static inline void delbusdevices(struct list_head *list, u32 busNo)
+static inline void delbusdevices(struct list_head *list, u32 bus_no)
{
- VISORCHIPSET_DEVICE_INFO *p, *tmp;
+ struct visorchipset_device_info *p, *tmp;
list_for_each_entry_safe(p, tmp, list, entry) {
- if (p->busNo == busNo) {
+ if (p->bus_no == bus_no) {
list_del(&p->entry);
kfree(p);
}
@@ -122,37 +122,37 @@ static inline void delbusdevices(struct list_head *list, u32 busNo)
* Any visorchipset client can query these attributes using
* visorchipset_get_client_bus_info() or visorchipset_get_bus_info().
*/
-typedef struct {
+struct visorchipset_bus_info {
struct list_head entry;
- u32 busNo;
- VISORCHIPSET_STATE state;
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- uuid_le partitionGuid;
- u64 partitionHandle;
+ u32 bus_no;
+ struct visorchipset_state state;
+ struct visorchipset_channel_info chan_info;
+ uuid_le partition_uuid;
+ u64 partition_handle;
u8 *name; /* UTF8 */
u8 *description; /* UTF8 */
- u64 Reserved1;
- u32 Reserved2;
- MYPROCOBJECT *procObject;
+ u64 reserved1;
+ u32 reserved2;
+ MYPROCOBJECT *proc_object;
struct {
u32 server:1;
/* Add new fields above. */
/* Remaining bits in this 32-bit word are unused. */
} flags;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr; /* CONTROLVM MsgHdr */
+ struct controlvm_message_header pending_msg_hdr;/* CONTROLVM MsgHdr */
/** For private use by the bus driver */
void *bus_driver_context;
- u64 devNo;
+ u64 dev_no;
-} VISORCHIPSET_BUS_INFO;
+};
-static inline VISORCHIPSET_BUS_INFO *
-findbus(struct list_head *list, u32 busNo)
+static inline struct visorchipset_bus_info *
+findbus(struct list_head *list, u32 bus_no)
{
- VISORCHIPSET_BUS_INFO *p;
+ struct visorchipset_bus_info *p;
list_for_each_entry(p, list, entry) {
- if (p->busNo == busNo)
+ if (p->bus_no == bus_no)
return p;
}
return NULL;
@@ -160,75 +160,73 @@ findbus(struct list_head *list, u32 busNo)
/** Attributes for a particular Supervisor switch.
*/
-typedef struct {
- u32 switchNo;
- VISORCHIPSET_STATE state;
- uuid_le switchTypeGuid;
- u8 *authService1;
- u8 *authService2;
- u8 *authService3;
- u8 *securityContext;
- u64 Reserved;
- u32 Reserved2; /* CONTROLVM_ID */
+struct visorchipset_switch_info {
+ u32 switch_no;
+ struct visorchipset_state state;
+ uuid_le switch_type_uuid;
+ u8 *authservice1;
+ u8 *authservice2;
+ u8 *authservice3;
+ u8 *security_context;
+ u64 reserved;
+ u32 reserved2; /* control_vm_id */
struct device dev;
BOOL dev_exists;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
-
-} VISORCHIPSET_SWITCH_INFO;
+ struct controlvm_message_header pending_msg_hdr;
+};
/** Attributes for a particular Supervisor external port, which is connected
* to a specific switch.
*/
-typedef struct {
- u32 switchNo;
- u32 externalPortNo;
- VISORCHIPSET_STATE state;
- uuid_le networkZoneGuid;
- int pdPort;
+struct visorchipset_externalport_info {
+ u32 switch_no;
+ u32 external_port_no;
+ struct visorchipset_state state;
+ uuid_le network_zone_uuid;
+ int pd_port;
u8 *ip;
- u8 *ipNetmask;
- u8 *ipBroadcast;
- u8 *ipNetwork;
- u8 *ipGateway;
- u8 *ipDNS;
- u64 Reserved1;
- u32 Reserved2; /* CONTROLVM_ID */
+ u8 *ip_netmask;
+ u8 *ip_broadcast;
+ u8 *ip_network;
+ u8 *ip_gateway;
+ u8 *ip_dns;
+ u64 reserved1;
+ u32 reserved2; /* control_vm_id */
struct device dev;
BOOL dev_exists;
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
-
-} VISORCHIPSET_EXTERNALPORT_INFO;
+ struct controlvm_message_header pending_msg_hdr;
+};
/** Attributes for a particular Supervisor internal port, which is how a
* device connects to a particular switch.
*/
-typedef struct {
- u32 switchNo;
- u32 internalPortNo;
- VISORCHIPSET_STATE state;
- u32 busNo; /* valid only when state.attached == 1 */
- u32 devNo; /* valid only when state.attached == 1 */
- u64 Reserved1;
- u32 Reserved2; /* CONTROLVM_ID */
- CONTROLVM_MESSAGE_HEADER pendingMsgHdr;
- MYPROCOBJECT *procObject;
-
-} VISORCHIPSET_INTERNALPORT_INFO;
+struct visorchipset_internalport_info {
+ u32 switch_no;
+ u32 internal_port_no;
+ struct visorchipset_state state;
+ u32 bus_no; /* valid only when state.attached == 1 */
+ u32 dev_no; /* valid only when state.attached == 1 */
+ u64 reserved1;
+ u32 reserved2; /* CONTROLVM_ID */
+ struct controlvm_message_header pending_msg_hdr;
+ MYPROCOBJECT *proc_object;
+
+};
/* These functions will be called from within visorchipset when certain
* events happen. (The implementation of these functions is outside of
* visorchipset.)
*/
-typedef struct {
- void (*bus_create)(ulong busNo);
- void (*bus_destroy)(ulong busNo);
- void (*device_create)(ulong busNo, ulong devNo);
- void (*device_destroy)(ulong busNo, ulong devNo);
- void (*device_pause)(ulong busNo, ulong devNo);
- void (*device_resume)(ulong busNo, ulong devNo);
- int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
- ulong *maxSize);
-} VISORCHIPSET_BUSDEV_NOTIFIERS;
+struct visorchipset_busdev_notifiers {
+ void (*bus_create)(ulong bus_no);
+ void (*bus_destroy)(ulong bus_no);
+ void (*device_create)(ulong bus_no, ulong dev_no);
+ void (*device_destroy)(ulong bus_no, ulong dev_no);
+ void (*device_pause)(ulong bus_no, ulong dev_no);
+ void (*device_resume)(ulong bus_no, ulong dev_no);
+ int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
+ ulong *max_size);
+};
/* These functions live inside visorchipset, and will be called to indicate
* responses to specific events (by code outside of visorchipset).
@@ -236,14 +234,14 @@ typedef struct {
* 0 = it worked
* -1 = it failed
*/
-typedef struct {
- void (*bus_create)(ulong busNo, int response);
- void (*bus_destroy)(ulong busNo, int response);
- void (*device_create)(ulong busNo, ulong devNo, int response);
- void (*device_destroy)(ulong busNo, ulong devNo, int response);
- void (*device_pause)(ulong busNo, ulong devNo, int response);
- void (*device_resume)(ulong busNo, ulong devNo, int response);
-} VISORCHIPSET_BUSDEV_RESPONDERS;
+struct visorchipset_busdev_responders {
+ void (*bus_create)(ulong bus_no, int response);
+ void (*bus_destroy)(ulong bus_no, int response);
+ void (*device_create)(ulong bus_no, ulong dev_no, int response);
+ void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
+ void (*device_pause)(ulong bus_no, ulong dev_no, int response);
+ void (*device_resume)(ulong bus_no, ulong dev_no, int response);
+};
/** Register functions (in the bus driver) to get called by visorchipset
* whenever a bus or device appears for which this service partition is
@@ -252,9 +250,10 @@ typedef struct {
* responses.
*/
void
-visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo);
+visorchipset_register_busdev_client(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info);
/** Register functions (in the bus driver) to get called by visorchipset
* whenever a bus or device appears for which this service partition is
@@ -263,47 +262,31 @@ visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
* responses.
*/
void
-visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo);
+visorchipset_register_busdev_server(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info);
-typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (CONTROLVM_MESSAGE *msg,
+typedef void (*SPARREPORTEVENT_COMPLETE_FUNC) (struct controlvm_message *msg,
int status);
-void visorchipset_device_pause_response(ulong busNo, ulong devNo, int response);
+void visorchipset_device_pause_response(ulong bus_no, ulong dev_no,
+ int response);
-BOOL visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo);
-BOOL visorchipset_get_device_info(ulong busNo, ulong devNo,
- VISORCHIPSET_DEVICE_INFO *devInfo);
-BOOL visorchipset_get_switch_info(ulong switchNo,
- VISORCHIPSET_SWITCH_INFO *switchInfo);
-BOOL visorchipset_get_externalport_info(ulong switchNo, ulong externalPortNo,
- VISORCHIPSET_EXTERNALPORT_INFO
- *externalPortInfo);
-BOOL visorchipset_set_bus_context(ulong busNo, void *context);
-BOOL visorchipset_set_device_context(ulong busNo, ulong devNo, void *context);
+BOOL visorchipset_get_bus_info(ulong bus_no,
+ struct visorchipset_bus_info *bus_info);
+BOOL visorchipset_get_device_info(ulong bus_no, ulong dev_no,
+ struct visorchipset_device_info *dev_info);
+BOOL visorchipset_set_bus_context(ulong bus_no, void *context);
+BOOL visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context);
int visorchipset_chipset_ready(void);
int visorchipset_chipset_selftest(void);
int visorchipset_chipset_notready(void);
-void visorchipset_controlvm_respond_reportEvent(CONTROLVM_MESSAGE *msg,
- void *payload);
-void visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type);
+void visorchipset_save_message(struct controlvm_message *msg,
+ enum crash_obj_type type);
void *visorchipset_cache_alloc(struct kmem_cache *pool,
BOOL ok_to_block, char *fn, int ln);
void visorchipset_cache_free(struct kmem_cache *pool, void *p,
char *fn, int ln);
-#if defined(TRANSMITFILE_DEBUG) || defined(DEBUG)
-#define DBG_GETFILE_PAYLOAD(msg, controlvm_header) \
- LOGINF(msg, \
- (ulong)controlvm_header.PayloadVmOffset, \
- (ulong)controlvm_header.PayloadMaxBytes)
-#define DBG_GETFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
-#define DBG_PUTFILE(fmt, ...) LOGINF(fmt, ##__VA_ARGS__)
-#else
-#define DBG_GETFILE_PAYLOAD(msg, controlvm_header)
-#define DBG_GETFILE(fmt, ...)
-#define DBG_PUTFILE(fmt, ...)
-#endif
-
#endif
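Taken together, the notifiers/responders pair above is a two-way handshake: visorchipset calls into the bus driver when a bus or device appears, and the bus driver later reports 0 (worked) or -1 (failed) through the responder table it received at registration time. A hedged sketch of what the bus-driver side might look like; every demo_* name is invented, only the struct fields and the registration/query calls come from this header:

/* Hypothetical visorchipset client (bus-driver side). */
#include "visorchipset.h"

static struct visorchipset_busdev_responders demo_responders;

static void demo_bus_create(ulong bus_no)
{
	/* ... bring the bus up, then acknowledge: 0 means it worked ... */
	demo_responders.bus_create(bus_no, 0);
}

static void demo_device_create(ulong bus_no, ulong dev_no)
{
	struct visorchipset_device_info dev_info;

	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info)) {
		demo_responders.device_create(bus_no, dev_no, -1);
		return;
	}
	/* ... probe the channel at dev_info.chan_info.channel_addr ... */
	demo_responders.device_create(bus_no, dev_no, 0);
}

static struct visorchipset_busdev_notifiers demo_notifiers = {
	.bus_create = demo_bus_create,
	.device_create = demo_device_create,
};

static void demo_register(struct ultra_vbus_deviceinfo *driver_info)
{
	/* Fills demo_responders before any notifier can fire. */
	visorchipset_register_busdev_client(&demo_notifiers,
					    &demo_responders,
					    driver_info);
}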
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
index e5df39554a1a..7e6be32cf7bb 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset_main.c
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -72,26 +72,28 @@ static struct workqueue_struct *Periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(NotifierLock);
typedef struct {
- CONTROLVM_MESSAGE message;
+ struct controlvm_message message;
unsigned int crc;
} MESSAGE_ENVELOPE;
-static CONTROLVM_MESSAGE_HEADER g_DiagMsgHdr;
-static CONTROLVM_MESSAGE_HEADER g_ChipSetMsgHdr;
-static CONTROLVM_MESSAGE_HEADER g_DelDumpMsgHdr;
+static struct controlvm_message_header g_DiagMsgHdr;
+static struct controlvm_message_header g_ChipSetMsgHdr;
+static struct controlvm_message_header g_DelDumpMsgHdr;
static const uuid_le UltraDiagPoolChannelProtocolGuid =
- ULTRA_DIAG_POOL_CHANNEL_PROTOCOL_GUID;
+ SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpoolBusNo = 0xffffff;
static ulong g_diagpoolDevNo = 0xffffff;
-static CONTROLVM_MESSAGE_PACKET g_DeviceChangeStatePacket;
+static struct controlvm_message_packet g_DeviceChangeStatePacket;
/* Only VNIC and VHBA channels are sent to visorclientbus (aka
* "visorhackbus")
*/
#define FOR_VISORHACKBUS(channel_type_guid) \
- (((uuid_le_cmp(channel_type_guid, UltraVnicChannelProtocolGuid) == 0)\
- || (uuid_le_cmp(channel_type_guid, UltraVhbaChannelProtocolGuid) == 0)))
+ (((uuid_le_cmp(channel_type_guid,\
+ spar_vnic_channel_protocol_uuid) == 0)\
+ || (uuid_le_cmp(channel_type_guid,\
+ spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
#define is_diagpool_channel(channel_type_guid) \
@@ -112,12 +114,12 @@ typedef struct {
/* Manages the request payload in the controlvm channel */
static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
-static pCHANNEL_HEADER Test_Vnic_channel;
+static struct channel_header *Test_Vnic_channel;
typedef struct {
- CONTROLVM_MESSAGE_HEADER Dumpcapture_header;
- CONTROLVM_MESSAGE_HEADER Gettextdump_header;
- CONTROLVM_MESSAGE_HEADER Dumpcomplete_header;
+ struct controlvm_message_header Dumpcapture_header;
+ struct controlvm_message_header Gettextdump_header;
+ struct controlvm_message_header Dumpcomplete_header;
BOOL Gettextdump_outstanding;
u32 crc32;
ulong length;
@@ -134,7 +136,7 @@ static LIVEDUMP_INFO LiveDump_info;
* this scenario, we simply stash the controlvm message, then attempt to
* process it again the next time controlvm_periodic_work() runs.
*/
-static CONTROLVM_MESSAGE ControlVm_Pending_Msg;
+static struct controlvm_message ControlVm_Pending_Msg;
static BOOL ControlVm_Pending_Msg_Valid = FALSE;
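The comment above describes a park-and-retry scheme: a message whose payload area is not yet mapped is stashed in ControlVm_Pending_Msg and tried again on the next periodic pass. A rough sketch of that pattern; the demo_* dequeue and handle helpers are invented stand-ins for the real queue read and command dispatch:

/* Stash-and-retry sketch; demo_dequeue_message()/demo_handle_message()
 * stand in for reading the controlvm queue and processing one message.
 */
static bool demo_dequeue_message(struct controlvm_message *msg);
static bool demo_handle_message(struct controlvm_message *msg);

static struct controlvm_message demo_pending_msg;
static bool demo_pending_msg_valid;

static void demo_periodic_work(void)
{
	struct controlvm_message msg;

	if (demo_pending_msg_valid) {          /* retry last pass's leftover */
		msg = demo_pending_msg;
		demo_pending_msg_valid = false;
	} else if (!demo_dequeue_message(&msg)) {
		return;                        /* nothing new this pass      */
	}

	if (!demo_handle_message(&msg)) {      /* e.g. payload not mapped    */
		demo_pending_msg = msg;
		demo_pending_msg_valid = true;
	}
}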
/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
@@ -180,7 +182,7 @@ struct putfile_request {
u64 sig; /* PUTFILE_REQUEST_SIG */
/* header from original TransmitFile request */
- CONTROLVM_MESSAGE_HEADER controlvm_header;
+ struct controlvm_message_header controlvm_header;
u64 file_request_number; /* from original TransmitFile request */
/* link to next struct putfile_request */
@@ -218,7 +220,7 @@ struct parahotplug_request {
struct list_head list;
int id;
unsigned long expiration;
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message msg;
};
static LIST_HEAD(Parahotplug_request_list);
@@ -228,8 +230,8 @@ static void parahotplug_process_list(void);
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
* CONTROLVM_REPORTEVENT.
*/
-static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Server_Notifiers;
-static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Client_Notifiers;
+static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
+static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
static void bus_create_response(ulong busNo, int response);
static void bus_destroy_response(ulong busNo, int response);
@@ -237,7 +239,7 @@ static void device_create_response(ulong busNo, ulong devNo, int response);
static void device_destroy_response(ulong busNo, ulong devNo, int response);
static void device_resume_response(ulong busNo, ulong devNo, int response);
-static VISORCHIPSET_BUSDEV_RESPONDERS BusDev_Responders = {
+static struct visorchipset_busdev_responders BusDev_Responders = {
.bus_create = bus_create_response,
.bus_destroy = bus_destroy_response,
.device_create = device_create_response,
@@ -342,13 +344,14 @@ static struct platform_device Visorchipset_platform_device = {
};
/* Function prototypes */
-static void controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response);
-static void controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr,
- int response,
- ULTRA_CHIPSET_FEATURE features);
-static void controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *
- msgHdr, int response,
- ULTRA_SEGMENT_STATE state);
+static void controlvm_respond(struct controlvm_message_header *msgHdr,
+ int response);
+static void controlvm_respond_chipset_init(
+ struct controlvm_message_header *msgHdr, int response,
+ enum ultra_chipset_feature features);
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msgHdr, int response,
+ struct spar_segment_state state);
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
@@ -357,8 +360,8 @@ static ssize_t toolaction_show(struct device *dev,
u8 toolAction;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- ToolAction), &toolAction, sizeof(u8));
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action), &toolAction, sizeof(u8));
return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
}
@@ -373,7 +376,7 @@ static ssize_t toolaction_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ToolAction),
+ offsetof(struct spar_controlvm_channel_protocol, tool_action),
&toolAction, sizeof(u8));
if (ret)
@@ -385,14 +388,14 @@ static ssize_t boottotool_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ struct efi_spar_indication efiSparIndication;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- EfiSparIndication), &efiSparIndication,
- sizeof(ULTRA_EFI_SPAR_INDICATION));
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &efiSparIndication,
+ sizeof(struct efi_spar_indication));
return scnprintf(buf, PAGE_SIZE, "%u\n",
- efiSparIndication.BootToTool);
+ efiSparIndication.boot_to_tool);
}
static ssize_t boottotool_store(struct device *dev,
@@ -400,17 +403,17 @@ static ssize_t boottotool_store(struct device *dev,
const char *buf, size_t count)
{
int val, ret;
- ULTRA_EFI_SPAR_INDICATION efiSparIndication;
+ struct efi_spar_indication efiSparIndication;
if (kstrtoint(buf, 10, &val) != 0)
return -EINVAL;
- efiSparIndication.BootToTool = val;
+ efiSparIndication.boot_to_tool = val;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- EfiSparIndication),
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind),
&(efiSparIndication),
- sizeof(ULTRA_EFI_SPAR_INDICATION));
+ sizeof(struct efi_spar_indication));
if (ret)
return ret;
@@ -423,7 +426,7 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
u32 error;
visorchannel_read(ControlVm_channel, offsetof(
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationError),
+ struct spar_controlvm_channel_protocol, installation_error),
&error, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}
@@ -438,8 +441,8 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationError),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
&error, sizeof(u32));
if (ret)
return ret;
@@ -452,7 +455,7 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
u32 textId;
visorchannel_read(ControlVm_channel, offsetof(
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationTextId),
+ struct spar_controlvm_channel_protocol, installation_text_id),
&textId, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
}
@@ -467,8 +470,8 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationTextId),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
&textId, sizeof(u32));
if (ret)
return ret;
@@ -482,8 +485,8 @@ static ssize_t remaining_steps_show(struct device *dev,
u16 remainingSteps;
visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationRemainingSteps),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
&remainingSteps,
sizeof(u16));
return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
@@ -499,8 +502,8 @@ static ssize_t remaining_steps_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- InstallationRemainingSteps),
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
&remainingSteps, sizeof(u16));
if (ret)
return ret;
@@ -539,11 +542,11 @@ testUnicode(void)
static void
busInfo_clear(void *v)
{
- VISORCHIPSET_BUS_INFO *p = (VISORCHIPSET_BUS_INFO *) (v);
+ struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
- if (p->procObject) {
- visor_proc_DestroyObject(p->procObject);
- p->procObject = NULL;
+ if (p->proc_object) {
+ visor_proc_DestroyObject(p->proc_object);
+ p->proc_object = NULL;
}
kfree(p->name);
p->name = NULL;
@@ -552,16 +555,17 @@ busInfo_clear(void *v)
p->description = NULL;
p->state.created = 0;
- memset(p, 0, sizeof(VISORCHIPSET_BUS_INFO));
+ memset(p, 0, sizeof(struct visorchipset_bus_info));
}
static void
devInfo_clear(void *v)
{
- VISORCHIPSET_DEVICE_INFO *p = (VISORCHIPSET_DEVICE_INFO *) (v);
+ struct visorchipset_device_info *p =
+ (struct visorchipset_device_info *)(v);
p->state.created = 0;
- memset(p, 0, sizeof(VISORCHIPSET_DEVICE_INFO));
+ memset(p, 0, sizeof(struct visorchipset_device_info));
}
static u8
@@ -585,9 +589,10 @@ clear_chipset_events(void)
}
void
-visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo)
+visorchipset_register_busdev_server(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info)
{
down(&NotifierLock);
if (notifiers == NULL) {
@@ -600,8 +605,8 @@ visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
}
if (responders)
*responders = BusDev_Responders;
- if (driverInfo)
- bus_device_info_init(driverInfo, "chipset", "visorchipset",
+ if (driver_info)
+ bus_device_info_init(driver_info, "chipset", "visorchipset",
VERSION, NULL);
up(&NotifierLock);
@@ -609,9 +614,10 @@ visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
void
-visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
- VISORCHIPSET_BUSDEV_RESPONDERS *responders,
- ULTRA_VBUS_DEVICEINFO *driverInfo)
+visorchipset_register_busdev_client(
+ struct visorchipset_busdev_notifiers *notifiers,
+ struct visorchipset_busdev_responders *responders,
+ struct ultra_vbus_deviceinfo *driver_info)
{
down(&NotifierLock);
if (notifiers == NULL) {
@@ -624,9 +630,9 @@ visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
}
if (responders)
*responders = BusDev_Responders;
- if (driverInfo)
- bus_device_info_init(driverInfo, "chipset(bolts)", "visorchipset",
- VERSION, NULL);
+ if (driver_info)
+ bus_device_info_init(driver_info, "chipset(bolts)",
+ "visorchipset", VERSION, NULL);
up(&NotifierLock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
@@ -634,8 +640,8 @@ EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
static void
cleanup_controlvm_structures(void)
{
- VISORCHIPSET_BUS_INFO *bi, *tmp_bi;
- VISORCHIPSET_DEVICE_INFO *di, *tmp_di;
+ struct visorchipset_bus_info *bi, *tmp_bi;
+ struct visorchipset_device_info *di, *tmp_di;
list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
busInfo_clear(bi);
@@ -651,10 +657,10 @@ cleanup_controlvm_structures(void)
}
static void
-chipset_init(CONTROLVM_MESSAGE *inmsg)
+chipset_init(struct controlvm_message *inmsg)
{
static int chipset_inited;
- ULTRA_CHIPSET_FEATURE features = 0;
+ enum ultra_chipset_feature features = 0;
int rc = CONTROLVM_RESP_SUCCESS;
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
@@ -669,7 +675,7 @@ chipset_init(CONTROLVM_MESSAGE *inmsg)
/* Set features to indicate we support parahotplug (if Command
* also supports it). */
features =
- inmsg->cmd.initChipset.
+ inmsg->cmd.init_chipset.
features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
/* Set the "reply" bit so Command knows this is a
@@ -679,42 +685,42 @@ chipset_init(CONTROLVM_MESSAGE *inmsg)
Away:
if (rc < 0)
cleanup_controlvm_structures();
- if (inmsg->hdr.Flags.responseExpected)
+ if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
static void
-controlvm_init_response(CONTROLVM_MESSAGE *msg,
- CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
-{
- memset(msg, 0, sizeof(CONTROLVM_MESSAGE));
- memcpy(&msg->hdr, msgHdr, sizeof(CONTROLVM_MESSAGE_HEADER));
- msg->hdr.PayloadBytes = 0;
- msg->hdr.PayloadVmOffset = 0;
- msg->hdr.PayloadMaxBytes = 0;
+controlvm_init_response(struct controlvm_message *msg,
+ struct controlvm_message_header *msgHdr, int response)
+{
+ memset(msg, 0, sizeof(struct controlvm_message));
+ memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
+ msg->hdr.payload_bytes = 0;
+ msg->hdr.payload_vm_offset = 0;
+ msg->hdr.payload_max_bytes = 0;
if (response < 0) {
- msg->hdr.Flags.failed = 1;
- msg->hdr.CompletionStatus = (u32) (-response);
+ msg->hdr.flags.failed = 1;
+ msg->hdr.completion_status = (u32) (-response);
}
}
static void
-controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
+controlvm_respond(struct controlvm_message_header *msgHdr, int response)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
* back the deviceChangeState structure in the packet. */
- if (msgHdr->Id == CONTROLVM_DEVICE_CHANGESTATE
- && g_DeviceChangeStatePacket.deviceChangeState.busNo ==
+ if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
+ && g_DeviceChangeStatePacket.device_change_state.bus_no ==
g_diagpoolBusNo
- && g_DeviceChangeStatePacket.deviceChangeState.devNo ==
+ && g_DeviceChangeStatePacket.device_change_state.dev_no ==
g_diagpoolDevNo)
outmsg.cmd = g_DeviceChangeStatePacket;
- if (outmsg.hdr.Flags.testMessage == 1) {
+ if (outmsg.hdr.flags.test_message == 1) {
LOGINF("%s controlvm_msg=0x%x response=%d for test message",
- __func__, outmsg.hdr.Id, response);
+ __func__, outmsg.hdr.id, response);
return;
}
if (!visorchannel_signalinsert(ControlVm_channel,
@@ -725,13 +731,14 @@ controlvm_respond(CONTROLVM_MESSAGE_HEADER *msgHdr, int response)
}
static void
-controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
- ULTRA_CHIPSET_FEATURE features)
+controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
+ int response,
+ enum ultra_chipset_feature features)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
- outmsg.cmd.initChipset.features = features;
+ outmsg.cmd.init_chipset.features = features;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
LOGERR("signalinsert failed!");
@@ -739,15 +746,15 @@ controlvm_respond_chipset_init(CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
}
}
-static void
-controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
- int response, ULTRA_SEGMENT_STATE state)
+static void controlvm_respond_physdev_changestate(
+ struct controlvm_message_header *msgHdr, int response,
+ struct spar_segment_state state)
{
- CONTROLVM_MESSAGE outmsg;
+ struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msgHdr, response);
- outmsg.cmd.deviceChangeState.state = state;
- outmsg.cmd.deviceChangeState.flags.physicalDevice = 1;
+ outmsg.cmd.device_change_state.state = state;
+ outmsg.cmd.device_change_state.flags.phys_device = 1;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
LOGERR("signalinsert failed!");
@@ -756,15 +763,16 @@ controlvm_respond_physdev_changestate(CONTROLVM_MESSAGE_HEADER *msgHdr,
}
void
-visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
+visorchipset_save_message(struct controlvm_message *msg,
+ enum crash_obj_type type)
{
u32 localSavedCrashMsgOffset;
u16 localSavedCrashMsgCount;
/* get saved message count */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgCount),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
&localSavedCrashMsgCount, sizeof(u16)) < 0) {
LOGERR("failed to get Saved Message Count");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -783,8 +791,8 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
/* get saved crash message offset */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
&localSavedCrashMsgOffset, sizeof(u32)) < 0) {
LOGERR("failed to get Saved Message Offset");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -792,10 +800,11 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
return;
}
- if (type == CRASH_bus) {
+ if (type == CRASH_BUS) {
if (visorchannel_write(ControlVm_channel,
localSavedCrashMsgOffset,
- msg, sizeof(CONTROLVM_MESSAGE)) < 0) {
+ msg,
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
POSTCODE_SEVERITY_ERR);
@@ -804,8 +813,8 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
} else {
if (visorchannel_write(ControlVm_channel,
localSavedCrashMsgOffset +
- sizeof(CONTROLVM_MESSAGE), msg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message), msg,
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
POSTCODE_SEVERITY_ERR);
@@ -816,9 +825,9 @@ visorchipset_save_message(CONTROLVM_MESSAGE *msg, CRASH_OBJ_TYPE type)
EXPORT_SYMBOL_GPL(visorchipset_save_message);
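The writes above imply a simple layout for the two saved crash messages: the bus-create message sits at saved_crash_message_offset inside the controlvm channel and the device-create message immediately follows it. A small illustrative helper (not present in the driver) that restates that layout:

/* Assumes the controlvm channel and visorchipset definitions are in scope. */
#include "visorchipset.h"

static size_t demo_crash_slot_offset(size_t saved_crash_message_offset,
				     enum crash_obj_type type)
{
	if (type == CRASH_BUS)                 /* first slot               */
		return saved_crash_message_offset;
	return saved_crash_message_offset +
	       sizeof(struct controlvm_message); /* CRASH_DEV: second slot */
}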
static void
-bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
+bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
{
- VISORCHIPSET_BUS_INFO *p = NULL;
+ struct visorchipset_bus_info *p = NULL;
BOOL need_clear = FALSE;
p = findbus(&BusInfoList, busNo);
@@ -838,16 +847,16 @@ bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
need_clear = TRUE;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("bus_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != (u32) cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != (u32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_respond(&p->pendingMsgHdr, response);
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ controlvm_respond(&p->pending_msg_hdr, response);
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
if (need_clear) {
busInfo_clear(p);
delbusdevices(&DevInfoList, busNo);
@@ -855,32 +864,32 @@ bus_responder(CONTROLVM_ID cmdId, ulong busNo, int response)
}
static void
-device_changestate_responder(CONTROLVM_ID cmdId,
+device_changestate_responder(enum controlvm_id cmdId,
ulong busNo, ulong devNo, int response,
- ULTRA_SEGMENT_STATE responseState)
+ struct spar_segment_state responseState)
{
- VISORCHIPSET_DEVICE_INFO *p = NULL;
- CONTROLVM_MESSAGE outmsg;
+ struct visorchipset_device_info *p = NULL;
+ struct controlvm_message outmsg;
p = finddevice(&DevInfoList, busNo, devNo);
if (!p) {
LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
return;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("device_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_init_response(&outmsg, &p->pendingMsgHdr, response);
+ controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
- outmsg.cmd.deviceChangeState.busNo = busNo;
- outmsg.cmd.deviceChangeState.devNo = devNo;
- outmsg.cmd.deviceChangeState.state = responseState;
+ outmsg.cmd.device_change_state.bus_no = busNo;
+ outmsg.cmd.device_change_state.dev_no = devNo;
+ outmsg.cmd.device_change_state.state = responseState;
if (!visorchannel_signalinsert(ControlVm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg)) {
@@ -888,13 +897,14 @@ device_changestate_responder(CONTROLVM_ID cmdId,
return;
}
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
}
static void
-device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
+device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
+ int response)
{
- VISORCHIPSET_DEVICE_INFO *p = NULL;
+ struct visorchipset_device_info *p = NULL;
BOOL need_clear = FALSE;
p = finddevice(&DevInfoList, busNo, devNo);
@@ -909,38 +919,38 @@ device_responder(CONTROLVM_ID cmdId, ulong busNo, ulong devNo, int response)
need_clear = TRUE;
}
- if (p->pendingMsgHdr.Id == CONTROLVM_INVALID) {
+ if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
LOGERR("device_responder no pending msg");
return; /* no controlvm response needed */
}
- if (p->pendingMsgHdr.Id != (u32) cmdId) {
- LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.Id);
+ if (p->pending_msg_hdr.id != (u32) cmdId) {
+ LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
return;
}
- controlvm_respond(&p->pendingMsgHdr, response);
- p->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ controlvm_respond(&p->pending_msg_hdr, response);
+ p->pending_msg_hdr.id = CONTROLVM_INVALID;
if (need_clear)
devInfo_clear(p);
}
static void
bus_epilog(u32 busNo,
- u32 cmd, CONTROLVM_MESSAGE_HEADER *msgHdr,
+ u32 cmd, struct controlvm_message_header *msgHdr,
int response, BOOL needResponse)
{
BOOL notified = FALSE;
- VISORCHIPSET_BUS_INFO *pBusInfo = findbus(&BusInfoList, busNo);
+ struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);
if (!pBusInfo) {
LOGERR("HUH? bad busNo=%d", busNo);
return;
}
if (needResponse) {
- memcpy(&pBusInfo->pendingMsgHdr, msgHdr,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
+ sizeof(struct controlvm_message_header));
} else
- pBusInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
down(&NotifierLock);
if (response == CONTROLVM_RESP_SUCCESS) {
@@ -981,7 +991,7 @@ bus_epilog(u32 busNo,
}
if (notified)
/* The callback function just called above is responsible
- * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * for calling the appropriate visorchipset_busdev_responders
* function, which will call bus_responder()
*/
;
@@ -991,14 +1001,14 @@ bus_epilog(u32 busNo,
}
static void
-device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
- CONTROLVM_MESSAGE_HEADER *msgHdr, int response,
+device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
+ struct controlvm_message_header *msgHdr, int response,
BOOL needResponse, BOOL for_visorbus)
{
- VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers = NULL;
+ struct visorchipset_busdev_notifiers *notifiers = NULL;
BOOL notified = FALSE;
- VISORCHIPSET_DEVICE_INFO *pDevInfo =
+ struct visorchipset_device_info *pDevInfo =
finddevice(&DevInfoList, busNo, devNo);
char *envp[] = {
"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
@@ -1014,10 +1024,10 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
else
notifiers = &BusDev_Client_Notifiers;
if (needResponse) {
- memcpy(&pDevInfo->pendingMsgHdr, msgHdr,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
+ sizeof(struct controlvm_message_header));
} else
- pDevInfo->pendingMsgHdr.Id = CONTROLVM_INVALID;
+ pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
down(&NotifierLock);
if (response >= 0) {
@@ -1030,8 +1040,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
break;
case CONTROLVM_DEVICE_CHANGESTATE:
/* ServerReady / ServerRunning / SegmentStateRunning */
- if (state.Alive == SegmentStateRunning.Alive &&
- state.Operating == SegmentStateRunning.Operating) {
+ if (state.alive == segment_state_running.alive &&
+ state.operating ==
+ segment_state_running.operating) {
if (notifiers->device_resume) {
(*notifiers->device_resume) (busNo,
devNo);
@@ -1039,9 +1050,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
- else if (state.Alive == SegmentStateStandby.Alive &&
- state.Operating ==
- SegmentStateStandby.Operating) {
+ else if (state.alive == segment_state_standby.alive &&
+ state.operating ==
+ segment_state_standby.operating) {
/* technically this is standby case
* where server is lost
*/
@@ -1050,9 +1061,9 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
devNo);
notified = TRUE;
}
- } else if (state.Alive == SegmentStatePaused.Alive &&
- state.Operating ==
- SegmentStatePaused.Operating) {
+ } else if (state.alive == segment_state_paused.alive &&
+ state.operating ==
+ segment_state_paused.operating) {
/* this is lite pause where channel is
* still valid just 'pause' of it
*/
@@ -1079,7 +1090,7 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
if (notified)
/* The callback function just called above is responsible
- * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
+ * for calling the appropriate visorchipset_busdev_responders
* function, which will call device_responder()
*/
;
@@ -1089,12 +1100,12 @@ device_epilog(u32 busNo, u32 devNo, ULTRA_SEGMENT_STATE state, u32 cmd,
}
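The CONTROLVM_DEVICE_CHANGESTATE branch above keys its resume/destroy/pause decision on whether the incoming (alive, operating) pair matches one of the well-known segment states. The driver compares the two fields inline; a tiny helper like the following (not part of the patch) states the same test once:

/* Equality test over the two fields device_epilog() compares. */
static BOOL demo_segment_state_eq(struct spar_segment_state a,
				  struct spar_segment_state b)
{
	return a.alive == b.alive && a.operating == b.operating;
}

/* Usage, mirroring the branch above:
 *   running -> notifiers->device_resume(bus_no, dev_no)
 *   standby -> notifiers->device_destroy(bus_no, dev_no)  (server lost)
 *   paused  -> notifiers->device_pause(bus_no, dev_no)    (channel stays valid)
 */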
static void
-bus_create(CONTROLVM_MESSAGE *inmsg)
+bus_create(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->createBus.busNo;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->create_bus.bus_no;
int rc = CONTROLVM_RESP_SUCCESS;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct visorchipset_bus_info *pBusInfo = NULL;
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1106,7 +1117,7 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
goto Away;
}
- pBusInfo = kzalloc(sizeof(VISORCHIPSET_BUS_INFO), GFP_KERNEL);
+ pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
if (pBusInfo == NULL) {
LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
busNo);
@@ -1117,21 +1128,22 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
}
INIT_LIST_HEAD(&pBusInfo->entry);
- pBusInfo->busNo = busNo;
- pBusInfo->devNo = cmd->createBus.deviceCount;
+ pBusInfo->bus_no = busNo;
+ pBusInfo->dev_no = cmd->create_bus.dev_count;
POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.Flags.testMessage == 1)
- pBusInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ if (inmsg->hdr.flags.test_message == 1)
+ pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
else
- pBusInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
+ pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
- pBusInfo->flags.server = inmsg->hdr.Flags.server;
- pBusInfo->chanInfo.channelAddr = cmd->createBus.channelAddr;
- pBusInfo->chanInfo.nChannelBytes = cmd->createBus.channelBytes;
- pBusInfo->chanInfo.channelTypeGuid = cmd->createBus.busDataTypeGuid;
- pBusInfo->chanInfo.channelInstGuid = cmd->createBus.busInstGuid;
+ pBusInfo->flags.server = inmsg->hdr.flags.server;
+ pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
+ pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
+ pBusInfo->chan_info.channel_type_uuid =
+ cmd->create_bus.bus_data_type_uuid;
+ pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
list_add(&pBusInfo->entry, &BusInfoList);
@@ -1139,15 +1151,15 @@ bus_create(CONTROLVM_MESSAGE *inmsg)
Away:
bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-bus_destroy(CONTROLVM_MESSAGE *inmsg)
+bus_destroy(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->destroyBus.busNo;
- VISORCHIPSET_BUS_INFO *pBusInfo;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroy_bus.bus_no;
+ struct visorchipset_bus_info *pBusInfo;
int rc = CONTROLVM_RESP_SUCCESS;
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1165,19 +1177,19 @@ bus_destroy(CONTROLVM_MESSAGE *inmsg)
Away:
bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
+bus_configure(struct controlvm_message *inmsg, PARSER_CONTEXT *parser_ctx)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->configureBus.busNo;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->configure_bus.bus_no;
+ struct visorchipset_bus_info *pBusInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
char s[99];
- busNo = cmd->configureBus.busNo;
+ busNo = cmd->configure_bus.bus_no;
POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
pBusInfo = findbus(&BusInfoList, busNo);
@@ -1198,35 +1210,35 @@ bus_configure(CONTROLVM_MESSAGE *inmsg, PARSER_CONTEXT *parser_ctx)
goto Away;
}
/* TBD - add this check to other commands also... */
- if (pBusInfo->pendingMsgHdr.Id != CONTROLVM_INVALID) {
+ if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
- busNo, (uint) pBusInfo->pendingMsgHdr.Id);
+ busNo, (uint) pBusInfo->pending_msg_hdr.id);
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
goto Away;
}
- pBusInfo->partitionHandle = cmd->configureBus.guestHandle;
- pBusInfo->partitionGuid = parser_id_get(parser_ctx);
+ pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
+ pBusInfo->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
pBusInfo->name = parser_string_get(parser_ctx);
- visorchannel_uuid_id(&pBusInfo->partitionGuid, s);
+ visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
Away:
bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
- rc, inmsg->hdr.Flags.responseExpected == 1);
+ rc, inmsg->hdr.flags.response_expected == 1);
}
static void
-my_device_create(CONTROLVM_MESSAGE *inmsg)
+my_device_create(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->createDevice.busNo;
- ulong devNo = cmd->createDevice.devNo;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
- VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->create_device.bus_no;
+ ulong devNo = cmd->create_device.dev_no;
+ struct visorchipset_device_info *pDevInfo = NULL;
+ struct visorchipset_bus_info *pBusInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1255,7 +1267,7 @@ my_device_create(CONTROLVM_MESSAGE *inmsg)
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
goto Away;
}
- pDevInfo = kzalloc(sizeof(VISORCHIPSET_DEVICE_INFO), GFP_KERNEL);
+ pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
if (pDevInfo == NULL) {
LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kmaloc failed",
busNo, devNo);
@@ -1266,45 +1278,47 @@ my_device_create(CONTROLVM_MESSAGE *inmsg)
}
INIT_LIST_HEAD(&pDevInfo->entry);
- pDevInfo->busNo = busNo;
- pDevInfo->devNo = devNo;
- pDevInfo->devInstGuid = cmd->createDevice.devInstGuid;
+ pDevInfo->bus_no = busNo;
+ pDevInfo->dev_no = devNo;
+ pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
- if (inmsg->hdr.Flags.testMessage == 1)
- pDevInfo->chanInfo.addrType = ADDRTYPE_localTest;
+ if (inmsg->hdr.flags.test_message == 1)
+ pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
else
- pDevInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
- pDevInfo->chanInfo.channelAddr = cmd->createDevice.channelAddr;
- pDevInfo->chanInfo.nChannelBytes = cmd->createDevice.channelBytes;
- pDevInfo->chanInfo.channelTypeGuid = cmd->createDevice.dataTypeGuid;
- pDevInfo->chanInfo.intr = cmd->createDevice.intr;
+ pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
+ pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
+ pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
+ pDevInfo->chan_info.channel_type_uuid =
+ cmd->create_device.data_type_uuid;
+ pDevInfo->chan_info.intr = cmd->create_device.intr;
list_add(&pDevInfo->entry, &DevInfoList);
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
POSTCODE_SEVERITY_INFO);
Away:
/* get the bus and devNo for DiagPool channel */
- if (is_diagpool_channel(pDevInfo->chanInfo.channelTypeGuid)) {
+ if (pDevInfo &&
+ is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
g_diagpoolBusNo = busNo;
g_diagpoolDevNo = devNo;
LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
g_diagpoolBusNo, g_diagpoolDevNo);
}
- device_epilog(busNo, devNo, SegmentStateRunning,
+ device_epilog(busNo, devNo, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
}
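
The hunk above adds a "pDevInfo &&" guard on the Away: path because my_device_create() can jump to that label before pDevInfo is ever allocated (for example when the device already exists or the bus lookup fails), so dereferencing it unconditionally would oops. A simplified, hypothetical sketch of the pattern being protected; the names and the pr_info() are illustrative, not the driver's code:

/* Simplified sketch of a goto-cleanup path with a possibly-NULL pointer. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_dev_info {
	int channel_type;
};

static int example_device_create(int bus_no, int dev_no)
{
	struct example_dev_info *info = NULL;
	int rc = 0;

	if (bus_no < 0) {
		rc = -EINVAL;
		goto out;		/* jumps while info is still NULL */
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto out;
	}
	info->channel_type = 1;
	/* the real driver would list_add() info here; omitted in the sketch */

out:
	/* anything dereferenced on the shared exit path needs the guard */
	if (info && info->channel_type == 1)
		pr_info("example device %d:%d looks like a diagpool channel\n",
			bus_no, dev_no);
	if (rc)
		kfree(info);		/* kfree(NULL) is a no-op */
	return rc;
}
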
static void
-my_device_changestate(CONTROLVM_MESSAGE *inmsg)
+my_device_changestate(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->deviceChangeState.busNo;
- ulong devNo = cmd->deviceChangeState.devNo;
- ULTRA_SEGMENT_STATE state = cmd->deviceChangeState.state;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->device_change_state.bus_no;
+ ulong devNo = cmd->device_change_state.dev_no;
+ struct spar_segment_state state = cmd->device_change_state.state;
+ struct visorchipset_device_info *pDevInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1327,17 +1341,18 @@ Away:
if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
&inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(
+ pDevInfo->chan_info.channel_type_uuid));
}
static void
-my_device_destroy(CONTROLVM_MESSAGE *inmsg)
+my_device_destroy(struct controlvm_message *inmsg)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg->cmd;
- ulong busNo = cmd->destroyDevice.busNo;
- ulong devNo = cmd->destroyDevice.devNo;
- VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
+ struct controlvm_message_packet *cmd = &inmsg->cmd;
+ ulong busNo = cmd->destroy_device.bus_no;
+ ulong devNo = cmd->destroy_device.dev_no;
+ struct visorchipset_device_info *pDevInfo = NULL;
int rc = CONTROLVM_RESP_SUCCESS;
pDevInfo = finddevice(&DevInfoList, busNo, devNo);
@@ -1355,10 +1370,11 @@ my_device_destroy(CONTROLVM_MESSAGE *inmsg)
Away:
if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
- device_epilog(busNo, devNo, SegmentStateRunning,
+ device_epilog(busNo, devNo, segment_state_running,
CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
- inmsg->hdr.Flags.responseExpected == 1,
- FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
+ inmsg->hdr.flags.response_expected == 1,
+ FOR_VISORBUS(
+ pDevInfo->chan_info.channel_type_uuid));
}
/* When provided with the physical address of the controlvm channel
@@ -1382,7 +1398,7 @@ initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
}
memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
if ((offset == 0) || (bytes == 0)) {
- LOGERR("CONTROLVM_PAYLOAD_INIT Failed: RequestPayloadOffset=%llu RequestPayloadBytes=%llu!",
+ LOGERR("CONTROLVM_PAYLOAD_INIT Failed: request_payload_offset=%llu request_payload_bytes=%llu!",
(u64) offset, (u64) bytes);
rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
goto Away;
@@ -1429,8 +1445,8 @@ initialize_controlvm_payload(void)
u32 payloadBytes = 0;
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- RequestPayloadOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_offset),
&payloadOffset, sizeof(payloadOffset)) < 0) {
LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
@@ -1438,8 +1454,8 @@ initialize_controlvm_payload(void)
return;
}
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- RequestPayloadBytes),
+ offsetof(struct spar_controlvm_channel_protocol,
+ request_payload_bytes),
&payloadBytes, sizeof(payloadBytes)) < 0) {
LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
@@ -1487,15 +1503,15 @@ visorchipset_chipset_notready(void)
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
static void
-chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_ready(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_ready();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected && !visorchipset_holdchipsetready)
+ if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
controlvm_respond(msgHdr, rc);
- if (msgHdr->Flags.responseExpected && visorchipset_holdchipsetready) {
+ if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
/* Send CHIPSET_READY response when all modules have been loaded
* and disks mounted for the partition
*/
@@ -1505,24 +1521,24 @@ chipset_ready(CONTROLVM_MESSAGE_HEADER *msgHdr)
}
static void
-chipset_selftest(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_selftest(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_selftest();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected)
+ if (msgHdr->flags.response_expected)
controlvm_respond(msgHdr, rc);
}
static void
-chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
+chipset_notready(struct controlvm_message_header *msgHdr)
{
int rc = visorchipset_chipset_notready();
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msgHdr->Flags.responseExpected)
+ if (msgHdr->flags.response_expected)
controlvm_respond(msgHdr, rc);
}
@@ -1530,13 +1546,14 @@ chipset_notready(CONTROLVM_MESSAGE_HEADER *msgHdr)
* CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
*/
static BOOL
-read_controlvm_event(CONTROLVM_MESSAGE *msg)
+read_controlvm_event(struct controlvm_message *msg)
{
if (visorchannel_signalremove(ControlVm_channel,
CONTROLVM_QUEUE_EVENT, msg)) {
/* got a message */
- if (msg->hdr.Flags.testMessage == 1) {
- LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)", msg->hdr.Id);
+ if (msg->hdr.flags.test_message == 1) {
+ LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)",
+ msg->hdr.id);
return FALSE;
}
return TRUE;
@@ -1586,7 +1603,7 @@ parahotplug_next_expiration(void)
* CONTROLVM_MESSAGE that we can stick on a list
*/
static struct parahotplug_request *
-parahotplug_request_create(CONTROLVM_MESSAGE *msg)
+parahotplug_request_create(struct controlvm_message *msg)
{
struct parahotplug_request *req =
kmalloc(sizeof(struct parahotplug_request),
@@ -1618,7 +1635,7 @@ parahotplug_request_destroy(struct parahotplug_request *req)
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &req->msg.cmd;
+ struct controlvm_message_packet *cmd = &req->msg.cmd;
char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
env_func[40];
char *envp[] = {
@@ -1628,18 +1645,19 @@ parahotplug_request_kickoff(struct parahotplug_request *req)
sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
- cmd->deviceChangeState.state.Active);
+ cmd->device_change_state.state.active);
sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
- cmd->deviceChangeState.busNo);
+ cmd->device_change_state.bus_no);
sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
- cmd->deviceChangeState.devNo >> 3);
+ cmd->device_change_state.dev_no >> 3);
sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
- cmd->deviceChangeState.devNo & 0x7);
+ cmd->device_change_state.dev_no & 0x7);
LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
- cmd->deviceChangeState.state.Active,
- cmd->deviceChangeState.busNo, cmd->deviceChangeState.devNo >> 3,
- cmd->deviceChangeState.devNo & 7, req->id);
+ cmd->device_change_state.state.active,
+ cmd->device_change_state.bus_no,
+ cmd->device_change_state.dev_no >> 3,
+ cmd->device_change_state.dev_no & 7, req->id);
kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
envp);
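
The kickoff routine above packs a PCI devfn-style value: the upper bits of dev_no are the slot and the low three bits the function, which is why the environment strings use dev_no >> 3 and dev_no & 0x7. The kernel already provides PCI_SLOT() and PCI_FUNC() for that split. A hedged sketch of the same uevent construction; the SPAR_* variable names come from the hunk above, while the function name, kobject argument, and the reduced envp set are assumptions for illustration:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/pci.h>

/* Illustrative only: fires a udev CHANGE event carrying bus/slot/function. */
static void example_hotplug_uevent(struct kobject *kobj, unsigned int bus_no,
				   unsigned int dev_no, int active)
{
	char env_state[40], env_bus[40], env_dev[40], env_func[40];
	char *envp[] = { "SPAR_PARAHOTPLUG=1", env_state, env_bus,
			 env_dev, env_func, NULL };

	snprintf(env_state, sizeof(env_state),
		 "SPAR_PARAHOTPLUG_STATE=%d", active);
	snprintf(env_bus, sizeof(env_bus), "SPAR_PARAHOTPLUG_BUS=%u", bus_no);
	snprintf(env_dev, sizeof(env_dev),
		 "SPAR_PARAHOTPLUG_DEVICE=%u", PCI_SLOT(dev_no));
	snprintf(env_func, sizeof(env_func),
		 "SPAR_PARAHOTPLUG_FUNCTION=%u", PCI_FUNC(dev_no));

	kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}
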
@@ -1662,11 +1680,11 @@ parahotplug_process_list(void)
list_entry(pos, struct parahotplug_request, list);
if (time_after_eq(jiffies, req->expiration)) {
list_del(pos);
- if (req->msg.hdr.Flags.responseExpected)
+ if (req->msg.hdr.flags.response_expected)
controlvm_respond_physdev_changestate(
&req->msg.hdr,
CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
- req->msg.cmd.deviceChangeState.state);
+ req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
}
@@ -1697,11 +1715,11 @@ parahotplug_request_complete(int id, u16 active)
*/
list_del(pos);
spin_unlock(&Parahotplug_request_list_lock);
- req->msg.cmd.deviceChangeState.state.Active = active;
- if (req->msg.hdr.Flags.responseExpected)
+ req->msg.cmd.device_change_state.state.active = active;
+ if (req->msg.hdr.flags.response_expected)
controlvm_respond_physdev_changestate(
&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- req->msg.cmd.deviceChangeState.state);
+ req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
return 0;
}
@@ -1715,7 +1733,7 @@ parahotplug_request_complete(int id, u16 active)
* Enables or disables a PCI device by kicking off a udev script
*/
static void
-parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
+parahotplug_process_message(struct controlvm_message *inmsg)
{
struct parahotplug_request *req;
@@ -1726,7 +1744,7 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
return;
}
- if (inmsg->cmd.deviceChangeState.state.Active) {
+ if (inmsg->cmd.device_change_state.state.active) {
/* For enable messages, just respond with success
* right away. This is a bit of a hack, but there are
* issues with the early enable messages we get (with
@@ -1738,9 +1756,8 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
*/
parahotplug_request_kickoff(req);
controlvm_respond_physdev_changestate(&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.
- deviceChangeState.state);
+ CONTROLVM_RESP_SUCCESS, inmsg->cmd.
+ device_change_state.state);
parahotplug_request_destroy(req);
} else {
/* For disable messages, add the request to the
@@ -1768,23 +1785,23 @@ parahotplug_process_message(CONTROLVM_MESSAGE *inmsg)
* either successfully or with an error.
*/
static BOOL
-handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
+handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
- CONTROLVM_MESSAGE_PACKET *cmd = &inmsg.cmd;
+ struct controlvm_message_packet *cmd = &inmsg.cmd;
u64 parametersAddr = 0;
u32 parametersBytes = 0;
PARSER_CONTEXT *parser_ctx = NULL;
BOOL isLocalAddr = FALSE;
- CONTROLVM_MESSAGE ackmsg;
+ struct controlvm_message ackmsg;
/* create parsing context if necessary */
- isLocalAddr = (inmsg.hdr.Flags.testMessage == 1);
+ isLocalAddr = (inmsg.hdr.flags.test_message == 1);
if (channel_addr == 0) {
LOGERR("HUH? channel_addr is 0!");
return TRUE;
}
- parametersAddr = channel_addr + inmsg.hdr.PayloadVmOffset;
- parametersBytes = inmsg.hdr.PayloadBytes;
+ parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
+ parametersBytes = inmsg.hdr.payload_bytes;
/* Parameter and channel addresses within test messages actually lie
* within our OS-controlled memory. We need to know that, because it
@@ -1802,7 +1819,7 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
return FALSE;
}
LOGWRN("parsing failed");
- LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.Id);
+ LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.id);
LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
LOGWRN("isLocalAddr=%d", isLocalAddr);
@@ -1818,45 +1835,45 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
(ControlVm_channel, CONTROLVM_QUEUE_ACK, &ackmsg)))
LOGWRN("failed to send ACK failed");
}
- switch (inmsg.hdr.Id) {
+ switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
- (ulong) inmsg.cmd.initChipset.busCount,
- (ulong) inmsg.cmd.initChipset.switchCount);
+ (ulong) inmsg.cmd.init_chipset.bus_count,
+ (ulong) inmsg.cmd.init_chipset.switch_count);
chipset_init(&inmsg);
break;
case CONTROLVM_BUS_CREATE:
LOGINF("BUS_CREATE(%lu,#devs=%lu)",
- (ulong) cmd->createBus.busNo,
- (ulong) cmd->createBus.deviceCount);
+ (ulong) cmd->create_bus.bus_no,
+ (ulong) cmd->create_bus.dev_count);
bus_create(&inmsg);
break;
case CONTROLVM_BUS_DESTROY:
- LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroyBus.busNo);
+ LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroy_bus.bus_no);
bus_destroy(&inmsg);
break;
case CONTROLVM_BUS_CONFIGURE:
- LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configureBus.busNo);
+ LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configure_bus.bus_no);
bus_configure(&inmsg, parser_ctx);
break;
case CONTROLVM_DEVICE_CREATE:
LOGINF("DEVICE_CREATE(%lu,%lu)",
- (ulong) cmd->createDevice.busNo,
- (ulong) cmd->createDevice.devNo);
+ (ulong) cmd->create_device.bus_no,
+ (ulong) cmd->create_device.dev_no);
my_device_create(&inmsg);
break;
case CONTROLVM_DEVICE_CHANGESTATE:
- if (cmd->deviceChangeState.flags.physicalDevice) {
+ if (cmd->device_change_state.flags.phys_device) {
LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
- (ulong) cmd->deviceChangeState.busNo,
- (ulong) cmd->deviceChangeState.devNo,
- (ulong) cmd->deviceChangeState.state.Active);
+ (ulong) cmd->device_change_state.bus_no,
+ (ulong) cmd->device_change_state.dev_no,
+ (ulong) cmd->device_change_state.state.active);
parahotplug_process_message(&inmsg);
} else {
LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
- (ulong) cmd->deviceChangeState.busNo,
- (ulong) cmd->deviceChangeState.devNo,
- (ulong) cmd->deviceChangeState.state.Alive);
+ (ulong) cmd->device_change_state.bus_no,
+ (ulong) cmd->device_change_state.dev_no,
+ (ulong) cmd->device_change_state.state.alive);
/* save the hdr and cmd structures for later use */
/* when sending back the response to Command */
my_device_changestate(&inmsg);
@@ -1867,16 +1884,16 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
break;
case CONTROLVM_DEVICE_DESTROY:
LOGINF("DEVICE_DESTROY(%lu,%lu)",
- (ulong) cmd->destroyDevice.busNo,
- (ulong) cmd->destroyDevice.devNo);
+ (ulong) cmd->destroy_device.bus_no,
+ (ulong) cmd->destroy_device.dev_no);
my_device_destroy(&inmsg);
break;
case CONTROLVM_DEVICE_CONFIGURE:
LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
- (ulong) cmd->configureDevice.busNo,
- (ulong) cmd->configureDevice.devNo);
+ (ulong) cmd->configure_device.bus_no,
+ (ulong) cmd->configure_device.dev_no);
/* no op for now, just send a respond that we passed */
- if (inmsg.hdr.Flags.responseExpected)
+ if (inmsg.hdr.flags.response_expected)
controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
break;
case CONTROLVM_CHIPSET_READY:
@@ -1892,8 +1909,8 @@ handle_command(CONTROLVM_MESSAGE inmsg, HOSTADDRESS channel_addr)
chipset_notready(&inmsg.hdr);
break;
default:
- LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.Id);
- if (inmsg.hdr.Flags.responseExpected)
+ LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.id);
+ if (inmsg.hdr.flags.response_expected)
controlvm_respond(&inmsg.hdr,
-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
break;
@@ -1923,8 +1940,7 @@ static HOSTADDRESS controlvm_get_channel_address(void)
static void
controlvm_periodic_work(struct work_struct *work)
{
- VISORCHIPSET_CHANNEL_INFO chanInfo;
- CONTROLVM_MESSAGE inmsg;
+ struct controlvm_message inmsg;
BOOL gotACommand = FALSE;
BOOL handle_command_failed = FALSE;
static u64 Poll_Count;
@@ -1938,8 +1954,6 @@ controlvm_periodic_work(struct work_struct *work)
if (visorchipset_clientregwait && !clientregistered)
goto Away;
- memset(&chanInfo, 0, sizeof(VISORCHIPSET_CHANNEL_INFO));
-
Poll_Count++;
if (Poll_Count >= 250)
; /* keep going */
@@ -1950,24 +1964,24 @@ controlvm_periodic_work(struct work_struct *work)
* should be sent
*/
if (visorchipset_holdchipsetready
- && (g_ChipSetMsgHdr.Id != CONTROLVM_INVALID)) {
+ && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
if (check_chipset_events() == 1) {
LOGINF("Sending CHIPSET_READY response");
controlvm_respond(&g_ChipSetMsgHdr, 0);
clear_chipset_events();
memset(&g_ChipSetMsgHdr, 0,
- sizeof(CONTROLVM_MESSAGE_HEADER));
+ sizeof(struct controlvm_message_header));
}
}
while (visorchannel_signalremove(ControlVm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg)) {
- if (inmsg.hdr.PayloadMaxBytes != 0) {
+ if (inmsg.hdr.payload_max_bytes != 0) {
LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
- (ulong) inmsg.hdr.PayloadMaxBytes,
- (ulong) inmsg.hdr.PayloadVmOffset,
- inmsg.hdr.Id);
+ (ulong) inmsg.hdr.payload_max_bytes,
+ (ulong) inmsg.hdr.payload_vm_offset,
+ inmsg.hdr.id);
}
}
if (!gotACommand) {
@@ -2033,9 +2047,9 @@ static void
setup_crash_devices_work_queue(struct work_struct *work)
{
- CONTROLVM_MESSAGE localCrashCreateBusMsg;
- CONTROLVM_MESSAGE localCrashCreateDevMsg;
- CONTROLVM_MESSAGE msg;
+ struct controlvm_message localCrashCreateBusMsg;
+ struct controlvm_message localCrashCreateDevMsg;
+ struct controlvm_message msg;
u32 localSavedCrashMsgOffset;
u16 localSavedCrashMsgCount;
@@ -2052,16 +2066,16 @@ setup_crash_devices_work_queue(struct work_struct *work)
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
/* send init chipset msg */
- msg.hdr.Id = CONTROLVM_CHIPSET_INIT;
- msg.cmd.initChipset.busCount = 23;
- msg.cmd.initChipset.switchCount = 0;
+ msg.hdr.id = CONTROLVM_CHIPSET_INIT;
+ msg.cmd.init_chipset.bus_count = 23;
+ msg.cmd.init_chipset.switch_count = 0;
chipset_init(&msg);
/* get saved message count */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgCount),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
&localSavedCrashMsgCount, sizeof(u16)) < 0) {
LOGERR("failed to get Saved Message Count");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -2080,8 +2094,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
/* get saved crash message offset */
if (visorchannel_read(ControlVm_channel,
- offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
- SavedCrashMsgOffset),
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
&localSavedCrashMsgOffset, sizeof(u32)) < 0) {
LOGERR("failed to get Saved Message Offset");
POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
@@ -2093,7 +2107,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
if (visorchannel_read(ControlVm_channel,
localSavedCrashMsgOffset,
&localCrashCreateBusMsg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
POSTCODE_SEVERITY_ERR);
@@ -2103,9 +2117,9 @@ setup_crash_devices_work_queue(struct work_struct *work)
/* read create device message for storage device */
if (visorchannel_read(ControlVm_channel,
localSavedCrashMsgOffset +
- sizeof(CONTROLVM_MESSAGE),
+ sizeof(struct controlvm_message),
&localCrashCreateDevMsg,
- sizeof(CONTROLVM_MESSAGE)) < 0) {
+ sizeof(struct controlvm_message)) < 0) {
LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
POSTCODE_SEVERITY_ERR);
@@ -2113,7 +2127,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* reuse IOVM create bus message */
- if (localCrashCreateBusMsg.cmd.createBus.channelAddr != 0)
+ if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
bus_create(&localCrashCreateBusMsg);
else {
LOGERR("CrashCreateBusMsg is null, no dump will be taken");
@@ -2123,7 +2137,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* reuse create device message for storage device */
- if (localCrashCreateDevMsg.cmd.createDevice.channelAddr != 0)
+ if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
my_device_create(&localCrashCreateDevMsg);
else {
LOGERR("CrashCreateDevMsg is null, no dump will be taken");
@@ -2168,12 +2182,12 @@ device_destroy_response(ulong busNo, ulong devNo, int response)
}
void
-visorchipset_device_pause_response(ulong busNo, ulong devNo, int response)
+visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- busNo, devNo, response,
- SegmentStateStandby);
+ bus_no, dev_no, response,
+ segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
@@ -2182,30 +2196,30 @@ device_resume_response(ulong busNo, ulong devNo, int response)
{
device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
busNo, devNo, response,
- SegmentStateRunning);
+ segment_state_running);
}
BOOL
-visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo)
+visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
{
- void *p = findbus(&BusInfoList, busNo);
+ void *p = findbus(&BusInfoList, bus_no);
if (!p) {
- LOGERR("(%lu) failed", busNo);
+ LOGERR("(%lu) failed", bus_no);
return FALSE;
}
- memcpy(busInfo, p, sizeof(VISORCHIPSET_BUS_INFO));
+ memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
BOOL
-visorchipset_set_bus_context(ulong busNo, void *context)
+visorchipset_set_bus_context(ulong bus_no, void *context)
{
- VISORCHIPSET_BUS_INFO *p = findbus(&BusInfoList, busNo);
+ struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
if (!p) {
- LOGERR("(%lu) failed", busNo);
+ LOGERR("(%lu) failed", bus_no);
return FALSE;
}
p->bus_driver_context = context;
@@ -2214,27 +2228,28 @@ visorchipset_set_bus_context(ulong busNo, void *context)
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
BOOL
-visorchipset_get_device_info(ulong busNo, ulong devNo,
- VISORCHIPSET_DEVICE_INFO *devInfo)
+visorchipset_get_device_info(ulong bus_no, ulong dev_no,
+ struct visorchipset_device_info *dev_info)
{
- void *p = finddevice(&DevInfoList, busNo, devNo);
+ void *p = finddevice(&DevInfoList, bus_no, dev_no);
if (!p) {
- LOGERR("(%lu,%lu) failed", busNo, devNo);
+ LOGERR("(%lu,%lu) failed", bus_no, dev_no);
return FALSE;
}
- memcpy(devInfo, p, sizeof(VISORCHIPSET_DEVICE_INFO));
+ memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
BOOL
-visorchipset_set_device_context(ulong busNo, ulong devNo, void *context)
+visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
{
- VISORCHIPSET_DEVICE_INFO *p = finddevice(&DevInfoList, busNo, devNo);
+ struct visorchipset_device_info *p =
+ finddevice(&DevInfoList, bus_no, dev_no);
if (!p) {
- LOGERR("(%lu,%lu) failed", busNo, devNo);
+ LOGERR("(%lu,%lu) failed", bus_no, dev_no);
return FALSE;
}
p->bus_driver_context = context;
@@ -2377,11 +2392,10 @@ visorchipset_init(void)
ControlVm_channel =
visorchannel_create_with_lock
(addr,
- sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
- UltraControlvmChannelProtocolGuid);
- if (ULTRA_CONTROLVM_CHANNEL_OK_CLIENT
- (visorchannel_get_header(ControlVm_channel),
- NULL)) {
+ sizeof(struct spar_controlvm_channel_protocol),
+ spar_controlvm_channel_protocol_uuid);
+ if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+ visorchannel_get_header(ControlVm_channel))) {
LOGINF("Channel %s (ControlVm) discovered",
visorchannel_id(ControlVm_channel, s));
initialize_controlvm_payload();
@@ -2404,11 +2418,11 @@ visorchipset_init(void)
goto Away;
}
- memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
Putfile_buffer_list_pool =
kmem_cache_create(Putfile_buffer_list_pool_name,
@@ -2497,11 +2511,11 @@ visorchipset_exit(void)
cleanup_controlvm_structures();
- memset(&g_DiagMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_ChipSetMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
- memset(&g_DelDumpMsgHdr, 0, sizeof(CONTROLVM_MESSAGE_HEADER));
+ memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
LOGINF("Channel %s (ControlVm) disconnected",
visorchannel_id(ControlVm_channel, s));
diff --git a/drivers/staging/unisys/visorutil/charqueue.c b/drivers/staging/unisys/visorutil/charqueue.c
index 22241c7b4f7f..1ce7003c3a90 100644
--- a/drivers/staging/unisys/visorutil/charqueue.c
+++ b/drivers/staging/unisys/visorutil/charqueue.c
@@ -25,9 +25,7 @@
#define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail)
-
-
-struct CHARQUEUE_Tag {
+struct charqueue {
int alloc_size;
int nslots;
spinlock_t lock;
@@ -35,12 +33,10 @@ struct CHARQUEUE_Tag {
unsigned char buf[0];
};
-
-
-CHARQUEUE *visor_charqueue_create(ulong nslots)
+struct charqueue *visor_charqueue_create(ulong nslots)
{
- int alloc_size = sizeof(CHARQUEUE) + nslots + 1;
- CHARQUEUE *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
+ int alloc_size = sizeof(struct charqueue) + nslots + 1;
+ struct charqueue *cq = kmalloc(alloc_size, GFP_KERNEL|__GFP_NORETRY);
if (cq == NULL) {
ERRDRV("visor_charqueue_create allocation failed (alloc_size=%d)",
@@ -49,15 +45,14 @@ CHARQUEUE *visor_charqueue_create(ulong nslots)
}
cq->alloc_size = alloc_size;
cq->nslots = nslots;
- cq->head = cq->tail = 0;
+ cq->head = 0;
+ cq->tail = 0;
spin_lock_init(&cq->lock);
return cq;
}
EXPORT_SYMBOL_GPL(visor_charqueue_create);
-
-
-void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
+void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c)
{
int alloc_slots = charqueue->nslots+1; /* 1 slot is always empty */
@@ -71,9 +66,7 @@ void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c)
}
EXPORT_SYMBOL_GPL(visor_charqueue_enqueue);
-
-
-BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
+BOOL visor_charqueue_is_empty(struct charqueue *charqueue)
{
BOOL b;
@@ -84,9 +77,7 @@ BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue)
}
EXPORT_SYMBOL_GPL(visor_charqueue_is_empty);
-
-
-static int charqueue_dequeue_1(CHARQUEUE *charqueue)
+static int charqueue_dequeue_1(struct charqueue *charqueue)
{
int alloc_slots = charqueue->nslots + 1; /* 1 slot is always empty */
@@ -96,9 +87,7 @@ static int charqueue_dequeue_1(CHARQUEUE *charqueue)
return charqueue->buf[charqueue->tail];
}
-
-
-int charqueue_dequeue(CHARQUEUE *charqueue)
+int charqueue_dequeue(struct charqueue *charqueue)
{
int rc;
@@ -108,9 +97,8 @@ int charqueue_dequeue(CHARQUEUE *charqueue)
return rc;
}
-
-
-int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
+int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf,
+ int n)
{
int rc, counter = 0, c;
@@ -132,9 +120,7 @@ int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n)
}
EXPORT_SYMBOL_GPL(visor_charqueue_dequeue_n);
-
-
-void visor_charqueue_destroy(CHARQUEUE *charqueue)
+void visor_charqueue_destroy(struct charqueue *charqueue)
{
if (charqueue == NULL)
return;
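
The charqueue comments above ("1 slot is always empty", IS_EMPTY when head == tail) describe the classic ring-buffer convention: one slot is deliberately never used so that head == tail can only mean "empty", never "full". A stand-alone sketch of that convention, assuming hypothetical names and leaving out the driver's locking; note that modern C would also spell the trailing array as buf[] rather than buf[0]:

/* Stand-alone sketch of the "one slot always empty" ring buffer. */
#include <stdlib.h>

struct ring {
	int head, tail;		/* head == tail  =>  empty                */
	int nslots;		/* usable slots; nslots + 1 are allocated */
	unsigned char buf[];
};

static struct ring *ring_create(int nslots)
{
	struct ring *r = calloc(1, sizeof(*r) + nslots + 1);

	if (r)
		r->nslots = nslots;
	return r;
}

static int ring_is_empty(const struct ring *r)
{
	return r->head == r->tail;	/* unambiguous thanks to the spare slot */
}

static int ring_is_full(const struct ring *r)
{
	/* full when advancing head would collide with tail */
	return (r->head + 1) % (r->nslots + 1) == r->tail;
}

static int ring_enqueue(struct ring *r, unsigned char c)
{
	if (ring_is_full(r))
		return -1;
	r->head = (r->head + 1) % (r->nslots + 1);
	r->buf[r->head] = c;
	return 0;
}
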
diff --git a/drivers/staging/unisys/visorutil/charqueue.h b/drivers/staging/unisys/visorutil/charqueue.h
index d6f16587a364..56c1f79a54b0 100644
--- a/drivers/staging/unisys/visorutil/charqueue.h
+++ b/drivers/staging/unisys/visorutil/charqueue.h
@@ -21,17 +21,18 @@
#include "uniklog.h"
#include "timskmod.h"
-/* CHARQUEUE is an opaque structure to users.
+/* struct charqueue is an opaque structure to users.
* Fields are declared only in the implementation .c files.
*/
-typedef struct CHARQUEUE_Tag CHARQUEUE;
+struct charqueue;
-CHARQUEUE *visor_charqueue_create(ulong nslots);
-void visor_charqueue_enqueue(CHARQUEUE *charqueue, unsigned char c);
-int charqueue_dequeue(CHARQUEUE *charqueue);
-int visor_charqueue_dequeue_n(CHARQUEUE *charqueue, unsigned char *buf, int n);
-BOOL visor_charqueue_is_empty(CHARQUEUE *charqueue);
-void visor_charqueue_destroy(CHARQUEUE *charqueue);
+struct charqueue *visor_charqueue_create(ulong nslots);
+void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c);
+int charqueue_dequeue(struct charqueue *charqueue);
+int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf,
+ int n);
+BOOL visor_charqueue_is_empty(struct charqueue *charqueue);
+void visor_charqueue_destroy(struct charqueue *charqueue);
#endif
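
The header change above swaps the old "typedef struct CHARQUEUE_Tag CHARQUEUE;" for a plain forward declaration, "struct charqueue;", keeping the layout private to the .c file. That is the usual opaque-handle pattern in kernel headers, and it also drops the typedef, which CodingStyle discourages. A minimal sketch with hypothetical names (widget.h / widget.c are not real files in this patch):

/* widget.h -- public interface; the layout stays hidden from callers */
struct widget;			/* opaque to users of the header */

struct widget *widget_create(unsigned long nslots);
void widget_destroy(struct widget *w);

/* widget.c -- the only place that knows the fields */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "widget.h"

struct widget {
	unsigned long nslots;
	spinlock_t lock;
};

struct widget *widget_create(unsigned long nslots)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;
	w->nslots = nslots;
	spin_lock_init(&w->lock);
	return w;
}

void widget_destroy(struct widget *w)
{
	kfree(w);		/* kfree(NULL) is harmless */
}

Callers can only ever hold a struct widget *, so the implementation is free to change the fields without touching the header or its users.
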
diff --git a/drivers/staging/unisys/visorutil/easyproc.c b/drivers/staging/unisys/visorutil/easyproc.c
index 3b388494e2af..40f1ae9a155c 100644
--- a/drivers/staging/unisys/visorutil/easyproc.c
+++ b/drivers/staging/unisys/visorutil/easyproc.c
@@ -254,9 +254,9 @@ void visor_easyproc_CreateDeviceProperty(struct easyproc_device_info *p,
}
strcpy(px->property_name, property_name);
if (px->procEntry == NULL) {
- ERRDEVX(p->devno, "failed to register /proc/%s/device/%d/%s entry",
- p->pdriver->ProcId, p->devno, property_name
- );
+ ERRDEVX(p->devno,
+ "failed to register /proc/%s/device/%d/%s entry",
+ p->pdriver->ProcId, p->devno, property_name);
return;
}
px->show_device_property_info = show_property_info;
diff --git a/drivers/staging/unisys/visorutil/memregion.h b/drivers/staging/unisys/visorutil/memregion.h
index f4a65d2fcf02..0c3eebcf6d50 100644
--- a/drivers/staging/unisys/visorutil/memregion.h
+++ b/drivers/staging/unisys/visorutil/memregion.h
@@ -20,24 +20,24 @@
#include "timskmod.h"
-/* MEMREGION is an opaque structure to users.
+/* struct memregion is an opaque structure to users.
* Fields are declared only in the implementation .c files.
*/
-typedef struct MEMREGION_Tag MEMREGION;
+struct memregion;
-MEMREGION *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
-MEMREGION *visor_memregion_create_overlapped(MEMREGION *parent,
- ulong offset, ulong nbytes);
-int visor_memregion_resize(MEMREGION *memregion, ulong newsize);
-int visor_memregion_read(MEMREGION *memregion,
- ulong offset, void *dest, ulong nbytes);
-int visor_memregion_write(MEMREGION *memregion,
+struct memregion *visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes);
+struct memregion *visor_memregion_create_overlapped(struct memregion *parent,
+ ulong offset, ulong nbytes);
+int visor_memregion_resize(struct memregion *memregion, ulong newsize);
+int visor_memregion_read(struct memregion *memregion,
+ ulong offset, void *dest, ulong nbytes);
+int visor_memregion_write(struct memregion *memregion,
ulong offset, void *src, ulong nbytes);
-void visor_memregion_destroy(MEMREGION *memregion);
-HOSTADDRESS visor_memregion_get_physaddr(MEMREGION *memregion);
-ulong visor_memregion_get_nbytes(MEMREGION *memregion);
-void memregion_dump(MEMREGION *memregion, char *s,
+void visor_memregion_destroy(struct memregion *memregion);
+HOSTADDRESS visor_memregion_get_physaddr(struct memregion *memregion);
+ulong visor_memregion_get_nbytes(struct memregion *memregion);
+void memregion_dump(struct memregion *memregion, char *s,
ulong off, ulong len, struct seq_file *seq);
-void __iomem *visor_memregion_get_pointer(MEMREGION *memregion);
+void __iomem *visor_memregion_get_pointer(struct memregion *memregion);
#endif
diff --git a/drivers/staging/unisys/visorutil/memregion_direct.c b/drivers/staging/unisys/visorutil/memregion_direct.c
index 65bc07b947db..33522cc8c22c 100644
--- a/drivers/staging/unisys/visorutil/memregion_direct.c
+++ b/drivers/staging/unisys/visorutil/memregion_direct.c
@@ -26,7 +26,7 @@
#define MYDRVNAME "memregion"
-struct MEMREGION_Tag {
+struct memregion {
HOSTADDRESS physaddr;
ulong nbytes;
void __iomem *mapped;
@@ -34,15 +34,15 @@ struct MEMREGION_Tag {
BOOL overlapped;
};
-static BOOL mapit(MEMREGION *memregion);
-static void unmapit(MEMREGION *memregion);
+static BOOL mapit(struct memregion *memregion);
+static void unmapit(struct memregion *memregion);
-MEMREGION *
+struct memregion *
visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
{
- MEMREGION *rc = NULL;
- MEMREGION *memregion = kzalloc(sizeof(MEMREGION),
- GFP_KERNEL | __GFP_NORETRY);
+ struct memregion *rc = NULL;
+ struct memregion *memregion = kzalloc(sizeof(*memregion),
+ GFP_KERNEL | __GFP_NORETRY);
if (memregion == NULL) {
ERRDRV("visor_memregion_create allocation failed");
return NULL;
@@ -52,24 +52,23 @@ visor_memregion_create(HOSTADDRESS physaddr, ulong nbytes)
memregion->overlapped = FALSE;
if (!mapit(memregion)) {
rc = NULL;
- goto Away;
+ goto cleanup;
}
rc = memregion;
-Away:
+cleanup:
if (rc == NULL) {
- if (memregion != NULL) {
- visor_memregion_destroy(memregion);
- memregion = NULL;
- }
+ visor_memregion_destroy(memregion);
+ memregion = NULL;
}
return rc;
}
EXPORT_SYMBOL_GPL(visor_memregion_create);
-MEMREGION *
-visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
+struct memregion *
+visor_memregion_create_overlapped(struct memregion *parent, ulong offset,
+ ulong nbytes)
{
- MEMREGION *memregion = NULL;
+ struct memregion *memregion = NULL;
if (parent == NULL) {
ERRDRV("%s parent is NULL", __func__);
@@ -85,7 +84,7 @@ visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
__func__, offset, nbytes);
return NULL;
}
- memregion = kzalloc(sizeof(MEMREGION), GFP_KERNEL|__GFP_NORETRY);
+ memregion = kzalloc(sizeof(*memregion), GFP_KERNEL|__GFP_NORETRY);
if (memregion == NULL) {
ERRDRV("%s allocation failed", __func__);
return NULL;
@@ -93,23 +92,23 @@ visor_memregion_create_overlapped(MEMREGION *parent, ulong offset, ulong nbytes)
memregion->physaddr = parent->physaddr + offset;
memregion->nbytes = nbytes;
- memregion->mapped = ((u8 __iomem *) (parent->mapped)) + offset;
+ memregion->mapped = ((u8 __iomem *)(parent->mapped)) + offset;
memregion->requested = FALSE;
memregion->overlapped = TRUE;
return memregion;
}
EXPORT_SYMBOL_GPL(visor_memregion_create_overlapped);
-
static BOOL
-mapit(MEMREGION *memregion)
+mapit(struct memregion *memregion)
{
- ulong physaddr = (ulong) (memregion->physaddr);
+ ulong physaddr = (ulong)(memregion->physaddr);
ulong nbytes = memregion->nbytes;
memregion->requested = FALSE;
if (!request_mem_region(physaddr, nbytes, MYDRVNAME))
- ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal", physaddr, nbytes);
+ ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal",
+ physaddr, nbytes);
else
memregion->requested = TRUE;
memregion->mapped = ioremap_cache(physaddr, nbytes);
@@ -122,42 +121,42 @@ mapit(MEMREGION *memregion)
}
static void
-unmapit(MEMREGION *memregion)
+unmapit(struct memregion *memregion)
{
if (memregion->mapped != NULL) {
iounmap(memregion->mapped);
memregion->mapped = NULL;
}
if (memregion->requested) {
- release_mem_region((ulong) (memregion->physaddr),
+ release_mem_region((ulong)(memregion->physaddr),
memregion->nbytes);
memregion->requested = FALSE;
}
}
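
mapit()/unmapit() above show the usual claim-then-map sequence for a physical region: request_mem_region() is advisory here (the driver logs "no big deal" and carries on if it fails), ioremap_cache() provides the CPU mapping, and teardown reverses both steps. A hedged stand-alone sketch of that sequence; the helper names and the cleanup-on-map-failure branch are assumptions, not the driver's exact code:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/types.h>

/* Hypothetical helpers mirroring the mapit()/unmapit() shape above. */
static void __iomem *example_map_region(unsigned long physaddr,
					unsigned long nbytes, bool *requested)
{
	void __iomem *mapped;

	/* the reservation is advisory; failure is tolerated, as above */
	*requested = request_mem_region(physaddr, nbytes, "example") != NULL;

	mapped = ioremap_cache(physaddr, nbytes);
	if (!mapped && *requested) {
		release_mem_region(physaddr, nbytes);
		*requested = false;
	}
	return mapped;
}

static void example_unmap_region(void __iomem *mapped, unsigned long physaddr,
				 unsigned long nbytes, bool requested)
{
	if (mapped)
		iounmap(mapped);
	if (requested)
		release_mem_region(physaddr, nbytes);
}
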
HOSTADDRESS
-visor_memregion_get_physaddr(MEMREGION *memregion)
+visor_memregion_get_physaddr(struct memregion *memregion)
{
return memregion->physaddr;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_physaddr);
ulong
-visor_memregion_get_nbytes(MEMREGION *memregion)
+visor_memregion_get_nbytes(struct memregion *memregion)
{
return memregion->nbytes;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_nbytes);
void __iomem *
-visor_memregion_get_pointer(MEMREGION *memregion)
+visor_memregion_get_pointer(struct memregion *memregion)
{
return memregion->mapped;
}
EXPORT_SYMBOL_GPL(visor_memregion_get_pointer);
int
-visor_memregion_resize(MEMREGION *memregion, ulong newsize)
+visor_memregion_resize(struct memregion *memregion, ulong newsize)
{
if (newsize == memregion->nbytes)
return 0;
@@ -176,10 +175,9 @@ visor_memregion_resize(MEMREGION *memregion, ulong newsize)
}
EXPORT_SYMBOL_GPL(visor_memregion_resize);
-
static int
memregion_readwrite(BOOL is_write,
- MEMREGION *memregion, ulong offset,
+ struct memregion *memregion, ulong offset,
void *local, ulong nbytes)
{
if (offset + nbytes > memregion->nbytes) {
@@ -195,7 +193,7 @@ memregion_readwrite(BOOL is_write,
}
int
-visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
+visor_memregion_read(struct memregion *memregion, ulong offset, void *dest,
ulong nbytes)
{
return memregion_readwrite(FALSE, memregion, offset, dest, nbytes);
@@ -203,7 +201,7 @@ visor_memregion_read(MEMREGION *memregion, ulong offset, void *dest,
EXPORT_SYMBOL_GPL(visor_memregion_read);
int
-visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
+visor_memregion_write(struct memregion *memregion, ulong offset, void *src,
ulong nbytes)
{
return memregion_readwrite(TRUE, memregion, offset, src, nbytes);
@@ -211,7 +209,7 @@ visor_memregion_write(MEMREGION *memregion, ulong offset, void *src,
EXPORT_SYMBOL_GPL(visor_memregion_write);
void
-visor_memregion_destroy(MEMREGION *memregion)
+visor_memregion_destroy(struct memregion *memregion)
{
if (memregion == NULL)
return;
diff --git a/drivers/staging/unisys/visorutil/periodic_work.c b/drivers/staging/unisys/visorutil/periodic_work.c
index 3dd1c04d0e14..0908bf929401 100644
--- a/drivers/staging/unisys/visorutil/periodic_work.c
+++ b/drivers/staging/unisys/visorutil/periodic_work.c
@@ -25,8 +25,6 @@
#define MYDRVNAME "periodic_work"
-
-
struct periodic_work {
rwlock_t lock;
struct delayed_work work;
@@ -39,8 +37,6 @@ struct periodic_work {
const char *devnam;
};
-
-
static void periodic_work_func(struct work_struct *work)
{
struct periodic_work *pw;
@@ -49,8 +45,6 @@ static void periodic_work_func(struct work_struct *work)
(*pw->workfunc)(pw->workfuncarg);
}
-
-
struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
struct workqueue_struct *workqueue,
void (*workfunc)(void *),
@@ -73,16 +67,12 @@ struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
}
EXPORT_SYMBOL_GPL(visor_periodic_work_create);
-
-
void visor_periodic_work_destroy(struct periodic_work *pw)
{
kfree(pw);
}
EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
-
-
/** Call this from your periodic work worker function to schedule the next
* call.
* If this function returns FALSE, there was a failure and the
@@ -112,8 +102,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
-
-
/** This function returns TRUE iff new periodic work was actually started.
* If this function returns FALSE, then no work was started
* (either because it was already started, or because of a failure).
@@ -145,13 +133,9 @@ BOOL visor_periodic_work_start(struct periodic_work *pw)
unlock:
write_unlock(&pw->lock);
return rc;
-
}
EXPORT_SYMBOL_GPL(visor_periodic_work_start);
-
-
-
/** This function returns TRUE iff your call actually stopped the periodic
* work.
*
@@ -223,8 +207,9 @@ BOOL visor_periodic_work_stop(struct periodic_work *pw)
*/
SLEEPJIFFIES(10);
write_lock(&pw->lock);
- } else
+ } else {
pw->want_to_stop = FALSE;
+ }
}
write_unlock(&pw->lock);
return stopped_something;
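
The comments above spell out the contract of the periodic-work helpers: visor_periodic_work_nextperiod() must be called from inside the worker to rearm it (FALSE means the work is being stopped), _start() returns TRUE only if new work was actually queued, and _stop() blocks until the worker has finished. A hedged usage sketch built from those comments; the create() call's trailing arguments (context pointer, device name) are inferred from the workfuncarg/devnam fields visible above, and the header name and all other identifiers are assumptions:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include "periodic_work.h"	/* header name assumed */

static struct periodic_work *my_pw;

static void my_poll(void *context)
{
	/* ... do one polling pass ... */

	/* rearm from inside the worker; FALSE means the work is stopping */
	if (!visor_periodic_work_nextperiod(my_pw))
		return;
}

static void my_poll_setup(struct workqueue_struct *wq)
{
	/* trailing arguments assumed: context pointer, then device name */
	my_pw = visor_periodic_work_create(HZ, wq, my_poll, NULL, "mydev");
	if (!my_pw)
		return;

	/* _start() returns TRUE only if new periodic work was queued */
	if (!visor_periodic_work_start(my_pw))
		pr_warn("example: periodic work did not start\n");
}

static void my_poll_teardown(void)
{
	visor_periodic_work_stop(my_pw);	/* blocks until worker exits */
	visor_periodic_work_destroy(my_pw);
}
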
diff --git a/drivers/staging/unisys/visorutil/procobjecttree.c b/drivers/staging/unisys/visorutil/procobjecttree.c
index c476036f7382..195772d22c9e 100644
--- a/drivers/staging/unisys/visorutil/procobjecttree.c
+++ b/drivers/staging/unisys/visorutil/procobjecttree.c
@@ -320,19 +320,18 @@ void visor_proc_DestroyObject(MYPROCOBJECT *obj)
kfree(obj->procDirProperties);
obj->procDirProperties = NULL;
}
- if (obj->procDirPropertyContexts != NULL) {
- kfree(obj->procDirPropertyContexts);
- obj->procDirPropertyContexts = NULL;
- }
+
+ kfree(obj->procDirPropertyContexts);
+ obj->procDirPropertyContexts = NULL;
+
if (obj->procDir != NULL) {
if (obj->name != NULL)
remove_proc_entry(obj->name, type->procDir);
obj->procDir = NULL;
}
- if (obj->name != NULL) {
- kfree(obj->name);
- obj->name = NULL;
- }
+
+ kfree(obj->name);
+ obj->name = NULL;
kfree(obj);
}
EXPORT_SYMBOL_GPL(visor_proc_DestroyObject);
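
The cleanup above drops the "if (x != NULL)" guards around kfree() because kfree(NULL) is defined to be a no-op, so the checks were dead weight (checkpatch flags this). A short hypothetical sketch of the resulting shape:

#include <linux/slab.h>

/* Hypothetical release helper: kfree() tolerates NULL, so no guard needed. */
static void example_release(char **name, void **properties)
{
	kfree(*name);		/* fine even when *name is NULL */
	*name = NULL;

	kfree(*properties);
	*properties = NULL;
}
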
diff --git a/drivers/staging/unisys/visorutil/visorkmodutils.c b/drivers/staging/unisys/visorutil/visorkmodutils.c
index d6815f9e1337..556e2642d2d9 100644
--- a/drivers/staging/unisys/visorutil/visorkmodutils.c
+++ b/drivers/staging/unisys/visorutil/visorkmodutils.c
@@ -36,18 +36,7 @@
int unisys_spar_platform;
EXPORT_SYMBOL_GPL(unisys_spar_platform);
-/** Callers to interfaces that set __GFP_NORETRY flag below
- * must check for a NULL (error) result as we are telling the
- * kernel interface that it is okay to fail.
- */
-
-void *kmalloc_kernel(size_t siz)
-{
- return kmalloc(siz, GFP_KERNEL | __GFP_NORETRY);
-}
-
-static __init uint32_t
-visorutil_spar_detect(void)
+static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -57,22 +46,19 @@ visorutil_spar_detect(void)
return (ebx == UNISYS_SPAR_ID_EBX) &&
(ecx == UNISYS_SPAR_ID_ECX) &&
(edx == UNISYS_SPAR_ID_EDX);
- } else
+ } else {
return 0;
-
+ }
}
-
-
-
-static __init int
-visorutil_mod_init(void)
+static __init int visorutil_mod_init(void)
{
if (visorutil_spar_detect()) {
unisys_spar_platform = TRUE;
return 0;
- } else
+ } else {
return -ENODEV;
+ }
}
static __exit void
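
visorutil_spar_detect() above compares the CPUID output registers against the UNISYS_SPAR_ID_{EBX,ECX,EDX} signature to decide whether the module is running on an sPAR partition, and module init returns -ENODEV otherwise so the module refuses to load elsewhere. A hedged, x86-only sketch of the same shape; the leaf and signature values below are made up for illustration and are not the Unisys values:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/processor.h>	/* cpuid() -- x86 only */

#define EXAMPLE_LEAF		0x40000000	/* illustrative leaf      */
#define EXAMPLE_SIGNATURE_EBX	0x12345678	/* illustrative signature */

static int example_platform_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(EXAMPLE_LEAF, &eax, &ebx, &ecx, &edx);
	return ebx == EXAMPLE_SIGNATURE_EBX;
}

static int __init example_mod_init(void)
{
	if (!example_platform_detect())
		return -ENODEV;		/* refuse to load on other platforms */
	return 0;
}
module_init(example_mod_init);
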
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index 8e8bbb1dcd9b..1d2ff0cc41f1 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -8,6 +8,9 @@ config VME_USER
VME windows in a manner at least semi-compatible with the interface
provided with the original driver at <http://www.vmelinux.org/>.
+ To compile this driver as a module, choose M here. The module will
+ be called vme_user. If unsure, say N.
+
config VME_PIO2
tristate "GE PIO2 VME"
depends on STAGING && GPIOLIB
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index c64776f71809..da34d5529f51 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -191,11 +191,11 @@ int pio2_gpio_init(struct pio2_card *card)
int retval = 0;
char *label;
- label = kmalloc(PIO2_NUM_CHANNELS, GFP_KERNEL);
+ label = kasprintf(GFP_KERNEL,
+ "%s@%s", driver_name, dev_name(&card->vdev->dev));
if (label == NULL)
return -ENOMEM;
- sprintf(label, "%s@%s", driver_name, dev_name(&card->vdev->dev));
card->gc.label = label;
card->gc.ngpio = PIO2_NUM_CHANNELS;
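
The change above replaces a fixed-size kmalloc() plus sprintf() with kasprintf(), which measures, allocates, and formats in one call; the old code allocated PIO2_NUM_CHANNELS bytes, a size that had nothing to do with the length of the formatted label. The returned string must still be kfree()d when the gpio chip goes away. A hedged sketch of the idiom with hypothetical names:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical label helper mirroring the kasprintf() conversion above. */
static char *example_make_label(const char *driver, const char *devname)
{
	char *label = kasprintf(GFP_KERNEL, "%s@%s", driver, devname);

	if (!label)
		return NULL;		/* allocation failed */
	return label;			/* caller must kfree() this later */
}
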
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
deleted file mode 100644
index 36e14ec50564..000000000000
--- a/drivers/staging/vt6655/80211hdr.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: 80211hdr.h
- *
- * Purpose: 802.11 MAC headers related pre-defines and macros.
- *
- *
- * Author: Lyndon Chen
- *
- * Date: Apr 8, 2002
- *
- */
-
-#ifndef __80211HDR_H__
-#define __80211HDR_H__
-
-#include "ttype.h"
-
-/* bit type */
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-/* 802.11 frame related, defined as 802.11 spec */
-#define WLAN_ADDR_LEN 6
-#define WLAN_CRC_LEN 4
-#define WLAN_CRC32_LEN 4
-#define WLAN_FCS_LEN 4
-#define WLAN_BSSID_LEN 6
-#define WLAN_BSS_TS_LEN 8
-#define WLAN_HDR_ADDR2_LEN 16
-#define WLAN_HDR_ADDR3_LEN 24
-#define WLAN_HDR_ADDR4_LEN 30
-#define WLAN_IEHDR_LEN 2
-#define WLAN_SSID_MAXLEN 32
-#define WLAN_RATES_MAXLEN 16
-#define WLAN_RATES_MAXLEN_11B 4
-#define WLAN_RSN_MAXLEN 32
-#define WLAN_DATA_MAXLEN 2312
-#define WLAN_A3FR_MAXLEN (WLAN_HDR_ADDR3_LEN + WLAN_DATA_MAXLEN + \
- WLAN_CRC_LEN)
-
-#define WLAN_BEACON_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_ATIM_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 0)
-#define WLAN_NULLDATA_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 0)
-#define WLAN_DISASSOC_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 2)
-#define WLAN_ASSOCREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_ASSOCRESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_REASSOCREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_REASSOCRESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_PROBEREQ_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_PROBERESP_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_AUTHEN_FR_MAXLEN WLAN_A3FR_MAXLEN
-#define WLAN_DEAUTHEN_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 2)
-
-#define WLAN_WEP_NKEYS 4
-#define WLAN_WEP40_KEYLEN 5
-#define WLAN_WEP104_KEYLEN 13
-#define WLAN_WEP232_KEYLEN 29
-#define WLAN_WEPMAX_KEYLEN 32
-#define WLAN_CHALLENGE_IE_MAXLEN 255
-#define WLAN_CHALLENGE_IE_LEN 130
-#define WLAN_CHALLENGE_LEN 128
-#define WLAN_WEP_IV_LEN 4
-#define WLAN_WEP_ICV_LEN 4
-#define WLAN_FRAGS_MAX 16
-
-/* Frame Type */
-#define WLAN_TYPE_MGR 0x00
-#define WLAN_TYPE_CTL 0x01
-#define WLAN_TYPE_DATA 0x02
-
-#define WLAN_FTYPE_MGMT 0x00
-#define WLAN_FTYPE_CTL 0x01
-#define WLAN_FTYPE_DATA 0x02
-
-/* Frame Subtypes */
-#define WLAN_FSTYPE_ASSOCREQ 0x00
-#define WLAN_FSTYPE_ASSOCRESP 0x01
-#define WLAN_FSTYPE_REASSOCREQ 0x02
-#define WLAN_FSTYPE_REASSOCRESP 0x03
-#define WLAN_FSTYPE_PROBEREQ 0x04
-#define WLAN_FSTYPE_PROBERESP 0x05
-#define WLAN_FSTYPE_BEACON 0x08
-#define WLAN_FSTYPE_ATIM 0x09
-#define WLAN_FSTYPE_DISASSOC 0x0a
-#define WLAN_FSTYPE_AUTHEN 0x0b
-#define WLAN_FSTYPE_DEAUTHEN 0x0c
-#define WLAN_FSTYPE_ACTION 0x0d
-
-/* Control */
-#define WLAN_FSTYPE_PSPOLL 0x0a
-#define WLAN_FSTYPE_RTS 0x0b
-#define WLAN_FSTYPE_CTS 0x0c
-#define WLAN_FSTYPE_ACK 0x0d
-#define WLAN_FSTYPE_CFEND 0x0e
-#define WLAN_FSTYPE_CFENDCFACK 0x0f
-
-/* Data */
-#define WLAN_FSTYPE_DATAONLY 0x00
-#define WLAN_FSTYPE_DATA_CFACK 0x01
-#define WLAN_FSTYPE_DATA_CFPOLL 0x02
-#define WLAN_FSTYPE_DATA_CFACK_CFPOLL 0x03
-#define WLAN_FSTYPE_NULL 0x04
-#define WLAN_FSTYPE_CFACK 0x05
-#define WLAN_FSTYPE_CFPOLL 0x06
-#define WLAN_FSTYPE_CFACK_CFPOLL 0x07
-
-#ifdef __BIG_ENDIAN
-
-/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n) >> 8) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n) >> 8) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n) << 8) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n) << 8) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n) << 8) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n) << 8) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n) << 8) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n) << 8) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n) << 8) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n) << 8) & (BIT15)) >> 15)
-
-/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
-/* Capability Field bit */
-#define WLAN_GET_CAP_INFO_ESS(n) (((n) >> 8) & BIT0)
-#define WLAN_GET_CAP_INFO_IBSS(n) ((((n) >> 8) & BIT1) >> 1)
-#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) ((((n) >> 8) & BIT2) >> 2)
-#define WLAN_GET_CAP_INFO_CFPOLLREQ(n) ((((n) >> 8) & BIT3) >> 3)
-#define WLAN_GET_CAP_INFO_PRIVACY(n) ((((n) >> 8) & BIT4) >> 4)
-#define WLAN_GET_CAP_INFO_SHORTPREAMBLE(n) ((((n) >> 8) & BIT5) >> 5)
-#define WLAN_GET_CAP_INFO_PBCC(n) ((((n) >> 8) & BIT6) >> 6)
-#define WLAN_GET_CAP_INFO_AGILITY(n) ((((n) >> 8) & BIT7) >> 7)
-#define WLAN_GET_CAP_INFO_SPECTRUMMNG(n) ((((n)) & BIT8) >> 10)
-#define WLAN_GET_CAP_INFO_SHORTSLOTTIME(n) ((((n)) & BIT10) >> 10)
-#define WLAN_GET_CAP_INFO_DSSSOFDM(n) ((((n)) & BIT13) >> 13)
-#define WLAN_GET_CAP_INFO_GRPACK(n) ((((n)) & BIT14) >> 14)
-
-#else
-
-/* GET & SET Frame Control bit */
-#define WLAN_GET_FC_PRVER(n) (((unsigned short)(n)) & (BIT0 | BIT1))
-#define WLAN_GET_FC_FTYPE(n) ((((unsigned short)(n)) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((unsigned short)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
-#define WLAN_GET_FC_TODS(n) ((((unsigned short)(n)) & (BIT8)) >> 8)
-#define WLAN_GET_FC_FROMDS(n) ((((unsigned short)(n)) & (BIT9)) >> 9)
-#define WLAN_GET_FC_MOREFRAG(n) ((((unsigned short)(n)) & (BIT10)) >> 10)
-#define WLAN_GET_FC_RETRY(n) ((((unsigned short)(n)) & (BIT11)) >> 11)
-#define WLAN_GET_FC_PWRMGT(n) ((((unsigned short)(n)) & (BIT12)) >> 12)
-#define WLAN_GET_FC_MOREDATA(n) ((((unsigned short)(n)) & (BIT13)) >> 13)
-#define WLAN_GET_FC_ISWEP(n) ((((unsigned short)(n)) & (BIT14)) >> 14)
-#define WLAN_GET_FC_ORDER(n) ((((unsigned short)(n)) & (BIT15)) >> 15)
-
-/* Sequence Field bit */
-#define WLAN_GET_SEQ_FRGNUM(n) (((unsigned short)(n)) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((unsigned short)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
-/* Capability Field bit */
-#define WLAN_GET_CAP_INFO_ESS(n) ((n) & BIT0)
-#define WLAN_GET_CAP_INFO_IBSS(n) (((n) & BIT1) >> 1)
-#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) (((n) & BIT2) >> 2)
-#define WLAN_GET_CAP_INFO_CFPOLLREQ(n) (((n) & BIT3) >> 3)
-#define WLAN_GET_CAP_INFO_PRIVACY(n) (((n) & BIT4) >> 4)
-#define WLAN_GET_CAP_INFO_SHORTPREAMBLE(n) (((n) & BIT5) >> 5)
-#define WLAN_GET_CAP_INFO_PBCC(n) (((n) & BIT6) >> 6)
-#define WLAN_GET_CAP_INFO_AGILITY(n) (((n) & BIT7) >> 7)
-#define WLAN_GET_CAP_INFO_SPECTRUMMNG(n) (((n) & BIT8) >> 10)
-#define WLAN_GET_CAP_INFO_SHORTSLOTTIME(n) (((n) & BIT10) >> 10)
-#define WLAN_GET_CAP_INFO_DSSSOFDM(n) (((n) & BIT13) >> 13)
-#define WLAN_GET_CAP_INFO_GRPACK(n) (((n) & BIT14) >> 14)
-
-#endif /*#ifdef __BIG_ENDIAN */
-
-#define WLAN_SET_CAP_INFO_ESS(n) (n)
-#define WLAN_SET_CAP_INFO_IBSS(n) ((n) << 1)
-#define WLAN_SET_CAP_INFO_CFPOLLABLE(n) ((n) << 2)
-#define WLAN_SET_CAP_INFO_CFPOLLREQ(n) ((n) << 3)
-#define WLAN_SET_CAP_INFO_PRIVACY(n) ((n) << 4)
-#define WLAN_SET_CAP_INFO_SHORTPREAMBLE(n) ((n) << 5)
-#define WLAN_SET_CAP_INFO_SPECTRUMMNG(n) ((n) << 8)
-#define WLAN_SET_CAP_INFO_PBCC(n) ((n) << 6)
-#define WLAN_SET_CAP_INFO_AGILITY(n) ((n) << 7)
-#define WLAN_SET_CAP_INFO_SHORTSLOTTIME(n) ((n) << 10)
-#define WLAN_SET_CAP_INFO_DSSSOFDM(n) ((n) << 13)
-#define WLAN_SET_CAP_INFO_GRPACK(n) ((n) << 14)
-
-#define WLAN_SET_FC_PRVER(n) ((unsigned short)(n))
-#define WLAN_SET_FC_FTYPE(n) (((unsigned short)(n)) << 2)
-#define WLAN_SET_FC_FSTYPE(n) (((unsigned short)(n)) << 4)
-#define WLAN_SET_FC_TODS(n) (((unsigned short)(n)) << 8)
-#define WLAN_SET_FC_FROMDS(n) (((unsigned short)(n)) << 9)
-#define WLAN_SET_FC_MOREFRAG(n) (((unsigned short)(n)) << 10)
-#define WLAN_SET_FC_RETRY(n) (((unsigned short)(n)) << 11)
-#define WLAN_SET_FC_PWRMGT(n) (((unsigned short)(n)) << 12)
-#define WLAN_SET_FC_MOREDATA(n) (((unsigned short)(n)) << 13)
-#define WLAN_SET_FC_ISWEP(n) (((unsigned short)(n)) << 14)
-#define WLAN_SET_FC_ORDER(n) (((unsigned short)(n)) << 15)
-
-#define WLAN_SET_SEQ_FRGNUM(n) ((unsigned short)(n))
-#define WLAN_SET_SEQ_SEQNUM(n) (((unsigned short)(n)) << 4)
-
-/* ERP Field bit */
-
-#define WLAN_GET_ERP_NONERP_PRESENT(n) ((n) & BIT0)
-#define WLAN_GET_ERP_USE_PROTECTION(n) (((n) & BIT1) >> 1)
-#define WLAN_GET_ERP_BARKER_MODE(n) (((n) & BIT2) >> 2)
-
-#define WLAN_SET_ERP_NONERP_PRESENT(n) (n)
-#define WLAN_SET_ERP_USE_PROTECTION(n) ((n) << 1)
-#define WLAN_SET_ERP_BARKER_MODE(n) ((n) << 2)
-
-/* Support & Basic Rates field */
-#define WLAN_MGMT_IS_BASICRATE(b) ((b) & BIT7)
-#define WLAN_MGMT_GET_RATE(b) ((b) & ~BIT7)
-
-/* TIM field */
-#define WLAN_MGMT_IS_MULTICAST_TIM(b) ((b) & BIT0)
-#define WLAN_MGMT_GET_TIM_OFFSET(b) (((b) & ~BIT0) >> 1)
-
-/* 3-Addr & 4-Addr */
-#define WLAN_HDR_A3_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR3_LEN)
-#define WLAN_HDR_A4_DATA_PTR(p) (((unsigned char *)(p)) + WLAN_HDR_ADDR4_LEN)
-
-/* IEEE ADDR */
-#define IEEE_ADDR_UNIVERSAL 0x02
-#define IEEE_ADDR_GROUP 0x01
-
-typedef struct {
- unsigned char abyAddr[6];
-} IEEE_ADDR, *PIEEE_ADDR;
-
-/* 802.11 Header Format */
-
-typedef struct tagWLAN_80211HDR_A2 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A2, *PWLAN_80211HDR_A2;
-
-typedef struct tagWLAN_80211HDR_A3 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
- unsigned char abyAddr3[WLAN_ADDR_LEN];
- unsigned short wSeqCtl;
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A3, *PWLAN_80211HDR_A3;
-
-typedef struct tagWLAN_80211HDR_A4 {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[WLAN_ADDR_LEN];
- unsigned char abyAddr2[WLAN_ADDR_LEN];
- unsigned char abyAddr3[WLAN_ADDR_LEN];
- unsigned short wSeqCtl;
- unsigned char abyAddr4[WLAN_ADDR_LEN];
-} __attribute__ ((__packed__))
-WLAN_80211HDR_A4, *PWLAN_80211HDR_A4;
-
-typedef union tagUWLAN_80211HDR {
- WLAN_80211HDR_A2 sA2;
- WLAN_80211HDR_A3 sA3;
- WLAN_80211HDR_A4 sA4;
-} UWLAN_80211HDR, *PUWLAN_80211HDR;
-
-#endif /* __80211HDR_H__ */
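For reference, a minimal standalone sketch of how the WLAN_SET_/WLAN_GET_CAP_INFO_* helpers removed above compose and read the 802.11 capability field. It is not part of the patch; it re-defines the few macros it needs locally, assuming (as in the removed headers) that BITn expands to (1 << n), so it builds on its own.

/* Build a capability word with ESS, Privacy and Short Preamble set,
 * then read one bit back out the way the driver did. */
#include <stdio.h>

#define BIT4 (1 << 4)

#define WLAN_SET_CAP_INFO_ESS(n)           (n)
#define WLAN_SET_CAP_INFO_PRIVACY(n)       ((n) << 4)
#define WLAN_SET_CAP_INFO_SHORTPREAMBLE(n) ((n) << 5)
#define WLAN_GET_CAP_INFO_PRIVACY(n)       (((n) & BIT4) >> 4)

int main(void)
{
	unsigned short cap = WLAN_SET_CAP_INFO_ESS(1) |
			     WLAN_SET_CAP_INFO_PRIVACY(1) |
			     WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);

	/* Prints "cap=0x0031 privacy=1" */
	printf("cap=0x%04x privacy=%d\n", cap, WLAN_GET_CAP_INFO_PRIVACY(cap));
	return 0;
}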
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
deleted file mode 100644
index 7d2c6472ec9a..000000000000
--- a/drivers/staging/vt6655/80211mgr.c
+++ /dev/null
@@ -1,1019 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: 80211mgr.c
- *
- * Purpose: Handles the 802.11 management support functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- * Functions:
- * vMgrEncodeBeacon - Encode the Beacon frame
- * vMgrDecodeBeacon - Decode the Beacon frame
- * vMgrEncodeIBSSATIM - Encode the IBSS ATIM frame
- * vMgrDecodeIBSSATIM - Decode the IBSS ATIM frame
- * vMgrEncodeDisassociation - Encode the Disassociation frame
- * vMgrDecodeDisassociation - Decode the Disassociation frame
- * vMgrEncodeAssocRequest - Encode the Association request frame
- * vMgrDecodeAssocRequest - Decode the Association request frame
- * vMgrEncodeAssocResponse - Encode the Association response frame
- * vMgrDecodeAssocResponse - Decode the Association response frame
- * vMgrEncodeReAssocRequest - Encode the ReAssociation request frame
- * vMgrDecodeReAssocRequest - Decode the ReAssociation request frame
- * vMgrEncodeProbeRequest - Encode the Probe request frame
- * vMgrDecodeProbeRequest - Decode the Probe request frame
- * vMgrEncodeProbeResponse - Encode the Probe response frame
- * vMgrDecodeProbeResponse - Decode the Probe response frame
- * vMgrEncodeAuthen - Encode the Authentication frame
- * vMgrDecodeAuthen - Decode the Authentication frame
- * vMgrEncodeDeauthen - Encode the DeAuthentication frame
- * vMgrDecodeDeauthen - Decode the DeAuthentication frame
- * vMgrEncodeReassocResponse - Encode the Reassociation response frame
- * vMgrDecodeReassocResponse - Decode the Reassociation response frame
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "tether.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "device.h"
-#include "wpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Encode Beacon frame body offset
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeBeacon(
- PWLAN_FR_BEACON pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_CAPINFO);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_BEACON_OFF_SSID;
-}
-
-/*+
- *
- * Routine Description:
- * Decode Beacon frame body offset
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeBeacon(
- PWLAN_FR_BEACON pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_BEACON_OFF_CAPINFO);
-
- /* Information elements */
- pItem = (PWLAN_IE)((unsigned char *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))) +
- WLAN_BEACON_OFF_SSID);
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- /* pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem; */
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms = (PWLAN_IE_IBSS_PARMS)pItem;
- break;
- case WLAN_EID_TIM:
- if (pFrame->pTIM == NULL)
- pFrame->pTIM = (PWLAN_IE_TIM)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
-
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: /* 7 */
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: /* 32 */
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint =
- (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: /* 37 */
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: /* 40 */
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- pr_debug("Unrecognized EID=%dd in beacon decode\n",
- pItem->byElementID);
- break;
-
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode IBSS ATIM
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- pFrame->len = WLAN_HDR_ADDR3_LEN;
-}
-
-/*+
- *
- * Routine Description:
- * Decode IBSS ATIM
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-}
-
-/*+
- *
- * Routine Description:
- * Encode Disassociation
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DISASSOC_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON +
- sizeof(*(pFrame->pwReason));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Disassociation
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DISASSOC_OFF_REASON);
-}
-
-/*+
- *
- * Routine Description:
- * Encode Association Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_LISTEN_INT);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT +
- sizeof(*(pFrame->pwListenInterval));
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Decode Association Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCREQ_OFF_LISTEN_INT);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_ASSOCREQ_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- default:
- pr_debug("Unrecognized EID=%dd in assocreq decode\n",
- pItem->byElementID);
- break;
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Encode Association Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_AID);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID +
- sizeof(*(pFrame->pwAid));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Association Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_AID);
-
- /* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_ASSOCRESP_OFF_SUPP_RATES);
-
- pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
-
- if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
- (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- pr_debug("pFrame->pExtSuppRates=[%p]\n", pItem);
- } else {
- pFrame->pExtSuppRates = NULL;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Reassociation Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CURR_AP);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP +
- sizeof(*(pFrame->pAddrCurrAP));
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Decode Reassociation Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CAP_INFO);
- pFrame->pwListenInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_LISTEN_INT);
- pFrame->pAddrCurrAP = (PIEEE_ADDR)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCREQ_OFF_CURR_AP);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_REASSOCREQ_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
-
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
- default:
- pr_debug("Unrecognized EID=%dd in reassocreq decode\n",
- pItem->byElementID);
- break;
- }
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Probe Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- pFrame->len = WLAN_HDR_ADDR3_LEN;
-}
-
-/*+
- *
- * Routine Description:
- * Decode Probe Request
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)));
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
-
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- default:
- pr_debug("Bad EID=%dd in probereq\n",
- pItem->byElementID);
- break;
- }
-
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Probe Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_CAP_INFO);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
- sizeof(*(pFrame->pwCapInfo));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Probe Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pqwTimestamp = (__le64 *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_TS);
- pFrame->pwBeaconInterval = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_BCN_INT);
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_PROBERESP_OFF_CAP_INFO);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_PROBERESP_OFF_SSID);
-
- while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
- switch (pItem->byElementID) {
- case WLAN_EID_SSID:
- if (pFrame->pSSID == NULL)
- pFrame->pSSID = (PWLAN_IE_SSID)pItem;
- break;
- case WLAN_EID_SUPP_RATES:
- if (pFrame->pSuppRates == NULL)
- pFrame->pSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
- case WLAN_EID_FH_PARMS:
- break;
- case WLAN_EID_DS_PARMS:
- if (pFrame->pDSParms == NULL)
- pFrame->pDSParms = (PWLAN_IE_DS_PARMS)pItem;
- break;
- case WLAN_EID_CF_PARMS:
- if (pFrame->pCFParms == NULL)
- pFrame->pCFParms = (PWLAN_IE_CF_PARMS)pItem;
- break;
- case WLAN_EID_IBSS_PARMS:
- if (pFrame->pIBSSParms == NULL)
- pFrame->pIBSSParms =
- (PWLAN_IE_IBSS_PARMS)pItem;
- break;
-
- case WLAN_EID_RSN:
- if (pFrame->pRSN == NULL)
- pFrame->pRSN = (PWLAN_IE_RSN)pItem;
- break;
- case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
- if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == true)
- pFrame->pRSNWPA =
- (PWLAN_IE_RSN_EXT)pItem;
- }
- break;
- case WLAN_EID_ERP:
- if (pFrame->pERP == NULL)
- pFrame->pERP = (PWLAN_IE_ERP)pItem;
- break;
- case WLAN_EID_EXTSUPP_RATES:
- if (pFrame->pExtSuppRates == NULL)
- pFrame->pExtSuppRates =
- (PWLAN_IE_SUPP_RATES)pItem;
- break;
-
- case WLAN_EID_COUNTRY: /* 7 */
- if (pFrame->pIE_Country == NULL)
- pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
- break;
-
- case WLAN_EID_PWR_CONSTRAINT: /* 32 */
- if (pFrame->pIE_PowerConstraint == NULL)
- pFrame->pIE_PowerConstraint =
- (PWLAN_IE_PW_CONST)pItem;
- break;
-
- case WLAN_EID_CH_SWITCH: /* 37 */
- if (pFrame->pIE_CHSW == NULL)
- pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
- break;
-
- case WLAN_EID_QUIET: /* 40 */
- if (pFrame->pIE_Quiet == NULL)
- pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
- break;
-
- case WLAN_EID_IBSS_DFS:
- if (pFrame->pIE_IBSSDFS == NULL)
- pFrame->pIE_IBSSDFS = (PWLAN_IE_IBSS_DFS)pItem;
- break;
-
- default:
- pr_debug("Bad EID=%dd in proberesp\n",
- pItem->byElementID);
- break;
- }
-
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Encode Authentication frame
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_STATUS);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS +
- sizeof(*(pFrame->pwStatus));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Authentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwAuthAlgorithm = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_ALG);
- pFrame->pwAuthSequence = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_AUTH_SEQ);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_AUTHEN_OFF_STATUS);
-
- /* Information elements */
- pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
- + WLAN_AUTHEN_OFF_CHALLENGE);
-
- if (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len) &&
- pItem->byElementID == WLAN_EID_CHALLENGE)
- pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
-}
-
-/*+
- *
- * Routine Description:
- * Encode Deauthentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DEAUTHEN_OFF_REASON);
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON +
- sizeof(*(pFrame->pwReason));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Deauthentication
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwReason = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_DEAUTHEN_OFF_REASON);
-}
-
-/*+
- *
- * Routine Description: (AP)
- * Encode Reassociation Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrEncodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-)
-{
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_AID);
-
- pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID +
- sizeof(*(pFrame->pwAid));
-}
-
-/*+
- *
- * Routine Description:
- * Decode Reassociation Response
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDecodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-)
-{
- PWLAN_IE pItem;
-
- pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- /* Fixed Fields */
- pFrame->pwCapInfo = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_CAP_INFO);
- pFrame->pwStatus = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_STATUS);
- pFrame->pwAid = (unsigned short *)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_AID);
-
- /* Information elements */
- pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)
- (WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)) +
- WLAN_REASSOCRESP_OFF_SUPP_RATES);
-
- pItem = (PWLAN_IE)(pFrame->pSuppRates);
- pItem = (PWLAN_IE)(((unsigned char *)pItem) + 2 + pItem->len);
-
- if ((((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) &&
- (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
- pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- }
-}
diff --git a/drivers/staging/vt6655/80211mgr.h b/drivers/staging/vt6655/80211mgr.h
deleted file mode 100644
index d462a8af087b..000000000000
--- a/drivers/staging/vt6655/80211mgr.h
+++ /dev/null
@@ -1,725 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: 80211mgr.h
- *
- * Purpose: 802.11 management frames pre-defines.
- *
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __80211MGR_H__
-#define __80211MGR_H__
-
-#include <linux/types.h>
-#include "linux/ieee80211.h"
-
-#include "ttype.h"
-#include "80211hdr.h"
-
-#define WLAN_MIN_ARRAY 1
-
-/* Information Element ID value */
-#define WLAN_EID_FH_PARMS 2
-#define WLAN_EID_DS_PARMS 3
-#define WLAN_EID_CF_PARMS 4
-#define WLAN_EID_IBSS_PARMS 6
-#define WLAN_EID_TPC_REQ 34
-#define WLAN_EID_TPC_REP 35
-#define WLAN_EID_SUPP_CH 36
-#define WLAN_EID_CH_SWITCH 37
-#define WLAN_EID_MEASURE_REQ 38
-#define WLAN_EID_MEASURE_REP 39
-#define WLAN_EID_QUIET 40
-#define WLAN_EID_IBSS_DFS 41
-#define WLAN_EID_ERP 42
-/* reference 802.11i 7.3.2 table 20 */
-#define WLAN_EID_EXTSUPP_RATES 50
-/* reference WiFi WPA spec. */
-#define WLAN_EID_RSN_WPA 221
-
-#define WLAN_EID_ERP_NONERP_PRESENT 0x01
-#define WLAN_EID_ERP_USE_PROTECTION 0x02
-#define WLAN_EID_ERP_BARKER_MODE 0x04
-
-/* Reason Codes */
-#define WLAN_MGMT_REASON_RSVD 0
-#define WLAN_MGMT_REASON_UNSPEC 1
-#define WLAN_MGMT_REASON_PRIOR_AUTH_INVALID 2
-#define WLAN_MGMT_REASON_DEAUTH_LEAVING 3
-#define WLAN_MGMT_REASON_DISASSOC_INACTIVE 4
-#define WLAN_MGMT_REASON_DISASSOC_AP_BUSY 5
-#define WLAN_MGMT_REASON_CLASS2_NONAUTH 6
-#define WLAN_MGMT_REASON_CLASS3_NONASSOC 7
-#define WLAN_MGMT_REASON_DISASSOC_STA_HASLEFT 8
-#define WLAN_MGMT_REASON_CANT_ASSOC_NONAUTH 9
-#define WLAN_MGMT_REASON_DISASSOC_PWR_CAP_UNACCEPT 10
-#define WLAN_MGMT_REASON_DISASSOC_SUPP_CH_UNACCEPT 11
-#define WLAN_MGMT_REASON_INVALID_IE 13
-#define WLAN_MGMT_REASON_MIC_FAILURE 14
-#define WLAN_MGMT_REASON_4WAY_HANDSHAKE_TIMEOUT 15
-#define WLAN_MGMT_REASON_GRPKEY_UPDATE_TIMEOUT 16
-#define WLAN_MGMT_REASON_4WAY_INFO_DIFFERENT 17
-#define WLAN_MGMT_REASON_MULTCAST_CIPHER_INVALID 18
-#define WLAN_MGMT_REASON_UNCAST_CIPHER_INVALID 19
-#define WLAN_MGMT_REASON_AKMP_INVALID 20
-#define WLAN_MGMT_REASON_RSNE_UNSUPPORTED 21
-#define WLAN_MGMT_REASON_RSNE_CAP_INVALID 22
-#define WLAN_MGMT_REASON_80211X_AUTH_FAILED 23
-
-/* Status Codes */
-#define WLAN_MGMT_STATUS_SUCCESS 0
-#define WLAN_MGMT_STATUS_UNSPEC_FAILURE 1
-#define WLAN_MGMT_STATUS_CAPS_UNSUPPORTED 10
-#define WLAN_MGMT_STATUS_REASSOC_NO_ASSOC 11
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_UNSPEC 12
-#define WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG 13
-#define WLAN_MGMT_STATUS_RX_AUTH_NOSEQ 14
-#define WLAN_MGMT_STATUS_CHALLENGE_FAIL 15
-#define WLAN_MGMT_STATUS_AUTH_TIMEOUT 16
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_BUSY 17
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_RATES 18
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_SHORTPREAMBLE 19
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC 20
-#define WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY 21
-
-/* reference 802.11h 7.3.1.9 */
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SPECTRUM_MNG 22
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_PWR_CAP 23
-#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SUPP_CH 24
-/* reference 802.11g 7.3.1.9 */
-#define WLAN_MGMT_STATUS_SHORTSLOTTIME_UNSUPPORTED 25
-#define WLAN_MGMT_STATUS_DSSSOFDM_UNSUPPORTED 26
-/* reference 802.11i 7.3.1.9 table 19 */
-#define WLAN_MGMT_STATUS_INVALID_IE 40
-#define WLAN_MGMT_STATUS_GROUP_CIPHER_INVALID 41
-#define WLAN_MGMT_STATUS_PAIRWISE_CIPHER_INVALID 42
-#define WLAN_MGMT_STATUS_AKMP_INVALID 43
-#define WLAN_MGMT_STATUS_UNSUPPORT_RSN_IE_VER 44
-#define WLAN_MGMT_STATUS_INVALID_RSN_IE_CAP 45
-#define WLAN_MGMT_STATUS_CIPHER_REJECT 46
-
-/* Auth Algorithm */
-#define WLAN_AUTH_ALG_OPENSYSTEM 0
-#define WLAN_AUTH_ALG_SHAREDKEY 1
-
-/* Management Frame Field Offsets */
-/* Note: Not all fields are listed because of variable lengths. */
-/* Note: These offsets are from the start of the frame data */
-
-#define WLAN_BEACON_OFF_TS 0
-#define WLAN_BEACON_OFF_BCN_INT 8
-#define WLAN_BEACON_OFF_CAPINFO 10
-#define WLAN_BEACON_OFF_SSID 12
-
-#define WLAN_DISASSOC_OFF_REASON 0
-
-#define WLAN_ASSOCREQ_OFF_CAP_INFO 0
-#define WLAN_ASSOCREQ_OFF_LISTEN_INT 2
-#define WLAN_ASSOCREQ_OFF_SSID 4
-
-#define WLAN_ASSOCRESP_OFF_CAP_INFO 0
-#define WLAN_ASSOCRESP_OFF_STATUS 2
-#define WLAN_ASSOCRESP_OFF_AID 4
-#define WLAN_ASSOCRESP_OFF_SUPP_RATES 6
-
-#define WLAN_REASSOCREQ_OFF_CAP_INFO 0
-#define WLAN_REASSOCREQ_OFF_LISTEN_INT 2
-#define WLAN_REASSOCREQ_OFF_CURR_AP 4
-#define WLAN_REASSOCREQ_OFF_SSID 10
-
-#define WLAN_REASSOCRESP_OFF_CAP_INFO 0
-#define WLAN_REASSOCRESP_OFF_STATUS 2
-#define WLAN_REASSOCRESP_OFF_AID 4
-#define WLAN_REASSOCRESP_OFF_SUPP_RATES 6
-
-#define WLAN_PROBEREQ_OFF_SSID 0
-
-#define WLAN_PROBERESP_OFF_TS 0
-#define WLAN_PROBERESP_OFF_BCN_INT 8
-#define WLAN_PROBERESP_OFF_CAP_INFO 10
-#define WLAN_PROBERESP_OFF_SSID 12
-
-#define WLAN_AUTHEN_OFF_AUTH_ALG 0
-#define WLAN_AUTHEN_OFF_AUTH_SEQ 2
-#define WLAN_AUTHEN_OFF_STATUS 4
-#define WLAN_AUTHEN_OFF_CHALLENGE 6
-
-#define WLAN_DEAUTHEN_OFF_REASON 0
-
-/* Cipher Suite Selectors defined in 802.11i */
-#define WLAN_11i_CSS_USE_GROUP 0
-#define WLAN_11i_CSS_WEP40 1
-#define WLAN_11i_CSS_TKIP 2
-#define WLAN_11i_CSS_CCMP 4
-#define WLAN_11i_CSS_WEP104 5
-#define WLAN_11i_CSS_UNKNOWN 255
-
-/* Authentication and Key Management Suite Selectors defined in 802.11i */
-#define WLAN_11i_AKMSS_802_1X 1
-#define WLAN_11i_AKMSS_PSK 2
-#define WLAN_11i_AKMSS_UNKNOWN 255
-
-/* Measurement type definitions reference ieee 802.11h Table 20b */
-#define MEASURE_TYPE_BASIC 0
-#define MEASURE_TYPE_CCA 1
-#define MEASURE_TYPE_RPI 2
-
-/* Measurement request mode definitions reference ieee 802.11h Figure 46h */
-#define MEASURE_MODE_ENABLE 0x02
-#define MEASURE_MODE_REQ 0x04
-#define MEASURE_MODE_REP 0x08
-
-/* Measurement report mode definitions reference ieee 802.11h Figure 46m */
-#define MEASURE_MODE_LATE 0x01
-#define MEASURE_MODE_INCAPABLE 0x02
-#define MEASURE_MODE_REFUSED 0x04
-
-/* Information Element Types */
-
-#pragma pack(1)
-typedef struct tagWLAN_IE {
- unsigned char byElementID;
- unsigned char len;
-} __attribute__ ((__packed__))
-WLAN_IE, *PWLAN_IE;
-
-/* Service Set Identity (SSID) */
-#pragma pack(1)
-typedef struct tagWLAN_IE_SSID {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abySSID[1];
-} __attribute__ ((__packed__))
-WLAN_IE_SSID, *PWLAN_IE_SSID;
-
-/* Supported Rates */
-#pragma pack(1)
-typedef struct tagWLAN_IE_SUPP_RATES {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyRates[1];
-} __attribute__ ((__packed__))
-WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
-
-/* FH Parameter Set */
-#pragma pack(1)
-typedef struct _WLAN_IE_FH_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wDwellTime;
- unsigned char byHopSet;
- unsigned char byHopPattern;
- unsigned char byHopIndex;
-} WLAN_IE_FH_PARMS, *PWLAN_IE_FH_PARMS;
-
-/* DS Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_DS_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byCurrChannel;
-} __attribute__ ((__packed__))
-WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
-
-/* CF Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_CF_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byCFPCount;
- unsigned char byCFPPeriod;
- unsigned short wCFPMaxDuration;
- unsigned short wCFPDurRemaining;
-} __attribute__ ((__packed__))
-WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
-
-/* TIM */
-#pragma pack(1)
-typedef struct tagWLAN_IE_TIM {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byDTIMCount;
- unsigned char byDTIMPeriod;
- unsigned char byBitMapCtl;
- unsigned char byVirtBitMap[1];
-} __attribute__ ((__packed__))
-WLAN_IE_TIM, *PWLAN_IE_TIM;
-
-/* IBSS Parameter Set */
-#pragma pack(1)
-typedef struct tagWLAN_IE_IBSS_PARMS {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wATIMWindow;
-} __attribute__ ((__packed__))
-WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
-
-/* Challenge Text */
-#pragma pack(1)
-typedef struct tagWLAN_IE_CHALLENGE {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyChallenge[1];
-} __attribute__ ((__packed__))
-WLAN_IE_CHALLENGE, *PWLAN_IE_CHALLENGE;
-
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN_EXT {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyOUI[4];
- unsigned short wVersion;
- unsigned char abyMulticast[4];
- unsigned short wPKCount;
- struct {
- unsigned char abyOUI[4];
- } PKSList[1]; /* the remainder is variable-length, so the */
- /* ieauth structure is overlaid on it */
-} WLAN_IE_RSN_EXT, *PWLAN_IE_RSN_EXT;
-
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN_AUTH {
- unsigned short wAuthCount;
- struct {
- unsigned char abyOUI[4];
- } AuthKSList[1];
-} WLAN_IE_RSN_AUTH, *PWLAN_IE_RSN_AUTH;
-
-/* RSN Identity */
-#pragma pack(1)
-typedef struct tagWLAN_IE_RSN {
- unsigned char byElementID;
- unsigned char len;
- unsigned short wVersion;
- unsigned char abyRSN[WLAN_MIN_ARRAY];
-} WLAN_IE_RSN, *PWLAN_IE_RSN;
-
-/* ERP */
-#pragma pack(1)
-typedef struct tagWLAN_IE_ERP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byContext;
-} __attribute__ ((__packed__))
-WLAN_IE_ERP, *PWLAN_IE_ERP;
-
-#pragma pack(1)
-typedef struct _MEASEURE_REQ {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
-} MEASEURE_REQ, *PMEASEURE_REQ,
- MEASEURE_REQ_BASIC, *PMEASEURE_REQ_BASIC,
- MEASEURE_REQ_CCA, *PMEASEURE_REQ_CCA,
- MEASEURE_REQ_RPI, *PMEASEURE_REQ_RPI;
-
-typedef struct _MEASEURE_REP_BASIC {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char byMap;
-} MEASEURE_REP_BASIC, *PMEASEURE_REP_BASIC;
-
-typedef struct _MEASEURE_REP_CCA {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char byCCABusyFraction;
-} MEASEURE_REP_CCA, *PMEASEURE_REP_CCA;
-
-typedef struct _MEASEURE_REP_RPI {
- unsigned char byChannel;
- unsigned char abyStartTime[8];
- unsigned char abyDuration[2];
- unsigned char abyRPIdensity[8];
-} MEASEURE_REP_RPI, *PMEASEURE_REP_RPI;
-
-typedef union _MEASEURE_REP {
- MEASEURE_REP_BASIC sBasic;
- MEASEURE_REP_CCA sCCA;
- MEASEURE_REP_RPI sRPI;
-} MEASEURE_REP, *PMEASEURE_REP;
-
-typedef struct _WLAN_IE_MEASURE_REQ {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byToken;
- unsigned char byMode;
- unsigned char byType;
- MEASEURE_REQ sReq;
-} WLAN_IE_MEASURE_REQ, *PWLAN_IE_MEASURE_REQ;
-
-typedef struct _WLAN_IE_MEASURE_REP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byToken;
- unsigned char byMode;
- unsigned char byType;
- MEASEURE_REP sRep;
-} WLAN_IE_MEASURE_REP, *PWLAN_IE_MEASURE_REP;
-
-typedef struct _WLAN_IE_CH_SW {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byMode;
- unsigned char byChannel;
- unsigned char byCount;
-} WLAN_IE_CH_SW, *PWLAN_IE_CH_SW;
-
-typedef struct _WLAN_IE_QUIET {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byQuietCount;
- unsigned char byQuietPeriod;
- unsigned char abyQuietDuration[2];
- unsigned char abyQuietOffset[2];
-} WLAN_IE_QUIET, *PWLAN_IE_QUIET;
-
-typedef struct _WLAN_IE_COUNTRY {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyCountryString[3];
- unsigned char abyCountryInfo[3];
-} WLAN_IE_COUNTRY, *PWLAN_IE_COUNTRY;
-
-typedef struct _WLAN_IE_PW_CONST {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byPower;
-} WLAN_IE_PW_CONST, *PWLAN_IE_PW_CONST;
-
-typedef struct _WLAN_IE_PW_CAP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byMinPower;
- unsigned char byMaxPower;
-} WLAN_IE_PW_CAP, *PWLAN_IE_PW_CAP;
-
-typedef struct _WLAN_IE_SUPP_CH {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyChannelTuple[2];
-} WLAN_IE_SUPP_CH, *PWLAN_IE_SUPP_CH;
-
-typedef struct _WLAN_IE_TPC_REQ {
- unsigned char byElementID;
- unsigned char len;
-} WLAN_IE_TPC_REQ, *PWLAN_IE_TPC_REQ;
-
-typedef struct _WLAN_IE_TPC_REP {
- unsigned char byElementID;
- unsigned char len;
- unsigned char byTxPower;
- unsigned char byLinkMargin;
-} WLAN_IE_TPC_REP, *PWLAN_IE_TPC_REP;
-
-typedef struct _WLAN_IE_IBSS_DFS {
- unsigned char byElementID;
- unsigned char len;
- unsigned char abyDFSOwner[6];
- unsigned char byDFSRecovery;
- unsigned char abyChannelMap[2];
-} WLAN_IE_IBSS_DFS, *PWLAN_IE_IBSS_DFS;
-
-#pragma pack()
-
-/* Frame Types */
-/* prototype structure, all mgmt frame types will start with these members */
-typedef struct tagWLAN_FR_MGMT {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
-} WLAN_FR_MGMT, *PWLAN_FR_MGMT;
-
-/* Beacon frame */
-typedef struct tagWLAN_FR_BEACON {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- __le64 *pqwTimestamp;
- unsigned short *pwBeaconInterval;
- unsigned short *pwCapInfo;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_DS_PARMS pDSParms;
- PWLAN_IE_CF_PARMS pCFParms;
- PWLAN_IE_TIM pTIM;
- PWLAN_IE_IBSS_PARMS pIBSSParms;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_ERP pERP;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_COUNTRY pIE_Country;
- PWLAN_IE_PW_CONST pIE_PowerConstraint;
- PWLAN_IE_CH_SW pIE_CHSW;
- PWLAN_IE_IBSS_DFS pIE_IBSSDFS;
- PWLAN_IE_QUIET pIE_Quiet;
-} WLAN_FR_BEACON, *PWLAN_FR_BEACON;
-
-/* IBSS ATIM frame */
-typedef struct tagWLAN_FR_IBSSATIM {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
-} WLAN_FR_IBSSATIM, *PWLAN_FR_IBSSATIM;
-
-/* Disassociation */
-typedef struct tagWLAN_FR_DISASSOC {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwReason;
-} WLAN_FR_DISASSOC, *PWLAN_FR_DISASSOC;
-
-/* Association Request */
-typedef struct tagWLAN_FR_ASSOCREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwListenInterval;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_PW_CAP pCurrPowerCap;
- PWLAN_IE_SUPP_CH pCurrSuppCh;
-} WLAN_FR_ASSOCREQ, *PWLAN_FR_ASSOCREQ;
-
-/* Association Response */
-typedef struct tagWLAN_FR_ASSOCRESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwStatus;
- unsigned short *pwAid;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_ASSOCRESP, *PWLAN_FR_ASSOCRESP;
-
-/* Reassociation Request */
-typedef struct tagWLAN_FR_REASSOCREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwListenInterval;
- PIEEE_ADDR pAddrCurrAP;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_REASSOCREQ, *PWLAN_FR_REASSOCREQ;
-
-/* Reassociation Response */
-typedef struct tagWLAN_FR_REASSOCRESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwCapInfo;
- unsigned short *pwStatus;
- unsigned short *pwAid;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_REASSOCRESP, *PWLAN_FR_REASSOCRESP;
-
-/* Probe Request */
-typedef struct tagWLAN_FR_PROBEREQ {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
-} WLAN_FR_PROBEREQ, *PWLAN_FR_PROBEREQ;
-
-/* Probe Response */
-typedef struct tagWLAN_FR_PROBERESP {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- __le64 *pqwTimestamp;
- unsigned short *pwBeaconInterval;
- unsigned short *pwCapInfo;
- PWLAN_IE_SSID pSSID;
- PWLAN_IE_SUPP_RATES pSuppRates;
- PWLAN_IE_DS_PARMS pDSParms;
- PWLAN_IE_CF_PARMS pCFParms;
- PWLAN_IE_IBSS_PARMS pIBSSParms;
- PWLAN_IE_RSN pRSN;
- PWLAN_IE_RSN_EXT pRSNWPA;
- PWLAN_IE_ERP pERP;
- PWLAN_IE_SUPP_RATES pExtSuppRates;
- PWLAN_IE_COUNTRY pIE_Country;
- PWLAN_IE_PW_CONST pIE_PowerConstraint;
- PWLAN_IE_CH_SW pIE_CHSW;
- PWLAN_IE_IBSS_DFS pIE_IBSSDFS;
- PWLAN_IE_QUIET pIE_Quiet;
-} WLAN_FR_PROBERESP, *PWLAN_FR_PROBERESP;
-
-/* Authentication */
-typedef struct tagWLAN_FR_AUTHEN {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwAuthAlgorithm;
- unsigned short *pwAuthSequence;
- unsigned short *pwStatus;
- PWLAN_IE_CHALLENGE pChallenge;
-} WLAN_FR_AUTHEN, *PWLAN_FR_AUTHEN;
-
-/* Deauthentication */
-typedef struct tagWLAN_FR_DEAUTHEN {
- unsigned int uType;
- unsigned int len;
- unsigned char *pBuf;
- PUWLAN_80211HDR pHdr;
- unsigned short *pwReason;
-} WLAN_FR_DEAUTHEN, *PWLAN_FR_DEAUTHEN;
-
-void
-vMgrEncodeBeacon(
- PWLAN_FR_BEACON pFrame
-);
-
-void
-vMgrDecodeBeacon(
- PWLAN_FR_BEACON pFrame
-);
-
-void
-vMgrEncodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-);
-
-void
-vMgrDecodeIBSSATIM(
- PWLAN_FR_IBSSATIM pFrame
-);
-
-void
-vMgrEncodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-);
-
-void
-vMgrDecodeDisassociation(
- PWLAN_FR_DISASSOC pFrame
-);
-
-void
-vMgrEncodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-);
-
-void
-vMgrDecodeAssocRequest(
- PWLAN_FR_ASSOCREQ pFrame
-);
-
-void
-vMgrEncodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-);
-
-void
-vMgrDecodeAssocResponse(
- PWLAN_FR_ASSOCRESP pFrame
-);
-
-void
-vMgrEncodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-);
-
-void
-vMgrDecodeReassocRequest(
- PWLAN_FR_REASSOCREQ pFrame
-);
-
-void
-vMgrEncodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-);
-
-void
-vMgrDecodeProbeRequest(
- PWLAN_FR_PROBEREQ pFrame
-);
-
-void
-vMgrEncodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-);
-
-void
-vMgrDecodeProbeResponse(
- PWLAN_FR_PROBERESP pFrame
-);
-
-void
-vMgrEncodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-);
-
-void
-vMgrDecodeAuthen(
- PWLAN_FR_AUTHEN pFrame
-);
-
-void
-vMgrEncodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-);
-
-void
-vMgrDecodeDeauthen(
- PWLAN_FR_DEAUTHEN pFrame
-);
-
-void
-vMgrEncodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-);
-
-void
-vMgrDecodeReassocResponse(
- PWLAN_FR_REASSOCRESP pFrame
-);
-
-#endif/* __80211MGR_H__ */
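The vMgrDecode* routines above all consume the variable part of a management frame as a flat ID/length/value list, stepping the cursor by 2 + len each time. Below is a standalone sketch of that walk over a made-up beacon body (not taken from the driver), with an explicit bounds check added.

#include <stdio.h>

int main(void)
{
	/* Fabricated IE list: SSID "ab" (EID 0), then two supported rates (EID 1) */
	unsigned char body[] = { 0x00, 0x02, 'a', 'b', 0x01, 0x02, 0x82, 0x84 };
	unsigned char *p = body;
	unsigned char *end = body + sizeof(body);

	while (p + 2 <= end && p + 2 + p[1] <= end) {
		printf("EID=%u len=%u\n", p[0], p[1]);
		p += 2 + p[1];	/* same stride the decode loops use */
	}
	return 0;
}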
diff --git a/drivers/staging/vt6655/IEEE11h.c b/drivers/staging/vt6655/IEEE11h.c
deleted file mode 100644
index 180a27cc74d7..000000000000
--- a/drivers/staging/vt6655/IEEE11h.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 1996, 2005 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: IEEE11h.c
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: Mar. 31, 2005
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "IEEE11h.h"
-#include "device.h"
-#include "wmgr.h"
-#include "rxtx.h"
-#include "channel.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-#pragma pack(1)
-
-typedef struct _WLAN_FRAME_ACTION {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char abyVars[1];
-} WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION;
-
-typedef struct _WLAN_FRAME_MSRREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REQ sMSRReqEIDs[1];
-} WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ;
-
-typedef struct _WLAN_FRAME_MSRREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_MEASURE_REP sMSRRepEIDs[1];
-} WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP;
-
-typedef struct _WLAN_FRAME_TPCREQ {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REQ sTPCReqEIDs;
-} WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ;
-
-typedef struct _WLAN_FRAME_TPCREP {
- WLAN_80211HDR_A3 Header;
- unsigned char byCategory;
- unsigned char byAction;
- unsigned char byDialogToken;
- WLAN_IE_TPC_REP sTPCRepEIDs;
-} WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP;
-
-#pragma pack()
-
-/* action field reference ieee 802.11h Table 20e */
-#define ACTION_MSRREQ 0
-#define ACTION_MSRREP 1
-#define ACTION_TPCREQ 2
-#define ACTION_TPCREP 3
-#define ACTION_CHSW 4
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool IEEE11hbMSRRepTx(void *pMgmtHandle)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP)
- (pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket));
- size_t uLength = 0;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket +
- sizeof(STxMgmtPacket));
-
- pMSRRep->Header.wFrameCtl =
- (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION));
-
- memcpy(pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ)
- (pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN);
- memcpy(pMSRRep->Header.abyAddr2,
- CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN);
- memcpy(pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- pMSRRep->byCategory = 0;
- pMSRRep->byAction = 1;
- pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ)
- (pMgmt->abyCurrentMSRReq))->byDialogToken;
-
- uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP,
- sMSRRepEIDs);
-
- pTxPacket->cbMPDULen = uLength;
- pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN;
- if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING)
- return false;
- return true;
-}
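IEEE11hbMSRRepTx() above builds the measurement-report action frame by OR-ing frame-control subfields together. A standalone sketch of that composition follows; the WLAN_SET_FC_* macros are copied from the removed 80211hdr.h, while the type/subtype values (management = 0, action = 13) are the standard 802.11 codes, since the driver's own defines fall outside this hunk.

#include <stdio.h>

#define WLAN_SET_FC_FTYPE(n)  (((unsigned short)(n)) << 2)
#define WLAN_SET_FC_FSTYPE(n) (((unsigned short)(n)) << 4)

#define WLAN_FTYPE_MGMT    0	/* assumed: standard 802.11 type code */
#define WLAN_FSTYPE_ACTION 13	/* assumed: standard 802.11 subtype code */

int main(void)
{
	unsigned short fc = WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) |
			    WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION);

	/* Prints "fc=0x00d0": version 0, type 0 (management), subtype 13 (action) */
	printf("fc=0x%04x\n", fc);
	return 0;
}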
diff --git a/drivers/staging/vt6655/IEEE11h.h b/drivers/staging/vt6655/IEEE11h.h
deleted file mode 100644
index 551922022b19..000000000000
--- a/drivers/staging/vt6655/IEEE11h.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1996, 2005 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: IEEE11h.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- * with IEEE 802.11h.
- *
- * Author: Yiching Chen
- *
- * Date: Mar. 31, 2005
- *
- */
-
-#ifndef __IEEE11h_H__
-#define __IEEE11h_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-
-bool IEEE11hbMSRRepTx(
- void *pMgmtHandle
-);
-
-#endif // __IEEE11h_H__
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index c3ba693a8cad..77cfc708c516 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,8 +1,6 @@
config VT6655
tristate "VIA Technologies VT6655 support"
- depends on PCI && WLAN && m
- select WIRELESS_EXT
- select WEXT_PRIV
+ depends on PCI && MAC80211 && m
---help---
This is a vendor-written driver for VIA VT6655.
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index f7544a6cb63e..115b951bf0d9 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -7,33 +7,12 @@ vt6655_stage-y += device_main.o \
channel.o \
mac.o \
baseband.o \
- wctl.o \
- 80211mgr.o \
- wcmd.o \
- wmgr.o \
- bssdb.o \
rxtx.o \
dpc.o \
power.o \
- datarate.o \
srom.o \
mib.o \
- rc4.o \
- tether.o \
- tcrc.o \
- ioctl.o \
- hostap.o \
- wpa.o \
key.o \
- tkip.o \
- michael.o \
- wroute.o \
- rf.o \
- iwctl.o \
- wpactl.o \
- wpa2.o \
- aes_ccmp.o \
- vntwifi.o \
- IEEE11h.o
+ rf.o
obj-$(CONFIG_VT6655) += vt6655_stage.o
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
deleted file mode 100644
index 1dfcfcb3c69c..000000000000
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: aes_ccmp.c
- *
- * Purpose: AES_CCMP decryption
- *
- * Author: Warren Hsu
- *
- * Date: Feb 15, 2005
- *
- * Functions:
- * AESbGenCCMP - Parsing RX-packet
- *
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "80211hdr.h"
-#include "aes_ccmp.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*
- * SBOX Table
- */
-
-static unsigned char sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-};
-
-static unsigned char dot2_table[256] = {
- 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
- 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
- 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
- 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
- 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
- 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
- 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
- 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
- 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
- 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
- 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
- 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
- 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
- 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
- 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
- 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
-};
-
-static unsigned char dot3_table[256] = {
- 0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
- 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
- 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
- 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
- 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
- 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
- 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
- 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
- 0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
- 0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
- 0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
- 0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
- 0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
- 0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
- 0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
- 0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
-};
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-static void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
-{
- unsigned long *dwPtrA = (unsigned long *)a;
- unsigned long *dwPtrB = (unsigned long *)b;
- unsigned long *dwPtrOut = (unsigned long *)out;
-
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
-}
-
-static void xor_32(unsigned char *a, unsigned char *b, unsigned char *out)
-{
- unsigned long *dwPtrA = (unsigned long *)a;
- unsigned long *dwPtrB = (unsigned long *)b;
- unsigned long *dwPtrOut = (unsigned long *)out;
-
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
-}
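
The xor_128()/xor_32() helpers removed here stride through their buffers as unsigned long, which is 4 bytes on a 32-bit build but 8 bytes on a 64-bit one, where the four increments in xor_128() would cover 32 bytes of what are only 16-byte arrays. A width-independent sketch of the same operation (hypothetical, for illustration only; the kernel's crypto_xor() also provides an in-place byte XOR):

	static void xor_128_bytewise(const u8 *a, const u8 *b, u8 *out)
	{
		int i;

		for (i = 0; i < 16; i++)	/* one AES block, regardless of word size */
			out[i] = a[i] ^ b[i];
	}
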
-
-static void AddRoundKey(unsigned char *key, int round)
-{
- unsigned char sbox_key[4];
- unsigned char rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
-
- sbox_key[0] = sbox_table[key[13]];
- sbox_key[1] = sbox_table[key[14]];
- sbox_key[2] = sbox_table[key[15]];
- sbox_key[3] = sbox_table[key[12]];
-
- key[0] = key[0] ^ rcon_table[round];
- xor_32(&key[0], sbox_key, &key[0]);
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-}
-
-static void SubBytes(unsigned char *in, unsigned char *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox_table[in[i]];
-}
-
-static void ShiftRows(unsigned char *in, unsigned char *out)
-{
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-}
-
-static void MixColumns(unsigned char *in, unsigned char *out)
-{
- out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
- out[1] = in[0] ^ dot2_table[in[1]] ^ dot3_table[in[2]] ^ in[3];
- out[2] = in[0] ^ in[1] ^ dot2_table[in[2]] ^ dot3_table[in[3]];
- out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
-}
-
-static void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
-{
- int i;
- int round;
- unsigned char TmpdataA[16];
- unsigned char TmpdataB[16];
- unsigned char abyRoundKey[16];
-
- for (i = 0; i < 16; i++)
- abyRoundKey[i] = key[i];
-
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(abyRoundKey, data, ciphertext);
- AddRoundKey(abyRoundKey, round);
- } else if (round == 10) {
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- xor_128(TmpdataB, abyRoundKey, ciphertext);
- } else /* round 1 ~ 9 */{
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- MixColumns(&TmpdataB[0], &TmpdataA[0]);
- MixColumns(&TmpdataB[4], &TmpdataA[4]);
- MixColumns(&TmpdataB[8], &TmpdataA[8]);
- MixColumns(&TmpdataB[12], &TmpdataA[12]);
- xor_128(TmpdataA, abyRoundKey, ciphertext);
- AddRoundKey(abyRoundKey, round);
- }
- }
-}
-
-/*
- * Description: AES decryption
- *
- * Parameters:
- * In:
- * pbyRxKey - The key used to decrypt
- * pbyFrame - Starting address of packet header
- * wFrameSize - Total packet size including CRC
- * Out:
- * none
- *
- * Return Value: MIC compare result
- *
- */
-bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize)
-{
- unsigned char abyNonce[13];
- unsigned char MIC_IV[16];
- unsigned char MIC_HDR1[16];
- unsigned char MIC_HDR2[16];
- unsigned char abyMIC[16];
- unsigned char abyCTRPLD[16];
- unsigned char abyTmp[16];
- unsigned char abyPlainText[16];
- unsigned char abyLastCipher[16];
-
- PS802_11Header pMACHeader = (PS802_11Header) pbyFrame;
- unsigned char *pbyIV;
- unsigned char *pbyPayload;
- unsigned short wHLen = 22;
- unsigned short wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;/* 8 is IV, 8 is MIC, 4 is CRC */
- bool bA4 = false;
- unsigned char byTmp;
- unsigned short wCnt;
- int ii, jj, kk;
-
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- bA4 = true;
- pbyIV += 6; /* 6 is 802.11 address4 */
- wHLen += 6;
- wPayloadSize -= 6;
- }
- pbyPayload = pbyIV + 8; /* IV-length */
-
- abyNonce[0] = 0x00; /* now is 0, if Qos here will be priority */
- memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, ETH_ALEN);
- abyNonce[7] = pbyIV[7];
- abyNonce[8] = pbyIV[6];
- abyNonce[9] = pbyIV[5];
- abyNonce[10] = pbyIV[4];
- abyNonce[11] = pbyIV[1];
- abyNonce[12] = pbyIV[0];
-
- /* MIC_IV */
- MIC_IV[0] = 0x59;
- memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
- MIC_IV[14] = (unsigned char)(wPayloadSize >> 8);
- MIC_IV[15] = (unsigned char)(wPayloadSize & 0xff);
-
- /* MIC_HDR1 */
- MIC_HDR1[0] = (unsigned char)(wHLen >> 8);
- MIC_HDR1[1] = (unsigned char)(wHLen & 0xff);
- byTmp = (unsigned char)(pMACHeader->wFrameCtl & 0xff);
- MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (unsigned char)(pMACHeader->wFrameCtl >> 8);
- byTmp &= 0x87;
- MIC_HDR1[3] = byTmp | 0x40;
- memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
- memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, ETH_ALEN);
-
- /* MIC_HDR2 */
- memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
- byTmp = (unsigned char)(pMACHeader->wSeqCtl & 0xff);
- MIC_HDR2[6] = byTmp & 0x0f;
- MIC_HDR2[7] = 0;
- if (bA4) {
- memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, ETH_ALEN);
- } else {
- MIC_HDR2[8] = 0x00;
- MIC_HDR2[9] = 0x00;
- MIC_HDR2[10] = 0x00;
- MIC_HDR2[11] = 0x00;
- MIC_HDR2[12] = 0x00;
- MIC_HDR2[13] = 0x00;
- }
- MIC_HDR2[14] = 0x00;
- MIC_HDR2[15] = 0x00;
-
- /* CCMP */
- AESv128(pbyRxKey, MIC_IV, abyMIC);
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- wCnt = 1;
- abyCTRPLD[0] = 0x01;
- memcpy(&(abyCTRPLD[1]), &(abyNonce[0]), 13);
-
- for (jj = wPayloadSize; jj > 16; jj = jj - 16) {
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
-
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
-
- for (kk = 0; kk < 16; kk++)
- abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- memcpy(pbyPayload, abyPlainText, 16);
- wCnt++;
- pbyPayload += 16;
- } /* for wPayloadSize */
-
- /* last payload */
- memcpy(&(abyLastCipher[0]), pbyPayload, jj);
- for (ii = jj; ii < 16; ii++)
- abyLastCipher[ii] = 0x00;
-
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
-
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 16; kk++)
- abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
- memcpy(pbyPayload, abyPlainText, jj);
- pbyPayload += jj;
-
- /* for MIC calculation */
- for (ii = jj; ii < 16; ii++)
- abyPlainText[ii] = 0x00;
- for (kk = 0; kk < 16; kk++)
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- AESv128(pbyRxKey, abyTmp, abyMIC);
-
- /* =>above is the calculate MIC */
- /* -------------------------------------------- */
-
- wCnt = 0;
- abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
- abyCTRPLD[15] = (unsigned char)(wCnt & 0xff);
- AESv128(pbyRxKey, abyCTRPLD, abyTmp);
- for (kk = 0; kk < 8; kk++)
- abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
- /* =>above is the dec-MIC from packet */
- /* -------------------------------------------- */
-
- return !memcmp(abyMIC, abyTmp, 8);
-}
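
The rest of the deleted file is a hand-rolled AES-128 implementation (the key schedule in AddRoundKey(), the SubBytes/ShiftRows/MixColumns rounds, and the CCMP MIC/CTR loop built on top of it in AESbGenCCMP()). None of that is needed once decryption is left to the stack or to the kernel crypto API; as a hedged sketch of the latter, a single AES-128 block, which is all AESv128() produced, can be obtained as below. This is illustrative only, and a real user would allocate the cipher once at init rather than per block:

	#include <linux/crypto.h>
	#include <linux/err.h>

	/* Hypothetical stand-in for AESv128(): encrypt one 16-byte block. */
	static int aes128_encrypt_block(const u8 *key, const u8 *in, u8 *out)
	{
		struct crypto_cipher *tfm;
		int ret;

		tfm = crypto_alloc_cipher("aes", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_cipher_setkey(tfm, key, 16);	/* AES-128 */
		if (!ret)
			crypto_cipher_encrypt_one(tfm, out, in);

		crypto_free_cipher(tfm);
		return ret;
	}
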
diff --git a/drivers/staging/vt6655/aes_ccmp.h b/drivers/staging/vt6655/aes_ccmp.h
deleted file mode 100644
index fe0c506205d5..000000000000
--- a/drivers/staging/vt6655/aes_ccmp.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: aes_ccmp.h
- *
- * Purpose: AES_CCMP Decryption
- *
- * Author: Warren Hsu
- *
- * Date: Feb 15, 2005
- *
- */
-
-#ifndef __AES_H__
-#define __AES_H__
-
-#include "ttype.h"
-
-bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize);
-
-#endif /* __AES_H__ */
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de54923e8861..86c72ba0a0cd 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -30,12 +30,7 @@
* BBvCaculateParameter - Caculate PhyLength, PhyService and Phy Signal parameter for baseband Tx
* BBbReadEmbedded - Embedded read baseband register via MAC
* BBbWriteEmbedded - Embedded write baseband register via MAC
- * BBbIsRegBitsOn - Test if baseband register bits on
- * BBbIsRegBitsOff - Test if baseband register bits off
* BBbVT3253Init - VIA VT3253 baseband chip init code
- * BBvReadAllRegs - Read All Baseband Registers
- * BBvLoopbackOn - Turn on BaseBand Loopback mode
- * BBvLoopbackOff - Turn off BaseBand Loopback mode
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -50,7 +45,6 @@
*/
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
#include "baseband.h"
#include "srom.h"
@@ -1708,39 +1702,39 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
static
unsigned long
-s_ulGetRatio(struct vnt_private *pDevice);
+s_ulGetRatio(struct vnt_private *priv);
static
void
s_vChangeAntenna(
- struct vnt_private *pDevice
+ struct vnt_private *priv
);
static
void
s_vChangeAntenna(
- struct vnt_private *pDevice
+ struct vnt_private *priv
)
{
- if (pDevice->dwRxAntennaSel == 0) {
- pDevice->dwRxAntennaSel = 1;
- if (pDevice->bTxRxAntInv == true)
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
+ if (priv->dwRxAntennaSel == 0) {
+ priv->dwRxAntennaSel = 1;
+ if (priv->bTxRxAntInv == true)
+ BBvSetRxAntennaMode(priv, ANT_A);
else
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
+ BBvSetRxAntennaMode(priv, ANT_B);
} else {
- pDevice->dwRxAntennaSel = 0;
- if (pDevice->bTxRxAntInv == true)
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_B);
+ priv->dwRxAntennaSel = 0;
+ if (priv->bTxRxAntInv == true)
+ BBvSetRxAntennaMode(priv, ANT_B);
else
- BBvSetRxAntennaMode(pDevice->PortOffset, ANT_A);
+ BBvSetRxAntennaMode(priv, ANT_A);
}
- if (pDevice->dwTxAntennaSel == 0) {
- pDevice->dwTxAntennaSel = 1;
- BBvSetTxAntennaMode(pDevice->PortOffset, ANT_B);
+ if (priv->dwTxAntennaSel == 0) {
+ priv->dwTxAntennaSel = 1;
+ BBvSetTxAntennaMode(priv, ANT_B);
} else {
- pDevice->dwTxAntennaSel = 0;
- BBvSetTxAntennaMode(pDevice->PortOffset, ANT_A);
+ priv->dwTxAntennaSel = 0;
+ BBvSetTxAntennaMode(priv, ANT_A);
}
}
@@ -1792,18 +1786,17 @@ BBuGetFrameTime(
uFrameTime++;
return uPreamble + uFrameTime;
- } else {
- uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
- uTmp = ((uFrameTime * uRate) - 22) / 8;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ }
+ uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
+ uTmp = ((uFrameTime * uRate) - 22) / 8;
+ if (cbFrameLength != uTmp)
+ uFrameTime++;
- uFrameTime = uFrameTime * 4; /* ??????? */
- if (byPktType != PK_TYPE_11A)
- uFrameTime += 6; /* ?????? */
+ uFrameTime = uFrameTime * 4; /* ??????? */
+ if (byPktType != PK_TYPE_11A)
+ uFrameTime += 6; /* ?????? */
- return 20 + uFrameTime; /* ?????? */
- }
+ return 20 + uFrameTime; /* ?????? */
}
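
The BBuGetFrameTime() hunk above is a pure style cleanup: the first branch ends in an unconditional return, so the else that wrapped the OFDM path only added indentation. The shape of the transformation, with hypothetical helper names:

	unsigned int frame_time(unsigned int len, unsigned int rate, bool is_cck)
	{
		if (is_cck)
			return cck_frame_time(len, rate);	/* early return */

		/* former "else" branch, now at the top level */
		return 20 + 4 * ofdm_symbols(len, rate);
	}
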
/*
@@ -1968,8 +1961,10 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char *pbyData)
+bool BBbReadEmbedded(struct vnt_private *priv,
+ unsigned char byBBAddr, unsigned char *pbyData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
@@ -2010,8 +2005,10 @@ bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned ch
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byData)
+bool BBbWriteEmbedded(struct vnt_private *priv,
+ unsigned char byBBAddr, unsigned char byData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byValue;
@@ -2038,50 +2035,6 @@ bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned c
}
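
With BBbReadEmbedded() and BBbWriteEmbedded() now taking the private structure and deriving PortOffset internally, call sites elsewhere in the patch shrink accordingly (sketch drawn from the hunks that follow):

	/* before */
	BBbReadEmbedded(priv->PortOffset, 0x0A, &byBBRxConf);
	/* after */
	BBbReadEmbedded(priv, 0x0A, &byBBRxConf);

Only the two helpers themselves still dereference the __iomem pointer.
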
/*
- * Description: Test if all bits are set for the Baseband register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byBBAddr - address of register in Baseband
- * byTestBits - TestBits
- * Out:
- * none
- *
- * Return Value: true if all TestBits are set; false otherwise.
- *
- */
-bool BBbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- BBbReadEmbedded(dwIoBase, byBBAddr, &byOrgData);
- return (byOrgData & byTestBits) == byTestBits;
-}
-
-/*
- * Description: Test if all bits are clear for the Baseband register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byBBAddr - address of register in Baseband
- * byTestBits - TestBits
- * Out:
- * none
- *
- * Return Value: true if all TestBits are clear; false otherwise.
- *
- */
-bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- BBbReadEmbedded(dwIoBase, byBBAddr, &byOrgData);
- return (byOrgData & byTestBits) == 0;
-}
-
-/*
* Description: VIA VT3253 Baseband chip init function
*
* Parameters:
@@ -2096,126 +2049,126 @@ bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned ch
*
*/
-bool BBbVT3253Init(struct vnt_private *pDevice)
+bool BBbVT3253Init(struct vnt_private *priv)
{
bool bResult = true;
int ii;
- void __iomem *dwIoBase = pDevice->PortOffset;
- unsigned char byRFType = pDevice->byRFType;
- unsigned char byLocalID = pDevice->byLocalID;
+ void __iomem *dwIoBase = priv->PortOffset;
+ unsigned char byRFType = priv->byRFType;
+ unsigned char byLocalID = priv->byLocalID;
if (byRFType == RF_RFMD2959) {
if (byLocalID <= REV_ID_VT3253_A1) {
for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253InitTab_RFMD[ii][0], byVT3253InitTab_RFMD[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253InitTab_RFMD[ii][0], byVT3253InitTab_RFMD[ii][1]);
} else {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_RFMD[ii][0], byVT3253B0_RFMD[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_RFMD[ii][0], byVT3253B0_RFMD[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC4_RFMD2959[ii][0], byVT3253B0_AGC4_RFMD2959[ii][1]);
VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT0);
+ MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
}
- pDevice->abyBBVGA[0] = 0x18;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ priv->abyBBVGA[0] = 0x18;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if ((byRFType == RF_AIROHA) || (byRFType == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if (byRFType == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23);
- MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT0);
-
- pDevice->abyBBVGA[0] = 0x14;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -60;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+
+ priv->abyBBVGA[0] = 0x14;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -60;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
} else if (byRFType == RF_UW2452) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_UW2451[ii][0], byVT3253B0_UW2451[ii][1]);
/* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* {{RobertYu:20050125, request by Jack */
- bResult &= BBbWriteEmbedded(dwIoBase, 0x90, 0x20);
- bResult &= BBbWriteEmbedded(dwIoBase, 0x97, 0xeb);
+ bResult &= BBbWriteEmbedded(priv, 0x90, 0x20);
+ bResult &= BBbWriteEmbedded(priv, 0x97, 0xeb);
/* }} */
/* {{RobertYu:20050221, request by Jack */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xa6, 0x00);
- bResult &= BBbWriteEmbedded(dwIoBase, 0xa8, 0x30);
+ bResult &= BBbWriteEmbedded(priv, 0xa6, 0x00);
+ bResult &= BBbWriteEmbedded(priv, 0xa8, 0x30);
/* }} */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xb0, 0x58);
+ bResult &= BBbWriteEmbedded(priv, 0xb0, 0x58);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x14;
- pDevice->abyBBVGA[1] = 0x0A;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -60;
- pDevice->ldBmThreshold[1] = -50;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x14;
+ priv->abyBBVGA[1] = 0x0A;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -60;
+ priv->ldBmThreshold[1] = -50;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
} else if (byRFType == RF_VT3226) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
MACvSetRFLE_LatchBase(dwIoBase);
/* {{ RobertYu: 20050104 */
} else if (byRFType == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AIROHA2230[ii][0], byVT3253B0_AIROHA2230[ii][1]);
/* {{ RobertYu:20050223, request by JerryChung */
@@ -2228,150 +2181,37 @@ bool BBbVT3253Init(struct vnt_private *pDevice)
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(dwIoBase, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
-
- pDevice->abyBBVGA[0] = 0x1C;
- pDevice->abyBBVGA[1] = 0x10;
- pDevice->abyBBVGA[2] = 0x0;
- pDevice->abyBBVGA[3] = 0x0;
- pDevice->ldBmThreshold[0] = -70;
- pDevice->ldBmThreshold[1] = -48;
- pDevice->ldBmThreshold[2] = 0;
- pDevice->ldBmThreshold[3] = 0;
+ bResult &= BBbWriteEmbedded(priv, byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
+
+ priv->abyBBVGA[0] = 0x1C;
+ priv->abyBBVGA[1] = 0x10;
+ priv->abyBBVGA[2] = 0x0;
+ priv->abyBBVGA[3] = 0x0;
+ priv->ldBmThreshold[0] = -70;
+ priv->ldBmThreshold[1] = -48;
+ priv->ldBmThreshold[2] = 0;
+ priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
} else {
/* No VGA Table now */
- pDevice->bUpdateBBVGA = false;
- pDevice->abyBBVGA[0] = 0x1C;
+ priv->bUpdateBBVGA = false;
+ priv->abyBBVGA[0] = 0x1C;
}
if (byLocalID > REV_ID_VT3253_A1) {
- BBbWriteEmbedded(dwIoBase, 0x04, 0x7F);
- BBbWriteEmbedded(dwIoBase, 0x0D, 0x01);
+ BBbWriteEmbedded(priv, 0x04, 0x7F);
+ BBbWriteEmbedded(priv, 0x0D, 0x01);
}
return bResult;
}
/*
- * Description: Read All Baseband Registers
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyBBRegs - Point to struct that stores Baseband Registers
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void BBvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyBBRegs)
-{
- int ii;
- unsigned char byBase = 1;
-
- for (ii = 0; ii < BB_MAX_CONTEXT_SIZE; ii++) {
- BBbReadEmbedded(dwIoBase, (unsigned char)(ii*byBase), pbyBBRegs);
- pbyBBRegs += byBase;
- }
-}
-
-/*
- * Description: Turn on BaseBand Loopback mode
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * bCCK - If CCK is set
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void BBvLoopbackOn(struct vnt_private *pDevice)
-{
- unsigned char byData;
- void __iomem *dwIoBase = pDevice->PortOffset;
-
- /* CR C9 = 0x00 */
- BBbReadEmbedded(dwIoBase, 0xC9, &pDevice->byBBCRc9); /* CR201 */
- BBbWriteEmbedded(dwIoBase, 0xC9, 0);
- BBbReadEmbedded(dwIoBase, 0x4D, &pDevice->byBBCR4d); /* CR77 */
- BBbWriteEmbedded(dwIoBase, 0x4D, 0x90);
-
- /* CR 88 = 0x02(CCK), 0x03(OFDM) */
- BBbReadEmbedded(dwIoBase, 0x88, &pDevice->byBBCR88); /* CR136 */
-
- if (pDevice->uConnectionRate <= RATE_11M) { /* CCK */
- /* Enable internal digital loopback: CR33 |= 0000 0001 */
- BBbReadEmbedded(dwIoBase, 0x21, &byData); /* CR33 */
- BBbWriteEmbedded(dwIoBase, 0x21, (unsigned char)(byData | 0x01)); /* CR33 */
- /* CR154 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x9A, 0); /* CR154 */
-
- BBbWriteEmbedded(dwIoBase, 0x88, 0x02); /* CR239 */
- } else { /* OFDM */
- /* Enable internal digital loopback:CR154 |= 0000 0001 */
- BBbReadEmbedded(dwIoBase, 0x9A, &byData); /* CR154 */
- BBbWriteEmbedded(dwIoBase, 0x9A, (unsigned char)(byData | 0x01)); /* CR154 */
- /* CR33 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x21, 0); /* CR33 */
-
- BBbWriteEmbedded(dwIoBase, 0x88, 0x03); /* CR239 */
- }
-
- /* CR14 = 0x00 */
- BBbWriteEmbedded(dwIoBase, 0x0E, 0); /* CR14 */
-
- /* Disable TX_IQUN */
- BBbReadEmbedded(pDevice->PortOffset, 0x09, &pDevice->byBBCR09);
- BBbWriteEmbedded(pDevice->PortOffset, 0x09, (unsigned char)(pDevice->byBBCR09 & 0xDE));
-}
-
-/*
- * Description: Turn off BaseBand Loopback mode
- *
- * Parameters:
- * In:
- * pDevice - Device Structure
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void BBvLoopbackOff(struct vnt_private *pDevice)
-{
- unsigned char byData;
- void __iomem *dwIoBase = pDevice->PortOffset;
-
- BBbWriteEmbedded(dwIoBase, 0xC9, pDevice->byBBCRc9); /* CR201 */
- BBbWriteEmbedded(dwIoBase, 0x88, pDevice->byBBCR88); /* CR136 */
- BBbWriteEmbedded(dwIoBase, 0x09, pDevice->byBBCR09); /* CR136 */
- BBbWriteEmbedded(dwIoBase, 0x4D, pDevice->byBBCR4d); /* CR77 */
-
- if (pDevice->uConnectionRate <= RATE_11M) { /* CCK */
- /* Set the CR33 Bit2 to disable internal Loopback. */
- BBbReadEmbedded(dwIoBase, 0x21, &byData);/* CR33 */
- BBbWriteEmbedded(dwIoBase, 0x21, (unsigned char)(byData & 0xFE)); /* CR33 */
- } else { /* OFDM */
- BBbReadEmbedded(dwIoBase, 0x9A, &byData); /* CR154 */
- BBbWriteEmbedded(dwIoBase, 0x9A, (unsigned char)(byData & 0xFE)); /* CR154 */
- }
- BBbReadEmbedded(dwIoBase, 0x0E, &byData); /* CR14 */
- BBbWriteEmbedded(dwIoBase, 0x0E, (unsigned char)(byData | 0x80)); /* CR14 */
-}
-
-/*
* Description: Set ShortSlotTime mode
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* Out:
* none
*
@@ -2379,42 +2219,42 @@ void BBvLoopbackOff(struct vnt_private *pDevice)
*
*/
void
-BBvSetShortSlotTime(struct vnt_private *pDevice)
+BBvSetShortSlotTime(struct vnt_private *priv)
{
unsigned char byBBRxConf = 0;
unsigned char byBBVGA = 0;
- BBbReadEmbedded(pDevice->PortOffset, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
- if (pDevice->bShortSlotTime)
+ if (priv->bShortSlotTime)
byBBRxConf &= 0xDF; /* 1101 1111 */
else
byBBRxConf |= 0x20; /* 0010 0000 */
/* patch for 3253B0 Baseband with Cardbus module */
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byBBVGA);
- if (byBBVGA == pDevice->abyBBVGA[0])
+ BBbReadEmbedded(priv, 0xE7, &byBBVGA);
+ if (byBBVGA == priv->abyBBVGA[0])
byBBRxConf |= 0x20; /* 0010 0000 */
- BBbWriteEmbedded(pDevice->PortOffset, 0x0A, byBBRxConf); /* CR10 */
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
-void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData)
+void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
{
unsigned char byBBRxConf = 0;
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, byData);
+ BBbWriteEmbedded(priv, 0xE7, byData);
- BBbReadEmbedded(pDevice->PortOffset, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
/* patch for 3253B0 Baseband with Cardbus module */
- if (byData == pDevice->abyBBVGA[0])
+ if (byData == priv->abyBBVGA[0])
byBBRxConf |= 0x20; /* 0010 0000 */
- else if (pDevice->bShortSlotTime)
+ else if (priv->bShortSlotTime)
byBBRxConf &= 0xDF; /* 1101 1111 */
else
byBBRxConf |= 0x20; /* 0010 0000 */
- pDevice->byBBVGACurrent = byData;
- BBbWriteEmbedded(pDevice->PortOffset, 0x0A, byBBRxConf); /* CR10 */
+ priv->byBBVGACurrent = byData;
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
/*
@@ -2430,12 +2270,12 @@ void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData)
*
*/
void
-BBvSoftwareReset(void __iomem *dwIoBase)
+BBvSoftwareReset(struct vnt_private *priv)
{
- BBbWriteEmbedded(dwIoBase, 0x50, 0x40);
- BBbWriteEmbedded(dwIoBase, 0x50, 0);
- BBbWriteEmbedded(dwIoBase, 0x9C, 0x01);
- BBbWriteEmbedded(dwIoBase, 0x9C, 0);
+ BBbWriteEmbedded(priv, 0x50, 0x40);
+ BBbWriteEmbedded(priv, 0x50, 0);
+ BBbWriteEmbedded(priv, 0x9C, 0x01);
+ BBbWriteEmbedded(priv, 0x9C, 0);
}
/*
@@ -2451,13 +2291,13 @@ BBvSoftwareReset(void __iomem *dwIoBase)
*
*/
void
-BBvPowerSaveModeON(void __iomem *dwIoBase)
+BBvPowerSaveModeON(struct vnt_private *priv)
{
unsigned char byOrgData;
- BBbReadEmbedded(dwIoBase, 0x0D, &byOrgData);
- byOrgData |= BIT0;
- BBbWriteEmbedded(dwIoBase, 0x0D, byOrgData);
+ BBbReadEmbedded(priv, 0x0D, &byOrgData);
+ byOrgData |= BIT(0);
+ BBbWriteEmbedded(priv, 0x0D, byOrgData);
}
/*
@@ -2473,13 +2313,13 @@ BBvPowerSaveModeON(void __iomem *dwIoBase)
*
*/
void
-BBvPowerSaveModeOFF(void __iomem *dwIoBase)
+BBvPowerSaveModeOFF(struct vnt_private *priv)
{
unsigned char byOrgData;
- BBbReadEmbedded(dwIoBase, 0x0D, &byOrgData);
- byOrgData &= ~(BIT0);
- BBbWriteEmbedded(dwIoBase, 0x0D, byOrgData);
+ BBbReadEmbedded(priv, 0x0D, &byOrgData);
+ byOrgData &= ~(BIT(0));
+ BBbWriteEmbedded(priv, 0x0D, byOrgData);
}
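
The BIT0 constant used by the old code comes from the driver's own headers; the replacement BIT() macro is the generic one from <linux/bitops.h> and expands to (1UL << (nr)), so the two power-save hunks above are behaviourally identical:

	#include <linux/bitops.h>

	byOrgData |= BIT(0);	/* set bit 0,   formerly byOrgData |= BIT0 */
	byOrgData &= ~BIT(0);	/* clear bit 0, formerly byOrgData &= ~(BIT0) */
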
/*
@@ -2487,7 +2327,7 @@ BBvPowerSaveModeOFF(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byAntennaMode - Antenna Mode
* Out:
* none
@@ -2497,11 +2337,11 @@ BBvPowerSaveModeOFF(void __iomem *dwIoBase)
*/
void
-BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
+BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
{
unsigned char byBBTxConf;
- BBbReadEmbedded(dwIoBase, 0x09, &byBBTxConf); /* CR09 */
+ BBbReadEmbedded(priv, 0x09, &byBBTxConf); /* CR09 */
if (byAntennaMode == ANT_DIVERSITY) {
/* bit 1 is diversity */
byBBTxConf |= 0x02;
@@ -2512,7 +2352,7 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
byBBTxConf &= 0xFD; /* 1111 1101 */
byBBTxConf |= 0x04;
}
- BBbWriteEmbedded(dwIoBase, 0x09, byBBTxConf); /* CR09 */
+ BBbWriteEmbedded(priv, 0x09, byBBTxConf); /* CR09 */
}
/*
@@ -2520,7 +2360,7 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byAntennaMode - Antenna Mode
* Out:
* none
@@ -2530,11 +2370,11 @@ BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*/
void
-BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
+BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
{
unsigned char byBBRxConf;
- BBbReadEmbedded(dwIoBase, 0x0A, &byBBRxConf); /* CR10 */
+ BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
if (byAntennaMode == ANT_DIVERSITY) {
byBBRxConf |= 0x01;
@@ -2544,7 +2384,7 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
byBBRxConf &= 0xFE; /* 1111 1110 */
byBBRxConf |= 0x02;
}
- BBbWriteEmbedded(dwIoBase, 0x0A, byBBRxConf); /* CR10 */
+ BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
}
/*
@@ -2552,7 +2392,7 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* Out:
* none
*
@@ -2560,109 +2400,109 @@ BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode)
*
*/
void
-BBvSetDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID)
+BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
{
- BBbWriteEmbedded(dwIoBase, 0x0C, 0x17); /* CR12 */
- BBbWriteEmbedded(dwIoBase, 0x0D, 0xB9); /* CR13 */
+ BBbWriteEmbedded(priv, 0x0C, 0x17); /* CR12 */
+ BBbWriteEmbedded(priv, 0x0D, 0xB9); /* CR13 */
}
void
-BBvExitDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID)
+BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
{
- BBbWriteEmbedded(dwIoBase, 0x0C, 0x00); /* CR12 */
- BBbWriteEmbedded(dwIoBase, 0x0D, 0x01); /* CR13 */
+ BBbWriteEmbedded(priv, 0x0C, 0x00); /* CR12 */
+ BBbWriteEmbedded(priv, 0x0D, 0x01); /* CR13 */
}
static
unsigned long
-s_ulGetRatio(struct vnt_private *pDevice)
+s_ulGetRatio(struct vnt_private *priv)
{
unsigned long ulRatio = 0;
unsigned long ulMaxPacket;
unsigned long ulPacketNum;
/* This is a thousand-ratio */
- ulMaxPacket = pDevice->uNumSQ3[RATE_54M];
- if (pDevice->uNumSQ3[RATE_54M] != 0) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ ulMaxPacket = priv->uNumSQ3[RATE_54M];
+ if (priv->uNumSQ3[RATE_54M] != 0) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_54M;
}
- if (pDevice->uNumSQ3[RATE_48M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_48M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_48M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_48M];
+ ulMaxPacket = priv->uNumSQ3[RATE_48M];
}
- if (pDevice->uNumSQ3[RATE_36M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_36M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_36M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_36M];
+ ulMaxPacket = priv->uNumSQ3[RATE_36M];
}
- if (pDevice->uNumSQ3[RATE_24M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_24M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_24M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_24M];
+ ulMaxPacket = priv->uNumSQ3[RATE_24M];
}
- if (pDevice->uNumSQ3[RATE_18M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M] +
- pDevice->uNumSQ3[RATE_18M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_18M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
+ priv->uNumSQ3[RATE_18M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_18M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_18M];
+ ulMaxPacket = priv->uNumSQ3[RATE_18M];
}
- if (pDevice->uNumSQ3[RATE_12M] > ulMaxPacket) {
- ulPacketNum = pDevice->uNumSQ3[RATE_54M] + pDevice->uNumSQ3[RATE_48M] +
- pDevice->uNumSQ3[RATE_36M] + pDevice->uNumSQ3[RATE_24M] +
- pDevice->uNumSQ3[RATE_18M] + pDevice->uNumSQ3[RATE_12M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_12M] > ulMaxPacket) {
+ ulPacketNum = priv->uNumSQ3[RATE_54M] + priv->uNumSQ3[RATE_48M] +
+ priv->uNumSQ3[RATE_36M] + priv->uNumSQ3[RATE_24M] +
+ priv->uNumSQ3[RATE_18M] + priv->uNumSQ3[RATE_12M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_12M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_12M];
+ ulMaxPacket = priv->uNumSQ3[RATE_12M];
}
- if (pDevice->uNumSQ3[RATE_11M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M] -
- pDevice->uNumSQ3[RATE_6M] - pDevice->uNumSQ3[RATE_9M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_11M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
+ priv->uNumSQ3[RATE_6M] - priv->uNumSQ3[RATE_9M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_11M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_11M];
+ ulMaxPacket = priv->uNumSQ3[RATE_11M];
}
- if (pDevice->uNumSQ3[RATE_9M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M] -
- pDevice->uNumSQ3[RATE_6M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_9M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M] -
+ priv->uNumSQ3[RATE_6M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_9M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_9M];
+ ulMaxPacket = priv->uNumSQ3[RATE_9M];
}
- if (pDevice->uNumSQ3[RATE_6M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M] - pDevice->uNumSQ3[RATE_5M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_6M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M] - priv->uNumSQ3[RATE_5M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_6M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_6M];
+ ulMaxPacket = priv->uNumSQ3[RATE_6M];
}
- if (pDevice->uNumSQ3[RATE_5M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M] -
- pDevice->uNumSQ3[RATE_2M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_5M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M] -
+ priv->uNumSQ3[RATE_2M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_55M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_5M];
+ ulMaxPacket = priv->uNumSQ3[RATE_5M];
}
- if (pDevice->uNumSQ3[RATE_2M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt - pDevice->uNumSQ3[RATE_1M];
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_2M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt - priv->uNumSQ3[RATE_1M];
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_2M;
- ulMaxPacket = pDevice->uNumSQ3[RATE_2M];
+ ulMaxPacket = priv->uNumSQ3[RATE_2M];
}
- if (pDevice->uNumSQ3[RATE_1M] > ulMaxPacket) {
- ulPacketNum = pDevice->uDiversityCnt;
- ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
+ if (priv->uNumSQ3[RATE_1M] > ulMaxPacket) {
+ ulPacketNum = priv->uDiversityCnt;
+ ulRatio = (ulPacketNum * 1000 / priv->uDiversityCnt);
ulRatio += TOP_RATE_1M;
}
@@ -2670,13 +2510,13 @@ s_ulGetRatio(struct vnt_private *pDevice)
}
void
-BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
+BBvClearAntDivSQ3Value(struct vnt_private *priv)
{
unsigned int ii;
- pDevice->uDiversityCnt = 0;
+ priv->uDiversityCnt = 0;
for (ii = 0; ii < MAX_RATE; ii++)
- pDevice->uNumSQ3[ii] = 0;
+ priv->uNumSQ3[ii] = 0;
}
/*
@@ -2684,7 +2524,7 @@ BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
*
* Parameters:
* In:
- * pDevice - Device Structure
+ * priv - Device Structure
* byRSR - RSR from received packet
* bySQ3 - SQ3 value from received packet
* Out:
@@ -2694,75 +2534,75 @@ BBvClearAntDivSQ3Value(struct vnt_private *pDevice)
*
*/
-void BBvAntennaDiversity(struct vnt_private *pDevice,
+void BBvAntennaDiversity(struct vnt_private *priv,
unsigned char byRxRate, unsigned char bySQ3)
{
- if ((byRxRate >= MAX_RATE) || (pDevice->wAntDiversityMaxRate >= MAX_RATE))
+ if ((byRxRate >= MAX_RATE) || (priv->wAntDiversityMaxRate >= MAX_RATE))
return;
- pDevice->uDiversityCnt++;
+ priv->uDiversityCnt++;
- pDevice->uNumSQ3[byRxRate]++;
+ priv->uNumSQ3[byRxRate]++;
- if (pDevice->byAntennaState == 0) {
- if (pDevice->uDiversityCnt > pDevice->ulDiversityNValue) {
+ if (priv->byAntennaState == 0) {
+ if (priv->uDiversityCnt > priv->ulDiversityNValue) {
pr_debug("ulDiversityNValue=[%d],54M-[%d]\n",
- (int)pDevice->ulDiversityNValue,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate]);
+ (int)priv->ulDiversityNValue,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate]);
- if (pDevice->uNumSQ3[pDevice->wAntDiversityMaxRate] < pDevice->uDiversityCnt/2) {
- pDevice->ulRatio_State0 = s_ulGetRatio(pDevice);
+ if (priv->uNumSQ3[priv->wAntDiversityMaxRate] < priv->uDiversityCnt/2) {
+ priv->ulRatio_State0 = s_ulGetRatio(priv);
pr_debug("SQ3_State0, rate = [%08x]\n",
- (int)pDevice->ulRatio_State0);
+ (int)priv->ulRatio_State0);
- if (pDevice->byTMax == 0)
+ if (priv->byTMax == 0)
return;
pr_debug("1.[%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
- pDevice->byAntennaState = 1;
- del_timer(&pDevice->TimerSQ3Tmax3);
- del_timer(&pDevice->TimerSQ3Tmax2);
- pDevice->TimerSQ3Tmax1.expires = RUN_AT(pDevice->byTMax * HZ);
- add_timer(&pDevice->TimerSQ3Tmax1);
+ (int)priv->ulRatio_State0,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+ priv->byAntennaState = 1;
+ del_timer(&priv->TimerSQ3Tmax3);
+ del_timer(&priv->TimerSQ3Tmax2);
+ priv->TimerSQ3Tmax1.expires = RUN_AT(priv->byTMax * HZ);
+ add_timer(&priv->TimerSQ3Tmax1);
} else {
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
}
- BBvClearAntDivSQ3Value(pDevice);
+ BBvClearAntDivSQ3Value(priv);
}
} else { /* byAntennaState == 1 */
- if (pDevice->uDiversityCnt > pDevice->ulDiversityMValue) {
- del_timer(&pDevice->TimerSQ3Tmax1);
+ if (priv->uDiversityCnt > priv->ulDiversityMValue) {
+ del_timer(&priv->TimerSQ3Tmax1);
- pDevice->ulRatio_State1 = s_ulGetRatio(pDevice);
+ priv->ulRatio_State1 = s_ulGetRatio(priv);
pr_debug("RX:SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1);
- if (pDevice->ulRatio_State1 < pDevice->ulRatio_State0) {
+ if (priv->ulRatio_State1 < priv->ulRatio_State0) {
pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
}
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
}
} /* byAntennaState */
}
@@ -2783,28 +2623,30 @@ void BBvAntennaDiversity(struct vnt_private *pDevice,
void
TimerSQ3CallBack(
- void *hDeviceContext
+ unsigned long data
)
{
- struct vnt_private *pDevice = hDeviceContext;
+ struct vnt_private *priv = (struct vnt_private *)data;
+ unsigned long flags;
pr_debug("TimerSQ3CallBack...\n");
- spin_lock_irq(&pDevice->lock);
+
+ spin_lock_irqsave(&priv->lock, flags);
pr_debug("3.[%08x][%08x], %d\n",
- (int)pDevice->ulRatio_State0, (int)pDevice->ulRatio_State1,
- (int)pDevice->uDiversityCnt);
+ (int)priv->ulRatio_State0, (int)priv->ulRatio_State1,
+ (int)priv->uDiversityCnt);
- s_vChangeAntenna(pDevice);
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
+ s_vChangeAntenna(priv);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
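
TimerSQ3CallBack() now uses the kernel timer prototype, taking its context as an unsigned long cookie rather than the driver's void *hDeviceContext convention, and recovering the private structure with a cast. The matching registration lives elsewhere in the driver; assuming the classic (pre-timer_setup) API, it would look roughly like this sketch:

	setup_timer(&priv->TimerSQ3Tmax1, TimerSQ3CallBack, (unsigned long)priv);
	priv->TimerSQ3Tmax1.expires = RUN_AT(priv->byTMax * HZ);
	add_timer(&priv->TimerSQ3Tmax1);
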
/*+
@@ -2827,43 +2669,46 @@ TimerSQ3CallBack(
void
TimerState1CallBack(
- void *hDeviceContext
+ unsigned long data
)
{
- struct vnt_private *pDevice = hDeviceContext;
+ struct vnt_private *priv = (struct vnt_private *)data;
+ unsigned long flags;
pr_debug("TimerState1CallBack...\n");
- spin_lock_irq(&pDevice->lock);
- if (pDevice->uDiversityCnt < pDevice->ulDiversityMValue/100) {
- s_vChangeAntenna(pDevice);
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->uDiversityCnt < priv->ulDiversityMValue/100) {
+ s_vChangeAntenna(priv);
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
} else {
- pDevice->ulRatio_State1 = s_ulGetRatio(pDevice);
+ priv->ulRatio_State1 = s_ulGetRatio(priv);
pr_debug("SQ3_State1, rate0 = %08x,rate1 = %08x\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1);
- if (pDevice->ulRatio_State1 < pDevice->ulRatio_State0) {
+ if (priv->ulRatio_State1 < priv->ulRatio_State0) {
pr_debug("2.[%08x][%08x], uNumSQ3[%d]=%d, %d\n",
- (int)pDevice->ulRatio_State0,
- (int)pDevice->ulRatio_State1,
- (int)pDevice->wAntDiversityMaxRate,
- (int)pDevice->uNumSQ3[(int)pDevice->wAntDiversityMaxRate],
- (int)pDevice->uDiversityCnt);
-
- s_vChangeAntenna(pDevice);
-
- pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
- pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
- add_timer(&pDevice->TimerSQ3Tmax3);
- add_timer(&pDevice->TimerSQ3Tmax2);
+ (int)priv->ulRatio_State0,
+ (int)priv->ulRatio_State1,
+ (int)priv->wAntDiversityMaxRate,
+ (int)priv->uNumSQ3[(int)priv->wAntDiversityMaxRate],
+ (int)priv->uDiversityCnt);
+
+ s_vChangeAntenna(priv);
+
+ priv->TimerSQ3Tmax3.expires = RUN_AT(priv->byTMax3 * HZ);
+ priv->TimerSQ3Tmax2.expires = RUN_AT(priv->byTMax2 * HZ);
+ add_timer(&priv->TimerSQ3Tmax3);
+ add_timer(&priv->TimerSQ3Tmax2);
}
}
- pDevice->byAntennaState = 0;
- BBvClearAntDivSQ3Value(pDevice);
- spin_unlock_irq(&pDevice->lock);
+ priv->byAntennaState = 0;
+ BBvClearAntDivSQ3Value(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
}
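
Both timer callbacks also switch from spin_lock_irq() to spin_lock_irqsave(). The irqsave variant records the current interrupt state in flags and restores exactly that state on unlock, whereas spin_unlock_irq() unconditionally re-enables interrupts, which is only safe when the caller is known to have had them enabled. For code that may be reached from more than one context, the saved/restored form is the defensive choice:

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);		/* save IRQ state, then disable */
	/* ... critical section ... */
	spin_unlock_irqrestore(&priv->lock, flags);	/* restore the saved state */
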
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 31f2255519cf..d9f6d63e4ab7 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -30,8 +30,6 @@
#ifndef __BASEBAND_H__
#define __BASEBAND_H__
-#include "ttype.h"
-#include "tether.h"
#include "device.h"
/*
@@ -79,42 +77,37 @@ BBuGetFrameTime(
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-bool BBbReadEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char *pbyData);
-bool BBbWriteEmbedded(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byData);
+bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData);
+bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData);
-void BBvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyBBRegs);
-void BBvLoopbackOn(struct vnt_private *pDevice);
-void BBvLoopbackOff(struct vnt_private *pDevice);
-void BBvSetShortSlotTime(struct vnt_private *pDevice);
-bool BBbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
-bool BBbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byBBAddr, unsigned char byTestBits);
-void BBvSetVGAGainOffset(struct vnt_private *pDevice, unsigned char byData);
+void BBvSetShortSlotTime(struct vnt_private *);
+void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
/* VT3253 Baseband */
-bool BBbVT3253Init(struct vnt_private *pDevice);
-void BBvSoftwareReset(void __iomem *dwIoBase);
-void BBvPowerSaveModeON(void __iomem *dwIoBase);
-void BBvPowerSaveModeOFF(void __iomem *dwIoBase);
-void BBvSetTxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode);
-void BBvSetRxAntennaMode(void __iomem *dwIoBase, unsigned char byAntennaMode);
-void BBvSetDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID);
-void BBvExitDeepSleep(void __iomem *dwIoBase, unsigned char byLocalID);
+bool BBbVT3253Init(struct vnt_private *);
+void BBvSoftwareReset(struct vnt_private *);
+void BBvPowerSaveModeON(struct vnt_private *);
+void BBvPowerSaveModeOFF(struct vnt_private *);
+void BBvSetTxAntennaMode(struct vnt_private *, unsigned char byAntennaMode);
+void BBvSetRxAntennaMode(struct vnt_private *, unsigned char byAntennaMode);
+void BBvSetDeepSleep(struct vnt_private *, unsigned char byLocalID);
+void BBvExitDeepSleep(struct vnt_private *, unsigned char byLocalID);
/* timer for antenna diversity */
void
TimerSQ3CallBack(
- void *hDeviceContext
+ unsigned long
);
void
TimerState1CallBack(
- void *hDeviceContext
+ unsigned long
);
-void BBvAntennaDiversity(struct vnt_private *pDevice,
+void BBvAntennaDiversity(struct vnt_private *,
unsigned char byRxRate, unsigned char bySQ3);
void
-BBvClearAntDivSQ3Value(struct vnt_private *pDevice);
+BBvClearAntDivSQ3Value(struct vnt_private *);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
deleted file mode 100644
index 996d3302ce3d..000000000000
--- a/drivers/staging/vt6655/bssdb.c
+++ /dev/null
@@ -1,1512 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: bssdb.c
- *
- * Purpose: Handles the Basic Service Set & Node Database functions
- *
- * Functions:
- * BSSpSearchBSSList - Search known BSS list for Desire SSID or BSSID
- * BSSvClearBSSList - Clear BSS List
- * BSSbInsertToBSSList - Insert a BSS set into known BSS list
- * BSSbUpdateToBSSList - Update BSS set in known BSS list
- * BSSDBbIsSTAInNodeDB - Search Node DB table to find the index of matched DstAddr
- * BSSvCreateOneNode - Allocate an Node for Node DB
- * BSSvUpdateAPNode - Update AP Node content in Index 0 of KnownNodeDB
- * BSSvSecondCallBack - One second timer callback function to update Node DB info & AP link status
- * BSSvUpdateNodeTxCounter - Update Tx attemps, Tx failure counter in Node DB for auto-fall back rate control
- *
- * Revision History:
- *
- * Author: Lyndon Chen
- *
- * Date: July 17, 2002
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "device.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-#include "wmgr.h"
-#include "datarate.h"
-#include "desc.h"
-#include "wcmd.h"
-#include "wpa.h"
-#include "baseband.h"
-#include "rf.h"
-#include "card.h"
-#include "channel.h"
-#include "mac.h"
-#include "wpa2.h"
-#include "iowpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned short awHWRetry0[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
- {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
- {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
-};
-static const unsigned short awHWRetry1[5][5] = {
- {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
- {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
- {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
- {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
-};
-
-/*--------------------- Static Functions --------------------------*/
-
-void s_vCheckSensitivity(
- void *hDeviceContext
-);
-
-#ifdef Calcu_LinkQual
-void s_uCalculateLinkQual(
- void *hDeviceContext
-);
-#endif
-
-void s_vCheckPreEDThreshold(
- void *hDeviceContext
-);
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Search known BSS list for Desire SSID or BSSID.
- *
- * Return Value:
- * PTR to KnownBSS or NULL
- *
- -*/
-
-PKnownBSS
-BSSpSearchBSSList(
- void *hDeviceContext,
- unsigned char *pbyDesireBSSID,
- unsigned char *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned char *pbyBSSID = NULL;
- PWLAN_IE_SSID pSSID = NULL;
- PKnownBSS pCurrBSS = NULL;
- PKnownBSS pSelect = NULL;
- unsigned char ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned int ii = 0;
-
- if (pbyDesireBSSID != NULL) {
- pr_debug("BSSpSearchBSSList BSSID[%pM]\n", pbyDesireBSSID);
- if ((!is_broadcast_ether_addr(pbyDesireBSSID)) &&
- (memcmp(pbyDesireBSSID, ZeroBSSID, 6) != 0))
- pbyBSSID = pbyDesireBSSID;
- }
- if (pbyDesireSSID != NULL) {
- if (((PWLAN_IE_SSID)pbyDesireSSID)->len != 0)
- pSSID = (PWLAN_IE_SSID) pbyDesireSSID;
- }
-
- if (pbyBSSID != NULL) {
- /* match BSSID first */
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
- if (!pDevice->bLinkPass)
- pCurrBSS->bSelected = false;
- if ((pCurrBSS->bActive) &&
- (!pCurrBSS->bSelected)) {
- if (ether_addr_equal(pCurrBSS->abyBSSID,
- pbyBSSID)) {
- if (pSSID != NULL) {
- /* compare ssid */
- if (!memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len)) {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
-) {
- pCurrBSS->bSelected = true;
- return pCurrBSS;
- }
- }
- } else {
- if ((pMgmt->eConfigMode == WMAC_CONFIG_AUTO) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo))
-) {
- pCurrBSS->bSelected = true;
- return pCurrBSS;
- }
- }
- }
- }
- }
- } else {
- /* ignore BSSID */
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pCurrBSS = &(pMgmt->sBSSList[ii]);
- /* 2007-0721-01<Add>by MikeLiu */
- pCurrBSS->bSelected = false;
- if (pCurrBSS->bActive) {
- if (pSSID != NULL) {
- /* matched SSID */
- if (!!memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pCurrBSS->abySSID)->abySSID,
- pSSID->len) ||
- (pSSID->len != ((PWLAN_IE_SSID)pCurrBSS->abySSID)->len)) {
- /* SSID not match skip this BSS */
- continue;
- }
- }
- if (((pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) && WLAN_GET_CAP_INFO_ESS(pCurrBSS->wCapInfo)) ||
- ((pMgmt->eConfigMode == WMAC_CONFIG_ESS_STA) && WLAN_GET_CAP_INFO_IBSS(pCurrBSS->wCapInfo))
-) {
- /* Type not match skip this BSS */
- pr_debug("BSS type mismatch.... Config[%d] BSS[0x%04x]\n",
- pMgmt->eConfigMode,
- pCurrBSS->wCapInfo);
- continue;
- }
-
- if (ePhyType != PHY_TYPE_AUTO) {
- if (((ePhyType == PHY_TYPE_11A) && (PHY_TYPE_11A != pCurrBSS->eNetworkTypeInUse)) ||
- ((ePhyType != PHY_TYPE_11A) && (PHY_TYPE_11A == pCurrBSS->eNetworkTypeInUse))) {
- /* PhyType does not match, skip this BSS */
- pr_debug("Physical type mismatch.... ePhyType[%d] BSS[%d]\n",
- ePhyType,
- pCurrBSS->eNetworkTypeInUse);
- continue;
- }
- }
-
- if (pSelect == NULL) {
- pSelect = pCurrBSS;
- } else {
- /* compare RSSI, select the one with the stronger signal */
- if (pCurrBSS->uRSSI < pSelect->uRSSI)
- pSelect = pCurrBSS;
- }
- }
- }
- if (pSelect != NULL) {
- pSelect->bSelected = true;
- return pSelect;
- }
- }
- return NULL;
-}
-
-/*+
- *
- * Routine Description:
- * Clear BSS List
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-BSSvClearBSSList(
- void *hDeviceContext,
- bool bKeepCurrBSSID
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (bKeepCurrBSSID) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyCurrBSSID)) {
- continue;
- }
- }
-
- if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
- pMgmt->sBSSList[ii].uClearCount++;
- continue;
- }
-
- pMgmt->sBSSList[ii].bActive = false;
- memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
- }
- BSSvClearAnyBSSJoinRecord(pDevice);
-}
-
-/*+
- *
- * Routine Description:
- * Search the BSS list for an entry matching the given BSSID & SSID
- *
- * Return Value:
- * PTR to matching KnownBSS, or NULL if not found.
- *
- -*/
-PKnownBSS
-BSSpAddrIsInBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSID,
- PWLAN_IE_SSID pSSID
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = &(pMgmt->sBSSList[ii]);
- if (pBSSList->bActive) {
- if (ether_addr_equal(pBSSList->abyBSSID, abyBSSID)) {
- if (pSSID->len == ((PWLAN_IE_SSID)pBSSList->abySSID)->len) {
- if (memcmp(pSSID->abySSID,
- ((PWLAN_IE_SSID)pBSSList->abySSID)->abySSID,
- pSSID->len) == 0)
- return pBSSList;
- }
- }
- }
- }
-
- return NULL;
-};
-
-/*+
- *
- * Routine Description:
- * Insert a BSS set into known BSS list
- *
- * Return Value:
- * true on success.
- *
- -*/
-
-bool
-BSSbInsertToBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSIDAddr,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- PKnownBSS pBSSList = NULL;
- unsigned int ii;
- bool bParsingQuiet = false;
- PWLAN_IE_QUIET pQuiet = NULL;
-
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[0]);
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSSList = (PKnownBSS)&(pMgmt->sBSSList[ii]);
- if (!pBSSList->bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM) {
- pr_debug("Get free KnowBSS node failed\n");
- return false;
- }
- /* save the BSS info */
- pBSSList->bActive = true;
- memcpy(pBSSList->abyBSSID, abyBSSIDAddr, WLAN_BSSID_LEN);
- pBSSList->qwBSSTimestamp = le64_to_cpu(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
-
- pBSSList->uChannel = byCurrChannel;
-
- if (pSuppRates->len > WLAN_RATES_MAXLEN)
- pSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL) {
- if (pExtSuppRates->len > WLAN_RATES_MAXLEN)
- pExtSuppRates->len = WLAN_RATES_MAXLEN;
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- pr_debug("BSSbInsertToBSSList: pExtSuppRates->len = %d\n",
- pExtSuppRates->len);
-
- } else {
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- }
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- /* check if BSS is 802.11a/b/g */
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist)
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- else
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- /* assoc with BSS */
- if (pBSSList == pMgmt->pCurrBSS)
- bParsingQuiet = true;
- }
-
- WPA_ClearRSN(pBSSList);
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
- }
-
- WPA2_ClearRSN(pBSSList);
-
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
- }
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || pBSSList->bWPA2Valid) {
- PSKeyItem pTransmitKey = NULL;
- bool bIs802_1x = false;
-
- for (ii = 0; ii < pBSSList->wAKMSSAuthCount; ii++) {
- if (pBSSList->abyAKMSSAuthType[ii] == WLAN_11i_AKMSS_802_1X) {
- bIs802_1x = true;
- break;
- }
- }
- if (bIs802_1x && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
- (!memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
- bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
-
- if (pDevice->bLinkPass && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) ||
- KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, GROUP_KEY, &pTransmitKey)) {
- pDevice->gsPMKIDCandidate.StatusType = Ndis802_11StatusType_PMKID_CandidateList;
- pDevice->gsPMKIDCandidate.Version = 1;
-
- }
-
- }
- }
- }
-
- if (pDevice->bUpdateBBVGA) {
- /* monitor if RSSI is too strong */
- pBSSList->byRSSIStatCnt = 0;
- RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &pBSSList->ldBmMAX);
- pBSSList->ldBmAverage[0] = pBSSList->ldBmMAX;
- for (ii = 1; ii < RSSI_STAT_COUNT; ii++)
- pBSSList->ldBmAverage[ii] = 0;
- }
-
- if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
- set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if (bParsingQuiet && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- /* valid EID */
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- true,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
-);
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- false,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if (bParsingQuiet && (pQuiet != NULL))
- CARDbStartQuiet(pMgmt->pAdapter);
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Update BSS set in known BSS list
- *
- * Return Value:
- * true on success.
- *
- -*/
-/* TODO: modify the input structure */
-
-bool
-BSSbUpdateToBSSList(
- void *hDeviceContext,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- bool bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-)
-{
- int ii;
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- long ldBm;
- bool bParsingQuiet = false;
- PWLAN_IE_QUIET pQuiet = NULL;
-
- if (pBSSList == NULL)
- return false;
-
- pBSSList->qwBSSTimestamp = le64_to_cpu(qwTimestamp);
- pBSSList->wBeaconInterval = cpu_to_le16(wBeaconInterval);
- pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
- pBSSList->uClearCount = 0;
- pBSSList->uChannel = byCurrChannel;
-
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
-
- if ((pSSID->len != 0) && (pSSID->abySSID[0] != 0))
- memcpy(pBSSList->abySSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- memcpy(pBSSList->abySuppRates, pSuppRates, pSuppRates->len + WLAN_IEHDR_LEN);
-
- if (pExtSuppRates != NULL)
- memcpy(pBSSList->abyExtSuppRates, pExtSuppRates, pExtSuppRates->len + WLAN_IEHDR_LEN);
- else
- memset(pBSSList->abyExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- pBSSList->sERP.byERP = psERP->byERP;
- pBSSList->sERP.bERPExist = psERP->bERPExist;
-
- /* check if BSS is 802.11a/b/g */
- if (pBSSList->uChannel > CB_MAX_CHANNEL_24G) {
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11A;
- } else {
- if (pBSSList->sERP.bERPExist)
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11G;
- else
- pBSSList->eNetworkTypeInUse = PHY_TYPE_11B;
- }
-
- pBSSList->byRxRate = pRxPacket->byRxRate;
- pBSSList->qwLocalTSF = pRxPacket->qwLocalTSF;
- if (bChannelHit)
- pBSSList->uRSSI = pRxPacket->uRSSI;
- pBSSList->bySQ = pRxPacket->bySQ;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- /* assoc with BSS */
- if (pBSSList == pMgmt->pCurrBSS)
- bParsingQuiet = true;
- }
-
- WPA_ClearRSN(pBSSList); /* mike update */
-
- if (pRSNWPA != NULL) {
- unsigned int uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
- }
-
- WPA2_ClearRSN(pBSSList); /* mike update */
-
- if (pRSN != NULL) {
- unsigned int uLen = pRSN->len + 2;
-
- if (uLen <= (uIELength - (unsigned int)((unsigned char *)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
- }
-
- if (pRxPacket->uRSSI != 0) {
- RFvRSSITodBm(pDevice, (unsigned char)(pRxPacket->uRSSI), &ldBm);
- /* monitor if RSSI is too strong */
- pBSSList->byRSSIStatCnt++;
- pBSSList->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pBSSList->ldBmAverage[pBSSList->byRSSIStatCnt] = ldBm;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0)
- pBSSList->ldBmMAX = max(pBSSList->ldBmAverage[ii], ldBm);
- }
- }
-
- if ((pIE_Country != NULL) && pMgmt->b11hEnable) {
- set_country_info(pMgmt->pAdapter, pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if (bParsingQuiet && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- /* valid EID */
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- true,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
-);
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet(pMgmt->pAdapter,
- false,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((unsigned short *)pQuiet->abyQuietDuration),
- *((unsigned short *)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if (bParsingQuiet && (pQuiet != NULL))
- CARDbStartQuiet(pMgmt->pAdapter);
-
- pBSSList->uIELength = uIELength;
- if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
- pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
- memcpy(pBSSList->abyIEs, pbyIEs, pBSSList->uIELength);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Search the Node DB table for the index of the matching DstAddr
- *
- * Return Value:
- * true if found; otherwise false.
- *
- -*/
-
-bool
-BSSDBbIsSTAInNodeDB(void *pMgmtObject, unsigned char *abyDstAddr,
- unsigned int *puNodeIndex)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- unsigned int ii;
-
- /* Index = 0 reserved for AP Node */
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (ether_addr_equal(abyDstAddr,
- pMgmt->sNodeDBTable[ii].abyMACAddr)) {
- *puNodeIndex = ii;
- return true;
- }
- }
- }
-
- return false;
-};
-
-/*+
- *
- * Routine Description:
- * Find an empty node and allocate it; if there is no empty node,
- * then use the most inactive one.
- *
- * Return Value:
- * None
- *
- -*/
-void
-BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
- unsigned int BigestCount = 0;
- unsigned int SelectIndex;
- struct sk_buff *skb;
- /*
- * Index = 0 reserved for AP Node (In STA mode)
- * Index = 0 reserved for Broadcast/MultiCast (In AP mode)
- */
- SelectIndex = 1;
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > BigestCount) {
- BigestCount = pMgmt->sNodeDBTable[ii].uInActiveCount;
- SelectIndex = ii;
- }
- } else {
- break;
- }
- }
-
- /* if no free node was found, replace the one with the largest uInActiveCount */
- if (ii == (MAX_NODE_NUM + 1)) {
- *puNodeIndex = SelectIndex;
- pr_info("Replace inactive node = %d\n", SelectIndex);
- /* clear ps buffer */
- if (pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- }
- } else {
- *puNodeIndex = ii;
- }
-
- memset(&pMgmt->sNodeDBTable[*puNodeIndex], 0, sizeof(KnownNodeDB));
- pMgmt->sNodeDBTable[*puNodeIndex].bActive = true;
- pMgmt->sNodeDBTable[*puNodeIndex].uRatePollTimeout = FALLBACK_POLL_SECOND;
- /* for AP mode PS queue */
- skb_queue_head_init(&pMgmt->sNodeDBTable[*puNodeIndex].sTxPSQueue);
- pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
- pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
- pr_debug("Create node index = %d\n", ii);
- return;
-};
-
-/*+
- *
- * Routine Description:
- * Remove Node by NodeIndex
- *
- *
- * Return Value:
- * None
- *
- -*/
-void
-BSSvRemoveOneNode(
- void *hDeviceContext,
- unsigned int uNodeIndex
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue)) != NULL)
- dev_kfree_skb(skb);
- /* clear context */
- memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
- /* clear tx bit map */
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-
- return;
-};
-/*+
- *
- * Routine Description:
- * Update AP Node content in Index 0 of KnownNodeDB
- *
- *
- * Return Value:
- * None
- *
- -*/
-
-void
-BSSvUpdateAPNode(
- void *hDeviceContext,
- unsigned short *pwCapInfo,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
-
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
-
- pMgmt->sNodeDBTable[0].bActive = true;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
-);
- memcpy(pMgmt->sNodeDBTable[0].abyMACAddr, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxSuppRate;
- pMgmt->sNodeDBTable[0].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*pwCapInfo);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
- netdev_dbg(pDevice->dev, "BSSvUpdateAPNode:MaxSuppRate is %d\n",
- pMgmt->sNodeDBTable[0].wMaxSuppRate);
- /* auto rate fallback function initialization */
- pr_debug("pMgmt->sNodeDBTable[0].wTxDataRate = %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
-};
-
-/*+
- *
- * Routine Description:
- * Add Multicast Node content in Index 0 of KnownNodeDB
- *
- *
- * Return Value:
- * None
- *
- -*/
-
-void
-BSSvAddMulticastNode(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (!pDevice->bEnableHostWEP)
- memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
- memset(pMgmt->sNodeDBTable[0].abyMACAddr, 0xff, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
-);
- pMgmt->sNodeDBTable[0].wTxDataRate = pMgmt->sNodeDBTable[0].wMaxBasicRate;
- netdev_dbg(pDevice->dev,
- "BSSvAddMultiCastNode:pMgmt->sNodeDBTable[0].wTxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
- pMgmt->sNodeDBTable[0].uRatePollTimeout = FALLBACK_POLL_SECOND;
-};
-
-/*+
- *
- * Routine Description:
- *
- *
- * One-second callback function to update Node DB info & AP link status
- *
- *
- * Return Value:
- * none.
- *
- -*/
-void
-BSSvSecondCallBack(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
- PWLAN_IE_SSID pItemSSID, pCurrSSID;
- unsigned int uSleepySTACnt = 0;
- unsigned int uNonShortSlotSTACnt = 0;
- unsigned int uLongPreambleSTACnt = 0;
- viawget_wpa_header *wpahdr; /* DavidWang */
-
- spin_lock_irq(&pDevice->lock);
-
- pDevice->uAssocCount = 0;
-
- pDevice->byERPFlag &=
- ~(WLAN_SET_ERP_BARKER_MODE(1) | WLAN_SET_ERP_NONERP_PRESENT(1));
-
- if (pDevice->wUseProtectCntDown > 0) {
- pDevice->wUseProtectCntDown--;
- } else {
- /* disable protect mode */
- pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
- }
-
- if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
- pDevice->byReAssocCount++;
- /* 10 sec timeout */
- if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
- netdev_info(pDevice->dev, "Re-association timeout!!!\n");
- pDevice->byReAssocCount = 0;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- } else if (pDevice->bLinkPass)
- pDevice->byReAssocCount = 0;
- }
-
-#ifdef Calcu_LinkQual
- s_uCalculateLinkQual((void *)pDevice);
-#endif
-
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- /* increase inactivity counter */
- pMgmt->sNodeDBTable[ii].uInActiveCount++;
-
- if (ii > 0) {
- if (pMgmt->sNodeDBTable[ii].uInActiveCount > MAX_INACTIVE_COUNT) {
- BSSvRemoveOneNode(pDevice, ii);
- pr_debug("Inactive timeout [%d] sec, STA index = [%d] remove\n",
- MAX_INACTIVE_COUNT, ii);
- continue;
- }
-
- if (pMgmt->sNodeDBTable[ii].eNodeState >= NODE_ASSOC) {
- pDevice->uAssocCount++;
-
- /* check if any non-ERP STA exists */
- if (pMgmt->sNodeDBTable[ii].uInActiveCount < ERP_RECOVER_COUNT) {
- if (!pMgmt->sNodeDBTable[ii].bShortPreamble) {
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- uLongPreambleSTACnt++;
- }
- if (!pMgmt->sNodeDBTable[ii].bERPExist) {
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- }
- if (!pMgmt->sNodeDBTable[ii].bShortSlotTime)
- uNonShortSlotSTACnt++;
- }
- }
-
- /* check if any STA in PS mode */
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- uSleepySTACnt++;
-
- }
-
- /* rate fallback check */
- if (!pDevice->bFixRate) {
- if (ii > 0) {
- /* ii = 0 for multicast node (AP & Adhoc) */
- RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
- } else {
- /* ii = 0 reserved for unicast AP node (Infra STA) */
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
- netdev_dbg(pDevice->dev,
- "SecondCallback:Before:TxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
- RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
- netdev_dbg(pDevice->dev,
- "SecondCallback:After:TxDataRate is %d\n",
- pMgmt->sNodeDBTable[0].wTxDataRate);
-
- }
-
- }
-
- /* check if pending PS queue */
- if (pMgmt->sNodeDBTable[ii].wEnQueueCnt != 0) {
- pr_debug("Index= %d, Queue = %d pending\n",
- ii,
- pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- if ((ii > 0) && (pMgmt->sNodeDBTable[ii].wEnQueueCnt > 15)) {
- BSSvRemoveOneNode(pDevice, ii);
- pr_info("Pending many queues PS STA Index = %d remove\n",
- ii);
- continue;
- }
- }
- }
-
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && (pDevice->eCurrentPHYType == PHY_TYPE_11G)) {
- /* on/off protect mode */
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = true;
- }
- } else {
- if (pDevice->bProtectMode) {
- MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = false;
- }
- }
- /* on/off short slot time */
-
- if (uNonShortSlotSTACnt > 0) {
- if (pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = false;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- } else {
- if (!pDevice->bShortSlotTime) {
- pDevice->bShortSlotTime = true;
- BBvSetShortSlotTime(pDevice);
- vUpdateIFS((void *)pDevice);
- }
- }
-
- /* on/off barker long preamble mode */
-
- if (uLongPreambleSTACnt > 0) {
- if (!pDevice->bBarkerPreambleMd) {
- MACvEnableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = true;
- }
- } else {
- if (pDevice->bBarkerPreambleMd) {
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = false;
- }
- }
-
- }
-
- /* if any STA is in PS mode, enable DTIM multicast delivery */
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (uSleepySTACnt > 0)
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- else
- pMgmt->sNodeDBTable[0].bPSEnable = false;
- }
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pCurrSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_STANDBY) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
- /* assoc with BSS */
- if (pMgmt->sNodeDBTable[0].bActive) {
- if (pDevice->bUpdateBBVGA)
- s_vCheckPreEDThreshold((void *)pDevice);
-
- if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
- (pDevice->byBBVGACurrent != pDevice->abyBBVGA[0])) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((void *)pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
- }
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- pDevice->bRoaming = true;
- pr_info("Lost AP beacon [%d] sec, disconnected !\n",
- pMgmt->sNodeDBTable[0].uInActiveCount);
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- }
- } else if (pItemSSID->len != 0) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- /*
- * with network manager support there is
- * no need to do a roaming scan
- */
- if (pDevice->bWPASuppWextEnabled)
- pDevice->uAutoReConnectTime = 0;
-#endif
- } else {
- /*
- * mike use old encryption status
- * for wpa reauthentication
- */
- if (pDevice->bWPADEVUp)
- pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
-
- pr_debug("Roaming ...\n");
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- /* if adhoc was started with an empty ESSID, rescan */
- if ((pMgmt->eCurrState == WMAC_STATE_STARTED) && (pCurrSSID->len == 0)) {
- if (pDevice->uAutoReConnectTime < 10) {
- pDevice->uAutoReConnectTime++;
- } else {
- pr_info("Adhoc re-scanning ...\n");
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- pDevice->uAutoReConnectTime = 0;
- }
- }
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (pDevice->bUpdateBBVGA)
- s_vCheckPreEDThreshold((void *)pDevice);
- if (pMgmt->sNodeDBTable[0].uInActiveCount >= ADHOC_LOST_BEACON_COUNT) {
- pr_info("Lost other STA beacon [%d] sec, started !\n",
- pMgmt->sNodeDBTable[0].uInActiveCount);
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- }
- }
-
- spin_unlock_irq(&pDevice->lock);
-
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
- add_timer(&pMgmt->sTimerSecondCallback);
-}
-
-/*+
- *
- * Routine Description:
- *
- *
- * Update Tx attempts and Tx failure counters in Node DB
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-void
-BSSvUpdateNodeTxCounter(
- void *hDeviceContext,
- unsigned char byTsr0,
- unsigned char byTsr1,
- unsigned char *pbyBuffer,
- unsigned int uFIFOHeaderSize
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex = 0;
- unsigned char byTxRetry = (byTsr0 & TSR0_NCR);
- PSTxBufHead pTxBufHead;
- PS802_11Header pMACHeader;
- unsigned short wRate;
- unsigned short wFallBackRate = RATE_1M;
- unsigned char byFallBack;
- unsigned int ii;
-
- pTxBufHead = (PSTxBufHead) pbyBuffer;
- if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_0)
- byFallBack = AUTO_FB_0;
- else if (pTxBufHead->wFIFOCtl & FIFOCTL_AUTO_FB_1)
- byFallBack = AUTO_FB_1;
- else
- byFallBack = AUTO_FB_NONE;
- wRate = pTxBufHead->wReserved;
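- /* With auto fallback enabled, awHWRetry0/awHWRetry1[wRate - RATE_18M][retry]
- * below maps the original rate and retry count to the rate actually used */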
-
- /* Only unicast frames (which need an ACK) use the supported rates */
- if (pTxBufHead->wFIFOCtl & FIFOCTL_NEEDACK) {
- pr_debug("wRate %04X, byTsr0 %02X, byTsr1 %02X\n",
- wRate, byTsr0, byTsr1);
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- pMgmt->sNodeDBTable[0].uTxAttempts += 1;
- if ((byTsr1 & TSR1_TERR) == 0) {
- /* transmit succeeded; count at least one Tx attempt */
- pMgmt->sNodeDBTable[0].uTxOk[MAX_RATE]++;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
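- /* age the entry: keep it for BSS_CLEAR_COUNT passes before freeing the slot */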
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[0].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[0].uTxFailures++;
- }
- pMgmt->sNodeDBTable[0].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[0].uTxFail[MAX_RATE] += byTxRetry;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- pMgmt->sNodeDBTable[0].uTxFail[wRate] += byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[0].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- pMACHeader = (PS802_11Header)(pbyBuffer + uFIFOHeaderSize);
-
- if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
- if ((byTsr1 & TSR1_TERR) == 0) {
- /* transmit succeeded; count at least one Tx attempt */
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- wFallBackRate = wRate;
- } else if (byFallBack == AUTO_FB_0) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry0[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry0[wRate-RATE_18M][4];
- } else if (byFallBack == AUTO_FB_1) {
- if (byTxRetry < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][byTxRetry];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wFallBackRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += byTxRetry;
- if (byTxRetry != 0) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[MAX_RATE] += byTxRetry;
- if ((byFallBack == AUTO_FB_NONE) ||
- (wRate < RATE_18M)) {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wRate] += byTxRetry;
- } else if (byFallBack == AUTO_FB_0) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry0[wRate - RATE_18M][ii];
- else
- wFallBackRate = awHWRetry0[wRate - RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- } else if (byFallBack == AUTO_FB_1) {
- for (ii = 0; ii < byTxRetry; ii++) {
- if (ii < 5)
- wFallBackRate = awHWRetry1[wRate-RATE_18M][ii];
- else
- wFallBackRate = awHWRetry1[wRate-RATE_18M][4];
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[wFallBackRate]++;
- }
- }
- }
- }
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- * Clear Nodes & skb in DB Table
- *
- *
- * Parameters:
- * In:
- * hDeviceContext - The adapter context.
- * uStartIndex - starting index
- * Out:
- * none
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-BSSvClearNodeDBTable(
- void *hDeviceContext,
- unsigned int uStartIndex
-)
-
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- struct sk_buff *skb;
- unsigned int ii;
-
- for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive) {
- /* check if sTxPSQueue has been initialized */
- if (pMgmt->sNodeDBTable[ii].sTxPSQueue.next != NULL) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- pr_debug("PS skb != NULL %d\n", ii);
- dev_kfree_skb(skb);
- }
- }
- memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
- }
- }
-
- return;
-};
-
-void s_vCheckSensitivity(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) && (pDevice->byRFType == RF_RFMD2959) &&
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- return;
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL) {
- /* Update BB Reg if RSSI is too strong */
- long LocalldBmAverage = 0;
- long uNumofdBm = 0;
-
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
- if (pBSSList->ldBmAverage[ii] != 0) {
- uNumofdBm++;
- LocalldBmAverage += pBSSList->ldBmAverage[ii];
- }
- }
- if (uNumofdBm > 0) {
- LocalldBmAverage = LocalldBmAverage/uNumofdBm;
- for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
- pr_debug("LocalldBmAverage:%ld, %ld %02x\n",
- LocalldBmAverage,
- pDevice->ldBmThreshold[ii],
- pDevice->abyBBVGA[ii]);
- if (LocalldBmAverage < pDevice->ldBmThreshold[ii]) {
- pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
- break;
- }
- }
- if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
- pDevice->uBBVGADiffCount++;
- if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand((void *)pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
- } else {
- pDevice->uBBVGADiffCount = 0;
- }
- }
- }
- }
-}
-
-void
-BSSvClearAnyBSSJoinRecord(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++)
- pMgmt->sBSSList[ii].bSelected = false;
-}
-
-#ifdef Calcu_LinkQual
-void s_uCalculateLinkQual(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- unsigned long TxOkRatio, TxCnt;
- unsigned long RxOkRatio, RxCnt;
- unsigned long RssiRatio;
- long ldBm;
-
- TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
- pDevice->scStatistic.TxRetryOkCount +
- pDevice->scStatistic.TxFailCount;
- RxCnt = pDevice->scStatistic.RxFcsErrCnt +
- pDevice->scStatistic.RxOkCnt;
- TxOkRatio = (TxCnt < 6) ? 4000 : ((pDevice->scStatistic.TxNoRetryOkCount * 4000) / TxCnt);
- RxOkRatio = (RxCnt < 6) ? 2000 : ((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
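- /* TxOkRatio (0-4000), RxOkRatio (0-2000) and RssiRatio (0-4000) sum to at
- * most 10000, so dividing by 100 below yields a 0-100 LinkQuality */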
- /* decide link quality */
- if (!pDevice->bLinkPass) {
- pDevice->scStatistic.LinkQuality = 0;
- pDevice->scStatistic.SignalStren = 0;
- } else {
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- if (-ldBm < 50)
- RssiRatio = 4000;
- else if (-ldBm > 90)
- RssiRatio = 0;
- else
- RssiRatio = (40-(-ldBm-50))*4000/40;
- pDevice->scStatistic.SignalStren = RssiRatio/40;
- pDevice->scStatistic.LinkQuality = (RssiRatio+TxOkRatio+RxOkRatio)/100;
- }
- pDevice->scStatistic.RxFcsErrCnt = 0;
- pDevice->scStatistic.RxOkCnt = 0;
- pDevice->scStatistic.TxFailCount = 0;
- pDevice->scStatistic.TxNoRetryOkCount = 0;
- pDevice->scStatistic.TxRetryOkCount = 0;
-}
-#endif
-
-void s_vCheckPreEDThreshold(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PKnownBSS pBSSList = NULL;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
- if (pBSSList != NULL)
- pDevice->byBBPreEDRSSI = (unsigned char) (~(pBSSList->ldBmAverRange) + 1);
- }
-}
diff --git a/drivers/staging/vt6655/bssdb.h b/drivers/staging/vt6655/bssdb.h
deleted file mode 100644
index 5d4dd28b6223..000000000000
--- a/drivers/staging/vt6655/bssdb.h
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: bssdb.h
- *
- * Purpose: Handles the Basic Service Set & Node Database functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 16, 2002
- *
- */
-
-#ifndef __BSSDB_H__
-#define __BSSDB_H__
-
-#include <linux/skbuff.h>
-#include "80211hdr.h"
-#include "80211mgr.h"
-#include "card.h"
-
-#define MAX_NODE_NUM 64
-#define MAX_BSS_NUM 42
-#define LOST_BEACON_COUNT 10 // 10 sec, XP defined
-#define MAX_PS_TX_BUF 32 // sta max power saving tx buf
-#define ADHOC_LOST_BEACON_COUNT 30 // 30 sec, beacon lost for adhoc only
-#define MAX_INACTIVE_COUNT 300 // 300 sec, inactive STA node refresh
-
-#define USE_PROTECT_PERIOD 10 // 10 sec, Use protect mode check period
-#define ERP_RECOVER_COUNT 30 // 30 sec, ERP support callback check
-#define BSS_CLEAR_COUNT 1
-
-#define RSSI_STAT_COUNT 10
-#define MAX_CHECK_RSSI_COUNT 8
-
-// STA dwflags
-#define WLAN_STA_AUTH BIT0
-#define WLAN_STA_ASSOC BIT1
-#define WLAN_STA_PS BIT2
-#define WLAN_STA_TIM BIT3
-// permanent; do not remove entry on expiration
-#define WLAN_STA_PERM BIT4
-// If 802.1X is used, this flag
-// controls whether the STA is authorized to
-// send and receive non-IEEE 802.1X frames
-#define WLAN_STA_AUTHORIZED BIT5
-
-#define MAX_RATE 12
-
-#define MAX_WPA_IE_LEN 64
-
-//
-// IEEE 802.11 Structures and definitions
-//
-
-typedef enum _NDIS_802_11_NETWORK_TYPE {
- Ndis802_11FH,
- Ndis802_11DS,
- Ndis802_11OFDM5,
- Ndis802_11OFDM24,
- Ndis802_11NetworkTypeMax // not a real type, defined as an upper bound
-} NDIS_802_11_NETWORK_TYPE, *PNDIS_802_11_NETWORK_TYPE;
-
-typedef struct tagSERPObject {
- bool bERPExist;
- unsigned char byERP;
-} ERPObject, *PERPObject;
-
-typedef struct tagSRSNCapObject {
- bool bRSNCapExist;
- unsigned short wRSNCap;
-} SRSNCapObject, *PSRSNCapObject;
-
-// BSS info(AP)
-#pragma pack(1)
-typedef struct tagKnownBSS {
- bool bActive;
- unsigned char abyBSSID[WLAN_BSSID_LEN];
- unsigned int uChannel;
- unsigned char abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned int uRSSI;
- unsigned char bySQ;
- unsigned short wBeaconInterval;
- unsigned short wCapInfo;
- unsigned char abySSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char byRxRate;
-
- unsigned char byRSSIStatCnt;
- long ldBmMAX;
- long ldBmAverage[RSSI_STAT_COUNT];
- long ldBmAverRange;
- bool bSelected;
-
- bool bWPAValid;
- unsigned char byGKType;
- unsigned char abyPKType[4];
- unsigned short wPKCount;
- unsigned char abyAuthType[4];
- unsigned short wAuthCount;
- unsigned char byDefaultK_as_PK;
- unsigned char byReplayIdx;
-
- bool bWPA2Valid;
- unsigned char byCSSGK;
- unsigned short wCSSPKCount;
- unsigned char abyCSSPK[4];
- unsigned short wAKMSSAuthCount;
- unsigned char abyAKMSSAuthType[4];
-
- unsigned char byWPAIE[MAX_WPA_IE_LEN];
- unsigned char byRSNIE[MAX_WPA_IE_LEN];
- unsigned short wWPALen;
- unsigned short wRSNLen;
-
- unsigned int uClearCount;
- unsigned int uIELength;
- u64 qwBSSTimestamp;
- u64 qwLocalTSF;
-
- CARD_PHY_TYPE eNetworkTypeInUse;
-
- ERPObject sERP;
- SRSNCapObject sRSNCapObj;
- unsigned char abyIEs[1024];
-} __attribute__ ((__packed__))
-KnownBSS , *PKnownBSS;
-
-#pragma pack()
-
-typedef enum tagNODE_STATE {
- NODE_FREE,
- NODE_AGED,
- NODE_KNOWN,
- NODE_AUTH,
- NODE_ASSOC
-} NODE_STATE, *PNODE_STATE;
-
-// STA node info
-typedef struct tagKnownNodeDB {
- bool bActive;
- unsigned char abyMACAddr[WLAN_ADDR_LEN];
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN];
- unsigned short wTxDataRate;
- bool bShortPreamble;
- bool bERPExist;
- bool bShortSlotTime;
- unsigned int uInActiveCount;
- unsigned short wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate, which depends on the packet type.
- unsigned short wMaxSuppRate; //Records the highest supported rate obtained from the SuppRates IE and ExtSuppRates IE in the Beacon.
- unsigned short wSuppRate;
- unsigned char byTopOFDMBasicRate;//Records the highest basic rate in OFDM mode
- unsigned char byTopCCKBasicRate; //Records the highest basic rate in CCK mode
-
- // For AP mode
- struct sk_buff_head sTxPSQueue;
- unsigned short wCapInfo;
- unsigned short wListenInterval;
- unsigned short wAID;
- NODE_STATE eNodeState;
- bool bPSEnable;
- bool bRxPSPoll;
- unsigned char byAuthSequence;
- unsigned long ulLastRxJiffer;
- unsigned char bySuppRate;
- unsigned long dwFlags;
- unsigned short wEnQueueCnt;
-
- bool bOnFly;
- unsigned long long KeyRSC;
- unsigned char byKeyIndex;
- unsigned long dwKeyIndex;
- unsigned char byCipherSuite;
- unsigned long dwTSC47_16;
- unsigned short wTSC15_0;
- unsigned int uWepKeyLength;
- unsigned char abyWepKey[WLAN_WEPMAX_KEYLEN];
- // Auto rate fallback vars
- bool bIsInFallback;
- unsigned int uAverageRSSI;
- unsigned int uRateRecoveryTimeout;
- unsigned int uRatePollTimeout;
- unsigned int uTxFailures;
- unsigned int uTxAttempts;
-
- unsigned int uTxRetry;
- unsigned int uFailureRatio;
- unsigned int uRetryRatio;
- unsigned int uTxOk[MAX_RATE+1];
- unsigned int uTxFail[MAX_RATE+1];
- unsigned int uTimeCount;
-} KnownNodeDB, *PKnownNodeDB;
-
-PKnownBSS
-BSSpSearchBSSList(
- void *hDeviceContext,
- unsigned char *pbyDesireBSSID,
- unsigned char *pbyDesireSSID,
- CARD_PHY_TYPE ePhyType
-);
-
-PKnownBSS
-BSSpAddrIsInBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSID,
- PWLAN_IE_SSID pSSID
-);
-
-void
-BSSvClearBSSList(
- void *hDeviceContext,
- bool bKeepCurrBSSID
-);
-
-bool
-BSSbInsertToBSSList(
- void *hDeviceContext,
- unsigned char *abyBSSIDAddr,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-);
-
-bool
-BSSbUpdateToBSSList(
- void *hDeviceContext,
- __le64 qwTimestamp,
- unsigned short wBeaconInterval,
- unsigned short wCapInfo,
- unsigned char byCurrChannel,
- bool bChannelHit,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pSuppRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates,
- PERPObject psERP,
- PWLAN_IE_RSN pRSN,
- PWLAN_IE_RSN_EXT pRSNWPA,
- PWLAN_IE_COUNTRY pIE_Country,
- PWLAN_IE_QUIET pIE_Quiet,
- PKnownBSS pBSSList,
- unsigned int uIELength,
- unsigned char *pbyIEs,
- void *pRxPacketContext
-);
-
-bool
-BSSDBbIsSTAInNodeDB(void *hDeviceContext, unsigned char *abyDstAddr,
- unsigned int *puNodeIndex);
-
-void
-BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex);
-
-void
-BSSvUpdateAPNode(
- void *hDeviceContext,
- unsigned short *pwCapInfo,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pExtSuppRates
-);
-
-void
-BSSvSecondCallBack(
- void *hDeviceContext
-);
-
-void
-BSSvUpdateNodeTxCounter(
- void *hDeviceContext,
- unsigned char byTsr0,
- unsigned char byTsr1,
- unsigned char *pbyBuffer,
- unsigned int uFIFOHeaderSize
-);
-
-void
-BSSvRemoveOneNode(
- void *hDeviceContext,
- unsigned int uNodeIndex
-);
-
-void
-BSSvAddMulticastNode(
- void *hDeviceContext
-);
-
-void
-BSSvClearNodeDBTable(
- void *hDeviceContext,
- unsigned int uStartIndex
-);
-
-void
-BSSvClearAnyBSSJoinRecord(
- void *hDeviceContext
-);
-
-#endif //__BSSDB_H__
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 5a6950264bdc..a0796405c308 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -21,7 +21,6 @@
* Functions:
* s_vSafeResetTx - Reset Tx
* CARDvSetRSPINF - Set RSPINF
- * vUpdateIFS - Update slotTime,SIFS,DIFS, and EIFS
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
@@ -34,8 +33,6 @@
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
* CARDbRadioPowerOff - Turn Off NIC Radio Power
* CARDbRadioPowerOn - Turn On NIC Radio Power
- * CARDbSetWEPMode - Set NIC Wep mode
- * CARDbSetTxPower - Set NIC tx power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -50,38 +47,24 @@
#include "mac.h"
#include "desc.h"
#include "rf.h"
-#include "vntwifi.h"
#include "power.h"
-#include "key.h"
-#include "rc4.h"
-#include "country.h"
-#include "channel.h"
/*--------------------- Static Definitions -------------------------*/
-#define C_SIFS_A 16 // micro sec.
+#define C_SIFS_A 16 /* micro sec. */
#define C_SIFS_BG 10
-#define C_EIFS 80 // micro sec.
+#define C_EIFS 80 /* micro sec. */
-#define C_SLOT_SHORT 9 // micro sec.
+#define C_SLOT_SHORT 9 /* micro sec. */
#define C_SLOT_LONG 20
-#define C_CWMIN_A 15 // slot time
+#define C_CWMIN_A 15 /* slot time */
#define C_CWMIN_B 31
-#define C_CWMAX 1023 // slot time
+#define C_CWMAX 1023 /* slot time */
-#define WAIT_BEACON_TX_DOWN_TMO 3 // Times
-
-//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
-static unsigned char abyDefaultSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
-//6M, 9M, 12M, 48M
-static unsigned char abyDefaultExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
-//6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
-static unsigned char abyDefaultSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
-//1M, 2M, 5M, 11M,
-static unsigned char abyDefaultSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
+#define WAIT_BEACON_TX_DOWN_TMO 3 /* Times */
/*--------------------- Static Variables --------------------------*/
@@ -94,7 +77,7 @@ static
void
s_vCalculateOFDMRParameter(
unsigned char byRate,
- CARD_PHY_TYPE ePHYType,
+ u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime
);
@@ -113,20 +96,19 @@ s_vCalculateOFDMRParameter(
* pbyRsvTime - pointer to RSPINF RsvTime field
*
* Return Value: none
- *
*/
static
void
s_vCalculateOFDMRParameter(
unsigned char byRate,
- CARD_PHY_TYPE ePHYType,
+ u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime
)
{
switch (byRate) {
case RATE_6M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9B;
*pbyRsvTime = 44;
} else {
@@ -136,7 +118,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_9M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9F;
*pbyRsvTime = 36;
} else {
@@ -146,7 +128,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_12M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9A;
*pbyRsvTime = 32;
} else {
@@ -156,7 +138,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_18M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9E;
*pbyRsvTime = 28;
} else {
@@ -166,7 +148,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_36M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9D;
*pbyRsvTime = 24;
} else {
@@ -176,7 +158,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_48M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x98;
*pbyRsvTime = 24;
} else {
@@ -186,7 +168,7 @@ s_vCalculateOFDMRParameter(
break;
case RATE_54M:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9C;
*pbyRsvTime = 24;
} else {
@@ -197,7 +179,7 @@ s_vCalculateOFDMRParameter(
case RATE_24M:
default:
- if (ePHYType == PHY_TYPE_11A) {//5GHZ
+ if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x99;
*pbyRsvTime = 28;
} else {
@@ -208,167 +190,9 @@ s_vCalculateOFDMRParameter(
}
}
-/*
- * Description: Set RSPINF
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-static
-void
-s_vSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs)
-{
- union vnt_phy_field_swap phy;
- unsigned char byTxRate = 0, byRsvTime = 0; // For OFDM
-
- //Set to Page1
- MACvSelectPage1(pDevice->PortOffset);
-
- /* RSPINF_b_1 */
- vnt_get_phy_field(pDevice,
- 14,
- VNTWIFIbyGetACKTxRate(RATE_1M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B,
- &phy.field_read);
-
- /* swap over to get correct write order */
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_1, phy.field_write);
-
- /* RSPINF_b_2 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_2M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_2, phy.field_write);
-
- /* RSPINF_b_5 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_5M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_5, phy.field_write);
-
- /* RSPINF_b_11 */
- vnt_get_phy_field(pDevice, 14,
- VNTWIFIbyGetACKTxRate(RATE_11M, pvSupportRateIEs, pvExtSupportRateIEs),
- PK_TYPE_11B, &phy.field_read);
-
- swap(phy.swap[0], phy.swap[1]);
-
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, phy.field_write);
-
- //RSPINF_a_6
- s_vCalculateOFDMRParameter(RATE_6M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_9
- s_vCalculateOFDMRParameter(RATE_9M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_12
- s_vCalculateOFDMRParameter(RATE_12M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_18
- s_vCalculateOFDMRParameter(RATE_18M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_24
- s_vCalculateOFDMRParameter(RATE_24M,
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_36
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_36M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_48
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_48M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_54
- s_vCalculateOFDMRParameter(
- VNTWIFIbyGetACKTxRate(RATE_54M, pvSupportRateIEs, pvExtSupportRateIEs),
- ePHYType,
- &byTxRate,
- &byRsvTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_72
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
- //Set to Page0
- MACvSelectPage0(pDevice->PortOffset);
-}
-
/*--------------------- Export Functions --------------------------*/
/*
- * Description: Get Card short preamble option value
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if short preamble; otherwise false
- *
- */
-bool CARDbIsShortPreamble(struct vnt_private *pDevice)
-{
-
- if (pDevice->byPreambleType == 0)
- return false;
-
- return true;
-}
-
-/*
- * Description: Get Card short slot time option value
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if short slot time; otherwise false
- *
- */
-bool CARDbIsShorSlotTime(struct vnt_private *pDevice)
-{
-
- return pDevice->bShortSlotTime;
-}
-
-/*
* Description: Update IFS
*
* Parameters:
@@ -378,138 +202,118 @@ bool CARDbIsShorSlotTime(struct vnt_private *pDevice)
* none
*
* Return Value: None.
- *
*/
-bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
- unsigned short wCapInfo, unsigned char byERPField,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs)
+bool CARDbSetPhyParameter(struct vnt_private *pDevice, u8 bb_type)
{
unsigned char byCWMaxMin = 0;
unsigned char bySlot = 0;
unsigned char bySIFS = 0;
unsigned char byDIFS = 0;
unsigned char byData;
- PWLAN_IE_SUPP_RATES pSupportRates = (PWLAN_IE_SUPP_RATES) pvSupportRateIEs;
- PWLAN_IE_SUPP_RATES pExtSupportRates = (PWLAN_IE_SUPP_RATES) pvExtSupportRateIEs;
-
- //Set SIFS, DIFS, EIFS, SlotTime, CwMin
- if (ePHYType == PHY_TYPE_11A) {
- if (pSupportRates == NULL)
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesA;
+ int i;
+ /* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
+ if (bb_type == BB_TYPE_11A) {
if (pDevice->byRFType == RF_AIROHA7230) {
- // AL7230 use single PAPE and connect to PAPE_2.4G
+ /* AL7230 use single PAPE and connect to PAPE_2.4G */
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G);
pDevice->abyBBVGA[0] = 0x20;
pDevice->abyBBVGA[2] = 0x10;
pDevice->abyBBVGA[3] = 0x10;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x1C)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A);
pDevice->abyBBVGA[0] = 0x18;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x14) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0x57);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0x57);
}
} else {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11A);
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x03);
+ BBbWriteEmbedded(pDevice, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2*C_SLOT_SHORT;
byCWMaxMin = 0xA4;
- } else if (ePHYType == PHY_TYPE_11B) {
- if (pSupportRates == NULL)
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesB;
-
+ } else if (bb_type == BB_TYPE_11B) {
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11B);
if (pDevice->byRFType == RF_AIROHA7230) {
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[2] = 0x00;
pDevice->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
pDevice->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0xD3);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x02);
+ BBbWriteEmbedded(pDevice, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
byCWMaxMin = 0xA5;
- } else {// PK_TYPE_11GA & PK_TYPE_11GB
- if (pSupportRates == NULL) {
- pSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultSuppRatesG;
- pExtSupportRates = (PWLAN_IE_SUPP_RATES) abyDefaultExtSuppRatesG;
- }
+ } else { /* PK_TYPE_11GA & PK_TYPE_11GB */
MACvSetBBType(pDevice->PortOffset, BB_TYPE_11G);
if (pDevice->byRFType == RF_AIROHA7230) {
pDevice->abyBBVGA[0] = 0x1C;
pDevice->abyBBVGA[2] = 0x00;
pDevice->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
} else if (pDevice->byRFType == RF_UW2452) {
pDevice->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(pDevice->PortOffset, 0xE7, &byData);
+ BBbReadEmbedded(pDevice, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(pDevice->PortOffset, 0xE7, pDevice->abyBBVGA[0]);
- BBbWriteEmbedded(pDevice->PortOffset, 0xE1, 0xD3);
+ BBbWriteEmbedded(pDevice, 0xE7, pDevice->abyBBVGA[0]);
+ BBbWriteEmbedded(pDevice, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(pDevice->PortOffset, 0x88, 0x08);
+ BBbWriteEmbedded(pDevice, 0x88, 0x08);
bySIFS = C_SIFS_BG;
- if (VNTWIFIbIsShortSlotTime(wCapInfo)) {
+
+ if (pDevice->bShortSlotTime) {
bySlot = C_SLOT_SHORT;
byDIFS = C_SIFS_BG + 2*C_SLOT_SHORT;
} else {
bySlot = C_SLOT_LONG;
byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
}
- if (VNTWIFIbyGetMaxSupportRate(pSupportRates, pExtSupportRates) > RATE_11M)
- byCWMaxMin = 0xA4;
- else
- byCWMaxMin = 0xA5;
-
- if (pDevice->bProtectMode != VNTWIFIbIsProtectMode(byERPField)) {
- pDevice->bProtectMode = VNTWIFIbIsProtectMode(byERPField);
- if (pDevice->bProtectMode)
- MACvEnableProtectMD(pDevice->PortOffset);
- else
- MACvDisableProtectMD(pDevice->PortOffset);
- }
- if (pDevice->bBarkerPreambleMd != VNTWIFIbIsBarkerMode(byERPField)) {
- pDevice->bBarkerPreambleMd = VNTWIFIbIsBarkerMode(byERPField);
- if (pDevice->bBarkerPreambleMd)
- MACvEnableBarkerPreambleMd(pDevice->PortOffset);
- else
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
+ byCWMaxMin = 0xa4;
+
+ for (i = RATE_54M; i >= RATE_6M; i--) {
+ if (pDevice->basic_rates & ((u32)(0x1 << i))) {
+ byCWMaxMin |= 0x1;
+ break;
+ }
}
}
if (pDevice->byRFType == RF_RFMD2959) {
- // bcs TX_PE will reserve 3 us
- // hardware's processing time here is 2 us.
+		/*
+		 * Shorten SIFS/DIFS because TX_PE will reserve 3 us;
+		 * the hardware's processing time here is 2 us.
+		 */
bySIFS -= 3;
byDIFS -= 3;
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
- //// MAC will need 2 us to process, so the SIFS, DIFS can be shorter by 2 us.
+		/*
+		 * TX_PE will reserve 3 us for MAX2829 A mode only, for
+		 * better TX throughput; the MAC needs 2 us to process, so
+		 * SIFS and DIFS can be shortened by 2 us.
+		 */
}
if (pDevice->bySIFS != bySIFS) {
@@ -527,10 +331,6 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
if (pDevice->bySlot != bySlot) {
pDevice->bySlot = bySlot;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, pDevice->bySlot);
- if (pDevice->bySlot == C_SLOT_SHORT)
- pDevice->bShortSlotTime = true;
- else
- pDevice->bShortSlotTime = false;
BBvSetShortSlotTime(pDevice);
}
@@ -538,14 +338,11 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
pDevice->byCWMaxMin = byCWMaxMin;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, pDevice->byCWMaxMin);
}
- if (VNTWIFIbIsShortPreamble(wCapInfo))
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = 0;
- s_vSetRSPINF(pDevice, ePHYType, pSupportRates, pExtSupportRates);
- pDevice->eCurrentPHYType = ePHYType;
- // set for NDIS OID_802_11SUPPORTED_RATES
+ pDevice->byPacketType = CARDbyGetPktType(pDevice);
+
+ CARDvSetRSPINF(pDevice, bb_type);
+
return true;
}
@@ -563,7 +360,6 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType,
* none
*
* Return Value: none
- *
*/
bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
u64 qwBSSTimestamp, u64 qwLocalTSF)
@@ -572,8 +368,7 @@ bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
if (qwBSSTimestamp != qwLocalTSF) {
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF);
- // adjust TSF
- // HW's TSF add TSF Offset reg
+		/* adjust TSF: HW adds the TSF Offset reg to its TSF */
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
@@ -593,21 +388,20 @@ bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
* none
*
* Return Value: true if succeed; otherwise false
- *
*/
bool CARDbSetBeaconPeriod(struct vnt_private *pDevice,
unsigned short wBeaconInterval)
{
u64 qwNextTBTT = 0;
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwNextTBTT); //Get Local TSF counter
+ CARDbGetCurrentTSF(pDevice, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
- // set HW beacon interval
+ /* set HW beacon interval */
VNSvOutPortW(pDevice->PortOffset + MAC_REG_BI, wBeaconInterval);
pDevice->wBeaconInterval = wBeaconInterval;
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
@@ -616,225 +410,6 @@ bool CARDbSetBeaconPeriod(struct vnt_private *pDevice,
}
/*
- * Description: Card Stop Hardware Tx
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * ePktType - Packet type to stop
- * Out:
- * none
- *
- * Return Value: true if all data packet complete; otherwise false.
- *
- */
-bool CARDbStopTxPacket(struct vnt_private *pDevice, CARD_PKT_TYPE ePktType)
-{
-
- if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = true;
- pDevice->bStopTx0Pkt = true;
- pDevice->bStopDataPkt = true;
- } else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = true;
- } else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = true;
- } else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = true;
- }
-
- if (pDevice->bStopBeacon == true) {
- if (pDevice->bIsBeaconBufReadySet == true) {
- if (pDevice->cbBeaconBufReadySetCnt < WAIT_BEACON_TX_DOWN_TMO) {
- pDevice->cbBeaconBufReadySetCnt++;
- return false;
- }
- }
- pDevice->bIsBeaconBufReadySet = false;
- pDevice->cbBeaconBufReadySetCnt = 0;
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
- // wait all TD0 complete
- if (pDevice->bStopTx0Pkt == true) {
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0)
- return false;
- }
- // wait all Data TD complete
- if (pDevice->bStopDataPkt == true) {
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0)
- return false;
- }
-
- return true;
-}
-
-/*
- * Description: Card Start Hardware Tx
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * ePktType - Packet type to start
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-bool CARDbStartTxPacket(struct vnt_private *pDevice, CARD_PKT_TYPE ePktType)
-{
-
- if (ePktType == PKT_TYPE_802_11_ALL) {
- pDevice->bStopBeacon = false;
- pDevice->bStopTx0Pkt = false;
- pDevice->bStopDataPkt = false;
- } else if (ePktType == PKT_TYPE_802_11_BCN) {
- pDevice->bStopBeacon = false;
- } else if (ePktType == PKT_TYPE_802_11_MNG) {
- pDevice->bStopTx0Pkt = false;
- } else if (ePktType == PKT_TYPE_802_11_DATA) {
- pDevice->bStopDataPkt = false;
- }
-
- if ((pDevice->bStopBeacon == false) &&
- (pDevice->bBeaconBufReady == true) &&
- (pDevice->op_mode == NL80211_IFTYPE_ADHOC)) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-
- return true;
-}
-
-/*
- * Description: Card Set BSSID value
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * pbyBSSID - pointer to BSSID field
- * bAdhoc - flag to indicate IBSS
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-bool CARDbSetBSSID(struct vnt_private *pDevice,
- unsigned char *pbyBSSID, enum nl80211_iftype op_mode)
-{
-
- MACvWriteBSSIDAddress(pDevice->PortOffset, pbyBSSID);
- memcpy(pDevice->abyBSSID, pbyBSSID, WLAN_BSSID_LEN);
- if (op_mode == NL80211_IFTYPE_ADHOC)
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
- else
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
-
- if (op_mode == NL80211_IFTYPE_AP)
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
- else
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
-
- if (op_mode == NL80211_IFTYPE_UNSPECIFIED) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = false;
- pDevice->byRxMode &= ~RCR_BSSID;
- pr_debug("wcmd: rx_mode = %x\n", pDevice->byRxMode);
- } else {
- if (is_zero_ether_addr(pDevice->abyBSSID) == false) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_BSSID);
- pDevice->bBSSIDFilter = true;
- pDevice->byRxMode |= RCR_BSSID;
- }
- pr_debug("wmgr: rx_mode = %x\n", pDevice->byRxMode);
- }
- // Adopt BSS state in Adapter Device Object
- pDevice->op_mode = op_mode;
- return true;
-}
-
-/*
- * Description: Card indicate status
- *
- * Parameters:
- * In:
- * pDeviceHandler - The adapter to be set
- * eStatus - Status
- * Out:
- * none
- *
- * Return Value: true if success; false if failed.
- *
- */
-
-/*
- * Description: Save Assoc info. contain in assoc. response frame
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * wCapabilityInfo - Capability information
- * wStatus - Status code
- * wAID - Assoc. ID
- * uLen - Length of IEs
- * pbyIEs - pointer to IEs
- * Out:
- * none
- *
- * Return Value: true if succeed; otherwise false
- *
- */
-bool CARDbSetTxDataRate(
- struct vnt_private *pDevice,
- unsigned short wDataRate
-)
-{
-
- pDevice->wCurrentRate = wDataRate;
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Consider to power down when no more packets to tx or rx.
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: true if power down success; otherwise false
- *
- -*/
-bool
-CARDbPowerDown(
- struct vnt_private *pDevice
-)
-{
- unsigned int uIdx;
-
- // check if already in Doze mode
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return true;
-
- // Froce PSEN on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
-
- // check if all TD are empty,
-
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
-
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- pr_debug("Go to Doze ZZZZZZZZZZZZZZZ\n");
- return true;
-}
-
-/*
* Description: Turn off Radio power
*
* Parameters:
@@ -844,7 +419,6 @@ CARDbPowerDown(
* none
*
* Return Value: true if success; otherwise false
- *
*/
bool CARDbRadioPowerOff(struct vnt_private *pDevice)
{
@@ -861,7 +435,7 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu:20050104
+ case RF_AIROHA7230:
MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
MACvWordRegBitsOff(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
break;
@@ -870,12 +444,11 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
- BBvSetDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
+ BBvSetDeepSleep(pDevice, pDevice->byLocalID);
pDevice->bRadioOff = true;
- //2007-0409-03,<Add> by chester
pr_debug("chester power off\n");
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
+ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
return bResult;
}
@@ -889,7 +462,6 @@ bool CARDbRadioPowerOff(struct vnt_private *pDevice)
* none
*
* Return Value: true if success; otherwise false
- *
*/
bool CARDbRadioPowerOn(struct vnt_private *pDevice)
{
@@ -907,7 +479,7 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
pr_debug("chester pbRadioOff\n");
return true; }
- BBvExitDeepSleep(pDevice->PortOffset, pDevice->byLocalID);
+ BBvExitDeepSleep(pDevice, pDevice->byLocalID);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
@@ -919,7 +491,7 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu:20050104
+ case RF_AIROHA7230:
MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPE3));
break;
@@ -927,493 +499,11 @@ bool CARDbRadioPowerOn(struct vnt_private *pDevice)
}
pDevice->bRadioOff = false;
-// 2007-0409-03,<Add> by chester
pr_debug("chester power on\n");
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
+ MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
return bResult;
}
-bool CARDbRemoveKey(struct vnt_private *pDevice, unsigned char *pbyBSSID)
-{
-
- KeybRemoveAllKey(&(pDevice->sKey), pbyBSSID, pDevice->PortOffset);
- return true;
-}
-
-/*
- *
- * Description:
- * Add BSSID in PMKID Candidate list.
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * pbyBSSID - BSSID address for adding
- * wRSNCap - BSS's RSN capability
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbAdd_PMKID_Candidate(
- struct vnt_private *pDevice,
- unsigned char *pbyBSSID,
- bool bRSNCapExist,
- unsigned short wRSNCap
-)
-{
- struct pmkid_candidate *pCandidateList;
- unsigned int ii = 0;
-
- pr_debug("bAdd_PMKID_Candidate START: (%d)\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
-
- if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST) {
- pr_debug("vFlush_PMKID_Candidate: 3\n");
- memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
- }
-
- for (ii = 0; ii < 6; ii++)
- pr_debug("%02X ", *(pbyBSSID + ii));
-
- pr_debug("\n");
-
- // Update Old Candidate
- for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if (bRSNCapExist && (wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- return true;
- }
- }
-
- // New Candidate
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if (bRSNCapExist && (wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
- pDevice->gsPMKIDCandidate.NumCandidates++;
- pr_debug("NumCandidates:%d\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return true;
-}
-
-void *
-CARDpGetCurrentAddress(
- struct vnt_private *pDevice
-)
-{
-
- return pDevice->abyCurrentNetAddr;
-}
-
-/*
- *
- * Description:
- * Start Spectrum Measure defined in 802.11h
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbStartMeasure(
- struct vnt_private *pDevice,
- void *pvMeasureEIDs,
- unsigned int uNumOfMeasureEIDs
-)
-{
- PWLAN_IE_MEASURE_REQ pEID = (PWLAN_IE_MEASURE_REQ) pvMeasureEIDs;
- u64 qwCurrTSF;
- u64 qwStartTSF;
- bool bExpired = true;
- unsigned short wDuration = 0;
-
- if ((pEID == NULL) ||
- (uNumOfMeasureEIDs == 0)) {
- return true;
- }
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- if (pDevice->bMeasureInProgress == true) {
- pDevice->bMeasureInProgress = false;
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- set_channel(pDevice, pDevice->byOrgChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- }
- pDevice->uNumOfMeasureEIDs = uNumOfMeasureEIDs;
-
- do {
- pDevice->pCurrMeasureEID = pEID;
- pEID++;
- pDevice->uNumOfMeasureEIDs--;
-
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- qwStartTSF = *((u64 *)(pDevice->pCurrMeasureEID->sReq.abyStartTime));
- wDuration = *((unsigned short *)(pDevice->pCurrMeasureEID->sReq.abyDuration));
- wDuration += 1; // 1 TU for channel switching
-
- if (qwStartTSF == 0) {
- // start immediately by setting start TSF == current TSF + 2 TU
- qwStartTSF = qwCurrTSF + 2048;
-
- bExpired = false;
- break;
- } else {
- // start at setting start TSF - 1TU(for channel switching)
- qwStartTSF -= 1024;
- }
-
- if (qwCurrTSF < qwStartTSF) {
- bExpired = false;
- break;
- }
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- MEASURE_MODE_LATE,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- } else {
- // hardware do not support measure
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- MEASURE_MODE_INCAPABLE,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- }
- } while (pDevice->uNumOfMeasureEIDs != 0);
-
- if (!bExpired) {
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART, (u32)qwStartTSF);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MSRSTART + 4, (u32)(qwStartTSF >> 32));
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_MSRDURATION, wDuration);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- } else {
- // all measure start time expired we should complete action
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- true,
- NULL,
- 0,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- }
- return true;
-}
-
-/*
- *
- * Description:
- * Do Channel Switch defined in 802.11h
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbChannelSwitch(
- struct vnt_private *pDevice,
- unsigned char byMode,
- unsigned char byNewChannel,
- unsigned char byCount
-)
-{
- bool bResult = true;
-
- if (byCount == 0) {
- bResult = set_channel(pDevice, byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- return bResult;
- }
- pDevice->byChannelSwitchCount = byCount;
- pDevice->byNewChannel = byNewChannel;
- pDevice->bChannelSwitch = true;
- if (byMode == 1)
- bResult = CARDbStopTxPacket(pDevice, PKT_TYPE_802_11_ALL);
-
- return bResult;
-}
-
-/*
- *
- * Description:
- * Handle Quiet EID defined in 802.11h
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbSetQuiet(
- struct vnt_private *pDevice,
- bool bResetQuiet,
- unsigned char byQuietCount,
- unsigned char byQuietPeriod,
- unsigned short wQuietDuration,
- unsigned short wQuietOffset
-)
-{
- unsigned int ii = 0;
-
- if (bResetQuiet) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++)
- pDevice->sQuiet[ii].bEnable = false;
-
- pDevice->uQuietEnqueue = 0;
- pDevice->bEnableFirstQuiet = false;
- pDevice->bQuietEnable = false;
- pDevice->byQuietStartCount = byQuietCount;
- }
- if (pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable == false) {
- pDevice->sQuiet[pDevice->uQuietEnqueue].bEnable = true;
- pDevice->sQuiet[pDevice->uQuietEnqueue].byPeriod = byQuietPeriod;
- pDevice->sQuiet[pDevice->uQuietEnqueue].wDuration = wQuietDuration;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime = (unsigned long) byQuietCount;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime *= pDevice->wBeaconInterval;
- pDevice->sQuiet[pDevice->uQuietEnqueue].dwStartTime += wQuietOffset;
- pDevice->uQuietEnqueue++;
- pDevice->uQuietEnqueue %= MAX_QUIET_COUNT;
- if (pDevice->byQuietStartCount < byQuietCount)
- pDevice->byQuietStartCount = byQuietCount;
- }
- return true;
-}
-
-/*
- *
- * Description:
- * Do Quiet, It will be called by either ISR(after start)
- * or VNTWIFI(before start) so we do not need a SPINLOCK
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-CARDbStartQuiet(
- struct vnt_private *pDevice
-)
-{
- unsigned int ii = 0;
- unsigned long dwStartTime = 0xFFFFFFFF;
- unsigned int uCurrentQuietIndex = 0;
- unsigned long dwNextTime = 0;
- unsigned long dwGap = 0;
- unsigned long dwDuration = 0;
-
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
- if ((pDevice->sQuiet[ii].bEnable == true) &&
- (dwStartTime > pDevice->sQuiet[ii].dwStartTime)) {
- dwStartTime = pDevice->sQuiet[ii].dwStartTime;
- uCurrentQuietIndex = ii;
- }
- }
- if (dwStartTime == 0xFFFFFFFF) {
- // no more quiet
- pDevice->bQuietEnable = false;
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- } else {
- if (pDevice->bQuietEnable == false) {
- // first quiet
- pDevice->byQuietStartCount--;
- dwNextTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- dwNextTime %= pDevice->wBeaconInterval;
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETINIT, (unsigned short) dwNextTime);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) pDevice->sQuiet[uCurrentQuietIndex].wDuration);
- if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = false;
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- } else {
- pDevice->bEnableFirstQuiet = true;
- }
- MACvSelectPage0(pDevice->PortOffset);
- } else {
- if (pDevice->dwCurrentQuietEndTime > pDevice->sQuiet[uCurrentQuietIndex].dwStartTime) {
- // overlap with previous Quiet
- dwGap = pDevice->dwCurrentQuietEndTime - pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- if (dwGap >= pDevice->sQuiet[uCurrentQuietIndex].wDuration) {
- // return false to indicate next quiet expired, should call this function again
- return false;
- }
- dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration - dwGap;
- dwGap = 0;
- } else {
- dwGap = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime - pDevice->dwCurrentQuietEndTime;
- dwDuration = pDevice->sQuiet[uCurrentQuietIndex].wDuration;
- }
- // set GAP and Next duration
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETGAP, (unsigned short) dwGap);
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_QUIETDUR, (unsigned short) dwDuration);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_QUIETRPT);
- MACvSelectPage0(pDevice->PortOffset);
- }
- pDevice->bQuietEnable = true;
- pDevice->dwCurrentQuietEndTime = pDevice->sQuiet[uCurrentQuietIndex].dwStartTime;
- pDevice->dwCurrentQuietEndTime += pDevice->sQuiet[uCurrentQuietIndex].wDuration;
- if (pDevice->sQuiet[uCurrentQuietIndex].byPeriod == 0) {
- // not period disable current quiet element
- pDevice->sQuiet[uCurrentQuietIndex].bEnable = false;
- } else {
- // set next period start time
- dwNextTime = (unsigned long) pDevice->sQuiet[uCurrentQuietIndex].byPeriod;
- dwNextTime *= pDevice->wBeaconInterval;
- pDevice->sQuiet[uCurrentQuietIndex].dwStartTime = dwNextTime;
- }
- if (pDevice->dwCurrentQuietEndTime > 0x80010000) {
- // decreament all time to avoid wrap around
- for (ii = 0; ii < MAX_QUIET_COUNT; ii++) {
- if (pDevice->sQuiet[ii].bEnable == true)
- pDevice->sQuiet[ii].dwStartTime -= 0x80000000;
-
- }
- pDevice->dwCurrentQuietEndTime -= 0x80000000;
- }
- }
- return true;
-}
-
-/*
- *
- * Description:
- * Set Local Power Constraint
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-CARDvSetPowerConstraint(
- struct vnt_private *pDevice,
- unsigned char byChannel,
- char byPower
-)
-{
-
- if (byChannel > CB_MAX_CHANNEL_24G) {
- if (pDevice->bCountryInfo5G == true)
- pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
-
- } else {
- if (pDevice->bCountryInfo24G == true)
- pDevice->abyLocalPwr[byChannel] = pDevice->abyRegPwr[byChannel] - byPower;
-
- }
-}
-
-/*
- *
- * Description:
- * Set Local Power Constraint
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-CARDvGetPowerCapability(
- struct vnt_private *pDevice,
- unsigned char *pbyMinPower,
- unsigned char *pbyMaxPower
-)
-{
- unsigned char byDec = 0;
-
- *pbyMaxPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh];
- byDec = pDevice->abyOFDMPwrTbl[pDevice->byCurrentCh];
- if (pDevice->byRFType == RF_UW2452) {
- byDec *= 3;
- byDec >>= 1;
- } else {
- byDec <<= 1;
- }
- *pbyMinPower = pDevice->abyOFDMDefaultPwr[pDevice->byCurrentCh] - byDec;
-}
-
-/*
- *
- * Description:
- * Get Current Tx Power
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure point
- * Out:
- * none
- *
- * Return Value: none.
- *
- */
-char
-CARDbyGetTransmitPower(
- struct vnt_private *pDevice
-)
-{
-
- return pDevice->byCurPwrdBm;
-}
-
-//xxx
void
CARDvSafeResetTx(
struct vnt_private *pDevice
@@ -1422,7 +512,7 @@ CARDvSafeResetTx(
unsigned int uu;
PSTxDesc pCurrTD;
- // initialize TD index
+ /* initialize TD index */
pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &(pDevice->apTD1Rings[0]);
@@ -1432,28 +522,27 @@ CARDvSafeResetTx(
for (uu = 0; uu < pDevice->sOpts.nTxDescs[0]; uu++) {
pCurrTD = &(pDevice->apTD0Rings[uu]);
pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
- // init all Tx Packet pointer to NULL
+ /* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < pDevice->sOpts.nTxDescs[1]; uu++) {
pCurrTD = &(pDevice->apTD1Rings[uu]);
pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
- // init all Tx Packet pointer to NULL
+ /* init all Tx Packet pointer to NULL */
}
- // set MAC TD pointer
+ /* set MAC TD pointer */
MACvSetCurrTXDescAddr(TYPE_TXDMA0, pDevice->PortOffset,
(pDevice->td0_pool_dma));
MACvSetCurrTXDescAddr(TYPE_AC0DMA, pDevice->PortOffset,
(pDevice->td1_pool_dma));
- // set MAC Beacon TX pointer
+ /* set MAC Beacon TX pointer */
MACvSetCurrBCNTxDescAddr(pDevice->PortOffset,
(pDevice->tx_beacon_dma));
}
-/*+
- *
+/*
* Description:
* Reset Rx
*
@@ -1464,8 +553,7 @@ CARDvSafeResetTx(
* none
*
* Return Value: none
- *
- -*/
+ */
void
CARDvSafeResetRx(
struct vnt_private *pDevice
@@ -1474,11 +562,11 @@ CARDvSafeResetRx(
unsigned int uu;
PSRxDesc pDesc;
- // initialize RD index
+ /* initialize RD index */
pDevice->pCurrRD[0] = &(pDevice->aRD0Ring[0]);
pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
- // init state, all RD is chip's
+ /* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
pDesc = &(pDevice->aRD0Ring[uu]);
pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
@@ -1486,7 +574,7 @@ CARDvSafeResetRx(
pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
- // init state, all RD is chip's
+ /* init state, all RD is chip's */
for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
pDesc = &(pDevice->aRD1Ring[uu]);
pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
@@ -1494,13 +582,10 @@ CARDvSafeResetRx(
pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
}
- pDevice->cbDFCB = CB_MAX_RX_FRAG;
- pDevice->cbFreeDFCB = pDevice->cbDFCB;
-
- // set perPkt mode
+ /* set perPkt mode */
MACvRx0PerPktMode(pDevice->PortOffset);
MACvRx1PerPktMode(pDevice->PortOffset);
- // set MAC RD pointer
+ /* set MAC RD pointer */
MACvSetCurrRx0DescAddr(pDevice->PortOffset,
pDevice->rd0_pool_dma);
@@ -1519,7 +604,6 @@ CARDvSafeResetRx(
* none
*
* Return Value: response Control frame rate
- *
*/
static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
unsigned short wRateIdx)
@@ -1527,7 +611,7 @@ static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
unsigned int ui = (unsigned int) wRateIdx;
while (ui > RATE_1M) {
- if (pDevice->wBasicRate & ((unsigned short)1 << ui))
+ if (pDevice->basic_rates & ((u32)0x1 << ui))
return (unsigned short)ui;
ui--;
@@ -1546,14 +630,13 @@ static unsigned short CARDwGetCCKControlRate(struct vnt_private *pDevice,
* none
*
* Return Value: response Control frame rate
- *
*/
static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
unsigned short wRateIdx)
{
unsigned int ui = (unsigned int) wRateIdx;
- pr_debug("BASIC RATE: %X\n", pDevice->wBasicRate);
+ pr_debug("BASIC RATE: %X\n", pDevice->basic_rates);
if (!CARDbIsOFDMinBasicRate((void *)pDevice)) {
pr_debug("CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
@@ -1562,7 +645,7 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
return wRateIdx;
}
while (ui > RATE_11M) {
- if (pDevice->wBasicRate & ((unsigned short)1 << ui)) {
+ if (pDevice->basic_rates & ((u32)0x1 << ui)) {
pr_debug("CARDwGetOFDMControlRate : %d\n", ui);
return (unsigned short)ui;
}
@@ -1582,14 +665,13 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *pDevice,
* none
*
* Return Value: None.
- *
*/
-void CARDvSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType)
+void CARDvSetRSPINF(struct vnt_private *pDevice, u8 bb_type)
{
union vnt_phy_field_swap phy;
- unsigned char byTxRate, byRsvTime; //For OFDM
+ unsigned char byTxRate, byRsvTime; /* For OFDM */
- //Set to Page1
+ /* Set to Page1 */
MACvSelectPage1(pDevice->PortOffset);
/* RSPINF_b_1 */
@@ -1629,136 +711,72 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, CARD_PHY_TYPE ePHYType)
VNSvOutPortD(pDevice->PortOffset + MAC_REG_RSPINF_B_11, phy.field_write);
- //RSPINF_a_6
+ /* RSPINF_a_6 */
s_vCalculateOFDMRParameter(RATE_6M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_9
+ /* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_12
+ /* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_18
+ /* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_24
+ /* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_36
+ /* RSPINF_a_36 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_36M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_48
+ /* RSPINF_a_48 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_48M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
- //RSPINF_a_54
+ /* RSPINF_a_54 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
-
- //RSPINF_a_72
+ /* RSPINF_a_72 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
- ePHYType,
+ bb_type,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
- //Set to Page0
+ /* Set to Page0 */
MACvSelectPage0(pDevice->PortOffset);
}
-/*
- * Description: Update IFS
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-void vUpdateIFS(struct vnt_private *pDevice)
-{
- /* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
-
- unsigned char byMaxMin = 0;
-
- if (pDevice->byPacketType == PK_TYPE_11A) {//0000 0000 0000 0000,11a
- pDevice->uSlot = C_SLOT_SHORT;
- pDevice->uSIFS = C_SIFS_A;
- pDevice->uDIFS = C_SIFS_A + 2*C_SLOT_SHORT;
- pDevice->uCwMin = C_CWMIN_A;
- byMaxMin = 4;
- } else if (pDevice->byPacketType == PK_TYPE_11B) {//0000 0001 0000 0000,11b
- pDevice->uSlot = C_SLOT_LONG;
- pDevice->uSIFS = C_SIFS_BG;
- pDevice->uDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
- pDevice->uCwMin = C_CWMIN_B;
- byMaxMin = 5;
- } else { // PK_TYPE_11GA & PK_TYPE_11GB
- pDevice->uSIFS = C_SIFS_BG;
- if (pDevice->bShortSlotTime)
- pDevice->uSlot = C_SLOT_SHORT;
- else
- pDevice->uSlot = C_SLOT_LONG;
-
- pDevice->uDIFS = C_SIFS_BG + 2*pDevice->uSlot;
- if (pDevice->wBasicRate & 0x0150) { //0000 0001 0101 0000,24M,12M,6M
- pDevice->uCwMin = C_CWMIN_A;
- byMaxMin = 4;
- } else {
- pDevice->uCwMin = C_CWMIN_B;
- byMaxMin = 5;
- }
- }
-
- pDevice->uCwMax = C_CWMAX;
- pDevice->uEIFS = C_EIFS;
- if (pDevice->byRFType == RF_RFMD2959) {
- // bcs TX_PE will reserve 3 us
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)(pDevice->uSIFS - 3));
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)(pDevice->uDIFS - 3));
- } else {
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SIFS, (unsigned char)pDevice->uSIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_DIFS, (unsigned char)pDevice->uDIFS);
- }
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_EIFS, (unsigned char)pDevice->uEIFS);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_SLOT, (unsigned char)pDevice->uSlot);
- byMaxMin |= 0xA0;//1010 1111,C_CWMAX = 1023
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (unsigned char)byMaxMin);
-}
-
void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
{
unsigned char byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
unsigned char ii;
- //Determines the highest basic rate.
+ /* Determines the highest basic rate. */
for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii))) {
+ if ((pDevice->basic_rates) & ((u32)(1 << ii))) {
byTopOFDM = ii;
break;
}
@@ -1766,7 +784,7 @@ void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
pDevice->byTopOFDMBasicRate = byTopOFDM;
for (ii = RATE_11M;; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1<<ii))) {
+ if ((pDevice->basic_rates) & ((u32)(1 << ii))) {
byTopCCK = ii;
break;
}
@@ -1776,24 +794,12 @@ void CARDvUpdateBasicTopRate(struct vnt_private *pDevice)
pDevice->byTopCCKBasicRate = byTopCCK;
}
-bool CARDbAddBasicRate(struct vnt_private *pDevice, unsigned short wRateIdx)
-{
- unsigned short wRate = (unsigned short)(1<<wRateIdx);
-
- pDevice->wBasicRate |= wRate;
-
- //Determines the highest basic rate.
- CARDvUpdateBasicTopRate((void *)pDevice);
-
- return true;
-}
-
bool CARDbIsOFDMinBasicRate(struct vnt_private *pDevice)
{
int ii;
for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((pDevice->wBasicRate) & ((unsigned short)(1 << ii)))
+ if ((pDevice->basic_rates) & ((u32)(1 << ii)))
return true;
}
return false;
@@ -1821,10 +827,11 @@ unsigned char CARDbyGetPktType(struct vnt_private *pDevice)
* none
*
* Return Value: none
- *
*/
-void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
+void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
{
+ void __iomem *dwIoBase = priv->PortOffset;
+
switch (wLoopbackMode) {
case CARD_LB_NONE:
case CARD_LB_MAC:
@@ -1834,9 +841,9 @@ void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
ASSERT(false);
break;
}
- // set MAC loopback
+ /* set MAC loopback */
MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode));
- // set Baseband loopback
+ /* set Baseband loopback */
}
/*
@@ -1849,12 +856,11 @@ void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode)
* none
*
* Return Value: none
- *
*/
bool CARDbSoftwareReset(struct vnt_private *pDevice)
{
- // reset MAC
+ /* reset MAC */
if (!MACbSafeSoftwareReset(pDevice->PortOffset))
return false;
@@ -1874,7 +880,6 @@ bool CARDbSoftwareReset(struct vnt_private *pDevice)
* none
*
* Return Value: TSF Offset value
- *
*/
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
{
@@ -1901,10 +906,10 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
* qwCurrTSF - Current TSF counter
*
* Return Value: true if success; otherwise false
- *
*/
-bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF)
+bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned char byData;
@@ -1934,17 +939,12 @@ bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF)
* qwCurrTSF - Current TSF counter
*
* Return Value: TSF value of next Beacon
- *
*/
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
{
u32 beacon_int;
beacon_int = wBeaconInterval * 1024;
-
- /* Next TBTT =
- * ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- */
if (beacon_int) {
do_div(qwTSF, beacon_int);
qwTSF += 1;
@@ -1966,16 +966,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
* none
*
* Return Value: none
- *
*/
-void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval)
{
+ void __iomem *dwIoBase = priv->PortOffset;
u64 qwNextTBTT = 0;
- CARDbGetCurrentTSF(dwIoBase, &qwNextTBTT); //Get Local TSF counter
+ CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
@@ -1994,12 +994,13 @@ void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterva
* none
*
* Return Value: none
- *
*/
-void CARDvUpdateNextTBTT(void __iomem *dwIoBase, u64 qwTSF, unsigned short wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval)
{
+ void __iomem *dwIoBase = priv->PortOffset;
+
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
- // Set NextTBTT
+ /* Set NextTBTT */
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF);
VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
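Note: CARDvSetFirstNextTBTT() and CARDvUpdateNextTBTT() above both rely on CARDqGetNextTBTT() rounding the local TSF up to the next beacon-interval boundary (1 TU = 1024 us, per "beacon_int = wBeaconInterval * 1024"). A minimal standalone sketch of that rounding, for reference only and not part of the patch (the helper name is illustrative):

	#include <linux/types.h>
	#include <linux/math64.h>

	static u64 next_tbtt_sketch(u64 tsf, u16 beacon_interval_tu)
	{
		u32 beacon_int = beacon_interval_tu * 1024;	/* TU -> microseconds */

		if (!beacon_int)
			return tsf;

		do_div(tsf, beacon_int);	/* whole beacon intervals elapsed so far */
		tsf += 1;			/* step to the next interval boundary */
		tsf *= beacon_int;		/* convert back to a TSF value in us */

		return tsf;
	}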
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 96f5b6c46e82..2dfc41952271 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -29,35 +29,28 @@
#ifndef __CARD_H__
#define __CARD_H__
-#include "ttype.h"
#include <linux/types.h>
#include <linux/nl80211.h>
-//
-// Loopback mode
-//
-// LOBYTE is MAC LB mode, HIBYTE is MII LB mode
+/*
+ * Loopback mode
+ *
+ * LOBYTE is MAC LB mode, HIBYTE is MII LB mode
+ */
#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
-#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) // PHY must ISO, avoid MAC loopback packet go out
+#define CARD_LB_MAC         MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must be isolated so MAC loopback packets do not go out */
#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
-#define DEFAULT_MSDU_LIFETIME 512 // ms
-#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 // 64us
+#define DEFAULT_MSDU_LIFETIME 512 /* ms */
+#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
-#define DEFAULT_MGN_LIFETIME 8 // ms
-#define DEFAULT_MGN_LIFETIME_RES_64us 125 // 64us
+#define DEFAULT_MGN_LIFETIME 8 /* ms */
+#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
#define CB_MAX_CHANNEL_24G 14
#define CB_MAX_CHANNEL_5G 42
#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
-typedef enum _CARD_PHY_TYPE {
- PHY_TYPE_AUTO,
- PHY_TYPE_11B,
- PHY_TYPE_11G,
- PHY_TYPE_11A
-} CARD_PHY_TYPE, *PCARD_PHY_TYPE;
-
typedef enum _CARD_PKT_TYPE {
PKT_TYPE_802_11_BCN,
PKT_TYPE_802_11_MNG,
@@ -73,103 +66,24 @@ typedef enum _CARD_STATUS_TYPE {
struct vnt_private;
-void CARDvSetRSPINF(struct vnt_private *, CARD_PHY_TYPE ePHYType);
-void vUpdateIFS(struct vnt_private *);
+void CARDvSetRSPINF(struct vnt_private *, u8);
void CARDvUpdateBasicTopRate(struct vnt_private *);
-bool CARDbAddBasicRate(struct vnt_private *, unsigned short wRateIdx);
bool CARDbIsOFDMinBasicRate(struct vnt_private *);
-void CARDvSetLoopbackMode(void __iomem *dwIoBase, unsigned short wLoopbackMode);
+void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(void __iomem *dwIoBase, unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(void __iomem *dwIoBase, u64 qwTSF, unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(void __iomem *dwIoBase, u64 *pqwCurrTSF);
+void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval);
+bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
-bool CARDbSetTxPower(struct vnt_private *, unsigned long ulTxPower);
unsigned char CARDbyGetPktType(struct vnt_private *);
void CARDvSafeResetTx(struct vnt_private *);
void CARDvSafeResetRx(struct vnt_private *);
bool CARDbRadioPowerOff(struct vnt_private *);
bool CARDbRadioPowerOn(struct vnt_private *);
-bool CARDbIsShortPreamble(struct vnt_private *);
-bool CARDbIsShorSlotTime(struct vnt_private *);
-bool CARDbSetPhyParameter(struct vnt_private *, CARD_PHY_TYPE ePHYType,
- unsigned short wCapInfo, unsigned char byERPField,
- void *pvSupportRateIEs, void *pvExtSupportRateIEs);
+bool CARDbSetPhyParameter(struct vnt_private *, u8);
bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate,
u64 qwBSSTimestamp, u64 qwLocalTSF);
-bool CARDbStopTxPacket(struct vnt_private *, CARD_PKT_TYPE ePktType);
-bool CARDbStartTxPacket(struct vnt_private *, CARD_PKT_TYPE ePktType);
bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval);
-bool CARDbSetBSSID(struct vnt_private *,
- unsigned char *pbyBSSID, enum nl80211_iftype);
-
-bool CARDbPowerDown(struct vnt_private *);
-
-bool CARDbSetTxDataRate(struct vnt_private *, unsigned short wDataRate);
-
-bool CARDbRemoveKey(struct vnt_private *, unsigned char *pbyBSSID);
-
-bool
-CARDbAdd_PMKID_Candidate(
- struct vnt_private *,
- unsigned char *pbyBSSID,
- bool bRSNCapExist,
- unsigned short wRSNCap
-);
-
-void *
-CARDpGetCurrentAddress(
- struct vnt_private *
-);
-
-bool
-CARDbStartMeasure(
- struct vnt_private *,
- void *pvMeasureEIDs,
- unsigned int uNumOfMeasureEIDs
-);
-
-bool
-CARDbChannelSwitch(
- struct vnt_private *,
- unsigned char byMode,
- unsigned char byNewChannel,
- unsigned char byCount
-);
-
-bool
-CARDbSetQuiet(
- struct vnt_private *,
- bool bResetQuiet,
- unsigned char byQuietCount,
- unsigned char byQuietPeriod,
- unsigned short wQuietDuration,
- unsigned short wQuietOffset
-);
-
-bool
-CARDbStartQuiet(
- struct vnt_private *
-);
-
-void
-CARDvSetPowerConstraint(
- struct vnt_private *,
- unsigned char byChannel,
- char byPower
-);
-
-void
-CARDvGetPowerCapability(
- struct vnt_private *,
- unsigned char *pbyMinPower,
- unsigned char *pbyMaxPower
-);
-
-char
-CARDbyGetTransmitPower(
- struct vnt_private *
-);
-#endif // __CARD_H__
+#endif /* __CARD_H__ */
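Note: card.h drops CARDbAddBasicRate() and the other unused helpers in favour of the u32 basic_rates bitmap used throughout card.c above, where each RATE_* index selects one bit. A minimal sketch of how a caller now populates that bitmap, illustrative only and not part of the patch (it assumes the usual driver headers provide struct vnt_private, the RATE_* indices and CARDvUpdateBasicTopRate()):

	#include "device.h"
	#include "card.h"

	/* Mark the 802.11b mandatory rates as basic, the way the removed
	 * CARDbAddBasicRate() used to do one bit at a time. */
	static void example_set_11b_basic_rates(struct vnt_private *priv)
	{
		priv->basic_rates = (u32)(1 << RATE_1M) | (1 << RATE_2M) |
				    (1 << RATE_5M) | (1 << RATE_11M);

		/* recompute byTopCCKBasicRate/byTopOFDMBasicRate from the bitmap */
		CARDvUpdateBasicTopRate(priv);
	}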
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 4ce964ba14b7..c8f739dd346e 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -21,494 +21,148 @@
*/
#include "baseband.h"
-#include "country.h"
#include "channel.h"
#include "device.h"
#include "rf.h"
-/*--------------------- Static Definitions -------------------------*/
-
-#define CARD_MAX_CHANNEL_TBL 56
-
-/*--------------------- Static Variables --------------------------*/
-
-static SChannelTblElement sChannelTbl[CARD_MAX_CHANNEL_TBL + 1] =
-{
- {0, 0, false, 0},
- {1, 2412, true, 0},
- {2, 2417, true, 0},
- {3, 2422, true, 0},
- {4, 2427, true, 0},
- {5, 2432, true, 0},
- {6, 2437, true, 0},
- {7, 2442, true, 0},
- {8, 2447, true, 0},
- {9, 2452, true, 0},
- {10, 2457, true, 0},
- {11, 2462, true, 0},
- {12, 2467, true, 0},
- {13, 2472, true, 0},
- {14, 2484, true, 0},
- {183, 4915, true, 0},
- {184, 4920, true, 0},
- {185, 4925, true, 0},
- {187, 4935, true, 0},
- {188, 4940, true, 0},
- {189, 4945, true, 0},
- {192, 4960, true, 0},
- {196, 4980, true, 0},
- {7, 5035, true, 0},
- {8, 5040, true, 0},
- {9, 5045, true, 0},
- {11, 5055, true, 0},
- {12, 5060, true, 0},
- {16, 5080, true, 0},
- {34, 5170, true, 0},
- {36, 5180, true, 0},
- {38, 5190, true, 0},
- {40, 5200, true, 0},
- {42, 5210, true, 0},
- {44, 5220, true, 0},
- {46, 5230, true, 0},
- {48, 5240, true, 0},
- {52, 5260, true, 0},
- {56, 5280, true, 0},
- {60, 5300, true, 0},
- {64, 5320, true, 0},
- {100, 5500, true, 0},
- {104, 5520, true, 0},
- {108, 5540, true, 0},
- {112, 5560, true, 0},
- {116, 5580, true, 0},
- {120, 5600, true, 0},
- {124, 5620, true, 0},
- {128, 5640, true, 0},
- {132, 5660, true, 0},
- {136, 5680, true, 0},
- {140, 5700, true, 0},
- {149, 5745, true, 0},
- {153, 5765, true, 0},
- {157, 5785, true, 0},
- {161, 5805, true, 0},
- {165, 5825, true, 0}
+static struct ieee80211_rate vnt_rates_bg[] = {
+ { .bitrate = 10, .hw_value = RATE_1M },
+ { .bitrate = 20, .hw_value = RATE_2M },
+ { .bitrate = 55, .hw_value = RATE_5M },
+ { .bitrate = 110, .hw_value = RATE_11M },
+ { .bitrate = 60, .hw_value = RATE_6M },
+ { .bitrate = 90, .hw_value = RATE_9M },
+ { .bitrate = 120, .hw_value = RATE_12M },
+ { .bitrate = 180, .hw_value = RATE_18M },
+ { .bitrate = 240, .hw_value = RATE_24M },
+ { .bitrate = 360, .hw_value = RATE_36M },
+ { .bitrate = 480, .hw_value = RATE_48M },
+ { .bitrate = 540, .hw_value = RATE_54M },
};
-/************************************************************************
- * The Radar regulation rules for each country
- ************************************************************************/
-static struct
-{
- unsigned char byChannelCountryCode; /* The country code */
- char chCountryCode[2];
- unsigned char bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */
- unsigned char byPower[CB_MAX_CHANNEL];
-} ChannelRuleTab[] =
-{
-/************************************************************************
- * This table is based on Athero driver rules
- ************************************************************************/
-/* Country Available channels, ended with 0 */
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
- {CCODE_FCC, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_TELEC, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 23, 0, 0, 23, 0, 23, 23, 0, 23, 0, 0, 23, 23, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ETSI, {'E' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_RESV3, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV4, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV5, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV6, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV7, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV8, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESV9, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVa, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVb, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVc, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVd, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RESVe, {' ' , ' '}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALLBAND, {' ' , ' '}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALBANIA, {'A' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ALGERIA, {'D' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ARGENTINA, {'A' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
- {CCODE_ARMENIA, {'A' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_AUSTRALIA, {'A' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_AUSTRIA, {'A' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 15, 0, 15, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_AZERBAIJAN, {'A' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BAHRAIN, {'B' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELARUS, {'B' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELGIUM, {'B' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BELIZE, {'B' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BOLIVIA, {'B' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BRAZIL, {'B' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_BRUNEI_DARUSSALAM, {'B' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_BULGARIA, {'B' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 0, 0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0} },
- {CCODE_CANADA, {'C' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_CHILE, {'C' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 17, 17} },
- {CCODE_CHINA, {'C' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_COLOMBIA, {'C' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_COSTA_RICA, {'C' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_CROATIA, {'H' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_CYPRUS, {'C' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_CZECH, {'C' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_DENMARK, {'D' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_DOMINICAN_REPUBLIC, {'D' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_ECUADOR, {'E' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_EGYPT, {'E' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_EL_SALVADOR, {'S' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ESTONIA, {'E' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_FINLAND, {'F' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_FRANCE, {'F' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GERMANY, {'D' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_GREECE, {'G' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GEORGIA, {'G' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_GUATEMALA, {'G' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_HONDURAS, {'H' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_HONG_KONG, {'H' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_HUNGARY, {'H' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ICELAND, {'I' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_INDIA, {'I' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_INDONESIA, {'I' , 'D'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_IRAN, {'I' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_IRELAND, {'I' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_ITALY, {'I' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_ISRAEL, {'I' , 'L'}, { 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JAPAN, {'J' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JORDAN, {'J' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_KAZAKHSTAN, {'K' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_KUWAIT, {'K' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LATVIA, {'L' , 'V'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LEBANON, {'L' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LEICHTENSTEIN, {'L' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_LITHUANIA, {'L' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_LUXEMBURG, {'L' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_MACAU, {'M' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_MACEDONIA, {'M' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MALTA, {'M' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_MALAYSIA, {'M' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MEXICO, {'M' , 'X'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_MONACO, {'M' , 'C'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MOROCCO, {'M' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_NETHERLANDS, {'N' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_NEW_ZEALAND, {'N' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 23, 0, 23, 0, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_NORTH_KOREA, {'K' , 'P'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_NORWAY, {'N' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_OMAN, {'O' , 'M'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PAKISTAN, {'P' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PANAMA, {'P' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_PERU, {'P' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_PHILIPPINES, {'P' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_POLAND, {'P' , 'L'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_PORTUGAL, {'P' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_PUERTO_RICO, {'P' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_QATAR, {'Q' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ROMANIA, {'R' , 'O'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_RUSSIA, {'R' , 'U'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SAUDI_ARABIA, {'S' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SINGAPORE, {'S' , 'G'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 20, 20, 20, 20} },
- {CCODE_SLOVAKIA, {'S' , 'K'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_SLOVENIA, {'S' , 'I'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SOUTH_AFRICA, {'Z' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SOUTH_KOREA, {'K' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_SPAIN, {'E' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 16, 0, 16, 0, 16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 16, 16, 16, 0} },
- {CCODE_SWEDEN, {'S' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_SWITZERLAND, {'C' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_SYRIA, {'S' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TAIWAN, {'T' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 0} },
- {CCODE_THAILAND, {'T' , 'H'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_TRINIDAD_TOBAGO, {'T' , 'T'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 18, 18, 18, 18, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TUNISIA, {'T' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_TURKEY, {'T' , 'R'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UK, {'G' , 'B'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 20, 0, 20, 0, 20, 20, 20, 20, 20, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0} },
- {CCODE_UKRAINE, {'U' , 'A'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UNITED_ARAB_EMIRATES, {'A' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_UNITED_STATES, {'U' , 'S'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1}
- , { 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 17, 0, 17, 0, 17, 23, 23, 23, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 30, 30, 30, 30} },
- {CCODE_URUGUAY, {'U' , 'Y'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_UZBEKISTAN, {'U' , 'Z'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_VENEZUELA, {'V' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0}
- , { 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 23, 23, 23, 0} },
- {CCODE_VIETNAM, {'V' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_YEMEN, {'Y' , 'E'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_ZIMBABWE, {'Z' , 'W'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_JAPAN_W52_W53, {'J' , 'J'}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {CCODE_MAX, {'U' , 'N'}, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- , { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
-/* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 */
+static struct ieee80211_rate vnt_rates_a[] = {
+ { .bitrate = 60, .hw_value = RATE_6M },
+ { .bitrate = 90, .hw_value = RATE_9M },
+ { .bitrate = 120, .hw_value = RATE_12M },
+ { .bitrate = 180, .hw_value = RATE_18M },
+ { .bitrate = 240, .hw_value = RATE_24M },
+ { .bitrate = 360, .hw_value = RATE_36M },
+ { .bitrate = 480, .hw_value = RATE_48M },
+ { .bitrate = 540, .hw_value = RATE_54M },
};
-/*--------------------- Export Functions --------------------------*/
-
-/**
- * is_channel_valid() - Is Country Channel Valid
- * @ChannelIndex: defined as VT3253 MAC channel:
- * 1 = 2.4G channel 1
- * 2 = 2.4G channel 2
- * ...
- * 14 = 2.4G channel 14
- * 15 = 4.9G channel 183
- * 16 = 4.9G channel 184
- * .....
- * Output: true if the specified 5GHz band is allowed to be used,
- * false otherwise.
- * 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196 (Value:15 ~ 22)
- *
- * 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
- * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
- */
-
-bool is_channel_valid(unsigned int ChannelIndex)
-{
- bool bValid;
-
- bValid = false;
- /*
- * If Channel Index is invalid, return invalid
- */
- if ((ChannelIndex > CB_MAX_CHANNEL) ||
- (ChannelIndex == 0)) {
- bValid = false;
- goto exit;
- }
+static struct ieee80211_channel vnt_channels_2ghz[] = {
+ { .center_freq = 2412, .hw_value = 1 },
+ { .center_freq = 2417, .hw_value = 2 },
+ { .center_freq = 2422, .hw_value = 3 },
+ { .center_freq = 2427, .hw_value = 4 },
+ { .center_freq = 2432, .hw_value = 5 },
+ { .center_freq = 2437, .hw_value = 6 },
+ { .center_freq = 2442, .hw_value = 7 },
+ { .center_freq = 2447, .hw_value = 8 },
+ { .center_freq = 2452, .hw_value = 9 },
+ { .center_freq = 2457, .hw_value = 10 },
+ { .center_freq = 2462, .hw_value = 11 },
+ { .center_freq = 2467, .hw_value = 12 },
+ { .center_freq = 2472, .hw_value = 13 },
+ { .center_freq = 2484, .hw_value = 14 }
+};
- bValid = sChannelTbl[ChannelIndex].bValid;
+static struct ieee80211_channel vnt_channels_5ghz[] = {
+ { .center_freq = 4915, .hw_value = 15 },
+ { .center_freq = 4920, .hw_value = 16 },
+ { .center_freq = 4925, .hw_value = 17 },
+ { .center_freq = 4935, .hw_value = 18 },
+ { .center_freq = 4940, .hw_value = 19 },
+ { .center_freq = 4945, .hw_value = 20 },
+ { .center_freq = 4960, .hw_value = 21 },
+ { .center_freq = 4980, .hw_value = 22 },
+ { .center_freq = 5035, .hw_value = 23 },
+ { .center_freq = 5040, .hw_value = 24 },
+ { .center_freq = 5045, .hw_value = 25 },
+ { .center_freq = 5055, .hw_value = 26 },
+ { .center_freq = 5060, .hw_value = 27 },
+ { .center_freq = 5080, .hw_value = 28 },
+ { .center_freq = 5170, .hw_value = 29 },
+ { .center_freq = 5180, .hw_value = 30 },
+ { .center_freq = 5190, .hw_value = 31 },
+ { .center_freq = 5200, .hw_value = 32 },
+ { .center_freq = 5210, .hw_value = 33 },
+ { .center_freq = 5220, .hw_value = 34 },
+ { .center_freq = 5230, .hw_value = 35 },
+ { .center_freq = 5240, .hw_value = 36 },
+ { .center_freq = 5260, .hw_value = 37 },
+ { .center_freq = 5280, .hw_value = 38 },
+ { .center_freq = 5300, .hw_value = 39 },
+ { .center_freq = 5320, .hw_value = 40 },
+ { .center_freq = 5500, .hw_value = 41 },
+ { .center_freq = 5520, .hw_value = 42 },
+ { .center_freq = 5540, .hw_value = 43 },
+ { .center_freq = 5560, .hw_value = 44 },
+ { .center_freq = 5580, .hw_value = 45 },
+ { .center_freq = 5600, .hw_value = 46 },
+ { .center_freq = 5620, .hw_value = 47 },
+ { .center_freq = 5640, .hw_value = 48 },
+ { .center_freq = 5660, .hw_value = 49 },
+ { .center_freq = 5680, .hw_value = 50 },
+ { .center_freq = 5700, .hw_value = 51 },
+ { .center_freq = 5745, .hw_value = 52 },
+ { .center_freq = 5765, .hw_value = 53 },
+ { .center_freq = 5785, .hw_value = 54 },
+ { .center_freq = 5805, .hw_value = 55 },
+ { .center_freq = 5825, .hw_value = 56 }
+};
-exit:
- return bValid;
-}
+static struct ieee80211_supported_band vnt_supported_2ghz_band = {
+ .channels = vnt_channels_2ghz,
+ .n_channels = ARRAY_SIZE(vnt_channels_2ghz),
+ .bitrates = vnt_rates_bg,
+ .n_bitrates = ARRAY_SIZE(vnt_rates_bg),
+};
-/**
- * channel_get_list() - Get Available Channel List for a given country
- * @CountryCode: The country code defined in country.h
- *
- * Output:
- * pbyChannelTable: (QWORD *) correspondent bit mask
- * of available channels
- * 0x0000000000000001 means channel 1 is supported
- * 0x0000000000000003 means channel 1,2 are supported
- * 0x000000000000000F means channel 1,2,..15 are supported
- */
+static struct ieee80211_supported_band vnt_supported_5ghz_band = {
+ .channels = vnt_channels_5ghz,
+ .n_channels = ARRAY_SIZE(vnt_channels_5ghz),
+ .bitrates = vnt_rates_a,
+ .n_bitrates = ARRAY_SIZE(vnt_rates_a),
+};
-bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable)
+void vnt_init_bands(struct vnt_private *priv)
{
- if (uCountryCodeIdx >= CCODE_MAX)
- return false;
-
- memcpy(pbyChannelTable, ChannelRuleTab[uCountryCodeIdx].bChannelIdxList, CB_MAX_CHANNEL);
-
- return true;
-}
+ struct ieee80211_channel *ch;
+ int i;
-void init_channel_table(void *pDeviceHandler)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- bool bMultiBand = false;
- unsigned int ii;
+ switch (priv->byRFType) {
+ case RF_AIROHA7230:
+ case RF_UW2452:
+ case RF_NOTHING:
+ default:
+ ch = vnt_channels_5ghz;
- for (ii = 1; ii <= CARD_MAX_CHANNEL_TBL; ii++)
- sChannelTbl[ii].bValid = false;
+ for (i = 0; i < ARRAY_SIZE(vnt_channels_5ghz); i++) {
+ ch[i].max_power = 0x3f;
+ ch[i].flags = IEEE80211_CHAN_NO_HT40;
+ }
- switch (pDevice->byRFType) {
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &vnt_supported_5ghz_band;
+ /* fallthrough */
case RF_RFMD2959:
case RF_AIROHA:
case RF_AL2230S:
case RF_UW2451:
case RF_VT3226:
- bMultiBand = false;
- break;
- case RF_AIROHA7230:
- case RF_UW2452:
- case RF_NOTHING:
- default:
- bMultiBand = true;
- break;
- }
+ ch = vnt_channels_2ghz;
- if ((pDevice->dwDiagRefCount != 0) || pDevice->b11hEnable) {
- if (bMultiBand) {
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- }
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- }
- } else {
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- //2008-8-4 <add> by chester
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyCCKDefaultPwr[ii + 1];
- }
- }
+ for (i = 0; i < ARRAY_SIZE(vnt_channels_2ghz); i++) {
+ ch[i].max_power = 0x3f;
+ ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- } else if (pDevice->byZoneType <= CCODE_MAX) {
- if (bMultiBand) {
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- } else {
- for (ii = 0; ii < CHANNEL_MAX_24G; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- sChannelTbl[ii + 1].bValid = true;
- pDevice->abyRegPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- pDevice->abyLocalPwr[ii + 1] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- }
- }
- pr_info("Zone=[%d][%c][%c]!!\n",
- pDevice->byZoneType,
- ChannelRuleTab[pDevice->byZoneType].chCountryCode[0],
- ChannelRuleTab[pDevice->byZoneType].chCountryCode[1]);
-
- for (ii = 0; ii < CARD_MAX_CHANNEL_TBL; ii++) {
- if (pDevice->abyRegPwr[ii + 1] == 0)
- pDevice->abyRegPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- if (pDevice->abyLocalPwr[ii + 1] == 0)
- pDevice->abyLocalPwr[ii + 1] = pDevice->abyOFDMDefaultPwr[ii + 1];
- }
-}
-
-unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType)
-{
- unsigned int ii;
-
- if ((ePhyType == PHY_TYPE_11B) || (ePhyType == PHY_TYPE_11G))
- return byChannelNumber;
-
- for (ii = (CB_MAX_CHANNEL_24G + 1); ii <= CB_MAX_CHANNEL;) {
- if (sChannelTbl[ii].byChannelNumber == byChannelNumber)
- return (unsigned char) ii;
- ii++;
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &vnt_supported_2ghz_band;
+ break;
}
- return 0;
-}
-
-unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex)
-{
- return sChannelTbl[byChannelIndex].byChannelNumber;
}
/**
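Reviewer note: the tables added above move channel bookkeeping into mac80211 — each ieee80211_channel stores the VT3253 MAC channel index in .hw_value, so the driver can read the hardware index straight off the configured channel instead of walking the removed sChannelTbl. A minimal sketch of that lookup, assuming a mac80211 .config callback along these lines (vnt_config here is illustrative, not part of this hunk):

static int vnt_config(struct ieee80211_hw *hw, u32 changed)
{
	struct vnt_private *priv = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* .hw_value set in vnt_channels_2ghz/_5ghz is the MAC channel index */
		set_channel(priv, conf->chandef.chan->hw_value);
	}

	return 0;
}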
@@ -528,37 +182,27 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
if (pDevice->byCurrentCh == uConnectionChannel)
return bResult;
- if (!sChannelTbl[uConnectionChannel].bValid)
- return false;
-
- if ((uConnectionChannel > CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType != PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11A, 0, 0, NULL, NULL);
- } else if ((uConnectionChannel <= CB_MAX_CHANNEL_24G) &&
- (pDevice->eCurrentPHYType == PHY_TYPE_11A)) {
- CARDbSetPhyParameter(pDevice, PHY_TYPE_11G, 0, 0, NULL, NULL);
- }
- // clear NAV
+ /* clear NAV */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
- //{{ RobertYu: 20041202
- //// TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput
+ /* TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput */
if (pDevice->byRFType == RF_AIROHA7230)
- RFbAL7230SelectChannelPostProcess(pDevice->PortOffset, pDevice->byCurrentCh, (unsigned char)uConnectionChannel);
- //}} RobertYu
+ RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh,
+ (unsigned char)uConnectionChannel);
pDevice->byCurrentCh = (unsigned char)uConnectionChannel;
- bResult &= RFbSelectChannel(pDevice->PortOffset, pDevice->byRFType, (unsigned char)uConnectionChannel);
+ bResult &= RFbSelectChannel(pDevice, pDevice->byRFType,
+ (unsigned char)uConnectionChannel);
- // Init Synthesizer Table
+ /* Init Synthesizer Table */
if (pDevice->bEnablePSMode)
- RFvWriteWakeProgSyn(pDevice->PortOffset, pDevice->byRFType, uConnectionChannel);
+ RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, uConnectionChannel);
- BBvSoftwareReset(pDevice->PortOffset);
+ BBvSoftwareReset(pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- // set HW default power register
+ /* set HW default power register */
MACvSelectPage1(pDevice->PortOffset);
RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, pDevice->byCurPwr);
@@ -567,242 +211,10 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
MACvSelectPage0(pDevice->PortOffset);
}
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
+ if (pDevice->byBBType == BB_TYPE_11B)
RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
else
RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
return bResult;
}
-
-/**
- * set_country_info() - Set Channel Info of Country
- *
- * Return Value: none.
- *
- */
-
-void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii = 0;
- unsigned int uu = 0;
- unsigned int step = 0;
- unsigned int uNumOfCountryInfo = 0;
- unsigned char byCh = 0;
- PWLAN_IE_COUNTRY pIE_Country = (PWLAN_IE_COUNTRY) pIE;
-
- uNumOfCountryInfo = (pIE_Country->len - 3);
- uNumOfCountryInfo /= 3;
-
- if (ePHYType == PHY_TYPE_11A) {
- pDevice->bCountryInfo5G = true;
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CARD_MAX_CHANNEL_TBL; ii++)
- sChannelTbl[ii].bValid = false;
-
- step = 4;
- } else {
- pDevice->bCountryInfo24G = true;
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++)
- sChannelTbl[ii].bValid = false;
-
- step = 1;
- }
- pDevice->abyCountryCode[0] = pIE_Country->abyCountryString[0];
- pDevice->abyCountryCode[1] = pIE_Country->abyCountryString[1];
- pDevice->abyCountryCode[2] = pIE_Country->abyCountryString[2];
-
- for (ii = 0; ii < uNumOfCountryInfo; ii++) {
- for (uu = 0; uu < pIE_Country->abyCountryInfo[ii*3+1]; uu++) {
- byCh = get_channel_mapping(pDevice, (unsigned char)(pIE_Country->abyCountryInfo[ii*3]+step*uu), ePHYType);
- sChannelTbl[byCh].bValid = true;
- pDevice->abyRegPwr[byCh] = pIE_Country->abyCountryInfo[ii*3+2];
- }
- }
-}
-
-/**
- *
- * set_support_channels() - Set Support Channels IE defined in 802.11h
- *
- * @hDeviceContext: device structure point
- *
- * Return Value: none.
- *
- */
-
-unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- unsigned char byCount;
- PWLAN_IE_SUPP_CH pIE = (PWLAN_IE_SUPP_CH) pbyIEs;
- unsigned char *pbyChTupple;
- unsigned char byLen = 0;
-
- pIE->byElementID = WLAN_EID_SUPP_CH;
- pIE->len = 0;
- pbyChTupple = pIE->abyChannelTuple;
- byLen = 2;
- // lower band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[28] == true) {
- for (ii = 28; ii < 36; ii += 2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 34;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[29] == true) {
- for (ii = 29; ii < 36; ii += 2) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 36;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // middle band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[36] == true) {
- for (ii = 36; ii < 40; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 52;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- // higher band
- byCount = 0;
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[40] == true) {
- for (ii = 40; ii < 51; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 100;
- *pbyChTupple++ = byCount;
- byLen += 2;
- } else if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[51] == true) {
- for (ii = 51; ii < 56; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] == true)
- byCount++;
- }
-
- *pbyChTupple++ = 149;
- *pbyChTupple++ = byCount;
- byLen += 2;
- }
- pIE->len += (byLen - 2);
- return byLen;
-}
-
-void set_country_IE(void *pDeviceHandler, void *pIE)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- PWLAN_IE_COUNTRY pIECountry = (PWLAN_IE_COUNTRY) pIE;
-
- pIECountry->byElementID = WLAN_EID_COUNTRY;
- pIECountry->len = 0;
- pIECountry->abyCountryString[0] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[0];
- pIECountry->abyCountryString[1] = ChannelRuleTab[pDevice->byZoneType].chCountryCode[1];
- pIECountry->abyCountryString[2] = ' ';
- for (ii = CB_MAX_CHANNEL_24G; ii < CB_MAX_CHANNEL; ii++) {
- if (ChannelRuleTab[pDevice->byZoneType].bChannelIdxList[ii] != 0) {
- pIECountry->abyCountryInfo[pIECountry->len++] = sChannelTbl[ii + 1].byChannelNumber;
- pIECountry->abyCountryInfo[pIECountry->len++] = 1;
- pIECountry->abyCountryInfo[pIECountry->len++] = ChannelRuleTab[pDevice->byZoneType].byPower[ii];
- }
- }
- pIECountry->len += 3;
-}
-
-bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char *pbyChannelNumber, unsigned char *pbyMap)
-{
- if (uChannelIndex > CB_MAX_CHANNEL)
- return false;
-
- *pbyChannelNumber = sChannelTbl[uChannelIndex].byChannelNumber;
- *pbyMap = sChannelTbl[uChannelIndex].byMAP;
- return sChannelTbl[uChannelIndex].bValid;
-}
-
-void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char byMap)
-{
- if (uChannelIndex > CB_MAX_CHANNEL)
- return;
-
- sChannelTbl[uChannelIndex].byMAP |= byMap;
-}
-
-void clear_channel_map_info(void *pDeviceHandler)
-{
- unsigned int ii = 0;
-
- for (ii = 1; ii <= CB_MAX_CHANNEL; ii++)
- sChannelTbl[ii].byMAP = 0;
-}
-
-unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
-{
- unsigned int ii = 0;
- unsigned char byOptionChannel = 0;
- int aiWeight[CB_MAX_CHANNEL_24G + 1] = {-1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
- if (ePHYType == PHY_TYPE_11A) {
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (sChannelTbl[ii].bValid) {
- if (byOptionChannel == 0)
- byOptionChannel = (unsigned char) ii;
-
- if (sChannelTbl[ii].byMAP == 0)
- return (unsigned char) ii;
- else if (!(sChannelTbl[ii].byMAP & 0x08))
- byOptionChannel = (unsigned char) ii;
- }
- }
- } else {
- byOptionChannel = 0;
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid) {
- if (sChannelTbl[ii].byMAP == 0) {
- aiWeight[ii] += 100;
- } else if (sChannelTbl[ii].byMAP & 0x01) {
- if (ii > 3)
- aiWeight[ii - 3] -= 10;
-
- if (ii > 2)
- aiWeight[ii - 2] -= 20;
-
- if (ii > 1)
- aiWeight[ii - 1] -= 40;
-
- aiWeight[ii] -= 80;
- if (ii < CB_MAX_CHANNEL_24G)
- aiWeight[ii + 1] -= 40;
-
- if (ii < (CB_MAX_CHANNEL_24G - 1))
- aiWeight[ii+2] -= 20;
-
- if (ii < (CB_MAX_CHANNEL_24G - 2))
- aiWeight[ii+3] -= 10;
- }
- }
- }
- for (ii = 1; ii <= CB_MAX_CHANNEL_24G; ii++) {
- if (sChannelTbl[ii].bValid &&
- (aiWeight[ii] > aiWeight[byOptionChannel])) {
- byOptionChannel = (unsigned char) ii;
- }
- }
- }
- return byOptionChannel;
-}
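The auto_channel_select() removed above implemented a simple 2.4 GHz scoring pass: an idle channel gains +100, while a channel seen carrying a BSS costs itself 80 points and its neighbours 40/20/10 points out to +/-3 channels, and the highest score wins. A self-contained sketch of that weighting, kept only to document the behaviour this patch drops (the busy[] input stands in for the old byMAP bookkeeping and is not from the source):

#include <stdlib.h>

/* Illustrative re-statement of the dropped 2.4 GHz auto-select weighting. */
static int pick_2ghz_channel(const int busy[15])
{
	int weight[15] = { [0] = -1000 };	/* index 0 is never chosen */
	static const int penalty[] = { 80, 40, 20, 10 }; /* self, +/-1, +/-2, +/-3 */
	int ch, d, best = 1;

	for (ch = 1; ch <= 14; ch++) {
		if (!busy[ch]) {
			weight[ch] += 100;	/* quiet channel */
			continue;
		}
		/* penalise the busy channel and its neighbours */
		for (d = -3; d <= 3; d++)
			if (ch + d >= 1 && ch + d <= 14)
				weight[ch + d] -= penalty[abs(d)];
	}

	for (ch = 1; ch <= 14; ch++)
		if (weight[ch] > weight[best])
			best = ch;

	return best;
}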
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 4f44c8a3d3cf..4f4264e23462 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -23,30 +23,10 @@
#ifndef _CHANNEL_H_
#define _CHANNEL_H_
-#include "ttype.h"
#include "card.h"
-typedef struct tagSChannelTblElement {
- unsigned char byChannelNumber;
- unsigned int uFrequency;
- bool bValid;
- unsigned char byMAP;
-} SChannelTblElement, *PSChannelTblElement;
+void vnt_init_bands(struct vnt_private *);
-bool is_channel_valid(unsigned int CountryCode);
-void init_channel_table(void *pDeviceHandler);
-unsigned char get_channel_mapping(void *pDeviceHandler, unsigned char byChannelNumber, CARD_PHY_TYPE ePhyType);
-bool channel_get_list(unsigned int uCountryCodeIdx, unsigned char *pbyChannelTable);
-unsigned char get_channel_number(void *pDeviceHandler, unsigned char byChannelIndex);
bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel);
-void set_country_info(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, void *pIE);
-unsigned char set_support_channels(void *pDeviceHandler, unsigned char *pbyIEs);
-void set_country_IE(void *pDeviceHandler, void *pIE);
-bool get_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char *pbyChannelNumber, unsigned char *pbyMap);
-void set_channel_map_info(void *pDeviceHandler, unsigned int uChannelIndex,
- unsigned char byMap);
-void clear_channel_map_info(void *pDeviceHandler);
-unsigned char auto_channel_select(void *pDeviceHandler, CARD_PHY_TYPE ePHYType);
#endif /* _CHANNEL_H_ */
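With channel.h trimmed to vnt_init_bands() and set_channel(), band setup is expected to happen once in the probe path, before the device is handed to mac80211. A hedged sketch of that ordering (the wrapper below is illustrative and not taken from this series):

static int vnt_register_sketch(struct vnt_private *priv)
{
	/* fills hw->wiphy->bands[] according to priv->byRFType */
	vnt_init_bands(priv);

	/* bands must be populated before mac80211 registers the device */
	return ieee80211_register_hw(priv->hw);
}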
diff --git a/drivers/staging/vt6655/country.h b/drivers/staging/vt6655/country.h
deleted file mode 100644
index 2365fb13b033..000000000000
--- a/drivers/staging/vt6655/country.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: country.h
- *
- * Purpose: Country Code information
- *
- * Author: Lucas Lin
- *
- * Date: Dec 23, 2004
- *
- */
-
-#ifndef __COUNTRY_H__
-#define __COUNTRY_H__
-
-#include "ttype.h"
-
-/************************************************************************
- * The definition here should be complied with the INF country order
- * Please check with VNWL.inf/VNWL64.inf/VNWL*.inf
- ************************************************************************/
-typedef enum _COUNTRY_CODE {
- CCODE_FCC = 0,
- CCODE_TELEC,
- CCODE_ETSI,
- CCODE_RESV3,
- CCODE_RESV4,
- CCODE_RESV5,
- CCODE_RESV6,
- CCODE_RESV7,
- CCODE_RESV8,
- CCODE_RESV9,
- CCODE_RESVa,
- CCODE_RESVb,
- CCODE_RESVc,
- CCODE_RESVd,
- CCODE_RESVe,
- CCODE_ALLBAND,
- CCODE_ALBANIA,
- CCODE_ALGERIA,
- CCODE_ARGENTINA,
- CCODE_ARMENIA,
- CCODE_AUSTRALIA,
- CCODE_AUSTRIA,
- CCODE_AZERBAIJAN,
- CCODE_BAHRAIN,
- CCODE_BELARUS,
- CCODE_BELGIUM,
- CCODE_BELIZE,
- CCODE_BOLIVIA,
- CCODE_BRAZIL,
- CCODE_BRUNEI_DARUSSALAM,
- CCODE_BULGARIA,
- CCODE_CANADA,
- CCODE_CHILE,
- CCODE_CHINA,
- CCODE_COLOMBIA,
- CCODE_COSTA_RICA,
- CCODE_CROATIA,
- CCODE_CYPRUS,
- CCODE_CZECH,
- CCODE_DENMARK,
- CCODE_DOMINICAN_REPUBLIC,
- CCODE_ECUADOR,
- CCODE_EGYPT,
- CCODE_EL_SALVADOR,
- CCODE_ESTONIA,
- CCODE_FINLAND,
- CCODE_FRANCE,
- CCODE_GERMANY,
- CCODE_GREECE,
- CCODE_GEORGIA,
- CCODE_GUATEMALA,
- CCODE_HONDURAS,
- CCODE_HONG_KONG,
- CCODE_HUNGARY,
- CCODE_ICELAND,
- CCODE_INDIA,
- CCODE_INDONESIA,
- CCODE_IRAN,
- CCODE_IRELAND,
- CCODE_ITALY,
- CCODE_ISRAEL,
- CCODE_JAPAN,
- CCODE_JORDAN,
- CCODE_KAZAKHSTAN,
- CCODE_KUWAIT,
- CCODE_LATVIA,
- CCODE_LEBANON,
- CCODE_LEICHTENSTEIN,
- CCODE_LITHUANIA,
- CCODE_LUXEMBURG,
- CCODE_MACAU,
- CCODE_MACEDONIA,
- CCODE_MALTA,
- CCODE_MALAYSIA,
- CCODE_MEXICO,
- CCODE_MONACO,
- CCODE_MOROCCO,
- CCODE_NETHERLANDS,
- CCODE_NEW_ZEALAND,
- CCODE_NORTH_KOREA,
- CCODE_NORWAY,
- CCODE_OMAN,
- CCODE_PAKISTAN,
- CCODE_PANAMA,
- CCODE_PERU,
- CCODE_PHILIPPINES,
- CCODE_POLAND,
- CCODE_PORTUGAL,
- CCODE_PUERTO_RICO,
- CCODE_QATAR,
- CCODE_ROMANIA,
- CCODE_RUSSIA,
- CCODE_SAUDI_ARABIA,
- CCODE_SINGAPORE,
- CCODE_SLOVAKIA,
- CCODE_SLOVENIA,
- CCODE_SOUTH_AFRICA,
- CCODE_SOUTH_KOREA,
- CCODE_SPAIN,
- CCODE_SWEDEN,
- CCODE_SWITZERLAND,
- CCODE_SYRIA,
- CCODE_TAIWAN,
- CCODE_THAILAND,
- CCODE_TRINIDAD_TOBAGO,
- CCODE_TUNISIA,
- CCODE_TURKEY,
- CCODE_UK,
- CCODE_UKRAINE,
- CCODE_UNITED_ARAB_EMIRATES,
- CCODE_UNITED_STATES,
- CCODE_URUGUAY,
- CCODE_UZBEKISTAN,
- CCODE_VENEZUELA,
- CCODE_VIETNAM,
- CCODE_YEMEN,
- CCODE_ZIMBABWE,
- CCODE_JAPAN_W52_W53,
- CCODE_MAX
-} COUNTRY_CODE;
-
-#endif /* __COUNTRY_H__ */
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
deleted file mode 100644
index 52907a4fae9d..000000000000
--- a/drivers/staging/vt6655/datarate.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: datarate.c
- *
- * Purpose: Handles the auto fallback & data rates functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 17, 2002
- *
- * Functions:
- * RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame
- * RATEvTxRateFallBack - Rate fallback Algorithm Implementaion
- * RATEuSetIE- Set rate IE field.
- *
- * Revision History:
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "mac.h"
-#include "80211mgr.h"
-#include "bssdb.h"
-#include "datarate.h"
-#include "card.h"
-#include "baseband.h"
-#include "srom.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-extern unsigned short TxRate_iwconfig; /* 2008-5-8 <add> by chester */
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char acbyIERate[MAX_RATE] = {
-0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C
-};
-
-#define AUTORATE_TXOK_CNT 0x0400
-#define AUTORATE_TXFAIL_CNT 0x0064
-#define AUTORATE_TIMEOUT 10
-
-/*--------------------- Static Functions --------------------------*/
-
-void s_vResetCounter(
- PKnownNodeDB psNodeDBTable
-);
-
-void
-s_vResetCounter(
- PKnownNodeDB psNodeDBTable
-)
-{
- unsigned char ii;
-
- /* clear statistic counter for auto_rate */
- for (ii = 0; ii <= MAX_RATE; ii++) {
- psNodeDBTable->uTxOk[ii] = 0;
- psNodeDBTable->uTxFail[ii] = 0;
- }
-}
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
- *
- * Parameters:
- * In:
- * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
- * Out:
- * none
- *
- * Return Value: RateIdx
- *
- -*/
-unsigned char
-DATARATEbyGetRateIdx(
- unsigned char byRate
-)
-{
- unsigned char ii;
-
- /* Erase basicRate flag. */
- byRate = byRate & 0x7F;/* 0111 1111 */
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (acbyIERate[ii] == byRate)
- return ii;
- }
- return 0;
-}
-
-/*+
- *
- * Routine Description:
- * Rate fallback Algorithm Implementation
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * psNodeDBTable - Pointer to Node Data Base
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-#define AUTORATE_TXCNT_THRESHOLD 20
-#define AUTORATE_INC_THRESHOLD 30
-
-/*+
- *
- * Description:
- * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
- *
- * Parameters:
- * In:
- * unsigned char - Rate value in SuppRates IE or ExtSuppRates IE
- * Out:
- * none
- *
- * Return Value: RateIdx
- *
- -*/
-unsigned short
-wGetRateIdx(
- unsigned char byRate
-)
-{
- unsigned short ii;
-
- /* Erase basicRate flag. */
- byRate = byRate & 0x7F;/* 0111 1111 */
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (acbyIERate[ii] == byRate)
- return ii;
- }
-
- return 0;
-}
-
-/*+
- *
- * Description:
- * Parsing the highest basic & support rate in rate field of frame.
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * pItemRates - Pointer to Rate field defined in 802.11 spec.
- * pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec.
- * Out:
- * pwMaxBasicRate - Maximum Basic Rate
- * pwMaxSuppRate - Maximum Supported Rate
- * pbyTopCCKRate - Maximum Basic Rate in CCK mode
- * pbyTopOFDMRate - Maximum Basic Rate in OFDM mode
- *
- * Return Value: none
- *
- -*/
-void
-RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- bool bUpdateBasicRate,
- unsigned short *pwMaxBasicRate,
- unsigned short *pwMaxSuppRate,
- unsigned short *pwSuppRate,
- unsigned char *pbyTopCCKRate,
- unsigned char *pbyTopOFDMRate
-)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned int ii;
- unsigned char byHighSuppRate = 0;
- unsigned char byRate = 0;
- unsigned short wOldBasicRate = pDevice->wBasicRate;
- unsigned int uRateLen;
-
- if (pItemRates == NULL)
- return;
-
- *pwSuppRate = 0;
- uRateLen = pItemRates->len;
-
- pr_debug("ParseMaxRate Len: %d\n", uRateLen);
- if (pDevice->eCurrentPHYType != PHY_TYPE_11B) {
- if (uRateLen > WLAN_RATES_MAXLEN)
- uRateLen = WLAN_RATES_MAXLEN;
- } else {
- if (uRateLen > WLAN_RATES_MAXLEN_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
- }
-
- for (ii = 0; ii < uRateLen; ii++) {
- byRate = (unsigned char)(pItemRates->abyRates[ii]);
- if (WLAN_MGMT_IS_BASICRATE(byRate) && bUpdateBasicRate) {
- /* Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate */
- CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
- pr_debug("ParseMaxRate AddBasicRate: %d\n",
- wGetRateIdx(byRate));
- }
- byRate = (unsigned char)(pItemRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<wGetRateIdx(byRate));
- }
- if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
- (pDevice->eCurrentPHYType != PHY_TYPE_11B)) {
- unsigned int uExtRateLen = pItemExtRates->len;
-
- if (uExtRateLen > WLAN_RATES_MAXLEN)
- uExtRateLen = WLAN_RATES_MAXLEN;
-
- for (ii = 0; ii < uExtRateLen; ii++) {
- byRate = (unsigned char)(pItemExtRates->abyRates[ii]);
- /* select highest basic rate */
- if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
- /* Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate */
- CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
- pr_debug("ParseMaxRate AddBasicRate: %d\n",
- wGetRateIdx(byRate));
- }
- byRate = (unsigned char)(pItemExtRates->abyRates[ii]&0x7F);
- if (byHighSuppRate == 0)
- byHighSuppRate = byRate;
- if (byRate > byHighSuppRate)
- byHighSuppRate = byRate;
- *pwSuppRate |= (1<<wGetRateIdx(byRate));
- }
- }
-
- if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice))
- pDevice->byPacketType = PK_TYPE_11GA;
-
- *pbyTopCCKRate = pDevice->byTopCCKBasicRate;
- *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
- *pwMaxSuppRate = wGetRateIdx(byHighSuppRate);
- if ((pDevice->byPacketType == PK_TYPE_11B) || (pDevice->byPacketType == PK_TYPE_11GB))
- *pwMaxBasicRate = pDevice->byTopCCKBasicRate;
- else
- *pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
- if (wOldBasicRate != pDevice->wBasicRate)
- CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType);
-
- pr_debug("Exit ParseMaxRate\n");
-}
-
-/*+
- *
- * Routine Description:
- * Rate fallback Algorithm Implementaion
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * psNodeDBTable - Pointer to Node Data Base
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-#define AUTORATE_TXCNT_THRESHOLD 20
-#define AUTORATE_INC_THRESHOLD 30
-
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
-)
-{
- struct vnt_private *pDevice = pDeviceHandler;
- unsigned short wIdxDownRate = 0;
- unsigned int ii;
- bool bAutoRate[MAX_RATE] = {true, true, true, true, false, false, true, true, true, true, true, true};
- unsigned long dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
- unsigned long dwThroughput = 0;
- unsigned short wIdxUpRate = 0;
- unsigned long dwTxDiff = 0;
-
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- /* Don't do Fallback when scanning Channel */
- return;
-
- psNodeDBTable->uTimeCount++;
-
- if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
- dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
-
- if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
- (dwTxDiff < AUTORATE_TXFAIL_CNT) &&
- (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
- return;
- }
-
- if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT)
- psNodeDBTable->uTimeCount = 0;
-
- for (ii = 0; ii < MAX_RATE; ii++) {
- if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
- if (bAutoRate[ii])
- wIdxUpRate = (unsigned short) ii;
-
- } else {
- bAutoRate[ii] = false;
- }
- }
-
- for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
- if ((psNodeDBTable->uTxOk[ii] != 0) ||
- (psNodeDBTable->uTxFail[ii] != 0)) {
- dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
- if (ii < RATE_11M)
- psNodeDBTable->uTxFail[ii] *= 4;
-
- dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
- }
- }
- dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
-
- wIdxDownRate = psNodeDBTable->wTxDataRate;
- for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
- ii--;
- if ((dwThroughputTbl[ii] > dwThroughput) && bAutoRate[ii]) {
- dwThroughput = dwThroughputTbl[ii];
- wIdxDownRate = (unsigned short) ii;
- }
- }
- psNodeDBTable->wTxDataRate = wIdxDownRate;
- if (psNodeDBTable->uTxOk[MAX_RATE]) {
- if (psNodeDBTable->uTxOk[MAX_RATE] >
- (psNodeDBTable->uTxFail[MAX_RATE] * 4)) {
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
- } else {
- /* adhoc, if uTxOk =0 & uTxFail = 0 */
- if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
- psNodeDBTable->wTxDataRate = wIdxUpRate;
- }
-
- /* 2008-5-8 <add> by chester */
- TxRate_iwconfig = psNodeDBTable->wTxDataRate;
- s_vResetCounter(psNodeDBTable);
-}
-
-/*+
- *
- * Description:
- * This routine is used to assemble available Rate IE.
- *
- * Parameters:
- * In:
- * pDevice
- * Out:
- *
- * Return Value: None
- *
- -*/
-unsigned char
-RATEuSetIE(
- PWLAN_IE_SUPP_RATES pSrcRates,
- PWLAN_IE_SUPP_RATES pDstRates,
- unsigned int uRateLen
-)
-{
- unsigned int ii, uu, uRateCnt = 0;
-
- if ((pSrcRates == NULL) || (pDstRates == NULL))
- return 0;
-
- if (pSrcRates->len == 0)
- return 0;
-
- for (ii = 0; ii < uRateLen; ii++) {
- for (uu = 0; uu < pSrcRates->len; uu++) {
- if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
- pDstRates->abyRates[uRateCnt++] = pSrcRates->abyRates[uu];
- break;
- }
- }
- }
- return (unsigned char)uRateCnt;
-}
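
The heart of the deleted fallback algorithm is a per-rate throughput estimate: each candidate keeps its nominal throughput (in 100 kb/s units, as in dwThroughputTbl) scaled by its TX success ratio, failures at the 1M/2M/5.5M rates count four times, and the best-scoring rate at or below the current one is chosen (a separate check then bumps the rate back up when overall successes outnumber failures four to one). A condensed, hypothetical restatement of that scoring, not part of this patch:

    static unsigned int pick_fallback_rate(unsigned int cur,
                                           const unsigned long tx_ok[12],
                                           const unsigned long tx_fail[12])
    {
            /* nominal throughput per rate index, in 100 kb/s (1M ... 54M) */
            static const unsigned long nominal[12] = {
                    10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540
            };
            unsigned long best_tp = 0;
            unsigned int best = cur;
            unsigned int i;

            for (i = 0; i <= cur && i < 12; i++) {
                    unsigned long fail = tx_fail[i];
                    unsigned long tp = nominal[i];

                    if (tx_ok[i] || fail) {
                            if (i < 3)      /* 1M/2M/5.5M: weight failures harder */
                                    fail *= 4;
                            tp = tp * tx_ok[i] / (tx_ok[i] + fail);
                    }
                    if (tp >= best_tp) {
                            best_tp = tp;
                            best = i;
                    }
            }
            return best;
    }
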
diff --git a/drivers/staging/vt6655/datarate.h b/drivers/staging/vt6655/datarate.h
deleted file mode 100644
index 0509c4fd2a42..000000000000
--- a/drivers/staging/vt6655/datarate.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: datarate.h
- *
- * Purpose: Handles the auto fallback & data rates functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 16, 2002
- *
- */
-#ifndef __DATARATE_H__
-#define __DATARATE_H__
-
-#define FALLBACK_PKT_COLLECT_TR_H 50
-#define FALLBACK_PKT_COLLECT_TR_L 10
-#define FALLBACK_POLL_SECOND 5
-#define FALLBACK_RECOVER_SECOND 30
-#define FALLBACK_THRESHOLD 15
-#define UPGRADE_THRESHOLD 5
-#define UPGRADE_CNT_THRD 3
-#define RETRY_TIMES_THRD_H 2
-#define RETRY_TIMES_THRD_L 1
-
-void
-RATEvParseMaxRate(
- void *pDeviceHandler,
- PWLAN_IE_SUPP_RATES pItemRates,
- PWLAN_IE_SUPP_RATES pItemExtRates,
- bool bUpdateBasicRate,
- unsigned short *pwMaxBasicRate,
- unsigned short *pwMaxSuppRate,
- unsigned short *pwSuppRate,
- unsigned char *pbyTopCCKRate,
- unsigned char *pbyTopOFDMRate
-);
-
-void
-RATEvTxRateFallBack(
- void *pDeviceHandler,
- PKnownNodeDB psNodeDBTable
-);
-
-unsigned char
-RATEuSetIE(
- PWLAN_IE_SUPP_RATES pSrcRates,
- PWLAN_IE_SUPP_RATES pDstRates,
- unsigned int uRateLen
-);
-
-unsigned short
-wGetRateIdx(
- unsigned char byRate
-);
-
-unsigned char
-DATARATEbyGetRateIdx(
- unsigned char byRate
-);
-
-#endif //__DATARATE_H__
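
Both removed lookup helpers, DATARATEbyGetRateIdx() and wGetRateIdx(), did the same thing: strip the basic-rate flag (bit 7) from the 500 kb/s-encoded IE rate value, search the acbyIERate table, and return index 0 (1M) when nothing matches. A self-contained restatement under a hypothetical name, not part of this patch:

    /* IE rates are in 500 kb/s units; bit 7 marks a BSS basic rate */
    static unsigned int ie_rate_to_idx(unsigned char ie_rate)
    {
            static const unsigned char ie_rates[12] = {
                    0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24,
                    0x30, 0x48, 0x60, 0x6C
            };
            unsigned int i;

            for (i = 0; i < 12; i++)
                    if (ie_rates[i] == (ie_rate & 0x7F))
                            return i;
            return 0;       /* unknown rates fall back to 1M, as before */
    }
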
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 5a2bbd2047d8..758eeb2afd51 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -34,44 +34,34 @@
#include <linux/types.h>
#include <linux/mm.h>
#include "linux/ieee80211.h"
-#include "ttype.h"
-#include "tether.h"
#define B_OWNED_BY_CHIP 1
#define B_OWNED_BY_HOST 0
-//
-// Bits in the RSR register
-//
+/* Bits in the RSR register */
#define RSR_ADDRBROAD 0x80
#define RSR_ADDRMULTI 0x40
#define RSR_ADDRUNI 0x00
#define RSR_IVLDTYP 0x20
-#define RSR_IVLDLEN 0x10 // invalid len (> 2312 byte)
+#define RSR_IVLDLEN 0x10 /* invalid len (> 2312 byte) */
#define RSR_BSSIDOK 0x08
#define RSR_CRCOK 0x04
#define RSR_BCNSSIDOK 0x02
#define RSR_ADDROK 0x01
-//
-// Bits in the new RSR register
-//
+/* Bits in the new RSR register */
#define NEWRSR_DECRYPTOK 0x10
#define NEWRSR_CFPIND 0x08
#define NEWRSR_HWUTSF 0x04
#define NEWRSR_BCNHITAID 0x02
#define NEWRSR_BCNHITAID0 0x01
-//
-// Bits in the TSR0 register
-//
+/* Bits in the TSR0 register */
#define TSR0_PWRSTS1_2 0xC0
#define TSR0_PWRSTS7 0x20
#define TSR0_NCR 0x1F
-//
-// Bits in the TSR1 register
-//
+/* Bits in the TSR1 register */
#define TSR1_TERR 0x80
#define TSR1_PWRSTS4_6 0x70
#define TSR1_RETRYTMO 0x08
@@ -79,16 +69,14 @@
#define TSR1_PWRSTS3 0x02
#define ACK_DATA 0x01
-//
-// Bits in the TCR register
-//
-#define EDMSDU 0x04 // end of sdu
-#define TCR_EDP 0x02 // end of packet
-#define TCR_STP 0x01 // start of packet
+/* Bits in the TCR register */
+#define EDMSDU 0x04 /* end of sdu */
+#define TCR_EDP 0x02 /* end of packet */
+#define TCR_STP 0x01 /* start of packet */
-// max transmit or receive buffer size
+/* max transmit or receive buffer size */
#define CB_MAX_BUF_SIZE 2900U
- // NOTE: must be multiple of 4
+ /* NOTE: must be multiple of 4 */
#define CB_MAX_TX_BUF_SIZE CB_MAX_BUF_SIZE
#define CB_MAX_RX_BUF_SIZE_NORMAL CB_MAX_BUF_SIZE
@@ -100,18 +88,21 @@
#define CB_MIN_TX_DESC 16
#define CB_MAX_RECEIVED_PACKETS 16
- // limit our receive routine to indicating
- // this many at a time for 2 reasons:
- // 1. driver flow control to protocol layer
- // 2. limit the time used in ISR routine
+ /*
+ * limit our receive routine to indicating
+ * this many at a time for 2 reasons:
+ * 1. driver flow control to protocol layer
+ * 2. limit the time used in ISR routine
+ */
#define CB_EXTRA_RD_NUM 32
#define CB_RD_NUM 32
#define CB_TD_NUM 32
-// max number of physical segments
-// in a single NDIS packet. Above this threshold, the packet
-// is copied into a single physically contiguous buffer
+/*
+ * max number of physical segments in a single NDIS packet. Above this
+ * threshold, the packet is copied into a single physically contiguous buffer
+ */
#define CB_MAX_SEGMENT 4
#define CB_MIN_MAP_REG_NUM 4
@@ -119,42 +110,13 @@
#define CB_PROTOCOL_RESERVED_SECTION 16
-// if retrys excess 15 times , tx will abort, and
-// if tx fifo underflow, tx will fail
-// we should try to resend it
+/*
+ * if retries exceed 15 times, tx will abort; if the tx fifo underflows,
+ * tx will fail and we should try to resend it
+ */
#define CB_MAX_TX_ABORT_RETRY 3
-#ifdef __BIG_ENDIAN
-
-// WMAC definition FIFO Control
-#define FIFOCTL_AUTO_FB_1 0x0010
-#define FIFOCTL_AUTO_FB_0 0x0008
-#define FIFOCTL_GRPACK 0x0004
-#define FIFOCTL_11GA 0x0003
-#define FIFOCTL_11GB 0x0002
-#define FIFOCTL_11B 0x0001
-#define FIFOCTL_11A 0x0000
-#define FIFOCTL_RTS 0x8000
-#define FIFOCTL_ISDMA0 0x4000
-#define FIFOCTL_GENINT 0x2000
-#define FIFOCTL_TMOEN 0x1000
-#define FIFOCTL_LRETRY 0x0800
-#define FIFOCTL_CRCDIS 0x0400
-#define FIFOCTL_NEEDACK 0x0200
-#define FIFOCTL_LHEAD 0x0100
-
-//WMAC definition Frag Control
-#define FRAGCTL_AES 0x0003
-#define FRAGCTL_TKIP 0x0002
-#define FRAGCTL_LEGACY 0x0001
-#define FRAGCTL_NONENCRYPT 0x0000
-#define FRAGCTL_ENDFRAG 0x0300
-#define FRAGCTL_MIDFRAG 0x0200
-#define FRAGCTL_STAFRAG 0x0100
-#define FRAGCTL_NONFRAG 0x0000
-
-#else
-
+/* WMAC definition FIFO Control */
#define FIFOCTL_AUTO_FB_1 0x1000
#define FIFOCTL_AUTO_FB_0 0x0800
#define FIFOCTL_GRPACK 0x0400
@@ -171,7 +133,7 @@
#define FIFOCTL_NEEDACK 0x0002
#define FIFOCTL_LHEAD 0x0001
-//WMAC definition Frag Control
+/* WMAC definition Frag Control */
#define FRAGCTL_AES 0x0300
#define FRAGCTL_TKIP 0x0200
#define FRAGCTL_LEGACY 0x0100
@@ -181,8 +143,6 @@
#define FRAGCTL_STAFRAG 0x0001
#define FRAGCTL_NONFRAG 0x0000
-#endif
-
#define TYPE_TXDMA0 0
#define TYPE_AC0DMA 1
#define TYPE_ATIMDMA 2
@@ -195,14 +155,17 @@
#define TYPE_RXDMA1 1
#define TYPE_MAXRD 2
-// TD_INFO flags control bit
-#define TD_FLAGS_NETIF_SKB 0x01 // check if need release skb
-#define TD_FLAGS_PRIV_SKB 0x02 // check if called from private skb(hostap)
-#define TD_FLAGS_PS_RETRY 0x04 // check if PS STA frame re-transmit
+/* TD_INFO flags control bit */
+#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
+#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */
+#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
-// ref_sk_buff is used for mapping the skb structure between pre-built driver-obj & running kernel.
-// Since different kernel version (2.4x) may change skb structure, i.e. pre-built driver-obj
-// may link to older skb that leads error.
+/*
+ * ref_sk_buff is used for mapping the skb structure between a pre-built
+ * driver object and the running kernel. Since a different kernel version
+ * (2.4.x) may change the skb structure, a pre-built driver object may link
+ * against an older skb layout and fail.
+ */
typedef struct tagDEVICE_RD_INFO {
struct sk_buff *skb;
@@ -242,9 +205,7 @@ typedef struct tagRDES1 {
} __attribute__ ((__packed__))
SRDES1;
-//
-// Rx descriptor
-//
+/* Rx descriptor */
typedef struct tagSRxDesc {
volatile SRDES0 m_rd0RD0;
volatile SRDES1 m_rd1RD1;
@@ -292,6 +253,7 @@ typedef struct tagTDES1 {
STDES1;
typedef struct tagDEVICE_TD_INFO {
+ void *mic_hdr;
struct sk_buff *skb;
unsigned char *buf;
dma_addr_t skb_dma;
@@ -302,9 +264,7 @@ typedef struct tagDEVICE_TD_INFO {
unsigned char byFlags;
} DEVICE_TD_INFO, *PDEVICE_TD_INFO;
-//
-// transmit descriptor
-//
+/* transmit descriptor */
typedef struct tagSTxDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
@@ -319,8 +279,8 @@ typedef const STxDesc *PCSTxDesc;
typedef struct tagSTxSyncDesc {
volatile STDES0 m_td0TD0;
volatile STDES1 m_td1TD1;
- volatile u32 buff_addr; // pointer to logical buffer
- volatile u32 next_desc; // pointer to next logical descriptor
+ volatile u32 buff_addr; /* pointer to logical buffer */
+ volatile u32 next_desc; /* pointer to next logical descriptor */
volatile unsigned short m_wFIFOCtl;
volatile unsigned short m_wTimeStamp;
struct tagSTxSyncDesc *next __aligned(8);
@@ -329,9 +289,7 @@ typedef struct tagSTxSyncDesc {
STxSyncDesc, *PSTxSyncDesc;
typedef const STxSyncDesc *PCSTxSyncDesc;
-//
-// RsvTime buffer header
-//
+/* RsvTime buffer header */
typedef struct tagSRrvTime_atim {
unsigned short wCTSTxRrvTime_ba;
unsigned short wTxRrvTime_a;
@@ -352,9 +310,7 @@ union vnt_phy_field_swap {
u32 field_write;
};
-//
-// Tx FIFO header
-//
+/* Tx FIFO header */
typedef struct tagSTxBufHead {
u32 adwTxKey[4];
unsigned short wFIFOCtl;
@@ -392,4 +348,4 @@ typedef struct tagSKeyEntry {
} __attribute__ ((__packed__))
SKeyEntry;
-#endif // __DESC_H__
+#endif /* __DESC_H__ */
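
Dropping the #ifdef __BIG_ENDIAN block leaves a single, little-endian FIFOCTL/FRAGCTL bit layout; on a big-endian host the byte-order conversion would then happen where the descriptor field is written rather than in the bit definitions. A hedged illustration of that pattern (the helper name is invented for this example and the conversion point is an assumption, not something this patch adds):

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include "desc.h"

    static void vnt_example_fill_fifo_ctl(struct tagSTxBufHead *head,
                                          bool need_ack)
    {
            u16 fifo_ctl = FIFOCTL_GENINT;

            if (need_ack)
                    fifo_ctl |= FIFOCTL_NEEDACK;

            /* one LE bit layout for all hosts; swap on write where required */
            head->wFIFOCtl = cpu_to_le16(fifo_ctl);
    }
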
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index ddd356aa7eaf..83efbfb57c79 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -50,40 +50,47 @@
#include <linux/io.h>
#include <linux/if.h>
#include <linux/crc32.h>
-//#include <linux/config.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
/* Include Wireless Extension definition and check version - Jean II */
+#include <net/mac80211.h>
#include <linux/wireless.h>
-#include <net/iw_handler.h> // New driver API
+#include <net/iw_handler.h> /* New driver API */
-//2008-0409-07, <Add> by Einsn Liu
#ifndef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#define WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
#endif
-//
-// device specific
-//
+/* device specific */
#include "device_cfg.h"
-#include "ttype.h"
-#include "80211hdr.h"
-#include "tether.h"
-#include "wmgr.h"
-#include "wcmd.h"
+#include "card.h"
#include "mib.h"
#include "srom.h"
-#include "rc4.h"
#include "desc.h"
#include "key.h"
#include "mac.h"
/*--------------------- Export Definitions -------------------------*/
+#define RATE_1M 0
+#define RATE_2M 1
+#define RATE_5M 2
+#define RATE_11M 3
+#define RATE_6M 4
+#define RATE_9M 5
+#define RATE_12M 6
+#define RATE_18M 7
+#define RATE_24M 8
+#define RATE_36M 9
+#define RATE_48M 10
+#define RATE_54M 11
+#define RATE_AUTO 12
+#define MAX_RATE 12
+
#define MAC_MAX_CONTEXT_REG (256+128)
#define MAX_MULTICAST_ADDRESS_NUM 32
@@ -112,7 +119,7 @@
#define FB_RATE0 0
#define FB_RATE1 1
-// Antenna Mode
+/* Antenna Mode */
#define ANT_A 0
#define ANT_B 1
#define ANT_DIVERSITY 2
@@ -129,120 +136,28 @@
#define RUN_AT(x) (jiffies+(x))
#endif
-// DMA related
+#define MAKE_BEACON_RESERVED 10 /* (us) */
+
+/* DMA related */
#define RESERV_AC0DMA 4
-// BUILD OBJ mode
+/* BUILD OBJ mode */
#define AVAIL_TD(p, q) ((p)->sOpts.nTxDescs[(q)] - ((p)->iTDUsed[(q)]))
#define NUM 64
-#define PRIVATE_Message 0
-
-/*--------------------- Export Types ------------------------------*/
-
-#define PRINT_K(p, args...) \
-do { \
- if (PRIVATE_Message) \
- printk(p, ##args); \
-} while (0)
+/* 0:11A 1:11B 2:11G */
+#define BB_TYPE_11A 0
+#define BB_TYPE_11B 1
+#define BB_TYPE_11G 2
-//0:11A 1:11B 2:11G
-typedef enum _VIA_BB_TYPE
-{
- BB_TYPE_11A = 0,
- BB_TYPE_11B,
- BB_TYPE_11G
-} VIA_BB_TYPE, *PVIA_BB_TYPE;
-
-//0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
-typedef enum _VIA_PKT_TYPE
-{
- PK_TYPE_11A = 0,
- PK_TYPE_11B,
- PK_TYPE_11GB,
- PK_TYPE_11GA
-} VIA_PKT_TYPE, *PVIA_PKT_TYPE;
-
-typedef enum __device_msg_level {
- MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation.
- MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified.
- MSG_LEVEL_INFO = 2, //Normal message.
- MSG_LEVEL_VERBOSE = 3, //Will report all trival errors.
- MSG_LEVEL_DEBUG = 4 //Only for debug purpose.
-} DEVICE_MSG_LEVEL, *PDEVICE_MSG_LEVEL;
-
-//++ NDIS related
-
-#define MAX_BSSIDINFO_4_PMKID 16
-#define MAX_PMKIDLIST 5
-//Flags for PMKID Candidate list structure
-#define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
-
-// PMKID Structures
-typedef unsigned char NDIS_802_11_PMKID_VALUE[16];
-
-typedef enum _NDIS_802_11_WEP_STATUS {
- Ndis802_11WEPEnabled,
- Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
- Ndis802_11WEPDisabled,
- Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
- Ndis802_11WEPKeyAbsent,
- Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
- Ndis802_11WEPNotSupported,
- Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
- Ndis802_11Encryption2Enabled,
- Ndis802_11Encryption2KeyAbsent,
- Ndis802_11Encryption3Enabled,
- Ndis802_11Encryption3KeyAbsent
-} NDIS_802_11_WEP_STATUS, *PNDIS_802_11_WEP_STATUS,
- NDIS_802_11_ENCRYPTION_STATUS, *PNDIS_802_11_ENCRYPTION_STATUS;
-
-typedef enum _NDIS_802_11_STATUS_TYPE {
- Ndis802_11StatusType_Authentication,
- Ndis802_11StatusType_MediaStreamMode,
- Ndis802_11StatusType_PMKID_CandidateList,
- Ndis802_11StatusTypeMax // not a real type, defined as an upper bound
-} NDIS_802_11_STATUS_TYPE, *PNDIS_802_11_STATUS_TYPE;
-
-//Added new types for PMKID Candidate lists.
-struct pmkid_candidate {
- NDIS_802_11_MAC_ADDRESS BSSID;
- unsigned long Flags;
-};
+/* 0:11a, 1:11b, 2:11gb (only CCK in BasicRate), 3:11ga (OFDM in BasicRate) */
+#define PK_TYPE_11A 0
+#define PK_TYPE_11B 1
+#define PK_TYPE_11GB 2
+#define PK_TYPE_11GA 3
-typedef struct _BSSID_INFO {
- NDIS_802_11_MAC_ADDRESS BSSID;
- NDIS_802_11_PMKID_VALUE PMKID;
-} BSSID_INFO, *PBSSID_INFO;
-
-typedef struct tagSPMKID {
- unsigned long Length;
- unsigned long BSSIDInfoCount;
- BSSID_INFO BSSIDInfo[MAX_BSSIDINFO_4_PMKID];
-} SPMKID, *PSPMKID;
-
-typedef struct tagSPMKIDCandidateEvent {
- NDIS_802_11_STATUS_TYPE StatusType;
- unsigned long Version; // Version of the structure
- unsigned long NumCandidates; // No. of pmkid candidates
- struct pmkid_candidate CandidateList[MAX_PMKIDLIST];
-} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
-
-//--
-
-//++ 802.11h related
-#define MAX_QUIET_COUNT 8
-
-typedef struct tagSQuietControl {
- bool bEnable;
- unsigned long dwStartTime;
- unsigned char byPeriod;
- unsigned short wDuration;
-} SQuietControl, *PSQuietControl;
-
-//--
typedef struct __chip_info_tbl {
CHIP_TYPE chip_id;
char *name;
@@ -256,34 +171,7 @@ typedef enum {
OWNED_BY_NIC = 1
} DEVICE_OWNER_TYPE, *PDEVICE_OWNER_TYPE;
-// The receive duplicate detection cache entry
-typedef struct tagSCacheEntry {
- unsigned short wFmSequence;
- unsigned char abyAddr2[ETH_ALEN];
-} SCacheEntry, *PSCacheEntry;
-
-typedef struct tagSCache {
-/* The receive cache is updated circularly. The next entry to be written is
- * indexed by the "InPtr".
- */
- unsigned int uInPtr; // Place to use next
- SCacheEntry asCacheEntry[DUPLICATE_RX_CACHE_LENGTH];
-} SCache, *PSCache;
-
-#define CB_MAX_RX_FRAG 64
-// DeFragment Control Block, used for collecting fragments prior to reassembly
-typedef struct tagSDeFragControlBlock {
- unsigned short wSequence;
- unsigned short wFragNum;
- unsigned char abyAddr2[ETH_ALEN];
- unsigned int uLifetime;
- struct sk_buff *skb;
- unsigned char *pbyRxBuffer;
- unsigned int cbFrameLength;
- bool bInUse;
-} SDeFragControlBlock, *PSDeFragControlBlock;
-
-//flags for options
+/* flags for options */
#define DEVICE_FLAGS_IP_ALIGN 0x00000001UL
#define DEVICE_FLAGS_PREAMBLE_TYPE 0x00000002UL
#define DEVICE_FLAGS_OP_MODE 0x00000004UL
@@ -291,15 +179,15 @@ typedef struct tagSDeFragControlBlock {
#define DEVICE_FLAGS_80211h_MODE 0x00000010UL
#define DEVICE_FLAGS_DiversityANT 0x00000020UL
-//flags for driver status
+/* flags for driver status */
#define DEVICE_FLAGS_OPENED 0x00010000UL
#define DEVICE_FLAGS_WOL_ENABLED 0x00080000UL
-//flags for capabilities
+/* flags for capabilities */
#define DEVICE_FLAGS_TX_ALIGN 0x01000000UL
#define DEVICE_FLAGS_HAVE_CAM 0x02000000UL
#define DEVICE_FLAGS_FLOW_CTRL 0x04000000UL
-//flags for MII status
+/* flags for MII status */
#define DEVICE_LINK_FAIL 0x00000001UL
#define DEVICE_SPEED_10 0x00000002UL
#define DEVICE_SPEED_100 0x00000004UL
@@ -307,18 +195,14 @@ typedef struct tagSDeFragControlBlock {
#define DEVICE_DUPLEX_FULL 0x00000010UL
#define DEVICE_AUTONEG_ENABLE 0x00000020UL
#define DEVICE_FORCED_BY_EEPROM 0x00000040UL
-//for device_set_media_duplex
+/* for device_set_media_duplex */
#define DEVICE_LINK_CHANGE 0x00000001UL
typedef struct __device_opt {
- int nRxDescs0; //Number of RX descriptors0
- int nRxDescs1; //Number of RX descriptors1
- int nTxDescs[2]; //Number of TX descriptors 0, 1
- int int_works; //interrupt limits
- int rts_thresh; //rts threshold
- int frag_thresh;
- int data_rate;
- int channel_num;
+ int nRxDescs0; /* Number of RX descriptors0 */
+ int nRxDescs1; /* Number of RX descriptors1 */
+ int nTxDescs[2]; /* Number of TX descriptors 0, 1 */
+ int int_works; /* interrupt limits */
int short_retry;
int long_retry;
int bbp_type;
@@ -327,11 +211,16 @@ typedef struct __device_opt {
struct vnt_private {
struct pci_dev *pcid;
-
-// netdev
- struct net_device *dev;
-
-//dma addr, rx/tx pool
+ /* mac80211 */
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ unsigned long key_entry_inuse;
+ u32 basic_rates;
+ u16 current_aid;
+ int mc_list_count;
+ u8 mac_hw;
+
+/* dma addr, rx/tx pool */
dma_addr_t pool_dma;
dma_addr_t rd0_pool_dma;
dma_addr_t rd1_pool_dma;
@@ -356,9 +245,12 @@ struct vnt_private {
u32 io_size;
unsigned char byRevId;
+ unsigned char byRxMode;
unsigned short SubSystemID;
unsigned short SubVendorID;
+ spinlock_t lock;
+
int nTxQueues;
volatile int iTDUsed[TYPE_MAXTD];
@@ -371,30 +263,18 @@ struct vnt_private {
volatile PSRxDesc aRD0Ring;
volatile PSRxDesc aRD1Ring;
volatile PSRxDesc pCurrRD[TYPE_MAXRD];
- SCache sDupRxCache;
-
- SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
- unsigned int cbDFCB;
- unsigned int cbFreeDFCB;
- unsigned int uCurrentDFCBIdx;
OPTIONS sOpts;
u32 flags;
u32 rx_buf_sz;
+ u8 rx_rate;
int multicast_limit;
- unsigned char byRxMode;
-
- spinlock_t lock;
-
- pid_t MLMEThr_pid;
- struct completion notify;
- struct semaphore mlme_semaphore;
u32 rx_bytes;
- // Version control
+ /* Version control */
unsigned char byLocalID;
unsigned char byRFType;
@@ -402,20 +282,15 @@ struct vnt_private {
unsigned char byZoneType;
bool bZoneRegExist;
unsigned char byOriginalZonetype;
- unsigned char abyMacContext[MAC_MAX_CONTEXT_REG];
- bool bLinkPass; // link status: OK or fail
- unsigned char abyCurrentNetAddr[ETH_ALEN];
- // Adapter statistics
+ unsigned char abyCurrentNetAddr[ETH_ALEN] __aligned(2);
+ bool bLinkPass; /* link status: OK or fail */
+
+ /* Adapter statistics */
SStatCounter scStatistic;
- // 802.11 counter
+ /* 802.11 counter */
SDot11Counters s802_11Counter;
- // 802.11 management
- PSMgmtObject pMgmt;
- SMgmtObject sMgmtObj;
-
- // 802.11 MAC specific
unsigned int uCurrRSSI;
unsigned char byCurrSQ;
@@ -427,22 +302,25 @@ struct vnt_private {
bool bTxRxAntInv;
unsigned char *pbyTmpBuff;
- unsigned int uSIFS; //Current SIFS
- unsigned int uDIFS; //Current DIFS
- unsigned int uEIFS; //Current EIFS
- unsigned int uSlot; //Current SlotTime
- unsigned int uCwMin; //Current CwMin
- unsigned int uCwMax; //CwMax is fixed on 1023.
- // PHY parameter
+ unsigned int uSIFS; /* Current SIFS */
+ unsigned int uDIFS; /* Current DIFS */
+ unsigned int uEIFS; /* Current EIFS */
+ unsigned int uSlot; /* Current SlotTime */
+ unsigned int uCwMin; /* Current CwMin */
+ unsigned int uCwMax; /* CwMax is fixed on 1023. */
+ /* PHY parameter */
unsigned char bySIFS;
unsigned char byDIFS;
unsigned char byEIFS;
unsigned char bySlot;
unsigned char byCWMaxMin;
- CARD_PHY_TYPE eCurrentPHYType;
- VIA_BB_TYPE byBBType; //0: 11A, 1:11B, 2:11G
- VIA_PKT_TYPE byPacketType; //0:11a,1:11b,2:11gb(only CCK in BasicRate),3:11ga(OFDM in Basic Rate)
+ u8 byBBType; /* 0:11A, 1:11B, 2:11G */
+ u8 byPacketType; /*
+ * 0:11a,1:11b,2:11gb (only CCK
+ * in BasicRate), 3:11ga (OFDM in
+ * Basic Rate)
+ */
unsigned short wBasicRate;
unsigned char byACKRate;
unsigned char byTopOFDMBasicRate;
@@ -450,28 +328,16 @@ struct vnt_private {
unsigned char byMinChannel;
unsigned char byMaxChannel;
- unsigned int uConnectionRate;
unsigned char byPreambleType;
unsigned char byShortPreamble;
unsigned short wCurrentRate;
- unsigned short wRTSThreshold;
- unsigned short wFragmentationThreshold;
unsigned char byShortRetryLimit;
unsigned char byLongRetryLimit;
enum nl80211_iftype op_mode;
- unsigned char byOpMode;
bool bBSSIDFilter;
unsigned short wMaxTransmitMSDULifetime;
- unsigned char abyBSSID[ETH_ALEN];
- unsigned char abyDesireBSSID[ETH_ALEN];
- unsigned short wACKDuration; // update while speed change
- unsigned short wRTSTransmitLen; // update while speed change
- unsigned char byRTSServiceField; // update while speed change
- unsigned char byRTSSignalField; // update while speed change
-
- unsigned long dwMaxReceiveLifetime; // dot11MaxReceiveLifetime
bool bEncryptionEnable;
bool bLongHeader;
@@ -480,24 +346,20 @@ struct vnt_private {
bool bNonERPPresent;
bool bBarkerPreambleMd;
- unsigned char byERPFlag;
- unsigned short wUseProtectCntDown;
-
bool bRadioControlOff;
bool bRadioOff;
bool bEnablePSMode;
unsigned short wListenInterval;
bool bPWBitOn;
- WMAC_POWER_MODE ePSMode;
- // GPIO Radio Control
+ /* GPIO Radio Control */
unsigned char byRadioCtl;
unsigned char byGPIO;
bool bHWRadioOff;
bool bPrvActive4RadioOFF;
bool bGPIOBlockRead;
- // Beacon related
+ /* Beacon related */
unsigned short wSeqCounter;
unsigned short wBCNBufLen;
bool bBeaconBufReady;
@@ -506,71 +368,12 @@ struct vnt_private {
unsigned int cbBeaconBufReadySetCnt;
bool bFixRate;
unsigned char byCurrentCh;
- unsigned int uScanTime;
-
- CMD_STATE eCommandState;
-
- CMD_CODE eCommand;
- bool bBeaconTx;
-
- bool bStopBeacon;
- bool bStopDataPkt;
- bool bStopTx0Pkt;
- unsigned int uAutoReConnectTime;
-
- // 802.11 counter
-
- CMD_ITEM eCmdQueue[CMD_Q_SIZE];
- unsigned int uCmdDequeueIdx;
- unsigned int uCmdEnqueueIdx;
- unsigned int cbFreeCmdQueue;
- bool bCmdRunning;
- bool bCmdClear;
-
- bool bRoaming;
- //WOW
- unsigned char abyIPAddr[4];
-
- unsigned long ulTxPower;
- NDIS_802_11_WEP_STATUS eEncryptionStatus;
- bool bTransmitKey;
-//2007-0925-01<Add>by MikeLiu
-//mike add :save old Encryption
- NDIS_802_11_WEP_STATUS eOldEncryptionStatus;
-
- SKeyManagement sKey;
- unsigned long dwIVCounter;
-
- u64 qwPacketNumber; /* For CCMP and TKIP as TSC(6 bytes) */
- unsigned int uCurrentWEPMode;
-
- RC4Ext SBox;
- unsigned char abyPRNG[WLAN_WEPMAX_KEYLEN+3];
- unsigned char byKeyIndex;
- unsigned int uKeyLength;
- unsigned char abyKey[WLAN_WEP232_KEYLEN];
bool bAES;
- unsigned char byCntMeasure;
-
- // for AP mode
- unsigned int uAssocCount;
- bool bMoreData;
-
- // QoS
- bool bGrpAckPolicy;
-
- // for OID_802_11_ASSOCIATION_INFORMATION
- bool bAssocInfoSet;
unsigned char byAutoFBCtrl;
- bool bTxMICFail;
- bool bRxMICFail;
-
- unsigned int uRATEIdx;
-
- // For Update BaseBand VGA Gain Offset
+ /* For Update BaseBand VGA Gain Offset */
bool bUpdateBBVGA;
unsigned int uBBVGADiffCount;
unsigned char byBBVGANew;
@@ -581,24 +384,12 @@ struct vnt_private {
unsigned char byBBPreEDRSSI;
unsigned char byBBPreEDIndex;
- bool bRadioCmd;
unsigned long dwDiagRefCount;
- // For FOE Tuning
+ /* For FOE Tuning */
unsigned char byFOETuning;
- // For Auto Power Tunning
-
- unsigned char byAutoPwrTunning;
- short sPSetPointCCK;
- short sPSetPointOFDMG;
- short sPSetPointOFDMA;
- long lPFormulaOffset;
- short sPThreshold;
- char cAdjustStep;
- char cMinTxAGC;
-
- // For RF Power table
+ /* For RF Power table */
unsigned char byCCKPwr;
unsigned char byOFDMPwrG;
unsigned char byCurPwr;
@@ -610,27 +401,12 @@ struct vnt_private {
char abyRegPwr[CB_MAX_CHANNEL+1];
char abyLocalPwr[CB_MAX_CHANNEL+1];
- // BaseBand Loopback Use
+ /* BaseBand Loopback Use */
unsigned char byBBCR4d;
unsigned char byBBCRc9;
unsigned char byBBCR88;
unsigned char byBBCR09;
- // command timer
- struct timer_list sTimerCommand;
- struct timer_list sTimerTxData;
- unsigned long nTxDataTimeCout;
- bool fTxDataInSleep;
- bool IsTxDataTrigger;
-
-#ifdef WPA_SM_Transtatus
- bool fWPA_Authened; //is WPA/WPA-PSK or WPA2/WPA2-PSK authen??
-#endif
- unsigned char byReAssocCount; //mike add:re-association retry times!
- unsigned char byLinkWaitCount;
-
- unsigned char abyNodeName[17];
-
bool bDiversityRegCtlON;
bool bDiversityEnable;
unsigned long ulDiversityNValue;
@@ -640,13 +416,13 @@ struct vnt_private {
unsigned char byTMax3;
unsigned long ulSQ3TH;
-// ANT diversity
+ /* ANT diversity */
unsigned long uDiversityCnt;
unsigned char byAntennaState;
unsigned long ulRatio_State0;
unsigned long ulRatio_State1;
- //SQ3 functions for antenna diversity
+ /* SQ3 functions for antenna diversity */
struct timer_list TimerSQ3Tmax1;
struct timer_list TimerSQ3Tmax2;
struct timer_list TimerSQ3Tmax3;
@@ -654,86 +430,11 @@ struct vnt_private {
unsigned long uNumSQ3[MAX_RATE];
unsigned short wAntDiversityMaxRate;
- SEthernetHeader sTxEthHeader;
- SEthernetHeader sRxEthHeader;
- unsigned char abyBroadcastAddr[ETH_ALEN];
- unsigned char abySNAP_RFC1042[ETH_ALEN];
- unsigned char abySNAP_Bridgetunnel[ETH_ALEN];
- unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //unsigned long alignment
- // Pre-Authentication & PMK cache
- SPMKID gsPMKID;
- SPMKIDCandidateEvent gsPMKIDCandidate;
-
- // for 802.11h
- bool b11hEnable;
- unsigned char abyCountryCode[3];
- // for 802.11h DFS
- unsigned int uNumOfMeasureEIDs;
- PWLAN_IE_MEASURE_REQ pCurrMeasureEID;
- bool bMeasureInProgress;
- unsigned char byOrgChannel;
- unsigned char byOrgRCR;
- unsigned long dwOrgMAR0;
- unsigned long dwOrgMAR4;
- unsigned char byBasicMap;
- unsigned char byCCAFraction;
- unsigned char abyRPIs[8];
- unsigned long dwRPIs[8];
- bool bChannelSwitch;
- unsigned char byNewChannel;
- unsigned char byChannelSwitchCount;
- bool bQuietEnable;
- bool bEnableFirstQuiet;
- unsigned char byQuietStartCount;
- unsigned int uQuietEnqueue;
- unsigned long dwCurrentQuietEndTime;
- SQuietControl sQuiet[MAX_QUIET_COUNT];
- // for 802.11h TPC
- bool bCountryInfo5G;
- bool bCountryInfo24G;
+ unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
unsigned short wBeaconInterval;
-
- //WPA supplicant deamon
- struct net_device *wpadev;
- bool bWPADEVUp;
- struct sk_buff *skb;
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- unsigned int bwextcount;
- bool bWPASuppWextEnabled;
-#endif
-
- //--
-#ifdef HOSTAP
- // user space daemon: hostapd, is used for HOSTAP
- bool bEnableHostapd;
- bool bEnable8021x;
- bool bEnableHostWEP;
- struct net_device *apdev;
- int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
-#endif
- unsigned int uChannel;
- bool bMACSuspend;
-
- struct iw_statistics wstats; // wireless stats
- bool bCommit;
};
-static inline bool device_get_ip(struct vnt_private *pInfo)
-{
- struct in_device *in_dev = (struct in_device *)pInfo->dev->ip_ptr;
- struct in_ifaddr *ifa;
-
- if (in_dev != NULL) {
- ifa = (struct in_ifaddr *)in_dev->ifa_list;
- if (ifa != NULL) {
- memcpy(pInfo->abyIPAddr, &ifa->ifa_address, 4);
- return true;
- }
- }
- return false;
-}
-
static inline PDEVICE_RD_INFO alloc_rd_info(void)
{
return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
@@ -743,13 +444,4 @@ static inline PDEVICE_TD_INFO alloc_td_info(void)
{
return kzalloc(sizeof(DEVICE_TD_INFO), GFP_ATOMIC);
}
-
-/*--------------------- Export Functions --------------------------*/
-
-bool device_dma0_xmit(struct vnt_private *pDevice,
- struct sk_buff *skb, unsigned int uNodeIndex);
-bool device_alloc_frag_buf(struct vnt_private *pDevice,
- PSDeFragControlBlock pDeF);
-int Config_FileOperation(struct vnt_private *pDevice,
- bool fwrite, unsigned char *Parameter);
#endif
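
Since vnt_private now carries spinlock_t lock directly, and device_main.c below switches its EEPROM read to the irqsave variant, the expected pattern around state shared with the interrupt handler looks roughly like this (function name and body are illustrative only):

    #include <linux/spinlock.h>
    #include "device.h"

    static void vnt_example_update_state(struct vnt_private *priv)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);
            /* touch fields also accessed from device_intr() here */
            spin_unlock_irqrestore(&priv->lock, flags);
    }
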
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index 7221824e4f23..a4a8a8489e0b 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -29,8 +29,6 @@
#include <linux/types.h>
-#include "ttype.h"
-
typedef
struct _version {
unsigned char major;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 54e16f40d8ed..83e4162c0094 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -32,27 +32,16 @@
* device_free_info - device structure resource free function
* device_get_pci_info - get allocated pci io/mem resource
* device_print_info - print out resource
- * device_open - allocate dma/descripter resource & initial mac/bbp function
- * device_xmit - asynchrous data tx function
* device_intr - interrupt handle function
- * device_set_multi - set mac filter
- * device_ioctl - ioctl entry
- * device_close - shutdown mac/bbp & free dma/descripter resource
* device_rx_srv - rx service function
- * device_receive_frame - rx data function
* device_alloc_rx_buf - rx buffer pre-allocated function
- * device_alloc_frag_buf - rx fragement pre-allocated function
* device_free_tx_buf - free tx buffer function
- * device_free_frag_buf- free de-fragement buffer
- * device_dma0_tx_80211- tx 802.11 frame via dma0
- * device_dma0_xmit- tx PS bufferred frame via dma0
* device_init_rd0_ring- initial rd dma0 ring
* device_init_rd1_ring- initial rd dma1 ring
* device_init_td0_ring- initial tx dma0 ring buffer
* device_init_td1_ring- initial tx dma1 ring buffer
* device_init_registers- initial MAC & BBP & RF internal registers.
* device_init_rings- initial tx/rx ring buffer
- * device_init_defrag_cb- initial & allocate de-fragement buffer.
* device_free_rings- free all allocated ring buffer
* device_tx_srv- tx interrupt service function
*
@@ -66,24 +55,10 @@
#include "channel.h"
#include "baseband.h"
#include "mac.h"
-#include "tether.h"
-#include "wmgr.h"
-#include "wctl.h"
#include "power.h"
-#include "wcmd.h"
-#include "iocmd.h"
-#include "tcrc.h"
#include "rxtx.h"
-#include "wroute.h"
-#include "bssdb.h"
-#include "hostap.h"
-#include "wpactl.h"
-#include "ioctl.h"
-#include "iwctl.h"
#include "dpc.h"
-#include "datarate.h"
#include "rf.h"
-#include "iowpa.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
@@ -118,89 +93,16 @@ DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
-#define IP_ALIG_DEF 0
-/* IP_byte_align[] is used for IP header unsigned long byte aligned
- 0: indicate the IP header won't be unsigned long byte aligned.(Default) .
- 1: indicate the IP header will be unsigned long byte aligned.
- In some environment, the IP header should be unsigned long byte aligned,
- or the packet will be droped when we receive it. (eg: IPVS)
-*/
-DEVICE_PARAM(IP_byte_align, "Enable IP header dword aligned");
-
#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64
DEVICE_PARAM(int_works, "Number of packets per interrupt services");
-#define CHANNEL_MIN 1
-#define CHANNEL_MAX 14
-#define CHANNEL_DEF 6
-
-DEVICE_PARAM(Channel, "Channel number");
-
-/* PreambleType[] is the preamble length used for transmit.
- 0: indicate allows long preamble type
- 1: indicate allows short preamble type
-*/
-
-#define PREAMBLE_TYPE_DEF 1
-
-DEVICE_PARAM(PreambleType, "Preamble Type");
-
-#define RTS_THRESH_MIN 512
-#define RTS_THRESH_MAX 2347
#define RTS_THRESH_DEF 2347
-DEVICE_PARAM(RTSThreshold, "RTS threshold");
-
-#define FRAG_THRESH_MIN 256
-#define FRAG_THRESH_MAX 2346
#define FRAG_THRESH_DEF 2346
-DEVICE_PARAM(FragThreshold, "Fragmentation threshold");
-
-#define DATA_RATE_MIN 0
-#define DATA_RATE_MAX 13
-#define DATA_RATE_DEF 13
-/* datarate[] index
- 0: indicate 1 Mbps 0x02
- 1: indicate 2 Mbps 0x04
- 2: indicate 5.5 Mbps 0x0B
- 3: indicate 11 Mbps 0x16
- 4: indicate 6 Mbps 0x0c
- 5: indicate 9 Mbps 0x12
- 6: indicate 12 Mbps 0x18
- 7: indicate 18 Mbps 0x24
- 8: indicate 24 Mbps 0x30
- 9: indicate 36 Mbps 0x48
- 10: indicate 48 Mbps 0x60
- 11: indicate 54 Mbps 0x6c
- 12: indicate 72 Mbps 0x90
- 13: indicate auto rate
-*/
-
-DEVICE_PARAM(ConnectionRate, "Connection data rate");
-
-#define OP_MODE_DEF 0
-
-DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode ");
-
-/* OpMode[] is used for transmit.
- 0: indicate infrastruct mode used
- 1: indicate adhoc mode used
- 2: indicate AP mode used
-*/
-
-/* PSMode[]
- 0: indicate disable power saving mode
- 1: indicate enable power saving mode
-*/
-
-#define PS_MODE_DEF 0
-
-DEVICE_PARAM(PSMode, "Power saving mode");
-
#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8
@@ -224,20 +126,6 @@ DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
DEVICE_PARAM(BasebandType, "baseband type");
-/* 80211hEnable[]
- 0: indicate disable 802.11h
- 1: indicate enable 802.11h
-*/
-
-#define X80211h_MODE_DEF 0
-
-DEVICE_PARAM(b80211hEnable, "802.11h mode");
-
-/* 80211hEnable[]
- 0: indicate disable 802.11h
- 1: indicate enable 802.11h
-*/
-
#define DIVERSITY_ANT_DEF 0
DEVICE_PARAM(bDiversityANTEnable, "ANT diversity mode");
@@ -265,17 +153,10 @@ static void device_free_info(struct vnt_private *pDevice);
static bool device_get_pci_info(struct vnt_private *, struct pci_dev *pcid);
static void device_print_info(struct vnt_private *pDevice);
static void device_init_diversity_timer(struct vnt_private *pDevice);
-static int device_open(struct net_device *dev);
-static int device_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t device_intr(int irq, void *dev_instance);
-static void device_set_multi(struct net_device *dev);
-static int device_close(struct net_device *dev);
-static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_PM
static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
-static int viawget_suspend(struct pci_dev *pcid, pm_message_t state);
-static int viawget_resume(struct pci_dev *pcid);
static struct notifier_block device_notifier = {
.notifier_call = device_notify_reboot,
.next = NULL,
@@ -285,15 +166,9 @@ static struct notifier_block device_notifier = {
static void device_init_rd0_ring(struct vnt_private *pDevice);
static void device_init_rd1_ring(struct vnt_private *pDevice);
-static void device_init_defrag_cb(struct vnt_private *pDevice);
static void device_init_td0_ring(struct vnt_private *pDevice);
static void device_init_td1_ring(struct vnt_private *pDevice);
-static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
-//2008-0714<Add>by Mike Liu
-static bool device_release_WPADEV(struct vnt_private *pDevice);
-
-static int ethtool_ioctl(struct net_device *dev, void __user *useraddr);
static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx);
static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pDesc);
@@ -304,9 +179,6 @@ static void device_free_td1_ring(struct vnt_private *pDevice);
static void device_free_rd0_ring(struct vnt_private *pDevice);
static void device_free_rd1_ring(struct vnt_private *pDevice);
static void device_free_rings(struct vnt_private *pDevice);
-static void device_free_frag_buf(struct vnt_private *pDevice);
-static int Config_FileGetParameter(unsigned char *string,
- unsigned char *dest, unsigned char *source);
/*--------------------- Export Variables --------------------------*/
@@ -339,124 +211,50 @@ static void device_get_options(struct vnt_private *pDevice)
pOpts->nRxDescs1 = RX_DESC_DEF1;
pOpts->nTxDescs[0] = TX_DESC_DEF0;
pOpts->nTxDescs[1] = TX_DESC_DEF1;
- pOpts->flags |= DEVICE_FLAGS_IP_ALIGN;
pOpts->int_works = INT_WORKS_DEF;
- pOpts->rts_thresh = RTS_THRESH_DEF;
- pOpts->frag_thresh = FRAG_THRESH_DEF;
- pOpts->data_rate = DATA_RATE_DEF;
- pOpts->channel_num = CHANNEL_DEF;
- pOpts->flags |= DEVICE_FLAGS_PREAMBLE_TYPE;
- pOpts->flags |= DEVICE_FLAGS_OP_MODE;
pOpts->short_retry = SHORT_RETRY_DEF;
pOpts->long_retry = LONG_RETRY_DEF;
pOpts->bbp_type = BBP_TYPE_DEF;
- pOpts->flags |= DEVICE_FLAGS_80211h_MODE;
pOpts->flags |= DEVICE_FLAGS_DiversityANT;
}
static void
device_set_options(struct vnt_private *pDevice)
{
- unsigned char abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- unsigned char abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- unsigned char abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
-
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
- memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
-
- pDevice->uChannel = pDevice->sOpts.channel_num;
- pDevice->wRTSThreshold = pDevice->sOpts.rts_thresh;
- pDevice->wFragmentationThreshold = pDevice->sOpts.frag_thresh;
pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
- pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME;
- pDevice->byShortPreamble = (pDevice->sOpts.flags & DEVICE_FLAGS_PREAMBLE_TYPE) ? 1 : 0;
- pDevice->byOpMode = (pDevice->sOpts.flags & DEVICE_FLAGS_OP_MODE) ? 1 : 0;
- pDevice->ePSMode = (pDevice->sOpts.flags & DEVICE_FLAGS_PS_MODE) ? 1 : 0;
- pDevice->b11hEnable = (pDevice->sOpts.flags & DEVICE_FLAGS_80211h_MODE) ? 1 : 0;
pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
- pDevice->uConnectionRate = pDevice->sOpts.data_rate;
- if (pDevice->uConnectionRate < RATE_AUTO)
- pDevice->bFixRate = true;
pDevice->byBBType = pDevice->sOpts.bbp_type;
- pDevice->byPacketType = (VIA_PKT_TYPE)pDevice->byBBType;
+ pDevice->byPacketType = pDevice->byBBType;
pDevice->byAutoFBCtrl = AUTO_FB_0;
pDevice->bUpdateBBVGA = true;
- pDevice->byFOETuning = 0;
pDevice->byPreambleType = 0;
- pr_debug(" uChannel= %d\n", (int)pDevice->uChannel);
- pr_debug(" byOpMode= %d\n", (int)pDevice->byOpMode);
- pr_debug(" ePSMode= %d\n", (int)pDevice->ePSMode);
- pr_debug(" wRTSThreshold= %d\n", (int)pDevice->wRTSThreshold);
pr_debug(" byShortRetryLimit= %d\n", (int)pDevice->byShortRetryLimit);
pr_debug(" byLongRetryLimit= %d\n", (int)pDevice->byLongRetryLimit);
pr_debug(" byPreambleType= %d\n", (int)pDevice->byPreambleType);
pr_debug(" byShortPreamble= %d\n", (int)pDevice->byShortPreamble);
- pr_debug(" uConnectionRate= %d\n", (int)pDevice->uConnectionRate);
pr_debug(" byBBType= %d\n", (int)pDevice->byBBType);
- pr_debug(" pDevice->b11hEnable= %d\n", (int)pDevice->b11hEnable);
pr_debug(" pDevice->bDiversityRegCtlON= %d\n",
(int)pDevice->bDiversityRegCtlON);
}
-static void s_vCompleteCurrentMeasure(struct vnt_private *pDevice,
- unsigned char byResult)
-{
- unsigned int ii;
- unsigned long dwDuration = 0;
- unsigned char byRPI0 = 0;
-
- for (ii = 1; ii < 8; ii++) {
- pDevice->dwRPIs[ii] *= 255;
- dwDuration |= *((unsigned short *)(pDevice->pCurrMeasureEID->sReq.abyDuration));
- dwDuration <<= 10;
- pDevice->dwRPIs[ii] /= dwDuration;
- pDevice->abyRPIs[ii] = (unsigned char)pDevice->dwRPIs[ii];
- byRPI0 += pDevice->abyRPIs[ii];
- }
- pDevice->abyRPIs[0] = (0xFF - byRPI0);
-
- if (pDevice->uNumOfMeasureEIDs == 0) {
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- true,
- pDevice->pCurrMeasureEID,
- byResult,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- } else {
- VNTWIFIbMeasureReport(pDevice->pMgmt,
- false,
- pDevice->pCurrMeasureEID,
- byResult,
- pDevice->byBasicMap,
- pDevice->byCCAFraction,
- pDevice->abyRPIs
- );
- CARDbStartMeasure(pDevice, pDevice->pCurrMeasureEID++, pDevice->uNumOfMeasureEIDs);
- }
-}
-
//
// Initialisation of MAC & BBP registers
//
static void device_init_registers(struct vnt_private *pDevice)
{
+ unsigned long flags;
unsigned int ii;
unsigned char byValue;
unsigned char byValue1;
unsigned char byCCKPwrdBm = 0;
unsigned char byOFDMPwrdBm = 0;
- int zonetype = 0;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
MACbShutdown(pDevice->PortOffset);
- BBvSoftwareReset(pDevice->PortOffset);
+ BBvSoftwareReset(pDevice);
/* Do MACbSoftwareReset in MACvInitialize */
MACbSoftwareReset(pDevice->PortOffset);
@@ -481,11 +279,11 @@ static void device_init_registers(struct vnt_private *pDevice)
/* Get Local ID */
VNSvInPortB(pDevice->PortOffset + MAC_REG_LOCALID, &pDevice->byLocalID);
- spin_lock_irq(&pDevice->lock);
+ spin_lock_irqsave(&pDevice->lock, flags);
SROMvReadAllContents(pDevice->PortOffset, pDevice->abyEEPROM);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&pDevice->lock, flags);
/* Get Channel range */
pDevice->byMinChannel = 1;
@@ -558,41 +356,6 @@ static void device_init_registers(struct vnt_private *pDevice)
/* zonetype initial */
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- zonetype = Config_FileOperation(pDevice, false, NULL);
-
- if (zonetype >= 0) {
- if ((zonetype == 0) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x00)) {
- /* for USA */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
-
- pr_debug("Init Zone Type :USA\n");
- } else if ((zonetype == 1) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x01)) {
- /* for Japan */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
- } else if ((zonetype == 2) &&
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x02)) {
- /* for Europe */
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
- pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
-
- pr_debug("Init Zone Type :Europe\n");
- } else {
- if (zonetype != pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
- pr_debug("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",
- zonetype,
- pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
- else
- pr_debug("Read Zonetype file success,use default zonetype setting[%02x]\n",
- zonetype);
- }
- } else {
- pr_debug("Read Zonetype file fail,use default zonetype setting[%02x]\n",
- SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ZONETYPE));
- }
/* Get RFType */
pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
@@ -636,14 +399,9 @@ static void device_init_registers(struct vnt_private *pDevice)
}
/* recover 12,13 ,14channel for EUROPE by 11 channel */
- if (((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
- (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe)) &&
- (pDevice->byOriginalZonetype == ZoneType_USA)) {
- for (ii = 11; ii < 14; ii++) {
- pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
- pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
-
- }
+ for (ii = 11; ii < 14; ii++) {
+ pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
+ pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
}
/* Load OFDM A Power Table */
@@ -657,8 +415,6 @@ static void device_init_registers(struct vnt_private *pDevice)
(unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
- init_channel_table((void *)pDevice);
-
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
MACvSelectPage1(pDevice->PortOffset);
@@ -690,21 +446,12 @@ static void device_init_registers(struct vnt_private *pDevice)
BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
}
- BBvSetRxAntennaMode(pDevice->PortOffset, pDevice->byRxAntennaMode);
- BBvSetTxAntennaMode(pDevice->PortOffset, pDevice->byTxAntennaMode);
-
- pDevice->byCurrentCh = 0;
+ BBvSetRxAntennaMode(pDevice, pDevice->byRxAntennaMode);
+ BBvSetTxAntennaMode(pDevice, pDevice->byTxAntennaMode);
/* Set BB and packet type at the same time. */
/* Set Short Slot Time, xIFS, and RSPINF. */
- if (pDevice->uConnectionRate == RATE_AUTO)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
-
- /* default G Mode */
- VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_11G);
- VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_AUTO);
+ pDevice->wCurrentRate = RATE_54M;
pDevice->bRadioOff = false;
@@ -726,8 +473,6 @@ static void device_init_registers(struct vnt_private *pDevice)
if (pDevice->bHWRadioOff || pDevice->bRadioControlOff)
CARDbRadioPowerOff(pDevice);
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
-
/* get Permanent network address */
SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
pr_debug("Network address = %pM\n", pDevice->abyCurrentNetAddr);
@@ -740,223 +485,39 @@ static void device_init_registers(struct vnt_private *pDevice)
if (pDevice->byLocalID <= REV_ID_VT3253_A1)
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_WPAERR);
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
-
/* Turn On Rx DMA */
MACvReceive0(pDevice->PortOffset);
MACvReceive1(pDevice->PortOffset);
/* start the adapter */
MACvStart(pDevice->PortOffset);
-
- netif_stop_queue(pDevice->dev);
}
static void device_init_diversity_timer(struct vnt_private *pDevice)
{
init_timer(&pDevice->TimerSQ3Tmax1);
pDevice->TimerSQ3Tmax1.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
+ pDevice->TimerSQ3Tmax1.function = TimerSQ3CallBack;
pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax2);
pDevice->TimerSQ3Tmax2.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
+ pDevice->TimerSQ3Tmax2.function = TimerSQ3CallBack;
pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax3);
pDevice->TimerSQ3Tmax3.data = (unsigned long) pDevice;
- pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerState1CallBack;
+ pDevice->TimerSQ3Tmax3.function = TimerState1CallBack;
pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
}
-static bool device_release_WPADEV(struct vnt_private *pDevice)
-{
- viawget_wpa_header *wpahdr;
- int ii = 0;
-
- //send device close to wpa_supplicnat layer
- if (pDevice->bWPADEVUp) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
-
- while (pDevice->bWPADEVUp) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ / 20); //wait 50ms
- ii++;
- if (ii > 20)
- break;
- }
- }
- return true;
-}
-
-static const struct net_device_ops device_netdev_ops = {
- .ndo_open = device_open,
- .ndo_stop = device_close,
- .ndo_do_ioctl = device_ioctl,
- .ndo_start_xmit = device_xmit,
- .ndo_set_rx_mode = device_set_multi,
-};
-
-static int
-vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
-{
- static bool bFirst = true;
- struct net_device *dev = NULL;
- PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
- struct vnt_private *pDevice;
- int rc;
-
- dev = alloc_etherdev(sizeof(*pDevice));
-
- pDevice = netdev_priv(dev);
-
- if (dev == NULL) {
- pr_err(DEVICE_NAME ": allocate net device failed\n");
- return -ENOMEM;
- }
-
- // Chain it all together
- SET_NETDEV_DEV(dev, &pcid->dev);
-
- if (bFirst) {
- pr_notice("%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
- pr_notice("Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
- bFirst = false;
- }
-
- vt6655_init_info(pcid, &pDevice, pChip_info);
- pDevice->dev = dev;
-
- if (pci_enable_device(pcid)) {
- device_free_info(pDevice);
- return -ENODEV;
- }
- dev->irq = pcid->irq;
-
-#ifdef DEBUG
- pr_debug("Before get pci_info memaddr is %x\n", pDevice->memaddr);
-#endif
- if (!device_get_pci_info(pDevice, pcid)) {
- pr_err(DEVICE_NAME ": Failed to find PCI device.\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
-#if 1
-
-#ifdef DEBUG
-
- pr_debug("after get pci_info memaddr is %x, io addr is %x,io_size is %d\n", pDevice->memaddr, pDevice->ioaddr, pDevice->io_size);
- {
- int i;
- u32 bar, len;
- u32 address[] = {
- PCI_BASE_ADDRESS_0,
- PCI_BASE_ADDRESS_1,
- PCI_BASE_ADDRESS_2,
- PCI_BASE_ADDRESS_3,
- PCI_BASE_ADDRESS_4,
- PCI_BASE_ADDRESS_5,
- 0};
- for (i = 0; address[i]; i++) {
- pci_read_config_dword(pcid, address[i], &bar);
- pr_debug("bar %d is %x\n", i, bar);
- if (!bar) {
- pr_debug("bar %d not implemented\n", i);
- continue;
- }
- if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
- /* This is IO */
-
- len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
- len = len & ~(len - 1);
-
- pr_debug("IO space: len in IO %x, BAR %d\n", len, i);
- } else {
- len = bar & 0xFFFFFFF0;
- len = ~len + 1;
-
- pr_debug("len in MEM %x, BAR %d\n", len, i);
- }
- }
- }
-#endif
-
-#endif
-
- pDevice->PortOffset = ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
-
- if (pDevice->PortOffset == NULL) {
- pr_err(DEVICE_NAME ": Failed to IO remapping ..\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
- rc = pci_request_regions(pcid, DEVICE_NAME);
- if (rc) {
- pr_err(DEVICE_NAME ": Failed to find PCI device\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
-
- dev->base_addr = pDevice->ioaddr;
- // do reset
- if (!MACbSoftwareReset(pDevice->PortOffset)) {
- pr_err(DEVICE_NAME ": Failed to access MAC hardware..\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
- // initial to reload eeprom
- MACvInitialize(pDevice->PortOffset);
- MACvReadEtherAddress(pDevice->PortOffset, dev->dev_addr);
-
- device_get_options(pDevice);
- device_set_options(pDevice);
- //Mask out the options cannot be set to the chip
- pDevice->sOpts.flags &= pChip_info->flags;
-
- //Enable the chip specified capabilities
- pDevice->flags = pDevice->sOpts.flags | (pChip_info->flags & 0xFF000000UL);
- pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (void *)pDevice;
- pDevice->pMgmt = &(pDevice->sMgmtObj);
-
- dev->irq = pcid->irq;
- dev->netdev_ops = &device_netdev_ops;
-
- dev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def;
-
- rc = register_netdev(dev);
- if (rc) {
- pr_err(DEVICE_NAME " Failed to register netdev\n");
- device_free_info(pDevice);
- return -ENODEV;
- }
- device_print_info(pDevice);
- pci_set_drvdata(pcid, pDevice);
- return 0;
-}
-
static void device_print_info(struct vnt_private *pDevice)
{
- struct net_device *dev = pDevice->dev;
+ dev_info(&pDevice->pcid->dev, "%s\n", get_chip_name(pDevice->chip_id));
- pr_info("%s: %s\n", dev->name, get_chip_name(pDevice->chip_id));
- pr_info("%s: MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
- dev->name, dev->dev_addr, (unsigned long)pDevice->ioaddr,
- (unsigned long)pDevice->PortOffset, pDevice->dev->irq);
+ dev_info(&pDevice->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
+ pDevice->abyCurrentNetAddr, (unsigned long)pDevice->ioaddr,
+ (unsigned long)pDevice->PortOffset, pDevice->pcid->irq);
}
static void vt6655_init_info(struct pci_dev *pcid,
@@ -1003,31 +564,20 @@ static bool device_get_pci_info(struct vnt_private *pDevice,
static void device_free_info(struct vnt_private *pDevice)
{
- struct net_device *dev = pDevice->dev;
-
- ASSERT(pDevice);
-//2008-0714-01<Add>by chester
- device_release_WPADEV(pDevice);
-
-//2008-07-21-01<Add>by MikeLiu
-//unregister wpadev
- if (wpa_set_wpadev(pDevice, 0) != 0)
- pr_err("unregister wpadev fail?\n");
+ if (!pDevice)
+ return;
-#ifdef HOSTAP
- if (dev)
- vt6655_hostap_set_hostapd(pDevice, 0, 0);
-#endif
- if (dev)
- unregister_netdev(dev);
+ if (pDevice->mac_hw)
+ ieee80211_unregister_hw(pDevice->hw);
if (pDevice->PortOffset)
iounmap(pDevice->PortOffset);
if (pDevice->pcid)
pci_release_regions(pDevice->pcid);
- if (dev)
- free_netdev(dev);
+
+ if (pDevice->hw)
+ ieee80211_free_hw(pDevice->hw);
}
static bool device_init_rings(struct vnt_private *pDevice)
@@ -1176,21 +726,6 @@ static void device_init_rd1_ring(struct vnt_private *pDevice)
pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
}
-static void device_init_defrag_cb(struct vnt_private *pDevice)
-{
- int i;
- PSDeFragControlBlock pDeF;
-
- /* Init the fragment ctl entries */
- for (i = 0; i < CB_MAX_RX_FRAG; i++) {
- pDeF = &(pDevice->sRxDFCB[i]);
- if (!device_alloc_frag_buf(pDevice, pDeF))
- dev_err(&pDevice->pcid->dev, "can not alloc frag bufs\n");
- }
- pDevice->cbDFCB = CB_MAX_RX_FRAG;
- pDevice->cbFreeDFCB = pDevice->cbDFCB;
-}
-
static void device_free_rd0_ring(struct vnt_private *pDevice)
{
int i;
@@ -1204,7 +739,7 @@ static void device_free_rd0_ring(struct vnt_private *pDevice)
dev_kfree_skb(pRDInfo->skb);
- kfree((void *)pDesc->pRDInfo);
+ kfree(pDesc->pRDInfo);
}
}
@@ -1221,21 +756,7 @@ static void device_free_rd1_ring(struct vnt_private *pDevice)
dev_kfree_skb(pRDInfo->skb);
- kfree((void *)pDesc->pRDInfo);
- }
-}
-
-static void device_free_frag_buf(struct vnt_private *pDevice)
-{
- PSDeFragControlBlock pDeF;
- int i;
-
- for (i = 0; i < CB_MAX_RX_FRAG; i++) {
- pDeF = &(pDevice->sRxDFCB[i]);
-
- if (pDeF->skb)
- dev_kfree_skb(pDeF->skb);
-
+ kfree(pDesc->pRDInfo);
}
}
@@ -1305,7 +826,7 @@ static void device_free_td0_ring(struct vnt_private *pDevice)
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((void *)pDesc->pTDInfo);
+ kfree(pDesc->pTDInfo);
}
}
@@ -1324,7 +845,7 @@ static void device_free_td1_ring(struct vnt_private *pDevice)
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((void *)pDesc->pTDInfo);
+ kfree(pDesc->pTDInfo);
}
}
@@ -1340,7 +861,7 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
pRD = pRD->next) {
if (works++ > 15)
break;
- if (device_receive_frame(pDevice, pRD)) {
+ if (vnt_receive_frame(pDevice, pRD)) {
if (!device_alloc_rx_buf(pDevice, pRD)) {
dev_err(&pDevice->pcid->dev,
"can not allocate rx buf\n");
@@ -1348,7 +869,6 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
}
}
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
- pDevice->dev->last_rx = jiffies;
}
pDevice->pCurrRD[uIdx] = pRD;
@@ -1364,9 +884,12 @@ static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pRD)
if (pRDInfo->skb == NULL)
return false;
ASSERT(pRDInfo->skb);
- pRDInfo->skb->dev = pDevice->dev;
- pRDInfo->skb_dma = pci_map_single(pDevice->pcid, skb_tail_pointer(pRDInfo->skb),
- pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ pRDInfo->skb_dma =
+ pci_map_single(pDevice->pcid,
+ skb_put(pRDInfo->skb, skb_tailroom(pRDInfo->skb)),
+ pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
*((unsigned int *)&(pRD->m_rd0RD0)) = 0; /* FIX cast */
pRD->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
@@ -1377,31 +900,84 @@ static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pRD)
return true;
}
-bool device_alloc_frag_buf(struct vnt_private *pDevice,
- PSDeFragControlBlock pDeF)
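+/* Auto fallback rate tables: indexed by [current tx rate - RATE_18M][retry count] */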
+static const u8 fallback_rate0[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
+ {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
+ {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
+};
+
+static const u8 fallback_rate1[5][5] = {
+ {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
+ {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
+ {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
+ {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
+};
+
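+/* Translate the TSR0/TSR1 status of a completed TX descriptor into mac80211 tx status */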
+static int vnt_int_report_rate(struct vnt_private *priv,
+ PDEVICE_TD_INFO context, u8 tsr0, u8 tsr1)
{
- pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDeF->skb == NULL)
- return false;
- ASSERT(pDeF->skb);
- pDeF->skb->dev = pDevice->dev;
+ struct vnt_tx_fifo_head *fifo_head;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_rate *rate;
+ u16 fb_option;
+ u8 tx_retry = (tsr0 & TSR0_NCR);
+ s8 idx;
+
+ if (!context)
+ return -ENOMEM;
- return true;
+ if (!context->skb)
+ return -EINVAL;
+
+ fifo_head = (struct vnt_tx_fifo_head *)context->buf;
+ fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
+ (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));
+
+ info = IEEE80211_SKB_CB(context->skb);
+ idx = info->control.rates[0].idx;
+
+ if (fb_option && !(tsr1 & TSR1_TERR)) {
+ u8 tx_rate;
+ u8 retry = tx_retry;
+
+ rate = ieee80211_get_tx_rate(priv->hw, info);
+ tx_rate = rate->hw_value - RATE_18M;
+
+ if (retry > 4)
+ retry = 4;
+
+ if (fb_option & FIFOCTL_AUTO_FB_0)
+ tx_rate = fallback_rate0[tx_rate][retry];
+ else if (fb_option & FIFOCTL_AUTO_FB_1)
+ tx_rate = fallback_rate1[tx_rate][retry];
+
+ if (info->band == IEEE80211_BAND_5GHZ)
+ idx = tx_rate - RATE_6M;
+ else
+ idx = tx_rate;
+ }
+
+ ieee80211_tx_info_clear_status(info);
+
+ info->status.rates[0].count = tx_retry;
+
+ if (!(tsr1 & TSR1_TERR)) {
+ info->status.rates[0].idx = idx;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ return 0;
}
static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
{
PSTxDesc pTD;
- bool bFull = false;
int works = 0;
unsigned char byTsr0;
unsigned char byTsr1;
- unsigned int uFrameSize, uFIFOHeaderSize;
- PSTxBufHead pTxBufHead;
- struct net_device_stats *pStats = &pDevice->dev->stats;
- struct sk_buff *skb;
- unsigned int uNodeIndex;
- PSMgmtObject pMgmt = pDevice->pMgmt;
for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
@@ -1415,22 +991,8 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
//Only the status of first TD in the chain is correct
if (pTD->m_td1TD1.byTCR & TCR_STP) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
- uFIFOHeaderSize = pTD->pTDInfo->dwHeaderLength;
- uFrameSize = pTD->pTDInfo->dwReqCount - uFIFOHeaderSize;
- pTxBufHead = (PSTxBufHead) (pTD->pTDInfo->buf);
- // Update the statistics based on the Transmit status
- // now, we DONT check TSR0_CDH
-
- STAvUpdateTDStatCounter(&pDevice->scStatistic,
- byTsr0, byTsr1,
- (unsigned char *)(pTD->pTDInfo->buf + uFIFOHeaderSize),
- uFrameSize, uIdx);
-
- BSSvUpdateNodeTxCounter(pDevice,
- byTsr0, byTsr1,
- (unsigned char *)(pTD->pTDInfo->buf),
- uFIFOHeaderSize
- );
+
+ vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
if (!(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
@@ -1438,28 +1000,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
(int)uIdx, byTsr1,
byTsr0);
}
- if ((pTxBufHead->wFragCtl & FRAGCTL_ENDFRAG) != FRAGCTL_NONFRAG)
- pDevice->s802_11Counter.TransmittedFragmentCount++;
-
- pStats->tx_packets++;
- pStats->tx_bytes += pTD->pTDInfo->skb->len;
} else {
pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
(int)uIdx, byTsr1, byTsr0);
- pStats->tx_errors++;
- pStats->tx_dropped++;
- }
- }
-
- if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
- if (pDevice->bEnableHostapd) {
- pr_debug("tx call back netif..\n");
- skb = pTD->pTDInfo->skb;
- skb->dev = pDevice->apdev;
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
}
}
@@ -1468,49 +1011,12 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
(int)uIdx, byTsr1, byTsr0);
}
-
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
- (pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)) {
- unsigned short wAID;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
-
- skb = pTD->pTDInfo->skb;
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
- skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
- // set tx map
- wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pTD->pTDInfo->byFlags &= ~(TD_FLAGS_NETIF_SKB);
- pr_debug("tx_srv:tx fail re-queue sta index= %d, QueCnt= %d\n",
- (int)uNodeIndex,
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
- pStats->tx_errors--;
- pStats->tx_dropped--;
- }
- }
- }
}
device_free_tx_buf(pDevice, pTD);
pDevice->iTDUsed[uIdx]--;
}
}
- if (uIdx == TYPE_AC0DMA) {
- // RESERV_AC0DMA reserved for relay
-
- if (AVAIL_TD(pDevice, uIdx) < RESERV_AC0DMA) {
- bFull = true;
- pr_debug(" AC0DMA is Full = %d\n",
- pDevice->iTDUsed[uIdx]);
- }
- if (netif_queue_stopped(pDevice->dev) && !bFull)
- netif_wake_queue(pDevice->dev);
-
- }
-
pDevice->apTailTD[uIdx] = pTD;
return works;
@@ -1521,10 +1027,6 @@ static void device_error(struct vnt_private *pDevice, unsigned short status)
if (status & ISR_FETALERR) {
dev_err(&pDevice->pcid->dev, "Hardware fatal error\n");
- netif_stop_queue(pDevice->dev);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&(pDevice->pMgmt->sTimerSecondCallback));
- pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
return;
}
@@ -1541,7 +1043,9 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
PCI_DMA_TODEVICE);
}
- if ((pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0)
+ if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+ ieee80211_tx_status_irqsafe(pDevice->hw, skb);
+ else
dev_kfree_skb_irq(skb);
pTDInfo->skb_dma = 0;
@@ -1549,671 +1053,13 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
pTDInfo->byFlags = 0;
}
-static int device_open(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int i;
-#ifdef WPA_SM_Transtatus
- extern SWPAResult wpa_Result;
-#endif
-
- pDevice->rx_buf_sz = PKT_BUF_SZ;
- if (!device_init_rings(pDevice))
- return -ENOMEM;
-
-//2008-5-13 <add> by chester
- i = request_irq(pDevice->pcid->irq, &device_intr, IRQF_SHARED, dev->name, dev);
- if (i)
- return i;
-
-#ifdef WPA_SM_Transtatus
- memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = false;
- pDevice->fWPA_Authened = false;
-#endif
- pr_debug("call device init rd0 ring\n");
- device_init_rd0_ring(pDevice);
- device_init_rd1_ring(pDevice);
- device_init_defrag_cb(pDevice);
- device_init_td0_ring(pDevice);
- device_init_td1_ring(pDevice);
-
- if (pDevice->bDiversityRegCtlON)
- device_init_diversity_timer(pDevice);
-
- vMgrObjectInit(pDevice);
- vMgrTimerInit(pDevice);
-
- pr_debug("call device_init_registers\n");
- device_init_registers(pDevice);
-
- MACvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
- memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
- device_set_multi(pDevice->dev);
-
- // Init for Key Management
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- add_timer(&(pDevice->pMgmt->sTimerSecondCallback));
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = false;
-#endif
- pDevice->byReAssocCount = 0;
- pDevice->bWPADEVUp = false;
- // Patch: if WEP key already set by iwconfig but device not yet open
- if (pDevice->bEncryptionEnable && pDevice->bTransmitKey) {
- KeybSetDefaultKey(&(pDevice->sKey),
- (unsigned long)(pDevice->byKeyIndex | (1 << 31)),
- pDevice->uKeyLength,
- NULL,
- pDevice->abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID
- );
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
-
- pr_debug("call MACvIntEnable\n");
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
-
- if (pDevice->pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- } else {
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- }
- pDevice->flags |= DEVICE_FLAGS_OPENED;
-
- pr_debug("device_open success..\n");
- return 0;
-}
-
-static int device_close(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
-//2007-1121-02<Add>by EinsnLiu
- if (pDevice->bLinkPass) {
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- mdelay(30);
- }
-
- del_timer(&pDevice->sTimerTxData);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- if (pDevice->bDiversityRegCtlON) {
- del_timer(&pDevice->TimerSQ3Tmax1);
- del_timer(&pDevice->TimerSQ3Tmax2);
- del_timer(&pDevice->TimerSQ3Tmax3);
- }
-
- netif_stop_queue(dev);
- pDevice->bCmdRunning = false;
- MACbShutdown(pDevice->PortOffset);
- MACbSoftwareReset(pDevice->PortOffset);
- CARDbRadioPowerOff(pDevice);
-
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- device_free_td0_ring(pDevice);
- device_free_td1_ring(pDevice);
- device_free_rd0_ring(pDevice);
- device_free_rd1_ring(pDevice);
- device_free_frag_buf(pDevice);
- device_free_rings(pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
- free_irq(dev->irq, dev);
- pDevice->flags &= (~DEVICE_FLAGS_OPENED);
- //2008-0714-01<Add>by chester
- device_release_WPADEV(pDevice);
-
- pr_debug("device_close..\n");
- return 0;
-}
-
-static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- unsigned char *pbMPDU;
- unsigned int cbMPDULen = 0;
-
- pr_debug("device_dma0_tx_80211\n");
- spin_lock_irq(&pDevice->lock);
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
- pr_debug("device_dma0_tx_80211, td0 <=0\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pDevice->bStopTx0Pkt) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- cbMPDULen = skb->len;
- pbMPDU = skb->data;
-
- vDMA0_tx_80211(pDevice, skb, pbMPDU, cbMPDULen);
-
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-bool device_dma0_xmit(struct vnt_private *pDevice,
- struct sk_buff *skb, unsigned int uNodeIndex)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int cbFrameBodySize;
- unsigned int uMACfragNum;
- unsigned char byPktType;
- bool bNeedEncryption = false;
- PSKeyItem pTransmitKey = NULL;
- unsigned int cbHeaderSize;
- unsigned int ii;
- SKeyItem STempKey;
-
- if (pDevice->bStopTx0Pkt) {
- dev_kfree_skb_irq(skb);
- return false;
- }
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
- dev_kfree_skb_irq(skb);
- pr_debug("device_dma0_xmit, td0 <=0\n");
- return false;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (pDevice->uAssocCount == 0) {
- dev_kfree_skb_irq(skb);
- pr_debug("device_dma0_xmit, assocCount = 0\n");
- return false;
- }
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_TXDMA0];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
- cbFrameBodySize = skb->len - ETH_HLEN;
-
- // 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_TXDMA0)) {
- dev_kfree_skb_irq(skb);
- return false;
- }
- byPktType = (unsigned char)pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- }
- } else {
- pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
- }
-
- //preamble type
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = PREAMBLE_LONG;
-
- pr_debug("dma0: pDevice->wCurrentRate = %d\n", pDevice->wCurrentRate);
-
- if (pDevice->wCurrentRate <= RATE_11M) {
- byPktType = PK_TYPE_11B;
- } else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- byPktType = PK_TYPE_11A;
- } else {
- if (pDevice->bProtectMode)
- byPktType = PK_TYPE_11GB;
- else
- byPktType = PK_TYPE_11GA;
- }
-
- if (pDevice->bEncryptionEnable)
- bNeedEncryption = true;
-
- if (pDevice->bEnableHostWEP) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
- );
- }
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_TXDMA0, pHeadTD,
- &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
- );
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
-
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == (uMACfragNum - 1))
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- // Save the information needed by the tx interrupt handler
- // to complete the Send request
- pLastTD->pTDInfo->skb = skb;
- pLastTD->pTDInfo->byFlags = 0;
- pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
-
- pDevice->apCurrTD[TYPE_TXDMA0] = pHeadTD;
-
- MACvTransmit0(pDevice->PortOffset);
-
- return true;
-}
-
-//TYPE_AC0DMA data tx
-static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int uNodeIndex = 0;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned short wAID;
- unsigned int uMACfragNum = 1;
- unsigned int cbFrameBodySize;
- unsigned char byPktType;
- unsigned int cbHeaderSize;
- bool bNeedEncryption = false;
- PSKeyItem pTransmitKey = NULL;
- SKeyItem STempKey;
- unsigned int ii;
- bool bTKIP_UseGTK = false;
- bool bNeedDeAuth = false;
- unsigned char *pbyBSSID;
- bool bNodeExist = false;
-
- spin_lock_irq(&pDevice->lock);
- if (!pDevice->bLinkPass) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pDevice->bStopDataPkt) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (pDevice->uAssocCount == 0) {
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- if (is_multicast_ether_addr((unsigned char *)(skb->data))) {
- uNodeIndex = 0;
- bNodeExist = true;
- if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
- pMgmt->sNodeDBTable[0].wEnQueueCnt++;
- // set tx map
- pMgmt->abyPSTxMap[0] |= byMask[0];
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
- skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
- // set tx map
- wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pr_debug("Set:pMgmt->abyPSTxMap[%d]= %d\n",
- (wAID >> 3),
- pMgmt->abyPSTxMap[wAID >> 3]);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->byPreambleType = pDevice->byShortPreamble;
- else
- pDevice->byPreambleType = PREAMBLE_LONG;
-
- bNodeExist = true;
-
- }
- }
-
- if (!bNodeExist) {
- pr_debug("Unknown STA not found in node DB\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
- cbFrameBodySize = skb->len - ETH_HLEN;
- // 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- if (pDevice->bEncryptionEnable) {
- bNeedEncryption = true;
- // get Transmit key
- do {
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- pbyBSSID = pDevice->abyBSSID;
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
- bTKIP_UseGTK = true;
- pr_debug("Get GTK\n");
- break;
- }
- } else {
- pr_debug("Get PTK\n");
- break;
- }
- } else if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- pbyBSSID = pDevice->sTxEthHeader.abyDstAddr; //TO_DS = 0 and FROM_DS = 0 --> 802.11 MAC Address1
- pr_debug("IBSS Serach Key:\n");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x\n", *(pbyBSSID+ii));
- pr_debug("\n");
-
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
- break;
- }
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- pr_debug("IBSS and KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- else
- pr_debug("NOT IBSS and KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- } else {
- bTKIP_UseGTK = true;
- pr_debug("Get GTK\n");
- }
- } while (false);
- }
-
- if (pDevice->bEnableHostWEP) {
- pr_debug("acdma0: STA index %d\n", uNodeIndex);
- if (pDevice->bEncryptionEnable) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
- );
- }
- }
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
- pr_debug("uMACfragNum > AVAIL_TD(TYPE_AC0DMA) = %d\n",
- uMACfragNum);
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
-
- if (pTransmitKey != NULL) {
- if ((pTransmitKey->byCipherSuite == KEY_CTL_WEP) &&
- (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)) {
- uMACfragNum = 1; //WEP256 doesn't support fragment
- }
- }
-
- byPktType = (unsigned char)pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
- } else {
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
- (pDevice->uConnectionRate <= RATE_6M)) {
- pDevice->wCurrentRate = RATE_6M;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
-
- }
- }
- pDevice->byACKRate = (unsigned char) pDevice->wCurrentRate;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- } else {
- //auto rate
- if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- pDevice->wCurrentRate = RATE_1M;
- pDevice->byACKRate = RATE_1M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- } else {
- pDevice->wCurrentRate = RATE_6M;
- pDevice->byACKRate = RATE_6M;
- pDevice->byTopCCKBasicRate = RATE_1M;
- pDevice->byTopOFDMBasicRate = RATE_6M;
- }
- } else {
- VNTWIFIvGetTxRate(pDevice->pMgmt,
- pDevice->sTxEthHeader.abyDstAddr,
- &(pDevice->wCurrentRate),
- &(pDevice->byACKRate),
- &(pDevice->byTopCCKBasicRate),
- &(pDevice->byTopOFDMBasicRate));
-
- }
- }
-
-
- if (pDevice->wCurrentRate <= RATE_11M) {
- byPktType = PK_TYPE_11B;
- } else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- byPktType = PK_TYPE_11A;
- } else {
- if (pDevice->bProtectMode)
- byPktType = PK_TYPE_11GB;
- else
- byPktType = PK_TYPE_11GA;
- }
-
- if (bNeedEncryption) {
- pr_debug("ntohs Pkt Type=%04x\n",
- ntohs(pDevice->sTxEthHeader.wType));
- if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
- bNeedEncryption = false;
- pr_debug("Pkt Type=%04x\n",
- (pDevice->sTxEthHeader.wType));
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- if (pTransmitKey == NULL) {
- pr_debug("Don't Find TX KEY\n");
- } else {
- if (bTKIP_UseGTK) {
- pr_debug("error: KEY is GTK!!~~\n");
- } else {
- pr_debug("Find PTK [%lX]\n",
- pTransmitKey->dwKeyIndex);
- bNeedEncryption = true;
- }
- }
- }
-
- if (pDevice->byCntMeasure == 2) {
- bNeedDeAuth = true;
- pDevice->s802_11Counter.TKIPCounterMeasuresInvoked++;
- }
-
- if (pDevice->bEnableHostWEP) {
- if ((uNodeIndex != 0) &&
- (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
- pr_debug("Find PTK [%lX]\n",
- pTransmitKey->dwKeyIndex);
- bNeedEncryption = true;
- }
- }
- } else {
- if (pTransmitKey == NULL) {
- pr_debug("return no tx key\n");
- dev_kfree_skb_irq(skb);
- spin_unlock_irq(&pDevice->lock);
- return 0;
- }
- }
- }
-
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
- cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
- &pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
- &uMACfragNum,
- &cbHeaderSize
- );
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == uMACfragNum - 1)
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- // Save the information needed by the tx interrupt handler
- // to complete the Send request
- pLastTD->pTDInfo->skb = skb;
- pLastTD->pTDInfo->byFlags = 0;
- pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
- pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for send null packet
-
- if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 1)
- netif_stop_queue(dev);
-
- pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
-
- if (pDevice->bFixRate)
- pr_debug("FixRate:Rate is %d,TxPower is %d\n", pDevice->wCurrentRate, pDevice->byCurPwr);
-
- {
- unsigned char Protocol_Version; //802.1x Authentication
- unsigned char Packet_Type; //802.1x Authentication
- unsigned char Descriptor_type;
- unsigned short Key_info;
- bool bTxeapol_key = false;
-
- Protocol_Version = skb->data[ETH_HLEN];
- Packet_Type = skb->data[ETH_HLEN+1];
- Descriptor_type = skb->data[ETH_HLEN+1+1+2];
- Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
- if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
- (Packet_Type == 3)) { //802.1x OR eapol-key challenge frame transfer
- bTxeapol_key = true;
- if ((Descriptor_type == 254) || (Descriptor_type == 2)) { //WPA or RSN
- if (!(Key_info & BIT3) && //group-key challenge
- (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
- pDevice->fWPA_Authened = true;
- if (Descriptor_type == 254)
- pr_debug("WPA ");
- else
- pr_debug("WPA2 ");
- pr_debug("Authentication completed!!\n");
- }
- }
- }
- }
- }
-
- MACvTransmitAC0(pDevice->PortOffset);
-
- dev->trans_start = jiffies;
-
- spin_unlock_irq(&pDevice->lock);
- return 0;
-}
-
static irqreturn_t device_intr(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct vnt_private *pDevice = netdev_priv(dev);
+ struct vnt_private *pDevice = dev_instance;
int max_count = 0;
unsigned long dwMIBCounter = 0;
- PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned char byOrgPageSel = 0;
int handled = 0;
- unsigned char byData = 0;
int ii = 0;
unsigned long flags;
@@ -2256,96 +1102,13 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
device_error(pDevice, pDevice->dwIsr);
}
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
- if (pDevice->dwIsr & ISR_MEASURESTART) {
- // 802.11h measure start
- pDevice->byOrgChannel = pDevice->byCurrentCh;
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byOrgRCR));
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, (RCR_RXALLTYPE | RCR_UNICAST | RCR_BROADCAST | RCR_MULTICAST | RCR_WPAERR));
- MACvSelectPage1(pDevice->PortOffset);
- VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR0, &(pDevice->dwOrgMAR0));
- VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR4, &(pDevice->dwOrgMAR4));
- MACvSelectPage0(pDevice->PortOffset);
- //xxxx
- if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel)) {
- pDevice->bMeasureInProgress = true;
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byBasicMap = 0;
- pDevice->byCCAFraction = 0;
- for (ii = 0; ii < 8; ii++)
- pDevice->dwRPIs[ii] = 0;
-
- } else {
- // can not measure because set channel fail
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_INCAPABLE);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- }
- }
- if (pDevice->dwIsr & ISR_MEASUREEND) {
- // 802.11h measure end
- pDevice->bMeasureInProgress = false;
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRBBSTS, &byData);
- pDevice->byBasicMap |= (byData >> 4);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_CCAFRACTION, &pDevice->byCCAFraction);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRCTL, &byData);
- // clear measure control
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
- MACvSelectPage0(pDevice->PortOffset);
- set_channel(pDevice, pDevice->byOrgChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- if (byData & MSRCTL_FINISH) {
- // measure success
- s_vCompleteCurrentMeasure(pDevice, 0);
- } else {
- // can not measure because not ready before end of measure time
- s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_LATE);
- }
- }
- if (pDevice->dwIsr & ISR_QUIETSTART) {
- do {
- ;
- } while (!CARDbStartQuiet(pDevice));
- }
- }
-
if (pDevice->dwIsr & ISR_TBTT) {
- if (pDevice->bEnableFirstQuiet) {
- pDevice->byQuietStartCount--;
- if (pDevice->byQuietStartCount == 0) {
- pDevice->bEnableFirstQuiet = false;
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
- MACvSelectPage0(pDevice->PortOffset);
- }
- }
- if (pDevice->bChannelSwitch &&
- (pDevice->op_mode == NL80211_IFTYPE_STATION)) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = false;
- set_channel(pDevice, pDevice->byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
-
- }
- }
- if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
- if ((pDevice->bUpdateBBVGA) && pDevice->bLinkPass && (pDevice->uCurrRSSI != 0)) {
+ if (pDevice->vif &&
+ pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
+ if (pDevice->bUpdateBBVGA &&
+ !(pDevice->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
+ pDevice->vif->bss_conf.assoc &&
+ pDevice->uCurrRSSI) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
@@ -2384,10 +1147,11 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
if (pDevice->bEnablePSMode)
PSbIsNextTBTTWakeUp((void *)pDevice);
- if ((pDevice->op_mode == NL80211_IFTYPE_AP) ||
- (pDevice->op_mode == NL80211_IFTYPE_ADHOC)) {
+ if ((pDevice->op_mode == NL80211_IFTYPE_AP ||
+ pDevice->op_mode == NL80211_IFTYPE_ADHOC) &&
+ pDevice->vif->bss_conf.enable_beacon) {
MACvOneShotTimer1MicroSec(pDevice->PortOffset,
- (pMgmt->wIBSSBeaconPeriod - MAKE_BEACON_RESERVED) << 10);
+ (pDevice->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10);
}
/* TODO: adhoc PS mode */
@@ -2400,34 +1164,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
pDevice->cbBeaconBufReadySetCnt = 0;
}
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- if (pMgmt->byDTIMCount > 0) {
- pMgmt->byDTIMCount--;
- pMgmt->sNodeDBTable[0].bRxPSPoll = false;
- } else {
- if (pMgmt->byDTIMCount == 0) {
- // check if mutltcast tx bufferring
- pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- }
- }
- }
pDevice->bBeaconSent = true;
-
- if (pDevice->bChannelSwitch) {
- pDevice->byChannelSwitchCount--;
- if (pDevice->byChannelSwitchCount == 0) {
- pDevice->bChannelSwitch = false;
- set_channel(pDevice, pDevice->byNewChannel);
- VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
- MACvSelectPage1(pDevice->PortOffset);
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
- MACvSelectPage0(pDevice->PortOffset);
- CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
- }
- }
-
}
if (pDevice->dwIsr & ISR_RXDMA0)
@@ -2443,14 +1180,18 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
max_count += device_tx_srv(pDevice, TYPE_AC0DMA);
if (pDevice->dwIsr & ISR_SOFTTIMER1) {
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- if (pDevice->bShortSlotTime)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- else
- pMgmt->wCurrCapInfo &= ~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1));
+ if (pDevice->vif) {
+ if (pDevice->vif->bss_conf.enable_beacon)
+ vnt_beacon_make(pDevice, pDevice->vif);
}
- bMgrPrepareBeaconToSend(pDevice, pMgmt);
- pDevice->byCntMeasure = 0;
+ }
+
+ /* If both buffers are available, wake the queue */
+ if (pDevice->vif) {
+ if (AVAIL_TD(pDevice, TYPE_TXDMA0) &&
+ AVAIL_TD(pDevice, TYPE_AC0DMA) &&
+ ieee80211_queue_stopped(pDevice->hw, 0))
+ ieee80211_wake_queues(pDevice->hw);
}
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2472,550 +1213,655 @@ static irqreturn_t device_intr(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-//2008-8-4 <add> by chester
-static int Config_FileGetParameter(unsigned char *string,
- unsigned char *dest, unsigned char *source)
+static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
- unsigned char buf1[100];
- int source_len = strlen(source);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ PSTxDesc head_td;
+ u32 dma_idx = TYPE_AC0DMA;
+ unsigned long flags;
- memset(buf1, 0, 100);
- strcat(buf1, string);
- strcat(buf1, "=");
- source += strlen(buf1);
+ spin_lock_irqsave(&priv->lock, flags);
- memcpy(dest, source, source_len - strlen(buf1));
- return true;
-}
+ if (!ieee80211_is_data(hdr->frame_control))
+ dma_idx = TYPE_TXDMA0;
-int Config_FileOperation(struct vnt_private *pDevice,
- bool fwrite, unsigned char *Parameter)
-{
- unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
- unsigned char tmpbuffer[20];
- struct file *file;
- int result = 0;
-
- if (!buffer) {
- pr_err("allocate mem for file fail?\n");
- return -1;
- }
- file = filp_open(CONFIG_PATH, O_RDONLY, 0);
- if (IS_ERR(file)) {
- kfree(buffer);
- pr_err("Config_FileOperation:open file fail?\n");
- return -1;
+ if (AVAIL_TD(priv, dma_idx) < 1) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -ENOMEM;
}
- if (kernel_read(file, 0, buffer, 1024) < 0) {
- pr_err("read file error?\n");
- result = -1;
- goto error1;
- }
+ head_td = priv->apCurrTD[dma_idx];
- if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
- pr_err("get parameter error?\n");
- result = -1;
- goto error1;
- }
+ head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- if (memcmp(tmpbuffer, "USA", 3) == 0) {
- result = ZoneType_USA;
- } else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
- result = ZoneType_Japan;
- } else if (memcmp(tmpbuffer, "EUROPE", 5) == 0) {
- result = ZoneType_Europe;
- } else {
- result = -1;
- pr_err("Unknown Zonetype[%s]?\n", tmpbuffer);
- }
+ head_td->pTDInfo->skb = skb;
+
+ priv->iTDUsed[dma_idx]++;
+
+ /* Take ownership */
+ wmb();
+ head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+
+ /* Advance to the next descriptor */
+ wmb();
+ priv->apCurrTD[dma_idx] = head_td->next;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
+
+ if (MACbIsRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
+ MACbPSWakeup(priv->PortOffset);
+
+ spin_lock_irqsave(&priv->lock, flags);
-error1:
- kfree(buffer);
- fput(file);
- return result;
+ priv->bPWBitOn = false;
+
+ head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+
+ if (dma_idx == TYPE_AC0DMA)
+ MACvTransmitAC0(priv->PortOffset);
+ else
+ MACvTransmit0(priv->PortOffset);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
-static void device_set_multi(struct net_device *dev) {
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- u32 mc_filter[2];
- struct netdev_hw_addr *ha;
+static void vnt_tx_80211(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct vnt_private *priv = hw->priv;
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
+ ieee80211_stop_queues(hw);
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- pr_notice("%s: Promiscuous mode enabled\n", dev->name);
- /* Unconditionally log net taps. */
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
- } else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
- || (dev->flags & IFF_ALLMULTI)) {
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, 0xffffffff);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- } else {
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ if (vnt_tx_packet(priv, skb)) {
+ ieee80211_free_txskb(hw, skb);
- mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
- }
- MACvSelectPage1(pDevice->PortOffset);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, mc_filter[0]);
- VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, mc_filter[1]);
- MACvSelectPage0(pDevice->PortOffset);
- pDevice->byRxMode &= ~(RCR_UNICAST);
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
+ ieee80211_wake_queues(hw);
}
+}
+
+static int vnt_start(struct ieee80211_hw *hw)
+{
+ struct vnt_private *priv = hw->priv;
+ int ret;
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- // If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac.
- pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
- pDevice->byRxMode &= ~(RCR_UNICAST);
+ priv->rx_buf_sz = PKT_BUF_SZ;
+ if (!device_init_rings(priv))
+ return -ENOMEM;
+
+ ret = request_irq(priv->pcid->irq, &device_intr,
+ IRQF_SHARED, "vt6655", priv);
+ if (ret) {
+ dev_dbg(&priv->pcid->dev, "failed to start irq\n");
+ return ret;
}
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byRxMode);
- pr_debug("pDevice->byRxMode = %x\n", pDevice->byRxMode);
+ dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
+ device_init_rd0_ring(priv);
+ device_init_rd1_ring(priv);
+ device_init_td0_ring(priv);
+ device_init_td1_ring(priv);
+
+ device_init_registers(priv);
+
+ dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n");
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+
+ ieee80211_wake_queues(hw);
+
+ return 0;
}
-static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static void vnt_stop(struct ieee80211_hw *hw)
{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct iwreq *wrq = (struct iwreq *)rq;
- int rc = 0;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSCmdRequest pReq;
-
- if (pMgmt == NULL) {
- rc = -EFAULT;
- return rc;
- }
+ struct vnt_private *priv = hw->priv;
- switch (cmd) {
- case SIOCGIWNAME:
- rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL);
- break;
+ ieee80211_stop_queues(hw);
+
+ MACbShutdown(priv->PortOffset);
+ MACbSoftwareReset(priv->PortOffset);
+ CARDbRadioPowerOff(priv);
+
+ device_free_td0_ring(priv);
+ device_free_td1_ring(priv);
+ device_free_rd0_ring(priv);
+ device_free_rd1_ring(priv);
+ device_free_rings(priv);
+
+ free_irq(priv->pcid->irq, priv);
+}
+
+static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+
+ priv->vif = vif;
- case SIOCGIWNWID: //0x8b03 support
- rc = -EOPNOTSUPP;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (priv->bDiversityRegCtlON)
+ device_init_diversity_timer(priv);
break;
+ case NL80211_IFTYPE_ADHOC:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
- // Set frequency/channel
- case SIOCSIWFREQ:
- rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
+ case NL80211_IFTYPE_AP:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
- // Get frequency/channel
- case SIOCGIWFREQ:
- rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
+ default:
+ return -EOPNOTSUPP;
+ }
- // Set desired network name (ESSID)
- case SIOCSIWESSID:
+ priv->op_mode = vif->type;
- {
- char essid[IW_ESSID_MAX_SIZE+1];
+ return 0;
+}
- if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) {
- rc = -E2BIG;
- break;
- }
- if (copy_from_user(essid, wrq->u.essid.pointer,
- wrq->u.essid.length)) {
- rc = -EFAULT;
- break;
+static void vnt_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (priv->bDiversityRegCtlON) {
+ del_timer(&priv->TimerSQ3Tmax1);
+ del_timer(&priv->TimerSQ3Tmax2);
+ del_timer(&priv->TimerSQ3Tmax3);
}
- rc = iwctl_siwessid(dev, NULL,
- &(wrq->u.essid), essid);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ break;
+ case NL80211_IFTYPE_AP:
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset,
+ MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
+ break;
+ default:
+ break;
}
- break;
- // Get current network name (ESSID)
- case SIOCGIWESSID:
+ priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
+}
- {
- char essid[IW_ESSID_MAX_SIZE+1];
-
- if (wrq->u.essid.pointer)
- rc = iwctl_giwessid(dev, NULL,
- &(wrq->u.essid), essid);
- if (copy_to_user(wrq->u.essid.pointer,
- essid,
- wrq->u.essid.length))
- rc = -EFAULT;
+
+static int vnt_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct vnt_private *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ u8 bb_type;
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS)
+ PSvEnablePowerSaving(priv, conf->listen_interval);
+ else
+ PSvDisablePowerSaving(priv);
}
- break;
- case SIOCSIWAP:
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
+ (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
+ set_channel(priv, conf->chandef.chan->hw_value);
- rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL);
- break;
+ if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ bb_type = BB_TYPE_11A;
+ else
+ bb_type = BB_TYPE_11G;
- // Get current Access Point (BSSID)
- case SIOCGIWAP:
- rc = iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL);
- break;
+ if (priv->byBBType != bb_type) {
+ priv->byBBType = bb_type;
- // Set desired station name
- case SIOCSIWNICKN:
- pr_debug(" SIOCSIWNICKN\n");
- rc = -EOPNOTSUPP;
- break;
+ CARDbSetPhyParameter(priv, priv->byBBType);
+ }
+ }
- // Get current station name
- case SIOCGIWNICKN:
- pr_debug(" SIOCGIWNICKN\n");
- rc = -EOPNOTSUPP;
- break;
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ if (priv->byBBType == BB_TYPE_11B)
+ priv->wCurrentRate = RATE_1M;
+ else
+ priv->wCurrentRate = RATE_54M;
- // Set the desired bit-rate
- case SIOCSIWRATE:
- rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL);
- break;
+ RFbSetPower(priv, priv->wCurrentRate,
+ conf->chandef.chan->hw_value);
+ }
+
+ return 0;
+}
- // Get the current bit-rate
- case SIOCGIWRATE:
+static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf,
+ u32 changed)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
- break;
+ priv->current_aid = conf->aid;
- // Set the desired RTS threshold
- case SIOCSIWRTS:
+ if (changed & BSS_CHANGED_BSSID)
+ MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
- rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
- break;
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ priv->basic_rates = conf->basic_rates;
- // Get the current RTS threshold
- case SIOCGIWRTS:
+ CARDvUpdateBasicTopRate(priv);
- rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL);
- break;
+ dev_dbg(&priv->pcid->dev,
+ "basic rates %x\n", conf->basic_rates);
+ }
- // Set the desired fragmentation threshold
- case SIOCSIWFRAG:
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (conf->use_short_preamble) {
+ MACvEnableBarkerPreambleMd(priv->PortOffset);
+ priv->byPreambleType = true;
+ } else {
+ MACvDisableBarkerPreambleMd(priv->PortOffset);
+ priv->byPreambleType = false;
+ }
+ }
- rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL);
- break;
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (conf->use_cts_prot)
+ MACvEnableProtectMD(priv->PortOffset);
+ else
+ MACvDisableProtectMD(priv->PortOffset);
+ }
- // Get the current fragmentation threshold
- case SIOCGIWFRAG:
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (conf->use_short_slot)
+ priv->bShortSlotTime = true;
+ else
+ priv->bShortSlotTime = false;
- rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL);
- break;
+ CARDbSetPhyParameter(priv, priv->byBBType);
+ BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ }
- // Set mode of operation
- case SIOCSIWMODE:
- rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL);
- break;
+ if (changed & BSS_CHANGED_TXPOWER)
+ RFbSetPower(priv, priv->wCurrentRate,
+ conf->chandef.chan->hw_value);
- // Get mode of operation
- case SIOCGIWMODE:
- rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
- break;
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ dev_dbg(&priv->pcid->dev,
+ "Beacon enable %d\n", conf->enable_beacon);
- // Set WEP keys and mode
- case SIOCSIWENCODE: {
- char abyKey[WLAN_WEP232_KEYLEN];
+ if (conf->enable_beacon) {
+ vnt_beacon_enable(priv, vif, conf);
- if (wrq->u.encoding.pointer) {
- if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) {
- rc = -E2BIG;
- break;
- }
- memset(abyKey, 0, WLAN_WEP232_KEYLEN);
- if (copy_from_user(abyKey,
- wrq->u.encoding.pointer,
- wrq->u.encoding.length)) {
- rc = -EFAULT;
- break;
- }
- } else if (wrq->u.encoding.length != 0) {
- rc = -EINVAL;
- break;
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ } else {
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
}
- rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey);
}
- break;
- // Get the WEP keys and mode
- case SIOCGIWENCODE:
+ if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
+ if (conf->assoc) {
+ CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
+ conf->sync_device_ts, conf->sync_tsf);
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
+ CARDbSetBeaconPeriod(priv, conf->beacon_int);
+
+ CARDvSetFirstNextTBTT(priv, conf->beacon_int);
+ } else {
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ TFTCTL_TSFCNTRST);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ TFTCTL_TSFCNTREN);
}
- {
- char abyKey[WLAN_WEP232_KEYLEN];
+ }
+}
- rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey);
- if (rc != 0)
- break;
- if (wrq->u.encoding.pointer) {
- if (copy_to_user(wrq->u.encoding.pointer,
- abyKey,
- wrq->u.encoding.length))
- rc = -EFAULT;
+static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct vnt_private *priv = hw->priv;
+ struct netdev_hw_addr *ha;
+ u64 mc_filter = 0;
+ u32 bit_nr = 0;
+
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter |= 1ULL << (bit_nr & 0x3f);
+ }
+
+ priv->mc_list_count = mc_list->count;
+
+ return mc_filter;
+}
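The 64-bit value built here is the classic Ethernet multicast hash: the top six bits of a CRC-32 over the station address select one of 64 bits, which vnt_configure() later writes into the two 32-bit MAR registers. A rough stand-alone illustration of the arithmetic, assuming the kernel's ether_crc() amounts to a bit-reversed little-endian CRC-32 (its usual definition in include/linux/crc32.h); the sample address below is purely hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Bitwise reflected CRC-32, i.e. the equivalent of crc32_le(~0, data, len). */
static uint32_t crc32_le_bits(const uint8_t *data, int len)
{
        uint32_t crc = 0xffffffff;
        int i, b;

        for (i = 0; i < len; i++) {
                crc ^= data[i];
                for (b = 0; b < 8; b++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                if (x & (1u << i))
                        r |= 1u << (31 - i);
        return r;
}

int main(void)
{
        /* hypothetical multicast address, for illustration only */
        const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        uint32_t crc = bitrev32(crc32_le_bits(addr, 6));  /* approximates ether_crc() */
        unsigned int bit_nr = crc >> 26;                  /* top six bits */
        uint64_t mc_filter = 1ULL << (bit_nr & 0x3f);

        printf("bit %u -> MAR word %u, bit %u\n", bit_nr, bit_nr >> 5, bit_nr & 31);
        printf("filter word 0x%016llx\n", (unsigned long long)mc_filter);
        return 0;
}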
+
+static void vnt_configure(struct ieee80211_hw *hw,
+ unsigned int changed_flags, unsigned int *total_flags, u64 multicast)
+{
+ struct vnt_private *priv = hw->priv;
+ u8 rx_mode = 0;
+
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC;
+
+ VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
+
+ dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
+
+ if (changed_flags & FIF_PROMISC_IN_BSS) {
+ /* unconditionally log net taps */
+ if (*total_flags & FIF_PROMISC_IN_BSS)
+ rx_mode |= RCR_UNICAST;
+ else
+ rx_mode &= ~RCR_UNICAST;
+ }
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*total_flags & FIF_ALLMULTI) {
+ if (priv->mc_list_count > 2) {
+ MACvSelectPage1(priv->PortOffset);
+
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0, 0xffffffff);
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0 + 4, 0xffffffff);
+
+ MACvSelectPage0(priv->PortOffset);
+ } else {
+ MACvSelectPage1(priv->PortOffset);
+
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0, (u32)multicast);
+ VNSvOutPortD(priv->PortOffset +
+ MAC_REG_MAR0 + 4,
+ (u32)(multicast >> 32));
+
+ MACvSelectPage0(priv->PortOffset);
}
+
+ rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
+ } else {
+ rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
}
- break;
+ }
- // Get the current Tx-Power
- case SIOCGIWTXPOW:
- pr_debug(" SIOCGIWTXPOW\n");
- rc = -EOPNOTSUPP;
- break;
+ if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
+ rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
- case SIOCSIWTXPOW:
- pr_debug(" SIOCSIWTXPOW\n");
- rc = -EOPNOTSUPP;
- break;
+ if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
+ rx_mode &= ~RCR_BSSID;
+ else
+ rx_mode |= RCR_BSSID;
+ }
- case SIOCSIWRETRY:
+ VNSvOutPortB(priv->PortOffset + MAC_REG_RCR, rx_mode);
- rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL);
- break;
+ dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
+}
- case SIOCGIWRETRY:
+static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL);
+ switch (cmd) {
+ case SET_KEY:
+ if (vnt_set_keys(hw, sta, vif, key))
+ return -EOPNOTSUPP;
break;
+ case DISABLE_KEY:
+ if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+ clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+ default:
+ break;
+ }
- // Get range of parameters
- case SIOCGIWRANGE:
+ return 0;
+}
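Returning -EOPNOTSUPP from the SET_KEY branch is the conventional way for a mac80211 driver to decline hardware offload for a key, in which case the stack falls back to software encryption. A minimal, hypothetical sketch of a driver that never offloads at all (not part of this patch):

#include <net/mac80211.h>

/* Hypothetical callback, for illustration only: refuse every key so that
 * mac80211 performs encryption and decryption in software. */
static int no_hw_crypto_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                                struct ieee80211_vif *vif,
                                struct ieee80211_sta *sta,
                                struct ieee80211_key_conf *key)
{
        return cmd == SET_KEY ? -EOPNOTSUPP : 0;
}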
- {
- struct iw_range range;
+static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
+ u64 tsf;
- rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *)&range);
- if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
- rc = -EFAULT;
- }
+ CARDbGetCurrentTSF(priv, &tsf);
- break;
+ return tsf;
+}
- case SIOCGIWPOWER:
+static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL);
- break;
+ CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
+}
- case SIOCSIWPOWER:
+static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct vnt_private *priv = hw->priv;
- rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL);
- break;
+ /* reset TSF counter */
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+}
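These callbacks use the standard 802.11 units: the TSF is a 64-bit microsecond counter and the beacon interval is expressed in TUs of 1024 µs, so the next TBTT is the next multiple of beacon_int * 1024 µs beyond the current TSF. A small sketch of that arithmetic, offered as an assumption about what CARDvUpdateNextTBTT computes rather than something taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Next target beacon transmission time for a given TSF (in us) and a
 * beacon interval given in TUs (1 TU = 1024 us). */
static uint64_t next_tbtt(uint64_t tsf, uint16_t beacon_int_tu)
{
        uint64_t interval_us = (uint64_t)beacon_int_tu * 1024;

        return ((tsf / interval_us) + 1) * interval_us;
}

int main(void)
{
        /* e.g. beacon interval 100 TU = 102400 us */
        printf("%llu\n", (unsigned long long)next_tbtt(1000000, 100));
        return 0;
}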
- case SIOCGIWSENS:
+static const struct ieee80211_ops vnt_mac_ops = {
+ .tx = vnt_tx_80211,
+ .start = vnt_start,
+ .stop = vnt_stop,
+ .add_interface = vnt_add_interface,
+ .remove_interface = vnt_remove_interface,
+ .config = vnt_config,
+ .bss_info_changed = vnt_bss_info_changed,
+ .prepare_multicast = vnt_prepare_multicast,
+ .configure_filter = vnt_configure,
+ .set_key = vnt_set_key,
+ .get_tsf = vnt_get_tsf,
+ .set_tsf = vnt_set_tsf,
+ .reset_tsf = vnt_reset_tsf,
+};
- rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL);
- break;
+int vnt_init(struct vnt_private *priv)
+{
+ SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);
- case SIOCSIWSENS:
- pr_debug(" SIOCSIWSENS\n");
- rc = -EOPNOTSUPP;
- break;
+ vnt_init_bands(priv);
- case SIOCGIWAPLIST: {
- char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))];
-
- if (wrq->u.data.pointer) {
- rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer);
- if (rc == 0) {
- if (copy_to_user(wrq->u.data.pointer,
- buffer,
- (wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality)))
- ))
- rc = -EFAULT;
- }
- }
- }
- break;
+ if (ieee80211_register_hw(priv->hw))
+ return -ENODEV;
-#ifdef WIRELESS_SPY
- // Set the spy list
- case SIOCSIWSPY:
+ priv->mac_hw = true;
- pr_debug(" SIOCSIWSPY\n");
- rc = -EOPNOTSUPP;
- break;
+ CARDbRadioPowerOff(priv);
- // Get the spy list
- case SIOCGIWSPY:
+ return 0;
+}
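vnt_init() follows the usual mac80211 bring-up order: set the permanent address and the supported bands on the ieee80211_hw, then register it. The matching teardown is not visible in this hunk; a hedged sketch of what such a counterpart typically looks like, using only core mac80211 calls and illustrative names:

#include <net/mac80211.h>

/* Sketch only: the usual shape of a teardown for a driver that registered
 * its hw during probe/init. */
static void example_mac80211_teardown(struct ieee80211_hw *hw, bool registered)
{
        if (registered)
                ieee80211_unregister_hw(hw);    /* detach from mac80211 */

        ieee80211_free_hw(hw);                  /* frees hw and the embedded priv */
}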
- pr_debug(" SIOCGIWSPY\n");
- rc = -EOPNOTSUPP;
- break;
+static int
+vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
+{
+ PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
+ struct vnt_private *priv;
+ struct ieee80211_hw *hw;
+ struct wiphy *wiphy;
+ int rc;
-#endif // WIRELESS_SPY
+ dev_notice(&pcid->dev,
+ "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
- case SIOCGIWPRIV:
- pr_debug(" SIOCGIWPRIV\n");
- rc = -EOPNOTSUPP;
- break;
+ dev_notice(&pcid->dev,
+ "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- case SIOCSIWAUTH:
- pr_debug(" SIOCSIWAUTH\n");
- rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL);
- break;
+ hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
+ if (!hw) {
+ dev_err(&pcid->dev, "could not register ieee80211_hw\n");
+ return -ENOMEM;
+ }
- case SIOCGIWAUTH:
- pr_debug(" SIOCGIWAUTH\n");
- rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL);
- break;
+ priv = hw->priv;
- case SIOCSIWGENIE:
- pr_debug(" SIOCSIWGENIE\n");
- rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ vt6655_init_info(pcid, &priv, pChip_info);
- case SIOCGIWGENIE:
- pr_debug(" SIOCGIWGENIE\n");
- rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ priv->hw = hw;
- case SIOCSIWENCODEEXT: {
- char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1];
+ SET_IEEE80211_DEV(priv->hw, &pcid->dev);
- pr_debug(" SIOCSIWENCODEEXT\n");
- if (wrq->u.encoding.pointer) {
- memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN + 1);
- if (wrq->u.encoding.length > (sizeof(struct iw_encode_ext) + MAX_KEY_LEN)) {
- rc = -E2BIG;
- break;
- }
- if (copy_from_user(extra, wrq->u.encoding.pointer, wrq->u.encoding.length)) {
- rc = -EFAULT;
- break;
- }
- } else if (wrq->u.encoding.length != 0) {
- rc = -EINVAL;
- break;
- }
- rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra);
+ if (pci_enable_device(pcid)) {
+ device_free_info(priv);
+ return -ENODEV;
}
- break;
- case SIOCGIWENCODEEXT:
- pr_debug(" SIOCGIWENCODEEXT\n");
- rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL);
- break;
+ dev_dbg(&pcid->dev,
+ "Before get pci_info memaddr is %x\n", priv->memaddr);
- case SIOCSIWMLME:
- pr_debug(" SIOCSIWMLME\n");
- rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
- break;
+ if (!device_get_pci_info(priv, pcid)) {
+ dev_err(&pcid->dev, ": Failed to find PCI device.\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
-#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
+#ifdef DEBUG
+ dev_dbg(&pcid->dev,
+ "after get pci_info memaddr is %x, io addr is %x,io_size is %d\n",
+ priv->memaddr, priv->ioaddr, priv->io_size);
+ {
+ int i;
+ u32 bar, len;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ PCI_BASE_ADDRESS_4,
+ PCI_BASE_ADDRESS_5,
+ 0};
+ for (i = 0; address[i]; i++) {
+ pci_read_config_dword(pcid, address[i], &bar);
- case IOCTL_CMD_TEST:
+ dev_dbg(&pcid->dev, "bar %d is %x\n", i, bar);
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EFAULT;
- break;
- } else {
- rc = 0;
- }
- pReq = (PSCmdRequest)rq;
- pReq->wResult = MAGIC_CODE;
- break;
+ if (!bar) {
+ dev_dbg(&pcid->dev,
+ "bar %d not implemented\n", i);
+ continue;
+ }
- case IOCTL_CMD_SET:
+ if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
-#ifdef SndEvt_ToAPI
- if ((((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_EVT) &&
- !(pDevice->flags & DEVICE_FLAGS_OPENED))
-#else
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED) &&
- (((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_WPA))
-#endif
- {
- rc = -EFAULT;
- break;
+ len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xffff);
+ len = len & ~(len - 1);
+
+ dev_dbg(&pcid->dev,
+ "IO space: len in IO %x, BAR %d\n",
+ len, i);
} else {
- rc = 0;
+ len = bar & 0xfffffff0;
+ len = ~len + 1;
+
+ dev_dbg(&pcid->dev,
+ "len in MEM %x, BAR %d\n", len, i);
}
+ }
+ }
+#endif
- if (test_and_set_bit(0, (void *)&(pMgmt->uCmdBusy)))
- return -EBUSY;
+ priv->PortOffset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ priv->io_size);
+ if (!priv->PortOffset) {
+ dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
- rc = private_ioctl(pDevice, rq);
- clear_bit(0, (void *)&(pMgmt->uCmdBusy));
- break;
+ rc = pci_request_regions(pcid, DEVICE_NAME);
+ if (rc) {
+ dev_err(&pcid->dev, ": Failed to find PCI device\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
- case IOCTL_CMD_HOSTAPD:
+ /* do reset */
+ if (!MACbSoftwareReset(priv->PortOffset)) {
+ dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
+ device_free_info(priv);
+ return -ENODEV;
+ }
+ /* initial to reload eeprom */
+ MACvInitialize(priv->PortOffset);
+ MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
- rc = vt6655_hostap_ioctl(pDevice, &wrq->u.data);
- break;
+ device_get_options(priv);
+ device_set_options(priv);
+ /* Mask out the options that cannot be set on this chip */
+ priv->sOpts.flags &= pChip_info->flags;
- case IOCTL_CMD_WPA:
+ /* Enable the chip specified capabilities */
+ priv->flags = priv->sOpts.flags | (pChip_info->flags & 0xff000000UL);
- rc = wpa_ioctl(pDevice, &wrq->u.data);
- break;
+ wiphy = priv->hw->wiphy;
- case SIOCETHTOOL:
- return ethtool_ioctl(dev, rq->ifr_data);
- // All other calls are currently unsupported
+ wiphy->frag_threshold = FRAG_THRESH_DEF;
+ wiphy->rts_threshold = RTS_THRESH_DEF;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- default:
- rc = -EOPNOTSUPP;
- pr_debug("Ioctl command not support..%x\n", cmd);
+ priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
- }
+ priv->hw->max_signal = 100;
- if (pDevice->bCommit) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- } else {
- pr_debug("Commit the settings\n");
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if (!pDevice->bWPASuppWextEnabled)
-#endif
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- }
- pDevice->bCommit = false;
- }
+ if (vnt_init(priv))
+ return -ENODEV;
- return rc;
+ device_print_info(priv);
+ pci_set_drvdata(pcid, priv);
+
+ return 0;
}
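The I/O branch of the DEBUG-only BAR dump inside the probe relies on len & ~(len - 1), which isolates the lowest set bit of a value (the same as len & -len for unsigned types); in the classic BAR-sizing sequence that lowest writable bit is the region size. A tiny stand-alone check of the identity, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* illustrative values only */
        uint32_t samples[] = { 0xfffff000, 0xffffff00, 0x00a0c000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                uint32_t v = samples[i];
                uint32_t lowest = v & ~(v - 1); /* same as v & -v */

                printf("0x%08x -> lowest set bit 0x%08x\n", v, lowest);
        }
        return 0;
}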
-static int ethtool_ioctl(struct net_device *dev, void __user *useraddr)
+/*------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
{
- u32 ethcmd;
+ struct vnt_private *priv = pci_get_drvdata(pcid);
+ unsigned long flags;
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
+ spin_lock_irqsave(&priv->lock, flags);
- switch (ethcmd) {
- case ETHTOOL_GDRVINFO: {
- struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ pci_save_state(pcid);
- strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
- strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
+ MACbShutdown(priv->PortOffset);
- }
+ pci_disable_device(pcid);
+ pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
- return -EOPNOTSUPP;
+ return 0;
}
-/*------------------------------------------------------------------*/
+static int vt6655_resume(struct pci_dev *pcid)
+{
+
+ pci_set_power_state(pcid, PCI_D0);
+ pci_enable_wake(pcid, PCI_D0, 0);
+ pci_restore_state(pcid);
+
+ return 0;
+}
+#endif
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
@@ -3025,8 +1871,8 @@ static struct pci_driver device_driver = {
.probe = vt6655_probe,
.remove = vt6655_remove,
#ifdef CONFIG_PM
- .suspend = viawget_suspend,
- .resume = viawget_resume,
+ .suspend = vt6655_suspend,
+ .resume = vt6655_resume,
#endif
};
@@ -3067,75 +1913,10 @@ device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
for_each_pci_dev(pdev) {
if (pci_dev_driver(pdev) == &device_driver) {
if (pci_get_drvdata(pdev))
- viawget_suspend(pdev, PMSG_HIBERNATE);
+ vt6655_suspend(pdev, PMSG_HIBERNATE);
}
}
}
return NOTIFY_DONE;
}
-
-static int
-viawget_suspend(struct pci_dev *pcid, pm_message_t state)
-{
- int power_status; // to silence the compiler
-
- struct vnt_private *pDevice = pci_get_drvdata(pcid);
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pci_save_state(pcid);
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- pDevice->bCmdRunning = false;
- MACbShutdown(pDevice->PortOffset);
- MACvSaveContext(pDevice->PortOffset, pDevice->abyMacContext);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pci_disable_device(pcid);
- power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state));
- spin_unlock_irq(&pDevice->lock);
- return 0;
-}
-
-static int
-viawget_resume(struct pci_dev *pcid)
-{
- struct vnt_private *pDevice = pci_get_drvdata(pcid);
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int power_status; // to silence the compiler
-
- power_status = pci_set_power_state(pcid, PCI_D0);
- power_status = pci_enable_wake(pcid, PCI_D0, 0);
- pci_restore_state(pcid);
- if (netif_running(pDevice->dev)) {
- spin_lock_irq(&pDevice->lock);
- MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
- device_init_registers(pDevice);
- if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- pMgmt->sNodeDBTable[0].bActive = false;
- pDevice->bLinkPass = false;
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // In Adhoc, BSS state set back to started.
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- } else {
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
- }
- init_timer(&pMgmt->sTimerSecondCallback);
- init_timer(&pDevice->sTimerCommand);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- }
- return 0;
-}
-
#endif
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 8515b8c80801..977683cb7391 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -25,1289 +25,135 @@
* Date: May 20, 2003
*
* Functions:
- * device_receive_frame - Rcv 802.11 frame function
- * s_bAPModeRxCtl- AP Rcv frame filer Ctl.
- * s_bAPModeRxData- AP Rcv data frame handle
- * s_bHandleRxEncryption- Rcv decrypted data via on-fly
- * s_bHostWepRxEncryption- Rcv encrypted data via host
- * s_byGetRateIdx- get rate index
- * s_vGetDASA- get data offset
- * s_vProcessRxMACHeader- Rcv 802.11 and translate to 802.3
*
* Revision History:
*
*/
#include "device.h"
-#include "rxtx.h"
-#include "tether.h"
-#include "card.h"
-#include "bssdb.h"
-#include "mac.h"
#include "baseband.h"
-#include "michael.h"
-#include "tkip.h"
-#include "tcrc.h"
-#include "wctl.h"
-#include "wroute.h"
-#include "hostap.h"
#include "rf.h"
-#include "iowpa.h"
-#include "aes_ccmp.h"
#include "dpc.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char acbyRxRate[MAX_RATE] =
-{2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static unsigned char s_byGetRateIdx(unsigned char byRate);
-
-static void
-s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
- PSEthernetHeader psEthHeader);
-
-static void
-s_vProcessRxMACHeader(struct vnt_private *pDevice, unsigned char *pbyRxBufferAddr,
- unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
- unsigned int *pcbHeadSize);
-
-static bool s_bAPModeRxCtl(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- int iSANodeIndex
-);
-
-static bool s_bAPModeRxData(
- struct vnt_private *pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- int iSANodeIndex,
- int iDANodeIndex
-);
-
-static bool s_bHandleRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- unsigned char *pbyNewRsr,
- PSKeyItem *pKeyOut,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-);
-
-static bool s_bHostWepRxEncryption(
-
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- bool bOnFly,
- PSKeyItem pKey,
- unsigned char *pbyNewRsr,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*+
- *
- * Description:
- * Translate Rcv 802.11 header to 802.3 header with Rx buffer
- *
- * Parameters:
- * In:
- * pDevice
- * dwRxBufferAddr - Address of Rcv Buffer
- * cbPacketSize - Rcv Packet size
- * bIsWEP - If Rcv with WEP
- * Out:
- * pcbHeaderSize - 802.11 header size
- *
- * Return Value: None
- *
- -*/
-static void
-s_vProcessRxMACHeader(struct vnt_private *pDevice,
- unsigned char *pbyRxBufferAddr,
- unsigned int cbPacketSize, bool bIsWEP, bool bExtIV,
- unsigned int *pcbHeadSize)
+static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
+ u16 bytes_received)
{
- unsigned char *pbyRxBuffer;
- unsigned int cbHeaderSize = 0;
- unsigned short *pwType;
- PS802_11Header pMACHeader;
- int ii;
-
- pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
-
- s_vGetDASA((unsigned char *)pMACHeader, &cbHeaderSize, &pDevice->sRxEthHeader);
-
- if (bIsWEP) {
- if (bExtIV) {
- // strip IV&ExtIV , add 8 byte
- cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 8);
- } else {
- // strip IV , add 4 byte
- cbHeaderSize += (WLAN_HDR_ADDR3_LEN + 4);
- }
- } else {
- cbHeaderSize += WLAN_HDR_ADDR3_LEN;
- }
-
- pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
- if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_Bridgetunnel)) {
- cbHeaderSize += 6;
- } else if (ether_addr_equal(pbyRxBuffer, pDevice->abySNAP_RFC1042)) {
- cbHeaderSize += 6;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if ((*pwType != TYPE_PKT_IPX) && (*pwType != cpu_to_le16(0xF380))) {
- } else {
- cbHeaderSize -= 8;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if (bIsWEP) {
- if (bExtIV)
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
- else
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV
-
- } else {
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN);
- }
- }
- } else {
- cbHeaderSize -= 2;
- pwType = (unsigned short *)(pbyRxBufferAddr + cbHeaderSize);
- if (bIsWEP) {
- if (bExtIV)
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 8); // 8 is IV&ExtIV
- else
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN - 4); // 4 is IV
-
- } else {
- *pwType = htons(cbPacketSize - WLAN_HDR_ADDR3_LEN);
- }
- }
-
- cbHeaderSize -= (ETH_ALEN * 2);
- pbyRxBuffer = (unsigned char *)(pbyRxBufferAddr + cbHeaderSize);
- for (ii = 0; ii < ETH_ALEN; ii++)
- *pbyRxBuffer++ = pDevice->sRxEthHeader.abyDstAddr[ii];
- for (ii = 0; ii < ETH_ALEN; ii++)
- *pbyRxBuffer++ = pDevice->sRxEthHeader.abySrcAddr[ii];
-
- *pcbHeadSize = cbHeaderSize;
-}
-
-static unsigned char s_byGetRateIdx(unsigned char byRate)
-{
- unsigned char byRateIdx;
-
- for (byRateIdx = 0; byRateIdx < MAX_RATE; byRateIdx++) {
- if (acbyRxRate[byRateIdx % MAX_RATE] == byRate)
- return byRateIdx;
- }
-
- return 0;
-}
-
-static void
-s_vGetDASA(unsigned char *pbyRxBufferAddr, unsigned int *pcbHeaderSize,
- PSEthernetHeader psEthHeader)
-{
- unsigned int cbHeaderSize = 0;
- PS802_11Header pMACHeader;
- int ii;
-
- pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
-
- if ((pMACHeader->wFrameCtl & FC_TODS) == 0) {
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr3[ii];
- }
- } else {
- // IBSS mode
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- } else {
- // Is AP mode..
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr4[ii];
- cbHeaderSize += 6;
- }
- } else {
- for (ii = 0; ii < ETH_ALEN; ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- }
- *pcbHeaderSize = cbHeaderSize;
-}
-
-bool
-device_receive_frame(
- struct vnt_private *pDevice,
- PSRxDesc pCurrRD
-)
-{
- PDEVICE_RD_INFO pRDInfo = pCurrRD->pRDInfo;
- struct net_device_stats *pStats = &pDevice->dev->stats;
- struct sk_buff *skb;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSRxMgmtPacket pRxPacket = &(pDevice->pMgmt->sRxPacket);
- PS802_11Header p802_11Header;
- unsigned char *pbyRsr;
- unsigned char *pbyNewRsr;
- unsigned char *pbyRSSI;
- __le64 *pqwTSFTime;
- unsigned short *pwFrameSize;
- unsigned char *pbyFrame;
- bool bDeFragRx = false;
- bool bIsWEP = false;
- unsigned int cbHeaderOffset;
- unsigned int FrameSize;
- unsigned short wEtherType = 0;
- int iSANodeIndex = -1;
- int iDANodeIndex = -1;
- unsigned int ii;
- unsigned int cbIVOffset;
- bool bExtIV = false;
- unsigned char *pbyRxSts;
- unsigned char *pbyRxRate;
- unsigned char *pbySQ;
- unsigned int cbHeaderSize;
- PSKeyItem pKey = NULL;
- unsigned short wRxTSC15_0 = 0;
- unsigned long dwRxTSC47_16 = 0;
- SKeyItem STempKey;
- // 802.11h RPI
- unsigned long dwDuration = 0;
- long ldBm = 0;
- long ldBmThreshold = 0;
- PS802_11Header pMACHeader;
- bool bRxeapol_key = false;
-
- skb = pRDInfo->skb;
-
- pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
- pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
- pwFrameSize = (unsigned short *)(skb->data + 2);
- FrameSize = cpu_to_le16(pCurrRD->m_rd1RD1.wReqCount) - cpu_to_le16(pCurrRD->m_rd0RD0.wResCount);
-
- // Max: 2312Payload + 30HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
- // Min (ACK): 10HD +4CRC + 2Padding + 4Len + 8TSF + 4RSR
- if ((FrameSize > 2364) || (FrameSize <= 32)) {
- // Frame Size error drop this packet.
- pr_debug("---------- WRONG Length 1\n");
- return false;
- }
-
- pbyRxSts = (unsigned char *)(skb->data);
- pbyRxRate = (unsigned char *)(skb->data + 1);
- pbyRsr = (unsigned char *)(skb->data + FrameSize - 1);
- pbyRSSI = (unsigned char *)(skb->data + FrameSize - 2);
- pbyNewRsr = (unsigned char *)(skb->data + FrameSize - 3);
- pbySQ = (unsigned char *)(skb->data + FrameSize - 4);
- pqwTSFTime = (__le64 *)(skb->data + FrameSize - 12);
- pbyFrame = (unsigned char *)(skb->data + 4);
-
- // get packet size
- FrameSize = cpu_to_le16(*pwFrameSize);
-
- if ((FrameSize > 2346)|(FrameSize < 14)) { // Max: 2312Payload + 30HD +4CRC
- // Min: 14 bytes ACK
- pr_debug("---------- WRONG Length 2\n");
- return false;
- }
-
- // update receive statistic counter
- STAvUpdateRDStatCounter(&pDevice->scStatistic,
- *pbyRsr,
- *pbyNewRsr,
- *pbyRxRate,
- pbyFrame,
- FrameSize);
-
- pMACHeader = (PS802_11Header)((unsigned char *)(skb->data) + 8);
-
- if (pDevice->bMeasureInProgress) {
- if ((*pbyRsr & RSR_CRCOK) != 0)
- pDevice->byBasicMap |= 0x01;
-
- dwDuration = (FrameSize << 4);
- dwDuration /= acbyRxRate[*pbyRxRate%MAX_RATE];
- if (*pbyRxRate <= RATE_11M) {
- if (*pbyRxSts & 0x01) {
- // long preamble
- dwDuration += 192;
- } else {
- // short preamble
- dwDuration += 96;
- }
- } else {
- dwDuration += 16;
- }
- RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
- ldBmThreshold = -57;
- for (ii = 7; ii > 0;) {
- if (ldBm > ldBmThreshold)
- break;
-
- ldBmThreshold -= 5;
- ii--;
- }
- pDevice->dwRPIs[ii] += dwDuration;
- return false;
- }
-
- if (!is_multicast_ether_addr(pbyFrame)) {
- if (WCTLbIsDuplicate(&(pDevice->sDupRxCache), (PS802_11Header)(skb->data + 4))) {
- pDevice->s802_11Counter.FrameDuplicateCount++;
- return false;
- }
- }
-
- // Use for TKIP MIC
- s_vGetDASA(skb->data+4, &cbHeaderSize, &pDevice->sRxEthHeader);
-
- // filter packet send from myself
- if (ether_addr_equal(pDevice->sRxEthHeader.abySrcAddr,
- pDevice->abyCurrentNetAddr))
- return false;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) || (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
- p802_11Header = (PS802_11Header)(pbyFrame);
- // get SA NodeIndex
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(p802_11Header->abyAddr2), &iSANodeIndex)) {
- pMgmt->sNodeDBTable[iSANodeIndex].ulLastRxJiffer = jiffies;
- pMgmt->sNodeDBTable[iSANodeIndex].uInActiveCount = 0;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (s_bAPModeRxCtl(pDevice, pbyFrame, iSANodeIndex))
- return false;
- }
-
- if (IS_FC_WEP(pbyFrame)) {
- bool bRxDecryOK = false;
-
- pr_debug("rx WEP pkt\n");
- bIsWEP = true;
- if ((pDevice->bEnableHostWEP) && (iSANodeIndex >= 0)) {
- pKey = &STempKey;
- pKey->byCipherSuite = pMgmt->sNodeDBTable[iSANodeIndex].byCipherSuite;
- pKey->dwKeyIndex = pMgmt->sNodeDBTable[iSANodeIndex].dwKeyIndex;
- pKey->uKeyLength = pMgmt->sNodeDBTable[iSANodeIndex].uWepKeyLength;
- pKey->dwTSC47_16 = pMgmt->sNodeDBTable[iSANodeIndex].dwTSC47_16;
- pKey->wTSC15_0 = pMgmt->sNodeDBTable[iSANodeIndex].wTSC15_0;
- memcpy(pKey->abyKey,
- &pMgmt->sNodeDBTable[iSANodeIndex].abyWepKey[0],
- pKey->uKeyLength
-);
-
- bRxDecryOK = s_bHostWepRxEncryption(pDevice,
- pbyFrame,
- FrameSize,
- pbyRsr,
- pMgmt->sNodeDBTable[iSANodeIndex].bOnFly,
- pKey,
- pbyNewRsr,
- &bExtIV,
- &wRxTSC15_0,
- &dwRxTSC47_16);
- } else {
- bRxDecryOK = s_bHandleRxEncryption(pDevice,
- pbyFrame,
- FrameSize,
- pbyRsr,
- pbyNewRsr,
- &pKey,
- &bExtIV,
- &wRxTSC15_0,
- &dwRxTSC47_16);
- }
-
- if (bRxDecryOK) {
- if ((*pbyNewRsr & NEWRSR_DECRYPTOK) == 0) {
- pr_debug("ICV Fail\n");
- if ((pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP))
- pDevice->s802_11Counter.TKIPICVErrors++;
- else if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
- pDevice->s802_11Counter.CCMPDecryptErrors++;
- }
- return false;
- }
- } else {
- pr_debug("WEP Func Fail\n");
- return false;
- }
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_CCMP))
- FrameSize -= 8; // Message Integrity Code
- else
- FrameSize -= 4; // 4 is ICV
- }
-
- //
- // RX OK
- //
- //remove the CRC length
- FrameSize -= ETH_FCS_LEN;
-
- if ((!(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address
- (IS_FRAGMENT_PKT((skb->data+4)))
-) {
- // defragment
- bDeFragRx = WCTLbHandleFragment(pDevice, (PS802_11Header)(skb->data+4), FrameSize, bIsWEP, bExtIV);
- pDevice->s802_11Counter.ReceivedFragmentCount++;
- if (bDeFragRx) {
- // defrag complete
- skb = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb;
- FrameSize = pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength;
-
- } else {
- return false;
- }
- }
-
-// Management & Control frame Handle
- if ((IS_TYPE_DATA((skb->data+4))) == false) {
- // Handle Control & Manage Frame
-
- if (IS_TYPE_MGMT((skb->data+4))) {
- unsigned char *pbyData1;
- unsigned char *pbyData2;
-
- pRxPacket->p80211Header = (PUWLAN_80211HDR)(skb->data+4);
- pRxPacket->cbMPDULen = FrameSize;
- pRxPacket->uRSSI = *pbyRSSI;
- pRxPacket->bySQ = *pbySQ;
- pRxPacket->qwLocalTSF = le64_to_cpu(*pqwTSFTime);
- if (bIsWEP) {
- // strip IV
- pbyData1 = WLAN_HDR_A3_DATA_PTR(skb->data+4);
- pbyData2 = WLAN_HDR_A3_DATA_PTR(skb->data+4) + 4;
- for (ii = 0; ii < (FrameSize - 4); ii++) {
- *pbyData1 = *pbyData2;
- pbyData1++;
- pbyData2++;
- }
- }
- pRxPacket->byRxRate = s_byGetRateIdx(*pbyRxRate);
- pRxPacket->byRxChannel = (*pbyRxSts) >> 2;
-
- vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket);
-
- // hostap Deamon handle 802.11 management
- if (pDevice->bEnableHostapd) {
- skb->dev = pDevice->apdev;
- skb->data += 4;
- skb->tail += 4;
- skb_put(skb, FrameSize);
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
- return true;
- }
- }
-
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status rx_status = { 0 };
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+ u8 *rsr, *new_rsr, *rssi;
+ __le64 *tsf_time;
+ u16 frame_size;
+ int ii, r;
+ u8 *rx_sts, *rx_rate, *sq;
+ u8 *skb_data;
+ u8 rate_idx = 0;
+ u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
+ long rx_dbm;
+
+ /* [31:16]RcvByteCount ( not include 4-byte Status ) */
+ frame_size = le16_to_cpu(*((__le16 *)(skb->data + 2)));
+ if (frame_size > 2346 || frame_size < 14) {
+ dev_dbg(&priv->pcid->dev, "------- WRONG Length 1\n");
return false;
- } else {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- //In AP mode, hw only check addr1(BSSID or RA) if equal to local MAC.
- if (!(*pbyRsr & RSR_BSSIDOK)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- } else {
- // discard DATA packet while not associate || BSSID error
- if (!pDevice->bLinkPass || !(*pbyRsr & RSR_BSSIDOK)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- //mike add:station mode check eapol-key challenge--->
- {
- unsigned char Protocol_Version; //802.1x Authentication
- unsigned char Packet_Type; //802.1x Authentication
-
- if (bIsWEP)
- cbIVOffset = 8;
- else
- cbIVOffset = 0;
- wEtherType = (skb->data[cbIVOffset + 8 + 24 + 6] << 8) |
- skb->data[cbIVOffset + 8 + 24 + 6 + 1];
- Protocol_Version = skb->data[cbIVOffset + 8 + 24 + 6 + 1 + 1];
- Packet_Type = skb->data[cbIVOffset + 8 + 24 + 6 + 1 + 1 + 1];
- if (wEtherType == ETH_P_PAE) { //Protocol Type in LLC-Header
- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
- (Packet_Type == 3)) { //802.1x OR eapol-key challenge frame receive
- bRxeapol_key = true;
- }
- }
- }
- //mike add:station mode check eapol-key challenge<---
- }
- }
-
-// Data frame Handle
-
- if (pDevice->bEnablePSMode) {
- if (!IS_FC_MOREDATA((skb->data+4))) {
- if (pDevice->pMgmt->bInTIMWake == true)
- pDevice->pMgmt->bInTIMWake = false;
- }
- }
-
- // Now it only supports 802.11g Infrastructure Mode, and support rate must up to 54 Mbps
- if (pDevice->bDiversityEnable && (FrameSize > 50) &&
- (pDevice->op_mode == NL80211_IFTYPE_STATION) &&
- pDevice->bLinkPass) {
- BBvAntennaDiversity(pDevice, s_byGetRateIdx(*pbyRxRate), 0);
- }
-
- if (pDevice->byLocalID != REV_ID_VT3253_B1)
- pDevice->uCurrRSSI = *pbyRSSI;
-
- pDevice->byCurrSQ = *pbySQ;
-
- if ((*pbyRSSI != 0) &&
- (pMgmt->pCurrBSS != NULL)) {
- RFvRSSITodBm(pDevice, *pbyRSSI, &ldBm);
- // Monitor if RSSI is too strong.
- pMgmt->pCurrBSS->byRSSIStatCnt++;
- pMgmt->pCurrBSS->byRSSIStatCnt %= RSSI_STAT_COUNT;
- pMgmt->pCurrBSS->ldBmAverage[pMgmt->pCurrBSS->byRSSIStatCnt] = ldBm;
- for (ii = 0; ii < RSSI_STAT_COUNT; ii++)
- if (pMgmt->pCurrBSS->ldBmAverage[ii] != 0)
- pMgmt->pCurrBSS->ldBmMAX = max(pMgmt->pCurrBSS->ldBmAverage[ii], ldBm);
-
}
- // -----------------------------------------------
+ skb_data = (u8 *)skb->data;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnable8021x) {
- unsigned char abyMacHdr[24];
+ rx_sts = skb_data;
+ rx_rate = skb_data + 1;
- // Only 802.1x packet incoming allowed
- if (bIsWEP)
- cbIVOffset = 8;
- else
- cbIVOffset = 0;
- wEtherType = (skb->data[cbIVOffset + 4 + 24 + 6] << 8) |
- skb->data[cbIVOffset + 4 + 24 + 6 + 1];
+ sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
- pr_debug("wEtherType = %04x\n", wEtherType);
- if (wEtherType == ETH_P_PAE) {
- skb->dev = pDevice->apdev;
-
- if (bIsWEP) {
- // strip IV header(8)
- memcpy(&abyMacHdr[0], (skb->data + 4), 24);
- memcpy((skb->data + 4 + cbIVOffset), &abyMacHdr[0], 24);
- }
- skb->data += (cbIVOffset + 4);
- skb->tail += (cbIVOffset + 4);
- skb_put(skb, FrameSize);
- skb_reset_mac_header(skb);
-
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
- return true;
-
- }
- // check if 802.1x authorized
- if (!(pMgmt->sNodeDBTable[iSANodeIndex].dwFlags & WLAN_STA_AUTHORIZED))
- return false;
+ for (r = RATE_1M; r < MAX_RATE; r++) {
+ if (*rx_rate == rate[r])
+ break;
}
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (bIsWEP)
- FrameSize -= 8; //MIC
- }
-
- //--------------------------------------------------------------------------------
- // Soft MIC
- if ((pKey != NULL) && (pKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (bIsWEP) {
- __le32 *pdwMIC_L;
- __le32 *pdwMIC_R;
- __le32 dwMIC_Priority;
- __le32 dwMICKey0 = 0, dwMICKey1 = 0;
- u32 dwLocalMIC_L = 0;
- u32 dwLocalMIC_R = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
- } else {
- if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
- } else if ((pKey->dwKeyIndex & BIT28) == 0) {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[16]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[20]));
- } else {
- dwMICKey0 = cpu_to_le32(*(u32 *)(&pKey->abyKey[24]));
- dwMICKey1 = cpu_to_le32(*(u32 *)(&pKey->abyKey[28]));
- }
- }
-
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(pDevice->sRxEthHeader.abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- // 4 is Rcv buffer header, 24 is MAC Header, and 8 is IV and Ext IV.
- MIC_vAppend((unsigned char *)(skb->data + 4 + WLAN_HDR_ADDR3_LEN + 8),
- FrameSize - WLAN_HDR_ADDR3_LEN - 8);
- MIC_vGetMIC(&dwLocalMIC_L, &dwLocalMIC_R);
- MIC_vUnInit();
-
- pdwMIC_L = (__le32 *)(skb->data + 4 + FrameSize);
- pdwMIC_R = (__le32 *)(skb->data + 4 + FrameSize + 4);
-
- if ((le32_to_cpu(*pdwMIC_L) != dwLocalMIC_L) ||
- (le32_to_cpu(*pdwMIC_R) != dwLocalMIC_R) ||
- pDevice->bRxMICFail) {
- pr_debug("MIC comparison is fail!\n");
- pDevice->bRxMICFail = false;
- pDevice->s802_11Counter.TKIPLocalMICFailures++;
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- //2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- //send event to wpa_supplicant
- {
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- int keyidx = pbyFrame[cbHeaderSize+3] >> 6; //top two-bits
-
- memset(&ev, 0, sizeof(ev));
- ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC) &&
- (*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) {
- ev.flags |= IW_MICFAILURE_PAIRWISE;
- } else {
- ev.flags |= IW_MICFAILURE_GROUP;
- }
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, pMACHeader->abyAddr2, ETH_ALEN);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(pDevice->dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
-
- }
-#endif
+ priv->rx_rate = r;
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC) &&
- (*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) {
- wpahdr->type = VIAWGET_PTK_MIC_MSG;
- } else {
- wpahdr->type = VIAWGET_GTK_MIC_MSG;
- }
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
- return false;
-
- }
- }
- } //---end of SOFT MIC-----------------------------------------------------------------------
-
- // ++++++++++ Reply Counter Check +++++++++++++
-
- if ((pKey != NULL) && ((pKey->byCipherSuite == KEY_CTL_TKIP) ||
- (pKey->byCipherSuite == KEY_CTL_CCMP))) {
- if (bIsWEP) {
- unsigned short wLocalTSC15_0 = 0;
- unsigned long dwLocalTSC47_16 = 0;
- unsigned long long RSC = 0;
- // endian issues
- RSC = *((unsigned long long *)&(pKey->KeyRSC));
- wLocalTSC15_0 = (unsigned short)RSC;
- dwLocalTSC47_16 = (unsigned long)(RSC>>16);
-
- RSC = dwRxTSC47_16;
- RSC <<= 16;
- RSC += wRxTSC15_0;
- pKey->KeyRSC = RSC;
-
- if ((pDevice->sMgmtObj.eCurrMode == WMAC_MODE_ESS_STA) &&
- (pDevice->sMgmtObj.eCurrState == WMAC_STATE_ASSOC)) {
- // check RSC
- if ((wRxTSC15_0 < wLocalTSC15_0) &&
- (dwRxTSC47_16 <= dwLocalTSC47_16) &&
- !((dwRxTSC47_16 == 0) && (dwLocalTSC47_16 == 0xFFFFFFFF))) {
- pr_debug("TSC is illegal~~!\n ");
- if (pKey->byCipherSuite == KEY_CTL_TKIP)
- pDevice->s802_11Counter.TKIPReplays++;
- else
- pDevice->s802_11Counter.CCMPReplays++;
-
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
- }
- }
- }
- } // ----- End of Reply Counter Check --------------------------
-
- s_vProcessRxMACHeader(pDevice, (unsigned char *)(skb->data+4), FrameSize, bIsWEP, bExtIV, &cbHeaderOffset);
- FrameSize -= cbHeaderOffset;
- cbHeaderOffset += 4; // 4 is Rcv buffer header
-
- // Null data, framesize = 14
- if (FrameSize < 15)
- return false;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (!s_bAPModeRxData(pDevice,
- skb,
- FrameSize,
- cbHeaderOffset,
- iSANodeIndex,
- iDANodeIndex
-)) {
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
- }
- return false;
+ for (ii = 0; ii < sband->n_bitrates; ii++) {
+ if (sband->bitrates[ii].hw_value == r) {
+ rate_idx = ii;
+ break;
}
}
- skb->data += cbHeaderOffset;
- skb->tail += cbHeaderOffset;
- skb_put(skb, FrameSize);
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- //drop frame not met IEEE 802.3
-
- skb->ip_summed = CHECKSUM_NONE;
- pStats->rx_bytes += skb->len;
- pStats->rx_packets++;
- netif_rx(skb);
-
- if (bDeFragRx) {
- if (!device_alloc_frag_buf(pDevice, &pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx])) {
- pr_err("%s: can not alloc more frag bufs\n",
- pDevice->dev->name);
- }
+ if (ii == sband->n_bitrates) {
+ dev_dbg(&priv->pcid->dev, "Wrong RxRate %x\n", *rx_rate);
return false;
}
- return true;
-}
-
-static bool s_bAPModeRxCtl(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- int iSANodeIndex
-)
-{
- PS802_11Header p802_11Header;
- CMD_STATUS Status;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (IS_CTL_PSPOLL(pbyFrame) || !IS_TYPE_CONTROL(pbyFrame)) {
- p802_11Header = (PS802_11Header)(pbyFrame);
- if (!IS_TYPE_MGMT(pbyFrame)) {
- // Data & PS-Poll packet
- // check frame class
- if (iSANodeIndex > 0) {
- // frame class 3 fliter & checking
- if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_AUTH) {
- // send deauth notification
- // reason = (6) class 2 received from nonauth sta
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS2_NONAUTH),
- &Status
-);
- pr_debug("dpc: send vMgrDeAuthenBeginSta 1\n");
- return true;
- }
- if (pMgmt->sNodeDBTable[iSANodeIndex].eNodeState < NODE_ASSOC) {
- // send deassoc notification
- // reason = (7) class 3 received from nonassoc sta
- vMgrDisassocBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS3_NONASSOC),
- &Status
-);
- pr_debug("dpc: send vMgrDisassocBeginSta 2\n");
- return true;
- }
-
- if (pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable) {
- // delcare received ps-poll event
- if (IS_CTL_PSPOLL(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 1\n");
- } else {
- // check Data PS state
- // if PW bit off, send out all PS bufferring packets.
- if (!IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 2\n");
- }
- }
- } else {
- if (IS_FC_POWERMGT(pbyFrame)) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = true;
- // Once if STA in PS state, enable multicast bufferring
- pMgmt->sNodeDBTable[0].bPSEnable = true;
- } else {
- // clear all pending PS frame.
- if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
- pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = true;
- bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- pr_debug("dpc: WLAN_CMD_RX_PSPOLL 3\n");
-
- }
- }
- }
- } else {
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- (unsigned char *)(p802_11Header->abyAddr2),
- (WLAN_MGMT_REASON_CLASS2_NONAUTH),
- &Status
-);
- pr_debug("dpc: send vMgrDeAuthenBeginSta 3\n");
- pr_debug("BSSID:%pM\n",
- p802_11Header->abyAddr3);
- pr_debug("ADDR2:%pM\n",
- p802_11Header->abyAddr2);
- pr_debug("ADDR1:%pM\n",
- p802_11Header->abyAddr1);
- pr_debug("dpc: wFrameCtl= %x\n",
- p802_11Header->wFrameCtl);
- VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
- pr_debug("dpc:pDevice->byRxMode = %x\n",
- pDevice->byRxMode);
- return true;
- }
- }
- }
- return false;
-}
-
-static bool s_bHandleRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- unsigned char *pbyNewRsr,
- PSKeyItem *pKeyOut,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-)
-{
- unsigned int PayloadLen = FrameSize;
- unsigned char *pbyIV;
- unsigned char byKeyIdx;
- PSKeyItem pKey = NULL;
- unsigned char byDecMode = KEY_CTL_WEP;
- PSMgmtObject pMgmt = pDevice->pMgmt;
+ tsf_time = (__le64 *)(skb_data + bytes_received - 12);
+ sq = skb_data + bytes_received - 4;
+ new_rsr = skb_data + bytes_received - 3;
+ rssi = skb_data + bytes_received - 2;
+ rsr = skb_data + bytes_received - 1;
- *pwRxTSC15_0 = 0;
- *pdwRxTSC47_16 = 0;
+ RFvRSSITodBm(priv, *rssi, &rx_dbm);
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- pbyIV += 6; // 6 is 802.11 address4
- PayloadLen -= 6;
- }
- byKeyIdx = (*(pbyIV+3) & 0xc0);
- byKeyIdx >>= 6;
- pr_debug("\nKeyIdx: %d\n", byKeyIdx);
+ priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
+ priv->uCurrRSSI = *rssi;
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (((*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI)) == 0) &&
- (pDevice->pMgmt->byCSSPK != KEY_CTL_NONE)) {
- // unicast pkt use pairwise key
- pr_debug("unicast pkt\n");
- if (KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, 0xFFFFFFFF, &pKey) == true) {
- if (pDevice->pMgmt->byCSSPK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSPK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- }
- pr_debug("unicast pkt: %d, %p\n", byDecMode, pKey);
- } else {
- // use group key
- KeybGetKey(&(pDevice->sKey), pDevice->abyBSSID, byKeyIdx, &pKey);
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- pr_debug("group pkt: %d, %d, %p\n",
- byKeyIdx, byDecMode, pKey);
- }
- }
- // our WEP only support Default Key
- if (pKey == NULL) {
- // use default group key
- KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, byKeyIdx, &pKey);
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
- }
- *pKeyOut = pKey;
+ skb_pull(skb, 4);
+ skb_trim(skb, frame_size);
- pr_debug("AES:%d %d %d\n",
- pDevice->pMgmt->byCSSPK, pDevice->pMgmt->byCSSGK, byDecMode);
+ rx_status.mactime = le64_to_cpu(*tsf_time);
+ rx_status.band = hw->conf.chandef.chan->band;
+ rx_status.signal = rx_dbm;
+ rx_status.flag = 0;
+ rx_status.freq = hw->conf.chandef.chan->center_freq;
- if (pKey == NULL) {
- pr_debug("pKey == NULL\n");
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
- return false;
- }
- if (byDecMode != pKey->byCipherSuite) {
+ rx_status.rate_idx = rate_idx;
- *pKeyOut = NULL;
- return false;
+ if (ieee80211_has_protected(fc)) {
+ if (priv->byLocalID > REV_ID_VT3253_A1)
+ rx_status.flag = RX_FLAG_DECRYPTED;
}
- if (byDecMode == KEY_CTL_WEP) {
- // handle WEP
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true)) {
- // Software WEP
- // 1. 3253A
- // 2. WEP 256
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); // 24 is 802.11 header,4 is IV, 4 is crc
- memcpy(pDevice->abyPRNG, pbyIV, 3);
- memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);
-
- if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen))
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
-
- }
- } else if ((byDecMode == KEY_CTL_TKIP) ||
- (byDecMode == KEY_CTL_CCMP)) {
- // TKIP/AES
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
- pr_debug("ExtIV: %lx\n", *pdwRxTSC47_16);
- if (byDecMode == KEY_CTL_TKIP)
- *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV + 2), *pbyIV));
- else
- *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
-
- pr_debug("TSC0_15: %x\n", *pwRxTSC15_0);
-
- if ((byDecMode == KEY_CTL_TKIP) &&
- (pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- // Software TKIP
- // 1. 3253 A
- PS802_11Header pMACHeader = (PS802_11Header)(pbyFrame);
-
- TKIPvMixKey(pKey->abyKey, pMACHeader->abyAddr2, *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
- if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("ICV OK!\n");
- } else {
- pr_debug("ICV FAIL!!!\n");
- pr_debug("PayloadLen = %d\n", PayloadLen);
- }
- }
- }// end of TKIP/AES
-
- if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = true;
- return true;
-}
-
-static bool s_bHostWepRxEncryption(
- struct vnt_private *pDevice,
- unsigned char *pbyFrame,
- unsigned int FrameSize,
- unsigned char *pbyRsr,
- bool bOnFly,
- PSKeyItem pKey,
- unsigned char *pbyNewRsr,
- bool *pbExtIV,
- unsigned short *pwRxTSC15_0,
- unsigned long *pdwRxTSC47_16
-)
-{
- unsigned int PayloadLen = FrameSize;
- unsigned char *pbyIV;
- unsigned char byKeyIdx;
- unsigned char byDecMode = KEY_CTL_WEP;
- PS802_11Header pMACHeader;
- *pwRxTSC15_0 = 0;
- *pdwRxTSC47_16 = 0;
-
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
- pbyIV += 6; // 6 is 802.11 address4
- PayloadLen -= 6;
+ if (priv->vif && priv->bDiversityEnable) {
+ if (ieee80211_is_data(fc) &&
+ (frame_size > 50) && priv->vif->bss_conf.assoc)
+ BBvAntennaDiversity(priv, priv->rx_rate, 0);
}
- byKeyIdx = (*(pbyIV+3) & 0xc0);
- byKeyIdx >>= 6;
- pr_debug("\nKeyIdx: %d\n", byKeyIdx);
-
- if (pDevice->pMgmt->byCSSGK == KEY_CTL_TKIP)
- byDecMode = KEY_CTL_TKIP;
- else if (pDevice->pMgmt->byCSSGK == KEY_CTL_CCMP)
- byDecMode = KEY_CTL_CCMP;
-
- pr_debug("AES:%d %d %d\n",
- pDevice->pMgmt->byCSSPK, pDevice->pMgmt->byCSSGK, byDecMode);
-
- if (byDecMode != pKey->byCipherSuite)
- return false;
-
- if (byDecMode == KEY_CTL_WEP) {
- // handle WEP
- pr_debug("byDecMode == KEY_CTL_WEP\n");
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) ||
- (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == true) ||
- !bOnFly) {
- // Software WEP
- // 1. 3253A
- // 2. WEP 256
- // 3. NotOnFly
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 4 + 4); // 24 is 802.11 header,4 is IV, 4 is crc
- memcpy(pDevice->abyPRNG, pbyIV, 3);
- memcpy(pDevice->abyPRNG + 3, pKey->abyKey, pKey->uKeyLength);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyIV+4, pbyIV+4, PayloadLen);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- if (ETHbIsBufferCrc32Ok(pbyIV+4, PayloadLen))
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
+ ieee80211_rx_irqsafe(priv->hw, skb);
- }
- } else if ((byDecMode == KEY_CTL_TKIP) ||
- (byDecMode == KEY_CTL_CCMP)) {
- // TKIP/AES
-
- PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
- *pdwRxTSC47_16 = cpu_to_le32(*(unsigned long *)(pbyIV + 4));
- pr_debug("ExtIV: %lx\n", *pdwRxTSC47_16);
-
- if (byDecMode == KEY_CTL_TKIP)
- *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
- else
- *pwRxTSC15_0 = cpu_to_le16(*(unsigned short *)pbyIV);
-
- pr_debug("TSC0_15: %x\n", *pwRxTSC15_0);
-
- if (byDecMode == KEY_CTL_TKIP) {
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || !bOnFly) {
- // Software TKIP
- // 1. 3253 A
- // 2. NotOnFly
- pr_debug("soft KEY_CTL_TKIP\n");
- pMACHeader = (PS802_11Header)(pbyFrame);
- TKIPvMixKey(pKey->abyKey, pMACHeader->abyAddr2, *pwRxTSC15_0, *pdwRxTSC47_16, pDevice->abyPRNG);
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyIV+8, pbyIV+8, PayloadLen);
- if (ETHbIsBufferCrc32Ok(pbyIV+8, PayloadLen)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("ICV OK!\n");
- } else {
- pr_debug("ICV FAIL!!!\n");
- pr_debug("PayloadLen = %d\n",
- PayloadLen);
- }
- }
- }
-
- if (byDecMode == KEY_CTL_CCMP) {
- if (!bOnFly) {
- // Software CCMP
- // NotOnFly
- pr_debug("soft KEY_CTL_CCMP\n");
- if (AESbGenCCMP(pKey->abyKey, pbyFrame, FrameSize)) {
- *pbyNewRsr |= NEWRSR_DECRYPTOK;
- pr_debug("CCMP MIC compare OK!\n");
- } else {
- pr_debug("CCMP MIC fail!\n");
- }
- }
- }
-
- }// end of TKIP/AES
-
- if ((*(pbyIV+3) & 0x20) != 0)
- *pbExtIV = true;
return true;
}
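The offsets above imply a fixed receive buffer layout: a 4-byte header in front of the 802.11 frame (status, raw rate, then a little-endian byte count), and a 12-byte trailer behind it (64-bit TSF followed by SQ, new RSR, RSSI and RSR); skb_pull(skb, 4) and skb_trim(skb, frame_size) keep only the frame itself. A commented sketch of that layout, inferred from the pointer arithmetic rather than from a datasheet:

#include <linux/types.h>

/*
 * Assumed layout of one received DMA buffer, reverse-engineered from the
 * offsets in vnt_rx_data(); illustrative only, not an official structure.
 *
 *   [0]               rx_sts      receive status / channel bits
 *   [1]               rx_rate     raw PHY rate code
 *   [2..3]            byte count  little-endian frame length
 *   [4..]             802.11 frame (frame_size bytes; kept after pull/trim)
 *   [len-12..len-5]   TSF         64-bit timestamp
 *   [len-4]           SQ
 *   [len-3]           new RSR
 *   [len-2]           RSSI
 *   [len-1]           RSR
 */
struct vnt_rx_header_sketch {
        u8 rx_sts;
        u8 rx_rate;
        __le16 byte_count;
};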
-static bool s_bAPModeRxData(
- struct vnt_private *pDevice,
- struct sk_buff *skb,
- unsigned int FrameSize,
- unsigned int cbHeaderOffset,
- int iSANodeIndex,
- int iDANodeIndex
-)
+bool vnt_receive_frame(struct vnt_private *priv, PSRxDesc curr_rd)
{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- bool bRelayAndForward = false;
- bool bRelayOnly = false;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned short wAID;
-
- struct sk_buff *skbcpy = NULL;
+ PDEVICE_RD_INFO rd_info = curr_rd->pRDInfo;
+ struct sk_buff *skb;
+ u16 frame_size;
- if (FrameSize > CB_MAX_BUF_SIZE)
- return false;
- // check DA
- if (is_multicast_ether_addr((unsigned char *)(skb->data+cbHeaderOffset))) {
- if (pMgmt->sNodeDBTable[0].bPSEnable) {
- skbcpy = dev_alloc_skb((int)pDevice->rx_buf_sz);
+ skb = rd_info->skb;
- // if any node in PS mode, buffer packet until DTIM.
- if (skbcpy == NULL) {
- pr_info("relay multicast no skb available\n");
- } else {
- skbcpy->dev = pDevice->dev;
- skbcpy->len = FrameSize;
- memcpy(skbcpy->data, skb->data+cbHeaderOffset, FrameSize);
- skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skbcpy);
+ pci_unmap_single(priv->pcid, rd_info->skb_dma,
+ priv->rx_buf_sz, PCI_DMA_FROMDEVICE);
- pMgmt->sNodeDBTable[0].wEnQueueCnt++;
- // set tx map
- pMgmt->abyPSTxMap[0] |= byMask[0];
- }
- } else {
- bRelayAndForward = true;
- }
- } else {
- // check if relay
- if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data+cbHeaderOffset), &iDANodeIndex)) {
- if (pMgmt->sNodeDBTable[iDANodeIndex].eNodeState >= NODE_ASSOC) {
- if (pMgmt->sNodeDBTable[iDANodeIndex].bPSEnable) {
- // queue this skb until next PS tx, and then release.
+ frame_size = le16_to_cpu(curr_rd->m_rd1RD1.wReqCount)
+ - cpu_to_le16(curr_rd->m_rd0RD0.wResCount);
- skb->data += cbHeaderOffset;
- skb->tail += cbHeaderOffset;
- skb_put(skb, FrameSize);
- skb_queue_tail(&pMgmt->sNodeDBTable[iDANodeIndex].sTxPSQueue, skb);
- pMgmt->sNodeDBTable[iDANodeIndex].wEnQueueCnt++;
- wAID = pMgmt->sNodeDBTable[iDANodeIndex].wAID;
- pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
- pr_debug("relay: index= %d, pMgmt->abyPSTxMap[%d]= %d\n",
- iDANodeIndex, (wAID >> 3),
- pMgmt->abyPSTxMap[wAID >> 3]);
- return true;
- } else {
- bRelayOnly = true;
- }
- }
- }
+ if ((frame_size > 2364) || (frame_size < 33)) {
+ /* Frame Size error drop this packet.*/
+ dev_dbg(&priv->pcid->dev, "Wrong frame size %d\n", frame_size);
+ dev_kfree_skb_irq(skb);
+ return true;
}
- if (bRelayOnly || bRelayAndForward) {
- // relay this packet right now
- if (bRelayAndForward)
- iDANodeIndex = 0;
+ if (vnt_rx_data(priv, skb, frame_size))
+ return true;
- if ((pDevice->uAssocCount > 1) && (iDANodeIndex >= 0))
- ROUTEbRelay(pDevice, (unsigned char *)(skb->data + cbHeaderOffset), FrameSize, (unsigned int)iDANodeIndex);
-
- if (bRelayOnly)
- return false;
- }
- // none associate, don't forward
- if (pDevice->uAssocCount == 0)
- return false;
+ dev_kfree_skb_irq(skb);
return true;
}
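The 2364/33 bounds come straight from that layout: the comments in the removed device_receive_frame() put the maximum at 2312 bytes of payload plus a 30-byte header, 4-byte FCS, 2 bytes of padding, the 4-byte receive header, 8-byte TSF and 4 trailing status bytes, and the minimum at a 10-byte ACK with the same overhead. A trivial check of the sums (so "< 33" matches the old "<= 32" test):

#include <assert.h>

/* Sums reconstructed from the comments in the removed device_receive_frame():
 * max: 2312 payload + 30 hdr + 4 FCS + 2 pad + 4 rx header + 8 TSF + 4 status
 * min: 10 (ACK) + 4 FCS + 2 pad + 4 rx header + 8 TSF + 4 status
 */
int main(void)
{
        assert(2312 + 30 + 4 + 2 + 4 + 8 + 4 == 2364);
        assert(10 + 4 + 2 + 4 + 8 + 4 == 32);  /* "< 33" equals the old "<= 32" */
        return 0;
}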
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index a068b846b1be..ad495719a251 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -29,14 +29,8 @@
#ifndef __DPC_H__
#define __DPC_H__
-#include "ttype.h"
#include "device.h"
-#include "wcmd.h"
-bool
-device_receive_frame(
- struct vnt_private *,
- PSRxDesc pCurrRD
-);
+bool vnt_receive_frame(struct vnt_private *priv, PSRxDesc curr_rd);
-#endif // __RXTX_H__
+#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
deleted file mode 100644
index ae0dade229d8..000000000000
--- a/drivers/staging/vt6655/hostap.c
+++ /dev/null
@@ -1,765 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: hostap.c
- *
- * Purpose: handle hostap deamon ioctl input/out functions
- *
- * Author: Lyndon Chen
- *
- * Date: Oct. 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "hostap.h"
-#include "iocmd.h"
-#include "mac.h"
-#include "card.h"
-#include "baseband.h"
-#include "wpactl.h"
-#include "key.h"
-
-#define VIAWGET_HOSTAPD_MAX_BUF_SIZE 1024
-#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT0
-#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
-#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- * register net_device (AP) for hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
-{
- struct vnt_private *apdev_priv;
- struct net_device *dev = pDevice->dev;
- int ret;
- const struct net_device_ops apdev_netdev_ops = {
- .ndo_start_xmit = pDevice->tx_80211,
- };
-
- pr_debug("%s: Enabling hostapd mode\n", dev->name);
-
- pDevice->apdev = alloc_etherdev(sizeof(*apdev_priv));
- if (pDevice->apdev == NULL)
- return -ENOMEM;
-
- apdev_priv = netdev_priv(pDevice->apdev);
- *apdev_priv = *pDevice;
- eth_hw_addr_inherit(pDevice->apdev, dev);
-
- pDevice->apdev->netdev_ops = &apdev_netdev_ops;
-
- pDevice->apdev->type = ARPHRD_IEEE80211;
-
- pDevice->apdev->base_addr = dev->base_addr;
- pDevice->apdev->irq = dev->irq;
- pDevice->apdev->mem_start = dev->mem_start;
- pDevice->apdev->mem_end = dev->mem_end;
- sprintf(pDevice->apdev->name, "%sap", dev->name);
- if (rtnl_locked)
- ret = register_netdevice(pDevice->apdev);
- else
- ret = register_netdev(pDevice->apdev);
- if (ret) {
- pr_debug("%s: register_netdevice(AP) failed!\n",
- dev->name);
- free_netdev(pDevice->apdev);
- pDevice->apdev = NULL;
- return -1;
- }
-
- pr_debug("%s: Registered netdevice %s for AP management\n",
- dev->name, pDevice->apdev->name);
-
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
-
- return 0;
-}
-
-/*
- * Description:
- * unregister net_device(AP)
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
-{
- pr_debug("%s: disabling hostapd mode\n", pDevice->dev->name);
-
- if (pDevice->apdev && pDevice->apdev->name && pDevice->apdev->name[0]) {
- if (rtnl_locked)
- unregister_netdevice(pDevice->apdev);
- else
- unregister_netdev(pDevice->apdev);
- pr_debug("%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->apdev->name);
- }
- if (pDevice->apdev)
- free_netdev(pDevice->apdev);
- pDevice->apdev = NULL;
- pDevice->bEnable8021x = false;
- pDevice->bEnableHostWEP = false;
- pDevice->bEncryptionEnable = false;
-
-/* 4.2007-0118-03,<Add> by EinsnLiu */
-/* execute some clear work */
- pDevice->pMgmt->byCSSPK = KEY_CTL_NONE;
- pDevice->pMgmt->byCSSGK = KEY_CTL_NONE;
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
-
- return 0;
-}
-
-/*
- * Description:
- * Set enable/disable hostapd mode
- *
- * Parameters:
- * In:
- * pDevice -
- * rtnl_locked -
- * Out:
- *
- * Return Value:
- *
- */
-
-int vt6655_hostap_set_hostapd(struct vnt_private *pDevice,
- int val, int rtnl_locked)
-{
- if (val < 0 || val > 1)
- return -EINVAL;
-
- if (pDevice->bEnableHostapd == val)
- return 0;
-
- pDevice->bEnableHostapd = val;
-
- if (val)
- return hostap_enable_hostapd(pDevice, rtnl_locked);
- else
- return hostap_disable_hostapd(pDevice, rtnl_locked);
-}
-
-/*
- * Description:
- * remove station function supported for hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_remove_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, param->sta_addr, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- return -ENOENT;
-
- return 0;
-}
-
-/*
- * Description:
- * add a station from hostap deamon
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_add_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (!BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex))
- BSSvCreateOneNode(pDevice, &uNodeIndex);
-
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, param->sta_addr, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability;
-/* TODO listenInterval */
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = false;
- pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates;
-
- /* set max tx rate */
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
- /* set max basic rate */
- pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate = RATE_2M;
- /* Todo: check sta preamble, if ap can't support, set status code */
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(pMgmt->sNodeDBTable[uNodeIndex].wCapInfo);
-
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)param->u.add_sta.aid;
-
- pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer = jiffies;
-
- pr_debug("Add STA AID= %d\n", pMgmt->sNodeDBTable[uNodeIndex].wAID);
- pr_debug("MAC=%pM\n", param->sta_addr);
- pr_debug("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
-
- return 0;
-}
-
-/*
- * Description:
- * get station info
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int hostap_get_info_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
- param->u.get_info_sta.inactive_sec =
- (jiffies - pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer) / HZ;
- } else {
- return -ENOENT;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * set station flag
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_flags_sta(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uNodeIndex;
-
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) {
- pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or;
- pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and;
- pr_debug(" dwFlags = %x\n",
- (unsigned int)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
- } else {
- return -ENOENT;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * set generic element (wpa ie)
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_generic_element(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
- return -EINVAL;
-
- memcpy(pMgmt->abyWPAIE,
- param->u.generic_elem.data,
- param->u.generic_elem.len
- );
-
- pMgmt->wWPAIELen = param->u.generic_elem.len;
-
- pr_debug("pMgmt->wWPAIELen = %d\n", pMgmt->wWPAIELen);
-
- /* disable wpa */
- if (pMgmt->wWPAIELen == 0) {
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pr_debug(" No WPAIE, Disable WPA\n");
- } else {
- /* enable wpa */
- if ((pMgmt->abyWPAIE[0] == WLAN_EID_RSN_WPA) ||
- (pMgmt->abyWPAIE[0] == WLAN_EID_RSN)) {
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- pr_debug("Set WPAIE enable WPA\n");
- } else
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * flush station nodes table.
- *
- * Parameters:
- * In:
- * pDevice -
- * Out:
- *
- * Return Value:
- *
- */
-
-static void hostap_flush_sta(struct vnt_private *pDevice)
-{
- /* reserved node index =0 for multicast node. */
- BSSvClearNodeDBTable(pDevice, 1);
- pDevice->uAssocCount = 0;
-}
-
-/*
- * Description:
- * set each stations encryption key
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_set_encryption(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param,
- int param_len)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned long dwKeyIndex = 0;
- unsigned char abyKey[MAX_KEY_LEN];
- unsigned char abySeq[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned char byKeyDecMode = KEY_CTL_WEP;
- int iNodeIndex = -1;
- int ii;
- bool bKeyTableFull = false;
- unsigned short wKeyCtl = 0;
-
- param->u.crypt.err = 0;
-
- if (param->u.crypt.alg > WPA_ALG_CCMP)
- return -EINVAL;
-
- if ((param->u.crypt.idx > 3) || (param->u.crypt.key_len > MAX_KEY_LEN)) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
- pr_debug(" HOSTAP_CRYPT_ERR_KEY_SET_FAILED\n");
- return -EINVAL;
- }
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- if (param->u.crypt.idx >= MAX_GROUP_KEY)
- return -EINVAL;
- iNodeIndex = 0;
-
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- pr_debug(" HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
- return -EINVAL;
- }
- }
- pr_debug(" hostap_set_encryption: sta_index %d\n", iNodeIndex);
- pr_debug(" hostap_set_encryption: alg %d\n", param->u.crypt.alg);
-
- if (param->u.crypt.alg == WPA_ALG_NONE) {
- if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly) {
- if (!KeybRemoveKey(&(pDevice->sKey),
- param->sta_addr,
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex,
- pDevice->PortOffset)) {
- pr_debug("KeybRemoveKey fail\n");
- }
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- }
- pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0;
- pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = 0;
- pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = 0;
- pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = 0;
- memset(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- 0,
- MAX_KEY_LEN
-);
-
- return 0;
- }
-
- memcpy(abyKey, param->u.crypt.key, param->u.crypt.key_len);
- /* copy to node key tbl */
- pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = param->u.crypt.idx;
- pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = param->u.crypt.key_len;
- memcpy(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- param->u.crypt.key,
- param->u.crypt.key_len
-);
-
- dwKeyIndex = (unsigned long)(param->u.crypt.idx);
- if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->bTransmitKey = true;
- dwKeyIndex |= (1 << 31);
- }
-
- if (param->u.crypt.alg == WPA_ALG_WEP) {
- if ((pDevice->bEnable8021x == false) || (iNodeIndex == 0)) {
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.crypt.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
-
- } else {
- /* 8021x enable, individual key */
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (KeybSetKey(&(pDevice->sKey),
- &param->sta_addr[0],
- dwKeyIndex & ~(USE_KEYRSC),
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- /* Key Table Full */
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- bKeyTableFull = true;
- }
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = true;
- pMgmt->byCSSPK = KEY_CTL_WEP;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
- return 0;
- }
-
- if (param->u.crypt.seq) {
- memcpy(&abySeq, param->u.crypt.seq, 8);
- for (ii = 0; ii < 8; ii++)
- KeyRSC |= (u64)abySeq[ii] << (ii * 8);
-
- dwKeyIndex |= 1 << 29;
- pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = KeyRSC;
- }
-
- if (param->u.crypt.alg == WPA_ALG_TKIP) {
- if (param->u.crypt.key_len != MAX_KEY_LEN)
- return -EINVAL;
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- byKeyDecMode = KEY_CTL_TKIP;
- pMgmt->byCSSPK = KEY_CTL_TKIP;
- pMgmt->byCSSGK = KEY_CTL_TKIP;
- }
-
- if (param->u.crypt.alg == WPA_ALG_CCMP) {
- if ((param->u.crypt.key_len != AES_KEY_LEN) ||
- (pDevice->byLocalID <= REV_ID_VT3253_A1))
- return -EINVAL;
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- byKeyDecMode = KEY_CTL_CCMP;
- pMgmt->byCSSPK = KEY_CTL_CCMP;
- pMgmt->byCSSGK = KEY_CTL_CCMP;
- }
-
- if (iNodeIndex == 0) {
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID);
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (KeybSetKey(&(pDevice->sKey),
- &param->sta_addr[0],
- dwKeyIndex,
- param->u.crypt.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true;
-
- } else {
- /* Key Table Full */
- pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
- bKeyTableFull = true;
- pr_debug(" Key Table Full\n");
- }
-
- }
-
- if (bKeyTableFull) {
- wKeyCtl &= 0x7F00; /* clear all key control filed */
- wKeyCtl |= (byKeyDecMode << 4);
- wKeyCtl |= (byKeyDecMode);
- wKeyCtl |= 0x0044; /* use group key for all address */
- wKeyCtl |= 0x4000; /* disable KeyTable[MAX_KEY_TABLE-1] on-fly to genernate rx int */
- MACvSetDefaultKeyCtl(pDevice->PortOffset, wKeyCtl, MAX_KEY_TABLE-1, pDevice->byLocalID);
- }
-
- pr_debug(" Set key sta_index= %d\n", iNodeIndex);
- pr_debug(" tx_index=%d len=%d\n",
- param->u.crypt.idx, param->u.crypt.key_len);
- pr_debug(" key=%x-%x-%x-%x-%x-xxxxx\n",
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[1],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[2],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[3],
- pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[4]);
-
- /* set wep key */
- pDevice->bEncryptionEnable = true;
- pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode;
- pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex;
- pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0;
- pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0;
-
- return 0;
-}
-
-/*
- * Description:
- * get each stations encryption key
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-static int hostap_get_encryption(struct vnt_private *pDevice,
- struct viawget_hostapd_param *param,
- int param_len)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
- int iNodeIndex = 0;
-
- param->u.crypt.err = 0;
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- iNodeIndex = 0;
- } else {
- if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- pr_debug("hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n");
- return -EINVAL;
- }
- }
- pr_debug("hostap_get_encryption: %d\n", iNodeIndex);
- memset(param->u.crypt.seq, 0, 8);
- for (ii = 0; ii < 8; ii++)
- param->u.crypt.seq[ii] = (unsigned char)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8);
-
- return 0;
-}
-
-/*
- * Description:
- * vt6655_hostap_ioctl main function supported for hostap deamon.
- *
- * Parameters:
- * In:
- * pDevice -
- * iw_point -
- * Out:
- *
- * Return Value:
- *
- */
-int vt6655_hostap_ioctl(struct vnt_private *pDevice, struct iw_point *p)
-{
- struct viawget_hostapd_param *param;
- int ret = 0;
- int ap_ioctl = 0;
-
- if (p->length < sizeof(struct viawget_hostapd_param) ||
- p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = kmalloc((int)p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
- }
-
- switch (param->cmd) {
- case VIAWGET_HOSTAPD_SET_ENCRYPTION:
- pr_debug("VIAWGET_HOSTAPD_SET_ENCRYPTION\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_set_encryption(pDevice, param, p->length);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_GET_ENCRYPTION:
- pr_debug("VIAWGET_HOSTAPD_GET_ENCRYPTION\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_get_encryption(pDevice, param, p->length);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR:
- pr_debug("VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_FLUSH:
- pr_debug("VIAWGET_HOSTAPD_FLUSH\n");
- spin_lock_irq(&pDevice->lock);
- hostap_flush_sta(pDevice);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_ADD_STA:
- pr_debug("VIAWGET_HOSTAPD_ADD_STA\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_add_sta(pDevice, param);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_REMOVE_STA:
- pr_debug("VIAWGET_HOSTAPD_REMOVE_STA\n");
- spin_lock_irq(&pDevice->lock);
- ret = hostap_remove_sta(pDevice, param);
- spin_unlock_irq(&pDevice->lock);
- break;
- case VIAWGET_HOSTAPD_GET_INFO_STA:
- pr_debug("VIAWGET_HOSTAPD_GET_INFO_STA\n");
- ret = hostap_get_info_sta(pDevice, param);
- ap_ioctl = 1;
- break;
- case VIAWGET_HOSTAPD_SET_FLAGS_STA:
- pr_debug("VIAWGET_HOSTAPD_SET_FLAGS_STA\n");
- ret = hostap_set_flags_sta(pDevice, param);
- break;
- case VIAWGET_HOSTAPD_MLME:
- pr_debug("VIAWGET_HOSTAPD_MLME\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT:
- pr_debug("VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT\n");
- ret = hostap_set_generic_element(pDevice, param);
- break;
- case VIAWGET_HOSTAPD_SCAN_REQ:
- pr_debug("VIAWGET_HOSTAPD_SCAN_REQ\n");
- ret = -EOPNOTSUPP;
- goto out;
- case VIAWGET_HOSTAPD_STA_CLEAR_STATS:
- pr_debug("VIAWGET_HOSTAPD_STA_CLEAR_STATS\n");
- ret = -EOPNOTSUPP;
- goto out;
- default:
- pr_debug("vt6655_hostap_ioctl: unknown cmd=%d\n",
- (int)param->cmd);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- if ((ret == 0) && ap_ioctl) {
- if (copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
- }
-
-out:
- kfree(param);
- return ret;
-}
diff --git a/drivers/staging/vt6655/hostap.h b/drivers/staging/vt6655/hostap.h
deleted file mode 100644
index 17df4e403fcf..000000000000
--- a/drivers/staging/vt6655/hostap.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: hostap.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __HOSTAP_H__
-#define __HOSTAP_H__
-
-#include "device.h"
-
-#define WLAN_RATE_1M BIT0
-#define WLAN_RATE_2M BIT1
-#define WLAN_RATE_5M5 BIT2
-#define WLAN_RATE_11M BIT3
-#define WLAN_RATE_6M BIT4
-#define WLAN_RATE_9M BIT5
-#define WLAN_RATE_12M BIT6
-#define WLAN_RATE_18M BIT7
-#define WLAN_RATE_24M BIT8
-#define WLAN_RATE_36M BIT9
-#define WLAN_RATE_48M BIT10
-#define WLAN_RATE_54M BIT11
-
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
-
-#ifndef ARPHRD_IEEE80211
-#define ARPHRD_IEEE80211 801
-#endif
-
-int vt6655_hostap_set_hostapd(struct vnt_private *, int val, int rtnl_locked);
-int vt6655_hostap_ioctl(struct vnt_private *, struct iw_point *p);
-
-#endif // __HOSTAP_H__
diff --git a/drivers/staging/vt6655/iocmd.h b/drivers/staging/vt6655/iocmd.h
deleted file mode 100644
index a665cfd8a482..000000000000
--- a/drivers/staging/vt6655/iocmd.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iocmd.h
- *
- * Purpose: Handles the viawget ioctl private interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __IOCMD_H__
-#define __IOCMD_H__
-
-#include "ttype.h"
-
-// ioctl Command code
-#define MAGIC_CODE 0x3142
-#define IOCTL_CMD_TEST (SIOCDEVPRIVATE + 0)
-#define IOCTL_CMD_SET (SIOCDEVPRIVATE + 1)
-#define IOCTL_CMD_HOSTAPD (SIOCDEVPRIVATE + 2)
-#define IOCTL_CMD_WPA (SIOCDEVPRIVATE + 3)
-
-typedef enum tagWMAC_CMD {
- WLAN_CMD_BSS_SCAN,
- WLAN_CMD_BSS_JOIN,
- WLAN_CMD_DISASSOC,
- WLAN_CMD_SET_WEP,
- WLAN_CMD_GET_LINK,
- WLAN_CMD_GET_LISTLEN,
- WLAN_CMD_GET_LIST,
- WLAN_CMD_GET_MIB,
- WLAN_CMD_GET_STAT,
- WLAN_CMD_STOP_MAC,
- WLAN_CMD_START_MAC,
- WLAN_CMD_AP_START,
- WLAN_CMD_SET_HOSTAPD,
- WLAN_CMD_SET_HOSTAPD_STA,
- WLAN_CMD_SET_802_1X,
- WLAN_CMD_SET_HOST_WEP,
- WLAN_CMD_SET_WPA,
- WLAN_CMD_GET_NODE_CNT,
- WLAN_CMD_ZONETYPE_SET,
- WLAN_CMD_GET_NODE_LIST
-} WMAC_CMD, *PWMAC_CMD;
-
-typedef enum tagWZONETYPE {
- ZoneType_USA = 0,
- ZoneType_Japan = 1,
- ZoneType_Europe = 2
-} WZONETYPE;
-
-#define ADHOC 0
-#define INFRA 1
-#define BOTH 2
-#define AP 3
-
-#define ADHOC_STARTED 1
-#define ADHOC_JOINTED 2
-
-#define PHY80211a 0
-#define PHY80211b 1
-#define PHY80211g 2
-
-#define SSID_ID 0
-#define SSID_MAXLEN 32
-#define BSSID_LEN 6
-#define WEP_NKEYS 4
-#define WEP_KEYMAXLEN 29
-#define WEP_40BIT_LEN 5
-#define WEP_104BIT_LEN 13
-#define WEP_232BIT_LEN 16
-
-// Ioctl interface structure
-// Command structure
-//
-#pragma pack(1)
-typedef struct tagSCmdRequest {
- u8 name[16];
- void __user *data;
- u16 wResult;
- u16 wCmdCode;
-} SCmdRequest, *PSCmdRequest;
-
-//
-// Scan
-//
-
-typedef struct tagSCmdScan {
- u8 ssid[SSID_MAXLEN + 2];
-} SCmdScan, *PSCmdScan;
-
-//
-// BSS Join
-//
-
-typedef struct tagSCmdBSSJoin {
- u16 wBSSType;
- u16 wBBPType;
- u8 ssid[SSID_MAXLEN + 2];
- u32 uChannel;
- bool bPSEnable;
- bool bShareKeyAuth;
-} SCmdBSSJoin, *PSCmdBSSJoin;
-
-//
-// Zonetype Setting
-//
-
-typedef struct tagSCmdZoneTypeSet {
- bool bWrite;
- WZONETYPE ZoneType;
-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
-
-#ifdef WPA_SM_Transtatus
-typedef struct tagSWPAResult {
- char ifname[100];
- u8 proto;
- u8 key_mgmt;
- u8 eap_type;
- bool authenticated;
-} SWPAResult, *PSWPAResult;
-#endif
-
-typedef struct tagSCmdStartAP {
- u16 wBSSType;
- u16 wBBPType;
- u8 ssid[SSID_MAXLEN + 2];
- u32 uChannel;
- u32 uBeaconInt;
- bool bShareKeyAuth;
- u8 byBasicRate;
-} SCmdStartAP, *PSCmdStartAP;
-
-typedef struct tagSCmdSetWEP {
- bool bEnableWep;
- u8 byKeyIndex;
- u8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
- bool bWepKeyAvailable[WEP_NKEYS];
- u32 auWepKeyLength[WEP_NKEYS];
-} SCmdSetWEP, *PSCmdSetWEP;
-
-typedef struct tagSBSSIDItem {
- u32 uChannel;
- u8 abyBSSID[BSSID_LEN];
- u8 abySSID[SSID_MAXLEN + 1];
- u8 byNetType;
- u16 wBeaconInterval;
- u16 wCapInfo; // for address of byNetType at align 4
-
- bool bWEPOn;
- u32 uRSSI;
-} SBSSIDItem;
-
-typedef struct tagSBSSIDList {
- u32 uItem;
- SBSSIDItem sBSSIDList[0];
-} SBSSIDList, *PSBSSIDList;
-
-typedef struct tagSCmdLinkStatus {
- bool bLink;
- u16 wBSSType;
- u8 byState;
- u8 abyBSSID[BSSID_LEN];
- u8 abySSID[SSID_MAXLEN + 2];
- u32 uChannel;
- u32 uLinkRate;
-} SCmdLinkStatus, *PSCmdLinkStatus;
-
-//
-// 802.11 counter
-//
-typedef struct tagSDot11MIBCount {
- u32 TransmittedFragmentCount;
- u32 MulticastTransmittedFrameCount;
- u32 FailedCount;
- u32 RetryCount;
- u32 MultipleRetryCount;
- u32 RTSSuccessCount;
- u32 RTSFailureCount;
- u32 ACKFailureCount;
- u32 FrameDuplicateCount;
- u32 ReceivedFragmentCount;
- u32 MulticastReceivedFrameCount;
- u32 FCSErrorCount;
-} SDot11MIBCount, *PSDot11MIBCount;
-
-//
-// statistic counter
-//
-typedef struct tagSStatMIBCount {
- //
- // ISR status count
- //
- u32 dwIsrTx0OK;
- u32 dwIsrTx1OK;
- u32 dwIsrBeaconTxOK;
- u32 dwIsrRxOK;
- u32 dwIsrTBTTInt;
- u32 dwIsrSTIMERInt;
- u32 dwIsrUnrecoverableError;
- u32 dwIsrSoftInterrupt;
- u32 dwIsrRxNoBuf;
-
- u32 dwIsrUnknown;
-
- // RSR status count
- //
- u32 dwRsrFrmAlgnErr;
- u32 dwRsrErr;
- u32 dwRsrCRCErr;
- u32 dwRsrCRCOk;
- u32 dwRsrBSSIDOk;
- u32 dwRsrADDROk;
- u32 dwRsrICVOk;
- u32 dwNewRsrShortPreamble;
- u32 dwRsrLong;
- u32 dwRsrRunt;
-
- u32 dwRsrRxControl;
- u32 dwRsrRxData;
- u32 dwRsrRxManage;
-
- u32 dwRsrRxPacket;
- u32 dwRsrRxOctet;
- u32 dwRsrBroadcast;
- u32 dwRsrMulticast;
- u32 dwRsrDirected;
- // 64-bit OID
- u32 ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- u32 ullRxBroadcastBytes;
- u32 ullRxMulticastBytes;
- u32 ullRxDirectedBytes;
- u32 ullRxBroadcastFrames;
- u32 ullRxMulticastFrames;
- u32 ullRxDirectedFrames;
-
- u32 dwRsrRxFragment;
- u32 dwRsrRxFrmLen64;
- u32 dwRsrRxFrmLen65_127;
- u32 dwRsrRxFrmLen128_255;
- u32 dwRsrRxFrmLen256_511;
- u32 dwRsrRxFrmLen512_1023;
- u32 dwRsrRxFrmLen1024_1518;
-
- // TSR0,1 status count
- //
- u32 dwTsrTotalRetry[2]; // total collision retry count
- u32 dwTsrOnceRetry[2]; // this packet only occur one collision
- u32 dwTsrMoreThanOnceRetry[2]; // this packet occur more than one collision
- u32 dwTsrRetry[2]; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- u32 dwTsrACKData[2];
- u32 dwTsrErr[2];
- u32 dwAllTsrOK[2];
- u32 dwTsrRetryTimeout[2];
- u32 dwTsrTransmitTimeout[2];
-
- u32 dwTsrTxPacket[2];
- u32 dwTsrTxOctet[2];
- u32 dwTsrBroadcast[2];
- u32 dwTsrMulticast[2];
- u32 dwTsrDirected[2];
-
- // RD/TD count
- u32 dwCntRxFrmLength;
- u32 dwCntTxBufLength;
-
- u8 abyCntRxPattern[16];
- u8 abyCntTxPattern[16];
-
- // Software check....
- u32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- u32 dwCntDecryptErr; // rx buffer data software compare CRC err count
- u32 dwCntRxICVErr; // rx buffer data software compare CRC err count
- u32 idxRxErrorDesc; // index for rx data error RD
-
- // 64-bit OID
- u32 ullTsrOK[2];
-
- // for some optional OIDs (64 bits) and DMI support
- u32 ullTxBroadcastFrames[2];
- u32 ullTxMulticastFrames[2];
- u32 ullTxDirectedFrames[2];
- u32 ullTxBroadcastBytes[2];
- u32 ullTxMulticastBytes[2];
- u32 ullTxDirectedBytes[2];
-} SStatMIBCount, *PSStatMIBCount;
-
-typedef struct tagSNodeItem {
- // STA info
- u16 wAID;
- u8 abyMACAddr[6];
- u16 wTxDataRate;
- u16 wInActiveCount;
- u16 wEnQueueCnt;
- u16 wFlags;
- bool bPWBitOn;
- u8 byKeyIndex;
- u16 wWepKeyLength;
- u8 abyWepKey[WEP_KEYMAXLEN];
- // Auto rate fallback vars
- bool bIsInFallback;
- u32 uTxFailures;
- u32 uTxAttempts;
- u16 wFailureRatio;
-} SNodeItem;
-
-typedef struct tagSNodeList {
- u32 uItem;
- SNodeItem sNodeList[0];
-} SNodeList, *PSNodeList;
-
-typedef struct tagSCmdValue {
- u32 dwValue;
-} SCmdValue, *PSCmdValue;
-
-//
-// hostapd & viawget ioctl related
-//
-
-enum {
- VIAWGET_HOSTAPD_FLUSH = 1,
- VIAWGET_HOSTAPD_ADD_STA = 2,
- VIAWGET_HOSTAPD_REMOVE_STA = 3,
- VIAWGET_HOSTAPD_GET_INFO_STA = 4,
- VIAWGET_HOSTAPD_SET_ENCRYPTION = 5,
- VIAWGET_HOSTAPD_GET_ENCRYPTION = 6,
- VIAWGET_HOSTAPD_SET_FLAGS_STA = 7,
- VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR = 8,
- VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT = 9,
- VIAWGET_HOSTAPD_MLME = 10,
- VIAWGET_HOSTAPD_SCAN_REQ = 11,
- VIAWGET_HOSTAPD_STA_CLEAR_STATS = 12,
-};
-
-#define VIAWGET_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- ((int)(&((struct viawget_hostapd_param *)0)->u.generic_elem.data))
-
-// Maximum length for algorithm names (-1 for nul termination) used in ioctl()
-
-struct viawget_hostapd_param {
- u32 cmd;
- u8 sta_addr[6];
- union {
- struct {
- u16 aid;
- u16 capability;
- u8 tx_supp_rates;
- } add_sta;
- struct {
- u32 inactive_sec;
- } get_info_sta;
- struct {
- u8 alg;
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8];
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u32 flags_and;
- u32 flags_or;
- } set_flags_sta;
- struct {
- u16 rid;
- u16 len;
- u8 data[0];
- } rid;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-#pragma pack()
-
-#endif //__IOCMD_H__
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
deleted file mode 100644
index 970e80d92fb9..000000000000
--- a/drivers/staging/vt6655/ioctl.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: ioctl.c
- *
- * Purpose: private ioctl functions
- *
- * Author: Lyndon Chen
- *
- * Date: Auguest 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "ioctl.h"
-#include "iocmd.h"
-#include "mac.h"
-#include "card.h"
-#include "hostap.h"
-#include "wpactl.h"
-#include "rf.h"
-
-#ifdef WPA_SM_Transtatus
-SWPAResult wpa_Result;
-#endif
-
-int private_ioctl(struct vnt_private *pDevice, struct ifreq *rq)
-{
- PSCmdRequest pReq = (PSCmdRequest)rq;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int result = 0;
- PWLAN_IE_SSID pItemSSID;
- SCmdBSSJoin sJoinCmd;
- SCmdZoneTypeSet sZoneTypeCmd;
- SCmdScan sScanCmd;
- SCmdStartAP sStartAPCmd;
- SCmdSetWEP sWEPCmd;
- SCmdValue sValue;
- SBSSIDList sList;
- SNodeList sNodeList;
- PSBSSIDList pList;
- PSNodeList pNodeList;
- unsigned int cbListCount;
- PKnownBSS pBSS;
- PKnownNodeDB pNode;
- unsigned int ii, jj;
- unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned long dwKeyIndex = 0;
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- long ldBm;
-
- pReq->wResult = 0;
-
- switch (pReq->wCmdCode) {
- case WLAN_CMD_BSS_SCAN:
- pr_debug("WLAN_CMD_BSS_SCAN..begin\n");
- if (copy_from_user(&sScanCmd, pReq->data, sizeof(SCmdScan))) {
- result = -EFAULT;
- break;
- }
-
- pItemSSID = (PWLAN_IE_SSID)sScanCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- if (pItemSSID->len != 0) {
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- }
-
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- spin_lock_irq(&pDevice->lock);
- if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((void *)pDevice, false);
- else
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
-
- if (pItemSSID->len != 0)
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- else
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_ZONETYPE_SET:
- /* mike add :can't support. */
- result = -EOPNOTSUPP;
- break;
-
- if (copy_from_user(&sZoneTypeCmd, pReq->data, sizeof(SCmdZoneTypeSet))) {
- result = -EFAULT;
- break;
- }
-
- if (sZoneTypeCmd.bWrite == true) {
- /* write zonetype */
- if (sZoneTypeCmd.ZoneType == ZoneType_USA) {
- /* set to USA */
- pr_debug("set_ZoneType:USA\n");
- } else if (sZoneTypeCmd.ZoneType == ZoneType_Japan) {
- /* set to Japan */
- pr_debug("set_ZoneType:Japan\n");
- } else if (sZoneTypeCmd.ZoneType == ZoneType_Europe) {
- /* set to Europe */
- pr_debug("set_ZoneType:Europe\n");
- }
- } else {
- /* read zonetype */
- unsigned char zonetype = 0;
-
- if (zonetype == 0x00) { /* USA */
- sZoneTypeCmd.ZoneType = ZoneType_USA;
- } else if (zonetype == 0x01) { /* Japan */
- sZoneTypeCmd.ZoneType = ZoneType_Japan;
- } else if (zonetype == 0x02) { /* Europe */
- sZoneTypeCmd.ZoneType = ZoneType_Europe;
- } else { /* Unknown ZoneType */
- pr_err("Error:ZoneType[%x] Unknown ???\n", zonetype);
- result = -EFAULT;
- break;
- }
- if (copy_to_user(pReq->data, &sZoneTypeCmd, sizeof(SCmdZoneTypeSet))) {
- result = -EFAULT;
- break;
- }
- }
- break;
-
- case WLAN_CMD_BSS_JOIN:
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
-
- if (copy_from_user(&sJoinCmd, pReq->data, sizeof(SCmdBSSJoin))) {
- result = -EFAULT;
- break;
- }
-
- pItemSSID = (PWLAN_IE_SSID)sJoinCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
- if (sJoinCmd.wBSSType == ADHOC) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- pr_debug("ioct set to adhoc mode\n");
- } else {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- pr_debug("ioct set to STA mode\n");
- }
- if (sJoinCmd.bPSEnable == true) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- pMgmt->wListenInterval = 2;
- pr_debug("Power Saving On\n");
- } else {
- pDevice->ePSMode = WMAC_POWER_CAM;
- pMgmt->wListenInterval = 1;
- pr_debug("Power Saving Off\n");
- }
-
- if (sJoinCmd.bShareKeyAuth == true) {
- pMgmt->bShareKeyAlgorithm = true;
- pr_debug("Share Key\n");
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- pr_debug("Open System\n");
- }
- pDevice->uChannel = sJoinCmd.uChannel;
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_SET_WEP:
- pr_debug("WLAN_CMD_SET_WEP Key\n");
- memset(&sWEPCmd, 0, sizeof(SCmdSetWEP));
- if (copy_from_user(&sWEPCmd, pReq->data, sizeof(SCmdSetWEP))) {
- result = -EFAULT;
- break;
- }
- if (sWEPCmd.bEnableWep != true) {
- pDevice->bEncryptionEnable = false;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- MACvDisableDefaultKey(pDevice->PortOffset);
- pr_debug("WEP function disable\n");
- break;
- }
-
- for (ii = 0; ii < WLAN_WEP_NKEYS; ii++) {
- if (sWEPCmd.bWepKeyAvailable[ii]) {
- if (ii == sWEPCmd.byKeyIndex)
- dwKeyIndex = ii | (1 << 31);
- else
- dwKeyIndex = ii;
-
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- sWEPCmd.auWepKeyLength[ii],
- NULL,
- (unsigned char *)&sWEPCmd.abyWepKey[ii][0],
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
- }
- }
- pDevice->byKeyIndex = sWEPCmd.byKeyIndex;
- pDevice->bTransmitKey = true;
- pDevice->bEncryptionEnable = true;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- break;
-
- case WLAN_CMD_GET_LINK: {
- SCmdLinkStatus sLinkStatus;
-
- pr_debug("WLAN_CMD_GET_LINK status\n");
-
- memset(&sLinkStatus, 0, sizeof(sLinkStatus));
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- sLinkStatus.wBSSType = ADHOC;
- else
- sLinkStatus.wBSSType = INFRA;
-
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED)
- sLinkStatus.byState = ADHOC_JOINTED;
- else
- sLinkStatus.byState = ADHOC_STARTED;
-
- sLinkStatus.uChannel = pMgmt->uCurrChannel;
- if (pDevice->bLinkPass == true) {
- sLinkStatus.bLink = true;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(sLinkStatus.abySSID, pItemSSID->abySSID, pItemSSID->len);
- memcpy(sLinkStatus.abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- sLinkStatus.uLinkRate = pMgmt->sNodeDBTable[0].wTxDataRate;
- pr_debug(" Link Success!\n");
- } else {
- sLinkStatus.bLink = false;
- sLinkStatus.uLinkRate = 0;
- }
- if (copy_to_user(pReq->data, &sLinkStatus, sizeof(SCmdLinkStatus))) {
- result = -EFAULT;
- break;
- }
- break;
- }
- case WLAN_CMD_GET_LISTLEN:
- cbListCount = 0;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- cbListCount++;
- }
- sList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sList, sizeof(SBSSIDList))) {
- result = -EFAULT;
- break;
- }
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_LIST:
- if (copy_from_user(&sList, pReq->data, sizeof(SBSSIDList))) {
- result = -EFAULT;
- break;
- }
- if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
- result = -EINVAL;
- break;
- }
- pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)),
- GFP_ATOMIC);
- if (pList == NULL) {
- result = -ENOMEM;
- break;
- }
- pList->uItem = sList.uItem;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) {
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- pList->sBSSIDList[ii].uChannel = pBSS->uChannel;
- pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
- pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
- RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (unsigned int)ldBm;
- memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memset(pList->sBSSIDList[ii].abySSID, 0, WLAN_SSID_MAXLEN + 1);
- memcpy(pList->sBSSIDList[ii].abySSID, pItemSSID->abySSID, pItemSSID->len);
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
- pList->sBSSIDList[ii].byNetType = INFRA;
- else
- pList->sBSSIDList[ii].byNetType = ADHOC;
-
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
- pList->sBSSIDList[ii].bWEPOn = true;
- else
- pList->sBSSIDList[ii].bWEPOn = false;
-
- ii++;
- if (ii >= pList->uItem)
- break;
- }
- }
-
- if (copy_to_user(pReq->data, pList, sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)))) {
- result = -EFAULT;
- break;
- }
- kfree(pList);
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_MIB:
- if (copy_to_user(pReq->data, &(pDevice->s802_11Counter), sizeof(SDot11MIBCount))) {
- result = -EFAULT;
- break;
- }
- break;
-
- case WLAN_CMD_GET_STAT:
- if (copy_to_user(pReq->data, &(pDevice->scStatistic), sizeof(SStatCounter))) {
- result = -EFAULT;
- break;
- }
- break;
-
- case WLAN_CMD_STOP_MAC:
- pr_debug("WLAN_CMD_STOP_MAC\n");
- netif_stop_queue(pDevice->dev);
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bRadioOff == false)
- CARDbRadioPowerOff(pDevice);
-
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- del_timer(&pDevice->sTimerCommand);
- del_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bCmdRunning = false;
- pDevice->bMACSuspend = true;
- MACvIntDisable(pDevice->PortOffset);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_START_MAC:
- pr_debug("WLAN_CMD_START_MAC\n");
-
- if (pDevice->bMACSuspend == true) {
- if (pDevice->bRadioOff == true)
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- pDevice->bMACSuspend = false;
- }
- break;
-
- case WLAN_CMD_SET_HOSTAPD:
- pr_debug("WLAN_CMD_SET_HOSTAPD\n");
-
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
- if (sValue.dwValue == 1) {
- if (vt6655_hostap_set_hostapd(pDevice, 1, 1) == 0) {
- pr_debug("Enable HOSTAP\n");
- } else {
- result = -EFAULT;
- break;
- }
- } else {
- vt6655_hostap_set_hostapd(pDevice, 0, 1);
- pr_debug("Disable HOSTAP\n");
- }
- break;
-
- case WLAN_CMD_SET_HOSTAPD_STA:
- pr_debug("WLAN_CMD_SET_HOSTAPD_STA\n");
- break;
-
- case WLAN_CMD_SET_802_1X:
- pr_debug("WLAN_CMD_SET_802_1X\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
-
- if (sValue.dwValue == 1) {
- pDevice->bEnable8021x = true;
- pr_debug("Enable 802.1x\n");
- } else {
- pDevice->bEnable8021x = false;
- pr_debug("Disable 802.1x\n");
- }
- break;
-
- case WLAN_CMD_SET_HOST_WEP:
- pr_debug("WLAN_CMD_SET_HOST_WEP\n");
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
-
- if (sValue.dwValue == 1) {
- pDevice->bEnableHostWEP = true;
- pr_debug("Enable HostWEP\n");
- } else {
- pDevice->bEnableHostWEP = false;
- pr_debug("Disable HostWEP\n");
- }
- break;
-
- case WLAN_CMD_SET_WPA:
- pr_debug("WLAN_CMD_SET_WPA\n");
-
- if (copy_from_user(&sValue, pReq->data, sizeof(SCmdValue))) {
- result = -EFAULT;
- break;
- }
- if (sValue.dwValue == 1) {
- pr_debug("up wpadev\n");
- eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
- pDevice->bWPADEVUp = true;
- } else {
- pr_debug("close wpadev\n");
- pDevice->bWPADEVUp = false;
- }
- break;
-
- case WLAN_CMD_AP_START:
- pr_debug("WLAN_CMD_AP_START\n");
- if (pDevice->bRadioOff == true) {
- CARDbRadioPowerOn(pDevice);
- vMgrTimerInit(pDevice);
- MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- if (copy_from_user(&sStartAPCmd, pReq->data, sizeof(SCmdStartAP))) {
- result = -EFAULT;
- break;
- }
-
- if (sStartAPCmd.wBSSType == AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- pr_debug("ioct set to AP mode\n");
- } else {
- pr_debug("ioct BSS type not set to AP mode\n");
- result = -EFAULT;
- break;
- }
-
- if (sStartAPCmd.wBBPType == PHY80211g)
- pMgmt->byAPBBType = PHY_TYPE_11G;
- else if (sStartAPCmd.wBBPType == PHY80211a)
- pMgmt->byAPBBType = PHY_TYPE_11A;
- else
- pMgmt->byAPBBType = PHY_TYPE_11B;
-
- pItemSSID = (PWLAN_IE_SSID)sStartAPCmd.ssid;
- if (pItemSSID->len > WLAN_SSID_MAXLEN + 1)
- return -EINVAL;
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyDesireSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
-
- if ((sStartAPCmd.uChannel > 0) && (sStartAPCmd.uChannel <= 14))
- pDevice->uChannel = sStartAPCmd.uChannel;
-
- if ((sStartAPCmd.uBeaconInt >= 20) && (sStartAPCmd.uBeaconInt <= 1000))
- pMgmt->wIBSSBeaconPeriod = sStartAPCmd.uBeaconInt;
- else
- pMgmt->wIBSSBeaconPeriod = 100;
-
- if (sStartAPCmd.bShareKeyAuth == true) {
- pMgmt->bShareKeyAlgorithm = true;
- pr_debug("Share Key\n");
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- pr_debug("Open System\n");
- }
- memcpy(pMgmt->abyIBSSSuppRates, abySuppRates, 6);
-
- if (sStartAPCmd.byBasicRate & BIT3) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- pMgmt->abyIBSSSuppRates[5] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT2) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- pMgmt->abyIBSSSuppRates[4] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- } else if (sStartAPCmd.byBasicRate & BIT1) {
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- } else {
- /* default 1,2M */
- pMgmt->abyIBSSSuppRates[2] |= BIT7;
- pMgmt->abyIBSSSuppRates[3] |= BIT7;
- }
-
- pr_debug("Support Rate= %*ph\n",
- 4, pMgmt->abyIBSSSuppRates + 2);
-
- netif_stop_queue(pDevice->dev);
- spin_lock_irq(&pDevice->lock);
- bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case WLAN_CMD_GET_NODE_CNT:
- cbListCount = 0;
- pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (!pNode->bActive)
- continue;
- cbListCount++;
- }
-
- sNodeList.uItem = cbListCount;
- if (copy_to_user(pReq->data, &sNodeList, sizeof(SNodeList))) {
- result = -EFAULT;
- break;
- }
- pReq->wResult = 0;
- break;
-
- case WLAN_CMD_GET_NODE_LIST:
- if (copy_from_user(&sNodeList, pReq->data, sizeof(SNodeList))) {
- result = -EFAULT;
- break;
- }
- if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
- result = -EINVAL;
- break;
- }
- pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)),
- GFP_ATOMIC);
- if (pNodeList == NULL) {
- result = -ENOMEM;
- break;
- }
- pNodeList->uItem = sNodeList.uItem;
- pNode = &(pMgmt->sNodeDBTable[0]);
- for (ii = 0, jj = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- pNode = &(pMgmt->sNodeDBTable[ii]);
- if (pNode->bActive) {
- pNodeList->sNodeList[jj].wAID = pNode->wAID;
- memcpy(pNodeList->sNodeList[jj].abyMACAddr, pNode->abyMACAddr, WLAN_ADDR_LEN);
- pNodeList->sNodeList[jj].wTxDataRate = pNode->wTxDataRate;
- pNodeList->sNodeList[jj].wInActiveCount = (unsigned short)pNode->uInActiveCount;
- pNodeList->sNodeList[jj].wEnQueueCnt = (unsigned short)pNode->wEnQueueCnt;
- pNodeList->sNodeList[jj].wFlags = (unsigned short)pNode->dwFlags;
- pNodeList->sNodeList[jj].bPWBitOn = pNode->bPSEnable;
- pNodeList->sNodeList[jj].byKeyIndex = pNode->byKeyIndex;
- pNodeList->sNodeList[jj].wWepKeyLength = pNode->uWepKeyLength;
- memcpy(&(pNodeList->sNodeList[jj].abyWepKey[0]), &(pNode->abyWepKey[0]), WEP_KEYMAXLEN);
- pr_debug("key= %2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- pNodeList->sNodeList[jj].abyWepKey[0],
- pNodeList->sNodeList[jj].abyWepKey[1],
- pNodeList->sNodeList[jj].abyWepKey[2],
- pNodeList->sNodeList[jj].abyWepKey[3],
- pNodeList->sNodeList[jj].abyWepKey[4]);
- pNodeList->sNodeList[jj].bIsInFallback = pNode->bIsInFallback;
- pNodeList->sNodeList[jj].uTxFailures = pNode->uTxFailures;
- pNodeList->sNodeList[jj].uTxAttempts = pNode->uTxAttempts;
- pNodeList->sNodeList[jj].wFailureRatio = (unsigned short)pNode->uFailureRatio;
- jj++;
- if (jj >= pNodeList->uItem)
- break;
- }
- }
- if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
- result = -EFAULT;
- break;
- }
- kfree(pNodeList);
- pReq->wResult = 0;
- break;
-
-#ifdef WPA_SM_Transtatus
- case 0xFF:
- memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
- wpa_Result.proto = 0;
- wpa_Result.key_mgmt = 0;
- wpa_Result.eap_type = 0;
- wpa_Result.authenticated = false;
- pDevice->fWPA_Authened = false;
- if (copy_from_user(&wpa_Result, pReq->data, sizeof(wpa_Result))) {
- result = -EFAULT;
- break;
- }
-
- if (wpa_Result.authenticated == true) {
-#ifdef SndEvt_ToAPI
- {
- union iwreq_data wrqu;
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.flags = RT_WPACONNECTED_EVENT_FLAG;
- wrqu.data.length = pItemSSID->len;
- wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pItemSSID->abySSID);
- }
-#endif
- pDevice->fWPA_Authened = true; /* is successful peer to wpa_Result.authenticated? */
- }
- pReq->wResult = 0;
- break;
-#endif
-
- default:
- pr_debug("Private command not support..\n");
- }
-
- return result;
-}
diff --git a/drivers/staging/vt6655/ioctl.h b/drivers/staging/vt6655/ioctl.h
deleted file mode 100644
index 2dc5a5743e8d..000000000000
--- a/drivers/staging/vt6655/ioctl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: hostap.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __IOCTL_H__
-#define __IOCTL_H__
-
-#include "device.h"
-
-int private_ioctl(struct vnt_private *, struct ifreq *rq);
-
-#endif // __IOCTL_H__
diff --git a/drivers/staging/vt6655/iowpa.h b/drivers/staging/vt6655/iowpa.h
deleted file mode 100644
index fe4b22ed49f4..000000000000
--- a/drivers/staging/vt6655/iowpa.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iowpa.h
- *
- * Purpose: Handles wpa supplicant ioctl interface
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __IOWPA_H__
-#define __IOWPA_H__
-
-#define WPA_IE_LEN 64
-
-//WPA related
-
-enum {
- VIAWGET_SET_WPA = 1,
- VIAWGET_SET_KEY = 2,
- VIAWGET_SET_SCAN = 3,
- VIAWGET_GET_SCAN = 4,
- VIAWGET_GET_SSID = 5,
- VIAWGET_GET_BSSID = 6,
- VIAWGET_SET_DROP_UNENCRYPT = 7,
- VIAWGET_SET_DEAUTHENTICATE = 8,
- VIAWGET_SET_ASSOCIATE = 9,
- VIAWGET_SET_DISASSOCIATE = 10
-};
-
-enum {
- VIAWGET_ASSOC_MSG = 1,
- VIAWGET_DISASSOC_MSG = 2,
- VIAWGET_PTK_MIC_MSG = 3,
- VIAWGET_GTK_MIC_MSG = 4,
- VIAWGET_CCKM_ROAM_MSG = 5,
- VIAWGET_DEVICECLOSE_MSG = 6
-};
-
-#pragma pack(1)
-typedef struct viawget_wpa_header {
- u8 type;
- u16 req_ie_len;
- u16 resp_ie_len;
-} viawget_wpa_header;
-
-struct viawget_wpa_param {
- u32 cmd;
- u8 addr[6];
- union {
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
-
- struct {
- u8 bssid[6];
- u8 ssid[32];
- u8 ssid_len;
- u8 __user *wpa_ie;
- u16 wpa_ie_len;
- int pairwise_suite;
- int group_suite;
- int key_mgmt_suite;
- int auth_alg;
- int mode;
-
- } wpa_associate;
-
- struct {
- int alg_name;
- u16 key_index;
- u16 set_tx;
- u8 *seq;
- u16 seq_len;
- u8 *key;
- u16 key_len;
- } wpa_key;
-
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
-
- struct {
- u16 scan_count;
- u8 __user *buf;
- } scan_results;
-
- } u;
-};
-
-#pragma pack(1)
-struct viawget_scan_result {
- u8 bssid[6];
- u8 ssid[32];
- u16 ssid_len;
- u8 wpa_ie[WPA_IE_LEN];
- u16 wpa_ie_len;
- u8 rsn_ie[WPA_IE_LEN];
- u16 rsn_ie_len;
- int freq; // MHz
- int caps; // e.g. privacy
- int qual; // signal quality
- int noise;
- int level;
- int maxrate;
-};
-
-#pragma pack()
-
-#endif //__IOWPA_H__
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
deleted file mode 100644
index 14a62bdae278..000000000000
--- a/drivers/staging/vt6655/iwctl.c
+++ /dev/null
@@ -1,1937 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iwctl.c
- *
- * Purpose: wireless ext & ioctl functions
- *
- * Author: Lyndon Chen
- *
- * Date: July 5, 2006
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "ioctl.h"
-#include "iocmd.h"
-#include "iwctl.h"
-#include "mac.h"
-#include "card.h"
-#include "hostap.h"
-#include "power.h"
-#include "rf.h"
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#include "iowpa.h"
-#include "wpactl.h"
-#endif
-
-#include <net/iw_handler.h>
-extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-
-/*--------------------- Static Definitions -------------------------*/
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#define SUPPORTED_WIRELESS_EXT 18
-#else
-#define SUPPORTED_WIRELESS_EXT 17
-#endif
-
-static const long frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484,
- 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980,
- 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240,
- 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680,
- 5700, 5745, 5765, 5785, 5805, 5825
-};
-
-/*--------------------- Static Classes ----------------------------*/
-/*--------------------- Static Variables --------------------------*/
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- long ldBm;
-
- pDevice->wstats.status = pDevice->op_mode;
-#ifdef Calcu_LinkQual
- if (pDevice->scStatistic.LinkQuality > 100)
- pDevice->scStatistic.LinkQuality = 100;
- pDevice->wstats.qual.qual = (unsigned char)pDevice->scStatistic.LinkQuality;
-#else
- pDevice->wstats.qual.qual = pDevice->byCurrSQ;
-#endif
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- pDevice->wstats.qual.level = ldBm;
- pDevice->wstats.qual.noise = 0;
- pDevice->wstats.qual.updated = 1;
- pDevice->wstats.discard.nwid = 0;
- pDevice->wstats.discard.code = 0;
- pDevice->wstats.discard.fragment = 0;
- pDevice->wstats.discard.retries = (unsigned long)pDevice->scStatistic.dwTsrErr;
- pDevice->wstats.discard.misc = 0;
- pDevice->wstats.miss.beacon = 0;
-
- return &pDevice->wstats;
-}
-
-/*------------------------------------------------------------------*/
-
-static int iwctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- void *wrq,
- char *extra)
-{
- pr_debug(" SIOCSIWCOMMIT\n");
-
- return 0;
-}
-/*
- * Wireless Handler : get protocol name
- */
-
-int iwctl_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *wrq,
- char *extra)
-{
- strcpy(wrq, "802.11-a/b/g");
- return 0;
-}
-
-/*
- * Wireless Handler : set scan
- */
-
-static int iwctl_siwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- PWLAN_IE_SSID pItemSSID = NULL;
-
- pr_debug(" SIOCSIWSCAN\n");
-
- if (pDevice->byReAssocCount > 0) { //reject scan when re-associating!
-//send scan event to wpa_supplicant
- union iwreq_data wrqu;
-
- PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
- memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- return 0;
- }
-
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
-
-//mike add: active scan OR passive scan OR desire_ssid scan
- if (wrq->length == sizeof(struct iw_scan_req)) {
- if (wrq->flags & IW_SCAN_THIS_ESSID) { //desire_ssid scan
- memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)abyScanSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
- memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len);
- if (pItemSSID->abySSID[req->essid_len - 1] == '\0') {
- if (req->essid_len > 0)
- pItemSSID->len = req->essid_len - 1;
- } else
- pItemSSID->len = req->essid_len;
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n", ((PWLAN_IE_SSID)abyScanSSID)->abySSID,
- ((PWLAN_IE_SSID)abyScanSSID)->len);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
- } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { //passive scan
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- }
- } else { //active scan
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- }
-
- pMgmt->eScanType = WMAC_SCAN_PASSIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Wireless Handler : get scan results
- */
-
-static int iwctl_giwscan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- int ii, jj, kk;
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PKnownBSS pBSS;
- PWLAN_IE_SSID pItemSSID;
- PWLAN_IE_SUPP_RATES pSuppRates, pExtSuppRates;
- char *current_ev = extra;
- char *end_buf = extra + IW_SCAN_MAX_DATA;
- char *current_val = NULL;
- struct iw_event iwe;
- long ldBm;
- char buf[MAX_WPA_IE_LEN * 2 + 30];
-
- pr_debug(" SIOCGIWSCAN\n");
-
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- return -EAGAIN;
- }
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; jj < MAX_BSS_NUM; jj++) {
- if (current_ev >= end_buf)
- break;
- pBSS = &(pMgmt->sBSSList[jj]);
- if (pBSS->bActive) {
- //ADD mac address
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, pBSS->abyBSSID, WLAN_BSSID_LEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
- //ADD ssid
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWESSID;
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- iwe.u.data.length = pItemSSID->len;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
- //ADD mode
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWMODE;
- if (WLAN_GET_CAP_INFO_ESS(pBSS->wCapInfo))
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
-
- iwe.len = IW_EV_UINT_LEN;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN);
- //ADD frequency
- pSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abySuppRates;
- pExtSuppRates = (PWLAN_IE_SUPP_RATES)pBSS->abyExtSuppRates;
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = pBSS->uChannel;
- iwe.u.freq.e = 0;
- iwe.u.freq.i = 0;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
- //2008-0409-04, <Add> by Einsn Liu
- {
- int f = (int)pBSS->uChannel - 1;
-
-					if (f < 0)
-						f = 0;
- iwe.u.freq.m = frequency_list[f] * 100000;
- iwe.u.freq.e = 1;
- }
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
- //ADD quality
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVQUAL;
- RFvRSSITodBm(pDevice, (unsigned char)(pBSS->uRSSI), &ldBm);
- iwe.u.qual.level = ldBm;
- iwe.u.qual.noise = 0;
-//2008-0409-01, <Add> by Einsn Liu
- if (-ldBm < 50)
- iwe.u.qual.qual = 100;
- else if (-ldBm > 90)
- iwe.u.qual.qual = 0;
- else
- iwe.u.qual.qual = (40 - (-ldBm - 50)) * 100 / 40;
-
- iwe.u.qual.updated = 7;
-
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.length = 0;
- if (WLAN_GET_CAP_INFO_PRIVACY(pBSS->wCapInfo))
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
-
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pItemSSID->abySSID);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- current_val = current_ev + IW_EV_LCP_LEN;
-
- for (kk = 0; kk < 12; kk++) {
- if (pSuppRates->abyRates[kk] == 0)
- break;
- // Bit rate given in 500 kb/s units (+ 0x80)
- iwe.u.bitrate.value = ((pSuppRates->abyRates[kk] & 0x7f) * 500000);
- current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- }
- for (kk = 0; kk < 8; kk++) {
- if (pExtSuppRates->abyRates[kk] == 0)
- break;
- // Bit rate given in 500 kb/s units (+ 0x80)
- iwe.u.bitrate.value = ((pExtSuppRates->abyRates[kk] & 0x7f) * 500000);
- current_val = iwe_stream_add_value(info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- }
-
- if ((current_val - current_ev) > IW_EV_LCP_LEN)
- current_ev = current_val;
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "bcn_int=%d", pBSS->wBeaconInterval);
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf);
-
- if ((pBSS->wWPALen > 0) && (pBSS->wWPALen <= MAX_WPA_IE_LEN)) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = pBSS->wWPALen;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byWPAIE);
- }
-
- if ((pBSS->wRSNLen > 0) && (pBSS->wRSNLen <= MAX_WPA_IE_LEN)) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = pBSS->wRSNLen;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, pBSS->byRSNIE);
- }
-
- }
- }// for
-
- wrq->length = current_ev - extra;
- return 0;
-}
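
The quality value computed in iwctl_giwscan() above maps an RSSI in dBm onto a 0-100 scale: stronger than -50 dBm reports 100, weaker than -90 dBm reports 0, and the 40 dB span in between is scaled linearly. A minimal standalone sketch of the same mapping (plain C outside the driver; the function name is illustrative, not part of the driver):

#include <stdio.h>

/* Map an RSSI in dBm to the 0-100 quality scale used in iwctl_giwscan(). */
static int rssi_dbm_to_quality(long dbm)
{
	if (-dbm < 50)			/* stronger than -50 dBm */
		return 100;
	if (-dbm > 90)			/* weaker than -90 dBm */
		return 0;
	return (int)((40 - (-dbm - 50)) * 100 / 40);
}

int main(void)
{
	long samples[] = { -45, -50, -70, -90, -95 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%ld dBm -> quality %d\n",
		       samples[i], rssi_dbm_to_quality(samples[i]));
	return 0;
}

For example, -70 dBm lands exactly at 50.
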
-
-/*
- * Wireless Handler : set frequency or channel
- */
-
-int iwctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWFREQ\n");
-
- // If setting by frequency, convert to a channel
- if ((wrq->e == 1) &&
- (wrq->m >= (int) 2.412e8) &&
- (wrq->m <= (int) 2.487e8)) {
- int f = wrq->m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != frequency_list[c]))
- c++;
- wrq->e = 0;
- wrq->m = c + 1;
- }
- // Setting by channel number
- if ((wrq->m > 14) || (wrq->e > 0))
- rc = -EOPNOTSUPP;
- else {
- int channel = wrq->m;
-
- if ((channel < 1) || (channel > 14)) {
- pr_debug("%s: New channel value of %d is invalid!\n",
- dev->name, wrq->m);
- rc = -EINVAL;
- } else {
- // Yes ! We can set it !!!
- pr_debug(" Set to channel = %d\n", channel);
- pDevice->uChannel = channel;
- //2007-0207-04,<Add> by EinsnLiu
- //Make change effect at once
- pDevice->bCommit = true;
- }
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get frequency or channel
- */
-
-int iwctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWFREQ\n");
-
-#ifdef WEXT_USECHANNELS
- wrq->m = (int)pMgmt->uCurrChannel;
- wrq->e = 0;
-#else
- {
- int f = (int)pMgmt->uCurrChannel - 1;
-
- if (f < 0)
- f = 0;
- wrq->m = frequency_list[f] * 100000;
- wrq->e = 1;
- }
-#endif
-
- return 0;
-}
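
iwctl_siwfreq() and iwctl_giwfreq() rely on the wireless-extensions encoding of a frequency as mantissa m times 10^e Hz; the driver keeps channel centre frequencies in MHz, reports m = MHz * 100000 with e = 1, and converts back by dividing by 100000 and searching frequency_list[]. A self-contained sketch of that round trip for the 2.4 GHz channels (illustrative only; the table mirrors the first 14 entries of frequency_list above):

#include <stdio.h>

/* 2.4 GHz channel centre frequencies in MHz (first 14 entries of frequency_list). */
static const long freq_mhz[14] = {
	2412, 2417, 2422, 2427, 2432, 2437, 2442,
	2447, 2452, 2457, 2462, 2467, 2472, 2484
};

/* Encode a channel as iwctl_giwfreq() does: frequency = m * 10^e Hz, e = 1. */
static void channel_to_iwfreq(int channel, long *m, int *e)
{
	*m = freq_mhz[channel - 1] * 100000;
	*e = 1;
}

/* Decode a mantissa back to a channel number as iwctl_siwfreq() does. */
static int iwfreq_to_channel(long m)
{
	long mhz = m / 100000;
	int c = 0;

	while (c < 14 && freq_mhz[c] != mhz)
		c++;
	return c + 1;	/* 1-based; 15 means "no match" */
}

int main(void)
{
	long m;
	int e;

	channel_to_iwfreq(6, &m, &e);
	printf("channel 6 -> m=%ld, e=%d (%.3f GHz)\n", m, e, m * 10.0 / 1e9);
	printf("decoded back to channel %d\n", iwfreq_to_channel(m));
	return 0;
}
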
-
-/*
- * Wireless Handler : set operation mode
- */
-
-int iwctl_siwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
-
- pr_debug(" SIOCSIWMODE\n");
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) {
- pr_debug("Can't set operation mode, hostapd is running\n");
- return rc;
- }
-
- switch (*wmode) {
- case IW_MODE_ADHOC:
- if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to ad-hoc\n");
- break;
- case IW_MODE_AUTO:
- case IW_MODE_INFRA:
- if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to infrastructure\n");
- break;
- case IW_MODE_MASTER:
-
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- rc = -EOPNOTSUPP;
- break;
-
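-		/* Note: the WMAC_CONFIG_AP block below is unreachable because of the
-		 * break above, so master (AP) mode is effectively rejected here. */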
- if (pMgmt->eConfigMode != WMAC_CONFIG_AP) {
- pMgmt->eConfigMode = WMAC_CONFIG_AP;
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- pr_debug("set mode to Access Point\n");
- break;
-
- case IW_MODE_REPEAT:
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- rc = -EOPNOTSUPP;
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get operation mode
- */
-
-int iwctl_giwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWMODE\n");
- // If not managed, assume it's ad-hoc
- switch (pMgmt->eConfigMode) {
- case WMAC_CONFIG_ESS_STA:
- *wmode = IW_MODE_INFRA;
- break;
- case WMAC_CONFIG_IBSS_STA:
- *wmode = IW_MODE_ADHOC;
- break;
- case WMAC_CONFIG_AUTO:
- *wmode = IW_MODE_INFRA;
- break;
- case WMAC_CONFIG_AP:
- *wmode = IW_MODE_MASTER;
- break;
- default:
- *wmode = IW_MODE_ADHOC;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : get capability range
- */
-
-int iwctl_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- int i, k;
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
-
- pr_debug(" SIOCGIWRANGE\n");
- if (wrq->pointer) {
- wrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
- // Should be based on cap_rid.country to give only
- // what the current card support
- k = 0;
- for (i = 0; i < 14; i++) {
- range->freq[k].i = i + 1; // List index
- range->freq[k].m = frequency_list[i] * 100000;
- range->freq[k++].e = 1; // Values in table in MHz -> * 10^5 * 10
- }
- range->num_frequency = k;
- // Hum... Should put the right values there
-#ifdef Calcu_LinkQual
- range->max_qual.qual = 100;
-#else
- range->max_qual.qual = 255;
-#endif
- range->max_qual.level = 0;
- range->max_qual.noise = 0;
- range->sensitivity = 255;
-
- for (i = 0; i < 13; i++) {
- range->bitrate[i] = abySupportedRates[i] * 500000;
- if (range->bitrate[i] == 0)
- break;
- }
- range->num_bitrates = i;
-
- // Set an indication of the max TCP throughput
- // in bit/s that we can expect using this interface.
- // May be use for QoS stuff... Jean II
- if (i > 2)
- range->throughput = 5 * 1000 * 1000;
- else
- range->throughput = 1.5 * 1000 * 1000;
-
- range->min_rts = 0;
- range->max_rts = 2312;
- range->min_frag = 256;
- range->max_frag = 2312;
-
- // the encoding capabilities
- range->num_encoding_sizes = 3;
- // 64(40) bits WEP
- range->encoding_size[0] = 5;
- // 128(104) bits WEP
- range->encoding_size[1] = 13;
- // 256 bits for WPA-PSK
- range->encoding_size[2] = 32;
- // 4 keys are allowed
- range->max_encoding_tokens = 4;
-
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
-
- range->min_pmp = 0;
- range->max_pmp = 1000000;// 1 secs
- range->min_pmt = 0;
- range->max_pmt = 1000000;// 1 secs
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
-
- // Transmit Power - values are in mW
-
- range->txpower[0] = 100;
- range->num_txpower = 1;
- range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = SUPPORTED_WIRELESS_EXT;
- range->we_version_compiled = WIRELESS_EXT;
- range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = IW_RETRY_LIFETIME;
- range->min_retry = 1;
- range->max_retry = 65535;
- range->min_r_time = 1024;
- range->max_r_time = 65535 * 1024;
- // Experimental measurements - boundary 11/5.5 Mb/s
- // Note : with or without the (local->rssi), results
- // are somewhat different. - Jean II
- range->avg_qual.qual = 6;
- range->avg_qual.level = 176; // -80 dBm
- range->avg_qual.noise = 0;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : set ap mac address
- */
-
-int iwctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
- unsigned char ZeroBSSID[WLAN_BSSID_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- pr_debug(" SIOCSIWAP\n");
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- pr_debug("SIOCSIWAP(??)-->In scanning..\n");
- }
- if (wrq->sa_family != ARPHRD_ETHER)
- rc = -EINVAL;
- else {
- memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
- //2008-0409-05, <Add> by Einsn Liu
- if ((pDevice->bLinkPass == true) &&
- (memcmp(pMgmt->abyDesireBSSID, pMgmt->abyCurrBSSID, 6) == 0)) {
- return rc;
- }
- //mike :add
- if ((is_broadcast_ether_addr(pMgmt->abyDesireBSSID)) ||
- (memcmp(pMgmt->abyDesireBSSID, ZeroBSSID, 6) == 0)) {
- PRINT_K("SIOCSIWAP:invalid desired BSSID return!\n");
- return rc;
- }
-//mike add: if the desired AP uses a hidden ssid (there are two entries with the same BSSID in the list),
-//          then ignore it, because we don't know which one to connect with
- {
- unsigned int ii, uSameBssidNum = 0;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pMgmt->abyDesireBSSID)) {
- uSameBssidNum++;
- }
- }
- if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
- PRINT_K("SIOCSIWAP:ignore for desired AP in hidden mode\n");
- return rc;
- }
- }
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- }
- return rc;
-}
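
The duplicate-BSSID check in iwctl_siwap() above relies on the observation that a hidden-SSID AP typically leaves two entries with the same BSSID in the scan list (one from the beacon with an empty SSID, one from a probe response), so selecting purely by BSSID would be ambiguous. A standalone sketch of the counting step (plain C; the structure and names are illustrative, not the driver's types):

#include <stdio.h>
#include <string.h>

#define MAX_BSS 42	/* arbitrary list size for the sketch */

struct bss_entry {
	int active;
	unsigned char bssid[6];
};

/* Count how many active scan entries carry the given BSSID, mirroring the
 * duplicate check in iwctl_siwap(); two or more suggests a hidden SSID. */
static int count_bssid(const struct bss_entry *list, int n,
		       const unsigned char *bssid)
{
	int i, hits = 0;

	for (i = 0; i < n; i++)
		if (list[i].active && memcmp(list[i].bssid, bssid, 6) == 0)
			hits++;
	return hits;
}

int main(void)
{
	struct bss_entry list[MAX_BSS] = {
		{ 1, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },	/* hidden beacon */
		{ 1, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },	/* probe response */
		{ 1, { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb } },
	};
	unsigned char target[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	if (count_bssid(list, MAX_BSS, target) >= 2)
		printf("target looks like a hidden-SSID AP, ignore SIOCSIWAP\n");
	return 0;
}
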
-
-/*
- * Wireless Handler : get ap mac address
- */
-
-int iwctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWAP\n");
-
- memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
- //2008-0410,<Modify> by Einsn Liu
- if ((pDevice->bLinkPass == false) && (pMgmt->eCurrMode != WMAC_MODE_ESS_AP))
- memset(wrq->sa_data, 0, 6);
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
- memcpy(wrq->sa_data, pMgmt->abyCurrBSSID, 6);
-
- wrq->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-/*
- * Wireless Handler : get ap list
- */
-
-int iwctl_giwaplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- int ii, jj, rc = 0;
- struct sockaddr *sock = NULL;
- struct sockaddr *s = NULL;
- struct iw_quality *qual = NULL;
- struct iw_quality *q = NULL;
- PKnownBSS pBSS = NULL;
-
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWAPLIST\n");
-
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- goto exit;
- }
-
- if (!wrq->pointer)
- goto exit;
-
- sock = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
- if (!sock) {
- rc = -ENOMEM;
- goto exit;
- }
-
- qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
- if (!qual) {
- rc = -ENOMEM;
- goto exit;
- }
-
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
-
- if (!pBSS->bActive)
- continue;
- if (jj >= IW_MAX_AP)
- break;
-
- s = &sock[jj];
- q = &qual[jj];
-
- memcpy(s->sa_data, pBSS->abyBSSID, 6);
- s->sa_family = ARPHRD_ETHER;
- q->level = pBSS->uRSSI;
- q->qual = 0;
- q->noise = 0;
- q->updated = 2;
- jj++;
- }
-
- wrq->flags = 1; /* Should be define'd */
- wrq->length = jj;
- memcpy(extra, sock, sizeof(struct sockaddr) * jj);
- memcpy(extra + sizeof(struct sockaddr) * jj,
- qual,
- sizeof(struct iw_quality) * jj);
-exit:
- kfree(sock);
- kfree(qual);
- return rc;
-}
-
-/*
- * Wireless Handler : set essid
- */
-
-int iwctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
- //2008-0409-05, <Add> by Einsn Liu
- unsigned char len;
-
- pr_debug(" SIOCSIWESSID\n");
- pDevice->fWPA_Authened = false;
- if (pMgmt->eScanState == WMAC_IS_SCANNING) {
- // In scanning..
- pr_debug("SIOCSIWESSID(??)-->In scanning..\n");
- }
- // Check if we asked for `any'
- if (wrq->flags == 0) {
- // Just send an empty SSID list
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- PRINT_K("set essid to 'any'\n");
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- return 0;
-#endif
- } else {
- // Set the SSID
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
-
- memcpy(pItemSSID->abySSID, extra, wrq->length);
- if (pItemSSID->abySSID[wrq->length - 1] == '\0') {
- if (wrq->length > 0)
- pItemSSID->len = wrq->length - 1;
- } else
- pItemSSID->len = wrq->length;
- pr_debug("set essid to %s\n", pItemSSID->abySSID);
- //2008-0409-05, <Add> by Einsn Liu
- len = (pItemSSID->len > ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) ? pItemSSID->len : ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len;
- if ((pDevice->bLinkPass == true) &&
- (memcmp(pItemSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID, len) == 0))
- return 0;
-
- //mike:need clear desiredBSSID
- if (pItemSSID->len == 0) {
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- return 0;
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-		//Wext will issue another siwap command to link with the desired AP,
-		//so we do not need to associate here
- if (pDevice->bWPASuppWextEnabled == true) {
- /*******search if in hidden ssid mode ****/
- {
- PKnownBSS pCurr = NULL;
- unsigned char abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned int ii, uSameBssidNum = 0;
-
- memcpy(abyTmpDesireSSID, pMgmt->abyDesireSSID, sizeof(abyTmpDesireSSID));
- pCurr = BSSpSearchBSSList(pDevice,
- NULL,
- abyTmpDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n");
- vResetCommandTimer((void *)pDevice);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
-				} else { //mike: find out whether the desired SSID belongs to a hidden-ssid AP
-					// by checking whether two entries with the same BSSID exist in the list
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive &&
- ether_addr_equal(pMgmt->sBSSList[ii].abyBSSID,
- pCurr->abyBSSID)) {
- uSameBssidNum++;
- }
- }
- if (uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
- pr_debug("SIOCSIWESSID:hidden ssid directly associate.......\n");
- vResetCommandTimer((void *)pDevice);
- pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result!
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
- }
- }
- }
- return 0;
- }
-#endif
-
- pr_debug("set essid = %s\n", pItemSSID->abySSID);
- }
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED)
- pDevice->bCommit = true;
-
- return 0;
-}
-
-/*
- * Wireless Handler : get essid
- */
-
-int iwctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- PWLAN_IE_SSID pItemSSID;
-
- pr_debug(" SIOCGIWESSID\n");
-
- // Note : if wrq->u.data.flags != 0, we should
- // get the relevant SSID from the SSID list...
-
- // Get the current SSID
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- memcpy(extra, pItemSSID->abySSID , pItemSSID->len);
- extra[pItemSSID->len] = '\0';
- wrq->length = pItemSSID->len + 1;
- //2008-0409-03, <Add> by Einsn Liu
- wrq->length = pItemSSID->len;
- wrq->flags = 1; // active
-
- return 0;
-}
-
-/*
- * Wireless Handler : set data rate
- */
-
-int iwctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
- u8 brate = 0;
- int i;
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
-
- pr_debug(" SIOCSIWRATE\n");
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EINVAL;
- return rc;
- }
-
- // First : get a valid bit rate value
-
- // Which type of value
- if ((wrq->value < 13) &&
- (wrq->value >= 0)) {
- // Setting by rate index
- // Find value in the magic rate table
- brate = wrq->value;
- } else {
- // Setting by frequency value
- u8 normvalue = (u8) (wrq->value/500000);
-
- // Check if rate is valid
- for (i = 0; i < 13; i++) {
- if (normvalue == abySupportedRates[i]) {
- brate = i;
- break;
- }
- }
- }
-	// -1 designates the max rate (mostly auto mode)
- if (wrq->value == -1) {
- // Get the highest available rate
- for (i = 0; i < 13; i++) {
- if (abySupportedRates[i] == 0)
- break;
- }
- if (i != 0)
- brate = i - 1;
-
- }
- // Check that it is valid
- // brate is index of abySupportedRates[]
- if (brate > 13) {
- rc = -EINVAL;
- return rc;
- }
-
- // Now, check if we want a fixed or auto value
- if (wrq->fixed != 0) {
- // Fixed mode
- // One rate, fixed
- pr_debug("Rate Fix\n");
- pDevice->bFixRate = true;
- if ((pDevice->byBBType == BB_TYPE_11B) && (brate > 3)) {
- pDevice->uConnectionRate = 3;
- } else {
- pDevice->uConnectionRate = brate;
- pr_debug("Fixed to Rate %d\n",
- pDevice->uConnectionRate);
- }
-
- } else {
- pDevice->bFixRate = false;
- pDevice->uConnectionRate = 13;
- pr_debug("auto rate:connection_rate is 13\n");
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get data rate
- */
-
-int iwctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-//2007-0118-05,<Mark> by EinsnLiu
-//Mark the unnecessary sentences.
-// PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pr_debug(" SIOCGIWRATE\n");
- {
- unsigned char abySupportedRates[13] = {0x02, 0x04, 0x0B, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90};
- int brate = 0;
-//2008-5-8 <modify> by chester
- if (pDevice->bLinkPass) {
- if (pDevice->bFixRate == true) {
- if (pDevice->uConnectionRate < 13) {
- brate = abySupportedRates[pDevice->uConnectionRate];
- } else {
- if (pDevice->byBBType == BB_TYPE_11B)
- brate = 0x16;
- if (pDevice->byBBType == BB_TYPE_11G)
- brate = 0x6C;
- if (pDevice->byBBType == BB_TYPE_11A)
- brate = 0x6C;
- }
- } else {
- brate = abySupportedRates[TxRate_iwconfig];
- }
- } else brate = 0;
-
- wrq->value = brate * 500000;
- // If more than one rate, set auto
- if (pDevice->bFixRate == true)
- wrq->fixed = true;
- }
-
- return 0;
-}
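
The rate handlers above work on supported-rates octets as carried in the 802.11 information element: each octet encodes the rate in units of 500 kb/s, with bit 0x80 marking a basic rate, which is why the code masks with 0x7f and multiplies by 500000. A small standalone illustration (the sample octets are chosen for the example):

#include <stdio.h>

/* Supported-rates octets: units of 500 kb/s, bit 0x80 flags a basic rate.
 * 0x96 -> 11 Mb/s (basic), 0x6C -> 54 Mb/s. */
static long rate_octet_to_bps(unsigned char octet)
{
	return (long)(octet & 0x7f) * 500000;
}

int main(void)
{
	unsigned char rates[] = { 0x82, 0x84, 0x8B, 0x96, 0x24, 0x6C };
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++)
		printf("0x%02X -> %ld.%01ld Mb/s%s\n", rates[i],
		       rate_octet_to_bps(rates[i]) / 1000000,
		       (rate_octet_to_bps(rates[i]) % 1000000) / 100000,
		       rates[i] & 0x80 ? " (basic)" : "");
	return 0;
}
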
-
-/*
- * Wireless Handler : set rts threshold
- */
-
-int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWRTS\n");
-
- {
- int rthr = wrq->value;
-
- if (wrq->disabled)
- rthr = 2312;
-
- if ((rthr < 0) || (rthr > 2312))
- rc = -EINVAL;
- else
- pDevice->wRTSThreshold = rthr;
- }
-
-	return rc;
-}
-
-/*
- * Wireless Handler : get rts
- */
-
-int iwctl_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWRTS\n");
- wrq->value = pDevice->wRTSThreshold;
- wrq->disabled = (wrq->value >= 2312);
- wrq->fixed = 1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set fragment threshold
- */
-
-int iwctl_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
- int fthr = wrq->value;
-
- pr_debug(" SIOCSIWFRAG\n");
-
- if (wrq->disabled)
- fthr = 2312;
- if ((fthr < 256) || (fthr > 2312)) {
- rc = -EINVAL;
- } else {
- fthr &= ~0x1; // Get an even value
- pDevice->wFragmentationThreshold = (u16)fthr;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get fragment threshold
- */
-
-int iwctl_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWFRAG\n");
- wrq->value = pDevice->wFragmentationThreshold;
- wrq->disabled = (wrq->value >= 2312);
- wrq->fixed = 1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set retry threshold
- */
-int iwctl_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- int rc = 0;
-
- pr_debug(" SIOCSIWRETRY\n");
-
- if (wrq->disabled) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->flags & IW_RETRY_LIMIT) {
- if (wrq->flags & IW_RETRY_MAX)
- pDevice->byLongRetryLimit = wrq->value;
- else if (wrq->flags & IW_RETRY_MIN)
- pDevice->byShortRetryLimit = wrq->value;
- else {
- // No modifier : set both
- pDevice->byShortRetryLimit = wrq->value;
- pDevice->byLongRetryLimit = wrq->value;
- }
- }
- if (wrq->flags & IW_RETRY_LIFETIME)
- pDevice->wMaxTransmitMSDULifetime = wrq->value;
-
- return rc;
-}
-
-/*
- * Wireless Handler : get retry threshold
- */
-int iwctl_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
-
- pr_debug(" SIOCGIWRETRY\n");
- wrq->disabled = 0; // Can't be disabled
-
- // Note : by default, display the min retry number
- if ((wrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- wrq->flags = IW_RETRY_LIFETIME;
- wrq->value = (int)pDevice->wMaxTransmitMSDULifetime; //ms
- } else if ((wrq->flags & IW_RETRY_MAX)) {
- wrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
- wrq->value = (int)pDevice->byLongRetryLimit;
- } else {
- wrq->flags = IW_RETRY_LIMIT;
- wrq->value = (int)pDevice->byShortRetryLimit;
- if ((int)pDevice->byShortRetryLimit != (int)pDevice->byLongRetryLimit)
- wrq->flags |= IW_RETRY_MIN;
- }
-
- return 0;
-}
-
-/*
- * Wireless Handler : set encode mode
- */
-int iwctl_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- unsigned long dwKeyIndex = (unsigned long)(wrq->flags & IW_ENCODE_INDEX);
- int ii, uu, rc = 0;
- int index = (wrq->flags & IW_ENCODE_INDEX);
-
-//2007-0207-07,<Modify> by EinsnLiu
-//There are some problems when using the iwconfig encode/key command to set the WEP key.
-//I have almost rewritten this function.
-//It now supports (assuming the wireless interface's name is eth0):
-//iwconfig eth0 key [1] 1122334455 open	/*set the key string at index 1 and make index 1 the key index in use*/
-//iwconfig eth0 key [3]			/*set the key index in use to 3; the key string is unchanged*/
-//iwconfig eth0 key 1122334455			/*set the key string at the key index currently in use*/
-//iwconfig eth0 key restricted			/*enable shared-key authentication*/
-
- PSKeyTable pkeytab;
-
- pr_debug(" SIOCSIWENCODE\n");
-
- if ((wrq->flags & IW_ENCODE_DISABLED) == 0) {
- //Not disable encryption
-
- if (dwKeyIndex > WLAN_WEP_NKEYS) {
- rc = -EINVAL;
- return rc;
- }
-
- if (dwKeyIndex < 1 && ((wrq->flags & IW_ENCODE_NOKEY) == 0)) {//set default key
- if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
- dwKeyIndex = pDevice->byKeyIndex;
- else
- dwKeyIndex = 0;
- } else {
- dwKeyIndex--;
- }
-
- // Check the size of the key
- if (wrq->length > WLAN_WEP232_KEYLEN) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->length > 0) {//have key
-
- if (wrq->length == WLAN_WEP232_KEYLEN) {
- pr_debug("Set 232 bit wep key\n");
- } else if (wrq->length == WLAN_WEP104_KEYLEN) {
- pr_debug("Set 104 bit wep key\n");
- } else if (wrq->length == WLAN_WEP40_KEYLEN) {
- pr_debug("Set 40 bit wep key, index= %d\n",
- (int)dwKeyIndex);
-			} else {//unsupported key length
- rc = -EINVAL;
- return rc;
- }
- memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN);
- memcpy(pDevice->abyKey, extra, wrq->length);
-
- pr_debug("abyKey: ");
- for (ii = 0; ii < wrq->length; ii++)
- pr_debug("%02x ", pDevice->abyKey[ii]);
-
- if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- spin_lock_irq(&pDevice->lock);
- KeybSetDefaultKey(&(pDevice->sKey),
- (unsigned long)(dwKeyIndex | (1 << 31)),
- wrq->length,
- NULL,
- pDevice->abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID
-);
- spin_unlock_irq(&pDevice->lock);
- }
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->uKeyLength = wrq->length;
- pDevice->bTransmitKey = true;
- pDevice->bEncryptionEnable = true;
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
-
- } else if (index > 0) {
-			//when the length is 0 the request only changes the default transmit key index;
-			//check that the new key has a non-zero length
- if (pDevice->bEncryptionEnable == false) {
- rc = -EINVAL;
- return rc;
- }
- pr_debug("Just set Default key Index:\n");
- pkeytab = &(pDevice->sKey.KeyTable[MAX_KEY_TABLE - 1]);
- if (pkeytab->GroupKey[(unsigned char)dwKeyIndex].uKeyLength == 0) {
- pr_debug("Default key len is 0\n");
- rc = -EINVAL;
- return rc;
- }
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pkeytab->dwGTKeyIndex = dwKeyIndex | (1 << 31);
- pkeytab->GroupKey[(unsigned char)dwKeyIndex].dwKeyIndex = dwKeyIndex | (1 << 31);
- }
-
- } else {//disable the key
- pr_debug("Disable WEP function\n");
- if (pDevice->bEncryptionEnable == false)
- return 0;
- pMgmt->bShareKeyAlgorithm = false;
- pDevice->bEncryptionEnable = false;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- if (pDevice->flags & DEVICE_FLAGS_OPENED) {
- spin_lock_irq(&pDevice->lock);
- for (uu = 0; uu < MAX_KEY_TABLE; uu++)
- MACvDisableKeyEntry(pDevice->PortOffset, uu);
- spin_unlock_irq(&pDevice->lock);
- }
- }
-//End Modify,Einsn
-
- if (wrq->flags & IW_ENCODE_RESTRICTED) {
- pr_debug("Enable WEP & ShareKey System\n");
- pMgmt->bShareKeyAlgorithm = true;
- }
- if (wrq->flags & IW_ENCODE_OPEN) {
- pr_debug("Enable WEP & Open System\n");
- pMgmt->bShareKeyAlgorithm = false;
- }
- return rc;
-}
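
As the comment at the top of iwctl_siwencode() describes, the key index arrives 1-based in the low bits of wrq->flags: 0 means "keep the current default index" and 1..4 select a slot, which the handler converts to a 0-based table index. A standalone sketch of that decoding (IW_ENCODE_INDEX is the standard wireless-extensions mask; the helper name is illustrative):

#include <stdio.h>

#define IW_ENCODE_INDEX	0x00FF	/* index lives in the low byte of the flags word */
#define WEP_NKEYS	4

/* Decode the 1-based key index as iwctl_siwencode() does: 0 keeps the
 * current default index, 1..4 select a slot, anything larger is rejected. */
static int decode_key_index(int flags, int current_index, int *slot)
{
	int idx = flags & IW_ENCODE_INDEX;

	if (idx > WEP_NKEYS)
		return -1;			/* -EINVAL in the driver */
	if (idx < 1)
		*slot = current_index < WEP_NKEYS ? current_index : 0;
	else
		*slot = idx - 1;		/* convert to 0-based slot */
	return 0;
}

int main(void)
{
	int slot;

	if (decode_key_index(0x0002, 0, &slot) == 0)
		printf("iwconfig key [2] ... -> slot %d\n", slot);	/* 1 */
	if (decode_key_index(0x0000, 3, &slot) == 0)
		printf("no index given      -> slot %d\n", slot);	/* 3 */
	return 0;
}
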
-
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- char abyKey[WLAN_WEP232_KEYLEN];
-
- unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
- PSKeyItem pKey = NULL;
-
- pr_debug(" SIOCGIWENCODE\n");
-
- if (index > WLAN_WEP_NKEYS)
- return -EINVAL;
-
- if (index < 1) {//get default key
- if (pDevice->byKeyIndex < WLAN_WEP_NKEYS)
- index = pDevice->byKeyIndex;
- else
- index = 0;
- } else {
- index--;
- }
-
- memset(abyKey, 0, WLAN_WEP232_KEYLEN);
- // Check encryption mode
- wrq->flags = IW_ENCODE_NOKEY;
- // Is WEP enabled ???
- if (pDevice->bEncryptionEnable)
- wrq->flags |= IW_ENCODE_ENABLED;
- else
- wrq->flags |= IW_ENCODE_DISABLED;
-
- if (pMgmt->bShareKeyAlgorithm)
- wrq->flags |= IW_ENCODE_RESTRICTED;
- else
- wrq->flags |= IW_ENCODE_OPEN;
- wrq->length = 0;
-
- if ((index == 0) && (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled ||
- pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)) {//get wpa pairwise key
- if (KeybGetKey(&(pDevice->sKey), pMgmt->abyCurrBSSID, 0xffffffff, &pKey)) {
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- }
- } else if (KeybGetKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, (unsigned char)index , &pKey)) {
- wrq->length = pKey->uKeyLength;
- memcpy(abyKey, pKey->abyKey, pKey->uKeyLength);
- memcpy(extra, abyKey, WLAN_WEP232_KEYLEN);
- }
-
- wrq->flags |= index+1;
-
- return 0;
-}
-
-/*
- * Wireless Handler : set power mode
- */
-int iwctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int rc = 0;
-
- pr_debug(" SIOCSIWPOWER\n");
-
- if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
- rc = -EINVAL;
- return rc;
- }
-
- if (wrq->disabled) {
- pDevice->ePSMode = WMAC_POWER_CAM;
- PSvDisablePowerSaving(pDevice);
- return rc;
- }
- if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
-
- } else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
- pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
- }
- switch (wrq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- pr_debug(" SIOCSIWPOWER: IW_POWER_UNICAST_R\n");
- rc = -EINVAL;
- break;
- case IW_POWER_ALL_R:
- pr_debug(" SIOCSIWPOWER: IW_POWER_ALL_R\n");
-		rc = -EINVAL;
-		break;
- case IW_POWER_ON:
- pr_debug(" SIOCSIWPOWER: IW_POWER_ON\n");
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
- * Wireless Handler : get power mode
- */
-int iwctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int mode = pDevice->ePSMode;
-
- pr_debug(" SIOCGIWPOWER\n");
-
- wrq->disabled = (mode == WMAC_POWER_CAM);
- if (wrq->disabled)
- return 0;
-
- if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10);
- wrq->flags = IW_POWER_TIMEOUT;
- } else {
- wrq->value = (int)((pMgmt->wListenInterval * pMgmt->wCurrBeaconPeriod) << 10);
- wrq->flags = IW_POWER_PERIOD;
- }
- wrq->flags |= IW_POWER_ALL_R;
-
- return 0;
-}
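
The value returned by iwctl_giwpower() above is listen_interval * beacon_period shifted left by 10: the beacon period is expressed in 802.11 time units of 1024 microseconds, so the shift converts TU to microseconds. A short standalone illustration (sample values, not taken from the driver):

#include <stdio.h>

/* Convert a listen interval (in beacon periods) to microseconds the way
 * iwctl_giwpower() does; one TU is 1024 us, hence the << 10. */
static long listen_interval_us(int listen_interval, int beacon_period_tu)
{
	return (long)(listen_interval * beacon_period_tu) << 10;
}

int main(void)
{
	/* Typical values: a beacon every 100 TU, listening every 3 beacons. */
	printf("%ld us\n", listen_interval_us(3, 100));	/* 307200 us, about 0.3 s */
	return 0;
}
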
-
-/*
- * Wireless Handler : get Sensitivity
- */
-int iwctl_giwsens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- long ldBm;
-
- pr_debug(" SIOCGIWSENS\n");
- if (pDevice->bLinkPass == true) {
- RFvRSSITodBm(pDevice, (unsigned char)(pDevice->uCurrRSSI), &ldBm);
- wrq->value = ldBm;
- } else {
- wrq->value = 0;
- }
- wrq->disabled = (wrq->value == 0);
- wrq->fixed = 1;
-
- return 0;
-}
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-
-int iwctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- static int wpa_version = 0; //must be static to save the last value,einsn liu
- static int pairwise = 0;
-
- pr_debug(" SIOCSIWAUTH\n");
- switch (wrq->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- wpa_version = wrq->value;
- if (wrq->value == IW_AUTH_WPA_VERSION_DISABLED)
- PRINT_K("iwctl_siwauth:set WPADEV to disable at 1??????\n");
- else if (wrq->value == IW_AUTH_WPA_VERSION_WPA)
- PRINT_K("iwctl_siwauth:set WPADEV to WPA1******\n");
- else
- PRINT_K("iwctl_siwauth:set WPADEV to WPA2******\n");
-
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- pairwise = wrq->value;
- if (pairwise == IW_AUTH_CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else if (pairwise == IW_AUTH_CIPHER_TKIP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- else if (pairwise == IW_AUTH_CIPHER_WEP40 || pairwise == IW_AUTH_CIPHER_WEP104)
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- else if (pairwise == IW_AUTH_CIPHER_NONE)
- ; /* do nothing,einsn liu */
- else
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
-
- break;
- case IW_AUTH_CIPHER_GROUP:
- if (wpa_version == IW_AUTH_WPA_VERSION_DISABLED)
- break;
- if (pairwise == IW_AUTH_CIPHER_NONE) {
- if (wrq->value == IW_AUTH_CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- }
- break;
- case IW_AUTH_KEY_MGMT:
-
- if (wpa_version == IW_AUTH_WPA_VERSION_WPA2) {
- if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
- } else if (wpa_version == IW_AUTH_WPA_VERSION_WPA) {
- if (wrq->value == 0)
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- else if (wrq->value == IW_AUTH_KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
- }
-
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- break; /* FIXME */
- case IW_AUTH_DROP_UNENCRYPTED:
- break;
- case IW_AUTH_80211_AUTH_ALG:
- if (wrq->value == IW_AUTH_ALG_OPEN_SYSTEM)
- pMgmt->bShareKeyAlgorithm = false;
- else if (wrq->value == IW_AUTH_ALG_SHARED_KEY)
- pMgmt->bShareKeyAlgorithm = true;
-
- break;
- case IW_AUTH_WPA_ENABLED:
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- break;
- case IW_AUTH_ROAMING_CONTROL:
- ret = -EOPNOTSUPP;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- pDevice->bEncryptionEnable = !!wrq->value;
- if (pDevice->bEncryptionEnable == false) {
- wpa_version = 0;
- pairwise = 0;
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pMgmt->bShareKeyAlgorithm = false;
- pMgmt->eAuthenMode = false;
- }
-
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-int iwctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- return -EOPNOTSUPP;
-}
-
-int iwctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- char length;
-
- if (wrq->length) {
- if (wrq->length < 2)
- return -EINVAL;
-
- ret = get_user(length, extra + 1);
- if (ret)
- return ret;
-
- if (length + 2 != wrq->length)
- return -EINVAL;
-
- if (wrq->length > MAX_WPA_IE_LEN) {
- ret = -ENOMEM;
- goto out;
- }
- memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- if (copy_from_user(pMgmt->abyWPAIE, extra, wrq->length)) {
- ret = -EFAULT;
- goto out;
- }
- pMgmt->wWPAIELen = wrq->length;
- } else {
- memset(pMgmt->abyWPAIE, 0, MAX_WPA_IE_LEN);
- pMgmt->wWPAIELen = 0;
- }
-
-out:	//not completely handled... not necessary in wpa_supplicant 0.5.8
- return ret;
-}
-
-int iwctl_giwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- int ret = 0;
- int space = wrq->length;
-
- wrq->length = 0;
- if (pMgmt->wWPAIELen > 0) {
- wrq->length = pMgmt->wWPAIELen;
- if (pMgmt->wWPAIELen <= space) {
- if (copy_to_user(extra, pMgmt->abyWPAIE, pMgmt->wWPAIELen))
- ret = -EFAULT;
-
- } else {
- ret = -E2BIG;
- }
- }
-
- return ret;
-}
-
-int iwctl_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct viawget_wpa_param *param = NULL;
-//original member
- enum wpa_alg alg_name;
- u8 addr[6];
- int key_idx, set_tx = 0;
- u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
- u8 key[64];
- size_t seq_len = 0, key_len = 0;
-
- u8 key_array[64];
- int ret = 0;
-
- PRINT_K("SIOCSIWENCODEEXT......\n");
-
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
-//recover alg_name
- switch (ext->alg) {
- case IW_ENCODE_ALG_NONE:
- alg_name = WPA_ALG_NONE;
- break;
- case IW_ENCODE_ALG_WEP:
- alg_name = WPA_ALG_WEP;
- break;
- case IW_ENCODE_ALG_TKIP:
- alg_name = WPA_ALG_TKIP;
- break;
- case IW_ENCODE_ALG_CCMP:
- alg_name = WPA_ALG_CCMP;
- break;
- default:
- PRINT_K("Unknown alg = %d\n", ext->alg);
- ret = -ENOMEM;
- goto error;
- }
-//recover addr
- memcpy(addr, ext->addr.sa_data, ETH_ALEN);
-//recover key_idx
- key_idx = (wrq->flags&IW_ENCODE_INDEX) - 1;
-//recover set_tx
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
- set_tx = 1;
-//recover seq,seq_len
- if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- seq_len = IW_ENCODE_SEQ_MAX_SIZE;
- memcpy(seq, ext->rx_seq, seq_len);
- }
-//recover key,key_len
- if (ext->key_len) {
- key_len = ext->key_len;
- memcpy(key, &ext->key[0], key_len);
- }
-
- memset(key_array, 0, 64);
- if (key_len > 0) {
- memcpy(key_array, key, key_len);
- if (key_len == 32) {
-				// notice! the order: swap the two 8-byte MIC halves
- memcpy(&key_array[16], &key[24], 8);
- memcpy(&key_array[24], &key[16], 8);
- }
- }
-
-/**************Translate iw_encode_ext to viawget_wpa_param****************/
- memcpy(param->addr, addr, ETH_ALEN);
- param->u.wpa_key.alg_name = (int)alg_name;
- param->u.wpa_key.set_tx = set_tx;
- param->u.wpa_key.key_index = key_idx;
- param->u.wpa_key.key_len = key_len;
- param->u.wpa_key.key = (u8 *)key_array;
- param->u.wpa_key.seq = (u8 *)seq;
- param->u.wpa_key.seq_len = seq_len;
-
-//****determine whether the current action is the network manager clearing keys, by counting key-clear requests on indices 0-3
-//****this method is crude, but there seems to be no other way
- if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- if (param->u.wpa_key.key_index == 0)
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 1) && (param->u.wpa_key.key_index == 1))
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 2) && (param->u.wpa_key.key_index == 2))
- pDevice->bwextcount++;
-
- if ((pDevice->bwextcount == 3) && (param->u.wpa_key.key_index == 3))
- pDevice->bwextcount++;
-
- }
- if (pDevice->bwextcount == 4) {
- pr_debug("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = true;
- }
-//******
-
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, true);
- spin_unlock_irq(&pDevice->lock);
-
-error:
- kfree(param);
- return ret;
-}
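
For a 32-byte key, iwctl_siwencodeext() above keeps the first 16 bytes in place and swaps the two trailing 8-byte halves, i.e. the order of the TKIP MIC keys differs between what the supplicant passes in and what the lower layers expect. A standalone sketch of that reordering (plain C; the helper name is illustrative):

#include <stdio.h>
#include <string.h>

/* Reorder a 32-byte TKIP key the way iwctl_siwencodeext() does: keep the
 * 16-byte temporal key, swap the two trailing 8-byte (MIC) halves. */
static void reorder_tkip_key(unsigned char out[32], const unsigned char in[32])
{
	memcpy(out,      in,      16);	/* temporal key unchanged */
	memcpy(out + 16, in + 24, 8);	/* second MIC half first */
	memcpy(out + 24, in + 16, 8);	/* first MIC half last */
}

int main(void)
{
	unsigned char in[32], out[32];
	int i;

	for (i = 0; i < 32; i++)
		in[i] = (unsigned char)i;
	reorder_tkip_key(out, in);

	for (i = 0; i < 32; i++)
		printf("%02x%s", out[i], (i % 8 == 7) ? "\n" : " ");
	return 0;
}
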
-
-int iwctl_giwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra)
-{
- return -EOPNOTSUPP;
-}
-
-int iwctl_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra)
-{
- struct vnt_private *pDevice = netdev_priv(dev);
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct iw_mlme mime;
-
- int ret = 0;
-
- ret = copy_from_user(&mime, extra, sizeof(mime));
- if (ret)
- return -EFAULT;
-
- if (memcmp(pMgmt->abyCurrBSSID, mime.addr.sa_data, ETH_ALEN)) {
- ret = -EINVAL;
- return ret;
- }
- switch (mime.cmd) {
- case IW_MLME_DEAUTH:
-			//this command seems to be incomplete; please test it --einsnliu
- //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (unsigned char *)&reason);
- break;
- case IW_MLME_DISASSOC:
- if (pDevice->bLinkPass == true) {
- pr_debug("iwctl_siwmlme--->send DISASSOCIATE\n");
- //clear related flags
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- break;
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-#endif
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const iw_handler iwctl_handler[] =
-{
- (iw_handler) iwctl_commit, // SIOCSIWCOMMIT
- (iw_handler) NULL, // SIOCGIWNAME
- (iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) NULL, // SIOCGIWNWID
- (iw_handler) NULL, // SIOCSIWFREQ
- (iw_handler) NULL, // SIOCGIWFREQ
- (iw_handler) NULL, // SIOCSIWMODE
- (iw_handler) NULL, // SIOCGIWMODE
- (iw_handler) NULL, // SIOCSIWSENS
- (iw_handler) NULL, // SIOCGIWSENS
- (iw_handler) NULL, // SIOCSIWRANGE
- (iw_handler) iwctl_giwrange, // SIOCGIWRANGE
- (iw_handler) NULL, // SIOCSIWPRIV
- (iw_handler) NULL, // SIOCGIWPRIV
- (iw_handler) NULL, // SIOCSIWSTATS
- (iw_handler) NULL, // SIOCGIWSTATS
- (iw_handler) NULL, // SIOCSIWSPY
- (iw_handler) NULL, // SIOCGIWSPY
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWAP
- (iw_handler) NULL, // SIOCGIWAP
- (iw_handler) NULL, // -- hole -- 0x16
- (iw_handler) NULL, // SIOCGIWAPLIST
- (iw_handler) iwctl_siwscan, // SIOCSIWSCAN
- (iw_handler) iwctl_giwscan, // SIOCGIWSCAN
- (iw_handler) NULL, // SIOCSIWESSID
- (iw_handler) NULL, // SIOCGIWESSID
- (iw_handler) NULL, // SIOCSIWNICKN
- (iw_handler) NULL, // SIOCGIWNICKN
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWRATE 0x20
- (iw_handler) NULL, // SIOCGIWRATE
- (iw_handler) NULL, // SIOCSIWRTS
- (iw_handler) NULL, // SIOCGIWRTS
- (iw_handler) NULL, // SIOCSIWFRAG
- (iw_handler) NULL, // SIOCGIWFRAG
- (iw_handler) NULL, // SIOCSIWTXPOW
- (iw_handler) NULL, // SIOCGIWTXPOW
- (iw_handler) NULL, // SIOCSIWRETRY
- (iw_handler) NULL, // SIOCGIWRETRY
- (iw_handler) NULL, // SIOCSIWENCODE
- (iw_handler) NULL, // SIOCGIWENCODE
- (iw_handler) NULL, // SIOCSIWPOWER
- (iw_handler) NULL, // SIOCGIWPOWER
-
-//2008-0409-07, <Add> by Einsn Liu
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // -- hole --
- (iw_handler) NULL, // SIOCSIWGENIE
- (iw_handler) NULL, // SIOCGIWGENIE
- (iw_handler) NULL, // SIOCSIWAUTH
- (iw_handler) NULL, // SIOCGIWAUTH
- (iw_handler) NULL, // SIOCSIWENCODEEXT
- (iw_handler) NULL, // SIOCGIWENCODEEXT
- (iw_handler) NULL, // SIOCSIWPMKSA
- (iw_handler) NULL, // -- hole --
-};
-
-static const iw_handler iwctl_private_handler[] =
-{
- NULL, // SIOCIWFIRSTPRIV
-};
-
-struct iw_priv_args iwctl_private_args[] = {
- { IOCTL_CMD_SET,
- IW_PRIV_TYPE_CHAR | 1024, 0,
- "set"},
-};
-
-const struct iw_handler_def iwctl_handler_def =
-{
- .get_wireless_stats = &iwctl_get_wireless_stats,
- .num_standard = sizeof(iwctl_handler)/sizeof(iw_handler),
- .num_private = 0,
- .num_private_args = 0,
- .standard = (iw_handler *)iwctl_handler,
- .private = NULL,
- .private_args = NULL,
-};
diff --git a/drivers/staging/vt6655/iwctl.h b/drivers/staging/vt6655/iwctl.h
deleted file mode 100644
index 7dd63102182d..000000000000
--- a/drivers/staging/vt6655/iwctl.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: iwctl.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2004
- *
- */
-
-#ifndef __IWCTL_H__
-#define __IWCTL_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-struct iw_statistics *iwctl_get_wireless_stats(struct net_device *dev);
-
-int iwctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra);
-
-int iwctl_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra);
-
-int iwctl_siwmode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *wmode,
- char *extra);
-
-int iwctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra);
-
-int iwctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wrq,
- char *extra);
-
-int iwctl_giwname(struct net_device *dev,
- struct iw_request_info *info,
- char *wrq,
- char *extra);
-
-int iwctl_giwsens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *wrq,
- char *extra);
-
-int iwctl_giwaplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-int iwctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra);
-
-int iwctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-
-int iwctl_giwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-
-int iwctl_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_giwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char *extra);
-
-int iwctl_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrq,
- char __user *extra);
-#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
-
-extern const struct iw_handler_def iwctl_handler_def;
-extern struct iw_priv_args iwctl_private_args[];
-
-#endif // __IWCTL_H__
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 211afae306c7..f2b3fea90533 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -25,770 +25,144 @@
*
* Date: May 29, 2003
*
- * Functions:
- * KeyvInitTable - Init Key management table
- * KeybGetKey - Get Key from table
- * KeybSetKey - Set Key to table
- * KeybRemoveKey - Remove Key from table
- * KeybGetTransmitKey - Get Transmit Key from table
- *
- * Revision History:
- *
*/
#include "tmacro.h"
#include "key.h"
#include "mac.h"
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-static void
-s_vCheckKeyTableValid(PSKeyManagement pTable, void __iomem *dwIoBase)
-{
- int i;
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- !pTable->KeyTable[i].PairwiseKey.bKeyValid &&
- !pTable->KeyTable[i].GroupKey[0].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[1].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[2].bKeyValid &&
- !pTable->KeyTable[i].GroupKey[3].bKeyValid) {
- pTable->KeyTable[i].bInUse = false;
- pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].bSoftWEP = false;
- MACvDisableKeyEntry(dwIoBase, i);
- }
- }
-}
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description: Init Key management table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void KeyvInitTable(PSKeyManagement pTable, void __iomem *dwIoBase)
-{
- int i;
- int jj;
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- pTable->KeyTable[i].bInUse = false;
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- pTable->KeyTable[i].PairwiseKey.pvKeyTable = (void *)&pTable->KeyTable[i];
- for (jj = 0; jj < MAX_GROUP_KEY; jj++) {
- pTable->KeyTable[i].GroupKey[jj].bKeyValid = false;
- pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (void *)&pTable->KeyTable[i];
- }
- pTable->KeyTable[i].wKeyCtl = 0;
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- pTable->KeyTable[i].bSoftWEP = false;
- MACvDisableKeyEntry(dwIoBase, i);
- }
-}
-
-/*
- * Description: Get Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key Index (0xFFFFFFFF means pairwise key)
- * Out:
- * pKey - Key return
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybGetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- PSKeyItem *pKey
-)
-{
- int i;
-
- pr_debug("KeybGetKey()\n");
-
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if (dwKeyIndex == 0xFFFFFFFF) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
- return true;
- } else {
- return false;
- }
- } else if (dwKeyIndex < MAX_GROUP_KEY) {
- if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid) {
- *pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
- return true;
- } else {
- return false;
- }
- } else {
- return false;
- }
- }
- }
- return false;
-}
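
KeybGetKey() above treats a key index of 0xFFFFFFFF as a request for the pairwise key and small values as group-key slots, while callers such as iwctl_siwencode() mark the transmit key by setting bit 31 of the same dword (dwKeyIndex | (1 << 31)). A standalone sketch of that packing (only the bit-31 flag visible in the call sites is modelled; the driver's other flag bits are not shown):

#include <stdio.h>

#define KEY_INDEX_PAIRWISE	0xFFFFFFFFUL	/* "give me the pairwise key" */
#define KEY_FLAG_TRANSMIT	(1UL << 31)	/* as used by iwctl_siwencode() */

/* Pack a group-key slot (0..3) plus the transmit flag into one dword. */
static unsigned long make_tx_group_key_index(unsigned int slot)
{
	return (unsigned long)slot | KEY_FLAG_TRANSMIT;
}

int main(void)
{
	unsigned long idx = make_tx_group_key_index(1);

	printf("packed index: 0x%08lx (slot %lu, tx=%d)\n",
	       idx, idx & 0xFF, (idx & KEY_FLAG_TRANSMIT) ? 1 : 0);
	printf("pairwise request: 0x%08lx\n", KEY_INDEX_PAIRWISE);
	return 0;
}
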
-
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- int i, j;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
-
- pr_debug("Enter KeybSetKey: %lX\n", dwKeyIndex);
-
- j = (MAX_KEY_TABLE-1);
- for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
- if (!pTable->KeyTable[i].bInUse && (j == (MAX_KEY_TABLE-1))) {
- // found empty table
- j = i;
- }
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- // found an existing table entry
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- // Pairwise key
- pKey = &(pTable->KeyTable[i].PairwiseKey);
- pTable->KeyTable[i].wKeyCtl &= 0xFFF0; // clear pairwise key control field
- pTable->KeyTable[i].wKeyCtl |= byKeyDecMode;
- uKeyIdx = 4; // use HW key entry 4 for pairwise key
- } else {
- // Group key
- if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
- pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[i].dwGTKeyIndex, i);
- }
- pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control field
- pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- }
- pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
-
- return true;
- }
- }
- if (j < (MAX_KEY_TABLE-1)) {
- memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN);
- pTable->KeyTable[j].bInUse = true;
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- // Pairwise key
- pKey = &(pTable->KeyTable[j].PairwiseKey);
- pTable->KeyTable[j].wKeyCtl &= 0xFFF0; // clear pairwise key control field
- pTable->KeyTable[j].wKeyCtl |= byKeyDecMode;
- uKeyIdx = 4; // use HW key entry 4 for pairwise key
- } else {
- // Group key
- if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
- pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(N)[%lX]: %d\n",
- pTable->KeyTable[j].dwGTKeyIndex, j);
- }
- pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control field
- pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[j].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- }
- pTable->KeyTable[j].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[j].wKeyCtl, j, uKeyIdx, pbyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(N):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
-
- return true;
- }
- return false;
-}
-
-/*
- * Description: Remove Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * dwKeyIndex - Key Index (reference to NDIS DDK)
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybRemoveKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-)
-{
- int i;
-
- if (is_broadcast_ether_addr(pbyBSSID)) {
- // delete all keys
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- for (i = 0; i < MAX_KEY_TABLE; i++)
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
-
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- }
- }
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else {
- return false;
- }
- }
-
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- }
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-/*
- * Description: Remove Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybRemoveAllKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- void __iomem *dwIoBase
-)
+int vnt_key_init_table(struct vnt_private *priv)
{
- int i, u;
+ u32 i;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- pTable->KeyTable[i].PairwiseKey.bKeyValid = false;
- for (u = 0; u < MAX_GROUP_KEY; u++)
- pTable->KeyTable[i].GroupKey[u].bKeyValid = false;
+ for (i = 0; i < MAX_KEY_TABLE; i++)
+ MACvDisableKeyEntry(priv->PortOffset, i);
- pTable->KeyTable[i].dwGTKeyIndex = 0;
- s_vCheckKeyTableValid(pTable, dwIoBase);
- return true;
- }
- }
- return false;
+ return 0;
}
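Editor's sketch (not part of the patch): the new init helper only clears the hardware key slots, so a natural call site is the driver's mac80211 start path. The callback name vnt_start() below is an assumption for illustration; only vnt_key_init_table() and struct vnt_private come from the code above.

static int vnt_start(struct ieee80211_hw *hw)	/* hypothetical callback name */
{
	struct vnt_private *priv = hw->priv;

	/* wipe every hardware key entry before mac80211 installs new keys */
	return vnt_key_init_table(priv);
}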
-/*
- * Description: Remove WEP Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-void KeyvRemoveWEPKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-)
+static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
+ struct ieee80211_key_conf *key, u32 key_type, u32 mode,
+ bool onfly_latch)
{
- if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse) {
- if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
- pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = false;
- if ((dwKeyIndex & 0x7FFFFFFF) == (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
- // remove Group transmit key
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0;
- }
+ struct vnt_private *priv = hw->priv;
+ u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u16 key_mode = 0;
+ u32 entry = 0;
+ u8 *bssid;
+ u8 key_inx = key->keyidx;
+ u8 i;
+
+ if (mac_addr)
+ bssid = mac_addr;
+ else
+ bssid = &broadcast[0];
+
+ if (key_type != VNT_KEY_DEFAULTKEY) {
+ for (i = 0; i < (MAX_KEY_TABLE - 1); i++) {
+ if (!test_bit(i, &priv->key_entry_inuse)) {
+ set_bit(i, &priv->key_entry_inuse);
+
+ key->hw_key_idx = i;
+ entry = key->hw_key_idx;
+ break;
}
}
- s_vCheckKeyTableValid(pTable, dwIoBase);
}
-}
-void KeyvRemoveAllWEPKey(
- PSKeyManagement pTable,
- void __iomem *dwIoBase
-)
-{
- int i;
-
- for (i = 0; i < MAX_GROUP_KEY; i++)
- KeyvRemoveWEPKey(pTable, i, dwIoBase);
+ switch (key_type) {
+ case VNT_KEY_DEFAULTKEY:
+ /* the default key always occupies the last table entry */
+ entry = MAX_KEY_TABLE - 1;
+ key->hw_key_idx = entry;
+ /* fall through */
+ case VNT_KEY_ALLGROUP:
+ key_mode |= VNT_KEY_ALLGROUP;
+ if (onfly_latch)
+ key_mode |= VNT_KEY_ONFLY_ALL;
+ /* fall through */
+ case VNT_KEY_GROUP_ADDRESS:
+ key_mode |= mode;
+ /* fall through */
+ case VNT_KEY_GROUP:
+ key_mode |= (mode << 4);
+ key_mode |= VNT_KEY_GROUP;
+ break;
+ case VNT_KEY_PAIRWISE:
+ key_mode |= mode;
+ key_inx = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (onfly_latch)
+ key_mode |= VNT_KEY_ONFLY;
+
+ if (mode == KEY_CTL_WEP) {
+ if (key->keylen == WLAN_KEY_LEN_WEP40)
+ key->key[15] &= 0x7f;
+ if (key->keylen == WLAN_KEY_LEN_WEP104)
+ key->key[15] |= 0x80;
+ }
+
+ MACvSetKeyEntry(priv->PortOffset, key_mode, entry, key_inx,
+ bssid, (u32 *)key->key, priv->byLocalID);
+
+ return 0;
}
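The case labels above fall through on purpose, so a default key collects every group-related bit. A worked example (editor's note; it assumes KEY_CTL_WEP expands to 0, which is defined outside this hunk): with key_type == VNT_KEY_DEFAULTKEY, mode == KEY_CTL_WEP and onfly_latch == true the switch accumulates

	key_mode = VNT_KEY_ALLGROUP | VNT_KEY_ONFLY_ALL   /* 0x0004 | 0x4000 */
		 | mode | (mode << 4) | VNT_KEY_GROUP     /* 0x0000 | 0x0000 | 0x0040 */
		 | VNT_KEY_ONFLY;                         /* 0x8000 */
	/* = 0xC044, matching the 0x0044 and 0xC000 wKeyCtl bits that the
	 * removed KeybSetDefaultKey() used to program for the default WEP entry */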
-/*
- * Description: Get Transmit Key from table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * pbyBSSID - BSSID of Key
- * Out:
- * pKey - Key return
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybGetTransmitKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyType,
- PSKeyItem *pKey
-)
+int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
- int i, ii;
-
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- ether_addr_equal(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
- if (dwKeyType == PAIRWISE_KEY) {
- if (pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
-
- pr_debug("KeybGetTransmitKey:");
- pr_debug("PAIRWISE_KEY: KeyTable.abyBSSID: ");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x ",
- pTable->KeyTable[i].abyBSSID[ii]);
-
- pr_debug("\n");
+ struct ieee80211_bss_conf *conf = &vif->bss_conf;
+ struct vnt_private *priv = hw->priv;
+ u8 *mac_addr = NULL;
+ u8 key_dec_mode = 0;
+ int ret = 0;
+ u32 u;
- return true;
- } else {
- pr_debug("PairwiseKey.bKeyValid == false\n");
- return false;
- }
- } // End of Type == PAIRWISE
- else {
- if (pTable->KeyTable[i].dwGTKeyIndex == 0) {
- pr_debug("ERROR: dwGTKeyIndex == 0 !!!\n");
- return false;
- }
- if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid) {
- *pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);
-
- pr_debug("KeybGetTransmitKey:");
- pr_debug("GROUP_KEY: KeyTable.abyBSSID\n");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%x ",
- pTable->KeyTable[i].abyBSSID[ii]);
-
- pr_debug("\n");
- pr_debug("dwGTKeyIndex: %lX\n",
- pTable->KeyTable[i].dwGTKeyIndex);
-
- return true;
- } else {
- pr_debug("GroupKey.bKeyValid == false\n");
- return false;
- }
- } // End of Type = GROUP
- } // BSSID match
- }
- pr_debug("ERROR: NO Match BSSID !!! ");
- for (ii = 0; ii < 6; ii++)
- pr_debug("%02x ", *(pbyBSSID+ii));
-
- pr_debug("\n");
- return false;
-}
-
-/*
- * Description: Check Pairwise Key
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * Out:
- * none
- *
- * Return Value: true if found otherwise false
- *
- */
-bool KeybCheckPairewiseKey(
- PSKeyManagement pTable,
- PSKeyItem *pKey
-)
-{
- int i;
+ if (sta)
+ mac_addr = &sta->addr[0];
- *pKey = NULL;
- for (i = 0; i < MAX_KEY_TABLE; i++) {
- if (pTable->KeyTable[i].bInUse &&
- pTable->KeyTable[i].PairwiseKey.bKeyValid) {
- *pKey = &(pTable->KeyTable[i].PairwiseKey);
- return true;
- }
- }
- return false;
-}
+ switch (key->cipher) {
+ case 0:
+ /* no cipher: clear every hardware key entry and bail out */
+ for (u = 0; u < MAX_KEY_TABLE; u++)
+ MACvDisableKeyEntry(priv->PortOffset, u);
+ return ret;
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetDefaultKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ for (u = 0; u < MAX_KEY_TABLE; u++)
+ MACvDisableKeyEntry(priv->PortOffset, u);
- pr_debug("Enter KeybSetDefaultKey: %1x, %d\n",
- (int)dwKeyIndex, (int)uKeyLength);
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true);
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) // Pairwise key
- return false;
- else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- if (uKeyLength > MAX_KEY_LEN)
- return false;
+ return ret;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- pTable->KeyTable[MAX_KEY_TABLE - 1].bInUse = true;
- for (ii = 0; ii < ETH_ALEN; ii++)
- pTable->KeyTable[MAX_KEY_TABLE - 1].abyBSSID[ii] = 0xFF;
+ key_dec_mode = KEY_CTL_TKIP;
- // Group key
- pKey = &(pTable->KeyTable[MAX_KEY_TABLE - 1].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
- MAX_KEY_TABLE-1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_dec_mode = KEY_CTL_CCMP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ return -EOPNOTSUPP; /* unsupported cipher: let mac80211 use software crypto */
}
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control fields
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode);
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x0044; // use group key for all address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
- if ((uKeyLength == WLAN_WEP232_KEYLEN) &&
- (byKeyDecMode == KEY_CTL_WEP)) {
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000; // disable on-fly disable address match
- pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = true;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_PAIRWISE, key_dec_mode, true);
} else {
- if (!pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP)
- pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000; // enable on-fly disable address match
- }
+ vnt_set_keymode(hw, mac_addr,
+ key, VNT_KEY_DEFAULTKEY, key_dec_mode, true);
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl, MAX_KEY_TABLE-1, uKeyIdx, pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
+ vnt_set_keymode(hw, (u8 *)conf->bssid,
+ key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
}
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n", (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey:\n");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%x", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- pr_debug("pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
- pr_debug("pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
- pr_debug("pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
-
- return true;
-}
-/*
- * Description: Set Key to table
- *
- * Parameters:
- * In:
- * pTable - Pointer to Key table
- * dwKeyIndex - Key index (reference to NDIS DDK)
- * uKeyLength - Key length
- * KeyRSC - Key RSC
- * pbyKey - Pointer to key
- * Out:
- * none
- *
- * Return Value: true if success otherwise false
- *
- */
-bool KeybSetAllGroupKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-)
-{
- int i;
- unsigned int ii;
- PSKeyItem pKey;
- unsigned int uKeyIdx;
-
- pr_debug("Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
-
- if ((dwKeyIndex & PAIRWISE_KEY) != 0) // Pairwise key
- return false;
- else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
- return false;
-
- for (i = 0; i < MAX_KEY_TABLE - 1; i++) {
- if (pTable->KeyTable[i].bInUse) {
- // found an existing table entry
- // Group key
- pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
- if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
- // Group transmit key
- pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
- pr_debug("Group transmit key(R)[%lX]: %d\n",
- pTable->KeyTable[i].dwGTKeyIndex, i);
-
- }
- pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control field
- pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
- pTable->KeyTable[i].wKeyCtl |= 0x0040; // use group key for group address
- uKeyIdx = (dwKeyIndex & 0x000000FF);
-
- pTable->KeyTable[i].wKeyCtl |= 0x8000; // enable on-fly
-
- pKey->bKeyValid = true;
- pKey->uKeyLength = uKeyLength;
- pKey->dwKeyIndex = dwKeyIndex;
- pKey->byCipherSuite = byKeyDecMode;
- memcpy(pKey->abyKey, pbyKey, uKeyLength);
- if (byKeyDecMode == KEY_CTL_WEP) {
- if (uKeyLength == WLAN_WEP40_KEYLEN)
- pKey->abyKey[15] &= 0x7F;
- if (uKeyLength == WLAN_WEP104_KEYLEN)
- pKey->abyKey[15] |= 0x80;
- }
- MACvSetKeyEntry(dwIoBase, pTable->KeyTable[i].wKeyCtl, i, uKeyIdx, pTable->KeyTable[i].abyBSSID, (u32 *)pKey->abyKey, byLocalID);
-
- if ((dwKeyIndex & USE_KEYRSC) == 0) {
- // RSC set by NIC
- pKey->KeyRSC = 0;
- } else {
- pKey->KeyRSC = *pKeyRSC;
- }
- pKey->dwTSC47_16 = 0;
- pKey->wTSC15_0 = 0;
-
- pr_debug("KeybSetKey(R):\n");
- pr_debug("pKey->bKeyValid: %d\n ", pKey->bKeyValid);
- pr_debug("pKey->uKeyLength: %d\n ",
- (int)pKey->uKeyLength);
- pr_debug("pKey->abyKey: ");
- for (ii = 0; ii < pKey->uKeyLength; ii++)
- pr_debug("%02x ", pKey->abyKey[ii]);
-
- pr_debug("\n");
-
- } // (pTable->KeyTable[i].bInUse == true)
- }
- return true;
+ return 0;
}
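For orientation, a hedged sketch of how this entry point could be wired into the driver's ieee80211_ops; vnt_op_set_key() is a hypothetical name, and the DISABLE_KEY handling shown is only one plausible way to release the per-station slot claimed via key_entry_inuse in vnt_set_keymode() above — it is not something this patch adds.

static int vnt_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct vnt_private *priv = hw->priv;

	switch (cmd) {
	case SET_KEY:
		return vnt_set_keys(hw, sta, vif, key);
	case DISABLE_KEY:
		/* free the hardware slot recorded in key->hw_key_idx */
		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
		break;
	default:
		break;
	}

	return 0;
}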
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index 44efe18315af..c01d4afb6ab8 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -30,9 +30,7 @@
#ifndef __KEY_H__
#define __KEY_H__
-#include "ttype.h"
-#include "tether.h"
-#include "80211mgr.h"
+#include <net/mac80211.h>
/*--------------------- Export Definitions -------------------------*/
#define MAX_GROUP_KEY 4
@@ -53,124 +51,19 @@
#define KEY_CTL_CCMP 0x03
#define KEY_CTL_INVALID 0xFF
-typedef struct tagSKeyItem {
- bool bKeyValid;
- unsigned long uKeyLength;
- unsigned char abyKey[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned long dwTSC47_16;
- unsigned short wTSC15_0;
- unsigned char byCipherSuite;
- unsigned char byReserved0;
- unsigned long dwKeyIndex;
- void *pvKeyTable;
-} SKeyItem, *PSKeyItem; //64
+#define VNT_KEY_DEFAULTKEY 0x1
+#define VNT_KEY_GROUP_ADDRESS 0x2
+#define VNT_KEY_ALLGROUP 0x4
+#define VNT_KEY_GROUP 0x40
+#define VNT_KEY_PAIRWISE 0x00
+#define VNT_KEY_ONFLY 0x8000
+#define VNT_KEY_ONFLY_ALL 0x4000
-typedef struct tagSKeyTable {
- unsigned char abyBSSID[ETH_ALEN]; //6
- unsigned char byReserved0[2]; //8
- SKeyItem PairwiseKey;
- SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
- unsigned long dwGTKeyIndex; // GroupTransmitKey Index
- bool bInUse;
- //2006-1116-01,<Modify> by NomadZhao
- bool bSoftWEP;
- unsigned short wKeyCtl; // for address of wKeyCtl at align 4
+struct vnt_private;
- unsigned char byReserved1[6];
-} SKeyTable, *PSKeyTable; //348
+int vnt_key_init_table(struct vnt_private *);
-typedef struct tagSKeyManagement {
- SKeyTable KeyTable[MAX_KEY_TABLE];
-} SKeyManagement, *PSKeyManagement;
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void KeyvInitTable(PSKeyManagement pTable, void __iomem *dwIoBase);
-
-bool KeybGetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- PSKeyItem *pKey
-);
-
-bool KeybSetKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
-
-bool KeybSetDefaultKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
-
-bool KeybRemoveKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-);
-
-bool KeybGetTransmitKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- unsigned long dwKeyType,
- PSKeyItem *pKey
-);
-
-bool KeybCheckPairewiseKey(
- PSKeyManagement pTable,
- PSKeyItem *pKey
-);
-
-bool KeybRemoveAllKey(
- PSKeyManagement pTable,
- unsigned char *pbyBSSID,
- void __iomem *dwIoBase
-);
-
-void KeyvRemoveWEPKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- void __iomem *dwIoBase
-);
-
-void KeyvRemoveAllWEPKey(
- PSKeyManagement pTable,
- void __iomem *dwIoBase
-);
-
-bool KeybSetAllGroupKey(
- PSKeyManagement pTable,
- unsigned long dwKeyIndex,
- unsigned long uKeyLength,
- u64 *pKeyRSC,
- unsigned char *pbyKey,
- unsigned char byKeyDecMode,
- void __iomem *dwIoBase,
- unsigned char byLocalID
-);
+int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, struct ieee80211_key_conf *key);
#endif // __KEY_H__
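Editor's reading aid (not part of the patch): the new flag values line up with the hard-coded wKeyCtl bits used by the removed key.c.

/*
 * VNT_KEY_ONFLY     (0x8000) - the old "enable on-fly" bit
 * VNT_KEY_ONFLY_ALL (0x4000) - the old "disable address match" bit
 * VNT_KEY_GROUP     (0x0040) - the old "use group key for group address" bit
 * VNT_KEY_ALLGROUP  (0x0004) - together with VNT_KEY_GROUP this forms the
 *                              old 0x0044 "use group key for all address" pattern
 */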
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index e3b0b7f7ca85..8f0d652fea7c 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -26,30 +26,15 @@
* Date: May 21, 1996
*
* Functions:
- * MACvReadAllRegs - Read All MAC Registers to buffer
* MACbIsRegBitsOn - Test if All test Bits On
* MACbIsRegBitsOff - Test if All test Bits Off
* MACbIsIntDisable - Test if MAC interrupt disable
- * MACbyReadMultiAddr - Read Multicast Address Mask Pattern
- * MACvWriteMultiAddr - Write Multicast Address Mask Pattern
- * MACvSetMultiAddrByHash - Set Multicast Address Mask by Hash value
- * MACvResetMultiAddrByHash - Clear Multicast Address Mask by Hash value
- * MACvSetRxThreshold - Set Rx Threshold value
- * MACvGetRxThreshold - Get Rx Threshold value
- * MACvSetTxThreshold - Set Tx Threshold value
- * MACvGetTxThreshold - Get Tx Threshold value
- * MACvSetDmaLength - Set Dma Length value
- * MACvGetDmaLength - Get Dma Length value
* MACvSetShortRetryLimit - Set 802.11 Short Retry limit
* MACvGetShortRetryLimit - Get 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
- * MACvGetLongRetryLimit - Get 802.11 Long Retry limit
* MACvSetLoopbackMode - Set MAC Loopback Mode
- * MACbIsInLoopbackMode - Test if MAC in Loopback mode
- * MACvSetPacketFilter - Set MAC Address Filter
* MACvSaveContext - Save Context of MAC Registers
* MACvRestoreContext - Restore Context of MAC Registers
- * MACbCompareContext - Compare if values of MAC Registers same as Context
* MACbSoftwareReset - Software Reset MAC
* MACbSafeRxOff - Turn Off MAC Rx
* MACbSafeTxOff - Turn Off MAC Tx
@@ -69,54 +54,8 @@
*/
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
-unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description:
- * Read All MAC Registers to buffer
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyMacRegs - buffer to read
- *
- * Return Value: none
- *
- */
-void MACvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyMacRegs)
-{
- int ii;
-
- // read page0 register
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++) {
- VNSvInPortB(dwIoBase + ii, pbyMacRegs);
- pbyMacRegs++;
- }
-
- MACvSelectPage1(dwIoBase);
-
- // read page1 register
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) {
- VNSvInPortB(dwIoBase + ii, pbyMacRegs);
- pbyMacRegs++;
- }
-
- MACvSelectPage0(dwIoBase);
-}
-
/*
* Description:
* Test if all test bits on
@@ -189,252 +128,6 @@ bool MACbIsIntDisable(void __iomem *dwIoBase)
/*
* Description:
- * Read MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uByteidx - Index of Mask
- * Out:
- * none
- *
- * Return Value: Mask Value read
- *
- */
-unsigned char MACbyReadMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx)
-{
- unsigned char byData;
-
- MACvSelectPage1(dwIoBase);
- VNSvInPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, &byData);
- MACvSelectPage0(dwIoBase);
- return byData;
-}
-
-/*
- * Description:
- * Write MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uByteidx - Index of Mask
- * byData - Mask Value to write
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvWriteMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx, unsigned char byData)
-{
- MACvSelectPage1(dwIoBase);
- VNSvOutPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, byData);
- MACvSelectPage0(dwIoBase);
-}
-
-/*
- * Description:
- * Set this hash index into multicast address register bit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byHashIdx - Hash index to set
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx)
-{
- unsigned int uByteIdx;
- unsigned char byBitMask;
- unsigned char byOrgValue;
-
- // calculate byte position
- uByteIdx = byHashIdx / 8;
- ASSERT(uByteIdx < 8);
- // calculate bit position
- byBitMask = 1;
- byBitMask <<= (byHashIdx % 8);
- // turn on the bit
- byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue | byBitMask));
-}
-
-/*
- * Description:
- * Reset this hash index into multicast address register bit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byHashIdx - Hash index to clear
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvResetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx)
-{
- unsigned int uByteIdx;
- unsigned char byBitMask;
- unsigned char byOrgValue;
-
- // calculate byte position
- uByteIdx = byHashIdx / 8;
- ASSERT(uByteIdx < 8);
- // calculate bit position
- byBitMask = 1;
- byBitMask <<= (byHashIdx % 8);
- // turn off the bit
- byOrgValue = MACbyReadMultiAddr(dwIoBase, uByteIdx);
- MACvWriteMultiAddr(dwIoBase, uByteIdx, (unsigned char)(byOrgValue & (~byBitMask)));
-}
-
-/*
- * Description:
- * Set Rx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byThreshold - Threshold Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetRxThreshold(void __iomem *dwIoBase, unsigned char byThreshold)
-{
- unsigned char byOrgValue;
-
- ASSERT(byThreshold < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xCF) | (byThreshold << 4);
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Rx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyThreshold- Threshold Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetRxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
- *pbyThreshold = (*pbyThreshold >> 4) & 0x03;
-}
-
-/*
- * Description:
- * Set Tx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byThreshold - Threshold Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetTxThreshold(void __iomem *dwIoBase, unsigned char byThreshold)
-{
- unsigned char byOrgValue;
-
- ASSERT(byThreshold < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xF3) | (byThreshold << 2);
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Tx Threshold
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyThreshold- Threshold Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetTxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyThreshold);
- *pbyThreshold = (*pbyThreshold >> 2) & 0x03;
-}
-
-/*
- * Description:
- * Set Dma Length
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * byDmaLength - Dma Length Value
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetDmaLength(void __iomem *dwIoBase, unsigned char byDmaLength)
-{
- unsigned char byOrgValue;
-
- ASSERT(byDmaLength < 4);
-
- // set FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, &byOrgValue);
- byOrgValue = (byOrgValue & 0xFC) | byDmaLength;
- VNSvOutPortB(dwIoBase + MAC_REG_FCR0, byOrgValue);
-}
-
-/*
- * Description:
- * Get Dma Length
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyDmaLength- Dma Length Value Get
- *
- * Return Value: none
- *
- */
-void MACvGetDmaLength(void __iomem *dwIoBase, unsigned char *pbyDmaLength)
-{
- // get FCR0
- VNSvInPortB(dwIoBase + MAC_REG_FCR0, pbyDmaLength);
- *pbyDmaLength &= 0x03;
-}
-
-/*
- * Description:
* Set 802.11 Short Retry Limit
*
* Parameters:
@@ -494,25 +187,6 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
/*
* Description:
- * Get 802.11 Long Retry Limit
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * pbyRetryLimit - Retry Limit Get
- *
- * Return Value: none
- *
- */
-void MACvGetLongRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit)
-{
- // get LRT
- VNSvInPortB(dwIoBase + MAC_REG_LRT, pbyRetryLimit);
-}
-
-/*
- * Description:
* Set MAC Loopback mode
*
* Parameters:
@@ -540,89 +214,6 @@ void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
/*
* Description:
- * Test if MAC in Loopback mode
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * Out:
- * none
- *
- * Return Value: true if in Loopback mode; otherwise false
- *
- */
-bool MACbIsInLoopbackMode(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
-
- VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
- if (byOrgValue & (TEST_LBINT | TEST_LBEXT))
- return true;
- return false;
-}
-
-/*
- * Description:
- * Set MAC Address filter
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * wFilterType - Filter Type
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetPacketFilter(void __iomem *dwIoBase, unsigned short wFilterType)
-{
- unsigned char byOldRCR;
- unsigned char byNewRCR = 0;
-
- // if only DIRECTED mode is set, the multicast address mask is cleared,
- // but if any other mode is set (e.g. PROMISCUOUS), the multicast address
- // mask is opened to accept everything
- if (wFilterType & PKT_TYPE_DIRECTED) {
- // set multicast address to accept none
- MACvSelectPage1(dwIoBase);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0L);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0L);
- MACvSelectPage0(dwIoBase);
- }
-
- if (wFilterType & (PKT_TYPE_PROMISCUOUS | PKT_TYPE_ALL_MULTICAST)) {
- // set multicast address to accept all
- MACvSelectPage1(dwIoBase);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0, 0xFFFFFFFFL);
- VNSvOutPortD(dwIoBase + MAC_REG_MAR0 + sizeof(unsigned long), 0xFFFFFFFFL);
- MACvSelectPage0(dwIoBase);
- }
-
- if (wFilterType & PKT_TYPE_PROMISCUOUS) {
- byNewRCR |= (RCR_RXALLTYPE | RCR_UNICAST | RCR_MULTICAST | RCR_BROADCAST);
-
- byNewRCR &= ~RCR_BSSID;
- }
-
- if (wFilterType & (PKT_TYPE_ALL_MULTICAST | PKT_TYPE_MULTICAST))
- byNewRCR |= RCR_MULTICAST;
-
- if (wFilterType & PKT_TYPE_BROADCAST)
- byNewRCR |= RCR_BROADCAST;
-
- if (wFilterType & PKT_TYPE_ERROR_CRC)
- byNewRCR |= RCR_ERRCRC;
-
- VNSvInPortB(dwIoBase + MAC_REG_RCR, &byOldRCR);
- if (byNewRCR != byOldRCR) {
- // Modify the Receive Command Register
- VNSvOutPortB(dwIoBase + MAC_REG_RCR, byNewRCR);
- }
-}
-
-/*
- * Description:
* Save MAC registers to context buffer
*
* Parameters:
@@ -702,47 +293,6 @@ void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
/*
* Description:
- * Compare if MAC registers same as context buffer
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * pbyCxtBuf - Context buffer
- * Out:
- * none
- *
- * Return Value: true if all values are the same; otherwise false
- *
- */
-bool MACbCompareContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
-{
- unsigned long dwData;
-
- // compare MAC context to determine if this is a power lost init,
- // return true for power remaining init, return false for power lost init
-
- // compare CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR
- VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0))
- return false;
-
- VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1, &dwData);
- if (dwData != *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1))
- return false;
-
- return true;
-}
-
-/*
- * Description:
* Software Reset MAC
*
* Parameters:
@@ -1018,11 +568,6 @@ void MACvInitialize(void __iomem *dwIoBase)
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
// enable TSF counter
VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
-
- // set packet filter
- // receive directed and broadcast address
-
- MACvSetPacketFilter(dwIoBase, PKT_TYPE_DIRECTED | PKT_TYPE_BROADCAST);
}
/*
@@ -1234,27 +779,6 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer0MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
-{
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
- VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelayTime);
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE));
-}
-
-/*
- * Description:
- * Micro Second One shot timer via MAC
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- * uDelay - Delay time
- * Out:
- * none
- *
- * Return Value: none
- *
- */
void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
{
VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
@@ -1271,102 +795,6 @@ void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, unsigned lo
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
-bool MACbTxDMAOff(void __iomem *dwIoBase, unsigned int idx)
-{
- unsigned char byData;
- unsigned int ww = 0;
-
- if (idx == TYPE_TXDMA0) {
- VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData);
- if (!(byData & DMACTL_RUN))
- break;
- }
- } else if (idx == TYPE_AC0DMA) {
- VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData);
- if (!(byData & DMACTL_RUN))
- break;
- }
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x29);
- pr_debug(" DBG_PORT80(0x29)\n");
- return false;
- }
- return true;
-}
-
-void MACvClearBusSusInd(void __iomem *dwIoBase)
-{
- unsigned long dwOrgValue;
- unsigned int ww;
- // check if BcnSusInd enabled
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (!(dwOrgValue & EnCFG_BcnSusInd))
- return;
- //Set BcnSusClr
- dwOrgValue = dwOrgValue | EnCFG_BcnSusClr;
- VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (!(dwOrgValue & EnCFG_BcnSusInd))
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x33);
- pr_debug(" DBG_PORT80(0x33)\n");
- }
-}
-
-void MACvEnableBusSusEn(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
- unsigned long dwOrgValue;
- unsigned int ww;
- // check if BcnSusInd enabled
- VNSvInPortB(dwIoBase + MAC_REG_CFG , &byOrgValue);
-
- //Set BcnSusEn
- byOrgValue = byOrgValue | CFG_BCNSUSEN;
- VNSvOutPortB(dwIoBase + MAC_REG_ENCFG, byOrgValue);
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_ENCFG , &dwOrgValue);
- if (dwOrgValue & EnCFG_BcnSusInd)
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x34);
- pr_debug(" DBG_PORT80(0x34)\n");
- }
-}
-
-bool MACbFlushSYNCFifo(void __iomem *dwIoBase)
-{
- unsigned char byOrgValue;
- unsigned int ww;
- // Read MACCR
- VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
-
- // Set SYNCFLUSH
- byOrgValue = byOrgValue | MACCR_SYNCFLUSH;
- VNSvOutPortB(dwIoBase + MAC_REG_MACCR, byOrgValue);
-
- // Check if SyncFlushOK
- for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_MACCR , &byOrgValue);
- if (byOrgValue & MACCR_SYNCFLUSHOK)
- break;
- }
- if (ww == W_MAX_TIMEOUT) {
- DBG_PORT80(0x35);
- pr_debug(" DBG_PORT80(0x33)\n");
- }
- return true;
-}
-
bool MACbPSWakeup(void __iomem *dwIoBase)
{
unsigned char byOrgValue;
@@ -1484,211 +912,3 @@ void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx)
VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0);
VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
}
-
-/*
- * Description:
- * Set the default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void MACvSetDefaultKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
- int ii;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetDefaultKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- wOffset++;
- wOffset++;
- wOffset += (uKeyIdx * 4);
- // always push 128 bits
- for (ii = 0; ii < 3; ii++) {
- pr_debug("(%d) wOffset: %d, Data: %lX\n",
- ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- }
- dwData = *pdwKey;
- if (uKeyLen == WLAN_WEP104_KEYLEN)
- dwData |= 0x80000000;
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+3);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("End. wOffset: %d, Data: %lX\n", wOffset+3, dwData);
-}
-
-/*
- * Description:
- * Enable default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-/*
- void MACvEnableDefaultKey(void __iomem *dwIoBase, unsigned char byLocalID)
- {
- unsigned short wOffset;
- unsigned long dwData;
-
- if (byLocalID <= 1)
- return;
-
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0xC0440000;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("MACvEnableDefaultKey: wOffset: %d, Data: %lX\n", wOffset, dwData);
-
- }
-*/
-
-/*
- * Description:
- * Disable default Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvDisableDefaultKey(void __iomem *dwIoBase)
-{
- unsigned short wOffset;
- unsigned long dwData;
-
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0x0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- pr_debug("MACvDisableDefaultKey: wOffset: %d, Data: %lX\n",
- wOffset, dwData);
-}
-
-/*
- * Description:
- * Set the default TKIP Group Key (KeyEntry[10]) by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void MACvSetDefaultTKIPKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
- int ii;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetDefaultTKIPKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- // Kyle test : change offset from 10 -> 0
- wOffset += (10 * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0xC0660000;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- dwData = 0;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- wOffset += (uKeyIdx * 4);
- pr_debug("1. wOffset: %d, Data: %lX, idx:%d\n",
- wOffset, *pdwKey, uKeyIdx);
- // always push 128 bits
- for (ii = 0; ii < 4; ii++) {
- pr_debug("2.(%d) wOffset: %d, Data: %lX\n",
- ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- }
-}
-
-/*
- * Description:
- * Set the Key Control by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void MACvSetDefaultKeyCtl(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID)
-{
- unsigned short wOffset;
- unsigned long dwData;
-
- if (byLocalID <= 1)
- return;
-
- pr_debug("MACvSetKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0;
- dwData |= wKeyCtl;
- dwData <<= 16;
- dwData |= 0xffff;
- pr_debug("1. wOffset: %d, Data: %lX, KeyCtl:%X\n",
- wOffset, dwData, wKeyCtl);
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
-}
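The removed default-key helpers above all drive the MISC FIFO with the same three-register sequence kept by the surviving MACvSetMISCFifo(). A minimal illustrative sketch of that access pattern, using only register names and port macros from this file (vnt_misc_fifo_write is a hypothetical helper name):

/* illustrative only: push one 32-bit word into the MISC FIFO at wOffset */
static void vnt_misc_fifo_write(void __iomem *dwIoBase, unsigned short wOffset,
				unsigned long dwData)
{
	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);        /* select slot  */
	VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);         /* latch data   */
	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); /* write strobe */
}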
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 0bf93759b6af..e1e7e10435f6 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -34,7 +34,6 @@
#ifndef __MAC_H__
#define __MAC_H__
-#include "ttype.h"
#include "tmacro.h"
#include "upc.h"
@@ -575,17 +574,6 @@
#define MAC_LB_INTERNAL 0x01 //
#define MAC_LB_NONE 0x00 //
-// Ethernet address filter type
-#define PKT_TYPE_NONE 0x00 // turn off receiver
-#define PKT_TYPE_ALL_MULTICAST 0x80
-#define PKT_TYPE_PROMISCUOUS 0x40
-#define PKT_TYPE_DIRECTED 0x20 // obsolete, directed address is always accepted
-#define PKT_TYPE_BROADCAST 0x10
-#define PKT_TYPE_MULTICAST 0x08
-#define PKT_TYPE_ERROR_WPA 0x04
-#define PKT_TYPE_ERROR_CRC 0x02
-#define PKT_TYPE_BSSID 0x01
-
#define Default_BI 0x200
// MiscFIFO Offset
@@ -965,48 +953,20 @@ do { \
#define MACvSetRFLE_LatchBase(dwIoBase) \
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-extern unsigned short TxRate_iwconfig;//2008-5-8 <add> by chester
-void MACvReadAllRegs(void __iomem *dwIoBase, unsigned char *pbyMacRegs);
-
bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, unsigned char byTestBits);
bool MACbIsIntDisable(void __iomem *dwIoBase);
-unsigned char MACbyReadMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx);
-void MACvWriteMultiAddr(void __iomem *dwIoBase, unsigned int uByteIdx, unsigned char byData);
-void MACvSetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx);
-void MACvResetMultiAddrByHash(void __iomem *dwIoBase, unsigned char byHashIdx);
-
-void MACvSetRxThreshold(void __iomem *dwIoBase, unsigned char byThreshold);
-void MACvGetRxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold);
-
-void MACvSetTxThreshold(void __iomem *dwIoBase, unsigned char byThreshold);
-void MACvGetTxThreshold(void __iomem *dwIoBase, unsigned char *pbyThreshold);
-
-void MACvSetDmaLength(void __iomem *dwIoBase, unsigned char byDmaLength);
-void MACvGetDmaLength(void __iomem *dwIoBase, unsigned char *pbyDmaLength);
-
void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
-void MACvGetShortRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit);
void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
void MACvGetLongRetryLimit(void __iomem *dwIoBase, unsigned char *pbyRetryLimit);
void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode);
-bool MACbIsInLoopbackMode(void __iomem *dwIoBase);
-
-void MACvSetPacketFilter(void __iomem *dwIoBase, unsigned short wFilterType);
void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
-bool MACbCompareContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
bool MACbSoftwareReset(void __iomem *dwIoBase);
bool MACbSafeSoftwareReset(void __iomem *dwIoBase);
@@ -1023,27 +983,14 @@ void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAd
void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase, unsigned long dwCurrDescAddr);
void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay);
-void MACvOneShotTimer0MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, unsigned long dwData);
-bool MACbTxDMAOff(void __iomem *dwIoBase, unsigned int idx);
-
-void MACvClearBusSusInd(void __iomem *dwIoBase);
-void MACvEnableBusSusEn(void __iomem *dwIoBase);
-
-bool MACbFlushSYNCFifo(void __iomem *dwIoBase);
bool MACbPSWakeup(void __iomem *dwIoBase);
void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx,
unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID);
void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx);
-void MACvSetDefaultKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
-void MACvDisableDefaultKey(void __iomem *dwIoBase);
-void MACvSetDefaultTKIPKeyEntry(void __iomem *dwIoBase, unsigned int uKeyLen,
- unsigned int uKeyIdx, unsigned long *pdwKey, unsigned char byLocalID);
-void MACvSetDefaultKeyCtl(void __iomem *dwIoBase, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned char byLocalID);
#endif // __MAC_H__
diff --git a/drivers/staging/vt6655/mib.c b/drivers/staging/vt6655/mib.c
index 111c01877086..d2f351d19ff8 100644
--- a/drivers/staging/vt6655/mib.c
+++ b/drivers/staging/vt6655/mib.c
@@ -25,24 +25,15 @@
* Date: May 21, 1996
*
* Functions:
- * STAvClearAllCounter - Clear All MIB Counter
* STAvUpdateIstStatCounter - Update ISR statistic counter
- * STAvUpdateRDStatCounter - Update Rx statistic counter
- * STAvUpdateRDStatCounterEx - Update Rx statistic counter and copy rcv data
- * STAvUpdateTDStatCounter - Update Tx statistic counter
- * STAvUpdateTDStatCounterEx - Update Tx statistic counter and copy tx data
* STAvUpdate802_11Counter - Update 802.11 mib counter
*
* Revision History:
*
*/
-#include "upc.h"
#include "mac.h"
-#include "tether.h"
#include "mib.h"
-#include "wctl.h"
-#include "baseband.h"
/*--------------------- Static Classes ----------------------------*/
@@ -55,24 +46,6 @@
/*--------------------- Export Functions --------------------------*/
/*
- * Description: Clear All Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvClearAllCounter(PSStatCounter pStatistic)
-{
- // set memory to zero
- memset(pStatistic, 0, sizeof(SStatCounter));
-}
-
-/*
* Description: Update Isr Statistic Counter
*
* Parameters:
@@ -139,373 +112,6 @@ void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, unsigned long dwIsr)
}
/*
- * Description: Update Rx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength)
-{
- //need change
- PS802_11Header pHeader = (PS802_11Header)pbyBuffer;
-
- if (byRSR & RSR_ADDROK)
- pStatistic->dwRsrADDROk++;
- if (byRSR & RSR_CRCOK) {
- pStatistic->dwRsrCRCOk++;
-
- pStatistic->ullRsrOK++;
-
- if (cbFrameLength >= ETH_ALEN) {
- // update counters in case of successful transmit
- if (byRSR & RSR_ADDRBROAD) {
- pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes += (unsigned long long) cbFrameLength;
- } else if (byRSR & RSR_ADDRMULTI) {
- pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes += (unsigned long long) cbFrameLength;
- } else {
- pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes += (unsigned long long) cbFrameLength;
- }
- }
- }
-
- if (byRxRate == 22) {
- pStatistic->CustomStat.ullRsr11M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr11MCRCOk++;
-
- pr_debug("11M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr11M,
- (int)pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
- } else if (byRxRate == 11) {
- pStatistic->CustomStat.ullRsr5M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr5MCRCOk++;
-
- pr_debug(" 5M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr5M,
- (int)pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
- } else if (byRxRate == 4) {
- pStatistic->CustomStat.ullRsr2M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr2MCRCOk++;
-
- pr_debug(" 2M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr2M,
- (int)pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
- } else if (byRxRate == 2) {
- pStatistic->CustomStat.ullRsr1M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr1MCRCOk++;
-
- pr_debug(" 1M: ALL[%d], OK[%d]:[%02x]\n",
- (int)pStatistic->CustomStat.ullRsr1M,
- (int)pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
- } else if (byRxRate == 12) {
- pStatistic->CustomStat.ullRsr6M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr6MCRCOk++;
-
- pr_debug(" 6M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr6M,
- (int)pStatistic->CustomStat.ullRsr6MCRCOk);
- } else if (byRxRate == 18) {
- pStatistic->CustomStat.ullRsr9M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr9MCRCOk++;
-
- pr_debug(" 9M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr9M,
- (int)pStatistic->CustomStat.ullRsr9MCRCOk);
- } else if (byRxRate == 24) {
- pStatistic->CustomStat.ullRsr12M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr12MCRCOk++;
-
- pr_debug("12M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr12M,
- (int)pStatistic->CustomStat.ullRsr12MCRCOk);
- } else if (byRxRate == 36) {
- pStatistic->CustomStat.ullRsr18M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr18MCRCOk++;
-
- pr_debug("18M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr18M,
- (int)pStatistic->CustomStat.ullRsr18MCRCOk);
- } else if (byRxRate == 48) {
- pStatistic->CustomStat.ullRsr24M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr24MCRCOk++;
-
- pr_debug("24M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr24M,
- (int)pStatistic->CustomStat.ullRsr24MCRCOk);
- } else if (byRxRate == 72) {
- pStatistic->CustomStat.ullRsr36M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr36MCRCOk++;
-
- pr_debug("36M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr36M,
- (int)pStatistic->CustomStat.ullRsr36MCRCOk);
- } else if (byRxRate == 96) {
- pStatistic->CustomStat.ullRsr48M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr48MCRCOk++;
-
- pr_debug("48M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr48M,
- (int)pStatistic->CustomStat.ullRsr48MCRCOk);
- } else if (byRxRate == 108) {
- pStatistic->CustomStat.ullRsr54M++;
- if (byRSR & RSR_CRCOK)
- pStatistic->CustomStat.ullRsr54MCRCOk++;
-
- pr_debug("54M: ALL[%d], OK[%d]\n",
- (int)pStatistic->CustomStat.ullRsr54M,
- (int)pStatistic->CustomStat.ullRsr54MCRCOk);
- } else {
- pr_debug("Unknown: Total[%d], CRCOK[%d]\n",
- (int)pStatistic->dwRsrRxPacket+1,
- (int)pStatistic->dwRsrCRCOk);
- }
-
- if (byRSR & RSR_BSSIDOK)
- pStatistic->dwRsrBSSIDOk++;
-
- if (byRSR & RSR_BCNSSIDOK)
- pStatistic->dwRsrBCNSSIDOk++;
- if (byRSR & RSR_IVLDLEN) //invalid len (> 2312 byte)
- pStatistic->dwRsrLENErr++;
- if (byRSR & RSR_IVLDTYP) //invalid packet type
- pStatistic->dwRsrTYPErr++;
- if (byRSR & (RSR_IVLDTYP | RSR_IVLDLEN))
- pStatistic->dwRsrErr++;
-
- if (byNewRSR & NEWRSR_DECRYPTOK)
- pStatistic->dwNewRsrDECRYPTOK++;
- if (byNewRSR & NEWRSR_CFPIND)
- pStatistic->dwNewRsrCFP++;
- if (byNewRSR & NEWRSR_HWUTSF)
- pStatistic->dwNewRsrUTSF++;
- if (byNewRSR & NEWRSR_BCNHITAID)
- pStatistic->dwNewRsrHITAID++;
- if (byNewRSR & NEWRSR_BCNHITAID0)
- pStatistic->dwNewRsrHITAID0++;
-
- // increase rx packet count
- pStatistic->dwRsrRxPacket++;
- pStatistic->dwRsrRxOctet += cbFrameLength;
-
- if (IS_TYPE_DATA(pbyBuffer))
- pStatistic->dwRsrRxData++;
- else if (IS_TYPE_MGMT(pbyBuffer))
- pStatistic->dwRsrRxManage++;
- else if (IS_TYPE_CONTROL(pbyBuffer))
- pStatistic->dwRsrRxControl++;
-
- if (byRSR & RSR_ADDRBROAD)
- pStatistic->dwRsrBroadcast++;
- else if (byRSR & RSR_ADDRMULTI)
- pStatistic->dwRsrMulticast++;
- else
- pStatistic->dwRsrDirected++;
-
- if (WLAN_GET_FC_MOREFRAG(pHeader->wFrameCtl))
- pStatistic->dwRsrRxFragment++;
-
- if (cbFrameLength < ETH_ZLEN + 4)
- pStatistic->dwRsrRunt++;
- else if (cbFrameLength == ETH_ZLEN + 4)
- pStatistic->dwRsrRxFrmLen64++;
- else if ((65 <= cbFrameLength) && (cbFrameLength <= 127))
- pStatistic->dwRsrRxFrmLen65_127++;
- else if ((128 <= cbFrameLength) && (cbFrameLength <= 255))
- pStatistic->dwRsrRxFrmLen128_255++;
- else if ((256 <= cbFrameLength) && (cbFrameLength <= 511))
- pStatistic->dwRsrRxFrmLen256_511++;
- else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023))
- pStatistic->dwRsrRxFrmLen512_1023++;
- else if ((1024 <= cbFrameLength) && (cbFrameLength <= ETH_FRAME_LEN + 4))
- pStatistic->dwRsrRxFrmLen1024_1518++;
- else if (cbFrameLength > ETH_FRAME_LEN + 4)
- pStatistic->dwRsrLong++;
-}
-
-/*
- * Description: Update Rx Statistic Counter and copy Rx buffer
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byRSR - Rx Status
- * byNewRSR - Rx Status
- * pbyBuffer - Rx Buffer
- * cbFrameLength - Rx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-
-void
-STAvUpdateRDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char byRSR,
- unsigned char byNewRSR,
- unsigned char byRxRate,
- unsigned char *pbyBuffer,
- unsigned int cbFrameLength
-)
-{
- STAvUpdateRDStatCounter(
- pStatistic,
- byRSR,
- byNewRSR,
- byRxRate,
- pbyBuffer,
- cbFrameLength
-);
-
- // rx length
- pStatistic->dwCntRxFrmLength = cbFrameLength;
- // rx pattern, we just see 10 bytes for sample
- memcpy(pStatistic->abyCntRxPattern, (unsigned char *)pbyBuffer, 10);
-}
-
-/*
- * Description: Update Tx Statistic Counter
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * byTSR0 - Tx Status
- * byTSR1 - Tx Status
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * uIdx - Index of Tx DMA
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounter(
- PSStatCounter pStatistic,
- unsigned char byTSR0,
- unsigned char byTSR1,
- unsigned char *pbyBuffer,
- unsigned int cbFrameLength,
- unsigned int uIdx
-)
-{
- PWLAN_80211HDR_A4 pHeader;
- unsigned char *pbyDestAddr;
- unsigned char byTSR0_NCR = byTSR0 & TSR0_NCR;
-
- pHeader = (PWLAN_80211HDR_A4) pbyBuffer;
- if (WLAN_GET_FC_TODS(pHeader->wFrameCtl) == 0)
- pbyDestAddr = &(pHeader->abyAddr1[0]);
- else
- pbyDestAddr = &(pHeader->abyAddr3[0]);
-
- // increase tx packet count
- pStatistic->dwTsrTxPacket[uIdx]++;
- pStatistic->dwTsrTxOctet[uIdx] += cbFrameLength;
-
- if (byTSR0_NCR != 0) {
- pStatistic->dwTsrRetry[uIdx]++;
- pStatistic->dwTsrTotalRetry[uIdx] += byTSR0_NCR;
-
- if (byTSR0_NCR == 1)
- pStatistic->dwTsrOnceRetry[uIdx]++;
- else
- pStatistic->dwTsrMoreThanOnceRetry[uIdx]++;
- }
-
- if ((byTSR1&(TSR1_TERR|TSR1_RETRYTMO|TSR1_TMO|ACK_DATA)) == 0) {
- pStatistic->ullTsrOK[uIdx]++;
- pStatistic->CustomStat.ullTsrAllOK =
- (pStatistic->ullTsrOK[TYPE_AC0DMA] + pStatistic->ullTsrOK[TYPE_TXDMA0]);
- // update counters in case that successful transmit
- if (is_broadcast_ether_addr(pbyDestAddr)) {
- pStatistic->ullTxBroadcastFrames[uIdx]++;
- pStatistic->ullTxBroadcastBytes[uIdx] += (unsigned long long) cbFrameLength;
- } else if (is_multicast_ether_addr(pbyDestAddr)) {
- pStatistic->ullTxMulticastFrames[uIdx]++;
- pStatistic->ullTxMulticastBytes[uIdx] += (unsigned long long) cbFrameLength;
- } else {
- pStatistic->ullTxDirectedFrames[uIdx]++;
- pStatistic->ullTxDirectedBytes[uIdx] += (unsigned long long) cbFrameLength;
- }
- } else {
- if (byTSR1 & TSR1_TERR)
- pStatistic->dwTsrErr[uIdx]++;
- if (byTSR1 & TSR1_RETRYTMO)
- pStatistic->dwTsrRetryTimeout[uIdx]++;
- if (byTSR1 & TSR1_TMO)
- pStatistic->dwTsrTransmitTimeout[uIdx]++;
- if (byTSR1 & ACK_DATA)
- pStatistic->dwTsrACKData[uIdx]++;
- }
-
- if (is_broadcast_ether_addr(pbyDestAddr))
- pStatistic->dwTsrBroadcast[uIdx]++;
- else if (is_multicast_ether_addr(pbyDestAddr))
- pStatistic->dwTsrMulticast[uIdx]++;
- else
- pStatistic->dwTsrDirected[uIdx]++;
-}
-
-/*
- * Description: Update Tx Statistic Counter and copy Tx buffer
- *
- * Parameters:
- * In:
- * pStatistic - Pointer to Statistic Counter Data Structure
- * pbyBuffer - Tx Buffer
- * cbFrameLength - Tx Length
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvUpdateTDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char *pbyBuffer,
- unsigned long cbFrameLength
-)
-{
- unsigned int uPktLength;
-
- uPktLength = (unsigned int)cbFrameLength;
-
- // tx length
- pStatistic->dwCntTxBufLength = uPktLength;
- // tx pattern, we just see 16 bytes for sample
- memcpy(pStatistic->abyCntTxPattern, pbyBuffer, 16);
-}
-
-/*
* Description: Update 802.11 mib counter
*
* Parameters:
@@ -526,37 +132,8 @@ STAvUpdate802_11Counter(
unsigned long dwCounter
)
{
- p802_11Counter->MulticastTransmittedFrameCount = (unsigned long long) (pStatistic->dwTsrBroadcast[TYPE_AC0DMA] +
- pStatistic->dwTsrBroadcast[TYPE_TXDMA0] +
- pStatistic->dwTsrMulticast[TYPE_AC0DMA] +
- pStatistic->dwTsrMulticast[TYPE_TXDMA0]);
- p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr[TYPE_AC0DMA] + pStatistic->dwTsrErr[TYPE_TXDMA0]);
- p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry[TYPE_AC0DMA] + pStatistic->dwTsrRetry[TYPE_TXDMA0]);
- p802_11Counter->MultipleRetryCount = (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry[TYPE_AC0DMA] +
- pStatistic->dwTsrMoreThanOnceRetry[TYPE_TXDMA0]);
p802_11Counter->RTSSuccessCount += (unsigned long long) (dwCounter & 0x000000ff);
p802_11Counter->RTSFailureCount += (unsigned long long) ((dwCounter & 0x0000ff00) >> 8);
p802_11Counter->ACKFailureCount += (unsigned long long) ((dwCounter & 0x00ff0000) >> 16);
p802_11Counter->FCSErrorCount += (unsigned long long) ((dwCounter & 0xff000000) >> 24);
- p802_11Counter->MulticastReceivedFrameCount = (unsigned long long) (pStatistic->dwRsrBroadcast +
- pStatistic->dwRsrMulticast);
-}
-
-/*
- * Description: Clear 802.11 mib counter
- *
- * Parameters:
- * In:
- * p802_11Counter - Pointer to 802.11 mib counter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void
-STAvClear802_11Counter(PSDot11Counters p802_11Counter)
-{
- // set memory to zero
- memset(p802_11Counter, 0, sizeof(SDot11Counters));
}
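The dwCounter word consumed by the remaining lines of STAvUpdate802_11Counter() packs four 8-bit hardware MIB counters, least significant byte first: RTS success, RTS failure, ACK failure, FCS error. A minimal sketch of that unpacking, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct dot11_counters {
        uint64_t rts_success;
        uint64_t rts_failure;
        uint64_t ack_failure;
        uint64_t fcs_error;
};

static void accumulate_mib(struct dot11_counters *c, uint32_t word)
{
        c->rts_success += (word >>  0) & 0xff;
        c->rts_failure += (word >>  8) & 0xff;
        c->ack_failure += (word >> 16) & 0xff;
        c->fcs_error   += (word >> 24) & 0xff;
}

int main(void)
{
        struct dot11_counters c = { 0 };

        accumulate_mib(&c, 0x04030201); /* 1 RTS ok, 2 RTS fail, 3 ACK fail, 4 FCS err */
        printf("RTS ok %llu, FCS err %llu\n",
               (unsigned long long)c.rts_success,
               (unsigned long long)c.fcs_error);
        return 0;
}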
diff --git a/drivers/staging/vt6655/mib.h b/drivers/staging/vt6655/mib.h
index 732bddaf5b91..5cb59b8a1c7c 100644
--- a/drivers/staging/vt6655/mib.h
+++ b/drivers/staging/vt6655/mib.h
@@ -29,8 +29,6 @@
#ifndef __MIB_H__
#define __MIB_H__
-#include "ttype.h"
-#include "tether.h"
#include "desc.h"
//
@@ -38,136 +36,16 @@
//
typedef struct tagSDot11Counters {
- unsigned long Length;
- unsigned long long TransmittedFragmentCount;
- unsigned long long MulticastTransmittedFrameCount;
- unsigned long long FailedCount;
- unsigned long long RetryCount;
- unsigned long long MultipleRetryCount;
unsigned long long RTSSuccessCount;
unsigned long long RTSFailureCount;
unsigned long long ACKFailureCount;
- unsigned long long FrameDuplicateCount;
- unsigned long long ReceivedFragmentCount;
- unsigned long long MulticastReceivedFrameCount;
unsigned long long FCSErrorCount;
- unsigned long long TKIPLocalMICFailures;
- unsigned long long TKIPRemoteMICFailures;
- unsigned long long TKIPICVErrors;
- unsigned long long TKIPCounterMeasuresInvoked;
- unsigned long long TKIPReplays;
- unsigned long long CCMPFormatErrors;
- unsigned long long CCMPReplays;
- unsigned long long CCMPDecryptErrors;
- unsigned long long FourWayHandshakeFailures;
} SDot11Counters, *PSDot11Counters;
//
-// MIB2 counter
-//
-typedef struct tagSMib2Counter {
- long ifIndex;
- char ifDescr[256];
- long ifType;
- long ifMtu;
- unsigned long ifSpeed;
- unsigned char ifPhysAddress[ETH_ALEN];
- long ifAdminStatus;
- long ifOperStatus;
- unsigned long ifLastChange;
- unsigned long ifInOctets;
- unsigned long ifInUcastPkts;
- unsigned long ifInNUcastPkts;
- unsigned long ifInDiscards;
- unsigned long ifInErrors;
- unsigned long ifInUnknownProtos;
- unsigned long ifOutOctets;
- unsigned long ifOutUcastPkts;
- unsigned long ifOutNUcastPkts;
- unsigned long ifOutDiscards;
- unsigned long ifOutErrors;
- unsigned long ifOutQLen;
- unsigned long ifSpecific;
-} SMib2Counter, *PSMib2Counter;
-
-// Value in the ifType entry
-#define WIRELESSLANIEEE80211b 6
-
-// Value in the ifAdminStatus/ifOperStatus entry
-#define UP 1
-#define DOWN 2
-#define TESTING 3
-
-//
-// RMON counter
-//
-typedef struct tagSRmonCounter {
- long etherStatsIndex;
- unsigned long etherStatsDataSource;
- unsigned long etherStatsDropEvents;
- unsigned long etherStatsOctets;
- unsigned long etherStatsPkts;
- unsigned long etherStatsBroadcastPkts;
- unsigned long etherStatsMulticastPkts;
- unsigned long etherStatsCRCAlignErrors;
- unsigned long etherStatsUndersizePkts;
- unsigned long etherStatsOversizePkts;
- unsigned long etherStatsFragments;
- unsigned long etherStatsJabbers;
- unsigned long etherStatsCollisions;
- unsigned long etherStatsPkt64Octets;
- unsigned long etherStatsPkt65to127Octets;
- unsigned long etherStatsPkt128to255Octets;
- unsigned long etherStatsPkt256to511Octets;
- unsigned long etherStatsPkt512to1023Octets;
- unsigned long etherStatsPkt1024to1518Octets;
- unsigned long etherStatsOwners;
- unsigned long etherStatsStatus;
-} SRmonCounter, *PSRmonCounter;
-
-//
-// Custom counter
-//
-typedef struct tagSCustomCounters {
- unsigned long Length;
-
- unsigned long long ullTsrAllOK;
-
- unsigned long long ullRsr11M;
- unsigned long long ullRsr5M;
- unsigned long long ullRsr2M;
- unsigned long long ullRsr1M;
-
- unsigned long long ullRsr11MCRCOk;
- unsigned long long ullRsr5MCRCOk;
- unsigned long long ullRsr2MCRCOk;
- unsigned long long ullRsr1MCRCOk;
-
- unsigned long long ullRsr54M;
- unsigned long long ullRsr48M;
- unsigned long long ullRsr36M;
- unsigned long long ullRsr24M;
- unsigned long long ullRsr18M;
- unsigned long long ullRsr12M;
- unsigned long long ullRsr9M;
- unsigned long long ullRsr6M;
-
- unsigned long long ullRsr54MCRCOk;
- unsigned long long ullRsr48MCRCOk;
- unsigned long long ullRsr36MCRCOk;
- unsigned long long ullRsr24MCRCOk;
- unsigned long long ullRsr18MCRCOk;
- unsigned long long ullRsr12MCRCOk;
- unsigned long long ullRsr9MCRCOk;
- unsigned long long ullRsr6MCRCOk;
-} SCustomCounters, *PSCustomCounters;
-
-//
// Custom counter
//
typedef struct tagSISRCounters {
- unsigned long Length;
-
unsigned long dwIsrTx0OK;
unsigned long dwIsrAC0TxOK;
unsigned long dwIsrBeaconTxOK;
@@ -183,161 +61,22 @@ typedef struct tagSISRCounters {
unsigned long dwIsrUnknown;
unsigned long dwIsrRx1OK;
- unsigned long dwIsrATIMTxOK;
- unsigned long dwIsrSYNCTxOK;
- unsigned long dwIsrCFPEnd;
- unsigned long dwIsrATIMEnd;
- unsigned long dwIsrSYNCFlushOK;
unsigned long dwIsrSTIMER1Int;
} SISRCounters, *PSISRCounters;
-// Value in the etherStatsStatus entry
-#define VALID 1
-#define CREATE_REQUEST 2
-#define UNDER_CREATION 3
-#define INVALID 4
-
//
// statistic counter
//
typedef struct tagSStatCounter {
- // RSR status count
- //
- unsigned long dwRsrFrmAlgnErr;
- unsigned long dwRsrErr;
- unsigned long dwRsrCRCErr;
- unsigned long dwRsrCRCOk;
- unsigned long dwRsrBSSIDOk;
- unsigned long dwRsrADDROk;
- unsigned long dwRsrBCNSSIDOk;
- unsigned long dwRsrLENErr;
- unsigned long dwRsrTYPErr;
-
- unsigned long dwNewRsrDECRYPTOK;
- unsigned long dwNewRsrCFP;
- unsigned long dwNewRsrUTSF;
- unsigned long dwNewRsrHITAID;
- unsigned long dwNewRsrHITAID0;
-
- unsigned long dwRsrLong;
- unsigned long dwRsrRunt;
-
- unsigned long dwRsrRxControl;
- unsigned long dwRsrRxData;
- unsigned long dwRsrRxManage;
-
- unsigned long dwRsrRxPacket;
- unsigned long dwRsrRxOctet;
- unsigned long dwRsrBroadcast;
- unsigned long dwRsrMulticast;
- unsigned long dwRsrDirected;
- // 64-bit OID
- unsigned long long ullRsrOK;
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullRxBroadcastBytes;
- unsigned long long ullRxMulticastBytes;
- unsigned long long ullRxDirectedBytes;
- unsigned long long ullRxBroadcastFrames;
- unsigned long long ullRxMulticastFrames;
- unsigned long long ullRxDirectedFrames;
-
- unsigned long dwRsrRxFragment;
- unsigned long dwRsrRxFrmLen64;
- unsigned long dwRsrRxFrmLen65_127;
- unsigned long dwRsrRxFrmLen128_255;
- unsigned long dwRsrRxFrmLen256_511;
- unsigned long dwRsrRxFrmLen512_1023;
- unsigned long dwRsrRxFrmLen1024_1518;
-
- // TSR status count
- //
- unsigned long dwTsrTotalRetry[TYPE_MAXTD]; // total collision retry count
- unsigned long dwTsrOnceRetry[TYPE_MAXTD]; // this packet only occur one collision
- unsigned long dwTsrMoreThanOnceRetry[TYPE_MAXTD]; // this packet occur more than one collision
- unsigned long dwTsrRetry[TYPE_MAXTD]; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- unsigned long dwTsrACKData[TYPE_MAXTD];
- unsigned long dwTsrErr[TYPE_MAXTD];
- unsigned long dwAllTsrOK[TYPE_MAXTD];
- unsigned long dwTsrRetryTimeout[TYPE_MAXTD];
- unsigned long dwTsrTransmitTimeout[TYPE_MAXTD];
-
- unsigned long dwTsrTxPacket[TYPE_MAXTD];
- unsigned long dwTsrTxOctet[TYPE_MAXTD];
- unsigned long dwTsrBroadcast[TYPE_MAXTD];
- unsigned long dwTsrMulticast[TYPE_MAXTD];
- unsigned long dwTsrDirected[TYPE_MAXTD];
-
- // RD/TD count
- unsigned long dwCntRxFrmLength;
- unsigned long dwCntTxBufLength;
-
- unsigned char abyCntRxPattern[16];
- unsigned char abyCntTxPattern[16];
-
- // Software check....
- unsigned long dwCntRxDataErr; // rx buffer data software compare CRC err count
- unsigned long dwCntDecryptErr; // rx buffer data software compare CRC err count
- unsigned long dwCntRxICVErr; // rx buffer data software compare CRC err count
- unsigned int idxRxErrorDesc[TYPE_MAXRD]; // index for rx data error RD
-
- // 64-bit OID
- unsigned long long ullTsrOK[TYPE_MAXTD];
-
- // for some optional OIDs (64 bits) and DMI support
- unsigned long long ullTxBroadcastFrames[TYPE_MAXTD];
- unsigned long long ullTxMulticastFrames[TYPE_MAXTD];
- unsigned long long ullTxDirectedFrames[TYPE_MAXTD];
- unsigned long long ullTxBroadcastBytes[TYPE_MAXTD];
- unsigned long long ullTxMulticastBytes[TYPE_MAXTD];
- unsigned long long ullTxDirectedBytes[TYPE_MAXTD];
-
SISRCounters ISRStat;
-
- SCustomCounters CustomStat;
-
-#ifdef Calcu_LinkQual
- //Tx count:
- unsigned long TxNoRetryOkCount;
- unsigned long TxRetryOkCount;
- unsigned long TxFailCount;
- //Rx count:
- unsigned long RxOkCnt;
- unsigned long RxFcsErrCnt;
- //statistic
- unsigned long SignalStren;
- unsigned long LinkQuality;
-#endif
} SStatCounter, *PSStatCounter;
-void STAvClearAllCounter(PSStatCounter pStatistic);
-
void STAvUpdateIsrStatCounter(PSStatCounter pStatistic, unsigned long dwIsr);
-void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRSR, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-void STAvUpdateRDStatCounterEx(PSStatCounter pStatistic,
- unsigned char byRSR, unsigned char byNewRsr, unsigned char byRxRate,
- unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-void STAvUpdateTDStatCounter(PSStatCounter pStatistic, unsigned char byTSR0, unsigned char byTSR1,
- unsigned char *pbyBuffer, unsigned int cbFrameLength, unsigned int uIdx);
-
-void STAvUpdateTDStatCounterEx(
- PSStatCounter pStatistic,
- unsigned char *pbyBuffer,
- unsigned long cbFrameLength
-);
-
void STAvUpdate802_11Counter(
PSDot11Counters p802_11Counter,
PSStatCounter pStatistic,
unsigned long dwCounter
);
-void STAvClear802_11Counter(PSDot11Counters p802_11Counter);
-
#endif // __MIB_H__
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
deleted file mode 100644
index edee48777aac..000000000000
--- a/drivers/staging/vt6655/michael.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: michael.cpp
- *
- * Purpose: The implementation of the Michael message integrity code (MIC).
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- * Functions:
- * s_dwGetUINT32 - Convert from unsigned char [] to unsigned long in a portable way
- * s_vPutUINT32 - Convert from unsigned long to unsigned char [] in a portable way
- * s_vClear - Reset the state to the empty message.
- * s_vSetKey - Set the key.
- * MIC_vInit - Set the key.
- * s_vAppendByte - Append the byte to our word-sized buffer.
- * MIC_vAppend - call s_vAppendByte.
- * MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "michael.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static void s_vClear(void); // Clear the internal message,
-// resets the object to the state just after construction.
-static void s_vSetKey(u32 dwK0, u32 dwK1);
-static void s_vAppendByte(unsigned char b); // Add a single byte to the internal message
-
-/*--------------------- Export Variables --------------------------*/
-static u32 L, R; /* Current state */
-
-static u32 K0, K1; /* Key */
-static u32 M; /* Message accumulator (single word) */
-static unsigned int nBytesInM; // # bytes in M
-
-/*--------------------- Export Functions --------------------------*/
-
-static void s_vClear(void)
-{
- // Reset the state to the empty message.
- L = K0;
- R = K1;
- nBytesInM = 0;
- M = 0;
-}
-
-static void s_vSetKey(u32 dwK0, u32 dwK1)
-{
- // Set the key
- K0 = dwK0;
- K1 = dwK1;
- // and reset the message
- s_vClear();
-}
-
-static void s_vAppendByte(unsigned char b)
-{
- // Append the byte to our word-sized buffer
- M |= b << (8*nBytesInM);
- nBytesInM++;
- // Process the word if it is full.
- if (nBytesInM >= 4) {
- L ^= M;
- R ^= ROL32(L, 17);
- L += R;
- R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
- L += R;
- R ^= ROL32(L, 3);
- L += R;
- R ^= ROR32(L, 2);
- L += R;
- // Clear the buffer
- M = 0;
- nBytesInM = 0;
- }
-}
-
-void MIC_vInit(u32 dwK0, u32 dwK1)
-{
- // Set the key
- s_vSetKey(dwK0, dwK1);
-}
-
-void MIC_vUnInit(void)
-{
- // Wipe the key material
- K0 = 0;
- K1 = 0;
-
- // And the other fields as well.
- //Note that this sets (L,R) to (K0,K1) which is just fine.
- s_vClear();
-}
-
-void MIC_vAppend(unsigned char *src, unsigned int nBytes)
-{
- // This is simple
- while (nBytes > 0) {
- s_vAppendByte(*src++);
- nBytes--;
- }
-}
-
-void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
-{
- // Append the minimum padding
- s_vAppendByte(0x5a);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- // and then zeroes until the length is a multiple of 4
- while (nBytesInM != 0)
- s_vAppendByte(0);
-
- // The s_vAppendByte function has already computed the result.
- *pdwL = L;
- *pdwR = R;
- // Reset to the empty message.
- s_vClear();
-}
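For reference, a compact standalone restatement of the Michael keyed-MIC algorithm that the deleted file implemented; the block rounds and the 0x5a/zero padding mirror the removed s_vAppendByte() and MIC_vGetMIC(), while the function names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define ROL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
#define ROR32(v, n) ROL32((v), 32 - (n))

/* One Michael block round over the 32-bit word m. */
static void michael_block(uint32_t *l, uint32_t *r, uint32_t m)
{
        *l ^= m;
        *r ^= ROL32(*l, 17);
        *l += *r;
        *r ^= ((*l & 0xff00ff00) >> 8) | ((*l & 0x00ff00ff) << 8);
        *l += *r;
        *r ^= ROL32(*l, 3);
        *l += *r;
        *r ^= ROR32(*l, 2);
        *l += *r;
}

/* MIC over msg: message bytes, then 0x5a and zeros up to a 4-byte boundary. */
static void michael_mic(uint32_t k0, uint32_t k1,
                        const uint8_t *msg, size_t len,
                        uint32_t *mic_l, uint32_t *mic_r)
{
        uint32_t l = k0, r = k1, m = 0;
        unsigned int n = 0;
        size_t padded = (len + 5 + 3) & ~(size_t)3;
        size_t i;

        for (i = 0; i < padded; i++) {
                uint8_t b = (i < len) ? msg[i] : (i == len) ? 0x5a : 0;

                m |= (uint32_t)b << (8 * n);
                if (++n == 4) {
                        michael_block(&l, &r, m);
                        m = 0;
                        n = 0;
                }
        }
        *mic_l = l;
        *mic_r = r;
}

int main(void)
{
        const uint8_t msg[] = "Michael";
        uint32_t l, r;

        michael_mic(0x12345678, 0x9abcdef0, msg, sizeof(msg) - 1, &l, &r);
        printf("MIC = %08x %08x\n", (unsigned)l, (unsigned)r);
        return 0;
}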
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
deleted file mode 100644
index 86cb140e3087..000000000000
--- a/drivers/staging/vt6655/michael.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: Michael.h
- *
- * Purpose: Reference implementation for Michael
- * written by Niels Ferguson
- *
- * Author: Kyle Hsu
- *
- * Date: Jan 2, 2003
- *
- */
-
-#ifndef __MICHAEL_H__
-#define __MICHAEL_H__
-
-#include <linux/types.h>
-
-void MIC_vInit(u32 dwK0, u32 dwK1);
-
-void MIC_vUnInit(void);
-
-/* Append bytes to the message to be MICed */
-void MIC_vAppend(unsigned char *src, unsigned int nBytes);
-
-/* Get the MIC result. Destination should accept 8 bytes of result. */
-/* This also resets the message to empty. */
-void MIC_vGetMIC(u32 *pdwL, u32 *pdwR);
-
-/* Rotation functions on 32 bit values */
-#define ROL32(A, n) \
- (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
-#define ROR32(A, n) ROL32((A), 32-(n))
-
-#endif /*__MICHAEL_H__ */
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 08241b917777..e826f07e91c0 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -37,13 +37,9 @@
*
*/
-#include "ttype.h"
#include "mac.h"
#include "device.h"
-#include "wmgr.h"
#include "power.h"
-#include "wcmd.h"
-#include "rxtx.h"
#include "card.h"
/*--------------------- Static Definitions -------------------------*/
@@ -73,8 +69,7 @@ PSvEnablePowerSaving(
)
{
struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wAID = pMgmt->wCurrAID | BIT14 | BIT15;
+ u16 wAID = pDevice->current_aid | BIT(14) | BIT(15);
// set period of power up before TBTT
VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
@@ -83,7 +78,9 @@ PSvEnablePowerSaving(
VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
} else {
// set ATIM Window
+#if 0 /* TODO atim window */
MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
+#endif
}
// Set AutoSleep
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
@@ -95,22 +92,15 @@ PSvEnablePowerSaving(
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
// first time set listen next beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- pMgmt->wCountToWakeUp = wListenInterval;
} else {
// always listen beacon
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
- pMgmt->wCountToWakeUp = 0;
}
// enable power saving hw function
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
pDevice->bEnablePSMode = true;
- /* We don't send null pkt in ad hoc mode since beacon will handle this. */
- if (pDevice->op_mode != NL80211_IFTYPE_ADHOC &&
- pDevice->op_mode == NL80211_IFTYPE_STATION)
- PSbSendNullPacket(pDevice);
-
pDevice->bPWBitOn = true;
pr_debug("PS:Power Saving Mode Enable...\n");
}
@@ -143,182 +133,9 @@ PSvDisablePowerSaving(
pDevice->bEnablePSMode = false;
- if (pDevice->op_mode == NL80211_IFTYPE_STATION)
- PSbSendNullPacket(pDevice);
-
pDevice->bPWBitOn = false;
}
-/*+
- *
- * Routine Description:
- * Consider to power down when no more packets to tx or rx.
- *
- * Return Value:
- * true, if power down success
- * false, if fail
- -*/
-
-bool
-PSbConsiderPowerDown(
- void *hDeviceContext,
- bool bCheckRxDMA,
- bool bCheckCountToWakeUp
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uIdx;
-
- // check if already in Doze mode
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
- return true;
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
- // check if in TIM wake period
- if (pMgmt->bInTIMWake)
- return false;
- }
-
- // check scan state
- if (pDevice->bCmdRunning)
- return false;
-
- // Force PSEN on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
-
- // check if all TD are empty,
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
-
- // check if rx isr is clear
- if (bCheckRxDMA &&
- ((pDevice->dwIsr & ISR_RXDMA0) != 0) &&
- ((pDevice->dwIsr & ISR_RXDMA1) != 0)) {
- return false;
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
- if (bCheckCountToWakeUp &&
- (pMgmt->wCountToWakeUp == 0 || pMgmt->wCountToWakeUp == 1)) {
- return false;
- }
- }
-
- // no Tx, no Rx isr, now go to Doze
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- pr_debug("Go to Doze ZZZZZZZZZZZZZZZ\n");
- return true;
-}
-
-/*+
- *
- * Routine Description:
- * Send PS-POLL packet
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-PSvSendPSPOLL(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxMgmtPacket pTxPacket = NULL;
-
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_HDR_ADDR2_LEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- pTxPacket->p80211Header->sA2.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_CTL) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PSPOLL) |
- WLAN_SET_FC_PWRMGT(0)
-));
- pTxPacket->p80211Header->sA2.wDurationID = pMgmt->wCurrAID | BIT14 | BIT15;
- memcpy(pTxPacket->p80211Header->sA2.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA2.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- pTxPacket->cbMPDULen = WLAN_HDR_ADDR2_LEN;
- pTxPacket->cbPayloadLen = 0;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Send PS-Poll packet failed..\n");
-}
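Both the deleted PS-Poll path above and the new wAID computation in PSvEnablePowerSaving() OR bits 14 and 15 onto the association ID: in a PS-Poll frame the Duration/ID field carries the AID with its two most significant bits forced to 1. A minimal sketch (hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Duration/ID field of a PS-Poll: 14-bit AID with bits 14 and 15 set. */
static uint16_t ps_poll_duration_id(uint16_t aid)
{
        return (uint16_t)(aid | (1u << 14) | (1u << 15));
}

int main(void)
{
        printf("AID 1 -> 0x%04x\n", ps_poll_duration_id(1)); /* 0xc001 */
        return 0;
}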
-
-/*+
- *
- * Routine Description:
- * Send NULL packet to AP for notification power state of STA
- *
- * Return Value:
- * None.
- *
- -*/
-bool
-PSbSendNullPacket(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int uIdx;
-
- if (!pDevice->bLinkPass)
- return false;
-
- if (!pDevice->bEnablePSMode && !pDevice->fTxDataInSleep)
- return false;
-
- if (pDevice->bEnablePSMode) {
- for (uIdx = 0; uIdx < TYPE_MAXTD; uIdx++) {
- if (pDevice->iTDUsed[uIdx] != 0)
- return false;
- }
- }
-
- memset(pMgmt->pbyPSPacketPool, 0, sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN);
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyPSPacketPool;
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- if (pDevice->bEnablePSMode) {
- pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
- WLAN_SET_FC_PWRMGT(1)
-));
- } else {
- pTxPacket->p80211Header->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_DATA) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_NULL) |
- WLAN_SET_FC_PWRMGT(0)
-));
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA)
- pTxPacket->p80211Header->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_TODS(1));
-
- memcpy(pTxPacket->p80211Header->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(pTxPacket->p80211Header->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- pTxPacket->cbMPDULen = WLAN_HDR_ADDR3_LEN;
- pTxPacket->cbPayloadLen = 0;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING) {
- pr_debug("Send Null Packet failed !\n");
- return false;
- }
-
- return true;
-}
/*+
*
@@ -336,21 +153,14 @@ PSbIsNextTBTTWakeUp(
)
{
struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
+ struct ieee80211_hw *hw = pDevice->hw;
+ struct ieee80211_conf *conf = &hw->conf;
bool bWakeUp = false;
- if (pMgmt->wListenInterval >= 2) {
- if (pMgmt->wCountToWakeUp == 0)
- pMgmt->wCountToWakeUp = pMgmt->wListenInterval;
-
- pMgmt->wCountToWakeUp--;
-
- if (pMgmt->wCountToWakeUp == 1) {
- // Turn on wake up to listen next beacon
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
- bWakeUp = true;
- }
-
+ if (conf->listen_interval == 1) {
+ /* Turn on wake up to listen next beacon */
+ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
+ bWakeUp = true;
}
return bWakeUp;
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 936f171a6b19..1083341b2a47 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -33,13 +33,6 @@
#define PS_FAST_INTERVAL 1 // Fast power saving listen interval
#define PS_MAX_INTERVAL 4 // MAX power saving listen interval
-bool
-PSbConsiderPowerDown(
- void *hDeviceContext,
- bool bCheckRxDMA,
- bool bCheckCountToWakeUp
-);
-
void
PSvDisablePowerSaving(
void *hDeviceContext
@@ -51,15 +44,6 @@ PSvEnablePowerSaving(
unsigned short wListenInterval
);
-void
-PSvSendPSPOLL(
- void *hDeviceContext
-);
-
-bool
-PSbSendNullPacket(
- void *hDeviceContext
-);
bool
PSbIsNextTBTTWakeUp(
diff --git a/drivers/staging/vt6655/rc4.c b/drivers/staging/vt6655/rc4.c
deleted file mode 100644
index b7819bc702de..000000000000
--- a/drivers/staging/vt6655/rc4.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: rc4.c
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- */
-
-#include "rc4.h"
-
-void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len)
-{
- unsigned int ust1, ust2;
- unsigned int keyindex;
- unsigned int stateindex;
- unsigned char *pbyst;
- unsigned int idx;
-
- pbyst = pRC4->abystate;
- pRC4->ux = 0;
- pRC4->uy = 0;
- for (idx = 0; idx < 256; idx++)
- pbyst[idx] = (unsigned char)idx;
- keyindex = 0;
- stateindex = 0;
- for (idx = 0; idx < 256; idx++) {
- ust1 = pbyst[idx];
- stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
- ust2 = pbyst[stateindex];
- pbyst[stateindex] = (unsigned char)ust1;
- pbyst[idx] = (unsigned char)ust2;
- if (++keyindex >= cbKey_len)
- keyindex = 0;
- }
-}
-
-unsigned int rc4_byte(PRC4Ext pRC4)
-{
- unsigned int ux;
- unsigned int uy;
- unsigned int ustx, usty;
- unsigned char *pbyst;
-
- pbyst = pRC4->abystate;
- ux = (pRC4->ux + 1) & 0xff;
- ustx = pbyst[ux];
- uy = (ustx + pRC4->uy) & 0xff;
- usty = pbyst[uy];
- pRC4->ux = ux;
- pRC4->uy = uy;
- pbyst[uy] = (unsigned char)ustx;
- pbyst[ux] = (unsigned char)usty;
-
- return pbyst[(ustx + usty) & 0xff];
-}
-
-void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest,
- unsigned char *pbySrc, unsigned int cbData_len)
-{
- unsigned int ii;
-
- for (ii = 0; ii < cbData_len; ii++)
- pbyDest[ii] = (unsigned char)(pbySrc[ii] ^ rc4_byte(pRC4));
-}
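For reference, a compact standalone restatement of the RC4 key schedule and keystream generator that the deleted rc4_init()/rc4_byte()/rc4_encrypt() implemented (illustrative only); because RC4 is a plain XOR stream cipher, re-keying with the same key decrypts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rc4_ctx {
        uint8_t s[256];
        uint8_t x, y;
};

/* Key-scheduling algorithm: permute the 256-byte state with the key. */
static void rc4_setup(struct rc4_ctx *c, const uint8_t *key, size_t keylen)
{
        unsigned int i, j = 0;

        for (i = 0; i < 256; i++)
                c->s[i] = (uint8_t)i;
        for (i = 0; i < 256; i++) {
                uint8_t t = c->s[i];

                j = (j + key[i % keylen] + t) & 0xff;
                c->s[i] = c->s[j];
                c->s[j] = t;
        }
        c->x = c->y = 0;
}

/* One keystream byte (the PRGA step). */
static uint8_t rc4_stream_byte(struct rc4_ctx *c)
{
        uint8_t tx, ty;

        c->x = (c->x + 1) & 0xff;
        tx = c->s[c->x];
        c->y = (c->y + tx) & 0xff;
        ty = c->s[c->y];
        c->s[c->y] = tx;
        c->s[c->x] = ty;
        return c->s[(tx + ty) & 0xff];
}

static void rc4_xor(struct rc4_ctx *c, uint8_t *dst, const uint8_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = src[i] ^ rc4_stream_byte(c);
}

int main(void)
{
        const uint8_t key[] = "wep-key";
        uint8_t msg[] = "hello";
        uint8_t enc[sizeof(msg)], dec[sizeof(msg)];
        struct rc4_ctx c;

        rc4_setup(&c, key, sizeof(key) - 1);
        rc4_xor(&c, enc, msg, sizeof(msg));
        rc4_setup(&c, key, sizeof(key) - 1);    /* same keystream decrypts */
        rc4_xor(&c, dec, enc, sizeof(msg));
        printf("round trip %s\n", memcmp(dec, msg, sizeof(msg)) ? "failed" : "ok");
        return 0;
}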
diff --git a/drivers/staging/vt6655/rc4.h b/drivers/staging/vt6655/rc4.h
deleted file mode 100644
index 74b2eed9bce3..000000000000
--- a/drivers/staging/vt6655/rc4.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * File: rc4.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Purpose:
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: Sep 4, 2002
- *
- */
-
-#ifndef __RC4_H__
-#define __RC4_H__
-
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-/*--------------------- Export Types ------------------------------*/
-typedef struct {
- unsigned int ux;
- unsigned int uy;
- unsigned char abystate[256];
-} RC4Ext, *PRC4Ext;
-
-void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len);
-unsigned int rc4_byte(PRC4Ext pRC4);
-void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest, unsigned char *pbySrc, unsigned int cbData_len);
-
-#endif //__RC4_H__
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index e505af91bfd0..32ef99341e20 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -29,6 +29,8 @@
* IFRFbWriteEmbedded - Embedded write RF register via MAC
*
* Revision History:
+ * RobertYu 2005
+ * chester 2008
*
*/
@@ -37,8 +39,6 @@
#include "rf.h"
#include "baseband.h"
-/*--------------------- Static Definitions -------------------------*/
-
#define BY_AL2230_REG_LEN 23 //24bit
#define CB_AL2230_INIT_SEQ 15
#define SWITCH_CHANNEL_DELAY_AL2230 200 //us
@@ -49,10 +49,6 @@
#define SWITCH_CHANNEL_DELAY_AL7230 200 //us
#define AL7230_PWR_IDX_LEN 64
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
@@ -172,7 +168,6 @@ static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
};
-//{{ RobertYu:20050104
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
@@ -408,9 +403,6 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW // channel = 165, Tf = 5825MHz (56)
};
-//}} RobertYu
-
-/*--------------------- Static Functions --------------------------*/
/*
* Description: AIROHA IFRF chip init function
@@ -424,137 +416,81 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
* Return Value: true if succeeded; false if failed.
*
*/
-static bool s_bAL7230Init(void __iomem *dwIoBase)
+static bool s_bAL7230Init(struct vnt_private *priv)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
bool bResult;
bResult = true;
- //3-wire control for normal mode
+ /* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeOFF(dwIoBase); //RobertYu:20050106, have DC value for Calibration
+ BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[ii]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
- // PLL On
+ /* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- //Calibration
+ /* Calibration */
MACvTimer0MicroSDelay(dwIoBase, 150);//150us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:active, RCK:disable
+ /* TXDCOC:active, RCK:disable */
+ bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); //TXDCOC:disable, RCK:active
+ /* TXDCOC:disable, RCK:active */
+ bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); //TXDCOC:disable, RCK:disable
+ /* TXDCOC:disable, RCK:disable */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeON(dwIoBase); // RobertYu:20050106
+ BBvPowerSaveModeON(priv); /* RobertYu:20050106 */
- // PE1: TX_ON, PE2: RX_ON, PE3: PLLON
- //3-wire control for power saving mode
+ /* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
+ /* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
return bResult;
}
-// Need to Pull PLLON low when writing channel registers through 3-wire interface
-static bool s_bAL7230SelectChannel(void __iomem *dwIoBase, unsigned char byChannel)
+/* Need to Pull PLLON low when writing channel registers through
+ * 3-wire interface */
+static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
bool bResult;
bResult = true;
- // PLLON Off
+ /* PLLON Off */
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable0[byChannel - 1]); //Reg0
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable1[byChannel - 1]); //Reg1
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230ChannelTable2[byChannel - 1]); //Reg4
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
- // PLLOn On
+ /* PLLOn On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- // Set Channel[7] = 0 to tell H/W channel is changing now.
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
- // Set Channel[7] = 1 to tell H/W channel change is done.
+ /* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
return bResult;
}
/*
- * Description: Select channel with UW2452 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-//{{ RobertYu: 20041210
-/*
- * Description: UW2452 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-//}} RobertYu
-////////////////////////////////////////////////////////////////////////////////
-
-/*
- * Description: VT3226 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with VT3226 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
* Description: Write to IF/RF, by embedded programming
*
* Parameters:
@@ -567,14 +503,15 @@ static bool s_bAL7230SelectChannel(void __iomem *dwIoBase, unsigned char byChann
* Return Value: true if succeeded; false if failed.
*
*/
-bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
+bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
+ void __iomem *dwIoBase = priv->PortOffset;
unsigned short ww;
unsigned long dwValue;
VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
- // W_MAX_TIMEOUT is the timeout period
+ /* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
if (dwValue & IFREGCTL_DONE)
@@ -588,33 +525,6 @@ bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
}
/*
- * Description: RFMD RF2959 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with RFMD 2959 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
* Description: AIROHA IFRF chip init function
*
* Parameters:
@@ -626,113 +536,70 @@ bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData)
* Return Value: true if succeeded; false if failed.
*
*/
-static bool RFbAL2230Init(void __iomem *dwIoBase)
+static bool RFbAL2230Init(struct vnt_private *priv)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
bool bResult;
bResult = true;
- //3-wire control for normal mode
+ /* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
-//2008-8-21 chester <add>
- // PLL Off
-
+ /* PLL Off */
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- //patch abnormal AL2230 frequency output
-//2008-8-21 chester <add>
- IFRFbWriteEmbedded(dwIoBase, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ /* patch abnormal AL2230 frequency output */
+ IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[ii]);
-//2008-8-21 chester <add>
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us
- // PLL On
+ /* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
MACvTimer0MicroSDelay(dwIoBase, 150);//150us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
MACvTimer0MicroSDelay(dwIoBase, 30);//30us
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- //3-wire control for power saving mode
+ /* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
return bResult;
}
-static bool RFbAL2230SelectChannel(void __iomem *dwIoBase, unsigned char byChannel)
+static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
bool bResult;
bResult = true;
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable0[byChannel - 1]);
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL2230ChannelTable1[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
- // Set Channel[7] = 0 to tell H/W channel is changing now.
+ /* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230);
- // Set Channel[7] = 1 to tell H/W channel change is done.
+ /* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
return bResult;
}
/*
- * Description: UW2451 IFRF chip init function
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Select channel with UW2451 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
- * Description: Set sleep mode to UW2451 chip
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * uChannel - Channel number
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-
-/*
* Description: RF init function
*
* Parameters:
@@ -746,20 +613,20 @@ static bool RFbAL2230SelectChannel(void __iomem *dwIoBase, unsigned char byChann
*
*/
bool RFbInit(
- struct vnt_private *pDevice
+ struct vnt_private *priv
)
{
bool bResult = true;
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- pDevice->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
- bResult = RFbAL2230Init(pDevice->PortOffset);
+ priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
+ bResult = RFbAL2230Init(priv);
break;
case RF_AIROHA7230:
- pDevice->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
- bResult = s_bAL7230Init(pDevice->PortOffset);
+ priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
+ bResult = s_bAL7230Init(priv);
break;
case RF_NOTHING:
bResult = true;
@@ -784,18 +651,18 @@ bool RFbInit(
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned char byChannel)
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, unsigned char byChannel)
{
bool bResult = true;
switch (byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- bResult = RFbAL2230SelectChannel(dwIoBase, byChannel);
+ bResult = RFbAL2230SelectChannel(priv, byChannel);
break;
//{{ RobertYu: 20050104
case RF_AIROHA7230:
- bResult = s_bAL7230SelectChannel(dwIoBase, byChannel);
+ bResult = s_bAL7230SelectChannel(priv, byChannel);
break;
//}} RobertYu
case RF_NOTHING:
@@ -820,8 +687,9 @@ bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned c
* Return Value: None.
*
*/
-bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigned int uChannel)
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, unsigned int uChannel)
{
+ void __iomem *dwIoBase = priv->PortOffset;
int ii;
unsigned char byInitCount = 0;
unsigned char bySleepCount = 0;
@@ -834,7 +702,8 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
if (uChannel > CB_MAX_CHANNEL_24G)
return false;
- byInitCount = CB_AL2230_INIT_SEQ + 2; // Init Reg + Channel Reg (2)
+ /* Init Reg + Channel Reg (2) */
+ byInitCount = CB_AL2230_INIT_SEQ + 2;
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
return false;
@@ -847,10 +716,10 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
break;
- //{{ RobertYu: 20050104
- // Need to check, PLLON need to be low for channel setting
+ /* Need to check, PLLON need to be low for channel setting */
case RF_AIROHA7230:
- byInitCount = CB_AL7230_INIT_SEQ + 3; // Init Reg + Channel Reg (3)
+ /* Init Reg + Channel Reg (3) */
+ byInitCount = CB_AL7230_INIT_SEQ + 3;
bySleepCount = 0;
if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
return false;
@@ -869,7 +738,6 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
ii++;
MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
break;
- //}} RobertYu
case RF_NOTHING:
return true;
@@ -897,7 +765,7 @@ bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigne
*
*/
bool RFbSetPower(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned int uRATE,
unsigned int uCH
)
@@ -907,7 +775,7 @@ bool RFbSetPower(
unsigned char byDec = 0;
unsigned char byPwrdBm = 0;
- if (pDevice->dwDiagRefCount != 0)
+ if (priv->dwDiagRefCount != 0)
return true;
if ((uCH < 1) || (uCH > CB_MAX_CHANNEL))
@@ -918,22 +786,22 @@ bool RFbSetPower(
case RATE_2M:
case RATE_5M:
case RATE_11M:
- byPwr = pDevice->abyCCKPwrTbl[uCH];
- byPwrdBm = pDevice->abyCCKDefaultPwr[uCH];
+ byPwr = priv->abyCCKPwrTbl[uCH];
+ byPwrdBm = priv->abyCCKDefaultPwr[uCH];
break;
case RATE_6M:
case RATE_9M:
case RATE_18M:
- byPwr = pDevice->abyOFDMPwrTbl[uCH];
- if (pDevice->byRFType == RF_UW2452)
+ byPwr = priv->abyOFDMPwrTbl[uCH];
+ if (priv->byRFType == RF_UW2452)
byDec = byPwr + 14;
else
byDec = byPwr + 10;
- if (byDec >= pDevice->byMaxPwrLevel)
- byDec = pDevice->byMaxPwrLevel-1;
+ if (byDec >= priv->byMaxPwrLevel)
+ byDec = priv->byMaxPwrLevel-1;
- if (pDevice->byRFType == RF_UW2452) {
+ if (priv->byRFType == RF_UW2452) {
byPwrdBm = byDec - byPwr;
byPwrdBm /= 3;
} else {
@@ -941,24 +809,24 @@ bool RFbSetPower(
byPwrdBm >>= 1;
}
- byPwrdBm += pDevice->abyOFDMDefaultPwr[uCH];
+ byPwrdBm += priv->abyOFDMDefaultPwr[uCH];
byPwr = byDec;
break;
case RATE_24M:
case RATE_36M:
case RATE_48M:
case RATE_54M:
- byPwr = pDevice->abyOFDMPwrTbl[uCH];
- byPwrdBm = pDevice->abyOFDMDefaultPwr[uCH];
+ byPwr = priv->abyOFDMPwrTbl[uCH];
+ byPwrdBm = priv->abyOFDMDefaultPwr[uCH];
break;
}
- if (pDevice->byCurPwr == byPwr)
+ if (priv->byCurPwr == byPwr)
return true;
- bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
+ bResult = RFbRawSetPower(priv, byPwr, uRATE);
if (bResult)
- pDevice->byCurPwr = byPwr;
+ priv->byCurPwr = byPwr;
return bResult;
}
@@ -978,7 +846,7 @@ bool RFbSetPower(
*/
bool RFbRawSetPower(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned char byPwr,
unsigned int uRATE
)
@@ -986,37 +854,38 @@ bool RFbRawSetPower(
bool bResult = true;
unsigned long dwMax7230Pwr = 0;
- if (byPwr >= pDevice->byMaxPwrLevel)
+ if (byPwr >= priv->byMaxPwrLevel)
return false;
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (uRATE <= RATE_11M)
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
else
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
break;
case RF_AL2230S:
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwAL2230PowerTable[byPwr]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (uRATE <= RATE_11M) {
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
} else {
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ bResult &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
}
break;
case RF_AIROHA7230:
- // 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value
+ /* 0x080F1B00 for 3 wire control TxGain(D10)
+ * and 0x31 as TX Gain value */
dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) |
(BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice->PortOffset, dwMax7230Pwr);
+ bResult &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
break;
default:
@@ -1032,7 +901,7 @@ bool RFbRawSetPower(
*
* Parameters:
* In:
- * pDevice - The adapter to be translated
+ * priv - The adapter to be translated
* byCurrRSSI - RSSI to be translated
* Out:
* pdwdbm - Translated dbm number
@@ -1042,7 +911,7 @@ bool RFbRawSetPower(
-*/
void
RFvRSSITodBm(
- struct vnt_private *pDevice,
+ struct vnt_private *priv,
unsigned char byCurrRSSI,
long *pldBm
)
@@ -1052,10 +921,10 @@ RFvRSSITodBm(
long a = 0;
unsigned char abyAIROHARF[4] = {0, 18, 0, 40};
- switch (pDevice->byRFType) {
+ switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- case RF_AIROHA7230: //RobertYu: 20040104
+ case RF_AIROHA7230:
a = abyAIROHARF[byIdx];
break;
default:
@@ -1065,42 +934,38 @@ RFvRSSITodBm(
*pldBm = -1 * (a + b * 2);
}
-////////////////////////////////////////////////////////////////////////////////
-//{{ RobertYu: 20050104
-
-// Post processing for the 11b/g and 11a.
-// for save time on changing Reg2,3,5,7,10,12,15
-bool RFbAL7230SelectChannelPostProcess(void __iomem *dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel)
+/* Post processing for the 11b/g and 11a.
+ * for save time on changing Reg2,3,5,7,10,12,15 */
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
+ unsigned char byOldChannel,
+ unsigned char byNewChannel)
{
bool bResult;
bResult = true;
- // if change between 11 b/g and 11a need to update the following register
- // Channel Index 1~14
-
+ /* if change between 11 b/g and 11a need to update the following
+ * register
+ * Channel Index 1~14 */
if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
- // Change from 2.4G to 5G
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[2]); //Reg2
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[3]); //Reg3
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[5]); //Reg5
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[7]); //Reg7
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[10]);//Reg10
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[12]);//Reg12
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTableAMode[15]);//Reg15
+ /* Change from 2.4G to 5G [Reg] */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
} else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) {
- // change from 5G to 2.4G
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[2]); //Reg2
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[3]); //Reg3
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[5]); //Reg5
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[7]); //Reg7
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[10]);//Reg10
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[12]);//Reg12
- bResult &= IFRFbWriteEmbedded(dwIoBase, dwAL7230InitTable[15]);//Reg15
+ /* Change from 5G to 2.4G [Reg] */
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
+ bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
}
return bResult;
}
-
-//}} RobertYu
-////////////////////////////////////////////////////////////////////////////////
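The IFRFbWriteEmbedded() hunk earlier in this file keeps the same shape throughout: write a command word, then poll a DONE flag a bounded number of times before giving up. A generic sketch of that write-then-poll pattern with stand-in register accessors (all names here are placeholders, not the driver's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_DONE_BIT   0x1u
#define DEMO_MAX_POLLS  5000u

/* Stand-ins for the MMIO accessors; the fake device "completes" at once. */
static uint32_t demo_reg;
static void demo_write(uint32_t v) { demo_reg = v | DEMO_DONE_BIT; }
static uint32_t demo_read(void) { return demo_reg; }

static bool demo_write_and_wait(uint32_t value)
{
        unsigned int i;

        demo_write(value);
        for (i = 0; i < DEMO_MAX_POLLS; i++) {
                if (demo_read() & DEMO_DONE_BIT)
                        return true;    /* device acknowledged the write */
        }
        return false;                   /* timed out */
}

int main(void)
{
        printf("write %s\n", demo_write_and_wait(0xabcd) ? "done" : "timed out");
        return 0;
}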
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index be4ef88b7666..8a6e2cfedaa5 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -30,7 +30,6 @@
#ifndef __RF_H__
#define __RF_H__
-#include "ttype.h"
#include "device.h"
/*--------------------- Export Definitions -------------------------*/
@@ -74,12 +73,12 @@
/*--------------------- Export Functions --------------------------*/
-bool IFRFbWriteEmbedded(void __iomem *dwIoBase, unsigned long dwData);
-bool RFbSelectChannel(void __iomem *dwIoBase, unsigned char byRFType, unsigned char byChannel);
+bool IFRFbWriteEmbedded(struct vnt_private *, unsigned long dwData);
+bool RFbSelectChannel(struct vnt_private *, unsigned char byRFType, unsigned char byChannel);
bool RFbInit(
struct vnt_private *
);
-bool RFvWriteWakeProgSyn(void __iomem *dwIoBase, unsigned char byRFType, unsigned int uChannel);
+bool RFvWriteWakeProgSyn(struct vnt_private *, unsigned char byRFType, unsigned int uChannel);
bool RFbSetPower(struct vnt_private *, unsigned int uRATE, unsigned int uCH);
bool RFbRawSetPower(
struct vnt_private *,
@@ -95,7 +94,7 @@ RFvRSSITodBm(
);
//{{ RobertYu: 20050104
-bool RFbAL7230SelectChannelPostProcess(void __iomem *dwIoBase, unsigned char byOldChannel, unsigned char byNewChannel);
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, unsigned char byOldChannel, unsigned char byNewChannel);
//}} RobertYu
#endif // __RF_H__
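The prototype changes above replace the raw void __iomem * I/O base with the driver's struct vnt_private, so callers stop threading dwIoBase through every RF call. A caller-side sketch of the change (example_set_channel is a hypothetical wrapper; byRFType is assumed to be the usual vnt_private field):

/* Sketch: callers now pass the private structure; the RF helpers derive
 * the MMIO base (priv->PortOffset) internally.
 */
static bool example_set_channel(struct vnt_private *priv, unsigned char ch)
{
	/* old: RFbSelectChannel(priv->PortOffset, priv->byRFType, ch); */
	return RFbSelectChannel(priv, priv->byRFType, ch);
}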
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7a183f55e7eb..61c39dd7ad01 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -50,17 +50,9 @@
#include "device.h"
#include "rxtx.h"
-#include "tether.h"
#include "card.h"
-#include "bssdb.h"
#include "mac.h"
#include "baseband.h"
-#include "michael.h"
-#include "tkip.h"
-#include "tcrc.h"
-#include "wctl.h"
-#include "wroute.h"
-#include "hostap.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
@@ -105,19 +97,6 @@ static const unsigned short wFB_Opt1[2][5] = {
#define DATADUR_A_F1 13
/*--------------------- Static Functions --------------------------*/
-
-static
-void
-s_vFillTxKey(
- struct vnt_private *pDevice,
- unsigned char *pbyBuf,
- unsigned char *pbyIVHead,
- PSKeyItem pTransmitKey,
- unsigned char *pbyHdrBuf,
- unsigned short wPayloadLen,
- unsigned char *pMICHDR
-);
-
static
void
s_vFillRTSHead(
@@ -127,7 +106,7 @@ s_vFillRTSHead(
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
- PSEthernetHeader psEthHeader,
+ struct ieee80211_hdr *hdr,
unsigned short wCurrentRate,
unsigned char byFBOption
);
@@ -144,26 +123,15 @@ s_vGenerateTxParameter(
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
+ void *psEthHeader,
unsigned short wCurrentRate
);
-static void s_vFillFragParameter(
- struct vnt_private *pDevice,
- unsigned char *pbyBuffer,
- unsigned int uTxType,
- void *pvtdCurr,
- unsigned short wFragType,
- unsigned int cbReqCount
-);
-
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, unsigned int cbFrameBodySize,
+ unsigned char *pbyTxBufferAddr,
unsigned int uDMAIdx, PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader, unsigned char *pPacket,
- bool bNeedEncrypt, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum);
+ unsigned int uNodeIndex);
static
__le16
@@ -178,165 +146,12 @@ s_uFillDataHead(
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
- unsigned short wCurrentRate
+ unsigned short wCurrentRate,
+ bool is_pspoll
);
/*--------------------- Export Variables --------------------------*/
-static
-void
-s_vFillTxKey(
- struct vnt_private *pDevice,
- unsigned char *pbyBuf,
- unsigned char *pbyIVHead,
- PSKeyItem pTransmitKey,
- unsigned char *pbyHdrBuf,
- unsigned short wPayloadLen,
- unsigned char *pMICHDR
-)
-{
- struct vnt_mic_hdr *mic_hdr = (struct vnt_mic_hdr *)pMICHDR;
- unsigned long *pdwIV = (unsigned long *)pbyIVHead;
- unsigned long *pdwExtIV = (unsigned long *)((unsigned char *)pbyIVHead+4);
- PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
- unsigned long dwRevIVCounter;
- unsigned char byKeyIndex = 0;
-
- //Fill TXKEY
- if (pTransmitKey == NULL)
- return;
-
- dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
- *pdwIV = pDevice->dwIVCounter;
- byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
-
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
- memcpy(pDevice->abyPRNG, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- } else {
- memcpy(pbyBuf, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- if (pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
- memcpy(pbyBuf+8, (unsigned char *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- }
- memcpy(pDevice->abyPRNG, pbyBuf, 16);
- }
- // Append IV after Mac Header
- *pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
- *pdwIV |= (unsigned long)byKeyIndex << 30;
- *pdwIV = cpu_to_le32(*pdwIV);
- pDevice->dwIVCounter++;
- if (pDevice->dwIVCounter > WEP_IV_MASK)
- pDevice->dwIVCounter = 0;
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0)
- pTransmitKey->dwTSC47_16++;
-
- TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
- pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
- memcpy(pbyBuf, pDevice->abyPRNG, 16);
- // Make IV
- memcpy(pdwIV, pDevice->abyPRNG, 3);
-
- *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- // Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
- pr_debug("vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0)
- pTransmitKey->dwTSC47_16++;
-
- memcpy(pbyBuf, pTransmitKey->abyKey, 16);
-
- // Make IV
- *pdwIV = 0;
- *(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- *pdwIV |= cpu_to_le16((unsigned short)(pTransmitKey->wTSC15_0));
- //Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
-
- /* MICHDR0 */
- mic_hdr->id = 0x59;
- mic_hdr->tx_priority = 0;
- memcpy(mic_hdr->mic_addr2, pMACHeader->abyAddr2, ETH_ALEN);
-
- /* ccmp pn big endian order */
- mic_hdr->ccmp_pn[0] = (u8)(pTransmitKey->dwTSC47_16 >> 24);
- mic_hdr->ccmp_pn[1] = (u8)(pTransmitKey->dwTSC47_16 >> 16);
- mic_hdr->ccmp_pn[2] = (u8)(pTransmitKey->dwTSC47_16 >> 8);
- mic_hdr->ccmp_pn[3] = (u8)pTransmitKey->dwTSC47_16;
- mic_hdr->ccmp_pn[4] = (u8)(pTransmitKey->wTSC15_0 >> 8);
- mic_hdr->ccmp_pn[5] = (u8)pTransmitKey->wTSC15_0;
-
- /* MICHDR1 */
- mic_hdr->payload_len = cpu_to_be16(wPayloadLen);
-
- if (pDevice->bLongHeader)
- mic_hdr->hlen = cpu_to_be16(28);
- else
- mic_hdr->hlen = cpu_to_be16(22);
-
- memcpy(mic_hdr->addr1, pMACHeader->abyAddr1, ETH_ALEN);
- memcpy(mic_hdr->addr2, pMACHeader->abyAddr2, ETH_ALEN);
-
- /* MICHDR2 */
- memcpy(mic_hdr->addr3, pMACHeader->abyAddr3, ETH_ALEN);
- mic_hdr->frame_control =
- cpu_to_le16(pMACHeader->wFrameCtl & 0xc78f);
- mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->wSeqCtl & 0xf);
-
- if (pDevice->bLongHeader)
- memcpy(mic_hdr->addr4, pMACHeader->abyAddr4, ETH_ALEN);
- }
-}
-
-static
-void
-s_vSWencryption(
- struct vnt_private *pDevice,
- PSKeyItem pTransmitKey,
- unsigned char *pbyPayloadHead,
- unsigned short wPayloadSize
-)
-{
- unsigned int cbICVlen = 4;
- unsigned long dwICV = 0xFFFFFFFFL;
- unsigned long *pdwICV;
-
- if (pTransmitKey == NULL)
- return;
-
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- //=======================================================================
- // Append ICV after payload
- dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
- // finally, we must invert dwCRC to get the correct answer
- *pdwICV = cpu_to_le32(~dwICV);
- // RC4 encryption
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength + 3);
- rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
- //=======================================================================
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- //=======================================================================
- //Append ICV after payload
- dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
- pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
- // finally, we must invert dwCRC to get the correct answer
- *pdwICV = cpu_to_le32(~dwICV);
- // RC4 encryption
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
- rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
- //=======================================================================
- }
-}
-
static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
@@ -683,7 +498,8 @@ s_uFillDataHead(
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
- unsigned short wCurrentRate
+ unsigned short wCurrentRate,
+ bool is_pspoll
)
{
@@ -702,15 +518,24 @@ s_uFillDataHead(
pDevice->byTopCCKBasicRate,
PK_TYPE_11B, &buf->b);
- /* Get Duration and TimeStamp */
- buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
- byPktType, wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption));
- buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
- PK_TYPE_11B, pDevice->byTopCCKBasicRate,
- bNeedAck, uFragIdx, cbLastFragmentSize,
- uMACfragNum, byFBOption));
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration_a = dur;
+ buf->duration_b = dur;
+ } else {
+ /* Get Duration and TimeStamp */
+ buf->duration_a =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
+ byPktType, wCurrentRate, bNeedAck, uFragIdx,
+ cbLastFragmentSize, uMACfragNum,
+ byFBOption));
+ buf->duration_b =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
+ PK_TYPE_11B, pDevice->byTopCCKBasicRate,
+ bNeedAck, uFragIdx, cbLastFragmentSize,
+ uMACfragNum, byFBOption));
+ }
buf->time_stamp_off_a = vnt_time_stamp_off(pDevice, wCurrentRate);
buf->time_stamp_off_b = vnt_time_stamp_off(pDevice, pDevice->byTopCCKBasicRate);
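The new is_pspoll branch above reflects how PS-Poll frames use the duration/ID field: it carries the association ID with bits 14 and 15 set rather than a time value, and the value written into duration_a/duration_b is what ends up in the transmitted frame (the MAC replaces the header's duration with the FIFO-header one, as the removed comment near the old PS-POLL handling further down explains). A minimal illustration of the encoding (the AID value is only an example):

/* PS-Poll duration/ID encoding: AID in the low 14 bits, top two bits set. */
u16 aid = 5;						/* example AID */
__le16 dur = cpu_to_le16(aid | BIT(14) | BIT(15));	/* yields 0xc005 */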
@@ -764,11 +589,18 @@ s_uFillDataHead(
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
- /* Get Duration and TimeStampOff */
- buf->duration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration = dur;
+ } else {
+ /* Get Duration and TimeStampOff */
+ buf->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
+ }
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
@@ -778,17 +610,27 @@ s_uFillDataHead(
/* Get SignalField, ServiceField & Length */
vnt_get_phy_field(pDevice, cbFrameLength, wCurrentRate,
byPktType, &buf->ab);
- /* Get Duration and TimeStampOff */
- buf->duration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
+
+ if (is_pspoll) {
+ __le16 dur = cpu_to_le16(pDevice->current_aid | BIT(14) | BIT(15));
+
+ buf->duration = dur;
+ } else {
+ /* Get Duration and TimeStampOff */
+ buf->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
+ }
+
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
return 0;
}
+
static
void
s_vFillRTSHead(
@@ -798,7 +640,7 @@ s_vFillRTSHead(
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
- PSEthernetHeader psEthHeader,
+ struct ieee80211_hdr *hdr,
unsigned short wCurrentRate,
unsigned char byFBOption
)
@@ -850,18 +692,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} else {
struct vnt_rts_g_fb *buf = pvRTS;
/* Get SignalField, ServiceField & Length */
@@ -914,19 +746,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} // if (byFBOption == AUTO_FB_NONE)
} else if (byPktType == PK_TYPE_11A) {
if (byFBOption == AUTO_FB_NONE) {
@@ -947,19 +768,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
-
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
} else {
struct vnt_rts_a_fb *buf = pvRTS;
/* Get SignalField, ServiceField & Length */
@@ -988,16 +798,8 @@ s_vFillRTSHead(
cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
}
} else if (byPktType == PK_TYPE_11B) {
struct vnt_rts_ab *buf = pvRTS;
@@ -1016,17 +818,8 @@ s_vFillRTSHead(
buf->data.frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- memcpy(&buf->data.ra, psEthHeader->abyDstAddr, ETH_ALEN);
- } else {
- memcpy(&buf->data.ra, pDevice->abyBSSID, ETH_ALEN);
- }
-
- if (pDevice->op_mode == NL80211_IFTYPE_AP)
- memcpy(&buf->data.ta, pDevice->abyBSSID, ETH_ALEN);
- else
- memcpy(&buf->data.ta, psEthHeader->abySrcAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra, hdr->addr1);
+ ether_addr_copy(buf->data.ta, hdr->addr2);
}
}
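Across all the RTS variants above, the op_mode-dependent address selection disappears because mac80211 now hands the driver a complete 802.11 header: the RTS receiver and transmitter addresses are simply addr1 and addr2 of that header. A condensed sketch of the shared pattern (the helper name is hypothetical):

/* Sketch: RA/TA of the protection frame come straight from the data
 * frame's 802.11 header supplied by mac80211.
 */
static void vnt_fill_rts_addrs(struct vnt_rts_ab *buf,
			       struct ieee80211_hdr *hdr)
{
	ether_addr_copy(buf->data.ra, hdr->addr1);	/* receiver    */
	ether_addr_copy(buf->data.ta, hdr->addr2);	/* transmitter */
}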
@@ -1093,7 +886,8 @@ s_vFillCTSHead(
buf->reserved2 = 0x0;
- memcpy(&buf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra,
+ pDevice->abyCurrentNetAddr);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
struct vnt_cts *buf = pvCTS;
/* Get SignalField, ServiceField & Length */
@@ -1116,7 +910,8 @@ s_vFillCTSHead(
IEEE80211_STYPE_CTS);
buf->reserved2 = 0x0;
- memcpy(&buf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ ether_addr_copy(buf->data.ra,
+ pDevice->abyCurrentNetAddr);
}
}
}
@@ -1156,11 +951,10 @@ s_vGenerateTxParameter(
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
- PSEthernetHeader psEthHeader,
+ void *psEthHeader,
unsigned short wCurrentRate
)
{
- unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
unsigned short wFifoCtl;
bool bDisCRC = false;
unsigned char byFBOption = AUTO_FB_NONE;
@@ -1178,9 +972,6 @@ s_vGenerateTxParameter(
else if (wFifoCtl & FIFOCTL_AUTO_FB_1)
byFBOption = AUTO_FB_1;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
-
if (!pvRrvTime)
return;
@@ -1237,90 +1028,30 @@ s_vGenerateTxParameter(
}
}
-static
-void
-s_vFillFragParameter(
- struct vnt_private *pDevice,
- unsigned char *pbyBuffer,
- unsigned int uTxType,
- void *pvtdCurr,
- unsigned short wFragType,
- unsigned int cbReqCount
-)
-{
- PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
-
- if (uTxType == TYPE_SYNCDMA) {
- PSTxSyncDesc ptdCurr = (PSTxSyncDesc)pvtdCurr;
-
- //Set FIFOCtl & TimeStamp in TxSyncDesc
- ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
- ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- if (wFragType == FRAGCTL_ENDFRAG) //Last Fragmentation
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- else
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
- } else {
- PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- if (wFragType == FRAGCTL_ENDFRAG) //Last Fragmentation
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- else
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
- }
-
- pTxBufHead->wFragCtl |= (unsigned short)wFragType;//0x0001; //0000 0000 0000 0001
-}
-
static unsigned int
s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, unsigned int cbFrameBodySize,
+ unsigned char *pbyTxBufferAddr,
unsigned int uDMAIdx, PSTxDesc pHeadTD,
- PSEthernetHeader psEthHeader, unsigned char *pPacket,
- bool bNeedEncrypt, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum)
+ unsigned int is_pspoll)
{
- unsigned int cbMACHdLen;
+ PDEVICE_TD_INFO td_info = pHeadTD->pTDInfo;
+ struct sk_buff *skb = td_info->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct vnt_tx_fifo_head *tx_buffer_head =
+ (struct vnt_tx_fifo_head *)td_info->buf;
+ u16 fifo_ctl = le16_to_cpu(tx_buffer_head->fifo_ctl);
unsigned int cbFrameSize;
- unsigned int cbFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- unsigned int cbFragPayloadSize;
- unsigned int cbLastFragmentSize; //Hdr+(IV)+payoad+(MIC)+(ICV)+FCS
- unsigned int cbLastFragPayloadSize;
- unsigned int uFragIdx;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyIVHead;
- unsigned char *pbyMacHdr;
- unsigned short wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last
__le16 uDuration;
unsigned char *pbyBuffer;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int cb802_1_H_len = 0;
unsigned int uLength = 0;
- unsigned int uTmpLen = 0;
unsigned int cbMICHDR = 0;
- u32 dwMICKey0, dwMICKey1;
- u32 dwMIC_Priority;
- u32 *pdwMIC_L;
- u32 *pdwMIC_R;
- u32 dwSafeMIC_L, dwSafeMIC_R; /* Fix "Last Frag Size" < "MIC length". */
- bool bMIC2Frag = false;
- unsigned int uMICFragLen = 0;
unsigned int uMACfragNum = 1;
unsigned int uPadding = 0;
unsigned int cbReqCount = 0;
-
- bool bNeedACK;
- bool bRTS;
- bool bIsAdhoc;
- unsigned char *pbyType;
+ bool bNeedACK = (bool)(fifo_ctl & FIFOCTL_NEEDACK);
+ bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
PSTxDesc ptdCurr;
- PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
unsigned int cbHeaderLength = 0;
void *pvRrvTime;
struct vnt_mic_hdr *pMICHDR;
@@ -1328,72 +1059,35 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
void *pvCTS;
void *pvTxDataHd;
unsigned short wTxBufSize; // FFinfo size
- unsigned int uTotalCopyLength = 0;
unsigned char byFBOption = AUTO_FB_NONE;
- bool bIsWEP256 = false;
- PSMgmtObject pMgmt = pDevice->pMgmt;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
- bNeedACK = false;
- else
- bNeedACK = true;
- bIsAdhoc = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- bIsAdhoc = false;
- }
+ cbFrameSize = skb->len + 4;
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL)) {
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- cbIVlen = 4;
- cbICVlen = 4;
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)
- bIsWEP256 = true;
- }
- if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- }
- if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
+ if (info->control.hw_key) {
+ switch (info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
cbMICHDR = sizeof(struct vnt_mic_hdr);
+ default:
+ break;
}
+
+ cbFrameSize += info->control.hw_key->icv_len;
+
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
//MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMACHdLen%4);
+ uPadding = 4 - (ieee80211_get_hdrlen_from_skb(skb) % 4);
uPadding %= 4;
}
}
- cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
-
- if ((bNeedACK == false) ||
- (cbFrameSize < pDevice->wRTSThreshold) ||
- ((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold))
-) {
- bRTS = false;
- } else {
- bRTS = true;
- psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
- }
//
// Use for AUTO FALL BACK
//
- if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_0)
+ if (fifo_ctl & FIFOCTL_AUTO_FB_0)
byFBOption = AUTO_FB_0;
- else if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_1)
+ else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
byFBOption = AUTO_FB_1;
//////////////////////////////////////////////////////
@@ -1487,1477 +1181,345 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
}
} // Auto Fall Back
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
-
-//////////////////////////////////////////////////////////////////
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
- } else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
- } else {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[24]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[28]);
- }
- // DO Software Michael
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- pr_debug("MIC KEY: %X, %X\n", dwMICKey0, dwMICKey1);
- }
-
-///////////////////////////////////////////////////////////////////
-
- pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderLength);
- pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
- pbyIVHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding);
-
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true) && (bIsWEP256 == false)) {
- // Fragmentation
- // FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS)
- cbFragmentSize = pDevice->wFragmentationThreshold;
- cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
- //FragNum = (FrameSize-(Hdr+FCS))/(Fragment Size -(Hrd+FCS)))
- uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
- cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
- if (cbLastFragPayloadSize == 0)
- cbLastFragPayloadSize = cbFragPayloadSize;
- else
- uMACfragNum++;
-
- //[Hdr+(IV)+last fragment payload+(MIC)+(ICV)+FCS]
- cbLastFragmentSize = cbMACHdLen + cbLastFragPayloadSize + cbIVlen + cbICVlen + cbFCSlen;
-
- for (uFragIdx = 0; uFragIdx < uMACfragNum; uFragIdx++) {
- if (uFragIdx == 0) {
- //=========================
- // Start Fragmentation
- //=========================
- pr_debug("Start Fragmentation...\n");
- wFragType = FRAGCTL_STAFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
- //Fill IV(ExtIV,RSNHDR)
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- // 802.1H
- if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
- if ((psEthHeader->wType == TYPE_PKT_IPX) ||
- (psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
- } else {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
- }
- pbyType = (unsigned char *)(pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
- cb802_1_H_len = 8;
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
-
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len));
-
- uTotalCopyLength += cbFragPayloadSize - cb802_1_H_len;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("Start MIC: %d\n",
- cbFragPayloadSize);
- MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize);
-
- }
-
- //---------------------------
- // S/W Encryption
- //---------------------------
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)cbFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
- } else if (uFragIdx == (uMACfragNum-1)) {
- //=========================
- // Last Fragmentation
- //=========================
- pr_debug("Last Fragmentation...\n");
-
- wFragType = FRAGCTL_ENDFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbLastFragPayloadSize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
-
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbLastFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
-
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
-
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
-
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- if (bMIC2Frag == false) {
- memcpy((pbyBuffer + uLength),
- (pPacket + 14 + uTotalCopyLength),
- (cbLastFragPayloadSize - cbMIClen)
-);
- //TODO check uTmpLen !
- uTmpLen = cbLastFragPayloadSize - cbMIClen;
-
- }
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n",
- uMICFragLen,
- cbLastFragPayloadSize,
- uTmpLen);
-
- if (bMIC2Frag == false) {
- if (uTmpLen != 0)
- MIC_vAppend((pbyBuffer + uLength), uTmpLen);
- pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- pr_debug("Last MIC:%X, %X\n",
- *pdwMIC_L, *pdwMIC_R);
- } else {
- if (uMICFragLen >= 4) {
- memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
- (cbMIClen - uMICFragLen));
- pr_debug("LAST: uMICFragLen >= 4: %X, %d\n",
- *(unsigned char *)((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
- (cbMIClen - uMICFragLen));
-
- } else {
- memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_L + uMICFragLen),
- (4 - uMICFragLen));
- memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4);
- pr_debug("LAST: uMICFragLen < 4: %X, %d\n",
- *(unsigned char *)((unsigned char *)&dwSafeMIC_R + uMICFragLen - 4),
- (cbMIClen - uMICFragLen));
- }
- }
- MIC_vUnInit();
- } else {
- ASSERT(uTmpLen == (cbLastFragPayloadSize - cbMIClen));
- }
-
- //---------------------------
- // S/W Encryption
- //---------------------------
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbLastFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
-
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
-
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
-
- } else {
- //=========================
- // Middle Fragmentation
- //=========================
- pr_debug("Middle Fragmentation...\n");
-
- wFragType = FRAGCTL_MIDFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
- uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, uFragIdx);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
-
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
-
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
-
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength),
- (pPacket + 14 + uTotalCopyLength),
- cbFragPayloadSize
-);
- uTmpLen = cbFragPayloadSize;
-
- uTotalCopyLength += uTmpLen;
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- MIC_vAppend((pbyBuffer + uLength), uTmpLen);
-
- if (uTmpLen < cbFragPayloadSize) {
- bMIC2Frag = true;
- uMICFragLen = cbFragPayloadSize - uTmpLen;
- ASSERT(uMICFragLen < cbMIClen);
-
- pdwMIC_L = (u32 *)(pbyBuffer + uLength + uTmpLen);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength + uTmpLen + 4);
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- dwSafeMIC_L = *pdwMIC_L;
- dwSafeMIC_R = *pdwMIC_R;
-
- pr_debug("MIDDLE: uMICFragLen:%d, cbFragPayloadSize:%d, uTmpLen:%d\n",
- uMICFragLen,
- cbFragPayloadSize,
- uTmpLen);
- pr_debug("Fill MIC in Middle frag [%d]\n",
- uMICFragLen);
- pr_debug("Get MIC:%X, %X\n",
- *pdwMIC_L, *pdwMIC_R);
- }
- pr_debug("Middle frag len: %d\n",
- uTmpLen);
-
- } else {
- ASSERT(uTmpLen == (cbFragPayloadSize));
- }
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbFragPayloadSize);
- cbReqCount += cbICVlen;
- }
- }
-
- ptdCurr = (PSTxDesc)pHeadTD;
-
- //--------------------
- //1.Set TSR1 & ReqCount in TxDescHead
- //2.Set FragCtl in TxBufferHead
- //3.Set Frame Control
- //4.Set Sequence Control
- //5.Get S/W generate FCS
- //--------------------
-
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
-
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- pDevice->iTDUsed[uDMAIdx]++;
- pHeadTD = ptdCurr->next;
- }
- } // for (uMACfragNum)
- } else {
- //=========================
- // No Fragmentation
- //=========================
- wFragType = FRAGCTL_NONFRAG;
-
- //Set FragCtl in TxBufferHead
- psTxBufHd->wFragCtl |= (unsigned short)wFragType;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
- 0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate);
-
- // Generate TX MAC Header
- vGenerateMACHeader(pDevice, pbyMacHdr, uDuration, psEthHeader, bNeedEncrypt,
- wFragType, uDMAIdx, 0);
-
- if (bNeedEncrypt == true) {
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
- }
-
- // 802.1H
- if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
- if ((psEthHeader->wType == TYPE_PKT_IPX) ||
- (psEthHeader->wType == cpu_to_le16(0xF380))) {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
- } else {
- memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
- }
- pbyType = (unsigned char *)(pbyPayloadHead + 6);
- memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
- cb802_1_H_len = 8;
- }
-
- cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen);
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
- uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
- //copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
+ td_info->mic_hdr = pMICHDR;
- // Copy the Packet into a tx Buffer
- memcpy((pbyBuffer + uLength),
- (pPacket + 14),
- cbFrameBodySize - cb802_1_H_len
-);
-
- if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- pr_debug("Length:%d, %d\n",
- cbFrameBodySize - cb802_1_H_len, uLength);
-
- MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
-
- pdwMIC_L = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
- pdwMIC_R = (u32 *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
-
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- MIC_vUnInit();
-
- if (pDevice->bTxMICFail == true) {
- *pdwMIC_L = 0;
- *pdwMIC_R = 0;
- pDevice->bTxMICFail = false;
- }
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
- pr_debug("uLength: %d, %d\n", uLength, cbFrameBodySize);
- pr_debug("cbReqCount:%d, %d, %d, %d\n",
- cbReqCount, cbHeaderLength, uPadding, cbIVlen);
- pr_debug("MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
+ /* Fill FIFO, RrvTime, RTS, and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, tx_buffer_head, pvRrvTime, pvRTS, pvCTS,
+ cbFrameSize, bNeedACK, uDMAIdx, hdr, pDevice->wCurrentRate);
+ /* Fill DataHead */
+ uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
+ 0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate, is_pspoll);
- }
+ hdr->duration_id = uDuration;
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
- if (bNeedEncrypt) {
- s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len),
- (unsigned short)(cbFrameBodySize + cbMIClen));
- cbReqCount += cbICVlen;
- }
- }
+ cbReqCount = cbHeaderLength + uPadding + skb->len;
+ pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+ uLength = cbHeaderLength + uPadding;
- ptdCurr = (PSTxDesc)pHeadTD;
+ /* Copy the Packet into a tx Buffer */
+ memcpy((pbyBuffer + uLength), skb->data, skb->len);
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
- ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
- ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
- ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- //Set TSR1 & ReqCount in TxDescHead
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
+ ptdCurr = (PSTxDesc)pHeadTD;
- pDevice->iTDUsed[uDMAIdx]++;
-
- }
- *puMACfragNum = uMACfragNum;
+ ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+ ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
+ ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
+ ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
+ /* Set TSR1 & ReqCount in TxDescHead */
+ ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+ ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
return cbHeaderLength;
}
-void
-vGenerateFIFOHeader(struct vnt_private *pDevice, unsigned char byPktType,
- unsigned char *pbyTxBufferAddr, bool bNeedEncrypt,
- unsigned int cbPayloadSize, unsigned int uDMAIdx,
- PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket,
- PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum,
- unsigned int *pcbHeaderSize)
+static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
+ struct ieee80211_key_conf *tx_key,
+ struct sk_buff *skb, u16 payload_len,
+ struct vnt_mic_hdr *mic_hdr)
{
- unsigned int wTxBufSize; // FFinfo size
- bool bNeedACK;
- bool bIsAdhoc;
- unsigned short cbMacHdLen;
- PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
-
- wTxBufSize = sizeof(STxBufHead);
-
- memset(pTxBufHead, 0, wTxBufSize);
- //Set FIFOCTL_NEEDACK
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) {
- bNeedACK = false;
- pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
- } else {
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
+ struct ieee80211_key_seq seq;
+ u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
+
+ /* strip header and icv len from payload */
+ payload_len -= ieee80211_get_hdrlen_from_skb(skb);
+ payload_len -= tx_key->icv_len;
+
+ switch (tx_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ memcpy(key_buffer, iv, 3);
+ memcpy(key_buffer + 3, tx_key->key, tx_key->keylen);
+
+ if (tx_key->keylen == WLAN_KEY_LEN_WEP40) {
+ memcpy(key_buffer + 8, iv, 3);
+ memcpy(key_buffer + 11,
+ tx_key->key, WLAN_KEY_LEN_WEP40);
}
- bIsAdhoc = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- bIsAdhoc = false;
- }
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
-
- //Set FIFOCTL_LHEAD
- if (pDevice->bLongHeader)
- pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
- //Set FIFOCTL_GENINT
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
-
- //Set FIFOCTL_ISDMA0
- if (TYPE_TXDMA0 == uDMAIdx)
- pTxBufHead->wFIFOCtl |= FIFOCTL_ISDMA0;
-
- //Set FRAGCTL_MACHDCNT
- if (pDevice->bLongHeader)
- cbMacHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
-
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) //0000 0000 0000 0000
- ;
- else if (byPktType == PK_TYPE_11B) //0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- else if (byPktType == PK_TYPE_11GB) //0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- else if (byPktType == PK_TYPE_11GA) //0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
-
- //Set Auto Fallback Ctl
- if (pDevice->wCurrentRate >= RATE_18M) {
- if (pDevice->byAutoFBCtrl == AUTO_FB_0)
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
- else if (pDevice->byAutoFBCtrl == AUTO_FB_1)
- pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
- }
-
- //Set FRAGCTL_WEPTYP
- pDevice->bAES = false;
-
- //Set FRAGCTL_WEPTYP
- if (pDevice->byLocalID > REV_ID_VT3253_A1) {
- if ((bNeedEncrypt) && (pTransmitKey != NULL)) { //WEP enabled
- if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104
- if (pTransmitKey->uKeyLength != WLAN_WEP232_KEYLEN)
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { //CCMP
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- }
- }
- }
-
- RFbSetPower(pDevice, pDevice->wCurrentRate, pDevice->byCurrentCh);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
-
- *pcbHeaderSize = s_cbFillTxBufHead(pDevice, byPktType, pbyTxBufferAddr, cbPayloadSize,
- uDMAIdx, pHeadTD, psEthHeader, pPacket, bNeedEncrypt,
- pTransmitKey, uNodeIndex, puMACfragNum);
-}
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
-/*+
- *
- * Description:
- * Translate 802.3 to 802.11 header
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * dwTxBufferAddr - Transmit Buffer
- * pPacket - Packet from upper layer
- * cbPacketSize - Transmit Data Length
- * Out:
- * pcbHeadSize - Header size of MAC&Baseband control and 802.11 Header
- * pcbAppendPayload - size of append payload for 802.1H translation
- *
- * Return Value: none
- *
- -*/
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
-void
-vGenerateMACHeader(
- struct vnt_private *pDevice,
- unsigned char *pbyBufferAddr,
- __le16 wDuration,
- PSEthernetHeader psEthHeader,
- bool bNeedEncrypt,
- unsigned short wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
-)
-{
- PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
+ if (!mic_hdr)
+ return;
- memset(pMACHeader, 0, (sizeof(S802_11Header)));
+ mic_hdr->id = 0x59;
+ mic_hdr->payload_len = cpu_to_be16(payload_len);
+ ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- if (uDMAIdx == TYPE_ATIMDMA)
- pMACHeader->wFrameCtl = TYPE_802_11_ATIM;
- else
- pMACHeader->wFrameCtl = TYPE_802_11_DATA;
+ ieee80211_get_key_tx_seq(tx_key, &seq);
- if (pDevice->op_mode == NL80211_IFTYPE_AP) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- pMACHeader->wFrameCtl |= FC_FROMDS;
- } else {
- if (pDevice->op_mode == NL80211_IFTYPE_ADHOC) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- } else {
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
- memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
- pMACHeader->wFrameCtl |= FC_TODS;
- }
- }
+ memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
- if (bNeedEncrypt)
- pMACHeader->wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_ISWEP(1));
+ if (ieee80211_has_a4(hdr->frame_control))
+ mic_hdr->hlen = cpu_to_be16(28);
+ else
+ mic_hdr->hlen = cpu_to_be16(22);
- pMACHeader->wDurationID = le16_to_cpu(wDuration);
+ ether_addr_copy(mic_hdr->addr1, hdr->addr1);
+ ether_addr_copy(mic_hdr->addr2, hdr->addr2);
+ ether_addr_copy(mic_hdr->addr3, hdr->addr3);
- if (pDevice->bLongHeader) {
- PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
+ mic_hdr->frame_control = cpu_to_le16(
+ le16_to_cpu(hdr->frame_control) & 0xc78f);
+ mic_hdr->seq_ctrl = cpu_to_le16(
+ le16_to_cpu(hdr->seq_ctrl) & 0xf);
- pMACHeader->wFrameCtl |= (FC_TODS | FC_FROMDS);
- memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
- }
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
+ if (ieee80211_has_a4(hdr->frame_control))
+ ether_addr_copy(mic_hdr->addr4, hdr->addr4);
- //Set FragNumber in Sequence Control
- pMACHeader->wSeqCtl |= cpu_to_le16((unsigned short)uFragIdx);
+ memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);
- if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) {
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
+ break;
+ default:
+ break;
}
-
- if ((wFragType == FRAGCTL_STAFRAG) || (wFragType == FRAGCTL_MIDFRAG)) //StartFrag or MidFrag
- pMACHeader->wFrameCtl |= FC_MOREFRAG;
}
-CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice, PSTxMgmtPacket pPacket)
+int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ PSTxDesc head_td, struct sk_buff *skb)
{
- PSTxDesc pFrstTD;
- unsigned char byPktType;
- unsigned char *pbyTxBufferAddr;
- void *pvRTS;
- struct vnt_cts *pCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- bool bNeedACK;
- bool bIsPSPOLL = false;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- unsigned short wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wCurrentRate = RATE_1M;
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0)
- return CMD_STATUS_RESOURCES;
-
- pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
- cbFrameBodySize = pPacket->cbPayloadLen;
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
- } else {
- wCurrentRate = RATE_1M;
- byPktType = PK_TYPE_11B;
+ PDEVICE_TD_INFO td_info = head_td->pTDInfo;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
+ struct ieee80211_rate *rate;
+ struct ieee80211_key_conf *tx_key;
+ struct ieee80211_hdr *hdr;
+ struct vnt_tx_fifo_head *tx_buffer_head =
+ (struct vnt_tx_fifo_head *)td_info->buf;
+ u16 tx_body_size = skb->len, current_rate;
+ u8 pkt_type;
+ bool is_pspoll = false;
+
+ memset(tx_buffer_head, 0, sizeof(*tx_buffer_head));
+
+ hdr = (struct ieee80211_hdr *)(skb->data);
+
+ rate = ieee80211_get_tx_rate(priv->hw, info);
+
+ current_rate = rate->hw_value;
+ if (priv->wCurrentRate != current_rate &&
+ !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+ priv->wCurrentRate = current_rate;
+
+ RFbSetPower(priv, priv->wCurrentRate,
+ priv->hw->conf.chandef.chan->hw_value);
}
- // SetPower will cause error power TX state for OFDM Date packet in TX buffer.
- // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
- // And cmd timer will wait data pkt TX finish before scanning so it's OK
- // to set power here.
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
+ if (current_rate > RATE_11M)
+ pkt_type = (u8)priv->byPacketType;
else
- RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
- //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
- if (pDevice->byFOETuning) {
- if ((pPacket->p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
- wCurrentRate = RATE_24M;
- byPktType = PK_TYPE_11GA;
- }
- }
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- pTxBufHead->wFIFOCtl = 0;
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
+ pkt_type = PK_TYPE_11B;
+
+ /* Set fifo controls */
+ if (pkt_type == PK_TYPE_11A)
+ tx_buffer_head->fifo_ctl = 0;
+ else if (pkt_type == PK_TYPE_11B)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11B);
+ else if (pkt_type == PK_TYPE_11GB)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GB);
+ else if (pkt_type == PK_TYPE_11GA)
+ tx_buffer_head->fifo_ctl = cpu_to_le16(FIFOCTL_11GA);
+
+ /* generate interrupt */
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
+
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_TMOEN);
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_ISDMA0);
+ tx_buffer_head->time_stamp =
+ cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
+ } else {
+ tx_buffer_head->time_stamp =
+ cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
}
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_NEEDACK);
- if (is_multicast_ether_addr(&(pPacket->p80211Header->sA3.abyAddr1[0])))
- bNeedACK = false;
- else {
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
+ if (ieee80211_has_retry(hdr->frame_control))
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
- }
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ priv->byPreambleType = PREAMBLE_SHORT;
+ else
+ priv->byPreambleType = PREAMBLE_LONG;
- pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);
- if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = true;
- cbMacHdLen = WLAN_HDR_ADDR2_LEN;
- } else {
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
+ priv->bLongHeader = true;
}
- //Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
-
- // Notes:
- // Although spec says MMPDU can be fragmented; In most cases,
- // no one will send a MMPDU under fragmentation. With RTS may occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
-
- if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
- cbIVlen = 4;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- //We need to get seed here for filling TxKey entry.
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
+ if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
+ is_pspoll = true;
+
+ tx_buffer_head->frag_ctl =
+ cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
+
+ if (info->control.hw_key) {
+ tx_key = info->control.hw_key;
+
+ switch (info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
+ default:
+ break;
}
- //MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMacHdLen%4);
- uPadding %= 4;
}
- cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
+ tx_buffer_head->current_rate = cpu_to_le16(current_rate);
- //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
+ /* Legacy rates: TODO - use ieee80211_tx_rate */
+ if (current_rate >= RATE_18M && ieee80211_is_data(hdr->frame_control)) {
+ if (priv->byAutoFBCtrl == AUTO_FB_0)
+ tx_buffer_head->fifo_ctl |=
+ cpu_to_le16(FIFOCTL_AUTO_FB_0);
+ else if (priv->byAutoFBCtrl == AUTO_FB_1)
+ tx_buffer_head->fifo_ctl |=
+ cpu_to_le16(FIFOCTL_AUTO_FB_1);
- //Set RrvTime/RTS/CTS Buffer
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
- pvRrvTime = (void *) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = (struct vnt_cts *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + sizeof(struct vnt_cts));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
- } else { // 802.11a/b packet
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = NULL;
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_ab));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
-
- memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
- //=========================
- // No Fragmentation
- //=========================
- pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
-
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE, wCurrentRate);
-
- pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
-
- cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
-
- if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
- unsigned char *pbyIVHead;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyBSSID;
- PSKeyItem pTransmitKey = NULL;
-
- pbyIVHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
- pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
-
- //Fill TXKEY
-		//Kyle: Need fix: TKIP and AES didn't encrypt Mnt Packet.
- //s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL);
-
- //Fill IV(ExtIV,RSNHDR)
- //s_vFillPrePayload(pDevice, pbyIVHead, NULL);
- //---------------------------
- // S/W or H/W Encryption
- //---------------------------
- do {
- if ((pDevice->op_mode == NL80211_IFTYPE_STATION) &&
- (pDevice->bLinkPass == true)) {
- pbyBSSID = pDevice->abyBSSID;
- // get pairwise key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
- pr_debug("Get GTK\n");
- break;
- }
- } else {
- pr_debug("Get PTK\n");
- break;
- }
- }
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- pr_debug("KEY is NULL. OP Mode[%d]\n",
- pDevice->op_mode);
- } else {
- pr_debug("Get GTK\n");
- }
- } while (false);
- //Fill TXKEY
- s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- (unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize, NULL);
-
- memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
- memcpy(pbyPayloadHead, ((unsigned char *)(pPacket->p80211Header) + cbMacHdLen),
- cbFrameBodySize);
- } else {
- // Copy the Packet into a tx Buffer
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
- }
+ tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_NONFRAG);
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- if (bIsPSPOLL) {
- // The MAC will automatically replace the Duration-field of MAC header by Duration-field
- // of FIFO control header.
- // This will cause AID-field of PS-POLL packet to be incorrect (Because PS-POLL's AID field is
- // in the same place of other packet's Duration-field).
- // And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- } else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->duration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- }
- }
+ s_cbFillTxBufHead(priv, pkt_type, (u8 *)tx_buffer_head,
+ dma_idx, head_td, is_pspoll);
- // first TD is the only TD
- //Set TSR1 & ReqCount in TxDescHead
- pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
- pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
- pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
- pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
- pFrstTD->pTDInfo->byFlags = 0;
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
+ if (info->control.hw_key) {
+ tx_key = info->control.hw_key;
+ if (tx_key->keylen > 0)
+ vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
+ tx_key, skb, tx_body_size, td_info->mic_hdr);
}
- pDevice->bPWBitOn = false;
-
- wmb();
- pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
-
- pDevice->iTDUsed[TYPE_TXDMA0]++;
-
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1)
- pr_debug(" available td0 <= 1\n");
-
- pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
-
- pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for send null packet
- // Poll Transmit the adapter
- MACvTransmit0(pDevice->PortOffset);
-
- return CMD_STATUS_PENDING;
+ return 0;
}
-CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice, PSTxMgmtPacket pPacket)
+static int vnt_beacon_xmit(struct vnt_private *priv,
+ struct sk_buff *skb)
{
- unsigned char byPktType;
- unsigned char *pbyBuffer = (unsigned char *)pDevice->tx_beacon_bufs;
- unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
- unsigned int cbHeaderSize = 0;
struct vnt_tx_short_buf_head *short_head =
- (struct vnt_tx_short_buf_head *)pbyBuffer;
- PS802_11Header pMACHeader;
- unsigned short wCurrentRate;
-
- memset(short_head, 0, sizeof(*short_head));
+ (struct vnt_tx_short_buf_head *)priv->tx_beacon_bufs;
+ struct ieee80211_mgmt *mgmt_hdr = (struct ieee80211_mgmt *)
+ (priv->tx_beacon_bufs + sizeof(*short_head));
+ struct ieee80211_tx_info *info;
+ u32 frame_size = skb->len + 4;
+ u16 current_rate;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
- } else {
- wCurrentRate = RATE_2M;
- byPktType = PK_TYPE_11B;
- }
+ memset(priv->tx_beacon_bufs, 0, sizeof(*short_head));
- //Set Preamble type always long
- pDevice->byPreambleType = PREAMBLE_LONG;
+ if (priv->byBBType == BB_TYPE_11A) {
+ current_rate = RATE_6M;
- /* Set FIFOCTL_GENINT */
- short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
-
- /* Set packet type & Get Duration */
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- short_head->duration =
- cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A,
- cbFrameSize, byPktType, wCurrentRate, false,
- 0, 0, 1, AUTO_FB_NONE));
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_11B);
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, frame_size, current_rate,
+ PK_TYPE_11A, &short_head->ab);
+ /* Get Duration and TimeStampOff */
short_head->duration =
- cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B,
- cbFrameSize, byPktType, wCurrentRate, false,
- 0, 0, 1, AUTO_FB_NONE));
- }
-
- vnt_get_phy_field(pDevice, cbFrameSize,
- wCurrentRate, byPktType, &short_head->ab);
-
- /* Get TimeStampOff */
- short_head->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
- cbHeaderSize = sizeof(struct vnt_tx_short_buf_head);
-
- //Generate Beacon Header
- pMACHeader = (PS802_11Header)(pbyBuffer + cbHeaderSize);
- memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
-
- pMACHeader->wDurationID = 0;
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- // Set Beacon buffer length
- pDevice->wBCNBufLen = pPacket->cbMPDULen + cbHeaderSize;
-
- MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));
-
- MACvSetCurrBCNLength(pDevice->PortOffset, pDevice->wBCNBufLen);
- // Set auto Transmit on
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- // Poll Transmit the adapter
- MACvTransmitBCN(pDevice->PortOffset);
-
- return CMD_STATUS_PENDING;
-}
-
-unsigned int
-cbGetFragCount(
- struct vnt_private *pDevice,
- PSKeyItem pTransmitKey,
- unsigned int cbFrameBodySize,
- PSEthernetHeader psEthHeader
-)
-{
- unsigned int cbMACHdLen;
- unsigned int cbFrameSize;
-	unsigned int cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
- unsigned int cbFragPayloadSize;
- unsigned int cbLastFragPayloadSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uMACfragNum = 1;
- bool bNeedACK;
-
- if ((pDevice->op_mode == NL80211_IFTYPE_ADHOC) ||
- (pDevice->op_mode == NL80211_IFTYPE_AP)) {
- if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
- bNeedACK = false;
- else
- bNeedACK = true;
- } else {
- // MSDUs in Infra mode always need ACK
- bNeedACK = true;
- }
-
- if (pDevice->bLongHeader)
- cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
- else
- cbMACHdLen = WLAN_HDR_ADDR3_LEN;
-
- if (pDevice->bEncryptionEnable == true) {
- if (pTransmitKey == NULL) {
- if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
- (pDevice->pMgmt->eAuthenMode < WMAC_AUTH_WPA)) {
- cbIVlen = 4;
- cbICVlen = 4;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- }
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- cbIVlen = 4;
- cbICVlen = 4;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- }
- }
-
- cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
-
- if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true)) {
- // Fragmentation
- cbFragmentSize = pDevice->wFragmentationThreshold;
- cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
- uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
- cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
- if (cbLastFragPayloadSize == 0)
- cbLastFragPayloadSize = cbFragPayloadSize;
- else
- uMACfragNum++;
- }
- return uMACfragNum;
-}
-
-void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb,
- unsigned char *pbMPDU, unsigned int cbMPDULen)
-{
- PSTxDesc pFrstTD;
- unsigned char byPktType;
- unsigned char *pbyTxBufferAddr;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
- unsigned int uDuration;
- unsigned int cbReqCount;
- PS802_11Header pMACHeader;
- unsigned int cbHeaderSize;
- unsigned int cbFrameBodySize;
- bool bNeedACK;
- bool bIsPSPOLL = false;
- PSTxBufHead pTxBufHead;
- unsigned int cbFrameSize;
- unsigned int cbIVlen = 0;
- unsigned int cbICVlen = 0;
- unsigned int cbMIClen = 0;
- unsigned int cbFCSlen = 4;
- unsigned int uPadding = 0;
- unsigned int cbMICHDR = 0;
- unsigned int uLength = 0;
- u32 dwMICKey0, dwMICKey1;
- u32 dwMIC_Priority;
- u32 *pdwMIC_L;
- u32 *pdwMIC_R;
- unsigned short wTxBufSize;
- unsigned int cbMacHdLen;
- SEthernetHeader sEthHeader;
- void *pvRrvTime;
- void *pMICHDR;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wCurrentRate = RATE_1M;
- PUWLAN_80211HDR p80211Header;
- unsigned int uNodeIndex = 0;
- bool bNodeExist = false;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- unsigned char *pbyIVHead;
- unsigned char *pbyPayloadHead;
- unsigned char *pbyMacHdr;
-
- unsigned int cbExtSuppRate = 0;
-
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
-
- if (cbMPDULen <= WLAN_HDR_ADDR3_LEN)
- cbFrameBodySize = 0;
- else
- cbFrameBodySize = cbMPDULen - WLAN_HDR_ADDR3_LEN;
-
- p80211Header = (PUWLAN_80211HDR)pbMPDU;
+ cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
+ frame_size, PK_TYPE_11A, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
- pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
- pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
- pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
- wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
- wCurrentRate = RATE_6M;
- byPktType = PK_TYPE_11A;
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(priv, current_rate);
} else {
- wCurrentRate = RATE_1M;
- byPktType = PK_TYPE_11B;
- }
-
- // SetPower will cause error power TX state for OFDM Date packet in TX buffer.
- // 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
- // And cmd timer will wait data pkt TX to finish before scanning so it's OK
- // to set power here.
- if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING)
- RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
- else
- RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
-
- pTxBufHead->byTxPower = pDevice->byCurPwr;
-
- //+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
- if (pDevice->byFOETuning) {
- if ((p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
- wCurrentRate = RATE_24M;
- byPktType = PK_TYPE_11GA;
- }
- }
-
- pr_debug("vDMA0_tx_80211: p80211Header->sA3.wFrameCtl = %x\n",
- p80211Header->sA3.wFrameCtl);
-
- //Set packet type
- if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
- pTxBufHead->wFIFOCtl = 0;
- } else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- } else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
- } else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
- }
-
- pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
- pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
-
- if (is_multicast_ether_addr(&(p80211Header->sA3.abyAddr1[0]))) {
- bNeedACK = false;
- if (pDevice->bEnableHostWEP) {
- uNodeIndex = 0;
- bNodeExist = true;
- }
- } else {
- if (pDevice->bEnableHostWEP) {
- if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (unsigned char *)(p80211Header->sA3.abyAddr1), &uNodeIndex))
- bNodeExist = true;
- }
- bNeedACK = true;
- pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
- }
-
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
- }
-
- pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
-
- if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
- bIsPSPOLL = true;
- cbMacHdLen = WLAN_HDR_ADDR2_LEN;
- } else {
- cbMacHdLen = WLAN_HDR_ADDR3_LEN;
- }
-
-	// hostapd daemon ext support rate patch
- if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
- cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN;
+ current_rate = RATE_1M;
+ short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_11B);
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
- cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
+ /* Get SignalField,ServiceField,Length */
+ vnt_get_phy_field(priv, frame_size, current_rate,
+ PK_TYPE_11B, &short_head->ab);
- if (cbExtSuppRate > 0)
- cbFrameBodySize = WLAN_ASSOCRESP_OFF_SUPP_RATES;
- }
+ /* Get Duration and TimeStampOff */
+ short_head->duration =
+ cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
+ frame_size, PK_TYPE_11B, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
- //Set FRAGCTL_MACHDCNT
- pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10);
-
- // Notes:
-	// Although the spec says an MMPDU can be fragmented, in most cases
-	//   no one will send an MMPDU under fragmentation; RTS may still occur.
- pDevice->bAES = false; //Set FRAGCTL_WEPTYP
-
- if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
- cbIVlen = 4;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- cbIVlen = 8;//IV+ExtIV
- cbMIClen = 8;
- cbICVlen = 4;
- pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
- //We need to get seed here for filling TxKey entry.
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- cbIVlen = 8;//RSN Header
- cbICVlen = 8;//MIC
- cbMICHDR = sizeof(struct vnt_mic_hdr);
- pTxBufHead->wFragCtl |= FRAGCTL_AES;
- pDevice->bAES = true;
- }
- //MAC Header should be padding 0 to DW alignment.
- uPadding = 4 - (cbMacHdLen%4);
- uPadding %= 4;
+ short_head->time_stamp_off =
+ vnt_time_stamp_off(priv, current_rate);
}
- cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
-
- //Set FIFOCTL_GrpAckPolicy
- if (pDevice->bGrpAckPolicy == true) //0000 0100 0000 0000
- pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
-
- //the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
+ short_head->fifo_ctl |= cpu_to_le16(FIFOCTL_GENINT);
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
+ /* Copy Beacon */
+ memcpy(mgmt_hdr, skb->data, skb->len);
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts));
- pvRTS = NULL;
- pvCTS = (struct vnt_cts *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
- pvTxDataHd = (void *)(pbyTxBufferAddr + wTxBufSize +
- sizeof(struct vnt_rrv_time_cts) + cbMICHDR + sizeof(struct vnt_cts));
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
- cbMICHDR + sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
+ /* time stamp always 0 */
+ mgmt_hdr->u.beacon.timestamp = 0;
- } else {//802.11a/b packet
-
- pvRrvTime = (void *)(pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (void *)(pbyTxBufferAddr +
- wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
- cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
- cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)mgmt_hdr;
+ hdr->duration_id = 0;
+ hdr->seq_ctrl = cpu_to_le16(priv->wSeqCounter << 4);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
- //=========================
- // No Fragmentation
- //=========================
- pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
-
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
+ priv->wSeqCounter++;
+ if (priv->wSeqCounter > 0x0fff)
+ priv->wSeqCounter = 0;
- //Fill DataHead
- uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE, wCurrentRate);
+ priv->wBCNBufLen = sizeof(*short_head) + skb->len;
- pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
+ MACvSetCurrBCNTxDescAddr(priv->PortOffset, priv->tx_beacon_dma);
- cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
+ MACvSetCurrBCNLength(priv->PortOffset, priv->wBCNBufLen);
+ /* Set auto Transmit on */
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ /* Poll Transmit the adapter */
+ MACvTransmitBCN(priv->PortOffset);
- pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize);
- pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
- pbyIVHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding);
+ return 0;
+}
- // Copy the Packet into a tx Buffer
- memcpy(pbyMacHdr, pbMPDU, cbMacHdLen);
+int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
+{
+ struct sk_buff *beacon;
-	// version set to 0, patch for hostapd daemon
- pMACHeader->wFrameCtl &= cpu_to_le16(0xfffc);
- memcpy(pbyPayloadHead, (pbMPDU + cbMacHdLen), cbFrameBodySize);
+ beacon = ieee80211_beacon_get(priv->hw, vif);
+ if (!beacon)
+ return -ENOMEM;
-	// replace support rate, patch for hostapd daemon (only supports 11M)
- if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
- if (cbExtSuppRate != 0) {
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
- memcpy((pbyPayloadHead + cbFrameBodySize),
- pMgmt->abyCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
- if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
- memcpy((pbyPayloadHead + cbFrameBodySize) + ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN,
- pMgmt->abyCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
+ if (vnt_beacon_xmit(priv, beacon)) {
+ ieee80211_free_txskb(priv->hw, beacon);
+ return -ENODEV;
}
- // Set wep
- if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
- if (pDevice->bEnableHostWEP) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength
-);
- }
-
- if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
- dwMICKey0 = *(u32 *)(&pTransmitKey->abyKey[16]);
- dwMICKey1 = *(u32 *)(&pTransmitKey->abyKey[20]);
-
- // DO Software Michael
- MIC_vInit(dwMICKey0, dwMICKey1);
- MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
- dwMIC_Priority = 0;
- MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
- pr_debug("DMA0_tx_8021:MIC KEY: %X, %X\n",
- dwMICKey0, dwMICKey1);
-
- uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
-
- MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
-
- pdwMIC_L = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
- pdwMIC_R = (u32 *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
-
- MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
- MIC_vUnInit();
-
- if (pDevice->bTxMICFail == true) {
- *pdwMIC_L = 0;
- *pdwMIC_R = 0;
- pDevice->bTxMICFail = false;
- }
-
- pr_debug("uLength: %d, %d\n", uLength, cbFrameBodySize);
- pr_debug("cbReqCount:%d, %d, %d, %d\n",
- cbReqCount, cbHeaderSize, uPadding, cbIVlen);
- pr_debug("MIC:%x, %x\n", *pdwMIC_L, *pdwMIC_R);
-
- }
-
- s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
-
- if (pDevice->bEnableHostWEP) {
- pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
- pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
- }
-
- if ((pDevice->byLocalID <= REV_ID_VT3253_A1))
- s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (unsigned short)(cbFrameBodySize + cbMIClen));
- }
+ return 0;
+}
- pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
- pDevice->wSeqCounter++;
- if (pDevice->wSeqCounter > 0x0fff)
- pDevice->wSeqCounter = 0;
-
- if (bIsPSPOLL) {
- // The MAC will automatically replace the Duration-field of MAC header by Duration-field
- // of FIFO control header.
-		// This will cause AID-field of PS-POLL packet to be incorrect (Because PS-POLL's AID field is
- // in the same place of other packet's Duration-field).
- // And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
- ((struct vnt_tx_datahead_g *)pvTxDataHd)->duration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
- } else {
- ((struct vnt_tx_datahead_ab *)pvTxDataHd)->duration = cpu_to_le16(p80211Header->sA2.wDurationID);
- }
- }
+int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ int ret;
- // first TD is the only TD
- //Set TSR1 & ReqCount in TxDescHead
- pFrstTD->pTDInfo->skb = skb;
- pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
- pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
- pFrstTD->m_td1TD1.wReqCount = cpu_to_le16(cbReqCount);
- pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
- pFrstTD->pTDInfo->byFlags = 0;
- pFrstTD->pTDInfo->byFlags |= TD_FLAGS_PRIV_SKB;
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
- pDevice->bPWBitOn = false;
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- wmb();
- pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
+ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- pDevice->iTDUsed[TYPE_TXDMA0]++;
+ CARDvSetFirstNextTBTT(priv, conf->beacon_int);
- if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1)
- pr_debug(" available td0 <= 1\n");
+ CARDbSetBeaconPeriod(priv, conf->beacon_int);
- pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
+ ret = vnt_beacon_make(priv, vif);
- // Poll Transmit the adapter
- MACvTransmit0(pDevice->PortOffset);
+ return ret;
}
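
The sequence-control updates above (hdr->seq_ctrl = cpu_to_le16(priv->wSeqCounter << 4), wrapping the counter at 0x0fff) follow the 802.11 Sequence Control layout: fragment number in bits 0-3, sequence number in bits 4-15. Below is a minimal sketch of the same wrap-and-shift logic using the masks from <linux/ieee80211.h>; the helper name vnt_set_seq is hypothetical and not part of this patch.

#include <linux/ieee80211.h>

static void vnt_set_seq(struct ieee80211_hdr *hdr, u16 *seq_counter)
{
	/* keep the fragment number (bits 0-3), replace the sequence number */
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16((*seq_counter << 4) & IEEE80211_SCTL_SEQ);

	/* 12-bit counter, so wrap after 0x0fff as the driver does */
	*seq_counter = (*seq_counter + 1) & 0x0fff;
}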
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 8ee62887dee5..b9bd1639b13e 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -29,9 +29,11 @@
#ifndef __RXTX_H__
#define __RXTX_H__
-#include "ttype.h"
#include "device.h"
-#include "wcmd.h"
+
+#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
+#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
+
/*--------------------- Export Definitions -------------------------*/
@@ -173,6 +175,14 @@ struct vnt_cts_fb {
u16 reserved2;
} __packed;
+struct vnt_tx_fifo_head {
+ u8 tx_key[WLAN_KEY_LEN_CCMP];
+ __le16 fifo_ctl;
+ __le16 time_stamp;
+ __le16 frag_ctl;
+ __le16 current_rate;
+} __packed;
+
struct vnt_tx_short_buf_head {
__le16 fifo_ctl;
u16 time_stamp;
@@ -181,38 +191,10 @@ struct vnt_tx_short_buf_head {
__le16 time_stamp_off;
} __packed;
-void
-vGenerateMACHeader(
- struct vnt_private *,
- unsigned char *pbyBufferAddr,
- unsigned short wDuration,
- PSEthernetHeader psEthHeader,
- bool bNeedEncrypt,
- unsigned short wFragType,
- unsigned int uDMAIdx,
- unsigned int uFragIdx
-);
-
-unsigned int
-cbGetFragCount(
- struct vnt_private *,
- PSKeyItem pTransmitKey,
- unsigned int cbFrameBodySize,
- PSEthernetHeader psEthHeader
-);
-
-void
-vGenerateFIFOHeader(struct vnt_private *, unsigned char byPktTyp,
- unsigned char *pbyTxBufferAddr, bool bNeedEncrypt,
- unsigned int cbPayloadSize, unsigned int uDMAIdx,
- PSTxDesc pHeadTD, PSEthernetHeader psEthHeader,
- unsigned char *pPacket, PSKeyItem pTransmitKey,
- unsigned int uNodeIndex, unsigned int *puMACfragNum,
- unsigned int *pcbHeaderSize);
-
-void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb,
- unsigned char *pbMPDU, unsigned int cbMPDULen);
-CMD_STATUS csMgmt_xmit(struct vnt_private *, PSTxMgmtPacket pPacket);
-CMD_STATUS csBeacon_xmit(struct vnt_private *, PSTxMgmtPacket pPacket);
+int vnt_generate_fifo_header(struct vnt_private *, u32,
+ PSTxDesc head_td, struct sk_buff *);
+int vnt_beacon_make(struct vnt_private *, struct ieee80211_vif *);
+int vnt_beacon_enable(struct vnt_private *, struct ieee80211_vif *,
+ struct ieee80211_bss_conf *);
#endif // __RXTX_H__
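
The header now exports vnt_beacon_enable()/vnt_beacon_make() in place of the old csBeacon_xmit(). The sketch below shows a hypothetical caller, assuming the usual mac80211 wiring of this era (a bss_info_changed-style callback with a u32 changed bitmap and conf->enable_beacon); none of it is part of the patch itself.

static void vnt_bss_beacon_sketch(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *conf,
				  u32 changed)
{
	struct vnt_private *priv = hw->priv;

	/* (re)program the beacon template when mac80211 toggles beaconing */
	if ((changed & BSS_CHANGED_BEACON_ENABLED) && conf->enable_beacon)
		vnt_beacon_enable(priv, vif, conf);
	else if (changed & BSS_CHANGED_BEACON)
		vnt_beacon_make(priv, vif);
}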
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 5396e5832c22..9ec49e653b61 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -44,7 +44,6 @@
#include "upc.h"
#include "tmacro.h"
-#include "tether.h"
#include "mac.h"
#include "srom.h"
@@ -108,144 +107,6 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntO
}
/*
- * Description: Write a byte to EEPROM, by MAC I2C
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * wData - data to write
- * Out:
- * none
- *
- * Return Value: true if succeeded; false if failed.
- *
- */
-bool SROMbWriteEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byData)
-{
- unsigned short wDelay, wNoACK;
- unsigned char byWait;
-
- unsigned char byOrg;
-
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- /* turn off hardware retry for getting NACK */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
- for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
- VNSvOutPortB(dwIoBase + MAC_REG_I2MDOPT, byData);
-
- /* issue write command */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMW);
- /* wait DONE be set */
- for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
- if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
- break;
- PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
- }
-
- if ((wDelay < W_MAX_TIMEOUT) &&
- (!(byWait & I2MCSR_NACK))) {
- break;
- }
- }
- if (wNoACK == W_MAX_I2CRETRY) {
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return false;
- }
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
- return true;
-}
-
-/*
- * Description: Turn bits on in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byBits - bits to turn on
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset, (unsigned char)(byOrgData | byBits));
-}
-
-/*
- * Description: Turn bits off in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byBits - bits to turn off
- * Out:
- * none
- *
- */
-void SROMvRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- SROMbWriteEmbedded(dwIoBase, byContntOffset, (unsigned char)(byOrgData & (~byBits)));
-}
-
-/*
- * Description: Test if bits on in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byTestBits - bits to test
- * Out:
- * none
- *
- * Return Value: true if all test bits on; otherwise false
- *
- */
-bool SROMbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- return (byOrgData & byTestBits) == byTestBits;
-}
-
-/*
- * Description: Test if bits off in eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * byContntOffset - address of EEPROM
- * byTestBits - bits to test
- * Out:
- * none
- *
- * Return Value: true if all test bits off; otherwise false
- *
- */
-bool SROMbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits)
-{
- unsigned char byOrgData;
-
- byOrgData = SROMbyReadEmbedded(dwIoBase, byContntOffset);
- return !(byOrgData & byTestBits);
-}
-
-/*
* Description: Read all contents of eeprom to buffer
*
* Parameters:
@@ -269,30 +130,6 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
}
/*
- * Description: Write all contents of buffer to eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyEepromRegs - EEPROM content Buffer
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvWriteAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
-{
- int ii;
-
- /* ii = Rom Address */
- for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- SROMbWriteEmbedded(dwIoBase, (unsigned char)ii, *pbyEepromRegs);
- pbyEepromRegs++;
- }
-}
-
-/*
* Description: Read Ethernet Address from eeprom to buffer
*
* Parameters:
@@ -314,92 +151,3 @@ void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddres
pbyEtherAddress++;
}
}
-
-/*
- * Description: Write Ethernet Address from buffer to eeprom
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * pbyEtherAddress - Ethernet Address buffer
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void SROMvWriteEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress)
-{
- unsigned char ii;
-
- /* ii = Rom Address */
- for (ii = 0; ii < ETH_ALEN; ii++) {
- SROMbWriteEmbedded(dwIoBase, ii, *pbyEtherAddress);
- pbyEtherAddress++;
- }
-}
-
-/*
- * Description: Read Sub_VID and Sub_SysId from eeprom to buffer
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * pdwSubSysVenId - Sub_VID and Sub_SysId read
- *
- * Return Value: none
- *
- */
-void SROMvReadSubSysVenId(void __iomem *dwIoBase, unsigned long *pdwSubSysVenId)
-{
- unsigned char *pbyData;
-
- pbyData = (unsigned char *)pdwSubSysVenId;
- /* sub vendor */
- *pbyData = SROMbyReadEmbedded(dwIoBase, 6);
- *(pbyData+1) = SROMbyReadEmbedded(dwIoBase, 7);
- /* sub system */
- *(pbyData+2) = SROMbyReadEmbedded(dwIoBase, 8);
- *(pbyData+3) = SROMbyReadEmbedded(dwIoBase, 9);
-}
-
-/*
- * Description: Auto Load EEPROM to MAC register
- *
- * Parameters:
- * In:
- * dwIoBase - I/O base address
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- *
- */
-bool SROMbAutoLoad(void __iomem *dwIoBase)
-{
- unsigned char byWait;
- int ii;
-
- unsigned char byOrg;
-
- VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- /* turn on hardware retry */
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg | I2MCFG_NORETRY));
-
- MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD);
-
- /* ii = Rom Address */
- for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- MACvTimer0MicroSDelay(dwIoBase, CB_EEPROM_READBYTE_WAIT);
- VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
- if (!(byWait & I2MCSR_AUTOLD))
- break;
- }
-
- VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
-
- if (ii == EEP_MAX_CONTEXT_SIZE)
- return false;
- return true;
-}
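
All of the write/autoload helpers removed here share one pattern: kick a command at the MAC's embedded I2C master, then poll I2MCSR until DONE or NACK is reported, bounded by W_MAX_TIMEOUT. A sketch of that wait loop follows, reusing the register and bit names from the removed code; the function name vnt_i2c_wait_done is hypothetical.

static bool vnt_i2c_wait_done(void __iomem *iobase)
{
	unsigned char status = 0;
	unsigned short delay;

	for (delay = 0; delay < W_MAX_TIMEOUT; delay++) {
		VNSvInPortB(iobase + MAC_REG_I2MCSR, &status);
		if (status & (I2MCSR_DONE | I2MCSR_NACK))
			break;
		PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
	}

	return delay < W_MAX_TIMEOUT && !(status & I2MCSR_NACK);
}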
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 3128e535bbd8..7d3e3ef9f17f 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -30,8 +30,6 @@
#ifndef __SROM_H__
#define __SROM_H__
-#include "ttype.h"
-
/*--------------------- Export Definitions -------------------------*/
#define EEP_MAX_CONTEXT_SIZE 256
@@ -91,40 +89,6 @@
/*--------------------- Export Types ------------------------------*/
-// AT24C02 eeprom contents
-// 2048 bits = 256 bytes = 128 words
-//
-typedef struct tagSSromReg {
- unsigned char abyPAR[6]; // 0x00 (unsigned short)
-
- unsigned short wSUB_VID; // 0x03 (unsigned short)
- unsigned short wSUB_SID;
-
- unsigned char byBCFG0; // 0x05 (unsigned short)
- unsigned char byBCFG1;
-
- unsigned char byFCR0; // 0x06 (unsigned short)
- unsigned char byFCR1;
- unsigned char byPMC0; // 0x07 (unsigned short)
- unsigned char byPMC1;
- unsigned char byMAXLAT; // 0x08 (unsigned short)
- unsigned char byMINGNT;
- unsigned char byCFG0; // 0x09 (unsigned short)
- unsigned char byCFG1;
- unsigned short wCISPTR; // 0x0A (unsigned short)
- unsigned short wRsv0; // 0x0B (unsigned short)
- unsigned short wRsv1; // 0x0C (unsigned short)
- unsigned char byBBPAIR; // 0x0D (unsigned short)
- unsigned char byRFTYPE;
- unsigned char byMinChannel; // 0x0E (unsigned short)
- unsigned char byMaxChannel;
- unsigned char bySignature; // 0x0F (unsigned short)
- unsigned char byCheckSum;
-
- unsigned char abyReserved0[96]; // 0x10 (unsigned short)
- unsigned char abyCIS[128]; // 0x80 (unsigned short)
-} SSromReg, *PSSromReg;
-
/*--------------------- Export Macros ------------------------------*/
/*--------------------- Export Classes ----------------------------*/
@@ -134,22 +98,9 @@ typedef struct tagSSromReg {
/*--------------------- Export Functions --------------------------*/
unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset);
-bool SROMbWriteEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byData);
-
-void SROMvRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits);
-void SROMvRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byBits);
-
-bool SROMbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
-bool SROMbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byContntOffset, unsigned char byTestBits);
void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
-void SROMvWriteAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress);
-void SROMvWriteEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress);
-
-void SROMvReadSubSysVenId(void __iomem *dwIoBase, unsigned long *pdwSubSysVenId);
-
-bool SROMbAutoLoad(void __iomem *dwIoBase);
#endif // __EEPROM_H__
diff --git a/drivers/staging/vt6655/tcrc.c b/drivers/staging/vt6655/tcrc.c
deleted file mode 100644
index ddc5efd040f9..000000000000
--- a/drivers/staging/vt6655/tcrc.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tcrc.c
- *
- * Purpose: Implement functions to calculate CRC
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * CRCdwCrc32 -
- * CRCdwGetCrc32 -
- * CRCdwGetCrc32Ex -
- *
- * Revision History:
- *
- */
-
-#include "tcrc.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/* 32-bit CRC table */
-static const unsigned long s_adwCrc32Table[256] = {
- 0x00000000L, 0x77073096L, 0xEE0E612CL, 0x990951BAL,
- 0x076DC419L, 0x706AF48FL, 0xE963A535L, 0x9E6495A3L,
- 0x0EDB8832L, 0x79DCB8A4L, 0xE0D5E91EL, 0x97D2D988L,
- 0x09B64C2BL, 0x7EB17CBDL, 0xE7B82D07L, 0x90BF1D91L,
- 0x1DB71064L, 0x6AB020F2L, 0xF3B97148L, 0x84BE41DEL,
- 0x1ADAD47DL, 0x6DDDE4EBL, 0xF4D4B551L, 0x83D385C7L,
- 0x136C9856L, 0x646BA8C0L, 0xFD62F97AL, 0x8A65C9ECL,
- 0x14015C4FL, 0x63066CD9L, 0xFA0F3D63L, 0x8D080DF5L,
- 0x3B6E20C8L, 0x4C69105EL, 0xD56041E4L, 0xA2677172L,
- 0x3C03E4D1L, 0x4B04D447L, 0xD20D85FDL, 0xA50AB56BL,
- 0x35B5A8FAL, 0x42B2986CL, 0xDBBBC9D6L, 0xACBCF940L,
- 0x32D86CE3L, 0x45DF5C75L, 0xDCD60DCFL, 0xABD13D59L,
- 0x26D930ACL, 0x51DE003AL, 0xC8D75180L, 0xBFD06116L,
- 0x21B4F4B5L, 0x56B3C423L, 0xCFBA9599L, 0xB8BDA50FL,
- 0x2802B89EL, 0x5F058808L, 0xC60CD9B2L, 0xB10BE924L,
- 0x2F6F7C87L, 0x58684C11L, 0xC1611DABL, 0xB6662D3DL,
- 0x76DC4190L, 0x01DB7106L, 0x98D220BCL, 0xEFD5102AL,
- 0x71B18589L, 0x06B6B51FL, 0x9FBFE4A5L, 0xE8B8D433L,
- 0x7807C9A2L, 0x0F00F934L, 0x9609A88EL, 0xE10E9818L,
- 0x7F6A0DBBL, 0x086D3D2DL, 0x91646C97L, 0xE6635C01L,
- 0x6B6B51F4L, 0x1C6C6162L, 0x856530D8L, 0xF262004EL,
- 0x6C0695EDL, 0x1B01A57BL, 0x8208F4C1L, 0xF50FC457L,
- 0x65B0D9C6L, 0x12B7E950L, 0x8BBEB8EAL, 0xFCB9887CL,
- 0x62DD1DDFL, 0x15DA2D49L, 0x8CD37CF3L, 0xFBD44C65L,
- 0x4DB26158L, 0x3AB551CEL, 0xA3BC0074L, 0xD4BB30E2L,
- 0x4ADFA541L, 0x3DD895D7L, 0xA4D1C46DL, 0xD3D6F4FBL,
- 0x4369E96AL, 0x346ED9FCL, 0xAD678846L, 0xDA60B8D0L,
- 0x44042D73L, 0x33031DE5L, 0xAA0A4C5FL, 0xDD0D7CC9L,
- 0x5005713CL, 0x270241AAL, 0xBE0B1010L, 0xC90C2086L,
- 0x5768B525L, 0x206F85B3L, 0xB966D409L, 0xCE61E49FL,
- 0x5EDEF90EL, 0x29D9C998L, 0xB0D09822L, 0xC7D7A8B4L,
- 0x59B33D17L, 0x2EB40D81L, 0xB7BD5C3BL, 0xC0BA6CADL,
- 0xEDB88320L, 0x9ABFB3B6L, 0x03B6E20CL, 0x74B1D29AL,
- 0xEAD54739L, 0x9DD277AFL, 0x04DB2615L, 0x73DC1683L,
- 0xE3630B12L, 0x94643B84L, 0x0D6D6A3EL, 0x7A6A5AA8L,
- 0xE40ECF0BL, 0x9309FF9DL, 0x0A00AE27L, 0x7D079EB1L,
- 0xF00F9344L, 0x8708A3D2L, 0x1E01F268L, 0x6906C2FEL,
- 0xF762575DL, 0x806567CBL, 0x196C3671L, 0x6E6B06E7L,
- 0xFED41B76L, 0x89D32BE0L, 0x10DA7A5AL, 0x67DD4ACCL,
- 0xF9B9DF6FL, 0x8EBEEFF9L, 0x17B7BE43L, 0x60B08ED5L,
- 0xD6D6A3E8L, 0xA1D1937EL, 0x38D8C2C4L, 0x4FDFF252L,
- 0xD1BB67F1L, 0xA6BC5767L, 0x3FB506DDL, 0x48B2364BL,
- 0xD80D2BDAL, 0xAF0A1B4CL, 0x36034AF6L, 0x41047A60L,
- 0xDF60EFC3L, 0xA867DF55L, 0x316E8EEFL, 0x4669BE79L,
- 0xCB61B38CL, 0xBC66831AL, 0x256FD2A0L, 0x5268E236L,
- 0xCC0C7795L, 0xBB0B4703L, 0x220216B9L, 0x5505262FL,
- 0xC5BA3BBEL, 0xB2BD0B28L, 0x2BB45A92L, 0x5CB36A04L,
- 0xC2D7FFA7L, 0xB5D0CF31L, 0x2CD99E8BL, 0x5BDEAE1DL,
- 0x9B64C2B0L, 0xEC63F226L, 0x756AA39CL, 0x026D930AL,
- 0x9C0906A9L, 0xEB0E363FL, 0x72076785L, 0x05005713L,
- 0x95BF4A82L, 0xE2B87A14L, 0x7BB12BAEL, 0x0CB61B38L,
- 0x92D28E9BL, 0xE5D5BE0DL, 0x7CDCEFB7L, 0x0BDBDF21L,
- 0x86D3D2D4L, 0xF1D4E242L, 0x68DDB3F8L, 0x1FDA836EL,
- 0x81BE16CDL, 0xF6B9265BL, 0x6FB077E1L, 0x18B74777L,
- 0x88085AE6L, 0xFF0F6A70L, 0x66063BCAL, 0x11010B5CL,
- 0x8F659EFFL, 0xF862AE69L, 0x616BFFD3L, 0x166CCF45L,
- 0xA00AE278L, 0xD70DD2EEL, 0x4E048354L, 0x3903B3C2L,
- 0xA7672661L, 0xD06016F7L, 0x4969474DL, 0x3E6E77DBL,
- 0xAED16A4AL, 0xD9D65ADCL, 0x40DF0B66L, 0x37D83BF0L,
- 0xA9BCAE53L, 0xDEBB9EC5L, 0x47B2CF7FL, 0x30B5FFE9L,
- 0xBDBDF21CL, 0xCABAC28AL, 0x53B39330L, 0x24B4A3A6L,
- 0xBAD03605L, 0xCDD70693L, 0x54DE5729L, 0x23D967BFL,
- 0xB3667A2EL, 0xC4614AB8L, 0x5D681B02L, 0x2A6F2B94L,
- 0xB40BBE37L, 0xC30C8EA1L, 0x5A05DF1BL, 0x2D02EF8DL
-};
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*+
- *
- * Description:
- * Generate a CRC-32 from the data stream
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * dwCrcSeed - Seed for CRC32
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwCrc32(unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed)
-{
- unsigned long dwCrc;
-
- dwCrc = dwCrcSeed;
- while (cbByte--) {
- dwCrc = s_adwCrc32Table[(unsigned char)((dwCrc ^ (*pbyData)) & 0xFF)] ^ (dwCrc >> 8);
- pbyData++;
- }
-
- return dwCrc;
-}
-
-/*+
- *
- * Description:
- * To test CRC generator, input 8 bytes packet
- * -- 0xff 0xff 0xff 0xff 0x00 0x00 0x00 0x00
- * the generated CRC should be
- * -- 0xff 0xff 0xff 0xff
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwGetCrc32(unsigned char *pbyData, unsigned int cbByte)
-{
- return ~CRCdwCrc32(pbyData, cbByte, 0xFFFFFFFFL);
-}
-
-/*+
- *
- * Description:
- *
- * NOTE.... CRCdwGetCrc32Ex() is meant to be called iteratively:
- *      the output of one call is passed back in as the seed of the
- *      next call.  The final result must therefore be inverted to
- *      obtain the correct CRC.
- *
- * Parameters:
- * In:
- * pbyData - the data stream
- * cbByte - the length of the stream
- * Out:
- * none
- *
- * Return Value: CRC-32
- *
- -*/
-unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC)
-{
- return CRCdwCrc32(pbyData, cbByte, dwPreCRC);
-}
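
The table above is the standard reflected CRC-32 (polynomial 0xEDB88320), which the kernel already provides through <linux/crc32.h>; that is what makes this file removable. A hedged sketch of the equivalence, assuming callers only need the final CRC of a buffer (the wrapper name is made up for illustration):

#include <linux/crc32.h>

/* CRCdwCrc32(data, len, seed)  ~  crc32_le(seed, data, len)  */
/* CRCdwGetCrc32(data, len)     ~  ~crc32_le(~0U, data, len)  */
static u32 vnt_buffer_crc32(const u8 *data, size_t len)
{
	/* seed with all-ones, then invert, matching CRCdwGetCrc32() */
	return ~crc32_le(~0U, data, len);
}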
diff --git a/drivers/staging/vt6655/tcrc.h b/drivers/staging/vt6655/tcrc.h
deleted file mode 100644
index 82b5ddafae55..000000000000
--- a/drivers/staging/vt6655/tcrc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tcrc.h
- *
- * Purpose: Implement functions to calculate CRC
- *
- * Author: Tevin Chen
- *
- * Date: Jan. 28, 1997
- *
- */
-
-#ifndef __TCRC_H__
-#define __TCRC_H__
-
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-unsigned long CRCdwCrc32(unsigned char *pbyData, unsigned int cbByte, unsigned long dwCrcSeed);
-unsigned long CRCdwGetCrc32(unsigned char *pbyData, unsigned int cbByte);
-unsigned long CRCdwGetCrc32Ex(unsigned char *pbyData, unsigned int cbByte, unsigned long dwPreCRC);
-
-#endif // __TCRC_H__
diff --git a/drivers/staging/vt6655/tether.c b/drivers/staging/vt6655/tether.c
deleted file mode 100644
index 1e7d3e2115a9..000000000000
--- a/drivers/staging/vt6655/tether.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2003 VIA Networking, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tether.c
- *
- * Purpose:
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- * Functions:
- * ETHbyGetHashIndexByCrc32 - Calculate multicast hash value by CRC32
- * ETHbIsBufferCrc32Ok - Check CRC value of the buffer if Ok or not
- *
- * Revision History:
- *
- */
-
-#include "device.h"
-#include "tmacro.h"
-#include "tcrc.h"
-#include "tether.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description: Calculate multicast hash value by CRC32
- *
- * Parameters:
- * In:
- * pbyMultiAddr - Multicast Address
- * Out:
- * none
- *
- * Return Value: Hash value
- *
- */
-unsigned char ETHbyGetHashIndexByCrc32(unsigned char *pbyMultiAddr)
-{
- int ii;
- unsigned char byTmpHash;
- unsigned char byHash = 0;
-
- // get the least 6-bits from CRC generator
- byTmpHash = (unsigned char)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
- 0xFFFFFFFFL) & 0x3F);
- // reverse most bit to least bit
- for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
- byHash <<= 1;
- if (byTmpHash & 0x01)
- byHash |= 1;
- byTmpHash >>= 1;
- }
-
- // adjust 6-bits to the right most
- return byHash >> 2;
-}
-
-/*
- * Description: Check CRC value of the buffer if Ok or not
- *
- * Parameters:
- * In:
- * pbyBuffer - pointer of buffer (normally is rx buffer)
- * cbFrameLength - length of buffer, including CRC portion
- * Out:
- * none
- *
- * Return Value: true if ok; false if error.
- *
- */
-bool ETHbIsBufferCrc32Ok(unsigned char *pbyBuffer, unsigned int cbFrameLength)
-{
- unsigned long dwCRC;
-
- dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
- if (cpu_to_le32(*((unsigned long *)(pbyBuffer + cbFrameLength - 4))) != dwCRC)
- return false;
-
- return true;
-}
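
ETHbyGetHashIndexByCrc32() above takes the low six bits of the non-inverted little-endian CRC of the address and reverses their bit order. With <linux/crc32.h> and <linux/bitrev.h> this appears to collapse to a one-liner; sketch only, the name vnt_mc_hash is hypothetical.

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>

static u8 vnt_mc_hash(const u8 *addr)
{
	/* low 6 CRC bits, bit-reversed, as a 0..63 multicast hash bucket */
	return bitrev8(ether_crc_le(ETH_ALEN, addr) & 0x3f) >> 2;
}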
diff --git a/drivers/staging/vt6655/tether.h b/drivers/staging/vt6655/tether.h
deleted file mode 100644
index 94cc8830d8cc..000000000000
--- a/drivers/staging/vt6655/tether.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: tether.h
- *
- * Purpose:
- *
- * Author: Tevin Chen
- *
- * Date: Jan. 28, 1997
- *
- */
-
-#ifndef __TETHER_H__
-#define __TETHER_H__
-
-#include <linux/etherdevice.h>
-#include "ttype.h"
-
-/*--------------------- Export Definitions -------------------------*/
-//
-// constants
-//
-#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
-// Ethernet address string length
-
-#define MAX_LOOKAHEAD_SIZE ETH_FRAME_LEN
-
-#define U_MULTI_ADDR_LEN 8 // multicast address length
-
-#ifdef __BIG_ENDIAN
-
-#define TYPE_PKT_IP 0x0800 //
-#define TYPE_PKT_ARP 0x0806 //
-#define TYPE_PKT_RARP 0x8035 //
-#define TYPE_PKT_IPX 0x8137 //
-#define TYPE_PKT_802_1x 0x888e
-#define TYPE_PKT_PreAuth 0x88C7
-
-#define TYPE_PKT_PING_M_REQ 0x8011 // master request
-#define TYPE_PKT_PING_S_GNT 0x8022 // slave grant
-#define TYPE_PKT_PING_M 0x8077 // pingpong master packet
-#define TYPE_PKT_PING_S 0x8088 // pingpong slave packet
-#define TYPE_PKT_WOL_M_REQ 0x8033 // WOL waker request
-#define TYPE_PKT_WOL_S_GNT 0x8044 // WOL sleeper grant
-#define TYPE_MGMT_PROBE_RSP 0x5000
-#define TYPE_PKT_VNT_DIAG 0x8011 // Diag Pkt
-#define TYPE_PKT_VNT_PER 0x8888 // Diag PER Pkt
-//
-// wFrameCtl field in the S802_11Header
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define FC_TODS 0x0001
-#define FC_FROMDS 0x0002
-#define FC_MOREFRAG 0x0004
-#define FC_RETRY 0x0008
-#define FC_POWERMGT 0x0010
-#define FC_MOREDATA 0x0020
-#define FC_WEP 0x0040
-#define TYPE_802_11_ATIM 0x9000
-
-#define TYPE_802_11_DATA 0x0800
-#define TYPE_802_11_CTL 0x0400
-#define TYPE_802_11_MGMT 0x0000
-#define TYPE_802_11_MASK 0x0C00
-#define TYPE_SUBTYPE_MASK 0xFC00
-#define TYPE_802_11_NODATA 0x4000
-#define TYPE_DATE_NULL 0x4800
-
-#define TYPE_CTL_PSPOLL 0xa400
-#define TYPE_CTL_RTS 0xb400
-#define TYPE_CTL_CTS 0xc400
-#define TYPE_CTL_ACK 0xd400
-
-#else //if LITTLE_ENDIAN
-//
-// wType field in the SEthernetHeader
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define TYPE_PKT_IP 0x0008 //
-#define TYPE_PKT_ARP 0x0608 //
-#define TYPE_PKT_RARP 0x3580 //
-#define TYPE_PKT_IPX 0x3781 //
-
-#define TYPE_PKT_802_1x 0x8e88
-#define TYPE_PKT_PreAuth 0xC788
-
-#define TYPE_PKT_PING_M_REQ 0x1180 // master request
-#define TYPE_PKT_PING_S_GNT 0x2280 // slave grant
-#define TYPE_PKT_PING_M 0x7780 // pingpong master packet
-#define TYPE_PKT_PING_S 0x8880 // pingpong slave packet
-#define TYPE_PKT_WOL_M_REQ 0x3380 // WOL waker request
-#define TYPE_PKT_WOL_S_GNT 0x4480 // WOL sleeper grant
-#define TYPE_MGMT_PROBE_RSP 0x0050
-#define TYPE_PKT_VNT_DIAG 0x1180 // Diag Pkt
-#define TYPE_PKT_VNT_PER 0x8888 // Diag PER Pkt
-//
-// wFrameCtl field in the S802_11Header
-//
-// NOTE....
-// in network byte order, high byte is going first
-#define FC_TODS 0x0100
-#define FC_FROMDS 0x0200
-#define FC_MOREFRAG 0x0400
-#define FC_RETRY 0x0800
-#define FC_POWERMGT 0x1000
-#define FC_MOREDATA 0x2000
-#define FC_WEP 0x4000
-#define TYPE_802_11_ATIM 0x0090
-
-#define TYPE_802_11_DATA 0x0008
-#define TYPE_802_11_CTL 0x0004
-#define TYPE_802_11_MGMT 0x0000
-#define TYPE_802_11_MASK 0x000C
-#define TYPE_SUBTYPE_MASK 0x00FC
-#define TYPE_802_11_NODATA 0x0040
-#define TYPE_DATE_NULL 0x0048
-
-#define TYPE_CTL_PSPOLL 0x00a4
-#define TYPE_CTL_RTS 0x00b4
-#define TYPE_CTL_CTS 0x00c4
-#define TYPE_CTL_ACK 0x00d4
-
-#endif //#ifdef __BIG_ENDIAN
-
-#define WEP_IV_MASK 0x00FFFFFF
-
-/*--------------------- Export Types ------------------------------*/
-//
-// Ethernet packet
-//
-typedef struct tagSEthernetHeader {
- unsigned char abyDstAddr[ETH_ALEN];
- unsigned char abySrcAddr[ETH_ALEN];
- unsigned short wType;
-} __attribute__ ((__packed__))
-SEthernetHeader, *PSEthernetHeader;
-
-//
-// 802_3 packet
-//
-typedef struct tagS802_3Header {
- unsigned char abyDstAddr[ETH_ALEN];
- unsigned char abySrcAddr[ETH_ALEN];
- unsigned short wLen;
-} __attribute__ ((__packed__))
-S802_3Header, *PS802_3Header;
-
-//
-// 802_11 packet
-//
-typedef struct tagS802_11Header {
- unsigned short wFrameCtl;
- unsigned short wDurationID;
- unsigned char abyAddr1[ETH_ALEN];
- unsigned char abyAddr2[ETH_ALEN];
- unsigned char abyAddr3[ETH_ALEN];
- unsigned short wSeqCtl;
- unsigned char abyAddr4[ETH_ALEN];
-} __attribute__ ((__packed__))
-S802_11Header, *PS802_11Header;
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-unsigned char ETHbyGetHashIndexByCrc32(unsigned char *pbyMultiAddr);
-//unsigned char ETHbyGetHashIndexByCrc(unsigned char *pbyMultiAddr);
-bool ETHbIsBufferCrc32Ok(unsigned char *pbyBuffer, unsigned int cbFrameLength);
-
-#endif // __TETHER_H__
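
The dual big-endian/little-endian frame-control tables removed here are what <linux/ieee80211.h> already covers in an endian-safe way: frame_control stays __le16 and is tested with inline helpers. For example, the old (wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL test becomes something like the sketch below (the wrapper name is hypothetical):

#include <linux/ieee80211.h>

static bool vnt_is_pspoll(const struct ieee80211_hdr *hdr)
{
	/* endian-safe: ieee80211_is_pspoll() takes the __le16 field directly */
	return ieee80211_is_pspoll(hdr->frame_control);
}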
diff --git a/drivers/staging/vt6655/tkip.c b/drivers/staging/vt6655/tkip.c
deleted file mode 100644
index f758d021c60e..000000000000
--- a/drivers/staging/vt6655/tkip.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tkip.c
- *
- * Purpose: Implement functions for 802.11i TKIP
- *
- * Author: Jerry Chen
- *
- * Date: Mar. 11, 2003
- *
- * Functions:
- * TKIPvMixKey - Get TKIP RC4 Key from TK,TA, and TSC
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "tkip.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/* The Sbox is reduced to 2 16-bit wide tables, each with 256 entries. */
-/* The 2nd table is the same as the 1st but with the upper and lower */
-/* bytes swapped. To allow an endian tolerant implementation, the byte */
-/* halves have been expressed independently here. */
-static const unsigned char TKIP_Sbox_Lower[256] = {
- 0xA5, 0x84, 0x99, 0x8D, 0x0D, 0xBD, 0xB1, 0x54,
- 0x50, 0x03, 0xA9, 0x7D, 0x19, 0x62, 0xE6, 0x9A,
- 0x45, 0x9D, 0x40, 0x87, 0x15, 0xEB, 0xC9, 0x0B,
- 0xEC, 0x67, 0xFD, 0xEA, 0xBF, 0xF7, 0x96, 0x5B,
- 0xC2, 0x1C, 0xAE, 0x6A, 0x5A, 0x41, 0x02, 0x4F,
- 0x5C, 0xF4, 0x34, 0x08, 0x93, 0x73, 0x53, 0x3F,
- 0x0C, 0x52, 0x65, 0x5E, 0x28, 0xA1, 0x0F, 0xB5,
- 0x09, 0x36, 0x9B, 0x3D, 0x26, 0x69, 0xCD, 0x9F,
- 0x1B, 0x9E, 0x74, 0x2E, 0x2D, 0xB2, 0xEE, 0xFB,
- 0xF6, 0x4D, 0x61, 0xCE, 0x7B, 0x3E, 0x71, 0x97,
- 0xF5, 0x68, 0x00, 0x2C, 0x60, 0x1F, 0xC8, 0xED,
- 0xBE, 0x46, 0xD9, 0x4B, 0xDE, 0xD4, 0xE8, 0x4A,
- 0x6B, 0x2A, 0xE5, 0x16, 0xC5, 0xD7, 0x55, 0x94,
- 0xCF, 0x10, 0x06, 0x81, 0xF0, 0x44, 0xBA, 0xE3,
- 0xF3, 0xFE, 0xC0, 0x8A, 0xAD, 0xBC, 0x48, 0x04,
- 0xDF, 0xC1, 0x75, 0x63, 0x30, 0x1A, 0x0E, 0x6D,
- 0x4C, 0x14, 0x35, 0x2F, 0xE1, 0xA2, 0xCC, 0x39,
- 0x57, 0xF2, 0x82, 0x47, 0xAC, 0xE7, 0x2B, 0x95,
- 0xA0, 0x98, 0xD1, 0x7F, 0x66, 0x7E, 0xAB, 0x83,
- 0xCA, 0x29, 0xD3, 0x3C, 0x79, 0xE2, 0x1D, 0x76,
- 0x3B, 0x56, 0x4E, 0x1E, 0xDB, 0x0A, 0x6C, 0xE4,
- 0x5D, 0x6E, 0xEF, 0xA6, 0xA8, 0xA4, 0x37, 0x8B,
- 0x32, 0x43, 0x59, 0xB7, 0x8C, 0x64, 0xD2, 0xE0,
- 0xB4, 0xFA, 0x07, 0x25, 0xAF, 0x8E, 0xE9, 0x18,
- 0xD5, 0x88, 0x6F, 0x72, 0x24, 0xF1, 0xC7, 0x51,
- 0x23, 0x7C, 0x9C, 0x21, 0xDD, 0xDC, 0x86, 0x85,
- 0x90, 0x42, 0xC4, 0xAA, 0xD8, 0x05, 0x01, 0x12,
- 0xA3, 0x5F, 0xF9, 0xD0, 0x91, 0x58, 0x27, 0xB9,
- 0x38, 0x13, 0xB3, 0x33, 0xBB, 0x70, 0x89, 0xA7,
- 0xB6, 0x22, 0x92, 0x20, 0x49, 0xFF, 0x78, 0x7A,
- 0x8F, 0xF8, 0x80, 0x17, 0xDA, 0x31, 0xC6, 0xB8,
- 0xC3, 0xB0, 0x77, 0x11, 0xCB, 0xFC, 0xD6, 0x3A
-};
-
-static const unsigned char TKIP_Sbox_Upper[256] = {
- 0xC6, 0xF8, 0xEE, 0xF6, 0xFF, 0xD6, 0xDE, 0x91,
- 0x60, 0x02, 0xCE, 0x56, 0xE7, 0xB5, 0x4D, 0xEC,
- 0x8F, 0x1F, 0x89, 0xFA, 0xEF, 0xB2, 0x8E, 0xFB,
- 0x41, 0xB3, 0x5F, 0x45, 0x23, 0x53, 0xE4, 0x9B,
- 0x75, 0xE1, 0x3D, 0x4C, 0x6C, 0x7E, 0xF5, 0x83,
- 0x68, 0x51, 0xD1, 0xF9, 0xE2, 0xAB, 0x62, 0x2A,
- 0x08, 0x95, 0x46, 0x9D, 0x30, 0x37, 0x0A, 0x2F,
- 0x0E, 0x24, 0x1B, 0xDF, 0xCD, 0x4E, 0x7F, 0xEA,
- 0x12, 0x1D, 0x58, 0x34, 0x36, 0xDC, 0xB4, 0x5B,
- 0xA4, 0x76, 0xB7, 0x7D, 0x52, 0xDD, 0x5E, 0x13,
- 0xA6, 0xB9, 0x00, 0xC1, 0x40, 0xE3, 0x79, 0xB6,
- 0xD4, 0x8D, 0x67, 0x72, 0x94, 0x98, 0xB0, 0x85,
- 0xBB, 0xC5, 0x4F, 0xED, 0x86, 0x9A, 0x66, 0x11,
- 0x8A, 0xE9, 0x04, 0xFE, 0xA0, 0x78, 0x25, 0x4B,
- 0xA2, 0x5D, 0x80, 0x05, 0x3F, 0x21, 0x70, 0xF1,
- 0x63, 0x77, 0xAF, 0x42, 0x20, 0xE5, 0xFD, 0xBF,
- 0x81, 0x18, 0x26, 0xC3, 0xBE, 0x35, 0x88, 0x2E,
- 0x93, 0x55, 0xFC, 0x7A, 0xC8, 0xBA, 0x32, 0xE6,
- 0xC0, 0x19, 0x9E, 0xA3, 0x44, 0x54, 0x3B, 0x0B,
- 0x8C, 0xC7, 0x6B, 0x28, 0xA7, 0xBC, 0x16, 0xAD,
- 0xDB, 0x64, 0x74, 0x14, 0x92, 0x0C, 0x48, 0xB8,
- 0x9F, 0xBD, 0x43, 0xC4, 0x39, 0x31, 0xD3, 0xF2,
- 0xD5, 0x8B, 0x6E, 0xDA, 0x01, 0xB1, 0x9C, 0x49,
- 0xD8, 0xAC, 0xF3, 0xCF, 0xCA, 0xF4, 0x47, 0x10,
- 0x6F, 0xF0, 0x4A, 0x5C, 0x38, 0x57, 0x73, 0x97,
- 0xCB, 0xA1, 0xE8, 0x3E, 0x96, 0x61, 0x0D, 0x0F,
- 0xE0, 0x7C, 0x71, 0xCC, 0x90, 0x06, 0xF7, 0x1C,
- 0xC2, 0x6A, 0xAE, 0x69, 0x17, 0x99, 0x3A, 0x27,
- 0xD9, 0xEB, 0x2B, 0x22, 0xD2, 0xA9, 0x07, 0x33,
- 0x2D, 0x3C, 0x15, 0xC9, 0x87, 0xAA, 0x50, 0xA5,
- 0x03, 0x59, 0x09, 0x1A, 0x65, 0xD7, 0x84, 0xD0,
- 0x82, 0x29, 0x5A, 0x1E, 0x7B, 0xA8, 0x6D, 0x2C
-};
-
-//STKIPKeyManagement sTKIPKeyTable[MAX_TKIP_KEY];
-
-/*--------------------- Static Functions --------------------------*/
-unsigned int tkip_sbox(unsigned int index);
-unsigned int rotr1(unsigned int a);
-
-/*--------------------- Export Variables --------------------------*/
-
-/************************************************************/
-/* tkip_sbox() */
-/* Returns a 16 bit value from a 64K entry table. The Table */
-/* is synthesized from two 256 entry byte wide tables. */
-/************************************************************/
-unsigned int tkip_sbox(unsigned int index)
-{
- unsigned int index_low;
- unsigned int index_high;
- unsigned int left, right;
-
- index_low = (index % 256);
- index_high = ((index >> 8) % 256);
-
- left = TKIP_Sbox_Lower[index_low] + (TKIP_Sbox_Upper[index_low] * 256);
- right = TKIP_Sbox_Upper[index_high] + (TKIP_Sbox_Lower[index_high] * 256);
-
- return left ^ right;
-}
-
-unsigned int rotr1(unsigned int a)
-{
- unsigned int b;
-
- if ((a & 0x01) == 0x01)
- b = (a >> 1) | 0x8000;
- else
- b = (a >> 1) & 0x7fff;
-
- b = b % 65536;
- return b;
-}
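tkip_sbox() above widens the byte-wide S-box into a 16-bit lookup by pairing the lower and upper half tables, and rotr1() is simply a 16-bit rotate right by one. Two small checks of that reading, derived only from the tables and code above (the helper name below is made up for illustration):

/* Worked value from the tables above:
 * tkip_sbox(0) = (0xA5 | 0xC6 << 8) ^ (0xC6 | 0xA5 << 8)
 *              =  0xC6A5 ^ 0xA5C6 = 0x6363                              */

/* Direct 16-bit rotate-right-by-one; matches rotr1() for 16-bit inputs. */
static unsigned int example_rotr16(unsigned int a)
{
	a &= 0xFFFF;
	return ((a >> 1) | (a << 15)) & 0xFFFF;
}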
-
-/*
- * Description: Calculate RC4Key from TK, TA, and TSC
- *
- * Parameters:
- * In:
- * pbyTKey - TKey
- * pbyTA - TA
- * dwTSC - TSC
- * Out:
- * pbyRC4Key - RC4Key
- *
- * Return Value: none
- *
- */
-void TKIPvMixKey(
- unsigned char *pbyTKey,
- unsigned char *pbyTA,
- unsigned short wTSC15_0,
- unsigned long dwTSC47_16,
- unsigned char *pbyRC4Key
-)
-{
- unsigned int p1k[5];
- unsigned int tsc0, tsc1, tsc2;
- unsigned int ppk0, ppk1, ppk2, ppk3, ppk4, ppk5;
- unsigned long int pnl, pnh;
-
- int i, j;
-
- pnl = wTSC15_0;
- pnh = dwTSC47_16;
-
- tsc0 = (unsigned int)((pnh >> 16) % 65536); /* msb */
- tsc1 = (unsigned int)(pnh % 65536);
- tsc2 = (unsigned int)(pnl % 65536); /* lsb */
-
- /* Phase 1, step 1 */
- p1k[0] = tsc1;
- p1k[1] = tsc0;
- p1k[2] = (unsigned int)(pbyTA[0] + (pbyTA[1]*256));
- p1k[3] = (unsigned int)(pbyTA[2] + (pbyTA[3]*256));
- p1k[4] = (unsigned int)(pbyTA[4] + (pbyTA[5]*256));
-
- /* Phase 1, step 2 */
- for (i = 0; i < 8; i++) {
- j = 2 * (i & 1);
- p1k[0] = (p1k[0] + tkip_sbox((p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536)) % 65536;
- p1k[1] = (p1k[1] + tkip_sbox((p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536)) % 65536;
- p1k[2] = (p1k[2] + tkip_sbox((p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536)) % 65536;
- p1k[3] = (p1k[3] + tkip_sbox((p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536)) % 65536;
- p1k[4] = (p1k[4] + tkip_sbox((p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536)) % 65536;
- p1k[4] = (p1k[4] + i) % 65536;
- }
- /* Phase 2, Step 1 */
- ppk0 = p1k[0];
- ppk1 = p1k[1];
- ppk2 = p1k[2];
- ppk3 = p1k[3];
- ppk4 = p1k[4];
- ppk5 = (p1k[4] + tsc2) % 65536;
-
- /* Phase2, Step 2 */
- ppk0 = ppk0 + tkip_sbox((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
- ppk1 = ppk1 + tkip_sbox((ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
- ppk2 = ppk2 + tkip_sbox((ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
- ppk3 = ppk3 + tkip_sbox((ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
- ppk4 = ppk4 + tkip_sbox((ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
- ppk5 = ppk5 + tkip_sbox((ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
-
- ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
- ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
- ppk2 = ppk2 + rotr1(ppk1);
- ppk3 = ppk3 + rotr1(ppk2);
- ppk4 = ppk4 + rotr1(ppk3);
- ppk5 = ppk5 + rotr1(ppk4);
-
- /* Phase 2, Step 3 */
- pbyRC4Key[0] = (tsc2 >> 8) % 256;
- pbyRC4Key[1] = (((tsc2 >> 8) % 256) | 0x20) & 0x7f;
- pbyRC4Key[2] = tsc2 % 256;
- pbyRC4Key[3] = ((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) >> 1) % 256;
-
- pbyRC4Key[4] = ppk0 % 256;
- pbyRC4Key[5] = (ppk0 >> 8) % 256;
-
- pbyRC4Key[6] = ppk1 % 256;
- pbyRC4Key[7] = (ppk1 >> 8) % 256;
-
- pbyRC4Key[8] = ppk2 % 256;
- pbyRC4Key[9] = (ppk2 >> 8) % 256;
-
- pbyRC4Key[10] = ppk3 % 256;
- pbyRC4Key[11] = (ppk3 >> 8) % 256;
-
- pbyRC4Key[12] = ppk4 % 256;
- pbyRC4Key[13] = (ppk4 >> 8) % 256;
-
- pbyRC4Key[14] = ppk5 % 256;
- pbyRC4Key[15] = (ppk5 >> 8) % 256;
-}
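The mixer above is the standard two-phase TKIP key derivation: phase 1 folds the transmitter address and the upper 32 bits of the packet counter into p1k[], and phase 2 folds in the low 16 bits to produce a 16-byte RC4 seed whose first three bytes double as the WEP IV. A minimal call-site sketch, assuming the caller already holds the 128-bit temporal key and the transmitter address (the wrapper name and TSC handling below are illustrative only):

#include "tkip.h"	/* TKIPvMixKey(), as declared by the removed header */

static void example_mix_tkip_key(unsigned char *abyTKey,    /* 16-byte temporal key   */
				 unsigned char *abyTA,      /* 6-byte transmitter MAC */
				 unsigned long long ullTSC, /* 48-bit packet counter  */
				 unsigned char *abyRC4Key)  /* 16-byte output seed    */
{
	TKIPvMixKey(abyTKey, abyTA,
		    (unsigned short)(ullTSC & 0xFFFF), /* TSC15..0,  phase 2 input */
		    (unsigned long)(ullTSC >> 16),     /* TSC47..16, phase 1 input */
		    abyRC4Key);
	/* abyRC4Key[0..2] form the WEP IV; abyRC4Key[3..15] are the RC4 key body. */
}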
diff --git a/drivers/staging/vt6655/tkip.h b/drivers/staging/vt6655/tkip.h
deleted file mode 100644
index 3b6357ac6dee..000000000000
--- a/drivers/staging/vt6655/tkip.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: tkip.h
- *
- * Purpose: Implement functions for 802.11i TKIP
- *
- * Author: Jerry Chen
- *
- * Date: Mar. 11, 2003
- *
- */
-
-#ifndef __TKIP_H__
-#define __TKIP_H__
-
-#include "ttype.h"
-#include "tether.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define TKIP_KEY_LEN 16
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void TKIPvMixKey(
- unsigned char *pbyTKey,
- unsigned char *pbyTA,
- unsigned short wTSC15_0,
- unsigned long dwTSC47_16,
- unsigned char *pbyRC4Key
-);
-
-#endif // __TKIP_H__
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 59c6e72f993a..607b78f7a6a0 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -29,8 +29,6 @@
#ifndef __TMACRO_H__
#define __TMACRO_H__
-#include "ttype.h"
-
/****** Common helper macros ***********************************************/
#if !defined(LOBYTE)
diff --git a/drivers/staging/vt6655/ttype.h b/drivers/staging/vt6655/ttype.h
deleted file mode 100644
index 747ef62ec9be..000000000000
--- a/drivers/staging/vt6655/ttype.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: ttype.h
- *
- * Purpose: define basic common types and macros
- *
- * Author: Tevin Chen
- *
- * Date: May 21, 1996
- *
- */
-
-#ifndef __TTYPE_H__
-#define __TTYPE_H__
-
-/******* Common definitions and typedefs ***********************************/
-
-#ifndef WPA_SM_Transtatus
-#define WPA_SM_Transtatus
-#endif
-
-#ifndef Calcu_LinkQual
-#define Calcu_LinkQual
-#endif
-
-#endif // __TTYPE_H__
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index c5c889cade25..c53703a772f5 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -30,7 +30,6 @@
#define __UPC_H__
#include "device.h"
-#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
diff --git a/drivers/staging/vt6655/vntconfiguration.dat b/drivers/staging/vt6655/vntconfiguration.dat
deleted file mode 100644
index 0064ddce7c11..000000000000
--- a/drivers/staging/vt6655/vntconfiguration.dat
+++ /dev/null
@@ -1 +0,0 @@
-ZONETYPE=EUROPE \ No newline at end of file
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
deleted file mode 100644
index 59f66fe47352..000000000000
--- a/drivers/staging/vt6655/vntwifi.c
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: vntwifi.c
- *
- * Purpose: export functions for vntwifi lib
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: feb. 2, 2005
- *
- */
-
-#include "vntwifi.h"
-#include "IEEE11h.h"
-#include "country.h"
-#include "device.h"
-#include "wmgr.h"
-#include "datarate.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Set Operation Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eOPMode - Operation Mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetOPMode(
- void *pMgmtHandle,
- WMAC_CONFIG_MODE eOPMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eConfigMode = eOPMode;
-}
-
-/*+
- *
- * Description:
- * Set IBSS parameters
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * wBeaconPeriod - Beacon Period
- * wATIMWindow - ATIM window
- * uChannel - channel number
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetIBSSParameter(
- void *pMgmtHandle,
- unsigned short wBeaconPeriod,
- unsigned short wATIMWindow,
- unsigned int uChannel
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->wIBSSBeaconPeriod = wBeaconPeriod;
- pMgmt->wIBSSATIMWindow = wATIMWindow;
- pMgmt->uIBSSChannel = uChannel;
-}
-
-/*+
- *
- * Description:
- * Get current SSID
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current SSID pointer.
- *
- -*/
-PWLAN_IE_SSID
-VNTWIFIpGetCurrentSSID(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- return (PWLAN_IE_SSID) pMgmt->abyCurrSSID;
-}
-
-/*+
- *
- * Description:
- * Get current link channel
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current Channel.
- *
- -*/
-unsigned int
-VNTWIFIpGetCurrentChannel(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if (pMgmtHandle != NULL)
- return pMgmt->uCurrChannel;
-
- return 0;
-}
-
-/*+
- *
- * Description:
- * Get current Assoc ID
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * Out:
- * none
- *
- * Return Value: current Assoc ID
- *
- -*/
-unsigned short
-VNTWIFIwGetAssocID(
- void *pMgmtHandle
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- return pMgmt->wCurrAID;
-}
-
-/*+
- *
- * Description:
- * This routine returns the maximum supported rate from the IEs
- *
- * Parameters:
- * In:
- * pSupportRateIEs
- * pExtSupportRateIEs
- *
- * Out:
- *
- * Return Value: max support rate
- *
- -*/
-unsigned char
-VNTWIFIbyGetMaxSupportRate(
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-)
-{
- unsigned char byMaxSupportRate = RATE_1M;
- unsigned char bySupportRate = RATE_1M;
- unsigned int ii = 0;
-
- if (pSupportRateIEs) {
- for (ii = 0; ii < pSupportRateIEs->len; ii++) {
- bySupportRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
- if (bySupportRate > byMaxSupportRate)
- byMaxSupportRate = bySupportRate;
-
- }
- }
- if (pExtSupportRateIEs) {
- for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
- bySupportRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
- if (bySupportRate > byMaxSupportRate)
- byMaxSupportRate = bySupportRate;
-
- }
- }
-
- return byMaxSupportRate;
-}
-
-/*+
- *
- * Description:
- * This routine returns the data rate of the ACK packet
- *
- * Parameters:
- * In:
- * byRxDataRate
- * pSupportRateIEs
- * pExtSupportRateIEs
- *
- * Out:
- *
- * Return Value: ACK Tx rate
- *
- -*/
-unsigned char
-VNTWIFIbyGetACKTxRate(
- unsigned char byRxDataRate,
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-)
-{
- unsigned char byMaxAckRate;
- unsigned char byBasicRate;
- unsigned int ii;
-
- if (byRxDataRate <= RATE_11M) {
- byMaxAckRate = RATE_1M;
- } else {
- /* 24M is mandatory for 802.11a and 802.11g */
- byMaxAckRate = RATE_24M;
- }
- if (pSupportRateIEs) {
- for (ii = 0; ii < pSupportRateIEs->len; ii++) {
- if (pSupportRateIEs->abyRates[ii] & 0x80) {
- byBasicRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]);
- if ((byBasicRate <= byRxDataRate) &&
- (byBasicRate > byMaxAckRate)) {
- byMaxAckRate = byBasicRate;
- }
- }
- }
- }
- if (pExtSupportRateIEs) {
- for (ii = 0; ii < pExtSupportRateIEs->len; ii++) {
- if (pExtSupportRateIEs->abyRates[ii] & 0x80) {
- byBasicRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]);
- if ((byBasicRate <= byRxDataRate) &&
- (byBasicRate > byMaxAckRate)) {
- byMaxAckRate = byBasicRate;
- }
- }
- }
- }
-
- return byMaxAckRate;
-}
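The loops above treat a rate octet with bit 0x80 set as a basic (mandatory) rate and raise the ACK rate to the highest basic rate that does not exceed the received data rate. A hedged illustration of that encoding (units of 500 kb/s; the array is an example, not data taken from the driver):

/* Example Supported Rates octets for a typical B/G BSS. */
static const unsigned char aby_example_rates[] = {
	0x82,	/*  1   Mb/s, basic    */
	0x84,	/*  2   Mb/s, basic    */
	0x8B,	/*  5.5 Mb/s, basic    */
	0x96,	/* 11   Mb/s, basic    */
	0x24,	/* 18   Mb/s, optional */
	0x30,	/* 24   Mb/s, optional */
};
/* With byRxDataRate == RATE_11M the scan settles on RATE_11M: 11 Mb/s is
 * the highest basic rate not exceeding the receive rate, and the optional
 * OFDM entries are skipped because their 0x80 bit is clear.             */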
-
-/*+
- *
- * Description:
- * Set Authentication Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eAuthMode - Authentication mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetAuthenticationMode(
- void *pMgmtHandle,
- WMAC_AUTHENTICATION_MODE eAuthMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eAuthenMode = eAuthMode;
- if ((eAuthMode == WMAC_AUTH_SHAREKEY) ||
- (eAuthMode == WMAC_AUTH_AUTO)) {
- pMgmt->bShareKeyAlgorithm = true;
- } else {
- pMgmt->bShareKeyAlgorithm = false;
- }
-}
-
-/*+
- *
- * Description:
- * Set Encryption Mode
- *
- * Parameters:
- * In:
- * pMgmtHandle - pointer to management object
- * eEncryptionMode - Encryption mode
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvSetEncryptionMode(
- void *pMgmtHandle,
- WMAC_ENCRYPTION_MODE eEncryptionMode
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- pMgmt->eEncryptionMode = eEncryptionMode;
- if ((eEncryptionMode == WMAC_ENCRYPTION_WEPEnabled) ||
- (eEncryptionMode == WMAC_ENCRYPTION_TKIPEnabled) ||
- (eEncryptionMode == WMAC_ENCRYPTION_AESEnabled)) {
- pMgmt->bPrivacyInvoked = true;
- } else {
- pMgmt->bPrivacyInvoked = false;
- }
-}
-
-bool
-VNTWIFIbConfigPhyMode(
- void *pMgmtHandle,
- CARD_PHY_TYPE ePhyType
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if ((ePhyType != PHY_TYPE_AUTO) &&
- (ePhyType != pMgmt->eCurrentPHYMode)) {
- if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL) == true)
- pMgmt->eCurrentPHYMode = ePhyType;
- else
- return false;
- }
- pMgmt->eConfigPHYMode = ePhyType;
- return true;
-}
-
-void
-VNTWIFIbGetConfigPhyMode(
- void *pMgmtHandle,
- void *pePhyType
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if ((pMgmt != NULL) && (pePhyType != NULL))
- *(PCARD_PHY_TYPE)pePhyType = pMgmt->eConfigPHYMode;
-}
-
-/*+
- *
- * Description:
- * Clear BSS List Database except current assoc BSS
- *
- * Parameters:
- * In:
- * pMgmtHandle - Management Object structure
- * bLinkPass - Current Link status
- * Out:
- *
- * Return Value: None.
- *
- -*/
-
-/*+
- *
- * Description:
- * Query BSS List in management database
- *
- * Parameters:
- * In:
- * pMgmtHandle - Management Object structure
- * Out:
- * puBSSCount - BSS count
- * pvFirstBSS - pointer to first BSS
- *
- * Return Value: None.
- *
- -*/
-
-void
-VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount, void **pvFirstBSS)
-{
- unsigned int ii = 0;
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- PKnownBSS pBSS = NULL;
- unsigned int uCount = 0;
-
- *pvFirstBSS = NULL;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
-
- if (*pvFirstBSS == NULL)
- *pvFirstBSS = &(pMgmt->sBSSList[ii]);
-
- uCount++;
- }
- *puBSSCount = uCount;
-}
-
-void
-VNTWIFIvGetNextBSS(
- void *pMgmtHandle,
- void *pvCurrentBSS,
- void **pvNextBSS
-)
-{
- PKnownBSS pBSS = (PKnownBSS) pvCurrentBSS;
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- *pvNextBSS = NULL;
-
- while (*pvNextBSS == NULL) {
- pBSS++;
- if (pBSS > &(pMgmt->sBSSList[MAX_BSS_NUM]))
- return;
-
- if (pBSS->bActive == true) {
- *pvNextBSS = pBSS;
- return;
- }
- }
-}
-
-/*+
- *
- * Description:
- * Update Tx attempts and Tx failure counters in the node DB
- *
- * In:
- * Out:
- * none
- *
- * Return Value: none
- *
- -*/
-void
-VNTWIFIvUpdateNodeTxCounter(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- bool bTxOk,
- unsigned short wRate,
- unsigned char *pbyTxFailCount
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- unsigned int uNodeIndex = 0;
- unsigned int ii;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == false)
- return;
- }
-
- pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++;
- if (bTxOk) {
- /* transmit succeeded: count it in both the overall and per-rate totals */
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
- pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++;
- }
- pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += pbyTxFailCount[MAX_RATE];
- for (ii = 0; ii < MAX_RATE; ii++)
- pMgmt->sNodeDBTable[uNodeIndex].uTxFail[ii] += pbyTxFailCount[ii];
-}
-
-void
-VNTWIFIvGetTxRate(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- unsigned short *pwTxDataRate,
- unsigned char *pbyACKRate,
- unsigned char *pbyCCKBasicRate,
- unsigned char *pbyOFDMBasicRate
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
- unsigned int uNodeIndex = 0;
- unsigned short wTxDataRate = RATE_1M;
- unsigned char byACKRate = RATE_1M;
- unsigned char byCCKBasicRate = RATE_1M;
- unsigned char byOFDMBasicRate = RATE_24M;
- PWLAN_IE_SUPP_RATES pSupportRateIEs = NULL;
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs = NULL;
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
- (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- /* Adhoc Tx rate decided from node DB */
- if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex)) {
- wTxDataRate = (pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrSuppRates);
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrExtSuppRates);
- } else {
- if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A)
- wTxDataRate = RATE_2M;
- else
- wTxDataRate = RATE_24M;
-
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
- }
- } else { /* Infrastructure: rate decided from AP Node, index = 0 */
-
- wTxDataRate = (pMgmt->sNodeDBTable[0].wTxDataRate);
-
- pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates;
- pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates;
- }
- byACKRate = VNTWIFIbyGetACKTxRate((unsigned char) wTxDataRate,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- if (byACKRate > (unsigned char) wTxDataRate)
- byACKRate = (unsigned char) wTxDataRate;
-
- byCCKBasicRate = VNTWIFIbyGetACKTxRate(RATE_11M,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- byOFDMBasicRate = VNTWIFIbyGetACKTxRate(RATE_54M,
- pSupportRateIEs,
- pExtSupportRateIEs
-);
- *pwTxDataRate = wTxDataRate;
- *pbyACKRate = byACKRate;
- *pbyCCKBasicRate = byCCKBasicRate;
- *pbyOFDMBasicRate = byOFDMBasicRate;
-}
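The two trailing calls reuse the ACK-rate helper to derive per-family basic rates rather than keeping separate tables; reconstructed from the helper's logic above:

/*
 * VNTWIFIbyGetACKTxRate(RATE_11M, ...): highest basic rate <= 11 Mb/s,
 *                                       i.e. always a CCK basic rate.
 * VNTWIFIbyGetACKTxRate(RATE_54M, ...): highest basic rate with a 24 Mb/s
 *                                       floor, i.e. always an OFDM rate.
 */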
-
-unsigned char
-VNTWIFIbyGetKeyCypher(
- void *pMgmtHandle,
- bool bGroupKey
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
-
- if (bGroupKey)
- return pMgmt->byCSSGK;
- else
- return pMgmt->byCSSPK;
-}
-
-bool
-VNTWIFIbSetPMKIDCache(
- void *pMgmtObject,
- unsigned long ulCount,
- void *pPMKIDInfo
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- if (ulCount > MAX_PMKID_CACHE)
- return false;
-
- pMgmt->gsPMKIDCache.BSSIDInfoCount = ulCount;
- memcpy(pMgmt->gsPMKIDCache.BSSIDInfo, pPMKIDInfo, (ulCount*sizeof(PMKIDInfo)));
- return true;
-}
-
-unsigned short
-VNTWIFIwGetMaxSupportRate(
- void *pMgmtObject
-)
-{
- unsigned short wRate = RATE_54M;
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- for (wRate = RATE_54M; wRate > RATE_1M; wRate--) {
- if (pMgmt->sNodeDBTable[0].wSuppRate & (1<<wRate))
- return wRate;
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)
- return RATE_6M;
- else
- return RATE_1M;
-}
-
-void
-VNTWIFIvSet11h(
- void *pMgmtObject,
- bool b11hEnable
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- pMgmt->b11hEnable = b11hEnable;
-}
-
-bool
-VNTWIFIbMeasureReport(
- void *pMgmtObject,
- bool bEndOfReport,
- void *pvMeasureEID,
- unsigned char byReportMode,
- unsigned char byBasicMap,
- unsigned char byCCAFraction,
- unsigned char *pbyRPIs
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
- unsigned char *pbyCurrentEID = (unsigned char *)(pMgmt->pCurrMeasureEIDRep);
-
- if ((pvMeasureEID != NULL) &&
- (pMgmt->uLengthOfRepEIDs < (WLAN_A3FR_MAXLEN - sizeof(MEASEURE_REP) - sizeof(WLAN_80211HDR_A3) - 3))
-) {
- pMgmt->pCurrMeasureEIDRep->byElementID = WLAN_EID_MEASURE_REP;
- pMgmt->pCurrMeasureEIDRep->len = 3;
- pMgmt->pCurrMeasureEIDRep->byToken = ((PWLAN_IE_MEASURE_REQ)pvMeasureEID)->byToken;
- pMgmt->pCurrMeasureEIDRep->byMode = byReportMode;
- pMgmt->pCurrMeasureEIDRep->byType = ((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->byType;
- switch (pMgmt->pCurrMeasureEIDRep->byType) {
- case MEASURE_TYPE_BASIC:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_BASIC);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sBasic),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- pMgmt->pCurrMeasureEIDRep->sRep.sBasic.byMap = byBasicMap;
- break;
- case MEASURE_TYPE_CCA:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_CCA);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sCCA),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- pMgmt->pCurrMeasureEIDRep->sRep.sCCA.byCCABusyFraction = byCCAFraction;
- break;
- case MEASURE_TYPE_RPI:
- pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_RPI);
- memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sRPI),
- &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq),
- sizeof(MEASEURE_REQ));
- memcpy(pMgmt->pCurrMeasureEIDRep->sRep.sRPI.abyRPIdensity, pbyRPIs, 8);
- break;
- default:
- break;
- }
- pbyCurrentEID += (2 + pMgmt->pCurrMeasureEIDRep->len);
- pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len);
- pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID;
- }
- if (bEndOfReport)
- IEEE11hbMSRRepTx(pMgmt);
-
- return true;
-}
-
-bool
-VNTWIFIbChannelSwitch(
- void *pMgmtObject,
- unsigned char byNewChannel
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
-
- pMgmt->uCurrChannel = byNewChannel;
- pMgmt->bSwitchChannel = false;
- return true;
-}
diff --git a/drivers/staging/vt6655/vntwifi.h b/drivers/staging/vt6655/vntwifi.h
deleted file mode 100644
index 880b8ab109be..000000000000
--- a/drivers/staging/vt6655/vntwifi.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: vntwifi.h
- *
- * Purpose: export VNT Host WiFi library function
- *
- * Author: Yiching Chen
- *
- * Date: Jan 7, 2004
- *
- */
-
-#ifndef __VNTWIFI_H__
-#define __VNTWIFI_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "card.h"
-#include "wpa2.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define RATE_1M 0
-#define RATE_2M 1
-#define RATE_5M 2
-#define RATE_11M 3
-#define RATE_6M 4
-#define RATE_9M 5
-#define RATE_12M 6
-#define RATE_18M 7
-#define RATE_24M 8
-#define RATE_36M 9
-#define RATE_48M 10
-#define RATE_54M 11
-#define RATE_AUTO 12
-#define MAX_RATE 12
-
-// key CipherSuite
-#define KEY_CTL_WEP 0x00
-#define KEY_CTL_NONE 0x01
-#define KEY_CTL_TKIP 0x02
-#define KEY_CTL_CCMP 0x03
-#define KEY_CTL_INVALID 0xFF
-
-#define CHANNEL_MAX_24G 14
-
-#define MAX_BSS_NUM 42
-
-// Pre-configured Authentication Mode (from XP)
-typedef enum tagWMAC_AUTHENTICATION_MODE {
- WMAC_AUTH_OPEN,
- WMAC_AUTH_SHAREKEY,
- WMAC_AUTH_AUTO,
- WMAC_AUTH_WPA,
- WMAC_AUTH_WPAPSK,
- WMAC_AUTH_WPANONE,
- WMAC_AUTH_WPA2,
- WMAC_AUTH_WPA2PSK,
- WMAC_AUTH_MAX // Not a real mode, defined as upper bound
-} WMAC_AUTHENTICATION_MODE, *PWMAC_AUTHENTICATION_MODE;
-
-typedef enum tagWMAC_ENCRYPTION_MODE {
- WMAC_ENCRYPTION_WEPEnabled,
- WMAC_ENCRYPTION_WEPDisabled,
- WMAC_ENCRYPTION_WEPKeyAbsent,
- WMAC_ENCRYPTION_WEPNotSupported,
- WMAC_ENCRYPTION_TKIPEnabled,
- WMAC_ENCRYPTION_TKIPKeyAbsent,
- WMAC_ENCRYPTION_AESEnabled,
- WMAC_ENCRYPTION_AESKeyAbsent
-} WMAC_ENCRYPTION_MODE, *PWMAC_ENCRYPTION_MODE;
-
-// Pre-configured Mode (from XP)
-
-typedef enum tagWMAC_CONFIG_MODE {
- WMAC_CONFIG_ESS_STA = 0,
- WMAC_CONFIG_IBSS_STA,
- WMAC_CONFIG_AUTO,
- WMAC_CONFIG_AP
-} WMAC_CONFIG_MODE, *PWMAC_CONFIG_MODE;
-
-typedef enum tagWMAC_POWER_MODE {
- WMAC_POWER_CAM,
- WMAC_POWER_FAST,
- WMAC_POWER_MAX
-} WMAC_POWER_MODE, *PWMAC_POWER_MODE;
-
-#define VNTWIFIbIsShortSlotTime(wCapInfo) \
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(wCapInfo) \
-
-#define VNTWIFIbIsProtectMode(byERP) \
- ((byERP & WLAN_EID_ERP_USE_PROTECTION) != 0) \
-
-#define VNTWIFIbIsBarkerMode(byERP) \
- ((byERP & WLAN_EID_ERP_BARKER_MODE) != 0) \
-
-#define VNTWIFIbIsShortPreamble(wCapInfo) \
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(wCapInfo) \
-
-#define VNTWIFIbIsEncryption(wCapInfo) \
- WLAN_GET_CAP_INFO_PRIVACY(wCapInfo) \
-
-#define VNTWIFIbIsESS(wCapInfo) \
- WLAN_GET_CAP_INFO_ESS(wCapInfo) \
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-VNTWIFIvSetIBSSParameter(
- void *pMgmtHandle,
- unsigned short wBeaconPeriod,
- unsigned short wATIMWindow,
- unsigned int uChannel
-);
-
-void
-VNTWIFIvSetOPMode(
- void *pMgmtHandle,
- WMAC_CONFIG_MODE eOPMode
-);
-
-PWLAN_IE_SSID
-VNTWIFIpGetCurrentSSID(
- void *pMgmtHandle
-);
-
-unsigned int
-VNTWIFIpGetCurrentChannel(
- void *pMgmtHandle
-);
-
-unsigned short
-VNTWIFIwGetAssocID(
- void *pMgmtHandle
-);
-
-unsigned char
-VNTWIFIbyGetMaxSupportRate(
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-);
-
-unsigned char
-VNTWIFIbyGetACKTxRate(
- unsigned char byRxDataRate,
- PWLAN_IE_SUPP_RATES pSupportRateIEs,
- PWLAN_IE_SUPP_RATES pExtSupportRateIEs
-);
-
-void
-VNTWIFIvSetAuthenticationMode(
- void *pMgmtHandle,
- WMAC_AUTHENTICATION_MODE eAuthMode
-);
-
-void
-VNTWIFIvSetEncryptionMode(
- void *pMgmtHandle,
- WMAC_ENCRYPTION_MODE eEncryptionMode
-);
-
-bool
-VNTWIFIbConfigPhyMode(
- void *pMgmtHandle,
- CARD_PHY_TYPE ePhyType
-);
-
-void
-VNTWIFIbGetConfigPhyMode(
- void *pMgmtHandle,
- void *pePhyType
-);
-
-void
-VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount,
- void **pvFirstBSS);
-
-void
-VNTWIFIvGetNextBSS(
- void *pMgmtHandle,
- void *pvCurrentBSS,
- void **pvNextBSS
-);
-
-void
-VNTWIFIvUpdateNodeTxCounter(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- bool bTxOk,
- unsigned short wRate,
- unsigned char *pbyTxFailCount
-);
-
-void
-VNTWIFIvGetTxRate(
- void *pMgmtHandle,
- unsigned char *pbyDestAddress,
- unsigned short *pwTxDataRate,
- unsigned char *pbyACKRate,
- unsigned char *pbyCCKBasicRate,
- unsigned char *pbyOFDMBasicRate
-);
-
-unsigned char
-VNTWIFIbyGetKeyCypher(
- void *pMgmtHandle,
- bool bGroupKey
-);
-
-bool
-VNTWIFIbSetPMKIDCache(
- void *pMgmtObject,
- unsigned long ulCount,
- void *pPMKIDInfo
-);
-
-bool
-VNTWIFIbCommandRunning(
- void *pMgmtObject
-);
-
-unsigned short
-VNTWIFIwGetMaxSupportRate(
- void *pMgmtObject
-);
-
-// for 802.11h
-void
-VNTWIFIvSet11h(
- void *pMgmtObject,
- bool b11hEnable
-);
-
-bool
-VNTWIFIbMeasureReport(
- void *pMgmtObject,
- bool bEndOfReport,
- void *pvMeasureEID,
- unsigned char byReportMode,
- unsigned char byBasicMap,
- unsigned char byCCAFraction,
- unsigned char *pbyRPIs
-);
-
-bool
-VNTWIFIbChannelSwitch(
- void *pMgmtObject,
- unsigned char byNewChannel
-);
-
-#endif //__VNTWIFI_H__
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
deleted file mode 100644
index 985e1b99362d..000000000000
--- a/drivers/staging/vt6655/wcmd.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wcmd.c
- *
- * Purpose: Handles the management command interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2003
- *
- * Functions:
- * s_vProbeChannel - Active scan channel
- * s_MgrMakeProbeRequest - Make ProbeRequest packet
- * CommandTimer - Timer function to handle command
- * s_bCommandComplete - Command Complete function
- * bScheduleCommand - Push a command and let the command scheduler run it
- * vCommandTimer - Command callback function
- * vCommandTimerWait - Callback timer
- * bClearBSSID_SCAN- Clear BSSID_SCAN cmd in CMD Queue
- *
- * Revision History:
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "device.h"
-#include "mac.h"
-#include "card.h"
-#include "80211hdr.h"
-#include "wcmd.h"
-#include "wmgr.h"
-#include "power.h"
-#include "wctl.h"
-#include "baseband.h"
-#include "rxtx.h"
-#include "rf.h"
-#include "iowpa.h"
-#include "channel.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-static
-void
-s_vProbeChannel(
- struct vnt_private *pDevice
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-bool
-s_bCommandComplete(
- struct vnt_private *pDevice
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*
- * Description:
- * Stop AdHoc beacon during scan process
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-static
-void
-vAdHocBeaconStop(struct vnt_private *pDevice)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- bool bStop;
-
- /*
- * temporarily stop Beacon packet for AdHoc Server
- * if all of the following conditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- * (3) One of the following conditions is met
- * (3.1) AdHoc channel is in B/G band and the
- * current scan channel is in A band
- * or
- * (3.2) AdHoc channel is in the A band
- */
- bStop = false;
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
- if ((pMgmt->uIBSSChannel <= CB_MAX_CHANNEL_24G) &&
- (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) {
- bStop = true;
- }
- if (pMgmt->uIBSSChannel > CB_MAX_CHANNEL_24G)
- bStop = true;
-
- }
-
- if (bStop)
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
-} /* vAdHocBeaconStop */
-
-/*
- * Description:
- * Restart AdHoc beacon after scan process complete
- *
- * Parameters:
- * In:
- * pDevice - Pointer to the adapter
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-static
-void
-vAdHocBeaconRestart(struct vnt_private *pDevice)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- /*
- * Restart Beacon packet for AdHoc Server
- * if all of the following conditions are met:
- * (1) STA is in AdHoc mode
- * (2) VT3253 is programmed as automatic Beacon Transmitting
- */
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->eCurrState >= WMAC_STATE_STARTED)) {
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-}
-
-/*+
- *
- * Routine Description:
- * Prepare and send probe request management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vProbeChannel(
- struct vnt_private *pDevice
-)
-{
- //1M, 2M, 5.5M, 11M, 18M, 24M, 36M, 54M
- unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 48M
- unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
- unsigned char *pbyRate;
- PSTxMgmtPacket pTxPacket;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned int ii;
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11A)
- pbyRate = &abyCurrSuppRatesA[0];
- else if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- pbyRate = &abyCurrSuppRatesB[0];
- else
- pbyRate = &abyCurrSuppRatesG[0];
-
- // build a probe request frame and send it
- pTxPacket = s_MgrMakeProbeRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyScanBSSID,
- (PWLAN_IE_SSID)pMgmt->abyScanSSID,
- (PWLAN_IE_SUPP_RATES)pbyRate,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRatesG
- );
-
- if (pTxPacket != NULL) {
- for (ii = 0; ii < 2; ii++) {
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Probe request sending fail..\n");
- else
- pr_debug("Probe request is sending..\n");
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a probe request frame
- *
- *
- * Return Value:
- * A ptr to Tx frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pScanBSSID,
- PWLAN_IE_SSID pSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBEREQ sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBEREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBEREQ_FR_MAXLEN;
- vMgrEncodeProbeRequest(&sFrame);
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBEREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pScanBSSID, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pScanBSSID, WLAN_BSSID_LEN);
- // Copy the SSID; pSSID->len == 0 indicates a broadcast SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pSSID, pSSID->len + WLAN_IEHDR_LEN);
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- // Copy the extension rate set
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
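The frame assembled above is a three-address management header followed by at most three information elements. For a broadcast probe request on an 11g channel the body mirrors the rate arrays defined in s_vProbeChannel() (element IDs 0, 1 and 50 are the standard SSID, Supported Rates and Extended Supported Rates identifiers):

/*
 *   SSID:            00 00                          (zero length = broadcast)
 *   Supp Rates:      01 08 02 04 0B 16 24 30 48 6C  (1/2/5.5/11/18/24/36/54)
 *   Ext Supp Rates:  32 04 0C 12 18 60              (6/9/12/48, 11g only)
 */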
-
-void
-vCommandTimerWait(
- void *hDeviceContext,
- unsigned int MSecond
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- // RUN_AT: 1 msec ~= HZ/1024 jiffies; the >> 10 below approximates dividing by 1000
- pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
- add_timer(&pDevice->sTimerCommand);
-}
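The shift by 10 above is a cheap stand-in for dividing by 1000, so the timeout lands slightly short (100 ms at HZ = 250 becomes (100 * 250) >> 10 = 24 jiffies instead of 25). A sketch of the same wait written with the stock jiffies helpers, assuming the timer has already been initialised once elsewhere:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Rearm the command timer using the standard ms-to-jiffies conversion
 * instead of the manual >> 10 approximation.                           */
static void example_command_timer_wait(struct vnt_private *pDevice,
				       unsigned int msecs)
{
	mod_timer(&pDevice->sTimerCommand, jiffies + msecs_to_jiffies(msecs));
}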
-
-void
-vCommandTimer(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- PWLAN_IE_SSID pItemSSIDCurr;
- CMD_STATUS Status;
- unsigned int ii;
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- struct sk_buff *skb;
-
- if (pDevice->dwDiagRefCount != 0)
- return;
- if (!pDevice->bCmdRunning)
- return;
-
- spin_lock_irq(&pDevice->lock);
-
- switch (pDevice->eCommandState) {
- case WLAN_CMD_SCAN_START:
-
- pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- s_bCommandComplete(pDevice);
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_AP);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-
- pr_debug("eCommandState= WLAN_CMD_SCAN_START\n");
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID;
- // wait all Data TD complete
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0) {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, 10);
- return;
- }
-
- if (pMgmt->uScanChannel == 0) {
- pMgmt->uScanChannel = pDevice->byMinChannel;
- // Set Baseband to be more sensitive.
-
- }
- if (pMgmt->uScanChannel > pDevice->byMaxChannel) {
- pMgmt->eScanState = WMAC_NO_SCANNING;
-
- // Set Baseband's sensitivity back.
- // Set channel back
- set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
- pr_debug("Scanning, set back to channel: [%d]\n",
- pMgmt->uCurrChannel);
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- else
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_STATION);
-
- vAdHocBeaconRestart(pDevice);
- s_bCommandComplete(pDevice);
-
- } else {
-//2008-8-4 <add> by chester
- if (!is_channel_valid(pMgmt->uScanChannel)) {
- pr_debug("Invalid channel pMgmt->uScanChannel = %d\n",
- pMgmt->uScanChannel);
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pMgmt->uScanChannel == pDevice->byMinChannel) {
- pMgmt->abyScanBSSID[0] = 0xFF;
- pMgmt->abyScanBSSID[1] = 0xFF;
- pMgmt->abyScanBSSID[2] = 0xFF;
- pMgmt->abyScanBSSID[3] = 0xFF;
- pMgmt->abyScanBSSID[4] = 0xFF;
- pMgmt->abyScanBSSID[5] = 0xFF;
- pItemSSID->byElementID = WLAN_EID_SSID;
- pMgmt->eScanState = WMAC_IS_SCANNING;
-
- }
-
- vAdHocBeaconStop(pDevice);
-
- if (set_channel(pMgmt->pAdapter, pMgmt->uScanChannel))
- pr_debug("SCAN Channel: %d\n",
- pMgmt->uScanChannel);
- else
- pr_debug("SET SCAN Channel Fail: %d\n",
- pMgmt->uScanChannel);
-
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_UNSPECIFIED);
- pMgmt->uScanChannel++;
-//2008-8-4 <modify> by chester
- if (!is_channel_valid(pMgmt->uScanChannel) &&
- pMgmt->uScanChannel <= pDevice->byMaxChannel) {
- pMgmt->uScanChannel = pDevice->byMaxChannel + 1;
- pMgmt->eCommandState = WLAN_CMD_SCAN_END;
-
- }
-
- if (!pMgmt->b11hEnable ||
- (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
- s_vProbeChannel(pDevice);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, WCMD_ACTIVE_SCAN_TIME);
- return;
- } else {
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, WCMD_PASSIVE_SCAN_TIME);
- return;
- }
-
- }
-
- break;
-
- case WLAN_CMD_SCAN_END:
-
- // Set Baseband's sensitivity back.
- // Set channel back
- set_channel(pMgmt->pAdapter, pMgmt->uCurrChannel);
- pr_debug("Scanning, set back to channel: [%d]\n",
- pMgmt->uCurrChannel);
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- else
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_STATION);
-
- pMgmt->eScanState = WMAC_NO_SCANNING;
- vAdHocBeaconRestart(pDevice);
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- if (pMgmt->eScanType == WMAC_SCAN_PASSIVE) {
- //send scan event to wpa_Supplicant
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
- }
-#endif
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_DISASSOCIATE_START:
- pDevice->byReAssocCount = 0;
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState != WMAC_STATE_ASSOC)) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- } else {
- pr_debug("Send Disassociation Packet..\n");
- // reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
- pDevice->bLinkPass = false;
- // unlock command busy
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pItemSSID->len = 0;
- memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->sNodeDBTable[0].bActive = false;
- }
- netif_stop_queue(pDevice->dev);
- pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT;
- // wait all Control TD complete
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pr_debug(" CARDbRadioPowerOff\n");
- //2008-09-02 <mark> by chester
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_DISASSOCIATE_WAIT:
- // wait all Control TD complete
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
-//2008-09-02 <mark> by chester
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_SSID_START:
- pDevice->byReAssocCount = 0;
- if (pDevice->bRadioOff) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pr_debug("chester-abyDesireSSID=%s\n", ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pr_debug(" cmd: desire ssid = %s\n", pItemSSID->abySSID);
- pr_debug(" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID);
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- pr_debug(" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n");
- pr_debug(" pItemSSID->len =%d\n", pItemSSID->len);
- pr_debug(" pItemSSIDCurr->len = %d\n",
- pItemSSIDCurr->len);
- pr_debug(" desire ssid = %s\n", pItemSSID->abySSID);
- pr_debug(" curr ssid = %s\n", pItemSSIDCurr->abySSID);
- }
-
- if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
- if (pItemSSID->len == pItemSSIDCurr->len) {
- if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) {
- s_bCommandComplete(pDevice);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
-
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- // set initial state
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((void *)pDevice);
- BSSvClearNodeDBTable(pDevice, 0);
-
- vMgrJoinBSSBegin((void *)pDevice, &Status);
- // if Infra mode
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
- // Call mgr to begin the deauthentication
- // reason = (3) because sta has left ESS
- if (pMgmt->eCurrState >= WMAC_STATE_AUTH)
- vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
-
- // Call mgr to begin the authentication
- vMgrAuthenBeginSta((void *)pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- pr_debug(" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
- return;
- }
- }
- // if Adhoc mode
- else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
-
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- bClearBSSID_SCAN(pDevice);
- } else {
- // start own IBSS
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" WLAN_CMD_IBSS_CREATE fail !\n");
-
- BSSvAddMulticastNode(pDevice);
- }
- }
- // if SSID not found
- else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) {
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
- pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
- // start own IBSS
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" WLAN_CMD_IBSS_CREATE fail !\n");
-
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
- } else {
- pr_debug("Disconnect SSID none\n");
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- pr_debug("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
-
- }
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_AUTHENTICATE_WAIT:
- pr_debug("eCommandState == WLAN_AUTHENTICATE_WAIT\n");
- if (pMgmt->eCurrState == WMAC_STATE_AUTH) {
- // Call mgr to begin the association
- pDevice->byLinkWaitCount = 0;
- pr_debug("eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((void *)pDevice, pMgmt, &Status);
- if (Status == CMD_STATUS_SUCCESS) {
- pDevice->byLinkWaitCount = 0;
- pr_debug("eCommandState = WLAN_ASSOCIATE_WAIT\n");
- pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- }
-
- else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) {
- pr_debug("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n");
- } else if (pDevice->byLinkWaitCount <= 4) { //mike add: wait another 2 sec if the authentication frame is delayed
- pDevice->byLinkWaitCount++;
- pr_debug("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_ASSOCIATE_WAIT:
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- pr_debug("eCurrState == WMAC_STATE_ASSOC\n");
- if (pDevice->ePSMode != WMAC_POWER_CAM)
- PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
-
- if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA)
- KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset);
-
- pDevice->bLinkPass = true;
- pDevice->byLinkWaitCount = 0;
- pDevice->byReAssocCount = 0;
- bClearBSSID_SCAN(pDevice);
- if (pDevice->byFOETuning) {
- BBvSetFOE(pDevice->PortOffset);
- PSbSendNullPacket(pDevice);
- }
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- if (pDevice->IsTxDataTrigger) { //not the first trigger: the TxData timer is already armed, so reset it
- del_timer(&pDevice->sTimerTxData);
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long) pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->nTxDataTimeCout = 0;
- }
-
- pDevice->IsTxDataTrigger = true;
- add_timer(&pDevice->sTimerTxData);
-
- } else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) {
- printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n");
- } else if (pDevice->byLinkWaitCount <= 4) { //mike add: wait another 2 sec if the association frame is delayed
- pDevice->byLinkWaitCount++;
- pr_debug("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount);
- spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT/2);
- return;
- }
- pDevice->byLinkWaitCount = 0;
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_AP_MODE_START:
- pr_debug("eCommandState == WLAN_CMD_AP_MODE_START\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- del_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pDevice->bLinkPass = false;
- if (pDevice->bEnableHostWEP)
- BSSvClearNodeDBTable(pDevice, 1);
- else
- BSSvClearNodeDBTable(pDevice, 0);
- pDevice->uAssocCount = 0;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bFixRate = false;
-
- vMgrCreateOwnIBSS((void *)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS)
- pr_debug(" vMgrCreateOwnIBSS fail !\n");
-
- // always turn off unicast bit
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_RCR, RCR_UNICAST);
- pDevice->byRxMode &= ~RCR_UNICAST;
- pr_debug("wcmd: rx_mode = %x\n", pDevice->byRxMode);
- BSSvAddMulticastNode(pDevice);
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pDevice->bLinkPass = true;
- add_timer(&pMgmt->sTimerSecondCallback);
- }
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_TX_PSPACKET_START:
- // DTIM Multicast tx
- if (pMgmt->sNodeDBTable[0].bRxPSPoll) {
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) {
- pMgmt->abyPSTxMap[0] &= ~byMask[0];
- pDevice->bMoreData = false;
- } else {
- pDevice->bMoreData = true;
- }
- if (!device_dma0_xmit(pDevice, skb, 0))
- pr_debug("Multicast ps tx fail\n");
-
- pMgmt->sNodeDBTable[0].wEnQueueCnt--;
- }
- }
-
- // PS nodes tx
- for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
- if (pMgmt->sNodeDBTable[ii].bActive &&
- pMgmt->sNodeDBTable[ii].bRxPSPoll) {
- pr_debug("Index=%d Enqueu Cnt= %d\n",
- ii,
- pMgmt->sNodeDBTable[ii].wEnQueueCnt);
- while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) {
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pDevice->bMoreData = false;
- } else {
- pDevice->bMoreData = true;
- }
- if (!device_dma0_xmit(pDevice, skb, ii))
- pr_debug("sta ps tx fail\n");
-
- pMgmt->sNodeDBTable[ii].wEnQueueCnt--;
- // if STA power save is enabled, stop and wait for the next PS-Poll;
- // if it is disabled, send all pending buffers.
- if (pMgmt->sNodeDBTable[ii].bPSEnable)
- break;
- }
- if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) {
- // clear tx map
- pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &=
- ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7];
- pr_debug("Index=%d PS queue clear\n",
- ii);
- }
- pMgmt->sNodeDBTable[ii].bRxPSPoll = false;
- }
- }
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_RADIO_START:
- pr_debug("eCommandState == WLAN_CMD_RADIO_START\n");
- if (pDevice->bRadioCmd)
- CARDbRadioPowerOn(pDevice);
- else
- CARDbRadioPowerOff(pDevice);
-
- s_bCommandComplete(pDevice);
- break;
-
- case WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE:
- // wait all TD complete
- if (pDevice->iTDUsed[TYPE_AC0DMA] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- if (pDevice->iTDUsed[TYPE_TXDMA0] != 0) {
- vCommandTimerWait((void *)pDevice, 10);
- spin_unlock_irq(&pDevice->lock);
- return;
- }
- pDevice->byBBVGACurrent = pDevice->byBBVGANew;
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
- pr_debug("SetVGAGainOffset %02X\n", pDevice->byBBVGACurrent);
- s_bCommandComplete(pDevice);
- break;
-
- default:
- s_bCommandComplete(pDevice);
- break;
-
- } //switch
- spin_unlock_irq(&pDevice->lock);
-}
-
-static
-bool
-s_bCommandComplete(
- struct vnt_private *pDevice
-)
-{
- PWLAN_IE_SSID pSSID;
- bool bRadioCmd = false;
- bool bForceSCAN = true;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pDevice->eCommandState = WLAN_CMD_IDLE;
- if (pDevice->cbFreeCmdQueue == CMD_Q_SIZE) {
- //Command Queue Empty
- pDevice->bCmdRunning = false;
- return true;
- } else {
- pDevice->eCommand = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].eCmd;
- pSSID = (PWLAN_IE_SSID)pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].abyCmdDesireSSID;
- bRadioCmd = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bRadioCmd;
- bForceSCAN = pDevice->eCmdQueue[pDevice->uCmdDequeueIdx].bForceSCAN;
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdDequeueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue++;
- pDevice->bCmdRunning = true;
- switch (pDevice->eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- pr_debug("eCommandState= WLAN_CMD_BSSID_SCAN\n");
- pDevice->eCommandState = WLAN_CMD_SCAN_START;
- pMgmt->uScanChannel = 0;
- if (pSSID->len != 0)
- memcpy(pMgmt->abyScanSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- else
- memset(pMgmt->abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- break;
- case WLAN_CMD_SSID:
- pDevice->eCommandState = WLAN_CMD_SSID_START;
- if (pSSID->len > WLAN_SSID_MAXLEN)
- pSSID->len = WLAN_SSID_MAXLEN;
- if (pSSID->len != 0)
- memcpy(pDevice->pMgmt->abyDesireSSID, pSSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pr_debug("eCommandState= WLAN_CMD_SSID_START\n");
- break;
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCommandState = WLAN_CMD_DISASSOCIATE_START;
- break;
- case WLAN_CMD_RX_PSPOLL:
- pDevice->eCommandState = WLAN_CMD_TX_PSPACKET_START;
- break;
- case WLAN_CMD_RUN_AP:
- pDevice->eCommandState = WLAN_CMD_AP_MODE_START;
- break;
- case WLAN_CMD_RADIO:
- pDevice->eCommandState = WLAN_CMD_RADIO_START;
- pDevice->bRadioCmd = bRadioCmd;
- break;
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE;
- break;
-
- default:
- break;
-
- }
-
- vCommandTimerWait((void *)pDevice, 0);
- }
-
- return true;
-}
-
-bool bScheduleCommand(
- void *hDeviceContext,
- CMD_CODE eCommand,
- unsigned char *pbyItem0
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- if (pDevice->cbFreeCmdQueue == 0)
- return false;
-
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].eCmd = eCommand;
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = true;
- memset(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID, 0 , WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- if (pbyItem0 != NULL) {
- switch (eCommand) {
- case WLAN_CMD_BSSID_SCAN:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bForceSCAN = false;
- break;
-
- case WLAN_CMD_SSID:
- memcpy(pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].abyCmdDesireSSID,
- pbyItem0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- break;
-
- case WLAN_CMD_DISASSOCIATE:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bNeedRadioOFF = *((int *)pbyItem0);
- break;
-
- case WLAN_CMD_RX_PSPOLL:
- break;
-
- case WLAN_CMD_RADIO:
- pDevice->eCmdQueue[pDevice->uCmdEnqueueIdx].bRadioCmd = *((int *)pbyItem0);
- break;
-
- case WLAN_CMD_CHANGE_BBSENSITIVITY:
- pDevice->eCommandState = WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE;
- break;
-
- default:
- break;
- }
- }
-
- ADD_ONE_WITH_WRAP_AROUND(pDevice->uCmdEnqueueIdx, CMD_Q_SIZE);
- pDevice->cbFreeCmdQueue--;
-
- if (!pDevice->bCmdRunning)
- s_bCommandComplete(pDevice);
-
- return true;
-}
-
-/*
- * Description:
- * Clear BSSID_SCAN cmd in CMD Queue
- *
- * Parameters:
- * In:
- * hDeviceContext - Pointer to the adapter
- * eCommand - Command
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- *
- */
-bool bClearBSSID_SCAN(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
- unsigned int ii;
-
- if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
- for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii++) {
- if (pDevice->eCmdQueue[uCmdDequeueIdx].eCmd == WLAN_CMD_BSSID_SCAN)
- pDevice->eCmdQueue[uCmdDequeueIdx].eCmd = WLAN_CMD_IDLE;
- ADD_ONE_WITH_WRAP_AROUND(uCmdDequeueIdx, CMD_Q_SIZE);
- if (uCmdDequeueIdx == pDevice->uCmdEnqueueIdx)
- break;
- }
- }
- return true;
-}
-
-//mike add:reset command timer
-void
-vResetCommandTimer(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- //delete timer
- del_timer(&pDevice->sTimerCommand);
- //init timer
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
- pDevice->eCommandState = WLAN_CMD_IDLE;
- pDevice->bCmdRunning = false;
- pDevice->bCmdClear = false;
-}
-
-void
-BSSvSecondTxData(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- pDevice->nTxDataTimeCout++;
-
- if (pDevice->nTxDataTimeCout < 4) // don't tx data until 40s have elapsed (4 x 10s callbacks)
- {
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- add_timer(&pDevice->sTimerTxData);
- return;
- }
-
- spin_lock_irq(&pDevice->lock);
-
- /* open && sharekey linking */
- if ((pDevice->bLinkPass && (pMgmt->eAuthenMode < WMAC_AUTH_WPA)) ||
- pDevice->fWPA_Authened) { /* wpa linking */
- pDevice->fTxDataInSleep = true;
- PSbSendNullPacket(pDevice); /* send null packet */
- pDevice->fTxDataInSleep = false;
- }
-
- spin_unlock_irq(&pDevice->lock);
-
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); /* 10s callback */
- add_timer(&pDevice->sTimerTxData);
-}
diff --git a/drivers/staging/vt6655/wcmd.h b/drivers/staging/vt6655/wcmd.h
deleted file mode 100644
index 6ef04de69f37..000000000000
--- a/drivers/staging/vt6655/wcmd.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wcmd.h
- *
- * Purpose: Handles the management command interface functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- */
-
-#ifndef __WCMD_H__
-#define __WCMD_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-
-#define AUTHENTICATE_TIMEOUT 1000
-#define ASSOCIATE_TIMEOUT 1000
-
-typedef enum tagCMD_CODE {
- WLAN_CMD_BSSID_SCAN,
- WLAN_CMD_SSID,
- WLAN_CMD_DISASSOCIATE,
- WLAN_CMD_DEAUTH,
- WLAN_CMD_RX_PSPOLL,
- WLAN_CMD_RADIO,
- WLAN_CMD_CHANGE_BBSENSITIVITY,
- WLAN_CMD_SETPOWER,
- WLAN_CMD_TBTT_WAKEUP,
- WLAN_CMD_BECON_SEND,
- WLAN_CMD_CHANGE_ANTENNA,
- WLAN_CMD_REMOVE_ALLKEY,
- WLAN_CMD_MAC_DISPOWERSAVING,
- WLAN_CMD_11H_CHSW,
- WLAN_CMD_RUN_AP
-} CMD_CODE, *PCMD_CODE;
-
-#define CMD_Q_SIZE 32
-
-typedef enum tagCMD_STATUS {
- CMD_STATUS_SUCCESS = 0,
- CMD_STATUS_FAILURE,
- CMD_STATUS_RESOURCES,
- CMD_STATUS_TIMEOUT,
- CMD_STATUS_PENDING
-} CMD_STATUS, *PCMD_STATUS;
-
-typedef struct tagCMD_ITEM {
- CMD_CODE eCmd;
- unsigned char abyCmdDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- bool bNeedRadioOFF;
- unsigned short wDeAuthenReason;
- bool bRadioCmd;
- bool bForceSCAN;
-} CMD_ITEM, *PCMD_ITEM;
-
-typedef enum tagCMD_STATE {
- WLAN_CMD_SCAN_START,
- WLAN_CMD_SCAN_END,
- WLAN_CMD_DISASSOCIATE_START,
- WLAN_CMD_SSID_START,
- WLAN_AUTHENTICATE_WAIT,
- WLAN_ASSOCIATE_WAIT,
- WLAN_DISASSOCIATE_WAIT,
- WLAN_CMD_TX_PSPACKET_START,
- WLAN_CMD_AP_MODE_START,
- WLAN_CMD_RADIO_START,
- WLAN_CMD_CHECK_BBSENSITIVITY_CHANGE,
- WLAN_CMD_IDLE
-} CMD_STATE, *PCMD_STATE;
-
-void
-vResetCommandTimer(
- void *hDeviceContext
-);
-
-void
-vCommandTimer(
- void *hDeviceContext
-);
-
-bool bClearBSSID_SCAN(
- void *hDeviceContext
-);
-
-bool
-bScheduleCommand(
- void *hDeviceContext,
- CMD_CODE eCommand,
- unsigned char *pbyItem0
-);
-
-void
-vCommandTimerWait(
- void *hDeviceContext,
- unsigned int MSecond
-);
-
-void
-BSSvSecondTxData(
- void *hDeviceContext
-);
-
-#endif //__WCMD_H__
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
deleted file mode 100644
index 5a54d98d985a..000000000000
--- a/drivers/staging/vt6655/wctl.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wctl.c
- *
- * Purpose: handle WMAC duplicate filter & defragment
- *
- * Author: Jerry Chen
- *
- * Date: Jun. 27, 2002
- *
- * Functions:
- * WCTLbIsDuplicate - Test if duplicate packet
- * WCTLuSearchDFCB - Search DeFragment Control Database
- * WCTLuInsertDFCB - Insert DeFragment Control Database
- * WCTLbHandleFragment - Handle received fragment packet
- *
- * Revision History:
- *
- */
-
-#include "wctl.h"
-#include "device.h"
-#include "card.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- * Scan the Rx cache. Return true if the packet is a duplicate;
- * otherwise insert it into the cache and return false.
- *
- * Parameters:
- * In:
- * pCache - Receive packets history
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: true if packet duplicate; otherwise false
- *
- */
-
-bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader)
-{
- unsigned int uIndex;
- unsigned int ii;
- PSCacheEntry pCacheEntry;
-
- if (IS_FC_RETRY(pMACHeader)) {
- uIndex = pCache->uInPtr;
- for (ii = 0; ii < DUPLICATE_RX_CACHE_LENGTH; ii++) {
- pCacheEntry = &(pCache->asCacheEntry[uIndex]);
- if ((pCacheEntry->wFmSequence == pMACHeader->wSeqCtl) &&
- ether_addr_equal(pCacheEntry->abyAddr2,
- pMACHeader->abyAddr2)) {
- /* Duplicate match */
- return true;
- }
- ADD_ONE_WITH_WRAP_AROUND(uIndex, DUPLICATE_RX_CACHE_LENGTH);
- }
- }
- /* Not found in cache - insert */
- pCacheEntry = &pCache->asCacheEntry[pCache->uInPtr];
- pCacheEntry->wFmSequence = pMACHeader->wSeqCtl;
- memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
- ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
- return false;
-}
-
-/*
- * Description:
- * Find whether the sequence number of a received fragment packet is in the defragment database
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: index number in Defragment Database
- *
- */
-unsigned int WCTLuSearchDFCB(struct vnt_private *pDevice,
- PS802_11Header pMACHeader)
-{
- unsigned int ii;
-
- for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (pDevice->sRxDFCB[ii].bInUse &&
- ether_addr_equal(pDevice->sRxDFCB[ii].abyAddr2,
- pMACHeader->abyAddr2)) {
- return ii;
- }
- }
- return pDevice->cbDFCB;
-}
-
-/*
- * Description:
- * Insert received fragment packet in Defragment Database
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * Out:
- * none
- *
- * Return Value: index number in Defragment Database
- *
- */
-unsigned int WCTLuInsertDFCB(struct vnt_private *pDevice, PS802_11Header pMACHeader)
-{
- unsigned int ii;
-
- if (pDevice->cbFreeDFCB == 0)
- return pDevice->cbDFCB;
- for (ii = 0; ii < pDevice->cbDFCB; ii++) {
- if (!pDevice->sRxDFCB[ii].bInUse) {
- pDevice->cbFreeDFCB--;
- pDevice->sRxDFCB[ii].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[ii].bInUse = true;
- pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
- pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
- return ii;
- }
- }
- return pDevice->cbDFCB;
-}
-
-/*
- * Description:
- * Handle received fragment packet
- *
- * Parameters:
- * In:
- * pDevice - Pointer to adapter
- * pMACHeader - 802.11 MAC Header of received packet
- * cbFrameLength - Frame length
- * bWEP - is WEP packet
- * Out:
- * none
- *
- * Return Value: true if it is a valid fragment packet and we have resources to defragment; otherwise false
- *
- */
-bool WCTLbHandleFragment(struct vnt_private *pDevice, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, bool bWEP, bool bExtIV)
-{
- unsigned int uHeaderSize;
-
- if (bWEP) {
- uHeaderSize = 28;
- if (bExtIV)
- // ExtIV
- uHeaderSize += 4;
- } else {
- uHeaderSize = 24;
- }
-
- if (IS_FIRST_FRAGMENT_PKT(pMACHeader)) {
- pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx < pDevice->cbDFCB) {
- // duplicate, we must flush previous DCB
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].uLifetime = pDevice->dwMaxReceiveLifetime;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wSequence = (pMACHeader->wSeqCtl >> 4);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- } else {
- pDevice->uCurrentDFCBIdx = WCTLuInsertDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx == pDevice->cbDFCB)
- return false;
- }
- // reserve 4 bytes to match the MAC RX buffer
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer = (unsigned char *)(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].skb->data + 4);
- memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, pMACHeader, cbFrameLength);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength = cbFrameLength;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += cbFrameLength;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
- return false;
- } else {
- pDevice->uCurrentDFCBIdx = WCTLuSearchDFCB(pDevice, pMACHeader);
- if (pDevice->uCurrentDFCBIdx != pDevice->cbDFCB) {
- if ((pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wSequence == (pMACHeader->wSeqCtl >> 4)) &&
- (pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum == (pMACHeader->wSeqCtl & 0x000F)) &&
- ((pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength + cbFrameLength - uHeaderSize) < 2346)) {
- memcpy(pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer, ((unsigned char *)(pMACHeader) + uHeaderSize), (cbFrameLength - uHeaderSize));
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].cbFrameLength += (cbFrameLength - uHeaderSize);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].pbyRxBuffer += (cbFrameLength - uHeaderSize);
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].wFragNum++;
- } else {
- // seq error or frag # error flush DFCB
- pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
- return false;
- }
- } else {
- return false;
- }
- if (IS_LAST_FRAGMENT_PKT(pMACHeader)) {
- //enq defragcontrolblock
- pDevice->cbFreeDFCB++;
- pDevice->sRxDFCB[pDevice->uCurrentDFCBIdx].bInUse = false;
- return true;
- }
- return false;
- }
-}
diff --git a/drivers/staging/vt6655/wctl.h b/drivers/staging/vt6655/wctl.h
deleted file mode 100644
index f0995d86f71f..000000000000
--- a/drivers/staging/vt6655/wctl.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wctl.h
- *
- * Purpose:
- *
- * Author: Jerry Chen
- *
- * Date: Jun. 27, 2002
- *
- */
-
-#ifndef __WCTL_H__
-#define __WCTL_H__
-
-#include "ttype.h"
-#include "tether.h"
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-#define IS_TYPE_DATA(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_DATA)
-
-#define IS_TYPE_MGMT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_MGMT)
-
-#define IS_TYPE_CONTROL(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_802_11_MASK) == TYPE_802_11_CTL)
-
-#define IS_FC_MOREDATA(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREDATA) == FC_MOREDATA)
-
-#define IS_FC_POWERMGT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_POWERMGT) == FC_POWERMGT)
-
-#define IS_FC_RETRY(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_RETRY) == FC_RETRY)
-
-#define IS_FC_WEP(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_WEP) == FC_WEP)
-
-#ifdef __BIG_ENDIAN
-
-#define IS_FRAGMENT_PKT(pMACHeader) \
- (((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) != 0) | \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x0F00) != 0))
-
-#define IS_FIRST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x0F00) == 0)
-
-#else
-
-#define IS_FRAGMENT_PKT(pMACHeader) \
- (((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) != 0) | \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x000F) != 0))
-
-#define IS_FIRST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wSeqCtl & 0x000F) == 0)
-
-#endif//#ifdef __BIG_ENDIAN
-
-#define IS_LAST_FRAGMENT_PKT(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & FC_MOREFRAG) == 0)
-
-#define IS_CTL_PSPOLL(pMACHeader) \
- ((((PS802_11Header) pMACHeader)->wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL)
-
-#define ADD_ONE_WITH_WRAP_AROUND(uVar, uModulo) \
-do { \
- if ((uVar) >= ((uModulo) - 1)) \
- (uVar) = 0; \
- else \
- (uVar)++; \
-} while (0)
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
-bool WCTLbHandleFragment(struct vnt_private *, PS802_11Header pMACHeader,
- unsigned int cbFrameLength, bool bWEP, bool bExtIV);
-unsigned int WCTLuSearchDFCB(struct vnt_private *, PS802_11Header pMACHeader);
-unsigned int WCTLuInsertDFCB(struct vnt_private *, PS802_11Header pMACHeader);
-
-#endif // __WCTL_H__
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
deleted file mode 100644
index c73c39d7adfd..000000000000
--- a/drivers/staging/vt6655/wmgr.c
+++ /dev/null
@@ -1,4602 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wmgr.c
- *
- * Purpose: Handles the 802.11 management functions
- *
- * Author: Lyndon Chen
- *
- * Date: May 8, 2002
- *
- * Functions:
- * vMgrObjectInit - Initialize Management Object data structure
- * vMgrObjectReset - Reset Management Object data structure
- * vMgrAssocBeginSta - Start associate function
- * vMgrReAssocBeginSta - Start reassociate function
- * vMgrDisassocBeginSta - Start disassociate function
- * s_vMgrRxAssocRequest - Handle Rcv associate_request
- * s_vMgrRxAssocResponse - Handle Rcv associate_response
- * vMgrAuthenBeginSta - Start authentication function
- * vMgrDeAuthenBeginSta - Start deauthentication function
- * s_vMgrRxAuthentication - Handle Rcv authentication
- * s_vMgrRxAuthenSequence_1 - Handle Rcv authentication sequence 1
- * s_vMgrRxAuthenSequence_2 - Handle Rcv authentication sequence 2
- * s_vMgrRxAuthenSequence_3 - Handle Rcv authentication sequence 3
- * s_vMgrRxAuthenSequence_4 - Handle Rcv authentication sequence 4
- * s_vMgrRxDisassociation - Handle Rcv disassociation
- * s_vMgrRxBeacon - Handle Rcv Beacon
- * vMgrCreateOwnIBSS - Create ad_hoc IBSS or AP BSS
- * vMgrJoinBSSBegin - Join BSS function
- * s_vMgrSynchBSS - Synch & adopt BSS parameters
- * s_MgrMakeBeacon - Create Beacon frame
- * s_MgrMakeProbeResponse - Create Probe Response frame
- * s_MgrMakeAssocRequest - Create Associate Request frame
- * s_MgrMakeReAssocRequest - Create ReAssociate Request frame
- * s_vMgrRxProbeResponse - Handle Rcv probe_response
- * s_vMgrRxProbeRequest - Handle Rcv probe_request
- * bMgrPrepareBeaconToSend - Prepare Beacon frame
- * s_vMgrLogStatus - Log 802.11 Status
- * vMgrRxManagePacket - Rcv management frame dispatch function
- * s_vMgrFormatTIM - Assemble TIM field of beacon
- * vMgrTimerInit - Initialize 1-sec and command callback functions
- *
- * Revision History:
- *
- */
-
-#include "tmacro.h"
-#include "desc.h"
-#include "device.h"
-#include "card.h"
-#include "channel.h"
-#include "80211hdr.h"
-#include "80211mgr.h"
-#include "wmgr.h"
-#include "wcmd.h"
-#include "mac.h"
-#include "bssdb.h"
-#include "power.h"
-#include "datarate.h"
-#include "baseband.h"
-#include "rxtx.h"
-#include "wpa.h"
-#include "rf.h"
-#include "iowpa.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-//2008-8-4 <add> by chester
-static bool ChannelExceedZoneType(
- struct vnt_private *pDevice,
- unsigned char byCurrChannel
-);
-
- // Association/disassociation functions
-static
-PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-void
-s_vMgrRxAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-static
-void
-s_vMgrRxAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bReAssocType
-);
-
-static
-void
-s_vMgrRxDisassociation(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// Authentication/deauthen functions
-static
-void
-s_vMgrRxAuthenSequence_1(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_2(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_3(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthenSequence_4(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-);
-
-static
-void
-s_vMgrRxAuthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-static
-void
-s_vMgrRxDeauthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// Scan functions
-// probe request/response functions
-static
-void
-s_vMgrRxProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-static
-void
-s_vMgrRxProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-// beacon functions
-static
-void
-s_vMgrRxBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bInScan
-);
-
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
-);
-
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// Association response
-static
-PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// ReAssociation response
-static
-PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-);
-
-// Probe response
-static
-PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- unsigned char *pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- unsigned char byPHYType
-);
-
-// received status
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- unsigned short wStatus
-);
-
-static
-void
-s_vMgrSynchBSS(
- struct vnt_private *pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
-);
-
-static bool
-s_bCipherMatch(
- PKnownBSS pBSSNode,
- NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- unsigned char *pbyCCSPK,
- unsigned char *pbyCCSGK
-);
-
-static void Encyption_Rebuild(
- struct vnt_private *pDevice,
- PKnownBSS pCurr
-);
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Routine Description:
- * Allocates and initializes the Management object.
- *
- * Return Value:
- * Ndis_status.
- *
- -*/
-
-void
-vMgrObjectInit(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- int ii;
-
- pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0];
- pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0];
- pMgmt->uCurrChannel = pDevice->uChannel;
- for (ii = 0; ii < WLAN_BSSID_LEN; ii++)
- pMgmt->abyDesireBSSID[ii] = 0xFF;
-
- pMgmt->sAssocInfo.AssocInfo.Length = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_NONE;
- pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((void *)pDevice, false);
-}
-
-/*+
- *
- * Routine Description:
- * Initializes timer object
- *
- * Return Value:
- * Ndis_status.
- *
- -*/
-
-void
-vMgrTimerInit(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- init_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->sTimerSecondCallback.data = (unsigned long) pDevice;
- pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack;
- pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (unsigned long) pDevice;
- pDevice->sTimerCommand.function = (TimerFunction)vCommandTimer;
- pDevice->sTimerCommand.expires = RUN_AT(HZ);
-
- init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (unsigned long) pDevice;
- pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
- pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
- pDevice->fTxDataInSleep = false;
- pDevice->IsTxDataTrigger = false;
- pDevice->nTxDataTimeCout = 0;
-
- pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
- pDevice->uCmdDequeueIdx = 0;
- pDevice->uCmdEnqueueIdx = 0;
-}
-
-/*+
- *
- * Routine Description:
- * Reset the management object structure.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrObjectReset(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- pDevice->bEnablePSMode = false;
- // TODO: timer
-}
-
-/*+
- *
- * Routine Description:
- * Start the station association procedure. Namely, send an
- * association request frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
- pMgmt->wCurrCapInfo = 0;
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- if (pDevice->bEncryptionEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (pMgmt->wListenInterval == 0)
- pMgmt->wListenInterval = 1; // at least one.
-
- // ERP Phy (802.11g) should support short preamble.
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- }
- if (pMgmt->b11hEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
-
- /* build an assocreq frame and send it */
- pTxPacket = s_MgrMakeAssocRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- pMgmt->wCurrCapInfo,
- pMgmt->wListenInterval,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
- if (pTxPacket != NULL) {
- /* send the frame */
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_ASSOCPENDING;
- *pStatus = CMD_STATUS_SUCCESS;
- }
- } else {
- *pStatus = CMD_STATUS_RESOURCES;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Start the station re-association procedure.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrReAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
- pMgmt->wCurrCapInfo = 0;
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- if (pDevice->bEncryptionEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
-
- if (pMgmt->wListenInterval == 0)
- pMgmt->wListenInterval = 1; // at least one.
-
- // ERP Phy (802.11g) should support short preamble.
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- if (CARDbIsShorSlotTime(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
- } else if (pMgmt->eCurrentPHYMode == PHY_TYPE_11B) {
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- }
-
- if (pMgmt->b11hEnable)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
-
- pTxPacket = s_MgrMakeReAssocRequest
- (
- pDevice,
- pMgmt,
- pMgmt->abyCurrBSSID,
- pMgmt->wCurrCapInfo,
- pMgmt->wListenInterval,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
- if (pTxPacket != NULL) {
- /* send the frame */
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus != CMD_STATUS_PENDING)
- pr_debug("Mgt:Reassociation tx failed\n");
- else
- pr_debug("Mgt:Reassociation tx sending\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- * Send a disassociation request frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDisassocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_DISASSOC sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DISASSOC_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
-
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_DISASSOC_FR_MAXLEN;
-
- // format fixed field frame structure
- vMgrEncodeDisassociation(&sFrame);
-
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DISASSOC)
-));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- // Set reason code
- *(sFrame.pwReason) = cpu_to_le16(wReason);
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- // send the frame
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- *pStatus = CMD_STATUS_SUCCESS;
- }
-}
-
-/*+
- *
- * Routine Description:(AP function)
- * Handle incoming station association request frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-)
-{
- WLAN_FR_ASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned short wAssocStatus = 0;
- unsigned short wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
- return;
- // node index not found
- if (!uNodeIndex)
- return;
-
- //check if node is authenticated
- //decode the frame
- memset(&sFrame, 0, sizeof(WLAN_FR_ASSOCREQ));
- memset(abyCurrSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- memset(abyCurrExtSuppRates, 0, WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
-
- vMgrDecodeAssocRequest(&sFrame);
-
- if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
- // Todo: check sta basic rate, if ap can't support, set status code
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- uRateLen);
- abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G)
- abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- uRateLen);
- else
- abyCurrExtSuppRates[1] = 0;
-
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- false, // do not change our basic rate
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
-);
-
- // set max tx rate
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
-
- pr_debug("RxAssocRequest:wTxDataRate is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
-
- // Todo: check sta preamble, if ap can't support, set status code
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
- wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (unsigned short)uNodeIndex;
- // check if ERP is supported
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
-
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
- // B only STA join
- pDevice->bProtectMode = true;
- pDevice->bNonERPPresent = true;
- }
- if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->bBarkerPreambleMd = true;
-
- pr_info("Associate AID= %d\n", wAssocAID);
- pr_info("MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
- pr_info("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
- } else {
- /* TODO: handle association request from a STA still in State 1 (not yet authenticated) */
- return;
- }
-
- // assoc response reply..
- pTxPacket = s_MgrMakeAssocResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- wAssocStatus,
- wAssocAID,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
- if (pTxPacket != NULL) {
- if (pDevice->bEnableHostapd)
- return;
-
- /* send the frame */
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:Assoc response tx failed\n");
- else
- pr_debug("Mgt:Assoc response tx sending..\n");
- }
-}
-
-/*+
- *
- * Description:(AP function)
- * Handle incoming station re-association request frames.
- *
- * Parameters:
- * In:
- * pMgmt - Management Object structure
- * pRxPacket - Received Packet
- * Out:
- * none
- *
- * Return Value: None.
- *
- -*/
-
-static
-void
-s_vMgrRxReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- unsigned int uNodeIndex
-)
-{
- WLAN_FR_REASSOCREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned short wAssocStatus = 0;
- unsigned short wAssocAID = 0;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP)
- return;
- // node index not found
- if (!uNodeIndex)
- return;
- //check if node is authenticated
- //decode the frame
- memset(&sFrame, 0, sizeof(WLAN_FR_REASSOCREQ));
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeReassocRequest(&sFrame);
-
- if (pMgmt->sNodeDBTable[uNodeIndex].eNodeState >= NODE_AUTH) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC;
- pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = cpu_to_le16(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = cpu_to_le16(*sFrame.pwListenInterval);
- pMgmt->sNodeDBTable[uNodeIndex].bPSEnable =
- WLAN_GET_FC_PWRMGT(sFrame.pHdr->sA3.wFrameCtl) ? true : false;
- // Todo: check sta basic rate, if ap can't support, set status code
-
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- uRateLen);
- abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pExtSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- uRateLen);
- } else {
- abyCurrExtSuppRates[1] = 0;
- }
-
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
- false, // do not change our basic rate
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
-);
-
- // set max tx rate
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate =
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
-
- pr_debug("RxReAssocRequest:TxDataRate is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate);
-
- // Todo: check sta preamble, if ap can't support, set status code
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble =
- WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime =
- WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)uNodeIndex;
- wAssocStatus = WLAN_MGMT_STATUS_SUCCESS;
- wAssocAID = (unsigned short)uNodeIndex;
-
- // check if ERP is supported
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate > RATE_11M)
- pMgmt->sNodeDBTable[uNodeIndex].bERPExist = true;
-
- if (pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate <= RATE_11M) {
- // B only STA join
- pDevice->bProtectMode = true;
- pDevice->bNonERPPresent = true;
- }
- if (!pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble)
- pDevice->bBarkerPreambleMd = true;
-
- pr_info("Rx ReAssociate AID= %d\n", wAssocAID);
- pr_info("MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- sFrame.pHdr->sA3.abyAddr2[0],
- sFrame.pHdr->sA3.abyAddr2[1],
- sFrame.pHdr->sA3.abyAddr2[2],
- sFrame.pHdr->sA3.abyAddr2[3],
- sFrame.pHdr->sA3.abyAddr2[4],
- sFrame.pHdr->sA3.abyAddr2[5]
- );
- pr_info("Max Support rate = %d\n",
- pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate);
-
- }
-
- // assoc response reply..
- pTxPacket = s_MgrMakeReAssocResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- wAssocStatus,
- wAssocAID,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
- );
-
- if (pTxPacket != NULL) {
- /* send the frame */
- if (pDevice->bEnableHostapd)
- return;
-
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:ReAssoc response tx failed\n");
- else
- pr_debug("Mgt:ReAssoc response tx sending..\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handle incoming association response frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bReAssocType
-)
-{
- WLAN_FR_ASSOCRESP sFrame;
- PWLAN_IE_SSID pItemSSID;
- unsigned char *pbyIEs;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOCPENDING ||
- pMgmt->eCurrState == WMAC_STATE_ASSOC) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- // decode the frame
- vMgrDecodeAssocResponse(&sFrame);
- if ((sFrame.pwCapInfo == NULL) ||
- (sFrame.pwStatus == NULL) ||
- (sFrame.pwAid == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- DBG_PORT80(0xCC);
- return;
- }
-
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.Capabilities = *(sFrame.pwCapInfo);
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.StatusCode = *(sFrame.pwStatus);
- pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.AssociationId = *(sFrame.pwAid);
- pMgmt->sAssocInfo.AssocInfo.AvailableResponseFixedIEs |= 0x07;
-
- pMgmt->sAssocInfo.AssocInfo.ResponseIELength = sFrame.len - 24 - 6;
- pMgmt->sAssocInfo.AssocInfo.OffsetResponseIEs = pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs + pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- pbyIEs += pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- memcpy(pbyIEs, (sFrame.pBuf + 24 + 6), pMgmt->sAssocInfo.AssocInfo.ResponseIELength);
-
- // save values and set current BSS state
- if (cpu_to_le16((*(sFrame.pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- // set AID
- pMgmt->wCurrAID = cpu_to_le16((*(sFrame.pwAid)));
- if ((pMgmt->wCurrAID >> 14) != (BIT0 | BIT1))
- pr_debug("AID from AP, has two msb clear\n");
-
- pr_info("Association Successful, AID=%d\n",
- pMgmt->wCurrAID & ~(BIT14 | BIT15));
- pMgmt->eCurrState = WMAC_STATE_ASSOC;
- BSSvUpdateAPNode((void *)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
- pr_info("Link with AP(SSID): %s\n", pItemSSID->abySSID);
- pDevice->bLinkPass = true;
- pDevice->uBBVGADiffCount = 0;
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- if (skb_tailroom(pDevice->skb) < (sizeof(viawget_wpa_header) + pMgmt->sAssocInfo.AssocInfo.ResponseIELength +
- pMgmt->sAssocInfo.AssocInfo.RequestIELength)) { //data room not enough
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_ASSOC_MSG;
- wpahdr->resp_ie_len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
- wpahdr->req_ie_len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- memcpy(pDevice->skb->data + sizeof(viawget_wpa_header), pMgmt->sAssocInfo.abyIEs, wpahdr->req_ie_len);
- memcpy(pDevice->skb->data + sizeof(viawget_wpa_header) + wpahdr->req_ie_len,
- pbyIEs,
- wpahdr->resp_ie_len
-);
- skb_put(pDevice->skb, sizeof(viawget_wpa_header) + wpahdr->resp_ie_len + wpahdr->req_ie_len);
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-//2008-0409-07, <Add> by Einsn Liu
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- unsigned char buf[512];
- size_t len;
- union iwreq_data wrqu;
- int we_event;
-
- memset(buf, 0, 512);
-
- len = pMgmt->sAssocInfo.AssocInfo.RequestIELength;
- if (len) {
- memcpy(buf, pMgmt->sAssocInfo.abyIEs, len);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- we_event = IWEVASSOCREQIE;
- wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
- }
-
- memset(buf, 0, 512);
- len = pMgmt->sAssocInfo.AssocInfo.ResponseIELength;
-
- if (len) {
- memcpy(buf, pbyIEs, len);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- we_event = IWEVASSOCRESPIE;
- wireless_send_event(pDevice->dev, we_event, &wrqu, buf);
- }
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.ap_addr.sa_data, &pMgmt->abyCurrBSSID[0], ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif //#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-//End Add -- //2008-0409-07, <Add> by Einsn Liu
- } else {
- if (bReAssocType) {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- } else {
- // jump back to the auth state and indicate the error
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- }
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(sFrame.pwStatus))));
- }
-
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-// need to clear flags related to NetworkManager
-
- pDevice->bwextcount = 0;
- pDevice->bWPASuppWextEnabled = false;
-#endif
-
- if (pMgmt->eCurrState == WMAC_STATE_ASSOC)
- timer_expire(pDevice->sTimerCommand, 0);
-}
-
-/*+
- *
- * Routine Description:
- * Start the station authentication procedure. Namely, send an
- * authentication frame to the AP.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- vMgrEncodeAuthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_SHAREDKEY);
- else
- *(sFrame.pwAuthAlgorithm) = cpu_to_le16(WLAN_AUTH_ALG_OPENSYSTEM);
-
- *(sFrame.pwAuthSequence) = cpu_to_le16(1);
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING) {
- pMgmt->eCurrState = WMAC_STATE_AUTHPENDING;
- *pStatus = CMD_STATUS_SUCCESS;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Start the station (AP) deauthentication procedure. Namely, send a
- * deauthentication frame to the AP or STA.
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrDeAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- WLAN_FR_DEAUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_DEAUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_DEAUTHEN_FR_MAXLEN;
- vMgrEncodeDeauthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_DEAUTHEN)
-));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyDestAddress, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *(sFrame.pwReason) = cpu_to_le16(wReason); // deauthenticate because we left the BSS
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- *pStatus = csMgmt_xmit(pDevice, pTxPacket);
- if (*pStatus == CMD_STATUS_PENDING)
- *pStatus = CMD_STATUS_SUCCESS;
-}
-
-/*+
- *
- * Routine Description:
- * Handle incoming authentication frames.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_AUTHEN sFrame;
-
- // we better be an AP or a STA in AUTHPENDING otherwise ignore
- if (!(pMgmt->eCurrMode == WMAC_MODE_ESS_AP ||
- pMgmt->eCurrState == WMAC_STATE_AUTHPENDING)) {
- return;
- }
-
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeAuthen(&sFrame);
- switch (cpu_to_le16((*(sFrame.pwAuthSequence)))) {
- case 1:
- //AP function
- s_vMgrRxAuthenSequence_1(pDevice, pMgmt, &sFrame);
- break;
- case 2:
- s_vMgrRxAuthenSequence_2(pDevice, pMgmt, &sFrame);
- break;
- case 3:
- //AP function
- s_vMgrRxAuthenSequence_3(pDevice, pMgmt, &sFrame);
- break;
- case 4:
- s_vMgrRxAuthenSequence_4(pDevice, pMgmt, &sFrame);
- break;
- default:
- pr_debug("Auth Sequence error, seq = %d\n",
- cpu_to_le16((*(sFrame.pwAuthSequence))));
- break;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 1. Currently
- * assumes we're an AP. So far, no one appears to use authentication
- * in Ad-Hoc mode.
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_1(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uNodeIndex;
- WLAN_FR_AUTHEN sFrame;
- PSKeyItem pTransmitKey;
-
- // Insert a Node entry
- if (!BSSDBbIsSTAInNodeDB(pMgmt, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
- BSSvCreateOneNode(pDevice, &uNodeIndex);
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, pFrame->pHdr->sA3.abyAddr2,
- WLAN_ADDR_LEN);
- }
-
- if (pMgmt->bShareKeyAlgorithm) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_KNOWN;
- pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 1;
- } else {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH;
- }
-
- // send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- // insert values
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(0)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(2);
-
- if (cpu_to_le16(*(pFrame->pwAuthAlgorithm)) == WLAN_AUTH_ALG_SHAREDKEY) {
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- else
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
- } else {
- if (pMgmt->bShareKeyAlgorithm)
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG);
- else
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- }
-
- if (pMgmt->bShareKeyAlgorithm &&
- (cpu_to_le16(*(sFrame.pwStatus)) == WLAN_MGMT_STATUS_SUCCESS)) {
- sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len);
- sFrame.len += WLAN_CHALLENGE_IE_LEN;
- sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE;
- sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
- memset(pMgmt->abyChallenge, 0, WLAN_CHALLENGE_LEN);
- // get group key
- if (KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBroadcastAddr, GROUP_KEY, &pTransmitKey) == true) {
- rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength+3);
- rc4_encrypt(&pDevice->SBox, pMgmt->abyChallenge, pMgmt->abyChallenge, WLAN_CHALLENGE_LEN);
- }
- memcpy(sFrame.pChallenge->abyChallenge, pMgmt->abyChallenge , WLAN_CHALLENGE_LEN);
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (pDevice->bEnableHostapd)
- return;
-
- pr_debug("Mgt:Authreq_reply sequence_1 tx..\n");
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Authreq_reply sequence_1 tx failed\n");
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming auth frames with sequence number 2. Currently
- * assumes we're a station.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_2(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- WLAN_FR_AUTHEN sFrame;
- PSTxMgmtPacket pTxPacket = NULL;
-
- switch (cpu_to_le16((*(pFrame->pwAuthAlgorithm)))) {
- case WLAN_AUTH_ALG_OPENSYSTEM:
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pr_info("802.11 Authen (OPEN) Successful\n");
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
- } else {
- pr_info("802.11 Authen (OPEN) Failed\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-
- break;
-
- case WLAN_AUTH_ALG_SHAREDKEY:
-
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- // insert values
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(1)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(3);
- *(sFrame.pwStatus) = cpu_to_le16(WLAN_MGMT_STATUS_SUCCESS);
- sFrame.pChallenge = (PWLAN_IE_CHALLENGE)(sFrame.pBuf + sFrame.len);
- sFrame.len += WLAN_CHALLENGE_IE_LEN;
- sFrame.pChallenge->byElementID = WLAN_EID_CHALLENGE;
- sFrame.pChallenge->len = WLAN_CHALLENGE_LEN;
- memcpy(sFrame.pChallenge->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN);
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Auth_reply sequence_2 tx failed\n");
-
- pr_debug("Mgt:Auth_reply sequence_2 tx ...\n");
- } else {
- pr_debug("Mgt:rx Auth_reply sequence_2 status error ...\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- }
- break;
- default:
- pr_debug("Mgt: rx auth.seq = 2 unknown AuthAlgorithm=%d\n",
- cpu_to_le16((*(pFrame->pwAuthAlgorithm))));
- break;
- }
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 3. Currently
- * assumes we're an AP. This function assumes the frame has
- * already been successfully decrypted.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxAuthenSequence_3(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- unsigned int uStatusCode = 0;
- unsigned int uNodeIndex = 0;
- WLAN_FR_AUTHEN sFrame;
-
- if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) {
- uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL;
- goto reply;
- }
- if (BSSDBbIsSTAInNodeDB(pMgmt, pFrame->pHdr->sA3.abyAddr2, &uNodeIndex)) {
- if (pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence != 1) {
- uStatusCode = WLAN_MGMT_STATUS_RX_AUTH_NOSEQ;
- goto reply;
- }
- if (memcmp(pMgmt->abyChallenge, pFrame->pChallenge->abyChallenge, WLAN_CHALLENGE_LEN) != 0) {
- uStatusCode = WLAN_MGMT_STATUS_CHALLENGE_FAIL;
- goto reply;
- }
- } else {
- uStatusCode = WLAN_MGMT_STATUS_UNSPEC_FAILURE;
- goto reply;
- }
-
- if (uNodeIndex) {
- pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_AUTH;
- pMgmt->sNodeDBTable[uNodeIndex].byAuthSequence = 0;
- }
- uStatusCode = WLAN_MGMT_STATUS_SUCCESS;
- pr_debug("Challenge text check ok..\n");
-
-reply:
- // send auth reply
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_AUTHEN_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_AUTHEN_FR_MAXLEN;
- // format buffer structure
- vMgrEncodeAuthen(&sFrame);
- /* insert values */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_AUTHEN)|
- WLAN_SET_FC_ISWEP(0)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pFrame->pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- *(sFrame.pwAuthAlgorithm) = *(pFrame->pwAuthAlgorithm);
- *(sFrame.pwAuthSequence) = cpu_to_le16(4);
- *(sFrame.pwStatus) = cpu_to_le16(uStatusCode);
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- // send the frame
- if (pDevice->bEnableHostapd)
- return;
-
- if (csMgmt_xmit(pDevice, pTxPacket) != CMD_STATUS_PENDING)
- pr_debug("Mgt:Authreq_reply sequence_4 tx failed\n");
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming authen frames with sequence 4
- *
- *
- * Return Value:
- * None.
- *
- -*/
-static
-void
-s_vMgrRxAuthenSequence_4(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PWLAN_FR_AUTHEN pFrame
-)
-{
- if (cpu_to_le16((*(pFrame->pwStatus))) == WLAN_MGMT_STATUS_SUCCESS) {
- pr_info("802.11 Authen (SHAREDKEY) Successful\n");
- pMgmt->eCurrState = WMAC_STATE_AUTH;
- timer_expire(pDevice->sTimerCommand, 0);
- } else {
- pr_info("802.11 Authen (SHAREDKEY) Failed\n");
- s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-}
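Taken together, the four handlers above implement the standard 802.11 shared-key exchange. A minimal standalone sketch of that frame sequence, using made-up names rather than the driver's structures:

#include <stdio.h>

/* Summary of the shared-key authentication exchange driven by
 * s_vMgrRxAuthenSequence_1..4 above (illustrative only). */
struct auth_step {
	int seq;             /* value carried in pwAuthSequence        */
	const char *sender;  /* which side transmits this frame        */
	const char *payload; /* notable content beyond the status code */
};

static const struct auth_step shared_key_steps[] = {
	{ 1, "STA", "authentication request, no challenge yet" },
	{ 2, "AP",  "clear-text challenge IE"                  },
	{ 3, "STA", "challenge IE echoed back, WEP-encrypted"  },
	{ 4, "AP",  "final status code only"                   },
};

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("seq %d: %s sends %s\n", shared_key_steps[i].seq,
		       shared_key_steps[i].sender, shared_key_steps[i].payload);
	return 0;
}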
-
-/*+
- *
- * Routine Description:
- * Handles incoming disassociation frames
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxDisassociation(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_DISASSOC sFrame;
- unsigned int uNodeIndex = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- // if acting as an AP..
- // a STA is leaving this BSS..
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- pr_debug("Rx disassoc, sta not found\n");
-
- } else if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeDisassociation(&sFrame);
- pr_info("AP disassociated me, reason=%d\n",
- cpu_to_le16(*(sFrame.pwReason)));
- //TODO: let the upper layer know, or try to send an
- //associate packet again because of the inactivity timeout
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
-
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- pr_debug("wireless_send_event--->SIOCGIWAP(disassociated)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
- }
- /* else, ignore it */
-}
-
-/*+
- *
- * Routine Description:
- * Handles incoming deauthentication frames
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxDeauthentication(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_DEAUTHEN sFrame;
- unsigned int uNodeIndex = 0;
- viawget_wpa_header *wpahdr;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- //Todo:
- // if acting as an AP..
- // a STA is leaving this BSS..
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- BSSvRemoveOneNode(pDevice, uNodeIndex);
- else
- pr_info("Rx deauth, sta not found\n");
- } else {
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeDeauthen(&sFrame);
- pr_info("AP deauthed me, reason=%d\n",
- cpu_to_le16((*(sFrame.pwReason))));
- // TODO: update the BSS list for the specific BSSID in the pre-authentication case
- if (ether_addr_equal(sFrame.pHdr->sA3.abyAddr3,
- pMgmt->abyCurrBSSID)) {
- if (pMgmt->eCurrState >= WMAC_STATE_AUTHPENDING) {
- pMgmt->sNodeDBTable[0].bActive = false;
- pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- pDevice->bLinkPass = false;
- }
- }
-
- if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
- wpahdr = (viawget_wpa_header *)pDevice->skb->data;
- wpahdr->type = VIAWGET_DISASSOC_MSG;
- wpahdr->resp_ie_len = 0;
- wpahdr->req_ie_len = 0;
- skb_put(pDevice->skb, sizeof(viawget_wpa_header));
- pDevice->skb->dev = pDevice->wpadev;
- skb_reset_mac_header(pDevice->skb);
- pDevice->skb->pkt_type = PACKET_HOST;
- pDevice->skb->protocol = htons(ETH_P_802_2);
- memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
- netif_rx(pDevice->skb);
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- {
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- PRINT_K("wireless_send_event--->SIOCGIWAP(disauthen)\n");
- wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL);
- }
-#endif
-
- }
- /* else, ignore it. TODO: IBSS authentication service
- would be implemented here */
- }
-}
-
-//2008-8-4 <add> by chester
-/*+
- *
- * Routine Description:
- *   Check whether the current channel exceeds the range allowed by the ZoneType:
- *     USA:    1~11
- *     Japan:  1~13
- *     Europe: 1~13
- * Return Value:
- *   true:  channel exceeds the allowed range
- *   false: normal case
- -*/
-static bool
-ChannelExceedZoneType(
- struct vnt_private *pDevice,
- unsigned char byCurrChannel
-)
-{
- bool exceed = false;
-
- switch (pDevice->byZoneType) {
- case 0x00: //USA:1~11
- if ((byCurrChannel < 1) || (byCurrChannel > 11))
- exceed = true;
- break;
- case 0x01: //Japan:1~13
- case 0x02: //Europe:1~13
- if ((byCurrChannel < 1) || (byCurrChannel > 13))
- exceed = true;
- break;
- default: // reserved for other zone types
- break;
- }
-
- return exceed;
-}
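The same zone check can be read as a small lookup table; a minimal standalone sketch under that reading (names are illustrative, not the driver's):

#include <stdbool.h>

/* Channel limits per regulatory zone, mirroring ChannelExceedZoneType()
 * above: USA (0x00) allows 1-11, Japan (0x01) and Europe (0x02) allow 1-13. */
struct zone_limit {
	unsigned char zone;
	unsigned char min_ch;
	unsigned char max_ch;
};

static const struct zone_limit zone_limits[] = {
	{ 0x00, 1, 11 },	/* USA    */
	{ 0x01, 1, 13 },	/* Japan  */
	{ 0x02, 1, 13 },	/* Europe */
};

/* Returns true when the channel falls outside the zone's allowed range. */
static bool channel_exceeds_zone(unsigned char zone, unsigned char ch)
{
	for (unsigned i = 0; i < sizeof(zone_limits) / sizeof(zone_limits[0]); i++)
		if (zone_limits[i].zone == zone)
			return ch < zone_limits[i].min_ch || ch > zone_limits[i].max_ch;
	return false;	/* unknown zones are not rejected, as in the original */
}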
-
-/*+
- *
- * Routine Description:
- * Handles and analyzes incoming beacon frames.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-static
-void
-s_vMgrRxBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket,
- bool bInScan
-)
-{
- PKnownBSS pBSSList;
- WLAN_FR_BEACON sFrame;
- u64 qwTSFOffset;
- bool bIsBSSIDEqual = false;
- bool bIsSSIDEqual = false;
- bool bTSFLargeDiff = false;
- bool bTSFOffsetPostive = false;
- bool bUpdateTSF = false;
- bool bIsAPBeacon = false;
- bool bIsChannelEqual = false;
- unsigned int uLocateByteIndex;
- unsigned char byTIMBitOn = 0;
- unsigned short wAIDNumber = 0;
- unsigned int uNodeIndex;
- u64 qwTimestamp, qwLocalTSF;
- u64 qwCurrTSF;
- unsigned short wStartIndex = 0;
- unsigned short wAIDIndex = 0;
- unsigned char byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- bool bChannelHit = false;
- bool bUpdatePhyParameter = false;
- unsigned char byIEChannel = 0;
-
- memset(&sFrame, 0, sizeof(WLAN_FR_BEACON));
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
-
- // decode the beacon frame
- vMgrDecodeBeacon(&sFrame);
-
- if ((sFrame.pwBeaconInterval == NULL) ||
- (sFrame.pwCapInfo == NULL) ||
- (sFrame.pSSID == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- pr_debug("Rx beacon frame error\n");
- return;
- }
-
- if (sFrame.pDSParms != NULL) {
- if (byCurrChannel > CB_MAX_CHANNEL_24G) {
- // remap the DS Parameter channel to the 11a channel numbering
- byIEChannel = get_channel_mapping(pDevice, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
- } else {
- byIEChannel = sFrame.pDSParms->byCurrChannel;
- }
- if (byCurrChannel != byIEChannel) {
- // adjust the channel info because we received a packet from an adjacent channel
- bChannelHit = false;
- byCurrChannel = byIEChannel;
- }
- } else {
- // no DS channel info
- bChannelHit = true;
- }
-//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel))
- return;
-
- if (sFrame.pERP != NULL) {
- sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = true;
-
- } else {
- sERP.bERPExist = false;
- sERP.byERP = 0;
- }
-
- pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
- if (pBSSList == NULL) {
- pr_debug("Beacon/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((void *)pDevice,
- sFrame.pHdr->sA3.abyAddr3,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (void *)pRxPacket
-);
- } else {
- BSSbUpdateToBSSList((void *)pDevice,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- bChannelHit,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- pBSSList,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of probresponse
- (void *)pRxPacket
-);
-
- }
-
- if (bInScan)
- return;
-
- if (byCurrChannel == (unsigned char)pMgmt->uCurrChannel)
- bIsChannelEqual = true;
-
- if (bIsChannelEqual && (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- // if rx beacon without ERP field
- if (sERP.bERPExist) {
- if (WLAN_GET_ERP_USE_PROTECTION(sERP.byERP)) {
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD;
- }
- } else {
- pDevice->byERPFlag |= WLAN_SET_ERP_USE_PROTECTION(1);
- pDevice->wUseProtectCntDown = USE_PROTECT_PERIOD;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- if (!WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo))
- pDevice->byERPFlag |= WLAN_SET_ERP_BARKER_MODE(1);
- if (!sERP.bERPExist)
- pDevice->byERPFlag |= WLAN_SET_ERP_NONERP_PRESENT(1);
- }
-
- // set to MAC&BBP
- if (WLAN_GET_ERP_USE_PROTECTION(pDevice->byERPFlag)) {
- if (!pDevice->bProtectMode) {
- MACvEnableProtectMD(pDevice->PortOffset);
- pDevice->bProtectMode = true;
- }
- }
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)
- return;
-
- // check if BSSID the same
- if (memcmp(sFrame.pHdr->sA3.abyAddr3,
- pMgmt->abyCurrBSSID,
- WLAN_BSSID_LEN) == 0) {
- bIsBSSIDEqual = true;
-
-// 2008-05-21 <add> by Richardtai
- pDevice->uCurrRSSI = pRxPacket->uRSSI;
- pDevice->byCurrSQ = pRxPacket->bySQ;
-
- if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
- }
- // check if SSID the same
- if (sFrame.pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) {
- if (memcmp(sFrame.pSSID->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
- sFrame.pSSID->len
-) == 0) {
- bIsSSIDEqual = true;
- }
- }
-
- if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo) &&
- bIsBSSIDEqual &&
- bIsSSIDEqual &&
- (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
- (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
- // add a state check to prevent reconnect failures, since we will keep receiving beacons
-
- bIsAPBeacon = true;
-
- if (pBSSList != NULL) {
- // Compare PHY parameter setting
- if (pMgmt->wCurrCapInfo != pBSSList->wCapInfo) {
- bUpdatePhyParameter = true;
- pMgmt->wCurrCapInfo = pBSSList->wCapInfo;
- }
- if (sFrame.pERP != NULL) {
- if ((sFrame.pERP->byElementID == WLAN_EID_ERP) &&
- (pMgmt->byERPContext != sFrame.pERP->byContext)) {
- bUpdatePhyParameter = true;
- pMgmt->byERPContext = sFrame.pERP->byContext;
- }
- }
- //
- // Basic Rate Set may change dynamically
- //
- if (pBSSList->eNetworkTypeInUse == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abySuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- uRateLen);
- pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abyExtSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- uRateLen);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- true,
- &(pMgmt->sNodeDBTable[0].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[0].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[0].wSuppRate),
- &(pMgmt->sNodeDBTable[0].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[0].byTopOFDMBasicRate)
- );
- if (bUpdatePhyParameter) {
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- );
- }
- if (sFrame.pIE_PowerConstraint != NULL) {
- CARDvSetPowerConstraint(pMgmt->pAdapter,
- (unsigned char) pBSSList->uChannel,
- sFrame.pIE_PowerConstraint->byPower
-);
- }
- if (sFrame.pIE_CHSW != NULL) {
- CARDbChannelSwitch(pMgmt->pAdapter,
- sFrame.pIE_CHSW->byMode,
- get_channel_mapping(pMgmt->pAdapter, sFrame.pIE_CHSW->byMode, pMgmt->eCurrentPHYMode),
- sFrame.pIE_CHSW->byCount
- );
-
- } else if (!bIsChannelEqual) {
- set_channel(pMgmt->pAdapter, pBSSList->uChannel);
- }
- }
- }
-
-// pr_debug("Beacon 2\n");
- // check if CF field exists
- if (WLAN_GET_CAP_INFO_ESS(*sFrame.pwCapInfo)) {
- if (sFrame.pCFParms->wCFPDurRemaining > 0) {
- // TODO: deal with CFP period to set NAV
- }
- }
-
- qwTimestamp = le64_to_cpu(*sFrame.pqwTimestamp);
- qwLocalTSF = pRxPacket->qwLocalTSF;
-
- // check whether the beacon TSF is larger or smaller than our local TSF (see the sketch after this routine)
- if (qwTimestamp >= qwLocalTSF)
- bTSFOffsetPostive = true;
- else
- bTSFOffsetPostive = false;
-
- if (bTSFOffsetPostive)
- qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwTimestamp), (qwLocalTSF));
- else
- qwTSFOffset = CARDqGetTSFOffset(pRxPacket->byRxRate, (qwLocalTSF), (qwTimestamp));
-
- if (qwTSFOffset > TRIVIAL_SYNC_DIFFERENCE)
- bTSFLargeDiff = true;
-
- // if infra mode
- if (bIsAPBeacon) {
- // Infra mode: the local TSF always follows the AP's TSF if the difference is huge.
- if (bTSFLargeDiff)
- bUpdateTSF = true;
-
- if (pDevice->bEnablePSMode && (sFrame.pTIM != NULL)) {
- // deal with DTIM, analyze TIM
- pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? true : false;
- pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
- pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod;
- wAIDNumber = pMgmt->wCurrAID & ~(BIT14|BIT15);
-
- // check if AID in TIM field bit on
- // wStartIndex = N1
- wStartIndex = WLAN_MGMT_GET_TIM_OFFSET(sFrame.pTIM->byBitMapCtl) << 1;
- // AIDIndex = N2
- wAIDIndex = (wAIDNumber >> 3);
- if ((wAIDNumber > 0) && (wAIDIndex >= wStartIndex)) {
- uLocateByteIndex = wAIDIndex - wStartIndex;
- // len = byDTIMCount + byDTIMPeriod + byBitMapCtl + byVirtBitMap[0~250]
- if (sFrame.pTIM->len >= (uLocateByteIndex + 4)) {
- byTIMBitOn = (0x01) << ((wAIDNumber) % 8);
- pMgmt->bInTIM = sFrame.pTIM->byVirtBitMap[uLocateByteIndex] & byTIMBitOn ? true : false;
- } else {
- pMgmt->bInTIM = false;
- }
- } else {
- pMgmt->bInTIM = false;
- }
-
- if (pMgmt->bInTIM ||
- (pMgmt->bMulticastTIM && (pMgmt->byDTIMCount == 0))) {
- pMgmt->bInTIMWake = true;
- // send out ps-poll packet
-
- if (pMgmt->bInTIM)
- PSvSendPSPOLL(pDevice);
-
- } else {
- pMgmt->bInTIMWake = false;
- pr_debug("BCN: Not In TIM..\n");
- if (!pDevice->bPWBitOn) {
- pr_debug("BCN: Send Null Packet\n");
- if (PSbSendNullPacket(pDevice))
- pDevice->bPWBitOn = true;
- }
- if (PSbConsiderPowerDown(pDevice, false, false))
- pr_debug("BCN: Power down now...\n");
- }
-
- }
-
- }
- // if adhoc mode
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && !bIsAPBeacon && bIsChannelEqual) {
- if (bIsBSSIDEqual) {
- // Use sNodeDBTable[0].uInActiveCount as IBSS beacons received count.
- if (pMgmt->sNodeDBTable[0].uInActiveCount != 0)
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
-
- // ad-hoc mode: TSF is updated only when the beacon TSF is larger than the local TSF
- if (bTSFLargeDiff && bTSFOffsetPostive &&
- (pMgmt->eCurrState == WMAC_STATE_JOINTED))
- bUpdateTSF = true;
-
- // During DPC we already hold the spinlock.
- if (BSSDBbIsSTAInNodeDB(pMgmt, sFrame.pHdr->sA3.abyAddr2, &uNodeIndex)) {
- // Update the STA. (Technically the beacons of all the IBSS nodes
- // should be identical, but that's not happening in practice.)
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL,
- true,
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
- );
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].bShortSlotTime = WLAN_GET_CAP_INFO_SHORTSLOTTIME(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].uInActiveCount = 0;
- } else {
- // TODO: initialize node content
- BSSvCreateOneNode(pDevice, &uNodeIndex);
-
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate((void *)pDevice,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL,
- true,
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].wSuppRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopCCKBasicRate),
- &(pMgmt->sNodeDBTable[uNodeIndex].byTopOFDMBasicRate)
- );
-
- memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, sFrame.pHdr->sA3.abyAddr2, WLAN_ADDR_LEN);
- pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(*sFrame.pwCapInfo);
- pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate = pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate;
- pr_debug("s_vMgrRxBeacon:TxDataRate is %d,Index is %d\n", pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate, uNodeIndex);
- }
-
- // if other stations joined, indicate connection to upper layer..
- if (pMgmt->eCurrState == WMAC_STATE_STARTED) {
- pr_debug("Current IBSS State: [Started]........to: [Jointed]\n");
- pMgmt->eCurrState = WMAC_STATE_JOINTED;
- pDevice->bLinkPass = true;
- if (netif_queue_stopped(pDevice->dev))
- netif_wake_queue(pDevice->dev);
-
- pMgmt->sNodeDBTable[0].bActive = true;
- pMgmt->sNodeDBTable[0].uInActiveCount = 0;
-
- }
- } else if (bIsSSIDEqual) {
- // See another ad-hoc STA with the same SSID but a different BSSID.
- // Adopt its parameters only when its TSF is larger than ours.
- if (bTSFLargeDiff && bTSFOffsetPostive) {
- // we don't support ATIM under adhoc mode
- // if (sFrame.pIBSSParms->wATIMWindow == 0) {
- // adopt these parameters
- // TODO: check the sFrame cap if privacy is on, and support rate sync
- memcpy(pMgmt->abyCurrBSSID, sFrame.pHdr->sA3.abyAddr3, WLAN_BSSID_LEN);
- memcpy(pDevice->abyBSSID, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- pMgmt->wCurrATIMWindow = cpu_to_le16(sFrame.pIBSSParms->wATIMWindow);
- pMgmt->wCurrBeaconPeriod = cpu_to_le16(*sFrame.pwBeaconInterval);
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- // set the HW beacon interval and re-synchronize...
- pr_debug("Rejoining to Other Adhoc group with same SSID........\n");
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_BI, pMgmt->wCurrBeaconPeriod);
- CARDbUpdateTSF(pDevice, pRxPacket->byRxRate, qwTimestamp, qwLocalTSF);
- CARDvUpdateNextTBTT(pDevice->PortOffset, qwTimestamp, pMgmt->wCurrBeaconPeriod);
- // Turn off the BSSID filter to avoid filtering out other ad-hoc stations whose BSSID differs.
- MACvWriteBSSIDAddress(pDevice->PortOffset, pMgmt->abyCurrBSSID);
-
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates);
-
- // Prepare beacon frame
- bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
- }
- }
- }
- // endian issue ???
- // Update TSF
- if (bUpdateTSF) {
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- CARDbUpdateTSF(pDevice, pRxPacket->byRxRate, qwTimestamp, pRxPacket->qwLocalTSF);
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- CARDvUpdateNextTBTT(pDevice->PortOffset, qwTimestamp, pMgmt->wCurrBeaconPeriod);
- }
-}
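The TSF bookkeeping above reduces to one decision: resynchronize when the beacon timestamp and the local TSF differ by more than a threshold, always following the AP in infrastructure mode but only adopting a larger remote TSF in a joined IBSS. A minimal sketch of that decision (names and the threshold parameter are illustrative; the real driver additionally corrects the offset for the receive rate via CARDqGetTSFOffset):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of the TSF-update decision in s_vMgrRxBeacon().
 * sync_threshold stands in for TRIVIAL_SYNC_DIFFERENCE; names are made up. */
static bool should_update_tsf(uint64_t beacon_tsf, uint64_t local_tsf,
			      uint64_t sync_threshold, bool is_ap_beacon,
			      bool ibss_joined)
{
	bool remote_ahead = beacon_tsf >= local_tsf;
	uint64_t diff = remote_ahead ? beacon_tsf - local_tsf
				     : local_tsf - beacon_tsf;

	if (diff <= sync_threshold)
		return false;			/* difference is trivial        */
	if (is_ap_beacon)
		return true;			/* infra: always follow the AP  */
	return remote_ahead && ibss_joined;	/* ad-hoc: adopt larger TSF only */
}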
-
-/*+
- *
- * Routine Description:
- * Instructs the HW to create a BSS using the supplied
- * attributes. Note that this implementation only supports Ad-Hoc
- * BSS creation.
- *
- *
- * Return Value:
- * CMD_STATUS (returned via pStatus)
- *
- -*/
-void
-vMgrCreateOwnIBSS(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned short wMaxBasicRate;
- unsigned short wMaxSuppRate;
- unsigned char byTopCCKBasicRate;
- unsigned char byTopOFDMBasicRate;
- u64 qwCurrTSF;
- unsigned int ii;
- unsigned char abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
- unsigned char abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
- unsigned char abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- unsigned short wSuppRate;
-
- pr_debug("Create Basic Service Set .......\n");
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) &&
- (pDevice->eEncryptionStatus != Ndis802_11Encryption2Enabled) &&
- (pDevice->eEncryptionStatus != Ndis802_11Encryption3Enabled)) {
- // encryption mode error
- *pStatus = CMD_STATUS_FAILURE;
- return;
- }
- }
-
- pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- pMgmt->abyCurrExtSuppRates[0] = WLAN_EID_EXTSUPP_RATES;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- pMgmt->eCurrentPHYMode = pMgmt->byAPBBType;
- } else {
- if (pDevice->byBBType == BB_TYPE_11G)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11G;
- if (pDevice->byBBType == BB_TYPE_11B)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11B;
- if (pDevice->byBBType == BB_TYPE_11A)
- pMgmt->eCurrentPHYMode = PHY_TYPE_11A;
- }
-
- if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A) {
- pMgmt->abyCurrSuppRates[1] = WLAN_RATES_MAXLEN_11B;
- pMgmt->abyCurrExtSuppRates[1] = 0;
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
- } else {
- pMgmt->abyCurrSuppRates[1] = 8;
- pMgmt->abyCurrExtSuppRates[1] = 0;
- for (ii = 0; ii < 8; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyRATE[ii];
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- pMgmt->abyCurrSuppRates[1] = 8;
- pMgmt->abyCurrExtSuppRates[1] = 4;
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyCCK_RATE[ii];
- for (ii = 4; ii < 8; ii++)
- pMgmt->abyCurrSuppRates[2+ii] = abyOFDM_RATE[ii-4];
- for (ii = 0; ii < 4; ii++)
- pMgmt->abyCurrExtSuppRates[2+ii] = abyOFDM_RATE[ii+4];
- }
-
- // Disable Protect Mode
- pDevice->bProtectMode = false;
- MACvDisableProtectMD(pDevice->PortOffset);
-
- pDevice->bBarkerPreambleMd = false;
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
-
- // Kyle Test 2003.11.04
-
- // set HW beacon interval
- if (pMgmt->wIBSSBeaconPeriod == 0)
- pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
-
- CARDbGetCurrentTSF(pDevice->PortOffset, &qwCurrTSF);
- // clear TSF counter
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- // enable TSF counter
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
-
- // set Next TBTT
- CARDvSetFirstNextTBTT(pDevice->PortOffset, pMgmt->wIBSSBeaconPeriod);
-
- pMgmt->uIBSSChannel = pDevice->uChannel;
-
- if (pMgmt->uIBSSChannel == 0)
- pMgmt->uIBSSChannel = DEFAULT_IBSS_CHANNEL;
-
- // set basic rate
-
- RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, true,
- &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP)
- pMgmt->eCurrMode = WMAC_MODE_ESS_AP;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
- memcpy(pMgmt->abyIBSSDFSOwner, pDevice->abyCurrentNetAddr, 6);
- pMgmt->byIBSSDFSRecovery = 10;
- pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
- }
-
- // Adopt pre-configured IBSS vars to current vars
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- pMgmt->wCurrBeaconPeriod = pMgmt->wIBSSBeaconPeriod;
- pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
- pMgmt->wCurrATIMWindow = pMgmt->wIBSSATIMWindow;
- MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
- pDevice->uCurrRSSI = 0;
- pDevice->byCurrSQ = 0;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyCurrSSID,
- pMgmt->abyDesireSSID,
- ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN
-);
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- // AP mode BSSID = MAC addr
- memcpy(pMgmt->abyCurrBSSID, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- pr_info("AP beacon created BSSID:%pM\n",
- pMgmt->abyCurrBSSID);
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // The BSSID selected must be randomized, per spec 11.1.3 (see the sketch after this routine)
- pMgmt->abyCurrBSSID[5] = (u8) (qwCurrTSF & 0x000000ff);
- pMgmt->abyCurrBSSID[4] = (u8) ((qwCurrTSF & 0x0000ff00) >> 8);
- pMgmt->abyCurrBSSID[3] = (u8) ((qwCurrTSF & 0x00ff0000) >> 16);
- pMgmt->abyCurrBSSID[2] = (u8) ((qwCurrTSF & 0x00000ff0) >> 4);
- pMgmt->abyCurrBSSID[1] = (u8) ((qwCurrTSF & 0x000ff000) >> 12);
- pMgmt->abyCurrBSSID[0] = (u8) ((qwCurrTSF & 0x0ff00000) >> 20);
- pMgmt->abyCurrBSSID[5] ^= pMgmt->abyMACAddr[0];
- pMgmt->abyCurrBSSID[4] ^= pMgmt->abyMACAddr[1];
- pMgmt->abyCurrBSSID[3] ^= pMgmt->abyMACAddr[2];
- pMgmt->abyCurrBSSID[2] ^= pMgmt->abyMACAddr[3];
- pMgmt->abyCurrBSSID[1] ^= pMgmt->abyMACAddr[4];
- pMgmt->abyCurrBSSID[0] ^= pMgmt->abyMACAddr[5];
- pMgmt->abyCurrBSSID[0] &= ~IEEE_ADDR_GROUP;
- pMgmt->abyCurrBSSID[0] |= IEEE_ADDR_UNIVERSAL;
-
- pr_info("Adhoc beacon created bssid:%pM\n",
- pMgmt->abyCurrBSSID);
- }
-
- // Set Capability Info
- pMgmt->wCurrCapInfo = 0;
-
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_ESS(1);
- pMgmt->byDTIMPeriod = DEFAULT_DTIM_PERIOD;
- pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_IBSS(1);
-
- if (pDevice->bEncryptionEnable) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- pMgmt->byCSSPK = KEY_CTL_CCMP;
- pMgmt->byCSSGK = KEY_CTL_CCMP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- pMgmt->byCSSPK = KEY_CTL_TKIP;
- pMgmt->byCSSGK = KEY_CTL_TKIP;
- } else {
- pMgmt->byCSSPK = KEY_CTL_NONE;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- }
- } else {
- pMgmt->byCSSPK = KEY_CTL_WEP;
- pMgmt->byCSSGK = KEY_CTL_WEP;
- }
- }
-
- pMgmt->byERPContext = 0;
-
- if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_AP);
- } else {
- CARDbSetBSSID(pMgmt->pAdapter, pMgmt->abyCurrBSSID, NL80211_IFTYPE_ADHOC);
- }
-
- CARDbSetPhyParameter(pMgmt->pAdapter,
- pMgmt->eCurrentPHYMode,
- pMgmt->wCurrCapInfo,
- pMgmt->byERPContext,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates
- );
-
- CARDbSetBeaconPeriod(pMgmt->pAdapter, pMgmt->wIBSSBeaconPeriod);
- // set channel and clear NAV
- set_channel(pMgmt->pAdapter, pMgmt->uIBSSChannel);
- pMgmt->uCurrChannel = pMgmt->uIBSSChannel;
-
- if (CARDbIsShortPreamble(pMgmt->pAdapter))
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTPREAMBLE(1);
- else
- pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SHORTPREAMBLE(1));
-
- if (pMgmt->b11hEnable &&
- (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SPECTRUMMNG(1);
- } else {
- pMgmt->wCurrCapInfo &= (~WLAN_SET_CAP_INFO_SPECTRUMMNG(1));
- }
-
- pMgmt->eCurrState = WMAC_STATE_STARTED;
- // Prepare beacon to send
- if (bMgrPrepareBeaconToSend((void *)pDevice, pMgmt))
- *pStatus = CMD_STATUS_SUCCESS;
-}
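The ad-hoc BSSID built above is a pseudo-random, locally administered individual address: TSF bits are mixed with the local MAC, the group bit is cleared, and the universal/local bit is set, as 802.11 11.1.3 requires. A minimal sketch of that construction (the helper name and raw bit masks are assumptions, not the driver's macros):

#include <stdint.h>

/* Illustrative construction of a random IBSS BSSID as in vMgrCreateOwnIBSS():
 * mix TSF bits with the local MAC, then force an individual (group bit 0),
 * locally administered (U/L bit 1) address per IEEE 802.11 11.1.3. */
static void make_ibss_bssid(uint8_t bssid[6], const uint8_t mac[6], uint64_t tsf)
{
	bssid[5] = (uint8_t)(tsf & 0xff)         ^ mac[0];
	bssid[4] = (uint8_t)((tsf >> 8)  & 0xff) ^ mac[1];
	bssid[3] = (uint8_t)((tsf >> 16) & 0xff) ^ mac[2];
	bssid[2] = (uint8_t)((tsf >> 4)  & 0xff) ^ mac[3];
	bssid[1] = (uint8_t)((tsf >> 12) & 0xff) ^ mac[4];
	bssid[0] = (uint8_t)((tsf >> 20) & 0xff) ^ mac[5];

	bssid[0] &= ~0x01;	/* clear individual/group bit -> unicast         */
	bssid[0] |= 0x02;	/* set universal/local bit -> locally administered */
}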
-
-/*+
- *
- * Routine Description:
- * Instructs the wmac to join a BSS using the supplied attributes.
- * The arguments may be the BSSID or SSID, and the rest of the
- * attributes are obtained from the scan results in the known BSS list.
- *
- *
- * Return Value:
- * None.
- *
- -*/
-
-void
-vMgrJoinBSSBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PKnownBSS pCurr = NULL;
- unsigned int ii, uu;
- PWLAN_IE_SUPP_RATES pItemRates = NULL;
- PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
- PWLAN_IE_SSID pItemSSID;
- unsigned int uRateLen = WLAN_RATES_MAXLEN;
- unsigned short wMaxBasicRate = RATE_1M;
- unsigned short wMaxSuppRate = RATE_1M;
- unsigned short wSuppRate;
- unsigned char byTopCCKBasicRate = RATE_1M;
- unsigned char byTopOFDMBasicRate = RATE_1M;
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- if (pMgmt->sBSSList[ii].bActive)
- break;
- }
-
- if (ii == MAX_BSS_NUM) {
- *pStatus = CMD_STATUS_RESOURCES;
- pr_info("BSS finding:BSS list is empty\n");
- return;
- }
-
- // Search the known BSS list for the preferred BSSID or SSID
-
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- *pStatus = CMD_STATUS_RESOURCES;
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pr_info("Scanning [%s] not found, disconnected !\n",
- pItemSSID->abySSID);
- return;
- }
-
- pr_info("AP(BSS) finding:Found a AP(BSS)..\n");
- if (WLAN_GET_CAP_INFO_ESS(cpu_to_le16(pCurr->wCapInfo))) {
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) || (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- // patch for CISCO migration mode
- }
-
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- Encyption_Rebuild(pDevice, pCurr);
-#endif
- // Infrastructure BSS
- s_vMgrSynchBSS(pDevice,
- WMAC_MODE_ESS_STA,
- pCurr,
- pStatus
-);
-
- if (*pStatus == CMD_STATUS_SUCCESS) {
- // Adopt this BSS state vars in Mgmt Object
- pMgmt->uCurrChannel = pCurr->uChannel;
-
- memset(pMgmt->abyCurrSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
- memset(pMgmt->abyCurrExtSuppRates, 0 , WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1);
-
- if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B)
- uRateLen = WLAN_RATES_MAXLEN_11B;
-
- pItemRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates;
- pItemExtRates = (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates;
-
- // Parse Support Rate IE
- pItemRates->byElementID = WLAN_EID_SUPP_RATES;
- pItemRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates,
- pItemRates,
- uRateLen);
-
- // Parse Extension Support Rate IE
- pItemExtRates->byElementID = WLAN_EID_EXTSUPP_RATES;
- pItemExtRates->len = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abyExtSuppRates,
- pItemExtRates,
- uRateLen);
- // Stuff extended rates into the Supported Rates IE (up to 8 rates); see the sketch after this routine
- if ((pItemExtRates->len > 0) && (pItemRates->len < 8)) {
- for (ii = 0; ii < (unsigned int)(8 - pItemRates->len);) {
- pItemRates->abyRates[pItemRates->len + ii] = pItemExtRates->abyRates[ii];
- ii++;
- if (pItemExtRates->len <= ii)
- break;
- }
- pItemRates->len += (unsigned char)ii;
- if (pItemExtRates->len - ii > 0) {
- pItemExtRates->len -= (unsigned char)ii;
- for (uu = 0; uu < pItemExtRates->len; uu++)
- pItemExtRates->abyRates[uu] = pItemExtRates->abyRates[uu + ii];
- } else {
- pItemExtRates->len = 0;
- }
- }
-
- RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, true,
- &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- // TODO: handle the case where privacy is set in wCapInfo but station WEP is off
- // TODO: handle the case where PS-Pollable is set in wCapInfo
- pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
-
- pMgmt->eCurrMode = WMAC_MODE_ESS_STA;
-
- pMgmt->eCurrState = WMAC_STATE_JOINTED;
-
- // Add the current BSS to the Candidate list
- // This only works for WPA2 BSSs, and the WPA2 BSS check must be done beforehand.
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- bool bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
-
- pr_debug("bAdd_PMKID_Candidate: 1(%d)\n",
- bResult);
- if (!bResult) {
- vFlush_PMKID_Candidate((void *)pDevice);
- pr_debug("vFlush_PMKID_Candidate: 4\n");
- bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
- }
- }
-
- // Preamble type auto-switch: if the AP advertises the short-preamble capability,
- // we can turn it on too.
-
- pr_debug("Join ESS\n");
-
- pr_debug("End of Join AP -- A/B/G Action\n");
- } else {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
-
- } else {
- // ad-hoc mode BSS
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (!WPA_SearchRSN(0, WPA_TKIP, pCurr)) {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (!WPA_SearchRSN(0, WPA_AESCCMP, pCurr)) {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- } else {
- // encryption mode error
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- return;
- }
- }
-
- s_vMgrSynchBSS(pDevice,
- WMAC_MODE_IBSS_STA,
- pCurr,
- pStatus
-);
-
- if (*pStatus == CMD_STATUS_SUCCESS) {
- // Adopt this BSS's state vars in the Mgmt Object
- // TODO: check if privacy is set in CapInfo; we don't handle it yet..
- pMgmt->uCurrChannel = pCurr->uChannel;
-
- // Parse Support Rate IE
- pMgmt->abyCurrSuppRates[0] = WLAN_EID_SUPP_RATES;
- pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pCurr->abySuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- WLAN_RATES_MAXLEN_11B);
- // set basic rate
- RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- NULL, true, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
- &byTopCCKBasicRate, &byTopOFDMBasicRate);
-
- pMgmt->wCurrCapInfo = pCurr->wCapInfo;
- pMgmt->wCurrBeaconPeriod = pCurr->wBeaconInterval;
- memset(pMgmt->abyCurrSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN);
- memcpy(pMgmt->abyCurrBSSID, pCurr->abyBSSID, WLAN_BSSID_LEN);
- memcpy(pMgmt->abyCurrSSID, pCurr->abySSID, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN);
- MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
- pMgmt->eCurrMode = WMAC_MODE_IBSS_STA;
-
- pMgmt->eCurrState = WMAC_STATE_STARTED;
-
- pr_debug("Join IBSS ok:%pM\n",
- pMgmt->abyCurrBSSID);
- // Preamble type auto-switch: if the AP advertises the short-preamble capability,
- // and if the registry setting is short preamble, we can turn it on too.
-
- // Prepare beacon
- bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
- } else {
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- }
- }
-}
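The rate "stuffing" step referenced above simply tops up the Supported Rates IE to eight entries from the Extended Supported Rates IE and shifts the remainder down. A minimal sketch over bare rate arrays (not the driver's IE structures):

#include <stdint.h>
#include <string.h>

/* Sketch of the stuffing step in vMgrJoinBSSBegin(): while the Supported
 * Rates set holds fewer than 8 rates, move rates in from the Extended
 * Supported Rates set, shifting the remaining extended rates down. */
static void stuff_rates(uint8_t *rates, uint8_t *n_rates,
			uint8_t *ext_rates, uint8_t *n_ext)
{
	while (*n_rates < 8 && *n_ext > 0) {
		rates[(*n_rates)++] = ext_rates[0];
		memmove(ext_rates, ext_rates + 1, --(*n_ext));
	}
}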
-
-/*+
- *
- * Routine Description:
- * Set the HW to synchronize to a specific BSS from the known BSS list.
- *
- *
- * Return Value:
- * CMD_STATUS (returned via pStatus)
- *
- -*/
-static
-void
-s_vMgrSynchBSS(
- struct vnt_private *pDevice,
- unsigned int uBSSMode,
- PKnownBSS pCurr,
- PCMD_STATUS pStatus
-)
-{
- CARD_PHY_TYPE ePhyType = PHY_TYPE_11B;
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- //1M, 2M, 5.5M, 11M, 18M, 24M, 36M, 54M (rate octets are decoded in the sketch after this routine)
- unsigned char abyCurrSuppRatesG[] = {WLAN_EID_SUPP_RATES, 8, 0x02, 0x04, 0x0B, 0x16, 0x24, 0x30, 0x48, 0x6C};
- //6M, 9M, 12M, 48M
- unsigned char abyCurrExtSuppRatesG[] = {WLAN_EID_EXTSUPP_RATES, 4, 0x0C, 0x12, 0x18, 0x60};
- //6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
- unsigned char abyCurrSuppRatesA[] = {WLAN_EID_SUPP_RATES, 8, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
- //1M, 2M, 5.5M, 11M
- unsigned char abyCurrSuppRatesB[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
-
- *pStatus = CMD_STATUS_FAILURE;
-
- if (!s_bCipherMatch(pCurr,
- pDevice->eEncryptionStatus,
- &(pMgmt->byCSSPK),
- &(pMgmt->byCSSGK))) {
- pr_debug("s_bCipherMatch Fail .......\n");
- return;
- }
-
- pMgmt->pCurrBSS = pCurr;
-
- // if previous mode is IBSS.
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_BCNDMACTL, BEACON_READY);
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- }
-
- // Init the BSS information
- pDevice->bProtectMode = false;
- MACvDisableProtectMD(pDevice->PortOffset);
- pDevice->bBarkerPreambleMd = false;
- MACvDisableBarkerPreambleMd(pDevice->PortOffset);
- pDevice->bNonERPPresent = false;
- pDevice->byPreambleType = 0;
- pDevice->wBasicRate = 0;
- // Set Basic Rate
- CARDbAddBasicRate((void *)pDevice, RATE_1M);
- // calculate TSF offset
- // TSF Offset = Received Timestamp TSF - Marked Local's TSF
- CARDbUpdateTSF(pDevice, pCurr->byRxRate, pCurr->qwBSSTimestamp, pCurr->qwLocalTSF);
-
- CARDbSetBeaconPeriod(pDevice, pCurr->wBeaconInterval);
-
- // set Next TBTT
- // Next TBTT = ((local_current_TSF / beacon_interval) + 1) * beacon_interval
- CARDvSetFirstNextTBTT(pDevice->PortOffset, pCurr->wBeaconInterval);
-
- // set BSSID
- MACvWriteBSSIDAddress(pDevice->PortOffset, pCurr->abyBSSID);
-
- MACvReadBSSIDAddress(pDevice->PortOffset, pMgmt->abyCurrBSSID);
-
- pr_debug("Sync:set CurrBSSID address = %pM\n", pMgmt->abyCurrBSSID);
-
- if (pCurr->eNetworkTypeInUse == PHY_TYPE_11A) {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11A) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11A;
- } else {
- return;
- }
- } else if (pCurr->eNetworkTypeInUse == PHY_TYPE_11B) {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11B) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_11G) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11B;
- } else {
- return;
- }
- } else {
- if ((pMgmt->eConfigPHYMode == PHY_TYPE_11G) ||
- (pMgmt->eConfigPHYMode == PHY_TYPE_AUTO)) {
- ePhyType = PHY_TYPE_11G;
- } else if (pMgmt->eConfigPHYMode == PHY_TYPE_11B) {
- ePhyType = PHY_TYPE_11B;
- } else {
- return;
- }
- }
-
- if (ePhyType == PHY_TYPE_11A) {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesA[0], sizeof(abyCurrSuppRatesA));
- pMgmt->abyCurrExtSuppRates[1] = 0;
- } else if (ePhyType == PHY_TYPE_11B) {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesB[0], sizeof(abyCurrSuppRatesB));
- pMgmt->abyCurrExtSuppRates[1] = 0;
- } else {
- memcpy(pMgmt->abyCurrSuppRates, &abyCurrSuppRatesG[0], sizeof(abyCurrSuppRatesG));
- memcpy(pMgmt->abyCurrExtSuppRates, &abyCurrExtSuppRatesG[0], sizeof(abyCurrExtSuppRatesG));
- }
-
- if (WLAN_GET_CAP_INFO_ESS(pCurr->wCapInfo)) {
- CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, NL80211_IFTYPE_STATION);
- // Add the current BSS to the Candidate list
- // This only works for WPA2 BSSs, and the WPA2 BSS check must be done beforehand.
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- CARDbAdd_PMKID_Candidate(pMgmt->pAdapter, pMgmt->abyCurrBSSID, pCurr->sRSNCapObj.bRSNCapExist, pCurr->sRSNCapObj.wRSNCap);
- } else {
- CARDbSetBSSID(pMgmt->pAdapter, pCurr->abyBSSID, NL80211_IFTYPE_ADHOC);
- }
-
- if (!CARDbSetPhyParameter(pMgmt->pAdapter,
- ePhyType,
- pCurr->wCapInfo,
- pCurr->sERP.byERP,
- pMgmt->abyCurrSuppRates,
- pMgmt->abyCurrExtSuppRates)) {
- pr_debug("<----s_bSynchBSS Set Phy Mode Fail [%d]\n", ePhyType);
- return;
- }
- // set channel and clear NAV
- if (!set_channel(pMgmt->pAdapter, pCurr->uChannel)) {
- pr_debug("<----s_bSynchBSS Set Channel [%d]\n",
- pCurr->uChannel);
- return;
- }
-
- pMgmt->uCurrChannel = pCurr->uChannel;
- pMgmt->eCurrentPHYMode = ePhyType;
- pMgmt->byERPContext = pCurr->sERP.byERP;
- pr_debug("Sync:Set to channel = [%d]\n", (int)pCurr->uChannel);
-
- *pStatus = CMD_STATUS_SUCCESS;
-
- return;
-}
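The rate tables used above follow the standard Supported Rates IE encoding: each octet holds the rate in 500 kb/s units in its low seven bits, and bit 7 flags a basic (mandatory) rate. A minimal decoder for one such octet, purely illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Decode one Supported Rates octet (e.g. 0x96 -> 11 Mb/s, basic;
 * 0x6C -> 54 Mb/s, not basic). */
static unsigned rate_kbps(uint8_t rate_octet, bool *is_basic)
{
	if (is_basic)
		*is_basic = (rate_octet & 0x80) != 0;
	return (rate_octet & 0x7f) * 500;	/* units of 500 kb/s */
}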
-
-//mike add: fix NetworkManager 0.7.0 hidden SSID mode with WPA encryption;
-// we need to reset eAuthenMode and eEncryptionStatus
-static void Encyption_Rebuild(
- struct vnt_private *pDevice,
- PKnownBSS pCurr
-)
-{
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
-
- if ((pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) || // NetworkManager 0.7.0 does not give the pairwise-key selection,
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) { // so we need to re-select it according to the real pairwise-key info.
- if (pCurr->bWPAValid) { //WPA-PSK
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- if (pCurr->abyPKType[0] == WPA_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-TKIP]\n");
- } else if (pCurr->abyPKType[0] == WPA_AESCCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPAPSK-AES]\n");
- }
- } else if (pCurr->bWPA2Valid) { //WPA2-PSK
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_TKIP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; //TKIP
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-TKIP]\n");
- } else if (pCurr->abyCSSPK[0] == WLAN_11i_CSS_CCMP) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; //AES
- PRINT_K("Encyption_Rebuild--->ssid reset config to [WPA2PSK-AES]\n");
- }
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- * Format TIM field
- *
- *
- * Return Value:
- * void
- *
- -*/
-
-static
-void
-s_vMgrFormatTIM(
- PSMgmtObject pMgmt,
- PWLAN_IE_TIM pTIM
-)
-{
- unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
- unsigned char byMap;
- unsigned int ii, jj;
- bool bStartFound = false;
- bool bMulticast = false;
- unsigned short wStartIndex = 0;
- unsigned short wEndIndex = 0;
-
- // Find size of partial virtual bitmap
- for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
- byMap = pMgmt->abyPSTxMap[ii];
- if (!ii) {
- // Mask out the broadcast bit which is indicated separately.
- bMulticast = (byMap & byMask[0]) != 0;
- if (bMulticast)
- pMgmt->sNodeDBTable[0].bRxPSPoll = true;
-
- byMap = 0;
- }
- if (byMap) {
- if (!bStartFound) {
- bStartFound = true;
- wStartIndex = ii;
- }
- wEndIndex = ii;
- }
- }
-
- // Round start index down to nearest even number
- wStartIndex &= ~BIT0;
-
- // Round end index up to nearest even number
- wEndIndex = ((wEndIndex + 1) & ~BIT0);
-
- // Size of element payload
-
- pTIM->len = 3 + (wEndIndex - wStartIndex) + 1;
-
- // Fill in the Fixed parts of the TIM
- pTIM->byDTIMCount = pMgmt->byDTIMCount;
- pTIM->byDTIMPeriod = pMgmt->byDTIMPeriod;
- pTIM->byBitMapCtl = (bMulticast ? TIM_MULTICAST_MASK : 0) |
- (((wStartIndex >> 1) << 1) & TIM_BITMAPOFFSET_MASK);
-
- // Append variable part of TIM
-
- for (ii = wStartIndex, jj = 0; ii <= wEndIndex; ii++, jj++)
- pTIM->byVirtBitMap[jj] = pMgmt->abyPSTxMap[ii];
-
- // AID 0 is not used.
- pTIM->byVirtBitMap[0] &= ~BIT0;
-}
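The TIM built here is consumed on the receive side in s_vMgrRxBeacon above: the Bitmap Control octet carries the (even) byte offset of the partial virtual bitmap, and a station tests the bit for its own AID. A minimal sketch of that lookup (field names are illustrative, not the driver's structures):

#include <stdbool.h>
#include <stdint.h>

/* Test whether a given AID is set in a TIM partial virtual bitmap.
 * bitmap_ctl: the TIM Bitmap Control octet (bit 0 = multicast/DTIM flag,
 * bits 1-7 = bitmap offset, i.e. start byte / 2).
 * virt_bitmap/bitmap_len: the partial virtual bitmap that follows. */
static bool tim_aid_set(uint8_t bitmap_ctl, const uint8_t *virt_bitmap,
			uint8_t bitmap_len, uint16_t aid)
{
	uint16_t start = bitmap_ctl & 0xfe;	/* N1: first byte carried, always even */
	uint16_t index = (aid & 0x3fff) >> 3;	/* byte of the full bitmap holding AID */

	if (index < start || (uint16_t)(index - start) >= bitmap_len)
		return false;			/* AID lies outside the partial bitmap */
	return (virt_bitmap[index - start] >> (aid & 7)) & 1;
}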
-
-/*+
- *
- * Routine Description:
- * Constructs a Beacon frame (Ad-hoc mode)
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static
-PSTxMgmtPacket
-s_MgrMakeBeacon(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_BEACON sFrame;
- unsigned char abyBroadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- unsigned char *pbyBuffer;
- unsigned int uLength = 0;
- PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- unsigned int ii;
-
- // prepare beacon frame
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_BEACON_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_BEACON_FR_MAXLEN;
- vMgrEncodeBeacon(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_BEACON)
-));
-
- if (pDevice->bEnablePSMode)
- sFrame.pHdr->sA3.wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_PWRMGT(1));
-
- memcpy(sFrame.pHdr->sA3.abyAddr1, abyBroadcastAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN);
- *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod);
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- // Copy SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID,
- pCurrSSID,
- ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN
-);
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
- // DS parameter
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (1) + WLAN_IEHDR_LEN;
- sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
- sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
- }
- // TIM field
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- sFrame.pTIM = (PWLAN_IE_TIM)(sFrame.pBuf + sFrame.len);
- sFrame.pTIM->byElementID = WLAN_EID_TIM;
- s_vMgrFormatTIM(pMgmt, sFrame.pTIM);
- sFrame.len += (WLAN_IEHDR_LEN + sFrame.pTIM->len);
- }
-
- if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
- // IBSS parameter
- sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2) + WLAN_IEHDR_LEN;
- sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS;
- sFrame.pIBSSParms->len = 2;
- sFrame.pIBSSParms->wATIMWindow = wCurrATIMWinodw;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- /* RSN parameter */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
- sFrame.pRSNWPA->len = 12;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x04;//AES
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x02;//TKIP
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled)
- sFrame.pRSNWPA->abyMulticast[3] = 0x01;//WEP40
- else
- sFrame.pRSNWPA->abyMulticast[3] = 0x00;//NONE
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 0;
- // Auth Key Management Suite
- *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len)) = 0;
- sFrame.pRSNWPA->len += 2;
-
- // RSN Capabilities
- *((unsigned short *)(sFrame.pBuf + sFrame.len + sFrame.pRSNWPA->len)) = 0;
- sFrame.pRSNWPA->len += 2;
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- }
- }
-
- if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- // Country IE
- pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
- set_country_IE(pMgmt->pAdapter, pbyBuffer);
- set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
- uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
- pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
- // Power Constraint IE
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byElementID = WLAN_EID_PWR_CONSTRAINT;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->len = 1;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
- pbyBuffer += (1) + WLAN_IEHDR_LEN;
- uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel) {
- // Channel Switch IE
- ((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
- ((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
- ((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
- pbyBuffer += (3) + WLAN_IEHDR_LEN;
- uLength += (3) + WLAN_IEHDR_LEN;
- }
- // TPC report
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byElementID = WLAN_EID_TPC_REP;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->len = 2;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byLinkMargin = 0;
- pbyBuffer += (2) + WLAN_IEHDR_LEN;
- uLength += (2) + WLAN_IEHDR_LEN;
- // IBSS DFS
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- pIBSSDFS = (PWLAN_IE_IBSS_DFS) pbyBuffer;
- pIBSSDFS->byElementID = WLAN_EID_IBSS_DFS;
- pIBSSDFS->len = 7;
- memcpy(pIBSSDFS->abyDFSOwner,
- pMgmt->abyIBSSDFSOwner,
- 6);
- pIBSSDFS->byDFSRecovery = pMgmt->byIBSSDFSRecovery;
- pbyBuffer += (7) + WLAN_IEHDR_LEN;
- uLength += (7) + WLAN_IEHDR_LEN;
- for (ii = CB_MAX_CHANNEL_24G+1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
- pbyBuffer += 2;
- uLength += 2;
- pIBSSDFS->len += 2;
- }
- }
- }
- sFrame.len += uLength;
- }
-
- if (pMgmt->eCurrentPHYMode == PHY_TYPE_11G) {
- sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len);
- sFrame.len += 1 + WLAN_IEHDR_LEN;
- sFrame.pERP->byElementID = WLAN_EID_ERP;
- sFrame.pERP->len = 1;
- sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode)
- sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent)
- sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd)
- sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
- }
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
- // hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pMgmt->wWPAIELen != 0) {
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
- sFrame.len += pMgmt->wWPAIELen;
- }
- }
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a Probe Response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wCurrBeaconPeriod,
- unsigned int uCurrChannel,
- unsigned short wCurrATIMWinodw,
- unsigned char *pDstAddr,
- PWLAN_IE_SSID pCurrSSID,
- unsigned char *pCurrBSSID,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- unsigned char byPHYType
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_PROBERESP sFrame;
- unsigned char *pbyBuffer;
- unsigned int uLength = 0;
- PWLAN_IE_IBSS_DFS pIBSSDFS = NULL;
- unsigned int ii;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_PROBERESP_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_PROBERESP_FR_MAXLEN;
- vMgrEncodeProbeResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_PROBERESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pCurrBSSID, WLAN_BSSID_LEN);
- *sFrame.pwBeaconInterval = cpu_to_le16(wCurrBeaconPeriod);
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
-
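- // short slot time does not apply to 802.11b, so clear that capability bit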
- if (byPHYType == BB_TYPE_11B)
- *sFrame.pwCapInfo &= cpu_to_le16((unsigned short)~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1)));
-
- // Copy SSID
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID,
- pCurrSSID,
- ((PWLAN_IE_SSID)pCurrSSID)->len + WLAN_IEHDR_LEN
-);
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
-
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- // DS parameter
- if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
- sFrame.pDSParms = (PWLAN_IE_DS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (1) + WLAN_IEHDR_LEN;
- sFrame.pDSParms->byElementID = WLAN_EID_DS_PARMS;
- sFrame.pDSParms->len = 1;
- sFrame.pDSParms->byCurrChannel = (unsigned char)uCurrChannel;
- }
-
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- // IBSS parameter
- sFrame.pIBSSParms = (PWLAN_IE_IBSS_PARMS)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2) + WLAN_IEHDR_LEN;
- sFrame.pIBSSParms->byElementID = WLAN_EID_IBSS_PARMS;
- sFrame.pIBSSParms->len = 2;
- sFrame.pIBSSParms->wATIMWindow = 0;
- }
- if (pDevice->eCurrentPHYType == PHY_TYPE_11G) {
- sFrame.pERP = (PWLAN_IE_ERP)(sFrame.pBuf + sFrame.len);
- sFrame.len += 1 + WLAN_IEHDR_LEN;
- sFrame.pERP->byElementID = WLAN_EID_ERP;
- sFrame.pERP->len = 1;
- sFrame.pERP->byContext = 0;
- if (pDevice->bProtectMode)
- sFrame.pERP->byContext |= WLAN_EID_ERP_USE_PROTECTION;
- if (pDevice->bNonERPPresent)
- sFrame.pERP->byContext |= WLAN_EID_ERP_NONERP_PRESENT;
- if (pDevice->bBarkerPreambleMd)
- sFrame.pERP->byContext |= WLAN_EID_ERP_BARKER_MODE;
- }
-
- if (pMgmt->b11hEnable && (pMgmt->eCurrentPHYMode == PHY_TYPE_11A)) {
- // Country IE
- pbyBuffer = (unsigned char *)(sFrame.pBuf + sFrame.len);
- set_country_IE(pMgmt->pAdapter, pbyBuffer);
- set_country_info(pMgmt->pAdapter, PHY_TYPE_11A, pbyBuffer);
- uLength += ((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN;
- pbyBuffer += (((PWLAN_IE_COUNTRY) pbyBuffer)->len + WLAN_IEHDR_LEN);
- // Power Constrain IE
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byElementID = WLAN_EID_PWR_CONSTRAINT;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->len = 1;
- ((PWLAN_IE_PW_CONST) pbyBuffer)->byPower = 0;
- pbyBuffer += (1) + WLAN_IEHDR_LEN;
- uLength += (1) + WLAN_IEHDR_LEN;
- if (pMgmt->bSwitchChannel) {
- // Channel Switch IE
- ((PWLAN_IE_CH_SW) pbyBuffer)->byElementID = WLAN_EID_CH_SWITCH;
- ((PWLAN_IE_CH_SW) pbyBuffer)->len = 3;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byMode = 1;
- ((PWLAN_IE_CH_SW) pbyBuffer)->byChannel = get_channel_number(pMgmt->pAdapter, pMgmt->byNewChannel);
- ((PWLAN_IE_CH_SW) pbyBuffer)->byCount = 0;
- pbyBuffer += (3) + WLAN_IEHDR_LEN;
- uLength += (3) + WLAN_IEHDR_LEN;
- }
- // TPC report
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byElementID = WLAN_EID_TPC_REP;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->len = 2;
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter);
- ((PWLAN_IE_TPC_REP) pbyBuffer)->byLinkMargin = 0;
- pbyBuffer += (2) + WLAN_IEHDR_LEN;
- uLength += (2) + WLAN_IEHDR_LEN;
- // IBSS DFS
- if (pMgmt->eCurrMode != WMAC_MODE_ESS_AP) {
- pIBSSDFS = (PWLAN_IE_IBSS_DFS) pbyBuffer;
- pIBSSDFS->byElementID = WLAN_EID_IBSS_DFS;
- pIBSSDFS->len = 7;
- memcpy(pIBSSDFS->abyDFSOwner,
- pMgmt->abyIBSSDFSOwner,
- 6);
- pIBSSDFS->byDFSRecovery = pMgmt->byIBSSDFSRecovery;
- pbyBuffer += (7) + WLAN_IEHDR_LEN;
- uLength += (7) + WLAN_IEHDR_LEN;
- for (ii = CB_MAX_CHANNEL_24G + 1; ii <= CB_MAX_CHANNEL; ii++) {
- if (get_channel_map_info(pMgmt->pAdapter, ii, pbyBuffer, pbyBuffer+1)) {
- pbyBuffer += 2;
- uLength += 2;
- pIBSSDFS->len += 2;
- }
- }
- }
- sFrame.len += uLength;
- }
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // hostapd wpa/wpa2 IE
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) && pDevice->bEnableHostapd) {
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
- if (pMgmt->wWPAIELen != 0) {
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- memcpy(sFrame.pRSN, pMgmt->abyWPAIE, pMgmt->wWPAIELen);
- sFrame.len += pMgmt->wWPAIELen;
- }
- }
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs an association request frame
- *
- *
- * Return Value:
- * A ptr to frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCREQ sFrame;
- unsigned char *pbyIEs;
- unsigned char *pbyRSN;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure.
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_ASSOCREQ_FR_MAXLEN;
- // format fixed field frame structure
- vMgrEncodeAssocRequest(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- // Set the capability and listen interval
- *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo);
- *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval);
-
- // sFrame.len points to the end of the fixed fields
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN;
- pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN;
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
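- // in 802.11b mode only the first four rates are counted toward the frame length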
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11B) && (pCurrRates->len > 4))
- sFrame.len += 4 + WLAN_IEHDR_LEN;
- else
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
-
- // Copy the extension rate set
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11G) && (pCurrExtSuppRates->len > 0)) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
-
- // for 802.11h
- if (pMgmt->b11hEnable) {
- if (sFrame.pCurrPowerCap == NULL) {
- sFrame.pCurrPowerCap = (PWLAN_IE_PW_CAP)(sFrame.pBuf + sFrame.len);
- sFrame.len += (2 + WLAN_IEHDR_LEN);
- sFrame.pCurrPowerCap->byElementID = WLAN_EID_PWR_CAPABILITY;
- sFrame.pCurrPowerCap->len = 2;
- CARDvGetPowerCapability(pMgmt->pAdapter,
- &(sFrame.pCurrPowerCap->byMinPower),
- &(sFrame.pCurrPowerCap->byMaxPower)
-);
- }
- if (sFrame.pCurrSuppCh == NULL) {
- sFrame.pCurrSuppCh = (PWLAN_IE_SUPP_CH)(sFrame.pBuf + sFrame.len);
- sFrame.len += set_support_channels(pMgmt->pAdapter, (unsigned char *)sFrame.pCurrSuppCh);
- }
- }
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA IE */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
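- // initial length 16 = OUI/type(4) + version(2) + group cipher(4) + pairwise count(2) + pairwise suite(4)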
- sFrame.pRSNWPA->len = 16;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 1;
- sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00;
- sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50;
- sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
-
- // Auth Key Management Suite
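- // pbyRSN skips the two IE header octets plus the body built so far; the AKM count and suite are appended here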
- pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
- *pbyRSN++ = 0x01;
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
-
- *pbyRSN++ = 0x50;
- *pbyRSN++ = 0xf2;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)
- *pbyRSN++ = WPA_AUTH_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA)
- *pbyRSN++ = WPA_AUTH_IEEE802_1X;
- else
- *pbyRSN++ = WPA_NONE;
-
- sFrame.pRSNWPA->len += 6;
-
- // RSN Capabilities
-
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
- sFrame.pRSNWPA->len += 2;
-
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
-
- } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- unsigned int ii;
- unsigned short *pwPMKID;
-
- // WPA IE
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- sFrame.pRSN->byElementID = WLAN_EID_RSN;
- sFrame.pRSN->len = 6; //Version(2)+GK(4)
- sFrame.pRSN->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSN->abyRSN[0] = 0x00;
- sFrame.pRSN->abyRSN[1] = 0x0F;
- sFrame.pRSN->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSN->abyRSN[4] = 1;
- sFrame.pRSN->abyRSN[5] = 0;
- sFrame.pRSN->abyRSN[6] = 0x00;
- sFrame.pRSN->abyRSN[7] = 0x0F;
- sFrame.pRSN->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // Auth Key Management Suite
- sFrame.pRSN->abyRSN[10] = 1;
- sFrame.pRSN->abyRSN[11] = 0;
- sFrame.pRSN->abyRSN[12] = 0x00;
- sFrame.pRSN->abyRSN[13] = 0x0F;
- sFrame.pRSN->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
- memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- sFrame.pRSN->abyRSN[16] = 0;
- sFrame.pRSN->abyRSN[17] = 0;
- }
- sFrame.pRSN->len += 2;
-
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pbyRSN = &sFrame.pRSN->abyRSN[18];
- pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyRSN += 2; // Point to PMKID list
- for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
- }
- }
- if (*pwPMKID != 0)
- sFrame.pRSN->len += (2 + (*pwPMKID)*16);
- }
-
- sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a re-association request frame
- *
- *
- * Return Value:
- * A ptr to frame or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeReAssocRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned char *pDAddr,
- unsigned short wCurrCapInfo,
- unsigned short wListenInterval,
- PWLAN_IE_SSID pCurrSSID,
- PWLAN_IE_SUPP_RATES pCurrRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCREQ sFrame;
- unsigned char *pbyIEs;
- unsigned char *pbyRSN;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_REASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- /* Setup the sFrame structure. */
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCREQ_FR_MAXLEN;
-
- // format fixed field frame structure
- vMgrEncodeReassocRequest(&sFrame);
-
- /* Setup the header */
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCREQ)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- /* Set the capability and listen interval */
- *(sFrame.pwCapInfo) = cpu_to_le16(wCurrCapInfo);
- *(sFrame.pwListenInterval) = cpu_to_le16(wListenInterval);
-
- memcpy(sFrame.pAddrCurrAP, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
- /* Copy the SSID */
- /* sFrame.len points to the end of the fixed fields */
- sFrame.pSSID = (PWLAN_IE_SSID)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrSSID->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSSID, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength = pCurrSSID->len + WLAN_IEHDR_LEN;
- pMgmt->sAssocInfo.AssocInfo.OffsetRequestIEs = sizeof(NDIS_802_11_ASSOCIATION_INFORMATION);
- pbyIEs = pMgmt->sAssocInfo.abyIEs;
- memcpy(pbyIEs, pCurrSSID, pCurrSSID->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrSSID->len + WLAN_IEHDR_LEN;
-
- /* Copy the rate set */
- /* sFrame.len points to the end of the SSID */
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
-
- // Copy the extension rate set
- if ((pMgmt->eCurrentPHYMode == PHY_TYPE_11G) && (pCurrExtSuppRates->len > 0)) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += pCurrExtSuppRates->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates, pCurrExtSuppRates, pCurrExtSuppRates->len + WLAN_IEHDR_LEN);
- }
-
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += pCurrRates->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, pCurrRates, pCurrRates->len + WLAN_IEHDR_LEN);
- pbyIEs += pCurrRates->len + WLAN_IEHDR_LEN;
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPANONE)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA IE */
- sFrame.pRSNWPA = (PWLAN_IE_RSN_EXT)(sFrame.pBuf + sFrame.len);
- sFrame.pRSNWPA->byElementID = WLAN_EID_RSN_WPA;
- sFrame.pRSNWPA->len = 16;
- sFrame.pRSNWPA->abyOUI[0] = 0x00;
- sFrame.pRSNWPA->abyOUI[1] = 0x50;
- sFrame.pRSNWPA->abyOUI[2] = 0xf2;
- sFrame.pRSNWPA->abyOUI[3] = 0x01;
- sFrame.pRSNWPA->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSNWPA->abyMulticast[0] = 0x00;
- sFrame.pRSNWPA->abyMulticast[1] = 0x50;
- sFrame.pRSNWPA->abyMulticast[2] = 0xf2;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSNWPA->abyMulticast[3] = pMgmt->pCurrBSS->byGKType;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->abyMulticast[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->abyMulticast[3] = WPA_NONE;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSNWPA->wPKCount = 1;
- sFrame.pRSNWPA->PKSList[0].abyOUI[0] = 0x00;
- sFrame.pRSNWPA->PKSList[0].abyOUI[1] = 0x50;
- sFrame.pRSNWPA->PKSList[0].abyOUI[2] = 0xf2;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_AESCCMP;
- else
- sFrame.pRSNWPA->PKSList[0].abyOUI[3] = WPA_NONE;
-
- // Auth Key Management Suite
- pbyRSN = (unsigned char *)(sFrame.pBuf + sFrame.len + 2 + sFrame.pRSNWPA->len);
- *pbyRSN++ = 0x01;
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
-
- *pbyRSN++ = 0x50;
- *pbyRSN++ = 0xf2;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)
- *pbyRSN++ = WPA_AUTH_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA)
- *pbyRSN++ = WPA_AUTH_IEEE802_1X;
- else
- *pbyRSN++ = WPA_NONE;
-
- sFrame.pRSNWPA->len += 6;
-
- // RSN Capabilities
- *pbyRSN++ = 0x00;
- *pbyRSN++ = 0x00;
- sFrame.pRSNWPA->len += 2;
-
- sFrame.len += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSNWPA, sFrame.pRSNWPA->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSNWPA->len + WLAN_IEHDR_LEN;
-
- } else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- unsigned int ii;
- unsigned short *pwPMKID;
-
- /* WPA IE */
- sFrame.pRSN = (PWLAN_IE_RSN)(sFrame.pBuf + sFrame.len);
- sFrame.pRSN->byElementID = WLAN_EID_RSN;
- sFrame.pRSN->len = 6; //Version(2)+GK(4)
- sFrame.pRSN->wVersion = 1;
- //Group Key Cipher Suite
- sFrame.pRSN->abyRSN[0] = 0x00;
- sFrame.pRSN->abyRSN[1] = 0x0F;
- sFrame.pRSN->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- sFrame.pRSN->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- sFrame.pRSN->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- sFrame.pRSN->abyRSN[4] = 1;
- sFrame.pRSN->abyRSN[5] = 0;
- sFrame.pRSN->abyRSN[6] = 0x00;
- sFrame.pRSN->abyRSN[7] = 0x0F;
- sFrame.pRSN->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- sFrame.pRSN->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // Auth Key Management Suite
- sFrame.pRSN->abyRSN[10] = 1;
- sFrame.pRSN->abyRSN[11] = 0;
- sFrame.pRSN->abyRSN[12] = 0x00;
- sFrame.pRSN->abyRSN[13] = 0x0F;
- sFrame.pRSN->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- sFrame.pRSN->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- sFrame.pRSN->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist) {
- memcpy(&sFrame.pRSN->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- sFrame.pRSN->abyRSN[16] = 0;
- sFrame.pRSN->abyRSN[17] = 0;
- }
- sFrame.pRSN->len += 2;
-
- if ((pDevice->gsPMKID.BSSIDInfoCount > 0) && pDevice->bRoaming && (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pbyRSN = &sFrame.pRSN->abyRSN[18];
- pwPMKID = (unsigned short *)pbyRSN; // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyRSN += 2; // Point to PMKID list
- for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
- }
- }
-
- if (*pwPMKID != 0)
- sFrame.pRSN->len += (2 + (*pwPMKID) * 16);
- }
-
- sFrame.len += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- // copy to AssocInfo. for OID_802_11_ASSOCIATION_INFORMATION
- pMgmt->sAssocInfo.AssocInfo.RequestIELength += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- memcpy(pbyIEs, sFrame.pRSN, sFrame.pRSN->len + WLAN_IEHDR_LEN);
- pbyIEs += sFrame.pRSN->len + WLAN_IEHDR_LEN;
- }
-
- /* Adjust the length fields */
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs an assoc-response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_ASSOCRESP sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
- vMgrEncodeAssocResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ASSOCRESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- *sFrame.pwStatus = cpu_to_le16(wAssocStatus);
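- // the two most significant bits of the AID field are always set (802.11 AID encoding)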
- *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Constructs a reassoc-response frame
- *
- *
- * Return Value:
- * PTR to frame; or NULL on allocation failure
- *
- -*/
-
-static PSTxMgmtPacket
-s_MgrMakeReAssocResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- unsigned short wCurrCapInfo,
- unsigned short wAssocStatus,
- unsigned short wAssocAID,
- unsigned char *pDstAddr,
- PWLAN_IE_SUPP_RATES pCurrSuppRates,
- PWLAN_IE_SUPP_RATES pCurrExtSuppRates
-)
-{
- PSTxMgmtPacket pTxPacket = NULL;
- WLAN_FR_REASSOCRESP sFrame;
-
- pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool;
- memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_ASSOCREQ_FR_MAXLEN);
- pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket));
- // Setup the sFrame structure
- sFrame.pBuf = (unsigned char *)pTxPacket->p80211Header;
- sFrame.len = WLAN_REASSOCRESP_FR_MAXLEN;
- vMgrEncodeReassocResponse(&sFrame);
- // Setup the header
- sFrame.pHdr->sA3.wFrameCtl = cpu_to_le16(
- (
- WLAN_SET_FC_FTYPE(WLAN_TYPE_MGR) |
- WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_REASSOCRESP)
-));
- memcpy(sFrame.pHdr->sA3.abyAddr1, pDstAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr2, pMgmt->abyMACAddr, WLAN_ADDR_LEN);
- memcpy(sFrame.pHdr->sA3.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN);
-
- *sFrame.pwCapInfo = cpu_to_le16(wCurrCapInfo);
- *sFrame.pwStatus = cpu_to_le16(wAssocStatus);
- *sFrame.pwAid = cpu_to_le16((unsigned short)(wAssocAID | BIT14 | BIT15));
-
- // Copy the rate set
- sFrame.pSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pSuppRates,
- pCurrSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrSuppRates)->len + WLAN_IEHDR_LEN
-);
-
- if (((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len != 0) {
- sFrame.pExtSuppRates = (PWLAN_IE_SUPP_RATES)(sFrame.pBuf + sFrame.len);
- sFrame.len += ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
- memcpy(sFrame.pExtSuppRates,
- pCurrExtSuppRates,
- ((PWLAN_IE_SUPP_RATES)pCurrExtSuppRates)->len + WLAN_IEHDR_LEN
-);
- }
-
- // Adjust the length fields
- pTxPacket->cbMPDULen = sFrame.len;
- pTxPacket->cbPayloadLen = sFrame.len - WLAN_HDR_ADDR3_LEN;
-
- return pTxPacket;
-}
-
-/*+
- *
- * Routine Description:
- * Handles probe response management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vMgrRxProbeResponse(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- PKnownBSS pBSSList = NULL;
- WLAN_FR_PROBERESP sFrame;
- unsigned char byCurrChannel = pRxPacket->byRxChannel;
- ERPObject sERP;
- unsigned char byIEChannel = 0;
- bool bChannelHit = true;
-
- memset(&sFrame, 0, sizeof(WLAN_FR_PROBERESP));
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeProbeResponse(&sFrame);
-
- if ((sFrame.pqwTimestamp == NULL) ||
- (sFrame.pwBeaconInterval == NULL) ||
- (sFrame.pwCapInfo == NULL) ||
- (sFrame.pSSID == NULL) ||
- (sFrame.pSuppRates == NULL)) {
- pr_debug("Probe resp:Fail addr:[%p]\n",
- pRxPacket->p80211Header);
- DBG_PORT80(0xCC);
- return;
- }
-
- if (sFrame.pSSID->len == 0)
- pr_debug("Rx Probe resp: SSID len = 0\n");
-
- if (sFrame.pDSParms != NULL) {
- if (byCurrChannel > CB_MAX_CHANNEL_24G) {
- // remap the DS parameter channel to the 802.11a channel numbering
- byIEChannel = get_channel_mapping(pMgmt->pAdapter, sFrame.pDSParms->byCurrChannel, PHY_TYPE_11A);
- } else {
- byIEChannel = sFrame.pDSParms->byCurrChannel;
- }
- if (byCurrChannel != byIEChannel) {
- // adjust the channel info, since we may receive packets from an adjacent channel
- bChannelHit = false;
- byCurrChannel = byIEChannel;
- }
- } else {
- // no DS channel info
- bChannelHit = true;
- }
-
-//2008-0730-01<Add>by MikeLiu
- if (ChannelExceedZoneType(pDevice, byCurrChannel))
- return;
-
- if (sFrame.pERP != NULL) {
- sERP.byERP = sFrame.pERP->byContext;
- sERP.bERPExist = true;
- } else {
- sERP.bERPExist = false;
- sERP.byERP = 0;
- }
-
- // update or insert the bss
- pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
- if (pBSSList) {
- BSSbUpdateToBSSList((void *)pDevice,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- bChannelHit,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- pBSSList,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of probe response
- (void *)pRxPacket
-);
- } else {
- pr_debug("Probe resp/insert: RxChannel = %d\n",
- byCurrChannel);
- BSSbInsertToBSSList((void *)pDevice,
- sFrame.pHdr->sA3.abyAddr3,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of probe response
- (void *)pRxPacket
-);
- }
-}
-
-/*+
- *
- * Routine Description: (AP or Ad-hoc STA)
- * Handles probe request management frames.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-static
-void
-s_vMgrRxProbeRequest(
- struct vnt_private *pDevice,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- WLAN_FR_PROBEREQ sFrame;
- CMD_STATUS Status;
- PSTxMgmtPacket pTxPacket;
- unsigned char byPHYType = BB_TYPE_11B;
-
- // STA in ad-hoc mode: when the latest TBTT beacon was transmitted successfully,
- // the STA has to respond to this request.
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
- ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && pDevice->bBeaconSent)) {
- memset(&sFrame, 0, sizeof(WLAN_FR_PROBEREQ));
- // decode the frame
- sFrame.len = pRxPacket->cbMPDULen;
- sFrame.pBuf = (unsigned char *)pRxPacket->p80211Header;
- vMgrDecodeProbeRequest(&sFrame);
-
- if (sFrame.pSSID->len != 0) {
- if (sFrame.pSSID->len != ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len)
- return;
- if (memcmp(sFrame.pSSID->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->abySSID,
- ((PWLAN_IE_SSID)pMgmt->abyCurrSSID)->len) != 0) {
- return;
- }
- }
-
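- // more than four supported rates, or a present extended rate set, indicates an 802.11g-capable requester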
- if ((sFrame.pSuppRates->len > 4) || (sFrame.pExtSuppRates != NULL))
- byPHYType = BB_TYPE_11G;
-
- // Probe response reply..
- pTxPacket = s_MgrMakeProbeResponse
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- pMgmt->wCurrBeaconPeriod,
- pMgmt->uCurrChannel,
- 0,
- sFrame.pHdr->sA3.abyAddr2,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (unsigned char *)pMgmt->abyCurrBSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
- byPHYType
-);
- if (pTxPacket != NULL) {
- /* send the frame */
- Status = csMgmt_xmit(pDevice, pTxPacket);
- if (Status != CMD_STATUS_PENDING)
- pr_debug("Mgt:Probe response tx failed\n");
- }
- }
-}
-
-/*+
- *
- * Routine Description:
- *
- * Entry point for the reception and handling of 802.11 management
- * frames. Makes a determination of the frame type and then calls
- * the appropriate function.
- *
- *
- * Return Value:
- * none.
- *
- -*/
-
-void
-vMgrRxManagePacket(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- bool bInScan = false;
- unsigned int uNodeIndex = 0;
- NODE_STATE eNodeState = 0;
- CMD_STATUS Status;
-
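- // in AP mode, look up the sender's state so class 2 frames from unauthenticated STAs can be rejected below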
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
- if (BSSDBbIsSTAInNodeDB(pMgmt, pRxPacket->p80211Header->sA3.abyAddr2, &uNodeIndex))
- eNodeState = pMgmt->sNodeDBTable[uNodeIndex].eNodeState;
- }
-
- switch (WLAN_GET_FC_FSTYPE((pRxPacket->p80211Header->sA3.wFrameCtl))) {
- case WLAN_FSTYPE_ASSOCREQ:
- // Frame Class = 2
- pr_debug("rx assocreq\n");
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
- // reason code 6: class 2 frame received from a non-authenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 1\n");
- } else {
- s_vMgrRxAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex);
- }
- break;
-
- case WLAN_FSTYPE_ASSOCRESP:
- // Frame Class = 2
- pr_debug("rx assocresp1\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, false);
- pr_debug("rx assocresp2\n");
- break;
-
- case WLAN_FSTYPE_REASSOCREQ:
- // Frame Class = 2
- pr_debug("rx reassocreq\n");
- // Todo: reassoc
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
- // reason code 6: class 2 frame received from a non-authenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 2\n");
-
- }
- s_vMgrRxReAssocRequest(pDevice, pMgmt, pRxPacket, uNodeIndex);
- break;
-
- case WLAN_FSTYPE_REASSOCRESP:
- // Frame Class = 2
- pr_debug("rx reassocresp\n");
- s_vMgrRxAssocResponse(pDevice, pMgmt, pRxPacket, true);
- break;
-
- case WLAN_FSTYPE_PROBEREQ:
- // Frame Class = 0
- s_vMgrRxProbeRequest(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_PROBERESP:
- // Frame Class = 0
- pr_debug("rx proberesp\n");
-
- s_vMgrRxProbeResponse(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_BEACON:
- // Frame Class = 0
- if (pMgmt->eScanState != WMAC_NO_SCANNING)
- bInScan = true;
-
- s_vMgrRxBeacon(pDevice, pMgmt, pRxPacket, bInScan);
- break;
-
- case WLAN_FSTYPE_ATIM:
- // Frame Class = 1
- pr_debug("rx atim\n");
- break;
-
- case WLAN_FSTYPE_DISASSOC:
- // Frame Class = 2
- pr_debug("rx disassoc\n");
- if (eNodeState < NODE_AUTH) {
- // send deauth notification
- // reason code 6: class 2 frame received from a non-authenticated STA
- vMgrDeAuthenBeginSta(pDevice,
- pMgmt,
- pRxPacket->p80211Header->sA3.abyAddr2,
- (6),
- &Status
-);
- pr_debug("wmgr: send vMgrDeAuthenBeginSta 3\n");
- }
- s_vMgrRxDisassociation(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_AUTHEN:
- // Frame Class = 1
- pr_debug("rx authen\n");
- s_vMgrRxAuthentication(pDevice, pMgmt, pRxPacket);
- break;
-
- case WLAN_FSTYPE_DEAUTHEN:
- // Frame Class = 1
- pr_debug("rx deauthen\n");
- s_vMgrRxDeauthentication(pDevice, pMgmt, pRxPacket);
- break;
-
- default:
- pr_debug("rx unknown mgmt\n");
- }
-}
-
-/*+
- *
- * Routine Description:
- *
- * Prepares the beacon frame and submits it for transmission.
- *
- * Return Value:
- * true on success; false on failure.
- *
- -*/
-bool
-bMgrPrepareBeaconToSend(
- void *hDeviceContext,
- PSMgmtObject pMgmt
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- PSTxMgmtPacket pTxPacket;
-
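- // advertise the privacy capability whenever encryption or 802.1X is enabled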
- if (pDevice->bEncryptionEnable || pDevice->bEnable8021x)
- pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_PRIVACY(1);
- else
- pMgmt->wCurrCapInfo &= ~WLAN_SET_CAP_INFO_PRIVACY(1);
-
- pTxPacket = s_MgrMakeBeacon
- (
- pDevice,
- pMgmt,
- pMgmt->wCurrCapInfo,
- pMgmt->wCurrBeaconPeriod,
- pMgmt->uCurrChannel,
- pMgmt->wCurrATIMWindow,
- (PWLAN_IE_SSID)pMgmt->abyCurrSSID,
- (unsigned char *)pMgmt->abyCurrBSSID,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
- (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates
-);
-
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) &&
- (pMgmt->abyCurrBSSID[0] == 0))
- return false;
-
- csBeacon_xmit(pDevice, pTxPacket);
-
- return true;
-}
-
-/*+
- *
- * Routine Description:
- *
- * Log a warning message based on the contents of the Status
- * Code field of an 802.11 management frame. Defines are
- * derived from 802.11-1997 SPEC.
- *
- * Return Value:
- * none.
- *
- -*/
-static
-void
-s_vMgrLogStatus(
- PSMgmtObject pMgmt,
- unsigned short wStatus
-)
-{
- switch (wStatus) {
- case WLAN_MGMT_STATUS_UNSPEC_FAILURE:
- pr_info("Status code == Unspecified error\n");
- break;
- case WLAN_MGMT_STATUS_CAPS_UNSUPPORTED:
- pr_info("Status code == Can't support all requested capabilities\n");
- break;
- case WLAN_MGMT_STATUS_REASSOC_NO_ASSOC:
- pr_info("Status code == Reassoc denied, can't confirm original Association\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_UNSPEC:
- pr_info("Status code == Assoc denied, undefined in spec\n");
- break;
- case WLAN_MGMT_STATUS_UNSUPPORTED_AUTHALG:
- pr_info("Status code == Peer doesn't support authen algorithm\n");
- break;
- case WLAN_MGMT_STATUS_RX_AUTH_NOSEQ:
- pr_info("Status code == Authen frame received out of sequence\n");
- break;
- case WLAN_MGMT_STATUS_CHALLENGE_FAIL:
- pr_info("Status code == Authen rejected, challenge failure\n");
- break;
- case WLAN_MGMT_STATUS_AUTH_TIMEOUT:
- pr_info("Status code == Authen rejected, timeout waiting for next frame\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_BUSY:
- pr_info("Status code == Assoc denied, AP too busy\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_RATES:
- pr_info("Status code == Assoc denied, we don't have enough basic rates\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_SHORTPREAMBLE:
- pr_info("Status code == Assoc denied, we do not support short preamble\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC:
- pr_info("Status code == Assoc denied, we do not support PBCC\n");
- break;
- case WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY:
- pr_info("Status code == Assoc denied, we do not support channel agility\n");
- break;
- default:
- pr_info("Unknown status code %d\n", wStatus);
- break;
- }
-}
-
-/*
- *
- * Description:
- * Add a BSSID to the PMKID candidate list.
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure pointer
- * pbyBSSID - BSSID to add
- * psRSNCapObj - pointer to the BSS's RSN capability object
- * Out:
- * none
- *
- * Return Value: true if the candidate was added or updated; otherwise false.
- *
- -*/
-bool
-bAdd_PMKID_Candidate(
- void *hDeviceContext,
- unsigned char *pbyBSSID,
- PSRSNCapObject psRSNCapObj
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
- struct pmkid_candidate *pCandidateList;
- unsigned int ii = 0;
-
- pr_debug("bAdd_PMKID_Candidate START: (%d)\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
-
- if ((pDevice == NULL) || (pbyBSSID == NULL) || (psRSNCapObj == NULL))
- return false;
-
- if (pDevice->gsPMKIDCandidate.NumCandidates >= MAX_PMKIDLIST)
- return false;
-
- // Update Old Candidate
- for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
- if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- return true;
- }
- }
-
- // New Candidate
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[pDevice->gsPMKIDCandidate.NumCandidates];
- if (psRSNCapObj->bRSNCapExist && (psRSNCapObj->wRSNCap & BIT0))
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- else
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
-
- memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
- pDevice->gsPMKIDCandidate.NumCandidates++;
- pr_debug("NumCandidates:%d\n",
- (int)pDevice->gsPMKIDCandidate.NumCandidates);
- return true;
-}
-
-/*
- *
- * Description:
- * Flush PMKID Candidate list.
- *
- * Parameters:
- * In:
- * hDeviceContext - device structure pointer
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-vFlush_PMKID_Candidate(
- void *hDeviceContext
-)
-{
- struct vnt_private *pDevice = hDeviceContext;
-
- if (pDevice == NULL)
- return;
-
- memset(&pDevice->gsPMKIDCandidate, 0, sizeof(SPMKIDCandidateEvent));
-}
-
-static bool
-s_bCipherMatch(
- PKnownBSS pBSSNode,
- NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- unsigned char *pbyCCSPK,
- unsigned char *pbyCCSGK
-)
-{
- unsigned char byMulticastCipher = KEY_CTL_INVALID;
- unsigned char byCipherMask = 0x00;
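- // byCipherMask collects the pairwise ciphers offered by the BSS: 0x01 = WEP, 0x02 = TKIP, 0x04 = CCMP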
- int i;
-
- if (pBSSNode == NULL)
- return false;
-
- // check cap. of BSS
- if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- (EncStatus == Ndis802_11Encryption1Enabled)) {
- // default is WEP only
- byMulticastCipher = KEY_CTL_WEP;
- }
-
- if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- pBSSNode->bWPA2Valid &&
- //20080123-01,<Add> by Einsn Liu
- ((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
- //WPA2
- // check Group Key Cipher
- if ((pBSSNode->byCSSGK == WLAN_11i_CSS_WEP40) ||
- (pBSSNode->byCSSGK == WLAN_11i_CSS_WEP104)) {
- byMulticastCipher = KEY_CTL_WEP;
- } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_TKIP) {
- byMulticastCipher = KEY_CTL_TKIP;
- } else if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
- byMulticastCipher = KEY_CTL_CCMP;
- } else {
- byMulticastCipher = KEY_CTL_INVALID;
- }
-
- // check Pairwise Key Cipher
- for (i = 0; i < pBSSNode->wCSSPKCount; i++) {
- if ((pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP40) ||
- (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_WEP104)) {
- // this should not happen as defined by 802.11i
- byCipherMask |= 0x01;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_CCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyCSSPK[i] == WLAN_11i_CSS_USE_GROUP) {
- // use the group key only; ignore all other suites
- byCipherMask = 0;
- i = pBSSNode->wCSSPKCount;
- }
- }
-
- } else if ((WLAN_GET_CAP_INFO_PRIVACY(pBSSNode->wCapInfo) != 0) &&
- pBSSNode->bWPAValid &&
- ((EncStatus == Ndis802_11Encryption3Enabled) || (EncStatus == Ndis802_11Encryption2Enabled))) {
- //WPA
- // check Group Key Cipher
- if ((pBSSNode->byGKType == WPA_WEP40) ||
- (pBSSNode->byGKType == WPA_WEP104)) {
- byMulticastCipher = KEY_CTL_WEP;
- } else if (pBSSNode->byGKType == WPA_TKIP) {
- byMulticastCipher = KEY_CTL_TKIP;
- } else if (pBSSNode->byGKType == WPA_AESCCMP) {
- byMulticastCipher = KEY_CTL_CCMP;
- } else {
- byMulticastCipher = KEY_CTL_INVALID;
- }
-
- // check Pairwise Key Cipher
- for (i = 0; i < pBSSNode->wPKCount; i++) {
- if (pBSSNode->abyPKType[i] == WPA_TKIP) {
- byCipherMask |= 0x02;
- } else if (pBSSNode->abyPKType[i] == WPA_AESCCMP) {
- byCipherMask |= 0x04;
- } else if (pBSSNode->abyPKType[i] == WPA_NONE) {
- // use the group key only; ignore all other suites
- byCipherMask = 0;
- i = pBSSNode->wPKCount;
- }
- }
- }
-
- pr_debug("%d, %d, %d, %d, EncStatus:%d\n",
- byMulticastCipher, byCipherMask,
- pBSSNode->bWPAValid, pBSSNode->bWPA2Valid, EncStatus);
-
- // mask our cap. with BSS
- if (EncStatus == Ndis802_11Encryption1Enabled) {
- // To support Cisco migration mode, ignore the pairwise key cipher
- if ((byMulticastCipher == KEY_CTL_WEP) &&
- (byCipherMask == 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_NONE;
- return true;
- } else {
- return false;
- }
-
- } else if (EncStatus == Ndis802_11Encryption2Enabled) {
- if ((byMulticastCipher == KEY_CTL_TKIP) &&
- (byCipherMask == 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_NONE;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_WEP) &&
- ((byCipherMask & 0x02) != 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_TKIP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_TKIP) &&
- ((byCipherMask & 0x02) != 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_TKIP;
- return true;
- } else {
- return false;
- }
- } else if (EncStatus == Ndis802_11Encryption3Enabled) {
- if ((byMulticastCipher == KEY_CTL_CCMP) &&
- (byCipherMask == 0)) {
- // When CCMP is enabled, "Use group cipher suite" is not a valid option.
- return false;
- } else if ((byMulticastCipher == KEY_CTL_WEP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_WEP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_TKIP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_TKIP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else if ((byMulticastCipher == KEY_CTL_CCMP) &&
- ((byCipherMask & 0x04) != 0)) {
- *pbyCCSGK = KEY_CTL_CCMP;
- *pbyCCSPK = KEY_CTL_CCMP;
- return true;
- } else {
- return false;
- }
- }
- return true;
-}
diff --git a/drivers/staging/vt6655/wmgr.h b/drivers/staging/vt6655/wmgr.h
deleted file mode 100644
index ce939b30ac2a..000000000000
--- a/drivers/staging/vt6655/wmgr.h
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wmgr.h
- *
- * Purpose:
- *
- * Author: lyndon chen
- *
- * Date: Jan 2, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#ifndef __WMGR_H__
-#define __WMGR_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "wcmd.h"
-#include "bssdb.h"
-#include "wpa2.h"
-#include "vntwifi.h"
-#include "card.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-// Scan time
-#define PROBE_DELAY 100 // (us)
-#define SWITCH_CHANNEL_DELAY 200 // (us)
-#define WLAN_SCAN_MINITIME 25 // (ms)
-#define WLAN_SCAN_MAXTIME 100 // (ms)
-#define TRIVIAL_SYNC_DIFFERENCE 0 // (us)
-#define DEFAULT_IBSS_BI 100 // (ms)
-
-#define WCMD_ACTIVE_SCAN_TIME 50 //(ms)
-#define WCMD_PASSIVE_SCAN_TIME 100 //(ms)
-
-#define DEFAULT_MSDU_LIFETIME 512 // ms
-#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 // 64us
-
-#define DEFAULT_MGN_LIFETIME 8 // ms
-#define DEFAULT_MGN_LIFETIME_RES_64us 125 // 64us
-
-#define MAKE_BEACON_RESERVED 10 //(us)
-
-#define TIM_MULTICAST_MASK 0x01
-#define TIM_BITMAPOFFSET_MASK 0xFE
-#define DEFAULT_DTIM_PERIOD 1
-
-#define AP_LONG_RETRY_LIMIT 4
-
-#define DEFAULT_IBSS_CHANNEL 6 //2.4G
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-#define timer_expire(timer, next_tick) mod_timer(&timer, RUN_AT(next_tick))
-typedef void (*TimerFunction)(unsigned long);
-
-//+++ NDIS related
-
-typedef unsigned char NDIS_802_11_MAC_ADDRESS[6];
-typedef struct _NDIS_802_11_AI_REQFI {
- unsigned short Capabilities;
- unsigned short ListenInterval;
- NDIS_802_11_MAC_ADDRESS CurrentAPAddress;
-} NDIS_802_11_AI_REQFI, *PNDIS_802_11_AI_REQFI;
-
-typedef struct _NDIS_802_11_AI_RESFI {
- unsigned short Capabilities;
- unsigned short StatusCode;
- unsigned short AssociationId;
-} NDIS_802_11_AI_RESFI, *PNDIS_802_11_AI_RESFI;
-
-typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION {
- unsigned long Length;
- unsigned short AvailableRequestFixedIEs;
- NDIS_802_11_AI_REQFI RequestFixedIEs;
- unsigned long RequestIELength;
- unsigned long OffsetRequestIEs;
- unsigned short AvailableResponseFixedIEs;
- NDIS_802_11_AI_RESFI ResponseFixedIEs;
- unsigned long ResponseIELength;
- unsigned long OffsetResponseIEs;
-} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
-
-typedef struct tagSAssocInfo {
- NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo;
- unsigned char abyIEs[WLAN_BEACON_FR_MAXLEN+WLAN_BEACON_FR_MAXLEN];
- // store ReqIEs set by OID_802_11_ASSOCIATION_INFORMATION
- unsigned long RequestIELength;
- unsigned char abyReqIEs[WLAN_BEACON_FR_MAXLEN];
-} SAssocInfo, *PSAssocInfo;
-//---
-
-typedef enum tagWMAC_SCAN_TYPE {
- WMAC_SCAN_ACTIVE,
- WMAC_SCAN_PASSIVE,
- WMAC_SCAN_HYBRID
-} WMAC_SCAN_TYPE, *PWMAC_SCAN_TYPE;
-
-typedef enum tagWMAC_SCAN_STATE {
- WMAC_NO_SCANNING,
- WMAC_IS_SCANNING,
- WMAC_IS_PROBEPENDING
-} WMAC_SCAN_STATE, *PWMAC_SCAN_STATE;
-
-// Notes:
-// Basic Service Set state explained as following:
-// WMAC_STATE_IDLE : no BSS is selected (Adhoc or Infra)
-// WMAC_STATE_STARTED : no BSS is selected, start own IBSS (Adhoc only)
-// WMAC_STATE_JOINTED : BSS is selected and synchronized (Adhoc or Infra)
-// WMAC_STATE_AUTHPENDING : Authentication pending (Infra)
-// WMAC_STATE_AUTH : Authenticated (Infra)
-// WMAC_STATE_ASSOCPENDING : Association pending (Infra)
-// WMAC_STATE_ASSOC : Associated (Infra)
-
-typedef enum tagWMAC_BSS_STATE {
- WMAC_STATE_IDLE,
- WMAC_STATE_STARTED,
- WMAC_STATE_JOINTED,
- WMAC_STATE_AUTHPENDING,
- WMAC_STATE_AUTH,
- WMAC_STATE_ASSOCPENDING,
- WMAC_STATE_ASSOC
-} WMAC_BSS_STATE, *PWMAC_BSS_STATE;
-
-// WMAC selected running mode
-typedef enum tagWMAC_CURRENT_MODE {
- WMAC_MODE_STANDBY,
- WMAC_MODE_ESS_STA,
- WMAC_MODE_IBSS_STA,
- WMAC_MODE_ESS_AP
-} WMAC_CURRENT_MODE, *PWMAC_CURRENT_MODE;
-
-/*
- typedef enum tagWMAC_POWER_MODE {
- WMAC_POWER_CAM,
- WMAC_POWER_FAST,
- WMAC_POWER_MAX
-
- } WMAC_POWER_MODE, *PWMAC_POWER_MODE;
-*/
-
-// Tx Management Packet descriptor
-typedef struct tagSTxMgmtPacket {
- PUWLAN_80211HDR p80211Header;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
-} STxMgmtPacket, *PSTxMgmtPacket;
-
-// Rx Management Packet descriptor
-typedef struct tagSRxMgmtPacket {
- PUWLAN_80211HDR p80211Header;
- u64 qwLocalTSF;
- unsigned int cbMPDULen;
- unsigned int cbPayloadLen;
- unsigned int uRSSI;
- unsigned char bySQ;
- unsigned char byRxRate;
- unsigned char byRxChannel;
-} SRxMgmtPacket, *PSRxMgmtPacket;
-
-typedef struct tagSMgmtObject {
- void *pAdapter;
- // MAC address
- unsigned char abyMACAddr[WLAN_ADDR_LEN];
-
- // Configuration Mode
- WMAC_CONFIG_MODE eConfigMode; // MAC pre-configured mode
- CARD_PHY_TYPE eCurrentPHYMode;
- CARD_PHY_TYPE eConfigPHYMode;
-
- // Operation state variables
- WMAC_CURRENT_MODE eCurrMode; // MAC current connection mode
- WMAC_BSS_STATE eCurrState; // MAC current BSS state
-
- PKnownBSS pCurrBSS;
- unsigned char byCSSGK;
- unsigned char byCSSPK;
-
- // Current state vars
- unsigned int uCurrChannel;
- unsigned char abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyCurrBSSID[WLAN_BSSID_LEN];
- unsigned short wCurrCapInfo;
- unsigned short wCurrAID;
- unsigned short wCurrATIMWindow;
- unsigned short wCurrBeaconPeriod;
- bool bIsDS;
- unsigned char byERPContext;
-
- CMD_STATE eCommandState;
- unsigned int uScanChannel;
-
- // Desire joining BSS vars
- unsigned char abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyDesireBSSID[WLAN_BSSID_LEN];
-
- // Adhoc or AP configuration vars
- unsigned short wIBSSBeaconPeriod;
- unsigned short wIBSSATIMWindow;
- unsigned int uIBSSChannel;
- unsigned char abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- unsigned char byAPBBType;
- unsigned char abyWPAIE[MAX_WPA_IE_LEN];
- unsigned short wWPAIELen;
-
- unsigned int uAssocCount;
- bool bMoreData;
-
- // Scan state vars
- WMAC_SCAN_STATE eScanState;
- WMAC_SCAN_TYPE eScanType;
- unsigned int uScanStartCh;
- unsigned int uScanEndCh;
- unsigned short wScanSteps;
- unsigned int uScanBSSType;
- // Desire scanning vars
- unsigned char abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- unsigned char abyScanBSSID[WLAN_BSSID_LEN];
-
- // Privacy
- WMAC_AUTHENTICATION_MODE eAuthenMode;
- WMAC_ENCRYPTION_MODE eEncryptionMode;
- bool bShareKeyAlgorithm;
- unsigned char abyChallenge[WLAN_CHALLENGE_LEN];
- bool bPrivacyInvoked;
-
- // Received beacon state vars
- bool bInTIM;
- bool bMulticastTIM;
- unsigned char byDTIMCount;
- unsigned char byDTIMPeriod;
-
- // Power saving state vars
- WMAC_POWER_MODE ePSMode;
- unsigned short wListenInterval;
- unsigned short wCountToWakeUp;
- bool bInTIMWake;
- unsigned char *pbyPSPacketPool;
- unsigned char byPSPacketPool[sizeof(STxMgmtPacket) + WLAN_NULLDATA_FR_MAXLEN];
- bool bRxBeaconInTBTTWake;
- unsigned char abyPSTxMap[MAX_NODE_NUM + 1];
-
- // management command related
- unsigned int uCmdBusy;
- unsigned int uCmdHostAPBusy;
-
- // management packet pool
- unsigned char *pbyMgmtPacketPool;
- unsigned char byMgmtPacketPool[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
-
- // One second callback timer
- struct timer_list sTimerSecondCallback;
-
- // Temporarily Rx Mgmt Packet Descriptor
- SRxMgmtPacket sRxPacket;
-
- // list of known BSSs (scan results)
- KnownBSS sBSSList[MAX_BSS_NUM];
-
- // table of known nodes
- // sNodeDBTable[0] is reserved for the AP in infrastructure mode
- // sNodeDBTable[0] is reserved for multicast under ad-hoc/AP mode
- KnownNodeDB sNodeDBTable[MAX_NODE_NUM + 1];
-
- // WPA2 PMKID Cache
- SPMKIDCache gsPMKIDCache;
- bool bRoaming;
-
- // rate fall back vars
-
- // associate info
- SAssocInfo sAssocInfo;
-
- // for 802.11h
- bool b11hEnable;
- bool bSwitchChannel;
- unsigned char byNewChannel;
- PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
- unsigned int uLengthOfRepEIDs;
- unsigned char abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- unsigned char abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
- unsigned char abyIECountry[WLAN_A3FR_MAXLEN];
- unsigned char abyIBSSDFSOwner[6];
- unsigned char byIBSSDFSRecovery;
-
- struct sk_buff skb;
-} SMgmtObject, *PSMgmtObject;
-
-/*--------------------- Export Macros ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-vMgrObjectInit(
- void *hDeviceContext
-);
-
-void
-vMgrTimerInit(
- void *hDeviceContext
-);
-
-void
-vMgrObjectReset(
- void *hDeviceContext
-);
-
-void
-vMgrAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrReAssocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrDisassocBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrCreateOwnIBSS(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrJoinBSSBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-
-void
-vMgrRxManagePacket(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- PSRxMgmtPacket pRxPacket
-);
-
-/*
- void
- vMgrScanBegin(
- void *hDeviceContext,
- PCMD_STATUS pStatus
-);
-*/
-
-void
-vMgrDeAuthenBeginSta(
- void *hDeviceContext,
- PSMgmtObject pMgmt,
- unsigned char *abyDestAddress,
- unsigned short wReason,
- PCMD_STATUS pStatus
-);
-
-bool
-bMgrPrepareBeaconToSend(
- void *hDeviceContext,
- PSMgmtObject pMgmt
-);
-
-bool
-bAdd_PMKID_Candidate(
- void *hDeviceContext,
- unsigned char *pbyBSSID,
- PSRSNCapObject psRSNCapObj
-);
-
-void
-vFlush_PMKID_Candidate(
- void *hDeviceContext
-);
-
-#endif // __WMGR_H__
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
deleted file mode 100644
index 5d4eca8512af..000000000000
--- a/drivers/staging/vt6655/wpa.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa.c
- *
- * Purpose: Handles WPA (RSN) information element parsing
- *
- * Functions:
- * WPA_ParseRSN - Parse RSN IE.
- *
- * Revision History:
- *
- * Author: Kyle Hsu
- *
- * Date: July 14, 2003
- *
- */
-
-#include "ttype.h"
-#include "tmacro.h"
-#include "tether.h"
-#include "device.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-#include "wmgr.h"
-#include "wpa.h"
-#include "80211mgr.h"
-
-/*--------------------- Static Variables --------------------------*/
-static const unsigned char abyOUI00[4] = { 0x00, 0x50, 0xf2, 0x00 };
-static const unsigned char abyOUI01[4] = { 0x00, 0x50, 0xf2, 0x01 };
-static const unsigned char abyOUI02[4] = { 0x00, 0x50, 0xf2, 0x02 };
-static const unsigned char abyOUI03[4] = { 0x00, 0x50, 0xf2, 0x03 };
-static const unsigned char abyOUI04[4] = { 0x00, 0x50, 0xf2, 0x04 };
-static const unsigned char abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
-
-/*+
- *
- * Description:
- * Clear RSN information in BSSList.
- *
- * Parameters:
- * In:
- * pBSSList - BSS list.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-
-void
-WPA_ClearRSN(
- PKnownBSS pBSSList
-)
-{
- int ii;
-
- pBSSList->byGKType = WPA_TKIP;
- for (ii = 0; ii < 4; ii++)
- pBSSList->abyPKType[ii] = WPA_TKIP;
- pBSSList->wPKCount = 0;
- for (ii = 0; ii < 4; ii++)
- pBSSList->abyAuthType[ii] = WPA_AUTH_IEEE802_1X;
- pBSSList->wAuthCount = 0;
- pBSSList->byDefaultK_as_PK = 0;
- pBSSList->byReplayIdx = 0;
- pBSSList->sRSNCapObj.bRSNCapExist = false;
- pBSSList->sRSNCapObj.wRSNCap = 0;
- pBSSList->bWPAValid = false;
-}
-
-/*+
- *
- * Description:
- * Parse RSN IE.
- *
- * Parameters:
- * In:
- * pBSSList - BSS list.
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA_ParseRSN(
- PKnownBSS pBSSList,
- PWLAN_IE_RSN_EXT pRSN
-)
-{
- PWLAN_IE_RSN_AUTH pIE_RSN_Auth = NULL;
- int i, j, m, n = 0;
- unsigned char *pbyCaps;
-
- WPA_ClearRSN(pBSSList);
-
- pr_debug("WPA_ParseRSN: [%d]\n", pRSN->len);
-
- // information element header makes sense
- if ((pRSN->len >= 6) // oui1(4)+ver(2)
- && (pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4)
- && (pRSN->wVersion == 1)) {
- pr_debug("Legal RSN\n");
- // update each variable if pRSN is long enough to contain the variable
- if (pRSN->len >= 10) {
- //OUI1(4)+ver(2)+GKSuite(4)
- if (!memcmp(pRSN->abyMulticast, abyOUI01, 4))
- pBSSList->byGKType = WPA_WEP40;
- else if (!memcmp(pRSN->abyMulticast, abyOUI02, 4))
- pBSSList->byGKType = WPA_TKIP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI03, 4))
- pBSSList->byGKType = WPA_AESWRAP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI04, 4))
- pBSSList->byGKType = WPA_AESCCMP;
- else if (!memcmp(pRSN->abyMulticast, abyOUI05, 4))
- pBSSList->byGKType = WPA_WEP104;
- else
- // any vendor checks here
- pBSSList->byGKType = WPA_NONE;
-
- pr_debug("byGKType: %x\n", pBSSList->byGKType);
- }
-
- if (pRSN->len >= 12) {
- //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)
- j = 0;
- pr_debug("wPKCount: %d, sizeof(pBSSList->abyPKType): %zu\n",
- pRSN->wPKCount, sizeof(pBSSList->abyPKType));
- for (i = 0; (i < pRSN->wPKCount) && (j < ARRAY_SIZE(pBSSList->abyPKType)); i++) {
- if (pRSN->len >= 12+i*4+4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*i)
- if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI00, 4))
- pBSSList->abyPKType[j++] = WPA_NONE;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI02, 4))
- pBSSList->abyPKType[j++] = WPA_TKIP;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI03, 4))
- pBSSList->abyPKType[j++] = WPA_AESWRAP;
- else if (!memcmp(pRSN->PKSList[i].abyOUI, abyOUI04, 4))
- pBSSList->abyPKType[j++] = WPA_AESCCMP;
- else
- // any vendor checks here
- ;
- } else
- break;
- }
- pBSSList->wPKCount = (unsigned short)j;
- pr_debug("wPKCount: %d\n", pBSSList->wPKCount);
- }
-
- m = pRSN->wPKCount;
- pr_debug("m: %d\n", m);
- pr_debug("14+m*4: %d\n", 14+m*4);
-
- if (pRSN->len >= 14+m*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)
- // overlay IE_RSN_Auth structure into correct place
- pIE_RSN_Auth = (PWLAN_IE_RSN_AUTH) pRSN->PKSList[m].abyOUI;
- j = 0;
- pr_debug("wAuthCount: %d, sizeof(pBSSList->abyAuthType): %zu\n",
- pIE_RSN_Auth->wAuthCount,
- sizeof(pBSSList->abyAuthType));
- for (i = 0; (i < pIE_RSN_Auth->wAuthCount) && (j < ARRAY_SIZE(pBSSList->abyAuthType)); i++) {
- if (pRSN->len >= 14+4+(m+i)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*i)
- if (!memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI01, 4))
- pBSSList->abyAuthType[j++] = WPA_AUTH_IEEE802_1X;
- else if (!memcmp(pIE_RSN_Auth->AuthKSList[i].abyOUI, abyOUI02, 4))
- pBSSList->abyAuthType[j++] = WPA_AUTH_PSK;
- else
- // any vendor checks here
- ;
- } else
- break;
-
- }
- if (j > 0)
- pBSSList->wAuthCount = (unsigned short)j;
- pr_debug("wAuthCount: %d\n", pBSSList->wAuthCount);
- }
-
- if (pIE_RSN_Auth != NULL) {
- n = pIE_RSN_Auth->wAuthCount;
-
- pr_debug("n: %d\n", n);
- pr_debug("14+4+(m+n)*4: %d\n", 14+4+(m+n)*4);
-
- if (pRSN->len+2 >= 14+4+(m+n)*4) { //oui1(4)+ver(2)+GKS(4)+PKSCnt(2)+PKS(4*m)+AKC(2)+AKS(4*n)+Cap(2)
- pbyCaps = (unsigned char *)pIE_RSN_Auth->AuthKSList[n].abyOUI;
- pBSSList->byDefaultK_as_PK = (*pbyCaps) & WPA_GROUPFLAG;
- pBSSList->byReplayIdx = 2 << ((*pbyCaps >> WPA_REPLAYBITSSHIFT) & WPA_REPLAYBITS);
- pBSSList->sRSNCapObj.bRSNCapExist = true;
- pBSSList->sRSNCapObj.wRSNCap = *(unsigned short *)pbyCaps;
- }
- }
- pBSSList->bWPAValid = true;
- }
-}
-
-/*+
- *
- * Description:
- * Search RSN information in BSSList.
- *
- * Parameters:
- * In:
- * byCmd - Search type
- * byEncrypt- Encrypt Type
- * pBSSList - BSS list
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-WPA_SearchRSN(
- unsigned char byCmd,
- unsigned char byEncrypt,
- PKnownBSS pBSSList
-)
-{
- int ii;
- unsigned char byPKType = WPA_NONE;
-
- if (!pBSSList->bWPAValid)
- return false;
-
- switch (byCmd) {
- case 0:
-
- if (byEncrypt != pBSSList->byGKType)
- return false;
-
- if (pBSSList->wPKCount > 0) {
- for (ii = 0; ii < pBSSList->wPKCount; ii++) {
- if (pBSSList->abyPKType[ii] == WPA_AESCCMP)
- byPKType = WPA_AESCCMP;
- else if ((pBSSList->abyPKType[ii] == WPA_TKIP) && (byPKType != WPA_AESCCMP))
- byPKType = WPA_TKIP;
- else if ((pBSSList->abyPKType[ii] == WPA_WEP40) && (byPKType != WPA_AESCCMP) && (byPKType != WPA_TKIP))
- byPKType = WPA_WEP40;
- else if ((pBSSList->abyPKType[ii] == WPA_WEP104) && (byPKType != WPA_AESCCMP) && (byPKType != WPA_TKIP))
- byPKType = WPA_WEP104;
- }
- if (byEncrypt != byPKType)
- return false;
- }
- return true;
-
- default:
- break;
- }
- return false;
-}
-
-/*+
- *
- * Description:
- * Check if RSN IE makes sense.
- *
- * Parameters:
- * In:
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-bool
-WPAb_Is_RSN(
- PWLAN_IE_RSN_EXT pRSN
-)
-{
- if (pRSN == NULL)
- return false;
-
- if ((pRSN->len >= 6) && // oui1(4)+ver(2)
- (pRSN->byElementID == WLAN_EID_RSN_WPA) && !memcmp(pRSN->abyOUI, abyOUI01, 4) &&
- (pRSN->wVersion == 1)) {
- return true;
- } else
- return false;
-}
diff --git a/drivers/staging/vt6655/wpa.h b/drivers/staging/vt6655/wpa.h
deleted file mode 100644
index 1d1918a12641..000000000000
--- a/drivers/staging/vt6655/wpa.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- * with WPA information.
- *
- * Author: Kyle Hsu
- *
- * Date: Jul 14, 2003
- *
- */
-
-#ifndef __WPA_H__
-#define __WPA_H__
-
-#include "ttype.h"
-#include "80211hdr.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-#define WPA_NONE 0
-#define WPA_WEP40 1
-#define WPA_TKIP 2
-#define WPA_AESWRAP 3
-#define WPA_AESCCMP 4
-#define WPA_WEP104 5
-#define WPA_AUTH_IEEE802_1X 1
-#define WPA_AUTH_PSK 2
-
-#define WPA_GROUPFLAG 0x02
-#define WPA_REPLAYBITSSHIFT 2
-#define WPA_REPLAYBITS 0x03
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-WPA_ClearRSN(
- PKnownBSS pBSSList
-);
-
-void
-WPA_ParseRSN(
- PKnownBSS pBSSList,
- PWLAN_IE_RSN_EXT pRSN
-);
-
-bool
-WPA_SearchRSN(
- unsigned char byCmd,
- unsigned char byEncrypt,
- PKnownBSS pBSSList
-);
-
-bool
-WPAb_Is_RSN(
- PWLAN_IE_RSN_EXT pRSN
-);
-
-#endif // __WPA_H__
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
deleted file mode 100644
index bb335ef51172..000000000000
--- a/drivers/staging/vt6655/wpa2.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa2.c
- *
- * Purpose: Handles WPA2 (RSN information element) parsing functions
- *
- * Functions:
- *
- * Revision History:
- *
- * Author: Yiching Chen
- *
- * Date: Oct. 4, 2004
- *
- */
-
-#include "wpa2.h"
-#include "device.h"
-#include "wmgr.h"
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Variables --------------------------*/
-
-static const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
-static const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-static const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
-static const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-static const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
-
-static const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
-static const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-/*+
- *
- * Description:
- * Clear RSN information in BSSList.
- *
- * Parameters:
- * In:
- * pBSSNode - BSS list.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA2_ClearRSN(
- PKnownBSS pBSSNode
-)
-{
- int ii;
-
- pBSSNode->bWPA2Valid = false;
-
- pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
- for (ii = 0; ii < 4; ii++)
- pBSSNode->abyCSSPK[ii] = WLAN_11i_CSS_CCMP;
- pBSSNode->wCSSPKCount = 1;
- for (ii = 0; ii < 4; ii++)
- pBSSNode->abyAKMSSAuthType[ii] = WLAN_11i_AKMSS_802_1X;
- pBSSNode->wAKMSSAuthCount = 1;
- pBSSNode->sRSNCapObj.bRSNCapExist = false;
- pBSSNode->sRSNCapObj.wRSNCap = 0;
-}
-
-/*+
- *
- * Description:
- * Parse RSN IE.
- *
- * Parameters:
- * In:
- * pBSSNode - BSS list.
- * pRSN - Pointer to the RSN IE.
- * Out:
- * none
- *
- * Return Value: none.
- *
- -*/
-void
-WPA2vParseRSN(
- PKnownBSS pBSSNode,
- PWLAN_IE_RSN pRSN
-)
-{
- int i, j;
- unsigned short m = 0, n = 0;
- unsigned char *pbyOUI;
- bool bUseGK = false;
-
- pr_debug("WPA2_ParseRSN: [%d]\n", pRSN->len);
-
- WPA2_ClearRSN(pBSSNode);
-
- if (pRSN->len == 2) { // ver(2)
- if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1))
- pBSSNode->bWPA2Valid = true;
-
- return;
- }
-
- if (pRSN->len < 6) { // ver(2) + GK(4)
- // invalid CSS, P802.11i/D10.0, p31
- return;
- }
-
- // information element header makes sense
- if ((pRSN->byElementID == WLAN_EID_RSN) &&
- (pRSN->wVersion == 1)) {
- pr_debug("Legal 802.11i RSN\n");
-
- pbyOUI = &(pRSN->abyRSN[0]);
- if (!memcmp(pbyOUI, abyOUIWEP40, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_WEP40;
- else if (!memcmp(pbyOUI, abyOUITKIP, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_TKIP;
- else if (!memcmp(pbyOUI, abyOUICCMP, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
- else if (!memcmp(pbyOUI, abyOUIWEP104, 4))
- pBSSNode->byCSSGK = WLAN_11i_CSS_WEP104;
- else if (!memcmp(pbyOUI, abyOUIGK, 4)) {
- // invalid CSS, P802.11i/D10.0, p32
- return;
- } else
- // any vendor checks here
- pBSSNode->byCSSGK = WLAN_11i_CSS_UNKNOWN;
-
- pr_debug("802.11i CSS: %X\n", pBSSNode->byCSSGK);
-
- if (pRSN->len == 6) {
- pBSSNode->bWPA2Valid = true;
- return;
- }
-
- if (pRSN->len >= 8) { // ver(2) + GK(4) + PK count(2)
- pBSSNode->wCSSPKCount = *((unsigned short *)&(pRSN->abyRSN[4]));
- j = 0;
- pbyOUI = &(pRSN->abyRSN[6]);
-
- for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(unsigned char)); i++) {
- if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
- if (!memcmp(pbyOUI, abyOUIGK, 4)) {
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
- bUseGK = true;
- } else if (!memcmp(pbyOUI, abyOUIWEP40, 4)) {
- // Invalid CSS, continue parsing
- } else if (!memcmp(pbyOUI, abyOUITKIP, 4)) {
- if (pBSSNode->byCSSGK != WLAN_11i_CSS_CCMP)
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_TKIP;
- else
- ; // Invalid CSS, continue parsing
- } else if (!memcmp(pbyOUI, abyOUICCMP, 4)) {
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_CCMP;
- } else if (!memcmp(pbyOUI, abyOUIWEP104, 4)) {
- // Invalid CSS, continue parsing
- } else {
- // any vendor checks here
- pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_UNKNOWN;
- }
- pbyOUI += 4;
- pr_debug("abyCSSPK[%d]: %X\n",
- j-1, pBSSNode->abyCSSPK[j-1]);
- } else
- break;
- } //for
-
- if (bUseGK) {
- if (j != 1) {
- // invalid CSS, this should be the only PK CSS.
- return;
- }
- if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
- // invalid CSS, if CCMP is enabled, PK can't be CSSGK.
- return;
- }
- }
- if ((pBSSNode->wCSSPKCount != 0) && (j == 0)) {
- // invalid CSS, No valid PK.
- return;
- }
- pBSSNode->wCSSPKCount = (unsigned short)j;
- pr_debug("wCSSPKCount: %d\n", pBSSNode->wCSSPKCount);
- }
-
- m = *((unsigned short *)&(pRSN->abyRSN[4]));
-
- if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
- pBSSNode->wAKMSSAuthCount = *((unsigned short *)&(pRSN->abyRSN[6+4*m]));
- j = 0;
- pbyOUI = &(pRSN->abyRSN[8+4*m]);
- for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
- if (pRSN->len >= 10+(m+i)*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSS(2)+AKS(4*i)
- if (!memcmp(pbyOUI, abyOUI8021X, 4))
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_802_1X;
- else if (!memcmp(pbyOUI, abyOUIPSK, 4))
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_PSK;
- else
- // any vendor checks here
- pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_UNKNOWN;
- pr_debug("abyAKMSSAuthType[%d]: %X\n",
- j-1,
- pBSSNode->abyAKMSSAuthType[j-1]);
- } else
- break;
- }
- pBSSNode->wAKMSSAuthCount = (unsigned short)j;
- pr_debug("wAKMSSAuthCount: %d\n",
- pBSSNode->wAKMSSAuthCount);
-
- n = *((unsigned short *)&(pRSN->abyRSN[6+4*m]));
- if (pRSN->len >= 12 + 4 * m + 4 * n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
- pBSSNode->sRSNCapObj.bRSNCapExist = true;
- pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *)&(pRSN->abyRSN[8+4*m+4*n]));
- }
- }
- // ignore PMKID lists because only (Re)Association requests carry this field
- pBSSNode->bWPA2Valid = true;
- }
-}
-
-/*+
- *
- * Description:
- * Set WPA IEs
- *
- * Parameters:
- * In:
- * pMgmtHandle - Pointer to management object
- * Out:
- * pRSNIEs - Pointer to the RSN IE to set.
- *
- * Return Value: length of IEs.
- *
- -*/
-unsigned int
-WPA2uSetIEs(
- void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
-)
-{
- PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
- unsigned char *pbyBuffer = NULL;
- unsigned int ii = 0;
- unsigned short *pwPMKID = NULL;
-
- if (pRSNIEs == NULL)
- return 0;
-
- if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
- (pMgmt->pCurrBSS != NULL)) {
- /* WPA2 IE */
- pbyBuffer = (unsigned char *)pRSNIEs;
- pRSNIEs->byElementID = WLAN_EID_RSN;
- pRSNIEs->len = 6; //Version(2)+GK(4)
- pRSNIEs->wVersion = 1;
- //Group Key Cipher Suite
- pRSNIEs->abyRSN[0] = 0x00;
- pRSNIEs->abyRSN[1] = 0x0F;
- pRSNIEs->abyRSN[2] = 0xAC;
- if (pMgmt->byCSSGK == KEY_CTL_WEP)
- pRSNIEs->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
- else if (pMgmt->byCSSGK == KEY_CTL_TKIP)
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSGK == KEY_CTL_CCMP)
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_CCMP;
- else
- pRSNIEs->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
-
- // Pairwise Key Cipher Suite
- pRSNIEs->abyRSN[4] = 1;
- pRSNIEs->abyRSN[5] = 0;
- pRSNIEs->abyRSN[6] = 0x00;
- pRSNIEs->abyRSN[7] = 0x0F;
- pRSNIEs->abyRSN[8] = 0xAC;
- if (pMgmt->byCSSPK == KEY_CTL_TKIP)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_TKIP;
- else if (pMgmt->byCSSPK == KEY_CTL_CCMP)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_CCMP;
- else if (pMgmt->byCSSPK == KEY_CTL_NONE)
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
- else
- pRSNIEs->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
-
- pRSNIEs->len += 6;
-
- // Auth Key Management Suite
- pRSNIEs->abyRSN[10] = 1;
- pRSNIEs->abyRSN[11] = 0;
- pRSNIEs->abyRSN[12] = 0x00;
- pRSNIEs->abyRSN[13] = 0x0F;
- pRSNIEs->abyRSN[14] = 0xAC;
- if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_PSK;
- else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
- else
- pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
-
- pRSNIEs->len += 6;
-
- // RSN Capabilities
- if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
- memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
- } else {
- pRSNIEs->abyRSN[16] = 0;
- pRSNIEs->abyRSN[17] = 0;
- }
- pRSNIEs->len += 2;
-
- if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- pMgmt->bRoaming &&
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
- for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
- if (!memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
- (*pwPMKID)++;
- memcpy(pbyBuffer, pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID, 16);
- pbyBuffer += 16;
- }
- }
- if (*pwPMKID != 0)
- pRSNIEs->len += (2 + (*pwPMKID)*16);
- else
- pbyBuffer = &pRSNIEs->abyRSN[18];
- }
- return pRSNIEs->len + WLAN_IEHDR_LEN;
- }
- return 0;
-}
diff --git a/drivers/staging/vt6655/wpa2.h b/drivers/staging/vt6655/wpa2.h
deleted file mode 100644
index 2d0bd2e515f3..000000000000
--- a/drivers/staging/vt6655/wpa2.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpa2.h
- *
- * Purpose: Defines the macros, types, and functions for dealing
- * with WPA2 information.
- *
- * Author: Yiching Chen
- *
- * Date: Oct. 4, 2004
- *
- */
-
-#ifndef __WPA2_H__
-#define __WPA2_H__
-
-#include "ttype.h"
-#include "80211mgr.h"
-#include "80211hdr.h"
-#include "bssdb.h"
-
-/*--------------------- Export Definitions -------------------------*/
-#define MAX_PMKID_CACHE 16
-
-typedef struct tagsPMKIDInfo {
- unsigned char abyBSSID[6];
- unsigned char abyPMKID[16];
-} PMKIDInfo, *PPMKIDInfo;
-
-typedef struct tagSPMKIDCache {
- unsigned long BSSIDInfoCount;
- PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
-} SPMKIDCache, *PSPMKIDCache;
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Types ------------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-void
-WPA2_ClearRSN(
- PKnownBSS pBSSNode
-);
-
-void
-WPA2vParseRSN(
- PKnownBSS pBSSNode,
- PWLAN_IE_RSN pRSN
-);
-
-unsigned int
-WPA2uSetIEs(
- void *pMgmtHandle,
- PWLAN_IE_RSN pRSNIEs
-);
-
-#endif // __WPA2_H__
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
deleted file mode 100644
index dab1e8078652..000000000000
--- a/drivers/staging/vt6655/wpactl.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- * File: wpactl.c
- *
- * Purpose: handle wpa supplicant ioctl input/output functions
- *
- * Author: Lyndon Chen
- *
- * Date: Oct. 20, 2003
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include "wpactl.h"
-#include "key.h"
-#include "mac.h"
-#include "device.h"
-#include "wmgr.h"
-#include "iocmd.h"
-#include "iowpa.h"
-#include "rf.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-#define VIAWGET_WPA_MAX_BUF_SIZE 1024
-
-static const int frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-static void wpadev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_IEEE80211;
- dev->hard_header_len = ETH_HLEN;
- dev->mtu = 2048;
- dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 1000;
-
- memset(dev->broadcast, 0xFF, ETH_ALEN);
-
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
-}
-
-/*
- * Description:
- * register netdev for wpa supplicant daemon
- *
- * Parameters:
- * In:
- * pDevice -
- * enable -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_init_wpadev(struct vnt_private *pDevice)
-{
- struct vnt_private *wpadev_priv;
- struct net_device *dev = pDevice->dev;
- int ret = 0;
-
- pDevice->wpadev = alloc_netdev(sizeof(*wpadev_priv), "vntwpa",
- NET_NAME_UNKNOWN, wpadev_setup);
- if (pDevice->wpadev == NULL)
- return -ENOMEM;
-
- wpadev_priv = netdev_priv(pDevice->wpadev);
- *wpadev_priv = *pDevice;
- eth_hw_addr_inherit(pDevice->wpadev, dev);
- pDevice->wpadev->base_addr = dev->base_addr;
- pDevice->wpadev->irq = dev->irq;
- pDevice->wpadev->mem_start = dev->mem_start;
- pDevice->wpadev->mem_end = dev->mem_end;
- ret = register_netdev(pDevice->wpadev);
- if (ret) {
- pr_debug("%s: register_netdev(WPA) failed!\n", dev->name);
- free_netdev(pDevice->wpadev);
- return -1;
- }
-
- if (pDevice->skb == NULL) {
- pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
- if (pDevice->skb == NULL)
- return -ENOMEM;
- }
-
- pr_debug("%s: Registered netdev %s for WPA management\n",
- dev->name, pDevice->wpadev->name);
-
- return 0;
-}
-
-/*
- * Description:
- * unregister net_device (wpadev)
- *
- * Parameters:
- * In:
- * pDevice -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_release_wpadev(struct vnt_private *pDevice)
-{
- if (pDevice->skb) {
- dev_kfree_skb(pDevice->skb);
- pDevice->skb = NULL;
- }
-
- if (pDevice->wpadev) {
- pr_debug("%s: Netdevice %s unregistered\n",
- pDevice->dev->name, pDevice->wpadev->name);
- unregister_netdev(pDevice->wpadev);
- free_netdev(pDevice->wpadev);
- pDevice->wpadev = NULL;
- }
-
- return 0;
-}
-
-/*
- * Description:
- * Set enable/disable dev for wpa supplicant daemon
- *
- * Parameters:
- * In:
- * pDevice -
- * val -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_set_wpadev(struct vnt_private *pDevice, int val)
-{
- if (val)
- return wpa_init_wpadev(pDevice);
- else
- return wpa_release_wpadev(pDevice);
-}
-
-/*
- * Description:
- * Set WPA algorithm & keys
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_set_keys(struct vnt_private *pDevice, void *ctx,
- bool fcpfkernel) __must_hold(&pDevice->lock)
-{
- struct viawget_wpa_param *param = ctx;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- unsigned long dwKeyIndex = 0;
- unsigned char abyKey[MAX_KEY_LEN];
- unsigned char abySeq[MAX_KEY_LEN];
- u64 KeyRSC;
- unsigned char byKeyDecMode = KEY_CTL_WEP;
- int ret = 0;
- int uu, ii;
-
- if (param->u.wpa_key.alg_name > WPA_ALG_CCMP ||
- param->u.wpa_key.key_len > MAX_KEY_LEN ||
- param->u.wpa_key.seq_len > MAX_KEY_LEN)
- return -EINVAL;
-
- pr_debug("param->u.wpa_key.alg_name = %d\n", param->u.wpa_key.alg_name);
- if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- pDevice->bEncryptionEnable = false;
- pDevice->byKeyIndex = 0;
- pDevice->bTransmitKey = false;
- KeyvRemoveAllWEPKey(&(pDevice->sKey), pDevice->PortOffset);
- for (uu = 0; uu < MAX_KEY_TABLE; uu++)
- MACvDisableKeyEntry(pDevice->PortOffset, uu);
-
- return ret;
- }
-
- if (param->u.wpa_key.key && fcpfkernel) {
- memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
- } else {
- spin_unlock_irq(&pDevice->lock);
- if (param->u.wpa_key.key &&
- copy_from_user(&abyKey[0],
- (void __user *)param->u.wpa_key.key,
- param->u.wpa_key.key_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
- spin_lock_irq(&pDevice->lock);
- }
-
- dwKeyIndex = (unsigned long)(param->u.wpa_key.key_index);
-
- if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
- if (dwKeyIndex > 3) {
- return -EINVAL;
- } else {
- if (param->u.wpa_key.set_tx) {
- pDevice->byKeyIndex = (unsigned char)dwKeyIndex;
- pDevice->bTransmitKey = true;
- dwKeyIndex |= (1 << 31);
- }
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex & ~(BIT30 | USE_KEYRSC),
- param->u.wpa_key.key_len,
- NULL,
- abyKey,
- KEY_CTL_WEP,
- pDevice->PortOffset,
- pDevice->byLocalID);
-
- }
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pDevice->bEncryptionEnable = true;
- return ret;
- }
-
- if (param->u.wpa_key.seq && fcpfkernel) {
- memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
- } else {
- spin_unlock_irq(&pDevice->lock);
- if (param->u.wpa_key.seq &&
- copy_from_user(&abySeq[0],
- (void __user *)param->u.wpa_key.seq,
- param->u.wpa_key.seq_len)) {
- spin_lock_irq(&pDevice->lock);
- return -EINVAL;
- }
- spin_lock_irq(&pDevice->lock);
- }
-
- if (param->u.wpa_key.seq_len > 0) {
- for (ii = 0; ii < param->u.wpa_key.seq_len; ii++) {
- if (ii < 4)
- KeyRSC |= (u64)(abySeq[ii] << (ii * 8));
- else
- KeyRSC |= (u64)(abySeq[ii] << ((ii-4) * 8));
- }
- dwKeyIndex |= 1 << 29;
- }
-
- if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
- pr_debug("return dwKeyIndex > 3\n");
- return -EINVAL;
- }
-
- if (param->u.wpa_key.alg_name == WPA_ALG_TKIP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
-
- if (param->u.wpa_key.alg_name == WPA_ALG_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
-
- if (param->u.wpa_key.set_tx)
- dwKeyIndex |= (1 << 31);
-
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
- byKeyDecMode = KEY_CTL_CCMP;
- else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
- byKeyDecMode = KEY_CTL_TKIP;
- else
- byKeyDecMode = KEY_CTL_WEP;
-
- /* Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled */
- if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
- if (param->u.wpa_key.key_len == MAX_KEY_LEN)
- byKeyDecMode = KEY_CTL_TKIP;
- else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- } else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
- if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
- byKeyDecMode = KEY_CTL_WEP;
- }
-
- /* Check TKIP key length */
- if ((byKeyDecMode == KEY_CTL_TKIP) &&
- (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
- /* TKIP Key must be 256 bits */
- pr_debug("return- TKIP Key must be 256 bits!\n");
- return -EINVAL;
- }
- /* Check AES key length */
- if ((byKeyDecMode == KEY_CTL_CCMP) &&
- (param->u.wpa_key.key_len != AES_KEY_LEN)) {
- /* AES Key must be 128 bits */
- return -EINVAL;
- }
-
- /* spin_lock_irq(&pDevice->lock); */
- if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
- /* If is_broadcast_ether_addr, set the key as every key entry's group key. */
- pr_debug("Groupe Key Assign\n");
-
- if (KeybSetAllGroupKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID) &&
- KeybSetDefaultKey(&(pDevice->sKey),
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pr_debug("GROUP Key Assign\n");
-
- } else {
- return -EINVAL;
- }
-
- } else {
- pr_debug("Pairwise Key Assign\n");
- /* BSSID not 0xffffffffffff */
- /* Pairwise Key can't be WEP */
- if (byKeyDecMode == KEY_CTL_WEP) {
- pr_debug("Pairwise Key can't be WEP\n");
- return -EINVAL;
- }
-
- dwKeyIndex |= (1 << 30); /* set pairwise key */
- if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA)
- return -EINVAL;
-
- if (KeybSetKey(&(pDevice->sKey),
- &param->addr[0],
- dwKeyIndex,
- param->u.wpa_key.key_len,
- (u64 *) &KeyRSC,
- (unsigned char *)abyKey,
- byKeyDecMode,
- pDevice->PortOffset,
- pDevice->byLocalID)) {
- pr_debug("Pairwise Key Set\n");
-
- } else {
- /* Key Table Full */
- return -EINVAL;
- }
- } /* BSSID not 0xffffffffffff */
- if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
- pDevice->byKeyIndex = (unsigned char)param->u.wpa_key.key_index;
- pDevice->bTransmitKey = true;
- }
- pDevice->bEncryptionEnable = true;
-
- return ret;
-}
-
-/*
- * Description:
- * enable wpa auth & mode
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_wpa(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- pMgmt->bShareKeyAlgorithm = false;
-
- return 0;
-}
-
-/*
- * Description:
- * set disassociate
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_disassociate(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->bLinkPass) {
- if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
- }
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * enable scan process
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_scan(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * get bssid
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_bssid(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
-
- memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
-
- return 0;
-}
-
-/*
- * Description:
- * get ssid
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_ssid(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
-
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
-
- memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
- param->u.wpa_associate.ssid_len = pItemSSID->len;
-
- return 0;
-}
-
-/*
- * Description:
- * get scan results
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_get_scan(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- struct viawget_scan_result *scan_buf;
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- PKnownBSS pBSS;
- unsigned char *pBuf;
- int ret = 0;
- u16 count = 0;
- u16 ii, jj;
-#if 1
- unsigned char *ptempBSS;
-
- ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
- if (ptempBSS == NULL) {
- pr_err("bubble sort kmalloc memory fail@@@\n");
- ret = -ENOMEM;
- return ret;
- }
-
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
- if ((pMgmt->sBSSList[jj].bActive != true) ||
- ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI) && (pMgmt->sBSSList[jj + 1].bActive != false))) {
- memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
- memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1], sizeof(KnownBSS));
- memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
- }
- }
- }
-
- kfree(ptempBSS);
-#endif
-
-/* mike: bubble sort by stronger RSSI */
-
- count = 0;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (!pBSS->bActive)
- continue;
- count++;
- }
-
- pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
-
- if (pBuf == NULL) {
- ret = -ENOMEM;
- return ret;
- }
- scan_buf = (struct viawget_scan_result *)pBuf;
- pBSS = &(pMgmt->sBSSList[0]);
- for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
- pBSS = &(pMgmt->sBSSList[ii]);
- if (pBSS->bActive) {
- if (jj >= count)
- break;
- memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
- pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
- memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
- scan_buf->ssid_len = pItemSSID->len;
- scan_buf->freq = frequency_list[pBSS->uChannel-1];
- scan_buf->caps = pBSS->wCapInfo;
-
- if (pBSS->wWPALen != 0) {
- scan_buf->wpa_ie_len = pBSS->wWPALen;
- memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
- }
- if (pBSS->wRSNLen != 0) {
- scan_buf->rsn_ie_len = pBSS->wRSNLen;
- memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
- }
- scan_buf = (struct viawget_scan_result *)((unsigned char *)scan_buf + sizeof(struct viawget_scan_result));
- jj++;
- }
- }
-
- if (jj < count)
- count = jj;
-
- if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
- ret = -EFAULT;
-
- param->u.scan_results.scan_count = count;
- pr_debug(" param->u.scan_results.scan_count = %d\n", count);
-
- kfree(pBuf);
- return ret;
-}
-
-/*
- * Description:
- * set associate with AP
- *
- * Parameters:
- * In:
- * pDevice -
- * param -
- * Out:
- *
- * Return Value:
- *
- */
-
-static int wpa_set_associate(struct vnt_private *pDevice,
- struct viawget_wpa_param *param)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PWLAN_IE_SSID pItemSSID;
- unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- unsigned char abyWPAIE[64];
- bool bWepEnabled = false;
-
- /* set key type & algorithm */
- pr_debug("pairwise_suite = %d\n",
- param->u.wpa_associate.pairwise_suite);
- pr_debug("group_suite = %d\n", param->u.wpa_associate.group_suite);
- pr_debug("key_mgmt_suite = %d\n",
- param->u.wpa_associate.key_mgmt_suite);
- pr_debug("auth_alg = %d\n", param->u.wpa_associate.auth_alg);
- pr_debug("mode = %d\n", param->u.wpa_associate.mode);
- pr_debug("wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
-
- if (param->u.wpa_associate.wpa_ie_len) {
- if (!param->u.wpa_associate.wpa_ie)
- return -EINVAL;
- if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
- return -EINVAL;
- if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EFAULT;
- }
-
- if (param->u.wpa_associate.mode == 1)
- pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
- else
- pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
- /* set ssid */
- memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
- pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
- pItemSSID->byElementID = WLAN_EID_SSID;
- pItemSSID->len = param->u.wpa_associate.ssid_len;
- memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
- /* set bssid */
- if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
- memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
- else
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pItemSSID->abySSID);
-
- if (param->u.wpa_associate.wpa_ie_len == 0) {
- if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
- pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
- } else if (abyWPAIE[0] == RSN_INFO_ELEM) {
- if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
- } else {
- if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
- pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
- else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
- pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
- else
- pMgmt->eAuthenMode = WMAC_AUTH_WPA;
- }
-
- switch (param->u.wpa_associate.pairwise_suite) {
- case CIPHER_CCMP:
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- break;
- case CIPHER_TKIP:
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- break;
- case CIPHER_WEP40:
- case CIPHER_WEP104:
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- bWepEnabled = true;
- break;
- case CIPHER_NONE:
- if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
- pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
- else
- pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
- break;
- default:
- pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- }
-
-// DavidWang: added for WPA supplicant support of open/shared-key modes
-
- if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) {
- pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- pMgmt->bShareKeyAlgorithm = true;
- } else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
- if (!bWepEnabled) pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
- else pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
- }
-//mike save old encryption status
- pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
-
- if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
- pDevice->bEncryptionEnable = true;
- else
- pDevice->bEncryptionEnable = false;
- if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
- ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && bWepEnabled))) // DavidWang // 20080717-06, <Modify> by chester: do not initialize WEP
- KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- spin_lock_irq(&pDevice->lock);
- pDevice->bLinkPass = false;
- memset(pMgmt->abyCurrBSSID, 0, 6);
- pMgmt->eCurrState = WMAC_STATE_IDLE;
- netif_stop_queue(pDevice->dev);
- //20080701-02,<Add> by Mike Liu
-/* search if ap_scan=2, which is an association request in hidden SSID mode */
- {
- PKnownBSS pCurr = NULL;
-
- pCurr = BSSpSearchBSSList(pDevice,
- pMgmt->abyDesireBSSID,
- pMgmt->abyDesireSSID,
- pMgmt->eConfigPHYMode
-);
-
- if (pCurr == NULL) {
- pr_debug("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- }
- }
-/****************************************************************/
- bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
- spin_unlock_irq(&pDevice->lock);
-
- return 0;
-}
-
-/*
- * Description:
- * wpa_ioctl main function supported for wpa supplicant
- *
- * Parameters:
- * In:
- * pDevice -
- * iw_point -
- * Out:
- *
- * Return Value:
- *
- */
-
-int wpa_ioctl(struct vnt_private *pDevice, struct iw_point *p)
-{
- struct viawget_wpa_param *param;
- int ret = 0;
- int wpa_ioctl = 0;
-
- if (p->length < sizeof(struct viawget_wpa_param) ||
- p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = kmalloc((int)p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
- }
-
- switch (param->cmd) {
- case VIAWGET_SET_WPA:
- ret = wpa_set_wpa(pDevice, param);
- pr_debug("VIAWGET_SET_WPA\n");
- break;
-
- case VIAWGET_SET_KEY:
- pr_debug("VIAWGET_SET_KEY\n");
- spin_lock_irq(&pDevice->lock);
- ret = wpa_set_keys(pDevice, param, false);
- spin_unlock_irq(&pDevice->lock);
- break;
-
- case VIAWGET_SET_SCAN:
- pr_debug("VIAWGET_SET_SCAN\n");
- ret = wpa_set_scan(pDevice, param);
- break;
-
- case VIAWGET_GET_SCAN:
- pr_debug("VIAWGET_GET_SCAN\n");
- ret = wpa_get_scan(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_GET_SSID:
- pr_debug("VIAWGET_GET_SSID\n");
- ret = wpa_get_ssid(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_GET_BSSID:
- pr_debug("VIAWGET_GET_BSSID\n");
- ret = wpa_get_bssid(pDevice, param);
- wpa_ioctl = 1;
- break;
-
- case VIAWGET_SET_ASSOCIATE:
- pr_debug("VIAWGET_SET_ASSOCIATE\n");
- ret = wpa_set_associate(pDevice, param);
- break;
-
- case VIAWGET_SET_DISASSOCIATE:
- pr_debug("VIAWGET_SET_DISASSOCIATE\n");
- ret = wpa_set_disassociate(pDevice, param);
- break;
-
- case VIAWGET_SET_DROP_UNENCRYPT:
- pr_debug("VIAWGET_SET_DROP_UNENCRYPT\n");
- break;
-
- case VIAWGET_SET_DEAUTHENTICATE:
- pr_debug("VIAWGET_SET_DEAUTHENTICATE\n");
- break;
-
- default:
- pr_debug("wpa_ioctl: unknown cmd=%d\n",
- param->cmd);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- if ((ret == 0) && wpa_ioctl) {
- if (copy_to_user(p->pointer, param, p->length)) {
- ret = -EFAULT;
- goto out;
- }
- }
-
-out:
- kfree(param);
-
- return ret;
-}
diff --git a/drivers/staging/vt6655/wpactl.h b/drivers/staging/vt6655/wpactl.h
deleted file mode 100644
index c1b4a7292061..000000000000
--- a/drivers/staging/vt6655/wpactl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wpactl.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: March 1, 2005
- *
- */
-
-#ifndef __WPACTL_H__
-#define __WPACTL_H__
-
-#include "device.h"
-#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
-#include "iowpa.h"
-#endif
-
-/*--------------------- Export Definitions -------------------------*/
-
-//WPA related
-
-enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
-enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
- CIPHER_WEP104 };
-enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_CCKM, KEY_MGMT_PSK, KEY_MGMT_NONE,
- KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };
-
-#define AUTH_ALG_OPEN_SYSTEM 0x01
-#define AUTH_ALG_SHARED_KEY 0x02
-#define AUTH_ALG_LEAP 0x04
-
-#define GENERIC_INFO_ELEM 0xdd
-#define RSN_INFO_ELEM 0x30
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-int wpa_set_wpadev(struct vnt_private *, int val);
-int wpa_ioctl(struct vnt_private *, struct iw_point *p);
-int wpa_set_keys(struct vnt_private *, void *ctx, bool fcpfkernel);
-
-#endif // __WPACTL_H__
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
deleted file mode 100644
index d1171fa3446e..000000000000
--- a/drivers/staging/vt6655/wroute.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wroute.c
- *
- * Purpose: handle WMAC frame relay & filtering
- *
- * Author: Lyndon Chen
- *
- * Date: May 20, 2003
- *
- * Functions:
- * ROUTEbRelay - Relay packet
- *
- * Revision History:
- *
- */
-
-#include "mac.h"
-#include "tcrc.h"
-#include "rxtx.h"
-#include "wroute.h"
-#include "card.h"
-#include "baseband.h"
-
-/*--------------------- Static Definitions -------------------------*/
-
-/*--------------------- Static Classes ----------------------------*/
-
-/*--------------------- Static Functions --------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*
- * Description:
- * Relay packet. Return true if the packet is copied to DMA1
- *
- * Parameters:
- * In:
- * pDevice -
- * pbySkbData - rx packet skb data
- * Out:
- * true, false
- *
- * Return Value: true if the packet is duplicated; otherwise false
- *
- */
-bool ROUTEbRelay(struct vnt_private *pDevice, unsigned char *pbySkbData,
- unsigned int uDataLen, unsigned int uNodeIndex)
-{
- PSMgmtObject pMgmt = pDevice->pMgmt;
- PSTxDesc pHeadTD, pLastTD;
- unsigned int cbFrameBodySize;
- unsigned int uMACfragNum;
- unsigned char byPktType;
- bool bNeedEncryption = false;
- SKeyItem STempKey;
- PSKeyItem pTransmitKey = NULL;
- unsigned int cbHeaderSize;
- unsigned int ii;
- unsigned char *pbyBSSID;
-
- if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 0) {
- pr_debug("Relay can't allocate TD1..\n");
- return false;
- }
-
- pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
-
- pHeadTD->m_td1TD1.byTCR = (TCR_EDP | TCR_STP);
-
- memcpy(pDevice->sTxEthHeader.abyDstAddr, pbySkbData, ETH_HLEN);
-
- cbFrameBodySize = uDataLen - ETH_HLEN;
-
- if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN)
- cbFrameBodySize += 8;
-
- if (pDevice->bEncryptionEnable == true) {
- bNeedEncryption = true;
-
- // get group key
- pbyBSSID = pDevice->abyBroadcastAddr;
- if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID,
- GROUP_KEY, &pTransmitKey) == false) {
- pTransmitKey = NULL;
- pr_debug("KEY is NULL. [%d]\n",
- pDevice->pMgmt->eCurrMode);
- } else {
- pr_debug("Get GTK\n");
- }
- }
-
- if (pDevice->bEnableHostWEP) {
- if (uNodeIndex < MAX_NODE_NUM + 1) {
- pTransmitKey = &STempKey;
- pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
- pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
- pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
- pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
- pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
- memcpy(pTransmitKey->abyKey,
- &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
- pTransmitKey->uKeyLength);
- }
- }
-
- uMACfragNum = cbGetFragCount(pDevice, pTransmitKey,
- cbFrameBodySize, &pDevice->sTxEthHeader);
-
- if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA))
- return false;
-
- byPktType = pDevice->byPacketType;
-
- if (pDevice->bFixRate) {
- if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
- if (pDevice->uConnectionRate >= RATE_11M)
- pDevice->wCurrentRate = RATE_11M;
- else
- pDevice->wCurrentRate = pDevice->uConnectionRate;
- } else {
- if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
- (pDevice->uConnectionRate <= RATE_6M)) {
- pDevice->wCurrentRate = RATE_6M;
- } else {
- if (pDevice->uConnectionRate >= RATE_54M)
- pDevice->wCurrentRate = RATE_54M;
- else
- pDevice->wCurrentRate = pDevice->uConnectionRate;
- }
- }
- } else {
- pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
- }
-
- if (pDevice->wCurrentRate <= RATE_11M)
- byPktType = PK_TYPE_11B;
-
- vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff,
- bNeedEncryption, cbFrameBodySize, TYPE_AC0DMA,
- pHeadTD, &pDevice->sTxEthHeader, pbySkbData,
- pTransmitKey, uNodeIndex, &uMACfragNum,
- &cbHeaderSize);
-
- if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
- // Disable PS
- MACbPSWakeup(pDevice->PortOffset);
- }
-
- pDevice->bPWBitOn = false;
-
- pLastTD = pHeadTD;
- for (ii = 0; ii < uMACfragNum; ii++) {
- // Poll Transmit the adapter
- wmb();
- pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
- wmb();
- if (ii == (uMACfragNum - 1))
- pLastTD = pHeadTD;
- pHeadTD = pHeadTD->next;
- }
-
- pLastTD->pTDInfo->skb = NULL;
- pLastTD->pTDInfo->byFlags = 0;
-
- pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
-
- MACvTransmitAC0(pDevice->PortOffset);
-
- return true;
-}
diff --git a/drivers/staging/vt6655/wroute.h b/drivers/staging/vt6655/wroute.h
deleted file mode 100644
index e59eec955cac..000000000000
--- a/drivers/staging/vt6655/wroute.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * File: wroute.h
- *
- * Purpose:
- *
- * Author: Lyndon Chen
- *
- * Date: May 21, 2003
- *
- */
-
-#ifndef __WROUTE_H__
-#define __WROUTE_H__
-
-#include "device.h"
-
-/*--------------------- Export Definitions -------------------------*/
-
-/*--------------------- Export Classes ----------------------------*/
-
-/*--------------------- Export Variables --------------------------*/
-
-/*--------------------- Export Functions --------------------------*/
-
-bool ROUTEbRelay(struct vnt_private *pDevice, unsigned char *pbySkbData,
- unsigned int uDataLen, unsigned int uNodeIndex);
-
-#endif /* __WROUTE_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2fbff907ce8a..b95d5b1efcc7 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -431,11 +431,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
GFP_KERNEL);
- if (tx_context == NULL) {
- dev_err(&priv->usb->dev,
- "allocate tx usb context failed\n");
+ if (tx_context == NULL)
goto free_tx;
- }
priv->tx_context[ii] = tx_context;
tx_context->priv = priv;
@@ -471,10 +468,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
}
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
- if (rcb->skb == NULL) {
- dev_err(&priv->usb->dev, "Failed to alloc rx skb\n");
+ if (rcb->skb == NULL)
goto free_rx_tx;
- }
rcb->in_use = false;
@@ -491,7 +486,6 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
if (priv->int_buf.data_buf == NULL) {
- dev_err(&priv->usb->dev, "Failed to alloc int buf\n");
usb_free_urb(priv->interrupt_urb);
goto free_rx_tx;
}
@@ -856,7 +850,9 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return 0;
}
-static void vnt_sw_scan_start(struct ieee80211_hw *hw)
+static void vnt_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *addr)
{
struct vnt_private *priv = hw->priv;
@@ -865,7 +861,8 @@ static void vnt_sw_scan_start(struct ieee80211_hw *hw)
vnt_update_pre_ed_threshold(priv, true);
}
-static void vnt_sw_scan_complete(struct ieee80211_hw *hw)
+static void vnt_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct vnt_private *priv = hw->priv;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 1f2c78cc0086..20d146b61ba7 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1376,6 +1376,7 @@ int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len);
static inline int hfa384x_drvr_getconfig16(hfa384x_t *hw, u16 rid, void *val)
{
int result = 0;
+
result = hfa384x_drvr_getconfig(hw, rid, val, sizeof(u16));
if (result == 0)
*((u16 *) val) = le16_to_cpu(*((u16 *) val));
@@ -1385,6 +1386,7 @@ static inline int hfa384x_drvr_getconfig16(hfa384x_t *hw, u16 rid, void *val)
static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
+
return hfa384x_drvr_setconfig(hw, rid, &value, sizeof(value));
}
@@ -1402,6 +1404,7 @@ static inline int
hfa384x_drvr_setconfig16_async(hfa384x_t *hw, u16 rid, u16 val)
{
u16 value = cpu_to_le16(val);
+
return hfa384x_drvr_setconfig_async(hw, rid, &value, sizeof(value),
NULL, NULL);
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 898bde73c59a..55d2f563e308 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3583,12 +3583,8 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
}
skb = dev_alloc_skb(skblen);
- if (skb == NULL) {
- netdev_err(hw->wlandev->netdev,
- "alloc_skb failed trying to allocate %d bytes\n",
- skblen);
+ if (skb == NULL)
return;
- }
/* only prepend the prism header if in the right mode */
if ((wlandev->netdev->type == ARPHRD_IEEE80211_PRISM) &&
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 3b5468c64fde..7eaaf9a63503 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -107,7 +107,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
struct p80211_metawep *p80211_wep)
{
- u16 fc;
+ __le16 fc;
u16 proto;
struct wlan_ethhdr e_hdr;
struct wlan_llc *e_llc;
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 66b5e201d418..79d9b20b364d 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -148,7 +148,7 @@
/* Generic 802.11 Header types */
struct p80211_hdr_a3 {
- u16 fc;
+ __le16 fc;
u16 dur;
u8 a1[ETH_ALEN];
u8 a2[ETH_ALEN];
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 2dd9bf8a6e18..a9c1e0bafa62 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -358,7 +358,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
* and return success .
* TODO: we need a saner way to handle this
*/
- if (skb->protocol != ETH_P_80211_RAW) {
+ if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
netif_start_queue(wlandev->netdev);
netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n");
netdev->stats.tx_dropped++;
@@ -369,7 +369,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
}
/* Check for raw transmits */
- if (skb->protocol == ETH_P_80211_RAW) {
+ if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
if (!capable(CAP_NET_ADMIN)) {
result = 1;
goto failed;
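The p80211netdev hunks above make the skb->protocol comparison endian-clean: skb->protocol is a big-endian (__be16) field, so either the field is converted with be16_to_cpu() before comparing against the host-order ETH_P_80211_RAW, or the constant is converted with htons(). A minimal standalone sketch of the two equivalent spellings (is_raw_80211() and the host-order parameter are illustrative, not part of this driver):

#include <linux/skbuff.h>

/* skb->protocol is __be16: convert the field, or convert the constant. */
static bool is_raw_80211(const struct sk_buff *skb, u16 proto_host_order)
{
	return be16_to_cpu(skb->protocol) == proto_host_order;
	/* equivalent: return skb->protocol == htons(proto_host_order); */
}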
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 6c38f797d1ab..9408644cc8b8 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -238,7 +238,8 @@ static int prism2_fwtry(struct usb_device *udev, wlandevice_t *wlandev)
* 0 - success
* ~0 - failure
----------------------------------------------------------------*/
-static int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev)
+static int prism2_fwapply(const struct ihex_binrec *rfptr,
+ wlandevice_t *wlandev)
{
signed int result = 0;
struct p80211msg_dot11req_mibget getmsg;
@@ -986,8 +987,8 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
u32 currlen;
u32 currdaddr;
- rstmsg = kmalloc(sizeof(*rstmsg), GFP_KERNEL);
- rwrmsg = kmalloc(sizeof(*rwrmsg), GFP_KERNEL);
+ rstmsg = kzalloc(sizeof(*rstmsg), GFP_KERNEL);
+ rwrmsg = kzalloc(sizeof(*rwrmsg), GFP_KERNEL);
if (!rstmsg || !rwrmsg) {
kfree(rstmsg);
kfree(rwrmsg);
@@ -997,7 +998,6 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
}
/* Initialize the messages */
- memset(rstmsg, 0, sizeof(*rstmsg));
strcpy(rstmsg->devname, wlandev->name);
rstmsg->msgcode = DIDmsg_p2req_ramdl_state;
rstmsg->msglen = sizeof(*rstmsg);
@@ -1011,7 +1011,6 @@ static int writeimage(wlandevice_t *wlandev, struct imgchunk *fchunk,
rstmsg->exeaddr.len = sizeof(u32);
rstmsg->resultcode.len = sizeof(u32);
- memset(rwrmsg, 0, sizeof(*rwrmsg));
strcpy(rwrmsg->devname, wlandev->name);
rwrmsg->msgcode = DIDmsg_p2req_ramdl_write;
rwrmsg->msglen = sizeof(*rwrmsg);
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index be7778b59118..709d49e7f3c9 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -2012,7 +2012,7 @@ static int xgifb_probe(struct pci_dev *pdev,
XGIfb_get_fix(&fb_info->fix, -1, fb_info);
fb_info->pseudo_palette = xgifb_info->pseudo_palette;
- fb_alloc_cmap(&fb_info->cmap, 256 , 0);
+ fb_alloc_cmap(&fb_info->cmap, 256, 0);
#ifdef CONFIG_MTRR
xgifb_info->mtrr = mtrr_add(xgifb_info->video_base,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 481eb174fdf0..d9524a2e9ce4 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -7,7 +7,6 @@
#define SupportCRT2in301C 0x0100 /* for 301C */
#define SetCHTVOverScan 0x8000
-#define Panel_320x480 0x07 /*fstn*/
#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
#define Panel_1024x768x75 0x22
#define Panel_1280x1024x75 0x23
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d5f49d2a8db3..1f6f699e238c 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -4135,7 +4135,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
tempax -= 1;
temp = (tempax & 0xFF00) >> 8;
- temp = ((temp & 0x0003) << 4);
+ temp = (temp & 0x0003) << 4;
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
temp = (tempax & 0x00FF);
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
index 1b452f8b6274..be3437ca339e 100644
--- a/drivers/staging/xgifb/vb_util.c
+++ b/drivers/staging/xgifb/vb_util.c
@@ -9,11 +9,8 @@ void xgifb_reg_set(unsigned long port, u8 index, u8 data)
u8 xgifb_reg_get(unsigned long port, u8 index)
{
- u8 data;
-
outb(index, port);
- data = inb(port + 1);
- return data;
+ return inb(port + 1);
}
void xgifb_reg_and_or(unsigned long port, u8 index,
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index ce87ce9bdb9c..7c6a95bcb35e 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1326,21 +1326,19 @@ static int iscsit_do_rx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
- int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
- struct kvec *iov_p;
+ int data = count->data_length, rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
-
- iov_p = count->iov;
- iov_len = count->iov_count;
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
+ count->iov, count->iov_count, data);
while (total_rx < data) {
- rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
- (data - total_rx), MSG_WAITALL);
+ rx_loop = sock_recvmsg(conn->sock, &msg,
+ (data - total_rx), MSG_WAITALL);
if (rx_loop <= 0) {
pr_debug("rx_loop: %d total_rx: %d\n",
rx_loop, total_rx);
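The iscsit_do_rx_data() hunk above switches from re-pointing a kvec array into kernel_recvmsg() to attaching the kvec once with iov_iter_kvec() and letting sock_recvmsg() advance msg.msg_iter across calls. A minimal sketch of the same receive pattern, assuming the signatures used in this hunk (the READ | ITER_KVEC direction flags and the four-argument sock_recvmsg()); recv_all() is an illustrative helper, not part of the target code:

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int recv_all(struct socket *sock, void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { };
	size_t total = 0;
	int ret;

	/* Attach the buffer once; the iterator tracks progress from then on. */
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len);

	while (total < len) {
		ret = sock_recvmsg(sock, &msg, len - total, MSG_WAITALL);
		if (ret <= 0)
			return ret ? ret : -ECONNRESET;	/* 0 means peer closed */
		total += ret;
	}

	return total;
}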
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index e9186cdf35e9..33ac39bf75e5 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -335,7 +335,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
desc += XCOPY_SEGMENT_DESC_LEN;
break;
default:
- pr_err("XCOPY unspported segment descriptor"
+ pr_err("XCOPY unsupported segment descriptor"
"type: 0x%02x\n", desc[0]);
goto out;
}
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 946562389ca8..3be9519654e5 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -83,8 +83,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
/* Found a board, allocate it an entry in the list */
tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
if (!tdev) {
- printk(KERN_ERR "tc%x: unable to allocate tc_dev\n",
- slot);
+ pr_err("tc%x: unable to allocate tc_dev\n", slot);
goto out_err;
}
dev_set_name(&tdev->dev, "tc%x", slot);
@@ -117,10 +116,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tdev->resource.start = extslotaddr;
tdev->resource.end = extslotaddr + devsize - 1;
} else {
- printk(KERN_ERR "%s: Cannot provide slot space "
- "(%dMiB required, up to %dMiB supported)\n",
- dev_name(&tdev->dev), devsize >> 20,
- max(slotsize, extslotsize) >> 20);
+ pr_err("%s: Cannot provide slot space "
+ "(%ldMiB required, up to %ldMiB supported)\n",
+ dev_name(&tdev->dev), (long)(devsize >> 20),
+ (long)(max(slotsize, extslotsize) >> 20));
kfree(tdev);
goto out_err;
}
@@ -147,14 +146,12 @@ static int __init tc_init(void)
{
/* Initialize the TURBOchannel bus */
if (tc_bus_get_info(&tc_bus))
- return 0;
+ goto out_err;
INIT_LIST_HEAD(&tc_bus.devices);
dev_set_name(&tc_bus.dev, "tc");
- if (device_register(&tc_bus.dev)) {
- put_device(&tc_bus.dev);
- return 0;
- }
+ if (device_register(&tc_bus.dev))
+ goto out_err_device;
if (tc_bus.info.slot_size) {
unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
@@ -172,8 +169,8 @@ static int __init tc_init(void)
tc_bus.resource[0].flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource,
&tc_bus.resource[0]) < 0) {
- printk(KERN_ERR "tc: Cannot reserve resource\n");
- return 0;
+ pr_err("tc: Cannot reserve resource\n");
+ goto out_err_device;
}
if (tc_bus.ext_slot_size) {
tc_bus.resource[1].start = tc_bus.ext_slot_base;
@@ -184,10 +181,8 @@ static int __init tc_init(void)
tc_bus.resource[1].flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource,
&tc_bus.resource[1]) < 0) {
- printk(KERN_ERR
- "tc: Cannot reserve resource\n");
- release_resource(&tc_bus.resource[0]);
- return 0;
+ pr_err("tc: Cannot reserve resource\n");
+ goto out_err_resource;
}
}
@@ -195,6 +190,13 @@ static int __init tc_init(void)
}
return 0;
+
+out_err_resource:
+ release_resource(&tc_bus.resource[0]);
+out_err_device:
+ put_device(&tc_bus.dev);
+out_err:
+ return 0;
}
subsys_initcall(tc_init);
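The tc_init() rework above keeps the function's contract of always returning 0, but funnels every failure through labels at the bottom so the device reference and the first slot resource are released in reverse order of acquisition. A minimal sketch of that goto-unwind shape, with hypothetical setup_a()/setup_b()/teardown_a() helpers standing in for device_register(), request_resource() and put_device():

#include <linux/errno.h>

static int setup_a(void) { return 0; }		/* stands in for device_register() */
static int setup_b(void) { return -EBUSY; }	/* stands in for request_resource() */
static void teardown_a(void) { }		/* stands in for put_device() */

static int example_init(void)
{
	if (setup_a())
		goto out_err;

	if (setup_b())
		goto out_err_a;		/* later failures unwind earlier steps */

	return 0;

out_err_a:
	teardown_a();			/* release only what was acquired */
out_err:
	return 0;			/* mirror tc_init(): never fail the initcall */
}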
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f554d25b4399..af40db0df58e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -112,6 +112,18 @@ config CPU_THERMAL
If you want this support, you should say Y here.
+config CLOCK_THERMAL
+ bool "Generic clock cooling support"
+ depends on COMMON_CLK
+ depends on PM_OPP
+ help
+ This entry implements the generic clock cooling mechanism through
+ frequency clipping. Typically used to cool off co-processors. The
+ device that is configured to use this cooling mechanism will be
+ controlled to reduce clock frequency whenever temperature is high.
+
+ If you want this support, you should say Y here.
+
config THERMAL_EMULATION
bool "Thermal emulation mode support"
help
@@ -143,6 +155,16 @@ config SPEAR_THERMAL
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework.
+config ROCKCHIP_THERMAL
+ tristate "Rockchip thermal driver"
+ depends on ARCH_ROCKCHIP
+ depends on RESET_CONTROLLER
+ help
+	  Rockchip thermal driver provides support for the temperature sensor
+	  ADC (TS-ADC) found on Rockchip SoCs. It supports one critical
+	  trip point. Cpufreq is used as the cooling device and will throttle
+	  CPUs when the temperature crosses the passive trip point.
+
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -185,6 +207,16 @@ config ARMADA_THERMAL
Enable this option if you want to have support for thermal management
controller present in Armada 370 and Armada XP SoC.
+config TEGRA_SOCTHERM
+ tristate "Tegra SOCTHERM thermal management"
+ depends on ARCH_TEGRA
+ help
+ Enable this option for integrated thermal management support on NVIDIA
+ Tegra124 systems-on-chip. The driver supports four thermal zones
+ (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
+ zones to manage temperatures. This option is also required for the
+ emergency thermal reset (thermtrip) feature to function.
+
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 39c4fe87da2f..fa0dc486790f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -18,8 +18,12 @@ thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+# clock cooling
+thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
+
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
@@ -34,3 +38,4 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_ST_THERMAL) += st/
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 9d1420acb391..c2556cf5186b 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -35,10 +35,6 @@
#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30)
#define PMU_TDC0_START_CAL_MASK (0x1 << 25)
-#define A375_Z1_CAL_RESET_LSB 0x8011e214
-#define A375_Z1_CAL_RESET_MSB 0x30a88019
-#define A375_Z1_WORKAROUND_BIT BIT(9)
-
#define A375_UNIT_CONTROL_SHIFT 27
#define A375_UNIT_CONTROL_MASK 0x7
#define A375_READOUT_INVERT BIT(15)
@@ -124,24 +120,12 @@ static void armada375_init_sensor(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
unsigned long reg;
- bool quirk_needed =
- !!of_device_is_compatible(pdev->dev.of_node,
- "marvell,armada375-z1-thermal");
-
- if (quirk_needed) {
- /* Ensure these registers have the default (reset) values */
- writel(A375_Z1_CAL_RESET_LSB, priv->control);
- writel(A375_Z1_CAL_RESET_MSB, priv->control + 0x4);
- }
reg = readl(priv->control + 4);
reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
reg &= ~A375_READOUT_INVERT;
reg &= ~A375_HW_RESETn;
- if (quirk_needed)
- reg |= A375_Z1_WORKAROUND_BIT;
-
writel(reg, priv->control + 4);
mdelay(20);
@@ -260,10 +244,6 @@ static const struct of_device_id armada_thermal_id_table[] = {
.data = &armada375_data,
},
{
- .compatible = "marvell,armada375-z1-thermal",
- .data = &armada375_data,
- },
- {
.compatible = "marvell,armada380-thermal",
.data = &armada380_data,
},
@@ -329,7 +309,6 @@ static struct platform_driver armada_thermal_driver = {
.remove = armada_thermal_exit,
.driver = {
.name = "armada_thermal",
- .owner = THIS_MODULE,
.of_match_table = armada_thermal_id_table,
},
};
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
new file mode 100644
index 000000000000..1b4ff0f4c716
--- /dev/null
+++ b/drivers/thermal/clock_cooling.c
@@ -0,0 +1,485 @@
+/*
+ * drivers/thermal/clock_cooling.c
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Highly based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/clock_cooling.h>
+
+/**
+ * struct clock_cooling_device - data for cooling device with clock
+ * @id: unique integer value corresponding to each clock_cooling_device
+ * registered.
+ * @dev: struct device pointer to the device being used to cool off using
+ * clock frequencies.
+ * @cdev: thermal_cooling_device pointer to keep track of the
+ * registered cooling device.
+ * @clk_rate_change_nb: reference to notifier block used to receive clock
+ * rate changes.
+ * @freq_table: frequency table used to keep track of available frequencies.
+ * @clock_state: integer value representing the current state of clock
+ * cooling devices.
+ * @clock_val: integer value representing the absolute value of the clipped
+ * frequency.
+ * @clk: struct clk reference used to enforce clock limits.
+ * @lock: mutex lock to protect this struct.
+ *
+ * This structure is required for keeping information of each
+ * clock_cooling_device registered. In order to prevent corruption of this a
+ * mutex @lock is used.
+ */
+struct clock_cooling_device {
+ int id;
+ struct device *dev;
+ struct thermal_cooling_device *cdev;
+ struct notifier_block clk_rate_change_nb;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned long clock_state;
+ unsigned long clock_val;
+ struct clk *clk;
+ struct mutex lock; /* lock to protect the content of this struct */
+};
+#define to_clock_cooling_device(x) \
+ container_of(x, struct clock_cooling_device, clk_rate_change_nb)
+static DEFINE_IDR(clock_idr);
+static DEFINE_MUTEX(cooling_clock_lock);
+
+/**
+ * clock_cooling_get_idr - function to get a unique id.
+ * @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
+ */
+static int clock_cooling_get_idr(int *id)
+{
+ int ret;
+
+ mutex_lock(&cooling_clock_lock);
+ ret = idr_alloc(&clock_idr, NULL, 0, 0, GFP_KERNEL);
+ mutex_unlock(&cooling_clock_lock);
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
+
+ return 0;
+}
+
+/**
+ * release_idr - function to free the unique id.
+ * @id: int value representing the unique id.
+ */
+static void release_idr(int id)
+{
+ mutex_lock(&cooling_clock_lock);
+ idr_remove(&clock_idr, id);
+ mutex_unlock(&cooling_clock_lock);
+}
+
+/* The code below defines the functions used for the clock as a cooling device */
+
+enum clock_cooling_property {
+ GET_LEVEL,
+ GET_FREQ,
+ GET_MAXL,
+};
+
+/**
+ * clock_cooling_get_property - fetch a property of interest for a given clock cooling device.
+ * @ccdev: clock cooling device reference
+ * @input: query parameter
+ * @output: query return
+ * @property: type of query (frequency, level, max level)
+ *
+ * This is the common function to
+ * 1. get maximum clock cooling states
+ * 2. translate frequency to cooling state
+ * 3. translate cooling state to frequency
+ * Note that the code may not be in good shape,
+ * but it is written this way in order to:
+ * a) reduce duplicate code as most of the code can be shared.
+ * b) make sure the logic is consistent when translating between
+ * cooling states and frequencies.
+ *
+ * Return: 0 on success, -EINVAL when invalid parameters are passed.
+ */
+static int clock_cooling_get_property(struct clock_cooling_device *ccdev,
+ unsigned long input,
+ unsigned long *output,
+ enum clock_cooling_property property)
+{
+ int i;
+ unsigned long max_level = 0, level = 0;
+ unsigned int freq = CPUFREQ_ENTRY_INVALID;
+ int descend = -1;
+ struct cpufreq_frequency_table *pos, *table = ccdev->freq_table;
+
+ if (!output)
+ return -EINVAL;
+
+ if (!table)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ /* ignore duplicate entry */
+ if (freq == pos->frequency)
+ continue;
+
+ /* get the frequency order */
+ if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
+ descend = freq > pos->frequency;
+
+ freq = pos->frequency;
+ max_level++;
+ }
+
+ /* No valid cpu frequency entry */
+ if (max_level == 0)
+ return -EINVAL;
+
+ /* max_level is an index, not a counter */
+ max_level--;
+
+ /* get max level */
+ if (property == GET_MAXL) {
+ *output = max_level;
+ return 0;
+ }
+
+ if (property == GET_FREQ)
+ level = descend ? input : (max_level - input);
+
+ i = 0;
+ cpufreq_for_each_valid_entry(pos, table) {
+ /* ignore duplicate entry */
+ if (freq == pos->frequency)
+ continue;
+
+ /* now we have a valid frequency entry */
+ freq = pos->frequency;
+
+ if (property == GET_LEVEL && (unsigned int)input == freq) {
+ /* get level by frequency */
+ *output = descend ? i : (max_level - i);
+ return 0;
+ }
+ if (property == GET_FREQ && level == i) {
+ /* get frequency by level */
+ *output = freq;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * clock_cooling_get_level - return the cooling level of given clock cooling.
+ * @cdev: reference to a thermal cooling device used as a clock cooling device
+ * @freq: the frequency of interest
+ *
+ * This function will match the cooling level corresponding to the
+ * requested @freq and return it.
+ *
+ * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
+ * otherwise.
+ */
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+ unsigned long val;
+
+ if (clock_cooling_get_property(ccdev, (unsigned long)freq, &val,
+ GET_LEVEL))
+ return THERMAL_CSTATE_INVALID;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(clock_cooling_get_level);
+
+/**
+ * clock_cooling_get_frequency - get the absolute value of frequency from level.
+ * @ccdev: clock cooling device reference
+ * @level: cooling level
+ *
+ * This function matches a cooling level with a frequency. Based on a cooling
+ * level, which equals a cooling state of the clock cooling device, it will
+ * return the corresponding frequency,
+ * e.g. level=0 --> 1st MAX FREQ, level=1 --> 2nd MAX FREQ, etc.
+ *
+ * Return: 0 on error, the corresponding frequency otherwise.
+ */
+static unsigned long
+clock_cooling_get_frequency(struct clock_cooling_device *ccdev,
+ unsigned long level)
+{
+ int ret = 0;
+ unsigned long freq;
+
+ ret = clock_cooling_get_property(ccdev, level, &freq, GET_FREQ);
+ if (ret)
+ return 0;
+
+ return freq;
+}
+
+/**
+ * clock_cooling_apply - function to apply frequency clipping.
+ * @ccdev: clock_cooling_device pointer containing frequency clipping data.
+ * @cooling_state: value of the cooling state.
+ *
+ * Function used to make sure the clock layer is aware of current thermal
+ * limits. The limits are applied by updating the clock rate in case it is
+ * higher than the corresponding frequency based on the requested cooling_state.
+ *
+ * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
+ * cooling state).
+ */
+static int clock_cooling_apply(struct clock_cooling_device *ccdev,
+ unsigned long cooling_state)
+{
+ unsigned long clip_freq, cur_freq;
+ int ret = 0;
+
+ /* Here we write the clipping */
+ /* Check if the old cooling action is same as new cooling action */
+ if (ccdev->clock_state == cooling_state)
+ return 0;
+
+ clip_freq = clock_cooling_get_frequency(ccdev, cooling_state);
+ if (!clip_freq)
+ return -EINVAL;
+
+ cur_freq = clk_get_rate(ccdev->clk);
+
+ mutex_lock(&ccdev->lock);
+ ccdev->clock_state = cooling_state;
+ ccdev->clock_val = clip_freq;
+ /* enforce clock level */
+ if (cur_freq > clip_freq)
+ ret = clk_set_rate(ccdev->clk, clip_freq);
+ mutex_unlock(&ccdev->lock);
+
+ return ret;
+}
+
+/**
+ * clock_cooling_clock_notifier - notifier callback on clock rate changes.
+ * @nb: struct notifier_block * with callback info.
+ * @event: value showing clock event for which this function is invoked.
+ * @data: callback-specific data
+ *
+ * Callback to hijack the notification on clock transition.
+ * Every time there is a clock change, we intercept all pre-change events
+ * and block the transition in case the new rate infringes thermal limits.
+ *
+ * Return: NOTIFY_DONE (success) or NOTIFY_BAD (new_rate > thermal limit).
+ */
+static int clock_cooling_clock_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct clock_cooling_device *ccdev = to_clock_cooling_device(nb);
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ /*
+ * checks on current state
+ * TODO: the current method is not the best we can find,
+ * as it possibly allows voltage transitions, in case the
+ * DVFS layer is also hijacking clock pre-change notifications.
+ */
+ if (ndata->new_rate > ccdev->clock_val)
+ return NOTIFY_BAD;
+ /* fall through */
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+/* clock cooling device thermal callback functions are defined below */
+
+/**
+ * clock_cooling_get_max_state - callback function to get the max cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the clock
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int clock_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+ unsigned long count = 0;
+ int ret;
+
+ ret = clock_cooling_get_property(ccdev, 0, &count, GET_MAXL);
+ if (!ret)
+ *state = count;
+
+ return ret;
+}
+
+/**
+ * clock_cooling_get_cur_state - function to get the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the clock
+ * current cooling state.
+ *
+ * Return: 0 (success)
+ */
+static int clock_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct clock_cooling_device *ccdev = cdev->devdata;
+
+ *state = ccdev->clock_state;
+
+ return 0;
+}
+
+/**
+ * clock_cooling_set_cur_state - function to set the current cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the clock cooling
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int clock_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct clock_cooling_device *clock_device = cdev->devdata;
+
+ return clock_cooling_apply(clock_device, state);
+}
+
+/* Bind clock callbacks to thermal cooling device ops */
+static struct thermal_cooling_device_ops const clock_cooling_ops = {
+ .get_max_state = clock_cooling_get_max_state,
+ .get_cur_state = clock_cooling_get_cur_state,
+ .set_cur_state = clock_cooling_set_cur_state,
+};
+
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ *
+ * This interface function registers the clock cooling device with the name
+ * "thermal-clock-%x". The cooling device is based on clock frequencies.
+ * The struct device is assumed to be capable of DVFS transitions.
+ * The OPP layer is used to fetch and fill the available frequencies for
+ * the referred device. The ordered frequency table is used to control
+ * the clock cooling device cooling states and to limit clock transitions
+ * based on the cooling state requested by the thermal framework.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ struct thermal_cooling_device *cdev;
+ struct clock_cooling_device *ccdev = NULL;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int ret = 0;
+
+ ccdev = devm_kzalloc(dev, sizeof(*ccdev), GFP_KERNEL);
+ if (!ccdev)
+ return ERR_PTR(-ENOMEM);
+
+ ccdev->dev = dev;
+ ccdev->clk = devm_clk_get(dev, clock_name);
+ if (IS_ERR(ccdev->clk))
+ return ERR_CAST(ccdev->clk);
+
+ ret = clock_cooling_get_idr(&ccdev->id);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id);
+
+ cdev = thermal_cooling_device_register(dev_name, ccdev,
+ &clock_cooling_ops);
+ if (IS_ERR(cdev)) {
+ release_idr(ccdev->id);
+ return ERR_PTR(-EINVAL);
+ }
+ ccdev->cdev = cdev;
+ ccdev->clk_rate_change_nb.notifier_call = clock_cooling_clock_notifier;
+
+ /* Assuming someone has already filled the opp table for this device */
+ ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table);
+ if (ret) {
+ release_idr(ccdev->id);
+ return ERR_PTR(ret);
+ }
+ ccdev->clock_state = 0;
+ ccdev->clock_val = clock_cooling_get_frequency(ccdev, 0);
+
+ clk_notifier_register(ccdev->clk, &ccdev->clk_rate_change_nb);
+
+ return cdev;
+}
+EXPORT_SYMBOL_GPL(clock_cooling_register);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ *
+ * This interface function unregisters the "thermal-clock-%d" cooling device.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+ struct clock_cooling_device *ccdev;
+
+ if (!cdev)
+ return;
+
+ ccdev = cdev->devdata;
+
+ clk_notifier_unregister(ccdev->clk, &ccdev->clk_rate_change_nb);
+ dev_pm_opp_free_cpufreq_table(ccdev->dev, &ccdev->freq_table);
+
+ thermal_cooling_device_unregister(ccdev->cdev);
+ release_idr(ccdev->id);
+}
+EXPORT_SYMBOL_GPL(clock_cooling_unregister);
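The new clock cooling code above exposes three entry points: clock_cooling_register(), clock_cooling_unregister() and the clock_cooling_get_level() lookup. A minimal probe/remove sketch of a consumer, assuming a hypothetical platform device whose OPP table is already populated and whose cooled clock is named "codec" (the foo_* names and the clock name are illustrative, not taken from this patch):

#include <linux/clock_cooling.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct foo_priv {
	struct thermal_cooling_device *cdev;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* The OPP table for &pdev->dev must already be filled in. */
	priv->cdev = clock_cooling_register(&pdev->dev, "codec");
	if (IS_ERR(priv->cdev))
		return PTR_ERR(priv->cdev);

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	clock_cooling_unregister(priv->cdev);
	return 0;
}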
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 786d19263ab0..000d53e934a0 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -78,7 +78,6 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
static struct platform_driver db8500_cpufreq_cooling_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "db8500-cpufreq-cooling",
.of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
},
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 1e3b3bf9f993..20adfbe27df1 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -517,7 +517,6 @@ static const struct of_device_id db8500_thermal_match[] = {
static struct platform_driver db8500_thermal_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "db8500-thermal",
.of_match_table = of_match_ptr(db8500_thermal_match),
},
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 828f5e345c30..09f6e304c274 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -184,7 +184,6 @@ static struct platform_driver dove_thermal_driver = {
.remove = dove_thermal_exit,
.driver = {
.name = "dove_thermal",
- .owner = THIS_MODULE,
.of_match_table = dove_thermal_id_table,
},
};
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 5a1f1070b702..88b32f942dcf 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -637,7 +637,6 @@ static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
static struct platform_driver imx_thermal = {
.driver = {
.name = "imx_thermal",
- .owner = THIS_MODULE,
.pm = &imx_thermal_pm_ops,
.of_match_table = of_imx_thermal_match,
},
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index 0d8db808f0ae..e4e61b3fb11e 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -131,6 +131,8 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
pr_warn("Failed to get target ACPI device\n");
}
+ result = 0;
+
*trtp = trts;
/* don't count bad entries */
*trt_count -= nr_bad_entries;
@@ -317,21 +319,21 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
{
int ret = 0;
unsigned long length = 0;
- unsigned long count = 0;
+ int count = 0;
char __user *arg = (void __user *)__arg;
struct trt *trts;
struct art *arts;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
- ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
&trts, false);
kfree(trts);
if (!ret)
return put_user(count, (unsigned long __user *)__arg);
return ret;
case ACPI_THERMAL_GET_TRT_LEN:
- ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
&trts, false);
kfree(trts);
length = count * sizeof(union trt_object);
@@ -341,14 +343,14 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
case ACPI_THERMAL_GET_TRT:
return fill_trt(arg);
case ACPI_THERMAL_GET_ART_COUNT:
- ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
&arts, false);
kfree(arts);
if (!ret)
return put_user(count, (unsigned long __user *)__arg);
return ret;
case ACPI_THERMAL_GET_ART_LEN:
- ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
&arts, false);
kfree(arts);
length = count * sizeof(union art_object);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index edc1cce117ba..dcb306ea14a4 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -43,6 +43,74 @@ struct int3400_thermal_priv {
struct trt *trts;
u8 uuid_bitmap;
int rel_misc_dev_res;
+ int current_uuid_index;
+};
+
+static ssize_t available_uuids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+ int i;
+ int length = 0;
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
+ if (priv->uuid_bitmap & (1 << i))
+ if (PAGE_SIZE - length > 0)
+ length += snprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
+ }
+
+ return length;
+}
+
+static ssize_t current_uuid_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+
+ if (priv->uuid_bitmap & (1 << priv->current_uuid_index))
+ return sprintf(buf, "%s\n",
+ int3400_thermal_uuids[priv->current_uuid_index]);
+ else
+ return sprintf(buf, "INVALID\n");
+}
+
+static ssize_t current_uuid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
+ if ((priv->uuid_bitmap & (1 << i)) &&
+ !(strncmp(buf, int3400_thermal_uuids[i],
+ sizeof(int3400_thermal_uuids[i]) - 1))) {
+ priv->current_uuid_index = i;
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(current_uuid, 0644, current_uuid_show, current_uuid_store);
+static DEVICE_ATTR_RO(available_uuids);
+static struct attribute *uuid_attrs[] = {
+ &dev_attr_available_uuids.attr,
+ &dev_attr_current_uuid.attr,
+ NULL
+};
+
+static struct attribute_group uuid_attribute_group = {
+ .attrs = uuid_attrs,
+ .name = "uuids"
};
static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
@@ -160,9 +228,9 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal,
if (enable != priv->mode) {
priv->mode = enable;
- /* currently, only PASSIVE COOLING is supported */
result = int3400_thermal_run_osc(priv->adev->handle,
- INT3400_THERMAL_PASSIVE_1, enable);
+ priv->current_uuid_index,
+ enable);
}
return result;
}
@@ -223,7 +291,14 @@ static int int3400_thermal_probe(struct platform_device *pdev)
priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
priv->adev->handle);
+ result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group);
+ if (result)
+ goto free_zone;
+
return 0;
+
+free_zone:
+ thermal_zone_device_unregister(priv->thermal);
free_trt:
kfree(priv->trts);
free_art:
@@ -240,6 +315,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+ sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
thermal_zone_device_unregister(priv->thermal);
kfree(priv->trts);
kfree(priv->arts);
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 6e9fb62eb817..1bfa6a69e77a 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -293,8 +293,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
return 0;
err_free_obj:
- if (obj->tzone)
- thermal_zone_device_unregister(obj->tzone);
+ thermal_zone_device_unregister(obj->tzone);
return result;
}
@@ -471,7 +470,6 @@ static struct platform_driver int3403_driver = {
.remove = int3403_remove,
.driver = {
.name = "int3403 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3403_device_ids,
},
};
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 95cb7fc20e17..b46c706e1cac 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -689,6 +689,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x3f},
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
+ { X86_VENDOR_INTEL, 6, 0x4c},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index a6a0a18ec0aa..5580f5b24eb9 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -360,6 +360,9 @@ static void proc_thermal_interrupt(void)
u32 sticky_out;
int status;
u32 ptmc_out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&intr_notify_lock, flags);
/* Clear APIC interrupt */
status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
@@ -378,21 +381,20 @@ static void proc_thermal_interrupt(void)
/* reset sticky bit */
status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
SOC_DTS_OFFSET_PTTSS, sticky_out);
+ spin_unlock_irqrestore(&intr_notify_lock, flags);
+
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
pr_debug("TZD update for zone %d\n", i);
thermal_zone_device_update(soc_dts[i]->tzone);
}
- }
+ } else
+ spin_unlock_irqrestore(&intr_notify_lock, flags);
}
static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
{
- unsigned long flags;
-
- spin_lock_irqsave(&intr_notify_lock, flags);
proc_thermal_interrupt();
- spin_unlock_irqrestore(&intr_notify_lock, flags);
pr_debug("proc_thermal_interrupt\n");
return IRQ_HANDLED;
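The intel_soc_dts change above narrows the spinlock to the register read-modify-write section and drops it before the thermal_zone_device_update() loop, which may sleep and therefore must not run with the lock held and interrupts disabled. The shape of the fix, reduced to a sketch (hw_ack_and_check() and update_zones() are stand-ins for the iosf_mbi accesses and the zone update loop):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(intr_lock);

static bool hw_ack_and_check(void) { return true; }	/* register accesses */
static void update_zones(void) { }			/* possibly sleeping work */

static void handle_interrupt(void)
{
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&intr_lock, flags);
	pending = hw_ack_and_check();	/* only the hardware state under the lock */
	spin_unlock_irqrestore(&intr_lock, flags);

	if (pending)
		update_zones();		/* called with the lock already dropped */
}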
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 3b034a0dfc94..11041fe63dc2 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -114,7 +114,6 @@ static struct platform_driver kirkwood_thermal_driver = {
.remove = kirkwood_thermal_exit,
.driver = {
.name = "kirkwood_thermal",
- .owner = THIS_MODULE,
.of_match_table = kirkwood_thermal_id_table,
},
};
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 62143ba31001..e145b66df444 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -30,27 +30,13 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
+#include <linux/thermal.h>
#include "thermal_core.h"
/*** Private data structures to represent thermal device tree data ***/
/**
- * struct __thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-
-struct __thermal_trip {
- struct device_node *np;
- unsigned long int temperature;
- unsigned long int hysteresis;
- enum thermal_trip_type type;
-};
-
-/**
* struct __thermal_bind_param - a match between trip and cooling device
* @cooling_device: a pointer to identify the referred cooling device
* @trip_id: the trip point index
@@ -77,8 +63,7 @@ struct __thermal_bind_params {
* @num_tbps: number of thermal bind params
* @tbps: an array of thermal bind params (0..num_tbps - 1)
* @sensor_data: sensor private data used while reading temperature and trend
- * @get_temp: sensor callback to read temperature
- * @get_trend: sensor callback to read temperature trend
+ * @ops: set of callbacks to handle the thermal zone based on DT
*/
struct __thermal_zone {
@@ -88,7 +73,7 @@ struct __thermal_zone {
/* trip data */
int ntrips;
- struct __thermal_trip *trips;
+ struct thermal_trip *trips;
/* cooling binding data */
int num_tbps;
@@ -96,8 +81,7 @@ struct __thermal_zone {
/* sensor interface */
void *sensor_data;
- int (*get_temp)(void *, long *);
- int (*get_trend)(void *, long *);
+ const struct thermal_zone_of_device_ops *ops;
};
/*** DT thermal zone device callbacks ***/
@@ -107,10 +91,96 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
{
struct __thermal_zone *data = tz->devdata;
- if (!data->get_temp)
+ if (!data->ops->get_temp)
return -EINVAL;
- return data->get_temp(data->sensor_data, temp);
+ return data->ops->get_temp(data->sensor_data, temp);
+}
+
+/**
+ * of_thermal_get_ntrips - function to export the number of available trip
+ * points.
+ * @tz: pointer to a thermal zone
+ *
+ * This function is a globally visible wrapper to get the number of trip
+ * points stored in the local struct __thermal_zone.
+ *
+ * Return: number of available trip points, -ENODEV when data not available
+ */
+int of_thermal_get_ntrips(struct thermal_zone_device *tz)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data || IS_ERR(data))
+ return -ENODEV;
+
+ return data->ntrips;
+}
+EXPORT_SYMBOL_GPL(of_thermal_get_ntrips);
+
+/**
+ * of_thermal_is_trip_valid - function to check if trip point is valid
+ *
+ * @tz: pointer to a thermal zone
+ * @trip: trip point to evaluate
+ *
+ * This function is responsible for checking if the passed trip point is valid
+ *
+ * Return: true if trip point is valid, false otherwise
+ */
+bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data || trip >= data->ntrips || trip < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
+
+/**
+ * of_thermal_get_trip_points - function to get access to the globally exported
+ * trip points
+ *
+ * @tz: pointer to a thermal zone
+ *
+ * This function provides a pointer to the trip points table
+ *
+ * Return: pointer to trip points table, NULL otherwise
+ */
+const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *tz)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data)
+ return NULL;
+
+ return data->trips;
+}
+EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
+
+/**
+ * of_thermal_set_emul_temp - function to set emulated temperature
+ *
+ * @tz: pointer to a thermal zone
+ * @temp: temperature to set
+ *
+ * This function gives the ability to set an emulated value of temperature,
+ * which is handy for debugging.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
+ unsigned long temp)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data->ops || !data->ops->set_emul_temp)
+ return -EINVAL;
+
+ return data->ops->set_emul_temp(data->sensor_data, temp);
}
static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
@@ -120,10 +190,10 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
long dev_trend;
int r;
- if (!data->get_trend)
+ if (!data->ops->get_trend)
return -EINVAL;
- r = data->get_trend(data->sensor_data, &dev_trend);
+ r = data->ops->get_trend(data->sensor_data, &dev_trend);
if (r)
return r;
@@ -324,8 +394,7 @@ static struct thermal_zone_device_ops of_thermal_ops = {
static struct thermal_zone_device *
thermal_zone_of_add_sensor(struct device_node *zone,
struct device_node *sensor, void *data,
- int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+ const struct thermal_zone_of_device_ops *ops)
{
struct thermal_zone_device *tzd;
struct __thermal_zone *tz;
@@ -336,13 +405,16 @@ thermal_zone_of_add_sensor(struct device_node *zone,
tz = tzd->devdata;
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&tzd->lock);
- tz->get_temp = get_temp;
- tz->get_trend = get_trend;
+ tz->ops = ops;
tz->sensor_data = data;
tzd->ops->get_temp = of_thermal_get_temp;
tzd->ops->get_trend = of_thermal_get_trend;
+ tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
mutex_unlock(&tzd->lock);
return tzd;
@@ -356,8 +428,7 @@ thermal_zone_of_add_sensor(struct device_node *zone,
* than one sensors
* @data: a private pointer (owned by the caller) that will be passed
* back, when a temperature reading is needed.
- * @get_temp: a pointer to a function that reads the sensor temperature.
- * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
*
* This function will search the list of thermal zones described in device
* tree and look for the zone that refer to the sensor device pointed by
@@ -382,9 +453,8 @@ thermal_zone_of_add_sensor(struct device_node *zone,
* check the return value with help of IS_ERR() helper.
*/
struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
{
struct device_node *np, *child, *sensor_np;
struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
@@ -426,9 +496,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
if (sensor_specs.np == sensor_np && id == sensor_id) {
tzd = thermal_zone_of_add_sensor(child, sensor_np,
- data,
- get_temp,
- get_trend);
+ data, ops);
of_node_put(sensor_specs.np);
of_node_put(child);
goto exit;
@@ -475,9 +543,9 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
mutex_lock(&tzd->lock);
tzd->ops->get_temp = NULL;
tzd->ops->get_trend = NULL;
+ tzd->ops->set_emul_temp = NULL;
- tz->get_temp = NULL;
- tz->get_trend = NULL;
+ tz->ops = NULL;
tz->sensor_data = NULL;
mutex_unlock(&tzd->lock);
}
@@ -501,7 +569,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
*/
static int thermal_of_populate_bind_params(struct device_node *np,
struct __thermal_bind_params *__tbp,
- struct __thermal_trip *trips,
+ struct thermal_trip *trips,
int ntrips)
{
struct of_phandle_args cooling_spec;
@@ -604,7 +672,7 @@ static int thermal_of_get_trip_type(struct device_node *np,
* Return: 0 on success, proper error code otherwise
*/
static int thermal_of_populate_trip(struct device_node *np,
- struct __thermal_trip *trip)
+ struct thermal_trip *trip)
{
int prop;
int ret;
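With the of-thermal changes above, thermal_zone_of_sensor_register() takes a struct thermal_zone_of_device_ops instead of bare get_temp/get_trend callbacks, and the optional set_emul_temp hook is wired through the same structure; the rockchip driver added below is one user. A minimal sensor-side sketch under the new signature (my_read_temp() and the fixed reading are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

/* Hypothetical sensor callback: report a fixed 42.0 degC in millicelsius. */
static int my_read_temp(void *data, long *temp)
{
	*temp = 42000;
	return 0;
}

static const struct thermal_zone_of_device_ops my_thermal_ops = {
	.get_temp = my_read_temp,	/* .get_trend and .set_emul_temp are optional */
};

/* Called from the sensor driver's probe(), sensor id 0. */
static int my_register_sensor(struct device *dev, void *drvdata)
{
	struct thermal_zone_device *tzd;

	tzd = thermal_zone_of_sensor_register(dev, 0, drvdata, &my_thermal_ops);
	return PTR_ERR_OR_ZERO(tzd);
}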
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
new file mode 100644
index 000000000000..1bcddfc60e91
--- /dev/null
+++ b/drivers/thermal/rockchip_thermal.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+/**
+ * If the temperature stays high over a period of time, the resulting TSHUT
+ * is either routed to the CRU module, letting it reset the entire chip, or
+ * signalled to the PMIC via GPIO.
+ */
+enum tshut_mode {
+ TSHUT_MODE_CRU = 0,
+ TSHUT_MODE_GPIO,
+};
+
+/**
+ * The system temperature sensors' tshut polarity;
+ * bit 8 is the tshut polarity:
+ * 0: low active, 1: high active
+ */
+enum tshut_polarity {
+ TSHUT_LOW_ACTIVE = 0,
+ TSHUT_HIGH_ACTIVE,
+};
+
+/**
+ * The system has three temperature sensors. Channel 0 is reserved,
+ * channel 1 is for the CPU, and channel 2 is for the GPU.
+ */
+enum sensor_id {
+ SENSOR_CPU = 1,
+ SENSOR_GPU,
+};
+
+struct rockchip_tsadc_chip {
+ /* The hardware-controlled tshut property */
+ long tshut_temp;
+ enum tshut_mode tshut_mode;
+ enum tshut_polarity tshut_polarity;
+
+ /* Chip-wide methods */
+ void (*initialize)(void __iomem *reg, enum tshut_polarity p);
+ void (*irq_ack)(void __iomem *reg);
+ void (*control)(void __iomem *reg, bool on);
+
+ /* Per-sensor methods */
+ int (*get_temp)(int chn, void __iomem *reg, long *temp);
+ void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+ void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+};
+
+struct rockchip_thermal_sensor {
+ struct rockchip_thermal_data *thermal;
+ struct thermal_zone_device *tzd;
+ enum sensor_id id;
+};
+
+#define NUM_SENSORS 2 /* Ignore unused sensor 0 */
+
+struct rockchip_thermal_data {
+ const struct rockchip_tsadc_chip *chip;
+ struct platform_device *pdev;
+ struct reset_control *reset;
+
+ struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+
+ struct clk *clk;
+ struct clk *pclk;
+
+ void __iomem *regs;
+
+ long tshut_temp;
+ enum tshut_mode tshut_mode;
+ enum tshut_polarity tshut_polarity;
+};
+
+/* TSADC V2 Sensor info define: */
+#define TSADCV2_AUTO_CON 0x04
+#define TSADCV2_INT_EN 0x08
+#define TSADCV2_INT_PD 0x0c
+#define TSADCV2_DATA(chn) (0x20 + (chn) * 0x04)
+#define TSADCV2_COMP_SHUT(chn) (0x40 + (chn) * 0x04)
+#define TSADCV2_HIGHT_INT_DEBOUNCE 0x60
+#define TSADCV2_HIGHT_TSHUT_DEBOUNCE 0x64
+#define TSADCV2_AUTO_PERIOD 0x68
+#define TSADCV2_AUTO_PERIOD_HT 0x6c
+
+#define TSADCV2_AUTO_EN BIT(0)
+#define TSADCV2_AUTO_DISABLE ~BIT(0)
+#define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn))
+#define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8)
+#define TSADCV2_AUTO_TSHUT_POLARITY_LOW ~BIT(8)
+
+#define TSADCV2_INT_SRC_EN(chn) BIT(chn)
+#define TSADCV2_SHUT_2GPIO_SRC_EN(chn) BIT(4 + (chn))
+#define TSADCV2_SHUT_2CRU_SRC_EN(chn) BIT(8 + (chn))
+
+#define TSADCV2_INT_PD_CLEAR ~BIT(8)
+
+#define TSADCV2_DATA_MASK 0xfff
+#define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4
+#define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4
+#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */
+#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */
+
+struct tsadc_table {
+ unsigned long code;
+ long temp;
+};
+
+static const struct tsadc_table v2_code_table[] = {
+ {TSADCV2_DATA_MASK, -40000},
+ {3800, -40000},
+ {3792, -35000},
+ {3783, -30000},
+ {3774, -25000},
+ {3765, -20000},
+ {3756, -15000},
+ {3747, -10000},
+ {3737, -5000},
+ {3728, 0},
+ {3718, 5000},
+ {3708, 10000},
+ {3698, 15000},
+ {3688, 20000},
+ {3678, 25000},
+ {3667, 30000},
+ {3656, 35000},
+ {3645, 40000},
+ {3634, 45000},
+ {3623, 50000},
+ {3611, 55000},
+ {3600, 60000},
+ {3588, 65000},
+ {3575, 70000},
+ {3563, 75000},
+ {3550, 80000},
+ {3537, 85000},
+ {3524, 90000},
+ {3510, 95000},
+ {3496, 100000},
+ {3482, 105000},
+ {3467, 110000},
+ {3452, 115000},
+ {3437, 120000},
+ {3421, 125000},
+ {0, 125000},
+};
+
+static u32 rk_tsadcv2_temp_to_code(long temp)
+{
+ int high, low, mid;
+
+ low = 0;
+ high = ARRAY_SIZE(v2_code_table) - 1;
+ mid = (high + low) / 2;
+
+ if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+ return 0;
+
+ while (low <= high) {
+ if (temp == v2_code_table[mid].temp)
+ return v2_code_table[mid].code;
+ else if (temp < v2_code_table[mid].temp)
+ high = mid - 1;
+ else
+ low = mid + 1;
+ mid = (low + high) / 2;
+ }
+
+ return 0;
+}
+
+static long rk_tsadcv2_code_to_temp(u32 code)
+{
+ int high, low, mid;
+
+ low = 0;
+ high = ARRAY_SIZE(v2_code_table) - 1;
+ mid = (high + low) / 2;
+
+ if (code > v2_code_table[low].code || code < v2_code_table[high].code)
+ return 125000; /* No code available, return max temperature */
+
+ while (low <= high) {
+ if (code >= v2_code_table[mid].code && code <
+ v2_code_table[mid - 1].code)
+ return v2_code_table[mid].temp;
+ else if (code < v2_code_table[mid].code)
+ low = mid + 1;
+ else
+ high = mid - 1;
+ mid = (low + high) / 2;
+ }
+
+ return 125000;
+}
+
+/**
+ * rk_tsadcv2_initialize - initialize TSADC Controller
+ * (1) Set TSADCV2_AUTO_PERIOD, to configure the interval between
+ * every two accesses of the TSADC in normal operation.
+ * (2) Set TSADCV2_AUTO_PERIOD_HT, to configure the interval between
+ * every two accesses of the TSADC after the temperature is higher
+ * than COMP_SHUT or COMP_INT.
+ * (3) Set TSADCV2_HIGHT_INT_DEBOUNCE and TSADCV2_HIGHT_TSHUT_DEBOUNCE;
+ * if the temperature is higher than COMP_INT or COMP_SHUT for
+ * "debounce" times, the TSADC controller will generate an interrupt or TSHUT.
+ */
+static void rk_tsadcv2_initialize(void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+ writel_relaxed(0 | (TSADCV2_AUTO_TSHUT_POLARITY_HIGH),
+ regs + TSADCV2_AUTO_CON);
+ else
+ writel_relaxed(0 | (TSADCV2_AUTO_TSHUT_POLARITY_LOW),
+ regs + TSADCV2_AUTO_CON);
+
+ writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+}
+
+static void rk_tsadcv2_irq_ack(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_INT_PD);
+ writel_relaxed(val & TSADCV2_INT_PD_CLEAR, regs + TSADCV2_INT_PD);
+}
+
+static void rk_tsadcv2_control(void __iomem *regs, bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_AUTO_CON);
+ if (enable)
+ val |= TSADCV2_AUTO_EN;
+ else
+ val &= ~TSADCV2_AUTO_EN;
+
+ writel_relaxed(val, regs + TSADCV2_AUTO_CON);
+}
+
+static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, long *temp)
+{
+ u32 val;
+
+	/* the A/D value of the channel's last conversion needs some time */
+ val = readl_relaxed(regs + TSADCV2_DATA(chn));
+ if (val == 0)
+ return -EAGAIN;
+
+ *temp = rk_tsadcv2_code_to_temp(val);
+
+ return 0;
+}
+
+static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+{
+ u32 tshut_value, val;
+
+ tshut_value = rk_tsadcv2_temp_to_code(temp);
+ writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
+
+ /* TSHUT will be valid */
+ val = readl_relaxed(regs + TSADCV2_AUTO_CON);
+ writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+}
+
+static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
+ enum tshut_mode mode)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_INT_EN);
+ if (mode == TSHUT_MODE_GPIO) {
+ val &= ~TSADCV2_SHUT_2CRU_SRC_EN(chn);
+ val |= TSADCV2_SHUT_2GPIO_SRC_EN(chn);
+ } else {
+ val &= ~TSADCV2_SHUT_2GPIO_SRC_EN(chn);
+ val |= TSADCV2_SHUT_2CRU_SRC_EN(chn);
+ }
+
+ writel_relaxed(val, regs + TSADCV2_INT_EN);
+}
+
+static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv2_initialize,
+ .irq_ack = rk_tsadcv2_irq_ack,
+ .control = rk_tsadcv2_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+};
+
+static const struct of_device_id of_rockchip_thermal_match[] = {
+ {
+ .compatible = "rockchip,rk3288-tsadc",
+ .data = (void *)&rk3288_tsadc_data,
+ },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
+
+static void
+rockchip_thermal_toggle_sensor(struct rockchip_thermal_sensor *sensor, bool on)
+{
+ struct thermal_zone_device *tzd = sensor->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
+{
+ struct rockchip_thermal_data *thermal = dev;
+ int i;
+
+ dev_dbg(&thermal->pdev->dev, "thermal alarm\n");
+
+ thermal->chip->irq_ack(thermal->regs);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ thermal_zone_device_update(thermal->sensors[i].tzd);
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_thermal_get_temp(void *_sensor, long *out_temp)
+{
+ struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_data *thermal = sensor->thermal;
+ const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
+ int retval;
+
+ retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp);
+ dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %ld, retval: %d\n",
+ sensor->id, *out_temp, retval);
+
+ return retval;
+}
+
+static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
+ .get_temp = rockchip_thermal_get_temp,
+};
+
+static int rockchip_configure_from_dt(struct device *dev,
+ struct device_node *np,
+ struct rockchip_thermal_data *thermal)
+{
+ u32 shut_temp, tshut_mode, tshut_polarity;
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
+ dev_warn(dev,
+ "Missing tshut temp property, using default %ld\n",
+ thermal->chip->tshut_temp);
+ thermal->tshut_temp = thermal->chip->tshut_temp;
+ } else {
+ thermal->tshut_temp = shut_temp;
+ }
+
+ if (thermal->tshut_temp > INT_MAX) {
+ dev_err(dev, "Invalid tshut temperature specified: %ld\n",
+ thermal->tshut_temp);
+ return -ERANGE;
+ }
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
+ dev_warn(dev,
+ "Missing tshut mode property, using default (%s)\n",
+ thermal->chip->tshut_mode == TSHUT_MODE_GPIO ?
+ "gpio" : "cru");
+ thermal->tshut_mode = thermal->chip->tshut_mode;
+ } else {
+ thermal->tshut_mode = tshut_mode;
+ }
+
+ if (thermal->tshut_mode > 1) {
+ dev_err(dev, "Invalid tshut mode specified: %d\n",
+ thermal->tshut_mode);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "rockchip,hw-tshut-polarity",
+ &tshut_polarity)) {
+ dev_warn(dev,
+ "Missing tshut-polarity property, using default (%s)\n",
+ thermal->chip->tshut_polarity == TSHUT_LOW_ACTIVE ?
+ "low" : "high");
+ thermal->tshut_polarity = thermal->chip->tshut_polarity;
+ } else {
+ thermal->tshut_polarity = tshut_polarity;
+ }
+
+ if (thermal->tshut_polarity > 1) {
+ dev_err(dev, "Invalid tshut-polarity specified: %d\n",
+ thermal->tshut_polarity);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
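+/*
+ * Program the hardware shutdown mode and temperature for this sensor,
+ * then register it with the thermal core through the of-thermal helpers.
+ */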
+static int
+rockchip_thermal_register_sensor(struct platform_device *pdev,
+ struct rockchip_thermal_data *thermal,
+ struct rockchip_thermal_sensor *sensor,
+ enum sensor_id id)
+{
+ const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+ int error;
+
+ tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
+ tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp);
+
+ sensor->thermal = thermal;
+ sensor->id = id;
+ sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor,
+ &rockchip_of_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ error = PTR_ERR(sensor->tzd);
+ dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
+ id, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset the TSADC controller, which resets all TSADC registers.
+ */
+static void rockchip_thermal_reset_controller(struct reset_control *reset)
+{
+ reset_control_assert(reset);
+ usleep_range(10, 20);
+ reset_control_deassert(reset);
+}
+
+static int rockchip_thermal_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct rockchip_thermal_data *thermal;
+ const struct of_device_id *match;
+ struct resource *res;
+ int irq;
+ int i;
+ int error;
+
+ match = of_match_node(of_rockchip_thermal_match, np);
+ if (!match)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data),
+ GFP_KERNEL);
+ if (!thermal)
+ return -ENOMEM;
+
+ thermal->pdev = pdev;
+
+ thermal->chip = (const struct rockchip_tsadc_chip *)match->data;
+ if (!thermal->chip)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ thermal->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(thermal->regs))
+ return PTR_ERR(thermal->regs);
+
+ thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb");
+ if (IS_ERR(thermal->reset)) {
+ error = PTR_ERR(thermal->reset);
+ dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error);
+ return error;
+ }
+
+ thermal->clk = devm_clk_get(&pdev->dev, "tsadc");
+ if (IS_ERR(thermal->clk)) {
+ error = PTR_ERR(thermal->clk);
+ dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error);
+ return error;
+ }
+
+ thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(thermal->pclk)) {
+ error = PTR_ERR(thermal->pclk);
+ dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
+ error);
+ return error;
+ }
+
+ error = clk_prepare_enable(thermal->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
+ error);
+ return error;
+ }
+
+ error = clk_prepare_enable(thermal->pclk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", error);
+ goto err_disable_clk;
+ }
+
+ rockchip_thermal_reset_controller(thermal->reset);
+
+ error = rockchip_configure_from_dt(&pdev->dev, np, thermal);
+ if (error) {
+ dev_err(&pdev->dev, "failed to parse device tree data: %d\n",
+ error);
+ goto err_disable_pclk;
+ }
+
+ thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+
+ error = rockchip_thermal_register_sensor(pdev, thermal,
+ &thermal->sensors[0],
+ SENSOR_CPU);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register CPU thermal sensor: %d\n", error);
+ goto err_disable_pclk;
+ }
+
+ error = rockchip_thermal_register_sensor(pdev, thermal,
+ &thermal->sensors[1],
+ SENSOR_GPU);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to register GPU thermal sensor: %d\n", error);
+ goto err_unregister_cpu_sensor;
+ }
+
+ error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ &rockchip_thermal_alarm_irq_thread,
+ IRQF_ONESHOT,
+ "rockchip_thermal", thermal);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to request tsadc irq: %d\n", error);
+ goto err_unregister_gpu_sensor;
+ }
+
+ thermal->chip->control(thermal->regs, true);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+
+err_unregister_gpu_sensor:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
+err_unregister_cpu_sensor:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
+err_disable_pclk:
+ clk_disable_unprepare(thermal->pclk);
+err_disable_clk:
+ clk_disable_unprepare(thermal->clk);
+
+ return error;
+}
+
+static int rockchip_thermal_remove(struct platform_device *pdev)
+{
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+ struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
+
+ rockchip_thermal_toggle_sensor(sensor, false);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
+ }
+
+ thermal->chip->control(thermal->regs, false);
+
+ clk_disable_unprepare(thermal->pclk);
+ clk_disable_unprepare(thermal->clk);
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
+
+ thermal->chip->control(thermal->regs, false);
+
+ clk_disable(thermal->pclk);
+ clk_disable(thermal->clk);
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_thermal_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
+ int i;
+ int error;
+
+ error = clk_enable(thermal->clk);
+ if (error)
+ return error;
+
+ error = clk_enable(thermal->pclk);
+ if (error)
+ return error;
+
+ rockchip_thermal_reset_controller(thermal->reset);
+
+ thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) {
+ enum sensor_id id = thermal->sensors[i].id;
+
+ thermal->chip->set_tshut_mode(id, thermal->regs,
+ thermal->tshut_mode);
+ thermal->chip->set_tshut_temp(id, thermal->regs,
+ thermal->tshut_temp);
+ }
+
+ thermal->chip->control(thermal->regs, true);
+
+ for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
+ rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
+ rockchip_thermal_suspend, rockchip_thermal_resume);
+
+static struct platform_driver rockchip_thermal_driver = {
+ .driver = {
+ .name = "rockchip-thermal",
+ .owner = THIS_MODULE,
+ .pm = &rockchip_thermal_pm_ops,
+ .of_match_table = of_rockchip_thermal_match,
+ },
+ .probe = rockchip_thermal_probe,
+ .remove = rockchip_thermal_remove,
+};
+
+module_platform_driver(rockchip_thermal_driver);
+
+MODULE_DESCRIPTION("ROCKCHIP THERMAL Driver");
+MODULE_AUTHOR("Rockchip, Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rockchip-thermal");
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
index 158f5aa8dc5d..cd4471925cdd 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.h
+++ b/drivers/thermal/samsung/exynos_thermal_common.h
@@ -27,7 +27,6 @@
#define SENSOR_NAME_LEN 16
#define MAX_TRIP_COUNT 8
#define MAX_COOLING_DEVICE 4
-#define MAX_TRIMINFO_CTRL_REG 2
#define ACTIVE_INTERVAL 500
#define IDLE_INTERVAL 10000
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 49c09243fd38..d44d91d681d4 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -33,7 +33,87 @@
#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
-#include "exynos_tmu_data.h"
+
+/* Exynos generic registers */
+#define EXYNOS_TMU_REG_TRIMINFO 0x0
+#define EXYNOS_TMU_REG_CONTROL 0x20
+#define EXYNOS_TMU_REG_STATUS 0x28
+#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
+#define EXYNOS_TMU_REG_INTEN 0x70
+#define EXYNOS_TMU_REG_INTSTAT 0x74
+#define EXYNOS_TMU_REG_INTCLEAR 0x78
+
+#define EXYNOS_TMU_TEMP_MASK 0xff
+#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
+#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
+#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
+#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
+#define EXYNOS_TMU_CORE_EN_SHIFT 0
+
+/* Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON1 0x10
+
+/* Exynos4210 specific registers */
+#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
+
+/* Exynos5250, Exynos4412, Exynos3250 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON2 0x14
+#define EXYNOS_THD_TEMP_RISE 0x50
+#define EXYNOS_THD_TEMP_FALL 0x54
+#define EXYNOS_EMUL_CON 0x80
+
+#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
+#define EXYNOS_TRIMINFO_25_SHIFT 0
+#define EXYNOS_TRIMINFO_85_SHIFT 8
+#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
+#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
+#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
+
+#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
+#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
+#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
+#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
+
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_MASK 0xffff
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+
+/* Exynos5260 specific */
+#define EXYNOS5260_TMU_REG_INTEN 0xC0
+#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
+#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
+#define EXYNOS5260_EMUL_CON 0x100
+
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE 6
+#define EXYNOS4412_MUX_ADDR_SHIFT 20
+
+/* Exynos5440 specific registers */
+#define EXYNOS5440_TMU_S0_7_TRIM 0x000
+#define EXYNOS5440_TMU_S0_7_CTRL 0x020
+#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
+#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
+#define EXYNOS5440_TMU_S0_7_TH0 0x110
+#define EXYNOS5440_TMU_S0_7_TH1 0x130
+#define EXYNOS5440_TMU_S0_7_TH2 0x150
+#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
+#define EXYNOS5440_TMU_S0_7_IRQ 0x230
+/* Exynos5440 common registers */
+#define EXYNOS5440_TMU_IRQ_STATUS 0x000
+#define EXYNOS5440_TMU_PMIN 0x004
+
+#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
+#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
+#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
+#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
+#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
+#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
+#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
@@ -52,6 +132,11 @@
* @temp_error2: fused value of the second point trim.
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @tmu_initialize: SoC specific TMU initialization method
+ * @tmu_control: SoC specific TMU control method
+ * @tmu_read: SoC specific TMU temperature read method
+ * @tmu_set_emulation: SoC specific TMU emulation setting method
+ * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
*/
struct exynos_tmu_data {
int id;
@@ -66,6 +151,12 @@ struct exynos_tmu_data {
u8 temp_error1, temp_error2;
struct regulator *regulator;
struct thermal_sensor_conf *reg_conf;
+ int (*tmu_initialize)(struct platform_device *pdev);
+ void (*tmu_control)(struct platform_device *pdev, bool on);
+ int (*tmu_read)(struct exynos_tmu_data *data);
+ void (*tmu_set_emulation)(struct exynos_tmu_data *data,
+ unsigned long temp);
+ void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};
/*
@@ -122,83 +213,10 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
return temp;
}
-static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
-{
- const struct exynos_tmu_registers *reg = data->pdata->registers;
- unsigned int val_irq;
-
- val_irq = readl(data->base + reg->tmu_intstat);
- /*
- * Clear the interrupts. Please note that the documentation for
- * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
- * states that INTCLEAR register has a different placing of bits
- * responsible for FALL IRQs than INTSTAT register. Exynos5420
- * and Exynos5440 documentation is correct (Exynos4210 doesn't
- * support FALL IRQs at all).
- */
- writel(val_irq, data->base + reg->tmu_intclear);
-}
-
-static int exynos_tmu_initialize(struct platform_device *pdev)
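+/*
+ * Extract the 25C and 85C trim codes from the TRIMINFO value; invalid
+ * fused values fall back to the efuse defaults from platform data.
+ */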
+static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int status, trim_info = 0, con, ctrl;
- unsigned int rising_threshold = 0, falling_threshold = 0;
- int ret = 0, threshold_code, i;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
- if (!IS_ERR(data->clk_sec))
- clk_enable(data->clk_sec);
- if (TMU_SUPPORTS(pdata, READY_STATUS)) {
- status = readb(data->base + reg->tmu_status);
- if (!status) {
- ret = -EBUSY;
- goto out;
- }
- }
-
- if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
- for (i = 0; i < reg->triminfo_ctrl_count; i++) {
- if (pdata->triminfo_reload[i]) {
- ctrl = readl(data->base +
- reg->triminfo_ctrl[i]);
- ctrl |= pdata->triminfo_reload[i];
- writel(ctrl, data->base +
- reg->triminfo_ctrl[i]);
- }
- }
- }
-
- /* Save trimming info in order to perform calibration */
- if (data->soc == SOC_ARCH_EXYNOS5440) {
- /*
- * For exynos5440 soc triminfo value is swapped between TMU0 and
- * TMU2, so the below logic is needed.
- */
- switch (data->id) {
- case 0:
- trim_info = readl(data->base +
- EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
- break;
- case 1:
- trim_info = readl(data->base + reg->triminfo_data);
- break;
- case 2:
- trim_info = readl(data->base -
- EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
- }
- } else {
- /* On exynos5420 the triminfo register is in the shared space */
- if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
- trim_info = readl(data->base_second +
- reg->triminfo_data);
- else
- trim_info = readl(data->base + reg->triminfo_data);
- }
data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK);
@@ -212,69 +230,37 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
data->temp_error2 =
(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
EXYNOS_TMU_TEMP_MASK;
+}
- rising_threshold = readl(data->base + reg->threshold_th0);
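+/*
+ * Pack the non-HW trigger temperatures into a threshold register value,
+ * one byte per level; for falling thresholds the configured hysteresis
+ * (threshold_falling) is subtracted first.
+ */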
+static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
+{
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ int i;
- if (data->soc == SOC_ARCH_EXYNOS4210) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->threshold);
- writeb(threshold_code,
- data->base + reg->threshold_temp);
- for (i = 0; i < pdata->non_hw_trigger_levels; i++)
- writeb(pdata->trigger_levels[i], data->base +
- reg->threshold_th0 + i * sizeof(reg->threshold_th0));
+ for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
+ u8 temp = pdata->trigger_levels[i];
- exynos_tmu_clear_irqs(data);
- } else {
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i]);
- rising_threshold &= ~(0xff << 8 * i);
- rising_threshold |= threshold_code << 8 * i;
- if (pdata->threshold_falling) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i] -
- pdata->threshold_falling);
- falling_threshold |= threshold_code << 8 * i;
- }
- }
+ if (falling)
+ temp -= pdata->threshold_falling;
+ else
+ threshold &= ~(0xff << 8 * i);
- writel(rising_threshold,
- data->base + reg->threshold_th0);
- writel(falling_threshold,
- data->base + reg->threshold_th1);
-
- exynos_tmu_clear_irqs(data);
-
- /* if last threshold limit is also present */
- i = pdata->max_trigger_level - 1;
- if (pdata->trigger_levels[i] &&
- (pdata->trigger_type[i] == HW_TRIP)) {
- threshold_code = temp_to_code(data,
- pdata->trigger_levels[i]);
- if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
- /* 1-4 level to be assigned in th0 reg */
- rising_threshold &= ~(0xff << 8 * i);
- rising_threshold |= threshold_code << 8 * i;
- writel(rising_threshold,
- data->base + reg->threshold_th0);
- } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
- /* 5th level to be assigned in th2 reg */
- rising_threshold =
- threshold_code << reg->threshold_th3_l0_shift;
- writel(rising_threshold,
- data->base + reg->threshold_th2);
- }
- con = readl(data->base + reg->tmu_ctrl);
- con |= (1 << reg->therm_trip_en_shift);
- writel(con, data->base + reg->tmu_ctrl);
- }
+ threshold |= temp_to_code(data, temp) << 8 * i;
}
- /*Clear the PMIN in the common TMU register*/
- if (reg->tmu_pmin && !data->id)
- writel(0, data->base_second + reg->tmu_pmin);
-out:
+
+ return threshold;
+}
+
+static int exynos_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ if (!IS_ERR(data->clk_sec))
+ clk_enable(data->clk_sec);
+ ret = data->tmu_initialize(pdev);
clk_disable(data->clk);
mutex_unlock(&data->lock);
if (!IS_ERR(data->clk_sec))
@@ -283,20 +269,13 @@ out:
return ret;
}
-static void exynos_tmu_control(struct platform_device *pdev, bool on)
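+/*
+ * Build the TMU control register value: select the test MUX on
+ * Exynos3250/4412, then program the reference voltage, buffer slope
+ * gain and noise cancellation (trip) mode from platform data.
+ */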
+static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int con, interrupt_en;
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- con = readl(data->base + reg->tmu_ctrl);
-
- if (pdata->test_mux)
- con |= (pdata->test_mux << reg->test_mux_addr_shift);
+ if (data->soc == SOC_ARCH_EXYNOS4412 ||
+ data->soc == SOC_ARCH_EXYNOS3250)
+ con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);
con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
@@ -305,95 +284,287 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
if (pdata->noise_cancel_mode) {
- con &= ~(reg->therm_trip_mode_mask <<
- reg->therm_trip_mode_shift);
- con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
+ con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
+ con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
}
- if (on) {
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- interrupt_en =
- pdata->trigger_enable[3] << reg->inten_rise3_shift |
- pdata->trigger_enable[2] << reg->inten_rise2_shift |
- pdata->trigger_enable[1] << reg->inten_rise1_shift |
- pdata->trigger_enable[0] << reg->inten_rise0_shift;
- if (TMU_SUPPORTS(pdata, FALLING_TRIP))
- interrupt_en |=
- interrupt_en << reg->inten_fall0_shift;
- } else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- interrupt_en = 0; /* Disable all interrupts */
- }
- writel(interrupt_en, data->base + reg->tmu_inten);
- writel(con, data->base + reg->tmu_ctrl);
+ return con;
+}
+
+static void exynos_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ data->tmu_control(pdev, on);
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
-static int exynos_tmu_read(struct exynos_tmu_data *data)
+static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- u8 temp_code;
- int temp;
+ unsigned int status;
+ int ret = 0, threshold_code, i;
- mutex_lock(&data->lock);
- clk_enable(data->clk);
+ status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
- temp_code = readb(data->base + reg->tmu_cur_temp);
+ sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
- if (data->soc == SOC_ARCH_EXYNOS4210)
- /* temp_code should range between 75 and 175 */
- if (temp_code < 75 || temp_code > 175) {
- temp = -ENODATA;
- goto out;
+ /* Write temperature code for threshold */
+ threshold_code = temp_to_code(data, pdata->threshold);
+ writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
+
+ for (i = 0; i < pdata->non_hw_trigger_levels; i++)
+ writeb(pdata->trigger_levels[i], data->base +
+ EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
+
+ data->tmu_clear_irqs(data);
+out:
+ return ret;
+}
+
+static int exynos4412_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int status, trim_info, con, ctrl, rising_threshold;
+ int ret = 0, threshold_code, i;
+
+ status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (data->soc == SOC_ARCH_EXYNOS3250 ||
+ data->soc == SOC_ARCH_EXYNOS4412 ||
+ data->soc == SOC_ARCH_EXYNOS5250) {
+ if (data->soc == SOC_ARCH_EXYNOS3250) {
+ ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
+ ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
+ writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
}
+ ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
+ ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
+ writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
+ }
- temp = code_to_temp(data, temp_code);
+ /* On exynos5420 the triminfo register is in the shared space */
+ if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
+ trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
+ else
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+
+ sanitize_temp_error(data, trim_info);
+
+ /* Write temperature code for rising and falling threshold */
+ rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
+ rising_threshold = get_th_reg(data, rising_threshold, false);
+ writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+ writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
+
+ data->tmu_clear_irqs(data);
+
+ /* if last threshold limit is also present */
+ i = pdata->max_trigger_level - 1;
+ if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
+ threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+ /* Levels 1-4 are assigned in the TH0 register */
+ rising_threshold &= ~(0xff << 8 * i);
+ rising_threshold |= threshold_code << 8 * i;
+ writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+ con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
+ con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+ writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+ }
out:
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
+ return ret;
+}
- return temp;
+static int exynos5440_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int trim_info = 0, con, rising_threshold;
+ int ret = 0, threshold_code, i;
+
+ /*
+ * For the Exynos5440 SoC the triminfo value is swapped between TMU0
+ * and TMU2, so the logic below is needed.
+ */
+ switch (data->id) {
+ case 0:
+ trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
+ EXYNOS5440_TMU_S0_7_TRIM);
+ break;
+ case 1:
+ trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
+ break;
+ case 2:
+ trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
+ EXYNOS5440_TMU_S0_7_TRIM);
+ }
+ sanitize_temp_error(data, trim_info);
+
+ /* Write temperature code for rising and falling threshold */
+ rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
+ rising_threshold = get_th_reg(data, rising_threshold, false);
+ writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
+ writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);
+
+ data->tmu_clear_irqs(data);
+
+ /* if last threshold limit is also present */
+ i = pdata->max_trigger_level - 1;
+ if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
+ threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+ /* The 5th level is assigned in the TH2 register */
+ rising_threshold =
+ threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
+ writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
+ con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
+ con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+ writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
+ }
+ /* Clear the PMIN in the common TMU register */
+ if (!data->id)
+ writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
+ return ret;
}
-#ifdef CONFIG_THERMAL_EMULATION
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
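+/*
+ * Enable/disable the TMU core and threshold interrupts. Despite the name,
+ * this path is shared by every supported SoC except Exynos5440.
+ */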
+static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
- struct exynos_tmu_data *data = drv_data;
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
- unsigned int val;
- int ret = -EINVAL;
+ unsigned int con, interrupt_en;
- if (!TMU_SUPPORTS(pdata, EMULATION))
- goto out;
+ con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (temp && temp < MCELSIUS)
- goto out;
+ if (on) {
+ con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en =
+ pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
+ pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
+ pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
+ pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
+ if (data->soc != SOC_ARCH_EXYNOS4210)
+ interrupt_en |=
+ interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
+ } else {
+ con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
+ writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+}
+
+static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ unsigned int con, interrupt_en;
+
+ con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
+
+ if (on) {
+ con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en =
+ pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
+ pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
+ pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
+ pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
+ interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
+ } else {
+ con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+ writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
+ writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
+}
+
+static int exynos_tmu_read(struct exynos_tmu_data *data)
+{
+ int ret;
mutex_lock(&data->lock);
clk_enable(data->clk);
+ ret = data->tmu_read(data);
+ if (ret >= 0)
+ ret = code_to_temp(data, ret);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
- val = readl(data->base + reg->emul_con);
+ return ret;
+}
+#ifdef CONFIG_THERMAL_EMULATION
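+/*
+ * Build the emulation control register value: program the emulated
+ * temperature code (and, except on Exynos5440, the sample time) and set
+ * the enable bit, or clear the enable bit when temp is 0.
+ */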
+static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
+ unsigned long temp)
+{
if (temp) {
temp /= MCELSIUS;
- if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
- val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
- val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
+ if (data->soc != SOC_ARCH_EXYNOS5440) {
+ val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
+ val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
}
- val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
- val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
+ val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
+ val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
EXYNOS_EMUL_ENABLE;
} else {
val &= ~EXYNOS_EMUL_ENABLE;
}
- writel(val, data->base + reg->emul_con);
+ return val;
+}
+
+static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
+ unsigned long temp)
+{
+ unsigned int val;
+ u32 emul_con;
+
+ if (data->soc == SOC_ARCH_EXYNOS5260)
+ emul_con = EXYNOS5260_EMUL_CON;
+ else
+ emul_con = EXYNOS_EMUL_CON;
+
+ val = readl(data->base + emul_con);
+ val = get_emul_con_reg(data, val, temp);
+ writel(val, data->base + emul_con);
+}
+
+static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
+ unsigned long temp)
+{
+ unsigned int val;
+
+ val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
+ val = get_emul_con_reg(data, val, temp);
+ writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
+}
+
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+ struct exynos_tmu_data *data = drv_data;
+ int ret = -EINVAL;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+ if (temp && temp < MCELSIUS)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ data->tmu_set_emulation(data, temp);
clk_disable(data->clk);
mutex_unlock(&data->lock);
return 0;
@@ -401,23 +572,41 @@ out:
return ret;
}
#else
+#define exynos4412_tmu_set_emulation NULL
+#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{ return -EINVAL; }
#endif/*CONFIG_THERMAL_EMULATION*/
+static int exynos4210_tmu_read(struct exynos_tmu_data *data)
+{
+ int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
+
+ /* "temp_code" should range between 75 and 175 */
+ return (ret < 75 || ret > 175) ? -ENODATA : ret;
+}
+
+static int exynos4412_tmu_read(struct exynos_tmu_data *data)
+{
+ return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
+}
+
+static int exynos5440_tmu_read(struct exynos_tmu_data *data)
+{
+ return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
+}
+
static void exynos_tmu_work(struct work_struct *work)
{
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
- struct exynos_tmu_platform_data *pdata = data->pdata;
- const struct exynos_tmu_registers *reg = pdata->registers;
unsigned int val_type;
if (!IS_ERR(data->clk_sec))
clk_enable(data->clk_sec);
/* Find which sensor generated this interrupt */
- if (reg->tmu_irqstatus) {
- val_type = readl(data->base_second + reg->tmu_irqstatus);
+ if (data->soc == SOC_ARCH_EXYNOS5440) {
+ val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
if (!((val_type >> data->id) & 0x1))
goto out;
}
@@ -429,7 +618,7 @@ static void exynos_tmu_work(struct work_struct *work)
clk_enable(data->clk);
/* TODO: take action based on particular interrupt */
- exynos_tmu_clear_irqs(data);
+ data->tmu_clear_irqs(data);
clk_disable(data->clk);
mutex_unlock(&data->lock);
@@ -437,6 +626,40 @@ out:
enable_irq(data->irq);
}
+static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+ unsigned int val_irq;
+ u32 tmu_intstat, tmu_intclear;
+
+ if (data->soc == SOC_ARCH_EXYNOS5260) {
+ tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
+ tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
+ } else {
+ tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
+ tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
+ }
+
+ val_irq = readl(data->base + tmu_intstat);
+ /*
+ * Clear the interrupts. Note that the documentation for
+ * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
+ * states that the INTCLEAR register has a different placement of the
+ * bits responsible for FALL IRQs than the INTSTAT register. The
+ * Exynos5420 and Exynos5440 documentation is correct (Exynos4210
+ * does not support FALL IRQs at all).
+ */
+ writel(val_irq, data->base + tmu_intclear);
+}
+
+static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
+{
+ unsigned int val_irq;
+
+ val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
+ /* clear the interrupts */
+ writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
+}
+
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
struct exynos_tmu_data *data = id;
@@ -450,35 +673,35 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
static const struct of_device_id exynos_tmu_match[] = {
{
.compatible = "samsung,exynos3250-tmu",
- .data = (void *)EXYNOS3250_TMU_DRV_DATA,
+ .data = &exynos3250_default_tmu_data,
},
{
.compatible = "samsung,exynos4210-tmu",
- .data = (void *)EXYNOS4210_TMU_DRV_DATA,
+ .data = &exynos4210_default_tmu_data,
},
{
.compatible = "samsung,exynos4412-tmu",
- .data = (void *)EXYNOS4412_TMU_DRV_DATA,
+ .data = &exynos4412_default_tmu_data,
},
{
.compatible = "samsung,exynos5250-tmu",
- .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+ .data = &exynos5250_default_tmu_data,
},
{
.compatible = "samsung,exynos5260-tmu",
- .data = (void *)EXYNOS5260_TMU_DRV_DATA,
+ .data = &exynos5260_default_tmu_data,
},
{
.compatible = "samsung,exynos5420-tmu",
- .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ .data = &exynos5420_default_tmu_data,
},
{
.compatible = "samsung,exynos5420-tmu-ext-triminfo",
- .data = (void *)EXYNOS5420_TMU_DRV_DATA,
+ .data = &exynos5420_default_tmu_data,
},
{
.compatible = "samsung,exynos5440-tmu",
- .data = (void *)EXYNOS5440_TMU_DRV_DATA,
+ .data = &exynos5440_default_tmu_data,
},
{},
};
@@ -553,12 +776,47 @@ static int exynos_map_dt_data(struct platform_device *pdev)
dev_err(&pdev->dev, "No platform init data supplied.\n");
return -ENODEV;
}
+
data->pdata = pdata;
+ data->soc = pdata->type;
+
+ switch (data->soc) {
+ case SOC_ARCH_EXYNOS4210:
+ data->tmu_initialize = exynos4210_tmu_initialize;
+ data->tmu_control = exynos4210_tmu_control;
+ data->tmu_read = exynos4210_tmu_read;
+ data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ break;
+ case SOC_ARCH_EXYNOS3250:
+ case SOC_ARCH_EXYNOS4412:
+ case SOC_ARCH_EXYNOS5250:
+ case SOC_ARCH_EXYNOS5260:
+ case SOC_ARCH_EXYNOS5420:
+ case SOC_ARCH_EXYNOS5420_TRIMINFO:
+ data->tmu_initialize = exynos4412_tmu_initialize;
+ data->tmu_control = exynos4210_tmu_control;
+ data->tmu_read = exynos4412_tmu_read;
+ data->tmu_set_emulation = exynos4412_tmu_set_emulation;
+ data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ break;
+ case SOC_ARCH_EXYNOS5440:
+ data->tmu_initialize = exynos5440_tmu_initialize;
+ data->tmu_control = exynos5440_tmu_control;
+ data->tmu_read = exynos5440_tmu_read;
+ data->tmu_set_emulation = exynos5440_tmu_set_emulation;
+ data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
+ break;
+ default:
+ dev_err(&pdev->dev, "Platform not supported\n");
+ return -EINVAL;
+ }
+
/*
* Check if the TMU shares some registers and then try to map the
* memory of common registers.
*/
- if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
+ if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
+ data->soc != SOC_ARCH_EXYNOS5440)
return 0;
if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
@@ -625,20 +883,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk_sec;
}
- if (pdata->type == SOC_ARCH_EXYNOS3250 ||
- pdata->type == SOC_ARCH_EXYNOS4210 ||
- pdata->type == SOC_ARCH_EXYNOS4412 ||
- pdata->type == SOC_ARCH_EXYNOS5250 ||
- pdata->type == SOC_ARCH_EXYNOS5260 ||
- pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
- pdata->type == SOC_ARCH_EXYNOS5440)
- data->soc = pdata->type;
- else {
- ret = -EINVAL;
- dev_err(&pdev->dev, "Platform not supported\n");
- goto err_clk;
- }
-
ret = exynos_tmu_initialize(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize TMU\n");
@@ -750,7 +994,6 @@ static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
static struct platform_driver exynos_tmu_driver = {
.driver = {
.name = "exynos-tmu",
- .owner = THIS_MODULE,
.pm = EXYNOS_TMU_PM,
.of_match_table = exynos_tmu_match,
},
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index c58c7663a3fe..da3009bff6c4 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -40,115 +40,12 @@ enum soc_type {
SOC_ARCH_EXYNOS4412,
SOC_ARCH_EXYNOS5250,
SOC_ARCH_EXYNOS5260,
+ SOC_ARCH_EXYNOS5420,
SOC_ARCH_EXYNOS5420_TRIMINFO,
SOC_ARCH_EXYNOS5440,
};
/**
- * EXYNOS TMU supported features.
- * TMU_SUPPORT_EMULATION - This features is used to set user defined
- * temperature to the TMU controller.
- * TMU_SUPPORT_MULTI_INST - This features denotes that the soc
- * has many instances of TMU.
- * TMU_SUPPORT_TRIM_RELOAD - This features shows that trimming can
- * be reloaded.
- * TMU_SUPPORT_FALLING_TRIP - This features shows that interrupt can
- * be registered for falling trips also.
- * TMU_SUPPORT_READY_STATUS - This feature tells that the TMU current
- * state(active/idle) can be checked.
- * TMU_SUPPORT_EMUL_TIME - This features allows to set next temp emulation
- * sample time.
- * TMU_SUPPORT_ADDRESS_MULTIPLE - This feature tells that the different TMU
- * sensors shares some common registers.
- * TMU_SUPPORT - macro to compare the above features with the supplied.
- */
-#define TMU_SUPPORT_EMULATION BIT(0)
-#define TMU_SUPPORT_MULTI_INST BIT(1)
-#define TMU_SUPPORT_TRIM_RELOAD BIT(2)
-#define TMU_SUPPORT_FALLING_TRIP BIT(3)
-#define TMU_SUPPORT_READY_STATUS BIT(4)
-#define TMU_SUPPORT_EMUL_TIME BIT(5)
-#define TMU_SUPPORT_ADDRESS_MULTIPLE BIT(6)
-
-#define TMU_SUPPORTS(a, b) (a->features & TMU_SUPPORT_ ## b)
-
-/**
- * struct exynos_tmu_register - register descriptors to access registers and
- * bitfields. The register validity, offsets and bitfield values may vary
- * slightly across different exynos SOC's.
- * @triminfo_data: register containing 2 pont trimming data
- * @triminfo_ctrl: trim info controller register.
- * @triminfo_ctrl_count: the number of trim info controller register.
- * @tmu_ctrl: TMU main controller register.
- * @test_mux_addr_shift: shift bits of test mux address.
- * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
- * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
- * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
- * @tmu_status: register drescribing the TMU status.
- * @tmu_cur_temp: register containing the current temperature of the TMU.
- * @threshold_temp: register containing the base threshold level.
- * @threshold_th0: Register containing first set of rising levels.
- * @threshold_th1: Register containing second set of rising levels.
- * @threshold_th2: Register containing third set of rising levels.
- * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
- * @tmu_inten: register containing the different threshold interrupt
- enable bits.
- * @inten_rise0_shift: shift bits of rising 0 interrupt bits.
- * @inten_rise1_shift: shift bits of rising 1 interrupt bits.
- * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
- * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
- * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
- * @tmu_intstat: Register containing the interrupt status values.
- * @tmu_intclear: Register for clearing the raised interrupt status.
- * @emul_con: TMU emulation controller register.
- * @emul_temp_shift: shift bits of emulation temperature.
- * @emul_time_shift: shift bits of emulation time.
- * @tmu_irqstatus: register to find which TMU generated interrupts.
- * @tmu_pmin: register to get/set the Pmin value.
- */
-struct exynos_tmu_registers {
- u32 triminfo_data;
-
- u32 triminfo_ctrl[MAX_TRIMINFO_CTRL_REG];
- u32 triminfo_ctrl_count;
-
- u32 tmu_ctrl;
- u32 test_mux_addr_shift;
- u32 therm_trip_mode_shift;
- u32 therm_trip_mode_mask;
- u32 therm_trip_en_shift;
-
- u32 tmu_status;
-
- u32 tmu_cur_temp;
-
- u32 threshold_temp;
-
- u32 threshold_th0;
- u32 threshold_th1;
- u32 threshold_th2;
- u32 threshold_th3_l0_shift;
-
- u32 tmu_inten;
- u32 inten_rise0_shift;
- u32 inten_rise1_shift;
- u32 inten_rise2_shift;
- u32 inten_rise3_shift;
- u32 inten_fall0_shift;
-
- u32 tmu_intstat;
-
- u32 tmu_intclear;
-
- u32 emul_con;
- u32 emul_temp_shift;
- u32 emul_time_shift;
-
- u32 tmu_irqstatus;
- u32 tmu_pmin;
-};
-
-/**
* struct exynos_tmu_platform_data
* @threshold: basic temperature for generating interrupt
* 25 <= threshold <= 125 [unit: degree Celsius]
@@ -192,16 +89,10 @@ struct exynos_tmu_registers {
* @first_point_trim: temp value of the first point trimming
* @second_point_trim: temp value of the second point trimming
* @default_temp_offset: default temperature offset in case of no trimming
- * @test_mux; information if SoC supports test MUX
- * @triminfo_reload: reload value to read TRIMINFO register
* @cal_type: calibration type for temperature
* @freq_clip_table: Table representing frequency reduction percentage.
* @freq_tab_count: Count of the above table as frequency reduction may
* applicable to only some of the trigger levels.
- * @registers: Pointer to structure containing all the TMU controller registers
- * and bitfields shifts and masks.
- * @features: a bitfield value indicating the features supported in SOC like
- * emulation, multi instance etc
*
* This structure is required for configuration of exynos_tmu driver.
*/
@@ -223,15 +114,11 @@ struct exynos_tmu_platform_data {
u8 first_point_trim;
u8 second_point_trim;
u8 default_temp_offset;
- u8 test_mux;
- u8 triminfo_reload[MAX_TRIMINFO_CTRL_REG];
enum calibration_type cal_type;
enum soc_type type;
struct freq_clip_table freq_tab[4];
unsigned int freq_tab_count;
- const struct exynos_tmu_registers *registers;
- unsigned int features;
};
/**
@@ -246,4 +133,12 @@ struct exynos_tmu_init_data {
struct exynos_tmu_platform_data tmu_data[];
};
+extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
+extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
+
#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 1724f6cdaef8..b23910069f68 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -22,24 +22,6 @@
#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
-#include "exynos_tmu_data.h"
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-static const struct exynos_tmu_registers exynos4210_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
- .threshold_th0 = EXYNOS4210_TMU_REG_TRIG_LEVEL0,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
-};
struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
.tmu_data = {
@@ -75,40 +57,10 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
},
.freq_tab_count = 2,
.type = SOC_ARCH_EXYNOS4210,
- .registers = &exynos4210_tmu_registers,
- .features = TMU_SUPPORT_READY_STATUS,
},
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS3250)
-static const struct exynos_tmu_registers exynos3250_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON1,
- .triminfo_ctrl[1] = EXYNOS_TMU_TRIMINFO_CON2,
- .triminfo_ctrl_count = 2,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define EXYNOS3250_TMU_DATA \
.threshold_falling = 10, \
@@ -144,54 +96,17 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
.freq_clip_max = 400 * 1000, \
.temp_level = 95, \
}, \
- .freq_tab_count = 2, \
- .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .triminfo_reload[1] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .registers = &exynos3250_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
- TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
- TMU_SUPPORT_EMUL_TIME)
-#endif
+ .freq_tab_count = 2
-#if defined(CONFIG_SOC_EXYNOS3250)
struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
.tmu_data = {
{
EXYNOS3250_TMU_DATA,
.type = SOC_ARCH_EXYNOS3250,
- .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
},
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
-static const struct exynos_tmu_registers exynos4412_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON2,
- .triminfo_ctrl_count = 1,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define EXYNOS4412_TMU_DATA \
.threshold_falling = 10, \
@@ -227,28 +142,18 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.freq_clip_max = 400 * 1000, \
.temp_level = 95, \
}, \
- .freq_tab_count = 2, \
- .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
- .registers = &exynos4412_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
- TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
- TMU_SUPPORT_EMUL_TIME)
-#endif
+ .freq_tab_count = 2
-#if defined(CONFIG_SOC_EXYNOS4412)
struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
.tmu_data = {
{
EXYNOS4412_TMU_DATA,
.type = SOC_ARCH_EXYNOS4412,
- .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
},
},
.tmu_count = 1,
};
-#endif
-#if defined(CONFIG_SOC_EXYNOS5250)
struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
.tmu_data = {
{
@@ -258,31 +163,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
},
.tmu_count = 1,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5260)
-static const struct exynos_tmu_registers exynos5260_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS5260_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS5260_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
#define __EXYNOS5260_TMU_DATA \
.threshold_falling = 10, \
@@ -319,13 +199,10 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
.temp_level = 103, \
}, \
.freq_tab_count = 2, \
- .registers = &exynos5260_tmu_registers, \
#define EXYNOS5260_TMU_DATA \
__EXYNOS5260_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5260, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
+ .type = SOC_ARCH_EXYNOS5260
struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
.tmu_data = {
@@ -337,82 +214,14 @@ struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
},
.tmu_count = 5,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5420)
-static const struct exynos_tmu_registers exynos5420_tmu_registers = {
- .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS_TMU_REG_STATUS,
- .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
- .threshold_th0 = EXYNOS_THD_TEMP_RISE,
- .threshold_th1 = EXYNOS_THD_TEMP_FALL,
- .tmu_inten = EXYNOS_TMU_REG_INTEN,
- .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
- /* INTEN_RISE3 Not availble in exynos5420 */
- .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
- .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
- .emul_con = EXYNOS_EMUL_CON,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
-};
-
-#define __EXYNOS5420_TMU_DATA \
- .threshold_falling = 10, \
- .trigger_levels[0] = 85, \
- .trigger_levels[1] = 103, \
- .trigger_levels[2] = 110, \
- .trigger_levels[3] = 120, \
- .trigger_enable[0] = true, \
- .trigger_enable[1] = true, \
- .trigger_enable[2] = true, \
- .trigger_enable[3] = false, \
- .trigger_type[0] = THROTTLE_ACTIVE, \
- .trigger_type[1] = THROTTLE_ACTIVE, \
- .trigger_type[2] = SW_TRIP, \
- .trigger_type[3] = HW_TRIP, \
- .max_trigger_level = 4, \
- .non_hw_trigger_levels = 3, \
- .gain = 8, \
- .reference_voltage = 16, \
- .noise_cancel_mode = 4, \
- .cal_type = TYPE_ONE_POINT_TRIMMING, \
- .efuse_value = 55, \
- .min_efuse_value = 40, \
- .max_efuse_value = 100, \
- .first_point_trim = 25, \
- .second_point_trim = 85, \
- .default_temp_offset = 50, \
- .freq_tab[0] = { \
- .freq_clip_max = 800 * 1000, \
- .temp_level = 85, \
- }, \
- .freq_tab[1] = { \
- .freq_clip_max = 200 * 1000, \
- .temp_level = 103, \
- }, \
- .freq_tab_count = 2, \
- .registers = &exynos5420_tmu_registers, \
#define EXYNOS5420_TMU_DATA \
- __EXYNOS5420_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5250, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
+ __EXYNOS5260_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5420
#define EXYNOS5420_TMU_DATA_SHARED \
- __EXYNOS5420_TMU_DATA \
- .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME | \
- TMU_SUPPORT_ADDRESS_MULTIPLE)
+ __EXYNOS5260_TMU_DATA \
+ .type = SOC_ARCH_EXYNOS5420_TRIMINFO
struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
.tmu_data = {
@@ -424,34 +233,6 @@ struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
},
.tmu_count = 5,
};
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5440)
-static const struct exynos_tmu_registers exynos5440_tmu_registers = {
- .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
- .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
- .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
- .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
- .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
- .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
- .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
- .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
- .threshold_th1 = EXYNOS5440_TMU_S0_7_TH1,
- .threshold_th2 = EXYNOS5440_TMU_S0_7_TH2,
- .threshold_th3_l0_shift = EXYNOS5440_TMU_TH_RISE4_SHIFT,
- .tmu_inten = EXYNOS5440_TMU_S0_7_IRQEN,
- .inten_rise0_shift = EXYNOS5440_TMU_INTEN_RISE0_SHIFT,
- .inten_rise1_shift = EXYNOS5440_TMU_INTEN_RISE1_SHIFT,
- .inten_rise2_shift = EXYNOS5440_TMU_INTEN_RISE2_SHIFT,
- .inten_rise3_shift = EXYNOS5440_TMU_INTEN_RISE3_SHIFT,
- .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
- .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
- .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
- .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
- .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
- .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
- .tmu_pmin = EXYNOS5440_TMU_PMIN,
-};
#define EXYNOS5440_TMU_DATA \
.trigger_levels[0] = 100, \
@@ -471,10 +252,7 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
.first_point_trim = 25, \
.second_point_trim = 70, \
.default_temp_offset = 25, \
- .type = SOC_ARCH_EXYNOS5440, \
- .registers = &exynos5440_tmu_registers, \
- .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
- TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_ADDRESS_MULTIPLE),
+ .type = SOC_ARCH_EXYNOS5440
struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
.tmu_data = {
@@ -484,4 +262,3 @@ struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
},
.tmu_count = 3,
};
-#endif
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
deleted file mode 100644
index 63de598c9c2c..000000000000
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * exynos_tmu_data.h - Samsung EXYNOS tmu data header file
- *
- * Copyright (C) 2013 Samsung Electronics
- * Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _EXYNOS_TMU_DATA_H
-#define _EXYNOS_TMU_DATA_H
-
-/* Exynos generic registers */
-#define EXYNOS_TMU_REG_TRIMINFO 0x0
-#define EXYNOS_TMU_REG_CONTROL 0x20
-#define EXYNOS_TMU_REG_STATUS 0x28
-#define EXYNOS_TMU_REG_CURRENT_TEMP 0x40
-#define EXYNOS_TMU_REG_INTEN 0x70
-#define EXYNOS_TMU_REG_INTSTAT 0x74
-#define EXYNOS_TMU_REG_INTCLEAR 0x78
-
-#define EXYNOS_TMU_TEMP_MASK 0xff
-#define EXYNOS_TMU_REF_VOLTAGE_SHIFT 24
-#define EXYNOS_TMU_REF_VOLTAGE_MASK 0x1f
-#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK 0xf
-#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
-#define EXYNOS_TMU_CORE_EN_SHIFT 0
-
-/* Exynos3250 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON1 0x10
-
-/* Exynos4210 specific registers */
-#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
-
-/* Exynos5250, Exynos4412, Exynos3250 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON2 0x14
-#define EXYNOS_THD_TEMP_RISE 0x50
-#define EXYNOS_THD_TEMP_FALL 0x54
-#define EXYNOS_EMUL_CON 0x80
-
-#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
-#define EXYNOS_TRIMINFO_25_SHIFT 0
-#define EXYNOS_TRIMINFO_85_SHIFT 8
-#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
-#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
-#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
-
-#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
-#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
-#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
-#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
-#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
-
-#define EXYNOS_EMUL_TIME 0x57F0
-#define EXYNOS_EMUL_TIME_MASK 0xffff
-#define EXYNOS_EMUL_TIME_SHIFT 16
-#define EXYNOS_EMUL_DATA_SHIFT 8
-#define EXYNOS_EMUL_DATA_MASK 0xFF
-#define EXYNOS_EMUL_ENABLE 0x1
-
-#define EXYNOS_MAX_TRIGGER_PER_REG 4
-
-/* Exynos5260 specific */
-#define EXYNOS5260_TMU_REG_INTEN 0xC0
-#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
-#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
-#define EXYNOS5260_EMUL_CON 0x100
-
-/* Exynos4412 specific */
-#define EXYNOS4412_MUX_ADDR_VALUE 6
-#define EXYNOS4412_MUX_ADDR_SHIFT 20
-
-/*exynos5440 specific registers*/
-#define EXYNOS5440_TMU_S0_7_TRIM 0x000
-#define EXYNOS5440_TMU_S0_7_CTRL 0x020
-#define EXYNOS5440_TMU_S0_7_DEBUG 0x040
-#define EXYNOS5440_TMU_S0_7_STATUS 0x060
-#define EXYNOS5440_TMU_S0_7_TEMP 0x0f0
-#define EXYNOS5440_TMU_S0_7_TH0 0x110
-#define EXYNOS5440_TMU_S0_7_TH1 0x130
-#define EXYNOS5440_TMU_S0_7_TH2 0x150
-#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
-#define EXYNOS5440_TMU_S0_7_IRQ 0x230
-/* exynos5440 common registers */
-#define EXYNOS5440_TMU_IRQ_STATUS 0x000
-#define EXYNOS5440_TMU_PMIN 0x004
-
-#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
-#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
-#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
-#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
-#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
-#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
-#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
-
-#if defined(CONFIG_SOC_EXYNOS3250)
-extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
-#define EXYNOS3250_TMU_DRV_DATA (&exynos3250_default_tmu_data)
-#else
-#define EXYNOS3250_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
-#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
-#else
-#define EXYNOS4210_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS4412)
-extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
-#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
-#else
-#define EXYNOS4412_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5250)
-extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
-#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
-#else
-#define EXYNOS5250_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5260)
-extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
-#define EXYNOS5260_TMU_DRV_DATA (&exynos5260_default_tmu_data)
-#else
-#define EXYNOS5260_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5420)
-extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
-#define EXYNOS5420_TMU_DRV_DATA (&exynos5420_default_tmu_data)
-#else
-#define EXYNOS5420_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5440)
-extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
-#define EXYNOS5440_TMU_DRV_DATA (&exynos5440_default_tmu_data)
-#else
-#define EXYNOS5440_TMU_DRV_DATA (NULL)
-#endif
-
-#endif /*_EXYNOS_TMU_DATA_H*/
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 1e2193fc3241..bddb71744a6c 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -186,7 +186,6 @@ static struct platform_driver spear_thermal_driver = {
.remove = spear_thermal_exit,
.driver = {
.name = "spear_thermal",
- .owner = THIS_MODULE,
.pm = &spear_thermal_pm_ops,
.of_match_table = spear_thermal_id_table,
},
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index 39896ce2ee00..067bfcdb91d6 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -194,7 +194,6 @@ int st_mmap_remove(struct platform_device *pdev)
static struct platform_driver st_mmap_thermal_driver = {
.driver = {
.name = "st_thermal_mmap",
- .owner = THIS_MODULE,
.pm = &st_thermal_pm_ops,
.of_match_table = st_mmap_thermal_of_match,
},
diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c
index 888b58e64090..26d36a242bb8 100644
--- a/drivers/thermal/st/st_thermal_syscfg.c
+++ b/drivers/thermal/st/st_thermal_syscfg.c
@@ -165,7 +165,6 @@ int st_syscfg_remove(struct platform_device *pdev)
static struct platform_driver st_syscfg_thermal_driver = {
.driver = {
.name = "st_syscfg_thermal",
- .owner = THIS_MODULE,
.pm = &st_thermal_pm_ops,
.of_match_table = st_syscfg_thermal_of_match,
},
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
new file mode 100644
index 000000000000..9197fc05c5cc
--- /dev/null
+++ b/drivers/thermal/tegra_soctherm.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+#include <soc/tegra/fuse.h>
+
+#define SENSOR_CONFIG0 0
+#define SENSOR_CONFIG0_STOP BIT(0)
+#define SENSOR_CONFIG0_TALL_SHIFT 8
+#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
+#define SENSOR_CONFIG0_OVER BIT(3)
+#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
+
+#define SENSOR_CONFIG1 4
+#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
+#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
+#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
+#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
+
+#define SENSOR_CONFIG2 8
+#define SENSOR_CONFIG2_THERMA_SHIFT 16
+#define SENSOR_CONFIG2_THERMB_SHIFT 0
+
+#define SENSOR_PDIV 0x1c0
+#define SENSOR_PDIV_T124 0x8888
+#define SENSOR_HOTSPOT_OFF 0x1c4
+#define SENSOR_HOTSPOT_OFF_T124 0x00060600
+#define SENSOR_TEMP1 0x1c8
+#define SENSOR_TEMP2 0x1cc
+
+#define SENSOR_TEMP_MASK 0xffff
+#define READBACK_VALUE_MASK 0xff00
+#define READBACK_VALUE_SHIFT 8
+#define READBACK_ADD_HALF BIT(7)
+#define READBACK_NEGATE BIT(1)
+
+#define FUSE_TSENSOR8_CALIB 0x180
+#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc
+
+#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
+
+#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK 0x3ff
+#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK (0x7ff << 10)
+#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT 10
+
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21)
+#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21
+
+#define NOMINAL_CALIB_FT_T124 105
+#define NOMINAL_CALIB_CP_T124 25
+
+struct tegra_tsensor_configuration {
+ u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate;
+};
+
+struct tegra_tsensor {
+ const struct tegra_tsensor_configuration *config;
+ u32 base, calib_fuse_offset;
+ /* Correction values used to modify values read from calibration fuses */
+ s32 fuse_corr_alpha, fuse_corr_beta;
+};
+
+struct tegra_thermctl_zone {
+ void __iomem *reg;
+ unsigned int shift;
+};
+
+static const struct tegra_tsensor_configuration t124_tsensor_config = {
+ .tall = 16300,
+ .tsample = 120,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .pdiv = 8,
+ .tsample_ate = 480,
+ .pdiv_ate = 8
+};
+
+static const struct tegra_tsensor t124_tsensors[] = {
+ {
+ .config = &t124_tsensor_config,
+ .base = 0xc0,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1135400,
+ .fuse_corr_beta = -6266900,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0xe0,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1122220,
+ .fuse_corr_beta = -5700700,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x100,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1127000,
+ .fuse_corr_beta = -6768200,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x120,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1110900,
+ .fuse_corr_beta = -6232000,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x140,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1122300,
+ .fuse_corr_beta = -5936400,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x160,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1145700,
+ .fuse_corr_beta = -7124600,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x180,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1120100,
+ .fuse_corr_beta = -6000500,
+ },
+ {
+ .config = &t124_tsensor_config,
+ .base = 0x1a0,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1106500,
+ .fuse_corr_beta = -6729300,
+ },
+};
+
+struct tegra_soctherm {
+ struct reset_control *reset;
+ struct clk *clock_tsensor;
+ struct clk *clock_soctherm;
+ void __iomem *regs;
+
+ struct thermal_zone_device *thermctl_tzs[4];
+};
+
+struct tsensor_shared_calibration {
+ u32 base_cp, base_ft;
+ u32 actual_temp_cp, actual_temp_ft;
+};
+
+static int calculate_shared_calibration(struct tsensor_shared_calibration *r)
+{
+ u32 val, shifted_cp, shifted_ft;
+ int err;
+
+ err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val);
+ if (err)
+ return err;
+ r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK;
+ r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK)
+ >> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT;
+ val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK)
+ >> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT);
+ shifted_ft = sign_extend32(val, 4);
+
+ err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val);
+ if (err)
+ return err;
+ shifted_cp = sign_extend32(val, 5);
+
+ r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp;
+ r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft;
+
+ return 0;
+}
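As a reading aid for the fuse decoding above, here is a minimal, self-contained sketch of what calculate_shared_calibration() extracts: the CP/FT base counts from FUSE_TSENSOR8_CALIB and the small signed offsets that correct the nominal 25/105 degC calibration points. Both fuse words below are invented sample values, the helper mirrors the kernel's sign_extend32(), and the half-degree interpretation is an inference from the 2 * NOMINAL_CALIB_* terms rather than something stated in the driver.

/*
 * Standalone sketch of the decoding above; both fuse words are invented
 * sample values, not data from real hardware.
 */
#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's sign_extend32(): 'index' is the sign bit. */
static int32_t sign_extend_32(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t tsensor8 = 0x03CF1552;	/* pretend FUSE_TSENSOR8_CALIB */
	uint32_t realign = 0x0000003D;	/* pretend FUSE_SPARE_REALIGNMENT_REG_0 */

	uint32_t base_cp = tsensor8 & 0x3ff;			/* 0x152 */
	uint32_t base_ft = (tsensor8 & (0x7ff << 10)) >> 10;	/* 0x3c5 */

	/* 5-bit signed FT offset in bits 21..25 of the same word */
	int32_t shifted_ft = sign_extend_32((tsensor8 & (0x1f << 21)) >> 21, 4);
	/* 6-bit signed CP offset in the low bits of the realignment word */
	int32_t shifted_cp = sign_extend_32(realign, 5);

	/* 2 * nominal + offset; apparently counted in 0.5 degC steps */
	int32_t actual_temp_cp = 2 * 25 + shifted_cp;	/* 47 */
	int32_t actual_temp_ft = 2 * 105 + shifted_ft;	/* 208 */

	printf("base_cp=%u base_ft=%u temp_cp=%d temp_ft=%d\n",
	       base_cp, base_ft, actual_temp_cp, actual_temp_ft);
	return 0;
}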
+
+static s64 div64_s64_precise(s64 a, s64 b)
+{
+ s64 r, al;
+
+ /* Scale up for increased precision division */
+ al = a << 16;
+
+ r = div64_s64(al * 2 + 1, 2 * b);
+ return r >> 16;
+}
+
+static int
+calculate_tsensor_calibration(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calibration *shared,
+ u32 *calib)
+{
+ u32 val;
+ s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp,
+ mult, div;
+ s16 therma, thermb;
+ s64 tmp;
+ int err;
+
+ err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
+ if (err)
+ return err;
+
+ actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
+ val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK)
+ >> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
+ actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
+
+ delta_sens = actual_tsensor_ft - actual_tsensor_cp;
+ delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
+
+ mult = sensor->config->pdiv * sensor->config->tsample_ate;
+ div = sensor->config->tsample * sensor->config->pdiv_ate;
+
+ therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult,
+ (s64) delta_sens * div);
+
+ tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp -
+ (s64)actual_tsensor_cp * shared->actual_temp_ft;
+ thermb = div64_s64_precise(tmp, (s64)delta_sens);
+
+ therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha,
+ (s64)1000000LL);
+ thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha +
+ sensor->fuse_corr_beta, (s64)1000000LL);
+
+ *calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
+ ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
+
+ return 0;
+}
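calculate_tsensor_calibration() is, at its core, a two-point linear fit: THERMA is the slope between the CP and FT calibration points and THERMB is the extrapolated temperature at zero counts. The sketch below redoes that algebra in plain floating point with invented sample numbers, leaving out the pieces the driver layers on top (the 2^13 fixed-point scale, the pdiv/tsample_ate ratio and the per-sensor fuse_corr_alpha/beta correction).

/*
 * Plain floating-point version of the two-point fit, with invented counts
 * and temperatures.
 */
#include <stdio.h>

int main(void)
{
	double cp_counts = 21632.0, ft_counts = 30880.0;	/* invented */
	double cp_temp = 23.5, ft_temp = 104.0;			/* degC */
	double delta = ft_counts - cp_counts;

	double therma = (ft_temp - cp_temp) / delta;		/* slope */
	double thermb = (ft_counts * cp_temp - cp_counts * ft_temp) / delta;

	/* temp(x) = therma * x + thermb reproduces both calibration points */
	printf("therma=%.6f thermb=%.2f t(cp)=%.1f t(ft)=%.1f\n",
	       therma, thermb,
	       therma * cp_counts + thermb, therma * ft_counts + thermb);
	return 0;
}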
+
+static int enable_tsensor(struct tegra_soctherm *tegra,
+ const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calibration *shared)
+{
+ void __iomem *base = tegra->regs + sensor->base;
+ unsigned int val;
+ u32 calib;
+ int err;
+
+ err = calculate_tsensor_calibration(sensor, shared, &calib);
+ if (err)
+ return err;
+
+ val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
+ writel(val, base + SENSOR_CONFIG0);
+
+ val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
+ val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
+ val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
+ val |= SENSOR_CONFIG1_TEMP_ENABLE;
+ writel(val, base + SENSOR_CONFIG1);
+
+ writel(calib, base + SENSOR_CONFIG2);
+
+ return 0;
+}
+
+/*
+ * Translate from soctherm readback format to millicelsius.
+ * The soctherm readback format in bits is as follows:
+ * TTTTTTTT H______N
+ * where T's contain the temperature in Celsius,
+ * H denotes an addition of 0.5 Celsius and N denotes negation
+ * of the final value.
+ */
+static long translate_temp(u16 val)
+{
+ long t;
+
+ t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
+ if (val & READBACK_ADD_HALF)
+ t += 500;
+ if (val & READBACK_NEGATE)
+ t *= -1;
+
+ return t;
+}
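A quick worked example of the readback format described in the comment above, using an invented raw value; the decoder mirrors the masks and flag bits used by translate_temp().

/*
 * Invented sample: 25 in the T bits plus the half-degree flag decodes to
 * 25.5 degC, i.e. 25500 millicelsius.
 */
#include <stdint.h>
#include <stdio.h>

static long decode(uint16_t val)
{
	long t = ((val & 0xff00) >> 8) * 1000;	/* whole degrees -> mC */

	if (val & (1 << 7))			/* READBACK_ADD_HALF */
		t += 500;
	if (val & (1 << 1))			/* READBACK_NEGATE */
		t = -t;
	return t;
}

int main(void)
{
	uint16_t raw = (25 << 8) | (1 << 7);

	/* prints "0x1980 -> 25500 mC" */
	printf("%#x -> %ld mC\n", (unsigned)raw, decode(raw));
	return 0;
}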
+
+static int tegra_thermctl_get_temp(void *data, long *out_temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 val;
+
+ val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK;
+ *out_temp = translate_temp(val);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+ .get_temp = tegra_thermctl_get_temp,
+};
+
+static const struct of_device_id tegra_soctherm_of_match[] = {
+ { .compatible = "nvidia,tegra124-soctherm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
+
+struct thermctl_zone_desc {
+ unsigned int offset;
+ unsigned int shift;
+};
+
+static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = {
+ { SENSOR_TEMP1, 16 },
+ { SENSOR_TEMP2, 16 },
+ { SENSOR_TEMP1, 0 },
+ { SENSOR_TEMP2, 0 }
+};
+
+static int tegra_soctherm_probe(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra;
+ struct thermal_zone_device *tz;
+ struct tsensor_shared_calibration shared_calib;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ const struct tegra_tsensor *tsensors = t124_tsensors;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->reset)) {
+ dev_err(&pdev->dev, "can't get soctherm reset\n");
+ return PTR_ERR(tegra->reset);
+ }
+
+ tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
+ if (IS_ERR(tegra->clock_tsensor)) {
+ dev_err(&pdev->dev, "can't get tsensor clock\n");
+ return PTR_ERR(tegra->clock_tsensor);
+ }
+
+ tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->clock_soctherm)) {
+ dev_err(&pdev->dev, "can't get soctherm clock\n");
+ return PTR_ERR(tegra->clock_soctherm);
+ }
+
+ reset_control_assert(tegra->reset);
+
+ err = clk_prepare_enable(tegra->clock_soctherm);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(tegra->clock_tsensor);
+ if (err) {
+ clk_disable_unprepare(tegra->clock_soctherm);
+ return err;
+ }
+
+ reset_control_deassert(tegra->reset);
+
+ /* Initialize raw sensors */
+
+ err = calculate_shared_calibration(&shared_calib);
+ if (err)
+ goto disable_clocks;
+
+ for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) {
+ err = enable_tsensor(tegra, tsensors + i, &shared_calib);
+ if (err)
+ goto disable_clocks;
+ }
+
+ writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV);
+ writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF);
+
+ /* Initialize thermctl sensors */
+
+ for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
+ struct tegra_thermctl_zone *zone =
+ devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone) {
+ err = -ENOMEM;
+ goto unregister_tzs;
+ }
+
+ zone->reg = tegra->regs + t124_thermctl_temp_zones[i].offset;
+ zone->shift = t124_thermctl_temp_zones[i].shift;
+
+ tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone,
+ &tegra_of_thermal_ops);
+ if (IS_ERR(tz)) {
+ err = PTR_ERR(tz);
+ dev_err(&pdev->dev, "failed to register sensor: %d\n",
+ err);
+ goto unregister_tzs;
+ }
+
+ tegra->thermctl_tzs[i] = tz;
+ }
+
+ return 0;
+
+unregister_tzs:
+ while (i--)
+ thermal_zone_of_sensor_unregister(&pdev->dev,
+ tegra->thermctl_tzs[i]);
+
+disable_clocks:
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+
+ return err;
+}
+
+static int tegra_soctherm_remove(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
+ thermal_zone_of_sensor_unregister(&pdev->dev,
+ tegra->thermctl_tzs[i]);
+ }
+
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+
+ return 0;
+}
+
+static struct platform_driver tegra_soctherm_driver = {
+ .probe = tegra_soctherm_probe,
+ .remove = tegra_soctherm_remove,
+ .driver = {
+ .name = "tegra-soctherm",
+ .of_match_table = tegra_soctherm_of_match,
+ },
+};
+module_platform_driver(tegra_soctherm_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 43b90709585f..84fdf0792e27 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -368,7 +368,7 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->get_trip_temp(tz, trip, &trip_temp);
/* If we have not crossed the trip_temp, we do not care. */
- if (tz->temperature < trip_temp)
+ if (trip_temp <= 0 || tz->temperature < trip_temp)
return;
trace_thermal_zone_trip(tz, trip, trip_type);
@@ -757,6 +757,7 @@ policy_store(struct device *dev, struct device_attribute *attr,
snprintf(name, sizeof(name), "%s", buf);
mutex_lock(&thermal_governor_lock);
+ mutex_lock(&tz->lock);
gov = __find_governor(strim(name));
if (!gov)
@@ -766,6 +767,7 @@ policy_store(struct device *dev, struct device_attribute *attr,
ret = count;
exit:
+ mutex_unlock(&tz->lock);
mutex_unlock(&thermal_governor_lock);
return ret;
}
@@ -1835,10 +1837,10 @@ static int __init thermal_init(void)
exit_netlink:
genetlink_exit();
-unregister_governors:
- thermal_unregister_governors();
unregister_class:
class_unregister(&thermal_class);
+unregister_governors:
+ thermal_unregister_governors();
error:
idr_destroy(&thermal_tz_idr);
idr_destroy(&thermal_cdev_idr);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index d15d243de27a..9083e7520623 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -89,9 +89,27 @@ static inline void thermal_gov_user_space_unregister(void) {}
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
+int of_thermal_get_ntrips(struct thermal_zone_device *);
+bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
+const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
static inline void of_thermal_destroy_zones(void) { }
+static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
+ int trip)
+{
+ return 0;
+}
+static inline const struct thermal_trip * const
+of_thermal_get_trip_points(struct thermal_zone_device *tz)
+{
+ return NULL;
+}
#endif
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 9eec26dc0448..5fd03865e396 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -286,6 +286,11 @@ static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
}
+static const struct thermal_zone_of_device_ops ti_of_thermal_ops = {
+ .get_temp = __ti_thermal_get_temp,
+ .get_trend = __ti_thermal_get_trend,
+};
+
static struct thermal_zone_device_ops ti_thermal_ops = {
.get_temp = ti_thermal_get_temp,
.get_trend = ti_thermal_get_trend,
@@ -333,8 +338,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
/* in case this is specified by DT */
data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
- data, __ti_thermal_get_temp,
- __ti_thermal_get_trend);
+ data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
/* Create thermal zone */
data->ti_thermal = thermal_zone_device_register(domain,
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 979e7c3ea2cb..d9f85f95eb2a 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1802,7 +1802,6 @@ static struct platform_driver amiga_serial_driver = {
.remove = __exit_p(amiga_serial_remove),
.driver = {
.name = "amiga-serial",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 4f485e88f60c..3c60923b0957 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -309,8 +309,8 @@ static int __init ehv_bc_console_init(void)
* handle for udbg.
*/
if (stdout_bc != CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE)
- pr_warning("ehv-bc: udbg handle %u is not the stdout handle\n",
- CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
+ pr_warn("ehv-bc: udbg handle %u is not the stdout handle\n",
+ CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE);
#endif
/* add_preferred_console() must be called before register_console(),
@@ -740,7 +740,6 @@ static const struct of_device_id ehv_bc_tty_of_ids[] = {
static struct platform_driver ehv_bc_tty_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ehv-bc",
.of_match_table = ehv_bc_tty_of_ids,
},
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 09495f515fa9..967b2c2b7cf1 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -155,9 +155,9 @@ static struct tty_driver *goldfish_tty_console_device(struct console *c,
static int goldfish_tty_console_setup(struct console *co, char *options)
{
- if ((unsigned)co->index > goldfish_tty_line_count)
+ if ((unsigned)co->index >= goldfish_tty_line_count)
return -ENODEV;
- if (goldfish_ttys[co->index].base == 0)
+ if (!goldfish_ttys[co->index].base)
return -ENODEV;
return 0;
}
@@ -317,7 +317,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
unregister_console(&qtty->console);
tty_unregister_device(goldfish_tty_driver, pdev->id);
iounmap(qtty->base);
- qtty->base = 0;
+ qtty->base = NULL;
free_irq(qtty->irq, pdev);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index a2cc5f834c63..071551bf3e9a 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -262,7 +262,6 @@ static struct platform_driver hvc_opal_driver = {
.remove = hvc_opal_remove,
.driver = {
.name = hvc_opal_name,
- .owner = THIS_MODULE,
.of_match_table = hvc_opal_match,
}
};
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index df374860037c..3f6cd3102db5 100644
--- a/drivers/tty/hvc/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -178,7 +178,6 @@ static struct platform_driver hvc_tile_driver = {
.shutdown = hvc_tile_shutdown,
.driver = {
.name = "hvc-tile",
- .owner = THIS_MODULE,
}
};
#endif
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 81e939e90c4c..81ff7e1bfb1a 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1575,7 +1575,7 @@ static int __init hvcs_module_init(void)
*/
rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
if (rc)
- pr_warning(KERN_ERR "HVCS: Failed to create rescan file (err %d)\n", rc);
+ pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc);
return 0;
}
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 7ae6c293e518..a270f04588d7 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -405,8 +405,7 @@ void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp)
hvsi_send_close(pv);
}
- if (pv->tty)
- tty_kref_put(pv->tty);
+ tty_kref_put(pv->tty);
pv->tty = NULL;
}
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index 2c14842541dd..5c77e1eac4ee 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -378,9 +378,9 @@ static void swap_packet_bitfield_to_le(unsigned char *data)
/*
* transform bits from aa.bbb.ccc to ccc.bbb.aa
*/
- ret |= tmp & 0xc0 >> 6;
- ret |= tmp & 0x38 >> 1;
- ret |= tmp & 0x07 << 5;
+ ret |= (tmp & 0xc0) >> 6;
+ ret |= (tmp & 0x38) >> 1;
+ ret |= (tmp & 0x07) << 5;
*data = ret & 0xff;
#endif
}
@@ -393,9 +393,9 @@ static void swap_packet_bitfield_from_le(unsigned char *data)
/*
* transform bits from ccc.bbb.aa to aa.bbb.ccc
*/
- ret |= tmp & 0xe0 >> 5;
- ret |= tmp & 0x1c << 1;
- ret |= tmp & 0x03 << 6;
+ ret |= (tmp & 0xe0) >> 5;
+ ret |= (tmp & 0x1c) << 1;
+ ret |= (tmp & 0x03) << 6;
*data = ret & 0xff;
#endif
}
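The ipwireless hunk above is a pure operator-precedence fix: in C, the shift operators bind tighter than bitwise '&', so the unparenthesized expressions masked against already-shifted constants instead of shifting the masked field. A minimal standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char tmp = 0x80;

	unsigned before = tmp & 0xc0 >> 6;	/* parses as tmp & 0x03 -> 0x00 */
	unsigned after = (tmp & 0xc0) >> 6;	/* 0x80 >> 6 -> 0x02 */

	printf("unparenthesized=%#x parenthesized=%#x\n", before, after);
	return 0;
}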
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 858291ca889c..59ed783c4bcd 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -249,7 +249,7 @@ static int lock_card(struct isi_board *card)
spin_unlock_irqrestore(&card->card_lock, card->flags);
msleep(10);
}
- pr_warning("Failed to lock Card (0x%lx)\n", card->base);
+ pr_warn("Failed to lock Card (0x%lx)\n", card->base);
return 0; /* Failed to acquire the card! */
}
@@ -378,13 +378,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
char *name, const char *routine)
{
if (!port) {
- pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
- name, routine);
+ pr_warn("Warning: bad isicom magic for dev %s in %s\n",
+ name, routine);
return 1;
}
if (port->magic != ISICOM_MAGIC) {
- pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
- name, routine);
+ pr_warn("Warning: NULL isicom port for dev %s in %s\n",
+ name, routine);
return 1;
}
@@ -546,8 +546,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count = header & 0xff;
if (channel + 1 > card->port_count) {
- pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
- __func__, base, channel+1);
+ pr_warn("%s(0x%lx): %d(channel) > port_count\n",
+ __func__, base, channel + 1);
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 26f097f60b10..d2b496750d59 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -321,7 +321,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
- *read_buf_addr(ldata, ldata->read_head++) = c;
+ *read_buf_addr(ldata, ldata->read_head) = c;
+ ldata->read_head++;
}
/**
@@ -351,13 +352,13 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->link->packet) {
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
tty->ctrl_status |= TIOCPKT_FLUSHREAD;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
if (waitqueue_active(&tty->link->read_wait))
wake_up_interruptible(&tty->link->read_wait);
}
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
/**
@@ -2128,7 +2129,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
int minimum, time;
ssize_t retval = 0;
long timeout;
- unsigned long flags;
int packet;
c = job_control(tty, file);
@@ -2174,10 +2174,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
unsigned char cs;
if (b != buf)
break;
- spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ spin_lock_irq(&tty->link->ctrl_lock);
cs = tty->link->ctrl_status;
tty->link->ctrl_status = 0;
- spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
+ spin_unlock_irq(&tty->link->ctrl_lock);
if (tty_put_user(tty, cs, b++)) {
retval = -EFAULT;
b--;
@@ -2193,45 +2193,29 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
if (!input_available_p(tty, 0)) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- up_read(&tty->termios_rwsem);
- tty_flush_to_ldisc(tty);
- down_read(&tty->termios_rwsem);
- if (!input_available_p(tty, 0)) {
- retval = -EIO;
- break;
- }
- } else {
- if (tty_hung_up_p(file))
- break;
- if (!timeout)
- break;
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- n_tty_set_room(tty);
- up_read(&tty->termios_rwsem);
-
- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
- timeout);
-
- down_read(&tty->termios_rwsem);
- continue;
+ retval = -EIO;
+ break;
}
- }
-
- /* Deal with packet mode. */
- if (packet && b == buf) {
- if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
- retval = -EFAULT;
- b--;
+ if (tty_hung_up_p(file))
+ break;
+ if (!timeout)
+ break;
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
break;
}
- nr--;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ n_tty_set_room(tty);
+ up_read(&tty->termios_rwsem);
+
+ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+ timeout);
+
+ down_read(&tty->termios_rwsem);
+ continue;
}
if (ldata->icanon && !L_EXTPROC(tty)) {
@@ -2243,8 +2227,17 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
break;
} else {
int uncopied;
- /* The copy function takes the read lock and handles
- locking internally for this case */
+
+ /* Deal with packet mode. */
+ if (packet && b == buf) {
+ if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
+ retval = -EFAULT;
+ b--;
+ break;
+ }
+ nr--;
+ }
+
uncopied = copy_from_read_buf(tty, &b, &nr);
uncopied += copy_from_read_buf(tty, &b, &nr);
if (uncopied) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 7c4447a5c0f4..a9d256d6e909 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,10 +47,13 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
set_bit(TTY_IO_ERROR, &tty->flags);
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
+ spin_lock_irq(&tty->ctrl_lock);
tty->packet = 0;
+ spin_unlock_irq(&tty->ctrl_lock);
/* Review - krefs on tty_link ?? */
if (!tty->link)
return;
+ tty_flush_to_ldisc(tty->link);
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
wake_up_interruptible(&tty->link->read_wait);
wake_up_interruptible(&tty->link->write_wait);
@@ -64,9 +67,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&devpts_mutex);
}
#endif
- tty_unlock(tty);
tty_vhangup(tty->link);
- tty_lock(tty);
}
}
@@ -178,21 +179,21 @@ static int pty_get_lock(struct tty_struct *tty, int __user *arg)
/* Set the packet mode on a pty */
static int pty_set_pktmode(struct tty_struct *tty, int __user *arg)
{
- unsigned long flags;
int pktmode;
if (get_user(pktmode, arg))
return -EFAULT;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
+ spin_lock_irq(&tty->ctrl_lock);
if (pktmode) {
if (!tty->packet) {
- tty->packet = 1;
tty->link->ctrl_status = 0;
+ smp_mb();
+ tty->packet = 1;
}
} else
tty->packet = 0;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_unlock_irq(&tty->ctrl_lock);
return 0;
}
@@ -207,15 +208,12 @@ static int pty_get_pktmode(struct tty_struct *tty, int __user *arg)
/* Send a signal to the slave */
static int pty_signal(struct tty_struct *tty, int sig)
{
- unsigned long flags;
struct pid *pgrp;
if (tty->link) {
- spin_lock_irqsave(&tty->link->ctrl_lock, flags);
- pgrp = get_pid(tty->link->pgrp);
- spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
-
- kill_pgrp(pgrp, sig, 1);
+ pgrp = tty_get_pgrp(tty->link);
+ if (pgrp)
+ kill_pgrp(pgrp, sig, 1);
put_pid(pgrp);
}
return 0;
@@ -224,16 +222,15 @@ static int pty_signal(struct tty_struct *tty, int sig)
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
- unsigned long flags;
if (!to)
return;
/* tty_buffer_flush(to); FIXME */
if (to->packet) {
- spin_lock_irqsave(&tty->ctrl_lock, flags);
+ spin_lock_irq(&tty->ctrl_lock);
tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
wake_up_interruptible(&to->read_wait);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_unlock_irq(&tty->ctrl_lock);
}
}
@@ -262,6 +259,32 @@ out:
static void pty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
+ /* See if packet mode changed state. */
+ if (tty->link && tty->link->packet) {
+ int extproc = (old_termios->c_lflag & EXTPROC) |
+ (tty->termios.c_lflag & EXTPROC);
+ int old_flow = ((old_termios->c_iflag & IXON) &&
+ (old_termios->c_cc[VSTOP] == '\023') &&
+ (old_termios->c_cc[VSTART] == '\021'));
+ int new_flow = (I_IXON(tty) &&
+ STOP_CHAR(tty) == '\023' &&
+ START_CHAR(tty) == '\021');
+ if ((old_flow != new_flow) || extproc) {
+ spin_lock_irq(&tty->ctrl_lock);
+ if (old_flow != new_flow) {
+ tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
+ if (new_flow)
+ tty->ctrl_status |= TIOCPKT_DOSTOP;
+ else
+ tty->ctrl_status |= TIOCPKT_NOSTOP;
+ }
+ if (extproc)
+ tty->ctrl_status |= TIOCPKT_IOCTL;
+ spin_unlock_irq(&tty->ctrl_lock);
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ }
+
tty->termios.c_cflag &= ~(CSIZE | PARENB);
tty->termios.c_cflag |= (CS8 | CREAD);
}
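For context on why pty_set_termios() now updates ctrl_status: with packet mode enabled via TIOCPKT, every read() from the pty master is prefixed by a control byte, either TIOCPKT_DATA followed by payload or a non-zero flag byte such as TIOCPKT_FLUSHREAD or TIOCPKT_DOSTOP. A hedged userspace sketch, with error handling trimmed and the slave side left to an imagined child process:

/*
 * Sketch only; the TIOCPKT* constants come from <sys/ioctl.h> on Linux.
 */
#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int on = 1;
	unsigned char buf[256];
	ssize_t n;
	int master = posix_openpt(O_RDWR | O_NOCTTY);

	if (master < 0 || grantpt(master) || unlockpt(master))
		return 1;

	ioctl(master, TIOCPKT, &on);	/* enable packet mode */

	/* ... hand ptsname(master) to a child process and let it run ... */

	n = read(master, buf, sizeof(buf));
	if (n > 0) {
		if (buf[0] == TIOCPKT_DATA)
			printf("payload: %zd bytes\n", n - 1);
		else
			printf("control byte: %#x\n", buf[0]);
	}
	close(master);
	return 0;
}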
@@ -278,7 +301,6 @@ static void pty_set_termios(struct tty_struct *tty,
static int pty_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp, *rpgrp;
- unsigned long flags;
struct tty_struct *pty = tty->link;
/* For a PTY we need to lock the tty side */
@@ -286,17 +308,9 @@ static int pty_resize(struct tty_struct *tty, struct winsize *ws)
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
- /* Get the PID values and reference them so we can
- avoid holding the tty ctrl lock while sending signals.
- We need to lock these individually however. */
-
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- pgrp = get_pid(tty->pgrp);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-
- spin_lock_irqsave(&pty->ctrl_lock, flags);
- rpgrp = get_pid(pty->pgrp);
- spin_unlock_irqrestore(&pty->ctrl_lock, flags);
+ /* Signal the foreground process group of both ptys */
+ pgrp = tty_get_pgrp(tty);
+ rpgrp = tty_get_pgrp(pty);
if (pgrp)
kill_pgrp(pgrp, SIGWINCH, 1);
@@ -327,26 +341,26 @@ static void pty_start(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->link && tty->link->packet) {
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
tty->ctrl_status &= ~TIOCPKT_STOP;
tty->ctrl_status |= TIOCPKT_START;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
}
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
static void pty_stop(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->link && tty->link->packet) {
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
tty->ctrl_status &= ~TIOCPKT_START;
tty->ctrl_status |= TIOCPKT_STOP;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
}
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
/**
@@ -368,6 +382,10 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
int idx = tty->index;
int retval = -ENOMEM;
+ /* Opening the slave first has always returned -EIO */
+ if (driver->subtype != PTY_TYPE_MASTER)
+ return -EIO;
+
ports[0] = kmalloc(sizeof **ports, GFP_KERNEL);
ports[1] = kmalloc(sizeof **ports, GFP_KERNEL);
if (!ports[0] || !ports[1])
@@ -380,6 +398,8 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
if (!o_tty)
goto err_put_module;
+ tty_set_lock_subclass(o_tty);
+
if (legacy) {
/* We always use new tty termios data so we can do this
the easy way .. */
@@ -404,8 +424,6 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
* Everything allocated ... set up the o_tty structure.
*/
tty_driver_kref_get(driver->other);
- if (driver->subtype == PTY_TYPE_MASTER)
- o_tty->count++;
/* Establish the links in both directions */
tty->link = o_tty;
o_tty->link = tty;
@@ -417,6 +435,7 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
tty_driver_kref_get(driver);
tty->count++;
+ o_tty->count++;
return 0;
err_free_termios:
if (legacy)
@@ -489,7 +508,6 @@ static const struct tty_operations master_pty_ops_bsd = {
.flush_buffer = pty_flush_buffer,
.chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
- .set_termios = pty_set_termios,
.ioctl = pty_bsd_ioctl,
.cleanup = pty_cleanup,
.resize = pty_resize,
@@ -666,7 +684,6 @@ static const struct tty_operations ptm_unix98_ops = {
.flush_buffer = pty_flush_buffer,
.chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
- .set_termios = pty_set_termios,
.ioctl = pty_unix98_ioctl,
.resize = pty_resize,
.shutdown = pty_unix98_shutdown,
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index cb51be55989e..b00836851061 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -16,6 +16,9 @@
#include <linux/dmaengine.h>
struct uart_8250_dma {
+ int (*tx_dma)(struct uart_8250_port *p);
+ int (*rx_dma)(struct uart_8250_port *p, unsigned int iir);
+
/* Filter function */
dma_filter_fn fn;
@@ -41,6 +44,8 @@ struct uart_8250_dma {
size_t tx_size;
unsigned char tx_running:1;
+ unsigned char tx_err: 1;
+ unsigned char rx_running:1;
};
struct old_serial_port {
@@ -51,7 +56,7 @@ struct old_serial_port {
unsigned int flags;
unsigned char hub6;
unsigned char io_type;
- unsigned char *iomem_base;
+ unsigned char __iomem *iomem_base;
unsigned short iomem_reg_shift;
unsigned long irqflags;
};
@@ -114,6 +119,8 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
}
struct uart_8250_port *serial8250_get_port(int line);
+void serial8250_rpm_get(struct uart_8250_port *p);
+void serial8250_rpm_put(struct uart_8250_port *p);
#if defined(__alpha__) && !defined(CONFIG_PCI)
/*
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index ca5cfdc1459a..11c66856ba2f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -541,23 +541,25 @@ void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
-static void serial8250_rpm_get(struct uart_8250_port *p)
+void serial8250_rpm_get(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_get_sync(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_get);
-static void serial8250_rpm_put(struct uart_8250_port *p)
+void serial8250_rpm_put(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
+EXPORT_SYMBOL_GPL(serial8250_rpm_put);
/*
- * This two wrapper ensure, that enable_runtime_pm_tx() can be called more than
+ * These two wrappers ensure that enable_runtime_pm_tx() can be called more than
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
* empty and the HW can idle again.
*/
@@ -595,6 +597,7 @@ static void serial8250_rpm_put_tx(struct uart_8250_port *p)
*/
static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
{
+ unsigned char lcr = 0, efr = 0;
/*
* Exar UARTs have a SLEEP register that enables or disables
* each UART to enter sleep mode separately. On the XR17V35x the
@@ -611,6 +614,8 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
if (p->capabilities & UART_CAP_SLEEP) {
if (p->capabilities & UART_CAP_EFR) {
+ lcr = serial_in(p, UART_LCR);
+ efr = serial_in(p, UART_EFR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, UART_EFR_ECB);
serial_out(p, UART_LCR, 0);
@@ -618,8 +623,8 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
if (p->capabilities & UART_CAP_EFR) {
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(p, UART_EFR, 0);
- serial_out(p, UART_LCR, 0);
+ serial_out(p, UART_EFR, efr);
+ serial_out(p, UART_LCR, lcr);
}
}
out:
@@ -1350,7 +1355,7 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get_tx(up);
- if (up->dma && !serial8250_tx_dma(up)) {
+ if (up->dma && !up->dma->tx_dma(up)) {
return;
} else if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
@@ -1397,6 +1402,19 @@ static void serial8250_stop_rx(struct uart_port *port)
serial8250_rpm_put(up);
}
+static void serial8250_disable_ms(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ /* no MSR capabilities */
+ if (up->bugs & UART_BUG_NOMSR)
+ return;
+
+ up->ier &= ~UART_IER_MSI;
+ serial_port_out(port, UART_IER, up->ier);
+}
+
static void serial8250_enable_ms(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -1483,7 +1501,7 @@ serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
ignore_char:
lsr = serial_in(up, UART_LSR);
- } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
@@ -1532,7 +1550,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
DEBUG_INTR("THRE...");
/*
- * With RPM enabled, we have to wait once the FIFO is empty before the
+ * With RPM enabled, we have to wait until the FIFO is empty before the
* HW can go idle. So we get here once again with empty FIFO and disable
* the interrupt and RPM in __stop_tx()
*/
@@ -1588,13 +1606,14 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (up->dma)
- dma_err = serial8250_rx_dma(up, iir);
+ dma_err = up->dma->rx_dma(up, iir);
if (!up->dma || dma_err)
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if (!up->dma && (status & UART_LSR_THRE))
+ if ((!up->dma || (up->dma && up->dma->tx_err)) &&
+ (status & UART_LSR_THRE))
serial8250_tx_chars(up);
spin_unlock_irqrestore(&port->lock, flags);
@@ -2603,13 +2622,21 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
}
static void
-serial8250_set_ldisc(struct uart_port *port, int new)
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
- if (new == N_PPS) {
+ if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
+ spin_lock_irq(&port->lock);
serial8250_enable_ms(port);
- } else
+ spin_unlock_irq(&port->lock);
+ } else {
port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+ spin_lock_irq(&port->lock);
+ serial8250_disable_ms(port);
+ spin_unlock_irq(&port->lock);
+ }
+ }
}
@@ -2634,8 +2661,11 @@ serial8250_pm(struct uart_port *port, unsigned int state,
static unsigned int serial8250_port_size(struct uart_8250_port *pt)
{
- if (pt->port.iotype == UPIO_AU)
+ if (pt->port.iotype == UPIO_AU) {
+ if (pt->port.type == PORT_RT2880)
+ return 0x100;
return 0x1000;
+ }
if (is_omap1_8250(pt))
return 0x16 << pt->port.regshift;
@@ -2975,42 +3005,6 @@ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
-static int serial8250_ioctl(struct uart_port *port, unsigned int cmd,
- unsigned long arg)
-{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
- int ret;
- struct serial_rs485 rs485_config;
-
- if (!up->rs485_config)
- return -ENOIOCTLCMD;
-
- switch (cmd) {
- case TIOCSRS485:
- if (copy_from_user(&rs485_config, (void __user *)arg,
- sizeof(rs485_config)))
- return -EFAULT;
-
- ret = up->rs485_config(up, &rs485_config);
- if (ret)
- return ret;
-
- memcpy(&up->rs485, &rs485_config, sizeof(rs485_config));
-
- return 0;
- case TIOCGRS485:
- if (copy_to_user((void __user *)arg, &up->rs485,
- sizeof(up->rs485)))
- return -EFAULT;
- return 0;
- default:
- break;
- }
-
- return -ENOIOCTLCMD;
-}
-
static const char *
serial8250_type(struct uart_port *port)
{
@@ -3042,7 +3036,6 @@ static struct uart_ops serial8250_pops = {
.request_port = serial8250_request_port,
.config_port = serial8250_config_port,
.verify_port = serial8250_verify_port,
- .ioctl = serial8250_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial8250_get_poll_char,
.poll_put_char = serial8250_put_poll_char,
@@ -3198,7 +3191,9 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_rpm_get(up);
- if (port->sysrq || oops_in_progress)
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
@@ -3237,7 +3232,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_rpm_put(up);
}
-static int __init serial8250_console_setup(struct console *co, char *options)
+static int serial8250_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
@@ -3488,7 +3483,6 @@ static struct platform_driver serial8250_isa_driver = {
.resume = serial8250_resume,
.driver = {
.name = "serial8250",
- .owner = THIS_MODULE,
},
};
@@ -3585,10 +3579,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.fifosize = up->port.fifosize;
uart->tx_loadsz = up->tx_loadsz;
uart->capabilities = up->capabilities;
- uart->rs485_config = up->rs485_config;
- uart->rs485 = up->rs485;
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
+ uart->port.rs485_config = up->port.rs485_config;
+ uart->port.rs485 = up->port.rs485;
/* Take tx_loadsz from fifosize if it wasn't set separately */
if (uart->port.fifosize && !uart->tx_loadsz)
@@ -3623,8 +3617,13 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->dl_read = up->dl_read;
if (up->dl_write)
uart->dl_write = up->dl_write;
- if (up->dma)
+ if (up->dma) {
uart->dma = up->dma;
+ if (!uart->dma->tx_dma)
+ uart->dma->tx_dma = serial8250_tx_dma;
+ if (!uart->dma->rx_dma)
+ uart->dma->rx_dma = serial8250_rx_dma;
+ }
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 148ffe4c232f..fcd7ac6af2fc 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -21,6 +21,7 @@ static void __dma_tx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
struct circ_buf *xmit = &p->port.state->xmit;
unsigned long flags;
+ int ret;
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
@@ -36,8 +37,11 @@ static void __dma_tx_complete(void *param)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&p->port);
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
- serial8250_tx_dma(p);
+ ret = serial8250_tx_dma(p);
+ if (ret) {
+ p->ier |= UART_IER_THRI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+ }
spin_unlock_irqrestore(&p->port.lock, flags);
}
@@ -53,6 +57,7 @@ static void __dma_rx_complete(void *param)
dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
+ dma->rx_running = 0;
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
dmaengine_terminate_all(dma->rxchan);
@@ -69,6 +74,7 @@ int serial8250_tx_dma(struct uart_8250_port *p)
struct uart_8250_dma *dma = p->dma;
struct circ_buf *xmit = &p->port.state->xmit;
struct dma_async_tx_descriptor *desc;
+ int ret;
if (uart_tx_stopped(&p->port) || dma->tx_running ||
uart_circ_empty(xmit))
@@ -80,11 +86,12 @@ int serial8250_tx_dma(struct uart_8250_port *p)
dma->tx_addr + xmit->tail,
dma->tx_size, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc)
- return -EBUSY;
+ if (!desc) {
+ ret = -EBUSY;
+ goto err;
+ }
dma->tx_running = 1;
-
desc->callback = __dma_tx_complete;
desc->callback_param = p;
@@ -94,19 +101,23 @@ int serial8250_tx_dma(struct uart_8250_port *p)
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_async_issue_pending(dma->txchan);
-
+ if (dma->tx_err) {
+ dma->tx_err = 0;
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ }
+ }
return 0;
+err:
+ dma->tx_err = 1;
+ return ret;
}
-EXPORT_SYMBOL_GPL(serial8250_tx_dma);
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
struct uart_8250_dma *dma = p->dma;
struct dma_async_tx_descriptor *desc;
- struct dma_tx_state state;
- int dma_status;
-
- dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
switch (iir & 0x3f) {
case UART_IIR_RLSI:
@@ -117,7 +128,7 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
* If RCVR FIFO trigger level was not reached, complete the
* transfer and let 8250_core copy the remaining data.
*/
- if (dma_status == DMA_IN_PROGRESS) {
+ if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_complete(p);
}
@@ -126,7 +137,7 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
break;
}
- if (dma_status)
+ if (dma->rx_running)
return 0;
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
@@ -135,6 +146,7 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
if (!desc)
return -EBUSY;
+ dma->rx_running = 1;
desc->callback = __dma_rx_complete;
desc->callback_param = p;
@@ -147,7 +159,6 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
return 0;
}
-EXPORT_SYMBOL_GPL(serial8250_rx_dma);
int serial8250_request_dma(struct uart_8250_port *p)
{
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index ed45a14872cb..555de07db593 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -122,13 +122,44 @@ static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
return dw8250_modify_msr(p, offset, value);
}
-/* Read Back (rb) version to ensure register access ording. */
-static void dw8250_serial_out_rb(struct uart_port *p, int offset, int value)
+#ifdef CONFIG_64BIT
+static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
{
- dw8250_serial_out(p, offset, value);
- dw8250_serial_in(p, UART_LCR);
+ unsigned int value;
+
+ value = (u8)__raw_readq(p->membase + (offset << p->regshift));
+
+ return dw8250_modify_msr(p, offset, value);
}
+static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ if (offset == UART_MCR)
+ d->last_mcr = value;
+
+ value &= 0xff;
+ __raw_writeq(value, p->membase + (offset << p->regshift));
+ /* Read back to ensure register write ordering. */
+ __raw_readq(p->membase + (UART_LCR << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
+ return;
+ dw8250_force_idle(p);
+ __raw_writeq(value & 0xff,
+ p->membase + (UART_LCR << p->regshift));
+ }
+ dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ }
+}
+#endif /* CONFIG_64BIT */
+
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
@@ -258,22 +289,19 @@ static int dw8250_probe_of(struct uart_port *p,
struct uart_8250_port *up = up_to_u8250p(p);
u32 val;
bool has_ucv = true;
+ int id;
+#ifdef CONFIG_64BIT
if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) {
-#ifdef __BIG_ENDIAN
- /*
- * Low order bits of these 64-bit registers, when
- * accessed as a byte, are 7 bytes further down in the
- * address space in big endian mode.
- */
- p->membase += 7;
-#endif
- p->serial_out = dw8250_serial_out_rb;
+ p->serial_in = dw8250_serial_inq;
+ p->serial_out = dw8250_serial_outq;
p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
p->type = PORT_OCTEON;
data->usr_reg = 0x27;
has_ucv = false;
- } else if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ } else
+#endif
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
switch (val) {
case 1:
break;
@@ -290,9 +318,22 @@ static int dw8250_probe_of(struct uart_port *p,
if (has_ucv)
dw8250_setup_port(up);
+ /* if we have a valid fifosize, try hooking up DMA here */
+ if (p->fifosize) {
+ up->dma = &data->dma;
+
+ up->dma->rxconf.src_maxburst = p->fifosize / 4;
+ up->dma->txconf.dst_maxburst = p->fifosize / 4;
+ }
+
if (!of_property_read_u32(np, "reg-shift", &val))
p->regshift = val;
+ /* get index of serial line, if found in DT aliases */
+ id = of_alias_get_id(np, "serial");
+ if (id >= 0)
+ p->line = id;
+
/* clock got configured through clk api, all done */
if (p->uartclk)
return 0;
@@ -543,7 +584,6 @@ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
- .owner = THIS_MODULE,
.pm = &dw8250_pm_ops,
.of_match_table = dw8250_of_match,
.acpi_match_table = ACPI_PTR(dw8250_acpi_match),
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 56c87232b6a0..ae5eaed6aa85 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -102,10 +102,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "unable to allocate private data\n");
+ if (!priv)
return -ENOMEM;
- }
priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk)) {
@@ -161,7 +159,6 @@ static struct platform_driver serial8250_em_platform_driver = {
.driver = {
.name = "serial8250-em",
.of_match_table = serial8250_em_dt_ids,
- .owner = THIS_MODULE,
},
.probe = serial8250_em_probe,
.remove = serial8250_em_remove,
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 1bb28cb69493..1e6899bc9429 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -89,11 +89,11 @@ static int fintek_8250_check_id(void)
return 0;
}
-static int fintek_8250_rs4850_config(struct uart_8250_port *uart,
+static int fintek_8250_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
- int index = fintek_8250_get_index(uart->port.iobase);
+ int index = fintek_8250_get_index(port->iobase);
if (index < 0)
return -EINVAL;
@@ -134,6 +134,8 @@ static int fintek_8250_rs4850_config(struct uart_8250_port *uart,
outb(config, DATA_PORT);
fintek_8250_exit_key();
+ port->rs485 = *rs485;
+
return 0;
}
@@ -166,7 +168,7 @@ fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
uart.port.irq = pnp_irq(dev, 0);
uart.port.iobase = pnp_port_start(dev, 0);
uart.port.iotype = UPIO_PORT;
- uart.rs485_config = fintek_8250_rs4850_config;
+ uart.port.rs485_config = fintek_8250_rs485_config;
uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE)
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index afffe4d1f034..b4882082b247 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -88,10 +88,6 @@ extern int hp300_uart_scode;
/*
* Parse the bootinfo to find descriptions for headless console and
* debug serial ports and register them with the 8250 driver.
- * This function should be called before serial_console_init() is called
- * to make sure the serial console will be available for use. IA-64 kernel
- * calls this function from setup_arch() after the EFI and ACPI tables have
- * been parsed.
*/
int __init hp300_setup_serial_console(void)
{
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b7f1dc64fed5..7a11fac775c4 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -74,14 +74,14 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
/* Set to next lower baudrate supported */
if ((baud == 500000) || (baud == 576000))
baud = 460800;
- quot = DIV_ROUND_CLOSEST(port->uartclk, 4 * baud);
+ quot = DIV_ROUND_UP(port->uartclk, 4 * baud);
} else {
serial_port_out(port, UART_MTK_HIGHS, 0x3);
/* Set to highest baudrate supported */
if (baud >= 1152000)
baud = 921600;
- quot = (port->uartclk / (256 * baud)) + 1;
+ quot = DIV_ROUND_UP(port->uartclk, 256 * baud);
}
/*
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
new file mode 100644
index 000000000000..336602eb453e
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -0,0 +1,1281 @@
+/*
+ * 8250-core based driver for the OMAP internal UART
+ *
+ * based on omap-serial.c, Copyright (C) 2010 Texas Instruments.
+ *
+ * Copyright (C) 2014 Sebastian Andrzej Siewior
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/tty_flip.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/console.h>
+#include <linux/pm_qos.h>
+#include <linux/dma-mapping.h>
+
+#include "8250.h"
+
+#define DEFAULT_CLK_SPEED 48000000
+
+#define UART_ERRATA_i202_MDR1_ACCESS (1 << 0)
+#define OMAP_UART_WER_HAS_TX_WAKEUP (1 << 1)
+#define OMAP_DMA_TX_KICK (1 << 2)
+
+#define OMAP_UART_FCR_RX_TRIG 6
+#define OMAP_UART_FCR_TX_TRIG 4
+
+/* SCR register bitmasks */
+#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
+#define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK (1 << 6)
+#define OMAP_UART_SCR_TX_EMPTY (1 << 3)
+#define OMAP_UART_SCR_DMAMODE_MASK (3 << 1)
+#define OMAP_UART_SCR_DMAMODE_1 (1 << 1)
+#define OMAP_UART_SCR_DMAMODE_CTL (1 << 0)
+
+/* MVR register bitmasks */
+#define OMAP_UART_MVR_SCHEME_SHIFT 30
+#define OMAP_UART_LEGACY_MVR_MAJ_MASK 0xf0
+#define OMAP_UART_LEGACY_MVR_MAJ_SHIFT 4
+#define OMAP_UART_LEGACY_MVR_MIN_MASK 0x0f
+#define OMAP_UART_MVR_MAJ_MASK 0x700
+#define OMAP_UART_MVR_MAJ_SHIFT 8
+#define OMAP_UART_MVR_MIN_MASK 0x3f
+
+#define UART_TI752_TLR_TX 0
+#define UART_TI752_TLR_RX 4
+
+#define TRIGGER_TLR_MASK(x) ((x & 0x3c) >> 2)
+#define TRIGGER_FCR_MASK(x) (x & 3)
+
+/* Enable XON/XOFF flow control on output */
+#define OMAP_UART_SW_TX 0x08
+/* Enable XON/XOFF flow control on input */
+#define OMAP_UART_SW_RX 0x02
+
+#define OMAP_UART_WER_MOD_WKUP 0x7f
+#define OMAP_UART_TX_WAKEUP_EN (1 << 7)
+
+#define TX_TRIGGER 1
+#define RX_TRIGGER 48
+
+#define OMAP_UART_TCR_RESTORE(x) ((x / 4) << 4)
+#define OMAP_UART_TCR_HALT(x) ((x / 4) << 0)
+
+#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
+
+#define OMAP_UART_REV_46 0x0406
+#define OMAP_UART_REV_52 0x0502
+#define OMAP_UART_REV_63 0x0603
+
+struct omap8250_priv {
+ int line;
+ u8 habit;
+ u8 mdr1;
+ u8 efr;
+ u8 scr;
+ u8 wer;
+ u8 xon;
+ u8 xoff;
+ u8 delayed_restore;
+ u16 quot;
+
+ bool is_suspending;
+ int wakeirq;
+ int wakeups_enabled;
+ u32 latency;
+ u32 calc_latency;
+ struct pm_qos_request pm_qos_request;
+ struct work_struct qos_work;
+ struct uart_8250_dma omap8250_dma;
+};
+
+static u32 uart_read(struct uart_8250_port *up, u32 reg)
+{
+ return readl(up->port.membase + (reg << up->port.regshift));
+}
+
+/*
+ * Work around for Errata i202 (2430, 3430, 3630, 4430 and 4460):
+ * accessing any UART register right after an MDR1 write can cause the
+ * UART to corrupt data.
+ *
+ * Required delay:
+ * 5 L4 clock cycles + 5 UART functional clock cycles (~0.2us at 48MHz);
+ * allow ten times as much.
+ */
+static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
+ struct omap8250_priv *priv)
+{
+ u8 timeout = 255;
+ u8 old_mdr1;
+
+ old_mdr1 = serial_in(up, UART_OMAP_MDR1);
+ if (old_mdr1 == priv->mdr1)
+ return;
+
+ serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+ udelay(2);
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
+ UART_FCR_CLEAR_RCVR);
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
+ (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+ /* Should *never* happen. We warn and carry on. */
+ dev_crit(up->port.dev, "Errata i202: timedout %x\n",
+ serial_in(up, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
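
The udelay(2) above follows from the figures quoted in the errata comment:

/* 5 L4 + 5 functional clock cycles = 10 cycles                         */
/* 10 cycles / 48 MHz ~= 0.21 us; ten times that ~= 2.1 us -> udelay(2) */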
+
+static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
+ struct omap8250_priv *priv)
+{
+ unsigned int uartclk = port->uartclk;
+ unsigned int div_13, div_16;
+ unsigned int abs_d13, abs_d16;
+
+ /*
+ * Old custom speed handling.
+ */
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
+ priv->quot = port->custom_divisor & 0xffff;
+ /*
+ * I assume that nobody is using this. But hey, if somebody
+ * would like to specify the divisor _and_ the mode then the
+ * driver is ready and waiting for it.
+ */
+ if (port->custom_divisor & (1 << 16))
+ priv->mdr1 = UART_OMAP_MDR1_13X_MODE;
+ else
+ priv->mdr1 = UART_OMAP_MDR1_16X_MODE;
+ return;
+ }
+ div_13 = DIV_ROUND_CLOSEST(uartclk, 13 * baud);
+ div_16 = DIV_ROUND_CLOSEST(uartclk, 16 * baud);
+
+ if (!div_13)
+ div_13 = 1;
+ if (!div_16)
+ div_16 = 1;
+
+ abs_d13 = abs(baud - uartclk / 13 / div_13);
+ abs_d16 = abs(baud - uartclk / 16 / div_16);
+
+ if (abs_d13 >= abs_d16) {
+ priv->mdr1 = UART_OMAP_MDR1_16X_MODE;
+ priv->quot = div_16;
+ } else {
+ priv->mdr1 = UART_OMAP_MDR1_13X_MODE;
+ priv->quot = div_13;
+ }
+}
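
A worked pass through the selection above, assuming the driver's 48 MHz DEFAULT_CLK_SPEED and a requested 460800 baud:

/* div_16 = DIV_ROUND_CLOSEST(48000000, 16 * 460800) = 7     */
/*          48000000 / 16 / 7 = 428571 baud, error 32229     */
/* div_13 = DIV_ROUND_CLOSEST(48000000, 13 * 460800) = 8     */
/*          48000000 / 13 / 8 = 461538 baud, error   738     */
/* abs_d13 < abs_d16, so MDR1 selects 13x mode with quot = 8 */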
+
+static void omap8250_update_scr(struct uart_8250_port *up,
+ struct omap8250_priv *priv)
+{
+ u8 old_scr;
+
+ old_scr = serial_in(up, UART_OMAP_SCR);
+ if (old_scr == priv->scr)
+ return;
+
+ /*
+ * The manual recommends not enabling the SCR DMA-mode selector (which
+ * takes the selection over from the FCR) and choosing the DMA mode in
+ * one and the same register write, because this may lead to malfunction.
+ */
+ if (priv->scr & OMAP_UART_SCR_DMAMODE_MASK)
+ serial_out(up, UART_OMAP_SCR,
+ priv->scr & ~OMAP_UART_SCR_DMAMODE_MASK);
+ serial_out(up, UART_OMAP_SCR, priv->scr);
+}
+
+static void omap8250_restore_regs(struct uart_8250_port *up)
+{
+ struct omap8250_priv *priv = up->port.private_data;
+ struct uart_8250_dma *dma = up->dma;
+
+ if (dma && dma->tx_running) {
+ /*
+ * TCSANOW requests the change to occur immediately, however if a
+ * TX-DMA operation is in progress it has been observed that the
+ * transfer may stall and never complete. Therefore we delay the
+ * register restore until the DMA transfer completes, which prevents
+ * this hang.
+ */
+ priv->delayed_restore = 1;
+ return;
+ }
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, UART_MCR_TCRTLR);
+ serial_out(up, UART_FCR, up->fcr);
+
+ omap8250_update_scr(up, priv);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_RESTORE(16) |
+ OMAP_UART_TCR_HALT(52));
+ serial_out(up, UART_TI752_TLR,
+ TRIGGER_TLR_MASK(TX_TRIGGER) << UART_TI752_TLR_TX |
+ TRIGGER_TLR_MASK(RX_TRIGGER) << UART_TI752_TLR_RX);
+
+ serial_out(up, UART_LCR, 0);
+
+ /* drop TCR + TLR access, we setup XON/XOFF later */
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_IER, up->ier);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_dl_write(up, priv->quot);
+
+ serial_out(up, UART_EFR, priv->efr);
+
+ /* Configure flow control */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_XON1, priv->xon);
+ serial_out(up, UART_XOFF1, priv->xoff);
+
+ serial_out(up, UART_LCR, up->lcr);
+ /* need mode A for FCR */
+ if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
+ omap_8250_mdr1_errataset(up, priv);
+ else
+ serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+ up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+}
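
With the TCR macros defined earlier, the value written in omap8250_restore_regs() works out as follows (the register holds both levels in units of four characters, as the /4 in the macros implies):

/* OMAP_UART_TCR_RESTORE(16) = (16 / 4) << 4 = 0x40                   */
/* OMAP_UART_TCR_HALT(52)    = (52 / 4) << 0 = 0x0d                   */
/* TCR = 0x4d: halt RX (deassert RTS) at 52 bytes, resume at 16 bytes */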
+
+/*
+ * OMAP can use "CLK / (16 or 13) / div" for the baud rate, and then we have
+ * some differences in how we want to handle flow control.
+ */
+static void omap_8250_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ struct omap8250_priv *priv = up->port.private_data;
+ unsigned char cval = 0;
+ unsigned int baud;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+ if (termios->c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / 16 / 0xffff,
+ port->uartclk / 13);
+ omap_8250_get_divisor(port, baud, priv);
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ pm_runtime_get_sync(port->dev);
+ spin_lock_irq(&port->lock);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (IGNBRK | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * Modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+
+ up->lcr = cval;
+ /* Up to here it was mostly serial8250_do_set_termios() */
+
+ /*
+ * We enable TRIG_GRANU for RX and TX and additionally we set the
+ * SCR_TX_EMPTY bit. The result is the following:
+ * - RX_TRIGGER bytes in the FIFO will cause an interrupt.
+ * - fewer than RX_TRIGGER bytes will also cause an interrupt once
+ *   the UART decides that no new bytes are arriving.
+ * - Once THRE is enabled, the interrupt will be fired once the FIFO is
+ * empty - the trigger level is ignored here.
+ *
+ * Once DMA is enabled:
+ * - UART will assert the TX DMA line once there is room for TX_TRIGGER
+ * bytes in the TX FIFO. On each assert the DMA engine will move
+ * TX_TRIGGER bytes into the FIFO.
+ * - UART will assert the RX DMA line once there are RX_TRIGGER bytes in
+ * the FIFO and move RX_TRIGGER bytes.
+ * This is because threshold and trigger values are the same.
+ */
+ up->fcr = UART_FCR_ENABLE_FIFO;
+ up->fcr |= TRIGGER_FCR_MASK(TX_TRIGGER) << OMAP_UART_FCR_TX_TRIG;
+ up->fcr |= TRIGGER_FCR_MASK(RX_TRIGGER) << OMAP_UART_FCR_RX_TRIG;
+
+ priv->scr = OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | OMAP_UART_SCR_TX_EMPTY |
+ OMAP_UART_SCR_TX_TRIG_GRANU1_MASK;
+
+ if (up->dma)
+ priv->scr |= OMAP_UART_SCR_DMAMODE_1 |
+ OMAP_UART_SCR_DMAMODE_CTL;
+
+ priv->xon = termios->c_cc[VSTART];
+ priv->xoff = termios->c_cc[VSTOP];
+
+ priv->efr = 0;
+ up->mcr &= ~(UART_MCR_RTS | UART_MCR_XONANY);
+ if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
+ /* Enable AUTORTS and AUTOCTS */
+ priv->efr |= UART_EFR_CTS | UART_EFR_RTS;
+
+ /* Ensure MCR RTS is asserted */
+ up->mcr |= UART_MCR_RTS;
+ } else if (up->port.flags & UPF_SOFT_FLOW) {
+ /*
+ * IXON Flag:
+ * Enable XON/XOFF flow control on input.
+ * Receiver compares XON1, XOFF1.
+ */
+ if (termios->c_iflag & IXON)
+ priv->efr |= OMAP_UART_SW_RX;
+
+ /*
+ * IXOFF Flag:
+ * Enable XON/XOFF flow control on output.
+ * Transmit XON1, XOFF1
+ */
+ if (termios->c_iflag & IXOFF)
+ priv->efr |= OMAP_UART_SW_TX;
+
+ /*
+ * IXANY Flag:
+ * Enable any character to restart output.
+ * Operation resumes after receiving any
+ * character after recognition of the XOFF character
+ */
+ if (termios->c_iflag & IXANY)
+ up->mcr |= UART_MCR_XONANY;
+ }
+ omap8250_restore_regs(up);
+
+ spin_unlock_irq(&up->port.lock);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
+ /* calculate wakeup latency constraint */
+ priv->calc_latency = USEC_PER_SEC * 64 * 8 / baud;
+ priv->latency = priv->calc_latency;
+
+ schedule_work(&priv->qos_work);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+}
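
How the trigger values above end up split between FCR and TLR: the GRANU1 bits make the effective level TLR-field * 4 + FCR-field, which is exactly the split the two TRIGGER_*_MASK macros implement:

/* RX_TRIGGER = 48: TRIGGER_TLR_MASK(48) = 12, TRIGGER_FCR_MASK(48) = 0 */
/*                  effective RX trigger = 12 * 4 + 0 = 48 bytes        */
/* TX_TRIGGER = 1:  TRIGGER_TLR_MASK(1)  = 0,  TRIGGER_FCR_MASK(1)  = 1 */
/*                  effective TX trigger =  0 * 4 + 1 = 1 byte          */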
+
+/* same as 8250 except that we may have extra flow bits set in EFR */
+static void omap_8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ struct omap8250_priv *priv = up->port.private_data;
+
+ pm_runtime_get_sync(port->dev);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, priv->efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, priv->efr);
+ serial_out(up, UART_LCR, 0);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+}
+
+static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
+ struct omap8250_priv *priv)
+{
+ u32 mvr, scheme;
+ u16 revision, major, minor;
+
+ mvr = uart_read(up, UART_OMAP_MVER);
+
+ /* Check revision register scheme */
+ scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
+
+ switch (scheme) {
+ case 0: /* Legacy Scheme: OMAP2/3 */
+ /* MINOR_REV[0:4], MAJOR_REV[4:7] */
+ major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >>
+ OMAP_UART_LEGACY_MVR_MAJ_SHIFT;
+ minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK);
+ break;
+ case 1:
+ /* New Scheme: OMAP4+ */
+ /* MINOR_REV[0:5], MAJOR_REV[8:10] */
+ major = (mvr & OMAP_UART_MVR_MAJ_MASK) >>
+ OMAP_UART_MVR_MAJ_SHIFT;
+ minor = (mvr & OMAP_UART_MVR_MIN_MASK);
+ break;
+ default:
+ dev_warn(up->port.dev,
+ "Unknown revision, defaulting to highest\n");
+ /* highest possible revision */
+ major = 0xff;
+ minor = 0xff;
+ }
+ /* normalize revision for the driver */
+ revision = UART_BUILD_REVISION(major, minor);
+
+ switch (revision) {
+ case OMAP_UART_REV_46:
+ priv->habit = UART_ERRATA_i202_MDR1_ACCESS;
+ break;
+ case OMAP_UART_REV_52:
+ priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+ OMAP_UART_WER_HAS_TX_WAKEUP;
+ break;
+ case OMAP_UART_REV_63:
+ priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+ OMAP_UART_WER_HAS_TX_WAKEUP;
+ break;
+ default:
+ break;
+ }
+}
+
+static void omap8250_uart_qos_work(struct work_struct *work)
+{
+ struct omap8250_priv *priv;
+
+ priv = container_of(work, struct omap8250_priv, qos_work);
+ pm_qos_update_request(&priv->pm_qos_request, priv->latency);
+}
+
+static irqreturn_t omap_wake_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ int ret;
+
+ ret = port->handle_irq(port);
+ if (ret)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static int omap_8250_startup(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ struct omap8250_priv *priv = port->private_data;
+
+ int ret;
+
+ if (priv->wakeirq) {
+ ret = request_irq(priv->wakeirq, omap_wake_irq,
+ port->irqflags, "uart wakeup irq", port);
+ if (ret)
+ return ret;
+ disable_irq(priv->wakeirq);
+ }
+
+ pm_runtime_get_sync(port->dev);
+
+ ret = serial8250_do_startup(port);
+ if (ret)
+ goto err;
+
+#ifdef CONFIG_PM_RUNTIME
+ up->capabilities |= UART_CAP_RPM;
+#endif
+
+ /* Enable module level wake up */
+ priv->wer = OMAP_UART_WER_MOD_WKUP;
+ if (priv->habit & OMAP_UART_WER_HAS_TX_WAKEUP)
+ priv->wer |= OMAP_UART_TX_WAKEUP_EN;
+ serial_out(up, UART_OMAP_WER, priv->wer);
+
+ if (up->dma)
+ up->dma->rx_dma(up, 0);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+ return 0;
+err:
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+ if (priv->wakeirq)
+ free_irq(priv->wakeirq, port);
+ return ret;
+}
+
+static void omap_8250_shutdown(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ struct omap8250_priv *priv = port->private_data;
+
+ flush_work(&priv->qos_work);
+ if (up->dma)
+ up->dma->rx_dma(up, UART_IIR_RX_TIMEOUT);
+
+ pm_runtime_get_sync(port->dev);
+
+ serial_out(up, UART_OMAP_WER, 0);
+ serial8250_do_shutdown(port);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
+ if (priv->wakeirq)
+ free_irq(priv->wakeirq, port);
+}
+
+static void omap_8250_throttle(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ pm_runtime_get_sync(port->dev);
+
+ spin_lock_irqsave(&port->lock, flags);
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ serial_out(up, UART_IER, up->ier);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+}
+
+static void omap_8250_unthrottle(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ pm_runtime_get_sync(port->dev);
+
+ spin_lock_irqsave(&port->lock, flags);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+}
+
+#ifdef CONFIG_SERIAL_8250_DMA
+static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir);
+
+static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct tty_port *tty_port = &p->port.state->port;
+ struct dma_tx_state state;
+ int count;
+
+ dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma->rx_running = 0;
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ dmaengine_terminate_all(dma->rxchan);
+
+ count = dma->rx_size - state.residue;
+
+ tty_insert_flip_string(tty_port, dma->rx_buf, count);
+ p->port.icount.rx += count;
+ if (!error)
+ omap_8250_rx_dma(p, 0);
+
+ tty_flip_buffer_push(tty_port);
+}
+
+static void __dma_rx_complete(void *param)
+{
+ __dma_rx_do_complete(param, false);
+}
+
+static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct dma_async_tx_descriptor *desc;
+
+ switch (iir & 0x3f) {
+ case UART_IIR_RLSI:
+ /* 8250_core handles errors and break interrupts */
+ if (dma->rx_running) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_do_complete(p, true);
+ }
+ return -EIO;
+ case UART_IIR_RX_TIMEOUT:
+ /*
+ * If RCVR FIFO trigger level was not reached, complete the
+ * transfer and let 8250_core copy the remaining data.
+ */
+ if (dma->rx_running) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_do_complete(p, true);
+ }
+ return -ETIMEDOUT;
+ case UART_IIR_RDI:
+ /*
+ * The OMAP UART is a special BEAST. If we receive RDI we _have_
+ * a DMA transfer programmed but it didn't work. One reason is
+ * that we were too slow and there were too many bytes in the
+ * FIFO, the UART counted wrong and never kicked the DMA engine
+ * to do anything. That means once we receive RDI on OMAP then
+ * the DMA won't do anything soon so we have to cancel the DMA
+ * transfer and purge the FIFO manually.
+ */
+ if (dma->rx_running) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_do_complete(p, true);
+ }
+ return -ETIMEDOUT;
+
+ default:
+ break;
+ }
+
+ if (dma->rx_running)
+ return 0;
+
+ desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
+ dma->rx_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EBUSY;
+
+ dma->rx_running = 1;
+ desc->callback = __dma_rx_complete;
+ desc->callback_param = p;
+
+ dma->rx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma_async_issue_pending(dma->rxchan);
+ return 0;
+}
+
+static int omap_8250_tx_dma(struct uart_8250_port *p);
+
+static void omap_8250_dma_tx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ struct circ_buf *xmit = &p->port.state->xmit;
+ unsigned long flags;
+ bool en_thri = false;
+ struct omap8250_priv *priv = p->port.private_data;
+
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&p->port.lock, flags);
+
+ dma->tx_running = 0;
+
+ xmit->tail += dma->tx_size;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ p->port.icount.tx += dma->tx_size;
+
+ if (priv->delayed_restore) {
+ priv->delayed_restore = 0;
+ omap8250_restore_regs(p);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&p->port);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
+ int ret;
+
+ ret = omap_8250_tx_dma(p);
+ if (ret)
+ en_thri = true;
+
+ } else if (p->capabilities & UART_CAP_RPM) {
+ en_thri = true;
+ }
+
+ if (en_thri) {
+ dma->tx_err = 1;
+ p->ier |= UART_IER_THRI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+ }
+
+ spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
+static int omap_8250_tx_dma(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+ struct omap8250_priv *priv = p->port.private_data;
+ struct circ_buf *xmit = &p->port.state->xmit;
+ struct dma_async_tx_descriptor *desc;
+ unsigned int skip_byte = 0;
+ int ret;
+
+ if (dma->tx_running)
+ return 0;
+ if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
+
+ /*
+ * Even if there is no data, we need to return an error for the two
+ * cases below so that serial8250_tx_chars() is invoked and properly
+ * clears THRI and/or handles runtime suspend.
+ */
+ if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
+ ret = -EBUSY;
+ goto err;
+ }
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ }
+ return 0;
+ }
+
+ dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (priv->habit & OMAP_DMA_TX_KICK) {
+ u8 tx_lvl;
+
+ /*
+ * We need to put the first byte into the FIFO in order to start
+ * the DMA transfer. For transfers smaller than four bytes we
+ * don't bother doing DMA at all. It does not seem to matter if there
+ * are still bytes in the FIFO from the last transfer (in case
+ * we got here directly from omap_8250_dma_tx_complete()). Bytes
+ * leaving the FIFO seem not to trigger the DMA transfer. It is
+ * really the byte that we put into the FIFO.
+ * If the FIFO is already full then we most likely got here from
+ * omap_8250_dma_tx_complete(). And this means the DMA engine
+ * just completed its work. We would not have to wait the full 86us
+ * (at 115200,8n1) but still around 60us, and even longer at lower
+ * baud rates. So in that case we take the interrupt and try again
+ * with an empty FIFO.
+ */
+ tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
+ if (tx_lvl == p->tx_loadsz) {
+ ret = -EBUSY;
+ goto err;
+ }
+ if (dma->tx_size < 4) {
+ ret = -EINVAL;
+ goto err;
+ }
+ skip_byte = 1;
+ }
+
+ desc = dmaengine_prep_slave_single(dma->txchan,
+ dma->tx_addr + xmit->tail + skip_byte,
+ dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ dma->tx_running = 1;
+
+ desc->callback = omap_8250_dma_tx_complete;
+ desc->callback_param = p;
+
+ dma->tx_cookie = dmaengine_submit(desc);
+
+ dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ dma_async_issue_pending(dma->txchan);
+ if (dma->tx_err)
+ dma->tx_err = 0;
+
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ }
+ if (skip_byte)
+ serial_out(p, UART_TX, xmit->buf[xmit->tail]);
+ return 0;
+err:
+ dma->tx_err = 1;
+ return ret;
+}
+
+/*
+ * This is mostly serial8250_handle_irq(). We have a slightly different DMA
+ * hook for RX/TX and need different logic for them in the ISR. Therefore we
+ * use the default routine in the non-DMA case and this one when DMA is used.
+ */
+static int omap_8250_dma_handle_irq(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned char status;
+ unsigned long flags;
+ u8 iir;
+ int dma_err = 0;
+
+ serial8250_rpm_get(up);
+
+ iir = serial_port_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ serial8250_rpm_put(up);
+ return 0;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = serial_port_in(port, UART_LSR);
+
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+
+ dma_err = omap_8250_rx_dma(up, iir);
+ if (dma_err) {
+ status = serial8250_rx_chars(up, status);
+ omap_8250_rx_dma(up, 0);
+ }
+ }
+ serial8250_modem_status(up);
+ if (status & UART_LSR_THRE && up->dma->tx_err) {
+ if (uart_tx_stopped(&up->port) ||
+ uart_circ_empty(&up->port.state->xmit)) {
+ up->dma->tx_err = 0;
+ serial8250_tx_chars(up);
+ } else {
+ /*
+ * Try again; an earlier failure might have
+ * been resolved by now.
+ */
+ dma_err = omap_8250_tx_dma(up);
+ if (dma_err)
+ serial8250_tx_chars(up);
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ serial8250_rpm_put(up);
+ return 1;
+}
+
+static bool the_no_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+
+#else
+
+static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+{
+ return -EINVAL;
+}
+#endif
+
+static int omap8250_probe(struct platform_device *pdev)
+{
+ struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ struct omap8250_priv *priv;
+ struct uart_8250_port up;
+ int ret;
+ void __iomem *membase;
+
+ if (!regs || !irq) {
+ dev_err(&pdev->dev, "missing registers or irq\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+ resource_size(regs));
+ if (!membase)
+ return -ENODEV;
+
+ memset(&up, 0, sizeof(up));
+ up.port.dev = &pdev->dev;
+ up.port.mapbase = regs->start;
+ up.port.membase = membase;
+ up.port.irq = irq->start;
+ /*
+ * It claims to be 16C750 compatible; however, it is a little different.
+ * It has EFR and no FCR7_64byte bit. The AFE (which it claims to have)
+ * is enabled via EFR instead of MCR. The type is set to PORT_8250 here
+ * just to get things going. UNKNOWN does not work for a few reasons and
+ * we don't need our own type since we don't use 8250's set_termios()
+ * or pm callback.
+ */
+ up.port.type = PORT_8250;
+ up.port.iotype = UPIO_MEM;
+ up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
+ UPF_HARD_FLOW;
+ up.port.private_data = priv;
+
+ up.port.regshift = 2;
+ up.port.fifosize = 64;
+ up.tx_loadsz = 64;
+ up.capabilities = UART_CAP_FIFO;
+#ifdef CONFIG_PM_RUNTIME
+ /*
+ * PM_RUNTIME is mostly transparent. However, to do it right we need a
+ * TX-empty interrupt before we can put the device into auto idle. So if
+ * PM_RUNTIME is not enabled we don't add that flag and can spare that
+ * one extra interrupt in the TX path.
+ */
+ up.capabilities |= UART_CAP_RPM;
+#endif
+ up.port.set_termios = omap_8250_set_termios;
+ up.port.pm = omap_8250_pm;
+ up.port.startup = omap_8250_startup;
+ up.port.shutdown = omap_8250_shutdown;
+ up.port.throttle = omap_8250_throttle;
+ up.port.unthrottle = omap_8250_unthrottle;
+
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &up.port.uartclk);
+ priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ } else {
+ ret = pdev->id;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id\n");
+ return ret;
+ }
+ up.port.line = ret;
+
+ if (!up.port.uartclk) {
+ up.port.uartclk = DEFAULT_CLK_SPEED;
+ dev_warn(&pdev->dev,
+ "No clock speed specified: using default: %d\n",
+ DEFAULT_CLK_SPEED);
+ }
+
+ priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
+ priv->latency);
+ INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
+
+ device_init_wakeup(&pdev->dev, true);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ omap_serial_fill_features_erratas(&up, priv);
+#ifdef CONFIG_SERIAL_8250_DMA
+ if (pdev->dev.of_node) {
+ /*
+ * Oh DMA support. If there are no DMA properties in the DT then
+ * we will fall back to a generic DMA channel which does not
+ * really work here. To ensure that we do not get a generic DMA
+ * channel assigned, we use the_no_dma_filter_fn() here.
+ * To avoid "failed to request DMA" messages we check for DMA
+ * properties in DT.
+ */
+ ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
+ if (ret == 2) {
+ up.dma = &priv->omap8250_dma;
+ up.port.handle_irq = omap_8250_dma_handle_irq;
+ priv->omap8250_dma.fn = the_no_dma_filter_fn;
+ priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
+ priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
+ priv->omap8250_dma.rx_size = RX_TRIGGER;
+ priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
+ priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
+
+ if (of_machine_is_compatible("ti,am33xx"))
+ priv->habit |= OMAP_DMA_TX_KICK;
+ }
+ }
+#endif
+ ret = serial8250_register_8250_port(&up);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register 8250 port\n");
+ goto err;
+ }
+ priv->line = ret;
+ platform_set_drvdata(pdev, priv);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+err:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int omap8250_remove(struct platform_device *pdev)
+{
+ struct omap8250_priv *priv = platform_get_drvdata(pdev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ serial8250_unregister_port(priv->line);
+ pm_qos_remove_request(&priv->pm_qos_request);
+ device_init_wakeup(&pdev->dev, false);
+ return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+
+static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
+ bool enable)
+{
+ if (!priv->wakeirq)
+ return;
+
+ if (enable)
+ enable_irq(priv->wakeirq);
+ else
+ disable_irq_nosync(priv->wakeirq);
+}
+
+static void omap8250_enable_wakeup(struct omap8250_priv *priv,
+ bool enable)
+{
+ if (enable == priv->wakeups_enabled)
+ return;
+
+ omap8250_enable_wakeirq(priv, enable);
+ priv->wakeups_enabled = enable;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int omap8250_prepare(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+ priv->is_suspending = true;
+ return 0;
+}
+
+static void omap8250_complete(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return;
+ priv->is_suspending = false;
+}
+
+static int omap8250_suspend(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+
+ serial8250_suspend_port(priv->line);
+ flush_work(&priv->qos_work);
+
+ if (device_may_wakeup(dev))
+ omap8250_enable_wakeup(priv, true);
+ else
+ omap8250_enable_wakeup(priv, false);
+ return 0;
+}
+
+static int omap8250_resume(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ omap8250_enable_wakeup(priv, false);
+
+ serial8250_resume_port(priv->line);
+ return 0;
+}
+#else
+#define omap8250_prepare NULL
+#define omap8250_complete NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int omap8250_lost_context(struct uart_8250_port *up)
+{
+ u32 val;
+
+ val = serial_in(up, UART_OMAP_MDR1);
+ /*
+ * If we lose context, then MDR1 is set to its reset value which is
+ * UART_OMAP_MDR1_DISABLE. After set_termios() we set it either to 13x
+ * or 16x but never to disable again.
+ */
+ if (val == UART_OMAP_MDR1_DISABLE)
+ return 1;
+ return 0;
+}
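
In register terms (mode values as defined in serial_reg.h):

/* UART_OMAP_MDR1_DISABLE = 0x07 is the reset value (UART disabled); */
/* 16x mode is 0x00 and 13x mode is 0x03, so reading back 0x07 means */
/* the module was reset and its context must be restored.            */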
+
+static int omap8250_runtime_suspend(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up;
+
+ up = serial8250_get_port(priv->line);
+ /*
+ * When using 'no_console_suspend', the console UART must not be
+ * suspended. Since driver suspend is managed by runtime suspend,
+ * preventing runtime suspend (by returning an error) keeps the device
+ * active during system suspend.
+ */
+ if (priv->is_suspending && !console_suspend_enabled) {
+ if (uart_console(&up->port))
+ return -EBUSY;
+ }
+
+ omap8250_enable_wakeup(priv, true);
+ if (up->dma)
+ omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
+
+ priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ schedule_work(&priv->qos_work);
+
+ return 0;
+}
+
+static int omap8250_runtime_resume(struct device *dev)
+{
+ struct omap8250_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up;
+ int loss_cntx;
+
+ /* In case runtime PM tries this before we are set up */
+ if (!priv)
+ return 0;
+
+ up = serial8250_get_port(priv->line);
+ omap8250_enable_wakeup(priv, false);
+ loss_cntx = omap8250_lost_context(up);
+
+ if (loss_cntx)
+ omap8250_restore_regs(up);
+
+ if (up->dma)
+ omap_8250_rx_dma(up, 0);
+
+ priv->latency = priv->calc_latency;
+ schedule_work(&priv->qos_work);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap8250_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
+ SET_RUNTIME_PM_OPS(omap8250_runtime_suspend,
+ omap8250_runtime_resume, NULL)
+ .prepare = omap8250_prepare,
+ .complete = omap8250_complete,
+};
+
+static const struct of_device_id omap8250_dt_ids[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
+
+static struct platform_driver omap8250_platform_driver = {
+ .driver = {
+ .name = "omap8250",
+ .pm = &omap8250_dev_pm_ops,
+ .of_match_table = omap8250_dt_ids,
+ .owner = THIS_MODULE,
+ },
+ .probe = omap8250_probe,
+ .remove = omap8250_remove,
+};
+module_platform_driver(omap8250_platform_driver);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior");
+MODULE_DESCRIPTION("OMAP 8250 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index beb9d71cd47a..31feeb2d0a66 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -79,29 +79,24 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
int bar, int offset, int regshift)
{
struct pci_dev *dev = priv->dev;
- unsigned long base, len;
if (bar >= PCI_NUM_BAR_RESOURCES)
return -EINVAL;
- base = pci_resource_start(dev, bar);
-
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
- len = pci_resource_len(dev, bar);
-
if (!priv->remapped_bar[bar])
- priv->remapped_bar[bar] = ioremap_nocache(base, len);
+ priv->remapped_bar[bar] = pci_ioremap_bar(dev, bar);
if (!priv->remapped_bar[bar])
return -ENOMEM;
port->port.iotype = UPIO_MEM;
port->port.iobase = 0;
- port->port.mapbase = base + offset;
+ port->port.mapbase = pci_resource_start(dev, bar) + offset;
port->port.membase = priv->remapped_bar[bar] + offset;
port->port.regshift = regshift;
} else {
port->port.iotype = UPIO_PORT;
- port->port.iobase = base + offset;
+ port->port.iobase = pci_resource_start(dev, bar) + offset;
port->port.mapbase = 0;
port->port.membase = NULL;
port->port.regshift = 0;
@@ -317,7 +312,6 @@ static void pci_plx9050_exit(struct pci_dev *dev)
static void pci_ni8420_exit(struct pci_dev *dev)
{
void __iomem *p;
- unsigned long base, len;
unsigned int bar = 0;
if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
@@ -325,9 +319,7 @@ static void pci_ni8420_exit(struct pci_dev *dev)
return;
}
- base = pci_resource_start(dev, bar);
- len = pci_resource_len(dev, bar);
- p = ioremap_nocache(base, len);
+ p = pci_ioremap_bar(dev, bar);
if (p == NULL)
return;
@@ -349,7 +341,6 @@ static void pci_ni8420_exit(struct pci_dev *dev)
static void pci_ni8430_exit(struct pci_dev *dev)
{
void __iomem *p;
- unsigned long base, len;
unsigned int bar = 0;
if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
@@ -357,9 +348,7 @@ static void pci_ni8430_exit(struct pci_dev *dev)
return;
}
- base = pci_resource_start(dev, bar);
- len = pci_resource_len(dev, bar);
- p = ioremap_nocache(base, len);
+ p = pci_ioremap_bar(dev, bar);
if (p == NULL)
return;
@@ -682,7 +671,6 @@ static int pci_xircom_init(struct pci_dev *dev)
static int pci_ni8420_init(struct pci_dev *dev)
{
void __iomem *p;
- unsigned long base, len;
unsigned int bar = 0;
if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
@@ -690,9 +678,7 @@ static int pci_ni8420_init(struct pci_dev *dev)
return 0;
}
- base = pci_resource_start(dev, bar);
- len = pci_resource_len(dev, bar);
- p = ioremap_nocache(base, len);
+ p = pci_ioremap_bar(dev, bar);
if (p == NULL)
return -ENOMEM;
@@ -714,7 +700,7 @@ static int pci_ni8420_init(struct pci_dev *dev)
static int pci_ni8430_init(struct pci_dev *dev)
{
void __iomem *p;
- unsigned long base, len;
+ struct pci_bus_region region;
u32 device_window;
unsigned int bar = 0;
@@ -723,14 +709,17 @@ static int pci_ni8430_init(struct pci_dev *dev)
return 0;
}
- base = pci_resource_start(dev, bar);
- len = pci_resource_len(dev, bar);
- p = ioremap_nocache(base, len);
+ p = pci_ioremap_bar(dev, bar);
if (p == NULL)
return -ENOMEM;
- /* Set device window address and size in BAR0 */
- device_window = ((base + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00)
+ /*
+ * Set device window address and size in BAR0, while acknowledging that
+ * the resource structure may contain a translated address that differs
+ * from the address the device responds to.
+ */
+ pcibios_resource_to_bus(dev->bus, &region, &dev->resource[bar]);
+ device_window = ((region.start + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00)
| MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
writel(device_window, p + MITE_IOWBSR1);
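
The resource-to-bus translation matters on hosts whose bridge offsets PCI memory; a sketch with made-up addresses:

/* hypothetical: the host bridge maps CPU 0x90000000 to PCI bus 0x0    */
/* dev->resource[bar].start = 0x90000000 (what pci_ioremap_bar() maps) */
/* region.start             = 0x00000000 (what MITE_IOWBSR1 must hold) */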
@@ -757,8 +746,8 @@ pci_ni8430_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
+ struct pci_dev *dev = priv->dev;
void __iomem *p;
- unsigned long base, len;
unsigned int bar, offset = board->first_offset;
if (idx >= board->num_ports)
@@ -767,9 +756,9 @@ pci_ni8430_setup(struct serial_private *priv,
bar = FL_GET_BASE(board->flags);
offset += idx * board->uart_offset;
- base = pci_resource_start(priv->dev, bar);
- len = pci_resource_len(priv->dev, bar);
- p = ioremap_nocache(base, len);
+ p = pci_ioremap_bar(dev, bar);
+ if (!p)
+ return -ENOMEM;
/* enable the transceiver */
writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
@@ -1003,6 +992,40 @@ static void pci_ite887x_exit(struct pci_dev *dev)
}
/*
+ * EndRun Technologies.
+ * Determine the number of ports available on the device.
+ */
+#define PCI_VENDOR_ID_ENDRUN 0x7401
+#define PCI_DEVICE_ID_ENDRUN_1588 0xe100
+
+static int pci_endrun_init(struct pci_dev *dev)
+{
+ u8 __iomem *p;
+ unsigned long deviceID;
+ unsigned int number_uarts = 0;
+
+ /* EndRun device is all 0xexxx */
+ if (dev->vendor == PCI_VENDOR_ID_ENDRUN &&
+ (dev->device & 0xf000) != 0xe000)
+ return 0;
+
+ p = pci_iomap(dev, 0, 5);
+ if (p == NULL)
+ return -ENOMEM;
+
+ deviceID = ioread32(p);
+ /* EndRun device */
+ if (deviceID == 0x07000200) {
+ number_uarts = ioread8(p + 4);
+ dev_dbg(&dev->dev,
+ "%d ports detected on EndRun PCI Express device\n",
+ number_uarts);
+ }
+ pci_iounmap(dev, p);
+ return number_uarts;
+}
+
+/*
* Oxford Semiconductor Inc.
* Check that device is part of the Tornado range of devices, then determine
* the number of ports available on the device.
@@ -1531,25 +1554,48 @@ static int pci_fintek_setup(struct serial_private *priv,
unsigned long iobase;
unsigned long ciobase = 0;
u8 config_base;
+ u32 bar_data[3];
/*
- * We are supposed to be able to read these from the PCI config space,
- * but the values there don't seem to match what we need to use, so
- * just use these hard-coded values for now, as they are correct.
+ * Find each UART's offset in PCI configuration space
*/
switch (idx) {
- case 0: iobase = 0xe000; config_base = 0x40; break;
- case 1: iobase = 0xe008; config_base = 0x48; break;
- case 2: iobase = 0xe010; config_base = 0x50; break;
- case 3: iobase = 0xe018; config_base = 0x58; break;
- case 4: iobase = 0xe020; config_base = 0x60; break;
- case 5: iobase = 0xe028; config_base = 0x68; break;
- case 6: iobase = 0xe030; config_base = 0x70; break;
- case 7: iobase = 0xe038; config_base = 0x78; break;
- case 8: iobase = 0xe040; config_base = 0x80; break;
- case 9: iobase = 0xe048; config_base = 0x88; break;
- case 10: iobase = 0xe050; config_base = 0x90; break;
- case 11: iobase = 0xe058; config_base = 0x98; break;
+ case 0:
+ config_base = 0x40;
+ break;
+ case 1:
+ config_base = 0x48;
+ break;
+ case 2:
+ config_base = 0x50;
+ break;
+ case 3:
+ config_base = 0x58;
+ break;
+ case 4:
+ config_base = 0x60;
+ break;
+ case 5:
+ config_base = 0x68;
+ break;
+ case 6:
+ config_base = 0x70;
+ break;
+ case 7:
+ config_base = 0x78;
+ break;
+ case 8:
+ config_base = 0x80;
+ break;
+ case 9:
+ config_base = 0x88;
+ break;
+ case 10:
+ config_base = 0x90;
+ break;
+ case 11:
+ config_base = 0x98;
+ break;
default:
/* Unknown number of ports, get out of here */
return -EINVAL;
@@ -1560,6 +1606,14 @@ static int pci_fintek_setup(struct serial_private *priv,
ciobase = (int)(base + (0x8 * idx));
}
+ /* Get the I/O addresses assigned by the BIOS */
+ pci_read_config_dword(pdev, 0x24, &bar_data[0]);
+ pci_read_config_dword(pdev, 0x20, &bar_data[1]);
+ pci_read_config_dword(pdev, 0x1c, &bar_data[2]);
+
+ /* Calculate Real IO Port */
+ iobase = (bar_data[idx/4] & 0xffffffe0) + (idx % 4) * 8;
+
dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%lx ciobase=0x%lx config_base=0x%2x\n",
__func__, idx, iobase, ciobase, config_base);
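
A worked example of the BAR arithmetic above, assuming the BIOS happened to program the three I/O BARs to the layout the old hard-coded table expected (0xe000, 0xe020, 0xe040):

/* idx = 5: bar_data[5 / 4] = bar_data[1] (config offset 0x20) = 0xe021 */
/* iobase = (0xe021 & 0xffffffe0) + (5 % 4) * 8 = 0xe020 + 8 = 0xe028   */
/* which matches the 0xe028 the old switch statement used for port 5    */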
@@ -1760,6 +1814,16 @@ pci_wch_ch353_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int
+pci_wch_ch382_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ port->port.flags |= UPF_FIXED_TYPE;
+ port->port.type = PORT_16850;
+ return pci_default_setup(priv, board, port, idx);
+}
+
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
@@ -1814,6 +1878,8 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_VENDOR_ID_SUNIX 0x1fd4
#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+#define PCIE_VENDOR_ID_WCH 0x1c00
+#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2346,6 +2412,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_netmos_9900_setup,
},
/*
+ * EndRun Technologies
+ */
+ {
+ .vendor = PCI_VENDOR_ID_ENDRUN,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_endrun_init,
+ .setup = pci_default_setup,
+ },
+ /*
* For Oxford Semiconductor Tornado based devices
*/
{
@@ -2494,6 +2571,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH382 2S1P card (16750 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch382_setup,
+ },
/*
* ASIX devices with FIFO bug
*/
@@ -2754,6 +2839,7 @@ enum pci_board_num_t {
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
+ pbn_endrun_2_4000000,
pbn_oxsemi,
pbn_oxsemi_1_4000000,
pbn_oxsemi_2_4000000,
@@ -3299,6 +3385,20 @@ static struct pciserial_board pci_boards[] = {
},
/*
+ * EndRun Technologies
+ * Uses the size of PCI Base region 0 to
+ * signal how many ports are available.
+ * 2-port 952 UART support.
+ */
+ [pbn_endrun_2_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+
+ /*
* This board uses the size of PCI Base region 0 to
 * signal how many ports are available
*/
@@ -3586,6 +3686,7 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
};
/*
@@ -4171,6 +4272,13 @@ static struct pci_device_id serial_pci_tbl[] = {
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
/*
+ * EndRun Technologies. PCI Express device range.
+ * The EndRun PTP/1588 device has 2 native UARTs.
+ */
+ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_endrun_2_4000000 },
+ /*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
*
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 21eca79224e4..0fcbcd29502f 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -293,12 +293,21 @@ config SERIAL_8250_EM
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
- depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883)
+ depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620)
help
If you have a Ralink RT288x/RT305x SoC based board and want to use the
serial port, say Y to this option. The driver can handle up to 2 serial
ports. If unsure, say N.
+config SERIAL_8250_OMAP
+ tristate "Support for OMAP internal UART (8250 based driver)"
+ depends on SERIAL_8250 && ARCH_OMAP2PLUS
+ help
+ If you have a machine based on a Texas Instruments OMAP CPU you
+ can enable its onboard serial ports by enabling this option.
+
+ This driver uses ttyS instead of ttyO.
+
config SERIAL_8250_FINTEK
tristate "Support for Fintek F81216A LPC to 4 UART"
depends on SERIAL_8250 && PNP
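
For a board moving over from the legacy omap-serial driver, the option and the resulting device names look roughly like this (numbering is board specific and shown only as an illustration):

CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_OMAP=y
# a console that used to be ttyO2 under omap-serial now shows up as ttyS2:
#   console=ttyS2,115200n8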
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 5256b894e46a..31e7cdc6865c 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
+obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 98f8bcaf3e7e..c79b43cd6014 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -701,7 +701,7 @@ config PDC_CONSOLE
Saying Y here will enable the software based PDC console to be
used as the system console. This is useful for machines in
which the hardware based console has not been written yet. The
- following steps must be competed to use the PDC console:
+ following steps must be completed to use the PDC console:
1. create the device entry (mknod /dev/ttyB0 c 11 0)
2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
@@ -1029,11 +1029,11 @@ config SERIAL_VR41XX_CONSOLE
a console on a serial port, say Y. Otherwise, say N.
config SERIAL_JSM
- tristate "Digi International NEO PCI Support"
+ tristate "Digi International NEO and Classic PCI Support"
depends on PCI
select SERIAL_CORE
help
- This is a driver for Digi International's Neo series
+ This is a driver for Digi International's Neo and Classic series
of cards which provide multiple serial ports. You would need
something like this to connect more than two modems to your Linux
box, for instance in order to become a dial-in server. This driver
@@ -1281,22 +1281,25 @@ config SERIAL_TIMBERDALE
Add support for UART controller on timberdale.
config SERIAL_BCM63XX
- tristate "bcm63xx serial port support"
+ tristate "Broadcom BCM63xx/BCM33xx UART support"
select SERIAL_CORE
- depends on BCM63XX
+ depends on MIPS || ARM || COMPILE_TEST
help
- If you have a bcm63xx CPU, you can enable its onboard
- serial port by enabling this options.
+ This enables the driver for the onchip UART core found on
+ the following chipsets:
- To compile this driver as a module, choose M here: the
- module will be called bcm963xx_uart.
+ BCM33xx (cable modem)
+ BCM63xx/BCM63xxx (DSL)
+ BCM68xx (PON)
+ BCM7xxx (STB) - DOCSIS console
config SERIAL_BCM63XX_CONSOLE
- bool "Console on bcm63xx serial port"
+ bool "Console on BCM63xx serial port"
depends on SERIAL_BCM63XX=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
- If you have enabled the serial port on the bcm63xx CPU
+ If you have enabled the serial port on the BCM63xx CPU
you can make it the console by answering Y to this option.
config SERIAL_GRLIB_GAISLER_APBUART
@@ -1408,6 +1411,7 @@ config SERIAL_MXS_AUART
depends on ARCH_MXS
tristate "MXS AUART support"
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
help
This driver supports the MXS Application UART (AUART) port.
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 932e01995c0a..192d0435bb86 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -475,7 +475,6 @@ static struct platform_driver altera_jtaguart_platform_driver = {
.remove = altera_jtaguart_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(altera_jtaguart_match),
},
};
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 1cb2cdb1bc42..eb15a50623cb 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -623,7 +623,6 @@ static struct platform_driver altera_uart_platform_driver = {
.remove = altera_uart_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(altera_uart_match),
},
};
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2064d31d0c8b..5d41d5b92619 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -75,7 +75,8 @@ struct uart_amba_port {
static void pl010_stop_tx(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
@@ -85,7 +86,8 @@ static void pl010_stop_tx(struct uart_port *port)
static void pl010_start_tx(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
@@ -95,7 +97,8 @@ static void pl010_start_tx(struct uart_port *port)
static void pl010_stop_rx(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
@@ -103,12 +106,23 @@ static void pl010_stop_rx(struct uart_port *port)
writel(cr, uap->port.membase + UART010_CR);
}
-static void pl010_enable_ms(struct uart_port *port)
+static void pl010_disable_ms(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
cr = readb(uap->port.membase + UART010_CR);
+ cr &= ~UART010_CR_MSIE;
+ writel(cr, uap->port.membase + UART010_CR);
+}
+
+static void pl010_enable_ms(struct uart_port *port)
+{
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
+ unsigned int cr;
+
+ cr = readb(uap->port.membase + UART010_CR);
cr |= UART010_CR_MSIE;
writel(cr, uap->port.membase + UART010_CR);
}
@@ -259,14 +273,16 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
static unsigned int pl010_tx_empty(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int status = readb(uap->port.membase + UART01x_FR);
return status & UART01x_FR_BUSY ? 0 : TIOCSER_TEMT;
}
static unsigned int pl010_get_mctrl(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int result = 0;
unsigned int status;
@@ -283,7 +299,8 @@ static unsigned int pl010_get_mctrl(struct uart_port *port)
static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
if (uap->data)
uap->data->set_mctrl(uap->dev, uap->port.membase, mctrl);
@@ -291,7 +308,8 @@ static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void pl010_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned long flags;
unsigned int lcr_h;
@@ -307,7 +325,8 @@ static void pl010_break_ctl(struct uart_port *port, int break_state)
static int pl010_startup(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
int retval;
/*
@@ -347,7 +366,8 @@ static int pl010_startup(struct uart_port *port)
static void pl010_shutdown(struct uart_port *port)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
/*
* Free the interrupt
@@ -374,7 +394,8 @@ static void
pl010_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int lcr_h, old_cr;
unsigned long flags;
unsigned int baud, quot;
@@ -468,13 +489,21 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
spin_unlock_irqrestore(&uap->port.lock, flags);
}
-static void pl010_set_ldisc(struct uart_port *port, int new)
+static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
- if (new == N_PPS) {
+ if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
+ spin_lock_irq(&port->lock);
pl010_enable_ms(port);
- } else
+ spin_unlock_irq(&port->lock);
+ } else {
port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+ spin_lock_irq(&port->lock);
+ pl010_disable_ms(port);
+ spin_unlock_irq(&port->lock);
+ }
+ }
}
static const char *pl010_type(struct uart_port *port)
@@ -551,7 +580,8 @@ static struct uart_amba_port *amba_ports[UART_NR];
static void pl010_console_putchar(struct uart_port *port, int ch)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
unsigned int status;
do {
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 02016fcd91b8..8d94c194f090 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -246,6 +246,7 @@ static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
sg_set_page(&sg->sg, phys_to_page(dma_addr),
PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
sg_dma_address(&sg->sg) = dma_addr;
+ sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
return 0;
}
@@ -321,10 +322,26 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
.src_maxburst = uap->fifosize >> 2,
.device_fc = false,
};
+ struct dma_slave_caps caps;
+ /*
+ * Some DMA controllers provide information on their capabilities.
+ * If the controller does, check for suitable residue processing
+ * otherwise assume all is well.
+ */
+ if (0 == dma_get_slave_caps(chan, &caps)) {
+ if (caps.residue_granularity ==
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
+ dma_release_channel(chan);
+ dev_info(uap->port.dev,
+ "RX DMA disabled - no residue processing\n");
+ return;
+ }
+ }
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
+ uap->dmarx.auto_poll_rate = false;
if (plat && plat->dma_rx_poll_enable) {
/* Set poll rate if specified. */
if (plat->dma_rx_poll_rate) {
@@ -345,9 +362,24 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
plat->dma_rx_poll_timeout;
else
uap->dmarx.poll_timeout = 3000;
- } else
- uap->dmarx.auto_poll_rate = false;
-
+ } else if (!plat && dev->of_node) {
+ uap->dmarx.auto_poll_rate = of_property_read_bool(
+ dev->of_node, "auto-poll");
+ if (uap->dmarx.auto_poll_rate) {
+ u32 x;
+
+ if (0 == of_property_read_u32(dev->of_node,
+ "poll-rate-ms", &x))
+ uap->dmarx.poll_rate = x;
+ else
+ uap->dmarx.poll_rate = 100;
+ if (0 == of_property_read_u32(dev->of_node,
+ "poll-timeout-ms", &x))
+ uap->dmarx.poll_timeout = x;
+ else
+ uap->dmarx.poll_timeout = 3000;
+ }
+ }
dev_info(uap->port.dev, "DMA channel RX %s\n",
dma_chan_name(uap->dmarx.chan));
}
@@ -501,7 +533,11 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
else {
size_t first = UART_XMIT_SIZE - xmit->tail;
- size_t second = xmit->head;
+ size_t second;
+
+ if (first > count)
+ first = count;
+ second = count - first;
memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
if (second)
@@ -988,7 +1024,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
if (!uap->dmatx.chan)
return;
- uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
if (!uap->dmatx.buf) {
dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
uap->port.fifosize = uap->fifosize;
@@ -1689,6 +1725,8 @@ static void pl011_shutdown(struct uart_port *port)
plat->exit();
}
+ if (uap->port.ops->flush_buffer)
+ uap->port.ops->flush_buffer(port);
}
static void
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index a34a0cec1685..4f0f95e358e8 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -585,7 +585,6 @@ static struct of_device_id apbuart_match[] = {
static struct platform_driver grlib_apbuart_of_driver = {
.probe = apbuart_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "grlib-apbuart",
.of_match_table = apbuart_match,
},
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 0be1c45efd65..77fc9faa74a4 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -119,7 +119,8 @@ static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
unsigned int rdata;
@@ -141,21 +142,24 @@ static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void ar933x_uart_start_tx(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
ar933x_uart_start_tx_interrupt(up);
}
static void ar933x_uart_stop_tx(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
ar933x_uart_stop_tx_interrupt(up);
}
static void ar933x_uart_stop_rx(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
up->ier &= ~AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
@@ -163,7 +167,8 @@ static void ar933x_uart_stop_rx(struct uart_port *port)
static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
@@ -231,7 +236,8 @@ static void ar933x_uart_set_termios(struct uart_port *port,
struct ktermios *new,
struct ktermios *old)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
unsigned int cs;
unsigned long flags;
unsigned int baud, scale, step;
@@ -404,7 +410,8 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
static int ar933x_uart_startup(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
int ret;
@@ -430,7 +437,8 @@ static int ar933x_uart_startup(struct uart_port *port)
static void ar933x_uart_shutdown(struct uart_port *port)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
/* Disable all interrupts */
up->ier = 0;
@@ -468,7 +476,8 @@ static void ar933x_uart_config_port(struct uart_port *port, int flags)
static int ar933x_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
if (ser->type != PORT_UNKNOWN &&
ser->type != PORT_AR933X)
@@ -521,7 +530,8 @@ static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up)
static void ar933x_uart_console_putchar(struct uart_port *port, int ch)
{
- struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
ar933x_uart_wait_xmitr(up);
ar933x_uart_putc(up, ch);
@@ -734,7 +744,6 @@ static struct platform_driver ar933x_uart_platform_driver = {
.remove = ar933x_uart_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ar933x_uart_of_ids),
},
};
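
The ar933x hunks above replace open-coded "(struct ar933x_uart_port *)port" casts with container_of(), which stays correct even if the embedded uart_port is not the first member. The standalone demo below uses a made-up stand-in struct purely for illustration.

/* container_of_demo.c - standalone illustration of the pattern above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct uart_port_demo {
        int line;
};

struct ar933x_like_port {
        unsigned long ier;              /* placed before the embedded port */
        struct uart_port_demo port;     /* embedded, not first */
};

int main(void)
{
        struct ar933x_like_port up = { .ier = 0x5, .port = { .line = 3 } };
        struct uart_port_demo *port = &up.port;

        /* A plain cast of "port" would land on "ier", not on "up". */
        struct ar933x_like_port *back =
                container_of(port, struct ar933x_like_port, port);

        printf("line=%d ier=%#lx (back == &up: %d)\n",
               back->port.line, back->ier, back == &up);
        return 0;
}
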
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index a59d1d77e750..03ebe401fff7 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -653,7 +653,6 @@ static struct platform_driver arc_platform_driver = {
.remove = arc_serial_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = arc_uart_dt_ids,
},
};
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index edde3eca055d..4d848a29e223 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -167,7 +167,6 @@ struct atmel_uart_port {
struct circ_buf rx_ring;
- struct serial_rs485 rs485; /* rs485 settings */
struct mctrl_gpios *gpios;
int gpio_irq[UART_GPIO_MAX];
unsigned int tx_done_mask;
@@ -290,13 +289,11 @@ static unsigned int atmel_get_lines_status(struct uart_port *port)
}
/* Enable or disable the rs485 support */
-void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+static int atmel_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -306,7 +303,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
- atmel_port->rs485 = *rs485conf;
+ port->rs485 = *rs485conf;
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
@@ -327,8 +324,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
- spin_unlock_irqrestore(&port->lock, flags);
-
+ return 0;
}
/*
@@ -372,11 +368,10 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
- if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if ((atmel_port->rs485.delay_rts_after_send) > 0)
- UART_PUT_TTGR(port,
- atmel_port->rs485.delay_rts_after_send);
+ if ((port->rs485.delay_rts_after_send) > 0)
+ UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -423,8 +418,8 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
- if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
- !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_start_rx(port);
}
@@ -441,8 +436,8 @@ static void atmel_start_tx(struct uart_port *port)
really need this.*/
return;
- if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
- !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_stop_rx(port);
/* re-enable PDC transmit */
@@ -807,7 +802,7 @@ static void atmel_tx_dma(struct uart_port *port)
atmel_port->cookie_tx = dmaengine_submit(desc);
} else {
- if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
@@ -862,9 +857,8 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.dst_addr = port->mapbase + ATMEL_US_THR;
- ret = dmaengine_device_control(atmel_port->chan_tx,
- DMA_SLAVE_CONFIG,
- (unsigned long)&config);
+ ret = dmaengine_slave_config(atmel_port->chan_tx,
+ &config);
if (ret) {
dev_err(port->dev, "DMA tx slave configuration failed\n");
goto chan_err;
@@ -880,32 +874,6 @@ chan_err:
return -EINVAL;
}
-static void atmel_flip_buffer_rx_dma(struct uart_port *port,
- char *buf, size_t count)
-{
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- struct tty_port *tport = &port->state->port;
-
- dma_sync_sg_for_cpu(port->dev,
- &atmel_port->sg_rx,
- 1,
- DMA_DEV_TO_MEM);
-
- tty_insert_flip_string(tport, buf, count);
-
- dma_sync_sg_for_device(port->dev,
- &atmel_port->sg_rx,
- 1,
- DMA_DEV_TO_MEM);
- /*
- * Drop the lock here since it might end up calling
- * uart_start(), which takes the lock.
- */
- spin_unlock(&port->lock);
- tty_flip_buffer_push(tport);
- spin_lock(&port->lock);
-}
-
static void atmel_complete_rx_dma(void *arg)
{
struct uart_port *port = arg;
@@ -934,11 +902,12 @@ static void atmel_release_rx_dma(struct uart_port *port)
static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct tty_port *tport = &port->state->port;
struct circ_buf *ring = &atmel_port->rx_ring;
struct dma_chan *chan = atmel_port->chan_rx;
struct dma_tx_state state;
enum dma_status dmastat;
- size_t pending, count;
+ size_t count;
/* Reset the UART timeout early so that we don't miss one */
@@ -953,27 +922,68 @@ static void atmel_rx_from_dma(struct uart_port *port)
tasklet_schedule(&atmel_port->tasklet);
return;
}
- /* current transfer size should no larger than dma buffer */
- pending = sg_dma_len(&atmel_port->sg_rx) - state.residue;
- BUG_ON(pending > sg_dma_len(&atmel_port->sg_rx));
+
+ /* CPU claims ownership of RX DMA buffer */
+ dma_sync_sg_for_cpu(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_DEV_TO_MEM);
/*
- * This will take the chars we have so far,
- * ring->head will record the transfer size, only new bytes come
- * will insert into the framework.
+ * ring->head points to the end of data already written by the DMA.
+ * ring->tail points to the beginning of data to be read by the
+ * framework.
+ * The current transfer size should not be larger than the dma buffer
+ * length.
*/
- if (pending > ring->head) {
- count = pending - ring->head;
+ ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
+ BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
+ /*
+ * At this point ring->head may point to the first byte right after the
+ * last byte of the dma buffer:
+ * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
+ *
+ * However ring->tail must always point inside the dma buffer:
+ * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
+ *
+ * Since we use a ring buffer, we have to handle the case
+ * where head is lower than tail. In such a case, we first read from
+ * tail to the end of the buffer then reset tail.
+ */
+ if (ring->head < ring->tail) {
+ count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
+
+ tty_insert_flip_string(tport, ring->buf + ring->tail, count);
+ ring->tail = 0;
+ port->icount.rx += count;
+ }
- atmel_flip_buffer_rx_dma(port, ring->buf + ring->head, count);
+ /* Finally we read data from tail to head */
+ if (ring->tail < ring->head) {
+ count = ring->head - ring->tail;
- ring->head += count;
- if (ring->head == sg_dma_len(&atmel_port->sg_rx))
+ tty_insert_flip_string(tport, ring->buf + ring->tail, count);
+ /* Wrap ring->head if needed */
+ if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
ring->head = 0;
-
+ ring->tail = ring->head;
port->icount.rx += count;
}
+ /* USART retrieves ownership of RX DMA buffer */
+ dma_sync_sg_for_device(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_DEV_TO_MEM);
+
+ /*
+ * Drop the lock here since it might end up calling
+ * uart_start(), which takes the lock.
+ */
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
+
UART_PUT_IER(port, ATMEL_US_TIMEOUT);
}
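
The rewritten atmel_rx_from_dma() comment above describes a head/tail ring: when head has wrapped below tail, the data is drained in two passes, tail to end and then tail to head. The small userspace model below mirrors that logic under assumed values; the buffer size and consume() callback are illustrative only.

/* rx_ring_drain.c - minimal model of the two-pass drain described above. */
#include <stdio.h>
#include <string.h>

#define BUF_LEN 8

static void consume(const char *data, size_t count)
{
        printf("pushed %zu byte(s): %.*s\n", count, (int)count, data);
}

static void drain(char *buf, size_t *tail, size_t head)
{
        size_t count;

        if (head < *tail) {                     /* wrapped: read tail..end */
                count = BUF_LEN - *tail;
                consume(buf + *tail, count);
                *tail = 0;
        }
        if (*tail < head) {                     /* then read tail..head */
                count = head - *tail;
                consume(buf + *tail, count);
                *tail = head % BUF_LEN;         /* wrap if head == BUF_LEN */
        }
}

int main(void)
{
        char buf[BUF_LEN] = "ABCDEFGH";
        size_t tail = 6;

        drain(buf, &tail, 3);   /* DMA wrote past the end: "GH" then "ABC" */
        return 0;
}
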
@@ -1026,9 +1036,8 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.src_addr = port->mapbase + ATMEL_US_RHR;
- ret = dmaengine_device_control(atmel_port->chan_rx,
- DMA_SLAVE_CONFIG,
- (unsigned long)&config);
+ ret = dmaengine_slave_config(atmel_port->chan_rx,
+ &config);
if (ret) {
dev_err(port->dev, "DMA rx slave configuration failed\n");
goto chan_err;
@@ -1240,8 +1249,8 @@ static void atmel_tx_pdc(struct uart_port *port)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
} else {
- if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
- !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
@@ -1552,7 +1561,7 @@ static int atmel_init_property(struct atmel_uart_port *atmel_port,
return 0;
}
-static void atmel_init_rs485(struct atmel_uart_port *atmel_port,
+static void atmel_init_rs485(struct uart_port *port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1563,7 +1572,7 @@ static void atmel_init_rs485(struct atmel_uart_port *atmel_port,
/* rs485 properties */
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
- struct serial_rs485 *rs485conf = &atmel_port->rs485;
+ struct serial_rs485 *rs485conf = &port->rs485;
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
@@ -1577,7 +1586,7 @@ static void atmel_init_rs485(struct atmel_uart_port *atmel_port,
rs485conf->flags |= SER_RS485_ENABLED;
}
} else {
- atmel_port->rs485 = pdata->rs485;
+ port->rs485 = pdata->rs485;
}
}
@@ -1802,6 +1811,20 @@ free_irq:
}
/*
+ * Flush any TX data submitted for DMA. Called when the TX circular
+ * buffer is reset.
+ */
+static void atmel_flush_buffer(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_pdc_tx(port)) {
+ UART_PUT_TCR(port, 0);
+ atmel_port->pdc_tx.ofs = 0;
+ }
+}
+
+/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
@@ -1852,20 +1875,8 @@ static void atmel_shutdown(struct uart_port *port)
atmel_free_gpio_irq(port);
atmel_port->ms_irq_enabled = false;
-}
-/*
- * Flush any TX data submitted for DMA. Called when the TX circular
- * buffer is reset.
- */
-static void atmel_flush_buffer(struct uart_port *port)
-{
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-
- if (atmel_use_pdc_tx(port)) {
- UART_PUT_TCR(port, 0);
- atmel_port->pdc_tx.ofs = 0;
- }
+ atmel_flush_buffer(port);
}
/*
@@ -1911,7 +1922,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
@@ -2013,10 +2023,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
- if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
- if ((atmel_port->rs485.delay_rts_after_send) > 0)
- UART_PUT_TTGR(port,
- atmel_port->rs485.delay_rts_after_send);
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ if ((port->rs485.delay_rts_after_send) > 0)
+ UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
}
@@ -2040,13 +2049,20 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
spin_unlock_irqrestore(&port->lock, flags);
}
-static void atmel_set_ldisc(struct uart_port *port, int new)
+static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
- if (new == N_PPS) {
+ if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
+ spin_lock_irq(&port->lock);
atmel_enable_ms(port);
+ spin_unlock_irq(&port->lock);
} else {
port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+ spin_lock_irq(&port->lock);
+ atmel_disable_ms(port);
+ spin_unlock_irq(&port->lock);
+ }
}
}
@@ -2148,35 +2164,6 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
}
#endif
-static int
-atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
-{
- struct serial_rs485 rs485conf;
-
- switch (cmd) {
- case TIOCSRS485:
- if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
- sizeof(rs485conf)))
- return -EFAULT;
-
- atmel_config_rs485(port, &rs485conf);
- break;
-
- case TIOCGRS485:
- if (copy_to_user((struct serial_rs485 *) arg,
- &(to_atmel_uart_port(port)->rs485),
- sizeof(rs485conf)))
- return -EFAULT;
- break;
-
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-
-
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -2197,7 +2184,6 @@ static struct uart_ops atmel_pops = {
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
- .ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
@@ -2217,7 +2203,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
if (!atmel_init_property(atmel_port, pdev))
atmel_set_ops(port);
- atmel_init_rs485(atmel_port, pdev);
+ atmel_init_rs485(port, pdev);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
@@ -2226,6 +2212,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
+ port->rs485_config = atmel_config_rs485;
tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
(unsigned long)port);
@@ -2260,7 +2247,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
}
/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
- if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_pdc_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
@@ -2541,6 +2528,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
void *data;
int ret = -ENODEV;
+ bool rs485_enabled;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
@@ -2588,6 +2576,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
port->rx_ring.buf = data;
}
+ rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
+
ret = uart_add_one_port(&atmel_uart, &port->uart);
if (ret)
goto err_add_port;
@@ -2606,7 +2596,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);
- if (port->rs485.flags & SER_RS485_ENABLED) {
+ if (rs485_enabled) {
UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
}
@@ -2660,7 +2650,6 @@ static struct platform_driver atmel_serial_driver = {
.resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
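
With the driver-private atmel_ioctl() removed and port->rs485_config wired up above, RS485 configuration goes through the generic TIOCSRS485/TIOCGRS485 ioctls. A hedged userspace sketch follows; the device path and delay value are assumptions for the example.

/* rs485_ioctl.c - userspace sketch of the generic RS485 ioctls. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);   /* assumed port */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED;
        rs485.delay_rts_after_send = 1;         /* milliseconds, illustrative */

        if (ioctl(fd, TIOCSRS485, &rs485) < 0)  /* handled by ->rs485_config() */
                perror("TIOCSRS485");

        if (ioctl(fd, TIOCGRS485, &rs485) < 0)  /* read back current settings */
                perror("TIOCGRS485");
        else
                printf("rs485 flags: %#x\n", rs485.flags);

        close(fd);
        return 0;
}
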
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 231519022b73..01d83df08e3d 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -588,20 +588,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
*/
static int bcm_uart_request_port(struct uart_port *port)
{
- unsigned int size;
-
- size = UART_REG_SIZE;
- if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
- dev_err(port->dev, "Memory region busy\n");
- return -EBUSY;
- }
-
- port->membase = ioremap(port->mapbase, size);
- if (!port->membase) {
- dev_err(port->dev, "Unable to map registers\n");
- release_mem_region(port->mapbase, size);
- return -EBUSY;
- }
+ /* UARTs always present */
return 0;
}
@@ -610,8 +597,7 @@ static int bcm_uart_request_port(struct uart_port *port)
*/
static void bcm_uart_release_port(struct uart_port *port)
{
- release_mem_region(port->mapbase, UART_REG_SIZE);
- iounmap(port->membase);
+ /* Nothing to release ... */
}
/*
@@ -782,6 +768,26 @@ static int __init bcm63xx_console_init(void)
console_initcall(bcm63xx_console_init);
+static void bcm_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, bcm_console_putchar);
+ wait_for_xmitr(&dev->port);
+}
+
+static int __init bcm_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = bcm_early_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(bcm63xx_uart, "brcm,bcm6345-uart", bcm_early_console_setup);
+
#define BCM63XX_CONSOLE (&bcm63xx_console)
#else
#define BCM63XX_CONSOLE NULL
@@ -813,25 +819,30 @@ static int bcm_uart_probe(struct platform_device *pdev)
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
- if (ports[pdev->id].membase)
+ port = &ports[pdev->id];
+ if (port->membase)
return -EBUSY;
+ memset(port, 0, sizeof(*port));
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mem)
return -ENODEV;
+ port->mapbase = res_mem->start;
+ port->membase = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
return -ENODEV;
- clk = clk_get(&pdev->dev, "periph");
+ clk = pdev->dev.of_node ? of_clk_get(pdev->dev.of_node, 0) :
+ clk_get(&pdev->dev, "periph");
if (IS_ERR(clk))
return -ENODEV;
- port = &ports[pdev->id];
- memset(port, 0, sizeof(*port));
port->iotype = UPIO_MEM;
- port->mapbase = res_mem->start;
port->irq = res_irq->start;
port->ops = &bcm_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
@@ -874,7 +885,6 @@ static struct platform_driver bcm_uart_platform_driver = {
.probe = bcm_uart_probe,
.remove = bcm_uart_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "bcm63xx_uart",
.of_match_table = bcm63xx_of_match,
},
@@ -905,5 +915,5 @@ module_init(bcm_uart_init);
module_exit(bcm_uart_exit);
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
-MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
+MODULE_DESCRIPTION("Broadcom 63xx integrated uart driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index d62d8daac8ab..984e1c050096 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -517,14 +517,15 @@ static void sport_set_termios(struct uart_port *port,
up->csize = 5;
break;
default:
- pr_warning("requested word length not supported\n");
+ pr_warn("requested word length not supported\n");
+ break;
}
if (termios->c_cflag & CSTOPB) {
up->stopb = 1;
}
if (termios->c_cflag & PARENB) {
- pr_warning("PAREN bits is not supported yet\n");
+ pr_warn("PAREN bit is not supported yet\n");
/* up->parib = 1; */
}
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 7da9911e95f0..43b3e2c233ff 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -944,12 +944,13 @@ bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
* Enable the IrDA function if tty->ldisc.num is N_IRDA.
* In other cases, disable IrDA function.
*/
-static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
+static void bfin_serial_set_ldisc(struct uart_port *port,
+ struct ktermios *termios)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned int val;
- switch (ld) {
+ switch (termios->c_line) {
case N_IRDA:
val = UART_GET_GCTL(uart);
val |= (UMOD_IRDA | RPOLC);
@@ -1386,7 +1387,6 @@ static struct platform_driver bfin_serial_driver = {
.resume = bfin_serial_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index acfe31773643..6e11c275f2ab 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -225,13 +225,14 @@ static void uart_clps711x_break_ctl(struct uart_port *port, int break_state)
writel(ubrlcr, port->membase + UBRLCR_OFFSET);
}
-static void uart_clps711x_set_ldisc(struct uart_port *port, int ld)
+static void uart_clps711x_set_ldisc(struct uart_port *port,
+ struct ktermios *termios)
{
if (!port->line) {
struct clps711x_port *s = dev_get_drvdata(port->dev);
regmap_update_bits(s->syscon, SYSCON_OFFSET, SYSCON1_SIREN,
- (ld == N_IRDA) ? SYSCON1_SIREN : 0);
+ (termios->c_line == N_IRDA) ? SYSCON1_SIREN : 0);
}
}
@@ -542,7 +543,6 @@ MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids);
static struct platform_driver clps711x_uart_platform = {
.driver = {
.name = "clps711x-uart",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(clps711x_uart_dt_ids),
},
.probe = uart_clps711x_probe,
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 533852eb8778..fddb1fd4d9d3 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -80,7 +80,8 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
*/
static unsigned int cpm_uart_tx_empty(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
cbd_t __iomem *bdp = pinfo->tx_bd_base;
int ret = 0;
@@ -102,7 +103,8 @@ static unsigned int cpm_uart_tx_empty(struct uart_port *port)
static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
if (pinfo->gpios[GPIO_RTS] >= 0)
gpio_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS));
@@ -113,7 +115,8 @@ static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
if (pinfo->gpios[GPIO_CTS] >= 0) {
@@ -144,7 +147,8 @@ static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
*/
static void cpm_uart_stop_tx(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
@@ -161,7 +165,8 @@ static void cpm_uart_stop_tx(struct uart_port *port)
*/
static void cpm_uart_start_tx(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
@@ -189,7 +194,8 @@ static void cpm_uart_start_tx(struct uart_port *port)
*/
static void cpm_uart_stop_rx(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
@@ -206,7 +212,8 @@ static void cpm_uart_stop_rx(struct uart_port *port)
*/
static void cpm_uart_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line,
break_state);
@@ -240,7 +247,8 @@ static void cpm_uart_int_rx(struct uart_port *port)
unsigned char ch;
u8 *cp;
struct tty_port *tport = &port->state->port;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
cbd_t __iomem *bdp;
u16 status;
unsigned int flg;
@@ -397,7 +405,8 @@ static irqreturn_t cpm_uart_int(int irq, void *data)
static int cpm_uart_startup(struct uart_port *port)
{
int retval;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
pr_debug("CPM uart[%d]:startup\n", port->line);
@@ -442,7 +451,8 @@ inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo)
*/
static void cpm_uart_shutdown(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
pr_debug("CPM uart[%d]:shutdown\n", port->line);
@@ -492,7 +502,8 @@ static void cpm_uart_set_termios(struct uart_port *port,
unsigned long flags;
u16 cval, scval, prev_mode;
int bits, sbits;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
int maxidl;
@@ -675,7 +686,8 @@ static int cpm_uart_tx_pump(struct uart_port *port)
cbd_t __iomem *bdp;
u8 *p;
int count;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
struct circ_buf *xmit = &port->state->xmit;
/* Handle xon/xoff */
@@ -906,7 +918,8 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
*/
static int cpm_uart_request_port(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
int ret;
pr_debug("CPM uart[%d]:request port\n", port->line);
@@ -938,7 +951,8 @@ static int cpm_uart_request_port(struct uart_port *port)
static void cpm_uart_release_port(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
if (!(pinfo->flags & FLAG_CONSOLE))
cpm_uart_freebuf(pinfo);
@@ -1082,7 +1096,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
static int cpm_get_poll_char(struct uart_port *port)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
if (!serial_polled) {
serial_polled = 1;
@@ -1099,7 +1114,8 @@ static int cpm_get_poll_char(struct uart_port *port)
static void cpm_put_poll_char(struct uart_port *port,
unsigned char c)
{
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct uart_cpm_port *pinfo =
+ container_of(port, struct uart_cpm_port, port);
static char ch[2];
ch[0] = (char)c;
@@ -1438,7 +1454,6 @@ static struct of_device_id cpm_uart_match[] = {
static struct platform_driver cpm_uart_driver = {
.driver = {
.name = "cpm_uart",
- .owner = THIS_MODULE,
.of_match_table = cpm_uart_match,
},
.probe = cpm_uart_probe,
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 58e6f61a87e4..0c1825b0b41d 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3676,12 +3676,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
}
info->port.flags |= ASYNC_CLOSING;
/*
- * Save the termios structure, since this port may have
- * separate termios for callout and dialin.
- */
- if (info->port.flags & ASYNC_NORMAL_ACTIVE)
- info->normal_termios = tty->termios;
- /*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
*/
@@ -4076,11 +4070,6 @@ rs_open(struct tty_struct *tty, struct file * filp)
return retval;
}
- if ((info->port.count == 1) && (info->port.flags & ASYNC_SPLIT_TERMIOS)) {
- tty->termios = info->normal_termios;
- change_speed(info);
- }
-
#ifdef SERIAL_DEBUG_OPEN
printk("rs_open ttyS%d successful...\n", info->line);
#endif
@@ -4327,7 +4316,6 @@ static int __init rs_init(void)
info->custom_divisor = 0;
info->x_char = 0;
info->event = 0;
- info->normal_termios = driver->init_termios;
info->xmit.buf = NULL;
info->xmit.tail = info->xmit.head = 0;
info->first_recv_buffer = info->last_recv_buffer = NULL;
diff --git a/drivers/tty/serial/crisv10.h b/drivers/tty/serial/crisv10.h
index 7599014ae03f..15a52ee58251 100644
--- a/drivers/tty/serial/crisv10.h
+++ b/drivers/tty/serial/crisv10.h
@@ -98,7 +98,6 @@ struct e100_serial {
struct work_struct work;
struct async_icount icount; /* error-statistics etc.*/
- struct ktermios normal_termios;
unsigned long char_time_usec; /* The time for 1 char, in usecs */
unsigned long flush_time_usec; /* How often we should flush */
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index a514ee6f5406..64fe25a4285c 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -98,7 +98,7 @@ static int __init parse_options(struct earlycon_device *device,
strlcpy(device->options, options, length);
}
- if (mmio || mmio32)
+ if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32)
pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
mmio32 ? "32" : "",
(unsigned long long)port->mapbase,
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 55d9c00112cc..195acc868763 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -814,7 +814,6 @@ static struct platform_driver efm32_uart_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = efm32_uart_dt_ids,
},
};
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 6dd53af546a3..e7cde3a9566d 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1786,15 +1786,13 @@ static int lpuart_probe(struct platform_device *pdev)
}
sport->port.line = ret;
sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- sport->port.mapbase = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sport->port.membase))
return PTR_ERR(sport->port.membase);
+ sport->port.mapbase = res->start;
sport->port.dev = &pdev->dev;
sport->port.type = PORT_LPUART;
sport->port.iotype = UPIO_MEM;
@@ -1862,6 +1860,20 @@ static int lpuart_suspend(struct device *dev)
static int lpuart_resume(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
+ unsigned long temp;
+
+ if (sport->lpuart32) {
+ lpuart32_setup_watermark(sport);
+ temp = lpuart32_read(sport->port.membase + UARTCTRL);
+ temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
+ UARTCTRL_TE | UARTCTRL_ILIE);
+ lpuart32_write(temp, sport->port.membase + UARTCTRL);
+ } else {
+ lpuart_setup_watermark(sport);
+ temp = readb(sport->port.membase + UARTCR2);
+ temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ }
uart_resume_port(&lpuart_reg, &sport->port);
@@ -1876,7 +1888,6 @@ static struct platform_driver lpuart_driver = {
.remove = lpuart_remove,
.driver = {
.name = "fsl-lpuart",
- .owner = THIS_MODULE,
.of_match_table = lpuart_dt_ids,
.pm = &lpuart_pm_ops,
},
@@ -1884,11 +1895,8 @@ static struct platform_driver lpuart_driver = {
static int __init lpuart_serial_init(void)
{
- int ret;
-
- pr_info("serial: Freescale lpuart driver\n");
+ int ret = uart_register_driver(&lpuart_reg);
- ret = uart_register_driver(&lpuart_reg);
if (ret)
return ret;
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index d4620fe5da2e..45fc323b95e6 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1550,8 +1550,10 @@ static int icom_probe(struct pci_dev *dev,
icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
- if (!icom_adapter->base_addr)
+ if (!icom_adapter->base_addr) {
+ retval = -ENOMEM;
goto probe_exit1;
+ }
/* save off irq and request irq line */
if ( (retval = request_irq(dev->irq, icom_interrupt,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 8f62a3cec23e..4c5e9092e2d7 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -302,7 +302,7 @@ static inline int is_imx6q_uart(struct imx_port *sport)
/*
* Save and restore functions for UCR1, UCR2 and UCR3 registers
*/
-#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_IMX_CONSOLE)
+#if defined(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_port_ucrs_save(struct uart_port *port,
struct imx_port_ucrs *ucr)
{
@@ -991,7 +991,6 @@ static int imx_uart_dma_init(struct imx_port *sport)
sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sport->rx_buf) {
- dev_err(dev, "cannot alloc DMA buffer.\n");
ret = -ENOMEM;
goto err;
}
@@ -1076,11 +1075,11 @@ static int imx_startup(struct uart_port *port)
retval = clk_prepare_enable(sport->clk_per);
if (retval)
- goto error_out1;
+ return retval;
retval = clk_prepare_enable(sport->clk_ipg);
if (retval) {
clk_disable_unprepare(sport->clk_per);
- goto error_out1;
+ return retval;
}
imx_setup_ufcr(sport, 0);
@@ -1109,37 +1108,6 @@ static int imx_startup(struct uart_port *port)
while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
udelay(1);
- /*
- * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
- * chips only have one interrupt.
- */
- if (sport->txirq > 0) {
- retval = request_irq(sport->rxirq, imx_rxint, 0,
- dev_name(port->dev), sport);
- if (retval)
- goto error_out1;
-
- retval = request_irq(sport->txirq, imx_txint, 0,
- dev_name(port->dev), sport);
- if (retval)
- goto error_out2;
-
- /* do not use RTS IRQ on IrDA */
- if (!USE_IRDA(sport)) {
- retval = request_irq(sport->rtsirq, imx_rtsint, 0,
- dev_name(port->dev), sport);
- if (retval)
- goto error_out3;
- }
- } else {
- retval = request_irq(sport->port.irq, imx_int, 0,
- dev_name(port->dev), sport);
- if (retval) {
- free_irq(sport->port.irq, sport);
- goto error_out1;
- }
- }
-
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Finally, clear and enable interrupts
@@ -1201,15 +1169,6 @@ static int imx_startup(struct uart_port *port)
}
return 0;
-
-error_out3:
- if (sport->txirq)
- free_irq(sport->txirq, sport);
-error_out2:
- if (sport->rxirq)
- free_irq(sport->rxirq, sport);
-error_out1:
- return retval;
}
static void imx_shutdown(struct uart_port *port)
@@ -1255,17 +1214,6 @@ static void imx_shutdown(struct uart_port *port)
del_timer_sync(&sport->timer);
/*
- * Free the interrupts
- */
- if (sport->txirq > 0) {
- if (!USE_IRDA(sport))
- free_irq(sport->rtsirq, sport);
- free_irq(sport->txirq, sport);
- free_irq(sport->rxirq, sport);
- } else
- free_irq(sport->port.irq, sport);
-
- /*
* Disable all interrupts, port and break condition.
*/
@@ -1507,44 +1455,65 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
}
#if defined(CONFIG_CONSOLE_POLL)
+
+static int imx_poll_init(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long flags;
+ unsigned long temp;
+ int retval;
+
+ retval = clk_prepare_enable(sport->clk_ipg);
+ if (retval)
+ return retval;
+ retval = clk_prepare_enable(sport->clk_per);
+ if (retval)
+ clk_disable_unprepare(sport->clk_ipg);
+
+ imx_setup_ufcr(sport, 0);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ temp = readl(sport->port.membase + UCR1);
+ if (is_imx1_uart(sport))
+ temp |= IMX1_UCR1_UARTCLKEN;
+ temp |= UCR1_UARTEN | UCR1_RRDYEN;
+ temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR2);
+ temp |= UCR2_RXEN;
+ writel(temp, sport->port.membase + UCR2);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ return 0;
+}
+
static int imx_poll_get_char(struct uart_port *port)
{
- if (!(readl(port->membase + USR2) & USR2_RDR))
+ if (!(readl_relaxed(port->membase + USR2) & USR2_RDR))
return NO_POLL_CHAR;
- return readl(port->membase + URXD0) & URXD_RX_DATA;
+ return readl_relaxed(port->membase + URXD0) & URXD_RX_DATA;
}
static void imx_poll_put_char(struct uart_port *port, unsigned char c)
{
- struct imx_port_ucrs old_ucr;
unsigned int status;
- /* save control registers */
- imx_port_ucrs_save(port, &old_ucr);
-
- /* disable interrupts */
- writel(UCR1_UARTEN, port->membase + UCR1);
- writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
- port->membase + UCR2);
- writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
- port->membase + UCR3);
-
/* drain */
do {
- status = readl(port->membase + USR1);
+ status = readl_relaxed(port->membase + USR1);
} while (~status & USR1_TRDY);
/* write */
- writel(c, port->membase + URTX0);
+ writel_relaxed(c, port->membase + URTX0);
/* flush */
do {
- status = readl(port->membase + USR2);
+ status = readl_relaxed(port->membase + USR2);
} while (~status & USR2_TXDC);
-
- /* restore control registers */
- imx_port_ucrs_restore(port, &old_ucr);
}
#endif
@@ -1565,6 +1534,7 @@ static struct uart_ops imx_pops = {
.config_port = imx_config_port,
.verify_port = imx_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
+ .poll_init = imx_poll_init,
.poll_get_char = imx_poll_get_char,
.poll_put_char = imx_poll_put_char,
#endif
@@ -1929,6 +1899,36 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.uartclk = clk_get_rate(sport->clk_per);
+ /*
+ * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
+ * chips only have one interrupt.
+ */
+ if (sport->txirq > 0) {
+ ret = devm_request_irq(&pdev->dev, sport->rxirq, imx_rxint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, sport->txirq, imx_txint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret)
+ return ret;
+
+ /* do not use RTS IRQ on IrDA */
+ if (!USE_IRDA(sport)) {
+ ret = devm_request_irq(&pdev->dev, sport->rtsirq,
+ imx_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, imx_int, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret)
+ return ret;
+ }
+
imx_ports[sport->port.line] = sport;
platform_set_drvdata(pdev, sport);
@@ -1952,18 +1952,14 @@ static struct platform_driver serial_imx_driver = {
.id_table = imx_uart_devtype,
.driver = {
.name = "imx-uart",
- .owner = THIS_MODULE,
.of_match_table = imx_uart_dt_ids,
},
};
static int __init imx_serial_init(void)
{
- int ret;
-
- pr_info("Serial: IMX driver\n");
+ int ret = uart_register_driver(&imx_reg);
- ret = uart_register_driver(&imx_reg);
if (ret)
return ret;
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 99b7b8697861..991e6dce916e 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -544,7 +544,8 @@ static unsigned int ip22zilog_get_mctrl(struct uart_port *port)
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits;
@@ -568,7 +569,8 @@ static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_stop_tx(struct uart_port *port)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
up->flags |= IP22ZILOG_FLAG_TX_STOPPED;
}
@@ -576,7 +578,8 @@ static void ip22zilog_stop_tx(struct uart_port *port)
/* The port lock is held and interrupts are disabled. */
static void ip22zilog_start_tx(struct uart_port *port)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char status;
@@ -636,7 +639,8 @@ static void ip22zilog_stop_rx(struct uart_port *port)
/* The port lock is held. */
static void ip22zilog_enable_ms(struct uart_port *port)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char new_reg;
@@ -652,7 +656,8 @@ static void ip22zilog_enable_ms(struct uart_port *port)
/* The port lock is not held. */
static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
@@ -873,7 +878,8 @@ static void
ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct uart_ip22zilog_port *up =
+ container_of(port, struct uart_ip22zilog_port, port);
unsigned long flags;
int baud, brg;
diff --git a/drivers/tty/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
index e46b6e0f8b18..705d1ff6fdeb 100644
--- a/drivers/tty/serial/jsm/Makefile
+++ b/drivers/tty/serial/jsm/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_SERIAL_JSM) += jsm.o
-jsm-objs := jsm_driver.o jsm_neo.o jsm_tty.o
+jsm-objs := jsm_driver.o jsm_neo.o jsm_tty.o jsm_cls.o
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index af7013488aeb..0b79b87df47d 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -13,11 +13,6 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
@@ -68,6 +63,10 @@ do { \
#define MAX_STOPS_SENT 5
/* Board ids */
+#define PCI_DEVICE_ID_CLASSIC_4 0x0028
+#define PCI_DEVICE_ID_CLASSIC_8 0x0029
+#define PCI_DEVICE_ID_CLASSIC_4_422 0x00D0
+#define PCI_DEVICE_ID_CLASSIC_8_422 0x00D1
#define PCI_DEVICE_ID_NEO_4 0x00B0
#define PCI_DEVICE_ID_NEO_1_422 0x00CC
#define PCI_DEVICE_ID_NEO_1_422_485 0x00CD
@@ -112,21 +111,21 @@ struct jsm_channel;
************************************************************************/
struct board_ops {
irq_handler_t intr;
- void (*uart_init) (struct jsm_channel *ch);
- void (*uart_off) (struct jsm_channel *ch);
- void (*param) (struct jsm_channel *ch);
- void (*assert_modem_signals) (struct jsm_channel *ch);
- void (*flush_uart_write) (struct jsm_channel *ch);
- void (*flush_uart_read) (struct jsm_channel *ch);
- void (*disable_receiver) (struct jsm_channel *ch);
- void (*enable_receiver) (struct jsm_channel *ch);
- void (*send_break) (struct jsm_channel *ch);
- void (*clear_break) (struct jsm_channel *ch, int);
- void (*send_start_character) (struct jsm_channel *ch);
- void (*send_stop_character) (struct jsm_channel *ch);
- void (*copy_data_from_queue_to_uart) (struct jsm_channel *ch);
- u32 (*get_uart_bytes_left) (struct jsm_channel *ch);
- void (*send_immediate_char) (struct jsm_channel *ch, unsigned char);
+ void (*uart_init)(struct jsm_channel *ch);
+ void (*uart_off)(struct jsm_channel *ch);
+ void (*param)(struct jsm_channel *ch);
+ void (*assert_modem_signals)(struct jsm_channel *ch);
+ void (*flush_uart_write)(struct jsm_channel *ch);
+ void (*flush_uart_read)(struct jsm_channel *ch);
+ void (*disable_receiver)(struct jsm_channel *ch);
+ void (*enable_receiver)(struct jsm_channel *ch);
+ void (*send_break)(struct jsm_channel *ch);
+ void (*clear_break)(struct jsm_channel *ch);
+ void (*send_start_character)(struct jsm_channel *ch);
+ void (*send_stop_character)(struct jsm_channel *ch);
+ void (*copy_data_from_queue_to_uart)(struct jsm_channel *ch);
+ u32 (*get_uart_bytes_left)(struct jsm_channel *ch);
+ void (*send_immediate_char)(struct jsm_channel *ch, unsigned char);
};
@@ -189,7 +188,7 @@ struct jsm_board
#define CH_LOOPBACK 0x2000 /* Channel is in lookback mode */
#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
-/* Our Read/Error/Write queue sizes */
+/* Our Read/Error queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
#define RQUEUESIZE (RQUEUEMASK + 1)
@@ -222,7 +221,10 @@ struct jsm_channel {
u8 ch_mostat; /* FEP output modem status */
u8 ch_mistat; /* FEP input modem status */
- struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the "mapped" UART struct */
+ /* Pointers to the "mapped" UART structs */
+ struct neo_uart_struct __iomem *ch_neo_uart; /* NEO card */
+ struct cls_uart_struct __iomem *ch_cls_uart; /* Classic card */
+
u8 ch_cached_lsr; /* Cached value of the LSR register */
u8 *ch_rqueue; /* Our read queue buffer - malloc'ed */
@@ -254,6 +256,60 @@ struct jsm_channel {
u64 ch_xoff_sends; /* Count of xoffs transmitted */
};
+/************************************************************************
+ * Per channel/port Classic UART structures *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct cls_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg*/
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+};
+
+/* Where to read the interrupt register (8bits) */
+#define UART_CLASSIC_POLL_ADDR_OFFSET 0x40
+
+#define UART_EXAR654_ENHANCED_REGISTER_SET 0xBF
+
+#define UART_16654_FCR_TXTRIGGER_8 0x0
+#define UART_16654_FCR_TXTRIGGER_16 0x10
+#define UART_16654_FCR_TXTRIGGER_32 0x20
+#define UART_16654_FCR_TXTRIGGER_56 0x30
+
+#define UART_16654_FCR_RXTRIGGER_8 0x0
+#define UART_16654_FCR_RXTRIGGER_16 0x40
+#define UART_16654_FCR_RXTRIGGER_56 0x80
+#define UART_16654_FCR_RXTRIGGER_60 0xC0
+
+#define UART_IIR_CTSRTS 0x20 /* Received CTS/RTS change of state */
+#define UART_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+
+/*
+ * These are the EXTENDED definitions for the Exar 654's Interrupt
+ * Enable Register.
+ */
+#define UART_EXAR654_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
+
+#define UART_EXAR654_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_EXAR654_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
/************************************************************************
* Per channel/port NEO UART structure *
@@ -374,6 +430,7 @@ struct neo_uart_struct {
*/
extern struct uart_driver jsm_uart_driver;
extern struct board_ops jsm_neo_ops;
+extern struct board_ops jsm_cls_ops;
extern int jsm_debug;
/*************************************************************************
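
The Classic UART block added to jsm.h above relies on eight byte-wide registers sitting back to back in the mapped window. A standalone, compile-time sanity check of such a layout is sketched below; the struct is a local copy for illustration, not the driver's own definition.

/* cls_layout_check.c - layout check mirroring the register block above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cls_uart_layout {
        uint8_t txrx;           /* 0x00 RHR/THR */
        uint8_t ier;            /* 0x01 */
        uint8_t isr_fcr;        /* 0x02 */
        uint8_t lcr;            /* 0x03 */
        uint8_t mcr;            /* 0x04 */
        uint8_t lsr;            /* 0x05 */
        uint8_t msr;            /* 0x06 */
        uint8_t spr;            /* 0x07 */
};

_Static_assert(offsetof(struct cls_uart_layout, lcr) == 3,
               "LCR must sit at offset 3, as on a 16x50-style UART");
_Static_assert(sizeof(struct cls_uart_layout) == 8,
               "register block must stay 8 bytes with no padding");

int main(void)
{
        printf("spr offset: %zu\n", offsetof(struct cls_uart_layout, spr));
        return 0;
}
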
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
new file mode 100644
index 000000000000..bfb0681195b6
--- /dev/null
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -0,0 +1,982 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+#include <linux/delay.h> /* For udelay */
+#include <linux/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/serial.h> /* For struct async_serial */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+#include <linux/pci.h>
+#include <linux/tty.h>
+
+#include "jsm.h" /* Driver main header file */
+
+static struct {
+ unsigned int rate;
+ unsigned int cflag;
+} baud_rates[] = {
+ { 921600, B921600 },
+ { 460800, B460800 },
+ { 230400, B230400 },
+ { 115200, B115200 },
+ { 57600, B57600 },
+ { 38400, B38400 },
+ { 19200, B19200 },
+ { 9600, B9600 },
+ { 4800, B4800 },
+ { 2400, B2400 },
+ { 1200, B1200 },
+ { 600, B600 },
+ { 300, B300 },
+ { 200, B200 },
+ { 150, B150 },
+ { 134, B134 },
+ { 110, B110 },
+ { 75, B75 },
+ { 50, B50 },
+};
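
The baud_rates[] table above pairs supported speeds with their termios Bxxx flags in descending order. The helper below is hypothetical (the driver's own lookup is not shown in this hunk) and only illustrates how such a table is typically scanned: take the first rate at or below the request, else fall back to the slowest entry.

/* baud_pick.c - hypothetical lookup over a descending rate table. */
#include <stdio.h>

static const unsigned int rates[] = {
        921600, 460800, 230400, 115200, 57600, 38400, 19200, 9600,
        4800, 2400, 1200, 600, 300, 200, 150, 134, 110, 75, 50,
};

static unsigned int pick_rate(unsigned int requested)
{
        size_t i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                if (rates[i] <= requested)
                        return rates[i];

        return rates[sizeof(rates) / sizeof(rates[0]) - 1];
}

int main(void)
{
        printf("%u -> %u\n", 250000u, pick_rate(250000));       /* 230400 */
        printf("%u -> %u\n", 45u, pick_rate(45));               /* 50 */
        return 0;
}
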
+
+static void cls_set_cts_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on CTS flow control, turn off IXON flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /*
+ * Enable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
+ ier |= (UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+}
+
+static void cls_set_ixon_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /*
+ * Disable interrupts for CTS flow, turn on interrupts for
+ * received XOFF chars
+ */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier |= (UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+}
+
+static void cls_set_no_output_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /*
+ * Disable interrupts for CTS flow, turn off interrupts for
+ * received XOFF chars
+ */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_r_watermark = 0;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+}
+
+static void cls_set_rts_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on RTS flow control, turn off IXOFF flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Enable interrupts for RTS flow */
+ ier |= (UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_r_watermark = 4;
+ ch->ch_r_tlevel = 8;
+}
+
+static void cls_set_ixoff_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+}
+
+static void cls_set_no_input_flow_control(struct jsm_channel *ch)
+{
+ u8 lcrb = readb(&ch->ch_cls_uart->lcr);
+ u8 ier = readb(&ch->ch_cls_uart->ier);
+ u8 isr_fcr = 0;
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+}
+
+/*
+ * cls_clear_break.
+ * Determines whether it's time to shut off the break condition.
+ *
+ * No locks are assumed to be held when calling this function.
+ * The channel lock is acquired and released inside this function.
+ */
+static void cls_clear_break(struct jsm_channel *ch)
+{
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ u8 temp = readb(&ch->ch_cls_uart->lcr);
+
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ jsm_dbg(IOCTL, &ch->ch_bd->pci_dev,
+ "clear break Finishing UART_LCR_SBC! finished: %lx\n",
+ jiffies);
+ }
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+}
+
+static void cls_disable_receiver(struct jsm_channel *ch)
+{
+ u8 tmp = readb(&ch->ch_cls_uart->ier);
+
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+static void cls_enable_receiver(struct jsm_channel *ch)
+{
+ u8 tmp = readb(&ch->ch_cls_uart->ier);
+
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+/* Make the UART raise any of the output signals we want asserted */
+static void cls_assert_modem_signals(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ writeb(ch->ch_mostat, &ch->ch_cls_uart->mcr);
+}
+
+static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch)
+{
+ int qleft = 0;
+ u8 linestatus = 0;
+ u8 error_mask = 0;
+ u16 head;
+ u16 tail;
+ unsigned long flags;
+
+ if (!ch)
+ return;
+
+ spin_lock_irqsave(&ch->ch_lock, flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head & RQUEUEMASK;
+ tail = ch->ch_r_tail & RQUEUEMASK;
+
+ /* Get our cached LSR */
+ linestatus = ch->ch_cached_lsr;
+ ch->ch_cached_lsr = 0;
+
+ /* Store how much space we have left in the queue */
+ qleft = tail - head - 1;
+ if (qleft < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ while (1) {
+ /*
+ * Grab the line status register; we need to
+ * check whether there is any data to read.
+ */
+ linestatus = readb(&ch->ch_cls_uart->lsr);
+
+ /* Break out if there is no data to fetch */
+ if (!(linestatus & UART_LSR_DR))
+ break;
+
+ /*
+ * Discard the character if we are ignoring the error mask,
+ * which in this case is the break signal.
+ */
+ if (linestatus & error_mask) {
+ u8 discard;
+
+ linestatus = 0;
+ discard = readb(&ch->ch_cls_uart->txrx);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some
+ * data. The assumption is that HWFLOW or SWFLOW should have
+ * stopped things way way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first;
+ * I hope that's okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_r_tail = tail;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
+ | UART_LSR_FE);
+ ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
+
+ qleft--;
+
+ if (ch->ch_equeue[head] & UART_LSR_PE)
+ ch->ch_err_parity++;
+ if (ch->ch_equeue[head] & UART_LSR_BI)
+ ch->ch_err_break++;
+ if (ch->ch_equeue[head] & UART_LSR_FE)
+ ch->ch_err_frame++;
+
+ /* Add to, and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+}
+
+static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch)
+{
+ u16 tail;
+ int n;
+ int qlen;
+ u32 len_written = 0;
+ struct circ_buf *circ;
+
+ if (!ch)
+ return;
+
+ circ = &ch->uart_port.state->xmit;
+
+ /* No data to write to the UART */
+ if (uart_circ_empty(circ))
+ return;
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING))
+ return;
+
+ /* We have to do it this way, because of the EXAR TXFIFO count bug. */
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
+ return;
+
+ n = 32;
+
+ /* cache tail of queue */
+ tail = circ->tail & (UART_XMIT_SIZE - 1);
+ qlen = uart_circ_chars_pending(circ);
+
+ /* Find the minimum of the FIFO space versus the queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+ writeb(circ->buf[tail], &ch->ch_cls_uart->txrx);
+ tail = (tail + 1) & (UART_XMIT_SIZE - 1);
+ n--;
+ ch->ch_txcount++;
+ len_written++;
+ }
+
+ /* Update the final tail */
+ circ->tail = tail & (UART_XMIT_SIZE - 1);
+
+ if (len_written > ch->ch_t_tlevel)
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ if (uart_circ_empty(circ))
+ uart_write_wakeup(&ch->uart_port);
+}
+
+static void cls_parse_modem(struct jsm_channel *ch, u8 signals)
+{
+ u8 msignals = signals;
+
+ jsm_dbg(MSIGS, &ch->ch_bd->pci_dev,
+ "cls_parse_modem: port: %d msignals: %x\n",
+ ch->ch_portnum, msignals);
+
+ /*
+ * Scrub off lower bits.
+ * They signify deltas, which I don't care about.
+ * Keep DDCD and DDSR though.
+ */
+ msignals &= 0xf8;
+
+ if (msignals & UART_MSR_DDCD)
+ uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD);
+ if (msignals & UART_MSR_DDSR)
+ uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS);
+
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+ jsm_dbg(MSIGS, &ch->ch_bd->pci_dev,
+ "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD));
+}
+
+/* Parse the ISR register for the specific port */
+static inline void cls_parse_isr(struct jsm_board *brd, uint port)
+{
+ struct jsm_channel *ch;
+ u8 isr = 0;
+ unsigned long flags;
+
+ /*
+ * No need to verify the board pointer; it was already
+ * verified in the interrupt routine.
+ */
+
+ if (port > brd->nasync)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch)
+ return;
+
+ /* Here we try to figure out what caused the interrupt */
+ while (1) {
+ isr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Bail if no pending interrupt on port */
+ if (isr & UART_IIR_NO_INT)
+ break;
+
+ /* Receive Interrupt pending */
+ if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
+ /* Read data from uart -> queue */
+ cls_copy_data_from_uart_to_queue(ch);
+ jsm_check_queue_flow_control(ch);
+ }
+
+ /* Transmit Hold register empty pending */
+ if (isr & UART_IIR_THRI) {
+ /* Transfer data (if any) from Write Queue -> UART. */
+ spin_lock_irqsave(&ch->ch_lock, flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+ cls_copy_data_from_queue_to_uart(ch);
+ }
+
+ /*
+ * CTS/RTS change of state:
+ * We don't need to do anything; the cls_parse_modem call
+ * below will grab the updated modem signals.
+ */
+
+ /* Parse any modem signal changes */
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+ }
+}
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_write(struct jsm_channel *ch)
+{
+ u8 tmp = 0;
+ u8 i = 0;
+
+ if (!ch)
+ return;
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
+
+ for (i = 0; i < 10; i++) {
+ /* Check to see if the UART feels it completely flushed the FIFO */
+ tmp = readb(&ch->ch_cls_uart->isr_fcr);
+ if (tmp & UART_FCR_CLEAR_XMIT) {
+ jsm_dbg(IOCTL, &ch->ch_bd->pci_dev,
+ "Still flushing TX UART... i: %d\n", i);
+ udelay(10);
+ } else
+ break;
+ }
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_read(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ /*
+ * For complete POSIX compatibility, we should be purging the
+ * read FIFO in the UART here.
+ *
+ * However, clearing the read FIFO (UART_FCR_CLEAR_RCVR) also
+ * incorrectly flushes pending write data as well, basically trashing
+ * the FIFO.
+ *
+ * Presumably, this is a bug in this UART.
+ */
+
+ udelay(10);
+}
+
+static void cls_send_start_character(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ if (ch->ch_startc != __DISABLED_CHAR) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+static void cls_send_stop_character(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ if (ch->ch_stopc != __DISABLED_CHAR) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+/*
+ * cls_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void cls_param(struct jsm_channel *ch)
+{
+ u8 lcr = 0;
+ u8 uart_lcr = 0;
+ u8 ier = 0;
+ u32 baud = 9600;
+ int quot = 0;
+ struct jsm_board *bd;
+ int i;
+ unsigned int cflag;
+
+ bd = ch->ch_bd;
+ if (!bd)
+ return;
+
+ /*
+ * If the baud rate is zero, flush the queues and drop the modem lines.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = 0;
+ ch->ch_r_tail = 0;
+ ch->ch_e_head = 0;
+ ch->ch_e_tail = 0;
+
+ cls_flush_uart_write(ch);
+ cls_flush_uart_read(ch);
+
+ /* The baudrate is B0 so all modem lines are to be dropped. */
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ return;
+ }
+
+ cflag = C_BAUD(ch->uart_port.state->port.tty);
+ baud = 9600;
+ for (i = 0; i < ARRAY_SIZE(baud_rates); i++) {
+ if (baud_rates[i].cflag == cflag) {
+ baud = baud_rates[i].rate;
+ break;
+ }
+ }
+
+ if (ch->ch_flags & CH_BAUD0)
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ if (ch->ch_c_cflag & PARENB)
+ lcr |= UART_LCR_PARITY;
+
+ if (!(ch->ch_c_cflag & PARODD))
+ lcr |= UART_LCR_EPAR;
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = readb(&ch->ch_cls_uart->ier);
+ uart_lcr = readb(&ch->ch_cls_uart->lcr);
+
+ quot = ch->ch_bd->bd_dividend / baud;
+
+ if (quot != 0) {
+ writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
+ writeb((quot >> 8), &ch->ch_cls_uart->ier);
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+ }
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD)
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+
+ ier |= (UART_IER_THRI | UART_IER_MSI);
+
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ cls_set_cts_flow_control(ch);
+ else if (ch->ch_c_iflag & IXON) {
+ /*
+ * If start/stop is set to disable,
+ * then we should disable flow control.
+ */
+ if ((ch->ch_startc == __DISABLED_CHAR) ||
+ (ch->ch_stopc == __DISABLED_CHAR))
+ cls_set_no_output_flow_control(ch);
+ else
+ cls_set_ixon_flow_control(ch);
+ } else
+ cls_set_no_output_flow_control(ch);
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ cls_set_rts_flow_control(ch);
+ else if (ch->ch_c_iflag & IXOFF) {
+ /*
+ * If start/stop is set to disable,
+ * then we should disable flow control.
+ */
+ if ((ch->ch_startc == __DISABLED_CHAR) ||
+ (ch->ch_stopc == __DISABLED_CHAR))
+ cls_set_no_input_flow_control(ch);
+ else
+ cls_set_ixoff_flow_control(ch);
+ } else
+ cls_set_no_input_flow_control(ch);
+
+ cls_assert_modem_signals(ch);
+
+ /* Get the current status of the modem signals */
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+}
+
+/*
+ * cls_intr()
+ *
+ * Classic-specific interrupt handler.
+ */
+static irqreturn_t cls_intr(int irq, void *voidbrd)
+{
+ struct jsm_board *brd = voidbrd;
+ unsigned long lock_flags;
+ unsigned char uart_poll;
+ uint i = 0;
+
+ /* Lock out the slow poller from running on this board. */
+ spin_lock_irqsave(&brd->bd_intr_lock, lock_flags);
+
+ /*
+ * Check the board's global interrupt offset to see if we
+ * actually do have an interrupt pending on us.
+ */
+ uart_poll = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);
+
+ jsm_dbg(INTR, &brd->pci_dev, "%s:%d uart_poll: %x\n",
+ __FILE__, __LINE__, uart_poll);
+
+ if (!uart_poll) {
+ jsm_dbg(INTR, &brd->pci_dev,
+ "Kernel interrupted us, but no pending interrupts...\n");
+ spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ /* At this point, we have at least SOMETHING to service; dig further. */
+
+ /* Parse each port to find out what caused the interrupt */
+ for (i = 0; i < brd->nasync; i++)
+ cls_parse_isr(brd, i);
+
+ spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Inits UART */
+static void cls_uart_init(struct jsm_channel *ch)
+{
+ unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
+ unsigned char isr_fcr = 0;
+
+ writeb(0, &ch->ch_cls_uart->ier);
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on Enhanced/Extended controls */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_cls_uart->txrx);
+
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ &ch->ch_cls_uart->isr_fcr);
+ udelay(10);
+
+ ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ readb(&ch->ch_cls_uart->lsr);
+ readb(&ch->ch_cls_uart->msr);
+}
+
+/*
+ * Turns off UART.
+ */
+static void cls_uart_off(struct jsm_channel *ch)
+{
+ /* Stop all interrupts from occurring. */
+ writeb(0, &ch->ch_cls_uart->ier);
+}
+
+/*
+ * cls_get_uart_bytes_left.
+ * Returns 0 if nothing is left in the FIFO, 1 otherwise.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static u32 cls_get_uart_bytes_left(struct jsm_channel *ch)
+{
+ u8 left = 0;
+ u8 lsr = readb(&ch->ch_cls_uart->lsr);
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT))
+ left = 1;
+ else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+/*
+ * cls_send_break.
+ * Starts sending a break through the UART.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_break(struct jsm_channel *ch)
+{
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ u8 temp = readb(&ch->ch_cls_uart->lcr);
+
+ writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+ }
+}
+
+/*
+ * cls_send_immediate_char.
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c)
+{
+ writeb(c, &ch->ch_cls_uart->txrx);
+}
+
+struct board_ops jsm_cls_ops = {
+ .intr = cls_intr,
+ .uart_init = cls_uart_init,
+ .uart_off = cls_uart_off,
+ .param = cls_param,
+ .assert_modem_signals = cls_assert_modem_signals,
+ .flush_uart_write = cls_flush_uart_write,
+ .flush_uart_read = cls_flush_uart_read,
+ .disable_receiver = cls_disable_receiver,
+ .enable_receiver = cls_enable_receiver,
+ .send_break = cls_send_break,
+ .clear_break = cls_clear_break,
+ .send_start_character = cls_send_start_character,
+ .send_stop_character = cls_send_stop_character,
+ .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = cls_get_uart_bytes_left,
+ .send_immediate_char = cls_send_immediate_char
+};
+
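Every cls_set_*_flow_control() helper added above repeats the same unlock/modify/relock sequence around the Exar 654 Enhanced Feature Register: save the LCR, write 0xBF to expose the enhanced register set, update the EFR bits at the ISR/FCR offset, then restore the LCR to hide them again. A minimal sketch of that pattern, assuming a hypothetical helper with raw register pointers in place of the driver's channel structure:

static void cls_efr_update(u8 __iomem *lcr, u8 __iomem *efr, u8 set, u8 clear)
{
        u8 old_lcr = readb(lcr);
        u8 val;

        /* LCR = 0xBF exposes the enhanced register set (EFR sits at the ISR/FCR offset) */
        writeb(UART_EXAR654_ENHANCED_REGISTER_SET, lcr);

        val = readb(efr);
        val |= set;             /* e.g. UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR */
        val &= ~clear;          /* e.g. UART_EXAR654_EFR_IXON */
        writeb(val, efr);

        /* Restoring the old LCR value turns enhanced access back off */
        writeb(old_lcr, lcr);
}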
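cls_copy_data_from_uart_to_queue() above tracks the receive ring with masked head/tail indices and, when the ring fills up, advances the tail so the oldest byte is dropped rather than the newest. A stripped-down sketch of that policy, with an illustrative ring size and names rather than the driver's RQUEUEMASK machinery:

#define RING_SIZE       64                      /* power of two, mask = size - 1 */
#define RING_MASK       (RING_SIZE - 1)

struct rx_ring {
        unsigned char   buf[RING_SIZE];
        unsigned short  head, tail;
        unsigned long   overruns;
};

static void rx_ring_push(struct rx_ring *r, unsigned char c)
{
        unsigned short head = r->head & RING_MASK;
        unsigned short tail = r->tail & RING_MASK;
        int space = tail - head - 1;

        if (space < 0)
                space += RING_SIZE;

        /* Full: drop the oldest byte by advancing the tail, as the driver does */
        if (space < 1) {
                r->tail = (tail + 1) & RING_MASK;
                r->overruns++;
        }

        r->buf[head] = c;
        r->head = (head + 1) & RING_MASK;
}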
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index d2885a7bb090..efbd87a76656 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -13,11 +13,6 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
@@ -31,8 +26,7 @@
#include "jsm.h"
MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International "
- "Neo PCI based product line");
+MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("jsm");
@@ -50,7 +44,7 @@ struct uart_driver jsm_uart_driver = {
};
static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state);
+ pci_channel_state_t state);
static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev);
static void jsm_io_resume(struct pci_dev *pdev);
@@ -68,7 +62,7 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc = 0;
struct jsm_board *brd;
- static int adapter_count = 0;
+ static int adapter_count;
rc = pci_enable_device(pdev);
if (rc) {
@@ -82,10 +76,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
- brd = kzalloc(sizeof(struct jsm_board), GFP_KERNEL);
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd) {
- dev_err(&pdev->dev,
- "memory allocation for board structure failed\n");
rc = -ENOMEM;
goto out_release_regions;
}
@@ -95,7 +87,6 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
brd->pci_dev = pdev;
switch (pdev->device) {
-
case PCI_DEVICE_ID_NEO_2DB9:
case PCI_DEVICE_ID_NEO_2DB9PRI:
case PCI_DEVICE_ID_NEO_2RJ45:
@@ -104,6 +95,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
brd->maxports = 2;
break;
+ case PCI_DEVICE_ID_CLASSIC_4:
+ case PCI_DEVICE_ID_CLASSIC_4_422:
case PCI_DEVICE_ID_NEO_4:
case PCIE_DEVICE_ID_NEO_4:
case PCIE_DEVICE_ID_NEO_4RJ45:
@@ -111,6 +104,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
brd->maxports = 4;
break;
+ case PCI_DEVICE_ID_CLASSIC_8:
+ case PCI_DEVICE_ID_CLASSIC_8_422:
case PCI_DEVICE_ID_DIGI_NEO_8:
case PCIE_DEVICE_ID_NEO_8:
case PCIE_DEVICE_ID_NEO_8RJ45:
@@ -129,36 +124,109 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
brd->irq = pdev->irq;
- jsm_dbg(INIT, &brd->pci_dev, "jsm_found_board - NEO adapter\n");
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_CLASSIC_4:
+ case PCI_DEVICE_ID_CLASSIC_4_422:
+ case PCI_DEVICE_ID_CLASSIC_8:
+ case PCI_DEVICE_ID_CLASSIC_8_422:
+
+ jsm_dbg(INIT, &brd->pci_dev,
+ "jsm_found_board - Classic adapter\n");
+
+ /*
+ * For PCI ClassicBoards
+ * PCI Local Address (i.e. "resource" number) space
+ * 0 PLX Memory Mapped Config
+ * 1 PLX I/O Mapped Config
+ * 2 I/O Mapped UARTs and Status
+ * 3 Memory Mapped VPD
+ * 4 Memory Mapped UARTs and Status
+ */
+
+ /* Get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 4);
+ brd->membase_end = pci_resource_end(pdev, 4);
+
+ if (brd->membase & 0x1)
+ brd->membase &= ~0x3;
+ else
+ brd->membase &= ~0xF;
+
+ brd->iobase = pci_resource_start(pdev, 1);
+ brd->iobase_end = pci_resource_end(pdev, 1);
+ brd->iobase = ((unsigned int)(brd->iobase)) & 0xFFFE;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &jsm_cls_ops;
+
+ brd->bd_uart_offset = 0x8;
+ brd->bd_dividend = 921600;
+
+ brd->re_map_membase = ioremap(brd->membase,
+ pci_resource_len(pdev, 4));
+ if (!brd->re_map_membase) {
+ dev_err(&pdev->dev,
+ "Card has no PCI Memory resources, failing board.\n");
+ rc = -ENOMEM;
+ goto out_kfree_brd;
+ }
+
+ /*
+ * Enable Local Interrupt 1 (0x1),
+ * Local Interrupt 1 Polarity Active high (0x2),
+ * Enable PCI interrupt (0x40)
+ */
+ outb(0x43, brd->iobase + 0x4c);
+
+ break;
+
+ case PCI_DEVICE_ID_NEO_2DB9:
+ case PCI_DEVICE_ID_NEO_2DB9PRI:
+ case PCI_DEVICE_ID_NEO_2RJ45:
+ case PCI_DEVICE_ID_NEO_2RJ45PRI:
+ case PCI_DEVICE_ID_NEO_2_422_485:
+ case PCI_DEVICE_ID_NEO_4:
+ case PCIE_DEVICE_ID_NEO_4:
+ case PCIE_DEVICE_ID_NEO_4RJ45:
+ case PCIE_DEVICE_ID_NEO_4_IBM:
+ case PCI_DEVICE_ID_DIGI_NEO_8:
+ case PCIE_DEVICE_ID_NEO_8:
+ case PCIE_DEVICE_ID_NEO_8RJ45:
+
+ jsm_dbg(INIT, &brd->pci_dev, "jsm_found_board - NEO adapter\n");
- /* get the PCI Base Address Registers */
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
+ if (brd->membase & 1)
+ brd->membase &= ~0x3;
+ else
+ brd->membase &= ~0xF;
- /* Assign the board_ops struct */
- brd->bd_ops = &jsm_neo_ops;
+ /* Assign the board_ops struct */
+ brd->bd_ops = &jsm_neo_ops;
- brd->bd_uart_offset = 0x200;
- brd->bd_dividend = 921600;
+ brd->bd_uart_offset = 0x200;
+ brd->bd_dividend = 921600;
- brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
- if (!brd->re_map_membase) {
- dev_err(&pdev->dev,
- "card has no PCI Memory resources, "
- "failing board.\n");
- rc = -ENOMEM;
- goto out_kfree_brd;
+ brd->re_map_membase = ioremap(brd->membase,
+ pci_resource_len(pdev, 0));
+ if (!brd->re_map_membase) {
+ dev_err(&pdev->dev,
+ "Card has no PCI Memory resources, failing board.\n");
+ rc = -ENOMEM;
+ goto out_kfree_brd;
+ }
+
+ break;
+ default:
+ return -ENXIO;
}
- rc = request_irq(brd->irq, brd->bd_ops->intr,
- IRQF_SHARED, "JSM", brd);
+ rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
if (rc) {
- printk(KERN_WARNING "Failed to hook IRQ %d\n",brd->irq);
+ dev_warn(&pdev->dev, "Failed to hook IRQ %d\n", brd->irq);
goto out_iounmap;
}
@@ -178,7 +246,7 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Log the information about the board */
- dev_info(&pdev->dev, "board %d: Digi Neo (rev %d), irq %d\n",
+ dev_info(&pdev->dev, "board %d: Digi Classic/Neo (rev %d), irq %d\n",
adapter_count, brd->rev, brd->irq);
pci_set_drvdata(pdev, brd);
@@ -205,6 +273,18 @@ static void jsm_remove_one(struct pci_dev *pdev)
struct jsm_board *brd = pci_get_drvdata(pdev);
int i = 0;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_CLASSIC_4:
+ case PCI_DEVICE_ID_CLASSIC_4_422:
+ case PCI_DEVICE_ID_CLASSIC_8:
+ case PCI_DEVICE_ID_CLASSIC_8_422:
+ /* Tell card not to interrupt anymore. */
+ outb(0x0, brd->iobase + 0x4c);
+ break;
+ default:
+ break;
+ }
+
jsm_remove_uart_port(brd);
free_irq(brd->irq, brd);
@@ -239,6 +319,10 @@ static struct pci_device_id jsm_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4), 0, 0, 11 },
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4RJ45), 0, 0, 12 },
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_8RJ45), 0, 0, 13 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_4), 0, 0, 14 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_4_422), 0, 0, 15 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_8), 0, 0, 16 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_CLASSIC_8_422), 0, 0, 17 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
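The Classic and Neo probe paths above differ mainly in the per-board constants they record: bd_uart_offset (0x8 for Classic, 0x200 for Neo) spaces the per-port register blocks, and bd_dividend feeds the baud divisor used by cls_param()/neo_param() and the port mapping in jsm_tty_init(). A rough sketch of how those constants are consumed, using hypothetical helper names rather than the driver's own code paths:

static u8 __iomem *jsm_port_base(struct jsm_board *brd, unsigned int portnum)
{
        /* One UART register block every bd_uart_offset bytes (0x8 Classic, 0x200 Neo) */
        return brd->re_map_membase + brd->bd_uart_offset * portnum;
}

static unsigned int jsm_baud_divisor(struct jsm_board *brd, unsigned int baud)
{
        /* Programmed into the divisor latch by cls_param()/neo_param() */
        return brd->bd_dividend / baud;         /* e.g. 921600 / 9600 = 96 */
}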
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index dfaf48826417..7291c2117daa 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -13,11 +13,6 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
@@ -649,7 +644,7 @@ static void neo_flush_uart_write(struct jsm_channel *ch)
/* Check to see if the UART feels it completely flushed the FIFO. */
tmp = readb(&ch->ch_neo_uart->isr_fcr);
- if (tmp & 4) {
+ if (tmp & UART_FCR_CLEAR_XMIT) {
jsm_dbg(IOCTL, &ch->ch_bd->pci_dev,
"Still flushing TX UART... i: %d\n", i);
udelay(10);
@@ -694,7 +689,7 @@ static void neo_flush_uart_read(struct jsm_channel *ch)
/*
* No locks are assumed to be held when calling this function.
*/
-static void neo_clear_break(struct jsm_channel *ch, int force)
+static void neo_clear_break(struct jsm_channel *ch)
{
unsigned long lock_flags;
@@ -1024,27 +1019,24 @@ static void neo_param(struct jsm_channel *ch)
lcr |= UART_LCR_STOP;
switch (ch->ch_c_cflag & CSIZE) {
- case CS5:
- lcr |= UART_LCR_WLEN5;
- break;
- case CS6:
- lcr |= UART_LCR_WLEN6;
- break;
- case CS7:
- lcr |= UART_LCR_WLEN7;
- break;
- case CS8:
- default:
- lcr |= UART_LCR_WLEN8;
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
}
ier = readb(&ch->ch_neo_uart->ier);
uart_lcr = readb(&ch->ch_neo_uart->lcr);
- if (baud == 0)
- baud = 9600;
-
quot = ch->ch_bd->bd_dividend / baud;
if (quot != 0) {
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 3e5c1563afe2..524e86ab3cae 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -13,11 +13,6 @@
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Ananda Venkatarman <mansarov@us.ibm.com>
@@ -77,7 +72,8 @@ static unsigned int jsm_tty_tx_empty(struct uart_port *port)
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
int result;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");
@@ -98,7 +94,8 @@ static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
*/
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");
@@ -133,7 +130,8 @@ static void jsm_tty_write(struct uart_port *port)
static void jsm_tty_start_tx(struct uart_port *port)
{
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");
@@ -145,7 +143,8 @@ static void jsm_tty_start_tx(struct uart_port *port)
static void jsm_tty_stop_tx(struct uart_port *port)
{
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");
@@ -157,7 +156,8 @@ static void jsm_tty_stop_tx(struct uart_port *port)
static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
unsigned long lock_flags;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
struct ktermios *termios;
spin_lock_irqsave(&port->lock, lock_flags);
@@ -172,7 +172,8 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
static void jsm_tty_stop_rx(struct uart_port *port)
{
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
channel->ch_bd->bd_ops->disable_receiver(channel);
}
@@ -180,13 +181,14 @@ static void jsm_tty_stop_rx(struct uart_port *port)
static void jsm_tty_break(struct uart_port *port, int break_state)
{
unsigned long lock_flags;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
spin_lock_irqsave(&port->lock, lock_flags);
if (break_state == -1)
channel->ch_bd->bd_ops->send_break(channel);
else
- channel->ch_bd->bd_ops->clear_break(channel, 0);
+ channel->ch_bd->bd_ops->clear_break(channel);
spin_unlock_irqrestore(&port->lock, lock_flags);
}
@@ -194,7 +196,8 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
static int jsm_tty_open(struct uart_port *port)
{
struct jsm_board *brd;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
struct ktermios *termios;
/* Get board pointer from our array of majors we have allocated */
@@ -273,7 +276,8 @@ static void jsm_tty_close(struct uart_port *port)
{
struct jsm_board *bd;
struct ktermios *ts;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");
@@ -307,7 +311,8 @@ static void jsm_tty_set_termios(struct uart_port *port,
struct ktermios *old_termios)
{
unsigned long lock_flags;
- struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
spin_lock_irqsave(&port->lock, lock_flags);
channel->ch_c_cflag = termios->c_cflag;
@@ -415,6 +420,8 @@ int jsm_tty_init(struct jsm_board *brd)
if (brd->bd_uart_offset == 0x200)
ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
+ else
+ ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
ch->ch_bd = brd;
ch->ch_portnum = i;
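The jsm_tty.c hunks above replace every (struct jsm_channel *)port cast with container_of(). The cast only worked because uart_port happened to be the first member of struct jsm_channel; container_of() derives the enclosing structure from the member's offset and stays correct however the fields are ordered. A generic sketch of the idiom, with an illustrative structure rather than the driver's:

#include <linux/kernel.h>
#include <linux/serial_core.h>

struct example_port {
        unsigned long           error_count;    /* uart_port need not be first */
        struct uart_port        uart_port;
};

static struct example_port *to_example_port(struct uart_port *port)
{
        /* Computes the address of the enclosing structure from the embedded member */
        return container_of(port, struct example_port, uart_port);
}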
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 4675fe198d31..4ccc0397664c 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -497,8 +497,10 @@ lqasc_type(struct uart_port *port)
static void
lqasc_release_port(struct uart_port *port)
{
+ struct platform_device *pdev = to_platform_device(port->dev);
+
if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
+ devm_iounmap(&pdev->dev, port->membase);
port->membase = NULL;
}
}
@@ -743,7 +745,6 @@ MODULE_DEVICE_TABLE(of, ltq_asc_match);
static struct platform_driver lqasc_driver = {
.driver = {
.name = DRVNAME,
- .owner = THIS_MODULE,
.of_match_table = ltq_asc_match,
},
};
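The lantiq change above switches release_port to devm_iounmap(), matching a mapping presumably created with a device-managed devm_ioremap(); managed mappings are normally released automatically on driver detach, so an explicit devm_iounmap() is only needed when the driver wants to drop the mapping earlier, as here. A sketch of the managed mapping side, with illustrative probe code rather than the lantiq driver's:

#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /* Unmapped automatically on driver detach, or earlier via devm_iounmap() */
        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        return 0;
}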
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 6f0f89282847..e92d7ebe9e77 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -768,7 +768,6 @@ static struct platform_driver serial_hs_lpc32xx_driver = {
.resume = serial_hs_lpc32xx_resume,
.driver = {
.name = MODNAME,
- .owner = THIS_MODULE,
.of_match_table = serial_hs_lpc32xx_dt_ids,
},
};
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 5702828fb62e..8f7f83a14c93 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -249,7 +249,8 @@ static void serial_out(struct uart_sio_port *up, int offset, int value)
static void m32r_sio_stop_tx(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
@@ -260,7 +261,8 @@ static void m32r_sio_stop_tx(struct uart_port *port)
static void m32r_sio_start_tx(struct uart_port *port)
{
#ifdef CONFIG_SERIAL_M32R_PLDSIO
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
struct circ_buf *xmit = &up->port.state->xmit;
if (!(up->ier & UART_IER_THRI)) {
@@ -274,7 +276,8 @@ static void m32r_sio_start_tx(struct uart_port *port)
}
while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
#else
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
@@ -285,7 +288,8 @@ static void m32r_sio_start_tx(struct uart_port *port)
static void m32r_sio_stop_rx(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
@@ -294,7 +298,8 @@ static void m32r_sio_stop_rx(struct uart_port *port)
static void m32r_sio_enable_ms(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
@@ -581,7 +586,8 @@ static void m32r_sio_timeout(unsigned long data)
static unsigned int m32r_sio_tx_empty(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
unsigned long flags;
unsigned int ret;
@@ -609,7 +615,8 @@ static void m32r_sio_break_ctl(struct uart_port *port, int break_state)
static int m32r_sio_startup(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
int retval;
sio_init();
@@ -652,7 +659,8 @@ static int m32r_sio_startup(struct uart_port *port)
static void m32r_sio_shutdown(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
/*
* Disable interrupts from this port
@@ -681,7 +689,8 @@ static unsigned int m32r_sio_get_divisor(struct uart_port *port,
static void m32r_sio_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
unsigned char cval = 0;
unsigned long flags;
unsigned int baud, quot;
@@ -780,7 +789,8 @@ static void m32r_sio_set_termios(struct uart_port *port,
static void m32r_sio_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
if (up->pm)
up->pm(port, state, oldstate);
@@ -825,7 +835,8 @@ m32r_sio_request_std_resource(struct uart_sio_port *up, struct resource **res)
static void m32r_sio_release_port(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
unsigned long start, offset = 0, size = 0;
size <<= up->port.regshift;
@@ -862,7 +873,8 @@ static void m32r_sio_release_port(struct uart_port *port)
static int m32r_sio_request_port(struct uart_port *port)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
struct resource *res = NULL;
int ret = 0;
@@ -889,7 +901,8 @@ static int m32r_sio_request_port(struct uart_port *port)
static void m32r_sio_config_port(struct uart_port *port, int unused)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
@@ -1000,7 +1013,8 @@ static inline void wait_for_xmitr(struct uart_sio_port *up)
static void m32r_sio_console_putchar(struct uart_port *port, int ch)
{
- struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct uart_sio_port *up =
+ container_of(port, struct uart_sio_port, port);
wait_for_xmitr(up);
sio_out(up, SIOTXB, ch);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 0041a64cc86e..182549f55904 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -346,10 +346,13 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
+ ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ MAX310X_EXTREG_ENBL);
if (ret)
return ret;
+ regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
+ regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -874,55 +877,37 @@ static void max310x_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
}
-static int max310x_ioctl(struct uart_port *port, unsigned int cmd,
- unsigned long arg)
+static int max310x_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
{
-#if defined(TIOCSRS485) && defined(TIOCGRS485)
- struct serial_rs485 rs485;
unsigned int val;
- switch (cmd) {
- case TIOCSRS485:
- if (copy_from_user(&rs485, (void __user *)arg, sizeof(rs485)))
- return -EFAULT;
- if (rs485.delay_rts_before_send > 0x0f ||
- rs485.delay_rts_after_send > 0x0f)
- return -ERANGE;
- val = (rs485.delay_rts_before_send << 4) |
- rs485.delay_rts_after_send;
- max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
- if (rs485.flags & SER_RS485_ENABLED) {
- max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_TRNSCVCTRL_BIT,
- MAX310X_MODE1_TRNSCVCTRL_BIT);
- max310x_port_update(port, MAX310X_MODE2_REG,
- MAX310X_MODE2_ECHOSUPR_BIT,
- MAX310X_MODE2_ECHOSUPR_BIT);
- } else {
- max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
- max310x_port_update(port, MAX310X_MODE2_REG,
- MAX310X_MODE2_ECHOSUPR_BIT, 0);
- }
- return 0;
- case TIOCGRS485:
- memset(&rs485, 0, sizeof(rs485));
- val = max310x_port_read(port, MAX310X_MODE1_REG);
- rs485.flags = (val & MAX310X_MODE1_TRNSCVCTRL_BIT) ?
- SER_RS485_ENABLED : 0;
- rs485.flags |= SER_RS485_RTS_ON_SEND;
- val = max310x_port_read(port, MAX310X_HDPIXDELAY_REG);
- rs485.delay_rts_before_send = val >> 4;
- rs485.delay_rts_after_send = val & 0x0f;
- if (copy_to_user((void __user *)arg, &rs485, sizeof(rs485)))
- return -EFAULT;
- return 0;
- default:
- break;
+ if (rs485->delay_rts_before_send > 0x0f ||
+ rs485->delay_rts_after_send > 0x0f)
+ return -ERANGE;
+
+ val = (rs485->delay_rts_before_send << 4) |
+ rs485->delay_rts_after_send;
+ max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT,
+ MAX310X_MODE1_TRNSCVCTRL_BIT);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT,
+ MAX310X_MODE2_ECHOSUPR_BIT);
+ } else {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_ECHOSUPR_BIT, 0);
}
-#endif
- return -ENOIOCTLCMD;
+ rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_ENABLED;
+ memset(rs485->padding, 0, sizeof(rs485->padding));
+ port->rs485 = *rs485;
+
+ return 0;
}
static int max310x_startup(struct uart_port *port)
@@ -1017,7 +1002,6 @@ static const struct uart_ops max310x_ops = {
.release_port = max310x_null_void,
.config_port = max310x_config_port,
.verify_port = max310x_verify_port,
- .ioctl = max310x_ioctl,
};
static int __maybe_unused max310x_suspend(struct device *dev)
@@ -1218,6 +1202,7 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->p[i].port.iobase = i * 0x20;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
+ s->p[i].port.rs485_config = max310x_rs485_config;
s->p[i].port.ops = &max310x_ops;
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
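The max310x conversion above drops the driver-private TIOCSRS485/TIOCGRS485 ioctl handling in favour of the serial core's rs485_config hook: the core handles the copy to and from user space and invokes the driver callback, which reports back the flags it honours and stores them in port->rs485. A minimal sketch of such a callback, with the device-specific register writes elided:

#include <linux/serial_core.h>
#include <linux/string.h>

static int example_rs485_config(struct uart_port *port, struct serial_rs485 *rs485)
{
        if (rs485->delay_rts_before_send > 0x0f ||
            rs485->delay_rts_after_send > 0x0f)
                return -ERANGE;

        /* ... write the delays and enable/disable bits to the hardware here ... */

        /* Report back only what this hardware honours and remember the result */
        rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
        memset(rs485->padding, 0, sizeof(rs485->padding));
        port->rs485 = *rs485;

        return 0;
}

It is wired up at probe time exactly as the patch does for each port, e.g. port->rs485_config = example_rs485_config;.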
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index bc896dc7d2ed..10496672dfdb 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -57,7 +57,6 @@ struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
- struct serial_rs485 rs485; /* RS485 settings */
};
/****************************************************************************/
@@ -104,7 +103,7 @@ static void mcf_start_tx(struct uart_port *port)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- if (pp->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
/* Enable Transmitter */
writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR);
/* Manually assert RTS */
@@ -258,12 +257,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
mr2 |= MCFUART_MR2_TXCTS;
}
- if (pp->rs485.flags & SER_RS485_ENABLED) {
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
mr2 |= MCFUART_MR2_TXRTS;
}
- spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
@@ -360,7 +359,7 @@ static void mcf_tx_chars(struct mcf_uart *pp)
pp->imr &= ~MCFUART_UIR_TXREADY;
writeb(pp->imr, port->membase + MCFUART_UIMR);
/* Disable TX to negate RTS automatically */
- if (pp->rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
writeb(MCFUART_UCR_TXDISABLE,
port->membase + MCFUART_UCR);
}
@@ -440,13 +439,11 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
/* Enable or disable the RS485 support */
-static void mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- unsigned long flags;
unsigned char mr1, mr2;
- spin_lock_irqsave(&port->lock, flags);
/* Get mode registers */
mr1 = readb(port->membase + MCFUART_UMR);
mr2 = readb(port->membase + MCFUART_UMR);
@@ -460,32 +457,8 @@ static void mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
- pp->rs485 = *rs485;
- spin_unlock_irqrestore(&port->lock, flags);
-}
+ port->rs485 = *rs485;
-static int mcf_ioctl(struct uart_port *port, unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case TIOCSRS485: {
- struct serial_rs485 rs485;
- if (copy_from_user(&rs485, (struct serial_rs485 *)arg,
- sizeof(struct serial_rs485)))
- return -EFAULT;
- mcf_config_rs485(port, &rs485);
- break;
- }
- case TIOCGRS485: {
- struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
- if (copy_to_user((struct serial_rs485 *)arg, &pp->rs485,
- sizeof(struct serial_rs485)))
- return -EFAULT;
- break;
- }
- default:
- return -ENOIOCTLCMD;
- }
return 0;
}
@@ -510,7 +483,6 @@ static const struct uart_ops mcf_uart_ops = {
.release_port = mcf_release_port,
.config_port = mcf_config_port,
.verify_port = mcf_verify_port,
- .ioctl = mcf_ioctl,
};
static struct mcf_uart mcf_ports[4];
@@ -538,6 +510,7 @@ int __init early_mcf_setup(struct mcf_platform_uart *platp)
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
+ port->rs485_config = mcf_config_rs485;
port->ops = &mcf_uart_ops;
}
@@ -663,6 +636,7 @@ static int mcf_probe(struct platform_device *pdev)
port->uartclk = MCF_BUSCLK;
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
+ port->rs485_config = mcf_config_rs485;
uart_add_one_port(&mcf_driver, port);
}
@@ -693,7 +667,6 @@ static struct platform_driver mcf_platform_driver = {
.remove = mcf_remove,
.driver = {
.name = "mcfuart",
- .owner = THIS_MODULE,
},
};
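With mcf (like max310x above) routed through port->rs485_config, the user-visible interface is unchanged: applications keep configuring RS-485 through the standard TIOCSRS485/TIOCGRS485 ioctls. For reference, a small user-space sketch of enabling RS-485 on a port (device path illustrative):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int enable_rs485(const char *dev)               /* e.g. "/dev/ttyS2" */
{
        struct serial_rs485 rs485;
        int fd = open(dev, O_RDWR | O_NOCTTY);

        if (fd < 0)
                return -1;

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;

        if (ioctl(fd, TIOCSRS485, &rs485) < 0) {
                close(fd);                      /* driver rejected the settings */
                return -1;
        }

        return fd;
}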
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 30e9e60bc5cd..517cd073dc08 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -809,6 +809,7 @@ static void men_z135_remove(struct mcb_device *mdev)
static const struct mcb_device_id men_z135_ids[] = {
{ .device = 0x87 },
+ { }
};
MODULE_DEVICE_TABLE(mcb, men_z135_ids);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 15c749753317..67c036702629 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -599,7 +599,6 @@ static struct platform_driver meson_uart_platform_driver = {
.probe = meson_uart_probe,
.remove = meson_uart_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "meson_uart",
.of_match_table = meson_uart_dt_match,
},
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 4a5c956f4746..8fe4501d7565 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1368,7 +1368,7 @@ static void hsu_global_init(void)
hsu->iolen = 0x1000;
if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
- pr_warning("HSU: error in request mem region\n");
+ pr_warn("HSU: error in request mem region\n");
hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
if (!hsu->reg) {
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index a5f4e3648b15..3308ef243dc3 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1889,7 +1889,6 @@ static struct platform_driver mpc52xx_uart_of_driver = {
#endif
.driver = {
.name = "mpc52xx-psc-uart",
- .owner = THIS_MODULE,
.of_match_table = mpc52xx_uart_of_match,
},
};
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index ae49856ef6c7..856fd5a5fa3c 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -1246,7 +1246,8 @@ static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
*/
static uint mpsc_tx_empty(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
ulong iflags;
uint rc;
@@ -1264,7 +1265,8 @@ static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
static uint mpsc_get_mctrl(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
u32 mflags, status;
status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
@@ -1281,7 +1283,8 @@ static uint mpsc_get_mctrl(struct uart_port *port)
static void mpsc_stop_tx(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
pr_debug("mpsc_stop_tx[%d]\n", port->line);
@@ -1290,7 +1293,8 @@ static void mpsc_stop_tx(struct uart_port *port)
static void mpsc_start_tx(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
unsigned long iflags;
spin_lock_irqsave(&pi->tx_lock, iflags);
@@ -1316,7 +1320,8 @@ static void mpsc_start_rx(struct mpsc_port_info *pi)
static void mpsc_stop_rx(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);
@@ -1338,7 +1343,8 @@ static void mpsc_stop_rx(struct uart_port *port)
static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
ulong flags;
u32 v;
@@ -1353,7 +1359,8 @@ static void mpsc_break_ctl(struct uart_port *port, int ctl)
static int mpsc_startup(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
u32 flag = 0;
int rc;
@@ -1383,7 +1390,8 @@ static int mpsc_startup(struct uart_port *port)
static void mpsc_shutdown(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
@@ -1394,7 +1402,8 @@ static void mpsc_shutdown(struct uart_port *port)
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
u32 baud;
ulong flags;
u32 chr_bits, stop_bits, par;
@@ -1498,7 +1507,8 @@ static int mpsc_request_port(struct uart_port *port)
static void mpsc_release_port(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
if (pi->ready) {
mpsc_uninit_rings(pi);
@@ -1513,7 +1523,8 @@ static void mpsc_config_port(struct uart_port *port, int flags)
static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
int rc = 0;
pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
@@ -1548,7 +1559,8 @@ static void mpsc_put_poll_char(struct uart_port *port,
static int mpsc_get_poll_char(struct uart_port *port)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
struct mpsc_rx_desc *rxre;
u32 cmdstat, bytes_in, i;
u8 *bp;
@@ -1648,7 +1660,8 @@ static int mpsc_get_poll_char(struct uart_port *port)
static void mpsc_put_poll_char(struct uart_port *port,
unsigned char c)
{
- struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_port_info *pi =
+ container_of(port, struct mpsc_port_info, port);
u32 data;
data = readl(pi->mpsc_base + MPSC_MPCR);
@@ -2111,7 +2124,6 @@ static struct platform_driver mpsc_driver = {
.remove = mpsc_drv_remove,
.driver = {
.name = MPSC_CTLR_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1504a14ec1a6..77239d5e620d 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -27,6 +27,8 @@
* interrupt for a low speed UART device
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#ifdef CONFIG_MAGIC_SYSRQ
#define SUPPORT_SYSRQ
#endif
@@ -47,8 +49,6 @@
#include "mrst_max3110.h"
-#define PR_FMT "mrst_max3110: "
-
#define UART_TX_NEEDED 1
#define CON_TX_NEEDED 2
#define BIT_IRQ_PENDING 3
@@ -127,8 +127,8 @@ static int max3110_out(struct uart_max3110 *max, const u16 out)
*obuf = out;
ret = max3110_write_then_read(max, obuf, ibuf, 2, 1);
if (ret) {
- pr_warning(PR_FMT "%s(): get err msg %d when sending 0x%x\n",
- __func__, ret, out);
+ pr_warn("%s: get err msg %d when sending 0x%x\n",
+ __func__, ret, out);
goto exit;
}
@@ -153,10 +153,8 @@ static int max3110_read_multi(struct uart_max3110 *max)
blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- pr_warning(PR_FMT "%s(): fail to alloc dma buffer\n", __func__);
+ if (!buf)
return 0;
- }
/* tx/rx always have the same length */
obuf = buf;
@@ -212,13 +210,13 @@ serial_m3110_con_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- pr_info(PR_FMT "setting up console\n");
+ pr_info("setting up console\n");
if (co->index == -1)
co->index = 0;
if (!max) {
- pr_err(PR_FMT "pmax is NULL, return");
+ pr_err("pmax is NULL, return\n");
return -ENODEV;
}
@@ -296,8 +294,7 @@ static void send_circ_buf(struct uart_max3110 *max,
ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
if (ret)
- pr_warning(PR_FMT "%s(): get err msg %d\n",
- __func__, ret);
+ pr_warn("%s: get err msg %d\n", __func__, ret);
receive_chars(max, ibuf, len);
@@ -411,7 +408,7 @@ static int max3110_main_thread(void *_max)
int ret = 0;
struct circ_buf *xmit = &max->con_xmit;
- pr_info(PR_FMT "start main thread\n");
+ pr_info("start main thread\n");
do {
wait_event_interruptible(*wq,
@@ -455,7 +452,7 @@ static int max3110_read_thread(void *_max)
{
struct uart_max3110 *max = _max;
- pr_info(PR_FMT "start read thread\n");
+ pr_info("start read thread\n");
do {
/*
* If can't acquire the mutex, it means the main thread
@@ -481,7 +478,7 @@ static int serial_m3110_startup(struct uart_port *port)
int ret = 0;
if (port->line != 0) {
- pr_err(PR_FMT "uart port startup failed\n");
+ pr_err("uart port startup failed\n");
return -1;
}
@@ -504,7 +501,7 @@ static int serial_m3110_startup(struct uart_port *port)
if (IS_ERR(max->read_thread)) {
ret = PTR_ERR(max->read_thread);
max->read_thread = NULL;
- pr_err(PR_FMT "Can't create read thread!\n");
+ pr_err("Can't create read thread!\n");
return ret;
}
}
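
The mrst_max3110 change drops the hand-rolled PR_FMT prefix in favour of defining pr_fmt() before any includes, so every pr_info()/pr_warn()/pr_err() in the file is prefixed with the module name automatically. A minimal sketch of how pr_fmt() interacts with the pr_* helpers (the module itself is illustrative):

/* Must come before <linux/printk.h> is pulled in, directly or indirectly. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	/* Expands to: printk(KERN_INFO KBUILD_MODNAME ": " "loaded\n") */
	pr_info("loaded\n");
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");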
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 4b6c78331a64..c88b522ccd73 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -54,6 +54,7 @@ struct msm_port {
unsigned int imr;
int is_uartdm;
unsigned int old_snap_state;
+ bool break_detected;
};
static inline void wait_for_xmitr(struct uart_port *port)
@@ -126,23 +127,38 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
while (count > 0) {
unsigned char buf[4];
+ int sysrq, r_count, i;
sr = msm_read(port, UART_SR);
if ((sr & UART_SR_RX_READY) == 0) {
msm_port->old_snap_state -= count;
break;
}
+
ioread32_rep(port->membase + UARTDM_RF, buf, 1);
- if (sr & UART_SR_RX_BREAK) {
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- } else if (sr & UART_SR_PAR_FRAME_ERR)
- port->icount.frame++;
+ r_count = min_t(int, count, sizeof(buf));
- /* TODO: handle sysrq */
- tty_insert_flip_string(tport, buf, min(count, 4));
- count -= 4;
+ for (i = 0; i < r_count; i++) {
+ char flag = TTY_NORMAL;
+
+ if (msm_port->break_detected && buf[i] == 0) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ msm_port->break_detected = false;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+ spin_unlock(&port->lock);
+ sysrq = uart_handle_sysrq_char(port, buf[i]);
+ spin_lock(&port->lock);
+ if (!sysrq)
+ tty_insert_flip_char(tport, buf[i], flag);
+ }
+ count -= r_count;
}
spin_unlock(&port->lock);
@@ -174,6 +190,7 @@ static void handle_rx(struct uart_port *port)
while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
unsigned int c;
char flag = TTY_NORMAL;
+ int sysrq;
c = msm_read(port, UART_RF);
@@ -195,7 +212,10 @@ static void handle_rx(struct uart_port *port)
else if (sr & UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
- if (!uart_handle_sysrq_char(port, c))
+ spin_unlock(&port->lock);
+ sysrq = uart_handle_sysrq_char(port, c);
+ spin_lock(&port->lock);
+ if (!sysrq)
tty_insert_flip_char(tport, c, flag);
}
@@ -287,6 +307,11 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
+ if (misr & UART_IMR_RXBREAK_START) {
+ msm_port->break_detected = true;
+ msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
+ }
+
if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
if (msm_port->is_uartdm)
handle_rx_dm(port, misr);
@@ -402,9 +427,6 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
entry = msm_find_best_baud(port, baud);
- if (msm_port->is_uartdm)
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
-
msm_write(port, entry->code, UART_CSR);
/* RX stale watermark */
@@ -421,6 +443,18 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
/* set TX watermark */
msm_write(port, 10, UART_TFWR);
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_reset(port);
+
+ /* Enable RX and TX */
+ msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
+
+ msm_write(port, msm_port->imr, UART_IMR);
+
if (msm_port->is_uartdm) {
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
@@ -467,40 +501,6 @@ static int msm_startup(struct uart_port *port)
data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
msm_write(port, data, UART_MR1);
-
- /* make sure that RXSTALE count is non-zero */
- data = msm_read(port, UART_IPR);
- if (unlikely(!data)) {
- data |= UART_IPR_RXSTALE_LAST;
- data |= UART_IPR_STALE_LSB;
- msm_write(port, data, UART_IPR);
- }
-
- data = 0;
- if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
- msm_reset(port);
- data = UART_CR_TX_ENABLE;
- }
-
- data |= UART_CR_RX_ENABLE;
- msm_write(port, data, UART_CR); /* enable TX & RX */
-
- /* Make sure IPR is not 0 to start with*/
- if (msm_port->is_uartdm)
- msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
-
- /* turn on RX and CTS interrupts */
- msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
- UART_IMR_CURRENT_CTS;
-
- if (msm_port->is_uartdm) {
- msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
- }
-
- msm_write(port, msm_port->imr, UART_IMR);
return 0;
}
@@ -1044,17 +1044,22 @@ static int msm_serial_probe(struct platform_device *pdev)
struct resource *resource;
struct uart_port *port;
const struct of_device_id *id;
- int irq;
+ int irq, line;
+
+ if (pdev->dev.of_node)
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+ else
+ line = pdev->id;
- if (pdev->id == -1)
- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
+ if (line < 0)
+ line = atomic_inc_return(&msm_uart_next_id) - 1;
- if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ if (unlikely(line < 0 || line >= UART_NR))
return -ENXIO;
- dev_info(&pdev->dev, "msm_serial: detected port #%d\n", pdev->id);
+ dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
- port = get_port_from_line(pdev->id);
+ port = get_port_from_line(line);
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
@@ -1114,7 +1119,6 @@ static struct platform_driver msm_platform_driver = {
.probe = msm_serial_probe,
.driver = {
.name = "msm_serial",
- .owner = THIS_MODULE,
.of_match_table = msm_match_table,
},
};
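
Both msm_serial RX paths now drop port->lock around uart_handle_sysrq_char(), since the sysrq handler can re-enter the serial core (for example to print on the console) and would otherwise take the same lock. A reduced sketch of that pattern, with the surrounding RX bookkeeping elided:

#include <linux/serial_core.h>
#include <linux/tty_flip.h>

static void demo_rx_char(struct uart_port *port, unsigned char c, char flag)
{
	int sysrq;

	/*
	 * Caller holds port->lock; release it while the sysrq core runs,
	 * because it may call back into the driver (console output).
	 */
	spin_unlock(&port->lock);
	sysrq = uart_handle_sysrq_char(port, c);
	spin_lock(&port->lock);

	if (!sysrq)
		tty_insert_flip_char(&port->state->port, c, flag);
}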
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 73d3abe71e79..3e1c7138d8cd 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -65,6 +65,7 @@
#define UART_CR_TX_ENABLE (1 << 2)
#define UART_CR_RX_DISABLE (1 << 1)
#define UART_CR_RX_ENABLE (1 << 0)
+#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
#define UART_IMR 0x0014
#define UART_IMR_TXLEV (1 << 0)
@@ -72,6 +73,7 @@
#define UART_IMR_RXLEV (1 << 4)
#define UART_IMR_DELTA_CTS (1 << 5)
#define UART_IMR_CURRENT_CTS (1 << 6)
+#define UART_IMR_RXBREAK_START (1 << 10)
#define UART_IPR_RXSTALE_LAST 0x20
#define UART_IPR_STALE_LSB 0x1F
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index df68f334ab8b..62da8534ba75 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1838,7 +1838,6 @@ static struct platform_driver msm_serial_hs_platform_driver = {
.remove = msm_hs_remove,
.driver = {
.name = "msm_serial_hs",
- .owner = THIS_MODULE,
.pm = &msm_hs_dev_pm_ops,
},
};
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 10c29334fe2f..ec553f8eb218 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -14,6 +14,10 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -38,6 +42,12 @@
#include <asm/cacheflush.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include "serial_mctrl_gpio.h"
+
#define MXS_AUART_PORTS 5
#define MXS_AUART_FIFO_SIZE 16
@@ -139,7 +149,7 @@ struct mxs_auart_port {
#define MXS_AUART_DMA_RX_READY 3 /* bit 3 */
#define MXS_AUART_RTSCTS 4 /* bit 4 */
unsigned long flags;
- unsigned int ctrl;
+ unsigned int mctrl_prev;
enum mxs_auart_type devtype;
unsigned int irq;
@@ -155,6 +165,10 @@ struct mxs_auart_port {
struct scatterlist rx_sgl;
struct dma_chan *rx_dma_chan;
void *rx_dma_buf;
+
+ struct mctrl_gpios *gpios;
+ int gpio_irq[UART_GPIO_MAX];
+ bool ms_irq_enabled;
};
static struct platform_device_id mxs_auart_devtype[] = {
@@ -414,25 +428,102 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
ctrl |= AUART_CTRL2_RTS;
}
- s->ctrl = mctrl;
writel(ctrl, u->membase + AUART_CTRL2);
+
+ mctrl_gpio_set(s->gpios, mctrl);
+}
+
+#define MCTRL_ANY_DELTA (TIOCM_RI | TIOCM_DSR | TIOCM_CD | TIOCM_CTS)
+static u32 mxs_auart_modem_status(struct mxs_auart_port *s, u32 mctrl)
+{
+ u32 mctrl_diff;
+
+ mctrl_diff = mctrl ^ s->mctrl_prev;
+ s->mctrl_prev = mctrl;
+ if (mctrl_diff & MCTRL_ANY_DELTA && s->ms_irq_enabled &&
+ s->port.state != NULL) {
+ if (mctrl_diff & TIOCM_RI)
+ s->port.icount.rng++;
+ if (mctrl_diff & TIOCM_DSR)
+ s->port.icount.dsr++;
+ if (mctrl_diff & TIOCM_CD)
+ uart_handle_dcd_change(&s->port, mctrl & TIOCM_CD);
+ if (mctrl_diff & TIOCM_CTS)
+ uart_handle_cts_change(&s->port, mctrl & TIOCM_CTS);
+
+ wake_up_interruptible(&s->port.state->port.delta_msr_wait);
+ }
+ return mctrl;
}
static u32 mxs_auart_get_mctrl(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
u32 stat = readl(u->membase + AUART_STAT);
- int ctrl2 = readl(u->membase + AUART_CTRL2);
- u32 mctrl = s->ctrl;
+ u32 mctrl = 0;
- mctrl &= ~TIOCM_CTS;
if (stat & AUART_STAT_CTS)
mctrl |= TIOCM_CTS;
- if (ctrl2 & AUART_CTRL2_RTS)
- mctrl |= TIOCM_RTS;
+ return mctrl_gpio_get(s->gpios, &mctrl);
+}
+
+/*
+ * Enable modem status interrupts
+ */
+static void mxs_auart_enable_ms(struct uart_port *port)
+{
+ struct mxs_auart_port *s = to_auart_port(port);
- return mctrl;
+ /*
+ * Interrupt should not be enabled twice
+ */
+ if (s->ms_irq_enabled)
+ return;
+
+ s->ms_irq_enabled = true;
+
+ if (s->gpio_irq[UART_GPIO_CTS] >= 0)
+ enable_irq(s->gpio_irq[UART_GPIO_CTS]);
+ /* TODO: enable AUART_INTR_CTSMIEN otherwise */
+
+ if (s->gpio_irq[UART_GPIO_DSR] >= 0)
+ enable_irq(s->gpio_irq[UART_GPIO_DSR]);
+
+ if (s->gpio_irq[UART_GPIO_RI] >= 0)
+ enable_irq(s->gpio_irq[UART_GPIO_RI]);
+
+ if (s->gpio_irq[UART_GPIO_DCD] >= 0)
+ enable_irq(s->gpio_irq[UART_GPIO_DCD]);
+}
+
+/*
+ * Disable modem status interrupts
+ */
+static void mxs_auart_disable_ms(struct uart_port *port)
+{
+ struct mxs_auart_port *s = to_auart_port(port);
+
+ /*
+ * Interrupt should not be disabled twice
+ */
+ if (!s->ms_irq_enabled)
+ return;
+
+ s->ms_irq_enabled = false;
+
+ if (s->gpio_irq[UART_GPIO_CTS] >= 0)
+ disable_irq(s->gpio_irq[UART_GPIO_CTS]);
+ /* TODO: disable AUART_INTR_CTSMIEN otherwise */
+
+ if (s->gpio_irq[UART_GPIO_DSR] >= 0)
+ disable_irq(s->gpio_irq[UART_GPIO_DSR]);
+
+ if (s->gpio_irq[UART_GPIO_RI] >= 0)
+ disable_irq(s->gpio_irq[UART_GPIO_RI]);
+
+ if (s->gpio_irq[UART_GPIO_DCD] >= 0)
+ disable_irq(s->gpio_irq[UART_GPIO_DCD]);
}
static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s);
@@ -560,6 +651,10 @@ err_out:
}
+#define RTS_AT_AUART() IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(s->gpios, \
+ UART_GPIO_RTS))
+#define CTS_AT_AUART() IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(s->gpios, \
+ UART_GPIO_CTS))
static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *termios,
struct ktermios *old)
@@ -636,6 +731,7 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl |= AUART_LINECTRL_STP2;
/* figure out the hardware flow control settings */
+ ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN);
if (cflag & CRTSCTS) {
/*
* The DMA has a bug(see errata:2836) in mx23.
@@ -650,9 +746,11 @@ static void mxs_auart_settermios(struct uart_port *u,
ctrl2 |= AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE
| AUART_CTRL2_DMAONERR;
}
- ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN;
- } else {
- ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN);
+ /* Even if RTS is a GPIO line, RTSEN can be enabled because
+ * the pinctrl configuration decides the RTS pin function. */
+ ctrl2 |= AUART_CTRL2_RTSEN;
+ if (CTS_AT_AUART())
+ ctrl2 |= AUART_CTRL2_CTSEN;
}
/* set baud rate */
@@ -678,12 +776,30 @@ static void mxs_auart_settermios(struct uart_port *u,
dev_err(s->dev, "We can not start up the DMA.\n");
}
}
+
+ /* CTS flow-control and modem-status interrupts */
+ if (UART_ENABLE_MS(u, termios->c_cflag))
+ mxs_auart_enable_ms(u);
+ else
+ mxs_auart_disable_ms(u);
+}
+
+static void mxs_auart_set_ldisc(struct uart_port *port,
+ struct ktermios *termios)
+{
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+ mxs_auart_enable_ms(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ }
}
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
u32 istat;
struct mxs_auart_port *s = context;
+ u32 mctrl_temp = s->mctrl_prev;
u32 stat = readl(s->port.membase + AUART_STAT);
istat = readl(s->port.membase + AUART_INTR);
@@ -695,8 +811,20 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
| AUART_INTR_CTSMIS),
s->port.membase + AUART_INTR_CLR);
+ /*
+ * Dealing with GPIO interrupt
+ */
+ if (irq == s->gpio_irq[UART_GPIO_CTS] ||
+ irq == s->gpio_irq[UART_GPIO_DCD] ||
+ irq == s->gpio_irq[UART_GPIO_DSR] ||
+ irq == s->gpio_irq[UART_GPIO_RI])
+ mxs_auart_modem_status(s,
+ mctrl_gpio_get(s->gpios, &mctrl_temp));
+
if (istat & AUART_INTR_CTSMIS) {
- uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+ if (CTS_AT_AUART() && s->ms_irq_enabled)
+ uart_handle_cts_change(&s->port,
+ stat & AUART_STAT_CTS);
writel(AUART_INTR_CTSMIS,
s->port.membase + AUART_INTR_CLR);
istat &= ~AUART_INTR_CTSMIS;
@@ -757,6 +885,10 @@ static int mxs_auart_startup(struct uart_port *u)
*/
writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+ /* get initial status of modem lines */
+ mctrl_gpio_get(s->gpios, &s->mctrl_prev);
+
+ s->ms_irq_enabled = false;
return 0;
}
@@ -764,6 +896,8 @@ static void mxs_auart_shutdown(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
+ mxs_auart_disable_ms(u);
+
if (auart_dma_enabled(s))
mxs_auart_dma_exit(s);
@@ -779,10 +913,11 @@ static void mxs_auart_shutdown(struct uart_port *u)
static unsigned int mxs_auart_tx_empty(struct uart_port *u)
{
- if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE)
+ if ((readl(u->membase + AUART_STAT) &
+ (AUART_STAT_TXFE | AUART_STAT_BUSY)) == AUART_STAT_TXFE)
return TIOCSER_TEMT;
- else
- return 0;
+
+ return 0;
}
static void mxs_auart_start_tx(struct uart_port *u)
@@ -820,12 +955,14 @@ static struct uart_ops mxs_auart_ops = {
.start_tx = mxs_auart_start_tx,
.stop_tx = mxs_auart_stop_tx,
.stop_rx = mxs_auart_stop_rx,
+ .enable_ms = mxs_auart_enable_ms,
.break_ctl = mxs_auart_break_ctl,
.set_mctrl = mxs_auart_set_mctrl,
.get_mctrl = mxs_auart_get_mctrl,
.startup = mxs_auart_startup,
.shutdown = mxs_auart_shutdown,
.set_termios = mxs_auart_settermios,
+ .set_ldisc = mxs_auart_set_ldisc,
.type = mxs_auart_type,
.release_port = mxs_auart_release_port,
.request_port = mxs_auart_request_port,
@@ -1020,6 +1157,71 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
return 0;
}
+static bool mxs_auart_init_gpios(struct mxs_auart_port *s, struct device *dev)
+{
+ enum mctrl_gpio_idx i;
+ struct gpio_desc *gpiod;
+
+ s->gpios = mctrl_gpio_init(dev, 0);
+ if (IS_ERR_OR_NULL(s->gpios))
+ return false;
+
+ /* Block (enabled before) DMA option if RTS or CTS is GPIO line */
+ if (!RTS_AT_AUART() || !CTS_AT_AUART()) {
+ if (test_bit(MXS_AUART_RTSCTS, &s->flags))
+ dev_warn(dev,
+ "DMA and flow control via gpio may cause some problems. DMA disabled!\n");
+ clear_bit(MXS_AUART_RTSCTS, &s->flags);
+ }
+
+ for (i = 0; i < UART_GPIO_MAX; i++) {
+ gpiod = mctrl_gpio_to_gpiod(s->gpios, i);
+ if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN))
+ s->gpio_irq[i] = gpiod_to_irq(gpiod);
+ else
+ s->gpio_irq[i] = -EINVAL;
+ }
+
+ return true;
+}
+
+static void mxs_auart_free_gpio_irq(struct mxs_auart_port *s)
+{
+ enum mctrl_gpio_idx i;
+
+ for (i = 0; i < UART_GPIO_MAX; i++)
+ if (s->gpio_irq[i] >= 0)
+ free_irq(s->gpio_irq[i], s);
+}
+
+static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s)
+{
+ int *irq = s->gpio_irq;
+ enum mctrl_gpio_idx i;
+ int err = 0;
+
+ for (i = 0; (i < UART_GPIO_MAX) && !err; i++) {
+ if (irq[i] < 0)
+ continue;
+
+ irq_set_status_flags(irq[i], IRQ_NOAUTOEN);
+ err = request_irq(irq[i], mxs_auart_irq_handle,
+ IRQ_TYPE_EDGE_BOTH, dev_name(s->dev), s);
+ if (err)
+ dev_err(s->dev, "%s - Can't get %d irq\n",
+ __func__, irq[i]);
+ }
+
+ /*
+ * If something went wrong, rollback.
+ */
+ while (err && (--i >= 0))
+ if (irq[i] >= 0)
+ free_irq(irq[i], s);
+
+ return err;
+}
+
static int mxs_auart_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -1067,7 +1269,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.type = PORT_IMX;
s->port.dev = s->dev = &pdev->dev;
- s->ctrl = 0;
+ s->mctrl_prev = 0;
s->irq = platform_get_irq(pdev, 0);
s->port.irq = s->irq;
@@ -1077,13 +1279,24 @@ static int mxs_auart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s);
+ if (!mxs_auart_init_gpios(s, &pdev->dev))
+ dev_err(&pdev->dev,
+ "Failed to initialize GPIOs. The serial port may not work as expected\n");
+
+ /*
+ * Get the GPIO lines IRQ
+ */
+ ret = mxs_auart_request_gpio_irq(s);
+ if (ret)
+ goto out_free_irq;
+
auart_port[s->port.line] = s;
mxs_auart_reset(&s->port);
ret = uart_add_one_port(&auart_driver, &s->port);
if (ret)
- goto out_free_irq;
+ goto out_free_gpio_irq;
version = readl(s->port.membase + AUART_VERSION);
dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
@@ -1092,6 +1305,8 @@ static int mxs_auart_probe(struct platform_device *pdev)
return 0;
+out_free_gpio_irq:
+ mxs_auart_free_gpio_irq(s);
out_free_irq:
auart_port[pdev->id] = NULL;
free_irq(s->irq, s);
@@ -1111,6 +1326,7 @@ static int mxs_auart_remove(struct platform_device *pdev)
auart_port[pdev->id] = NULL;
+ mxs_auart_free_gpio_irq(s);
clk_put(s->clk);
free_irq(s->irq, s);
kfree(s);
@@ -1123,7 +1339,6 @@ static struct platform_driver mxs_auart_driver = {
.remove = mxs_auart_remove,
.driver = {
.name = "mxs-auart",
- .owner = THIS_MODULE,
.of_match_table = mxs_auart_dt_ids,
},
};
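
The mxs-auart changes route the modem-control lines through the shared serial_mctrl_gpio helpers and per-line GPIO interrupts instead of caching the last written mctrl value. A condensed sketch of the get/set halves of that pattern; the driver structure is illustrative, while the mctrl_gpio_* calls are the ones used in the patch:

#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"

struct demo_port {
	struct uart_port port;
	struct mctrl_gpios *gpios;	/* from mctrl_gpio_init() at probe time */
};

static void demo_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct demo_port *s = container_of(port, struct demo_port, port);

	/* ... program RTS/DTR in the hardware registers here ... */
	mctrl_gpio_set(s->gpios, mctrl);		/* drive GPIO-backed outputs */
}

static unsigned int demo_get_mctrl(struct uart_port *port)
{
	struct demo_port *s = container_of(port, struct demo_port, port);
	unsigned int mctrl = 0;

	/* ... OR in bits read from hardware status registers here ... */
	return mctrl_gpio_get(s->gpios, &mctrl);	/* merge GPIO-backed inputs */
}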
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 7a6745601d4e..207a0a032ed1 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -710,7 +710,6 @@ static struct platform_driver serial_netx_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index bf355050eab6..64f1bab7e9d7 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*
*/
+#include <linux/console.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -129,8 +130,15 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->dev = &ofdev->dev;
- if (type == PORT_TEGRA)
+ switch (type) {
+ case PORT_TEGRA:
port->handle_break = tegra_serial_handle_break;
+ break;
+
+ case PORT_RT2880:
+ port->iotype = UPIO_AU;
+ break;
+ }
return 0;
out:
@@ -240,6 +248,70 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_SERIAL_8250
+static void of_serial_suspend_8250(struct of_serial_info *info)
+{
+ struct uart_8250_port *port8250 = serial8250_get_port(info->line);
+ struct uart_port *port = &port8250->port;
+
+ serial8250_suspend_port(info->line);
+ if (info->clk && (!uart_console(port) || console_suspend_enabled))
+ clk_disable_unprepare(info->clk);
+}
+
+static void of_serial_resume_8250(struct of_serial_info *info)
+{
+ struct uart_8250_port *port8250 = serial8250_get_port(info->line);
+ struct uart_port *port = &port8250->port;
+
+ if (info->clk && (!uart_console(port) || console_suspend_enabled))
+ clk_prepare_enable(info->clk);
+
+ serial8250_resume_port(info->line);
+}
+#else
+static inline void of_serial_suspend_8250(struct of_serial_info *info)
+{
+}
+
+static inline void of_serial_resume_8250(struct of_serial_info *info)
+{
+}
+#endif
+
+static int of_serial_suspend(struct device *dev)
+{
+ struct of_serial_info *info = dev_get_drvdata(dev);
+
+ switch (info->type) {
+ case PORT_8250 ... PORT_MAX_8250:
+ of_serial_suspend_8250(info);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int of_serial_resume(struct device *dev)
+{
+ struct of_serial_info *info = dev_get_drvdata(dev);
+
+ switch (info->type) {
+ case PORT_8250 ... PORT_MAX_8250:
+ of_serial_resume_8250(info);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
+
/*
* A few common types, add more as needed.
*/
@@ -252,6 +324,7 @@ static struct of_device_id of_platform_serial_table[] = {
{ .compatible = "ns16850", .data = (void *)PORT_16850, },
{ .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
{ .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, },
+ { .compatible = "ralink,rt2880-uart", .data = (void *)PORT_RT2880, },
{ .compatible = "altr,16550-FIFO32",
.data = (void *)PORT_ALTR_16550_F32, },
{ .compatible = "altr,16550-FIFO64",
@@ -269,7 +342,6 @@ static struct of_device_id of_platform_serial_table[] = {
static struct platform_driver of_platform_serial_driver = {
.driver = {
.name = "of_serial",
- .owner = THIS_MODULE,
.of_match_table = of_platform_serial_table,
},
.probe = of_platform_serial_probe,
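
of_serial gains suspend/resume hooks that also gate the bus clock, skipping the clock toggle for an active console unless console suspend is enabled. The SIMPLE_DEV_PM_OPS() table defined above would normally be referenced from the platform driver's .driver.pm field; that wiring is not visible in the hunks shown here, so the sketch below is an assumption of how it is hooked up:

static struct platform_driver of_platform_serial_driver = {
	.driver = {
		.name		= "of_serial",
		.of_match_table	= of_platform_serial_table,
		.pm		= &of_serial_pm_ops,	/* assumed wiring, not shown above */
	},
	.probe  = of_platform_serial_probe,
	.remove = of_platform_serial_remove,
};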
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index c44ec5fb2994..2e1073da6719 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -46,7 +46,7 @@
#include <dt-bindings/gpio/gpio.h>
-#define OMAP_MAX_HSUART_PORTS 6
+#define OMAP_MAX_HSUART_PORTS 10
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
@@ -163,7 +163,6 @@ struct uart_omap_port {
u8 wakeups_enabled;
u32 features;
- struct serial_rs485 rs485;
int rts_gpio;
struct pm_qos_request pm_qos_request;
@@ -316,7 +315,7 @@ static void serial_omap_stop_tx(struct uart_port *port)
pm_runtime_get_sync(up->dev);
/* Handle RS-485 */
- if (up->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
if (up->scr & OMAP_UART_SCR_TX_EMPTY) {
/* THR interrupt is fired when both TX FIFO and TX
* shift register are empty. This means there's nothing
@@ -327,10 +326,12 @@ static void serial_omap_stop_tx(struct uart_port *port)
*/
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
- res = (up->rs485.flags & SER_RS485_RTS_AFTER_SEND) ? 1 : 0;
+ res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
+ 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
- if (up->rs485.delay_rts_after_send > 0)
- mdelay(up->rs485.delay_rts_after_send);
+ if (port->rs485.delay_rts_after_send > 0)
+ mdelay(
+ port->rs485.delay_rts_after_send);
gpio_set_value(up->rts_gpio, res);
}
} else {
@@ -353,8 +354,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
- if ((up->rs485.flags & SER_RS485_ENABLED) &&
- !(up->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
@@ -429,22 +430,22 @@ static void serial_omap_start_tx(struct uart_port *port)
pm_runtime_get_sync(up->dev);
/* Handle RS-485 */
- if (up->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
/* Fire THR interrupts when FIFO is below trigger level */
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
/* if rts not already enabled */
- res = (up->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
+ res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
if (gpio_get_value(up->rts_gpio) != res) {
gpio_set_value(up->rts_gpio, res);
- if (up->rs485.delay_rts_before_send > 0)
- mdelay(up->rs485.delay_rts_before_send);
+ if (port->rs485.delay_rts_before_send > 0)
+ mdelay(port->rs485.delay_rts_before_send);
}
}
- if ((up->rs485.flags & SER_RS485_ENABLED) &&
- !(up->rs485.flags & SER_RS485_RX_DURING_TX))
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX))
serial_omap_stop_rx(port);
serial_omap_enable_ier_thri(up);
@@ -1355,16 +1356,14 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
#endif
/* Enable or disable the rs485 support */
-static void
+static int
serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct uart_omap_port *up = to_uart_omap_port(port);
- unsigned long flags;
unsigned int mode;
int val;
pm_runtime_get_sync(up->dev);
- spin_lock_irqsave(&up->port.lock, flags);
/* Disable interrupts from this port */
mode = up->ier;
@@ -1372,7 +1371,7 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
serial_out(up, UART_IER, 0);
/* store new config */
- up->rs485 = *rs485conf;
+ port->rs485 = *rs485conf;
/*
* Just as a precaution, only allow rs485
@@ -1380,12 +1379,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
*/
if (gpio_is_valid(up->rts_gpio)) {
/* enable / disable rts */
- val = (up->rs485.flags & SER_RS485_ENABLED) ?
+ val = (port->rs485.flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
- val = (up->rs485.flags & val) ? 1 : 0;
+ val = (port->rs485.flags & val) ? 1 : 0;
gpio_set_value(up->rts_gpio, val);
} else
- up->rs485.flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
/* Enable interrupts */
up->ier = mode;
@@ -1394,45 +1393,18 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* If RS-485 is disabled, make sure the THR interrupt is fired when
* TX FIFO is below the trigger level.
*/
- if (!(up->rs485.flags & SER_RS485_ENABLED) &&
+ if (!(port->rs485.flags & SER_RS485_ENABLED) &&
(up->scr & OMAP_UART_SCR_TX_EMPTY)) {
up->scr &= ~OMAP_UART_SCR_TX_EMPTY;
serial_out(up, UART_OMAP_SCR, up->scr);
}
- spin_unlock_irqrestore(&up->port.lock, flags);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
-}
-
-static int
-serial_omap_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
-{
- struct serial_rs485 rs485conf;
-
- switch (cmd) {
- case TIOCSRS485:
- if (copy_from_user(&rs485conf, (void __user *) arg,
- sizeof(rs485conf)))
- return -EFAULT;
- serial_omap_config_rs485(port, &rs485conf);
- break;
-
- case TIOCGRS485:
- if (copy_to_user((void __user *) arg,
- &(to_uart_omap_port(port)->rs485),
- sizeof(rs485conf)))
- return -EFAULT;
- break;
-
- default:
- return -ENOIOCTLCMD;
- }
return 0;
}
-
static struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
@@ -1453,7 +1425,6 @@ static struct uart_ops serial_omap_pops = {
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
- .ioctl = serial_omap_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
@@ -1587,7 +1558,7 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device_node *np)
{
- struct serial_rs485 *rs485conf = &up->rs485;
+ struct serial_rs485 *rs485conf = &up->port.rs485;
u32 rs485_delay[2];
enum of_gpio_flags flags;
int ret;
@@ -1682,14 +1653,21 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.ops = &serial_omap_pops;
if (pdev->dev.of_node)
- up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
else
- up->port.line = pdev->id;
+ ret = pdev->id;
- if (up->port.line < 0) {
+ if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
- up->port.line);
- ret = -ENODEV;
+ ret);
+ goto err_port_line;
+ }
+ up->port.line = ret;
+
+ if (up->port.line >= OMAP_MAX_HSUART_PORTS) {
+ dev_err(&pdev->dev, "uart ID %d > MAX %d.\n", up->port.line,
+ OMAP_MAX_HSUART_PORTS);
+ ret = -ENXIO;
goto err_port_line;
}
@@ -1702,6 +1680,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.membase = base;
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
+ up->port.rs485_config = serial_omap_config_rs485;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
@@ -1747,8 +1726,6 @@ err_add_port:
pm_runtime_disable(&pdev->dev);
err_rs485:
err_port_line:
- dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
- pdev->id, __func__, ret);
return ret;
}
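
omap-serial drops its private TIOCSRS485/TIOCGRS485 ioctl handling and instead publishes a uart_port->rs485_config() callback; the serial core (see the serial_core.c hunks later in this series) owns the RS-485 state in port->rs485 and invokes the driver with the port lock held. A minimal shape of such a callback, illustrative only:

#include <linux/serial.h>
#include <linux/serial_core.h>

static int demo_config_rs485(struct uart_port *port,
			     struct serial_rs485 *rs485)
{
	/*
	 * Called by the serial core with port->lock held, so no sleeping
	 * and no copy_{to,from}_user() here. Apply the settings, then
	 * store what was actually programmed.
	 */
	if (rs485->flags & SER_RS485_ENABLED) {
		/* ... switch the transceiver / RTS handling to RS-485 ... */
	} else {
		/* ... restore plain RS-232 behaviour ... */
	}

	port->rs485 = *rs485;
	return 0;
}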
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index abbfedb84901..8f515799c9c1 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1352,7 +1352,8 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
static int pmz_poll_get_char(struct uart_port *port)
{
- struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+ struct uart_pmac_port *uap =
+ container_of(port, struct uart_pmac_port, port);
int tries = 2;
while (tries) {
@@ -1367,7 +1368,8 @@ static int pmz_poll_get_char(struct uart_port *port)
static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
{
- struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+ struct uart_pmac_port *uap =
+ container_of(port, struct uart_pmac_port, port);
/* Wait for the transmit buffer to empty. */
while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0)
@@ -1874,7 +1876,6 @@ static struct platform_driver pmz_driver = {
.remove = __exit_p(pmz_detach),
.driver = {
.name = "scc",
- .owner = THIS_MODULE,
},
};
@@ -1954,7 +1955,8 @@ static void __exit exit_pmz(void)
static void pmz_console_putchar(struct uart_port *port, int ch)
{
- struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+ struct uart_pmac_port *uap =
+ container_of(port, struct uart_pmac_port, port);
/* Wait for the transmit buffer to empty. */
while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0)
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 2ba24a45c97f..7a3bb9cf1f2e 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -126,7 +126,8 @@ static void pnx8xxx_timeout(unsigned long data)
*/
static void pnx8xxx_stop_tx(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
u32 ien;
/* Disable TX intr */
@@ -142,7 +143,8 @@ static void pnx8xxx_stop_tx(struct uart_port *port)
*/
static void pnx8xxx_start_tx(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
u32 ien;
/* Clear all pending TX intr */
@@ -158,7 +160,8 @@ static void pnx8xxx_start_tx(struct uart_port *port)
*/
static void pnx8xxx_stop_rx(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
u32 ien;
/* Disable RX intr */
@@ -174,7 +177,8 @@ static void pnx8xxx_stop_rx(struct uart_port *port)
*/
static void pnx8xxx_enable_ms(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
mod_timer(&sport->timer, jiffies);
}
@@ -313,14 +317,16 @@ static irqreturn_t pnx8xxx_int(int irq, void *dev_id)
*/
static unsigned int pnx8xxx_tx_empty(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
return serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA ? 0 : TIOCSER_TEMT;
}
static unsigned int pnx8xxx_get_mctrl(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
unsigned int mctrl = TIOCM_DSR;
unsigned int msr;
@@ -347,7 +353,8 @@ static void pnx8xxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
static void pnx8xxx_break_ctl(struct uart_port *port, int break_state)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
unsigned long flags;
unsigned int lcr;
@@ -363,7 +370,8 @@ static void pnx8xxx_break_ctl(struct uart_port *port, int break_state)
static int pnx8xxx_startup(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
int retval;
/*
@@ -397,7 +405,8 @@ static int pnx8xxx_startup(struct uart_port *port)
static void pnx8xxx_shutdown(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
int lcr;
/*
@@ -434,7 +443,8 @@ static void
pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
unsigned long flags;
unsigned int lcr_fcr, old_ien, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
@@ -551,7 +561,8 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
static const char *pnx8xxx_type(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
return sport->port.type == PORT_PNX8XXX ? "PNX8XXX" : NULL;
}
@@ -561,7 +572,8 @@ static const char *pnx8xxx_type(struct uart_port *port)
*/
static void pnx8xxx_release_port(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}
@@ -571,7 +583,8 @@ static void pnx8xxx_release_port(struct uart_port *port)
*/
static int pnx8xxx_request_port(struct uart_port *port)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
"pnx8xxx-uart") != NULL ? 0 : -EBUSY;
}
@@ -581,7 +594,8 @@ static int pnx8xxx_request_port(struct uart_port *port)
*/
static void pnx8xxx_config_port(struct uart_port *port, int flags)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
if (flags & UART_CONFIG_TYPE &&
pnx8xxx_request_port(&sport->port) == 0)
@@ -596,7 +610,8 @@ static void pnx8xxx_config_port(struct uart_port *port, int flags)
static int
pnx8xxx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_PNX8XXX)
@@ -662,7 +677,8 @@ static void __init pnx8xxx_init_ports(void)
static void pnx8xxx_console_putchar(struct uart_port *port, int ch)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ struct pnx8xxx_port *sport =
+ container_of(port, struct pnx8xxx_port, port);
int status;
do {
@@ -813,7 +829,6 @@ static int pnx8xxx_serial_remove(struct platform_device *pdev)
static struct platform_driver pnx8xxx_serial_driver = {
.driver = {
.name = "pnx8xxx-uart",
- .owner = THIS_MODULE,
},
.probe = pnx8xxx_serial_probe,
.remove = pnx8xxx_serial_remove,
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 21b7d8b86493..d5d062694bd3 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -223,6 +223,7 @@ static void serial_pxa_start_tx(struct uart_port *port)
}
}
+/* should hold up->port.lock */
static inline void check_modem_status(struct uart_pxa_port *up)
{
int status;
@@ -255,12 +256,14 @@ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
+ spin_lock(&up->port.lock);
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -930,7 +933,6 @@ static struct platform_driver serial_pxa_driver = {
.driver = {
.name = "pxa2xx-uart",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &serial_pxa_pm_ops,
#endif
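
The pxa change wraps the body of the interrupt handler in port->lock, so check_modem_status() and the RX/TX paths run under the same lock as the rest of the driver. A compressed sketch of that locking shape, with the handler body elided and the register offsets purely hypothetical:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial_core.h>

static irqreturn_t demo_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int iir;

	iir = readl(port->membase + 0x08);	/* hypothetical IIR offset */
	if (iir & 0x01)				/* "no interrupt pending" bit */
		return IRQ_NONE;

	spin_lock(&port->lock);
	/* ... receive_chars(), check_modem_status(), transmit_chars() ... */
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}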
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 753d4525b367..fd3d1329d48c 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -142,7 +142,8 @@ static void sa1100_timeout(unsigned long data)
*/
static void sa1100_stop_tx(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
@@ -155,7 +156,8 @@ static void sa1100_stop_tx(struct uart_port *port)
*/
static void sa1100_start_tx(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
@@ -168,7 +170,8 @@ static void sa1100_start_tx(struct uart_port *port)
*/
static void sa1100_stop_rx(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
u32 utcr3;
utcr3 = UART_GET_UTCR3(sport);
@@ -180,7 +183,8 @@ static void sa1100_stop_rx(struct uart_port *port)
*/
static void sa1100_enable_ms(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
mod_timer(&sport->timer, jiffies);
}
@@ -323,7 +327,8 @@ static irqreturn_t sa1100_int(int irq, void *dev_id)
*/
static unsigned int sa1100_tx_empty(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT;
}
@@ -342,7 +347,8 @@ static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
static void sa1100_break_ctl(struct uart_port *port, int break_state)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
unsigned long flags;
unsigned int utcr3;
@@ -358,7 +364,8 @@ static void sa1100_break_ctl(struct uart_port *port, int break_state)
static int sa1100_startup(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
int retval;
/*
@@ -387,7 +394,8 @@ static int sa1100_startup(struct uart_port *port)
static void sa1100_shutdown(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
/*
* Stop our timer.
@@ -409,7 +417,8 @@ static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
unsigned long flags;
unsigned int utcr0, old_utcr3, baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
@@ -512,7 +521,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
static const char *sa1100_type(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
return sport->port.type == PORT_SA1100 ? "SA1100" : NULL;
}
@@ -522,7 +532,8 @@ static const char *sa1100_type(struct uart_port *port)
*/
static void sa1100_release_port(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}
@@ -532,7 +543,8 @@ static void sa1100_release_port(struct uart_port *port)
*/
static int sa1100_request_port(struct uart_port *port)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
"sa11x0-uart") != NULL ? 0 : -EBUSY;
@@ -543,7 +555,8 @@ static int sa1100_request_port(struct uart_port *port)
*/
static void sa1100_config_port(struct uart_port *port, int flags)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
if (flags & UART_CONFIG_TYPE &&
sa1100_request_port(&sport->port) == 0)
@@ -558,7 +571,8 @@ static void sa1100_config_port(struct uart_port *port, int flags)
static int
sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
@@ -691,7 +705,8 @@ void __init sa1100_register_uart(int idx, int port)
#ifdef CONFIG_SERIAL_SA1100_CONSOLE
static void sa1100_console_putchar(struct uart_port *port, int ch)
{
- struct sa1100_port *sport = (struct sa1100_port *)port;
+ struct sa1100_port *sport =
+ container_of(port, struct sa1100_port, port);
while (!(UART_GET_UTSR1(sport) & UTSR1_TNF))
barrier();
@@ -883,7 +898,6 @@ static struct platform_driver sa11x0_serial_driver = {
.resume = sa1100_serial_resume,
.driver = {
.name = "sa11x0-uart",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index c78f43a481ce..19273e31d224 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -199,12 +199,14 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
}
}
-static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port)
+static inline struct s3c24xx_uart_info
+ *s3c24xx_port_to_info(struct uart_port *port)
{
return to_ourport(port)->info;
}
-static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
+static inline struct s3c2410_uartcfg
+ *s3c24xx_port_to_cfg(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport;
@@ -237,7 +239,7 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
struct uart_port *port = &ourport->port;
unsigned int ufcon, ch, flag, ufstat, uerstat;
unsigned long flags;
- int max_count = 64;
+ int max_count = port->fifosize;
spin_lock_irqsave(&port->lock, flags);
@@ -311,14 +313,14 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
ch, flag);
- ignore_char:
+ignore_char:
continue;
}
spin_unlock_irqrestore(&port->lock, flags);
tty_flip_buffer_push(&port->state->port);
- out:
+out:
return IRQ_HANDLED;
}
@@ -328,7 +330,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- int count = 256;
+ int count = port->fifosize;
spin_lock_irqsave(&port->lock, flags);
@@ -368,7 +370,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
if (uart_circ_empty(xmit))
s3c24xx_serial_stop_tx(port);
- out:
+out:
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
@@ -519,7 +521,7 @@ static int s3c24xx_serial_startup(struct uart_port *port)
return ret;
- err:
+err:
s3c24xx_serial_shutdown(port);
return ret;
}
@@ -559,11 +561,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ int timeout = 10000;
ourport->pm_level = level;
switch (level) {
case 3:
+ while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+ udelay(100);
+
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
@@ -841,8 +847,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
*/
port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
if (termios->c_iflag & INPCK)
- port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY;
-
+ port->read_status_mask |= S3C2410_UERSTAT_FRAME |
+ S3C2410_UERSTAT_PARITY;
/*
* Which character status flags should we ignore?
*/
@@ -969,10 +975,13 @@ static struct uart_driver s3c24xx_uart_drv = {
.minor = S3C24XX_SERIAL_MINOR,
};
-static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
+#define __PORT_LOCK_UNLOCKED(i) \
+ __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[i].port.lock)
+static struct s3c24xx_uart_port
+s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
[0] = {
.port = {
- .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
+ .lock = __PORT_LOCK_UNLOCKED(0),
.iotype = UPIO_MEM,
.uartclk = 0,
.fifosize = 16,
@@ -983,7 +992,7 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
},
[1] = {
.port = {
- .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
+ .lock = __PORT_LOCK_UNLOCKED(1),
.iotype = UPIO_MEM,
.uartclk = 0,
.fifosize = 16,
@@ -996,7 +1005,7 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
[2] = {
.port = {
- .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
+ .lock = __PORT_LOCK_UNLOCKED(2),
.iotype = UPIO_MEM,
.uartclk = 0,
.fifosize = 16,
@@ -1009,7 +1018,7 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
[3] = {
.port = {
- .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock),
+ .lock = __PORT_LOCK_UNLOCKED(3),
.iotype = UPIO_MEM,
.uartclk = 0,
.fifosize = 16,
@@ -1020,6 +1029,7 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
}
#endif
};
+#undef __PORT_LOCK_UNLOCKED
/* s3c24xx_serial_resetport
*
@@ -1102,11 +1112,12 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
s3c24xx_serial_set_termios(uport, termios, NULL);
}
- exit:
+exit:
return 0;
}
-static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
+static inline int
+s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
{
port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;
@@ -1114,19 +1125,22 @@ static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
+static inline void
+s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
{
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
-static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
+static inline int
+s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
{
return 0;
}
-static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
+static inline void
+s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
{
}
#endif
@@ -1226,24 +1240,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return 0;
}
-#ifdef CONFIG_SAMSUNG_CLOCK
-static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct uart_port *port = s3c24xx_dev_to_port(dev);
- struct s3c24xx_uart_port *ourport = to_ourport(port);
-
- if (IS_ERR(ourport->baudclk))
- return -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "* %s\n",
- ourport->baudclk->name ?: "(null)");
-}
-
-static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
-#endif
-
/* Device driver serial port probe */
static const struct of_device_id s3c24xx_uart_dt_match[];
@@ -1296,11 +1292,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
of_property_read_u32(np,
"samsung,uart-fifosize", &ourport->port.fifosize);
- if (!ourport->port.fifosize) {
- ourport->port.fifosize = (ourport->info->fifosize) ?
- ourport->info->fifosize :
- ourport->drv_data->fifosize[index];
- }
+ if (ourport->drv_data->fifosize[index])
+ ourport->port.fifosize = ourport->drv_data->fifosize[index];
+ else if (ourport->info->fifosize)
+ ourport->port.fifosize = ourport->info->fifosize;
probe_index++;
@@ -1329,12 +1324,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
*/
clk_disable_unprepare(ourport->clk);
-#ifdef CONFIG_SAMSUNG_CLOCK
- ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
- if (ret < 0)
- dev_err(&pdev->dev, "failed to add clock source attr.\n");
-#endif
-
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
@@ -1348,9 +1337,6 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
if (port) {
s3c24xx_serial_cpufreq_deregister(to_ourport(port));
-#ifdef CONFIG_SAMSUNG_CLOCK
- device_remove_file(&dev->dev, &dev_attr_clock_source);
-#endif
uart_remove_one_port(&s3c24xx_uart_drv, port);
}
@@ -1848,7 +1834,6 @@ static struct platform_driver samsung_serial_driver = {
.id_table = s3c24xx_serial_driver_ids,
.driver = {
.name = "samsung-uart",
- .owner = THIS_MODULE,
.pm = SERIAL_SAMSUNG_PM_OPS,
.of_match_table = of_match_ptr(s3c24xx_uart_dt_match),
},
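
The samsung changes shorten the statically initialised port table with a helper macro around __SPIN_LOCK_UNLOCKED(); the lock in each statically allocated uart_port is set up at build time rather than via spin_lock_init() at probe. A reduced sketch of that pattern, with an illustrative array name:

#include <linux/spinlock.h>
#include <linux/serial_core.h>

#define DEMO_PORT_LOCK(i) __SPIN_LOCK_UNLOCKED(demo_ports[i].port.lock)

static struct { struct uart_port port; } demo_ports[2] = {
	[0] = { .port = { .lock = DEMO_PORT_LOCK(0), .iotype = UPIO_MEM } },
	[1] = { .port = { .lock = DEMO_PORT_LOCK(1), .iotype = UPIO_MEM } },
};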
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 6246820d7f05..df9a384dfbda 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -304,8 +304,6 @@ struct sc16is7xx_one {
struct uart_port port;
struct work_struct tx_work;
struct work_struct md_work;
-
- struct serial_rs485 rs485;
};
struct sc16is7xx_port {
@@ -657,15 +655,15 @@ static void sc16is7xx_stop_tx(struct uart_port* port)
struct circ_buf *xmit = &one->port.state->xmit;
/* handle rs485 */
- if (one->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED) {
/* do nothing if current tx not yet completed */
int lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
if (!(lsr & SC16IS7XX_LSR_TEMT_BIT))
return;
if (uart_circ_empty(xmit) &&
- (one->rs485.delay_rts_after_send > 0))
- mdelay(one->rs485.delay_rts_after_send);
+ (port->rs485.delay_rts_after_send > 0))
+ mdelay(port->rs485.delay_rts_after_send);
}
sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
@@ -688,9 +686,9 @@ static void sc16is7xx_start_tx(struct uart_port *port)
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
/* handle rs485 */
- if ((one->rs485.flags & SER_RS485_ENABLED) &&
- (one->rs485.delay_rts_before_send > 0)) {
- mdelay(one->rs485.delay_rts_before_send);
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ (port->rs485.delay_rts_before_send > 0)) {
+ mdelay(port->rs485.delay_rts_before_send);
}
if (!work_pending(&one->tx_work))
@@ -830,51 +828,20 @@ static void sc16is7xx_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
}
-#if defined(TIOCSRS485) && defined(TIOCGRS485)
-static void sc16is7xx_config_rs485(struct uart_port *port,
+static int sc16is7xx_config_rs485(struct uart_port *port,
struct serial_rs485 *rs485)
{
- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
- one->rs485 = *rs485;
-
- if (one->rs485.flags & SER_RS485_ENABLED) {
+ if (port->rs485.flags & SER_RS485_ENABLED)
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_AUTO_RS485_BIT,
SC16IS7XX_EFCR_AUTO_RS485_BIT);
- } else {
+ else
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_AUTO_RS485_BIT,
0);
- }
-}
-#endif
-
-static int sc16is7xx_ioctl(struct uart_port *port, unsigned int cmd,
- unsigned long arg)
-{
-#if defined(TIOCSRS485) && defined(TIOCGRS485)
- struct serial_rs485 rs485;
+ port->rs485 = *rs485;
- switch (cmd) {
- case TIOCSRS485:
- if (copy_from_user(&rs485, (void __user *)arg, sizeof(rs485)))
- return -EFAULT;
-
- sc16is7xx_config_rs485(port, &rs485);
- return 0;
- case TIOCGRS485:
- if (copy_to_user((void __user *)arg,
- &(to_sc16is7xx_one(port, port)->rs485),
- sizeof(rs485)))
- return -EFAULT;
- return 0;
- default:
- break;
- }
-#endif
-
- return -ENOIOCTLCMD;
+ return 0;
}
static int sc16is7xx_startup(struct uart_port *port)
@@ -1000,7 +967,6 @@ static const struct uart_ops sc16is7xx_ops = {
.release_port = sc16is7xx_null_void,
.config_port = sc16is7xx_config_port,
.verify_port = sc16is7xx_verify_port,
- .ioctl = sc16is7xx_ioctl,
.pm = sc16is7xx_pm,
};
@@ -1130,6 +1096,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
+ s->p[i].port.rs485_config = sc16is7xx_config_rs485;
s->p[i].port.ops = &sc16is7xx_ops;
/* Disable all interrupts */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 75850f70b479..fcf803ffad19 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -1012,7 +1012,6 @@ static int sccnxp_remove(struct platform_device *pdev)
static struct platform_driver sccnxp_uart_driver = {
.driver = {
.name = SCCNXP_NAME,
- .owner = THIS_MODULE,
},
.probe = sccnxp_probe,
.remove = sccnxp_remove,
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 53d7c31ce098..48e6e41636b2 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -319,16 +319,16 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
/* Overrun error */
- flag |= TTY_OVERRUN;
+ flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_err(tup->uport.dev, "Got overrun errors\n");
} else if (lsr & UART_LSR_PE) {
/* Parity error */
- flag |= TTY_PARITY;
+ flag = TTY_PARITY;
tup->uport.icount.parity++;
dev_err(tup->uport.dev, "Got Parity errors\n");
} else if (lsr & UART_LSR_FE) {
- flag |= TTY_FRAME;
+ flag = TTY_FRAME;
tup->uport.icount.frame++;
dev_err(tup->uport.dev, "Got frame errors\n");
} else if (lsr & UART_LSR_BI) {
@@ -1034,6 +1034,20 @@ fail_rx_dma:
return ret;
}
+/*
+ * Flush any TX data submitted for DMA and PIO. Called when the
+ * TX circular buffer is reset.
+ */
+static void tegra_uart_flush_buffer(struct uart_port *u)
+{
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+
+ tup->tx_bytes = 0;
+ if (tup->tx_dma_chan)
+ dmaengine_terminate_all(tup->tx_dma_chan);
+ return;
+}
+
static void tegra_uart_shutdown(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
@@ -1046,6 +1060,8 @@ static void tegra_uart_shutdown(struct uart_port *u)
tegra_uart_dma_channel_free(tup, true);
tegra_uart_dma_channel_free(tup, false);
free_irq(u->irq, tup);
+
+ tegra_uart_flush_buffer(u);
}
static void tegra_uart_enable_ms(struct uart_port *u)
@@ -1174,20 +1190,6 @@ static void tegra_uart_set_termios(struct uart_port *u,
return;
}
-/*
- * Flush any TX data submitted for DMA and PIO. Called when the
- * TX circular buffer is reset.
- */
-static void tegra_uart_flush_buffer(struct uart_port *u)
-{
- struct tegra_uart_port *tup = to_tegra_uport(u);
-
- tup->tx_bytes = 0;
- if (tup->tx_dma_chan)
- dmaengine_terminate_all(tup->tx_dma_chan);
- return;
-}
-
static const char *tegra_uart_type(struct uart_port *u)
{
return TEGRA_UART_TYPE;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index eaeb9a02c7fe..57ca61b14670 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -61,7 +61,7 @@ static void uart_port_shutdown(struct tty_port *port);
static int uart_dcd_enabled(struct uart_port *uport)
{
- return uport->status & UPSTAT_DCD_ENABLE;
+ return !!(uport->status & UPSTAT_DCD_ENABLE);
}
/*
@@ -436,7 +436,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
EXPORT_SYMBOL(uart_get_divisor);
-/* FIXME: Consistent locking policy */
+/* Caller holds port mutex */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios)
{
@@ -537,9 +537,10 @@ static int uart_write(struct tty_struct *tty,
count -= c;
ret += c;
}
+
+ __uart_start(tty);
spin_unlock_irqrestore(&port->lock, flags);
- uart_start(tty);
return ret;
}
@@ -618,7 +619,7 @@ static void uart_throttle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
- uint32_t mask = 0;
+ upf_t mask = 0;
if (I_IXOFF(tty))
mask |= UPF_SOFT_FLOW;
@@ -641,7 +642,7 @@ static void uart_unthrottle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
- uint32_t mask = 0;
+ upf_t mask = 0;
if (I_IXOFF(tty))
mask |= UPF_SOFT_FLOW;
@@ -1151,6 +1152,47 @@ static int uart_get_icount(struct tty_struct *tty,
return 0;
}
+static int uart_get_rs485_config(struct uart_port *port,
+ struct serial_rs485 __user *rs485)
+{
+ unsigned long flags;
+ struct serial_rs485 aux;
+
+ spin_lock_irqsave(&port->lock, flags);
+ aux = port->rs485;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (copy_to_user(rs485, &aux, sizeof(aux)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int uart_set_rs485_config(struct uart_port *port,
+ struct serial_rs485 __user *rs485_user)
+{
+ struct serial_rs485 rs485;
+ int ret;
+ unsigned long flags;
+
+ if (!port->rs485_config)
+ return -ENOIOCTLCMD;
+
+ if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = port->rs485_config(port, &rs485);
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(rs485_user, &port->rs485, sizeof(port->rs485)))
+ return -EFAULT;
+
+ return 0;
+}
+
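Editorial illustration: a hedged userspace sketch of how these two new helpers are exercised through the ioctl layer; the device node and delay values are assumptions, not taken from the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
    struct serial_rs485 rs485;
    int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);   /* assumed node */

    if (fd < 0)
        return 1;

    memset(&rs485, 0, sizeof(rs485));
    rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
    rs485.delay_rts_before_send = 1;   /* milliseconds, illustrative */
    rs485.delay_rts_after_send = 1;

    if (ioctl(fd, TIOCSRS485, &rs485) < 0)     /* -> uart_set_rs485_config() */
        perror("TIOCSRS485");

    if (ioctl(fd, TIOCGRS485, &rs485) == 0)    /* -> uart_get_rs485_config() */
        printf("rs485 flags now 0x%x\n", rs485.flags);

    close(fd);
    return 0;
}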
/*
* Called via sys_ioctl. We can use spin_lock_irq() here.
*/
@@ -1173,11 +1215,15 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd,
break;
case TIOCSSERIAL:
+ down_write(&tty->termios_rwsem);
ret = uart_set_info_user(tty, state, uarg);
+ up_write(&tty->termios_rwsem);
break;
case TIOCSERCONFIG:
+ down_write(&tty->termios_rwsem);
ret = uart_do_autoconfig(tty, state);
+ up_write(&tty->termios_rwsem);
break;
case TIOCSERGWILD: /* obsolete */
@@ -1217,11 +1263,19 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd,
* All these rely on hardware being present and need to be
* protected against the tty being hung up.
*/
+
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
ret = uart_get_lsr_info(tty, state, uarg);
break;
+ case TIOCGRS485:
+ ret = uart_get_rs485_config(state->uart_port, uarg);
+ break;
+
+ case TIOCSRS485:
+ ret = uart_set_rs485_config(state->uart_port, uarg);
+ break;
default: {
struct uart_port *uport = state->uart_port;
if (uport->ops->ioctl)
@@ -1240,8 +1294,11 @@ static void uart_set_ldisc(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct uart_port *uport = state->uart_port;
- if (uport->ops->set_ldisc)
- uport->ops->set_ldisc(uport, tty->termios.c_line);
+ if (uport->ops->set_ldisc) {
+ mutex_lock(&state->port.mutex);
+ uport->ops->set_ldisc(uport, &tty->termios);
+ mutex_unlock(&state->port.mutex);
+ }
}
static void uart_set_termios(struct tty_struct *tty,
@@ -1278,7 +1335,9 @@ static void uart_set_termios(struct tty_struct *tty,
return;
}
+ mutex_lock(&state->port.mutex);
uart_change_speed(tty, state, old_termios);
+ mutex_unlock(&state->port.mutex);
/* reload cflag from termios; port driver may have overridden flags */
cflag = tty->termios.c_cflag;
@@ -1331,8 +1390,16 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
struct uart_port *uport;
unsigned long flags;
- if (!state)
+ if (!state) {
+ struct uart_driver *drv = tty->driver->driver_state;
+
+ state = drv->state + tty->index;
+ port = &state->port;
+ spin_lock_irq(&port->lock);
+ --port->count;
+ spin_unlock_irq(&port->lock);
return;
+ }
uport = state->uart_port;
port = &state->port;
@@ -1361,10 +1428,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&port->mutex);
uart_shutdown(tty, state);
- uart_flush_buffer(tty);
-
- tty_ldisc_flush(tty);
-
tty_port_tty_set(port, NULL);
tty->closing = 0;
spin_lock_irqsave(&port->lock, flags);
@@ -1372,8 +1435,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
- msleep_interruptible(
- jiffies_to_msecs(port->close_delay));
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1391,6 +1453,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&port->close_wait);
mutex_unlock(&port->mutex);
+
+ tty_ldisc_flush(tty);
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
@@ -1552,6 +1616,10 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
pr_debug("uart_open(%d) called\n", line);
+ spin_lock_irq(&port->lock);
+ ++port->count;
+ spin_unlock_irq(&port->lock);
+
/*
* We take the semaphore here to guarantee that we won't be re-entered
* while allocating the state structure, or while we request any IRQs
@@ -1564,17 +1632,11 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
goto end;
}
- port->count++;
if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
retval = -ENXIO;
- goto err_dec_count;
+ goto err_unlock;
}
- /*
- * Once we set tty->driver_data here, we are guaranteed that
- * uart_close() will decrement the driver module use count.
- * Any failures from here onwards should not touch the count.
- */
tty->driver_data = state;
state->uart_port->state = state;
state->port.low_latency =
@@ -1595,8 +1657,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
end:
return retval;
-err_dec_count:
- port->count--;
+err_unlock:
mutex_unlock(&port->mutex);
goto end;
}
@@ -2092,6 +2153,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
case UPIO_MEM:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_AU:
case UPIO_TSI:
snprintf(address, sizeof(address),
@@ -2339,8 +2401,6 @@ int uart_register_driver(struct uart_driver *drv)
tty_port_init(port);
port->ops = &uart_port_ops;
- port->close_delay = HZ / 2; /* .5 seconds */
- port->closing_wait = 30 * HZ;/* 30 seconds */
}
retval = tty_register_driver(normal);
@@ -2589,11 +2649,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
goto out;
}
+ /* Link the port to the driver state table and vice versa */
state->uart_port = uport;
- state->pm_state = UART_PM_STATE_UNDEFINED;
+ uport->state = state;
+ state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
- uport->state = state;
/*
* If this port is a console, then the spinlock is already
@@ -2736,6 +2797,7 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
(port1->hub6 == port2->hub6);
case UPIO_MEM:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_AU:
case UPIO_TSI:
return (port1->mapbase == port2->mapbase);
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index a3035f997b98..a38596c5194e 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -44,15 +44,21 @@ static const struct {
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
+ struct gpio_desc *desc_array[UART_GPIO_MAX];
+ int value_array[UART_GPIO_MAX];
+ unsigned int count = 0;
if (IS_ERR_OR_NULL(gpios))
return;
for (i = 0; i < UART_GPIO_MAX; i++)
if (!IS_ERR_OR_NULL(gpios->gpio[i]) &&
- mctrl_gpios_desc[i].dir_out)
- gpiod_set_value(gpios->gpio[i],
- !!(mctrl & mctrl_gpios_desc[i].mctrl));
+ mctrl_gpios_desc[i].dir_out) {
+ desc_array[count] = gpios->gpio[i];
+ value_array[count] = !!(mctrl & mctrl_gpios_desc[i].mctrl);
+ count++;
+ }
+ gpiod_set_array(count, desc_array, value_array);
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
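A small hypothetical consumer sketch of the batched call mctrl_gpio_set() now uses; rts_desc/dtr_desc stand in for descriptors obtained elsewhere (e.g. via gpiod_get()).

#include <linux/types.h>
#include <linux/gpio/consumer.h>

/* Batch two output lines in one gpiod_set_array() call, mirroring the
 * loop above that collects all dir_out signals before setting them.
 */
static void example_set_rts_dtr(struct gpio_desc *rts_desc,
                                struct gpio_desc *dtr_desc,
                                bool rts, bool dtr)
{
    struct gpio_desc *descs[2] = { rts_desc, dtr_desc };
    int values[2] = { rts, dtr };

    gpiod_set_array(2, descs, values);
}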
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index af115645c51f..f80312eed4fd 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1165,7 +1165,6 @@ static struct platform_driver serial_txx9_plat_driver = {
#endif
.driver = {
.name = "serial_txx9",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index eb17c7124e72..e032963989fc 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1812,9 +1812,6 @@ static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq,
err = DIV_ROUND_CLOSEST(freq, ((br + 1) * bps * sr *
(1 << (2 * c + 1)) / 1000)) -
1000;
- if (err < 0)
- continue;
-
/* Calc recv margin
* M: Receive margin (%)
* N: Ratio of bit rate to clock (N = sampling rate)
@@ -1829,7 +1826,7 @@ static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq,
*/
recv_margin = abs((500 -
DIV_ROUND_CLOSEST(1000, sr << 1)) / 10);
- if (min_err > err) {
+ if (abs(min_err) > abs(err)) {
min_err = err;
recv_max_margin = recv_margin;
} else if ((min_err == err) &&
@@ -2638,7 +2635,6 @@ static struct platform_driver sci_driver = {
.remove = sci_remove,
.driver = {
.name = "sh-sci",
- .owner = THIS_MODULE,
.pm = &sci_dev_pm_ops,
.of_match_table = of_match_ptr(of_sci_match),
},
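A standalone sketch (plain userspace C, with DIV_ROUND_CLOSEST re-derived) of the selection rule the sh-sci hunk above switches to: candidate divisors are now compared by absolute per-mille error instead of discarding fast-side (negative-error) candidates outright. The example clock and divisor numbers are made up.

#include <stdio.h>
#include <stdlib.h>

/* rounding helper re-derived for this demo */
#define DIV_ROUND_CLOSEST(x, d)  (((x) + (d) / 2) / (d))

/* per-mille rate error of one (sr, c, br) candidate, following the
 * formula in sci_baud_calc_hscif() */
static long candidate_err(unsigned long freq, unsigned int bps,
                          int sr, int c, int br)
{
    return (long)DIV_ROUND_CLOSEST(freq, ((br + 1) * bps * sr *
                                          (1 << (2 * c + 1)) / 1000)) - 1000;
}

int main(void)
{
    long errs[] = { -3, 5, -1, 8 };   /* made-up candidate errors */
    long min_err = errs[0];
    size_t i;

    for (i = 1; i < sizeof(errs) / sizeof(errs[0]); i++)
        if (labs(min_err) > labs(errs[i]))  /* new rule: smallest magnitude wins */
            min_err = errs[i];

    /* 48 MHz clock, 115200 bps, sr=8, c=0, br=25 -> roughly +2 per mille */
    printf("example candidate error: %ld\n",
           candidate_err(48000000UL, 115200, 8, 0, 25));
    printf("best of the made-up list: %ld per mille\n", min_err);  /* -1 */
    return 0;
}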
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 4102192687ee..b269f6bd16d6 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1032,10 +1032,19 @@ static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- if (!state)
+ if (!state) {
+ if (sirfport->is_bt_uart) {
+ clk_prepare_enable(sirfport->clk_noc);
+ clk_prepare_enable(sirfport->clk_general);
+ }
clk_prepare_enable(sirfport->clk);
- else
+ } else {
clk_disable_unprepare(sirfport->clk);
+ if (sirfport->is_bt_uart) {
+ clk_disable_unprepare(sirfport->clk_general);
+ clk_disable_unprepare(sirfport->clk_noc);
+ }
+ }
}
static int sirfsoc_uart_startup(struct uart_port *port)
@@ -1378,12 +1387,26 @@ usp_no_flow_control:
}
port->irq = res->start;
- sirfport->clk = clk_get(&pdev->dev, NULL);
+ sirfport->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sirfport->clk)) {
ret = PTR_ERR(sirfport->clk);
goto err;
}
port->uartclk = clk_get_rate(sirfport->clk);
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-bt-uart")) {
+ sirfport->clk_general = devm_clk_get(&pdev->dev, "general");
+ if (IS_ERR(sirfport->clk_general)) {
+ ret = PTR_ERR(sirfport->clk_general);
+ goto err;
+ }
+ sirfport->clk_noc = devm_clk_get(&pdev->dev, "noc");
+ if (IS_ERR(sirfport->clk_noc)) {
+ ret = PTR_ERR(sirfport->clk_noc);
+ goto err;
+ }
+ sirfport->is_bt_uart = true;
+ } else
+ sirfport->is_bt_uart = false;
port->ops = &sirfsoc_uart_ops;
spin_lock_init(&port->lock);
@@ -1392,7 +1415,7 @@ usp_no_flow_control:
ret = uart_add_one_port(&sirfsoc_uart_drv, port);
if (ret != 0) {
dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
- goto port_err;
+ goto err;
}
sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
@@ -1421,8 +1444,6 @@ alloc_coherent_err:
sirfport->rx_dma_items[j].xmit.buf,
sirfport->rx_dma_items[j].dma_addr);
dma_release_channel(sirfport->rx_dma_chan);
-port_err:
- clk_put(sirfport->clk);
err:
return ret;
}
@@ -1431,7 +1452,6 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
{
struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
- clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
if (sirfport->rx_dma_chan) {
int i;
@@ -1477,7 +1497,6 @@ static struct platform_driver sirfsoc_uart_driver = {
.remove = sirfsoc_uart_remove,
.driver = {
.name = SIRFUART_PORT_NAME,
- .owner = THIS_MODULE,
.of_match_table = sirfsoc_uart_ids,
.pm = &sirfsoc_uart_pm_ops,
},
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 6a7ebf7ef130..275d03893990 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -417,6 +417,10 @@ struct sirfsoc_uart_port {
struct uart_port port;
struct clk *clk;
+ /* UART6 for BT usage on the A7DA platform needs multiple clock sources */
+ bool is_bt_uart;
+ struct clk *clk_general;
+ struct clk *clk_noc;
/* for SiRFmarco, there are SET/CLR for UART_INT_EN */
bool is_marco;
struct sirfsoc_uart_register *uart_reg;
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index a3165842ca29..712b03a076b8 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -895,7 +895,6 @@ static struct platform_driver asc_serial_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &asc_serial_pm_ops,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(asc_match),
},
};
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 25d43ce8b318..534754440fa8 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -626,7 +626,6 @@ MODULE_DEVICE_TABLE(of, hv_match);
static struct platform_driver hv_driver = {
.driver = {
.name = "hv",
- .owner = THIS_MODULE,
.of_match_table = hv_match,
},
.probe = hv_probe,
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index b339fe4811cd..b5e3195b3697 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -345,7 +345,8 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
/* port->lock is not held. */
static unsigned int sunsab_tx_empty(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
int ret;
/* Do not need a lock for a state test like this. */
@@ -360,7 +361,8 @@ static unsigned int sunsab_tx_empty(struct uart_port *port)
/* port->lock held by caller. */
static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
if (mctrl & TIOCM_RTS) {
up->cached_mode &= ~SAB82532_MODE_FRTS;
@@ -383,7 +385,8 @@ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* port->lock is held by caller and interrupts are disabled. */
static unsigned int sunsab_get_mctrl(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned char val;
unsigned int result;
@@ -404,7 +407,8 @@ static unsigned int sunsab_get_mctrl(struct uart_port *port)
/* port->lock held by caller. */
static void sunsab_stop_tx(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
up->interrupt_mask1 |= SAB82532_IMR1_XPR;
writeb(up->interrupt_mask1, &up->regs->w.imr1);
@@ -432,7 +436,8 @@ static void sunsab_tx_idle(struct uart_sunsab_port *up)
/* port->lock held by caller. */
static void sunsab_start_tx(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
struct circ_buf *xmit = &up->port.state->xmit;
int i;
@@ -465,7 +470,8 @@ static void sunsab_start_tx(struct uart_port *port)
/* port->lock is not held. */
static void sunsab_send_xchar(struct uart_port *port, char ch)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
if (ch == __DISABLED_CHAR)
@@ -482,7 +488,8 @@ static void sunsab_send_xchar(struct uart_port *port, char ch)
/* port->lock held by caller. */
static void sunsab_stop_rx(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
up->interrupt_mask0 |= SAB82532_IMR0_TCD;
writeb(up->interrupt_mask1, &up->regs->w.imr0);
@@ -491,7 +498,8 @@ static void sunsab_stop_rx(struct uart_port *port)
/* port->lock is not held. */
static void sunsab_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
unsigned char val;
@@ -514,7 +522,8 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
/* port->lock is not held. */
static int sunsab_startup(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
unsigned char tmp;
int err = request_irq(up->port.irq, sunsab_interrupt,
@@ -585,7 +594,8 @@ static int sunsab_startup(struct uart_port *port)
/* port->lock is not held. */
static void sunsab_shutdown(struct uart_port *port)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
@@ -771,7 +781,8 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
unsigned long flags;
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
@@ -840,7 +851,8 @@ static struct uart_sunsab_port *sunsab_ports;
static void sunsab_console_putchar(struct uart_port *port, int c)
{
- struct uart_sunsab_port *up = (struct uart_sunsab_port *)port;
+ struct uart_sunsab_port *up =
+ container_of(port, struct uart_sunsab_port, port);
sunsab_tec_wait(up);
writeb(c, &up->regs->w.tic);
@@ -1092,7 +1104,6 @@ MODULE_DEVICE_TABLE(of, sab_match);
static struct platform_driver sab_driver = {
.driver = {
.name = "sab",
- .owner = THIS_MODULE,
.of_match_table = sab_match,
},
.probe = sab_probe,
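The sunsab hunks above (and the sunsu/sunzilog ones that follow) replace raw pointer casts with container_of(). A tiny standalone illustration of why that is the safer idiom, with the macro re-derived and the structures trimmed down for the demo:

#include <stddef.h>
#include <stdio.h>

/* userspace re-derivation of the kernel macro, for demonstration only */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct uart_port_demo   { int line; };
struct sunsab_port_demo {
    int cached_mode;                 /* 'port' is deliberately NOT first */
    struct uart_port_demo port;
};

int main(void)
{
    struct sunsab_port_demo up = { .cached_mode = 42, .port = { .line = 1 } };
    struct uart_port_demo *port = &up.port;

    /* a raw (struct sunsab_port_demo *)port cast would be wrong here;
     * container_of() recovers the enclosing structure regardless of
     * where 'port' sits inside it. */
    struct sunsab_port_demo *back =
        container_of(port, struct sunsab_port_demo, port);

    printf("cached_mode = %d\n", back->cached_mode);   /* prints 42 */
    return 0;
}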
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 5326ae195e5f..e124d2e88996 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -264,7 +264,8 @@ static inline void __stop_tx(struct uart_sunsu_port *p)
static void sunsu_stop_tx(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
__stop_tx(up);
@@ -279,7 +280,8 @@ static void sunsu_stop_tx(struct uart_port *port)
static void sunsu_start_tx(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
@@ -297,7 +299,8 @@ static void sunsu_start_tx(struct uart_port *port)
static void sunsu_stop_rx(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
@@ -306,7 +309,8 @@ static void sunsu_stop_rx(struct uart_port *port)
static void sunsu_enable_ms(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
@@ -543,7 +547,8 @@ static irqreturn_t sunsu_kbd_ms_interrupt(int irq, void *dev_id)
static unsigned int sunsu_tx_empty(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
unsigned int ret;
@@ -556,7 +561,8 @@ static unsigned int sunsu_tx_empty(struct uart_port *port)
static unsigned int sunsu_get_mctrl(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned char status;
unsigned int ret;
@@ -576,7 +582,8 @@ static unsigned int sunsu_get_mctrl(struct uart_port *port)
static void sunsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned char mcr = 0;
if (mctrl & TIOCM_RTS)
@@ -595,7 +602,8 @@ static void sunsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void sunsu_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
@@ -609,7 +617,8 @@ static void sunsu_break_ctl(struct uart_port *port, int break_state)
static int sunsu_startup(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
int retval;
@@ -719,7 +728,8 @@ static int sunsu_startup(struct uart_port *port)
static void sunsu_shutdown(struct uart_port *port)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned long flags;
/*
@@ -767,7 +777,8 @@ static void
sunsu_change_speed(struct uart_port *port, unsigned int cflag,
unsigned int iflag, unsigned int quot)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
unsigned char cval, fcr = 0;
unsigned long flags;
@@ -918,7 +929,8 @@ static int sunsu_request_port(struct uart_port *port)
static void sunsu_config_port(struct uart_port *port, int flags)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
if (flags & UART_CONFIG_TYPE) {
/*
@@ -1277,7 +1289,8 @@ static __inline__ void wait_for_xmitr(struct uart_sunsu_port *up)
static void sunsu_console_putchar(struct uart_port *port, int ch)
{
- struct uart_sunsu_port *up = (struct uart_sunsu_port *)port;
+ struct uart_sunsu_port *up =
+ container_of(port, struct uart_sunsu_port, port);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
@@ -1537,7 +1550,6 @@ MODULE_DEVICE_TABLE(of, su_match);
static struct platform_driver su_driver = {
.driver = {
.name = "su",
- .owner = THIS_MODULE,
.of_match_table = su_match,
},
.probe = su_probe,
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 02df3940b95e..8b6ace341029 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -644,7 +644,8 @@ static unsigned int sunzilog_get_mctrl(struct uart_port *port)
/* The port lock is held and interrupts are disabled. */
static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits;
@@ -668,7 +669,8 @@ static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* The port lock is held and interrupts are disabled. */
static void sunzilog_stop_tx(struct uart_port *port)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
up->flags |= SUNZILOG_FLAG_TX_STOPPED;
}
@@ -676,7 +678,8 @@ static void sunzilog_stop_tx(struct uart_port *port)
/* The port lock is held and interrupts are disabled. */
static void sunzilog_start_tx(struct uart_port *port)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char status;
@@ -736,7 +739,8 @@ static void sunzilog_stop_rx(struct uart_port *port)
/* The port lock is held. */
static void sunzilog_enable_ms(struct uart_port *port)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char new_reg;
@@ -752,7 +756,8 @@ static void sunzilog_enable_ms(struct uart_port *port)
/* The port lock is not held. */
static void sunzilog_break_ctl(struct uart_port *port, int break_state)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
@@ -938,7 +943,8 @@ static void
sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
unsigned long flags;
int baud, brg;
@@ -998,7 +1004,8 @@ static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *se
static int sunzilog_get_poll_char(struct uart_port *port)
{
unsigned char ch, r1;
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
struct zilog_channel __iomem *channel
= ZILOG_CHANNEL_FROM_PORT(&up->port);
@@ -1032,7 +1039,8 @@ static int sunzilog_get_poll_char(struct uart_port *port)
static void sunzilog_put_poll_char(struct uart_port *port,
unsigned char ch)
{
- struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+ struct uart_sunzilog_port *up =
+ container_of(port, struct uart_sunzilog_port, port);
sunzilog_putchar(&up->port, ch);
}
@@ -1533,7 +1541,6 @@ MODULE_DEVICE_TABLE(of, zs_match);
static struct platform_driver zs_driver = {
.driver = {
.name = "zs",
- .owner = THIS_MODULE,
.of_match_table = zs_match,
},
.probe = zs_probe,
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 0d11d5032b93..512c162634a3 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -273,6 +273,8 @@ static void timbuart_shutdown(struct uart_port *port)
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
iowrite32(0, port->membase + TIMBUART_IER);
+
+ timbuart_flush_buffer(port);
}
static int get_bindex(int baud)
@@ -501,7 +503,6 @@ static int timbuart_remove(struct platform_device *dev)
static struct platform_driver timbuart_platform_driver = {
.driver = {
.name = "timb-uart",
- .owner = THIS_MODULE,
},
.probe = timbuart_probe,
.remove = timbuart_remove,
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c107a0f0e72f..14d10fcfd210 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1485,7 +1485,6 @@ MODULE_DEVICE_TABLE(of, ucc_uart_match);
static struct platform_driver ucc_uart_of_driver = {
.driver = {
.name = "ucc_uart",
- .owner = THIS_MODULE,
.of_match_table = ucc_uart_match,
},
.probe = ucc_uart_probe,
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index d7f9d622cdcb..485de53c5d75 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -954,7 +954,6 @@ static struct platform_driver siu_device_driver = {
.resume = siu_resume,
.driver = {
.name = "SIU",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index b2bc9e8ba048..4079ec56f5f9 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -745,7 +745,6 @@ static struct platform_driver vt8500_platform_driver = {
.remove = vt8500_serial_remove,
.driver = {
.name = "vt8500_serial",
- .owner = THIS_MODULE,
.of_match_table = wmt_dt_ids,
},
};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 200c1af2141b..542bab37e502 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -133,6 +133,15 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IXR_BRK 0x80000000
/*
+ * Modem Control register:
+ * The read/write Modem Control register controls the interface with the modem
+ * or data set, or a peripheral device emulating a modem.
+ */
+#define CDNS_UART_MODEMCR_FCM 0x00000020 /* Automatic flow control mode */
+#define CDNS_UART_MODEMCR_RTS 0x00000002 /* Request to send output control */
+#define CDNS_UART_MODEMCR_DTR 0x00000001 /* Data Terminal Ready */
+
+/*
* Channel Status Register:
* The channel status register (CSR) is provided to enable the control logic
* to monitor the status of bits in the channel interrupt status register,
@@ -915,7 +924,18 @@ static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* N/A */
+ u32 val;
+
+ val = cdns_uart_readl(CDNS_UART_MODEMCR_OFFSET);
+
+ val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
+
+ if (mctrl & TIOCM_RTS)
+ val |= CDNS_UART_MODEMCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ val |= CDNS_UART_MODEMCR_DTR;
+
+ cdns_uart_writel(val, CDNS_UART_MODEMCR_OFFSET);
}
#ifdef CONFIG_CONSOLE_POLL
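The new cdns_uart_set_mctrl() body is reached through the standard modem-control ioctls; a short userspace sketch of that path (the /dev/ttyPS0 node is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/ttyPS0", O_RDWR | O_NOCTTY);  /* assumed node */
    int bits = TIOCM_RTS | TIOCM_DTR;
    int status;

    if (fd < 0)
        return 1;

    ioctl(fd, TIOCMBIS, &bits);     /* assert RTS+DTR -> set_mctrl() */
    ioctl(fd, TIOCMBIC, &bits);     /* deassert them again */

    if (ioctl(fd, TIOCMGET, &status) == 0)
        printf("modem lines: 0x%x\n", status);

    close(fd);
    return 0;
}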
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 143deb62467d..3605103fc1ac 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -202,14 +202,16 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
/**
* tty_buffer_flush - flush full tty buffers
* @tty: tty to flush
+ * @ld: optional ldisc ptr (must be referenced)
*
- * flush all the buffers containing receive data.
+ * flush all the buffers containing receive data. If ld != NULL,
+ * flush the ldisc input buffer.
*
* Locking: takes buffer lock to ensure single-threaded flip buffer
* 'consumer'
*/
-void tty_buffer_flush(struct tty_struct *tty)
+void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
struct tty_port *port = tty->port;
struct tty_bufhead *buf = &port->buf;
@@ -223,6 +225,10 @@ void tty_buffer_flush(struct tty_struct *tty)
buf->head = next;
}
buf->head->read = buf->head->commit;
+
+ if (ld && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+
atomic_dec(&buf->priority);
mutex_unlock(&buf->lock);
}
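A minimal caller-side sketch of the new second argument, assuming the usual ldisc reference pattern; passing NULL preserves the old behaviour of flushing only the flip buffers.

#include <linux/tty.h>

/* Sketch: flush the flip buffers and, in the same pass, the ldisc's
 * input queue. Taking an ldisc reference keeps 'ld' valid across the
 * call; tty_ldisc_ref() may return NULL, which tty_buffer_flush()
 * treats as "flip buffers only". */
static void example_flush_input(struct tty_struct *tty)
{
    struct tty_ldisc *ld = tty_ldisc_ref(tty);

    tty_buffer_flush(tty, ld);
    if (ld)
        tty_ldisc_deref(ld);
}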
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0508a1d8e4cd..4f35b43e2475 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -153,8 +153,6 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
static int __tty_fasync(int fd, struct file *filp, int on);
static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
-static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
-static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
/**
* free_tty_struct - free a disused tty
@@ -169,8 +167,7 @@ void free_tty_struct(struct tty_struct *tty)
{
if (!tty)
return;
- if (tty->dev)
- put_device(tty->dev);
+ put_device(tty->dev);
kfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
kfree(tty);
@@ -277,6 +274,7 @@ int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
return 0;
}
+/* Caller must hold tty_lock */
static int check_tty_count(struct tty_struct *tty, const char *routine)
{
#ifdef CHECK_TTY_COUNT
@@ -492,6 +490,78 @@ static const struct file_operations hung_up_tty_fops = {
static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
+
+void proc_clear_tty(struct task_struct *p)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ tty = p->signal->tty;
+ p->signal->tty = NULL;
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ tty_kref_put(tty);
+}
+
+/**
+ * proc_set_tty - set the controlling terminal
+ *
+ * Only callable by the session leader and only if it does not already have
+ * a controlling terminal.
+ *
+ * Caller must hold: tty_lock()
+ * a readlock on tasklist_lock
+ * sighand lock
+ */
+static void __proc_set_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ /*
+ * The session and fg pgrp references will be non-NULL if
+ * tiocsctty() is stealing the controlling tty
+ */
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->pgrp = get_pid(task_pgrp(current));
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty->session = get_pid(task_session(current));
+ if (current->signal->tty) {
+ printk(KERN_DEBUG "tty not NULL!!\n");
+ tty_kref_put(current->signal->tty);
+ }
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty = tty_kref_get(tty);
+ current->signal->tty_old_pgrp = NULL;
+}
+
+static void proc_set_tty(struct tty_struct *tty)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ __proc_set_tty(tty);
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+struct tty_struct *get_current_tty(void)
+{
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ return tty;
+}
+EXPORT_SYMBOL_GPL(get_current_tty);
+
+static void session_clear_tty(struct pid *session)
+{
+ struct task_struct *p;
+ do_each_pid_task(session, PIDTYPE_SID, p) {
+ proc_clear_tty(p);
+ } while_each_pid_task(session, PIDTYPE_SID, p);
+}
+
/**
* tty_wakeup - request more data
* @tty: terminal
@@ -620,9 +690,6 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
return;
}
- /* some functions below drop BTM, so we need this bit */
- set_bit(TTY_HUPPING, &tty->flags);
-
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
@@ -647,10 +714,6 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
while (refs--)
tty_kref_put(tty);
- /*
- * it drops BTM and thus races with reopen
- * we protect the race by TTY_HUPPING
- */
tty_ldisc_hangup(tty);
spin_lock_irq(&tty->ctrl_lock);
@@ -682,8 +745,6 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
- clear_bit(TTY_HUPPING, &tty->flags);
-
tty_unlock(tty);
if (f)
@@ -792,14 +853,6 @@ int tty_hung_up_p(struct file *filp)
EXPORT_SYMBOL(tty_hung_up_p);
-static void session_clear_tty(struct pid *session)
-{
- struct task_struct *p;
- do_each_pid_task(session, PIDTYPE_SID, p) {
- proc_clear_tty(p);
- } while_each_pid_task(session, PIDTYPE_SID, p);
-}
-
/**
* disassociate_ctty - disconnect controlling tty
* @on_exit: true if exiting so need to "hang up" the session
@@ -927,7 +980,7 @@ void __stop_tty(struct tty_struct *tty)
return;
tty->stopped = 1;
if (tty->ops->stop)
- (tty->ops->stop)(tty);
+ tty->ops->stop(tty);
}
void stop_tty(struct tty_struct *tty)
@@ -958,7 +1011,7 @@ void __start_tty(struct tty_struct *tty)
return;
tty->stopped = 0;
if (tty->ops->start)
- (tty->ops->start)(tty);
+ tty->ops->start(tty);
tty_wakeup(tty);
}
@@ -1012,7 +1065,7 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
situation */
ld = tty_ldisc_ref_wait(tty);
if (ld->ops->read)
- i = (ld->ops->read)(tty, file, buf, count);
+ i = ld->ops->read(tty, file, buf, count);
else
i = -EIO;
tty_ldisc_deref(ld);
@@ -1024,14 +1077,12 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
}
static void tty_write_unlock(struct tty_struct *tty)
- __releases(&tty->atomic_write_lock)
{
mutex_unlock(&tty->atomic_write_lock);
wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}
static int tty_write_lock(struct tty_struct *tty, int ndelay)
- __acquires(&tty->atomic_write_lock)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
@@ -1146,7 +1197,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
if (tty) {
mutex_lock(&tty->atomic_write_lock);
tty_lock(tty);
- if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
+ if (tty->ops->write && tty->count > 0) {
tty_unlock(tty);
tty->ops->write(tty, msg, strlen(msg));
} else
@@ -1293,19 +1344,24 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
* @driver: the driver for the tty
* @idx: the minor number
*
- * Return the tty, if found or ERR_PTR() otherwise.
+ * Return the tty, if found. If not found, return NULL or ERR_PTR() if the
+ * driver lookup() method returns an error.
*
- * Locking: tty_mutex must be held. If tty is found, the mutex must
- * be held until the 'fast-open' is also done. Will change once we
- * have refcounting in the driver and per driver locking
+ * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
struct inode *inode, int idx)
{
+ struct tty_struct *tty;
+
if (driver->ops->lookup)
- return driver->ops->lookup(driver, inode, idx);
+ tty = driver->ops->lookup(driver, inode, idx);
+ else
+ tty = driver->ttys[idx];
- return driver->ttys[idx];
+ if (!IS_ERR(tty))
+ tty_kref_get(tty);
+ return tty;
}
/**
@@ -1393,29 +1449,21 @@ void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
* @tty - the tty to open
*
* Return 0 on success, -errno on error.
+ * Re-opens on master ptys are not allowed and return -EIO.
*
- * Locking: tty_mutex must be held from the time the tty was found
- * till this open completes.
+ * Locking: Caller must hold tty_lock
*/
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
- if (test_bit(TTY_CLOSING, &tty->flags) ||
- test_bit(TTY_HUPPING, &tty->flags))
+ if (!tty->count)
return -EIO;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
- driver->subtype == PTY_TYPE_MASTER) {
- /*
- * special case for PTY masters: only one open permitted,
- * and the slave side open count is incremented as well.
- */
- if (tty->count)
- return -EIO;
+ driver->subtype == PTY_TYPE_MASTER)
+ return -EIO;
- tty->link->count++;
- }
tty->count++;
WARN_ON(!tty->ldisc);
@@ -1535,15 +1583,19 @@ void tty_free_termios(struct tty_struct *tty)
EXPORT_SYMBOL(tty_free_termios);
/**
- * tty_flush_works - flush all works of a tty
- * @tty: tty device to flush works for
+ * tty_flush_works - flush all works of a tty/pty pair
+ * @tty: tty device to flush works for (or either end of a pty pair)
*
- * Sync flush all works belonging to @tty.
+ * Sync flush all works belonging to @tty (and the 'other' tty).
*/
static void tty_flush_works(struct tty_struct *tty)
{
flush_work(&tty->SAK_work);
flush_work(&tty->hangup_work);
+ if (tty->link) {
+ flush_work(&tty->link->SAK_work);
+ flush_work(&tty->link->hangup_work);
+ }
}
/**
@@ -1635,8 +1687,7 @@ static void release_tty(struct tty_struct *tty, int idx)
tty->link->port->itty = NULL;
cancel_work_sync(&tty->port->buf.work);
- if (tty->link)
- tty_kref_put(tty->link);
+ tty_kref_put(tty->link);
tty_kref_put(tty);
}
@@ -1649,8 +1700,7 @@ static void release_tty(struct tty_struct *tty, int idx)
* Performs some paranoid checking before true release of the @tty.
* This is a no-op unless TTY_PARANOIA_CHECK is defined.
*/
-static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
- int idx)
+static int tty_release_checks(struct tty_struct *tty, int idx)
{
#ifdef TTY_PARANOIA_CHECK
if (idx < 0 || idx >= tty->driver->num) {
@@ -1669,6 +1719,8 @@ static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
return -1;
}
if (tty->driver->other) {
+ struct tty_struct *o_tty = tty->link;
+
if (o_tty != tty->driver->other->ttys[idx]) {
printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
__func__, idx, tty->name);
@@ -1705,8 +1757,8 @@ static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = file_tty(filp);
- struct tty_struct *o_tty;
- int pty_master, tty_closing, o_tty_closing, do_sleep;
+ struct tty_struct *o_tty = NULL;
+ int do_sleep, final;
int idx;
char buf[64];
long timeout = 0;
@@ -1721,12 +1773,11 @@ int tty_release(struct inode *inode, struct file *filp)
__tty_fasync(-1, filp, 0);
idx = tty->index;
- pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER);
- /* Review: parallel close */
- o_tty = tty->link;
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ o_tty = tty->link;
- if (tty_release_checks(tty, o_tty, idx)) {
+ if (tty_release_checks(tty, idx)) {
tty_unlock(tty);
return 0;
}
@@ -1739,7 +1790,9 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->ops->close)
tty->ops->close(tty, filp);
- tty_unlock(tty);
+ /* If tty is pty master, lock the slave pty (stable lock order) */
+ tty_lock_slave(o_tty);
+
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1750,25 +1803,13 @@ int tty_release(struct inode *inode, struct file *filp)
* The test for the o_tty closing is necessary, since the master and
* slave sides may close in any order. If the slave side closes out
* first, its count will be one, since the master side holds an open.
- * Thus this test wouldn't be triggered at the time the slave closes,
+ * Thus this test wouldn't be triggered at the time the slave closed,
* so we do it now.
- *
- * Note that it's possible for the tty to be opened again while we're
- * flushing out waiters. By recalculating the closing flags before
- * each iteration we avoid any problems.
*/
while (1) {
- /* Guard against races with tty->count changes elsewhere and
- opens on /dev/tty */
-
- mutex_lock(&tty_mutex);
- tty_lock_pair(tty, o_tty);
- tty_closing = tty->count <= 1;
- o_tty_closing = o_tty &&
- (o_tty->count <= (pty_master ? 1 : 0));
do_sleep = 0;
- if (tty_closing) {
+ if (tty->count <= 1) {
if (waitqueue_active(&tty->read_wait)) {
wake_up_poll(&tty->read_wait, POLLIN);
do_sleep++;
@@ -1778,7 +1819,7 @@ int tty_release(struct inode *inode, struct file *filp)
do_sleep++;
}
}
- if (o_tty_closing) {
+ if (o_tty && o_tty->count <= 1) {
if (waitqueue_active(&o_tty->read_wait)) {
wake_up_poll(&o_tty->read_wait, POLLIN);
do_sleep++;
@@ -1796,8 +1837,6 @@ int tty_release(struct inode *inode, struct file *filp)
printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
__func__, tty_name(tty, buf));
}
- tty_unlock_pair(tty, o_tty);
- mutex_unlock(&tty_mutex);
schedule_timeout_killable(timeout);
if (timeout < 120 * HZ)
timeout = 2 * timeout + 1;
@@ -1805,15 +1844,7 @@ int tty_release(struct inode *inode, struct file *filp)
timeout = MAX_SCHEDULE_TIMEOUT;
}
- /*
- * The closing flags are now consistent with the open counts on
- * both sides, and we've completed the last operation that could
- * block, so it's safe to proceed with closing.
- *
- * We must *not* drop the tty_mutex until we ensure that a further
- * entry into tty_open can not pick up this tty.
- */
- if (pty_master) {
+ if (o_tty) {
if (--o_tty->count < 0) {
printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
__func__, o_tty->count, tty_name(o_tty, buf));
@@ -1840,21 +1871,11 @@ int tty_release(struct inode *inode, struct file *filp)
/*
* Perform some housekeeping before deciding whether to return.
*
- * Set the TTY_CLOSING flag if this was the last open. In the
- * case of a pty we may have to wait around for the other side
- * to close, and TTY_CLOSING makes sure we can't be reopened.
- */
- if (tty_closing)
- set_bit(TTY_CLOSING, &tty->flags);
- if (o_tty_closing)
- set_bit(TTY_CLOSING, &o_tty->flags);
-
- /*
* If _either_ side is closing, make sure there aren't any
* processes that still think tty or o_tty is their controlling
* tty.
*/
- if (tty_closing || o_tty_closing) {
+ if (!tty->count) {
read_lock(&tasklist_lock);
session_clear_tty(tty->session);
if (o_tty)
@@ -1862,13 +1883,16 @@ int tty_release(struct inode *inode, struct file *filp)
read_unlock(&tasklist_lock);
}
- mutex_unlock(&tty_mutex);
- tty_unlock_pair(tty, o_tty);
- /* At this point the TTY_CLOSING flag should ensure a dead tty
+ /* check whether both sides are closing ... */
+ final = !tty->count && !(o_tty && o_tty->count);
+
+ tty_unlock_slave(o_tty);
+ tty_unlock(tty);
+
+ /* At this point, the tty->count == 0 should ensure a dead tty
cannot be re-opened by a racing opener */
- /* check whether both sides are closing ... */
- if (!tty_closing || (o_tty && !o_tty_closing))
+ if (!final)
return 0;
#ifdef TTY_DEBUG_HANGUP
@@ -1877,12 +1901,10 @@ int tty_release(struct inode *inode, struct file *filp)
/*
* Ask the line discipline code to release its structures
*/
- tty_ldisc_release(tty, o_tty);
+ tty_ldisc_release(tty);
/* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
- if (o_tty)
- tty_flush_works(o_tty);
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: %s: freeing structure...\n", __func__, tty_name(tty, buf));
@@ -1901,20 +1923,20 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
- * tty_open_current_tty - get tty of current task for open
+ * tty_open_current_tty - get locked tty of current task
* @device: device number
* @filp: file pointer to tty
- * @return: tty of the current task iff @device is /dev/tty
+ * @return: locked tty of the current task iff @device is /dev/tty
+ *
+ * Performs a re-open of the current task's controlling tty.
*
* We cannot return driver and index like for the other nodes because
* devpts will not work then. It expects inodes to be from devpts FS.
- *
- * We need to move to returning a refcounted object from all the lookup
- * paths including this one.
*/
static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
{
struct tty_struct *tty;
+ int retval;
if (device != MKDEV(TTYAUX_MAJOR, 0))
return NULL;
@@ -1925,9 +1947,14 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
/* noctty = 1; */
- tty_kref_put(tty);
- /* FIXME: we put a reference and return a TTY! */
- /* This is only safe because the caller holds tty_mutex */
+ tty_lock(tty);
+ tty_kref_put(tty); /* safe to drop the kref now */
+
+ retval = tty_reopen(tty);
+ if (retval < 0) {
+ tty_unlock(tty);
+ tty = ERR_PTR(retval);
+ }
return tty;
}
@@ -2025,13 +2052,9 @@ retry_open:
index = -1;
retval = 0;
- mutex_lock(&tty_mutex);
- /* This is protected by the tty_mutex */
tty = tty_open_current_tty(device, filp);
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto err_unlock;
- } else if (!tty) {
+ if (!tty) {
+ mutex_lock(&tty_mutex);
driver = tty_lookup_driver(device, filp, &noctty, &index);
if (IS_ERR(driver)) {
retval = PTR_ERR(driver);
@@ -2044,21 +2067,25 @@ retry_open:
retval = PTR_ERR(tty);
goto err_unlock;
}
- }
- if (tty) {
- tty_lock(tty);
- retval = tty_reopen(tty);
- if (retval < 0) {
- tty_unlock(tty);
- tty = ERR_PTR(retval);
+ if (tty) {
+ mutex_unlock(&tty_mutex);
+ tty_lock(tty);
+ /* safe to drop the kref from tty_driver_lookup_tty() */
+ tty_kref_put(tty);
+ retval = tty_reopen(tty);
+ if (retval < 0) {
+ tty_unlock(tty);
+ tty = ERR_PTR(retval);
+ }
+ } else { /* Returns with the tty_lock held for now */
+ tty = tty_init_dev(driver, index);
+ mutex_unlock(&tty_mutex);
}
- } else /* Returns with the tty_lock held for now */
- tty = tty_init_dev(driver, index);
- mutex_unlock(&tty_mutex);
- if (driver)
tty_driver_kref_put(driver);
+ }
+
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
goto err_file;
@@ -2100,25 +2127,23 @@ retry_open:
/*
* Need to reset f_op in case a hangup happened.
*/
- if (filp->f_op == &hung_up_tty_fops)
+ if (tty_hung_up_p(filp))
filp->f_op = &tty_fops;
goto retry_open;
}
clear_bit(TTY_HUPPED, &tty->flags);
- tty_unlock(tty);
- mutex_lock(&tty_mutex);
- tty_lock(tty);
+ read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
!current->signal->tty &&
tty->session == NULL)
- __proc_set_tty(current, tty);
+ __proc_set_tty(tty);
spin_unlock_irq(&current->sighand->siglock);
+ read_unlock(&tasklist_lock);
tty_unlock(tty);
- mutex_unlock(&tty_mutex);
return 0;
err_unlock:
mutex_unlock(&tty_mutex);
@@ -2155,7 +2180,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
ld = tty_ldisc_ref_wait(tty);
if (ld->ops->poll)
- ret = (ld->ops->poll)(tty, filp, wait);
+ ret = ld->ops->poll(tty, filp, wait);
tty_ldisc_deref(ld);
return ret;
}
@@ -2283,18 +2308,14 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp;
- unsigned long flags;
/* Lock the tty */
mutex_lock(&tty->winsize_mutex);
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
- /* Get the PID values and reference them so we can
- avoid holding the tty ctrl lock while sending signals */
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- pgrp = get_pid(tty->pgrp);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ /* Signal the foreground process group */
+ pgrp = tty_get_pgrp(tty);
if (pgrp)
kill_pgrp(pgrp, SIGWINCH, 1);
put_pid(pgrp);
@@ -2403,7 +2424,7 @@ static int fionbio(struct file *file, int __user *p)
* leader to set this tty as the controlling tty for the session.
*
* Locking:
- * Takes tty_mutex() to protect tty instance
+ * Takes tty_lock() to serialize proc_set_tty() for this tty
* Takes tasklist_lock internally to walk sessions
* Takes ->siglock() when updating signal->tty
*/
@@ -2411,10 +2432,13 @@ static int fionbio(struct file *file, int __user *p)
static int tiocsctty(struct tty_struct *tty, int arg)
{
int ret = 0;
+
+ tty_lock(tty);
+ read_lock(&tasklist_lock);
+
if (current->signal->leader && (task_session(current) == tty->session))
- return ret;
+ goto unlock;
- mutex_lock(&tty_mutex);
/*
* The process must be a session leader and
* not have a controlling tty already.
@@ -2433,17 +2457,16 @@ static int tiocsctty(struct tty_struct *tty, int arg)
/*
* Steal it away
*/
- read_lock(&tasklist_lock);
session_clear_tty(tty->session);
- read_unlock(&tasklist_lock);
} else {
ret = -EPERM;
goto unlock;
}
}
- proc_set_tty(current, tty);
+ proc_set_tty(tty);
unlock:
- mutex_unlock(&tty_mutex);
+ read_unlock(&tasklist_lock);
+ tty_unlock(tty);
return ret;
}
@@ -2468,6 +2491,27 @@ struct pid *tty_get_pgrp(struct tty_struct *tty)
}
EXPORT_SYMBOL_GPL(tty_get_pgrp);
+/*
+ * This checks not only the pgrp, but falls back on the pid if no
+ * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
+ * without this...
+ *
+ * The caller must hold rcu lock or the tasklist lock.
+ */
+static struct pid *session_of_pgrp(struct pid *pgrp)
+{
+ struct task_struct *p;
+ struct pid *sid = NULL;
+
+ p = pid_task(pgrp, PIDTYPE_PGID);
+ if (p == NULL)
+ p = pid_task(pgrp, PIDTYPE_PID);
+ if (p != NULL)
+ sid = task_session(p);
+
+ return sid;
+}
+
/**
* tiocgpgrp - get process group
* @tty: tty passed by user
@@ -2714,23 +2758,35 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
return 0;
}
-struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+static void tty_warn_deprecated_flags(struct serial_struct __user *ss)
{
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- tty = tty->link;
- return tty;
+ static DEFINE_RATELIMIT_STATE(depr_flags,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ char comm[TASK_COMM_LEN];
+ int flags;
+
+ if (get_user(flags, &ss->flags))
+ return;
+
+ flags &= ASYNC_DEPRECATED;
+
+ if (flags && __ratelimit(&depr_flags))
+ pr_warning("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+ __func__, get_task_comm(comm, current), flags);
}
-EXPORT_SYMBOL(tty_pair_get_tty);
-struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
+/*
+ * if pty, return the slave side (real_tty)
+ * otherwise, return self
+ */
+static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
{
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
- return tty;
- return tty->link;
+ tty = tty->link;
+ return tty;
}
-EXPORT_SYMBOL(tty_pair_get_pty);
/*
* Split this up, as gcc can choke on it otherwise..
@@ -2859,13 +2915,16 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TCIFLUSH:
case TCIOFLUSH:
/* flush tty buffer and allow ldisc to process ioctl */
- tty_buffer_flush(tty);
+ tty_buffer_flush(tty, NULL);
break;
}
break;
+ case TIOCSSERIAL:
+ tty_warn_deprecated_flags(p);
+ break;
}
if (tty->ops->ioctl) {
- retval = (tty->ops->ioctl)(tty, cmd, arg);
+ retval = tty->ops->ioctl(tty, cmd, arg);
if (retval != -ENOIOCTLCMD)
return retval;
}
@@ -2892,7 +2951,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
if (tty->ops->compat_ioctl) {
- retval = (tty->ops->compat_ioctl)(tty, cmd, arg);
+ retval = tty->ops->compat_ioctl(tty, cmd, arg);
if (retval != -ENOIOCTLCMD)
return retval;
}
@@ -3443,59 +3502,6 @@ dev_t tty_devnum(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_devnum);
-void proc_clear_tty(struct task_struct *p)
-{
- unsigned long flags;
- struct tty_struct *tty;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- tty = p->signal->tty;
- p->signal->tty = NULL;
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- tty_kref_put(tty);
-}
-
-/* Called under the sighand lock */
-
-static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
-{
- if (tty) {
- unsigned long flags;
- /* We should not have a session or pgrp to put here but.... */
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->pgrp = get_pid(task_pgrp(tsk));
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- tty->session = get_pid(task_session(tsk));
- if (tsk->signal->tty) {
- printk(KERN_DEBUG "tty not NULL!!\n");
- tty_kref_put(tsk->signal->tty);
- }
- }
- put_pid(tsk->signal->tty_old_pgrp);
- tsk->signal->tty = tty_kref_get(tty);
- tsk->signal->tty_old_pgrp = NULL;
-}
-
-static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
-{
- spin_lock_irq(&tsk->sighand->siglock);
- __proc_set_tty(tsk, tty);
- spin_unlock_irq(&tsk->sighand->siglock);
-}
-
-struct tty_struct *get_current_tty(void)
-{
- struct tty_struct *tty;
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- tty = tty_kref_get(current->signal->tty);
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- return tty;
-}
-EXPORT_SYMBOL_GPL(get_current_tty);
-
void tty_default_fops(struct file_operations *fops)
{
*fops = tty_fops;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 62380ccf70fb..1787fa4d9448 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -524,9 +524,8 @@ EXPORT_SYMBOL(tty_termios_hw_change);
* @tty: tty to update
* @new_termios: desired new value
*
- * Perform updates to the termios values set on this terminal. There
- * is a bit of layering violation here with n_tty in terms of the
- * internal knowledge of this function.
+ * Perform updates to the termios values set on this terminal.
+ * A master pty's termios should never be set.
*
* Locking: termios_rwsem
*/
@@ -535,8 +534,9 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
struct ktermios old_termios;
struct tty_ldisc *ld;
- unsigned long flags;
+ WARN_ON(tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
/*
* Perform the actual termios internal changes under lock.
*/
@@ -549,41 +549,15 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
tty->termios = *new_termios;
unset_locked_termios(&tty->termios, &old_termios, &tty->termios_locked);
- /* See if packet mode change of state. */
- if (tty->link && tty->link->packet) {
- int extproc = (old_termios.c_lflag & EXTPROC) |
- (tty->termios.c_lflag & EXTPROC);
- int old_flow = ((old_termios.c_iflag & IXON) &&
- (old_termios.c_cc[VSTOP] == '\023') &&
- (old_termios.c_cc[VSTART] == '\021'));
- int new_flow = (I_IXON(tty) &&
- STOP_CHAR(tty) == '\023' &&
- START_CHAR(tty) == '\021');
- if ((old_flow != new_flow) || extproc) {
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- if (old_flow != new_flow) {
- tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
- if (new_flow)
- tty->ctrl_status |= TIOCPKT_DOSTOP;
- else
- tty->ctrl_status |= TIOCPKT_NOSTOP;
- }
- if (extproc)
- tty->ctrl_status |= TIOCPKT_IOCTL;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- wake_up_interruptible(&tty->link->read_wait);
- }
- }
-
if (tty->ops->set_termios)
- (*tty->ops->set_termios)(tty, &old_termios);
+ tty->ops->set_termios(tty, &old_termios);
else
tty_termios_copy_hw(&tty->termios, &old_termios);
ld = tty_ldisc_ref(tty);
if (ld != NULL) {
if (ld->ops->set_termios)
- (ld->ops->set_termios)(tty, &old_termios);
+ ld->ops->set_termios(tty, &old_termios);
tty_ldisc_deref(ld);
}
up_write(&tty->termios_rwsem);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2d822aa259b2..3737f55272d2 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -308,48 +308,66 @@ EXPORT_SYMBOL_GPL(tty_ldisc_deref);
static inline int __lockfunc
-tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+__tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write(&tty->ldisc_sem, timeout);
}
static inline int __lockfunc
-tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
+__tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write_nested(&tty->ldisc_sem,
LDISC_SEM_OTHER, timeout);
}
-static inline void tty_ldisc_unlock(struct tty_struct *tty)
+static inline void __tty_ldisc_unlock(struct tty_struct *tty)
{
return ldsem_up_write(&tty->ldisc_sem);
}
static int __lockfunc
+tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+{
+ int ret;
+
+ ret = __tty_ldisc_lock(tty, timeout);
+ if (!ret)
+ return -EBUSY;
+ set_bit(TTY_LDISC_HALTED, &tty->flags);
+ return 0;
+}
+
+static void tty_ldisc_unlock(struct tty_struct *tty)
+{
+ clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ __tty_ldisc_unlock(tty);
+}
+
+static int __lockfunc
tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
unsigned long timeout)
{
int ret;
if (tty < tty2) {
- ret = tty_ldisc_lock(tty, timeout);
+ ret = __tty_ldisc_lock(tty, timeout);
if (ret) {
- ret = tty_ldisc_lock_nested(tty2, timeout);
+ ret = __tty_ldisc_lock_nested(tty2, timeout);
if (!ret)
- tty_ldisc_unlock(tty);
+ __tty_ldisc_unlock(tty);
}
} else {
/* if this is possible, it has lots of implications */
WARN_ON_ONCE(tty == tty2);
if (tty2 && tty != tty2) {
- ret = tty_ldisc_lock(tty2, timeout);
+ ret = __tty_ldisc_lock(tty2, timeout);
if (ret) {
- ret = tty_ldisc_lock_nested(tty, timeout);
+ ret = __tty_ldisc_lock_nested(tty, timeout);
if (!ret)
- tty_ldisc_unlock(tty2);
+ __tty_ldisc_unlock(tty2);
}
} else
- ret = tty_ldisc_lock(tty, timeout);
+ ret = __tty_ldisc_lock(tty, timeout);
}
if (!ret)
@@ -370,38 +388,26 @@ tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
static void __lockfunc tty_ldisc_unlock_pair(struct tty_struct *tty,
struct tty_struct *tty2)
{
- tty_ldisc_unlock(tty);
- if (tty2)
- tty_ldisc_unlock(tty2);
-}
-
-static void __lockfunc tty_ldisc_enable_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
-{
- clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ __tty_ldisc_unlock(tty);
if (tty2)
- clear_bit(TTY_LDISC_HALTED, &tty2->flags);
-
- tty_ldisc_unlock_pair(tty, tty2);
+ __tty_ldisc_unlock(tty2);
}
/**
* tty_ldisc_flush - flush line discipline queue
* @tty: tty
*
- * Flush the line discipline queue (if any) for this tty. If there
- * is no line discipline active this is a no-op.
+ * Flush the line discipline queue (if any) and the tty flip buffers
+ * for this tty.
*/
void tty_ldisc_flush(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
+
+ tty_buffer_flush(tty, ld);
+ if (ld)
tty_ldisc_deref(ld);
- }
- tty_buffer_flush(tty);
}
EXPORT_SYMBOL_GPL(tty_ldisc_flush);
@@ -517,15 +523,16 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
int retval;
struct tty_ldisc *old_ldisc, *new_ldisc;
- struct tty_struct *o_tty = tty->link;
new_ldisc = tty_ldisc_get(tty, ldisc);
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
- retval = tty_ldisc_lock_pair_timeout(tty, o_tty, 5 * HZ);
+ tty_lock(tty);
+ retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval) {
tty_ldisc_put(new_ldisc);
+ tty_unlock(tty);
return retval;
}
@@ -534,19 +541,18 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
- tty_ldisc_enable_pair(tty, o_tty);
+ tty_ldisc_unlock(tty);
tty_ldisc_put(new_ldisc);
+ tty_unlock(tty);
return 0;
}
old_ldisc = tty->ldisc;
- tty_lock(tty);
- if (test_bit(TTY_HUPPING, &tty->flags) ||
- test_bit(TTY_HUPPED, &tty->flags)) {
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
- tty_ldisc_enable_pair(tty, o_tty);
+ tty_ldisc_unlock(tty);
tty_ldisc_put(new_ldisc);
tty_unlock(tty);
return -EIO;
@@ -566,8 +572,11 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_ldisc_restore(tty, old_ldisc);
}
- if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc)
+ if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
+ down_read(&tty->termios_rwsem);
tty->ops->set_ldisc(tty);
+ up_read(&tty->termios_rwsem);
+ }
/* At this point we hold a reference to the new ldisc and a
reference to the old ldisc, or we hold two references to
@@ -580,13 +589,11 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
/*
* Allow ldisc referencing to occur again
*/
- tty_ldisc_enable_pair(tty, o_tty);
+ tty_ldisc_unlock(tty);
/* Restart the work queue in case no characters kick it off. Safe if
already running */
schedule_work(&tty->port->buf.work);
- if (o_tty)
- schedule_work(&o_tty->port->buf.work);
tty_unlock(tty);
return retval;
@@ -675,16 +682,13 @@ void tty_ldisc_hangup(struct tty_struct *tty)
wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
- tty_unlock(tty);
-
/*
* Shutdown the current line discipline, and reset it to
* N_TTY if need be.
*
* Avoid racing set_ldisc or tty_ldisc_release
*/
- tty_ldisc_lock_pair(tty, tty->link);
- tty_lock(tty);
+ tty_ldisc_lock(tty, MAX_SCHEDULE_TIMEOUT);
if (tty->ldisc) {
@@ -706,7 +710,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
}
}
- tty_ldisc_enable_pair(tty, tty->link);
+ tty_ldisc_unlock(tty);
if (reset)
tty_reset_termios(tty);
@@ -758,16 +762,17 @@ static void tty_ldisc_kill(struct tty_struct *tty)
/**
* tty_ldisc_release - release line discipline
- * @tty: tty being shut down
- * @o_tty: pair tty for pty/tty pairs
+ * @tty: tty being shut down (or one end of pty pair)
*
- * Called during the final close of a tty/pty pair in order to shut down
- * the line discpline layer. On exit the ldisc assigned is N_TTY and the
- * ldisc has not been opened.
+ * Called during the final close of a tty or a pty pair in order to shut
+ * down the line discpline layer. On exit, each ldisc assigned is N_TTY and
+ * each ldisc has not been opened.
*/
-void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
+void tty_ldisc_release(struct tty_struct *tty)
{
+ struct tty_struct *o_tty = tty->link;
+
/*
* Shutdown this line discipline. As this is the final close,
* it does not race with the set_ldisc code path.
@@ -776,13 +781,9 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
tty_ldisc_lock_pair(tty, o_tty);
- tty_lock_pair(tty, o_tty);
-
tty_ldisc_kill(tty);
if (o_tty)
tty_ldisc_kill(o_tty);
-
- tty_unlock_pair(tty, o_tty);
tty_ldisc_unlock_pair(tty, o_tty);
/* And the memory resources remaining (buffers, termios) will be
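The tty_ldisc_lock()/tty_ldisc_unlock() wrappers introduced above pair the ldisc write semaphore with the TTY_LDISC_HALTED flag, so the ldisc reads as halted for exactly the write-locked region. A minimal sketch of how a hypothetical in-file caller might bracket an ldisc change with them (example_ldisc_change() is made up; the helpers, the flag and the -EBUSY timeout behaviour come from the hunk above):

    /* Sketch only: serialize an ldisc-affecting change on one tty. */
    static int example_ldisc_change(struct tty_struct *tty)
    {
        int retval;

        /* Take the ldisc write lock and set TTY_LDISC_HALTED. */
        retval = tty_ldisc_lock(tty, 5 * HZ);   /* -EBUSY on timeout */
        if (retval)
            return retval;

        /* ... reconfigure tty->ldisc while readers are held off ... */

        /* Clear TTY_LDISC_HALTED and drop the write lock. */
        tty_ldisc_unlock(tty);
        return 0;
    }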
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 2e41abebbcba..4486741190c4 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,19 +4,23 @@
#include <linux/semaphore.h>
#include <linux/sched.h>
+/*
+ * Nested tty locks are necessary for releasing pty pairs.
+ * The stable lock order is master pty first, then slave pty.
+ */
+
/* Legacy tty mutex glue */
enum {
TTY_MUTEX_NORMAL,
- TTY_MUTEX_NESTED,
+ TTY_MUTEX_SLAVE,
};
/*
* Getting the big tty mutex.
*/
-static void __lockfunc tty_lock_nested(struct tty_struct *tty,
- unsigned int subclass)
+void __lockfunc tty_lock(struct tty_struct *tty)
{
if (tty->magic != TTY_MAGIC) {
pr_err("L Bad %p\n", tty);
@@ -24,12 +28,7 @@ static void __lockfunc tty_lock_nested(struct tty_struct *tty,
return;
}
tty_kref_get(tty);
- mutex_lock_nested(&tty->legacy_mutex, subclass);
-}
-
-void __lockfunc tty_lock(struct tty_struct *tty)
-{
- return tty_lock_nested(tty, TTY_MUTEX_NORMAL);
+ mutex_lock(&tty->legacy_mutex);
}
EXPORT_SYMBOL(tty_lock);
@@ -45,29 +44,23 @@ void __lockfunc tty_unlock(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_unlock);
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
+void __lockfunc tty_lock_slave(struct tty_struct *tty)
{
- if (tty < tty2) {
+ if (tty && tty != tty->link) {
+ WARN_ON(!mutex_is_locked(&tty->link->legacy_mutex) ||
+ tty->driver->type != TTY_DRIVER_TYPE_PTY ||
+ tty->driver->subtype != PTY_TYPE_SLAVE);
tty_lock(tty);
- tty_lock_nested(tty2, TTY_MUTEX_NESTED);
- } else {
- if (tty2 && tty2 != tty)
- tty_lock(tty2);
- tty_lock_nested(tty, TTY_MUTEX_NESTED);
}
}
-EXPORT_SYMBOL(tty_lock_pair);
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
+void __lockfunc tty_unlock_slave(struct tty_struct *tty)
+{
+ if (tty && tty != tty->link)
+ tty_unlock(tty);
+}
+
+void tty_set_lock_subclass(struct tty_struct *tty)
{
- tty_unlock(tty);
- if (tty2 && tty2 != tty)
- tty_unlock(tty2);
+ lockdep_set_subclass(&tty->legacy_mutex, TTY_MUTEX_SLAVE);
}
-EXPORT_SYMBOL(tty_unlock_pair);
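tty_lock_slave()/tty_unlock_slave() above encode the lock order stated in the new comment: the master pty's legacy_mutex is taken first, then the slave's, with the slave's mutex moved into the TTY_MUTEX_SLAVE lockdep subclass (via tty_set_lock_subclass() at pty creation) so the nesting does not upset lockdep. A hedged sketch of a pair-locking caller, assuming the usual pty setup; example_lock_pty_pair() itself is hypothetical:

    /* Sketch only: lock a pty pair in the stable order (master, then slave). */
    static void example_lock_pty_pair(struct tty_struct *master)
    {
        struct tty_struct *slave = master->link;

        tty_lock(master);          /* master pty first */
        tty_lock_slave(slave);     /* nested; slave uses TTY_MUTEX_SLAVE subclass */

        /* ... work that needs both ends quiescent ... */

        tty_unlock_slave(slave);
        tty_unlock(master);
    }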
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 1b9335796da4..40b31835f80b 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -193,8 +193,7 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- if (port->tty)
- tty_kref_put(port->tty);
+ tty_kref_put(port->tty);
port->tty = tty_kref_get(tty);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -473,12 +472,10 @@ int tty_port_close_start(struct tty_port *port,
{
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ if (tty_hung_up_p(filp))
return 0;
- }
+ spin_lock_irqsave(&port->lock, flags);
if (tty->count == 1 && port->count != 1) {
printk(KERN_WARNING
"tty_port_close_start: tty->count = 1 port count = %d.\n",
@@ -522,6 +519,7 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
+ tty_ldisc_flush(tty);
tty->closing = 0;
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c039cfea5b11..8a89f6e7715d 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -924,7 +924,7 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
if (kbd->kbdmode != VC_UNICODE) {
if (!up_flag)
- pr_warning("keyboard mode must be unicode for braille patterns\n");
+ pr_warn("keyboard mode must be unicode for braille patterns\n");
return;
}
@@ -1253,8 +1253,8 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
if (raw_mode && !hw_raw)
if (emulate_raw(vc, keycode, !down << 7))
if (keycode < BTN_MISC && printk_ratelimit())
- pr_warning("can't emulate rawmode for keycode %d\n",
- keycode);
+ pr_warn("can't emulate rawmode for keycode %d\n",
+ keycode);
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index b33b00b386de..f3fbbbca9bde 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -156,6 +156,8 @@ static void console_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
static int printable; /* Is console ready for printing? */
int default_utf8 = true;
module_param(default_utf8, int, S_IRUGO | S_IWUSR);
@@ -1367,9 +1369,11 @@ static void csi_m(struct vc_data *vc)
rgb_from_256(vc->vc_par[i]));
} else if (vc->vc_par[i] == 2 && /* 24 bit */
i <= vc->vc_npar + 3) {/* extremely rare */
- struct rgb c = {r:vc->vc_par[i+1],
- g:vc->vc_par[i+2],
- b:vc->vc_par[i+3]};
+ struct rgb c = {
+ .r = vc->vc_par[i + 1],
+ .g = vc->vc_par[i + 2],
+ .b = vc->vc_par[i + 3],
+ };
rgb_foreground(vc, c);
i += 3;
}
@@ -1388,9 +1392,11 @@ static void csi_m(struct vc_data *vc)
rgb_from_256(vc->vc_par[i]));
} else if (vc->vc_par[i] == 2 && /* 24 bit */
i <= vc->vc_npar + 3) {
- struct rgb c = {r:vc->vc_par[i+1],
- g:vc->vc_par[i+2],
- b:vc->vc_par[i+3]};
+ struct rgb c = {
+ .r = vc->vc_par[i + 1],
+ .g = vc->vc_par[i + 2],
+ .b = vc->vc_par[i + 3],
+ };
rgb_background(vc, c);
i += 3;
}
@@ -3849,8 +3855,8 @@ void do_unblank_screen(int leaving_gfx)
return;
if (!vc_cons_allocated(fg_console)) {
/* impossible */
- pr_warning("unblank_screen: tty %d not allocated ??\n",
- fg_console+1);
+ pr_warn("unblank_screen: tty %d not allocated ??\n",
+ fg_console + 1);
return;
}
vc = vc_cons[fg_console].d;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 60fa6278fbce..6276f13e9e12 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -56,12 +56,12 @@ static ssize_t map_name_show(struct uio_mem *mem, char *buf)
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
- return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr);
+ return sprintf(buf, "%pa\n", &mem->addr);
}
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
- return sprintf(buf, "0x%lx\n", mem->size);
+ return sprintf(buf, "%pa\n", &mem->size);
}
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 8d0bba469566..915facbf552e 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -343,7 +343,6 @@ static struct platform_driver uio_dmem_genirq = {
.remove = uio_dmem_genirq_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &uio_dmem_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 76669313e9a7..f598ecddc8a7 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -266,7 +266,6 @@ static struct platform_driver uio_pdrv_genirq = {
.remove = uio_pdrv_genirq_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &uio_pdrv_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index c28d6e2e3df2..818735bb8c3a 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -231,7 +231,6 @@ static struct platform_driver pruss_driver = {
.remove = pruss_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 8db3380c3329..5796c8820514 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -221,7 +221,6 @@ static struct platform_driver c67x00_driver = {
.probe = c67x00_drv_probe,
.remove = c67x00_drv_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "c67x00",
},
};
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 2f099c7df7b5..1fc86a2ca22d 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -10,6 +10,7 @@ ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
# Glue/Bridge layers go here
+obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_usb2.o
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_msm.o
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_zevio.o
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index ea40626e0246..65913d48f0c8 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -161,7 +161,8 @@ struct hw_bank {
* @test_mode: the selected test mode
* @platdata: platform specific information supplied by parent device
* @vbus_active: is VBUS active
- * @transceiver: pointer to USB PHY, if any
+ * @phy: pointer to PHY, if any
+ * @usb_phy: pointer to USB PHY, if any and if using the USB PHY framework
* @hcd: pointer to usb_hcd for ehci host driver
* @debugfs: root dentry for this controller in debugfs
* @id_event: indicates there is an id event, and handled at ci_otg_work
@@ -177,6 +178,7 @@ struct ci_hdrc {
struct ci_role_driver *roles[CI_ROLE_END];
enum ci_role role;
bool is_otg;
+ struct usb_otg otg;
struct otg_fsm fsm;
struct ci_otg_fsm_timer_list *fsm_timer;
struct work_struct work;
@@ -201,7 +203,9 @@ struct ci_hdrc {
struct ci_hdrc_platform_data *platdata;
int vbus_active;
- struct usb_phy *transceiver;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
struct usb_hcd *hcd;
struct dentry *debugfs;
bool id_event;
@@ -348,7 +352,7 @@ u32 hw_read_intr_enable(struct ci_hdrc *ci);
u32 hw_read_intr_status(struct ci_hdrc *ci);
-int hw_device_reset(struct ci_hdrc *ci, u32 mode);
+int hw_device_reset(struct ci_hdrc *ci);
int hw_port_test_set(struct ci_hdrc *ci, u8 mode);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index a7ab0f15926e..0f05de7c6b6c 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -106,8 +106,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
struct ci_hdrc_platform_data pdata = {
.name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
- .flags = CI_HDRC_REQUIRE_TRANSCEIVER |
- CI_HDRC_DISABLE_STREAMING,
+ .flags = CI_HDRC_DISABLE_STREAMING,
};
int ret;
const struct of_device_id *of_id =
@@ -115,10 +114,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "Failed to allocate ci_hdrc-imx data!\n");
+ if (!data)
return -ENOMEM;
- }
data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
if (IS_ERR(data->usbmisc_data))
@@ -147,7 +144,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
- pdata.phy = data->phy;
+ pdata.usb_phy = data->phy;
if (imx_platform_flag->flags & CI_HDRC_IMX_IMX28_WRITE_FIX)
pdata.flags |= CI_HDRC_IMX28_WRITE_FIX;
@@ -210,13 +207,48 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int imx_controller_suspend(struct device *dev)
+{
+ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "at %s\n", __func__);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int imx_controller_resume(struct device *dev)
+{
+ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "at %s\n", __func__);
+
+ return clk_prepare_enable(data->clk);
+}
+
+static int ci_hdrc_imx_suspend(struct device *dev)
+{
+ return imx_controller_suspend(dev);
+}
+
+static int ci_hdrc_imx_resume(struct device *dev)
+{
+ return imx_controller_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
+};
static struct platform_driver ci_hdrc_imx_driver = {
.probe = ci_hdrc_imx_probe,
.remove = ci_hdrc_imx_remove,
.driver = {
.name = "imx_usb",
- .owner = THIS_MODULE,
.of_match_table = ci_hdrc_imx_dt_ids,
+ .pm = &ci_hdrc_imx_pm_ops,
},
};
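The imx glue now gates its controller clock across system sleep through a dev_pm_ops table built with SET_SYSTEM_SLEEP_PM_OPS(). A minimal sketch of the same pattern for a hypothetical platform driver (the my_drv_* names are made up; the clk and PM macros are the standard <linux/clk.h> and <linux/pm.h> ones, and drvdata is assumed to have been set at probe time):

    #include <linux/clk.h>
    #include <linux/pm.h>
    #include <linux/platform_device.h>

    struct my_drv_data {
        struct clk *clk;
    };

    #ifdef CONFIG_PM_SLEEP
    static int my_drv_suspend(struct device *dev)
    {
        struct my_drv_data *data = dev_get_drvdata(dev);

        clk_disable_unprepare(data->clk);   /* stop the clock while asleep */
        return 0;
    }

    static int my_drv_resume(struct device *dev)
    {
        struct my_drv_data *data = dev_get_drvdata(dev);

        return clk_prepare_enable(data->clk);   /* balanced with suspend */
    }
    #endif

    static const struct dev_pm_ops my_drv_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_drv_suspend, my_drv_resume)
    };

    static struct platform_driver my_drv_driver = {
        /* .probe/.remove omitted in this sketch */
        .driver = {
            .name = "my-drv",
            .pm   = &my_drv_pm_ops,
        },
    };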
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 4935ac38fd00..d79ecc08a1be 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -26,15 +26,15 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
writel(0, USB_AHBBURST);
writel(0, USB_AHBMODE);
- usb_phy_init(ci->transceiver);
+ usb_phy_init(ci->usb_phy);
break;
case CI_HDRC_CONTROLLER_STOPPED_EVENT:
dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
/*
- * Put the transceiver in non-driving mode. Otherwise host
+ * Put the phy in non-driving mode. Otherwise host
* may not detect soft-disconnection.
*/
- usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN);
+ usb_phy_notify_disconnect(ci->usb_phy, USB_SPEED_UNKNOWN);
break;
default:
dev_dbg(dev, "unknown ci_hdrc event\n");
@@ -46,7 +46,6 @@ static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
.name = "ci_hdrc_msm",
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REGS_SHARED |
- CI_HDRC_REQUIRE_TRANSCEIVER |
CI_HDRC_DISABLE_STREAMING,
.notify_event = ci_hdrc_msm_notify_event,
@@ -68,7 +67,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
if (IS_ERR(phy))
return PTR_ERR(phy);
- ci_hdrc_msm_platdata.phy = phy;
+ ci_hdrc_msm_platdata.usb_phy = phy;
plat_ci = ci_hdrc_add_device(&pdev->dev,
pdev->resource, pdev->num_resources,
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
new file mode 100644
index 000000000000..45844c951788
--- /dev/null
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014 Marvell Technology Group Ltd.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/chipidea.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ulpi.h>
+
+#include "ci.h"
+
+struct ci_hdrc_usb2_priv {
+ struct platform_device *ci_pdev;
+ struct clk *clk;
+};
+
+static struct ci_hdrc_platform_data ci_default_pdata = {
+ .capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_DISABLE_STREAMING,
+};
+
+static int ci_hdrc_usb2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ci_hdrc_usb2_priv *priv;
+ struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
+ int ret;
+
+ if (!ci_pdata)
+ ci_pdata = &ci_default_pdata;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable the clock: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto clk_err;
+
+ ci_pdata->name = dev_name(dev);
+
+ priv->ci_pdev = ci_hdrc_add_device(dev, pdev->resource,
+ pdev->num_resources, ci_pdata);
+ if (IS_ERR(priv->ci_pdev)) {
+ ret = PTR_ERR(priv->ci_pdev);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to register ci_hdrc platform device: %d\n",
+ ret);
+ goto clk_err;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+clk_err:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int ci_hdrc_usb2_remove(struct platform_device *pdev)
+{
+ struct ci_hdrc_usb2_priv *priv = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ ci_hdrc_remove_device(priv->ci_pdev);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+ { .compatible = "chipidea,usb2" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
+static struct platform_driver ci_hdrc_usb2_driver = {
+ .probe = ci_hdrc_usb2_probe,
+ .remove = ci_hdrc_usb2_remove,
+ .driver = {
+ .name = "chipidea-usb2",
+ .of_match_table = of_match_ptr(ci_hdrc_usb2_of_match),
+ },
+};
+module_platform_driver(ci_hdrc_usb2_driver);
+
+MODULE_DESCRIPTION("ChipIdea HDRC USB2 binding for ci13xxx");
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
index 3bf6489ef5ec..d976fc1db73a 100644
--- a/drivers/usb/chipidea/ci_hdrc_zevio.c
+++ b/drivers/usb/chipidea/ci_hdrc_zevio.c
@@ -61,7 +61,6 @@ static struct platform_driver ci_hdrc_zevio_driver = {
.remove = ci_hdrc_zevio_remove,
.driver = {
.name = "zevio_usb",
- .owner = THIS_MODULE,
.of_match_table = ci_hdrc_zevio_dt_ids,
},
};
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9bdc6bd73432..5b9825a4538a 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -47,6 +47,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/idr.h>
@@ -189,24 +190,29 @@ u8 hw_port_test_get(struct ci_hdrc *ci)
return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
}
+static void hw_wait_phy_stable(void)
+{
+ /*
+ * The phy needs some delay to output stable status after leaving low
+ * power mode, and the OTGSC status inputs are debounced using a 1 ms
+ * time constant, so delay 2 ms for the controller to see stable
+ * status (such as vbus and id) once the phy leaves low power.
+ */
+ usleep_range(2000, 2500);
+}
+
/* The PHY enters/leaves low power mode */
static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
{
enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
- if (enable && !lpm) {
+ if (enable && !lpm)
hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
PORTSC_PHCD(ci->hw_bank.lpm));
- } else if (!enable && lpm) {
+ else if (!enable && lpm)
hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
0);
- /*
- * the PHY needs some time (less
- * than 1ms) to leave low power mode.
- */
- usleep_range(1000, 1100);
- }
}
static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
@@ -299,6 +305,49 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
}
/**
+ * _ci_usb_phy_init: initialize phy taking into account both phy and usb_phy
+ * interfaces
+ * @ci: the controller
+ *
+ * This function returns an error code if the phy failed to init
+ */
+static int _ci_usb_phy_init(struct ci_hdrc *ci)
+{
+ int ret;
+
+ if (ci->phy) {
+ ret = phy_init(ci->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(ci->phy);
+ if (ret) {
+ phy_exit(ci->phy);
+ return ret;
+ }
+ } else {
+ ret = usb_phy_init(ci->usb_phy);
+ }
+
+ return ret;
+}
+
+/**
+ * ci_usb_phy_exit: deinitialize phy taking into account both phy and usb_phy
+ * interfaces
+ * @ci: the controller
+ */
+static void ci_usb_phy_exit(struct ci_hdrc *ci)
+{
+ if (ci->phy) {
+ phy_power_off(ci->phy);
+ phy_exit(ci->phy);
+ } else {
+ usb_phy_shutdown(ci->usb_phy);
+ }
+}
+
+/**
* ci_usb_phy_init: initialize phy according to different phy type
* @ci: the controller
*
@@ -312,40 +361,68 @@ static int ci_usb_phy_init(struct ci_hdrc *ci)
case USBPHY_INTERFACE_MODE_UTMI:
case USBPHY_INTERFACE_MODE_UTMIW:
case USBPHY_INTERFACE_MODE_HSIC:
- ret = usb_phy_init(ci->transceiver);
- if (ret)
+ ret = _ci_usb_phy_init(ci);
+ if (!ret)
+ hw_wait_phy_stable();
+ else
return ret;
hw_phymode_configure(ci);
break;
case USBPHY_INTERFACE_MODE_ULPI:
case USBPHY_INTERFACE_MODE_SERIAL:
hw_phymode_configure(ci);
- ret = usb_phy_init(ci->transceiver);
+ ret = _ci_usb_phy_init(ci);
if (ret)
return ret;
break;
default:
- ret = usb_phy_init(ci->transceiver);
+ ret = _ci_usb_phy_init(ci);
+ if (!ret)
+ hw_wait_phy_stable();
}
return ret;
}
/**
- * hw_device_reset: resets chip (execute without interruption)
+ * hw_controller_reset: do controller reset
* @ci: the controller
*
* This function returns an error code
*/
-int hw_device_reset(struct ci_hdrc *ci, u32 mode)
+static int hw_controller_reset(struct ci_hdrc *ci)
{
+ int count = 0;
+
+ hw_write(ci, OP_USBCMD, USBCMD_RST, USBCMD_RST);
+ while (hw_read(ci, OP_USBCMD, USBCMD_RST)) {
+ udelay(10);
+ if (count++ > 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @ci: the controller
+ *
+ * This function returns an error code
+ */
+int hw_device_reset(struct ci_hdrc *ci)
+{
+ int ret;
+
/* should flush & stop before reset */
hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
- hw_write(ci, OP_USBCMD, USBCMD_RST, USBCMD_RST);
- while (hw_read(ci, OP_USBCMD, USBCMD_RST))
- udelay(10); /* not RTOS friendly */
+ ret = hw_controller_reset(ci);
+ if (ret) {
+ dev_err(ci->dev, "error resetting controller, ret=%d\n", ret);
+ return ret;
+ }
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
@@ -363,12 +440,12 @@ int hw_device_reset(struct ci_hdrc *ci, u32 mode)
/* USBMODE should be configured step by step */
hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
- hw_write(ci, OP_USBMODE, USBMODE_CM, mode);
+ hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_DC);
/* HW >= 2.3 */
hw_write(ci, OP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);
- if (hw_read(ci, OP_USBMODE, USBMODE_CM) != mode) {
- pr_err("cannot enter in %s mode", ci_role(ci)->name);
+ if (hw_read(ci, OP_USBMODE, USBMODE_CM) != USBMODE_CM_DC) {
+ pr_err("cannot enter in %s device mode", ci_role(ci)->name);
pr_err("lpm = %i", ci->hw_bank.lpm);
return -ENODEV;
}
@@ -472,7 +549,7 @@ static int ci_get_platdata(struct device *dev,
if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
- /* no vbus regualator is needed */
+ /* no vbus regulator is needed */
platdata->reg_vbus = NULL;
} else if (IS_ERR(platdata->reg_vbus)) {
dev_err(dev, "Getting regulator error: %ld\n",
@@ -589,11 +666,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return PTR_ERR(base);
ci = devm_kzalloc(dev, sizeof(*ci), GFP_KERNEL);
- if (!ci) {
- dev_err(dev, "can't allocate device\n");
+ if (!ci)
return -ENOMEM;
- }
+ platform_set_drvdata(pdev, ci);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -605,36 +681,32 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (ci->platdata->phy)
- ci->transceiver = ci->platdata->phy;
- else
- ci->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
-
- if (IS_ERR(ci->transceiver)) {
- ret = PTR_ERR(ci->transceiver);
- /*
- * if -ENXIO is returned, it means PHY layer wasn't
- * enabled, so it makes no sense to return -EPROBE_DEFER
- * in that case, since no PHY driver will ever probe.
- */
- if (ret == -ENXIO)
- return ret;
+ if (ci->platdata->phy) {
+ ci->phy = ci->platdata->phy;
+ } else if (ci->platdata->usb_phy) {
+ ci->usb_phy = ci->platdata->usb_phy;
+ } else {
+ ci->phy = devm_phy_get(dev->parent, "usb-phy");
+ ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
+
+ /* if both generic PHY and USB PHY layers aren't enabled */
+ if (PTR_ERR(ci->phy) == -ENOSYS &&
+ PTR_ERR(ci->usb_phy) == -ENXIO)
+ return -ENXIO;
+
+ if (IS_ERR(ci->phy) && IS_ERR(ci->usb_phy))
+ return -EPROBE_DEFER;
- dev_err(dev, "no usb2 phy configured\n");
- return -EPROBE_DEFER;
+ if (IS_ERR(ci->phy))
+ ci->phy = NULL;
+ else if (IS_ERR(ci->usb_phy))
+ ci->usb_phy = NULL;
}
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
return ret;
- } else {
- /*
- * The delay to sync PHY's status, the maximum delay is
- * 2ms since the otgsc uses 1ms timer to debounce the
- * PHY's input
- */
- usleep_range(2000, 2500);
}
ci->hw_bank.phys = res->start;
@@ -711,9 +783,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, ci);
- ret = request_irq(ci->irq, ci_irq, IRQF_SHARED, ci->platdata->name,
- ci);
+ ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
+ ci->platdata->name, ci);
if (ret)
goto stop;
@@ -724,11 +795,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ret)
return 0;
- free_irq(ci->irq, ci);
stop:
ci_role_destroy(ci);
deinit_phy:
- usb_phy_shutdown(ci->transceiver);
+ ci_usb_phy_exit(ci);
return ret;
}
@@ -738,20 +808,66 @@ static int ci_hdrc_remove(struct platform_device *pdev)
struct ci_hdrc *ci = platform_get_drvdata(pdev);
dbg_remove_files(ci);
- free_irq(ci->irq, ci);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
- usb_phy_shutdown(ci->transceiver);
+ ci_usb_phy_exit(ci);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void ci_controller_suspend(struct ci_hdrc *ci)
+{
+ ci_hdrc_enter_lpm(ci, true);
+
+ if (ci->usb_phy)
+ usb_phy_set_suspend(ci->usb_phy, 1);
+}
+
+static int ci_controller_resume(struct device *dev)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "at %s\n", __func__);
+
+ ci_hdrc_enter_lpm(ci, false);
+
+ if (ci->usb_phy) {
+ usb_phy_set_suspend(ci->usb_phy, 0);
+ usb_phy_set_wakeup(ci->usb_phy, false);
+ hw_wait_phy_stable();
+ }
return 0;
}
+static int ci_suspend(struct device *dev)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+
+ if (ci->wq)
+ flush_workqueue(ci->wq);
+
+ ci_controller_suspend(ci);
+
+ return 0;
+}
+
+static int ci_resume(struct device *dev)
+{
+ return ci_controller_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ci_suspend, ci_resume)
+};
static struct platform_driver ci_hdrc_driver = {
.probe = ci_hdrc_probe,
.remove = ci_hdrc_remove,
.driver = {
.name = "ci_hdrc",
- .owner = THIS_MODULE,
+ .pm = &ci_pm_ops,
},
};
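The probe hunk above now looks up both a generic PHY ("usb-phy") and a legacy USB PHY, keeps whichever one exists, returns -ENXIO when neither framework is built in, and defers otherwise. A hedged sketch of that fallback outside the chipidea specifics (the chipidea code performs the lookups on dev->parent; example_get_phys() and its plain dev parameter are illustrative):

    /* Sketch only: prefer the generic PHY framework, fall back to usb_phy. */
    static int example_get_phys(struct device *dev, struct phy **phy,
                                struct usb_phy **usb_phy)
    {
        *phy = devm_phy_get(dev, "usb-phy");
        *usb_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

        /* Neither framework is enabled: no PHY driver will ever probe. */
        if (PTR_ERR(*phy) == -ENOSYS && PTR_ERR(*usb_phy) == -ENXIO)
            return -ENXIO;

        /* Both lookups failed for other reasons: try again later. */
        if (IS_ERR(*phy) && IS_ERR(*usb_phy))
            return -EPROBE_DEFER;

        /* Keep the interface that answered, drop the other. */
        if (IS_ERR(*phy))
            *phy = NULL;
        else if (IS_ERR(*usb_phy))
            *usb_phy = NULL;

        return 0;
    }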
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 795d6538d630..268e4236e84c 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -220,7 +220,7 @@ static int ci_otg_show(struct seq_file *s, void *unused)
/* ------ State ----- */
seq_printf(s, "OTG state: %s\n\n",
- usb_otg_state_string(ci->transceiver->state));
+ usb_otg_state_string(ci->otg.state));
/* ------ State Machine Variables ----- */
seq_printf(s, "a_bus_drop: %d\n", fsm->a_bus_drop);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index ebde7b6ce687..c1694cff1eaf 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -34,6 +34,44 @@
static struct hc_driver __read_mostly ci_ehci_hc_driver;
+struct ehci_ci_priv {
+ struct regulator *reg_vbus;
+};
+
+static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_ci_priv *priv = (struct ehci_ci_priv *)ehci->priv;
+ struct device *dev = hcd->self.controller;
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+ int ret = 0;
+ int port = HCS_N_PORTS(ehci->hcs_params);
+
+ if (priv->reg_vbus && !ci_otg_is_fsm_mode(ci)) {
+ if (port > 1) {
+ dev_warn(dev,
+ "Not support multi-port regulator control\n");
+ return 0;
+ }
+ if (enable)
+ ret = regulator_enable(priv->reg_vbus);
+ else
+ ret = regulator_disable(priv->reg_vbus);
+ if (ret) {
+ dev_err(dev,
+ "Failed to %s vbus regulator, ret=%d\n",
+ enable ? "enable" : "disable", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const struct ehci_driver_overrides ehci_ci_overrides = {
+ .extra_priv_size = sizeof(struct ehci_ci_priv),
+ .port_power = ehci_ci_portpower,
+};
+
static irqreturn_t host_irq(struct ci_hdrc *ci)
{
return usb_hcd_irq(ci->irq, ci->hcd);
@@ -43,6 +81,7 @@ static int host_start(struct ci_hdrc *ci)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct ehci_ci_priv *priv;
int ret;
if (usb_disabled())
@@ -52,15 +91,17 @@ static int host_start(struct ci_hdrc *ci)
if (!hcd)
return -ENOMEM;
- dev_set_drvdata(ci->dev, ci);
hcd->rsrc_start = ci->hw_bank.phys;
hcd->rsrc_len = ci->hw_bank.size;
hcd->regs = ci->hw_bank.abs;
hcd->has_tt = 1;
hcd->power_budget = ci->platdata->power_budget;
- hcd->usb_phy = ci->transceiver;
hcd->tpl_support = ci->platdata->tpl_support;
+ if (ci->phy)
+ hcd->phy = ci->phy;
+ else
+ hcd->usb_phy = ci->usb_phy;
ehci = hcd_to_ehci(hcd);
ehci->caps = ci->hw_bank.cap;
@@ -68,28 +109,21 @@ static int host_start(struct ci_hdrc *ci)
ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
ehci->imx28_write_fix = ci->imx28_write_fix;
- /*
- * vbus is always on if host is not in OTG FSM mode,
- * otherwise should be controlled by OTG FSM
- */
- if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci)) {
- ret = regulator_enable(ci->platdata->reg_vbus);
- if (ret) {
- dev_err(ci->dev,
- "Failed to enable vbus regulator, ret=%d\n",
- ret);
- goto put_hcd;
- }
- }
+ priv = (struct ehci_ci_priv *)ehci->priv;
+ priv->reg_vbus = NULL;
+
+ if (ci->platdata->reg_vbus)
+ priv->reg_vbus = ci->platdata->reg_vbus;
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
- goto disable_reg;
+ goto put_hcd;
} else {
- struct usb_otg *otg = ci->transceiver->otg;
+ struct usb_otg *otg = &ci->otg;
ci->hcd = hcd;
- if (otg) {
+
+ if (ci_otg_is_fsm_mode(ci)) {
otg->host = &hcd->self;
hcd->self.otg_port = 1;
}
@@ -100,10 +134,6 @@ static int host_start(struct ci_hdrc *ci)
return ret;
-disable_reg:
- if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci))
- regulator_disable(ci->platdata->reg_vbus);
-
put_hcd:
usb_put_hcd(hcd);
@@ -117,8 +147,6 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
- if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci))
- regulator_disable(ci->platdata->reg_vbus);
}
}
@@ -146,7 +174,7 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
- ehci_init_driver(&ci_ehci_hc_driver, NULL);
+ ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
return 0;
}
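The host glue above stores its vbus regulator in per-HCD private space by declaring extra_priv_size in an ehci_driver_overrides passed to ehci_init_driver(), then casting ehci->priv. A minimal sketch of that storage pattern (the ehci_example_* names are hypothetical; extra_priv_size, ehci_init_driver() and ehci->priv are the ehci-hcd mechanisms used in the hunk):

    struct ehci_example_priv {
        struct regulator *reg_vbus;
    };

    static const struct ehci_driver_overrides ehci_example_overrides = {
        .extra_priv_size = sizeof(struct ehci_example_priv),
        /* .port_power could be set here as well, as the glue above does */
    };

    static void ehci_example_store_vbus(struct usb_hcd *hcd,
                                        struct regulator *reg)
    {
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        /* ehci->priv points at the extra_priv_size bytes after ehci_hcd */
        struct ehci_example_priv *priv = (struct ehci_example_priv *)ehci->priv;

        priv->reg_vbus = reg;
    }

    /* registered once at init time, e.g.:
     *     ehci_init_driver(&my_ehci_hc_driver, &ehci_example_overrides);
     */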
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index caaabc58021e..562e581f6765 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -303,7 +303,7 @@ static void a_wait_vfall_tmout_func(void *ptr, unsigned long indicator)
set_tmout(ci, indicator);
/* Disable port power */
hw_write(ci, OP_PORTSC, PORTSC_W1C_BITS | PORTSC_PP, 0);
- /* Clear exsiting DP irq */
+ /* Clear existing DP irq */
hw_write_otgsc(ci, OTGSC_DPIS, OTGSC_DPIS);
/* Enable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, OTGSC_DPIE);
@@ -328,7 +328,7 @@ static void b_ssend_srp_tmout_func(void *ptr, unsigned long indicator)
set_tmout(ci, indicator);
/* only vbus fall below B_sess_vld in b_idle state */
- if (ci->transceiver->state == OTG_STATE_B_IDLE)
+ if (ci->fsm.otg->state == OTG_STATE_B_IDLE)
ci_otg_queue_work(ci);
}
@@ -543,7 +543,7 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
ci_role_start(ci, CI_ROLE_HOST);
} else {
ci_role_stop(ci);
- hw_device_reset(ci, USBMODE_CM_DC);
+ hw_device_reset(ci);
ci_role_start(ci, CI_ROLE_GADGET);
}
mutex_lock(&fsm->lock);
@@ -582,11 +582,11 @@ int ci_otg_fsm_work(struct ci_hdrc *ci)
* when there is no gadget class driver
*/
if (ci->fsm.id && !(ci->driver) &&
- ci->transceiver->state < OTG_STATE_A_IDLE)
+ ci->fsm.otg->state < OTG_STATE_A_IDLE)
return 0;
if (otg_statemachine(&ci->fsm)) {
- if (ci->transceiver->state == OTG_STATE_A_IDLE) {
+ if (ci->fsm.otg->state == OTG_STATE_A_IDLE) {
/*
* Further state change for cases:
* a_idle to b_idle; or
@@ -600,7 +600,7 @@ int ci_otg_fsm_work(struct ci_hdrc *ci)
ci_otg_queue_work(ci);
if (ci->id_event)
ci->id_event = false;
- } else if (ci->transceiver->state == OTG_STATE_B_IDLE) {
+ } else if (ci->fsm.otg->state == OTG_STATE_B_IDLE) {
if (ci->fsm.b_sess_vld) {
ci->fsm.power_up = 0;
/*
@@ -627,7 +627,7 @@ static void ci_otg_fsm_event(struct ci_hdrc *ci)
otg_bsess_vld = hw_read_otgsc(ci, OTGSC_BSV);
port_conn = hw_read(ci, OP_PORTSC, PORTSC_CCS);
- switch (ci->transceiver->state) {
+ switch (ci->fsm.otg->state) {
case OTG_STATE_A_WAIT_BCON:
if (port_conn) {
fsm->b_conn = 1;
@@ -663,7 +663,7 @@ static void ci_otg_fsm_event(struct ci_hdrc *ci)
fsm->b_bus_suspend = 1;
/*
* Init a timer to know how long this suspend
- * will contine, if time out, indicates B no longer
+ * will continue, if time out, indicates B no longer
* wants to be host role
*/
ci_otg_add_timer(ci, A_BIDL_ADIS);
@@ -778,34 +778,25 @@ void ci_hdrc_otg_fsm_start(struct ci_hdrc *ci)
int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci)
{
int retval = 0;
- struct usb_otg *otg;
- otg = devm_kzalloc(ci->dev,
- sizeof(struct usb_otg), GFP_KERNEL);
- if (!otg) {
- dev_err(ci->dev,
- "Failed to allocate usb_otg structure for ci hdrc otg!\n");
- return -ENOMEM;
- }
+ if (ci->phy)
+ ci->otg.phy = ci->phy;
+ else
+ ci->otg.usb_phy = ci->usb_phy;
- otg->phy = ci->transceiver;
- otg->gadget = &ci->gadget;
- ci->fsm.otg = otg;
- ci->transceiver->otg = ci->fsm.otg;
+ ci->otg.gadget = &ci->gadget;
+ ci->fsm.otg = &ci->otg;
ci->fsm.power_up = 1;
ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0;
- ci->transceiver->state = OTG_STATE_UNDEFINED;
+ ci->fsm.otg->state = OTG_STATE_UNDEFINED;
ci->fsm.ops = &ci_otg_ops;
mutex_init(&ci->fsm.lock);
ci->fsm_timer = devm_kzalloc(ci->dev,
sizeof(struct ci_otg_fsm_timer_list), GFP_KERNEL);
- if (!ci->fsm_timer) {
- dev_err(ci->dev,
- "Failed to allocate timer structure for ci hdrc otg!\n");
+ if (!ci->fsm_timer)
return -ENOMEM;
- }
INIT_LIST_HEAD(&ci->fsm_timer->active_timers);
retval = ci_otg_init_timers(ci);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 0444d3f8971a..4fe18ce3bd5a 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -692,10 +692,8 @@ __acquires(ci->lock)
int retval;
spin_unlock(&ci->lock);
- if (ci->gadget.speed != USB_SPEED_UNKNOWN) {
- if (ci->driver)
- ci->driver->disconnect(&ci->gadget);
- }
+ if (ci->gadget.speed != USB_SPEED_UNKNOWN)
+ usb_gadget_udc_reset(&ci->gadget, ci->driver);
retval = _gadget_stop_activity(&ci->gadget);
if (retval)
@@ -709,8 +707,6 @@ __acquires(ci->lock)
if (ci->status == NULL)
retval = -ENOMEM;
- usb_gadget_set_state(&ci->gadget, USB_STATE_DEFAULT);
-
done:
spin_lock(&ci->lock);
@@ -1475,7 +1471,7 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
if (gadget_ready) {
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
- hw_device_reset(ci, USBMODE_CM_DC);
+ hw_device_reset(ci);
hw_device_state(ci, ci->ep0out->qh.dma);
usb_gadget_set_state(_gadget, USB_STATE_POWERED);
} else {
@@ -1519,8 +1515,8 @@ static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
- if (ci->transceiver)
- return usb_phy_set_power(ci->transceiver, ma);
+ if (ci->usb_phy)
+ return usb_phy_set_power(ci->usb_phy, ma);
return -ENOTSUPP;
}
@@ -1544,8 +1540,7 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
static int ci_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
-static int ci_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
+static int ci_udc_stop(struct usb_gadget *gadget);
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
@@ -1665,7 +1660,7 @@ static int ci_udc_start(struct usb_gadget *gadget,
pm_runtime_get_sync(&ci->gadget.dev);
if (ci->vbus_active) {
spin_lock_irqsave(&ci->lock, flags);
- hw_device_reset(ci, USBMODE_CM_DC);
+ hw_device_reset(ci);
} else {
pm_runtime_put_sync(&ci->gadget.dev);
return retval;
@@ -1682,8 +1677,7 @@ static int ci_udc_start(struct usb_gadget *gadget,
/**
* ci_udc_stop: unregister a gadget driver
*/
-static int ci_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int ci_udc_stop(struct usb_gadget *gadget)
{
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 926c997ef310..c3c6225b8acf 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -117,10 +117,9 @@ static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
if (data->index > 2)
return -EINVAL;
- reg = usbmisc->base + MX25_USB_PHY_CTRL_OFFSET;
-
if (data->evdo) {
spin_lock_irqsave(&usbmisc->lock, flags);
+ reg = usbmisc->base + MX25_USB_PHY_CTRL_OFFSET;
val = readl(reg);
writel(val | MX25_BM_EXTERNAL_VBUS_DIVIDER, reg);
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -172,8 +171,7 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
return -EINVAL;
/* Select a 24 MHz reference clock for the PHY */
- reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET;
- val = readl(reg);
+ val = readl(usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
val &= ~MX53_USB_PHYCTRL1_PLLDIV_MASK;
val |= MX53_USB_PLL_DIV_24_MHZ;
writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
@@ -372,7 +370,6 @@ static struct platform_driver usbmisc_imx_driver = {
.remove = usbmisc_imx_remove,
.driver = {
.name = "usbmisc_imx",
- .owner = THIS_MODULE,
.of_match_table = usbmisc_imx_dt_ids,
},
};
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 077d58ac3dcb..546a17e8ad5b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1157,8 +1157,6 @@ static int acm_probe(struct usb_interface *intf,
case USB_CDC_CALL_MANAGEMENT_TYPE:
call_management_function = buffer[3];
call_interface_num = buffer[4];
- if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
- dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
break;
default:
/* there are LOTS more CDC descriptors that
@@ -1197,10 +1195,11 @@ next_desc:
} else {
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
- if (!control_interface || !data_interface) {
- dev_dbg(&intf->dev, "no interfaces\n");
- return -ENODEV;
- }
+ }
+
+ if (!control_interface || !data_interface) {
+ dev_dbg(&intf->dev, "no interfaces\n");
+ return -ENODEV;
}
if (data_interface_num != call_interface_num)
@@ -1475,6 +1474,7 @@ alloc_fail8:
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
&dev_attr_iCountryCodeRelDate);
+ kfree(acm->country_codes);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
alloc_fail7:
@@ -1813,11 +1813,6 @@ static const struct usb_device_id acm_ids[] = {
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
- /* Support Lego NXT using pbLua firmware */
- { USB_DEVICE(0x0694, 0xff00),
- .driver_info = NOT_A_MODEM,
- },
-
/* Support for Droids MuIn LCD */
{ USB_DEVICE(0x04d8, 0x000b),
.driver_info = NO_DATA_INTERFACE,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index d3251ebd09e2..ffeb3c83941f 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -130,7 +130,6 @@ struct acm {
#define NO_UNION_NORMAL BIT(0)
#define SINGLE_RX_URB BIT(1)
#define NO_CAP_LINE BIT(2)
-#define NOT_A_MODEM BIT(3)
#define NO_DATA_INTERFACE BIT(4)
#define IGNORE_DEVICE BIT(5)
#define QUIRK_CONTROL_LINE_STATE BIT(6)
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index ec978408a2ee..960bc089111b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1104,10 +1104,8 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&intf->dev, "Unable to allocate kernel memory\n");
+ if (!data)
return -ENOMEM;
- }
data->intf = intf;
data->id = id;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 98e8340a5bb1..c6b35b77dab7 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -124,10 +124,10 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
state_changed = 1;
- if (fsm->otg->phy->state == new_state)
+ if (fsm->otg->state == new_state)
return 0;
VDBG("Set state: %s\n", usb_otg_state_string(new_state));
- otg_leave_state(fsm, fsm->otg->phy->state);
+ otg_leave_state(fsm, fsm->otg->state);
switch (new_state) {
case OTG_STATE_B_IDLE:
otg_drv_vbus(fsm, 0);
@@ -236,7 +236,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
break;
}
- fsm->otg->phy->state = new_state;
+ fsm->otg->state = new_state;
return 0;
}
@@ -247,7 +247,7 @@ int otg_statemachine(struct otg_fsm *fsm)
mutex_lock(&fsm->lock);
- state = fsm->otg->phy->state;
+ state = fsm->otg->state;
state_changed = 0;
/* State machine state change judgement */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 278be0515e8e..11cee55ae397 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2646,7 +2646,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
}
- if (IS_ENABLED(CONFIG_GENERIC_PHY)) {
+ if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) {
struct phy *phy = phy_get(hcd->self.controller, "usb");
if (IS_ERR(phy)) {
@@ -2666,6 +2666,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_phy;
}
hcd->phy = phy;
+ hcd->remove_phy = 1;
}
}
@@ -2812,7 +2813,7 @@ err_allocate_root_hub:
err_register_bus:
hcd_buffer_destroy(hcd);
err_create_buf:
- if (IS_ENABLED(CONFIG_GENERIC_PHY) && hcd->phy) {
+ if (IS_ENABLED(CONFIG_GENERIC_PHY) && hcd->remove_phy && hcd->phy) {
phy_power_off(hcd->phy);
phy_exit(hcd->phy);
phy_put(hcd->phy);
@@ -2896,7 +2897,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
usb_deregister_bus(&hcd->self);
hcd_buffer_destroy(hcd);
- if (IS_ENABLED(CONFIG_GENERIC_PHY) && hcd->phy) {
+ if (IS_ENABLED(CONFIG_GENERIC_PHY) && hcd->remove_phy && hcd->phy) {
phy_power_off(hcd->phy);
phy_exit(hcd->phy);
phy_put(hcd->phy);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c9596525ba8c..aeb50bb6ba9c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2543,11 +2543,14 @@ int usb_authorize_device(struct usb_device *usb_dev)
"can't autoresume for authorization: %d\n", result);
goto error_autoresume;
}
- result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
- if (result < 0) {
- dev_err(&usb_dev->dev, "can't re-read device descriptor for "
- "authorization: %d\n", result);
- goto error_device_descriptor;
+
+ if (usb_dev->wusb) {
+ result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
+ if (result < 0) {
+ dev_err(&usb_dev->dev, "can't re-read device descriptor for "
+ "authorization: %d\n", result);
+ goto error_device_descriptor;
+ }
}
usb_dev->authorized = 1;
@@ -3907,14 +3910,9 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
- int feature;
-
switch (state) {
case USB3_LPM_U1:
- feature = USB_PORT_FEAT_U1_TIMEOUT;
- break;
case USB3_LPM_U2:
- feature = USB_PORT_FEAT_U2_TIMEOUT;
break;
default:
dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n",
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96fafed92b76..0ffb4ed0a945 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,6 +103,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x009b), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x010c), .driver_info =
+ USB_QUIRK_DEVICE_QUALIFIER },
+
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index f93807b3631a..b323c4c11b0a 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,6 +1,6 @@
config USB_DWC2
- bool "DesignWare USB2 DRD Core Support"
- depends on USB
+ tristate "DesignWare USB2 DRD Core Support"
+ depends on USB || USB_GADGET
help
Say Y here if your system has a Dual Role Hi-Speed USB
controller based on the DesignWare HSOTG IP Core.
@@ -10,49 +10,61 @@ config USB_DWC2
bus interface module (if you have a PCI bus system) will be
called dwc2_pci.ko, and the platform interface module (for
controllers directly connected to the CPU) will be called
- dwc2_platform.ko. For gadget mode, there will be a single
- module called dwc2_gadget.ko.
-
- NOTE: The s3c-hsotg driver is now renamed to dwc2_gadget. The
- host and gadget drivers are still currently separate drivers.
- There are plans to merge the dwc2_gadget driver with the dwc2
- host driver in the near future to create a dual-role driver.
+ dwc2_platform.ko. For all modes (host, gadget and dual-role), there
+ will be an additional module named dwc2.ko.
if USB_DWC2
+choice
+ bool "DWC2 Mode Selection"
+ default USB_DWC2_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC2_HOST if (USB && !USB_GADGET)
+ default USB_DWC2_PERIPHERAL if (!USB && USB_GADGET)
+
config USB_DWC2_HOST
- tristate "Host only mode"
+ bool "Host only mode"
depends on USB
help
The Designware USB2.0 high-speed host controller
- integrated into many SoCs.
+ integrated into many SoCs. Select this option if you want the
+ driver to operate in Host-only mode.
-config USB_DWC2_PLATFORM
- bool "DWC2 Platform"
- depends on USB_DWC2_HOST
- default USB_DWC2_HOST
+comment "Gadget/Dual-role mode requires USB Gadget support to be enabled"
+
+config USB_DWC2_PERIPHERAL
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_DWC2
help
- The Designware USB2.0 platform interface module for
- controllers directly connected to the CPU. This is only
- used for host mode.
+ The Designware USB2.0 high-speed gadget controller
+ integrated into many SoCs. Select this option if you want the
+ driver to operate in Peripheral-only mode. This option requires
+ USB_GADGET to be enabled.
+
+config USB_DWC2_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on (USB=y || USB=USB_DWC2) && (USB_GADGET=y || USB_GADGET=USB_DWC2)
+ help
+ Select this option if you want the driver to work in a dual-role
+ mode. In this mode both host and gadget features are enabled, and
+ the role will be determined by the cable that gets plugged-in. This
+ option requires USB_GADGET to be enabled.
+endchoice
+
+config USB_DWC2_PLATFORM
+ tristate "DWC2 Platform"
+ default USB_DWC2_HOST || USB_DWC2_PERIPHERAL
+ help
+ The Designware USB2.0 platform interface module for
+ controllers directly connected to the CPU.
config USB_DWC2_PCI
- bool "DWC2 PCI"
+ tristate "DWC2 PCI"
depends on USB_DWC2_HOST && PCI
default USB_DWC2_HOST
help
The Designware USB2.0 PCI interface module for controllers
connected to a PCI bus. This is only used for host mode.
-comment "Gadget mode requires USB Gadget support to be enabled"
-
-config USB_DWC2_PERIPHERAL
- tristate "Gadget only mode"
- depends on USB_GADGET
- help
- The Designware USB2.0 high-speed gadget controller
- integrated into many SoCs.
-
config USB_DWC2_DEBUG
bool "Enable Debugging Messages"
help
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index b73d2a527970..8f752679752a 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -1,28 +1,28 @@
ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
-obj-$(CONFIG_USB_DWC2_HOST) += dwc2.o
+obj-$(CONFIG_USB_DWC2) += dwc2.o
dwc2-y := core.o core_intr.o
-dwc2-y += hcd.o hcd_intr.o
-dwc2-y += hcd_queue.o hcd_ddma.o
+
+ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
+ dwc2-y += hcd.o hcd_intr.o
+ dwc2-y += hcd_queue.o hcd_ddma.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_DWC2_PERIPHERAL) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
+ dwc2-y += gadget.o
+endif
# NOTE: The previous s3c-hsotg peripheral mode only driver has been moved to
# this location and renamed gadget.c. When building for dynamically linked
-# modules, dwc2_gadget.ko will get built for peripheral mode. For host mode,
-# the core module will be dwc2.ko, the PCI bus interface module will called
-# dwc2_pci.ko and the platform interface module will be called dwc2_platform.ko.
-# At present the host and gadget driver will be separate drivers, but there
-# are plans in the near future to create a dual-role driver.
+# modules, dwc2.ko will get built for host mode, peripheral mode, and dual-role
+# mode. The PCI bus interface module will be called dwc2_pci.ko and the platform
+# interface module will be called dwc2_platform.ko.
ifneq ($(CONFIG_USB_DWC2_PCI),)
- obj-$(CONFIG_USB_DWC2_HOST) += dwc2_pci.o
+ obj-$(CONFIG_USB_DWC2) += dwc2_pci.o
dwc2_pci-y := pci.o
endif
-ifneq ($(CONFIG_USB_DWC2_PLATFORM),)
- obj-$(CONFIG_USB_DWC2_HOST) += dwc2_platform.o
- dwc2_platform-y := platform.o
-endif
-
-obj-$(CONFIG_USB_DWC2_PERIPHERAL) += dwc2_gadget.o
-dwc2_gadget-y := gadget.o
+obj-$(CONFIG_USB_DWC2_PLATFORM) += dwc2_platform.o
+dwc2_platform-y := platform.o
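The Kconfig choice and the Makefile conditionals above determine which objects are linked into dwc2.ko. As a minimal sketch (assuming the hypothetical helper name dwc2_example_role, which is not part of this patch), driver code can branch on the selected role at build time with IS_ENABLED(), the same pattern the core.h changes below use to guard the host- and gadget-only members:

#include <linux/kconfig.h>

/* Hypothetical helper: report which role the driver was built for. */
static inline const char *dwc2_example_role(void)
{
	if (IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE))
		return "dual-role";
	if (IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL))
		return "peripheral";
	return "host";
}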
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index d9269459d481..7605850b7a9c 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -458,16 +458,6 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
/* Clear the SRP success bit for FS-I2c */
hsotg->srp_success = 0;
- if (irq >= 0) {
- dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
- irq);
- retval = devm_request_irq(hsotg->dev, irq,
- dwc2_handle_common_intr, IRQF_SHARED,
- dev_name(hsotg->dev), hsotg);
- if (retval)
- return retval;
- }
-
/* Enable common interrupts */
dwc2_enable_common_interrupts(hsotg);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 55c90c53f2d6..7a70a1349334 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -84,7 +84,7 @@ static const char * const s3c_hsotg_supply_names[] = {
*/
#define EP0_MPS_LIMIT 64
-struct s3c_hsotg;
+struct dwc2_hsotg;
struct s3c_hsotg_req;
/**
@@ -130,7 +130,7 @@ struct s3c_hsotg_req;
struct s3c_hsotg_ep {
struct usb_ep ep;
struct list_head queue;
- struct s3c_hsotg *parent;
+ struct dwc2_hsotg *parent;
struct s3c_hsotg_req *req;
struct dentry *debugfs;
@@ -155,67 +155,6 @@ struct s3c_hsotg_ep {
};
/**
- * struct s3c_hsotg - driver state.
- * @dev: The parent device supplied to the probe function
- * @driver: USB gadget driver
- * @phy: The otg phy transceiver structure for phy control.
- * @uphy: The otg phy transceiver structure for old USB phy control.
- * @plat: The platform specific configuration data. This can be removed once
- * all SoCs support usb transceiver.
- * @regs: The memory area mapped for accessing registers.
- * @irq: The IRQ number we are using
- * @supplies: Definition of USB power supplies
- * @phyif: PHY interface width
- * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
- * @num_of_eps: Number of available EPs (excluding EP0)
- * @debug_root: root directrory for debugfs.
- * @debug_file: main status file for debugfs.
- * @debug_fifo: FIFO status file for debugfs.
- * @ep0_reply: Request used for ep0 reply.
- * @ep0_buff: Buffer for EP0 reply data, if needed.
- * @ctrl_buff: Buffer for EP0 control requests.
- * @ctrl_req: Request for EP0 control packets.
- * @setup: NAK management for EP0 SETUP
- * @last_rst: Time of last reset
- * @eps: The endpoints being supplied to the gadget framework
- */
-struct s3c_hsotg {
- struct device *dev;
- struct usb_gadget_driver *driver;
- struct phy *phy;
- struct usb_phy *uphy;
- struct s3c_hsotg_plat *plat;
-
- spinlock_t lock;
-
- void __iomem *regs;
- int irq;
- struct clk *clk;
-
- struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];
-
- u32 phyif;
- int fifo_mem;
- unsigned int dedicated_fifos:1;
- unsigned char num_of_eps;
- u32 fifo_map;
-
- struct dentry *debug_root;
- struct dentry *debug_file;
- struct dentry *debug_fifo;
-
- struct usb_request *ep0_reply;
- struct usb_request *ctrl_req;
- u8 ep0_buff[8];
- u8 ctrl_buff[8];
-
- struct usb_gadget gadget;
- unsigned int setup;
- unsigned long last_rst;
- struct s3c_hsotg_ep *eps;
-};
-
-/**
* struct s3c_hsotg_req - data transfer request
* @req: The USB gadget request
* @queue: The list of requests for the endpoint this is queued for.
@@ -229,6 +168,7 @@ struct s3c_hsotg_req {
unsigned char mapped;
};
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
#define call_gadget(_hs, _entry) \
do { \
if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
@@ -238,6 +178,9 @@ do { \
spin_lock(&_hs->lock); \
} \
} while (0)
+#else
+#define call_gadget(_hs, _entry) do {} while (0)
+#endif
struct dwc2_hsotg;
struct dwc2_host_chan;
@@ -495,11 +438,13 @@ struct dwc2_hw_params {
* struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
* and periodic schedules
*
+ * These are common for both host and peripheral modes:
+ *
* @dev: The struct device pointer
* @regs: Pointer to controller regs
- * @core_params: Parameters that define how the core should be configured
* @hw_params: Parameters that were autodetected from the
* hardware registers
+ * @core_params: Parameters that define how the core should be configured
* @op_state: The operational State, during transitions (a_host=>
* a_peripheral and b_device=>b_host) this may not match
* the core, but allows the software to determine
@@ -508,6 +453,8 @@ struct dwc2_hw_params {
* - USB_DR_MODE_PERIPHERAL
* - USB_DR_MODE_HOST
* - USB_DR_MODE_OTG
+ * @lock: Spinlock that protects all the driver data structures
+ * @priv: Stores a pointer to the struct usb_hcd
* @queuing_high_bandwidth: True if multiple packets of a high-bandwidth
* transfer are in process of being queued
* @srp_success: Stores status of SRP request in the case of a FS PHY
@@ -517,6 +464,9 @@ struct dwc2_hw_params {
* interrupt
* @wkp_timer: Timer object for handling Wakeup Detected interrupt
* @lx_state: Lx state of connected device
+ *
+ * These are for host mode:
+ *
* @flags: Flags for handling root port state changes
* @non_periodic_sched_inactive: Inactive QHs in the non-periodic schedule.
* Transfers associated with these QHs are not currently
@@ -585,11 +535,31 @@ struct dwc2_hw_params {
* @status_buf_dma: DMA address for status_buf
* @start_work: Delayed work for handling host A-cable connection
* @reset_work: Delayed work for handling a port reset
- * @lock: Spinlock that protects all the driver data structures
- * @priv: Stores a pointer to the struct usb_hcd
* @otg_port: OTG port number
* @frame_list: Frame list
* @frame_list_dma: Frame list DMA address
+ *
+ * These are for peripheral mode:
+ *
+ * @driver: USB gadget driver
+ * @phy: The otg phy transceiver structure for phy control.
+ * @uphy: The otg phy transceiver structure for old USB phy control.
+ * @plat: The platform specific configuration data. This can be removed once
+ * all SoCs support usb transceiver.
+ * @supplies: Definition of USB power supplies
+ * @phyif: PHY interface width
+ * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
+ * @num_of_eps: Number of available EPs (excluding EP0)
+ * @debug_root: Root directory for debugfs.
+ * @debug_file: Main status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @setup: NAK management for EP0 SETUP
+ * @last_rst: Time of last reset
+ * @eps: The endpoints being supplied to the gadget framework
*/
struct dwc2_hsotg {
struct device *dev;
@@ -601,6 +571,16 @@ struct dwc2_hsotg {
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
+ struct phy *phy;
+ struct usb_phy *uphy;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];
+
+ spinlock_t lock;
+ struct mutex init_mutex;
+ void *priv;
+ int irq;
+ struct clk *clk;
+
unsigned int queuing_high_bandwidth:1;
unsigned int srp_success:1;
@@ -609,6 +589,18 @@ struct dwc2_hsotg {
struct timer_list wkp_timer;
enum dwc2_lx_state lx_state;
+ struct dentry *debug_root;
+ struct dentry *debug_file;
+ struct dentry *debug_fifo;
+
+ /* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_2_71a 0x4f54271a
+#define DWC2_CORE_REV_2_90a 0x4f54290a
+#define DWC2_CORE_REV_2_92a 0x4f54292a
+#define DWC2_CORE_REV_2_94a 0x4f54294a
+#define DWC2_CORE_REV_3_00a 0x4f54300a
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
u32 d32;
struct {
@@ -655,19 +647,10 @@ struct dwc2_hsotg {
struct delayed_work start_work;
struct delayed_work reset_work;
- spinlock_t lock;
- void *priv;
u8 otg_port;
u32 *frame_list;
dma_addr_t frame_list_dma;
- /* DWC OTG HW Release versions */
-#define DWC2_CORE_REV_2_71a 0x4f54271a
-#define DWC2_CORE_REV_2_90a 0x4f54290a
-#define DWC2_CORE_REV_2_92a 0x4f54292a
-#define DWC2_CORE_REV_2_94a 0x4f54294a
-#define DWC2_CORE_REV_3_00a 0x4f54300a
-
#ifdef DEBUG
u32 frrem_samples;
u64 frrem_accum;
@@ -686,6 +669,31 @@ struct dwc2_hsotg {
u32 hfnum_other_samples_b;
u64 hfnum_other_frrem_accum_b;
#endif
+#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Gadget structures */
+ struct usb_gadget_driver *driver;
+ struct s3c_hsotg_plat *plat;
+
+ u32 phyif;
+ int fifo_mem;
+ unsigned int dedicated_fifos:1;
+ unsigned char num_of_eps;
+ u32 fifo_map;
+
+ struct usb_request *ep0_reply;
+ struct usb_request *ctrl_req;
+ u8 ep0_buff[8];
+ u8 ctrl_buff[8];
+
+ struct usb_gadget gadget;
+ unsigned int enabled:1;
+ unsigned int connected:1;
+ unsigned int setup:1;
+ unsigned long last_rst;
+ struct s3c_hsotg_ep *eps;
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
};
/* Reasons for halting a host channel */
@@ -955,4 +963,43 @@ extern void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg);
*/
extern u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg);
+/* Gadget defines */
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+extern int s3c_hsotg_remove(struct dwc2_hsotg *hsotg);
+extern int s3c_hsotg_suspend(struct dwc2_hsotg *dwc2);
+extern int s3c_hsotg_resume(struct dwc2_hsotg *dwc2);
+extern int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq);
+extern void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2);
+extern void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg);
+extern void s3c_hsotg_disconnect(struct dwc2_hsotg *dwc2);
+#else
+static inline int s3c_hsotg_remove(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int s3c_hsotg_suspend(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int s3c_hsotg_resume(struct dwc2_hsotg *dwc2)
+{ return 0; }
+static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
+{ return 0; }
+static inline void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2) {}
+static inline void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
+static inline void s3c_hsotg_disconnect(struct dwc2_hsotg *dwc2) {}
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);
+extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
+#else
+static inline void dwc2_set_all_params(struct dwc2_core_params *params, int value) {}
+static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {}
+static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
+ const struct dwc2_core_params *params)
+{ return 0; }
+#endif
+
#endif /* __DWC2_CORE_H__ */
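The stub pattern above is what lets the common code (core_intr.c, platform.c) call gadget and host entry points without #ifdefs: whichever side is compiled out collapses to an empty inline. A minimal sketch, assuming a hypothetical helper dwc2_example_disconnect() that is not part of the patch:

#include "core.h"

/* Dispatch a disconnect to whichever side is active; the inactive side's
 * call compiles down to a no-op stub in single-role builds. */
static void dwc2_example_disconnect(struct dwc2_hsotg *hsotg)
{
	if (dwc2_is_device_mode(hsotg))
		s3c_hsotg_disconnect(hsotg);
	else
		dwc2_hcd_disconnect(hsotg);
}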
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index c93918b70d03..ad43c5bc1ef1 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -128,6 +128,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
dwc2_op_state_str(hsotg));
gotgctl = readl(hsotg->regs + GOTGCTL);
+ if (dwc2_is_device_mode(hsotg))
+ s3c_hsotg_disconnect(hsotg);
+
if (hsotg->op_state == OTG_STATE_B_HOST) {
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
} else {
@@ -287,9 +290,11 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
* Release the lock before scheduling the workqueue work, as queue_work()
* holds a spinlock during scheduling.
*/
- spin_unlock(&hsotg->lock);
- queue_work(hsotg->wq_otg, &hsotg->wf_otg);
- spin_lock(&hsotg->lock);
+ if (hsotg->wq_otg) {
+ spin_unlock(&hsotg->lock);
+ queue_work(hsotg->wq_otg, &hsotg->wf_otg);
+ spin_lock(&hsotg->lock);
+ }
/* Clear interrupt */
writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS);
@@ -312,6 +317,12 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
/* Clear interrupt */
writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS);
+
+ /*
+ * Report a disconnect if a previous session was established
+ */
+ if (dwc2_is_device_mode(hsotg))
+ s3c_hsotg_disconnect(hsotg);
}
/*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 8b5c079c7b7d..200168ec2d75 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -36,6 +37,7 @@
#include <linux/platform_data/s3c-hsotg.h>
#include "core.h"
+#include "hw.h"
/* conversion functions */
static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
@@ -48,9 +50,9 @@ static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
return container_of(ep, struct s3c_hsotg_ep, ep);
}
-static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
+static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
- return container_of(gadget, struct s3c_hsotg, gadget);
+ return container_of(gadget, struct dwc2_hsotg, gadget);
}
static inline void __orr32(void __iomem *ptr, u32 val)
@@ -64,7 +66,7 @@ static inline void __bic32(void __iomem *ptr, u32 val)
}
/* forward declaration of functions */
-static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
+static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg);
/**
* using_dma - return the DMA status of the driver.
@@ -85,7 +87,7 @@ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
*
* Until this issue is sorted out, we always return 'false'.
*/
-static inline bool using_dma(struct s3c_hsotg *hsotg)
+static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
return false; /* support is not complete */
}
@@ -95,7 +97,7 @@ static inline bool using_dma(struct s3c_hsotg *hsotg)
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
*/
-static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
+static void s3c_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
u32 gsintmsk = readl(hsotg->regs + GINTMSK);
u32 new_gsintmsk;
@@ -113,7 +115,7 @@ static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
*/
-static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
+static void s3c_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
u32 gsintmsk = readl(hsotg->regs + GINTMSK);
u32 new_gsintmsk;
@@ -134,7 +136,7 @@ static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
* Set or clear the mask for an individual endpoint's interrupt
* request.
*/
-static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
unsigned int ep, unsigned int dir_in,
unsigned int en)
{
@@ -159,7 +161,7 @@ static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
* s3c_hsotg_init_fifo - initialise non-periodic FIFOs
* @hsotg: The device instance.
*/
-static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
unsigned int ep;
unsigned int addr;
@@ -283,7 +285,7 @@ static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
* This is the reverse of s3c_hsotg_map_dma(), called for the completion
* of a request to ensure the buffer is ready for access by the caller.
*/
-static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
struct s3c_hsotg_req *hs_req)
{
@@ -312,7 +314,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
*
* This routine is only needed for PIO
*/
-static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
struct s3c_hsotg_req *hs_req)
{
@@ -517,7 +519,7 @@ static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
* Start the given request running by setting the endpoint registers
* appropriately, and writing any data to the FIFOs.
*/
-static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_start_req(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
struct s3c_hsotg_req *hs_req,
bool continuing)
@@ -707,7 +709,7 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
* DMA memory, then we map the memory and mark our request to allow us to
* cleanup on completion.
*/
-static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_map_dma(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
struct usb_request *req)
{
@@ -736,7 +738,7 @@ static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
{
struct s3c_hsotg_req *hs_req = our_req(req);
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
+ struct dwc2_hsotg *hs = hs_ep->parent;
bool first;
dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
@@ -768,7 +770,7 @@ static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
+ struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags = 0;
int ret = 0;
@@ -799,7 +801,7 @@ static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
struct usb_request *req)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
@@ -814,7 +816,7 @@ static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
* Convert the given wIndex into a pointer to a driver endpoint
* structure, or return NULL if it is not a valid endpoint.
*/
-static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
+static struct s3c_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
@@ -843,7 +845,7 @@ static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
* Create a request and queue it on the given endpoint. This is useful as
* an internal method of sending replies to certain control requests, etc.
*/
-static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_send_reply(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *ep,
void *buff,
int length)
@@ -884,7 +886,7 @@ static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
* @hsotg: The device state
* @ctrl: USB control request
*/
-static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
@@ -955,7 +957,7 @@ static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
* @hsotg: The device state
* @ctrl: USB control request
*/
-static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
@@ -1028,8 +1030,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
return 1;
}
-static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
-static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
+static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
/**
* s3c_hsotg_stall_ep0 - stall ep0
@@ -1037,7 +1038,7 @@ static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);
*
* Set stall for ep0 as response for setup request.
*/
-static void s3c_hsotg_stall_ep0(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
u32 reg;
@@ -1076,7 +1077,7 @@ static void s3c_hsotg_stall_ep0(struct s3c_hsotg *hsotg)
* needs to work out what to do next (and whether to pass it on to the
* gadget driver).
*/
-static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_process_control(struct dwc2_hsotg *hsotg,
struct usb_ctrlrequest *ctrl)
{
struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
@@ -1107,7 +1108,6 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (ctrl->bRequest) {
case USB_REQ_SET_ADDRESS:
- s3c_hsotg_disconnect(hsotg);
dcfg = readl(hsotg->regs + DCFG);
dcfg &= ~DCFG_DEVADDR_MASK;
dcfg |= (le16_to_cpu(ctrl->wValue) <<
@@ -1161,7 +1161,7 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep,
struct usb_request *req)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
if (req->status < 0) {
dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
@@ -1183,7 +1183,7 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep,
* Enqueue a request on EP0 if necessary to receive any SETUP packets
* sent by the host.
*/
-static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
{
struct usb_request *req = hsotg->ctrl_req;
struct s3c_hsotg_req *hs_req = our_req(req);
@@ -1226,7 +1226,7 @@ static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
*
* Note, expects the ep to already be locked as appropriate.
*/
-static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_complete_request(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
struct s3c_hsotg_req *hs_req,
int result)
@@ -1291,7 +1291,7 @@ static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
* endpoint, so sort out whether we need to read the data into a request
* that has been made for that endpoint.
*/
-static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
+static void s3c_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
struct s3c_hsotg_req *hs_req = hs_ep->req;
@@ -1356,7 +1356,7 @@ static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
* currently believed that we do not need to wait for any space in
* the TxFIFO.
*/
-static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_send_zlp(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_req *req)
{
u32 ctrl;
@@ -1398,7 +1398,7 @@ static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
* transfer for an OUT endpoint has been completed, either by a short
* packet or by the finish of a transfer.
*/
-static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_handle_outdone(struct dwc2_hsotg *hsotg,
int epnum, bool was_setup)
{
u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
@@ -1471,7 +1471,7 @@ static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
*
* Return the current frame number
*/
-static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
+static u32 s3c_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
u32 dsts;
@@ -1498,7 +1498,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
* as the actual data should be sent to the memory directly and we turn
* on the completion interrupts to get notifications of transfer completion.
*/
-static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
{
u32 grxstsr = readl(hsotg->regs + GRXSTSP);
u32 epnum, status, size;
@@ -1590,7 +1590,7 @@ static u32 s3c_hsotg_ep0_mps(unsigned int mps)
* Configure the maximum packet size for the given endpoint, updating
* the hardware control registers to reflect this.
*/
-static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
unsigned int ep, unsigned int mps)
{
struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
@@ -1645,7 +1645,7 @@ bad_mps:
* @hsotg: The driver state
* @idx: The index for the endpoint (0..15)
*/
-static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
+static void s3c_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
{
int timeout;
int val;
@@ -1681,7 +1681,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
* Check to see if there is a request that has data to send, and if so
* make an attempt to write data into the FIFO.
*/
-static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
+static int s3c_hsotg_trytx(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep)
{
struct s3c_hsotg_req *hs_req = hs_ep->req;
@@ -1714,7 +1714,7 @@ static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
* An IN transfer has been completed, update the transfer's state and then
* call the relevant completion routines.
*/
-static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_complete_in(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep)
{
struct s3c_hsotg_req *hs_req = hs_ep->req;
@@ -1791,7 +1791,7 @@ static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
*
* Process and clear any interrupt pending for an individual endpoint
*/
-static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
+static void s3c_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
int dir_in)
{
struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
@@ -1916,7 +1916,7 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
* Handle updating the device settings after the enumeration phase has
* been completed.
*/
-static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
u32 dsts = readl(hsotg->regs + DSTS);
int ep0_mps = 0, ep_mps = 8;
@@ -1993,7 +1993,7 @@ static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
* Go through the requests on the given endpoint and mark them
* completed with the given result code.
*/
-static void kill_all_requests(struct s3c_hsotg *hsotg,
+static void kill_all_requests(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *ep,
int result, bool force)
{
@@ -2027,22 +2027,27 @@ static void kill_all_requests(struct s3c_hsotg *hsotg,
* transactions and signal the gadget driver that this
* has happened.
*/
-static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
+void s3c_hsotg_disconnect(struct dwc2_hsotg *hsotg)
{
unsigned ep;
+ if (!hsotg->connected)
+ return;
+
+ hsotg->connected = 0;
for (ep = 0; ep < hsotg->num_of_eps; ep++)
kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
call_gadget(hsotg, disconnect);
}
+EXPORT_SYMBOL_GPL(s3c_hsotg_disconnect);
/**
* s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
* @hsotg: The device state:
* @periodic: True if this is a periodic FIFO interrupt
*/
-static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
+static void s3c_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
{
struct s3c_hsotg_ep *ep;
int epno, ret;
@@ -2076,7 +2081,7 @@ static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
*
* Issue a soft reset to the core, and await the core finishing it.
*/
-static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
+static int s3c_hsotg_corereset(struct dwc2_hsotg *hsotg)
{
int timeout;
u32 grstctl;
@@ -2124,7 +2129,7 @@ static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
*
* Issue a soft reset to the core, and await the core finishing it.
*/
-static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
+void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg)
{
s3c_hsotg_corereset(hsotg);
@@ -2241,12 +2246,23 @@ static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
readl(hsotg->regs + DOEPCTL0));
/* clear global NAKs */
- writel(DCTL_CGOUTNAK | DCTL_CGNPINNAK,
+ writel(DCTL_CGOUTNAK | DCTL_CGNPINNAK | DCTL_SFTDISCON,
hsotg->regs + DCTL);
/* must be at least 3 ms to allow the bus to see the disconnect */
mdelay(3);
+ hsotg->last_rst = jiffies;
+}
+
+static void s3c_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
+{
+ /* set the soft-disconnect bit */
+ __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
+}
+
+void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg)
+{
/* remove the soft-disconnect and let's go */
__bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
}
@@ -2258,7 +2274,7 @@ static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
*/
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
- struct s3c_hsotg *hsotg = pw;
+ struct dwc2_hsotg *hsotg = pw;
int retry_count = 8;
u32 gintsts;
u32 gintmsk;
@@ -2273,31 +2289,11 @@ irq_retry:
gintsts &= gintmsk;
- if (gintsts & GINTSTS_OTGINT) {
- u32 otgint = readl(hsotg->regs + GOTGINT);
-
- dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
-
- writel(otgint, hsotg->regs + GOTGINT);
- }
-
- if (gintsts & GINTSTS_SESSREQINT) {
- dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
- writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS);
- }
-
if (gintsts & GINTSTS_ENUMDONE) {
writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);
s3c_hsotg_irq_enumdone(hsotg);
- }
-
- if (gintsts & GINTSTS_CONIDSTSCHNG) {
- dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
- readl(hsotg->regs + DSTS),
- readl(hsotg->regs + GOTGCTL));
-
- writel(GINTSTS_CONIDSTSCHNG, hsotg->regs + GINTSTS);
+ hsotg->connected = 1;
}
if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
@@ -2340,8 +2336,8 @@ irq_retry:
kill_all_requests(hsotg, &hsotg->eps[0],
-ECONNRESET, true);
- s3c_hsotg_core_init(hsotg);
- hsotg->last_rst = jiffies;
+ s3c_hsotg_core_init_disconnected(hsotg);
+ s3c_hsotg_core_connect(hsotg);
}
}
}
@@ -2380,25 +2376,6 @@ irq_retry:
s3c_hsotg_handle_rx(hsotg);
}
- if (gintsts & GINTSTS_MODEMIS) {
- dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
- writel(GINTSTS_MODEMIS, hsotg->regs + GINTSTS);
- }
-
- if (gintsts & GINTSTS_USBSUSP) {
- dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
- writel(GINTSTS_USBSUSP, hsotg->regs + GINTSTS);
-
- call_gadget(hsotg, suspend);
- }
-
- if (gintsts & GINTSTS_WKUPINT) {
- dev_info(hsotg->dev, "GINTSTS_WkUpIn\n");
- writel(GINTSTS_WKUPINT, hsotg->regs + GINTSTS);
-
- call_gadget(hsotg, resume);
- }
-
if (gintsts & GINTSTS_ERLYSUSP) {
dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
@@ -2450,7 +2427,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned long flags;
int index = hs_ep->index;
u32 epctrl_reg;
@@ -2593,7 +2570,7 @@ error:
static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
int index = hs_ep->index;
unsigned long flags;
@@ -2658,7 +2635,7 @@ static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct s3c_hsotg_req *hs_req = our_req(req);
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
+ struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags;
dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
@@ -2684,7 +2661,7 @@ static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
+ struct dwc2_hsotg *hs = hs_ep->parent;
int index = hs_ep->index;
u32 epreg;
u32 epctl;
@@ -2748,7 +2725,7 @@ static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
- struct s3c_hsotg *hs = hs_ep->parent;
+ struct dwc2_hsotg *hs = hs_ep->parent;
unsigned long flags = 0;
int ret = 0;
@@ -2777,7 +2754,7 @@ static struct usb_ep_ops s3c_hsotg_ep_ops = {
* A wrapper for platform code responsible for controlling
* low-level USB code
*/
-static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_phy_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -2800,7 +2777,7 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
* A wrapper for platform code responsible for controlling
* low-level USB code
*/
-static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_phy_disable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -2818,7 +2795,7 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
* s3c_hsotg_init - initialize the USB core
* @hsotg: The driver state
*/
-static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
{
/* unmask subset of endpoint interrupts */
@@ -2868,7 +2845,8 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
- struct s3c_hsotg *hsotg = to_hsotg(gadget);
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
+ unsigned long flags;
int ret;
if (!hsotg) {
@@ -2889,6 +2867,7 @@ static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
return -EINVAL;
}
+ mutex_lock(&hsotg->init_mutex);
WARN_ON(hsotg->driver);
driver->driver.bus = NULL;
@@ -2905,11 +2884,22 @@ static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
goto err;
}
- hsotg->last_rst = jiffies;
+ s3c_hsotg_phy_enable(hsotg);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ s3c_hsotg_init(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg);
+ hsotg->enabled = 0;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+
+ mutex_unlock(&hsotg->init_mutex);
+
return 0;
err:
+ mutex_unlock(&hsotg->init_mutex);
hsotg->driver = NULL;
return ret;
}
@@ -2921,16 +2911,17 @@ err:
*
* Stop the UDC hardware block and stay tuned for future transmissions
*/
-static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
{
- struct s3c_hsotg *hsotg = to_hsotg(gadget);
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags = 0;
int ep;
if (!hsotg)
return -ENODEV;
+ mutex_lock(&hsotg->init_mutex);
+
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++)
s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2939,13 +2930,18 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
hsotg->driver = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
+ s3c_hsotg_phy_disable(hsotg);
+
regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
clk_disable(hsotg->clk);
+ mutex_unlock(&hsotg->init_mutex);
+
return 0;
}
@@ -2969,23 +2965,26 @@ static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
*/
static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
- struct s3c_hsotg *hsotg = to_hsotg(gadget);
+ struct dwc2_hsotg *hsotg = to_hsotg(gadget);
unsigned long flags = 0;
dev_dbg(hsotg->dev, "%s: is_on: %d\n", __func__, is_on);
+ mutex_lock(&hsotg->init_mutex);
spin_lock_irqsave(&hsotg->lock, flags);
if (is_on) {
- s3c_hsotg_phy_enable(hsotg);
clk_enable(hsotg->clk);
- s3c_hsotg_core_init(hsotg);
+ hsotg->enabled = 1;
+ s3c_hsotg_core_connect(hsotg);
} else {
+ s3c_hsotg_core_disconnect(hsotg);
+ hsotg->enabled = 0;
clk_disable(hsotg->clk);
- s3c_hsotg_phy_disable(hsotg);
}
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
+ mutex_unlock(&hsotg->init_mutex);
return 0;
}
@@ -3007,7 +3006,7 @@ static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
* creation) to give to the gadget driver. Setup the endpoint name, any
* direction information and other state that may be required.
*/
-static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
+static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
struct s3c_hsotg_ep *hs_ep,
int epnum)
{
@@ -3056,7 +3055,7 @@ static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
*
* Read the USB core HW configuration registers
*/
-static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
{
u32 cfg2, cfg3, cfg4;
/* check hardware configuration */
@@ -3080,7 +3079,7 @@ static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
* s3c_hsotg_dump - dump state of the udc
* @param: The device state
*/
-static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
struct device *dev = hsotg->dev;
@@ -3139,7 +3138,7 @@ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
*/
static int state_show(struct seq_file *seq, void *v)
{
- struct s3c_hsotg *hsotg = seq->private;
+ struct dwc2_hsotg *hsotg = seq->private;
void __iomem *regs = hsotg->regs;
int idx;
@@ -3209,7 +3208,7 @@ static const struct file_operations state_fops = {
*/
static int fifo_show(struct seq_file *seq, void *v)
{
- struct s3c_hsotg *hsotg = seq->private;
+ struct dwc2_hsotg *hsotg = seq->private;
void __iomem *regs = hsotg->regs;
u32 val;
int idx;
@@ -3265,7 +3264,7 @@ static const char *decode_direction(int is_in)
static int ep_show(struct seq_file *seq, void *v)
{
struct s3c_hsotg_ep *ep = seq->private;
- struct s3c_hsotg *hsotg = ep->parent;
+ struct dwc2_hsotg *hsotg = ep->parent;
struct s3c_hsotg_req *req;
void __iomem *regs = hsotg->regs;
int index = ep->index;
@@ -3342,7 +3341,7 @@ static const struct file_operations ep_fops = {
* with the same name as the device itself, in case we end up
* with multiple blocks in future systems.
*/
-static void s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_create_debug(struct dwc2_hsotg *hsotg)
{
struct dentry *root;
unsigned epidx;
@@ -3388,7 +3387,7 @@ static void s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
*
* Cleanup (remove) the debugfs files for use on module exit.
*/
-static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_delete_debug(struct dwc2_hsotg *hsotg)
{
unsigned epidx;
@@ -3403,27 +3402,21 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
}
/**
- * s3c_hsotg_probe - probe function for hsotg driver
- * @pdev: The platform information for the driver
+ * dwc2_gadget_init - init function for gadget
+ * @hsotg: The data structure for the DWC2 driver.
+ * @irq: The IRQ number for the controller.
*/
-
-static int s3c_hsotg_probe(struct platform_device *pdev)
+int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{
- struct s3c_hsotg_plat *plat = dev_get_platdata(&pdev->dev);
+ struct device *dev = hsotg->dev;
+ struct s3c_hsotg_plat *plat = dev->platform_data;
struct phy *phy;
struct usb_phy *uphy;
- struct device *dev = &pdev->dev;
struct s3c_hsotg_ep *eps;
- struct s3c_hsotg *hsotg;
- struct resource *res;
int epnum;
int ret;
int i;
- hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
- if (!hsotg)
- return -ENOMEM;
-
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
@@ -3431,14 +3424,14 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
* Attempt to find a generic PHY, then look for an old style
* USB PHY, finally fall back to pdata
*/
- phy = devm_phy_get(&pdev->dev, "usb2-phy");
+ phy = devm_phy_get(dev, "usb2-phy");
if (IS_ERR(phy)) {
uphy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(uphy)) {
/* Fallback for pdata */
- plat = dev_get_platdata(&pdev->dev);
+ plat = dev_get_platdata(dev);
if (!plat) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"no platform data or transceiver defined\n");
return -EPROBE_DEFER;
}
@@ -3455,43 +3448,24 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
hsotg->phyif = GUSBCFG_PHYIF8;
}
- hsotg->dev = dev;
-
- hsotg->clk = devm_clk_get(&pdev->dev, "otg");
+ hsotg->clk = devm_clk_get(dev, "otg");
if (IS_ERR(hsotg->clk)) {
- dev_err(dev, "cannot get otg clock\n");
- return PTR_ERR(hsotg->clk);
- }
-
- platform_set_drvdata(pdev, hsotg);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hsotg->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hsotg->regs)) {
- ret = PTR_ERR(hsotg->regs);
- goto err_clk;
+ hsotg->clk = NULL;
+ dev_dbg(dev, "cannot get otg clock\n");
}
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "cannot find IRQ\n");
- goto err_clk;
- }
-
- spin_lock_init(&hsotg->lock);
-
- hsotg->irq = ret;
-
- dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
-
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
/* reset the system */
- clk_prepare_enable(hsotg->clk);
+ ret = clk_prepare_enable(hsotg->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable otg clk\n");
+ goto err_clk;
+ }
+
/* regulators */
@@ -3509,7 +3483,7 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
hsotg->supplies);
if (ret) {
- dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
+ dev_err(dev, "failed to enable supplies: %d\n", ret);
goto err_supplies;
}
@@ -3520,14 +3494,14 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
s3c_hsotg_hw_cfg(hsotg);
s3c_hsotg_init(hsotg);
- ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
- dev_name(dev), hsotg);
+ ret = devm_request_irq(hsotg->dev, irq, s3c_hsotg_irq, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
if (ret < 0) {
s3c_hsotg_phy_disable(hsotg);
clk_disable_unprepare(hsotg->clk);
regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
- dev_err(dev, "cannot claim IRQ\n");
+ dev_err(dev, "cannot claim IRQ for gadget\n");
goto err_clk;
}
@@ -3573,11 +3547,11 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
if (ret) {
- dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
+ dev_err(dev, "failed to disable supplies: %d\n", ret);
goto err_ep_mem;
}
- ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
+ ret = usb_add_gadget_udc(dev, &hsotg->gadget);
if (ret)
goto err_ep_mem;
@@ -3596,47 +3570,44 @@ err_clk:
return ret;
}
+EXPORT_SYMBOL_GPL(dwc2_gadget_init);
/**
* s3c_hsotg_remove - remove function for hsotg driver
* @pdev: The platform information for the driver
*/
-static int s3c_hsotg_remove(struct platform_device *pdev)
+int s3c_hsotg_remove(struct dwc2_hsotg *hsotg)
{
- struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
-
usb_del_gadget_udc(&hsotg->gadget);
-
s3c_hsotg_delete_debug(hsotg);
-
- if (hsotg->driver) {
- /* should have been done already by driver model core */
- usb_gadget_unregister_driver(hsotg->driver);
- }
-
clk_disable_unprepare(hsotg->clk);
return 0;
}
+EXPORT_SYMBOL_GPL(s3c_hsotg_remove);
-static int s3c_hsotg_suspend(struct platform_device *pdev, pm_message_t state)
+int s3c_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
- struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
unsigned long flags;
int ret = 0;
- if (hsotg->driver)
+ mutex_lock(&hsotg->init_mutex);
+
+ if (hsotg->driver) {
+ int ep;
+
dev_info(hsotg->dev, "suspending usb gadget %s\n",
hsotg->driver->driver.name);
- spin_lock_irqsave(&hsotg->lock, flags);
- s3c_hsotg_disconnect(hsotg);
- s3c_hsotg_phy_disable(hsotg);
- hsotg->gadget.speed = USB_SPEED_UNKNOWN;
- spin_unlock_irqrestore(&hsotg->lock, flags);
+ spin_lock_irqsave(&hsotg->lock, flags);
+ if (hsotg->enabled)
+ s3c_hsotg_core_disconnect(hsotg);
+ s3c_hsotg_disconnect(hsotg);
+ hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ s3c_hsotg_phy_disable(hsotg);
- if (hsotg->driver) {
- int ep;
for (ep = 0; ep < hsotg->num_of_eps; ep++)
s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -3645,57 +3616,37 @@ static int s3c_hsotg_suspend(struct platform_device *pdev, pm_message_t state)
clk_disable(hsotg->clk);
}
+ mutex_unlock(&hsotg->init_mutex);
+
return ret;
}
+EXPORT_SYMBOL_GPL(s3c_hsotg_suspend);
-static int s3c_hsotg_resume(struct platform_device *pdev)
+int s3c_hsotg_resume(struct dwc2_hsotg *hsotg)
{
- struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
unsigned long flags;
int ret = 0;
+ mutex_lock(&hsotg->init_mutex);
+
if (hsotg->driver) {
dev_info(hsotg->dev, "resuming usb gadget %s\n",
hsotg->driver->driver.name);
clk_enable(hsotg->clk);
ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
- hsotg->supplies);
- }
+ hsotg->supplies);
- spin_lock_irqsave(&hsotg->lock, flags);
- hsotg->last_rst = jiffies;
- s3c_hsotg_phy_enable(hsotg);
- s3c_hsotg_core_init(hsotg);
- spin_unlock_irqrestore(&hsotg->lock, flags);
+ s3c_hsotg_phy_enable(hsotg);
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ s3c_hsotg_core_init_disconnected(hsotg);
+ if (hsotg->enabled)
+ s3c_hsotg_core_connect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+ mutex_unlock(&hsotg->init_mutex);
return ret;
}
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c_hsotg_of_ids[] = {
- { .compatible = "samsung,s3c6400-hsotg", },
- { .compatible = "snps,dwc2", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, s3c_hsotg_of_ids);
-#endif
-
-static struct platform_driver s3c_hsotg_driver = {
- .driver = {
- .name = "s3c-hsotg",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(s3c_hsotg_of_ids),
- },
- .probe = s3c_hsotg_probe,
- .remove = s3c_hsotg_remove,
- .suspend = s3c_hsotg_suspend,
- .resume = s3c_hsotg_resume,
-};
-
-module_platform_driver(s3c_hsotg_driver);
-
-MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-hsotg");
+EXPORT_SYMBOL_GPL(s3c_hsotg_resume);
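With the old s3c_hsotg_core_init() split into s3c_hsotg_core_init_disconnected(), s3c_hsotg_core_connect() and s3c_hsotg_core_disconnect(), callers such as udc_start(), pullup() and the USB-reset path can toggle the soft-disconnect bit separately from the full core setup. A minimal sketch of that sequence, using the hypothetical helper dwc2_example_start_gadget() (the real call sites are in the hunks above):

#include <linux/spinlock.h>
#include "core.h"

/* Bring the device side up, then present a connection to the host. */
static void dwc2_example_start_gadget(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	s3c_hsotg_core_init_disconnected(hsotg);  /* soft reset, SFTDISCON set */
	s3c_hsotg_core_connect(hsotg);            /* clear SFTDISCON: D+ pull-up */
	spin_unlock_irqrestore(&hsotg->lock, flags);
}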
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 0a0e6f0ad15f..a0cd9db6f4cd 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1371,6 +1371,8 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
hsotg->op_state = OTG_STATE_B_PERIPHERAL;
dwc2_core_init(hsotg, false, -1);
dwc2_enable_global_interrupts(hsotg);
+ s3c_hsotg_core_init_disconnected(hsotg);
+ s3c_hsotg_core_connect(hsotg);
} else {
/* A-Device connector (Host Mode) */
dev_dbg(hsotg->dev, "connId A\n");
@@ -1471,6 +1473,30 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
}
}
+static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
+{
+ u32 hprt0;
+
+ /* After clearing the Stop PHY clock bit, we should wait for a moment
+ * for the PLL to produce a stable clock output.
+ */
+ writel(0, hsotg->regs + PCGCTL);
+ usleep_range(2000, 4000);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+ writel(hprt0, hsotg->regs + HPRT0);
+ hprt0 &= ~HPRT0_SUSP;
+ /* According to USB 2.0 spec section 7.1.7.7, the host must send the
+ * resume signal for at least 20 ms.
+ */
+ usleep_range(20000, 25000);
+
+ hprt0 &= ~HPRT0_RES;
+ writel(hprt0, hsotg->regs + HPRT0);
+ hsotg->lx_state = DWC2_L0;
+}
+
/* Handles hub class-specific requests */
static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u16 wvalue, u16 windex, char *buf, u16 wlength)
@@ -1516,17 +1542,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
- writel(0, hsotg->regs + PCGCTL);
- usleep_range(20000, 40000);
-
- hprt0 = dwc2_read_hprt0(hsotg);
- hprt0 |= HPRT0_RES;
- writel(hprt0, hsotg->regs + HPRT0);
- hprt0 &= ~HPRT0_SUSP;
- usleep_range(100000, 150000);
-
- hprt0 &= ~HPRT0_RES;
- writel(hprt0, hsotg->regs + HPRT0);
+ dwc2_port_resume(hsotg);
break;
case USB_PORT_FEAT_POWER:
@@ -2299,6 +2315,55 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
usleep_range(1000, 3000);
}
+static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ u32 hprt0;
+
+ if (!((hsotg->op_state == OTG_STATE_B_HOST) ||
+ (hsotg->op_state == OTG_STATE_A_HOST)))
+ return 0;
+
+ /* TODO: We get into suspend from the 'on' state; we may need to do
+ * something if we ever get here from the DWC2_L1 (LPM sleep) state.
+ */
+ if (hsotg->lx_state != DWC2_L0)
+ return 0;
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ if (hprt0 & HPRT0_CONNSTS) {
+ dwc2_port_suspend(hsotg, 1);
+ } else {
+ u32 pcgctl = readl(hsotg->regs + PCGCTL);
+
+ pcgctl |= PCGCTL_STOPPCLK;
+ writel(pcgctl, hsotg->regs + PCGCTL);
+ }
+
+ return 0;
+}
+
+static int _dwc2_hcd_resume(struct usb_hcd *hcd)
+{
+ struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
+ u32 hprt0;
+
+ if (!((hsotg->op_state == OTG_STATE_B_HOST) ||
+ (hsotg->op_state == OTG_STATE_A_HOST)))
+ return 0;
+
+ if (hsotg->lx_state != DWC2_L2)
+ return 0;
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ if ((hprt0 & HPRT0_CONNSTS) && (hprt0 & HPRT0_SUSP))
+ dwc2_port_resume(hsotg);
+ else
+ writel(0, hsotg->regs + PCGCTL);
+
+ return 0;
+}
+
/* Returns the current frame number */
static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
{
@@ -2669,6 +2734,9 @@ static struct hc_driver dwc2_hc_driver = {
.hub_status_data = _dwc2_hcd_hub_status_data,
.hub_control = _dwc2_hcd_hub_control,
.clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
+
+ .bus_suspend = _dwc2_hcd_suspend,
+ .bus_resume = _dwc2_hcd_resume,
};
/*
@@ -2778,6 +2846,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
int i, num_channels;
int retval;
+ if (usb_disabled())
+ return -ENODEV;
+
dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
/* Detect config values from hardware */
@@ -2839,7 +2910,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
hcd->has_tt = 1;
- spin_lock_init(&hsotg->lock);
((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg;
hsotg->priv = hcd;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index a12bb1538666..e69a843d8928 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -668,9 +668,6 @@ extern irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);
*/
extern void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);
-extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);
-
/**
* dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
* and 0 otherwise
@@ -680,13 +677,6 @@ extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);
extern int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);
/**
- * dwc2_hcd_get_frame_number() - Returns current frame number
- *
- * @hsotg: The DWC2 HCD
- */
-extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
-
-/**
* dwc2_hcd_dump_state() - Dumps hsotg state
*
* @hsotg: The DWC2 HCD
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index c291fca5d21f..a4e724b0a62e 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -141,6 +141,13 @@ static int dwc2_driver_probe(struct pci_dev *dev,
pci_set_master(dev);
+ retval = devm_request_irq(hsotg->dev, dev->irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ return retval;
+
+ spin_lock_init(&hsotg->lock);
retval = dwc2_hcd_init(hsotg, dev->irq, &dwc2_module_params);
if (retval) {
pci_disable_device(dev);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 121dbdafc06b..6a795aa2ff05 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -40,6 +40,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/usb/of.h>
@@ -121,6 +122,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
dwc2_hcd_remove(hsotg);
+ s3c_hsotg_remove(hsotg);
return 0;
}
@@ -129,6 +131,7 @@ static const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
{ .compatible = "snps,dwc2", .data = NULL },
+ { .compatible = "samsung,s3c6400-hsotg", .data = NULL},
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
@@ -155,9 +158,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
int retval;
int irq;
- if (usb_disabled())
- return -ENODEV;
-
match = of_match_device(dwc2_of_match_table, &dev->dev);
if (match && match->data) {
params = match->data;
@@ -194,6 +194,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
return irq;
}
+ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+ irq);
+ retval = devm_request_irq(hsotg->dev, irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ return retval;
+
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
hsotg->regs = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(hsotg->regs))
@@ -204,6 +212,11 @@ static int dwc2_driver_probe(struct platform_device *dev)
hsotg->dr_mode = of_usb_get_dr_mode(dev->dev.of_node);
+ spin_lock_init(&hsotg->lock);
+ mutex_init(&hsotg->init_mutex);
+ retval = dwc2_gadget_init(hsotg, irq);
+ if (retval)
+ return retval;
retval = dwc2_hcd_init(hsotg, irq, params);
if (retval)
return retval;
@@ -213,10 +226,35 @@ static int dwc2_driver_probe(struct platform_device *dev)
return retval;
}
+static int __maybe_unused dwc2_suspend(struct device *dev)
+{
+ struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (dwc2_is_device_mode(dwc2))
+ ret = s3c_hsotg_suspend(dwc2);
+ return ret;
+}
+
+static int __maybe_unused dwc2_resume(struct device *dev)
+{
+ struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (dwc2_is_device_mode(dwc2))
+ ret = s3c_hsotg_resume(dwc2);
+ return ret;
+}
+
+static const struct dev_pm_ops dwc2_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc2_suspend, dwc2_resume)
+};
+
static struct platform_driver dwc2_platform_driver = {
.driver = {
.name = dwc2_driver_name,
.of_match_table = dwc2_of_match_table,
+ .pm = &dwc2_dev_pm_ops,
},
.probe = dwc2_driver_probe,
.remove = dwc2_driver_remove,
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index f4e5cc60db0b..58b5b2cde4c5 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -55,7 +55,7 @@ config USB_DWC3_OMAP
config USB_DWC3_EXYNOS
tristate "Samsung Exynos Platform"
- depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on ARCH_EXYNOS && OF || COMPILE_TEST
default USB_DWC3
help
Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index b0f4d52b7f04..25ddc39efad8 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -32,6 +33,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/acpi.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -363,6 +365,72 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
}
/**
+ * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_phy_setup(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+ /*
+ * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
+ * to '0' during coreConsultant configuration. So the default value
+ * will be '0' when the core is reset. The application needs to set it
+ * to '1' after core initialization is completed.
+ */
+ if (dwc->revision > DWC3_REVISION_194A)
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+
+ if (dwc->u2ss_inp3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
+
+ if (dwc->req_p1p2p3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
+
+ if (dwc->del_p1p2p3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
+
+ if (dwc->del_phy_power_chg_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
+
+ if (dwc->lfps_filter_quirk)
+ reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
+
+ if (dwc->rx_detect_poll_quirk)
+ reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
+
+ if (dwc->tx_de_emphasis_quirk)
+ reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
+
+ if (dwc->dis_u3_susphy_quirk)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ mdelay(100);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+
+ /*
+ * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
+ * '0' during coreConsultant configuration. So the default value will
+ * be '0' when the core is reset. The application needs to set it to
+ * '1' after core initialization is completed.
+ */
+ if (dwc->revision > DWC3_REVISION_194A)
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+
+ if (dwc->dis_u2_susphy_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ mdelay(100);
+}
+
+/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
*
@@ -384,6 +452,12 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
dwc->revision = reg;
+ /*
+ * Write Linux Version Code to our GUID register so it's easy to figure
+ * out which kernel version a bug was found.
+ */
+ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
/* Handle USB2.0-only core configuration */
if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
@@ -414,7 +488,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
- reg &= ~DWC3_GCTL_DISSCRAMBLE;
switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
@@ -441,11 +514,34 @@ static int dwc3_core_init(struct dwc3 *dwc)
case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
/* enable hibernation here */
dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+
+ /*
+ * REVISIT Enabling this bit so that host-mode hibernation
+ * will work. Device-mode hibernation is not yet implemented.
+ */
+ reg |= DWC3_GCTL_GBLHIBERNATIONEN;
break;
default:
dev_dbg(dwc->dev, "No power optimization available\n");
}
+ /* check if the current dwc3 is on an FPGA simulation board */
+ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
+ dev_dbg(dwc->dev, "it is on FPGA board\n");
+ dwc->is_fpga = true;
+ }
+
+ WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
+ "disable_scramble cannot be used on non-FPGA builds\n");
+
+ if (dwc->disable_scramble_quirk && dwc->is_fpga)
+ reg |= DWC3_GCTL_DISSCRAMBLE;
+ else
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+
+ if (dwc->u2exit_lfps_quirk)
+ reg |= DWC3_GCTL_U2EXIT_LFPS;
+
/*
* WORKAROUND: DWC3 revisions <1.90a have a bug
* where the device can fail to connect at SuperSpeed
@@ -459,6 +555,8 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ dwc3_phy_setup(dwc);
+
ret = dwc3_alloc_scratch_buffers(dwc);
if (ret)
goto err1;
@@ -630,6 +728,9 @@ static int dwc3_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct resource *res;
struct dwc3 *dwc;
+ u8 lpm_nyet_threshold;
+ u8 tx_de_emphasis;
+ u8 hird_threshold;
int ret;
@@ -685,22 +786,96 @@ static int dwc3_probe(struct platform_device *pdev)
*/
res->start -= DWC3_GLOBALS_REGS_START;
+ /* default to highest possible threshold */
+ lpm_nyet_threshold = 0xff;
+
+ /* default to -3.5dB de-emphasis */
+ tx_de_emphasis = 1;
+
+ /*
+ * default to assert utmi_sleep_n and use maximum allowed HIRD
+ * threshold value of 0b1100
+ */
+ hird_threshold = 12;
+
if (node) {
dwc->maximum_speed = of_usb_get_maximum_speed(node);
-
- dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+ dwc->has_lpm_erratum = of_property_read_bool(node,
+ "snps,has-lpm-erratum");
+ of_property_read_u8(node, "snps,lpm-nyet-threshold",
+ &lpm_nyet_threshold);
+ dwc->is_utmi_l1_suspend = of_property_read_bool(node,
+ "snps,is-utmi-l1-suspend");
+ of_property_read_u8(node, "snps,hird-threshold",
+ &hird_threshold);
+
+ dwc->needs_fifo_resize = of_property_read_bool(node,
+ "tx-fifo-resize");
dwc->dr_mode = of_usb_get_dr_mode(node);
+
+ dwc->disable_scramble_quirk = of_property_read_bool(node,
+ "snps,disable_scramble_quirk");
+ dwc->u2exit_lfps_quirk = of_property_read_bool(node,
+ "snps,u2exit_lfps_quirk");
+ dwc->u2ss_inp3_quirk = of_property_read_bool(node,
+ "snps,u2ss_inp3_quirk");
+ dwc->req_p1p2p3_quirk = of_property_read_bool(node,
+ "snps,req_p1p2p3_quirk");
+ dwc->del_p1p2p3_quirk = of_property_read_bool(node,
+ "snps,del_p1p2p3_quirk");
+ dwc->del_phy_power_chg_quirk = of_property_read_bool(node,
+ "snps,del_phy_power_chg_quirk");
+ dwc->lfps_filter_quirk = of_property_read_bool(node,
+ "snps,lfps_filter_quirk");
+ dwc->rx_detect_poll_quirk = of_property_read_bool(node,
+ "snps,rx_detect_poll_quirk");
+ dwc->dis_u3_susphy_quirk = of_property_read_bool(node,
+ "snps,dis_u3_susphy_quirk");
+ dwc->dis_u2_susphy_quirk = of_property_read_bool(node,
+ "snps,dis_u2_susphy_quirk");
+
+ dwc->tx_de_emphasis_quirk = of_property_read_bool(node,
+ "snps,tx_de_emphasis_quirk");
+ of_property_read_u8(node, "snps,tx_de_emphasis",
+ &tx_de_emphasis);
} else if (pdata) {
dwc->maximum_speed = pdata->maximum_speed;
+ dwc->has_lpm_erratum = pdata->has_lpm_erratum;
+ if (pdata->lpm_nyet_threshold)
+ lpm_nyet_threshold = pdata->lpm_nyet_threshold;
+ dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
+ if (pdata->hird_threshold)
+ hird_threshold = pdata->hird_threshold;
dwc->needs_fifo_resize = pdata->tx_fifo_resize;
dwc->dr_mode = pdata->dr_mode;
+
+ dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
+ dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
+ dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
+ dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
+ dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
+ dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
+ dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
+ dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
+ dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
+ dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
+
+ dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
+ if (pdata->tx_de_emphasis)
+ tx_de_emphasis = pdata->tx_de_emphasis;
}
/* default to superspeed if no maximum_speed passed */
if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
dwc->maximum_speed = USB_SPEED_SUPER;
+ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ dwc->tx_de_emphasis = tx_de_emphasis;
+
+ dwc->hird_threshold = hird_threshold
+ | (dwc->is_utmi_l1_suspend << 4);
+
ret = dwc3_core_get_phy(dwc);
if (ret)
return ret;
@@ -708,9 +883,11 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
platform_set_drvdata(pdev, dwc);
- dev->dma_mask = dev->parent->dma_mask;
- dev->dma_parms = dev->parent->dma_parms;
- dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+ if (!dev->dma_mask) {
+ dev->dma_mask = dev->parent->dma_mask;
+ dev->dma_parms = dev->parent->dma_parms;
+ dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+ }
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -815,50 +992,6 @@ static int dwc3_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int dwc3_prepare(struct device *dev)
-{
- struct dwc3 *dwc = dev_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
- dwc3_gadget_prepare(dwc);
- /* FALLTHROUGH */
- case USB_DR_MODE_HOST:
- default:
- dwc3_event_buffers_cleanup(dwc);
- break;
- }
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- return 0;
-}
-
-static void dwc3_complete(struct device *dev)
-{
- struct dwc3 *dwc = dev_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc3_event_buffers_setup(dwc);
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
- dwc3_gadget_complete(dwc);
- /* FALLTHROUGH */
- case USB_DR_MODE_HOST:
- default:
- break;
- }
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-}
-
static int dwc3_suspend(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
@@ -873,7 +1006,7 @@ static int dwc3_suspend(struct device *dev)
/* FALLTHROUGH */
case USB_DR_MODE_HOST:
default:
- /* do nothing */
+ dwc3_event_buffers_cleanup(dwc);
break;
}
@@ -906,6 +1039,7 @@ static int dwc3_resume(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_event_buffers_setup(dwc);
dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);
switch (dwc->dr_mode) {
@@ -934,9 +1068,6 @@ err_usb2phy_init:
}
static const struct dev_pm_ops dwc3_dev_pm_ops = {
- .prepare = dwc3_prepare,
- .complete = dwc3_complete,
-
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
};
@@ -958,12 +1089,24 @@ static const struct of_device_id of_dwc3_match[] = {
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#endif
+#ifdef CONFIG_ACPI
+
+#define ACPI_ID_INTEL_BSW "808622B7"
+
+static const struct acpi_device_id dwc3_acpi_match[] = {
+ { ACPI_ID_INTEL_BSW, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
+#endif
+
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
+ .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
.pm = DWC3_PM_OPS,
},
};
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 66f62563bcf9..4bb9aa696ede 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -166,6 +166,7 @@
#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
+#define DWC3_GCTL_U2EXIT_LFPS (1 << 2)
#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
@@ -175,7 +176,17 @@
/* Global USB3 PIPE Control Register */
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
+#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
+#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1)
+#define DWC3_GUSB3PIPECTL_DEPOCHANGE (1 << 18)
#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17)
+#define DWC3_GUSB3PIPECTL_LFPSFILT (1 << 9)
+#define DWC3_GUSB3PIPECTL_RX_DETOPOLL (1 << 8)
+#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3)
+#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
/* Global TX Fifo Size Register */
#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
@@ -210,6 +221,9 @@
#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13)
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
+/* Global HWPARAMS6 Register */
+#define DWC3_GHWPARAMS6_EN_FPGA (1 << 7)
+
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -243,16 +257,19 @@
#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
/* These apply for core versions 1.94a and later */
-#define DWC3_DCTL_KEEP_CONNECT (1 << 19)
-#define DWC3_DCTL_L1_HIBER_EN (1 << 18)
-#define DWC3_DCTL_CRS (1 << 17)
-#define DWC3_DCTL_CSS (1 << 16)
+#define DWC3_DCTL_LPM_ERRATA_MASK DWC3_DCTL_LPM_ERRATA(0xf)
+#define DWC3_DCTL_LPM_ERRATA(n) ((n) << 20)
+
+#define DWC3_DCTL_KEEP_CONNECT (1 << 19)
+#define DWC3_DCTL_L1_HIBER_EN (1 << 18)
+#define DWC3_DCTL_CRS (1 << 17)
+#define DWC3_DCTL_CSS (1 << 16)
-#define DWC3_DCTL_INITU2ENA (1 << 12)
-#define DWC3_DCTL_ACCEPTU2ENA (1 << 11)
-#define DWC3_DCTL_INITU1ENA (1 << 10)
-#define DWC3_DCTL_ACCEPTU1ENA (1 << 9)
-#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
+#define DWC3_DCTL_INITU2ENA (1 << 12)
+#define DWC3_DCTL_ACCEPTU2ENA (1 << 11)
+#define DWC3_DCTL_INITU1ENA (1 << 10)
+#define DWC3_DCTL_ACCEPTU1ENA (1 << 9)
+#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5)
#define DWC3_DCTL_ULSTCHNGREQ(n) (((n) << 5) & DWC3_DCTL_ULSTCHNGREQ_MASK)
@@ -657,17 +674,41 @@ struct dwc3_scratchpad_array {
* @regset: debugfs pointer to regdump file
* @test_mode: true when we're entering a USB test mode
* @test_mode_nr: test feature selector
+ * @lpm_nyet_threshold: LPM NYET response threshold
+ * @hird_threshold: HIRD threshold
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @has_hibernation: true when dwc3 was configured with Hibernation
+ * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
+ * there's no way for software to detect this at runtime.
+ * @is_utmi_l1_suspend: the core asserts output signal
+ * 0 - utmi_sleep_n
+ * 1 - utmi_l1_suspend_n
* @is_selfpowered: true when we are selfpowered
+ * @is_fpga: true when we are using the FPGA board
* @needs_fifo_resize: not all users might want fifo resizing, flag it
* @pullups_connected: true when Run/Stop bit is set
* @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @start_config_issued: true when StartConfig command has been issued
* @three_stage_setup: set if we perform a three phase setup
+ * @disable_scramble_quirk: set if we enable the disable scramble quirk
+ * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
+ * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
+ * @req_p1p2p3_quirk: set if we enable request p1p2p3 quirk
+ * @del_p1p2p3_quirk: set if we enable delay p1p2p3 quirk
+ * @del_phy_power_chg_quirk: set if we enable delay phy power change quirk
+ * @lfps_filter_quirk: set if we enable LFPS filter quirk
+ * @rx_detect_poll_quirk: set if we enable the rx_detect to Polling.LFPS quirk
+ * @dis_u3_susphy_quirk: set if we disable usb3 suspend phy
+ * @dis_u2_susphy_quirk: set if we disable usb2 suspend phy
+ * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
+ * @tx_de_emphasis: Tx de-emphasis value
+ * 0 - -6dB de-emphasis
+ * 1 - -3.5dB de-emphasis
+ * 2 - No de-emphasis
+ * 3 - Reserved
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
@@ -759,18 +800,37 @@ struct dwc3 {
u8 test_mode;
u8 test_mode_nr;
+ u8 lpm_nyet_threshold;
+ u8 hird_threshold;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned has_hibernation:1;
+ unsigned has_lpm_erratum:1;
+ unsigned is_utmi_l1_suspend:1;
unsigned is_selfpowered:1;
+ unsigned is_fpga:1;
unsigned needs_fifo_resize:1;
unsigned pullups_connected:1;
unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
unsigned start_config_issued:1;
unsigned three_stage_setup:1;
+
+ unsigned disable_scramble_quirk:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
};
/* -------------------------------------------------------------------------- */
@@ -964,20 +1024,9 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
/* power management interface */
#if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
-int dwc3_gadget_prepare(struct dwc3 *dwc);
-void dwc3_gadget_complete(struct dwc3 *dwc);
int dwc3_gadget_suspend(struct dwc3 *dwc);
int dwc3_gadget_resume(struct dwc3 *dwc);
#else
-static inline int dwc3_gadget_prepare(struct dwc3 *dwc)
-{
- return 0;
-}
-
-static inline void dwc3_gadget_complete(struct dwc3 *dwc)
-{
-}
-
static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
{
return 0;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 3951a65fea04..7bd0a95b2815 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/dwc3-exynos.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
@@ -35,6 +34,9 @@ struct dwc3_exynos {
struct device *dev;
struct clk *clk;
+ struct clk *susp_clk;
+ struct clk *axius_clk;
+
struct regulator *vdd33;
struct regulator *vdd10;
};
@@ -106,7 +108,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused)
static int dwc3_exynos_probe(struct platform_device *pdev)
{
struct dwc3_exynos *exynos;
- struct clk *clk;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
@@ -133,16 +134,32 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
return ret;
}
- clk = devm_clk_get(dev, "usbdrd30");
- if (IS_ERR(clk)) {
+ exynos->dev = dev;
+
+ exynos->clk = devm_clk_get(dev, "usbdrd30");
+ if (IS_ERR(exynos->clk)) {
dev_err(dev, "couldn't get clock\n");
return -EINVAL;
}
+ clk_prepare_enable(exynos->clk);
- exynos->dev = dev;
- exynos->clk = clk;
+ exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
+ if (IS_ERR(exynos->susp_clk)) {
+ dev_dbg(dev, "no suspend clk specified\n");
+ exynos->susp_clk = NULL;
+ }
+ clk_prepare_enable(exynos->susp_clk);
- clk_prepare_enable(exynos->clk);
+ if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
+ exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
+ if (IS_ERR(exynos->axius_clk)) {
+ dev_err(dev, "no AXI UpScaler clk specified\n");
+ return -ENODEV;
+ }
+ clk_prepare_enable(exynos->axius_clk);
+ } else {
+ exynos->axius_clk = NULL;
+ }
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
@@ -185,7 +202,9 @@ err4:
err3:
regulator_disable(exynos->vdd33);
err2:
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(exynos->axius_clk);
+ clk_disable_unprepare(exynos->susp_clk);
+ clk_disable_unprepare(exynos->clk);
return ret;
}
@@ -197,6 +216,8 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
platform_device_unregister(exynos->usb2_phy);
platform_device_unregister(exynos->usb3_phy);
+ clk_disable_unprepare(exynos->axius_clk);
+ clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
regulator_disable(exynos->vdd33);
@@ -205,19 +226,19 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id exynos_dwc3_match[] = {
{ .compatible = "samsung,exynos5250-dwusb3" },
+ { .compatible = "samsung,exynos7-dwusb3" },
{},
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
-#endif
#ifdef CONFIG_PM_SLEEP
static int dwc3_exynos_suspend(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ clk_disable(exynos->axius_clk);
clk_disable(exynos->clk);
regulator_disable(exynos->vdd33);
@@ -243,6 +264,7 @@ static int dwc3_exynos_resume(struct device *dev)
}
clk_enable(exynos->clk);
+ clk_enable(exynos->axius_clk);
/* runtime set active to reflect active state. */
pm_runtime_disable(dev);
@@ -266,7 +288,7 @@ static struct platform_driver dwc3_exynos_driver = {
.remove = dwc3_exynos_remove,
.driver = {
.name = "exynos-dwc3",
- .of_match_table = of_match_ptr(exynos_dwc3_match),
+ .of_match_table = exynos_dwc3_match,
.pm = DEV_PM_OPS,
},
};
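The new suspend clock above is optional (and the AXI UpScaler clock is only present on Exynos7): a missing optional clock is recorded as NULL, and the common clk API treats a NULL clk as a no-op dummy, so the unconditional enable/disable calls are safe. A minimal sketch of that pattern (variable names illustrative):

struct clk *clk;

/* optional clock: absence is not an error */
clk = devm_clk_get(dev, "usbdrd30_susp_clk");
if (IS_ERR(clk))
	clk = NULL;			/* clk API accepts NULL as a dummy clock */

clk_prepare_enable(clk);		/* returns 0 and does nothing for NULL */
/* ... */
clk_disable_unprepare(clk);		/* no-op for NULL */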
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 7ec8495f4523..fe3b9335a74e 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -104,11 +104,6 @@ static int kdwc3_probe(struct platform_device *pdev)
kdwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing usbss resource\n");
- return -EINVAL;
- }
-
kdwc->usbss = devm_ioremap_resource(dev, res);
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
@@ -128,6 +123,7 @@ static int kdwc3_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "missing irq\n");
+ error = irq;
goto err_irq;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a0aa9f3da441..172d64e585b6 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -593,27 +593,12 @@ static const struct of_device_id of_dwc3_match[] = {
MODULE_DEVICE_TABLE(of, of_dwc3_match);
#ifdef CONFIG_PM_SLEEP
-static int dwc3_omap_prepare(struct device *dev)
-{
- struct dwc3_omap *omap = dev_get_drvdata(dev);
-
- dwc3_omap_disable_irqs(omap);
-
- return 0;
-}
-
-static void dwc3_omap_complete(struct device *dev)
-{
- struct dwc3_omap *omap = dev_get_drvdata(dev);
-
- dwc3_omap_enable_irqs(omap);
-}
-
static int dwc3_omap_suspend(struct device *dev)
{
struct dwc3_omap *omap = dev_get_drvdata(dev);
omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap);
+ dwc3_omap_disable_irqs(omap);
return 0;
}
@@ -623,6 +608,7 @@ static int dwc3_omap_resume(struct device *dev)
struct dwc3_omap *omap = dev_get_drvdata(dev);
dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status);
+ dwc3_omap_enable_irqs(omap);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -632,8 +618,6 @@ static int dwc3_omap_resume(struct device *dev)
}
static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
- .prepare = dwc3_omap_prepare,
- .complete = dwc3_omap_complete,
SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
};
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index a36cf66302fb..7c4faf738747 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -25,6 +25,8 @@
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
+#include "platform_data.h"
+
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
@@ -102,6 +104,9 @@ static int dwc3_pci_probe(struct pci_dev *pci,
struct dwc3_pci *glue;
int ret;
struct device *dev = &pci->dev;
+ struct dwc3_platform_data dwc3_pdata;
+
+ memset(&dwc3_pdata, 0x00, sizeof(dwc3_pdata));
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
@@ -140,6 +145,31 @@ static int dwc3_pci_probe(struct pci_dev *pci,
res[1].name = "dwc_usb3";
res[1].flags = IORESOURCE_IRQ;
+ if (pci->vendor == PCI_VENDOR_ID_AMD &&
+ pci->device == PCI_DEVICE_ID_AMD_NL_USB) {
+ dwc3_pdata.has_lpm_erratum = true;
+ dwc3_pdata.lpm_nyet_threshold = 0xf;
+
+ dwc3_pdata.u2exit_lfps_quirk = true;
+ dwc3_pdata.u2ss_inp3_quirk = true;
+ dwc3_pdata.req_p1p2p3_quirk = true;
+ dwc3_pdata.del_p1p2p3_quirk = true;
+ dwc3_pdata.del_phy_power_chg_quirk = true;
+ dwc3_pdata.lfps_filter_quirk = true;
+ dwc3_pdata.rx_detect_poll_quirk = true;
+
+ dwc3_pdata.tx_de_emphasis_quirk = true;
+ dwc3_pdata.tx_de_emphasis = 1;
+
+ /*
+ * FIXME these quirks should be removed when AMD NL
+ * tapes out
+ */
+ dwc3_pdata.disable_scramble_quirk = true;
+ dwc3_pdata.dis_u3_susphy_quirk = true;
+ dwc3_pdata.dis_u2_susphy_quirk = true;
+ }
+
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
@@ -148,6 +178,10 @@ static int dwc3_pci_probe(struct pci_dev *pci,
pci_set_drvdata(pci, glue);
+ ret = platform_device_add_data(dwc3, &dwc3_pdata, sizeof(dwc3_pdata));
+ if (ret)
+ goto err3;
+
dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask);
dwc3->dev.dma_mask = dev->dma_mask;
@@ -185,6 +219,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index c7602b5362ad..4a1a543deeda 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -243,7 +243,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
dev_err(&pdev->dev, "could not get reset controller\n");
- ret = PTR_ERR(dwc3_data->rstc_pwrdn);
+ ret = PTR_ERR(dwc3_data->rstc_rst);
goto undo_powerdown;
}
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index df38e7ef4976..1bc77a3b4997 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -86,6 +86,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+ trace_dwc3_prepare_trb(dep, trb);
+
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
DWC3_DEPCMD_STARTTRANSFER, &params);
if (ret < 0) {
@@ -441,7 +443,6 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_DEVICE_LTM_ENABLE:
return -EINVAL;
- break;
case USB_DEVICE_TEST_MODE:
if ((wIndex & 0xff) != 0)
@@ -550,7 +551,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (state) {
case USB_STATE_DEFAULT:
return -EINVAL;
- break;
case USB_STATE_ADDRESS:
ret = dwc3_ep0_delegate_req(dwc, ctrl);
@@ -700,35 +700,35 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS");
ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
break;
case USB_REQ_SET_FEATURE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE");
ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
break;
case USB_REQ_SET_ADDRESS:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS");
ret = dwc3_ep0_set_address(dwc, ctrl);
break;
case USB_REQ_SET_CONFIGURATION:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION");
ret = dwc3_ep0_set_config(dwc, ctrl);
break;
case USB_REQ_SET_SEL:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL");
ret = dwc3_ep0_set_sel(dwc, ctrl);
break;
case USB_REQ_SET_ISOCH_DELAY:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY\n");
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
default:
- dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver\n");
+ dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
break;
}
@@ -791,6 +791,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
trb = dwc->ep0_trb;
+ trace_dwc3_complete_trb(ep0, trb);
+
r = next_request(&ep0->request_list);
if (!r)
return;
@@ -855,6 +857,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
dep = dwc->eps[0];
trb = dwc->ep0_trb;
+ trace_dwc3_complete_trb(dep, trb);
+
if (!list_empty(&dep->request_list)) {
r = next_request(&dep->request_list);
@@ -875,7 +879,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (status == DWC3_TRBSTS_SETUP_PENDING)
- dwc3_trace(trace_dwc3_ep0, "Setup Pending received\n");
+ dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 546ea5431b8c..f03b136ecfce 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1140,8 +1140,14 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
if (!dep->endpoint.desc) {
dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
request, ep->name);
- spin_unlock_irqrestore(&dwc->lock, flags);
- return -ESHUTDOWN;
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
+ request, req->dep->name)) {
+ ret = -EINVAL;
+ goto out;
}
dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
@@ -1149,6 +1155,8 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
trace_dwc3_ep_queue(req);
ret = __dwc3_gadget_ep_queue(dep, req);
+
+out:
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1622,8 +1630,7 @@ err0:
return ret;
}
-static int dwc3_gadget_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
@@ -2034,6 +2041,17 @@ static void dwc3_resume_gadget(struct dwc3 *dwc)
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(&dwc->gadget);
+ }
+}
+
+static void dwc3_reset_gadget(struct dwc3 *dwc)
+{
+ if (!dwc->gadget_driver)
+ return;
+
+ if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ spin_unlock(&dwc->lock);
+ usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
}
}
@@ -2140,6 +2158,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
+ usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -2177,11 +2196,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_gadget_disconnect_interrupt(dwc);
}
- /* after reset -> Default State */
- usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
-
- if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
- dwc3_disconnect_gadget(dwc);
+ dwc3_reset_gadget(dwc);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
@@ -2287,11 +2302,20 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
+ reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
+
/*
- * TODO: This should be configurable. For now using
- * maximum allowed HIRD threshold value of 0b1100
+ * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled and
+ * DCFG.LPMCap is set, the core responds with an ACK if the BESL
+ * value in the LPM token is less than or equal to the LPM NYET
+ * threshold.
*/
- reg |= DWC3_DCTL_HIRD_THRES(12);
+ WARN_ONCE(dwc->revision < DWC3_REVISION_240A
+ && dwc->has_lpm_erratum,
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
+
+ if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
+ reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
} else {
@@ -2744,26 +2768,13 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc->ctrl_req, dwc->ctrl_req_addr);
}
-int dwc3_gadget_prepare(struct dwc3 *dwc)
+int dwc3_gadget_suspend(struct dwc3 *dwc)
{
if (dwc->pullups_connected) {
dwc3_gadget_disable_irq(dwc);
dwc3_gadget_run_stop(dwc, true, true);
}
- return 0;
-}
-
-void dwc3_gadget_complete(struct dwc3 *dwc)
-{
- if (dwc->pullups_connected) {
- dwc3_gadget_enable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true, false);
- }
-}
-
-int dwc3_gadget_suspend(struct dwc3 *dwc)
-{
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -2798,6 +2809,11 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
+ if (dwc->pullups_connected) {
+ dwc3_gadget_enable_irq(dwc);
+ dwc3_gadget_run_stop(dwc, true, false);
+ }
+
return 0;
err1:
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index dcb8ca084598..12bfd3c5405e 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -29,8 +29,7 @@ int dwc3_host_init(struct dwc3 *dwc)
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
@@ -60,22 +59,33 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err1;
}
+ phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
+ dev_name(&xhci->dev));
+ phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
+ dev_name(&xhci->dev));
+
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
- goto err1;
+ goto err2;
}
return 0;
-
+err2:
+ phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
+ dev_name(&xhci->dev));
+ phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
+ dev_name(&xhci->dev));
err1:
platform_device_put(xhci);
-
-err0:
return ret;
}
void dwc3_host_exit(struct dwc3 *dwc)
{
+ phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
+ dev_name(&dwc->xhci->dev));
+ phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
+ dev_name(&dwc->xhci->dev));
platform_device_unregister(dwc->xhci);
}
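The phy_create_lookup() calls above register the glue's USB2/USB3 PHYs against the xhci child's device name, which lets the host side find them through the generic PHY framework. A rough sketch of the consumer side of such a lookup (illustrative only; the real xHCI code may differ):

/* con_id and dev_name() must match the lookup created above */
struct phy *usb2_phy = phy_get(&xhci->dev, "usb2-phy");

if (!IS_ERR(usb2_phy)) {
	phy_init(usb2_phy);
	phy_power_on(usb2_phy);
}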
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
index 7db34f00b89a..a3a3b6d5668c 100644
--- a/drivers/usb/dwc3/platform_data.h
+++ b/drivers/usb/dwc3/platform_data.h
@@ -24,4 +24,24 @@ struct dwc3_platform_data {
enum usb_device_speed maximum_speed;
enum usb_dr_mode dr_mode;
bool tx_fifo_resize;
+
+ unsigned is_utmi_l1_suspend:1;
+ u8 hird_threshold;
+
+ u8 lpm_nyet_threshold;
+
+ unsigned disable_scramble_quirk:1;
+ unsigned has_lpm_erratum:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
};
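A non-DT glue driver is expected to hand these new fields to the core the same way the dwc3-pci changes above do, by attaching a struct dwc3_platform_data to the child platform device. A hedged sketch (names illustrative, error handling trimmed):

#include <linux/platform_device.h>
#include <linux/string.h>
#include "platform_data.h"

static int example_glue_set_quirks(struct platform_device *dwc3)
{
	struct dwc3_platform_data pdata;

	memset(&pdata, 0, sizeof(pdata));
	pdata.has_lpm_erratum = true;
	pdata.lpm_nyet_threshold = 0xf;
	pdata.dis_u2_susphy_quirk = true;

	/* dwc3_probe() reads this back via dev_get_platdata() */
	return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
}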
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 60b0f41eafc4..9fc20b33dd8e 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -61,7 +61,7 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
TP_fast_assign(
__entry->event = event;
),
- TP_printk("event %08x\n", __entry->event)
+ TP_printk("event %08x", __entry->event)
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -157,7 +157,7 @@ DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
__entry->cmd = cmd;
__entry->param = param;
),
- TP_printk("cmd '%s' [%d] param %08x\n",
+ TP_printk("cmd '%s' [%d] param %08x",
dwc3_gadget_generic_cmd_string(__entry->cmd),
__entry->cmd, __entry->param
)
@@ -175,17 +175,21 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
TP_STRUCT__entry(
__dynamic_array(char, name, DWC3_MSG_MAX)
__field(unsigned int, cmd)
- __field(struct dwc3_gadget_ep_cmd_params *, params)
+ __field(u32, param0)
+ __field(u32, param1)
+ __field(u32, param2)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
__entry->cmd = cmd;
- __entry->params = params;
+ __entry->param0 = params->param0;
+ __entry->param1 = params->param1;
+ __entry->param2 = params->param2;
),
- TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x\n",
+ TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x",
__get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
- __entry->cmd, __entry->params->param0,
- __entry->params->param1, __entry->params->param2
+ __entry->cmd, __entry->param0,
+ __entry->param1, __entry->param2
)
);
@@ -214,7 +218,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
),
- TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x\n",
+ TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
__get_str(name), __entry->trb, __entry->bph, __entry->bpl,
__entry->size, __entry->ctrl
)
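The trailing "\n" removals in the TP_printk() strings above are not cosmetic: the tracing core already terminates each event line, so an embedded newline shows up as blank lines in the trace output. The corrected form is simply:

/* no trailing "\n"; ftrace adds the line termination itself */
TP_printk("event %08x", __entry->event)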
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c4880fc0d86e..747ef53bda14 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -190,6 +190,12 @@ config USB_F_UAC2
config USB_F_UVC
tristate
+config USB_F_MIDI
+ tristate
+
+config USB_F_HID
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -362,6 +368,61 @@ config USB_CONFIGFS_F_FS
implemented in kernel space (for instance Ethernet, serial or
mass storage) and other are implemented in user space.
+config USB_CONFIGFS_F_UAC1
+ boolean "Audio Class 1.0"
+ depends on USB_CONFIGFS
+ depends on SND
+ select USB_LIBCOMPOSITE
+ select SND_PCM
+ select USB_F_UAC1
+ help
+ This Audio function implements 1 AudioControl interface,
+ 1 AudioStreaming Interface each for USB-OUT and USB-IN.
+ This driver requires a real Audio codec to be present
+ on the device.
+
+config USB_CONFIGFS_F_UAC2
+ boolean "Audio Class 2.0"
+ depends on USB_CONFIGFS
+ depends on SND
+ select USB_LIBCOMPOSITE
+ select SND_PCM
+ select USB_F_UAC2
+ help
+ This Audio function is compatible with USB Audio Class
+ specification 2.0. It implements 1 AudioControl interface,
+ 1 AudioStreaming Interface each for USB-OUT and USB-IN.
+ This driver doesn't expect any real Audio codec to be present
+ on the device - the audio streams are simply sunk to and
+ sourced from a newly created virtual ALSA sound card. The
+ user-space application may do whatever it wants with the data
+ received from the USB Host and provide whatever it wants as
+ audio data back to the USB Host.
+
+config USB_CONFIGFS_F_MIDI
+ boolean "MIDI function"
+ depends on USB_CONFIGFS
+ depends on SND
+ select USB_LIBCOMPOSITE
+ select SND_RAWMIDI
+ select USB_F_MIDI
+ help
+ The MIDI Function acts as a USB Audio device, with one MIDI
+ input and one MIDI output. These MIDI jacks appear as
+ a sound "card" in the ALSA sound system. Other MIDI
+ connections can then be made on the gadget system, using
+ ALSA's aconnect utility etc.
+
+config USB_CONFIGFS_F_HID
+ boolean "HID function"
+ depends on USB_CONFIGFS
+ select USB_F_HID
+ help
+ The HID function driver provides generic emulation of USB
+ Human Interface Devices (HID).
+
+ For more information, see Documentation/usb/gadget_hid.txt.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f6a51fddd5b5..617835348569 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1246,10 +1246,49 @@ EXPORT_SYMBOL_GPL(usb_string_ids_n);
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
+ struct usb_composite_dev *cdev;
+
if (req->status || req->actual != req->length)
DBG((struct usb_composite_dev *) ep->driver_data,
"setup complete --> %d, %d/%d\n",
req->status, req->actual, req->length);
+
+ /*
+ * REVISIT The same ep0 requests are shared with function drivers
+ * so they don't have to maintain the same ->complete() stubs.
+ *
+ * Because of that, we need to check for the validity of ->context
+ * here, even though we know we've set it to something useful.
+ */
+ if (!req->context)
+ return;
+
+ cdev = req->context;
+
+ if (cdev->req == req)
+ cdev->setup_pending = false;
+ else if (cdev->os_desc_req == req)
+ cdev->os_desc_pending = false;
+ else
+ WARN(1, "unknown request %p\n", req);
+}
+
+static int composite_ep0_queue(struct usb_composite_dev *cdev,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ int ret;
+
+ ret = usb_ep_queue(cdev->gadget->ep0, req, gfp_flags);
+ if (ret == 0) {
+ if (cdev->req == req)
+ cdev->setup_pending = true;
+ else if (cdev->os_desc_req == req)
+ cdev->os_desc_pending = true;
+ else
+ WARN(1, "unknown request %p\n", req);
+ }
+
+ return ret;
}
static int count_ext_compat(struct usb_configuration *c)
@@ -1428,6 +1467,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
* when we delegate to it.
*/
req->zero = 0;
+ req->context = cdev;
req->complete = composite_setup_complete;
req->length = 0;
gadget->ep0->driver_data = cdev;
@@ -1624,6 +1664,7 @@ unknown:
int count = 0;
req = cdev->os_desc_req;
+ req->context = cdev;
req->complete = composite_setup_complete;
buf = req->buf;
os_desc_cfg = cdev->os_desc_config;
@@ -1686,8 +1727,9 @@ unknown:
break;
}
req->length = value;
+ req->context = cdev;
req->zero = value < w_length;
- value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+ value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
@@ -1757,8 +1799,9 @@ unknown:
/* respond with data transfer before status phase? */
if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
req->length = value;
+ req->context = cdev;
req->zero = value < w_length;
- value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+ value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
@@ -1893,6 +1936,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
goto fail_dev;
cdev->req->complete = composite_setup_complete;
+ cdev->req->context = cdev;
gadget->ep0->driver_data = cdev;
cdev->driver = composite;
@@ -1937,6 +1981,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
kfree(cdev->os_desc_req);
goto end;
}
+ cdev->os_desc_req->context = cdev;
cdev->os_desc_req->complete = composite_setup_complete;
end:
return ret;
@@ -1951,10 +1996,16 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
kfree(uc);
}
if (cdev->os_desc_req) {
+ if (cdev->os_desc_pending)
+ usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
+
kfree(cdev->os_desc_req->buf);
usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
}
if (cdev->req) {
+ if (cdev->setup_pending)
+ usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+
kfree(cdev->req->buf);
usb_ep_free_request(cdev->gadget->ep0, cdev->req);
}
@@ -2013,8 +2064,7 @@ fail:
/*-------------------------------------------------------------------------*/
-static void
-composite_suspend(struct usb_gadget *gadget)
+void composite_suspend(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
@@ -2037,8 +2087,7 @@ composite_suspend(struct usb_gadget *gadget)
usb_gadget_vbus_draw(gadget, 2);
}
-static void
-composite_resume(struct usb_gadget *gadget)
+void composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
@@ -2158,7 +2207,8 @@ void usb_composite_setup_continue(struct usb_composite_dev *cdev)
} else if (--cdev->delayed_status == 0) {
DBG(cdev, "%s: Completing delayed status\n", __func__);
req->length = 0;
- value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ req->context = cdev;
+ value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
if (value < 0) {
DBG(cdev, "ep_queue --> %d\n", value);
req->status = 0;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 34034333f7f6..75648145dc1b 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -271,7 +271,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct gadget_info *gi,
ret = -EBUSY;
goto err;
}
- ret = udc_attach_driver(name, &gi->composite.gadget_driver);
+ ret = usb_udc_attach_driver(name, &gi->composite.gadget_driver);
if (ret)
goto err;
gi->udc_name = name;
@@ -1453,6 +1453,9 @@ static const struct usb_gadget_driver configfs_driver_template = {
.reset = composite_disconnect,
.disconnect = composite_disconnect,
+ .suspend = composite_suspend,
+ .resume = composite_resume,
+
.max_speed = USB_SPEED_SUPER,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 90701aa5a826..dd68091d92f0 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -38,3 +38,7 @@ usb_f_uac2-y := f_uac2.o
obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o
usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o
obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o
+usb_f_midi-y := f_midi.o
+obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o
+usb_f_hid-y := f_hid.o
+obj-$(CONFIG_USB_F_HID) += usb_f_hid.o
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 59ab62c92b66..6e04e302dc3a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hid.h>
+#include <linux/idr.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -21,9 +22,14 @@
#include <linux/usb/g_hid.h>
#include "u_f.h"
+#include "u_hid.h"
+
+#define HIDG_MINORS 4
static int major, minors;
static struct class *hidg_class;
+static DEFINE_IDA(hidg_ida);
+static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
/*-------------------------------------------------------------------------*/
/* HID gadget struct */
@@ -161,6 +167,26 @@ static struct usb_descriptor_header *hidg_fs_descriptors[] = {
};
/*-------------------------------------------------------------------------*/
+/* Strings */
+
+#define CT_FUNC_HID_IDX 0
+
+static struct usb_string ct_func_string_defs[] = {
+ [CT_FUNC_HID_IDX].s = "HID Interface",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings ct_func_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = ct_func_string_defs,
+};
+
+static struct usb_gadget_strings *ct_func_strings[] = {
+ &ct_func_string_table,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
/* Char Device */
static ssize_t f_hidg_read(struct file *file, char __user *buffer,
@@ -396,7 +422,7 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_REPORT):
- VDBG(cdev, "set_report | wLenght=%d\n", ctrl->wLength);
+ VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
goto stall;
break;
@@ -552,13 +578,22 @@ const struct file_operations f_hidg_fops = {
.llseek = noop_llseek,
};
-static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
+static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_ep *ep;
struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_string *us;
+ struct device *device;
int status;
dev_t dev;
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ us = usb_gstrings_attach(c->cdev, ct_func_strings,
+ ARRAY_SIZE(ct_func_string_defs));
+ if (IS_ERR(us))
+ return PTR_ERR(us);
+ hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id;
+
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
@@ -623,10 +658,16 @@ static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail_free_descs;
- device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);
+ device = device_create(hidg_class, NULL, dev, NULL,
+ "%s%d", "hidg", hidg->minor);
+ if (IS_ERR(device)) {
+ status = PTR_ERR(device);
+ goto del;
+ }
return 0;
-
+del:
+ cdev_del(&hidg->cdev);
fail_free_descs:
usb_free_all_descriptors(f);
fail:
@@ -640,116 +681,313 @@ fail:
return status;
}
-static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+static inline int hidg_get_minor(void)
{
- struct f_hidg *hidg = func_to_hidg(f);
+ int ret;
- device_destroy(hidg_class, MKDEV(major, hidg->minor));
- cdev_del(&hidg->cdev);
+ ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
- /* disable/free request and end point */
- usb_ep_disable(hidg->in_ep);
- usb_ep_dequeue(hidg->in_ep, hidg->req);
- kfree(hidg->req->buf);
- usb_ep_free_request(hidg->in_ep, hidg->req);
-
- usb_free_all_descriptors(f);
+ return ret;
+}
- kfree(hidg->report_desc);
- kfree(hidg);
+static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_hid_opts,
+ func_inst.group);
}
-/*-------------------------------------------------------------------------*/
-/* Strings */
+CONFIGFS_ATTR_STRUCT(f_hid_opts);
+CONFIGFS_ATTR_OPS(f_hid_opts);
-#define CT_FUNC_HID_IDX 0
+static void hid_attr_release(struct config_item *item)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
-static struct usb_string ct_func_string_defs[] = {
- [CT_FUNC_HID_IDX].s = "HID Interface",
- {}, /* end of list */
-};
+ usb_put_function_instance(&opts->func_inst);
+}
-static struct usb_gadget_strings ct_func_string_table = {
- .language = 0x0409, /* en-US */
- .strings = ct_func_string_defs,
+static struct configfs_item_operations hidg_item_ops = {
+ .release = hid_attr_release,
+ .show_attribute = f_hid_opts_attr_show,
+ .store_attribute = f_hid_opts_attr_store,
};
-static struct usb_gadget_strings *ct_func_strings[] = {
- &ct_func_string_table,
+#define F_HID_OPT(name, prec, limit) \
+static ssize_t f_hid_opts_##name##_show(struct f_hid_opts *opts, char *page)\
+{ \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_hid_opts_##name##_store(struct f_hid_opts *opts, \
+ const char *page, size_t len) \
+{ \
+ int ret; \
+ u##prec num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou##prec(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+static struct f_hid_opts_attribute f_hid_opts_##name = \
+ __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, f_hid_opts_##name##_show,\
+ f_hid_opts_##name##_store)
+
+F_HID_OPT(subclass, 8, 255);
+F_HID_OPT(protocol, 8, 255);
+F_HID_OPT(report_length, 16, 65536);
+
+static ssize_t f_hid_opts_report_desc_show(struct f_hid_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = opts->report_desc_length;
+ memcpy(page, opts->report_desc, opts->report_desc_length);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_hid_opts_report_desc_store(struct f_hid_opts *opts,
+ const char *page, size_t len)
+{
+ int ret = -EBUSY;
+ char *d;
+
+ mutex_lock(&opts->lock);
+
+ if (opts->refcnt)
+ goto end;
+ if (len > PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto end;
+ }
+ d = kmemdup(page, len, GFP_KERNEL);
+ if (!d) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ kfree(opts->report_desc);
+ opts->report_desc = d;
+ opts->report_desc_length = len;
+ opts->report_desc_alloc = true;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_hid_opts_attribute f_hid_opts_report_desc =
+ __CONFIGFS_ATTR(report_desc, S_IRUGO | S_IWUSR,
+ f_hid_opts_report_desc_show,
+ f_hid_opts_report_desc_store);
+
+static struct configfs_attribute *hid_attrs[] = {
+ &f_hid_opts_subclass.attr,
+ &f_hid_opts_protocol.attr,
+ &f_hid_opts_report_length.attr,
+ &f_hid_opts_report_desc.attr,
NULL,
};
-/*-------------------------------------------------------------------------*/
-/* usb_configuration */
+static struct config_item_type hid_func_type = {
+ .ct_item_ops = &hidg_item_ops,
+ .ct_attrs = hid_attrs,
+ .ct_owner = THIS_MODULE,
+};
-int __init hidg_bind_config(struct usb_configuration *c,
- struct hidg_func_descriptor *fdesc, int index)
+static inline void hidg_put_minor(int minor)
{
- struct f_hidg *hidg;
- int status;
+ ida_simple_remove(&hidg_ida, minor);
+}
- if (index >= minors)
- return -ENOENT;
+static void hidg_free_inst(struct usb_function_instance *f)
+{
+ struct f_hid_opts *opts;
- /* maybe allocate device-global string IDs, and patch descriptors */
- if (ct_func_string_defs[CT_FUNC_HID_IDX].id == 0) {
- status = usb_string_id(c->cdev);
- if (status < 0)
- return status;
- ct_func_string_defs[CT_FUNC_HID_IDX].id = status;
- hidg_interface_desc.iInterface = status;
+ opts = container_of(f, struct f_hid_opts, func_inst);
+
+ mutex_lock(&hidg_ida_lock);
+
+ hidg_put_minor(opts->minor);
+ if (idr_is_empty(&hidg_ida.idr))
+ ghid_cleanup();
+
+ mutex_unlock(&hidg_ida_lock);
+
+ if (opts->report_desc_alloc)
+ kfree(opts->report_desc);
+
+ kfree(opts);
+}
+
+static struct usb_function_instance *hidg_alloc_inst(void)
+{
+ struct f_hid_opts *opts;
+ struct usb_function_instance *ret;
+ int status = 0;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = hidg_free_inst;
+ ret = &opts->func_inst;
+
+ mutex_lock(&hidg_ida_lock);
+
+ if (idr_is_empty(&hidg_ida.idr)) {
+ status = ghid_setup(NULL, HIDG_MINORS);
+ if (status) {
+ ret = ERR_PTR(status);
+ kfree(opts);
+ goto unlock;
+ }
+ }
+
+ opts->minor = hidg_get_minor();
+ if (opts->minor < 0) {
+ ret = ERR_PTR(opts->minor);
+ kfree(opts);
+ if (idr_is_empty(&hidg_ida.idr))
+ ghid_cleanup();
+ goto unlock;
}
+ config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type);
+
+unlock:
+ mutex_unlock(&hidg_ida_lock);
+ return ret;
+}
+
+static void hidg_free(struct usb_function *f)
+{
+ struct f_hidg *hidg;
+ struct f_hid_opts *opts;
+
+ hidg = func_to_hidg(f);
+ opts = container_of(f->fi, struct f_hid_opts, func_inst);
+ kfree(hidg->report_desc);
+ kfree(hidg);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+}
+
+static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+
+ device_destroy(hidg_class, MKDEV(major, hidg->minor));
+ cdev_del(&hidg->cdev);
+
+ /* disable/free request and end point */
+ usb_ep_disable(hidg->in_ep);
+ usb_ep_dequeue(hidg->in_ep, hidg->req);
+ kfree(hidg->req->buf);
+ usb_ep_free_request(hidg->in_ep, hidg->req);
+
+ usb_free_all_descriptors(f);
+}
+
+static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+{
+ struct f_hidg *hidg;
+ struct f_hid_opts *opts;
/* allocate and initialize one new instance */
- hidg = kzalloc(sizeof *hidg, GFP_KERNEL);
+ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
if (!hidg)
- return -ENOMEM;
-
- hidg->minor = index;
- hidg->bInterfaceSubClass = fdesc->subclass;
- hidg->bInterfaceProtocol = fdesc->protocol;
- hidg->report_length = fdesc->report_length;
- hidg->report_desc_length = fdesc->report_desc_length;
- hidg->report_desc = kmemdup(fdesc->report_desc,
- fdesc->report_desc_length,
- GFP_KERNEL);
- if (!hidg->report_desc) {
- kfree(hidg);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ opts = container_of(fi, struct f_hid_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ ++opts->refcnt;
+
+ hidg->minor = opts->minor;
+ hidg->bInterfaceSubClass = opts->subclass;
+ hidg->bInterfaceProtocol = opts->protocol;
+ hidg->report_length = opts->report_length;
+ hidg->report_desc_length = opts->report_desc_length;
+ if (opts->report_desc) {
+ hidg->report_desc = kmemdup(opts->report_desc,
+ opts->report_desc_length,
+ GFP_KERNEL);
+ if (!hidg->report_desc) {
+ kfree(hidg);
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-ENOMEM);
+ }
}
+ mutex_unlock(&opts->lock);
+
hidg->func.name = "hid";
- hidg->func.strings = ct_func_strings;
hidg->func.bind = hidg_bind;
hidg->func.unbind = hidg_unbind;
hidg->func.set_alt = hidg_set_alt;
hidg->func.disable = hidg_disable;
hidg->func.setup = hidg_setup;
+ hidg->func.free_func = hidg_free;
/* this could be made configurable at some point */
hidg->qlen = 4;
- status = usb_add_function(c, &hidg->func);
- if (status)
- kfree(hidg);
-
- return status;
+ return &hidg->func;
}
-int __init ghid_setup(struct usb_gadget *g, int count)
+DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabien Chouteau");
+
+int ghid_setup(struct usb_gadget *g, int count)
{
int status;
dev_t dev;
hidg_class = class_create(THIS_MODULE, "hidg");
+ if (IS_ERR(hidg_class)) {
+ status = PTR_ERR(hidg_class);
+ hidg_class = NULL;
+ return status;
+ }
status = alloc_chrdev_region(&dev, 0, count, "hidg");
- if (!status) {
- major = MAJOR(dev);
- minors = count;
+ if (status) {
+ class_destroy(hidg_class);
+ hidg_class = NULL;
+ return status;
}
- return status;
+ major = MAJOR(dev);
+ minors = count;
+
+ return 0;
}
void ghid_cleanup(void)
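With f_hid converted to the new function interface above, an in-kernel user (for example the legacy hid gadget) is expected to instantiate it roughly as follows; the option values and the my_report_desc buffer are illustrative, and error handling is trimmed:

struct usb_function_instance *fi;
struct usb_function *f;
struct f_hid_opts *opts;

fi = usb_get_function_instance("hid");	/* matches DECLARE_USB_FUNCTION_INIT(hid, ...) */
if (IS_ERR(fi))
	return PTR_ERR(fi);

opts = container_of(fi, struct f_hid_opts, func_inst);
opts->subclass = 0;			/* no subclass */
opts->protocol = 1;			/* keyboard */
opts->report_length = 8;
opts->report_desc = my_report_desc;	/* hypothetical descriptor buffer */
opts->report_desc_length = sizeof(my_report_desc);

f = usb_get_function(fi);		/* ends up in hidg_alloc() */
if (!IS_ERR(f))
	status = usb_add_function(c, f);	/* c: the usb_configuration */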
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 807b31c0edc3..a90440300735 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -20,6 +20,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -33,6 +34,7 @@
#include <linux/usb/midi.h>
#include "u_f.h"
+#include "u_midi.h"
MODULE_AUTHOR("Ben Williamson");
MODULE_LICENSE("GPL v2");
@@ -99,7 +101,7 @@ DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
/* B.3.1 Standard AC Interface Descriptor */
-static struct usb_interface_descriptor ac_interface_desc __initdata = {
+static struct usb_interface_descriptor ac_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -110,7 +112,7 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
};
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
+static struct uac1_ac_header_descriptor_1 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
@@ -121,7 +123,7 @@ static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
};
/* B.4.1 Standard MS Interface Descriptor */
-static struct usb_interface_descriptor ms_interface_desc __initdata = {
+static struct usb_interface_descriptor ms_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -132,7 +134,7 @@ static struct usb_interface_descriptor ms_interface_desc __initdata = {
};
/* B.4.2 Class-Specific MS Interface Descriptor */
-static struct usb_ms_header_descriptor ms_header_desc __initdata = {
+static struct usb_ms_header_descriptor ms_header_desc = {
.bLength = USB_DT_MS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
@@ -387,29 +389,6 @@ static void f_midi_disable(struct usb_function *f)
usb_ep_disable(midi->out_ep);
}
-static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- struct usb_composite_dev *cdev = f->config->cdev;
- struct f_midi *midi = func_to_midi(f);
- struct snd_card *card;
-
- DBG(cdev, "unbind\n");
-
- /* just to be sure */
- f_midi_disable(f);
-
- card = midi->card;
- midi->card = NULL;
- if (card)
- snd_card_free(card);
-
- kfree(midi->id);
- midi->id = NULL;
-
- usb_free_all_descriptors(f);
- kfree(midi);
-}
-
static int f_midi_snd_free(struct snd_device *device)
{
return 0;
@@ -654,6 +633,14 @@ static struct snd_rawmidi_ops gmidi_out_ops = {
.trigger = f_midi_out_trigger
};
+static inline void f_midi_unregister_card(struct f_midi *midi)
+{
+ if (midi->card) {
+ snd_card_free(midi->card);
+ midi->card = NULL;
+ }
+}
+
/* register as a sound "card" */
static int f_midi_register_card(struct f_midi *midi)
{
@@ -715,17 +702,13 @@ static int f_midi_register_card(struct f_midi *midi)
return 0;
fail:
- if (midi->card) {
- snd_card_free(midi->card);
- midi->card = NULL;
- }
+ f_midi_unregister_card(midi);
return err;
}
/* MIDI function driver setup/binding */
-static int __init
-f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
@@ -734,15 +717,23 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
+ struct usb_string *us;
int status, n, jack = 1, i = 0;
+ midi->gadget = cdev->gadget;
+ tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
+ status = f_midi_register_card(midi);
+ if (status < 0)
+ goto fail_register;
+
/* maybe allocate device-global string ID */
- if (midi_string_defs[0].id == 0) {
- status = usb_string_id(c->cdev);
- if (status < 0)
- goto fail;
- midi_string_defs[0].id = status;
+ us = usb_gstrings_attach(c->cdev, midi_strings,
+ ARRAY_SIZE(midi_string_defs));
+ if (IS_ERR(us)) {
+ status = PTR_ERR(us);
+ goto fail;
}
+ ac_interface_desc.iInterface = us[STRING_FUNC_IDX].id;
/* We have two interfaces, AudioControl and MIDIStreaming */
status = usb_interface_id(c, f);
@@ -892,6 +883,8 @@ fail_f_midi:
kfree(midi_function);
usb_free_descriptors(f->hs_descriptors);
fail:
+ f_midi_unregister_card(midi);
+fail_register:
/* we might as well release our claims on endpoints */
if (midi->out_ep)
midi->out_ep->driver_data = NULL;
@@ -903,42 +896,235 @@ fail:
return status;
}
-/**
- * f_midi_bind_config - add USB MIDI function to a configuration
- * @c: the configuration to supcard the USB audio function
- * @index: the soundcard index to use for the ALSA device creation
- * @id: the soundcard id to use for the ALSA device creation
- * @buflen: the buffer length to use
- * @qlen the number of read requests to pre-allocate
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- */
-int __init f_midi_bind_config(struct usb_configuration *c,
- int index, char *id,
- unsigned int in_ports,
- unsigned int out_ports,
- unsigned int buflen,
- unsigned int qlen)
+static inline struct f_midi_opts *to_f_midi_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_midi_opts,
+ func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(f_midi_opts);
+CONFIGFS_ATTR_OPS(f_midi_opts);
+
+static void midi_attr_release(struct config_item *item)
+{
+ struct f_midi_opts *opts = to_f_midi_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations midi_item_ops = {
+ .release = midi_attr_release,
+ .show_attribute = f_midi_opts_attr_show,
+ .store_attribute = f_midi_opts_attr_store,
+};
+
+#define F_MIDI_OPT(name, test_limit, limit) \
+static ssize_t f_midi_opts_##name##_show(struct f_midi_opts *opts, char *page) \
+{ \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_midi_opts_##name##_store(struct f_midi_opts *opts, \
+ const char *page, size_t len) \
+{ \
+ int ret; \
+ u32 num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtou32(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (test_limit && num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+static struct f_midi_opts_attribute f_midi_opts_##name = \
+ __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, f_midi_opts_##name##_show, \
+ f_midi_opts_##name##_store)
+
+F_MIDI_OPT(index, true, SNDRV_CARDS);
+F_MIDI_OPT(buflen, false, 0);
+F_MIDI_OPT(qlen, false, 0);
+F_MIDI_OPT(in_ports, true, MAX_PORTS);
+F_MIDI_OPT(out_ports, true, MAX_PORTS);
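For reference, a rough sketch of what one F_MIDI_OPT() invocation above expands to, abridged from the macro (illustration only, not part of the patch; function bodies omitted):

    static ssize_t f_midi_opts_qlen_show(struct f_midi_opts *opts, char *page);
    static ssize_t f_midi_opts_qlen_store(struct f_midi_opts *opts,
                                          const char *page, size_t len);
    static struct f_midi_opts_attribute f_midi_opts_qlen =
            __CONFIGFS_ATTR(qlen, S_IRUGO | S_IWUSR, f_midi_opts_qlen_show,
                            f_midi_opts_qlen_store);

Each generated show/store pair takes opts->lock, refuses writes while opts->refcnt is non-zero (-EBUSY), and, when test_limit is true, rejects values above the given limit with -EINVAL.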
+
+static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = strlcpy(page, opts->id, PAGE_SIZE);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_midi_opts_id_store(struct f_midi_opts *opts,
+ const char *page, size_t len)
+{
+ int ret;
+ char *c;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ c = kstrndup(page, len, GFP_KERNEL);
+ if (!c) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ if (opts->id_allocated)
+ kfree(opts->id);
+ opts->id = c;
+ opts->id_allocated = true;
+ ret = len;
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+static struct f_midi_opts_attribute f_midi_opts_id =
+ __CONFIGFS_ATTR(id, S_IRUGO | S_IWUSR, f_midi_opts_id_show,
+ f_midi_opts_id_store);
+
+static struct configfs_attribute *midi_attrs[] = {
+ &f_midi_opts_index.attr,
+ &f_midi_opts_buflen.attr,
+ &f_midi_opts_qlen.attr,
+ &f_midi_opts_in_ports.attr,
+ &f_midi_opts_out_ports.attr,
+ &f_midi_opts_id.attr,
+ NULL,
+};
+
+static struct config_item_type midi_func_type = {
+ .ct_item_ops = &midi_item_ops,
+ .ct_attrs = midi_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void f_midi_free_inst(struct usb_function_instance *f)
+{
+ struct f_midi_opts *opts;
+
+ opts = container_of(f, struct f_midi_opts, func_inst);
+
+ if (opts->id_allocated)
+ kfree(opts->id);
+
+ kfree(opts);
+}
+
+static struct usb_function_instance *f_midi_alloc_inst(void)
+{
+ struct f_midi_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = f_midi_free_inst;
+ opts->index = SNDRV_DEFAULT_IDX1;
+ opts->id = SNDRV_DEFAULT_STR1;
+ opts->buflen = 256;
+ opts->qlen = 32;
+ opts->in_ports = 1;
+ opts->out_ports = 1;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &midi_func_type);
+
+ return &opts->func_inst;
+}
+
+static void f_midi_free(struct usb_function *f)
+{
+ struct f_midi *midi;
+ struct f_midi_opts *opts;
+ int i;
+
+ midi = func_to_midi(f);
+ opts = container_of(f->fi, struct f_midi_opts, func_inst);
+ kfree(midi->id);
+ mutex_lock(&opts->lock);
+ for (i = opts->in_ports - 1; i >= 0; --i)
+ kfree(midi->in_port[i]);
+ kfree(midi);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+}
+
+static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct f_midi *midi = func_to_midi(f);
+ struct snd_card *card;
+
+ DBG(cdev, "unbind\n");
+
+ /* just to be sure */
+ f_midi_disable(f);
+
+ card = midi->card;
+ midi->card = NULL;
+ if (card)
+ snd_card_free(card);
+
+ usb_free_all_descriptors(f);
+}
+
+static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
{
struct f_midi *midi;
+ struct f_midi_opts *opts;
int status, i;
+ opts = container_of(fi, struct f_midi_opts, func_inst);
+
+ mutex_lock(&opts->lock);
/* sanity check */
- if (in_ports > MAX_PORTS || out_ports > MAX_PORTS)
- return -EINVAL;
+ if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) {
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-EINVAL);
+ }
/* allocate and initialize one new instance */
- midi = kzalloc(sizeof *midi, GFP_KERNEL);
+ midi = kzalloc(sizeof(*midi), GFP_KERNEL);
if (!midi) {
- status = -ENOMEM;
- goto fail;
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < in_ports; i++) {
+ for (i = 0; i < opts->in_ports; i++) {
struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
if (!port) {
status = -ENOMEM;
+ mutex_unlock(&opts->lock);
goto setup_fail;
}
@@ -948,39 +1134,37 @@ int __init f_midi_bind_config(struct usb_configuration *c,
midi->in_port[i] = port;
}
- midi->gadget = c->cdev->gadget;
- tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
-
/* set up ALSA midi devices */
- midi->in_ports = in_ports;
- midi->out_ports = out_ports;
- status = f_midi_register_card(midi);
- if (status < 0)
- goto setup_fail;
-
- midi->func.name = "gmidi function";
- midi->func.strings = midi_strings;
- midi->func.bind = f_midi_bind;
- midi->func.unbind = f_midi_unbind;
- midi->func.set_alt = f_midi_set_alt;
- midi->func.disable = f_midi_disable;
-
- midi->id = kstrdup(id, GFP_KERNEL);
- midi->index = index;
- midi->buflen = buflen;
- midi->qlen = qlen;
-
- status = usb_add_function(c, &midi->func);
- if (status)
- goto setup_fail;
-
- return 0;
-
+ midi->id = kstrdup(opts->id, GFP_KERNEL);
+ if (opts->id && !midi->id) {
+ status = -ENOMEM;
+ mutex_unlock(&opts->lock);
+ goto kstrdup_fail;
+ }
+ midi->in_ports = opts->in_ports;
+ midi->out_ports = opts->out_ports;
+ midi->index = opts->index;
+ midi->buflen = opts->buflen;
+ midi->qlen = opts->qlen;
+ ++opts->refcnt;
+ mutex_unlock(&opts->lock);
+
+ midi->func.name = "gmidi function";
+ midi->func.bind = f_midi_bind;
+ midi->func.unbind = f_midi_unbind;
+ midi->func.set_alt = f_midi_set_alt;
+ midi->func.disable = f_midi_disable;
+ midi->func.free_func = f_midi_free;
+
+ return &midi->func;
+
+kstrdup_fail:
+ f_midi_unregister_card(midi);
setup_fail:
for (--i; i >= 0; i--)
kfree(midi->in_port[i]);
kfree(midi);
-fail:
- return status;
+ return ERR_PTR(status);
}
+DECLARE_USB_FUNCTION_INIT(midi, f_midi_alloc_inst, f_midi_alloc);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 16361b0a8b46..bdcda9f5148e 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1441,6 +1441,9 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
NULL);
+ if (status)
+ goto fail;
+
/*
* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 1ec8b7ffdccd..c89e96cfa3e4 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -303,7 +303,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
+ page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
if (!page)
return -ENOMEM;
@@ -377,7 +377,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (page)
put_page(page);
if (req)
- pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
+ pn_rx_submit(fp, req, GFP_ATOMIC);
}
/*-------------------------------------------------------------------------*/
@@ -437,7 +437,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
- pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
}
spin_unlock(&port->lock);
return 0;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index f13fc6a58565..829edf878dac 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -375,8 +375,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
- if (skb2)
- rndis_add_hdr(skb2);
+ rndis_add_hdr(skb2);
dev_kfree_skb(skb);
return skb2;
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
new file mode 100644
index 000000000000..aaa0e368a159
--- /dev/null
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -0,0 +1,42 @@
+/*
+ * u_hid.h
+ *
+ * Utility definitions for the hid function
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_HID_H
+#define U_HID_H
+
+#include <linux/usb/composite.h>
+
+struct f_hid_opts {
+ struct usb_function_instance func_inst;
+ int minor;
+ unsigned char subclass;
+ unsigned char protocol;
+ unsigned short report_length;
+ unsigned short report_desc_length;
+ unsigned char *report_desc;
+ bool report_desc_alloc;
+
+ /*
+ * Protect the data from concurrent access by read/write
+ * and create symlink/remove symlink.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+int ghid_setup(struct usb_gadget *g, int count);
+void ghid_cleanup(void);
+
+#endif /* U_HID_H */
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
new file mode 100644
index 000000000000..22510189758e
--- /dev/null
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -0,0 +1,40 @@
+/*
+ * u_midi.h
+ *
+ * Utility definitions for the midi function
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_MIDI_H
+#define U_MIDI_H
+
+#include <linux/usb/composite.h>
+
+struct f_midi_opts {
+ struct usb_function_instance func_inst;
+ int index;
+ char *id;
+ bool id_allocated;
+ unsigned int in_ports;
+ unsigned int out_ports;
+ unsigned int buflen;
+ unsigned int qlen;
+
+ /*
+ * Protect the data from concurrent access by read/write
+ * and create symlink/remove symlink.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+#endif /* U_MIDI_H */
+
diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
index a44a07f30281..53842a1b947f 100644
--- a/drivers/usb/gadget/function/u_uac1.c
+++ b/drivers/usb/gadget/function/u_uac1.c
@@ -213,9 +213,6 @@ static int gaudio_open_snd_dev(struct gaudio *card)
fn_cap = opts->fn_cap;
fn_cntl = opts->fn_cntl;
- if (!card)
- return -ENODEV;
-
/* Open control device */
snd = &card->control;
snd->filp = filp_open(fn_cntl, O_RDWR, 0);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 24392d269709..fd48ef3af4eb 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -287,6 +287,7 @@ config USB_MIDI_GADGET
depends on SND
select USB_LIBCOMPOSITE
select SND_RAWMIDI
+ select USB_F_MIDI
help
The MIDI Gadget acts as a USB Audio device, with one MIDI
input and one MIDI output. These MIDI jacks appear as
@@ -419,6 +420,7 @@ endif # TTY
config USB_G_HID
tristate "HID Gadget"
select USB_LIBCOMPOSITE
+ select USB_F_HID
help
The HID gadget driver provides generic emulation of USB
Human Interface Devices (HID).
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 1b075132f8f1..633683a72a11 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -237,7 +237,7 @@ static void dbgp_unbind(struct usb_gadget *gadget)
static unsigned char tty_line;
#endif
-static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
+static int dbgp_configure_endpoints(struct usb_gadget *gadget)
{
int stp;
@@ -273,19 +273,10 @@ static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
dbgp.serial->in->desc = &i_desc;
dbgp.serial->out->desc = &o_desc;
-
- if (gserial_alloc_line(&tty_line)) {
- stp = 3;
- goto fail_3;
- }
+#endif
return 0;
-fail_3:
- dbgp.o_ep->driver_data = NULL;
-#else
- return 0;
-#endif
fail_2:
dbgp.i_ep->driver_data = NULL;
fail_1:
@@ -324,10 +315,17 @@ static int __init dbgp_bind(struct usb_gadget *gadget,
err = -ENOMEM;
goto fail;
}
+
+ if (gserial_alloc_line(&tty_line)) {
+ stp = 4;
+ err = -ENODEV;
+ goto fail;
+ }
#endif
+
err = dbgp_configure_endpoints(gadget);
if (err < 0) {
- stp = 4;
+ stp = 5;
goto fail;
}
@@ -383,6 +381,10 @@ static int dbgp_setup(struct usb_gadget *gadget,
#ifdef CONFIG_USB_G_DBGP_PRINTK
err = dbgp_enable_ep();
#else
+ err = dbgp_configure_endpoints(gadget);
+ if (err < 0) {
+ goto fail;
+ }
err = gserial_connect(dbgp.serial, tty_line);
#endif
if (err < 0)
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index 3d696b86ff76..e02a095294ac 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -37,7 +37,7 @@
#include "gadget_chips.h"
-#include "f_midi.c"
+#include "u_midi.h"
/*-------------------------------------------------------------------------*/
@@ -115,8 +115,13 @@ static struct usb_gadget_strings *dev_strings[] = {
NULL,
};
+static struct usb_function_instance *fi_midi;
+static struct usb_function *f_midi;
+
static int __exit midi_unbind(struct usb_composite_dev *dev)
{
+ usb_put_function(f_midi);
+ usb_put_function_instance(fi_midi);
return 0;
}
@@ -130,28 +135,54 @@ static struct usb_configuration midi_config = {
static int __init midi_bind_config(struct usb_configuration *c)
{
- return f_midi_bind_config(c, index, id,
- in_ports, out_ports,
- buflen, qlen);
+ int status;
+
+ f_midi = usb_get_function(fi_midi);
+ if (IS_ERR(f_midi))
+ return PTR_ERR(f_midi);
+
+ status = usb_add_function(c, f_midi);
+ if (status < 0) {
+ usb_put_function(f_midi);
+ return status;
+ }
+
+ return 0;
}
static int __init midi_bind(struct usb_composite_dev *cdev)
{
+ struct f_midi_opts *midi_opts;
int status;
+ fi_midi = usb_get_function_instance("midi");
+ if (IS_ERR(fi_midi))
+ return PTR_ERR(fi_midi);
+
+ midi_opts = container_of(fi_midi, struct f_midi_opts, func_inst);
+ midi_opts->index = index;
+ midi_opts->id = id;
+ midi_opts->in_ports = in_ports;
+ midi_opts->out_ports = out_ports;
+ midi_opts->buflen = buflen;
+ midi_opts->qlen = qlen;
+
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
- return status;
+ goto put;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
midi_config.iConfiguration = strings_dev[STRING_DESCRIPTION_IDX].id;
status = usb_add_config(cdev, &midi_config, midi_bind_config);
if (status < 0)
- return status;
+ goto put;
usb_composite_overwrite_options(cdev, &coverwrite);
pr_info("%s\n", longname);
return 0;
+put:
+ usb_put_function_instance(fi_midi);
+ return status;
}
static __refdata struct usb_composite_driver midi_driver = {
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 778613eb37af..614b06d80b41 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -17,11 +17,14 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
+#include <linux/usb/g_hid.h>
#include "gadget_chips.h"
#define DRIVER_DESC "HID Gadget"
#define DRIVER_VERSION "2010/03/16"
+#include "u_hid.h"
+
/*-------------------------------------------------------------------------*/
#define HIDG_VENDOR_NUM 0x0525 /* XXX NetChip */
@@ -29,17 +32,9 @@
/*-------------------------------------------------------------------------*/
-/*
- * kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module. So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_hid.c"
-
-
struct hidg_func_node {
+ struct usb_function_instance *fi;
+ struct usb_function *f;
struct list_head node;
struct hidg_func_descriptor *func;
};
@@ -113,8 +108,8 @@ static struct usb_gadget_strings *dev_strings[] = {
static int __init do_config(struct usb_configuration *c)
{
- struct hidg_func_node *e;
- int func = 0, status = 0;
+ struct hidg_func_node *e, *n;
+ int status = 0;
if (gadget_is_otg(c->cdev->gadget)) {
c->descriptors = otg_desc;
@@ -122,11 +117,24 @@ static int __init do_config(struct usb_configuration *c)
}
list_for_each_entry(e, &hidg_func_list, node) {
- status = hidg_bind_config(c, e->func, func++);
- if (status)
- break;
+ e->f = usb_get_function(e->fi);
+ if (IS_ERR(e->f))
+ goto put;
+ status = usb_add_function(c, e->f);
+ if (status < 0) {
+ usb_put_function(e->f);
+ goto put;
+ }
}
+ return 0;
+put:
+ list_for_each_entry(n, &hidg_func_list, node) {
+ if (n == e)
+ break;
+ usb_remove_function(c, n->f);
+ usb_put_function(n->f);
+ }
return status;
}
@@ -143,6 +151,8 @@ static int __init hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
struct list_head *tmp;
+ struct hidg_func_node *n, *m;
+ struct f_hid_opts *hid_opts;
int status, funcs = 0;
list_for_each(tmp, &hidg_func_list)
@@ -151,10 +161,20 @@ static int __init hid_bind(struct usb_composite_dev *cdev)
if (!funcs)
return -ENODEV;
- /* set up HID */
- status = ghid_setup(cdev->gadget, funcs);
- if (status < 0)
- return status;
+ list_for_each_entry(n, &hidg_func_list, node) {
+ n->fi = usb_get_function_instance("hid");
+ if (IS_ERR(n->fi)) {
+ status = PTR_ERR(n->fi);
+ goto put;
+ }
+ hid_opts = container_of(n->fi, struct f_hid_opts, func_inst);
+ hid_opts->subclass = n->func->subclass;
+ hid_opts->protocol = n->func->protocol;
+ hid_opts->report_length = n->func->report_length;
+ hid_opts->report_desc_length = n->func->report_desc_length;
+ hid_opts->report_desc = n->func->report_desc;
+ }
+
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
@@ -162,24 +182,37 @@ static int __init hid_bind(struct usb_composite_dev *cdev)
status = usb_string_ids_tab(cdev, strings_dev);
if (status < 0)
- return status;
+ goto put;
device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
/* register our configuration */
status = usb_add_config(cdev, &config_driver, do_config);
if (status < 0)
- return status;
+ goto put;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
return 0;
+
+put:
+ list_for_each_entry(m, &hidg_func_list, node) {
+ if (m == n)
+ break;
+ usb_put_function_instance(m->fi);
+ }
+ return status;
}
static int __exit hid_unbind(struct usb_composite_dev *cdev)
{
- ghid_cleanup();
+ struct hidg_func_node *n;
+
+ list_for_each_entry(n, &hidg_func_list, node) {
+ usb_put_function(n->f);
+ usb_put_function_instance(n->fi);
+ }
return 0;
}
@@ -231,7 +264,6 @@ static __refdata struct usb_composite_driver hidg_driver = {
static struct platform_driver hidg_plat_driver = {
.remove = hidg_plat_driver_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "hidg",
},
};
@@ -260,7 +292,7 @@ module_init(hidg_init);
static void __exit hidg_cleanup(void)
{
- platform_driver_unregister(&hidg_plat_driver);
usb_composite_unregister(&hidg_driver);
+ platform_driver_unregister(&hidg_plat_driver);
}
module_exit(hidg_cleanup);
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 6474081dcbaf..90545980542f 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -208,6 +208,43 @@ static struct usb_descriptor_header *hs_printer_function[] = {
NULL
};
+/*
+ * Endpoint descriptors for USB 3.0 (SuperSpeed) devices
+ */
+
+static struct usb_endpoint_descriptor ss_ep_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ep_in_comp_desc = {
+ .bLength = sizeof(ss_ep_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor ss_ep_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ep_out_comp_desc = {
+ .bLength = sizeof(ss_ep_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *ss_printer_function[] = {
+ (struct usb_descriptor_header *) &intf_desc,
+ (struct usb_descriptor_header *) &ss_ep_in_desc,
+ (struct usb_descriptor_header *) &ss_ep_in_comp_desc,
+ (struct usb_descriptor_header *) &ss_ep_out_desc,
+ (struct usb_descriptor_header *) &ss_ep_out_comp_desc,
+ NULL
+};
+
static struct usb_otg_descriptor otg_descriptor = {
.bLength = sizeof otg_descriptor,
.bDescriptorType = USB_DT_OTG,
@@ -220,7 +257,20 @@ static const struct usb_descriptor_header *otg_desc[] = {
};
/* maxpacket and other transfer characteristics vary by speed. */
-#define ep_desc(g, hs, fs) (((g)->speed == USB_SPEED_HIGH)?(hs):(fs))
+static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *fs,
+ struct usb_endpoint_descriptor *hs,
+ struct usb_endpoint_descriptor *ss)
+{
+ switch (gadget->speed) {
+ case USB_SPEED_SUPER:
+ return ss;
+ case USB_SPEED_HIGH:
+ return hs;
+ default:
+ return fs;
+ }
+}
/*-------------------------------------------------------------------------*/
@@ -793,11 +843,12 @@ set_printer_interface(struct printer_dev *dev)
{
int result = 0;
- dev->in_ep->desc = ep_desc(dev->gadget, &hs_ep_in_desc, &fs_ep_in_desc);
+ dev->in_ep->desc = ep_desc(dev->gadget, &fs_ep_in_desc, &hs_ep_in_desc,
+ &ss_ep_in_desc);
dev->in_ep->driver_data = dev;
- dev->out_ep->desc = ep_desc(dev->gadget, &hs_ep_out_desc,
- &fs_ep_out_desc);
+ dev->out_ep->desc = ep_desc(dev->gadget, &fs_ep_out_desc,
+ &hs_ep_out_desc, &ss_ep_out_desc);
dev->out_ep->driver_data = dev;
result = usb_ep_enable(dev->in_ep);
@@ -1016,9 +1067,11 @@ autoconf_fail:
/* assumes that all endpoints are dual-speed */
hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
+ ss_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
+ ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, NULL);
+ hs_printer_function, ss_printer_function);
if (ret)
return ret;
@@ -1253,7 +1306,7 @@ static __refdata struct usb_composite_driver printer_driver = {
.name = shortname,
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = printer_bind,
.unbind = printer_unbind,
};
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ebf09f439f3a..ff97ac93ac03 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -28,7 +28,7 @@
*
* Why is *this* driver using two configurations, rather than setting up
* two interfaces with different functions? To help verify that multiple
- * configuration infrastucture is working correctly; also, so that it can
+ * configuration infrastructure is working correctly; also, so that it can
* work with low capability USB controllers without four bulk endpoints.
*/
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 217365d35a25..b8e213eb36cc 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -241,6 +241,8 @@ config USB_M66592
dynamically linked module called "m66592_udc" and force all
gadget drivers to also be dynamically linked.
+source "drivers/usb/gadget/udc/bdc/Kconfig"
+
#
# Controllers available only in discrete form (and all PCI controllers)
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index a7f4491593f1..fba2049bf985 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
+obj-$(CONFIG_USB_BDC_UDC) += bdc/
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index 3b9d13848a4f..de7e5e2ccf1c 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -1401,9 +1401,8 @@ static int udc_wakeup(struct usb_gadget *gadget)
static int amd5536_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int amd5536_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
-/* gadget operations */
+static int amd5536_udc_stop(struct usb_gadget *g);
+
static const struct usb_gadget_ops udc_ops = {
.wakeup = udc_wakeup,
.get_frame = udc_get_frame,
@@ -1962,8 +1961,7 @@ __acquires(dev->lock)
}
/* Called by gadget driver to unregister itself */
-static int amd5536_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int amd5536_udc_stop(struct usb_gadget *g)
{
struct udc *dev = to_amd5536_udc(g);
unsigned long flags;
@@ -1971,7 +1969,7 @@ static int amd5536_udc_stop(struct usb_gadget *g,
spin_lock_irqsave(&dev->lock, flags);
udc_mask_unused_interrupts(dev);
- shutdown(dev, driver);
+ shutdown(dev, NULL);
spin_unlock_irqrestore(&dev->lock, flags);
dev->driver = NULL;
@@ -2873,7 +2871,7 @@ __acquires(dev->lock)
dev->driver->resume(&dev->gadget);
dev->sys_suspended = 0;
}
- dev->driver->disconnect(&dev->gadget);
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
spin_lock(&dev->lock);
/* disable ep0 to empty req queue */
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 9968f5331fe4..c862656d18b8 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -840,6 +840,31 @@ static void udc_reinit(struct at91_udc *udc)
}
}
+static void reset_gadget(struct at91_udc *udc)
+{
+ struct usb_gadget_driver *driver = udc->driver;
+ int i;
+
+ if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->suspended = 0;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct at91_ep *ep = &udc->ep[i];
+
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ if (driver) {
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, driver);
+ spin_lock(&udc->lock);
+ }
+
+ udc_reinit(udc);
+}
+
static void stop_activity(struct at91_udc *udc)
{
struct usb_gadget_driver *driver = udc->driver;
@@ -870,12 +895,10 @@ static void clk_on(struct at91_udc *udc)
return;
udc->clocked = 1;
- if (IS_ENABLED(CONFIG_COMMON_CLK)) {
- clk_set_rate(udc->uclk, 48000000);
- clk_prepare_enable(udc->uclk);
- }
- clk_prepare_enable(udc->iclk);
- clk_prepare_enable(udc->fclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_enable(udc->uclk);
+ clk_enable(udc->iclk);
+ clk_enable(udc->fclk);
}
static void clk_off(struct at91_udc *udc)
@@ -884,10 +907,10 @@ static void clk_off(struct at91_udc *udc)
return;
udc->clocked = 0;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- clk_disable_unprepare(udc->fclk);
- clk_disable_unprepare(udc->iclk);
+ clk_disable(udc->fclk);
+ clk_disable(udc->iclk);
if (IS_ENABLED(CONFIG_COMMON_CLK))
- clk_disable_unprepare(udc->uclk);
+ clk_disable(udc->uclk);
}
/*
@@ -984,8 +1007,8 @@ static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
static int at91_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
-static int at91_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
+static int at91_stop(struct usb_gadget *gadget);
+
static const struct usb_gadget_ops at91_udc_ops = {
.get_frame = at91_get_frame,
.wakeup = at91_wakeup,
@@ -1426,7 +1449,7 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
VDBG("end bus reset\n");
udc->addr = 0;
- stop_activity(udc);
+ reset_gadget(udc);
/* enable ep0 */
at91_udp_write(udc, AT91_UDP_CSR(0),
@@ -1512,20 +1535,11 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
/*-------------------------------------------------------------------------*/
-static void nop_release(struct device *dev)
-{
- /* nothing to free */
-}
-
static struct at91_udc controller = {
.gadget = {
.ops = &at91_udc_ops,
.ep0 = &controller.ep[0].ep,
.name = driver_name,
- .dev = {
- .init_name = "gadget",
- .release = nop_release,
- }
},
.ep[0] = {
.ep = {
@@ -1641,12 +1655,10 @@ static int at91_start(struct usb_gadget *gadget,
udc->enabled = 1;
udc->selfpowered = 1;
- DBG("bound to %s\n", driver->driver.name);
return 0;
}
-static int at91_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int at91_stop(struct usb_gadget *gadget)
{
struct at91_udc *udc;
unsigned long flags;
@@ -1659,7 +1671,6 @@ static int at91_stop(struct usb_gadget *gadget,
udc->driver = NULL;
- DBG("unbound from %s\n", driver->driver.name);
return 0;
}
@@ -1780,14 +1791,24 @@ static int at91udc_probe(struct platform_device *pdev)
}
/* don't do anything until we have both gadget driver and VBUS */
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(udc->uclk, 48000000);
+ retval = clk_prepare(udc->uclk);
+ if (retval)
+ goto fail1;
+ }
+ retval = clk_prepare(udc->fclk);
+ if (retval)
+ goto fail1a;
+
retval = clk_prepare_enable(udc->iclk);
if (retval)
- goto fail1;
+ goto fail1b;
at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
/* Clear all pending interrupts - UDP may be used by bootloader. */
at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
- clk_disable_unprepare(udc->iclk);
+ clk_disable(udc->iclk);
/* request UDC and maybe VBUS irqs */
udc->udp_irq = platform_get_irq(pdev, 0);
@@ -1795,7 +1816,7 @@ static int at91udc_probe(struct platform_device *pdev)
0, driver_name, udc);
if (retval < 0) {
DBG("request irq %d failed\n", udc->udp_irq);
- goto fail1;
+ goto fail1c;
}
if (gpio_is_valid(udc->board.vbus_pin)) {
retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
@@ -1848,6 +1869,13 @@ fail3:
gpio_free(udc->board.vbus_pin);
fail2:
free_irq(udc->udp_irq, udc);
+fail1c:
+ clk_unprepare(udc->iclk);
+fail1b:
+ clk_unprepare(udc->fclk);
+fail1a:
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_unprepare(udc->uclk);
fail1:
if (IS_ENABLED(CONFIG_COMMON_CLK) && !IS_ERR(udc->uclk))
clk_put(udc->uclk);
@@ -1896,6 +1924,11 @@ static int __exit at91udc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_unprepare(udc->uclk);
+ clk_unprepare(udc->fclk);
+ clk_unprepare(udc->iclk);
+
clk_put(udc->iclk);
clk_put(udc->fclk);
if (IS_ENABLED(CONFIG_COMMON_CLK))
@@ -1972,7 +2005,6 @@ static struct platform_driver at91_udc_driver = {
.resume = at91udc_resume,
.driver = {
.name = (char *) driver_name,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_udc_dt_ids),
},
};
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1529926e20a0..ce882371786b 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -987,8 +987,8 @@ usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
-static int atmel_usba_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
+static int atmel_usba_stop(struct usb_gadget *gadget);
+
static const struct usb_gadget_ops usba_udc_ops = {
.get_frame = usba_udc_get_frame,
.wakeup = usba_udc_wakeup,
@@ -1007,19 +1007,10 @@ static struct usb_endpoint_descriptor usba_ep0_desc = {
.bInterval = 1,
};
-static void nop_release(struct device *dev)
-{
-
-}
-
static struct usb_gadget usba_gadget_template = {
.ops = &usba_udc_ops,
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
- .dev = {
- .init_name = "gadget",
- .release = nop_release,
- },
};
/*
@@ -1685,11 +1676,10 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
reset_all_endpoints(udc);
- if (udc->gadget.speed != USB_SPEED_UNKNOWN
- && udc->driver && udc->driver->disconnect) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
udc->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
spin_lock(&udc->lock);
}
@@ -1791,8 +1781,6 @@ static int atmel_usba_start(struct usb_gadget *gadget,
return ret;
}
- DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
-
udc->vbus_prev = 0;
if (gpio_is_valid(udc->vbus_pin))
enable_irq(gpio_to_irq(udc->vbus_pin));
@@ -1809,8 +1797,7 @@ static int atmel_usba_start(struct usb_gadget *gadget,
return 0;
}
-static int atmel_usba_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int atmel_usba_stop(struct usb_gadget *gadget)
{
struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
unsigned long flags;
@@ -1830,8 +1817,6 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
clk_disable_unprepare(udc->hclk);
clk_disable_unprepare(udc->pclk);
- DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
-
udc->driver = NULL;
return 0;
@@ -2120,7 +2105,6 @@ static struct platform_driver udc_driver = {
.remove = __exit_p(usba_udc_remove),
.driver = {
.name = "atmel_usba_udc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_udc_dt_ids),
},
};
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 2235b8808700..9db968ba39f5 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1836,8 +1836,7 @@ static int bcm63xx_udc_start(struct usb_gadget *gadget,
* @gadget: USB slave device.
* @driver: Driver for USB slave devices.
*/
-static int bcm63xx_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
struct bcm63xx_udc *udc = gadget_to_udc(gadget);
unsigned long flags;
@@ -1963,7 +1962,7 @@ static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
struct bcm63xx_udc *udc = dev_id;
u32 stat;
- bool disconnected = false;
+ bool disconnected = false, bus_reset = false;
stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
@@ -1991,7 +1990,7 @@ static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
udc->ep0_req_reset = 1;
schedule_work(&udc->ep0_wq);
- disconnected = true;
+ bus_reset = true;
}
if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
if (bcm63xx_update_link_speed(udc)) {
@@ -2014,6 +2013,8 @@ static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
if (disconnected && udc->driver)
udc->driver->disconnect(&udc->gadget);
+ else if (bus_reset && udc->driver)
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
return IRQ_HANDLED;
}
@@ -2324,10 +2325,8 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
- if (!udc) {
- dev_err(dev, "cannot allocate memory\n");
+ if (!udc)
return -ENOMEM;
- }
platform_set_drvdata(pdev, udc);
udc->dev = dev;
@@ -2425,7 +2424,6 @@ static struct platform_driver bcm63xx_udc_driver = {
.remove = bcm63xx_udc_remove,
.driver = {
.name = DRV_MODULE_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(bcm63xx_udc_driver);
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
new file mode 100644
index 000000000000..0d7b8c9f72fd
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -0,0 +1,21 @@
+config USB_BDC_UDC
+ tristate "Broadcom USB3.0 device controller IP driver(BDC)"
+ depends on USB_GADGET && HAS_DMA
+
+ help
+ BDC is Broadcom's USB3.0 device controller IP. If your SoC has a BDC IP,
+ select this driver.
+
+ Say "y" here to link the driver statically, or "m" to build a dynamically
+ linked module called "bdc".
+
+if USB_BDC_UDC
+
+comment "Platform Support"
+config USB_BDC_PCI
+ tristate "BDC support for PCIe based platforms"
+ depends on PCI
+ default USB_BDC_UDC
+ help
+ Enable support for platforms where the BDC is connected through PCIe, such as the Lego3 FPGA platform.
+endif
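As an aside, a .config fragment that builds both the core driver and the PCIe glue as modules would look roughly like this (illustrative; the symbol names are the ones declared above):

    CONFIG_USB_BDC_UDC=m
    CONFIG_USB_BDC_PCI=m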
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
new file mode 100644
index 000000000000..5cf6a3bcdf0f
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_USB_BDC_UDC) += bdc.o
+bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
+
+ifneq ($(CONFIG_USB_GADGET_VERBOSE),)
+ bdc-y += bdc_dbg.o
+endif
+
+obj-$(CONFIG_USB_BDC_PCI) += bdc_pci.o
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
new file mode 100644
index 000000000000..dc18a20bf040
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -0,0 +1,490 @@
+/*
+ * bdc.h - header for the BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_BDC_H__
+#define __LINUX_BDC_H__
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <asm/unaligned.h>
+
+#define BRCM_BDC_NAME "bdc_usb3"
+#define BRCM_BDC_DESC "BDC device controller driver"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+/* BDC command operation timeout in usec */
+#define BDC_CMD_TIMEOUT 1000
+/* BDC controller operation timeout in usec */
+#define BDC_COP_TIMEOUT 500
+
+/*
+ * Maximum size of the ep0 response buffer for ch9 requests;
+ * the set_sel request uses 6 bytes so far, which is the maximum.
+ */
+#define EP0_RESPONSE_BUFF 6
+/* Start with SS as default */
+#define EP0_MAX_PKT_SIZE 512
+
+/* 64 entries in a SRR */
+#define NUM_SR_ENTRIES 64
+
+/* Num of bds per table */
+#define NUM_BDS_PER_TABLE 32
+
+/* Num of tables in bd list for control, bulk and Int ep */
+#define NUM_TABLES 2
+
+/* Num of tables in bd list for Isoch ep */
+#define NUM_TABLES_ISOCH 6
+
+/* U1 Timeout default: 248usec */
+#define U1_TIMEOUT 0xf8
+
+/* Interrupt coalescence in usec */
+#define INT_CLS 500
+
+/* Register offsets */
+/* Configuration and Capability registers */
+#define BDC_BDCCFG0 0x00
+#define BDC_BDCCFG1 0x04
+#define BDC_BDCCAP0 0x08
+#define BDC_BDCCAP1 0x0c
+#define BDC_CMDPAR0 0x10
+#define BDC_CMDPAR1 0x14
+#define BDC_CMDPAR2 0x18
+#define BDC_CMDSC 0x1c
+#define BDC_USPC 0x20
+#define BDC_USPPMS 0x28
+#define BDC_USPPM2 0x2c
+#define BDC_SPBBAL 0x38
+#define BDC_SPBBAH 0x3c
+#define BDC_BDCSC 0x40
+#define BDC_XSFNTF 0x4c
+
+#define BDC_DVCSA 0x50
+#define BDC_DVCSB 0x54
+#define BDC_EPSTS0(n) (0x60 + (n * 0x10))
+#define BDC_EPSTS1(n) (0x64 + (n * 0x10))
+#define BDC_EPSTS2(n) (0x68 + (n * 0x10))
+#define BDC_EPSTS3(n) (0x6c + (n * 0x10))
+#define BDC_EPSTS4(n) (0x70 + (n * 0x10))
+#define BDC_EPSTS5(n) (0x74 + (n * 0x10))
+#define BDC_EPSTS6(n) (0x78 + (n * 0x10))
+#define BDC_EPSTS7(n) (0x7c + (n * 0x10))
+#define BDC_SRRBAL(n) (0x200 + (n * 0x10))
+#define BDC_SRRBAH(n) (0x204 + (n * 0x10))
+#define BDC_SRRINT(n) (0x208 + (n * 0x10))
+#define BDC_INTCTLS(n) (0x20c + (n * 0x10))
+
+/* Extended capability regs */
+#define BDC_FSCNOC 0xcd4
+#define BDC_FSCNIC 0xce4
+#define NUM_NCS(p) (p >> 28)
+
+/* Register bit fields and Masks */
+/* BDC Configuration 0 */
+#define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8)
+#define BDC_SPB(p) (p & 0x7)
+
+/* BDC Capability1 */
+#define BDC_P64 (1 << 0)
+
+/* BDC Command register */
+#define BDC_CMD_FH 0xe
+#define BDC_CMD_DNC 0x6
+#define BDC_CMD_EPO 0x4
+#define BDC_CMD_BLA 0x3
+#define BDC_CMD_EPC 0x2
+#define BDC_CMD_DVC 0x1
+#define BDC_CMD_CWS (0x1 << 5)
+#define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6)
+#define BDC_CMD_EPN(p) ((p & 0x1f) << 10)
+#define BDC_SUB_CMD_ADD (0x1 << 17)
+#define BDC_SUB_CMD_FWK (0x4 << 17)
+/* Reset sequence number */
+#define BDC_CMD_EPO_RST_SN (0x1 << 16)
+#define BDC_CMD_EP0_XSD (0x1 << 16)
+#define BDC_SUB_CMD_ADD_EP (0x1 << 17)
+#define BDC_SUB_CMD_DRP_EP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STL (0x4 << 17)
+#define BDC_SUB_CMD_EP_RST (0x1 << 17)
+#define BDC_CMD_SRD (1 << 27)
+
+/* CMD completion status */
+#define BDC_CMDS_SUCC 0x1
+#define BDC_CMDS_PARA 0x3
+#define BDC_CMDS_STAT 0x4
+#define BDC_CMDS_FAIL 0x5
+#define BDC_CMDS_INTL 0x6
+#define BDC_CMDS_BUSY 0xf
+
+/* CMDSC Param 2 shifts */
+#define EPT_SHIFT 22
+#define MP_SHIFT 10
+#define MB_SHIFT 6
+#define EPM_SHIFT 4
+
+/* BDC USPSC */
+#define BDC_VBC (1 << 31)
+#define BDC_PRC (1 << 30)
+#define BDC_PCE (1 << 29)
+#define BDC_CFC (1 << 28)
+#define BDC_PCC (1 << 27)
+#define BDC_PSC (1 << 26)
+#define BDC_VBS (1 << 25)
+#define BDC_PRS (1 << 24)
+#define BDC_PCS (1 << 23)
+#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
+#define BDC_SCN (1 << 8)
+#define BDC_SDC (1 << 7)
+#define BDC_SWS (1 << 4)
+
+#define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf)
+#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
+
+#define BDC_SPEED_FS 0x1
+#define BDC_SPEED_LS 0x2
+#define BDC_SPEED_HS 0x3
+#define BDC_SPEED_SS 0x4
+
+#define BDC_PST(p) (p & 0xf)
+#define BDC_PST_MASK 0xf
+
+/* USPPMS */
+#define BDC_U2E (0x1 << 31)
+#define BDC_U1E (0x1 << 30)
+#define BDC_U2A (0x1 << 29)
+#define BDC_PORT_W1S (0x1 << 17)
+#define BDC_U1T(p) ((p) & 0xff)
+#define BDC_U2T(p) (((p) & 0xff) << 8)
+#define BDC_U1T_MASK 0xff
+
+/* USBPM2 */
+/* Hardware LPM Enable */
+#define BDC_HLE (1 << 16)
+
+/* BDC Status and Control */
+#define BDC_COP_RST (1 << 29)
+#define BDC_COP_RUN (2 << 29)
+#define BDC_COP_STP (4 << 29)
+
+#define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP)
+
+#define BDC_COS (1 << 28)
+#define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20)
+#define BDC_MASK_MCW (1 << 7)
+#define BDC_GIE (1 << 1)
+#define BDC_GIP (1 << 0)
+
+#define BDC_HLT 1
+#define BDC_NOR 2
+#define BDC_OIP 7
+
+/* Buffer descriptor and Status report bit fields and masks */
+#define BD_TYPE_BITMASK (0xf)
+#define BD_CHAIN 0xf
+
+#define BD_TFS_SHIFT 4
+#define BD_SOT (1 << 26)
+#define BD_EOT (1 << 27)
+#define BD_ISP (1 << 29)
+#define BD_IOC (1 << 30)
+#define BD_SBF (1 << 31)
+
+#define BD_INTR_TARGET(p) (((p) & 0x1f) << 27)
+
+#define BDC_SRR_RWS (1 << 4)
+#define BDC_SRR_RST (1 << 3)
+#define BDC_SRR_ISR (1 << 2)
+#define BDC_SRR_IE (1 << 1)
+#define BDC_SRR_IP (1 << 0)
+#define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24)
+#define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
+#define BDC_SRR_DPI_MASK 0x00ff0000
+
+#define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT)
+
+/* Control transfer BD specific fields */
+#define BD_DIR_IN (1 << 25)
+
+#define BDC_PTC_MASK 0xf0000000
+
+/* status report defines */
+#define SR_XSF 0
+#define SR_USPC 4
+#define SR_BD_LEN(p) (p & 0xffffff)
+
+#define XSF_SUCC 0x1
+#define XSF_SHORT 0x3
+#define XSF_BABB 0x4
+#define XSF_SETUP_RECV 0x6
+#define XSF_DATA_START 0x7
+#define XSF_STATUS_START 0x8
+
+#define XSF_STS(p) (((p) >> 28) & 0xf)
+
+/* Transfer BD fields */
+#define BD_LEN(p) ((p) & 0x1ffff)
+#define BD_LTF (1 << 25)
+#define BD_TYPE_DS 0x1
+#define BD_TYPE_SS 0x2
+
+#define BDC_EP_ENABLED (1 << 0)
+#define BDC_EP_STALL (1 << 1)
+#define BDC_EP_STOP (1 << 2)
+
+/* One BD can transfer max 65536 bytes */
+#define BD_MAX_BUFF_SIZE (1 << 16)
+/* Maximum bytes in one XFR; refer to the BDC spec */
+#define MAX_XFR_LEN 16777215
+
+/* defines for Force Header command */
+#define DEV_NOTF_TYPE 6
+#define FWK_SUBTYPE 1
+#define TRA_PACKET 4
+
+#define to_bdc_ep(e) container_of(e, struct bdc_ep, usb_ep)
+#define to_bdc_req(r) container_of(r, struct bdc_req, usb_req)
+#define gadget_to_bdc(g) container_of(g, struct bdc, gadget)
+
+/* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */
+#define BDC_TNOTIFY 2500 /* in ms */
+/* Devstatus bitfields */
+#define REMOTE_WAKEUP_ISSUED (1 << 16)
+#define DEVICE_SUSPENDED (1 << 17)
+#define FUNC_WAKE_ISSUED (1 << 18)
+#define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP)
+
+/* On disconnect, preserve these bits and clear rest */
+#define DEVSTATUS_CLEAR (1 << USB_DEVICE_SELF_POWERED)
+/* Hardware and software Data structures */
+
+/* Endpoint bd: buffer descriptor */
+struct bdc_bd {
+ __le32 offset[4];
+};
+
+/* Status report in Status report ring(srr) */
+struct bdc_sr {
+ __le32 offset[4];
+};
+
+/* bd_table: contiguous bds in a table */
+struct bd_table {
+ struct bdc_bd *start_bd;
+ /* dma address of the start bd of the table */
+ dma_addr_t dma;
+};
+
+/*
+ * Each endpoint has a bdl (buffer descriptor list); a bdl consists of 1 or
+ * more bd tables chained to each other through a chain bd, and every table
+ * has an equal number of bds. The software uses a bdi (bd index) to refer to
+ * a particular bd in the list.
+ */
+struct bd_list {
+ /* Array of bd table pointers*/
+ struct bd_table **bd_table_array;
+ /* How many tables chained to each other */
+ int num_tabs;
+ /* Max_bdi = num_tabs * num_bds_table - 1 */
+ int max_bdi;
+ /* current enq bdi from sw point of view */
+ int eqp_bdi;
+ /* current deq bdi from sw point of view */
+ int hwd_bdi;
+ /* numbers of bds per table */
+ int num_bds_table;
+};
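To make the bdi convention concrete, here is a hypothetical lookup helper written only against the fields above (a sketch, not part of the patch):

    /* Hypothetical: map a bdi to the corresponding bd in the list */
    static inline struct bdc_bd *bdi_to_bd(struct bd_list *bd_list, int bdi)
    {
            int tabi = bdi / bd_list->num_bds_table;  /* which table */
            int idx  = bdi % bd_list->num_bds_table;  /* bd within that table */

            return bd_list->bd_table_array[tabi]->start_bd + idx;
    }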
+
+struct bdc_req;
+
+/* Representation of a transfer, one transfer can have multiple bd's */
+struct bd_transfer {
+ struct bdc_req *req;
+ /* start bd index */
+ int start_bdi;
+ /* this will be the next hw dqp when this transfer completes */
+ int next_hwd_bdi;
+ /* number of bds in this transfer */
+ int num_bds;
+};
+
+/*
+ * Representation of a gadget request, every gadget request is contained
+ * by 1 bd_transfer.
+ */
+struct bdc_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct bdc_ep *ep;
+ /* only one Transfer per request */
+ struct bd_transfer bd_xfr;
+ int epnum;
+};
+
+/* scratchpad buffer needed by bdc hardware */
+struct bdc_scratchpad {
+ dma_addr_t sp_dma;
+ void *buff;
+ u32 size;
+};
+
+/* endpoint representation */
+struct bdc_ep {
+ struct usb_ep usb_ep;
+ struct list_head queue;
+ struct bdc *bdc;
+ u8 ep_type;
+ u8 dir;
+ u8 ep_num;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned int flags;
+ char name[20];
+ /* endpoint bd list*/
+ struct bd_list bd_list;
+ /*
+ * HW generates an extra event for multi-bd transfers; this flag helps
+ * in ignoring the extra event
+ */
+ bool ignore_next_sr;
+};
+
+/* bdc command parameter structure */
+struct bdc_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
+/* status report ring (srr); currently one srr is supported for the entire system */
+struct srr {
+ struct bdc_sr *sr_bds;
+ u16 eqp_index;
+ u16 dqp_index;
+ dma_addr_t dma_addr;
+};
+
+/* EP0 states */
+enum bdc_ep0_state {
+ WAIT_FOR_SETUP = 0,
+ WAIT_FOR_DATA_START,
+ WAIT_FOR_DATA_XMIT,
+ WAIT_FOR_STATUS_START,
+ WAIT_FOR_STATUS_XMIT,
+ STATUS_PENDING
+};
+
+/* Link states */
+enum bdc_link_state {
+ BDC_LINK_STATE_U0 = 0x00,
+ BDC_LINK_STATE_U3 = 0x03,
+ BDC_LINK_STATE_RX_DET = 0x05,
+ BDC_LINK_STATE_RESUME = 0x0f
+};
+
+/* representation of bdc */
+struct bdc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ struct device *dev;
+ /* device lock */
+ spinlock_t lock;
+
+ /* num of endpoints for a particular instantiation of IP */
+ unsigned int num_eps;
+ /*
+ * Array of ep's; it uses the same index convention as the bdc hw, i.e.
+ * 1 for ep0, 2 for 1out, 3 for 1in ...
+ */
+ struct bdc_ep **bdc_ep_array;
+ void __iomem *regs;
+ struct bdc_scratchpad scratchpad;
+ u32 sp_buff_size;
+ /* current driver supports 1 status ring */
+ struct srr srr;
+ /* Last received setup packet */
+ struct usb_ctrlrequest setup_pkt;
+ struct bdc_req ep0_req;
+ struct bdc_req status_req;
+ enum bdc_ep0_state ep0_state;
+ bool delayed_status;
+ bool zlp_needed;
+ bool reinit;
+ bool pullup;
+ /* Bits 0-15 are standard and 16-31 for proprietary information */
+ u32 devstatus;
+ int irq;
+ void *mem;
+ u32 dev_addr;
+ /* DMA pools */
+ struct dma_pool *bd_table_pool;
+ u8 test_mode;
+ /* array of callbacks for various status report handlers */
+ void (*sr_handler[2])(struct bdc *, struct bdc_sr *);
+ /* ep0 callback handlers */
+ void (*sr_xsf_ep0[3])(struct bdc *, struct bdc_sr *);
+ /* ep0 response buffer for ch9 requests like GET_STATUS and SET_SEL */
+ unsigned char ep0_response_buff[EP0_RESPONSE_BUFF];
+ /*
+ * Timer to check if the host resumed transfer after bdc sent a Func Wake
+ * notification packet following a remote wakeup. If not, resend the
+ * Func Wake packet every 2.5 secs. Refer to USB3 spec section 8.5.6.4
+ */
+ struct delayed_work func_wake_notify;
+};
+
+static inline u32 bdc_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void bdc_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+/* Buffer descriptor list operations */
+void bdc_notify_xfr(struct bdc *, u32);
+void bdc_softconn(struct bdc *);
+void bdc_softdisconn(struct bdc *);
+int bdc_run(struct bdc *);
+int bdc_stop(struct bdc *);
+int bdc_reset(struct bdc *);
+int bdc_udc_init(struct bdc *);
+void bdc_udc_exit(struct bdc *);
+int bdc_reinit(struct bdc *);
+
+/* Status report handlers */
+/* Upstream port status change sr */
+void bdc_sr_uspc(struct bdc *, struct bdc_sr *);
+/* transfer sr */
+void bdc_sr_xsf(struct bdc *, struct bdc_sr *);
+/* EP0 XSF handlers */
+void bdc_xsf_ep0_setup_recv(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_data_start(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_status_start(struct bdc *, struct bdc_sr *);
+
+#endif /* __LINUX_BDC_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
new file mode 100644
index 000000000000..6a4155c4bd86
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -0,0 +1,376 @@
+/*
+ * bdc_cmd.c - BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "bdc.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+/* Issues a cmd to cmd processor and waits for cmd completion */
+static int bdc_issue_cmd(struct bdc *bdc, u32 cmd_sc, u32 param0,
+ u32 param1, u32 param2)
+{
+ u32 timeout = BDC_CMD_TIMEOUT;
+ u32 cmd_status;
+ u32 temp;
+
+ bdc_writel(bdc->regs, BDC_CMDPAR0, param0);
+ bdc_writel(bdc->regs, BDC_CMDPAR1, param1);
+ bdc_writel(bdc->regs, BDC_CMDPAR2, param2);
+
+ /* Issue the cmd */
+ /* Make sure the cmd params are written before asking HW to exec cmd */
+ wmb();
+ bdc_writel(bdc->regs, BDC_CMDSC, cmd_sc | BDC_CMD_CWS | BDC_CMD_SRD);
+ do {
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg_ratelimited(bdc->dev, "cmdsc=%x", temp);
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status != BDC_CMDS_BUSY) {
+ dev_dbg(bdc->dev,
+ "command completed cmd_sts:%x\n", cmd_status);
+ return cmd_status;
+ }
+ udelay(1);
+ } while (timeout--);
+
+ dev_err(bdc->dev,
+ "command operation timedout cmd_status=%d\n", cmd_status);
+
+ return cmd_status;
+}
+
+/* Submits cmd and analyze the return value of bdc_issue_cmd */
+static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
+ u32 param0, u32 param1, u32 param2)
+{
+ u32 temp, cmd_status;
+ int reset_bdc = 0;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg(bdc->dev,
+ "%s:CMDSC:%08x cmdsc:%08x param0=%08x param1=%08x param2=%08x\n",
+ __func__, temp, cmd_sc, param0, param1, param2);
+
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status == BDC_CMDS_BUSY) {
+ dev_err(bdc->dev, "command processor busy: %x\n", cmd_status);
+ return -EBUSY;
+ }
+ ret = bdc_issue_cmd(bdc, cmd_sc, param0, param1, param2);
+ switch (ret) {
+ case BDC_CMDS_SUCC:
+ dev_dbg(bdc->dev, "command completed successfully\n");
+ ret = 0;
+ break;
+
+ case BDC_CMDS_PARA:
+ dev_err(bdc->dev, "command parameter error\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_STAT:
+ dev_err(bdc->dev, "Invalid device/ep state\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_FAIL:
+ dev_err(bdc->dev, "Command failed?\n");
+ ret = -EAGAIN;
+ break;
+
+ case BDC_CMDS_INTL:
+ dev_err(bdc->dev, "BDC Internal error\n");
+ reset_bdc = 1;
+ ret = -ECONNRESET;
+ break;
+
+ case BDC_CMDS_BUSY:
+ dev_err(bdc->dev,
+			"command timed out, waited for %d usec\n",
+ BDC_CMD_TIMEOUT);
+ reset_bdc = 1;
+ ret = -ECONNRESET;
+ break;
+ default:
+ dev_dbg(bdc->dev, "Unknown command completion code:%x\n", ret);
+ }
+
+ return ret;
+}
+
+/* Deconfigure the endpoint from HW */
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ u32 cmd_sc;
+
+ cmd_sc = BDC_SUB_CMD_DRP_EP|BDC_CMD_EPN(ep->ep_num)|BDC_CMD_EPC;
+ dev_dbg(bdc->dev, "%s ep->ep_num =%d cmd_sc=%x\n", __func__,
+ ep->ep_num, cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* Reinitialize the bdlist after the config ep command */
+static void ep_bd_list_reinit(struct bdc_ep *ep)
+{
+ struct bdc *bdc = ep->bdc;
+ struct bdc_bd *bd;
+
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+ bd = ep->bd_list.bd_table_array[0]->start_bd;
+ dev_dbg(bdc->dev, "%s ep:%p bd:%p\n", __func__, ep, bd);
+ memset(bd, 0, sizeof(struct bdc_bd));
+ bd->offset[3] |= cpu_to_le32(BD_SBF);
+}
+
+/* Configure an endpoint */
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ u32 param0, param1, param2, cmd_sc;
+ u32 mps, mbs, mul, si;
+ int ret;
+
+ desc = ep->desc;
+ comp_desc = ep->comp_desc;
+ cmd_sc = mul = mbs = param2 = 0;
+ param0 = lower_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ param1 = upper_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ dev_dbg(bdc->dev, "%s: param0=%08x param1=%08x",
+ __func__, param0, param1);
+ si = desc->bInterval;
+ si = clamp_val(si, 1, 16) - 1;
+
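+	/* bits 10:0 of wMaxPacketSize hold the actual max packet size */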
+ mps = usb_endpoint_maxp(desc);
+ mps &= 0x7ff;
+ param2 |= mps << MP_SHIFT;
+ param2 |= usb_endpoint_type(desc) << EPT_SHIFT;
+
+ switch (bdc->gadget.speed) {
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ param2 |= si;
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mul = comp_desc->bmAttributes;
+
+ }
+ param2 |= mul << EPM_SHIFT;
+ if (comp_desc)
+ mbs = comp_desc->bMaxBurst;
+ param2 |= mbs << MB_SHIFT;
+ break;
+
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ param2 |= si;
+
+ mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11;
+ param2 |= mbs << MB_SHIFT;
+ }
+ break;
+
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+		/* the controller expects SI in units of 125 usec */
+ if (usb_endpoint_xfer_isoc(desc))
+ si += 3;
+
+ /*
+ * FS Int endpoints can have si of 1-255ms but the controller
+ * accepts 2^bInterval*125usec, so convert ms to nearest power
+ * of 2
+ */
+ if (usb_endpoint_xfer_int(desc))
+ si = fls(desc->bInterval * 8) - 1;
+
+ param2 |= si;
+ break;
+ default:
+ dev_err(bdc->dev, "UNKNOWN speed ERR\n");
+ return -EINVAL;
+ }
+
+ cmd_sc |= BDC_CMD_EPC|BDC_CMD_EPN(ep->ep_num)|BDC_SUB_CMD_ADD_EP;
+
+ dev_dbg(bdc->dev, "cmd_sc=%x param2=%08x\n", cmd_sc, param2);
+ ret = bdc_submit_cmd(bdc, cmd_sc, param0, param1, param2);
+ if (ret) {
+ dev_err(bdc->dev, "command failed :%x\n", ret);
+ return ret;
+ }
+ ep_bd_list_reinit(ep);
+
+ return ret;
+}
+
+/*
+ * Change the HW deq pointer; if this command is successful, HW will start
+ * fetching the next bd from address dma_addr.
+ */
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s: add=%08llx\n", __func__,
+ (unsigned long long)(dma_addr));
+ param0 = lower_32_bits(dma_addr);
+ param1 = upper_32_bits(dma_addr);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ cmd_sc |= BDC_CMD_EPN(ep->ep_num)|BDC_CMD_BLA;
+ dev_dbg(bdc->dev, "cmd_sc=%x\n", cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Set the address sent by the host in the SET_ADDRESS request */
+int bdc_address_device(struct bdc *bdc, u32 add)
+{
+ u32 cmd_sc = 0;
+ u32 param2;
+
+ dev_dbg(bdc->dev, "%s: add=%d\n", __func__, add);
+ cmd_sc |= BDC_SUB_CMD_ADD|BDC_CMD_DVC;
+ param2 = add & 0x7f;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Send a Function Wake notification packet using FH command */
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ param0 = param1 = 0;
+ dev_dbg(bdc->dev, "%s intf=%d\n", __func__, intf);
+ cmd_sc |= BDC_CMD_FH;
+ param0 |= TRA_PACKET;
+ param0 |= (bdc->dev_addr << 25);
+ param1 |= DEV_NOTF_TYPE;
+ param1 |= (FWK_SUBTYPE<<4);
+ dev_dbg(bdc->dev, "param0=%08x param1=%08x\n", param0, param1);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Send a Function Wake notification packet using DNC command */
+int bdc_function_wake(struct bdc *bdc, u8 intf)
+{
+ u32 cmd_sc = 0;
+ u32 param2 = 0;
+
+ dev_dbg(bdc->dev, "%s intf=%d", __func__, intf);
+ param2 |= intf;
+ cmd_sc |= BDC_SUB_CMD_FWK|BDC_CMD_DNC;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Stall the endpoint */
+int bdc_ep_set_stall(struct bdc *bdc, int epnum)
+{
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s epnum=%d\n", __func__, epnum);
+ /* issue a stall endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_STL | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* resets the endpoint, called when host sends CLEAR_FEATURE(HALT) */
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s: epnum=%d\n", __func__, epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ /*
+	 * If the endpoint is not already stalled, then stall it and issue a
+	 * clear stall; this will reset the seq number for non-EP0 endpoints.
+ */
+ if (epnum != 1) {
+		/* if the endpoint is not stalled */
+ if (!(ep->flags & BDC_EP_STALL)) {
+ ret = bdc_ep_set_stall(bdc, epnum);
+ if (ret)
+ return ret;
+ }
+ }
+ /* Preserve the seq number for ep0 only */
+ if (epnum != 1)
+ cmd_sc |= BDC_CMD_EPO_RST_SN;
+
+ /* issue a reset endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_RST | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev, "command failed:%x\n", ret);
+ return ret;
+ }
+ bdc_notify_xfr(bdc, epnum);
+
+ return ret;
+}
+
+/* Stop the endpoint, called when software wants to dequeue some request */
+int bdc_stop_ep(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ ep = bdc->bdc_ep_array[epnum];
+ dev_dbg(bdc->dev, "%s: ep:%s ep->flags:%08x\n", __func__,
+ ep->name, ep->flags);
+ /* Endpoint has to be in running state to execute stop ep command */
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "stop endpoint called for disabled ep\n");
+ return -EINVAL;
+ }
+ if ((ep->flags & BDC_EP_STALL) || (ep->flags & BDC_EP_STOP))
+ return 0;
+
+ /* issue a stop endpoint command */
+ cmd_sc |= BDC_CMD_EP0_XSD | BDC_SUB_CMD_EP_STP
+ | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "stop endpoint command didn't complete:%d ep:%s\n",
+ ret, ep->name);
+ return ret;
+ }
+ ep->flags |= BDC_EP_STOP;
+ bdc_dump_epsts(bdc);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
new file mode 100644
index 000000000000..61d0e3bf9853
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -0,0 +1,29 @@
+/*
+ * bdc_cmd.h - header for the BDC command operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_CMD_H__
+#define __LINUX_BDC_CMD_H__
+
+/* Command operations */
+int bdc_address_device(struct bdc *, u32);
+int bdc_config_ep(struct bdc *, struct bdc_ep *);
+int bdc_dconfig_ep(struct bdc *, struct bdc_ep *);
+int bdc_stop_ep(struct bdc *, int);
+int bdc_ep_set_stall(struct bdc *, int);
+int bdc_ep_clear_stall(struct bdc *, int);
+int bdc_ep_set_halt(struct bdc_ep *, u32 , int);
+int bdc_ep_bla(struct bdc *, struct bdc_ep *, dma_addr_t);
+int bdc_function_wake(struct bdc*, u8);
+int bdc_function_wake_fh(struct bdc*, u8);
+
+#endif /* __LINUX_BDC_CMD_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
new file mode 100644
index 000000000000..c6dfef8c7bbc
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -0,0 +1,533 @@
+/*
+ * bdc_core.c - BRCM BDC USB3.0 device controller core operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/of.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+/* Poll till controller status is not OIP */
+static int poll_oip(struct bdc *bdc, int usec)
+{
+ u32 status;
+	/* Poll till STS != OIP */
+ while (usec) {
+ status = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (BDC_CSTS(status) != BDC_OIP) {
+ dev_dbg(bdc->dev,
+ "poll_oip complete status=%d",
+ BDC_CSTS(status));
+ return 0;
+ }
+ udelay(10);
+ usec -= 10;
+ }
+	dev_err(bdc->dev, "Err: operation timed out, BDCSC: 0x%08x\n", status);
+
+ return -ETIMEDOUT;
+}
+
+/* Stop the BDC controller */
+int bdc_stop(struct bdc *bdc)
+{
+ int ret;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* Check if BDC is already halted */
+ if (BDC_CSTS(temp) == BDC_HLT) {
+ dev_vdbg(bdc->dev, "BDC already halted\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_STP;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc stop operation failed");
+
+ return ret;
+}
+
+/* Issue a reset to BDC controller */
+int bdc_reset(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ /* First halt the controller */
+ ret = bdc_stop(bdc);
+ if (ret)
+ return ret;
+
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_RST;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc reset operation failed");
+
+ return ret;
+}
+
+/* Run the BDC controller */
+int bdc_run(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* if BDC is already in running state then do not do anything */
+ if (BDC_CSTS(temp) == BDC_NOR) {
+ dev_warn(bdc->dev, "bdc is already in running state\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COP_RUN;
+ temp |= BDC_COS;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret) {
+ dev_err(bdc->dev, "bdc run operation failed:%d", ret);
+ return ret;
+ }
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (BDC_CSTS(temp) != BDC_NOR) {
+ dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
+ BDC_CSTS(temp));
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
+/*
+ * Present the termination to the host, typically called from upstream port
+ * event with Vbus present = 1
+ */
+void bdc_softconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= ~BDC_PST_MASK;
+ uspc |= BDC_LINK_STATE_RX_DET;
+ uspc |= BDC_SWS;
+ dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Remove the termination */
+void bdc_softdisconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc |= BDC_SDC;
+ uspc &= ~BDC_SCN;
+ dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_setup(struct bdc *bdc)
+{
+ int sp_buff_size;
+ u32 low32;
+ u32 upp32;
+
+ sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
+ if (!sp_buff_size) {
+ dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
+ return 0;
+ }
+ /* Refer to BDC spec, Table 4 for description of SPB */
+ sp_buff_size = 1 << (sp_buff_size + 5);
+ dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
+ bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma, GFP_KERNEL);
+
+ if (!bdc->scratchpad.buff)
+ goto fail;
+
+ bdc->sp_buff_size = sp_buff_size;
+ bdc->scratchpad.size = sp_buff_size;
+ low32 = lower_32_bits(bdc->scratchpad.sp_dma);
+ upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+ bdc_writel(bdc->regs, BDC_SPBBAL, low32);
+ bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
+ return 0;
+
+fail:
+ bdc->scratchpad.buff = NULL;
+
+ return -ENOMEM;
+}
+
+/* Allocate the status report ring */
+static int setup_srr(struct bdc *bdc, int interrupter)
+{
+ dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
+ bdc->srr.dqp_index = 0;
+ /* allocate the status report descriptors */
+ bdc->srr.sr_bds = dma_zalloc_coherent(
+ bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr,
+ GFP_KERNEL);
+ if (!bdc->srr.sr_bds)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Initialize the HW regs and internal data structures */
+static void bdc_mem_init(struct bdc *bdc, bool reinit)
+{
+ u8 size = 0;
+ u32 usb2_pm;
+ u32 low32;
+ u32 upp32;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ bdc->dev_addr = 0;
+ bdc->srr.eqp_index = 0;
+ bdc->srr.dqp_index = 0;
+ bdc->zlp_needed = false;
+ bdc->delayed_status = false;
+
+ bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
+ /* Init the SRR */
+ temp = BDC_SRR_RWS | BDC_SRR_RST;
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+ dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
+ temp = lower_32_bits(bdc->srr.dma_addr);
+ size = fls(NUM_SR_ENTRIES) - 2;
+ temp |= size;
+ dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
+ temp, NUM_SR_ENTRIES, size);
+
+ low32 = lower_32_bits(temp);
+ upp32 = upper_32_bits(bdc->srr.dma_addr);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+
+	/* Write the dma addresses into regs */
+ bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
+ bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);
+
+ temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ temp |= BDC_SRR_IE;
+ temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+
+ /* Set the Interrupt Coalescence ~500 usec */
+ temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
+ temp &= ~0xffff;
+ temp |= INT_CLS;
+ bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+	/* Enable hardware LPM (HLE) */
+ usb2_pm |= BDC_HLE;
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ /* readback for debug */
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
+
+ /* Disable any unwanted SR's on SRR */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* We don't want Microframe counter wrap SR */
+ temp |= BDC_MASK_MCW;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ /*
+	 * In some error cases the driver has to reset the entire BDC
+	 * controller; in that case reinit is passed as true
+ */
+ if (reinit) {
+ /* Enable interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ /* Init scratchpad to 0 */
+ memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
+ /* Initialize SRR to 0 */
+ memset(bdc->srr.sr_bds, 0,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd));
+ } else {
+		/* One-time initialization only */
+ /* Enable status report function pointers */
+ bdc->sr_handler[0] = bdc_sr_xsf;
+ bdc->sr_handler[1] = bdc_sr_uspc;
+
+ /* EP0 status report function pointers */
+ bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
+ bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
+ bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
+ }
+}
+
+/* Free the dynamic memory */
+static void bdc_mem_free(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /* Free SRR */
+ if (bdc->srr.sr_bds)
+ dma_free_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ bdc->srr.sr_bds, bdc->srr.dma_addr);
+
+ /* Free scratchpad */
+ if (bdc->scratchpad.buff)
+ dma_free_coherent(bdc->dev, bdc->sp_buff_size,
+ bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
+
+ /* Destroy the dma pools */
+ if (bdc->bd_table_pool)
+ dma_pool_destroy(bdc->bd_table_pool);
+
+ /* Free the bdc_ep array */
+ kfree(bdc->bdc_ep_array);
+
+ bdc->srr.sr_bds = NULL;
+ bdc->scratchpad.buff = NULL;
+ bdc->bd_table_pool = NULL;
+ bdc->bdc_ep_array = NULL;
+}
+
+/*
+ * bdc_reinit resets the controller and reinitializes the registers; it is
+ * called from disconnect/bus reset scenarios to ensure proper HW cleanup
+ */
+int bdc_reinit(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ret = bdc_stop(bdc);
+ if (ret)
+ goto out;
+
+ ret = bdc_reset(bdc);
+ if (ret)
+ goto out;
+
+	/* the reinit flag is true */
+ bdc_mem_init(bdc, true);
+ ret = bdc_run(bdc);
+out:
+ bdc->reinit = false;
+
+ return ret;
+}
+
+/* Allocate all the dynamic memory */
+static int bdc_mem_alloc(struct bdc *bdc)
+{
+ u32 page_size;
+ unsigned int num_ieps, num_oeps;
+
+ dev_dbg(bdc->dev,
+ "%s() NUM_BDS_PER_TABLE:%d\n", __func__,
+ NUM_BDS_PER_TABLE);
+ page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ /* page size is 2^pgs KB */
+ page_size = 1 << page_size;
+ /* KB */
+ page_size <<= 10;
+ dev_dbg(bdc->dev, "page_size=%d\n", page_size);
+
+ /* Create a pool of bd tables */
+ bdc->bd_table_pool =
+ dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
+ 16, page_size);
+
+ if (!bdc->bd_table_pool)
+ goto fail;
+
+ if (scratchpad_setup(bdc))
+ goto fail;
+
+ /* read from regs */
+ num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
+ num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
+ /* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
+ bdc->num_eps = num_ieps + num_oeps + 2;
+ dev_dbg(bdc->dev,
+		"ieps:%d oeps:%d num_eps:%d\n",
+ num_ieps, num_oeps, bdc->num_eps);
+ /* allocate array of ep pointers */
+ bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
+ GFP_KERNEL);
+ if (!bdc->bdc_ep_array)
+ goto fail;
+
+ dev_dbg(bdc->dev, "Allocating sr report0\n");
+ if (setup_srr(bdc, 0))
+ goto fail;
+
+ return 0;
+fail:
+ dev_warn(bdc->dev, "Couldn't initialize memory\n");
+ bdc_mem_free(bdc);
+
+ return -ENOMEM;
+}
+
+/* opposite to bdc_hw_init */
+static void bdc_hw_exit(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_mem_free(bdc);
+}
+
+/* Initialize the bdc HW and memory */
+static int bdc_hw_init(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ ret = bdc_reset(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
+ return ret;
+ }
+ ret = bdc_mem_alloc(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "Mem alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+	bdc_mem_init(bdc, false);
+ bdc_dbg_regs(bdc);
+ dev_dbg(bdc->dev, "HW Init done\n");
+
+ return 0;
+}
+
+static int bdc_probe(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+ struct resource *res;
+ int ret = -ENOMEM;
+ int irq;
+ u32 temp;
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "%s()\n", __func__);
+ bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
+ if (!bdc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bdc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bdc->regs)) {
+ dev_err(dev, "ioremap error\n");
+ return -ENOMEM;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "platform_get_irq failed:%d\n", irq);
+ return irq;
+ }
+ spin_lock_init(&bdc->lock);
+ platform_set_drvdata(pdev, bdc);
+ bdc->irq = irq;
+ bdc->dev = dev;
+ dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
+
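+	/* BDC_P64 indicates 64-bit DMA addressing support in the controller */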
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ if ((temp & BDC_P64) &&
+ !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ dev_dbg(bdc->dev, "Using 64-bit address\n");
+ } else {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(bdc->dev, "No suitable DMA config available, abort\n");
+ return -ENOTSUPP;
+ }
+ dev_dbg(bdc->dev, "Using 32-bit address\n");
+ }
+ ret = bdc_hw_init(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "BDC init failure:%d\n", ret);
+ return ret;
+ }
+ ret = bdc_udc_init(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "BDC Gadget init failure:%d\n", ret);
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ bdc_hw_exit(bdc);
+
+ return ret;
+}
+
+static int bdc_remove(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+
+ bdc = platform_get_drvdata(pdev);
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_udc_exit(bdc);
+ bdc_hw_exit(bdc);
+
+ return 0;
+}
+
+static struct platform_driver bdc_driver = {
+ .driver = {
+ .name = BRCM_BDC_NAME,
+ .owner = THIS_MODULE
+ },
+ .probe = bdc_probe,
+ .remove = bdc_remove,
+};
+
+module_platform_driver(bdc_driver);
+MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(BRCM_BDC_DESC);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
new file mode 100644
index 000000000000..5945dbc47825
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -0,0 +1,123 @@
+/*
+ * bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+void bdc_dbg_regs(struct bdc *bdc)
+{
+ u32 temp;
+
+ dev_vdbg(bdc->dev, "bdc->regs:%p\n", bdc->regs);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG0);
+ dev_vdbg(bdc->dev, "bdccfg0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG1);
+ dev_vdbg(bdc->dev, "bdccfg1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP0);
+ dev_vdbg(bdc->dev, "bdccap0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
+ dev_vdbg(bdc->dev, "bdccap1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ dev_vdbg(bdc->dev, "uspc:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSA);
+ dev_vdbg(bdc->dev, "dvcsa:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSB);
+	dev_vdbg(bdc->dev, "dvcsb:0x%08x\n", temp);
+}
+
+void bdc_dump_epsts(struct bdc *bdc)
+{
+ u32 temp;
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS0(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS0:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS1(0));
+	dev_vdbg(bdc->dev, "BDC_EPSTS1:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS2(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS2:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS3(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS3:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS4(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS4:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS5(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS5:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS6(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS6:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS7(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS7:0x%08x\n", temp);
+}
+
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{
+ struct bdc_sr *sr;
+ dma_addr_t addr;
+ int i;
+
+ sr = bdc->srr.sr_bds;
+ addr = bdc->srr.dma_addr;
+ dev_vdbg(bdc->dev, "bdc_dbg_srr sr:%p dqp_index:%d\n",
+ sr, bdc->srr.dqp_index);
+ for (i = 0; i < NUM_SR_ENTRIES; i++) {
+ sr = &bdc->srr.sr_bds[i];
+ dev_vdbg(bdc->dev, "%llx %08x %08x %08x %08x\n",
+ (unsigned long long)addr,
+ le32_to_cpu(sr->offset[0]),
+ le32_to_cpu(sr->offset[1]),
+ le32_to_cpu(sr->offset[2]),
+ le32_to_cpu(sr->offset[3]));
+ addr += sizeof(*sr);
+ }
+}
+
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bd_table *bd_table;
+ struct bdc_bd *bd;
+ int tbi, bdi, gbdi;
+ dma_addr_t dma;
+
+ gbdi = 0;
+ dev_vdbg(bdc->dev,
+ "Dump bd list for %s epnum:%d\n",
+ ep->name, ep->ep_num);
+
+ dev_vdbg(bdc->dev,
+ "tabs:%d max_bdi:%d eqp_bdi:%d hwd_bdi:%d num_bds_table:%d\n",
+ bd_list->num_tabs, bd_list->max_bdi, bd_list->eqp_bdi,
+ bd_list->hwd_bdi, bd_list->num_bds_table);
+
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ for (bdi = 0; bdi < bd_list->num_bds_table; bdi++) {
+ bd = bd_table->start_bd + bdi;
+ dma = bd_table->dma + (sizeof(struct bdc_bd) * bdi);
+ dev_vdbg(bdc->dev,
+ "tbi:%2d bdi:%2d gbdi:%2d virt:%p phys:%llx %08x %08x %08x %08x\n",
+ tbi, bdi, gbdi++, bd, (unsigned long long)dma,
+ le32_to_cpu(bd->offset[0]),
+ le32_to_cpu(bd->offset[1]),
+ le32_to_cpu(bd->offset[2]),
+ le32_to_cpu(bd->offset[3]));
+ }
+ dev_vdbg(bdc->dev, "\n\n");
+ }
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
new file mode 100644
index 000000000000..338a6c701315
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -0,0 +1,37 @@
+/*
+ * bdc_dbg.h - header for the BDC debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_DBG_H__
+#define __LINUX_BDC_DBG_H__
+
+#include "bdc.h"
+
+#ifdef CONFIG_USB_GADGET_VERBOSE
+void bdc_dbg_bd_list(struct bdc *, struct bdc_ep*);
+void bdc_dbg_srr(struct bdc *, u32);
+void bdc_dbg_regs(struct bdc *);
+void bdc_dump_epsts(struct bdc *);
+#else
+static inline void bdc_dbg_regs(struct bdc *bdc)
+{ }
+
+static inline void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{ }
+
+static inline void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{ }
+
+static inline void bdc_dump_epsts(struct bdc *bdc)
+{ }
+#endif /* CONFIG_USB_GADGET_VERBOSE */
+#endif /* __LINUX_BDC_DBG_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
new file mode 100644
index 000000000000..ff67ceac77c4
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -0,0 +1,2024 @@
+/*
+ * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+#include <linux/usb/composite.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const char * const ep0_state_string[] = {
+ "WAIT_FOR_SETUP",
+ "WAIT_FOR_DATA_START",
+ "WAIT_FOR_DATA_XMIT",
+ "WAIT_FOR_STATUS_START",
+ "WAIT_FOR_STATUS_XMIT",
+ "STATUS_PENDING"
+};
+
+/* Free the bdl during ep disable */
+static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ int index;
+
+ dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
+ __func__, ep->name, num_tabs);
+
+ if (!bd_list->bd_table_array) {
+ dev_dbg(bdc->dev, "%s already freed\n", ep->name);
+ return;
+ }
+ for (index = 0; index < num_tabs; index++) {
+ /*
+		 * Check if the bd_table struct is allocated; if yes, then
+		 * check if bd memory has been allocated, then free the
+		 * dma_pool and also the bd_table struct memory
+ */
+ bd_table = bd_list->bd_table_array[index];
+ dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
+ if (!bd_table) {
+ dev_dbg(bdc->dev, "bd_table not allocated\n");
+ continue;
+ }
+ if (!bd_table->start_bd) {
+			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
+ continue;
+ }
+
+ dev_dbg(bdc->dev,
+ "Free dma pool start_bd:%p dma:%llx\n",
+ bd_table->start_bd,
+ (unsigned long long)bd_table->dma);
+
+ dma_pool_free(bdc->bd_table_pool,
+ bd_table->start_bd,
+ bd_table->dma);
+ /* Free the bd_table structure */
+ kfree(bd_table);
+ }
+ /* Free the bd table array */
+ kfree(ep->bd_list.bd_table_array);
+}
+
+/*
+ * Chain the tables by inserting a chain bd at the end of prev_table, pointing
+ * to next_table
+ */
+static inline void chain_table(struct bd_table *prev_table,
+ struct bd_table *next_table,
+ u32 bd_p_tab)
+{
+ /* Chain the prev table to next table */
+ prev_table->start_bd[bd_p_tab-1].offset[0] =
+ cpu_to_le32(lower_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[1] =
+ cpu_to_le32(upper_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[2] =
+ 0x0;
+
+ prev_table->start_bd[bd_p_tab-1].offset[3] =
+ cpu_to_le32(MARK_CHAIN_BD);
+}
+
+/* Allocate the bdl for ep, during config ep */
+static int ep_bd_list_alloc(struct bdc_ep *ep)
+{
+ struct bd_table *prev_table = NULL;
+ int index, num_tabs, bd_p_tab;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ dma_addr_t dma;
+
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ num_tabs = NUM_TABLES_ISOCH;
+ else
+ num_tabs = NUM_TABLES;
+
+ bd_p_tab = NUM_BDS_PER_TABLE;
+ /* if there is only 1 table in bd list then loop chain to self */
+ dev_dbg(bdc->dev,
+ "%s ep:%p num_tabs:%d\n",
+ __func__, ep, num_tabs);
+
+ /* Allocate memory for table array */
+ ep->bd_list.bd_table_array = kzalloc(
+ num_tabs * sizeof(struct bd_table *),
+ GFP_ATOMIC);
+ if (!ep->bd_list.bd_table_array)
+ return -ENOMEM;
+
+ /* Allocate memory for each table */
+ for (index = 0; index < num_tabs; index++) {
+ /* Allocate memory for bd_table structure */
+ bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
+ if (!bd_table)
+ goto fail;
+
+ bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
+ GFP_ATOMIC,
+ &dma);
+ if (!bd_table->start_bd)
+ goto fail;
+
+ bd_table->dma = dma;
+
+ dev_dbg(bdc->dev,
+ "index:%d start_bd:%p dma=%08llx prev_table:%p\n",
+ index, bd_table->start_bd,
+ (unsigned long long)bd_table->dma, prev_table);
+
+ ep->bd_list.bd_table_array[index] = bd_table;
+ memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
+ if (prev_table)
+ chain_table(prev_table, bd_table, bd_p_tab);
+
+ prev_table = bd_table;
+ }
+ chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
+ /* Memory allocation is successful, now init the internal fields */
+ ep->bd_list.num_tabs = num_tabs;
+ ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
+ ep->bd_list.num_tabs = num_tabs;
+ ep->bd_list.num_bds_table = bd_p_tab;
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+
+ return 0;
+fail:
+ /* Free the bd_table_array, bd_table struct, bd's */
+ ep_bd_list_free(ep, num_tabs);
+
+ return -ENOMEM;
+}
+
+/* returns how many bd's are needed for this transfer */
+static inline int bd_needed_req(struct bdc_req *req)
+{
+ int bd_needed = 0;
+ int remaining;
+
+ /* 1 bd needed for 0 byte transfer */
+ if (req->usb_req.length == 0)
+ return 1;
+
+	/* remaining bytes after transferring all max BD size BD's */
+ remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
+ if (remaining)
+ bd_needed++;
+
+ /* How many maximum BUFF size BD's ? */
+ remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
+ bd_needed += remaining;
+
+ return bd_needed;
+}
+
+/* returns the bd index(bdi) corresponding to bd dma address */
+static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ dma_addr_t dma_first_bd, dma_last_bd;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ bool found = false;
+ int tbi, bdi;
+
+ dma_first_bd = dma_last_bd = 0;
+ dev_dbg(bdc->dev, "%s %llx\n",
+ __func__, (unsigned long long)bd_dma_addr);
+ /*
+	 * Find which table this bd_dma_addr belongs to: go through the table
+	 * array and compare it against the first and last bd addresses of
+	 * each table
+ */
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ dma_first_bd = bd_table->dma;
+ dma_last_bd = bd_table->dma +
+ (sizeof(struct bdc_bd) *
+ (bd_list->num_bds_table - 1));
+ dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
+ (unsigned long long)dma_first_bd,
+ (unsigned long long)dma_last_bd);
+ if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
+ found = true;
+ break;
+ }
+ }
+ if (unlikely(!found)) {
+ dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
+ return -EINVAL;
+ }
+ /* Now we know the table, find the bdi */
+ bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);
+
+ /* return the global bdi, to compare with ep eqp_bdi */
+ return (bdi + (tbi * bd_list->num_bds_table));
+}
+
+/* returns the table index(tbi) of the given bdi */
+static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
+{
+ int tbi;
+
+ tbi = bdi / ep->bd_list.num_bds_table;
+ dev_vdbg(ep->bdc->dev,
+ "bdi:%d num_bds_table:%d tbi:%d\n",
+ bdi, ep->bd_list.num_bds_table, tbi);
+
+ return tbi;
+}
+
+/* Find the bdi of the last bd in the transfer */
+static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
+{
+ int end_bdi;
+
+ end_bdi = next_hwd_bdi - 1;
+ if (end_bdi < 0)
+ end_bdi = ep->bd_list.max_bdi - 1;
+ else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
+ end_bdi--;
+
+ return end_bdi;
+}
+
+/*
+ * How many transfer bd's are available on this ep bdl, chain bds are not
+ * counted in available bds
+ */
+static int bd_available_ep(struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ int available1, available2;
+ struct bdc *bdc = ep->bdc;
+ int chain_bd1, chain_bd2;
+ int available_bd = 0;
+
+ available1 = available2 = chain_bd1 = chain_bd2 = 0;
+ /* if empty then we have all bd's available - number of chain bd's */
+ if (bd_list->eqp_bdi == bd_list->hwd_bdi)
+ return bd_list->max_bdi - bd_list->num_tabs;
+
+ /*
+	 * Depending upon where the eqp and dqp pointers are, calculate the
+	 * number of available bd's
+ */
+ if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
+ /* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
+ available1 = bd_list->max_bdi - bd_list->eqp_bdi;
+ available2 = bd_list->hwd_bdi;
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ chain_bd2 = available2 / bd_list->num_bds_table;
+ dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
+ chain_bd1, chain_bd2);
+ available_bd = available1 + available2 - chain_bd1 - chain_bd2;
+ } else {
+ /* available bd's are from eqp..dqp - number of chain bd's */
+ available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
+ /* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
+ if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
+ <= bd_list->num_bds_table) {
+			/* If there is any chain bd in between */
+ if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
+ == bdi_to_tbi(ep, bd_list->eqp_bdi))) {
+ available_bd = available1 - 1;
+ }
+ } else {
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ available_bd = available1 - chain_bd1;
+ }
+ }
+ /*
+ * we need to keep one extra bd to check if ring is full or empty so
+ * reduce by 1
+ */
+ available_bd--;
+ dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);
+
+ return available_bd;
+}
+
+/* Notify the hardware after queueing the bd to bdl */
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[epnum];
+
+ dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
+ /*
+	 * We don't have any way to check if the ep state is running,
+ * except the software flags.
+ */
+ if (unlikely(ep->flags & BDC_EP_STOP))
+ ep->flags &= ~BDC_EP_STOP;
+
+ bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
+}
+
+/* returns the bd corresponding to bdi */
+static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
+{
+ int tbi = bdi_to_tbi(ep, bdi);
+ int local_bdi = 0;
+
+ local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
+ dev_vdbg(ep->bdc->dev,
+ "%s bdi:%d local_bdi:%d\n",
+ __func__, bdi, local_bdi);
+
+ return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
+}
+
+/* Advance the enqueue pointer */
+static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
+{
+ ep->bd_list.eqp_bdi++;
+	/* if it's a chain bd, then move to the next one */
+ if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
+ ep->bd_list.eqp_bdi++;
+
+ /* if the eqp is pointing to last + 1 then move back to 0 */
+ if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
+ ep->bd_list.eqp_bdi = 0;
+}
+
+/* Setup the first bd for ep0 transfer */
+static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
+{
+ u16 wValue;
+ u32 req_len;
+
+ req->ep->dir = 0;
+ req_len = req->usb_req.length;
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_START:
+ *dword3 |= BD_TYPE_DS;
+ if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
+ *dword3 |= BD_DIR_IN;
+
+ /* check if zlp will be needed */
+ wValue = le16_to_cpu(bdc->setup_pkt.wValue);
+ if ((wValue > req_len) &&
+ (req_len % bdc->gadget.ep0->maxpacket == 0)) {
+ dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
+ wValue, req_len,
+ bdc->gadget.ep0->maxpacket);
+ bdc->zlp_needed = true;
+ }
+ break;
+
+ case WAIT_FOR_STATUS_START:
+ *dword3 |= BD_TYPE_SS;
+ if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
+ !(bdc->setup_pkt.bRequestType & USB_DIR_IN))
+ *dword3 |= BD_DIR_IN;
+ break;
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0 state for queueing bd ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Setup the bd dma descriptor for a given request */
+static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
+{
+ dma_addr_t buf_add = req->usb_req.dma;
+ u32 maxp, tfs, dword2, dword3;
+ struct bd_transfer *bd_xfr;
+ struct bd_list *bd_list;
+ struct bdc_ep *ep;
+ struct bdc_bd *bd;
+ int ret, bdnum;
+ u32 req_len;
+
+ ep = req->ep;
+ bd_list = &ep->bd_list;
+ bd_xfr = &req->bd_xfr;
+ bd_xfr->req = req;
+ bd_xfr->start_bdi = bd_list->eqp_bdi;
+ bd = bdi_to_bd(ep, bd_list->eqp_bdi);
+ req_len = req->usb_req.length;
+ maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
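+	/* tfs: number of maxp-sized packets needed for this request */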
+ tfs = roundup(req->usb_req.length, maxp);
+ tfs = tfs/maxp;
+ dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
+ __func__, ep->name, num_bds, tfs, req_len, bd);
+
+ for (bdnum = 0; bdnum < num_bds; bdnum++) {
+ dword2 = dword3 = 0;
+ /* First bd */
+ if (!bdnum) {
+ dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
+ dword2 |= BD_LTF;
+			/* format of the first bd for ep0 differs from other eps */
+ if (ep->ep_num == 1) {
+ ret = setup_first_bd_ep0(bdc, req, &dword3);
+ if (ret)
+ return ret;
+ }
+ }
+ if (!req->ep->dir)
+ dword3 |= BD_ISP;
+
+ if (req_len > BD_MAX_BUFF_SIZE) {
+ dword2 |= BD_MAX_BUFF_SIZE;
+ req_len -= BD_MAX_BUFF_SIZE;
+ } else {
+ /* this should be the last bd */
+ dword2 |= req_len;
+ dword3 |= BD_IOC;
+ dword3 |= BD_EOT;
+ }
+ /* Currently only 1 INT target is supported */
+ dword2 |= BD_INTR_TARGET(0);
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ if (unlikely(!bd)) {
+ dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
+ return -EINVAL;
+ }
+ /* write bd */
+ bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
+ bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
+ bd->offset[2] = cpu_to_le32(dword2);
+ bd->offset[3] = cpu_to_le32(dword3);
+ /* advance eqp pointer */
+ ep_bdlist_eqp_adv(ep);
+ /* advance the buff pointer */
+ buf_add += BD_MAX_BUFF_SIZE;
+ dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
+ (unsigned long long)buf_add, req_len, bd,
+ ep->bd_list.eqp_bdi);
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ bd->offset[3] = cpu_to_le32(BD_SBF);
+ }
+ /* clear the STOP BD fetch bit from the first bd of this xfr */
+ bd = bdi_to_bd(ep, bd_xfr->start_bdi);
+ bd->offset[3] &= cpu_to_le32(~BD_SBF);
+ /* the new eqp will be next hw dqp */
+ bd_xfr->num_bds = num_bds;
+ bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
+	/* make sure everything is written correctly before notifying the HW */
+ wmb();
+
+ return 0;
+}
+
+/* Queue the xfr */
+static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
+{
+ int num_bds, bd_available;
+ struct bdc_ep *ep;
+ int ret;
+
+ ep = req->ep;
+ dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
+ dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
+ ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);
+
+ num_bds = bd_needed_req(req);
+ bd_available = bd_available_ep(ep);
+
+	/* how many bd's are available on the ep */
+ if (num_bds > bd_available)
+ return -ENOMEM;
+
+ ret = setup_bd_list_xfr(bdc, req, num_bds);
+ if (ret)
+ return ret;
+ list_add_tail(&req->queue, &ep->queue);
+ bdc_dbg_bd_list(bdc, ep);
+ bdc_notify_xfr(bdc, ep->ep_num);
+
+ return 0;
+}
+
+/* callback to gadget layer when xfr completes */
+static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
+ int status)
+{
+ struct bdc *bdc = ep->bdc;
+
+	if (req == NULL)
+ return;
+
+ dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
+ list_del(&req->queue);
+ req->usb_req.status = status;
+ usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (req->usb_req.complete) {
+ spin_unlock(&bdc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&bdc->lock);
+ }
+}
+
+/* Disable the endpoint */
+int bdc_ep_disable(struct bdc_ep *ep)
+{
+ struct bdc_req *req;
+ struct bdc *bdc;
+ int ret;
+
+ ret = 0;
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
+ /* Stop the endpoint */
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+
+ /*
+	 * Intentionally don't check the ret value of stop; it can fail in
+	 * disconnect scenarios, so continue with dconfig
+ */
+ /* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+ /* deconfigure the endpoint */
+ ret = bdc_dconfig_ep(bdc, ep);
+ if (ret)
+ dev_warn(bdc->dev,
+ "dconfig fail but continue with memory free");
+
+ ep->flags = 0;
+ /* ep0 memory is not freed, but reused on next connect sr */
+ if (ep->ep_num == 1)
+ return 0;
+
+ /* Free the bdl memory */
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+ ep->usb_ep.desc = NULL;
+ ep->ep_type = 0;
+
+ return ret;
+}
+
+/* Enable the ep */
+int bdc_ep_enable(struct bdc_ep *ep)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
+ __func__, NUM_TABLES, NUM_TABLES_ISOCH);
+
+ ret = ep_bd_list_alloc(ep);
+ if (ret) {
+ dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
+ return -ENOMEM;
+ }
+ bdc_dbg_bd_list(bdc, ep);
+ /* only for ep0: config ep is called for ep0 from connect event */
+ ep->flags |= BDC_EP_ENABLED;
+ if (ep->ep_num == 1)
+ return ret;
+
+ /* Issue a configure endpoint command */
+ ret = bdc_config_ep(bdc, ep);
+ if (ret)
+ return ret;
+
+ ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
+ ep->usb_ep.desc = ep->desc;
+ ep->usb_ep.comp_desc = ep->comp_desc;
+ ep->ep_type = usb_endpoint_type(ep->desc);
+ ep->flags |= BDC_EP_ENABLED;
+
+ return 0;
+}
+
+/* EP0 related code */
+
+/* Queue a status stage BD */
+static int ep0_queue_status_stage(struct bdc *bdc)
+{
+ struct bdc_req *status_req;
+ struct bdc_ep *ep;
+
+ status_req = &bdc->status_req;
+ ep = bdc->bdc_ep_array[1];
+ status_req->ep = ep;
+ status_req->usb_req.length = 0;
+ status_req->usb_req.status = -EINPROGRESS;
+ status_req->usb_req.actual = 0;
+ status_req->usb_req.complete = NULL;
+ bdc_queue_xfr(bdc, status_req);
+
+ return 0;
+}
+
+/* Queue xfr on ep0 */
+static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
+ if (bdc->delayed_status) {
+ bdc->delayed_status = false;
+		/* was the status stage delayed? */
+ if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ return 0;
+ }
+ } else {
+ /*
+		 * if delayed status is false and a 0 length transfer is
+		 * requested, i.e. the status stage of some setup request,
+		 * then just return; the status stage is queued independently
+ */
+ if (req->usb_req.length == 0)
+ return 0;
+
+ }
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Queue data stage */
+static int ep0_queue_data_stage(struct bdc *bdc)
+{
+ struct usb_request *ep0_usb_req;
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep0_usb_req = &bdc->ep0_req.usb_req;
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.complete = NULL;
+
+ return ep0_queue(ep, &bdc->ep0_req);
+}
+
+/* Queue req on ep */
+static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ bdc = ep->bdc;
+ if (!req || !ep || !ep->usb_ep.desc)
+ return -EINVAL;
+
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed\n");
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Dequeue a request from ep */
+static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
+ bool start_pending, end_pending;
+ bool first_remove = false;
+ struct bdc_req *first_req;
+ struct bdc_bd *bd_start;
+ struct bd_table *table;
+ dma_addr_t next_bd_dma;
+ u64 deq_ptr_64 = 0;
+ struct bdc *bdc;
+ u32 tmp_32;
+ int ret;
+
+ bdc = ep->bdc;
+ start_pending = end_pending = false;
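+	/* the last queued bd is one behind the enqueue pointer (with wrap) */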
+ eqp_bdi = ep->bd_list.eqp_bdi - 1;
+
+ if (eqp_bdi < 0)
+ eqp_bdi = ep->bd_list.max_bdi;
+
+ start_bdi = req->bd_xfr.start_bdi;
+ end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);
+
+ dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
+ __func__, ep->name, start_bdi, end_bdi);
+ dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
+ ep, (void *)ep->usb_ep.desc);
+	/* Stop the ep to see where the HW is */
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+ /* if there is an issue with stopping ep, then no need to go further */
+ if (ret)
+ return 0;
+
+ /*
+	 * After the endpoint is stopped, there can be 3 cases: the request
+	 * is processed, pending, or in the middle of processing
+ */
+
+ /* The current hw dequeue pointer */
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
+ deq_ptr_64 = tmp_32;
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+
+ /* we have the dma addr of next bd that will be fetched by hardware */
+ curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (curr_hw_dqpi < 0)
+ return curr_hw_dqpi;
+
+ /*
+	 * curr_hw_dqpi points to the actual dqp of the HW, and the HW owns
+	 * the bd's from curr_hw_dqpi..eqp_bdi.
+ */
+
+ /* Check if start_bdi and end_bdi are in range of HW owned BD's */
+ if (curr_hw_dqpi > eqp_bdi) {
+ /* there is a wrap from last to 0 */
+ if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
+ end_pending = true;
+ }
+ } else {
+ if (start_bdi >= curr_hw_dqpi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi) {
+ end_pending = true;
+ }
+ }
+ dev_dbg(bdc->dev,
+ "start_pending:%d end_pending:%d speed:%d\n",
+ start_pending, end_pending, bdc->gadget.speed);
+
+	/* If both start and end are already processed, we cannot dequeue the req */
+ if (!start_pending && !end_pending)
+ return -EINVAL;
+
+ /*
+ * if ep_dequeue is called after disconnect then just return
+ * success from here
+ */
+ if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
+ return 0;
+ tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
+ table = ep->bd_list.bd_table_array[tbi];
+ next_bd_dma = table->dma +
+ sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
+ tbi * ep->bd_list.num_bds_table);
+
+ first_req = list_first_entry(&ep->queue, struct bdc_req,
+ queue);
+
+ if (req == first_req)
+ first_remove = true;
+
+ /*
+	 * Due to a HW limitation we need to bypass chain bd's and issue ep_bla;
+	 * in case start is pending and this is the first request in the list,
+	 * then issue ep_bla instead of marking it as a chain bd
+ */
+ if (start_pending && !first_remove) {
+ /*
+ * Mark the start bd as Chain bd, and point the chain
+ * bd to next_bd_dma
+ */
+ bd_start = bdi_to_bd(ep, start_bdi);
+ bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
+ bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
+ bd_start->offset[2] = 0x0;
+ bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
+ bdc_dbg_bd_list(bdc, ep);
+ } else if (end_pending) {
+ /*
+ * The transfer is stopped in the middle, move the
+ * HW deq pointer to next_bd_dma
+ */
+ ret = bdc_ep_bla(bdc, ep, next_bd_dma);
+ if (ret) {
+ dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Halt/Clear the ep based on value */
+static int ep_set_halt(struct bdc_ep *ep, u32 value)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+
+ if (value) {
+ dev_dbg(bdc->dev, "Halt\n");
+ if (ep->ep_num == 1)
+ bdc->ep0_state = WAIT_FOR_SETUP;
+
+ ret = bdc_ep_set_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear", ep->name);
+ else
+ ep->flags |= BDC_EP_STALL;
+ } else {
+ /* Clear */
+ dev_dbg(bdc->dev, "Before Clear\n");
+ ret = bdc_ep_clear_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear", ep->name);
+ else
+ ep->flags &= ~BDC_EP_STALL;
+ dev_dbg(bdc->dev, "After Clear\n");
+ }
+
+ return ret;
+}
+
+/* Free all the eps */
+void bdc_free_ep(struct bdc *bdc)
+{
+ struct bdc_ep *ep;
+ u8 epnum;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ for (epnum = 1; epnum < bdc->num_eps; epnum++) {
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ continue;
+
+ if (ep->flags & BDC_EP_ENABLED)
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+
+ /* ep0 is not in this gadget list */
+ if (epnum != 1)
+ list_del(&ep->usb_ep.ep_list);
+
+ kfree(ep);
+ }
+}
+
+/* USB2 spec, section 7.1.20 */
+static int bdc_set_test_mode(struct bdc *bdc)
+{
+ u32 usb2_pm;
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ usb2_pm &= ~BDC_PTC_MASK;
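+	/* clear the previous test mode (PTC field) before setting a new one */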
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ switch (bdc->test_mode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ usb2_pm |= bdc->test_mode << 28;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ return 0;
+}
+
+/*
+ * Helper function to handle Transfer status report with status as either
+ * success or short
+ */
+static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
+ struct bdc_sr *sreport)
+{
+ int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
+ struct bd_list *bd_list = &ep->bd_list;
+ int actual_length, length_short;
+ struct bd_transfer *bd_xfr;
+ struct bdc_bd *short_bd;
+ struct bdc_req *req;
+ u64 deq_ptr_64 = 0;
+ int status = 0;
+ int sr_status;
+ u32 tmp_32;
+
+ dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
+ bdc_dbg_srr(bdc, 0);
+	/* do not process this sr if the ignore flag is set */
+ if (ep->ignore_next_sr) {
+ ep->ignore_next_sr = false;
+ return;
+ }
+
+ if (unlikely(list_empty(&ep->queue))) {
+ dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
+ return;
+ }
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+
+ bd_xfr = &req->bd_xfr;
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+
+ /*
+	 * If sr_status is short and this transfer has more than 1 bd then it
+	 * needs special handling; this is only applicable for bulk and ctrl
+ */
+ if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
+ /*
+		 * This is a multi bd xfr; let's see which bd
+		 * caused the short transfer and how many bytes have been
+ * transferred so far.
+ */
+ tmp_32 = le32_to_cpu(sreport->offset[0]);
+ deq_ptr_64 = tmp_32;
+ tmp_32 = le32_to_cpu(sreport->offset[1]);
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+ short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (unlikely(short_bdi < 0))
+ dev_warn(bdc->dev, "bd doesn't exist?\n");
+
+ start_bdi = bd_xfr->start_bdi;
+ /*
+		 * We know the start_bdi and short_bdi; how many xfr
+		 * bds are in between?
+ */
+ if (start_bdi <= short_bdi) {
+ max_len_bds = short_bdi - start_bdi;
+ if (max_len_bds <= bd_list->num_bds_table) {
+ if (!(bdi_to_tbi(ep, start_bdi) ==
+ bdi_to_tbi(ep, short_bdi)))
+ max_len_bds--;
+ } else {
+ chain_bds = max_len_bds/bd_list->num_bds_table;
+ max_len_bds -= chain_bds;
+ }
+ } else {
+			/* there is a wrap in the ring within an xfr */
+ chain_bds = (bd_list->max_bdi - start_bdi)/
+ bd_list->num_bds_table;
+ chain_bds += short_bdi/bd_list->num_bds_table;
+ max_len_bds = bd_list->max_bdi - start_bdi;
+ max_len_bds += short_bdi;
+ max_len_bds -= chain_bds;
+ }
+ /* max_len_bds is the number of full length bds */
+ end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
+ if (!(end_bdi == short_bdi))
+ ep->ignore_next_sr = true;
+
+ actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
+ short_bd = bdi_to_bd(ep, short_bdi);
+ /* length queued */
+ length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
+		/* actual length transferred */
+ length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ actual_length += length_short;
+ req->usb_req.actual = actual_length;
+ } else {
+ req->usb_req.actual = req->usb_req.length -
+ SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ dev_dbg(bdc->dev,
+ "len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
+ req->usb_req.length, req->usb_req.actual,
+ bd_xfr->next_hwd_bdi);
+ }
+
+ /* Update the dequeue pointer */
+ ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
+ if (req->usb_req.actual < req->usb_req.length) {
+ dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
+ if (req->usb_req.short_not_ok)
+ status = -EREMOTEIO;
+ }
+ bdc_req_complete(ep, bd_xfr->req, status);
+}
+
+/* EP0 setup related packet handlers */
+
+/*
+ * Setup packet received, just store the packet and process on next DS or SS
+ * started SR
+ */
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ u32 len;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ /* Store received setup packet */
+ setup_pkt = &bdc->setup_pkt;
+ memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
+ len = le16_to_cpu(setup_pkt->wLength);
+ if (!len)
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ else
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+
+
+ dev_dbg(bdc->dev,
+ "%s exit ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+}
+
+/* Stall ep0 */
+static void ep0_stall(struct bdc *bdc)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[1];
+ struct bdc_req *req;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->delayed_status = false;
+ ep_set_halt(ep, 1);
+
+	/* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* SET_ADDRESS handler */
+static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ int ret = 0;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl->wValue);
+ dev_dbg(bdc->dev,
+ "%s addr:%d dev state:%d\n",
+ __func__, addr, state);
+
+ if (addr > 127)
+ return -EINVAL;
+
+ switch (state) {
+ case USB_STATE_DEFAULT:
+ case USB_STATE_ADDRESS:
+ /* Issue Address device command */
+ ret = bdc_address_device(bdc, addr);
+ if (ret)
+ return ret;
+
+ if (addr)
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+
+ bdc->dev_addr = addr;
+ break;
+ default:
+ dev_warn(bdc->dev,
+ "SET Address in wrong device state %d\n",
+ state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Handler for SET/CLEAR FEATURE requests for device */
+static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
+ u16 wIndex, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ u32 usppms = 0;
+
+ dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
+ __func__, set, state);
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
+ if (set)
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ else
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+ dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
+ if ((wIndex & 0xFF) ||
+ (bdc->gadget.speed != USB_SPEED_HIGH) || !set)
+ return -EINVAL;
+
+ bdc->test_mode = wIndex >> 8;
+ break;
+
+ case USB_DEVICE_U1_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ /* clear previous u1t */
+ usppms &= ~BDC_U1T(BDC_U1T_MASK);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_U1E | BDC_PORT_W1S;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
+ } else {
+ usppms &= ~BDC_U1E;
+ usppms |= BDC_PORT_W1S;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_U2_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ usppms |= BDC_U2E;
+ usppms |= BDC_U2A;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
+ } else {
+ usppms &= ~BDC_U2E;
+ usppms &= ~BDC_U2A;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_LTM_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
+ return -EOPNOTSUPP;
+ } /* USB_RECIP_DEVICE end */
+
+ return 0;
+}
+
+/* SET/CLEAR FEATURE handler */
+static int ep0_handle_feature(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 wValue;
+ u16 wIndex;
+ int epnum;
+
+ wValue = le16_to_cpu(setup_pkt->wValue);
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+
+ dev_dbg(bdc->dev,
+ "%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
+ __func__, wValue, wIndex, state,
+ bdc->gadget.speed, set);
+
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ /* USB3 spec, sec 9.4.9 */
+ if (wValue != USB_INTRF_FUNC_SUSPEND)
+ return -EINVAL;
+ /* USB3 spec, Table 9-8 */
+ if (set) {
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
+ dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ } else {
+ dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ }
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ if (wValue != USB_ENDPOINT_HALT)
+ return -EINVAL;
+
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
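+ /*
+ * Map the USB endpoint number/direction to the driver's
+ * bdc_ep_array index: IN eps at 2*n + 1, OUT eps at 2*n,
+ * ep0 at index 1.
+ */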
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum * 2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /* EP0 */
+ }
+ /*
+ * If CLEAR_FEATURE on ep0 then don't do anything as the stall
+ * condition on ep0 has already been cleared when SETUP packet
+ * was received.
+ */
+ if (epnum == 1 && !set) {
+ dev_dbg(bdc->dev, "ep0 stall already cleared\n");
+ return 0;
+ }
+ dev_dbg(bdc->dev, "epnum=%d\n", epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ return -EINVAL;
+
+ return ep_set_halt(ep, set);
+ default:
+ dev_err(bdc->dev, "Unknown recipient\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* GET_STATUS request handler */
+static int ep0_handle_status(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 usb_status = 0;
+ u32 epnum;
+ u16 wIndex;
+
+ /* USB2.0 spec sec 9.4.5 */
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ usb_status = bdc->devstatus;
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ dev_dbg(bdc->dev,
+ "USB_RECIP_DEVICE devstatus:%08x\n",
+ bdc->devstatus);
+ /* USB3 spec, sec 9.4.5 */
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ usb_status &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ /*
+ * Function remote wakeup status should come from the
+ * function driver: usb_status |= 1;
+ */
+ if (bdc->devstatus & REMOTE_WAKE_ENABLE)
+ usb_status |= REMOTE_WAKE_ENABLE;
+ } else {
+ usb_status = 0;
+ }
+
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
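+ /* same ep number to bdc_ep_array index mapping as in ep0_handle_feature() */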
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum*2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /* EP0 */
+ }
+
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep) {
+ dev_err(bdc->dev, "GET_STATUS for invalid EP?\n");
+ return -EINVAL;
+ }
+ if (ep->flags & BDC_EP_STALL)
+ usb_status |= 1 << USB_ENDPOINT_HALT;
+
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown recipient for get_status\n");
+ return -EINVAL;
+ }
+ /* prepare a data stage for GET_STATUS */
+ dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
+ *(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
+ bdc->ep0_req.usb_req.length = 2;
+ bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
+{
+ /* ep0_set_sel_cmpl */
+}
+
+/* Queue data stage to handle 6 byte SET_SEL request */
+static int ep0_set_sel(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ struct bdc_ep *ep;
+ u16 wLength;
+ u16 wValue;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ wValue = le16_to_cpu(setup_pkt->wValue);
+ wLength = le16_to_cpu(setup_pkt->wLength);
+ if (unlikely(wLength != 6)) {
+ dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
+ return -EINVAL;
+ }
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.length = 6;
+ bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
+ bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+/*
+ * Queue a 0-byte BD only if wLength is greater than the data length and the
+ * length is a multiple of MaxPacket
+ */
+static int ep0_queue_zlp(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->ep0_req.ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.usb_req.length = 0;
+ bdc->ep0_req.usb_req.complete = NULL;
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+ ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
+ if (ret) {
+ dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
+ return ret;
+ }
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+
+ return 0;
+}
+
+/* Control request handler */
+static int handle_control_request(struct bdc *bdc)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct usb_ctrlrequest *setup_pkt;
+ int delegate_setup = 0;
+ int ret = 0;
+ int config = 0;
+
+ setup_pkt = &bdc->setup_pkt;
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup_pkt->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = ep0_set_address(bdc, setup_pkt);
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ break;
+
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ if (state == USB_STATE_ADDRESS) {
+ usb_gadget_set_state(&bdc->gadget,
+ USB_STATE_CONFIGURED);
+ } else if (state == USB_STATE_CONFIGURED) {
+ /*
+ * USB2 spec sec 9.4.7, if wValue is 0 then dev
+ * is moved to addressed state
+ */
+ config = le16_to_cpu(setup_pkt->wValue);
+ if (!config)
+ usb_gadget_set_state(
+ &bdc->gadget,
+ USB_STATE_ADDRESS);
+ }
+ delegate_setup = 1;
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 1);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 0);
+ break;
+
+ case USB_REQ_GET_STATUS:
+ dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
+ ret = ep0_handle_status(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_SEL:
+ dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
+ ret = ep0_set_sel(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_warn(bdc->dev,
+ "USB_REQ_SET_ISOCH_DELAY not handled\n");
+ ret = 0;
+ break;
+ default:
+ delegate_setup = 1;
+ }
+ } else {
+ delegate_setup = 1;
+ }
+
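+ /*
+ * Requests not handled above are delegated to the gadget driver's
+ * setup callback; the lock is dropped around the callback.
+ */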
+ if (delegate_setup) {
+ spin_unlock(&bdc->lock);
+ ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
+ spin_lock(&bdc->lock);
+ }
+
+ return ret;
+}
+
+/* EP0: Data stage started */
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep = bdc->bdc_ep_array[1];
+ /* If ep0 was stalled, then clear it first */
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+ if (bdc->ep0_state != WAIT_FOR_DATA_START)
+ dev_warn(bdc->dev,
+ "Data stage not expected ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ /*
+ * The ep0 state will remain WAIT_FOR_DATA_START till
+ * we receive ep_queue on ep0
+ */
+ bdc->delayed_status = true;
+ return;
+ }
+ if (!ret) {
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* EP0: status stage started */
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ ep = bdc->bdc_ep_array[1];
+
+ /* clear any pending ZLP indication; it is no longer needed */
+ if (bdc->zlp_needed)
+ bdc->zlp_needed = false;
+
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+
+ if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
+ (bdc->ep0_state != WAIT_FOR_DATA_XMIT))
+ dev_err(bdc->dev,
+ "Status stage recv but ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ /* check if the data stage is still in progress */
+ if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
+ bdc->ep0_state = STATUS_PENDING;
+ /* Status stage will be queued upon Data stage transmit event */
+ dev_dbg(bdc->dev,
+ "status started but data not transmitted yet\n");
+ return;
+ }
+ setup_pkt = &bdc->setup_pkt;
+
+ /*
+ * Process the setup here only for a 2-stage setup; for a 3-stage setup
+ * the data stage has already been handled
+ */
+ if (!le16_to_cpu(setup_pkt->wLength)) {
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ bdc->delayed_status = true;
+ /* ep0_state will remain WAIT_FOR_STATUS_START */
+ return;
+ }
+ }
+ if (!ret) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
+static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
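+ /* advance the ep0 state machine on a data/status transmit-complete SR */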
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_XMIT:
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ break;
+ case WAIT_FOR_STATUS_XMIT:
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ if (bdc->test_mode) {
+ int ret;
+
+ dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
+ ret = bdc_set_test_mode(bdc);
+ if (ret < 0) {
+ dev_err(bdc->dev, "Err in setting Test mode\n");
+ return;
+ }
+ bdc->test_mode = 0;
+ }
+ break;
+ case STATUS_PENDING:
+ bdc_xsf_ep0_status_start(bdc, sreport);
+ break;
+
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ }
+}
+
+/* xfr completion status report handler */
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ u32 sr_status;
+ u8 ep_num;
+
+ ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
+ ep = bdc->bdc_ep_array[ep_num];
+ if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "xsf for ep not enabled\n");
+ return;
+ }
+ /*
+ * check if this transfer is after link went from U3->U0 due
+ * to remote wakeup
+ */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
+ dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
+ __func__);
+ }
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+ dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
+ __func__, sr_status, ep->name);
+
+ switch (sr_status) {
+ case XSF_SUCC:
+ case XSF_SHORT:
+ handle_xsr_succ_status(bdc, ep, sreport);
+ if (ep_num == 1)
+ ep0_xsf_complete(bdc, sreport);
+ break;
+
+ case XSF_SETUP_RECV:
+ case XSF_DATA_START:
+ case XSF_STATUS_START:
+ if (ep_num != 1) {
+ dev_err(bdc->dev,
+ "ep0 related packets on non ep0 endpoint");
+ return;
+ }
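+ /*
+ * Dispatch via the ep0 SR handler table; the index maps the
+ * setup-received, data-start and status-start events to their handlers.
+ */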
+ bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
+ break;
+
+ case XSF_BABB:
+ if (ep_num == 1) {
+ dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
+ bdc->zlp_needed);
+ /*
+ * If the last completed transfer had wLength > data length,
+ * and the length is a multiple of MaxPacket, then queue a ZLP
+ */
+ if (bdc->zlp_needed) {
+ /* queue 0 length bd */
+ ep0_queue_zlp(bdc);
+ return;
+ }
+ }
+ dev_warn(bdc->dev, "Babble on ep not handled\n");
+ break;
+ default:
+ dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
+ break;
+ }
+}
+
+static int bdc_gadget_ep_queue(struct usb_ep *_ep,
+ struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_ep->desc)
+ return -ESHUTDOWN;
+
+ if (!_req || !_req->complete || !_req->buf)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
+ dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
+ _req, ep->name, _req->length, _req->zero);
+
+ if (!ep->usb_ep.desc) {
+ dev_warn(bdc->dev,
+ "trying to queue req %p to disabled %s\n",
+ _req, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (_req->length > MAX_XFR_LEN) {
+ dev_warn(bdc->dev,
+ "req length > supported MAX:%d requested:%d\n",
+ MAX_XFR_LEN, _req->length);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (ep == bdc->bdc_ep_array[1])
+ ret = ep0_queue(ep, req);
+ else
+ ret = ep_queue(ep, req);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+ bdc_dbg_bd_list(bdc, ep);
+ spin_lock_irqsave(&bdc->lock, flags);
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->usb_req == _req)
+ break;
+ }
+ if (&req->usb_req != _req) {
+ spin_unlock_irqrestore(&bdc->lock, flags);
+ dev_err(bdc->dev, "usb_req != req\n");
+ return -EINVAL;
+ }
+ ret = ep_dequeue(ep, req);
+ if (ret) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ bdc_req_complete(ep, req, -ECONNRESET);
+
+err:
+ bdc_dbg_bd_list(bdc, ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
+ ret = -EINVAL;
+ else if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else
+ ret = ep_set_halt(ep, value);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ struct bdc_ep *ep;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ ep = to_bdc_ep(_ep);
+ req->ep = ep;
+ req->epnum = ep->ep_num;
+ req->usb_req.dma = DMA_ADDR_INVALID;
+ dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+
+ return &req->usb_req;
+}
+
+static void bdc_gadget_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+
+ req = to_bdc_req(_req);
+ kfree(req);
+}
+
+/* endpoint operations */
+
+/* configure endpoint and also allocate resources */
+static int bdc_gadget_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("bdc_gadget_ep_enable invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
+ return -EINVAL;
+ }
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Sanity check, upper layer will not send enable for ep0 */
+ if (ep == bdc->bdc_ep_array[1])
+ return -EINVAL;
+
+ if (!bdc->gadget_driver
+ || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
+ return -ESHUTDOWN;
+ }
+
+ dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
+ spin_lock_irqsave(&bdc->lock, flags);
+ ep->desc = desc;
+ ep->comp_desc = _ep->comp_desc;
+ ret = bdc_ep_enable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_disable(struct usb_ep *_ep)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep) {
+ pr_debug("bdc: invalid parameters\n");
+ return -EINVAL;
+ }
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Upper layer will not call this for ep0, but do a sanity check */
+ if (ep == bdc->bdc_ep_array[1]) {
+ dev_warn(bdc->dev, "%s called for ep0\n", __func__);
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev,
+ "%s() ep:%s ep->flags:%08x\n",
+ __func__, ep->name, ep->flags);
+
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
+ return 0;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ ret = bdc_ep_disable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops bdc_gadget_ep_ops = {
+ .enable = bdc_gadget_ep_enable,
+ .disable = bdc_gadget_ep_disable,
+ .alloc_request = bdc_gadget_alloc_request,
+ .free_request = bdc_gadget_free_request,
+ .queue = bdc_gadget_ep_queue,
+ .dequeue = bdc_gadget_ep_dequeue,
+ .set_halt = bdc_gadget_ep_set_halt
+};
+
+/* dir = 1 is IN */
+static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->bdc = bdc;
+ ep->dir = dir;
+
+ /* ep->ep_num is the index inside bdc_ep */
+ if (epnum == 1) {
+ ep->ep_num = 1;
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
+ ep->comp_desc = NULL;
+ bdc->gadget.ep0 = &ep->usb_ep;
+ } else {
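+ /*
+ * Non-ep0 endpoints: IN eps occupy odd slots (2*n - 1) and
+ * OUT eps even slots (2*n - 2) in bdc_ep_array.
+ */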
+ if (dir)
+ ep->ep_num = epnum * 2 - 1;
+ else
+ ep->ep_num = epnum * 2 - 2;
+
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
+ dir & 1 ? "in" : "out");
+
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 0;
+ list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
+ }
+ ep->usb_ep.ops = &bdc_gadget_ep_ops;
+ ep->usb_ep.name = ep->name;
+ ep->flags = 0;
+ ep->ignore_next_sr = false;
+ dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
+ ep, ep->usb_ep.name, epnum, ep->ep_num);
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ return 0;
+}
+
+/* Init all ep */
+int bdc_init_ep(struct bdc *bdc)
+{
+ u8 epnum;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ INIT_LIST_HEAD(&bdc->gadget.ep_list);
+ /* init ep0 */
+ ret = init_ep(bdc, 1, 0);
+ if (ret) {
+ dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
+ return ret;
+ }
+
+ for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
+ /* OUT */
+ ret = init_ep(bdc, epnum, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+
+ /* IN */
+ ret = init_ep(bdc, epnum, 1);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
new file mode 100644
index 000000000000..8a6b36cbf2ea
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -0,0 +1,22 @@
+/*
+ * bdc_ep.h - header for the BDC endpoint functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_EP_H__
+#define __LINUX_BDC_EP_H__
+
+int bdc_init_ep(struct bdc *);
+int bdc_ep_disable(struct bdc_ep *);
+int bdc_ep_enable(struct bdc_ep *);
+void bdc_free_ep(struct bdc *);
+
+#endif /* __LINUX_BDC_EP_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
new file mode 100644
index 000000000000..02968842b359
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -0,0 +1,132 @@
+/*
+ * bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+
+#include "bdc.h"
+
+#define BDC_PCI_PID 0x1570
+
+struct bdc_pci {
+ struct device *dev;
+ struct platform_device *bdc;
+};
+
+static int bdc_setup_msi(struct pci_dev *pci)
+{
+ int ret;
+
+ ret = pci_enable_msi(pci);
+ if (ret) {
+ pr_err("failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct resource res[2];
+ struct platform_device *bdc;
+ struct bdc_pci *glue;
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(&pci->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = &pci->dev;
+ ret = pci_enable_device(pci);
+ if (ret) {
+ dev_err(&pci->dev, "failed to enable pci device\n");
+ return -ENODEV;
+ }
+ pci_set_master(pci);
+
+ bdc = platform_device_alloc(BRCM_BDC_NAME, PLATFORM_DEVID_AUTO);
+ if (!bdc)
+ return -ENOMEM;
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+ bdc_setup_msi(pci);
+
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = BRCM_BDC_NAME;
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = BRCM_BDC_NAME;
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(bdc, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(&pci->dev,
+ "couldn't add resources to bdc device\n");
+ return ret;
+ }
+
+ pci_set_drvdata(pci, glue);
+
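+ /* the BDC platform device performs the DMA, so hand it the PCI DMA config */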
+ dma_set_coherent_mask(&bdc->dev, pci->dev.coherent_dma_mask);
+
+ bdc->dev.dma_mask = pci->dev.dma_mask;
+ bdc->dev.dma_parms = pci->dev.dma_parms;
+ bdc->dev.parent = &pci->dev;
+ glue->bdc = bdc;
+
+ ret = platform_device_add(bdc);
+ if (ret) {
+ dev_err(&pci->dev, "failed to register bdc device\n");
+ platform_device_put(bdc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdc_pci_remove(struct pci_dev *pci)
+{
+ struct bdc_pci *glue = pci_get_drvdata(pci);
+
+ platform_device_unregister(glue->bdc);
+ pci_disable_msi(pci);
+}
+
+static struct pci_device_id bdc_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BDC_PCI_PID), },
+ {} /* Terminating Entry */
+};
+
+MODULE_DEVICE_TABLE(pci, bdc_pci_id_table);
+
+static struct pci_driver bdc_pci_driver = {
+ .name = "bdc-pci",
+ .id_table = bdc_pci_id_table,
+ .probe = bdc_pci_probe,
+ .remove = bdc_pci_remove,
+};
+
+MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("BRCM BDC USB3 PCI Glue layer");
+module_pci_driver(bdc_pci_driver);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
new file mode 100644
index 000000000000..3700ce70b0be
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -0,0 +1,587 @@
+/*
+ * bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/gadget/udc/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const struct usb_gadget_ops bdc_gadget_ops;
+
+static const char * const conn_speed_str[] = {
+ "Not connected",
+ "Full Speed",
+ "Low Speed",
+ "High Speed",
+ "Super Speed",
+};
+
+/* EP0 initial descriptor */
+static struct usb_endpoint_descriptor bdc_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .bEndpointAddress = 0,
+ .wMaxPacketSize = cpu_to_le16(EP0_MAX_PKT_SIZE),
+};
+
+/* Advance the SRR dequeue pointer maintained by SW */
+static void srr_dqp_index_advc(struct bdc *bdc, u32 srr_num)
+{
+ struct srr *srr;
+
+ srr = &bdc->srr;
+ dev_dbg_ratelimited(bdc->dev, "srr->dqp_index:%d\n", srr->dqp_index);
+ srr->dqp_index++;
+ /* roll back to 0 if we are past the last entry */
+ if (srr->dqp_index == NUM_SR_ENTRIES)
+ srr->dqp_index = 0;
+}
+
+/* connect sr */
+static void bdc_uspc_connected(struct bdc *bdc)
+{
+ u32 speed, temp;
+ u32 usppms;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ speed = BDC_PSP(temp);
+ dev_dbg(bdc->dev, "%s speed=%x\n", __func__, speed);
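+ /* ep0 maxpacket and gadget speed are set from the negotiated port speed */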
+ switch (speed) {
+ case BDC_SPEED_SS:
+ bdc_gadget_ep0_desc.wMaxPacketSize =
+ cpu_to_le16(EP0_MAX_PKT_SIZE);
+ bdc->gadget.ep0->maxpacket = EP0_MAX_PKT_SIZE;
+ bdc->gadget.speed = USB_SPEED_SUPER;
+ /* Enable U1T in SS mode */
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ usppms &= ~BDC_U1T(0xff);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_PORT_W1S;
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case BDC_SPEED_HS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_HIGH;
+ break;
+
+ case BDC_SPEED_FS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_FULL;
+ break;
+
+ case BDC_SPEED_LS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
+ bdc->gadget.ep0->maxpacket = 8;
+ bdc->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ dev_err(bdc->dev, "UNDEFINED SPEED\n");
+ return;
+ }
+ dev_dbg(bdc->dev, "connected at %s\n", conn_speed_str[speed]);
+ /* Now we know the speed, configure ep0 */
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ ret = bdc_config_ep(bdc, bdc->bdc_ep_array[1]);
+ if (ret)
+ dev_err(bdc->dev, "EP0 config failed\n");
+ bdc->bdc_ep_array[1]->usb_ep.desc = &bdc_gadget_ep0_desc;
+ bdc->bdc_ep_array[1]->flags |= BDC_EP_ENABLED;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+}
+
+/* device got disconnected */
+static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /*
+ * Only stop ep0 from here, rest of the endpoints will be disabled
+ * from gadget_disconnect
+ */
+ ep = bdc->bdc_ep_array[1];
+ if (ep && (ep->flags & BDC_EP_ENABLED))
+ /* if enabled then stop and remove requests */
+ bdc_ep_disable(ep);
+
+ if (bdc->gadget_driver && bdc->gadget_driver->disconnect) {
+ spin_unlock(&bdc->lock);
+ bdc->gadget_driver->disconnect(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ /* Set Unknown speed */
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ bdc->delayed_status = false;
+ bdc->reinit = reinit;
+ bdc->test_mode = false;
+}
+
+/* TNotify wakeup timer */
+static void bdc_func_wake_timer(struct work_struct *work)
+{
+ struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ /*
+ * Check if the host has started transferring on the endpoints;
+ * FUNC_WAKE_ISSUED is cleared when a transfer has started after resume
+ */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
+ /* flag is still set, so again send func wake */
+ bdc_function_wake_fh(bdc, 0);
+ schedule_delayed_work(&bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+}
+
+/* handler for Link state change condition */
+static void handle_link_state_change(struct bdc *bdc, u32 uspc)
+{
+ u32 link_state;
+
+ dev_dbg(bdc->dev, "Link state change\n");
+ link_state = BDC_PST(uspc);
+ switch (link_state) {
+ case BDC_LINK_STATE_U3:
+ if ((bdc->gadget.speed != USB_SPEED_UNKNOWN) &&
+ bdc->gadget_driver->suspend) {
+ dev_dbg(bdc->dev, "Entered Suspend mode\n");
+ spin_unlock(&bdc->lock);
+ bdc->devstatus |= DEVICE_SUSPENDED;
+ bdc->gadget_driver->suspend(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ break;
+ case BDC_LINK_STATE_U0:
+ if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ bdc_function_wake_fh(bdc, 0);
+ bdc->devstatus |= FUNC_WAKE_ISSUED;
+ /*
+ * Start a Notification timer and check if the
+ * Host transferred anything on any of the EPs,
+ * if not then send function wake again every
+ * TNotification secs until host initiates
+ * transfer to BDC, USB3 spec Table 8.13
+ */
+ schedule_delayed_work(
+ &bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ dev_dbg(bdc->dev, "sched func_wake_notify\n");
+ }
+ }
+ break;
+
+ case BDC_LINK_STATE_RESUME:
+ dev_dbg(bdc->dev, "Resumed from Suspend\n");
+ if (bdc->devstatus & DEVICE_SUSPENDED) {
+ bdc->gadget_driver->resume(&bdc->gadget);
+ bdc->devstatus &= ~DEVICE_SUSPENDED;
+ }
+ break;
+ default:
+ dev_dbg(bdc->dev, "link state:%d\n", link_state);
+ }
+}
+
+/* something changes on upstream port, handle it here */
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ u32 clear_flags = 0;
+ u32 uspc;
+ bool connected = false;
+ bool disconn = false;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ dev_dbg(bdc->dev, "%s uspc=0x%08x\n", __func__, uspc);
+
+ /* Port connect changed */
+ if (uspc & BDC_PCC) {
+ /* Vbus not present, and not connected to Downstream port */
+ if ((uspc & BDC_VBC) && !(uspc & BDC_VBS) && !(uspc & BDC_PCS))
+ disconn = true;
+ else if ((uspc & BDC_PCS) && !BDC_PST(uspc))
+ connected = true;
+ }
+
+ /* Change in VBus and VBus is present */
+ if ((uspc & BDC_VBC) && (uspc & BDC_VBS)) {
+ if (bdc->pullup) {
+ dev_dbg(bdc->dev, "Do a softconnect\n");
+ /* Attached state, do a softconnect */
+ bdc_softconn(bdc);
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_POWERED);
+ }
+ clear_flags = BDC_VBC;
+ } else if ((uspc & BDC_PRS) || (uspc & BDC_PRC) || disconn) {
+ /* Hot reset, warm reset, 2.0 bus reset or disconn */
+ dev_dbg(bdc->dev, "Port reset or disconn\n");
+ bdc_uspc_disconnected(bdc, disconn);
+ clear_flags = BDC_PCC|BDC_PCS|BDC_PRS|BDC_PRC;
+ } else if ((uspc & BDC_PSC) && (uspc & BDC_PCS)) {
+ /* Change in Link state */
+ handle_link_state_change(bdc, uspc);
+ clear_flags = BDC_PSC|BDC_PCS;
+ }
+
+ /*
+ * In SS we might not have the PRC bit set before connection, but in 2.0
+ * the PRC bit is set before connection, so this check is kept outside
+ * of the bus-reset path to handle both SS and 2.0 speeds.
+ */
+ if (connected) {
+ /* This is the connect event for U0/L0 */
+ dev_dbg(bdc->dev, "Connected\n");
+ bdc_uspc_connected(bdc);
+ bdc->devstatus &= ~(DEVICE_SUSPENDED);
+ }
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= (~BDC_USPSC_RW);
+ dev_dbg(bdc->dev, "uspc=%x\n", uspc);
+ bdc_writel(bdc->regs, BDC_USPC, clear_flags);
+}
+
+/* Main interrupt handler for bdc */
+static irqreturn_t bdc_udc_interrupt(int irq, void *_bdc)
+{
+ u32 eqp_index, dqp_index, sr_type, srr_int;
+ struct bdc_sr *sreport;
+ struct bdc *bdc = _bdc;
+ u32 status;
+ int ret;
+
+ spin_lock(&bdc->lock);
+ status = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (!(status & BDC_GIP)) {
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ /* Check if the SRR IP bit is set */
+ if (!(srr_int & BDC_SRR_IP)) {
+ dev_warn(bdc->dev, "Global irq pending but SRR IP is 0\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ eqp_index = BDC_SRR_EPI(srr_int);
+ dqp_index = BDC_SRR_DPI(srr_int);
+ dev_dbg(bdc->dev,
+ "%s eqp_index=%d dqp_index=%d srr.dqp_index=%d\n\n",
+ __func__, eqp_index, dqp_index, bdc->srr.dqp_index);
+
+ /* check for ring empty condition */
+ if (eqp_index == dqp_index) {
+ dev_dbg(bdc->dev, "SRR empty?\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_HANDLED;
+ }
+
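+ /*
+ * Process status reports until the SW dequeue index catches up
+ * with the HW enqueue index.
+ */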
+ while (bdc->srr.dqp_index != eqp_index) {
+ sreport = &bdc->srr.sr_bds[bdc->srr.dqp_index];
+ /* sreport is read before using it */
+ rmb();
+ sr_type = le32_to_cpu(sreport->offset[3]) & BD_TYPE_BITMASK;
+ dev_dbg_ratelimited(bdc->dev, "sr_type=%d\n", sr_type);
+ switch (sr_type) {
+ case SR_XSF:
+ bdc->sr_handler[0](bdc, sreport);
+ break;
+
+ case SR_USPC:
+ bdc->sr_handler[1](bdc, sreport);
+ break;
+ default:
+ dev_warn(bdc->dev, "SR:%d not handled\n", sr_type);
+ }
+ /* Advance the srr dqp index */
+ srr_dqp_index_advc(bdc, 0);
+ }
+ /* update the hw dequeue pointer */
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ srr_int &= ~BDC_SRR_DPI_MASK;
+ srr_int &= ~(BDC_SRR_RWS|BDC_SRR_RST|BDC_SRR_ISR);
+ srr_int |= ((bdc->srr.dqp_index) << 16);
+ srr_int |= BDC_SRR_IP;
+ bdc_writel(bdc->regs, BDC_SRRINT(0), srr_int);
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ if (bdc->reinit) {
+ ret = bdc_reinit(bdc);
+ if (ret)
+ dev_err(bdc->dev, "err in bdc reinit\n");
+ }
+
+ spin_unlock(&bdc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* Gadget ops */
+static int bdc_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (bdc->gadget_driver) {
+ dev_err(bdc->dev, "%s is already bound to %s\n",
+ bdc->gadget.name,
+ bdc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err;
+ }
+ /*
+ * Run the controller from here; when the BDC is connected to a
+ * host, the driver will receive a USPC SR with VBUS present
+ * and will then do a softconnect.
+ */
+ ret = bdc_run(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "%s bdc run fail\n", __func__);
+ goto err;
+ }
+ bdc->gadget_driver = driver;
+ bdc->gadget.dev.driver = &driver->driver;
+err:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_udc_stop(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ bdc_stop(bdc);
+ bdc->gadget_driver = NULL;
+ bdc->gadget.dev.driver = NULL;
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u32 uspc;
+
+ dev_dbg(bdc->dev, "%s() is_on:%d\n", __func__, is_on);
+ if (!gadget)
+ return -EINVAL;
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (!is_on) {
+ bdc_softdisconn(bdc);
+ bdc->pullup = false;
+ } else {
+ /*
+ * For a self-powered device, we need to wait till we receive
+ * a VBUS change and VBUS present event; only then, if the
+ * pullup flag is set, do we present the termination.
+ */
+ bdc->pullup = true;
+ /*
+ * Check if the BDC is already connected to a host, i.e. VBUS=1;
+ * if so, present TERM now. This is typical for bus-powered
+ * devices.
+ */
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ if (uspc & BDC_VBS)
+ bdc_softconn(bdc);
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_self)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (!is_self)
+ bdc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+ else
+ bdc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u8 link_state;
+ u32 uspc;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s() bdc->devstatus=%08x\n",
+ __func__, bdc->devstatus);
+
+ if (!(bdc->devstatus & REMOTE_WAKE_ENABLE))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+ dev_dbg(bdc->dev, "link_state=%d portsc=%x\n", link_state, uspc);
+ if (link_state != BDC_LINK_STATE_U3) {
+ dev_warn(bdc->dev,
+ "can't wakeup from link state %d\n",
+ link_state);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ bdc->devstatus |= REMOTE_WAKEUP_ISSUED;
+
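+ /* request a link transition back to U0 to initiate the remote wakeup */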
+ uspc &= ~BDC_PST_MASK;
+ uspc &= (~BDC_USPSC_RW);
+ uspc |= BDC_PST(BDC_LINK_STATE_U0);
+ uspc |= BDC_SWS;
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+ dev_dbg(bdc->dev, "link_state=%d portsc=%x\n", link_state, uspc);
+out:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_gadget_ops bdc_gadget_ops = {
+ .wakeup = bdc_udc_wakeup,
+ .set_selfpowered = bdc_udc_set_selfpowered,
+ .pullup = bdc_udc_pullup,
+ .udc_start = bdc_udc_start,
+ .udc_stop = bdc_udc_stop,
+};
+
+/* Init the gadget interface and register the udc */
+int bdc_udc_init(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ bdc->gadget.ops = &bdc_gadget_ops;
+ bdc->gadget.max_speed = USB_SPEED_SUPER;
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->gadget.dev.parent = bdc->dev;
+
+ bdc->gadget.sg_supported = false;
+
+
+ bdc->gadget.name = BRCM_BDC_NAME;
+ ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
+ IRQF_SHARED, BRCM_BDC_NAME, bdc);
+ if (ret) {
+ dev_err(bdc->dev,
+ "failed to request irq #%d %d\n",
+ bdc->irq, ret);
+ return ret;
+ }
+
+ ret = bdc_init_ep(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "bdc init ep fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = usb_add_gadget_udc(bdc->dev, &bdc->gadget);
+ if (ret) {
+ dev_err(bdc->dev, "failed to register udc\n");
+ goto err0;
+ }
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ /*
+ * Allocate bd list for ep0 only, ep0 will be enabled on connect
+ * status report when the speed is known
+ */
+ ret = bdc_ep_enable(bdc->bdc_ep_array[1]);
+ if (ret) {
+ dev_err(bdc->dev, "fail to enable %s\n",
+ bdc->bdc_ep_array[1]->name);
+ goto err1;
+ }
+ INIT_DELAYED_WORK(&bdc->func_wake_notify, bdc_func_wake_timer);
+ /* Enable Interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ return 0;
+err1:
+ usb_del_gadget_udc(&bdc->gadget);
+err0:
+ bdc_free_ep(bdc);
+
+ return ret;
+}
+
+void bdc_udc_exit(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ bdc_ep_disable(bdc->bdc_ep_array[1]);
+ usb_del_gadget_udc(&bdc->gadget);
+ bdc_free_ep(bdc);
+}
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 81dc5959e36b..9c598801404c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -367,19 +367,22 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
dum_hcd->active)
dum_hcd->resuming = 0;
- /* if !connected or reset */
+ /* Currently !connected or in reset */
if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- /*
- * We're connected and not reset (reset occurred now),
- * and driver attached - disconnect!
- */
- if ((dum_hcd->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
- (dum_hcd->old_status & USB_PORT_STAT_RESET) == 0 &&
- dum->driver) {
+ unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ dum_hcd->old_status & (~dum_hcd->port_status);
+ unsigned reset = USB_PORT_STAT_RESET &
+ (~dum_hcd->old_status) & dum_hcd->port_status;
+
+ /* Report reset and disconnect events to the driver */
+ if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
spin_unlock(&dum->lock);
- dum->driver->disconnect(&dum->gadget);
+ if (reset)
+ usb_gadget_udc_reset(&dum->gadget, dum->driver);
+ else
+ dum->driver->disconnect(&dum->gadget);
spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
@@ -851,8 +854,7 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
static int dummy_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int dummy_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int dummy_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops dummy_ops = {
.get_frame = dummy_g_get_frame,
@@ -908,23 +910,16 @@ static int dummy_udc_start(struct usb_gadget *g,
*/
dum->devstatus = 0;
-
dum->driver = driver;
- dev_dbg(udc_dev(dum), "binding gadget driver '%s'\n",
- driver->driver.name);
+
return 0;
}
-static int dummy_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int dummy_udc_stop(struct usb_gadget *g)
{
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver)
- dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
- driver->driver.name);
-
dum->driver = NULL;
return 0;
@@ -1042,7 +1037,6 @@ static struct platform_driver dummy_udc_driver = {
.resume = dummy_udc_resume,
.driver = {
.name = (char *) gadget_name,
- .owner = THIS_MODULE,
},
};
@@ -2370,7 +2364,6 @@ static void dummy_stop(struct usb_hcd *hcd)
dum = hcd_to_dummy_hcd(hcd)->dum;
device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
- usb_gadget_unregister_driver(dum->driver);
dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
}
@@ -2612,7 +2605,6 @@ static struct platform_driver dummy_hcd_driver = {
.resume = dummy_hcd_resume,
.driver = {
.name = (char *) driver_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 1d315921bf34..e547ea7f56b1 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1053,8 +1053,7 @@ static void fotg210_init(struct fotg210_udc *fotg210)
iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
}
-static int fotg210_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int fotg210_udc_stop(struct usb_gadget *g)
{
struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
unsigned long flags;
@@ -1203,7 +1202,6 @@ err_alloc:
static struct platform_driver fotg210_driver = {
.driver = {
.name = (char *)udc_name,
- .owner = THIS_MODULE,
},
.probe = fotg210_udc_probe,
.remove = fotg210_udc_remove,
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index dd18ea38e391..795c99c77697 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1887,8 +1887,7 @@ static int qe_get_frame(struct usb_gadget *gadget)
static int fsl_qe_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
-static int fsl_qe_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
+static int fsl_qe_stop(struct usb_gadget *gadget);
/* defined in usb_gadget.h */
static const struct usb_gadget_ops qe_gadget_ops = {
@@ -1918,7 +1917,7 @@ static int reset_queues(struct qe_udc *udc)
/* report disconnect; the driver is already quiesced */
spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
spin_lock(&udc->lock);
return 0;
@@ -2305,13 +2304,10 @@ static int fsl_qe_start(struct usb_gadget *gadget,
udc->ep0_dir = USB_DIR_OUT;
spin_unlock_irqrestore(&udc->lock, flags);
- dev_info(udc->dev, "%s bind to driver %s\n", udc->gadget.name,
- driver->driver.name);
return 0;
}
-static int fsl_qe_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int fsl_qe_stop(struct usb_gadget *gadget)
{
struct qe_udc *udc;
struct qe_ep *loop_ep;
@@ -2336,8 +2332,6 @@ static int fsl_qe_stop(struct usb_gadget *gadget,
udc->driver = NULL;
- dev_info(udc->dev, "unregistered gadget driver '%s'\r\n",
- driver->driver.name);
return 0;
}
@@ -2538,7 +2532,6 @@ static int qe_udc_probe(struct platform_device *ofdev)
/* create a buf for ZLP send, need to remain zeroed */
udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
if (udc->nullbuf == NULL) {
- dev_err(udc->dev, "cannot alloc nullbuf\n");
ret = -ENOMEM;
goto err3;
}
@@ -2709,7 +2702,6 @@ MODULE_DEVICE_TABLE(of, qe_udc_match);
static struct platform_driver udc_driver = {
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
.of_match_table = qe_udc_match,
},
.probe = qe_udc_probe,
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index c3620791a315..2df807403f22 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1236,9 +1236,8 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
static int fsl_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int fsl_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
-/* defined in gadget.h */
+static int fsl_udc_stop(struct usb_gadget *g);
+
static const struct usb_gadget_ops fsl_gadget_ops = {
.get_frame = fsl_get_frame,
.wakeup = fsl_wakeup,
@@ -1772,7 +1771,7 @@ static void bus_resume(struct fsl_udc *udc)
}
/* Clear up all ep queues */
-static int reset_queues(struct fsl_udc *udc)
+static int reset_queues(struct fsl_udc *udc, bool bus_reset)
{
u8 pipe;
@@ -1781,7 +1780,10 @@ static int reset_queues(struct fsl_udc *udc)
/* report disconnect; the driver is already quiesced */
spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
+ if (bus_reset)
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ else
+ udc->driver->disconnect(&udc->gadget);
spin_lock(&udc->lock);
return 0;
@@ -1835,7 +1837,7 @@ static void reset_irq(struct fsl_udc *udc)
udc->bus_reset = 1;
/* Reset all the queues, include XD, dTD, EP queue
* head and TR Queue */
- reset_queues(udc);
+ reset_queues(udc, true);
udc->usb_state = USB_STATE_DEFAULT;
} else {
VDBG("Controller reset");
@@ -1844,7 +1846,7 @@ static void reset_irq(struct fsl_udc *udc)
dr_controller_setup(udc);
/* Reset all internal used Queues */
- reset_queues(udc);
+ reset_queues(udc, false);
ep0_setup(udc);
@@ -1975,8 +1977,7 @@ static int fsl_udc_start(struct usb_gadget *g,
}
/* Disconnect from gadget driver */
-static int fsl_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int fsl_udc_stop(struct usb_gadget *g)
{
struct fsl_ep *loop_ep;
unsigned long flags;
@@ -2669,7 +2670,6 @@ static struct platform_driver udc_driver = {
.resume = fsl_udc_resume,
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
/* udc suspend/resume called from OTG driver */
.suspend = fsl_udc_otg_suspend,
.resume = fsl_udc_otg_resume,
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 8286df72add4..fb4df159d32d 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1320,8 +1320,7 @@ static int fusb300_udc_start(struct usb_gadget *g,
return 0;
}
-static int fusb300_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int fusb300_udc_stop(struct usb_gadget *g)
{
struct fusb300 *fusb300 = to_fusb300(g);
@@ -1496,7 +1495,6 @@ static struct platform_driver fusb300_driver = {
.remove = __exit_p(fusb300_remove),
.driver = {
.name = (char *) udc_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index bf9c5ef8b56b..5b9176e7202a 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -992,8 +992,7 @@ static int goku_get_frame(struct usb_gadget *_gadget)
static int goku_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int goku_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int goku_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
@@ -1364,8 +1363,7 @@ static void stop_activity(struct goku_udc *dev)
udc_enable(dev);
}
-static int goku_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int goku_udc_stop(struct usb_gadget *g)
{
struct goku_udc *dev = to_goku_udc(g);
unsigned long flags;
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1b3048a6a2a3..c8868870e217 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1932,14 +1932,10 @@ static int gr_udc_start(struct usb_gadget *gadget,
spin_unlock(&dev->lock);
- dev_info(dev->dev, "Started with gadget driver '%s'\n",
- driver->driver.name);
-
return 0;
}
-static int gr_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int gr_udc_stop(struct usb_gadget *gadget)
{
struct gr_udc *dev = to_gr_udc(gadget);
unsigned long flags;
@@ -1951,8 +1947,6 @@ static int gr_udc_stop(struct usb_gadget *gadget,
spin_unlock_irqrestore(&dev->lock, flags);
- dev_info(dev->dev, "Stopped\n");
-
return 0;
}
@@ -2262,7 +2256,6 @@ MODULE_DEVICE_TABLE(of, gr_match);
static struct platform_driver gr_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = gr_match,
},
.probe = gr_probe,
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index feab0bac8fdc..34d9b7b831b3 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -582,8 +582,7 @@ static void create_debug_file(struct lpc32xx_udc *udc)
static void remove_debug_file(struct lpc32xx_udc *udc)
{
- if (udc->pde)
- debugfs_remove(udc->pde);
+ debugfs_remove(udc->pde);
}
#else
@@ -2559,7 +2558,7 @@ static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
}
static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
-static int lpc32xx_stop(struct usb_gadget *, struct usb_gadget_driver *);
+static int lpc32xx_stop(struct usb_gadget *);
static const struct usb_gadget_ops lpc32xx_udc_ops = {
.get_frame = lpc32xx_get_frame,
@@ -2961,15 +2960,11 @@ static int lpc32xx_start(struct usb_gadget *gadget,
return 0;
}
-static int lpc32xx_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int lpc32xx_stop(struct usb_gadget *gadget)
{
int i;
struct lpc32xx_udc *udc = to_udc(gadget);
- if (!driver || driver != udc->driver)
- return -EINVAL;
-
for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
disable_irq(udc->udp_irq[i]);
@@ -3410,7 +3405,6 @@ static struct platform_driver lpc32xx_udc_driver = {
.resume = lpc32xx_udc_resume,
.driver = {
.name = (char *) driver_name,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(lpc32xx_udc_of_match),
},
};
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 898565687a8c..8c7c83c93713 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1142,7 +1142,7 @@ static void irq_device_state(struct m66592 *m66592)
m66592_write(m66592, ~M66592_DVST, M66592_INTSTS0);
if (dvsq == M66592_DS_DFLT) { /* bus reset */
- m66592->driver->disconnect(&m66592->gadget);
+ usb_gadget_udc_reset(&m66592->gadget, m66592->driver);
m66592_update_usb_speed(m66592);
}
if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
@@ -1485,8 +1485,7 @@ static int m66592_udc_start(struct usb_gadget *g,
return 0;
}
-static int m66592_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int m66592_udc_stop(struct usb_gadget *g)
{
struct m66592 *m66592 = to_m66592(g);
@@ -1699,7 +1698,6 @@ static struct platform_driver m66592_driver = {
.remove = __exit_p(m66592_remove),
.driver = {
.name = (char *) udc_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 046a1f808b0d..ea35a248c898 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1266,8 +1266,7 @@ static int mv_u3d_start(struct usb_gadget *g,
return 0;
}
-static int mv_u3d_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int mv_u3d_stop(struct usb_gadget *g)
{
struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
@@ -1284,7 +1283,7 @@ static int mv_u3d_stop(struct usb_gadget *g,
mv_u3d_controller_stop(u3d);
/* stop all usb activities */
u3d->gadget.speed = USB_SPEED_UNKNOWN;
- mv_u3d_stop_activity(u3d, driver);
+ mv_u3d_stop_activity(u3d, NULL);
mv_u3d_disable(u3d);
if (pdata->phy_deinit)
@@ -2053,7 +2052,6 @@ static struct platform_driver mv_u3d_driver = {
.remove = mv_u3d_remove,
.shutdown = mv_u3d_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "mv-u3d",
.pm = &mv_u3d_pm_ops,
},
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 3c5db80ae325..253f3df8326a 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1223,7 +1223,7 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
}
static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
-static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *);
+static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
@@ -1307,6 +1307,23 @@ static void nuke(struct mv_ep *ep, int status)
}
}
+static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
+{
+ struct mv_ep *ep;
+
+ nuke(&udc->eps[0], -ESHUTDOWN);
+
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ nuke(ep, -ESHUTDOWN);
+ }
+
+ /* report reset; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, driver);
+ spin_lock(&udc->lock);
+ }
+}
/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
@@ -1371,8 +1388,7 @@ static int mv_udc_start(struct usb_gadget *gadget,
return 0;
}
-static int mv_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int mv_udc_stop(struct usb_gadget *gadget)
{
struct mv_udc *udc;
unsigned long flags;
@@ -1386,7 +1402,7 @@ static int mv_udc_stop(struct usb_gadget *gadget,
/* stop all usb activities */
udc->gadget.speed = USB_SPEED_UNKNOWN;
- stop_activity(udc, driver);
+ stop_activity(udc, NULL);
mv_udc_disable(udc);
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1882,7 +1898,7 @@ static void irq_process_reset(struct mv_udc *udc)
dev_info(&udc->dev->dev, "usb bus reset\n");
udc->usb_state = USB_STATE_DEFAULT;
/* reset all the queues, stop all USB activities */
- stop_activity(udc, udc->driver);
+ gadget_reset(udc, udc->driver);
} else {
dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
readl(&udc->op_regs->portsc));
@@ -2107,10 +2123,8 @@ static int mv_udc_probe(struct platform_device *pdev)
}
udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
- if (udc == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory for udc\n");
+ if (udc == NULL)
return -ENOMEM;
- }
udc->done = &release_done;
udc->pdata = dev_get_platdata(&pdev->dev);
@@ -2207,7 +2221,6 @@ static int mv_udc_probe(struct platform_device *pdev)
size = udc->max_eps * sizeof(struct mv_ep) *2;
udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (udc->eps == NULL) {
- dev_err(&pdev->dev, "allocate ep memory failed\n");
retval = -ENOMEM;
goto err_destroy_dma;
}
@@ -2216,7 +2229,6 @@ static int mv_udc_probe(struct platform_device *pdev)
udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
GFP_KERNEL);
if (!udc->status_req) {
- dev_err(&pdev->dev, "allocate status_req memory failed\n");
retval = -ENOMEM;
goto err_destroy_dma;
}
@@ -2403,7 +2415,6 @@ static struct platform_driver udc_driver = {
.remove = mv_udc_remove,
.shutdown = mv_udc_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "mv-udc",
#ifdef CONFIG_PM
.pm = &mv_udc_pm_ops,
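
A condensed sketch of the bus-reset reporting pattern adopted here and in several drivers below: instead of calling the gadget driver's ->disconnect() on a USB reset, the UDC quiesces its endpoints and lets udc-core notify the bound driver through usb_gadget_udc_reset(), dropping the controller lock around the callback. foo_udc and foo_nuke_all_eps are placeholder names, not code from this series.

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct foo_udc {
	struct usb_gadget	gadget;
	struct usb_gadget_driver *driver;
	spinlock_t		lock;
};

/* placeholder: complete every queued request with the given status */
static void foo_nuke_all_eps(struct foo_udc *udc, int status)
{
}

/* called from the UDC interrupt handler with udc->lock held */
static void foo_handle_bus_reset(struct foo_udc *udc)
{
	foo_nuke_all_eps(udc, -ESHUTDOWN);

	/* report the reset; the hardware side is already quiesced */
	if (udc->driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
		spin_lock(&udc->lock);
	}
}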
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 84d7162a8022..d20de1fab08e 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1169,8 +1169,7 @@ net2272_pullup(struct usb_gadget *_gadget, int is_on)
static int net2272_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
-static int net2272_stop(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver);
+static int net2272_stop(struct usb_gadget *_gadget);
static const struct usb_gadget_ops net2272_ops = {
.get_frame = net2272_get_frame,
@@ -1471,8 +1470,6 @@ static int net2272_start(struct usb_gadget *_gadget,
*/
net2272_ep0_start(dev);
- dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
-
return 0;
}
@@ -1502,8 +1499,7 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
net2272_usb_reinit(dev);
}
-static int net2272_stop(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver)
+static int net2272_stop(struct usb_gadget *_gadget)
{
struct net2272 *dev;
unsigned long flags;
@@ -1511,12 +1507,11 @@ static int net2272_stop(struct usb_gadget *_gadget,
dev = container_of(_gadget, struct net2272, gadget);
spin_lock_irqsave(&dev->lock, flags);
- stop_activity(dev, driver);
+ stop_activity(dev, NULL);
spin_unlock_irqrestore(&dev->lock, flags);
dev->driver = NULL;
- dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
@@ -1987,17 +1982,42 @@ net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
if (stat & tmp) {
+ bool reset = false;
+ bool disconnect = false;
+
+ /*
+ * Ignore disconnects and resets if the speed hasn't been set.
+ * VBUS can bounce and there's always an initial reset.
+ */
net2272_write(dev, IRQSTAT1, tmp);
- if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
- ((net2272_read(dev, USBCTL1) & mask) == 0))
- || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
- == 0))
- && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
- dev_dbg(dev->dev, "disconnect %s\n",
- dev->driver->driver.name);
- stop_activity(dev, dev->driver);
- net2272_ep0_start(dev);
- return;
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+ if ((stat & (1 << VBUS_INTERRUPT)) &&
+ (net2272_read(dev, USBCTL1) &
+ (1 << VBUS_PIN)) == 0) {
+ disconnect = true;
+ dev_dbg(dev->dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
+ (net2272_read(dev, USBCTL1) & mask)
+ == 0) {
+ reset = true;
+ dev_dbg(dev->dev, "reset %s\n",
+ dev->driver->driver.name);
+ }
+
+ if (disconnect || reset) {
+ stop_activity(dev, dev->driver);
+ net2272_ep0_start(dev);
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset
+ (&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)
+ (&dev->gadget);
+ spin_lock(&dev->lock);
+ return;
+ }
}
stat &= ~tmp;
@@ -2200,18 +2220,8 @@ static void
net2272_remove(struct net2272 *dev)
{
usb_del_gadget_udc(&dev->gadget);
-
- /* start with the driver above us */
- if (dev->driver) {
- /* should have been done already by driver model core */
- dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
- dev->driver->driver.name);
- usb_gadget_unregister_driver(dev->driver);
- }
-
free_irq(dev->irq, dev);
iounmap(dev->base_addr);
-
device_remove_file(dev->dev, &dev_attr_registers);
dev_info(dev->dev, "unbind\n");
@@ -2674,7 +2684,6 @@ static struct platform_driver net2272_plat_driver = {
.remove = net2272_plat_remove,
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
},
/* FIXME .suspend, .resume */
};
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 8d13337e2dde..d6411e0a8e03 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1118,10 +1118,10 @@ static void scan_dma_completions(struct net2280_ep *ep)
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) != 0) {
- tmp = readl(&ep->regs->ep_stat);
if (ep->dev->quirks & PLX_SUPERSPEED)
return dma_done(ep, req, tmp, 0);
+ tmp = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
* your gadget driver. That helps avoid errata 0121,
* 0122, and 0124; not all cases trigger the warning.
@@ -1548,8 +1548,7 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
static int net2280_start(struct usb_gadget *_gadget,
struct usb_gadget_driver *driver);
-static int net2280_stop(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver);
+static int net2280_stop(struct usb_gadget *_gadget);
static const struct usb_gadget_ops net2280_ops = {
.get_frame = net2280_get_frame,
@@ -2397,11 +2396,6 @@ static int net2280_start(struct usb_gadget *_gadget,
ep0_start(dev);
- ep_dbg(dev, "%s ready, usbctl %08x stdrsp %08x\n",
- driver->driver.name,
- readl(&dev->usb->usbctl),
- readl(&dev->usb->stdrsp));
-
/* pci writes may still be posted */
return 0;
@@ -2437,8 +2431,7 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
usb_reinit(dev);
}
-static int net2280_stop(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver)
+static int net2280_stop(struct usb_gadget *_gadget)
{
struct net2280 *dev;
unsigned long flags;
@@ -2446,11 +2439,9 @@ static int net2280_stop(struct usb_gadget *_gadget,
dev = container_of(_gadget, struct net2280, gadget);
spin_lock_irqsave(&dev->lock, flags);
- stop_activity(dev, driver);
+ stop_activity(dev, NULL);
spin_unlock_irqrestore(&dev->lock, flags);
- dev->driver = NULL;
-
net2280_led_active(dev, 0);
/* Disable full-speed test mode */
@@ -2460,8 +2451,7 @@ static int net2280_stop(struct usb_gadget *_gadget,
device_remove_file(&dev->pdev->dev, &dev_attr_function);
device_remove_file(&dev->pdev->dev, &dev_attr_queues);
- ep_dbg(dev, "unregistered driver '%s'\n",
- driver ? driver->driver.name : "");
+ dev->driver = NULL;
return 0;
}
@@ -3318,17 +3308,42 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
* only indicates a change in the reset state).
*/
if (stat & tmp) {
+ bool reset = false;
+ bool disconnect = false;
+
+ /*
+ * Ignore disconnects and resets if the speed hasn't been set.
+ * VBUS can bounce and there's always an initial reset.
+ */
writel(tmp, &dev->regs->irqstat1);
- if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
- ((readl(&dev->usb->usbstat) & mask) == 0)) ||
- ((readl(&dev->usb->usbctl) &
- BIT(VBUS_PIN)) == 0)) &&
- (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
- ep_dbg(dev, "disconnect %s\n",
- dev->driver->driver.name);
- stop_activity(dev, dev->driver);
- ep0_start(dev);
- return;
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+ if ((stat & BIT(VBUS_INTERRUPT)) &&
+ (readl(&dev->usb->usbctl) &
+ BIT(VBUS_PIN)) == 0) {
+ disconnect = true;
+ ep_dbg(dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
+ (readl(&dev->usb->usbstat) & mask)
+ == 0) {
+ reset = true;
+ ep_dbg(dev, "reset %s\n",
+ dev->driver->driver.name);
+ }
+
+ if (disconnect || reset) {
+ stop_activity(dev, dev->driver);
+ ep0_start(dev);
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset
+ (&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)
+ (&dev->gadget);
+ spin_lock(&dev->lock);
+ return;
+ }
}
stat &= ~tmp;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index dcdfea46003b..288c087220a8 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1311,8 +1311,7 @@ static int omap_pullup(struct usb_gadget *gadget, int is_on)
static int omap_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int omap_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int omap_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops omap_gadget_ops = {
.get_frame = omap_get_frame,
@@ -2102,8 +2101,7 @@ done:
return status;
}
-static int omap_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int omap_udc_stop(struct usb_gadget *g)
{
unsigned long flags;
int status = -ENODEV;
@@ -3026,7 +3024,6 @@ static struct platform_driver udc_driver = {
.suspend = omap_udc_suspend,
.resume = omap_udc_resume,
.driver = {
- .owner = THIS_MODULE,
.name = (char *) driver_name,
},
};
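
The repeated removal of ".owner = THIS_MODULE" from these platform drivers relies on the platform core: platform_driver_register() is a macro that fills in the owner from THIS_MODULE itself, so spelling it out in struct platform_driver is redundant. Minimal illustration with a hypothetical foo driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		/* no .owner: platform_driver_register() sets it */
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");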
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index ccbe3d4a2a50..1c7379ac2379 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1240,8 +1240,8 @@ static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
static int pch_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int pch_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int pch_udc_stop(struct usb_gadget *g);
+
static const struct usb_gadget_ops pch_udc_ops = {
.get_frame = pch_udc_pcd_get_frame,
.wakeup = pch_udc_pcd_wakeup,
@@ -2592,9 +2592,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
/* Complete request queue */
empty_req_queue(ep);
}
- if (dev->driver && dev->driver->disconnect) {
+ if (dev->driver) {
spin_unlock(&dev->lock);
- dev->driver->disconnect(&dev->gadget);
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
spin_lock(&dev->lock);
}
}
@@ -3008,8 +3008,7 @@ static int pch_udc_start(struct usb_gadget *g,
return 0;
}
-static int pch_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int pch_udc_stop(struct usb_gadget *g)
{
struct pch_udc_dev *dev = to_pch_udc(g);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 42f7eeb8ff6f..8550b2d5db32 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -998,8 +998,7 @@ static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
static int pxa25x_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int pxa25x_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int pxa25x_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops pxa25x_udc_ops = {
.get_frame = pxa25x_udc_get_frame,
@@ -1135,11 +1134,7 @@ static const struct file_operations debug_fops = {
dev->debugfs_udc = debugfs_create_file(dev->gadget.name, \
S_IRUGO, NULL, dev, &debug_fops); \
} while (0)
-#define remove_debug_files(dev) \
- do { \
- if (dev->debugfs_udc) \
- debugfs_remove(dev->debugfs_udc); \
- } while (0)
+#define remove_debug_files(dev) debugfs_remove(dev->debugfs_udc)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
@@ -1285,6 +1280,33 @@ bind_fail:
}
static void
+reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect drivers more than once */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* prevent new request submissions, kill any outstanding requests */
+ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ struct pxa25x_ep *ep = &dev->ep[i];
+
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ del_timer_sync(&dev->timer);
+
+ /* report reset; the driver is already quiesced */
+ if (driver)
+ usb_gadget_udc_reset(&dev->gadget, driver);
+
+ /* re-init driver-visible data structures */
+ udc_reinit(dev);
+}
+
+static void
stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
{
int i;
@@ -1311,15 +1333,14 @@ stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
udc_reinit(dev);
}
-static int pxa25x_udc_stop(struct usb_gadget*g,
- struct usb_gadget_driver *driver)
+static int pxa25x_udc_stop(struct usb_gadget*g)
{
struct pxa25x_udc *dev = to_pxa25x(g);
local_irq_disable();
dev->pullup = 0;
pullup(dev);
- stop_activity(dev, driver);
+ stop_activity(dev, NULL);
local_irq_enable();
if (!IS_ERR_OR_NULL(dev->transceiver))
@@ -1723,7 +1744,7 @@ pxa25x_udc_irq(int irq, void *_dev)
/* reset driver and endpoints,
* in case that's not yet done
*/
- stop_activity (dev, dev->driver);
+ reset_gadget(dev, dev->driver);
} else {
DBG(DBG_VERBOSE, "USB reset end\n");
@@ -2271,7 +2292,6 @@ static struct platform_driver udc_driver = {
.suspend = pxa25x_udc_suspend,
.resume = pxa25x_udc_resume,
.driver = {
- .owner = THIS_MODULE,
.name = "pxa25x-udc",
},
};
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 4868369eeec6..c61a896061fa 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -22,10 +22,13 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
@@ -1507,18 +1510,13 @@ static struct usb_ep_ops pxa_ep_ops = {
*/
static void dplus_pullup(struct pxa_udc *udc, int on)
{
- if (on) {
- if (gpio_is_valid(udc->mach->gpio_pullup))
- gpio_set_value(udc->mach->gpio_pullup,
- !udc->mach->gpio_pullup_inverted);
- if (udc->mach->udc_command)
- udc->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
- } else {
- if (gpio_is_valid(udc->mach->gpio_pullup))
- gpio_set_value(udc->mach->gpio_pullup,
- udc->mach->gpio_pullup_inverted);
- if (udc->mach->udc_command)
- udc->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+ if (udc->gpiod) {
+ gpiod_set_value(udc->gpiod, on);
+ } else if (udc->udc_command) {
+ if (on)
+ udc->udc_command(PXA2XX_UDC_CMD_CONNECT);
+ else
+ udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}
udc->pullup_on = on;
}
@@ -1609,7 +1607,7 @@ static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
{
struct pxa_udc *udc = to_gadget_udc(_gadget);
- if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+ if (!udc->gpiod && !udc->udc_command)
return -EOPNOTSUPP;
dplus_pullup(udc, is_active);
@@ -1671,8 +1669,7 @@ static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
static int pxa27x_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int pxa27x_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int pxa27x_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops pxa_udc_ops = {
.get_frame = pxa_udc_get_frame,
@@ -1701,10 +1698,10 @@ static void udc_disable(struct pxa_udc *udc)
udc_writel(udc, UDCICR1, 0);
udc_clear_mask_UDCCR(udc, UDCCR_UDE);
- clk_disable(udc->clk);
ep0_idle(udc);
udc->gadget.speed = USB_SPEED_UNKNOWN;
+ clk_disable(udc->clk);
udc->enabled = 0;
}
@@ -1757,16 +1754,16 @@ static void udc_enable(struct pxa_udc *udc)
if (udc->enabled)
return;
+ clk_enable(udc->clk);
udc_writel(udc, UDCICR0, 0);
udc_writel(udc, UDCICR1, 0);
udc_clear_mask_UDCCR(udc, UDCCR_UDE);
- clk_enable(udc->clk);
-
ep0_idle(udc);
udc->gadget.speed = USB_SPEED_FULL;
memset(&udc->stats, 0, sizeof(udc->stats));
+ pxa_eps_setup(udc);
udc_set_mask_UDCCR(udc, UDCCR_UDE);
ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
udelay(2);
@@ -1859,12 +1856,11 @@ static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
*
* Returns 0 if no error, -ENODEV, -EINVAL otherwise
*/
-static int pxa27x_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int pxa27x_udc_stop(struct usb_gadget *g)
{
struct pxa_udc *udc = to_pxa(g);
- stop_activity(udc, driver);
+ stop_activity(udc, NULL);
udc_disable(udc);
dplus_pullup(udc, 0);
@@ -2404,6 +2400,14 @@ static struct pxa_udc memory = {
}
};
+#if defined(CONFIG_OF)
+static struct of_device_id udc_pxa_dt_ids[] = {
+ { .compatible = "marvell,pxa270-udc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
+#endif
+
/**
* pxa_udc_probe - probes the udc device
* @_dev: platform device
@@ -2416,81 +2420,77 @@ static int pxa_udc_probe(struct platform_device *pdev)
struct resource *regs;
struct pxa_udc *udc = &memory;
int retval = 0, gpio;
+ struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
+ unsigned long gpio_flags;
+
+ if (mach) {
+ gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
+ gpio = mach->gpio_pullup;
+ if (gpio_is_valid(gpio)) {
+ retval = devm_gpio_request_one(&pdev->dev, gpio,
+ gpio_flags,
+ "USB D+ pullup");
+ if (retval)
+ return retval;
+ udc->gpiod = gpio_to_desc(mach->gpio_pullup);
+ }
+ udc->udc_command = mach->udc_command;
+ } else {
+ udc->gpiod = devm_gpiod_get(&pdev->dev, NULL);
+ }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
+ udc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
udc->irq = platform_get_irq(pdev, 0);
if (udc->irq < 0)
return udc->irq;
udc->dev = &pdev->dev;
- udc->mach = dev_get_platdata(&pdev->dev);
udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- gpio = udc->mach->gpio_pullup;
- if (gpio_is_valid(gpio)) {
- retval = gpio_request(gpio, "USB D+ pullup");
- if (retval == 0)
- gpio_direction_output(gpio,
- udc->mach->gpio_pullup_inverted);
- }
- if (retval) {
- dev_err(&pdev->dev, "Couldn't request gpio %d : %d\n",
- gpio, retval);
- return retval;
+ if (IS_ERR(udc->gpiod)) {
+ dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
+ PTR_ERR(udc->gpiod));
+ return PTR_ERR(udc->gpiod);
}
+ if (udc->gpiod)
+ gpiod_direction_output(udc->gpiod, 0);
+
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->clk))
+ return PTR_ERR(udc->clk);
- udc->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(udc->clk)) {
- retval = PTR_ERR(udc->clk);
- goto err_clk;
- }
retval = clk_prepare(udc->clk);
if (retval)
- goto err_clk_prepare;
-
- retval = -ENOMEM;
- udc->regs = ioremap(regs->start, resource_size(regs));
- if (!udc->regs) {
- dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
- goto err_map;
- }
+ return retval;
udc->vbus_sensed = 0;
the_controller = udc;
platform_set_drvdata(pdev, udc);
udc_init_data(udc);
- pxa_eps_setup(udc);
/* irq setup after old hardware state is cleaned up */
- retval = request_irq(udc->irq, pxa_udc_irq,
- IRQF_SHARED, driver_name, udc);
+ retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
+ IRQF_SHARED, driver_name, udc);
if (retval != 0) {
dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
driver_name, udc->irq, retval);
- goto err_irq;
+ goto err;
}
retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (retval)
- goto err_add_udc;
+ goto err;
pxa_init_debugfs(udc);
-
+ if (should_enable_udc(udc))
+ udc_enable(udc);
return 0;
-
-err_add_udc:
- free_irq(udc->irq, udc);
-err_irq:
- iounmap(udc->regs);
-err_map:
+err:
clk_unprepare(udc->clk);
-err_clk_prepare:
- clk_put(udc->clk);
- udc->clk = NULL;
-err_clk:
return retval;
}
@@ -2501,22 +2501,15 @@ err_clk:
static int pxa_udc_remove(struct platform_device *_dev)
{
struct pxa_udc *udc = platform_get_drvdata(_dev);
- int gpio = udc->mach->gpio_pullup;
usb_del_gadget_udc(&udc->gadget);
- usb_gadget_unregister_driver(udc->driver);
- free_irq(udc->irq, udc);
pxa_cleanup_debugfs(udc);
- if (gpio_is_valid(gpio))
- gpio_free(gpio);
usb_put_phy(udc->transceiver);
udc->transceiver = NULL;
the_controller = NULL;
clk_unprepare(udc->clk);
- clk_put(udc->clk);
- iounmap(udc->regs);
return 0;
}
@@ -2546,19 +2539,11 @@ extern void pxa27x_clear_otgph(void);
*/
static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
{
- int i;
struct pxa_udc *udc = platform_get_drvdata(_dev);
struct pxa_ep *ep;
ep = &udc->pxa_ep[0];
udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
- for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
- ep = &udc->pxa_ep[i];
- ep->udccsr_value = udc_ep_readl(ep, UDCCSR);
- ep->udccr_value = udc_ep_readl(ep, UDCCR);
- ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
- ep->udccsr_value, ep->udccr_value);
- }
udc_disable(udc);
udc->pullup_resume = udc->pullup_on;
@@ -2576,19 +2561,11 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
*/
static int pxa_udc_resume(struct platform_device *_dev)
{
- int i;
struct pxa_udc *udc = platform_get_drvdata(_dev);
struct pxa_ep *ep;
ep = &udc->pxa_ep[0];
udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
- for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
- ep = &udc->pxa_ep[i];
- udc_ep_writel(ep, UDCCSR, ep->udccsr_value);
- udc_ep_writel(ep, UDCCR, ep->udccr_value);
- ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
- ep->udccsr_value, ep->udccr_value);
- }
dplus_pullup(udc, udc->pullup_resume);
if (should_enable_udc(udc))
@@ -2614,7 +2591,7 @@ MODULE_ALIAS("platform:pxa27x-udc");
static struct platform_driver udc_driver = {
.driver = {
.name = "pxa27x-udc",
- .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(udc_pxa_dt_ids),
},
.probe = pxa_udc_probe,
.remove = pxa_udc_remove,
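
A sketch of the D+ pullup GPIO handling this conversion introduces, reduced to its essentials and mirroring the calls used in the hunk above (foo is an assumed name; the real probe also maps registers, the clock and the IRQ): legacy board files still hand over a GPIO number through platform data, which is requested with devm_gpio_request_one() and wrapped into a descriptor, while DT probing goes straight to devm_gpiod_get().

#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/pxa2xx_udc.h>

struct foo {
	struct gpio_desc *gpiod;	/* NULL if the board has no pullup GPIO */
	void (*udc_command)(int);
};

/* foo is assumed to be zero-initialized by the caller */
static int foo_get_pullup(struct platform_device *pdev, struct foo *foo)
{
	struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
	int ret;

	if (mach) {
		/* legacy platform data: GPIO number plus polarity flag */
		unsigned long flags = mach->gpio_pullup_inverted ?
						GPIOF_ACTIVE_LOW : 0;

		if (gpio_is_valid(mach->gpio_pullup)) {
			ret = devm_gpio_request_one(&pdev->dev,
						    mach->gpio_pullup, flags,
						    "USB D+ pullup");
			if (ret)
				return ret;
			foo->gpiod = gpio_to_desc(mach->gpio_pullup);
		}
		foo->udc_command = mach->udc_command;
	} else {
		/* DT: polarity comes from the gpios property itself */
		foo->gpiod = devm_gpiod_get(&pdev->dev, NULL);
		if (IS_ERR(foo->gpiod))
			return PTR_ERR(foo->gpiod);
	}

	if (foo->gpiod)
		gpiod_direction_output(foo->gpiod, 0);

	return 0;
}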
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 28f2b53530f5..11e14232794b 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -420,7 +420,8 @@ struct udc_stats {
* @usb_gadget: udc gadget structure
* @driver: bound gadget (zero, g_ether, g_mass_storage, ...)
* @dev: device
- * @mach: machine info, used to activate specific GPIO
+ * @udc_command: machine specific function to activate D+ pullup
+ * @gpiod: gpio descriptor of gpio for D+ pullup (or NULL if none)
* @transceiver: external transceiver to handle vbus sense and D+ pullup
* @ep0state: control endpoint state machine state
* @stats: statistics on udc usage
@@ -446,7 +447,8 @@ struct pxa_udc {
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct device *dev;
- struct pxa2xx_udc_mach_info *mach;
+ void (*udc_command)(int);
+ struct gpio_desc *gpiod;
struct usb_phy *transceiver;
enum ep0_state ep0state;
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index f8186613b53e..06870da0b988 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1345,7 +1345,7 @@ static void irq_device_state(struct r8a66597 *r8a66597)
if (dvsq == DS_DFLT) {
/* bus reset */
spin_unlock(&r8a66597->lock);
- r8a66597->driver->disconnect(&r8a66597->gadget);
+ usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
spin_lock(&r8a66597->lock);
r8a66597_update_usb_speed(r8a66597);
}
@@ -1763,8 +1763,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
return 0;
}
-static int r8a66597_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int r8a66597_stop(struct usb_gadget *gadget)
{
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
unsigned long flags;
@@ -1846,10 +1845,7 @@ static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
r8a66597->sudmac_reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(r8a66597->sudmac_reg))
- return PTR_ERR(r8a66597->sudmac_reg);
-
- return 0;
+ return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
}
static int r8a66597_probe(struct platform_device *pdev)
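
The sudmac ioremap cleanup above uses PTR_ERR_OR_ZERO(), a small helper from <linux/err.h> that collapses the common "return the error code if this is an ERR_PTR, otherwise return 0" tail. Illustrative sketch only:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map_sudmac(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
	regs = devm_ioremap_resource(&pdev->dev, res);

	/* same as: if (IS_ERR(regs)) return PTR_ERR(regs); return 0; */
	return PTR_ERR_OR_ZERO(regs);
}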
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index dfbf55797360..85a712a03343 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1172,8 +1172,6 @@ static int s3c_hsudc_start(struct usb_gadget *gadget,
}
enable_irq(hsudc->irq);
- dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name);
-
s3c_hsudc_reconfig(hsudc);
pm_runtime_get_sync(hsudc->dev);
@@ -1190,8 +1188,7 @@ err_supplies:
return ret;
}
-static int s3c_hsudc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int s3c_hsudc_stop(struct usb_gadget *gadget)
{
struct s3c_hsudc *hsudc = to_hsudc(gadget);
unsigned long flags;
@@ -1199,11 +1196,7 @@ static int s3c_hsudc_stop(struct usb_gadget *gadget,
if (!hsudc)
return -ENODEV;
- if (!driver || driver != hsudc->driver)
- return -EINVAL;
-
spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->driver = NULL;
hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_uninit_phy();
@@ -1220,9 +1213,8 @@ static int s3c_hsudc_stop(struct usb_gadget *gadget,
disable_irq(hsudc->irq);
regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+ hsudc->driver = NULL;
- dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
- driver->driver.name);
return 0;
}
@@ -1267,10 +1259,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsudc) +
sizeof(struct s3c_hsudc_ep) * pd->epnum,
GFP_KERNEL);
- if (!hsudc) {
- dev_err(dev, "cannot allocate memory\n");
+ if (!hsudc)
return -ENOMEM;
- }
platform_set_drvdata(pdev, dev);
hsudc->dev = dev;
@@ -1354,7 +1344,6 @@ err_supplies:
static struct platform_driver s3c_hsudc_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "s3c-hsudc",
},
.probe = s3c_hsudc_probe,
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index ff423d15beff..824cf12e9add 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1541,8 +1541,7 @@ static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
static int s3c2410_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int s3c2410_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int s3c2410_udc_stop(struct usb_gadget *g);
static const struct usb_gadget_ops s3c2410_ops = {
.get_frame = s3c2410_udc_get_frame,
@@ -1683,8 +1682,7 @@ static int s3c2410_udc_start(struct usb_gadget *g,
return 0;
}
-static int s3c2410_udc_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int s3c2410_udc_stop(struct usb_gadget *g)
{
struct s3c2410_udc *udc = to_s3c2410(g);
@@ -1997,7 +1995,6 @@ MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
static struct platform_driver udc_driver_24x0 = {
.driver = {
.name = "s3c24x0-usbgadget",
- .owner = THIS_MODULE,
},
.probe = s3c2410_udc_probe,
.remove = s3c2410_udc_remove,
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index f2054659f25b..e31d574d8860 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -174,8 +174,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
/**
* usb_gadget_udc_start - tells usb device controller to start up
- * @gadget: The gadget we want to get started
- * @driver: The driver we want to bind to @gadget
+ * @udc: The UDC to be started
*
* This call is issued by the UDC Class driver when it's about
* to register a gadget driver to the device controller, before
@@ -186,10 +185,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
*
* Returns zero on success, else negative errno.
*/
-static inline int usb_gadget_udc_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
{
- return gadget->ops->udc_start(gadget, driver);
+ return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
}
/**
@@ -204,10 +202,9 @@ static inline int usb_gadget_udc_start(struct usb_gadget *gadget,
* far as powering off UDC completely and disable its data
* line pullups.
*/
-static inline void usb_gadget_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
{
- gadget->ops->udc_stop(gadget, driver);
+ udc->gadget->ops->udc_stop(udc->gadget);
}
/**
@@ -328,14 +325,14 @@ EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
static void usb_gadget_remove_driver(struct usb_udc *udc)
{
dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
- udc->gadget->name);
+ udc->driver->function);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc->gadget, NULL);
+ usb_gadget_udc_stop(udc);
udc->driver = NULL;
udc->dev.driver = NULL;
@@ -395,7 +392,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
ret = driver->bind(udc->gadget, driver);
if (ret)
goto err1;
- ret = usb_gadget_udc_start(udc->gadget, driver);
+ ret = usb_gadget_udc_start(udc);
if (ret) {
driver->unbind(udc->gadget);
goto err1;
@@ -414,7 +411,7 @@ err1:
return ret;
}
-int udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
+int usb_udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
{
struct usb_udc *udc = NULL;
int ret = -ENODEV;
@@ -438,7 +435,7 @@ out:
mutex_unlock(&udc_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(udc_attach_driver);
+EXPORT_SYMBOL_GPL(usb_udc_attach_driver);
int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
{
@@ -513,11 +510,12 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
}
if (sysfs_streq(buf, "connect")) {
- usb_gadget_udc_start(udc->gadget, udc->driver);
+ usb_gadget_udc_start(udc);
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
- usb_gadget_udc_stop(udc->gadget, udc->driver);
+ udc->driver->disconnect(udc->gadget);
+ usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
return -EINVAL;
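
udc_attach_driver() is renamed to usb_udc_attach_driver(), giving the exported symbol the usual usb_ prefix; callers bind a gadget driver to a UDC selected by name. A hypothetical caller might look like the following skeleton (the callbacks are stubs and the UDC name is made up, not taken from this series):

#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int foo_bind(struct usb_gadget *gadget,
		    struct usb_gadget_driver *driver)
{
	return 0;
}

static void foo_unbind(struct usb_gadget *gadget)
{
}

static int foo_setup(struct usb_gadget *gadget,
		     const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* stall everything in this stub */
}

static void foo_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver foo_gadget_driver = {
	.function	= "foo",
	.max_speed	= USB_SPEED_HIGH,
	.bind		= foo_bind,
	.unbind		= foo_unbind,
	.setup		= foo_setup,
	.disconnect	= foo_disconnect,
	.driver		= {
		.name	= "foo",
	},
};

static int foo_attach(void)
{
	/* "dummy_udc.0" is only an example UDC name */
	return usb_udc_attach_driver("dummy_udc.0", &foo_gadget_driver);
}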
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index ed27e1687a4e..1eac56fc384d 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1403,8 +1403,7 @@ err:
*
* Return: zero always
*/
-static int xudc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int xudc_stop(struct usb_gadget *gadget)
{
struct xusb_udc *udc = to_udc(gadget);
unsigned long flags;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index a3ca1375dd52..fafc628480e0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -292,11 +292,15 @@ config USB_EHCI_HCD_PLATFORM
If unsure, say N.
config USB_OCTEON_EHCI
- bool "Octeon on-chip EHCI support"
+ bool "Octeon on-chip EHCI support (DEPRECATED)"
depends on CAVIUM_OCTEON_SOC
default n
select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_HCD_PLATFORM
help
+ This option is deprecated; the driver has been removed. Use
+ USB_EHCI_HCD_PLATFORM instead.
+
Enable support for the Octeon II SOC's on-chip EHCI
controller. It is needed for high-speed (480Mbit/sec)
USB 2.0 device support. All CN6XXX based chips with USB are
@@ -575,12 +579,16 @@ config USB_OHCI_HCD_PLATFORM
If unsure, say N.
config USB_OCTEON_OHCI
- bool "Octeon on-chip OHCI support"
+ bool "Octeon on-chip OHCI support (DEPRECATED)"
depends on CAVIUM_OCTEON_SOC
default USB_OCTEON_EHCI
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_OHCI_LITTLE_ENDIAN
+ select USB_OHCI_HCD_PLATFORM
help
+ This option is deprecated; the driver has been removed. Use
+ USB_OHCI_HCD_PLATFORM instead.

+
Enable support for the Octeon II SOC's on-chip OHCI
controller. It is needed for low-speed USB 1.0 device
support. All CN6XXX based chips with USB are supported.
@@ -754,12 +762,6 @@ config USB_IMX21_HCD
To compile this driver as a module, choose M here: the
module will be called "imx21-hcd".
-
-
-config USB_OCTEON2_COMMON
- bool
- default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
-
config USB_HCD_BCMA
tristate "BCMA usb host driver"
depends on BCMA
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 348c24321562..d6216a493bab 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -73,7 +73,6 @@ obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
-obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index ec9f7b75d497..56a88506febe 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -107,22 +107,15 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- retval = -ENODEV;
- goto fail_request_resource;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
iclk = devm_clk_get(&pdev->dev, "ehci_clk");
if (IS_ERR(iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
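
The reshuffling here repeats across the EHCI glue drivers below: the platform resource is handed straight to devm_ioremap_resource(), which already reports a missing or unusable resource with a proper error code, and hcd->rsrc_start/rsrc_len are filled in only after the mapping has succeeded. Reduced sketch under those assumptions (foo_probe is a placeholder):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static int foo_map_regs(struct platform_device *pdev, struct usb_hcd *hcd)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() checks res itself and logs on error */
	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hcd->regs))
		return PTR_ERR(hcd->regs);

	/* only dereference res once the mapping is known to be good */
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	return 0;
}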
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 7189f2e32ac2..df538fd10aa4 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -74,7 +74,6 @@ static int exynos_ehci_get_phy(struct device *dev,
phy = devm_of_phy_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
- of_node_put(child);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
@@ -188,20 +187,15 @@ skip_phy:
goto fail_clk;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get I/O memory\n");
- err = -ENXIO;
- goto fail_io;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
@@ -323,7 +317,6 @@ static struct platform_driver exynos_ehci_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "exynos-ehci",
- .owner = THIS_MODULE,
.pm = &exynos_ehci_pm_ops,
.of_match_table = of_match_ptr(exynos_ehci_match),
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 2d2ae8db439e..fb7bd0c7dc15 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -93,21 +93,15 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- retval = -ENODEV;
- goto err2;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err2;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
pdata->regs = hcd->regs;
if (pdata->power_budget)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 15feaf924b71..38bfeedae1d0 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -311,6 +311,7 @@ static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable);
#include "ehci-timer.c"
#include "ehci-hub.c"
@@ -329,9 +330,13 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
{
int port = HCS_N_PORTS(ehci->hcs_params);
- while (port--)
+ while (port--) {
ehci_writel(ehci, PORT_RWC_BITS,
&ehci->regs->port_status[port]);
+ spin_unlock_irq(&ehci->lock);
+ ehci_port_power(ehci, port, false);
+ spin_lock_irq(&ehci->lock);
+ }
}
/*
@@ -1233,6 +1238,8 @@ void ehci_init_driver(struct hc_driver *drv,
drv->hcd_priv_size += over->extra_priv_size;
if (over->reset)
drv->reset = over->reset;
+ if (over->port_power)
+ drv->port_power = over->port_power;
}
}
EXPORT_SYMBOL_GPL(ehci_init_driver);
@@ -1268,11 +1275,6 @@ MODULE_LICENSE ("GPL");
#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
-#ifdef CONFIG_USB_OCTEON_EHCI
-#include "ehci-octeon.c"
-#define PLATFORM_DRIVER ehci_octeon_driver
-#endif
-
#ifdef CONFIG_TILE_USB
#include "ehci-tilegx.c"
#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 5728829cf6ef..118edb7bdca2 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -69,10 +69,8 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
if (test_bit(port, &ehci->owned_ports)) {
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
- if (!(status & PORT_POWER)) {
- status |= PORT_POWER;
- ehci_writel(ehci, status, reg);
- }
+ if (!(status & PORT_POWER))
+ ehci_port_power(ehci, port, true);
}
}
@@ -952,9 +950,11 @@ int ehci_hub_control(
clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
- if (HCS_PPC (ehci->hcs_params))
- ehci_writel(ehci, temp & ~PORT_POWER,
- status_reg);
+ if (HCS_PPC(ehci->hcs_params)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ ehci_port_power(ehci, wIndex, false);
+ spin_lock_irqsave(&ehci->lock, flags);
+ }
break;
case USB_PORT_FEAT_C_CONNECTION:
ehci_writel(ehci, temp | PORT_CSC, status_reg);
@@ -1004,9 +1004,9 @@ int ehci_hub_control(
*/
if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
&& HCS_PPC(ehci->hcs_params)) {
- ehci_writel(ehci,
- temp & ~(PORT_RWC_BITS | PORT_POWER),
- status_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ ehci_port_power(ehci, wIndex, false);
+ spin_lock_irqsave(&ehci->lock, flags);
temp = ehci_readl(ehci, status_reg);
}
}
@@ -1187,9 +1187,11 @@ int ehci_hub_control(
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
- if (HCS_PPC (ehci->hcs_params))
- ehci_writel(ehci, temp | PORT_POWER,
- status_reg);
+ if (HCS_PPC(ehci->hcs_params)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ ehci_port_power(ehci, wIndex, true);
+ spin_lock_irqsave(&ehci->lock, flags);
+ }
break;
case USB_PORT_FEAT_RESET:
if (temp & (PORT_SUSPEND|PORT_RESUME))
@@ -1297,3 +1299,20 @@ static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
reg = &ehci->regs->port_status[portnum - 1];
return ehci_readl(ehci, reg) & PORT_OWNER;
}
+
+static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable)
+{
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+ u32 __iomem *status_reg = &ehci->regs->port_status[portnum];
+ u32 temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
+
+ if (enable)
+ ehci_writel(ehci, temp | PORT_POWER, status_reg);
+ else
+ ehci_writel(ehci, temp & ~PORT_POWER, status_reg);
+
+ if (hcd->driver->port_power)
+ hcd->driver->port_power(hcd, portnum, enable);
+
+ return 0;
+}
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 9dc2118ae808..9db74ca7e5b9 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -88,19 +88,13 @@ static int ehci_msm_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get memory resource\n");
- ret = -ENODEV;
- goto put_hcd;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
/*
* OTG driver takes care of PHY initialization, clock management,
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 08147c35f836..849806a75f1c 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -153,7 +153,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
if (ehci_mv == NULL) {
- dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
retval = -ENOMEM;
goto err_put_hcd;
}
@@ -170,12 +169,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
- if (r == NULL) {
- dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_put_hcd;
- }
-
ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(ehci_mv->phy_regs)) {
retval = PTR_ERR(ehci_mv->phy_regs);
@@ -183,12 +176,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
- if (!r) {
- dev_err(&pdev->dev, "no I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_put_hcd;
- }
-
ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(ehci_mv->cap_regs)) {
retval = PTR_ERR(ehci_mv->cap_regs);
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index dbe5e4eea08d..c7a9b31eeaef 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -69,20 +69,13 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Found HC with no register addr. Check setup!\n");
- ret = -ENODEV;
- goto err_alloc;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_alloc;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
hcd->has_tt = 1;
ehci = hcd_to_ehci(hcd);
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
deleted file mode 100644
index 9051439039a7..000000000000
--- a/drivers/usb/host/ehci-octeon.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * EHCI HCD glue for Cavium Octeon II SOCs.
- *
- * Loosely based on ehci-au1xxx.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2010 Cavium Networks
- *
- */
-
-#include <linux/platform_device.h>
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-uctlx-defs.h>
-
-#define OCTEON_EHCI_HCD_NAME "octeon-ehci"
-
-/* Common clock init code. */
-void octeon2_usb_clocks_start(void);
-void octeon2_usb_clocks_stop(void);
-
-static void ehci_octeon_start(void)
-{
- union cvmx_uctlx_ehci_ctl ehci_ctl;
-
- octeon2_usb_clocks_start();
-
- ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
- /* Use 64-bit addressing. */
- ehci_ctl.s.ehci_64b_addr_en = 1;
- ehci_ctl.s.l2c_addr_msb = 0;
- ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
- ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
- cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
-}
-
-static void ehci_octeon_stop(void)
-{
- octeon2_usb_clocks_stop();
-}
-
-static const struct hc_driver ehci_octeon_hc_driver = {
- .description = hcd_name,
- .product_desc = "Octeon EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static u64 ehci_octeon_dma_mask = DMA_BIT_MASK(64);
-
-static int ehci_octeon_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- struct resource *res_mem;
- int irq;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "No irq assigned\n");
- return -ENODEV;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "No register space assigned\n");
- return -ENODEV;
- }
-
- /*
- * We can DMA from anywhere. But the descriptors must be in
- * the lower 4GB.
- */
- pdev->dev.dma_mask = &ehci_octeon_dma_mask;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
- hcd->regs = devm_ioremap_resource(&pdev->dev, res_mem);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err1;
- }
-
- ehci_octeon_start();
-
- ehci = hcd_to_ehci(hcd);
-
- /* Octeon EHCI matches CPU endianness. */
-#ifdef __BIG_ENDIAN
- ehci->big_endian_mmio = 1;
-#endif
-
- ehci->caps = hcd->regs;
-
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err2;
- }
- device_wakeup_enable(hcd->self.controller);
-
- platform_set_drvdata(pdev, hcd);
-
- return 0;
-err2:
- ehci_octeon_stop();
-
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_octeon_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
-
- ehci_octeon_stop();
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver ehci_octeon_driver = {
- .probe = ehci_octeon_drv_probe,
- .remove = ehci_octeon_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = OCTEON_EHCI_HCD_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-MODULE_ALIAS("platform:" OCTEON_EHCI_HCD_NAME);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 22e15cab8ea5..f6eafecab15c 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -25,8 +25,8 @@
#include "ehci.h"
-#define rdl(off) __raw_readl(hcd->regs + (off))
-#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
+#define rdl(off) readl_relaxed(hcd->regs + (off))
+#define wrl(off, val) writel_relaxed((val), hcd->regs + (off))
#define USB_CMD 0x140
#define USB_MODE 0x1a8
@@ -175,15 +175,6 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- err = -ENODEV;
- goto err;
- }
-
/*
* Right now device-tree probed devices don't get dma_mask
* set. Since shared usb code relies on it, set it here for
@@ -193,6 +184,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
if (err)
goto err;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(regs)) {
err = PTR_ERR(regs);
@@ -321,7 +313,6 @@ static struct platform_driver ehci_orion_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "orion-ehci",
- .owner = THIS_MODULE,
.of_match_table = ehci_orion_dt_ids,
},
};
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 2f5b9ce3e042..8557803e6154 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -164,11 +164,6 @@ static int ehci_platform_probe(struct platform_device *dev)
dev_err(&dev->dev, "no irq provided");
return irq;
}
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&dev->dev, "no memory resource provided");
- return -ENXIO;
- }
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
@@ -250,14 +245,15 @@ static int ehci_platform_probe(struct platform_device *dev)
goto err_reset;
}
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
@@ -311,8 +307,7 @@ static int ehci_platform_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int ehci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -348,11 +343,7 @@ static int ehci_platform_resume(struct device *dev)
ehci_resume(hcd, false);
return 0;
}
-
-#else /* !CONFIG_PM */
-#define ehci_platform_suspend NULL
-#define ehci_platform_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
@@ -368,10 +359,8 @@ static const struct platform_device_id ehci_platform_table[] = {
};
MODULE_DEVICE_TABLE(platform, ehci_platform_table);
-static const struct dev_pm_ops ehci_platform_pm_ops = {
- .suspend = ehci_platform_suspend,
- .resume = ehci_platform_resume,
-};
+static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend,
+ ehci_platform_resume);
static struct platform_driver ehci_platform_driver = {
.id_table = ehci_platform_table,
@@ -379,7 +368,6 @@ static struct platform_driver ehci_platform_driver = {
.remove = ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "ehci-platform",
.pm = &ehci_platform_pm_ops,
.of_match_table = vt8500_ehci_ids,
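
Sketch of the PM pattern ehci-platform switches to: the suspend/resume callbacks are compiled only under CONFIG_PM_SLEEP and wired up through SIMPLE_DEV_PM_OPS(), whose sleep callbacks are set only when sleep support is enabled, so the NULL fallbacks can go (foo_* names are placeholders).

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/* the callbacks are referenced only when CONFIG_PM_SLEEP is set */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};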
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index cf1267673868..9b6e8d0eac43 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -110,14 +110,13 @@ static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err1;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
/* Root hub has integrated TT. */
hcd->has_tt = 1;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 9b9b9f5b016e..0e0ce684aff3 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -86,15 +86,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
if (usb_disabled())
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- ret = -ENODEV;
- goto fail_create_hcd;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev,
@@ -114,19 +105,18 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
priv = devm_kzalloc(&pdev->dev, sizeof(struct ehci_sh_priv),
GFP_KERNEL);
if (!priv) {
- dev_dbg(&pdev->dev, "error allocating priv data\n");
ret = -ENOMEM;
goto fail_request_resource;
}
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1355ff0946b9..34e14746b92e 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -99,18 +99,13 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENODEV;
- goto err_put_hcd;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
sehci = to_spear_ehci(hcd);
sehci->clk = usbh_clk;
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index f6459dfb6f54..5e44407aa099 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -132,7 +132,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
if (allocated_max > uframe_periodic_max) {
ehci_info(ehci,
- "cannot decrease uframe_periodic_max becase "
+ "cannot decrease uframe_periodic_max because "
"periodic bandwidth is already allocated "
"(%u > %u)\n",
allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index aaa01971efe9..19a9af1b4d74 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -460,18 +460,14 @@ static int tegra_ehci_probe(struct platform_device *pdev)
"nvidia,needs-double-reset");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get I/O memory\n");
- err = -ENXIO;
- goto cleanup_clk_en;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto cleanup_clk_en;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
ehci->caps = hcd->regs + 0x100;
ehci->has_hostpc = soc_config->has_hostpc;
@@ -484,7 +480,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!u_phy->otg) {
- dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
err = -ENOMEM;
goto cleanup_phy;
}
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index a9303aff125e..e42a29e8e229 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -42,27 +42,20 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
int retval = 0, irq;
unsigned long val;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENXIO;
- goto err1;
- }
-
hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
if (!hcd) {
retval = -ENOMEM;
goto err1;
}
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err2;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
@@ -82,8 +75,10 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
__raw_writel(val, ehci->regs+PHY1_CTR);
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ retval = irq;
goto err2;
+ }
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
@@ -126,7 +121,6 @@ static struct platform_driver ehci_hcd_w90x900_driver = {
.remove = ehci_w90x900_remove,
.driver = {
.name = "w90x900-ehci",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index eee228a26a0e..6f0577b0a5ae 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -859,6 +859,8 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
struct ehci_driver_overrides {
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
+ int (*port_power)(struct usb_hcd *hcd,
+ int portnum, bool enable);
};
extern void ehci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index a1a1ef521436..c6cebb96fd21 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -821,7 +821,6 @@ MODULE_DEVICE_TABLE(of, of_fhci_match);
static struct platform_driver of_fhci_driver = {
.driver = {
.name = "fsl,usb-fhci",
- .owner = THIS_MODULE,
.of_match_table = of_fhci_match,
},
.probe = of_fhci_probe,
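
The recurring removal of ".owner = THIS_MODULE" from platform drivers in this series is safe on kernels where platform_driver_register() is a macro that forwards THIS_MODULE to __platform_driver_register(), which sets drv->driver.owner itself; the explicit initializer is then redundant. A hedged sketch (foo_* names are placeholders):

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.driver	= {
			.name	= "foo",	/* no .owner: filled in by the driver core */
		},
	};
	/* module_platform_driver() ends up calling platform_driver_register(&foo_driver),
	 * i.e. __platform_driver_register(&foo_driver, THIS_MODULE). */
	module_platform_driver(foo_driver);
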
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 3de1278677d0..ecf02b2623e8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -4958,7 +4958,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
if (allocated_max > uframe_periodic_max) {
fotg210_info(fotg210,
- "cannot decrease uframe_periodic_max becase "
+ "cannot decrease uframe_periodic_max because "
"periodic bandwidth is already allocated "
"(%u > %u)\n",
allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index ac6cd1bfd208..3bad17859cd7 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_FOTG210_H
#define __LINUX_FOTG210_H
+#include <linux/usb/ehci-dbgp.h>
+
/* definitions used for the EHCI driver */
/*
@@ -84,7 +86,7 @@ struct fotg210_hcd { /* one per controller */
/* glue to PCI and HCD framework */
struct fotg210_caps __iomem *caps;
struct fotg210_regs __iomem *regs;
- struct fotg210_dbg_port __iomem *debug;
+ struct ehci_dbg_port __iomem *debug;
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
@@ -293,64 +295,6 @@ struct fotg210_regs {
#define GMIR_MDEV_INT (1 << 0)
};
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct fotg210_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-};
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-#include <linux/init.h>
-extern int __init early_dbgp_init(char *s);
-extern struct console early_dbgp_console;
-#endif /* CONFIG_EARLY_PRINTK_DBGP */
-
-struct usb_hcd;
-
-static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return 1; /* Shouldn't this be 0? */
-}
-
-static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
-{
- return -1;
-}
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-/* Call backs from fotg210 host driver to fotg210 debug driver */
-extern int dbgp_external_startup(struct usb_hcd *);
-extern int dbgp_reset_prep(struct usb_hcd *hcd);
-#else
-static inline int dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return xen_dbgp_reset_prep(hcd);
-}
-static inline int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd);
-}
-#endif
-
/*-------------------------------------------------------------------------*/
#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9162d1b6c0a3..7e325e90d7d9 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -326,7 +326,6 @@ static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
static struct platform_driver fsl_usb2_mph_dr_driver = {
.driver = {
.name = "fsl-usb2-mph-dr",
- .owner = THIS_MODULE,
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index abe42f31559f..664d2aa1239c 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -4893,7 +4893,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
if (allocated_max > uframe_periodic_max) {
fusbh200_info(fusbh200,
- "cannot decrease uframe_periodic_max becase "
+ "cannot decrease uframe_periodic_max because "
"periodic bandwidth is already allocated "
"(%u > %u)\n",
allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
index 6b719e066c3f..d6e5b3d4aa68 100644
--- a/drivers/usb/host/fusbh200.h
+++ b/drivers/usb/host/fusbh200.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_FUSBH200_H
#define __LINUX_FUSBH200_H
+#include <linux/usb/ehci-dbgp.h>
+
/* definitions used for the EHCI driver */
/*
@@ -84,7 +86,7 @@ struct fusbh200_hcd { /* one per controller */
/* glue to PCI and HCD framework */
struct fusbh200_caps __iomem *caps;
struct fusbh200_regs __iomem *regs;
- struct fusbh200_dbg_port __iomem *debug;
+ struct ehci_dbg_port __iomem *debug;
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
@@ -285,64 +287,6 @@ struct fusbh200_regs {
#define BMIER_VBUS_ERR_EN (1<<0)
};
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct fusbh200_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-};
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-#include <linux/init.h>
-extern int __init early_dbgp_init(char *s);
-extern struct console early_dbgp_console;
-#endif /* CONFIG_EARLY_PRINTK_DBGP */
-
-struct usb_hcd;
-
-static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return 1; /* Shouldn't this be 0? */
-}
-
-static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
-{
- return -1;
-}
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-/* Call backs from fusbh200 host driver to fusbh200 debug driver */
-extern int dbgp_external_startup(struct usb_hcd *);
-extern int dbgp_reset_prep(struct usb_hcd *hcd);
-#else
-static inline int dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return xen_dbgp_reset_prep(hcd);
-}
-static inline int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd);
-}
-#endif
-
/*-------------------------------------------------------------------------*/
#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 207bad99301f..eb4efba9f1ad 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1174,11 +1174,11 @@ static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
dev_vdbg(imx21->dev,
"enqueue urb=%p ep=%p len=%d "
- "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
+ "buffer=%p dma=%pad setupBuf=%p setupDma=%pad\n",
urb, ep,
urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma,
- urb->setup_packet, urb->setup_dma);
+ urb->transfer_buffer, &urb->transfer_dma,
+ urb->setup_packet, &urb->setup_dma);
if (usb_pipeisoc(urb->pipe))
return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
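
The imx21 change switches from printing dma_addr_t values with %08X to the %pad printk specifier, which takes a pointer to the dma_addr_t and prints it at the correct width whether dma_addr_t is 32 or 64 bits wide. A minimal usage sketch (variable names are illustrative only):

	dma_addr_t dma;		/* e.g. filled in by dma_map_single() */

	dev_dbg(dev, "buffer mapped at dma=%pad\n", &dma);	/* pass the address, not the value */
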
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 240e792c81a7..31c9c4d0fa0b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1707,7 +1707,6 @@ static struct platform_driver isp116x_driver = {
.resume = isp116x_resume,
.driver = {
.name = hcd_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 4bb37982855e..75e5876f9d7c 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2778,7 +2778,6 @@ static struct platform_driver isp1362_driver = {
.resume = isp1362_resume,
.driver = {
.name = hcd_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index df931e9ba5b5..09254a43bc01 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(of, of_isp1760_match);
static struct platform_driver isp1760_of_driver = {
.driver = {
.name = "nxp-isp1760",
- .owner = THIS_MODULE,
.of_match_table = of_isp1760_match,
},
.probe = of_isp1760_probe,
diff --git a/drivers/usb/host/octeon2-common.c b/drivers/usb/host/octeon2-common.c
deleted file mode 100644
index d9df423f3d12..000000000000
--- a/drivers/usb/host/octeon2-common.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2010, 2011 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-uctlx-defs.h>
-
-static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
-
-static int octeon2_usb_clock_start_cnt;
-
-void octeon2_usb_clocks_start(void)
-{
- u64 div;
- union cvmx_uctlx_if_ena if_ena;
- union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
- union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
- union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
- int i;
- unsigned long io_clk_64_to_ns;
-
-
- mutex_lock(&octeon2_usb_clocks_mutex);
-
- octeon2_usb_clock_start_cnt++;
- if (octeon2_usb_clock_start_cnt != 1)
- goto exit;
-
- io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
-
- /*
- * Step 1: Wait for voltages stable. That surely happened
- * before starting the kernel.
- *
- * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
- */
- if_ena.u64 = 0;
- if_ena.s.en = 1;
- cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
-
- /* Step 3: Configure the reference clock, PHY, and HCLK */
- clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
-
- /*
- * If the UCTL looks like it has already been started, skip
- * the initialization, otherwise bus errors are obtained.
- */
- if (clk_rst_ctl.s.hrst)
- goto end_clock;
- /* 3a */
- clk_rst_ctl.s.p_por = 1;
- clk_rst_ctl.s.hrst = 0;
- clk_rst_ctl.s.p_prst = 0;
- clk_rst_ctl.s.h_clkdiv_rst = 0;
- clk_rst_ctl.s.o_clkdiv_rst = 0;
- clk_rst_ctl.s.h_clkdiv_en = 0;
- clk_rst_ctl.s.o_clkdiv_en = 0;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* 3b */
- /* 12MHz crystal. */
- clk_rst_ctl.s.p_refclk_sel = 0;
- clk_rst_ctl.s.p_refclk_div = 0;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* 3c */
- div = octeon_get_io_clock_rate() / 130000000ull;
-
- switch (div) {
- case 0:
- div = 1;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- break;
- case 5:
- div = 4;
- break;
- case 6:
- case 7:
- div = 6;
- break;
- case 8:
- case 9:
- case 10:
- case 11:
- div = 8;
- break;
- default:
- div = 12;
- break;
- }
- clk_rst_ctl.s.h_div = div;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
- /* Read it back, */
- clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
- clk_rst_ctl.s.h_clkdiv_en = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
- /* 3d */
- clk_rst_ctl.s.h_clkdiv_rst = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* 3e: delay 64 io clocks */
- ndelay(io_clk_64_to_ns);
-
- /*
- * Step 4: Program the power-on reset field in the UCTL
- * clock-reset-control register.
- */
- clk_rst_ctl.s.p_por = 0;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* Step 5: Wait 1 ms for the PHY clock to start. */
- mdelay(1);
-
- /*
- * Step 6: Program the reset input from automatic test
- * equipment field in the UPHY CSR
- */
- uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
- uphy_ctl_status.s.ate_reset = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /* Step 7: Wait for at least 10ns. */
- ndelay(10);
-
- /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
- uphy_ctl_status.s.ate_reset = 0;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /*
- * Step 9: Wait for at least 20ns for UPHY to output PHY clock
- * signals and OHCI_CLK48
- */
- ndelay(20);
-
- /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
- /* 10a */
- clk_rst_ctl.s.o_clkdiv_rst = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* 10b */
- clk_rst_ctl.s.o_clkdiv_en = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* 10c */
- ndelay(io_clk_64_to_ns);
-
- /*
- * Step 11: Program the PHY reset field:
- * UCTL0_CLK_RST_CTL[P_PRST] = 1
- */
- clk_rst_ctl.s.p_prst = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
- /* Step 12: Wait 1 uS. */
- udelay(1);
-
- /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
- clk_rst_ctl.s.hrst = 1;
- cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
-
-end_clock:
- /* Now we can set some other registers. */
-
- for (i = 0; i <= 1; i++) {
- port_ctl_status.u64 =
- cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
- /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
- port_ctl_status.s.txvreftune = 15;
- port_ctl_status.s.txrisetune = 1;
- port_ctl_status.s.txpreemphasistune = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
- port_ctl_status.u64);
- }
-
- /* Set uSOF cycle period to 60,000 bits. */
- cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
-exit:
- mutex_unlock(&octeon2_usb_clocks_mutex);
-}
-EXPORT_SYMBOL(octeon2_usb_clocks_start);
-
-void octeon2_usb_clocks_stop(void)
-{
- mutex_lock(&octeon2_usb_clocks_mutex);
- octeon2_usb_clock_start_cnt--;
- mutex_unlock(&octeon2_usb_clocks_mutex);
-}
-EXPORT_SYMBOL(octeon2_usb_clocks_stop);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index e49eb4f90f5d..dc9e4e61f1c8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -24,12 +24,8 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
-#include <mach/hardware.h>
#include <asm/gpio.h>
-#include <mach/cpu.h>
-
-
#include "ohci.h"
#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
@@ -137,12 +133,6 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct resource *res;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(dev, "hcd probe: missing memory resource\n");
- return -ENXIO;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_dbg(dev, "hcd probe: missing irq resource\n");
@@ -152,14 +142,15 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
iclk = devm_clk_get(dev, "ohci_clk");
if (IS_ERR(iclk)) {
@@ -664,7 +655,6 @@ static struct platform_driver ohci_hcd_at91_driver = {
.resume = ohci_hcd_at91_drv_resume,
.driver = {
.name = "at91_ohci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index df06be6b47f5..1c76999b2184 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -313,16 +313,13 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
- hcd->rsrc_start = mem->start;
- hcd->rsrc_len = resource_size(mem);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(hcd->regs)) {
error = PTR_ERR(hcd->regs);
goto err;
}
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = resource_size(mem);
ohci_hcd_init(hcd_to_ohci(hcd));
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index d28b6583ba02..2cd105be7319 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -63,7 +63,6 @@ static int exynos_ohci_get_phy(struct device *dev,
phy = devm_of_phy_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
- of_node_put(child);
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
@@ -156,19 +155,13 @@ skip_phy:
goto fail_clk;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get I/O memory\n");
- err = -ENXIO;
- goto fail_io;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (!irq) {
@@ -292,7 +285,6 @@ static struct platform_driver exynos_ohci_driver = {
.shutdown = exynos_ohci_shutdown,
.driver = {
.name = "exynos-ohci",
- .owner = THIS_MODULE,
.pm = &exynos_ohci_pm_ops,
.of_match_table = of_match_ptr(exynos_ohci_match),
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index d664edabf14e..1dab9dfbca6a 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1249,11 +1249,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
#endif
-#ifdef CONFIG_USB_OCTEON_OHCI
-#include "ohci-octeon.c"
-#define PLATFORM_DRIVER ohci_octeon_driver
-#endif
-
#ifdef CONFIG_TILE_USB
#include "ohci-tilegx.c"
#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index c2c221a332eb..8ddd8f5470cb 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -153,13 +153,6 @@ static int jz4740_ohci_probe(struct platform_device *pdev)
struct resource *res;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENOENT;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "Failed to get platform irq\n");
@@ -174,14 +167,14 @@ static int jz4740_ohci_probe(struct platform_device *pdev)
jz4740_ohci = hcd_to_jz4740_hcd(hcd);
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_free;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
if (IS_ERR(jz4740_ohci->clk)) {
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index ba180ed0f81c..d9f0481d7258 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -322,7 +322,6 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
static struct platform_driver ohci_hcd_nxp_driver = {
.driver = {
.name = "usb-ohci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ohci_hcd_nxp_match),
},
.probe = ohci_hcd_nxp_probe,
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
deleted file mode 100644
index 15af8954085e..000000000000
--- a/drivers/usb/host/ohci-octeon.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * EHCI HCD glue for Cavium Octeon II SOCs.
- *
- * Loosely based on ehci-au1xxx.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2010 Cavium Networks
- *
- */
-
-#include <linux/platform_device.h>
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-uctlx-defs.h>
-
-#define OCTEON_OHCI_HCD_NAME "octeon-ohci"
-
-/* Common clock init code. */
-void octeon2_usb_clocks_start(void);
-void octeon2_usb_clocks_stop(void);
-
-static void ohci_octeon_hw_start(void)
-{
- union cvmx_uctlx_ohci_ctl ohci_ctl;
-
- octeon2_usb_clocks_start();
-
- ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
- ohci_ctl.s.l2c_addr_msb = 0;
- ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
- ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
- cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
-
-}
-
-static void ohci_octeon_hw_stop(void)
-{
- /* Undo ohci_octeon_start() */
- octeon2_usb_clocks_stop();
-}
-
-static int ohci_octeon_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ret = ohci_init(ohci);
-
- if (ret < 0)
- return ret;
-
- ret = ohci_run(ohci);
-
- if (ret < 0) {
- ohci_err(ohci, "can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static const struct hc_driver ohci_octeon_hc_driver = {
- .description = hcd_name,
- .product_desc = "Octeon OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_octeon_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int ohci_octeon_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct ohci_hcd *ohci;
- void *reg_base;
- struct resource *res_mem;
- int irq;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "No irq assigned\n");
- return -ENODEV;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "No register space assigned\n");
- return -ENODEV;
- }
-
- /* Ohci is a 32-bit device. */
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
- reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
- if (IS_ERR(reg_base)) {
- ret = PTR_ERR(reg_base);
- goto err1;
- }
-
- ohci_octeon_hw_start();
-
- hcd->regs = reg_base;
-
- ohci = hcd_to_ohci(hcd);
-
- /* Octeon OHCI matches CPU endianness. */
-#ifdef __BIG_ENDIAN
- ohci->flags |= OHCI_QUIRK_BE_MMIO;
-#endif
-
- ohci_hcd_init(ohci);
-
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
- goto err2;
- }
-
- device_wakeup_enable(hcd->self.controller);
-
- platform_set_drvdata(pdev, hcd);
-
- return 0;
-
-err2:
- ohci_octeon_hw_stop();
-
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ohci_octeon_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
-
- ohci_octeon_hw_stop();
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver ohci_octeon_driver = {
- .probe = ohci_octeon_drv_probe,
- .remove = ohci_octeon_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = OCTEON_OHCI_HCD_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-MODULE_ALIAS("platform:" OCTEON_OHCI_HCD_NAME);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 0231606d47c2..de7c68602a7e 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -183,7 +183,7 @@ static void start_hnp(struct ohci_hcd *ohci)
otg_start_hnp(hcd->usb_phy->otg);
local_irq_save(flags);
- hcd->usb_phy->state = OTG_STATE_A_SUSPEND;
+ hcd->usb_phy->otg->state = OTG_STATE_A_SUSPEND;
writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
l = omap_readl(OTG_CTRL);
l &= ~OTG_A_BUSREQ;
@@ -481,7 +481,6 @@ static struct platform_driver ohci_hcd_omap_driver = {
.resume = ohci_omap_resume,
#endif
.driver = {
- .owner = THIS_MODULE,
.name = "ohci",
},
};
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 4369299064c7..b81d202b15a2 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -43,20 +43,6 @@ struct ohci_platform_priv {
static const char hcd_name[] = "ohci-platform";
-static int ohci_platform_reset(struct usb_hcd *hcd)
-{
- struct platform_device *pdev = to_platform_device(hcd->self.controller);
- struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (pdata->no_big_frame_no)
- ohci->flags |= OHCI_QUIRK_FRAME_NO;
- if (pdata->num_ports)
- ohci->num_ports = pdata->num_ports;
-
- return ohci_setup(hcd);
-}
-
static int ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -110,7 +96,6 @@ static struct hc_driver __read_mostly ohci_platform_hc_driver;
static const struct ohci_driver_overrides platform_overrides __initconst = {
.product_desc = "Generic Platform OHCI controller",
- .reset = ohci_platform_reset,
.extra_priv_size = sizeof(struct ohci_platform_priv),
};
@@ -149,12 +134,6 @@ static int ohci_platform_probe(struct platform_device *dev)
return irq;
}
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&dev->dev, "no memory resource provided");
- return -ENXIO;
- }
-
hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
@@ -175,6 +154,12 @@ static int ohci_platform_probe(struct platform_device *dev)
if (of_property_read_bool(dev->dev.of_node, "big-endian"))
ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+ if (of_property_read_bool(dev->dev.of_node, "no-big-frame-no"))
+ ohci->flags |= OHCI_QUIRK_FRAME_NO;
+
+ of_property_read_u32(dev->dev.of_node, "num-ports",
+ &ohci->num_ports);
+
priv->phy = devm_phy_get(&dev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
@@ -212,6 +197,10 @@ static int ohci_platform_probe(struct platform_device *dev)
ohci->flags |= OHCI_QUIRK_BE_DESC;
if (pdata->big_endian_mmio)
ohci->flags |= OHCI_QUIRK_BE_MMIO;
+ if (pdata->no_big_frame_no)
+ ohci->flags |= OHCI_QUIRK_FRAME_NO;
+ if (pdata->num_ports)
+ ohci->num_ports = pdata->num_ports;
#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
@@ -236,14 +225,15 @@ static int ohci_platform_probe(struct platform_device *dev)
goto err_reset;
}
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
@@ -298,8 +288,7 @@ static int ohci_platform_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int ohci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -335,11 +324,7 @@ static int ohci_platform_resume(struct device *dev)
ohci_resume(hcd, false);
return 0;
}
-
-#else /* !CONFIG_PM */
-#define ohci_platform_suspend NULL
-#define ohci_platform_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id ohci_platform_ids[] = {
{ .compatible = "generic-ohci", },
@@ -353,10 +338,8 @@ static const struct platform_device_id ohci_platform_table[] = {
};
MODULE_DEVICE_TABLE(platform, ohci_platform_table);
-static const struct dev_pm_ops ohci_platform_pm_ops = {
- .suspend = ohci_platform_suspend,
- .resume = ohci_platform_resume,
-};
+static SIMPLE_DEV_PM_OPS(ohci_platform_pm_ops, ohci_platform_suspend,
+ ohci_platform_resume);
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
@@ -364,7 +347,6 @@ static struct platform_driver ohci_platform_driver = {
.remove = ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "ohci-platform",
.pm = &ohci_platform_pm_ops,
.of_match_table = ohci_platform_ids,
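
The ohci-platform conversion above replaces the hand-written dev_pm_ops plus #else NULL stubs with SIMPLE_DEV_PM_OPS guarded by CONFIG_PM_SLEEP: the macro only wires up the system-sleep callbacks when CONFIG_PM_SLEEP is set, and otherwise emits an ops structure that references nothing, so the callbacks can be compiled out entirely. A hedged sketch of the pattern (foo_* names are placeholders):

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the controller */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* restart the controller */
		return 0;
	}
	#endif

	/* .suspend/.resume are only set when CONFIG_PM_SLEEP is enabled */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
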
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e68f3d02cd1a..ba1bec7db026 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -447,20 +447,13 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- pr_err("no resource of IORESOURCE_MEM");
- retval = -ENXIO;
- goto err;
- }
-
- hcd->rsrc_start = r->start;
- hcd->rsrc_len = resource_size(r);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
}
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = resource_size(r);
/* initialize "struct pxa27x_ohci" */
pxa_ohci = to_pxa27x_ohci(hcd);
@@ -610,7 +603,6 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_ohci_dt_ids),
#ifdef CONFIG_PM
.pm = &ohci_hcd_pxa27x_pm_ops,
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3d753a9d3141..095113ea1fcb 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -462,7 +462,6 @@ static struct platform_driver ohci_hcd_s3c2410_driver = {
.remove = ohci_hcd_s3c2410_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "s3c2410-ohci",
.pm = &ohci_hcd_s3c2410_pm_ops,
},
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 8d5876692e7c..707437c88d03 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -74,20 +74,15 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- retval = -ENODEV;
- goto err_put_hcd;
- }
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = resource_size(res);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
+ hcd->rsrc_start = pdev->resource[0].start;
+ hcd->rsrc_len = resource_size(res);
+
sohci_p = to_spear_ohci(hcd);
sohci_p->clk = usbh_clk;
@@ -176,7 +171,6 @@ static struct platform_driver spear_ohci_hcd_driver = {
.resume = spear_ohci_hcd_drv_resume,
#endif
.driver = {
- .owner = THIS_MODULE,
.name = "spear-ohci",
.of_match_table = spear_ohci_id_table,
},
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 59f424567a8d..bc462288cfb0 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -647,23 +647,22 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
/*-------------------------------------------------------------------------*/
-/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all
- * hardware handles 16 bit reads. That creates a different confusion on
- * some big-endian SOC implementations. Same thing happens with PSW access.
+/*
+ * The HCCA frame number is 16 bits, but is accessed as 32 bits since not all
+ * hardware handles 16 bit reads. Depending on the SoC implementation, the
+ * frame number can wind up in either bits [31:16] (default) or
+ * [15:0] (OHCI_QUIRK_FRAME_NO) on big endian hosts.
+ *
+ * Somewhat similarly, the 16-bit PSW fields in a transfer descriptor are
+ * reordered on BE.
*/
-#ifdef CONFIG_PPC_MPC52xx
-#define big_endian_frame_no_quirk(ohci) (ohci->flags & OHCI_QUIRK_FRAME_NO)
-#else
-#define big_endian_frame_no_quirk(ohci) 0
-#endif
-
static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
{
u32 tmp;
if (big_endian_desc(ohci)) {
tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
- if (!big_endian_frame_no_quirk(ohci))
+ if (!(ohci->flags & OHCI_QUIRK_FRAME_NO))
tmp >>= 16;
} else
tmp = le32_to_cpup((__force __le32 *)&ohci->hcca->frame_no);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 4fe79a2d71a9..75811dd5a9d7 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3846,7 +3846,6 @@ static int oxu_drv_probe(struct platform_device *pdev)
*/
info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
if (!info) {
- dev_dbg(&pdev->dev, "error allocating memory\n");
ret = -EFAULT;
goto error;
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 2f3acebb577a..dd483c13565b 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -233,10 +233,8 @@ commit:
spin_unlock_irqrestore(&amd_lock, flags);
- if (info.nb_dev)
- pci_dev_put(info.nb_dev);
- if (info.smbus_dev)
- pci_dev_put(info.smbus_dev);
+ pci_dev_put(info.nb_dev);
+ pci_dev_put(info.smbus_dev);
} else {
/* no race - commit the result */
@@ -447,10 +445,8 @@ void usb_amd_dev_put(void)
spin_unlock_irqrestore(&amd_lock, flags);
- if (nb)
- pci_dev_put(nb);
- if (smbus)
- pci_dev_put(smbus);
+ pci_dev_put(nb);
+ pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 110b4b9ebeaa..c4bcfaedeec9 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2535,7 +2535,6 @@ static struct platform_driver r8a66597_driver = {
.remove = r8a66597_remove,
.driver = {
.name = hcd_name,
- .owner = THIS_MODULE,
.pm = R8A66597_DEV_PM_OPS,
},
};
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index bf86630b3cea..2894e54e5b9c 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3218,7 +3218,6 @@ static struct platform_driver u132_platform_driver = {
.resume = u132_resume,
.driver = {
.name = hcd_name,
- .owner = THIS_MODULE,
},
};
static int __init u132_hcd_init(void)
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index b987f1d10058..cf8f46003f62 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -86,14 +86,14 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_rmr;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
uhci = hcd_to_uhci(hcd);
uhci->regs = hcd->regs;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 646300cbe5f7..08d402b15482 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -83,9 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-375-xhci") ||
@@ -109,15 +106,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
/*
* Not all platforms have a clk so it is not an error if the
	 * clock does not exist.
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 06433aec81d7..e692e769c50c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -716,9 +716,7 @@ remove_finished_td:
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- /* Clear stopped_td if endpoint is not halted */
- if (!(ep->ep_state & EP_HALTED))
- ep->stopped_td = NULL;
+ ep->stopped_td = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1732,13 +1730,11 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
return;
ep->ep_state |= EP_HALTED;
- ep->stopped_td = td;
ep->stopped_stream = stream_id;
xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
- xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+ xhci_cleanup_stalled_ring(xhci, ep_index, td);
- ep->stopped_td = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
@@ -1813,72 +1809,65 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (skip)
goto td_cleanup;
- if (trb_comp_code == COMP_STOP_INVAL ||
- trb_comp_code == COMP_STOP) {
+ if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
/* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
return 0;
+ }
+ if (trb_comp_code == COMP_STALL ||
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ trb_comp_code)) {
+ /* Issue a reset endpoint command to clear the host side
+ * halt, followed by a set dequeue command to move the
+ * dequeue pointer past the TD.
+ * The class driver clears the device side halt later.
+ */
+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+ ep_ring->stream_id, td, event_trb);
} else {
- if (trb_comp_code == COMP_STALL ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code)) {
- /* Issue a reset endpoint command to clear the host side
- * halt, followed by a set dequeue command to move the
- * dequeue pointer past the TD.
- * The class driver clears the device side halt later.
- */
- xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, ep_ring->stream_id,
- td, event_trb);
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring);
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring);
- }
+ inc_deq(xhci, ep_ring);
+ }
td_cleanup:
- /* Clean up the endpoint's TD list */
- urb = td->urb;
- urb_priv = urb->hcpriv;
-
- /* Do one last check of the actual transfer length.
- * If the host controller said we transferred more data than
- * the buffer length, urb->actual_length will be a very big
- * number (since it's unsigned). Play it safe and say we didn't
- * transfer anything.
- */
- if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB transfer length is wrong, "
- "xHC issue? req. len = %u, "
- "act. len = %u\n",
- urb->transfer_buffer_length,
- urb->actual_length);
- urb->actual_length = 0;
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- *status = -EREMOTEIO;
- else
- *status = 0;
- }
- list_del_init(&td->td_list);
- /* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list))
- list_del_init(&td->cancelled_td_list);
-
- urb_priv->td_cnt++;
- /* Giveback the urb when all the tds are completed */
- if (urb_priv->td_cnt == urb_priv->length) {
- ret = 1;
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
- if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
- == 0) {
- if (xhci->quirks & XHCI_AMD_PLL_FIX)
- usb_amd_quirk_pll_enable();
- }
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than the buffer
+ * length, urb->actual_length will be a very big number (since it's
+ * unsigned). Play it safe and say we didn't transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
+ urb->transfer_buffer_length,
+ urb->actual_length);
+ urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ list_del_init(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
+
+ urb_priv->td_cnt++;
+ /* Giveback the urb when all the tds are completed */
+ if (urb_priv->td_cnt == urb_priv->length) {
+ ret = 1;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
}
}
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index cf3413116aff..01fcbb5eb06e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2912,10 +2912,11 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
- struct usb_device *udev, unsigned int ep_index)
+ unsigned int ep_index, struct xhci_td *td)
{
struct xhci_dequeue_state deq_state;
struct xhci_virt_ep *ep;
+ struct usb_device *udev = td->urb->dev;
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Cleaning up stalled endpoint ring");
@@ -2924,8 +2925,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
- ep_index, ep->stopped_stream, ep->stopped_td,
- &deq_state);
+ ep_index, ep->stopped_stream, td, &deq_state);
if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
return;
@@ -4009,6 +4009,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+ slot_ctx->dev_state = 0;
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Set up evaluate context for LPM MEL change.");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d745715a1e2f..cc7c5bb7cbcf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -358,7 +358,7 @@ struct xhci_op_regs {
/* wake on over-current (enable) */
#define PORT_WKOC_E (1 << 27)
/* bits 28:29 reserved */
-/* true: device is removable - for USB 3.0 roothub emulation */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
#define PORT_DEV_REMOVE (1 << 30)
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR (1 << 31)
@@ -1825,7 +1825,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
- struct usb_device *udev, unsigned int ep_index);
+ unsigned int ep_index, struct xhci_td *td);
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 493c7f268b6f..3071c0ef909b 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -814,15 +814,10 @@ static void adu_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
/* if the device is not opened, then we clean up right now */
- dev_dbg(&dev->udev->dev, "%s : open count %d\n",
- __func__, dev->open_count);
if (!dev->open_count)
adu_delete(dev);
mutex_unlock(&adutux_mutex);
-
- dev_info(&interface->dev, "ADU device adutux%d now disconnected\n",
- (minor - ADU_MINOR_BASE));
}
/* usb specific object needed to register this driver with the usb subsystem */
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 633caf643122..022dc0008f2a 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2472,8 +2472,7 @@ sisusb_delete(struct kref *kref)
if (!sisusb)
return;
- if (sisusb->sisusb_dev)
- usb_put_dev(sisusb->sisusb_dev);
+ usb_put_dev(sisusb->sisusb_dev);
sisusb->sisusb_dev = NULL;
sisusb_free_buffers(sisusb);
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index ae7e1206ca54..258d2f546e43 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -314,10 +314,8 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
int err;
hub = devm_kzalloc(&i2c->dev, sizeof(struct usb3503), GFP_KERNEL);
- if (!hub) {
- dev_err(&i2c->dev, "private data alloc fail\n");
+ if (!hub)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, hub);
hub->regmap = devm_regmap_init_i2c(i2c, &usb3503_regmap_config);
@@ -336,10 +334,8 @@ static int usb3503_platform_probe(struct platform_device *pdev)
struct usb3503 *hub;
hub = devm_kzalloc(&pdev->dev, sizeof(struct usb3503), GFP_KERNEL);
- if (!hub) {
- dev_err(&pdev->dev, "private data alloc fail\n");
+ if (!hub)
return -ENOMEM;
- }
hub->dev = &pdev->dev;
return usb3503_probe(hub);
@@ -405,7 +401,6 @@ static struct platform_driver usb3503_platform_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
- .owner = THIS_MODULE,
},
.probe = usb3503_platform_probe,
};
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index c3a45da11610..343fa6ff9f4b 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -410,7 +410,8 @@ static int yurex_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t yurex_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
+static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
{
struct usb_yurex *dev;
int retval = 0;
@@ -444,7 +445,8 @@ exit:
return retval;
}
-static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t count, loff_t *ppos)
+static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
+ size_t count, loff_t *ppos)
{
struct usb_yurex *dev;
int i, set = 0, retval = 0;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 06cc5d6ea681..9d68372dd9aa 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -58,8 +58,7 @@ config USB_MUSB_DUAL_ROLE
endchoice
-choice
- prompt "Platform Glue Layer"
+comment "Platform Glue Layer"
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -101,8 +100,6 @@ config USB_MUSB_JZ4740
depends on USB_MUSB_GADGET
depends on USB_OTG_BLACKLIST_HUB
-endchoice
-
config USB_MUSB_AM335X_CHILD
tristate
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index a2735df24cc6..220fd4d3b41c 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -149,25 +149,25 @@ static void otg_timer(unsigned long _musb)
*/
devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
break;
@@ -176,7 +176,7 @@ static void otg_timer(unsigned long _musb)
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
else
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -193,9 +193,9 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || (musb->a_wait_bcon == 0 &&
- musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&otg_workaround);
last_timer = jiffies;
return;
@@ -208,7 +208,7 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
mod_timer(&otg_workaround, timeout);
}
@@ -278,27 +278,27 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
* devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&otg_workaround);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
/* NOTE: this must complete power-on within 100 ms. */
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
ret = IRQ_HANDLED;
@@ -324,7 +324,7 @@ eoi:
}
/* Poll for ID change */
- if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -408,7 +408,7 @@ static int am35x_musb_exit(struct musb *musb)
}
/* AM35x supports only 32bit read operation */
-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+static void am35x_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
u32 val;
@@ -438,9 +438,11 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
}
static const struct musb_platform_ops am35x_ops = {
+ .quirks = MUSB_INDEXED_EP,
.init = am35x_musb_init,
.exit = am35x_musb_exit,
+ .read_fifo = am35x_read_fifo,
.enable = am35x_musb_enable,
.disable = am35x_musb_disable,
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ac4422b33dcd..a441a2de8619 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -33,10 +33,45 @@ struct bfin_glue {
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
+static u32 bfin_fifo_offset(u8 epnum)
+{
+ return USB_OFFSET(USB_EP0_FIFO) + (epnum * 8);
+}
+
+static u8 bfin_readb(const void __iomem *addr, unsigned offset)
+{
+ return (u8)(bfin_read16(addr + offset));
+}
+
+static u16 bfin_readw(const void __iomem *addr, unsigned offset)
+{
+ return bfin_read16(addr + offset);
+}
+
+static u32 bfin_readl(const void __iomem *addr, unsigned offset)
+{
+ return (u32)(bfin_read16(addr + offset));
+}
+
+static void bfin_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ bfin_write16(addr + offset, (u16)data);
+}
+
+static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+ bfin_write16(addr + offset, data);
+}
+
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
+{
+ bfin_write16(addr + offset, (u16)data);
+}
+
/*
* Load an endpoint's FIFO
*/
-void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+static void bfin_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
@@ -100,7 +135,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
/*
* Unload an endpoint's FIFO
*/
-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+static void bfin_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
@@ -185,8 +220,8 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
}
/* Start sampling ID pin, when plug is removed from MUSB */
- if ((musb->xceiv->state == OTG_STATE_B_IDLE
- || musb->xceiv->state == OTG_STATE_A_WAIT_BCON) ||
+ if ((musb->xceiv->otg->state == OTG_STATE_B_IDLE
+ || musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON) ||
(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
musb->a_wait_bcon = TIMER_DELAY;
@@ -205,7 +240,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
static u8 toggle;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_IDLE:
case OTG_STATE_A_WAIT_BCON:
/* Start a new session */
@@ -219,7 +254,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
} else {
gpio_set_value(musb->config->gpio_vrsel, 0);
/* Ignore VBUSERROR and SUSPEND IRQ */
@@ -229,7 +264,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
break;
@@ -245,7 +280,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
} else {
gpio_set_value(musb->config->gpio_vrsel, 0);
@@ -280,13 +315,13 @@ static void musb_conn_timer_handler(unsigned long _musb)
break;
default:
dev_dbg(musb->controller, "%s state not handled\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
break;
}
spin_unlock_irqrestore(&musb->lock, flags);
dev_dbg(musb->controller, "state is %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
static void bfin_musb_enable(struct musb *musb)
@@ -307,7 +342,7 @@ static void bfin_musb_set_vbus(struct musb *musb, int is_on)
dev_dbg(musb->controller, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
@@ -433,6 +468,15 @@ static const struct musb_platform_ops bfin_ops = {
.init = bfin_musb_init,
.exit = bfin_musb_exit,
+ .readb = bfin_readb,
+ .writeb = bfin_writeb,
+ .readw = bfin_readw,
+ .writew = bfin_writew,
+ .readl = bfin_readl,
+ .writel = bfin_writel,
+ .fifo_mode = 2,
+ .read_fifo = bfin_read_fifo,
+ .write_fifo = bfin_write_fifo,
.enable = bfin_musb_enable,
.disable = bfin_musb_disable,
@@ -456,16 +500,12 @@ static int bfin_probe(struct platform_device *pdev)
int ret = -ENOMEM;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
goto err0;
- }
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
- if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
+ if (!musb)
goto err0;
- }
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &bfin_dmamask;
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 058775e647ad..9a9c82a4d35d 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -198,20 +198,20 @@ static void otg_timer(unsigned long _musb)
*/
devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
devctl &= ~MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
@@ -226,7 +226,7 @@ static void otg_timer(unsigned long _musb)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
break;
}
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
break;
@@ -248,7 +248,7 @@ static void otg_timer(unsigned long _musb)
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
else
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -265,9 +265,9 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || (musb->a_wait_bcon == 0 &&
- musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&otg_workaround);
last_timer = jiffies;
return;
@@ -280,7 +280,7 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
mod_timer(&otg_workaround, timeout);
}
@@ -341,26 +341,26 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
* devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&otg_workaround);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
ret = IRQ_HANDLED;
@@ -375,7 +375,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
/* Poll for ID change */
- if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -458,9 +458,11 @@ static int da8xx_musb_exit(struct musb *musb)
}
static const struct musb_platform_ops da8xx_ops = {
+ .quirks = MUSB_INDEXED_EP,
.init = da8xx_musb_init,
.exit = da8xx_musb_exit,
+ .fifo_mode = 2,
.enable = da8xx_musb_enable,
.disable = da8xx_musb_disable,
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 110b78415bf0..3c1d9b211b51 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -214,10 +214,10 @@ static void otg_timer(unsigned long _musb)
*/
devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl,
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VFALL:
/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
* seems to mis-handle session "start" otherwise (or in our
@@ -228,7 +228,7 @@ static void otg_timer(unsigned long _musb)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
break;
}
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
break;
@@ -251,7 +251,7 @@ static void otg_timer(unsigned long _musb)
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
else
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -325,20 +325,20 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
* to stop registering in devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&otg_workaround);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
@@ -348,7 +348,7 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
davinci_musb_source_power(musb, drvvbus, 0);
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
retval = IRQ_HANDLED;
@@ -361,7 +361,7 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
/* poll for ID change */
- if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -520,10 +520,8 @@ static int davinci_probe(struct platform_device *pdev)
int ret = -ENOMEM;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
goto err0;
- }
clk = devm_clk_get(&pdev->dev, "usb");
if (IS_ERR(clk)) {
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index d1187290d4e3..bb7b26325a74 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -106,6 +106,8 @@ static int jz4740_musb_exit(struct musb *musb)
}
static const struct musb_platform_ops jz4740_musb_ops = {
+ .quirks = MUSB_INDEXED_EP,
+ .fifo_mode = 2,
.init = jz4740_musb_init,
.exit = jz4740_musb_exit,
};
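Note: jz4740's platform difference is now pure data: the MUSB_INDEXED_EP quirk picks the INDEX-register mapping and fifo_mode 2 replaces the old compile-time default. The core turns the quirk into function pointers in musb_init_controller(), shown further down in this diff; roughly:

/* sketch of the mapping selection done later in musb_init_controller() */
if (musb->io.quirks & MUSB_INDEXED_EP) {
	musb->io.ep_offset = musb_indexed_ep_offset;
	musb->io.ep_select = musb_indexed_ep_select;
} else {
	musb->io.ep_offset = musb_flat_ep_offset;
	musb->io.ep_select = musb_flat_ep_select;
}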
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b841ee0bff06..34cce3e38c49 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -224,12 +224,67 @@ static struct usb_phy_io_ops musb_ulpi_access = {
/*-------------------------------------------------------------------------*/
-#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
+static u32 musb_default_fifo_offset(u8 epnum)
+{
+ return 0x20 + (epnum * 4);
+}
+
+/* "flat" mapping: each endpoint has its own i/o address */
+static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
+{
+}
+
+static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x100 + (0x10 * epnum) + offset;
+}
+
+/* "indexed" mapping: INDEX register controls register bank select */
+static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
+{
+ musb_writeb(mbase, MUSB_INDEX, epnum);
+}
+
+static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x10 + offset;
+}
+
+static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+{
+ return __raw_readb(addr + offset);
+}
+
+static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ __raw_writeb(data, addr + offset);
+}
+
+static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+{
+ return __raw_readw(addr + offset);
+}
+
+static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+ __raw_writew(data, addr + offset);
+}
+
+static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
/*
* Load an endpoint's FIFO
*/
-void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
+ const u8 *src)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
@@ -270,11 +325,10 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
}
}
-#if !defined(CONFIG_USB_MUSB_AM35X)
/*
* Unload an endpoint's FIFO
*/
-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
@@ -312,10 +366,40 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
ioread8_rep(fifo, dst, len);
}
}
-#endif
-#endif /* normal PIO */
+/*
+ * Old style IO functions
+ */
+u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+EXPORT_SYMBOL_GPL(musb_readb);
+
+void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+EXPORT_SYMBOL_GPL(musb_writeb);
+
+u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+EXPORT_SYMBOL_GPL(musb_readw);
+
+void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+EXPORT_SYMBOL_GPL(musb_writew);
+
+u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
+EXPORT_SYMBOL_GPL(musb_readl);
+void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
+EXPORT_SYMBOL_GPL(musb_writel);
+
+/*
+ * New style IO functions
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ return hw_ep->musb->io.write_fifo(hw_ep, len, src);
+}
/*-------------------------------------------------------------------------*/
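Note: the two mappings added above differ only in how a per-endpoint register address is formed. As an illustrative calculation, using 0x12 for TxCSR (the offset listed in the old debugfs register map below):

/* flat model: every endpoint has its own 0x10-byte register window */
musb_flat_ep_offset(3, 0x12);     /* 0x100 + 0x10 * 3 + 0x12 = 0x142 */

/* indexed model: select the endpoint first, then use the shared window */
musb_indexed_ep_select(mbase, 3); /* writes 3 to MUSB_INDEX */
musb_indexed_ep_offset(3, 0x12);  /* 0x10 + 0x12 = 0x22 */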
@@ -360,23 +444,23 @@ static void musb_otg_timer_func(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_B_WAIT_ACON:
dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
musb_g_disconnect(musb);
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
break;
case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
dev_dbg(musb->controller, "HNP: %s timeout\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
musb_platform_set_vbus(musb, 0);
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
break;
default:
dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
spin_unlock_irqrestore(&musb->lock, flags);
}
@@ -391,19 +475,19 @@ void musb_hnp_stop(struct musb *musb)
u8 reg;
dev_dbg(musb->controller, "HNP: stop from %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_PERIPHERAL:
musb_g_disconnect(musb);
dev_dbg(musb->controller, "HNP: back to %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
break;
case OTG_STATE_B_HOST:
dev_dbg(musb->controller, "HNP: Disabling HR\n");
if (hcd)
hcd->self.is_b_host = 0;
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
MUSB_DEV_MODE(musb);
reg = musb_readb(mbase, MUSB_POWER);
reg |= MUSB_POWER_SUSPENDM;
@@ -412,7 +496,7 @@ void musb_hnp_stop(struct musb *musb)
break;
default:
dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
/*
@@ -423,6 +507,7 @@ void musb_hnp_stop(struct musb *musb)
musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
+static void musb_generic_disable(struct musb *musb);
/*
* Interrupt Service Routine to record USB "global" interrupts.
* Since these do not happen often and signify things of
@@ -449,13 +534,13 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
if (int_usb & MUSB_INTR_RESUME) {
handled = IRQ_HANDLED;
- dev_dbg(musb->controller, "RESUME (%s)\n", usb_otg_state_string(musb->xceiv->state));
+ dev_dbg(musb->controller, "RESUME (%s)\n", usb_otg_state_string(musb->xceiv->otg->state));
if (devctl & MUSB_DEVCTL_HM) {
void __iomem *mbase = musb->mregs;
u8 power;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
/* remote wakeup? later, GetPortStatus
* will stop RESUME signaling
@@ -478,29 +563,26 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
+ msecs_to_jiffies(20);
- schedule_delayed_work(
- &musb->finish_resume_work,
- msecs_to_jiffies(20));
+ musb->need_finish_resume = 1;
- musb->xceiv->state = OTG_STATE_A_HOST;
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
musb->is_active = 1;
- musb_host_resume_root_hub(musb);
break;
case OTG_STATE_B_WAIT_ACON:
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 1;
MUSB_DEV_MODE(musb);
break;
default:
WARNING("bogus %s RESUME (%s)\n",
"host",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
} else {
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
/* possibly DISCONNECT is upcoming */
- musb->xceiv->state = OTG_STATE_A_HOST;
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
musb_host_resume_root_hub(musb);
break;
case OTG_STATE_B_WAIT_ACON:
@@ -523,7 +605,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
default:
WARNING("bogus %s RESUME (%s)\n",
"peripheral",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
}
}
@@ -539,7 +621,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
/* IRQ arrives from ID pin sense or (later, if VBUS power
* is removed) SRP. responses are time critical:
@@ -550,7 +632,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
musb->ep0_stage = MUSB_EP0_START;
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
musb_platform_set_vbus(musb, 1);
@@ -576,7 +658,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
* REVISIT: do delays from lots of DEBUG_KERNEL checks
* make trouble here, keeping VBUS < 4.4V ?
*/
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
/* recovery is dicey once we've gotten past the
* initial stages of enumeration, but if VBUS
@@ -605,7 +687,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
devctl,
({ char *s;
switch (devctl & MUSB_DEVCTL_VBUS) {
@@ -630,10 +712,10 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (int_usb & MUSB_INTR_SUSPEND) {
dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
- usb_otg_state_string(musb->xceiv->state), devctl);
+ usb_otg_state_string(musb->xceiv->otg->state), devctl);
handled = IRQ_HANDLED;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_PERIPHERAL:
/* We also come here if the cable is removed, since
* this silicon doesn't report ID-no-longer-grounded.
@@ -657,7 +739,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(
@@ -670,7 +752,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
musb->is_active = musb->hcd->self.b_hnp_enable;
break;
case OTG_STATE_B_HOST:
@@ -713,7 +795,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
/* indicate new connection to OTG machine */
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
@@ -725,7 +807,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_B_WAIT_ACON:
dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
b_host:
- musb->xceiv->state = OTG_STATE_B_HOST;
+ musb->xceiv->otg->state = OTG_STATE_B_HOST;
if (musb->hcd)
musb->hcd->self.is_b_host = 1;
del_timer(&musb->otg_timer);
@@ -733,7 +815,7 @@ b_host:
default:
if ((devctl & MUSB_DEVCTL_VBUS)
== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
- musb->xceiv->state = OTG_STATE_A_HOST;
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
if (hcd)
hcd->self.is_b_host = 0;
}
@@ -743,16 +825,16 @@ b_host:
musb_host_poke_root_hub(musb);
dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
- usb_otg_state_string(musb->xceiv->state), devctl);
+ usb_otg_state_string(musb->xceiv->otg->state), devctl);
}
if (int_usb & MUSB_INTR_DISCONNECT) {
dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
MUSB_MODE(musb), devctl);
handled = IRQ_HANDLED;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_SUSPEND:
musb_host_resume_root_hub(musb);
@@ -770,7 +852,7 @@ b_host:
musb_root_disconnect(musb);
if (musb->hcd)
musb->hcd->self.is_b_host = 0;
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
MUSB_DEV_MODE(musb);
musb_g_disconnect(musb);
break;
@@ -786,7 +868,7 @@ b_host:
break;
default:
WARNING("unhandled DISCONNECT transition (%s)\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
break;
}
}
@@ -812,15 +894,15 @@ b_host:
}
} else {
dev_dbg(musb->controller, "BUS RESET as %s\n",
- usb_otg_state_string(musb->xceiv->state));
- switch (musb->xceiv->state) {
+ usb_otg_state_string(musb->xceiv->otg->state));
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
musb_g_reset(musb);
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
TA_WAIT_BCON(musb));
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
@@ -831,27 +913,29 @@ b_host:
break;
case OTG_STATE_B_WAIT_ACON:
dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
- usb_otg_state_string(musb->xceiv->state));
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ usb_otg_state_string(musb->xceiv->otg->state));
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
break;
case OTG_STATE_B_IDLE:
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
/* FALLTHROUGH */
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
break;
default:
dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
}
}
/* handle babble condition */
- if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
+ if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb)) {
+ musb_generic_disable(musb);
schedule_delayed_work(&musb->recover_work,
msecs_to_jiffies(100));
+ }
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -1032,21 +1116,7 @@ static void musb_shutdown(struct platform_device *pdev)
* We don't currently use dynamic fifo setup capability to do anything
* more than selecting one of a bunch of predefined configurations.
*/
-#if defined(CONFIG_USB_MUSB_TUSB6010) \
- || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \
- || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
- || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \
- || defined(CONFIG_USB_MUSB_AM35X) \
- || defined(CONFIG_USB_MUSB_AM35X_MODULE) \
- || defined(CONFIG_USB_MUSB_DSPS) \
- || defined(CONFIG_USB_MUSB_DSPS_MODULE)
-static ushort fifo_mode = 4;
-#elif defined(CONFIG_USB_MUSB_UX500) \
- || defined(CONFIG_USB_MUSB_UX500_MODULE)
-static ushort fifo_mode = 5;
-#else
-static ushort fifo_mode = 2;
-#endif
+static ushort fifo_mode;
/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
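Note: the compile-time fifo_mode table above becomes a per-glue value: platforms set musb_platform_ops.fifo_mode (as bfin_ops, da8xx_ops and jz4740_musb_ops do elsewhere in this diff) and the core applies it during musb_init_controller(), falling back to 4 when the glue leaves it unset:

/* sketch of the fallback wired up later in musb_init_controller() */
if (musb->ops->fifo_mode)
	fifo_mode = musb->ops->fifo_mode;
else
	fifo_mode = 4;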
@@ -1456,20 +1526,25 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
for (i = 0; i < musb->nr_endpoints; i++) {
struct musb_hw_ep *hw_ep = musb->endpoints + i;
- hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
- hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
- hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
- hw_ep->fifo_sync_va =
- musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
-
- if (i == 0)
- hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
- else
- hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+ hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
+ if (musb->io.quirks & MUSB_IN_TUSB) {
+ hw_ep->fifo_async = musb->async + 0x400 +
+ musb->io.fifo_offset(i);
+ hw_ep->fifo_sync = musb->sync + 0x400 +
+ musb->io.fifo_offset(i);
+ hw_ep->fifo_sync_va =
+ musb->sync_va + 0x400 + musb->io.fifo_offset(i);
+
+ if (i == 0)
+ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+ else
+ hw_ep->conf = mbase + 0x400 +
+ (((i - 1) & 0xf) << 2);
+ }
#endif
- hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+ hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
@@ -1630,7 +1705,7 @@ musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
int ret = -EINVAL;
spin_lock_irqsave(&musb->lock, flags);
- ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->state));
+ ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
@@ -1675,7 +1750,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
spin_lock_irqsave(&musb->lock, flags);
/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
- if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
+ if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
musb->is_active = 0;
musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
spin_unlock_irqrestore(&musb->lock, flags);
@@ -1743,8 +1818,8 @@ static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work);
- if (musb->xceiv->state != musb->xceiv_old_state) {
- musb->xceiv_old_state = musb->xceiv->state;
+ if (musb->xceiv->otg->state != musb->xceiv_old_state) {
+ musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
}
@@ -1903,6 +1978,18 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb->ops = plat->platform_ops;
musb->port_mode = plat->mode;
+ /*
+ * Initialize the default IO functions. At least omap2430 needs
+ * these early. We initialize the platform specific IO functions
+ * later on.
+ */
+ musb_readb = musb_default_readb;
+ musb_writeb = musb_default_writeb;
+ musb_readw = musb_default_readw;
+ musb_writew = musb_default_writew;
+ musb_readl = musb_default_readl;
+ musb_writel = musb_default_writel;
+
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
@@ -1924,6 +2011,57 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
goto fail2;
}
+ if (musb->ops->quirks)
+ musb->io.quirks = musb->ops->quirks;
+
+ /* At least tusb6010 has its own offsets.. */
+ if (musb->ops->ep_offset)
+ musb->io.ep_offset = musb->ops->ep_offset;
+ if (musb->ops->ep_select)
+ musb->io.ep_select = musb->ops->ep_select;
+
+ /* ..and some devices use indexed offset or flat offset */
+ if (musb->io.quirks & MUSB_INDEXED_EP) {
+ musb->io.ep_offset = musb_indexed_ep_offset;
+ musb->io.ep_select = musb_indexed_ep_select;
+ } else {
+ musb->io.ep_offset = musb_flat_ep_offset;
+ musb->io.ep_select = musb_flat_ep_select;
+ }
+
+ if (musb->ops->fifo_mode)
+ fifo_mode = musb->ops->fifo_mode;
+ else
+ fifo_mode = 4;
+
+ if (musb->ops->fifo_offset)
+ musb->io.fifo_offset = musb->ops->fifo_offset;
+ else
+ musb->io.fifo_offset = musb_default_fifo_offset;
+
+ if (musb->ops->readb)
+ musb_readb = musb->ops->readb;
+ if (musb->ops->writeb)
+ musb_writeb = musb->ops->writeb;
+ if (musb->ops->readw)
+ musb_readw = musb->ops->readw;
+ if (musb->ops->writew)
+ musb_writew = musb->ops->writew;
+ if (musb->ops->readl)
+ musb_readl = musb->ops->readl;
+ if (musb->ops->writel)
+ musb_writel = musb->ops->writel;
+
+ if (musb->ops->read_fifo)
+ musb->io.read_fifo = musb->ops->read_fifo;
+ else
+ musb->io.read_fifo = musb_default_read_fifo;
+
+ if (musb->ops->write_fifo)
+ musb->io.write_fifo = musb->ops->write_fifo;
+ else
+ musb->io.write_fifo = musb_default_write_fifo;
+
if (!musb->xceiv->io_ops) {
musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
@@ -1983,10 +2121,10 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (musb->xceiv->otg->default_a) {
MUSB_HST_MODE(musb);
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
} else {
MUSB_DEV_MODE(musb);
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
switch (musb->port_mode) {
@@ -2080,10 +2218,10 @@ static int musb_probe(struct platform_device *pdev)
struct resource *iomem;
void __iomem *base;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem || irq <= 0)
+ if (irq <= 0)
return -ENODEV;
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, iomem);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -2297,9 +2435,11 @@ static int musb_suspend(struct device *dev)
return 0;
}
-static int musb_resume_noirq(struct device *dev)
+static int musb_resume(struct device *dev)
{
struct musb *musb = dev_to_musb(dev);
+ u8 devctl;
+ u8 mask;
/*
* For static cmos like DaVinci, register values were preserved
@@ -2313,6 +2453,23 @@ static int musb_resume_noirq(struct device *dev)
musb_restore_context(musb);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
+ if ((devctl & mask) != (musb->context.devctl & mask))
+ musb->port1_status = 0;
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
+ }
+
+ /*
+ * The USB HUB code expects the device to be in RPM_ACTIVE once it comes
+ * out of suspend
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
return 0;
}

@@ -2348,7 +2505,7 @@ static int musb_runtime_resume(struct device *dev)
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
- .resume_noirq = musb_resume_noirq,
+ .resume = musb_resume,
.runtime_suspend = musb_runtime_suspend,
.runtime_resume = musb_runtime_resume,
};
@@ -2362,7 +2519,6 @@ static struct platform_driver musb_driver = {
.driver = {
.name = (char *)musb_driver_name,
.bus = &platform_bus_type,
- .owner = THIS_MODULE,
.pm = MUSB_DEV_PM_OPS,
},
.probe = musb_probe,
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 414e57a984bb..5e65958f7915 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -124,41 +124,6 @@ enum musb_g_ep0_state {
#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
-
-/*************************** REGISTER ACCESS ********************************/
-
-/* Endpoint registers (other than dynfifo setup) can be accessed either
- * directly with the "flat" model, or after setting up an index register.
- */
-
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
- || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
- || defined(CONFIG_ARCH_OMAP4)
-/* REVISIT indexed access seemed to
- * misbehave (on DaVinci) for at least peripheral IN ...
- */
-#define MUSB_FLAT_REG
-#endif
-
-/* TUSB mapping: "flat" plus ep0 special cases */
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
-#define musb_ep_select(_mbase, _epnum) \
- musb_writeb((_mbase), MUSB_INDEX, (_epnum))
-#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
-
-/* "flat" mapping: each endpoint has its own i/o address */
-#elif defined(MUSB_FLAT_REG)
-#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
-#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
-
-/* "indexed" mapping: INDEX register controls register bank select */
-#else
-#define musb_ep_select(_mbase, _epnum) \
- musb_writeb((_mbase), MUSB_INDEX, (_epnum))
-#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
-#endif
-
/****************************** FUNCTIONS ********************************/
#define MUSB_HST_MODE(_musb)\
@@ -173,8 +138,25 @@ enum musb_g_ep0_state {
/******************************** TYPES *************************************/
+struct musb_io;
+
/**
* struct musb_platform_ops - Operations passed to musb_core by HW glue layer
+ * @quirks: flags for platform specific quirks
+ * @enable: enable device
+ * @disable: disable device
+ * @ep_offset: returns the end point offset
+ * @ep_select: selects the specified end point
+ * @fifo_mode: sets the fifo mode
+ * @fifo_offset: returns the fifo offset
+ * @readb: read 8 bits
+ * @writeb: write 8 bits
+ * @readw: read 16 bits
+ * @writew: write 16 bits
+ * @readl: read 32 bits
+ * @writel: write 32 bits
+ * @read_fifo: reads the fifo
+ * @write_fifo: writes to fifo
* @init: turns on clocks, sets up platform-specific registers, etc
* @exit: undoes @init
* @set_mode: forcefully changes operating mode
@@ -184,12 +166,34 @@ enum musb_g_ep0_state {
* @adjust_channel_params: pre check for standard dma channel_program func
*/
struct musb_platform_ops {
+
+#define MUSB_DMA_UX500 BIT(6)
+#define MUSB_DMA_CPPI41 BIT(5)
+#define MUSB_DMA_CPPI BIT(4)
+#define MUSB_DMA_TUSB_OMAP BIT(3)
+#define MUSB_DMA_INVENTRA BIT(2)
+#define MUSB_IN_TUSB BIT(1)
+#define MUSB_INDEXED_EP BIT(0)
+ u32 quirks;
+
int (*init)(struct musb *musb);
int (*exit)(struct musb *musb);
void (*enable)(struct musb *musb);
void (*disable)(struct musb *musb);
+ u32 (*ep_offset)(u8 epnum, u16 offset);
+ void (*ep_select)(void __iomem *mbase, u8 epnum);
+ u16 fifo_mode;
+ u32 (*fifo_offset)(u8 epnum);
+ u8 (*readb)(const void __iomem *addr, unsigned offset);
+ void (*writeb)(void __iomem *addr, unsigned offset, u8 data);
+ u16 (*readw)(const void __iomem *addr, unsigned offset);
+ void (*writew)(void __iomem *addr, unsigned offset, u16 data);
+ u32 (*readl)(const void __iomem *addr, unsigned offset);
+ void (*writel)(void __iomem *addr, unsigned offset, u32 data);
+ void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
+ void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
int (*set_mode)(struct musb *musb, u8 mode);
void (*try_idle)(struct musb *musb, unsigned long timeout);
int (*reset)(struct musb *musb);
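Note: taken together, the new fields let a glue layer describe its register quirks as data rather than #ifdefs. A minimal sketch of a hypothetical indexed-EP glue (example_musb_init/example_musb_exit are placeholders, not functions from this patch):

static const struct musb_platform_ops example_ops = {
	.quirks    = MUSB_INDEXED_EP,  /* use the INDEX-register mapping */
	.fifo_mode = 2,                /* consumed by musb_init_controller() */
	.init      = example_musb_init,
	.exit      = example_musb_exit,
	/* hooks left NULL fall back to the musb_default_* helpers */
};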
@@ -212,8 +216,7 @@ struct musb_hw_ep {
void __iomem *fifo;
void __iomem *regs;
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
void __iomem *conf;
#endif
@@ -230,8 +233,7 @@ struct musb_hw_ep {
struct dma_channel *tx_channel;
struct dma_channel *rx_channel;
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
/* TUSB has "asynchronous" and "synchronous" dma modes */
dma_addr_t fifo_async;
dma_addr_t fifo_sync;
@@ -292,6 +294,7 @@ struct musb {
/* device lock */
spinlock_t lock;
+ struct musb_io io;
const struct musb_platform_ops *ops;
struct musb_context_registers context;
@@ -334,8 +337,7 @@ struct musb {
void __iomem *ctrl_base;
void __iomem *mregs;
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
dma_addr_t async;
dma_addr_t sync;
void __iomem *sync_va;
@@ -390,6 +392,7 @@ struct musb {
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_suspended:1;
+ unsigned need_finish_resume :1;
/* may_wakeup means remote wakeup is enabled */
unsigned may_wakeup:1;
@@ -474,7 +477,7 @@ static inline int musb_read_fifosize(struct musb *musb,
u8 reg = 0;
/* read from core using indexed model */
- reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
+ reg = musb_readb(mbase, musb->io.ep_offset(epnum, MUSB_FIFOSIZE));
/* 0's returned when no more endpoints */
if (!reg)
return -ENODEV;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 5a9b977fbc19..f64fd964dc6d 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -253,6 +253,7 @@ static void cppi41_dma_callback(void *private_data)
cppi41_trans_done(cppi41_channel);
} else {
struct cppi41_dma_controller *controller;
+ int is_hs = 0;
/*
* On AM335x it has been observed that the TX interrupt fires
* too early that means the TXFIFO is not yet empty but the DMA
@@ -265,7 +266,14 @@ static void cppi41_dma_callback(void *private_data)
*/
controller = cppi41_channel->controller;
- if (musb->g.speed == USB_SPEED_HIGH) {
+ if (is_host_active(musb)) {
+ if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
+ is_hs = 1;
+ } else {
+ if (musb->g.speed == USB_SPEED_HIGH)
+ is_hs = 1;
+ }
+ if (is_hs) {
unsigned wait = 25;
do {
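Note: the TX-FIFO drain workaround above now runs in both roles, so the high-speed check can no longer rely on the gadget alone: host mode consults the root-hub port status, peripheral mode the gadget speed. Restated as a helper (a sketch, not code from this patch):

static bool cppi41_is_high_speed(struct musb *musb)
{
	if (is_host_active(musb))
		return musb->port1_status & USB_PORT_STAT_HIGH_SPEED;
	return musb->g.speed == USB_SPEED_HIGH;
}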
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 4c216790e86b..ad3701a97389 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -49,33 +49,36 @@ struct musb_register_map {
};
static const struct musb_register_map musb_regmap[] = {
- { "FAddr", 0x00, 8 },
- { "Power", 0x01, 8 },
- { "Frame", 0x0c, 16 },
- { "Index", 0x0e, 8 },
- { "Testmode", 0x0f, 8 },
- { "TxMaxPp", 0x10, 16 },
- { "TxCSRp", 0x12, 16 },
- { "RxMaxPp", 0x14, 16 },
- { "RxCSR", 0x16, 16 },
- { "RxCount", 0x18, 16 },
- { "ConfigData", 0x1f, 8 },
- { "DevCtl", 0x60, 8 },
- { "MISC", 0x61, 8 },
- { "TxFIFOsz", 0x62, 8 },
- { "RxFIFOsz", 0x63, 8 },
- { "TxFIFOadd", 0x64, 16 },
- { "RxFIFOadd", 0x66, 16 },
- { "VControl", 0x68, 32 },
- { "HWVers", 0x6C, 16 },
- { "EPInfo", 0x78, 8 },
- { "RAMInfo", 0x79, 8 },
- { "LinkInfo", 0x7A, 8 },
- { "VPLen", 0x7B, 8 },
- { "HS_EOF1", 0x7C, 8 },
- { "FS_EOF1", 0x7D, 8 },
- { "LS_EOF1", 0x7E, 8 },
- { "SOFT_RST", 0x7F, 8 },
+ { "FAddr", MUSB_FADDR, 8 },
+ { "Power", MUSB_POWER, 8 },
+ { "Frame", MUSB_FRAME, 16 },
+ { "Index", MUSB_INDEX, 8 },
+ { "Testmode", MUSB_TESTMODE, 8 },
+ { "TxMaxPp", MUSB_TXMAXP, 16 },
+ { "TxCSRp", MUSB_TXCSR, 16 },
+ { "RxMaxPp", MUSB_RXMAXP, 16 },
+ { "RxCSR", MUSB_RXCSR, 16 },
+ { "RxCount", MUSB_RXCOUNT, 16 },
+ { "ConfigData", MUSB_CONFIGDATA,8 },
+ { "IntrRxE", MUSB_INTRRXE, 16 },
+ { "IntrTxE", MUSB_INTRTXE, 16 },
+ { "IntrUsbE", MUSB_INTRUSBE, 8 },
+ { "DevCtl", MUSB_DEVCTL, 8 },
+ { "BabbleCtl", MUSB_BABBLE_CTL,8 },
+ { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
+ { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
+ { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
+ { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
+ { "VControl", 0x68, 32 },
+ { "HWVers", 0x69, 16 },
+ { "EPInfo", MUSB_EPINFO, 8 },
+ { "RAMInfo", MUSB_RAMINFO, 8 },
+ { "LinkInfo", MUSB_LINKINFO, 8 },
+ { "VPLen", MUSB_VPLEN, 8 },
+ { "HS_EOF1", MUSB_HS_EOF1, 8 },
+ { "FS_EOF1", MUSB_FS_EOF1, 8 },
+ { "LS_EOF1", MUSB_LS_EOF1, 8 },
+ { "SOFT_RST", 0x7F, 8 },
{ "DMA_CNTLch0", 0x204, 16 },
{ "DMA_ADDRch0", 0x208, 32 },
{ "DMA_COUNTch0", 0x20C, 32 },
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 48bc09e7b83b..53bd0e71d19f 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -179,9 +179,9 @@ static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || (musb->a_wait_bcon == 0 &&
- musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&glue->timer);
glue->last_timer = jiffies;
return;
@@ -201,7 +201,7 @@ static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
glue->last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
mod_timer(&glue->timer, timeout);
}
@@ -265,10 +265,10 @@ static void otg_timer(unsigned long _musb)
*/
devctl = dsps_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
@@ -277,10 +277,10 @@ static void otg_timer(unsigned long _musb)
case OTG_STATE_A_IDLE:
case OTG_STATE_B_IDLE:
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
@@ -288,7 +288,7 @@ static void otg_timer(unsigned long _musb)
mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
dsps_writel(musb->ctrl_base, wrp->coreintr_set,
MUSB_INTR_VBUSERROR << wrp->usb_shift);
break;
@@ -373,26 +373,26 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
* devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&glue->timer,
jiffies + wrp->poll_seconds * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
musb->xceiv->otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
del_timer(&glue->timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
musb->xceiv->otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
/* NOTE: this must complete power-on within 100 ms. */
dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
err ? " ERROR" : "",
devctl);
ret = IRQ_HANDLED;
@@ -402,7 +402,7 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
ret |= musb_interrupt(musb);
/* Poll for ID change in OTG port mode */
- if (musb->xceiv->state == OTG_STATE_B_IDLE &&
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
out:
@@ -447,9 +447,6 @@ static int dsps_musb_init(struct musb *musb)
int ret;
r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
- if (!r)
- return -EINVAL;
-
reg_base = devm_ioremap_resource(dev, r);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
@@ -636,6 +633,7 @@ static int dsps_musb_reset(struct musb *musb)
}
static struct musb_platform_ops dsps_ops = {
+ .quirks = MUSB_INDEXED_EP,
.init = dsps_musb_init,
.exit = dsps_musb_exit,
@@ -729,7 +727,6 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
config = devm_kzalloc(&parent->dev, sizeof(*config), GFP_KERNEL);
if (!config) {
- dev_err(dev, "failed to allocate musb hdrc config\n");
ret = -ENOMEM;
goto err;
}
@@ -781,10 +778,8 @@ static int dsps_probe(struct platform_device *pdev)
/* allocate glue */
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "unable to allocate glue memory\n");
+ if (!glue)
return -ENOMEM;
- }
glue->dev = &pdev->dev;
glue->wrp = wrp;
@@ -906,7 +901,7 @@ static int dsps_resume(struct device *dev)
dsps_writel(mbase, wrp->mode, glue->context.mode);
dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
- if (musb->xceiv->state == OTG_STATE_B_IDLE &&
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 24c8c0219790..49b04cb6f5ca 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1546,7 +1546,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_B_PERIPHERAL:
/* NOTE: OTG state machine doesn't include B_SUSPENDED;
* that's part of the standard usb 1.1 state machine, and
@@ -1587,7 +1587,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
goto done;
default:
dev_dbg(musb->controller, "Unhandled wake: %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
goto done;
}
@@ -1684,8 +1684,7 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
static int musb_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
-static int musb_gadget_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
+static int musb_gadget_stop(struct usb_gadget *g);
static const struct usb_gadget_ops musb_gadget_operations = {
.get_frame = musb_gadget_get_frame,
@@ -1792,7 +1791,7 @@ int musb_gadget_setup(struct musb *musb)
MUSB_DEV_MODE(musb);
musb->xceiv->otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
/* this "gadget" abstracts/virtualizes the controller */
musb->g.name = musb_driver_name;
@@ -1851,8 +1850,6 @@ static int musb_gadget_start(struct usb_gadget *g,
pm_runtime_get_sync(musb->controller);
- dev_dbg(musb->controller, "registering driver %s\n", driver->function);
-
musb->softconnect = 0;
musb->gadget_driver = driver;
@@ -1860,7 +1857,7 @@ static int musb_gadget_start(struct usb_gadget *g,
musb->is_active = 1;
otg_set_peripheral(otg, &musb->g);
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
spin_unlock_irqrestore(&musb->lock, flags);
musb_start(musb);
@@ -1925,8 +1922,7 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
*
* @param driver the gadget driver to unregister
*/
-static int musb_gadget_stop(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+static int musb_gadget_stop(struct usb_gadget *g)
{
struct musb *musb = gadget_to_musb(g);
unsigned long flags;
@@ -1945,13 +1941,10 @@ static int musb_gadget_stop(struct usb_gadget *g,
(void) musb_gadget_vbus_draw(&musb->g, 0);
- musb->xceiv->state = OTG_STATE_UNDEFINED;
- stop_activity(musb, driver);
+ musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, NULL);
otg_set_peripheral(musb->xceiv->otg, NULL);
- dev_dbg(musb->controller, "unregistering driver %s\n",
- driver ? driver->function : "(removed)");
-
musb->is_active = 0;
musb->gadget_driver = NULL;
musb_platform_try_idle(musb, 0);
@@ -1975,7 +1968,7 @@ static int musb_gadget_stop(struct usb_gadget *g,
void musb_g_resume(struct musb *musb)
{
musb->is_suspended = 0;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_B_IDLE:
break;
case OTG_STATE_B_WAIT_ACON:
@@ -1989,7 +1982,7 @@ void musb_g_resume(struct musb *musb)
break;
default:
WARNING("unhandled RESUME transition (%s)\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -2001,10 +1994,10 @@ void musb_g_suspend(struct musb *musb)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "devctl %02x\n", devctl);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_B_IDLE:
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
break;
case OTG_STATE_B_PERIPHERAL:
musb->is_suspended = 1;
@@ -2019,7 +2012,7 @@ void musb_g_suspend(struct musb *musb)
* A_PERIPHERAL may need care too
*/
WARNING("unhandled SUSPEND transition (%s)\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -2050,22 +2043,22 @@ void musb_g_disconnect(struct musb *musb)
spin_lock(&musb->lock);
}
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
default:
dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
- usb_otg_state_string(musb->xceiv->state));
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ usb_otg_state_string(musb->xceiv->otg->state));
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
break;
case OTG_STATE_A_PERIPHERAL:
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
MUSB_HST_MODE(musb);
break;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_HOST:
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
break;
case OTG_STATE_B_SRP_INIT:
break;
@@ -2090,9 +2083,12 @@ __acquires(musb->lock)
: NULL
);
- /* report disconnect, if we didn't already (flushing EP state) */
- if (musb->g.speed != USB_SPEED_UNKNOWN)
- musb_g_disconnect(musb);
+ /* report reset, if we didn't already (flushing EP state) */
+ if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
+ spin_unlock(&musb->lock);
+ usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
+ spin_lock(&musb->lock);
+ }
/* clear HR */
else if (devctl & MUSB_DEVCTL_HR)
@@ -2125,13 +2121,13 @@ __acquires(musb->lock)
* In that case, do not rely on devctl for setting
* peripheral mode.
*/
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
} else if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
} else {
- musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 855793d701bb..23d474d3d7f4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2463,7 +2463,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
if (!is_host_active(musb))
return 0;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
return 0;
case OTG_STATE_A_WAIT_VRISE:
@@ -2473,7 +2473,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
*/
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
break;
default:
break;
@@ -2481,7 +2481,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
if (musb->is_active) {
WARNING("trying to suspend as %s while active\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
return -EBUSY;
} else
return 0;
@@ -2678,7 +2678,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
MUSB_HST_MODE(musb);
musb->xceiv->otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
otg_set_host(musb->xceiv->otg, &hcd->self);
hcd->self.otg_port = 1;
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index eebeed78edd6..8a57a6f4b3a6 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -37,86 +37,32 @@
#include <linux/io.h>
-#ifndef CONFIG_BLACKFIN
-
-/* NOTE: these offsets are all in bytes */
-
-static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
- { return __raw_readw(addr + offset); }
-
-static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
- { return __raw_readl(addr + offset); }
-
-
-static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
- { __raw_writew(data, addr + offset); }
-
-static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
- { __raw_writel(data, addr + offset); }
-
-
-#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
-
-/*
- * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+#define musb_ep_select(_mbase, _epnum) musb->io.ep_select((_mbase), (_epnum))
+
+/**
+ * struct musb_io - IO functions for MUSB
+ * @quirks: platform specific flags
+ * @ep_offset: platform specific function to get end point offset
+ * @ep_select: platform specific function to select end point
+ * @fifo_offset: platform specific function to get fifo offset
+ * @read_fifo: platform specific function to read fifo
+ * @write_fifo: platform specific function to write fifo
*/
-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
-{
- u16 tmp;
- u8 val;
-
- tmp = __raw_readw(addr + (offset & ~1));
- if (offset & 1)
- val = (tmp >> 8);
- else
- val = tmp & 0xff;
-
- return val;
-}
-
-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
-{
- u16 tmp;
-
- tmp = __raw_readw(addr + (offset & ~1));
- if (offset & 1)
- tmp = (data << 8) | (tmp & 0xff);
- else
- tmp = (tmp & 0xff00) | data;
-
- __raw_writew(tmp, addr + (offset & ~1));
-}
-
-#else
-
-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
- { return __raw_readb(addr + offset); }
-
-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
- { __raw_writeb(data, addr + offset); }
-
-#endif /* CONFIG_USB_MUSB_TUSB6010 */
-
-#else
-
-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
- { return (u8) (bfin_read16(addr + offset)); }
-
-static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
- { return bfin_read16(addr + offset); }
-
-static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
- { return (u32) (bfin_read16(addr + offset)); }
-
-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
- { bfin_write16(addr + offset, (u16) data); }
-
-static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
- { bfin_write16(addr + offset, data); }
-
-static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
- { bfin_write16(addr + offset, (u16) data); }
-
-#endif /* CONFIG_BLACKFIN */
+struct musb_io {
+ u32 quirks;
+ u32 (*ep_offset)(u8 epnum, u16 offset);
+ void (*ep_select)(void __iomem *mbase, u8 epnum);
+ u32 (*fifo_offset)(u8 epnum);
+ void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
+ void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+};
+
+/* Do not add new entries here, add them to struct musb_io instead */
+extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+extern u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
+extern void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
#endif
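Note: with struct musb_io in place, endpoint addressing in the core becomes two indirect calls instead of a compile-time macro choice. Illustrative use, mirroring the musbhsdma.c conversion later in this diff (the musb_ep_select() macro above expects a variable named musb in scope):

musb_ep_select(musb->mregs, epnum);
offset = musb->io.ep_offset(epnum, MUSB_TXCSR);
txcsr = musb_readw(musb->mregs, offset);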
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 37122a480bc1..11f0be07491e 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -239,14 +239,6 @@
#define MUSB_INDEX 0x0E /* 8 bit */
#define MUSB_TESTMODE 0x0F /* 8 bit */
-/* Get offset for a given FIFO from musb->mregs */
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
-#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
-#else
-#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
-#endif
-
/*
* Additional Control Registers
*/
@@ -295,21 +287,7 @@
#define MUSB_FIFOSIZE 0x0F
#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
-/* Offsets to endpoint registers in indexed model (using INDEX register) */
-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
- (0x10 + (_offset))
-
-/* Offsets to endpoint registers in flat models */
-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
- (0x100 + (0x10*(_epnum)) + (_offset))
-
-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
-/* TUSB6010 EP0 configuration register is special */
-#define MUSB_TUSB_OFFSET(_epnum, _offset) \
- (0x10 + _offset)
#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
-#endif
#define MUSB_TXCSR_MODE 0x2000
@@ -480,10 +458,6 @@ static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
#define MUSB_INDEX USB_OFFSET(USB_INDEX) /* 8 bit */
#define MUSB_TESTMODE USB_OFFSET(USB_TESTMODE)/* 8 bit */
-/* Get offset for a given FIFO from musb->mregs */
-#define MUSB_FIFO_OFFSET(epnum) \
- (USB_OFFSET(USB_EP0_FIFO) + ((epnum) * 8))
-
/*
* Additional Control Registers
*/
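Note: the register-map macros removed here have direct runtime counterparts introduced in musb_core.c earlier in this diff; for instance the old default MUSB_FIFO_OFFSET(epnum) = 0x20 + epnum * 4 is now provided by musb_default_fifo_offset() and reached through the per-instance pointer:

/* old: hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;       */
/* new, with io.fifo_offset == musb_default_fifo_offset: */
hw_ep->fifo = musb->io.fifo_offset(i) + mbase;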
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index e2d2d8c9891b..b072420e44f5 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -69,9 +69,10 @@ void musb_host_finish_resume(struct work_struct *work)
musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
usb_hcd_poll_rh_status(musb->hcd);
/* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->state = OTG_STATE_A_HOST;
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
spin_unlock_irqrestore(&musb->lock, flags);
+ musb_host_resume_root_hub(musb);
}
void musb_port_suspend(struct musb *musb, bool do_suspend)
@@ -107,9 +108,9 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
musb->port1_status |= USB_PORT_STAT_SUSPEND;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
musb->is_active = otg->host->b_hnp_enable;
if (musb->is_active)
mod_timer(&musb->otg_timer, jiffies
@@ -118,13 +119,13 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
musb_platform_try_idle(musb, 0);
break;
case OTG_STATE_B_HOST:
- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
musb->is_active = otg->host->b_hnp_enable;
musb_platform_try_idle(musb, 0);
break;
default:
dev_dbg(musb->controller, "bogus rh suspend? %s\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
} else if (power & MUSB_POWER_SUSPENDM) {
power &= ~MUSB_POWER_SUSPENDM;
@@ -145,7 +146,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
u8 power;
void __iomem *mbase = musb->mregs;
- if (musb->xceiv->state == OTG_STATE_B_IDLE) {
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
@@ -224,24 +225,24 @@ void musb_root_disconnect(struct musb *musb)
usb_hcd_poll_rh_status(musb->hcd);
musb->is_active = 0;
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
if (otg->host->b_hnp_enable) {
- musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+ musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
break;
}
/* FALLTHROUGH */
case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
break;
default:
dev_dbg(musb->controller, "host disconnect (%s)\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
}
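
The musb_virthub.c hunks are part of a tree-wide move of the OTG state field from struct usb_phy into struct usb_otg, which is why every musb->xceiv->state access gains an extra ->otg hop. A stripped-down sketch of the data-structure change, using stand-in types rather than the real <linux/usb/otg.h> definitions:

```c
#include <stdio.h>

enum demo_otg_state { DEMO_B_IDLE, DEMO_A_HOST, DEMO_A_SUSPEND };

struct demo_usb_otg {
	enum demo_otg_state state;	/* new home of the state */
};

struct demo_usb_phy {
	struct demo_usb_otg *otg;
	/* enum demo_otg_state state;  -- old home, removed */
};

static void set_host(struct demo_usb_phy *xceiv)
{
	/* was: xceiv->state = ...; now one extra indirection */
	xceiv->otg->state = DEMO_A_HOST;
}

int main(void)
{
	struct demo_usb_otg otg = { .state = DEMO_B_IDLE };
	struct demo_usb_phy phy = { .otg = &otg };

	set_host(&phy);
	printf("state = %d\n", phy.otg->state);
	return 0;
}
```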
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index e8e9f9aab203..ab7ec09a8afe 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -195,6 +195,7 @@ static int dma_channel_abort(struct dma_channel *channel)
{
struct musb_dma_channel *musb_channel = channel->private_data;
void __iomem *mbase = musb_channel->controller->base;
+ struct musb *musb = musb_channel->controller->private_data;
u8 bchannel = musb_channel->idx;
int offset;
@@ -202,7 +203,7 @@ static int dma_channel_abort(struct dma_channel *channel)
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (musb_channel->transmit) {
- offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ offset = musb->io.ep_offset(musb_channel->epnum,
MUSB_TXCSR);
/*
@@ -215,7 +216,7 @@ static int dma_channel_abort(struct dma_channel *channel)
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(mbase, offset, csr);
} else {
- offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ offset = musb->io.ep_offset(musb_channel->epnum,
MUSB_RXCSR);
csr = musb_readw(mbase, offset);
@@ -326,7 +327,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
(musb_channel->max_packet_sz - 1)))
) {
u8 epnum = musb_channel->epnum;
- int offset = MUSB_EP_OFFSET(epnum,
+ int offset = musb->io.ep_offset(epnum,
MUSB_TXCSR);
u16 txcsr;
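
In the hsdma hunks the abort and IRQ paths stop using the MUSB_EP_OFFSET macro; they fetch the struct musb out of the controller's private_data and ask its io.ep_offset() callback for the CSR offset instead. A tiny model of that lookup chain; all demo_* types are simplified stand-ins for the demo.

```c
#include <stdint.h>
#include <stdio.h>

struct demo_io { uint32_t (*ep_offset)(uint8_t ep, uint16_t reg); };
struct demo_musb { struct demo_io io; };
struct demo_controller { void *private_data; };
struct demo_channel { struct demo_controller *controller; uint8_t epnum; };

static uint32_t flat_ep_offset(uint8_t ep, uint16_t reg)
{
	return 0x100 + 0x10 * ep + reg;
}

static uint32_t txcsr_offset(struct demo_channel *ch)
{
	struct demo_musb *musb = ch->controller->private_data;

	return musb->io.ep_offset(ch->epnum, 0x02 /* TXCSR */);
}

int main(void)
{
	struct demo_musb musb = { .io.ep_offset = flat_ep_offset };
	struct demo_controller ctrl = { .private_data = &musb };
	struct demo_channel ch = { .controller = &ctrl, .epnum = 3 };

	printf("TXCSR @ 0x%03x\n", (unsigned)txcsr_offset(&ch));
	return 0;
}
```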
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d369bf1f3936..763649eb4987 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -65,15 +65,15 @@ static void musb_do_idle(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
@@ -90,15 +90,15 @@ static void musb_do_idle(unsigned long _musb)
musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
usb_hcd_poll_rh_status(musb->hcd);
/* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->state = OTG_STATE_A_HOST;
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
}
break;
case OTG_STATE_A_HOST:
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
else
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
default:
break;
}
@@ -116,9 +116,9 @@ static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&musb_idle_timer);
last_timer = jiffies;
return;
@@ -135,7 +135,7 @@ static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
mod_timer(&musb_idle_timer, timeout);
}
@@ -153,7 +153,7 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (is_on) {
- if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
int loops = 100;
/* start the session */
devctl |= MUSB_DEVCTL_SESSION;
@@ -162,7 +162,8 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
* Wait for the musb to set as A device to enable the
* VBUS
*/
- while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
mdelay(5);
cpu_relax();
@@ -179,7 +180,7 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
} else {
musb->is_active = 1;
otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
MUSB_HST_MODE(musb);
}
@@ -191,7 +192,7 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
*/
otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
devctl &= ~MUSB_DEVCTL_SESSION;
MUSB_DEV_MODE(musb);
@@ -200,7 +201,7 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
dev_dbg(musb->controller, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
@@ -265,7 +266,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(dev, "ID GND\n");
otg->default_a = true;
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
pm_runtime_get_sync(dev);
@@ -279,7 +280,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(dev, "VBUS Connect\n");
otg->default_a = false;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->xceiv->last_event = USB_EVENT_VBUS;
if (musb->gadget_driver)
pm_runtime_get_sync(dev);
@@ -518,10 +519,8 @@ static int omap2430_probe(struct platform_device *pdev)
int ret = -ENOMEM;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
goto err0;
- }
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
@@ -543,25 +542,16 @@ static int omap2430_probe(struct platform_device *pdev)
struct platform_device *control_pdev;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev,
- "failed to allocate musb platform data\n");
+ if (!pdata)
goto err2;
- }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev,
- "failed to allocate musb board data\n");
+ if (!data)
goto err2;
- }
config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!config) {
- dev_err(&pdev->dev,
- "failed to allocate musb hdrc config\n");
+ if (!config)
goto err2;
- }
of_property_read_u32(np, "mode", (u32 *)&pdata->mode);
of_property_read_u32(np, "interface-type",
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 2daa779f1382..3a5ffd575438 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -126,6 +126,52 @@ static void tusb_wbus_quirk(struct musb *musb, int enabled)
}
}
+static u32 tusb_fifo_offset(u8 epnum)
+{
+ return 0x200 + (epnum * 0x20);
+}
+
+static u32 tusb_ep_offset(u8 epnum, u16 offset)
+{
+ return 0x10 + offset;
+}
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+static void tusb_ep_select(void __iomem *mbase, u8 epnum)
+{
+ musb_writeb(mbase, MUSB_INDEX, epnum);
+}
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static void tusb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
+
/*
* TUSB 6010 may use a parallel bus that doesn't support byte ops;
* so both loading and unloading FIFOs need explicit byte counts.
@@ -173,7 +219,7 @@ static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
}
}
-void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+static void tusb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
{
struct musb *musb = hw_ep->musb;
void __iomem *ep_conf = hw_ep->conf;
@@ -223,7 +269,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
tusb_fifo_write_unaligned(fifo, buf, len);
}
-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+static void tusb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
{
struct musb *musb = hw_ep->musb;
void __iomem *ep_conf = hw_ep->conf;
@@ -415,13 +461,13 @@ static void musb_do_idle(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
if ((musb->a_wait_bcon != 0)
&& (musb->idle_timeout == 0
|| time_after(jiffies, musb->idle_timeout))) {
dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
}
/* FALLTHROUGH */
case OTG_STATE_A_IDLE:
@@ -474,9 +520,9 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->state));
+ usb_otg_state_string(musb->xceiv->otg->state));
del_timer(&musb_idle_timer);
last_timer = jiffies;
return;
@@ -493,7 +539,7 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
mod_timer(&musb_idle_timer, timeout);
}
@@ -524,7 +570,7 @@ static void tusb_musb_set_vbus(struct musb *musb, int is_on)
if (is_on) {
timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -537,16 +583,16 @@ static void tusb_musb_set_vbus(struct musb *musb, int is_on)
/* If ID pin is grounded, we want to be a_idle */
otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
default:
- musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
}
musb->is_active = 0;
otg->default_a = 1;
@@ -554,7 +600,7 @@ static void tusb_musb_set_vbus(struct musb *musb, int is_on)
} else {
musb->is_active = 0;
otg->default_a = 0;
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
}
@@ -569,7 +615,7 @@ static void tusb_musb_set_vbus(struct musb *musb, int is_on)
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL),
musb_readl(tbase, TUSB_DEV_OTG_STAT),
conf, prcm);
@@ -668,23 +714,23 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
- if (musb->xceiv->state != OTG_STATE_B_IDLE) {
+ if (musb->xceiv->otg->state != OTG_STATE_B_IDLE) {
/* INTR_DISCONNECT can hide... */
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->int_usb |= MUSB_INTR_DISCONNECT;
}
musb->is_active = 0;
}
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
- usb_otg_state_string(musb->xceiv->state), otg_stat);
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
idle_timeout = jiffies + (1 * HZ);
schedule_work(&musb->irq_work);
} else /* A-dev state machine */ {
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
- usb_otg_state_string(musb->xceiv->state), otg_stat);
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_IDLE:
dev_dbg(musb->controller, "Got SRP, turning on VBUS\n");
musb_platform_set_vbus(musb, 1);
@@ -731,9 +777,9 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
u8 devctl;
dev_dbg(musb->controller, "%s timer, %03x\n",
- usb_otg_state_string(musb->xceiv->state), otg_stat);
+ usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
- switch (musb->xceiv->state) {
+ switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
/* VBUS has probably been valid for a while now,
* but may well have bounced out of range a bit
@@ -745,7 +791,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
dev_dbg(musb->controller, "devctl %02x\n", devctl);
break;
}
- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
idle_timeout = jiffies
+ msecs_to_jiffies(musb->a_wait_bcon);
@@ -1135,9 +1181,17 @@ static int tusb_musb_exit(struct musb *musb)
}
static const struct musb_platform_ops tusb_ops = {
+ .quirks = MUSB_IN_TUSB,
.init = tusb_musb_init,
.exit = tusb_musb_exit,
+ .ep_offset = tusb_ep_offset,
+ .ep_select = tusb_ep_select,
+ .fifo_offset = tusb_fifo_offset,
+ .readb = tusb_readb,
+ .writeb = tusb_writeb,
+ .read_fifo = tusb_read_fifo,
+ .write_fifo = tusb_write_fifo,
.enable = tusb_musb_enable,
.disable = tusb_musb_disable,
@@ -1164,10 +1218,8 @@ static int tusb_probe(struct platform_device *pdev)
int ret;
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
return -ENOMEM;
- }
glue->dev = &pdev->dev;
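
The biggest tusb6010 change is that the glue now supplies its own accessors through musb_platform_ops: the TUSB6010 sits on a 16-bit-only bus, so tusb_readb()/tusb_writeb() emulate byte accesses with a read-modify-write of the aligned 16-bit word. The same logic on a plain array, as a compilable model where a uint16_t array stands in for the iomem window:

```c
#include <stdint.h>
#include <stdio.h>

static uint16_t regs[4];	/* fake 16-bit-wide register window */

static uint8_t demo_readb(unsigned offset)
{
	uint16_t tmp = regs[(offset & ~1u) / 2];

	return (offset & 1) ? tmp >> 8 : tmp & 0xff;
}

static void demo_writeb(unsigned offset, uint8_t data)
{
	uint16_t *word = &regs[(offset & ~1u) / 2];

	if (offset & 1)
		*word = (data << 8) | (*word & 0x00ff);
	else
		*word = (*word & 0xff00) | data;
}

int main(void)
{
	demo_writeb(2, 0xAA);	/* low byte of word 1 */
	demo_writeb(3, 0x55);	/* high byte of word 1 */
	printf("word1=0x%04x  byte2=0x%02x  byte3=0x%02x\n",
	       (unsigned)regs[1], demo_readb(2), demo_readb(3));
	return 0;
}
```

Registering these through .readb/.writeb lets the core keep calling the generic accessors without knowing about the bus width.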
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index dc666e96f45f..abf72728825f 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -56,7 +56,7 @@ static void ux500_musb_set_vbus(struct musb *musb, int is_on)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (is_on) {
- if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
/* start the session */
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
@@ -76,7 +76,7 @@ static void ux500_musb_set_vbus(struct musb *musb, int is_on)
} else {
musb->is_active = 1;
musb->xceiv->otg->default_a = 1;
- musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
MUSB_HST_MODE(musb);
}
@@ -102,7 +102,7 @@ static void ux500_musb_set_vbus(struct musb *musb, int is_on)
mdelay(200);
dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
- usb_otg_state_string(musb->xceiv->state),
+ usb_otg_state_string(musb->xceiv->otg->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
@@ -112,7 +112,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
struct musb *musb = container_of(nb, struct musb, nb);
dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n",
- event, usb_otg_state_string(musb->xceiv->state));
+ event, usb_otg_state_string(musb->xceiv->otg->state));
switch (event) {
case UX500_MUSB_ID:
@@ -127,7 +127,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
if (is_host_active(musb))
ux500_musb_set_vbus(musb, 0);
else
- musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
break;
default:
dev_dbg(musb->controller, "ID float\n");
@@ -188,8 +188,10 @@ static int ux500_musb_exit(struct musb *musb)
}
static const struct musb_platform_ops ux500_ops = {
+ .quirks = MUSB_INDEXED_EP,
.init = ux500_musb_init,
.exit = ux500_musb_exit,
+ .fifo_mode = 5,
.set_vbus = ux500_musb_set_vbus,
};
@@ -247,10 +249,8 @@ static int ux500_probe(struct platform_device *pdev)
}
glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&pdev->dev, "failed to allocate glue context\n");
+ if (!glue)
goto err0;
- }
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
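
The ux500 glue now describes its controller through data rather than Kconfig: .quirks = MUSB_INDEXED_EP tells the core which endpoint-addressing model to use and .fifo_mode selects the FIFO table. A rough sketch of how such a quirks bitmask can steer core behaviour; the flag values and helper names here are invented for the demo, not the kernel's.

```c
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUIRK_INDEXED_EP	(1u << 0)
#define DEMO_QUIRK_IN_TUSB	(1u << 1)

struct demo_platform_ops {
	uint32_t quirks;
	int fifo_mode;
};

static const char *pick_ep_model(const struct demo_platform_ops *ops)
{
	if (ops->quirks & DEMO_QUIRK_IN_TUSB)
		return "tusb offsets";
	if (ops->quirks & DEMO_QUIRK_INDEXED_EP)
		return "indexed offsets";
	return "flat offsets";
}

int main(void)
{
	const struct demo_platform_ops ux500_like = {
		.quirks = DEMO_QUIRK_INDEXED_EP,
		.fifo_mode = 5,
	};

	printf("%s, fifo_mode=%d\n",
	       pick_ep_model(&ux500_like), ux500_like.fifo_mode);
	return 0;
}
```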
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 221faed9f074..e93845c26bdb 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -91,9 +91,9 @@ static bool ux500_configure_channel(struct dma_channel *channel,
struct scatterlist sg;
struct dma_slave_config slave_conf;
enum dma_slave_buswidth addr_width;
- dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
- ux500_channel->controller->phy_base);
struct musb *musb = ux500_channel->controller->private_data;
+ dma_addr_t usb_fifo_addr = (musb->io.fifo_offset(hw_ep->epnum) +
+ ux500_channel->controller->phy_base);
dev_dbg(musb->controller,
"packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
@@ -121,8 +121,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
slave_conf.dst_maxburst = 16;
slave_conf.device_fc = false;
- dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
- (unsigned long) &slave_conf);
+ dmaengine_slave_config(dma_chan, &slave_conf);
dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -246,9 +245,7 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
musb_writew(epio, MUSB_RXCSR, csr);
}
- ux500_channel->dma_chan->device->
- device_control(ux500_channel->dma_chan,
- DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(ux500_channel->dma_chan);
channel->status = MUSB_DMA_STATUS_FREE;
}
return 0;
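
ux500_dma also switches from the raw device_control(cmd, arg) hook to the typed dmaengine helpers dmaengine_slave_config() and dmaengine_terminate_all(). The shape of that cleanup, modelled in plain C with invented demo_* names: the point is that a typed wrapper replaces a command code plus an unsigned long cast.

```c
#include <stdio.h>

struct demo_slave_config { int src_maxburst, dst_maxburst; };

struct demo_chan {
	int (*slave_config)(struct demo_chan *c,
			    struct demo_slave_config *cfg);
	int (*terminate_all)(struct demo_chan *c);
	struct demo_slave_config cfg;
	int running;
};

/* Typed helpers, analogous to dmaengine_slave_config()/_terminate_all() */
static int demo_slave_config(struct demo_chan *c,
			     struct demo_slave_config *cfg)
{
	return c->slave_config(c, cfg);
}

static int demo_terminate_all(struct demo_chan *c)
{
	return c->terminate_all(c);
}

static int store_cfg(struct demo_chan *c, struct demo_slave_config *cfg)
{
	c->cfg = *cfg;
	return 0;
}

static int stop_chan(struct demo_chan *c)
{
	c->running = 0;
	return 0;
}

int main(void)
{
	struct demo_chan ch = {
		.slave_config = store_cfg,
		.terminate_all = stop_chan,
		.running = 1,
	};
	struct demo_slave_config cfg = { .src_maxburst = 16,
					 .dst_maxburst = 16 };

	demo_slave_config(&ch, &cfg);
	demo_terminate_all(&ch);
	printf("burst=%d running=%d\n", ch.cfg.src_maxburst, ch.running);
	return 0;
}
```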
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 11ab2c45e462..0b1bd2369293 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1,6 +1,4 @@
/*
- * drivers/usb/otg/ab8500_usb.c
- *
* USB transceiver driver for AB8500 family chips
*
* Copyright (C) 2010-2013 ST-Ericsson AB
@@ -446,7 +444,8 @@ static int ab9540_usb_link_status_update(struct ab8500_usb *ab,
if (event != UX500_MUSB_RIDB)
event = UX500_MUSB_NONE;
/* Fallback to default B_IDLE as nothing is connected. */
- ab->phy.state = OTG_STATE_B_IDLE;
+ ab->phy.otg->state = OTG_STATE_B_IDLE;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_9540:
@@ -461,12 +460,14 @@ static int ab9540_usb_link_status_update(struct ab8500_usb *ab,
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (ab->mode == USB_IDLE) {
ab->mode = USB_PERIPHERAL;
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
@@ -502,6 +503,7 @@ static int ab9540_usb_link_status_update(struct ab8500_usb *ab,
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
case USB_LINK_PHYEN_NO_VBUS_NO_IDGND_9540:
@@ -526,6 +528,7 @@ static int ab9540_usb_link_status_update(struct ab8500_usb *ab,
ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
}
}
break;
@@ -584,7 +587,8 @@ static int ab8540_usb_link_status_update(struct ab8500_usb *ab,
* Fallback to default B_IDLE as nothing
* is connected
*/
- ab->phy.state = OTG_STATE_B_IDLE;
+ ab->phy.otg->state = OTG_STATE_B_IDLE;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_8540:
@@ -598,6 +602,7 @@ static int ab8540_usb_link_status_update(struct ab8500_usb *ab,
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
@@ -626,6 +631,7 @@ static int ab8540_usb_link_status_update(struct ab8500_usb *ab,
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
case USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8540:
@@ -648,6 +654,7 @@ static int ab8540_usb_link_status_update(struct ab8500_usb *ab,
ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
}
break;
@@ -693,7 +700,8 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
* Fallback to default B_IDLE as nothing
* is connected
*/
- ab->phy.state = OTG_STATE_B_IDLE;
+ ab->phy.otg->state = OTG_STATE_B_IDLE;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_8505:
@@ -707,6 +715,7 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
@@ -734,6 +743,7 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
default:
@@ -776,7 +786,8 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
if (event != UX500_MUSB_RIDB)
event = UX500_MUSB_NONE;
/* Fallback to default B_IDLE as nothing is connected */
- ab->phy.state = OTG_STATE_B_IDLE;
+ ab->phy.otg->state = OTG_STATE_B_IDLE;
+ usb_phy_set_event(&ab->phy, USB_EVENT_NONE);
break;
case USB_LINK_ACA_RID_C_NM_8500:
@@ -794,6 +805,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
ab8500_usb_peri_phy_en(ab);
atomic_notifier_call_chain(&ab->phy.notifier,
UX500_MUSB_PREPARE, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_ENUMERATED);
}
if (event != UX500_MUSB_RIDC)
event = UX500_MUSB_VBUS;
@@ -820,6 +832,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
event = UX500_MUSB_CHARGER;
atomic_notifier_call_chain(&ab->phy.notifier,
event, &ab->vbus_draw);
+ usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
case USB_LINK_RESERVED_8500:
@@ -1056,7 +1069,7 @@ static int ab8500_usb_set_peripheral(struct usb_otg *otg,
if (!otg)
return -ENODEV;
- ab = phy_to_ab(otg->phy);
+ ab = phy_to_ab(otg->usb_phy);
ab->phy.otg->gadget = gadget;
@@ -1080,7 +1093,7 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
if (!otg)
return -ENODEV;
- ab = phy_to_ab(otg->phy);
+ ab = phy_to_ab(otg->usb_phy);
ab->phy.otg->host = host;
@@ -1380,9 +1393,9 @@ static int ab8500_usb_probe(struct platform_device *pdev)
ab->phy.label = "ab8500";
ab->phy.set_suspend = ab8500_usb_set_suspend;
ab->phy.set_power = ab8500_usb_set_power;
- ab->phy.state = OTG_STATE_UNDEFINED;
+ ab->phy.otg->state = OTG_STATE_UNDEFINED;
- otg->phy = &ab->phy;
+ otg->usb_phy = &ab->phy;
otg->set_host = ab8500_usb_set_host;
otg->set_peripheral = ab8500_usb_set_peripheral;
@@ -1505,7 +1518,6 @@ static struct platform_driver ab8500_usb_driver = {
.id_table = ab8500_usb_devtype,
.driver = {
.name = "abx5x0-usb",
- .owner = THIS_MODULE,
},
};
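
Besides the otg->phy to otg->usb_phy rename and the state move, the ab8500 driver gains usb_phy_set_event() calls so the PHY's last_event field tracks what the notifier chain just reported; the .owner assignment is dropped because the platform-driver registration path fills it in. A stripped-down model of the event bookkeeping, using demo_* types only (the real helper lives in the USB PHY core):

```c
#include <stdio.h>

enum demo_event { DEMO_EVENT_NONE, DEMO_EVENT_VBUS,
		  DEMO_EVENT_ENUMERATED, DEMO_EVENT_CHARGER };

struct demo_phy { enum demo_event last_event; };

static void demo_notify(enum demo_event ev)
{
	printf("notifier: event %d\n", ev);
}

static void demo_phy_set_event(struct demo_phy *phy, enum demo_event ev)
{
	phy->last_event = ev;	/* what charger/extcon consumers read */
}

static void link_status_update(struct demo_phy *phy, int charger)
{
	enum demo_event ev = charger ? DEMO_EVENT_CHARGER
				     : DEMO_EVENT_ENUMERATED;

	demo_notify(ev);		/* as before */
	demo_phy_set_event(phy, ev);	/* new: record it on the PHY too */
}

int main(void)
{
	struct demo_phy phy = { .last_event = DEMO_EVENT_NONE };

	link_status_update(&phy, 1);
	printf("last_event=%d\n", phy.last_event);
	return 0;
}
```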
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 35b6083b7999..403fab772724 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -147,10 +147,8 @@ static int am335x_control_usb_probe(struct platform_device *pdev)
phy_ctrl = of_id->data;
ctrl_usb = devm_kzalloc(&pdev->dev, sizeof(*ctrl_usb), GFP_KERNEL);
- if (!ctrl_usb) {
- dev_err(&pdev->dev, "unable to alloc memory for control usb\n");
+ if (!ctrl_usb)
return -ENOMEM;
- }
ctrl_usb->dev = &pdev->dev;
@@ -175,7 +173,6 @@ static struct platform_driver am335x_control_driver = {
.probe = am335x_control_usb_probe,
.driver = {
.name = "am335x-control-usb",
- .owner = THIS_MODULE,
.of_match_table = omap_control_usb_id_table,
},
};
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index b70e05537180..90b67a4ca221 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -137,7 +137,6 @@ static struct platform_driver am335x_phy_driver = {
.remove = am335x_phy_remove,
.driver = {
.name = "am335x-phy-driver",
- .owner = THIS_MODULE,
.pm = &am335x_pm_ops,
.of_match_table = am335x_phy_ids,
},
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index f1ea5990a50a..ab38aa32a6c1 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -274,7 +274,7 @@ void b_srp_end(unsigned long foo)
fsl_otg_dischrg_vbus(0);
srp_wait_done = 1;
- if ((fsl_otg_dev->phy.state == OTG_STATE_B_SRP_INIT) &&
+ if ((fsl_otg_dev->phy.otg->state == OTG_STATE_B_SRP_INIT) &&
fsl_otg_dev->fsm.b_sess_vld)
fsl_otg_dev->fsm.b_srp_done = 1;
}
@@ -499,7 +499,8 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
{
struct usb_otg *otg = fsm->otg;
struct device *dev;
- struct fsl_otg *otg_dev = container_of(otg->phy, struct fsl_otg, phy);
+ struct fsl_otg *otg_dev =
+ container_of(otg->usb_phy, struct fsl_otg, phy);
u32 retval = 0;
if (!otg->host)
@@ -594,7 +595,7 @@ static int fsl_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
if (!otg)
return -ENODEV;
- otg_dev = container_of(otg->phy, struct fsl_otg, phy);
+ otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
@@ -623,7 +624,7 @@ static int fsl_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
/* Mini-A cable connected */
struct otg_fsm *fsm = &otg_dev->fsm;
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
fsm->protocol = PROTO_UNDEF;
}
}
@@ -644,7 +645,7 @@ static int fsl_otg_set_peripheral(struct usb_otg *otg,
if (!otg)
return -ENODEV;
- otg_dev = container_of(otg->phy, struct fsl_otg, phy);
+ otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
VDBG("otg_dev 0x%x\n", (int)otg_dev);
VDBG("fsl_otg_dev 0x%x\n", (int)fsl_otg_dev);
if (otg_dev != fsl_otg_dev)
@@ -681,7 +682,7 @@ static int fsl_otg_set_power(struct usb_phy *phy, unsigned mA)
{
if (!fsl_otg_dev)
return -ENODEV;
- if (phy->state == OTG_STATE_B_PERIPHERAL)
+ if (phy->otg->state == OTG_STATE_B_PERIPHERAL)
pr_info("FSL OTG: Draw %d mA\n", mA);
return 0;
@@ -714,10 +715,10 @@ static int fsl_otg_start_srp(struct usb_otg *otg)
{
struct fsl_otg *otg_dev;
- if (!otg || otg->phy->state != OTG_STATE_B_IDLE)
+ if (!otg || otg->state != OTG_STATE_B_IDLE)
return -ENODEV;
- otg_dev = container_of(otg->phy, struct fsl_otg, phy);
+ otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
@@ -735,7 +736,7 @@ static int fsl_otg_start_hnp(struct usb_otg *otg)
if (!otg)
return -ENODEV;
- otg_dev = container_of(otg->phy, struct fsl_otg, phy);
+ otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy);
if (otg_dev != fsl_otg_dev)
return -ENODEV;
@@ -857,7 +858,7 @@ static int fsl_otg_conf(struct platform_device *pdev)
fsl_otg_tc->phy.dev = &pdev->dev;
fsl_otg_tc->phy.set_power = fsl_otg_set_power;
- fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy;
+ fsl_otg_tc->phy.otg->usb_phy = &fsl_otg_tc->phy;
fsl_otg_tc->phy.otg->set_host = fsl_otg_set_host;
fsl_otg_tc->phy.otg->set_peripheral = fsl_otg_set_peripheral;
fsl_otg_tc->phy.otg->start_hnp = fsl_otg_start_hnp;
@@ -989,10 +990,10 @@ int usb_otg_start(struct platform_device *pdev)
* Also: record initial state of ID pin
*/
if (fsl_readl(&p_otg->dr_mem_map->otgsc) & OTGSC_STS_USB_ID) {
- p_otg->phy.state = OTG_STATE_UNDEFINED;
+ p_otg->phy.otg->state = OTG_STATE_UNDEFINED;
p_otg->fsm.id = 1;
} else {
- p_otg->phy.state = OTG_STATE_A_IDLE;
+ p_otg->phy.otg->state = OTG_STATE_A_IDLE;
p_otg->fsm.id = 0;
}
@@ -1047,7 +1048,7 @@ static int show_fsl_usb2_otg_state(struct device *dev,
/* State */
t = scnprintf(next, size,
"OTG state: %s\n\n",
- usb_otg_state_string(fsl_otg_dev->phy.state));
+ usb_otg_state_string(fsl_otg_dev->phy.otg->state));
size -= t;
next += t;
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 5986c96354df..23149954a09c 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -298,7 +298,7 @@
/* SE0 Time Before SRP */
#define TB_SE0_SRP (2) /* b_idle,minimum 2 ms, section:5.3.2 */
-#define SET_OTG_STATE(otg_ptr, newstate) ((otg_ptr)->state = newstate)
+#define SET_OTG_STATE(phy, newstate) ((phy)->otg->state = newstate)
struct usb_dr_mmap {
/* Capability register */
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 7594e5069ae5..f1b719b45a53 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -1,6 +1,4 @@
/*
- * drivers/usb/otg/nop-usb-xceiv.c
- *
* NOP USB transceiver for all USB transceiver which are either built-in
* into USB IP or which are mostly autonomous.
*
@@ -123,7 +121,7 @@ static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
}
otg->gadget = gadget;
- otg->phy->state = OTG_STATE_B_IDLE;
+ otg->state = OTG_STATE_B_IDLE;
return 0;
}
@@ -225,10 +223,10 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
nop->phy.set_suspend = nop_set_suspend;
- nop->phy.state = OTG_STATE_UNDEFINED;
nop->phy.type = type;
- nop->phy.otg->phy = &nop->phy;
+ nop->phy.otg->state = OTG_STATE_UNDEFINED;
+ nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
@@ -286,7 +284,6 @@ static struct platform_driver usb_phy_generic_driver = {
.remove = usb_phy_generic_remove,
.driver = {
.name = "usb_phy_generic",
- .owner = THIS_MODULE,
.of_match_table = nop_xceiv_dt_ids,
},
};
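
phy-generic shows the probe-time side of the rename: the OTG object now carries a usb_phy back-pointer and its state field is the one initialised to OTG_STATE_UNDEFINED. A simplified wiring sketch with stand-in types, not the real usb_phy/usb_otg structures:

```c
#include <stdio.h>

enum demo_state { DEMO_UNDEFINED, DEMO_B_IDLE };

struct demo_phy;

struct demo_otg {
	enum demo_state state;
	struct demo_phy *usb_phy;	/* was ->phy before the rename */
};

struct demo_phy {
	const char *label;
	struct demo_otg *otg;
};

static void create_phy(struct demo_phy *phy, struct demo_otg *otg)
{
	phy->label = "nop-xceiv";
	phy->otg = otg;
	otg->usb_phy = phy;		/* back-pointer set at probe */
	otg->state = DEMO_UNDEFINED;	/* state lives on the OTG now */
}

int main(void)
{
	struct demo_otg otg;
	struct demo_phy phy;

	create_phy(&phy, &otg);
	printf("%s, state=%d, backptr ok=%d\n",
	       phy.label, phy.otg->state, phy.otg->usb_phy == &phy);
	return 0;
}
```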
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index f4b14bd97e14..f66120db8a41 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -121,7 +121,7 @@ static void gpio_vbus_work(struct work_struct *work)
if (vbus) {
status = USB_EVENT_VBUS;
- gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL;
+ gpio_vbus->phy.otg->state = OTG_STATE_B_PERIPHERAL;
gpio_vbus->phy.last_event = status;
usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
@@ -134,6 +134,7 @@ static void gpio_vbus_work(struct work_struct *work)
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
+ usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
} else {
/* optionally disable D+ pullup */
if (gpio_is_valid(gpio))
@@ -143,11 +144,12 @@ static void gpio_vbus_work(struct work_struct *work)
usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
status = USB_EVENT_NONE;
- gpio_vbus->phy.state = OTG_STATE_B_IDLE;
+ gpio_vbus->phy.otg->state = OTG_STATE_B_IDLE;
gpio_vbus->phy.last_event = status;
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
+ usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_NONE);
}
}
@@ -180,7 +182,7 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
struct platform_device *pdev;
int gpio;
- gpio_vbus = container_of(otg->phy, struct gpio_vbus_data, phy);
+ gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
pdata = dev_get_platdata(gpio_vbus->dev);
gpio = pdata->gpio_pullup;
@@ -196,7 +198,7 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
set_vbus_draw(gpio_vbus, 0);
usb_gadget_vbus_disconnect(otg->gadget);
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
otg->gadget = NULL;
return 0;
@@ -218,7 +220,7 @@ static int gpio_vbus_set_power(struct usb_phy *phy, unsigned mA)
gpio_vbus = container_of(phy, struct gpio_vbus_data, phy);
- if (phy->state == OTG_STATE_B_PERIPHERAL)
+ if (phy->otg->state == OTG_STATE_B_PERIPHERAL)
set_vbus_draw(gpio_vbus, mA);
return 0;
}
@@ -269,9 +271,9 @@ static int gpio_vbus_probe(struct platform_device *pdev)
gpio_vbus->phy.dev = gpio_vbus->dev;
gpio_vbus->phy.set_power = gpio_vbus_set_power;
gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
- gpio_vbus->phy.state = OTG_STATE_UNDEFINED;
- gpio_vbus->phy.otg->phy = &gpio_vbus->phy;
+ gpio_vbus->phy.otg->state = OTG_STATE_UNDEFINED;
+ gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
err = devm_gpio_request(&pdev->dev, gpio, "vbus_detect");
@@ -380,7 +382,6 @@ MODULE_ALIAS("platform:gpio-vbus");
static struct platform_driver gpio_vbus_driver = {
.driver = {
.name = "gpio-vbus",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &gpio_vbus_dev_pm_ops,
#endif
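
The gpio-vbus hunks recover the driver context from the embedded usb_phy with container_of(otg->usb_phy, ...). The same idiom on demo types, with container_of() spelled out the way the kernel defines it:

```c
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_phy { int last_event; };

struct demo_gpio_vbus_data {
	int gpio;
	struct demo_phy phy;	/* embedded, not pointed to */
};

static struct demo_gpio_vbus_data *to_gpio_vbus(struct demo_phy *phy)
{
	return demo_container_of(phy, struct demo_gpio_vbus_data, phy);
}

int main(void)
{
	struct demo_gpio_vbus_data data = { .gpio = 42 };
	struct demo_phy *phy = &data.phy;

	printf("gpio=%d ok=%d\n", to_gpio_vbus(phy)->gpio,
	       to_gpio_vbus(phy) == &data);
	return 0;
}
```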
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 8eea56d3ded6..1e0e10dd6ba5 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -234,7 +234,7 @@ isp1301_clear_bits(struct isp1301 *isp, u8 reg, u8 bits)
static inline const char *state_name(struct isp1301 *isp)
{
- return usb_otg_state_string(isp->phy.state);
+ return usb_otg_state_string(isp->phy.otg->state);
}
/*-------------------------------------------------------------------------*/
@@ -249,7 +249,7 @@ static inline const char *state_name(struct isp1301 *isp)
static void power_down(struct isp1301 *isp)
{
- isp->phy.state = OTG_STATE_UNDEFINED;
+ isp->phy.otg->state = OTG_STATE_UNDEFINED;
// isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
@@ -339,7 +339,7 @@ static void a_idle(struct isp1301 *isp, const char *tag)
{
u32 l;
- if (isp->phy.state == OTG_STATE_A_IDLE)
+ if (isp->phy.otg->state == OTG_STATE_A_IDLE)
return;
isp->phy.otg->default_a = 1;
@@ -351,7 +351,7 @@ static void a_idle(struct isp1301 *isp, const char *tag)
isp->phy.otg->gadget->is_a_peripheral = 1;
gadget_suspend(isp);
}
- isp->phy.state = OTG_STATE_A_IDLE;
+ isp->phy.otg->state = OTG_STATE_A_IDLE;
l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
omap_writel(l, OTG_CTRL);
isp->last_otg_ctrl = l;
@@ -363,7 +363,7 @@ static void b_idle(struct isp1301 *isp, const char *tag)
{
u32 l;
- if (isp->phy.state == OTG_STATE_B_IDLE)
+ if (isp->phy.otg->state == OTG_STATE_B_IDLE)
return;
isp->phy.otg->default_a = 0;
@@ -375,7 +375,7 @@ static void b_idle(struct isp1301 *isp, const char *tag)
isp->phy.otg->gadget->is_a_peripheral = 0;
gadget_suspend(isp);
}
- isp->phy.state = OTG_STATE_B_IDLE;
+ isp->phy.otg->state = OTG_STATE_B_IDLE;
l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
omap_writel(l, OTG_CTRL);
isp->last_otg_ctrl = l;
@@ -474,7 +474,7 @@ static void check_state(struct isp1301 *isp, const char *tag)
default:
break;
}
- if (isp->phy.state == state && !extra)
+ if (isp->phy.otg->state == state && !extra)
return;
pr_debug("otg: %s FSM %s/%02x, %s, %06x\n", tag,
usb_otg_state_string(state), fsm, state_name(isp),
@@ -498,23 +498,23 @@ static void update_otg1(struct isp1301 *isp, u8 int_src)
if (int_src & INTR_SESS_VLD)
otg_ctrl |= OTG_ASESSVLD;
- else if (isp->phy.state == OTG_STATE_A_WAIT_VFALL) {
+ else if (isp->phy.otg->state == OTG_STATE_A_WAIT_VFALL) {
a_idle(isp, "vfall");
otg_ctrl &= ~OTG_CTRL_BITS;
}
if (int_src & INTR_VBUS_VLD)
otg_ctrl |= OTG_VBUSVLD;
if (int_src & INTR_ID_GND) { /* default-A */
- if (isp->phy.state == OTG_STATE_B_IDLE
- || isp->phy.state
+ if (isp->phy.otg->state == OTG_STATE_B_IDLE
+ || isp->phy.otg->state
== OTG_STATE_UNDEFINED) {
a_idle(isp, "init");
return;
}
} else { /* default-B */
otg_ctrl |= OTG_ID;
- if (isp->phy.state == OTG_STATE_A_IDLE
- || isp->phy.state == OTG_STATE_UNDEFINED) {
+ if (isp->phy.otg->state == OTG_STATE_A_IDLE
+ || isp->phy.otg->state == OTG_STATE_UNDEFINED) {
b_idle(isp, "init");
return;
}
@@ -548,14 +548,14 @@ static void otg_update_isp(struct isp1301 *isp)
isp->last_otg_ctrl = otg_ctrl;
otg_ctrl = otg_ctrl & OTG_XCEIV_INPUTS;
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_SRP_INIT:
if (!(otg_ctrl & OTG_PULLUP)) {
// if (otg_ctrl & OTG_B_HNPEN) {
if (isp->phy.otg->gadget->b_hnp_enable) {
- isp->phy.state = OTG_STATE_B_WAIT_ACON;
+ isp->phy.otg->state = OTG_STATE_B_WAIT_ACON;
pr_debug(" --> b_wait_acon\n");
}
goto pulldown;
@@ -585,7 +585,7 @@ pulldown:
if (!(isp->phy.otg->host))
otg_ctrl &= ~OTG_DRV_VBUS;
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_A_SUSPEND:
if (otg_ctrl & OTG_DRV_VBUS) {
set |= OTG1_VBUS_DRV;
@@ -596,7 +596,7 @@ pulldown:
/* FALLTHROUGH */
case OTG_STATE_A_VBUS_ERR:
- isp->phy.state = OTG_STATE_A_WAIT_VFALL;
+ isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
pr_debug(" --> a_wait_vfall\n");
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_VFALL:
@@ -605,7 +605,7 @@ pulldown:
break;
case OTG_STATE_A_IDLE:
if (otg_ctrl & OTG_DRV_VBUS) {
- isp->phy.state = OTG_STATE_A_WAIT_VRISE;
+ isp->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
pr_debug(" --> a_wait_vrise\n");
}
/* FALLTHROUGH */
@@ -625,17 +625,17 @@ pulldown:
if (otg_change & OTG_PULLUP) {
u32 l;
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_B_IDLE:
if (clr & OTG1_DP_PULLUP)
break;
- isp->phy.state = OTG_STATE_B_PERIPHERAL;
+ isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
pr_debug(" --> b_peripheral\n");
break;
case OTG_STATE_A_SUSPEND:
if (clr & OTG1_DP_PULLUP)
break;
- isp->phy.state = OTG_STATE_A_PERIPHERAL;
+ isp->phy.otg->state = OTG_STATE_A_PERIPHERAL;
pr_debug(" --> a_peripheral\n");
break;
default:
@@ -673,7 +673,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
* remote wakeup (SRP, normal) using their own timer
* to give "check cable and A-device" messages.
*/
- if (isp->phy.state == OTG_STATE_B_SRP_INIT)
+ if (isp->phy.otg->state == OTG_STATE_B_SRP_INIT)
b_idle(isp, "srp_timeout");
omap_writew(B_SRP_TMROUT, OTG_IRQ_SRC);
@@ -691,7 +691,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
omap_writel(otg_ctrl, OTG_CTRL);
/* subset of b_peripheral()... */
- isp->phy.state = OTG_STATE_B_PERIPHERAL;
+ isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
pr_debug(" --> b_peripheral\n");
omap_writew(B_HNP_FAIL, OTG_IRQ_SRC);
@@ -703,7 +703,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
state_name(isp), omap_readl(OTG_CTRL));
isp1301_defer_work(isp, WORK_UPDATE_OTG);
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_A_IDLE:
if (!otg->host)
break;
@@ -734,7 +734,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
otg_ctrl |= OTG_BUSDROP;
otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.state = OTG_STATE_A_WAIT_VFALL;
+ isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
omap_writew(A_REQ_TMROUT, OTG_IRQ_SRC);
ret = IRQ_HANDLED;
@@ -748,7 +748,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
otg_ctrl |= OTG_BUSDROP;
otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.state = OTG_STATE_A_VBUS_ERR;
+ isp->phy.otg->state = OTG_STATE_A_VBUS_ERR;
omap_writew(A_VBUS_ERR, OTG_IRQ_SRC);
ret = IRQ_HANDLED;
@@ -769,7 +769,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
/* role is peripheral */
if (otg_ctrl & OTG_DRIVER_SEL) {
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_A_IDLE:
b_idle(isp, __func__);
break;
@@ -786,18 +786,18 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
}
if (otg->host) {
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_B_WAIT_ACON:
- isp->phy.state = OTG_STATE_B_HOST;
+ isp->phy.otg->state = OTG_STATE_B_HOST;
pr_debug(" --> b_host\n");
kick = 1;
break;
case OTG_STATE_A_WAIT_BCON:
- isp->phy.state = OTG_STATE_A_HOST;
+ isp->phy.otg->state = OTG_STATE_A_HOST;
pr_debug(" --> a_host\n");
break;
case OTG_STATE_A_PERIPHERAL:
- isp->phy.state = OTG_STATE_A_WAIT_BCON;
+ isp->phy.otg->state = OTG_STATE_A_WAIT_BCON;
pr_debug(" --> a_wait_bcon\n");
break;
default:
@@ -878,7 +878,6 @@ static struct platform_driver omap_otg_driver = {
.probe = otg_probe,
.remove = otg_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "omap_otg",
},
};
@@ -937,7 +936,7 @@ static void b_peripheral(struct isp1301 *isp)
/* UDC driver just set OTG_BSESSVLD */
isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLUP);
isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLDOWN);
- isp->phy.state = OTG_STATE_B_PERIPHERAL;
+ isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
pr_debug(" --> b_peripheral\n");
dump_regs(isp, "2periph");
#endif
@@ -947,7 +946,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
{
struct usb_otg *otg = isp->phy.otg;
u8 isp_stat, isp_bstat;
- enum usb_otg_state state = isp->phy.state;
+ enum usb_otg_state state = isp->phy.otg->state;
if (stat & INTR_BDIS_ACON)
pr_debug("OTG: BDIS_ACON, %s\n", state_name(isp));
@@ -970,7 +969,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
* when HNP is used.
*/
if (isp_stat & INTR_VBUS_VLD)
- isp->phy.state = OTG_STATE_A_HOST;
+ isp->phy.otg->state = OTG_STATE_A_HOST;
break;
case OTG_STATE_A_WAIT_VFALL:
if (!(isp_stat & INTR_SESS_VLD))
@@ -978,7 +977,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
break;
default:
if (!(isp_stat & INTR_VBUS_VLD))
- isp->phy.state = OTG_STATE_A_VBUS_ERR;
+ isp->phy.otg->state = OTG_STATE_A_VBUS_ERR;
break;
}
isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
@@ -1007,7 +1006,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
if (otg->default_a) {
switch (state) {
default:
- isp->phy.state = OTG_STATE_A_WAIT_VFALL;
+ isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
break;
case OTG_STATE_A_WAIT_VFALL:
state = OTG_STATE_A_IDLE;
@@ -1020,7 +1019,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
host_suspend(isp);
isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1,
MC1_BDIS_ACON_EN);
- isp->phy.state = OTG_STATE_B_IDLE;
+ isp->phy.otg->state = OTG_STATE_B_IDLE;
l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
l &= ~OTG_CTRL_BITS;
omap_writel(l, OTG_CTRL);
@@ -1031,7 +1030,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
}
isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_HOST:
@@ -1071,7 +1070,7 @@ static void isp_update_otg(struct isp1301 *isp, u8 stat)
}
}
- if (state != isp->phy.state)
+ if (state != isp->phy.otg->state)
pr_debug(" isp, %s -> %s\n",
usb_otg_state_string(state), state_name(isp));
@@ -1129,10 +1128,10 @@ isp1301_work(struct work_struct *work)
* skip A_WAIT_VRISE; hc transitions invisibly
* skip A_WAIT_BCON; same.
*/
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_WAIT_VRISE:
- isp->phy.state = OTG_STATE_A_HOST;
+ isp->phy.otg->state = OTG_STATE_A_HOST;
pr_debug(" --> a_host\n");
otg_ctrl = omap_readl(OTG_CTRL);
otg_ctrl |= OTG_A_BUSREQ;
@@ -1141,7 +1140,7 @@ isp1301_work(struct work_struct *work)
omap_writel(otg_ctrl, OTG_CTRL);
break;
case OTG_STATE_B_WAIT_ACON:
- isp->phy.state = OTG_STATE_B_HOST;
+ isp->phy.otg->state = OTG_STATE_B_HOST;
pr_debug(" --> b_host (acon)\n");
break;
case OTG_STATE_B_HOST:
@@ -1275,7 +1274,7 @@ static int isp1301_otg_enable(struct isp1301 *isp)
static int
isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
{
- struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
+ struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
if (isp != the_transceiver)
return -ENODEV;
@@ -1331,7 +1330,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
static int
isp1301_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
- struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
+ struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
if (isp != the_transceiver)
return -ENODEV;
@@ -1368,7 +1367,7 @@ isp1301_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
}
power_up(isp);
- isp->phy.state = OTG_STATE_B_IDLE;
+ isp->phy.otg->state = OTG_STATE_B_IDLE;
if (machine_is_omap_h2() || machine_is_omap_h3())
isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
@@ -1403,7 +1402,7 @@ isp1301_set_power(struct usb_phy *dev, unsigned mA)
{
if (!the_transceiver)
return -ENODEV;
- if (dev->state == OTG_STATE_B_PERIPHERAL)
+ if (dev->otg->state == OTG_STATE_B_PERIPHERAL)
enable_vbus_draw(the_transceiver, mA);
return 0;
}
@@ -1411,10 +1410,10 @@ isp1301_set_power(struct usb_phy *dev, unsigned mA)
static int
isp1301_start_srp(struct usb_otg *otg)
{
- struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
+ struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
u32 otg_ctrl;
- if (isp != the_transceiver || isp->phy.state != OTG_STATE_B_IDLE)
+ if (isp != the_transceiver || isp->phy.otg->state != OTG_STATE_B_IDLE)
return -ENODEV;
otg_ctrl = omap_readl(OTG_CTRL);
@@ -1424,7 +1423,7 @@ isp1301_start_srp(struct usb_otg *otg)
otg_ctrl |= OTG_B_BUSREQ;
otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK;
omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.state = OTG_STATE_B_SRP_INIT;
+ isp->phy.otg->state = OTG_STATE_B_SRP_INIT;
pr_debug("otg: SRP, %s ... %06x\n", state_name(isp),
omap_readl(OTG_CTRL));
@@ -1438,7 +1437,7 @@ static int
isp1301_start_hnp(struct usb_otg *otg)
{
#ifdef CONFIG_USB_OTG
- struct isp1301 *isp = container_of(otg->phy, struct isp1301, phy);
+ struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
u32 l;
if (isp != the_transceiver)
@@ -1452,9 +1451,9 @@ isp1301_start_hnp(struct usb_otg *otg)
/* We want hardware to manage most HNP protocol timings.
* So do this part as early as possible...
*/
- switch (isp->phy.state) {
+ switch (isp->phy.otg->state) {
case OTG_STATE_B_HOST:
- isp->phy.state = OTG_STATE_B_PERIPHERAL;
+ isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
/* caller will suspend next */
break;
case OTG_STATE_A_HOST:
@@ -1583,7 +1582,7 @@ isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
isp->phy.label = DRIVER_NAME;
isp->phy.set_power = isp1301_set_power,
- isp->phy.otg->phy = &isp->phy;
+ isp->phy.otg->usb_phy = &isp->phy;
isp->phy.otg->set_host = isp1301_set_host,
isp->phy.otg->set_peripheral = isp1301_set_peripheral,
isp->phy.otg->start_srp = isp1301_start_srp,
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index f4d722de912b..e0556f7832b5 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -123,7 +123,6 @@ static struct platform_driver keystone_usbphy_driver = {
.remove = keystone_usbphy_remove,
.driver = {
.name = "keystone-usbphy",
- .owner = THIS_MODULE,
.of_match_table = keystone_usbphy_ids,
},
};
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 29be0e654ecc..000fd892455f 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -708,7 +708,7 @@ static void msm_otg_start_host(struct usb_phy *phy, int on)
static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
{
- struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
struct usb_hcd *hcd;
/*
@@ -716,16 +716,16 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
* only peripheral configuration.
*/
if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) {
- dev_info(otg->phy->dev, "Host mode is not supported\n");
+ dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
return -ENODEV;
}
if (!host) {
- if (otg->phy->state == OTG_STATE_A_HOST) {
- pm_runtime_get_sync(otg->phy->dev);
- msm_otg_start_host(otg->phy, 0);
+ if (otg->state == OTG_STATE_A_HOST) {
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_start_host(otg->usb_phy, 0);
otg->host = NULL;
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
schedule_work(&motg->sm_work);
} else {
otg->host = NULL;
@@ -738,14 +738,14 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
hcd->power_budget = motg->pdata->power_budget;
otg->host = host;
- dev_dbg(otg->phy->dev, "host driver registered w/ tranceiver\n");
+ dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
/*
* Kick the state machine work, if peripheral is not supported
* or peripheral is already registered with us.
*/
if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
- pm_runtime_get_sync(otg->phy->dev);
+ pm_runtime_get_sync(otg->usb_phy->dev);
schedule_work(&motg->sm_work);
}
@@ -782,23 +782,23 @@ static void msm_otg_start_peripheral(struct usb_phy *phy, int on)
static int msm_otg_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
- struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
/*
* Fail peripheral registration if this board can support
* only host configuration.
*/
if (motg->pdata->mode == USB_DR_MODE_HOST) {
- dev_info(otg->phy->dev, "Peripheral mode is not supported\n");
+ dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
return -ENODEV;
}
if (!gadget) {
- if (otg->phy->state == OTG_STATE_B_PERIPHERAL) {
- pm_runtime_get_sync(otg->phy->dev);
- msm_otg_start_peripheral(otg->phy, 0);
+ if (otg->state == OTG_STATE_B_PERIPHERAL) {
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ msm_otg_start_peripheral(otg->usb_phy, 0);
otg->gadget = NULL;
- otg->phy->state = OTG_STATE_UNDEFINED;
+ otg->state = OTG_STATE_UNDEFINED;
schedule_work(&motg->sm_work);
} else {
otg->gadget = NULL;
@@ -807,14 +807,15 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
return 0;
}
otg->gadget = gadget;
- dev_dbg(otg->phy->dev, "peripheral driver registered w/ tranceiver\n");
+ dev_dbg(otg->usb_phy->dev,
+ "peripheral driver registered w/ tranceiver\n");
/*
* Kick the state machine work, if host is not supported
* or host is already registered with us.
*/
if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
- pm_runtime_get_sync(otg->phy->dev);
+ pm_runtime_get_sync(otg->usb_phy->dev);
schedule_work(&motg->sm_work);
}
@@ -1170,20 +1171,20 @@ static void msm_otg_sm_work(struct work_struct *w)
struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
struct usb_otg *otg = motg->phy.otg;
- switch (otg->phy->state) {
+ switch (otg->state) {
case OTG_STATE_UNDEFINED:
- dev_dbg(otg->phy->dev, "OTG_STATE_UNDEFINED state\n");
- msm_otg_reset(otg->phy);
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_UNDEFINED state\n");
+ msm_otg_reset(otg->usb_phy);
msm_otg_init_sm(motg);
- otg->phy->state = OTG_STATE_B_IDLE;
+ otg->state = OTG_STATE_B_IDLE;
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
- dev_dbg(otg->phy->dev, "OTG_STATE_B_IDLE state\n");
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_IDLE state\n");
if (!test_bit(ID, &motg->inputs) && otg->host) {
/* disable BSV bit */
writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
- msm_otg_start_host(otg->phy, 1);
- otg->phy->state = OTG_STATE_A_HOST;
+ msm_otg_start_host(otg->usb_phy, 1);
+ otg->state = OTG_STATE_A_HOST;
} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
switch (motg->chg_state) {
case USB_CHG_STATE_UNDEFINED:
@@ -1198,14 +1199,16 @@ static void msm_otg_sm_work(struct work_struct *w)
case USB_CDP_CHARGER:
msm_otg_notify_charger(motg,
IDEV_CHG_MAX);
- msm_otg_start_peripheral(otg->phy, 1);
- otg->phy->state
+ msm_otg_start_peripheral(otg->usb_phy,
+ 1);
+ otg->state
= OTG_STATE_B_PERIPHERAL;
break;
case USB_SDP_CHARGER:
msm_otg_notify_charger(motg, IUNIT);
- msm_otg_start_peripheral(otg->phy, 1);
- otg->phy->state
+ msm_otg_start_peripheral(otg->usb_phy,
+ 1);
+ otg->state
= OTG_STATE_B_PERIPHERAL;
break;
default:
@@ -1222,36 +1225,36 @@ static void msm_otg_sm_work(struct work_struct *w)
* is incremented in charger detection work.
*/
if (cancel_delayed_work_sync(&motg->chg_work)) {
- pm_runtime_put_sync(otg->phy->dev);
- msm_otg_reset(otg->phy);
+ pm_runtime_put_sync(otg->usb_phy->dev);
+ msm_otg_reset(otg->usb_phy);
}
msm_otg_notify_charger(motg, 0);
motg->chg_state = USB_CHG_STATE_UNDEFINED;
motg->chg_type = USB_INVALID_CHARGER;
}
- if (otg->phy->state == OTG_STATE_B_IDLE)
- pm_runtime_put_sync(otg->phy->dev);
+ if (otg->state == OTG_STATE_B_IDLE)
+ pm_runtime_put_sync(otg->usb_phy->dev);
break;
case OTG_STATE_B_PERIPHERAL:
- dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
if (!test_bit(B_SESS_VLD, &motg->inputs) ||
!test_bit(ID, &motg->inputs)) {
msm_otg_notify_charger(motg, 0);
- msm_otg_start_peripheral(otg->phy, 0);
+ msm_otg_start_peripheral(otg->usb_phy, 0);
motg->chg_state = USB_CHG_STATE_UNDEFINED;
motg->chg_type = USB_INVALID_CHARGER;
- otg->phy->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->phy);
+ otg->state = OTG_STATE_B_IDLE;
+ msm_otg_reset(otg->usb_phy);
schedule_work(w);
}
break;
case OTG_STATE_A_HOST:
- dev_dbg(otg->phy->dev, "OTG_STATE_A_HOST state\n");
+ dev_dbg(otg->usb_phy->dev, "OTG_STATE_A_HOST state\n");
if (test_bit(ID, &motg->inputs)) {
- msm_otg_start_host(otg->phy, 0);
- otg->phy->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->phy);
+ msm_otg_start_host(otg->usb_phy, 0);
+ otg->state = OTG_STATE_B_IDLE;
+ msm_otg_reset(otg->usb_phy);
schedule_work(w);
}
break;
@@ -1303,7 +1306,7 @@ static int msm_otg_mode_show(struct seq_file *s, void *unused)
struct msm_otg *motg = s->private;
struct usb_otg *otg = motg->phy.otg;
- switch (otg->phy->state) {
+ switch (otg->state) {
case OTG_STATE_A_HOST:
seq_puts(s, "host\n");
break;
@@ -1353,7 +1356,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
switch (req_mode) {
case USB_DR_MODE_UNKNOWN:
- switch (otg->phy->state) {
+ switch (otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_B_PERIPHERAL:
set_bit(ID, &motg->inputs);
@@ -1364,7 +1367,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
}
break;
case USB_DR_MODE_PERIPHERAL:
- switch (otg->phy->state) {
+ switch (otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_A_HOST:
set_bit(ID, &motg->inputs);
@@ -1375,7 +1378,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
}
break;
case USB_DR_MODE_HOST:
- switch (otg->phy->state) {
+ switch (otg->state) {
case OTG_STATE_B_IDLE:
case OTG_STATE_B_PERIPHERAL:
clear_bit(ID, &motg->inputs);
@@ -1388,7 +1391,7 @@ static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
goto out;
}
- pm_runtime_get_sync(otg->phy->dev);
+ pm_runtime_get_sync(otg->usb_phy->dev);
schedule_work(&motg->sm_work);
out:
return status;
@@ -1505,10 +1508,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
}
pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
- if (!pdata->phy_init_seq) {
- dev_warn(&pdev->dev, "No space for PHY init sequence\n");
+ if (!pdata->phy_init_seq)
return 0;
- }
ret = of_property_read_u32_array(node, "qcom,phy-init-sequence",
pdata->phy_init_seq, words);
@@ -1530,10 +1531,8 @@ static int msm_otg_probe(struct platform_device *pdev)
void __iomem *phy_select;
motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL);
- if (!motg) {
- dev_err(&pdev->dev, "unable to allocate msm_otg\n");
+ if (!motg)
return -ENOMEM;
- }
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
@@ -1546,10 +1545,8 @@ static int msm_otg_probe(struct platform_device *pdev)
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
- if (!motg->phy.otg) {
- dev_err(&pdev->dev, "unable to allocate msm_otg\n");
+ if (!motg->phy.otg)
return -ENOMEM;
- }
phy = &motg->phy;
phy->dev = &pdev->dev;
@@ -1674,7 +1671,7 @@ static int msm_otg_probe(struct platform_device *pdev)
phy->io_ops = &msm_otg_io_ops;
- phy->otg->phy = &motg->phy;
+ phy->otg->usb_phy = &motg->phy;
phy->otg->set_host = msm_otg_set_host;
phy->otg->set_peripheral = msm_otg_set_peripheral;
@@ -1775,7 +1772,7 @@ static int msm_otg_runtime_idle(struct device *dev)
* This 1 sec delay also prevents entering into LPM immediately
* after asynchronous interrupt.
*/
- if (otg->phy->state != OTG_STATE_UNDEFINED)
+ if (otg->state != OTG_STATE_UNDEFINED)
pm_schedule_suspend(dev, 1000);
return -EAGAIN;
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 7d80c54f0ac6..699e38c73d82 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -56,7 +56,7 @@ static char *state_string[] = {
static int mv_otg_set_vbus(struct usb_otg *otg, bool on)
{
- struct mv_otg *mvotg = container_of(otg->phy, struct mv_otg, phy);
+ struct mv_otg *mvotg = container_of(otg->usb_phy, struct mv_otg, phy);
if (mvotg->pdata->set_vbus == NULL)
return -ENODEV;
@@ -339,68 +339,68 @@ static void mv_otg_update_state(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
struct usb_phy *phy = &mvotg->phy;
- int old_state = phy->state;
+ int old_state = mvotg->phy.otg->state;
switch (old_state) {
case OTG_STATE_UNDEFINED:
- phy->state = OTG_STATE_B_IDLE;
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
if (otg_ctrl->id == 0)
- phy->state = OTG_STATE_A_IDLE;
+ mvotg->phy.otg->state = OTG_STATE_A_IDLE;
else if (otg_ctrl->b_sess_vld)
- phy->state = OTG_STATE_B_PERIPHERAL;
+ mvotg->phy.otg->state = OTG_STATE_B_PERIPHERAL;
break;
case OTG_STATE_B_PERIPHERAL:
if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
- phy->state = OTG_STATE_B_IDLE;
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
break;
case OTG_STATE_A_IDLE:
if (otg_ctrl->id)
- phy->state = OTG_STATE_B_IDLE;
+ mvotg->phy.otg->state = OTG_STATE_B_IDLE;
else if (!(otg_ctrl->a_bus_drop) &&
(otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
- phy->state = OTG_STATE_A_WAIT_VRISE;
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
break;
case OTG_STATE_A_WAIT_VRISE:
if (otg_ctrl->a_vbus_vld)
- phy->state = OTG_STATE_A_WAIT_BCON;
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
break;
case OTG_STATE_A_WAIT_BCON:
if (otg_ctrl->id || otg_ctrl->a_bus_drop
|| otg_ctrl->a_wait_bcon_timeout) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- phy->state = OTG_STATE_A_WAIT_VFALL;
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
otg_ctrl->a_bus_req = 0;
} else if (!otg_ctrl->a_vbus_vld) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- phy->state = OTG_STATE_A_VBUS_ERR;
+ mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
} else if (otg_ctrl->b_conn) {
mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- phy->state = OTG_STATE_A_HOST;
+ mvotg->phy.otg->state = OTG_STATE_A_HOST;
}
break;
case OTG_STATE_A_HOST:
if (otg_ctrl->id || !otg_ctrl->b_conn
|| otg_ctrl->a_bus_drop)
- phy->state = OTG_STATE_A_WAIT_BCON;
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
else if (!otg_ctrl->a_vbus_vld)
- phy->state = OTG_STATE_A_VBUS_ERR;
+ mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
break;
case OTG_STATE_A_WAIT_VFALL:
if (otg_ctrl->id
|| (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
|| otg_ctrl->a_bus_req)
- phy->state = OTG_STATE_A_IDLE;
+ mvotg->phy.otg->state = OTG_STATE_A_IDLE;
break;
case OTG_STATE_A_VBUS_ERR:
if (otg_ctrl->id || otg_ctrl->a_clr_err
|| otg_ctrl->a_bus_drop) {
otg_ctrl->a_clr_err = 0;
- phy->state = OTG_STATE_A_WAIT_VFALL;
+ mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
}
break;
default:
@@ -420,8 +420,8 @@ static void mv_otg_work(struct work_struct *work)
run:
/* work queue is single thread, or we need spin_lock to protect */
phy = &mvotg->phy;
- otg = phy->otg;
- old_state = phy->state;
+ otg = mvotg->phy.otg;
+ old_state = otg->state;
if (!mvotg->active)
return;
@@ -429,22 +429,24 @@ run:
mv_otg_update_inputs(mvotg);
mv_otg_update_state(mvotg);
- if (old_state != phy->state) {
+ if (old_state != mvotg->phy.otg->state) {
dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
state_string[old_state],
- state_string[phy->state]);
+ state_string[mvotg->phy.otg->state]);
- switch (phy->state) {
+ switch (mvotg->phy.otg->state) {
case OTG_STATE_B_IDLE:
otg->default_a = 0;
if (old_state == OTG_STATE_B_PERIPHERAL)
mv_otg_start_periphrals(mvotg, 0);
mv_otg_reset(mvotg);
mv_otg_disable(mvotg);
+ usb_phy_set_event(&mvotg->phy, USB_EVENT_NONE);
break;
case OTG_STATE_B_PERIPHERAL:
mv_otg_enable(mvotg);
mv_otg_start_periphrals(mvotg, 1);
+ usb_phy_set_event(&mvotg->phy, USB_EVENT_ENUMERATED);
break;
case OTG_STATE_A_IDLE:
otg->default_a = 1;
@@ -545,8 +547,8 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
return -1;
/* We will use this interface to change to A device */
- if (mvotg->phy.state != OTG_STATE_B_IDLE
- && mvotg->phy.state != OTG_STATE_A_IDLE)
+ if (mvotg->phy.otg->state != OTG_STATE_B_IDLE
+ && mvotg->phy.otg->state != OTG_STATE_A_IDLE)
return -1;
/* The clock may disabled and we need to set irq for ID detected */
@@ -686,10 +688,8 @@ static int mv_otg_probe(struct platform_device *pdev)
}
mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL);
- if (!mvotg) {
- dev_err(&pdev->dev, "failed to allocate memory!\n");
+ if (!mvotg)
return -ENOMEM;
- }
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
@@ -717,9 +717,9 @@ static int mv_otg_probe(struct platform_device *pdev)
mvotg->phy.dev = &pdev->dev;
mvotg->phy.otg = otg;
mvotg->phy.label = driver_name;
- mvotg->phy.state = OTG_STATE_UNDEFINED;
- otg->phy = &mvotg->phy;
+ otg->state = OTG_STATE_UNDEFINED;
+ otg->usb_phy = &mvotg->phy;
otg->set_host = mv_otg_set_host;
otg->set_peripheral = mv_otg_set_peripheral;
otg->set_vbus = mv_otg_set_vbus;
@@ -896,7 +896,6 @@ static struct platform_driver mv_otg_driver = {
.probe = mv_otg_probe,
.remove = mv_otg_remove,
.driver = {
- .owner = THIS_MODULE,
.name = driver_name,
},
#ifdef CONFIG_PM
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 0e0c41587a08..b9589f663683 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -390,10 +390,8 @@ static int mxs_phy_probe(struct platform_device *pdev)
}
mxs_phy = devm_kzalloc(&pdev->dev, sizeof(*mxs_phy), GFP_KERNEL);
- if (!mxs_phy) {
- dev_err(&pdev->dev, "Failed to allocate USB PHY structure!\n");
+ if (!mxs_phy)
return -ENOMEM;
- }
/* Some SoCs don't have anatop registers */
if (of_get_property(np, "fsl,anatop", NULL)) {
@@ -491,7 +489,6 @@ static struct platform_driver mxs_phy_driver = {
.remove = mxs_phy_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = mxs_phy_dt_ids,
.pm = &mxs_phy_pm,
},
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 11598cdb3189..56ee7603034b 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -158,7 +158,6 @@ static struct platform_driver omap_otg_driver = {
.probe = omap_otg_probe,
.remove = omap_otg_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "omap_otg",
},
};
diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c
index 388d89f6b141..f83808413ba2 100644
--- a/drivers/usb/phy/phy-rcar-gen2-usb.c
+++ b/drivers/usb/phy/phy-rcar-gen2-usb.c
@@ -195,10 +195,8 @@ static int rcar_gen2_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(base);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "Memory allocation failed\n");
+ if (!priv)
return -ENOMEM;
- }
spin_lock_init(&priv->lock);
priv->clk = clk;
diff --git a/drivers/usb/phy/phy-rcar-usb.c b/drivers/usb/phy/phy-rcar-usb.c
index 33265a5b2cdf..1e09b8377885 100644
--- a/drivers/usb/phy/phy-rcar-usb.c
+++ b/drivers/usb/phy/phy-rcar-usb.c
@@ -195,17 +195,13 @@ static int rcar_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(reg0);
res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res1) {
- reg1 = devm_ioremap_resource(dev, res1);
- if (IS_ERR(reg1))
- return PTR_ERR(reg1);
- }
+ reg1 = devm_ioremap_resource(dev, res1);
+ if (IS_ERR(reg1))
+ return PTR_ERR(reg1);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "priv data allocation error\n");
+ if (!priv)
return -ENOMEM;
- }
priv->reg0 = reg0;
priv->reg1 = reg1;
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index cc61ee44b911..845f658276b1 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -81,33 +81,35 @@ static void check_vbus_state(struct tahvo_usb *tu)
reg = retu_read(rdev, TAHVO_REG_IDSR);
if (reg & TAHVO_STAT_VBUS) {
- switch (tu->phy.state) {
+ switch (tu->phy.otg->state) {
case OTG_STATE_B_IDLE:
/* Enable the gadget driver */
if (tu->phy.otg->gadget)
usb_gadget_vbus_connect(tu->phy.otg->gadget);
- tu->phy.state = OTG_STATE_B_PERIPHERAL;
+ tu->phy.otg->state = OTG_STATE_B_PERIPHERAL;
+ usb_phy_set_event(&tu->phy, USB_EVENT_ENUMERATED);
break;
case OTG_STATE_A_IDLE:
/*
* Session is now valid assuming the USB hub is driving
* Vbus.
*/
- tu->phy.state = OTG_STATE_A_HOST;
+ tu->phy.otg->state = OTG_STATE_A_HOST;
break;
default:
break;
}
dev_info(&tu->pt_dev->dev, "USB cable connected\n");
} else {
- switch (tu->phy.state) {
+ switch (tu->phy.otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
- tu->phy.state = OTG_STATE_B_IDLE;
+ tu->phy.otg->state = OTG_STATE_B_IDLE;
+ usb_phy_set_event(&tu->phy, USB_EVENT_NONE);
break;
case OTG_STATE_A_HOST:
- tu->phy.state = OTG_STATE_A_IDLE;
+ tu->phy.otg->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -132,14 +134,14 @@ static void tahvo_usb_become_host(struct tahvo_usb *tu)
/* Power up the transceiver in USB host mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
USBR_MASTER_SW2 | USBR_MASTER_SW1);
- tu->phy.state = OTG_STATE_A_IDLE;
+ tu->phy.otg->state = OTG_STATE_A_IDLE;
check_vbus_state(tu);
}
static void tahvo_usb_stop_host(struct tahvo_usb *tu)
{
- tu->phy.state = OTG_STATE_A_IDLE;
+ tu->phy.otg->state = OTG_STATE_A_IDLE;
}
static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
@@ -151,7 +153,7 @@ static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
/* Power up transceiver and set it in USB peripheral mode */
retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
USBR_NSUSPEND | USBR_SLAVE_SW);
- tu->phy.state = OTG_STATE_B_IDLE;
+ tu->phy.otg->state = OTG_STATE_B_IDLE;
check_vbus_state(tu);
}
@@ -160,7 +162,7 @@ static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
{
if (tu->phy.otg->gadget)
usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
- tu->phy.state = OTG_STATE_B_IDLE;
+ tu->phy.otg->state = OTG_STATE_B_IDLE;
}
static void tahvo_usb_power_off(struct tahvo_usb *tu)
@@ -173,7 +175,7 @@ static void tahvo_usb_power_off(struct tahvo_usb *tu)
/* Power off transceiver */
retu_write(rdev, TAHVO_REG_USBR, 0);
- tu->phy.state = OTG_STATE_UNDEFINED;
+ tu->phy.otg->state = OTG_STATE_UNDEFINED;
}
static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
@@ -196,7 +198,8 @@ static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
- struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);
+ struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
+ phy);
dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host);
@@ -225,7 +228,8 @@ static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
static int tahvo_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
- struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);
+ struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
+ phy);
dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget);
@@ -379,11 +383,11 @@ static int tahvo_usb_probe(struct platform_device *pdev)
/* Create OTG interface */
tahvo_usb_power_off(tu);
tu->phy.dev = &pdev->dev;
- tu->phy.state = OTG_STATE_UNDEFINED;
+ tu->phy.otg->state = OTG_STATE_UNDEFINED;
tu->phy.label = DRIVER_NAME;
tu->phy.set_suspend = tahvo_usb_set_suspend;
- tu->phy.otg->phy = &tu->phy;
+ tu->phy.otg->usb_phy = &tu->phy;
tu->phy.otg->set_host = tahvo_usb_set_host;
tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;
@@ -446,7 +450,6 @@ static struct platform_driver tahvo_usb_driver = {
.remove = tahvo_usb_remove,
.driver = {
.name = "tahvo-usb",
- .owner = THIS_MODULE,
},
};
module_platform_driver(tahvo_usb_driver);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 886f1807a67b..ab025b00964c 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -880,11 +880,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config),
GFP_KERNEL);
- if (!tegra_phy->config) {
- dev_err(&pdev->dev,
- "unable to allocate memory for USB UTMIP config\n");
+ if (!tegra_phy->config)
return -ENOMEM;
- }
config = tegra_phy->config;
@@ -979,10 +976,8 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
- if (!tegra_phy) {
- dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
+ if (!tegra_phy)
return -ENOMEM;
- }
match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
if (!match) {
@@ -1086,7 +1081,6 @@ static struct platform_driver tegra_usb_phy_driver = {
.remove = tegra_usb_phy_remove,
.driver = {
.name = "tegra-phy",
- .owner = THIS_MODULE,
.of_match_table = tegra_usb_phy_id_table,
},
};
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 44ea082e40dc..12741856a75c 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -430,7 +430,6 @@ static struct platform_driver twl6030_usb_driver = {
.remove = twl6030_usb_remove,
.driver = {
.name = "twl6030_usb",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(twl6030_usb_id_table),
},
};
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index 4e3877c329f2..f48a7a21e3c2 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -211,7 +211,7 @@ static int ulpi_init(struct usb_phy *phy)
static int ulpi_set_host(struct usb_otg *otg, struct usb_bus *host)
{
- struct usb_phy *phy = otg->phy;
+ struct usb_phy *phy = otg->usb_phy;
unsigned int flags = usb_phy_io_read(phy, ULPI_IFC_CTRL);
if (!host) {
@@ -237,7 +237,7 @@ static int ulpi_set_host(struct usb_otg *otg, struct usb_bus *host)
static int ulpi_set_vbus(struct usb_otg *otg, bool on)
{
- struct usb_phy *phy = otg->phy;
+ struct usb_phy *phy = otg->usb_phy;
unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL);
flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
@@ -276,7 +276,7 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
phy->otg = otg;
phy->init = ulpi_init;
- otg->phy = phy;
+ otg->usb_phy = phy;
otg->set_host = ulpi_set_host;
otg->set_vbus = ulpi_set_vbus;
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 045cd309367a..b4066a001ba0 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -191,7 +191,9 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
phy = __of_usb_find_phy(node);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- phy = ERR_PTR(-EPROBE_DEFER);
+ if (!IS_ERR(phy))
+ phy = ERR_PTR(-EPROBE_DEFER);
+
devres_free(ptr);
goto err1;
}
@@ -444,3 +446,15 @@ int usb_bind_phy(const char *dev_name, u8 index,
return 0;
}
EXPORT_SYMBOL_GPL(usb_bind_phy);
+
+/**
+ * usb_phy_set_event - set the last event on a phy
+ * @x: the phy returned by usb_get_phy()
+ * @event: the USB_EVENT_* value to record
+ *
+ * This stores @event as the phy's last_event, so that interested
+ * drivers can later query the most recent event seen on the phy.
+ */
+void usb_phy_set_event(struct usb_phy *x, unsigned long event)
+{
+ x->last_event = event;
+}
+EXPORT_SYMBOL_GPL(usb_phy_set_event);
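A minimal sketch of how the new helper pairs with the existing last_event field, modelled on the callers added elsewhere in this patch; it assumes the matching declaration of usb_phy_set_event() in <linux/usb/phy.h>, and the example_* function names are purely illustrative.

#include <linux/err.h>
#include <linux/usb/phy.h>

/* transceiver side: record that the gadget has been enumerated */
static void example_note_enumerated(struct usb_phy *phy)
{
        usb_phy_set_event(phy, USB_EVENT_ENUMERATED);
}

/* consumer side: query the most recent event recorded on the phy */
static bool example_phy_enumerated(void)
{
        struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);
        bool ret;

        if (IS_ERR(phy))
                return false;

        ret = (phy->last_event == USB_EVENT_ENUMERATED);
        usb_put_phy(phy);

        return ret;
}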
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index b3b6813ab270..371478704899 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -126,13 +126,15 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
- u16 val = DPRPU | HSE | USBE;
+ u16 val = HSE | USBE;
/*
* if enable
*
* - select Function mode
- * - D+ Line Pull-up
+ * - D+ Line Pull-up is disabled here;
+ * to enable the D+ Line Pull-up,
+ * call usbhs_sys_function_pullup(priv, 1)
*/
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
@@ -496,20 +498,18 @@ static int usbhs_probe(struct platform_device *pdev)
}
/* platform data */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res || !irq_res) {
+ if (!irq_res) {
dev_err(&pdev->dev, "Not enough Renesas USB platform resources.\n");
return -ENODEV;
}
/* usb private data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "Could not allocate priv\n");
+ if (!priv)
return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index a7996da6a1bd..0427cdd1a483 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -102,6 +102,10 @@ struct usbhs_priv;
#define DEVADD8 0x00E0
#define DEVADD9 0x00E2
#define DEVADDA 0x00E4
+#define D2FIFOSEL 0x00F0 /* for R-Car Gen2 */
+#define D2FIFOCTR 0x00F2 /* for R-Car Gen2 */
+#define D3FIFOSEL 0x00F4 /* for R-Car Gen2 */
+#define D3FIFOCTR 0x00F6 /* for R-Car Gen2 */
/* SYSCFG */
#define SCKE (1 << 10) /* USB Module Clock Enable */
@@ -269,7 +273,8 @@ struct usbhs_priv {
*/
struct usbhs_fifo_info fifo_info;
- struct usb_phy *phy;
+ struct usb_phy *usb_phy;
+ struct phy *phy;
};
/*
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b0c97a3f1bfe..f46271ce1b15 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -21,8 +21,6 @@
#include "pipe.h"
#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
-#define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo))
-#define usbhsf_get_d1fifo(p) (&((p)->fifo_info.d1fifo))
#define usbhsf_is_cfifo(p, f) (usbhsf_get_cfifo(p) == f)
#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
@@ -577,14 +575,6 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
- /*
- * Transmission end
- */
- if (*is_done) {
- if (usbhs_pipe_is_dcp(pipe))
- usbhs_dcp_control_transfer_done(pipe);
- }
-
usbhsf_fifo_unselect(pipe, fifo);
return 0;
@@ -722,14 +712,6 @@ usbhs_fifo_read_end:
usbhs_pipe_number(pipe),
pkt->length, pkt->actual, *is_done, pkt->zero);
- /*
- * Transmission end
- */
- if (*is_done) {
- if (usbhs_pipe_is_dcp(pipe))
- usbhs_dcp_control_transfer_done(pipe);
- }
-
usbhs_fifo_read_busy:
usbhsf_fifo_unselect(pipe, fifo);
@@ -777,18 +759,13 @@ static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
struct usbhs_fifo *fifo;
+ int i;
- /* DMA :: D0FIFO */
- fifo = usbhsf_get_d0fifo(priv);
- if (usbhsf_dma_chan_get(fifo, pkt) &&
- !usbhsf_fifo_is_busy(fifo))
- return fifo;
-
- /* DMA :: D1FIFO */
- fifo = usbhsf_get_d1fifo(priv);
- if (usbhsf_dma_chan_get(fifo, pkt) &&
- !usbhsf_fifo_is_busy(fifo))
- return fifo;
+ usbhs_for_each_dfifo(priv, fifo, i) {
+ if (usbhsf_dma_chan_get(fifo, pkt) &&
+ !usbhsf_fifo_is_busy(fifo))
+ return fifo;
+ }
return NULL;
}
@@ -1176,6 +1153,24 @@ static void usbhsf_dma_complete(void *arg)
usbhs_pipe_number(pipe), ret);
}
+void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
+
+ /* clear DCP FIFO of transmission */
+ if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
+ return;
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ /* clear DCP FIFO of reception */
+ if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
+ return;
+ usbhsf_fifo_clear(pipe, fifo);
+ usbhsf_fifo_unselect(pipe, fifo);
+}
+
/*
* fifo init
*/
@@ -1183,8 +1178,8 @@ void usbhs_fifo_init(struct usbhs_priv *priv)
{
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
- struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
- struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
+ struct usbhs_fifo *dfifo;
+ int i;
mod->irq_empty = usbhsf_irq_empty;
mod->irq_ready = usbhsf_irq_ready;
@@ -1192,8 +1187,8 @@ void usbhs_fifo_init(struct usbhs_priv *priv)
mod->irq_brdysts = 0;
cfifo->pipe = NULL;
- d0fifo->pipe = NULL;
- d1fifo->pipe = NULL;
+ usbhs_for_each_dfifo(priv, dfifo, i)
+ dfifo->pipe = NULL;
}
void usbhs_fifo_quit(struct usbhs_priv *priv)
@@ -1206,6 +1201,25 @@ void usbhs_fifo_quit(struct usbhs_priv *priv)
mod->irq_brdysts = 0;
}
+#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port) \
+do { \
+ fifo = usbhsf_get_dnfifo(priv, channel); \
+ fifo->name = "D"#channel"FIFO"; \
+ fifo->port = fifo_port; \
+ fifo->sel = D##channel##FIFOSEL; \
+ fifo->ctr = D##channel##FIFOCTR; \
+ fifo->tx_slave.shdma_slave.slave_id = \
+ usbhs_get_dparam(priv, d##channel##_tx_id); \
+ fifo->rx_slave.shdma_slave.slave_id = \
+ usbhs_get_dparam(priv, d##channel##_rx_id); \
+ usbhsf_dma_init(priv, fifo); \
+} while (0)
+
+#define USBHS_DFIFO_INIT(priv, fifo, channel) \
+ __USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
+#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel) \
+ __USBHS_DFIFO_INIT(priv, fifo, channel, 0)
+
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
struct usbhs_fifo *fifo;
@@ -1217,31 +1231,20 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
fifo->sel = CFIFOSEL;
fifo->ctr = CFIFOCTR;
- /* D0FIFO */
- fifo = usbhsf_get_d0fifo(priv);
- fifo->name = "D0FIFO";
- fifo->port = D0FIFO;
- fifo->sel = D0FIFOSEL;
- fifo->ctr = D0FIFOCTR;
- fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
- fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
- usbhsf_dma_init(priv, fifo);
-
- /* D1FIFO */
- fifo = usbhsf_get_d1fifo(priv);
- fifo->name = "D1FIFO";
- fifo->port = D1FIFO;
- fifo->sel = D1FIFOSEL;
- fifo->ctr = D1FIFOCTR;
- fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
- fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
- usbhsf_dma_init(priv, fifo);
+ /* DFIFO */
+ USBHS_DFIFO_INIT(priv, fifo, 0);
+ USBHS_DFIFO_INIT(priv, fifo, 1);
+ USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
+ USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);
return 0;
}
void usbhs_fifo_remove(struct usbhs_priv *priv)
{
- usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
- usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
+ struct usbhs_fifo *fifo;
+ int i;
+
+ usbhs_for_each_dfifo(priv, fifo, i)
+ usbhsf_dma_quit(priv, fifo);
}
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index a168a1760fce..f07037c1185f 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -38,11 +38,16 @@ struct usbhs_fifo {
struct sh_dmae_slave rx_slave;
};
+#define USBHS_MAX_NUM_DFIFO 4
struct usbhs_fifo_info {
struct usbhs_fifo cfifo;
- struct usbhs_fifo d0fifo;
- struct usbhs_fifo d1fifo;
+ struct usbhs_fifo dfifo[USBHS_MAX_NUM_DFIFO];
};
+#define usbhsf_get_dnfifo(p, n) (&((p)->fifo_info.dfifo[n]))
+#define usbhs_for_each_dfifo(priv, dfifo, i) \
+ for ((i) = 0, dfifo = usbhsf_get_dnfifo(priv, (i)); \
+ ((i) < USBHS_MAX_NUM_DFIFO); \
+ (i)++, dfifo = usbhsf_get_dnfifo(priv, (i)))
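For reference, a short sketch of how the new iterator is meant to be used; it mirrors the converted loops in fifo.c, assumes the driver's common.h/fifo.h definitions are in scope, and example_count_busy_dfifos() is an illustrative name only.

/* walk all four DFIFO slots; dfifo and i are the loop cursors */
static int example_count_busy_dfifos(struct usbhs_priv *priv)
{
        struct usbhs_fifo *dfifo;
        int i, busy = 0;

        usbhs_for_each_dfifo(priv, dfifo, i)
                if (dfifo->pipe)        /* same test as usbhsf_fifo_is_busy() */
                        busy++;

        return busy;
}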
struct usbhs_pkt_handle;
struct usbhs_pkt {
@@ -74,6 +79,7 @@ int usbhs_fifo_probe(struct usbhs_priv *priv);
void usbhs_fifo_remove(struct usbhs_priv *priv);
void usbhs_fifo_init(struct usbhs_priv *priv);
void usbhs_fifo_quit(struct usbhs_priv *priv);
+void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe);
/*
* packet info
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 2d17c10a0428..8697e6efcabf 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -56,6 +56,7 @@ struct usbhsg_gpriv {
#define USBHSG_STATUS_REGISTERD (1 << 1)
#define USBHSG_STATUS_WEDGE (1 << 2)
#define USBHSG_STATUS_SELF_POWERED (1 << 3)
+#define USBHSG_STATUS_SOFT_CONNECT (1 << 4)
};
struct usbhsg_recip_handle {
@@ -484,6 +485,9 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
case NODATA_STATUS_STAGE:
pipe->handler = &usbhs_ctrl_stage_end_handler;
break;
+ case READ_STATUS_STAGE:
+ case WRITE_STATUS_STAGE:
+ usbhs_dcp_control_transfer_done(pipe);
default:
return ret;
}
@@ -602,6 +606,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe)
+ return -EINVAL;
+
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -723,6 +730,25 @@ static struct usb_ep_ops usbhsg_ep_ops = {
};
/*
+ * pullup control
+ */
+static int usbhsg_can_pullup(struct usbhs_priv *priv)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ return gpriv->driver &&
+ usbhsg_status_has(gpriv, USBHSG_STATUS_SOFT_CONNECT);
+}
+
+static void usbhsg_update_pullup(struct usbhs_priv *priv)
+{
+ if (usbhsg_can_pullup(priv))
+ usbhs_sys_function_pullup(priv, 1);
+ else
+ usbhs_sys_function_pullup(priv, 0);
+}
+
+/*
* usb module start/end
*/
static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
@@ -756,9 +782,9 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
/*
* pipe initialize and enable DCP
*/
+ usbhs_fifo_init(priv);
usbhs_pipe_init(priv,
usbhsg_dma_map_ctrl);
- usbhs_fifo_init(priv);
/* dcp init instead of usbhsg_ep_enable() */
dcp->pipe = usbhs_dcp_malloc(priv);
@@ -772,6 +798,7 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
* - usb module
*/
usbhs_sys_function_ctrl(priv, 1);
+ usbhsg_update_pullup(priv);
/*
* enable irq callback
@@ -851,8 +878,7 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
}
-static int usbhsg_gadget_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
+static int usbhsg_gadget_stop(struct usb_gadget *gadget)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
@@ -878,8 +904,15 @@ static int usbhsg_pullup(struct usb_gadget *gadget, int is_on)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ unsigned long flags;
- usbhs_sys_function_pullup(priv, is_on);
+ usbhs_lock(priv, flags);
+ if (is_on)
+ usbhsg_status_set(gpriv, USBHSG_STATUS_SOFT_CONNECT);
+ else
+ usbhsg_status_clr(gpriv, USBHSG_STATUS_SOFT_CONNECT);
+ usbhsg_update_pullup(priv);
+ usbhs_unlock(priv, flags);
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 10e1ded9c9cc..f0d323125871 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1474,9 +1474,9 @@ static int usbhsh_start(struct usbhs_priv *priv)
/*
* pipe initialize and enable DCP
*/
+ usbhs_fifo_init(priv);
usbhs_pipe_init(priv,
usbhsh_dma_map_ctrl);
- usbhs_fifo_init(priv);
usbhsh_pipe_init_for_host(priv);
/*
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 040bcefcb040..007f45abe96c 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -618,8 +618,12 @@ void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
void usbhs_pipe_clear(struct usbhs_pipe *pipe)
{
- usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
- usbhsp_pipectrl_set(pipe, ACLRM, 0);
+ if (usbhs_pipe_is_dcp(pipe)) {
+ usbhs_fifo_clear_dcp(pipe);
+ } else {
+ usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
+ usbhsp_pipectrl_set(pipe, ACLRM, 0);
+ }
}
static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index e6b9dcc1c289..8fc15c0ba339 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -12,6 +12,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/usb/phy.h>
#include "common.h"
@@ -20,25 +21,43 @@
static int usbhs_rcar2_hardware_init(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- struct usb_phy *phy;
- phy = usb_get_phy_dev(&pdev->dev, 0);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
+ if (IS_ENABLED(CONFIG_GENERIC_PHY)) {
+ struct phy *phy = phy_get(&pdev->dev, "usb");
- priv->phy = phy;
- return 0;
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ priv->phy = phy;
+ return 0;
+ }
+
+ if (IS_ENABLED(CONFIG_USB_PHY)) {
+ struct usb_phy *usb_phy = usb_get_phy_dev(&pdev->dev, 0);
+
+ if (IS_ERR(usb_phy))
+ return PTR_ERR(usb_phy);
+
+ priv->usb_phy = usb_phy;
+ return 0;
+ }
+
+ return -ENXIO;
}
static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- if (!priv->phy)
- return 0;
+ if (priv->phy) {
+ phy_put(priv->phy);
+ priv->phy = NULL;
+ }
- usb_put_phy(priv->phy);
- priv->phy = NULL;
+ if (priv->usb_phy) {
+ usb_put_phy(priv->usb_phy);
+ priv->usb_phy = NULL;
+ }
return 0;
}
@@ -47,21 +66,35 @@ static int usbhs_rcar2_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+ int retval = -ENODEV;
+
+ if (priv->phy) {
+ if (enable) {
+ retval = phy_init(priv->phy);
- if (!priv->phy)
- return -ENODEV;
+ if (!retval)
+ retval = phy_power_on(priv->phy);
+ } else {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ retval = 0;
+ }
+ }
- if (enable) {
- int retval = usb_phy_init(priv->phy);
+ if (priv->usb_phy) {
+ if (enable) {
+ retval = usb_phy_init(priv->usb_phy);
- if (!retval)
- retval = usb_phy_set_suspend(priv->phy, 0);
- return retval;
+ if (!retval)
+ retval = usb_phy_set_suspend(priv->usb_phy, 0);
+ } else {
+ usb_phy_set_suspend(priv->usb_phy, 1);
+ usb_phy_shutdown(priv->usb_phy);
+ retval = 0;
+ }
}
- usb_phy_set_suspend(priv->phy, 1);
- usb_phy_shutdown(priv->phy);
- return 0;
+ return retval;
}
static int usbhs_rcar2_get_id(struct platform_device *pdev)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a69f7cd9d0bf..b7cf1982d1d9 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -60,6 +60,7 @@ config USB_SERIAL_SIMPLE
- Suunto ANT+ USB device.
- Medtronic CareLink USB device
- Fundamental Software dongle.
+ - Google USB serial devices
- HP4x calculators
- a number of Motorola phones
- Novatel Wireless GPS receivers
@@ -606,10 +607,10 @@ config USB_SERIAL_CYBERJACK
If unsure, say N.
config USB_SERIAL_XIRCOM
- tristate "USB Xircom / Entregra Single Port Serial Driver"
+ tristate "USB Xircom / Entrega Single Port Serial Driver"
select USB_EZUSB_FX2
help
- Say Y here if you want to use a Xircom or Entregra single port USB to
+ Say Y here if you want to use a Xircom or Entrega single port USB to
serial converter device. This driver makes use of firmware
developed from scratch by Brian Warner.
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 742d827f876c..dd97d8b572c3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -1,5 +1,5 @@
/*
- * USB Keyspan PDA / Xircom / Entregra Converter driver
+ * USB Keyspan PDA / Xircom / Entrega Converter driver
*
* Copyright (C) 1999 - 2001 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 1999, 2000 Brian Warner <warner@lothar.com>
@@ -58,11 +58,11 @@ struct keyspan_pda_private {
#define KEYSPAN_PDA_FAKE_ID 0x0103
#define KEYSPAN_PDA_ID 0x0104 /* no clue */
-/* For Xircom PGSDB9 and older Entregra version of the same device */
+/* For Xircom PGSDB9 and older Entrega version of the same device */
#define XIRCOM_VENDOR_ID 0x085a
#define XIRCOM_FAKE_ID 0x8027
-#define ENTREGRA_VENDOR_ID 0x1645
-#define ENTREGRA_FAKE_ID 0x8093
+#define ENTREGA_VENDOR_ID 0x1645
+#define ENTREGA_FAKE_ID 0x8093
static const struct usb_device_id id_table_combined[] = {
#ifdef KEYSPAN
@@ -70,7 +70,7 @@ static const struct usb_device_id id_table_combined[] = {
#endif
#ifdef XIRCOM
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
- { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
+ { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
#endif
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
@@ -93,7 +93,7 @@ static const struct usb_device_id id_table_fake[] = {
#ifdef XIRCOM
static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
- { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
+ { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
{ }
};
#endif
@@ -667,7 +667,7 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial)
#endif
#ifdef XIRCOM
else if ((le16_to_cpu(serial->dev->descriptor.idVendor) == XIRCOM_VENDOR_ID) ||
- (le16_to_cpu(serial->dev->descriptor.idVendor) == ENTREGRA_VENDOR_ID))
+ (le16_to_cpu(serial->dev->descriptor.idVendor) == ENTREGA_VENDOR_ID))
fw_name = "keyspan_pda/xircom_pgs.fw";
#endif
else {
@@ -744,7 +744,7 @@ static struct usb_serial_driver xircom_pgs_fake_device = {
.owner = THIS_MODULE,
.name = "xircom_no_firm",
},
- .description = "Xircom / Entregra PGS - (prerenumeration)",
+ .description = "Xircom / Entrega PGS - (prerenumeration)",
.id_table = id_table_fake_xircom,
.num_ports = 1,
.attach = keyspan_pda_fake_startup,
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 02c420af251e..2363654cafc9 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -244,7 +244,7 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* start reading (Adapter B 'cause PNP string) */
- result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
dev_dbg(dev, "%s - Send read URB returns: %i\n", __func__, result);
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index dfd728a263d2..4f70df33975a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1299,8 +1299,6 @@ static void mos7720_throttle(struct tty_struct *tty)
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MCR,
mos7720_port->shadowMCR);
- if (status != 0)
- return;
}
}
@@ -1331,8 +1329,6 @@ static void mos7720_unthrottle(struct tty_struct *tty)
mos7720_port->shadowMCR |= UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MCR,
mos7720_port->shadowMCR);
- if (status != 0)
- return;
}
}
@@ -1657,7 +1653,7 @@ static void change_port_settings(struct tty_struct *tty,
write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status);
}
@@ -1702,7 +1698,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
change_port_settings(tty, mos7720_port, old_termios);
if (port->read_urb->status != -EINPROGRESS) {
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status);
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 3d88eefdf1d1..220b4be89641 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1904,7 +1904,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
if (mos7840_port->read_urb_busy == false) {
mos7840_port->read_urb_busy = true;
- status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
+ status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
@@ -1968,7 +1968,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
if (mos7840_port->read_urb_busy == false) {
mos7840_port->read_urb_busy = true;
- status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
+ status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
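The GFP_ATOMIC to GFP_KERNEL conversions above follow the usual rule: open() and set_termios() run in process context and may sleep, so the stricter atomic allocation is not needed there, while completion handlers must stay atomic. A small illustrative sketch; the example_* wrappers are not driver functions.

#include <linux/gfp.h>
#include <linux/usb.h>

/* process context (open, set_termios, ...): sleeping is allowed */
static int example_submit_from_process_ctx(struct urb *urb)
{
        return usb_submit_urb(urb, GFP_KERNEL);
}

/* interrupt or URB-completion context: must not sleep */
static int example_submit_from_atomic_ctx(struct urb *urb)
{
        return usb_submit_urb(urb, GFP_ATOMIC);
}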
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index b2aa003bf411..cb3e14780a7e 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -27,12 +27,15 @@ enum qcserial_layouts {
QCSERIAL_G2K = 0, /* Gobi 2000 */
QCSERIAL_G1K = 1, /* Gobi 1000 */
QCSERIAL_SWI = 2, /* Sierra Wireless */
+ QCSERIAL_HWI = 3, /* Huawei */
};
#define DEVICE_G1K(v, p) \
USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K
#define DEVICE_SWI(v, p) \
USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI
+#define DEVICE_HWI(v, p) \
+ USB_DEVICE(v, p), .driver_info = QCSERIAL_HWI
static const struct usb_device_id id_table[] = {
/* Gobi 1000 devices */
@@ -157,6 +160,9 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ /* Huawei devices */
+ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -287,6 +293,33 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
break;
}
break;
+ case QCSERIAL_HWI:
+ /*
+ * Huawei layout:
+ * 0: AT-capable modem port
+ * 1: DM/DIAG
+ * 2: AT-capable modem port
+ * 3: CCID-compatible PCSC interface
+ * 4: QMI/net
+ * 5: NMEA
+ */
+ switch (ifnum) {
+ case 0:
+ case 2:
+ dev_dbg(dev, "Modem port found\n");
+ break;
+ case 1:
+ dev_dbg(dev, "DM/DIAG interface found\n");
+ break;
+ case 5:
+ dev_dbg(dev, "NMEA GPS interface found\n");
+ break;
+ default:
+ /* don't claim any unsupported interface */
+ altsetting = -1;
+ break;
+ }
+ break;
default:
dev_err(dev, "unsupported device layout type: %lu\n",
id->driver_info);
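Supporting another device that exposes the same Huawei layout would then only require a new entry built with the DEVICE_HWI() helper; the table below is a sketch with a placeholder VID/PID, not a real device, and it assumes the driver-local DEVICE_HWI() macro is in scope.

#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
        {DEVICE_HWI(0x1234, 0x5678)},   /* placeholder VID/PID */
        { }                             /* terminating entry */
};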
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 7064eb8d6142..3658662898fc 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -56,6 +56,14 @@ DEVICE(funsoft, FUNSOFT_IDS);
{ USB_DEVICE(0x8087, 0x0716) }
DEVICE(flashloader, FLASHLOADER_IDS);
+/* Google Serial USB SubClass */
+#define GOOGLE_IDS() \
+ { USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \
+ USB_CLASS_VENDOR_SPEC, \
+ 0x50, \
+ 0x01) }
+DEVICE(google, GOOGLE_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -64,7 +72,7 @@ DEVICE(vivopay, VIVOPAY_IDS);
/* Motorola USB Phone driver */
#define MOTO_IDS() \
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
- { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */ \
+ { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ \
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \
{ USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \
{ USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
@@ -97,6 +105,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&zio_device,
&funsoft_device,
&flashloader_device,
+ &google_device,
&vivopay_device,
&moto_modem_device,
&novatel_gps_device,
@@ -111,6 +120,7 @@ static const struct usb_device_id id_table[] = {
ZIO_IDS(),
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
+ GOOGLE_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
NOVATEL_IDS(),
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 2d81e1d8ee30..57bf3ad41fb6 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -181,17 +181,14 @@ void usb_stor_show_sense(const struct us_data *us,
US_DEBUGPX("%s\n", what);
}
-int usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
+void usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
{
va_list args;
- int r;
va_start(args, fmt);
- r = dev_vprintk_emit(LOGLEVEL_DEBUG, &us->pusb_dev->dev, fmt, args);
+ dev_vprintk_emit(LOGLEVEL_DEBUG, &us->pusb_dev->dev, fmt, args);
va_end(args);
-
- return r;
}
EXPORT_SYMBOL_GPL(usb_stor_dbg);
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index b1273f03e223..f52520306e1a 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -50,15 +50,17 @@
void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb);
void usb_stor_show_sense(const struct us_data *us, unsigned char key,
unsigned char asc, unsigned char ascq);
-__printf(2, 3) int usb_stor_dbg(const struct us_data *us,
- const char *fmt, ...);
+__printf(2, 3) void usb_stor_dbg(const struct us_data *us,
+ const char *fmt, ...);
#define US_DEBUGPX(fmt, ...) printk(fmt, ##__VA_ARGS__)
#define US_DEBUG(x) x
#else
__printf(2, 3)
-static inline int _usb_stor_dbg(const struct us_data *us,
- const char *fmt, ...) {return 1;}
+static inline void _usb_stor_dbg(const struct us_data *us,
+ const char *fmt, ...)
+{
+}
#define usb_stor_dbg(us, fmt, ...) \
do { if (0) _usb_stor_dbg(us, fmt, ##__VA_ARGS__); } while (0)
#define US_DEBUGPX(fmt, ...) \
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 73f125e0cb58..31fa2e92065b 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -49,10 +49,9 @@ int usb_stor_euscsi_init(struct us_data *us)
int result;
usb_stor_dbg(us, "Attempting to init eUSCSI bridge...\n");
- us->iobuf[0] = 0x1;
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
- 0x01, 0x0, us->iobuf, 0x1, 5 * HZ);
+ 0x01, 0x0, NULL, 0x0, 5 * HZ);
usb_stor_dbg(us, "-- result is %d\n", result);
return 0;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index b1d815eb6d0b..540add24a12f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1035,9 +1035,20 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
- /* if we have a successful request, return the result */
- if (result > 0)
- return us->iobuf[0];
+ /*
+ * If we have a successful request, return the result if valid. The
+ * CBW LUN field is 4 bits wide, so the value reported by the device
+ * should fit into that.
+ */
+ if (result > 0) {
+ if (us->iobuf[0] < 16) {
+ return us->iobuf[0];
+ } else {
+ dev_info(&us->pusb_intf->dev,
+ "Max LUN %d is not valid, using 0 instead",
+ us->iobuf[0]);
+ }
+ }
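Put differently, the reported value is only trusted when it fits the 4-bit LUN field of the Bulk-Only CBW; a condensed sketch of that check, illustrative only and not the driver code.

#include <linux/types.h>

/* Bulk-Only CBWs carry the LUN in a 4-bit field, so only 0..15 are valid */
static u8 example_max_lun(int getmaxlun_result, u8 reported)
{
        if (getmaxlun_result > 0 && reported < 16)
                return reported;

        return 0;       /* fall back to a single LUN */
}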
/*
* Some devices don't like GetMaxLUN. They may STALL the control
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 4047edfb64e1..6cdabdc119a7 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -66,7 +66,7 @@ enum {
/* Overrides scsi_pointer */
struct uas_cmd_info {
unsigned int state;
- unsigned int stream;
+ unsigned int uas_tag;
struct urb *cmd_urb;
struct urb *data_in_urb;
struct urb *data_out_urb;
@@ -173,30 +173,15 @@ static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
cmnd->result = sense_iu->status;
}
-/*
- * scsi-tags go from 0 - (nr_tags - 1), uas tags need to match stream-ids,
- * which go from 1 - nr_streams. And we use 1 for untagged commands.
- */
-static int uas_get_tag(struct scsi_cmnd *cmnd)
-{
- int tag;
-
- if (cmnd->flags & SCMD_TAGGED)
- tag = cmnd->request->tag + 2;
- else
- tag = 1;
-
- return tag;
-}
-
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status)
{
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
scmd_printk(KERN_INFO, cmnd,
- "%s %d tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
- prefix, status, uas_get_tag(cmnd),
+ "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
+ prefix, status, cmdinfo->uas_tag,
(ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
(ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
(ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "",
@@ -242,7 +227,7 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
DATA_OUT_URB_INFLIGHT |
COMMAND_ABORTED))
return -EBUSY;
- devinfo->cmnd[uas_get_tag(cmnd) - 1] = NULL;
+ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
uas_free_unsubmitted_urbs(cmnd);
cmnd->scsi_done(cmnd);
return 0;
@@ -289,7 +274,7 @@ static void uas_stat_cmplt(struct urb *urb)
idx = be16_to_cpup(&iu->tag) - 1;
if (idx >= MAX_CMNDS || !devinfo->cmnd[idx]) {
dev_err(&urb->dev->dev,
- "stat urb: no pending cmd for tag %d\n", idx + 1);
+ "stat urb: no pending cmd for uas-tag %d\n", idx + 1);
goto out;
}
@@ -427,7 +412,8 @@ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
- urb->stream_id = cmdinfo->stream;
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
@@ -451,7 +437,8 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
uas_stat_cmplt, cmnd->device->host);
- urb->stream_id = cmdinfo->stream;
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
@@ -465,6 +452,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct urb *urb = usb_alloc_urb(0, gfp);
struct command_iu *iu;
int len;
@@ -481,7 +469,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto free;
iu->iu_id = IU_ID_COMMAND;
- iu->tag = cpu_to_be16(uas_get_tag(cmnd));
+ iu->tag = cpu_to_be16(cmdinfo->uas_tag);
iu->prio_attr = UAS_SIMPLE_TAG;
iu->len = len;
int_to_scsilun(sdev->lun, &iu->lun);
@@ -608,8 +596,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo = sdev->hostdata;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
unsigned long flags;
- unsigned int stream;
- int err;
+ int idx, err;
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
@@ -635,8 +622,12 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
return 0;
}
- stream = uas_get_tag(cmnd);
- if (devinfo->cmnd[stream - 1]) {
+ /* Find a free uas-tag */
+ for (idx = 0; idx < devinfo->qdepth; idx++) {
+ if (!devinfo->cmnd[idx])
+ break;
+ }
+ if (idx == devinfo->qdepth) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
@@ -644,7 +635,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
cmnd->scsi_done = done;
memset(cmdinfo, 0, sizeof(*cmdinfo));
- cmdinfo->stream = stream;
+ cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */
cmdinfo->state = SUBMIT_STATUS_URB | ALLOC_CMD_URB | SUBMIT_CMD_URB;
switch (cmnd->sc_data_direction) {
@@ -659,10 +650,8 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
break;
}
- if (!devinfo->use_streams) {
+ if (!devinfo->use_streams)
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
- cmdinfo->stream = 0;
- }
err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
if (err) {
@@ -674,7 +663,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
uas_add_work(cmdinfo);
}
- devinfo->cmnd[stream - 1] = cmnd;
+ devinfo->cmnd[idx] = cmnd;
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
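The tag handling above boils down to a linear search for a free command slot, with the resulting 1-based uas tag doubling as the USB 3 stream ID; a condensed sketch under those assumptions, where example_alloc_uas_tag() is illustrative only.

#include <linux/errno.h>

struct scsi_cmnd;

/* return a 1-based uas tag (== stream id), or -EBUSY if the queue is full */
static int example_alloc_uas_tag(struct scsi_cmnd **slots, int qdepth)
{
        int idx;

        for (idx = 0; idx < qdepth; idx++)
                if (!slots[idx])
                        return idx + 1;

        return -EBUSY;
}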
@@ -702,7 +691,7 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
cmdinfo->state |= COMMAND_ABORTED;
/* Drop all refs to this cmnd, kill data urbs to break their ref */
- devinfo->cmnd[uas_get_tag(cmnd) - 1] = NULL;
+ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
if (cmdinfo->state & DATA_IN_URB_INFLIGHT)
data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
if (cmdinfo->state & DATA_OUT_URB_INFLIGHT)
@@ -816,13 +805,6 @@ static struct scsi_host_template uas_host_template = {
.sg_tablesize = SG_NONE,
.cmd_per_lun = 1, /* until we override it */
.skip_settle_delay = 1,
-
- /*
- * The uas drivers expects tags not to be bigger than the maximum
- * per-device queue depth, which is not true with the blk-mq tag
- * allocator.
- */
- .disable_blk_mq = true,
.use_blk_tags = 1,
};
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9d66ce62542e..d468d02179f4 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -884,7 +884,9 @@ static void usb_stor_scan_dwork(struct work_struct *work)
dev_dbg(dev, "starting scan\n");
/* For bulk-only devices, determine the max LUN value */
- if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
+ if (us->protocol == USB_PR_BULK &&
+ !(us->fflags & US_FL_SINGLE_LUN) &&
+ !(us->fflags & US_FL_SCM_MULT_TARG)) {
mutex_lock(&us->dev_mutex);
us->max_lun = usb_stor_Bulk_max_lun(us);
mutex_unlock(&us->dev_mutex);
@@ -983,21 +985,31 @@ int usb_stor_probe2(struct us_data *us)
usb_stor_dbg(us, "Transport: %s\n", us->transport_name);
usb_stor_dbg(us, "Protocol: %s\n", us->protocol_name);
+ if (us->fflags & US_FL_SCM_MULT_TARG) {
+ /*
+ * SCM eUSCSI bridge devices can have different numbers
+ * of LUNs on different targets; allow all to be probed.
+ */
+ us->max_lun = 7;
+ /* The eUSCSI itself has ID 7, so avoid scanning that */
+ us_to_host(us)->this_id = 7;
+ /* max_id is 8 initially, so no need to set it here */
+ } else {
+ /* In the normal case there is only a single target */
+ us_to_host(us)->max_id = 1;
+ /*
+ * Like Windows, we won't store the LUN bits in CDB[1] for
+ * SCSI-2 devices using the Bulk-Only transport (even though
+ * this violates the SCSI spec).
+ */
+ if (us->transport == usb_stor_Bulk_transport)
+ us_to_host(us)->no_scsi2_lun_in_cdb = 1;
+ }
+
/* fix for single-lun devices */
if (us->fflags & US_FL_SINGLE_LUN)
us->max_lun = 0;
- if (!(us->fflags & US_FL_SCM_MULT_TARG))
- us_to_host(us)->max_id = 1;
-
- /*
- * Like Windows, we won't store the LUN bits in CDB[1] for SCSI-2
- * devices using the Bulk-Only transport (even though this violates
- * the SCSI spec).
- */
- if (us->transport == usb_stor_Bulk_transport)
- us_to_host(us)->no_scsi2_lun_in_cdb = 1;
-
/* Find the endpoints and calculate pipe values */
result = get_pipes(us);
if (result)
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index fac20e0434c0..a3ec49bdc1e6 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -311,7 +311,6 @@ static int stub_probe(struct usb_device *udev)
{
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
- int err = 0;
struct bus_id_priv *busid_priv;
int rc;
@@ -372,23 +371,28 @@ static int stub_probe(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to claim port\n");
- return rc;
+ goto err_port;
}
- err = stub_add_files(&udev->dev);
- if (err) {
+ rc = stub_add_files(&udev->dev);
+ if (rc) {
dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid);
- dev_set_drvdata(&udev->dev, NULL);
- usb_put_dev(udev);
- kthread_stop_put(sdev->ud.eh);
-
- busid_priv->sdev = NULL;
- stub_device_free(sdev);
- return err;
+ goto err_files;
}
busid_priv->status = STUB_BUSID_ALLOC;
return 0;
+err_files:
+ usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+err_port:
+ dev_set_drvdata(&udev->dev, NULL);
+ usb_put_dev(udev);
+ kthread_stop_put(sdev->ud.eh);
+
+ busid_priv->sdev = NULL;
+ stub_device_free(sdev);
+ return rc;
}
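The error handling is reshaped into the usual goto-unwind pattern, where each failure jumps to a label that undoes everything acquired before it; a generic sketch of that shape, in which every example_*() helper is hypothetical.

struct device;

int example_alloc_state(struct device *dev);
int example_claim_port(struct device *dev);
int example_add_files(struct device *dev);
void example_release_port(struct device *dev);
void example_free_state(struct device *dev);

static int example_probe(struct device *dev)
{
        int rc;

        rc = example_alloc_state(dev);
        if (rc)
                return rc;

        rc = example_claim_port(dev);
        if (rc)
                goto err_port;

        rc = example_add_files(dev);
        if (rc)
                goto err_files;

        return 0;

err_files:
        example_release_port(dev);      /* undo example_claim_port() */
err_port:
        example_free_state(dev);        /* undo example_alloc_state() */
        return rc;
}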
static void shutdown_busid(struct bus_id_priv *busid_priv)
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index c02374b6049c..1ae9d40f96bf 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -518,8 +518,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);
- if (vdev->udev)
- usb_put_dev(vdev->udev);
+ usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
spin_lock(&vdev->ud.lock);
@@ -539,8 +538,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
usbip_dbg_vhci_hc(
"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
- if (vdev->udev)
- usb_put_dev(vdev->udev);
+ usb_put_dev(vdev->udev);
vdev->udev = usb_get_dev(urb->dev);
goto out;
@@ -831,8 +829,7 @@ static void vhci_device_reset(struct usbip_device *ud)
vdev->speed = 0;
vdev->devid = 0;
- if (vdev->udev)
- usb_put_dev(vdev->udev);
+ usb_put_dev(vdev->udev);
vdev->udev = NULL;
if (ud->tcp_socket) {
@@ -1110,7 +1107,6 @@ static struct platform_driver vhci_driver = {
.resume = vhci_hcd_resume,
.driver = {
.name = driver_name,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 101834576236..415b14002a61 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -101,7 +101,7 @@ static ssize_t wusb_ck_store(struct device *dev,
if (wusbhc == NULL)
return -ENODEV;
result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
- memset(&ck, 0, sizeof(ck));
+ memzero_explicit(&ck, sizeof(ck));
wusbhc_put(wusbhc);
return result < 0 ? result : size;
}
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index cc74d669c802..b66faaf3e842 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -522,10 +522,10 @@ error_hs3:
error_hs2:
error_hs1:
memset(hs, 0, 3*sizeof(hs[0]));
- memset(&keydvt_out, 0, sizeof(keydvt_out));
- memset(&keydvt_in, 0, sizeof(keydvt_in));
- memset(&ccm_n, 0, sizeof(ccm_n));
- memset(mic, 0, sizeof(mic));
+ memzero_explicit(&keydvt_out, sizeof(keydvt_out));
+ memzero_explicit(&keydvt_in, sizeof(keydvt_in));
+ memzero_explicit(&ccm_n, sizeof(ccm_n));
+ memzero_explicit(mic, sizeof(mic));
if (result < 0)
wusb_dev_set_encryption(usb_dev, 0);
error_dev_set_encryption:
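memzero_explicit() is used here instead of memset() because the compiler may drop a memset() of a local buffer that is never read again as a dead store; the explicit variant guarantees that the key material is actually wiped. A minimal sketch, with example_use_secret() as an illustrative name.

#include <linux/string.h>
#include <linux/types.h>

static void example_use_secret(const u8 *key, size_t len)
{
        u8 tmp[32];

        if (len > sizeof(tmp))
                len = sizeof(tmp);
        memcpy(tmp, key, len);

        /* ... derive something from tmp ... */

        /* a plain memset() here could be optimized away as a dead store */
        memzero_explicit(tmp, sizeof(tmp));
}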
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 0887ae982783..536ad42b0a4b 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -213,7 +213,7 @@ void uwb_rsv_backoff_win_timer(unsigned long arg)
bow->total_expired = 0;
bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
}
- dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n);
+ dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n);
/* try to relocate all the "to be moved" relocations */
uwb_rsv_handle_drp_avail_change(rc);
@@ -234,7 +234,7 @@ void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
bow->window <<= 1;
bow->n = prandom_u32() & (bow->window - 1);
- dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n);
+ dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);
/* reset the timer associated variables */
timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index d8c57636b9ce..14e27ab32456 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -16,7 +16,7 @@ config VFIO_SPAPR_EEH
menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
- select VFIO_IOMMU_TYPE1 if X86
+ select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU)
select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES)
select VFIO_SPAPR_EEH if (PPC_POWERNV || PPC_PSERIES)
select ANON_INODES
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index c41b01e2b693..c6bb5da2d2a7 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -16,3 +16,11 @@ config VFIO_PCI_VGA
BIOS and generic video drivers.
If you don't know what to do here, say N.
+
+config VFIO_PCI_MMAP
+ depends on VFIO_PCI
+ def_bool y if !S390
+
+config VFIO_PCI_INTX
+ depends on VFIO_PCI
+ def_bool y if !S390
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 9558da3f06a0..255201f22126 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -215,7 +215,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (pin)
+ if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
@@ -406,7 +406,8 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
- if (pci_resource_flags(pdev, info.index) &
+ if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
+ pci_resource_flags(pdev, info.index) &
IORESOURCE_MEM && info.size >= PAGE_SIZE)
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
break;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 1de3f94aa7de..ff75ca31a199 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -609,6 +609,10 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
/* Sometimes used by sw, just virtualize */
p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
+
+ /* Virtualize interrupt pin to allow hiding INTx */
+ p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
+
return 0;
}
@@ -1445,6 +1449,9 @@ int vfio_config_init(struct vfio_pci_device *vdev)
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
}
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX))
+ vconfig[PCI_INTERRUPT_PIN] = 0;
+
ret = vfio_cap_init(vdev);
if (ret)
goto out;
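
The VFIO changes above rely on IS_ENABLED() so both branches still compile everywhere while the disabled one is discarded; a minimal restatement of the pattern:

/* IS_ENABLED(CONFIG_FOO) is 1 when FOO is built-in or modular, 0 otherwise,
 * so the INTx/MMAP paths vanish at compile time on configs (e.g. s390)
 * that leave them out, yet stay type-checked on all architectures.
 */
if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
	return 1;	/* report a single INTx interrupt */
return 0;		/* INTx hidden from userspace */
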
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8dae2f724a35..14419a8ccbb6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -48,20 +48,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* status internally; used for zerocopy tx only.
*/
/* Lower device DMA failed */
-#define VHOST_DMA_FAILED_LEN 3
+#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
/* Lower device DMA done */
-#define VHOST_DMA_DONE_LEN 2
+#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
/* Lower device DMA in progress */
-#define VHOST_DMA_IN_PROGRESS 1
+#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
/* Buffer unused */
-#define VHOST_DMA_CLEAR_LEN 0
+#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
-#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF),
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1),
};
enum {
@@ -342,7 +343,6 @@ static void handle_tx(struct vhost_net *net)
.msg_namelen = 0,
.msg_control = NULL,
.msg_controllen = 0,
- .msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
size_t len, total_len = 0;
@@ -396,8 +396,8 @@ static void handle_tx(struct vhost_net *net)
}
/* Skip header. TODO: support TSO. */
s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
- msg.msg_iovlen = out;
len = iov_length(vq->iov, out);
+ iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
/* Sanity check */
if (!len) {
vq_err(vq, "Unexpected header len for TX: "
@@ -416,7 +416,7 @@ static void handle_tx(struct vhost_net *net)
struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
- vq->heads[nvq->upend_idx].id = head;
+ vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
@@ -500,6 +500,10 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
int headcount = 0;
unsigned d;
int r, nlogs = 0;
+ /* len is always initialized before use since we are always called with
+ * datalen > 0.
+ */
+ u32 uninitialized_var(len);
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
@@ -527,13 +531,14 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
nlogs += *log_num;
log += *log_num;
}
- heads[headcount].id = d;
- heads[headcount].len = iov_length(vq->iov + seg, in);
- datalen -= heads[headcount].len;
+ heads[headcount].id = cpu_to_vhost32(vq, d);
+ len = iov_length(vq->iov + seg, in);
+ heads[headcount].len = cpu_to_vhost32(vq, len);
+ datalen -= len;
++headcount;
seg += in;
}
- heads[headcount - 1].len += datalen;
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
@@ -562,7 +567,6 @@ static void handle_rx(struct vhost_net *net)
.msg_namelen = 0,
.msg_control = NULL, /* FIXME: get and handle RX aux data. */
.msg_controllen = 0,
- .msg_iov = vq->iov,
.msg_flags = MSG_DONTWAIT,
};
struct virtio_net_hdr_mrg_rxbuf hdr = {
@@ -600,7 +604,7 @@ static void handle_rx(struct vhost_net *net)
break;
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
- msg.msg_iovlen = 1;
+ iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
err = sock->ops->recvmsg(NULL, sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
@@ -626,7 +630,7 @@ static void handle_rx(struct vhost_net *net)
/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
* needed because recvmsg can modify msg_iov. */
copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
- msg.msg_iovlen = in;
+ iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
err = sock->ops->recvmsg(NULL, sock, &msg,
sock_len, MSG_DONTWAIT | MSG_TRUNC);
/* Userspace might have consumed the packet meanwhile:
@@ -1025,7 +1029,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
size_t vhost_hlen, sock_hlen, hdr_len;
int i;
- hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
+ hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1))) ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
sizeof(struct virtio_net_hdr);
if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
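
A minimal sketch of the msghdr conversion used in handle_tx()/handle_rx() above, assuming the iov_iter API of this kernel generation; the sendmsg call shape mirrors the existing handle_tx() code.

struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
size_t len = iov_length(vq->iov, out);

/* msg_iov/msg_iovlen are gone; the buffers are described via msg_iter */
iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
err = sock->ops->sendmsg(NULL, sock, &msg, len);
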
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a17f11850669..01c01cb3933f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -168,6 +168,7 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
+/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
(1ULL << VIRTIO_SCSI_F_T10_PI)
@@ -577,8 +578,8 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
return NULL;
}
- evt->event.event = event;
- evt->event.reason = reason;
+ evt->event.event = cpu_to_vhost32(vq, event);
+ evt->event.reason = cpu_to_vhost32(vq, reason);
vs->vs_events_nr++;
return evt;
@@ -636,7 +637,7 @@ again:
}
if (vs->vs_events_missed) {
- event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
vs->vs_events_missed = false;
}
@@ -695,12 +696,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
cmd, se_cmd->residual_count, se_cmd->scsi_status);
memset(&v_rsp, 0, sizeof(v_rsp));
- v_rsp.resid = se_cmd->residual_count;
+ v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
/* TODO is status_qualifier field needed? */
v_rsp.status = se_cmd->scsi_status;
- v_rsp.sense_len = se_cmd->scsi_sense_length;
+ v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->scsi_sense_length);
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
- v_rsp.sense_len);
+ se_cmd->scsi_sense_length);
ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
if (likely(ret == 0)) {
struct vhost_scsi_virtqueue *q;
@@ -1095,14 +1097,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
", but wrong data_direction\n");
goto err_cmd;
}
- prot_bytes = v_req_pi.pi_bytesout;
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero di_pi_niov"
", but wrong data_direction\n");
goto err_cmd;
}
- prot_bytes = v_req_pi.pi_bytesin;
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
if (prot_bytes) {
int tmp = 0;
@@ -1117,12 +1119,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
data_first += prot_niov;
data_niov = data_num - prot_niov;
}
- tag = v_req_pi.tag;
+ tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
} else {
- tag = v_req.tag;
+ tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
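
One subtle point in the completion path above is worth restating: v_rsp.sense_len now holds the ring-byte-order value, so the CPU-order length must feed memcpy():

v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, se_cmd->scsi_sense_length);
/* copy length stays in CPU byte order, not the converted field */
memcpy(v_rsp.sense, cmd->tvc_sense_buf, se_cmd->scsi_sense_length);
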
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c90f4374442a..ed71b5347a76 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -33,8 +33,8 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+ if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
@@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
- if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+ if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1038,6 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
int vhost_init_used(struct vhost_virtqueue *vq)
{
+ __virtio16 last_used_idx;
int r;
if (!vq->private_data)
return 0;
@@ -1046,7 +1047,13 @@ int vhost_init_used(struct vhost_virtqueue *vq)
if (r)
return r;
vq->signalled_used_valid = false;
- return get_user(vq->last_used_idx, &vq->used->idx);
+ if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
+ return -EFAULT;
+ r = __get_user(last_used_idx, &vq->used->idx);
+ if (r)
+ return r;
+ vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
+ return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
@@ -1087,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
/* Each buffer in the virtqueues is actually a chain of descriptors. This
* function returns the next descriptor in the chain,
* or -1U if we're at the end. */
-static unsigned next_desc(struct vring_desc *desc)
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
- if (!(desc->flags & VRING_DESC_F_NEXT))
+ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = desc->next;
+ next = vhost16_to_cpu(vq, desc->next);
/* Make sure compiler knows to grab that: we don't want it changing! */
/* We will use the result as an index in an array, so most
* architectures only need a compiler barrier here. */
@@ -1113,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq,
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
+ u32 len = vhost32_to_cpu(vq, indirect->len);
int ret;
/* Sanity check */
- if (unlikely(indirect->len % sizeof desc)) {
+ if (unlikely(len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
- (unsigned long long)indirect->len,
+ (unsigned long long)len,
sizeof desc);
return -EINVAL;
}
- ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
UIO_MAXIOV);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
@@ -1135,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
* architectures only need a compiler barrier here. */
read_barrier_depends();
- count = indirect->len / sizeof desc;
+ count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
@@ -1155,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq,
if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
vq->indirect, sizeof desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1172,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq,
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
*in_num += ret;
if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1189,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ } while ((i = next_desc(vq, &desc)) != -1);
return 0;
}
@@ -1209,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct vring_desc desc;
unsigned int i, head, found = 0;
u16 last_avail_idx;
+ __virtio16 avail_idx;
+ __virtio16 ring_head;
int ret;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
+ if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
}
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
vq_err(vq, "Guest moved used index from %u to %u",
@@ -1234,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (unlikely(__get_user(head,
+ if (unlikely(__get_user(ring_head,
&vq->avail->ring[last_avail_idx % vq->num]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
@@ -1242,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
return -EFAULT;
}
+ head = vhost16_to_cpu(vq, ring_head);
+
/* If their number is silly, that's an error. */
if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
@@ -1274,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
i, vq->desc + i);
return -EFAULT;
}
- if (desc.flags & VRING_DESC_F_INDIRECT) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
@@ -1286,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
continue;
}
- ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d descriptor idx %d\n",
ret, i);
return ret;
}
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1312,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ } while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
@@ -1335,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem heads = { head, len };
+ struct vring_used_elem heads = {
+ cpu_to_vhost32(vq, head),
+ cpu_to_vhost32(vq, len)
+ };
return vhost_add_used_n(vq, &heads, 1);
}
@@ -1404,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (put_user(vq->last_used_idx, &vq->used->idx)) {
+ if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
@@ -1422,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 old, new, event;
+ __u16 old, new;
+ __virtio16 event;
bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
@@ -1434,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return true;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
- __u16 flags;
+ __virtio16 flags;
if (__get_user(flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
- return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
+ return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
}
old = vq->signalled_used;
v = vq->signalled_used_valid;
@@ -1449,11 +1468,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (get_user(event, vhost_used_event(vq))) {
+ if (__get_user(event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(event, new, old);
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
@@ -1488,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- u16 avail_idx;
+ __virtio16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -1519,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
}
- return avail_idx != vq->avail_idx;
+ return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
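
A compact restatement of the access pattern the vhost.c hunks above apply throughout: every value read from the shared ring is declared as a __virtio type and converted exactly once, keyed on the virtqueue's negotiated features, for example when walking a descriptor chain:

unsigned next;

if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))
	next = vhost16_to_cpu(vq, desc.next);	/* ring -> CPU order */
else
	next = -1U;				/* end of chain */
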
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 3eda654b8f5a..8c1c792900ba 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,8 +12,6 @@
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-struct vhost_device;
-
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -54,8 +52,6 @@ struct vhost_log {
u64 len;
};
-struct vhost_virtqueue;
-
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -106,7 +102,7 @@ struct vhost_virtqueue {
/* Protected by virtqueue mutex. */
struct vhost_memory *memory;
void *private_data;
- unsigned acked_features;
+ u64 acked_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
@@ -172,8 +168,39 @@ enum {
(1ULL << VHOST_F_LOG_ALL),
};
-static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_features & (1ULL << bit);
+}
+
+/* Memory accessors */
+static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
+{
+ return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
+{
+ return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
+{
+ return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
+{
+ return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
+{
+ return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
- return vq->acked_features & (1 << bit);
+ return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
}
#endif
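
The new accessors above pick the conversion at run time: with VIRTIO_F_VERSION_1 the ring is little-endian, otherwise it uses the legacy native byte order. A minimal usage sketch:

__virtio16 wire;
u16 idx;

wire = cpu_to_vhost16(vq, vq->avail_idx);	/* CPU -> ring format */
idx  = vhost16_to_cpu(vq, wire);		/* ring format -> CPU */
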
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 25fb8e3d75b1..a26d3bb25650 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/platform_data/lp855x.h>
#include <linux/pwm.h>
+#include <linux/regulator/consumer.h>
/* LP8550/1/2/3/6 Registers */
#define LP855X_BRIGHTNESS_CTRL 0x00
@@ -341,8 +342,10 @@ static const struct attribute_group lp855x_attr_group = {
};
#ifdef CONFIG_OF
-static int lp855x_parse_dt(struct device *dev, struct device_node *node)
+static int lp855x_parse_dt(struct lp855x *lp)
{
+ struct device *dev = lp->dev;
+ struct device_node *node = dev->of_node;
struct lp855x_platform_data *pdata;
int rom_length;
@@ -381,12 +384,19 @@ static int lp855x_parse_dt(struct device *dev, struct device_node *node)
pdata->rom_data = &rom[0];
}
- dev->platform_data = pdata;
+ pdata->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(pdata->supply)) {
+ if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ pdata->supply = NULL;
+ }
+
+ lp->pdata = pdata;
return 0;
}
#else
-static int lp855x_parse_dt(struct device *dev, struct device_node *node)
+static int lp855x_parse_dt(struct lp855x *lp)
{
return -EINVAL;
}
@@ -395,18 +405,8 @@ static int lp855x_parse_dt(struct device *dev, struct device_node *node)
static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct lp855x *lp;
- struct lp855x_platform_data *pdata = dev_get_platdata(&cl->dev);
- struct device_node *node = cl->dev.of_node;
int ret;
- if (!pdata) {
- ret = lp855x_parse_dt(&cl->dev, node);
- if (ret < 0)
- return ret;
-
- pdata = dev_get_platdata(&cl->dev);
- }
-
if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
return -EIO;
@@ -414,16 +414,31 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (!lp)
return -ENOMEM;
- if (pdata->period_ns > 0)
- lp->mode = PWM_BASED;
- else
- lp->mode = REGISTER_BASED;
-
lp->client = cl;
lp->dev = &cl->dev;
- lp->pdata = pdata;
lp->chipname = id->name;
lp->chip_id = id->driver_data;
+ lp->pdata = dev_get_platdata(&cl->dev);
+
+ if (!lp->pdata) {
+ ret = lp855x_parse_dt(lp);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (lp->pdata->period_ns > 0)
+ lp->mode = PWM_BASED;
+ else
+ lp->mode = REGISTER_BASED;
+
+ if (lp->pdata->supply) {
+ ret = regulator_enable(lp->pdata->supply);
+ if (ret < 0) {
+ dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
+ return ret;
+ }
+ }
+
i2c_set_clientdata(cl, lp);
ret = lp855x_configure(lp);
@@ -455,6 +470,8 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
+ if (lp->pdata->supply)
+ regulator_disable(lp->pdata->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
return 0;
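
A short sketch of the optional-supply handling introduced above; the key points are that -EPROBE_DEFER must be propagated and that enable/disable stay balanced across probe() and remove():

pdata->supply = devm_regulator_get(dev, "power");
if (IS_ERR(pdata->supply)) {
	if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* regulator not ready yet */
	pdata->supply = NULL;		/* supply is optional */
}

/* later, in remove(): */
if (lp->pdata->supply)
	regulator_disable(lp->pdata->supply);
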
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index cb5ae4c08469..3a145a643e0d 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -34,6 +34,7 @@ struct pwm_bl_data {
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
+ bool legacy;
int (*notify)(struct device *,
int brightness);
void (*notify_after)(struct device *,
@@ -274,7 +275,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(pb->pwm)) {
dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
-
+ pb->legacy = true;
pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
if (IS_ERR(pb->pwm)) {
dev_err(&pdev->dev, "unable to request legacy PWM\n");
@@ -339,6 +340,8 @@ static int pwm_backlight_remove(struct platform_device *pdev)
if (pb->exit)
pb->exit(&pdev->dev);
+ if (pb->legacy)
+ pwm_free(pb->pwm);
return 0;
}
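
The new legacy flag above records which allocation path was taken, since only a PWM obtained through the legacy API needs an explicit release; a minimal restatement:

pb->pwm = devm_pwm_get(&pdev->dev, NULL);	/* device-managed: auto-freed */
if (IS_ERR(pb->pwm)) {
	pb->legacy = true;
	pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
}

/* in remove(): devm handles the first case, pwm_free() the legacy one */
if (pb->legacy)
	pwm_free(pb->pwm);
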
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index eb976ee3a02f..ea437245562e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3624,7 +3624,7 @@ static int __init fb_console_init(void)
return 0;
}
-module_init(fb_console_init);
+fs_initcall(fb_console_init);
#ifdef MODULE
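
For reference on the initcall change above (a sketch of the ordering, assuming built-in fbcon): module_init() maps to device_initcall() at level 6, while fs_initcall() runs at level 5, so the console layer registers before drivers that typically use module_init().

/* level 5: runs before device_initcall()/module_init() users (level 6) */
fs_initcall(fb_console_init);
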
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index c78bfd1d1b34..4916c97216f8 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2408,7 +2408,7 @@ config FB_JZ4740
config FB_MXS
tristate "MXS LCD framebuffer support"
- depends on FB && ARCH_MXS
+ depends on FB && (ARCH_MXS || ARCH_MXC)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 6ad23bd3523a..32c0b6b28097 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -27,7 +27,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/hardirq.h>
-#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 518f790ef88a..35f7900a0573 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -3782,7 +3782,6 @@ static struct platform_driver amifb_driver = {
.remove = __exit_p(amifb_remove),
.driver = {
.name = "amiga-video",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index adc4ea2cc5a0..b305a1e7cc76 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -1016,7 +1016,7 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
- par->state.vgabase = (void __iomem *) vga_res.start;
+ par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
/* FIXME get memsize */
regval = vga_rseq(par->state.vgabase, 0x10);
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 9ec81d46fc57..94a8d04e60f9 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1465,7 +1465,6 @@ static struct platform_driver atmel_lcdfb_driver = {
.id_table = atmel_lcdfb_devtypes,
.driver = {
.name = "atmel_lcdfb",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
},
};
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 0676746ec68c..59560189b24a 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -638,7 +638,6 @@ int au1100fb_drv_resume(struct platform_device *dev)
static struct platform_driver au1100fb_driver = {
.driver = {
.name = "au1100-lcd",
- .owner = THIS_MODULE,
},
.probe = au1100fb_drv_probe,
.remove = au1100fb_drv_remove,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 18600d4e1b3f..f9507b1894df 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1841,7 +1841,6 @@ static const struct dev_pm_ops au1200fb_pmops = {
static struct platform_driver au1200fb_driver = {
.driver = {
.name = "au1200-lcd",
- .owner = THIS_MODULE,
.pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
diff --git a/drivers/video/fbdev/auo_k1900fb.c b/drivers/video/fbdev/auo_k1900fb.c
index f5b668e77af3..7637c60eae3d 100644
--- a/drivers/video/fbdev/auo_k1900fb.c
+++ b/drivers/video/fbdev/auo_k1900fb.c
@@ -193,7 +193,6 @@ static struct platform_driver auok1900fb_driver = {
.probe = auok1900fb_probe,
.remove = auok1900fb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "auo_k1900fb",
.pm = &auok190x_pm,
},
diff --git a/drivers/video/fbdev/auo_k1901fb.c b/drivers/video/fbdev/auo_k1901fb.c
index 12b9adcb75c5..681fe61957b6 100644
--- a/drivers/video/fbdev/auo_k1901fb.c
+++ b/drivers/video/fbdev/auo_k1901fb.c
@@ -246,7 +246,6 @@ static struct platform_driver auok1901fb_driver = {
.probe = auok1901fb_probe,
.remove = auok1901fb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "auo_k1901fb",
.pm = &auok190x_pm,
},
diff --git a/drivers/video/fbdev/bf537-lq035.c b/drivers/video/fbdev/bf537-lq035.c
index a82d2578d976..7db3052b471d 100644
--- a/drivers/video/fbdev/bf537-lq035.c
+++ b/drivers/video/fbdev/bf537-lq035.c
@@ -894,7 +894,6 @@ static struct platform_driver bfin_lq035_driver = {
.resume = bfin_lq035_resume,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/bf54x-lq043fb.c b/drivers/video/fbdev/bf54x-lq043fb.c
index adbef542c998..8f1f97c75619 100644
--- a/drivers/video/fbdev/bf54x-lq043fb.c
+++ b/drivers/video/fbdev/bf54x-lq043fb.c
@@ -756,7 +756,6 @@ static struct platform_driver bfin_bf54x_driver = {
.resume = bfin_bf54x_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(bfin_bf54x_driver);
diff --git a/drivers/video/fbdev/bfin-t350mcqb-fb.c b/drivers/video/fbdev/bfin-t350mcqb-fb.c
index b5cf1307a3d9..e5ee4d9677f7 100644
--- a/drivers/video/fbdev/bfin-t350mcqb-fb.c
+++ b/drivers/video/fbdev/bfin-t350mcqb-fb.c
@@ -661,7 +661,6 @@ static struct platform_driver bfin_t350mcqb_driver = {
.resume = bfin_t350mcqb_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
module_platform_driver(bfin_t350mcqb_driver);
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 8556264b16b7..1c29bd19e3d5 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1212,7 +1212,6 @@ static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
.remove = broadsheetfb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "broadsheetfb",
},
};
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index bc123d6947a4..8c5b281f0b29 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -377,7 +377,6 @@ MODULE_DEVICE_TABLE(of, bw2_match);
static struct platform_driver bw2_driver = {
.driver = {
.name = "bw2",
- .owner = THIS_MODULE,
.of_match_table = bw2_match,
},
.probe = bw2_probe,
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index c79745b136bb..43e915eaf606 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -597,7 +597,6 @@ MODULE_DEVICE_TABLE(of, cg14_match);
static struct platform_driver cg14_driver = {
.driver = {
.name = "cg14",
- .owner = THIS_MODULE,
.of_match_table = cg14_match,
},
.probe = cg14_probe,
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 64a89d5747ed..716391f22e75 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -463,7 +463,6 @@ MODULE_DEVICE_TABLE(of, cg3_match);
static struct platform_driver cg3_driver = {
.driver = {
.name = "cg3",
- .owner = THIS_MODULE,
.of_match_table = cg3_match,
},
.probe = cg3_probe,
diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c
index 70781fea092a..bdf901ed5291 100644
--- a/drivers/video/fbdev/cg6.c
+++ b/drivers/video/fbdev/cg6.c
@@ -856,7 +856,6 @@ MODULE_DEVICE_TABLE(of, cg6_match);
static struct platform_driver cg6_driver = {
.driver = {
.name = "cg6",
- .owner = THIS_MODULE,
.of_match_table = cg6_match,
},
.probe = cg6_probe,
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 49a7bb4ef02f..649b32f78c08 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -384,7 +384,6 @@ MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids);
static struct platform_driver clps711x_fb_driver = {
.driver = {
.name = CLPS711X_FB_NAME,
- .owner = THIS_MODULE,
.of_match_table = clps711x_fb_dt_ids,
},
.probe = clps711x_fb_probe,
diff --git a/drivers/video/fbdev/clps711xfb.c b/drivers/video/fbdev/clps711xfb.c
index f00980607b8f..7693aea8fb23 100644
--- a/drivers/video/fbdev/clps711xfb.c
+++ b/drivers/video/fbdev/clps711xfb.c
@@ -303,7 +303,6 @@ static int clps711x_fb_remove(struct platform_device *pdev)
static struct platform_driver clps711x_fb_driver = {
.driver = {
.name = "video-clps711x",
- .owner = THIS_MODULE,
},
.probe = clps711x_fb_probe,
.remove = clps711x_fb_remove,
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index d5533f4db1cf..07675d6f323e 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -391,7 +391,6 @@ static struct platform_driver cobalt_lcdfb_driver = {
.remove = cobalt_lcdfb_remove,
.driver = {
.name = "cobalt-lcd",
- .owner = THIS_MODULE,
},
};
module_platform_driver(cobalt_lcdfb_driver);
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 10c876c95772..0081725c6b5b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1659,7 +1659,6 @@ static struct platform_driver da8xx_fb_driver = {
.remove = fb_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &fb_pm_ops,
},
};
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 982f6abe6faf..4bfff349b1fb 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -311,7 +311,6 @@ static int efifb_remove(struct platform_device *pdev)
static struct platform_driver efifb_driver = {
.driver = {
.name = "efi-framebuffer",
- .owner = THIS_MODULE,
},
.probe = efifb_probe,
.remove = efifb_remove,
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 35a0f533f1a2..7ec251cc9c03 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -622,7 +622,6 @@ static struct platform_driver ep93xxfb_driver = {
.remove = ep93xxfb_remove,
.driver = {
.name = "ep93xx-fb",
- .owner = THIS_MODULE,
},
};
module_platform_driver(ep93xxfb_driver);
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
index cee9602f9a7b..b527fe464628 100644
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
@@ -562,7 +562,6 @@ static struct platform_driver exynos_mipi_dsi_driver = {
.remove = exynos_mipi_dsi_remove,
.driver = {
.name = "exynos-mipi-dsim",
- .owner = THIS_MODULE,
.pm = &exynos_mipi_dsi_pm_ops,
},
};
@@ -570,5 +569,5 @@ static struct platform_driver exynos_mipi_dsi_driver = {
module_platform_driver(exynos_mipi_dsi_driver);
MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samusung SoC MIPI-DSI driver");
+MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
index 85edabfdef5a..2358a2fbbbcd 100644
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
@@ -876,5 +876,5 @@ int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
}
MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samusung SoC MIPI-DSI common driver");
+MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 4c4ffa61ae26..dda31e0a45af 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -1052,7 +1052,6 @@ MODULE_DEVICE_TABLE(of, ffb_match);
static struct platform_driver ffb_driver = {
.driver = {
.name = "ffb",
- .owner = THIS_MODULE,
.of_match_table = ffb_match,
},
.probe = ffb_probe,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index e8758b9c3bcc..7fa2e6f9e322 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1881,7 +1881,6 @@ MODULE_DEVICE_TABLE(of, fsl_diu_match);
static struct platform_driver fsl_diu_driver = {
.driver = {
.name = "fsl-diu-fb",
- .owner = THIS_MODULE,
.of_match_table = fsl_diu_match,
},
.probe = fsl_diu_probe,
diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c
index 2db5bb1a33e8..b471f92969b1 100644
--- a/drivers/video/fbdev/grvga.c
+++ b/drivers/video/fbdev/grvga.c
@@ -549,7 +549,6 @@ MODULE_DEVICE_TABLE(of, svgactrl_of_match);
static struct platform_driver grvga_driver = {
.driver = {
.name = "grlib-svgactrl",
- .owner = THIS_MODULE,
.of_match_table = svgactrl_of_match,
},
.probe = grvga_probe,
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index f64120ec9192..e4031ef39491 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -300,7 +300,6 @@ static struct platform_driver hecubafb_driver = {
.probe = hecubafb_probe,
.remove = hecubafb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "hecubafb",
},
};
diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c
index a648d5186c6e..9d68dc9ee7bf 100644
--- a/drivers/video/fbdev/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
@@ -464,7 +464,6 @@ static struct platform_driver hitfb_driver = {
.remove = hitfb_remove,
.driver = {
.name = "hitfb",
- .owner = THIS_MODULE,
.pm = &hitfb_dev_pm_ops,
},
};
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index f6e621684953..3b6a3c8c36e2 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1061,7 +1061,6 @@ static struct platform_driver imxfb_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = imxfb_of_dev_id,
- .owner = THIS_MODULE,
.pm = &imxfb_pm_ops,
},
.probe = imxfb_probe,
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 2c7f7d479fe2..62e59dc90ee6 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -662,7 +662,6 @@ MODULE_DEVICE_TABLE(of, leo_match);
static struct platform_driver leo_driver = {
.driver = {
.name = "leo",
- .owner = THIS_MODULE,
.of_match_table = leo_match,
},
.probe = leo_probe,
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 0cd4c3318511..9b8bebdf8f86 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -835,7 +835,6 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
static struct platform_driver of_platform_mb862xxfb_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_platform_mb862xx_tbl,
},
.probe = of_platform_mb862xx_probe,
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 195cc2db4c2c..ad04a01e2761 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -766,7 +766,6 @@ static struct platform_driver metronomefb_driver = {
.probe = metronomefb_probe,
.remove = metronomefb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "metronomefb",
},
};
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index b563b920f159..a0f496049db7 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -223,10 +223,10 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
EXPORT_SYMBOL_GPL(mmp_register_path);
/*
- * mmp_unregister_path - unregister and destory path
- * @p: path to be destoried.
+ * mmp_unregister_path - unregister and destroy path
+ * @p: path to be destroyed.
*
- * this function registers path and destorys it.
+ * this function registers path and destroys it.
*/
void mmp_unregister_path(struct mmp_path *path)
{
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 910fcc6ecece..92279e02dd94 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -673,7 +673,6 @@ failed_destroy_mutex:
static struct platform_driver mmpfb_driver = {
.driver = {
.name = "mmp-fb",
- .owner = THIS_MODULE,
},
.probe = mmpfb_probe,
};
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 8621a9f2bdcc..de54a4748065 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -441,8 +441,7 @@ static void path_deinit(struct mmphw_path_plat *path_plat)
if (!path_plat)
return;
- if (path_plat->path)
- mmp_unregister_path(path_plat->path);
+ mmp_unregister_path(path_plat->path);
}
static int mmphw_probe(struct platform_device *pdev)
@@ -572,7 +571,6 @@ failed:
static struct platform_driver mmphw_driver = {
.driver = {
.name = "mmp-disp",
- .owner = THIS_MODULE,
},
.probe = mmphw_probe,
};
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 23ec781e9a61..7947634ee6b0 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -334,8 +334,7 @@ static void mx3fb_init_backlight(struct mx3fb_data *fbd)
static void mx3fb_exit_backlight(struct mx3fb_data *fbd)
{
- if (fbd->bl)
- backlight_device_unregister(fbd->bl);
+ backlight_device_unregister(fbd->bl);
}
static void mx3fb_dma_done(void *);
@@ -1646,7 +1645,6 @@ static int mx3fb_remove(struct platform_device *dev)
static struct platform_driver mx3fb_driver = {
.driver = {
.name = MX3FB_NAME,
- .owner = THIS_MODULE,
},
.probe = mx3fb_probe,
.remove = mx3fb_remove,
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index accf48a2cce4..f8ac4a452f26 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -172,6 +172,8 @@ struct mxsfb_info {
struct fb_info fb_info;
struct platform_device *pdev;
struct clk *clk;
+ struct clk *clk_axi;
+ struct clk *clk_disp_axi;
void __iomem *base; /* registers */
unsigned allocated_size;
int enabled;
@@ -331,6 +333,11 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
}
}
+ if (host->clk_axi)
+ clk_prepare_enable(host->clk_axi);
+
+ if (host->clk_disp_axi)
+ clk_prepare_enable(host->clk_disp_axi);
clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
@@ -374,6 +381,10 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
clk_disable_unprepare(host->clk);
+ if (host->clk_disp_axi)
+ clk_disable_unprepare(host->clk_disp_axi);
+ if (host->clk_axi)
+ clk_disable_unprepare(host->clk_axi);
host->enabled = 0;
@@ -867,6 +878,14 @@ static int mxsfb_probe(struct platform_device *pdev)
goto fb_release;
}
+ host->clk_axi = devm_clk_get(&host->pdev->dev, "axi");
+ if (IS_ERR(host->clk_axi))
+ host->clk_axi = NULL;
+
+ host->clk_disp_axi = devm_clk_get(&host->pdev->dev, "disp_axi");
+ if (IS_ERR(host->clk_disp_axi))
+ host->clk_disp_axi = NULL;
+
host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd");
if (IS_ERR(host->reg_lcd))
host->reg_lcd = NULL;
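
A minimal sketch of the optional-clock handling added above: a missing "axi" or "disp_axi" clock is not treated as an error, so the handle is set to NULL and simply skipped around enable/disable:

host->clk_axi = devm_clk_get(&host->pdev->dev, "axi");
if (IS_ERR(host->clk_axi))
	host->clk_axi = NULL;		/* clock not provided: optional */

if (host->clk_axi)
	clk_prepare_enable(host->clk_axi);
/* ... mirrored with clk_disable_unprepare() on the disable path ... */
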
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
index 478f9808dee4..389fa2cbb713 100644
--- a/drivers/video/fbdev/nuc900fb.c
+++ b/drivers/video/fbdev/nuc900fb.c
@@ -755,7 +755,6 @@ static struct platform_driver nuc900fb_driver = {
.resume = nuc900fb_resume,
.driver = {
.name = "nuc900-lcd",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c
index 4a5f2cd3d3bf..f912a207b394 100644
--- a/drivers/video/fbdev/omap/lcd_ams_delta.c
+++ b/drivers/video/fbdev/omap/lcd_ams_delta.c
@@ -218,7 +218,6 @@ static struct platform_driver ams_delta_panel_driver = {
.resume = ams_delta_panel_resume,
.driver = {
.name = "lcd_ams_delta",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c
index 49bdeca81e50..a0729d0200d0 100644
--- a/drivers/video/fbdev/omap/lcd_h3.c
+++ b/drivers/video/fbdev/omap/lcd_h3.c
@@ -120,7 +120,6 @@ static struct platform_driver h3_panel_driver = {
.resume = h3_panel_resume,
.driver = {
.name = "lcd_h3",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_htcherald.c b/drivers/video/fbdev/omap/lcd_htcherald.c
index 20f477851d54..8b4dfa058258 100644
--- a/drivers/video/fbdev/omap/lcd_htcherald.c
+++ b/drivers/video/fbdev/omap/lcd_htcherald.c
@@ -111,7 +111,6 @@ static struct platform_driver htcherald_panel_driver = {
.resume = htcherald_panel_resume,
.driver = {
.name = "lcd_htcherald",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_inn1510.c b/drivers/video/fbdev/omap/lcd_inn1510.c
index 2ee423279e35..49907fab36ac 100644
--- a/drivers/video/fbdev/omap/lcd_inn1510.c
+++ b/drivers/video/fbdev/omap/lcd_inn1510.c
@@ -106,7 +106,6 @@ static struct platform_driver innovator1510_panel_driver = {
.resume = innovator1510_panel_resume,
.driver = {
.name = "lcd_inn1510",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_inn1610.c b/drivers/video/fbdev/omap/lcd_inn1610.c
index e3d3d135aa48..8b42894eeb77 100644
--- a/drivers/video/fbdev/omap/lcd_inn1610.c
+++ b/drivers/video/fbdev/omap/lcd_inn1610.c
@@ -127,7 +127,6 @@ static struct platform_driver innovator1610_panel_driver = {
.resume = innovator1610_panel_resume,
.driver = {
.name = "lcd_inn1610",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c
index 7fbe04bce0ed..c3ddebf934b2 100644
--- a/drivers/video/fbdev/omap/lcd_osk.c
+++ b/drivers/video/fbdev/omap/lcd_osk.c
@@ -126,7 +126,6 @@ static struct platform_driver osk_panel_driver = {
.resume = osk_panel_resume,
.driver = {
.name = "lcd_osk",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_palmte.c b/drivers/video/fbdev/omap/lcd_palmte.c
index ff4fb624b904..2713fed286f7 100644
--- a/drivers/video/fbdev/omap/lcd_palmte.c
+++ b/drivers/video/fbdev/omap/lcd_palmte.c
@@ -103,7 +103,6 @@ static struct platform_driver palmte_panel_driver = {
.resume = palmte_panel_resume,
.driver = {
.name = "lcd_palmte",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c
index aaf3c8ba1243..3d0ea04ec248 100644
--- a/drivers/video/fbdev/omap/lcd_palmtt.c
+++ b/drivers/video/fbdev/omap/lcd_palmtt.c
@@ -109,7 +109,6 @@ static struct platform_driver palmtt_panel_driver = {
.resume = palmtt_panel_resume,
.driver = {
.name = "lcd_palmtt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/lcd_palmz71.c b/drivers/video/fbdev/omap/lcd_palmz71.c
index 3b7d8aa1cf34..a20db4f7ea99 100644
--- a/drivers/video/fbdev/omap/lcd_palmz71.c
+++ b/drivers/video/fbdev/omap/lcd_palmz71.c
@@ -105,7 +105,6 @@ static struct platform_driver palmz71_panel_driver = {
.resume = palmz71_panel_resume,
.driver = {
.name = "lcd_palmz71",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index d8d028d98711..1fb3ea3c98a1 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1869,7 +1869,6 @@ static struct platform_driver omapfb_driver = {
.resume = omapfb_resume,
.driver = {
.name = MODULE_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
index 91921665b98b..9a2b5ce58545 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
@@ -308,7 +308,6 @@ static struct platform_driver tvc_connector_driver = {
.remove = __exit_p(tvc_remove),
.driver = {
.name = "connector-analog-tv",
- .owner = THIS_MODULE,
.of_match_table = tvc_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 2dfb6e5ff0cc..0cdc97413020 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -262,8 +262,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
- if (ddata->i2c_adapter)
- i2c_put_adapter(ddata->i2c_adapter);
+ i2c_put_adapter(ddata->i2c_adapter);
dev_err(&pdev->dev, "Failed to find video source\n");
return -EPROBE_DEFER;
@@ -352,8 +351,7 @@ static int dvic_probe(struct platform_device *pdev)
err_reg:
omap_dss_put_device(ddata->in);
- if (ddata->i2c_adapter)
- i2c_put_adapter(ddata->i2c_adapter);
+ i2c_put_adapter(ddata->i2c_adapter);
return r;
}
@@ -371,8 +369,7 @@ static int __exit dvic_remove(struct platform_device *pdev)
omap_dss_put_device(in);
- if (ddata->i2c_adapter)
- i2c_put_adapter(ddata->i2c_adapter);
+ i2c_put_adapter(ddata->i2c_adapter);
return 0;
}
@@ -389,7 +386,6 @@ static struct platform_driver dvi_connector_driver = {
.remove = __exit_p(dvic_remove),
.driver = {
.name = "connector-dvi",
- .owner = THIS_MODULE,
.of_match_table = dvic_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 7b25967a91eb..6ee4129bc0c0 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
@@ -170,98 +170,6 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
return in->ops.hdmi->detect(in);
}
-static int hdmic_audio_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- int r;
-
- /* enable audio only if the display is active */
- if (!omapdss_device_is_enabled(dssdev))
- return -EPERM;
-
- r = in->ops.hdmi->audio_enable(in);
- if (r)
- return r;
-
- dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
-
- return 0;
-}
-
-static void hdmic_audio_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- in->ops.hdmi->audio_disable(in);
-
- dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
-}
-
-static int hdmic_audio_start(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- int r;
-
- /*
- * No need to check the panel state. It was checked when trasitioning
- * to AUDIO_ENABLED.
- */
- if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED)
- return -EPERM;
-
- r = in->ops.hdmi->audio_start(in);
- if (r)
- return r;
-
- dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
-
- return 0;
-}
-
-static void hdmic_audio_stop(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- in->ops.hdmi->audio_stop(in);
-
- dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
-}
-
-static bool hdmic_audio_supported(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_enabled(dssdev))
- return false;
-
- return in->ops.hdmi->audio_supported(in);
-}
-
-static int hdmic_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- int r;
-
- /* config audio only if the display is active */
- if (!omapdss_device_is_enabled(dssdev))
- return -EPERM;
-
- r = in->ops.hdmi->audio_config(in, audio);
- if (r)
- return r;
-
- dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
-
- return 0;
-}
-
static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -296,13 +204,6 @@ static struct omap_dss_driver hdmic_driver = {
.detect = hdmic_detect,
.set_hdmi_mode = hdmic_set_hdmi_mode,
.set_hdmi_infoframe = hdmic_set_infoframe,
-
- .audio_enable = hdmic_audio_enable,
- .audio_disable = hdmic_audio_disable,
- .audio_start = hdmic_audio_start,
- .audio_stop = hdmic_audio_stop,
- .audio_supported = hdmic_audio_supported,
- .audio_config = hdmic_audio_config,
};
static int hdmic_probe_pdata(struct platform_device *pdev)
@@ -435,7 +336,6 @@ static struct platform_driver hdmi_connector_driver = {
.remove = __exit_p(hdmic_remove),
.driver = {
.name = "connector-hdmi",
- .owner = THIS_MODULE,
.of_match_table = hdmic_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index 47ee7cdee1c5..92919a74e715 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
@@ -249,6 +249,7 @@ static int tfp410_probe(struct platform_device *pdev)
dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
dssdev->phy.dpi.data_lines = ddata->data_lines;
+ dssdev->port_num = 1;
r = omapdss_register_output(dssdev);
if (r) {
@@ -296,7 +297,6 @@ static struct platform_driver tfp410_driver = {
.remove = __exit_p(tfp410_remove),
.driver = {
.name = "tfp410",
- .owner = THIS_MODULE,
.of_match_table = tfp410_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index c4abd56dd846..7f3e11b16c86 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -193,55 +193,6 @@ static bool tpd_detect(struct omap_dss_device *dssdev)
return gpio_get_value_cansleep(ddata->hpd_gpio);
}
-static int tpd_audio_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->audio_enable(in);
-}
-
-static void tpd_audio_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- in->ops.hdmi->audio_disable(in);
-}
-
-static int tpd_audio_start(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->audio_start(in);
-}
-
-static void tpd_audio_stop(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- in->ops.hdmi->audio_stop(in);
-}
-
-static bool tpd_audio_supported(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->audio_supported(in);
-}
-
-static int tpd_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->audio_config(in, audio);
-}
-
static int tpd_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -275,13 +226,6 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.detect = tpd_detect,
.set_infoframe = tpd_set_infoframe,
.set_hdmi_mode = tpd_set_hdmi_mode,
-
- .audio_enable = tpd_audio_enable,
- .audio_disable = tpd_audio_disable,
- .audio_start = tpd_audio_start,
- .audio_stop = tpd_audio_stop,
- .audio_supported = tpd_audio_supported,
- .audio_config = tpd_audio_config,
};
static int tpd_probe_pdata(struct platform_device *pdev)
@@ -409,6 +353,7 @@ static int tpd_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
+ dssdev->port_num = 1;
in = ddata->in;
@@ -459,7 +404,6 @@ static struct platform_driver tpd_driver = {
.remove = __exit_p(tpd_remove),
.driver = {
.name = "tpd12s015",
- .owner = THIS_MODULE,
.of_match_table = tpd_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
index a9c3dcf0f6b5..eb8fd8140ad0 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
@@ -325,7 +325,6 @@ static struct platform_driver panel_dpi_driver = {
.remove = __exit_p(panel_dpi_remove),
.driver = {
.name = "panel-dpi",
- .owner = THIS_MODULE,
.of_match_table = panel_dpi_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
index 899cb1ab523d..3414c2609320 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
@@ -1376,7 +1376,6 @@ static struct platform_driver dsicm_driver = {
.remove = __exit_p(dsicm_remove),
.driver = {
.name = "panel-dsi-cm",
- .owner = THIS_MODULE,
.of_match_table = dsicm_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
index 234142cc3764..18b19b6e1ac2 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
@@ -408,7 +408,6 @@ static struct platform_driver sharp_ls_driver = {
.remove = __exit_p(sharp_ls_remove),
.driver = {
.name = "panel-sharp-ls037v7dw01",
- .owner = THIS_MODULE,
.of_match_table = sharp_ls_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/dss/Kconfig b/drivers/video/fbdev/omap2/dss/Kconfig
index 3d5eb6c36c22..d1fa730c7d54 100644
--- a/drivers/video/fbdev/omap2/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/dss/Kconfig
@@ -74,9 +74,6 @@ config OMAP4_DSS_HDMI
help
HDMI support for OMAP4 based SoCs.
-config OMAP4_DSS_HDMI_AUDIO
- bool
-
config OMAP5_DSS_HDMI
bool "HDMI support for OMAP5"
default n
@@ -86,10 +83,6 @@ config OMAP5_DSS_HDMI
Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI
specification.
-config OMAP5_DSS_HDMI_AUDIO
- depends on OMAP5_DSS_HDMI
- bool
-
config OMAP2_DSS_SDI
bool "SDI support"
default n
diff --git a/drivers/video/fbdev/omap2/dss/Makefile b/drivers/video/fbdev/omap2/dss/Makefile
index 245f933060ee..2ea9d382354c 100644
--- a/drivers/video/fbdev/omap2/dss/Makefile
+++ b/drivers/video/fbdev/omap2/dss/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
- output.o dss-of.o
+ output.o dss-of.o pll.o
# DSS compat layer files
omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
dispc-compat.o display-sysfs.o
diff --git a/drivers/video/fbdev/omap2/dss/core.c b/drivers/video/fbdev/omap2/dss/core.c
index 6b74f73fb524..d5d92124e019 100644
--- a/drivers/video/fbdev/omap2/dss/core.c
+++ b/drivers/video/fbdev/omap2/dss/core.c
@@ -244,7 +244,6 @@ static struct platform_driver omap_dss_driver = {
.shutdown = omap_dss_shutdown,
.driver = {
.name = "omapdss",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 0e9a74bb9fc2..9850d9ef9a9d 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -3028,7 +3028,7 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
unsigned long dispc_fclk_rate(void)
{
- struct platform_device *dsidev;
+ struct dss_pll *pll;
unsigned long r = 0;
switch (dss_get_dispc_clk_source()) {
@@ -3036,12 +3036,12 @@ unsigned long dispc_fclk_rate(void)
r = dss_get_dispc_clk_rate();
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(0);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ pll = dss_pll_find("dsi0");
+ r = pll->cinfo.clkout[0];
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(1);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ pll = dss_pll_find("dsi1");
+ r = pll->cinfo.clkout[0];
break;
default:
BUG();
@@ -3053,7 +3053,7 @@ unsigned long dispc_fclk_rate(void)
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
{
- struct platform_device *dsidev;
+ struct dss_pll *pll;
int lcd;
unsigned long r;
u32 l;
@@ -3068,12 +3068,12 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
r = dss_get_dispc_clk_rate();
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(0);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ pll = dss_pll_find("dsi0");
+ r = pll->cinfo.clkout[0];
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(1);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ pll = dss_pll_find("dsi1");
+ r = pll->cinfo.clkout[0];
break;
default:
BUG();
@@ -3839,7 +3839,6 @@ static struct platform_driver omap_dispchw_driver = {
.remove = __exit_p(omap_dispchw_remove),
.driver = {
.name = "omapdss_dispc",
- .owner = THIS_MODULE,
.pm = &dispc_pm_ops,
.of_match_table = dispc_of_match,
.suppress_bind_attrs = true,
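The dispc.c hunks above stop reaching into the DSI driver and instead look the PLL up by name through the new dss_pll registry (dss_pll_find("dsi0") / "dsi1") and read its cached HSDIV output from cinfo.clkout[0]. A self-contained sketch of such a name-keyed registry; the field and function names mirror the patch, the simplified types and userspace main() are illustrative (the real struct dss_pll lives in the pll.c added to the Makefile below):

/* build: gcc -std=c99 registry.c */
#include <stdio.h>
#include <string.h>

struct dss_pll {
	const char *name;
	struct {
		unsigned long clkout[4];	/* HSDIV output rates */
	} cinfo;
};

static struct dss_pll *plls[4];

static int dss_pll_register(struct dss_pll *pll)
{
	for (int i = 0; i < 4; ++i) {
		if (!plls[i]) {
			plls[i] = pll;
			return 0;
		}
	}
	return -1;
}

static struct dss_pll *dss_pll_find(const char *name)
{
	for (int i = 0; i < 4; ++i)
		if (plls[i] && !strcmp(plls[i]->name, name))
			return plls[i];
	return NULL;
}

int main(void)
{
	struct dss_pll dsi0 = { .name = "dsi0" };

	dsi0.cinfo.clkout[0] = 170666666;	/* HSDIV_DISPC rate */
	dss_pll_register(&dsi0);

	/* what dispc_fclk_rate() now does for the DSI PLL clock source */
	printf("dispc fclk: %lu\n", dss_pll_find("dsi0")->cinfo.clkout[0]);
	return 0;
}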
diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 4a3363dae74a..9a2f8c3b102d 100644
--- a/drivers/video/fbdev/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
@@ -31,17 +31,20 @@
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <linux/of.h>
+#include <linux/clk.h>
#include <video/omapdss.h>
#include "dss.h"
#include "dss_features.h"
-static struct {
+#define HSDIV_DISPC 0
+
+struct dpi_data {
struct platform_device *pdev;
struct regulator *vdds_dsi_reg;
- struct platform_device *dsidev;
+ struct dss_pll *pll;
struct mutex lock;
@@ -52,9 +55,20 @@ static struct {
struct omap_dss_device output;
bool port_initialized;
-} dpi;
+};
+
+static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
+{
+ return container_of(dssdev, struct dpi_data, output);
+}
+
+/* only used in non-DT mode */
+static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
+{
+ return dev_get_drvdata(&pdev->dev);
+}
-static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
+static struct dss_pll *dpi_get_pll(enum omap_channel channel)
{
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -75,9 +89,9 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
case OMAPDSS_VER_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dsi_get_dsidev_from_id(0);
+ return dss_pll_find("dsi0");
case OMAP_DSS_CHANNEL_LCD2:
- return dsi_get_dsidev_from_id(1);
+ return dss_pll_find("dsi1");
default:
return NULL;
}
@@ -85,9 +99,9 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
case OMAPDSS_VER_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dsi_get_dsidev_from_id(0);
+ return dss_pll_find("dsi0");
case OMAP_DSS_CHANNEL_LCD3:
- return dsi_get_dsidev_from_id(1);
+ return dss_pll_find("dsi1");
default:
return NULL;
}
@@ -114,7 +128,7 @@ static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
}
struct dpi_clk_calc_ctx {
- struct platform_device *dsidev;
+ struct dss_pll *pll;
/* inputs */
@@ -122,7 +136,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
- struct dsi_clock_info dsi_cinfo;
+ struct dss_pll_clock_info dsi_cinfo;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -154,7 +168,7 @@ static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
}
-static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
@@ -164,30 +178,31 @@ static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
* shifted. So skip all odd dividers when the pixel clock is on the
* higher side.
*/
- if (regm_dispc > 1 && regm_dispc % 2 != 0 && ctx->pck_min >= 100000000)
+ if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
return false;
- ctx->dsi_cinfo.regm_dispc = regm_dispc;
- ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+ ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
+ ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
-static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint,
- unsigned long pll,
+static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
+ unsigned long clkdco,
void *data)
{
struct dpi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.regn = regn;
- ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.n = n;
+ ctx->dsi_cinfo.m = m;
ctx->dsi_cinfo.fint = fint;
- ctx->dsi_cinfo.clkin4ddr = pll;
+ ctx->dsi_cinfo.clkdco = clkdco;
- return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min,
- dpi_calc_hsdiv_cb, ctx);
+ return dss_pll_hsdiv_calc(ctx->pll, clkdco,
+ ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ dpi_calc_hsdiv_cb, ctx);
}
static bool dpi_calc_dss_cb(unsigned long fck, void *data)
@@ -200,23 +215,23 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
dpi_calc_dispc_cb, ctx);
}
-static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck,
+ struct dpi_clk_calc_ctx *ctx)
{
unsigned long clkin;
unsigned long pll_min, pll_max;
- clkin = dsi_get_pll_clkin(dpi.dsidev);
-
memset(ctx, 0, sizeof(*ctx));
- ctx->dsidev = dpi.dsidev;
+ ctx->pll = dpi->pll;
ctx->pck_min = pck - 1000;
ctx->pck_max = pck + 1000;
- ctx->dsi_cinfo.clkin = clkin;
pll_min = 0;
pll_max = 0;
- return dsi_pll_calc(dpi.dsidev, clkin,
+ clkin = clk_get_rate(ctx->pll->clkin);
+
+ return dss_pll_calc(ctx->pll, clkin,
pll_min, pll_max,
dpi_calc_pll_cb, ctx);
}
@@ -252,7 +267,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
-static int dpi_set_dsi_clk(enum omap_channel channel,
+static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
@@ -260,28 +275,28 @@ static int dpi_set_dsi_clk(enum omap_channel channel,
int r;
bool ok;
- ok = dpi_dsi_clk_calc(pck_req, &ctx);
+ ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
- r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo);
+ r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo);
if (r)
return r;
dss_select_lcd_clk_source(channel,
dpi_get_alt_clk_src(channel));
- dpi.mgr_config.clock_info = ctx.dispc_cinfo;
+ dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+ *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
*lck_div = ctx.dispc_cinfo.lck_div;
*pck_div = ctx.dispc_cinfo.pck_div;
return 0;
}
-static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
- int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
+ unsigned long *fck, int *lck_div, int *pck_div)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -295,7 +310,7 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
if (r)
return r;
- dpi.mgr_config.clock_info = ctx.dispc_cinfo;
+ dpi->mgr_config.clock_info = ctx.dispc_cinfo;
*fck = ctx.fck;
*lck_div = ctx.dispc_cinfo.lck_div;
@@ -304,19 +319,21 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
return 0;
}
-static int dpi_set_mode(struct omap_overlay_manager *mgr)
+static int dpi_set_mode(struct dpi_data *dpi)
{
- struct omap_video_timings *t = &dpi.timings;
+ struct omap_dss_device *out = &dpi->output;
+ struct omap_overlay_manager *mgr = out->manager;
+ struct omap_video_timings *t = &dpi->timings;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
- if (dpi.dsidev)
- r = dpi_set_dsi_clk(mgr->id, t->pixelclock, &fck,
+ if (dpi->pll)
+ r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
@@ -335,28 +352,32 @@ static int dpi_set_mode(struct omap_overlay_manager *mgr)
return 0;
}
-static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
+static void dpi_config_lcd_manager(struct dpi_data *dpi)
{
- dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+ struct omap_dss_device *out = &dpi->output;
+ struct omap_overlay_manager *mgr = out->manager;
+
+ dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
- dpi.mgr_config.stallmode = false;
- dpi.mgr_config.fifohandcheck = false;
+ dpi->mgr_config.stallmode = false;
+ dpi->mgr_config.fifohandcheck = false;
- dpi.mgr_config.video_port_width = dpi.data_lines;
+ dpi->mgr_config.video_port_width = dpi->data_lines;
- dpi.mgr_config.lcden_sig_polarity = 0;
+ dpi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(mgr, &dpi.mgr_config);
+ dss_mgr_set_lcd_config(mgr, &dpi->mgr_config);
}
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &dpi.output;
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ struct omap_dss_device *out = &dpi->output;
int r;
- mutex_lock(&dpi.lock);
+ mutex_lock(&dpi->lock);
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi.vdds_dsi_reg) {
+ if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi->vdds_dsi_reg) {
DSSERR("no VDSS_DSI regulator\n");
r = -ENODEV;
goto err_no_reg;
@@ -369,7 +390,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
}
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
- r = regulator_enable(dpi.vdds_dsi_reg);
+ r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
goto err_reg_enable;
}
@@ -378,25 +399,21 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(out->manager->id);
+ r = dss_dpi_select_source(out->port_num, out->manager->id);
if (r)
goto err_src_sel;
- if (dpi.dsidev) {
- r = dsi_runtime_get(dpi.dsidev);
- if (r)
- goto err_get_dsi;
-
- r = dsi_pll_init(dpi.dsidev, 0, 1);
+ if (dpi->pll) {
+ r = dss_pll_enable(dpi->pll);
if (r)
goto err_dsi_pll_init;
}
- r = dpi_set_mode(out->manager);
+ r = dpi_set_mode(dpi);
if (r)
goto err_set_mode;
- dpi_config_lcd_manager(out->manager);
+ dpi_config_lcd_manager(dpi);
mdelay(2);
@@ -404,78 +421,80 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- mutex_unlock(&dpi.lock);
+ mutex_unlock(&dpi->lock);
return 0;
err_mgr_enable:
err_set_mode:
- if (dpi.dsidev)
- dsi_pll_uninit(dpi.dsidev, true);
+ if (dpi->pll)
+ dss_pll_disable(dpi->pll);
err_dsi_pll_init:
- if (dpi.dsidev)
- dsi_runtime_put(dpi.dsidev);
-err_get_dsi:
err_src_sel:
dispc_runtime_put();
err_get_dispc:
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
- regulator_disable(dpi.vdds_dsi_reg);
+ regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
err_no_out_mgr:
err_no_reg:
- mutex_unlock(&dpi.lock);
+ mutex_unlock(&dpi->lock);
return r;
}
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = dpi.output.manager;
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ struct omap_overlay_manager *mgr = dpi->output.manager;
- mutex_lock(&dpi.lock);
+ mutex_lock(&dpi->lock);
dss_mgr_disable(mgr);
- if (dpi.dsidev) {
+ if (dpi->pll) {
dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
- dsi_pll_uninit(dpi.dsidev, true);
- dsi_runtime_put(dpi.dsidev);
+ dss_pll_disable(dpi->pll);
}
dispc_runtime_put();
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
- regulator_disable(dpi.vdds_dsi_reg);
+ regulator_disable(dpi->vdds_dsi_reg);
- mutex_unlock(&dpi.lock);
+ mutex_unlock(&dpi->lock);
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+
DSSDBG("dpi_set_timings\n");
- mutex_lock(&dpi.lock);
+ mutex_lock(&dpi->lock);
- dpi.timings = *timings;
+ dpi->timings = *timings;
- mutex_unlock(&dpi.lock);
+ mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- mutex_lock(&dpi.lock);
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+
+ mutex_lock(&dpi->lock);
- *timings = dpi.timings;
+ *timings = dpi->timings;
- mutex_unlock(&dpi.lock);
+ mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct omap_overlay_manager *mgr = dpi.output.manager;
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ struct omap_overlay_manager *mgr = dpi->output.manager;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
@@ -488,12 +507,12 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (timings->pixelclock == 0)
return -EINVAL;
- if (dpi.dsidev) {
- ok = dpi_dsi_clk_calc(timings->pixelclock, &ctx);
+ if (dpi->pll) {
+ ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx);
if (!ok)
return -EINVAL;
- fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
+ fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
} else {
ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
if (!ok)
@@ -514,74 +533,69 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
{
- mutex_lock(&dpi.lock);
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- dpi.data_lines = data_lines;
+ mutex_lock(&dpi->lock);
- mutex_unlock(&dpi.lock);
+ dpi->data_lines = data_lines;
+
+ mutex_unlock(&dpi->lock);
}
-static int dpi_verify_dsi_pll(struct platform_device *dsidev)
+static int dpi_verify_dsi_pll(struct dss_pll *pll)
{
int r;
/* do initial setup with the PLL to see if it is operational */
- r = dsi_runtime_get(dsidev);
+ r = dss_pll_enable(pll);
if (r)
return r;
- r = dsi_pll_init(dsidev, 0, 1);
- if (r) {
- dsi_runtime_put(dsidev);
- return r;
- }
-
- dsi_pll_uninit(dsidev, true);
- dsi_runtime_put(dsidev);
+ dss_pll_disable(pll);
return 0;
}
-static int dpi_init_regulator(void)
+static int dpi_init_regulator(struct dpi_data *dpi)
{
struct regulator *vdds_dsi;
if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
return 0;
- if (dpi.vdds_dsi_reg)
+ if (dpi->vdds_dsi_reg)
return 0;
- vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
+ vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
if (IS_ERR(vdds_dsi)) {
if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
}
- dpi.vdds_dsi_reg = vdds_dsi;
+ dpi->vdds_dsi_reg = vdds_dsi;
return 0;
}
-static void dpi_init_pll(void)
+static void dpi_init_pll(struct dpi_data *dpi)
{
- struct platform_device *dsidev;
+ struct dss_pll *pll;
- if (dpi.dsidev)
+ if (dpi->pll)
return;
- dsidev = dpi_get_dsidev(dpi.output.dispc_channel);
- if (!dsidev)
+ pll = dpi_get_pll(dpi->output.dispc_channel);
+ if (!pll)
return;
- if (dpi_verify_dsi_pll(dsidev)) {
+ if (dpi_verify_dsi_pll(pll)) {
DSSWARN("DSI PLL not operational\n");
return;
}
- dpi.dsidev = dsidev;
+ dpi->pll = pll;
}
/*
@@ -590,7 +604,7 @@ static void dpi_init_pll(void)
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dpi_get_channel(void)
+static enum omap_channel dpi_get_channel(int port_num)
{
switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
@@ -618,14 +632,15 @@ static enum omap_channel dpi_get_channel(void)
static int dpi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_overlay_manager *mgr;
int r;
- r = dpi_init_regulator();
+ r = dpi_init_regulator(dpi);
if (r)
return r;
- dpi_init_pll();
+ dpi_init_pll(dpi);
mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
if (!mgr)
@@ -676,13 +691,14 @@ static const struct omapdss_dpi_ops dpi_ops = {
static void dpi_init_output(struct platform_device *pdev)
{
- struct omap_dss_device *out = &dpi.output;
+ struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
+ struct omap_dss_device *out = &dpi->output;
out->dev = &pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->output_type = OMAP_DISPLAY_TYPE_DPI;
out->name = "dpi.0";
- out->dispc_channel = dpi_get_channel();
+ out->dispc_channel = dpi_get_channel(0);
out->ops.dpi = &dpi_ops;
out->owner = THIS_MODULE;
@@ -691,16 +707,69 @@ static void dpi_init_output(struct platform_device *pdev)
static void __exit dpi_uninit_output(struct platform_device *pdev)
{
- struct omap_dss_device *out = &dpi.output;
+ struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
+ struct omap_dss_device *out = &dpi->output;
+
+ omapdss_unregister_output(out);
+}
+
+static void dpi_init_output_port(struct platform_device *pdev,
+ struct device_node *port)
+{
+ struct dpi_data *dpi = port->data;
+ struct omap_dss_device *out = &dpi->output;
+ int r;
+ u32 port_num;
+
+ r = of_property_read_u32(port, "reg", &port_num);
+ if (r)
+ port_num = 0;
+
+ switch (port_num) {
+ case 2:
+ out->name = "dpi.2";
+ break;
+ case 1:
+ out->name = "dpi.1";
+ break;
+ case 0:
+ default:
+ out->name = "dpi.0";
+ break;
+ }
+
+ out->dev = &pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_DPI;
+ out->output_type = OMAP_DISPLAY_TYPE_DPI;
+ out->dispc_channel = dpi_get_channel(port_num);
+ out->port_num = port_num;
+ out->ops.dpi = &dpi_ops;
+ out->owner = THIS_MODULE;
+
+ omapdss_register_output(out);
+}
+
+static void __exit dpi_uninit_output_port(struct device_node *port)
+{
+ struct dpi_data *dpi = port->data;
+ struct omap_dss_device *out = &dpi->output;
omapdss_unregister_output(out);
}
static int omap_dpi_probe(struct platform_device *pdev)
{
- dpi.pdev = pdev;
+ struct dpi_data *dpi;
+
+ dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
- mutex_init(&dpi.lock);
+ dpi->pdev = pdev;
+
+ dev_set_drvdata(&pdev->dev, dpi);
+
+ mutex_init(&dpi->lock);
dpi_init_output(pdev);
@@ -719,7 +788,6 @@ static struct platform_driver omap_dpi_driver = {
.remove = __exit_p(omap_dpi_remove),
.driver = {
.name = "omapdss_dpi",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
};
@@ -736,10 +804,15 @@ void __exit dpi_uninit_platform_driver(void)
int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
{
+ struct dpi_data *dpi;
struct device_node *ep;
u32 datalines;
int r;
+ dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+
ep = omapdss_of_get_next_endpoint(port, NULL);
if (!ep)
return 0;
@@ -750,17 +823,18 @@ int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
goto err_datalines;
}
- dpi.data_lines = datalines;
+ dpi->data_lines = datalines;
of_node_put(ep);
- dpi.pdev = pdev;
+ dpi->pdev = pdev;
+ port->data = dpi;
- mutex_init(&dpi.lock);
+ mutex_init(&dpi->lock);
- dpi_init_output(pdev);
+ dpi_init_output_port(pdev, port);
- dpi.port_initialized = true;
+ dpi->port_initialized = true;
return 0;
@@ -770,10 +844,12 @@ err_datalines:
return r;
}
-void __exit dpi_uninit_port(void)
+void __exit dpi_uninit_port(struct device_node *port)
{
- if (!dpi.port_initialized)
+ struct dpi_data *dpi = port->data;
+
+ if (!dpi->port_initialized)
return;
- dpi_uninit_output(dpi.pdev);
+ dpi_uninit_output_port(port);
}
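The dpi.c rework above drops the file-scope singleton (static struct { ... } dpi) in favour of a per-device/per-port struct dpi_data, allocated in probe with devm_kzalloc() and recovered either from drvdata or via container_of() on the embedded output device. A minimal, compilable sketch of that container_of accessor pattern; the struct layout and accessor name mirror the patch, everything else is illustrative:

/* build: gcc -std=c99 sketch.c */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct omap_dss_device { const char *name; };

struct dpi_data {
	int data_lines;
	struct omap_dss_device output;	/* embedded, not a pointer */
};

/* mirrors dpi_get_data_from_dssdev() in the patch */
static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
{
	return container_of(dssdev, struct dpi_data, output);
}

int main(void)
{
	struct dpi_data dpi = { .data_lines = 24, .output = { .name = "dpi.0" } };
	struct omap_dss_device *out = &dpi.output;

	/* recover the private data from the embedded output device */
	printf("%s has %d data lines\n", out->name,
	       dpi_get_data_from_dssdev(out)->data_lines);
	return 0;
}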
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 0793bc67a275..3e44c580b1f8 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -219,6 +219,10 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
+/* DSI PLL HSDIV indices */
+#define HSDIV_DISPC 0
+#define HSDIV_DSI 1
+
#define DSI_MAX_NR_ISRS 2
#define DSI_MAX_NR_LANES 5
@@ -271,6 +275,7 @@ struct dsi_isr_tables {
struct dsi_clk_calc_ctx {
struct platform_device *dsidev;
+ struct dss_pll *pll;
/* inputs */
@@ -280,13 +285,18 @@ struct dsi_clk_calc_ctx {
/* outputs */
- struct dsi_clock_info dsi_cinfo;
+ struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
struct omap_video_timings dispc_vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
+struct dsi_lp_clock_info {
+ unsigned long lp_clk;
+ u16 lp_clk_div;
+};
+
struct dsi_data {
struct platform_device *pdev;
void __iomem *proto_base;
@@ -300,12 +310,14 @@ struct dsi_data {
bool is_enabled;
struct clk *dss_clk;
- struct clk *sys_clk;
struct dispc_clock_info user_dispc_cinfo;
- struct dsi_clock_info user_dsi_cinfo;
+ struct dss_pll_clock_info user_dsi_cinfo;
+
+ struct dsi_lp_clock_info user_lp_cinfo;
+ struct dsi_lp_clock_info current_lp_cinfo;
- struct dsi_clock_info current_cinfo;
+ struct dss_pll pll;
bool vdds_dsi_enabled;
struct regulator *vdds_dsi_reg;
@@ -321,8 +333,6 @@ struct dsi_data {
struct mutex lock;
struct semaphore bus_lock;
- unsigned pll_locked;
-
spinlock_t irq_lock;
struct dsi_isr_tables isr_tables;
/* space for a copy used by the interrupt handler */
@@ -347,7 +357,7 @@ struct dsi_data {
unsigned long cache_req_pck;
unsigned long cache_clk_freq;
- struct dsi_clock_info cache_cinfo;
+ struct dss_pll_clock_info cache_cinfo;
u32 errors;
spinlock_t errors_lock;
@@ -362,11 +372,6 @@ struct dsi_data {
spinlock_t irq_stats_lock;
struct dsi_irq_stats irq_stats;
#endif
- /* DSI PLL Parameter Ranges */
- unsigned long regm_max, regn_max;
- unsigned long regm_dispc_max, regm_dsi_max;
- unsigned long fint_min, fint_max;
- unsigned long lpdiv_max;
unsigned num_lanes_supported;
unsigned line_buffer_size;
@@ -412,7 +417,7 @@ static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss
return to_platform_device(dssdev->dev);
}
-struct platform_device *dsi_get_dsidev_from_id(int module)
+static struct platform_device *dsi_get_dsidev_from_id(int module)
{
struct omap_dss_device *out;
enum omap_dss_output_id id;
@@ -1134,7 +1139,7 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-int dsi_runtime_get(struct platform_device *dsidev)
+static int dsi_runtime_get(struct platform_device *dsidev)
{
int r;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -1146,7 +1151,7 @@ int dsi_runtime_get(struct platform_device *dsidev)
return r < 0 ? r : 0;
}
-void dsi_runtime_put(struct platform_device *dsidev)
+static void dsi_runtime_put(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
@@ -1188,23 +1193,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return 0;
}
-/* source clock for DSI PLL. this could also be PCLKFREE */
-static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
- bool enable)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- if (enable)
- clk_prepare_enable(dsi->sys_clk);
- else
- clk_disable_unprepare(dsi->sys_clk);
-
- if (enable && dsi->pll_locked) {
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
- DSSERR("cannot lock PLL when enabling clocks\n");
- }
-}
-
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
u32 l;
@@ -1256,25 +1244,25 @@ static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
return 0;
}
-unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
+static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
+ return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}
static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
+ return dsi->pll.cinfo.clkout[HSDIV_DSI];
}
static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- return dsi->current_cinfo.clkin4ddr / 16;
+ return dsi->pll.cinfo.clkdco / 16;
}
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
@@ -1293,10 +1281,10 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
return r;
}
-static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
- unsigned long lp_clk_min, unsigned long lp_clk_max)
+static int dsi_lp_clock_calc(unsigned long dsi_fclk,
+ unsigned long lp_clk_min, unsigned long lp_clk_max,
+ struct dsi_lp_clock_info *lp_cinfo)
{
- unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
unsigned lp_clk_div;
unsigned long lp_clk;
@@ -1306,8 +1294,8 @@ static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
return -EINVAL;
- cinfo->lp_clk_div = lp_clk_div;
- cinfo->lp_clk = lp_clk;
+ lp_cinfo->lp_clk_div = lp_clk_div;
+ lp_cinfo->lp_clk = lp_clk;
return 0;
}
@@ -1318,10 +1306,12 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
unsigned long dsi_fclk;
unsigned lp_clk_div;
unsigned long lp_clk;
+ unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
+
- lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;
+ lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
- if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
+ if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
return -EINVAL;
dsi_fclk = dsi_fclk_rate(dsidev);
@@ -1329,8 +1319,8 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
lp_clk = dsi_fclk / 2 / lp_clk_div;
DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
- dsi->current_cinfo.lp_clk = lp_clk;
- dsi->current_cinfo.lp_clk_div = lp_clk_div;
+ dsi->current_lp_cinfo.lp_clk = lp_clk;
+ dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
/* LP_CLK_DIVISOR */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
@@ -1391,286 +1381,33 @@ static int dsi_pll_power(struct platform_device *dsidev,
return 0;
}
-unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- return clk_get_rate(dsi->sys_clk);
-}
-
-bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
- unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int regm, regm_start, regm_stop;
- unsigned long out_max;
- unsigned long out;
-
- out_min = out_min ? out_min : 1;
- out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
-
- regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
- regm_stop = min(pll / out_min, dsi->regm_dispc_max);
-
- for (regm = regm_start; regm <= regm_stop; ++regm) {
- out = pll / regm;
-
- if (func(regm, out, data))
- return true;
- }
-
- return false;
-}
-
-bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
- unsigned long pll_min, unsigned long pll_max,
- dsi_pll_calc_func func, void *data)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int regn, regn_start, regn_stop;
- int regm, regm_start, regm_stop;
- unsigned long fint, pll;
- const unsigned long pll_hw_max = 1800000000;
- unsigned long fint_hw_min, fint_hw_max;
-
- fint_hw_min = dsi->fint_min;
- fint_hw_max = dsi->fint_max;
- regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
- regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
-
- pll_max = pll_max ? pll_max : ULONG_MAX;
-
- for (regn = regn_start; regn <= regn_stop; ++regn) {
- fint = clkin / regn;
-
- regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
- 1ul);
- regm_stop = min3(pll_max / fint / 2,
- pll_hw_max / fint / 2,
- dsi->regm_max);
-
- for (regm = regm_start; regm <= regm_stop; ++regm) {
- pll = 2 * regm * fint;
-
- if (func(regn, regm, fint, pll, data))
- return true;
- }
- }
-
- return false;
-}
-
-/* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct platform_device *dsidev,
- struct dsi_clock_info *cinfo)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
- return -EINVAL;
-
- if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
- return -EINVAL;
-
- if (cinfo->regm_dispc > dsi->regm_dispc_max)
- return -EINVAL;
-
- if (cinfo->regm_dsi > dsi->regm_dsi_max)
- return -EINVAL;
-
- cinfo->clkin = clk_get_rate(dsi->sys_clk);
- cinfo->fint = cinfo->clkin / cinfo->regn;
-
- if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
- return -EINVAL;
-
- cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
-
- if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
- return -EINVAL;
-
- if (cinfo->regm_dispc > 0)
- cinfo->dsi_pll_hsdiv_dispc_clk =
- cinfo->clkin4ddr / cinfo->regm_dispc;
- else
- cinfo->dsi_pll_hsdiv_dispc_clk = 0;
-
- if (cinfo->regm_dsi > 0)
- cinfo->dsi_pll_hsdiv_dsi_clk =
- cinfo->clkin4ddr / cinfo->regm_dsi;
- else
- cinfo->dsi_pll_hsdiv_dsi_clk = 0;
-
- return 0;
-}
-
-static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
{
unsigned long max_dsi_fck;
max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
- cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck);
- cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
+ cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
+ cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
}
-int dsi_pll_set_clock_div(struct platform_device *dsidev,
- struct dsi_clock_info *cinfo)
+static int dsi_pll_enable(struct dss_pll *pll)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+ struct platform_device *dsidev = dsi->pdev;
int r = 0;
- u32 l;
- int f = 0;
- u8 regn_start, regn_end, regm_start, regm_end;
- u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
-
- DSSDBG("DSI PLL clock config starts");
-
- dsi->current_cinfo.clkin = cinfo->clkin;
- dsi->current_cinfo.fint = cinfo->fint;
- dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
- dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
- cinfo->dsi_pll_hsdiv_dispc_clk;
- dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
- cinfo->dsi_pll_hsdiv_dsi_clk;
-
- dsi->current_cinfo.regn = cinfo->regn;
- dsi->current_cinfo.regm = cinfo->regm;
- dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
- dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
-
- DSSDBG("DSI Fint %ld\n", cinfo->fint);
-
- DSSDBG("clkin rate %ld\n", cinfo->clkin);
-
- /* DSIPHY == CLKIN4DDR */
- DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
- cinfo->regm,
- cinfo->regn,
- cinfo->clkin,
- cinfo->clkin4ddr);
-
- DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
- cinfo->clkin4ddr / 1000 / 1000 / 2);
-
- DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
-
- DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
- dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
- dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
- cinfo->dsi_pll_hsdiv_dispc_clk);
- DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
- dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
- dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
- cinfo->dsi_pll_hsdiv_dsi_clk);
-
- dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
- dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
- dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
- &regm_dispc_end);
- dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
- &regm_dsi_end);
-
- /* DSI_PLL_AUTOMODE = manual */
- REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
-
- l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
- l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
- /* DSI_PLL_REGN */
- l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
- /* DSI_PLL_REGM */
- l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
- /* DSI_CLOCK_DIV */
- l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
- regm_dispc_start, regm_dispc_end);
- /* DSIPROTO_CLOCK_DIV */
- l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
- regm_dsi_start, regm_dsi_end);
- dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
-
- BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
-
- l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
-
- if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
- f = cinfo->fint < 1000000 ? 0x3 :
- cinfo->fint < 1250000 ? 0x4 :
- cinfo->fint < 1500000 ? 0x5 :
- cinfo->fint < 1750000 ? 0x6 :
- 0x7;
-
- l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
- } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
- f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
-
- l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
- }
-
- l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
- l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
- l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
- if (dss_has_feature(FEAT_DSI_PLL_REFSEL))
- l = FLD_MOD(l, 3, 22, 21); /* REF_SYSCLK = sysclk */
- dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
-
- REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
-
- if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
- DSSERR("dsi pll go bit not going down.\n");
- r = -EIO;
- goto err;
- }
-
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
- DSSERR("cannot lock PLL\n");
- r = -EIO;
- goto err;
- }
-
- dsi->pll_locked = 1;
-
- l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
- l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
- l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
- l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
- l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
- l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
- l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
- l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
- l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
- l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
- l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
- l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
- l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
- l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
- l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
- dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
-
- DSSDBG("PLL config done\n");
-err:
- return r;
-}
-
-int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
- bool enable_hsdiv)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int r = 0;
- enum dsi_pll_power_state pwstate;
DSSDBG("PLL init\n");
- /*
- * It seems that on many OMAPs we need to enable both to have a
- * functional HSDivider.
- */
- enable_hsclk = enable_hsdiv = true;
-
r = dsi_regulator_init(dsidev);
if (r)
return r;
- dsi_enable_pll_clock(dsidev, 1);
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ return r;
+
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
*/
@@ -1697,16 +1434,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
* fill the whole display. No idea about this */
dispc_pck_free_enable(0);
- if (enable_hsclk && enable_hsdiv)
- pwstate = DSI_PLL_POWER_ON_ALL;
- else if (enable_hsclk)
- pwstate = DSI_PLL_POWER_ON_HSCLK;
- else if (enable_hsdiv)
- pwstate = DSI_PLL_POWER_ON_DIV;
- else
- pwstate = DSI_PLL_POWER_OFF;
-
- r = dsi_pll_power(dsidev, pwstate);
+ r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
if (r)
goto err1;
@@ -1721,15 +1449,14 @@ err1:
}
err0:
dsi_disable_scp_clk(dsidev);
- dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
return r;
}
-void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
+static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- dsi->pll_locked = 0;
dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
if (disconnect_lanes) {
WARN_ON(!dsi->vdds_dsi_enabled);
@@ -1738,18 +1465,27 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
}
dsi_disable_scp_clk(dsidev);
- dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
DSSDBG("PLL uninit done\n");
}
+static void dsi_pll_disable(struct dss_pll *pll)
+{
+ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+ struct platform_device *dsidev = dsi->pdev;
+
+ dsi_pll_uninit(dsidev, true);
+}
+
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
struct seq_file *s)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct dsi_clock_info *cinfo = &dsi->current_cinfo;
+ struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
+ struct dss_pll *pll = &dsi->pll;
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1759,28 +1495,28 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
- seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
+ seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));
- seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
+ seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);
- seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
- cinfo->clkin4ddr, cinfo->regm);
+ seq_printf(s, "CLKIN4DDR\t%-16lum %u\n",
+ cinfo->clkdco, cinfo->m);
- seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
+ seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
dss_feat_get_clk_source_name(dsi_module == 0 ?
OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
- cinfo->dsi_pll_hsdiv_dispc_clk,
- cinfo->regm_dispc,
+ cinfo->clkout[HSDIV_DISPC],
+ cinfo->mX[HSDIV_DISPC],
dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
"off" : "on");
- seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
+ seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
dss_feat_get_clk_source_name(dsi_module == 0 ?
OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
- cinfo->dsi_pll_hsdiv_dsi_clk,
- cinfo->regm_dsi,
+ cinfo->clkout[HSDIV_DSI],
+ cinfo->mX[HSDIV_DSI],
dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
"off" : "on");
@@ -1793,11 +1529,11 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
seq_printf(s, "DDR_CLK\t\t%lu\n",
- cinfo->clkin4ddr / 4);
+ cinfo->clkdco / 4);
seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
- seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
+ seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
dsi_runtime_put(dsidev);
}
@@ -2132,7 +1868,7 @@ static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
/* convert time in ns to ddr ticks, rounding up */
- unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
+ unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}
@@ -2140,7 +1876,7 @@ static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
+ unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
@@ -3730,7 +3466,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
struct omap_video_timings *timings = &dsi->timings;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
- int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
+ int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
@@ -4441,18 +4177,12 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct dsi_clock_info cinfo;
+ struct dss_pll_clock_info cinfo;
int r;
cinfo = dsi->user_dsi_cinfo;
- r = dsi_calc_clock_rates(dsidev, &cinfo);
- if (r) {
- DSSERR("Failed to calc dsi clocks\n");
- return r;
- }
-
- r = dsi_pll_set_clock_div(dsidev, &cinfo);
+ r = dss_pll_set_config(&dsi->pll, &cinfo);
if (r) {
DSSERR("Failed to set dsi clocks\n");
return r;
@@ -4466,7 +4196,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
- r = dsi_pll_init(dsidev, true, true);
+ r = dss_pll_enable(&dsi->pll);
if (r)
goto err0;
@@ -4510,7 +4240,7 @@ err3:
err2:
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
err1:
- dsi_pll_uninit(dsidev, true);
+ dss_pll_disable(&dsi->pll);
err0:
return r;
}
@@ -4551,8 +4281,6 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dsi;
- dsi_enable_pll_clock(dsidev, 1);
-
_dsi_initialize_irq(dsidev);
r = dsi_display_init_dsi(dsidev);
@@ -4564,7 +4292,6 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
return 0;
err_init_dsi:
- dsi_enable_pll_clock(dsidev, 0);
dsi_runtime_put(dsidev);
err_get_dsi:
mutex_unlock(&dsi->lock);
@@ -4592,7 +4319,6 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
dsi_runtime_put(dsidev);
- dsi_enable_pll_clock(dsidev, 0);
mutex_unlock(&dsi->lock);
}
@@ -4713,29 +4439,30 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return true;
}
-static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.regm_dispc = regm_dispc;
- ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+ ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
+ ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
dsi_cm_calc_dispc_cb, ctx);
}
-static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint,
- unsigned long pll, void *data)
+static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
+ unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.regn = regn;
- ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.n = n;
+ ctx->dsi_cinfo.m = m;
ctx->dsi_cinfo.fint = fint;
- ctx->dsi_cinfo.clkin4ddr = pll;
+ ctx->dsi_cinfo.clkdco = clkdco;
- return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_cm_calc_hsdiv_cb, ctx);
}
@@ -4748,7 +4475,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
unsigned long pll_min, pll_max;
unsigned long pck, txbyteclk;
- clkin = clk_get_rate(dsi->sys_clk);
+ clkin = clk_get_rate(dsi->pll.clkin);
bitspp = dsi_get_pixel_size(cfg->pixel_format);
ndl = dsi->num_lanes_used - 1;
@@ -4764,16 +4491,16 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
memset(ctx, 0, sizeof(*ctx));
ctx->dsidev = dsi->pdev;
+ ctx->pll = &dsi->pll;
ctx->config = cfg;
ctx->req_pck_min = pck;
ctx->req_pck_nom = pck;
ctx->req_pck_max = pck * 3 / 2;
- ctx->dsi_cinfo.clkin = clkin;
pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
pll_max = cfg->hs_clk_max * 4;
- return dsi_pll_calc(dsi->pdev, clkin,
+ return dss_pll_calc(ctx->pll, clkin,
pll_min, pll_max,
dsi_cm_calc_pll_cb, ctx);
}
@@ -4784,7 +4511,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
const struct omap_dss_dsi_config *cfg = ctx->config;
int bitspp = dsi_get_pixel_size(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
- unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4;
+ unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
unsigned long byteclk = hsclk / 4;
unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
@@ -4999,14 +4726,14 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return true;
}
-static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
+static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
unsigned long pck_max;
- ctx->dsi_cinfo.regm_dispc = regm_dispc;
- ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc;
+ ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
+ ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
/*
* In burst mode we can let the dispc pck be arbitrarily high, but it
@@ -5022,17 +4749,18 @@ static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
dsi_vm_calc_dispc_cb, ctx);
}
-static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint,
- unsigned long pll, void *data)
+static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
+ unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.regn = regn;
- ctx->dsi_cinfo.regm = regm;
+ ctx->dsi_cinfo.n = n;
+ ctx->dsi_cinfo.m = m;
ctx->dsi_cinfo.fint = fint;
- ctx->dsi_cinfo.clkin4ddr = pll;
+ ctx->dsi_cinfo.clkdco = clkdco;
- return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_vm_calc_hsdiv_cb, ctx);
}
@@ -5048,14 +4776,13 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
int bitspp = dsi_get_pixel_size(cfg->pixel_format);
unsigned long byteclk_min;
- clkin = clk_get_rate(dsi->sys_clk);
+ clkin = clk_get_rate(dsi->pll.clkin);
memset(ctx, 0, sizeof(*ctx));
ctx->dsidev = dsi->pdev;
+ ctx->pll = &dsi->pll;
ctx->config = cfg;
- ctx->dsi_cinfo.clkin = clkin;
-
/* these limits should come from the panel driver */
ctx->req_pck_min = t->pixelclock - 1000;
ctx->req_pck_nom = t->pixelclock;
@@ -5074,7 +4801,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
pll_max = byteclk_max * 4 * 4;
}
- return dsi_pll_calc(dsi->pdev, clkin,
+ return dss_pll_calc(ctx->pll, clkin,
pll_min, pll_max,
dsi_vm_calc_pll_cb, ctx);
}
@@ -5106,8 +4833,8 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
- r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min,
- config->lp_clk_max);
+ r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
+ config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
if (r) {
DSSERR("failed to find suitable DSI LP clock settings\n");
goto err;
@@ -5234,35 +4961,6 @@ static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
}
}
-void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
-{
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
- DSSERR("%s (%s) not active\n",
- dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
- dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
-}
-
-void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
-{
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
- DSSERR("%s (%s) not active\n",
- dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
- dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
-}
-
-static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
- dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
- dsi->regm_dispc_max =
- dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
- dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
- dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
- dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
- dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
-}
static int dsi_get_clocks(struct platform_device *dsidev)
{
@@ -5277,14 +4975,6 @@ static int dsi_get_clocks(struct platform_device *dsidev)
dsi->dss_clk = clk;
- clk = devm_clk_get(&dsidev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
-
- dsi->sys_clk = clk;
-
return 0;
}
@@ -5453,6 +5143,135 @@ err:
return r;
}
+static const struct dss_pll_ops dsi_pll_ops = {
+ .enable = dsi_pll_enable,
+ .disable = dsi_pll_disable,
+ .set_config = dss_pll_write_config_type_a,
+};
+
+static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
+ .n_max = (1 << 7) - 1,
+ .m_max = (1 << 11) - 1,
+ .mX_max = (1 << 4) - 1,
+ .fint_min = 750000,
+ .fint_max = 2100000,
+ .clkdco_low = 1000000000,
+ .clkdco_max = 1800000000,
+
+ .n_msb = 7,
+ .n_lsb = 1,
+ .m_msb = 18,
+ .m_lsb = 8,
+
+ .mX_msb[0] = 22,
+ .mX_lsb[0] = 19,
+ .mX_msb[1] = 26,
+ .mX_lsb[1] = 23,
+
+ .has_stopmode = true,
+ .has_freqsel = true,
+ .has_selfreqdco = false,
+ .has_refsel = false,
+};
+
+static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
+ .n_max = (1 << 8) - 1,
+ .m_max = (1 << 12) - 1,
+ .mX_max = (1 << 5) - 1,
+ .fint_min = 500000,
+ .fint_max = 2500000,
+ .clkdco_low = 1000000000,
+ .clkdco_max = 1800000000,
+
+ .n_msb = 8,
+ .n_lsb = 1,
+ .m_msb = 20,
+ .m_lsb = 9,
+
+ .mX_msb[0] = 25,
+ .mX_lsb[0] = 21,
+ .mX_msb[1] = 30,
+ .mX_lsb[1] = 26,
+
+ .has_stopmode = true,
+ .has_freqsel = false,
+ .has_selfreqdco = false,
+ .has_refsel = false,
+};
+
+static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
+ .n_max = (1 << 8) - 1,
+ .m_max = (1 << 12) - 1,
+ .mX_max = (1 << 5) - 1,
+ .fint_min = 150000,
+ .fint_max = 52000000,
+ .clkdco_low = 1000000000,
+ .clkdco_max = 1800000000,
+
+ .n_msb = 8,
+ .n_lsb = 1,
+ .m_msb = 20,
+ .m_lsb = 9,
+
+ .mX_msb[0] = 25,
+ .mX_lsb[0] = 21,
+ .mX_msb[1] = 30,
+ .mX_lsb[1] = 26,
+
+ .has_stopmode = true,
+ .has_freqsel = false,
+ .has_selfreqdco = true,
+ .has_refsel = true,
+};
+
+static int dsi_init_pll_data(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dss_pll *pll = &dsi->pll;
+ struct clk *clk;
+ int r;
+
+ clk = devm_clk_get(&dsidev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
+ pll->clkin = clk;
+ pll->base = dsi->pll_base;
+
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ pll->hw = &dss_omap3_dsi_pll_hw;
+ break;
+
+ case OMAPDSS_VER_OMAP4430_ES1:
+ case OMAPDSS_VER_OMAP4430_ES2:
+ case OMAPDSS_VER_OMAP4:
+ pll->hw = &dss_omap4_dsi_pll_hw;
+ break;
+
+ case OMAPDSS_VER_OMAP5:
+ pll->hw = &dss_omap5_dsi_pll_hw;
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ pll->ops = &dsi_pll_ops;
+
+ r = dss_pll_register(pll);
+ if (r)
+ return r;
+
+ return 0;
+}
+
/* DSI1 HW IP initialisation */
static int omap_dsihw_probe(struct platform_device *dsidev)
{
@@ -5598,12 +5417,12 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dsi->vc[i].vc_id = 0;
}
- dsi_calc_clock_param_ranges(dsidev);
-
r = dsi_get_clocks(dsidev);
if (r)
return r;
+ dsi_init_pll_data(dsidev);
+
pm_runtime_enable(&dsidev->dev);
r = dsi_runtime_get(dsidev);
@@ -5672,6 +5491,8 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
WARN_ON(dsi->scp_clk_refcount > 0);
+ dss_pll_unregister(&dsi->pll);
+
dsi_uninit_output(dsidev);
pm_runtime_disable(&dsidev->dev);
@@ -5751,7 +5572,6 @@ static struct platform_driver omap_dsihw_driver = {
.remove = __exit_p(omap_dsihw_remove),
.driver = {
.name = "omapdss_dsi",
- .owner = THIS_MODULE,
.pm = &dsi_pm_ops,
.of_match_table = dsi_of_match,
.suppress_bind_attrs = true,
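The dsi.c changes above delete the open-coded regn/regm search (dsi_pll_calc(), dsi_hsdiv_calc()) and the register programming in dsi_pll_set_clock_div(), describing the PLL instead through a dss_pll_hw table of divider limits and field positions that the shared dss_pll code consumes. A rough, self-contained sketch of the kind of N/M search that machinery performs, using the OMAP4 limits from the dss_omap4_dsi_pll_hw table above; the function names, the 38.4 MHz clkin and the target DCO range are illustrative only:

/* build: gcc -std=c99 pll_calc.c */
#include <stdbool.h>
#include <stdio.h>

#define FINT_MIN	500000UL	/* .fint_min */
#define FINT_MAX	2500000UL	/* .fint_max */
#define N_MAX		255UL		/* .n_max = (1 << 8) - 1 */
#define M_MAX		4095UL		/* .m_max = (1 << 12) - 1 */
#define CLKDCO_MAX	1800000000UL	/* .clkdco_max */

/* callback returns true to stop the search, like the *_calc_pll_cb()s above */
typedef bool (*pll_calc_func)(unsigned long n, unsigned long m,
			      unsigned long fint, unsigned long clkdco,
			      void *data);

static bool pll_calc(unsigned long clkin, unsigned long clkdco_min,
		     unsigned long clkdco_max, pll_calc_func func, void *data)
{
	for (unsigned long n = 1; n <= N_MAX; ++n) {
		unsigned long fint = clkin / n;

		if (fint < FINT_MIN || fint > FINT_MAX)
			continue;

		for (unsigned long m = 1; m <= M_MAX; ++m) {
			unsigned long clkdco = 2 * m * fint;

			if (clkdco < clkdco_min)
				continue;
			if (clkdco > clkdco_max || clkdco > CLKDCO_MAX)
				break;	/* clkdco only grows with m */

			if (func(n, m, fint, clkdco, data))
				return true;
		}
	}
	return false;
}

static bool print_first(unsigned long n, unsigned long m, unsigned long fint,
			unsigned long clkdco, void *data)
{
	(void)data;
	printf("n=%lu m=%lu fint=%lu clkdco=%lu\n", n, m, fint, clkdco);
	return true;	/* accept the first candidate */
}

int main(void)
{
	/* 38.4 MHz sys_clk, aim for a DCO around 1 GHz */
	pll_calc(38400000UL, 1000000000UL, 1100000000UL, print_first, NULL);
	return 0;
}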
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index a4b20aaf6142..928ee639c0c1 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -20,6 +20,8 @@
#include <video/omapdss.h>
+#include "dss.h"
+
struct device_node *
omapdss_of_get_next_port(const struct device_node *parent,
struct device_node *prev)
@@ -84,20 +86,17 @@ omapdss_of_get_next_endpoint(const struct device_node *parent,
}
EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint);
-static struct device_node *
-omapdss_of_get_remote_device_node(const struct device_node *node)
+struct device_node *dss_of_port_get_parent_device(struct device_node *port)
{
struct device_node *np;
int i;
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- if (!np)
+ if (!port)
return NULL;
- np = of_get_next_parent(np);
+ np = of_get_next_parent(port);
- for (i = 0; i < 3 && np; ++i) {
+ for (i = 0; i < 2 && np; ++i) {
struct property *prop;
prop = of_find_property(np, "compatible", NULL);
@@ -111,6 +110,31 @@ omapdss_of_get_remote_device_node(const struct device_node *node)
return NULL;
}
+u32 dss_of_port_get_port_number(struct device_node *port)
+{
+ int r;
+ u32 reg;
+
+ r = of_property_read_u32(port, "reg", &reg);
+ if (r)
+ reg = 0;
+
+ return reg;
+}
+
+static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(node, "remote-endpoint", 0);
+ if (!np)
+ return NULL;
+
+ np = of_get_next_parent(np);
+
+ return np;
+}
+
struct device_node *
omapdss_of_get_first_endpoint(const struct device_node *parent)
{
@@ -133,27 +157,25 @@ struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node)
{
struct device_node *ep;
- struct device_node *src_node;
+ struct device_node *src_port;
struct omap_dss_device *src;
ep = omapdss_of_get_first_endpoint(node);
if (!ep)
return ERR_PTR(-EINVAL);
- src_node = omapdss_of_get_remote_device_node(ep);
-
- of_node_put(ep);
-
- if (!src_node)
+ src_port = omapdss_of_get_remote_port(ep);
+ if (!src_port) {
+ of_node_put(ep);
return ERR_PTR(-EINVAL);
+ }
- src = omap_dss_find_output_by_node(src_node);
+ of_node_put(ep);
- of_node_put(src_node);
+ src = omap_dss_find_output_by_port_node(src_port);
- if (!src)
- return ERR_PTR(-EPROBE_DEFER);
+ of_node_put(src_port);
- return src;
+ return src ? src : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep);
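
The dss-of.c changes above make the source lookup port-based (omapdss_of_get_remote_port() plus omap_dss_find_output_by_port_node()) and expose two small helpers to the output drivers. A sketch of how an output driver might combine them; the function name is illustrative only, and since dss_of_port_get_parent_device() walks upwards via of_get_next_parent(), which consumes the reference on its argument, the sketch takes an extra reference first:

	/* Illustrative only: combining the two new dss-of helpers. */
	static void example_describe_port(struct device_node *port)
	{
		struct device_node *dev_node;
		u32 portnum;

		portnum = dss_of_port_get_port_number(port);	/* "reg", 0 if absent */

		/* the helper consumes its argument's reference, so take one */
		dev_node = dss_of_port_get_parent_device(of_node_get(port));

		pr_info("port %u belongs to %s\n", portnum,
			dev_node ? dev_node->full_name : "<none>");

		of_node_put(dev_node);
	}
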
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 14bcd6c43f72..9987154d50b4 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -70,7 +70,9 @@ struct dss_features {
u8 fck_div_max;
u8 dss_fck_multiplier;
const char *parent_clk_name;
- int (*dpi_select_source)(enum omap_channel channel);
+ enum omap_display_type *ports;
+ int num_ports;
+ int (*dpi_select_source)(int port, enum omap_channel channel);
};
static struct {
@@ -294,7 +296,6 @@ static void dss_dump_regs(struct seq_file *s)
static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
{
- struct platform_device *dsidev;
int b;
u8 start, end;
@@ -304,13 +305,9 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
b = 1;
- dsidev = dsi_get_dsidev_from_id(0);
- dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
b = 2;
- dsidev = dsi_get_dsidev_from_id(1);
- dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
default:
BUG();
@@ -327,7 +324,6 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src)
{
- struct platform_device *dsidev;
int b, pos;
switch (clk_src) {
@@ -337,14 +333,10 @@ void dss_select_dsi_clk_source(int dsi_module,
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
BUG_ON(dsi_module != 0);
b = 1;
- dsidev = dsi_get_dsidev_from_id(0);
- dsi_wait_pll_hsdiv_dsi_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
BUG_ON(dsi_module != 1);
b = 1;
- dsidev = dsi_get_dsidev_from_id(1);
- dsi_wait_pll_hsdiv_dsi_active(dsidev);
break;
default:
BUG();
@@ -360,7 +352,6 @@ void dss_select_dsi_clk_source(int dsi_module,
void dss_select_lcd_clk_source(enum omap_channel channel,
enum omap_dss_clk_source clk_src)
{
- struct platform_device *dsidev;
int b, ix, pos;
if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
@@ -375,15 +366,11 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
b = 1;
- dsidev = dsi_get_dsidev_from_id(0);
- dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
channel != OMAP_DSS_CHANNEL_LCD3);
b = 1;
- dsidev = dsi_get_dsidev_from_id(1);
- dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
default:
BUG();
@@ -564,7 +551,7 @@ enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
return REG_GET(DSS_CONTROL, 15, 15);
}
-static int dss_dpi_select_source_omap2_omap3(enum omap_channel channel)
+static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel)
{
if (channel != OMAP_DSS_CHANNEL_LCD)
return -EINVAL;
@@ -572,7 +559,7 @@ static int dss_dpi_select_source_omap2_omap3(enum omap_channel channel)
return 0;
}
-static int dss_dpi_select_source_omap4(enum omap_channel channel)
+static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
{
int val;
@@ -592,7 +579,7 @@ static int dss_dpi_select_source_omap4(enum omap_channel channel)
return 0;
}
-static int dss_dpi_select_source_omap5(enum omap_channel channel)
+static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
{
int val;
@@ -618,9 +605,9 @@ static int dss_dpi_select_source_omap5(enum omap_channel channel)
return 0;
}
-int dss_dpi_select_source(enum omap_channel channel)
+int dss_dpi_select_source(int port, enum omap_channel channel)
{
- return dss.feat->dpi_select_source(channel);
+ return dss.feat->dpi_select_source(port, channel);
}
static int dss_get_clocks(void)
@@ -689,6 +676,16 @@ void dss_debug_dump_clocks(struct seq_file *s)
}
#endif
+
+static enum omap_display_type omap2plus_ports[] = {
+ OMAP_DISPLAY_TYPE_DPI,
+};
+
+static enum omap_display_type omap34xx_ports[] = {
+ OMAP_DISPLAY_TYPE_DPI,
+ OMAP_DISPLAY_TYPE_SDI,
+};
+
static const struct dss_features omap24xx_dss_feats __initconst = {
/*
* fck div max is really 16, but the divider range has gaps. The range
@@ -698,6 +695,8 @@ static const struct dss_features omap24xx_dss_feats __initconst = {
.dss_fck_multiplier = 2,
.parent_clk_name = "core_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+ .ports = omap2plus_ports,
+ .num_ports = ARRAY_SIZE(omap2plus_ports),
};
static const struct dss_features omap34xx_dss_feats __initconst = {
@@ -705,6 +704,8 @@ static const struct dss_features omap34xx_dss_feats __initconst = {
.dss_fck_multiplier = 2,
.parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+ .ports = omap34xx_ports,
+ .num_ports = ARRAY_SIZE(omap34xx_ports),
};
static const struct dss_features omap3630_dss_feats __initconst = {
@@ -712,6 +713,8 @@ static const struct dss_features omap3630_dss_feats __initconst = {
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+ .ports = omap2plus_ports,
+ .num_ports = ARRAY_SIZE(omap2plus_ports),
};
static const struct dss_features omap44xx_dss_feats __initconst = {
@@ -719,6 +722,8 @@ static const struct dss_features omap44xx_dss_feats __initconst = {
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap4,
+ .ports = omap2plus_ports,
+ .num_ports = ARRAY_SIZE(omap2plus_ports),
};
static const struct dss_features omap54xx_dss_feats __initconst = {
@@ -726,6 +731,8 @@ static const struct dss_features omap54xx_dss_feats __initconst = {
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
.dpi_select_source = &dss_dpi_select_source_omap5,
+ .ports = omap2plus_ports,
+ .num_ports = ARRAY_SIZE(omap2plus_ports),
};
static const struct dss_features am43xx_dss_feats __initconst = {
@@ -733,6 +740,8 @@ static const struct dss_features am43xx_dss_feats __initconst = {
.dss_fck_multiplier = 0,
.parent_clk_name = NULL,
.dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+ .ports = omap2plus_ports,
+ .num_ports = ARRAY_SIZE(omap2plus_ports),
};
static int __init dss_init_features(struct platform_device *pdev)
@@ -798,37 +807,77 @@ static int __init dss_init_ports(struct platform_device *pdev)
if (!port)
return 0;
+ if (dss.feat->num_ports == 0)
+ return 0;
+
do {
+ enum omap_display_type port_type;
u32 reg;
r = of_property_read_u32(port, "reg", &reg);
if (r)
reg = 0;
-#ifdef CONFIG_OMAP2_DSS_DPI
- if (reg == 0)
- dpi_init_port(pdev, port);
-#endif
+ if (reg >= dss.feat->num_ports)
+ continue;
-#ifdef CONFIG_OMAP2_DSS_SDI
- if (reg == 1)
- sdi_init_port(pdev, port);
-#endif
+ port_type = dss.feat->ports[reg];
+ switch (port_type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ dpi_init_port(pdev, port);
+ break;
+ case OMAP_DISPLAY_TYPE_SDI:
+ sdi_init_port(pdev, port);
+ break;
+ default:
+ break;
+ }
} while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
return 0;
}
-static void __exit dss_uninit_ports(void)
+static void __exit dss_uninit_ports(struct platform_device *pdev)
{
-#ifdef CONFIG_OMAP2_DSS_DPI
- dpi_uninit_port();
-#endif
+ struct device_node *parent = pdev->dev.of_node;
+ struct device_node *port;
-#ifdef CONFIG_OMAP2_DSS_SDI
- sdi_uninit_port();
-#endif
+ if (parent == NULL)
+ return;
+
+ port = omapdss_of_get_next_port(parent, NULL);
+ if (!port)
+ return;
+
+ if (dss.feat->num_ports == 0)
+ return;
+
+ do {
+ enum omap_display_type port_type;
+ u32 reg;
+ int r;
+
+ r = of_property_read_u32(port, "reg", &reg);
+ if (r)
+ reg = 0;
+
+ if (reg >= dss.feat->num_ports)
+ continue;
+
+ port_type = dss.feat->ports[reg];
+
+ switch (port_type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ dpi_uninit_port(port);
+ break;
+ case OMAP_DISPLAY_TYPE_SDI:
+ sdi_uninit_port(port);
+ break;
+ default:
+ break;
+ }
+ } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
}
/* DSS HW IP initialisation */
@@ -910,7 +959,7 @@ err_setup_clocks:
static int __exit omap_dsshw_remove(struct platform_device *pdev)
{
- dss_uninit_ports();
+ dss_uninit_ports(pdev);
pm_runtime_disable(&pdev->dev);
@@ -963,7 +1012,6 @@ static struct platform_driver omap_dsshw_driver = {
.remove = __exit_p(omap_dsshw_remove),
.driver = {
.name = "omapdss_dss",
- .owner = THIS_MODULE,
.pm = &dss_pm_ops,
.of_match_table = dss_of_match,
.suppress_bind_attrs = true,
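
With the dss.c rework above, which outputs a given DSS revision exposes becomes data: each dss_features entry now carries a ports[] table indexed by the DT port's "reg" value, and dss_init_ports()/dss_uninit_ports() simply dispatch on it. As a sketch of what this buys, a hypothetical SoC variant with two DPI outputs would only need a table and feature entry along these lines (the divider and clock values are placeholders, not real data):

	/* Hypothetical example only: not an actual SoC handled by this patch. */
	static enum omap_display_type example_two_dpi_ports[] = {
		OMAP_DISPLAY_TYPE_DPI,		/* port reg = 0 */
		OMAP_DISPLAY_TYPE_DPI,		/* port reg = 1 */
	};

	static const struct dss_features example_dss_feats __initconst = {
		.fck_div_max		= 32,			/* placeholder */
		.dss_fck_multiplier	= 1,			/* placeholder */
		.parent_clk_name	= "dpll_per_x2_ck",	/* placeholder */
		.dpi_select_source	= &dss_dpi_select_source_omap4,
		.ports			= example_two_dpi_ports,
		.num_ports		= ARRAY_SIZE(example_two_dpi_ports),
	};
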
diff --git a/drivers/video/fbdev/omap2/dss/dss.h b/drivers/video/fbdev/omap2/dss/dss.h
index 8ff22c134c62..14fb0c23f4a2 100644
--- a/drivers/video/fbdev/omap2/dss/dss.h
+++ b/drivers/video/fbdev/omap2/dss/dss.h
@@ -100,35 +100,77 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dispc_clock_info {
+struct dss_pll;
+
+#define DSS_PLL_MAX_HSDIVS 4
+
+/*
+ * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
+ * Type-B PLLs: clkout[0] refers to m2.
+ */
+struct dss_pll_clock_info {
/* rates that we get with dividers below */
- unsigned long lck;
- unsigned long pck;
+ unsigned long fint;
+ unsigned long clkdco;
+ unsigned long clkout[DSS_PLL_MAX_HSDIVS];
/* dividers */
- u16 lck_div;
- u16 pck_div;
+ u16 n;
+ u16 m;
+ u32 mf;
+ u16 mX[DSS_PLL_MAX_HSDIVS];
+ u16 sd;
+};
+
+struct dss_pll_ops {
+ int (*enable)(struct dss_pll *pll);
+ void (*disable)(struct dss_pll *pll);
+ int (*set_config)(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo);
+};
+
+struct dss_pll_hw {
+ unsigned n_max;
+ unsigned m_min;
+ unsigned m_max;
+ unsigned mX_max;
+
+ unsigned long fint_min, fint_max;
+ unsigned long clkdco_min, clkdco_low, clkdco_max;
+
+ u8 n_msb, n_lsb;
+ u8 m_msb, m_lsb;
+ u8 mX_msb[DSS_PLL_MAX_HSDIVS], mX_lsb[DSS_PLL_MAX_HSDIVS];
+
+ bool has_stopmode;
+ bool has_freqsel;
+ bool has_selfreqdco;
+ bool has_refsel;
};
-struct dsi_clock_info {
+struct dss_pll {
+ const char *name;
+
+ struct clk *clkin;
+ struct regulator *regulator;
+
+ void __iomem *base;
+
+ const struct dss_pll_hw *hw;
+
+ const struct dss_pll_ops *ops;
+
+ struct dss_pll_clock_info cinfo;
+};
+
+struct dispc_clock_info {
/* rates that we get with dividers below */
- unsigned long fint;
- unsigned long clkin4ddr;
- unsigned long clkin;
- unsigned long dsi_pll_hsdiv_dispc_clk; /* OMAP3: DSI1_PLL_CLK
- * OMAP4: PLLx_CLK1 */
- unsigned long dsi_pll_hsdiv_dsi_clk; /* OMAP3: DSI2_PLL_CLK
- * OMAP4: PLLx_CLK2 */
- unsigned long lp_clk;
+ unsigned long lck;
+ unsigned long pck;
/* dividers */
- u16 regn;
- u16 regm;
- u16 regm_dispc; /* OMAP3: REGM3
- * OMAP4: REGM4 */
- u16 regm_dsi; /* OMAP3: REGM4
- * OMAP4: REGM5 */
- u16 lp_clk_div;
+ u16 lck_div;
+ u16 pck_div;
};
struct dss_lcd_mgr_config {
@@ -209,12 +251,16 @@ int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
unsigned long dss_get_dispc_clk_rate(void);
-int dss_dpi_select_source(enum omap_channel channel);
+int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
+/* dss-of */
+struct device_node *dss_of_port_get_parent_device(struct device_node *port);
+u32 dss_of_port_get_port_number(struct device_node *port);
+
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
void dss_debug_dump_clocks(struct seq_file *s);
#endif
@@ -244,16 +290,22 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
int sdi_init_platform_driver(void) __init;
void sdi_uninit_platform_driver(void) __exit;
+#ifdef CONFIG_OMAP2_DSS_SDI
int sdi_init_port(struct platform_device *pdev, struct device_node *port) __init;
-void sdi_uninit_port(void) __exit;
+void sdi_uninit_port(struct device_node *port) __exit;
+#else
+static inline int __init sdi_init_port(struct platform_device *pdev,
+ struct device_node *port)
+{
+ return 0;
+}
+static inline void __exit sdi_uninit_port(struct device_node *port)
+{
+}
+#endif
/* DSI */
-typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
- unsigned long pll, void *data);
-typedef bool (*dsi_hsdiv_calc_func)(int regm_dispc, unsigned long dispc,
- void *data);
-
#ifdef CONFIG_OMAP2_DSS_DSI
struct dentry;
@@ -262,104 +314,36 @@ struct file_operations;
int dsi_init_platform_driver(void) __init;
void dsi_uninit_platform_driver(void) __exit;
-int dsi_runtime_get(struct platform_device *dsidev);
-void dsi_runtime_put(struct platform_device *dsidev);
-
void dsi_dump_clocks(struct seq_file *s);
void dsi_irq_handler(void);
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
-unsigned long dsi_get_pll_clkin(struct platform_device *dsidev);
-
-bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
- unsigned long out_min, dsi_hsdiv_calc_func func, void *data);
-bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
- unsigned long pll_min, unsigned long pll_max,
- dsi_pll_calc_func func, void *data);
-
-unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
-int dsi_pll_set_clock_div(struct platform_device *dsidev,
- struct dsi_clock_info *cinfo);
-int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
- bool enable_hsdiv);
-void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
-void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
-void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
-struct platform_device *dsi_get_dsidev_from_id(int module);
#else
-static inline int dsi_runtime_get(struct platform_device *dsidev)
-{
- return 0;
-}
-static inline void dsi_runtime_put(struct platform_device *dsidev)
-{
-}
static inline u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
{
WARN("%s: DSI not compiled in, returning pixel_size as 0\n", __func__);
return 0;
}
-static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
-{
- WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
- return 0;
-}
-static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
- struct dsi_clock_info *cinfo)
-{
- WARN("%s: DSI not compiled in\n", __func__);
- return -ENODEV;
-}
-static inline int dsi_pll_init(struct platform_device *dsidev,
- bool enable_hsclk, bool enable_hsdiv)
-{
- WARN("%s: DSI not compiled in\n", __func__);
- return -ENODEV;
-}
-static inline void dsi_pll_uninit(struct platform_device *dsidev,
- bool disconnect_lanes)
-{
-}
-static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
-{
-}
-static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
-{
-}
-static inline struct platform_device *dsi_get_dsidev_from_id(int module)
-{
- return NULL;
-}
-
-static inline unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
-{
- return 0;
-}
-
-static inline bool dsi_hsdiv_calc(struct platform_device *dsidev,
- unsigned long pll, unsigned long out_min,
- dsi_hsdiv_calc_func func, void *data)
-{
- return false;
-}
-
-static inline bool dsi_pll_calc(struct platform_device *dsidev,
- unsigned long clkin,
- unsigned long pll_min, unsigned long pll_max,
- dsi_pll_calc_func func, void *data)
-{
- return false;
-}
-
#endif
/* DPI */
int dpi_init_platform_driver(void) __init;
void dpi_uninit_platform_driver(void) __exit;
+#ifdef CONFIG_OMAP2_DSS_DPI
int dpi_init_port(struct platform_device *pdev, struct device_node *port) __init;
-void dpi_uninit_port(void) __exit;
+void dpi_uninit_port(struct device_node *port) __exit;
+#else
+static inline int __init dpi_init_port(struct platform_device *pdev,
+ struct device_node *port)
+{
+ return 0;
+}
+static inline void __exit dpi_uninit_port(struct device_node *port)
+{
+}
+#endif
/* DISPC */
int dispc_init_platform_driver(void) __init;
@@ -438,4 +422,29 @@ static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
}
#endif
+/* PLL */
+typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint,
+ unsigned long clkdco, void *data);
+typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
+ void *data);
+
+int dss_pll_register(struct dss_pll *pll);
+void dss_pll_unregister(struct dss_pll *pll);
+struct dss_pll *dss_pll_find(const char *name);
+int dss_pll_enable(struct dss_pll *pll);
+void dss_pll_disable(struct dss_pll *pll);
+int dss_pll_set_config(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo);
+
+bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+ unsigned long out_min, unsigned long out_max,
+ dss_hsdiv_calc_func func, void *data);
+bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long pll_min, unsigned long pll_max,
+ dss_pll_calc_func func, void *data);
+int dss_pll_write_config_type_a(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo);
+int dss_pll_write_config_type_b(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo);
+
#endif
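
The dss.h hunk above introduces the shared dss_pll framework (dss_pll_hw limits, dss_pll_ops, and the register/find/enable/set_config entry points) that this series uses to replace the per-driver DSI and HDMI PLL code. A minimal consumer-side sketch of that API, assuming a provider has already registered a PLL named "dsi0" as dsi_init_pll_data() does earlier in this patch; the helper name and the error handling are illustrative, not part of the patch:

	/* Illustrative only: typical use of the dss_pll API declared above. */
	static int example_use_dsi0_pll(const struct dss_pll_clock_info *cinfo)
	{
		struct dss_pll *pll;
		int r;

		pll = dss_pll_find("dsi0");	/* name registered by dsi_init_pll_data() */
		if (!pll)
			return -ENODEV;		/* assuming the lookup returns NULL when not found */

		r = dss_pll_enable(pll);	/* powers the PLL up via its ops */
		if (r)
			return r;

		r = dss_pll_set_config(pll, cinfo);	/* programs the n/m/mX dividers */
		if (r) {
			dss_pll_disable(pll);
			return r;
		}

		return 0;
	}
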
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.c b/drivers/video/fbdev/omap2/dss/dss_features.c
index 15088df7bd16..0e3da809473c 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/dss/dss_features.c
@@ -72,10 +72,6 @@ static const struct dss_reg_field omap2_dss_reg_fields[] = {
[FEAT_REG_HORIZONTALACCU] = { 9, 0 },
[FEAT_REG_VERTICALACCU] = { 25, 16 },
[FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
- [FEAT_REG_DSIPLL_REGN] = { 0, 0 },
- [FEAT_REG_DSIPLL_REGM] = { 0, 0 },
- [FEAT_REG_DSIPLL_REGM_DISPC] = { 0, 0 },
- [FEAT_REG_DSIPLL_REGM_DSI] = { 0, 0 },
};
static const struct dss_reg_field omap3_dss_reg_fields[] = {
@@ -87,10 +83,6 @@ static const struct dss_reg_field omap3_dss_reg_fields[] = {
[FEAT_REG_HORIZONTALACCU] = { 9, 0 },
[FEAT_REG_VERTICALACCU] = { 25, 16 },
[FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
- [FEAT_REG_DSIPLL_REGN] = { 7, 1 },
- [FEAT_REG_DSIPLL_REGM] = { 18, 8 },
- [FEAT_REG_DSIPLL_REGM_DISPC] = { 22, 19 },
- [FEAT_REG_DSIPLL_REGM_DSI] = { 26, 23 },
};
static const struct dss_reg_field am43xx_dss_reg_fields[] = {
@@ -113,10 +105,6 @@ static const struct dss_reg_field omap4_dss_reg_fields[] = {
[FEAT_REG_HORIZONTALACCU] = { 10, 0 },
[FEAT_REG_VERTICALACCU] = { 26, 16 },
[FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
- [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
- [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
- [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
- [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
};
static const struct dss_reg_field omap5_dss_reg_fields[] = {
@@ -128,10 +116,6 @@ static const struct dss_reg_field omap5_dss_reg_fields[] = {
[FEAT_REG_HORIZONTALACCU] = { 10, 0 },
[FEAT_REG_VERTICALACCU] = { 26, 16 },
[FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 },
- [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
- [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
- [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
- [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
};
static const enum omap_display_type omap2_dss_supported_displays[] = {
@@ -437,12 +421,6 @@ static const char * const omap5_dss_clk_source_names[] = {
static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
[FEAT_PARAM_DSS_PCD] = { 2, 255 },
- [FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
- [FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
- [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
- [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
- [FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
[FEAT_PARAM_DOWNSCALE] = { 1, 2 },
/*
* Assuming the line width buffer to be 768 pixels as OMAP2 DISPC
@@ -454,11 +432,6 @@ static const struct dss_param_range omap2_dss_param_range[] = {
static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
[FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
- [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
- [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
[FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -475,11 +448,6 @@ static const struct dss_param_range am43xx_dss_param_range[] = {
static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
[FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
- [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
- [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -489,11 +457,6 @@ static const struct dss_param_range omap4_dss_param_range[] = {
static const struct dss_param_range omap5_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 209250000 },
[FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
- [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
- [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
- [FEAT_PARAM_DSIPLL_FINT] = { 150000, 52000000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DSI_FCK] = { 0, 209250000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -517,7 +480,6 @@ static const enum dss_feat_id omap3430_dss_feat_list[] = {
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
- FEAT_DSI_PLL_FREQSEL,
FEAT_DSI_REVERSE_TXCLKESC,
FEAT_VENC_REQUIRES_TV_DAC_CLK,
FEAT_CPR,
@@ -537,7 +499,6 @@ static const enum dss_feat_id am35xx_dss_feat_list[] = {
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
- FEAT_DSI_PLL_FREQSEL,
FEAT_DSI_REVERSE_TXCLKESC,
FEAT_VENC_REQUIRES_TV_DAC_CLK,
FEAT_CPR,
@@ -572,7 +533,6 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
FEAT_DSI_PLL_PWR_BUG,
- FEAT_DSI_PLL_FREQSEL,
FEAT_CPR,
FEAT_PRELOAD,
FEAT_FIR_COEF_V,
@@ -654,8 +614,6 @@ static const enum dss_feat_id omap5_dss_feat_list[] = {
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
FEAT_BURST_2D,
- FEAT_DSI_PLL_SELFREQDCO,
- FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
FEAT_MFLAG,
};
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.h b/drivers/video/fbdev/omap2/dss/dss_features.h
index e3ef3b714896..100f7a2d0638 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.h
+++ b/drivers/video/fbdev/omap2/dss/dss_features.h
@@ -41,7 +41,6 @@ enum dss_feat_id {
FEAT_LCD_CLK_SRC,
/* DSI-PLL power command 0x3 is not working */
FEAT_DSI_PLL_PWR_BUG,
- FEAT_DSI_PLL_FREQSEL,
FEAT_DSI_DCS_CMD_CONFIG_VC,
FEAT_DSI_VC_OCP_WIDTH,
FEAT_DSI_REVERSE_TXCLKESC,
@@ -61,8 +60,6 @@ enum dss_feat_id {
/* An unknown HW bug causing the normal FIFO thresholds not to work */
FEAT_OMAP3_DSI_FIFO_BUG,
FEAT_BURST_2D,
- FEAT_DSI_PLL_SELFREQDCO,
- FEAT_DSI_PLL_REFSEL,
FEAT_DSI_PHY_DCC,
FEAT_MFLAG,
};
@@ -77,20 +74,11 @@ enum dss_feat_reg_field {
FEAT_REG_HORIZONTALACCU,
FEAT_REG_VERTICALACCU,
FEAT_REG_DISPC_CLK_SWITCH,
- FEAT_REG_DSIPLL_REGN,
- FEAT_REG_DSIPLL_REGM,
- FEAT_REG_DSIPLL_REGM_DISPC,
- FEAT_REG_DSIPLL_REGM_DSI,
};
enum dss_range_param {
FEAT_PARAM_DSS_FCK,
FEAT_PARAM_DSS_PCD,
- FEAT_PARAM_DSIPLL_REGN,
- FEAT_PARAM_DSIPLL_REGM,
- FEAT_PARAM_DSIPLL_REGM_DISPC,
- FEAT_PARAM_DSIPLL_REGM_DSI,
- FEAT_PARAM_DSIPLL_FINT,
FEAT_PARAM_DSIPLL_LPDIV,
FEAT_PARAM_DSI_FCK,
FEAT_PARAM_DOWNSCALE,
diff --git a/drivers/video/fbdev/omap2/dss/hdmi.h b/drivers/video/fbdev/omap2/dss/hdmi.h
index 262771b9b76b..e4a32fe77b02 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi.h
@@ -101,13 +101,6 @@ enum hdmi_core_hdmi_dvi {
HDMI_HDMI = 1
};
-enum hdmi_clk_refsel {
- HDMI_REFSEL_PCLK = 0,
- HDMI_REFSEL_REF1 = 1,
- HDMI_REFSEL_REF2 = 2,
- HDMI_REFSEL_SYSCLK = 3
-};
-
enum hdmi_packing_mode {
HDMI_PACK_10b_RGB_YUV444 = 0,
HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
@@ -160,7 +153,8 @@ enum hdmi_audio_blk_strt_end_sig {
enum hdmi_core_audio_layout {
HDMI_AUDIO_LAYOUT_2CH = 0,
- HDMI_AUDIO_LAYOUT_8CH = 1
+ HDMI_AUDIO_LAYOUT_8CH = 1,
+ HDMI_AUDIO_LAYOUT_6CH = 2
};
enum hdmi_core_cts_mode {
@@ -191,17 +185,6 @@ struct hdmi_config {
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
-/* HDMI PLL structure */
-struct hdmi_pll_info {
- u16 regn;
- u16 regm;
- u32 regmf;
- u16 regm2;
- u16 regsd;
- u16 dcofreq;
- enum hdmi_clk_refsel refsel;
-};
-
struct hdmi_audio_format {
enum hdmi_stereo_channels stereo_channels;
u8 active_chnnls_msk;
@@ -249,12 +232,15 @@ struct hdmi_core_audio_config {
struct hdmi_wp_data {
void __iomem *base;
+ phys_addr_t phys_base;
};
struct hdmi_pll_data {
+ struct dss_pll pll;
+
void __iomem *base;
- struct hdmi_pll_info info;
+ struct hdmi_wp_data *wp;
};
struct hdmi_phy_data {
@@ -316,16 +302,19 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct omap_video_timings *timings, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
-int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
-void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy);
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll);
+void hdmi_pll_compute(struct hdmi_pll_data *pll,
+ unsigned long target_tmds, struct dss_pll_clock_info *pi);
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
+ struct hdmi_wp_data *wp);
+void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
/* HDMI PHY funcs */
-int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg);
+int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
+ unsigned long lfbitclk);
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
@@ -334,7 +323,7 @@ int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
struct hdmi_phy_data *phy);
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) || defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
+/* Audio funcs */
int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts);
int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable);
int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable);
@@ -342,9 +331,33 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
struct hdmi_audio_format *aud_fmt);
void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
struct hdmi_audio_dma *aud_dma);
-static inline bool hdmi_mode_has_audio(int mode)
+static inline bool hdmi_mode_has_audio(struct hdmi_config *cfg)
{
- return mode == HDMI_HDMI ? true : false;
+ return cfg->hdmi_dvi_mode == HDMI_HDMI ? true : false;
}
-#endif
+
+/* HDMI DRV data */
+struct omap_hdmi {
+ struct mutex lock;
+ struct platform_device *pdev;
+
+ struct hdmi_wp_data wp;
+ struct hdmi_pll_data pll;
+ struct hdmi_phy_data phy;
+ struct hdmi_core_data core;
+
+ struct hdmi_config cfg;
+
+ struct regulator *vdda_reg;
+
+ bool core_enabled;
+ bool display_enabled;
+
+ struct omap_dss_device output;
+
+ struct platform_device *audio_pdev;
+ void (*audio_abort_cb)(struct device *dev);
+ int wp_idlemode;
+};
+
#endif
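
The hdmi.h hunk above adds phys_base to hdmi_wp_data and declares hdmi_wp_get_audio_dma_addr(), which supersedes the per-core hdmi4_audio_get_dma_port() helper removed later in this patch. hdmi_wp.c itself is not part of this excerpt; given those two pieces, a plausible implementation would simply be:

	/* Plausible sketch of the hdmi_wp.c side, which is not in this excerpt. */
	phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp)
	{
		return wp->phys_base + HDMI_WP_AUDIO_DATA;
	}
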
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index 9a8713ca090c..916d47978f41 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
@@ -33,29 +33,14 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
#include "hdmi4_core.h"
#include "dss.h"
#include "dss_features.h"
+#include "hdmi.h"
-static struct {
- struct mutex lock;
- struct platform_device *pdev;
-
- struct hdmi_wp_data wp;
- struct hdmi_pll_data pll;
- struct hdmi_phy_data phy;
- struct hdmi_core_data core;
-
- struct hdmi_config cfg;
-
- struct clk *sys_clk;
- struct regulator *vdda_hdmi_dac_reg;
-
- bool core_enabled;
-
- struct omap_dss_device output;
-} hdmi;
+static struct omap_hdmi hdmi;
static int hdmi_runtime_get(void)
{
@@ -117,7 +102,7 @@ static int hdmi_init_regulator(void)
int r;
struct regulator *reg;
- if (hdmi.vdda_hdmi_dac_reg != NULL)
+ if (hdmi.vdda_reg != NULL)
return 0;
reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
@@ -137,7 +122,7 @@ static int hdmi_init_regulator(void)
}
}
- hdmi.vdda_hdmi_dac_reg = reg;
+ hdmi.vdda_reg = reg;
return 0;
}
@@ -146,7 +131,7 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
- r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
+ r = regulator_enable(hdmi.vdda_reg);
if (r)
return r;
@@ -162,7 +147,7 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
return 0;
err_runtime_get:
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
+ regulator_disable(hdmi.vdda_reg);
return r;
}
@@ -172,7 +157,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
hdmi.core_enabled = false;
hdmi_runtime_put();
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
+ regulator_disable(hdmi.vdda_reg);
}
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
@@ -180,8 +165,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
int r;
struct omap_video_timings *p;
struct omap_overlay_manager *mgr = hdmi.output.manager;
- unsigned long phy;
struct hdmi_wp_data *wp = &hdmi.wp;
+ struct dss_pll_clock_info hdmi_cinfo = { 0 };
r = hdmi_power_on_core(dssdev);
if (r)
@@ -195,19 +180,22 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
- /* the functions below use kHz pixel clock. TODO: change to Hz */
- phy = p->pixelclock / 1000;
-
- hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
+ hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
- /* config the PLL and PHY hdmi_set_pll_pwrfirst */
- r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
+ r = dss_pll_enable(&hdmi.pll.pll);
if (r) {
- DSSDBG("Failed to lock PLL\n");
+ DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = hdmi_phy_configure(&hdmi.phy, &hdmi.cfg);
+ r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ if (r) {
+ DSSERR("Failed to configure PLL\n");
+ goto err_pll_cfg;
+ }
+
+ r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to configure PHY\n");
goto err_phy_cfg;
@@ -244,7 +232,8 @@ err_vid_enable:
err_phy_cfg:
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
- hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+err_pll_cfg:
+ dss_pll_disable(&hdmi.pll.pll);
err_pll_enable:
hdmi_power_off_core(dssdev);
return -EIO;
@@ -262,7 +251,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
- hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+ dss_pll_disable(&hdmi.pll.pll);
hdmi_power_off_core(dssdev);
}
@@ -352,6 +341,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
+ hdmi.display_enabled = true;
+
mutex_unlock(&hdmi.lock);
return 0;
@@ -366,8 +357,13 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
+ if (hdmi.audio_pdev && hdmi.audio_abort_cb)
+ hdmi.audio_abort_cb(&hdmi.audio_pdev->dev);
+
hdmi_power_off_full(dssdev);
+ hdmi.display_enabled = false;
+
mutex_unlock(&hdmi.lock);
}
@@ -404,21 +400,6 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_get_clocks(struct platform_device *pdev)
-{
- struct clk *clk;
-
- clk = devm_clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
-
- hdmi.sys_clk = clk;
-
- return 0;
-}
-
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -484,112 +465,6 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
return r;
}
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi_wp_audio_enable(&hdmi.wp, true);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
- hdmi_wp_audio_enable(&hdmi.wp, false);
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return hdmi4_audio_start(&hdmi.core, &hdmi.wp);
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
- hdmi4_audio_stop(&hdmi.core, &hdmi.wp);
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- bool r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
-
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- int r;
- u32 pclk = hdmi.cfg.timings.pixelclock;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-#else
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- return false;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- return -EPERM;
-}
-#endif
-
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -618,13 +493,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.read_edid = hdmi_read_edid,
.set_infoframe = hdmi_set_infoframe,
.set_hdmi_mode = hdmi_set_hdmi_mode,
-
- .audio_enable = hdmi_audio_enable,
- .audio_disable = hdmi_audio_disable,
- .audio_start = hdmi_audio_start,
- .audio_stop = hdmi_audio_stop,
- .audio_supported = hdmi_audio_supported,
- .audio_config = hdmi_audio_config,
};
static void hdmi_init_output(struct platform_device *pdev)
@@ -642,7 +510,7 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void __exit hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct platform_device *pdev)
{
struct omap_dss_device *out = &hdmi.output;
@@ -671,6 +539,112 @@ err:
return r;
}
+/* Audio callbacks */
+static int hdmi_audio_startup(struct device *dev,
+ void (*abort_cb)(struct device *dev))
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&hd->lock);
+
+ if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ hd->audio_abort_cb = abort_cb;
+
+out:
+ mutex_unlock(&hd->lock);
+
+ return ret;
+}
+
+static int hdmi_audio_shutdown(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ mutex_lock(&hd->lock);
+ hd->audio_abort_cb = NULL;
+ mutex_unlock(&hd->lock);
+
+ return 0;
+}
+
+static int hdmi_audio_start(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ WARN_ON(!hd->display_enabled);
+
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi4_audio_start(&hd->core, &hd->wp);
+
+ return 0;
+}
+
+static void hdmi_audio_stop(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ WARN_ON(!hd->display_enabled);
+
+ hdmi4_audio_stop(&hd->core, &hd->wp);
+ hdmi_wp_audio_enable(&hd->wp, false);
+}
+
+static int hdmi_audio_config(struct device *dev,
+ struct omap_dss_audio *dss_audio)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&hd->lock);
+
+ if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.timings.pixelclock);
+
+out:
+ mutex_unlock(&hd->lock);
+
+ return ret;
+}
+
+static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
+ .audio_startup = hdmi_audio_startup,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .audio_start = hdmi_audio_start,
+ .audio_stop = hdmi_audio_stop,
+ .audio_config = hdmi_audio_config,
+};
+
+static int hdmi_audio_register(struct device *dev)
+{
+ struct omap_hdmi_audio_pdata pdata = {
+ .dev = dev,
+ .dss_version = omapdss_get_version(),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .ops = &hdmi_audio_ops,
+ };
+
+ hdmi.audio_pdev = platform_device_register_data(
+ dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ &pdata, sizeof(pdata));
+
+ if (IS_ERR(hdmi.audio_pdev))
+ return PTR_ERR(hdmi.audio_pdev);
+
+ return 0;
+}
+
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
@@ -678,6 +652,7 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
int irq;
hdmi.pdev = pdev;
+ dev_set_drvdata(&pdev->dev, &hdmi);
mutex_init(&hdmi.lock);
@@ -691,28 +666,23 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
if (r)
return r;
- r = hdmi_pll_init(pdev, &hdmi.pll);
+ r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
if (r)
return r;
r = hdmi_phy_init(pdev, &hdmi.phy);
if (r)
- return r;
+ goto err;
r = hdmi4_core_init(pdev, &hdmi.core);
if (r)
- return r;
-
- r = hdmi_get_clocks(pdev);
- if (r) {
- DSSERR("can't get clocks\n");
- return r;
- }
+ goto err;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -720,22 +690,38 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- return r;
+ goto err;
}
pm_runtime_enable(&pdev->dev);
hdmi_init_output(pdev);
+ r = hdmi_audio_register(&pdev->dev);
+ if (r) {
+ DSSERR("Registering HDMI audio failed\n");
+ hdmi_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
+
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
return 0;
+err:
+ hdmi_pll_uninit(&hdmi.pll);
+ return r;
}
static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
{
+ if (hdmi.audio_pdev)
+ platform_device_unregister(hdmi.audio_pdev);
+
hdmi_uninit_output(pdev);
+ hdmi_pll_uninit(&hdmi.pll);
+
pm_runtime_disable(&pdev->dev);
return 0;
@@ -743,8 +729,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- clk_disable_unprepare(hdmi.sys_clk);
-
dispc_runtime_put();
return 0;
@@ -758,8 +742,6 @@ static int hdmi_runtime_resume(struct device *dev)
if (r < 0)
return r;
- clk_prepare_enable(hdmi.sys_clk);
-
return 0;
}
@@ -778,7 +760,6 @@ static struct platform_driver omapdss_hdmihw_driver = {
.remove = __exit_p(omapdss_hdmihw_remove),
.driver = {
.name = "omapdss_hdmi",
- .owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
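
The hdmi4.c changes above drop the audio callbacks from omapdss_hdmi_ops and instead register an "omap-hdmi-audio" child platform device, handing it the DSS version, the audio FIFO DMA address and a set of omap_hdmi_audio_ops. A sketch of the consuming side, i.e. how that child driver might pick up the platform data and start playback; the field names match the omap_hdmi_audio_pdata initializer above, while the probe function and the overall flow are illustrative only:

	/* Illustrative only: consumer side of the "omap-hdmi-audio" child device. */
	static int example_hdmi_audio_probe(struct platform_device *pdev)
	{
		struct omap_hdmi_audio_pdata *pd = dev_get_platdata(&pdev->dev);
		int r;

		/* pd->dev is the HDMI encoder device that owns the ops registered above */
		r = pd->ops->audio_startup(pd->dev, NULL /* abort callback */);
		if (r)
			return r;	/* -EPERM: display off or in DVI mode */

		/* ... set up DMA towards pd->audio_dma_addr, call pd->ops->audio_config() ... */

		return pd->ops->audio_start(pd->dev);
	}
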
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
index 4ad39cfce254..7eafea5b8e19 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
@@ -31,10 +31,8 @@
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
#include <sound/asound.h>
#include <sound/asoundef.h>
-#endif
#include "hdmi4_core.h"
#include "dss_features.h"
@@ -530,7 +528,6 @@ void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
static void hdmi_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
@@ -877,17 +874,6 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
hdmi_wp_audio_core_req_enable(wp, false);
}
-int hdmi4_audio_get_dma_port(u32 *offset, u32 *size)
-{
- if (!offset || !size)
- return -EINVAL;
- *offset = HDMI_WP_AUDIO_DATA;
- *size = 4;
- return 0;
-}
-
-#endif
-
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
struct resource *res;
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.h b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
index 827909eb6c50..a069f96ec6f6 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
@@ -266,12 +266,8 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_dss_audio *audio, u32 pclk);
-int hdmi4_audio_get_dma_port(u32 *offset, u32 *size);
-#endif
-
#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index 169b764bb9d4..39aae3aa7136 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -38,29 +38,13 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
#include "hdmi5_core.h"
#include "dss.h"
#include "dss_features.h"
-static struct {
- struct mutex lock;
- struct platform_device *pdev;
-
- struct hdmi_wp_data wp;
- struct hdmi_pll_data pll;
- struct hdmi_phy_data phy;
- struct hdmi_core_data core;
-
- struct hdmi_config cfg;
-
- struct clk *sys_clk;
- struct regulator *vdda_reg;
-
- bool core_enabled;
-
- struct omap_dss_device output;
-} hdmi;
+static struct omap_hdmi hdmi;
static int hdmi_runtime_get(void)
{
@@ -198,7 +182,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
int r;
struct omap_video_timings *p;
struct omap_overlay_manager *mgr = hdmi.output.manager;
- unsigned long phy;
+ struct dss_pll_clock_info hdmi_cinfo = { 0 };
r = hdmi_power_on_core(dssdev);
if (r)
@@ -208,24 +192,27 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
- /* the functions below use kHz pixel clock. TODO: change to Hz */
- phy = p->pixelclock / 1000;
-
- hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
+ hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
/* disable and clear irqs */
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
hdmi_wp_set_irqstatus(&hdmi.wp,
hdmi_wp_get_irqstatus(&hdmi.wp));
- /* config the PLL and PHY hdmi_set_pll_pwrfirst */
- r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
+ r = dss_pll_enable(&hdmi.pll.pll);
if (r) {
- DSSDBG("Failed to lock PLL\n");
+ DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = hdmi_phy_configure(&hdmi.phy, &hdmi.cfg);
+ r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ if (r) {
+ DSSERR("Failed to configure PLL\n");
+ goto err_pll_cfg;
+ }
+
+ r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err_phy_cfg;
@@ -262,7 +249,8 @@ err_vid_enable:
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
- hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+err_pll_cfg:
+ dss_pll_disable(&hdmi.pll.pll);
err_pll_enable:
hdmi_power_off_core(dssdev);
return -EIO;
@@ -280,7 +268,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
- hdmi_pll_disable(&hdmi.pll, &hdmi.wp);
+ dss_pll_disable(&hdmi.pll.pll);
hdmi_power_off_core(dssdev);
}
@@ -290,6 +278,10 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
{
struct omap_dss_device *out = &hdmi.output;
+ /* TODO: proper interlace support */
+ if (timings->interlace)
+ return -EINVAL;
+
if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
return -EINVAL;
@@ -377,6 +369,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
+ hdmi.display_enabled = true;
+
mutex_unlock(&hdmi.lock);
return 0;
@@ -391,8 +385,13 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
+ if (hdmi.audio_pdev && hdmi.audio_abort_cb)
+ hdmi.audio_abort_cb(&hdmi.audio_pdev->dev);
+
hdmi_power_off_full(dssdev);
+ hdmi.display_enabled = false;
+
mutex_unlock(&hdmi.lock);
}
@@ -429,21 +428,6 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_get_clocks(struct platform_device *pdev)
-{
- struct clk *clk;
-
- clk = devm_clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
-
- hdmi.sys_clk = clk;
-
- return 0;
-}
-
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -509,112 +493,6 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
return r;
}
-#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi_wp_audio_enable(&hdmi.wp, true);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
- hdmi_wp_audio_enable(&hdmi.wp, false);
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return hdmi_wp_audio_core_req_enable(&hdmi.wp, true);
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
- hdmi_wp_audio_core_req_enable(&hdmi.wp, false);
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- bool r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
-
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- int r;
- u32 pclk = hdmi.cfg.timings.pixelclock;
-
- mutex_lock(&hdmi.lock);
-
- if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
- r = -EPERM;
- goto err;
- }
-
- r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
- if (r)
- goto err;
-
- mutex_unlock(&hdmi.lock);
- return 0;
-
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-#else
-static int hdmi_audio_enable(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_audio_start(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
-{
- return false;
-}
-
-static int hdmi_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- return -EPERM;
-}
-#endif
-
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -643,13 +521,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.read_edid = hdmi_read_edid,
.set_infoframe = hdmi_set_infoframe,
.set_hdmi_mode = hdmi_set_hdmi_mode,
-
- .audio_enable = hdmi_audio_enable,
- .audio_disable = hdmi_audio_disable,
- .audio_start = hdmi_audio_start,
- .audio_stop = hdmi_audio_stop,
- .audio_supported = hdmi_audio_supported,
- .audio_config = hdmi_audio_config,
};
static void hdmi_init_output(struct platform_device *pdev)
@@ -667,7 +538,7 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void __exit hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct platform_device *pdev)
{
struct omap_dss_device *out = &hdmi.output;
@@ -696,6 +567,119 @@ err:
return r;
}
+/* Audio callbacks */
+static int hdmi_audio_startup(struct device *dev,
+ void (*abort_cb)(struct device *dev))
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&hd->lock);
+
+ if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ hd->audio_abort_cb = abort_cb;
+
+out:
+ mutex_unlock(&hd->lock);
+
+ return ret;
+}
+
+static int hdmi_audio_shutdown(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ mutex_lock(&hd->lock);
+ hd->audio_abort_cb = NULL;
+ mutex_unlock(&hd->lock);
+
+ return 0;
+}
+
+static int hdmi_audio_start(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ WARN_ON(!hd->display_enabled);
+
+ /* No-idle while playing audio, store the old value */
+ hd->wp_idlemode = REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi_wp_audio_core_req_enable(&hd->wp, true);
+
+ return 0;
+}
+
+static void hdmi_audio_stop(struct device *dev)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+
+ WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ WARN_ON(!hd->display_enabled);
+
+ hdmi_wp_audio_core_req_enable(&hd->wp, false);
+ hdmi_wp_audio_enable(&hd->wp, false);
+
+ /* Playback stopped, restore original idlemode */
+ REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+}
+
+static int hdmi_audio_config(struct device *dev,
+ struct omap_dss_audio *dss_audio)
+{
+ struct omap_hdmi *hd = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&hd->lock);
+
+ if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.timings.pixelclock);
+
+out:
+ mutex_unlock(&hd->lock);
+
+ return ret;
+}
+
+static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
+ .audio_startup = hdmi_audio_startup,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .audio_start = hdmi_audio_start,
+ .audio_stop = hdmi_audio_stop,
+ .audio_config = hdmi_audio_config,
+};
+
+static int hdmi_audio_register(struct device *dev)
+{
+ struct omap_hdmi_audio_pdata pdata = {
+ .dev = dev,
+ .dss_version = omapdss_get_version(),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .ops = &hdmi_audio_ops,
+ };
+
+ hdmi.audio_pdev = platform_device_register_data(
+ dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ &pdata, sizeof(pdata));
+
+ if (IS_ERR(hdmi.audio_pdev))
+ return PTR_ERR(hdmi.audio_pdev);
+
+ return 0;
+}
+
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
@@ -703,6 +687,7 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
int irq;
hdmi.pdev = pdev;
+ dev_set_drvdata(&pdev->dev, &hdmi);
mutex_init(&hdmi.lock);
@@ -716,28 +701,23 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
if (r)
return r;
- r = hdmi_pll_init(pdev, &hdmi.pll);
+ r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
if (r)
return r;
r = hdmi_phy_init(pdev, &hdmi.phy);
if (r)
- return r;
+ goto err;
r = hdmi5_core_init(pdev, &hdmi.core);
if (r)
- return r;
-
- r = hdmi_get_clocks(pdev);
- if (r) {
- DSSERR("can't get clocks\n");
- return r;
- }
+ goto err;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -745,22 +725,38 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- return r;
+ goto err;
}
pm_runtime_enable(&pdev->dev);
hdmi_init_output(pdev);
+ r = hdmi_audio_register(&pdev->dev);
+ if (r) {
+ DSSERR("Registering HDMI audio failed %d\n", r);
+ hdmi_uninit_output(pdev);
+ pm_runtime_disable(&pdev->dev);
+ return r;
+ }
+
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
return 0;
+err:
+ hdmi_pll_uninit(&hdmi.pll);
+ return r;
}
static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
{
+ if (hdmi.audio_pdev)
+ platform_device_unregister(hdmi.audio_pdev);
+
hdmi_uninit_output(pdev);
+ hdmi_pll_uninit(&hdmi.pll);
+
pm_runtime_disable(&pdev->dev);
return 0;
@@ -768,8 +764,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- clk_disable_unprepare(hdmi.sys_clk);
-
dispc_runtime_put();
return 0;
@@ -783,8 +777,6 @@ static int hdmi_runtime_resume(struct device *dev)
if (r < 0)
return r;
- clk_prepare_enable(hdmi.sys_clk);
-
return 0;
}
@@ -803,7 +795,6 @@ static struct platform_driver omapdss_hdmihw_driver = {
.remove = __exit_p(omapdss_hdmihw_remove),
.driver = {
.name = "omapdss_hdmi5",
- .owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
.of_match_table = hdmi_of_match,
.suppress_bind_attrs = true,
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
index 83acbf7a8c89..a3cfe3d708f7 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
@@ -30,10 +30,8 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <drm/drm_edid.h>
-#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
#include <sound/asound.h>
#include <sound/asoundef.h>
-#endif
#include "hdmi5_core.h"
@@ -644,9 +642,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_enable_interrupts(core);
}
-
-#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
-
static void hdmi5_core_audio_config(struct hdmi_core_data *core,
struct hdmi_core_audio_config *cfg)
{
@@ -721,7 +716,7 @@ static void hdmi5_core_audio_config(struct hdmi_core_data *core,
/* Source number */
val = cfg->iec60958_cfg->status[2] & IEC958_AES2_CON_SOURCE;
- REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 4);
+ REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 0);
/* Channel number right 0 */
REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 2, 3, 0);
@@ -879,6 +874,9 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
/* only LPCM atm */
audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+ /* only allowed option */
+ audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+
/* disable start/stop signals of IEC 60958 blocks */
audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
@@ -894,7 +892,6 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return 0;
}
-#endif
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.h b/drivers/video/fbdev/omap2/dss/hdmi5_core.h
index ce7e9f376f04..f2f1022c5516 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.h
@@ -299,8 +299,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
-#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_dss_audio *audio, u32 pclk);
#endif
-#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_common.c b/drivers/video/fbdev/omap2/dss/hdmi_common.c
index 7d5f1039de9f..1b8fcc6c4ba1 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_common.c
@@ -48,7 +48,6 @@ int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
return 0;
}
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
{
u32 deep_color;
@@ -147,4 +146,3 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
return 0;
}
-#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
index e007ac892d79..bc9e07d2afbe 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
@@ -20,9 +20,7 @@
struct hdmi_phy_features {
bool bist_ctrl;
- bool calc_freqout;
bool ldo_voltage;
- unsigned long dcofreq_min;
unsigned long max_phy;
};
@@ -132,7 +130,8 @@ static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27);
}
-int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg)
+int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
+ unsigned long lfbitclk)
{
u8 freqout;
@@ -149,20 +148,16 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg)
if (phy_feat->bist_ctrl)
REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
- if (phy_feat->calc_freqout) {
- /* DCOCLK/10 is pixel clock, compare pclk with DCOCLK_MIN/10 */
- u32 dco_min = phy_feat->dcofreq_min / 10;
- u32 pclk = cfg->timings.pixelclock;
-
- if (pclk < dco_min)
- freqout = 0;
- else if ((pclk >= dco_min) && (pclk < phy_feat->max_phy))
- freqout = 1;
- else
- freqout = 2;
- } else {
+ /*
+ * If the hfbitclk != lfbitclk, it means the lfbitclk was configured
+ * to be used for TMDS.
+ */
+ if (hfbitclk != lfbitclk)
+ freqout = 0;
+ else if (hfbitclk / 10 < phy_feat->max_phy)
freqout = 1;
- }
+ else
+ freqout = 2;
/*
* Write to phy address 0 to configure the clock
@@ -184,17 +179,13 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg)
static const struct hdmi_phy_features omap44xx_phy_feats = {
.bist_ctrl = false,
- .calc_freqout = false,
.ldo_voltage = true,
- .dcofreq_min = 500000000,
.max_phy = 185675000,
};
static const struct hdmi_phy_features omap54xx_phy_feats = {
.bist_ctrl = true,
- .calc_freqout = true,
.ldo_voltage = false,
- .dcofreq_min = 750000000,
.max_phy = 186000000,
};
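For illustration, a minimal standalone sketch of the freqout selection that replaces the old calc_freqout/dcofreq_min path; the sample clock rates are assumptions, and only the max_phy value comes from the OMAP5 feature table above.

#include <stdio.h>

/* mirrors the new selection in hdmi_phy_configure(): the PHY frequency
 * range is now derived from the TMDS bit clocks, not the pixel clock */
static int pick_freqout(unsigned long hfbitclk, unsigned long lfbitclk,
			unsigned long max_phy)
{
	if (hfbitclk != lfbitclk)	/* lfbitclk was set up for TMDS */
		return 0;
	else if (hfbitclk / 10 < max_phy)
		return 1;
	else
		return 2;
}

int main(void)
{
	/* assumed example: 1.485 GHz bit clock (1080p60), OMAP5 max_phy */
	printf("freqout = %d\n",
	       pick_freqout(1485000000UL, 1485000000UL, 186000000UL));
	/* 1485000000 / 10 = 148.5 MHz < 186 MHz, so this prints 1 */
	return 0;
}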
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 6d92bb32fe51..87accdb59c81 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -15,26 +15,13 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
+
#include <video/omapdss.h>
#include "dss.h"
#include "hdmi.h"
-#define HDMI_DEFAULT_REGN 16
-#define HDMI_DEFAULT_REGM2 1
-
-struct hdmi_pll_features {
- bool sys_reset;
- /* this is a hack, need to replace it with a better computation of M2 */
- bool bound_dcofreq;
- unsigned long fint_min, fint_max;
- u16 regm_max;
- unsigned long dcofreq_low_min, dcofreq_low_max;
- unsigned long dcofreq_high_min, dcofreq_high_max;
-};
-
-static const struct hdmi_pll_features *pll_feat;
-
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
{
#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
@@ -51,228 +38,189 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
DUMPPLL(PLLCTRL_CFG4);
}
-void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy)
+void hdmi_pll_compute(struct hdmi_pll_data *pll,
+ unsigned long target_tmds, struct dss_pll_clock_info *pi)
{
- struct hdmi_pll_info *pi = &pll->info;
- unsigned long refclk;
- u32 mf;
+ unsigned long fint, clkdco, clkout;
+ unsigned long target_bitclk, target_clkdco;
+ unsigned long min_dco;
+ unsigned n, m, mf, m2, sd;
+ unsigned long clkin;
+ const struct dss_pll_hw *hw = pll->pll.hw;
- /* use our funky units */
- clkin /= 10000;
+ clkin = clk_get_rate(pll->pll.clkin);
- /*
- * Input clock is predivided by N + 1
- * out put of which is reference clk
- */
+ DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
- pi->regn = HDMI_DEFAULT_REGN;
+ target_bitclk = target_tmds * 10;
- refclk = clkin / pi->regn;
+ /* Fint */
+ n = DIV_ROUND_UP(clkin, hw->fint_max);
+ fint = clkin / n;
- /* temorary hack to make sure DCO freq isn't calculated too low */
- if (pll_feat->bound_dcofreq && phy <= 65000)
- pi->regm2 = 3;
- else
- pi->regm2 = HDMI_DEFAULT_REGM2;
-
- /*
- * multiplier is pixel_clk/ref_clk
- * Multiplying by 100 to avoid fractional part removal
- */
- pi->regm = phy * pi->regm2 / refclk;
-
- /*
- * fractional multiplier is remainder of the difference between
- * multiplier and actual phy(required pixel clock thus should be
- * multiplied by 2^18(262144) divided by the reference clock
- */
- mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
- pi->regmf = pi->regm2 * mf / refclk;
-
- /*
- * Dcofreq should be set to 1 if required pixel clock
- * is greater than 1000MHz
- */
- pi->dcofreq = phy > 1000 * 100;
- pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
-
- /* Set the reference clock to sysclk reference */
- pi->refsel = HDMI_REFSEL_SYSCLK;
-
- DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
- DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
-}
+ /* adjust m2 so that the clkdco will be high enough */
+ min_dco = roundup(hw->clkdco_min, fint);
+ m2 = DIV_ROUND_UP(min_dco, target_bitclk);
+ if (m2 == 0)
+ m2 = 1;
+ target_clkdco = target_bitclk * m2;
+ m = target_clkdco / fint;
-static int hdmi_pll_config(struct hdmi_pll_data *pll)
-{
- u32 r;
- struct hdmi_pll_info *fmt = &pll->info;
+ clkdco = fint * m;
- /* PLL start always use manual mode */
- REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0);
-
- r = hdmi_read_reg(pll->base, PLLCTRL_CFG1);
- r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
- r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
- hdmi_write_reg(pll->base, PLLCTRL_CFG1, r);
-
- r = hdmi_read_reg(pll->base, PLLCTRL_CFG2);
-
- r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
- r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
- r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
- r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
-
- if (fmt->dcofreq)
- r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
+ /* adjust clkdco with fractional mf */
+ if (WARN_ON(target_clkdco - clkdco > fint))
+ mf = 0;
else
- r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
-
- hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
-
- REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
+ mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
- r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
- r = FLD_MOD(r, fmt->regm2, 24, 18);
- r = FLD_MOD(r, fmt->regmf, 17, 0);
- hdmi_write_reg(pll->base, PLLCTRL_CFG4, r);
+ if (mf > 0)
+ clkdco += (u32)div_u64((u64)mf * fint, 262144);
- /* go now */
- REG_FLD_MOD(pll->base, PLLCTRL_PLL_GO, 0x1, 0, 0);
+ clkout = clkdco / m2;
- /* wait for bit change */
- if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
- 0, 0, 0) != 0) {
- DSSERR("PLL GO bit not clearing\n");
- return -ETIMEDOUT;
- }
+ /* sigma-delta */
+ sd = DIV_ROUND_UP(fint * m, 250000000);
- /* Wait till the lock bit is set in PLL status */
- if (hdmi_wait_for_bit_change(pll->base,
- PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) {
- DSSERR("cannot lock PLL\n");
- DSSERR("CFG1 0x%x\n",
- hdmi_read_reg(pll->base, PLLCTRL_CFG1));
- DSSERR("CFG2 0x%x\n",
- hdmi_read_reg(pll->base, PLLCTRL_CFG2));
- DSSERR("CFG4 0x%x\n",
- hdmi_read_reg(pll->base, PLLCTRL_CFG4));
- return -ETIMEDOUT;
- }
+ DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
+ n, m, mf, m2, sd);
+ DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
- DSSDBG("PLL locked!\n");
+ pi->n = n;
+ pi->m = m;
+ pi->mf = mf;
+ pi->mX[0] = m2;
+ pi->sd = sd;
- return 0;
+ pi->fint = fint;
+ pi->clkdco = clkdco;
+ pi->clkout[0] = clkout;
}
-static int hdmi_pll_reset(struct hdmi_pll_data *pll)
-{
- /* SYSRESET controlled by power FSM */
- REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, pll_feat->sys_reset, 3, 3);
-
- /* READ 0x0 reset is in progress */
- if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
- != 1) {
- DSSERR("Failed to sysreset PLL\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+static int hdmi_pll_enable(struct dss_pll *dsspll)
{
+ struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
+ struct hdmi_wp_data *wp = pll->wp;
u16 r = 0;
- r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
- if (r)
- return r;
-
r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
if (r)
return r;
- r = hdmi_pll_reset(pll);
- if (r)
- return r;
-
- r = hdmi_pll_config(pll);
- if (r)
- return r;
-
return 0;
}
-void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
+static void hdmi_pll_disable(struct dss_pll *dsspll)
{
+ struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
+ struct hdmi_wp_data *wp = pll->wp;
+
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
}
-static const struct hdmi_pll_features omap44xx_pll_feats = {
- .sys_reset = false,
- .bound_dcofreq = false,
- .fint_min = 500000,
- .fint_max = 2500000,
- .regm_max = 4095,
- .dcofreq_low_min = 500000000,
- .dcofreq_low_max = 1000000000,
- .dcofreq_high_min = 1000000000,
- .dcofreq_high_max = 2000000000,
+static const struct dss_pll_ops dsi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .set_config = dss_pll_write_config_type_b,
};
-static const struct hdmi_pll_features omap54xx_pll_feats = {
- .sys_reset = true,
- .bound_dcofreq = true,
- .fint_min = 620000,
- .fint_max = 2500000,
- .regm_max = 2046,
- .dcofreq_low_min = 750000000,
- .dcofreq_low_max = 1500000000,
- .dcofreq_high_min = 1250000000,
- .dcofreq_high_max = 2500000000UL,
+static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
+ .n_max = 255,
+ .m_min = 20,
+ .m_max = 4095,
+ .mX_max = 127,
+ .fint_min = 500000,
+ .fint_max = 2500000,
+ .clkdco_max = 1800000000,
+
+ .clkdco_min = 500000000,
+ .clkdco_low = 1000000000,
+ .clkdco_max = 2000000000,
+
+ .n_msb = 8,
+ .n_lsb = 1,
+ .m_msb = 20,
+ .m_lsb = 9,
+
+ .mX_msb[0] = 24,
+ .mX_lsb[0] = 18,
+
+ .has_selfreqdco = true,
};
-static int hdmi_pll_init_features(struct platform_device *pdev)
+static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
+ .n_max = 255,
+ .m_min = 20,
+ .m_max = 2045,
+ .mX_max = 127,
+ .fint_min = 620000,
+ .fint_max = 2500000,
+ .clkdco_max = 1800000000,
+
+ .clkdco_min = 750000000,
+ .clkdco_low = 1500000000,
+ .clkdco_max = 2500000000UL,
+
+ .n_msb = 8,
+ .n_lsb = 1,
+ .m_msb = 20,
+ .m_lsb = 9,
+
+ .mX_msb[0] = 24,
+ .mX_lsb[0] = 18,
+
+ .has_selfreqdco = true,
+ .has_refsel = true,
+};
+
+static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll)
{
- struct hdmi_pll_features *dst;
- const struct hdmi_pll_features *src;
+ struct dss_pll *pll = &hpll->pll;
+ struct clk *clk;
+ int r;
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n");
- return -ENOMEM;
+ clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
}
+ pll->name = "hdmi";
+ pll->base = hpll->base;
+ pll->clkin = clk;
+
switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
- src = &omap44xx_pll_feats;
+ pll->hw = &dss_omap4_hdmi_pll_hw;
break;
case OMAPDSS_VER_OMAP5:
- src = &omap54xx_pll_feats;
+ pll->hw = &dss_omap5_hdmi_pll_hw;
break;
default:
return -ENODEV;
}
- memcpy(dst, src, sizeof(*dst));
- pll_feat = dst;
+ pll->ops = &dsi_pll_ops;
+
+ r = dss_pll_register(pll);
+ if (r)
+ return r;
return 0;
}
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
+int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
+ struct hdmi_wp_data *wp)
{
int r;
struct resource *res;
- r = hdmi_pll_init_features(pdev);
- if (r)
- return r;
+ pll->wp = wp;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
if (!res) {
@@ -286,5 +234,18 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
return PTR_ERR(pll->base);
}
+ r = dsi_init_pll_data(pdev, pll);
+ if (r) {
+ DSSERR("failed to init HDMI PLL\n");
+ return r;
+ }
+
return 0;
}
+
+void hdmi_pll_uninit(struct hdmi_pll_data *hpll)
+{
+ struct dss_pll *pll = &hpll->pll;
+
+ dss_pll_unregister(pll);
+}
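As a worked example of the divider math in the new hdmi_pll_compute(), the sketch below reproduces the calculation in user space for an assumed 38.4 MHz sys_clk and a 148.5 MHz TMDS target (1080p60), using the OMAP5 limits from dss_omap5_hdmi_pll_hw above; the input rates are illustrative assumptions, not values taken from this patch.

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		(DIV_ROUND_UP((x), (y)) * (y))

int main(void)
{
	/* assumed inputs: 38.4 MHz sys_clk, 148.5 MHz TMDS (1080p60) */
	unsigned long clkin = 38400000, target_tmds = 148500000;
	/* OMAP5 limits from dss_omap5_hdmi_pll_hw */
	unsigned long fint_max = 2500000, clkdco_min = 750000000;

	unsigned long target_bitclk = target_tmds * 10;
	unsigned long n = DIV_ROUND_UP(clkin, fint_max);	/* 16 */
	unsigned long fint = clkin / n;				/* 2.4 MHz */
	unsigned long min_dco = roundup(clkdco_min, fint);
	unsigned long m2 = DIV_ROUND_UP(min_dco, target_bitclk);
	if (m2 == 0)
		m2 = 1;
	unsigned long target_clkdco = target_bitclk * m2;
	unsigned long m = target_clkdco / fint;			/* 618 */
	unsigned long clkdco = fint * m;
	unsigned long mf = (uint64_t)262144 * (target_clkdco - clkdco) / fint;
	clkdco += (uint64_t)mf * fint / 262144;
	unsigned long sd = DIV_ROUND_UP(fint * m, 250000000);

	printf("N=%lu M=%lu M.f=%lu M2=%lu SD=%lu clkdco=%lu\n",
	       n, m, mf, m2, sd, clkdco);
	/* prints N=16 M=618 M.f=196608 M2=1 SD=6 clkdco=1485000000 */
	return 0;
}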
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
index 496327e2b21b..c15377e242cc 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
@@ -185,7 +185,6 @@ void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
timings->interlace = param->timings.interlace;
}
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) || defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
struct hdmi_audio_format *aud_fmt)
{
@@ -194,8 +193,12 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
DSSDBG("Enter hdmi_wp_audio_config_format\n");
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
- r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
- r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
+ omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
+ omapdss_get_version() == OMAPDSS_VER_OMAP4) {
+ r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
+ r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
+ }
r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
r = FLD_MOD(r, aud_fmt->type, 4, 4);
r = FLD_MOD(r, aud_fmt->justification, 3, 3);
@@ -236,7 +239,6 @@ int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
return 0;
}
-#endif
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
{
@@ -247,6 +249,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
DSSERR("can't get WP mem resource\n");
return -EINVAL;
}
+ wp->phys_base = res->start;
wp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(wp->base)) {
@@ -256,3 +259,8 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
return 0;
}
+
+phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp)
+{
+ return wp->phys_base + HDMI_WP_AUDIO_DATA;
+}
diff --git a/drivers/video/fbdev/omap2/dss/output.c b/drivers/video/fbdev/omap2/dss/output.c
index 2ab3afa615e8..16072159bd24 100644
--- a/drivers/video/fbdev/omap2/dss/output.c
+++ b/drivers/video/fbdev/omap2/dss/output.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <video/omapdss.h>
@@ -131,18 +132,30 @@ struct omap_dss_device *omap_dss_find_output(const char *name)
}
EXPORT_SYMBOL(omap_dss_find_output);
-struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node)
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port)
{
+ struct device_node *src_node;
struct omap_dss_device *out;
+ u32 reg;
+
+ src_node = dss_of_port_get_parent_device(port);
+ if (!src_node)
+ return NULL;
+
+ reg = dss_of_port_get_port_number(port);
list_for_each_entry(out, &output_list, list) {
- if (out->dev->of_node == node)
+ if (out->dev->of_node == src_node && out->port_num == reg) {
+ of_node_put(src_node);
return omap_dss_get_device(out);
+ }
}
+ of_node_put(src_node);
+
return NULL;
}
-EXPORT_SYMBOL(omap_dss_find_output_by_node);
+EXPORT_SYMBOL(omap_dss_find_output_by_port_node);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
{
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
new file mode 100644
index 000000000000..50bc62c5d367
--- /dev/null
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "PLL"
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+
+#define PLL_CONTROL 0x0000
+#define PLL_STATUS 0x0004
+#define PLL_GO 0x0008
+#define PLL_CONFIGURATION1 0x000C
+#define PLL_CONFIGURATION2 0x0010
+#define PLL_CONFIGURATION3 0x0014
+#define PLL_SSC_CONFIGURATION1 0x0018
+#define PLL_SSC_CONFIGURATION2 0x001C
+#define PLL_CONFIGURATION4 0x0020
+
+static struct dss_pll *dss_plls[4];
+
+int dss_pll_register(struct dss_pll *pll)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
+ if (!dss_plls[i]) {
+ dss_plls[i] = pll;
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+void dss_pll_unregister(struct dss_pll *pll)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
+ if (dss_plls[i] == pll) {
+ dss_plls[i] = NULL;
+ return;
+ }
+ }
+}
+
+struct dss_pll *dss_pll_find(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
+ if (dss_plls[i] && strcmp(dss_plls[i]->name, name) == 0)
+ return dss_plls[i];
+ }
+
+ return NULL;
+}
+
+int dss_pll_enable(struct dss_pll *pll)
+{
+ int r;
+
+ r = clk_prepare_enable(pll->clkin);
+ if (r)
+ return r;
+
+ if (pll->regulator) {
+ r = regulator_enable(pll->regulator);
+ if (r)
+ goto err_reg;
+ }
+
+ r = pll->ops->enable(pll);
+ if (r)
+ goto err_enable;
+
+ return 0;
+
+err_enable:
+ regulator_disable(pll->regulator);
+err_reg:
+ clk_disable_unprepare(pll->clkin);
+ return r;
+}
+
+void dss_pll_disable(struct dss_pll *pll)
+{
+ pll->ops->disable(pll);
+
+ if (pll->regulator)
+ regulator_disable(pll->regulator);
+
+ clk_disable_unprepare(pll->clkin);
+
+ memset(&pll->cinfo, 0, sizeof(pll->cinfo));
+}
+
+int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo)
+{
+ int r;
+
+ r = pll->ops->set_config(pll, cinfo);
+ if (r)
+ return r;
+
+ pll->cinfo = *cinfo;
+
+ return 0;
+}
+
+bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+ unsigned long out_min, unsigned long out_max,
+ dss_hsdiv_calc_func func, void *data)
+{
+ const struct dss_pll_hw *hw = pll->hw;
+ int m, m_start, m_stop;
+ unsigned long out;
+
+ out_min = out_min ? out_min : 1;
+ out_max = out_max ? out_max : ULONG_MAX;
+
+ m_start = max(DIV_ROUND_UP(clkdco, out_max), 1ul);
+
+ m_stop = min((unsigned)(clkdco / out_min), hw->mX_max);
+
+ for (m = m_start; m <= m_stop; ++m) {
+ out = clkdco / m;
+
+ if (func(m, out, data))
+ return true;
+ }
+
+ return false;
+}
+
+bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long pll_min, unsigned long pll_max,
+ dss_pll_calc_func func, void *data)
+{
+ const struct dss_pll_hw *hw = pll->hw;
+ int n, n_start, n_stop;
+ int m, m_start, m_stop;
+ unsigned long fint, clkdco;
+ unsigned long pll_hw_max;
+ unsigned long fint_hw_min, fint_hw_max;
+
+ pll_hw_max = hw->clkdco_max;
+
+ fint_hw_min = hw->fint_min;
+ fint_hw_max = hw->fint_max;
+
+ n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+ n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+
+ pll_max = pll_max ? pll_max : ULONG_MAX;
+
+ for (n = n_start; n <= n_stop; ++n) {
+ fint = clkin / n;
+
+ m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+ 1ul);
+ m_stop = min3((unsigned)(pll_max / fint / 2),
+ (unsigned)(pll_hw_max / fint / 2),
+ hw->m_max);
+
+ for (m = m_start; m <= m_stop; ++m) {
+ clkdco = 2 * m * fint;
+
+ if (func(n, m, fint, clkdco, data))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
+{
+ unsigned long timeout;
+ ktime_t wait;
+ int t;
+
+ /* first busyloop to see if the bit changes right away */
+ t = 100;
+ while (t-- > 0) {
+ if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
+ return value;
+ }
+
+ /* then loop for 500ms, sleeping for 1ms in between */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (time_before(jiffies, timeout)) {
+ if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
+ return value;
+
+ wait = ns_to_ktime(1000 * 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
+ }
+
+ return !value;
+}
+
+static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
+{
+ int t = 100;
+
+ while (t-- > 0) {
+ u32 v = readl_relaxed(pll->base + PLL_STATUS);
+ v &= hsdiv_ack_mask;
+ if (v == hsdiv_ack_mask)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+int dss_pll_write_config_type_a(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo)
+{
+ const struct dss_pll_hw *hw = pll->hw;
+ void __iomem *base = pll->base;
+ int r = 0;
+ u32 l;
+
+ l = 0;
+ if (hw->has_stopmode)
+ l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */
+ l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */
+ l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */
+ /* M4 */
+ l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0,
+ hw->mX_msb[0], hw->mX_lsb[0]);
+ /* M5 */
+ l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0,
+ hw->mX_msb[1], hw->mX_lsb[1]);
+ writel_relaxed(l, base + PLL_CONFIGURATION1);
+
+ l = 0;
+ /* M6 */
+ l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0,
+ hw->mX_msb[2], hw->mX_lsb[2]);
+ /* M7 */
+ l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0,
+ hw->mX_msb[3], hw->mX_lsb[3]);
+ writel_relaxed(l, base + PLL_CONFIGURATION3);
+
+ l = readl_relaxed(base + PLL_CONFIGURATION2);
+ if (hw->has_freqsel) {
+ u32 f = cinfo->fint < 1000000 ? 0x3 :
+ cinfo->fint < 1250000 ? 0x4 :
+ cinfo->fint < 1500000 ? 0x5 :
+ cinfo->fint < 1750000 ? 0x6 :
+ 0x7;
+
+ l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */
+ } else if (hw->has_selfreqdco) {
+ u32 f = cinfo->clkdco < hw->clkdco_low ? 0x2 : 0x4;
+
+ l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
+ }
+ l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */
+ l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */
+ l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */
+ l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */
+ l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */
+ if (hw->has_refsel)
+ l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */
+ l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */
+ l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
+ writel_relaxed(l, base + PLL_CONFIGURATION2);
+
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+ DSSERR("DSS DPLL GO bit not going down.\n");
+ r = -EIO;
+ goto err;
+ }
+
+ if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock DSS DPLL\n");
+ r = -EIO;
+ goto err;
+ }
+
+ l = readl_relaxed(base + PLL_CONFIGURATION2);
+ l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */
+ l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */
+ l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18); /* M5_CLOCK_EN */
+ l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */
+ l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23); /* M6_CLOCK_EN */
+ l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */
+ writel_relaxed(l, base + PLL_CONFIGURATION2);
+
+ r = dss_wait_hsdiv_ack(pll,
+ (cinfo->mX[0] ? BIT(7) : 0) |
+ (cinfo->mX[1] ? BIT(8) : 0) |
+ (cinfo->mX[2] ? BIT(10) : 0) |
+ (cinfo->mX[3] ? BIT(11) : 0));
+ if (r) {
+ DSSERR("failed to enable HSDIV clocks\n");
+ goto err;
+ }
+
+err:
+ return r;
+}
+
+int dss_pll_write_config_type_b(struct dss_pll *pll,
+ const struct dss_pll_clock_info *cinfo)
+{
+ const struct dss_pll_hw *hw = pll->hw;
+ void __iomem *base = pll->base;
+ u32 l;
+
+ l = 0;
+ l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */
+ l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */
+ writel_relaxed(l, base + PLL_CONFIGURATION1);
+
+ l = readl_relaxed(base + PLL_CONFIGURATION2);
+ l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
+ l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */
+ l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */
+ if (hw->has_refsel)
+ l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */
+
+ /* PLL_SELFREQDCO */
+ if (cinfo->clkdco > hw->clkdco_low)
+ l = FLD_MOD(l, 0x4, 3, 1);
+ else
+ l = FLD_MOD(l, 0x2, 3, 1);
+ writel_relaxed(l, base + PLL_CONFIGURATION2);
+
+ l = readl_relaxed(base + PLL_CONFIGURATION3);
+ l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */
+ writel_relaxed(l, base + PLL_CONFIGURATION3);
+
+ l = readl_relaxed(base + PLL_CONFIGURATION4);
+ l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */
+ l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */
+ writel_relaxed(l, base + PLL_CONFIGURATION4);
+
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+ DSSERR("DSS DPLL GO bit not going down.\n");
+ return -EIO;
+ }
+
+ if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock DSS DPLL\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
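To show how the pieces above are meant to fit together, here is a minimal sketch of a consumer of the new dss_pll interface, based only on functions introduced in this patch (hdmi_pll_compute(), dss_pll_enable(), dss_pll_set_config(), dss_pll_disable()); it illustrates the intended call order, is not a copy of the in-tree HDMI code, and trims error handling to the essentials.

/* hypothetical caller, kernel context assumed */
static int example_set_hdmi_clock(struct hdmi_pll_data *hpll,
				  unsigned long target_tmds)
{
	struct dss_pll *pll = &hpll->pll;	/* also reachable via dss_pll_find("hdmi") */
	struct dss_pll_clock_info cinfo;
	int r;

	/* pick N/M/M.f/M2/SD for the requested TMDS rate */
	hdmi_pll_compute(hpll, target_tmds, &cinfo);

	/* enables clkin, the optional regulator and the WP PLL power */
	r = dss_pll_enable(pll);
	if (r)
		return r;

	/* lands in dss_pll_write_config_type_b() for the HDMI PLL */
	r = dss_pll_set_config(pll, &cinfo);
	if (r) {
		dss_pll_disable(pll);
		return r;
	}

	return 0;
}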
diff --git a/drivers/video/fbdev/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c
index 878273f58839..28e694b11ff9 100644
--- a/drivers/video/fbdev/omap2/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/dss/rfbi.c
@@ -1042,7 +1042,6 @@ static struct platform_driver omap_rfbihw_driver = {
.remove = __exit_p(omap_rfbihw_remove),
.driver = {
.name = "omapdss_rfbi",
- .owner = THIS_MODULE,
.pm = &rfbi_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index 4c9c46d4ea60..d51a983075bc 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -376,7 +376,6 @@ static struct platform_driver omap_sdi_driver = {
.remove = __exit_p(omap_sdi_remove),
.driver = {
.name = "omapdss_sdi",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
};
@@ -425,7 +424,7 @@ err_datapairs:
return r;
}
-void __exit sdi_uninit_port(void)
+void __exit sdi_uninit_port(struct device_node *port)
{
if (!sdi.port_initialized)
return;
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index d077d8a75ddc..ef7fd925e7f2 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -963,7 +963,6 @@ static struct platform_driver omap_venchw_driver = {
.remove = __exit_p(omap_venchw_remove),
.driver = {
.name = "omapdss_venc",
- .owner = THIS_MODULE,
.pm = &venc_pm_ops,
.of_match_table = venc_of_match,
.suppress_bind_attrs = true,
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index ce8a70570756..22f07f88bc40 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -2638,7 +2638,6 @@ static struct platform_driver omapfb_driver = {
.remove = omapfb_remove,
.driver = {
.name = "omapfb",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 367cea8f43f3..1f6ee76af878 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -353,7 +353,6 @@ MODULE_DEVICE_TABLE(of, p9100_match);
static struct platform_driver p9100_driver = {
.driver = {
.name = "p9100",
- .owner = THIS_MODULE,
.of_match_table = p9100_match,
},
.probe = p9100_probe,
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 4c9299576827..518d1fd38a81 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -679,7 +679,6 @@ static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",
- .owner = THIS_MODULE,
.of_match_table = platinumfb_match,
},
.probe = platinumfb_probe,
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index c95b9e46d48f..e209b039f553 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -823,7 +823,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
static struct platform_driver pxa168fb_driver = {
.driver = {
.name = "pxa168-fb",
- .owner = THIS_MODULE,
},
.probe = pxa168fb_probe,
.remove = pxa168fb_remove,
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 4df3657fe221..86bd457d039d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -707,7 +707,6 @@ static struct platform_driver pxa3xx_gcu_driver = {
.probe = pxa3xx_gcu_probe,
.remove = pxa3xx_gcu_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index a5acca88fa63..da2431eda2fd 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2304,7 +2304,6 @@ static struct platform_driver pxafb_driver = {
.probe = pxafb_probe,
.remove = pxafb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "pxa2xx-fb",
#ifdef CONFIG_PM
.pm = &pxafb_pm_ops,
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 57f88fd0ba9e..7e3a05fc47aa 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1971,7 +1971,6 @@ static struct platform_driver s3c_fb_driver = {
.id_table = s3c_fb_driver_ids,
.driver = {
.name = "s3c-fb",
- .owner = THIS_MODULE,
.pm = &s3cfb_pm_ops,
},
};
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index e350eb57f11d..d6704add1601 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -1104,7 +1104,6 @@ static struct platform_driver s3c2410fb_driver = {
.resume = s3c2410fb_resume,
.driver = {
.name = "s3c2410-lcd",
- .owner = THIS_MODULE,
},
};
@@ -1115,7 +1114,6 @@ static struct platform_driver s3c2412fb_driver = {
.resume = s3c2410fb_resume,
.driver = {
.name = "s3c2412-lcd",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index c43b969e1e23..f0ae61a37f04 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1182,7 +1182,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
- par->state.vgabase = (void __iomem *) vga_res.start;
+ par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
/* Unlock regs */
cr38 = vga_rcrt(par->state.vgabase, 0x38);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 9690216d38ff..89dd7e02197f 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -178,6 +178,7 @@
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <video/sa1100fb.h>
@@ -416,9 +417,9 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->transp.offset);
#ifdef CONFIG_CPU_FREQ
- dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n",
+ dev_dbg(fbi->dev, "dma period = %d ps, clock = %ld kHz\n",
sa1100fb_display_dma_period(var),
- cpufreq_get(smp_processor_id()));
+ clk_get_rate(fbi->clk) / 1000);
#endif
return 0;
@@ -592,9 +593,10 @@ static struct fb_ops sa1100fb_ops = {
* Calculate the PCD value from the clock rate (in picoseconds).
* We take account of the PPCR clock setting.
*/
-static inline unsigned int get_pcd(unsigned int pixclock, unsigned int cpuclock)
+static inline unsigned int get_pcd(struct sa1100fb_info *fbi,
+ unsigned int pixclock)
{
- unsigned int pcd = cpuclock / 100;
+ unsigned int pcd = clk_get_rate(fbi->clk) / 100 / 1000;
pcd *= pixclock;
pcd /= 10000000;
@@ -673,7 +675,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
LCCR2_BegFrmDel(var->upper_margin) +
LCCR2_EndFrmDel(var->lower_margin);
- pcd = get_pcd(var->pixclock, cpufreq_get(0));
+ pcd = get_pcd(fbi, var->pixclock);
new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
(var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
(var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
@@ -787,6 +789,9 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
fbi->palette_cpu[0] &= 0xcfff;
fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);
+ /* enable LCD controller clock */
+ clk_prepare_enable(fbi->clk);
+
/* Sequence from 11.7.10 */
writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
@@ -831,6 +836,9 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
schedule_timeout(20 * HZ / 1000);
remove_wait_queue(&fbi->ctrlr_wait, &wait);
+
+ /* disable LCD controller clock */
+ clk_disable_unprepare(fbi->clk);
}
/*
@@ -1009,7 +1017,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
void *data)
{
struct sa1100fb_info *fbi = TO_INF(nb, freq_transition);
- struct cpufreq_freqs *f = data;
u_int pcd;
switch (val) {
@@ -1018,7 +1025,7 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
break;
case CPUFREQ_POSTCHANGE:
- pcd = get_pcd(fbi->fb.var.pixclock, f->new);
+ pcd = get_pcd(fbi, fbi->fb.var.pixclock);
fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd);
set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE);
break;
@@ -1225,6 +1232,13 @@ static int sa1100fb_probe(struct platform_device *pdev)
if (!fbi)
goto failed;
+ fbi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fbi->clk)) {
+ ret = PTR_ERR(fbi->clk);
+ fbi->clk = NULL;
+ goto failed;
+ }
+
fbi->base = ioremap(res->start, resource_size(res));
if (!fbi->base)
goto failed;
@@ -1277,6 +1291,8 @@ static int sa1100fb_probe(struct platform_device *pdev)
failed:
if (fbi)
iounmap(fbi->base);
+ if (fbi->clk)
+ clk_put(fbi->clk);
kfree(fbi);
release_mem_region(res->start, resource_size(res));
return ret;
@@ -1288,7 +1304,6 @@ static struct platform_driver sa1100fb_driver = {
.resume = sa1100fb_resume,
.driver = {
.name = "sa11x0-fb",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index fc5d4292fad6..0139d13377a5 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -68,6 +68,7 @@ struct sa1100fb_info {
#endif
const struct sa1100fb_mach_info *inf;
+ struct clk *clk;
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index 1265b25f9f99..96de91d76623 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -578,7 +578,6 @@ static int sh7760fb_remove(struct platform_device *dev)
static struct platform_driver sh7760_lcdc_driver = {
.driver = {
.name = "sh7760-lcdc",
- .owner = THIS_MODULE,
},
.probe = sh7760fb_probe,
.remove = sh7760fb_remove,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 2bcc84ac18c7..d3013cd9f976 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2181,8 +2181,7 @@ sh_mobile_lcdc_channel_fb_cleanup(struct sh_mobile_lcdc_chan *ch)
if (!info || !info->device)
return;
- if (ch->sglist)
- vfree(ch->sglist);
+ vfree(ch->sglist);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
@@ -2849,7 +2848,6 @@ err1:
static struct platform_driver sh_mobile_lcdc_driver = {
.driver = {
.name = "sh_mobile_lcdc_fb",
- .owner = THIS_MODULE,
.pm = &sh_mobile_lcdc_dev_pm_ops,
},
.probe = sh_mobile_lcdc_probe,
diff --git a/drivers/video/fbdev/sh_mobile_meram.c b/drivers/video/fbdev/sh_mobile_meram.c
index 239453942706..baadfb207b2e 100644
--- a/drivers/video/fbdev/sh_mobile_meram.c
+++ b/drivers/video/fbdev/sh_mobile_meram.c
@@ -745,7 +745,6 @@ static int sh_mobile_meram_remove(struct platform_device *pdev)
static struct platform_driver sh_mobile_meram_driver = {
.driver = {
.name = "sh_mobile_meram",
- .owner = THIS_MODULE,
.pm = &sh_mobile_meram_dev_pm_ops,
},
.probe = sh_mobile_meram_probe,
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 210f3a02121a..92cac803dee3 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
static struct fb_fix_screeninfo simplefb_fix = {
.id = "simple",
@@ -41,6 +43,8 @@ static struct fb_var_screeninfo simplefb_var = {
.vmode = FB_VMODE_NONINTERLACED,
};
+#define PSEUDO_PALETTE_SIZE 16
+
static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
@@ -50,7 +54,7 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u32 cb = blue >> (16 - info->var.blue.length);
u32 value;
- if (regno >= 16)
+ if (regno >= PSEUDO_PALETTE_SIZE)
return -EINVAL;
value = (cr << info->var.red.offset) |
@@ -163,11 +167,113 @@ static int simplefb_parse_pd(struct platform_device *pdev,
return 0;
}
+struct simplefb_par {
+ u32 palette[PSEUDO_PALETTE_SIZE];
+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
+ int clk_count;
+ struct clk **clks;
+#endif
+};
+
+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
+/*
+ * Clock handling code.
+ *
+ * Here we handle the clocks property of our "simple-framebuffer" dt node.
+ * This is necessary so that we can make sure that any clocks needed by
+ * the display engine that the bootloader set up for us (and for which it
+ * provided a simplefb dt node), stay up, for the life of the simplefb
+ * driver.
+ *
+ * When the driver unloads, we cleanly disable, and then release the clocks.
+ *
+ * We only complain about errors here, no action is taken as the most likely
+ * error can only happen due to a mismatch between the bootloader which set
+ * up simplefb, and the clock definitions in the device tree. Chances are
+ * that there are no adverse effects, and if there are, a clean teardown of
+ * the fb probe will not help us much either. So just complain and carry on,
+ * and hope that the user actually gets a working fb at the end of things.
+ */
+static int simplefb_clocks_init(struct simplefb_par *par,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk *clock;
+ int i, ret;
+
+ if (dev_get_platdata(&pdev->dev) || !np)
+ return 0;
+
+ par->clk_count = of_clk_get_parent_count(np);
+ if (par->clk_count <= 0)
+ return 0;
+
+ par->clks = kcalloc(par->clk_count, sizeof(struct clk *), GFP_KERNEL);
+ if (!par->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < par->clk_count; i++) {
+ clock = of_clk_get(np, i);
+ if (IS_ERR(clock)) {
+ if (PTR_ERR(clock) == -EPROBE_DEFER) {
+ while (--i >= 0) {
+ if (par->clks[i])
+ clk_put(par->clks[i]);
+ }
+ kfree(par->clks);
+ return -EPROBE_DEFER;
+ }
+ dev_err(&pdev->dev, "%s: clock %d not found: %ld\n",
+ __func__, i, PTR_ERR(clock));
+ continue;
+ }
+ par->clks[i] = clock;
+ }
+
+ for (i = 0; i < par->clk_count; i++) {
+ if (par->clks[i]) {
+ ret = clk_prepare_enable(par->clks[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: failed to enable clock %d: %d\n",
+ __func__, i, ret);
+ clk_put(par->clks[i]);
+ par->clks[i] = NULL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void simplefb_clocks_destroy(struct simplefb_par *par)
+{
+ int i;
+
+ if (!par->clks)
+ return;
+
+ for (i = 0; i < par->clk_count; i++) {
+ if (par->clks[i]) {
+ clk_disable_unprepare(par->clks[i]);
+ clk_put(par->clks[i]);
+ }
+ }
+
+ kfree(par->clks);
+}
+#else
+static int simplefb_clocks_init(struct simplefb_par *par,
+ struct platform_device *pdev) { return 0; }
+static void simplefb_clocks_destroy(struct simplefb_par *par) { }
+#endif
+
static int simplefb_probe(struct platform_device *pdev)
{
int ret;
struct simplefb_params params;
struct fb_info *info;
+ struct simplefb_par *par;
struct resource *mem;
if (fb_get_options("simplefb", NULL))
@@ -188,11 +294,13 @@ static int simplefb_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
+ info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev);
if (!info)
return -ENOMEM;
platform_set_drvdata(pdev, info);
+ par = info->par;
+
info->fix = simplefb_fix;
info->fix.smem_start = mem->start;
info->fix.smem_len = resource_size(mem);
@@ -211,8 +319,8 @@ static int simplefb_probe(struct platform_device *pdev)
info->apertures = alloc_apertures(1);
if (!info->apertures) {
- framebuffer_release(info);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_fb_release;
}
info->apertures->ranges[0].base = info->fix.smem_start;
info->apertures->ranges[0].size = info->fix.smem_len;
@@ -222,10 +330,14 @@ static int simplefb_probe(struct platform_device *pdev)
info->screen_base = ioremap_wc(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENOMEM;
+ goto error_fb_release;
}
- info->pseudo_palette = (void *)(info + 1);
+ info->pseudo_palette = par->palette;
+
+ ret = simplefb_clocks_init(par, pdev);
+ if (ret < 0)
+ goto error_unmap;
dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
info->fix.smem_start, info->fix.smem_len,
@@ -238,21 +350,29 @@ static int simplefb_probe(struct platform_device *pdev)
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
- iounmap(info->screen_base);
- framebuffer_release(info);
- return ret;
+ goto error_clocks;
}
dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
return 0;
+
+error_clocks:
+ simplefb_clocks_destroy(par);
+error_unmap:
+ iounmap(info->screen_base);
+error_fb_release:
+ framebuffer_release(info);
+ return ret;
}
static int simplefb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ struct simplefb_par *par = info->par;
unregister_framebuffer(info);
+ simplefb_clocks_destroy(par);
framebuffer_release(info);
return 0;
@@ -267,13 +387,32 @@ MODULE_DEVICE_TABLE(of, simplefb_of_match);
static struct platform_driver simplefb_driver = {
.driver = {
.name = "simple-framebuffer",
- .owner = THIS_MODULE,
.of_match_table = simplefb_of_match,
},
.probe = simplefb_probe,
.remove = simplefb_remove,
};
-module_platform_driver(simplefb_driver);
+
+static int __init simplefb_init(void)
+{
+ int ret;
+ struct device_node *np;
+
+ ret = platform_driver_register(&simplefb_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+ for_each_child_of_node(of_chosen, np) {
+ if (of_device_is_compatible(np, "simple-framebuffer"))
+ of_platform_device_create(np, NULL, NULL);
+ }
+ }
+
+ return 0;
+}
+
+fs_initcall(simplefb_init);
MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
MODULE_DESCRIPTION("Simple framebuffer driver");
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index e5d11b1892e8..fcf610edf217 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -5989,7 +5989,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(!ivideo->sisvga_enabled) {
if(pci_enable_device(pdev)) {
- if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
+ pci_dev_put(ivideo->nbridge);
framebuffer_release(sis_fb_info);
return -EIO;
}
@@ -6202,10 +6202,8 @@ error_0: iounmap(ivideo->video_vbase);
error_1: release_mem_region(ivideo->video_base, ivideo->video_size);
error_2: release_mem_region(ivideo->mmio_base, ivideo->mmio_size);
error_3: vfree(ivideo->bios_abase);
- if(ivideo->lpcdev)
- pci_dev_put(ivideo->lpcdev);
- if(ivideo->nbridge)
- pci_dev_put(ivideo->nbridge);
+ pci_dev_put(ivideo->lpcdev);
+ pci_dev_put(ivideo->nbridge);
if(!ivideo->sisvga_enabled)
pci_disable_device(pdev);
framebuffer_release(sis_fb_info);
@@ -6505,11 +6503,9 @@ static void sisfb_remove(struct pci_dev *pdev)
vfree(ivideo->bios_abase);
- if(ivideo->lpcdev)
- pci_dev_put(ivideo->lpcdev);
+ pci_dev_put(ivideo->lpcdev);
- if(ivideo->nbridge)
- pci_dev_put(ivideo->nbridge);
+ pci_dev_put(ivideo->nbridge);
#ifdef CONFIG_MTRR
/* Release MTRR region */
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 9e74e8fbe074..e8d4121783fb 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1988,6 +1988,7 @@ static int sm501fb_probe(struct platform_device *pdev)
if (info->fb[HEAD_PANEL] == NULL &&
info->fb[HEAD_CRT] == NULL) {
dev_err(dev, "no framebuffers found\n");
+ ret = -ENODEV;
goto err_alloc;
}
@@ -2224,7 +2225,6 @@ static struct platform_driver sm501fb_driver = {
.resume = sm501fb_resume,
.driver = {
.name = "sm501-fb",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index d513ed6a49f2..9279e5f6696e 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1142,8 +1142,7 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
- if (info->screen_base)
- vfree(info->screen_base);
+ vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
@@ -1743,8 +1742,7 @@ error:
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
- if (info->screen_base)
- vfree(info->screen_base);
+ vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 58241b47a96d..08879bdfad35 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -202,7 +202,6 @@ static struct platform_driver gfb_driver = {
.remove = gfb_remove,
.driver = {
.name = "gfb",
- .owner = THIS_MODULE,
.of_match_table = gfb_match,
},
};
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 7fb2d696fac7..54ad08854c94 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -512,7 +512,6 @@ MODULE_DEVICE_TABLE(of, tcx_match);
static struct platform_driver tcx_driver = {
.driver = {
.name = "tcx",
- .owner = THIS_MODULE,
.of_match_table = tcx_match,
},
.probe = tcx_probe,
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 046d51d83d74..ff2b8731a2dc 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -922,8 +922,7 @@ static void dlfb_free(struct kref *kref)
{
struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);
- if (dev->backing_buffer)
- vfree(dev->backing_buffer);
+ vfree(dev->backing_buffer);
kfree(dev->edid);
@@ -953,8 +952,7 @@ static void dlfb_free_framebuffer(struct dlfb_data *dev)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
- if (info->screen_base)
- vfree(info->screen_base);
+ vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
@@ -1203,8 +1201,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
if (!new_back)
pr_info("No shadow/backing buffer allocated\n");
else {
- if (dev->backing_buffer)
- vfree(dev->backing_buffer);
+ vfree(dev->backing_buffer);
dev->backing_buffer = new_back;
}
}
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 509d452e8f91..d32d1c4d1b99 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1219,8 +1219,7 @@ static int uvesafb_release(struct fb_info *info, int user)
uvesafb_vbe_state_restore(par, par->vbe_state_orig);
out:
atomic_dec(&par->ref_count);
- if (task)
- uvesafb_free(task);
+ uvesafb_free(task);
return 0;
}
@@ -1923,8 +1922,7 @@ static int uvesafb_init(void)
err = -ENOMEM;
if (err) {
- if (uvesafb_device)
- platform_device_put(uvesafb_device);
+ platform_device_put(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
return err;
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index 6170e7f58640..d79a0ac49fc7 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -512,7 +512,6 @@ static int vesafb_remove(struct platform_device *pdev)
static struct platform_driver vesafb_driver = {
.driver = {
.name = "vesa-framebuffer",
- .owner = THIS_MODULE,
},
.probe = vesafb_probe,
.remove = vesafb_remove,
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 325c43c6ff97..f9718f012aae 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1937,8 +1937,7 @@ out_fb1_unreg_lcd_cle266:
out_dealloc_cmap:
fb_dealloc_cmap(&viafbinfo->cmap);
out_fb1_release:
- if (viafbinfo1)
- framebuffer_release(viafbinfo1);
+ framebuffer_release(viafbinfo1);
out_fb_release:
i2c_bus_free(viaparinfo->shared);
framebuffer_release(viafbinfo);
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index a1134c3f6c11..ffaf29eeaaba 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -486,7 +486,6 @@ static struct platform_driver vt8500lcd_driver = {
.probe = vt8500lcd_probe,
.remove = vt8500lcd_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "vt8500-lcd",
.of_match_table = of_match_ptr(via_dt_ids),
},
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 5c7cbc6c6236..ea7f056ed5fe 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -731,7 +731,7 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
- par->state.vgabase = (void __iomem *) vga_res.start;
+ par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
/* Find how many physical memory there is on card */
memsize1 = (vga_rseq(par->state.vgabase, 0x34) + 1) >> 1;
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index d2fafbbcd7f8..e925619da39b 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -407,7 +407,6 @@ static struct platform_driver wm8505fb_driver = {
.probe = wm8505fb_probe,
.remove = wm8505fb_remove,
.driver = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.of_match_table = wmt_dt_ids,
},
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 9df6fe78a44b..89d23844c1db 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -167,7 +167,6 @@ static struct platform_driver wmt_ge_rops_driver = {
.probe = wmt_ge_rops_probe,
.remove = wmt_ge_rops_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "wmt_ge_rops",
.of_match_table = wmt_dt_ids,
},
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 553cff2f3f4c..17dc119c7a98 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -497,7 +497,6 @@ static struct platform_driver xilinxfb_of_driver = {
.remove = xilinxfb_of_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = xilinxfb_of_match,
},
};
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 9076635697bb..bf5104b56894 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
+virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index df598dd8c5c8..f22665868781 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -3,6 +3,7 @@
#include <linux/virtio_config.h>
#include <linux/module.h>
#include <linux/idr.h>
+#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
static DEFINE_IDA(virtio_index_ida);
@@ -49,9 +50,9 @@ static ssize_t features_show(struct device *_d,
/* We actually represent this as a bitstring, as it could be
* arbitrary length in future. */
- for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
+ for (i = 0; i < sizeof(dev->features)*8; i++)
len += sprintf(buf+len, "%c",
- test_bit(i, dev->features) ? '1' : '0');
+ __virtio_test_bit(dev, i) ? '1' : '0');
len += sprintf(buf+len, "\n");
return len;
}
@@ -113,6 +114,13 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
for (i = 0; i < drv->feature_table_size; i++)
if (drv->feature_table[i] == fbit)
return;
+
+ if (drv->feature_table_legacy) {
+ for (i = 0; i < drv->feature_table_size_legacy; i++)
+ if (drv->feature_table_legacy[i] == fbit)
+ return;
+ }
+
BUG();
}
EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
@@ -159,7 +167,10 @@ static int virtio_dev_probe(struct device *_d)
int err, i;
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- u32 device_features;
+ u64 device_features;
+ u64 driver_features;
+ u64 driver_features_legacy;
+ unsigned status;
/* We have a driver! */
add_status(dev, VIRTIO_CONFIG_S_DRIVER);
@@ -167,34 +178,66 @@ static int virtio_dev_probe(struct device *_d)
/* Figure out what features the device supports. */
device_features = dev->config->get_features(dev);
- /* Features supported by both device and driver into dev->features. */
- memset(dev->features, 0, sizeof(dev->features));
+ /* Figure out what features the driver supports. */
+ driver_features = 0;
for (i = 0; i < drv->feature_table_size; i++) {
unsigned int f = drv->feature_table[i];
- BUG_ON(f >= 32);
- if (device_features & (1 << f))
- set_bit(f, dev->features);
+ BUG_ON(f >= 64);
+ driver_features |= (1ULL << f);
+ }
+
+ /* Some drivers have a separate feature table for virtio v1.0 */
+ if (drv->feature_table_legacy) {
+ driver_features_legacy = 0;
+ for (i = 0; i < drv->feature_table_size_legacy; i++) {
+ unsigned int f = drv->feature_table_legacy[i];
+ BUG_ON(f >= 64);
+ driver_features_legacy |= (1ULL << f);
+ }
+ } else {
+ driver_features_legacy = driver_features;
}
+ if (device_features & (1ULL << VIRTIO_F_VERSION_1))
+ dev->features = driver_features & device_features;
+ else
+ dev->features = driver_features_legacy & device_features;
+
/* Transport features always preserved to pass to finalize_features. */
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
- if (device_features & (1 << i))
- set_bit(i, dev->features);
+ if (device_features & (1ULL << i))
+ __virtio_set_bit(dev, i);
- dev->config->finalize_features(dev);
+ err = dev->config->finalize_features(dev);
+ if (err)
+ goto err;
+
+ if (virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
+ add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+ status = dev->config->get_status(dev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ dev_err(_d, "virtio: device refuses features: %x\n",
+ status);
+ err = -ENODEV;
+ goto err;
+ }
+ }
err = drv->probe(dev);
if (err)
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
- else {
- add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- if (drv->scan)
- drv->scan(dev);
+ goto err;
- virtio_config_enable(dev);
- }
+ add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ if (drv->scan)
+ drv->scan(dev);
+
+ virtio_config_enable(dev);
+ return 0;
+err:
+ add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
+
}
static int virtio_dev_remove(struct device *_d)
@@ -223,6 +266,12 @@ static struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
+bool virtio_device_is_legacy_only(struct virtio_device_id id)
+{
+ return id.device == VIRTIO_ID_BALLOON;
+}
+EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
+
int register_virtio_driver(struct virtio_driver *driver)
{
/* Catch this early. */
@@ -303,6 +352,7 @@ EXPORT_SYMBOL_GPL(virtio_device_freeze);
int virtio_device_restore(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
/* We always start by resetting the device, in case a previous
* driver messed it up. */
@@ -322,14 +372,14 @@ int virtio_device_restore(struct virtio_device *dev)
/* We have a driver! */
add_status(dev, VIRTIO_CONFIG_S_DRIVER);
- dev->config->finalize_features(dev);
+ ret = dev->config->finalize_features(dev);
+ if (ret)
+ goto err;
if (drv->restore) {
- int ret = drv->restore(dev);
- if (ret) {
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
- return ret;
- }
+ ret = drv->restore(dev);
+ if (ret)
+ goto err;
}
/* Finally, tell the device we're all set */
@@ -338,6 +388,10 @@ int virtio_device_restore(struct virtio_device *dev)
virtio_config_enable(dev);
return 0;
+
+err:
+ add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ return ret;
}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
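To make the new 64-bit negotiation easier to follow, here is a minimal user-space sketch of the mask selection done in virtio_dev_probe() above: the driver builds a modern and a legacy candidate mask and intersects the appropriate one with the device's features, depending on VIRTIO_F_VERSION_1. The example feature bits are arbitrary, and the separate loop that re-adds the transport feature bits (VIRTIO_TRANSPORT_F_START..VIRTIO_TRANSPORT_F_END) is omitted here.

#include <stdio.h>
#include <stdint.h>

#define VIRTIO_F_VERSION_1	32	/* bit 32, as in the virtio 1.0 spec */

static uint64_t negotiate(uint64_t device_features,
			  uint64_t driver_features,
			  uint64_t driver_features_legacy)
{
	if (device_features & (1ULL << VIRTIO_F_VERSION_1))
		return driver_features & device_features;
	else
		return driver_features_legacy & device_features;
}

int main(void)
{
	/* assumed example: a modern device offering feature bits 0 and 5 */
	uint64_t dev = (1ULL << VIRTIO_F_VERSION_1) | (1ULL << 0) | (1ULL << 5);
	uint64_t drv = (1ULL << 0) | (1ULL << 5) | (1ULL << 7);
	uint64_t drv_legacy = (1ULL << 0);

	printf("0x%llx\n", (unsigned long long)negotiate(dev, drv, drv_legacy));
	/* prints 0x21: bits 0 and 5; bit 7 is not offered by the device */
	return 0;
}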
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c9703d4d6f67..50c5f42d7a9f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
+#include <linux/oom.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -36,6 +37,12 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
+#define OOM_VBALLOON_DEFAULT_PAGES 256
+#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+
+static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
+module_param(oom_pages, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
struct virtio_balloon
{
@@ -71,6 +78,9 @@ struct virtio_balloon
/* Memory statistics */
int need_stats_update;
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
+
+ /* To register callback in oom notifier call chain */
+ struct notifier_block nb;
};
static struct virtio_device_id id_table[] = {
@@ -168,8 +178,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
}
}
-static void leak_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
+ unsigned num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
@@ -186,6 +197,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
+ num_freed_pages = vb->num_pfns;
/*
* Note that if
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
@@ -195,6 +207,7 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ return num_freed_pages;
}
static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -287,6 +300,38 @@ static void update_balloon_size(struct virtio_balloon *vb)
&actual);
}
+/*
+ * virtballoon_oom_notify - release pages when system is under severe
+ * memory pressure (called from out_of_memory())
+ * @self : notifier block struct
+ * @dummy: not used
+ * @parm : returned - number of freed pages
+ *
+ * The balancing of memory by use of the virtio balloon should not cause
+ * the termination of processes while there are pages in the balloon.
+ * If virtio balloon manages to release some memory, it will make the
+ * system return and retry the allocation that forced the OOM killer
+ * to run.
+ */
+static int virtballoon_oom_notify(struct notifier_block *self,
+ unsigned long dummy, void *parm)
+{
+ struct virtio_balloon *vb;
+ unsigned long *freed;
+ unsigned num_freed_pages;
+
+ vb = container_of(self, struct virtio_balloon, nb);
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ return NOTIFY_OK;
+
+ freed = parm;
+ num_freed_pages = leak_balloon(vb, oom_pages);
+ update_balloon_size(vb);
+ *freed += num_freed_pages;
+
+ return NOTIFY_OK;
+}
+
static int balloon(void *_vballoon)
{
struct virtio_balloon *vb = _vballoon;
@@ -443,6 +488,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (err)
goto out_free_vb;
+ vb->nb.notifier_call = virtballoon_oom_notify;
+ vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
+ err = register_oom_notifier(&vb->nb);
+ if (err < 0)
+ goto out_oom_notify;
+
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
err = PTR_ERR(vb->thread);
@@ -452,6 +503,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
return 0;
out_del_vqs:
+ unregister_oom_notifier(&vb->nb);
+out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -476,6 +529,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
remove_common(vb);
kfree(vb);
@@ -515,6 +569,7 @@ static int virtballoon_restore(struct virtio_device *vdev)
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
+ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
};
static struct virtio_driver virtio_balloon_driver = {
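
virtballoon_oom_notify() above reports the pages it released back through the notifier's opaque parameter, so the OOM path can see how much memory was reclaimed before killing anything. The sketch below models just that accumulation pattern with a plain callback array in userspace; the kernel's notifier-chain API is not used and all names are invented.

#include <stdio.h>

/* Each callback adds the pages it freed to the caller's counter,
 * mirroring "*freed += num_freed_pages" in virtballoon_oom_notify(). */
typedef int (*demo_oom_cb_t)(void *parm);

static int fake_balloon_deflate(void *parm)
{
	unsigned long *freed = parm;

	*freed += 256;		/* pretend we released 256 pages */
	return 0;
}

static int fake_cache_shrink(void *parm)
{
	unsigned long *freed = parm;

	*freed += 32;
	return 0;
}

int main(void)
{
	demo_oom_cb_t chain[] = { fake_balloon_deflate, fake_cache_shrink };
	unsigned long freed = 0;
	unsigned int i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		chain[i](&freed);

	printf("callbacks reported %lu pages freed\n", freed);
	return 0;
}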
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index ef9a1650bb80..00d115b22bd8 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -142,7 +142,7 @@ struct virtio_mmio_vq_info {
/* Configuration interface */
-static u32 vm_get_features(struct virtio_device *vdev)
+static u64 vm_get_features(struct virtio_device *vdev)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
@@ -152,19 +152,20 @@ static u32 vm_get_features(struct virtio_device *vdev)
return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
}
-static void vm_finalize_features(struct virtio_device *vdev)
+static int vm_finalize_features(struct virtio_device *vdev)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- int i;
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
- writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
- writel(vdev->features[i],
- vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
- }
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
+ writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
+ writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
+
+ return 0;
}
static void vm_get(struct virtio_device *vdev, unsigned offset,
@@ -639,7 +640,6 @@ static struct platform_driver virtio_mmio_driver = {
.remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
- .owner = THIS_MODULE,
.of_match_table = virtio_mmio_match,
},
};
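
vm_finalize_features() above guards against features beyond bit 31 with a cast-and-compare before writing a single 32-bit register. A self-contained illustration of that truncation check, with assert() standing in for BUG_ON():

#include <assert.h>
#include <stdint.h>

/* Return the low 32 bits, asserting that nothing above bit 31 is set.
 * "(u32)features != features" is exactly the truncation test used above. */
static uint32_t features_to_u32(uint64_t features)
{
	assert((uint32_t)features == features);
	return (uint32_t)features;
}

int main(void)
{
	uint64_t ok = 0x00000000deadbeefULL;
	/* uint64_t bad = 1ULL << 33;	-- would trip the assert */

	return features_to_u32(ok) == 0xdeadbeef ? 0 : 1;
}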
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
deleted file mode 100644
index d34ebfa604f3..000000000000
--- a/drivers/virtio/virtio_pci.c
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Virtio PCI driver
- *
- * This module allows virtio devices to be used over a virtual PCI device.
- * This can be used with QEMU based VMMs like KVM or Xen.
- *
- * Copyright IBM Corp. 2007
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ring.h>
-#include <linux/virtio_pci.h>
-#include <linux/highmem.h>
-#include <linux/spinlock.h>
-
-MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
-MODULE_DESCRIPTION("virtio-pci");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1");
-
-/* Our device structure */
-struct virtio_pci_device
-{
- struct virtio_device vdev;
- struct pci_dev *pci_dev;
-
- /* the IO mapping for the PCI config space */
- void __iomem *ioaddr;
-
- /* a list of queues so we can dispatch IRQs */
- spinlock_t lock;
- struct list_head virtqueues;
-
- /* MSI-X support */
- int msix_enabled;
- int intx_enabled;
- struct msix_entry *msix_entries;
- cpumask_var_t *msix_affinity_masks;
- /* Name strings for interrupts. This size should be enough,
- * and I'm too lazy to allocate each name separately. */
- char (*msix_names)[256];
- /* Number of available vectors */
- unsigned msix_vectors;
- /* Vectors allocated, excluding per-vq vectors if any */
- unsigned msix_used_vectors;
-
- /* Whether we have vector per vq */
- bool per_vq_vectors;
-};
-
-/* Constants for MSI-X */
-/* Use first vector for configuration changes, second and the rest for
- * virtqueues Thus, we need at least 2 vectors for MSI. */
-enum {
- VP_MSIX_CONFIG_VECTOR = 0,
- VP_MSIX_VQ_VECTOR = 1,
-};
-
-struct virtio_pci_vq_info
-{
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the number of entries in the queue */
- int num;
-
- /* the virtual address of the ring queue */
- void *queue;
-
- /* the list node for the virtqueues list */
- struct list_head node;
-
- /* MSI-X vector (or none) */
- unsigned msix_vector;
-};
-
-/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static const struct pci_device_id virtio_pci_id_table[] = {
- { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
-
-/* Convert a generic virtio device to our structure */
-static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
-{
- return container_of(vdev, struct virtio_pci_device, vdev);
-}
-
-/* virtio config->get_features() implementation */
-static u32 vp_get_features(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- /* When someone needs more than 32 feature bits, we'll need to
- * steal a bit to indicate that the rest are somewhere else. */
- return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
-}
-
-/* virtio config->finalize_features() implementation */
-static void vp_finalize_features(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* We only support 32 feature bits. */
- BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1);
- iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES);
-}
-
-/* virtio config->get() implementation */
-static void vp_get(struct virtio_device *vdev, unsigned offset,
- void *buf, unsigned len)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr +
- VIRTIO_PCI_CONFIG(vp_dev) + offset;
- u8 *ptr = buf;
- int i;
-
- for (i = 0; i < len; i++)
- ptr[i] = ioread8(ioaddr + i);
-}
-
-/* the config->set() implementation. it's symmetric to the config->get()
- * implementation */
-static void vp_set(struct virtio_device *vdev, unsigned offset,
- const void *buf, unsigned len)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr +
- VIRTIO_PCI_CONFIG(vp_dev) + offset;
- const u8 *ptr = buf;
- int i;
-
- for (i = 0; i < len; i++)
- iowrite8(ptr[i], ioaddr + i);
-}
-
-/* config->{get,set}_status() implementations */
-static u8 vp_get_status(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
-}
-
-static void vp_set_status(struct virtio_device *vdev, u8 status)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- /* We should never be setting status to 0. */
- BUG_ON(status == 0);
- iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
-}
-
-/* wait for pending irq handlers */
-static void vp_synchronize_vectors(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i;
-
- if (vp_dev->intx_enabled)
- synchronize_irq(vp_dev->pci_dev->irq);
-
- for (i = 0; i < vp_dev->msix_vectors; ++i)
- synchronize_irq(vp_dev->msix_entries[i].vector);
-}
-
-static void vp_reset(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- /* 0 status means a reset. */
- iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
- /* Flush out the status write, and flush in device writes,
- * including MSi-X interrupts, if any. */
- ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
- /* Flush pending VQ/configuration callbacks. */
- vp_synchronize_vectors(vdev);
-}
-
-/* the notify function used when creating a virt queue */
-static bool vp_notify(struct virtqueue *vq)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
-
- /* we write the queue's selector into the notification register to
- * signal the other end */
- iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
- return true;
-}
-
-/* Handle a configuration change: Tell driver if it wants to know. */
-static irqreturn_t vp_config_changed(int irq, void *opaque)
-{
- struct virtio_pci_device *vp_dev = opaque;
-
- virtio_config_changed(&vp_dev->vdev);
- return IRQ_HANDLED;
-}
-
-/* Notify all virtqueues on an interrupt. */
-static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
-{
- struct virtio_pci_device *vp_dev = opaque;
- struct virtio_pci_vq_info *info;
- irqreturn_t ret = IRQ_NONE;
- unsigned long flags;
-
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_for_each_entry(info, &vp_dev->virtqueues, node) {
- if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
- ret = IRQ_HANDLED;
- }
- spin_unlock_irqrestore(&vp_dev->lock, flags);
-
- return ret;
-}
-
-/* A small wrapper to also acknowledge the interrupt when it's handled.
- * I really need an EIO hook for the vring so I can ack the interrupt once we
- * know that we'll be handling the IRQ but before we invoke the callback since
- * the callback may notify the host which results in the host attempting to
- * raise an interrupt that we would then mask once we acknowledged the
- * interrupt. */
-static irqreturn_t vp_interrupt(int irq, void *opaque)
-{
- struct virtio_pci_device *vp_dev = opaque;
- u8 isr;
-
- /* reading the ISR has the effect of also clearing it so it's very
- * important to save off the value. */
- isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
-
- /* It's definitely not us if the ISR was not high */
- if (!isr)
- return IRQ_NONE;
-
- /* Configuration change? Tell driver if it wants to know. */
- if (isr & VIRTIO_PCI_ISR_CONFIG)
- vp_config_changed(irq, opaque);
-
- return vp_vring_interrupt(irq, opaque);
-}
-
-static void vp_free_vectors(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i;
-
- if (vp_dev->intx_enabled) {
- free_irq(vp_dev->pci_dev->irq, vp_dev);
- vp_dev->intx_enabled = 0;
- }
-
- for (i = 0; i < vp_dev->msix_used_vectors; ++i)
- free_irq(vp_dev->msix_entries[i].vector, vp_dev);
-
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-
- if (vp_dev->msix_enabled) {
- /* Disable the vector used for configuration */
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- /* Flush the write out to device */
- ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-
- pci_disable_msix(vp_dev->pci_dev);
- vp_dev->msix_enabled = 0;
- }
-
- vp_dev->msix_vectors = 0;
- vp_dev->msix_used_vectors = 0;
- kfree(vp_dev->msix_names);
- vp_dev->msix_names = NULL;
- kfree(vp_dev->msix_entries);
- vp_dev->msix_entries = NULL;
- kfree(vp_dev->msix_affinity_masks);
- vp_dev->msix_affinity_masks = NULL;
-}
-
-static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
- bool per_vq_vectors)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- const char *name = dev_name(&vp_dev->vdev.dev);
- unsigned i, v;
- int err = -ENOMEM;
-
- vp_dev->msix_vectors = nvectors;
-
- vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
- GFP_KERNEL);
- if (!vp_dev->msix_entries)
- goto error;
- vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
- GFP_KERNEL);
- if (!vp_dev->msix_names)
- goto error;
- vp_dev->msix_affinity_masks
- = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
- GFP_KERNEL);
- if (!vp_dev->msix_affinity_masks)
- goto error;
- for (i = 0; i < nvectors; ++i)
- if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
- GFP_KERNEL))
- goto error;
-
- for (i = 0; i < nvectors; ++i)
- vp_dev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(vp_dev->pci_dev,
- vp_dev->msix_entries, nvectors);
- if (err)
- goto error;
- vp_dev->msix_enabled = 1;
-
- /* Set the vector used for configuration */
- v = vp_dev->msix_used_vectors;
- snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
- "%s-config", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
- vp_config_changed, 0, vp_dev->msix_names[v],
- vp_dev);
- if (err)
- goto error;
- ++vp_dev->msix_used_vectors;
-
- iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- /* Verify we had enough resources to assign the vector */
- v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- if (v == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto error;
- }
-
- if (!per_vq_vectors) {
- /* Shared vector for all VQs */
- v = vp_dev->msix_used_vectors;
- snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
- "%s-virtqueues", name);
- err = request_irq(vp_dev->msix_entries[v].vector,
- vp_vring_interrupt, 0, vp_dev->msix_names[v],
- vp_dev);
- if (err)
- goto error;
- ++vp_dev->msix_used_vectors;
- }
- return 0;
-error:
- vp_free_vectors(vdev);
- return err;
-}
-
-static int vp_request_intx(struct virtio_device *vdev)
-{
- int err;
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
- IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
- if (!err)
- vp_dev->intx_enabled = 1;
- return err;
-}
-
-static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name,
- u16 msix_vec)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_vq_info *info;
- struct virtqueue *vq;
- unsigned long flags, size;
- u16 num;
- int err;
-
- /* Select the queue we're interested in */
- iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
-
- /* Check if queue is either not available or already active. */
- num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
- if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
- return ERR_PTR(-ENOENT);
-
- /* allocate and fill out our structure the represents an active
- * queue */
- info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- info->num = num;
- info->msix_vector = msix_vec;
-
- size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
- if (info->queue == NULL) {
- err = -ENOMEM;
- goto out_info;
- }
-
- /* activate the queue */
- iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
-
- /* create the vring */
- vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
- true, info->queue, vp_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto out_activate_queue;
- }
-
- vq->priv = info;
- info->vq = vq;
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto out_assign;
- }
- }
-
- if (callback) {
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
- } else {
- INIT_LIST_HEAD(&info->node);
- }
-
- return vq;
-
-out_assign:
- vring_del_virtqueue(vq);
-out_activate_queue:
- iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- free_pages_exact(info->queue, size);
-out_info:
- kfree(info);
- return ERR_PTR(err);
-}
-
-static void vp_del_vq(struct virtqueue *vq)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_vq_info *info = vq->priv;
- unsigned long flags, size;
-
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
-
- iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
-
- if (vp_dev->msix_enabled) {
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- /* Flush the write out to device */
- ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
- }
-
- vring_del_virtqueue(vq);
-
- /* Select and deactivate the queue */
- iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
-
- size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
- free_pages_exact(info->queue, size);
- kfree(info);
-}
-
-/* the config->del_vqs() implementation */
-static void vp_del_vqs(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtqueue *vq, *n;
- struct virtio_pci_vq_info *info;
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- info = vq->priv;
- if (vp_dev->per_vq_vectors &&
- info->msix_vector != VIRTIO_MSI_NO_VECTOR)
- free_irq(vp_dev->msix_entries[info->msix_vector].vector,
- vq);
- vp_del_vq(vq);
- }
- vp_dev->per_vq_vectors = false;
-
- vp_free_vectors(vdev);
-}
-
-static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char *names[],
- bool use_msix,
- bool per_vq_vectors)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- u16 msix_vec;
- int i, err, nvectors, allocated_vectors;
-
- if (!use_msix) {
- /* Old style: one normal interrupt for change and all vqs. */
- err = vp_request_intx(vdev);
- if (err)
- goto error_request;
- } else {
- if (per_vq_vectors) {
- /* Best option: one for change interrupt, one per vq. */
- nvectors = 1;
- for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
- ++nvectors;
- } else {
- /* Second best: one for change, shared for all vqs. */
- nvectors = 2;
- }
-
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
- if (err)
- goto error_request;
- }
-
- vp_dev->per_vq_vectors = per_vq_vectors;
- allocated_vectors = vp_dev->msix_used_vectors;
- for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
- vqs[i] = NULL;
- continue;
- } else if (!callbacks[i] || !vp_dev->msix_enabled)
- msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
- msix_vec = allocated_vectors++;
- else
- msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error_find;
- }
-
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
- continue;
-
- /* allocate per-vq irq if available and necessary */
- snprintf(vp_dev->msix_names[msix_vec],
- sizeof *vp_dev->msix_names,
- "%s-%s",
- dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(vp_dev->msix_entries[msix_vec].vector,
- vring_interrupt, 0,
- vp_dev->msix_names[msix_vec],
- vqs[i]);
- if (err) {
- vp_del_vq(vqs[i]);
- goto error_find;
- }
- }
- return 0;
-
-error_find:
- vp_del_vqs(vdev);
-
-error_request:
- return err;
-}
-
-/* the config->find_vqs() implementation */
-static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char *names[])
-{
- int err;
-
- /* Try MSI-X with one vector per queue. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
- if (!err)
- return 0;
- /* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- true, false);
- if (!err)
- return 0;
- /* Finally fall back to regular interrupts. */
- return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- false, false);
-}
-
-static const char *vp_bus_name(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- return pci_name(vp_dev->pci_dev);
-}
-
-/* Setup the affinity for a virtqueue:
- * - force the affinity for per vq vector
- * - OR over all affinities for shared MSI
- * - ignore the affinity request if we're using INTX
- */
-static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
-{
- struct virtio_device *vdev = vq->vdev;
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_vq_info *info = vq->priv;
- struct cpumask *mask;
- unsigned int irq;
-
- if (!vq->callback)
- return -EINVAL;
-
- if (vp_dev->msix_enabled) {
- mask = vp_dev->msix_affinity_masks[info->msix_vector];
- irq = vp_dev->msix_entries[info->msix_vector].vector;
- if (cpu == -1)
- irq_set_affinity_hint(irq, NULL);
- else {
- cpumask_set_cpu(cpu, mask);
- irq_set_affinity_hint(irq, mask);
- }
- }
- return 0;
-}
-
-static const struct virtio_config_ops virtio_pci_config_ops = {
- .get = vp_get,
- .set = vp_set,
- .get_status = vp_get_status,
- .set_status = vp_set_status,
- .reset = vp_reset,
- .find_vqs = vp_find_vqs,
- .del_vqs = vp_del_vqs,
- .get_features = vp_get_features,
- .finalize_features = vp_finalize_features,
- .bus_name = vp_bus_name,
- .set_vq_affinity = vp_set_vq_affinity,
-};
-
-static void virtio_pci_release_dev(struct device *_d)
-{
- /*
- * No need for a release method as we allocate/free
- * all devices together with the pci devices.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
-/* the PCI probing function */
-static int virtio_pci_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
-{
- struct virtio_pci_device *vp_dev;
- int err;
-
- /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
- return -ENODEV;
-
- if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
- printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
- VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
- return -ENODEV;
- }
-
- /* allocate our structure and fill it out */
- vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
- if (vp_dev == NULL)
- return -ENOMEM;
-
- vp_dev->vdev.dev.parent = &pci_dev->dev;
- vp_dev->vdev.dev.release = virtio_pci_release_dev;
- vp_dev->vdev.config = &virtio_pci_config_ops;
- vp_dev->pci_dev = pci_dev;
- INIT_LIST_HEAD(&vp_dev->virtqueues);
- spin_lock_init(&vp_dev->lock);
-
- /* Disable MSI/MSIX to bring device to a known good state. */
- pci_msi_off(pci_dev);
-
- /* enable the device */
- err = pci_enable_device(pci_dev);
- if (err)
- goto out;
-
- err = pci_request_regions(pci_dev, "virtio-pci");
- if (err)
- goto out_enable_device;
-
- vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
- if (vp_dev->ioaddr == NULL) {
- err = -ENOMEM;
- goto out_req_regions;
- }
-
- pci_set_drvdata(pci_dev, vp_dev);
- pci_set_master(pci_dev);
-
- /* we use the subsystem vendor/device id as the virtio vendor/device
- * id. this allows us to use the same PCI vendor/device id for all
- * virtio devices and to identify the particular virtio driver by
- * the subsystem ids */
- vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
- vp_dev->vdev.id.device = pci_dev->subsystem_device;
-
- /* finally register the virtio device */
- err = register_virtio_device(&vp_dev->vdev);
- if (err)
- goto out_set_drvdata;
-
- return 0;
-
-out_set_drvdata:
- pci_iounmap(pci_dev, vp_dev->ioaddr);
-out_req_regions:
- pci_release_regions(pci_dev);
-out_enable_device:
- pci_disable_device(pci_dev);
-out:
- kfree(vp_dev);
- return err;
-}
-
-static void virtio_pci_remove(struct pci_dev *pci_dev)
-{
- struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
-
- unregister_virtio_device(&vp_dev->vdev);
-
- vp_del_vqs(&vp_dev->vdev);
- pci_iounmap(pci_dev, vp_dev->ioaddr);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
- kfree(vp_dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int virtio_pci_freeze(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
- int ret;
-
- ret = virtio_device_freeze(&vp_dev->vdev);
-
- if (!ret)
- pci_disable_device(pci_dev);
- return ret;
-}
-
-static int virtio_pci_restore(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
- int ret;
-
- ret = pci_enable_device(pci_dev);
- if (ret)
- return ret;
-
- pci_set_master(pci_dev);
- return virtio_device_restore(&vp_dev->vdev);
-}
-
-static const struct dev_pm_ops virtio_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
-};
-#endif
-
-static struct pci_driver virtio_pci_driver = {
- .name = "virtio-pci",
- .id_table = virtio_pci_id_table,
- .probe = virtio_pci_probe,
- .remove = virtio_pci_remove,
-#ifdef CONFIG_PM_SLEEP
- .driver.pm = &virtio_pci_pm_ops,
-#endif
-};
-
-module_pci_driver(virtio_pci_driver);
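
The MSI-X vector-allocation policy from the file removed above is carried over unchanged into virtio_pci_common.c below: one vector for configuration changes plus one per callback-bearing virtqueue when per-VQ vectors are requested, otherwise two vectors total. A standalone sketch of just that counting step, using hypothetical callbacks:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*demo_cb_t)(void *vq);	/* simplified stand-in callback type */

/* One vector for config changes, plus one per vq that has a callback when
 * per_vq_vectors is requested; otherwise config plus one shared vq vector. */
static int count_msix_vectors(demo_cb_t *callbacks, unsigned int nvqs,
			      bool per_vq_vectors)
{
	int nvectors;
	unsigned int i;

	if (!per_vq_vectors)
		return 2;

	nvectors = 1;
	for (i = 0; i < nvqs; i++)
		if (callbacks[i])
			nvectors++;
	return nvectors;
}

static void dummy_cb(void *vq) { (void)vq; }

int main(void)
{
	demo_cb_t cbs[] = { dummy_cb, NULL, dummy_cb };

	printf("per-vq: %d, shared: %d\n",
	       count_msix_vectors(cbs, 3, true),
	       count_msix_vectors(cbs, 3, false));
	return 0;
}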
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
new file mode 100644
index 000000000000..953057d84185
--- /dev/null
+++ b/drivers/virtio/virtio_pci_common.c
@@ -0,0 +1,464 @@
+/*
+ * Virtio PCI driver - common functionality for all device versions
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Rusty Russell <rusty@rustcorp.com.au>
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "virtio_pci_common.h"
+
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled)
+ synchronize_irq(vp_dev->pci_dev->irq);
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
+ synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
+/* the notify function used when creating a virt queue */
+bool vp_notify(struct virtqueue *vq)
+{
+ /* we write the queue's selector into the notification register to
+ * signal the other end */
+ iowrite16(vq->index, (void __iomem *)vq->priv);
+ return true;
+}
+
+/* Handle a configuration change: Tell driver if it wants to know. */
+static irqreturn_t vp_config_changed(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+
+ virtio_config_changed(&vp_dev->vdev);
+ return IRQ_HANDLED;
+}
+
+/* Notify all virtqueues on an interrupt. */
+static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+ struct virtio_pci_vq_info *info;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->virtqueues, node) {
+ if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ return ret;
+}
+
+/* A small wrapper to also acknowledge the interrupt when it's handled.
+ * I really need an EIO hook for the vring so I can ack the interrupt once we
+ * know that we'll be handling the IRQ but before we invoke the callback since
+ * the callback may notify the host which results in the host attempting to
+ * raise an interrupt that we would then mask once we acknowledged the
+ * interrupt. */
+static irqreturn_t vp_interrupt(int irq, void *opaque)
+{
+ struct virtio_pci_device *vp_dev = opaque;
+ u8 isr;
+
+ /* reading the ISR has the effect of also clearing it so it's very
+ * important to save off the value. */
+ isr = ioread8(vp_dev->isr);
+
+ /* It's definitely not us if the ISR was not high */
+ if (!isr)
+ return IRQ_NONE;
+
+ /* Configuration change? Tell driver if it wants to know. */
+ if (isr & VIRTIO_PCI_ISR_CONFIG)
+ vp_config_changed(irq, opaque);
+
+ return vp_vring_interrupt(irq, opaque);
+}
+
+static void vp_free_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled) {
+ free_irq(vp_dev->pci_dev->irq, vp_dev);
+ vp_dev->intx_enabled = 0;
+ }
+
+ for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+ free_irq(vp_dev->msix_entries[i].vector, vp_dev);
+
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
+ if (vp_dev->msix_enabled) {
+ /* Disable the vector used for configuration */
+ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
+
+ pci_disable_msix(vp_dev->pci_dev);
+ vp_dev->msix_enabled = 0;
+ }
+
+ vp_dev->msix_vectors = 0;
+ vp_dev->msix_used_vectors = 0;
+ kfree(vp_dev->msix_names);
+ vp_dev->msix_names = NULL;
+ kfree(vp_dev->msix_entries);
+ vp_dev->msix_entries = NULL;
+ kfree(vp_dev->msix_affinity_masks);
+ vp_dev->msix_affinity_masks = NULL;
+}
+
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ bool per_vq_vectors)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ const char *name = dev_name(&vp_dev->vdev.dev);
+ unsigned i, v;
+ int err = -ENOMEM;
+
+ vp_dev->msix_vectors = nvectors;
+
+ vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
+ GFP_KERNEL);
+ if (!vp_dev->msix_entries)
+ goto error;
+ vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+ GFP_KERNEL);
+ if (!vp_dev->msix_names)
+ goto error;
+ vp_dev->msix_affinity_masks
+ = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+ GFP_KERNEL);
+ if (!vp_dev->msix_affinity_masks)
+ goto error;
+ for (i = 0; i < nvectors; ++i)
+ if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+ GFP_KERNEL))
+ goto error;
+
+ for (i = 0; i < nvectors; ++i)
+ vp_dev->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(vp_dev->pci_dev,
+ vp_dev->msix_entries, nvectors);
+ if (err)
+ goto error;
+ vp_dev->msix_enabled = 1;
+
+ /* Set the vector used for configuration */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-config", name);
+ err = request_irq(vp_dev->msix_entries[v].vector,
+ vp_config_changed, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error;
+ ++vp_dev->msix_used_vectors;
+
+ v = vp_dev->config_vector(vp_dev, v);
+ /* Verify we had enough resources to assign the vector */
+ if (v == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!per_vq_vectors) {
+ /* Shared vector for all VQs */
+ v = vp_dev->msix_used_vectors;
+ snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+ "%s-virtqueues", name);
+ err = request_irq(vp_dev->msix_entries[v].vector,
+ vp_vring_interrupt, 0, vp_dev->msix_names[v],
+ vp_dev);
+ if (err)
+ goto error;
+ ++vp_dev->msix_used_vectors;
+ }
+ return 0;
+error:
+ vp_free_vectors(vdev);
+ return err;
+}
+
+static int vp_request_intx(struct virtio_device *vdev)
+{
+ int err;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+ IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
+ if (!err)
+ vp_dev->intx_enabled = 1;
+ return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+ struct virtqueue *vq;
+ unsigned long flags;
+
+ /* fill out our structure that represents an active queue */
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
+ if (IS_ERR(vq))
+ goto out_info;
+
+ info->vq = vq;
+ if (callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+ vp_dev->vqs[index] = info;
+ return vq;
+
+out_info:
+ kfree(info);
+ return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ vp_dev->del_vq(info);
+ kfree(info);
+}
+
+/* the config->del_vqs() implementation */
+void vp_del_vqs(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq, *n;
+ struct virtio_pci_vq_info *info;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ info = vp_dev->vqs[vq->index];
+ if (vp_dev->per_vq_vectors &&
+ info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ free_irq(vp_dev->msix_entries[info->msix_vector].vector,
+ vq);
+ vp_del_vq(vq);
+ }
+ vp_dev->per_vq_vectors = false;
+
+ vp_free_vectors(vdev);
+ kfree(vp_dev->vqs);
+}
+
+static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[],
+ bool use_msix,
+ bool per_vq_vectors)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ u16 msix_vec;
+ int i, err, nvectors, allocated_vectors;
+
+ vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
+ if (!vp_dev->vqs)
+ return -ENOMEM;
+
+ if (!use_msix) {
+ /* Old style: one normal interrupt for change and all vqs. */
+ err = vp_request_intx(vdev);
+ if (err)
+ goto error_find;
+ } else {
+ if (per_vq_vectors) {
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++nvectors;
+ } else {
+ /* Second best: one for change, shared for all vqs. */
+ nvectors = 2;
+ }
+
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+ if (err)
+ goto error_find;
+ }
+
+ vp_dev->per_vq_vectors = per_vq_vectors;
+ allocated_vectors = vp_dev->msix_used_vectors;
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ } else if (!callbacks[i] || !vp_dev->msix_enabled)
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
+ else if (vp_dev->per_vq_vectors)
+ msix_vec = allocated_vectors++;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error_find;
+ }
+
+ if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
+ continue;
+
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec],
+ sizeof *vp_dev->msix_names,
+ "%s-%s",
+ dev_name(&vp_dev->vdev.dev), names[i]);
+ err = request_irq(vp_dev->msix_entries[msix_vec].vector,
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec],
+ vqs[i]);
+ if (err) {
+ vp_del_vq(vqs[i]);
+ goto error_find;
+ }
+ }
+ return 0;
+
+error_find:
+ vp_del_vqs(vdev);
+ return err;
+}
+
+/* the config->find_vqs() implementation */
+int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ int err;
+
+ /* Try MSI-X with one vector per queue. */
+ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
+ if (!err)
+ return 0;
+ /* Fallback: MSI-X with one vector for config, one shared for queues. */
+ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+ true, false);
+ if (!err)
+ return 0;
+ /* Finally fall back to regular interrupts. */
+ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+ false, false);
+}
+
+const char *vp_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return pci_name(vp_dev->pci_dev);
+}
+
+/* Setup the affinity for a virtqueue:
+ * - force the affinity for per vq vector
+ * - OR over all affinities for shared MSI
+ * - ignore the affinity request if we're using INTX
+ */
+int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+ struct cpumask *mask;
+ unsigned int irq;
+
+ if (!vq->callback)
+ return -EINVAL;
+
+ if (vp_dev->msix_enabled) {
+ mask = vp_dev->msix_affinity_masks[info->msix_vector];
+ irq = vp_dev->msix_entries[info->msix_vector].vector;
+ if (cpu == -1)
+ irq_set_affinity_hint(irq, NULL);
+ else {
+ cpumask_set_cpu(cpu, mask);
+ irq_set_affinity_hint(irq, mask);
+ }
+ }
+ return 0;
+}
+
+void virtio_pci_release_dev(struct device *_d)
+{
+ /*
+ * No need for a release method as we allocate/free
+ * all devices together with the pci devices.
+ * Provide an empty one to avoid getting a warning from core.
+ */
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_pci_freeze(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ ret = virtio_device_freeze(&vp_dev->vdev);
+
+ if (!ret)
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static int virtio_pci_restore(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci_dev);
+ return virtio_device_restore(&vp_dev->vdev);
+}
+
+const struct dev_pm_ops virtio_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
+};
+#endif
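
vp_find_vqs() above tries the interrupt layouts in order of preference: MSI-X with a vector per queue, then MSI-X with one shared queue vector, then a single INTx line. The try-in-order pattern is sketched below with stand-in functions; the failure value is hypothetical.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for vp_try_to_find_vqs(); pretend only the INTx layout works. */
static int try_layout(bool use_msix, bool per_vq_vectors)
{
	(void)per_vq_vectors;
	if (use_msix)
		return -ENOSPC;	/* hypothetical: not enough vectors */
	return 0;
}

static int find_vqs(void)
{
	int err;

	/* Best: MSI-X with one vector per queue. */
	err = try_layout(true, true);
	if (!err)
		return 0;
	/* Next: MSI-X with one config vector plus one shared queue vector. */
	err = try_layout(true, false);
	if (!err)
		return 0;
	/* Last resort: a single shared INTx interrupt. */
	return try_layout(false, false);
}

int main(void)
{
	printf("find_vqs -> %d\n", find_vqs());
	return 0;
}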
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
new file mode 100644
index 000000000000..d840dad4149d
--- /dev/null
+++ b/drivers/virtio/virtio_pci_common.h
@@ -0,0 +1,136 @@
+#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
+#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
+/*
+ * Virtio PCI driver - APIs for common functionality for all device versions
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Rusty Russell <rusty@rustcorp.com.au>
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#define VIRTIO_PCI_NO_LEGACY
+#include <linux/virtio_pci.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+
+struct virtio_pci_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the number of entries in the queue */
+ int num;
+
+ /* the virtual address of the ring queue */
+ void *queue;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+
+ /* MSI-X vector (or none) */
+ unsigned msix_vector;
+};
+
+/* Our device structure */
+struct virtio_pci_device {
+ struct virtio_device vdev;
+ struct pci_dev *pci_dev;
+
+ /* the IO mapping for the PCI config space */
+ void __iomem *ioaddr;
+
+ /* the IO mapping for ISR operation */
+ void __iomem *isr;
+
+ /* a list of queues so we can dispatch IRQs */
+ spinlock_t lock;
+ struct list_head virtqueues;
+
+ /* array of all queues for house-keeping */
+ struct virtio_pci_vq_info **vqs;
+
+ /* MSI-X support */
+ int msix_enabled;
+ int intx_enabled;
+ struct msix_entry *msix_entries;
+ cpumask_var_t *msix_affinity_masks;
+ /* Name strings for interrupts. This size should be enough,
+ * and I'm too lazy to allocate each name separately. */
+ char (*msix_names)[256];
+ /* Number of available vectors */
+ unsigned msix_vectors;
+ /* Vectors allocated, excluding per-vq vectors if any */
+ unsigned msix_used_vectors;
+
+ /* Whether we have vector per vq */
+ bool per_vq_vectors;
+
+ struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+ struct virtio_pci_vq_info *info,
+ unsigned idx,
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ u16 msix_vec);
+ void (*del_vq)(struct virtio_pci_vq_info *info);
+
+ u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
+};
+
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI-X. */
+enum {
+ VP_MSIX_CONFIG_VECTOR = 0,
+ VP_MSIX_VQ_VECTOR = 1,
+};
+
+/* Convert a generic virtio device to our structure */
+static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
+{
+ return container_of(vdev, struct virtio_pci_device, vdev);
+}
+
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev);
+/* the notify function used when creating a virt queue */
+bool vp_notify(struct virtqueue *vq);
+/* the config->del_vqs() implementation */
+void vp_del_vqs(struct virtio_device *vdev);
+/* the config->find_vqs() implementation */
+int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[]);
+const char *vp_bus_name(struct virtio_device *vdev);
+
+/* Setup the affinity for a virtqueue:
+ * - force the affinity for per vq vector
+ * - OR over all affinities for shared MSI
+ * - ignore the affinity request if we're using INTX
+ */
+int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
+void virtio_pci_release_dev(struct device *);
+
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops virtio_pci_pm_ops;
+#endif
+
+#endif
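
to_vp_device() in this header recovers the wrapping virtio_pci_device from the embedded virtio_device via container_of(). A freestanding illustration of that idiom follows, using a simplified offsetof()-based container_of() and made-up structure names:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): the kernel version adds type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };

struct outer {
	const char *name;
	struct inner core;	/* embedded member, like vdev above */
};

static struct outer *to_outer(struct inner *core)
{
	return container_of(core, struct outer, core);
}

int main(void)
{
	struct outer o = { .name = "demo", .core = { .id = 7 } };
	struct inner *p = &o.core;

	printf("%s %d\n", to_outer(p)->name, to_outer(p)->core.id);
	return 0;
}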
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
new file mode 100644
index 000000000000..2588252e5c1c
--- /dev/null
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -0,0 +1,326 @@
+/*
+ * Virtio PCI driver - legacy device support
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright Red Hat, Inc. 2014
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Rusty Russell <rusty@rustcorp.com.au>
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "virtio_pci_common.h"
+
+/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
+static const struct pci_device_id virtio_pci_id_table[] = {
+ { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
+
+/* virtio config->get_features() implementation */
+static u64 vp_get_features(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* When someone needs more than 32 feature bits, we'll need to
+ * steal a bit to indicate that the rest are somewhere else. */
+ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+}
+
+/* virtio config->finalize_features() implementation */
+static int vp_finalize_features(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
+ /* We only support 32 feature bits. */
+ iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+
+ return 0;
+}
+
+/* virtio config->get() implementation */
+static void vp_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void __iomem *ioaddr = vp_dev->ioaddr +
+ VIRTIO_PCI_CONFIG(vp_dev) + offset;
+ u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ ptr[i] = ioread8(ioaddr + i);
+}
+
+/* the config->set() implementation. it's symmetric to the config->get()
+ * implementation */
+static void vp_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void __iomem *ioaddr = vp_dev->ioaddr +
+ VIRTIO_PCI_CONFIG(vp_dev) + offset;
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ iowrite8(ptr[i], ioaddr + i);
+}
+
+/* config->{get,set}_status() implementations */
+static u8 vp_get_status(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+}
+
+static void vp_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ /* We should never be setting status to 0. */
+ BUG_ON(status == 0);
+ iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+}
+
+static void vp_reset(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ /* 0 status means a reset. */
+ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any. */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush pending VQ/configuration callbacks. */
+ vp_synchronize_vectors(vdev);
+}
+
+static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
+{
+ /* Setup the vector used for configuration events */
+ iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ /* Verify we had enough resources to assign the vector */
+ /* Will also flush the write out to device */
+ return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+}
+
+static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ struct virtio_pci_vq_info *info,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ u16 msix_vec)
+{
+ struct virtqueue *vq;
+ unsigned long size;
+ u16 num;
+ int err;
+
+ /* Select the queue we're interested in */
+ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ /* Check if queue is either not available or already active. */
+ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
+ if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
+ return ERR_PTR(-ENOENT);
+
+ info->num = num;
+ info->msix_vector = msix_vec;
+
+ size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
+ info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
+ if (info->queue == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* activate the queue */
+ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+ vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ /* create the vring */
+ vq = vring_new_virtqueue(index, info->num,
+ VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
+ true, info->queue, vp_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto out_activate_queue;
+ }
+
+ vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
+ err = -EBUSY;
+ goto out_assign;
+ }
+ }
+
+ return vq;
+
+out_assign:
+ vring_del_virtqueue(vq);
+out_activate_queue:
+ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ free_pages_exact(info->queue, size);
+ return ERR_PTR(err);
+}
+
+static void del_vq(struct virtio_pci_vq_info *info)
+{
+ struct virtqueue *vq = info->vq;
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ unsigned long size;
+
+ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ if (vp_dev->msix_enabled) {
+ iowrite16(VIRTIO_MSI_NO_VECTOR,
+ vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ /* Flush the write out to device */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+ }
+
+ vring_del_virtqueue(vq);
+
+ /* Select and deactivate the queue */
+ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
+ free_pages_exact(info->queue, size);
+}
+
+static const struct virtio_config_ops virtio_pci_config_ops = {
+ .get = vp_get,
+ .set = vp_set,
+ .get_status = vp_get_status,
+ .set_status = vp_set_status,
+ .reset = vp_reset,
+ .find_vqs = vp_find_vqs,
+ .del_vqs = vp_del_vqs,
+ .get_features = vp_get_features,
+ .finalize_features = vp_finalize_features,
+ .bus_name = vp_bus_name,
+ .set_vq_affinity = vp_set_vq_affinity,
+};
+
+/* the PCI probing function */
+static int virtio_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct virtio_pci_device *vp_dev;
+ int err;
+
+ /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
+ return -ENODEV;
+
+ if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
+ printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
+ VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
+ return -ENODEV;
+ }
+
+ /* allocate our structure and fill it out */
+ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
+ if (vp_dev == NULL)
+ return -ENOMEM;
+
+ vp_dev->vdev.dev.parent = &pci_dev->dev;
+ vp_dev->vdev.dev.release = virtio_pci_release_dev;
+ vp_dev->vdev.config = &virtio_pci_config_ops;
+ vp_dev->pci_dev = pci_dev;
+ INIT_LIST_HEAD(&vp_dev->virtqueues);
+ spin_lock_init(&vp_dev->lock);
+
+ /* Disable MSI/MSIX to bring device to a known good state. */
+ pci_msi_off(pci_dev);
+
+ /* enable the device */
+ err = pci_enable_device(pci_dev);
+ if (err)
+ goto out;
+
+ err = pci_request_regions(pci_dev, "virtio-pci");
+ if (err)
+ goto out_enable_device;
+
+ vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
+ if (vp_dev->ioaddr == NULL) {
+ err = -ENOMEM;
+ goto out_req_regions;
+ }
+
+ vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
+
+ pci_set_drvdata(pci_dev, vp_dev);
+ pci_set_master(pci_dev);
+
+ /* we use the subsystem vendor/device id as the virtio vendor/device
+ * id. this allows us to use the same PCI vendor/device id for all
+ * virtio devices and to identify the particular virtio driver by
+ * the subsystem ids */
+ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
+ vp_dev->vdev.id.device = pci_dev->subsystem_device;
+
+ vp_dev->config_vector = vp_config_vector;
+ vp_dev->setup_vq = setup_vq;
+ vp_dev->del_vq = del_vq;
+
+ /* finally register the virtio device */
+ err = register_virtio_device(&vp_dev->vdev);
+ if (err)
+ goto out_set_drvdata;
+
+ return 0;
+
+out_set_drvdata:
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+out_req_regions:
+ pci_release_regions(pci_dev);
+out_enable_device:
+ pci_disable_device(pci_dev);
+out:
+ kfree(vp_dev);
+ return err;
+}
+
+static void virtio_pci_remove(struct pci_dev *pci_dev)
+{
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+
+ unregister_virtio_device(&vp_dev->vdev);
+
+ vp_del_vqs(&vp_dev->vdev);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ kfree(vp_dev);
+}
+
+static struct pci_driver virtio_pci_driver = {
+ .name = "virtio-pci",
+ .id_table = virtio_pci_id_table,
+ .probe = virtio_pci_probe,
+ .remove = virtio_pci_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver.pm = &virtio_pci_pm_ops,
+#endif
+};
+
+module_pci_driver(virtio_pci_driver);
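
setup_vq() above activates a ring by writing its page frame number: the ring's physical address shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT (12 in the legacy interface, i.e. 4 KiB frames), which is why a 32-bit PFN register can still address rings up to 2^44. The arithmetic is sketched below with a made-up physical address:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_ADDR_SHIFT 12	/* legacy interface: 4 KiB page frames */

/* The register takes a 32-bit PFN, so the ring must lie below 1ULL << 44. */
static uint32_t ring_pfn(uint64_t ring_phys_addr)
{
	return (uint32_t)(ring_phys_addr >> QUEUE_ADDR_SHIFT);
}

int main(void)
{
	uint64_t phys = 0x12345000ULL;	/* hypothetical page-aligned ring */

	printf("PFN written to QUEUE_PFN: %#x\n", ring_pfn(phys));
	printf("address recovered by the device: %#llx\n",
	       (unsigned long long)((uint64_t)ring_pfn(phys) << QUEUE_ADDR_SHIFT));
	return 0;
}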
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 3b1f89b6e743..00ec6b3f96b2 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -99,7 +99,8 @@ struct vring_virtqueue
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
-static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
+static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
+ unsigned int total_sg, gfp_t gfp)
{
struct vring_desc *desc;
unsigned int i;
@@ -116,7 +117,7 @@ static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
return NULL;
for (i = 0; i < total_sg; i++)
- desc[i].next = i+1;
+ desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
return desc;
}
@@ -165,17 +166,17 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && total_sg > 1 && vq->vq.num_free)
- desc = alloc_indirect(total_sg, gfp);
+ desc = alloc_indirect(_vq, total_sg, gfp);
else
desc = NULL;
if (desc) {
/* Use a single buffer which doesn't continue */
- vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
- vq->vring.desc[head].addr = virt_to_phys(desc);
+ vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
+ vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
/* avoid kmemleak false positive (hidden by virt_to_phys) */
kmemleak_ignore(desc);
- vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc);
+ vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
/* Set up rest to use this indirect table. */
i = 0;
@@ -205,28 +206,28 @@ static inline int virtqueue_add(struct virtqueue *_vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- desc[i].flags = VRING_DESC_F_NEXT;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
+ desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
- i = desc[i].next;
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
+ desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
- i = desc[i].next;
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
}
/* Last one doesn't continue. */
- desc[prev].flags &= ~VRING_DESC_F_NEXT;
+ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
/* Update free pointer */
if (indirect)
- vq->free_head = vq->vring.desc[head].next;
+ vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
else
vq->free_head = i;
@@ -235,13 +236,13 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
- avail = (vq->vring.avail->idx & (vq->vring.num-1));
- vq->vring.avail->ring[avail] = head;
+ avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
+ vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq->weak_barriers);
- vq->vring.avail->idx++;
+ vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
vq->num_added++;
/* This is very unlikely, but theoretically possible. Kick
@@ -354,8 +355,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
* event. */
virtio_mb(vq->weak_barriers);
- old = vq->vring.avail->idx - vq->num_added;
- new = vq->vring.avail->idx;
+ old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
+ new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
vq->num_added = 0;
#ifdef DEBUG
@@ -367,10 +368,10 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
#endif
if (vq->event) {
- needs_kick = vring_need_event(vring_avail_event(&vq->vring),
+ needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
new, old);
} else {
- needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+ needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
}
END_USE(vq);
return needs_kick;
@@ -432,15 +433,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
i = head;
/* Free the indirect table */
- if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
- kfree(phys_to_virt(vq->vring.desc[i].addr));
+ if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
+ kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
- while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
- i = vq->vring.desc[i].next;
+ while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
+ i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
vq->vq.num_free++;
}
- vq->vring.desc[i].next = vq->free_head;
+ vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
vq->free_head = head;
/* Plus final descriptor */
vq->vq.num_free++;
@@ -448,7 +449,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
static inline bool more_used(const struct vring_virtqueue *vq)
{
- return vq->last_used_idx != vq->vring.used->idx;
+ return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
@@ -491,8 +492,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
virtio_rmb(vq->weak_barriers);
last_used = (vq->last_used_idx & (vq->vring.num - 1));
- i = vq->vring.used->ring[last_used].id;
- *len = vq->vring.used->ring[last_used].len;
+ i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
+ *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
if (unlikely(i >= vq->vring.num)) {
BAD_RING(vq, "id %u out of range\n", i);
@@ -510,8 +511,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
* the read in the next get_buf call. */
- if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
- vring_used_event(&vq->vring) = vq->last_used_idx;
+ if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
+ vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
virtio_mb(vq->weak_barriers);
}
@@ -537,7 +538,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
@@ -565,8 +566,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
- vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+ vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+ vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
return last_used_idx;
}
@@ -586,7 +587,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
struct vring_virtqueue *vq = to_vvq(_vq);
virtio_mb(vq->weak_barriers);
- return (u16)last_used_idx != vq->vring.used->idx;
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
@@ -633,12 +634,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
- vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
/* TODO: tune this threshold */
- bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
- vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+ bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+ vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
virtio_mb(vq->weak_barriers);
- if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+ if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
}
@@ -670,7 +671,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
- vq->vring.avail->idx--;
+ vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
END_USE(vq);
return buf;
}
@@ -747,12 +748,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
/* No callback? Tell other side not to bother us. */
if (!callback)
- vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
/* Put everything in free lists. */
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
- vq->vring.desc[i].next = i+1;
+ vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
vq->data[i] = NULL;
}
vq->data[i] = NULL;
@@ -779,9 +780,11 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_RING_F_EVENT_IDX:
break;
+ case VIRTIO_F_VERSION_1:
+ break;
default:
/* We don't understand this bit. */
- clear_bit(i, vdev->features);
+ __virtio_clear_bit(vdev, i);
}
}
}
@@ -826,4 +829,20 @@ void virtio_break_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_break_device);
+void *virtqueue_get_avail(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->vring.avail;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_avail);
+
+void *virtqueue_get_used(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->vring.used;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_used);
+
MODULE_LICENSE("GPL");
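Editor's note: the virtio_ring.c conversion above funnels every guest-visible ring field through the virtio16/32/64 helpers, so the same code can drive both a legacy (native-endian) device and a VIRTIO_F_VERSION_1 (little-endian) device regardless of guest endianness. A minimal sketch of the access pattern, using the same helpers and the field widths of struct vring_desc as seen in the hunks above:

    /* Sketch: filling one descriptor with the endian-aware accessors,
     * mirroring the pattern used in virtqueue_add() above. */
    static void fill_desc(struct virtio_device *vdev, struct vring_desc *d,
                          u64 addr, u32 len, u16 flags, u16 next)
    {
            d->addr  = cpu_to_virtio64(vdev, addr);
            d->len   = cpu_to_virtio32(vdev, len);
            d->flags = cpu_to_virtio16(vdev, flags);
            d->next  = cpu_to_virtio16(vdev, next);
    }

    /* Reads convert the other way before the value is used in guest arithmetic. */
    static u16 next_desc(struct virtio_device *vdev, const struct vring_desc *d)
    {
            return virtio16_to_cpu(vdev, d->next);
    }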
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 1de6df87bfa3..049a884a756f 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -253,7 +253,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
count = 0;
err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
- dev->ep[EP_STATUS]), buf, size, &count, 100);
+ dev->ep[EP_STATUS]), buf, size, &count, 1000);
if (err < 0) {
pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
dev->ep[EP_STATUS], err);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index da3d0f0ad63c..53bf2c860ad3 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -175,7 +175,6 @@ MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
static struct platform_driver mxc_w1_driver = {
.driver = {
.name = "mxc_w1",
- .owner = THIS_MODULE,
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
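Editor's note: this is the first of many hunks below that drop ".owner = THIS_MODULE" from platform drivers. The assignment is redundant because, in this tree, platform_driver_register() is a macro that passes the calling module down to the core, which fills in driver.owner itself. A hedged sketch of the mechanism (the helper's shape is assumed from the driver core of this era):

    /* include/linux/platform_device.h (sketch) */
    #define platform_driver_register(drv) \
            __platform_driver_register(drv, THIS_MODULE)

    /* drivers/base/platform.c (sketch) */
    int __platform_driver_register(struct platform_driver *drv,
                                   struct module *owner)
    {
            drv->driver.owner = owner;      /* what the removed lines used to do */
            drv->driver.bus = &platform_bus_type;
            /* ... probe/remove/shutdown glue ... */
            return driver_register(&drv->driver);
    }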
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 1d111e56c8c8..b99a932ad901 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -228,7 +228,6 @@ static int w1_gpio_resume(struct platform_device *pdev)
static struct platform_driver w1_gpio_driver = {
.driver = {
.name = "w1-gpio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index afbefed5f2c9..caafb1722783 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -88,7 +88,7 @@ static struct w1_family_ops w1_bq27000_fops = {
};
static struct w1_family w1_bq27000_family = {
- .fid = 1,
+ .fid = W1_FAMILY_BQ27000,
.fops = &w1_bq27000_fops,
};
@@ -111,7 +111,7 @@ module_exit(w1_bq27000_exit);
module_param(F_ID, int, S_IRUSR);
MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
-
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Ltd");
MODULE_DESCRIPTION("HDQ/1-wire slave driver bq27000 battery monitor chip");
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 592f7edc671e..181f41cb960b 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -727,7 +727,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
- request_module("w1-family-0x%0x", rn->family);
+ request_module("w1-family-0x%02x", rn->family);
mutex_lock(&dev->mutex);
spin_lock(&w1_flock);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 0d18365b61ad..ed5dcb80a1f7 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -27,6 +27,7 @@
#include <linux/atomic.h>
#define W1_FAMILY_DEFAULT 0
+#define W1_FAMILY_BQ27000 0x01
#define W1_FAMILY_SMEM_01 0x01
#define W1_FAMILY_SMEM_81 0x81
#define W1_THERM_DS18S20 0x10
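Editor's note: taken together, the w1 changes above make family-driver autoloading line up: the core now zero-pads the family id in its request_module() call, and the bq27000 slave advertises a module alias built from the new W1_FAMILY_BQ27000 constant, so both sides produce the string "w1-family-0x01". A small userspace illustration of the string matching (not kernel code; the __stringify() helpers are copied from include/linux/stringify.h):

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)
    #define W1_FAMILY_BQ27000 0x01

    int main(void)
    {
            char requested[32];

            /* what w1_attach_slave_device() asks for after the %02x fix */
            snprintf(requested, sizeof(requested), "w1-family-0x%02x", 0x01);

            /* what MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000)) yields */
            printf("alias:     w1-family-%s\n", __stringify(W1_FAMILY_BQ27000));
            printf("requested: %s\n", requested);  /* both are w1-family-0x01 */
            return 0;
    }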
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index dd9656237274..881597a191b8 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -598,7 +598,7 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
msg = (struct w1_netlink_msg *)(cn + 1);
if (node_count) {
int size;
- u16 reply_size = sizeof(*cn) + cn->len + slave_len;
+ int reply_size = sizeof(*cn) + cn->len + slave_len;
if (cn->flags & W1_CN_BUNDLE) {
/* bundling duplicates some of the messages */
reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 5614416f1032..d6210d946082 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -294,7 +294,6 @@ static struct platform_driver acquirewdt_driver = {
.remove = acq_remove,
.shutdown = acq_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 7796db7fa6e1..7d7db0c5a64e 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -302,7 +302,6 @@ static struct platform_driver advwdt_driver = {
.remove = advwdt_remove,
.shutdown = advwdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index ae6c287a49cb..6d5ae251e309 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -324,7 +324,6 @@ static struct platform_driver ar7_wdt_driver = {
.remove = ar7_wdt_remove,
.shutdown = ar7_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "ar7_wdt",
},
};
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index 25b5c67d3af9..81ba8920f127 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -422,7 +422,6 @@ static struct platform_driver at32_wdt_driver = {
.resume = at32_wdt_resume,
.driver = {
.name = "at32_wdt",
- .owner = THIS_MODULE,
},
.shutdown = at32_wdt_shutdown,
};
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index dee6cc21d270..d244112d5b6f 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -268,7 +268,6 @@ static struct platform_driver at91wdt_driver = {
.resume = at91wdt_resume,
.driver = {
.name = "at91_wdt",
- .owner = THIS_MODULE,
.of_match_table = at91_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 489729b26298..6df940528fd2 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -393,7 +393,6 @@ static struct platform_driver at91wdt_driver = {
.remove = __exit_p(at91wdt_remove),
.driver = {
.name = "at91_wdt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91_wdt_dt_ids),
},
};
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 41ac4660fb89..835d310081e1 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -326,7 +326,6 @@ static struct platform_driver ath79_wdt_driver = {
.shutdown = ath79_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ath79_wdt_match),
},
};
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 8df450c090a9..2b5a9bbf80b7 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -167,7 +167,6 @@ static struct platform_driver bcm2835_wdt_driver = {
.shutdown = bcm2835_wdt_shutdown,
.driver = {
.name = "bcm2835-wdt",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_wdt_of_match,
},
};
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index b61fcc535979..9816485f6825 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -242,7 +242,6 @@ static int bcm47xx_wdt_remove(struct platform_device *pdev)
static struct platform_driver bcm47xx_wdt_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "bcm47xx-wdt",
},
.probe = bcm47xx_wdt_probe,
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 5a8e879a430a..ab26fd90729e 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -304,7 +304,6 @@ static struct platform_driver bcm63xx_wdt_driver = {
.remove = bcm63xx_wdt_remove,
.shutdown = bcm63xx_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "bcm63xx-wdt",
}
};
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 9c248099f4a2..4e37db3539a4 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -352,7 +352,6 @@ MODULE_DEVICE_TABLE(of, bcm_kona_wdt_of_match);
static struct platform_driver bcm_kona_wdt_driver = {
.driver = {
.name = BCM_KONA_WDT_NAME,
- .owner = THIS_MODULE,
.of_match_table = bcm_kona_wdt_of_match,
},
.probe = bcm_kona_wdt_probe,
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index a3b6a5b30f9f..aa4d2e8a8ef9 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -407,7 +407,6 @@ static struct platform_driver bfin_wdt_driver = {
.resume = bfin_wdt_resume,
.driver = {
.name = WATCHDOG_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 4bd070f524e5..ce12f437f195 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -448,7 +448,6 @@ static const struct of_device_id coh901327_dt_match[] = {
static struct platform_driver coh901327_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "coh901327_wdog",
.of_match_table = coh901327_dt_match,
},
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index e55ed702209f..02007689e9ca 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -679,7 +679,6 @@ MODULE_DEVICE_TABLE(of, cpwd_match);
static struct platform_driver cpwd_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = cpwd_match,
},
.probe = cpwd_probe,
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index d09ad2254b57..cfdf8a408aea 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -218,7 +218,6 @@ MODULE_DEVICE_TABLE(of, davinci_wdt_of_match);
static struct platform_driver platform_wdt_driver = {
.driver = {
.name = "davinci-wdt",
- .owner = THIS_MODULE,
.of_match_table = davinci_wdt_of_match,
},
.probe = davinci_wdt_probe,
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 9e577a64ec9e..b34a2e4e4e43 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -382,7 +382,6 @@ static struct platform_driver dw_wdt_driver = {
.remove = dw_wdt_drv_remove,
.driver = {
.name = "dw_wdt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_wdt_of_match),
.pm = &dw_wdt_pm_ops,
},
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 5f54e1e5819a..7a2cc7191c58 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -156,7 +156,6 @@ static int ep93xx_wdt_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_wdt_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ep93xx-wdt",
},
.probe = ep93xx_wdt_probe,
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 25beb30878d7..cc1bdfc2ff71 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -307,7 +307,6 @@ static const struct of_device_id gef_wdt_ids[] = {
static struct platform_driver gef_wdt_driver = {
.driver = {
.name = "gef_wdt",
- .owner = THIS_MODULE,
.of_match_table = gef_wdt_ids,
},
.probe = gef_wdt_probe,
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 4c43e3fa8bd2..88e01238f01b 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -258,7 +258,6 @@ static struct platform_driver geodewdt_driver = {
.remove = geodewdt_remove,
.shutdown = geodewdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 220a9e07cfd5..bbdb19b45332 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -241,7 +241,6 @@ MODULE_DEVICE_TABLE(of, gpio_wdt_dt_ids);
static struct platform_driver gpio_wdt_driver = {
.driver = {
.name = "gpio-wdt",
- .owner = THIS_MODULE,
.of_match_table = gpio_wdt_dt_ids,
},
.probe = gpio_wdt_probe,
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 0ba1b7c99760..05ee0bf88ce9 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -576,7 +576,6 @@ static struct platform_driver iTCO_wdt_driver = {
.remove = iTCO_wdt_remove,
.shutdown = iTCO_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 4247c498ee78..f2e4e1eeb8dd 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -339,7 +339,6 @@ static struct platform_driver ibwdt_driver = {
.remove = ibwdt_remove,
.shutdown = ibwdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 07f88f54e5c0..9bc39ae51624 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -313,7 +313,6 @@ static struct platform_driver ie6xx_wdt_driver = {
.remove = ie6xx_wdt_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7e12f88bb4a6..d6add516a7a7 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -326,6 +326,52 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
+#ifdef CONFIG_PM_SLEEP
+/* Disable watchdog if it is active during suspend */
+static int imx2_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *wdog = dev_get_drvdata(dev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
+
+ /* Watchdog has been stopped but IP block is still running */
+ if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
+ del_timer_sync(&wdev->timer);
+
+ clk_disable_unprepare(wdev->clk);
+
+ return 0;
+}
+
+/* Enable watchdog and configure it if necessary */
+static int imx2_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdog = dev_get_drvdata(dev);
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ clk_prepare_enable(wdev->clk);
+
+ if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
+ /* Resuming from deep sleep, we need to restart
+ * the watchdog again.
+ */
+ imx2_wdt_setup(wdog);
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
+ imx2_wdt_ping(wdog);
+ } else if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_ping(wdog);
+ mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
+ imx2_wdt_resume);
+
static const struct of_device_id imx2_wdt_dt_ids[] = {
{ .compatible = "fsl,imx21-wdt", },
{ /* sentinel */ }
@@ -337,7 +383,7 @@ static struct platform_driver imx2_wdt_driver = {
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
+ .pm = &imx2_wdt_pm_ops,
.of_match_table = imx2_wdt_dt_ids,
},
};
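Editor's note: the imx2_wdt hunk above hooks the new suspend/resume callbacks up through SIMPLE_DEV_PM_OPS instead of the legacy platform-bus suspend hooks. A minimal sketch of the same pattern for a hypothetical driver (the foo_* names are placeholders, not from the patch):

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev)
    {
            /* quiesce the hardware; per-device state via dev_get_drvdata(dev) */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* bring the hardware back up */
            return 0;
    }
    #endif

    /* Degenerates to an empty dev_pm_ops when CONFIG_PM_SLEEP is off,
     * so the driver below can reference it unconditionally. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = &foo_pm_ops,
            },
    };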
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index ca66e8e74635..84f6701c391f 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -172,7 +172,6 @@ static struct platform_driver mid_wdt_driver = {
.probe = mid_wdt_probe,
.remove = mid_wdt_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "intel_mid_wdt",
},
};
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 91e45ca589e6..18e41afa4da3 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -211,7 +211,6 @@ static struct platform_driver jz4740_wdt_driver = {
.remove = jz4740_wdt_remove,
.driver = {
.name = "jz4740-wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index d9c1a1601926..5bf931ce1353 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -563,7 +563,6 @@ static int kempld_wdt_resume(struct platform_device *pdev)
static struct platform_driver kempld_wdt_driver = {
.driver = {
.name = "kempld-wdt",
- .owner = THIS_MODULE,
},
.probe = kempld_wdt_probe,
.remove = kempld_wdt_remove,
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 40ca5594a336..b7ea39b455c8 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -296,7 +296,6 @@ static struct platform_driver ks8695wdt_driver = {
.resume = ks8695wdt_resume,
.driver = {
.name = "ks8695_wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 021e84eb88eb..582f2fa1b8d9 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -232,7 +232,6 @@ static struct platform_driver ltq_wdt_driver = {
.remove = ltq_wdt_remove,
.driver = {
.name = "wdt",
- .owner = THIS_MODULE,
.of_match_table = ltq_wdt_match,
},
};
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 0e9cc6f5a919..08da3114accb 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -231,7 +231,6 @@ static struct platform_driver max63xx_wdt_driver = {
.id_table = max63xx_id_table,
.driver = {
.name = "max63xx_wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
index 2042874d5ce3..59f0913c7341 100644
--- a/drivers/watchdog/menf21bmc_wdt.c
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -187,7 +187,6 @@ static void menf21bmc_wdt_shutdown(struct platform_device *pdev)
static struct platform_driver menf21bmc_wdt = {
.driver = {
- .owner = THIS_MODULE,
.name = DEVNAME,
},
.probe = menf21bmc_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index a64405b82596..2789da2c0515 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -174,7 +174,6 @@ static struct platform_driver moxart_wdt_driver = {
.remove = moxart_wdt_remove,
.driver = {
.name = "moxart-watchdog",
- .owner = THIS_MODULE,
.of_match_table = moxart_watchdog_match,
},
};
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 7831955cd9e1..689381a24887 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -249,7 +249,6 @@ static struct platform_driver mpc8xxx_wdt_driver = {
.remove = mpc8xxx_wdt_remove,
.driver = {
.name = "mpc8xxx_wdt",
- .owner = THIS_MODULE,
.of_match_table = mpc8xxx_wdt_match,
},
};
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index f9fa58409396..315275d7bab6 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -300,7 +300,6 @@ static struct platform_driver mv64x60_wdt_driver = {
.probe = mv64x60_wdt_probe,
.remove = mv64x60_wdt_remove,
.driver = {
- .owner = THIS_MODULE,
.name = MV64x60_WDT_NAME,
},
};
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 7135803ca1a3..d5bed78c4d9f 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -297,7 +297,6 @@ static struct platform_driver nuc900wdt_driver = {
.remove = nuc900wdt_remove,
.driver = {
.name = "nuc900-wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 0b9ec61e1313..c028454be66c 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -471,7 +471,6 @@ static struct platform_driver nv_tco_driver = {
.remove = nv_tco_remove,
.shutdown = nv_tco_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = TCO_MODULE_NAME,
},
};
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 3691b157516a..9f2709db61ca 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -353,7 +353,6 @@ static struct platform_driver omap_wdt_driver = {
.suspend = omap_wdt_suspend,
.resume = omap_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
.name = "omap_wdt",
.of_match_table = omap_wdt_of_match,
},
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 8cb1ff3bcd90..ef0c628d5037 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -633,7 +633,6 @@ static struct platform_driver orion_wdt_driver = {
.remove = orion_wdt_remove,
.shutdown = orion_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "orion_wdt",
.of_match_table = orion_wdt_of_match_table,
},
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 15fb316e9437..55e220150103 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -207,7 +207,6 @@ MODULE_DEVICE_TABLE(of, pnx4008_wdt_match);
static struct platform_driver platform_wdt_driver = {
.driver = {
.name = "pnx4008-watchdog",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pnx4008_wdt_match),
},
.probe = pnx4008_wdt_probe,
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 29cf4dcbc59c..47a8f1b1087d 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -287,7 +287,6 @@ static struct platform_driver rdc321x_wdt_driver = {
.probe = rdc321x_wdt_probe,
.remove = rdc321x_wdt_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rdc321x-wdt",
},
};
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index cfed0fe264dc..aba53424605e 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -237,7 +237,6 @@ MODULE_DEVICE_TABLE(of, riowd_match);
static struct platform_driver riowd_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = riowd_match,
},
.probe = riowd_probe,
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index d92c2d5859ce..11aad5b7aafe 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -194,7 +194,6 @@ static struct platform_driver rt288x_wdt_driver = {
.shutdown = rt288x_wdt_shutdown,
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
.of_match_table = rt288x_wdt_match,
},
};
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 1626dc66e763..e89ae027c91d 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -741,7 +741,6 @@ static struct platform_driver s3c2410wdt_driver = {
.shutdown = s3c2410wdt_shutdown,
.id_table = s3c2410_wdt_ids,
.driver = {
- .owner = THIS_MODULE,
.name = "s3c2410-wdt",
.pm = &s3c2410wdt_pm_ops,
.of_match_table = of_match_ptr(s3c2410_wdt_match),
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index b96127ea3de1..43d0cbb7ba0b 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -453,7 +453,6 @@ static struct platform_driver sch311x_wdt_driver = {
.remove = sch311x_wdt_remove,
.shutdown = sch311x_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index fa89bb30d004..567458b137a6 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -303,7 +303,6 @@ static void sh_wdt_shutdown(struct platform_device *pdev)
static struct platform_driver sh_wdt_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = sh_wdt_probe,
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 702d07870808..42fa5c0c518a 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -210,7 +210,6 @@ MODULE_DEVICE_TABLE(of, sirfsoc_wdt_of_match);
static struct platform_driver sirfsoc_wdt_driver = {
.driver = {
.name = "sirfsoc-wdt",
- .owner = THIS_MODULE,
.pm = &sirfsoc_wdt_pm_ops,
.of_match_table = sirfsoc_wdt_of_match,
},
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 5cca9cddb87d..eb8044ef0ea0 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -538,7 +538,6 @@ static struct platform_driver sp5100_tco_driver = {
.remove = sp5100_tco_remove,
.shutdown = sp5100_tco_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = TCO_MODULE_NAME,
},
};
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index b62301e74e5f..a29afb37c48c 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -314,7 +314,6 @@ static struct platform_driver sunxi_wdt_driver = {
.remove = sunxi_wdt_remove,
.shutdown = sunxi_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = sunxi_wdt_dt_ids,
},
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 750e2a26cb12..30451ea46902 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -289,7 +289,6 @@ static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
.remove = tegra_wdt_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "tegra-wdt",
.pm = &tegra_wdt_pm_ops,
.of_match_table = tegra_wdt_of_match,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index dee9c6cbe6df..119beb7f6017 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -436,7 +436,6 @@ static struct platform_driver ts72xx_wdt_driver = {
.remove = ts72xx_wdt_remove,
.driver = {
.name = "ts72xx-wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 2d4535dc2676..12c15903d098 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -140,7 +140,6 @@ static struct platform_driver twl4030_wdt_driver = {
.suspend = twl4030_wdt_suspend,
.resume = twl4030_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
.name = "twl4030_wdt",
.of_match_table = twl_wdt_of_match,
},
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 6a447e321dd0..7f615933d31a 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -167,7 +167,6 @@ static struct platform_driver txx9wdt_driver = {
.shutdown = txx9wdt_shutdown,
.driver = {
.name = "txx9wdt",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index 5aed9d7ad47e..9de09ab00838 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -156,7 +156,6 @@ static struct platform_driver ux500_wdt_driver = {
.suspend = ux500_wdt_suspend,
.resume = ux500_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
.name = "ux500_wdt",
},
};
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
index 7a42dffd39e5..cf0e650c2015 100644
--- a/drivers/watchdog/xen_wdt.c
+++ b/drivers/watchdog/xen_wdt.c
@@ -320,7 +320,6 @@ static struct platform_driver xen_wdt_driver = {
.suspend = xen_wdt_suspend,
.resume = xen_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
},
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a788..810ad419e34c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
phys_addr_t paddr = dma;
- BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
-
paddr |= baddr & ~PAGE_MASK;
return paddr;
@@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) &&
- !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+ !range_straddles_page_boundary(phys, size) &&
+ !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+ !swiotlb_force) {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
- xen_dma_map_page(dev, page, offset, size, dir, attrs);
+ xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
return dev_addr;
}
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
- map & ~PAGE_MASK, size, dir, attrs);
+ dev_addr, map & ~PAGE_MASK, size, dir, attrs);
dev_addr = xen_phys_to_bus(map);
/*
@@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
- xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+ xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
@@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (target == SYNC_FOR_CPU)
- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+ xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
if (target == SYNC_FOR_DEVICE)
- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+ xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
if (dir != DMA_FROM_DEVICE)
return;
@@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
dma_addr_t dev_addr = xen_phys_to_bus(paddr);
if (swiotlb_force ||
+ xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
!dma_capable(hwdev, dev_addr, sg->length) ||
range_straddles_page_boundary(paddr, sg->length)) {
phys_addr_t map = swiotlb_tbl_map_single(hwdev,
@@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
return 0;
}
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+ dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
@@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+ dev_addr,
paddr & ~PAGE_MASK,
sg->length,
dir,
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 828dddc360df..f16a30e2a110 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
}
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
+ struct pci_dev *dev, bool lock)
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
@@ -87,8 +87,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
mutex_unlock(&dev_data->lock);
- if (found_dev)
+ if (found_dev) {
+ if (lock)
+ device_lock(&found_dev->dev);
pcistub_put_pci_dev(found_dev);
+ if (lock)
+ device_unlock(&found_dev->dev);
+ }
}
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -156,8 +161,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
struct pci_dev_entry *dev_entry, *t;
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+ struct pci_dev *dev = dev_entry->dev;
list_del(&dev_entry->list);
- pcistub_put_pci_dev(dev_entry->dev);
+ device_lock(&dev->dev);
+ pcistub_put_pci_dev(dev);
+ device_unlock(&dev->dev);
kfree(dev_entry);
}
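Editor's note: the passthrough.c changes above establish the locking convention used for the rest of this series: pcistub_put_pci_dev() now expects the PCI device lock to be held (pci_stub.c below adds a device_lock_assert() for it), so every call site either takes the lock itself or signals through the new "lock" argument that the caller already holds it. Condensed, the pattern is:

    /* Sketch of the new contract around pcistub_put_pci_dev(). */
    static void release_one(struct pci_dev *dev, bool lock)
    {
            if (lock)
                    device_lock(&dev->dev);
            pcistub_put_pci_dev(dev);       /* asserts the device lock internally */
            if (lock)
                    device_unlock(&dev->dev);
    }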
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 017069a455d4..cc3cbb4435f8 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -105,7 +105,7 @@ static void pcistub_device_release(struct kref *kref)
*/
__pci_reset_function_locked(dev);
if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
- dev_dbg(&dev->dev, "Could not reload PCI state\n");
+ dev_info(&dev->dev, "Could not reload PCI state\n");
else
pci_restore_state(dev);
@@ -250,11 +250,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
* - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
*
* As such we have to be careful.
+ *
+ * To make this easier, the caller has to hold the device lock.
*/
void pcistub_put_pci_dev(struct pci_dev *dev)
{
struct pcistub_device *psdev, *found_psdev = NULL;
unsigned long flags;
+ struct xen_pcibk_dev_data *dev_data;
+ int ret;
spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -276,13 +280,20 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
/* Cleanup our device
* (so it's ready for the next domain)
*/
+ device_lock_assert(&dev->dev);
+ __pci_reset_function_locked(dev);
- /* This is OK - we are running from workqueue context
- * and want to inhibit the user from fiddling with 'reset'
- */
- pci_reset_function(dev);
- pci_restore_state(dev);
-
+ dev_data = pci_get_drvdata(dev);
+ ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
+ if (!ret) {
+ /*
+ * The usual sequence is pci_save_state & pci_restore_state
+ * but the guest might have messed the configuration space up.
+ * Use the initial version (when device was bound to us).
+ */
+ pci_restore_state(dev);
+ } else
+ dev_info(&dev->dev, "Could not reload PCI state\n");
/* This disables the device. */
xen_pcibk_reset_device(dev);
@@ -554,12 +565,14 @@ static void pcistub_remove(struct pci_dev *dev)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
if (found_psdev) {
- dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
- found_psdev->pdev);
+ dev_dbg(&dev->dev, "found device to remove %s\n",
+ found_psdev->pdev ? "- in-use" : "");
if (found_psdev->pdev) {
- pr_warn("****** removing device %s while still in-use! ******\n",
- pci_name(found_psdev->dev));
+ int domid = xen_find_device_domain_owner(dev);
+
+ pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
+ pci_name(found_psdev->dev), domid);
pr_warn("****** driver domain may still access this device's i/o resources!\n");
pr_warn("****** shutdown driver domain before binding device\n");
pr_warn("****** to other drivers or domains\n");
@@ -567,7 +580,8 @@ static void pcistub_remove(struct pci_dev *dev)
/* N.B. This ends up calling pcistub_put_pci_dev which ends up
* doing the FLR. */
xen_pcibk_release_pci_dev(found_psdev->pdev,
- found_psdev->dev);
+ found_psdev->dev,
+ false /* caller holds the lock. */);
}
spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -629,10 +643,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
{
pci_ers_result_t res = result;
struct xen_pcie_aer_op *aer_op;
+ struct xen_pcibk_device *pdev = psdev->pdev;
+ struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
int ret;
/*with PV AER drivers*/
- aer_op = &(psdev->pdev->sh_info->aer_op);
+ aer_op = &(sh_info->aer_op);
aer_op->cmd = aer_cmd ;
/*useful for error_detected callback*/
aer_op->err = state;
@@ -653,36 +669,36 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
* this flag to judge whether we need to check pci-front give aer
* service ack signal
*/
- set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+ set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
/* It is possible that a pcifront conf_read_write ops request invokes
* the callback, which causes a spurious execution of wake_up.
* Yet it is harmless and better than a spinlock here
*/
set_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags);
+ (unsigned long *)&sh_info->flags);
wmb();
- notify_remote_via_irq(psdev->pdev->evtchn_irq);
+ notify_remote_via_irq(pdev->evtchn_irq);
ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
!(test_bit(_XEN_PCIB_active, (unsigned long *)
- &psdev->pdev->sh_info->flags)), 300*HZ);
+ &sh_info->flags)), 300*HZ);
if (!ret) {
if (test_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ (unsigned long *)&sh_info->flags)) {
dev_err(&psdev->dev->dev,
"pcifront aer process not responding!\n");
clear_bit(_XEN_PCIB_active,
- (unsigned long *)&psdev->pdev->sh_info->flags);
+ (unsigned long *)&sh_info->flags);
aer_op->err = PCI_ERS_RESULT_NONE;
return res;
}
}
- clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+ clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
if (test_bit(_XEN_PCIF_active,
- (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ (unsigned long *)&sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
"schedule pci_conf service in " DRV_NAME "\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
@@ -1502,6 +1518,53 @@ parse_error:
fs_initcall(pcistub_init);
#endif
+#ifdef CONFIG_PCI_IOV
+static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
+{
+ struct pcistub_device *psdev = NULL;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (!psdev->pdev && psdev->dev != pdev
+ && pci_physfn(psdev->dev) == pdev) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ if (found)
+ return psdev;
+ return NULL;
+}
+
+static int pci_stub_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ const struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (action != BUS_NOTIFY_UNBIND_DRIVER)
+ return NOTIFY_DONE;
+
+ if (!pdev->is_physfn)
+ return NOTIFY_DONE;
+
+ for (;;) {
+ struct pcistub_device *psdev = find_vfs(pdev);
+ if (!psdev)
+ break;
+ device_release_driver(&psdev->dev->dev);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pci_stub_nb = {
+ .notifier_call = pci_stub_notifier,
+};
+#endif
+
static int __init xen_pcibk_init(void)
{
int err;
@@ -1523,12 +1586,19 @@ static int __init xen_pcibk_init(void)
err = xen_pcibk_xenbus_register();
if (err)
pcistub_exit();
+#ifdef CONFIG_PCI_IOV
+ else
+ bus_register_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
return err;
}
static void __exit xen_pcibk_cleanup(void)
{
+#ifdef CONFIG_PCI_IOV
+ bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
xen_pcibk_xenbus_unregister();
pcistub_exit();
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f72af87640e0..58e38d586f52 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -99,7 +99,8 @@ struct xen_pcibk_backend {
unsigned int *domain, unsigned int *bus,
unsigned int *devfn);
int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
- void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
+ void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
+ bool lock);
int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
int devid, publish_pci_dev_cb publish_cb);
struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
@@ -122,10 +123,10 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
}
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
+ struct pci_dev *dev, bool lock)
{
if (xen_pcibk_backend && xen_pcibk_backend->release)
- return xen_pcibk_backend->release(pdev, dev);
+ return xen_pcibk_backend->release(pdev, dev, lock);
}
static inline struct pci_dev *
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 51afff96c515..c99f8bb1c56c 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -145,7 +145,7 @@ out:
}
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
+ struct pci_dev *dev, bool lock)
{
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -169,8 +169,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
out:
mutex_unlock(&vpci_dev->lock);
- if (found_dev)
+ if (found_dev) {
+ if (lock)
+ device_lock(&found_dev->dev);
pcistub_put_pci_dev(found_dev);
+ if (lock)
+ device_unlock(&found_dev->dev);
+ }
}
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -208,8 +213,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
struct pci_dev_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
list) {
+ struct pci_dev *dev = e->dev;
list_del(&e->list);
- pcistub_put_pci_dev(e->dev);
+ device_lock(&dev->dev);
+ pcistub_put_pci_dev(dev);
+ device_unlock(&dev->dev);
kfree(e);
}
}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index ad8d30c088fe..fe17c80ff4b7 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -247,7 +247,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
if (err)
goto out;
- dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+ dev_info(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
if (xen_register_device_domain_owner(dev,
pdev->xdev->otherend_id) != 0) {
dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
@@ -291,7 +291,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
/* N.B. This ends up calling pcistub_put_pci_dev which ends up
* doing the FLR. */
- xen_pcibk_release_pci_dev(pdev, dev);
+ xen_pcibk_release_pci_dev(pdev, dev, true /* use the lock. */);
out:
return err;
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 707c1a5a0317..d295d9878dff 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -222,7 +222,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
static struct platform_driver amiga_zorro_driver = {
.driver = {
.name = "amiga-zorro",
- .owner = THIS_MODULE,
},
};
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 370b24cee4d8..c055d56ec63d 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -30,6 +30,9 @@ config COMPAT_BINFMT_ELF
config ARCH_BINFMT_ELF_RANDOMIZE_PIE
bool
+config ARCH_BINFMT_ELF_STATE
+ bool
+
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y
diff --git a/fs/Makefile b/fs/Makefile
index da0bbb456d3f..bedff48e8fdc 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o \
- stack.o fs_struct.o statfs.o fs_pin.o
+ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o block_dev.o direct-io.o mpage.o
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 9bca88159725..ff44ff3ff015 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -135,8 +135,10 @@ extern void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh);
extern void secs_to_datestamp(time_t secs, struct affs_date *ds);
extern umode_t prot_to_mode(u32 prot);
extern void mode_to_prot(struct inode *inode);
+__printf(3, 4)
extern void affs_error(struct super_block *sb, const char *function,
const char *fmt, ...);
+__printf(3, 4)
extern void affs_warning(struct super_block *sb, const char *function,
const char *fmt, ...);
extern bool affs_nofilenametruncate(const struct dentry *dentry);
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 937ce8754b24..c852f2fa1710 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -10,8 +10,6 @@
#include "affs.h"
-static char ErrorBuffer[256];
-
/*
* Functions for accessing Amiga-FFS structures.
*/
@@ -444,30 +442,30 @@ mode_to_prot(struct inode *inode)
void
affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
{
- va_list args;
-
- va_start(args,fmt);
- vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
- va_end(args);
+ struct va_format vaf;
+ va_list args;
- pr_crit("error (device %s): %s(): %s\n", sb->s_id,
- function,ErrorBuffer);
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
if (!(sb->s_flags & MS_RDONLY))
pr_warn("Remounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
+ va_end(args);
}
void
affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
{
- va_list args;
+ struct va_format vaf;
+ va_list args;
- va_start(args,fmt);
- vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_warn("(device %s): %s(): %pV\n", sb->s_id, function, &vaf);
va_end(args);
-
- pr_warn("(device %s): %s(): %s\n", sb->s_id,
- function,ErrorBuffer);
}
bool
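Editor's note: the amigaffs.c rewrite above drops the shared 256-byte ErrorBuffer in favour of printk's %pV specifier and struct va_format, which removes both the truncation and the race between concurrent callers. A minimal sketch of the pattern in isolation (my_log() is a hypothetical helper, not from the patch):

    #include <linux/printk.h>

    static __printf(1, 2) void my_log(const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            /* %pV expands the caller's format and arguments in one pass,
             * with no intermediate buffer. */
            pr_warn("my_log: %pV\n", &vaf);
            va_end(args);
    }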
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 1ed590aafecf..8faa6593ca6d 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -12,35 +12,10 @@
* affs regular file handling primitives
*/
+#include <linux/aio.h>
#include "affs.h"
-#if PAGE_SIZE < 4096
-#error PAGE_SIZE must be at least 4096
-#endif
-
-static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
-static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
-static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
-static int affs_file_open(struct inode *inode, struct file *filp);
-static int affs_file_release(struct inode *inode, struct file *filp);
-
-const struct file_operations affs_file_operations = {
- .llseek = generic_file_llseek,
- .read = new_sync_read,
- .read_iter = generic_file_read_iter,
- .write = new_sync_write,
- .write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .open = affs_file_open,
- .release = affs_file_release,
- .fsync = affs_file_fsync,
- .splice_read = generic_file_splice_read,
-};
-
-const struct inode_operations affs_file_inode_operations = {
- .setattr = affs_notify_change,
-};
static int
affs_file_open(struct inode *inode, struct file *filp)
@@ -355,7 +330,8 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
/* store new block */
if (bh_result->b_blocknr)
- affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
+ affs_warning(sb, "get_block", "block already set (%lx)",
+ (unsigned long)bh_result->b_blocknr);
AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
@@ -377,7 +353,8 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
return 0;
err_big:
- affs_error(inode->i_sb,"get_block","strange block request %d", block);
+ affs_error(inode->i_sb, "get_block", "strange block request %d",
+ (int)block);
return -EIO;
err_ext:
// unlock cache
@@ -412,6 +389,22 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
}
}
+static ssize_t
+affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
+
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
+ if (ret < 0 && (rw & WRITE))
+ affs_write_failed(mapping, offset + count);
+ return ret;
+}
+
static int affs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -438,6 +431,7 @@ const struct address_space_operations affs_aops = {
.writepage = affs_writepage,
.write_begin = affs_write_begin,
.write_end = generic_write_end,
+ .direct_IO = affs_direct_IO,
.bmap = _affs_bmap
};
@@ -867,8 +861,9 @@ affs_truncate(struct inode *inode)
// lock cache
ext_bh = affs_get_extblock(inode, ext);
if (IS_ERR(ext_bh)) {
- affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
- ext, PTR_ERR(ext_bh));
+ affs_warning(sb, "truncate",
+ "unexpected read error for ext block %u (%ld)",
+ (unsigned int)ext, PTR_ERR(ext_bh));
return;
}
if (AFFS_I(inode)->i_lc) {
@@ -914,8 +909,9 @@ affs_truncate(struct inode *inode)
struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
u32 tmp;
if (IS_ERR(bh)) {
- affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
- ext, PTR_ERR(bh));
+ affs_warning(sb, "truncate",
+ "unexpected read error for last block %u (%ld)",
+ (unsigned int)ext, PTR_ERR(bh));
return;
}
tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
@@ -961,3 +957,19 @@ int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
mutex_unlock(&inode->i_mutex);
return ret;
}
+const struct file_operations affs_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = new_sync_read,
+ .read_iter = generic_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .open = affs_file_open,
+ .release = affs_file_release,
+ .fsync = affs_file_fsync,
+ .splice_read = generic_file_splice_read,
+};
+
+const struct inode_operations affs_file_inode_operations = {
+ .setattr = affs_notify_change,
+};
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 03a3beb17004..06e14bfb3496 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
_debug("- range %u-%u%s",
offset, to, msg->msg_flags ? " [more]" : "");
- msg->msg_iov = (struct iovec *) iov;
- msg->msg_iovlen = 1;
+ iov_iter_init(&msg->msg_iter, WRITE,
+ (struct iovec *) iov, 1, to - offset);
/* have to change the state *before* sending the last
* packet as RxRPC might give us the reply before it
@@ -384,8 +384,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = (struct iovec *) iov;
- msg.msg_iovlen = 1;
+ iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+ call->request_size);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
@@ -778,8 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
iov[0].iov_len = 0;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = iov;
- msg.msg_iovlen = 0;
+ iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -815,8 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
iov[0].iov_len = len;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = iov;
- msg.msg_iovlen = 1;
+ iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
diff --git a/fs/aio.c b/fs/aio.c
index 14b93159ef83..1b7893ecc296 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -286,12 +286,37 @@ static void aio_free_ring(struct kioctx *ctx)
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_ops = &generic_file_vm_ops;
return 0;
}
+static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct kioctx_table *table;
+ int i;
+
+ spin_lock(&mm->ioctx_lock);
+ rcu_read_lock();
+ table = rcu_dereference(mm->ioctx_table);
+ for (i = 0; i < table->nr; i++) {
+ struct kioctx *ctx;
+
+ ctx = table->table[i];
+ if (ctx && ctx->aio_ring_file == file) {
+ ctx->user_id = ctx->mmap_base = vma->vm_start;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ spin_unlock(&mm->ioctx_lock);
+}
+
static const struct file_operations aio_ring_fops = {
.mmap = aio_ring_mmap,
+ .mremap = aio_ring_remap,
};
#if IS_ENABLED(CONFIG_MIGRATION)
@@ -1228,8 +1253,12 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
* the ringbuffer empty. So in practice we should be ok, but it's
* something to be aware of when touching this code.
*/
- wait_event_interruptible_hrtimeout(ctx->wait,
- aio_read_events(ctx, min_nr, nr, event, &ret), until);
+ if (until.tv64 == 0)
+ aio_read_events(ctx, min_nr, nr, event, &ret);
+ else
+ wait_event_interruptible_hrtimeout(ctx->wait,
+ aio_read_events(ctx, min_nr, nr, event, &ret),
+ until);
if (!ret && signal_pending(current))
ret = -EINTR;
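
Two independent aio fixes sit in this hunk: a fast path that skips the hrtimer wait when the timeout is zero, and a new ->mremap() file operation that lets the ring's owner learn the new user address when the mapping is moved. A sketch of how another driver could use the same hook; foo_dev, foo_mmap() and foo_mremap() are made-up names, with foo_mmap() assumed to exist elsewhere.

/* Sketch only -- foo_dev, foo_mmap() and foo_mremap() are assumed names. */
struct foo_dev {
	unsigned long	ring_uaddr;	/* userspace address of the ring */
};

static void foo_mremap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *dev = file->private_data;

	/* mremap(2) moved the mapping; remember the new location */
	dev->ring_uaddr = vma->vm_start;
}

static const struct file_operations foo_fops = {
	.mmap	= foo_mmap,
	.mremap	= foo_mremap,
};

This is the same shape as aio_ring_remap() above, which walks the ioctx table to refresh ctx->mmap_base and ctx->user_id.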
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b94d1cc9cd30..edf47774b03d 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -269,10 +269,6 @@ more:
}
ctx->pos++;
goto more;
-
- befs_debug(sb, "<--- %s pos %lld", __func__, ctx->pos);
-
- return 0;
}
static struct inode *
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3a6175fe10c0..02b16910f4c9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -386,6 +386,127 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
+/**
+ * load_elf_phdrs() - load ELF program headers
+ * @elf_ex: ELF header of the binary whose program headers should be loaded
+ * @elf_file: the opened ELF binary file
+ *
+ * Loads ELF program headers from the binary file elf_file, which has the ELF
+ * header pointed to by elf_ex, into a newly allocated array. The caller is
+ * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
+ */
+static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
+ struct file *elf_file)
+{
+ struct elf_phdr *elf_phdata = NULL;
+ int retval, size, err = -1;
+
+ /*
+ * If the size of this structure has changed, then punt, since
+ * we will be doing the wrong thing.
+ */
+ if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
+ goto out;
+
+ /* Sanity check the number of program headers... */
+ if (elf_ex->e_phnum < 1 ||
+ elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
+ goto out;
+
+ /* ...and their total size. */
+ size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
+ if (size > ELF_MIN_ALIGN)
+ goto out;
+
+ elf_phdata = kmalloc(size, GFP_KERNEL);
+ if (!elf_phdata)
+ goto out;
+
+ /* Read in the program headers */
+ retval = kernel_read(elf_file, elf_ex->e_phoff,
+ (char *)elf_phdata, size);
+ if (retval != size) {
+ err = (retval < 0) ? retval : -EIO;
+ goto out;
+ }
+
+ /* Success! */
+ err = 0;
+out:
+ if (err) {
+ kfree(elf_phdata);
+ elf_phdata = NULL;
+ }
+ return elf_phdata;
+}
+
+#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
+
+/**
+ * struct arch_elf_state - arch-specific ELF loading state
+ *
+ * This structure is used to preserve architecture specific data during
+ * the loading of an ELF file, throughout the checking of architecture
+ * specific ELF headers & through to the point where the ELF load is
+ * known to be proceeding (ie. SET_PERSONALITY).
+ *
+ * This implementation is a dummy for architectures which require no
+ * specific state.
+ */
+struct arch_elf_state {
+};
+
+#define INIT_ARCH_ELF_STATE {}
+
+/**
+ * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * @ehdr: The main ELF header
+ * @phdr: The program header to check
+ * @elf: The open ELF file
+ * @is_interp: True if the phdr is from the interpreter of the ELF being
+ * loaded, else false.
+ * @state: Architecture-specific state preserved throughout the process
+ * of loading the ELF.
+ *
+ * Inspects the program header phdr to validate its correctness and/or
+ * suitability for the system. Called once per ELF program header in the
+ * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
+ * interpreter.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ * with that return code.
+ */
+static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
+ struct elf_phdr *phdr,
+ struct file *elf, bool is_interp,
+ struct arch_elf_state *state)
+{
+ /* Dummy implementation, always proceed */
+ return 0;
+}
+
+/**
+ * arch_check_elf() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * @ehdr: The main ELF header
+ * @has_interp: True if the ELF has an interpreter, else false.
+ * @state: Architecture-specific state preserved throughout the process
+ * of loading the ELF.
+ *
+ * Provides a final opportunity for architecture code to reject the loading
+ * of the ELF & cause an exec syscall to return an error. This is called after
+ * all program headers to be checked by arch_elf_pt_proc have been.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ * with that return code.
+ */
+static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
+ struct arch_elf_state *state)
+{
+ /* Dummy implementation, always proceed */
+ return 0;
+}
+
+#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
/* This is much more generalized than the library routine read function,
so we keep this separate. Technically the library read function
@@ -394,16 +515,15 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
struct file *interpreter, unsigned long *interp_map_addr,
- unsigned long no_base)
+ unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
- struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long load_addr = 0;
int load_addr_set = 0;
unsigned long last_bss = 0, elf_bss = 0;
unsigned long error = ~0UL;
unsigned long total_size;
- int retval, i, size;
+ int i;
/* First of all, some simple consistency checks */
if (interp_elf_ex->e_type != ET_EXEC &&
@@ -414,40 +534,14 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
if (!interpreter->f_op->mmap)
goto out;
- /*
- * If the size of this structure has changed, then punt, since
- * we will be doing the wrong thing.
- */
- if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
- goto out;
- if (interp_elf_ex->e_phnum < 1 ||
- interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
- goto out;
-
- /* Now read in all of the header information */
- size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
- if (size > ELF_MIN_ALIGN)
- goto out;
- elf_phdata = kmalloc(size, GFP_KERNEL);
- if (!elf_phdata)
- goto out;
-
- retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
- (char *)elf_phdata, size);
- error = -EIO;
- if (retval != size) {
- if (retval < 0)
- error = retval;
- goto out_close;
- }
-
- total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+ total_size = total_mapping_size(interp_elf_phdata,
+ interp_elf_ex->e_phnum);
if (!total_size) {
error = -EINVAL;
- goto out_close;
+ goto out;
}
- eppnt = elf_phdata;
+ eppnt = interp_elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
@@ -474,7 +568,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
*interp_map_addr = map_addr;
error = map_addr;
if (BAD_ADDR(map_addr))
- goto out_close;
+ goto out;
if (!load_addr_set &&
interp_elf_ex->e_type == ET_DYN) {
@@ -493,7 +587,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
eppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - eppnt->p_memsz < k) {
error = -ENOMEM;
- goto out_close;
+ goto out;
}
/*
@@ -523,7 +617,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
*/
if (padzero(elf_bss)) {
error = -EFAULT;
- goto out_close;
+ goto out;
}
/* What we have mapped so far */
@@ -532,13 +626,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
/* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
if (BAD_ADDR(error))
- goto out_close;
+ goto out;
}
error = load_addr;
-
-out_close:
- kfree(elf_phdata);
out:
return error;
}
@@ -575,10 +666,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
int load_addr_set = 0;
char * elf_interpreter = NULL;
unsigned long error;
- struct elf_phdr *elf_ppnt, *elf_phdata;
+ struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
unsigned long elf_bss, elf_brk;
int retval, i;
- unsigned int size;
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
@@ -589,6 +679,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
+ struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
@@ -611,26 +702,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (!bprm->file->f_op->mmap)
goto out;
- /* Now read in all of the header information */
- if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
- goto out;
- if (loc->elf_ex.e_phnum < 1 ||
- loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
- goto out;
- size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
- retval = -ENOMEM;
- elf_phdata = kmalloc(size, GFP_KERNEL);
+ elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
if (!elf_phdata)
goto out;
- retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
- (char *)elf_phdata, size);
- if (retval != size) {
- if (retval >= 0)
- retval = -EIO;
- goto out_free_ph;
- }
-
elf_ppnt = elf_phdata;
elf_bss = 0;
elf_brk = 0;
@@ -699,12 +774,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
- if (elf_ppnt->p_type == PT_GNU_STACK) {
+ switch (elf_ppnt->p_type) {
+ case PT_GNU_STACK:
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
else
executable_stack = EXSTACK_DISABLE_X;
break;
+
+ case PT_LOPROC ... PT_HIPROC:
+ retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
+ bprm->file, false,
+ &arch_state);
+ if (retval)
+ goto out_free_dentry;
+ break;
}
/* Some simple consistency checks for the interpreter */
@@ -716,8 +800,36 @@ static int load_elf_binary(struct linux_binprm *bprm)
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
+
+ /* Load the interpreter program headers */
+ interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
+ interpreter);
+ if (!interp_elf_phdata)
+ goto out_free_dentry;
+
+ /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
+ elf_ppnt = interp_elf_phdata;
+ for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
+ switch (elf_ppnt->p_type) {
+ case PT_LOPROC ... PT_HIPROC:
+ retval = arch_elf_pt_proc(&loc->interp_elf_ex,
+ elf_ppnt, interpreter,
+ true, &arch_state);
+ if (retval)
+ goto out_free_dentry;
+ break;
+ }
}
+ /*
+ * Allow arch code to reject the ELF at this point, whilst it's
+ * still possible to return an error to the code that invoked
+ * the exec syscall.
+ */
+ retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
+ if (retval)
+ goto out_free_dentry;
+
/* Flush all traces of the currently running executable */
retval = flush_old_exec(bprm);
if (retval)
@@ -725,7 +837,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
- SET_PERSONALITY(loc->elf_ex);
+ SET_PERSONALITY2(loc->elf_ex, &arch_state);
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
@@ -890,7 +1002,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
&interp_map_addr,
- load_bias);
+ load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
/*
* load_elf_interp() returns relocation
@@ -917,6 +1029,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
}
}
+ kfree(interp_elf_phdata);
kfree(elf_phdata);
set_binfmt(&elf_format);
@@ -981,6 +1094,7 @@ out_ret:
/* error cleanup */
out_free_dentry:
+ kfree(interp_elf_phdata);
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
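
The binfmt_elf rework above factors program-header loading into load_elf_phdrs() and threads an arch_elf_state through the loader so architectures can inspect PT_LOPROC..PT_HIPROC headers and veto the exec before SET_PERSONALITY2(). A sketch of what an architecture selecting CONFIG_ARCH_BINFMT_ELF_STATE might provide; saw_vendor_phdr and arch_feature_available() are invented for the example.

/* Sketch only -- hypothetical arch code, not part of the patch. */
struct arch_elf_state {
	bool saw_vendor_phdr;		/* invented example field */
};

#define INIT_ARCH_ELF_STATE { .saw_vendor_phdr = false }

static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* record that a PT_LOPROC..PT_HIPROC header was seen */
	state->saw_vendor_phdr = true;
	return 0;
}

static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct arch_elf_state *state)
{
	/* arch_feature_available() is an assumed helper */
	if (state->saw_vendor_phdr && !arch_feature_available())
		return -ENOEXEC;	/* fail the exec cleanly */
	return 0;
}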
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index f37b08cea1f7..490538536cb4 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -42,6 +42,10 @@ static int load_em86(struct linux_binprm *bprm)
return -ENOEXEC;
}
+ /* Need to be able to load the file after exec */
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 70789e198dea..c04ef1d4f18a 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -144,6 +144,10 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (!fmt)
goto ret;
+ /* Need to be able to load the file after exec */
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
retval = remove_arg_zero(bprm);
if (retval)
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 5027a3e14922..afdf4e3cafc2 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -24,6 +24,16 @@ static int load_script(struct linux_binprm *bprm)
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
+
+ /*
+ * If the script filename will be inaccessible after exec, typically
+ * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
+ * up now (on the assumption that the interpreter will want to load
+ * this file).
+ */
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index cb7f3fe9c9f6..d897ef803b3b 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -94,6 +94,7 @@
#include <linux/mutex.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
@@ -326,9 +327,6 @@ static int btrfsic_handle_extent_data(struct btrfsic_state *state,
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfsic_block_data_ctx *block_ctx_out,
int mirror_num);
-static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
- u32 len, struct block_device *bdev,
- struct btrfsic_block_data_ctx *block_ctx_out);
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx);
@@ -1326,24 +1324,25 @@ static int btrfsic_create_link_to_next_block(
l = NULL;
next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
} else {
- if (next_block->logical_bytenr != next_bytenr &&
- !(!next_block->is_metadata &&
- 0 == next_block->logical_bytenr)) {
- printk(KERN_INFO
- "Referenced block @%llu (%s/%llu/%d)"
- " found in hash table, %c,"
- " bytenr mismatch (!= stored %llu).\n",
- next_bytenr, next_block_ctx->dev->name,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state, next_block),
- next_block->logical_bytenr);
- } else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "Referenced block @%llu (%s/%llu/%d)"
- " found in hash table, %c.\n",
- next_bytenr, next_block_ctx->dev->name,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state, next_block));
+ if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
+ if (next_block->logical_bytenr != next_bytenr &&
+ !(!next_block->is_metadata &&
+ 0 == next_block->logical_bytenr))
+ printk(KERN_INFO
+ "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+ next_bytenr, next_block_ctx->dev->name,
+ next_block_ctx->dev_bytenr, *mirror_nump,
+ btrfsic_get_block_type(state,
+ next_block),
+ next_block->logical_bytenr);
+ else
+ printk(KERN_INFO
+ "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+ next_bytenr, next_block_ctx->dev->name,
+ next_block_ctx->dev_bytenr, *mirror_nump,
+ btrfsic_get_block_type(state,
+ next_block));
+ }
next_block->logical_bytenr = next_bytenr;
next_block->mirror_num = *mirror_nump;
@@ -1529,7 +1528,9 @@ static int btrfsic_handle_extent_data(
return -1;
}
if (!block_was_created) {
- if (next_block->logical_bytenr != next_bytenr &&
+ if ((state->print_mask &
+ BTRFSIC_PRINT_MASK_VERBOSE) &&
+ next_block->logical_bytenr != next_bytenr &&
!(!next_block->is_metadata &&
0 == next_block->logical_bytenr)) {
printk(KERN_INFO
@@ -1607,25 +1608,6 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
return ret;
}
-static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
- u32 len, struct block_device *bdev,
- struct btrfsic_block_data_ctx *block_ctx_out)
-{
- block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
- block_ctx_out->dev_bytenr = bytenr;
- block_ctx_out->start = bytenr;
- block_ctx_out->len = len;
- block_ctx_out->datav = NULL;
- block_ctx_out->pagev = NULL;
- block_ctx_out->mem_to_free = NULL;
- if (NULL != block_ctx_out->dev) {
- return 0;
- } else {
- printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
- return -ENXIO;
- }
-}
-
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
if (block_ctx->mem_to_free) {
@@ -1901,25 +1883,26 @@ again:
dev_state,
dev_bytenr);
}
- if (block->logical_bytenr != bytenr &&
- !(!block->is_metadata &&
- block->logical_bytenr == 0))
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/%d)"
- " found in hash table, %c,"
- " bytenr mismatch"
- " (!= stored %llu).\n",
- bytenr, dev_state->name, dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr);
- else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/%d)"
- " found in hash table, %c.\n",
- bytenr, dev_state->name, dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state, block));
+ if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
+ if (block->logical_bytenr != bytenr &&
+ !(!block->is_metadata &&
+ block->logical_bytenr == 0))
+ printk(KERN_INFO
+ "Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+ bytenr, dev_state->name,
+ dev_bytenr,
+ block->mirror_num,
+ btrfsic_get_block_type(state,
+ block),
+ block->logical_bytenr);
+ else
+ printk(KERN_INFO
+ "Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+ bytenr, dev_state->name,
+ dev_bytenr, block->mirror_num,
+ btrfsic_get_block_type(state,
+ block));
+ }
block->logical_bytenr = bytenr;
} else {
if (num_pages * PAGE_CACHE_SIZE <
@@ -2002,24 +1985,13 @@ again:
}
}
- if (block->is_superblock)
- ret = btrfsic_map_superblock(state, bytenr,
- processed_len,
- bdev, &block_ctx);
- else
- ret = btrfsic_map_block(state, bytenr, processed_len,
- &block_ctx, 0);
- if (ret) {
- printk(KERN_INFO
- "btrfsic: btrfsic_map_block(root @%llu)"
- " failed!\n", bytenr);
- goto continue_loop;
- }
- block_ctx.datav = mapped_datav;
- /* the following is required in case of writes to mirrors,
- * use the same that was used for the lookup */
block_ctx.dev = dev_state;
block_ctx.dev_bytenr = dev_bytenr;
+ block_ctx.start = bytenr;
+ block_ctx.len = processed_len;
+ block_ctx.pagev = NULL;
+ block_ctx.mem_to_free = NULL;
+ block_ctx.datav = mapped_datav;
if (is_metadata || state->include_extent_data) {
block->never_written = 0;
@@ -2133,10 +2105,6 @@ again:
/* this is getting ugly for the
* include_extent_data case... */
bytenr = 0; /* unknown */
- block_ctx.start = bytenr;
- block_ctx.len = processed_len;
- block_ctx.mem_to_free = NULL;
- block_ctx.pagev = NULL;
} else {
processed_len = state->metablock_size;
bytenr = btrfs_stack_header_bytenr(
@@ -2149,22 +2117,15 @@ again:
"Written block @%llu (%s/%llu/?)"
" !found in hash table, M.\n",
bytenr, dev_state->name, dev_bytenr);
-
- ret = btrfsic_map_block(state, bytenr, processed_len,
- &block_ctx, 0);
- if (ret) {
- printk(KERN_INFO
- "btrfsic: btrfsic_map_block(root @%llu)"
- " failed!\n",
- dev_bytenr);
- goto continue_loop;
- }
}
- block_ctx.datav = mapped_datav;
- /* the following is required in case of writes to mirrors,
- * use the same that was used for the lookup */
+
block_ctx.dev = dev_state;
block_ctx.dev_bytenr = dev_bytenr;
+ block_ctx.start = bytenr;
+ block_ctx.len = processed_len;
+ block_ctx.pagev = NULL;
+ block_ctx.mem_to_free = NULL;
+ block_ctx.datav = mapped_datav;
block = btrfsic_block_alloc();
if (NULL == block) {
@@ -3130,10 +3091,13 @@ int btrfsic_mount(struct btrfs_root *root,
root->sectorsize, PAGE_CACHE_SIZE);
return -1;
}
- state = kzalloc(sizeof(*state), GFP_NOFS);
- if (NULL == state) {
- printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
- return -1;
+ state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!state) {
+ state = vzalloc(sizeof(*state));
+ if (!state) {
+ printk(KERN_INFO "btrfs check-integrity: vzalloc() failed!\n");
+ return -1;
+ }
}
if (!btrfsic_is_initialized) {
@@ -3277,5 +3241,8 @@ void btrfsic_unmount(struct btrfs_root *root,
mutex_unlock(&btrfsic_mutex);
- kfree(state);
+ if (is_vmalloc_addr(state))
+ vfree(state);
+ else
+ kfree(state);
}
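
The allocation change above is a common pattern for structures that may be too large for the page allocator under fragmentation: try kzalloc() with the warning suppressed, fall back to vzalloc(), and pick the matching free based on the address. A generic sketch of the pair; the free side is what kvfree() wraps.

/* Sketch only -- generic pattern, not a hunk from this patch. */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_big_state(size_t size)
{
	void *p;

	/* no allocation-failure warning, we have a fallback */
	p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(size);	/* virtually contiguous is enough */
	return p;
}

static void free_big_state(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}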
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index dcd9be32ac57..e9df8862012c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -224,16 +224,19 @@ out:
* Clear the writeback bits on all of the file
* pages for a compressed write
*/
-static noinline void end_compressed_writeback(struct inode *inode, u64 start,
- unsigned long ram_size)
+static noinline void end_compressed_writeback(struct inode *inode,
+ const struct compressed_bio *cb)
{
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
struct page *pages[16];
unsigned long nr_pages = end_index - index + 1;
int i;
int ret;
+ if (cb->errors)
+ mapping_set_error(inode->i_mapping, -EIO);
+
while (nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
min_t(unsigned long,
@@ -244,6 +247,8 @@ static noinline void end_compressed_writeback(struct inode *inode, u64 start,
continue;
}
for (i = 0; i < ret; i++) {
+ if (cb->errors)
+ SetPageError(pages[i]);
end_page_writeback(pages[i]);
page_cache_release(pages[i]);
}
@@ -287,10 +292,11 @@ static void end_compressed_bio_write(struct bio *bio, int err)
tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
cb->start,
cb->start + cb->len - 1,
- NULL, 1);
+ NULL,
+ err ? 0 : 1);
cb->compressed_pages[0]->mapping = NULL;
- end_compressed_writeback(inode, cb->start, cb->len);
+ end_compressed_writeback(inode, cb);
/* note, our inode could be gone now */
/*
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 150822ee0a0b..14a72ed14ef7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2929,7 +2929,7 @@ done:
*/
if (!p->leave_spinning)
btrfs_set_path_blocking(p);
- if (ret < 0)
+ if (ret < 0 && !p->skip_release_on_error)
btrfs_release_path(p);
return ret;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fe69edda11fb..e6fbbd74b716 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -607,6 +607,7 @@ struct btrfs_path {
unsigned int leave_spinning:1;
unsigned int search_commit_root:1;
unsigned int need_commit_sem:1;
+ unsigned int skip_release_on_error:1;
};
/*
@@ -1170,6 +1171,7 @@ struct btrfs_space_info {
struct percpu_counter total_bytes_pinned;
struct list_head list;
+ struct list_head ro_bgs;
struct rw_semaphore groups_sem;
/* for block groups in our same type */
@@ -1276,6 +1278,8 @@ struct btrfs_block_group_cache {
unsigned int ro:1;
unsigned int dirty:1;
unsigned int iref:1;
+ unsigned int has_caching_ctl:1;
+ unsigned int removed:1;
int disk_cache_state;
@@ -1305,6 +1309,11 @@ struct btrfs_block_group_cache {
/* For delayed block group creation or deletion of empty block groups */
struct list_head bg_list;
+
+ /* For read-only block groups */
+ struct list_head ro_list;
+
+ atomic_t trimming;
};
/* delayed seq elem */
@@ -1402,6 +1411,11 @@ struct btrfs_fs_info {
*/
u64 last_trans_log_full_commit;
unsigned long mount_opt;
+ /*
+ * Track requests for actions that need to be done during transaction
+ * commit (like for some mount options).
+ */
+ unsigned long pending_changes;
unsigned long compress_type:4;
int commit_interval;
/*
@@ -1729,6 +1743,12 @@ struct btrfs_fs_info {
/* For btrfs to record security options */
struct security_mnt_opts security_opts;
+
+ /*
+ * Chunks that can't be freed yet (under a trim/discard operation)
+ * and will be later freed. Protected by fs_info->chunk_mutex.

+ */
+ struct list_head pinned_chunks;
};
struct btrfs_subvolume_writers {
@@ -2093,7 +2113,6 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
#define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23)
-#define BTRFS_MOUNT_CHANGE_INODE_CACHE (1 << 24)
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (8192)
@@ -2103,6 +2122,7 @@ struct btrfs_ioctl_defrag_range_args {
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
BTRFS_MOUNT_##opt)
+
#define btrfs_set_and_info(root, opt, fmt, args...) \
{ \
if (!btrfs_test_opt(root, opt)) \
@@ -2118,6 +2138,49 @@ struct btrfs_ioctl_defrag_range_args {
}
/*
+ * Requests for changes that need to be done during transaction commit.
+ *
+ * Internal mount options that are used for special handling of the real
+ * mount options (eg. cannot be set during remount and have to be set during
+ * transaction commit)
+ */
+
+#define BTRFS_PENDING_SET_INODE_MAP_CACHE (0)
+#define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1)
+#define BTRFS_PENDING_COMMIT (2)
+
+#define btrfs_test_pending(info, opt) \
+ test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+#define btrfs_set_pending(info, opt) \
+ set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+#define btrfs_clear_pending(info, opt) \
+ clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+
+/*
+ * Helpers for setting pending mount option changes.
+ *
+ * Expects corresponding macros
+ * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name
+ */
+#define btrfs_set_pending_and_info(info, opt, fmt, args...) \
+do { \
+ if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \
+ btrfs_info((info), fmt, ##args); \
+ btrfs_set_pending((info), SET_##opt); \
+ btrfs_clear_pending((info), CLEAR_##opt); \
+ } \
+} while(0)
+
+#define btrfs_clear_pending_and_info(info, opt, fmt, args...) \
+do { \
+ if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \
+ btrfs_info((info), fmt, ##args); \
+ btrfs_set_pending((info), CLEAR_##opt); \
+ btrfs_clear_pending((info), SET_##opt); \
+ } \
+} while(0)
+
+/*
* Inode flags
*/
#define BTRFS_INODE_NODATASUM (1 << 0)
@@ -3351,7 +3414,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 group_start);
+ struct btrfs_root *root, u64 group_start,
+ struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
@@ -3427,8 +3491,8 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int __get_raid_index(u64 flags);
-int btrfs_start_nocow_write(struct btrfs_root *root);
-void btrfs_end_nocow_write(struct btrfs_root *root);
+int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
+void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -3686,6 +3750,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
int verify_dir_item(struct btrfs_root *root,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item);
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name,
+ int name_len);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3857,6 +3925,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
+int btrfs_inode_check_errors(struct inode *inode);
extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
@@ -3901,6 +3970,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
struct extent_state **cached);
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
@@ -4097,7 +4167,12 @@ int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
-void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info);
+void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
+
+static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
+{
+ btrfs_bio_counter_sub(fs_info, 1);
+}
/* reada.c */
struct reada_control {
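
The pending_changes machinery above decouples a mount-option request from the moment it takes effect: remount (or mount) only sets a SET_/CLEAR_ bit, and the bit is translated into the real mount_opt flag at transaction commit. A usage sketch for the inode map cache toggle, assuming the pre-existing BTRFS_MOUNT_INODE_MAP_CACHE option bit and btrfs_clear_opt() helper; the function names are invented.

/* Sketch only -- usage illustration, not a hunk from this patch. */
static void example_request_inode_cache(struct btrfs_fs_info *fs_info)
{
	/* remount path: queue the change, don't touch mount_opt yet */
	btrfs_set_pending_and_info(fs_info, INODE_MAP_CACHE,
				   "enabling inode map caching");
}

static void example_apply_pending(struct btrfs_fs_info *fs_info)
{
	/* commit path: apply whatever was queued since the last commit */
	if (test_and_clear_bit(BTRFS_PENDING_SET_INODE_MAP_CACHE,
			       &fs_info->pending_changes))
		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);

	if (test_and_clear_bit(BTRFS_PENDING_CLEAR_INODE_MAP_CACHE,
			       &fs_info->pending_changes))
		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
}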
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 6f662b34ba0e..ca6a3a3b6b6c 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -316,11 +316,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
struct btrfs_device *tgt_device = NULL;
struct btrfs_device *src_device = NULL;
- if (btrfs_fs_incompat(fs_info, RAID56)) {
- btrfs_warn(fs_info, "dev_replace cannot yet handle RAID5/RAID6");
- return -EOPNOTSUPP;
- }
-
switch (args->start.cont_reading_from_srcdev_mode) {
case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
@@ -422,9 +417,15 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(root->fs_info, ret);
- WARN_ON(ret);
+ /* don't warn if EINPROGRESS, someone else might be running scrub */
+ if (ret == -EINPROGRESS) {
+ args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
+ ret = 0;
+ } else {
+ WARN_ON(ret);
+ }
- return 0;
+ return ret;
leave:
dev_replace->srcdev = NULL;
@@ -542,7 +543,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
- return 0;
+ return scrub_ret;
}
printk_in_rcu(KERN_INFO
@@ -571,15 +572,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
fs_info->fs_devices->rw_devices++;
- /* replace the sysfs entry */
- btrfs_kobj_rm_device(fs_info, src_device);
- btrfs_kobj_add_device(fs_info, tgt_device);
-
btrfs_dev_replace_unlock(dev_replace);
btrfs_rm_dev_replace_blocked(fs_info);
- btrfs_rm_dev_replace_srcdev(fs_info, src_device);
+ btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
btrfs_rm_dev_replace_unblocked(fs_info);
@@ -594,6 +591,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
+ /* replace the sysfs entry */
+ btrfs_kobj_rm_device(fs_info, src_device);
+ btrfs_kobj_add_device(fs_info, tgt_device);
+ btrfs_rm_dev_replace_free_srcdev(fs_info, src_device);
+
/* write back the superblocks */
trans = btrfs_start_transaction(root, 0);
if (!IS_ERR(trans))
@@ -920,9 +922,9 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
percpu_counter_inc(&fs_info->bio_counter);
}
-void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
+void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
- percpu_counter_dec(&fs_info->bio_counter);
+ percpu_counter_sub(&fs_info->bio_counter, amount);
if (waitqueue_active(&fs_info->replace_wait))
wake_up(&fs_info->replace_wait);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index fc8df866e919..1752625fb4dd 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -21,10 +21,6 @@
#include "hash.h"
#include "transaction.h"
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len);
-
/*
* insert a name into a directory, doing overflow properly if there is a hash
* collision. data_size indicates how big the item inserted should be. On
@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len)
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
unsigned long name_ptr;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1bf9f897065d..30965120772b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2384,6 +2384,8 @@ int open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
+ INIT_LIST_HEAD(&fs_info->pinned_chunks);
+
ret = btrfs_alloc_stripe_hash_table(fs_info);
if (ret) {
err = ret;
@@ -2830,9 +2832,11 @@ retry_root_backup:
btrfs_set_opt(fs_info->mount_opt, SSD);
}
- /* Set the real inode map cache flag */
- if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
- btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
+ /*
+ * Mount does not set all options immediately, we can do it now and do
+ * not have to wait for transaction commit
+ */
+ btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
@@ -3713,6 +3717,17 @@ void close_ctree(struct btrfs_root *root)
btrfs_free_block_rsv(root, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
+
+ lock_chunks(root);
+ while (!list_empty(&fs_info->pinned_chunks)) {
+ struct extent_map *em;
+
+ em = list_first_entry(&fs_info->pinned_chunks,
+ struct extent_map, list);
+ list_del_init(&em->list);
+ free_extent_map(em);
+ }
+ unlock_chunks(root);
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
@@ -3839,12 +3854,12 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
*/
if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
- sb->root);
+ btrfs_super_root(sb));
if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
- printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
- sb->chunk_root);
+ printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
+ btrfs_super_chunk_root(sb));
if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
- printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
+ printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
btrfs_super_log_root(sb));
if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
@@ -4129,6 +4144,25 @@ again:
return 0;
}
+static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_ordered_extent *ordered;
+
+ spin_lock(&fs_info->trans_lock);
+ while (!list_empty(&cur_trans->pending_ordered)) {
+ ordered = list_first_entry(&cur_trans->pending_ordered,
+ struct btrfs_ordered_extent,
+ trans_list);
+ list_del_init(&ordered->trans_list);
+ spin_unlock(&fs_info->trans_lock);
+
+ btrfs_put_ordered_extent(ordered);
+ spin_lock(&fs_info->trans_lock);
+ }
+ spin_unlock(&fs_info->trans_lock);
+}
+
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
{
@@ -4140,6 +4174,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
cur_trans->state = TRANS_STATE_UNBLOCKED;
wake_up(&root->fs_info->transaction_wait);
+ btrfs_free_pending_ordered(cur_trans, root->fs_info);
btrfs_destroy_delayed_inodes(root);
btrfs_assert_delayed_root_empty(root);
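
btrfs_free_pending_ordered() above uses the standard drain idiom for a spinlock-protected list whose entries must be released without the lock held: pop the first entry under the lock, drop the lock for the release, then retake it. A generic sketch; my_item and put_my_item() are placeholder names.

/* Sketch only -- generic list-drain pattern, placeholder names. */
struct my_item {
	struct list_head list;
	/* ... */
};

static void put_my_item(struct my_item *item);	/* placeholder release */

static void drain_pending(spinlock_t *lock, struct list_head *head)
{
	struct my_item *item;

	spin_lock(lock);
	while (!list_empty(head)) {
		item = list_first_entry(head, struct my_item, list);
		list_del_init(&item->list);
		spin_unlock(lock);

		put_my_item(item);	/* may sleep or take other locks */

		spin_lock(lock);
	}
	spin_unlock(lock);
}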
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 47c1ba141082..222d6aea4a8a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -315,12 +315,6 @@ get_caching_control(struct btrfs_block_group_cache *cache)
struct btrfs_caching_control *ctl;
spin_lock(&cache->lock);
- if (cache->cached != BTRFS_CACHE_STARTED) {
- spin_unlock(&cache->lock);
- return NULL;
- }
-
- /* We're loading it the fast way, so we don't have a caching_ctl. */
if (!cache->caching_ctl) {
spin_unlock(&cache->lock);
return NULL;
@@ -594,6 +588,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
spin_unlock(&cache->lock);
if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+ mutex_lock(&caching_ctl->mutex);
ret = load_free_space_cache(fs_info, cache);
spin_lock(&cache->lock);
@@ -601,15 +596,19 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->caching_ctl = NULL;
cache->cached = BTRFS_CACHE_FINISHED;
cache->last_byte_to_unpin = (u64)-1;
+ caching_ctl->progress = (u64)-1;
} else {
if (load_cache_only) {
cache->caching_ctl = NULL;
cache->cached = BTRFS_CACHE_NO;
} else {
cache->cached = BTRFS_CACHE_STARTED;
+ cache->has_caching_ctl = 1;
}
}
spin_unlock(&cache->lock);
+ mutex_unlock(&caching_ctl->mutex);
+
wake_up(&caching_ctl->wait);
if (ret == 1) {
put_caching_control(caching_ctl);
@@ -627,6 +626,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->cached = BTRFS_CACHE_NO;
} else {
cache->cached = BTRFS_CACHE_STARTED;
+ cache->has_caching_ctl = 1;
}
spin_unlock(&cache->lock);
wake_up(&caching_ctl->wait);
@@ -3162,7 +3162,19 @@ next_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
struct rb_node *node;
+
spin_lock(&root->fs_info->block_group_cache_lock);
+
+ /* If our block group was removed, we need a full search. */
+ if (RB_EMPTY_NODE(&cache->cache_node)) {
+ const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ btrfs_put_block_group(cache);
+ cache = btrfs_lookup_first_block_group(root->fs_info,
+ next_bytenr);
+ return cache;
+ }
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
if (node) {
@@ -3504,6 +3516,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->chunk_alloc = 0;
found->flush = 0;
init_waitqueue_head(&found->wait);
+ INIT_LIST_HEAD(&found->ro_bgs);
ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
info->space_info_kobj, "%s",
@@ -5425,7 +5438,17 @@ static int update_block_group(struct btrfs_root *root,
spin_unlock(&cache->space_info->lock);
} else {
old_val -= num_bytes;
+ btrfs_set_block_group_used(&cache->item, old_val);
+ cache->pinned += num_bytes;
+ cache->space_info->bytes_pinned += num_bytes;
+ cache->space_info->bytes_used -= num_bytes;
+ cache->space_info->disk_used -= num_bytes * factor;
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+ set_extent_dirty(info->pinned_extents,
+ bytenr, bytenr + num_bytes - 1,
+ GFP_NOFS | __GFP_NOFAIL);
/*
* No longer have used bytes in this block group, queue
* it for deletion.
@@ -5439,17 +5462,6 @@ static int update_block_group(struct btrfs_root *root,
}
spin_unlock(&info->unused_bgs_lock);
}
- btrfs_set_block_group_used(&cache->item, old_val);
- cache->pinned += num_bytes;
- cache->space_info->bytes_pinned += num_bytes;
- cache->space_info->bytes_used -= num_bytes;
- cache->space_info->disk_used -= num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
-
- set_extent_dirty(info->pinned_extents,
- bytenr, bytenr + num_bytes - 1,
- GFP_NOFS | __GFP_NOFAIL);
}
btrfs_put_block_group(cache);
total -= num_bytes;
@@ -8511,6 +8523,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
min_allocable_bytes <= sinfo->total_bytes) {
sinfo->bytes_readonly += num_bytes;
cache->ro = 1;
+ list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
ret = 0;
}
out:
@@ -8565,15 +8578,20 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
/*
* helper to account the unused space of all the readonly block group in the
- * list. takes mirrors into account.
+ * space_info. takes mirrors into account.
*/
-static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
struct btrfs_block_group_cache *block_group;
u64 free_bytes = 0;
int factor;
- list_for_each_entry(block_group, groups_list, list) {
+ /* It's df, we don't care if it's racy */
+ if (list_empty(&sinfo->ro_bgs))
+ return 0;
+
+ spin_lock(&sinfo->lock);
+ list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
spin_lock(&block_group->lock);
if (!block_group->ro) {
@@ -8594,26 +8612,6 @@ static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
spin_unlock(&block_group->lock);
}
-
- return free_bytes;
-}
-
-/*
- * helper to account the unused space of all the readonly block group in the
- * space_info. takes mirrors into account.
- */
-u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
-{
- int i;
- u64 free_bytes = 0;
-
- spin_lock(&sinfo->lock);
-
- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
- if (!list_empty(&sinfo->block_groups[i]))
- free_bytes += __btrfs_get_ro_block_group_free_space(
- &sinfo->block_groups[i]);
-
spin_unlock(&sinfo->lock);
return free_bytes;
@@ -8633,6 +8631,7 @@ void btrfs_set_block_group_rw(struct btrfs_root *root,
cache->bytes_super - btrfs_block_group_used(&cache->item);
sinfo->bytes_readonly -= num_bytes;
cache->ro = 0;
+ list_del_init(&cache->ro_list);
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
}
@@ -9002,7 +9001,9 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
INIT_LIST_HEAD(&cache->bg_list);
+ INIT_LIST_HEAD(&cache->ro_list);
btrfs_init_free_space_ctl(cache);
+ atomic_set(&cache->trimming, 0);
return cache;
}
@@ -9195,9 +9196,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
int ret = 0;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
- list_del_init(&block_group->bg_list);
if (ret)
- continue;
+ goto next;
spin_lock(&block_group->lock);
memcpy(&item, &block_group->item, sizeof(item));
@@ -9212,6 +9212,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
key.objectid, key.offset);
if (ret)
btrfs_abort_transaction(trans, extent_root, ret);
+next:
+ list_del_init(&block_group->bg_list);
}
}
@@ -9304,7 +9306,8 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 group_start)
+ struct btrfs_root *root, u64 group_start,
+ struct extent_map *em)
{
struct btrfs_path *path;
struct btrfs_block_group_cache *block_group;
@@ -9316,6 +9319,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
int ret;
int index;
int factor;
+ struct btrfs_caching_control *caching_ctl = NULL;
+ bool remove_em;
root = root->fs_info->extent_root;
@@ -9400,6 +9405,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+ RB_CLEAR_NODE(&block_group->cache_node);
if (root->fs_info->first_logical_byte == block_group->key.objectid)
root->fs_info->first_logical_byte = (u64)-1;
@@ -9411,6 +9417,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* are still on the list after taking the semaphore
*/
list_del_init(&block_group->list);
+ list_del_init(&block_group->ro_list);
if (list_empty(&block_group->space_info->block_groups[index])) {
kobj = block_group->space_info->block_group_kobjs[index];
block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9422,8 +9429,32 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
kobject_put(kobj);
}
+ if (block_group->has_caching_ctl)
+ caching_ctl = get_caching_control(block_group);
if (block_group->cached == BTRFS_CACHE_STARTED)
wait_block_group_cache_done(block_group);
+ if (block_group->has_caching_ctl) {
+ down_write(&root->fs_info->commit_root_sem);
+ if (!caching_ctl) {
+ struct btrfs_caching_control *ctl;
+
+ list_for_each_entry(ctl,
+ &root->fs_info->caching_block_groups, list)
+ if (ctl->block_group == block_group) {
+ caching_ctl = ctl;
+ atomic_inc(&caching_ctl->count);
+ break;
+ }
+ }
+ if (caching_ctl)
+ list_del_init(&caching_ctl->list);
+ up_write(&root->fs_info->commit_root_sem);
+ if (caching_ctl) {
+ /* Once for the caching bgs list and once for us. */
+ put_caching_control(caching_ctl);
+ put_caching_control(caching_ctl);
+ }
+ }
btrfs_remove_free_space_cache(block_group);
@@ -9435,6 +9466,71 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
memcpy(&key, &block_group->key, sizeof(key));
+ lock_chunks(root);
+ if (!list_empty(&em->list)) {
+ /* We're in the transaction->pending_chunks list. */
+ free_extent_map(em);
+ }
+ spin_lock(&block_group->lock);
+ block_group->removed = 1;
+ /*
+ * At this point trimming can't start on this block group, because we
+ * removed the block group from the tree fs_info->block_group_cache_tree
+ * so no one can't find it anymore and even if someone already got this
+ * block group before we removed it from the rbtree, they have already
+ * incremented block_group->trimming - if they didn't, they won't find
+ * any free space entries because we already removed them all when we
+ * called btrfs_remove_free_space_cache().
+ *
+ * And we must not remove the extent map from the fs_info->mapping_tree
+ * to prevent the same logical address range and physical device space
+ * ranges from being reused for a new block group. This is because our
+ * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
+ * completely transactionless, so while it is trimming a range the
+ * currently running transaction might finish and a new one start,
+ * allowing for new block groups to be created that can reuse the same
+ * physical device locations unless we take this special care.
+ */
+ remove_em = (atomic_read(&block_group->trimming) == 0);
+ /*
+ * Make sure a trimmer task always sees the em in the pinned_chunks list
+ * if it sees block_group->removed == 1 (needs to lock block_group->lock
+ * before checking block_group->removed).
+ */
+ if (!remove_em) {
+ /*
+ * Our em might be in trans->transaction->pending_chunks which
+ * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
+ * and so is the fs_info->pinned_chunks list.
+ *
+ * So at this point we must be holding the chunk_mutex to avoid
+ * any races with chunk allocation (more specifically at
+ * volumes.c:contains_pending_extent()), to ensure it always
+ * sees the em, either in the pending_chunks list or in the
+ * pinned_chunks list.
+ */
+ list_move_tail(&em->list, &root->fs_info->pinned_chunks);
+ }
+ spin_unlock(&block_group->lock);
+
+ if (remove_em) {
+ struct extent_map_tree *em_tree;
+
+ em_tree = &root->fs_info->mapping_tree.map_tree;
+ write_lock(&em_tree->lock);
+ /*
+ * The em might be in the pending_chunks list, so make sure the
+ * chunk mutex is locked, since remove_extent_mapping() will
+ * delete us from that list.
+ */
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+ /* once for the tree */
+ free_extent_map(em);
+ }
+
+ unlock_chunks(root);
+
btrfs_put_block_group(block_group);
btrfs_put_block_group(block_group);
@@ -9523,10 +9619,18 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
*/
start = block_group->key.objectid;
end = start + block_group->key.offset - 1;
- clear_extent_bits(&fs_info->freed_extents[0], start, end,
+ ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
EXTENT_DIRTY, GFP_NOFS);
- clear_extent_bits(&fs_info->freed_extents[1], start, end,
+ if (ret) {
+ btrfs_set_block_group_rw(root, block_group);
+ goto end_trans;
+ }
+ ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
EXTENT_DIRTY, GFP_NOFS);
+ if (ret) {
+ btrfs_set_block_group_rw(root, block_group);
+ goto end_trans;
+ }
/* Reset pinned so btrfs_put_block_group doesn't complain */
block_group->pinned = 0;
@@ -9537,6 +9641,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
*/
ret = btrfs_remove_chunk(trans, root,
block_group->key.objectid);
+end_trans:
btrfs_end_transaction(trans, root);
next:
btrfs_put_block_group(block_group);
@@ -9657,12 +9762,14 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
}
/*
- * btrfs_{start,end}_write() is similar to mnt_{want, drop}_write(),
- * they are used to prevent the some tasks writing data into the page cache
- * by nocow before the subvolume is snapshoted, but flush the data into
- * the disk after the snapshot creation.
+ * btrfs_{start,end}_write_no_snapshoting() are similar to
+ * mnt_{want,drop}_write(); they are used to prevent some tasks from writing
+ * data into the page cache through nocow before the subvolume is snapshoted
+ * and to flush that data to disk after the snapshot creation, or to prevent
+ * operations that run while snapshoting is ongoing from making the snapshot
+ * inconsistent (writes followed by expanding truncates for example).
*/
-void btrfs_end_nocow_write(struct btrfs_root *root)
+void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
percpu_counter_dec(&root->subv_writers->counter);
/*
@@ -9674,7 +9781,7 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
wake_up(&root->subv_writers->wait);
}
-int btrfs_start_nocow_write(struct btrfs_root *root)
+int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
if (atomic_read(&root->will_be_snapshoted))
return 0;
@@ -9685,7 +9792,7 @@ int btrfs_start_nocow_write(struct btrfs_root *root)
*/
smp_mb();
if (atomic_read(&root->will_be_snapshoted)) {
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
return 0;
}
return 1;
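
The rename to btrfs_{start,end}_write_no_snapshoting() keeps the same optimistic protocol: bump the per-subvolume writer counter, issue a barrier, and re-check will_be_snapshoted so a concurrent snapshot either sees the writer or the writer backs off. Caller-side usage looks like the sketch below; example_try_nocow() is invented, and the -ENOSPC fallback matches check_can_nocow() in the file.c hunk further down.

/* Sketch only -- caller-side usage, invented function name. */
static int example_try_nocow(struct btrfs_root *root)
{
	/* returns 0 when a snapshot is pending: fall back to cow */
	if (!btrfs_start_write_no_snapshoting(root))
		return -ENOSPC;

	/* ... nocow work: check extents, reserve space, dirty pages ... */

	btrfs_end_write_no_snapshoting(root);
	return 0;
}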
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bf3f424e0013..4ebabd237153 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -595,9 +595,14 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
clear = 1;
again:
if (!prealloc && (mask & __GFP_WAIT)) {
+ /*
+ * Don't worry about allocation failure here, because we might end
+ * up not needing the pre-allocated extent state at all, which
+ * is the case if the extent states already in the tree cover our
+ * input range and don't cover any other range.
+ * If we end up needing a new extent state we allocate it later.
+ */
prealloc = alloc_extent_state(mask);
- if (!prealloc)
- return -ENOMEM;
}
spin_lock(&tree->lock);
@@ -796,17 +801,25 @@ static void set_state_bits(struct extent_io_tree *tree,
state->state |= bits_to_set;
}
-static void cache_state(struct extent_state *state,
- struct extent_state **cached_ptr)
+static void cache_state_if_flags(struct extent_state *state,
+ struct extent_state **cached_ptr,
+ const u64 flags)
{
if (cached_ptr && !(*cached_ptr)) {
- if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+ if (!flags || (state->state & flags)) {
*cached_ptr = state;
atomic_inc(&state->refs);
}
}
}
+static void cache_state(struct extent_state *state,
+ struct extent_state **cached_ptr)
+{
+ return cache_state_if_flags(state, cached_ptr,
+ EXTENT_IOBITS | EXTENT_BOUNDARY);
+}
+
/*
* set some bits on a range in the tree. This may require allocations or
* sleeping, so the gfp mask is used to indicate what is allowed.
@@ -1058,13 +1071,21 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int err = 0;
u64 last_start;
u64 last_end;
+ bool first_iteration = true;
btrfs_debug_check_extent_io_range(tree, start, end);
again:
if (!prealloc && (mask & __GFP_WAIT)) {
+ /*
+ * Best effort, don't worry if extent state allocation fails
+ * here for the first iteration. We might have a cached state
+ * that matches exactly the target range, in which case no
+ * extent state allocations are needed. We'll only know this
+ * after locking the tree.
+ */
prealloc = alloc_extent_state(mask);
- if (!prealloc)
+ if (!prealloc && !first_iteration)
return -ENOMEM;
}
@@ -1234,6 +1255,7 @@ search_again:
spin_unlock(&tree->lock);
if (mask & __GFP_WAIT)
cond_resched();
+ first_iteration = false;
goto again;
}
@@ -1482,7 +1504,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
state = find_first_extent_bit_state(tree, start, bits);
got_it:
if (state) {
- cache_state(state, cached_state);
+ cache_state_if_flags(state, cached_state, 0);
*start_ret = state->start;
*end_ret = state->end;
ret = 0;
@@ -1746,6 +1768,9 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
if (page_ops == 0)
return 0;
+ if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
+ mapping_set_error(inode->i_mapping, -EIO);
+
while (nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
min_t(unsigned long,
@@ -1763,6 +1788,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
clear_page_dirty_for_io(pages[i]);
if (page_ops & PAGE_SET_WRITEBACK)
set_page_writeback(pages[i]);
+ if (page_ops & PAGE_SET_ERROR)
+ SetPageError(pages[i]);
if (page_ops & PAGE_END_WRITEBACK)
end_page_writeback(pages[i]);
if (page_ops & PAGE_UNLOCK)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6d4b938be986..ece9ce87edff 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -49,6 +49,7 @@
#define PAGE_SET_WRITEBACK (1 << 2)
#define PAGE_END_WRITEBACK (1 << 3)
#define PAGE_SET_PRIVATE2 (1 << 4)
+#define PAGE_SET_ERROR (1 << 5)
/*
* page->private values. Every page that is controlled by the extent
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 225302b39afb..6a98bddd8f33 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -287,8 +287,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
if (!em)
goto out;
- if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
- list_move(&em->list, &tree->modified_extents);
em->generation = gen;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
em->mod_start = em->start;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a18ceabd99a8..e4090259569b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1428,7 +1428,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
u64 num_bytes;
int ret;
- ret = btrfs_start_nocow_write(root);
+ ret = btrfs_start_write_no_snapshoting(root);
if (!ret)
return -ENOSPC;
@@ -1451,7 +1451,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
if (ret <= 0) {
ret = 0;
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
} else {
*write_bytes = min_t(size_t, *write_bytes ,
num_bytes - pos + lockstart);
@@ -1543,7 +1543,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
btrfs_free_reserved_data_space(inode,
reserve_bytes);
else
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
break;
}
@@ -1632,7 +1632,7 @@ again:
release_bytes = 0;
if (only_release_metadata)
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
if (only_release_metadata && copied > 0) {
u64 lockstart = round_down(pos, root->sectorsize);
@@ -1661,7 +1661,7 @@ again:
if (release_bytes) {
if (only_release_metadata) {
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
btrfs_delalloc_release_space(inode, release_bytes);
@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
ssize_t written;
ssize_t written_buffered;
loff_t endbyte;
@@ -1692,8 +1693,15 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
err = written_buffered;
goto out;
}
+ /*
+ * Ensure all data is persisted. We want the next direct IO read to be
+ * able to read what was just written.
+ */
endbyte = pos + written_buffered - 1;
- err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+ err = btrfs_fdatawrite_range(inode, pos, endbyte);
+ if (err)
+ goto out;
+ err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
if (err)
goto out;
written += written_buffered;
@@ -1854,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
int ret;
atomic_inc(&BTRFS_I(inode)->sync_writers);
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ ret = btrfs_fdatawrite_range(inode, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
return ret;
@@ -2810,3 +2815,29 @@ int btrfs_auto_defrag_init(void)
return 0;
}
+
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+ int ret;
+
+ /*
+ * So with compression we will find and lock a dirty page and clear the
+ * first one as dirty, setup an async extent, and immediately return
+ * with the entire range locked but with nobody actually marked with
+ * writeback. So we can't just filemap_write_and_wait_range() and
+ * expect it to work since it will just kick off a thread to do the
+ * actual work. So we need to call filemap_fdatawrite_range _again_
+ * since it will wait on the page lock, which won't be unlocked until
+ * after the pages have been marked as writeback and so we're good to go
+ * from there. We have to do this otherwise we'll miss the ordered
+ * extents and that results in badness. Please Josef, do not think you
+ * know better and pull this out at some point in the future, it is
+ * right and you are wrong.
+ */
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+ return ret;
+}
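The buffered fallback in __btrfs_direct_write now starts writeback with btrfs_fdatawrite_range() and waits for it separately with filemap_fdatawait_range(). A rough userspace analogue of that split is the sync_file_range() syscall, shown below on an arbitrary temporary file; it only illustrates the start/wait separation, not the compression corner case the new helper exists for:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        int fd = open("/tmp/wb-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);

        if (fd < 0)
                return 1;
        memset(buf, 'x', sizeof(buf));
        if (write(fd, buf, sizeof(buf)) != sizeof(buf))
                return 1;

        /* step 1: kick off writeback for the range */
        if (sync_file_range(fd, 0, sizeof(buf), SYNC_FILE_RANGE_WRITE))
                perror("start writeback");

        /* step 2: wait for that writeback to finish */
        if (sync_file_range(fd, 0, sizeof(buf),
                            SYNC_FILE_RANGE_WAIT_BEFORE |
                            SYNC_FILE_RANGE_WAIT_AFTER))
                perror("wait for writeback");

        close(fd);
        unlink("/tmp/wb-demo");
        return 0;
}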
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 33848196550e..030847bf7cec 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -27,10 +27,17 @@
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
+#include "volumes.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
+struct btrfs_trim_range {
+ u64 start;
+ u64 bytes;
+ struct list_head list;
+};
+
static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
@@ -881,6 +888,7 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
int ret;
struct btrfs_free_cluster *cluster = NULL;
struct rb_node *node = rb_first(&ctl->free_space_offset);
+ struct btrfs_trim_range *trim_entry;
/* Get the cluster for this block_group if it exists */
if (block_group && !list_empty(&block_group->cluster_list)) {
@@ -916,6 +924,21 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
cluster = NULL;
}
}
+
+ /*
+ * Make sure we don't miss any range that was removed from our rbtree
+ * because trimming is running. Otherwise after a umount+mount (or crash
+ * after committing the transaction) we would leak free space and get
+ * an inconsistent free space cache report from fsck.
+ */
+ list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
+ ret = io_ctl_add_entry(io_ctl, trim_entry->start,
+ trim_entry->bytes, NULL);
+ if (ret)
+ goto fail;
+ *entries += 1;
+ }
+
return 0;
fail:
return -ENOSPC;
@@ -1135,12 +1158,15 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_set_generation(&io_ctl, trans->transid);
+ mutex_lock(&ctl->cache_writeout_mutex);
/* Write out the extent entries in the free space cache */
ret = write_cache_extent_entries(&io_ctl, ctl,
block_group, &entries, &bitmaps,
&bitmap_list);
- if (ret)
+ if (ret) {
+ mutex_unlock(&ctl->cache_writeout_mutex);
goto out_nospc;
+ }
/*
* Some spaces that are freed in the current transaction are pinned,
@@ -1148,11 +1174,18 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* committed, we shouldn't lose them.
*/
ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
- if (ret)
+ if (ret) {
+ mutex_unlock(&ctl->cache_writeout_mutex);
goto out_nospc;
+ }
- /* At last, we write out all the bitmaps. */
+ /*
+ * At last, we write out all the bitmaps and keep cache_writeout_mutex
+ * locked while doing it because a concurrent trim can be manipulating
+ * or freeing the bitmap.
+ */
ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+ mutex_unlock(&ctl->cache_writeout_mutex);
if (ret)
goto out_nospc;
@@ -2295,6 +2328,8 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
ctl->start = block_group->key.objectid;
ctl->private = block_group;
ctl->op = &free_space_op;
+ INIT_LIST_HEAD(&ctl->trimming_ranges);
+ mutex_init(&ctl->cache_writeout_mutex);
/*
* we only want to have 32k of ram per block group for keeping
@@ -2911,10 +2946,12 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
static int do_trimming(struct btrfs_block_group_cache *block_group,
u64 *total_trimmed, u64 start, u64 bytes,
- u64 reserved_start, u64 reserved_bytes)
+ u64 reserved_start, u64 reserved_bytes,
+ struct btrfs_trim_range *trim_entry)
{
struct btrfs_space_info *space_info = block_group->space_info;
struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int ret;
int update = 0;
u64 trimmed = 0;
@@ -2934,7 +2971,10 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
if (!ret)
*total_trimmed += trimmed;
+ mutex_lock(&ctl->cache_writeout_mutex);
btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+ list_del(&trim_entry->list);
+ mutex_unlock(&ctl->cache_writeout_mutex);
if (update) {
spin_lock(&space_info->lock);
@@ -2962,16 +3002,21 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
u64 bytes;
while (start < end) {
+ struct btrfs_trim_range trim_entry;
+
+ mutex_lock(&ctl->cache_writeout_mutex);
spin_lock(&ctl->tree_lock);
if (ctl->free_space < minlen) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
break;
}
entry = tree_search_offset(ctl, start, 0, 1);
if (!entry) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
break;
}
@@ -2980,6 +3025,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
node = rb_next(&entry->offset_index);
if (!node) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
goto out;
}
entry = rb_entry(node, struct btrfs_free_space,
@@ -2988,6 +3034,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
if (entry->offset >= end) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
break;
}
@@ -2997,6 +3044,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
bytes = min(extent_start + extent_bytes, end) - start;
if (bytes < minlen) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
goto next;
}
@@ -3004,9 +3052,13 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
kmem_cache_free(btrfs_free_space_cachep, entry);
spin_unlock(&ctl->tree_lock);
+ trim_entry.start = extent_start;
+ trim_entry.bytes = extent_bytes;
+ list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
+ mutex_unlock(&ctl->cache_writeout_mutex);
ret = do_trimming(block_group, total_trimmed, start, bytes,
- extent_start, extent_bytes);
+ extent_start, extent_bytes, &trim_entry);
if (ret)
break;
next:
@@ -3035,17 +3087,21 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
while (offset < end) {
bool next_bitmap = false;
+ struct btrfs_trim_range trim_entry;
+ mutex_lock(&ctl->cache_writeout_mutex);
spin_lock(&ctl->tree_lock);
if (ctl->free_space < minlen) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
break;
}
entry = tree_search_offset(ctl, offset, 1, 0);
if (!entry) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
next_bitmap = true;
goto next;
}
@@ -3054,6 +3110,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
ret2 = search_bitmap(ctl, entry, &start, &bytes);
if (ret2 || start >= end) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
next_bitmap = true;
goto next;
}
@@ -3061,6 +3118,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
bytes = min(bytes, end - start);
if (bytes < minlen) {
spin_unlock(&ctl->tree_lock);
+ mutex_unlock(&ctl->cache_writeout_mutex);
goto next;
}
@@ -3069,9 +3127,13 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
free_bitmap(ctl, entry);
spin_unlock(&ctl->tree_lock);
+ trim_entry.start = start;
+ trim_entry.bytes = bytes;
+ list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
+ mutex_unlock(&ctl->cache_writeout_mutex);
ret = do_trimming(block_group, total_trimmed, start, bytes,
- start, bytes);
+ start, bytes, &trim_entry);
if (ret)
break;
next:
@@ -3101,11 +3163,52 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
*trimmed = 0;
+ spin_lock(&block_group->lock);
+ if (block_group->removed) {
+ spin_unlock(&block_group->lock);
+ return 0;
+ }
+ atomic_inc(&block_group->trimming);
+ spin_unlock(&block_group->lock);
+
ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
if (ret)
- return ret;
+ goto out;
ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+out:
+ spin_lock(&block_group->lock);
+ if (atomic_dec_and_test(&block_group->trimming) &&
+ block_group->removed) {
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ spin_unlock(&block_group->lock);
+
+ em_tree = &block_group->fs_info->mapping_tree.map_tree;
+ write_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, block_group->key.objectid,
+ 1);
+ BUG_ON(!em); /* logic error, can't happen */
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+
+ lock_chunks(block_group->fs_info->chunk_root);
+ list_del_init(&em->list);
+ unlock_chunks(block_group->fs_info->chunk_root);
+
+ /* once for us and once for the tree */
+ free_extent_map(em);
+ free_extent_map(em);
+
+ /*
+ * We've left one free space entry and other tasks trimming
+ * this block group have each left one entry. Free them.
+ */
+ __btrfs_remove_free_space_cache(block_group->free_space_ctl);
+ } else {
+ spin_unlock(&block_group->lock);
+ }
return ret;
}
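The trimming protocol above keeps every in-flight discard visible to the cache writer through ctl->trimming_ranges under cache_writeout_mutex. Here is a minimal single-threaded userspace sketch of that handshake; the list, mutex and function names are local stand-ins, and a singly linked list replaces the kernel list_head for brevity:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct trim_range {
        uint64_t start;
        uint64_t bytes;
        struct trim_range *next;
};

static pthread_mutex_t cache_writeout_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct trim_range *trimming_ranges;      /* singly linked, for brevity */

/* What the cache writer does: also account for ranges that are currently
 * being trimmed and therefore missing from the free space tree. */
static void write_cache(void)
{
        struct trim_range *t;

        pthread_mutex_lock(&cache_writeout_mutex);
        for (t = trimming_ranges; t; t = t->next)
                printf("cache entry for in-flight trim %llu+%llu\n",
                       (unsigned long long)t->start,
                       (unsigned long long)t->bytes);
        pthread_mutex_unlock(&cache_writeout_mutex);
}

static void trim_one_extent(uint64_t start, uint64_t bytes)
{
        struct trim_range entry = { .start = start, .bytes = bytes };

        /* publish the range before it disappears from the free space tree */
        pthread_mutex_lock(&cache_writeout_mutex);
        entry.next = trimming_ranges;
        trimming_ranges = &entry;
        pthread_mutex_unlock(&cache_writeout_mutex);

        /* a cache write racing with the discard still sees the range */
        write_cache();
        printf("discarding %llu+%llu\n",
               (unsigned long long)start, (unsigned long long)bytes);

        /* re-add the free space and retire the entry, as do_trimming() does
         * (the real code uses list_del() on a doubly linked list) */
        pthread_mutex_lock(&cache_writeout_mutex);
        trimming_ranges = entry.next;
        pthread_mutex_unlock(&cache_writeout_mutex);
}

int main(void)
{
        trim_one_extent(4096, 65536);
        write_cache();          /* nothing pending any more */
        return 0;
}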
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 0cf4977ef70d..88b2238a0aed 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -38,6 +38,8 @@ struct btrfs_free_space_ctl {
u64 start;
struct btrfs_free_space_op *op;
void *private;
+ struct mutex cache_writeout_mutex;
+ struct list_head trimming_ranges;
};
struct btrfs_free_space_op {
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 83d646bd2e4b..74faea3a516e 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)
root->root_key.objectid);
if (IS_ERR(tsk)) {
btrfs_warn(root->fs_info, "failed to start inode caching task");
- btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
+ btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
"disabling inode map caching");
}
}
@@ -364,6 +364,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
ctl->start = 0;
ctl->private = NULL;
ctl->op = &free_ino_op;
+ INIT_LIST_HEAD(&ctl->trimming_ranges);
+ mutex_init(&ctl->cache_writeout_mutex);
/*
* Initially we allow to use 16K of ram to cache chunks of
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ff0dcc016b71..e687bb0dc73a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -382,7 +382,7 @@ static inline int inode_need_compress(struct inode *inode)
* are written in the same order that the flusher thread sent them
* down.
*/
-static noinline int compress_file_range(struct inode *inode,
+static noinline void compress_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end,
struct async_cow *async_cow,
@@ -411,14 +411,6 @@ static noinline int compress_file_range(struct inode *inode,
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
- /*
- * skip compression for a small file range(<=blocksize) that
- * isn't an inline extent, since it dosen't save disk space at all.
- */
- if ((end - start + 1) <= blocksize &&
- (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
- goto cleanup_and_bail_uncompressed;
-
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
@@ -440,6 +432,14 @@ again:
total_compressed = actual_end - start;
+ /*
+ * skip compression for a small file range (<= blocksize) that
+ * isn't an inline extent, since it doesn't save disk space at all.
+ */
+ if (total_compressed <= blocksize &&
+ (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
+ goto cleanup_and_bail_uncompressed;
+
/* we want to make sure that amount of ram required to uncompress
* an extent is reasonable, so we limit the total size in ram
* of a compressed extent to 128k. This is a crucial number
@@ -527,7 +527,10 @@ cont:
if (ret <= 0) {
unsigned long clear_flags = EXTENT_DELALLOC |
EXTENT_DEFRAG;
+ unsigned long page_error_op;
+
clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
+ page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
/*
* inline extent creation worked or returned error,
@@ -538,6 +541,7 @@ cont:
clear_flags, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
+ page_error_op |
PAGE_END_WRITEBACK);
goto free_pages_out;
}
@@ -620,8 +624,7 @@ cleanup_and_bail_uncompressed:
*num_added += 1;
}
-out:
- return ret;
+ return;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
@@ -629,8 +632,22 @@ free_pages_out:
page_cache_release(pages[i]);
}
kfree(pages);
+}
- goto out;
+static void free_async_extent_pages(struct async_extent *async_extent)
+{
+ int i;
+
+ if (!async_extent->pages)
+ return;
+
+ for (i = 0; i < async_extent->nr_pages; i++) {
+ WARN_ON(async_extent->pages[i]->mapping);
+ page_cache_release(async_extent->pages[i]);
+ }
+ kfree(async_extent->pages);
+ async_extent->nr_pages = 0;
+ async_extent->pages = NULL;
}
/*
@@ -639,7 +656,7 @@ free_pages_out:
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
-static noinline int submit_compressed_extents(struct inode *inode,
+static noinline void submit_compressed_extents(struct inode *inode,
struct async_cow *async_cow)
{
struct async_extent *async_extent;
@@ -651,9 +668,6 @@ static noinline int submit_compressed_extents(struct inode *inode,
struct extent_io_tree *io_tree;
int ret = 0;
- if (list_empty(&async_cow->extents))
- return 0;
-
again:
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
@@ -709,15 +723,7 @@ retry:
async_extent->compressed_size,
0, alloc_hint, &ins, 1, 1);
if (ret) {
- int i;
-
- for (i = 0; i < async_extent->nr_pages; i++) {
- WARN_ON(async_extent->pages[i]->mapping);
- page_cache_release(async_extent->pages[i]);
- }
- kfree(async_extent->pages);
- async_extent->nr_pages = 0;
- async_extent->pages = NULL;
+ free_async_extent_pages(async_extent);
if (ret == -ENOSPC) {
unlock_extent(io_tree, async_extent->start,
@@ -814,15 +820,26 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
+ if (ret) {
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+ struct page *p = async_extent->pages[0];
+ const u64 start = async_extent->start;
+ const u64 end = start + async_extent->ram_size - 1;
+
+ p->mapping = inode->i_mapping;
+ tree->ops->writepage_end_io_hook(p, start, end,
+ NULL, 0);
+ p->mapping = NULL;
+ extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+ PAGE_END_WRITEBACK |
+ PAGE_SET_ERROR);
+ free_async_extent_pages(async_extent);
+ }
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
- if (ret)
- goto out;
cond_resched();
}
- ret = 0;
-out:
- return ret;
+ return;
out_free_reserve:
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
@@ -832,7 +849,9 @@ out_free:
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
+ PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
+ PAGE_SET_ERROR);
+ free_async_extent_pages(async_extent);
kfree(async_extent);
goto again;
}
@@ -1318,7 +1337,7 @@ next_slot:
* we fall into common COW way.
*/
if (!nolock) {
- err = btrfs_start_nocow_write(root);
+ err = btrfs_start_write_no_snapshoting(root);
if (!err)
goto out_check;
}
@@ -1342,7 +1361,7 @@ out_check:
if (extent_end <= start) {
path->slots[0]++;
if (!nolock && nocow)
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
goto next_slot;
}
if (!nocow) {
@@ -1362,7 +1381,7 @@ out_check:
page_started, nr_written, 1);
if (ret) {
if (!nolock && nocow)
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
goto error;
}
cow_start = (u64)-1;
@@ -1413,7 +1432,7 @@ out_check:
num_bytes);
if (ret) {
if (!nolock && nocow)
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
goto error;
}
}
@@ -1424,7 +1443,7 @@ out_check:
EXTENT_DELALLOC, PAGE_UNLOCK |
PAGE_SET_PRIVATE2);
if (!nolock && nocow)
- btrfs_end_nocow_write(root);
+ btrfs_end_write_no_snapshoting(root);
cur_offset = extent_end;
if (cur_offset > end)
break;
@@ -4580,6 +4599,26 @@ next:
return err;
}
+static int wait_snapshoting_atomic_t(atomic_t *a)
+{
+ schedule();
+ return 0;
+}
+
+static void wait_for_snapshot_creation(struct btrfs_root *root)
+{
+ while (true) {
+ int ret;
+
+ ret = btrfs_start_write_no_snapshoting(root);
+ if (ret)
+ break;
+ wait_on_atomic_t(&root->will_be_snapshoted,
+ wait_snapshoting_atomic_t,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4604,17 +4643,30 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
if (newsize > oldsize) {
truncate_pagecache(inode, newsize);
+ /*
+ * Don't do an expanding truncate while snapshoting is ongoing.
+ * This is to ensure the snapshot captures a fully consistent
+ * state of this file - if the snapshot captures this expanding
+ * truncation, it must capture all writes that happened before
+ * this truncation.
+ */
+ wait_for_snapshot_creation(root);
ret = btrfs_cont_expand(inode, oldsize, newsize);
- if (ret)
+ if (ret) {
+ btrfs_end_write_no_snapshoting(root);
return ret;
+ }
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans))
+ if (IS_ERR(trans)) {
+ btrfs_end_write_no_snapshoting(root);
return PTR_ERR(trans);
+ }
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
+ btrfs_end_write_no_snapshoting(root);
btrfs_end_transaction(trans, root);
} else {
@@ -7000,9 +7052,12 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
btrfs_put_ordered_extent(ordered);
} else {
/* Screw you mmap */
- ret = filemap_write_and_wait_range(inode->i_mapping,
- lockstart,
- lockend);
+ ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
+ if (ret)
+ break;
+ ret = filemap_fdatawait_range(inode->i_mapping,
+ lockstart,
+ lockend);
if (ret)
break;
@@ -9442,6 +9497,21 @@ out_inode:
}
+/* Inspired by filemap_check_errors() */
+int btrfs_inode_check_errors(struct inode *inode)
+{
+ int ret = 0;
+
+ if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
+ ret = -ENOSPC;
+ if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
+ ret = -EIO;
+
+ return ret;
+}
+
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
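btrfs_inode_check_errors() reports mapping errors with clear-on-read semantics, with EIO taking precedence over ENOSPC. A tiny userspace sketch of that behavior follows, assuming local AS_* bit numbers and folding the kernel's test_bit()+test_and_clear_bit() pair into a single helper:

#include <errno.h>
#include <stdio.h>

#define AS_EIO    0
#define AS_ENOSPC 1

static unsigned long mapping_flags;

static int test_and_clear_bit(int nr, unsigned long *flags)
{
        int was_set = (*flags >> nr) & 1;

        *flags &= ~(1UL << nr);
        return was_set;
}

static int inode_check_errors(void)
{
        int ret = 0;

        /* -EIO is checked last so it takes precedence over -ENOSPC */
        if (test_and_clear_bit(AS_ENOSPC, &mapping_flags))
                ret = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping_flags))
                ret = -EIO;
        return ret;
}

int main(void)
{
        mapping_flags = (1UL << AS_EIO) | (1UL << AS_ENOSPC);
        printf("first check: %d\n", inode_check_errors());   /* -EIO */
        printf("second check: %d\n", inode_check_errors());  /* 0: flags cleared */
        return 0;
}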
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 080fe66c0349..d49fe8a0f6b5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -617,7 +617,7 @@ fail:
return ret;
}
-static void btrfs_wait_nocow_write(struct btrfs_root *root)
+static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
{
s64 writers;
DEFINE_WAIT(wait);
@@ -649,7 +649,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
atomic_inc(&root->will_be_snapshoted);
smp_mb__after_atomic();
- btrfs_wait_nocow_write(root);
+ btrfs_wait_for_no_snapshoting_writes(root);
ret = btrfs_start_delalloc_inodes(root, 0);
if (ret)
@@ -717,35 +717,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret)
goto fail;
- /*
- * If orphan cleanup did remove any orphans, it means the tree was
- * modified and therefore the commit root is not the same as the
- * current root anymore. This is a problem, because send uses the
- * commit root and therefore can see inode items that don't exist
- * in the current root anymore, and for example make calls to
- * btrfs_iget, which will do tree lookups based on the current root
- * and not on the commit root. Those lookups will fail, returning a
- * -ESTALE error, and making send fail with that error. So make sure
- * a send does not see any orphans we have just removed, and that it
- * will see the same inodes regardless of whether a transaction
- * commit happened before it started (meaning that the commit root
- * will be the same as the current root) or not.
- */
- if (readonly && pending_snapshot->snap->node !=
- pending_snapshot->snap->commit_root) {
- trans = btrfs_join_transaction(pending_snapshot->snap);
- if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) {
- ret = PTR_ERR(trans);
- goto fail;
- }
- if (!IS_ERR(trans)) {
- ret = btrfs_commit_transaction(trans,
- pending_snapshot->snap);
- if (ret)
- goto fail;
- }
- }
-
inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
@@ -761,7 +732,8 @@ fail:
free:
kfree(pending_snapshot);
out:
- atomic_dec(&root->will_be_snapshoted);
+ if (atomic_dec_and_test(&root->will_be_snapshoted))
+ wake_up_atomic_t(&root->will_be_snapshoted);
return ret;
}
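Snapshot creation and the no-snapshoting writers now pair atomic_inc()/atomic_dec_and_test() with wake_up_atomic_t() and wait_on_atomic_t(). Below is a hypothetical userspace analogue of just that wait/wake handshake, built on a pthread mutex and condition variable; it does not model the writer counter inside btrfs_start_write_no_snapshoting():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int will_be_snapshoted;                  /* stands in for the atomic_t */

static void snapshot_start(void)
{
        pthread_mutex_lock(&lock);
        will_be_snapshoted++;                   /* atomic_inc() */
        pthread_mutex_unlock(&lock);
}

static void snapshot_end(void)
{
        pthread_mutex_lock(&lock);
        if (--will_be_snapshoted == 0)          /* atomic_dec_and_test() */
                pthread_cond_broadcast(&cond);  /* wake_up_atomic_t() */
        pthread_mutex_unlock(&lock);
}

/* Mirrors wait_for_snapshot_creation(): block while a snapshot is pending. */
static void wait_for_snapshot(void)
{
        pthread_mutex_lock(&lock);
        while (will_be_snapshoted)              /* wait_on_atomic_t() */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

static void *writer(void *arg)
{
        (void)arg;
        wait_for_snapshot();
        printf("writer: snapshot finished, safe to expand the file\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        snapshot_start();
        pthread_create(&t, NULL, writer, NULL);
        sleep(1);                               /* snapshot work happens here */
        snapshot_end();
        pthread_join(t, NULL);
        return 0;
}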
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ac734ec4cc20..534544e08f76 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -220,6 +220,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
INIT_LIST_HEAD(&entry->work_list);
init_completion(&entry->completion);
INIT_LIST_HEAD(&entry->log_list);
+ INIT_LIST_HEAD(&entry->trans_list);
trace_btrfs_ordered_extent_add(inode, entry);
@@ -431,19 +432,31 @@ out:
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
- struct list_head *logged_list)
+ struct list_head *logged_list,
+ const loff_t start,
+ const loff_t end)
{
struct btrfs_ordered_inode_tree *tree;
struct btrfs_ordered_extent *ordered;
struct rb_node *n;
+ struct rb_node *prev;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
- for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ n = __tree_search(&tree->tree, end, &prev);
+ if (!n)
+ n = prev;
+ for (; n; n = rb_prev(n)) {
ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+ if (ordered->file_offset > end)
+ continue;
+ if (entry_end(ordered) <= start)
+ break;
if (!list_empty(&ordered->log_list))
continue;
- list_add_tail(&ordered->log_list, logged_list);
+ if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+ continue;
+ list_add(&ordered->log_list, logged_list);
atomic_inc(&ordered->refs);
}
spin_unlock_irq(&tree->lock);
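btrfs_get_logged_extents() now walks the ordered tree backwards from the last extent at or before the range end instead of scanning the whole tree. A userspace sketch of that bounded reverse walk follows, with a sorted array standing in for the rbtree and __tree_search(); all names here are local:

#include <stdint.h>
#include <stdio.h>

struct ordered {
        uint64_t file_offset;
        uint64_t len;
        int logged;
};

static uint64_t entry_end(const struct ordered *o)
{
        return o->file_offset + o->len;
}

static void get_logged_extents(struct ordered *tree, int nr,
                               uint64_t start, uint64_t end)
{
        int i;

        /* find the last entry with file_offset <= end (the __tree_search
         * plus prev step in the patch) */
        for (i = nr - 1; i >= 0 && tree[i].file_offset > end; i--)
                ;

        for (; i >= 0; i--) {
                if (tree[i].file_offset > end)
                        continue;
                if (entry_end(&tree[i]) <= start)
                        break;          /* everything below is out of range */
                if (tree[i].logged)
                        continue;       /* already picked up for this log */
                tree[i].logged = 1;
                printf("log extent %llu+%llu\n",
                       (unsigned long long)tree[i].file_offset,
                       (unsigned long long)tree[i].len);
        }
}

int main(void)
{
        struct ordered tree[] = {
                { 0,      4096, 0 },
                { 4096,   4096, 0 },
                { 65536,  8192, 0 },
                { 131072, 4096, 0 },
        };

        get_logged_extents(tree, 4, 4096, 100000);
        return 0;
}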
@@ -472,7 +485,8 @@ void btrfs_submit_logged_extents(struct list_head *logged_list,
spin_unlock_irq(&log->log_extents_lock[index]);
}
-void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log, u64 transid)
{
struct btrfs_ordered_extent *ordered;
int index = transid % 2;
@@ -497,7 +511,8 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
&ordered->flags));
- btrfs_put_ordered_extent(ordered);
+ if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+ list_add_tail(&ordered->trans_list, &trans->ordered);
spin_lock_irq(&log->log_extents_lock[index]);
}
spin_unlock_irq(&log->log_extents_lock[index]);
@@ -725,30 +740,10 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
/* start IO across the range first to instantiate any delalloc
* extents
*/
- ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+ ret = btrfs_fdatawrite_range(inode, start, orig_end);
if (ret)
return ret;
- /*
- * So with compression we will find and lock a dirty page and clear the
- * first one as dirty, setup an async extent, and immediately return
- * with the entire range locked but with nobody actually marked with
- * writeback. So we can't just filemap_write_and_wait_range() and
- * expect it to work since it will just kick off a thread to do the
- * actual work. So we need to call filemap_fdatawrite_range _again_
- * since it will wait on the page lock, which won't be unlocked until
- * after the pages have been marked as writeback and so we're good to go
- * from there. We have to do this otherwise we'll miss the ordered
- * extents and that results in badness. Please Josef, do not think you
- * know better and pull this out at some point in the future, it is
- * right and you are wrong.
- */
- if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags)) {
- ret = filemap_fdatawrite_range(inode->i_mapping, start,
- orig_end);
- if (ret)
- return ret;
- }
+
ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
if (ret)
return ret;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index d81a274d621e..e96cd4ccd805 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -71,6 +71,8 @@ struct btrfs_ordered_sum {
ordered extent */
#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
+#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
+ * in the logging code. */
struct btrfs_ordered_extent {
/* logical offset in the file */
u64 file_offset;
@@ -121,6 +123,9 @@ struct btrfs_ordered_extent {
/* If we need to wait on this to be done */
struct list_head log_list;
+ /* If the transaction needs to wait on this ordered extent */
+ struct list_head trans_list;
+
/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
wait_queue_head_t wait;
@@ -193,11 +198,14 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
void btrfs_get_logged_extents(struct inode *inode,
- struct list_head *logged_list);
+ struct list_head *logged_list,
+ const loff_t start,
+ const loff_t end);
void btrfs_put_logged_extents(struct list_head *logged_list);
void btrfs_submit_logged_extents(struct list_head *logged_list,
struct btrfs_root *log);
-void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
+void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log, u64 transid);
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void ordered_data_exit(void);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6a41631cb959..8ab2a17bbba8 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,9 +58,23 @@
*/
#define RBIO_CACHE_READY_BIT 3
+/*
+ * bbio and raid_map are managed by the caller, so we shouldn't free
+ * them here. Besides that, all rbios with this flag should not
+ * be cached, because we need raid_map to check whether the rbios'
+ * stripes are the same, but it is very likely that the caller has
+ * already freed raid_map, so don't cache those rbios.
+ */
+#define RBIO_HOLD_BBIO_MAP_BIT 4
#define RBIO_CACHE_SIZE 1024
+enum btrfs_rbio_ops {
+ BTRFS_RBIO_WRITE = 0,
+ BTRFS_RBIO_READ_REBUILD = 1,
+ BTRFS_RBIO_PARITY_SCRUB = 2,
+};
+
struct btrfs_raid_bio {
struct btrfs_fs_info *fs_info;
struct btrfs_bio *bbio;
@@ -117,13 +131,16 @@ struct btrfs_raid_bio {
/* number of data stripes (no p/q) */
int nr_data;
+ int real_stripes;
+
+ int stripe_npages;
/*
* set if we're doing a parity rebuild
* for a read from higher up, which is handled
* differently from a parity rebuild as part of
* rmw
*/
- int read_rebuild;
+ enum btrfs_rbio_ops operation;
/* first bad stripe */
int faila;
@@ -131,6 +148,7 @@ struct btrfs_raid_bio {
/* second bad stripe (for raid6 use) */
int failb;
+ int scrubp;
/*
* number of pages needed to represent the full
* stripe
@@ -144,8 +162,13 @@ struct btrfs_raid_bio {
*/
int bio_list_bytes;
+ int generic_bio_cnt;
+
atomic_t refs;
+ atomic_t stripes_pending;
+
+ atomic_t error;
/*
* these are two arrays of pointers. We allocate the
* rbio big enough to hold them both and setup their
@@ -162,6 +185,11 @@ struct btrfs_raid_bio {
* here for faster lookup
*/
struct page **bio_pages;
+
+ /*
+ * bitmap to record which horizontal stripe has data
+ */
+ unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
@@ -176,6 +204,10 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ int need_check);
+static void async_scrub_parity(struct btrfs_raid_bio *rbio);
+
/*
* the stripe hash table is used for locking, and to collect
* bios in hopes of making a full stripe
@@ -324,6 +356,7 @@ static void merge_rbio(struct btrfs_raid_bio *dest,
{
bio_list_merge(&dest->bio_list, &victim->bio_list);
dest->bio_list_bytes += victim->bio_list_bytes;
+ dest->generic_bio_cnt += victim->generic_bio_cnt;
bio_list_init(&victim->bio_list);
}
@@ -577,11 +610,20 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
cur->raid_map[0])
return 0;
- /* reads can't merge with writes */
- if (last->read_rebuild !=
- cur->read_rebuild) {
+ /* we can't merge with different operations */
+ if (last->operation != cur->operation)
+ return 0;
+ /*
+ * We need to read the full stripe from the drive,
+ * check and repair the parity and write the new results.
+ *
+ * We're not allowed to add any new bios to the
+ * bio list here; anyone else that wants to
+ * change this stripe needs to do their own rmw.
+ */
+ if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
+ cur->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- }
return 1;
}
@@ -601,7 +643,7 @@ static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
*/
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
- if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
+ if (rbio->nr_data + 1 == rbio->real_stripes)
return NULL;
index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
@@ -772,11 +814,14 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
spin_unlock(&rbio->bio_list_lock);
spin_unlock_irqrestore(&h->lock, flags);
- if (next->read_rebuild)
+ if (next->operation == BTRFS_RBIO_READ_REBUILD)
async_read_rebuild(next);
- else {
+ else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
async_rmw_stripe(next);
+ } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
+ steal_rbio(rbio, next);
+ async_scrub_parity(next);
}
goto done_nolock;
@@ -796,6 +841,21 @@ done_nolock:
remove_rbio_from_cache(rbio);
}
+static inline void
+__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
+{
+ if (need) {
+ kfree(raid_map);
+ kfree(bbio);
+ }
+}
+
+static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
+{
+ __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
+ !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
+}
+
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
int i;
@@ -814,8 +874,9 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
rbio->stripe_pages[i] = NULL;
}
}
- kfree(rbio->raid_map);
- kfree(rbio->bbio);
+
+ free_bbio_and_raid_map(rbio);
+
kfree(rbio);
}
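Whether the rbio may release bbio and raid_map is now decided by the RBIO_HOLD_BBIO_MAP_BIT ownership flag. A compact userspace sketch of that rule is below; struct fake_rbio and the bit value are illustrative only:

#include <stdlib.h>
#include <stdio.h>

#define HOLD_BBIO_MAP_BIT (1UL << 4)

struct fake_rbio {
        unsigned long flags;
        void *bbio;
        void *raid_map;
};

static void free_bbio_and_raid_map(struct fake_rbio *rbio)
{
        if (rbio->flags & HOLD_BBIO_MAP_BIT)
                return;                 /* caller still owns these */
        free(rbio->raid_map);
        free(rbio->bbio);
        rbio->raid_map = NULL;
        rbio->bbio = NULL;
}

int main(void)
{
        struct fake_rbio owned = { 0, malloc(16), malloc(16) };
        struct fake_rbio held  = { HOLD_BBIO_MAP_BIT, malloc(16), malloc(16) };

        free_bbio_and_raid_map(&owned);         /* frees both */
        free_bbio_and_raid_map(&held);          /* leaves them for the caller */
        printf("owned freed: %d, held kept: %d\n",
               owned.bbio == NULL, held.bbio != NULL);

        /* caller-owned buffers are released by the caller */
        free(held.raid_map);
        free(held.bbio);
        return 0;
}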
@@ -833,6 +894,10 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
struct bio *next;
+
+ if (rbio->generic_bio_cnt)
+ btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
+
free_raid_bio(rbio);
while (cur) {
@@ -858,13 +923,13 @@ static void raid_write_end_io(struct bio *bio, int err)
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = 0;
/* OK, we have read all the stripes we need to. */
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
err = -EIO;
rbio_orig_end_io(rbio, err, 0);
@@ -925,16 +990,16 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
- int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+ int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+ int num_pages = rbio_nr_pages(stripe_len, real_stripes);
+ int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
void *p;
- rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
+ rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
+ DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
GFP_NOFS);
- if (!rbio) {
- kfree(raid_map);
- kfree(bbio);
+ if (!rbio)
return ERR_PTR(-ENOMEM);
- }
bio_list_init(&rbio->bio_list);
INIT_LIST_HEAD(&rbio->plug_list);
@@ -946,9 +1011,13 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->fs_info = root->fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
+ rbio->real_stripes = real_stripes;
+ rbio->stripe_npages = stripe_npages;
rbio->faila = -1;
rbio->failb = -1;
atomic_set(&rbio->refs, 1);
+ atomic_set(&rbio->error, 0);
+ atomic_set(&rbio->stripes_pending, 0);
/*
* the stripe_pages and bio_pages array point to the extra
@@ -957,11 +1026,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
p = rbio + 1;
rbio->stripe_pages = p;
rbio->bio_pages = p + sizeof(struct page *) * num_pages;
+ rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
- if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
- nr_data = bbio->num_stripes - 2;
+ if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
+ nr_data = real_stripes - 2;
else
- nr_data = bbio->num_stripes - 1;
+ nr_data = real_stripes - 1;
rbio->nr_data = nr_data;
return rbio;
@@ -1073,7 +1143,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
if (rbio->faila >= 0 || rbio->failb >= 0) {
- BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
+ BUG_ON(rbio->faila == rbio->real_stripes - 1);
__raid56_parity_recover(rbio);
} else {
finish_rmw(rbio);
@@ -1134,7 +1204,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
struct btrfs_bio *bbio = rbio->bbio;
- void *pointers[bbio->num_stripes];
+ void *pointers[rbio->real_stripes];
int stripe_len = rbio->stripe_len;
int nr_data = rbio->nr_data;
int stripe;
@@ -1148,11 +1218,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
bio_list_init(&bio_list);
- if (bbio->num_stripes - rbio->nr_data == 1) {
- p_stripe = bbio->num_stripes - 1;
- } else if (bbio->num_stripes - rbio->nr_data == 2) {
- p_stripe = bbio->num_stripes - 2;
- q_stripe = bbio->num_stripes - 1;
+ if (rbio->real_stripes - rbio->nr_data == 1) {
+ p_stripe = rbio->real_stripes - 1;
+ } else if (rbio->real_stripes - rbio->nr_data == 2) {
+ p_stripe = rbio->real_stripes - 2;
+ q_stripe = rbio->real_stripes - 1;
} else {
BUG();
}
@@ -1169,7 +1239,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock_irq(&rbio->bio_list_lock);
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* now that we've set rmw_locked, run through the
@@ -1209,7 +1279,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
SetPageUptodate(p);
pointers[stripe++] = kmap(p);
- raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+ raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
/* raid5 */
@@ -1218,7 +1288,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
}
- for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
}
@@ -1227,7 +1297,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
* higher layers (the bio_list in our rbio) and our p/q. Ignore
* everything else.
*/
- for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
struct page *page;
if (stripe < rbio->nr_data) {
@@ -1245,8 +1315,34 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
}
}
- atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
- BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
+ if (likely(!bbio->num_tgtdevs))
+ goto write_data;
+
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+ if (!bbio->tgtdev_map[stripe])
+ continue;
+
+ for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+ struct page *page;
+ if (stripe < rbio->nr_data) {
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (!page)
+ continue;
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ rbio->bbio->tgtdev_map[stripe],
+ pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+write_data:
+ atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
+ BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
while (1) {
bio = bio_list_pop(&bio_list);
@@ -1283,7 +1379,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
stripe = &rbio->bbio->stripes[i];
stripe_start = stripe->physical;
if (physical >= stripe_start &&
- physical < stripe_start + rbio->stripe_len) {
+ physical < stripe_start + rbio->stripe_len &&
+ bio->bi_bdev == stripe->dev->bdev) {
return i;
}
}
@@ -1331,11 +1428,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
if (rbio->faila == -1) {
/* first failure on this rbio */
rbio->faila = failed;
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
} else if (rbio->failb == -1) {
/* second failure on this rbio */
rbio->failb = failed;
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
} else {
ret = -EIO;
}
@@ -1394,11 +1491,11 @@ static void raid_rmw_end_io(struct bio *bio, int err)
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = 0;
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
goto cleanup;
/*
@@ -1439,7 +1536,6 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
- struct btrfs_bio *bbio = rbio->bbio;
struct bio_list bio_list;
int ret;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1455,7 +1551,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
index_rbio_pages(rbio);
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* build a list of bios to read all the missing parts of this
* stripe
@@ -1503,7 +1599,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
- atomic_set(&bbio->stripes_pending, bios_to_read);
+ atomic_set(&rbio->stripes_pending, bios_to_read);
while (1) {
bio = bio_list_pop(&bio_list);
if (!bio)
@@ -1686,19 +1782,30 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
+ int ret;
rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
- if (IS_ERR(rbio))
+ if (IS_ERR(rbio)) {
+ __free_bbio_and_raid_map(bbio, raid_map, 1);
return PTR_ERR(rbio);
+ }
bio_list_add(&rbio->bio_list, bio);
rbio->bio_list_bytes = bio->bi_iter.bi_size;
+ rbio->operation = BTRFS_RBIO_WRITE;
+
+ btrfs_bio_counter_inc_noblocked(root->fs_info);
+ rbio->generic_bio_cnt = 1;
/*
* don't plug on full rbios, just get them out the door
* as quickly as we can
*/
- if (rbio_is_full(rbio))
- return full_stripe_write(rbio);
+ if (rbio_is_full(rbio)) {
+ ret = full_stripe_write(rbio);
+ if (ret)
+ btrfs_bio_counter_dec(root->fs_info);
+ return ret;
+ }
cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
sizeof(*plug));
@@ -1709,10 +1816,13 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
INIT_LIST_HEAD(&plug->rbio_list);
}
list_add_tail(&rbio->plug_list, &plug->rbio_list);
+ ret = 0;
} else {
- return __raid56_parity_write(rbio);
+ ret = __raid56_parity_write(rbio);
+ if (ret)
+ btrfs_bio_counter_dec(root->fs_info);
}
- return 0;
+ return ret;
}
/*
@@ -1730,7 +1840,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
int err;
int i;
- pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
+ pointers = kzalloc(rbio->real_stripes * sizeof(void *),
GFP_NOFS);
if (!pointers) {
err = -ENOMEM;
@@ -1740,7 +1850,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
faila = rbio->faila;
failb = rbio->failb;
- if (rbio->read_rebuild) {
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
spin_lock_irq(&rbio->bio_list_lock);
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock_irq(&rbio->bio_list_lock);
@@ -1749,15 +1859,23 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
index_rbio_pages(rbio);
for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ /*
+ * Now we just use bitmap to mark the horizontal stripes in
+ * which we have data when doing parity scrub.
+ */
+ if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
+ !test_bit(pagenr, rbio->dbitmap))
+ continue;
+
/* setup our array of pointers with pages
* from each stripe
*/
- for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
/*
* if we're rebuilding a read, we have to use
* pages from the bio list
*/
- if (rbio->read_rebuild &&
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
(stripe == faila || stripe == failb)) {
page = page_in_rbio(rbio, stripe, pagenr, 0);
} else {
@@ -1767,7 +1885,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
}
/* all raid6 handling here */
- if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
+ if (rbio->raid_map[rbio->real_stripes - 1] ==
RAID6_Q_STRIPE) {
/*
@@ -1817,10 +1935,10 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
}
if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
- raid6_datap_recov(rbio->bbio->num_stripes,
+ raid6_datap_recov(rbio->real_stripes,
PAGE_SIZE, faila, pointers);
} else {
- raid6_2data_recov(rbio->bbio->num_stripes,
+ raid6_2data_recov(rbio->real_stripes,
PAGE_SIZE, faila, failb,
pointers);
}
@@ -1850,7 +1968,7 @@ pstripe:
* know they can be trusted. If this was a read reconstruction,
* other endio functions will fiddle the uptodate bits
*/
- if (!rbio->read_rebuild) {
+ if (rbio->operation == BTRFS_RBIO_WRITE) {
for (i = 0; i < nr_pages; i++) {
if (faila != -1) {
page = rbio_stripe_page(rbio, faila, i);
@@ -1862,12 +1980,12 @@ pstripe:
}
}
}
- for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
/*
* if we're rebuilding a read, we have to use
* pages from the bio list
*/
- if (rbio->read_rebuild &&
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
(stripe == faila || stripe == failb)) {
page = page_in_rbio(rbio, stripe, pagenr, 0);
} else {
@@ -1882,9 +2000,9 @@ cleanup:
kfree(pointers);
cleanup_io:
-
- if (rbio->read_rebuild) {
- if (err == 0)
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+ if (err == 0 &&
+ !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
cache_rbio_pages(rbio);
else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -1893,7 +2011,13 @@ cleanup_io:
} else if (err == 0) {
rbio->faila = -1;
rbio->failb = -1;
- finish_rmw(rbio);
+
+ if (rbio->operation == BTRFS_RBIO_WRITE)
+ finish_rmw(rbio);
+ else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
+ finish_parity_scrub(rbio, 0);
+ else
+ BUG();
} else {
rbio_orig_end_io(rbio, err, 0);
}
@@ -1917,10 +2041,10 @@ static void raid_recover_end_io(struct bio *bio, int err)
set_bio_pages_uptodate(bio);
bio_put(bio);
- if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
- if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio_orig_end_io(rbio, -EIO, 0);
else
__raid_recover_end_io(rbio);
@@ -1937,7 +2061,6 @@ static void raid_recover_end_io(struct bio *bio, int err)
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
- struct btrfs_bio *bbio = rbio->bbio;
struct bio_list bio_list;
int ret;
int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1951,16 +2074,16 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
if (ret)
goto cleanup;
- atomic_set(&rbio->bbio->error, 0);
+ atomic_set(&rbio->error, 0);
/*
* read everything that hasn't failed. Thanks to the
* stripe cache, it is possible that some or all of these
* pages are going to be uptodate.
*/
- for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
if (rbio->faila == stripe || rbio->failb == stripe) {
- atomic_inc(&rbio->bbio->error);
+ atomic_inc(&rbio->error);
continue;
}
@@ -1990,7 +2113,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* were up to date, or we might have no bios to read because
* the devices were gone.
*/
- if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
+ if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
__raid_recover_end_io(rbio);
goto out;
} else {
@@ -2002,7 +2125,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
- atomic_set(&bbio->stripes_pending, bios_to_read);
+ atomic_set(&rbio->stripes_pending, bios_to_read);
while (1) {
bio = bio_list_pop(&bio_list);
if (!bio)
@@ -2021,7 +2144,7 @@ out:
return 0;
cleanup:
- if (rbio->read_rebuild)
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
rbio_orig_end_io(rbio, -EIO, 0);
return -EIO;
}
@@ -2034,34 +2157,42 @@ cleanup:
*/
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
- u64 stripe_len, int mirror_num)
+ u64 stripe_len, int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;
rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
- if (IS_ERR(rbio))
+ if (IS_ERR(rbio)) {
+ __free_bbio_and_raid_map(bbio, raid_map, generic_io);
return PTR_ERR(rbio);
+ }
- rbio->read_rebuild = 1;
+ rbio->operation = BTRFS_RBIO_READ_REBUILD;
bio_list_add(&rbio->bio_list, bio);
rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
BUG();
- kfree(raid_map);
- kfree(bbio);
+ __free_bbio_and_raid_map(bbio, raid_map, generic_io);
kfree(rbio);
return -EIO;
}
+ if (generic_io) {
+ btrfs_bio_counter_inc_noblocked(root->fs_info);
+ rbio->generic_bio_cnt = 1;
+ } else {
+ set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
+ }
+
/*
* reconstruct from the q stripe if they are
* asking for mirror 3
*/
if (mirror_num == 3)
- rbio->failb = bbio->num_stripes - 2;
+ rbio->failb = rbio->real_stripes - 2;
ret = lock_stripe_add(rbio);
@@ -2098,3 +2229,483 @@ static void read_rebuild_work(struct btrfs_work *work)
rbio = container_of(work, struct btrfs_raid_bio, work);
__raid56_parity_recover(rbio);
}
+
+/*
+ * The following code is used to scrub/replace the parity stripe
+ *
+ * Note: We need to make sure that all the pages added to the scrub/replace
+ * raid bio are correct and are not changed during the scrub/replace. That
+ * is, those pages just hold metadata or file data with checksums.
+ */
+
+struct btrfs_raid_bio *
+raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, struct btrfs_device *scrub_dev,
+ unsigned long *dbitmap, int stripe_nsectors)
+{
+ struct btrfs_raid_bio *rbio;
+ int i;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio))
+ return NULL;
+ bio_list_add(&rbio->bio_list, bio);
+ /*
+ * This is a special bio which is used to hold the completion handler
+ * and make the scrub rbio similar to the other types
+ */
+ ASSERT(!bio->bi_iter.bi_size);
+ rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
+
+ for (i = 0; i < rbio->real_stripes; i++) {
+ if (bbio->stripes[i].dev == scrub_dev) {
+ rbio->scrubp = i;
+ break;
+ }
+ }
+
+ /* For now we only support a sectorsize equal to the page size */
+ ASSERT(root->sectorsize == PAGE_SIZE);
+ ASSERT(rbio->stripe_npages == stripe_nsectors);
+ bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
+
+ return rbio;
+}
+
+void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
+ struct page *page, u64 logical)
+{
+ int stripe_offset;
+ int index;
+
+ ASSERT(logical >= rbio->raid_map[0]);
+ ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
+ rbio->stripe_len * rbio->nr_data);
+ stripe_offset = (int)(logical - rbio->raid_map[0]);
+ index = stripe_offset >> PAGE_CACHE_SHIFT;
+ rbio->bio_pages[index] = page;
+}
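A quick worked example of the index math in raid56_parity_add_scrub_pages(): the bio_pages slot is the byte offset from the first logical address of the full stripe, shifted down by the page size. The sample geometry below is made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                           /* 4 KiB pages, as the patch assumes */

int main(void)
{
        uint64_t raid_map0  = 1ULL << 30;       /* logical start of the full stripe */
        uint64_t stripe_len = 64 * 1024;        /* 64 KiB per data stripe */
        int      nr_data    = 2;                /* RAID5 with 2 data stripes */
        uint64_t logical    = raid_map0 + 68 * 1024;    /* a page in stripe 1 */

        /* the ASSERTs in the patch: the page must fall inside the data area */
        if (logical < raid_map0 ||
            logical + (1 << PAGE_SHIFT) > raid_map0 + stripe_len * nr_data)
                return 1;

        uint64_t stripe_offset = logical - raid_map0;
        int index = (int)(stripe_offset >> PAGE_SHIFT);

        printf("stripe_offset=%llu -> bio_pages index %d\n",
               (unsigned long long)stripe_offset, index);       /* index 17 */
        return 0;
}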
+
+/*
+ * We only scrub the parity for horizontal stripes where we have correct data,
+ * so we needn't allocate all pages for all the stripes.
+ */
+static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ int bit;
+ int index;
+ struct page *page;
+
+ for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
+ for (i = 0; i < rbio->real_stripes; i++) {
+ index = i * rbio->stripe_npages + bit;
+ if (rbio->stripe_pages[index])
+ continue;
+
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[index] = page;
+ ClearPageUptodate(page);
+ }
+ }
+ return 0;
+}
+
+/*
+ * end io function used by finish_parity_scrub. When we finally
+ * get here, we've written back the repaired parity pages
+ */
+static void raid_write_parity_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
+ return;
+
+ err = 0;
+
+ if (atomic_read(&rbio->error))
+ err = -EIO;
+
+ rbio_orig_end_io(rbio, err, 0);
+}
+
+static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ int need_check)
+{
+ struct btrfs_bio *bbio = rbio->bbio;
+ void *pointers[rbio->real_stripes];
+ DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
+ int nr_data = rbio->nr_data;
+ int stripe;
+ int pagenr;
+ int p_stripe = -1;
+ int q_stripe = -1;
+ struct page *p_page = NULL;
+ struct page *q_page = NULL;
+ struct bio_list bio_list;
+ struct bio *bio;
+ int is_replace = 0;
+ int ret;
+
+ bio_list_init(&bio_list);
+
+ if (rbio->real_stripes - rbio->nr_data == 1) {
+ p_stripe = rbio->real_stripes - 1;
+ } else if (rbio->real_stripes - rbio->nr_data == 2) {
+ p_stripe = rbio->real_stripes - 2;
+ q_stripe = rbio->real_stripes - 1;
+ } else {
+ BUG();
+ }
+
+ if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
+ is_replace = 1;
+ bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
+ }
+
+ /*
+ * Because the higher layers (the scrubber) are unlikely to
+ * use this area of the disk again soon, don't cache
+ * it.
+ */
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ if (!need_check)
+ goto writeback;
+
+ p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!p_page)
+ goto cleanup;
+ SetPageUptodate(p_page);
+
+ if (q_stripe != -1) {
+ q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!q_page) {
+ __free_page(p_page);
+ goto cleanup;
+ }
+ SetPageUptodate(q_page);
+ }
+
+ atomic_set(&rbio->error, 0);
+
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *p;
+ void *parity;
+ /* first collect one page from each data stripe */
+ for (stripe = 0; stripe < nr_data; stripe++) {
+ p = page_in_rbio(rbio, stripe, pagenr, 0);
+ pointers[stripe] = kmap(p);
+ }
+
+ /* then add the parity stripe */
+ pointers[stripe++] = kmap(p_page);
+
+ if (q_stripe != -1) {
+
+ /*
+ * raid6, add the qstripe and call the
+ * library function to fill in our p/q
+ */
+ pointers[stripe++] = kmap(q_page);
+
+ raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
+ pointers);
+ } else {
+ /* raid5 */
+ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ }
+
+ /* Check the scrubbing parity and repair it */
+ p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ parity = kmap(p);
+ if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
+ memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
+ else
+ /* Parity is right, no need to write it back */
+ bitmap_clear(rbio->dbitmap, pagenr, 1);
+ kunmap(p);
+
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+ kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ }
+
+ __free_page(p_page);
+ if (q_page)
+ __free_page(q_page);
+
+writeback:
+ /*
+ * time to start writing. Make bios for everything from the
+ * higher layers (the bio_list in our rbio) and our p/q. Ignore
+ * everything else.
+ */
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *page;
+
+ page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ ret = rbio_add_io_page(rbio, &bio_list,
+ page, rbio->scrubp, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+
+ if (!is_replace)
+ goto submit_write;
+
+ for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
+ struct page *page;
+
+ page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ bbio->tgtdev_map[rbio->scrubp],
+ pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+
+submit_write:
+ nr_data = bio_list_size(&bio_list);
+ if (!nr_data) {
+ /* Every parity is right */
+ rbio_orig_end_io(rbio, 0, 0);
+ return;
+ }
+
+ atomic_set(&rbio->stripes_pending, nr_data);
+
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_write_parity_end_io;
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(WRITE, bio);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
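finish_parity_scrub() recomputes the expected parity for each horizontal stripe, compares it with what is on disk and only queues a write when they differ. Below is a userspace sketch of that check-and-repair step for the RAID5 case; buffer sizes and contents are arbitrary, and run_xor here is a local helper mirroring the patch's raid5 branch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NR_DATA   3

static void run_xor(uint8_t *dst, uint8_t pages[][PAGE_SIZE], int nr)
{
        for (int i = 0; i < nr; i++)
                for (int j = 0; j < PAGE_SIZE; j++)
                        dst[j] ^= pages[i][j];
}

int main(void)
{
        static uint8_t data[NR_DATA][PAGE_SIZE];
        static uint8_t on_disk_parity[PAGE_SIZE];
        static uint8_t expected[PAGE_SIZE];
        int need_writeback = 0;

        /* fill the data stripes and a deliberately stale parity page */
        for (int i = 0; i < NR_DATA; i++)
                memset(data[i], 0x10 + i, PAGE_SIZE);
        memset(on_disk_parity, 0xff, PAGE_SIZE);

        /* raid5 branch of the patch: parity = data[0] ^ data[1] ^ ... */
        memcpy(expected, data[0], PAGE_SIZE);
        run_xor(expected, &data[1], NR_DATA - 1);

        if (memcmp(on_disk_parity, expected, PAGE_SIZE)) {
                memcpy(on_disk_parity, expected, PAGE_SIZE);    /* repair */
                need_writeback = 1;     /* keep the dbitmap bit set */
        }
        /* else: parity was right, clear the bit and skip the write */

        printf("parity %s\n", need_writeback ? "repaired, will write back"
                                             : "already correct");
        return 0;
}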
+
+static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
+{
+ if (stripe >= 0 && stripe < rbio->nr_data)
+ return 1;
+ return 0;
+}
+
+/*
+ * While we're doing the parity check and repair, we could have errors
+ * in reading pages off the disk. This checks for errors and if we're
+ * not able to read the page it'll trigger parity reconstruction. The
+ * parity scrub will be finished after we've reconstructed the failed
+ * stripes
+ */
+static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
+{
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+ goto cleanup;
+
+ if (rbio->faila >= 0 || rbio->failb >= 0) {
+ int dfail = 0, failp = -1;
+
+ if (is_data_stripe(rbio, rbio->faila))
+ dfail++;
+ else if (is_parity_stripe(rbio->faila))
+ failp = rbio->faila;
+
+ if (is_data_stripe(rbio, rbio->failb))
+ dfail++;
+ else if (is_parity_stripe(rbio->failb))
+ failp = rbio->failb;
+
+ /*
+ * Because we cannot use a scrubbing parity to repair
+ * the data, the repair capability is reduced.
+ * (In the case of RAID5, we cannot repair anything.)
+ */
+ if (dfail > rbio->bbio->max_errors - 1)
+ goto cleanup;
+
+ /*
+ * If all the data is good and only the parity is bad, just
+ * repair the parity.
+ */
+ if (dfail == 0) {
+ finish_parity_scrub(rbio, 0);
+ return;
+ }
+
+ /*
+ * This means we got one corrupted data stripe and one
+ * corrupted parity on RAID6. If the corrupted parity
+ * is the scrubbing parity, we can luckily use the other one to
+ * repair the data; otherwise, we cannot repair the data stripe.
+ */
+ if (failp != rbio->scrubp)
+ goto cleanup;
+
+ __raid_recover_end_io(rbio);
+ } else {
+ finish_parity_scrub(rbio, 1);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
+
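As a reading aid, here is a minimal userspace model that mirrors the branch order of validate_rbio_for_parity_scrub() above; the helper name decide(), the enum, and its parameters are purely illustrative and are not part of btrfs.

	enum scrub_action { SCRUB_GIVE_UP, SCRUB_REWRITE_PARITY, SCRUB_REBUILD_DATA };

	/*
	 * dfail:      number of failed data stripes
	 * failp:      index of a failed parity stripe, or -1 if none failed
	 * scrubp:     index of the parity stripe being scrubbed
	 * max_errors: tolerated failures (1 for RAID5, 2 for RAID6)
	 */
	static enum scrub_action decide(int dfail, int failp, int scrubp, int max_errors)
	{
		/* the scrubbed parity cannot help with repair, so tolerate one less */
		if (dfail > max_errors - 1)
			return SCRUB_GIVE_UP;

		/* all data stripes are fine, simply rewrite the parity */
		if (dfail == 0)
			return SCRUB_REWRITE_PARITY;

		/* the failed parity is not the scrubbed one, so the good parity is gone */
		if (failp != scrubp)
			return SCRUB_GIVE_UP;

		/* rebuild the data from the remaining good parity */
		return SCRUB_REBUILD_DATA;
	}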
+/*
+ * end io for the read phase of the parity scrub. All the bios here are
+ * physical stripe bios we've read from the disk so we can recalculate the
+ * parity of the stripe.
+ *
+ * This will usually kick off finish_parity_scrub once all the bios are read
+ * in, but it may trigger parity reconstruction if we had any errors along
+ * the way
+ */
+static void raid56_parity_scrub_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
+ return;
+
+ /*
+ * this will normally call finish_parity_scrub to start our
+ * write, but if there are any failed stripes we'll reconstruct
+ * from parity first
+ */
+ validate_rbio_for_parity_scrub(rbio);
+}
+
+static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct bio_list bio_list;
+ int ret;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ ret = alloc_rbio_essential_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ bio_list_init(&bio_list);
+
+ atomic_set(&rbio->error, 0);
+ /*
+ * build a list of bios to read all the missing parts of this
+ * stripe
+ */
+ for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *page;
+ /*
+ * we want to find all the pages missing from
+ * the rbio and read them from the disk. If
+ * page_in_rbio finds a page in the bio list
+ * we don't need to read it off the stripe.
+ */
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (page)
+ continue;
+
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ /*
+ * the bio cache may have handed us an uptodate
+ * page. If so, be happy and use it
+ */
+ if (PageUptodate(page))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * this can happen if others have merged with
+ * us, it means there is nothing left to read.
+ * But if there are missing devices it may not be
+ * safe to do the full stripe write yet.
+ */
+ goto finish;
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&rbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid56_parity_scrub_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+ /* the actual write will happen once the reads are done */
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return;
+
+finish:
+ validate_rbio_for_parity_scrub(rbio);
+}
+
+static void scrub_parity_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ raid56_parity_scrub_stripe(rbio);
+}
+
+static void async_scrub_parity(struct btrfs_raid_bio *rbio)
+{
+ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+ scrub_parity_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
+{
+ if (!lock_stripe_add(rbio))
+ async_scrub_parity(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index ea5d73bfdfbe..31d4a157b5e3 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -39,13 +39,25 @@ static inline int nr_data_stripes(struct map_lookup *map)
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
((x) == RAID6_Q_STRIPE))
+struct btrfs_raid_bio;
+struct btrfs_device;
+
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
- struct btrfs_bio *bbio, u64 *raid_map,
- u64 stripe_len, int mirror_num);
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len);
+struct btrfs_raid_bio *
+raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, struct btrfs_device *scrub_dev,
+ unsigned long *dbitmap, int stripe_nsectors);
+void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
+ struct page *page, u64 logical);
+void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
+
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
#endif
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index efa083113827..f2bb13a23f86 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -63,10 +63,18 @@ struct scrub_ctx;
*/
#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
+struct scrub_recover {
+ atomic_t refs;
+ struct btrfs_bio *bbio;
+ u64 *raid_map;
+ u64 map_length;
+};
+
struct scrub_page {
struct scrub_block *sblock;
struct page *page;
struct btrfs_device *dev;
+ struct list_head list;
u64 flags; /* extent flags */
u64 generation;
u64 logical;
@@ -79,6 +87,8 @@ struct scrub_page {
unsigned int io_error:1;
};
u8 csum[BTRFS_CSUM_SIZE];
+
+ struct scrub_recover *recover;
};
struct scrub_bio {
@@ -105,14 +115,52 @@ struct scrub_block {
atomic_t outstanding_pages;
atomic_t ref_count; /* free mem on transition to zero */
struct scrub_ctx *sctx;
+ struct scrub_parity *sparity;
struct {
unsigned int header_error:1;
unsigned int checksum_error:1;
unsigned int no_io_error_seen:1;
unsigned int generation_error:1; /* also sets header_error */
+
+ /* The following flag is used by the parity-check code */
+ /* It applies only to data blocks that carry a checksum */
+ unsigned int data_corrected:1;
};
};
+/* Used for chunks that have a parity stripe, such as RAID5/6 */
+struct scrub_parity {
+ struct scrub_ctx *sctx;
+
+ struct btrfs_device *scrub_dev;
+
+ u64 logic_start;
+
+ u64 logic_end;
+
+ int nsectors;
+
+ int stripe_len;
+
+ atomic_t ref_count;
+
+ struct list_head spages;
+
+ /* Work item for parity check and repair */
+ struct btrfs_work work;
+
+ /* Mark the parity blocks which have data */
+ unsigned long *dbitmap;
+
+ /*
+ * Mark the parity blocks which have data, but where errors
+ * happened while reading or checking that data
+ */
+ unsigned long *ebitmap;
+
+ unsigned long bitmap[0];
+};
+
struct scrub_wr_ctx {
struct scrub_bio *wr_curr_bio;
struct btrfs_device *tgtdev;
@@ -196,7 +244,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock, int is_metadata,
int have_csum, u8 *csum, u64 generation,
- u16 csum_size);
+ u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock,
int is_metadata, int have_csum,
@@ -218,6 +266,8 @@ static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
+static void scrub_parity_get(struct scrub_parity *sparity);
+static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
@@ -790,6 +840,20 @@ out:
scrub_pending_trans_workers_dec(sctx);
}
+static inline void scrub_get_recover(struct scrub_recover *recover)
+{
+ atomic_inc(&recover->refs);
+}
+
+static inline void scrub_put_recover(struct scrub_recover *recover)
+{
+ if (atomic_dec_and_test(&recover->refs)) {
+ kfree(recover->bbio);
+ kfree(recover->raid_map);
+ kfree(recover);
+ }
+}
+
/*
* scrub_handle_errored_block gets called when either verification of the
* pages failed or the bio failed to read, e.g. with EIO. In the latter
@@ -906,7 +970,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
/* build and submit the bios for the failed mirror, check checksums */
scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
- csum, generation, sctx->csum_size);
+ csum, generation, sctx->csum_size, 1);
if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
sblock_bad->no_io_error_seen) {
@@ -920,6 +984,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
*/
spin_lock(&sctx->stat_lock);
sctx->stat.unverified_errors++;
+ sblock_to_check->data_corrected = 1;
spin_unlock(&sctx->stat_lock);
if (sctx->is_dev_replace)
@@ -1019,7 +1084,7 @@ nodatasum_case:
/* build and submit the bios, check checksums */
scrub_recheck_block(fs_info, sblock_other, is_metadata,
have_csum, csum, generation,
- sctx->csum_size);
+ sctx->csum_size, 0);
if (!sblock_other->header_error &&
!sblock_other->checksum_error &&
@@ -1169,7 +1234,7 @@ nodatasum_case:
*/
scrub_recheck_block(fs_info, sblock_bad,
is_metadata, have_csum, csum,
- generation, sctx->csum_size);
+ generation, sctx->csum_size, 1);
if (!sblock_bad->header_error &&
!sblock_bad->checksum_error &&
sblock_bad->no_io_error_seen)
@@ -1180,6 +1245,7 @@ nodatasum_case:
corrected_error:
spin_lock(&sctx->stat_lock);
sctx->stat.corrected_errors++;
+ sblock_to_check->data_corrected = 1;
spin_unlock(&sctx->stat_lock);
printk_ratelimited_in_rcu(KERN_ERR
"BTRFS: fixed up error at logical %llu on dev %s\n",
@@ -1201,11 +1267,18 @@ out:
mirror_index++) {
struct scrub_block *sblock = sblocks_for_recheck +
mirror_index;
+ struct scrub_recover *recover;
int page_index;
for (page_index = 0; page_index < sblock->page_count;
page_index++) {
sblock->pagev[page_index]->sblock = NULL;
+ recover = sblock->pagev[page_index]->recover;
+ if (recover) {
+ scrub_put_recover(recover);
+ sblock->pagev[page_index]->recover =
+ NULL;
+ }
scrub_page_put(sblock->pagev[page_index]);
}
}
@@ -1215,14 +1288,63 @@ out:
return 0;
}
+static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
+{
+ if (raid_map) {
+ if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
+ return 3;
+ else
+ return 2;
+ } else {
+ return (int)bbio->num_stripes;
+ }
+}
+
+static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
+ u64 mapped_length,
+ int nstripes, int mirror,
+ int *stripe_index,
+ u64 *stripe_offset)
+{
+ int i;
+
+ if (raid_map) {
+ /* RAID5/6 */
+ for (i = 0; i < nstripes; i++) {
+ if (raid_map[i] == RAID6_Q_STRIPE ||
+ raid_map[i] == RAID5_P_STRIPE)
+ continue;
+
+ if (logical >= raid_map[i] &&
+ logical < raid_map[i] + mapped_length)
+ break;
+ }
+
+ *stripe_index = i;
+ *stripe_offset = logical - raid_map[i];
+ } else {
+ /* The other RAID type */
+ *stripe_index = mirror;
+ *stripe_offset = 0;
+ }
+}
+
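To make the mapping performed by scrub_stripe_index_and_offset() above concrete, the following self-contained userspace sketch maps a logical address to a (stripe index, offset) pair given a raid_map. The P_STRIPE/Q_STRIPE values and the raid_map contents in main() are stand-ins invented for the example, not the kernel's actual constants.

	#include <stdint.h>
	#include <stdio.h>

	/* stand-ins for the parity sentinels found at the end of raid_map */
	#define P_STRIPE ((uint64_t)-2)
	#define Q_STRIPE ((uint64_t)-1)

	static void stripe_index_and_offset(uint64_t logical, const uint64_t *raid_map,
					    uint64_t mapped_length, int nstripes,
					    int *stripe_index, uint64_t *stripe_offset)
	{
		int i;

		/* the caller guarantees the logical address falls in a data stripe */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == P_STRIPE || raid_map[i] == Q_STRIPE)
				continue;
			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}
		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	}

	int main(void)
	{
		/* two data stripes of 64K plus P and Q, starting at logical 1M */
		uint64_t raid_map[] = { 1 << 20, (1 << 20) + 65536, P_STRIPE, Q_STRIPE };
		int index;
		uint64_t offset;

		stripe_index_and_offset((1 << 20) + 70000, raid_map, 65536, 4,
					&index, &offset);
		printf("stripe %d, offset %llu\n", index, (unsigned long long)offset);
		return 0;
	}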
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
struct btrfs_fs_info *fs_info,
struct scrub_block *original_sblock,
u64 length, u64 logical,
struct scrub_block *sblocks_for_recheck)
{
+ struct scrub_recover *recover;
+ struct btrfs_bio *bbio;
+ u64 *raid_map;
+ u64 sublen;
+ u64 mapped_length;
+ u64 stripe_offset;
+ int stripe_index;
int page_index;
int mirror_index;
+ int nmirrors;
int ret;
/*
@@ -1233,23 +1355,39 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
page_index = 0;
while (length > 0) {
- u64 sublen = min_t(u64, length, PAGE_SIZE);
- u64 mapped_length = sublen;
- struct btrfs_bio *bbio = NULL;
+ sublen = min_t(u64, length, PAGE_SIZE);
+ mapped_length = sublen;
+ bbio = NULL;
+ raid_map = NULL;
/*
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
- ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
- &mapped_length, &bbio, 0);
+ ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
+ &mapped_length, &bbio, 0, &raid_map);
if (ret || !bbio || mapped_length < sublen) {
kfree(bbio);
+ kfree(raid_map);
return -EIO;
}
+ recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
+ if (!recover) {
+ kfree(bbio);
+ kfree(raid_map);
+ return -ENOMEM;
+ }
+
+ atomic_set(&recover->refs, 1);
+ recover->bbio = bbio;
+ recover->raid_map = raid_map;
+ recover->map_length = mapped_length;
+
BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
- for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
+
+ nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
+ for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) {
struct scrub_block *sblock;
struct scrub_page *page;
@@ -1265,26 +1403,38 @@ leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
- kfree(bbio);
+ scrub_put_recover(recover);
return -ENOMEM;
}
scrub_page_get(page);
sblock->pagev[page_index] = page;
page->logical = logical;
- page->physical = bbio->stripes[mirror_index].physical;
+
+ scrub_stripe_index_and_offset(logical, raid_map,
+ mapped_length,
+ bbio->num_stripes,
+ mirror_index,
+ &stripe_index,
+ &stripe_offset);
+ page->physical = bbio->stripes[stripe_index].physical +
+ stripe_offset;
+ page->dev = bbio->stripes[stripe_index].dev;
+
BUG_ON(page_index >= original_sblock->page_count);
page->physical_for_dev_replace =
original_sblock->pagev[page_index]->
physical_for_dev_replace;
/* for missing devices, dev->bdev is NULL */
- page->dev = bbio->stripes[mirror_index].dev;
page->mirror_num = mirror_index + 1;
sblock->page_count++;
page->page = alloc_page(GFP_NOFS);
if (!page->page)
goto leave_nomem;
+
+ scrub_get_recover(recover);
+ page->recover = recover;
}
- kfree(bbio);
+ scrub_put_recover(recover);
length -= sublen;
logical += sublen;
page_index++;
@@ -1293,6 +1443,51 @@ leave_nomem:
return 0;
}
+struct scrub_bio_ret {
+ struct completion event;
+ int error;
+};
+
+static void scrub_bio_wait_endio(struct bio *bio, int error)
+{
+ struct scrub_bio_ret *ret = bio->bi_private;
+
+ ret->error = error;
+ complete(&ret->event);
+}
+
+static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+{
+ return page->recover && page->recover->raid_map;
+}
+
+static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
+ struct bio *bio,
+ struct scrub_page *page)
+{
+ struct scrub_bio_ret done;
+ int ret;
+
+ init_completion(&done.event);
+ done.error = 0;
+ bio->bi_iter.bi_sector = page->logical >> 9;
+ bio->bi_private = &done;
+ bio->bi_end_io = scrub_bio_wait_endio;
+
+ ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
+ page->recover->raid_map,
+ page->recover->map_length,
+ page->mirror_num, 0);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&done.event);
+ if (done.error)
+ return -EIO;
+
+ return 0;
+}
+
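scrub_bio_wait_endio()/scrub_submit_raid56_bio_wait() above follow the usual submit-then-wait shape built on the kernel's completion API (init_completion/complete/wait_for_completion). As a rough userspace analogue of that pattern only, with invented names and pthreads standing in for the kernel primitives:

	#include <pthread.h>

	struct completion_model {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
		int error;
	};

	/* the "end_io" side: record the result and wake the waiter */
	static void model_complete(struct completion_model *c, int error)
	{
		pthread_mutex_lock(&c->lock);
		c->error = error;
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	/* the submitter side: block until the I/O callback has fired */
	static int model_wait(struct completion_model *c)
	{
		int err;

		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		err = c->error;
		pthread_mutex_unlock(&c->lock);
		return err;
	}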
/*
* this function will check the on disk data for checksum errors, header
* errors and read I/O errors. If any I/O errors happen, the exact pages
@@ -1303,7 +1498,7 @@ leave_nomem:
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock, int is_metadata,
int have_csum, u8 *csum, u64 generation,
- u16 csum_size)
+ u16 csum_size, int retry_failed_mirror)
{
int page_num;
@@ -1329,11 +1524,17 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
continue;
}
bio->bi_bdev = page->dev->bdev;
- bio->bi_iter.bi_sector = page->physical >> 9;
bio_add_page(bio, page->page, PAGE_SIZE, 0);
- if (btrfsic_submit_bio_wait(READ, bio))
- sblock->no_io_error_seen = 0;
+ if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
+ if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
+ sblock->no_io_error_seen = 0;
+ } else {
+ bio->bi_iter.bi_sector = page->physical >> 9;
+
+ if (btrfsic_submit_bio_wait(READ, bio))
+ sblock->no_io_error_seen = 0;
+ }
bio_put(bio);
}
@@ -1486,6 +1687,13 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
int page_num;
+ /*
+ * This block is used to check parity on the source device, so the
+ * data does not need to be written to the destination device.
+ */
+ if (sblock->sparity)
+ return;
+
for (page_num = 0; page_num < sblock->page_count; page_num++) {
int ret;
@@ -1867,6 +2075,9 @@ static void scrub_block_put(struct scrub_block *sblock)
if (atomic_dec_and_test(&sblock->ref_count)) {
int i;
+ if (sblock->sparity)
+ scrub_parity_put(sblock->sparity);
+
for (i = 0; i < sblock->page_count; i++)
scrub_page_put(sblock->pagev[i]);
kfree(sblock);
@@ -2124,9 +2335,51 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
scrub_pending_bio_dec(sctx);
}
+static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
+ unsigned long *bitmap,
+ u64 start, u64 len)
+{
+ int offset;
+ int nsectors;
+ int sectorsize = sparity->sctx->dev_root->sectorsize;
+
+ if (len >= sparity->stripe_len) {
+ bitmap_set(bitmap, 0, sparity->nsectors);
+ return;
+ }
+
+ start -= sparity->logic_start;
+ offset = (int)do_div(start, sparity->stripe_len);
+ offset /= sectorsize;
+ nsectors = (int)len / sectorsize;
+
+ if (offset + nsectors <= sparity->nsectors) {
+ bitmap_set(bitmap, offset, nsectors);
+ return;
+ }
+
+ bitmap_set(bitmap, offset, sparity->nsectors - offset);
+ bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
+}
+
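The wrap-around in __scrub_mark_bitmap() above can be hard to picture. Here is a small self-contained userspace sketch with the same wrap-around behaviour; the sector size, stripe length, and range are example values chosen for illustration, not taken from a real filesystem.

	#include <stdio.h>

	/* mark the sectors of [start, start + len) inside one rotated stripe */
	static void mark_range(unsigned int *bitmap, unsigned long long logic_start,
			       unsigned long long stripe_len, unsigned int sectorsize,
			       unsigned long long start, unsigned long long len)
	{
		unsigned int total = (unsigned int)(stripe_len / sectorsize);
		unsigned int offset, nsectors, i;

		if (len >= stripe_len) {
			*bitmap = (1u << total) - 1;	/* the whole stripe has data */
			return;
		}

		offset = (unsigned int)(((start - logic_start) % stripe_len) / sectorsize);
		nsectors = (unsigned int)(len / sectorsize);

		/* set nsectors bits starting at offset, wrapping at the stripe end */
		for (i = 0; i < nsectors; i++)
			*bitmap |= 1u << ((offset + i) % total);
	}

	int main(void)
	{
		unsigned int bitmap = 0;

		/* 4K sectors, 64K stripe; mark 24K starting 56K into the stripe */
		mark_range(&bitmap, 0, 65536, 4096, 57344, 24576);
		printf("bitmap = 0x%04x\n", bitmap);	/* 0xc00f: bits 14-15 wrap to 0-3 */
		return 0;
	}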
+static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
+ u64 start, u64 len)
+{
+ __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
+}
+
+static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
+ u64 start, u64 len)
+{
+ __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
+}
+
static void scrub_block_complete(struct scrub_block *sblock)
{
+ int corrupted = 0;
+
if (!sblock->no_io_error_seen) {
+ corrupted = 1;
scrub_handle_errored_block(sblock);
} else {
/*
@@ -2134,9 +2387,19 @@ static void scrub_block_complete(struct scrub_block *sblock)
* dev replace case, otherwise write here in dev replace
* case.
*/
- if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
+ corrupted = scrub_checksum(sblock);
+ if (!corrupted && sblock->sctx->is_dev_replace)
scrub_write_block_to_dev_replace(sblock);
}
+
+ if (sblock->sparity && corrupted && !sblock->data_corrected) {
+ u64 start = sblock->pagev[0]->logical;
+ u64 end = sblock->pagev[sblock->page_count - 1]->logical +
+ PAGE_SIZE;
+
+ scrub_parity_mark_sectors_error(sblock->sparity,
+ start, end - start);
+ }
}
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
@@ -2228,6 +2491,132 @@ behind_scrub_pages:
return 0;
}
+static int scrub_pages_for_parity(struct scrub_parity *sparity,
+ u64 logical, u64 len,
+ u64 physical, struct btrfs_device *dev,
+ u64 flags, u64 gen, int mirror_num, u8 *csum)
+{
+ struct scrub_ctx *sctx = sparity->sctx;
+ struct scrub_block *sblock;
+ int index;
+
+ sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+ if (!sblock) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return -ENOMEM;
+ }
+
+ /* one ref inside this function, plus one for each page added to
+ * a bio later on */
+ atomic_set(&sblock->ref_count, 1);
+ sblock->sctx = sctx;
+ sblock->no_io_error_seen = 1;
+ sblock->sparity = sparity;
+ scrub_parity_get(sparity);
+
+ for (index = 0; len > 0; index++) {
+ struct scrub_page *spage;
+ u64 l = min_t(u64, len, PAGE_SIZE);
+
+ spage = kzalloc(sizeof(*spage), GFP_NOFS);
+ if (!spage) {
+leave_nomem:
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ scrub_block_put(sblock);
+ return -ENOMEM;
+ }
+ BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+ /* For scrub block */
+ scrub_page_get(spage);
+ sblock->pagev[index] = spage;
+ /* For scrub parity */
+ scrub_page_get(spage);
+ list_add_tail(&spage->list, &sparity->spages);
+ spage->sblock = sblock;
+ spage->dev = dev;
+ spage->flags = flags;
+ spage->generation = gen;
+ spage->logical = logical;
+ spage->physical = physical;
+ spage->mirror_num = mirror_num;
+ if (csum) {
+ spage->have_csum = 1;
+ memcpy(spage->csum, csum, sctx->csum_size);
+ } else {
+ spage->have_csum = 0;
+ }
+ sblock->page_count++;
+ spage->page = alloc_page(GFP_NOFS);
+ if (!spage->page)
+ goto leave_nomem;
+ len -= l;
+ logical += l;
+ physical += l;
+ }
+
+ WARN_ON(sblock->page_count == 0);
+ for (index = 0; index < sblock->page_count; index++) {
+ struct scrub_page *spage = sblock->pagev[index];
+ int ret;
+
+ ret = scrub_add_page_to_rd_bio(sctx, spage);
+ if (ret) {
+ scrub_block_put(sblock);
+ return ret;
+ }
+ }
+
+ /* last one frees, either here or in bio completion for last page */
+ scrub_block_put(sblock);
+ return 0;
+}
+
+static int scrub_extent_for_parity(struct scrub_parity *sparity,
+ u64 logical, u64 len,
+ u64 physical, struct btrfs_device *dev,
+ u64 flags, u64 gen, int mirror_num)
+{
+ struct scrub_ctx *sctx = sparity->sctx;
+ int ret;
+ u8 csum[BTRFS_CSUM_SIZE];
+ u32 blocksize;
+
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ blocksize = sctx->sectorsize;
+ } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ blocksize = sctx->nodesize;
+ } else {
+ blocksize = sctx->sectorsize;
+ WARN_ON(1);
+ }
+
+ while (len) {
+ u64 l = min_t(u64, len, blocksize);
+ int have_csum = 0;
+
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ /* push csums to sbio */
+ have_csum = scrub_find_csum(sctx, logical, l, csum);
+ if (have_csum == 0)
+ goto skip;
+ }
+ ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
+ flags, gen, mirror_num,
+ have_csum ? csum : NULL);
+skip:
+ if (ret)
+ return ret;
+ len -= l;
+ logical += l;
+ physical += l;
+ }
+ return 0;
+}
+
/*
* Given a physical address, this will calculate its
* logical offset. If this is a parity stripe, it will return
@@ -2236,7 +2625,8 @@ behind_scrub_pages:
* return 0 if it is a data stripe, 1 means parity stripe.
*/
static int get_raid56_logic_offset(u64 physical, int num,
- struct map_lookup *map, u64 *offset)
+ struct map_lookup *map, u64 *offset,
+ u64 *stripe_start)
{
int i;
int j = 0;
@@ -2247,6 +2637,9 @@ static int get_raid56_logic_offset(u64 physical, int num,
last_offset = (physical - map->stripes[num].physical) *
nr_data_stripes(map);
+ if (stripe_start)
+ *stripe_start = last_offset;
+
*offset = last_offset;
for (i = 0; i < nr_data_stripes(map); i++) {
*offset = last_offset + i * map->stripe_len;
@@ -2269,13 +2662,330 @@ static int get_raid56_logic_offset(u64 physical, int num,
return 1;
}
+static void scrub_free_parity(struct scrub_parity *sparity)
+{
+ struct scrub_ctx *sctx = sparity->sctx;
+ struct scrub_page *curr, *next;
+ int nbits;
+
+ nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
+ if (nbits) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.read_errors += nbits;
+ sctx->stat.uncorrectable_errors += nbits;
+ spin_unlock(&sctx->stat_lock);
+ }
+
+ list_for_each_entry_safe(curr, next, &sparity->spages, list) {
+ list_del_init(&curr->list);
+ scrub_page_put(curr);
+ }
+
+ kfree(sparity);
+}
+
+static void scrub_parity_bio_endio(struct bio *bio, int error)
+{
+ struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
+ struct scrub_ctx *sctx = sparity->sctx;
+
+ if (error)
+ bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
+ sparity->nsectors);
+
+ scrub_free_parity(sparity);
+ scrub_pending_bio_dec(sctx);
+ bio_put(bio);
+}
+
+static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
+{
+ struct scrub_ctx *sctx = sparity->sctx;
+ struct bio *bio;
+ struct btrfs_raid_bio *rbio;
+ struct scrub_page *spage;
+ struct btrfs_bio *bbio = NULL;
+ u64 *raid_map = NULL;
+ u64 length;
+ int ret;
+
+ if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
+ sparity->nsectors))
+ goto out;
+
+ length = sparity->logic_end - sparity->logic_start + 1;
+ ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
+ sparity->logic_start,
+ &length, &bbio, 0, &raid_map);
+ if (ret || !bbio || !raid_map)
+ goto bbio_out;
+
+ bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+ if (!bio)
+ goto bbio_out;
+
+ bio->bi_iter.bi_sector = sparity->logic_start >> 9;
+ bio->bi_private = sparity;
+ bio->bi_end_io = scrub_parity_bio_endio;
+
+ rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
+ raid_map, length,
+ sparity->scrub_dev,
+ sparity->dbitmap,
+ sparity->nsectors);
+ if (!rbio)
+ goto rbio_out;
+
+ list_for_each_entry(spage, &sparity->spages, list)
+ raid56_parity_add_scrub_pages(rbio, spage->page,
+ spage->logical);
+
+ scrub_pending_bio_inc(sctx);
+ raid56_parity_submit_scrub_rbio(rbio);
+ return;
+
+rbio_out:
+ bio_put(bio);
+bbio_out:
+ kfree(bbio);
+ kfree(raid_map);
+ bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
+ sparity->nsectors);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+out:
+ scrub_free_parity(sparity);
+}
+
+static inline int scrub_calc_parity_bitmap_len(int nsectors)
+{
+ return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
+}
+
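The bitmap length computed above rounds the sector count up to whole unsigned longs and converts to bytes. A quick self-contained check of the arithmetic, assuming a 64-bit BITS_PER_LONG for the example:

	#include <stdio.h>

	#define BITS_PER_LONG 64	/* assumption for this example */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static int parity_bitmap_len(int nsectors)
	{
		return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
	}

	int main(void)
	{
		/* e.g. 64K stripe / 4K sectors = 16 sectors -> one long -> 8 bytes */
		printf("%d\n", parity_bitmap_len(16));	/* prints 8 */
		/* scrub_raid56_parity() allocates 2 * this, for dbitmap and ebitmap */
		return 0;
	}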
+static void scrub_parity_get(struct scrub_parity *sparity)
+{
+ atomic_inc(&sparity->ref_count);
+}
+
+static void scrub_parity_put(struct scrub_parity *sparity)
+{
+ if (!atomic_dec_and_test(&sparity->ref_count))
+ return;
+
+ scrub_parity_check_and_repair(sparity);
+}
+
+static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
+ struct map_lookup *map,
+ struct btrfs_device *sdev,
+ struct btrfs_path *path,
+ u64 logic_start,
+ u64 logic_end)
+{
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *csum_root = fs_info->csum_root;
+ struct btrfs_extent_item *extent;
+ u64 flags;
+ int ret;
+ int slot;
+ struct extent_buffer *l;
+ struct btrfs_key key;
+ u64 generation;
+ u64 extent_logical;
+ u64 extent_physical;
+ u64 extent_len;
+ struct btrfs_device *extent_dev;
+ struct scrub_parity *sparity;
+ int nsectors;
+ int bitmap_len;
+ int extent_mirror_num;
+ int stop_loop = 0;
+
+ nsectors = map->stripe_len / root->sectorsize;
+ bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
+ sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
+ GFP_NOFS);
+ if (!sparity) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return -ENOMEM;
+ }
+
+ sparity->stripe_len = map->stripe_len;
+ sparity->nsectors = nsectors;
+ sparity->sctx = sctx;
+ sparity->scrub_dev = sdev;
+ sparity->logic_start = logic_start;
+ sparity->logic_end = logic_end;
+ atomic_set(&sparity->ref_count, 1);
+ INIT_LIST_HEAD(&sparity->spages);
+ sparity->dbitmap = sparity->bitmap;
+ sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
+
+ ret = 0;
+ while (logic_start < logic_end) {
+ if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ else
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.objectid = logic_start;
+ key.offset = (u64)-1;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = btrfs_previous_extent_item(root, path, 0);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(NULL, root, &key,
+ path, 0, 0);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ stop_loop = 0;
+ while (1) {
+ u64 bytes;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret == 0)
+ continue;
+ if (ret < 0)
+ goto out;
+
+ stop_loop = 1;
+ break;
+ }
+ btrfs_item_key_to_cpu(l, &key, slot);
+
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ bytes = root->nodesize;
+ else
+ bytes = key.offset;
+
+ if (key.objectid + bytes <= logic_start)
+ goto next;
+
+ if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+ key.type != BTRFS_METADATA_ITEM_KEY)
+ goto next;
+
+ if (key.objectid > logic_end) {
+ stop_loop = 1;
+ break;
+ }
+
+ while (key.objectid >= logic_start + map->stripe_len)
+ logic_start += map->stripe_len;
+
+ extent = btrfs_item_ptr(l, slot,
+ struct btrfs_extent_item);
+ flags = btrfs_extent_flags(l, extent);
+ generation = btrfs_extent_generation(l, extent);
+
+ if (key.objectid < logic_start &&
+ (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
+ btrfs_err(fs_info,
+ "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
+ key.objectid, logic_start);
+ goto next;
+ }
+again:
+ extent_logical = key.objectid;
+ extent_len = bytes;
+
+ if (extent_logical < logic_start) {
+ extent_len -= logic_start - extent_logical;
+ extent_logical = logic_start;
+ }
+
+ if (extent_logical + extent_len >
+ logic_start + map->stripe_len)
+ extent_len = logic_start + map->stripe_len -
+ extent_logical;
+
+ scrub_parity_mark_sectors_data(sparity, extent_logical,
+ extent_len);
+
+ scrub_remap_extent(fs_info, extent_logical,
+ extent_len, &extent_physical,
+ &extent_dev,
+ &extent_mirror_num);
+
+ ret = btrfs_lookup_csums_range(csum_root,
+ extent_logical,
+ extent_logical + extent_len - 1,
+ &sctx->csum_list, 1);
+ if (ret)
+ goto out;
+
+ ret = scrub_extent_for_parity(sparity, extent_logical,
+ extent_len,
+ extent_physical,
+ extent_dev, flags,
+ generation,
+ extent_mirror_num);
+ if (ret)
+ goto out;
+
+ scrub_free_csums(sctx);
+ if (extent_logical + extent_len <
+ key.objectid + bytes) {
+ logic_start += map->stripe_len;
+
+ if (logic_start >= logic_end) {
+ stop_loop = 1;
+ break;
+ }
+
+ if (logic_start < key.objectid + bytes) {
+ cond_resched();
+ goto again;
+ }
+ }
+next:
+ path->slots[0]++;
+ }
+
+ btrfs_release_path(path);
+
+ if (stop_loop)
+ break;
+
+ logic_start += map->stripe_len;
+ }
+out:
+ if (ret < 0)
+ scrub_parity_mark_sectors_error(sparity, logic_start,
+ logic_end - logic_start + 1);
+ scrub_parity_put(sparity);
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_ctx.wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_ctx.wr_lock);
+
+ btrfs_release_path(path);
+ return ret < 0 ? ret : 0;
+}
+
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
int num, u64 base, u64 length,
int is_dev_replace)
{
- struct btrfs_path *path;
+ struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
@@ -2302,6 +3012,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
u64 extent_logical;
u64 extent_physical;
u64 extent_len;
+ u64 stripe_logical;
+ u64 stripe_end;
struct btrfs_device *extent_dev;
int extent_mirror_num;
int stop_loop = 0;
@@ -2327,7 +3039,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
mirror_num = num % map->num_stripes + 1;
} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) {
- get_raid56_logic_offset(physical, num, map, &offset);
+ get_raid56_logic_offset(physical, num, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map);
mirror_num = 1;
} else {
@@ -2339,6 +3051,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (!path)
return -ENOMEM;
+ ppath = btrfs_alloc_path();
+ if (!ppath) {
+ btrfs_free_path(ppath);
+ return -ENOMEM;
+ }
+
/*
* work on commit root. The related disk blocks are static as
* long as COW is applied. This means, it is save to rewrite
@@ -2357,7 +3075,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) {
get_raid56_logic_offset(physical_end, num,
- map, &logic_end);
+ map, &logic_end, NULL);
logic_end += base;
} else {
logic_end = logical + increment * nstripes;
@@ -2404,10 +3122,18 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) {
ret = get_raid56_logic_offset(physical, num,
- map, &logical);
+ map, &logical, &stripe_logical);
logical += base;
- if (ret)
+ if (ret) {
+ stripe_logical += base;
+ stripe_end = stripe_logical + increment - 1;
+ ret = scrub_raid56_parity(sctx, map, scrub_dev,
+ ppath, stripe_logical,
+ stripe_end);
+ if (ret)
+ goto out;
goto skip;
+ }
}
/*
* canceled?
@@ -2558,13 +3284,25 @@ again:
* loop until we find next data stripe
* or we have finished all stripes.
*/
- do {
- physical += map->stripe_len;
- ret = get_raid56_logic_offset(
- physical, num,
- map, &logical);
- logical += base;
- } while (physical < physical_end && ret);
+loop:
+ physical += map->stripe_len;
+ ret = get_raid56_logic_offset(physical,
+ num, map, &logical,
+ &stripe_logical);
+ logical += base;
+
+ if (ret && physical < physical_end) {
+ stripe_logical += base;
+ stripe_end = stripe_logical +
+ increment - 1;
+ ret = scrub_raid56_parity(sctx,
+ map, scrub_dev, ppath,
+ stripe_logical,
+ stripe_end);
+ if (ret)
+ goto out;
+ goto loop;
+ }
} else {
physical += map->stripe_len;
logical += increment;
@@ -2605,6 +3343,7 @@ out:
blk_finish_plug(&plug);
btrfs_free_path(path);
+ btrfs_free_path(ppath);
return ret < 0 ? ret : 0;
}
@@ -3310,6 +4049,50 @@ out:
scrub_pending_trans_workers_dec(sctx);
}
+static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
+ u64 logical)
+{
+ struct extent_state *cached_state = NULL;
+ struct btrfs_ordered_extent *ordered;
+ struct extent_io_tree *io_tree;
+ struct extent_map *em;
+ u64 lockstart = start, lockend = start + len - 1;
+ int ret = 0;
+
+ io_tree = &BTRFS_I(inode)->io_tree;
+
+ lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ ret = 1;
+ goto out_unlock;
+ }
+
+ em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out_unlock;
+ }
+
+ /*
+ * This extent does not actually cover the logical extent anymore,
+ * move on to the next inode.
+ */
+ if (em->block_start > logical ||
+ em->block_start + em->block_len < logical + len) {
+ free_extent_map(em);
+ ret = 1;
+ goto out_unlock;
+ }
+ free_extent_map(em);
+
+out_unlock:
+ unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
+ GFP_NOFS);
+ return ret;
+}
+
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
struct scrub_copy_nocow_ctx *nocow_ctx)
{
@@ -3318,13 +4101,10 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
struct inode *inode;
struct page *page;
struct btrfs_root *local_root;
- struct btrfs_ordered_extent *ordered;
- struct extent_map *em;
- struct extent_state *cached_state = NULL;
struct extent_io_tree *io_tree;
u64 physical_for_dev_replace;
+ u64 nocow_ctx_logical;
u64 len = nocow_ctx->len;
- u64 lockstart = offset, lockend = offset + len - 1;
unsigned long index;
int srcu_index;
int ret = 0;
@@ -3356,30 +4136,13 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
io_tree = &BTRFS_I(inode)->io_tree;
+ nocow_ctx_logical = nocow_ctx->logical;
- lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
- ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
- if (ordered) {
- btrfs_put_ordered_extent(ordered);
- goto out_unlock;
- }
-
- em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out_unlock;
- }
-
- /*
- * This extent does not actually cover the logical extent anymore,
- * move on to the next inode.
- */
- if (em->block_start > nocow_ctx->logical ||
- em->block_start + em->block_len < nocow_ctx->logical + len) {
- free_extent_map(em);
- goto out_unlock;
+ ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
+ if (ret) {
+ ret = ret > 0 ? 0 : ret;
+ goto out;
}
- free_extent_map(em);
while (len >= PAGE_CACHE_SIZE) {
index = offset >> PAGE_CACHE_SHIFT;
@@ -3396,7 +4159,7 @@ again:
goto next_page;
} else {
ClearPageError(page);
- err = extent_read_full_page_nolock(io_tree, page,
+ err = extent_read_full_page(io_tree, page,
btrfs_get_extent,
nocow_ctx->mirror_num);
if (err) {
@@ -3421,6 +4184,14 @@ again:
goto next_page;
}
}
+
+ ret = check_extent_to_block(inode, offset, len,
+ nocow_ctx_logical);
+ if (ret) {
+ ret = ret > 0 ? 0 : ret;
+ goto next_page;
+ }
+
err = write_page_nocow(nocow_ctx->sctx,
physical_for_dev_replace, page);
if (err)
@@ -3434,12 +4205,10 @@ next_page:
offset += PAGE_CACHE_SIZE;
physical_for_dev_replace += PAGE_CACHE_SIZE;
+ nocow_ctx_logical += PAGE_CACHE_SIZE;
len -= PAGE_CACHE_SIZE;
}
ret = COPY_COMPLETE;
-out_unlock:
- unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
- GFP_NOFS);
out:
mutex_unlock(&inode->i_mutex);
iput(inode);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 874828dd0a86..804432dbc351 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5507,6 +5507,51 @@ out:
return ret;
}
+/*
+ * If orphan cleanup did remove any orphans from a root, it means the tree
+ * was modified and therefore the commit root is not the same as the current
+ * root anymore. This is a problem, because send uses the commit root and
+ * therefore can see inode items that don't exist in the current root anymore,
+ * and for example make calls to btrfs_iget, which will do tree lookups based
+ * on the current root and not on the commit root. Those lookups will fail,
+ * returning a -ESTALE error, and making send fail with that error. So make
+ * sure a send does not see any orphans we have just removed, and that it will
+ * see the same inodes regardless of whether a transaction commit happened
+ * before it started (meaning that the commit root will be the same as the
+ * current root) or not.
+ */
+static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
+{
+ int i;
+ struct btrfs_trans_handle *trans = NULL;
+
+again:
+ if (sctx->parent_root &&
+ sctx->parent_root->node != sctx->parent_root->commit_root)
+ goto commit_trans;
+
+ for (i = 0; i < sctx->clone_roots_cnt; i++)
+ if (sctx->clone_roots[i].root->node !=
+ sctx->clone_roots[i].root->commit_root)
+ goto commit_trans;
+
+ if (trans)
+ return btrfs_end_transaction(trans, sctx->send_root);
+
+ return 0;
+
+commit_trans:
+ /* Use any root, all fs roots will get their commit roots updated. */
+ if (!trans) {
+ trans = btrfs_join_transaction(sctx->send_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ goto again;
+ }
+
+ return btrfs_commit_transaction(trans, sctx->send_root);
+}
+
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
{
spin_lock(&root->root_item_lock);
@@ -5728,6 +5773,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
NULL);
sort_clone_roots = 1;
+ ret = ensure_commit_roots_uptodate(sctx);
+ if (ret)
+ goto out;
+
current->journal_info = BTRFS_SEND_TRANS_STUB;
ret = send_subvol(sctx);
current->journal_info = NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 54bd91ece35b..60f7cbe815e9 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -262,7 +262,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
- if (!trans->blocks_used) {
+ if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
const char *errstr;
errstr = btrfs_decode_error(errno);
@@ -642,11 +642,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
"disabling disk space caching");
break;
case Opt_inode_cache:
- btrfs_set_and_info(root, CHANGE_INODE_CACHE,
+ btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
"enabling inode map caching");
break;
case Opt_noinode_cache:
- btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
+ btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
"disabling inode map caching");
break;
case Opt_clear_cache:
@@ -993,9 +993,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
- if (PTR_ERR(trans) == -ENOENT)
- return 0;
- return PTR_ERR(trans);
+ if (PTR_ERR(trans) == -ENOENT) {
+ /*
+ * Exit unless we have some pending changes
+ * that need to go through commit
+ */
+ if (fs_info->pending_changes == 0)
+ return 0;
+ trans = btrfs_start_transaction(root, 0);
+ } else {
+ return PTR_ERR(trans);
+ }
}
return btrfs_commit_transaction(trans, root);
}
@@ -1644,8 +1652,20 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
int i = 0, nr_devices;
int ret;
+ /*
+ * We aren't under the device list lock, so this is racy-ish, but good
+ * enough for our purposes.
+ */
nr_devices = fs_info->fs_devices->open_devices;
- BUG_ON(!nr_devices);
+ if (!nr_devices) {
+ smp_mb();
+ nr_devices = fs_info->fs_devices->open_devices;
+ ASSERT(nr_devices);
+ if (!nr_devices) {
+ *free_bytes = 0;
+ return 0;
+ }
+ }
devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
GFP_NOFS);
@@ -1670,11 +1690,17 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
else
min_stripe_size = BTRFS_STRIPE_LEN;
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (fs_info->alloc_start)
+ mutex_lock(&fs_devices->device_list_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
if (!device->in_fs_metadata || !device->bdev ||
device->is_tgtdev_for_dev_replace)
continue;
+ if (i >= nr_devices)
+ break;
+
avail_space = device->total_bytes - device->bytes_used;
/* align with stripe_len */
@@ -1689,24 +1715,32 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
skip_space = 1024 * 1024;
/* user can set the offset in fs_info->alloc_start. */
- if (fs_info->alloc_start + BTRFS_STRIPE_LEN <=
- device->total_bytes)
+ if (fs_info->alloc_start &&
+ fs_info->alloc_start + BTRFS_STRIPE_LEN <=
+ device->total_bytes) {
+ rcu_read_unlock();
skip_space = max(fs_info->alloc_start, skip_space);
- /*
- * btrfs can not use the free space in [0, skip_space - 1],
- * we must subtract it from the total. In order to implement
- * it, we account the used space in this range first.
- */
- ret = btrfs_account_dev_extents_size(device, 0, skip_space - 1,
- &used_space);
- if (ret) {
- kfree(devices_info);
- return ret;
- }
+ /*
+ * btrfs can not use the free space in
+ * [0, skip_space - 1], we must subtract it from the
+ * total. In order to implement it, we account the used
+ * space in this range first.
+ */
+ ret = btrfs_account_dev_extents_size(device, 0,
+ skip_space - 1,
+ &used_space);
+ if (ret) {
+ kfree(devices_info);
+ mutex_unlock(&fs_devices->device_list_mutex);
+ return ret;
+ }
- /* calc the free space in [0, skip_space - 1] */
- skip_space -= used_space;
+ rcu_read_lock();
+
+ /* calc the free space in [0, skip_space - 1] */
+ skip_space -= used_space;
+ }
/*
* we can use the free space in [0, skip_space - 1], subtract
@@ -1725,6 +1759,9 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
i++;
}
+ rcu_read_unlock();
+ if (fs_info->alloc_start)
+ mutex_unlock(&fs_devices->device_list_mutex);
nr_devices = i;
@@ -1787,8 +1824,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* holding chunk_muext to avoid allocating new chunks, holding
* device_list_mutex to avoid the device being removed
*/
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- mutex_lock(&fs_info->chunk_mutex);
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -1824,17 +1859,12 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree -= block_rsv->size >> bits;
spin_unlock(&block_rsv->lock);
- buf->f_bavail = total_free_data;
+ buf->f_bavail = div_u64(total_free_data, factor);
ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
- if (ret) {
- mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ if (ret)
return ret;
- }
buf->f_bavail += div_u64(total_free_data, factor);
buf->f_bavail = buf->f_bavail >> bits;
- mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
buf->f_type = BTRFS_SUPER_MAGIC;
buf->f_bsize = dentry->d_sb->s_blocksize;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index b2e7bb4393f6..92db3f648df4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -111,7 +111,6 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info;
struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
- struct btrfs_trans_handle *trans;
u64 features, set, clear;
unsigned long val;
int ret;
@@ -153,10 +152,6 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
btrfs_info(fs_info, "%s %s feature flag",
val ? "Setting" : "Clearing", fa->kobj_attr.attr.name);
- trans = btrfs_start_transaction(fs_info->fs_root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
spin_lock(&fs_info->super_lock);
features = get_features(fs_info, fa->feature_set);
if (val)
@@ -166,9 +161,11 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
set_features(fs_info, fa->feature_set, features);
spin_unlock(&fs_info->super_lock);
- ret = btrfs_commit_transaction(trans, fs_info->fs_root);
- if (ret)
- return ret;
+ /*
+ * We don't want to do full transaction commit from inside sysfs
+ */
+ btrfs_set_pending(fs_info, COMMIT);
+ wake_up_process(fs_info->transaction_kthread);
return count;
}
@@ -372,9 +369,6 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
const char *buf, size_t len)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = fs_info->fs_root;
- int ret;
size_t p_len;
if (fs_info->sb->s_flags & MS_RDONLY)
@@ -389,20 +383,18 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
if (p_len >= BTRFS_LABEL_SIZE)
return -EINVAL;
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
-
- spin_lock(&root->fs_info->super_lock);
+ spin_lock(&fs_info->super_lock);
memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
memcpy(fs_info->super_copy->label, buf, p_len);
- spin_unlock(&root->fs_info->super_lock);
- ret = btrfs_commit_transaction(trans, root);
+ spin_unlock(&fs_info->super_lock);
- if (!ret)
- return len;
+ /*
+ * We don't want to do full transaction commit from inside sysfs
+ */
+ btrfs_set_pending(fs_info, COMMIT);
+ wake_up_process(fs_info->transaction_kthread);
- return ret;
+ return len;
}
BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dcaae3616728..a605d4e2f2bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -76,6 +76,32 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
}
}
+static void clear_btree_io_tree(struct extent_io_tree *tree)
+{
+ spin_lock(&tree->lock);
+ while (!RB_EMPTY_ROOT(&tree->state)) {
+ struct rb_node *node;
+ struct extent_state *state;
+
+ node = rb_first(&tree->state);
+ state = rb_entry(node, struct extent_state, rb_node);
+ rb_erase(&state->rb_node, &tree->state);
+ RB_CLEAR_NODE(&state->rb_node);
+ /*
+ * btree io trees aren't supposed to have tasks waiting for
+ * changes in the flags of extent states ever.
+ */
+ ASSERT(!waitqueue_active(&state->wq));
+ free_extent_state(state);
+ if (need_resched()) {
+ spin_unlock(&tree->lock);
+ cond_resched();
+ spin_lock(&tree->lock);
+ }
+ }
+ spin_unlock(&tree->lock);
+}
+
static noinline void switch_commit_roots(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info)
{
@@ -89,6 +115,7 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans,
root->commit_root = btrfs_root_node(root);
if (is_fstree(root->objectid))
btrfs_unpin_free_ino(root);
+ clear_btree_io_tree(&root->dirty_log_pages);
}
up_write(&fs_info->commit_root_sem);
}
@@ -220,6 +247,7 @@ loop:
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
INIT_LIST_HEAD(&cur_trans->pending_chunks);
INIT_LIST_HEAD(&cur_trans->switch_commits);
+ INIT_LIST_HEAD(&cur_trans->pending_ordered);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
fs_info->btree_inode->i_mapping);
@@ -488,6 +516,7 @@ again:
h->sync = false;
INIT_LIST_HEAD(&h->qgroup_ref_list);
INIT_LIST_HEAD(&h->new_bgs);
+ INIT_LIST_HEAD(&h->ordered);
smp_mb();
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
@@ -719,6 +748,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
+ if (!list_empty(&trans->ordered)) {
+ spin_lock(&info->trans_lock);
+ list_splice(&trans->ordered, &cur_trans->pending_ordered);
+ spin_unlock(&info->trans_lock);
+ }
+
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
@@ -828,17 +863,39 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
- convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
- mark, &cached_state, GFP_NOFS);
- cached_state = NULL;
- err = filemap_fdatawrite_range(mapping, start, end);
+ bool wait_writeback = false;
+
+ err = convert_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT,
+ mark, &cached_state, GFP_NOFS);
+ /*
+ * convert_extent_bit can return -ENOMEM, which is most of the
+ * time a temporary error. So when it happens, ignore the error
+ * and wait for writeback of this range to finish - because we
+ * failed to set the bit EXTENT_NEED_WAIT for the range, a call
+ * to btrfs_wait_marked_extents() would not know that writeback
+ * for this range started and therefore wouldn't wait for it to
+ * finish - we don't want to commit a superblock that points to
+ * btree nodes/leafs for which writeback hasn't finished yet
+ * (and without errors).
+ * We cleanup any entries left in the io tree when committing
+ * the transaction (through clear_btree_io_tree()).
+ */
+ if (err == -ENOMEM) {
+ err = 0;
+ wait_writeback = true;
+ }
+ if (!err)
+ err = filemap_fdatawrite_range(mapping, start, end);
if (err)
werr = err;
+ else if (wait_writeback)
+ werr = filemap_fdatawait_range(mapping, start, end);
+ free_extent_state(cached_state);
+ cached_state = NULL;
cond_resched();
start = end + 1;
}
- if (err)
- werr = err;
return werr;
}
@@ -862,11 +919,25 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT, &cached_state)) {
- clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
- 0, 0, &cached_state, GFP_NOFS);
- err = filemap_fdatawait_range(mapping, start, end);
+ /*
+ * Ignore -ENOMEM errors returned by clear_extent_bit().
+ * When committing the transaction, we'll remove any entries
+ * left in the io tree. For a log commit, we don't remove them
+ * after committing the log because the tree can be accessed
+ * concurrently - we do it only at transaction commit time when
+ * it's safe to do it (through clear_btree_io_tree()).
+ */
+ err = clear_extent_bit(dirty_pages, start, end,
+ EXTENT_NEED_WAIT,
+ 0, 0, &cached_state, GFP_NOFS);
+ if (err == -ENOMEM)
+ err = 0;
+ if (!err)
+ err = filemap_fdatawait_range(mapping, start, end);
if (err)
werr = err;
+ free_extent_state(cached_state);
+ cached_state = NULL;
cond_resched();
start = end + 1;
}
@@ -919,17 +990,17 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
return 0;
}
-int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
+static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- if (!trans || !trans->transaction) {
- struct inode *btree_inode;
- btree_inode = root->fs_info->btree_inode;
- return filemap_write_and_wait(btree_inode->i_mapping);
- }
- return btrfs_write_and_wait_marked_extents(root,
+ int ret;
+
+ ret = btrfs_write_and_wait_marked_extents(root,
&trans->transaction->dirty_pages,
EXTENT_DIRTY);
+ clear_btree_io_tree(&trans->transaction->dirty_pages);
+
+ return ret;
}
/*
@@ -1652,6 +1723,28 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
btrfs_wait_ordered_roots(fs_info, -1);
}
+static inline void
+btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_ordered_extent *ordered;
+
+ spin_lock(&fs_info->trans_lock);
+ while (!list_empty(&cur_trans->pending_ordered)) {
+ ordered = list_first_entry(&cur_trans->pending_ordered,
+ struct btrfs_ordered_extent,
+ trans_list);
+ list_del_init(&ordered->trans_list);
+ spin_unlock(&fs_info->trans_lock);
+
+ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
+ &ordered->flags));
+ btrfs_put_ordered_extent(ordered);
+ spin_lock(&fs_info->trans_lock);
+ }
+ spin_unlock(&fs_info->trans_lock);
+}
+
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
@@ -1702,6 +1795,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
}
spin_lock(&root->fs_info->trans_lock);
+ list_splice(&trans->ordered, &cur_trans->pending_ordered);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
spin_unlock(&root->fs_info->trans_lock);
atomic_inc(&cur_trans->use_count);
@@ -1754,6 +1848,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_wait_delalloc_flush(root->fs_info);
+ btrfs_wait_pending_ordered(cur_trans, root->fs_info);
+
btrfs_scrub_pause(root);
/*
* Ok now we need to make sure to block out any other joins while we
@@ -1842,13 +1938,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
}
/*
- * Since the transaction is done, we should set the inode map cache flag
- * before any other comming transaction.
+ * Since the transaction is done, we can apply the pending changes
+ * before the next transaction.
*/
- if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
- btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
- else
- btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
+ btrfs_apply_pending_changes(root->fs_info);
/* commit_fs_roots gets rid of all the tree log roots, it is now
* safe to free the root of tree log roots
@@ -2019,3 +2112,32 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
return (ret < 0) ? 0 : 1;
}
+
+void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
+{
+ unsigned long prev;
+ unsigned long bit;
+
+ prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+ if (!prev)
+ return;
+
+ bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
+ if (prev & bit)
+ btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
+ prev &= ~bit;
+
+ bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
+ if (prev & bit)
+ btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
+ prev &= ~bit;
+
+ bit = 1 << BTRFS_PENDING_COMMIT;
+ if (prev & bit)
+ btrfs_debug(fs_info, "pending commit done");
+ prev &= ~bit;
+
+ if (prev)
+ btrfs_warn(fs_info,
+ "unknown pending changes left 0x%lx, ignoring", prev);
+}
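The pending-changes mechanism used here (and by the sysfs handlers earlier in this diff) boils down to "set a bit now, apply it at the next transaction commit". The sketch below is a simplified userspace model of that hand-off: the names are invented, C11 atomics stand in for the kernel's atomic ops, the bits are consumed with an atomic exchange, and the printf calls take the place of flipping mount options; the real code also wakes the transaction kthread after setting a bit.

	#include <stdatomic.h>
	#include <stdio.h>

	#define PENDING_SET_INODE_MAP_CACHE	(1ul << 0)
	#define PENDING_CLEAR_INODE_MAP_CACHE	(1ul << 1)
	#define PENDING_COMMIT			(1ul << 2)

	static atomic_ulong pending_changes;

	/* called from contexts that must not commit themselves (e.g. sysfs) */
	static void set_pending(unsigned long bit)
	{
		atomic_fetch_or(&pending_changes, bit);
		/* the real code would now wake the transaction kthread */
	}

	/* called once per transaction commit, after the commit is done */
	static void apply_pending_changes(void)
	{
		unsigned long prev = atomic_exchange(&pending_changes, 0);

		if (prev & PENDING_SET_INODE_MAP_CACHE)
			printf("enable inode map cache\n");
		if (prev & PENDING_CLEAR_INODE_MAP_CACHE)
			printf("disable inode map cache\n");
		if (prev & PENDING_COMMIT)
			printf("commit was requested\n");
	}

	int main(void)
	{
		set_pending(PENDING_COMMIT);
		apply_pending_changes();
		return 0;
	}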
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index d8f40e1a5d2d..00ed29c4b3f9 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -56,6 +56,7 @@ struct btrfs_transaction {
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
struct list_head pending_chunks;
+ struct list_head pending_ordered;
struct list_head switch_commits;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
@@ -105,6 +106,7 @@ struct btrfs_trans_handle {
*/
struct btrfs_root *root;
struct seq_list delayed_ref_elem;
+ struct list_head ordered;
struct list_head qgroup_ref_list;
struct list_head new_bgs;
};
@@ -145,8 +147,6 @@ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
-int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
@@ -170,4 +170,6 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
+void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
+
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 286213cec861..9a02da16f2be 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2599,12 +2599,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
+ mark);
+ btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(trans, log_root_tree,
root_log_ctx.log_transid);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
- ret = root_log_ctx.log_ret;
+ if (!ret)
+ ret = root_log_ctx.log_ret;
goto out;
}
ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
@@ -2641,11 +2643,18 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
- btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
- btrfs_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages,
- EXTENT_NEW | EXTENT_DIRTY);
- btrfs_wait_logged_extents(log, log_transid);
+ ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ if (!ret)
+ ret = btrfs_wait_marked_extents(log_root_tree,
+ &log_root_tree->dirty_log_pages,
+ EXTENT_NEW | EXTENT_DIRTY);
+ if (ret) {
+ btrfs_set_log_full_commit(root->fs_info, trans);
+ btrfs_free_logged_extents(log, log_transid);
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out_wake_log_root;
+ }
+ btrfs_wait_logged_extents(trans, log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
log_root_tree->node->start);
@@ -3626,6 +3635,12 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
+ /*
+ * Clear the AS_EIO/AS_ENOSPC flags from the inode's
+ * i_mapping flags, so that the next fsync won't get
+ * an outdated io error too.
+ */
+ btrfs_inode_check_errors(inode);
*ordered_io_error = true;
break;
}
@@ -3766,7 +3781,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
+ btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
&token);
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
btrfs_set_token_file_extent_type(leaf, fi,
@@ -3963,7 +3978,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
mutex_lock(&BTRFS_I(inode)->log_mutex);
- btrfs_get_logged_extents(inode, &logged_list);
+ btrfs_get_logged_extents(inode, &logged_list, start, end);
/*
* a brute force approach to making sure we get the most uptodate
@@ -4089,6 +4104,21 @@ log_extents:
btrfs_release_path(path);
btrfs_release_path(dst_path);
if (fast_search) {
+ /*
+ * Some ordered extents started by fsync might have completed
+ * before we collected the ordered extents in logged_list, which
+ * means they're gone, not in our logged_list nor in the inode's
+ * ordered tree. We want the application/user space to know an
+ * error happened while attempting to persist file data so that
+ * it can take proper action. If such an error happened, we leave
+ * without writing to the log tree and the fsync must report the
+ * file data write error and not commit the current transaction.
+ */
+ err = btrfs_inode_check_errors(inode);
+ if (err) {
+ ctx->io_err = err;
+ goto out_unlock;
+ }
ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
&logged_list, ctx);
if (ret) {
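
The btrfs_inode_check_errors() calls added above rely on writeback failures being latched on the inode's mapping and reported exactly once, so the next fsync does not see a stale error. A small stand-alone sketch (not part of the patch) of that report-once behaviour; the flag names and precedence are illustrative.

/* hedged sketch: sticky writeback errors cleared on report */
#include <errno.h>
#include <stdio.h>

#define MAP_EIO         (1u << 0)
#define MAP_ENOSPC      (1u << 1)

struct mapping { unsigned int error_flags; };

static int check_and_clear_errors(struct mapping *m)
{
        int err = 0;

        if (m->error_flags & MAP_ENOSPC)
                err = -ENOSPC;
        if (m->error_flags & MAP_EIO)
                err = -EIO;
        m->error_flags = 0;     /* each failure is reported only once */
        return err;
}

int main(void)
{
        struct mapping m = { .error_flags = MAP_EIO };
        int first = check_and_clear_errors(&m);
        int second = check_and_clear_errors(&m);

        printf("first fsync sees %d, second sees %d\n", first, second);
        return 0;
}
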
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d47289c715c8..0144790e296e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -53,16 +53,6 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
-static void lock_chunks(struct btrfs_root *root)
-{
- mutex_lock(&root->fs_info->chunk_mutex);
-}
-
-static void unlock_chunks(struct btrfs_root *root)
-{
- mutex_unlock(&root->fs_info->chunk_mutex);
-}
-
static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
struct btrfs_fs_devices *fs_devs;
@@ -1068,9 +1058,11 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
u64 *start, u64 len)
{
struct extent_map *em;
+ struct list_head *search_list = &trans->transaction->pending_chunks;
int ret = 0;
- list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
+again:
+ list_for_each_entry(em, search_list, list) {
struct map_lookup *map;
int i;
@@ -1087,6 +1079,10 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
ret = 1;
}
}
+ if (search_list == &trans->transaction->pending_chunks) {
+ search_list = &trans->root->fs_info->pinned_chunks;
+ goto again;
+ }
return ret;
}
@@ -1800,8 +1796,8 @@ error_undo:
goto error_brelse;
}
-void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev)
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev)
{
struct btrfs_fs_devices *fs_devices;
@@ -1829,6 +1825,12 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
if (srcdev->bdev)
fs_devices->open_devices--;
+}
+
+void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev)
+{
+ struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
call_rcu(&srcdev->rcu, free_device);
@@ -2647,18 +2649,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
+ ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, extent_root, ret);
goto out;
}
- write_lock(&em_tree->lock);
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
-
- /* once for the tree */
- free_extent_map(em);
out:
/* once for us */
free_extent_map(em);
@@ -4505,6 +4501,8 @@ error_del_extent:
free_extent_map(em);
/* One for the tree reference */
free_extent_map(em);
+ /* One for the pending_chunks list reference */
+ free_extent_map(em);
error:
kfree(devices_info);
return ret;
@@ -4881,13 +4879,15 @@ static inline int parity_smaller(u64 a, u64 b)
static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
{
struct btrfs_bio_stripe s;
+ int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
int i;
u64 l;
int again = 1;
+ int m;
while (again) {
again = 0;
- for (i = 0; i < bbio->num_stripes - 1; i++) {
+ for (i = 0; i < real_stripes - 1; i++) {
if (parity_smaller(raid_map[i], raid_map[i+1])) {
s = bbio->stripes[i];
l = raid_map[i];
@@ -4895,6 +4895,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
raid_map[i] = raid_map[i+1];
bbio->stripes[i+1] = s;
raid_map[i+1] = l;
+
+ if (bbio->tgtdev_map) {
+ m = bbio->tgtdev_map[i];
+ bbio->tgtdev_map[i] =
+ bbio->tgtdev_map[i + 1];
+ bbio->tgtdev_map[i + 1] = m;
+ }
+
again = 1;
}
}
@@ -4923,6 +4931,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
int ret = 0;
int num_stripes;
int max_errors = 0;
+ int tgtdev_indexes = 0;
struct btrfs_bio *bbio = NULL;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int dev_replace_is_ongoing = 0;
@@ -5161,15 +5170,14 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
BTRFS_BLOCK_GROUP_RAID6)) {
u64 tmp;
- if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
- && raid_map_ret) {
+ if (raid_map_ret &&
+ ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+ mirror_num > 1)) {
int i, rot;
/* push stripe_nr back to the start of the full stripe */
stripe_nr = raid56_full_stripe_start;
- do_div(stripe_nr, stripe_len);
-
- stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+ do_div(stripe_nr, stripe_len * nr_data_stripes(map));
/* RAID[56] write or recovery. Return all stripes */
num_stripes = map->num_stripes;
@@ -5235,14 +5243,19 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_alloc_stripes <<= 1;
if (rw & REQ_GET_READ_MIRRORS)
num_alloc_stripes++;
+ tgtdev_indexes = num_stripes;
}
- bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
+
+ bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
+ GFP_NOFS);
if (!bbio) {
kfree(raid_map);
ret = -ENOMEM;
goto out;
}
atomic_set(&bbio->error, 0);
+ if (dev_replace_is_ongoing)
+ bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
if (rw & REQ_DISCARD) {
int factor = 0;
@@ -5327,6 +5340,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
max_errors = btrfs_chunk_max_errors(map);
+ tgtdev_indexes = 0;
if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
dev_replace->tgtdev != NULL) {
int index_where_to_add;
@@ -5355,8 +5369,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
new->physical = old->physical;
new->length = old->length;
new->dev = dev_replace->tgtdev;
+ bbio->tgtdev_map[i] = index_where_to_add;
index_where_to_add++;
max_errors++;
+ tgtdev_indexes++;
}
}
num_stripes = index_where_to_add;
@@ -5402,7 +5418,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
tgtdev_stripe->length =
bbio->stripes[index_srcdev].length;
tgtdev_stripe->dev = dev_replace->tgtdev;
+ bbio->tgtdev_map[index_srcdev] = num_stripes;
+ tgtdev_indexes++;
num_stripes++;
}
}
@@ -5412,6 +5430,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio->num_stripes = num_stripes;
bbio->max_errors = max_errors;
bbio->mirror_num = mirror_num;
+ bbio->num_tgtdevs = tgtdev_indexes;
/*
* this is the case that REQ_READ && dev_replace_is_ongoing &&
@@ -5443,6 +5462,16 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
mirror_num, NULL);
}
+/* For Scrub/replace */
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+ u64 logical, u64 *length,
+ struct btrfs_bio **bbio_ret, int mirror_num,
+ u64 **raid_map_ret)
+{
+ return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+ mirror_num, raid_map_ret);
+}
+
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len)
@@ -5812,12 +5841,9 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
} else {
ret = raid56_parity_recover(root, bio, bbio,
raid_map, map_length,
- mirror_num);
+ mirror_num, 1);
}
- /*
- * FIXME, replace dosen't support raid56 yet, please fix
- * it in the future.
- */
+
btrfs_bio_counter_dec(root->fs_info);
return ret;
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 08980fa23039..d6fe73c0f4a2 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -292,7 +292,7 @@ struct btrfs_bio_stripe {
struct btrfs_bio;
typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
-#define BTRFS_BIO_ORIG_BIO_SUBMITTED 0x1
+#define BTRFS_BIO_ORIG_BIO_SUBMITTED (1 << 0)
struct btrfs_bio {
atomic_t stripes_pending;
@@ -305,6 +305,8 @@ struct btrfs_bio {
int max_errors;
int num_stripes;
int mirror_num;
+ int num_tgtdevs;
+ int *tgtdev_map;
struct btrfs_bio_stripe stripes[];
};
@@ -387,12 +389,18 @@ struct btrfs_balance_control {
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length);
-#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
- (sizeof(struct btrfs_bio_stripe) * (n)))
+#define btrfs_bio_size(total_stripes, real_stripes) \
+ (sizeof(struct btrfs_bio) + \
+ (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \
+ (sizeof(int) * (real_stripes)))
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+ u64 logical, u64 *length,
+ struct btrfs_bio **bbio_ret, int mirror_num,
+ u64 **raid_map_ret);
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
@@ -448,8 +456,10 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev);
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev);
+void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
@@ -513,4 +523,16 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
struct btrfs_transaction *transaction);
+
+static inline void lock_chunks(struct btrfs_root *root)
+{
+ mutex_lock(&root->fs_info->chunk_mutex);
+}
+
+static inline void unlock_chunks(struct btrfs_root *root)
+{
+ mutex_unlock(&root->fs_info->chunk_mutex);
+}
+
+
#endif
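
The new btrfs_bio_size() macro and the tgtdev_map assignment in __btrfs_map_block() above pack the per-stripe target-device index array into the same allocation as the stripe array. A stand-alone sketch (not part of the patch) of that single-allocation layout; struct and field names are simplified stand-ins for btrfs_bio.

/* hedged sketch: one allocation = header + flexible array + index tail */
#include <stdlib.h>
#include <stdio.h>

struct stripe { unsigned long long physical; int dev; };

struct bio_map {
        int num_stripes;
        int num_tgtdevs;
        int *tgtdev_map;                /* points into the tail of this allocation */
        struct stripe stripes[];
};

#define BIO_MAP_SIZE(total, real) \
        (sizeof(struct bio_map) + sizeof(struct stripe) * (total) + \
         sizeof(int) * (real))

int main(void)
{
        int total = 4, real = 2;
        struct bio_map *bm = calloc(1, BIO_MAP_SIZE(total, real));

        if (!bm)
                return 1;
        bm->num_stripes = total;
        bm->num_tgtdevs = real;
        /* the int array lives directly behind the stripe array */
        bm->tgtdev_map = (int *)(bm->stripes + total);
        bm->tgtdev_map[0] = 3;
        printf("tgtdev_map[0]=%d\n", bm->tgtdev_map[0]);
        free(bm);
        return 0;
}
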
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index dcf20131fbe4..47b19465f0dc 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -29,6 +29,7 @@
#include "xattr.h"
#include "disk-io.h"
#include "props.h"
+#include "locking.h"
ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- struct btrfs_dir_item *di;
+ struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
size_t name_len = strlen(name);
@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ path->skip_release_on_error = 1;
+
+ if (!value) {
+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
+ name, name_len, -1);
+ if (!di && (flags & XATTR_REPLACE))
+ ret = -ENODATA;
+ else if (di)
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ goto out;
+ }
+ /*
+ * For a replace we can't just do the insert blindly.
+ * Do a lookup first (read-only btrfs_search_slot), and return if the xattr
+ * doesn't exist. If it exists, fall through below to the insert/replace
+ * path - we can't race with a concurrent xattr delete, because the VFS
+ * locks the inode's i_mutex before calling setxattr or removexattr.
+ */
if (flags & XATTR_REPLACE) {
- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
- name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- } else if (!di) {
+ ASSERT(mutex_is_locked(&inode->i_mutex));
+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
+ name, name_len, 0);
+ if (!di) {
ret = -ENODATA;
goto out;
}
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret)
- goto out;
btrfs_release_path(path);
+ di = NULL;
+ }
+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
+ name, name_len, value, size);
+ if (ret == -EOVERFLOW) {
/*
- * remove the attribute
+ * We have an existing item in a leaf, split_leaf couldn't
+ * expand it. That item might or might not contain a dir_item that
+ * matches our target xattr, so let's check.
*/
- if (!value)
- goto out;
- } else {
- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
- name, name_len, 0);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
+ ret = 0;
+ btrfs_assert_tree_locked(path->nodes[0]);
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
+ if (!di && !(flags & XATTR_REPLACE)) {
+ ret = -ENOSPC;
goto out;
}
- if (!di && !value)
- goto out;
- btrfs_release_path(path);
+ } else if (ret == -EEXIST) {
+ ret = 0;
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
+ ASSERT(di); /* logic error */
+ } else if (ret) {
+ goto out;
}
-again:
- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
- name, name_len, value, size);
- /*
- * If we're setting an xattr to a new value but the new value is say
- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
- * back from split_leaf. This is because it thinks we'll be extending
- * the existing item size, but we're asking for enough space to add the
- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
- * the rest of the function figure it out.
- */
- if (ret == -EOVERFLOW)
+ if (di && (flags & XATTR_CREATE)) {
ret = -EEXIST;
+ goto out;
+ }
- if (ret == -EEXIST) {
- if (flags & XATTR_CREATE)
- goto out;
+ if (di) {
/*
- * We can't use the path we already have since we won't have the
- * proper locking for a delete, so release the path and
- * re-lookup to delete the thing.
+ * We're doing a replace, and it must be atomic, that is, at
+ * any point in time we have either the old or the new xattr
+ * value in the tree. We don't want readers (getxattr and
+ * listxattrs) to miss a value; this is especially important
+ * for ACLs.
*/
- btrfs_release_path(path);
- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
- name, name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- } else if (!di) {
- /* Shouldn't happen but just in case... */
- btrfs_release_path(path);
- goto again;
+ const int slot = path->slots[0];
+ struct extent_buffer *leaf = path->nodes[0];
+ const u16 old_data_len = btrfs_dir_data_len(leaf, di);
+ const u32 item_size = btrfs_item_size_nr(leaf, slot);
+ const u32 data_size = sizeof(*di) + name_len + size;
+ struct btrfs_item *item;
+ unsigned long data_ptr;
+ char *ptr;
+
+ if (size > old_data_len) {
+ if (btrfs_leaf_free_space(root, leaf) <
+ (size - old_data_len)) {
+ ret = -ENOSPC;
+ goto out;
+ }
}
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret)
- goto out;
+ if (old_data_len + name_len + sizeof(*di) == item_size) {
+ /* No other xattrs packed in the same leaf item. */
+ if (size > old_data_len)
+ btrfs_extend_item(root, path,
+ size - old_data_len);
+ else if (size < old_data_len)
+ btrfs_truncate_item(root, path, data_size, 1);
+ } else {
+ /* There are other xattrs packed in the same item. */
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ if (ret)
+ goto out;
+ btrfs_extend_item(root, path, data_size);
+ }
+ item = btrfs_item_nr(slot);
+ ptr = btrfs_item_ptr(leaf, slot, char);
+ ptr += btrfs_item_size(leaf, item) - data_size;
+ di = (struct btrfs_dir_item *)ptr;
+ btrfs_set_dir_data_len(leaf, di, size);
+ data_ptr = ((unsigned long)(di + 1)) + name_len;
+ write_extent_buffer(leaf, value, data_ptr, size);
+ btrfs_mark_buffer_dirty(leaf);
+ } else {
/*
- * We have a value to set, so go back and try to insert it now.
+ * The insert succeeded and we had space for the xattr, so
+ * path->slots[0] is where our xattr dir_item is and
+ * btrfs_insert_xattr_item() filled it.
*/
- if (value) {
- btrfs_release_path(path);
- goto again;
- }
}
out:
btrfs_free_path(path);
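
The rewritten do_setxattr() above is what ultimately backs the XATTR_CREATE and XATTR_REPLACE flag semantics visible from userspace. A small sketch (not part of the patch) exercising those semantics through setxattr(2); /tmp/f and user.demo are arbitrary example names and the file must already exist on a filesystem with xattr support.

/* hedged sketch: XATTR_REPLACE needs an existing xattr, XATTR_CREATE a missing one */
#include <sys/xattr.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "/tmp/f";

        /* replace fails with ENODATA while the xattr does not exist yet */
        if (setxattr(path, "user.demo", "v1", 2, XATTR_REPLACE) < 0)
                printf("XATTR_REPLACE: %s\n", strerror(errno));
        /* create it ... */
        if (setxattr(path, "user.demo", "v1", 2, XATTR_CREATE) < 0)
                printf("XATTR_CREATE: %s\n", strerror(errno));
        /* ... and a second create on the same name fails with EEXIST */
        if (setxattr(path, "user.demo", "v2", 2, XATTR_CREATE) < 0)
                printf("XATTR_CREATE again: %s\n", strerror(errno));
        return 0;
}
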
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 18c06bbaf136..f5013d92a7e6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -192,17 +192,30 @@ static int readpage_nounlock(struct file *filp, struct page *page)
struct ceph_osd_client *osdc =
&ceph_inode_to_client(inode)->client->osdc;
int err = 0;
+ u64 off = page_offset(page);
u64 len = PAGE_CACHE_SIZE;
- err = ceph_readpage_from_fscache(inode, page);
+ if (off >= i_size_read(inode)) {
+ zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ return 0;
+ }
+ /*
+ * Uptodate inline data should have been added into page cache
+ * while getting Fcr caps.
+ */
+ if (ci->i_inline_version != CEPH_INLINE_NONE)
+ return -EINVAL;
+
+ err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
goto out;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
- (u64) page_offset(page), &len,
+ off, &len,
ci->i_truncate_seq, ci->i_truncate_size,
&page, 1, 0);
if (err == -ENOENT)
@@ -319,7 +332,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
off, len);
vino = ceph_vino(inode);
req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
- 1, CEPH_OSD_OP_READ,
+ 0, 1, CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ, NULL,
ci->i_truncate_seq, ci->i_truncate_size,
false);
@@ -384,6 +397,9 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
int rc = 0;
int max = 0;
+ if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
+ return -EINVAL;
+
rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
&nr_pages);
@@ -673,7 +689,7 @@ static int ceph_writepages_start(struct address_space *mapping,
int rc = 0;
unsigned wsize = 1 << inode->i_blkbits;
struct ceph_osd_request *req = NULL;
- int do_sync;
+ int do_sync = 0;
u64 truncate_size, snap_size;
u32 truncate_seq;
@@ -750,7 +766,6 @@ retry:
last_snapc = snapc;
while (!done && index <= end) {
- int num_ops = do_sync ? 2 : 1;
unsigned i;
int first;
pgoff_t next;
@@ -850,7 +865,8 @@ get_more_pages:
len = wsize;
req = ceph_osdc_new_request(&fsc->client->osdc,
&ci->i_layout, vino,
- offset, &len, num_ops,
+ offset, &len, 0,
+ do_sync ? 2 : 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_WRITE |
CEPH_OSD_FLAG_ONDISK,
@@ -862,6 +878,9 @@ get_more_pages:
break;
}
+ if (do_sync)
+ osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+
req->r_callback = writepages_finish;
req->r_inode = inode;
@@ -1204,6 +1223,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
+ struct page *pinned_page = NULL;
loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
int want, got, ret;
@@ -1215,7 +1235,8 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE;
while (1) {
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
+ -1, &got, &pinned_page);
if (ret == 0)
break;
if (ret != -ERESTARTSYS) {
@@ -1226,12 +1247,54 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
- ret = filemap_fault(vma, vmf);
+ if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
+ ci->i_inline_version == CEPH_INLINE_NONE)
+ ret = filemap_fault(vma, vmf);
+ else
+ ret = -EAGAIN;
dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+ if (pinned_page)
+ page_cache_release(pinned_page);
ceph_put_cap_refs(ci, got);
+ if (ret != -EAGAIN)
+ return ret;
+
+ /* read inline data */
+ if (off >= PAGE_CACHE_SIZE) {
+ /* does not support inline data > PAGE_SIZE */
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ int ret1;
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page = find_or_create_page(mapping, 0,
+ mapping_gfp_mask(mapping) &
+ ~__GFP_FS);
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+ ret1 = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (ret1 < 0 || off >= i_size_read(inode)) {
+ unlock_page(page);
+ page_cache_release(page);
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+ if (ret1 < PAGE_CACHE_SIZE)
+ zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+ else
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ vmf->page = page;
+ ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
+ }
+out:
+ dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
+ inode, off, (size_t)PAGE_CACHE_SIZE, ret);
return ret;
}
@@ -1250,6 +1313,19 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
size_t len;
int want, got, ret;
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ struct page *locked_page = NULL;
+ if (off == 0) {
+ lock_page(page);
+ locked_page = page;
+ }
+ ret = ceph_uninline_data(vma->vm_file, locked_page);
+ if (locked_page)
+ unlock_page(locked_page);
+ if (ret < 0)
+ return VM_FAULT_SIGBUS;
+ }
+
if (off + PAGE_CACHE_SIZE <= size)
len = PAGE_CACHE_SIZE;
else
@@ -1263,7 +1339,8 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER;
while (1) {
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, off + len);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ &got, NULL);
if (ret == 0)
break;
if (ret != -ERESTARTSYS) {
@@ -1297,11 +1374,13 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = VM_FAULT_SIGBUS;
}
out:
- if (ret != VM_FAULT_LOCKED) {
+ if (ret != VM_FAULT_LOCKED)
unlock_page(page);
- } else {
+ if (ret == VM_FAULT_LOCKED ||
+ ci->i_inline_version != CEPH_INLINE_NONE) {
int dirty;
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
@@ -1315,6 +1394,178 @@ out:
return ret;
}
+void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
+ char *data, size_t len)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+
+ if (locked_page) {
+ page = locked_page;
+ } else {
+ if (i_size_read(inode) == 0)
+ return;
+ page = find_or_create_page(mapping, 0,
+ mapping_gfp_mask(mapping) & ~__GFP_FS);
+ if (!page)
+ return;
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ page_cache_release(page);
+ return;
+ }
+ }
+
+ dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+ inode, ceph_vinop(inode), len, locked_page);
+
+ if (len > 0) {
+ void *kaddr = kmap_atomic(page);
+ memcpy(kaddr, data, len);
+ kunmap_atomic(kaddr);
+ }
+
+ if (page != locked_page) {
+ if (len < PAGE_CACHE_SIZE)
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ else
+ flush_dcache_page(page);
+
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+}
+
+int ceph_uninline_data(struct file *filp, struct page *locked_page)
+{
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_osd_request *req;
+ struct page *page = NULL;
+ u64 len, inline_version;
+ int err = 0;
+ bool from_pagecache = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ inline_version = ci->i_inline_version;
+ spin_unlock(&ci->i_ceph_lock);
+
+ dout("uninline_data %p %llx.%llx inline_version %llu\n",
+ inode, ceph_vinop(inode), inline_version);
+
+ if (inline_version == 1 || /* initial version, no data */
+ inline_version == CEPH_INLINE_NONE)
+ goto out;
+
+ if (locked_page) {
+ page = locked_page;
+ WARN_ON(!PageUptodate(page));
+ } else if (ceph_caps_issued(ci) &
+ (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
+ page = find_get_page(inode->i_mapping, 0);
+ if (page) {
+ if (PageUptodate(page)) {
+ from_pagecache = true;
+ lock_page(page);
+ } else {
+ page_cache_release(page);
+ page = NULL;
+ }
+ }
+ }
+
+ if (page) {
+ len = i_size_read(inode);
+ if (len > PAGE_CACHE_SIZE)
+ len = PAGE_CACHE_SIZE;
+ } else {
+ page = __page_cache_alloc(GFP_NOFS);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (err < 0) {
+ /* no inline data */
+ if (err == -ENODATA)
+ err = 0;
+ goto out;
+ }
+ len = err;
+ }
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ ceph_vino(inode), 0, &len, 0, 1,
+ CEPH_OSD_OP_CREATE,
+ CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+ ci->i_snap_realm->cached_context,
+ 0, 0, false);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!err)
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_put_request(req);
+ if (err < 0)
+ goto out;
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ ceph_vino(inode), 0, &len, 1, 3,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+ ci->i_snap_realm->cached_context,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ false);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
+
+ err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
+ "inline_version", &inline_version,
+ sizeof(inline_version),
+ CEPH_OSD_CMPXATTR_OP_GT,
+ CEPH_OSD_CMPXATTR_MODE_U64);
+ if (err)
+ goto out_put;
+
+ err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
+ "inline_version", &inline_version,
+ sizeof(inline_version), 0, 0);
+ if (err)
+ goto out_put;
+
+ ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+ if (!err)
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+out_put:
+ ceph_osdc_put_request(req);
+ if (err == -ECANCELED)
+ err = 0;
+out:
+ if (page && page != locked_page) {
+ if (from_pagecache) {
+ unlock_page(page);
+ page_cache_release(page);
+ } else
+ __free_pages(page, 0);
+ }
+
+ dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
+ inode, ceph_vinop(inode), inline_version, err);
+ return err;
+}
+
static struct vm_operations_struct ceph_vmops = {
.fault = ceph_filemap_fault,
.page_mkwrite = ceph_page_mkwrite,
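
ceph_uninline_data() above issues the data write together with a CMPXATTR guard on the object's inline_version xattr and treats a failed guard (-ECANCELED) as success, because in that case another client already migrated the data. A plain-C sketch (not part of the patch) of that guarded, idempotent migration; all names and the exact comparison are illustrative, the real guard is carried by the CMPXATTR op in the same OSD request as the write.

/* hedged sketch: apply the migration only if the version check still holds */
#include <stdio.h>
#include <string.h>

struct object {
        unsigned long long inline_version;
        char data[64];
};

static int migrate_inline(struct object *o, unsigned long long seen_version,
                          const char *inline_copy)
{
        /* guard: skip the write if newer data was already migrated */
        if (o->inline_version >= seen_version)
                return -1;              /* the real code maps this to success */
        strncpy(o->data, inline_copy, sizeof(o->data) - 1);
        o->inline_version = seen_version;
        return 0;
}

int main(void)
{
        struct object o = { .inline_version = 0 };

        printf("first migrate: %d\n", migrate_inline(&o, 5, "inline bytes"));
        printf("stale migrate: %d\n", migrate_inline(&o, 5, "older copy"));
        return 0;
}
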
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cefca661464b..b93c631c6c87 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -975,10 +975,12 @@ static int send_cap_msg(struct ceph_mds_session *session,
kuid_t uid, kgid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
- u64 follows)
+ u64 follows, bool inline_data)
{
struct ceph_mds_caps *fc;
struct ceph_msg *msg;
+ void *p;
+ size_t extra_len;
dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
" seq %u/%u mseq %u follows %lld size %llu/%llu"
@@ -988,7 +990,10 @@ static int send_cap_msg(struct ceph_mds_session *session,
seq, issue_seq, mseq, follows, size, max_size,
xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
+ /* flock buffer size + inline version + inline data size */
+ extra_len = 4 + 8 + 4;
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
+ GFP_NOFS, false);
if (!msg)
return -ENOMEM;
@@ -1020,6 +1025,14 @@ static int send_cap_msg(struct ceph_mds_session *session,
fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
fc->mode = cpu_to_le32(mode);
+ p = fc + 1;
+ /* flock buffer size */
+ ceph_encode_32(&p, 0);
+ /* inline version */
+ ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
+ /* inline data size */
+ ceph_encode_32(&p, 0);
+
fc->xattr_version = cpu_to_le64(xattr_version);
if (xattrs_buf) {
msg->middle = ceph_buffer_get(xattrs_buf);
@@ -1126,6 +1139,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
u64 flush_tid = 0;
int i;
int ret;
+ bool inline_data;
held = cap->issued | cap->implemented;
revoking = cap->implemented & ~cap->issued;
@@ -1209,13 +1223,15 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
xattr_version = ci->i_xattrs.version;
}
+ inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+
spin_unlock(&ci->i_ceph_lock);
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
size, max_size, &mtime, &atime, time_warp_seq,
uid, gid, mode, xattr_version, xattr_blob,
- follows);
+ follows, inline_data);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
delayed = 1;
@@ -1336,7 +1352,7 @@ retry:
capsnap->time_warp_seq,
capsnap->uid, capsnap->gid, capsnap->mode,
capsnap->xattr_version, capsnap->xattr_blob,
- capsnap->follows);
+ capsnap->follows, capsnap->inline_data);
next_follows = capsnap->follows + 1;
ceph_put_cap_snap(capsnap);
@@ -2057,15 +2073,17 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
* requested from the MDS.
*/
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
- int *got, loff_t endoff, int *check_max, int *err)
+ loff_t endoff, int *got, struct page **pinned_page,
+ int *check_max, int *err)
{
struct inode *inode = &ci->vfs_inode;
int ret = 0;
- int have, implemented;
+ int have, implemented, _got = 0;
int file_wanted;
dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
+again:
spin_lock(&ci->i_ceph_lock);
/* make sure file is actually open */
@@ -2075,7 +2093,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
ceph_cap_string(need), ceph_cap_string(file_wanted));
*err = -EBADF;
ret = 1;
- goto out;
+ goto out_unlock;
}
/* finish pending truncate */
@@ -2095,7 +2113,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
*check_max = 1;
ret = 1;
}
- goto out;
+ goto out_unlock;
}
/*
* If a sync write is in progress, we must wait, so that we
@@ -2103,7 +2121,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
*/
if (__ceph_have_pending_cap_snap(ci)) {
dout("get_cap_refs %p cap_snap_pending\n", inode);
- goto out;
+ goto out_unlock;
}
}
@@ -2120,18 +2138,50 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
inode, ceph_cap_string(have), ceph_cap_string(not),
ceph_cap_string(revoking));
if ((revoking & not) == 0) {
- *got = need | (have & want);
- __take_cap_refs(ci, *got);
+ _got = need | (have & want);
+ __take_cap_refs(ci, _got);
ret = 1;
}
} else {
dout("get_cap_refs %p have %s needed %s\n", inode,
ceph_cap_string(have), ceph_cap_string(need));
}
-out:
+out_unlock:
spin_unlock(&ci->i_ceph_lock);
+
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+ i_size_read(inode) > 0) {
+ int ret1;
+ struct page *page = find_get_page(inode->i_mapping, 0);
+ if (page) {
+ if (PageUptodate(page)) {
+ *pinned_page = page;
+ goto out;
+ }
+ page_cache_release(page);
+ }
+ /*
+ * drop cap refs first because getattr while holding
+ * cap refs can cause a deadlock.
+ */
+ ceph_put_cap_refs(ci, _got);
+ _got = 0;
+
+ /* getattr request will bring inline data into page cache */
+ ret1 = __ceph_do_getattr(inode, NULL,
+ CEPH_STAT_CAP_INLINE_DATA, true);
+ if (ret1 >= 0) {
+ ret = 0;
+ goto again;
+ }
+ *err = ret1;
+ ret = 1;
+ }
+out:
dout("get_cap_refs %p ret %d got %s\n", inode,
- ret, ceph_cap_string(*got));
+ ret, ceph_cap_string(_got));
+ *got = _got;
return ret;
}
@@ -2168,8 +2218,8 @@ static void check_max_size(struct inode *inode, loff_t endoff)
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
- loff_t endoff)
+int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+ loff_t endoff, int *got, struct page **pinned_page)
{
int check_max, ret, err;
@@ -2179,8 +2229,8 @@ retry:
check_max = 0;
err = 0;
ret = wait_event_interruptible(ci->i_cap_wq,
- try_get_cap_refs(ci, need, want,
- got, endoff,
+ try_get_cap_refs(ci, need, want, endoff,
+ got, pinned_page,
&check_max, &err));
if (err)
ret = err;
@@ -2383,6 +2433,8 @@ static void invalidate_aliases(struct inode *inode)
static void handle_cap_grant(struct ceph_mds_client *mdsc,
struct inode *inode, struct ceph_mds_caps *grant,
void *snaptrace, int snaptrace_len,
+ u64 inline_version,
+ void *inline_data, int inline_len,
struct ceph_buffer *xattr_buf,
struct ceph_mds_session *session,
struct ceph_cap *cap, int issued)
@@ -2403,6 +2455,7 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
bool queue_invalidate = false;
bool queue_revalidate = false;
bool deleted_inode = false;
+ bool fill_inline = false;
dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
inode, cap, mds, seq, ceph_cap_string(newcaps));
@@ -2576,6 +2629,13 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
}
BUG_ON(cap->issued & ~cap->implemented);
+ if (inline_version > 0 && inline_version >= ci->i_inline_version) {
+ ci->i_inline_version = inline_version;
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
+ fill_inline = true;
+ }
+
spin_unlock(&ci->i_ceph_lock);
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
@@ -2589,6 +2649,9 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
wake = true;
}
+ if (fill_inline)
+ ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
+
if (queue_trunc) {
ceph_queue_vmtruncate(inode);
ceph_queue_revalidate(inode);
@@ -2996,11 +3059,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u64 cap_id;
u64 size, max_size;
u64 tid;
+ u64 inline_version = 0;
+ void *inline_data = NULL;
+ u32 inline_len = 0;
void *snaptrace;
size_t snaptrace_len;
- void *flock;
- void *end;
- u32 flock_len;
+ void *p, *end;
dout("handle_caps from mds%d\n", mds);
@@ -3021,30 +3085,37 @@ void ceph_handle_caps(struct ceph_mds_session *session,
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
+ p = snaptrace + snaptrace_len;
if (le16_to_cpu(msg->hdr.version) >= 2) {
- void *p = snaptrace + snaptrace_len;
+ u32 flock_len;
ceph_decode_32_safe(&p, end, flock_len, bad);
if (p + flock_len > end)
goto bad;
- flock = p;
- } else {
- flock = NULL;
- flock_len = 0;
+ p += flock_len;
}
if (le16_to_cpu(msg->hdr.version) >= 3) {
if (op == CEPH_CAP_OP_IMPORT) {
- void *p = flock + flock_len;
if (p + sizeof(*peer) > end)
goto bad;
peer = p;
+ p += sizeof(*peer);
} else if (op == CEPH_CAP_OP_EXPORT) {
/* recorded in unused fields */
peer = (void *)&h->size;
}
}
+ if (le16_to_cpu(msg->hdr.version) >= 4) {
+ ceph_decode_64_safe(&p, end, inline_version, bad);
+ ceph_decode_32_safe(&p, end, inline_len, bad);
+ if (p + inline_len > end)
+ goto bad;
+ inline_data = p;
+ p += inline_len;
+ }
+
/* lookup ino */
inode = ceph_find_inode(sb, vino);
ci = ceph_inode(inode);
@@ -3085,6 +3156,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &issued);
handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len,
+ inline_version, inline_data, inline_len,
msg->middle, session, cap, issued);
goto done_unlocked;
}
@@ -3105,8 +3177,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_GRANT:
__ceph_caps_issued(ci, &issued);
issued |= __ceph_caps_dirty(ci);
- handle_cap_grant(mdsc, inode, h, NULL, 0, msg->middle,
- session, cap, issued);
+ handle_cap_grant(mdsc, inode, h, NULL, 0,
+ inline_version, inline_data, inline_len,
+ msg->middle, session, cap, issued);
goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
@@ -3137,8 +3210,7 @@ flush_cap_releases:
done:
mutex_unlock(&session->s_mutex);
done_unlocked:
- if (inode)
- iput(inode);
+ iput(inode);
return;
bad:
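
ceph_handle_caps() above grows the decode path so that a v2 message adds the flock blob, v3 the peer record and v4 the inline data, always bounds-checking before advancing the cursor. A stand-alone sketch (not part of the patch) of that version-gated, length-prefixed decoding; decode_u32()/decode_u64() are illustrative helpers, not libceph API, and the v3 peer record is omitted.

/* hedged sketch: decode only the fields the sender's version provides */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
        if (end - *p < 4)
                return -1;
        memcpy(v, *p, 4);       /* endianness handling omitted in this sketch */
        *p += 4;
        return 0;
}

static int decode_u64(const uint8_t **p, const uint8_t *end, uint64_t *v)
{
        if (end - *p < 8)
                return -1;
        memcpy(v, *p, 8);
        *p += 8;
        return 0;
}

static int decode_caps_tail(const uint8_t *p, const uint8_t *end, int version)
{
        uint32_t flock_len, inline_len;
        uint64_t inline_version = 0;

        if (version >= 2) {     /* v2 appended a flock blob: length + payload */
                if (decode_u32(&p, end, &flock_len) ||
                    (size_t)(end - p) < flock_len)
                        return -1;
                p += flock_len;
        }
        if (version >= 4) {     /* v4 appended inline version + data blob */
                if (decode_u64(&p, end, &inline_version) ||
                    decode_u32(&p, end, &inline_len) ||
                    (size_t)(end - p) < inline_len)
                        return -1;
        }
        printf("inline_version=%llu\n", (unsigned long long)inline_version);
        return 0;
}

int main(void)
{
        uint8_t v2_msg[4] = { 0, 0, 0, 0 };     /* flock_len == 0, nothing else */

        return decode_caps_tail(v2_msg, v2_msg + sizeof(v2_msg), 2);
}
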
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 681a8537b64f..c241603764fd 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -183,7 +183,7 @@ more:
spin_unlock(&parent->d_lock);
/* make sure a dentry wasn't dropped while we didn't have parent lock */
- if (!ceph_dir_is_complete(dir)) {
+ if (!ceph_dir_is_complete_ordered(dir)) {
dout(" lost dir complete on %p; falling back to mds\n", dir);
dput(dentry);
err = -EAGAIN;
@@ -261,10 +261,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* always start with . and .. */
if (ctx->pos == 0) {
- /* note dir version at start of readdir so we can tell
- * if any dentries get dropped */
- fi->dir_release_count = atomic_read(&ci->i_release_count);
-
dout("readdir off 0 -> '.'\n");
if (!dir_emit(ctx, ".", 1,
ceph_translate_ino(inode->i_sb, inode->i_ino),
@@ -289,7 +285,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
if ((ctx->pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
- __ceph_dir_is_complete(ci) &&
+ __ceph_dir_is_complete_ordered(ci) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
u32 shared_gen = ci->i_shared_gen;
spin_unlock(&ci->i_ceph_lock);
@@ -312,6 +308,13 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* proceed with a normal readdir */
+ if (ctx->pos == 2) {
+ /* note dir version at start of readdir so we can tell
+ * if any dentries get dropped */
+ fi->dir_release_count = atomic_read(&ci->i_release_count);
+ fi->dir_ordered_count = ci->i_ordered_count;
+ }
+
more:
/* do we have the correct frag content buffered? */
if (fi->frag != frag || fi->last_readdir == NULL) {
@@ -446,8 +449,12 @@ more:
*/
spin_lock(&ci->i_ceph_lock);
if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
- dout(" marking %p complete\n", inode);
- __ceph_dir_set_complete(ci, fi->dir_release_count);
+ if (ci->i_ordered_count == fi->dir_ordered_count)
+ dout(" marking %p complete and ordered\n", inode);
+ else
+ dout(" marking %p complete\n", inode);
+ __ceph_dir_set_complete(ci, fi->dir_release_count,
+ fi->dir_ordered_count);
}
spin_unlock(&ci->i_ceph_lock);
@@ -805,7 +812,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
acls.pagelist = NULL;
}
err = ceph_mdsc_do_request(mdsc, dir, req);
- if (!err && !req->r_reply_info.head->is_dentry)
+ if (!err &&
+ !req->r_reply_info.head->is_target &&
+ !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
out:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 9f8e3572040e..ce74b394b49d 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -333,6 +333,11 @@ int ceph_release(struct inode *inode, struct file *file)
return 0;
}
+enum {
+ CHECK_EOF = 1,
+ READ_INLINE = 2,
+};
+
/*
* Read a range of bytes striped over one or more objects. Iterate over
* objects we stripe over. (That's not atomic, but good enough for now.)
@@ -412,7 +417,7 @@ more:
ret = read;
/* did we bounce off eof? */
if (pos + left > inode->i_size)
- *checkeof = 1;
+ *checkeof = CHECK_EOF;
}
dout("striped_read returns %d\n", ret);
@@ -598,7 +603,7 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
snapc = ci->i_snap_realm->cached_context;
vino = ceph_vino(inode);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len,
+ vino, pos, &len, 0,
2,/*include a 'startsync' command*/
CEPH_OSD_OP_WRITE, flags, snapc,
ci->i_truncate_seq,
@@ -609,6 +614,8 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
break;
}
+ osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+
n = iov_iter_get_pages_alloc(from, &pages, len, &start);
if (unlikely(n < 0)) {
ret = n;
@@ -713,7 +720,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
snapc = ci->i_snap_realm->cached_context;
vino = ceph_vino(inode);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len, 1,
+ vino, pos, &len, 0, 1,
CEPH_OSD_OP_WRITE, flags, snapc,
ci->i_truncate_seq,
ci->i_truncate_size,
@@ -803,9 +810,10 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
size_t len = iocb->ki_nbytes;
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct page *pinned_page = NULL;
ssize_t ret;
int want, got = 0;
- int checkeof = 0, read = 0;
+ int retry_op = 0, read = 0;
again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
@@ -815,7 +823,7 @@ again:
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
if (ret < 0)
return ret;
@@ -827,8 +835,12 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
- /* hmm, this isn't really async... */
- ret = ceph_sync_read(iocb, to, &checkeof);
+ if (ci->i_inline_version == CEPH_INLINE_NONE) {
+ /* hmm, this isn't really async... */
+ ret = ceph_sync_read(iocb, to, &retry_op);
+ } else {
+ retry_op = READ_INLINE;
+ }
} else {
dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
@@ -838,13 +850,55 @@ again:
}
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+ if (pinned_page) {
+ page_cache_release(pinned_page);
+ pinned_page = NULL;
+ }
ceph_put_cap_refs(ci, got);
+ if (retry_op && ret >= 0) {
+ int statret;
+ struct page *page = NULL;
+ loff_t i_size;
+ if (retry_op == READ_INLINE) {
+ page = __page_cache_alloc(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ }
- if (checkeof && ret >= 0) {
- int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
+ statret = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, !!page);
+ if (statret < 0) {
+ __free_page(page);
+ if (statret == -ENODATA) {
+ BUG_ON(retry_op != READ_INLINE);
+ goto again;
+ }
+ return statret;
+ }
+
+ i_size = i_size_read(inode);
+ if (retry_op == READ_INLINE) {
+ /* does not support inline data > PAGE_SIZE */
+ if (i_size > PAGE_CACHE_SIZE) {
+ ret = -EIO;
+ } else if (iocb->ki_pos < i_size) {
+ loff_t end = min_t(loff_t, i_size,
+ iocb->ki_pos + len);
+ if (statret < end)
+ zero_user_segment(page, statret, end);
+ ret = copy_page_to_iter(page,
+ iocb->ki_pos & ~PAGE_MASK,
+ end - iocb->ki_pos, to);
+ iocb->ki_pos += ret;
+ } else {
+ ret = 0;
+ }
+ __free_pages(page, 0);
+ return ret;
+ }
/* hit EOF or hole? */
- if (statret == 0 && iocb->ki_pos < inode->i_size &&
+ if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
dout("sync_read hit hole, ppos %lld < size %lld"
", reading more\n", iocb->ki_pos,
@@ -852,7 +906,7 @@ again:
read += ret;
len -= ret;
- checkeof = 0;
+ retry_op = 0;
goto again;
}
}
@@ -909,6 +963,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ err = ceph_uninline_data(file, NULL);
+ if (err < 0)
+ goto out;
+ }
+
retry_snap:
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
err = -ENOSPC;
@@ -922,7 +982,8 @@ retry_snap:
else
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
+ err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
+ &got, NULL);
if (err < 0)
goto out;
@@ -969,6 +1030,7 @@ retry_snap:
if (written >= 0) {
int dirty;
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
@@ -1111,7 +1173,7 @@ static int ceph_zero_partial_object(struct inode *inode,
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode),
offset, length,
- 1, op,
+ 0, 1, op,
CEPH_OSD_FLAG_WRITE |
CEPH_OSD_FLAG_ONDISK,
NULL, 0, 0, false);
@@ -1214,6 +1276,12 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
+ if (ci->i_inline_version != CEPH_INLINE_NONE) {
+ ret = ceph_uninline_data(file, NULL);
+ if (ret < 0)
+ goto unlock;
+ }
+
size = i_size_read(inode);
if (!(mode & FALLOC_FL_KEEP_SIZE))
endoff = offset + length;
@@ -1223,7 +1291,7 @@ static long ceph_fallocate(struct file *file, int mode,
else
want = CEPH_CAP_FILE_BUFFER;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
if (ret < 0)
goto unlock;
@@ -1240,6 +1308,7 @@ static long ceph_fallocate(struct file *file, int mode,
if (!ret) {
spin_lock(&ci->i_ceph_lock);
+ ci->i_inline_version = CEPH_INLINE_NONE;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&ci->i_ceph_lock);
if (dirty)
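
The READ_INLINE branch added to ceph_read_iter() above copies from a single zero-filled page and clamps the copy at i_size. A simplified, self-contained version of that arithmetic (not part of the patch); names are illustrative and the page-cache/iov_iter machinery is replaced by plain buffers.

/* hedged sketch: read from an inline blob, zero-filling up to i_size */
#include <stdio.h>
#include <string.h>

static long read_inline(const char *data, long data_len, long i_size,
                        long pos, char *out, long len)
{
        long end;

        if (pos >= i_size)
                return 0;
        end = pos + len;
        if (end > i_size)
                end = i_size;
        /* bytes in [data_len, end) exist in the file but not in the blob */
        memset(out, 0, end - pos);
        if (pos < data_len)
                memcpy(out, data + pos,
                       (data_len < end ? data_len : end) - pos);
        return end - pos;
}

int main(void)
{
        char out[8];
        long n = read_inline("abc", 3, 6, 1, out, 8);

        printf("read %ld bytes, first two: %.2s\n", n, out);
        return 0;
}
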
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index a5593d51d035..f61a74115beb 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -387,8 +387,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
spin_lock_init(&ci->i_ceph_lock);
ci->i_version = 0;
+ ci->i_inline_version = 0;
ci->i_time_warp_seq = 0;
ci->i_ceph_flags = 0;
+ ci->i_ordered_count = 0;
atomic_set(&ci->i_release_count, 1);
atomic_set(&ci->i_complete_count, 0);
ci->i_symlink = NULL;
@@ -657,7 +659,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
* Populate an inode based on info from mds. May be called on new or
* existing inodes.
*/
-static int fill_inode(struct inode *inode,
+static int fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_mds_reply_info_in *iinfo,
struct ceph_mds_reply_dirfrag *dirinfo,
struct ceph_mds_session *session,
@@ -675,6 +677,7 @@ static int fill_inode(struct inode *inode,
bool wake = false;
bool queue_trunc = false;
bool new_version = false;
+ bool fill_inline = false;
dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
inode, ceph_vinop(inode), le64_to_cpu(info->version),
@@ -845,7 +848,8 @@ static int fill_inode(struct inode *inode,
(issued & CEPH_CAP_FILE_EXCL) == 0 &&
!__ceph_dir_is_complete(ci)) {
dout(" marking %p complete (empty)\n", inode);
- __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
+ __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
+ ci->i_ordered_count);
}
/* were we issued a capability? */
@@ -873,8 +877,23 @@ static int fill_inode(struct inode *inode,
ceph_vinop(inode));
__ceph_get_fmode(ci, cap_fmode);
}
+
+ if (iinfo->inline_version > 0 &&
+ iinfo->inline_version >= ci->i_inline_version) {
+ int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
+ ci->i_inline_version = iinfo->inline_version;
+ if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ (locked_page ||
+ (le32_to_cpu(info->cap.caps) & cache_caps)))
+ fill_inline = true;
+ }
+
spin_unlock(&ci->i_ceph_lock);
+ if (fill_inline)
+ ceph_fill_inline_data(inode, locked_page,
+ iinfo->inline_data, iinfo->inline_len);
+
if (wake)
wake_up_all(&ci->i_cap_wq);
@@ -1062,7 +1081,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct inode *dir = req->r_locked_dir;
if (dir) {
- err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+ err = fill_inode(dir, NULL,
+ &rinfo->diri, rinfo->dirfrag,
session, req->r_request_started, -1,
&req->r_caps_reservation);
if (err < 0)
@@ -1132,7 +1152,7 @@ retry_lookup:
}
req->r_target_inode = in;
- err = fill_inode(in, &rinfo->targeti, NULL,
+ err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
session, req->r_request_started,
(!req->r_aborted && rinfo->head->result == 0) ?
req->r_fmode : -1,
@@ -1204,8 +1224,8 @@ retry_lookup:
ceph_invalidate_dentry_lease(dn);
/* d_move screws up sibling dentries' offsets */
- ceph_dir_clear_complete(dir);
- ceph_dir_clear_complete(olddir);
+ ceph_dir_clear_ordered(dir);
+ ceph_dir_clear_ordered(olddir);
dout("dn %p gets new offset %lld\n", req->r_old_dentry,
ceph_dentry(req->r_old_dentry)->offset);
@@ -1217,6 +1237,7 @@ retry_lookup:
if (!rinfo->head->is_target) {
dout("fill_trace null dentry\n");
if (dn->d_inode) {
+ ceph_dir_clear_ordered(dir);
dout("d_delete %p\n", dn);
d_delete(dn);
} else {
@@ -1233,7 +1254,7 @@ retry_lookup:
/* attach proper inode */
if (!dn->d_inode) {
- ceph_dir_clear_complete(dir);
+ ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, &have_lease);
if (IS_ERR(dn)) {
@@ -1263,7 +1284,7 @@ retry_lookup:
BUG_ON(!dir);
BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
dout(" linking snapped dir %p to dn %p\n", in, dn);
- ceph_dir_clear_complete(dir);
+ ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, NULL);
if (IS_ERR(dn)) {
@@ -1300,7 +1321,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
dout("new_inode badness got %d\n", err);
continue;
}
- rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation);
if (rc < 0) {
@@ -1416,7 +1437,7 @@ retry_lookup:
}
}
- if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ if (fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
@@ -1899,7 +1920,8 @@ out_put:
* Verify that we have a lease on the given mask. If not,
* do a getattr against an mds.
*/
-int ceph_do_getattr(struct inode *inode, int mask, bool force)
+int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1911,7 +1933,8 @@ int ceph_do_getattr(struct inode *inode, int mask, bool force)
return 0;
}
- dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
+ dout("do_getattr inode %p mask %s mode 0%o\n",
+ inode, ceph_cap_string(mask), inode->i_mode);
if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
return 0;
@@ -1922,7 +1945,19 @@ int ceph_do_getattr(struct inode *inode, int mask, bool force)
ihold(inode);
req->r_num_caps = 1;
req->r_args.getattr.mask = cpu_to_le32(mask);
+ req->r_locked_page = locked_page;
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (locked_page && err == 0) {
+ u64 inline_version = req->r_reply_info.targeti.inline_version;
+ if (inline_version == 0) {
+ /* the reply is supposed to contain inline data */
+ err = -EINVAL;
+ } else if (inline_version == CEPH_INLINE_NONE) {
+ err = -ENODATA;
+ } else {
+ err = req->r_reply_info.targeti.inline_len;
+ }
+ }
ceph_mdsc_put_request(req);
dout("do_getattr result=%d\n", err);
return err;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index fbc39c47bacd..c35c5c614e38 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -9,6 +9,8 @@
#include <linux/ceph/pagelist.h>
static u64 lock_secret;
+static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
static inline u64 secure_addr(void *addr)
{
@@ -40,6 +42,9 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
u64 length = 0;
u64 owner;
+ if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
+ wait = 0;
+
req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -68,6 +73,9 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
req->r_args.filelock_change.length = cpu_to_le64(length);
req->r_args.filelock_change.wait = wait;
+ if (wait)
+ req->r_wait_for_completion = ceph_lock_wait_for_completion;
+
err = ceph_mdsc_do_request(mdsc, inode, req);
if (operation == CEPH_MDS_OP_GETFILELOCK) {
@@ -96,6 +104,52 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
return err;
}
+static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ struct ceph_mds_request *intr_req;
+ struct inode *inode = req->r_inode;
+ int err, lock_type;
+
+ BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
+ if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
+ lock_type = CEPH_LOCK_FCNTL_INTR;
+ else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
+ lock_type = CEPH_LOCK_FLOCK_INTR;
+ else
+ BUG_ON(1);
+ BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);
+
+ err = wait_for_completion_interruptible(&req->r_completion);
+ if (!err)
+ return 0;
+
+ dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
+ req->r_tid);
+
+ intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
+ USE_AUTH_MDS);
+ if (IS_ERR(intr_req))
+ return PTR_ERR(intr_req);
+
+ intr_req->r_inode = inode;
+ ihold(inode);
+ intr_req->r_num_caps = 1;
+
+ intr_req->r_args.filelock_change = req->r_args.filelock_change;
+ intr_req->r_args.filelock_change.rule = lock_type;
+ intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;
+
+ err = ceph_mdsc_do_request(mdsc, inode, intr_req);
+ ceph_mdsc_put_request(intr_req);
+
+ if (err && err != -ERESTARTSYS)
+ return err;
+
+ wait_for_completion(&req->r_completion);
+ return 0;
+}
+
/**
* Attempt to set an fcntl lock.
* For now, this just goes away to the server. Later it may be more awesome.
@@ -143,11 +197,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
err);
}
}
-
- } else if (err == -ERESTARTSYS) {
- dout("undoing lock\n");
- ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
- CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
@@ -186,11 +235,6 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
file, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on flock_lock_file_wait, undid lock", err);
}
- } else if (err == -ERESTARTSYS) {
- dout("undoing lock\n");
- ceph_lock_message(CEPH_LOCK_FLOCK,
- CEPH_MDS_OP_SETFILELOCK,
- file, CEPH_LOCK_UNLOCK, 0, fl);
}
return err;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a92d3f5c6c12..d2171f4a6980 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -89,6 +89,16 @@ static int parse_reply_info_in(void **p, void *end,
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
*p += info->xattr_len;
+
+ if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
+ ceph_decode_64_safe(p, end, info->inline_version, bad);
+ ceph_decode_32_safe(p, end, info->inline_len, bad);
+ ceph_decode_need(p, end, info->inline_len, bad);
+ info->inline_data = *p;
+ *p += info->inline_len;
+ } else
+ info->inline_version = CEPH_INLINE_NONE;
+
return 0;
bad:
return err;
@@ -524,8 +534,7 @@ void ceph_mdsc_release_request(struct kref *kref)
}
if (req->r_locked_dir)
ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
- if (req->r_target_inode)
- iput(req->r_target_inode);
+ iput(req->r_target_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
@@ -861,8 +870,11 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
/*
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
+ *
+ * ClientSession messages with metadata are v2
*/
- msg->hdr.version = 2; /* ClientSession messages with metadata are v2 */
+ msg->hdr.version = cpu_to_le16(2);
+ msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
p = msg->front.iov_base + sizeof(*h);
@@ -1066,8 +1078,7 @@ out:
session->s_cap_iterator = NULL;
spin_unlock(&session->s_cap_lock);
- if (last_inode)
- iput(last_inode);
+ iput(last_inode);
if (old_cap)
ceph_put_cap(session->s_mdsc, old_cap);
@@ -1874,7 +1885,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
goto out_free2;
}
- msg->hdr.version = 2;
+ msg->hdr.version = cpu_to_le16(2);
msg->hdr.tid = cpu_to_le64(req->r_tid);
head = msg->front.iov_base;
@@ -2208,6 +2219,8 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
&req->r_completion, req->r_timeout);
if (err == 0)
err = -EIO;
+ } else if (req->r_wait_for_completion) {
+ err = req->r_wait_for_completion(mdsc, req);
} else {
err = wait_for_completion_killable(&req->r_completion);
}
@@ -3744,6 +3757,20 @@ static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
return msg;
}
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_auth_handshake *auth = &s->s_auth;
+ return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_auth_handshake *auth = &s->s_auth;
+ return ceph_auth_check_message_signature(auth, msg);
+}
+
static const struct ceph_connection_operations mds_con_ops = {
.get = con_get,
.put = con_put,
@@ -3753,6 +3780,8 @@ static const struct ceph_connection_operations mds_con_ops = {
.invalidate_authorizer = invalidate_authorizer,
.peer_reset = peer_reset,
.alloc_msg = mds_alloc_msg,
+ .sign_message = sign_message,
+ .check_message_signature = check_message_signature,
};
/* eof */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 3288359353e9..e2817d00f7d9 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -41,6 +41,9 @@ struct ceph_mds_reply_info_in {
char *symlink;
u32 xattr_len;
char *xattr_data;
+ u64 inline_version;
+ u32 inline_len;
+ char *inline_data;
};
/*
@@ -166,6 +169,11 @@ struct ceph_mds_client;
*/
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
struct ceph_mds_request *req);
+/*
+ * wait for request completion callback
+ */
+typedef int (*ceph_mds_request_wait_callback_t) (struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
/*
* an in-flight mds request
@@ -215,6 +223,7 @@ struct ceph_mds_request {
int r_request_release_offset;
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
+ struct page *r_locked_page;
int r_err;
bool r_aborted;
@@ -239,6 +248,7 @@ struct ceph_mds_request {
struct completion r_completion;
struct completion r_safe_completion;
ceph_mds_request_callback_t r_callback;
+ ceph_mds_request_wait_callback_t r_wait_for_completion;
struct list_head r_unsafe_item; /* per-session unsafe list item */
bool r_got_unsafe, r_got_safe, r_got_result;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f01645a27752..ce35fbd4ba5d 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -288,6 +288,9 @@ static int cmpu64_rev(const void *a, const void *b)
return 0;
}
+
+static struct ceph_snap_context *empty_snapc;
+
/*
* build the snap context for a given realm.
*/
@@ -328,6 +331,12 @@ static int build_snap_context(struct ceph_snap_realm *realm)
return 0;
}
+ if (num == 0 && realm->seq == empty_snapc->seq) {
+ ceph_get_snap_context(empty_snapc);
+ snapc = empty_snapc;
+ goto done;
+ }
+
/* alloc new snap context */
err = -ENOMEM;
if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
@@ -365,8 +374,8 @@ static int build_snap_context(struct ceph_snap_realm *realm)
realm->ino, realm, snapc, snapc->seq,
(unsigned int) snapc->num_snaps);
- if (realm->cached_context)
- ceph_put_snap_context(realm->cached_context);
+done:
+ ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
return 0;
@@ -466,6 +475,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
cap_snap. lucky us. */
dout("queue_cap_snap %p already pending\n", inode);
kfree(capsnap);
+ } else if (ci->i_snap_realm->cached_context == empty_snapc) {
+ dout("queue_cap_snap %p empty snapc\n", inode);
+ kfree(capsnap);
} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
struct ceph_snap_context *snapc = ci->i_head_snapc;
@@ -504,6 +516,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
capsnap->xattr_version = 0;
}
+ capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+
/* dirty page count moved from _head to this cap_snap;
all subsequent writes page dirties occur _after_ this
snapshot. */
@@ -590,15 +604,13 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (!inode)
continue;
spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
+ iput(lastinode);
lastinode = inode;
ceph_queue_cap_snap(ci);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
+ iput(lastinode);
list_for_each_entry(child, &realm->children, child_item) {
dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
@@ -928,5 +940,16 @@ out:
return;
}
+int __init ceph_snap_init(void)
+{
+ empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
+ if (!empty_snapc)
+ return -ENOMEM;
+ empty_snapc->seq = 1;
+ return 0;
+}
-
+void ceph_snap_exit(void)
+{
+ ceph_put_snap_context(empty_snapc);
+}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f6e12377335c..50f06cddc94b 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -515,7 +515,8 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_fs_client *fsc;
const u64 supported_features =
CEPH_FEATURE_FLOCK |
- CEPH_FEATURE_DIRLAYOUTHASH;
+ CEPH_FEATURE_DIRLAYOUTHASH |
+ CEPH_FEATURE_MDS_INLINE_DATA;
const u64 required_features = 0;
int page_count;
size_t size;
@@ -1017,9 +1018,6 @@ static struct file_system_type ceph_fs_type = {
};
MODULE_ALIAS_FS("ceph");
-#define _STRINGIFY(x) #x
-#define STRINGIFY(x) _STRINGIFY(x)
-
static int __init init_ceph(void)
{
int ret = init_caches();
@@ -1028,15 +1026,20 @@ static int __init init_ceph(void)
ceph_flock_init();
ceph_xattr_init();
+ ret = ceph_snap_init();
+ if (ret)
+ goto out_xattr;
ret = register_filesystem(&ceph_fs_type);
if (ret)
- goto out_icache;
+ goto out_snap;
pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
return 0;
-out_icache:
+out_snap:
+ ceph_snap_exit();
+out_xattr:
ceph_xattr_exit();
destroy_caches();
out:
@@ -1047,6 +1050,7 @@ static void __exit exit_ceph(void)
{
dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
+ ceph_snap_exit();
ceph_xattr_exit();
destroy_caches();
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b82f507979b8..e1aa32d0759d 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -161,6 +161,7 @@ struct ceph_cap_snap {
u64 time_warp_seq;
int writing; /* a sync write is still in progress */
int dirty_pages; /* dirty pages awaiting writeback */
+ bool inline_data;
};
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
@@ -253,9 +254,11 @@ struct ceph_inode_info {
spinlock_t i_ceph_lock;
u64 i_version;
+ u64 i_inline_version;
u32 i_time_warp_seq;
unsigned i_ceph_flags;
+ int i_ordered_count;
atomic_t i_release_count;
atomic_t i_complete_count;
@@ -434,14 +437,19 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
/*
* Ceph inode.
*/
-#define CEPH_I_NODELAY 4 /* do not delay cap release */
-#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
+#define CEPH_I_DIR_ORDERED 1 /* dentries in dir are ordered */
+#define CEPH_I_NODELAY 4 /* do not delay cap release */
+#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
- int release_count)
+ int release_count, int ordered_count)
{
atomic_set(&ci->i_complete_count, release_count);
+ if (ci->i_ordered_count == ordered_count)
+ ci->i_ceph_flags |= CEPH_I_DIR_ORDERED;
+ else
+ ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
}
static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
@@ -455,16 +463,35 @@ static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
atomic_read(&ci->i_release_count);
}
+static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
+{
+ return __ceph_dir_is_complete(ci) &&
+ (ci->i_ceph_flags & CEPH_I_DIR_ORDERED);
+}
+
static inline void ceph_dir_clear_complete(struct inode *inode)
{
__ceph_dir_clear_complete(ceph_inode(inode));
}
-static inline bool ceph_dir_is_complete(struct inode *inode)
+static inline void ceph_dir_clear_ordered(struct inode *inode)
{
- return __ceph_dir_is_complete(ceph_inode(inode));
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ordered_count++;
+ ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
+ spin_unlock(&ci->i_ceph_lock);
}
+static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool ret;
+ spin_lock(&ci->i_ceph_lock);
+ ret = __ceph_dir_is_complete_ordered(ci);
+ spin_unlock(&ci->i_ceph_lock);
+ return ret;
+}
/* find a specific frag @f */
extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
@@ -580,6 +607,7 @@ struct ceph_file_info {
char *last_name; /* last entry in previous chunk */
struct dentry *dentry; /* next dentry (for dcache readdir) */
int dir_release_count;
+ int dir_ordered_count;
/* used for -o dirstat read() on directory thing */
char *dir_info;
@@ -673,6 +701,8 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
+extern int ceph_snap_init(void);
+extern void ceph_snap_exit(void);
/*
* a cap_snap is "pending" if it is still awaiting an in-progress
@@ -715,7 +745,12 @@ extern void ceph_queue_vmtruncate(struct inode *inode);
extern void ceph_queue_invalidate(struct inode *inode);
extern void ceph_queue_writeback(struct inode *inode);
-extern int ceph_do_getattr(struct inode *inode, int mask, bool force);
+extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force);
+static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
+{
+ return __ceph_do_getattr(inode, NULL, mask, force);
+}
extern int ceph_permission(struct inode *inode, int mask);
extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -830,7 +865,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
int mds, int drop, int unless);
extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
- int *got, loff_t endoff);
+ loff_t endoff, int *got, struct page **pinned_page);
/* for counting open files by mode */
static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
@@ -852,7 +887,9 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,
int *opened);
extern int ceph_release(struct inode *inode, struct file *filp);
-
+extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
+ char *data, size_t len);
+int ceph_uninline_data(struct file *filp, struct page *locked_page);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct inode_operations ceph_dir_iops;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 678b0d2bbbc4..5a492caf34cb 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -854,7 +854,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
struct ceph_pagelist *pagelist = NULL;
int err;
- if (value) {
+ if (size > 0) {
/* copy value into pagelist */
pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
if (!pagelist)
@@ -864,7 +864,7 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
err = ceph_pagelist_append(pagelist, value, size);
if (err)
goto out;
- } else {
+ } else if (!value) {
flags |= CEPH_XATTR_REMOVE;
}
@@ -1001,6 +1001,9 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+
return __ceph_setxattr(dentry, name, value, size, flags);
}
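
To illustrate the xattr change above: a zero-length value is now sent as an empty extended attribute rather than being treated as a removal, and deletion still requires an explicit removexattr(). A hedged userspace sketch using only standard setxattr(2)/removexattr(2); the file name is a made-up placeholder:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "testfile";	/* hypothetical file on a CephFS mount */

	/*
	 * size == 0: with the fix above the kernel substitutes an empty
	 * value, so the attribute is created (or kept) with no payload
	 * instead of being silently removed.
	 */
	if (setxattr(path, "user.empty", "", 0, 0) < 0)
		perror("setxattr");

	/* Removal is still requested explicitly. */
	if (removexattr(path, "user.empty") < 0)
		perror("removexattr");

	return 0;
}
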
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 7ff025966e4f..86c893884eb9 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -426,7 +426,6 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
struct coda_file_info *cfi;
struct coda_inode_info *cii;
struct file *host_file;
- struct dentry *de;
struct venus_dirent *vdir;
unsigned long vdir_size = offsetof(struct venus_dirent, d_name);
unsigned int type;
@@ -438,8 +437,7 @@ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;
- de = coda_file->f_path.dentry;
- cii = ITOC(de->d_inode);
+ cii = ITOC(file_inode(coda_file));
vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
if (!vdir) return -ENOMEM;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 8e0f2f410189..517e64938438 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/device.h>
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -762,3 +763,56 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
EXPORT_SYMBOL_GPL(debugfs_create_regset32);
#endif /* CONFIG_HAS_IOMEM */
+
+struct debugfs_devm_entry {
+ int (*read)(struct seq_file *seq, void *data);
+ struct device *dev;
+};
+
+static int debugfs_devm_entry_open(struct inode *inode, struct file *f)
+{
+ struct debugfs_devm_entry *entry = inode->i_private;
+
+ return single_open(f, entry->read, entry->dev);
+}
+
+static const struct file_operations debugfs_devm_entry_ops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_devm_entry_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+/**
+ * debugfs_create_devm_seqfile - create a debugfs file that is bound to a device.
+ *
+ * @dev: device related to this debugfs file.
+ * @name: name of the debugfs file.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @read_fn: function pointer called to print the seq_file content.
+ */
+struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
+{
+ struct debugfs_devm_entry *entry;
+
+ if (IS_ERR(parent))
+ return ERR_PTR(-ENOENT);
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->read = read_fn;
+ entry->dev = dev;
+
+ return debugfs_create_file(name, S_IRUGO, parent, entry,
+ &debugfs_devm_entry_ops);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile);
+
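
A sketch of how a driver might use the helper added above; the driver, directory and statistic names are placeholders, not part of this patch. The read callback sees the struct device pointer in seq_file->private because debugfs_devm_entry_open() passes entry->dev to single_open():

#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *s, void *data)
{
	struct device *dev = s->private;	/* filled in by single_open() */

	seq_printf(s, "device %s: ok\n", dev_name(dev));
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct dentry *dir;

	/* hypothetical per-driver debugfs directory */
	dir = debugfs_create_dir("foo", NULL);

	/*
	 * The entry's bookkeeping is devm-allocated, so it is freed with
	 * the device; the dentry itself is cleaned up when the "foo"
	 * directory is removed as usual.
	 */
	debugfs_create_devm_seqfile(&pdev->dev, "stats", dir,
				    foo_stats_show);
	return 0;
}
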
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 1de7294aad20..2bc2c87f35e7 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,13 +40,14 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
static void drop_slab(void)
{
int nr_objects;
- struct shrink_control shrink = {
- .gfp_mask = GFP_KERNEL,
- };
- nodes_setall(shrink.nodes_to_scan);
do {
- nr_objects = shrink_slab(&shrink, 1000, 1000);
+ int nid;
+
+ nr_objects = 0;
+ for_each_online_node(nid)
+ nr_objects += shrink_node_slabs(GFP_KERNEL, nid,
+ 1000, 1000);
} while (nr_objects > 10);
}
diff --git a/fs/exec.c b/fs/exec.c
index 01aebe300200..ad8798e26be9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,18 +748,25 @@ EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
-static struct file *do_open_exec(struct filename *name)
+static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
struct file *file;
int err;
- static const struct open_flags open_exec_flags = {
+ struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC | MAY_OPEN,
.intent = LOOKUP_OPEN,
.lookup_flags = LOOKUP_FOLLOW,
};
- file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
+ if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
+ return ERR_PTR(-EINVAL);
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
+
+ file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
goto out;
@@ -770,12 +777,13 @@ static struct file *do_open_exec(struct filename *name)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file);
-
err = deny_write_access(file);
if (err)
goto exit;
+ if (name->name[0] != '\0')
+ fsnotify_open(file);
+
out:
return file;
@@ -787,7 +795,7 @@ exit:
struct file *open_exec(const char *name)
{
struct filename tmp = { .name = name };
- return do_open_exec(&tmp);
+ return do_open_execat(AT_FDCWD, &tmp, 0);
}
EXPORT_SYMBOL(open_exec);
@@ -1428,10 +1436,12 @@ static int exec_binprm(struct linux_binprm *bprm)
/*
* sys_execve() executes a new program.
*/
-static int do_execve_common(struct filename *filename,
- struct user_arg_ptr argv,
- struct user_arg_ptr envp)
+static int do_execveat_common(int fd, struct filename *filename,
+ struct user_arg_ptr argv,
+ struct user_arg_ptr envp,
+ int flags)
{
+ char *pathbuf = NULL;
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
@@ -1472,7 +1482,7 @@ static int do_execve_common(struct filename *filename,
check_unsafe_exec(bprm);
current->in_execve = 1;
- file = do_open_exec(filename);
+ file = do_open_execat(fd, filename, flags);
retval = PTR_ERR(file);
if (IS_ERR(file))
goto out_unmark;
@@ -1480,7 +1490,28 @@ static int do_execve_common(struct filename *filename,
sched_exec();
bprm->file = file;
- bprm->filename = bprm->interp = filename->name;
+ if (fd == AT_FDCWD || filename->name[0] == '/') {
+ bprm->filename = filename->name;
+ } else {
+ if (filename->name[0] == '\0')
+ pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
+ else
+ pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
+ fd, filename->name);
+ if (!pathbuf) {
+ retval = -ENOMEM;
+ goto out_unmark;
+ }
+ /*
+ * Record that a name derived from an O_CLOEXEC fd will be
+ * inaccessible after exec. Relies on having exclusive access to
+ * current->files (due to unshare_files above).
+ */
+ if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
+ bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
+ bprm->filename = pathbuf;
+ }
+ bprm->interp = bprm->filename;
retval = bprm_mm_init(bprm);
if (retval)
@@ -1521,6 +1552,7 @@ static int do_execve_common(struct filename *filename,
acct_update_integrals(current);
task_numa_free(current);
free_bprm(bprm);
+ kfree(pathbuf);
putname(filename);
if (displaced)
put_files_struct(displaced);
@@ -1538,6 +1570,7 @@ out_unmark:
out_free:
free_bprm(bprm);
+ kfree(pathbuf);
out_files:
if (displaced)
@@ -1553,7 +1586,18 @@ int do_execve(struct filename *filename,
{
struct user_arg_ptr argv = { .ptr.native = __argv };
struct user_arg_ptr envp = { .ptr.native = __envp };
- return do_execve_common(filename, argv, envp);
+ return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
+}
+
+int do_execveat(int fd, struct filename *filename,
+ const char __user *const __user *__argv,
+ const char __user *const __user *__envp,
+ int flags)
+{
+ struct user_arg_ptr argv = { .ptr.native = __argv };
+ struct user_arg_ptr envp = { .ptr.native = __envp };
+
+ return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
@@ -1569,7 +1613,23 @@ static int compat_do_execve(struct filename *filename,
.is_compat = true,
.ptr.compat = __envp,
};
- return do_execve_common(filename, argv, envp);
+ return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
+}
+
+static int compat_do_execveat(int fd, struct filename *filename,
+ const compat_uptr_t __user *__argv,
+ const compat_uptr_t __user *__envp,
+ int flags)
+{
+ struct user_arg_ptr argv = {
+ .is_compat = true,
+ .ptr.compat = __argv,
+ };
+ struct user_arg_ptr envp = {
+ .is_compat = true,
+ .ptr.compat = __envp,
+ };
+ return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
@@ -1609,6 +1669,20 @@ SYSCALL_DEFINE3(execve,
{
return do_execve(getname(filename), argv, envp);
}
+
+SYSCALL_DEFINE5(execveat,
+ int, fd, const char __user *, filename,
+ const char __user *const __user *, argv,
+ const char __user *const __user *, envp,
+ int, flags)
+{
+ int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
+
+ return do_execveat(fd,
+ getname_flags(filename, lookup_flags, NULL),
+ argv, envp, flags);
+}
+
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
const compat_uptr_t __user *, argv,
@@ -1616,4 +1690,17 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
{
return compat_do_execve(getname(filename), argv, envp);
}
+
+COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
+ const char __user *, filename,
+ const compat_uptr_t __user *, argv,
+ const compat_uptr_t __user *, envp,
+ int, flags)
+{
+ int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
+
+ return compat_do_execveat(fd,
+ getname_flags(filename, lookup_flags, NULL),
+ argv, envp, flags);
+}
#endif
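
For completeness, a userspace sketch of the execveat() syscall wired up above, invoked directly through syscall(2) since a libc wrapper may not exist yet; __NR_execveat is assumed to come from the architecture's unistd.h. With AT_EMPTY_PATH and an empty pathname, the file referred to by fd itself is executed, which is the fexecve()-style case handled by the /dev/fd/%d name construction in do_execveat_common():

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "echo", "hello from execveat", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/echo", O_PATH | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Empty pathname + AT_EMPTY_PATH: execute the object behind fd.
	 * Because the descriptor is O_CLOEXEC, do_execveat_common() marks
	 * the constructed /dev/fd/<n> name BINPRM_FLAGS_PATH_INACCESSIBLE.
	 */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);

	perror("execveat");	/* reached only if the exec failed */
	return 1;
}
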
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index db3f772e57ae..a75fba67bb1f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -158,17 +158,8 @@ struct ext4_allocation_request {
#define EXT4_MAP_MAPPED (1 << BH_Mapped)
#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
-/* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of
- * ext4_map_blocks wants to know whether or not the underlying cluster has
- * already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that
- * the requested mapping was from previously mapped (or delayed allocated)
- * cluster. We use BH_AllocFromCluster only for this flag. BH_AllocFromCluster
- * should never appear on buffer_head's state flags.
- */
-#define EXT4_MAP_FROM_CLUSTER (1 << BH_AllocFromCluster)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
- EXT4_MAP_FROM_CLUSTER)
+ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
@@ -565,10 +556,8 @@ enum {
#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
/* Do not take i_data_sem locking in ext4_map_blocks */
#define EXT4_GET_BLOCKS_NO_LOCK 0x0100
- /* Do not put hole in extent cache */
-#define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200
/* Convert written extents to unwritten */
-#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0400
+#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0200
/*
* The bit position of these flags must not overlap with any of the
@@ -889,10 +878,12 @@ struct ext4_inode_info {
/* extents status tree */
struct ext4_es_tree i_es_tree;
rwlock_t i_es_lock;
- struct list_head i_es_lru;
+ struct list_head i_es_list;
unsigned int i_es_all_nr; /* protected by i_es_lock */
- unsigned int i_es_lru_nr; /* protected by i_es_lock */
- unsigned long i_touch_when; /* jiffies of last accessing */
+ unsigned int i_es_shk_nr; /* protected by i_es_lock */
+ ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
+ extents to shrink. Protected by
+ i_es_lock */
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -1337,10 +1328,11 @@ struct ext4_sb_info {
/* Reclaim extents from extent status tree */
struct shrinker s_es_shrinker;
- struct list_head s_es_lru;
+ struct list_head s_es_list; /* List of inodes with reclaimable extents */
+ long s_es_nr_inode;
struct ext4_es_stats s_es_stats;
struct mb_cache *s_mb_cache;
- spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+ spinlock_t s_es_lock ____cacheline_aligned_in_smp;
/* Ratelimit ext4 messages. */
struct ratelimit_state s_err_ratelimit_state;
@@ -2196,7 +2188,6 @@ extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
extern void *ext4_kvzalloc(size_t size, gfp_t flags);
-extern void ext4_kvfree(void *ptr);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
extern const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -2647,7 +2638,7 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
int *retval);
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
- int *has_inline);
+ int *has_inline, __u64 start, __u64 len);
extern int ext4_try_to_evict_inline_data(handle_t *handle,
struct inode *inode,
int needed);
@@ -2795,16 +2786,6 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
/*
- * Note that these flags will never ever appear in a buffer_head's state flag.
- * See EXT4_MAP_... to see where this is used.
- */
-enum ext4_state_bits {
- BH_AllocFromCluster /* allocated blocks were part of already
- * allocated cluster. */
- = BH_JBDPrivateStart
-};
-
-/*
* Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
* to mark the bitmap uptodate. We need to also zero-out the bitmap
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0b16fb4c06d3..e5d3eadf47b1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2306,16 +2306,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t block)
{
int depth = ext_depth(inode);
- unsigned long len = 0;
- ext4_lblk_t lblock = 0;
+ ext4_lblk_t len;
+ ext4_lblk_t lblock;
struct ext4_extent *ex;
+ struct extent_status es;
ex = path[depth].p_ext;
if (ex == NULL) {
- /*
- * there is no extent yet, so gap is [0;-] and we
- * don't cache it
- */
+ /* there is no extent yet, so gap is [0;-] */
+ lblock = 0;
+ len = EXT_MAX_BLOCKS;
ext_debug("cache gap(whole file):");
} else if (block < le32_to_cpu(ex->ee_block)) {
lblock = block;
@@ -2324,9 +2324,6 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
block,
le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
- if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
- ext4_es_insert_extent(inode, lblock, len, ~0,
- EXTENT_STATUS_HOLE);
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
ext4_lblk_t next;
@@ -2340,14 +2337,19 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
block);
BUG_ON(next == lblock);
len = next - lblock;
- if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
- ext4_es_insert_extent(inode, lblock, len, ~0,
- EXTENT_STATUS_HOLE);
} else {
BUG();
}
- ext_debug(" -> %u:%lu\n", lblock, len);
+ ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
+ if (es.es_len) {
+ /* Is there a delayed extent containing lblock? */
+ if (es.es_lblk <= lblock)
+ return;
+ len = min(es.es_lblk - lblock, len);
+ }
+ ext_debug(" -> %u:%u\n", lblock, len);
+ ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
}
/*
@@ -2481,7 +2483,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned short ee_len = ext4_ext_get_actual_len(ex);
+ unsigned short ee_len = ext4_ext_get_actual_len(ex);
ext4_fsblk_t pblk;
int flags = get_default_free_blocks_flags(inode);
@@ -2490,7 +2492,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* at the beginning of the extent. Instead, we make a note
* that we tried freeing the cluster, and check to see if we
* need to free it on a subsequent call to ext4_remove_blocks,
- * or at the end of the ext4_truncate() operation.
+ * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
*/
flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
@@ -2501,8 +2503,8 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* partial cluster here.
*/
pblk = ext4_ext_pblock(ex) + ee_len - 1;
- if ((*partial_cluster > 0) &&
- (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+ if (*partial_cluster > 0 &&
+ *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(sbi, *partial_cluster),
sbi->s_cluster_ratio, flags);
@@ -2528,7 +2530,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
&& to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
ext4_lblk_t num;
- unsigned int unaligned;
+ long long first_cluster;
num = le32_to_cpu(ex->ee_block) + ee_len - from;
pblk = ext4_ext_pblock(ex) + ee_len - num;
@@ -2538,7 +2540,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* used by any other extent (partial_cluster is negative).
*/
if (*partial_cluster < 0 &&
- -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
+ *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
ext_debug("free last %u blocks starting %llu partial %lld\n",
@@ -2549,21 +2551,24 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* beginning of a cluster, and we removed the entire
* extent and the cluster is not used by any other extent,
* save the partial cluster here, since we might need to
- * delete if we determine that the truncate operation has
- * removed all of the blocks in the cluster.
+ * delete if we determine that the truncate or punch hole
+ * operation has removed all of the blocks in the cluster.
+ * If that cluster is used by another extent, preserve its
+ * negative value so it isn't freed later on.
*
- * On the other hand, if we did not manage to free the whole
- * extent, we have to mark the cluster as used (store negative
- * cluster number in partial_cluster).
+ * If the whole extent wasn't freed, we've reached the
+ * start of the truncated/punched region and have finished
+ * removing blocks. If there's a partial cluster here it's
+ * shared with the remainder of the extent and is no longer
+ * a candidate for removal.
*/
- unaligned = EXT4_PBLK_COFF(sbi, pblk);
- if (unaligned && (ee_len == num) &&
- (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
- *partial_cluster = EXT4_B2C(sbi, pblk);
- else if (unaligned)
- *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
- else if (*partial_cluster > 0)
+ if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
+ first_cluster = (long long) EXT4_B2C(sbi, pblk);
+ if (first_cluster != -*partial_cluster)
+ *partial_cluster = first_cluster;
+ } else {
*partial_cluster = 0;
+ }
} else
ext4_error(sbi->s_sb, "strange request: removal(2) "
"%u-%u from %u:%u\n",
@@ -2574,15 +2579,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
/*
* ext4_ext_rm_leaf() Removes the extents associated with the
- * blocks appearing between "start" and "end", and splits the extents
- * if "start" and "end" appear in the same extent
+ * blocks appearing between "start" and "end". Both "start"
+ * and "end" must appear in the same extent or EIO is returned.
*
* @handle: The journal handle
* @inode: The files inode
* @path: The path to the leaf
* @partial_cluster: The cluster which we'll have to free if all extents
- * has been released from it. It gets negative in case
- * that the cluster is still used.
+ * have been released from it. However, if this value is
+ * negative, it's a cluster just to the right of the
+ * punched region and it must not be freed.
* @start: The first block to remove
* @end: The last block to remove
*/
@@ -2621,27 +2627,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
- /*
- * If we're starting with an extent other than the last one in the
- * node, we need to see if it shares a cluster with the extent to
- * the right (towards the end of the file). If its leftmost cluster
- * is this extent's rightmost cluster and it is not cluster aligned,
- * we'll mark it as a partial that is not to be deallocated.
- */
-
- if (ex != EXT_LAST_EXTENT(eh)) {
- ext4_fsblk_t current_pblk, right_pblk;
- long long current_cluster, right_cluster;
-
- current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
- current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
- right_pblk = ext4_ext_pblock(ex + 1);
- right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
- if (current_cluster == right_cluster &&
- EXT4_PBLK_COFF(sbi, right_pblk))
- *partial_cluster = -right_cluster;
- }
-
trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
while (ex >= EXT_FIRST_EXTENT(eh) &&
@@ -2666,14 +2651,16 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (end < ex_ee_block) {
/*
* We're going to skip this extent and move to another,
- * so if this extent is not cluster aligned we have
- * to mark the current cluster as used to avoid
- * accidentally freeing it later on
+ * so note that its first cluster is in use to avoid
+ * freeing it when removing blocks. Eventually, the
+ * right edge of the truncated/punched region will
+ * be just to the left.
*/
- pblk = ext4_ext_pblock(ex);
- if (EXT4_PBLK_COFF(sbi, pblk))
+ if (sbi->s_cluster_ratio > 1) {
+ pblk = ext4_ext_pblock(ex);
*partial_cluster =
- -((long long)EXT4_B2C(sbi, pblk));
+ -(long long) EXT4_B2C(sbi, pblk);
+ }
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2749,8 +2736,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent));
}
le16_add_cpu(&eh->eh_entries, -1);
- } else if (*partial_cluster > 0)
- *partial_cluster = 0;
+ }
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
@@ -2769,20 +2755,18 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
/*
* If there's a partial cluster and at least one extent remains in
* the leaf, free the partial cluster if it isn't shared with the
- * current extent. If there's a partial cluster and no extents
- * remain in the leaf, it can't be freed here. It can only be
- * freed when it's possible to determine if it's not shared with
- * any other extent - when the next leaf is processed or when space
- * removal is complete.
+ * current extent. If it is shared with the current extent
+ * we zero partial_cluster because we've reached the start of the
+ * truncated/punched region and we're done removing blocks.
*/
- if (*partial_cluster > 0 && eh->eh_entries &&
- (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
- *partial_cluster)) {
- int flags = get_default_free_blocks_flags(inode);
-
- ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, *partial_cluster),
- sbi->s_cluster_ratio, flags);
+ if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
+ pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
+ if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(sbi, *partial_cluster),
+ sbi->s_cluster_ratio,
+ get_default_free_blocks_flags(inode));
+ }
*partial_cluster = 0;
}
@@ -2819,7 +2803,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext4_lblk_t end)
{
- struct super_block *sb = inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
long long partial_cluster = 0;
@@ -2845,9 +2829,10 @@ again:
*/
if (end < EXT_MAX_BLOCKS - 1) {
struct ext4_extent *ex;
- ext4_lblk_t ee_block;
+ ext4_lblk_t ee_block, ex_end, lblk;
+ ext4_fsblk_t pblk;
- /* find extent for this block */
+ /* find extent for or closest extent to this block */
path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
@@ -2867,6 +2852,7 @@ again:
}
ee_block = le32_to_cpu(ex->ee_block);
+ ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
/*
* See if the last block is inside the extent, if so split
@@ -2874,8 +2860,19 @@ again:
* tail of the first part of the split extent in
* ext4_ext_rm_leaf().
*/
- if (end >= ee_block &&
- end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
+ if (end >= ee_block && end < ex_end) {
+
+ /*
+ * If we're going to split the extent, note that
+ * the cluster containing the block after 'end' is
+ * in use to avoid freeing it when removing blocks.
+ */
+ if (sbi->s_cluster_ratio > 1) {
+ pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
+ partial_cluster =
+ -(long long) EXT4_B2C(sbi, pblk);
+ }
+
/*
* Split the extent in two so that 'end' is the last
* block in the first new extent. Also we should not
@@ -2886,6 +2883,24 @@ again:
end + 1, 1);
if (err < 0)
goto out;
+
+ } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
+ /*
+ * If there's an extent to the right its first cluster
+ * contains the immediate right boundary of the
+ * truncated/punched region. Set partial_cluster to
+ * its negative value so it won't be freed if shared
+ * with the current extent. The end < ee_block case
+ * is handled in ext4_ext_rm_leaf().
+ */
+ lblk = ex_end + 1;
+ err = ext4_ext_search_right(inode, path, &lblk, &pblk,
+ &ex);
+ if (err)
+ goto out;
+ if (pblk)
+ partial_cluster =
+ -(long long) EXT4_B2C(sbi, pblk);
}
}
/*
@@ -2996,16 +3011,18 @@ again:
trace_ext4_ext_remove_space_done(inode, start, end, depth,
partial_cluster, path->p_hdr->eh_entries);
- /* If we still have something in the partial cluster and we have removed
+ /*
+ * If we still have something in the partial cluster and we have removed
* even the first extent, then we should free the blocks in the partial
- * cluster as well. */
- if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
- int flags = get_default_free_blocks_flags(inode);
-
+ * cluster as well. (This code will only run when there are no leaves
+ * to the immediate left of the truncated/punched region.)
+ */
+ if (partial_cluster > 0 && err == 0) {
+ /* don't zero partial_cluster since it's not used afterwards */
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(EXT4_SB(sb), partial_cluster),
- EXT4_SB(sb)->s_cluster_ratio, flags);
- partial_cluster = 0;
+ EXT4_C2B(sbi, partial_cluster),
+ sbi->s_cluster_ratio,
+ get_default_free_blocks_flags(inode));
}
/* TODO: flexible tree reduction should be here */
@@ -4267,6 +4284,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
int set_unwritten = 0;
+ bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
@@ -4343,10 +4361,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
}
}
- if ((sbi->s_cluster_ratio > 1) &&
- ext4_find_delalloc_cluster(inode, map->m_lblk))
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-
/*
* requested block isn't allocated yet;
* we couldn't try to create block if create flag is zero
@@ -4356,15 +4370,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* put just found gap into cache to speed up
* subsequent requests
*/
- if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
- ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
/*
* Okay, we need to do block allocation.
*/
- map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
@@ -4376,7 +4388,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+ map_from_cluster = true;
goto got_allocated_blocks;
}
@@ -4397,7 +4409,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+ map_from_cluster = true;
goto got_allocated_blocks;
}
@@ -4523,7 +4535,7 @@ got_allocated_blocks:
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
- if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
+ if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
@@ -4620,7 +4632,6 @@ out2:
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
- ext4_es_lru_add(inode);
return err ? err : allocated;
}
@@ -5140,7 +5151,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
- error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
+ error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
+ start, len);
if (has_inline)
return error;
@@ -5154,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return generic_block_fiemap(inode, fieinfo, start, len,
- ext4_get_block);
+ return __generic_block_fiemap(inode, fieinfo, start, len,
+ ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
@@ -5179,7 +5191,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
error = ext4_fill_fiemap_extents(inode, start_blk,
len_blks, fieinfo);
}
- ext4_es_lru_add(inode);
return error;
}
@@ -5239,8 +5250,6 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
return -EIO;
ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
- if (!ex_last)
- return -EIO;
err = ext4_access_path(handle, inode, path + depth);
if (err)
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 94e7855ae71b..e04d45733976 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -147,10 +147,9 @@ static struct kmem_cache *ext4_es_cachep;
static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end);
-static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
- int nr_to_scan);
-static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
- struct ext4_inode_info *locked_ei);
+static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
int __init ext4_init_es(void)
{
@@ -298,6 +297,36 @@ out:
trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
+static void ext4_es_list_add(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ if (!list_empty(&ei->i_es_list))
+ return;
+
+ spin_lock(&sbi->s_es_lock);
+ if (list_empty(&ei->i_es_list)) {
+ list_add_tail(&ei->i_es_list, &sbi->s_es_list);
+ sbi->s_es_nr_inode++;
+ }
+ spin_unlock(&sbi->s_es_lock);
+}
+
+static void ext4_es_list_del(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ spin_lock(&sbi->s_es_lock);
+ if (!list_empty(&ei->i_es_list)) {
+ list_del_init(&ei->i_es_list);
+ sbi->s_es_nr_inode--;
+ WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
+ }
+ spin_unlock(&sbi->s_es_lock);
+}
+
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
ext4_fsblk_t pblk)
@@ -314,9 +343,10 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
* We don't count delayed extent because we never try to reclaim them
*/
if (!ext4_es_is_delayed(es)) {
- EXT4_I(inode)->i_es_lru_nr++;
+ if (!EXT4_I(inode)->i_es_shk_nr++)
+ ext4_es_list_add(inode);
percpu_counter_inc(&EXT4_SB(inode->i_sb)->
- s_es_stats.es_stats_lru_cnt);
+ s_es_stats.es_stats_shk_cnt);
}
EXT4_I(inode)->i_es_all_nr++;
@@ -330,12 +360,13 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
EXT4_I(inode)->i_es_all_nr--;
percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
- /* Decrease the lru counter when this es is not delayed */
+ /* Decrease the shrink counter when this es is not delayed */
if (!ext4_es_is_delayed(es)) {
- BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
- EXT4_I(inode)->i_es_lru_nr--;
+ BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
+ if (!--EXT4_I(inode)->i_es_shk_nr)
+ ext4_es_list_del(inode);
percpu_counter_dec(&EXT4_SB(inode->i_sb)->
- s_es_stats.es_stats_lru_cnt);
+ s_es_stats.es_stats_shk_cnt);
}
kmem_cache_free(ext4_es_cachep, es);
@@ -351,7 +382,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
static int ext4_es_can_be_merged(struct extent_status *es1,
struct extent_status *es2)
{
- if (ext4_es_status(es1) != ext4_es_status(es2))
+ if (ext4_es_type(es1) != ext4_es_type(es2))
return 0;
if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
@@ -394,6 +425,8 @@ ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
es1 = rb_entry(node, struct extent_status, rb_node);
if (ext4_es_can_be_merged(es1, es)) {
es1->es_len += es->es_len;
+ if (ext4_es_is_referenced(es))
+ ext4_es_set_referenced(es1);
rb_erase(&es->rb_node, &tree->root);
ext4_es_free_extent(inode, es);
es = es1;
@@ -416,6 +449,8 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
es1 = rb_entry(node, struct extent_status, rb_node);
if (ext4_es_can_be_merged(es, es1)) {
es->es_len += es1->es_len;
+ if (ext4_es_is_referenced(es1))
+ ext4_es_set_referenced(es);
rb_erase(node, &tree->root);
ext4_es_free_extent(inode, es1);
}
@@ -683,8 +718,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
goto error;
retry:
err = __es_insert_extent(inode, &newes);
- if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
- EXT4_I(inode)))
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
goto retry;
if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
err = 0;
@@ -782,6 +817,8 @@ out:
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
+ if (!ext4_es_is_referenced(es))
+ ext4_es_set_referenced(es);
stats->es_stats_cache_hits++;
} else {
stats->es_stats_cache_misses++;
@@ -841,8 +878,8 @@ retry:
es->es_lblk = orig_es.es_lblk;
es->es_len = orig_es.es_len;
if ((err == -ENOMEM) &&
- __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
- EXT4_I(inode)))
+ __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
goto retry;
goto out;
}
@@ -914,6 +951,11 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
end = lblk + len - 1;
BUG_ON(end < lblk);
+ /*
+ * ext4_clear_inode() depends on us taking i_es_lock unconditionally
+ * so that we are sure __es_shrink() is done with the inode before it
+ * is reclaimed.
+ */
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, end);
write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -921,114 +963,75 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
return err;
}
-static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
- struct list_head *b)
-{
- struct ext4_inode_info *eia, *eib;
- eia = list_entry(a, struct ext4_inode_info, i_es_lru);
- eib = list_entry(b, struct ext4_inode_info, i_es_lru);
-
- if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
- !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
- return 1;
- if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
- ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
- return -1;
- if (eia->i_touch_when == eib->i_touch_when)
- return 0;
- if (time_after(eia->i_touch_when, eib->i_touch_when))
- return 1;
- else
- return -1;
-}
-
-static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
- struct ext4_inode_info *locked_ei)
+static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei)
{
struct ext4_inode_info *ei;
struct ext4_es_stats *es_stats;
- struct list_head *cur, *tmp;
- LIST_HEAD(skipped);
ktime_t start_time;
u64 scan_time;
+ int nr_to_walk;
int nr_shrunk = 0;
- int retried = 0, skip_precached = 1, nr_skipped = 0;
+ int retried = 0, nr_skipped = 0;
es_stats = &sbi->s_es_stats;
start_time = ktime_get();
- spin_lock(&sbi->s_es_lru_lock);
retry:
- list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
- int shrunk;
-
- /*
- * If we have already reclaimed all extents from extent
- * status tree, just stop the loop immediately.
- */
- if (percpu_counter_read_positive(
- &es_stats->es_stats_lru_cnt) == 0)
- break;
-
- ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
+ spin_lock(&sbi->s_es_lock);
+ nr_to_walk = sbi->s_es_nr_inode;
+ while (nr_to_walk-- > 0) {
+ if (list_empty(&sbi->s_es_list)) {
+ spin_unlock(&sbi->s_es_lock);
+ goto out;
+ }
+ ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
+ i_es_list);
+ /* Move the inode to the tail */
+ list_move_tail(&ei->i_es_list, &sbi->s_es_list);
/*
- * Skip the inode that is newer than the last_sorted
- * time. Normally we try hard to avoid shrinking
- * precached inodes, but we will as a last resort.
+ * Normally we try hard to avoid shrinking precached inodes,
+ * but we will as a last resort.
*/
- if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
- (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
- EXT4_STATE_EXT_PRECACHED))) {
+ if (!retried && ext4_test_inode_state(&ei->vfs_inode,
+ EXT4_STATE_EXT_PRECACHED)) {
nr_skipped++;
- list_move_tail(cur, &skipped);
continue;
}
- if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
- !write_trylock(&ei->i_es_lock))
+ if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
+ nr_skipped++;
continue;
+ }
+ /*
+ * Now we hold i_es_lock which protects us from inode reclaim
+ * freeing inode under us
+ */
+ spin_unlock(&sbi->s_es_lock);
- shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
- if (ei->i_es_lru_nr == 0)
- list_del_init(&ei->i_es_lru);
+ nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
write_unlock(&ei->i_es_lock);
- nr_shrunk += shrunk;
- nr_to_scan -= shrunk;
- if (nr_to_scan == 0)
- break;
+ if (nr_to_scan <= 0)
+ goto out;
+ spin_lock(&sbi->s_es_lock);
}
-
- /* Move the newer inodes into the tail of the LRU list. */
- list_splice_tail(&skipped, &sbi->s_es_lru);
- INIT_LIST_HEAD(&skipped);
+ spin_unlock(&sbi->s_es_lock);
/*
* If we skipped any inodes, and we weren't able to make any
- * forward progress, sort the list and try again.
+ * forward progress, try again to scan precached inodes.
*/
if ((nr_shrunk == 0) && nr_skipped && !retried) {
retried++;
- list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
- es_stats->es_stats_last_sorted = jiffies;
- ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
- i_es_lru);
- /*
- * If there are no non-precached inodes left on the
- * list, start releasing precached extents.
- */
- if (ext4_test_inode_state(&ei->vfs_inode,
- EXT4_STATE_EXT_PRECACHED))
- skip_precached = 0;
goto retry;
}
- spin_unlock(&sbi->s_es_lru_lock);
-
if (locked_ei && nr_shrunk == 0)
- nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
+ nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);
+out:
scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
if (likely(es_stats->es_stats_scan_time))
es_stats->es_stats_scan_time = (scan_time +
@@ -1043,7 +1046,7 @@ retry:
else
es_stats->es_stats_shrunk = nr_shrunk;
- trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached,
+ trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
nr_skipped, retried);
return nr_shrunk;
}
@@ -1055,7 +1058,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
struct ext4_sb_info *sbi;
sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
- nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+ nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
return nr;
}
@@ -1068,13 +1071,13 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
int nr_to_scan = sc->nr_to_scan;
int ret, nr_shrunk;
- ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+ ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
if (!nr_to_scan)
return ret;
- nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
+ nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
return nr_shrunk;
@@ -1102,28 +1105,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
return 0;
/* here we just find an inode that has the max nr. of objects */
- spin_lock(&sbi->s_es_lru_lock);
- list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
+ spin_lock(&sbi->s_es_lock);
+ list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
inode_cnt++;
if (max && max->i_es_all_nr < ei->i_es_all_nr)
max = ei;
else if (!max)
max = ei;
}
- spin_unlock(&sbi->s_es_lru_lock);
+ spin_unlock(&sbi->s_es_lock);
seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
- percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
+ percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
seq_printf(seq, " %lu/%lu cache hits/misses\n",
es_stats->es_stats_cache_hits,
es_stats->es_stats_cache_misses);
- if (es_stats->es_stats_last_sorted != 0)
- seq_printf(seq, " %u ms last sorted interval\n",
- jiffies_to_msecs(jiffies -
- es_stats->es_stats_last_sorted));
if (inode_cnt)
- seq_printf(seq, " %d inodes on lru list\n", inode_cnt);
+ seq_printf(seq, " %d inodes on list\n", inode_cnt);
seq_printf(seq, "average:\n %llu us scan time\n",
div_u64(es_stats->es_stats_scan_time, 1000));
@@ -1132,7 +1131,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
seq_printf(seq,
"maximum:\n %lu inode (%u objects, %u reclaimable)\n"
" %llu us max scan time\n",
- max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
+ max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
div_u64(es_stats->es_stats_max_scan_time, 1000));
return 0;
@@ -1181,9 +1180,11 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
int err;
- INIT_LIST_HEAD(&sbi->s_es_lru);
- spin_lock_init(&sbi->s_es_lru_lock);
- sbi->s_es_stats.es_stats_last_sorted = 0;
+ /* Make sure we have enough bits for physical block number */
+ BUILD_BUG_ON(ES_SHIFT < 48);
+ INIT_LIST_HEAD(&sbi->s_es_list);
+ sbi->s_es_nr_inode = 0;
+ spin_lock_init(&sbi->s_es_lock);
sbi->s_es_stats.es_stats_shrunk = 0;
sbi->s_es_stats.es_stats_cache_hits = 0;
sbi->s_es_stats.es_stats_cache_misses = 0;
@@ -1192,7 +1193,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
if (err)
return err;
- err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL);
+ err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
if (err)
goto err1;
@@ -1210,7 +1211,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
return 0;
err2:
- percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
return err;
@@ -1221,71 +1222,83 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
if (sbi->s_proc)
remove_proc_entry("es_shrinker_info", sbi->s_proc);
percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
- percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
+ percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
unregister_shrinker(&sbi->s_es_shrinker);
}
-void ext4_es_lru_add(struct inode *inode)
+/*
+ * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
+ * most *nr_to_scan extents, update *nr_to_scan accordingly.
+ *
+ * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
+ * Increment *nr_shrunk by the number of reclaimed extents. Also update
+ * ei->i_es_shrink_lblk to where we should continue scanning.
+ */
+static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
+ int *nr_to_scan, int *nr_shrunk)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-
- ei->i_touch_when = jiffies;
-
- if (!list_empty(&ei->i_es_lru))
- return;
+ struct inode *inode = &ei->vfs_inode;
+ struct ext4_es_tree *tree = &ei->i_es_tree;
+ struct extent_status *es;
+ struct rb_node *node;
- spin_lock(&sbi->s_es_lru_lock);
- if (list_empty(&ei->i_es_lru))
- list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
- spin_unlock(&sbi->s_es_lru_lock);
-}
+ es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
+ if (!es)
+ goto out_wrap;
+ node = &es->rb_node;
+ while (*nr_to_scan > 0) {
+ if (es->es_lblk > end) {
+ ei->i_es_shrink_lblk = end + 1;
+ return 0;
+ }
-void ext4_es_lru_del(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ (*nr_to_scan)--;
+ node = rb_next(&es->rb_node);
+ /*
+ * We can't reclaim delayed extent from status tree because
+ * fiemap, bigalloc, and seek_data/hole need to use it.
+ */
+ if (ext4_es_is_delayed(es))
+ goto next;
+ if (ext4_es_is_referenced(es)) {
+ ext4_es_clear_referenced(es);
+ goto next;
+ }
- spin_lock(&sbi->s_es_lru_lock);
- if (!list_empty(&ei->i_es_lru))
- list_del_init(&ei->i_es_lru);
- spin_unlock(&sbi->s_es_lru_lock);
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(inode, es);
+ (*nr_shrunk)++;
+next:
+ if (!node)
+ goto out_wrap;
+ es = rb_entry(node, struct extent_status, rb_node);
+ }
+ ei->i_es_shrink_lblk = es->es_lblk;
+ return 1;
+out_wrap:
+ ei->i_es_shrink_lblk = 0;
+ return 0;
}
-static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
- int nr_to_scan)
+static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
struct inode *inode = &ei->vfs_inode;
- struct ext4_es_tree *tree = &ei->i_es_tree;
- struct rb_node *node;
- struct extent_status *es;
- unsigned long nr_shrunk = 0;
+ int nr_shrunk = 0;
+ ext4_lblk_t start = ei->i_es_shrink_lblk;
static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- if (ei->i_es_lru_nr == 0)
+ if (ei->i_es_shk_nr == 0)
return 0;
if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
__ratelimit(&_rs))
ext4_warning(inode->i_sb, "forced shrink of precached extents");
- node = rb_first(&tree->root);
- while (node != NULL) {
- es = rb_entry(node, struct extent_status, rb_node);
- node = rb_next(&es->rb_node);
- /*
- * We can't reclaim delayed extent from status tree because
- * fiemap, bigallic, and seek_data/hole need to use it.
- */
- if (!ext4_es_is_delayed(es)) {
- rb_erase(&es->rb_node, &tree->root);
- ext4_es_free_extent(inode, es);
- nr_shrunk++;
- if (--nr_to_scan == 0)
- break;
- }
- }
- tree->cache_es = NULL;
+ if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
+ start != 0)
+ es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);
+
+ ei->i_es_tree.cache_es = NULL;
return nr_shrunk;
}
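The replacement code above drops the per-inode LRU in favour of a rotor-style scan with a second-chance bit: extents whose "referenced" flag is set survive one pass (the flag is cleared) and only extents still unreferenced on a later pass are freed, with ei->i_es_shrink_lblk remembering where to resume. Below is a standalone sketch of that policy over a plain array; the fixed-size cache and the names are illustrative only, and it omits the delayed-extent exclusion that the ext4 code keeps.

/*
 * Toy model of the second-chance ("referenced bit") reclaim above:
 * the scan starts at a rotor, ages recently used entries by clearing
 * their referenced bit, frees the rest, and remembers where to resume.
 */
#include <stdio.h>

#define NR_ENTRIES 8

struct entry {
        int in_use;             /* entry holds a cached object       */
        int referenced;         /* touched since the last scan pass  */
};

static struct entry cache[NR_ENTRIES];
static int rotor;               /* analogue of ei->i_es_shrink_lblk  */

static void touch(int i)
{
        cache[i].in_use = 1;
        cache[i].referenced = 1;
}

/* Scan up to nr_to_scan entries, return how many were freed. */
static int reclaim(int nr_to_scan)
{
        int freed = 0;

        while (nr_to_scan-- > 0) {
                struct entry *e = &cache[rotor];

                rotor = (rotor + 1) % NR_ENTRIES;
                if (!e->in_use)
                        continue;
                if (e->referenced) {
                        /* second chance: age it instead of freeing it */
                        e->referenced = 0;
                        continue;
                }
                e->in_use = 0;
                freed++;
        }
        return freed;
}

int main(void)
{
        int i;

        for (i = 0; i < NR_ENTRIES; i++)
                touch(i);

        printf("pass 1 freed %d\n", reclaim(NR_ENTRIES)); /* 0: only ages */
        touch(3);                       /* entry 3 is used between passes */
        printf("pass 2 freed %d\n", reclaim(NR_ENTRIES)); /* 7            */
        printf("entry 3 still cached: %d\n", cache[3].in_use);
        return 0;
}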
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index efd5f970b501..691b52613ce4 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -29,25 +29,28 @@
/*
* These flags live in the high bits of extent_status.es_pblk
*/
-#define ES_SHIFT 60
-
-#define EXTENT_STATUS_WRITTEN (1 << 3)
-#define EXTENT_STATUS_UNWRITTEN (1 << 2)
-#define EXTENT_STATUS_DELAYED (1 << 1)
-#define EXTENT_STATUS_HOLE (1 << 0)
+enum {
+ ES_WRITTEN_B,
+ ES_UNWRITTEN_B,
+ ES_DELAYED_B,
+ ES_HOLE_B,
+ ES_REFERENCED_B,
+ ES_FLAGS
+};
-#define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \
- EXTENT_STATUS_UNWRITTEN | \
- EXTENT_STATUS_DELAYED | \
- EXTENT_STATUS_HOLE)
+#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
+#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
-#define ES_WRITTEN (1ULL << 63)
-#define ES_UNWRITTEN (1ULL << 62)
-#define ES_DELAYED (1ULL << 61)
-#define ES_HOLE (1ULL << 60)
+#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
+#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
+#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
+#define EXTENT_STATUS_HOLE (1 << ES_HOLE_B)
+#define EXTENT_STATUS_REFERENCED (1 << ES_REFERENCED_B)
-#define ES_MASK (ES_WRITTEN | ES_UNWRITTEN | \
- ES_DELAYED | ES_HOLE)
+#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
+ EXTENT_STATUS_UNWRITTEN | \
+ EXTENT_STATUS_DELAYED | \
+ EXTENT_STATUS_HOLE) << ES_SHIFT)
struct ext4_sb_info;
struct ext4_extent;
@@ -65,14 +68,13 @@ struct ext4_es_tree {
};
struct ext4_es_stats {
- unsigned long es_stats_last_sorted;
unsigned long es_stats_shrunk;
unsigned long es_stats_cache_hits;
unsigned long es_stats_cache_misses;
u64 es_stats_scan_time;
u64 es_stats_max_scan_time;
struct percpu_counter es_stats_all_cnt;
- struct percpu_counter es_stats_lru_cnt;
+ struct percpu_counter es_stats_shk_cnt;
};
extern int __init ext4_init_es(void);
@@ -93,29 +95,49 @@ extern void ext4_es_find_delayed_extent_range(struct inode *inode,
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
+static inline unsigned int ext4_es_status(struct extent_status *es)
+{
+ return es->es_pblk >> ES_SHIFT;
+}
+
+static inline unsigned int ext4_es_type(struct extent_status *es)
+{
+ return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
+}
+
static inline int ext4_es_is_written(struct extent_status *es)
{
- return (es->es_pblk & ES_WRITTEN) != 0;
+ return (ext4_es_type(es) & EXTENT_STATUS_WRITTEN) != 0;
}
static inline int ext4_es_is_unwritten(struct extent_status *es)
{
- return (es->es_pblk & ES_UNWRITTEN) != 0;
+ return (ext4_es_type(es) & EXTENT_STATUS_UNWRITTEN) != 0;
}
static inline int ext4_es_is_delayed(struct extent_status *es)
{
- return (es->es_pblk & ES_DELAYED) != 0;
+ return (ext4_es_type(es) & EXTENT_STATUS_DELAYED) != 0;
}
static inline int ext4_es_is_hole(struct extent_status *es)
{
- return (es->es_pblk & ES_HOLE) != 0;
+ return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
}
-static inline unsigned int ext4_es_status(struct extent_status *es)
+static inline void ext4_es_set_referenced(struct extent_status *es)
{
- return es->es_pblk >> ES_SHIFT;
+ es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
+}
+
+static inline void ext4_es_clear_referenced(struct extent_status *es)
+{
+ es->es_pblk &= ~(((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT);
+}
+
+static inline int ext4_es_is_referenced(struct extent_status *es)
+{
+ return (ext4_es_status(es) & EXTENT_STATUS_REFERENCED) != 0;
}
static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
@@ -135,23 +157,19 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
static inline void ext4_es_store_status(struct extent_status *es,
unsigned int status)
{
- es->es_pblk = (((ext4_fsblk_t)
- (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
- (es->es_pblk & ~ES_MASK));
+ es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
+ (es->es_pblk & ~ES_MASK);
}
static inline void ext4_es_store_pblock_status(struct extent_status *es,
ext4_fsblk_t pb,
unsigned int status)
{
- es->es_pblk = (((ext4_fsblk_t)
- (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
- (pb & ~ES_MASK));
+ es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
+ (pb & ~ES_MASK);
}
extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
-extern void ext4_es_lru_add(struct inode *inode);
-extern void ext4_es_lru_del(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
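The header now derives the bit layout from an enum: the top ES_FLAGS bits of the 64-bit es_pblk carry the status flags (including the new referenced bit used by the shrinker) and the low bits carry the physical block number, which is what the BUILD_BUG_ON(ES_SHIFT < 48) in the shrinker registration guards. A minimal userspace sketch of the same packing, with invented names (S_* rather than ES_*):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Status bits, mirroring the enum above (values are illustrative). */
enum {
        S_WRITTEN_B,
        S_UNWRITTEN_B,
        S_DELAYED_B,
        S_HOLE_B,
        S_REFERENCED_B,
        S_FLAGS                 /* number of status bits */
};

#define S_SHIFT (sizeof(uint64_t) * 8 - S_FLAGS)        /* 59 */
#define S_MASK  (~(uint64_t)0 << S_SHIFT)

#define S_REFERENCED    (1u << S_REFERENCED_B)

/* Pack a physical block and a status bitmap into one 64-bit word. */
static uint64_t pack(uint64_t pblk, unsigned int status)
{
        return (((uint64_t)status << S_SHIFT) & S_MASK) | (pblk & ~S_MASK);
}

static uint64_t pblock(uint64_t v)      { return v & ~S_MASK; }
static unsigned int status(uint64_t v)  { return v >> S_SHIFT; }

static void set_referenced(uint64_t *v)
{
        *v |= (uint64_t)S_REFERENCED << S_SHIFT;
}

static void clear_referenced(uint64_t *v)
{
        *v &= ~((uint64_t)S_REFERENCED << S_SHIFT);
}

int main(void)
{
        uint64_t v = pack(0x123456789abULL, 1u << S_WRITTEN_B);

        set_referenced(&v);
        assert(pblock(v) == 0x123456789abULL);
        assert(status(v) & S_REFERENCED);
        clear_referenced(&v);
        assert(!(status(v) & S_REFERENCED));

        printf("pblk=%#llx status=%#x\n",
               (unsigned long long)pblock(v), status(v));
        return 0;
}

Because the shift is derived from the enum, adding a flag automatically narrows the block-number field, which is exactly the situation the new compile-time check on the remaining width is there to catch.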
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8131be8c0af3..513c12cf444c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,24 +273,19 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* we determine this extent as a data or a hole according to whether the
* page cache has data or not.
*/
-static int ext4_find_unwritten_pgoff(struct inode *inode,
- int whence,
- struct ext4_map_blocks *map,
- loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
+ loff_t endoff, loff_t *offset)
{
struct pagevec pvec;
- unsigned int blkbits;
pgoff_t index;
pgoff_t end;
- loff_t endoff;
loff_t startoff;
loff_t lastoff;
int found = 0;
- blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
- endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
+
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -408,147 +403,144 @@ out:
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct ext4_map_blocks map;
- struct extent_status es;
- ext4_lblk_t start, last, end;
- loff_t dataoff, isize;
- int blkbits;
- int ret = 0;
+ struct fiemap_extent_info fie;
+ struct fiemap_extent ext[2];
+ loff_t next;
+ int i, ret = 0;
mutex_lock(&inode->i_mutex);
-
- isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset >= inode->i_size) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
-
- blkbits = inode->i_sb->s_blocksize_bits;
- start = offset >> blkbits;
- last = start;
- end = isize >> blkbits;
- dataoff = offset;
-
- do {
- map.m_lblk = last;
- map.m_len = end - last + 1;
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
- if (last != start)
- dataoff = (loff_t)last << blkbits;
+ fie.fi_flags = 0;
+ fie.fi_extents_max = 2;
+ fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
+ while (1) {
+ mm_segment_t old_fs = get_fs();
+
+ fie.fi_extents_mapped = 0;
+ memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
+
+ set_fs(get_ds());
+ ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
+ set_fs(old_fs);
+ if (ret)
break;
- }
- /*
- * If there is a delay extent at this offset,
- * it will be as a data.
- */
- ext4_es_find_delayed_extent_range(inode, last, last, &es);
- if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
- if (last != start)
- dataoff = (loff_t)last << blkbits;
+ /* No extents found, EOF */
+ if (!fie.fi_extents_mapped) {
+ ret = -ENXIO;
break;
}
+ for (i = 0; i < fie.fi_extents_mapped; i++) {
+ next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (map.m_flags & EXT4_MAP_UNWRITTEN) {
- int unwritten;
- unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
- &map, &dataoff);
- if (unwritten)
- break;
- }
+ if (offset < (loff_t)ext[i].fe_logical)
+ offset = (loff_t)ext[i].fe_logical;
+ /*
+ * If the extent is not unwritten, then it contains valid
+ * data, either mapped or delayed.
+ */
+ if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
+ goto out;
- last++;
- dataoff = (loff_t)last << blkbits;
- } while (last <= end);
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole depending on
+ * whether the page cache has data for it.
+ */
+ if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+ next, &offset))
+ goto out;
+ if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
+ ret = -ENXIO;
+ goto out;
+ }
+ offset = next;
+ }
+ }
+ if (offset > inode->i_size)
+ offset = inode->i_size;
+out:
mutex_unlock(&inode->i_mutex);
+ if (ret)
+ return ret;
- if (dataoff > isize)
- return -ENXIO;
-
- return vfs_setpos(file, dataoff, maxsize);
+ return vfs_setpos(file, offset, maxsize);
}
/*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE
*/
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct ext4_map_blocks map;
- struct extent_status es;
- ext4_lblk_t start, last, end;
- loff_t holeoff, isize;
- int blkbits;
- int ret = 0;
+ struct fiemap_extent_info fie;
+ struct fiemap_extent ext[2];
+ loff_t next;
+ int i, ret = 0;
mutex_lock(&inode->i_mutex);
-
- isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset >= inode->i_size) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- blkbits = inode->i_sb->s_blocksize_bits;
- start = offset >> blkbits;
- last = start;
- end = isize >> blkbits;
- holeoff = offset;
+ fie.fi_flags = 0;
+ fie.fi_extents_max = 2;
+ fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
+ while (1) {
+ mm_segment_t old_fs = get_fs();
- do {
- map.m_lblk = last;
- map.m_len = end - last + 1;
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
- last += ret;
- holeoff = (loff_t)last << blkbits;
- continue;
- }
+ fie.fi_extents_mapped = 0;
+ memset(ext, 0, sizeof(*ext));
- /*
- * If there is a delay extent at this offset,
- * we will skip this extent.
- */
- ext4_es_find_delayed_extent_range(inode, last, last, &es);
- if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
- last = es.es_lblk + es.es_len;
- holeoff = (loff_t)last << blkbits;
- continue;
- }
+ set_fs(get_ds());
+ ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
+ set_fs(old_fs);
+ if (ret)
+ break;
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (map.m_flags & EXT4_MAP_UNWRITTEN) {
- int unwritten;
- unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
- &map, &holeoff);
- if (!unwritten) {
- last += ret;
- holeoff = (loff_t)last << blkbits;
+ /* No extents found */
+ if (!fie.fi_extents_mapped)
+ break;
+
+ for (i = 0; i < fie.fi_extents_mapped; i++) {
+ next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
+ /*
+ * If the extent is not unwritten, then it contains valid
+ * data, either mapped or delayed.
+ */
+ if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
+ if (offset < (loff_t)ext[i].fe_logical)
+ goto out;
+ offset = next;
continue;
}
- }
-
- /* find a hole */
- break;
- } while (last <= end);
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole depending on
+ * whether the page cache has data for it.
+ */
+ if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+ next, &offset))
+ goto out;
+ offset = next;
+ if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
+ goto out;
+ }
+ }
+ if (offset > inode->i_size)
+ offset = inode->i_size;
+out:
mutex_unlock(&inode->i_mutex);
+ if (ret)
+ return ret;
- if (holeoff > isize)
- holeoff = isize;
-
- return vfs_setpos(file, holeoff, maxsize);
+ return vfs_setpos(file, offset, maxsize);
}
/*
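ext4_seek_data() and ext4_seek_hole() are rewritten above on top of ext4_fiemap() instead of repeated ext4_map_blocks()/extent-status lookups: any extent that is not unwritten counts as data, and only unwritten extents fall back to the page-cache probe. A rough userspace analogue of the SEEK_DATA half, using the generic FIEMAP ioctl, is sketched below; it skips the page-cache check for unwritten extents, so it is a simplification rather than the kernel logic.

/*
 * Rough userspace analogue of the new SEEK_DATA path: walk FIEMAP
 * extents from the requested offset and stop at the first extent that
 * is not marked unwritten.  Unlike the kernel code it does not inspect
 * the page cache for unwritten extents.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

#define NR_EXT  2       /* same small batch size as the kernel code */

static long long find_data(int fd, long long offset, long long size)
{
        size_t sz = sizeof(struct fiemap) + NR_EXT * sizeof(struct fiemap_extent);
        struct fiemap *fie = malloc(sz);
        unsigned int i;

        if (!fie)
                return -1;

        while (offset < size) {
                memset(fie, 0, sz);
                fie->fm_start = offset;
                fie->fm_length = size - offset;
                fie->fm_extent_count = NR_EXT;

                if (ioctl(fd, FS_IOC_FIEMAP, fie) < 0 ||
                    fie->fm_mapped_extents == 0)
                        break;                  /* error or nothing mapped */

                for (i = 0; i < fie->fm_mapped_extents; i++) {
                        struct fiemap_extent *ext = &fie->fm_extents[i];

                        if (offset < (long long)ext->fe_logical)
                                offset = ext->fe_logical;
                        /* Anything that is not unwritten counts as data. */
                        if (!(ext->fe_flags & FIEMAP_EXTENT_UNWRITTEN))
                                goto out;
                        if (ext->fe_flags & FIEMAP_EXTENT_LAST) {
                                offset = -1;    /* no data past this point */
                                goto out;
                        }
                        offset = ext->fe_logical + ext->fe_length;
                }
        }
        offset = -1;
out:
        free(fie);
        return offset;
}

int main(int argc, char **argv)
{
        struct stat st;
        int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);

        if (fd < 0 || fstat(fd, &st) < 0)
                return 1;
        printf("first data at offset %lld\n",
               find_data(fd, 0, (long long)st.st_size));
        close(fd);
        return 0;
}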
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3ea62695abce..4b143febf21f 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -811,8 +811,11 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
ret = __block_write_begin(page, 0, inline_size,
ext4_da_get_block_prep);
if (ret) {
+ up_read(&EXT4_I(inode)->xattr_sem);
+ unlock_page(page);
+ page_cache_release(page);
ext4_truncate_failed_write(inode);
- goto out;
+ return ret;
}
SetPageDirty(page);
@@ -870,6 +873,12 @@ retry_journal:
goto out_journal;
}
+ /*
+ * We cannot recurse into the filesystem as the transaction
+ * is already started.
+ */
+ flags |= AOP_FLAG_NOFS;
+
if (ret == -ENOSPC) {
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
@@ -882,11 +891,6 @@ retry_journal:
goto out;
}
- /*
- * We cannot recurse into the filesystem as the transaction
- * is already started.
- */
- flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
@@ -1807,11 +1811,12 @@ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
- int *has_inline)
+ int *has_inline, __u64 start, __u64 len)
{
__u64 physical = 0;
- __u64 length;
- __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST;
+ __u64 inline_len;
+ __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
+ FIEMAP_EXTENT_LAST;
int error = 0;
struct ext4_iloc iloc;
@@ -1820,6 +1825,13 @@ int ext4_inline_data_fiemap(struct inode *inode,
*has_inline = 0;
goto out;
}
+ inline_len = min_t(size_t, ext4_get_inline_size(inode),
+ i_size_read(inode));
+ if (start >= inline_len)
+ goto out;
+ if (start + len < inline_len)
+ inline_len = start + len;
+ inline_len -= start;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -1828,11 +1840,10 @@ int ext4_inline_data_fiemap(struct inode *inode,
physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
- length = i_size_read(inode);
if (physical)
- error = fiemap_fill_next_extent(fieinfo, 0, physical,
- length, flags);
+ error = fiemap_fill_next_extent(fieinfo, start, physical,
+ inline_len, flags);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
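ext4_inline_data_fiemap() now respects the caller's (start, len) window instead of always reporting the whole inline area: the inline length is capped at i_size, requests that start past it return nothing, and the reported extent is clipped to the overlap. The clamp arithmetic as a tiny standalone sketch (the values are made up):

#include <stdio.h>

/*
 * Clip an inline region of inline_size bytes (further limited by i_size)
 * against a fiemap request [start, start + len).  Returns the length to
 * report, or 0 if the request lies entirely past the inline data.
 */
static unsigned long long clip_inline(unsigned long long inline_size,
                                      unsigned long long i_size,
                                      unsigned long long start,
                                      unsigned long long len)
{
        unsigned long long inline_len =
                inline_size < i_size ? inline_size : i_size;

        if (start >= inline_len)
                return 0;
        if (start + len < inline_len)
                inline_len = start + len;
        return inline_len - start;
}

int main(void)
{
        /* The inode offers 60 bytes of inline space but i_size is 50. */
        printf("%llu\n", clip_inline(60, 50, 0, 4096)); /* 50 */
        printf("%llu\n", clip_inline(60, 50, 40, 4));   /* 4  */
        printf("%llu\n", clip_inline(60, 50, 80, 10));  /* 0  */
        return 0;
}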
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3356ab5395f4..5653fa42930b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -416,11 +416,6 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
up_read((&EXT4_I(inode)->i_data_sem));
- /*
- * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
- * because it shouldn't be marked in es_map->m_flags.
- */
- map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
/*
* We don't check m_len because extent will be collpased in status
@@ -491,7 +486,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
- ext4_es_lru_add(inode);
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -1393,7 +1387,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
- ext4_es_lru_add(inode);
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
@@ -1434,24 +1427,12 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
- if (ext4_has_inline_data(inode)) {
- /*
- * We will soon create blocks for this page, and let
- * us pretend as if the blocks aren't allocated yet.
- * In case of clusters, we have to handle the work
- * of mapping from cluster so that the reserved space
- * is calculated properly.
- */
- if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
- ext4_find_delalloc_cluster(inode, map->m_lblk))
- map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+ if (ext4_has_inline_data(inode))
retval = 0;
- } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- retval = ext4_ext_map_blocks(NULL, inode, map,
- EXT4_GET_BLOCKS_NO_PUT_HOLE);
+ else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
- retval = ext4_ind_map_blocks(NULL, inode, map,
- EXT4_GET_BLOCKS_NO_PUT_HOLE);
+ retval = ext4_ind_map_blocks(NULL, inode, map, 0);
add_delayed:
if (retval == 0) {
@@ -1465,7 +1446,8 @@ add_delayed:
* then we don't need to reserve it again. However we still need
* to reserve metadata for every block we're going to write.
*/
- if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
+ if (EXT4_SB(inode->i_sb)->s_cluster_ratio <= 1 ||
+ !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
ret = ext4_da_reserve_space(inode, iblock);
if (ret) {
/* not enough space to reserve */
@@ -1481,11 +1463,6 @@ add_delayed:
goto out_unlock;
}
- /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
- * and it should not appear on the bh->b_state.
- */
- map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
-
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
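With EXT4_MAP_FROM_CLUSTER gone, ext4_da_map_blocks() decides on the spot whether to take a delalloc reservation: on a bigalloc filesystem a cluster is reserved only if it does not already contain a delayed or allocated block. A simplified model of that per-cluster accounting follows; the bitmap and the names are illustrative, not ext4's data structures.

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER_BITS    4       /* 16 blocks per cluster (example value) */
#define MAX_CLUSTERS    64

static bool cluster_has_delayed[MAX_CLUSTERS];
static unsigned int reserved_clusters;

/* Reserve space for a delayed block; at most one reservation per cluster. */
static void reserve_delayed_block(unsigned int lblk)
{
        unsigned int cluster = lblk >> CLUSTER_BITS;

        if (!cluster_has_delayed[cluster]) {
                cluster_has_delayed[cluster] = true;
                reserved_clusters++;
        }
}

int main(void)
{
        /* Three dirty blocks in cluster 0, one in cluster 2. */
        reserve_delayed_block(0);
        reserve_delayed_block(1);
        reserve_delayed_block(15);
        reserve_delayed_block(32);

        printf("clusters reserved: %u\n", reserved_clusters);  /* 2 */
        return 0;
}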
@@ -3643,7 +3620,7 @@ out_stop:
* If this was a simple ftruncate() and the file will remain alive,
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
- * ext4_delete_inode(), and we allow that function to clean up the
+ * ext4_evict_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bfda18a15592..f58a0d106726 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -78,8 +78,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
- ext4_es_lru_del(inode1);
- ext4_es_lru_del(inode2);
isize = i_size_read(inode1);
i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index dbfe15c2533c..8d1e60214ef0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2358,7 +2358,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
if (sbi->s_group_info) {
memcpy(new_groupinfo, sbi->s_group_info,
sbi->s_group_info_size * sizeof(*sbi->s_group_info));
- ext4_kvfree(sbi->s_group_info);
+ kvfree(sbi->s_group_info);
}
sbi->s_group_info = new_groupinfo;
sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
@@ -2385,7 +2385,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
metalen = sizeof(*meta_group_info) <<
EXT4_DESC_PER_BLOCK_BITS(sb);
- meta_group_info = kmalloc(metalen, GFP_KERNEL);
+ meta_group_info = kmalloc(metalen, GFP_NOFS);
if (meta_group_info == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate mem "
"for a buddy group");
@@ -2399,7 +2399,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
+ meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
if (meta_group_info[i] == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
goto exit_group_info;
@@ -2428,7 +2428,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
{
struct buffer_head *bh;
meta_group_info[i]->bb_bitmap =
- kmalloc(sb->s_blocksize, GFP_KERNEL);
+ kmalloc(sb->s_blocksize, GFP_NOFS);
BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
bh = ext4_read_block_bitmap(sb, group);
BUG_ON(bh == NULL);
@@ -2495,7 +2495,7 @@ err_freebuddy:
kfree(sbi->s_group_info[i]);
iput(sbi->s_buddy_cache);
err_freesgi:
- ext4_kvfree(sbi->s_group_info);
+ kvfree(sbi->s_group_info);
return -ENOMEM;
}
@@ -2708,12 +2708,11 @@ int ext4_mb_release(struct super_block *sb)
EXT4_DESC_PER_BLOCK_BITS(sb);
for (i = 0; i < num_meta_group_infos; i++)
kfree(sbi->s_group_info[i]);
- ext4_kvfree(sbi->s_group_info);
+ kvfree(sbi->s_group_info);
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
- if (sbi->s_buddy_cache)
- iput(sbi->s_buddy_cache);
+ iput(sbi->s_buddy_cache);
if (sbi->s_mb_stats) {
ext4_msg(sb, KERN_INFO,
"mballoc: %u blocks %u reqs (%u success)",
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index a432634f2e6a..3cb267aee802 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -592,7 +592,7 @@ err_out:
/*
* set the i_blocks count to zero
- * so that the ext4_delete_inode does the
+ * so that the ext4_evict_inode() does the
* right job
*
* We don't need to take the i_lock because
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 9f2311bc9c4f..503ea15dc5db 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -273,6 +273,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ struct super_block *sb = orig_inode->i_sb;
/*
* It needs twice the amount of ordinary journal buffers because
@@ -405,10 +406,13 @@ unlock_pages:
page_cache_release(pagep[1]);
stop_journal:
ext4_journal_stop(handle);
+ if (*err == -ENOSPC &&
+ ext4_should_retry_alloc(sb, &retries))
+ goto again;
/* Buffer was busy because probably is pinned to journal transaction,
* force transaction commit may help to free it. */
- if (*err == -EBUSY && ext4_should_retry_alloc(orig_inode->i_sb,
- &retries))
+ if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
+ jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
goto again;
return replaced_count;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 426211882f72..2291923dae4e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2814,7 +2814,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
ext4_orphan_add(handle, inode);
inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
- retval = 0;
end_unlink:
brelse(bh);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ca4588388fc3..bf76f405a5f9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -856,7 +856,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
- ext4_kvfree(o_group_desc);
+ kvfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_super(handle, sb);
@@ -866,7 +866,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
- ext4_kvfree(n_group_desc);
+ kvfree(n_group_desc);
brelse(iloc.bh);
exit_dind:
brelse(dind);
@@ -909,7 +909,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
- ext4_kvfree(o_group_desc);
+ kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 63e802b8ec68..43c92b1685cb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -176,15 +176,6 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
return ret;
}
-void ext4_kvfree(void *ptr)
-{
- if (is_vmalloc_addr(ptr))
- vfree(ptr);
- else
- kfree(ptr);
-
-}
-
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
@@ -811,8 +802,8 @@ static void ext4_put_super(struct super_block *sb)
for (i = 0; i < sbi->s_gdb_count; i++)
brelse(sbi->s_group_desc[i]);
- ext4_kvfree(sbi->s_group_desc);
- ext4_kvfree(sbi->s_flex_groups);
+ kvfree(sbi->s_group_desc);
+ kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -880,10 +871,10 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
spin_lock_init(&ei->i_prealloc_lock);
ext4_es_init_tree(&ei->i_es_tree);
rwlock_init(&ei->i_es_lock);
- INIT_LIST_HEAD(&ei->i_es_lru);
+ INIT_LIST_HEAD(&ei->i_es_list);
ei->i_es_all_nr = 0;
- ei->i_es_lru_nr = 0;
- ei->i_touch_when = 0;
+ ei->i_es_shk_nr = 0;
+ ei->i_es_shrink_lblk = 0;
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
@@ -973,7 +964,6 @@ void ext4_clear_inode(struct inode *inode)
dquot_drop(inode);
ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
- ext4_es_lru_del(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1153,7 +1143,7 @@ enum {
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
- Opt_max_dir_size_kb,
+ Opt_max_dir_size_kb, Opt_nojournal_checksum,
};
static const match_table_t tokens = {
@@ -1187,6 +1177,7 @@ static const match_table_t tokens = {
{Opt_journal_dev, "journal_dev=%u"},
{Opt_journal_path, "journal_path=%s"},
{Opt_journal_checksum, "journal_checksum"},
+ {Opt_nojournal_checksum, "nojournal_checksum"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
@@ -1368,6 +1359,8 @@ static const struct mount_opts {
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_CLEAR},
+ {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
+ MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -1709,6 +1702,12 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
}
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
+ test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
+ "in data=ordered mode");
+ return 0;
+ }
return 1;
}
@@ -1946,7 +1945,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
memcpy(new_groups, sbi->s_flex_groups,
(sbi->s_flex_groups_allocated *
sizeof(struct flex_groups)));
- ext4_kvfree(sbi->s_flex_groups);
+ kvfree(sbi->s_flex_groups);
}
sbi->s_flex_groups = new_groups;
sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
@@ -3317,7 +3316,7 @@ int ext4_calculate_overhead(struct super_block *sb)
struct ext4_super_block *es = sbi->s_es;
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
- char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *) get_zeroed_page(GFP_NOFS);
if (!buf)
return -ENOMEM;
@@ -3345,8 +3344,8 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
- /* Add the journal blocks as well */
- if (sbi->s_journal)
+ /* Add the internal journal blocks as well */
+ if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
sbi->s_overhead = overhead;
@@ -4232,7 +4231,7 @@ failed_mount7:
failed_mount6:
ext4_mb_release(sb);
if (sbi->s_flex_groups)
- ext4_kvfree(sbi->s_flex_groups);
+ kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -4261,7 +4260,7 @@ failed_mount3:
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
- ext4_kvfree(sbi->s_group_desc);
+ kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
@@ -4862,6 +4861,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+ ext4_msg(sb, KERN_ERR, "changing journal_checksum "
+ "during remount not supported");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index e0c4ba39a377..64e295e8ff38 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -370,6 +370,7 @@ extern int fat_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
/* fat/inode.c */
+extern int fat_block_truncate_page(struct inode *inode, loff_t from);
extern void fat_attach(struct inode *inode, loff_t i_pos);
extern void fat_detach(struct inode *inode);
extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 85f79a89e747..8429c68e3057 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -443,6 +443,9 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
+ error = fat_block_truncate_page(inode, attr->ia_size);
+ if (error)
+ goto out;
down_write(&MSDOS_I(inode)->truncate_lock);
truncate_setsize(inode, attr->ia_size);
fat_truncate_blocks(inode, attr->ia_size);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 756aead10d96..7b41a2dcdd76 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -294,6 +294,18 @@ static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
return blocknr;
}
+/*
+ * fat_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate to physically zero out the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ * It also avoids failures reported by fsx for "data past EOF" cases.
+ */
+int fat_block_truncate_page(struct inode *inode, loff_t from)
+{
+ return block_truncate_page(inode->i_mapping, from, fat_get_block);
+}
+
static const struct address_space_operations fat_aops = {
.readpage = fat_readpage,
.readpages = fat_readpages,
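fat_block_truncate_page() plugs FAT into the generic block_truncate_page() helper so that shrinking a file zeroes the tail of its final block before the new size takes effect. A simplified userspace illustration of the same idea using pwrite() is below; the kernel helper goes through the page cache and buffer heads instead, and the file name and block size here are only for the demo.

/*
 * Zero the bytes between `from' and the end of the (power-of-two sized)
 * block containing it, so stale data in the partial block cannot
 * reappear if the file is grown again later.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int zero_block_tail(int fd, off_t from, unsigned int blocksize)
{
        unsigned int offset = from & (blocksize - 1);
        unsigned int n = blocksize - offset;

        if (offset == 0)
                return 0;       /* truncation point is block aligned */

        char zeroes[n];
        memset(zeroes, 0, n);
        return pwrite(fd, zeroes, n, from) < 0 ? -1 : 0;
}

int main(void)
{
        int fd = open("demo.bin", O_RDWR | O_CREAT, 0644);
        off_t new_size = 1000;
        int i;

        if (fd < 0)
                return 1;
        /* Fill one 4 KiB block with non-zero data, then "truncate" to 1000. */
        for (i = 0; i < 4096; i += 16)
                pwrite(fd, "0123456789abcdef", 16, i);
        zero_block_tail(fd, new_size, 4096);    /* zero bytes 1000..4095 */
        ftruncate(fd, new_size);
        close(fd);
        return 0;
}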
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef9bef118342..2d609a5fbfea 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* write_inode()
*/
spin_lock(&inode->i_lock);
- /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
- if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
- inode->i_state &= ~I_DIRTY_PAGES;
+
dirty = inode->i_state & I_DIRTY;
- inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+ inode->i_state &= ~I_DIRTY;
+
+ /*
+ * Paired with smp_mb() in __mark_inode_dirty(). This allows
+ * __mark_inode_dirty() to test i_state without grabbing i_lock -
+ * either they see the I_DIRTY bits cleared or we see the dirtied
+ * inode.
+ *
+ * I_DIRTY_PAGES is always cleared together above even if @mapping
+ * still has dirty pages. The flag is reinstated after smp_mb() if
+ * necessary. This guarantees that either __mark_inode_dirty()
+ * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+ */
+ smp_mb();
+
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ inode->i_state |= I_DIRTY_PAGES;
+
spin_unlock(&inode->i_lock);
+
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
int err = write_inode(inode, wbc);
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
}
/*
- * make sure that changes are seen by all cpus before we test i_state
- * -- mikulas
+ * Paired with smp_mb() in __writeback_single_inode() for the
+ * following lockless i_state test. See there for details.
*/
smp_mb();
- /* avoid the locking if we can */
if ((inode->i_state & flags) == flags)
return;
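The writeback change documents a store-buffering pairing: the flusher clears I_DIRTY, issues smp_mb(), then rechecks PAGECACHE_TAG_DIRTY, while __mark_inode_dirty() tags the page, issues smp_mb(), then tests i_state locklessly, so at least one side always notices the other and no dirty page is left untracked. A small C11 model of that guarantee, using seq_cst fences as a stand-in for smp_mb(); all names are invented for the demo.

#define _POSIX_C_SOURCE 200809L
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* page_tag models PAGECACHE_TAG_DIRTY, inode_dirty models I_DIRTY_PAGES. */
static atomic_int page_tag, inode_dirty;
static pthread_barrier_t barrier;

#define ROUNDS 100000

static void *dirtier(void *arg)
{
        (void)arg;
        for (int i = 0; i < ROUNDS; i++) {
                pthread_barrier_wait(&barrier);

                /* __mark_inode_dirty(): tag the page, then test i_state. */
                atomic_store_explicit(&page_tag, 1, memory_order_relaxed);
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                if (!atomic_load_explicit(&inode_dirty, memory_order_relaxed))
                        atomic_store_explicit(&inode_dirty, 1,
                                              memory_order_relaxed);

                pthread_barrier_wait(&barrier);
        }
        return NULL;
}

static void *flusher(void *arg)
{
        (void)arg;
        for (int i = 0; i < ROUNDS; i++) {
                pthread_barrier_wait(&barrier);

                /* __writeback_single_inode(): clear, barrier, recheck tag. */
                atomic_store_explicit(&inode_dirty, 0, memory_order_relaxed);
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                if (atomic_load_explicit(&page_tag, memory_order_relaxed))
                        atomic_store_explicit(&inode_dirty, 1,
                                              memory_order_relaxed);

                pthread_barrier_wait(&barrier);

                /* A dirty page must never be left with the inode unflagged. */
                assert(!atomic_load(&page_tag) || atomic_load(&inode_dirty));
                atomic_store(&page_tag, 0);
                atomic_store(&inode_dirty, 0);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_barrier_init(&barrier, NULL, 2);
        pthread_create(&a, NULL, dirtier, NULL);
        pthread_create(&b, NULL, flusher, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        puts("no dirty page was lost");
        return 0;
}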
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 966ace8b243f..28d0c7abba1c 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -415,7 +415,7 @@ err_unlock:
err_region:
unregister_chrdev_region(devt, 1);
err:
- fuse_conn_kill(fc);
+ fuse_abort_conn(fc);
goto out;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ca887314aba9..ba1107977f2e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -511,6 +511,35 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send);
+ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
+{
+ struct fuse_req *req;
+ ssize_t ret;
+
+ req = fuse_get_req(fc, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->in.h.opcode = args->in.h.opcode;
+ req->in.h.nodeid = args->in.h.nodeid;
+ req->in.numargs = args->in.numargs;
+ memcpy(req->in.args, args->in.args,
+ args->in.numargs * sizeof(struct fuse_in_arg));
+ req->out.argvar = args->out.argvar;
+ req->out.numargs = args->out.numargs;
+ memcpy(req->out.args, args->out.args,
+ args->out.numargs * sizeof(struct fuse_arg));
+ fuse_request_send(fc, req);
+ ret = req->out.h.error;
+ if (!ret && args->out.argvar) {
+ BUG_ON(args->out.numargs != 1);
+ ret = req->out.args[0].size;
+ }
+ fuse_put_request(fc, req);
+
+ return ret;
+}
+
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
struct fuse_req *req)
{
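fuse_simple_request() above lets a caller describe a synchronous request in an on-stack fuse_args block instead of allocating and tearing down a fuse_req by hand, and the fs/fuse/dir.c hunks that follow are mostly mechanical conversions to it. The shape of that refactor as a generic, hedged sketch; the structures and the fake transport are invented stand-ins, not the FUSE protocol.

#include <stdio.h>
#include <string.h>

/* Invented stand-ins for the kernel structures, for illustration only. */
struct sarg {
        unsigned int size;
        void *value;
};

struct sargs {
        unsigned int opcode;
        unsigned int num_in, num_out;
        struct sarg in[3], out[2];
};

/* Fake "transport": answers a LOOKUP by echoing the requested name. */
static int transport(const struct sargs *a)
{
        if (a->opcode == 1 /* LOOKUP */ && a->num_out == 1) {
                snprintf((char *)a->out[0].value, a->out[0].size,
                         "node-for-%s", (char *)a->in[0].value);
                return 0;
        }
        return -38;             /* "-ENOSYS": unknown opcode */
}

/*
 * The refactor in a nutshell: one helper takes a caller-filled argument
 * block, performs the request and returns the result, so callers no
 * longer open-code request allocation, sending and teardown.
 */
static int simple_request(struct sargs *args)
{
        return transport(args);
}

static int lookup(const char *name, char *reply, unsigned int reply_size)
{
        struct sargs args = { 0 };      /* analogous to FUSE_ARGS(args) */

        args.opcode = 1;
        args.num_in = 1;
        args.in[0].size = strlen(name) + 1;
        args.in[0].value = (void *)name;
        args.num_out = 1;
        args.out[0].size = reply_size;
        args.out[0].value = reply;
        return simple_request(&args);
}

int main(void)
{
        char reply[64];

        if (lookup("etc", reply, sizeof(reply)) == 0)
                printf("%s\n", reply);
        return 0;
}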
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index df562cc87763..252b8a5de8b5 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -145,22 +145,22 @@ static void fuse_invalidate_entry(struct dentry *entry)
fuse_invalidate_entry_cache(entry);
}
-static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
+static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
u64 nodeid, struct qstr *name,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
- req->in.h.opcode = FUSE_LOOKUP;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = name->len + 1;
- req->in.args[0].value = name->name;
- req->out.numargs = 1;
+ args->in.h.opcode = FUSE_LOOKUP;
+ args->in.h.nodeid = nodeid;
+ args->in.numargs = 1;
+ args->in.args[0].size = name->len + 1;
+ args->in.args[0].value = name->name;
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(struct fuse_entry_out);
- req->out.args[0].value = outarg;
+ args->out.args[0].size = sizeof(struct fuse_entry_out);
+ args->out.args[0].value = outarg;
}
u64 fuse_get_attr_version(struct fuse_conn *fc)
@@ -200,9 +200,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
goto invalid;
else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
(flags & LOOKUP_REVAL)) {
- int err;
struct fuse_entry_out outarg;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
u64 attr_version;
@@ -215,31 +214,23 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
goto out;
fc = get_fuse_conn(inode);
- req = fuse_get_req_nopages(fc);
- ret = PTR_ERR(req);
- if (IS_ERR(req))
- goto out;
forget = fuse_alloc_forget();
- if (!forget) {
- fuse_put_request(fc, req);
- ret = -ENOMEM;
+ ret = -ENOMEM;
+ if (!forget)
goto out;
- }
attr_version = fuse_get_attr_version(fc);
parent = dget_parent(entry);
- fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
+ fuse_lookup_init(fc, &args, get_node_id(parent->d_inode),
&entry->d_name, &outarg);
- fuse_request_send(fc, req);
+ ret = fuse_simple_request(fc, &args);
dput(parent);
- err = req->out.h.error;
- fuse_put_request(fc, req);
/* Zero nodeid is same as -ENOENT */
- if (!err && !outarg.nodeid)
- err = -ENOENT;
- if (!err) {
+ if (!ret && !outarg.nodeid)
+ ret = -ENOENT;
+ if (!ret) {
fi = get_fuse_inode(inode);
if (outarg.nodeid != get_node_id(inode)) {
fuse_queue_forget(fc, forget, outarg.nodeid, 1);
@@ -250,7 +241,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
spin_unlock(&fc->lock);
}
kfree(forget);
- if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
+ if (ret == -ENOMEM)
+ goto out;
+ if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
goto invalid;
fuse_change_attributes(inode, &outarg.attr,
@@ -296,7 +289,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
u64 attr_version;
int err;
@@ -306,24 +299,16 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
if (name->len > FUSE_NAME_MAX)
goto out;
- req = fuse_get_req_nopages(fc);
- err = PTR_ERR(req);
- if (IS_ERR(req))
- goto out;
forget = fuse_alloc_forget();
err = -ENOMEM;
- if (!forget) {
- fuse_put_request(fc, req);
+ if (!forget)
goto out;
- }
attr_version = fuse_get_attr_version(fc);
- fuse_lookup_init(fc, req, nodeid, name, outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_lookup_init(fc, &args, nodeid, name, outarg);
+ err = fuse_simple_request(fc, &args);
/* Zero nodeid is same as -ENOENT, but with valid timeout */
if (err || !outarg->nodeid)
goto out_put_forget;
@@ -405,7 +390,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
int err;
struct inode *inode;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_forget_link *forget;
struct fuse_create_in inarg;
struct fuse_open_out outopen;
@@ -420,15 +405,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
if (!forget)
goto out_err;
- req = fuse_get_req_nopages(fc);
- err = PTR_ERR(req);
- if (IS_ERR(req))
- goto out_put_forget_req;
-
err = -ENOMEM;
ff = fuse_file_alloc(fc);
if (!ff)
- goto out_put_request;
+ goto out_put_forget_req;
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -439,24 +419,23 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.flags = flags;
inarg.mode = mode;
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_CREATE;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 2;
- req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+ args.in.h.opcode = FUSE_CREATE;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 2;
+ args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- req->out.numargs = 2;
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ args.out.numargs = 2;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outentry);
- req->out.args[0].value = &outentry;
- req->out.args[1].size = sizeof(outopen);
- req->out.args[1].value = &outopen;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.out.args[0].size = sizeof(outentry);
+ args.out.args[0].value = &outentry;
+ args.out.args[1].size = sizeof(outopen);
+ args.out.args[1].value = &outopen;
+ err = fuse_simple_request(fc, &args);
if (err)
goto out_free_ff;
@@ -464,7 +443,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
goto out_free_ff;
- fuse_put_request(fc, req);
ff->fh = outopen.fh;
ff->nodeid = outentry.nodeid;
ff->open_flags = outopen.open_flags;
@@ -492,8 +470,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
out_free_ff:
fuse_file_free(ff);
-out_put_request:
- fuse_put_request(fc, req);
out_put_forget_req:
kfree(forget);
out_err:
@@ -547,7 +523,7 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
+static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
struct inode *dir, struct dentry *entry,
umode_t mode)
{
@@ -557,22 +533,18 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
struct fuse_forget_link *forget;
forget = fuse_alloc_forget();
- if (!forget) {
- fuse_put_request(fc, req);
+ if (!forget)
return -ENOMEM;
- }
memset(&outarg, 0, sizeof(outarg));
- req->in.h.nodeid = get_node_id(dir);
- req->out.numargs = 1;
+ args->in.h.nodeid = get_node_id(dir);
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args->out.args[0].size = sizeof(outarg);
+ args->out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, args);
if (err)
goto out_put_forget_req;
@@ -609,9 +581,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
{
struct fuse_mknod_in inarg;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -620,14 +590,14 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_MKNOD;
- req->in.numargs = 2;
- req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+ args.in.h.opcode = FUSE_MKNOD;
+ args.in.numargs = 2;
+ args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- return create_new_entry(fc, req, dir, entry, mode);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ return create_new_entry(fc, &args, dir, entry, mode);
}
static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
@@ -640,9 +610,7 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
if (!fc->dont_mask)
mode &= ~current_umask();
@@ -650,13 +618,13 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.umask = current_umask();
- req->in.h.opcode = FUSE_MKDIR;
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = entry->d_name.len + 1;
- req->in.args[1].value = entry->d_name.name;
- return create_new_entry(fc, req, dir, entry, S_IFDIR);
+ args.in.h.opcode = FUSE_MKDIR;
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = entry->d_name.len + 1;
+ args.in.args[1].value = entry->d_name.name;
+ return create_new_entry(fc, &args, dir, entry, S_IFDIR);
}
static int fuse_symlink(struct inode *dir, struct dentry *entry,
@@ -664,17 +632,15 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
{
struct fuse_conn *fc = get_fuse_conn(dir);
unsigned len = strlen(link) + 1;
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
- req->in.h.opcode = FUSE_SYMLINK;
- req->in.numargs = 2;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- req->in.args[1].size = len;
- req->in.args[1].value = link;
- return create_new_entry(fc, req, dir, entry, S_IFLNK);
+ args.in.h.opcode = FUSE_SYMLINK;
+ args.in.numargs = 2;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ args.in.args[1].size = len;
+ args.in.args[1].value = link;
+ return create_new_entry(fc, &args, dir, entry, S_IFLNK);
}
static inline void fuse_update_ctime(struct inode *inode)
@@ -689,18 +655,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_UNLINK;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 1;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ FUSE_ARGS(args);
+
+ args.in.h.opcode = FUSE_UNLINK;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 1;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
struct inode *inode = entry->d_inode;
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -729,18 +691,14 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_RMDIR;
- req->in.h.nodeid = get_node_id(dir);
- req->in.numargs = 1;
- req->in.args[0].size = entry->d_name.len + 1;
- req->in.args[0].value = entry->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ FUSE_ARGS(args);
+
+ args.in.h.opcode = FUSE_RMDIR;
+ args.in.h.nodeid = get_node_id(dir);
+ args.in.numargs = 1;
+ args.in.args[0].size = entry->d_name.len + 1;
+ args.in.args[0].value = entry->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(entry->d_inode);
fuse_invalidate_attr(dir);
@@ -757,27 +715,21 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
int err;
struct fuse_rename2_in inarg;
struct fuse_conn *fc = get_fuse_conn(olddir);
- struct fuse_req *req;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, argsize);
inarg.newdir = get_node_id(newdir);
inarg.flags = flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = get_node_id(olddir);
- req->in.numargs = 3;
- req->in.args[0].size = argsize;
- req->in.args[0].value = &inarg;
- req->in.args[1].size = oldent->d_name.len + 1;
- req->in.args[1].value = oldent->d_name.name;
- req->in.args[2].size = newent->d_name.len + 1;
- req->in.args[2].value = newent->d_name.name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = opcode;
+ args.in.h.nodeid = get_node_id(olddir);
+ args.in.numargs = 3;
+ args.in.args[0].size = argsize;
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = oldent->d_name.len + 1;
+ args.in.args[1].value = oldent->d_name.name;
+ args.in.args[2].size = newent->d_name.len + 1;
+ args.in.args[2].value = newent->d_name.name;
+ err = fuse_simple_request(fc, &args);
if (!err) {
/* ctime changes */
fuse_invalidate_attr(oldent->d_inode);
@@ -849,19 +801,17 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_link_in inarg;
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
- req->in.h.opcode = FUSE_LINK;
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = newent->d_name.len + 1;
- req->in.args[1].value = newent->d_name.name;
- err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
+ args.in.h.opcode = FUSE_LINK;
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = newent->d_name.len + 1;
+ args.in.args[1].value = newent->d_name.name;
+ err = create_new_entry(fc, &args, newdir, newent, inode->i_mode);
/* Contrary to "normal" filesystems it can happen that link
makes two "logical" inodes point to the same "physical"
inode. We invalidate the attributes of the old one, so it
@@ -929,13 +879,9 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
struct fuse_getattr_in inarg;
struct fuse_attr_out outarg;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
u64 attr_version;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
attr_version = fuse_get_attr_version(fc);
memset(&inarg, 0, sizeof(inarg));
@@ -947,20 +893,18 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
inarg.getattr_flags |= FUSE_GETATTR_FH;
inarg.fh = ff->fh;
}
- req->in.h.opcode = FUSE_GETATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
+ args.in.h.opcode = FUSE_GETATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
else
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err) {
if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
@@ -1102,7 +1046,7 @@ int fuse_allow_current_process(struct fuse_conn *fc)
static int fuse_access(struct inode *inode, int mask)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_access_in inarg;
int err;
@@ -1111,20 +1055,14 @@ static int fuse_access(struct inode *inode, int mask)
if (fc->no_access)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
- req->in.h.opcode = FUSE_ACCESS;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_ACCESS;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_access = 1;
err = 0;
@@ -1445,31 +1383,27 @@ static char *read_link(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_req_nopages(fc);
+ FUSE_ARGS(args);
char *link;
-
- if (IS_ERR(req))
- return ERR_CAST(req);
+ ssize_t ret;
link = (char *) __get_free_page(GFP_KERNEL);
- if (!link) {
- link = ERR_PTR(-ENOMEM);
- goto out;
- }
- req->in.h.opcode = FUSE_READLINK;
- req->in.h.nodeid = get_node_id(inode);
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = PAGE_SIZE - 1;
- req->out.args[0].value = link;
- fuse_request_send(fc, req);
- if (req->out.h.error) {
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+
+ args.in.h.opcode = FUSE_READLINK;
+ args.in.h.nodeid = get_node_id(inode);
+ args.out.argvar = 1;
+ args.out.numargs = 1;
+ args.out.args[0].size = PAGE_SIZE - 1;
+ args.out.args[0].value = link;
+ ret = fuse_simple_request(fc, &args);
+ if (ret < 0) {
free_page((unsigned long) link);
- link = ERR_PTR(req->out.h.error);
- } else
- link[req->out.args[0].size] = '\0';
- out:
- fuse_put_request(fc, req);
+ link = ERR_PTR(ret);
+ } else {
+ link[ret] = '\0';
+ }
fuse_invalidate_atime(inode);
return link;
}
@@ -1629,22 +1563,22 @@ void fuse_release_nowrite(struct inode *inode)
spin_unlock(&fc->lock);
}
-static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
+static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
struct inode *inode,
struct fuse_setattr_in *inarg_p,
struct fuse_attr_out *outarg_p)
{
- req->in.h.opcode = FUSE_SETATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*inarg_p);
- req->in.args[0].value = inarg_p;
- req->out.numargs = 1;
+ args->in.h.opcode = FUSE_SETATTR;
+ args->in.h.nodeid = get_node_id(inode);
+ args->in.numargs = 1;
+ args->in.args[0].size = sizeof(*inarg_p);
+ args->in.args[0].value = inarg_p;
+ args->out.numargs = 1;
if (fc->minor < 9)
- req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
else
- req->out.args[0].size = sizeof(*outarg_p);
- req->out.args[0].value = outarg_p;
+ args->out.args[0].size = sizeof(*outarg_p);
+ args->out.args[0].value = outarg_p;
}
/*
@@ -1653,14 +1587,9 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setattr_in inarg;
struct fuse_attr_out outarg;
- int err;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
@@ -1677,12 +1606,9 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
inarg.valid |= FATTR_FH;
inarg.fh = ff->fh;
}
- fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
- return err;
+ return fuse_simple_request(fc, &args);
}
/*
@@ -1698,7 +1624,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setattr_in inarg;
struct fuse_attr_out outarg;
bool is_truncate = false;
@@ -1723,10 +1649,6 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1747,10 +1669,8 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
inarg.valid |= FATTR_LOCKOWNER;
inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
}
- fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
+ err = fuse_simple_request(fc, &args);
if (err) {
if (err == -EINTR)
fuse_invalidate_attr(inode);
@@ -1837,32 +1757,26 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_setxattr_in inarg;
int err;
if (fc->no_setxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
inarg.flags = flags;
- req->in.h.opcode = FUSE_SETXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 3;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = strlen(name) + 1;
- req->in.args[1].value = name;
- req->in.args[2].size = size;
- req->in.args[2].value = value;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_SETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 3;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
+ args.in.args[2].size = size;
+ args.in.args[2].value = value;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_setxattr = 1;
err = -EOPNOTSUPP;
@@ -1879,7 +1793,7 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
struct fuse_getxattr_out outarg;
ssize_t ret;
@@ -1887,40 +1801,32 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
if (fc->no_getxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- req->in.h.opcode = FUSE_GETXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->in.args[1].size = strlen(name) + 1;
- req->in.args[1].value = name;
+ args.in.h.opcode = FUSE_GETXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 2;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.in.args[1].size = strlen(name) + 1;
+ args.in.args[1].value = name;
/* This is really two different operations rolled into one */
- req->out.numargs = 1;
+ args.out.numargs = 1;
if (size) {
- req->out.argvar = 1;
- req->out.args[0].size = size;
- req->out.args[0].value = value;
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = value;
} else {
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
}
- fuse_request_send(fc, req);
- ret = req->out.h.error;
- if (!ret)
- ret = size ? req->out.args[0].size : outarg.size;
- else {
- if (ret == -ENOSYS) {
- fc->no_getxattr = 1;
- ret = -EOPNOTSUPP;
- }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = outarg.size;
+ if (ret == -ENOSYS) {
+ fc->no_getxattr = 1;
+ ret = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
return ret;
}
@@ -1928,7 +1834,7 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_getxattr_in inarg;
struct fuse_getxattr_out outarg;
ssize_t ret;
@@ -1939,38 +1845,30 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
if (fc->no_listxattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- req->in.h.opcode = FUSE_LISTXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
+ args.in.h.opcode = FUSE_LISTXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
/* This is really two different operations rolled into one */
- req->out.numargs = 1;
+ args.out.numargs = 1;
if (size) {
- req->out.argvar = 1;
- req->out.args[0].size = size;
- req->out.args[0].value = list;
+ args.out.argvar = 1;
+ args.out.args[0].size = size;
+ args.out.args[0].value = list;
} else {
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
}
- fuse_request_send(fc, req);
- ret = req->out.h.error;
- if (!ret)
- ret = size ? req->out.args[0].size : outarg.size;
- else {
- if (ret == -ENOSYS) {
- fc->no_listxattr = 1;
- ret = -EOPNOTSUPP;
- }
+ ret = fuse_simple_request(fc, &args);
+ if (!ret && !size)
+ ret = outarg.size;
+ if (ret == -ENOSYS) {
+ fc->no_listxattr = 1;
+ ret = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
return ret;
}
@@ -1978,24 +1876,18 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
{
struct inode *inode = entry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
int err;
if (fc->no_removexattr)
return -EOPNOTSUPP;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->in.h.opcode = FUSE_REMOVEXATTR;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = strlen(name) + 1;
- req->in.args[0].value = name;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_REMOVEXATTR;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = strlen(name) + 1;
+ args.in.args[0].value = name;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_removexattr = 1;
err = -EOPNOTSUPP;
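The fs/fuse/dir.c hunks above all follow one conversion: callers stop allocating a struct fuse_req by hand and instead fill a stack-allocated struct fuse_args, letting fuse_simple_request() handle request allocation, sending and freeing. A minimal sketch of that calling pattern follows; FUSE_EXAMPLE, struct example_in and struct example_out are placeholders, not opcodes or types added by this patch.

static int fuse_example_op(struct fuse_conn *fc, struct inode *inode,
			   struct example_in *inarg,
			   struct example_out *outarg)
{
	FUSE_ARGS(args);			/* zero-initialised on the stack */

	args.in.h.opcode = FUSE_EXAMPLE;	/* placeholder opcode */
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(*inarg);
	args.in.args[0].value = inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outarg);
	args.out.args[0].value = outarg;

	/*
	 * Returns 0 or a negative errno; when out.argvar is set (see
	 * read_link() above) the length of the variable-size output
	 * argument is returned instead.
	 */
	return fuse_simple_request(fc, &args);
}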
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bf50259012ab..760b2c552197 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -24,30 +24,22 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
struct fuse_open_in inarg;
- struct fuse_req *req;
- int err;
-
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ FUSE_ARGS(args);
memset(&inarg, 0, sizeof(inarg));
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(*outargp);
- req->out.args[0].value = outargp;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = opcode;
+ args.in.h.nodeid = nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(*outargp);
+ args.out.args[0].value = outargp;
- return err;
+ return fuse_simple_request(fc, &args);
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -89,37 +81,9 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
-static void fuse_release_async(struct work_struct *work)
-{
- struct fuse_req *req;
- struct fuse_conn *fc;
- struct path path;
-
- req = container_of(work, struct fuse_req, misc.release.work);
- path = req->misc.release.path;
- fc = get_fuse_conn(path.dentry->d_inode);
-
- fuse_put_request(fc, req);
- path_put(&path);
-}
-
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- if (fc->destroy_req) {
- /*
- * If this is a fuseblk mount, then it's possible that
- * releasing the path will result in releasing the
- * super block and sending the DESTROY request. If
- * the server is single threaded, this would hang.
- * For this reason do the path_put() in a separate
- * thread.
- */
- atomic_inc(&req->count);
- INIT_WORK(&req->misc.release.work, fuse_release_async);
- schedule_work(&req->misc.release.work);
- } else {
- path_put(&req->misc.release.path);
- }
+ iput(req->misc.release.inode);
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
@@ -133,12 +97,12 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
* implement 'open'
*/
req->background = 0;
- path_put(&req->misc.release.path);
+ iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else if (sync) {
req->background = 0;
fuse_request_send(ff->fc, req);
- path_put(&req->misc.release.path);
+ iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else {
req->end = fuse_release_end;
@@ -297,9 +261,8 @@ void fuse_release_common(struct file *file, int opcode)
inarg->lock_owner = fuse_lock_owner_id(ff->fc,
(fl_owner_t) file);
}
- /* Hold vfsmount and dentry until release is finished */
- path_get(&file->f_path);
- req->misc.release.path = file->f_path;
+ /* Hold inode until release is finished */
+ req->misc.release.inode = igrab(file_inode(file));
/*
* Normally this will send the RELEASE request, however if
@@ -480,7 +443,7 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
struct inode *inode = file->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_fsync_in inarg;
int err;
@@ -506,23 +469,15 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
goto out;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out;
- }
-
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.fsync_flags = datasync ? 1 : 0;
- req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
if (isdir)
fc->no_fsyncdir = 1;
@@ -2156,49 +2111,44 @@ static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
return 0;
}
-static void fuse_lk_fill(struct fuse_req *req, struct file *file,
+static void fuse_lk_fill(struct fuse_args *args, struct file *file,
const struct file_lock *fl, int opcode, pid_t pid,
- int flock)
+ int flock, struct fuse_lk_in *inarg)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_lk_in *arg = &req->misc.lk_in;
-
- arg->fh = ff->fh;
- arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
- arg->lk.start = fl->fl_start;
- arg->lk.end = fl->fl_end;
- arg->lk.type = fl->fl_type;
- arg->lk.pid = pid;
+
+ memset(inarg, 0, sizeof(*inarg));
+ inarg->fh = ff->fh;
+ inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
+ inarg->lk.start = fl->fl_start;
+ inarg->lk.end = fl->fl_end;
+ inarg->lk.type = fl->fl_type;
+ inarg->lk.pid = pid;
if (flock)
- arg->lk_flags |= FUSE_LK_FLOCK;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*arg);
- req->in.args[0].value = arg;
+ inarg->lk_flags |= FUSE_LK_FLOCK;
+ args->in.h.opcode = opcode;
+ args->in.h.nodeid = get_node_id(inode);
+ args->in.numargs = 1;
+ args->in.args[0].size = sizeof(*inarg);
+ args->in.args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
+ struct fuse_lk_in inarg;
struct fuse_lk_out outarg;
int err;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(&outarg.lk, fl);
@@ -2209,7 +2159,8 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
+ struct fuse_lk_in inarg;
int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
int err;
@@ -2223,17 +2174,13 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
if (fl->fl_flags & FL_CLOSE)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
+ err = fuse_simple_request(fc, &args);
- fuse_lk_fill(req, file, fl, opcode, pid, flock);
- fuse_request_send(fc, req);
- err = req->out.h.error;
/* locking is restartable */
if (err == -EINTR)
err = -ERESTARTSYS;
- fuse_put_request(fc, req);
+
return err;
}
@@ -2283,7 +2230,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_bmap_in inarg;
struct fuse_bmap_out outarg;
int err;
@@ -2291,24 +2238,18 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
if (!inode->i_sb->s_bdev || fc->no_bmap)
return 0;
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return 0;
-
memset(&inarg, 0, sizeof(inarg));
inarg.block = block;
inarg.blocksize = inode->i_sb->s_blocksize;
- req->in.h.opcode = FUSE_BMAP;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_BMAP;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS)
fc->no_bmap = 1;
@@ -2776,7 +2717,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
struct fuse_conn *fc = ff->fc;
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
- struct fuse_req *req;
+ FUSE_ARGS(args);
int err;
if (fc->no_poll)
@@ -2794,21 +2735,15 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
fuse_register_polled_file(fc, ff);
}
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return POLLERR;
-
- req->in.h.opcode = FUSE_POLL;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.in.h.opcode = FUSE_POLL;
+ args.in.h.nodeid = ff->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
return outarg.revents;
@@ -2949,10 +2884,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
loff_t length)
{
struct fuse_file *ff = file->private_data;
- struct inode *inode = file->f_inode;
+ struct inode *inode = file_inode(file);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = ff->fc;
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_fallocate_in inarg = {
.fh = ff->fh,
.offset = offset,
@@ -2985,25 +2920,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out;
- }
-
- req->in.h.opcode = FUSE_FALLOCATE;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.in.h.opcode = FUSE_FALLOCATE;
+ args.in.h.nodeid = ff->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_fallocate = 1;
err = -EOPNOTSUPP;
}
- fuse_put_request(fc, req);
-
if (err)
goto out;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e8e47a6ab518..e0fc6725d1d0 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -213,7 +213,7 @@ struct fuse_out {
unsigned numargs;
/** Array of arguments */
- struct fuse_arg args[3];
+ struct fuse_arg args[2];
};
/** FUSE page descriptor */
@@ -222,6 +222,25 @@ struct fuse_page_desc {
unsigned int offset;
};
+struct fuse_args {
+ struct {
+ struct {
+ uint32_t opcode;
+ uint64_t nodeid;
+ } h;
+ unsigned numargs;
+ struct fuse_in_arg args[3];
+
+ } in;
+ struct {
+ unsigned argvar:1;
+ unsigned numargs;
+ struct fuse_arg args[2];
+ } out;
+};
+
+#define FUSE_ARGS(args) struct fuse_args args = {}
+
/** The request state */
enum fuse_req_state {
FUSE_REQ_INIT = 0,
@@ -305,11 +324,8 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct {
- union {
- struct fuse_release_in in;
- struct work_struct work;
- };
- struct path path;
+ struct fuse_release_in in;
+ struct inode *inode;
} release;
struct fuse_init_in init_in;
struct fuse_init_out init_out;
@@ -324,7 +340,6 @@ struct fuse_req {
struct fuse_req *next;
} write;
struct fuse_notify_retrieve_in retrieve_in;
- struct fuse_lk_in lk_in;
} misc;
/** page vector */
@@ -754,15 +769,6 @@ struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
void __fuse_get_request(struct fuse_req *req);
/**
- * Get a request, may fail with -ENOMEM,
- * useful for callers who doesn't use req->pages[]
- */
-static inline struct fuse_req *fuse_get_req_nopages(struct fuse_conn *fc)
-{
- return fuse_get_req(fc, 0);
-}
-
-/**
* Gets a requests for a file operation, always succeeds
*/
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
@@ -780,6 +786,11 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
/**
+ * Simple request sending that does request allocation and freeing
+ */
+ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
+
+/**
* Send a request in the background
*/
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
@@ -804,8 +815,6 @@ void fuse_invalidate_atime(struct inode *inode);
*/
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
-void fuse_conn_kill(struct fuse_conn *fc);
-
/**
* Initialize fuse_conn
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 03246cd9d47a..6749109f255d 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -376,28 +376,13 @@ static void fuse_bdi_destroy(struct fuse_conn *fc)
bdi_destroy(&fc->bdi);
}
-void fuse_conn_kill(struct fuse_conn *fc)
-{
- spin_lock(&fc->lock);
- fc->connected = 0;
- fc->blocked = 0;
- fc->initialized = 1;
- spin_unlock(&fc->lock);
- /* Flush all readers on this fs */
- kill_fasync(&fc->fasync, SIGIO, POLL_IN);
- wake_up_all(&fc->waitq);
- wake_up_all(&fc->blocked_waitq);
- wake_up_all(&fc->reserved_req_waitq);
-}
-EXPORT_SYMBOL_GPL(fuse_conn_kill);
-
static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
fuse_send_destroy(fc);
- fuse_conn_kill(fc);
+ fuse_abort_conn(fc);
mutex_lock(&fuse_mutex);
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
@@ -425,7 +410,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
- struct fuse_req *req;
+ FUSE_ARGS(args);
struct fuse_statfs_out outarg;
int err;
@@ -434,23 +419,17 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
memset(&outarg, 0, sizeof(outarg));
- req->in.numargs = 0;
- req->in.h.opcode = FUSE_STATFS;
- req->in.h.nodeid = get_node_id(dentry->d_inode);
- req->out.numargs = 1;
- req->out.args[0].size =
+ args.in.numargs = 0;
+ args.in.h.opcode = FUSE_STATFS;
+ args.in.h.nodeid = get_node_id(dentry->d_inode);
+ args.out.numargs = 1;
+ args.out.args[0].size =
fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
- req->out.args[0].value = &outarg;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
if (!err)
convert_fuse_statfs(buf, &outarg.st);
- fuse_put_request(fc, req);
return err;
}
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 32602c667b4a..7892e6fddb66 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -38,21 +38,30 @@ int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
}
-void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
- u32 parent, struct qstr *str)
+/* Generates key for catalog file/folders record. */
+int hfsplus_cat_build_key(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent, struct qstr *str)
{
- int len;
+ int len, err;
key->cat.parent = cpu_to_be32(parent);
- if (str) {
- hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
- str->name, str->len);
- len = be16_to_cpu(key->cat.name.length);
- } else {
- key->cat.name.length = 0;
- len = 0;
- }
+ err = hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
+ str->name, str->len);
+ if (unlikely(err < 0))
+ return err;
+
+ len = be16_to_cpu(key->cat.name.length);
key->key_len = cpu_to_be16(6 + 2 * len);
+ return 0;
+}
+
+/* Generates key for catalog thread record. */
+void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent)
+{
+ key->cat.parent = cpu_to_be32(parent);
+ key->cat.name.length = 0;
+ key->key_len = cpu_to_be16(6);
}
static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
@@ -167,11 +176,16 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
u32 parentid, struct qstr *str)
{
+ int err;
+
entry->type = cpu_to_be16(type);
entry->thread.reserved = 0;
entry->thread.parentID = cpu_to_be32(parentid);
- hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
+ err = hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
str->name, str->len);
+ if (unlikely(err < 0))
+ return err;
+
return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
}
@@ -183,7 +197,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
int err;
u16 type;
- hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
@@ -250,11 +264,16 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
dir->i_ino, str);
+ if (unlikely(entry_size < 0)) {
+ err = entry_size;
+ goto err2;
+ }
+
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
@@ -265,7 +284,10 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
goto err2;
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ if (unlikely(err))
+ goto err1;
+
entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
@@ -288,7 +310,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
return 0;
err1:
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
hfs_brec_remove(&fd);
err2:
@@ -313,7 +335,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
if (!str) {
int len;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -329,7 +351,9 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
off + 2, len);
fd.search_key->key_len = cpu_to_be16(6 + len);
} else
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
+ if (unlikely(err))
+ goto out;
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
@@ -360,7 +384,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
if (err)
goto out;
- hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -405,7 +429,11 @@ int hfsplus_rename_cat(u32 cnid,
dst_fd = src_fd;
/* find the old dir entry and read the data */
- hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+ err = hfsplus_cat_build_key(sb, src_fd.search_key,
+ src_dir->i_ino, src_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -419,7 +447,11 @@ int hfsplus_rename_cat(u32 cnid,
type = be16_to_cpu(entry.type);
/* create new dir entry with the data from the old entry */
- hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
+ err = hfsplus_cat_build_key(sb, dst_fd.search_key,
+ dst_dir->i_ino, dst_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
@@ -436,7 +468,11 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
/* finally remove the old entry */
- hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
+ err = hfsplus_cat_build_key(sb, src_fd.search_key,
+ src_dir->i_ino, src_name);
+ if (unlikely(err))
+ goto out;
+
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -449,7 +485,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
/* remove old thread entry */
- hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
if (err)
goto out;
@@ -459,9 +495,14 @@ int hfsplus_rename_cat(u32 cnid,
goto out;
/* create new thread entry */
- hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, dst_fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
dst_dir->i_ino, dst_name);
+ if (unlikely(entry_size < 0)) {
+ err = entry_size;
+ goto out;
+ }
+
err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
if (err != -ENOENT) {
if (!err)
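The catalog key helper is split above into a fallible, name-based hfsplus_cat_build_key() and an infallible, CNID-only hfsplus_cat_build_key_with_cnid(), so hfsplus_asc2uni() failures propagate instead of being ignored. A minimal sketch of how a caller combines the two, assuming the usual hfs_find_data setup; example_lookup_and_thread() is a placeholder, not a function added by this patch.

static int example_lookup_and_thread(struct super_block *sb,
				     struct inode *dir, struct qstr *str,
				     u32 cnid, struct hfs_find_data *fd)
{
	int err;

	/* Name-based record key: unicode conversion may now fail. */
	err = hfsplus_cat_build_key(sb, fd->search_key, dir->i_ino, str);
	if (unlikely(err))
		return err;
	err = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (err)
		return err;

	/* CNID-only thread key: cannot fail, so no error check needed. */
	hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
	return hfs_brec_find(fd, hfs_find_rec_by_key);
}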
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 610a3260bef1..435bea231cc6 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -44,7 +44,10 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (err)
return ERR_PTR(err);
- hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
+ err = hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino,
+ &dentry->d_name);
+ if (unlikely(err < 0))
+ goto fail;
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
if (err) {
@@ -97,9 +100,11 @@ again:
be32_to_cpu(entry.file.permissions.dev);
str.len = sprintf(name, "iNode%d", linkid);
str.name = name;
- hfsplus_cat_build_key(sb, fd.search_key,
+ err = hfsplus_cat_build_key(sb, fd.search_key,
HFSPLUS_SB(sb)->hidden_dir->i_ino,
&str);
+ if (unlikely(err < 0))
+ goto fail;
goto again;
}
} else if (!dentry->d_fsdata)
@@ -145,7 +150,7 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
err = -ENOMEM;
goto out;
}
- hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
+ hfsplus_cat_build_key_with_cnid(sb, fd.search_key, inode->i_ino);
err = hfs_brec_find(&fd, hfs_find_rec_by_key);
if (err)
goto out;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index eb5e059f481a..b0441d65fa54 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -443,8 +443,10 @@ int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
-void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
+int hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
u32 parent, struct qstr *str);
+void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
+ hfsplus_btree_key *key, u32 parent);
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 4cf2024b87da..593af2fdcc2d 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -515,7 +515,9 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
err = hfs_find_init(sbi->cat_tree, &fd);
if (err)
goto out_put_root;
- hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+ err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+ if (unlikely(err < 0))
+ goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1e2872b25343..5eba47f593f8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
pgoff = offset >> PAGE_SHIFT;
i_size_write(inode, offset);
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap))
hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
truncate_hugepages(inode, offset);
return 0;
}
@@ -472,12 +472,12 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
}
/*
- * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
* be taken from reclaim -- unlike regular filesystems. This needs an
* annotation because huge_pmd_share() does an allocation under
- * i_mmap_mutex.
+ * i_mmap_rwsem.
*/
-static struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
@@ -495,8 +495,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
- lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
- &hugetlbfs_i_mmap_mutex_key);
+ lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
+ &hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/inode.c b/fs/inode.c
index 2ed95f7caa4f..aa149e7262ac 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -114,6 +114,11 @@ int proc_nr_inodes(struct ctl_table *table, int write,
}
#endif
+static int no_open(struct inode *inode, struct file *file)
+{
+ return -ENXIO;
+}
+
/**
* inode_init_always - perform inode structure intialisation
* @sb: superblock inode belongs to
@@ -125,7 +130,7 @@ int proc_nr_inodes(struct ctl_table *table, int write,
int inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct inode_operations empty_iops;
- static const struct file_operations empty_fops;
+ static const struct file_operations no_open_fops = {.open = no_open};
struct address_space *const mapping = &inode->i_data;
inode->i_sb = sb;
@@ -133,7 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
- inode->i_fop = &empty_fops;
+ inode->i_fop = &no_open_fops;
inode->__i_nlink = 1;
inode->i_opflags = 0;
i_uid_write(inode, 0);
@@ -346,7 +351,7 @@ void address_space_init_once(struct address_space *mapping)
memset(mapping, 0, sizeof(*mapping));
INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
spin_lock_init(&mapping->tree_lock);
- mutex_init(&mapping->i_mmap_mutex);
+ init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
mapping->i_mmap = RB_ROOT;
@@ -1798,7 +1803,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
} else if (S_ISFIFO(mode))
inode->i_fop = &pipefifo_fops;
else if (S_ISSOCK(mode))
- inode->i_fop = &bad_sock_fops;
+ ; /* leave it no_open_fops */
else
printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
" inode %s:%lu\n", mode, inode->i_sb->s_id,
diff --git a/fs/internal.h b/fs/internal.h
index 757ba2abf21e..e9a61fe67575 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -147,3 +147,8 @@ extern const struct file_operations pipefifo_fops;
*/
extern void sb_pin_kill(struct super_block *sb);
extern void mnt_pin_kill(struct mount *m);
+
+/*
+ * fs/nsfs.c
+ */
+extern struct dentry_operations ns_dentry_operations;
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 77c9a7812542..214c3c11fbc2 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -443,7 +443,7 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
return -EINVAL;
}
- return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+ return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}
static int file_ioctl(struct file *filp, unsigned int cmd,
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index f488bbae541a..bb63254ed848 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -30,6 +30,7 @@ struct rock_state {
int cont_size;
int cont_extent;
int cont_offset;
+ int cont_loops;
struct inode *inode;
};
@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
rs->inode = inode;
}
+/* Maximum number of Rock Ridge continuation entries */
+#define RR_MAX_CE_ENTRIES 32
+
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
goto out;
}
ret = -EIO;
+ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
+ goto out;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1df94fabe4eb..b96bd8076b70 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1714,8 +1714,7 @@ int jbd2_journal_destroy(journal_t *journal)
if (journal->j_proc_entry)
jbd2_stats_proc_exit(journal);
- if (journal->j_inode)
- iput(journal->j_inode);
+ iput(journal->j_inode);
if (journal->j_revoke)
jbd2_journal_destroy_revoke(journal);
if (journal->j_chksum_driver)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 386303dca382..dddbde4f56f4 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -224,7 +224,7 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
- /* If a node has zero dsize, we only have to keep if it if it might be the
+ /* If a node has zero dsize, we only have to keep it if it might be the
node with highest version -- i.e. the one which will end up as f->metadata.
Note that such nodes won't be REF_UNCHECKED since there are no data to
check anyway. */
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index c522d098bb4f..bc5385471a6e 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -844,6 +844,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
/* Write out summary information - called from jffs2_do_reserve_space */
int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+ __must_hold(&c->erase_completion_block)
{
int datasize, infosize, padsize;
struct jffs2_eraseblock *jeb;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 4429d6d9217f..697390ea47b8 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -106,7 +106,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
const struct kernfs_ops *ops;
/*
- * @of->mutex nests outside active ref and is just to ensure that
+ * @of->mutex nests outside active ref and is primarily to ensure that
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
@@ -189,13 +189,16 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
const struct kernfs_ops *ops;
char *buf;
- buf = kmalloc(len, GFP_KERNEL);
+ buf = of->prealloc_buf;
+ if (!buf)
+ buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/*
- * @of->mutex nests outside active ref and is just to ensure that
- * the ops aren't called concurrently for the same open file.
+ * @of->mutex nests outside active ref and is used both to ensure that
+ * the ops aren't called concurrently for the same open file, and
+ * to provide exclusive access to ->prealloc_buf (when that exists).
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn)) {
@@ -210,21 +213,22 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
else
len = -EINVAL;
- kernfs_put_active(of->kn);
- mutex_unlock(&of->mutex);
-
if (len < 0)
- goto out_free;
+ goto out_unlock;
if (copy_to_user(user_buf, buf, len)) {
len = -EFAULT;
- goto out_free;
+ goto out_unlock;
}
*ppos += len;
+ out_unlock:
+ kernfs_put_active(of->kn);
+ mutex_unlock(&of->mutex);
out_free:
- kfree(buf);
+ if (buf != of->prealloc_buf)
+ kfree(buf);
return len;
}
@@ -278,19 +282,16 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
len = min_t(size_t, count, PAGE_SIZE);
}
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = of->prealloc_buf;
+ if (!buf)
+ buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (copy_from_user(buf, user_buf, len)) {
- len = -EFAULT;
- goto out_free;
- }
- buf[len] = '\0'; /* guarantee string termination */
-
/*
- * @of->mutex nests outside active ref and is just to ensure that
- * the ops aren't called concurrently for the same open file.
+ * @of->mutex nests outside active ref and is used both to ensure that
+ * the ops aren't called concurrently for the same open file, and
+ * to provide exclusive access to ->prealloc_buf (when that exists).
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn)) {
@@ -299,19 +300,27 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
goto out_free;
}
+ if (copy_from_user(buf, user_buf, len)) {
+ len = -EFAULT;
+ goto out_unlock;
+ }
+ buf[len] = '\0'; /* guarantee string termination */
+
ops = kernfs_ops(of->kn);
if (ops->write)
len = ops->write(of, buf, len, *ppos);
else
len = -EINVAL;
- kernfs_put_active(of->kn);
- mutex_unlock(&of->mutex);
-
if (len > 0)
*ppos += len;
+
+out_unlock:
+ kernfs_put_active(of->kn);
+ mutex_unlock(&of->mutex);
out_free:
- kfree(buf);
+ if (buf != of->prealloc_buf)
+ kfree(buf);
return len;
}
@@ -685,6 +694,22 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
*/
of->atomic_write_len = ops->atomic_write_len;
+ error = -EINVAL;
+ /*
+ * ->seq_show is incompatible with ->prealloc,
+ * as seq_read does its own allocation.
+ * ->read must be used instead.
+ */
+ if (ops->prealloc && ops->seq_show)
+ goto err_free;
+ if (ops->prealloc) {
+ int len = of->atomic_write_len ?: PAGE_SIZE;
+ of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
+ error = -ENOMEM;
+ if (!of->prealloc_buf)
+ goto err_free;
+ }
+
/*
* Always instantiate seq_file even if read access doesn't use
* seq_file or is not requested. This unifies private data access
@@ -715,6 +740,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
err_close:
seq_release(inode, file);
err_free:
+ kfree(of->prealloc_buf);
kfree(of);
err_out:
kernfs_put_active(kn);
@@ -728,6 +754,7 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
kernfs_put_open_node(kn, of);
seq_release(inode, filp);
+ kfree(of->prealloc_buf);
kfree(of);
return 0;
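The kernfs changes above add an optional per-open preallocated buffer: when ops->prealloc is set, kernfs_fop_open() allocates of->prealloc_buf once (atomic_write_len + 1 bytes, or PAGE_SIZE + 1 when atomic_write_len is 0) and the read/write paths reuse it under of->mutex instead of calling kmalloc() per I/O; ->seq_show is explicitly incompatible with it. A hypothetical attribute opting in might look like the sketch below, assuming ->prealloc is a plain boolean flag in struct kernfs_ops and using the read/write callback signatures from include/linux/kernfs.h; example_read and example_write are placeholders.

static ssize_t example_read(struct kernfs_open_file *of, char *buf,
			    size_t bytes, loff_t off)
{
	/* buf is of->prealloc_buf here, no allocation per read */
	return scnprintf(buf, bytes, "%d\n", 42);
}

static ssize_t example_write(struct kernfs_open_file *of, char *buf,
			     size_t bytes, loff_t off)
{
	/* buf already holds the NUL-terminated user data */
	return bytes;
}

static const struct kernfs_ops example_prealloc_ops = {
	.prealloc	  = true,
	.atomic_write_len = 128,
	.read		  = example_read,
	.write		  = example_write,
};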
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 9106f42c472c..1cc6ec51e6b1 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -214,7 +214,7 @@ int nsm_monitor(const struct nlm_host *host)
if (unlikely(res.status != 0))
status = -EIO;
if (unlikely(status < 0)) {
- printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
+ pr_notice_ratelimited("lockd: cannot monitor %s\n", nsm->sm_name);
return status;
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index d1bb7ecfd201..e94c887da2d7 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -350,7 +350,7 @@ static struct svc_serv *lockd_create_svc(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
- serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
+ serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
return ERR_PTR(-ENOMEM);
diff --git a/fs/mount.h b/fs/mount.h
index f82c62840905..0ad6f760ce52 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -1,10 +1,11 @@
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <linux/ns_common.h>
struct mnt_namespace {
atomic_t count;
- unsigned int proc_inum;
+ struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
diff --git a/fs/namei.c b/fs/namei.c
index db5fe86319e6..bc35b02883bb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -130,7 +130,7 @@ void final_putname(struct filename *name)
#define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename))
-static struct filename *
+struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
struct filename *result, *err;
@@ -487,6 +487,19 @@ void path_put(const struct path *path)
}
EXPORT_SYMBOL(path_put);
+struct nameidata {
+ struct path path;
+ struct qstr last;
+ struct path root;
+ struct inode *inode; /* path.dentry.d_inode */
+ unsigned int flags;
+ unsigned seq, m_seq;
+ int last_type;
+ unsigned depth;
+ struct file *base;
+ char *saved_names[MAX_NESTED_LINKS + 1];
+};
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -695,6 +708,18 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
nd->flags |= LOOKUP_JUMPED;
}
+void nd_set_link(struct nameidata *nd, char *path)
+{
+ nd->saved_names[nd->depth] = path;
+}
+EXPORT_SYMBOL(nd_set_link);
+
+char *nd_get_link(struct nameidata *nd)
+{
+ return nd->saved_names[nd->depth];
+}
+EXPORT_SYMBOL(nd_get_link);
+
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
struct inode *inode = link->dentry->d_inode;
@@ -1821,13 +1846,14 @@ static int link_path_walk(const char *name, struct nameidata *nd)
}
static int path_init(int dfd, const char *name, unsigned int flags,
- struct nameidata *nd, struct file **fp)
+ struct nameidata *nd)
{
int retval = 0;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
- nd->flags = flags | LOOKUP_JUMPED;
+ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
+ nd->base = NULL;
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
@@ -1847,7 +1873,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
} else {
path_get(&nd->path);
}
- return 0;
+ goto done;
}
nd->root.mnt = NULL;
@@ -1897,7 +1923,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
if (f.flags & FDPUT_FPUT)
- *fp = f.file;
+ nd->base = f.file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
rcu_read_lock();
} else {
@@ -1908,13 +1934,26 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->inode = nd->path.dentry->d_inode;
if (!(flags & LOOKUP_RCU))
- return 0;
+ goto done;
if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
- return 0;
+ goto done;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
return -ECHILD;
+done:
+ current->total_link_count = 0;
+ return link_path_walk(name, nd);
+}
+
+static void path_cleanup(struct nameidata *nd)
+{
+ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+ path_put(&nd->root);
+ nd->root.mnt = NULL;
+ }
+ if (unlikely(nd->base))
+ fput(nd->base);
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
@@ -1930,7 +1969,6 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
static int path_lookupat(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
- struct file *base = NULL;
struct path path;
int err;
@@ -1948,14 +1986,7 @@ static int path_lookupat(int dfd, const char *name,
* be handled by restarting a traditional ref-walk (which will always
* be able to complete).
*/
- err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
-
- if (unlikely(err))
- goto out;
-
- current->total_link_count = 0;
- err = link_path_walk(name, nd);
-
+ err = path_init(dfd, name, flags, nd);
if (!err && !(flags & LOOKUP_PARENT)) {
err = lookup_last(nd, &path);
while (err > 0) {
@@ -1983,14 +2014,7 @@ static int path_lookupat(int dfd, const char *name,
}
}
-out:
- if (base)
- fput(base);
-
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- path_put(&nd->root);
- nd->root.mnt = NULL;
- }
+ path_cleanup(nd);
return err;
}
@@ -2297,19 +2321,13 @@ out:
static int
path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
{
- struct file *base = NULL;
struct nameidata nd;
int err;
- err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
+ err = path_init(dfd, name, flags, &nd);
if (unlikely(err))
goto out;
- current->total_link_count = 0;
- err = link_path_walk(name, &nd);
- if (err)
- goto out;
-
err = mountpoint_last(&nd, path);
while (err > 0) {
void *cookie;
@@ -2325,12 +2343,7 @@ path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags
put_link(&nd, &link, cookie);
}
out:
- if (base)
- fput(base);
-
- if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
- path_put(&nd.root);
-
+ path_cleanup(&nd);
return err;
}
@@ -3181,7 +3194,6 @@ out:
static struct file *path_openat(int dfd, struct filename *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
- struct file *base = NULL;
struct file *file;
struct path path;
int opened = 0;
@@ -3198,12 +3210,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
goto out;
}
- error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
- if (unlikely(error))
- goto out;
-
- current->total_link_count = 0;
- error = link_path_walk(pathname->name, nd);
+ error = path_init(dfd, pathname->name, flags, nd);
if (unlikely(error))
goto out;
@@ -3229,10 +3236,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
put_link(nd, &link, cookie);
}
out:
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
- path_put(&nd->root);
- if (base)
- fput(base);
+ path_cleanup(nd);
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
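The three call sites above (path_lookupat, path_mountpoint and path_openat) now share the same shape: path_init() does the fd/root setup and runs link_path_walk() itself (forcing LOOKUP_PARENT and stashing any pinned file in nd->base), while path_cleanup() releases nd->root and nd->base on both the success and the failure path. A minimal sketch of that calling convention; example_walk() is a placeholder, not a helper added by this patch.

static int example_walk(int dfd, const char *name, unsigned int flags,
			struct nameidata *nd)
{
	int err;

	err = path_init(dfd, name, flags, nd);
	if (unlikely(err))
		goto out;

	/* ... act on nd->path, nd->last and nd->last_type here ... */
out:
	path_cleanup(nd);
	return err;
}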
diff --git a/fs/namespace.c b/fs/namespace.c
index 5b66b2b3624d..cd1e9681a0cf 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -963,7 +963,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
}
/* Don't allow unprivileged users to reveal what is under a mount */
- if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
+ if ((flag & CL_UNPRIVILEGED) &&
+ (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
mnt->mnt.mnt_flags |= MNT_LOCKED;
atomic_inc(&sb->s_active);
@@ -1369,6 +1370,8 @@ void umount_tree(struct mount *mnt, int how)
}
if (last) {
last->mnt_hash.next = unmounted.first;
+ if (unmounted.first)
+ unmounted.first->pprev = &last->mnt_hash.next;
unmounted.first = tmp_list.first;
unmounted.first->pprev = &unmounted.first;
}
@@ -1544,6 +1547,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
goto dput_and_out;
if (mnt->mnt.mnt_flags & MNT_LOCKED)
goto dput_and_out;
+ retval = -EPERM;
+ if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+ goto dput_and_out;
retval = do_umount(mnt, flags);
dput_and_out:
@@ -1569,17 +1575,13 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static bool is_mnt_ns_file(struct dentry *dentry)
{
/* Is this a proxy for a mount namespace? */
- struct inode *inode = dentry->d_inode;
- struct proc_ns *ei;
-
- if (!proc_ns_inode(inode))
- return false;
-
- ei = get_proc_ns(inode);
- if (ei->ns_ops != &mntns_operations)
- return false;
+ return dentry->d_op == &ns_dentry_operations &&
+ dentry->d_fsdata == &mntns_operations;
+}
- return true;
+struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct mnt_namespace, ns);
}
static bool mnt_ns_loop(struct dentry *dentry)
@@ -1591,7 +1593,7 @@ static bool mnt_ns_loop(struct dentry *dentry)
if (!is_mnt_ns_file(dentry))
return false;
- mnt_ns = get_proc_ns(dentry->d_inode)->ns;
+ mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
@@ -1610,7 +1612,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
if (IS_ERR(q))
return q;
- q->mnt.mnt_flags &= ~MNT_LOCKED;
q->mnt_mountpoint = mnt->mnt_mountpoint;
p = mnt;
@@ -2020,7 +2021,10 @@ static int do_loopback(struct path *path, const char *old_name,
if (IS_MNT_UNBINDABLE(old))
goto out2;
- if (!check_mnt(parent) || !check_mnt(old))
+ if (!check_mnt(parent))
+ goto out2;
+
+ if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
goto out2;
if (!recurse && has_locked_children(old, old_path.dentry))
@@ -2098,7 +2102,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
!(mnt_flags & MNT_NODEV)) {
- return -EPERM;
+ /* Was the nodev implicitly added in mount? */
+ if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
+ !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ mnt_flags |= MNT_NODEV;
+ } else {
+ return -EPERM;
+ }
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
!(mnt_flags & MNT_NOSUID)) {
@@ -2640,7 +2650,7 @@ dput_out:
static void free_mnt_ns(struct mnt_namespace *ns)
{
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
put_user_ns(ns->user_ns);
kfree(ns);
}
@@ -2662,11 +2672,12 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns)
return ERR_PTR(-ENOMEM);
- ret = proc_alloc_inum(&new_ns->proc_inum);
+ ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
return ERR_PTR(ret);
}
+ new_ns->ns.ops = &mntns_operations;
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
@@ -2958,6 +2969,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* mount new_root on / */
attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
touch_mnt_namespace(current->nsproxy->mnt_ns);
+ /* A moved mount should not expire automatically */
+ list_del_init(&new_mnt->mnt_expire);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
put_mountpoint(root_mp);
@@ -3002,6 +3015,7 @@ static void __init init_mount_tree(void)
root.mnt = mnt;
root.dentry = mnt->mnt_root;
+ mnt->mnt_flags |= MNT_LOCKED;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
@@ -3144,31 +3158,31 @@ found:
return visible;
}
-static void *mntns_get(struct task_struct *task)
+static struct ns_common *mntns_get(struct task_struct *task)
{
- struct mnt_namespace *ns = NULL;
+ struct ns_common *ns = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy) {
- ns = nsproxy->mnt_ns;
- get_mnt_ns(ns);
+ ns = &nsproxy->mnt_ns->ns;
+ get_mnt_ns(to_mnt_ns(ns));
}
task_unlock(task);
return ns;
}
-static void mntns_put(void *ns)
+static void mntns_put(struct ns_common *ns)
{
- put_mnt_ns(ns);
+ put_mnt_ns(to_mnt_ns(ns));
}
-static int mntns_install(struct nsproxy *nsproxy, void *ns)
+static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct fs_struct *fs = current->fs;
- struct mnt_namespace *mnt_ns = ns;
+ struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
struct path root;
if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
@@ -3198,17 +3212,10 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int mntns_inum(void *ns)
-{
- struct mnt_namespace *mnt_ns = ns;
- return mnt_ns->proc_inum;
-}
-
const struct proc_ns_operations mntns_operations = {
.name = "mnt",
.type = CLONE_NEWNS,
.get = mntns_get,
.put = mntns_put,
.install = mntns_install,
- .inum = mntns_inum,
};
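The mntns_operations conversion above follows the generic ns_common pattern: the namespace structure embeds struct ns_common (which now carries the proc inode number, via ns_alloc_inum()/ns_free_inum(), and the ops pointer), the get/put/install callbacks pass struct ns_common pointers around, and the container is recovered with container_of() as to_mnt_ns() does. A minimal sketch of the pattern with a placeholder type; struct example_ns and its helpers are illustrative only.

struct example_ns {
	atomic_t count;			/* placeholder refcount */
	struct ns_common ns;		/* inum + ops live here now */
};

static inline struct example_ns *to_example_ns(struct ns_common *ns)
{
	return container_of(ns, struct example_ns, ns);
}

static struct ns_common *example_ns_get(struct example_ns *e)
{
	atomic_inc(&e->count);
	return &e->ns;			/* callers see only the ns_common */
}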
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0beb023f25ac..ac71d13c69ef 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -33,6 +33,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
+#include <linux/falloc.h>
#include <linux/slab.h>
#include "idmap.h"
@@ -772,7 +773,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
- rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* check stateid */
if ((status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
@@ -1014,6 +1015,44 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
static __be32
+nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate, int flags)
+{
+ __be32 status = nfserr_notsupp;
+ struct file *file;
+
+ status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), cstate,
+ &fallocate->falloc_stateid,
+ WR_STATE, &file);
+ if (status != nfs_ok) {
+ dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ return status;
+ }
+
+ status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ fallocate->falloc_offset,
+ fallocate->falloc_length,
+ flags);
+ fput(file);
+ return status;
+}
+
+static __be32
+nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate)
+{
+ return nfsd4_fallocate(rqstp, cstate, fallocate, 0);
+}
+
+static __be32
+nfsd4_deallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_fallocate *fallocate)
+{
+ return nfsd4_fallocate(rqstp, cstate, fallocate,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+}
+
+static __be32
nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_seek *seek)
{
@@ -1331,7 +1370,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
* Don't use the deferral mechanism for NFSv4; compounds make it
* too hard to avoid non-idempotency problems.
*/
- rqstp->rq_usedeferral = false;
+ clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
/*
* According to RFC3010, this takes precedence over all other errors.
@@ -1447,7 +1486,7 @@ encode_op:
BUG_ON(cstate->replay_owner);
out:
/* Reset deferral mechanism for RPC deferrals */
- rqstp->rq_usedeferral = true;
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
dprintk("nfsv4 compound returned %d\n", ntohl(status));
return status;
}
@@ -1929,6 +1968,18 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
/* NFSv4.2 operations */
+ [OP_ALLOCATE] = {
+ .op_func = (nfsd4op_func)nfsd4_allocate,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_name = "OP_ALLOCATE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ },
+ [OP_DEALLOCATE] = {
+ .op_func = (nfsd4op_func)nfsd4_deallocate,
+ .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+ .op_name = "OP_DEALLOCATE",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
+ },
[OP_SEEK] = {
.op_func = (nfsd4op_func)nfsd4_seek,
.op_name = "OP_SEEK",
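Both new NFSv4.2 handlers above funnel into nfsd4_fallocate(); ALLOCATE passes a zero flags word while DEALLOCATE passes the same hole-punch flags a local caller would hand to fallocate(2). A hedged user-space illustration of those two flag combinations (demo.dat is a hypothetical scratch file, Linux-only):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);  /* hypothetical file */
        if (fd < 0) { perror("open"); return 1; }

        /* ALLOCATE-style: reserve 1 MiB at offset 0 (flags == 0). */
        if (fallocate(fd, 0, 0, 1 << 20) < 0)
                perror("fallocate(allocate)");

        /* DEALLOCATE-style: punch a hole without changing i_size. */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      4096, 64 * 1024) < 0)
                perror("fallocate(punch hole)");

        close(fd);
        return 0;
}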
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index e9c3afe4b5d3..3550a9c87616 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -41,7 +41,7 @@
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
@@ -275,9 +275,11 @@ opaque_hashval(const void *ptr, int nbytes)
return x;
}
-static void nfsd4_free_file(struct nfs4_file *f)
+static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
- kmem_cache_free(file_slab, f);
+ struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
+
+ kmem_cache_free(file_slab, fp);
}
static inline void
@@ -286,9 +288,10 @@ put_nfs4_file(struct nfs4_file *fi)
might_lock(&state_lock);
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
- hlist_del(&fi->fi_hash);
+ hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
- nfsd4_free_file(fi);
+ WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
+ call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}
@@ -594,7 +597,7 @@ static int delegation_blocked(struct knfsd_fh *fh)
}
spin_unlock(&blocked_delegations_lock);
}
- hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+ hash = jhash(&fh->fh_base, fh->fh_size, 0);
if (test_bit(hash&255, bd->set[0]) &&
test_bit((hash>>8)&255, bd->set[0]) &&
test_bit((hash>>16)&255, bd->set[0]))
@@ -613,7 +616,7 @@ static void block_delegations(struct knfsd_fh *fh)
u32 hash;
struct bloom_pair *bd = &blocked_delegations;
- hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+ hash = jhash(&fh->fh_base, fh->fh_size, 0);
spin_lock(&blocked_delegations_lock);
__set_bit(hash&255, bd->set[bd->new]);
@@ -1440,7 +1443,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
- if (cses->flags & SESSION4_BACK_CHAN) {
+ {
struct sockaddr *sa = svc_addr(rqstp);
/*
* This is a little silly; with sessions there's no real
@@ -1711,15 +1714,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
return 0;
}
-static long long
+static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
- long long res;
-
- res = o1->len - o2->len;
- if (res)
- return res;
- return (long long)memcmp(o1->data, o2->data, o1->len);
+ if (o1->len < o2->len)
+ return -1;
+ if (o1->len > o2->len)
+ return 1;
+ return memcmp(o1->data, o2->data, o1->len);
}
static int same_name(const char *n1, const char *n2)
@@ -1907,7 +1909,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
- long long cmp;
+ int cmp;
struct rb_node *node = root->rb_node;
struct nfs4_client *clp;
@@ -3057,10 +3059,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
}
/* OPEN Share state helper functions */
-static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
+static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
+ struct nfs4_file *fp)
{
- unsigned int hashval = file_hashval(fh);
-
lockdep_assert_held(&state_lock);
atomic_set(&fp->fi_ref, 1);
@@ -3073,7 +3074,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
- hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
+ hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
@@ -3294,17 +3295,14 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
/* search file_hashtbl[] for file */
static struct nfs4_file *
-find_file_locked(struct knfsd_fh *fh)
+find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
- unsigned int hashval = file_hashval(fh);
struct nfs4_file *fp;
- lockdep_assert_held(&state_lock);
-
- hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
+ hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
- get_nfs4_file(fp);
- return fp;
+ if (atomic_inc_not_zero(&fp->fi_ref))
+ return fp;
}
}
return NULL;
@@ -3314,10 +3312,11 @@ static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);
- spin_lock(&state_lock);
- fp = find_file_locked(fh);
- spin_unlock(&state_lock);
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
return fp;
}
@@ -3325,11 +3324,18 @@ static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);
+
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
+ if (fp)
+ return fp;
spin_lock(&state_lock);
- fp = find_file_locked(fh);
- if (fp == NULL) {
- nfsd4_init_file(new, fh);
+ fp = find_file_locked(fh, hashval);
+ if (likely(fp == NULL)) {
+ nfsd4_init_file(fh, hashval, new);
fp = new;
}
spin_unlock(&state_lock);
@@ -4127,7 +4133,7 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
nfs4_put_stateowner(so);
}
if (open->op_file)
- nfsd4_free_file(open->op_file);
+ kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
}
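One detail in the nfs4state.c changes above: delegation_blocked() and block_delegations() keep their Bloom-filter style check but switch the hash from arch_fast_hash() to jhash(); one 32-bit hash is sliced into three bytes, each indexing a 256-bit set, and a filehandle counts as blocked only when all three bits are set. A rough user-space sketch of that technique, using a stand-in FNV-1a hash in place of jhash() and a single bit set where the kernel rotates between two:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned char bits[256 / 8];                  /* one 256-bit set */

static uint32_t hash32(const void *data, size_t len) /* stand-in for jhash() */
{
        const unsigned char *p = data;
        uint32_t h = 2166136261u;
        while (len--) { h ^= *p++; h *= 16777619u; }
        return h;
}

static void set_bit8(unsigned int n)  { bits[n >> 3] |= 1u << (n & 7); }
static int  test_bit8(unsigned int n) { return bits[n >> 3] & (1u << (n & 7)); }

static void block(const void *fh, size_t len)
{
        uint32_t h = hash32(fh, len);
        set_bit8(h & 255); set_bit8((h >> 8) & 255); set_bit8((h >> 16) & 255);
}

static int blocked(const void *fh, size_t len)
{
        uint32_t h = hash32(fh, len);
        return test_bit8(h & 255) && test_bit8((h >> 8) & 255) &&
               test_bit8((h >> 16) & 255);
}

int main(void)
{
        const char fh[] = "example-filehandle";

        block(fh, sizeof(fh));
        printf("blocked=%d (false positives possible, false negatives not)\n",
               blocked(fh, sizeof(fh)));
        return 0;
}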
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b1eed4dd2eab..15f7b73e0c0f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1514,6 +1514,23 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str
}
static __be32
+nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
+ struct nfsd4_fallocate *fallocate)
+{
+ DECODE_HEAD;
+
+ status = nfsd4_decode_stateid(argp, &fallocate->falloc_stateid);
+ if (status)
+ return status;
+
+ READ_BUF(16);
+ p = xdr_decode_hyper(p, &fallocate->falloc_offset);
+ xdr_decode_hyper(p, &fallocate->falloc_length);
+
+ DECODE_TAIL;
+}
+
+static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
@@ -1604,10 +1621,10 @@ static nfsd4_dec nfsd4_dec_ops[] = {
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
/* new operations for NFSv4.2 */
- [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_COPY] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -1714,7 +1731,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
- argp->rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
DECODE_TAIL;
}
@@ -1795,9 +1812,12 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
}
else
end++;
+ if (found_esc)
+ end = next;
+
str = end;
}
- pathlen = htonl(xdr->buf->len - pathlen_offset);
+ pathlen = htonl(count);
write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4);
return 0;
}
@@ -3236,10 +3256,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
- WARN_ON_ONCE(resp->rqstp->rq_splice_ok);
+ WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
return nfserr_resource;
}
- if (resp->xdr.buf->page_len && resp->rqstp->rq_splice_ok) {
+ if (resp->xdr.buf->page_len && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
WARN_ON_ONCE(1);
return nfserr_resource;
}
@@ -3256,7 +3276,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
goto err_truncate;
}
- if (file->f_op->splice_read && resp->rqstp->rq_splice_ok)
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
err = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
err = nfsd4_encode_readv(resp, read, file, maxcount);
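Several hunks in this series (nfs4proc.c, nfs4xdr.c, nfscache.c, nfsfh.c, nfssvc.c, vfs.c) replace per-request bool fields such as rq_splice_ok and rq_usedeferral with single bits in an rq_flags word manipulated through set_bit()/clear_bit()/test_bit(). A user-space approximation of that flag-word pattern; the macros below are plain bit masks, whereas the kernel helpers are additionally atomic:

#include <stdio.h>

enum { RQ_SECURE, RQ_LOCAL, RQ_USEDEFERRAL, RQ_SPLICE_OK, RQ_DROPME };

#define SET_BIT(nr, flags)   (*(flags) |=  (1UL << (nr)))
#define CLEAR_BIT(nr, flags) (*(flags) &= ~(1UL << (nr)))
#define TEST_BIT(nr, flags)  (!!(*(flags) & (1UL << (nr))))

int main(void)
{
        unsigned long rq_flags = 0;

        SET_BIT(RQ_USEDEFERRAL, &rq_flags);
        SET_BIT(RQ_SPLICE_OK, &rq_flags);
        CLEAR_BIT(RQ_SPLICE_OK, &rq_flags);     /* e.g. multi-op compound */

        printf("usedeferral=%d splice_ok=%d\n",
               TEST_BIT(RQ_USEDEFERRAL, &rq_flags),
               TEST_BIT(RQ_SPLICE_OK, &rq_flags));
        return 0;
}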
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 122f69185ef5..83a9694ec485 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -490,7 +490,7 @@ found_entry:
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
- if (!rqstp->rq_secure && rp->c_secure)
+ if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
goto out;
/* Compose RPC reply header */
@@ -579,7 +579,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
spin_lock(&b->cache_lock);
drc_mem_usage += bufsize;
lru_put_end(b, rp);
- rp->c_secure = rqstp->rq_secure;
+ rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
rp->c_state = RC_DONE;
spin_unlock(&b->cache_lock);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 9506ea565610..19ace74d35f6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -608,7 +608,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
num);
sep = " ";
- if (len > remaining)
+ if (len >= remaining)
break;
remaining -= len;
buf += len;
@@ -623,7 +623,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
'+' : '-',
minor);
- if (len > remaining)
+ if (len >= remaining)
break;
remaining -= len;
buf += len;
@@ -631,7 +631,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
len = snprintf(buf, remaining, "\n");
- if (len > remaining)
+ if (len >= remaining)
return -EINVAL;
return tlen + len;
}
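The __write_versions() fixes above tighten the truncation test from len > remaining to len >= remaining: snprintf() returns the length the output would have had, so a return value equal to the buffer size already means the string was cut short to make room for the terminating NUL. A small stand-alone demonstration:

#include <stdio.h>

int main(void)
{
        char buf[8];
        int remaining = sizeof(buf);

        /* "12345678" needs 8 chars + NUL: truncated, and len == remaining. */
        int len = snprintf(buf, remaining, "%s", "12345678");

        printf("len=%d remaining=%d buf=\"%s\"\n", len, remaining, buf);
        printf("len >  remaining: %s\n", len >  remaining ? "truncated" : "ok (wrong!)");
        printf("len >= remaining: %s\n", len >= remaining ? "truncated" : "ok");
        return 0;
}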
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 88026fc6a981..965b478d50fc 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -86,7 +86,7 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
int flags = nfsexp_flags(rqstp, exp);
/* Check if the request originated from a secure port. */
- if (!rqstp->rq_secure && !(flags & NFSEXP_INSECURE_PORT)) {
+ if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && !(flags & NFSEXP_INSECURE_PORT)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 752d56bbe0ba..314f5c8f8f1a 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -692,7 +692,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
/* Now call the procedure handler, and encode NFS status. */
nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
nfserr = map_new_errors(rqstp->rq_vers, nfserr);
- if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
+ if (nfserr == nfserr_dropit || test_bit(RQ_DROPME, &rqstp->rq_flags)) {
dprintk("nfsd: Dropping request; may be revisited later\n");
nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
return 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 2712042a66b1..9d3be371240a 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -463,17 +463,24 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
*
- * These objects are global. nfsd only keeps one instance of a nfs4_file per
- * inode (though it may keep multiple file descriptors open per inode). These
- * are tracked in the file_hashtbl which is protected by the state_lock
- * spinlock.
+ * These objects are global. nfsd keeps one instance of a nfs4_file per
+ * filehandle (though it may keep multiple file descriptors for each). Each
+ * inode can have multiple filehandles associated with it, so there is
+ * (potentially) a many-to-one relationship between this struct and struct
+ * inode.
+ *
+ * These are hashed by filehandle in the file_hashtbl, which is protected by
+ * the global state_lock spinlock.
*/
struct nfs4_file {
atomic_t fi_ref;
spinlock_t fi_lock;
- struct hlist_node fi_hash; /* hash by "struct inode *" */
+ struct hlist_node fi_hash; /* hash on fi_fhandle */
struct list_head fi_stateids;
- struct list_head fi_delegations;
+ union {
+ struct list_head fi_delegations;
+ struct rcu_head fi_rcu;
+ };
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
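The nfs4_file change overlays fi_delegations with fi_rcu in a union: the put path warns if the delegation list is non-empty before call_rcu(), so the two members are never live at the same time and can share storage without growing the struct. A tiny sketch of that overlay, with simplified stand-in list_head/rcu_head types:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head  { void *next; void (*func)(struct rcu_head *); };

/* Both members present: pays for list_head and rcu_head separately. */
struct with_both {
        int refcount;
        struct list_head delegations;
        struct rcu_head  rcu;
};

/* Union overlay: legal because the delegation list is guaranteed empty
 * (and unused) by the time the RCU callback takes over the storage. */
struct with_union {
        int refcount;
        union {
                struct list_head delegations;
                struct rcu_head  rcu;
        };
};

int main(void)
{
        printf("separate members: %zu bytes\n", sizeof(struct with_both));
        printf("union overlay:    %zu bytes\n", sizeof(struct with_union));
        return 0;
}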
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 0a82e3c033ee..5685c679dd93 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
+#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
@@ -533,6 +534,26 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
#endif
+__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct file *file, loff_t offset, loff_t len,
+ int flags)
+{
+ __be32 err;
+ int error;
+
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return nfserr_inval;
+
+ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, NFSD_MAY_WRITE);
+ if (err)
+ return err;
+
+ error = vfs_fallocate(file, flags, offset, len);
+ if (!error)
+ error = commit_metadata(fhp);
+
+ return nfserrno(error);
+}
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
@@ -881,7 +902,7 @@ static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct file *file,
loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
- if (file->f_op->splice_read && rqstp->rq_splice_ok)
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags))
return nfsd_splice_read(rqstp, file, offset, count);
else
return nfsd_readv(file, offset, vec, vlen, count);
@@ -937,9 +958,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
int stable = *stablep;
int use_wgather;
loff_t pos = offset;
+ loff_t end = LLONG_MAX;
unsigned int pflags = current->flags;
- if (rqstp->rq_local)
+ if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
/*
* We want less throttling in balance_dirty_pages()
* and shrink_inactive_list() so that nfs to
@@ -967,10 +989,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
fsnotify_modify(file);
if (stable) {
- if (use_wgather)
+ if (use_wgather) {
host_err = wait_for_concurrent_writes(file);
- else
- host_err = vfs_fsync_range(file, offset, offset+*cnt, 0);
+ } else {
+ if (*cnt)
+ end = offset + *cnt - 1;
+ host_err = vfs_fsync_range(file, offset, end, 0);
+ }
}
out_nfserr:
@@ -979,7 +1004,7 @@ out_nfserr:
err = 0;
else
err = nfserrno(host_err);
- if (rqstp->rq_local)
+ if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
tsk_restore_flags(current, pflags, PF_LESS_THROTTLE);
return err;
}
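In the nfsd_vfs_write() hunk above, the stable-write sync now passes an inclusive end to vfs_fsync_range(): a write of *cnt bytes at offset ends at offset + *cnt - 1, and a zero-length write falls back to LLONG_MAX (sync to end of file). A minimal sketch of that range computation and its edge case:

#include <limits.h>
#include <stdio.h>

/* Inclusive end of a byte range; 0 bytes means "to end of file". */
static long long range_end(long long offset, unsigned long count)
{
        return count ? offset + (long long)count - 1 : LLONG_MAX;
}

int main(void)
{
        printf("offset=4096 count=8192 -> end=%lld\n", range_end(4096, 8192));
        printf("offset=4096 count=0    -> end=%lld\n", range_end(4096, 0));
        return 0;
}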
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index b1796d6ee538..2050cb016998 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -54,6 +54,8 @@ int nfsd_mountpoint(struct dentry *, struct svc_export *);
#ifdef CONFIG_NFSD_V4
__be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
struct xdr_netobj *);
+__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
+ struct file *, loff_t, loff_t, int);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 5720e9457f33..90a5925bd6ab 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -428,6 +428,13 @@ struct nfsd4_reclaim_complete {
u32 rca_one_fs;
};
+struct nfsd4_fallocate {
+ /* request */
+ stateid_t falloc_stateid;
+ loff_t falloc_offset;
+ u64 falloc_length;
+};
+
struct nfsd4_seek {
/* request */
stateid_t seek_stateid;
@@ -486,6 +493,8 @@ struct nfsd4_op {
struct nfsd4_free_stateid free_stateid;
/* NFSv4.2 */
+ struct nfsd4_fallocate allocate;
+ struct nfsd4_fallocate deallocate;
struct nfsd4_seek seek;
} u;
struct nfs4_replay * replay;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index caaaf9dfe353..44523f4a6084 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -69,8 +69,8 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
if (old_mask == new_mask)
return;
- if (fsn_mark->i.inode)
- fsnotify_recalc_inode_mask(fsn_mark->i.inode);
+ if (fsn_mark->inode)
+ fsnotify_recalc_inode_mask(fsn_mark->inode);
}
/*
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 6ffd220eb14d..58b7cdb63da9 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -80,7 +80,7 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
return;
inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
- inode = igrab(mark->i.inode);
+ inode = igrab(mark->inode);
if (inode) {
seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:%x ",
inode_mark->wd, inode->i_ino, inode->i_sb->s_dev,
@@ -112,7 +112,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
mflags |= FAN_MARK_IGNORED_SURV_MODIFY;
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
- inode = igrab(mark->i.inode);
+ inode = igrab(mark->inode);
if (!inode)
return;
seq_printf(m, "fanotify ino:%lx sdev:%x mflags:%x mask:%x ignored_mask:%x ",
@@ -122,7 +122,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
seq_putc(m, '\n');
iput(inode);
} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT) {
- struct mount *mnt = real_mount(mark->m.mnt);
+ struct mount *mnt = real_mount(mark->mnt);
seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
mnt->mnt_id, mflags, mark->mask, mark->ignored_mask);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 41e39102743a..dd3fb0b17be7 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -242,13 +242,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
if (inode_node) {
inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
- struct fsnotify_mark, i.i_list);
+ struct fsnotify_mark, obj_list);
inode_group = inode_mark->group;
}
if (vfsmount_node) {
vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
- struct fsnotify_mark, m.m_list);
+ struct fsnotify_mark, obj_list);
vfsmount_group = vfsmount_mark->group;
}
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 3b68b0ae0a97..13a00be516d2 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -12,12 +12,19 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group);
/* protects reads of inode and vfsmount marks list */
extern struct srcu_struct fsnotify_mark_srcu;
+/* Calculate mask of events for a list of marks */
+extern u32 fsnotify_recalc_mask(struct hlist_head *head);
+
/* compare two groups for sorting of marks lists */
extern int fsnotify_compare_groups(struct fsnotify_group *a,
struct fsnotify_group *b);
extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
__u32 mask);
+/* Add mark to a proper place in mark list */
+extern int fsnotify_add_mark_list(struct hlist_head *head,
+ struct fsnotify_mark *mark,
+ int allow_dups);
/* add a mark to an inode */
extern int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
@@ -31,6 +38,11 @@ extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
/* inode specific destruction of a mark */
extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
+/* Destroy all marks in the given list */
+extern void fsnotify_destroy_marks(struct list_head *to_free);
+/* Find mark belonging to given group in the list of marks */
+extern struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
+ struct fsnotify_group *group);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
/* run the list of all marks associated with vfsmount and flag them to be freed */
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index dfbf5447eea4..3daf513ee99e 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -31,28 +31,13 @@
#include "../internal.h"
/*
- * Recalculate the mask of events relevant to a given inode locked.
- */
-static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
-{
- struct fsnotify_mark *mark;
- __u32 new_mask = 0;
-
- assert_spin_locked(&inode->i_lock);
-
- hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
- new_mask |= mark->mask;
- inode->i_fsnotify_mask = new_mask;
-}
-
-/*
* Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
* any notifier is interested in hearing for this inode.
*/
void fsnotify_recalc_inode_mask(struct inode *inode)
{
spin_lock(&inode->i_lock);
- fsnotify_recalc_inode_mask_locked(inode);
+ inode->i_fsnotify_mask = fsnotify_recalc_mask(&inode->i_fsnotify_marks);
spin_unlock(&inode->i_lock);
__fsnotify_update_child_dentry_flags(inode);
@@ -60,23 +45,22 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
- struct inode *inode = mark->i.inode;
+ struct inode *inode = mark->inode;
BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
spin_lock(&inode->i_lock);
- hlist_del_init_rcu(&mark->i.i_list);
- mark->i.inode = NULL;
+ hlist_del_init_rcu(&mark->obj_list);
+ mark->inode = NULL;
/*
* this mark is now off the inode->i_fsnotify_marks list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
- fsnotify_recalc_inode_mask_locked(inode);
-
+ inode->i_fsnotify_mask = fsnotify_recalc_mask(&inode->i_fsnotify_marks);
spin_unlock(&inode->i_lock);
}
@@ -85,30 +69,19 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
- struct fsnotify_mark *mark, *lmark;
+ struct fsnotify_mark *mark;
struct hlist_node *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
- hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
- list_add(&mark->i.free_i_list, &free_list);
- hlist_del_init_rcu(&mark->i.i_list);
+ hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, obj_list) {
+ list_add(&mark->free_list, &free_list);
+ hlist_del_init_rcu(&mark->obj_list);
fsnotify_get_mark(mark);
}
spin_unlock(&inode->i_lock);
- list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
- struct fsnotify_group *group;
-
- spin_lock(&mark->lock);
- fsnotify_get_group(mark->group);
- group = mark->group;
- spin_unlock(&mark->lock);
-
- fsnotify_destroy_mark(mark, group);
- fsnotify_put_mark(mark);
- fsnotify_put_group(group);
- }
+ fsnotify_destroy_marks(&free_list);
}
/*
@@ -123,34 +96,13 @@ void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
-static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
- struct fsnotify_group *group,
- struct inode *inode)
-{
- struct fsnotify_mark *mark;
-
- assert_spin_locked(&inode->i_lock);
-
- hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
- if (mark->group == group) {
- fsnotify_get_mark(mark);
- return mark;
- }
- }
- return NULL;
-}
-
-/*
- * given a group and inode, find the mark associated with that combination.
- * if found take a reference to that mark and return it, else return NULL
- */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
struct inode *inode)
{
struct fsnotify_mark *mark;
spin_lock(&inode->i_lock);
- mark = fsnotify_find_inode_mark_locked(group, inode);
+ mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
spin_unlock(&inode->i_lock);
return mark;
@@ -168,10 +120,10 @@ void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
assert_spin_locked(&mark->lock);
if (mask &&
- mark->i.inode &&
+ mark->inode &&
!(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
- inode = igrab(mark->i.inode);
+ inode = igrab(mark->inode);
/*
* we shouldn't be able to get here if the inode wasn't
* already safely held in memory. But bug in case it
@@ -192,9 +144,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct inode *inode,
int allow_dups)
{
- struct fsnotify_mark *lmark, *last = NULL;
- int ret = 0;
- int cmp;
+ int ret;
mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -202,37 +152,10 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
assert_spin_locked(&mark->lock);
spin_lock(&inode->i_lock);
-
- mark->i.inode = inode;
-
- /* is mark the first mark? */
- if (hlist_empty(&inode->i_fsnotify_marks)) {
- hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
- goto out;
- }
-
- /* should mark be in the middle of the current list? */
- hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
- last = lmark;
-
- if ((lmark->group == group) && !allow_dups) {
- ret = -EEXIST;
- goto out;
- }
-
- cmp = fsnotify_compare_groups(lmark->group, mark->group);
- if (cmp < 0)
- continue;
-
- hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
- goto out;
- }
-
- BUG_ON(last == NULL);
- /* mark should be the last entry. last is the current last entry */
- hlist_add_behind_rcu(&mark->i.i_list, &last->i.i_list);
-out:
- fsnotify_recalc_inode_mask_locked(inode);
+ mark->inode = inode;
+ ret = fsnotify_add_mark_list(&inode->i_fsnotify_marks, mark,
+ allow_dups);
+ inode->i_fsnotify_mask = fsnotify_recalc_mask(&inode->i_fsnotify_marks);
spin_unlock(&inode->i_lock);
return ret;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 7d888d77d59a..2cd900c2c737 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -156,7 +156,7 @@ static int idr_callback(int id, void *p, void *data)
*/
if (fsn_mark)
printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
- fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
+ fsn_mark->group, fsn_mark->inode, i_mark->wd);
return 0;
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 283aa312d745..450648697433 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -433,7 +433,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
- i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
goto out;
}
@@ -442,7 +442,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
- i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
goto out;
}
@@ -456,9 +456,9 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
- i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
+ i_mark->fsn_mark.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
- found_i_mark->fsn_mark.i.inode);
+ found_i_mark->fsn_mark.inode);
goto out;
}
@@ -470,7 +470,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
- i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ i_mark->fsn_mark.group, i_mark->fsn_mark.inode);
/* we can't really recover with bad ref cnting.. */
BUG();
}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 34c38fabf514..92e48c70f0f0 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -110,6 +110,17 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
}
}
+/* Calculate mask of events for a list of marks */
+u32 fsnotify_recalc_mask(struct hlist_head *head)
+{
+ u32 new_mask = 0;
+ struct fsnotify_mark *mark;
+
+ hlist_for_each_entry(mark, head, obj_list)
+ new_mask |= mark->mask;
+ return new_mask;
+}
+
/*
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
@@ -133,7 +144,7 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
- inode = mark->i.inode;
+ inode = mark->inode;
fsnotify_destroy_inode_mark(mark);
} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
fsnotify_destroy_vfsmount_mark(mark);
@@ -150,7 +161,7 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
mutex_unlock(&group->mark_mutex);
spin_lock(&destroy_lock);
- list_add(&mark->destroy_list, &destroy_list);
+ list_add(&mark->g_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
/*
@@ -192,6 +203,27 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
mutex_unlock(&group->mark_mutex);
}
+/*
+ * Destroy all marks in the given list. The marks must already be detached
+ * from their original inode / vfsmount.
+ */
+void fsnotify_destroy_marks(struct list_head *to_free)
+{
+ struct fsnotify_mark *mark, *lmark;
+ struct fsnotify_group *group;
+
+ list_for_each_entry_safe(mark, lmark, to_free, free_list) {
+ spin_lock(&mark->lock);
+ fsnotify_get_group(mark->group);
+ group = mark->group;
+ spin_unlock(&mark->lock);
+
+ fsnotify_destroy_mark(mark, group);
+ fsnotify_put_mark(mark);
+ fsnotify_put_group(group);
+ }
+}
+
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
assert_spin_locked(&mark->lock);
@@ -245,6 +277,39 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
return -1;
}
+/* Add mark into proper place in given list of marks */
+int fsnotify_add_mark_list(struct hlist_head *head, struct fsnotify_mark *mark,
+ int allow_dups)
+{
+ struct fsnotify_mark *lmark, *last = NULL;
+ int cmp;
+
+ /* is mark the first mark? */
+ if (hlist_empty(head)) {
+ hlist_add_head_rcu(&mark->obj_list, head);
+ return 0;
+ }
+
+ /* should mark be in the middle of the current list? */
+ hlist_for_each_entry(lmark, head, obj_list) {
+ last = lmark;
+
+ if ((lmark->group == mark->group) && !allow_dups)
+ return -EEXIST;
+
+ cmp = fsnotify_compare_groups(lmark->group, mark->group);
+ if (cmp >= 0) {
+ hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
+ return 0;
+ }
+ }
+
+ BUG_ON(last == NULL);
+ /* mark should be the last entry. last is the current last entry */
+ hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
+ return 0;
+}
+
/*
* Attach an initialized mark to a given group and fs object.
* These marks may be used for the fsnotify backend to determine which
@@ -305,7 +370,7 @@ err:
spin_unlock(&mark->lock);
spin_lock(&destroy_lock);
- list_add(&mark->destroy_list, &destroy_list);
+ list_add(&mark->g_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
@@ -323,6 +388,24 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
}
/*
+ * Given a list of marks, find the mark associated with given group. If found
+ * take a reference to that mark and return it, else return NULL.
+ */
+struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
+ struct fsnotify_group *group)
+{
+ struct fsnotify_mark *mark;
+
+ hlist_for_each_entry(mark, head, obj_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
+ }
+ }
+ return NULL;
+}
+
+/*
* clear any marks in a group in which mark->flags & flags is true
*/
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
@@ -352,8 +435,8 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
assert_spin_locked(&old->lock);
- new->i.inode = old->i.inode;
- new->m.mnt = old->m.mnt;
+ new->inode = old->inode;
+ new->mnt = old->mnt;
if (old->group)
fsnotify_get_group(old->group);
new->group = old->group;
@@ -386,8 +469,8 @@ static int fsnotify_mark_destroy(void *ignored)
synchronize_srcu(&fsnotify_mark_srcu);
- list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
- list_del_init(&mark->destroy_list);
+ list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
+ list_del_init(&mark->g_list);
fsnotify_put_mark(mark);
}
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index faefa72a11eb..326b148e623c 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -32,31 +32,20 @@
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
- struct fsnotify_mark *mark, *lmark;
+ struct fsnotify_mark *mark;
struct hlist_node *n;
struct mount *m = real_mount(mnt);
LIST_HEAD(free_list);
spin_lock(&mnt->mnt_root->d_lock);
- hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
- list_add(&mark->m.free_m_list, &free_list);
- hlist_del_init_rcu(&mark->m.m_list);
+ hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, obj_list) {
+ list_add(&mark->free_list, &free_list);
+ hlist_del_init_rcu(&mark->obj_list);
fsnotify_get_mark(mark);
}
spin_unlock(&mnt->mnt_root->d_lock);
- list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
- struct fsnotify_group *group;
-
- spin_lock(&mark->lock);
- fsnotify_get_group(mark->group);
- group = mark->group;
- spin_unlock(&mark->lock);
-
- fsnotify_destroy_mark(mark, group);
- fsnotify_put_mark(mark);
- fsnotify_put_group(group);
- }
+ fsnotify_destroy_marks(&free_list);
}
void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
@@ -65,66 +54,35 @@ void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
}
/*
- * Recalculate the mask of events relevant to a given vfsmount locked.
- */
-static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
-{
- struct mount *m = real_mount(mnt);
- struct fsnotify_mark *mark;
- __u32 new_mask = 0;
-
- assert_spin_locked(&mnt->mnt_root->d_lock);
-
- hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
- new_mask |= mark->mask;
- m->mnt_fsnotify_mask = new_mask;
-}
-
-/*
* Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
* any notifier is interested in hearing for this mount point
*/
void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
{
+ struct mount *m = real_mount(mnt);
+
spin_lock(&mnt->mnt_root->d_lock);
- fsnotify_recalc_vfsmount_mask_locked(mnt);
+ m->mnt_fsnotify_mask = fsnotify_recalc_mask(&m->mnt_fsnotify_marks);
spin_unlock(&mnt->mnt_root->d_lock);
}
void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
- struct vfsmount *mnt = mark->m.mnt;
+ struct vfsmount *mnt = mark->mnt;
+ struct mount *m = real_mount(mnt);
BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
spin_lock(&mnt->mnt_root->d_lock);
- hlist_del_init_rcu(&mark->m.m_list);
- mark->m.mnt = NULL;
-
- fsnotify_recalc_vfsmount_mask_locked(mnt);
+ hlist_del_init_rcu(&mark->obj_list);
+ mark->mnt = NULL;
+ m->mnt_fsnotify_mask = fsnotify_recalc_mask(&m->mnt_fsnotify_marks);
spin_unlock(&mnt->mnt_root->d_lock);
}
-static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
- struct vfsmount *mnt)
-{
- struct mount *m = real_mount(mnt);
- struct fsnotify_mark *mark;
-
- assert_spin_locked(&mnt->mnt_root->d_lock);
-
- hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
- if (mark->group == group) {
- fsnotify_get_mark(mark);
- return mark;
- }
- }
- return NULL;
-}
-
/*
* given a group and vfsmount, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
@@ -132,10 +90,11 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
struct vfsmount *mnt)
{
+ struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
spin_lock(&mnt->mnt_root->d_lock);
- mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ mark = fsnotify_find_mark(&m->mnt_fsnotify_marks, group);
spin_unlock(&mnt->mnt_root->d_lock);
return mark;
@@ -151,9 +110,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
int allow_dups)
{
struct mount *m = real_mount(mnt);
- struct fsnotify_mark *lmark, *last = NULL;
- int ret = 0;
- int cmp;
+ int ret;
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -161,37 +118,9 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
assert_spin_locked(&mark->lock);
spin_lock(&mnt->mnt_root->d_lock);
-
- mark->m.mnt = mnt;
-
- /* is mark the first mark? */
- if (hlist_empty(&m->mnt_fsnotify_marks)) {
- hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
- goto out;
- }
-
- /* should mark be in the middle of the current list? */
- hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
- last = lmark;
-
- if ((lmark->group == group) && !allow_dups) {
- ret = -EEXIST;
- goto out;
- }
-
- cmp = fsnotify_compare_groups(lmark->group, mark->group);
- if (cmp < 0)
- continue;
-
- hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
- goto out;
- }
-
- BUG_ON(last == NULL);
- /* mark should be the last entry. last is the current last entry */
- hlist_add_behind_rcu(&mark->m.m_list, &last->m.m_list);
-out:
- fsnotify_recalc_vfsmount_mask_locked(mnt);
+ mark->mnt = mnt;
+ ret = fsnotify_add_mark_list(&m->mnt_fsnotify_marks, mark, allow_dups);
+ m->mnt_fsnotify_mask = fsnotify_recalc_mask(&m->mnt_fsnotify_marks);
spin_unlock(&mnt->mnt_root->d_lock);
return ret;
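The inode and vfsmount mark code above now shares a single set of helpers keyed on obj_list; fsnotify_add_mark_list() keeps each list sorted by the group comparison and returns -EEXIST on duplicates unless allow_dups is set. A simplified user-space sketch of that insert-sorted-or-EEXIST pattern, using an integer priority as a stand-in for both the group ordering and the duplicate check:

#include <errno.h>
#include <stdio.h>

struct mark {
        int prio;               /* stand-in for fsnotify_compare_groups() */
        struct mark *next;
};

/* Insert 'new' keeping the list sorted by prio; -EEXIST on duplicates. */
static int add_mark_list(struct mark **head, struct mark *new, int allow_dups)
{
        struct mark **pos = head;

        for (; *pos; pos = &(*pos)->next) {
                if ((*pos)->prio == new->prio && !allow_dups)
                        return -EEXIST;
                if ((*pos)->prio >= new->prio)
                        break;          /* insert before the first >= entry */
        }
        new->next = *pos;
        *pos = new;
        return 0;
}

int main(void)
{
        struct mark *head = NULL, a = { .prio = 2 }, b = { .prio = 1 }, c = { .prio = 2 };

        add_mark_list(&head, &a, 0);
        add_mark_list(&head, &b, 0);
        printf("duplicate insert -> %d\n", add_mark_list(&head, &c, 0));
        for (struct mark *m = head; m; m = m->next)
                printf("prio %d\n", m->prio);
        return 0;
}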
diff --git a/fs/nsfs.c b/fs/nsfs.c
new file mode 100644
index 000000000000..af1b24fa899d
--- /dev/null
+++ b/fs/nsfs.c
@@ -0,0 +1,161 @@
+#include <linux/mount.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/proc_ns.h>
+#include <linux/magic.h>
+#include <linux/ktime.h>
+
+static struct vfsmount *nsfs_mnt;
+
+static const struct file_operations ns_file_operations = {
+ .llseek = no_llseek,
+};
+
+static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
+
+ return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
+ ns_ops->name, inode->i_ino);
+}
+
+static void ns_prune_dentry(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ if (inode) {
+ struct ns_common *ns = inode->i_private;
+ atomic_long_set(&ns->stashed, 0);
+ }
+}
+
+const struct dentry_operations ns_dentry_operations =
+{
+ .d_prune = ns_prune_dentry,
+ .d_delete = always_delete_dentry,
+ .d_dname = ns_dname,
+};
+
+static void nsfs_evict(struct inode *inode)
+{
+ struct ns_common *ns = inode->i_private;
+ clear_inode(inode);
+ ns->ops->put(ns);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct vfsmount *mnt = mntget(nsfs_mnt);
+ struct qstr qname = { .name = "", };
+ struct dentry *dentry;
+ struct inode *inode;
+ struct ns_common *ns;
+ unsigned long d;
+
+again:
+ ns = ns_ops->get(task);
+ if (!ns) {
+ mntput(mnt);
+ return ERR_PTR(-ENOENT);
+ }
+ rcu_read_lock();
+ d = atomic_long_read(&ns->stashed);
+ if (!d)
+ goto slow;
+ dentry = (struct dentry *)d;
+ if (!lockref_get_not_dead(&dentry->d_lockref))
+ goto slow;
+ rcu_read_unlock();
+ ns_ops->put(ns);
+got_it:
+ path->mnt = mnt;
+ path->dentry = dentry;
+ return NULL;
+slow:
+ rcu_read_unlock();
+ inode = new_inode_pseudo(mnt->mnt_sb);
+ if (!inode) {
+ ns_ops->put(ns);
+ mntput(mnt);
+ return ERR_PTR(-ENOMEM);
+ }
+ inode->i_ino = ns->inum;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode = S_IFREG | S_IRUGO;
+ inode->i_fop = &ns_file_operations;
+ inode->i_private = ns;
+
+ dentry = d_alloc_pseudo(mnt->mnt_sb, &qname);
+ if (!dentry) {
+ iput(inode);
+ mntput(mnt);
+ return ERR_PTR(-ENOMEM);
+ }
+ d_instantiate(dentry, inode);
+ dentry->d_fsdata = (void *)ns_ops;
+ d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
+ if (d) {
+ d_delete(dentry); /* make sure ->d_prune() does nothing */
+ dput(dentry);
+ cpu_relax();
+ goto again;
+ }
+ goto got_it;
+}
+
+int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops)
+{
+ struct ns_common *ns;
+ int res = -ENOENT;
+ ns = ns_ops->get(task);
+ if (ns) {
+ res = snprintf(buf, size, "%s:[%u]", ns_ops->name, ns->inum);
+ ns_ops->put(ns);
+ }
+ return res;
+}
+
+struct file *proc_ns_fget(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (file->f_op != &ns_file_operations)
+ goto out_invalid;
+
+ return file;
+
+out_invalid:
+ fput(file);
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct super_operations nsfs_ops = {
+ .statfs = simple_statfs,
+ .evict_inode = nsfs_evict,
+};
+static struct dentry *nsfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "nsfs:", &nsfs_ops,
+ &ns_dentry_operations, NSFS_MAGIC);
+}
+static struct file_system_type nsfs = {
+ .name = "nsfs",
+ .mount = nsfs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+void __init nsfs_init(void)
+{
+ nsfs_mnt = kern_mount(&nsfs);
+ if (IS_ERR(nsfs_mnt))
+ panic("can't set nsfs up\n");
+ nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+}
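ns_get_path() in the new fs/nsfs.c stashes the dentry pointer in ns->stashed with a compare-and-swap, so only one of several racing tasks publishes its freshly built dentry while the losers drop theirs and retry. A stripped-down user-space sketch of that publish-once pattern using C11 atomics; build_object()/discard_object() are placeholders, and the sketch omits the lifetime checks the kernel does with lockref:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(uintptr_t) stashed;      /* 0 means "nothing cached yet" */

struct object { int id; };

static struct object *build_object(void) { return malloc(sizeof(struct object)); }
static void discard_object(struct object *o) { free(o); }

static struct object *get_object(void)
{
        for (;;) {
                uintptr_t cur = atomic_load(&stashed);
                if (cur)
                        return (struct object *)cur;    /* fast path: reuse cached */

                struct object *o = build_object();
                uintptr_t expected = 0;
                if (atomic_compare_exchange_strong(&stashed, &expected,
                                                   (uintptr_t)o))
                        return o;                       /* we published it */

                discard_object(o);                      /* lost the race; retry */
        }
}

int main(void)
{
        printf("same pointer twice: %d\n", get_object() == get_object());
        return 0;
}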
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index a93bf9892256..fcae9ef1a328 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5662,7 +5662,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 cpos, u32 phys_cpos, u32 len, int flags,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- u64 refcount_loc)
+ u64 refcount_loc, bool refcount_tree_locked)
{
int ret, credits = 0, extra_blocks = 0;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
@@ -5676,11 +5676,13 @@ int ocfs2_remove_btree_range(struct inode *inode,
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
OCFS2_HAS_REFCOUNT_FL));
- ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
- &ref_tree, NULL);
- if (ret) {
- mlog_errno(ret);
- goto bail;
+ if (!refcount_tree_locked) {
+ ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
}
ret = ocfs2_prepare_refcount_change_for_del(inode,
@@ -7021,6 +7023,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
u64 refcount_loc = le64_to_cpu(di->i_refcount_loc);
struct ocfs2_extent_tree et;
struct ocfs2_cached_dealloc_ctxt dealloc;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -7130,9 +7133,18 @@ start:
phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
+ if ((flags & OCFS2_EXT_REFCOUNTED) && trunc_len && !ref_tree) {
+ status = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, NULL);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
status = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags, &dealloc,
- refcount_loc);
+ refcount_loc, true);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -7147,6 +7159,8 @@ start:
goto start;
bail:
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
ocfs2_schedule_truncate_log_flush(osb, 1);
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index ca381c584127..fb09b97db162 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -142,7 +142,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 cpos, u32 phys_cpos, u32 len, int flags,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- u64 refcount_loc);
+ u64 refcount_loc, bool refcount_tree_locked);
int ocfs2_num_free_extents(struct ocfs2_super *osb,
struct ocfs2_extent_tree *et);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index d9f222987f24..46d93e941f3d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -894,7 +894,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
}
}
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
int i;
@@ -915,7 +915,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
page_cache_release(wc->w_target_page);
}
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+}
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+ ocfs2_unlock_pages(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
@@ -2042,11 +2046,19 @@ out_write_size:
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, wc->w_di_bh);
+ /* Unlock pages before running deallocs, since dealloc needs to acquire the
+ * j_trans_barrier lock; otherwise we deadlock, because the journal commit
+ * thread holds that lock and asks for the page lock when flushing data.
+ * Keeping the unlock here preserves the unlock order.
+ */
+ ocfs2_unlock_pages(wc);
+
ocfs2_commit_trans(osb, handle);
ocfs2_run_deallocs(osb, &wc->w_dealloc);
- ocfs2_free_write_ctxt(wc);
+ brelse(wc->w_di_bh);
+ kfree(wc);
return copied;
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 79d56dc981bc..319e786175af 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -4479,7 +4479,7 @@ int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno);
ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0,
- &dealloc, 0);
+ &dealloc, 0, false);
if (ret) {
mlog_errno(ret);
goto out;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3689b3592042..a6944b25fd5b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -695,14 +695,6 @@ void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
res->inflight_assert_workers);
}
-static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res)
-{
- spin_lock(&res->spinlock);
- __dlm_lockres_grab_inflight_worker(dlm, res);
- spin_unlock(&res->spinlock);
-}
-
static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
@@ -1646,6 +1638,7 @@ send_response:
}
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
+ spin_lock(&res->spinlock);
ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
@@ -1653,7 +1646,8 @@ send_response:
response = DLM_MASTER_RESP_ERROR;
dlm_lockres_put(res);
} else
- dlm_lockres_grab_inflight_worker(dlm, res);
+ __dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
} else {
if (res)
dlm_lockres_put(res);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 69fb9f75b082..3950693dd0f6 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1803,7 +1803,7 @@ static int ocfs2_remove_inode_range(struct inode *inode,
ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
phys_cpos, trunc_len, flags,
- &dealloc, refcount_loc);
+ &dealloc, refcount_loc, false);
if (ret < 0) {
mlog_errno(ret);
goto out;
diff --git a/fs/open.c b/fs/open.c
index b1bf3d542d5d..813be037b412 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -222,7 +222,7 @@ SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
#endif /* BITS_PER_LONG == 32 */
-int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
long ret;
@@ -295,9 +295,21 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
sb_start_write(inode->i_sb);
ret = file->f_op->fallocate(file, mode, offset, len);
+
+ /*
+ * Create inotify and fanotify events.
+ *
+ * To keep the logic simple, always create events if fallocate succeeds.
+ * This implies that events are created even if the file size remains
+ * unchanged, e.g. when using the FALLOC_FL_KEEP_SIZE flag.
+ */
+ if (ret == 0)
+ fsnotify_modify(file);
+
sb_end_write(inode->i_sb);
return ret;
}
+EXPORT_SYMBOL_GPL(vfs_fallocate);
SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
{
@@ -305,7 +317,7 @@ SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
int error = -EBADF;
if (f.file) {
- error = do_fallocate(f.file, mode, offset, len);
+ error = vfs_fallocate(f.file, mode, offset, len);
fdput(f);
}
return error;
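With the fs/open.c change above, a successful fallocate() now raises an fsnotify modify event, so inotify and fanotify watchers see it like any other write. A user-space check of that behaviour on a kernel carrying this change (demo.dat is a hypothetical scratch file):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
        int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);  /* hypothetical file */
        int in = inotify_init1(0);
        char buf[4096];

        if (fd < 0 || in < 0) { perror("setup"); return 1; }
        if (inotify_add_watch(in, "demo.dat", IN_MODIFY) < 0) {
                perror("inotify_add_watch"); return 1;
        }

        /* With this change, a successful fallocate() raises IN_MODIFY. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
                perror("fallocate");

        ssize_t n = read(in, buf, sizeof(buf));  /* blocks until an event arrives */
        printf("read %zd bytes of inotify events\n", n);

        close(in);
        close(fd);
        return 0;
}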
diff --git a/fs/pnode.c b/fs/pnode.c
index aae331a5d03b..260ac8f898a4 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -242,6 +242,7 @@ static int propagate_one(struct mount *m)
child = copy_tree(last_source, last_source->mnt.mnt_root, type);
if (IS_ERR(child))
return PTR_ERR(child);
+ child->mnt.mnt_flags &= ~MNT_LOCKED;
mnt_set_mountpoint(m, mp, child);
last_dest = m;
last_source = child;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 590aeda5af12..3f3d7aeb0712 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2464,6 +2464,57 @@ static const struct file_operations proc_projid_map_operations = {
.llseek = seq_lseek,
.release = proc_id_map_release,
};
+
+static int proc_setgroups_open(struct inode *inode, struct file *file)
+{
+ struct user_namespace *ns = NULL;
+ struct task_struct *task;
+ int ret;
+
+ ret = -ESRCH;
+ task = get_proc_task(inode);
+ if (task) {
+ rcu_read_lock();
+ ns = get_user_ns(task_cred_xxx(task, user_ns));
+ rcu_read_unlock();
+ put_task_struct(task);
+ }
+ if (!ns)
+ goto err;
+
+ if (file->f_mode & FMODE_WRITE) {
+ ret = -EACCES;
+ if (!ns_capable(ns, CAP_SYS_ADMIN))
+ goto err_put_ns;
+ }
+
+ ret = single_open(file, &proc_setgroups_show, ns);
+ if (ret)
+ goto err_put_ns;
+
+ return 0;
+err_put_ns:
+ put_user_ns(ns);
+err:
+ return ret;
+}
+
+static int proc_setgroups_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+ int ret = single_release(inode, file);
+ put_user_ns(ns);
+ return ret;
+}
+
+static const struct file_operations proc_setgroups_operations = {
+ .open = proc_setgroups_open,
+ .write = proc_setgroups_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = proc_setgroups_release,
+};
#endif /* CONFIG_USER_NS */
static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
@@ -2572,6 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
#ifdef CONFIG_CHECKPOINT_RESTORE
REG("timers", S_IRUGO, proc_timers_operations),
@@ -2916,6 +2968,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
};
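The new /proc/<pid>/setgroups file above gates gid_map writes in user namespaces; on kernels that provide it, an unprivileged process must write "deny" there before it may write gid_map. A small self-contained example of that sequence (error handling kept minimal):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *s)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, s, strlen(s)) < 0)
                perror(path);
        if (fd >= 0)
                close(fd);
}

int main(void)
{
        uid_t uid = getuid();
        gid_t gid = getgid();
        char map[64];

        if (unshare(CLONE_NEWUSER) < 0) { perror("unshare"); return 1; }

        /* On kernels with this file, unprivileged gid_map writes require
         * "deny" to be written first. */
        write_file("/proc/self/setgroups", "deny");

        snprintf(map, sizeof(map), "0 %u 1", uid);
        write_file("/proc/self/uid_map", map);
        snprintf(map, sizeof(map), "0 %u 1", gid);
        write_file("/proc/self/gid_map", map);

        printf("in new userns: uid=%u gid=%u\n", getuid(), getgid());
        return 0;
}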
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 333080d7a671..8420a2f80811 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -32,8 +32,6 @@ static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
struct ctl_table_header *head;
- const struct proc_ns_operations *ns_ops;
- void *ns;
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
@@ -50,11 +48,6 @@ static void proc_evict_inode(struct inode *inode)
RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
sysctl_head_put(head);
}
- /* Release any associated namespace */
- ns_ops = PROC_I(inode)->ns.ns_ops;
- ns = PROC_I(inode)->ns.ns;
- if (ns_ops && ns)
- ns_ops->put(ns);
}
static struct kmem_cache * proc_inode_cachep;
@@ -73,8 +66,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
ei->pde = NULL;
ei->sysctl = NULL;
ei->sysctl_entry = NULL;
- ei->ns.ns = NULL;
- ei->ns.ns_ops = NULL;
+ ei->ns_ops = NULL;
inode = &ei->vfs_inode;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
return inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 7fb1a4869fd0..6fcdba573e0f 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -65,7 +65,7 @@ struct proc_inode {
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
- struct proc_ns ns;
+ const struct proc_ns_operations *ns_ops;
struct inode vfs_inode;
};
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index aa1eee06420f..d3ebf2e61853 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -12,6 +12,9 @@
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_CMA
+#include <linux/cma.h>
+#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"
@@ -138,6 +141,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"AnonHugePages: %8lu kB\n"
#endif
+#ifdef CONFIG_CMA
+ "CmaTotal: %8lu kB\n"
+ "CmaFree: %8lu kB\n"
+#endif
,
K(i.totalram),
K(i.freeram),
@@ -187,12 +194,16 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
vmi.used >> 10,
vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+ , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+ , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
HPAGE_PMD_NR)
#endif
+#ifdef CONFIG_CMA
+ , K(totalcma_pages)
+ , K(global_page_state(NR_FREE_CMA_PAGES))
+#endif
);
hugetlb_report_meminfo(m);
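The meminfo hunk adds CmaTotal and CmaFree lines on kernels built with CONFIG_CMA. A trivial reader that picks those fields out of /proc/meminfo; on kernels without CMA it simply prints nothing:

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/meminfo", "r");
        char line[128];

        if (!f) { perror("/proc/meminfo"); return 1; }
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "Cma", 3))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}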
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 89026095f2b5..c9eac4563fa8 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -1,10 +1,6 @@
#include <linux/proc_fs.h>
#include <linux/nsproxy.h>
-#include <linux/sched.h>
#include <linux/ptrace.h>
-#include <linux/fs_struct.h>
-#include <linux/mount.h>
-#include <linux/path.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/utsname.h>
@@ -34,138 +30,45 @@ static const struct proc_ns_operations *ns_entries[] = {
&mntns_operations,
};
-static const struct file_operations ns_file_operations = {
- .llseek = no_llseek,
-};
-
-static const struct inode_operations ns_inode_operations = {
- .setattr = proc_setattr,
-};
-
-static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- struct inode *inode = dentry->d_inode;
- const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns.ns_ops;
-
- return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
- ns_ops->name, inode->i_ino);
-}
-
-const struct dentry_operations ns_dentry_operations =
-{
- .d_delete = always_delete_dentry,
- .d_dname = ns_dname,
-};
-
-static struct dentry *proc_ns_get_dentry(struct super_block *sb,
- struct task_struct *task, const struct proc_ns_operations *ns_ops)
-{
- struct dentry *dentry, *result;
- struct inode *inode;
- struct proc_inode *ei;
- struct qstr qname = { .name = "", };
- void *ns;
-
- ns = ns_ops->get(task);
- if (!ns)
- return ERR_PTR(-ENOENT);
-
- dentry = d_alloc_pseudo(sb, &qname);
- if (!dentry) {
- ns_ops->put(ns);
- return ERR_PTR(-ENOMEM);
- }
-
- inode = iget_locked(sb, ns_ops->inum(ns));
- if (!inode) {
- dput(dentry);
- ns_ops->put(ns);
- return ERR_PTR(-ENOMEM);
- }
-
- ei = PROC_I(inode);
- if (inode->i_state & I_NEW) {
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- inode->i_op = &ns_inode_operations;
- inode->i_mode = S_IFREG | S_IRUGO;
- inode->i_fop = &ns_file_operations;
- ei->ns.ns_ops = ns_ops;
- ei->ns.ns = ns;
- unlock_new_inode(inode);
- } else {
- ns_ops->put(ns);
- }
-
- d_set_d_op(dentry, &ns_dentry_operations);
- result = d_instantiate_unique(dentry, inode);
- if (result) {
- dput(dentry);
- dentry = result;
- }
-
- return dentry;
-}
-
static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
- struct super_block *sb = inode->i_sb;
- struct proc_inode *ei = PROC_I(inode);
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
struct path ns_path;
void *error = ERR_PTR(-EACCES);
task = get_proc_task(inode);
if (!task)
- goto out;
+ return error;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out_put_task;
-
- ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops);
- if (IS_ERR(ns_path.dentry)) {
- error = ERR_CAST(ns_path.dentry);
- goto out_put_task;
+ if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ error = ns_get_path(&ns_path, task, ns_ops);
+ if (!error)
+ nd_jump_link(nd, &ns_path);
}
-
- ns_path.mnt = mntget(nd->path.mnt);
- nd_jump_link(nd, &ns_path);
- error = NULL;
-
-out_put_task:
put_task_struct(task);
-out:
return error;
}
static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = dentry->d_inode;
- struct proc_inode *ei = PROC_I(inode);
- const struct proc_ns_operations *ns_ops = ei->ns.ns_ops;
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
- void *ns;
char name[50];
int res = -EACCES;
task = get_proc_task(inode);
if (!task)
- goto out;
-
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out_put_task;
+ return res;
- res = -ENOENT;
- ns = ns_ops->get(task);
- if (!ns)
- goto out_put_task;
-
- snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
- res = readlink_copy(buffer, buflen, name);
- ns_ops->put(ns);
-out_put_task:
+ if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ res = ns_get_name(name, sizeof(name), task, ns_ops);
+ if (res >= 0)
+ res = readlink_copy(buffer, buflen, name);
+ }
put_task_struct(task);
-out:
return res;
}
@@ -189,7 +92,7 @@ static int proc_ns_instantiate(struct inode *dir,
ei = PROC_I(inode);
inode->i_mode = S_IFLNK|S_IRWXUGO;
inode->i_op = &proc_ns_link_inode_operations;
- ei->ns.ns_ops = ns_ops;
+ ei->ns_ops = ns_ops;
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
@@ -267,31 +170,3 @@ const struct inode_operations proc_ns_dir_inode_operations = {
.getattr = pid_getattr,
.setattr = proc_setattr,
};
-
-struct file *proc_ns_fget(int fd)
-{
- struct file *file;
-
- file = fget(fd);
- if (!file)
- return ERR_PTR(-EBADF);
-
- if (file->f_op != &ns_file_operations)
- goto out_invalid;
-
- return file;
-
-out_invalid:
- fput(file);
- return ERR_PTR(-EINVAL);
-}
-
-struct proc_ns *get_proc_ns(struct inode *inode)
-{
- return &PROC_I(inode)->ns;
-}
-
-bool proc_ns_inode(struct inode *inode)
-{
- return inode->i_fop == &ns_file_operations;
-}
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ec881b312700..8613e5b35c22 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
"size of reserved RAM used to store oops/panic logs");
+static unsigned int mem_type;
+module_param(mem_type, uint, 0600);
+MODULE_PARM_DESC(mem_type,
+ "set to 1 to try to use unbuffered memory (default 0)");
+
static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
@@ -79,6 +84,7 @@ struct ramoops_context {
struct persistent_ram_zone *fprz;
phys_addr_t phys_addr;
unsigned long size;
+ unsigned int memtype;
size_t record_size;
size_t console_size;
size_t ftrace_size;
@@ -366,7 +372,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
size_t sz = cxt->record_size;
cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
- &cxt->ecc_info);
+ &cxt->ecc_info,
+ cxt->memtype);
if (IS_ERR(cxt->przs[i])) {
err = PTR_ERR(cxt->przs[i]);
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -396,7 +403,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
return -ENOMEM;
}
- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
+ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
@@ -443,6 +450,7 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->size = pdata->mem_size;
cxt->phys_addr = pdata->mem_address;
+ cxt->memtype = pdata->mem_type;
cxt->record_size = pdata->record_size;
cxt->console_size = pdata->console_size;
cxt->ftrace_size = pdata->ftrace_size;
@@ -553,7 +561,6 @@ static struct platform_driver ramoops_driver = {
.remove = __exit_p(ramoops_remove),
.driver = {
.name = "ramoops",
- .owner = THIS_MODULE,
},
};
@@ -572,6 +579,7 @@ static void ramoops_register_dummy(void)
dummy_data->mem_size = mem_size;
dummy_data->mem_address = mem_address;
+ dummy_data->mem_type = 0;
dummy_data->record_size = record_size;
dummy_data->console_size = ramoops_console_size;
dummy_data->ftrace_size = ramoops_ftrace_size;
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 9d7b9a83699e..76c3f80efdfa 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
persistent_ram_update_header_ecc(prz);
}
-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
+static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+ unsigned int memtype)
{
struct page **pages;
phys_addr_t page_start;
@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
page_start = start - offset_in_page(start);
page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
- prot = pgprot_noncached(PAGE_KERNEL);
+ if (memtype)
+ prot = pgprot_noncached(PAGE_KERNEL);
+ else
+ prot = pgprot_writecombine(PAGE_KERNEL);
pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
return vaddr;
}
-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
+static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+ unsigned int memtype)
{
+ void *va;
+
if (!request_mem_region(start, size, "persistent_ram")) {
pr_err("request mem region (0x%llx@0x%llx) failed\n",
(unsigned long long)size, (unsigned long long)start);
@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
buffer_start_add = buffer_start_add_locked;
buffer_size_add = buffer_size_add_locked;
- return ioremap(start, size);
+ if (memtype)
+ va = ioremap(start, size);
+ else
+ va = ioremap_wc(start, size);
+
+ return va;
}
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
- struct persistent_ram_zone *prz)
+ struct persistent_ram_zone *prz, int memtype)
{
prz->paddr = start;
prz->size = size;
if (pfn_valid(start >> PAGE_SHIFT))
- prz->vaddr = persistent_ram_vmap(start, size);
+ prz->vaddr = persistent_ram_vmap(start, size, memtype);
else
- prz->vaddr = persistent_ram_iomap(start, size);
+ prz->vaddr = persistent_ram_iomap(start, size, memtype);
if (!prz->vaddr) {
pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -500,7 +512,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
}
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info)
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
@@ -511,7 +524,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
goto err;
}
- ret = persistent_ram_buffer_map(start, size, prz);
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
goto err;
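Aside: the ram_core.c changes thread the new memtype flag down to the two places where mapping attributes are chosen: the vmap() path picks pgprot_noncached() versus pgprot_writecombine(), and the ioremap path picks ioremap() versus ioremap_wc(). A compressed sketch of the ioremap side, using a made-up helper name and the same convention as the patch (non-zero memtype means strictly uncached):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper, not kernel API; mirrors the patch's policy. */
static void __iomem *prz_iomap(phys_addr_t start, size_t size,
			       unsigned int memtype)
{
	/* Non-zero: uncached access; zero: write-combined for throughput. */
	return memtype ? ioremap(start, size) : ioremap_wc(start, size);
}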
diff --git a/fs/read_write.c b/fs/read_write.c
index 7d9318c3d43c..c0805c93b6fa 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -412,6 +412,23 @@ ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *p
EXPORT_SYMBOL(new_sync_read);
+ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ ssize_t ret;
+
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, pos);
+ else if (file->f_op->aio_read)
+ ret = do_sync_read(file, buf, count, pos);
+ else if (file->f_op->read_iter)
+ ret = new_sync_read(file, buf, count, pos);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
ssize_t ret;
@@ -426,12 +443,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
ret = rw_verify_area(READ, file, pos, count);
if (ret >= 0) {
count = ret;
- if (file->f_op->read)
- ret = file->f_op->read(file, buf, count, pos);
- else if (file->f_op->aio_read)
- ret = do_sync_read(file, buf, count, pos);
- else
- ret = new_sync_read(file, buf, count, pos);
+ ret = __vfs_read(file, buf, count, pos);
if (ret > 0) {
fsnotify_access(file);
add_rchar(current, ret);
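Aside: the read_write.c hunk factors the method dispatch out of vfs_read() into __vfs_read(), leaving the permission check, rw_verify_area() and the fsnotify/accounting calls in the caller. A commented paraphrase of the dispatch order, not a drop-in replacement for the real helper:

#include <linux/fs.h>

static ssize_t dispatch_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	if (file->f_op->read)		/* legacy synchronous ->read() */
		return file->f_op->read(file, buf, count, pos);
	if (file->f_op->aio_read)	/* old aio path, run synchronously */
		return do_sync_read(file, buf, count, pos);
	if (file->f_op->read_iter)	/* modern iov_iter based path */
		return new_sync_read(file, buf, count, pos);
	return -EINVAL;			/* no way to read this file */
}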
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d571e173a990..9d6486d416a3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2772,7 +2772,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
reiserfs_warning(sb, "sh-462",
- "unable to initialize jornal device");
+ "unable to initialize journal device");
goto free_and_return;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index ea63ab13ef92..71fbbe3e2dab 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2172,6 +2172,9 @@ error_unlocked:
reiserfs_write_unlock(s);
}
+ if (sbi->commit_wq)
+ destroy_workqueue(sbi->commit_wq);
+
cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
reiserfs_free_bitmap_cache(s);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 353948ba1c5b..dbf3a59c86bb 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -25,7 +25,11 @@ static void *seq_buf_alloc(unsigned long size)
{
void *buf;
- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ /*
+ * __GFP_NORETRY to avoid oom-killings with high-order allocations -
+ * it's better to fall back to vmalloc() than to kill things.
+ */
+ buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
if (!buf && size > PAGE_SIZE)
buf = vmalloc(size);
return buf;
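Aside: the seq_file change is the classic two-step buffer allocation: try kmalloc() with __GFP_NORETRY so a large, high-order request fails fast instead of triggering the OOM killer, keep __GFP_NOWARN because the failure is expected and handled, then fall back to vmalloc() for anything bigger than a page. A self-contained sketch of the idiom; callers would release the result with kvfree(), which picks kfree() or vfree() as appropriate:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *buf_alloc(unsigned long size)
{
	void *buf;

	/* Cheap first attempt: no OOM killing, no allocation-failure splat. */
	buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (!buf && size > PAGE_SIZE)
		buf = vmalloc(size);	/* virtually contiguous fallback */
	return buf;
}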
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index b6fa8657dcbc..ffb093e72b6c 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -120,6 +120,21 @@ config SQUASHFS_ZLIB
If unsure, say Y.
+config SQUASHFS_LZ4
+ bool "Include support for LZ4 compressed file systems"
+ depends on SQUASHFS
+ select LZ4_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with LZ4 compression. LZ4 compression is mainly
+ aimed at embedded systems with slower CPUs where the overheads
+ of zlib are too high.
+
+ LZ4 is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
config SQUASHFS_LZO
bool "Include support for LZO compressed file systems"
depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 4132520b4ff2..246a6f329d89 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -11,6 +11,7 @@ squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
+squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index ac22fe73b0ad..e9034bf6e5ae 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -41,6 +41,12 @@ static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};
+#ifndef CONFIG_SQUASHFS_LZ4
+static const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+ NULL, NULL, NULL, NULL, LZ4_COMPRESSION, "lz4", 0
+};
+#endif
+
#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
@@ -65,6 +71,7 @@ static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
static const struct squashfs_decompressor *decompressor[] = {
&squashfs_zlib_comp_ops,
+ &squashfs_lz4_comp_ops,
&squashfs_lzo_comp_ops,
&squashfs_xz_comp_ops,
&squashfs_lzma_unsupported_comp_ops,
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index af0985321808..a25713c031a5 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -46,6 +46,10 @@ static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
extern const struct squashfs_decompressor squashfs_xz_comp_ops;
#endif
+#ifdef CONFIG_SQUASHFS_LZ4
+extern const struct squashfs_decompressor squashfs_lz4_comp_ops;
+#endif
+
#ifdef CONFIG_SQUASHFS_LZO
extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
#endif
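Aside: note how decompressor.h and decompressor.c cooperate above. With CONFIG_SQUASHFS_LZ4 enabled, the real squashfs_lz4_comp_ops from lz4_wrapper.c is declared extern and linked in; without it, a local stub carrying the same id and name but supported = 0 fills the table slot, so mounting an LZ4 image fails with a clear "unsupported" message rather than an unknown-compression error. A stripped-down sketch of that pattern with hypothetical names (CONFIG_EXAMPLE_FROB, struct codec_ops):

/* Hypothetical codec used only to show the stub/real split. */
struct codec_ops {
	int (*decompress)(void *dst, const void *src, int len);
	int id;
	const char *name;
	int supported;
};

#ifdef CONFIG_EXAMPLE_FROB
extern const struct codec_ops frob_ops;		/* real code in frob_wrapper.c */
#else
static const struct codec_ops frob_ops = {	/* recognised but not built in */
	.id = 42, .name = "frob", .supported = 0,
};
#endif

static const struct codec_ops *codec_table[] = {
	&frob_ops,
	/* ... other codecs ... */
};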
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
new file mode 100644
index 000000000000..c31e2bc9c081
--- /dev/null
+++ b/fs/squashfs/lz4_wrapper.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/lz4.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+#define LZ4_LEGACY 1
+
+struct lz4_comp_opts {
+ __le32 version;
+ __le32 flags;
+};
+
+struct squashfs_lz4 {
+ void *input;
+ void *output;
+};
+
+
+static void *lz4_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int len)
+{
+ struct lz4_comp_opts *comp_opts = buff;
+
+ /* LZ4 compressed filesystems always have compression options */
+ if (comp_opts == NULL || len < sizeof(*comp_opts))
+ return ERR_PTR(-EIO);
+
+ if (le32_to_cpu(comp_opts->version) != LZ4_LEGACY) {
+ /* LZ4 format currently used by the kernel is the 'legacy'
+ * format */
+ ERROR("Unknown LZ4 version\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return NULL;
+}
+
+
+static void *lz4_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+ struct squashfs_lz4 *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->input = vmalloc(block_size);
+ if (stream->input == NULL)
+ goto failed2;
+ stream->output = vmalloc(block_size);
+ if (stream->output == NULL)
+ goto failed3;
+
+ return stream;
+
+failed3:
+ vfree(stream->input);
+failed2:
+ kfree(stream);
+failed:
+ ERROR("Failed to initialise LZ4 decompressor\n");
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void lz4_free(void *strm)
+{
+ struct squashfs_lz4 *stream = strm;
+
+ if (stream) {
+ vfree(stream->input);
+ vfree(stream->output);
+ }
+ kfree(stream);
+}
+
+
+static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct squashfs_lz4 *stream = strm;
+ void *buff = stream->input, *data;
+ int avail, i, bytes = length, res;
+ size_t dest_len = output->length;
+
+ for (i = 0; i < b; i++) {
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
+
+ res = lz4_decompress_unknownoutputsize(stream->input, length,
+ stream->output, &dest_len);
+ if (res)
+ return -EIO;
+
+ bytes = dest_len;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_CACHE_SIZE) {
+ memcpy(data, buff, bytes);
+ break;
+ }
+ memcpy(data, buff, PAGE_CACHE_SIZE);
+ buff += PAGE_CACHE_SIZE;
+ bytes -= PAGE_CACHE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ squashfs_finish_page(output);
+
+ return dest_len;
+}
+
+const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+ .init = lz4_init,
+ .comp_opts = lz4_comp_opts,
+ .free = lz4_free,
+ .decompress = lz4_uncompress,
+ .id = LZ4_COMPRESSION,
+ .name = "lz4",
+ .supported = 1
+};
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 4b2beda49498..506f4ba5b983 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -240,6 +240,7 @@ struct meta_index {
#define LZMA_COMPRESSION 2
#define LZO_COMPRESSION 3
#define XZ_COMPRESSION 4
+#define LZ4_COMPRESSION 5
struct squashfs_super_block {
__le32 s_magic;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index e9ef59b3abb1..dfe928a9540f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -102,6 +102,22 @@ static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf,
return battr->read(of->file, kobj, battr, buf, pos, count);
}
+/* kernfs read callback for regular sysfs files with pre-alloc */
+static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ size_t count, loff_t pos)
+{
+ const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+ struct kobject *kobj = of->kn->parent->priv;
+
+ /*
+ * If buf != of->prealloc_buf, we don't know how
+ * large it is, so cannot safely pass it to ->show
+ */
+ if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+ return 0;
+ return ops->show(kobj, of->kn->priv, buf);
+}
+
/* kernfs write callback for regular sysfs files */
static ssize_t sysfs_kf_write(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
@@ -125,7 +141,7 @@ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
if (size) {
if (size <= pos)
- return 0;
+ return -EFBIG;
count = min_t(ssize_t, count, size - pos);
}
if (!count)
@@ -184,6 +200,22 @@ static const struct kernfs_ops sysfs_file_kfops_rw = {
.write = sysfs_kf_write,
};
+static const struct kernfs_ops sysfs_prealloc_kfops_ro = {
+ .read = sysfs_kf_read,
+ .prealloc = true,
+};
+
+static const struct kernfs_ops sysfs_prealloc_kfops_wo = {
+ .write = sysfs_kf_write,
+ .prealloc = true,
+};
+
+static const struct kernfs_ops sysfs_prealloc_kfops_rw = {
+ .read = sysfs_kf_read,
+ .write = sysfs_kf_write,
+ .prealloc = true,
+};
+
static const struct kernfs_ops sysfs_bin_kfops_ro = {
.read = sysfs_kf_bin_read,
};
@@ -222,13 +254,22 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
kobject_name(kobj)))
return -EINVAL;
- if (sysfs_ops->show && sysfs_ops->store)
- ops = &sysfs_file_kfops_rw;
- else if (sysfs_ops->show)
- ops = &sysfs_file_kfops_ro;
- else if (sysfs_ops->store)
- ops = &sysfs_file_kfops_wo;
- else
+ if (sysfs_ops->show && sysfs_ops->store) {
+ if (mode & SYSFS_PREALLOC)
+ ops = &sysfs_prealloc_kfops_rw;
+ else
+ ops = &sysfs_file_kfops_rw;
+ } else if (sysfs_ops->show) {
+ if (mode & SYSFS_PREALLOC)
+ ops = &sysfs_prealloc_kfops_ro;
+ else
+ ops = &sysfs_file_kfops_ro;
+ } else if (sysfs_ops->store) {
+ if (mode & SYSFS_PREALLOC)
+ ops = &sysfs_prealloc_kfops_wo;
+ else
+ ops = &sysfs_file_kfops_wo;
+ } else
ops = &sysfs_file_kfops_empty;
size = PAGE_SIZE;
@@ -253,7 +294,7 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
if (!attr->ignore_lockdep)
key = attr->key ?: (struct lock_class_key *)&attr->skey;
#endif
- kn = __kernfs_create_file(parent, attr->name, mode, size, ops,
+ kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops,
(void *)attr, ns, true, key);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
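Aside: after the sysfs hunk above, an attribute opts into a preallocated kernfs buffer by setting the SYSFS_PREALLOC bit in its mode; sysfs masks the bit off (mode & 0777) before creating the kernfs node and routes the file through the new *_prealloc_kfops_* tables, whose read path insists on pos == 0 and buf == of->prealloc_buf. A hedged sketch of a driver-side read-only attribute using the flag directly; the attribute name and handler are illustrative, and the same series is assumed to add an __ATTR_PREALLOC() convenience macro that is not shown here:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	/* With prealloc, buf points at the buffer set up at open time. */
	return scnprintf(buf, PAGE_SIZE, "ok\n");
}

static struct device_attribute dev_attr_state = {
	.attr = { .name = "state", .mode = S_IRUGO | SYSFS_PREALLOC },
	.show = state_show,
};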
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b5b593c45270..538519ee37d9 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -262,6 +262,7 @@ static int write_begin_slow(struct address_space *mapping,
if (err) {
unlock_page(page);
page_cache_release(page);
+ ubifs_release_budget(c, &req);
return err;
}
}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index fb166e204441..f6ac3f29323c 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -571,7 +571,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
aligned_dlen = ALIGN(dlen, 8);
aligned_ilen = ALIGN(ilen, 8);
+
len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
+ /* Make sure to also account for extended attributes */
+ len += host_ui->data_len;
+
dent = kmalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -648,7 +652,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
ino_key_init(c, &ino_key, dir->i_ino);
ino_offs += aligned_ilen;
- err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
+ err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
+ UBIFS_INO_NODE_SZ + host_ui->data_len);
if (err)
goto out_ro;
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
deleted file mode 100644
index 6e247a99f5db..000000000000
--- a/fs/xfs/libxfs/xfs_ag.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_AG_H__
-#define __XFS_AG_H__
-
-/*
- * Allocation group header
- * This is divided into three structures, placed in sequential 512-byte
- * buffers after a copy of the superblock (also in a 512-byte buffer).
- */
-
-struct xfs_buf;
-struct xfs_mount;
-struct xfs_trans;
-
-#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */
-#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */
-#define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */
-#define XFS_AGF_VERSION 1
-#define XFS_AGI_VERSION 1
-
-#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION)
-#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
-
-/*
- * Btree number 0 is bno, 1 is cnt. This value gives the size of the
- * arrays below.
- */
-#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
-
-/*
- * The second word of agf_levels in the first a.g. overlaps the EFS
- * superblock's magic number. Since the magic numbers valid for EFS
- * are > 64k, our value cannot be confused for an EFS superblock's.
- */
-
-typedef struct xfs_agf {
- /*
- * Common allocation group header information
- */
- __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */
- __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */
- __be32 agf_seqno; /* sequence # starting from 0 */
- __be32 agf_length; /* size in blocks of a.g. */
- /*
- * Freespace information
- */
- __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
- __be32 agf_spare0; /* spare field */
- __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
- __be32 agf_spare1; /* spare field */
-
- __be32 agf_flfirst; /* first freelist block's index */
- __be32 agf_fllast; /* last freelist block's index */
- __be32 agf_flcount; /* count of blocks in freelist */
- __be32 agf_freeblks; /* total free blocks */
-
- __be32 agf_longest; /* longest free space */
- __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
- uuid_t agf_uuid; /* uuid of filesystem */
-
- /*
- * reserve some contiguous space for future logged fields before we add
- * the unlogged fields. This makes the range logging via flags and
- * structure offsets much simpler.
- */
- __be64 agf_spare64[16];
-
- /* unlogged fields, written during buffer writeback. */
- __be64 agf_lsn; /* last write sequence */
- __be32 agf_crc; /* crc of agf sector */
- __be32 agf_spare2;
-
- /* structure must be padded to 64 bit alignment */
-} xfs_agf_t;
-
-#define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc)
-
-#define XFS_AGF_MAGICNUM 0x00000001
-#define XFS_AGF_VERSIONNUM 0x00000002
-#define XFS_AGF_SEQNO 0x00000004
-#define XFS_AGF_LENGTH 0x00000008
-#define XFS_AGF_ROOTS 0x00000010
-#define XFS_AGF_LEVELS 0x00000020
-#define XFS_AGF_FLFIRST 0x00000040
-#define XFS_AGF_FLLAST 0x00000080
-#define XFS_AGF_FLCOUNT 0x00000100
-#define XFS_AGF_FREEBLKS 0x00000200
-#define XFS_AGF_LONGEST 0x00000400
-#define XFS_AGF_BTREEBLKS 0x00000800
-#define XFS_AGF_UUID 0x00001000
-#define XFS_AGF_NUM_BITS 13
-#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
-
-#define XFS_AGF_FLAGS \
- { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
- { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
- { XFS_AGF_SEQNO, "SEQNO" }, \
- { XFS_AGF_LENGTH, "LENGTH" }, \
- { XFS_AGF_ROOTS, "ROOTS" }, \
- { XFS_AGF_LEVELS, "LEVELS" }, \
- { XFS_AGF_FLFIRST, "FLFIRST" }, \
- { XFS_AGF_FLLAST, "FLLAST" }, \
- { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
- { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
- { XFS_AGF_LONGEST, "LONGEST" }, \
- { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
- { XFS_AGF_UUID, "UUID" }
-
-/* disk block (xfs_daddr_t) in the AG */
-#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
-#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
-#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
-
-extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
-
-/*
- * Size of the unlinked inode hash table in the agi.
- */
-#define XFS_AGI_UNLINKED_BUCKETS 64
-
-typedef struct xfs_agi {
- /*
- * Common allocation group header information
- */
- __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */
- __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */
- __be32 agi_seqno; /* sequence # starting from 0 */
- __be32 agi_length; /* size in blocks of a.g. */
- /*
- * Inode information
- * Inodes are mapped by interpreting the inode number, so no
- * mapping data is needed here.
- */
- __be32 agi_count; /* count of allocated inodes */
- __be32 agi_root; /* root of inode btree */
- __be32 agi_level; /* levels in inode btree */
- __be32 agi_freecount; /* number of free inodes */
-
- __be32 agi_newino; /* new inode just allocated */
- __be32 agi_dirino; /* last directory inode chunk */
- /*
- * Hash table of inodes which have been unlinked but are
- * still being referenced.
- */
- __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
- /*
- * This marks the end of logging region 1 and start of logging region 2.
- */
- uuid_t agi_uuid; /* uuid of filesystem */
- __be32 agi_crc; /* crc of agi sector */
- __be32 agi_pad32;
- __be64 agi_lsn; /* last write sequence */
-
- __be32 agi_free_root; /* root of the free inode btree */
- __be32 agi_free_level;/* levels in free inode btree */
-
- /* structure must be padded to 64 bit alignment */
-} xfs_agi_t;
-
-#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
-
-#define XFS_AGI_MAGICNUM (1 << 0)
-#define XFS_AGI_VERSIONNUM (1 << 1)
-#define XFS_AGI_SEQNO (1 << 2)
-#define XFS_AGI_LENGTH (1 << 3)
-#define XFS_AGI_COUNT (1 << 4)
-#define XFS_AGI_ROOT (1 << 5)
-#define XFS_AGI_LEVEL (1 << 6)
-#define XFS_AGI_FREECOUNT (1 << 7)
-#define XFS_AGI_NEWINO (1 << 8)
-#define XFS_AGI_DIRINO (1 << 9)
-#define XFS_AGI_UNLINKED (1 << 10)
-#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
-#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
-#define XFS_AGI_FREE_ROOT (1 << 11)
-#define XFS_AGI_FREE_LEVEL (1 << 12)
-#define XFS_AGI_NUM_BITS_R2 13
-
-/* disk block (xfs_daddr_t) in the AG */
-#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
-#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
-#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
-
-extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, struct xfs_buf **bpp);
-
-/*
- * The third a.g. block contains the a.g. freelist, an array
- * of block pointers to blocks owned by the allocation btree code.
- */
-#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
-#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
-#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
-
-#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
- &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
- (__be32 *)(bp)->b_addr)
-
-/*
- * Size of the AGFL. For CRC-enabled filesystes we steal a couple of
- * slots in the beginning of the block for a proper header with the
- * location information and CRC.
- */
-#define XFS_AGFL_SIZE(mp) \
- (((mp)->m_sb.sb_sectsize - \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
- sizeof(struct xfs_agfl) : 0)) / \
- sizeof(xfs_agblock_t))
-
-typedef struct xfs_agfl {
- __be32 agfl_magicnum;
- __be32 agfl_seqno;
- uuid_t agfl_uuid;
- __be64 agfl_lsn;
- __be32 agfl_crc;
- __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
-} xfs_agfl_t;
-
-#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
-
-/*
- * tags for inode radix tree
- */
-#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
- in xfs_inode_ag_iterator */
-#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
-#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
-
-#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
-#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
- (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
-#define XFS_MIN_FREELIST(a,mp) \
- (XFS_MIN_FREELIST_RAW( \
- be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
- be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
-#define XFS_MIN_FREELIST_PAG(pag,mp) \
- (XFS_MIN_FREELIST_RAW( \
- (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
- (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
-
-#define XFS_AGB_TO_FSB(mp,agno,agbno) \
- (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
-#define XFS_FSB_TO_AGNO(mp,fsbno) \
- ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
-#define XFS_FSB_TO_AGBNO(mp,fsbno) \
- ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
-#define XFS_AGB_TO_DADDR(mp,agno,agbno) \
- ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
- (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
-#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d))
-
-/*
- * For checking for bad ranges of xfs_daddr_t's, covering multiple
- * allocation groups or a single xfs_daddr_t that's a superblock copy.
- */
-#define XFS_AG_CHECK_DADDR(mp,d,len) \
- ((len) == 1 ? \
- ASSERT((d) == XFS_SB_DADDR || \
- xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
- ASSERT(xfs_daddr_to_agno(mp, d) == \
- xfs_daddr_to_agno(mp, (d) + (len) - 1)))
-
-#endif /* __XFS_AG_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index eff34218f405..a6fbf4472017 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -23,7 +23,6 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index feacb061bab7..d1b4b6a5c894 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -231,4 +231,7 @@ xfs_alloc_get_rec(
xfs_extlen_t *len, /* output: length of extent */
int *stat); /* output: success/failure */
+int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index e0e83e24d3ef..59d521c09a17 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -22,7 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 353fb425faef..0a472fbe06d4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -42,7 +40,6 @@
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
-#include "xfs_dinode.h"
/*
* xfs_attr.c
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index b1f73dbbf3d8..5d38e8b8a913 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -24,7 +24,6 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -41,7 +40,6 @@
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
-#include "xfs_dinode.h"
#include "xfs_dir2.h"
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 7510ab8058a4..20de88d1bf86 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -23,8 +23,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 79c981984dca..b5eb4743f75a 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -22,9 +22,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -46,7 +44,6 @@
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
-#include "xfs_dinode.h"
#include "xfs_filestream.h"
@@ -5450,13 +5447,11 @@ xfs_bmse_merge(
struct xfs_btree_cur *cur,
int *logflags) /* output */
{
- struct xfs_ifork *ifp;
struct xfs_bmbt_irec got;
struct xfs_bmbt_irec left;
xfs_filblks_t blockcount;
int error, i;
- ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_bmbt_get_all(gotp, &got);
xfs_bmbt_get_all(leftp, &left);
blockcount = left.br_blockcount + got.br_blockcount;
@@ -5489,32 +5484,25 @@ xfs_bmse_merge(
error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
got.br_blockcount, &i);
if (error)
- goto out_error;
- XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+ return error;
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
error = xfs_btree_delete(cur, &i);
if (error)
- goto out_error;
- XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+ return error;
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
/* lookup and update size of the previous extent */
error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
left.br_blockcount, &i);
if (error)
- goto out_error;
- XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+ return error;
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
left.br_blockcount = blockcount;
- error = xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
- left.br_blockcount, left.br_state);
- if (error)
- goto out_error;
-
- return 0;
-
-out_error:
- return error;
+ return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
+ left.br_blockcount, left.br_state);
}
/*
@@ -5544,35 +5532,29 @@ xfs_bmse_shift_one(
startoff = got.br_startoff - offset_shift_fsb;
/* delalloc extents should be prevented by caller */
- XFS_WANT_CORRUPTED_GOTO(!isnullstartblock(got.br_startblock),
- out_error);
+ XFS_WANT_CORRUPTED_RETURN(!isnullstartblock(got.br_startblock));
/*
- * If this is the first extent in the file, make sure there's enough
- * room at the start of the file and jump right to the shift as there's
- * no left extent to merge.
+ * Check for merge if we've got an extent to the left, otherwise make
+ * sure there's enough room at the start of the file for the shift.
*/
- if (*current_ext == 0) {
- if (got.br_startoff < offset_shift_fsb)
- return -EINVAL;
- goto shift_extent;
- }
+ if (*current_ext) {
+ /* grab the left extent and check for a large enough hole */
+ leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
+ xfs_bmbt_get_all(leftp, &left);
- /* grab the left extent and check for a large enough hole */
- leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
- xfs_bmbt_get_all(leftp, &left);
+ if (startoff < left.br_startoff + left.br_blockcount)
+ return -EINVAL;
- if (startoff < left.br_startoff + left.br_blockcount)
+ /* check whether to merge the extent or shift it down */
+ if (xfs_bmse_can_merge(&left, &got, offset_shift_fsb)) {
+ return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
+ *current_ext, gotp, leftp, cur,
+ logflags);
+ }
+ } else if (got.br_startoff < offset_shift_fsb)
return -EINVAL;
- /* check whether to merge the extent or shift it down */
- if (!xfs_bmse_can_merge(&left, &got, offset_shift_fsb))
- goto shift_extent;
-
- return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, *current_ext,
- gotp, leftp, cur, logflags);
-
-shift_extent:
/*
* Increment the extent index for the next iteration, update the start
* offset of the in-core extent and update the btree if applicable.
@@ -5589,18 +5571,11 @@ shift_extent:
got.br_blockcount, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
got.br_startoff = startoff;
- error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
+ return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
got.br_blockcount, got.br_state);
- if (error)
- return error;
-
- return 0;
-
-out_error:
- return error;
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index fba753308f31..2c44c8e50782 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -36,7 +34,6 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
-#include "xfs_dinode.h"
/*
* Determine the extent state.
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 8fe6a93ff473..81cad433df85 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index fd827530afec..9cb0115c6bd1 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -23,8 +23,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -514,7 +512,6 @@ xfs_da3_root_split(
struct xfs_buf *bp;
struct xfs_inode *dp;
struct xfs_trans *tp;
- struct xfs_mount *mp;
struct xfs_dir2_leaf *leaf;
xfs_dablk_t blkno;
int level;
@@ -534,7 +531,6 @@ xfs_da3_root_split(
dp = args->dp;
tp = args->trans;
- mp = state->mp;
error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
if (error)
return error;
@@ -2342,14 +2338,12 @@ xfs_da_shrink_inode(
xfs_inode_t *dp;
int done, error, w, count;
xfs_trans_t *tp;
- xfs_mount_t *mp;
trace_xfs_da_shrink_inode(args);
dp = args->dp;
w = args->whichfork;
tp = args->trans;
- mp = dp->i_mount;
count = args->geo->fsbcount;
for (;;) {
/*
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index 7e42fdfd2f1d..9d624a622946 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -22,8 +22,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_dinode.h b/fs/xfs/libxfs/xfs_dinode.h
deleted file mode 100644
index 623bbe8fd921..000000000000
--- a/fs/xfs/libxfs/xfs_dinode.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DINODE_H__
-#define __XFS_DINODE_H__
-
-#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
-
-typedef struct xfs_timestamp {
- __be32 t_sec; /* timestamp seconds */
- __be32 t_nsec; /* timestamp nanoseconds */
-} xfs_timestamp_t;
-
-/*
- * On-disk inode structure.
- *
- * This is just the header or "dinode core", the inode is expanded to fill a
- * variable size the leftover area split into a data and an attribute fork.
- * The format of the data and attribute fork depends on the format of the
- * inode as indicated by di_format and di_aformat. To access the data and
- * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
- * below.
- *
- * There is a very similar struct icdinode in xfs_inode which matches the
- * layout of the first 96 bytes of this structure, but is kept in native
- * format instead of big endian.
- *
- * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
- * padding field for v3 inodes.
- */
-typedef struct xfs_dinode {
- __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
- __be16 di_mode; /* mode and type of file */
- __u8 di_version; /* inode version */
- __u8 di_format; /* format of di_c data */
- __be16 di_onlink; /* old number of links to file */
- __be32 di_uid; /* owner's user id */
- __be32 di_gid; /* owner's group id */
- __be32 di_nlink; /* number of links to file */
- __be16 di_projid_lo; /* lower part of owner's project id */
- __be16 di_projid_hi; /* higher part owner's project id */
- __u8 di_pad[6]; /* unused, zeroed space */
- __be16 di_flushiter; /* incremented on flush */
- xfs_timestamp_t di_atime; /* time last accessed */
- xfs_timestamp_t di_mtime; /* time last modified */
- xfs_timestamp_t di_ctime; /* time created/inode modified */
- __be64 di_size; /* number of bytes in file */
- __be64 di_nblocks; /* # of direct & btree blocks used */
- __be32 di_extsize; /* basic/minimum extent size for file */
- __be32 di_nextents; /* number of extents in data fork */
- __be16 di_anextents; /* number of extents in attribute fork*/
- __u8 di_forkoff; /* attr fork offs, <<3 for 64b align */
- __s8 di_aformat; /* format of attr fork's data */
- __be32 di_dmevmask; /* DMIG event mask */
- __be16 di_dmstate; /* DMIG state info */
- __be16 di_flags; /* random flags, XFS_DIFLAG_... */
- __be32 di_gen; /* generation number */
-
- /* di_next_unlinked is the only non-core field in the old dinode */
- __be32 di_next_unlinked;/* agi unlinked list ptr */
-
- /* start of the extended dinode, writable fields */
- __le32 di_crc; /* CRC of the inode */
- __be64 di_changecount; /* number of attribute changes */
- __be64 di_lsn; /* flush sequence */
- __be64 di_flags2; /* more random flags */
- __u8 di_pad2[16]; /* more padding for future expansion */
-
- /* fields only written to during inode creation */
- xfs_timestamp_t di_crtime; /* time created */
- __be64 di_ino; /* inode number */
- uuid_t di_uuid; /* UUID of the filesystem */
-
- /* structure must be padded to 64 bit alignment */
-} xfs_dinode_t;
-
-#define XFS_DINODE_CRC_OFF offsetof(struct xfs_dinode, di_crc)
-
-#define DI_MAX_FLUSH 0xffff
-
-/*
- * Size of the core inode on disk. Version 1 and 2 inodes have
- * the same size, but version 3 has grown a few additional fields.
- */
-static inline uint xfs_dinode_size(int version)
-{
- if (version == 3)
- return sizeof(struct xfs_dinode);
- return offsetof(struct xfs_dinode, di_crc);
-}
-
-/*
- * The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
- * Since the pathconf interface is signed, we use 2^31 - 1 instead.
- * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
- */
-#define XFS_MAXLINK ((1U << 31) - 1U)
-#define XFS_MAXLINK_1 65535U
-
-/*
- * Values for di_format
- */
-typedef enum xfs_dinode_fmt {
- XFS_DINODE_FMT_DEV, /* xfs_dev_t */
- XFS_DINODE_FMT_LOCAL, /* bulk data */
- XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
- XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
- XFS_DINODE_FMT_UUID /* uuid_t */
-} xfs_dinode_fmt_t;
-
-/*
- * Inode minimum and maximum sizes.
- */
-#define XFS_DINODE_MIN_LOG 8
-#define XFS_DINODE_MAX_LOG 11
-#define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG)
-#define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG)
-
-/*
- * Inode size for given fs.
- */
-#define XFS_LITINO(mp, version) \
- ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
-
-/*
- * Inode data & attribute fork sizes, per inode.
- */
-#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0)
-#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
-
-#define XFS_DFORK_DSIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_DFORK_BOFF(dip) : \
- XFS_LITINO(mp, (dip)->di_version))
-#define XFS_DFORK_ASIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
- 0)
-#define XFS_DFORK_SIZE(dip,mp,w) \
- ((w) == XFS_DATA_FORK ? \
- XFS_DFORK_DSIZE(dip, mp) : \
- XFS_DFORK_ASIZE(dip, mp))
-
-/*
- * Return pointers to the data or attribute forks.
- */
-#define XFS_DFORK_DPTR(dip) \
- ((char *)dip + xfs_dinode_size(dip->di_version))
-#define XFS_DFORK_APTR(dip) \
- (XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip))
-#define XFS_DFORK_PTR(dip,w) \
- ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip))
-
-#define XFS_DFORK_FORMAT(dip,w) \
- ((w) == XFS_DATA_FORK ? \
- (dip)->di_format : \
- (dip)->di_aformat)
-#define XFS_DFORK_NEXTENTS(dip,w) \
- ((w) == XFS_DATA_FORK ? \
- be32_to_cpu((dip)->di_nextents) : \
- be16_to_cpu((dip)->di_anextents))
-
-#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)((bp)->b_addr))
-
-/*
- * For block and character special files the 32bit dev_t is stored at the
- * beginning of the data fork.
- */
-static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip)
-{
- return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip));
-}
-
-static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
-{
- *(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev);
-}
-
-/*
- * Values for di_flags
- * There should be a one-to-one correspondence between these flags and the
- * XFS_XFLAG_s.
- */
-#define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */
-#define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */
-#define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */
-#define XFS_DIFLAG_IMMUTABLE_BIT 3 /* inode is immutable */
-#define XFS_DIFLAG_APPEND_BIT 4 /* inode is append-only */
-#define XFS_DIFLAG_SYNC_BIT 5 /* inode is written synchronously */
-#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
-#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
-#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
-#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
-#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
-#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
-#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
-#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
-#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
-#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
-#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
-#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
-#define XFS_DIFLAG_IMMUTABLE (1 << XFS_DIFLAG_IMMUTABLE_BIT)
-#define XFS_DIFLAG_APPEND (1 << XFS_DIFLAG_APPEND_BIT)
-#define XFS_DIFLAG_SYNC (1 << XFS_DIFLAG_SYNC_BIT)
-#define XFS_DIFLAG_NOATIME (1 << XFS_DIFLAG_NOATIME_BIT)
-#define XFS_DIFLAG_NODUMP (1 << XFS_DIFLAG_NODUMP_BIT)
-#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
-#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
-#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
-#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
-#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
-#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
-#define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT)
-
-#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
-#else
-#define XFS_IS_REALTIME_INODE(ip) (0)
-#endif
-
-#define XFS_DIFLAG_ANY \
- (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
- XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
- XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
- XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
- XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
-
-#endif /* __XFS_DINODE_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 7075aaf131f4..a69fb3a1e161 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -20,9 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -34,10 +31,25 @@
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
-#include "xfs_dinode.h"
struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
+/*
+ * @mode, if set, indicates that the type field needs to be set up.
+ * This uses the transformation from file mode to DT_* as defined in linux/fs.h
+ * for file type specification. This will be propagated into the directory
+ * structure if appropriate for the given operation and filesystem config.
+ */
+const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
+ [0] = XFS_DIR3_FT_UNKNOWN,
+ [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
+ [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
+ [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
+ [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
+ [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
+ [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
+};
/*
* ASCII case-insensitive (ie. A-Z) support for directories that was
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 4dff261e6ed5..e55353651f5b 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -32,6 +32,12 @@ struct xfs_dir2_data_unused;
extern struct xfs_name xfs_name_dotdot;
/*
+ * directory filetype conversion tables.
+ */
+#define S_SHIFT 12
+extern const unsigned char xfs_mode_to_ftype[];
+
+/*
* directory operations vector for encode/decode routines
*/
struct xfs_dir_ops {
@@ -177,4 +183,138 @@ extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
extern const struct xfs_buf_ops xfs_dir3_free_buf_ops;
extern const struct xfs_buf_ops xfs_dir3_data_buf_ops;
+/*
+ * Directory offset/block conversion functions.
+ *
+ * DB blocks here are logical directory block numbers, not filesystem blocks.
+ */
+
+/*
+ * Convert dataptr to byte in file space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_dataptr_to_byte(xfs_dir2_dataptr_t dp)
+{
+ return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
+}
+
+/*
+ * Convert byte in file space to dataptr. It had better be aligned.
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_byte_to_dataptr(xfs_dir2_off_t by)
+{
+ return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
+}
+
+/*
+ * Convert byte in space to (DB) block
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_byte_to_db(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+ return (xfs_dir2_db_t)(by >> geo->blklog);
+}
+
+/*
+ * Convert dataptr to a block number
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_dataptr_to_db(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
+{
+ return xfs_dir2_byte_to_db(geo, xfs_dir2_dataptr_to_byte(dp));
+}
+
+/*
+ * Convert byte in space to offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_byte_to_off(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+ return (xfs_dir2_data_aoff_t)(by & (geo->blksize - 1));
+}
+
+/*
+ * Convert dataptr to a byte offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_dataptr_to_off(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
+{
+ return xfs_dir2_byte_to_off(geo, xfs_dir2_dataptr_to_byte(dp));
+}
+
+/*
+ * Convert block and offset to byte in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_db_off_to_byte(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
+ xfs_dir2_data_aoff_t o)
+{
+ return ((xfs_dir2_off_t)db << geo->blklog) + o;
+}
+
+/*
+ * Convert block (DB) to block (dablk)
+ */
+static inline xfs_dablk_t
+xfs_dir2_db_to_da(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+ return (xfs_dablk_t)(db << (geo->blklog - geo->fsblog));
+}
+
+/*
+ * Convert byte in space to (DA) block
+ */
+static inline xfs_dablk_t
+xfs_dir2_byte_to_da(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+ return xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, by));
+}
+
+/*
+ * Convert block and offset to dataptr
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_db_off_to_dataptr(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
+ xfs_dir2_data_aoff_t o)
+{
+ return xfs_dir2_byte_to_dataptr(xfs_dir2_db_off_to_byte(geo, db, o));
+}
+
+/*
+ * Convert block (dablk) to block (DB)
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_da_to_db(struct xfs_da_geometry *geo, xfs_dablk_t da)
+{
+ return (xfs_dir2_db_t)(da >> (geo->blklog - geo->fsblog));
+}
+
+/*
+ * Convert block (dablk) to byte offset in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_da_to_byte(struct xfs_da_geometry *geo, xfs_dablk_t da)
+{
+ return xfs_dir2_db_off_to_byte(geo, xfs_dir2_da_to_db(geo, da), 0);
+}
+
+/*
+ * Directory tail pointer accessor functions. Based on block geometry.
+ */
+static inline struct xfs_dir2_block_tail *
+xfs_dir2_block_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_data_hdr *hdr)
+{
+ return ((struct xfs_dir2_block_tail *)
+ ((char *)hdr + geo->blksize)) - 1;
+}
+
+static inline struct xfs_dir2_leaf_tail *
+xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
+{
+ return (struct xfs_dir2_leaf_tail *)
+ ((char *)lp + geo->blksize -
+ sizeof(struct xfs_dir2_leaf_tail));
+}
+
#endif /* __XFS_DIR2_H__ */
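Aside: the xfs_mode_to_ftype[] table exported above (together with S_SHIFT) lets directory code turn a VFS mode into the on-disk XFS_DIR3_FT_* byte with a single array lookup. A minimal sketch of how such a lookup is used; the wrapper name is illustrative, and it assumes the mode carries one of the standard S_IF* type values:

#include <linux/stat.h>

static unsigned char mode_to_dir_ftype(umode_t mode)
{
	/* Index by the S_IFMT type bits, shifted down by S_SHIFT (12). */
	return xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
}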
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 9628ceccfa02..9354e190b82e 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -36,7 +34,6 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
-#include "xfs_dinode.h"
/*
* Local function prototypes.
@@ -353,7 +350,6 @@ xfs_dir2_block_addname(
int low; /* low index for binary srch */
int lowstale; /* low stale index */
int mid=0; /* midpoint for binary srch */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log header */
int needscan; /* need to rescan freespace */
__be16 *tagp; /* pointer to tag value */
@@ -363,7 +359,6 @@ xfs_dir2_block_addname(
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
/* Read the (one and only) directory block into bp. */
error = xfs_dir3_block_read(tp, dp, &bp);
@@ -618,7 +613,6 @@ xfs_dir2_block_lookup(
xfs_inode_t *dp; /* incore inode */
int ent; /* entry index */
int error; /* error return value */
- xfs_mount_t *mp; /* filesystem mount point */
trace_xfs_dir2_block_lookup(args);
@@ -629,7 +623,6 @@ xfs_dir2_block_lookup(
if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent)))
return error;
dp = args->dp;
- mp = dp->i_mount;
hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
btp = xfs_dir2_block_tail_p(args->geo, hdr);
@@ -770,7 +763,6 @@ xfs_dir2_block_removename(
xfs_inode_t *dp; /* incore inode */
int ent; /* block leaf entry index */
int error; /* error return value */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log block header */
int needscan; /* need to fixup bestfree */
xfs_dir2_sf_hdr_t sfh; /* shortform header */
@@ -788,7 +780,6 @@ xfs_dir2_block_removename(
}
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
hdr = bp->b_addr;
btp = xfs_dir2_block_tail_p(args->geo, hdr);
blp = xfs_dir2_block_leaf_p(btp);
@@ -852,7 +843,6 @@ xfs_dir2_block_replace(
xfs_inode_t *dp; /* incore inode */
int ent; /* leaf entry index */
int error; /* error return value */
- xfs_mount_t *mp; /* filesystem mount point */
trace_xfs_dir2_block_replace(args);
@@ -864,7 +854,6 @@ xfs_dir2_block_replace(
return error;
}
dp = args->dp;
- mp = dp->i_mount;
hdr = bp->b_addr;
btp = xfs_dir2_block_tail_p(args->geo, hdr);
blp = xfs_dir2_block_leaf_p(btp);
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index fdd803fecb8e..5ff31be9b1cd 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a19174eb3cb2..106119955400 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -384,7 +382,6 @@ xfs_dir2_block_to_leaf(
xfs_dir2_db_t ldb; /* leaf block's bno */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log block header */
int needscan; /* need to rescan bestfree */
xfs_trans_t *tp; /* transaction pointer */
@@ -395,7 +392,6 @@ xfs_dir2_block_to_leaf(
trace_xfs_dir2_block_to_leaf(args);
dp = args->dp;
- mp = dp->i_mount;
tp = args->trans;
/*
* Add the leaf block to the inode.
@@ -626,7 +622,6 @@ xfs_dir2_leaf_addname(
int lfloghigh; /* high leaf logging index */
int lowstale; /* index of prev stale leaf */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail pointer */
- xfs_mount_t *mp; /* filesystem mount point */
int needbytes; /* leaf block bytes needed */
int needlog; /* need to log data header */
int needscan; /* need to rescan data free */
@@ -641,7 +636,6 @@ xfs_dir2_leaf_addname(
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
if (error)
@@ -1356,11 +1350,9 @@ xfs_dir2_leaf_removename(
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log data header */
int needscan; /* need to rescan data frees */
xfs_dir2_data_off_t oldbest; /* old value of best free */
- xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir2_data_free *bf; /* bestfree table */
struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
@@ -1374,8 +1366,6 @@ xfs_dir2_leaf_removename(
return error;
}
dp = args->dp;
- tp = args->trans;
- mp = dp->i_mount;
leaf = lbp->b_addr;
hdr = dbp->b_addr;
xfs_dir3_data_check(dp, dbp);
@@ -1607,11 +1597,9 @@ xfs_dir2_leaf_trim_data(
int error; /* error return value */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
- xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
dp = args->dp;
- mp = dp->i_mount;
tp = args->trans;
/*
* Read the offending data block. We need its buffer.
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 2ae6ac2c11ae..41b80d3d3877 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -297,7 +295,6 @@ xfs_dir2_leaf_to_node(
int i; /* leaf freespace index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
- xfs_mount_t *mp; /* filesystem mount point */
int n; /* count of live freespc ents */
xfs_dir2_data_off_t off; /* freespace entry value */
__be16 *to; /* pointer to freespace entry */
@@ -307,7 +304,6 @@ xfs_dir2_leaf_to_node(
trace_xfs_dir2_leaf_to_node(args);
dp = args->dp;
- mp = dp->i_mount;
tp = args->trans;
/*
* Add a freespace block to the directory.
@@ -387,16 +383,12 @@ xfs_dir2_leafn_add(
int lfloghigh; /* high leaf entry logging */
int lfloglow; /* low leaf entry logging */
int lowstale; /* previous stale entry */
- xfs_mount_t *mp; /* filesystem mount point */
- xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
trace_xfs_dir2_leafn_add(args, index);
dp = args->dp;
- mp = dp->i_mount;
- tp = args->trans;
leaf = bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ents = dp->d_ops->leaf_ents_p(leaf);
@@ -1170,7 +1162,6 @@ xfs_dir2_leafn_remove(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
int longest; /* longest data free entry */
int off; /* data block entry offset */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log data header */
int needscan; /* need to rescan data frees */
xfs_trans_t *tp; /* transaction pointer */
@@ -1182,7 +1173,6 @@ xfs_dir2_leafn_remove(
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
leaf = bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ents = dp->d_ops->leaf_ents_p(leaf);
@@ -1323,7 +1313,6 @@ xfs_dir2_leafn_split(
xfs_da_args_t *args; /* operation arguments */
xfs_dablk_t blkno; /* new leaf block number */
int error; /* error return value */
- xfs_mount_t *mp; /* filesystem mount point */
struct xfs_inode *dp;
/*
@@ -1331,7 +1320,6 @@ xfs_dir2_leafn_split(
*/
args = state->args;
dp = args->dp;
- mp = dp->i_mount;
ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
error = xfs_da_grow_inode(args, &blkno);
if (error) {
@@ -2231,12 +2219,10 @@ xfs_dir2_node_trim_free(
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
xfs_dir2_free_t *free; /* freespace structure */
- xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir3_icfree_hdr freehdr;
dp = args->dp;
- mp = dp->i_mount;
tp = args->trans;
/*
* Read the freespace block.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 27ce0794d196..ef9f6ead96a4 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -20,140 +20,6 @@
struct dir_context;
-/*
- * Directory offset/block conversion functions.
- *
- * DB blocks here are logical directory block numbers, not filesystem blocks.
- */
-
-/*
- * Convert dataptr to byte in file space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_dataptr_to_byte(xfs_dir2_dataptr_t dp)
-{
- return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
-}
-
-/*
- * Convert byte in file space to dataptr. It had better be aligned.
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_byte_to_dataptr(xfs_dir2_off_t by)
-{
- return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
-}
-
-/*
- * Convert byte in space to (DB) block
- */
-static inline xfs_dir2_db_t
-xfs_dir2_byte_to_db(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
- return (xfs_dir2_db_t)(by >> geo->blklog);
-}
-
-/*
- * Convert dataptr to a block number
- */
-static inline xfs_dir2_db_t
-xfs_dir2_dataptr_to_db(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
-{
- return xfs_dir2_byte_to_db(geo, xfs_dir2_dataptr_to_byte(dp));
-}
-
-/*
- * Convert byte in space to offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_byte_to_off(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
- return (xfs_dir2_data_aoff_t)(by & (geo->blksize - 1));
-}
-
-/*
- * Convert dataptr to a byte offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_dataptr_to_off(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
-{
- return xfs_dir2_byte_to_off(geo, xfs_dir2_dataptr_to_byte(dp));
-}
-
-/*
- * Convert block and offset to byte in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_db_off_to_byte(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
- xfs_dir2_data_aoff_t o)
-{
- return ((xfs_dir2_off_t)db << geo->blklog) + o;
-}
-
-/*
- * Convert block (DB) to block (dablk)
- */
-static inline xfs_dablk_t
-xfs_dir2_db_to_da(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return (xfs_dablk_t)(db << (geo->blklog - geo->fsblog));
-}
-
-/*
- * Convert byte in space to (DA) block
- */
-static inline xfs_dablk_t
-xfs_dir2_byte_to_da(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
- return xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, by));
-}
-
-/*
- * Convert block and offset to dataptr
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_db_off_to_dataptr(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
- xfs_dir2_data_aoff_t o)
-{
- return xfs_dir2_byte_to_dataptr(xfs_dir2_db_off_to_byte(geo, db, o));
-}
-
-/*
- * Convert block (dablk) to block (DB)
- */
-static inline xfs_dir2_db_t
-xfs_dir2_da_to_db(struct xfs_da_geometry *geo, xfs_dablk_t da)
-{
- return (xfs_dir2_db_t)(da >> (geo->blklog - geo->fsblog));
-}
-
-/*
- * Convert block (dablk) to byte offset in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_da_to_byte(struct xfs_da_geometry *geo, xfs_dablk_t da)
-{
- return xfs_dir2_db_off_to_byte(geo, xfs_dir2_da_to_db(geo, da), 0);
-}
-
-/*
- * Directory tail pointer accessor functions. Based on block geometry.
- */
-static inline struct xfs_dir2_block_tail *
-xfs_dir2_block_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_data_hdr *hdr)
-{
- return ((struct xfs_dir2_block_tail *)
- ((char *)hdr + geo->blksize)) - 1;
-}
-
-static inline struct xfs_dir2_leaf_tail *
-xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
-{
- return (struct xfs_dir2_leaf_tail *)
- ((char *)lp + geo->blksize -
- sizeof(struct xfs_dir2_leaf_tail));
-}
-
/* xfs_dir2.c */
extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
@@ -161,12 +27,6 @@ extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
const unsigned char *name, int len);
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
-
-extern unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp,
- __uint8_t filetype);
-
/* xfs_dir2_block.c */
extern int xfs_dir3_block_read(struct xfs_trans *tp, struct xfs_inode *dp,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 5079e051ef08..974d62e677f4 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -32,7 +30,6 @@
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_trace.h"
-#include "xfs_dinode.h"
/*
* Prototypes for internal functions.
@@ -455,13 +452,11 @@ xfs_dir2_sf_addname_hard(
xfs_dir2_sf_hdr_t *oldsfp; /* original shortform dir */
xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
xfs_dir2_sf_hdr_t *sfp; /* new shortform dir */
- struct xfs_mount *mp;
/*
* Copy the old directory to the stack buffer.
*/
dp = args->dp;
- mp = dp->i_mount;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
@@ -542,7 +537,6 @@ xfs_dir2_sf_addname_pick(
xfs_inode_t *dp; /* incore directory inode */
int holefit; /* found hole it will fit in */
int i; /* entry number */
- xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_data_aoff_t offset; /* data block offset */
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
@@ -550,7 +544,6 @@ xfs_dir2_sf_addname_pick(
int used; /* data bytes used */
dp = args->dp;
- mp = dp->i_mount;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
size = dp->d_ops->data_entsize(args->namelen);
@@ -616,10 +609,8 @@ xfs_dir2_sf_check(
int offset; /* data offset */
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
- struct xfs_mount *mp;
dp = args->dp;
- mp = dp->i_mount;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
offset = dp->d_ops->data_first_offset;
@@ -1016,12 +1007,10 @@ xfs_dir2_sf_toino4(
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
- struct xfs_mount *mp;
trace_xfs_dir2_sf_toino4(args);
dp = args->dp;
- mp = dp->i_mount;
/*
* Copy the old directory to the buffer.
@@ -1094,12 +1083,10 @@ xfs_dir2_sf_toino8(
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
- struct xfs_mount *mp;
trace_xfs_dir2_sf_toino8(args);
dp = args->dp;
- mp = dp->i_mount;
/*
* Copy the old directory to the buffer.
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index bb969337efc8..6fbf2d853a54 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -22,8 +22,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 7e42bba9a420..fbd6da263571 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -34,6 +34,1077 @@ struct xfs_buf;
struct xfs_ifork;
/*
+ * Super block
+ * Fits into a sector-sized buffer at address 0 of each allocation group.
+ * Only the first of these is ever updated except during growfs.
+ */
+#define XFS_SB_MAGIC 0x58465342 /* 'XFSB' */
+#define XFS_SB_VERSION_1 1 /* 5.3, 6.0.1, 6.1 */
+#define XFS_SB_VERSION_2 2 /* 6.2 - attributes */
+#define XFS_SB_VERSION_3 3 /* 6.2 - new inode version */
+#define XFS_SB_VERSION_4 4 /* 6.2+ - bitmask version */
+#define XFS_SB_VERSION_5 5 /* CRC enabled filesystem */
+#define XFS_SB_VERSION_NUMBITS 0x000f
+#define XFS_SB_VERSION_ALLFBITS 0xfff0
+#define XFS_SB_VERSION_ATTRBIT 0x0010
+#define XFS_SB_VERSION_NLINKBIT 0x0020
+#define XFS_SB_VERSION_QUOTABIT 0x0040
+#define XFS_SB_VERSION_ALIGNBIT 0x0080
+#define XFS_SB_VERSION_DALIGNBIT 0x0100
+#define XFS_SB_VERSION_SHAREDBIT 0x0200
+#define XFS_SB_VERSION_LOGV2BIT 0x0400
+#define XFS_SB_VERSION_SECTORBIT 0x0800
+#define XFS_SB_VERSION_EXTFLGBIT 0x1000
+#define XFS_SB_VERSION_DIRV2BIT 0x2000
+#define XFS_SB_VERSION_BORGBIT 0x4000 /* ASCII only case-insens. */
+#define XFS_SB_VERSION_MOREBITSBIT 0x8000
+
+/*
+ * Supported feature bit list is just all bits in the versionnum field because
+ * we've used them all up and understand them all. Except, of course, for the
+ * shared superblock bit, whose purpose is unknown and which is therefore unsupported.
+ */
+#define XFS_SB_VERSION_OKBITS \
+ ((XFS_SB_VERSION_NUMBITS | XFS_SB_VERSION_ALLFBITS) & \
+ ~XFS_SB_VERSION_SHAREDBIT)
+
+/*
+ * There are two words to hold XFS "feature" bits: the original
+ * word, sb_versionnum, and sb_features2. Whenever a bit is set in
+ * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
+ *
+ * These defines represent bits in sb_features2.
+ */
+#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001
+#define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */
+#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
+#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
+#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */
+#define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */
+#define XFS_SB_VERSION2_CRCBIT 0x00000100 /* metadata CRCs */
+#define XFS_SB_VERSION2_FTYPE 0x00000200 /* inode type in dir */
+
+#define XFS_SB_VERSION2_OKBITS \
+ (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
+ XFS_SB_VERSION2_ATTR2BIT | \
+ XFS_SB_VERSION2_PROJID32BIT | \
+ XFS_SB_VERSION2_FTYPE)
+
+/*
+ * Superblock - in core version. Must match the ondisk version below.
+ * Must be padded to 64 bit alignment.
+ */
+typedef struct xfs_sb {
+ __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
+ __uint32_t sb_blocksize; /* logical block size, bytes */
+ xfs_rfsblock_t sb_dblocks; /* number of data blocks */
+ xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
+ xfs_rtblock_t sb_rextents; /* number of realtime extents */
+ uuid_t sb_uuid; /* file system unique id */
+ xfs_fsblock_t sb_logstart; /* starting block of log if internal */
+ xfs_ino_t sb_rootino; /* root inode number */
+ xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */
+ xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */
+ xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */
+ xfs_agblock_t sb_agblocks; /* size of an allocation group */
+ xfs_agnumber_t sb_agcount; /* number of allocation groups */
+ xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */
+ xfs_extlen_t sb_logblocks; /* number of log blocks */
+ __uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */
+ __uint16_t sb_sectsize; /* volume sector size, bytes */
+ __uint16_t sb_inodesize; /* inode size, bytes */
+ __uint16_t sb_inopblock; /* inodes per block */
+ char sb_fname[12]; /* file system name */
+ __uint8_t sb_blocklog; /* log2 of sb_blocksize */
+ __uint8_t sb_sectlog; /* log2 of sb_sectsize */
+ __uint8_t sb_inodelog; /* log2 of sb_inodesize */
+ __uint8_t sb_inopblog; /* log2 of sb_inopblock */
+ __uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */
+ __uint8_t sb_rextslog; /* log2 of sb_rextents */
+ __uint8_t sb_inprogress; /* mkfs is in progress, don't mount */
+ __uint8_t sb_imax_pct; /* max % of fs for inode space */
+ /* statistics */
+ /*
+ * These fields must remain contiguous. If you really
+ * want to change their layout, make sure you fix the
+ * code in xfs_trans_apply_sb_deltas().
+ */
+ __uint64_t sb_icount; /* allocated inodes */
+ __uint64_t sb_ifree; /* free inodes */
+ __uint64_t sb_fdblocks; /* free data blocks */
+ __uint64_t sb_frextents; /* free realtime extents */
+ /*
+ * End contiguous fields.
+ */
+ xfs_ino_t sb_uquotino; /* user quota inode */
+ xfs_ino_t sb_gquotino; /* group quota inode */
+ __uint16_t sb_qflags; /* quota flags */
+ __uint8_t sb_flags; /* misc. flags */
+ __uint8_t sb_shared_vn; /* shared version number */
+ xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */
+ __uint32_t sb_unit; /* stripe or raid unit */
+ __uint32_t sb_width; /* stripe or raid width */
+ __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
+ __uint8_t sb_logsectlog; /* log2 of the log sector size */
+ __uint16_t sb_logsectsize; /* sector size for the log, bytes */
+ __uint32_t sb_logsunit; /* stripe unit size for the log */
+ __uint32_t sb_features2; /* additional feature bits */
+
+ /*
+ * Bad features2 field, the result of failing to pad the superblock
+ * structure out to 64 bits. Some machines use this slot for features2
+ * bits, so the easiest course is to mark it bad and not use it for
+ * anything else.
+ */
+ __uint32_t sb_bad_features2;
+
+ /* version 5 superblock fields start here */
+
+ /* feature masks */
+ __uint32_t sb_features_compat;
+ __uint32_t sb_features_ro_compat;
+ __uint32_t sb_features_incompat;
+ __uint32_t sb_features_log_incompat;
+
+ __uint32_t sb_crc; /* superblock crc */
+ __uint32_t sb_pad;
+
+ xfs_ino_t sb_pquotino; /* project quota inode */
+ xfs_lsn_t sb_lsn; /* last write sequence */
+
+ /* must be padded to 64 bit alignment */
+} xfs_sb_t;
+
+#define XFS_SB_CRC_OFF offsetof(struct xfs_sb, sb_crc)
+
+/*
+ * Superblock - on disk version. Must match the in core version above.
+ * Must be padded to 64 bit alignment.
+ */
+typedef struct xfs_dsb {
+ __be32 sb_magicnum; /* magic number == XFS_SB_MAGIC */
+ __be32 sb_blocksize; /* logical block size, bytes */
+ __be64 sb_dblocks; /* number of data blocks */
+ __be64 sb_rblocks; /* number of realtime blocks */
+ __be64 sb_rextents; /* number of realtime extents */
+ uuid_t sb_uuid; /* file system unique id */
+ __be64 sb_logstart; /* starting block of log if internal */
+ __be64 sb_rootino; /* root inode number */
+ __be64 sb_rbmino; /* bitmap inode for realtime extents */
+ __be64 sb_rsumino; /* summary inode for rt bitmap */
+ __be32 sb_rextsize; /* realtime extent size, blocks */
+ __be32 sb_agblocks; /* size of an allocation group */
+ __be32 sb_agcount; /* number of allocation groups */
+ __be32 sb_rbmblocks; /* number of rt bitmap blocks */
+ __be32 sb_logblocks; /* number of log blocks */
+ __be16 sb_versionnum; /* header version == XFS_SB_VERSION */
+ __be16 sb_sectsize; /* volume sector size, bytes */
+ __be16 sb_inodesize; /* inode size, bytes */
+ __be16 sb_inopblock; /* inodes per block */
+ char sb_fname[12]; /* file system name */
+ __u8 sb_blocklog; /* log2 of sb_blocksize */
+ __u8 sb_sectlog; /* log2 of sb_sectsize */
+ __u8 sb_inodelog; /* log2 of sb_inodesize */
+ __u8 sb_inopblog; /* log2 of sb_inopblock */
+ __u8 sb_agblklog; /* log2 of sb_agblocks (rounded up) */
+ __u8 sb_rextslog; /* log2 of sb_rextents */
+ __u8 sb_inprogress; /* mkfs is in progress, don't mount */
+ __u8 sb_imax_pct; /* max % of fs for inode space */
+ /* statistics */
+ /*
+ * These fields must remain contiguous. If you really
+ * want to change their layout, make sure you fix the
+ * code in xfs_trans_apply_sb_deltas().
+ */
+ __be64 sb_icount; /* allocated inodes */
+ __be64 sb_ifree; /* free inodes */
+ __be64 sb_fdblocks; /* free data blocks */
+ __be64 sb_frextents; /* free realtime extents */
+ /*
+ * End contiguous fields.
+ */
+ __be64 sb_uquotino; /* user quota inode */
+ __be64 sb_gquotino; /* group quota inode */
+ __be16 sb_qflags; /* quota flags */
+ __u8 sb_flags; /* misc. flags */
+ __u8 sb_shared_vn; /* shared version number */
+ __be32 sb_inoalignmt; /* inode chunk alignment, fsblocks */
+ __be32 sb_unit; /* stripe or raid unit */
+ __be32 sb_width; /* stripe or raid width */
+ __u8 sb_dirblklog; /* log2 of dir block size (fsbs) */
+ __u8 sb_logsectlog; /* log2 of the log sector size */
+ __be16 sb_logsectsize; /* sector size for the log, bytes */
+ __be32 sb_logsunit; /* stripe unit size for the log */
+ __be32 sb_features2; /* additional feature bits */
+ /*
+ * Bad features2 field, the result of failing to pad the superblock
+ * structure out to 64 bits. Some machines use this slot for features2
+ * bits, so the easiest course is to mark it bad and not use it for
+ * anything else.
+ */
+ __be32 sb_bad_features2;
+
+ /* version 5 superblock fields start here */
+
+ /* feature masks */
+ __be32 sb_features_compat;
+ __be32 sb_features_ro_compat;
+ __be32 sb_features_incompat;
+ __be32 sb_features_log_incompat;
+
+ __le32 sb_crc; /* superblock crc */
+ __be32 sb_pad;
+
+ __be64 sb_pquotino; /* project quota inode */
+ __be64 sb_lsn; /* last write sequence */
+
+ /* must be padded to 64 bit alignment */
+} xfs_dsb_t;
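To make the pairing of the two superblock layouts above concrete, here is a hedged, self-contained sketch of the endian conversion that keeps an in-core copy in step with the on-disk one. The two-field structs and helper are illustrative stand-ins only; in the kernel the conversion is done field by field by xfs_sb_from_disk() using be32_to_cpu()/be64_to_cpu().

/* Illustrative on-disk (big-endian) to in-core (native-endian) conversion. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* ntohl()/htonl() stand in for be32 helpers */

struct demo_dsb { uint32_t sb_magicnum; uint32_t sb_blocksize; };	/* on disk */
struct demo_sb  { uint32_t sb_magicnum; uint32_t sb_blocksize; };	/* in core */

static void sb_from_disk(struct demo_sb *to, const struct demo_dsb *from)
{
	to->sb_magicnum  = ntohl(from->sb_magicnum);
	to->sb_blocksize = ntohl(from->sb_blocksize);
}

int main(void)
{
	struct demo_dsb d = { htonl(0x58465342), htonl(4096) };	/* "XFSB", 4k */
	struct demo_sb s;

	sb_from_disk(&s, &d);
	printf("magic 0x%x, blocksize %u\n",
	       (unsigned)s.sb_magicnum, (unsigned)s.sb_blocksize);
	return 0;
}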
+
+/*
+ * Sequence number values for the fields.
+ */
+typedef enum {
+ XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
+ XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
+ XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
+ XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
+ XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
+ XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
+ XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
+ XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
+ XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
+ XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
+ XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
+ XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
+ XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
+ XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
+ XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
+ XFS_SBS_PQUOTINO, XFS_SBS_LSN,
+ XFS_SBS_FIELDCOUNT
+} xfs_sb_field_t;
+
+/*
+ * Mask values, defined based on the xfs_sb_field_t values.
+ * Only define the ones we're using.
+ */
+#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x)
+#define XFS_SB_UUID XFS_SB_MVAL(UUID)
+#define XFS_SB_FNAME XFS_SB_MVAL(FNAME)
+#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO)
+#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO)
+#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO)
+#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM)
+#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO)
+#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO)
+#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS)
+#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
+#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
+#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
+#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
+#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
+#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
+#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
+#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2)
+#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
+#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
+#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
+#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
+#define XFS_SB_CRC XFS_SB_MVAL(CRC)
+#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO)
+#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
+#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
+#define XFS_SB_MOD_BITS \
+ (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
+ XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
+ XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
+ XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
+ XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
+ XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
+ XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
+
+
+/*
+ * Misc. Flags - warning - these will be cleared by xfs_repair unless
+ * a feature bit is set when the flag is used.
+ */
+#define XFS_SBF_NOFLAGS 0x00 /* no flags set */
+#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */
+
+/*
+ * define max. shared version we can interoperate with
+ */
+#define XFS_SB_MAX_SHARED_VN 0
+
+#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS)
+
+/*
+ * The first XFS version we support is a v4 superblock with V2 directories.
+ */
+static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
+{
+ if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
+ return false;
+
+ /* check for unknown features in the fs */
+ if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
+ ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
+ (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
+ return false;
+
+ return true;
+}
+
+static inline bool xfs_sb_good_version(struct xfs_sb *sbp)
+{
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
+ return true;
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4)
+ return xfs_sb_good_v4_features(sbp);
+ return false;
+}
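As a worked illustration of the checks above, the sketch below accepts a v4 superblock only when the V2 directory bit is set and no bits outside XFS_SB_VERSION_OKBITS are present. It folds the version-number test into a single helper and omits the sb_features2/MOREBITS handling, so it is a simplified model of the two functions above rather than a copy of them; the sample versionnum is made up.

/* Simplified, standalone model of the v4 superblock version check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SB_VERSION_NUMBITS	0x000f
#define SB_VERSION_ALLFBITS	0xfff0
#define SB_VERSION_SHAREDBIT	0x0200
#define SB_VERSION_DIRV2BIT	0x2000
#define SB_VERSION_OKBITS \
	((SB_VERSION_NUMBITS | SB_VERSION_ALLFBITS) & ~SB_VERSION_SHAREDBIT)

static bool good_v4(uint16_t versionnum)
{
	if (!(versionnum & SB_VERSION_DIRV2BIT))	/* need V2 directories */
		return false;
	if (versionnum & ~SB_VERSION_OKBITS)		/* unknown or shared bits */
		return false;
	return (versionnum & SB_VERSION_NUMBITS) == 4;	/* must be a v4 sb */
}

int main(void)
{
	uint16_t v = 0x2014;	/* made up: v4 + DIRV2 + ATTR bits */

	printf("versionnum 0x%04x -> %s\n", v, good_v4(v) ? "ok" : "reject");
	return 0;
}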
+
+/*
+ * Detect a mismatched features2 field. Older kernels read/wrote
+ * this into the wrong slot, so to be safe we keep them in sync.
+ */
+static inline bool xfs_sb_has_mismatched_features2(struct xfs_sb *sbp)
+{
+ return sbp->sb_bad_features2 != sbp->sb_features2;
+}
+
+static inline bool xfs_sb_version_hasattr(struct xfs_sb *sbp)
+{
+ return (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT);
+}
+
+static inline void xfs_sb_version_addattr(struct xfs_sb *sbp)
+{
+ sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT;
+}
+
+static inline bool xfs_sb_version_hasquota(struct xfs_sb *sbp)
+{
+ return (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT);
+}
+
+static inline void xfs_sb_version_addquota(struct xfs_sb *sbp)
+{
+ sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT;
+}
+
+static inline bool xfs_sb_version_hasalign(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
+ (sbp->sb_versionnum & XFS_SB_VERSION_ALIGNBIT));
+}
+
+static inline bool xfs_sb_version_hasdalign(struct xfs_sb *sbp)
+{
+ return (sbp->sb_versionnum & XFS_SB_VERSION_DALIGNBIT);
+}
+
+static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
+ (sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
+}
+
+static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
+ (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
+}
+
+static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
+{
+ return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
+}
+
+static inline bool xfs_sb_version_hasasciici(struct xfs_sb *sbp)
+{
+ return (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT);
+}
+
+static inline bool xfs_sb_version_hasmorebits(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
+ (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT);
+}
+
+/*
+ * sb_features2 bit version macros.
+ */
+static inline bool xfs_sb_version_haslazysbcount(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
+ (xfs_sb_version_hasmorebits(sbp) &&
+ (sbp->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT));
+}
+
+static inline bool xfs_sb_version_hasattr2(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
+ (xfs_sb_version_hasmorebits(sbp) &&
+ (sbp->sb_features2 & XFS_SB_VERSION2_ATTR2BIT));
+}
+
+static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
+{
+ sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
+ sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
+ sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
+}
+
+static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
+{
+ sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
+ sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
+ if (!sbp->sb_features2)
+ sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
+}
+
+static inline bool xfs_sb_version_hasprojid32bit(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
+ (xfs_sb_version_hasmorebits(sbp) &&
+ (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT));
+}
+
+static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
+{
+ sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
+ sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
+ sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
+}
+
+/*
+ * Extended v5 superblock feature masks. These are to be used for new v5
+ * superblock features only.
+ *
+ * Compat features are new features that old kernels will neither notice nor
+ * be affected by, so such filesystems can be mounted read-write without
+ * issues.
+ *
+ * RO-Compat (read only) features are ones that old kernels can read but will
+ * break if they write. Hence only read-only mounts of such filesystems are
+ * allowed on kernels that don't support the feature bit.
+ *
+ * InCompat features are features which old kernels will not understand and so
+ * must not mount.
+ *
+ * Log-InCompat features are for changes to log formats or new transactions that
+ * can't be replayed on older kernels. The fields are set when the filesystem is
+ * mounted, and a clean unmount clears the fields.
+ */
+#define XFS_SB_FEAT_COMPAT_ALL 0
+#define XFS_SB_FEAT_COMPAT_UNKNOWN ~XFS_SB_FEAT_COMPAT_ALL
+static inline bool
+xfs_sb_has_compat_feature(
+ struct xfs_sb *sbp,
+ __uint32_t feature)
+{
+ return (sbp->sb_features_compat & feature) != 0;
+}
+
+#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
+#define XFS_SB_FEAT_RO_COMPAT_ALL \
+ (XFS_SB_FEAT_RO_COMPAT_FINOBT)
+#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
+static inline bool
+xfs_sb_has_ro_compat_feature(
+ struct xfs_sb *sbp,
+ __uint32_t feature)
+{
+ return (sbp->sb_features_ro_compat & feature) != 0;
+}
+
+#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
+#define XFS_SB_FEAT_INCOMPAT_ALL \
+ (XFS_SB_FEAT_INCOMPAT_FTYPE)
+
+#define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL
+static inline bool
+xfs_sb_has_incompat_feature(
+ struct xfs_sb *sbp,
+ __uint32_t feature)
+{
+ return (sbp->sb_features_incompat & feature) != 0;
+}
+
+#define XFS_SB_FEAT_INCOMPAT_LOG_ALL 0
+#define XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_LOG_ALL
+static inline bool
+xfs_sb_has_incompat_log_feature(
+ struct xfs_sb *sbp,
+ __uint32_t feature)
+{
+ return (sbp->sb_features_log_incompat & feature) != 0;
+}
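The comment block above describes a policy without showing it in action, so here is a small hypothetical sketch of how the three feature classes gate a mount decision: unknown incompat bits refuse the mount, unknown ro-compat bits force a read-only mount, and unknown compat bits are simply ignored. The struct, helper name and bit values are invented for illustration; the real checks live elsewhere in XFS.

/* Hypothetical mount gate built from the three v5 feature classes. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct feat { uint32_t compat, ro_compat, incompat; };

/* Returns false if the fs must not be mounted; *ro_only is set when only a
 * read-only mount is safe. Unknown compat bits are deliberately ignored. */
static bool can_mount(const struct feat *f, uint32_t known_ro,
		      uint32_t known_incompat, bool *ro_only)
{
	if (f->incompat & ~known_incompat)
		return false;
	*ro_only = (f->ro_compat & ~known_ro) != 0;
	return true;
}

int main(void)
{
	struct feat f = { .compat = 0x8, .ro_compat = 0x2, .incompat = 0x1 };
	bool ro;

	if (can_mount(&f, 0x1 /* known ro-compat */, 0x1 /* known incompat */, &ro))
		printf("mountable, %s\n", ro ? "read-only only" : "read-write");
	else
		printf("unknown incompat features, refusing mount\n");
	return 0;
}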
+
+/*
+ * V5 superblock specific feature checks
+ */
+static inline int xfs_sb_version_hascrc(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
+}
+
+static inline int xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
+}
+
+static inline int xfs_sb_version_hasftype(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_FTYPE)) ||
+ (xfs_sb_version_hasmorebits(sbp) &&
+ (sbp->sb_features2 & XFS_SB_VERSION2_FTYPE));
+}
+
+static inline int xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
+ (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_FINOBT);
+}
+
+/*
+ * end of superblock version macros
+ */
+
+static inline bool
+xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
+{
+ return (ino == sbp->sb_uquotino ||
+ ino == sbp->sb_gquotino ||
+ ino == sbp->sb_pquotino);
+}
+
+#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
+#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
+#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
+
+#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
+#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
+ xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d))
+#define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \
+ XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno))
+
+/*
+ * File system sector to basic block conversions.
+ */
+#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log)
+
+/*
+ * File system block to basic block conversions.
+ */
+#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log)
+#define XFS_BB_TO_FSB(mp,bb) \
+ (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log)
+#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log)
+
+/*
+ * File system block to byte conversions.
+ */
+#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
+#define XFS_B_TO_FSB(mp,b) \
+ ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
+#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
+#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
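A quick numeric sketch of the conversion macros above, assuming 4096-byte filesystem blocks and 512-byte basic blocks (m_blkbb_log of 3, sb_blocklog of 12); the input values are arbitrary and only meant to show the shift and round-up behaviour.

/* Shift arithmetic behind XFS_FSB_TO_BB, XFS_B_TO_FSB and XFS_B_TO_FSBT. */
#include <stdio.h>

int main(void)
{
	unsigned blkbb_log = 3;			/* 4096 / 512 = 8 BBs per block */
	unsigned blocklog = 12;
	unsigned long long blockmask = (1u << blocklog) - 1;

	unsigned long long fsbno = 10;
	unsigned long long bytes = 5000;

	printf("fsb %llu -> %llu basic blocks\n", fsbno, fsbno << blkbb_log);
	printf("%llu bytes -> %llu fsbs (rounded up), %llu fsbs (truncated)\n",
	       bytes, (bytes + blockmask) >> blocklog, bytes >> blocklog);
	return 0;
}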
+
+/*
+ * Allocation group header
+ *
+ * This is divided into three structures, placed in sequential 512-byte
+ * buffers after a copy of the superblock (also in a 512-byte buffer).
+ */
+#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */
+#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */
+#define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */
+#define XFS_AGF_VERSION 1
+#define XFS_AGI_VERSION 1
+
+#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION)
+#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
+
+/*
+ * Btree number 0 is bno, 1 is cnt. This value gives the size of the
+ * arrays below.
+ */
+#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
+
+/*
+ * The second word of agf_levels in the first a.g. overlaps the EFS
+ * superblock's magic number. Since the magic numbers valid for EFS
+ * are > 64k, our value cannot be confused for an EFS superblock's.
+ */
+
+typedef struct xfs_agf {
+ /*
+ * Common allocation group header information
+ */
+ __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */
+ __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */
+ __be32 agf_seqno; /* sequence # starting from 0 */
+ __be32 agf_length; /* size in blocks of a.g. */
+ /*
+ * Freespace information
+ */
+ __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
+ __be32 agf_spare0; /* spare field */
+ __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
+ __be32 agf_spare1; /* spare field */
+
+ __be32 agf_flfirst; /* first freelist block's index */
+ __be32 agf_fllast; /* last freelist block's index */
+ __be32 agf_flcount; /* count of blocks in freelist */
+ __be32 agf_freeblks; /* total free blocks */
+
+ __be32 agf_longest; /* longest free space */
+ __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
+ uuid_t agf_uuid; /* uuid of filesystem */
+
+ /*
+ * reserve some contiguous space for future logged fields before we add
+ * the unlogged fields. This makes the range logging via flags and
+ * structure offsets much simpler.
+ */
+ __be64 agf_spare64[16];
+
+ /* unlogged fields, written during buffer writeback. */
+ __be64 agf_lsn; /* last write sequence */
+ __be32 agf_crc; /* crc of agf sector */
+ __be32 agf_spare2;
+
+ /* structure must be padded to 64 bit alignment */
+} xfs_agf_t;
+
+#define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc)
+
+#define XFS_AGF_MAGICNUM 0x00000001
+#define XFS_AGF_VERSIONNUM 0x00000002
+#define XFS_AGF_SEQNO 0x00000004
+#define XFS_AGF_LENGTH 0x00000008
+#define XFS_AGF_ROOTS 0x00000010
+#define XFS_AGF_LEVELS 0x00000020
+#define XFS_AGF_FLFIRST 0x00000040
+#define XFS_AGF_FLLAST 0x00000080
+#define XFS_AGF_FLCOUNT 0x00000100
+#define XFS_AGF_FREEBLKS 0x00000200
+#define XFS_AGF_LONGEST 0x00000400
+#define XFS_AGF_BTREEBLKS 0x00000800
+#define XFS_AGF_UUID 0x00001000
+#define XFS_AGF_NUM_BITS 13
+#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
+
+#define XFS_AGF_FLAGS \
+ { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
+ { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
+ { XFS_AGF_SEQNO, "SEQNO" }, \
+ { XFS_AGF_LENGTH, "LENGTH" }, \
+ { XFS_AGF_ROOTS, "ROOTS" }, \
+ { XFS_AGF_LEVELS, "LEVELS" }, \
+ { XFS_AGF_FLFIRST, "FLFIRST" }, \
+ { XFS_AGF_FLLAST, "FLLAST" }, \
+ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
+ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
+ { XFS_AGF_LONGEST, "LONGEST" }, \
+ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
+ { XFS_AGF_UUID, "UUID" }
+
+/* disk block (xfs_daddr_t) in the AG */
+#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
+#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
+#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
+
+/*
+ * Size of the unlinked inode hash table in the agi.
+ */
+#define XFS_AGI_UNLINKED_BUCKETS 64
+
+typedef struct xfs_agi {
+ /*
+ * Common allocation group header information
+ */
+ __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */
+ __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */
+ __be32 agi_seqno; /* sequence # starting from 0 */
+ __be32 agi_length; /* size in blocks of a.g. */
+ /*
+ * Inode information
+ * Inodes are mapped by interpreting the inode number, so no
+ * mapping data is needed here.
+ */
+ __be32 agi_count; /* count of allocated inodes */
+ __be32 agi_root; /* root of inode btree */
+ __be32 agi_level; /* levels in inode btree */
+ __be32 agi_freecount; /* number of free inodes */
+
+ __be32 agi_newino; /* new inode just allocated */
+ __be32 agi_dirino; /* last directory inode chunk */
+ /*
+ * Hash table of inodes which have been unlinked but are
+ * still being referenced.
+ */
+ __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
+ /*
+ * This marks the end of logging region 1 and start of logging region 2.
+ */
+ uuid_t agi_uuid; /* uuid of filesystem */
+ __be32 agi_crc; /* crc of agi sector */
+ __be32 agi_pad32;
+ __be64 agi_lsn; /* last write sequence */
+
+ __be32 agi_free_root; /* root of the free inode btree */
+ __be32 agi_free_level;/* levels in free inode btree */
+
+ /* structure must be padded to 64 bit alignment */
+} xfs_agi_t;
+
+#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
+
+#define XFS_AGI_MAGICNUM (1 << 0)
+#define XFS_AGI_VERSIONNUM (1 << 1)
+#define XFS_AGI_SEQNO (1 << 2)
+#define XFS_AGI_LENGTH (1 << 3)
+#define XFS_AGI_COUNT (1 << 4)
+#define XFS_AGI_ROOT (1 << 5)
+#define XFS_AGI_LEVEL (1 << 6)
+#define XFS_AGI_FREECOUNT (1 << 7)
+#define XFS_AGI_NEWINO (1 << 8)
+#define XFS_AGI_DIRINO (1 << 9)
+#define XFS_AGI_UNLINKED (1 << 10)
+#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
+#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
+#define XFS_AGI_FREE_ROOT (1 << 11)
+#define XFS_AGI_FREE_LEVEL (1 << 12)
+#define XFS_AGI_NUM_BITS_R2 13
+
+/* disk block (xfs_daddr_t) in the AG */
+#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
+#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
+#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
+
+/*
+ * The third a.g. block contains the a.g. freelist, an array
+ * of block pointers to blocks owned by the allocation btree code.
+ */
+#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
+#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
+#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
+
+#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
+ (__be32 *)(bp)->b_addr)
+
+/*
+ * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
+ * slots in the beginning of the block for a proper header with the
+ * location information and CRC.
+ */
+#define XFS_AGFL_SIZE(mp) \
+ (((mp)->m_sb.sb_sectsize - \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ sizeof(struct xfs_agfl) : 0)) / \
+ sizeof(xfs_agblock_t))
+
+typedef struct xfs_agfl {
+ __be32 agfl_magicnum;
+ __be32 agfl_seqno;
+ uuid_t agfl_uuid;
+ __be64 agfl_lsn;
+ __be32 agfl_crc;
+ __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
+} xfs_agfl_t;
+
+#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
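Putting the AG header layout and the AGFL sizing above together, the sketch below assumes 512-byte sectors (one sector per basic block) and a 36-byte AGFL header; both figures are assumptions for illustration, since the real header size follows from the structure layout and its padding.

/* Illustrative AG header daddrs and AGFL slot count for 512-byte sectors. */
#include <stdio.h>

int main(void)
{
	unsigned sectbb_log = 0;	/* assumed: 512-byte sectors, 1 BB each */
	unsigned sectsize = 512;
	unsigned agfl_hdr = 36;		/* assumed AGFL header size in bytes */

	printf("SB daddr %u, AGF daddr %u, AGI daddr %u, AGFL daddr %u\n",
	       0u, 1u << sectbb_log, 2u << sectbb_log, 3u << sectbb_log);
	printf("AGFL slots with a CRC header: %u\n",
	       (sectsize - agfl_hdr) / 4u);	/* 4-byte AG block numbers */
	return 0;
}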
+
+
+#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
+#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
+ (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
+#define XFS_MIN_FREELIST(a,mp) \
+ (XFS_MIN_FREELIST_RAW( \
+ be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
+ be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
+#define XFS_MIN_FREELIST_PAG(pag,mp) \
+ (XFS_MIN_FREELIST_RAW( \
+ (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
+ (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
+
+#define XFS_AGB_TO_FSB(mp,agno,agbno) \
+ (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
+#define XFS_FSB_TO_AGNO(mp,fsbno) \
+ ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
+#define XFS_FSB_TO_AGBNO(mp,fsbno) \
+ ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
+#define XFS_AGB_TO_DADDR(mp,agno,agbno) \
+ ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
+ (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
+#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d))
+
+/*
+ * For checking for bad ranges of xfs_daddr_t's, covering multiple
+ * allocation groups or a single xfs_daddr_t that's a superblock copy.
+ */
+#define XFS_AG_CHECK_DADDR(mp,d,len) \
+ ((len) == 1 ? \
+ ASSERT((d) == XFS_SB_DADDR || \
+ xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
+ ASSERT(xfs_daddr_to_agno(mp, d) == \
+ xfs_daddr_to_agno(mp, (d) + (len) - 1)))
+
+typedef struct xfs_timestamp {
+ __be32 t_sec; /* timestamp seconds */
+ __be32 t_nsec; /* timestamp nanoseconds */
+} xfs_timestamp_t;
+
+/*
+ * On-disk inode structure.
+ *
+ * This is just the header or "dinode core"; the inode is expanded to fill a
+ * variable size, with the leftover area split into a data and an attribute fork.
+ * The format of the data and attribute fork depends on the format of the
+ * inode as indicated by di_format and di_aformat. To access the data and
+ * attribute forks, use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and
+ * XFS_DFORK_PTR macros below.
+ *
+ * There is a very similar struct icdinode in xfs_inode which matches the
+ * layout of the first 96 bytes of this structure, but is kept in native
+ * format instead of big endian.
+ *
+ * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
+ * padding field for v3 inodes.
+ */
+#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
+#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
+typedef struct xfs_dinode {
+ __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
+ __be16 di_mode; /* mode and type of file */
+ __u8 di_version; /* inode version */
+ __u8 di_format; /* format of di_c data */
+ __be16 di_onlink; /* old number of links to file */
+ __be32 di_uid; /* owner's user id */
+ __be32 di_gid; /* owner's group id */
+ __be32 di_nlink; /* number of links to file */
+ __be16 di_projid_lo; /* lower part of owner's project id */
+ __be16 di_projid_hi; /* higher part owner's project id */
+ __u8 di_pad[6]; /* unused, zeroed space */
+ __be16 di_flushiter; /* incremented on flush */
+ xfs_timestamp_t di_atime; /* time last accessed */
+ xfs_timestamp_t di_mtime; /* time last modified */
+ xfs_timestamp_t di_ctime; /* time created/inode modified */
+ __be64 di_size; /* number of bytes in file */
+ __be64 di_nblocks; /* # of direct & btree blocks used */
+ __be32 di_extsize; /* basic/minimum extent size for file */
+ __be32 di_nextents; /* number of extents in data fork */
+ __be16 di_anextents; /* number of extents in attribute fork*/
+ __u8 di_forkoff; /* attr fork offs, <<3 for 64b align */
+ __s8 di_aformat; /* format of attr fork's data */
+ __be32 di_dmevmask; /* DMIG event mask */
+ __be16 di_dmstate; /* DMIG state info */
+ __be16 di_flags; /* random flags, XFS_DIFLAG_... */
+ __be32 di_gen; /* generation number */
+
+ /* di_next_unlinked is the only non-core field in the old dinode */
+ __be32 di_next_unlinked;/* agi unlinked list ptr */
+
+ /* start of the extended dinode, writable fields */
+ __le32 di_crc; /* CRC of the inode */
+ __be64 di_changecount; /* number of attribute changes */
+ __be64 di_lsn; /* flush sequence */
+ __be64 di_flags2; /* more random flags */
+ __u8 di_pad2[16]; /* more padding for future expansion */
+
+ /* fields only written to during inode creation */
+ xfs_timestamp_t di_crtime; /* time created */
+ __be64 di_ino; /* inode number */
+ uuid_t di_uuid; /* UUID of the filesystem */
+
+ /* structure must be padded to 64 bit alignment */
+} xfs_dinode_t;
+
+#define XFS_DINODE_CRC_OFF offsetof(struct xfs_dinode, di_crc)
+
+#define DI_MAX_FLUSH 0xffff
+
+/*
+ * Size of the core inode on disk. Version 1 and 2 inodes have
+ * the same size, but version 3 has grown a few additional fields.
+ */
+static inline uint xfs_dinode_size(int version)
+{
+ if (version == 3)
+ return sizeof(struct xfs_dinode);
+ return offsetof(struct xfs_dinode, di_crc);
+}
+
+/*
+ * The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
+ * Since the pathconf interface is signed, we use 2^31 - 1 instead.
+ * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
+ */
+#define XFS_MAXLINK ((1U << 31) - 1U)
+#define XFS_MAXLINK_1 65535U
+
+/*
+ * Values for di_format
+ */
+typedef enum xfs_dinode_fmt {
+ XFS_DINODE_FMT_DEV, /* xfs_dev_t */
+ XFS_DINODE_FMT_LOCAL, /* bulk data */
+ XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
+ XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
+ XFS_DINODE_FMT_UUID /* uuid_t */
+} xfs_dinode_fmt_t;
+
+/*
+ * Inode minimum and maximum sizes.
+ */
+#define XFS_DINODE_MIN_LOG 8
+#define XFS_DINODE_MAX_LOG 11
+#define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG)
+#define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG)
+
+/*
+ * Inode size for given fs.
+ */
+#define XFS_LITINO(mp, version) \
+ ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
+
+/*
+ * Inode data & attribute fork sizes, per inode.
+ */
+#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0)
+#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
+
+#define XFS_DFORK_DSIZE(dip,mp) \
+ (XFS_DFORK_Q(dip) ? \
+ XFS_DFORK_BOFF(dip) : \
+ XFS_LITINO(mp, (dip)->di_version))
+#define XFS_DFORK_ASIZE(dip,mp) \
+ (XFS_DFORK_Q(dip) ? \
+ XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
+ 0)
+#define XFS_DFORK_SIZE(dip,mp,w) \
+ ((w) == XFS_DATA_FORK ? \
+ XFS_DFORK_DSIZE(dip, mp) : \
+ XFS_DFORK_ASIZE(dip, mp))
+
+/*
+ * Return pointers to the data or attribute forks.
+ */
+#define XFS_DFORK_DPTR(dip) \
+ ((char *)dip + xfs_dinode_size(dip->di_version))
+#define XFS_DFORK_APTR(dip) \
+ (XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip))
+#define XFS_DFORK_PTR(dip,w) \
+ ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip))
+
+#define XFS_DFORK_FORMAT(dip,w) \
+ ((w) == XFS_DATA_FORK ? \
+ (dip)->di_format : \
+ (dip)->di_aformat)
+#define XFS_DFORK_NEXTENTS(dip,w) \
+ ((w) == XFS_DATA_FORK ? \
+ be32_to_cpu((dip)->di_nextents) : \
+ be16_to_cpu((dip)->di_anextents))
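A small worked example of the fork-size arithmetic above: assuming a 512-byte inode, a 176-byte v3 header and di_forkoff of 15 (in 8-byte units), the data fork gets 120 bytes and the attribute fork the remaining 216. All three numbers are assumptions chosen for illustration.

/* XFS_LITINO / XFS_DFORK_* arithmetic with assumed values. */
#include <stdio.h>

int main(void)
{
	int inodesize = 512;	/* sb_inodesize (assumed) */
	int hdrsize = 176;	/* xfs_dinode_size(3) (assumed) */
	int forkoff = 15;	/* di_forkoff, 8-byte units (assumed) */

	int litino = inodesize - hdrsize;		/* XFS_LITINO */
	int boff = forkoff << 3;			/* XFS_DFORK_BOFF */
	int dsize = forkoff ? boff : litino;		/* XFS_DFORK_DSIZE */
	int asize = forkoff ? litino - boff : 0;	/* XFS_DFORK_ASIZE */

	printf("litino %d: data fork %d bytes, attr fork %d bytes\n",
	       litino, dsize, asize);
	return 0;
}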
+
+/*
+ * For block and character special files the 32bit dev_t is stored at the
+ * beginning of the data fork.
+ */
+static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip)
+{
+ return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip));
+}
+
+static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
+{
+ *(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev);
+}
+
+/*
+ * Values for di_flags
+ * There should be a one-to-one correspondence between these flags and the
+ * XFS_XFLAG_s.
+ */
+#define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */
+#define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */
+#define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */
+#define XFS_DIFLAG_IMMUTABLE_BIT 3 /* inode is immutable */
+#define XFS_DIFLAG_APPEND_BIT 4 /* inode is append-only */
+#define XFS_DIFLAG_SYNC_BIT 5 /* inode is written synchronously */
+#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
+#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
+#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
+#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
+#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
+#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
+#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
+#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
+#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
+#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
+#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
+#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
+#define XFS_DIFLAG_IMMUTABLE (1 << XFS_DIFLAG_IMMUTABLE_BIT)
+#define XFS_DIFLAG_APPEND (1 << XFS_DIFLAG_APPEND_BIT)
+#define XFS_DIFLAG_SYNC (1 << XFS_DIFLAG_SYNC_BIT)
+#define XFS_DIFLAG_NOATIME (1 << XFS_DIFLAG_NOATIME_BIT)
+#define XFS_DIFLAG_NODUMP (1 << XFS_DIFLAG_NODUMP_BIT)
+#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
+#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
+#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
+#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
+#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
+#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
+#define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT)
+
+#define XFS_DIFLAG_ANY \
+ (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
+ XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
+ XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
+ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
+ XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
+
+/*
+ * Inode number format:
+ * low inopblog bits - offset in block
+ * next agblklog bits - block number in ag
+ * next agno_log bits - ag number
+ * high agno_log-agblklog-inopblog bits - 0
+ */
+#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
+#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
+#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
+#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
+#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
+#define XFS_INO_BITS(mp) \
+ XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
+#define XFS_INO_TO_AGNO(mp,i) \
+ ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp)))
+#define XFS_INO_TO_AGINO(mp,i) \
+ ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp)))
+#define XFS_INO_TO_AGBNO(mp,i) \
+ (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \
+ XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp)))
+#define XFS_INO_TO_OFFSET(mp,i) \
+ ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
+#define XFS_INO_TO_FSB(mp,i) \
+ XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i))
+#define XFS_AGINO_TO_INO(mp,a,i) \
+ (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i))
+#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp))
+#define XFS_AGINO_TO_OFFSET(mp,i) \
+ ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
+#define XFS_OFFBNO_TO_AGINO(mp,b,o) \
+ ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
+
+#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL))
+#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL))
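To make the inode number format above concrete, the sketch below packs and then unpacks an inode number for an assumed geometry of 16 inodes per block (inopblog 4) and an agblklog of 16; the agno, agbno and offset values are arbitrary.

/* Pack/unpack an inode number: agno | agbno | offset-in-block. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned inopblog = 4, agblklog = 16;	/* assumed geometry */
	unsigned agino_bits = inopblog + agblklog;

	uint64_t ino = ((uint64_t)3 << agino_bits)	/* agno 3 */
		     | (1234u << inopblog)		/* agbno 1234 */
		     | 7;				/* offset 7 */

	printf("ino %llu -> agno %llu, agbno %llu, offset %llu\n",
	       (unsigned long long)ino,
	       (unsigned long long)(ino >> agino_bits),
	       (unsigned long long)((ino >> inopblog) & ((1u << agblklog) - 1)),
	       (unsigned long long)(ino & ((1u << inopblog) - 1)));
	return 0;
}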
+
+/*
* RealTime Device format definitions
*/
@@ -413,4 +1484,40 @@ struct xfs_btree_block {
#define XFS_BTREE_LBLOCK_CRC_OFF \
offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
+/*
+ * On-disk XFS access control list structure.
+ */
+struct xfs_acl_entry {
+ __be32 ae_tag;
+ __be32 ae_id;
+ __be16 ae_perm;
+ __be16 ae_pad; /* fill the implicit hole in the structure */
+};
+
+struct xfs_acl {
+ __be32 acl_cnt;
+ struct xfs_acl_entry acl_entry[0];
+};
+
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp) \
+ (xfs_sb_version_hascrc(&mp->m_sb) \
+ ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+ sizeof(struct xfs_acl_entry) \
+ : 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+ (sizeof(struct xfs_acl) + \
+ sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
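A minimal sketch of the v5 entry-count limit above, using layout-compatible stand-in structs and assuming XATTR_SIZE_MAX is 64k: with 12-byte entries and a 4-byte header that works out to 5461 entries, versus the fixed 25 for v4 superblocks.

/* Recompute the v5 ACL entry limit with stand-in structures. */
#include <stdio.h>
#include <stdint.h>

struct acl_entry { uint32_t ae_tag, ae_id; uint16_t ae_perm, ae_pad; };
struct acl_hdr { uint32_t acl_cnt; struct acl_entry acl_entry[]; };

int main(void)
{
	long xattr_size_max = 65536;	/* XATTR_SIZE_MAX (assumed) */
	long v5_max = (xattr_size_max - (long)sizeof(struct acl_hdr)) /
		      (long)sizeof(struct acl_entry);

	printf("v4 limit: 25 entries, v5 limit: %ld entries\n", v5_max);
	return 0;
}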
+
+/* On-disk XFS extended attribute names */
+#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
+#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
+#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
+#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
+
#endif /* __XFS_FORMAT_H__ */
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 23dcb72fc5e6..116ef1ddb3e3 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -22,9 +22,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -39,7 +37,6 @@
#include "xfs_buf_item.h"
#include "xfs_icreate_item.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"
#include "xfs_trace.h"
@@ -48,12 +45,12 @@
*/
static inline int
xfs_ialloc_cluster_alignment(
- xfs_alloc_arg_t *args)
+ struct xfs_mount *mp)
{
- if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
- args->mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
- return args->mp->m_sb.sb_inoalignmt;
+ if (xfs_sb_version_hasalign(&mp->m_sb) &&
+ mp->m_sb.sb_inoalignmt >=
+ XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+ return mp->m_sb.sb_inoalignmt;
return 1;
}
@@ -412,7 +409,7 @@ xfs_ialloc_ag_alloc(
* but not to use them in the actual exact allocation.
*/
args.alignment = 1;
- args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;
+ args.minalignslop = xfs_ialloc_cluster_alignment(args.mp) - 1;
/* Allow space for the inode btree to split. */
args.minleft = args.mp->m_in_maxlevels - 1;
@@ -448,7 +445,7 @@ xfs_ialloc_ag_alloc(
args.alignment = args.mp->m_dalign;
isaligned = 1;
} else
- args.alignment = xfs_ialloc_cluster_alignment(&args);
+ args.alignment = xfs_ialloc_cluster_alignment(args.mp);
/*
* Need to figure out where to allocate the inode blocks.
* Ideally they should be spaced out through the a.g.
@@ -477,7 +474,7 @@ xfs_ialloc_ag_alloc(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
- args.alignment = xfs_ialloc_cluster_alignment(&args);
+ args.alignment = xfs_ialloc_cluster_alignment(args.mp);
if ((error = xfs_alloc_vextent(&args)))
return error;
}
@@ -632,10 +629,24 @@ xfs_ialloc_ag_select(
}
/*
- * Is there enough free space for the file plus a block of
- * inodes? (if we need to allocate some)?
+ * Check that there is enough free space for the file plus a
+ * chunk of inodes if we need to allocate some. If this is the
+ * first pass across the AGs, take into account the potential
+ * space needed for alignment of inode chunks when checking the
+ * longest contiguous free space in the AG - this prevents us
+ * from getting ENOSPC because we have free space larger than
+ * m_ialloc_blks but alignment constraints prevent us from using
+ * it.
+ *
+ * If we can't find an AG with space for full alignment slack to
+ * be taken into account, we must be near ENOSPC in all AGs.
+ * Hence we don't include alignment for the second pass and so
+ * if we fail allocation due to alignment issues then it is most
+ * likely a real ENOSPC condition.
*/
ineed = mp->m_ialloc_blks;
+ if (flags && ineed > 1)
+ ineed += xfs_ialloc_cluster_alignment(mp);
longest = pag->pagf_longest;
if (!longest)
longest = pag->pagf_flcount > 0;
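A tiny numeric sketch of the slack adjustment described above: with an assumed 16-block inode chunk and a 4-block cluster alignment, the first pass only accepts an AG whose longest free extent is at least 20 blocks, so an AG with 18 free blocks is skipped even though the chunk alone would fit unaligned.

/* First-pass AG selection with alignment slack (assumed sizes). */
#include <stdio.h>

int main(void)
{
	int ialloc_blks = 16;	/* assumed inode chunk size, fs blocks */
	int align = 4;		/* assumed inode cluster alignment */
	int longest = 18;	/* assumed longest free extent in the AG */

	int ineed = ialloc_blks + align;	/* first pass adds the slack */

	printf("first pass %s this AG (need %d, longest %d)\n",
	       longest >= ineed ? "accepts" : "skips", ineed, longest);
	return 0;
}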
@@ -1137,11 +1148,7 @@ xfs_dialloc_ag_update_inobt(
XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) &&
(rec.ir_freecount == frec->ir_freecount));
- error = xfs_inobt_update(cur, &rec);
- if (error)
- return error;
-
- return 0;
+ return xfs_inobt_update(cur, &rec);
}
/*
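A minimal sketch of the effect of the xfs_ialloc_ag_select() hunk above (the helper name and its parameters are invented for illustration; only the arithmetic mirrors the patch): the first pass over the AGs adds the cluster alignment to the space requirement so an aligned chunk can actually be placed, the second pass does not, so a second-pass failure indicates a real ENOSPC.

static bool
ag_has_room(
	xfs_extlen_t	longest_free,	/* pag->pagf_longest */
	xfs_extlen_t	ialloc_blks,	/* mp->m_ialloc_blks */
	xfs_extlen_t	cluster_align,	/* xfs_ialloc_cluster_alignment(mp) */
	bool		first_pass)	/* "flags" in the code above */
{
	xfs_extlen_t	need = ialloc_blks;

	/* leave slack to slide the chunk into alignment on the first pass */
	if (first_pass && need > 1)
		need += cluster_align;

	return longest_free >= need;
}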
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 95ad1c002d60..100007d56449 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -160,4 +160,8 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_agblock_t length, unsigned int gen);
+int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, struct xfs_buf **bpp);
+
+
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index c9b06f30fe86..964c465ca69c 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index f18fd2da49f7..002b6b3a1988 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
@@ -30,7 +28,6 @@
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
-#include "xfs_dinode.h"
/*
* Check that none of the inodes in the buffer have a next
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 6a00f7fed69d..0defbd02f62d 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -22,9 +22,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -34,7 +31,6 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
kmem_zone_t *xfs_ifork_zone;
diff --git a/fs/xfs/libxfs/xfs_inum.h b/fs/xfs/libxfs/xfs_inum.h
deleted file mode 100644
index 4ff2278e147a..000000000000
--- a/fs/xfs/libxfs/xfs_inum.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_INUM_H__
-#define __XFS_INUM_H__
-
-/*
- * Inode number format:
- * low inopblog bits - offset in block
- * next agblklog bits - block number in ag
- * next agno_log bits - ag number
- * high agno_log-agblklog-inopblog bits - 0
- */
-
-struct xfs_mount;
-
-#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
-#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
-#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
-#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
-#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
-#define XFS_INO_BITS(mp) \
- XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
-#define XFS_INO_TO_AGNO(mp,i) \
- ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp)))
-#define XFS_INO_TO_AGINO(mp,i) \
- ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp)))
-#define XFS_INO_TO_AGBNO(mp,i) \
- (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \
- XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp)))
-#define XFS_INO_TO_OFFSET(mp,i) \
- ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
-#define XFS_INO_TO_FSB(mp,i) \
- XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i))
-#define XFS_AGINO_TO_INO(mp,a,i) \
- (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i))
-#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp))
-#define XFS_AGINO_TO_OFFSET(mp,i) \
- ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
-#define XFS_OFFBNO_TO_AGINO(mp,b,o) \
- ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
-
-#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL))
-#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL))
-
-#endif /* __XFS_INUM_H__ */
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index aff12f2d4428..265314690415 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -361,7 +361,7 @@ typedef struct xfs_ictimestamp {
/*
* NOTE: This structure must be kept identical to struct xfs_dinode
- * in xfs_dinode.h except for the endianness annotations.
+ * except for the endianness annotations.
*/
typedef struct xfs_icdinode {
__uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
diff --git a/fs/xfs/libxfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c
index ee7e0e80246b..c10597973333 100644
--- a/fs/xfs/libxfs/xfs_log_rlimit.c
+++ b/fs/xfs/libxfs/xfs_log_rlimit.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_ag.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_trans_space.h"
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 7c818f1e4484..9b59ffa1fc19 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
@@ -36,7 +34,6 @@
#include "xfs_trace.h"
#include "xfs_buf.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"
#include "xfs_rtalloc.h"
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 5f902fa7913f..752915fa775a 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -23,7 +23,6 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
@@ -33,7 +32,6 @@
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
-#include "xfs_dinode.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 2e739708afd3..8eb1c54bafbf 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -19,590 +19,6 @@
#define __XFS_SB_H__
/*
- * Super block
- * Fits into a sector-sized buffer at address 0 of each allocation group.
- * Only the first of these is ever updated except during growfs.
- */
-
-struct xfs_buf;
-struct xfs_mount;
-struct xfs_trans;
-
-#define XFS_SB_MAGIC 0x58465342 /* 'XFSB' */
-#define XFS_SB_VERSION_1 1 /* 5.3, 6.0.1, 6.1 */
-#define XFS_SB_VERSION_2 2 /* 6.2 - attributes */
-#define XFS_SB_VERSION_3 3 /* 6.2 - new inode version */
-#define XFS_SB_VERSION_4 4 /* 6.2+ - bitmask version */
-#define XFS_SB_VERSION_5 5 /* CRC enabled filesystem */
-#define XFS_SB_VERSION_NUMBITS 0x000f
-#define XFS_SB_VERSION_ALLFBITS 0xfff0
-#define XFS_SB_VERSION_ATTRBIT 0x0010
-#define XFS_SB_VERSION_NLINKBIT 0x0020
-#define XFS_SB_VERSION_QUOTABIT 0x0040
-#define XFS_SB_VERSION_ALIGNBIT 0x0080
-#define XFS_SB_VERSION_DALIGNBIT 0x0100
-#define XFS_SB_VERSION_SHAREDBIT 0x0200
-#define XFS_SB_VERSION_LOGV2BIT 0x0400
-#define XFS_SB_VERSION_SECTORBIT 0x0800
-#define XFS_SB_VERSION_EXTFLGBIT 0x1000
-#define XFS_SB_VERSION_DIRV2BIT 0x2000
-#define XFS_SB_VERSION_BORGBIT 0x4000 /* ASCII only case-insens. */
-#define XFS_SB_VERSION_MOREBITSBIT 0x8000
-
-/*
- * Supported feature bit list is just all bits in the versionnum field because
- * we've used them all up and understand them all. Except, of course, for the
- * shared superblock bit, which nobody knows what it does and so is unsupported.
- */
-#define XFS_SB_VERSION_OKBITS \
- ((XFS_SB_VERSION_NUMBITS | XFS_SB_VERSION_ALLFBITS) & \
- ~XFS_SB_VERSION_SHAREDBIT)
-
-/*
- * There are two words to hold XFS "feature" bits: the original
- * word, sb_versionnum, and sb_features2. Whenever a bit is set in
- * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
- *
- * These defines represent bits in sb_features2.
- */
-#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001
-#define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */
-#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
-#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
-#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */
-#define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */
-#define XFS_SB_VERSION2_CRCBIT 0x00000100 /* metadata CRCs */
-#define XFS_SB_VERSION2_FTYPE 0x00000200 /* inode type in dir */
-
-#define XFS_SB_VERSION2_OKBITS \
- (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
- XFS_SB_VERSION2_ATTR2BIT | \
- XFS_SB_VERSION2_PROJID32BIT | \
- XFS_SB_VERSION2_FTYPE)
-
-/*
- * Superblock - in core version. Must match the ondisk version below.
- * Must be padded to 64 bit alignment.
- */
-typedef struct xfs_sb {
- __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
- __uint32_t sb_blocksize; /* logical block size, bytes */
- xfs_rfsblock_t sb_dblocks; /* number of data blocks */
- xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
- xfs_rtblock_t sb_rextents; /* number of realtime extents */
- uuid_t sb_uuid; /* file system unique id */
- xfs_fsblock_t sb_logstart; /* starting block of log if internal */
- xfs_ino_t sb_rootino; /* root inode number */
- xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */
- xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */
- xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */
- xfs_agblock_t sb_agblocks; /* size of an allocation group */
- xfs_agnumber_t sb_agcount; /* number of allocation groups */
- xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */
- xfs_extlen_t sb_logblocks; /* number of log blocks */
- __uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */
- __uint16_t sb_sectsize; /* volume sector size, bytes */
- __uint16_t sb_inodesize; /* inode size, bytes */
- __uint16_t sb_inopblock; /* inodes per block */
- char sb_fname[12]; /* file system name */
- __uint8_t sb_blocklog; /* log2 of sb_blocksize */
- __uint8_t sb_sectlog; /* log2 of sb_sectsize */
- __uint8_t sb_inodelog; /* log2 of sb_inodesize */
- __uint8_t sb_inopblog; /* log2 of sb_inopblock */
- __uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */
- __uint8_t sb_rextslog; /* log2 of sb_rextents */
- __uint8_t sb_inprogress; /* mkfs is in progress, don't mount */
- __uint8_t sb_imax_pct; /* max % of fs for inode space */
- /* statistics */
- /*
- * These fields must remain contiguous. If you really
- * want to change their layout, make sure you fix the
- * code in xfs_trans_apply_sb_deltas().
- */
- __uint64_t sb_icount; /* allocated inodes */
- __uint64_t sb_ifree; /* free inodes */
- __uint64_t sb_fdblocks; /* free data blocks */
- __uint64_t sb_frextents; /* free realtime extents */
- /*
- * End contiguous fields.
- */
- xfs_ino_t sb_uquotino; /* user quota inode */
- xfs_ino_t sb_gquotino; /* group quota inode */
- __uint16_t sb_qflags; /* quota flags */
- __uint8_t sb_flags; /* misc. flags */
- __uint8_t sb_shared_vn; /* shared version number */
- xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */
- __uint32_t sb_unit; /* stripe or raid unit */
- __uint32_t sb_width; /* stripe or raid width */
- __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
- __uint8_t sb_logsectlog; /* log2 of the log sector size */
- __uint16_t sb_logsectsize; /* sector size for the log, bytes */
- __uint32_t sb_logsunit; /* stripe unit size for the log */
- __uint32_t sb_features2; /* additional feature bits */
-
- /*
- * bad features2 field as a result of failing to pad the sb
- * structure to 64 bits. Some machines will be using this field
- * for features2 bits. Easiest just to mark it bad and not use
- * it for anything else.
- */
- __uint32_t sb_bad_features2;
-
- /* version 5 superblock fields start here */
-
- /* feature masks */
- __uint32_t sb_features_compat;
- __uint32_t sb_features_ro_compat;
- __uint32_t sb_features_incompat;
- __uint32_t sb_features_log_incompat;
-
- __uint32_t sb_crc; /* superblock crc */
- __uint32_t sb_pad;
-
- xfs_ino_t sb_pquotino; /* project quota inode */
- xfs_lsn_t sb_lsn; /* last write sequence */
-
- /* must be padded to 64 bit alignment */
-} xfs_sb_t;
-
-#define XFS_SB_CRC_OFF offsetof(struct xfs_sb, sb_crc)
-
-/*
- * Superblock - on disk version. Must match the in core version above.
- * Must be padded to 64 bit alignment.
- */
-typedef struct xfs_dsb {
- __be32 sb_magicnum; /* magic number == XFS_SB_MAGIC */
- __be32 sb_blocksize; /* logical block size, bytes */
- __be64 sb_dblocks; /* number of data blocks */
- __be64 sb_rblocks; /* number of realtime blocks */
- __be64 sb_rextents; /* number of realtime extents */
- uuid_t sb_uuid; /* file system unique id */
- __be64 sb_logstart; /* starting block of log if internal */
- __be64 sb_rootino; /* root inode number */
- __be64 sb_rbmino; /* bitmap inode for realtime extents */
- __be64 sb_rsumino; /* summary inode for rt bitmap */
- __be32 sb_rextsize; /* realtime extent size, blocks */
- __be32 sb_agblocks; /* size of an allocation group */
- __be32 sb_agcount; /* number of allocation groups */
- __be32 sb_rbmblocks; /* number of rt bitmap blocks */
- __be32 sb_logblocks; /* number of log blocks */
- __be16 sb_versionnum; /* header version == XFS_SB_VERSION */
- __be16 sb_sectsize; /* volume sector size, bytes */
- __be16 sb_inodesize; /* inode size, bytes */
- __be16 sb_inopblock; /* inodes per block */
- char sb_fname[12]; /* file system name */
- __u8 sb_blocklog; /* log2 of sb_blocksize */
- __u8 sb_sectlog; /* log2 of sb_sectsize */
- __u8 sb_inodelog; /* log2 of sb_inodesize */
- __u8 sb_inopblog; /* log2 of sb_inopblock */
- __u8 sb_agblklog; /* log2 of sb_agblocks (rounded up) */
- __u8 sb_rextslog; /* log2 of sb_rextents */
- __u8 sb_inprogress; /* mkfs is in progress, don't mount */
- __u8 sb_imax_pct; /* max % of fs for inode space */
- /* statistics */
- /*
- * These fields must remain contiguous. If you really
- * want to change their layout, make sure you fix the
- * code in xfs_trans_apply_sb_deltas().
- */
- __be64 sb_icount; /* allocated inodes */
- __be64 sb_ifree; /* free inodes */
- __be64 sb_fdblocks; /* free data blocks */
- __be64 sb_frextents; /* free realtime extents */
- /*
- * End contiguous fields.
- */
- __be64 sb_uquotino; /* user quota inode */
- __be64 sb_gquotino; /* group quota inode */
- __be16 sb_qflags; /* quota flags */
- __u8 sb_flags; /* misc. flags */
- __u8 sb_shared_vn; /* shared version number */
- __be32 sb_inoalignmt; /* inode chunk alignment, fsblocks */
- __be32 sb_unit; /* stripe or raid unit */
- __be32 sb_width; /* stripe or raid width */
- __u8 sb_dirblklog; /* log2 of dir block size (fsbs) */
- __u8 sb_logsectlog; /* log2 of the log sector size */
- __be16 sb_logsectsize; /* sector size for the log, bytes */
- __be32 sb_logsunit; /* stripe unit size for the log */
- __be32 sb_features2; /* additional feature bits */
- /*
- * bad features2 field as a result of failing to pad the sb
- * structure to 64 bits. Some machines will be using this field
- * for features2 bits. Easiest just to mark it bad and not use
- * it for anything else.
- */
- __be32 sb_bad_features2;
-
- /* version 5 superblock fields start here */
-
- /* feature masks */
- __be32 sb_features_compat;
- __be32 sb_features_ro_compat;
- __be32 sb_features_incompat;
- __be32 sb_features_log_incompat;
-
- __le32 sb_crc; /* superblock crc */
- __be32 sb_pad;
-
- __be64 sb_pquotino; /* project quota inode */
- __be64 sb_lsn; /* last write sequence */
-
- /* must be padded to 64 bit alignment */
-} xfs_dsb_t;
-
-/*
- * Sequence number values for the fields.
- */
-typedef enum {
- XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
- XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
- XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
- XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
- XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
- XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
- XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
- XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
- XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
- XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
- XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
- XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
- XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
- XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
- XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
- XFS_SBS_PQUOTINO, XFS_SBS_LSN,
- XFS_SBS_FIELDCOUNT
-} xfs_sb_field_t;
-
-/*
- * Mask values, defined based on the xfs_sb_field_t values.
- * Only define the ones we're using.
- */
-#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x)
-#define XFS_SB_UUID XFS_SB_MVAL(UUID)
-#define XFS_SB_FNAME XFS_SB_MVAL(FNAME)
-#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO)
-#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO)
-#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO)
-#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM)
-#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO)
-#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO)
-#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS)
-#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
-#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
-#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
-#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
-#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
-#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
-#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2)
-#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
-#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
-#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
-#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
-#define XFS_SB_CRC XFS_SB_MVAL(CRC)
-#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO)
-#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
-#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
-#define XFS_SB_MOD_BITS \
- (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
- XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
- XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
- XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
- XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
- XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
- XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
-
-
-/*
- * Misc. Flags - warning - these will be cleared by xfs_repair unless
- * a feature bit is set when the flag is used.
- */
-#define XFS_SBF_NOFLAGS 0x00 /* no flags set */
-#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */
-
-/*
- * define max. shared version we can interoperate with
- */
-#define XFS_SB_MAX_SHARED_VN 0
-
-#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS)
-
-/*
- * The first XFS version we support is a v4 superblock with V2 directories.
- */
-static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
-{
- if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
- return false;
-
- /* check for unknown features in the fs */
- if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
- ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
- (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
- return false;
-
- return true;
-}
-
-static inline bool xfs_sb_good_version(struct xfs_sb *sbp)
-{
- if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
- return true;
- if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4)
- return xfs_sb_good_v4_features(sbp);
- return false;
-}
-
-/*
- * Detect a mismatched features2 field. Older kernels read/wrote
- * this into the wrong slot, so to be safe we keep them in sync.
- */
-static inline bool xfs_sb_has_mismatched_features2(struct xfs_sb *sbp)
-{
- return sbp->sb_bad_features2 != sbp->sb_features2;
-}
-
-static inline bool xfs_sb_version_hasattr(struct xfs_sb *sbp)
-{
- return (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT);
-}
-
-static inline void xfs_sb_version_addattr(struct xfs_sb *sbp)
-{
- sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT;
-}
-
-static inline bool xfs_sb_version_hasquota(struct xfs_sb *sbp)
-{
- return (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT);
-}
-
-static inline void xfs_sb_version_addquota(struct xfs_sb *sbp)
-{
- sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT;
-}
-
-static inline bool xfs_sb_version_hasalign(struct xfs_sb *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_ALIGNBIT));
-}
-
-static inline bool xfs_sb_version_hasdalign(struct xfs_sb *sbp)
-{
- return (sbp->sb_versionnum & XFS_SB_VERSION_DALIGNBIT);
-}
-
-static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
-}
-
-static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
-}
-
-static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
-{
- return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
-}
-
-static inline bool xfs_sb_version_hasasciici(struct xfs_sb *sbp)
-{
- return (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT);
-}
-
-static inline bool xfs_sb_version_hasmorebits(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT);
-}
-
-/*
- * sb_features2 bit version macros.
- */
-static inline bool xfs_sb_version_haslazysbcount(struct xfs_sb *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
- (xfs_sb_version_hasmorebits(sbp) &&
- (sbp->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT));
-}
-
-static inline bool xfs_sb_version_hasattr2(struct xfs_sb *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
- (xfs_sb_version_hasmorebits(sbp) &&
- (sbp->sb_features2 & XFS_SB_VERSION2_ATTR2BIT));
-}
-
-static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
-{
- sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
- sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
-}
-
-static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
-{
- sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
- sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
- if (!sbp->sb_features2)
- sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
-}
-
-static inline bool xfs_sb_version_hasprojid32bit(struct xfs_sb *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
- (xfs_sb_version_hasmorebits(sbp) &&
- (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT));
-}
-
-static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
-{
- sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
- sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
- sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
-}
-
-/*
- * Extended v5 superblock feature masks. These are to be used for new v5
- * superblock features only.
- *
- * Compat features are new features that old kernels will not notice or affect
- * and so can mount read-write without issues.
- *
- * RO-Compat (read only) are features that old kernels can read but will break
- * if they write. Hence only read-only mounts of such filesystems are allowed on
- * kernels that don't support the feature bit.
- *
- * InCompat features are features which old kernels will not understand and so
- * must not mount.
- *
- * Log-InCompat features are for changes to log formats or new transactions that
- * can't be replayed on older kernels. The fields are set when the filesystem is
- * mounted, and a clean unmount clears the fields.
- */
-#define XFS_SB_FEAT_COMPAT_ALL 0
-#define XFS_SB_FEAT_COMPAT_UNKNOWN ~XFS_SB_FEAT_COMPAT_ALL
-static inline bool
-xfs_sb_has_compat_feature(
- struct xfs_sb *sbp,
- __uint32_t feature)
-{
- return (sbp->sb_features_compat & feature) != 0;
-}
-
-#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
-#define XFS_SB_FEAT_RO_COMPAT_ALL \
- (XFS_SB_FEAT_RO_COMPAT_FINOBT)
-#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
-static inline bool
-xfs_sb_has_ro_compat_feature(
- struct xfs_sb *sbp,
- __uint32_t feature)
-{
- return (sbp->sb_features_ro_compat & feature) != 0;
-}
-
-#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
-#define XFS_SB_FEAT_INCOMPAT_ALL \
- (XFS_SB_FEAT_INCOMPAT_FTYPE)
-
-#define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL
-static inline bool
-xfs_sb_has_incompat_feature(
- struct xfs_sb *sbp,
- __uint32_t feature)
-{
- return (sbp->sb_features_incompat & feature) != 0;
-}
-
-#define XFS_SB_FEAT_INCOMPAT_LOG_ALL 0
-#define XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_LOG_ALL
-static inline bool
-xfs_sb_has_incompat_log_feature(
- struct xfs_sb *sbp,
- __uint32_t feature)
-{
- return (sbp->sb_features_log_incompat & feature) != 0;
-}
-
-/*
- * V5 superblock specific feature checks
- */
-static inline int xfs_sb_version_hascrc(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
-}
-
-static inline int xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
-}
-
-static inline int xfs_sb_version_hasftype(struct xfs_sb *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
- xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_FTYPE)) ||
- (xfs_sb_version_hasmorebits(sbp) &&
- (sbp->sb_features2 & XFS_SB_VERSION2_FTYPE));
-}
-
-static inline int xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
-{
- return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
- (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_FINOBT);
-}
-
-/*
- * end of superblock version macros
- */
-
-static inline bool
-xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
-{
- return (ino == sbp->sb_uquotino ||
- ino == sbp->sb_gquotino ||
- ino == sbp->sb_pquotino);
-}
-
-#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
-#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
-#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
-
-#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
-#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
- xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d))
-#define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \
- XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno))
-
-/*
- * File system sector to basic block conversions.
- */
-#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log)
-
-/*
- * File system block to basic block conversions.
- */
-#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log)
-#define XFS_BB_TO_FSB(mp,bb) \
- (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log)
-#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log)
-
-/*
- * File system block to byte conversions.
- */
-#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
-#define XFS_B_TO_FSB(mp,b) \
- ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
-#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
-#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
-
-/*
* perag get/put wrappers for ref counting
*/
extern struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
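The superblock definitions and feature helpers deleted above are being consolidated into the shared format header by this series (the same move that adds the SGI_ACL_* defines to xfs_format.h earlier in this diff) rather than dropped. A hedged sketch of how the v5 feature masks are typically consumed at mount time, built only from the helpers shown above (the function name check_v5_features is invented):

static int
check_v5_features(
	struct xfs_sb	*sbp,
	bool		readonly)
{
	/* an unknown incompat feature means the fs cannot be mounted at all */
	if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN))
		return -EINVAL;

	/* an unknown ro-compat feature permits read-only mounts only */
	if (!readonly &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN))
		return -EINVAL;

	return 0;
}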
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 5782f037eab4..c80c5236c3da 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
-#include "xfs_ag.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index f2bda7c76b8a..6c1330f29050 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -22,8 +22,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index a65fa5dde6e9..4b641676f258 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -19,8 +19,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_ag.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_acl.h"
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 5dc163744511..3841b07f27bf 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -22,42 +22,6 @@ struct inode;
struct posix_acl;
struct xfs_inode;
-#define XFS_ACL_NOT_PRESENT (-1)
-
-/* On-disk XFS access control list structure */
-struct xfs_acl_entry {
- __be32 ae_tag;
- __be32 ae_id;
- __be16 ae_perm;
- __be16 ae_pad; /* fill the implicit hole in the structure */
-};
-
-struct xfs_acl {
- __be32 acl_cnt;
- struct xfs_acl_entry acl_entry[0];
-};
-
-/*
- * The number of ACL entries allowed is defined by the on-disk format.
- * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
- * limited only by the maximum size of the xattr that stores the information.
- */
-#define XFS_ACL_MAX_ENTRIES(mp) \
- (xfs_sb_version_hascrc(&mp->m_sb) \
- ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
- sizeof(struct xfs_acl_entry) \
- : 25)
-
-#define XFS_ACL_MAX_SIZE(mp) \
- (sizeof(struct xfs_acl) + \
- sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
-
-/* On-disk XFS extended attribute names */
-#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
-#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
-#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
-#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
-
#ifdef CONFIG_XFS_POSIX_ACL
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f5b2453a43b2..18e2f3bbae5e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -33,7 +31,6 @@
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index aa2a8b1838a2..83af4c149635 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -23,8 +23,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -39,7 +37,6 @@
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
-#include "xfs_dinode.h"
#include "xfs_dir2.h"
/*
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 62db83ab6cbc..a43d370d2c58 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -39,7 +37,6 @@
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
-#include "xfs_dinode.h"
#include "xfs_dir2.h"
STATIC int
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 281002689d64..22a5dcb70b32 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -23,8 +23,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
@@ -42,7 +40,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
-#include "xfs_dinode.h"
/* Kernel only BMAP related definitions and functions */
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 24b4ebea0d4d..bb502a391792 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,18 +34,16 @@
#include <linux/backing-dev.h>
#include <linux/freezer.h>
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
static kmem_zone_t *xfs_buf_zone;
-static struct workqueue_struct *xfslogd_workqueue;
-
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
@@ -463,7 +461,7 @@ _xfs_buf_find(
* have to check that the buffer falls within the filesystem bounds.
*/
eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
- if (blkno >= eofs) {
+ if (blkno < 0 || blkno >= eofs) {
/*
* XXX (dgc): we should really be returning -EFSCORRUPTED here,
* but none of the higher level infrastructure supports
@@ -1043,7 +1041,7 @@ xfs_buf_ioend_work(
struct work_struct *work)
{
struct xfs_buf *bp =
- container_of(work, xfs_buf_t, b_iodone_work);
+ container_of(work, xfs_buf_t, b_ioend_work);
xfs_buf_ioend(bp);
}
@@ -1052,8 +1050,8 @@ void
xfs_buf_ioend_async(
struct xfs_buf *bp)
{
- INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
- queue_work(xfslogd_workqueue, &bp->b_iodone_work);
+ INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+ queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}
void
@@ -1222,6 +1220,13 @@ _xfs_buf_ioapply(
*/
bp->b_error = 0;
+ /*
+ * Initialize the I/O completion workqueue if we haven't yet or the
+ * submitter has not opted to specify a custom one.
+ */
+ if (!bp->b_ioend_wq)
+ bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
+
if (bp->b_flags & XBF_WRITE) {
if (bp->b_flags & XBF_SYNCIO)
rw = WRITE_SYNC;
@@ -1882,15 +1887,8 @@ xfs_buf_init(void)
if (!xfs_buf_zone)
goto out;
- xfslogd_workqueue = alloc_workqueue("xfslogd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
- if (!xfslogd_workqueue)
- goto out_free_buf_zone;
-
return 0;
- out_free_buf_zone:
- kmem_zone_destroy(xfs_buf_zone);
out:
return -ENOMEM;
}
@@ -1898,6 +1896,5 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
}
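A sketch of the completion routing introduced by the xfs_buf.c hunks above (the helper name is invented; the logic restates the new code in _xfs_buf_ioapply()): a submitter may point b_ioend_wq at its own workqueue before issuing the I/O, and everything else now defaults to the per-mount m_buf_workqueue instead of the old global xfslogd workqueue.

static struct workqueue_struct *
xfs_buf_pick_ioend_wq(
	struct xfs_buf	*bp)
{
	/* a submitter-specified queue wins; see the xfs_log.c hunks below,
	 * which route log buffer completion to m_log_workqueue */
	if (bp->b_ioend_wq)
		return bp->b_ioend_wq;

	/* otherwise fall back to the mount's default buffer workqueue */
	return bp->b_target->bt_mount->m_buf_workqueue;
}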
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 82002c00af90..75ff5d5a7d2e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -164,7 +164,8 @@ typedef struct xfs_buf {
struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */
void *b_addr; /* virtual address of buffer */
- struct work_struct b_iodone_work;
+ struct work_struct b_ioend_work;
+ struct workqueue_struct *b_ioend_wq; /* I/O completion wq */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f15969543326..3f9bd58edec7 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -17,11 +17,11 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index f1b69edcdf31..098cd78fe708 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -34,7 +32,6 @@
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
-#include "xfs_dinode.h"
/*
* Directory file type support functions
@@ -44,7 +41,7 @@ static unsigned char xfs_dir3_filetype_table[] = {
DT_FIFO, DT_SOCK, DT_LNK, DT_WHT,
};
-unsigned char
+static unsigned char
xfs_dir3_get_dtype(
struct xfs_mount *mp,
__uint8_t filetype)
@@ -57,22 +54,6 @@ xfs_dir3_get_dtype(
return xfs_dir3_filetype_table[filetype];
}
-/*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
- */
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
- [0] = XFS_DIR3_FT_UNKNOWN,
- [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
- [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
-};
STATIC int
xfs_dir2_sf_getdents(
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 13d08a1b390e..799e5a2d334d 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -20,7 +20,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 63c2de49f61d..02c01bbbc789 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -22,8 +22,6 @@
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index f33fbaaa4d8a..814cff94e78f 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index b92fd7bc49e3..3ee186ac1093 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -20,8 +20,6 @@
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 5a6bd5d8779a..5eb4a14e0a0f 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -19,10 +19,9 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_export.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index fd22f69049d4..c263e079273e 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -24,7 +24,6 @@
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index c4327419dc5c..cb7fe64cdbfa 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -17,10 +17,9 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index eb596b419942..13e974e6a889 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -37,7 +35,6 @@
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
-#include "xfs_dinode.h"
#include "xfs_icache.h"
#include <linux/aio.h>
@@ -933,7 +930,6 @@ xfs_file_readdir(
{
struct inode *inode = file_inode(file);
xfs_inode_t *ip = XFS_I(inode);
- int error;
size_t bufsize;
/*
@@ -950,10 +946,7 @@ xfs_file_readdir(
*/
bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
- error = xfs_readdir(ip, ctx, bufsize);
- if (error)
- return error;
- return 0;
+ return xfs_readdir(ip, ctx, bufsize);
}
STATIC int
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index e92730c1d3ca..a2e86e8a0fea 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -20,16 +20,13 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
-#include "xfs_inum.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
-#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c05ac8b70fa9..fdc64220fcb0 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -22,7 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -40,7 +39,6 @@
#include "xfs_rtalloc.h"
#include "xfs_trace.h"
#include "xfs_log.h"
-#include "xfs_dinode.h"
#include "xfs_filestream.h"
/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index b45f7b27b5df..9771b7ef62ed 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -20,9 +20,7 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
@@ -65,6 +63,7 @@ xfs_inode_alloc(
return NULL;
}
+ XFS_STATS_INC(vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(!xfs_isiflocked(ip));
@@ -130,6 +129,7 @@ xfs_inode_free(
/* asserts to verify all state is correct here */
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!xfs_isiflocked(ip));
+ XFS_STATS_DEC(vn_active);
call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 46748b86b12f..62f1f91c32cb 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -34,6 +34,14 @@ struct xfs_eofblocks {
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
/*
+ * tags for inode radix tree
+ */
+#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
+ in xfs_inode_ag_iterator */
+#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
+#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
+
+/*
* Flags for xfs_iget()
*/
#define XFS_IGET_CREATE 0x1
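The inode radix-tree tags moved into xfs_icache.h above already have callers in xfs_icache.c; a short usage sketch with an invented wrapper name (the real callers also hold pag->pag_ici_lock):

static void
xfs_mark_inode_for_reclaim(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	/* tag the per-AG inode cache entry so background reclaim finds it */
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
}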
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 7e4549233251..d45ca72af6fb 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -18,11 +18,10 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 8ed049d1e332..41f804e740d7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -23,9 +23,7 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
@@ -1082,7 +1080,7 @@ xfs_create(
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
- struct xfs_trans_res tres;
+ struct xfs_trans_res *tres;
uint resblks;
trace_xfs_create(dp, name);
@@ -1105,13 +1103,11 @@ xfs_create(
if (is_dir) {
rdev = 0;
resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
- tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres;
- tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
+ tres = &M_RES(mp)->tr_mkdir;
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
} else {
resblks = XFS_CREATE_SPACE_RES(mp, name->len);
- tres.tr_logres = M_RES(mp)->tr_create.tr_logres;
- tres.tr_logcount = XFS_CREATE_LOG_COUNT;
+ tres = &M_RES(mp)->tr_create;
tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
}
@@ -1123,17 +1119,16 @@ xfs_create(
* the case we'll drop the one we have and get a more
* appropriate transaction later.
*/
- tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
- error = xfs_trans_reserve(tp, &tres, resblks, 0);
+ error = xfs_trans_reserve(tp, tres, resblks, 0);
if (error == -ENOSPC) {
/* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(mp);
- error = xfs_trans_reserve(tp, &tres, resblks, 0);
+ error = xfs_trans_reserve(tp, tres, resblks, 0);
}
if (error == -ENOSPC) {
/* No space at all so try a "no-allocation" reservation */
resblks = 0;
- error = xfs_trans_reserve(tp, &tres, 0, 0);
+ error = xfs_trans_reserve(tp, tres, 0, 0);
}
if (error) {
cancel_flags = 0;
@@ -2488,9 +2483,7 @@ xfs_remove(
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- int link_zero;
uint resblks;
- uint log_count;
trace_xfs_remove(dp, name);
@@ -2505,13 +2498,10 @@ xfs_remove(
if (error)
goto std_return;
- if (is_dir) {
+ if (is_dir)
tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
- log_count = XFS_DEFAULT_LOG_COUNT;
- } else {
+ else
tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
- log_count = XFS_REMOVE_LOG_COUNT;
- }
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
/*
@@ -2579,9 +2569,6 @@ xfs_remove(
if (error)
goto out_trans_cancel;
- /* Determine if this is the last link while the inode is locked */
- link_zero = (ip->i_d.di_nlink == 0);
-
xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
&first_block, &free_list, resblks);
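The xfs_create() hunks above work because the per-mount reservation table already carries the full reservation, so a pointer into M_RES(mp) replaces the hand-built local struct and the explicit tr_logflags assignment. The structure being pointed at, roughly as declared in xfs_trans_resv.h (fields filled in at mount time by xfs_trans_resv_calc()):

struct xfs_trans_res {
	uint	tr_logres;	/* log space reservation in bytes */
	int	tr_logcount;	/* number of log operations per ticket */
	int	tr_logflags;	/* e.g. XFS_TRANS_PERM_LOG_RES */
};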
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 9af2882e1f4c..4ed2ba9342dc 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -20,7 +20,6 @@
#include "xfs_inode_buf.h"
#include "xfs_inode_fork.h"
-#include "xfs_dinode.h"
/*
* Kernel only inode definitions
@@ -324,7 +323,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
(((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
((pip)->i_d.di_mode & S_ISGID))
-
int xfs_release(struct xfs_inode *ip);
void xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 63de0b0acc32..bf13a5a7e2f4 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -29,7 +27,6 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
-#include "xfs_dinode.h"
#include "xfs_log.h"
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 24c926b6fe85..a1831980a68e 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
@@ -40,7 +38,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
-#include "xfs_dinode.h"
#include "xfs_trans.h"
#include <linux/capability.h>
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 94ce027e28e3..ec6772866f3d 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -25,8 +25,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index afcf3c926565..c980e2a5086b 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -38,7 +36,6 @@
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
-#include "xfs_dinode.h"
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
@@ -52,7 +49,6 @@ xfs_iomap_eof_align_last_fsb(
xfs_extlen_t extsize,
xfs_fileoff_t *last_fsb)
{
- xfs_fileoff_t new_last_fsb = 0;
xfs_extlen_t align = 0;
int eof, error;
@@ -70,8 +66,8 @@ xfs_iomap_eof_align_last_fsb(
else if (mp->m_dalign)
align = mp->m_dalign;
- if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
- new_last_fsb = roundup_64(*last_fsb, align);
+ if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
+ align = 0;
}
/*
@@ -79,14 +75,14 @@ xfs_iomap_eof_align_last_fsb(
* (when file on a real-time subvolume or has di_extsize hint).
*/
if (extsize) {
- if (new_last_fsb)
- align = roundup_64(new_last_fsb, extsize);
+ if (align)
+ align = roundup_64(align, extsize);
else
align = extsize;
- new_last_fsb = roundup_64(*last_fsb, align);
}
- if (new_last_fsb) {
+ if (align) {
+ xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
if (error)
return error;
@@ -264,7 +260,6 @@ xfs_iomap_eof_want_preallocate(
{
xfs_fileoff_t start_fsb;
xfs_filblks_t count_fsb;
- xfs_fsblock_t firstblock;
int n, error, imaps;
int found_delalloc = 0;
@@ -289,7 +284,6 @@ xfs_iomap_eof_want_preallocate(
count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
while (count_fsb > 0) {
imaps = nimaps;
- firstblock = NULLFSBLOCK;
error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
0);
if (error)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ec6dcdc181ee..c50311cae1b1 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
@@ -37,8 +35,7 @@
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_priv.h"
-#include "xfs_dinode.h"
+#include "xfs_dir2.h"
#include "xfs_trans_space.h"
#include <linux/capability.h>
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 894924a5129b..82e314258f73 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -21,9 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -33,7 +30,6 @@
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"
STATIC int
xfs_internal_inum(
@@ -352,7 +348,6 @@ xfs_bulkstat(
int *done) /* 1 if there are more stats to get */
{
xfs_buf_t *agbp; /* agi header buffer */
- xfs_agi_t *agi; /* agi header data */
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -403,7 +398,6 @@ xfs_bulkstat(
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
if (error)
break;
- agi = XFS_BUF_TO_AGI(agbp);
/*
* Allocate and initialize a btree cursor for ialloc btree.
*/
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 6a51619d8690..c31d2c2eadc4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -384,4 +384,10 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#endif /* XFS_WARN */
#endif /* DEBUG */
+#ifdef CONFIG_XFS_RT
+#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+#else
+#define XFS_IS_REALTIME_INODE(ip) (0)
+#endif
+
#endif /* __XFS_LINUX__ */
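The XFS_IS_REALTIME_INODE() definition added above is guarded by CONFIG_XFS_RT; a usage sketch with an invented wrapper name, showing why realtime-only paths compile away on kernels built without realtime support:

static bool
xfs_inode_uses_rt_device(
	struct xfs_inode	*ip)
{
	/* expands to a constant 0 without CONFIG_XFS_RT, so the compiler can
	 * drop the realtime branches in callers entirely */
	return XFS_IS_REALTIME_INODE(ip) != 0;
}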
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index fe88ef67f93a..e408bf5a3ff7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trans.h"
@@ -1031,7 +1029,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
struct xlog *log = mp->m_log;
int needed = 0;
- if (!xfs_fs_writable(mp))
+ if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
return 0;
if (!xlog_cil_empty(log))
@@ -1808,6 +1806,8 @@ xlog_sync(
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_SYNCIO;
+ /* use high priority completion wq */
+ bp->b_ioend_wq = log->l_mp->m_log_workqueue;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
bp->b_flags |= XBF_FUA;
@@ -1856,6 +1856,8 @@ xlog_sync(
bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
bp->b_flags |= XBF_FUA;
+ /* use high priority completion wq */
+ bp->b_ioend_wq = log->l_mp->m_log_workqueue;
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index f506c457011e..45cc0ce18adf 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -17,11 +17,10 @@
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 00cd7f3a8f59..a5a945fc3bdc 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -22,11 +22,10 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
@@ -42,7 +41,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 63ca2f0420b1..d8b67547ab34 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -17,10 +17,9 @@
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 51435dbce9c4..d3d38836f87f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -22,11 +22,10 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
@@ -41,7 +40,6 @@
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"
#include "xfs_sysfs.h"
@@ -1074,11 +1072,23 @@ xfs_unmountfs(
xfs_sysfs_del(&mp->m_kobj);
}
-int
-xfs_fs_writable(xfs_mount_t *mp)
+/*
+ * Determine whether modifications can proceed. The caller specifies the minimum
+ * freeze level for which modifications should not be allowed. This allows
+ * certain operations to proceed while the freeze sequence is in progress, if
+ * necessary.
+ */
+bool
+xfs_fs_writable(
+ struct xfs_mount *mp,
+ int level)
{
- return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
- (mp->m_flags & XFS_MOUNT_RDONLY));
+ ASSERT(level > SB_UNFROZEN);
+ if ((mp->m_super->s_writers.frozen >= level) ||
+ XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
+ return false;
+
+ return true;
}
/*
@@ -1086,9 +1096,9 @@ xfs_fs_writable(xfs_mount_t *mp)
*
* Sync the superblock counters to disk.
*
- * Note this code can be called during the process of freezing, so
- * we may need to use the transaction allocator which does not
- * block when the transaction subsystem is in its frozen state.
+ * Note this code can be called during the process of freezing, so we use the
+ * transaction allocator that does not block when the transaction subsystem is
+ * in its frozen state.
*/
int
xfs_log_sbcount(xfs_mount_t *mp)
@@ -1096,7 +1106,8 @@ xfs_log_sbcount(xfs_mount_t *mp)
xfs_trans_t *tp;
int error;
- if (!xfs_fs_writable(mp))
+ /* allow this to proceed during the freeze sequence... */
+ if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
return 0;
xfs_icsb_sync_counters(mp, 0);
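For context, the hunk above gives xfs_fs_writable() a freeze level argument so each caller can state how deep into the freeze sequence it may still modify the filesystem. A minimal sketch of a hypothetical caller follows; xfs_example_modify() is invented for illustration, while xfs_fs_writable() and the VFS freeze levels (SB_FREEZE_WRITE being the lowest) are the interfaces shown or referenced by this patch.

/* Illustrative sketch only -- not part of this patch. */
STATIC int
xfs_example_modify(
	struct xfs_mount	*mp)
{
	/* Reject normal modifications once any freeze level is active. */
	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return -EROFS;

	/* ... perform the modification under a transaction ... */
	return 0;
}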
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b0447c86e7e2..22ccf69d4d3c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -168,6 +168,7 @@ typedef struct xfs_mount {
/* low free space thresholds */
struct xfs_kobj m_kobj;
+ struct workqueue_struct *m_buf_workqueue;
struct workqueue_struct *m_data_workqueue;
struct workqueue_struct *m_unwritten_workqueue;
struct workqueue_struct *m_cil_workqueue;
@@ -320,10 +321,7 @@ typedef struct xfs_mod_sb {
/*
* Per-ag incore structure, copies of information in agf and agi, to improve the
- * performance of allocation group selection. This is defined for the kernel
- * only, and hence is defined here instead of in xfs_ag.h. You need the struct
- * xfs_mount to be defined to look up a xfs_perag anyway (via mp->m_perag_tree),
- * so this doesn't introduce any strange header file dependencies.
+ * performance of allocation group selection.
*/
typedef struct xfs_perag {
struct xfs_mount *pag_mount; /* owner filesystem */
@@ -384,7 +382,7 @@ extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *);
-extern int xfs_fs_writable(xfs_mount_t *);
+extern bool xfs_fs_writable(struct xfs_mount *mp, int level);
extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index d68f23021af3..79fb19dd9c83 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -23,7 +23,6 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
@@ -38,7 +37,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
-#include "xfs_dinode.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -1749,23 +1747,21 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
if (O_udqpp)
*O_udqpp = uq;
- else if (uq)
+ else
xfs_qm_dqrele(uq);
if (O_gdqpp)
*O_gdqpp = gq;
- else if (gq)
+ else
xfs_qm_dqrele(gq);
if (O_pdqpp)
*O_pdqpp = pq;
- else if (pq)
+ else
xfs_qm_dqrele(pq);
return 0;
error_rele:
- if (gq)
- xfs_qm_dqrele(gq);
- if (uq)
- xfs_qm_dqrele(uq);
+ xfs_qm_dqrele(gq);
+ xfs_qm_dqrele(uq);
return error;
}
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 2c61e61b0205..3e52d5de7ae1 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 80f2d77d929a..74fca68e43b6 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -26,7 +26,6 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -784,19 +783,21 @@ xfs_qm_log_quotaoff(
{
xfs_trans_t *tp;
int error;
- xfs_qoff_logitem_t *qoffi=NULL;
- uint oldsbqflag=0;
+ xfs_qoff_logitem_t *qoffi;
+
+ *qoffstartp = NULL;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
- if (error)
- goto error0;
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ goto out;
+ }
qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
xfs_trans_log_quotaoff_item(tp, qoffi);
spin_lock(&mp->m_sb_lock);
- oldsbqflag = mp->m_sb.sb_qflags;
mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
spin_unlock(&mp->m_sb_lock);
@@ -809,19 +810,11 @@ xfs_qm_log_quotaoff(
*/
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
+ if (error)
+ goto out;
-error0:
- if (error) {
- xfs_trans_cancel(tp, 0);
- /*
- * No one else is modifying sb_qflags, so this is OK.
- * We still hold the quotaofflock.
- */
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = oldsbqflag;
- spin_unlock(&mp->m_sb_lock);
- }
*qoffstartp = qoffi;
+out:
return error;
}
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index b238027df987..7542bbeca6a1 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -19,8 +19,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index e1175ea9b551..f2079b6911cc 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -22,8 +22,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
@@ -36,7 +34,6 @@
#include "xfs_trace.h"
#include "xfs_buf.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"
#include "xfs_rtalloc.h"
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 206b97fd1d8a..19cbda196369 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -21,9 +21,7 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
@@ -44,7 +42,6 @@
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
-#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
@@ -796,8 +793,7 @@ xfs_open_devices(
out_free_ddev_targ:
xfs_free_buftarg(mp, mp->m_ddev_targp);
out_close_rtdev:
- if (rtdev)
- xfs_blkdev_put(rtdev);
+ xfs_blkdev_put(rtdev);
out_close_logdev:
if (logdev && logdev != ddev)
xfs_blkdev_put(logdev);
@@ -842,10 +838,15 @@ STATIC int
xfs_init_mount_workqueues(
struct xfs_mount *mp)
{
+ mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
+ if (!mp->m_buf_workqueue)
+ goto out;
+
mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_data_workqueue)
- goto out;
+ goto out_destroy_buf;
mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
@@ -863,7 +864,7 @@ xfs_init_mount_workqueues(
goto out_destroy_cil;
mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
- WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_FREEZABLE|WQ_HIGHPRI, 0, mp->m_fsname);
if (!mp->m_log_workqueue)
goto out_destroy_reclaim;
@@ -884,6 +885,8 @@ out_destroy_unwritten:
destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_data_iodone_queue:
destroy_workqueue(mp->m_data_workqueue);
+out_destroy_buf:
+ destroy_workqueue(mp->m_buf_workqueue);
out:
return -ENOMEM;
}
@@ -898,6 +901,7 @@ xfs_destroy_mount_workqueues(
destroy_workqueue(mp->m_cil_workqueue);
destroy_workqueue(mp->m_data_workqueue);
destroy_workqueue(mp->m_unwritten_workqueue);
+ destroy_workqueue(mp->m_buf_workqueue);
}
/*
@@ -1000,7 +1004,6 @@ xfs_fs_evict_inode(
clear_inode(inode);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
- XFS_STATS_DEC(vn_active);
xfs_inactive(ip);
}
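The workqueue hunks above extend the existing allocate-and-unwind chain in xfs_init_mount_workqueues()/xfs_destroy_mount_workqueues() with the new m_buf_workqueue. A stripped-down sketch of the same pattern, assuming only the stock alloc_workqueue()/destroy_workqueue() API (the foo_* names are placeholders):

#include <linux/workqueue.h>

struct foo_mount {
	struct workqueue_struct	*buf_wq;
	struct workqueue_struct	*data_wq;
};

static int foo_init_workqueues(struct foo_mount *foo, const char *name)
{
	foo->buf_wq = alloc_workqueue("foo-buf/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE, 1, name);
	if (!foo->buf_wq)
		goto out;

	foo->data_wq = alloc_workqueue("foo-data/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, name);
	if (!foo->data_wq)
		goto out_destroy_buf;

	return 0;

out_destroy_buf:
	destroy_workqueue(foo->buf_wq);
out:
	return -ENOMEM;
}

Tear-down runs in the reverse order, exactly as xfs_destroy_mount_workqueues() does after this patch.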
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 02ae62a998e0..25791df6f638 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -23,8 +23,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -42,7 +40,6 @@
#include "xfs_symlink.h"
#include "xfs_trans.h"
#include "xfs_log.h"
-#include "xfs_dinode.h"
/* ----- Kernel only functions below ----- */
STATIC int
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 1e85bcd0e418..13a029806805 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 30e8e3410955..fa3135b9bf04 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -22,8 +22,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 859482f53b5a..573aefb5a573 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -18,10 +18,9 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index e2b2216b1635..0a4d4ab6d9a9 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
@@ -229,13 +227,6 @@ xfs_trans_getsb(xfs_trans_t *tp,
return bp;
}
-#ifdef DEBUG
-xfs_buftarg_t *xfs_error_target;
-int xfs_do_error;
-int xfs_req_num;
-int xfs_error_mod = 33;
-#endif
-
/*
* Get and lock the buffer for the caller if it is not already
* locked within the given transaction. If it has not yet been
@@ -257,46 +248,11 @@ xfs_trans_read_buf_map(
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
- xfs_buf_t *bp;
- xfs_buf_log_item_t *bip;
+ struct xfs_buf *bp = NULL;
+ struct xfs_buf_log_item *bip;
int error;
*bpp = NULL;
- if (!tp) {
- bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
- if (!bp)
- return (flags & XBF_TRYLOCK) ?
- -EAGAIN : -ENOMEM;
-
- if (bp->b_error) {
- error = bp->b_error;
- xfs_buf_ioerror_alert(bp, __func__);
- XFS_BUF_UNDONE(bp);
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
-
- /* bad CRC means corrupted metadata */
- if (error == -EFSBADCRC)
- error = -EFSCORRUPTED;
- return error;
- }
-#ifdef DEBUG
- if (xfs_do_error) {
- if (xfs_error_target == target) {
- if (((xfs_req_num++) % xfs_error_mod) == 0) {
- xfs_buf_relse(bp);
- xfs_debug(mp, "Returning error!");
- return -EIO;
- }
- }
- }
-#endif
- if (XFS_FORCED_SHUTDOWN(mp))
- goto shutdown_abort;
- *bpp = bp;
- return 0;
- }
-
/*
* If we find the buffer in the cache with this transaction
* pointer in its b_fsprivate2 field, then we know we already
@@ -305,49 +261,24 @@ xfs_trans_read_buf_map(
* If the buffer is not yet read in, then we read it in, increment
* the lock recursion count, and return it to the caller.
*/
- bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
- if (bp != NULL) {
+ if (tp)
+ bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
+ if (bp) {
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_transp == tp);
ASSERT(bp->b_fspriv != NULL);
ASSERT(!bp->b_error);
- if (!(XFS_BUF_ISDONE(bp))) {
- trace_xfs_trans_read_buf_io(bp, _RET_IP_);
- ASSERT(!XFS_BUF_ISASYNC(bp));
- ASSERT(bp->b_iodone == NULL);
- XFS_BUF_READ(bp);
- bp->b_ops = ops;
-
- error = xfs_buf_submit_wait(bp);
- if (error) {
- if (!XFS_FORCED_SHUTDOWN(mp))
- xfs_buf_ioerror_alert(bp, __func__);
- xfs_buf_relse(bp);
- /*
- * We can gracefully recover from most read
- * errors. Ones we can't are those that happen
- * after the transaction's already dirty.
- */
- if (tp->t_flags & XFS_TRANS_DIRTY)
- xfs_force_shutdown(tp->t_mountp,
- SHUTDOWN_META_IO_ERROR);
- /* bad CRC means corrupted metadata */
- if (error == -EFSBADCRC)
- error = -EFSCORRUPTED;
- return error;
- }
- }
+ ASSERT(bp->b_flags & XBF_DONE);
+
/*
* We never locked this buf ourselves, so we shouldn't
* brelse it either. Just get out.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
- *bpp = NULL;
return -EIO;
}
-
bip = bp->b_fspriv;
bip->bli_recur++;
@@ -358,17 +289,29 @@ xfs_trans_read_buf_map(
}
bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
- if (bp == NULL) {
- *bpp = NULL;
- return (flags & XBF_TRYLOCK) ?
- 0 : -ENOMEM;
+ if (!bp) {
+ if (!(flags & XBF_TRYLOCK))
+ return -ENOMEM;
+ return tp ? 0 : -EAGAIN;
}
+
+ /*
+ * If we've had a read error, then the contents of the buffer are
+ * invalid and should not be used. To ensure that a followup read tries
+ * to pull the buffer from disk again, we clear the XBF_DONE flag and
+ * mark the buffer stale. This ensures that anyone who has a current
+ * reference to the buffer will interpret its contents correctly and
+ * future cache lookups will also treat it as an empty, uninitialised
+ * buffer.
+ */
if (bp->b_error) {
error = bp->b_error;
+ if (!XFS_FORCED_SHUTDOWN(mp))
+ xfs_buf_ioerror_alert(bp, __func__);
+ bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
- XFS_BUF_DONE(bp);
- xfs_buf_ioerror_alert(bp, __func__);
- if (tp->t_flags & XFS_TRANS_DIRTY)
+
+ if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
xfs_buf_relse(bp);
@@ -377,33 +320,19 @@ xfs_trans_read_buf_map(
error = -EFSCORRUPTED;
return error;
}
-#ifdef DEBUG
- if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
- if (xfs_error_target == target) {
- if (((xfs_req_num++) % xfs_error_mod) == 0) {
- xfs_force_shutdown(tp->t_mountp,
- SHUTDOWN_META_IO_ERROR);
- xfs_buf_relse(bp);
- xfs_debug(mp, "Returning trans error!");
- return -EIO;
- }
- }
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_buf_relse(bp);
+ trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
+ return -EIO;
}
-#endif
- if (XFS_FORCED_SHUTDOWN(mp))
- goto shutdown_abort;
- _xfs_trans_bjoin(tp, bp, 1);
+ if (tp)
+ _xfs_trans_bjoin(tp, bp, 1);
trace_xfs_trans_read_buf(bp->b_fspriv);
-
*bpp = bp;
return 0;
-shutdown_abort:
- trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
- xfs_buf_relse(bp);
- *bpp = NULL;
- return -EIO;
}
/*
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 846e061c2e98..76a16df55ef7 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 47978ba89dae..284397dd7990 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -18,10 +18,9 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index cdb4d86520e1..17280cd71934 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -21,8 +21,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 93455b998041..69f6e475de97 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -20,8 +20,6 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 1402fa855388..f5c40b0fadc2 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -42,6 +42,14 @@
#define wmb() mb()
#endif
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+
+#ifndef dma_wmb
+#define dma_wmb() wmb()
+#endif
+
#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif
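dma_rmb() and dma_wmb(), added above with rmb()/wmb() as the generic fallback, order CPU accesses to memory shared with a DMA master. A hedged sketch of the usual descriptor-ring usage; struct foo_desc and FOO_DESC_OWN are invented for illustration:

#include <linux/types.h>
#include <asm/barrier.h>

struct foo_desc {
	u32	status;		/* FOO_DESC_OWN set while the device owns it */
	u32	len;
	u64	addr;
};

#define FOO_DESC_OWN	0x80000000U

static bool foo_rx_poll_one(struct foo_desc *desc)
{
	if (desc->status & FOO_DESC_OWN)
		return false;	/* device still owns the descriptor */

	/* Order the ownership check before reading the payload fields. */
	dma_rmb();

	/* ... consume desc->len and desc->addr ... */
	return true;
}

static void foo_tx_queue_one(struct foo_desc *desc, u64 addr, u32 len)
{
	desc->addr = addr;
	desc->len  = len;

	/* Publish the payload fields before handing ownership to the device. */
	dma_wmb();
	desc->status = FOO_DESC_OWN;
}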
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
deleted file mode 100644
index b6312843dbd9..000000000000
--- a/include/asm-generic/hash.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-struct fast_hash_ops;
-static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-}
-
-#endif /* __ASM_GENERIC_HASH_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 752e30d63904..177d5973b132 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -103,6 +103,17 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_get_and_clear(mm, address, pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aa70cbda327c..bee5d683074d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,6 +164,7 @@
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
@@ -497,6 +498,7 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
CLKSRC_OF_TABLES() \
+ IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 74b13ec1ebd4..98abda9ed3aa 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -17,6 +17,32 @@
struct crypto_ahash;
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ * must be available to the @final and @finup calls, so they can
+ * store the resulting hash into it. For various predefined sizes,
+ * search include/crypto/ using
+ * git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ * buffer of this size must be passed to the @export function as it
+ * will save the partial state of the transformation into it. On the
+ * other side, the @import function will load the state from a
+ * buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ * structure of crypto_alg contains information common to all ciphers.
+ * The hash_alg_common data structure now adds the hash-specific
+ * information.
+ */
struct hash_alg_common {
unsigned int digestsize;
unsigned int statesize;
@@ -37,6 +63,63 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: Initialize the transformation context. Intended only to initialize the
+ * state of the HASH transformation at the beginning. This shall fill in
+ * the internal structures used during the entire duration of the whole
+ * transformation. No data processing happens at this point.
+ * @update: Push a chunk of data into the driver for transformation. This
+ * function actually pushes blocks of data from upper layers into the
+ * driver, which then passes those to the hardware as seen fit. This
+ * function must not finalize the HASH transformation by calculating the
+ * final message digest as this only adds more data into the
+ * transformation. This function shall not modify the transformation
+ * context, as this function may be called in parallel with the same
+ * transformation object. Data processing can happen synchronously
+ * [SHASH] or asynchronously [AHASH] at this point.
+ * @final: Retrieve result from the driver. This function finalizes the
+ * transformation and retrieves the resulting hash from the driver and
+ * pushes it back to upper layers. No data processing happens at this
+ * point.
+ * @finup: Combination of @update and @final. This function is effectively a
+ * combination of @update and @final calls issued in sequence. As some
+ * hardware cannot do @update and @final separately, this callback was
+ * added to allow such hardware to be used at least by IPsec. Data
+ * processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ * at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ * effectively behaves as the entire chain of operations, @init,
+ * @update and @final issued in sequence. Just like @finup, this was
+ * added for hardware which cannot do even the @finup, but can only do
+ * the whole transformation in one run. Data processing can happen
+ * synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set optional key used by the hashing algorithm. Intended to push
+ * optional key used by the hashing algorithm from upper layers into
+ * the driver. This function can store the key in the transformation
+ * context or can outright program it into the hardware. In the former
+ * case, one must be careful to program the key into the hardware at
+ * appropriate time and one must be careful that .setkey() can be
+ * called multiple times during the existence of the transformation
+ * object. Not all hashing algorithms do implement this function as it
+ * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ * this function. This function must be called before any other of the
+ * @init, @update, @final, @finup, @digest is called. No data
+ * processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import'ed back later on. This is useful when
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ * @halg: see struct hash_alg_common
+ */
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
@@ -63,6 +146,23 @@ struct shash_desc {
crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ * size is the memory size that needs to be allocated for
+ * shash_desc.__ctx
+ * @base: internally used
+ */
struct shash_alg {
int (*init)(struct shash_desc *desc);
int (*update)(struct shash_desc *desc, const u8 *data,
@@ -107,11 +207,35 @@ struct crypto_shash {
struct crypto_tfm base;
};
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
}
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
@@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
@@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common(
return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ *
+ * Return: message digest size of cipher
+ */
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_hash_alg_common(tfm)->digestsize;
@@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ * cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return tfm->reqsize;
@@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
return req->__ctx;
}
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_ahash_digest(struct ahash_request *req);
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_ahash_reqsize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
static inline int crypto_ahash_export(struct ahash_request *req, void *out)
{
return crypto_ahash_reqtfm(req)->export(req, out);
}
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
return crypto_ahash_reqtfm(req)->import(req, in);
}
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ * data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_ahash_init(struct ahash_request *req)
{
return crypto_ahash_reqtfm(req)->init(req);
}
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ * crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_ahash_update(struct ahash_request *req)
{
return crypto_ahash_reqtfm(req)->update(req);
}
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as ahash handle to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
req->base.tfm = crypto_ahash_tfm(tfm);
}
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls.
+ * During the allocation, the provided ahash handle is registered in
+ * the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
{
@@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc(
return req;
}
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void ahash_request_free(struct ahash_request *req)
{
kzfree(req);
@@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast(
return container_of(req, struct ahash_request, base);
}
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may be backlogged and
+ * grow beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * &crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t compl,
@@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
req->base.flags = flags;
}
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
@@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->result = result;
}
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
@@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
@@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask(
return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
}
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
@@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->digestsize;
@@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required to calculate the memory requirements so that the caller can
+ * allocate sufficient memory for the operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
return tfm->descsize;
@@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
return desc->__ctx;
}
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_shash_descsize).
+ *
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
static inline int crypto_shash_export(struct shash_desc *desc, void *out)
{
return crypto_shash_alg(desc->tfm)->export(desc, out);
}
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
return crypto_shash_alg(desc->tfm)->import(desc, in);
}
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_shash_init(struct shash_desc *desc)
{
return crypto_shash_alg(desc->tfm)->init(desc);
}
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
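The shash kernel-doc added above maps onto a short consumer: allocate a "sha256" transform, allocate an operational state of sizeof(struct shash_desc) + crypto_shash_descsize(), and compute a one-shot digest. This is an illustrative sketch, not code from this patch; foo_sha256() is a made-up helper and the caller must supply a digest buffer of crypto_shash_digestsize() bytes (32 for sha256).

#include <crypto/hash.h>
#include <linux/slab.h>
#include <linux/err.h>

static int foo_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Operational state: struct shash_desc plus the tfm's descsize. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init + update + final in one call. */
	err = crypto_shash_digest(desc, data, len, digest);

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}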
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213..cd62bf4289e9 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -42,6 +42,7 @@ struct af_alg_completion {
struct af_alg_control {
struct af_alg_iv *iv;
int op;
+ unsigned int aead_assoclen;
};
struct af_alg_type {
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index c93f9b917925..a16fb10142bf 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
void crypto_put_default_rng(void);
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
+ */
+
static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
{
return (struct crypto_rng *)tfm;
}
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
u32 type, u32 mask)
{
@@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+/**
+ * crypto_rng_alg - obtain name of RNG
+ * @tfm: cipher handle
+ *
+ * Return the generic name (cra_name) of the initialized random number generator
+ *
+ * Return: generic name string
+ */
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
@@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
return &crypto_rng_tfm(tfm)->crt_rng;
}
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
crypto_free_tfm(crypto_rng_tfm(tfm));
}
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: > 0 function was successful and returns the number of generated
+ * bytes; < 0 if an error occurred
+ */
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
}
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires
+ * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided seed
+ * should have the length of the seed size defined for the random number
+ * generator as defined by crypto_rng_seedsize.
+ *
+ * Return: 0 if the reset was successful; < 0 if an error occurred
+ */
static inline int crypto_rng_reset(struct crypto_rng *tfm,
u8 *seed, unsigned int slen)
{
return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
}
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require a reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
{
return crypto_rng_alg(tfm)->seedsize;
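Following the RNG kernel-doc above, a hedged usage sketch; "stdrng" is assumed to resolve to a registered generator (for example the DRBG or the krng wrapper mentioned in the crypto_alloc_rng() comment), and foo_get_random() is a made-up helper.

#include <crypto/rng.h>
#include <linux/err.h>

static int foo_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Per the kernel-doc, a positive return is the number of bytes. */
	ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret < 0 ? ret : 0;
}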
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 53ed87698a74..8ba35c622e22 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -125,8 +125,8 @@ struct dma_buf_attachment;
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
const char *format, ...);
-extern __printf(2, 3)
-void drm_err(const char *func, const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
/***********************************************************************/
/** \name DRM template customization defaults */
@@ -155,7 +155,7 @@ void drm_err(const char *func, const char *format, ...);
* \param arg arguments
*/
#define DRM_ERROR(fmt, ...) \
- drm_err(__func__, fmt, ##__VA_ARGS__)
+ drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -170,7 +170,7 @@ void drm_err(const char *func, const char *format, ...);
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
- drm_err(__func__, fmt, ##__VA_ARGS__); \
+ drm_err(fmt, ##__VA_ARGS__); \
})
#define DRM_INFO(fmt, ...) \
@@ -809,7 +809,7 @@ struct drm_device {
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
- struct drm_mode_config mode_config; /**< Current mode config */
+ struct drm_mode_config mode_config; /**< Current mode config */
/** \name GEM information */
/*@{ */
@@ -986,7 +986,7 @@ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
@@ -1028,10 +1028,25 @@ void drm_pci_agp_destroy(struct drm_device *dev);
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -ENOSYS;
+}
+#endif
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644
index 000000000000..ad2229574dd9
--- /dev/null
+++ b/include/drm/drm_atomic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane, struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 000000000000..f956b413311e
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_atomic_crtc_state_for_each_plane().
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful during (for
+ * example) ->atomic_check() operations, to validate the incoming state.
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
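A hedged sketch of how a driver might wire these helpers into its vtables; the example_* names are placeholders and the .fb_create assignment assumes a CMA-backed driver, which is not implied by this patch.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>

static const struct drm_mode_config_funcs example_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,         /* assumption: CMA framebuffers */
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_plane_funcs example_plane_funcs = {
        /* legacy entry points routed through the atomic helpers */
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = drm_plane_cleanup,
        /* default atomic state handling */
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};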
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c40070a92d6b..b86329813ad3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -42,6 +42,7 @@ struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
struct device_node;
+struct fence;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -136,14 +137,22 @@ struct drm_display_info {
u8 cea_rev;
};
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
- /**
- * Optinal callback for the dirty fb ioctl.
+ /*
+ * Optional callback for the dirty fb ioctl.
*
* Userspace can notify the driver via this callback
* that a area of the framebuffer has changed and should
@@ -196,7 +205,7 @@ struct drm_framebuffer {
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
- unsigned int length;
+ size_t length;
unsigned char data[];
};
@@ -215,7 +224,7 @@ struct drm_property {
uint64_t *values;
struct drm_device *dev;
- struct list_head enum_blob_list;
+ struct list_head enum_list;
};
struct drm_crtc;
@@ -224,19 +233,65 @@ struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
+struct drm_atomic_state;
/**
- * drm_crtc_funcs - control CRTCs for a given device
+ * struct drm_crtc_state - mutable CRTC state
+ * @enable: whether the CRTC should be enabled, gates all other state
+ * @mode_changed: for use by helpers and drivers when computing state updates
+ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @last_vblank_count: for helpers and drivers to capture the vblank of the
+ * update to ensure framebuffer cleanup isn't done too early
+ * @planes_changed: for use by helpers and drivers when computing state updates
+ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
+ * @mode: current mode timings
+ * @event: optional pointer to a DRM event to signal upon completion of the
+ * state update
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_crtc_state {
+ bool enable;
+
+ /* computed state bits used by helpers and drivers */
+ bool planes_changed : 1;
+ bool mode_changed : 1;
+
+ /* attached planes bitmask:
+ * WARNING: transitional helpers do not maintain plane_mask so
+ * drivers not converted over to atomic helpers should not rely
+ * on plane_mask being accurate!
+ */
+ u32 plane_mask;
+
+ /* last_vblank_count: for vblank waits before cleanup */
+ u32 last_vblank_count;
+
+ /* adjusted_mode: for use by helpers and drivers */
+ struct drm_display_mode adjusted_mode;
+
+ struct drm_display_mode mode;
+
+ struct drm_pending_vblank_event *event;
+
+ struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidated (e.g. resume)
* @cursor_set: setup the cursor
+ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
+ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
+ * @atomic_destroy_state: destroy an atomic state for this CRTC
+ * @atomic_set_property: set a property on an atomic state for this CRTC
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -287,16 +342,28 @@ struct drm_crtc_funcs {
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
+ void (*atomic_destroy_state)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ int (*atomic_set_property)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_crtc - central CRTC control structure
+ * struct drm_crtc - central CRTC control structure
* @dev: parent DRM device
+ * @port: OF node used by drm_of_find_possible_crtcs()
* @head: list management
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
* @cursor: cursor plane for this CRTC
+ * @cursor_x: current x position of the cursor, used for universal cursor planes
+ * @cursor_y: current y position of the cursor, used for universal cursor planes
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
@@ -309,10 +376,13 @@ struct drm_crtc_funcs {
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
- * @framedur_ns: precise line timing
+ * @linedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
+ * @state: current atomic state for this CRTC
+ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
+ * legacy ioctls
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -322,7 +392,7 @@ struct drm_crtc {
struct device_node *port;
struct list_head head;
- /**
+ /*
* crtc mutex
*
* This provides a read lock for the overall crtc state (mode, dpms
@@ -368,6 +438,8 @@ struct drm_crtc {
struct drm_object_properties properties;
+ struct drm_crtc_state *state;
+
/*
* For legacy crtc ioctls so that atomic drivers can get at the locking
* acquire context.
@@ -375,9 +447,22 @@ struct drm_crtc {
struct drm_modeset_acquire_ctx *acquire_ctx;
};
+/**
+ * struct drm_connector_state - mutable connector state
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+ struct drm_encoder *best_encoder;
+
+ struct drm_atomic_state *state;
+};
/**
- * drm_connector_funcs - control connectors on a given device
+ * struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
@@ -387,6 +472,9 @@ struct drm_crtc {
* @set_property: property for this connector may need an update
* @destroy: make object go away
* @force: notify the driver that the connector is forced on
+ * @atomic_duplicate_state: duplicate the atomic state for this connector
+ * @atomic_destroy_state: destroy an atomic state for this connector
+ * @atomic_set_property: set a property on an atomic state for this connector
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -411,10 +499,19 @@ struct drm_connector_funcs {
uint64_t val);
void (*destroy)(struct drm_connector *connector);
void (*force)(struct drm_connector *connector);
+
+ /* atomic update handling */
+ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+ void (*atomic_destroy_state)(struct drm_connector *connector,
+ struct drm_connector_state *state);
+ int (*atomic_set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
/**
- * drm_encoder_funcs - encoder controls
+ * struct drm_encoder_funcs - encoder controls
* @reset: reset state (e.g. at init or resume time)
* @destroy: cleanup and free associated data
*
@@ -428,7 +525,7 @@ struct drm_encoder_funcs {
#define DRM_CONNECTOR_MAX_ENCODER 3
/**
- * drm_encoder - central DRM encoder structure
+ * struct drm_encoder - central DRM encoder structure
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
@@ -472,7 +569,7 @@ struct drm_encoder {
#define MAX_ELD_BYTES 128
/**
- * drm_connector - central DRM connector control structure
+ * struct drm_connector - central DRM connector control structure
* @dev: parent DRM device
* @kdev: kernel device for sysfs attributes
* @attr: sysfs attributes
@@ -483,6 +580,7 @@ struct drm_encoder {
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -490,10 +588,13 @@ struct drm_encoder {
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
+ * @path_blob_ptr: DRM blob property data for the DP MST path property
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
* @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
* @encoder_ids: valid encoders for this connector
* @encoder: encoder driving this connector, if any
* @eld: EDID-like data, if present
@@ -503,6 +604,18 @@ struct drm_encoder {
* @video_latency: video latency info from ELD, if found
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the whole tile group fits in one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -538,6 +651,8 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
+ struct drm_property_blob *tile_blob_ptr;
+
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */
@@ -563,14 +678,63 @@ struct drm_connector {
unsigned bad_edid_counter;
struct dentry *debugfs_entry;
+
+ struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+};
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ struct fence *fence;
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+
+ struct drm_atomic_state *state;
};
+
/**
- * drm_plane_funcs - driver plane control functions
+ * struct drm_plane_funcs - driver plane control functions
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
+ * @reset: reset plane after state has been invalidated (e.g. resume)
* @set_property: called when a property is changed
+ * @atomic_duplicate_state: duplicate the atomic state for this plane
+ * @atomic_destroy_state: destroy an atomic state for this plane
+ * @atomic_set_property: set a property on an atomic state for this plane
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -585,6 +749,15 @@ struct drm_plane_funcs {
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+ void (*atomic_destroy_state)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ int (*atomic_set_property)(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
};
enum drm_plane_type {
@@ -594,7 +767,7 @@ enum drm_plane_type {
};
/**
- * drm_plane - central DRM plane control structure
+ * struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
* @head: for list management
* @base: base mode object
@@ -603,14 +776,19 @@ enum drm_plane_type {
* @format_count: number of formats supported
* @crtc: currently bound CRTC
* @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * drm_mode_set_config_internal() to implement correct refcounting.
* @funcs: helper functions
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
*/
struct drm_plane {
struct drm_device *dev;
struct list_head head;
+ struct drm_modeset_lock mutex;
+
struct drm_mode_object base;
uint32_t possible_crtcs;
@@ -620,8 +798,6 @@ struct drm_plane {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
- /* Temporary tracking of the old fb while a modeset is ongoing. Used
- * by drm_mode_set_config_internal to implement correct refcounting. */
struct drm_framebuffer *old_fb;
const struct drm_plane_funcs *funcs;
@@ -629,10 +805,14 @@ struct drm_plane {
struct drm_object_properties properties;
enum drm_plane_type type;
+
+ void *helper_private;
+
+ struct drm_plane_state *state;
};
/**
- * drm_bridge_funcs - drm_bridge control functions
+ * struct drm_bridge_funcs - drm_bridge control functions
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
@@ -656,7 +836,7 @@ struct drm_bridge_funcs {
};
/**
- * drm_bridge - central DRM bridge control structure
+ * struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @head: list management
* @base: base mode object
@@ -674,8 +854,35 @@ struct drm_bridge {
};
/**
- * drm_mode_set - new values for a CRTC config change
- * @head: list management
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @flags: state flags like async update
+ * @planes: pointer to array of plane pointers
+ * @plane_states: pointer to array of plane state pointers
+ * @crtcs: pointer to array of CRTC pointers
+ * @crtc_states: pointer to array of CRTC state pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of connector pointers
+ * @connector_states: pointer to array of connector state pointers
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+ struct drm_device *dev;
+ uint32_t flags;
+ struct drm_plane **planes;
+ struct drm_plane_state **plane_states;
+ struct drm_crtc **crtcs;
+ struct drm_crtc_state **crtc_states;
+ int num_connector;
+ struct drm_connector **connectors;
+ struct drm_connector_state **connector_states;
+
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+
+/**
+ * struct drm_mode_set - new values for a CRTC config change
* @fb: framebuffer to use for new config
* @crtc: CRTC whose configuration we're about to change
* @mode: mode timings to use
@@ -705,6 +912,9 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
+ * @atomic_check: check whether a given atomic state update is possible
+ * @atomic_commit: commit an atomic state update previously verified with
+ * atomic_check()
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
@@ -714,13 +924,20 @@ struct drm_mode_config_funcs {
struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd);
void (*output_poll_changed)(struct drm_device *dev);
+
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *a);
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *a,
+ bool async);
};
/**
- * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
+ * @num_bridges: bridge count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
@@ -740,10 +957,14 @@ struct drm_mode_group {
};
/**
- * drm_mode_config - Mode configuration control structure
+ * struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy ioctls
* @idr_mutex: mutex for KMS ID allocation and management
* @crtc_idr: main KMS ID tracking object
+ * @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
@@ -752,17 +973,28 @@ struct drm_mode_group {
* @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
* @num_crtc: number of CRTCs on this device
* @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
* @min_width: minimum pixel width on this device
* @min_height: minimum pixel height on this device
* @max_width: maximum pixel width on this device
* @max_height: maximum pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
- * @poll_enabled: track polling status for this device
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
* @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @async_page_flip: does this device support async flips on the primary plane?
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
*
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
@@ -774,16 +1006,10 @@ struct drm_mode_config {
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
-
- /**
- * fb_lock - mutex to protect fb state
- *
- * Besides the global fb list his also protects the fbs list in the
- * file_priv
- */
- struct mutex fb_lock;
+ struct mutex fb_lock; /* protects global and per-file fb lists */
int num_fb;
struct list_head fb_list;
@@ -824,6 +1050,7 @@ struct drm_mode_config {
struct drm_property *edid_property;
struct drm_property *dpms_property;
struct drm_property *path_property;
+ struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
@@ -851,6 +1078,10 @@ struct drm_mode_config {
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
+ /* properties for virtual machine layout */
+ struct drm_property *suggested_x_property;
+ struct drm_property *suggested_y_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
@@ -861,6 +1092,19 @@ struct drm_mode_config {
uint32_t cursor_width, cursor_height;
};
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+ if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -880,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs);
-extern int drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
@@ -978,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
- char *path);
+ const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- struct edid *edid);
+ const struct edid *edid);
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
@@ -1041,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
- char *formats[]);
+extern int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ char *modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1113,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
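To illustrate the new plane_mask tracking, a minimal sketch of iterating it with drm_for_each_plane_mask(); example_log_planes() is hypothetical and only logs the attached planes.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void example_log_planes(struct drm_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc_state->state->dev;
        struct drm_plane *plane;

        /* visits every plane whose bit is set in the CRTC state's plane_mask */
        drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask)
                DRM_DEBUG_KMS("plane %u attached\n", drm_plane_index(plane));
}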
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a3d75fefd010..7adbb65ea8ae 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ void (*atomic_begin)(struct drm_crtc *crtc);
+ void (*atomic_flush)(struct drm_crtc *crtc);
};
/**
@@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
extern void drm_helper_resume_force_mode(struct drm_device *dev);
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
/* drm_probe_helper.c */
extern int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000000..623b4e98e748
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+ u8 rev;
+ u8 bytes;
+ u8 prod_id;
+ u8 ext_count;
+} __packed;
+
+struct displayid_block {
+ u8 tag;
+ u8 rev;
+ u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+ struct displayid_block base;
+ u8 tile_cap;
+ u8 topo[3];
+ u8 tile_size[4];
+ u8 tile_pixel_bezel[5];
+ u8 topology_id[8];
+} __packed;
+
+#endif
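A rough sketch of walking a DisplayID section with these structures to find the tiled-display block; bounds and checksum checks are deliberately omitted and example_find_tile_block() is not a real kernel function.

#include <linux/types.h>
#include <drm/drm_displayid.h>

static const struct displayid_tiled_block *
example_find_tile_block(const u8 *section)
{
        const struct displayid_hdr *hdr = (const struct displayid_hdr *)section;
        int end = sizeof(*hdr) + hdr->bytes;
        int offset = sizeof(*hdr);

        while (offset + (int)sizeof(struct displayid_block) <= end) {
                const struct displayid_block *block =
                        (const struct displayid_block *)(section + offset);

                if (block->tag == DATA_BLOCK_TILED_DISPLAY)
                        return (const struct displayid_tiled_block *)block;

                offset += sizeof(*block) + block->num_bytes;
        }

        return NULL;
}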
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9305c718d789..11f8c84f98ce 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -303,7 +303,8 @@
#define DP_TEST_CRC_B_CB 0x244
#define DP_TEST_SINK_MISC 0x246
-#define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_COUNT_MASK 0x7
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
@@ -313,7 +314,7 @@
#define DP_TEST_EDID_CHECKSUM 0x261
#define DP_TEST_SINK 0x270
-#define DP_TEST_SINK_START (1 << 0)
+# define DP_TEST_SINK_START (1 << 0)
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
@@ -404,26 +405,6 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
-/**
- * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
- * aux algorithm
- * @running: set by the algo indicating whether an i2c is ongoing or whether
- * the i2c bus is quiescent
- * @address: i2c target address for the currently ongoing transfer
- * @aux_ch: driver callback to transfer a single byte of the i2c payload
- */
-struct i2c_algo_dp_aux_data {
- bool running;
- u16 address;
- int (*aux_ch) (struct i2c_adapter *adapter,
- int mode, uint8_t write_byte,
- uint8_t *read_byte);
-};
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-
-
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
@@ -550,6 +531,7 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
+ unsigned i2c_nack_count, i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 338fc1053835..00c1da927245 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -28,7 +28,7 @@
struct drm_dp_mst_branch;
/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifer
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
* @vcpi: Virtual channel ID.
* @pbn: Payload Bandwidth Number for this channel
* @aligned_pbn: PBN aligned with slot size
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
+
+ struct edid *cached_edid; /* for DP logical ports - make tiling work */
};
/**
@@ -371,7 +373,7 @@ struct drm_dp_sideband_msg_tx {
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
- struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
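Since drm_dp_mst_detect_port() now takes the connector (so the cached EDID on DP logical ports can be used), a driver's ->detect() hook ends up looking roughly like the sketch below; struct example_mst_connector is an assumption, not a real structure.

#include <drm/drm_crtc.h>
#include <drm/drm_dp_mst_helper.h>

struct example_mst_connector {
        struct drm_connector base;
        struct drm_dp_mst_topology_mgr *mgr;
        struct drm_dp_mst_port *port;
};

static enum drm_connector_status
example_mst_detect(struct drm_connector *connector, bool force)
{
        struct example_mst_connector *c =
                container_of(connector, struct example_mst_connector, base);

        /* connector is passed through so the helper can use cached EDIDs */
        return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}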
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b96031d947a0..87d85e81d3a7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
struct est_timings {
u8 t1;
@@ -207,6 +209,61 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
struct edid {
u8 header[8];
/* Vendor & product info */
@@ -279,4 +336,56 @@ int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. It's vendor specific,
+ * and comprises the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const uint8_t *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data);
+
#endif /* __DRM_EDID_H__ */
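As a worked example of the ELD helpers (numbers chosen purely for illustration): with a 12-byte monitor name and 2 SADs, drm_eld_calc_baseline_block_size() gives 20 - 4 + 12 + 2 * 3 = 34 bytes, while drm_eld_size() reads the padded total straight from the header. A minimal sketch of copying an ELD into a fixed-size audio-driver buffer, with example_copy_eld() being a hypothetical helper:

#include <linux/kernel.h>
#include <linux/string.h>
#include <drm/drm_edid.h>

static void example_copy_eld(u8 *dst, size_t dst_size, const uint8_t *eld)
{
        /* never copy more than the sink actually provided */
        size_t len = min_t(size_t, drm_eld_size(eld), dst_size);

        memcpy(dst, eld, len);
}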
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e3488..b597068103aa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
#include <linux/kgdb.h>
+struct drm_fb_offset {
+ int x, y;
+};
+
struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
+ int x, y;
};
struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
bool (*initial_config)(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height);
};
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 9eed34dcd6af..d387cf06ae05 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -25,6 +25,7 @@
#define DRM_FLIP_WORK_H
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
/**
@@ -32,9 +33,9 @@
*
* Util to queue up work to run from work-queue context after flip/vblank.
* Typically this can be used to defer unref of framebuffer's, cursor
- * bo's, etc until after vblank. The APIs are all safe (and lockless)
- * for up to one producer and once consumer at a time. The single-consumer
- * aspect is ensured by committing the queued work to a single work-queue.
+ * bo's, etc until after vblank. The APIs are all thread-safe.
+ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
+ * in atomic context.
*/
struct drm_flip_work;
@@ -51,26 +52,40 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+/**
* struct drm_flip_work - flip work queue
* @name: debug name
- * @pending: number of queued but not committed items
- * @count: number of committed items
* @func: callback fxn called for each committed item
* @worker: worker which calls @func
- * @fifo: queue of committed items
+ * @queued: queued tasks
+ * @commited: committed tasks
+ * @lock: lock protecting the queued and commited lists
*/
struct drm_flip_work {
const char *name;
- atomic_t pending, count;
drm_flip_func_t func;
struct work_struct worker;
- DECLARE_KFIFO_PTR(fifo, void *);
+ struct list_head queued;
+ struct list_head commited;
+ spinlock_t lock;
};
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
-int drm_flip_work_init(struct drm_flip_work *work, int size,
+void drm_flip_work_init(struct drm_flip_work *work,
const char *name, drm_flip_func_t func);
void drm_flip_work_cleanup(struct drm_flip_work *work);
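A sketch of the reworked flow, assuming the work was set up once with drm_flip_work_init(&work, "fb unref", example_unref_fb) (which no longer allocates and so no longer returns an error); the example_* names are illustrative.

#include <linux/workqueue.h>
#include <drm/drmP.h>
#include <drm/drm_flip_work.h>

static void example_unref_fb(struct drm_flip_work *work, void *val)
{
        drm_framebuffer_unreference(val);
}

static void example_flip_complete(struct drm_flip_work *unref_work,
                                  struct drm_framebuffer *old_fb)
{
        /* queueing is now safe from atomic (e.g. vblank irq) context */
        drm_flip_work_queue(unref_work, old_fb);
        /* run example_unref_fb() for the queued fbs from a workqueue */
        drm_flip_work_commit(unref_work, system_wq);
}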
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae1458f7a..780511a459c0 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,6 +119,13 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * dumb - created as dumb buffer
+ * Whether the GEM object was created using the dumb buffer interface;
+ * if so, it may not be used for GPU rendering.
+ */
+ bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2ff35f3de9c5..acd6af8a8e67 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,13 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+/**
+ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * @base: base GEM object
+ * @paddr: physical address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers
+ * @vaddr: kernel virtual address of the backing memory
+ */
struct drm_gem_cma_object {
struct drm_gem_object base;
dma_addr_t paddr;
@@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
return container_of(gem_obj, struct drm_gem_cma_object, base);
}
-/* free gem object. */
+/* free GEM object */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-/* create memory region for drm framebuffer. */
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm, struct drm_mode_create_dumb *args);
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
+/* map memory region for DRM framebuffer to user space */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, uint32_t handle, uint64_t *offset);
+ struct drm_device *drm, u32 handle,
+ u64 *offset);
-/* set vm_flags and we can change the vm attribute to other one at here. */
+/* set vm_flags; VM attributes can be adjusted here if necessary */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-/* allocate physical memory. */
+/* allocate physical memory */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- unsigned int size);
+ size_t size);
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 8569dc5a1026..f1d8d0dbb4f1 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -26,6 +26,7 @@ struct mipi_dsi_device;
* struct mipi_dsi_msg - read/write DSI buffer
* @channel: virtual channel id
* @type: payload data type
+ * @flags: flags controlling this message transmission
* @tx_len: length of @tx_buf
* @tx_buf: data to be written
* @rx_len: length of @rx_buf
@@ -43,12 +44,44 @@ struct mipi_dsi_msg {
void *rx_buf;
};
+bool mipi_dsi_packet_format_is_short(u8 type);
+bool mipi_dsi_packet_format_is_long(u8 type);
+
+/**
+ * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
+ * @size: size (in bytes) of the packet
+ * @header: the four bytes that make up the header (Data ID, Word Count or
+ * Packet Data, and ECC)
+ * @payload_length: number of bytes in the payload
+ * @payload: a pointer to a buffer containing the payload, if any
+ */
+struct mipi_dsi_packet {
+ size_t size;
+ u8 header[4];
+ size_t payload_length;
+ const u8 *payload;
+};
+
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg);
+
/**
* struct mipi_dsi_host_ops - DSI bus operations
* @attach: attach DSI device to DSI host
* @detach: detach DSI device from DSI host
- * @transfer: send and/or receive DSI packet, return number of received bytes,
- * or error
+ * @transfer: transmit a DSI packet
+ *
+ * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
+ * structures. This structure contains information about the type of packet
+ * being transmitted as well as the transmit and receive buffers. When an
+ * error is encountered during transmission, this function will return a
+ * negative error code. On success it shall return the number of bytes
+ * transmitted for write packets or the number of bytes received for read
+ * packets.
+ *
+ * Note that typically DSI packet transmission is atomic, so the .transfer()
+ * function will seldom return anything other than the number of bytes
+ * contained in the transmit buffer on success.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -56,7 +89,7 @@ struct mipi_dsi_host_ops {
int (*detach)(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi);
ssize_t (*transfer)(struct mipi_dsi_host *host,
- struct mipi_dsi_msg *msg);
+ const struct mipi_dsi_msg *msg);
};
/**
@@ -130,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
return container_of(dev, struct mipi_dsi_device, dev);
}
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
-ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
- size_t len);
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value);
+
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size);
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size);
+
+/**
+ * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
+ * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
+ * information only
+ * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK: the TE output line consists of both
+ * V-Blanking and H-Blanking information
+ */
+enum mipi_dsi_dcs_tear_mode {
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK,
+ MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
+};
+
+#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
+#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
+#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
+#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
+#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
+
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode);
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
/**
* struct mipi_dsi_driver - DSI driver
@@ -167,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
dev_set_drvdata(&dsi->dev, data);
}
-int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
+ struct module *owner);
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+#define mipi_dsi_driver_register(driver) \
+ mipi_dsi_driver_register_full(driver, THIS_MODULE)
+
#define module_mipi_dsi_driver(__mipi_dsi_driver) \
module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
mipi_dsi_driver_unregister)
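A hedged sketch of a panel enable path built from the new DCS helpers; the 120 ms sleep-out delay and the exact command sequence are illustrative rather than taken from any real panel datasheet.

#include <linux/delay.h>
#include <drm/drm_mipi_dsi.h>

static int example_panel_enable(struct mipi_dsi_device *dsi)
{
        int ret;

        ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
        if (ret < 0)
                return ret;
        msleep(120);    /* assumed sleep-out delay */

        ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
        if (ret < 0)
                return ret;

        return mipi_dsi_dcs_set_display_on(dsi);
}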
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 75a5c45e21c7..70595ff565ba 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -33,6 +33,7 @@ struct drm_modeset_lock;
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
* @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
*
* Each thread competing for a set of locks must use one acquire
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and
@@ -126,11 +127,13 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock);
struct drm_device;
struct drm_crtc;
+struct drm_plane;
void drm_modeset_lock_all(struct drm_device *dev);
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_modeset_lock_crtc(struct drm_crtc *crtc);
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane);
void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
struct drm_modeset_acquire_ctx *
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 52e6870534b2..a185392cafeb 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -25,6 +25,7 @@
#define DRM_PLANE_HELPER_H
#include <drm/drm_rect.h>
+#include <drm/drm_crtc.h>
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
@@ -42,6 +43,37 @@
* planes.
*/
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+
+/**
+ * struct drm_plane_helper_funcs - helper operations for planes
+ * @prepare_fb: prepare a framebuffer for use by the plane
+ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
+ * @atomic_check: check that a given atomic state is valid and can be applied
+ * @atomic_update: apply an atomic state to the plane
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_plane_helper_funcs {
+ int (*prepare_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+ void (*cleanup_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
+ int (*atomic_check)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ void (*atomic_update)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+};
+
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+ const struct drm_plane_helper_funcs *funcs)
+{
+ plane->helper_private = (void *)funcs;
+}
+
extern int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
int num_formats);
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_plane_helper_disable(struct drm_plane *plane);
+
+/* For use by drm_crtc_helper.c */
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb);
#endif
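A minimal sketch of hooking a driver plane into the new plane helper infrastructure; example_plane_atomic_check()/example_plane_atomic_update() are stubs standing in for hardware-specific code.

#include <drm/drm_plane_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_plane_state *state)
{
        /* validate state->crtc_x/y/w/h and the 16.16 src rectangle here */
        return 0;
}

static void example_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_plane_state *old_state)
{
        /* program the hardware from plane->state */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
        .atomic_check = example_plane_atomic_check,
        .atomic_update = example_plane_atomic_update,
};

/* after initializing the plane:
 *      drm_plane_helper_add(plane, &example_plane_helper_funcs);
 */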
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a70d45647898..180ad0e6de21 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -259,4 +259,21 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
+#define INTEL_SKL_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+ INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 460441714413..b620c317c772 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* calling process receives a signal while waiting. In that case, no
* buffers on the list will be reserved upon return.
*
+ * If @dups is non-NULL, all buffers already reserved by the current thread
+ * (i.e. duplicates) are added to this list; otherwise -EALREADY is returned
+ * on the first already-reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
* ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
*/
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr);
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
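A minimal caller-side sketch of the new @dups argument (hypothetical foo_ name; the validation list is assumed to be a list of struct ttm_validate_buffer entries built by the caller):

	/* Illustrative only: reserve a list, tolerating duplicate entries. */
	static int foo_reserve(struct ww_acquire_ctx *ticket,
			       struct list_head *validate_list)
	{
		struct list_head dups;
		int ret;

		INIT_LIST_HEAD(&dups);

		/* interruptible reserve; duplicates go to &dups instead of -EALREADY */
		ret = ttm_eu_reserve_buffers(ticket, validate_list, true, &dups);
		if (ret)
			return ret;

		/* ... validate and use the buffers, then fence or back off ... */
		return 0;
	}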
diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h
index e835037a77b4..ab6cbba45401 100644
--- a/include/dt-bindings/dma/at91.h
+++ b/include/dt-bindings/dma/at91.h
@@ -9,6 +9,8 @@
#ifndef __DT_BINDINGS_AT91_DMA_H__
#define __DT_BINDINGS_AT91_DMA_H__
+/* ---------- HDMAC ---------- */
+
/*
* Source and/or destination peripheral ID
*/
@@ -24,4 +26,27 @@
#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */
#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */
+
+/* ---------- XDMAC ---------- */
+#define AT91_XDMAC_DT_MEM_IF_MASK (0x1)
+#define AT91_XDMAC_DT_MEM_IF_OFFSET (13)
+#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \
+ << AT91_XDMAC_DT_MEM_IF_OFFSET)
+#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \
+ & AT91_XDMAC_DT_MEM_IF_MASK)
+
+#define AT91_XDMAC_DT_PER_IF_MASK (0x1)
+#define AT91_XDMAC_DT_PER_IF_OFFSET (14)
+#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \
+ << AT91_XDMAC_DT_PER_IF_OFFSET)
+#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \
+ & AT91_XDMAC_DT_PER_IF_MASK)
+
+#define AT91_XDMAC_DT_PERID_MASK (0x7f)
+#define AT91_XDMAC_DT_PERID_OFFSET (24)
+#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \
+ << AT91_XDMAC_DT_PERID_OFFSET)
+#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
+ & AT91_XDMAC_DT_PERID_MASK)
+
#endif /* __DT_BINDINGS_AT91_DMA_H__ */
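The XDMAC macros pack the peripheral ID and the memory/peripheral interface numbers into a single configuration word; a small illustrative helper (the perid value 25 is an arbitrary example) shows both directions:

	/* Illustrative only: building and decoding an XDMAC dma-cell value. */
	static unsigned int foo_build_cfg(void)
	{
		unsigned int cfg = AT91_XDMAC_DT_MEM_IF(0) |
				   AT91_XDMAC_DT_PER_IF(1) |
				   AT91_XDMAC_DT_PERID(25);

		/* AT91_XDMAC_DT_GET_PERID(cfg) == 25,
		 * AT91_XDMAC_DT_GET_MEM_IF(cfg) == 0,
		 * AT91_XDMAC_DT_GET_PER_IF(cfg) == 1 */
		return cfg;
	}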
diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h
new file mode 100644
index 000000000000..cf35a577e371
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mips-gic.h
@@ -0,0 +1,9 @@
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define GIC_SHARED 0
+#define GIC_LOCAL 1
+
+#endif
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
new file mode 100644
index 000000000000..6c901930eb3e
--- /dev/null
+++ b/include/dt-bindings/phy/phy.h
@@ -0,0 +1,19 @@
+/*
+ *
+ * This header provides constants for the phy framework
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _DT_BINDINGS_PHY
+#define _DT_BINDINGS_PHY
+
+#define PHY_NONE 0
+#define PHY_TYPE_SATA 1
+#define PHY_TYPE_PCIE 2
+#define PHY_TYPE_USB2 3
+#define PHY_TYPE_USB3 4
+
+#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
new file mode 100644
index 000000000000..fa74d7cc960c
--- /dev/null
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -0,0 +1,142 @@
+/*
+ * This header provides constants for the Qualcomm PMIC GPIO binding.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H
+#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H
+
+#define PMIC_GPIO_PULL_UP_30 0
+#define PMIC_GPIO_PULL_UP_1P5 1
+#define PMIC_GPIO_PULL_UP_31P5 2
+#define PMIC_GPIO_PULL_UP_1P5_30 3
+
+#define PMIC_GPIO_STRENGTH_NO 0
+#define PMIC_GPIO_STRENGTH_HIGH 1
+#define PMIC_GPIO_STRENGTH_MED 2
+#define PMIC_GPIO_STRENGTH_LOW 3
+
+/*
+ * Note: PM8018 GPIO3 and GPIO4 support
+ * only the S3 and L2 options (1.8V)
+ */
+#define PM8018_GPIO_L6 0
+#define PM8018_GPIO_L5 1
+#define PM8018_GPIO_S3 2
+#define PM8018_GPIO_L14 3
+#define PM8018_GPIO_L2 4
+#define PM8018_GPIO_L4 5
+#define PM8018_GPIO_VDD 6
+
+/*
+ * Note: PM8038 GPIO7 and GPIO8 support
+ * only the L11 and L4 options (1.8V)
+ */
+#define PM8038_GPIO_VPH 0
+#define PM8038_GPIO_BB 1
+#define PM8038_GPIO_L11 2
+#define PM8038_GPIO_L15 3
+#define PM8038_GPIO_L4 4
+#define PM8038_GPIO_L3 5
+#define PM8038_GPIO_L17 6
+
+#define PM8058_GPIO_VPH 0
+#define PM8058_GPIO_BB 1
+#define PM8058_GPIO_S3 2
+#define PM8058_GPIO_L3 3
+#define PM8058_GPIO_L7 4
+#define PM8058_GPIO_L6 5
+#define PM8058_GPIO_L5 6
+#define PM8058_GPIO_L2 7
+
+#define PM8917_GPIO_VPH 0
+#define PM8917_GPIO_S4 2
+#define PM8917_GPIO_L15 3
+#define PM8917_GPIO_L4 4
+#define PM8917_GPIO_L3 5
+#define PM8917_GPIO_L17 6
+
+#define PM8921_GPIO_VPH 0
+#define PM8921_GPIO_BB 1
+#define PM8921_GPIO_S4 2
+#define PM8921_GPIO_L15 3
+#define PM8921_GPIO_L4 4
+#define PM8921_GPIO_L3 5
+#define PM8921_GPIO_L17 6
+
+/*
+ * Note: PM8941 GPIOs 15 to 18 support
+ * only the S3 and L6 options (1.8V)
+ */
+#define PM8941_GPIO_VPH 0
+#define PM8941_GPIO_L1 1
+#define PM8941_GPIO_S3 2
+#define PM8941_GPIO_L6 3
+
+/*
+ * Note: PMA8084 GPIOs 15 to 18 support
+ * only the S4 and L6 options (1.8V)
+ */
+#define PMA8084_GPIO_VPH 0
+#define PMA8084_GPIO_L1 1
+#define PMA8084_GPIO_S4 2
+#define PMA8084_GPIO_L6 3
+
+/* To be used with "function" */
+#define PMIC_GPIO_FUNC_NORMAL "normal"
+#define PMIC_GPIO_FUNC_PAIRED "paired"
+#define PMIC_GPIO_FUNC_FUNC1 "func1"
+#define PMIC_GPIO_FUNC_FUNC2 "func2"
+#define PMIC_GPIO_FUNC_DTEST1 "dtest1"
+#define PMIC_GPIO_FUNC_DTEST2 "dtest2"
+#define PMIC_GPIO_FUNC_DTEST3 "dtest3"
+#define PMIC_GPIO_FUNC_DTEST4 "dtest4"
+
+#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO3_5V_BOOST_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2
+
+#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO34_35_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2
+#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1
+
+#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
+#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2
+
+#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2
+#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1
+#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2
+
+#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+
+#endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
new file mode 100644
index 000000000000..d2c7dabe3223
--- /dev/null
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -0,0 +1,44 @@
+/*
+ * This header provides constants for the Qualcomm PMIC's
+ * Multi-Purpose Pin binding.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
+#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
+
+/* power-source */
+#define PM8841_MPP_VPH 0
+#define PM8841_MPP_S3 2
+
+#define PM8941_MPP_VPH 0
+#define PM8941_MPP_L1 1
+#define PM8941_MPP_S3 2
+#define PM8941_MPP_L6 3
+
+#define PMA8084_MPP_VPH 0
+#define PMA8084_MPP_L1 1
+#define PMA8084_MPP_S4 2
+#define PMA8084_MPP_L6 3
+
+/*
+ * Analog Input - Set the source for analog input.
+ * To be used with "qcom,amux-route" property
+ */
+#define PMIC_MPP_AMUX_ROUTE_CH5 0
+#define PMIC_MPP_AMUX_ROUTE_CH6 1
+#define PMIC_MPP_AMUX_ROUTE_CH7 2
+#define PMIC_MPP_AMUX_ROUTE_CH8 3
+#define PMIC_MPP_AMUX_ROUTE_ABUS1 4
+#define PMIC_MPP_AMUX_ROUTE_ABUS2 5
+#define PMIC_MPP_AMUX_ROUTE_ABUS3 6
+#define PMIC_MPP_AMUX_ROUTE_ABUS4 7
+
+/* To be used with "function" */
+#define PMIC_MPP_FUNC_NORMAL "normal"
+#define PMIC_MPP_FUNC_PAIRED "paired"
+#define PMIC_MPP_FUNC_DTEST1 "dtest1"
+#define PMIC_MPP_FUNC_DTEST2 "dtest2"
+#define PMIC_MPP_FUNC_DTEST3 "dtest3"
+#define PMIC_MPP_FUNC_DTEST4 "dtest4"
+
+#endif
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
new file mode 100644
index 000000000000..85aaf66690f9
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -0,0 +1,13 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-soctherm.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H
+
+#define TEGRA124_SOCTHERM_SENSOR_CPU 0
+#define TEGRA124_SOCTHERM_SENSOR_MEM 1
+#define TEGRA124_SOCTHERM_SENSOR_GPU 2
+#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+
+#endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index ad9db6045b2f..b3f45a578344 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -60,7 +60,8 @@ struct arch_timer_cpu {
#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_enable(struct kvm *kvm);
+void kvm_timer_init(struct kvm *kvm);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void)
return 0;
};
-static inline int kvm_timer_init(struct kvm *kvm)
-{
- return 0;
-}
-
+static inline void kvm_timer_enable(struct kvm *kvm) {}
+static inline void kvm_timer_init(struct kvm *kvm) {}
static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq) {}
static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 206dcc3b3f7a..ac4888dc86bc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -274,7 +274,7 @@ struct kvm_exit_mmio;
#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -287,7 +287,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k) ((k)->arch.vgic.ready)
+#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
+#define vgic_ready(k) ((k)->arch.vgic.ready)
int vgic_v2_probe(struct device_node *vgic_node,
const struct vgic_ops **ops,
@@ -321,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
return -ENXIO;
}
-static inline int kvm_vgic_init(struct kvm *kvm)
+static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
return 0;
}
@@ -373,6 +374,11 @@ static inline bool vgic_initialized(struct kvm *kvm)
{
return true;
}
+
+static inline bool vgic_ready(struct kvm *kvm)
+{
+ return true;
+}
#endif
#endif
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c324f5700d1a..2afc618b15ce 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -23,6 +23,7 @@
#define AMBA_NR_IRQS 9
#define AMBA_CID 0xb105f00d
+#define CORESIGHT_CID 0xb105900d
struct clk;
@@ -97,6 +98,16 @@ void amba_release_regions(struct amba_device *);
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
+static inline int amba_pclk_prepare(struct amba_device *dev)
+{
+ return clk_prepare(dev->pclk);
+}
+
+static inline void amba_pclk_unprepare(struct amba_device *dev)
+{
+ clk_unprepare(dev->pclk);
+}
+
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,8 +31,11 @@ struct ath9k_platform_data {
u32 gpio_mask;
u32 gpio_val;
+ bool endian_check;
bool is_clk_25mhz;
bool tx_gain_buffalo;
+ bool disable_2ghz;
+ bool disable_5ghz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 729f48e6b20b..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
extern u32 bcma_core_dma_translation(struct bcma_device *core);
+extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
+
#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -43,12 +43,12 @@ struct bcma_drv_mips {
extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-extern unsigned int bcma_core_irq(struct bcma_device *core);
+extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
#else
static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-static inline unsigned int bcma_core_irq(struct bcma_device *core)
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
return 0;
}
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -53,6 +53,10 @@ struct linux_binprm {
#define BINPRM_FLAGS_EXECFD_BIT 1
#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+/* filename of the binary will be inaccessible after exec */
+#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
+#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
+
/* Function parameter for binfmt->coredump */
struct coredump_params {
const siginfo_t *siginfo;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7347f486ceca..efead0b532c4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -443,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+void generic_start_io_acct(int rw, unsigned long sectors,
+ struct hd_struct *part);
+void generic_end_io_acct(int rw, struct hd_struct *part,
+ unsigned long start_time);
+
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
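The two helpers factor out the per-partition accounting that bio-based drivers used to open-code; a hedged sketch of a make_request-style driver using them (struct foo_dev and its gendisk pointer are hypothetical):

	/* Illustrative only: accounting a single bio in a bio-based driver. */
	struct foo_dev {
		struct gendisk *disk;
	};

	static void foo_handle_bio(struct foo_dev *dev, struct bio *bio)
	{
		int rw = bio_data_dir(bio);
		unsigned long start = jiffies;

		generic_start_io_acct(rw, bio_sectors(bio), &dev->disk->part0);

		/* ... perform the transfer ... */

		generic_end_io_acct(rw, &dev->disk->part0, start);
		bio_endio(bio, 0);
	}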
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..202e4034fe26 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, off) As above, honouring an extra alignment offset
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -60,6 +61,7 @@
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap mask as list/hex
*/
/*
@@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
-extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask);
+
+extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bit number to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of each zero area this function finds is a multiple of that
+ * power of 2. An @align_mask of 0 means no alignment is required.
+ */
+static inline unsigned long
+bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, 0);
+}
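A hypothetical caller of the new _off variant: when the region covered by the bitmap does not itself start on the requested alignment boundary, passing the base offset keeps the returned index aligned in the underlying address space rather than within the bitmap (sketch only, assuming align is a power of two):

	/* Illustrative only: alignment relative to an unaligned base PFN. */
	static unsigned long foo_alloc_bits(unsigned long *map, unsigned long nbits,
					    unsigned long base_pfn,
					    unsigned int nr, unsigned long align)
	{
		unsigned long mask = align - 1;

		return bitmap_find_next_zero_area_off(map, nbits, 0, nr,
						      mask, base_pfn & mask);
	}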
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
@@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
+extern int bitmap_print_to_pagebuf(bool list, char *buf,
+ const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 15f7034aa377..8aded9ab2e4e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -79,7 +79,13 @@ struct blk_mq_tag_set {
struct list_head tag_list;
};
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
+struct blk_mq_queue_data {
+ struct request *rq;
+ struct list_head *list;
+ bool last;
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
@@ -140,6 +146,7 @@ enum {
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_SYSFS_UP = 1 << 3,
+ BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
@@ -162,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
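With queue_rq_fn now taking a blk_mq_queue_data, a driver's hook pulls the request and the 'last' hint out of the descriptor; an illustrative (hypothetical foo_) implementation:

	/* Illustrative only: a queue_rq hook under the new prototype. */
	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;

		blk_mq_start_request(rq);

		/* issue rq to the hardware; bd->last hints that no more requests
		 * follow immediately, so doorbell writes can be batched */

		return BLK_MQ_RQ_QUEUE_OK;
	}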
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0495e3854247..92f4b4b288dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1184,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 1024,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3cf91754a957..bbfceb756452 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -22,7 +22,7 @@ struct bpf_map_ops {
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
- int (*map_update_elem)(struct bpf_map *map, void *key, void *value);
+ int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
};
@@ -128,9 +128,18 @@ struct bpf_prog_aux {
struct work_struct work;
};
+#ifdef CONFIG_BPF_SYSCALL
void bpf_prog_put(struct bpf_prog *prog);
+#else
+static inline void bpf_prog_put(struct bpf_prog *prog) {}
+#endif
struct bpf_prog *bpf_prog_get(u32 ufd);
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+/* verifier prototypes for helper functions called from eBPF programs */
+extern struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern struct bpf_func_proto bpf_map_update_elem_proto;
+extern struct bpf_func_proto bpf_map_delete_elem_proto;
+
#endif /* _LINUX_BPF_H */
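The extra u64 flags argument lets map backends honour the update-flags semantics introduced alongside this change (BPF_ANY/BPF_NOEXIST/BPF_EXIST in the uapi header); a simplified, illustrative backend stub with a hypothetical foo_lookup() helper:

	/* Illustrative only: honouring the update flags in a map backend. */
	static int foo_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
	{
		bool exists = foo_lookup(map, key) != NULL;	/* hypothetical */

		if (flags == BPF_NOEXIST && exists)
			return -EEXIST;
		if (flags == BPF_EXIST && !exists)
			return -ENOENT;

		/* ... insert or replace the element ... */
		return 0;
	}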
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000000..3daf5ed392c9
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_CACHEINFO_H
+#define _LINUX_CACHEINFO_H
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+
+struct device_node;
+struct attribute;
+
+enum cache_type {
+ CACHE_TYPE_NOCACHE = 0,
+ CACHE_TYPE_INST = BIT(0),
+ CACHE_TYPE_DATA = BIT(1),
+ CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
+ CACHE_TYPE_UNIFIED = BIT(2),
+};
+
+/**
+ * struct cacheinfo - represent a cache leaf node
+ * @type: type of the cache - data, inst or unified
+ * @level: represents the level in the multi-level cache hierarchy
+ * @coherency_line_size: size of each cache line usually representing
+ * the minimum amount of data that gets transferred from memory
+ * @number_of_sets: total number of sets, a set is a collection of cache
+ * lines sharing the same index
+ * @ways_of_associativity: number of ways in which a particular memory
+ * block can be placed in the cache
+ * @physical_line_partition: number of physical cache lines sharing the
+ * same cachetag
+ * @size: Total size of the cache
+ * @shared_cpu_map: logical cpumask representing all the cpus sharing
+ * this cache node
+ * @attributes: bitfield representing various cache attributes
+ * @of_node: if devicetree is used, this represents either the cpu node in
+ * case there's no explicit cache node or the cache node itself in the
+ * device tree
+ * @disable_sysfs: indicates whether this node is visible to the user via
+ * sysfs or not
+ * @priv: pointer to any private data structure specific to particular
+ * cache design
+ *
+ * While @of_node, @disable_sysfs and @priv are used for internal
+ * bookkeeping, the remaining members form the core properties of the cache.
+ */
+struct cacheinfo {
+ enum cache_type type;
+ unsigned int level;
+ unsigned int coherency_line_size;
+ unsigned int number_of_sets;
+ unsigned int ways_of_associativity;
+ unsigned int physical_line_partition;
+ unsigned int size;
+ cpumask_t shared_cpu_map;
+ unsigned int attributes;
+#define CACHE_WRITE_THROUGH BIT(0)
+#define CACHE_WRITE_BACK BIT(1)
+#define CACHE_WRITE_POLICY_MASK \
+ (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK)
+#define CACHE_READ_ALLOCATE BIT(2)
+#define CACHE_WRITE_ALLOCATE BIT(3)
+#define CACHE_ALLOCATE_POLICY_MASK \
+ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+
+ struct device_node *of_node;
+ bool disable_sysfs;
+ void *priv;
+};
+
+struct cpu_cacheinfo {
+ struct cacheinfo *info_list;
+ unsigned int num_levels;
+ unsigned int num_leaves;
+};
+
+/*
+ * Helpers to make sure "func" is executed on the cpu whose cache
+ * attributes are being detected
+ */
+#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
+static inline void _##func(void *ret) \
+{ \
+ int cpu = smp_processor_id(); \
+ *(int *)ret = __##func(cpu); \
+} \
+ \
+int func(unsigned int cpu) \
+{ \
+ int ret; \
+ smp_call_function_single(cpu, _##func, &ret, true); \
+ return ret; \
+}
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int init_cache_level(unsigned int cpu);
+int populate_cache_leaves(unsigned int cpu);
+
+const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+
+#endif /* _LINUX_CACHEINFO_H */
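Architectures are expected to supply init_cache_level() and populate_cache_leaves(); a rough sketch of the latter (the leaf layout and values are made up, and init_cache_level() is assumed to have sized info_list already):

	/* Illustrative only: an architecture filling in its cache leaves. */
	int populate_cache_leaves(unsigned int cpu)
	{
		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
		struct cacheinfo *leaf = this_cpu_ci->info_list;

		leaf[0].type = CACHE_TYPE_INST;		/* L1I */
		leaf[0].level = 1;
		leaf[1].type = CACHE_TYPE_DATA;		/* L1D */
		leaf[1].level = 1;
		leaf[2].type = CACHE_TYPE_UNIFIED;	/* L2 */
		leaf[2].level = 2;

		return 0;
	}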
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index b37ea95bc348..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -127,6 +127,9 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state);
+
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 5f3386844134..260d78b587c4 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -13,6 +13,7 @@
struct ceph_auth_client;
struct ceph_authorizer;
+struct ceph_msg;
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
@@ -20,6 +21,10 @@ struct ceph_auth_handshake {
size_t authorizer_buf_len;
void *authorizer_reply_buf;
size_t authorizer_reply_buf_len;
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client_ops {
@@ -66,6 +71,11 @@ struct ceph_auth_client_ops {
void (*reset)(struct ceph_auth_client *ac);
void (*destroy)(struct ceph_auth_client *ac);
+
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
};
struct ceph_auth_client {
@@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
+static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->sign_message)
+ return auth->sign_message(auth, msg);
+ return 0;
+}
+
+static inline
+int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->check_message_signature)
+ return auth->check_message_signature(auth, msg);
+ return 0;
+}
#endif
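The wrappers are no-ops when the handshake did not provide the hooks, so a send path can call them unconditionally; an illustrative caller:

	/* Illustrative only: signing an outgoing message if the auth scheme supports it. */
	static int foo_sign_outgoing(struct ceph_auth_handshake *auth,
				     struct ceph_msg *msg)
	{
		int ret = ceph_auth_sign_message(auth, msg);

		if (ret)
			pr_err("failed to sign ceph message: %d\n", ret);
		return ret;
	}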
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ad423cc37f..07ca15e76100 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -10,8 +10,7 @@
/*
* a simple reference counted buffer.
*
- * use kmalloc for small sizes (<= one page), vmalloc for larger
- * sizes.
+ * use kmalloc for smaller sizes, vmalloc for larger sizes.
*/
struct ceph_buffer {
struct kref kref;
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index d12659ce550d..71e05bbf8ceb 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
+ CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_OSDHASHPSPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3c97d5e9b951..c0dadaac26e3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag {
__le32 dist[];
} __attribute__ ((packed));
-#define CEPH_LOCK_FCNTL 1
-#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL 1
+#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL_INTR 3
+#define CEPH_LOCK_FLOCK_INTR 4
+
#define CEPH_LOCK_SHARED 1
#define CEPH_LOCK_EXCL 2
@@ -549,6 +552,7 @@ struct ceph_filelock {
int ceph_flags_to_mode(int flags);
+#define CEPH_INLINE_NONE ((__u64)-1)
/* capability bits */
#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
@@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_LINK_SHARED | \
CEPH_CAP_FILE_SHARED | \
CEPH_CAP_XATTR_SHARED)
+#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_FILE_RD)
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 07bc359b88ac..8b11a79ca1cb 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -29,6 +29,7 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require cephx message signatures */
#define CEPH_OPT_DEFAULT (0)
@@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-extern void ceph_kvfree(const void *ptr);
extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 40ae58e3e9db..d9d396c16503 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -42,6 +42,10 @@ struct ceph_connection_operations {
struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
+ int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
+
+ int (*check_message_signature) (struct ceph_connection *con,
+ struct ceph_msg *msg);
};
/* use format string %s%d */
@@ -142,7 +146,10 @@ struct ceph_msg_data_cursor {
*/
struct ceph_msg {
struct ceph_msg_header hdr; /* header */
- struct ceph_msg_footer footer; /* footer */
+ union {
+ struct ceph_msg_footer footer; /* footer */
+ struct ceph_msg_footer_old old_footer; /* old format footer */
+ };
struct kvec front; /* unaligned blobs of message */
struct ceph_buffer *middle;
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 3d94a73b5f30..1c1887206ffa 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -152,7 +152,8 @@ struct ceph_msg_header {
receiver: mask against ~PAGE_MASK */
struct ceph_entity_name src;
- __le32 reserved;
+ __le16 compat_version;
+ __le16 reserved;
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
@@ -164,13 +165,21 @@ struct ceph_msg_header {
/*
* follows data payload
*/
+struct ceph_msg_footer_old {
+ __le32 front_crc, middle_crc, data_crc;
+ __u8 flags;
+} __attribute__ ((packed));
+
struct ceph_msg_footer {
__le32 front_crc, middle_crc, data_crc;
+ /* sig holds the 64-bit digital signature for the message */
+ __le64 sig;
__u8 flags;
} __attribute__ ((packed));
#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
+#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */
#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03aeb27fcc69..5d86416d35f2 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,6 +87,13 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
+ __le32 name_len;
+ __le32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ struct ceph_osd_data osd_data;
+ } xattr;
+ struct {
const char *class_name;
const char *method_name;
struct ceph_osd_data request_info;
@@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
+extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode);
extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag);
@@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
struct ceph_vino vino,
u64 offset, u64 *len,
- int num_ops, int opcode, int flags,
+ unsigned int which, int num_ops,
+ int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 5f871d84ddce..13d71fe18b0c 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,8 +1,10 @@
#ifndef __FS_CEPH_PAGELIST_H
#define __FS_CEPH_PAGELIST_H
-#include <linux/list.h>
+#include <asm/byteorder.h>
#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/types.h>
struct ceph_pagelist {
struct list_head head;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 641e56494a92..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -638,8 +638,10 @@ struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
int (*css_online)(struct cgroup_subsys_state *css);
void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
@@ -934,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
new file mode 100644
index 000000000000..4d1019d56f7f
--- /dev/null
+++ b/include/linux/clock_cooling.h
@@ -0,0 +1,65 @@
+/*
+ * linux/include/linux/clock_cooling.h
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Highly based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __CPU_COOLING_H__
+#define __CPU_COOLING_H__
+
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_CLOCK_THERMAL
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq);
+#else /* !CONFIG_CLOCK_THERMAL */
+static inline struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ return NULL;
+}
+static inline
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+}
+static inline
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
+#endif /* CONFIG_CLOCK_THERMAL */
+
+#endif /* __CPU_COOLING_H__ */
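A hypothetical platform driver registering its clock as a cooling device; the clock name "cpu_clk" and the foo_ names are assumptions for the sketch:

	/* Illustrative only: registering/unregistering a clock cooling device. */
	static struct thermal_cooling_device *foo_cdev;

	static int foo_thermal_probe(struct platform_device *pdev)
	{
		foo_cdev = clock_cooling_register(&pdev->dev, "cpu_clk");
		if (IS_ERR(foo_cdev))
			return PTR_ERR(foo_cdev);
		return 0;
	}

	static int foo_thermal_remove(struct platform_device *pdev)
	{
		clock_cooling_unregister(foo_cdev);
		return 0;
	}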
diff --git a/include/linux/cma.h b/include/linux/cma.h
index a93438beb33c..9384ba66e975 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,6 +15,7 @@
struct cma;
+extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e6494261eaff..7450ca2ac1fc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000000..5d3c54311f7a
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,263 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4 0xfd0
+#define CORESIGHT_PERIPHIDR5 0xfd4
+#define CORESIGHT_PERIPHIDR6 0xfd8
+#define CORESIGHT_PERIPHIDR7 0xfdC
+#define CORESIGHT_PERIPHIDR0 0xfe0
+#define CORESIGHT_PERIPHIDR1 0xfe4
+#define CORESIGHT_PERIPHIDR2 0xfe8
+#define CORESIGHT_PERIPHIDR3 0xfeC
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0 0xff0
+#define CORESIGHT_COMPIDR1 0xff4
+#define CORESIGHT_COMPIDR2 0xff8
+#define CORESIGHT_COMPIDR3 0xffC
+
+#define ETM_ARCH_V3_3 0x23
+#define ETM_ARCH_V3_5 0x25
+#define PFT_ARCH_V1_0 0x30
+#define PFT_ARCH_V1_1 0x31
+
+#define CORESIGHT_UNLOCK 0xc5acce55
+
+extern struct bus_type coresight_bustype;
+
+enum coresight_dev_type {
+ CORESIGHT_DEV_TYPE_NONE,
+ CORESIGHT_DEV_TYPE_SINK,
+ CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
+ CORESIGHT_DEV_TYPE_SOURCE,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+/**
+ * struct coresight_dev_subtype - further characterisation of a type
+ * @sink_subtype: type of sink this component is, as defined
+ by @coresight_dev_subtype_sink.
+ * @link_subtype: type of link this component is, as defined
+ by @coresight_dev_subtype_link.
+ * @source_subtype: type of source this component is, as defined
+ by @coresight_dev_subtype_source.
+ */
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+/**
+ * struct coresight_platform_data - data harvested from the DT specification
+ * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
+ * @name: name of the component as shown under sysfs.
+ * @nr_inport: number of input ports for this component.
+ * @outports: list of remote endpoint port numbers.
+ * @child_names: names of all child components connected to this device.
+ * @child_ports: child component port numbers the current component is
+ connected to.
+ * @nr_outport: number of output ports for this component.
+ * @clk: the clock this component is associated with.
+ */
+struct coresight_platform_data {
+ int cpu;
+ const char *name;
+ int nr_inport;
+ int *outports;
+ const char **child_names;
+ int *child_ports;
+ int nr_outport;
+ struct clk *clk;
+};
+
+/**
+ * struct coresight_desc - description of a component required from drivers
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @pdata: platform data collected from DT.
+ * @dev: The device entity associated to this component.
+ * @groups: operations specific to this component. These will end up
+ in the component's sysfs sub-directory.
+ */
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct coresight_connection - representation of a single connection
+ * @outport: a connection's output port number.
+ * @child_name: remote component's name.
+ * @child_port: remote component's port number @outport is connected to.
+ * @child_dev: a @coresight_device representation of the component
+ connected to @outport.
+ */
+struct coresight_connection {
+ int outport;
+ const char *child_name;
+ int child_port;
+ struct coresight_device *child_dev;
+};
+
+/**
+ * struct coresight_device - representation of a device as used by the framework
+ * @conns: array of coresight_connections associated to this component.
+ * @nr_inport: number of input ports associated with this component.
+ * @nr_outport: number of output ports associated with this component.
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @dev: The device entity associated to this component.
+ * @refcnt: keep track of what is in use.
+ * @path_link: link of current component into the path being enabled.
+ * @orphan: true if the component has connections that haven't been linked.
+ * @enable: 'true' if component is currently part of an active path.
+ * @activated: 'true' only if a _sink_ has been activated. A sink can be
+ activated but not yet enabled. Enabling for a _sink_
+ happens when a source has been selected for it.
+ */
+struct coresight_device {
+ struct coresight_connection *conns;
+ int nr_inport;
+ int nr_outport;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct device dev;
+ atomic_t *refcnt;
+ struct list_head path_link;
+ bool orphan;
+ bool enable; /* true only if configured as part of a path */
+ bool activated; /* true only if a sink is part of a path */
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+#define source_ops(csdev) csdev->ops->source_ops
+#define sink_ops(csdev) csdev->ops->sink_ops
+#define link_ops(csdev) csdev->ops->link_ops
+
+#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
+ __mode, __get, __set, __fmt) \
+DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
+static const struct coresight_ops_entry __name ## _entry = { \
+ .name = __entry_name, \
+ .mode = __mode, \
+ .ops = &__name ## _ops \
+}
+
+/**
+ * struct coresight_ops_sink - basic operations for a sink
+ * Operations available for sinks
+ * @enable: enables the sink.
+ * @disable: disables the sink.
+ */
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_link - basic operations for a link
+ * Operations available for links.
+ * @enable: enables flow between iport and oport.
+ * @disable: disables flow between iport and oport.
+ */
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+/**
+ * struct coresight_ops_source - basic operations for a source
+ * Operations available for sources.
+ * @trace_id: returns the value of the component's trace ID as known
+ to the HW.
+ * @enable: enables tracing from a source.
+ * @disable: disables tracing for a source.
+ */
+struct coresight_ops_source {
+ int (*trace_id)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops {
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
+};
+
+#ifdef CONFIG_CORESIGHT
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
+extern int coresight_is_bit_set(u32 val, int position, int value);
+extern int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value);
+#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#endif
+#else
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
+static inline int coresight_is_bit_set(u32 val, int position, int value)
+ { return 0; }
+static inline int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value) { return 1; }
+#ifdef CONFIG_OF
+static inline struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node) { return NULL; }
+#endif
+#endif
+
+#endif
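To show how the descriptor/ops split is meant to be consumed, a skeleton sink driver registering with the framework (hypothetical foo_ names; platform data is assumed to come from of_get_coresight_platform_data() or board code):

	/* Illustrative only: a minimal sink registering with the coresight core. */
	static int foo_enable(struct coresight_device *csdev)
	{
		/* unlock and start the sink hardware */
		return 0;
	}

	static void foo_disable(struct coresight_device *csdev)
	{
		/* stop the sink hardware */
	}

	static const struct coresight_ops_sink foo_sink_ops = {
		.enable  = foo_enable,
		.disable = foo_disable,
	};

	static const struct coresight_ops foo_cs_ops = {
		.sink_ops = &foo_sink_ops,
	};

	static int foo_register(struct device *dev,
				struct coresight_platform_data *pdata)
	{
		struct coresight_desc desc = {
			.type    = CORESIGHT_DEV_TYPE_SINK,
			.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
			.ops     = &foo_cs_ops,
			.pdata   = pdata,
			.dev     = dev,
		};
		struct coresight_device *csdev = coresight_register(&desc);

		return IS_ERR_OR_NULL(csdev) ? -ENODEV : 0;
	}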
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b2d9a43012b2..4260e8594bd7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -19,6 +19,7 @@
struct device;
struct device_node;
+struct attribute_group;
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+extern struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 0a9a6da21e74..b950e9d6008b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -803,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
}
#endif /* NR_CPUS > BITS_PER_LONG */
+/**
+ * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
+ * as a comma-separated list of cpus or as hex values of the cpumask
+ * @list: indicates whether the cpumask is printed as a list (true) or hex (false)
+ * @mask: the cpumask to copy
+ * @buf: the buffer to copy into
+ *
+ * Returns the length of the (null-terminated) @buf string, zero if
+ * nothing is copied.
+ */
+static inline ssize_t
+cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
+{
+ return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
+ nr_cpumask_bits);
+}
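This is the helper sysfs 'cpumap'/'cpulist'-style attributes are expected to call from their show() methods; an illustrative attribute (hypothetical foo_ name, mask source chosen arbitrarily):

	/* Illustrative only: a sysfs show() method built on cpumap_print_to_pagebuf(). */
	static ssize_t foo_cpus_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		const struct cpumask *mask = cpumask_of_node(dev_to_node(dev));

		/* false => hex mask; true would print a "0-3,8"-style list */
		return cpumap_print_to_pagebuf(false, buf, mask);
	}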
+
/*
*
* From here down, all obsolete. Use cpumask_ variants!
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2f073db7392e..1b357997cac5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
-extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 ||
- __cpuset_node_allowed_softwall(node, gfp_mask);
+ return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 ||
- __cpuset_node_allowed_hardwall(node, gfp_mask);
-}
-
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
-{
- return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
-}
-
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
-{
- return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
+ return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
-{
- return 1;
-}
-
-static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
-{
- return 1;
-}
-
-static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
return 1;
}
-static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return 1;
}
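With the softwall/hardwall pair collapsed into one helper, allocator-side checks reduce to a single call; a hedged sketch of the typical zone-filtering pattern (hardwall behaviour is now selected by passing __GFP_HARDWALL in gfp_mask):

	/* Illustrative only: skipping zones the current cpuset does not allow. */
	static bool foo_zone_usable(struct zone *zone, gfp_t gfp_mask)
	{
		if (cpusets_enabled() && !cpuset_zone_allowed(zone, gfp_mask))
			return false;
		return true;
	}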
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b2d0820837c4..2fb2ca2127ed 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
#include <linux/uaccess.h>
/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
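An algorithm module then declares both the prefixed and unprefixed alias with a single line, e.g. (illustrative, with a made-up algorithm name):

	/* Illustrative only: at the bottom of a crypto algorithm module. */
	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("Example cipher");
	MODULE_ALIAS_CRYPTO("foocipher");	/* emits "foocipher" and "crypto-foocipher" */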
+/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+/**
+ * DOC: Block Cipher Context Data Structures
+ *
+ * These data structures define the operating context for each block cipher
+ * type.
+ */
+
struct crypto_async_request {
struct list_head list;
crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
u32 flags;
};
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
+/**
+ * DOC: Block Cipher Algorithm Definitions
+ *
+ * These data structures define modular crypto algorithm implementations,
+ * managed via crypto_register_alg() and crypto_unregister_alg().
+ */
+
+/**
+ * struct ablkcipher_alg - asynchronous block cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ * smallest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ * largest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ * the supplied scatterlist containing the blocks of data. The crypto
+ * API consumer is responsible for aligning the entries of the
+ * scatterlist properly and making sure the chunks are correctly
+ * sized. In case a software fallback was put in place in the
+ * @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes. In case the
+ * key was stored in transformation context, the key might need to be
+ * re-programmed into the hardware in this function. This function
+ * shall not modify the transformation context, as this function may
+ * be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
+ * to @encrypt, and the conditions are exactly the same.
+ * @givencrypt: Update the IV for encryption. With this function, a cipher
+ * implementation may provide the logic for how the IV is to be updated
+ * for encryption.
+ * @givdecrypt: Update the IV for decryption. This is the reverse of
+ * @givencrypt.
+ * @geniv: The transformation implementation may use an "IV generator" provided
+ * by the kernel crypto API. Several use cases have a predefined
+ * approach for how IVs are to be updated. For such use cases, the kernel
+ * crypto API provides ready-to-use implementations that can be
+ * referenced with this variable.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ *
+ * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
+ * mandatory and must be filled.
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @givencrypt: see struct ablkcipher_alg
+ * @givdecrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
+ * mandatory and must be filled.
+ */
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
unsigned int maxauthsize;
};
+/**
+ * struct blkcipher_alg - synchronous block cipher definition
+ * @min_keysize: see struct ablkcipher_alg
+ * @max_keysize: see struct ablkcipher_alg
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @geniv and @ivsize are mandatory and must be filled.
+ */
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
unsigned int ivsize;
};
+/**
+ * struct cipher_alg - single-block symmetric ciphers definition
+ * @cia_min_keysize: Minimum key size supported by the transformation. This is
+ * the smallest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined
+ * values as this is not hardware specific. Possible values
+ * for this field can be found via git grep "_MIN_KEY_SIZE"
+ * include/crypto/
+ * @cia_max_keysize: Maximum key size supported by the transformation. This is
+ * the largest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined values
+ * as this is not hardware specific. Possible values for this
+ * field can be found via git grep "_MAX_KEY_SIZE"
+ * include/crypto/
+ * @cia_setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function
+ * can be called multiple times during the existence of the
+ * transformation object, so one must make sure the key is properly
+ * reprogrammed into the hardware. This function is also
+ * responsible for checking the key length for validity.
+ * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
+ * single block of data, which must be @cra_blocksize big. This
+ * always operates on a full @cra_blocksize and it is not possible
+ * to encrypt a block of smaller size. The supplied buffers must
+ * therefore also be at least of @cra_blocksize size. Both the
+ * input and output buffers are always aligned to @cra_alignmask.
+ * In case either of the input or output buffer supplied by user
+ * of the crypto API is not aligned to @cra_alignmask, the crypto
+ * API will re-align the buffers. The re-alignment means that a
+ * new buffer will be allocated, the data will be copied into the
+ * new buffer, then the processing will happen on the new buffer,
+ * then the data will be copied back into the original buffer and
+ * finally the new buffer will be freed. In case a software
+ * fallback was put in place in the @cra_init call, this function
+ * might need to use the fallback if the algorithm doesn't support
+ * all of the key sizes. In case the key was stored in
+ * transformation context, the key might need to be re-programmed
+ * into the hardware in this function. This function shall not
+ * modify the transformation context, as this function may be
+ * called in parallel with the same transformation object.
+ * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
+ * @cia_encrypt, and the conditions are exactly the same.
+ *
+ * All fields are mandatory and must be filled.
+ */
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+/**
+ * struct rng_alg - random number generator definition
+ * @rng_make_random: The function defined by this variable obtains a random
+ * number. The random number generator transform must generate
+ * the random number out of the context provided with this
+ * call.
+ * @rng_reset: Reset the random number generator by clearing its entire state.
+ * With the invocation of this function call, the random number
+ * generator shall completely reinitialize its state. If the random
+ * number generator requires a seed for setting up a new state,
+ * the seed must be provided by the consumer while invoking this
+ * function. The required size of the seed is defined with
+ * @seedsize.
+ * @seedsize: The seed size required for random number generator
+ * initialization is defined with this variable. Some random number
+ * generators, such as the SP800-90A DRBG, do not require a seed, as the
+ * seeding is implemented internally without the need for support by
+ * the consumer. In this case, the seed size is set to zero.
+ */
struct rng_alg {
int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
+/**
+ * struct crypto_alg - definition of a cryptographic cipher algorithm
+ * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
+ * CRYPTO_ALG_* flags for the flags which go in here. Those are
+ * used for fine-tuning the description of the transformation
+ * algorithm.
+ * @cra_blocksize: Minimum block size of this transformation. The size in bytes
+ * of the smallest possible unit which can be transformed with
+ * this algorithm. The users must respect this value.
+ * In the case of a HASH transformation, it is possible for a smaller
+ * block than @cra_blocksize to be passed to the crypto API for
+ * transformation; in the case of any other transformation type, an
+ * error will be returned upon any attempt to transform chunks
+ * smaller than @cra_blocksize.
+ * @cra_ctxsize: Size of the operational context of the transformation. This
+ * value informs the kernel crypto API about the memory size
+ * needed to be allocated for the transformation context.
+ * @cra_alignmask: Alignment mask for the input and output data buffer. The data
+ * buffer containing the input data for the algorithm must be
+ * aligned to this alignment mask. The data buffer for the
+ * output data must be aligned to this alignment mask. Note that
+ * the Crypto API will do the re-alignment in software, but
+ * only under special conditions and there is a performance hit.
+ * The re-alignment happens in the following cases for the different
+ * @cra_u types: cipher -- for both the input data and output data
+ * buffers; ahash -- for the output hash destination buffer; shash --
+ * for the output hash destination buffer.
+ * This is needed on hardware which is flawed by design and
+ * cannot pick data from arbitrary addresses.
+ * @cra_priority: Priority of this transformation implementation. In case
+ * multiple transformations with the same @cra_name are available to
+ * the Crypto API, the kernel will use the one with the highest
+ * @cra_priority.
+ * @cra_name: Generic name (usable by multiple implementations) of the
+ * transformation algorithm. This is the name of the transformation
+ * itself. This field is used by the kernel when looking up the
+ * providers of a particular transformation.
+ * @cra_driver_name: Unique name of the transformation provider. This is the
+ * name of the provider of the transformation. This can be any
+ * arbitrary value, but in the usual case, this contains the
+ * name of the chip or provider and the name of the
+ * transformation algorithm.
+ * @cra_type: Type of the cryptographic transformation. This is a pointer to
+ * struct crypto_type, which implements callbacks common for all
+ * transformation types. There are multiple options:
+ * &crypto_blkcipher_type, &crypto_ablkcipher_type,
+ * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * This field might be empty. In that case, there are no common
+ * callbacks. This is the case for: cipher, compress, shash.
+ * @cra_u: Callbacks implementing the transformation. This is a union of
+ * multiple structures. Depending on the type of transformation selected
+ * by @cra_type and @cra_flags above, the associated structure must be
+ * filled with callbacks. This field might be empty. This is the case
+ * for ahash, shash.
+ * @cra_init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @cra_exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @cra_init, used to remove various changes set in
+ * @cra_init.
+ * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE.
+ * @cra_list: internally used
+ * @cra_users: internally used
+ * @cra_refcnt: internally used
+ * @cra_destroy: internally used
+ *
+ * The struct crypto_alg describes a generic Crypto API algorithm and is common
+ * for all of the transformations. Any variable not documented here shall not
+ * be used by a cipher implementation as it is internal to the Crypto API.
+ */
struct crypto_alg {
struct list_head cra_list;
struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
return mask;
}
+/**
+ * DOC: Asynchronous Block Cipher API
+ *
+ * Asynchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the ablkcipher_request data structure.
+ *
+ * For the asynchronous block cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. Such state information is
+ * important to maintain because, when the callback function is invoked upon
+ * completion of the cipher operation, the callback function may need to know
+ * which operation just finished if multiple operations were issued in
+ * parallel. This state information is unused by the kernel crypto API.
+ */
+
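+/*
+ * A minimal allocation sketch for an ablkcipher handle; the "cbc(aes)" name,
+ * the 16-byte key and the abbreviated error handling are illustrative only.
+ * Request setup and the actual encrypt call are sketched with the request
+ * handle API further below.
+ *
+ *    struct crypto_ablkcipher *tfm;
+ *
+ *    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *    if (IS_ERR(tfm))
+ *        return PTR_ERR(tfm);
+ *
+ *    if (crypto_ablkcipher_setkey(tfm, key, 16)) {
+ *        crypto_free_ablkcipher(tfm);
+ *        return -EINVAL;
+ *    }
+ *
+ *    ... perform requests, then release the handle ...
+ *
+ *    crypto_free_ablkcipher(tfm);
+ */
+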
+/**
+ * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ablkcipher. The returned struct
+ * crypto_ablkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that ablkcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask);
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
return &tfm->base;
}
+/**
+ * crypto_free_ablkcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}
+/**
+ * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the ablkcipher is known to the kernel crypto API; false
+ * otherwise
+ */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
u32 mask)
{
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}
+/**
+ * crypto_ablkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the ablkcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_ablkcipher_ivsize(
struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->ivsize;
}
+/**
+ * crypto_ablkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the ablkcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_ablkcipher_blocksize(
struct crypto_ablkcipher *tfm)
{
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}
+/**
+ * crypto_ablkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ablkcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
return crt->setkey(crt->base, key, keylen);
}
+/**
+ * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
+ * @req: ablkcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
+ * data structure.
+ *
+ * Return: crypto_ablkcipher handle
+ */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
struct ablkcipher_request *req)
{
return __crypto_ablkcipher_cast(req->base.tfm);
}
+/**
+ * crypto_ablkcipher_encrypt() - encrypt plaintext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
return crt->encrypt(req);
}
+/**
+ * crypto_ablkcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
return crt->decrypt(req);
}
+/**
+ * DOC: Asynchronous Cipher Request Handle
+ *
+ * The ablkcipher_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple ablkcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the ablkcipher_request_* API calls in a similar way as the
+ * ablkcipher handle does to the crypto_ablkcipher_* API calls.
+ */
+
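+/*
+ * A usage sketch for the request handle, assuming a previously allocated and
+ * keyed tfm (see the allocation sketch above); buf, buflen and iv are
+ * illustrative placeholders and error unwinding is abbreviated. In this
+ * sketch, a return code of -EINPROGRESS or -EBUSY is treated as "request
+ * queued, wait for the callback":
+ *
+ *    static void my_complete(struct crypto_async_request *req, int err)
+ *    {
+ *        if (err == -EINPROGRESS)
+ *            return;
+ *        complete(req->data);
+ *    }
+ *
+ *    DECLARE_COMPLETION_ONSTACK(done);
+ *    struct ablkcipher_request *req;
+ *    struct scatterlist sg;
+ *    int ret;
+ *
+ *    req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *    if (!req)
+ *        return -ENOMEM;
+ *
+ *    ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *                                    CRYPTO_TFM_REQ_MAY_SLEEP,
+ *                                    my_complete, &done);
+ *    sg_init_one(&sg, buf, buflen);
+ *    ablkcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
+ *
+ *    ret = crypto_ablkcipher_encrypt(req);
+ *    if (ret == -EINPROGRESS || ret == -EBUSY) {
+ *        wait_for_completion(&done);
+ *        ret = 0;
+ *    }
+ *    ablkcipher_request_free(req);
+ */
+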
+/**
+ * crypto_ablkcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
static inline unsigned int crypto_ablkcipher_reqsize(
struct crypto_ablkcipher *tfm)
{
return crypto_ablkcipher_crt(tfm)->reqsize;
}
+/**
+ * ablkcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ablkcipher handle in the request
+ * data structure with a different one.
+ */
static inline void ablkcipher_request_set_tfm(
struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
return container_of(req, struct ablkcipher_request, base);
}
+/**
+ * ablkcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ablkcipher
+ * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
{
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
return req;
}
+/**
+ * ablkcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
kzfree(req);
}
+/**
+ * ablkcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the ablkcipher_request handle and
+ * must comply with the following template:
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void ablkcipher_request_set_callback(
struct ablkcipher_request *req,
u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
req->base.flags = flags;
}
+/**
+ * ablkcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @nbytes: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_ablkcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ */
static inline void ablkcipher_request_set_crypt(
struct ablkcipher_request *req,
struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
req->info = iv;
}
+/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples for this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
+ * for the *aead* counterparts discussed in the following. In addition, for the AEAD
+ * operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for a return code of
+ * -EBADMSG from crypto_aead_decrypt. That error indicates an authentication
+ * error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
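+/*
+ * An AEAD decryption sketch; the "gcm(aes)" name, the 16-byte key and tag,
+ * and the scatterlists (assoc_sg for the associated data, ct_sg holding the
+ * ciphertext with the tag appended, pt_sg for the plaintext) are illustrative
+ * only; ctlen covers the ciphertext plus the appended tag. Completion
+ * handling is identical to the ablkcipher sketch above:
+ *
+ *    struct crypto_aead *tfm;
+ *    struct aead_request *req;
+ *    int ret;
+ *
+ *    tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ *    if (IS_ERR(tfm))
+ *        return PTR_ERR(tfm);
+ *
+ *    crypto_aead_setkey(tfm, key, 16);
+ *    crypto_aead_setauthsize(tfm, 16);
+ *
+ *    req = aead_request_alloc(tfm, GFP_KERNEL);
+ *    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *                              my_complete, &done);
+ *    aead_request_set_assoc(req, &assoc_sg, assoclen);
+ *    aead_request_set_crypt(req, &ct_sg, &pt_sg, ctlen, iv);
+ *
+ *    ret = crypto_aead_decrypt(req);
+ *    ... wait for completion as in the ablkcipher sketch ...
+ *    if (ret == -EBADMSG)
+ *        pr_warn("integrity check failed\n");
+ *
+ *    aead_request_free(req);
+ *    crypto_free_aead(tfm);
+ */
+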
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return (struct crypto_aead *)tfm;
}
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
return &crypto_aead_tfm(tfm)->crt_aead;
}
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->ivsize;
}
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->authsize;
}
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return crt->setkey(crt->base, key, keylen);
}
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE: The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_aead_encrypt(struct aead_request *req)
{
return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
}
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE: The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
static inline int crypto_aead_decrypt(struct aead_request *req)
{
return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as AEAD handle to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->reqsize;
}
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)
{
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
return req;
}
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
static inline void aead_request_free(struct aead_request *req)
{
kzfree(req);
}
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function sets the callback function that is triggered once the cipher
+ * operation completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template:
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
static inline void aead_request_set_callback(struct aead_request *req,
u32 flags,
crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
req->base.flags = flags;
}
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * This function sets the source data and destination data scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ *
+ * IMPORTANT NOTE: AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage:
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
static inline void aead_request_set_crypt(struct aead_request *req,
struct scatterlist *src,
struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
req->iv = iv;
}
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * For encryption, the memory is filled with the associated data. For
+ * decryption, the memory must point to the associated data.
+ */
static inline void aead_request_set_assoc(struct aead_request *req,
struct scatterlist *assoc,
unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
req->assoclen = assoclen;
}
+/**
+ * DOC: Synchronous Block Cipher API
+ *
+ * The synchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
+ *
+ * Synchronous calls have a context in the tfm. But since a single tfm can be
+ * used in multiple calls and in parallel, this info should not be changeable
+ * (unless a lock is used). This applies, for example, to the symmetric key.
+ * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
+ * structure for the synchronous blkcipher API. So it is the only state info
+ * that can be kept for synchronous calls without using a big lock across a tfm.
+ *
+ * The block cipher API allows the use of a complete cipher, i.e. a cipher
+ * consisting of a template (a block chaining mode) and a single block cipher
+ * primitive (e.g. AES).
+ *
+ * The plaintext data buffer and the ciphertext data buffer are pointed to
+ * by using scatter/gather lists. The cipher operation is performed
+ * on all segments of the provided scatter/gather lists.
+ *
+ * The kernel crypto API supports a cipher operation "in-place" which means that
+ * the caller may provide the same scatter/gather list for the plaintext and
+ * cipher text. After the completion of the cipher operation, the plaintext
+ * data is replaced with the ciphertext data in case of an encryption and vice
+ * versa for a decryption. The caller must ensure that the scatter/gather lists
+ * for the output data point to sufficiently large buffers, i.e. multiples of
+ * the block size of the cipher.
+ */
+
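+/*
+ * A synchronous encryption sketch; the "cbc(aes)" name, the 16-byte key and
+ * the buf/buflen/iv placeholders are illustrative only, and buflen is assumed
+ * to be a multiple of the cipher block size:
+ *
+ *    struct crypto_blkcipher *tfm;
+ *    struct blkcipher_desc desc;
+ *    struct scatterlist sg;
+ *    int ret;
+ *
+ *    tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
+ *    if (IS_ERR(tfm))
+ *        return PTR_ERR(tfm);
+ *
+ *    crypto_blkcipher_setkey(tfm, key, 16);
+ *    crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
+ *
+ *    desc.tfm = tfm;
+ *    desc.flags = 0;
+ *    sg_init_one(&sg, buf, buflen);
+ *    ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, buflen);
+ *
+ *    crypto_free_blkcipher(tfm);
+ */
+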
static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
struct crypto_tfm *tfm)
{
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
return __crypto_blkcipher_cast(tfm);
}
+/**
+ * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * blkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a block cipher. The returned struct
+ * crypto_blkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
const char *alg_name, u32 type, u32 mask)
{
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
return &tfm->base;
}
+/**
+ * crypto_free_blkcipher() - zeroize and free the block cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}
+/**
+ * crypto_has_blkcipher() - Search for the availability of a block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the block cipher is known to the kernel crypto API; false
+ * otherwise
+ */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
+/**
+ * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
+ * @tfm: cipher handle
+ *
+ * Return: The character string holding the name of the cipher
+ */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}
+/**
+ * crypto_blkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the block cipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
return crypto_blkcipher_alg(tfm)->ivsize;
}
+/**
+ * crypto_blkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the block cipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_blkcipher_blocksize(
struct crypto_blkcipher *tfm)
{
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}
+/**
+ * crypto_blkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the block cipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
key, keylen);
}
+/**
+ * crypto_blkcipher_encrypt() - encrypt plaintext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.info is filled with the IV to be used for
+ * the current operation; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_decrypt() - decrypt ciphertext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ *
+ */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt_iv call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
+/**
+ * crypto_blkcipher_set_iv() - set IV for cipher
+ * @tfm: cipher handle
+ * @src: buffer holding the IV
+ * @len: length of the IV in bytes
+ *
+ * The caller provided IV is set for the block cipher referenced by the cipher
+ * handle.
+ */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
const u8 *src, unsigned int len)
{
memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}
+/**
+ * crypto_blkcipher_get_iv() - obtain IV from cipher
+ * @tfm: cipher handle
+ * @dst: buffer filled with the IV
+ * @len: length of the buffer dst
+ *
+ * The caller can obtain the IV set for the block cipher referenced by the
+ * cipher handle and store it into the user-provided buffer. If the buffer
+ * has insufficient space, the IV is truncated to fit the buffer.
+ */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
u8 *dst, unsigned int len)
{
memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
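+/*
+ * A single block sketch; the "aes" name and the 16-byte key are illustrative,
+ * and both buffers must be at least crypto_cipher_blocksize() bytes:
+ *
+ *    struct crypto_cipher *tfm;
+ *    u8 in[16], out[16];
+ *
+ *    tfm = crypto_alloc_cipher("aes", 0, 0);
+ *    if (IS_ERR(tfm))
+ *        return PTR_ERR(tfm);
+ *
+ *    crypto_cipher_setkey(tfm, key, 16);
+ *    crypto_cipher_encrypt_one(tfm, out, in);
+ *    crypto_free_cipher(tfm);
+ */
+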
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
return __crypto_cipher_cast(tfm);
}
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
u32 type, u32 mask)
{
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
crypto_free_tfm(crypto_cipher_tfm(tfm));
}
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
return &crypto_cipher_tfm(tfm)->crt_cipher;
}
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
key, keylen);
}
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
dst, src);
}
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
dst, src);
}
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
+ */
+
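+/*
+ * A digest calculation sketch; the "sha1" name, the 20-byte output buffer and
+ * the buf/buflen placeholders are illustrative only. The CRYPTO_ALG_ASYNC
+ * mask requests a synchronous implementation:
+ *
+ *    struct crypto_hash *tfm;
+ *    struct hash_desc desc;
+ *    struct scatterlist sg;
+ *    u8 out[20];
+ *    int ret;
+ *
+ *    tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+ *    if (IS_ERR(tfm))
+ *        return PTR_ERR(tfm);
+ *
+ *    desc.tfm = tfm;
+ *    desc.flags = 0;
+ *    sg_init_one(&sg, buf, buflen);
+ *
+ *    ret = crypto_hash_init(&desc);
+ *    if (!ret)
+ *        ret = crypto_hash_update(&desc, &sg, buflen);
+ *    if (!ret)
+ *        ret = crypto_hash_final(&desc, out);
+ *
+ *    crypto_free_hash(tfm);
+ *
+ * The three calls above can also be collapsed into a single crypto_hash_digest
+ * invocation.
+ */
+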
static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
return __crypto_hash_cast(tfm);
}
+/**
+ * crypto_alloc_hash() - allocate synchronous message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned struct
+ * crypto_hash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
u32 type, u32 mask)
{
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
return &tfm->base;
}
+/**
+ * crypto_free_hash() - zeroize and free message digest handle
+ * @tfm: cipher handle to be freed
+ */
static inline void crypto_free_hash(struct crypto_hash *tfm)
{
crypto_free_tfm(crypto_hash_tfm(tfm));
}
+/**
+ * crypto_has_hash() - Search for the availability of a message digest
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the message digest cipher is known to the kernel crypto
+ * API; false otherwise
+ */
static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
return &crypto_hash_tfm(tfm)->crt_hash;
}
+/**
+ * crypto_hash_blocksize() - obtain block size for message digest
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}
+/**
+ * crypto_hash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: message digest size
+ */
static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}
+/**
+ * crypto_hash_init() - (re)initialize message digest handle
+ * @desc: cipher request handle that is to be filled by the caller --
+ * desc.tfm is filled with the hash cipher handle;
+ * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * The call (re-)initializes the message digest referenced by the hash cipher
+ * request handle. Any potentially existing state created by previous
+ * operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
static inline int crypto_hash_init(struct hash_desc *desc)
{
return crypto_hash_crt(desc->tfm)->init(desc);
}
+/**
+ * crypto_hash_update() - add data to message digest for processing
+ * @desc: cipher request handle
+ * @sg: scatter / gather list pointing to the data to be added to the message
+ * digest
+ * @nbytes: number of bytes to be processed from @sg
+ *
+ * Updates the message digest state of the cipher handle pointed to by the
+ * hash cipher request handle with the input data pointed to by the
+ * scatter/gather list.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_update(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}
+/**
+ * crypto_hash_final() - calculate message digest
+ * @desc: cipher request handle
+ * @out: message digest output buffer -- The caller must ensure that the out
+ * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
+ * function).
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
return crypto_hash_crt(desc->tfm)->final(desc, out);
}
+/**
+ * crypto_hash_digest() - calculate message digest for a buffer
+ * @desc: see crypto_hash_final()
+ * @sg: see crypto_hash_update()
+ * @nbytes: see crypto_hash_update()
+ * @out: see crypto_hash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_hash_init,
+ * crypto_hash_update and crypto_hash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
static inline int crypto_hash_digest(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
+/**
+ * crypto_hash_setkey() - set key for message digest
+ * @hash: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the message digest cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
static inline int crypto_hash_setkey(struct crypto_hash *hash,
const u8 *key, unsigned int keylen)
{
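A minimal usage sketch of the synchronous hash API documented above, computing a SHA-1 digest of a linear buffer in one call; the foo_sha1() wrapper is hypothetical, but the crypto_* calls follow the kernel-doc added in this hunk.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int foo_sha1(const u8 *data, unsigned int len, u8 *out /* 20 bytes */)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        sg_init_one(&sg, data, len);

        /* short-hand for crypto_hash_init() + _update() + _final() */
        err = crypto_hash_digest(&desc, &sg, len, out);

        crypto_free_hash(tfm);
        return err;
}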
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index d84f8c254a87..da4c4983adbe 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
+struct device;
struct file_operations;
struct debugfs_blob_wrapper {
@@ -99,13 +100,18 @@ struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
u32 *array, u32 elements);
+struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data));
+
bool debugfs_initialized(void);
#else
#include <linux/err.h>
-/*
+/*
* We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled
* so users have a chance to detect if there was a real error or not. We don't
* want to duplicate the design decision mistakes of procfs and devfs again.
@@ -250,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
#endif
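A brief sketch of how a driver might use the devm-managed seq_file helper declared above; foo_state_show() and the probe fragment are hypothetical, and cleanup is assumed to be handled when the parent debugfs directory is removed.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_state_show(struct seq_file *s, void *data)
{
        seq_puts(s, "link: up\n");      /* dump whatever state is useful */
        return 0;
}

/* in probe(), with 'dev' the struct device and 'foo_dir' a debugfs dentry: */
debugfs_create_devm_seqfile(dev, "state", foo_dir, foo_state_show);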
diff --git a/include/linux/device.h b/include/linux/device.h
index ce1f21608b16..fb506738f7b7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+static inline void device_lock_assert(struct device *dev)
+{
+ lockdep_assert_held(&dev->mutex);
+}
+
void driver_init(void);
/*
@@ -1118,6 +1123,41 @@ do { \
})
#endif
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
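A short usage sketch for the dev_*_once() helpers added above; struct foo_priv, FOO_STATUS and FOO_STATUS_ERR are hypothetical driver details.

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct foo_priv *priv = dev_id;
        u32 status = readl(priv->regs + FOO_STATUS);

        if (status & FOO_STATUS_ERR)
                dev_err_once(priv->dev,
                             "hardware error (status 0x%08x), further reports suppressed\n",
                             status);

        return IRQ_HANDLED;
}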
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d5d388160f42..c3007cb4bfa6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev);
-#ifndef set_arch_dma_coherent_ops
-static inline int set_arch_dma_coherent_ops(struct device *dev)
-{
- return 0;
-}
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, struct iommu_ops *iommu,
+ bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif
static inline unsigned int dma_get_max_seg_size(struct device *dev)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 653a1fd07ae8..40cd75e21ea2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -447,7 +447,8 @@ struct dmaengine_unmap_data {
* communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
- * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
* ---async_tx api specific fields---
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 593fff99e6bf..30624954dec5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@
struct acpi_dmar_header;
+#ifdef CONFIG_X86
+# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
+#else
+# define DMAR_UNITS_SUPPORTED 64
+#endif
+
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
+extern int dmar_device_add(acpi_handle handle);
+extern int dmar_device_remove(acpi_handle handle);
+
+static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
+{
+ return 0;
+}
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
-extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
-extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
-static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+
+#define dmar_parse_one_rmrr dmar_res_noop
+#define dmar_parse_one_atsr dmar_res_noop
+#define dmar_check_one_atsr dmar_res_noop
+#define dmar_release_one_atsr dmar_res_noop
+
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
}
-static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
+
+static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
return 0;
}
-static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
+#endif /* CONFIG_INTEL_IOMMU */
+
+#ifdef CONFIG_IRQ_REMAP
+extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+#else /* CONFIG_IRQ_REMAP */
+static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{ return 0; }
+#endif /* CONFIG_IRQ_REMAP */
+
+#else /* CONFIG_DMAR_TABLE */
+
+static inline int dmar_device_add(void *handle)
+{
+ return 0;
+}
+
+static inline int dmar_device_remove(void *handle)
{
return 0;
}
-#endif /* CONFIG_INTEL_IOMMU */
#endif /* CONFIG_DMAR_TABLE */
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index debb70d40547..8723f2a99e15 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -172,7 +172,7 @@ enum drbd_ret_code {
ERR_RES_NOT_KNOWN = 158,
ERR_RES_IN_USE = 159,
ERR_MINOR_CONFIGURED = 160,
- ERR_MINOR_EXISTS = 161,
+ ERR_MINOR_OR_VOLUME_EXISTS = 161,
ERR_INVALID_REQUEST = 162,
ERR_NEED_APV_100 = 163,
ERR_NEED_ALLOW_TWO_PRI = 164,
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 67a5fa7830c4..20fa8d8ae313 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif
+#ifndef SET_PERSONALITY2
+#define SET_PERSONALITY2(ex, state) \
+ SET_PERSONALITY(ex)
+#endif
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 733980fce8e3..41c891d05f04 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
#endif
}
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
+ * @skb: Buffer to pad
+ *
+ * An Ethernet frame should have a minimum size of 60 bytes. This function
+ * takes short frames and pads them with zeros up to the 60 byte limit.
+ */
+static inline int eth_skb_pad(struct sk_buff *skb)
+{
+ return skb_put_padto(skb, ETH_ZLEN);
+}
+
#endif /* _LINUX_ETHERDEVICE_H */
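A sketch of the intended call site for eth_skb_pad() in a driver transmit path; foo_hw_queue_frame() is hypothetical, and the example assumes the underlying skb_put_padto() frees the skb on allocation failure, so the caller must not touch it after an error.

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct foo_priv *priv = netdev_priv(ndev);

        /* hardware cannot pad runt frames itself */
        if (eth_skb_pad(skb))
                return NETDEV_TX_OK;    /* skb already freed on error */

        foo_hw_queue_frame(priv, skb);  /* frame is now at least ETH_ZLEN bytes */
        return NETDEV_TX_OK;
}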
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c1a2d60dfb82..653dc9c4ebac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -59,6 +59,26 @@ enum ethtool_phys_id_state {
ETHTOOL_ID_OFF
};
+enum {
+ ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
+ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+
+ /*
+ * Add your fresh new hash function bits above and remember to update
+ * rss_hash_func_strings[] in ethtool.c
+ */
+ ETH_RSS_HASH_FUNCS_COUNT
+};
+
+#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
+#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
+
+#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
+#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+
+#define ETH_RSS_HASH_UNKNOWN 0
+#define ETH_RSS_HASH_NO_CHANGE 0
+
struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
@@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
* Returns zero if not supported for this specific device.
- * @get_rxfh: Get the contents of the RX flow hash indirection table and hash
- * key.
- * Will only be called if one or both of @get_rxfh_indir_size and
- * @get_rxfh_key_size are implemented and return non-zero.
- * Returns a negative error code or zero.
- * @set_rxfh: Set the contents of the RX flow hash indirection table and/or
- * hash key. In case only the indirection table or hash key is to be
- * changed, the other argument will be %NULL.
- * Will only be called if one or both of @get_rxfh_indir_size and
- * @get_rxfh_key_size are implemented and return non-zero.
+ * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key
+ * and/or hash function.
* Returns a negative error code or zero.
+ * @set_rxfh: Set the contents of the RX flow hash indirection table, hash
+ * key, and/or hash function. Arguments which are set to %NULL or zero
+ * will remain unchanged.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -241,9 +258,10 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key);
+ int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key);
+ const u8 *key, const u8 hfunc);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
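One possible driver-side shape for the updated @get_rxfh/@set_rxfh semantics described above; struct foo_priv and the FOO_RSS_* sizes are hypothetical.

static int foo_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, u8 *hfunc)
{
        struct foo_priv *priv = netdev_priv(ndev);
        int i;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;      /* hardware hashes with Toeplitz */
        if (indir) {
                for (i = 0; i < FOO_RSS_INDIR_SIZE; i++)
                        indir[i] = priv->rss_indir[i];
        }
        if (key)
                memcpy(key, priv->rss_key, FOO_RSS_KEY_SIZE);

        return 0;
}

static int foo_set_rxfh(struct net_device *ndev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        /* reject any hash function change the hardware cannot honour */
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;

        /* NULL arguments mean "leave unchanged"; program the rest into hw */
        return 0;
}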
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c6f996f2abb6..798fad9e420d 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
#include <linux/atomic.h>
/*
@@ -25,14 +26,18 @@ struct fault_attr {
unsigned long reject_end;
unsigned long count;
+ struct ratelimit_state ratelimit_state;
+ struct dentry *dname;
};
-#define FAULT_ATTR_INITIALIZER { \
- .interval = 1, \
- .times = ATOMIC_INIT(1), \
- .require_end = ULONG_MAX, \
- .stacktrace_depth = 32, \
- .verbose = 2, \
+#define FAULT_ATTR_INITIALIZER { \
+ .interval = 1, \
+ .times = ATOMIC_INIT(1), \
+ .require_end = ULONG_MAX, \
+ .stacktrace_depth = 32, \
+ .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
+ .verbose = 2, \
+ .dname = NULL, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/include/linux/fence.h b/include/linux/fence.h
index d174585b874b..39efee130d2b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -128,8 +128,8 @@ struct fence_cb {
* from irq context, so normal spinlocks can be used.
*
* A return value of false indicates the fence already passed,
- * or some failure occured that made it impossible to enable
- * signaling. True indicates succesful enabling.
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
*
* fence->status may be set in enable_signaling, but only when false is
* returned.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ca95abd2bed1..caac2087a4d5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bb29b02d9bb6..f90c0282c114 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -18,6 +18,7 @@
#include <linux/pid.h>
#include <linux/bug.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>
@@ -401,7 +402,7 @@ struct address_space {
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
- struct mutex i_mmap_mutex; /* protect tree, count, list */
+ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
unsigned long nrshadows; /* number of shadow entries */
@@ -467,6 +468,26 @@ struct block_device {
int mapping_tagged(struct address_space *mapping, int tag);
+static inline void i_mmap_lock_write(struct address_space *mapping)
+{
+ down_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_write(struct address_space *mapping)
+{
+ up_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_lock_read(struct address_space *mapping)
+{
+ down_read(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_read(struct address_space *mapping)
+{
+ up_read(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
@@ -1497,6 +1518,7 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
+ void (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -1560,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec *fast_pointer,
struct iovec **ret_pointer);
+extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -2063,7 +2086,7 @@ struct filename {
extern long vfs_truncate(struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
-extern int do_fallocate(struct file *file, int mode, loff_t offset,
+extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
@@ -2075,6 +2098,7 @@ extern int vfs_open(const struct path *, struct file *, const struct cred *);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
+extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
@@ -2152,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-extern const struct file_operations bad_sock_fops;
#ifdef CONFIG_BLOCK
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
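A minimal read-side sketch of the locking pattern these wrappers replace the old i_mmap_mutex with, walking the VMAs that map a given file range; the pgoff bounds are assumed to be computed by the caller.

struct vm_area_struct *vma;

i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) {
        /* inspect, but do not modify, each mapping of the range */
}
i_mmap_unlock_read(mapping);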
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 84d60cb841b1..bf0321eabbda 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -29,7 +29,16 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
-#define FSL_IFC_BANK_COUNT 4
+/*
+ * The actual number of banks implemented depends on the IFC version
+ * - IFC version 1.0 implements 4 banks.
+ * - IFC version 1.1 onward implements 8 banks.
+ */
+#define FSL_IFC_BANK_COUNT 8
+
+#define FSL_IFC_VERSION_MASK 0x0F0F0000
+#define FSL_IFC_VERSION_1_0_0 0x01000000
+#define FSL_IFC_VERSION_1_1_0 0x01010000
/*
* CSPR - Chip Select Property Register
@@ -776,23 +785,23 @@ struct fsl_ifc_regs {
__be32 cspr;
u32 res2;
} cspr_cs[FSL_IFC_BANK_COUNT];
- u32 res3[0x19];
+ u32 res3[0xd];
struct {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x18];
+ u32 res5[0xc];
struct {
__be32 csor;
__be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x18];
+ u32 res7[0xc];
struct {
__be32 ftim[4];
u32 res8[0x8];
} ftim_cs[FSL_IFC_BANK_COUNT];
- u32 res9[0x60];
+ u32 res9[0x30];
__be32 rb_stat;
u32 res10[0x2];
__be32 ifc_gcr;
@@ -827,6 +836,8 @@ struct fsl_ifc_ctrl {
int nand_irq;
spinlock_t lock;
void *nand;
+ int version;
+ int banks;
u32 nand_stat;
wait_queue_head_t nand_wait;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ca060d7c4fa6..0f313f93c586 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -197,24 +197,6 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
- * Inode specific fields in an fsnotify_mark
- */
-struct fsnotify_inode_mark {
- struct inode *inode; /* inode this mark is associated with */
- struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
- struct list_head free_i_list; /* tmp list used when freeing this mark */
-};
-
-/*
- * Mount point specific fields in an fsnotify_mark
- */
-struct fsnotify_vfsmount_mark {
- struct vfsmount *mnt; /* vfsmount this mark is associated with */
- struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */
- struct list_head free_m_list; /* tmp list used when freeing this mark */
-};
-
-/*
* a mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -230,11 +212,17 @@ struct fsnotify_mark {
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct fsnotify_group *group; /* group this mark is for */
- struct list_head g_list; /* list of marks by group->i_fsnotify_marks */
+ struct list_head g_list; /* list of marks by group->i_fsnotify_marks
+ * Also reused for queueing mark into
+ * destroy_list when it's waiting for
+ * the end of SRCU period before it can
+ * be freed */
spinlock_t lock; /* protect group and inode */
+ struct hlist_node obj_list; /* list of marks for inode / vfsmount */
+ struct list_head free_list; /* tmp list used when freeing this mark */
union {
- struct fsnotify_inode_mark i;
- struct fsnotify_vfsmount_mark m;
+ struct inode *inode; /* inode this mark is associated with */
+ struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
__u32 ignored_mask; /* events types to ignore */
#define FSNOTIFY_MARK_FLAG_INODE 0x01
@@ -243,7 +231,6 @@ struct fsnotify_mark {
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
unsigned int flags; /* vfsmount or inode mark? */
- struct list_head destroy_list;
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ed501953f0b2..1da602982cf9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -39,6 +39,12 @@
# define FTRACE_FORCE_LIST_FUNC 0
#endif
+/* Main tracing buffer and events set up */
+#ifdef CONFIG_TRACING
+void trace_init(void);
+#else
+static inline void trace_init(void) { }
+#endif
struct module;
struct ftrace_hash;
@@ -873,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 07d2699cdb51..b840e3b2770d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -110,11 +110,8 @@ struct vm_area_struct;
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
- __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
- __GFP_HARDWALL | __GFP_HIGHMEM | \
- __GFP_MOVABLE)
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 85aa5d0b9357..ab81339a8590 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
WARN_ON(1);
return -EINVAL;
}
-static inline void gpio_unlock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
WARN_ON(1);
}
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 00b1b70d68ba..fd85cb120ee0 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
-int gpiod_get_direction(const struct gpio_desc *desc);
+int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
@@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
+void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
@@ -217,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -228,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
@@ -240,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -252,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
+static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 249db3057e4d..c497c62889d1 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -32,6 +32,7 @@ struct seq_file;
* @get: returns value for signal "offset"; for output signals this
* returns either the value actually sensed, or zero
* @set: assigns output value for signal "offset"
+ * @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_debounce: optional hook for setting debounce time for specified gpio in
* interrupt triggered gpio chips
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
@@ -89,6 +90,9 @@ struct gpio_chip {
unsigned offset);
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
+ void (*set_multiple)(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits);
int (*set_debounce)(struct gpio_chip *chip,
unsigned offset,
unsigned debounce);
@@ -149,8 +153,8 @@ extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip, void *data));
/* lock/unlock as IRQ */
-int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
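A consumer-side sketch of the new array setters; when the underlying gpio_chip implements .set_multiple, all lines sharing a register can be updated with a single write. The descriptors are assumed to have been requested earlier (e.g. with gpiod_get_index()).

struct gpio_desc *bus[4];       /* previously requested data lines */
int values[4] = { 1, 0, 1, 1 };

gpiod_set_array(ARRAY_SIZE(values), bus, values);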
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..1afde47e1528 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,7 +15,6 @@
*/
#include <asm/types.h>
-#include <asm/hash.h>
#include <linux/compiler.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr)
return (u32)val;
}
-struct fast_hash_ops {
- u32 (*hash)(const void *data, u32 len, u32 seed);
- u32 (*hash2)(const u32 *data, u32 len, u32 seed);
-};
-
-/**
- * arch_fast_hash - Caclulates a hash over a given buffer that can have
- * arbitrary size. This function will eventually use an
- * architecture-optimized hashing implementation if
- * available, and trades off distribution for speed.
- *
- * @data: buffer to hash
- * @len: length of buffer in bytes
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
-
-/**
- * arch_fast_hash2 - Caclulates a hash over a given buffer that has a
- * size that is of a multiple of 32bit words. This
- * function will eventually use an architecture-
- * optimized hashing implementation if available,
- * and trades off distribution for speed.
- *
- * @data: buffer to hash (must be 32bit padded)
- * @len: number of 32bit words
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
-
#endif /* _LINUX_HASH_H */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 11c0182a153b..cbb5790a35cd 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -1,9 +1,24 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef __LINUX_HDMI_H_
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 78ea9bf941cd..06c4607744f6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -234,6 +234,33 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+
+#define HID_CP_CONSUMERCONTROL 0x000c0001
+#define HID_CP_NUMERICKEYPAD 0x000c0002
+#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
+#define HID_CP_MICROPHONE 0x000c0004
+#define HID_CP_HEADPHONE 0x000c0005
+#define HID_CP_GRAPHICEQUALIZER 0x000c0006
+#define HID_CP_FUNCTIONBUTTONS 0x000c0036
+#define HID_CP_SELECTION 0x000c0080
+#define HID_CP_MEDIASELECTION 0x000c0087
+#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_PLAYBACKSPEED 0x000c00f1
+#define HID_CP_PROXIMITY 0x000c0109
+#define HID_CP_SPEAKERSYSTEM 0x000c0160
+#define HID_CP_CHANNELLEFT 0x000c0161
+#define HID_CP_CHANNELRIGHT 0x000c0162
+#define HID_CP_CHANNELCENTER 0x000c0163
+#define HID_CP_CHANNELFRONT 0x000c0164
+#define HID_CP_CHANNELCENTERFRONT 0x000c0165
+#define HID_CP_CHANNELSIDE 0x000c0166
+#define HID_CP_CHANNELSURROUND 0x000c0167
+#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
+#define HID_CP_CHANNELTOP 0x000c0169
+#define HID_CP_CHANNELUNKNOWN 0x000c016a
+#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
+#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
+
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -312,11 +339,8 @@ struct hid_item {
* Vendor specific HID device groups
*/
#define HID_GROUP_RMI 0x0100
-
-/*
- * Vendor specific HID device groups
- */
#define HID_GROUP_WACOM 0x0101
+#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
/*
* This is the global environment of the parser. This information is
@@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev)
hdev->ll_driver->wait(hdev);
}
+/**
+ * hid_report_len - calculate the report length
+ *
+ * @report: the report we want to know the length
+ */
+static inline int hid_report_len(struct hid_report *report)
+{
+ /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+}
+
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt);
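A sketch of sizing a raw report buffer with hid_report_len(); foo_send_report() is hypothetical.

static int foo_send_report(struct hid_device *hdev, struct hid_report *report)
{
        int len = hid_report_len(report);
        u8 *buf = kzalloc(len, GFP_KERNEL);
        int ret;

        if (!buf)
                return -ENOMEM;

        hid_output_report(report, buf);
        ret = hid_hw_output_report(hdev, buf, len);
        kfree(buf);

        return ret < 0 ? ret : 0;
}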
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index cdd149ca5cc0..431b7fc605c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
}
#endif /* !CONFIG_HUGETLB_PAGE */
+/*
+ * Hugepages at the page global directory. If an architecture supports
+ * hugepages at the pgd level, it needs to define this.
+ */
+#ifndef pgd_huge
+#define pgd_huge(x) 0
+#endif
+
+#ifndef pgd_write
+static inline int pgd_write(pgd_t pgd)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef is_hugepd
+/*
+ * Some architectures need a hugepage directory format in order to
+ * support multiple hugepage sizes. For example,
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced this on powerpc, allowing for a more flexible hugepage
+ * pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
#define HUGETLB_ANON_FILE "anon_hugepage"
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 08cfaff8a072..476c685ca6f9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -650,6 +650,8 @@ struct vmbus_channel {
u8 monitor_grp;
u8 monitor_bit;
+ bool rescind; /* got rescind msg */
+
u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b556e0ab946f..e3a1721c8354 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -46,6 +46,8 @@ struct i2c_client;
struct i2c_driver;
union i2c_smbus_data;
struct i2c_board_info;
+enum i2c_slave_event;
+typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
struct module;
@@ -209,6 +211,8 @@ struct i2c_driver {
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
+ * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
+ * calls it to pass on slave events to the slave driver.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -224,6 +228,7 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
+ i2c_slave_cb_t slave_cb; /* callback for slave mode */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
dev_set_drvdata(&dev->dev, data);
}
+/* I2C slave support */
+
+enum i2c_slave_event {
+ I2C_SLAVE_REQ_READ_START,
+ I2C_SLAVE_REQ_READ_END,
+ I2C_SLAVE_REQ_WRITE_START,
+ I2C_SLAVE_REQ_WRITE_END,
+ I2C_SLAVE_STOP,
+};
+
+extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+extern int i2c_slave_unregister(struct i2c_client *client);
+
+static inline int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ return client->slave_cb(client, event, val);
+}
+
/**
* struct i2c_board_info - template for device creation
* @type: chip type, to initialize i2c_client.name
@@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* into I2C transfers instead.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the I2C_FUNC_* flags.
+ * @reg_slave: Register given client to I2C slave mode of this adapter
+ * @unreg_slave: Unregister given client from I2C slave mode of this adapter
*
* The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
@@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* to name two of the most common.
*
* The return codes from the @master_xfer field should indicate the type of
- * error code that occured during the transfer, as documented in the kernel
+ * error code that occurred during the transfer, as documented in the kernel
* Documentation file Documentation/i2c/fault-codes.
*/
struct i2c_algorithm {
@@ -377,6 +403,9 @@ struct i2c_algorithm {
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
+
+ int (*reg_slave)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
};
/**
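A skeleton of a slave backend built on the hooks above; the exact event semantics are left to the adapter driver, so this only shows the registration flow and the shape of the callback.

static int foo_slave_cb(struct i2c_client *client,
                        enum i2c_slave_event event, u8 *val)
{
        switch (event) {
        case I2C_SLAVE_REQ_WRITE_START:
        case I2C_SLAVE_REQ_WRITE_END:
                /* remote master is writing; consume the byte in *val */
                break;
        case I2C_SLAVE_REQ_READ_START:
        case I2C_SLAVE_REQ_READ_END:
                *val = 0;       /* remote master is reading; supply the next byte */
                break;
        case I2C_SLAVE_STOP:
                /* transfer finished; reset any internal state */
                break;
        }

        return 0;
}

/* in the backend's probe(): */
ret = i2c_slave_register(client, foo_slave_cb);
if (ret)
        return ret;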
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 8cfb50f38529..0bc03f100d04 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -26,7 +26,6 @@
#define __TWL_H_
#include <linux/types.h>
-#include <linux/phy/phy.h>
#include <linux/input/matrix_keypad.h>
/*
@@ -634,7 +633,6 @@ enum twl4030_usb_mode {
struct twl4030_usb_data {
enum twl4030_usb_mode usb_mode;
unsigned long features;
- struct phy_init_data *init_data;
int (*phy_init)(struct device *dev);
int (*phy_exit)(struct device *dev);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b1be39c76931..4f4eea8a6288 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
/*
* DS bit usage
@@ -1066,6 +1067,12 @@ struct ieee80211_pspoll {
/* TDLS */
+/* Channel switch timing */
+struct ieee80211_ch_switch_timing {
+ __le16 switch_time;
+ __le16 switch_timeout;
+} __packed;
+
/* Link-id information element */
struct ieee80211_tdls_lnkie {
u8 ie_type; /* Link Identifier IE */
@@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data {
u8 dialog_token;
u8 variable[0];
} __packed discover_req;
+ struct {
+ u8 target_channel;
+ u8 oper_class;
+ u8 variable[0];
+ } __packed chan_switch_req;
+ struct {
+ __le16 status_code;
+ u8 variable[0];
+ } __packed chan_switch_resp;
} u;
} __packed;
@@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
/*
- * Maximum length of AMPDU that the STA can receive.
+ * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
* Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
*/
enum ieee80211_max_ampdu_length_exp {
@@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp {
IEEE80211_HT_MAX_AMPDU_64K = 3
};
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
+
#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
/* Minimum MPDU start spacing */
@@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode {
WLAN_TDLS_DISCOVERY_REQUEST = 10,
};
+/* Extended Channel Switching capability to be set in the 1st byte of
+ * the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
+#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
+#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
+
/* Interworking capabilities are set in 7th bit of 4th byte of the
* @WLAN_EID_EXT_CAPABILITY information element
*/
@@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
+#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
@@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode {
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
+/* BSS Coex IE information field bits */
+#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
+
/**
* enum - mesh synchronization method identifier
*
@@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+/**
+ * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * @skb: the skb containing the frame, length will not be checked
+ * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
+ *
+ * This function assumes the frame is a data frame, and that the network header
+ * is in the correct place.
+ */
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+{
+ if (!skb_is_nonlinear(skb) &&
+ skb->len > (skb_network_offset(skb) + 2)) {
+ /* Point to where the indication of TDLS should start */
+ const u8 *tdls_data = skb_network_header(skb) - 2;
+
+ if (get_unaligned_be16(tdls_data) == ETH_P_TDLS &&
+ tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE &&
+ tdls_data[3] == WLAN_CATEGORY_TDLS)
+ return tdls_data[4];
+ }
+
+ return -1;
+}
+
/* convert time units */
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
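A brief sketch of dispatching on the helper above in a receive path; hdr_size is assumed to be the already-computed 802.11 header length.

int action = ieee80211_get_tdls_action(skb, hdr_size);

switch (action) {
case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
        /* hand off to the TDLS channel-switch machinery */
        break;
case -1:
        /* not a TDLS data frame; continue normal rx processing */
        break;
default:
        break;
}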
diff --git a/include/net/ieee802154.h b/include/linux/ieee802154.h
index 0aa7122e8f15..6e82d888287c 100644
--- a/include/net/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -24,10 +20,27 @@
* Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
-#ifndef NET_IEEE802154_H
-#define NET_IEEE802154_H
+#ifndef LINUX_IEEE802154_H
+#define LINUX_IEEE802154_H
+
+#include <linux/types.h>
+#include <linux/random.h>
+#include <asm/byteorder.h>
#define IEEE802154_MTU 127
+#define IEEE802154_MIN_PSDU_LEN 5
+
+#define IEEE802154_PAN_ID_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
+
+#define IEEE802154_EXTENDED_ADDR_LEN 8
+
+#define IEEE802154_LIFS_PERIOD 40
+#define IEEE802154_SIFS_PERIOD 12
+
+#define IEEE802154_MAX_CHANNEL 26
+#define IEEE802154_MAX_PAGE 31
#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
@@ -189,7 +202,41 @@ enum {
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
};
+/**
+ * ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * @len: psdu len with (MHR + payload + MFR)
+ */
+static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+{
+ return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+}
+
+/**
+ * ieee802154_is_valid_extended_addr - check if extended addr is valid
+ * @addr: extended addr to check
+ */
+static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+{
+ /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
+ * is used internally as extended to short address broadcast mapping.
+ * This is currently a workaround because neighbor discovery can't
+ * deal with short address types right now.
+ */
+ return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
+ (addr != cpu_to_le64(0xffffffffffffffffULL)));
+}
-#endif
+/**
+ * ieee802154_random_extended_addr - generates a random extended address
+ * @addr: extended addr pointer to place the random address
+ */
+static inline void ieee802154_random_extended_addr(__le64 *addr)
+{
+ get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
+ /* toggle some bit if we hit an invalid extended addr */
+ if (!ieee802154_is_valid_extended_addr(*addr))
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+}
+#endif /* LINUX_IEEE802154_H */
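A small sketch of combining the address helpers above in a driver probe path, falling back to a random EUI-64 when the hardware-provided one is unusable; foo_hw_read_extended_addr() is hypothetical.

__le64 addr = foo_hw_read_extended_addr(priv);

if (!ieee802154_is_valid_extended_addr(addr))
        ieee802154_random_extended_addr(&addr);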
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 808dcb8cc04f..0a8ce762a47f 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <uapi/linux/if_bridge.h>
+#include <linux/bitops.h>
struct br_ip {
union {
@@ -32,11 +33,41 @@ struct br_ip_list {
struct br_ip addr;
};
+#define BR_HAIRPIN_MODE BIT(0)
+#define BR_BPDU_GUARD BIT(1)
+#define BR_ROOT_BLOCK BIT(2)
+#define BR_MULTICAST_FAST_LEAVE BIT(3)
+#define BR_ADMIN_COST BIT(4)
+#define BR_LEARNING BIT(5)
+#define BR_FLOOD BIT(6)
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC BIT(7)
+#define BR_PROXYARP BIT(8)
+#define BR_LEARNING_SYNC BIT(9)
+
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
+#if IS_ENABLED(CONFIG_BRIDGE)
+int br_fdb_external_learn_add(struct net_device *dev,
+ const unsigned char *addr, u16 vid);
+int br_fdb_external_learn_del(struct net_device *dev,
+ const unsigned char *addr, u16 vid);
+#else
+static inline int br_fdb_external_learn_add(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return 0;
+}
+static inline int br_fdb_external_learn_del(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return 0;
+}
+#endif
+
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d69f0577a319..515a35e2a48a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
- *
- * Following the skb_unshare() example, in case of error, the calling function
- * doesn't have to worry about freeing the original skb.
+ * Returns an error if skb_cow_head() fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
- if (skb_cow_head(skb, VLAN_HLEN) < 0) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
+ if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ return -ENOMEM;
+
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
@@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
+ return 0;
+}
+
+/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ int err;
+
+ err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
return skb;
}
/**
- * __vlan_put_tag - regular VLAN tag inserting
+ * vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
@@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
@@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
return skb;
}
-/**
- * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
- * @vlan_proto: VLAN encapsulation protocol
- * @vlan_tci: VLAN TCI to insert
*
- * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
+ * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
*/
-static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
- __be16 vlan_proto,
- u16 vlan_tci)
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
- skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
+ if (likely(skb))
+ skb->vlan_tci = 0;
+ return skb;
+}
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the
+ * VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
/**
- * vlan_put_tag - inserts VLAN tag according to device features
+ * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
- * Assumes skb->dev is the target that will xmit this frame.
- * Returns a VLAN tagged skb.
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
{
- if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
- return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
- } else {
- return __vlan_put_tag(skb, vlan_proto, vlan_tci);
- }
+ skb->vlan_proto = vlan_proto;
+ skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}
/**
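A sketch of the intended call pattern for the push-inside helpers on a transmit path that cannot offload tag insertion; per the comments above, a NULL return means the original skb has already been freed.

skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb))
        return NETDEV_TX_OK;    /* nothing left to free */

/* the VLAN header is now part of the payload; transmit as usual */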
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d8257ab60bac..2c476acb87d9 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -164,7 +164,7 @@ struct st_sensor_transfer_function {
};
/**
- * struct st_sensors - ST sensors list
+ * struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
@@ -177,7 +177,7 @@ struct st_sensor_transfer_function {
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
-struct st_sensors {
+struct st_sensor_settings {
u8 wai;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
@@ -196,7 +196,7 @@ struct st_sensors {
* struct st_sensor_data - ST sensor device status
* @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
- * @sensor: Pointer to the current sensor struct in use.
+ * @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
* @vdd: Pointer to sensor's Vdd power supply
* @vdd_io: Pointer to sensor's Vdd-IO power supply
@@ -213,7 +213,7 @@ struct st_sensors {
struct st_sensor_data {
struct device *dev;
struct iio_trigger *trig;
- struct st_sensors *sensor;
+ struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
struct regulator *vdd_io;
@@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val);
int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensors *sensors);
+ int num_sensors_list, const struct st_sensor_settings *sensor_settings);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 15dc6bc2bdd2..3642ce7ef512 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/iio/types.h>
+#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -326,6 +327,11 @@ struct iio_dev;
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
+ * @of_xlate: function pointer to obtain channel specifier index.
+ * When #iio-cells is greater than '0', the driver could
+ * provide a custom of_xlate function that reads the
+ * *args* and returns the appropriate index in the registered
+ * IIO channels array.
**/
struct iio_info {
struct module *driver_module;
@@ -385,6 +391,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
+ int (*of_xlate)(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec);
};
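
Illustration only (names invented): a driver whose DT specifier carries a hardware channel id rather than an array index could provide an of_xlate along these lines:

static int foo_adc_of_xlate(struct iio_dev *indio_dev,
			    const struct of_phandle_args *iiospec)
{
	int i;

	/* Map the channel id in args[0] to the position of the matching
	 * entry in the driver's iio_chan_spec array.
	 */
	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].channel == iiospec->args[0])
			return i;

	return -EINVAL;
}
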
/**
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 83222cebd47b..c2d6082a1a4c 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -24,6 +24,7 @@ enum integrity_status {
#ifdef CONFIG_INTEGRITY
extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
extern void integrity_inode_free(struct inode *inode);
+extern void __init integrity_load_keys(void);
#else
static inline struct integrity_iint_cache *
@@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode)
{
return;
}
+
+static inline void integrity_load_keys(void)
+{
+}
#endif /* CONFIG_INTEGRITY */
+
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69517a24bc50..d9b05b5bf8c7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
atomic_dec(&t->count);
}
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- atomic_dec(&t->count);
-}
-
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
diff --git a/include/linux/io.h b/include/linux/io.h
index d5fc9b8d8b03..fa02e55e5a2e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
- unsigned long size);
+ resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
- unsigned long size);
+ resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b29a5982e1c3..38daa453f2e5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>
@@ -28,7 +29,7 @@
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
-#define IOMMU_EXEC (1 << 3)
+#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
@@ -62,6 +63,7 @@ enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
};
/*
@@ -105,7 +107,9 @@ enum iommu_attr {
* @remove_device: remove device from iommu grouping
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -137,7 +141,12 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
unsigned long pgsize_bitmap;
+ void *priv;
};
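
A hedged sketch (not from this patch) of an IOMMU driver wiring up the new hook; foo_smmu_install_streamid() stands in for whatever per-driver bookkeeping records the master ID for use at attach time:

#ifdef CONFIG_OF_IOMMU
static int foo_smmu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	/* args->args[0] carries the master (stream) ID from the
	 * device's "iommus" specifier.
	 */
	return foo_smmu_install_streamid(dev, args->args[0]);
}
#endif
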
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 35e7eca4e33b..1eee6bcfcf76 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -6,15 +6,7 @@
#include <linux/rwsem.h>
#include <linux/notifier.h>
#include <linux/nsproxy.h>
-
-/*
- * ipc namespace events
- */
-#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */
-#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */
-#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */
-
-#define IPCNS_CALLBACK_PRI 0
+#include <linux/ns_common.h>
struct user_namespace;
@@ -38,7 +30,6 @@ struct ipc_namespace {
unsigned int msg_ctlmni;
atomic_t msg_bytes;
atomic_t msg_hdrs;
- int auto_msgmni;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -68,7 +59,7 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct ipc_namespace init_ipc_ns;
@@ -77,18 +68,8 @@ extern atomic_t nr_ipc_ns;
extern spinlock_t mq_lock;
#ifdef CONFIG_SYSVIPC
-extern int register_ipcns_notifier(struct ipc_namespace *);
-extern int cond_register_ipcns_notifier(struct ipc_namespace *);
-extern void unregister_ipcns_notifier(struct ipc_namespace *);
-extern int ipcns_notify(unsigned long);
extern void shm_destroy_orphaned(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC */
-static inline int register_ipcns_notifier(struct ipc_namespace *ns)
-{ return 0; }
-static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
-{ return 0; }
-static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
-static inline int ipcns_notify(unsigned long l) { return 0; }
static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
#endif /* CONFIG_SYSVIPC */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 76d2acbfa7c6..838dbfa3c331 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -37,6 +37,7 @@
#include <linux/list.h>
#include <linux/proc_fs.h>
+#include <linux/acpi.h> /* For acpi_handle */
struct module;
struct device;
@@ -278,15 +279,18 @@ enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
SI_PCI, SI_DEVICETREE, SI_DEFAULT
};
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
union ipmi_smi_info_union {
+#ifdef CONFIG_ACPI
/*
* the acpi_info element is defined for the SI_ACPI
* address type
*/
struct {
- void *acpi_handle;
+ acpi_handle acpi_handle;
} acpi_info;
+#endif
};
struct ipmi_smi_info {
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index bd349240d50e..0b1e569f5ff5 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers {
operation is not allowed to fail. If an error occurs, it
should report back the error in a received message. It may
do this in the current call context, since no write locks
- are held when this is run. If the priority is > 0, the
- message will go into a high-priority queue and be sent
- first. Otherwise, it goes into a normal-priority queue. */
+ are held when this is run. Messages are delivered one at
+ a time by the message handler; a new message will not be
+ delivered until the previous message is returned. */
void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg,
- int priority);
+ struct ipmi_smi_msg *msg);
/* Called by the upper layer to request that we try to get
events from the BMC we are attached to. */
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
- const char *sysfs_name,
unsigned char slave_addr);
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff560537dd61..c694e7baa621 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@ struct ipv6_devconf {
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
__s32 optimistic_dad;
+ __s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
__s32 mc_forwarding;
@@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
-
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
-
#endif /* _IPV6_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba86..1e8b0cf30792 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_LPIS (1U << 17)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -76,9 +80,27 @@
#define GICR_MOVALLR 0x0110
#define GICR_PIDR2 GICD_PIDR2
+#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+
+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
+#define GICR_PROPBASER_NonShareable (0U << 10)
+#define GICR_PROPBASER_InnerShareable (1U << 10)
+#define GICR_PROPBASER_OuterShareable (2U << 10)
+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PROPBASER_nCnB (0U << 7)
+#define GICR_PROPBASER_nC (1U << 7)
+#define GICR_PROPBASER_RaWt (2U << 7)
+#define GICR_PROPBASER_RaWb (3U << 7)
+#define GICR_PROPBASER_WaWt (4U << 7)
+#define GICR_PROPBASER_WaWb (5U << 7)
+#define GICR_PROPBASER_RaWaWt (6U << 7)
+#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
@@ -91,9 +113,93 @@
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define LPI_PROP_GROUP1 (1 << 1)
+#define LPI_PROP_ENABLED (1 << 0)
+
+/*
+ * ITS registers, offsets from ITS_base
+ */
+#define GITS_CTLR 0x0000
+#define GITS_IIDR 0x0004
+#define GITS_TYPER 0x0008
+#define GITS_CBASER 0x0080
+#define GITS_CWRITER 0x0088
+#define GITS_CREADR 0x0090
+#define GITS_BASER 0x0100
+#define GITS_PIDR2 GICR_PIDR2
+
+#define GITS_TRANSLATER 0x10040
+
+#define GITS_TYPER_PTA (1UL << 19)
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_nCnB (0UL << 59)
+#define GITS_CBASER_nC (1UL << 59)
+#define GITS_CBASER_RaWt (2UL << 59)
+#define GITS_CBASER_RaWb (3UL << 59)
+#define GITS_CBASER_WaWt (4UL << 59)
+#define GITS_CBASER_WaWb (5UL << 59)
+#define GITS_CBASER_RaWaWt (6UL << 59)
+#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_NonShareable (0UL << 10)
+#define GITS_CBASER_InnerShareable (1UL << 10)
+#define GITS_CBASER_OuterShareable (2UL << 10)
+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+
+#define GITS_BASER_NR_REGS 8
+
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_nCnB (0UL << 59)
+#define GITS_BASER_nC (1UL << 59)
+#define GITS_BASER_RaWt (2UL << 59)
+#define GITS_BASER_RaWb (3UL << 59)
+#define GITS_BASER_WaWt (4UL << 59)
+#define GITS_BASER_WaWb (5UL << 59)
+#define GITS_BASER_RaWaWt (6UL << 59)
+#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_NonShareable (0UL << 10)
+#define GITS_BASER_InnerShareable (1UL << 10)
+#define GITS_BASER_OuterShareable (2UL << 10)
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
/*
* CPU interface registers
*/
@@ -189,12 +295,34 @@
#include <linux/stringify.h>
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+ struct {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+ u64 flags;
+};
+
static inline void gic_write_eoir(u64 irq)
{
asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
isb();
}
+struct irq_domain;
+int its_cpu_init(void);
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *domain);
+
#endif
#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 13eed92c7d24..71d706d5f169 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -91,6 +91,8 @@
#ifndef __ASSEMBLY__
+#include <linux/irqdomain.h>
+
struct device_node;
extern struct irq_chip gic_arch_extn;
@@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/arch/mips/include/asm/gic.h b/include/linux/irqchip/mips-gic.h
index d7699cf7e135..420f77b34d02 100644
--- a/arch/mips/include/asm/gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -4,57 +4,26 @@
* for more details.
*
* Copyright (C) 2000, 07 MIPS Technologies, Inc.
- *
- * GIC Register Definitions
- *
*/
-#ifndef _ASM_GICREGS_H
-#define _ASM_GICREGS_H
-
-#include <linux/bitmap.h>
-#include <linux/threads.h>
+#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
+#define __LINUX_IRQCHIP_MIPS_GIC_H
-#include <irq.h>
+#include <linux/clocksource.h>
-#undef GICISBYTELITTLEENDIAN
+#define GIC_MAX_INTRS 256
/* Constants */
#define GIC_POL_POS 1
#define GIC_POL_NEG 0
#define GIC_TRIG_EDGE 1
#define GIC_TRIG_LEVEL 0
+#define GIC_TRIG_DUAL_ENABLE 1
+#define GIC_TRIG_DUAL_DISABLE 0
#define MSK(n) ((1 << (n)) - 1)
-#define REG32(addr) (*(volatile unsigned int *) (addr))
-#define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS)
-#define REGP(base, phys) REG32((unsigned long)(base) + (phys))
/* Accessors */
-#define GIC_REG(segment, offset) \
- REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
-#define GIC_REG_ADDR(segment, offset) \
- REG32(_gic_base + segment##_##SECTION_OFS + offset)
-
-#define GIC_ABS_REG(segment, offset) \
- (_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
-#define GIC_REG_ABS_ADDR(segment, offset) \
- (_gic_base + segment##_##SECTION_OFS + offset)
-
-#ifdef GICISBYTELITTLEENDIAN
-#define GICREAD(reg, data) ((data) = (reg), (data) = le32_to_cpu(data))
-#define GICWRITE(reg, data) ((reg) = cpu_to_le32(data))
-#else
-#define GICREAD(reg, data) ((data) = (reg))
-#define GICWRITE(reg, data) ((reg) = (data))
-#endif
-#define GICBIS(reg, mask, bits) \
- do { u32 data; \
- GICREAD(reg, data); \
- data &= ~(mask); \
- data |= ((bits) & (mask)); \
- GICWRITE((reg), data); \
- } while (0)
-
+#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
/* GIC Address Space */
#define SHARED_SECTION_OFS 0x0000
@@ -75,120 +44,42 @@
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
-/* Interrupt Polarity */
-#define GIC_SH_POL_31_0_OFS 0x0100
-#define GIC_SH_POL_63_32_OFS 0x0104
-#define GIC_SH_POL_95_64_OFS 0x0108
-#define GIC_SH_POL_127_96_OFS 0x010c
-#define GIC_SH_POL_159_128_OFS 0x0110
-#define GIC_SH_POL_191_160_OFS 0x0114
-#define GIC_SH_POL_223_192_OFS 0x0118
-#define GIC_SH_POL_255_224_OFS 0x011c
-
-/* Edge/Level Triggering */
-#define GIC_SH_TRIG_31_0_OFS 0x0180
-#define GIC_SH_TRIG_63_32_OFS 0x0184
-#define GIC_SH_TRIG_95_64_OFS 0x0188
-#define GIC_SH_TRIG_127_96_OFS 0x018c
-#define GIC_SH_TRIG_159_128_OFS 0x0190
-#define GIC_SH_TRIG_191_160_OFS 0x0194
-#define GIC_SH_TRIG_223_192_OFS 0x0198
-#define GIC_SH_TRIG_255_224_OFS 0x019c
-
-/* Dual Edge Triggering */
-#define GIC_SH_DUAL_31_0_OFS 0x0200
-#define GIC_SH_DUAL_63_32_OFS 0x0204
-#define GIC_SH_DUAL_95_64_OFS 0x0208
-#define GIC_SH_DUAL_127_96_OFS 0x020c
-#define GIC_SH_DUAL_159_128_OFS 0x0210
-#define GIC_SH_DUAL_191_160_OFS 0x0214
-#define GIC_SH_DUAL_223_192_OFS 0x0218
-#define GIC_SH_DUAL_255_224_OFS 0x021c
+/* Convert an interrupt number to a byte offset/bit for multi-word registers */
+#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
+#define GIC_INTR_BIT(intr) ((intr) % 32)
+
+/* Polarity : Reset Value is always 0 */
+#define GIC_SH_SET_POLARITY_OFS 0x0100
+
+/* Triggering : Reset Value is always 0 */
+#define GIC_SH_SET_TRIGGER_OFS 0x0180
+
+/* Dual edge triggering : Reset Value is always 0 */
+#define GIC_SH_SET_DUAL_OFS 0x0200
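
For illustration (not part of the patch), this is how the generic offset/bit pair replaces the old per-word register names; gic_read()/gic_write() stand in for the driver's MMIO accessors:

static void foo_set_polarity(unsigned int intr, unsigned int pol)
{
	/* e.g. intr = 45: word offset (45 / 32) * 4 = 4, bit 45 % 32 = 13 */
	unsigned int reg = GIC_SH_SET_POLARITY_OFS + GIC_INTR_OFS(intr);
	u32 mask = 1 << GIC_INTR_BIT(intr);
	u32 val = gic_read(reg);

	val &= ~mask;
	val |= pol ? mask : 0;
	gic_write(reg, val);
}
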
/* Set/Clear corresponding bit in Edge Detect Register */
#define GIC_SH_WEDGE_OFS 0x0280
-/* Reset Mask - Disables Interrupt */
-#define GIC_SH_RMASK_31_0_OFS 0x0300
-#define GIC_SH_RMASK_63_32_OFS 0x0304
-#define GIC_SH_RMASK_95_64_OFS 0x0308
-#define GIC_SH_RMASK_127_96_OFS 0x030c
-#define GIC_SH_RMASK_159_128_OFS 0x0310
-#define GIC_SH_RMASK_191_160_OFS 0x0314
-#define GIC_SH_RMASK_223_192_OFS 0x0318
-#define GIC_SH_RMASK_255_224_OFS 0x031c
-
-/* Set Mask (WO) - Enables Interrupt */
-#define GIC_SH_SMASK_31_0_OFS 0x0380
-#define GIC_SH_SMASK_63_32_OFS 0x0384
-#define GIC_SH_SMASK_95_64_OFS 0x0388
-#define GIC_SH_SMASK_127_96_OFS 0x038c
-#define GIC_SH_SMASK_159_128_OFS 0x0390
-#define GIC_SH_SMASK_191_160_OFS 0x0394
-#define GIC_SH_SMASK_223_192_OFS 0x0398
-#define GIC_SH_SMASK_255_224_OFS 0x039c
+/* Mask manipulation */
+#define GIC_SH_RMASK_OFS 0x0300
+#define GIC_SH_SMASK_OFS 0x0380
/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
-#define GIC_SH_MASK_31_0_OFS 0x0400
-#define GIC_SH_MASK_63_32_OFS 0x0404
-#define GIC_SH_MASK_95_64_OFS 0x0408
-#define GIC_SH_MASK_127_96_OFS 0x040c
-#define GIC_SH_MASK_159_128_OFS 0x0410
-#define GIC_SH_MASK_191_160_OFS 0x0414
-#define GIC_SH_MASK_223_192_OFS 0x0418
-#define GIC_SH_MASK_255_224_OFS 0x041c
+#define GIC_SH_MASK_OFS 0x0400
/* Pending Global Interrupts (RO) */
-#define GIC_SH_PEND_31_0_OFS 0x0480
-#define GIC_SH_PEND_63_32_OFS 0x0484
-#define GIC_SH_PEND_95_64_OFS 0x0488
-#define GIC_SH_PEND_127_96_OFS 0x048c
-#define GIC_SH_PEND_159_128_OFS 0x0490
-#define GIC_SH_PEND_191_160_OFS 0x0494
-#define GIC_SH_PEND_223_192_OFS 0x0498
-#define GIC_SH_PEND_255_224_OFS 0x049c
-
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_PEND_OFS 0x0480
/* Maps Interrupt X to a Pin */
-#define GIC_SH_MAP_TO_PIN(intr) \
- (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
-
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
/* Maps Interrupt X to a VPE */
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
- (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
+ ((32 * (intr)) + (((vpe) / 32) * 4))
#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
-/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) (((intr) / 32)*4)
-#define GIC_INTR_BIT(intr) ((intr) % 32)
-
-/* Polarity : Reset Value is always 0 */
-#define GIC_SH_SET_POLARITY_OFS 0x0100
-#define GIC_SET_POLARITY(intr, pol) \
- GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + \
- GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
- (pol) << GIC_INTR_BIT(intr))
-
-/* Triggering : Reset Value is always 0 */
-#define GIC_SH_SET_TRIGGER_OFS 0x0180
-#define GIC_SET_TRIGGER(intr, trig) \
- GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + \
- GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
- (trig) << GIC_INTR_BIT(intr))
-
-/* Mask manipulation */
-#define GIC_SH_SMASK_OFS 0x0380
-#define GIC_SET_INTR_MASK(intr) \
- GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + \
- GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
-#define GIC_SH_RMASK_OFS 0x0300
-#define GIC_CLR_INTR_MASK(intr) \
- GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + \
- GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
-
/* Register Map for Local Section */
#define GIC_VPE_CTL_OFS 0x0000
#define GIC_VPE_PEND_OFS 0x0004
@@ -198,6 +89,7 @@
#define GIC_VPE_WD_MAP_OFS 0x0040
#define GIC_VPE_COMPARE_MAP_OFS 0x0044
#define GIC_VPE_TIMER_MAP_OFS 0x0048
+#define GIC_VPE_FDC_MAP_OFS 0x004c
#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
#define GIC_VPE_SWINT0_MAP_OFS 0x0054
#define GIC_VPE_SWINT1_MAP_OFS 0x0058
@@ -208,13 +100,11 @@
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
-#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
-#define GIC_VPE_EIC_SS(intr) \
- (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
+#define GIC_VPE_EIC_SS(intr) (4 * (intr))
-#define GIC_VPE_EIC_VEC_BASE 0x0800
-#define GIC_VPE_EIC_VEC(intr) \
- (GIC_VPE_EIC_VEC_BASE + (4 * intr))
+#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
+#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
#define GIC_VPE_TENABLE_NMI_OFS 0x1000
#define GIC_VPE_TENABLE_YQ_OFS 0x1004
@@ -238,8 +128,8 @@
#define GIC_SH_CONFIG_NUMVPES_SHF 0
#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
-#define GIC_SH_WEDGE_SET(intr) (intr | (0x1 << 31))
-#define GIC_SH_WEDGE_CLR(intr) (intr & ~(0x1 << 31))
+#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
#define GIC_MAP_TO_PIN_SHF 31
#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
@@ -251,6 +141,10 @@
#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
/* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_FDC_RTBL_SHF 4
+#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
+#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
+#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
@@ -300,38 +194,6 @@
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
-/*
- * Set the Mapping of Interrupt X to a VPE.
- */
-#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \
- GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \
- GIC_SH_MAP_TO_VPE_REG_BIT(vpe))
-
-/*
- * Interrupt Meta-data specification. The ipiflag helps
- * in building ipi_map.
- */
-struct gic_intr_map {
- unsigned int cpunum; /* Directed to this CPU */
-#define GIC_UNUSED 0xdead /* Dummy data */
- unsigned int pin; /* Directed to this Pin */
- unsigned int polarity; /* Polarity : +/- */
- unsigned int trigtype; /* Trigger : Edge/Levl */
- unsigned int flags; /* Misc flags */
-#define GIC_FLAG_TRANSPARENT 0x01
-};
-
-/*
- * This is only used in EIC mode. This helps to figure out which
- * shared interrupts we need to process when we get a vector interrupt.
- */
-#define GIC_MAX_SHARED_INTR 0x5
-struct gic_shared_intr_map {
- unsigned int num_shared_intr;
- unsigned int intr_list[GIC_MAX_SHARED_INTR];
- unsigned int local_intr_mask;
-};
-
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
#define GIC_CPU_INT1 1 /* . */
@@ -340,45 +202,48 @@ struct gic_shared_intr_map {
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
-/* Local GIC interrupts. */
-#define GIC_INT_TMR (GIC_CPU_INT5)
-#define GIC_INT_PERFCTR (GIC_CPU_INT5)
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET 2
/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
-#define GIC_CPU_TO_VEC_OFFSET (2)
+#define GIC_CPU_TO_VEC_OFFSET 2
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
-#define GIC_PIN_TO_VEC_OFFSET (1)
+#define GIC_PIN_TO_VEC_OFFSET 1
-#include <linux/clocksource.h>
-#include <linux/irq.h>
+/* Local GIC interrupts. */
+#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
+#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
+#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
+#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
+#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
+#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
+#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
+#define GIC_NUM_LOCAL_INTRS 7
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE 0
+#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
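
Worked illustration (helper invented): with GIC_NUM_LOCAL_INTRS = 7, local interrupts occupy hwirqs 0..6 and shared interrupt N lands at hwirq 7 + N, so shared interrupt 3 maps to hwirq 10 and GIC_HWIRQ_TO_SHARED(10) recovers 3:

static inline unsigned int foo_to_hwirq(bool local, unsigned int n)
{
	return local ? GIC_LOCAL_TO_HWIRQ(n) : GIC_SHARED_TO_HWIRQ(n);
}
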
extern unsigned int gic_present;
-extern unsigned int gic_frequency;
-extern unsigned long _gic_base;
-extern unsigned int gic_irq_base;
-extern unsigned int gic_irq_flags[];
-extern struct gic_shared_intr_map gic_shared_intr_map[];
extern void gic_init(unsigned long gic_base_addr,
- unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
- unsigned int intrmap_size, unsigned int irqbase);
+ unsigned long gic_addrspace_size, unsigned int cpu_vec,
+ unsigned int irqbase);
extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_compare_int (void);
extern cycle_t gic_read_count(void);
+extern unsigned int gic_get_count_width(void);
extern cycle_t gic_read_compare(void);
extern void gic_write_compare(cycle_t cnt);
extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern void gic_bind_eic_interrupt(int irq, int set);
extern unsigned int gic_get_timer_pending(void);
-extern void gic_get_int_mask(unsigned long *dst, const unsigned long *src);
-extern unsigned int gic_get_int(void);
-extern void gic_enable_interrupt(int irq_vec);
-extern void gic_disable_interrupt(int irq_vec);
-extern void gic_irq_ack(struct irq_data *d);
-extern void gic_finish_irq(struct irq_data *d);
-extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-#endif /* _ASM_GICREGS_H */
+extern int gic_get_c0_compare_int(void);
+extern int gic_get_c0_perfcount_int(void);
+#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 233ea8107038..5449d2f4a1ef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -416,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-struct pid;
-extern struct pid *session_of_pgrp(struct pid *pgrp);
-
unsigned long int_sqrt(unsigned long);
extern void bust_spinlocks(int yes);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 30faf797c2c3..d4e01b358341 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -179,6 +179,7 @@ struct kernfs_open_file {
struct mutex mutex;
int event;
struct list_head list;
+ char *prealloc_buf;
size_t atomic_write_len;
bool mmapped;
@@ -214,6 +215,13 @@ struct kernfs_ops {
* larger ones are rejected with -E2BIG.
*/
size_t atomic_write_len;
+ /*
+ * "prealloc" causes a buffer to be allocated at open for
+ * all read/write requests. As ->seq_show uses seq_read()
+ * which does its own allocation, it is incompatible with
+ * ->prealloc. Provide ->read and ->write with ->prealloc.
+ */
+ bool prealloc;
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
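
A minimal sketch (names invented) of an attribute opting into the preallocated buffer; it supplies ->read/->write rather than ->seq_show, as the comment above requires:

static ssize_t foo_show(struct kernfs_open_file *of, char *buf,
			size_t bytes, loff_t off)
{
	/* buf is the buffer preallocated at open time */
	return scnprintf(buf, bytes, "%d\n", 42);
}

static const struct kernfs_ops foo_kf_ops = {
	.prealloc	= true,
	.read		= foo_show,
};
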
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 057e95971014..e705467ddb47 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -21,6 +21,8 @@
#ifndef __KMEMLEAK_H
#define __KMEMLEAK_H
+#include <linux/slab.h>
+
#ifdef CONFIG_DEBUG_KMEMLEAK
extern void kmemleak_init(void) __ref;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f7296e57d614..5297f9fa0ef2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif
+int arch_check_ftrace_location(struct kprobe *p);
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a6059bdf7b03..26f106022c88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
* include/linux/kvm_h.
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
@@ -353,6 +354,8 @@ struct kvm_memslots {
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
short id_to_index[KVM_MEM_SLOTS_NUM];
+ atomic_t lru_slot;
+ int used_slots;
};
struct kvm {
@@ -395,7 +398,6 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
- struct hlist_head mask_notifier_list;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
struct hlist_head irq_ack_notifier_list;
@@ -447,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+#else
+static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+}
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
@@ -711,44 +721,6 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
-struct kvm_assigned_dev_kernel {
- struct kvm_irq_ack_notifier ack_notifier;
- struct list_head list;
- int assigned_dev_id;
- int host_segnr;
- int host_busnr;
- int host_devfn;
- unsigned int entries_nr;
- int host_irq;
- bool host_irq_disabled;
- bool pci_2_3;
- struct msix_entry *host_msix_entries;
- int guest_irq;
- struct msix_entry *guest_msix_entries;
- unsigned long irq_requested_type;
- int irq_source_id;
- int flags;
- struct pci_dev *dev;
- struct kvm *kvm;
- spinlock_t intx_lock;
- spinlock_t intx_mask_lock;
- char irq_name[32];
- struct pci_saved_state *pci_saved_state;
-};
-
-struct kvm_irq_mask_notifier {
- void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
- int irq;
- struct hlist_node link;
-};
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn);
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask);
-
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
@@ -770,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-int kvm_iommu_map_guest(struct kvm *kvm);
-int kvm_iommu_unmap_guest(struct kvm *kvm);
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
@@ -787,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
-
-static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- return 0;
-}
#endif
static inline void kvm_guest_enter(void)
@@ -832,12 +793,28 @@ static inline void kvm_guest_exit(void)
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
- struct kvm_memory_slot *memslot;
+ int start = 0, end = slots->used_slots;
+ int slot = atomic_read(&slots->lru_slot);
+ struct kvm_memory_slot *memslots = slots->memslots;
+
+ if (gfn >= memslots[slot].base_gfn &&
+ gfn < memslots[slot].base_gfn + memslots[slot].npages)
+ return &memslots[slot];
- kvm_for_each_memslot(memslot, slots)
- if (gfn >= memslot->base_gfn &&
- gfn < memslot->base_gfn + memslot->npages)
- return memslot;
+ while (start < end) {
+ slot = start + (end - start) / 2;
+
+ if (gfn >= memslots[slot].base_gfn)
+ end = slot;
+ else
+ start = slot + 1;
+ }
+
+ if (gfn >= memslots[start].base_gfn &&
+ gfn < memslots[start].base_gfn + memslots[start].npages) {
+ atomic_set(&slots->lru_slot, start);
+ return &memslots[start];
+ }
return NULL;
}
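
The new body combines a cached most-recently-used slot with a binary search over memslots[], which the surrounding kvm code keeps sorted by base_gfn, largest first. A standalone sketch of the search step under that assumption:

struct slot { unsigned long base_gfn, npages; };

static struct slot *find_slot(struct slot *slots, int used, unsigned long gfn)
{
	int start = 0, end = used;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;	/* answer is at mid or before it */
		else
			start = mid + 1;
	}

	if (start < used &&
	    gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];

	return NULL;
}
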
@@ -1011,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg);
-
-void kvm_free_all_assigned_devices(struct kvm *kvm);
-
-#else
-
-static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
-
-#endif
-
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
set_bit(req, &vcpu->requests);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b606bb689a3e..931da7e917cf 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -54,33 +54,6 @@ typedef u64 hfn_t;
typedef hfn_t pfn_t;
-union kvm_ioapic_redirect_entry {
- u64 bits;
- struct {
- u8 vector;
- u8 delivery_mode:3;
- u8 dest_mode:1;
- u8 delivery_status:1;
- u8 polarity:1;
- u8 remote_irr:1;
- u8 trig_mode:1;
- u8 mask:1;
- u8 reserve:7;
- u8 reserved[4];
- u8 dest_id;
- } fields;
-};
-
-struct kvm_lapic_irq {
- u32 vector;
- u32 delivery_mode;
- u32 dest_mode;
- u32 level;
- u32 trig_mode;
- u32 shorthand;
- u32 dest_id;
-};
-
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 361101fef270..cfceef32c9b3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,6 +13,7 @@
#define __LINUX_LEDS_H_INCLUDED
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
@@ -42,11 +43,20 @@ struct led_classdev {
#define LED_BLINK_ONESHOT (1 << 17)
#define LED_BLINK_ONESHOT_STOP (1 << 18)
#define LED_BLINK_INVERT (1 << 19)
+#define LED_SYSFS_DISABLE (1 << 20)
+#define SET_BRIGHTNESS_ASYNC (1 << 21)
+#define SET_BRIGHTNESS_SYNC (1 << 22)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
+ /*
+ * Set LED brightness level immediately - it can block the caller for
+ * the time required for accessing a LED device register.
+ */
+ int (*brightness_set_sync)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
@@ -85,6 +95,9 @@ struct led_classdev {
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
#endif
+
+ /* Ensures consistent access to the LED Flash Class device */
+ struct mutex led_access;
};
extern int led_classdev_register(struct device *parent,
@@ -151,6 +164,33 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*/
extern int led_update_brightness(struct led_classdev *led_cdev);
+/**
+ * led_sysfs_disable - disable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Disable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_disable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_enable - enable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Enable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_is_disabled - check if LED sysfs interface is disabled
+ * @led_cdev: the LED to query
+ *
+ * Returns: true if the led_cdev's sysfs interface is disabled.
+ */
+static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
+{
+ return led_cdev->flags & LED_SYSFS_DISABLE;
+}
+
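
Hedged example (driver names invented): a subsystem taking exclusive control of an LED, e.g. a camera flash interface, disables the sysfs interface while it owns the device and re-enables it on release, under the new led_access mutex:

static void foo_flash_claim(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_disable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}

static void foo_flash_release(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_enable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}
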
/*
* LED Triggers
*/
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bfbc817c34ee..2d182413b1db 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -191,7 +191,8 @@ enum {
ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
ATA_DEV_SEMB = 7, /* SEMB */
ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
- ATA_DEV_NONE = 9, /* no device */
+ ATA_DEV_ZAC = 9, /* ZAC device */
+ ATA_DEV_NONE = 10, /* no device */
/* struct ata_link flags */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
- class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
+ class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
+ class == ATA_DEV_ZAC;
}
static inline unsigned int ata_class_disabled(unsigned int class)
diff --git a/include/linux/list.h b/include/linux/list.h
index f33f831eb3c8..feb773c76ee0 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
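
These kernel-doc corrections all concern the @member parameter; as a reminder, it names the list_head field inside the containing structure. The types below are hypothetical:

struct foo {
	int value;
	struct list_head node;	/* @member is "node" */
};

static inline struct foo *foo_from_node(struct list_head *pos)
{
	return list_entry(pos, struct foo, node);
}
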
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
@@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
@@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*/
@@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list,
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
@@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list,
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
@@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
@@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
@@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
@@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
@@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
@@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
@@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
@@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
@@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
@@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
* @pos: the loop cursor used in the list_for_each_entry_safe loop
* @n: temporary storage used in list_for_each_entry_safe
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* list_safe_reset_next is not safe to use in general if the list may be
* modified concurrently (eg. the lock is dropped in the loop body). An
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 307d9cab2026..1726ccbd8009 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -25,6 +25,8 @@ struct mbox_chan;
* if the client receives some ACK packet for transmission.
* Unused if the controller already has TX_Done/RTR IRQ.
* @rx_callback: Atomic callback to provide client the data received
+ * @tx_prepare: Atomic callback to ask client to prepare the payload
+ * before initiating the transmission if required.
* @tx_done: Atomic callback to tell client of data transmission
*/
struct mbox_client {
@@ -34,6 +36,7 @@ struct mbox_client {
bool knows_txdone;
void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ void (*tx_prepare)(struct mbox_client *cl, void *mssg);
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
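
A brief sketch (the names and the shared-memory idea are assumptions): tx_prepare runs just before the controller starts the transfer, giving the client a last chance to stage the payload where the remote expects it:

static void foo_tx_prepare(struct mbox_client *cl, void *mssg)
{
	/* e.g. copy *mssg into the shared-memory area polled by the
	 * remote processor before the doorbell is rung
	 */
}

static struct mbox_client foo_client = {
	.tx_block	= true,
	.tx_prepare	= foo_tx_prepare,
};
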
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 8e9a029e093d..e6982ac3200d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
+#define MARVELL_PHY_ID_88E3016 0x01410e60
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6ea9f919e888..7c95af8d552c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -400,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg);
void memcg_update_array_size(int num_groups);
-struct kmem_cache *
-__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+void __memcg_kmem_put_cache(struct kmem_cache *cachep);
int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
@@ -492,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
if (unlikely(fatal_signal_pending(current)))
return cachep;
- return __memcg_kmem_get_cache(cachep, gfp);
+ return __memcg_kmem_get_cache(cachep);
+}
+
+static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_put_cache(cachep);
}
#else
#define for_each_memcg_cache_index(_idx) \
@@ -528,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
return cachep;
}
+
+static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+}
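
A simplified sketch of the pairing the slab hot path is expected to follow (not the literal mm/ code): each per-memcg cache obtained from memcg_kmem_get_cache() is released again with memcg_kmem_put_cache() once the allocation is done:

static void *foo_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	struct kmem_cache *c = memcg_kmem_get_cache(cachep, flags);
	void *obj = kmem_cache_alloc(c, flags);

	memcg_kmem_put_cache(c);
	return obj;
}
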
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f34723f7663c..910e3aa1e965 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -141,6 +141,7 @@ struct arizona {
uint16_t dac_comp_coeff;
uint8_t dac_comp_enabled;
+ struct mutex dac_comp_lock;
};
int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index cb01496bfa49..8e1cdbef3dad 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -99,12 +99,6 @@ struct davinci_vcif {
dma_addr_t dma_rx_addr;
};
-struct cq93vc {
- struct platform_device *pdev;
- struct snd_soc_codec *codec;
- u32 sysclk;
-};
-
struct davinci_vc;
struct davinci_vc {
@@ -122,7 +116,6 @@ struct davinci_vc {
/* Client devices */
struct davinci_vcif davinci_vcif;
- struct cq93vc cq93vc;
};
#endif
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 53d33dee70e1..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,7 +37,6 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_25MHZ_CLK 0x00000002
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 379c02648ab3..64d25941b329 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -67,6 +67,8 @@ enum {
MLX4_CMD_MAP_ICM_AUX = 0xffc,
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
+ MLX4_CMD_ACCESS_REG = 0x3b,
+
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
MLX4_CMD_GET_OP_REQ = 0x59,
@@ -197,6 +199,33 @@ enum {
MLX4_CMD_NATIVE
};
+/*
+ * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
+ * Receive checksum value is reported in CQE also for non TCP/UDP packets.
+ *
+ * MLX4_RX_CSUM_MODE_L4 -
+ * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
+ * was validated correctly, is supported.
+ *
+ * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
+ * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
+ *
+ * MLX4_RX_CSUM_MODE_MULTI_VLAN -
+ * Receive Checksum offload is supported for packets with more than 2 vlan headers.
+ */
+enum mlx4_rx_csum_mode {
+ MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
+ MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
+ MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
+ MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
+};
+
+struct mlx4_config_dev_params {
+ u16 vxlan_udp_dport;
+ u8 rx_csum_flags_port_1;
+ u8 rx_csum_flags_port_2;
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+ struct mlx4_config_dev_params *params);
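
Illustrative only (assuming the usual 0-on-success convention): querying the per-port RX checksum flags and VXLAN UDP port through the new CONFIG_DEV wrapper:

static void foo_dump_config(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;

	if (!mlx4_config_dev_retrieval(dev, &params))
		pr_info("vxlan udp dport %u, port1 rx csum flags %#x\n",
			params.vxlan_udp_dport, params.rx_csum_flags_port_1);
}
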
/*
* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 37e4404d0227..25c791e295fd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -95,7 +95,7 @@ enum {
enum {
MLX4_MAX_NUM_PF = 16,
- MLX4_MAX_NUM_VF = 64,
+ MLX4_MAX_NUM_VF = 126,
MLX4_MAX_NUM_VF_P_PORT = 64,
MLX4_MFUNC_MAX = 80,
MLX4_MAX_EQ_NUM = 1024,
@@ -117,6 +117,14 @@ enum {
MLX4_STEERING_MODE_DEVICE_MANAGED
};
+enum {
+ MLX4_STEERING_DMFS_A0_DEFAULT,
+ MLX4_STEERING_DMFS_A0_DYNAMIC,
+ MLX4_STEERING_DMFS_A0_STATIC,
+ MLX4_STEERING_DMFS_A0_DISABLE,
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
+};
+
static inline const char *mlx4_steering_mode_str(int steering_mode)
{
switch (steering_mode) {
@@ -186,7 +194,31 @@ enum {
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
- MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13
+ MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
+ MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
+ MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
+ MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
+ MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
+ MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
+};
+
+enum {
+ MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
+ MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
+};
+
+/* bit enums for an 8-bit flags field indicating special use
+ * QPs which require special handling in qp_reserve_range.
+ * Currently, this only includes QPs used by the ETH interface,
+ * where we expect to use blueflame. These QPs must not have
+ * bits 6 and 7 set in their qp number.
+ *
+ * This enum may use only bits 0..7.
+ */
+enum {
+ MLX4_RESERVE_A0_QP = 1 << 6,
+ MLX4_RESERVE_ETH_BF_QP = 1 << 7,
};
enum {
@@ -202,7 +234,8 @@ enum {
enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
- MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
+ MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
};
@@ -328,6 +361,8 @@ enum {
enum mlx4_qp_region {
MLX4_QP_REGION_FW = 0,
+ MLX4_QP_REGION_RSS_RAW_ETH,
+ MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
MLX4_QP_REGION_ETH_ADDR,
MLX4_QP_REGION_FC_ADDR,
MLX4_QP_REGION_FC_EXCH,
@@ -379,6 +414,13 @@ enum {
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+enum mlx4_module_id {
+ MLX4_MODULE_ID_SFP = 0x3,
+ MLX4_MODULE_ID_QSFP = 0xC,
+ MLX4_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX4_MODULE_ID_QSFP28 = 0x11,
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -433,6 +475,7 @@ struct mlx4_caps {
int num_cqs;
int max_cqes;
int reserved_cqs;
+ int num_sys_eqs;
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
@@ -449,6 +492,7 @@ struct mlx4_caps {
int reserved_mcgs;
int num_qp_per_mgm;
int steering_mode;
+ int dmfs_high_steer_mode;
int fs_log_max_ucast_qp_range_size;
int num_pds;
int reserved_pds;
@@ -487,6 +531,10 @@ struct mlx4_caps {
u16 hca_core_clock;
u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode;
+ u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 alloc_res_qp_mask;
+ u32 dmfs_high_rate_qpn_base;
+ u32 dmfs_high_rate_qpn_range;
};
struct mlx4_buf_list {
@@ -607,6 +655,11 @@ struct mlx4_cq {
atomic_t refcount;
struct completion free;
+ struct {
+ struct list_head list;
+ void (*comp)(struct mlx4_cq *);
+ void *priv;
+ } tasklet_ctx;
};
struct mlx4_qp {
@@ -799,6 +852,26 @@ struct mlx4_init_port_param {
u64 si_guid;
};
+#define MAD_IFC_DATA_SZ 192
+/* MAD IFC Mailbox */
+struct mlx4_mad_ifc {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 class_specific;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ __be16 dr_slid;
+ __be16 dr_dlid;
+ u8 reserved[28];
+ u8 data[MAD_IFC_DATA_SZ];
+} __packed;
+
#define mlx4_foreach_port(port, dev, type) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if ((type) == (dev)->caps.port_mask[(port)])
@@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
return (qpn < dev->phys_caps.base_sqpn + 8 +
- 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
+ qpn >= dev->phys_caps.base_sqpn) ||
+ (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
}
static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
@@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
-
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
@@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
u64 iova, u64 size, int npages,
int page_shift, struct mlx4_mpt_entry *mpt_entry);
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ u16 offset, u16 size, u8 *data);
+
/* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void)
{
return is_kdump_kernel();
}
+/* ACCESS REG commands */
+enum mlx4_access_reg_method {
+ MLX4_ACCESS_REG_QUERY = 0x1,
+ MLX4_ACCESS_REG_WRITE = 0x2,
+};
+
+/* ACCESS PTYS Reg command */
+enum mlx4_ptys_proto {
+ MLX4_PTYS_IB = 1<<0,
+ MLX4_PTYS_EN = 1<<2,
+};
+
+struct mlx4_ptys_reg {
+ u8 resrvd1;
+ u8 local_port;
+ u8 resrvd2;
+ u8 proto_mask;
+ __be32 resrvd3[2];
+ __be32 eth_proto_cap;
+ __be16 ib_width_cap;
+ __be16 ib_speed_cap;
+ __be32 resrvd4;
+ __be32 eth_proto_admin;
+ __be16 ib_width_admin;
+ __be16 ib_speed_admin;
+ __be32 resrvd5;
+ __be32 eth_proto_oper;
+ __be16 ib_width_oper;
+ __be16 ib_speed_oper;
+ __be32 resrvd6;
+ __be32 eth_proto_lp_adv;
+} __packed;
+
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+ enum mlx4_access_reg_method method,
+ struct mlx4_ptys_reg *ptys_reg);
+
#endif /* MLX4_DEVICE_H */
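A sketch of a caller using the extended mlx4_qp_reserve_range() prototype and the new reservation flags defined above; the wrapper function and the power-of-two alignment choice are assumptions, not taken from the patch.

static int example_reserve_bf_eth_qps(struct mlx4_dev *dev, int cnt)
{
	int base_qpn, err;

	/* blueflame-capable ETH QPs: allocator must keep qpn bits 6/7 clear */
	err = mlx4_qp_reserve_range(dev, cnt, roundup_pow_of_two(cnt),
				    &base_qpn, MLX4_RESERVE_ETH_BF_QP);
	if (err)
		return err;

	/* ... create and use QPs in [base_qpn, base_qpn + cnt) ... */

	mlx4_qp_release_range(dev, base_qpn, cnt);
	return 0;
}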
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 5f4e36cf0091..467ccdf94c98 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -120,13 +120,15 @@ enum {
MLX4_RSS_QPC_FLAG_OFFSET = 13,
};
+#define MLX4_EN_RSS_KEY_SIZE 40
+
struct mlx4_rss_context {
__be32 base_qpn;
__be32 default_qpn;
u16 reserved;
u8 hash_fn;
u8 flags;
- __be32 rss_key[10];
+ __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
__be32 base_qpn_udp;
};
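The key is now sized via MLX4_EN_RSS_KEY_SIZE (40 bytes). A sketch of filling it from netdev_rss_key_fill(), which the netdevice.h hunk further down introduces; the helper function itself is illustrative.

static void example_set_rss_key(struct mlx4_rss_context *ctx)
{
	u8 key[MLX4_EN_RSS_KEY_SIZE];

	netdev_rss_key_fill(key, sizeof(key));	/* one random key per boot */
	memcpy(ctx->rss_key, key, sizeof(key));
}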
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 1d67fd32e71c..4e5bd813bb9a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -120,6 +120,15 @@ enum {
};
enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+ MLX5_PFAULT_SUBTYPE_WQE = 0,
+ MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -180,6 +189,19 @@ enum {
MLX5_MKEY_MASK_FREE = 1ull << 29,
};
+enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
enum mlx5_event {
MLX5_EVENT_TYPE_COMP = 0x0,
@@ -206,6 +228,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_CMD = 0x0a,
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
+
+ MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
};
enum {
@@ -219,11 +243,7 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
- MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
- MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
@@ -232,10 +252,7 @@ enum {
MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
- MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};
@@ -298,6 +315,8 @@ enum {
enum {
HCA_CAP_OPMOD_GET_MAX = 0,
HCA_CAP_OPMOD_GET_CUR = 1,
+ HCA_CAP_OPMOD_GET_ODP_MAX = 4,
+ HCA_CAP_OPMOD_GET_ODP_CUR = 5
};
struct mlx5_inbox_hdr {
@@ -327,6 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out {
u8 vsd_psid[16];
};
+enum mlx5_odp_transport_cap_bits {
+ MLX5_ODP_SUPPORT_SEND = 1 << 31,
+ MLX5_ODP_SUPPORT_RECV = 1 << 30,
+ MLX5_ODP_SUPPORT_WRITE = 1 << 29,
+ MLX5_ODP_SUPPORT_READ = 1 << 28,
+};
+
+struct mlx5_odp_caps {
+ char reserved[0x10];
+ struct {
+ __be32 rc_odp_caps;
+ __be32 uc_odp_caps;
+ __be32 ud_odp_caps;
+ } per_transport_caps;
+ char reserved2[0xe4];
+};
+
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
@@ -447,6 +483,27 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+struct mlx5_eqe_page_fault {
+ __be32 bytes_committed;
+ union {
+ struct {
+ u16 reserved1;
+ __be16 wqe_index;
+ u16 reserved2;
+ __be16 packet_length;
+ u8 reserved3[12];
+ } __packed wqe;
+ struct {
+ __be32 r_key;
+ u16 reserved1;
+ __be16 packet_length;
+ __be32 rdma_op_len;
+ __be64 rdma_va;
+ } __packed rdma;
+ } __packed;
+ __be32 flags_qpn;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -458,6 +515,7 @@ union ev_data {
struct mlx5_eqe_congestion cong;
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
+ struct mlx5_eqe_page_fault page_fault;
} __packed;
struct mlx5_eqe {
@@ -784,6 +842,10 @@ struct mlx5_query_eq_mbox_out {
struct mlx5_eq_context ctx;
};
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
@@ -820,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
- u8 rsvd0[4];
+ __be32 flags;
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 246310dc8bef..166d9315fe4b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -113,6 +113,13 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
+enum mlx5_page_fault_resume_flags {
+ MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+ MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
+ MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
+ MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
+};
+
enum dbg_rsc_type {
MLX5_DBG_RSC_QP,
MLX5_DBG_RSC_EQ,
@@ -467,7 +474,7 @@ struct mlx5_priv {
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
- int reg_pages;
+ atomic_t reg_pages;
struct list_head free_list;
struct mlx5_core_health health;
@@ -633,14 +640,6 @@ static inline void *mlx5_vzalloc(unsigned long size)
return rtn;
}
-static inline void mlx5_vfree(const void *addr)
-{
- if (addr && is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
-}
-
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
@@ -711,6 +710,9 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -748,6 +750,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+ struct mlx5_odp_caps *odp_caps);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
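A sketch of probing on-demand-paging support through the new mlx5_query_odp_caps() export, testing the RC transport bits defined in mlx5/device.h above; the wrapper function is illustrative.

static bool example_rc_odp_send_supported(struct mlx5_core_dev *dev)
{
	struct mlx5_odp_caps caps;

	if (mlx5_query_odp_caps(dev, &caps))
		return false;

	/* per-transport capability words are big-endian bitmasks */
	return !!(be32_to_cpu(caps.per_transport_caps.rc_odp_caps) &
		  MLX5_ODP_SUPPORT_SEND);
}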
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3fa075daeb1d..61f7a342d1bf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,6 +50,9 @@
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
+#define MLX5_QPN_BITS 24
+#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
+
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
@@ -189,6 +192,14 @@ struct mlx5_wqe_ctrl_seg {
__be32 imm;
};
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
@@ -292,6 +303,8 @@ struct mlx5_wqe_signature_seg {
u8 rsvd1[11];
};
+#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
+
struct mlx5_wqe_inline_seg {
__be32 byte_count;
};
@@ -360,9 +373,46 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+enum mlx5_pagefault_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u8 event_subtype;
+ enum mlx5_pagefault_flags flags;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
+ void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
@@ -530,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
+struct mlx5_page_fault_resume_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 flags_qpn;
+ u8 reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
@@ -549,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 context, int error);
+#endif
static inline const char *mlx5_qp_type_str(int type)
{
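A sketch of the consumer side of the pagefault plumbing added here: a handler that would be wired into the new pfault_handler hook, resolve the fault, and resume the QP. How the fault is actually resolved and where the mlx5_core_dev pointer comes from are assumptions; only the types and mlx5_core_page_fault_resume() are taken from the patch.

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void example_resolve_and_resume(struct mlx5_core_dev *dev,
				       struct mlx5_core_qp *qp,
				       struct mlx5_pagefault *pfault)
{
	u8 context = (pfault->flags & MLX5_PFAULT_RDMA) ?
		     MLX5_PAGE_FAULT_RESUME_RDMA : 0;

	/* ... page in the faulting range and update the MTT here ... */

	/* let the HCA retry the stalled operation */
	mlx5_core_page_fault_resume(dev, qp->qpn, context, 0);
}
#endif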
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f7606d3a0915..c0a67b894c4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,6 +19,7 @@
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
+#include <linux/page_ext.h>
struct mempolicy;
struct anon_vma;
@@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
+/*
+ * Allows an architecture to prevent the common memory management code
+ * from establishing a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X) (0)
+#endif
+
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -2049,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
+extern bool _debug_pagealloc_enabled;
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline bool debug_pagealloc_enabled(void)
+{
+ return _debug_pagealloc_enabled;
+}
+
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ __kernel_map_pages(page, numpages, enable);
+}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
@@ -2083,9 +2110,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
#endif
-unsigned long shrink_slab(struct shrink_control *shrink,
- unsigned long nr_pages_scanned,
- unsigned long lru_pages);
+unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2144,20 +2171,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+extern struct page_ext_operations debug_guardpage_ops;
+extern struct page_ext_operations page_poisoning_ops;
+
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
+extern bool _debug_guardpage_enabled;
static inline unsigned int debug_guardpage_minorder(void)
{
return _debug_guardpage_minorder;
}
+static inline bool debug_guardpage_enabled(void)
+{
+ return _debug_guardpage_enabled;
+}
+
static inline bool page_is_guard(struct page *page)
{
- return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ if (!debug_guardpage_enabled())
+ return false;
+
+ page_ext = lookup_page_ext(page);
+ return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
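With this split, a CONFIG_DEBUG_PAGEALLOC kernel only pays for the mapping changes when the feature is actually enabled at boot; callers keep the same wrapper. A sketch of the unchanged call pattern on a free path; the function below is illustrative.

static void example_on_free(struct page *page, unsigned int order)
{
	/* no-op unless debug_pagealloc was enabled at boot */
	kernel_map_pages(page, 1 << order, 0);	/* 0 == unmap */
}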
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bf9f57529dcf..6d34aa266a8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,7 +10,6 @@
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
-#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
@@ -186,9 +185,6 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
- unsigned long debug_flags; /* Use atomic bitops on this */
-#endif
#ifdef CONFIG_KMEMCHECK
/*
@@ -534,4 +530,12 @@ enum tlb_flush_reason {
NR_TLB_FLUSH_REASONS,
};
+ /*
+ * A swap entry has to fit into an "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 88787bb4b3b9..95243d28a0ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
- * locks protecting the reverse maps are held. The subsystem
- * must guarantee that no additional references are taken to
- * the pages in the range established between the call to
- * invalidate_range_start() and the matching call to
- * invalidate_range_end().
+ * locks protecting the reverse maps are held. If the subsystem
+ * can't guarantee that no additional references are taken to
+ * the pages in the range, it has to implement the
+ * invalidate_range() notifier to remove any references taken
+ * after invalidate_range_start().
*
* Invalidation of multiple concurrent ranges may be
* optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
void (*invalidate_range_end)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
+
+ /*
+ * invalidate_range() is either called between
+ * invalidate_range_start() and invalidate_range_end() when the
+ * VM has to free pages that were unmapped, but before the
+ * pages are actually freed, or outside of _start()/_end() when
+ * a (remote) TLB flush is necessary.
+ *
+ * If invalidate_range() is used to manage a non-CPU TLB with
+ * shared page-tables, it is not necessary to implement the
+ * invalidate_range_start()/end() notifiers, as
+ * invalidate_range() already catches the points in time when an
+ * external TLB range needs to be flushed.
+ *
+ * The invalidate_range() function is called under the ptl
+ * spin-lock and not allowed to sleep.
+ *
+ * Note that this function might be called with just a sub-range
+ * of what was passed to invalidate_range_start()/end(), if
+ * called between those functions.
+ */
+ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
};
/*
@@ -154,7 +177,7 @@ struct mmu_notifier_ops {
* Therefore notifier chains can only be traversed when either
*
* 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
+ * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
struct mmu_notifier {
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
__mmu_notifier_invalidate_range_end(mm, start, end);
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range(mm, start, end);
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define ptep_clear_flush_notify(__vma, __address, __ptep) \
+({ \
+ unsigned long ___addr = __address & PAGE_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pte_t ___pte; \
+ \
+ ___pte = ptep_clear_flush(__vma, __address, __ptep); \
+ mmu_notifier_invalidate_range(___mm, ___addr, \
+ ___addr + PAGE_SIZE); \
+ \
+ ___pte; \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(__mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
{
}
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
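A sketch of a notifier user that only needs the new callback: a device sharing the CPU page table can implement invalidate_range() alone and skip the start()/end() pair, as the comment above describes. struct example_dev and example_flush_dev_tlb() are hypothetical.

static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct example_dev *dev = container_of(mn, struct example_dev, mn);

	/* called under the page-table lock: must not sleep */
	example_flush_dev_tlb(dev, start, end);
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range	= example_invalidate_range,
};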
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3879d7664dfc..2f0856d14b21 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -722,6 +722,9 @@ typedef struct pglist_data {
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
+#ifdef CONFIG_PAGE_EXTENSION
+ struct page_ext *node_page_ext;
+#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
@@ -1075,6 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
struct page;
+struct page_ext;
struct mem_section {
/*
* This is, logically, a pointer to an array of struct
@@ -1092,6 +1096,14 @@ struct mem_section {
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
+#ifdef CONFIG_PAGE_EXTENSION
+ /*
+ * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the
+ * per-section pointer below is used instead. (see page_ext.h)
+ */
+ struct page_ext *page_ext;
+ unsigned long pad;
+#endif
/*
* WARNING: mem_section must be a power-of-2 in size for the
* calculation and use of SECTION_ROOT_MASK to make sense.
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..745def862580 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -69,7 +69,7 @@ struct ieee1394_device_id {
* @bDeviceClass: Class of device; numbers are assigned
* by the USB forum. Products may choose to implement classes,
* or be vendor-specific. Device classes specify behavior of all
- * the interfaces on a devices.
+ * the interfaces on a device.
* @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
* @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
* @bInterfaceClass: Class of interface; numbers are assigned
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index e4d451e4600b..3d4ea7eb2b68 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -455,8 +455,21 @@ struct nand_hw_control {
* be provided if a hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
* @correct: function for ECC correction, matching to ECC generator (sw/hw)
- * @read_page_raw: function to read a raw page without ECC
- * @write_page_raw: function to write a raw page without ECC
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
* any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
@@ -723,6 +736,7 @@ struct nand_chip {
#define NAND_MFR_EON 0x92
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_ATO 0x9b
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 046a0a2e4c4e..63aeccf9ddc8 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -116,6 +116,10 @@ enum spi_nor_ops {
SPI_NOR_OPS_UNLOCK,
};
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+};
+
/**
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: point to a mtd_info structure
@@ -129,6 +133,7 @@ enum spi_nor_ops {
* @program_opcode: the program opcode
* @flash_read: the mode of the read
* @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
* @cfg: used by the read_xfer/write_xfer
* @cmd_buf: used by the write_reg
* @prepare: [OPTIONAL] do some preparations for the
@@ -139,9 +144,6 @@ enum spi_nor_ops {
* @write_xfer: [OPTIONAL] the write fundamental primitive
* @read_reg: [DRIVER-SPECIFIC] read out the register
* @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read_id: [REPLACEABLE] read out the ID data, and find
- * the proper spi_device_id
- * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready
* @read: [DRIVER-SPECIFIC] read data from the SPI NOR
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
@@ -160,6 +162,7 @@ struct spi_nor {
u8 program_opcode;
enum read_mode flash_read;
bool sst_write_second;
+ u32 flags;
struct spi_nor_xfer_cfg cfg;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
@@ -172,8 +175,6 @@ struct spi_nor {
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
int write_enable);
- const struct spi_device_id *(*read_id)(struct spi_nor *nor);
- int (*wait_till_ready)(struct spi_nor *nor);
int (*read)(struct spi_nor *nor, loff_t from,
size_t len, size_t *retlen, u_char *read_buf);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 492de72560fa..c8990779f0c3 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,21 +7,10 @@
#include <linux/path.h>
struct vfsmount;
+struct nameidata;
enum { MAX_NESTED_LINKS = 8 };
-struct nameidata {
- struct path path;
- struct qstr last;
- struct path root;
- struct inode *inode; /* path.dentry.d_inode */
- unsigned int flags;
- unsigned seq, m_seq;
- int last_type;
- unsigned depth;
- char *saved_names[MAX_NESTED_LINKS + 1];
-};
-
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
extern void nd_jump_link(struct nameidata *nd, struct path *path);
-
-static inline void nd_set_link(struct nameidata *nd, char *path)
-{
- nd->saved_names[nd->depth] = path;
-}
-
-static inline char *nd_get_link(struct nameidata *nd)
-{
- return nd->saved_names[nd->depth];
-}
+extern void nd_set_link(struct nameidata *nd, char *path);
+extern char *nd_get_link(struct nameidata *nd);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dcfdecbfa0b7..8e30685affeb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,9 +47,9 @@ enum {
NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
- NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_MPLS_BIT,
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
@@ -118,7 +118,7 @@ enum {
#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
-#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
+#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -181,7 +181,6 @@ enum {
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_UDP_TUNNEL | \
- NETIF_F_GSO_UDP_TUNNEL_CSUM | \
- NETIF_F_GSO_MPLS)
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 74fd5d37f15a..c31f74d76ebd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -57,6 +57,8 @@ struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
+/* 802.15.4 specific */
+struct wpan_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -314,6 +316,7 @@ struct napi_struct {
struct net_device *dev;
struct sk_buff *gro_list;
struct sk_buff *skb;
+ struct hrtimer timer;
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
@@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
@@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n)
__napi_schedule(n);
}
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+ if (napi_schedule_prep(n))
+ __napi_schedule_irqoff(n);
+}
+
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
@@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
+void __napi_complete(struct napi_struct *n);
+void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
* @n: napi context
*
* Mark NAPI processing as complete.
+ * Consider using napi_complete_done() instead.
*/
-void __napi_complete(struct napi_struct *n);
-void napi_complete(struct napi_struct *n);
+static inline void napi_complete(struct napi_struct *n)
+{
+ return napi_complete_done(n, 0);
+}
/**
* napi_by_id - lookup a NAPI by napi_id
@@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi);
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
*/
-static inline void napi_disable(struct napi_struct *n)
-{
- might_sleep();
- set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- clear_bit(NAPI_STATE_DISABLE, &n->state);
-}
+void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
@@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo {
};
#endif
-#define MAX_PHYS_PORT_ID_LEN 32
+#define MAX_PHYS_ITEM_ID_LEN 32
-/* This structure holds a unique identifier to identify the
- * physical port used by a netdevice.
+/* This structure holds a unique identifier to identify some
+ * physical item (port for example) used by a netdevice.
*/
-struct netdev_phys_port_id {
- unsigned char id[MAX_PHYS_PORT_ID_LEN];
+struct netdev_phys_item_id {
+ unsigned char id[MAX_PHYS_ITEM_ID_LEN];
unsigned char id_len;
};
@@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags)
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr)
+ * const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev corresponding to addr.
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
@@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
*
* int (*ndo_get_phys_port_id)(struct net_device *dev,
- * struct netdev_phys_port_id *ppid);
+ * struct netdev_phys_item_id *ppid);
* Called to get ID of physical port of this device. If driver does
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
@@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* performing GSO on a packet. The device returns true if it is
* able to GSO the packet, false otherwise. If the return value is
* false the stack will do software GSO.
+ *
+ * int (*ndo_switch_parent_id_get)(struct net_device *dev,
+ * struct netdev_phys_item_id *psid);
+ * Called to get an ID of the switch chip this port is part of.
+ * If driver implements this, it indicates that it represents a port
+ * of a switch chip.
+ * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
+ * Called to notify switch device port of bridge port STP
+ * state change.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1114,11 +1137,13 @@ struct net_device_ops {
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
+ u16 vid,
u16 flags);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr);
+ const unsigned char *addr,
+ u16 vid);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1136,7 +1161,7 @@ struct net_device_ops {
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
+ struct netdev_phys_item_id *ppid);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
@@ -1155,6 +1180,12 @@ struct net_device_ops {
int (*ndo_get_lock_subclass)(struct net_device *dev);
bool (*ndo_gso_check) (struct sk_buff *skb,
struct net_device *dev);
+#ifdef CONFIG_NET_SWITCHDEV
+ int (*ndo_switch_parent_id_get)(struct net_device *dev,
+ struct netdev_phys_item_id *psid);
+ int (*ndo_switch_port_stp_update)(struct net_device *dev,
+ u8 state);
+#endif
};
/**
@@ -1216,6 +1247,8 @@ enum netdev_priv_flags {
IFF_LIVE_ADDR_CHANGE = 1<<20,
IFF_MACVLAN = 1<<21,
IFF_XMIT_DST_RELEASE_PERM = 1<<22,
+ IFF_IPVLAN_MASTER = 1<<23,
+ IFF_IPVLAN_SLAVE = 1<<24,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1241,6 +1274,8 @@ enum netdev_priv_flags {
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
+#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
+#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
/**
* struct net_device - The DEVICE structure.
@@ -1572,6 +1607,7 @@ struct net_device {
struct inet6_dev __rcu *ip6_ptr;
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
+ struct wpan_dev *ieee802154_ptr;
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -1590,6 +1626,7 @@ struct net_device {
#endif
+ unsigned long gro_flush_timeout;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len;
* Incoming packets are placed on per-cpu queues
*/
struct softnet_data {
- struct Qdisc *output_queue;
- struct Qdisc **output_queue_tailp;
struct list_head poll_list;
- struct sk_buff *completion_queue;
struct sk_buff_head process_queue;
/* stats */
@@ -2327,10 +2361,17 @@ struct softnet_data {
unsigned int time_squeeze;
unsigned int cpu_collision;
unsigned int received_rps;
-
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
+#endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+ struct sd_flow_limit __rcu *flow_limit;
+#endif
+ struct Qdisc *output_queue;
+ struct Qdisc **output_queue_tailp;
+ struct sk_buff *completion_queue;
+#ifdef CONFIG_RPS
/* Elements below can be accessed between CPUs for RPS */
struct call_single_data csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
@@ -2342,9 +2383,6 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
-#ifdef CONFIG_NET_FLOW_LIMIT
- struct sd_flow_limit __rcu *flow_limit;
-#endif
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
}
#endif
-static inline int netif_copy_real_num_queues(struct net_device *to_dev,
- const struct net_device *from_dev)
-{
- int err;
-
- err = netif_set_real_num_tx_queues(to_dev,
- from_dev->real_num_tx_queues);
- if (err)
- return err;
-#ifdef CONFIG_SYSFS
- return netif_set_real_num_rx_queues(to_dev,
- from_dev->real_num_rx_queues);
-#else
- return 0;
-#endif
-}
-
#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
struct netdev_rx_queue *queue)
@@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
+ struct netdev_phys_item_id *ppid);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev,
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
+
+/* RSS keys are 40 or 52 bytes long */
+#define NETDEV_RSS_KEY_LEN 52
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+void netdev_rss_key_fill(void *buffer, size_t len);
+
int dev_get_nest_level(struct net_device *dev,
bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
@@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev)
return dev->priv_flags & IFF_MACVLAN;
}
+static inline bool netif_is_macvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACVLAN_PORT;
+}
+
+static inline bool netif_is_ipvlan(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_SLAVE;
+}
+
+static inline bool netif_is_ipvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_MASTER;
+}
+
static inline bool netif_is_bond_master(struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
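A sketch of a NIC driver using the two new NAPI entry points: napi_schedule_irqoff() from the hard-irq handler, where interrupts are already masked, and napi_complete_done() from the poll routine so the core learns how much work was done. struct example_priv and example_rx_clean() are placeholders.

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	napi_schedule_irqoff(&priv->napi);	/* no local_irq_save/restore */
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = example_rx_clean(priv, budget);

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}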
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 20163b9a0eae..167342c2ce6b 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef NL802154_H
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
new file mode 100644
index 000000000000..85a5c8c16be9
--- /dev/null
+++ b/include/linux/ns_common.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_NS_COMMON_H
+#define _LINUX_NS_COMMON_H
+
+struct proc_ns_operations;
+
+struct ns_common {
+ atomic_long_t stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+};
+
+#endif
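A sketch of how a namespace structure might embed the new common header; the struct below is illustrative rather than one of the in-tree namespaces.

struct example_namespace {
	struct kref kref;
	struct ns_common ns;	/* stashed dentry, proc_ns ops, inode number */
	/* namespace-specific state follows */
};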
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2bf403195c09..258945fcabf1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/miscdevice.h>
#include <linux/kref.h>
+#include <linux/blk-mq.h>
struct nvme_bar {
__u64 cap; /* Controller Capabilities */
@@ -38,6 +39,7 @@ struct nvme_bar {
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
enum {
NVME_CC_ENABLE = 1 << 0,
@@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout;
*/
struct nvme_dev {
struct list_head node;
- struct nvme_queue __rcu **queues;
- unsigned short __percpu *io_queue;
+ struct nvme_queue **queues;
+ struct request_queue *admin_q;
+ struct blk_mq_tag_set tagset;
+ struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct pci_dev *pci_dev;
struct dma_pool *prp_page_pool;
@@ -90,15 +94,16 @@ struct nvme_dev {
struct miscdevice miscdev;
work_func_t reset_workfn;
struct work_struct reset_work;
- struct work_struct cpu_work;
char name[12];
char serial[20];
char model[40];
char firmware_rev[8];
u32 max_hw_sectors;
u32 stripe_size;
+ u32 page_size;
u16 oncs;
u16 abort_limit;
+ u8 event_limit;
u8 vwc;
u8 initialized;
};
@@ -132,7 +137,6 @@ struct nvme_iod {
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
- unsigned long start_time;
dma_addr_t first_dma;
struct list_head node;
struct scatterlist sg[0];
@@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
*/
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
unsigned long addr, unsigned length);
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
+ struct nvme_command *, u32 *);
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
u32 *result);
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
diff --git a/include/linux/of.h b/include/linux/of.h
index 1a66b8881c01..dfde07e77a63 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -24,6 +24,7 @@
#include <linux/topology.h>
#include <linux/notifier.h>
#include <linux/property.h>
+#include <linux/list.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -57,8 +58,6 @@ struct device_node {
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
struct kobject kobj;
unsigned long _flags;
void *data;
@@ -76,6 +75,12 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_reconfig_data {
+ struct device_node *dn;
+ struct property *prop;
+ struct property *old_prop;
+};
+
/* initialize a node */
extern struct kobj_type of_node_ktype;
static inline void of_node_init(struct device_node *node)
@@ -108,15 +113,14 @@ static inline struct device_node *of_node_get(struct device_node *node)
static inline void of_node_put(struct device_node *node) { }
#endif /* !CONFIG_OF_DYNAMIC */
-#ifdef CONFIG_OF
-
/* Pointer for first entry in chain of all nodes. */
-extern struct device_node *of_allnodes;
+extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
+#ifdef CONFIG_OF
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_OF;
@@ -129,7 +133,7 @@ static inline struct device_node *of_node(struct fwnode_handle *fwnode)
static inline bool of_have_populated_dt(void)
{
- return of_allnodes != NULL;
+ return of_root != NULL;
}
static inline bool of_node_is_root(const struct device_node *node)
@@ -173,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
clear_bit(flag, &p->_flags);
}
+extern struct device_node *__of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
/*
@@ -228,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np)
return np ? np->full_name : "<no-node>";
}
-#define for_each_of_allnodes(dn) \
- for (dn = of_allnodes; dn; dn = dn->allnext)
+#define for_each_of_allnodes_from(from, dn) \
+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -241,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match(
const struct of_device_id *matches,
const struct of_device_id **match);
-extern struct device_node *of_find_node_by_path(const char *path);
+extern struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts);
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+ return of_find_node_opts_by_path(path, NULL);
+}
+
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
@@ -292,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_is_available(const struct device_node *device);
+extern bool of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -334,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
-struct of_prop_reconfig {
- struct device_node *dn;
- struct property *prop;
- struct property *old_prop;
-};
-
-extern int of_reconfig_notifier_register(struct notifier_block *);
-extern int of_reconfig_notifier_unregister(struct notifier_block *);
-extern int of_reconfig_notify(unsigned long, void *);
-
extern int of_attach_node(struct device_node *);
extern int of_detach_node(struct device_node *);
@@ -412,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path)
return NULL;
}
+static inline struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_parent(const struct device_node *node)
{
return NULL;
@@ -453,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_is_available(const struct device_node *device)
+static inline bool of_device_is_available(const struct device_node *device)
{
- return 0;
+ return false;
}
static inline struct property *of_find_property(const struct device_node *np,
@@ -794,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np,
return of_property_read_u32_array(np, propname, out_value, 1);
}
+static inline int of_property_read_s32(const struct device_node *np,
+ const char *propname,
+ s32 *out_value)
+{
+ return of_property_read_u32(np, propname, (u32*) out_value);
+}
+
#define of_property_for_each_u32(np, propname, prop, p, u) \
for (prop = of_find_property(np, propname, NULL), \
p = of_prop_next_u32(prop, NULL, &u); \
@@ -862,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
-#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__attribute__((unused)) \
= { .compatible = compat, \
@@ -913,7 +928,19 @@ struct of_changeset {
struct list_head entries;
};
+enum of_reconfig_change {
+ OF_RECONFIG_NO_CHANGE = 0,
+ OF_RECONFIG_CHANGE_ADD,
+ OF_RECONFIG_CHANGE_REMOVE,
+};
+
#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg);
+
extern void of_changeset_init(struct of_changeset *ocs);
extern void of_changeset_destroy(struct of_changeset *ocs);
extern int of_changeset_apply(struct of_changeset *ocs);
@@ -951,7 +978,26 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
-#endif
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
/* CONFIG_OF_RESOLVE api */
extern int of_resolve_phandles(struct device_node *tree);
@@ -967,4 +1013,34 @@ static inline bool of_device_is_system_power_controller(const struct device_node
return of_property_read_bool(np, "system-power-controller");
}
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
#endif /* _LINUX_OF_H */
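A sketch exercising two of the additions above: the path-with-options lookup and the signed-integer property helper. The path, option string, and property name are invented for illustration.

static int example_of_parse(void)
{
	struct device_node *np;
	const char *opts = NULL;
	s32 val = 0;

	/* anything after ':' in the path comes back through @opts */
	np = of_find_node_opts_by_path("/serial@f00:115200n8", &opts);
	if (!np)
		return -ENODEV;

	of_property_read_s32(np, "example-offset", &val);

	of_node_put(np);
	return 0;
}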
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 8cb14eb393d6..d88e81be6368 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
void __iomem *of_iomap(struct device_node *node, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name);
+ int index, const char *name);
#else
#include <linux/io.h>
@@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
}
static inline void __iomem *of_io_request_and_map(struct device_node *device,
- int index, char *name)
+ int index, const char *name)
{
return IOMEM_ERR_PTR(-EINVAL);
}
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 51a560f34bca..16c75547d725 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,12 +1,19 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+
#ifdef CONFIG_OF_IOMMU
extern int of_get_dma_window(struct device_node *dn, const char *prefix,
int index, unsigned long *busno, dma_addr_t *addr,
size_t *size);
+extern void of_iommu_init(void);
+extern struct iommu_ops *of_iommu_configure(struct device *dev);
+
#else
static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
return -EINVAL;
}
+static inline void of_iommu_init(void) { }
+static inline struct iommu_ops *of_iommu_configure(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_OF_IOMMU */
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
+struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+
+extern struct of_device_id __iommu_of_table;
+
+typedef int (*of_iommu_init_fn)(struct device_node *);
+
+#define IOMMU_OF_DECLARE(name, compat, fn) \
+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+
#endif /* __OF_IOMMU_H */
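A sketch of the registration pattern enabled here: an IOMMU driver declares an early init hook keyed on its compatible string and publishes its ops for later of_iommu_configure() lookups. All example_* names and the compatible string are placeholders.

static struct iommu_ops example_iommu_ops;	/* callbacks omitted */

static int __init example_iommu_of_init(struct device_node *np)
{
	/* make the ops discoverable via of_iommu_get_ops() */
	of_iommu_set_ops(np, &example_iommu_ops);
	return 0;
}
IOMMU_OF_DECLARE(example_iommu, "vendor,example-iommu", example_iommu_of_init);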
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index c65a18a0cfdf..7e09244bb679 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size);
/* for building the device tree */
extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
-extern void (*of_pdt_build_more)(struct device_node *dp,
- struct device_node ***nextp);
+extern void (*of_pdt_build_more)(struct device_node *dp);
#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index c2b0627a2317..8a860f096c35 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root,
static inline void of_platform_depopulate(struct device *parent) { }
#endif
+#ifdef CONFIG_OF_DYNAMIC
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
+#endif
+
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index f8322d9cd235..587bbdd31f5a 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -10,20 +10,20 @@
#define OMAP_MAILBOX_H
typedef u32 mbox_msg_t;
-struct omap_mbox;
typedef int __bitwise omap_mbox_irq_t;
#define IRQ_TX ((__force omap_mbox_irq_t) 1)
#define IRQ_RX ((__force omap_mbox_irq_t) 2)
-int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg);
+struct mbox_chan;
+struct mbox_client;
-struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb);
-void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb);
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name);
-void omap_mbox_save_ctx(struct omap_mbox *mbox);
-void omap_mbox_restore_ctx(struct omap_mbox *mbox);
-void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq);
-void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq);
+void omap_mbox_save_ctx(struct mbox_chan *chan);
+void omap_mbox_restore_ctx(struct mbox_chan *chan);
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
#endif /* OMAP_MAILBOX_H */
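A sketch of the replacement calling convention: instead of omap_mbox_get()/omap_mbox_msg_send(), clients now go through the generic mailbox framework. The callbacks, channel name, and message value are illustrative; mbox_send_message() and struct mbox_client come from linux/mailbox_client.h.

static void example_mbox_rx(struct mbox_client *cl, void *msg)
{
	pr_info("omap mbox rx: 0x%x\n", (u32)(uintptr_t)msg);
}

static int example_mbox_open(struct device *dev)
{
	static struct mbox_client cl;
	struct mbox_chan *chan;
	int ret;

	cl.dev = dev;
	cl.rx_callback = example_mbox_rx;
	cl.tx_block = true;

	chan = omap_mbox_request_channel(&cl, "example-chan");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, (void *)(uintptr_t)0x1234);
	return ret < 0 ? ret : 0;
}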
diff --git a/include/linux/oom.h b/include/linux/oom.h
index e8d6e1058723..853698c721f7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask)
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+static inline bool task_will_free_mem(struct task_struct *task)
+{
+ /*
+ * A coredumping process may sleep for an extended period in exit_mm(),
+ * so the oom killer cannot assume that the process will promptly exit
+ * and release memory.
+ */
+ return (task->flags & PF_EXITING) &&
+ !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
+}
+
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
deleted file mode 100644
index 22691f614043..000000000000
--- a/include/linux/page-debug-flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef LINUX_PAGE_DEBUG_FLAGS_H
-#define LINUX_PAGE_DEBUG_FLAGS_H
-
-/*
- * page->debug_flags bits:
- *
- * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
- */
-
-enum page_debug_flags {
- PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
- PAGE_DEBUG_FLAG_GUARD,
-};
-
-/*
- * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
- * gets turned off when no debug features are enabling it!
- */
-
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) && \
- !defined(CONFIG_PAGE_GUARD) \
-/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
-#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
-#endif
-#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
-
-#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
new file mode 100644
index 000000000000..d2a2c84c72d0
--- /dev/null
+++ b/include/linux/page_ext.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_PAGE_EXT_H
+#define __LINUX_PAGE_EXT_H
+
+#include <linux/types.h>
+#include <linux/stacktrace.h>
+
+struct pglist_data;
+struct page_ext_operations {
+ bool (*need)(void);
+ void (*init)(void);
+};
+
+#ifdef CONFIG_PAGE_EXTENSION
+
+/*
+ * page_ext->flags bits:
+ *
+ * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
+ * implement the generic debug pagealloc feature. Pages are filled with
+ * poison patterns and this flag is set after free_pages(). Before
+ * alloc_pages(), the poison patterns are verified to be intact and the
+ * flag is cleared.
+ */
+
+enum page_ext_flags {
+ PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
+ PAGE_EXT_DEBUG_GUARD,
+ PAGE_EXT_OWNER,
+};
+
+/*
+ * Page Extension can be considered an extended mem_map: a page_ext
+ * structure is associated with every page descriptor and is used to
+ * attach additional information to the page. All page_ext structures
+ * are allocated at boot or on memory hotplug, so the page_ext for a
+ * pfn always exists.
+ */
+struct page_ext {
+ unsigned long flags;
+#ifdef CONFIG_PAGE_OWNER
+ unsigned int order;
+ gfp_t gfp_mask;
+ struct stack_trace trace;
+ unsigned long trace_entries[8];
+#endif
+};
+
+extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void page_ext_init_flatmem(void)
+{
+}
+extern void page_ext_init(void);
+#else
+extern void page_ext_init_flatmem(void);
+static inline void page_ext_init(void)
+{
+}
+#endif
+
+struct page_ext *lookup_page_ext(struct page *page);
+
+#else /* !CONFIG_PAGE_EXTENSION */
+struct page_ext;
+
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+{
+}
+
+static inline struct page_ext *lookup_page_ext(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_init(void)
+{
+}
+
+static inline void page_ext_init_flatmem(void)
+{
+}
+#endif /* CONFIG_PAGE_EXTENSION */
+#endif /* __LINUX_PAGE_EXT_H */
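As a rough illustration (assuming CONFIG_PAGE_EXTENSION is enabled; all foo_* names are hypothetical), a debugging feature supplies a struct page_ext_operations with need()/init() callbacks and later reaches its per-page data through lookup_page_ext():

#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/page_ext.h>

static bool need_foo_debug(void)
{
	/* only reserve page_ext space when the feature is actually wanted */
	return true;
}

static void init_foo_debug(void)
{
	/* one-time setup, called once the page_ext area is allocated */
}

struct page_ext_operations foo_debug_ops = {
	.need = need_foo_debug,
	.init = init_foo_debug,
};

static void foo_mark_page(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (page_ext)
		set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}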
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000000..b48c3471c254
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_PAGE_OWNER_H
+#define __LINUX_PAGE_OWNER_H
+
+#ifdef CONFIG_PAGE_OWNER
+extern bool page_owner_inited;
+extern struct page_ext_operations page_owner_ops;
+
+extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask);
+
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __reset_page_owner(page, order);
+}
+
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __set_page_owner(page, order, gfp_mask);
+}
+#else
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+}
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+}
+
+#endif /* CONFIG_PAGE_OWNER */
+#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a523cee3abb5..44a27696ab6c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1004,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
int pci_save_state(struct pci_dev *dev);
void pci_restore_state(struct pci_dev *dev);
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
+int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 97fb9f69aaed..e63c02a93f6b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -564,6 +564,7 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_NL_USB 0x7912
#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 420032d41d27..57f3a1c550dc 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -254,8 +254,6 @@ do { \
#endif /* CONFIG_SMP */
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
/*
* Must be an lvalue. Since @var must be a simple identifier,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 530b249f7ea4..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
static inline bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long __percpu **percpu_countp)
{
- unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
/* paired with smp_store_release() in percpu_ref_reinit() */
- smp_read_barrier_depends();
+ unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
/*
* Theoretically, the following could test just ATOMIC; however,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d090cfcaa167..22af8f8f5802 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -433,6 +433,7 @@ struct phy_device {
* by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * driver_data: static driver data
*
* The drivers must implement config_aneg and read_status. All
* other functions are optional. Note that none of these
@@ -448,6 +449,7 @@ struct phy_driver {
unsigned int phy_id_mask;
u32 features;
u32 flags;
+ const void *driver_data;
/*
* Called to issue a PHY software reset
@@ -772,4 +774,28 @@ int __init mdio_bus_init(void);
void mdio_bus_exit(void);
extern struct bus_type mdio_bus_type;
+
+/**
+ * module_phy_driver() - Helper macro for registering PHY drivers
+ * @__phy_drivers: array of PHY drivers to register
+ *
+ * Helper macro for PHY drivers which do not do anything special in module
+ * init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init() and module_exit().
+ */
+#define phy_module_driver(__phy_drivers, __count) \
+static int __init phy_module_init(void) \
+{ \
+ return phy_drivers_register(__phy_drivers, __count); \
+} \
+module_init(phy_module_init); \
+static void __exit phy_module_exit(void) \
+{ \
+ phy_drivers_unregister(__phy_drivers, __count); \
+} \
+module_exit(phy_module_exit)
+
+#define module_phy_driver(__phy_drivers) \
+ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
+
#endif /* __PHY_H */
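A sketch of the boilerplate the new macro removes; the PHY ID, mask, name and the choice of generic callbacks below are hypothetical:

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver foo_phy_drivers[] = {
	{
		.phy_id		= 0x00112233,
		.phy_id_mask	= 0xffffffff,
		.name		= "Foo Gigabit PHY",
		.features	= PHY_GBIT_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
	},
};

/* Expands to a module_init()/module_exit() pair that calls
 * phy_drivers_register()/phy_drivers_unregister() on the array. */
module_phy_driver(foo_phy_drivers);

MODULE_LICENSE("GPL");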
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cb6f815475b..a0197fa1b116 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -61,7 +61,6 @@ struct phy {
struct device dev;
int id;
const struct phy_ops *ops;
- struct phy_init_data *init_data;
struct mutex mutex;
int init_count;
int power_count;
@@ -84,33 +83,14 @@ struct phy_provider {
struct of_phandle_args *args);
};
-/**
- * struct phy_consumer - represents the phy consumer
- * @dev_name: the device name of the controller that will use this PHY device
- * @port: name given to the consumer port
- */
-struct phy_consumer {
- const char *dev_name;
- const char *port;
-};
-
-/**
- * struct phy_init_data - contains the list of PHY consumers
- * @num_consumers: number of consumers for this PHY device
- * @consumers: list of PHY consumers
- */
-struct phy_init_data {
- unsigned int num_consumers;
- struct phy_consumer *consumers;
+struct phy_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct phy *phy;
};
-#define PHY_CONSUMER(_dev_name, _port) \
-{ \
- .dev_name = _dev_name, \
- .port = _port, \
-}
-
-#define to_phy(dev) (container_of((dev), struct phy, dev))
+#define to_phy(a) (container_of((a), struct phy, dev))
#define of_phy_provider_register(dev, xlate) \
__of_phy_provider_register((dev), THIS_MODULE, (xlate))
@@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
struct of_phandle_args *args);
struct phy *phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data);
+ const struct phy_ops *ops);
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
- const struct phy_ops *ops, struct phy_init_data *init_data);
+ const struct phy_ops *ops);
void phy_destroy(struct phy *phy);
void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
@@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
+int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
#else
static inline int phy_pm_runtime_get(struct phy *phy)
{
@@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev,
static inline struct phy *phy_create(struct device *dev,
struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
return ERR_PTR(-ENOSYS);
}
static inline struct phy *devm_phy_create(struct device *dev,
struct device_node *node,
- const struct phy_ops *ops,
- struct phy_init_data *init_data)
+ const struct phy_ops *ops)
{
return ERR_PTR(-ENOSYS);
}
@@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider)
{
}
+static inline int
+phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
+{
+ return 0;
+}
+static inline void phy_remove_lookup(struct phy *phy, const char *con_id,
+ const char *dev_id) { }
#endif
#endif /* __DRIVERS_PHY_H */
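A hedged sketch of the replacement flow: instead of passing phy_init_data with a consumer table to phy_create(), a provider now creates the PHY and registers a lookup for its consumer (device and connection names below are hypothetical):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static const struct phy_ops foo_phy_ops = {
	.owner	= THIS_MODULE,	/* callbacks omitted in this sketch */
};

static int foo_phy_probe(struct platform_device *pdev)
{
	struct phy *phy;

	phy = devm_phy_create(&pdev->dev, pdev->dev.of_node, &foo_phy_ops);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* lets the consumer device "foo-usb.0" request this PHY as "usb" */
	return phy_create_lookup(phy, "usb", "foo-usb.0");
}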
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index f2ca1b459377..7e75bfe37cc7 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -11,7 +11,7 @@ struct fixed_phy_status {
struct device_node;
-#ifdef CONFIG_FIXED_PHY
+#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1997ffc295a7..b9cf6c51b181 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,6 +8,7 @@
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/kref.h>
+#include <linux/ns_common.h>
struct pidmap {
atomic_t nr_free;
@@ -43,7 +44,7 @@ struct pid_namespace {
kgid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index a6591c693ebb..5e0bc779e6c5 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -27,6 +27,7 @@ struct samsung_i2s {
#define QUIRK_NO_MUXPSR (1 << 2)
#define QUIRK_NEED_RSTCLR (1 << 3)
#define QUIRK_SUPPORTS_TDM (1 << 4)
+#define QUIRK_SUPPORTS_IDMA (1 << 5)
/* Quirks of the I2S controller */
u32 quirks;
dma_addr_t idma_addr;
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
new file mode 100644
index 000000000000..26af54321958
--- /dev/null
+++ b/include/linux/platform_data/bcmgenet.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
+#define __LINUX_PLATFORM_DATA_BCMGENET_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+struct bcmgenet_platform_data {
+ bool mdio_enabled;
+ phy_interface_t phy_interface;
+ int phy_address;
+ int phy_speed;
+ int phy_duplex;
+ u8 mac_address[ETH_ALEN];
+ int genet_version;
+};
+
+#endif
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 6a1357d31871..7d964e787299 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
IMX_DMATYPE_ESAI, /* ESAI */
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+ IMX_DMATYPE_SAI, /* SAI */
};
enum imx_dma_prio {
diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h
deleted file mode 100644
index 5eb7da9b3772..000000000000
--- a/include/linux/platform_data/dwc3-exynos.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _DWC3_EXYNOS_H_
-#define _DWC3_EXYNOS_H_
-
-struct dwc3_exynos_data {
- int phy_type;
- int (*phy_init)(struct platform_device *pdev, int type);
- int (*phy_exit)(struct platform_device *pdev, int type);
-};
-
-#endif /* _DWC3_EXYNOS_H_ */
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 1b2ba24e4e03..9c7fd1efe495 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,6 +136,7 @@ struct lp855x_rom_data {
Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
+ * @supply : regulator that supplies 3V input
*/
struct lp855x_platform_data {
const char *name;
@@ -144,6 +145,7 @@ struct lp855x_platform_data {
unsigned int period_ns;
int size_program;
struct lp855x_rom_data *rom_data;
+ struct regulator *supply;
};
#endif
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644
index a5f045e1d8fe..000000000000
--- a/include/linux/platform_data/rcar-du.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * rcar_du.h -- R-Car Display Unit DRM driver
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_H__
-#define __RCAR_DU_H__
-
-#include <video/videomode.h>
-
-enum rcar_du_output {
- RCAR_DU_OUTPUT_DPAD0,
- RCAR_DU_OUTPUT_DPAD1,
- RCAR_DU_OUTPUT_LVDS0,
- RCAR_DU_OUTPUT_LVDS1,
- RCAR_DU_OUTPUT_TCON,
- RCAR_DU_OUTPUT_MAX,
-};
-
-enum rcar_du_encoder_type {
- RCAR_DU_ENCODER_UNUSED = 0,
- RCAR_DU_ENCODER_NONE,
- RCAR_DU_ENCODER_VGA,
- RCAR_DU_ENCODER_LVDS,
-};
-
-struct rcar_du_panel_data {
- unsigned int width_mm; /* Panel width in mm */
- unsigned int height_mm; /* Panel height in mm */
- struct videomode mode;
-};
-
-struct rcar_du_connector_lvds_data {
- struct rcar_du_panel_data panel;
-};
-
-struct rcar_du_connector_vga_data {
- /* TODO: Add DDC information for EDID retrieval */
-};
-
-/*
- * struct rcar_du_encoder_data - Encoder platform data
- * @type: the encoder type (RCAR_DU_ENCODER_*)
- * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
- * @connector.lvds: platform data for LVDS connectors
- * @connector.vga: platform data for VGA connectors
- *
- * Encoder platform data describes an on-board encoder, its associated DU SoC
- * output, and the connector.
- */
-struct rcar_du_encoder_data {
- enum rcar_du_encoder_type type;
- enum rcar_du_output output;
-
- union {
- struct rcar_du_connector_lvds_data lvds;
- struct rcar_du_connector_vga_data vga;
- } connector;
-};
-
-struct rcar_du_platform_data {
- struct rcar_du_encoder_data *encoders;
- unsigned int num_encoders;
-};
-
-#endif /* __RCAR_DU_H__ */
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 1730312398ff..5087fff96d86 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -24,7 +24,6 @@
#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
struct st21nfca_nfc_platform_data {
- unsigned int gpio_irq;
unsigned int gpio_ena;
unsigned int irq_polarity;
};
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index 2d11f1f5efab..c3b432f5b63e 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -24,7 +24,6 @@
#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
struct st21nfcb_nfc_platform_data {
- unsigned int gpio_irq;
unsigned int gpio_reset;
unsigned int irq_polarity;
};
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 153d303af7eb..ae4882ca4a64 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *);
/* non-hotpluggable platform devices may use this so that probe() and
* its support may live in __init sections, conserving runtime memory.
*/
-extern int platform_driver_probe(struct platform_driver *driver,
- int (*probe)(struct platform_device *));
+#define platform_driver_probe(drv, probe) \
+ __platform_driver_probe(drv, probe, THIS_MODULE)
+extern int __platform_driver_probe(struct platform_driver *driver,
+ int (*probe)(struct platform_device *), struct module *module);
static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
@@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \
} \
module_exit(__platform_driver##_exit);
-extern struct platform_device *platform_create_bundle(
+#define platform_create_bundle(driver, probe, res, n_res, data, size) \
+ __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
+extern struct platform_device *__platform_create_bundle(
struct platform_driver *driver, int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
- const void *data, size_t size);
+ const void *data, size_t size, struct module *module);
/* early platform driver interface */
struct early_platform_driver {
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8b6c970cff6c..97883604a3c5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* plist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter
* @head: the head for your list
- * @mem: the name of the list_struct within the struct
+ * @mem: the name of the list_head within the struct
*/
#define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* plist_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor
* @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
*
* Iterate over list of given type, safe against removal of list entry.
*/
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* plist_first_entry - get the struct for the first entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PI_LIST
# define plist_first_entry(head, type, member) \
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* plist_last_entry - get the struct for the last entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PI_LIST
# define plist_last_entry(head, type, member) \
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index eda4feede048..30e84d48bfea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -118,6 +118,11 @@ static inline void pm_runtime_mark_last_busy(struct device *dev)
ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
+static inline bool pm_runtime_is_irq_safe(struct device *dev)
+{
+ return dev->power.irq_safe;
+}
+
#else /* !CONFIG_PM */
static inline bool queue_pm_work(struct work_struct *work) { return false; }
@@ -164,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
+static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
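One plausible use of the new query, sketched with hypothetical names: a path that may run in atomic context checks that the device was marked IRQ safe before resuming it synchronously there.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int foo_send(struct device *dev)
{
	/* pm_runtime_get_sync() from atomic context is only safe when the
	 * device's callbacks were marked via pm_runtime_irq_safe() */
	if (!pm_runtime_is_irq_safe(dev))
		return -EINVAL;

	pm_runtime_get_sync(dev);
	/* ... access the hardware ... */
	pm_runtime_put(dev);
	return 0;
}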
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 34a1e105bef4..42dfc615dbf8 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -4,21 +4,18 @@
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H
+#include <linux/ns_common.h>
+
struct pid_namespace;
struct nsproxy;
+struct path;
struct proc_ns_operations {
const char *name;
int type;
- void *(*get)(struct task_struct *task);
- void (*put)(void *ns);
- int (*install)(struct nsproxy *nsproxy, void *ns);
- unsigned int (*inum)(void *ns);
-};
-
-struct proc_ns {
- void *ns;
- const struct proc_ns_operations *ns_ops;
+ struct ns_common *(*get)(struct task_struct *task);
+ void (*put)(struct ns_common *ns);
+ int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
};
extern const struct proc_ns_operations netns_operations;
@@ -43,32 +40,38 @@ enum {
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
-extern struct file *proc_ns_fget(int fd);
-extern struct proc_ns *get_proc_ns(struct inode *);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
-extern bool proc_ns_inode(struct inode *inode);
#else /* CONFIG_PROC_FS */
static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-static inline struct file *proc_ns_fget(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; }
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
return 0;
}
static inline void proc_free_inum(unsigned int inum) {}
-static inline bool proc_ns_inode(struct inode *inode) { return false; }
#endif /* CONFIG_PROC_FS */
+static inline int ns_alloc_inum(struct ns_common *ns)
+{
+ atomic_long_set(&ns->stashed, 0);
+ return proc_alloc_inum(&ns->inum);
+}
+
+#define ns_free_inum(ns) proc_free_inum((ns)->inum)
+
+extern struct file *proc_ns_fget(int fd);
+#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
+extern void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+
+extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+extern void nsfs_init(void);
+
#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
};
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info);
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
struct ramoops_platform_data {
unsigned long mem_size;
unsigned long mem_address;
+ unsigned int mem_type;
unsigned long record_size;
unsigned long console_size;
unsigned long ftrace_size;
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index 18d75e795606..e1ab6e86cdb3 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -4,6 +4,8 @@
#ifndef __LINUX_PXA168_ETH_H
#define __LINUX_PXA168_ETH_H
+#include <linux/phy.h>
+
struct pxa168_eth_platform_data {
int port_number;
int phy_addr;
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data {
*/
int speed; /* 0, SPEED_10, SPEED_100 */
int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
+ phy_interface_t intf;
/*
* Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index f2b405116166..77aed9ea1d26 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -108,6 +108,25 @@
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
#endif
+/* QUARK_X1000 SSCR0 bit definition */
+#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
+#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
+
+#define RX_THRESH_QUARK_X1000_DFLT 1
+#define TX_THRESH_QUARK_X1000_DFLT 16
+
+#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+
+#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
+#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
+#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
+#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+
/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -175,6 +194,7 @@ enum pxa_ssp_type {
PXA910_SSP,
CE4100_SSP,
LPSS_SSP,
+ QUARK_X1000_SSP,
};
struct ssp_device {
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 0a260d8a18bf..18102529254e 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,14 +17,20 @@ struct ratelimit_state {
unsigned long begin;
};
-#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = { \
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.interval = interval_init, \
.burst = burst_init, \
}
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
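The split-out initializer allows a ratelimit_state embedded in a larger object to be initialized statically, which the old all-in-one DEFINE_RATELIMIT_STATE() could not do. A minimal sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

struct foo_dev {
	struct ratelimit_state rs;
};

static struct foo_dev foo = {
	.rs = RATELIMIT_STATE_INIT(foo.rs, 5 * HZ, 10),
};

static void foo_warn(void)
{
	if (__ratelimit(&foo.rs))
		pr_warn("foo: rate-limited warning\n");
}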
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 372ad5e0dcb8..529bc946f450 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_entry_rcu - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_first_or_null_rcu - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list_for_each_entry_continue_rcu - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member))
+/**
+ * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from_rcu(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+ typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
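A small sketch of the new iterator (struct and field names hypothetical); the caller is assumed to hold rcu_read_lock(), and iteration starts at @pos itself:

#include <linux/rculist.h>

struct foo_entry {
	struct hlist_node node;
	int key;
};

static struct foo_entry *foo_find_from(struct foo_entry *pos, int key)
{
	hlist_for_each_entry_from_rcu(pos, node)
		if (pos->key == key)
			return pos;
	return NULL;
}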
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index fb298e9d6d3a..b93fd89b2e5e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -65,7 +65,10 @@ struct rhashtable_params {
size_t new_size);
bool (*shrink_decision)(const struct rhashtable *ht,
size_t new_size);
- int (*mutex_is_held)(void);
+#ifdef CONFIG_PROVE_LOCKING
+ int (*mutex_is_held)(void *parent);
+ void *parent;
+#endif
};
/**
@@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t);
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev, gfp_t flags);
+ struct rhash_head __rcu **pprev);
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags);
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags);
+int rhashtable_expand(struct rhashtable *ht);
+int rhashtable_shrink(struct rhashtable *ht);
void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 6cacbce1a06c..5db76a32fcab 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ unsigned change, gfp_t flags);
+void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
+ gfp_t flags);
+
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
@@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 flags);
+ u16 vid,
+ u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr);
+ const unsigned char *addr,
+ u16 vid);
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u16 mode);
+ struct net_device *dev, u16 mode,
+ u32 flags, u32 mask);
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55f5ee7cc3d3..8db31ef98d2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1364,6 +1364,10 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+#ifdef CONFIG_MEMCG_KMEM
+ unsigned memcg_kmem_skip_account:1;
+#endif
+
unsigned long atomic_flags; /* Flags needing atomic access. */
pid_t pid;
@@ -1679,8 +1683,7 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
- unsigned int memcg_kmem_skip_account;
+#ifdef CONFIG_MEMCG
struct memcg_oom_info {
struct mem_cgroup *memcg;
gfp_t gfp_mask;
@@ -2482,6 +2485,10 @@ extern void do_group_exit(int);
extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
+extern int do_execveat(int, struct filename *,
+ const char __user * const __user *,
+ const char __user * const __user *,
+ int);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 3df10d5f154b..e02acf0a0ec9 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -97,13 +97,10 @@ struct uart_8250_port {
unsigned char msr_saved_flags;
struct uart_8250_dma *dma;
- struct serial_rs485 rs485;
/* 8250 specific callbacks */
int (*dl_read)(struct uart_8250_port *);
void (*dl_write)(struct uart_8250_port *, int);
- int (*rs485_config)(struct uart_8250_port *,
- struct serial_rs485 *rs485);
};
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
index a80aa1a5bee2..570e964dc899 100644
--- a/include/linux/serial_bcm63xx.h
+++ b/include/linux/serial_bcm63xx.h
@@ -116,6 +116,4 @@
UART_FIFO_PARERR_MASK | \
UART_FIFO_BRKDET_MASK)
-#define UART_REG_SIZE 24
-
#endif /* _LINUX_SERIAL_BCM63XX_H */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 21c2e05c1bc3..057038cf2788 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -63,7 +63,7 @@ struct uart_ops {
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
struct ktermios *old);
- void (*set_ldisc)(struct uart_port *, int new);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
@@ -131,6 +131,8 @@ struct uart_port {
void (*pm)(struct uart_port *, unsigned int state,
unsigned int old);
void (*handle_break)(struct uart_port *);
+ int (*rs485_config)(struct uart_port *,
+ struct serial_rs485 *rs485);
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -140,12 +142,13 @@ struct uart_port {
unsigned char iotype; /* io access style */
unsigned char unused1;
-#define UPIO_PORT (0)
-#define UPIO_HUB6 (1)
-#define UPIO_MEM (2)
-#define UPIO_MEM32 (3)
-#define UPIO_AU (4) /* Au1x00 and RT288x type IO */
-#define UPIO_TSI (5) /* Tsi108/109 type IO */
+#define UPIO_PORT (0) /* 8b I/O port access */
+#define UPIO_HUB6 (1) /* Hub6 ISA card */
+#define UPIO_MEM (2) /* 8b MMIO access */
+#define UPIO_MEM32 (3) /* 32b little endian */
+#define UPIO_MEM32BE (4) /* 32b big endian */
+#define UPIO_AU (5) /* Au1x00 and RT288x type IO */
+#define UPIO_TSI (6) /* Tsi108/109 type IO */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
@@ -160,21 +163,33 @@ struct uart_port {
/* flags must be updated while holding port mutex */
upf_t flags;
-#define UPF_FOURPORT ((__force upf_t) (1 << 1))
-#define UPF_SAK ((__force upf_t) (1 << 2))
-#define UPF_SPD_MASK ((__force upf_t) (0x1030))
-#define UPF_SPD_HI ((__force upf_t) (0x0010))
-#define UPF_SPD_VHI ((__force upf_t) (0x0020))
-#define UPF_SPD_CUST ((__force upf_t) (0x0030))
-#define UPF_SPD_SHI ((__force upf_t) (0x1000))
-#define UPF_SPD_WARP ((__force upf_t) (0x1010))
-#define UPF_SKIP_TEST ((__force upf_t) (1 << 6))
-#define UPF_AUTO_IRQ ((__force upf_t) (1 << 7))
-#define UPF_HARDPPS_CD ((__force upf_t) (1 << 11))
-#define UPF_LOW_LATENCY ((__force upf_t) (1 << 13))
-#define UPF_BUGGY_UART ((__force upf_t) (1 << 14))
+ /*
+ * These flags must be equivalent to the flags defined in
+ * include/uapi/linux/tty_flags.h which are the userspace definitions
+ * assigned from the serial_struct flags in uart_set_info()
+ * [for bit definitions in the UPF_CHANGE_MASK]
+ *
+ * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
+ * The remaining bits are serial-core specific and not modifiable by
+ * userspace.
+ */
+#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
+#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
+#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
+#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
+#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
+#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
+#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
+#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
+#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
+#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
+#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
+#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
-#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16))
+#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
+
/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */
#define UPF_HARD_FLOW ((__force upf_t) (1 << 21))
/* Port has hardware-assisted s/w flow control */
@@ -190,9 +205,14 @@ struct uart_port {
#define UPF_DEAD ((__force upf_t) (1 << 30))
#define UPF_IOREMAP ((__force upf_t) (1 << 31))
-#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff))
+#define __UPF_CHANGE_MASK 0x17fff
+#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
+#if __UPF_CHANGE_MASK > ASYNC_FLAGS
+#error Change mask not equivalent to userspace-visible bit defines
+#endif
+
/* status must be updated while holding port lock */
upstat_t status;
@@ -214,6 +234,7 @@ struct uart_port {
unsigned char unused[2];
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
+ struct serial_rs485 rs485;
void *private_data; /* generic platform data pointer */
};
@@ -367,7 +388,7 @@ static inline int uart_tx_stopped(struct uart_port *port)
static inline bool uart_cts_enabled(struct uart_port *uport)
{
- return uport->status & UPSTAT_CTS_ENABLE;
+ return !!(uport->status & UPSTAT_CTS_ENABLE);
}
/*
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 68c097077ef0..f4aee75f00b1 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,8 +18,6 @@ struct shrink_control {
*/
unsigned long nr_to_scan;
- /* shrink from these nodes */
- nodemask_t nodes_to_scan;
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6c8b6f604e76..85ab7d72b54c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -20,6 +20,8 @@
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
+#include <linux/rbtree.h>
+#include <linux/socket.h>
#include <linux/atomic.h>
#include <asm/types.h>
@@ -148,6 +150,8 @@
struct net_device;
struct scatterlist;
struct pipe_inode_info;
+struct iov_iter;
+struct napi_struct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -341,7 +345,6 @@ enum {
SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
- SKB_FCLONE_FREE, /* this companion fclone skb is available */
};
enum {
@@ -370,8 +373,7 @@ enum {
SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
- SKB_GSO_MPLS = 1 << 12,
-
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};
#if BITS_PER_LONG > 32
@@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
+ * @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @sk: Socket we are owned by
* @dev: Device we arrived on/are leaving by
* @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
*/
struct sk_buff {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
-
union {
- ktime_t tstamp;
- struct skb_mstamp skb_mstamp;
+ struct {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ union {
+ ktime_t tstamp;
+ struct skb_mstamp skb_mstamp;
+ };
+ };
+ struct rb_node rbnode; /* used in netem & tcp stack */
};
-
struct sock *sk;
struct net_device *dev;
@@ -597,7 +604,8 @@ struct sk_buff {
#endif
__u8 ipvs_property:1;
__u8 inner_protocol_type:1;
- /* 4 or 6 bit hole */
+ __u8 remcsum_offload:1;
+ /* 3 or 5 bit hole */
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -666,6 +674,7 @@ struct sk_buff {
#define SKB_ALLOC_FCLONE 0x01
#define SKB_ALLOC_RX 0x02
+#define SKB_ALLOC_NAPI 0x04
/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb->_skb_refdst = (unsigned long)dst;
}
-void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
- bool force);
-
/**
* skb_dst_set_noref - sets skb dst, hopefully, without taking reference
* @skb: buffer
@@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
- __skb_dst_set_noref(skb, dst, false);
-}
-
-/**
- * skb_dst_set_noref_force - sets skb dst, without taking reference
- * @skb: buffer
- * @dst: dst entry
- *
- * Sets skb dst, assuming a reference was not taken on dst.
- * No reference is taken and no dst_release will be called. While for
- * cached dsts deferred reclaim is a basic feature, for entries that are
- * not cached it is caller's job to guarantee that last dst_release for
- * provided dst happens when nobody uses it, eg. after a RCU grace period.
- */
-static inline void skb_dst_set_noref_force(struct sk_buff *skb,
- struct dst_entry *dst)
-{
- __skb_dst_set_noref(skb, dst, true);
+ WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
/**
@@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
fclones = container_of(skb, struct sk_buff_fclones, skb1);
return skb->fclone == SKB_FCLONE_ORIG &&
- fclones->skb2.fclone == SKB_FCLONE_CLONE &&
+ atomic_read(&fclones->fclone_ref) > 1 &&
fclones->skb2.sk == sk;
}
@@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+void *napi_alloc_frag(unsigned int fragsz);
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length, gfp_t gfp_mask);
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length)
+{
+ return __napi_alloc_skb(napi, length, GFP_ATOMIC);
+}
+
/**
- * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
- * @order: size of the allocation
+ * __dev_alloc_pages - allocate page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
+ * @order: size of the allocation
*
- * Allocate a new page.
+ * Allocate a new page.
*
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
*/
-static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
- struct sk_buff *skb,
- unsigned int order)
-{
- struct page *page;
-
- gfp_mask |= __GFP_COLD;
-
- if (!(gfp_mask & __GFP_NOMEMALLOC))
- gfp_mask |= __GFP_MEMALLOC;
+static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+ unsigned int order)
+{
+ /* This piece of code contains several assumptions.
+ * 1. This is for device Rx, therefor a cold page is preferred.
+ * 2. The expectation is the user wants a compound page.
+ * 3. If requesting a order 0 page it will not be compound
+ * due to the check to see if order has a value in prep_new_page
+ * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
+ * code in gfp_to_alloc_flags that should be enforcing this.
+ */
+ gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
- page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
- if (skb && page && page->pfmemalloc)
- skb->pfmemalloc = true;
+ return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+}
- return page;
+static inline struct page *dev_alloc_pages(unsigned int order)
+{
+ return __dev_alloc_pages(GFP_ATOMIC, order);
}
/**
- * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
- * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
- * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ * __dev_alloc_page - allocate a page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
*
- * Allocate a new page.
+ * Allocate a new page.
*
- * %NULL is returned if there is no free memory.
+ * %NULL is returned if there is no free memory.
*/
-static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
- struct sk_buff *skb)
+static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
+{
+ return __dev_alloc_pages(gfp_mask, 0);
+}
+
+static inline struct page *dev_alloc_page(void)
{
- return __skb_alloc_pages(gfp_mask, skb, 0);
+ return __dev_alloc_page(GFP_ATOMIC);
}
/**
@@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
@@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
return skb_pad(skb, len - size);
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+
+ if (unlikely(size < len)) {
+ len -= size;
+ if (skb_pad(skb, len))
+ return -ENOMEM;
+ __skb_put(skb, len);
+ }
+ return 0;
+}
+
static inline int skb_add_data(struct sk_buff *skb,
char __user *from, int copy)
{
@@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
- struct iovec *to, int size);
-int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
- struct iovec *iov);
-int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
- const struct iovec *from, int from_offset,
- int len);
-int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
- int offset, size_t count);
-int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
- const struct iovec *to, int to_offset,
- int size);
+int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+ struct iov_iter *to, int size);
+static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+ struct msghdr *msg, int size)
+{
+ return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
+}
+int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
+ struct msghdr *msg);
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_vlan_pop(struct sk_buff *skb);
+int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+
+static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
+{
+ /* XXX: stripping const */
+ return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+}
+
+static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
+{
+ return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+}
struct skb_checksum_ops {
__wsum (*update)(const void *mem, int len, __wsum wsum);
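A minimal sketch of skb_put_padto() in a hypothetical transmit path, for hardware that does not pad short frames on its own:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int foo_xmit_prepare(struct sk_buff *skb)
{
	/* grow and zero the frame up to the 60-byte Ethernet minimum;
	 * on failure the skb has already been freed for us */
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;
	return 0;
}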
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8a2457d42fc8..9a139b637069 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @memcg: pointer to the memcg this cache belongs to
* @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
- * @nr_pages: number of pages that belongs to this cache.
*/
struct memcg_cache_params {
bool is_root_cache;
@@ -506,7 +505,6 @@ struct memcg_cache_params {
struct mem_cgroup *memcg;
struct list_head list;
struct kmem_cache *root_cache;
- atomic_t nr_pages;
};
};
};
diff --git a/include/linux/socket.h b/include/linux/socket.h
index bb9b83640070..6e49a14365dc 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -47,16 +47,25 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
- struct iovec *msg_iov; /* scatter/gather array */
- __kernel_size_t msg_iovlen; /* # elements in msg_iov */
+ struct iov_iter msg_iter; /* data */
void *msg_control; /* ancillary data */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
unsigned int msg_flags; /* flags on received message */
};
+
+struct user_msghdr {
+ void __user *msg_name; /* ptr to socket address structure */
+ int msg_namelen; /* size of socket address structure */
+ struct iovec __user *msg_iov; /* scatter/gather array */
+ __kernel_size_t msg_iovlen; /* # elements in msg_iov */
+ void __user *msg_control; /* ancillary data */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ unsigned int msg_flags; /* flags on received message */
+};
/* For recvmmsg/sendmmsg */
struct mmsghdr {
- struct msghdr msg_hdr;
+ struct user_msghdr msg_hdr;
unsigned int msg_len;
};
@@ -94,6 +103,10 @@ struct cmsghdr {
(cmsg)->cmsg_len <= (unsigned long) \
((mhdr)->msg_controllen - \
((char *)(cmsg) - (char *)(mhdr)->msg_control)))
+#define for_each_cmsghdr(cmsg, msg) \
+ for (cmsg = CMSG_FIRSTHDR(msg); \
+ cmsg; \
+ cmsg = CMSG_NXTHDR(msg, cmsg))
/*
* Get the next cmsg header
@@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
extern unsigned long iov_pages(const struct iovec *iov, int offset,
unsigned long nr_segs);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
-extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
-extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
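
for_each_cmsghdr() replaces the open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loops scattered through protocol sendmsg() handlers. A hedged sketch of a control-message walk; the SOL_SOCKET filtering is only illustrative:

	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		/* inspect cmsg->cmsg_type and CMSG_DATA(cmsg) here */
	}
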
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 46d188a9947c..a6ef2a8e6de4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi)
extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);
+static inline bool
+spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
+{
+ return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
+}
+
#endif /* __LINUX_SPI_H */
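
spi_transfer_is_last() lets a controller driver's transfer_one() hook tell whether the transfer it was handed closes the current message, e.g. to decide when chip select may be released. A hedged sketch; example_release_chipselect() is a hypothetical driver helper:

	static int example_transfer_one(struct spi_master *master,
					struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		bool last = spi_transfer_is_last(master, xfer);

		/* ... program and run the transfer on the controller ... */

		if (last && !xfer->cs_change)
			example_release_chipselect(spi);

		return 0;
	}
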
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 91f5eab9e428..f84212cd3b7d 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl);
* this structure.
* @probe: binds this driver to a SPMI device.
* @remove: unbinds this driver from the SPMI device.
- * @shutdown: standard shutdown callback used during powerdown/halt.
- * @suspend: standard suspend callback used during system suspend.
- * @resume: standard resume callback used during system resume.
*
* If PM runtime support is desired for a slave, a device driver can call
* pm_runtime_put() from their probe() routine (and a balancing
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 115b570e3bff..669045ab73f3 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_STACKTRACE_H
#define __LINUX_STACKTRACE_H
+#include <linux/types.h>
+
struct task_struct;
struct pt_regs;
@@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
+extern int snprint_stack_trace(char *buf, size_t size,
+ struct stack_trace *trace, int spaces);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
@@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace);
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
+# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
#endif
#endif
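
snprint_stack_trace() is the buffer-based counterpart of print_stack_trace(), useful when a trace has to be emitted through something other than printk. A minimal hedged sketch, with arbitrary buffer sizes:

	unsigned long entries[16];
	char buf[256];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);
	snprint_stack_trace(buf, sizeof(buf), &trace, 1);	/* indent by 1 space */
	pr_info("%s", buf);
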
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 21678464883a..6f22cfeef5e3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);
/* statistics for svc_pool structures */
struct svc_pool_stats {
- unsigned long packets;
+ atomic_long_t packets;
unsigned long sockets_queued;
- unsigned long threads_woken;
- unsigned long threads_timedout;
+ atomic_long_t threads_woken;
+ atomic_long_t threads_timedout;
};
/*
@@ -45,12 +45,13 @@ struct svc_pool_stats {
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_threads; /* idle server threads */
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
- int sp_task_pending;/* has pending task */
+#define SP_TASK_PENDING (0) /* still work to do even if no
+ * xprt is queued. */
+ unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
/*
@@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
* processed.
*/
struct svc_rqst {
- struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
+ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
struct sockaddr_storage rq_addr; /* peer address */
@@ -236,7 +237,6 @@ struct svc_rqst {
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- bool rq_usedeferral; /* use deferral */
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
@@ -253,9 +253,17 @@ struct svc_rqst {
u32 rq_vers; /* program version */
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
- unsigned short
- rq_secure : 1; /* secure port */
- unsigned short rq_local : 1; /* local request */
+ int rq_cachetype; /* catering to nfsd */
+#define RQ_SECURE (0) /* secure port */
+#define RQ_LOCAL (1) /* local request */
+#define RQ_USEDEFERRAL (2) /* use deferral */
+#define RQ_DROPME (3) /* drop current reply */
+#define RQ_SPLICE_OK (4) /* turned off in gss privacy
+ * to prevent encrypting page
+ * cache pages */
+#define RQ_VICTIM (5) /* about to be shut down */
+#define RQ_BUSY (6) /* request is busy */
+ unsigned long rq_flags; /* flags field */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
@@ -271,16 +279,12 @@ struct svc_rqst {
struct cache_req rq_chandle; /* handle passed to caches for
* request delaying
*/
- bool rq_dropme;
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- int rq_cachetype;
struct svc_cacherep * rq_cacherep; /* cache info */
- bool rq_splice_ok; /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
struct task_struct *rq_task; /* service thread */
+ spinlock_t rq_lock; /* per-request lock */
};
#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
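
The per-request booleans (rq_secure, rq_local, rq_usedeferral, rq_dropme, rq_splice_ok) are folded into the atomic rq_flags word, so nfsd and the sunrpc core manipulate them with the regular bit operations. A hedged fragment, where 'rqstp' is a struct svc_rqst pointer:

	/* formerly rqstp->rq_secure */
	bool secure = test_bit(RQ_SECURE, &rqstp->rq_flags);

	/* formerly rqstp->rq_splice_ok = false (gss privacy path) */
	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

	/* formerly rqstp->rq_dropme = true */
	set_bit(RQ_DROPME, &rqstp->rq_flags);
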
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ce6e4182a5b2..79f6f8f3dc0a 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,10 +63,9 @@ struct svc_xprt {
#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
#define XPT_DEFERRED 8 /* deferred request pending */
#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_DETACHED 10 /* detached from tempsocks list */
-#define XPT_LISTENER 11 /* listening endpoint */
-#define XPT_CACHE_AUTH 12 /* cache auth info */
-#define XPT_LOCAL 13 /* connection from loopback interface */
+#define XPT_LISTENER 10 /* listening endpoint */
+#define XPT_CACHE_AUTH 11 /* cache auth info */
+#define XPT_LOCAL 12 /* connection from loopback interface */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 37a585beef5c..34e8b60ab973 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -102,14 +102,6 @@ union swap_header {
} info;
};
- /* A swap entry has to fit into a "unsigned long", as
- * the entry is hidden in the "index" field of the
- * swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
-
/*
* current->reclaim_state points to one of these when a task is running
* memory reclaim
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bda9b81357cc..85893d744901 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -25,7 +25,7 @@ struct linux_dirent64;
struct list_head;
struct mmap_arg_struct;
struct msgbuf;
-struct msghdr;
+struct user_msghdr;
struct mmsghdr;
struct msqid_ds;
struct new_utsname;
@@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
struct sockaddr __user *, int);
-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags);
asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
struct sockaddr __user *, int __user *);
-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct timespec __user *timeout);
@@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+
+asmlinkage long sys_execveat(int dfd, const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp, int flags);
+
#endif
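
sys_execveat() is the new fd-based exec entry point. A hedged user-space sketch (not part of this header); it assumes a uapi/libc that already defines __NR_execveat, and the path is only an example:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int fd = open("/usr/bin/true", O_PATH | O_CLOEXEC);
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };

	/* execute the file referred to by fd itself */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
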
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index f97d0dbb59fa..ddad16148bd6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -70,6 +70,8 @@ struct attribute_group {
* for examples..
*/
+#define SYSFS_PREALLOC 010000
+
#define __ATTR(_name, _mode, _show, _store) { \
.attr = {.name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
@@ -77,6 +79,13 @@ struct attribute_group {
.store = _store, \
}
+#define __ATTR_PREALLOC(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), \
+ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\
+ .show = _show, \
+ .store = _store, \
+}
+
#define __ATTR_RO(_name) { \
.attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
.show = _name##_show, \
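
SYSFS_PREALLOC and __ATTR_PREALLOC mark attributes whose buffer is allocated when the file is opened rather than on every access, so the show()/store() path cannot fail on memory allocation. A hedged sketch of a preallocated device attribute; the names are illustrative:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	static struct device_attribute dev_attr_example =
		__ATTR_PREALLOC(example, 0444, example_show, NULL);
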
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c2dee7deefa8..67309ece0772 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -130,7 +130,7 @@ struct tcp_sock {
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
u16 tcp_header_len; /* Bytes of tcp header to send */
- u16 xmit_size_goal_segs; /* Goal for segmenting output packets */
+ u16 gso_segs; /* Max number of segs per GSO packet */
/*
* Header prediction flags
@@ -162,7 +162,7 @@ struct tcp_sock {
struct {
struct sk_buff_head prequeue;
struct task_struct *task;
- struct iovec *iov;
+ struct msghdr *msg;
int memory;
int len;
} ucopy;
@@ -204,10 +204,10 @@ struct tcp_sock {
u16 urg_data; /* Saved octet of OOB data and control flags */
u8 ecn_flags; /* ECN status bits. */
- u8 reordering; /* Packet reordering metric. */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
+ u32 reordering; /* Packet reordering metric. */
u32 snd_up; /* Urgent pointer */
- u8 keepalive_probes; /* num of allowed keep alive probes */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index ef90838b36a0..c611a02fbc51 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -29,10 +29,10 @@
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <uapi/linux/thermal.h>
#define THERMAL_TRIPS_NONE -1
#define THERMAL_MAX_TRIPS 12
-#define THERMAL_NAME_LENGTH 20
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
@@ -49,11 +49,6 @@
#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-/* Adding event notification support elements */
-#define THERMAL_GENL_FAMILY_NAME "thermal_event"
-#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
-
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
@@ -86,30 +81,6 @@ enum thermal_trend {
THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
-/* Events supported by Thermal Netlink */
-enum events {
- THERMAL_AUX0,
- THERMAL_AUX1,
- THERMAL_CRITICAL,
- THERMAL_DEV_FAULT,
-};
-
-/* attributes of thermal_genl_family */
-enum {
- THERMAL_GENL_ATTR_UNSPEC,
- THERMAL_GENL_ATTR_EVENT,
- __THERMAL_GENL_ATTR_MAX,
-};
-#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
-
-/* commands supported by the thermal_genl_family */
-enum {
- THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_EVENT,
- __THERMAL_GENL_CMD_MAX,
-};
-#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
-
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -289,19 +260,49 @@ struct thermal_genl_event {
enum events event;
};
+/**
+ * struct thermal_zone_of_device_ops - callbacks for handling DT-based zones
+ *
+ * Mandatory:
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ *
+ * Optional:
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_emul_temp: a pointer to a function that sets sensor emulated
+ * temperature.
+ */
+struct thermal_zone_of_device_ops {
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+ int (*set_emul_temp)(void *, unsigned long);
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *));
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id,
- void *data, int (*get_temp)(void *, long *),
- int (*get_trend)(void *, long *))
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
{
return NULL;
}
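
thermal_zone_of_sensor_register() now takes a struct thermal_zone_of_device_ops instead of bare function pointers, which makes room for optional callbacks such as set_emul_temp. A hedged registration sketch; the example_* names and the private 'sensor' cookie are assumptions:

	static int example_get_temp(void *data, long *temp)
	{
		struct example_sensor *sensor = data;

		*temp = example_read_millicelsius(sensor);	/* hypothetical read */
		return 0;
	}

	static const struct thermal_zone_of_device_ops example_ops = {
		.get_temp = example_get_temp,
	};

	/* in probe(): */
	tz = thermal_zone_of_sensor_register(dev, 0, sensor, &example_ops);
	if (IS_ERR(tz))
		return PTR_ERR(tz);
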
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5171ef8f7b85..7d66ae508e5c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -284,7 +284,7 @@ struct tty_struct {
#define N_TTY_BUF_SIZE 4096
- unsigned char closing:1;
+ int closing;
unsigned char *write_buf;
int write_cnt;
/* If the tty has a pending do_SAK, queue it here - akpm */
@@ -316,12 +316,10 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_CLOSING 7 /* ->close() in progress */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
-#define TTY_HUPPING 21 /* ->hangup() in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -437,14 +435,13 @@ extern int is_ignored(int sig);
extern int tty_signal(int sig, struct tty_struct *tty);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
-extern void tty_unhangup(struct file *filp);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void no_tty(void);
extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
-extern void tty_buffer_flush(struct tty_struct *tty);
+extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
@@ -498,9 +495,6 @@ extern int tty_init_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
-extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
-extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
-
extern struct mutex tty_mutex;
extern spinlock_t tty_files_lock;
@@ -562,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
-extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty);
+extern void tty_ldisc_release(struct tty_struct *tty);
extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
@@ -623,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-/* serial.c */
-
-extern void serial_console_init(void);
-
-/* pcxx.c */
-
-extern int pcxe_open(struct tty_struct *tty, struct file *filp);
-
/* vt.c */
extern int vt_ioctl(struct tty_struct *tty,
@@ -643,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
- struct tty_struct *tty2);
-extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2);
-
+extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
+extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
+extern void tty_set_lock_subclass(struct tty_struct *tty);
/*
* this shall be called only from where BTM is held (like close)
*
diff --git a/include/linux/uio.h b/include/linux/uio.h
index bd8569a14c4a..a41e252396c0 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -131,7 +131,6 @@ size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index baa81718d985..32c0e83d6239 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -35,7 +35,7 @@ struct uio_map;
struct uio_mem {
const char *name;
phys_addr_t addr;
- unsigned long size;
+ resource_size_t size;
int memtype;
void __iomem *internal_addr;
struct uio_map *map;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index e14c09a45c5a..535997a6681b 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -13,11 +13,12 @@ struct ci_hdrc_platform_data {
/* offset of the capability registers */
uintptr_t capoffset;
unsigned power_budget;
- struct usb_phy *phy;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
-#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
#define CI_HDRC_DISABLE_STREAMING BIT(3)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c330f5ef42cf..3d87defcc527 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver(
* @b_vendor_code: bMS_VendorCode part of the OS string
* @use_os_string: false by default, interested gadgets set it
* @os_desc_config: the configuration to be used with OS descriptors
+ * @setup_pending: true when setup request is queued but not completed
+ * @os_desc_pending: true when os_desc request is queued but not completed
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
@@ -488,6 +490,9 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
+
+ unsigned setup_pending:1;
+ unsigned os_desc_pending:1;
};
extern int usb_string_id(struct usb_composite_dev *c);
@@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
extern void composite_disconnect(struct usb_gadget *gadget);
extern int composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl);
+extern void composite_suspend(struct usb_gadget *gadget);
+extern void composite_resume(struct usb_gadget *gadget);
/*
* Some systems will need runtime overrides for the product identifiers
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h
new file mode 100644
index 000000000000..7344d9e591cc
--- /dev/null
+++ b/include/linux/usb/ehci-dbgp.h
@@ -0,0 +1,83 @@
+/*
+ * Standalone EHCI usb debug driver
+ *
+ * Originally written by:
+ * Eric W. Biederman <ebiederm@xmission.com> and
+ * Yinghai Lu <yhlu.kernel@gmail.com>
+ *
+ * Changes for early/late printk and HW errata:
+ * Jason Wessel <jason.wessel@windriver.com>
+ * Copyright (C) 2009 Wind River Systems, Inc.
+ *
+ */
+
+#ifndef __LINUX_USB_EHCI_DBGP_H
+#define __LINUX_USB_EHCI_DBGP_H
+
+#include <linux/console.h>
+#include <linux/types.h>
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+extern int early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+#ifdef CONFIG_XEN_DOM0
+extern int xen_dbgp_reset_prep(struct usb_hcd *);
+extern int xen_dbgp_external_startup(struct usb_hcd *);
+#else
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from ehci host driver to ehci debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+#endif /* __LINUX_USB_EHCI_DBGP_H */
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99af5d54..966889a20ea3 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -19,6 +19,8 @@
#ifndef __LINUX_USB_EHCI_DEF_H
#define __LINUX_USB_EHCI_DEF_H
+#include <linux/usb/ehci-dbgp.h>
+
/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
/* Section 2.2 Host Controller Capability Registers */
@@ -190,67 +192,4 @@ struct ehci_regs {
#define USBMODE_EX_HC (3<<0) /* host controller mode */
};
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct ehci_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-};
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-#include <linux/init.h>
-extern int __init early_dbgp_init(char *s);
-extern struct console early_dbgp_console;
-#endif /* CONFIG_EARLY_PRINTK_DBGP */
-
-struct usb_hcd;
-
-#ifdef CONFIG_XEN_DOM0
-extern int xen_dbgp_reset_prep(struct usb_hcd *);
-extern int xen_dbgp_external_startup(struct usb_hcd *);
-#else
-static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return 1; /* Shouldn't this be 0? */
-}
-
-static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
-{
- return -1;
-}
-#endif
-
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-/* Call backs from ehci host driver to ehci debug driver */
-extern int dbgp_external_startup(struct usb_hcd *);
-extern int dbgp_reset_prep(struct usb_hcd *hcd);
-#else
-static inline int dbgp_reset_prep(struct usb_hcd *hcd)
-{
- return xen_dbgp_reset_prep(hcd);
-}
-static inline int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd);
-}
-#endif
-
#endif /* __LINUX_USB_EHCI_DEF_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 522cafe26790..70ddb3943b62 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -490,8 +490,7 @@ struct usb_gadget_ops {
void (*get_config_params)(struct usb_dcd_config_params *);
int (*udc_start)(struct usb_gadget *,
struct usb_gadget_driver *);
- int (*udc_stop)(struct usb_gadget *,
- struct usb_gadget_driver *);
+ int (*udc_stop)(struct usb_gadget *);
};
/**
@@ -925,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
struct usb_gadget *gadget, void (*release)(struct device *dev));
extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
extern void usb_del_gadget_udc(struct usb_gadget *gadget);
-extern int udc_attach_driver(const char *name,
+extern int usb_udc_attach_driver(const char *name,
struct usb_gadget_driver *driver);
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 668898e29d0e..086bf13307e6 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -379,6 +379,9 @@ struct hc_driver {
int (*disable_usb3_lpm_timeout)(struct usb_hcd *,
struct usb_device *, enum usb3_link_state state);
int (*find_raw_port_number)(struct usb_hcd *, int);
+ /* Call for power on/off the port if necessary */
+ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
+
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 154332b7c8c0..52661c5da690 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,15 +9,20 @@
#ifndef __LINUX_USB_OTG_H
#define __LINUX_USB_OTG_H
+#include <linux/phy/phy.h>
#include <linux/usb/phy.h>
struct usb_otg {
u8 default_a;
- struct usb_phy *phy;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
struct usb_bus *host;
struct usb_gadget *gadget;
+ enum usb_otg_state state;
+
/* bind/unbind the host controller */
int (*set_host)(struct usb_otg *otg, struct usb_bus *host);
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 353053a33f21..f499c23e6342 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -77,7 +77,6 @@ struct usb_phy {
unsigned int flags;
enum usb_phy_type type;
- enum usb_otg_state state;
enum usb_phy_events last_event;
struct usb_otg *otg;
@@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *);
extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
const char *phy_dev_name);
+extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
#else
static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
@@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index,
{
return -EOPNOTSUPP;
}
+
+static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
+{
+}
#endif
static inline int
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d5952bb66752..9fd9e481ea98 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param {
int d0_rx_id;
int d1_tx_id;
int d1_rx_id;
+ int d2_tx_id;
+ int d2_rx_id;
+ int d3_tx_id;
+ int d3_rx_id;
/*
* option:
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index e95372654f09..8297e5b341d8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -3,6 +3,7 @@
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
} extent[UID_GID_MAP_MAX_EXTENTS];
};
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -26,7 +31,8 @@ struct user_namespace {
int level;
kuid_t owner;
kgid_t group;
- unsigned int proc_inum;
+ struct ns_common ns;
+ unsigned long flags;
/* Register of per-UID persistent keyrings for this namespace */
#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations;
extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ return true;
+}
#endif
#endif /* _LINUX_USER_H */
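
userns_may_setgroups(), together with the new USERNS_SETGROUPS_ALLOWED flag, lets setgroups() be disabled for a user namespace before its gid map is written. A hedged fragment of the kind of check the setgroups() path performs; error handling is trimmed:

	/* deny setgroups() once the namespace has opted out */
	if (!userns_may_setgroups(current_user_ns()))
		return -EPERM;
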
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 239e27733d6c..5093f58ae192 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
#include <linux/err.h>
#include <uapi/linux/utsname.h>
@@ -23,7 +24,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
- unsigned int proc_inum;
+ struct ns_common ns;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 65261a7244fc..28f0e65b9a11 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,10 +75,13 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
bool virtqueue_is_broken(struct virtqueue *vq);
+void *virtqueue_get_avail(struct virtqueue *vq);
+void *virtqueue_get_used(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
- * @failed: saved value for CONFIG_S_FAILED bit (for restore)
+ * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
@@ -101,11 +104,12 @@ struct virtio_device {
const struct virtio_config_ops *config;
const struct vringh_config_ops *vringh_config;
struct list_head vqs;
- /* Note that this is a Linux set_bit-style bitmap. */
- unsigned long features[1];
+ u64 features;
void *priv;
};
+bool virtio_device_is_legacy_only(struct virtio_device_id id);
+
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
return container_of(_dev, struct virtio_device, dev);
@@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev);
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
* @feature_table_size: number of entries in the feature table array.
+ * @feature_table_legacy: same as feature_table but when working in legacy mode.
+ * @feature_table_size_legacy: number of entries in feature table legacy array.
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function to call when a device is removed.
* @config_changed: optional function to call when the device configuration
@@ -138,6 +144,8 @@ struct virtio_driver {
const struct virtio_device_id *id_table;
const unsigned int *feature_table;
unsigned int feature_table_size;
+ const unsigned int *feature_table_legacy;
+ unsigned int feature_table_size_legacy;
int (*probe)(struct virtio_device *dev);
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 000000000000..51865d05b267
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_VIRTIO_BYTEORDER_H
+#define _LINUX_VIRTIO_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/virtio_types.h>
+
+/*
+ * Low-level memory accessors for handling virtio in modern little endian and in
+ * compatibility native endian format.
+ */
+
+static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return (__force u16)val;
+}
+
+static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __virtio16)cpu_to_le16(val);
+ else
+ return (__force __virtio16)val;
+}
+
+static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return (__force u32)val;
+}
+
+static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __virtio32)cpu_to_le32(val);
+ else
+ return (__force __virtio32)val;
+}
+
+static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return (__force u64)val;
+}
+
+static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __virtio64)cpu_to_le64(val);
+ else
+ return (__force __virtio64)val;
+}
+
+#endif /* _LINUX_VIRTIO_BYTEORDER */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 7f4ef66873ef..7979f850e7ac 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
#include <uapi/linux/virtio_config.h>
/**
@@ -46,6 +47,7 @@
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
+ * Returns 0 on success or an error status
* @bus_name: return the bus name associated with the device
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
@@ -66,8 +68,8 @@ struct virtio_config_ops {
vq_callback_t *callbacks[],
const char *names[]);
void (*del_vqs)(struct virtio_device *);
- u32 (*get_features)(struct virtio_device *vdev);
- void (*finalize_features)(struct virtio_device *vdev);
+ u64 (*get_features)(struct virtio_device *vdev);
+ int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
};
@@ -77,23 +79,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit);
/**
- * virtio_has_feature - helper to determine if this device has this feature.
+ * __virtio_test_bit - helper to test feature bits. For use by transports.
+ * Devices should normally use virtio_has_feature,
+ * which includes more checks.
* @vdev: the device
* @fbit: the feature bit
*/
-static inline bool virtio_has_feature(const struct virtio_device *vdev,
+static inline bool __virtio_test_bit(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ return vdev->features & BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_set_bit - helper to set feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_set_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ vdev->features |= BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_clear_bit - helper to clear feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_clear_bit(struct virtio_device *vdev,
unsigned int fbit)
{
/* Did you forget to fix assumptions on max features? */
if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 32);
+ BUILD_BUG_ON(fbit >= 64);
else
- BUG_ON(fbit >= 32);
+ BUG_ON(fbit >= 64);
+ vdev->features &= ~BIT_ULL(fbit);
+}
+
+/**
+ * virtio_has_feature - helper to determine if this device has this feature.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool virtio_has_feature(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
- return test_bit(fbit, vdev->features);
+ return __virtio_test_bit(vdev, fbit);
}
static inline
@@ -152,6 +201,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+/* Memory accessors */
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+ return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+ return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+ return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+ return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+ return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
do { \
@@ -239,12 +319,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
{
u16 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ return virtio16_to_cpu(vdev, (__force __virtio16)ret);
}
static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val)
{
+ val = (__force u16)cpu_to_virtio16(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -253,12 +334,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
{
u32 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ return virtio32_to_cpu(vdev, (__force __virtio32)ret);
}
static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val)
{
+ val = (__force u32)cpu_to_virtio32(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
@@ -267,12 +349,13 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
{
u64 ret;
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
+ return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val)
{
+ val = (__force u64)cpu_to_virtio64(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val));
}
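
With device features widened to a plain u64, transports poke bits through __virtio_test_bit()/__virtio_set_bit()/__virtio_clear_bit(), while drivers keep using virtio_has_feature() and now get endian-correct config reads for free. A hedged fragment, with an arbitrary config offset:

	u16 val;
	bool modern;

	/* transport code: operate directly on the 64-bit vdev->features */
	modern = __virtio_test_bit(vdev, VIRTIO_F_VERSION_1);

	/* driver code: config reads are byte-swapped according to VERSION_1 */
	val = virtio_cread16(vdev, 0 /* offset of a 16-bit config field */);
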
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 730334cdf037..9246d32dc973 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
+ VMACACHE_FULL_FLUSHES,
#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 023430e265fe..5691f752ce8f 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -24,6 +24,7 @@
#define VMCI_KERNEL_API_VERSION_2 2
#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
+struct msghdr;
typedef void (vmci_device_shutdown_fn) (void *device_registration,
void *user_data);
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
void *iov, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
- void *iov, size_t iov_size, int mode);
-ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
+ struct msghdr *msg, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
int mode);
#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 57585c7004a4..4376beeb28c2 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -63,7 +63,7 @@ struct vpbe_output {
* output basis. If per mode is needed, we may have to move this to
* mode_info structure
*/
- enum v4l2_mbus_pixelcode if_params;
+ u32 if_params;
};
/* encoder configuration info */
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index 637749a91432..fa0247ad815f 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -70,8 +70,6 @@ struct vpbe_disp_buffer {
/* vpbe display object structure */
struct vpbe_layer {
- /* number of buffers in fbuffers */
- unsigned int numbuffers;
/* Pointer to the vpbe_display */
struct vpbe_display *disp_dev;
/* Pointer pointing to current v4l2_buffer */
@@ -91,10 +89,6 @@ struct vpbe_layer {
/* V4l2 specific parameters */
/* Identifies video device for this layer */
struct video_device video_dev;
- /* This field keeps track of type of buffer exchange mechanism user
- * has selected
- */
- enum v4l2_memory memory;
/* Used to store pixel format */
struct v4l2_pix_format pix_fmt;
enum v4l2_field buf_field;
@@ -106,12 +100,8 @@ struct vpbe_layer {
unsigned char window_enable;
/* number of open instances of the layer */
unsigned int usrs;
- /* number of users performing IO */
- unsigned int io_usrs;
/* Indicates id of the field which is being displayed */
unsigned int field_id;
- /* Indicates whether streaming started */
- unsigned char started;
/* Identifies device object */
enum vpbe_display_device_id device_id;
/* facilitation of ioctl ops lock by v4l2*/
@@ -131,17 +121,6 @@ struct vpbe_display {
struct osd_state *osd_device;
};
-/* File handle structure */
-struct vpbe_fh {
- struct v4l2_fh fh;
- /* vpbe device structure */
- struct vpbe_display *disp_dev;
- /* pointer to layer object for opened device */
- struct vpbe_layer *layer;
- /* Indicates whether this file handle is doing IO */
- unsigned char io_allowed;
-};
-
struct buf_config_params {
unsigned char min_numbuffers;
unsigned char numbuffers[VPBE_DISPLAY_MAX_DEVICES];
diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h
index 476fafc2f522..3dbd20026107 100644
--- a/include/media/davinci/vpbe_venc.h
+++ b/include/media/davinci/vpbe_venc.h
@@ -30,11 +30,10 @@
#define VENC_SECOND_FIELD BIT(2)
struct venc_platform_data {
- int (*setup_pinmux)(enum v4l2_mbus_pixelcode if_type,
- int field);
+ int (*setup_pinmux)(u32 if_type, int field);
int (*setup_clock)(enum vpbe_enc_timings_type type,
unsigned int pixclock);
- int (*setup_if_config)(enum v4l2_mbus_pixelcode pixcode);
+ int (*setup_if_config)(u32 pixcode);
/* Number of LCD outputs supported */
int num_lcd_outputs;
struct vpbe_if_params *lcd_if_params;
diff --git a/include/media/exynos-fimc.h b/include/media/exynos-fimc.h
index aa44660e2041..69bcd2a07d5c 100644
--- a/include/media/exynos-fimc.h
+++ b/include/media/exynos-fimc.h
@@ -101,7 +101,7 @@ struct fimc_source_info {
* @flags: flags indicating which operation mode format applies to
*/
struct fimc_fmt {
- enum v4l2_mbus_pixelcode mbus_code;
+ u32 mbus_code;
char *name;
u32 fourcc;
u32 color;
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 78f0637ca68d..05e7ad5d2c8b 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -29,14 +29,13 @@ struct lirc_buffer {
/* Using chunks instead of bytes pretends to simplify boundary checking
* And should allow for some performance fine tuning later */
struct kfifo fifo;
- u8 fifo_initialized;
};
static inline void lirc_buffer_clear(struct lirc_buffer *buf)
{
unsigned long flags;
- if (buf->fifo_initialized) {
+ if (kfifo_initialized(&buf->fifo)) {
spin_lock_irqsave(&buf->fifo_lock, flags);
kfifo_reset(&buf->fifo);
spin_unlock_irqrestore(&buf->fifo_lock, flags);
@@ -56,17 +55,14 @@ static inline int lirc_buffer_init(struct lirc_buffer *buf,
buf->chunk_size = chunk_size;
buf->size = size;
ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL);
- if (ret == 0)
- buf->fifo_initialized = 1;
return ret;
}
static inline void lirc_buffer_free(struct lirc_buffer *buf)
{
- if (buf->fifo_initialized) {
+ if (kfifo_initialized(&buf->fifo)) {
kfifo_free(&buf->fifo);
- buf->fifo_initialized = 0;
} else
WARN(1, "calling %s on an uninitialized lirc_buffer\n",
__func__);
diff --git a/include/media/radio-si4713.h b/include/media/radio-si4713.h
deleted file mode 100644
index f6aae29c7741..000000000000
--- a/include/media/radio-si4713.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * include/media/radio-si4713.h
- *
- * Board related data definitions for Si4713 radio transmitter chip.
- *
- * Copyright (c) 2009 Nokia Corporation
- * Contact: Eduardo Valentin <eduardo.valentin@nokia.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- */
-
-#ifndef RADIO_SI4713_H
-#define RADIO_SI4713_H
-
-#include <linux/i2c.h>
-
-#define SI4713_NAME "radio-si4713"
-
-/*
- * Platform dependent definition
- */
-struct radio_si4713_platform_data {
- int i2c_bus;
- struct i2c_board_info *subdev_board_info;
-};
-
-#endif /* ifndef RADIO_SI4713_H*/
diff --git a/include/media/si4713.h b/include/media/si4713.h
index f98a0a7af61c..be4f58e2440b 100644
--- a/include/media/si4713.h
+++ b/include/media/si4713.h
@@ -23,9 +23,7 @@
* Platform dependent definition
*/
struct si4713_platform_data {
- const char * const *supply_names;
- unsigned supplies;
- int gpio_reset; /* < 0 if not used */
+ bool is_platform_device;
};
/*
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 865246b00127..2f6261f3e570 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -296,7 +296,7 @@ const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
* format setup.
*/
struct soc_camera_format_xlate {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
const struct soc_mbus_pixelfmt *host_fmt;
};
diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
index d33f6d059692..2ff773785fb6 100644
--- a/include/media/soc_mediabus.h
+++ b/include/media/soc_mediabus.h
@@ -91,16 +91,16 @@ struct soc_mbus_pixelfmt {
* @fmt: pixel format description
*/
struct soc_mbus_lookup {
- enum v4l2_mbus_pixelcode code;
+ u32 code;
struct soc_mbus_pixelfmt fmt;
};
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
- enum v4l2_mbus_pixelcode code,
+ u32 code,
const struct soc_mbus_lookup *lookup,
int n);
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
- enum v4l2_mbus_pixelcode code);
+ u32 code);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height);
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 48f974866f13..1cc0c5ba16b3 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -80,24 +80,9 @@
/* ------------------------------------------------------------------------- */
-/* Control helper functions */
+/* Control helper function */
-int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
- const char * const *menu_items);
-const char *v4l2_ctrl_get_name(u32 id);
-const char * const *v4l2_ctrl_get_menu(u32 id);
-const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def);
-int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu,
- struct v4l2_queryctrl *qctrl, const char * const *menu_items);
-#define V4L2_CTRL_MENU_IDS_END (0xffffffff)
-int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids);
-
-/* Note: ctrl_classes points to an array of u32 pointers. Each u32 array is a
- 0-terminated array of control IDs. Each array must be sorted low to high
- and belong to the same control class. The array of u32 pointers must also
- be sorted, from low class IDs to high class IDs. */
-u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id);
/* ------------------------------------------------------------------------- */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index b7cd7a665e35..911f3e542834 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -670,6 +670,31 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
*/
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
+/** v4l2_ctrl_get_name() - Get the name of the control
+ * @id: The control ID.
+ *
+ * This function returns the name of the given control ID or NULL if it isn't
+ * a known control.
+ */
+const char *v4l2_ctrl_get_name(u32 id);
+
+/** v4l2_ctrl_get_menu() - Get the menu string array of the control
+ * @id: The control ID.
+ *
+ * This function returns the NULL-terminated menu string array of the
+ * given control ID or NULL if it isn't a known menu control.
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id);
+
+/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
+ * @id: The control ID.
+ * @len: The size of the integer array.
+ *
+ * This function returns the integer array of the given control ID or NULL
+ * if it isn't a known integer menu control.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
+
/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
* @ctrl: The control.
*
diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h
index 10daf92ff1ab..a07d7a683bd9 100644
--- a/include/media/v4l2-image-sizes.h
+++ b/include/media/v4l2-image-sizes.h
@@ -25,10 +25,19 @@
#define QVGA_WIDTH 320
#define QVGA_HEIGHT 240
+#define SVGA_WIDTH 800
+#define SVGA_HEIGHT 600
+
#define SXGA_WIDTH 1280
#define SXGA_HEIGHT 1024
#define VGA_WIDTH 640
#define VGA_HEIGHT 480
+#define UXGA_WIDTH 1600
+#define UXGA_HEIGHT 1200
+
+#define XGA_WIDTH 1024
+#define XGA_HEIGHT 768
+
#endif /* _IMAGE_SIZES_H */
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 395c4a95a42a..38d960d8dccd 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -94,16 +94,20 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
pix_fmt->height = mbus_fmt->height;
pix_fmt->field = mbus_fmt->field;
pix_fmt->colorspace = mbus_fmt->colorspace;
+ pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc;
+ pix_fmt->quantization = mbus_fmt->quantization;
}
static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
const struct v4l2_pix_format *pix_fmt,
- enum v4l2_mbus_pixelcode code)
+ u32 code)
{
mbus_fmt->width = pix_fmt->width;
mbus_fmt->height = pix_fmt->height;
mbus_fmt->field = pix_fmt->field;
mbus_fmt->colorspace = pix_fmt->colorspace;
+ mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc;
+ mbus_fmt->quantization = pix_fmt->quantization;
mbus_fmt->code = code;
}
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d7465725773d..5860292d42eb 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -341,7 +341,7 @@ struct v4l2_subdev_video_ops {
int (*query_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
- enum v4l2_mbus_pixelcode *code);
+ u32 *code);
int (*enum_mbus_fsizes)(struct v4l2_subdev *sd,
struct v4l2_frmsizeenum *fsize);
int (*g_mbus_fmt)(struct v4l2_subdev *sd,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 6ef2d01197da..bd2cec2d6c3d 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -82,19 +82,23 @@ struct vb2_threadio_data;
* unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
+ void *(*alloc)(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir,
+ gfp_t gfp_flags);
void (*put)(void *buf_priv);
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write);
+ unsigned long size,
+ enum dma_data_direction dma_dir);
void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write);
+ unsigned long size,
+ enum dma_data_direction dma_dir);
void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv);
@@ -270,22 +274,24 @@ struct vb2_buffer {
* queue setup from completing successfully; optional.
* @buf_prepare: called every time the buffer is queued from userspace
* and from the VIDIOC_PREPARE_BUF ioctl; drivers may
- * perform any initialization required before each hardware
- * operation in this callback; drivers that support
- * VIDIOC_CREATE_BUFS must also validate the buffer size;
- * if an error is returned, the buffer will not be queued
- * in driver; optional.
+ * perform any initialization required before each
+ * hardware operation in this callback; drivers can
+ * access/modify the buffer here as it is still synced for
+ * the CPU; drivers that support VIDIOC_CREATE_BUFS must
+ * also validate the buffer size; if an error is returned,
+ * the buffer will not be queued in driver; optional.
* @buf_finish: called before every dequeue of the buffer back to
- * userspace; drivers may perform any operations required
- * before userspace accesses the buffer; optional. The
- * buffer state can be one of the following: DONE and
- * ERROR occur while streaming is in progress, and the
- * PREPARED state occurs when the queue has been canceled
- * and all pending buffers are being returned to their
- * default DEQUEUED state. Typically you only have to do
- * something if the state is VB2_BUF_STATE_DONE, since in
- * all other cases the buffer contents will be ignored
- * anyway.
+ * userspace; the buffer is synced for the CPU, so drivers
+ * can access/modify the buffer contents; drivers may
+ * perform any operations required before userspace
+ * accesses the buffer; optional. The buffer state can be
+ * one of the following: DONE and ERROR occur while
+ * streaming is in progress, and the PREPARED state occurs
+ * when the queue has been canceled and all pending
+ * buffers are being returned to their default DEQUEUED
+ * state. Typically you only have to do something if the
+ * state is VB2_BUF_STATE_DONE, since in all other cases
+ * the buffer contents will be ignored anyway.
* @buf_cleanup: called once before the buffer is freed; drivers may
* perform any additional cleanup; optional.
* @start_streaming: called once to enter 'streaming' state; the driver may
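The reworked buf_prepare/buf_finish documentation above boils down to one rule: the core now hands the driver a buffer that is already synced for the CPU in both callbacks. A minimal sketch of a buf_prepare written under that rule (MY_IMAGE_SIZE and the function name are illustrative, not part of this patch):

#include <media/videobuf2-core.h>

#define MY_IMAGE_SIZE (640 * 480 * 2)	/* hypothetical frame size */

static int my_buf_prepare(struct vb2_buffer *vb)
{
	/* The buffer is synced for the CPU at this point, so plane 0 may
	 * be inspected or patched directly before it goes to hardware.
	 */
	if (vb2_plane_size(vb, 0) < MY_IMAGE_SIZE)
		return -EINVAL;	/* size check needed for VIDIOC_CREATE_BUFS */

	vb2_set_plane_payload(vb, 0, MY_IMAGE_SIZE);
	return 0;
}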
diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h
index 7b89852779af..14ce3068b642 100644
--- a/include/media/videobuf2-dma-sg.h
+++ b/include/media/videobuf2-dma-sg.h
@@ -21,6 +21,9 @@ static inline struct sg_table *vb2_dma_sg_plane_desc(
return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
}
+void *vb2_dma_sg_init_ctx(struct device *dev);
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx);
+
extern const struct vb2_mem_ops vb2_dma_sg_memops;
#endif
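vb2_dma_sg_init_ctx()/vb2_dma_sg_cleanup_ctx() mirror the existing dma-contig context helpers. A rough sketch of the expected driver usage, assuming the usual ERR_PTR() convention for the returned cookie (struct my_cap_dev and the function names are illustrative):

#include <linux/err.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

struct my_cap_dev {
	void *alloc_ctx;		/* dma-sg allocation context */
	struct vb2_queue queue;
};

static int my_cap_init_vb2(struct my_cap_dev *cap, struct device *dev)
{
	/* Bind the allocator to the struct device that owns the DMA
	 * mappings; the cookie is later handed out from queue_setup()
	 * via the alloc_ctxs[] array.
	 */
	cap->alloc_ctx = vb2_dma_sg_init_ctx(dev);
	if (IS_ERR(cap->alloc_ctx))
		return PTR_ERR(cap->alloc_ctx);

	cap->queue.mem_ops = &vb2_dma_sg_memops;
	return 0;
}

static void my_cap_exit_vb2(struct my_cap_dev *cap)
{
	vb2_dma_sg_cleanup_ctx(cap->alloc_ctx);
}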
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index d184df1d0d41..dc03d77ad23b 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -372,12 +372,12 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
return skb->len + uncomp_header - ret;
}
-typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
-
-int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
- const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
- const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
- u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
+int
+lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type,
+ const u8 saddr_len, const u8 *daddr,
+ const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1);
int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len);
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index 085940f7eeec..7d38e2ffd256 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 428277869400..0d87674fb775 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -103,14 +103,14 @@ struct vsock_transport {
int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
struct msghdr *msg, size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
- struct iovec *, size_t len);
+ struct msghdr *, size_t len);
bool (*dgram_allow)(u32 cid, u32 port);
/* STREAM. */
/* TODO: stream_bind() */
- ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
+ ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *,
size_t len, int flags);
- ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
+ ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *,
size_t len);
s64 (*stream_has_data)(struct vsock_sock *);
s64 (*stream_has_space)(struct vsock_sock *);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 6e8f24967308..40129b3838b2 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -129,6 +129,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_INVALID_BDADDR,
+
+ /* When this quirk is set, the duplicate filtering during
+ * scanning is based on Bluetooth device addresses. To allow
+ * RSSI-based updates, restart scanning if needed.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_STRICT_DUPLICATE_FILTER,
};
/* HCI device flags */
@@ -154,6 +163,7 @@ enum {
enum {
HCI_DUT_MODE,
HCI_FORCE_SC,
+ HCI_FORCE_LESC,
HCI_FORCE_STATIC_ADDR,
};
@@ -265,6 +275,7 @@ enum {
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
#define AMP_LINK 0x81
+#define INVALID_LINK 0xff
/* LMP features */
#define LMP_3SLOT 0x01
@@ -332,6 +343,7 @@ enum {
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
#define HCI_LE_PING 0x10
+#define HCI_LE_EXT_SCAN_POLICY 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -401,6 +413,7 @@ enum {
/* The core spec defines 127 as the "not available" value */
#define HCI_TX_POWER_INVALID 127
+#define HCI_RSSI_INVALID 127
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -629,7 +642,7 @@ struct hci_cp_user_passkey_reply {
struct hci_cp_remote_oob_data_reply {
bdaddr_t bdaddr;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433
@@ -721,9 +734,9 @@ struct hci_rp_set_csb {
struct hci_cp_remote_oob_ext_data_reply {
bdaddr_t bdaddr;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define HCI_OP_SNIFF_MODE 0x0803
@@ -930,7 +943,7 @@ struct hci_cp_write_ssp_mode {
struct hci_rp_read_local_oob_data {
__u8 status;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
@@ -1014,9 +1027,9 @@ struct hci_cp_write_sc_support {
struct hci_rp_read_local_oob_ext_data {
__u8 status;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define HCI_OP_READ_LOCAL_VERSION 0x1001
@@ -1463,6 +1476,11 @@ struct hci_ev_cmd_status {
__le16 opcode;
} __packed;
+#define HCI_EV_HARDWARE_ERROR 0x10
+struct hci_ev_hardware_error {
+ __u8 code;
+} __packed;
+
#define HCI_EV_ROLE_CHANGE 0x12
struct hci_ev_role_change {
__u8 status;
@@ -1734,6 +1752,25 @@ struct hci_ev_le_conn_complete {
__u8 clk_accurancy;
} __packed;
+/* Advertising report event types */
+#define LE_ADV_IND 0x00
+#define LE_ADV_DIRECT_IND 0x01
+#define LE_ADV_SCAN_IND 0x02
+#define LE_ADV_NONCONN_IND 0x03
+#define LE_ADV_SCAN_RSP 0x04
+
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+
+#define HCI_EV_LE_ADVERTISING_REPORT 0x02
+struct hci_ev_le_advertising_info {
+ __u8 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
struct hci_ev_le_conn_update_complete {
__u8 status;
@@ -1759,23 +1796,14 @@ struct hci_ev_le_remote_conn_param_req {
__le16 timeout;
} __packed;
-/* Advertising report event types */
-#define LE_ADV_IND 0x00
-#define LE_ADV_DIRECT_IND 0x01
-#define LE_ADV_SCAN_IND 0x02
-#define LE_ADV_NONCONN_IND 0x03
-#define LE_ADV_SCAN_RSP 0x04
-
-#define ADDR_LE_DEV_PUBLIC 0x00
-#define ADDR_LE_DEV_RANDOM 0x01
-
-#define HCI_EV_LE_ADVERTISING_REPORT 0x02
-struct hci_ev_le_advertising_info {
+#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
+struct hci_ev_le_direct_adv_info {
__u8 evt_type;
__u8 bdaddr_type;
bdaddr_t bdaddr;
- __u8 length;
- __u8 data[0];
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __s8 rssi;
} __packed;
/* Internal events generated by Bluetooth stack */
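struct hci_ev_le_advertising_info describes one report inside an LE Advertising Report event; the event itself starts with a report count and packs the reports back to back, each followed by an RSSI byte after data[]. A sketch of the walk (the trailing RSSI placement follows the Bluetooth core specification layout and is an assumption here, as is the function name):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>

static void my_parse_adv_reports(struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi = ev->data[ev->length];	/* RSSI trails the AD data */

		pr_debug("adv from %pMR type 0x%2.2x len %u rssi %d\n",
			 &ev->bdaddr, ev->evt_type, ev->length, rssi);

		/* fixed header + AD data + trailing RSSI byte */
		ptr += sizeof(*ev) + ev->length + 1;
	}
}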
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 37ff1aef0845..3c7827005c25 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -75,6 +75,10 @@ struct discovery_state {
u32 last_adv_flags;
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
+ bool report_invalid_rssi;
+ s8 rssi;
+ u16 uuid_count;
+ u8 (*uuids)[16];
};
struct hci_conn_hash {
@@ -108,6 +112,7 @@ struct smp_csrk {
struct smp_ltk {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
u8 authenticated;
@@ -120,6 +125,7 @@ struct smp_ltk {
struct smp_irk {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
@@ -128,6 +134,7 @@ struct smp_irk {
struct link_key {
struct list_head list;
+ struct rcu_head rcu;
bdaddr_t bdaddr;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
@@ -137,10 +144,11 @@ struct link_key {
struct oob_data {
struct list_head list;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
u8 hash192[16];
- u8 randomizer192[16];
+ u8 rand192[16];
u8 hash256[16];
- u8 randomizer256[16];
+ u8 rand256[16];
};
#define HCI_MAX_SHORT_NAME_LENGTH 10
@@ -303,6 +311,7 @@ struct hci_dev {
__u32 req_result;
void *smp_data;
+ void *smp_bredr_data;
struct discovery_state discovery;
struct hci_conn_hash conn_hash;
@@ -398,6 +407,8 @@ struct hci_conn {
__u16 le_conn_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
+ __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data_len;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
@@ -496,6 +507,17 @@ static inline void discovery_init(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->discovery.all);
INIT_LIST_HEAD(&hdev->discovery.unknown);
INIT_LIST_HEAD(&hdev->discovery.resolve);
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+}
+
+static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
+{
+ hdev->discovery.report_invalid_rssi = true;
+ hdev->discovery.rssi = HCI_RSSI_INVALID;
+ hdev->discovery.uuid_count = 0;
+ kfree(hdev->discovery.uuids);
+ hdev->discovery.uuids = NULL;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -553,6 +575,8 @@ enum {
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
HCI_CONN_DROP,
+ HCI_CONN_PARAM_REMOVAL_PEND,
+ HCI_CONN_NEW_LINK_KEY,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -643,6 +667,26 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}
+static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+ __u8 type = INVALID_LINK;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ type = c->type;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return type;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
@@ -853,6 +897,7 @@ int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
+int hci_reset_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
@@ -894,13 +939,11 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *val, u8 type,
u8 pin_len, bool *persistent);
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
- u8 role);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 type, u8 authenticated,
u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
-struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 addr_type, u8 role);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 role);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
void hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
@@ -915,13 +958,12 @@ void hci_smp_irks_clear(struct hci_dev *hdev);
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr);
+ bdaddr_t *bdaddr, u8 bdaddr_type);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash, u8 *randomizer);
-int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash192, u8 *randomizer192,
- u8 *hash256, u8 *randomizer256);
-int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
+ u8 bdaddr_type, u8 *hash192, u8 *rand192,
+ u8 *hash256, u8 *rand256);
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -972,6 +1014,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \
+ test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \
+ test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1310,9 +1355,8 @@ int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
-void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class);
+void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u32 flags, u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected);
@@ -1349,8 +1393,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *randomizer192, u8 *hash256,
- u8 *randomizer256, u8 status);
+ u8 *rand192, u8 *hash256, u8 *rand256,
+ u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index ead99f032f7a..d1bb342d083f 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -28,6 +28,7 @@
#define __L2CAP_H
#include <asm/unaligned.h>
+#include <linux/atomic.h>
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
@@ -140,6 +141,7 @@ struct l2cap_conninfo {
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
+#define L2CAP_FC_SMP_BREDR 0x80
/* L2CAP Control Field bit masks */
#define L2CAP_CTRL_SAR 0xC000
@@ -254,6 +256,7 @@ struct l2cap_conn_rsp {
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
+#define L2CAP_CID_SMP_BREDR 0x0007
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
#define L2CAP_CID_LE_DYN_END 0x007f
@@ -481,6 +484,7 @@ struct l2cap_chan {
struct hci_conn *hs_hcon;
struct hci_chan *hs_hchan;
struct kref kref;
+ atomic_t nesting;
__u8 state;
@@ -604,10 +608,6 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
- int (*memcpy_fromiovec) (struct l2cap_chan *chan,
- unsigned char *kdata,
- struct iovec *iov,
- int len);
};
struct l2cap_conn {
@@ -617,8 +617,8 @@ struct l2cap_conn {
unsigned int mtu;
__u32 feat_mask;
- __u8 fixed_chan_mask;
- bool hs_enabled;
+ __u8 remote_fixed_chan;
+ __u8 local_fixed_chan;
__u8 info_state;
__u8 info_ident;
@@ -713,6 +713,17 @@ enum {
FLAG_HOLD_HCI_CONN,
};
+/* Lock nesting levels for L2CAP channels. We need these because lockdep
+ * otherwise considers all channels equal and will e.g. complain about a
+ * connection oriented channel triggering SMP procedures or a listening
+ * channel creating and locking a child channel.
+ */
+enum {
+ L2CAP_NESTING_SMP,
+ L2CAP_NESTING_NORMAL,
+ L2CAP_NESTING_PARENT,
+};
+
enum {
L2CAP_TX_STATE_XMIT,
L2CAP_TX_STATE_WAIT_F,
@@ -778,7 +789,7 @@ void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)
{
- mutex_lock(&chan->lock);
+ mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting));
}
static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
@@ -890,31 +901,6 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
return 0;
}
-static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan,
- unsigned char *kdata,
- struct iovec *iov,
- int len)
-{
- /* Following is safe since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- struct kvec *vec = (struct kvec *)iov;
-
- while (len > 0) {
- if (vec->iov_len) {
- int copy = min_t(unsigned int, len, vec->iov_len);
- memcpy(kdata, vec->iov_base, copy);
- len -= copy;
- kdata += copy;
- vec->iov_base += copy;
- vec->iov_len -= copy;
- }
- vec++;
- }
-
- return 0;
-}
-
extern bool disable_ertm;
int l2cap_init_sockets(void);
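For the lockdep annotation above to help, whoever creates a channel has to pick its nesting level before the channel is first locked: a parent (listening) channel gets L2CAP_NESTING_PARENT so it can hold its own lock while locking a freshly created child, and SMP fixed channels get L2CAP_NESTING_SMP. A hedged sketch of that choice (the helper name is illustrative):

#include <linux/atomic.h>
#include <net/bluetooth/l2cap.h>

static void my_chan_set_nesting(struct l2cap_chan *chan, bool listening,
				bool smp)
{
	if (smp)
		atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
	else if (listening)
		/* may lock a child channel while this lock is held */
		atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
	else
		atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* l2cap_chan_lock() passes this value to mutex_lock_nested(),
	 * giving each class of channel its own lockdep subclass.
	 */
}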
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 414cd2f9a437..95c34d5180fa 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -184,6 +184,9 @@ struct mgmt_cp_load_link_keys {
#define MGMT_LTK_UNAUTHENTICATED 0x00
#define MGMT_LTK_AUTHENTICATED 0x01
+#define MGMT_LTK_P256_UNAUTH 0x02
+#define MGMT_LTK_P256_AUTH 0x03
+#define MGMT_LTK_P256_DEBUG 0x04
struct mgmt_ltk_info {
struct mgmt_addr_info addr;
@@ -299,28 +302,28 @@ struct mgmt_cp_user_passkey_neg_reply {
#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
struct mgmt_rp_read_local_oob_data {
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
struct mgmt_rp_read_local_oob_ext_data {
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021
struct mgmt_cp_add_remote_oob_data {
struct mgmt_addr_info addr;
__u8 hash[16];
- __u8 randomizer[16];
+ __u8 rand[16];
} __packed;
#define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32)
struct mgmt_cp_add_remote_oob_ext_data {
struct mgmt_addr_info addr;
__u8 hash192[16];
- __u8 randomizer192[16];
+ __u8 rand192[16];
__u8 hash256[16];
- __u8 randomizer256[16];
+ __u8 rand256[16];
} __packed;
#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
@@ -495,6 +498,15 @@ struct mgmt_cp_set_public_address {
} __packed;
#define MGMT_SET_PUBLIC_ADDRESS_SIZE 6
+#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A
+struct mgmt_cp_start_service_discovery {
+ __u8 type;
+ __s8 rssi;
+ __le16 uuid_count;
+ __u8 uuids[0][16];
+} __packed;
+#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+
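Start Service Discovery is a variable-length command: the fixed 4-byte part (type, rssi, uuid_count) covered by MGMT_START_SERVICE_DISCOVERY_SIZE is followed by uuid_count 16-byte UUIDs. A sketch of the length validation a command handler would perform (function name illustrative; the cap on uuid_count is an arbitrary example):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/mgmt.h>

static bool my_service_discovery_len_ok(const struct mgmt_cp_start_service_discovery *cp,
					u16 len)
{
	u16 uuid_count = le16_to_cpu(cp->uuid_count);

	/* sanity-check uuid_count before trusting the multiplication */
	if (uuid_count > 32)
		return false;

	return len == MGMT_START_SERVICE_DISCOVERY_SIZE + (u32)uuid_count * 16;
}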
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/drivers/net/bonding/bond_3ad.h b/include/net/bond_3ad.h
index c5f14ac63f3e..e01d903633ef 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -20,8 +20,8 @@
*
*/
-#ifndef __BOND_3AD_H__
-#define __BOND_3AD_H__
+#ifndef _NET_BOND_3AD_H
+#define _NET_BOND_3AD_H
#include <asm/byteorder.h>
#include <linux/skbuff.h>
@@ -279,5 +279,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif /* __BOND_3AD_H__ */
+#endif /* _NET_BOND_3AD_H */
diff --git a/drivers/net/bonding/bond_alb.h b/include/net/bond_alb.h
index 1ad473b4ade5..313a8d3b3069 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -19,8 +19,8 @@
*
*/
-#ifndef __BOND_ALB_H__
-#define __BOND_ALB_H__
+#ifndef _NET_BOND_ALB_H
+#define _NET_BOND_ALB_H
#include <linux/if_ether.h>
@@ -177,5 +177,5 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
-#endif /* __BOND_ALB_H__ */
+#endif /* _NET_BOND_ALB_H */
diff --git a/drivers/net/bonding/bond_options.h b/include/net/bond_options.h
index 17ded5b29176..ea6546d2c946 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/include/net/bond_options.h
@@ -8,8 +8,8 @@
* (at your option) any later version.
*/
-#ifndef _BOND_OPTIONS_H
-#define _BOND_OPTIONS_H
+#ifndef _NET_BOND_OPTIONS_H
+#define _NET_BOND_OPTIONS_H
#define BOND_OPT_MAX_NAMELEN 32
#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
@@ -127,4 +127,4 @@ static inline void __bond_opt_init(struct bond_opt_value *optval,
void bond_option_arp_ip_targets_clear(struct bonding *bond);
-#endif /* _BOND_OPTIONS_H */
+#endif /* _NET_BOND_OPTIONS_H */
diff --git a/drivers/net/bonding/bonding.h b/include/net/bonding.h
index 10920f0686e2..983a94b86b95 100644
--- a/drivers/net/bonding/bonding.h
+++ b/include/net/bonding.h
@@ -12,8 +12,8 @@
*
*/
-#ifndef _LINUX_BONDING_H
-#define _LINUX_BONDING_H
+#ifndef _NET_BONDING_H
+#define _NET_BONDING_H
#include <linux/timer.h>
#include <linux/proc_fs.h>
@@ -26,9 +26,9 @@
#include <linux/reciprocal_div.h>
#include <linux/if_link.h>
-#include "bond_3ad.h"
-#include "bond_alb.h"
-#include "bond_options.h"
+#include <net/bond_3ad.h>
+#include <net/bond_alb.h>
+#include <net/bond_options.h>
#define DRV_VERSION "3.7.1"
#define DRV_RELDATE "April 27, 2011"
@@ -645,4 +645,10 @@ extern struct bond_parm_tbl ad_select_tbl[];
/* exported from bond_netlink.c */
extern struct rtnl_link_ops bond_link_ops;
-#endif /* _LINUX_BONDING_H */
+static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+{
+ atomic_long_inc(&dev->tx_dropped);
+ dev_kfree_skb_any(skb);
+}
+
+#endif /* _NET_BONDING_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a2ddcf2398fd..4ebb816241fa 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -319,9 +319,12 @@ struct ieee80211_supported_band {
/**
* struct vif_params - describes virtual interface parameters
* @use_4addr: use 4-address frames
- * @macaddr: address to use for this virtual interface. This will only
- * be used for non-netdevice interfaces. If this parameter is set
- * to zero address the driver may determine the address as needed.
+ * @macaddr: address to use for this virtual interface.
+ * If this parameter is set to the zero address, the driver may
+ * determine the address as needed.
+ * This feature is only fully supported by drivers that enable the
+ * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
+ * only p2p devices with specified MAC.
*/
struct vif_params {
int use_4addr;
@@ -799,6 +802,22 @@ struct station_parameters {
};
/**
+ * struct station_del_parameters - station deletion parameters
+ *
+ * Used to delete a station entry (or all stations).
+ *
+ * @mac: MAC address of the station to remove or NULL to remove all stations
+ * @subtype: Management frame subtype to use for indicating removal
+ * (10 = Disassociation, 12 = Deauthentication)
+ * @reason_code: Reason code for the Disassociation/Deauthentication frame
+ */
+struct station_del_parameters {
+ const u8 *mac;
+ u8 subtype;
+ u16 reason_code;
+};
+
+/**
* enum cfg80211_station_type - the type of station being modified
* @CFG80211_STA_AP_CLIENT: client of an AP interface
* @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
@@ -1340,6 +1359,16 @@ struct mesh_setup {
};
/**
+ * struct ocb_setup - 802.11p OCB mode setup configuration
+ * @chandef: defines the channel to use
+ *
+ * These parameters are fixed when connecting to the network
+ */
+struct ocb_setup {
+ struct cfg80211_chan_def chandef;
+};
+
+/**
* struct ieee80211_txq_params - TX queue parameters
* @ac: AC identifier
* @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
@@ -1408,6 +1437,10 @@ struct cfg80211_ssid {
* @aborted: (internal) scan request was notified as aborted
* @notified: (internal) scan request was notified as done or aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1422,6 +1455,9 @@ struct cfg80211_scan_request {
struct wireless_dev *wdev;
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
unsigned long scan_start;
@@ -1432,6 +1468,17 @@ struct cfg80211_scan_request {
struct ieee80211_channel *channels[0];
};
+static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+{
+ int i;
+
+ get_random_bytes(buf, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++) {
+ buf[i] &= ~mask[i];
+ buf[i] |= addr[i] & mask[i];
+ }
+}
+
/**
* struct cfg80211_match_set - sets of attributes to match
*
@@ -1465,6 +1512,10 @@ struct cfg80211_match_set {
* @channels: channels to scan
* @min_rssi_thold: for drivers only supporting a single threshold, this
* contains the minimum over all matchsets
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1479,6 +1530,9 @@ struct cfg80211_sched_scan_request {
int n_match_sets;
s32 min_rssi_thold;
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
@@ -1911,6 +1965,7 @@ struct cfg80211_wowlan_tcp {
* @rfkill_release: wake up when rfkill is released
* @tcp: TCP connection establishment/wakeup parameters, see nl80211.h.
* NULL if not configured.
+ * @nd_config: configuration for the scan to be used for net detect wake.
*/
struct cfg80211_wowlan {
bool any, disconnect, magic_pkt, gtk_rekey_failure,
@@ -1919,6 +1974,7 @@ struct cfg80211_wowlan {
struct cfg80211_pkt_pattern *patterns;
struct cfg80211_wowlan_tcp *tcp;
int n_patterns;
+ struct cfg80211_sched_scan_request *nd_config;
};
/**
@@ -1951,6 +2007,35 @@ struct cfg80211_coalesce {
};
/**
+ * struct cfg80211_wowlan_nd_match - information about the match
+ *
+ * @ssid: SSID of the match that triggered the wake up
+ * @n_channels: Number of channels where the match occurred. This
+ * value may be zero if the driver can't report the channels.
+ * @channels: center frequencies of the channels where a match
+ * occurred (in MHz)
+ */
+struct cfg80211_wowlan_nd_match {
+ struct cfg80211_ssid ssid;
+ int n_channels;
+ u32 channels[];
+};
+
+/**
+ * struct cfg80211_wowlan_nd_info - net detect wake up information
+ *
+ * @n_matches: Number of match information instances provided in
+ * @matches. This value may be zero if the driver can't provide
+ * match information.
+ * @matches: Array of pointers to matches containing information about
+ * the matches that triggered the wake up.
+ */
+struct cfg80211_wowlan_nd_info {
+ int n_matches;
+ struct cfg80211_wowlan_nd_match *matches[];
+};
+
+/**
* struct cfg80211_wowlan_wakeup - wakeup report
* @disconnect: woke up by getting disconnected
* @magic_pkt: woke up by receiving magic packet
@@ -1969,6 +2054,7 @@ struct cfg80211_coalesce {
* @tcp_match: TCP wakeup packet received
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
+ * @net_detect: if not %NULL, woke up because of net detect
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
@@ -1978,6 +2064,7 @@ struct cfg80211_wowlan_wakeup {
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
+ struct cfg80211_wowlan_nd_info *net_detect;
};
/**
@@ -2132,7 +2219,7 @@ struct cfg80211_qos_map {
* @stop_ap: Stop being an AP, including stopping beaconing.
*
* @add_station: Add a new station.
- * @del_station: Remove a station; @mac may be NULL to remove all stations.
+ * @del_station: Remove a station
* @change_station: Modify a given station. Note that flags changes are not much
* validated in cfg80211, in particular the auth/assoc/authorized flags
* might come to the driver in invalid combinations -- make sure to check
@@ -2146,6 +2233,8 @@ struct cfg80211_qos_map {
* @change_mpath: change a given mesh path
* @get_mpath: get a mesh path for the given parameters
* @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @get_mpp: get a mesh proxy path for the given parameters
+ * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx
* @join_mesh: join the mesh network with the specified parameters
* (invoked with the wireless_dev mutex held)
* @leave_mesh: leave the current mesh network
@@ -2331,6 +2420,17 @@ struct cfg80211_qos_map {
* with the peer followed by immediate teardown when the addition is later
* rejected)
* @del_tx_ts: remove an existing TX TS
+ *
+ * @join_ocb: join the OCB network with the specified parameters
+ * (invoked with the wireless_dev mutex held)
+ * @leave_ocb: leave the current OCB network
+ * (invoked with the wireless_dev mutex held)
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2376,7 +2476,7 @@ struct cfg80211_ops {
const u8 *mac,
struct station_parameters *params);
int (*del_station)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac);
+ struct station_del_parameters *params);
int (*change_station)(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac,
struct station_parameters *params);
@@ -2396,6 +2496,11 @@ struct cfg80211_ops {
int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *dst, u8 *next_hop,
struct mpath_info *pinfo);
+ int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *dst, u8 *mpp, struct mpath_info *pinfo);
+ int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *dst, u8 *mpp,
+ struct mpath_info *pinfo);
int (*get_mesh_config)(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf);
@@ -2407,6 +2512,10 @@ struct cfg80211_ops {
const struct mesh_setup *setup);
int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev);
+ int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev,
+ struct ocb_setup *setup);
+ int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev);
+
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
@@ -2577,6 +2686,14 @@ struct cfg80211_ops {
u16 admitted_time);
int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
u8 tsid, const u8 *peer);
+
+ int (*tdls_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef);
+ void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr);
};
/*
@@ -2623,13 +2740,9 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
- * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM
- * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS
- * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it
- * needs to be able to handle Block-Ack agreements and other things.
*/
enum wiphy_flags {
- WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0),
+ /* use hole at 0 */
/* use hole at 1 */
/* use hole at 2 */
WIPHY_FLAG_NETNS_OK = BIT(3),
@@ -2755,6 +2868,7 @@ struct ieee80211_txrx_stypes {
* @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request
* @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure
* @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release
+ * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection
*/
enum wiphy_wowlan_support_flags {
WIPHY_WOWLAN_ANY = BIT(0),
@@ -2765,6 +2879,7 @@ enum wiphy_wowlan_support_flags {
WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5),
WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6),
WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7),
+ WIPHY_WOWLAN_NET_DETECT = BIT(8),
};
struct wiphy_wowlan_tcp_support {
@@ -2783,6 +2898,11 @@ struct wiphy_wowlan_tcp_support {
* @pattern_max_len: maximum length of each pattern
* @pattern_min_len: minimum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
+ * @max_nd_match_sets: maximum number of matchsets for net-detect,
+ * similar, but not necessarily identical, to max_match_sets for
+ * scheduled scans.
+ * See &struct cfg80211_sched_scan_request.@match_sets for more
+ * details.
* @tcp: TCP wakeup support information
*/
struct wiphy_wowlan_support {
@@ -2791,6 +2911,7 @@ struct wiphy_wowlan_support {
int pattern_max_len;
int pattern_min_len;
int max_pkt_offset;
+ int max_nd_match_sets;
const struct wiphy_wowlan_tcp_support *tcp;
};
@@ -3166,6 +3287,23 @@ static inline const char *wiphy_name(const struct wiphy *wiphy)
}
/**
+ * wiphy_new_nm - create a new wiphy for use with cfg80211
+ *
+ * @ops: The configuration operations for this device
+ * @sizeof_priv: The size of the private area to allocate
+ * @requested_name: Request a particular name.
+ * NULL is valid value, and means use the default phy%d naming.
+ *
+ * Create a new wiphy and associate the given operations with it.
+ * @sizeof_priv bytes are allocated for private use.
+ *
+ * Return: A pointer to the new wiphy. This pointer must be
+ * assigned to each netdev's ieee80211_ptr for proper operation.
+ */
+struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ const char *requested_name);
+
+/**
* wiphy_new - create a new wiphy for use with cfg80211
*
* @ops: The configuration operations for this device
@@ -3177,7 +3315,11 @@ static inline const char *wiphy_name(const struct wiphy *wiphy)
* Return: A pointer to the new wiphy. This pointer must be
* assigned to each netdev's ieee80211_ptr for proper operation.
*/
-struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
+static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops,
+ int sizeof_priv)
+{
+ return wiphy_new_nm(ops, sizeof_priv, NULL);
+}
/**
* wiphy_register - register a wiphy with cfg80211
@@ -4501,33 +4643,6 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
gfp_t gfp);
/**
- * cfg80211_radar_event - radar detection event
- * @wiphy: the wiphy
- * @chandef: chandef for the current channel
- * @gfp: context flags
- *
- * This function is called when a radar is detected on the current chanenl.
- */
-void cfg80211_radar_event(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef, gfp_t gfp);
-
-/**
- * cfg80211_cac_event - Channel availability check (CAC) event
- * @netdev: network device
- * @chandef: chandef for the current channel
- * @event: type of event
- * @gfp: context flags
- *
- * This function is called when a Channel availability check (CAC) is finished
- * or aborted. This must be called to notify the completion of a CAC process,
- * also by full-MAC drivers.
- */
-void cfg80211_cac_event(struct net_device *netdev,
- const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
-
-
-/**
* cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
* @dev: network device
* @peer: peer's MAC address
@@ -4555,6 +4670,42 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
/**
+ * cfg80211_cqm_beacon_loss_notify - beacon loss event
+ * @dev: network device
+ * @gfp: context flags
+ *
+ * Notify userspace about beacon loss from the connected AP.
+ */
+void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
+
+/**
+ * cfg80211_radar_event - radar detection event
+ * @wiphy: the wiphy
+ * @chandef: chandef for the current channel
+ * @gfp: context flags
+ *
+ * This function is called when a radar is detected on the current channel.
+ */
+void cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef, gfp_t gfp);
+
+/**
+ * cfg80211_cac_event - Channel availability check (CAC) event
+ * @netdev: network device
+ * @chandef: chandef for the current channel
+ * @event: type of event
+ * @gfp: context flags
+ *
+ * This function is called when a Channel availability check (CAC) is finished
+ * or aborted. This must be called to notify the completion of a CAC process,
+ * also by full-MAC drivers.
+ */
+void cfg80211_cac_event(struct net_device *netdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event, gfp_t gfp);
+
+
+/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
* @dev: network device
* @bssid: BSSID of AP (to avoid races)
@@ -4657,6 +4808,20 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef);
+/**
+ * cfg80211_ch_switch_started_notify - notify channel switch start
+ * @dev: the device on which the channel switch started
+ * @chandef: the future channel definition
+ * @count: the number of TBTTs until the channel switch happens
+ *
+ * Inform the userspace about the channel switch that has just
+ * started, so that it can take appropriate actions (eg. starting
+ * channel switch on other vifs), if necessary.
+ */
+void cfg80211_ch_switch_started_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u8 count);
+
/**
* ieee80211_operating_class_to_band - convert operating class to band
*
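The mac_addr/mac_addr_mask pair added to both scan request structures works as the kernel-doc says: mask bits set to 1 pin the corresponding address bit to mac_addr, mask bits set to 0 are randomised, and get_random_mask_addr() implements exactly that. For example, mac_addr = 02:11:22:00:00:00 with mac_addr_mask = ff:ff:ff:00:00:00 keeps the locally administered OUI and randomises the lower three octets on every scan. A sketch of a driver deriving the per-scan address (function name illustrative):

#include <linux/etherdevice.h>
#include <net/cfg80211.h>

static void my_pick_scan_addr(const struct cfg80211_scan_request *req,
			      u8 addr[ETH_ALEN])
{
	/* Only meaningful when the driver advertises scan randomisation
	 * support; otherwise the permanent address is used as-is.
	 */
	get_random_mask_addr(addr, req->mac_addr, req->mac_addr_mask);
}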
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
new file mode 100644
index 000000000000..7f713acfa106
--- /dev/null
+++ b/include/net/cfg802154.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ */
+
+#ifndef __NET_CFG802154_H
+#define __NET_CFG802154_H
+
+#include <linux/ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/bug.h>
+
+#include <net/nl802154.h>
+
+struct wpan_phy;
+
+struct cfg802154_ops {
+ struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ const char *name,
+ int type);
+ void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
+ struct net_device *dev);
+ int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
+ const char *name,
+ enum nl802154_iftype type,
+ __le64 extended_addr);
+ int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev);
+ int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+ int (*set_pan_id)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 pan_id);
+ int (*set_short_addr)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, __le16 short_addr);
+ int (*set_backoff_exponent)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, u8 min_be,
+ u8 max_be);
+ int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ u8 max_csma_backoffs);
+ int (*set_max_frame_retries)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ s8 max_frame_retries);
+ int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool mode);
+};
+
+struct wpan_phy {
+ struct mutex pib_lock;
+
+ /* If multiple wpan_phys are registered and you're handed e.g.
+ * a regular netdev with assigned ieee802154_ptr, you won't
+ * know whether it points to a wpan_phy your driver has registered
+ * or not. Assign this to something global to your driver to
+ * help determine whether you own this wpan_phy or not.
+ */
+ const void *privid;
+
+ /*
+ * This is a PIB according to 802.15.4-2011.
+ * We do not provide timing-related variables, as they
+ * aren't used outside of driver
+ */
+ u8 current_channel;
+ u8 current_page;
+ u32 channels_supported[IEEE802154_MAX_PAGE + 1];
+ s8 transmit_power;
+ u8 cca_mode;
+
+ __le64 perm_extended_addr;
+
+ s32 cca_ed_level;
+
+ /* PHY-dependent MAC PIB values */
+
+ /* 802.15.4 acronym: Tdsym in usec */
+ u8 symbol_duration;
+ /* lifs and sifs periods timing */
+ u16 lifs_period;
+ u16 sifs_period;
+
+ struct device dev;
+
+ char priv[0] __aligned(NETDEV_ALIGN);
+};
+
+struct wpan_dev {
+ struct wpan_phy *wpan_phy;
+ int iftype;
+
+ /* the remainder of this struct should be private to cfg802154 */
+ struct list_head list;
+ struct net_device *netdev;
+
+ u32 identifier;
+
+ /* MAC PIB */
+ __le16 pan_id;
+ __le16 short_addr;
+ __le64 extended_addr;
+
+ /* MAC BSN field */
+ u8 bsn;
+ /* MAC DSN field */
+ u8 dsn;
+
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+
+ bool promiscuous_mode;
+};
+
+#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+
+struct wpan_phy *
+wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
+static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
+{
+ phy->dev.parent = dev;
+}
+
+int wpan_phy_register(struct wpan_phy *phy);
+void wpan_phy_unregister(struct wpan_phy *phy);
+void wpan_phy_free(struct wpan_phy *phy);
+/* Same semantics as for class_for_each_device */
+int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data);
+
+static inline void *wpan_phy_priv(struct wpan_phy *phy)
+{
+ BUG_ON(!phy);
+ return &phy->priv;
+}
+
+struct wpan_phy *wpan_phy_find(const char *str);
+
+static inline void wpan_phy_put(struct wpan_phy *phy)
+{
+ put_device(&phy->dev);
+}
+
+static inline const char *wpan_phy_name(struct wpan_phy *phy)
+{
+ return dev_name(&phy->dev);
+}
+
+#endif /* __NET_CFG802154_H */
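A rough sketch of how a driver would bring up a wpan_phy with the new cfg802154 API, using only the helpers declared above (the my_* names and the "program the transceiver" step are illustrative):

#include <linux/device.h>
#include <net/cfg802154.h>

struct my_802154_priv {
	int irq;		/* whatever the driver needs */
};

static int my_set_channel(struct wpan_phy *phy, u8 page, u8 channel)
{
	struct my_802154_priv *priv = wpan_phy_priv(phy);

	/* program the transceiver here using priv */
	(void)priv;
	phy->current_page = page;
	phy->current_channel = channel;
	return 0;
}

static const struct cfg802154_ops my_ops = {
	.set_channel = my_set_channel,
};

static struct wpan_phy *my_create_phy(struct device *parent)
{
	struct wpan_phy *phy;

	phy = wpan_phy_new(&my_ops, sizeof(struct my_802154_priv));
	if (!phy)
		return NULL;

	wpan_phy_set_dev(phy, parent);	/* parent device for sysfs */

	if (wpan_phy_register(phy)) {
		wpan_phy_free(phy);
		return NULL;
	}

	return phy;
}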
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 6465bae80a4f..e339a9513e29 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -151,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
(__force __be32)to, pseudohdr);
}
+static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
+{
+ __sum16 *psum = (__sum16 *)(ptr + offset);
+ __wsum delta;
+
+ /* Subtract out checksum up to start */
+ csum = csum_sub(csum, csum_partial(ptr, start, 0));
+
+ /* Set derived checksum in packet */
+ delta = csum_sub(csum_fold(csum), *psum);
+ *psum = csum_fold(csum);
+
+ return delta;
+}
+
#endif
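remcsum_adjust() is the receive-side half of remote checksum offload: it folds the checksum over [ptr, ptr + start) out of csum, writes the resulting checksum into the packet at ptr + offset, and returns the delta the caller needs to keep CHECKSUM_COMPLETE consistent. A sketch of a caller, assuming skb->csum already covers the data starting at ptr (function name illustrative):

#include <linux/skbuff.h>
#include <net/checksum.h>

static void my_remcsum_unfold(struct sk_buff *skb, void *ptr,
			      int start, int offset)
{
	__wsum delta;

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* The bytes at ptr + offset changed, so patch the accumulated
	 * checksum as well.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, delta);
}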
diff --git a/include/net/compat.h b/include/net/compat.h
index 3b603b199c01..42a9c8431177 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,9 +40,8 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-int verify_compat_iovec(struct msghdr *, struct iovec *,
- struct sockaddr_storage *, int);
+ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+ struct sockaddr __user **, struct iovec **);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b76559293535..ed3c34bbb67a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -38,6 +38,9 @@ struct dsa_chip_data {
struct device *host_dev;
int sw_addr;
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
/* Device tree node pointer for this specific switch chip
* used during switch setup in case additional properties
* and resources needs to be used
@@ -139,6 +142,14 @@ struct dsa_switch {
*/
struct device *master_dev;
+#ifdef CONFIG_NET_DSA_HWMON
+ /*
+ * Hardware monitoring information
+ */
+ char hwmon_name[IFNAMSIZ + 8];
+ struct device *hwmon_dev;
+#endif
+
/*
* Slave mii_bus and devices for the individual ports.
*/
@@ -242,6 +253,28 @@ struct dsa_switch_driver {
struct ethtool_eee *e);
int (*get_eee)(struct dsa_switch *ds, int port,
struct ethtool_eee *e);
+
+#ifdef CONFIG_NET_DSA_HWMON
+ /* Hardware monitoring */
+ int (*get_temp)(struct dsa_switch *ds, int *temp);
+ int (*get_temp_limit)(struct dsa_switch *ds, int *temp);
+ int (*set_temp_limit)(struct dsa_switch *ds, int temp);
+ int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm);
+#endif
+
+ /* EEPROM access */
+ int (*get_eeprom_len)(struct dsa_switch *ds);
+ int (*get_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ /*
+ * Register access.
+ */
+ int (*get_regs_len)(struct dsa_switch *ds, int port);
+ void (*get_regs)(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *p);
};
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/fou.h b/include/net/fou.h
new file mode 100644
index 000000000000..19b8a0c62a98
--- /dev/null
+++ b/include/net/fou.h
@@ -0,0 +1,19 @@
+#ifndef __NET_FOU_H
+#define __NET_FOU_H
+
+#include <linux/skbuff.h>
+
+#include <net/flow.h>
+#include <net/gue.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+
+size_t fou_encap_hlen(struct ip_tunnel_encap *e);
+size_t gue_encap_hlen(struct ip_tunnel_encap *e);
+
+int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+
+#endif
diff --git a/include/net/gue.h b/include/net/gue.h
index b6c332788084..3f28ec7f1c7f 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -1,23 +1,116 @@
#ifndef __NET_GUE_H
#define __NET_GUE_H
+/* Definitions for the GUE header, standard and private flags, lengths
+ * of optional fields are below.
+ *
+ * Diagram of GUE header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|C| Hlen | Proto/ctype | Standard flags |P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Private flags (optional, P bit is set) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Private fields (optional) ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C bit indicates control message when set, data message when unset.
+ * For a control message, proto/ctype is interpreted as a type of
+ * control message. For data messages, proto/ctype is the IP protocol
+ * of the next header.
+ *
+ * P bit indicates private flags field is present. The private flags
+ * may refer to options placed after this field.
+ */
+
struct guehdr {
union {
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 hlen:4,
- version:4;
+ __u8 hlen:5,
+ control:1,
+ version:2;
#elif defined (__BIG_ENDIAN_BITFIELD)
- __u8 version:4,
- hlen:4;
+ __u8 version:2,
+ control:1,
+ hlen:5;
#else
#error "Please fix <asm/byteorder.h>"
#endif
- __u8 next_hdr;
+ __u8 proto_ctype;
__u16 flags;
};
__u32 word;
};
};
+/* Standard flags in GUE header */
+
+#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */
+#define GUE_LEN_PRIV 4
+
+#define GUE_FLAGS_ALL (GUE_FLAG_PRIV)
+
+/* Private flags in the private option extension */
+
+#define GUE_PFLAG_REMCSUM htonl(1 << 31)
+#define GUE_PLEN_REMCSUM 4
+
+#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM)
+
+/* Functions to compute options length corresponding to flags.
+ * If we ever have a lot of flags this can potentially be
+ * converted to a more optimized algorithm (table lookup
+ * for instance).
+ */
+static inline size_t guehdr_flags_len(__be16 flags)
+{
+ return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0);
+}
+
+static inline size_t guehdr_priv_flags_len(__be32 flags)
+{
+ return 0;
+}
+
+/* Validate standard and private flags. Returns non-zero (meaning invalid)
+ * if there are unknown standard or private flags, or if the options length for
+ * the flags exceeds the options length specified in the hlen of the GUE header.
+ */
+static inline int validate_gue_flags(struct guehdr *guehdr,
+ size_t optlen)
+{
+ size_t len;
+ __be32 flags = guehdr->flags;
+
+ if (flags & ~GUE_FLAGS_ALL)
+ return 1;
+
+ len = guehdr_flags_len(flags);
+ if (len > optlen)
+ return 1;
+
+ if (flags & GUE_FLAG_PRIV) {
+ /* Private flags are last four bytes accounted in
+ * guehdr_flags_len
+ */
+ flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV);
+
+ if (flags & ~GUE_PFLAGS_ALL)
+ return 1;
+
+ len += guehdr_priv_flags_len(flags);
+ if (len > optlen)
+ return 1;
+ }
+
+ return 0;
+}
+
#endif
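Putting the new fields and helpers together, a decapsulating receiver pulls the fixed 4-byte header, takes hlen as the number of 32-bit words of optional fields, and then lets validate_gue_flags() reject anything it does not understand. A simplified sketch of that sequence (error handling trimmed, function name illustrative):

#include <linux/skbuff.h>
#include <net/gue.h>
#include <net/udp.h>

static int my_gue_check(struct sk_buff *skb)
{
	struct guehdr *guehdr;
	size_t len, optlen;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
	optlen = guehdr->hlen << 2;	/* hlen counts 32-bit words */

	if (!pskb_may_pull(skb, len + optlen))
		return -EINVAL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];	/* head may have moved */

	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	/* control message vs. data message carrying an inner IP protocol */
	return guehdr->control ? -EOPNOTSUPP : guehdr->proto_ctype;
}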
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 3b53c8e405e4..83bb8a73d23c 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -27,10 +23,10 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
-#include <net/ieee802154.h>
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/ieee802154.h>
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -427,8 +423,6 @@ struct ieee802154_mlme_ops {
/* The fields below are required. */
- struct wpan_phy *(*get_phy)(const struct net_device *dev);
-
/*
* FIXME: these should become the part of PIB/MIB interface.
* However we still don't have IB interface of any kind
@@ -438,16 +432,6 @@ struct ieee802154_mlme_ops {
u8 (*get_dsn)(const struct net_device *dev);
};
-/* The IEEE 802.15.4 standard defines 2 type of the devices:
- * - FFD - full functionality device
- * - RFD - reduce functionality device
- *
- * So 2 sets of mlme operations are needed
- */
-struct ieee802154_reduced_mlme_ops {
- struct wpan_phy *(*get_phy)(const struct net_device *dev);
-};
-
static inline struct ieee802154_mlme_ops *
ieee802154_mlme_ops(const struct net_device *dev)
{
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index d1d272843b3b..9201afe083fa 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -99,4 +99,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
+ (((__sk)->sk_portpair == (__ports)) && \
+ ((__sk)->sk_family == AF_INET6) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
+ (!(__sk)->sk_bound_dev_if || \
+ ((__sk)->sk_bound_dev_if == (__dif))) && \
+ net_eq(sock_net(__sk), (__net)))
+
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index a5593dab6af7..9326c41c2d7f 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t);
void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+ const struct in6_addr *raddr);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index dc9d2a27c315..09a819ee2151 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -201,8 +201,8 @@ void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
-#define TABLE_LOCAL_INDEX 0
-#define TABLE_MAIN_INDEX 1
+#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
+#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5bc6edeb7143..25a59eb388a6 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -117,6 +117,22 @@ struct ip_tunnel_net {
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
};
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+};
+
+#define MAX_IPTUN_ENCAP_OPS 8
+
+extern const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
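The iptun_encaps[] table is how the FOU/GUE code above plugs into IPv4 tunnels: an encapsulation module fills in a struct ip_tunnel_encap_ops and registers it under a fixed slot. A sketch of the registration, assuming the TUNNEL_ENCAP_FOU constant from the tunnel UAPI as the slot index (module names are illustrative):

#include <linux/if_tunnel.h>
#include <linux/module.h>
#include <net/fou.h>
#include <net/ip_tunnels.h>

static const struct ip_tunnel_encap_ops my_fou_encap_ops = {
	.encap_hlen	= fou_encap_hlen,
	.build_header	= fou_build_header,
};

static int __init my_fou_encap_init(void)
{
	return ip_tunnel_encap_add_ops(&my_fou_encap_ops, TUNNEL_ENCAP_FOU);
}

static void __exit my_fou_encap_exit(void)
{
	ip_tunnel_encap_del_ops(&my_fou_encap_ops, TUNNEL_ENCAP_FOU);
}

module_init(my_fou_encap_init);
module_exit(my_fou_encap_exit);
MODULE_LICENSE("GPL");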
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 0143180fecc9..e5cff6811b30 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -42,6 +42,9 @@ struct ipxhdr {
struct ipx_address ipx_source __packed;
};
+/* From af_ipx.c */
+extern int sysctl_ipx_pprop_broadcasting;
+
static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
{
return (struct ipxhdr *)skb_transport_header(skb);
@@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
unsigned char *node);
void ipxrtr_del_routes(struct ipx_interface *intrfc);
int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock);
+ struct msghdr *msg, size_t len, int noblock);
int ipxrtr_route_skb(struct sk_buff *skb);
struct ipx_route *ipxrtr_lookup(__be32 net);
int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index a059465101ff..92c8fb575213 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -55,16 +55,6 @@ typedef __u32 magic_t;
#endif
#ifdef CONFIG_IRDA_DEBUG
-
-extern unsigned int irda_debug;
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#define IRDA_DEBUG_LEVEL 0
-
-#define IRDA_DEBUG(n, args...) \
-do { if (irda_debug >= (n)) \
- printk(KERN_DEBUG args); \
-} while (0)
#define IRDA_ASSERT(expr, func) \
do { if(!(expr)) { \
printk( "Assertion failed! %s:%s:%d %s\n", \
@@ -72,15 +62,10 @@ do { if(!(expr)) { \
func } } while (0)
#define IRDA_ASSERT_LABEL(label) label
#else
-#define IRDA_DEBUG(n, args...) do { } while (0)
#define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0)
#define IRDA_ASSERT_LABEL(label)
#endif /* CONFIG_IRDA_DEBUG */
-#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0)
-#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0)
-#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0)
-
/*
* Magic numbers used by Linux-IrDA. Random numbers which must be unique to
* give the best protection
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index fb4b76d5d7f1..6f23e820618c 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -303,7 +303,7 @@ static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
if (!self || self->magic != LAP_MAGIC)
return;
- IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
+ pr_debug("next LAP state = %s\n", irlap_state[state]);
*/
self->state = state;
}
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
index 42713c931d1f..2d9cd0007cba 100644
--- a/include/net/irda/parameters.h
+++ b/include/net/irda/parameters.h
@@ -71,17 +71,17 @@ typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi,
PV_TYPE type, PI_HANDLER func);
typedef struct {
- PI_HANDLER func; /* Handler for this parameter identifier */
+ const PI_HANDLER func; /* Handler for this parameter identifier */
PV_TYPE type; /* Data type for this parameter */
} pi_minor_info_t;
typedef struct {
- pi_minor_info_t *pi_minor_call_table;
+ const pi_minor_info_t *pi_minor_call_table;
int len;
} pi_major_info_t;
typedef struct {
- pi_major_info_t *tables;
+ const pi_major_info_t *tables;
int len;
__u8 pi_mask;
int pi_major_offset;
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 0e79cfba4b3b..48f3f891b2f9 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -35,8 +35,8 @@
struct llc_conn_state_trans {
llc_conn_ev_t ev;
u8 next_state;
- llc_conn_ev_qfyr_t *ev_qualifiers;
- llc_conn_action_t *ev_actions;
+ const llc_conn_ev_qfyr_t *ev_qualifiers;
+ const llc_conn_action_t *ev_actions;
};
struct llc_conn_state {
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index 567c681f1f3e..c4359e203013 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -19,7 +19,7 @@
struct llc_sap_state_trans {
llc_sap_ev_t ev;
u8 next_state;
- llc_sap_action_t *ev_actions;
+ const llc_sap_action_t *ev_actions;
};
struct llc_sap_state {
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0ad1f47d2dc7..58d719ddaa60 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -263,6 +263,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
* note that this is only called when it changes after the channel
* context had been assigned.
+ * @BSS_CHANGED_OCB: OCB join status changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -287,6 +288,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_P2P_PS = 1<<19,
BSS_CHANGED_BEACON_INFO = 1<<20,
BSS_CHANGED_BANDWIDTH = 1<<21,
+ BSS_CHANGED_OCB = 1<<22,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -739,7 +741,8 @@ struct ieee80211_tx_info {
u8 ampdu_ack_len;
u8 ampdu_len;
u8 antenna;
- void *status_driver_data[21 / sizeof(void *)];
+ u16 tx_time;
+ void *status_driver_data[19 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -879,6 +882,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* subframes share the same sequence number. Reported subframes can be
* either regular MSDU or singly A-MSDUs. Subframes must not be
* interleaved with other frames.
+ * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
+ * radiotap data in the skb->data (before the frame) as described by
+ * the &struct ieee80211_vendor_radiotap.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -908,6 +914,7 @@ enum mac80211_rx_flags {
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
RX_FLAG_AMSDU_MORE = BIT(30),
+ RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -979,6 +986,39 @@ struct ieee80211_rx_status {
};
/**
+ * struct ieee80211_vendor_radiotap - vendor radiotap data information
+ * @present: presence bitmap for this vendor namespace
+ * (this could be extended in the future if any vendor needs more
+ * bits, the radiotap spec does allow for that)
+ * @align: radiotap vendor namespace alignment. This defines the needed
+ * alignment for the @data field below, not for the vendor namespace
+ * description itself (which has a fixed 2-byte alignment).
+ * Must be a power of two and be set to at least 1!
+ * @oui: radiotap vendor namespace OUI
+ * @subns: radiotap vendor sub namespace
+ * @len: radiotap vendor sub namespace skip length; if alignment is done,
+ * then that's added to this, i.e. this is only the length of the
+ * @data field.
+ * @pad: number of bytes of padding after the @data, this exists so that
+ * the skb data alignment can be preserved even if the data has odd
+ * length
+ * @data: the actual vendor namespace data
+ *
+ * This struct, including the vendor data, goes into the skb->data before
+ * the 802.11 header. It's split up in mac80211 using the align/oui/subns
+ * data.
+ */
+struct ieee80211_vendor_radiotap {
+ u32 present;
+ u8 align;
+ u8 oui[3];
+ u8 subns;
+ u8 pad;
+ u16 len;
+ u8 data[];
+} __packed;
+
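[Editor's sketch, not part of this patch: a hedged illustration of how a driver might prepend the vendor radiotap block described above before the 802.11 frame. The OUI, namespace bit and function name are made up; it assumes the skb has enough headroom.]

static void example_add_vendor_radiotap(struct sk_buff *skb,
					struct ieee80211_rx_status *status,
					const u8 *vdata, u16 vlen)
{
	struct ieee80211_vendor_radiotap *rtap;

	/* the block (header + data) sits in front of the 802.11 header */
	rtap = (void *)skb_push(skb, sizeof(*rtap) + vlen);
	memset(rtap, 0, sizeof(*rtap));
	rtap->present = BIT(0);		/* one vendor-defined field present */
	rtap->align = 1;		/* no special alignment needed */
	rtap->oui[0] = 0x00;		/* hypothetical vendor OUI */
	rtap->oui[1] = 0x11;
	rtap->oui[2] = 0x22;
	rtap->subns = 0;
	rtap->len = vlen;
	memcpy(rtap->data, vdata, vlen);

	status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
}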
+/**
* enum ieee80211_conf_flags - configuration flags
*
* Flags to define PHY configuration options
@@ -1117,6 +1157,8 @@ struct ieee80211_conf {
* Function (TSF) timer when the frame containing the channel switch
* announcement was received. This is simply the rx.mactime parameter
* the driver passed into mac80211.
+ * @device_timestamp: arbitrary timestamp for the device, this is the
+ * rx.device_timestamp parameter the driver passed to mac80211.
* @block_tx: Indicates whether transmission must be blocked before the
* scheduled channel switch, as indicated by the AP.
* @chandef: the new channel to switch to
@@ -1124,6 +1166,7 @@ struct ieee80211_conf {
*/
struct ieee80211_channel_switch {
u64 timestamp;
+ u32 device_timestamp;
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
@@ -1423,6 +1466,8 @@ struct ieee80211_sta_rates {
* @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
+ * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
+ * valid if the STA is a TDLS peer in the first place.
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1438,6 +1483,7 @@ struct ieee80211_sta {
enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
+ bool tdls_initiator;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1576,6 +1622,10 @@ struct ieee80211_tx_control {
* a virtual monitor interface when monitor interfaces are the only
* active interfaces.
*
+ * @IEEE80211_HW_NO_AUTO_VIF: The driver would like no default (wlanX)
+ * interface to be created. It is expected that user-space will create
+ * vifs as desired (and thus have them named as desired).
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1622,7 +1672,8 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- /* free slots */
+ IEEE80211_HW_NO_AUTO_VIF = 1<<15,
+ /* free slot */
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
@@ -1776,6 +1827,31 @@ struct ieee80211_scan_request {
};
/**
+ * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters
+ *
+ * @sta: peer this TDLS channel-switch request/response came from
+ * @chandef: channel referenced in a TDLS channel-switch request
+ * @action_code: see &enum ieee80211_tdls_actioncode
+ * @status: channel-switch response status
+ * @timestamp: time at which the frame was received
+ * @switch_time: switch-timing parameter received in the frame
+ * @switch_timeout: switch-timing parameter received in the frame
+ * @tmpl_skb: TDLS switch-channel response template
+ * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb
+ */
+struct ieee80211_tdls_ch_sw_params {
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def *chandef;
+ u8 action_code;
+ u32 status;
+ u32 timestamp;
+ u16 switch_time;
+ u16 switch_timeout;
+ struct sk_buff *tmpl_skb;
+ u32 ch_sw_tm_ie;
+};
+
+/**
* wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
*
* @wiphy: the &struct wiphy which we want to query
@@ -2375,6 +2451,22 @@ enum ieee80211_roc_type {
};
/**
+ * enum ieee80211_reconfig_complete_type - reconfig type
+ *
+ * This enum is used by the reconfig_complete() callback to indicate what
+ * reconfiguration type was completed.
+ *
+ * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type
+ * (also due to resume() callback returning 1)
+ * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless
+ * of wowlan configuration)
+ */
+enum ieee80211_reconfig_type {
+ IEEE80211_RECONFIG_TYPE_RESTART,
+ IEEE80211_RECONFIG_TYPE_SUSPEND,
+};
+
+/**
* struct ieee80211_ops - callbacks from mac80211 to the driver
*
* This structure contains various callbacks that the driver may
@@ -2530,7 +2622,9 @@ enum ieee80211_roc_type {
*
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
- * The callback can sleep.
+ * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR;
+ * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it
+ * can use this parameter. The callback can sleep.
*
* @sw_scan_complete: Notifier function that is called just after a
* software scan finished. Can be NULL, if the driver doesn't need
@@ -2601,6 +2695,9 @@ enum ieee80211_roc_type {
* uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
* otherwise the rate control algorithm is notified directly.
* Must be atomic.
+ * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This
+ * is only used if the configured rate control algorithm actually uses
+ * the new rate table API, and is therefore optional. Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
@@ -2809,11 +2906,11 @@ enum ieee80211_roc_type {
* disabled/enabled via @bss_info_changed.
* @stop_ap: Stop operation on the AP interface.
*
- * @restart_complete: Called after a call to ieee80211_restart_hw(), when the
- * reconfiguration has completed. This can help the driver implement the
- * reconfiguration step. Also called when reconfiguring because the
- * driver's resume function returned 1, as this is just like an "inline"
- * hardware restart. This callback may sleep.
+ * @reconfig_complete: Called after a call to ieee80211_restart_hw() and
+ * during resume, when the reconfiguration has completed.
+ * This can help the driver implement the reconfiguration step (and
+ * indicate mac80211 is ready to receive frames).
+ * This callback may sleep.
*
* @ipv6_addr_change: IPv6 address assignment on the given interface changed.
* Currently, this is only called for managed or P2P client interfaces.
@@ -2829,6 +2926,13 @@ enum ieee80211_roc_type {
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
+ * @pre_channel_switch: This is an optional callback that is called
+ * before a channel switch procedure is started (i.e. when a STA
+ * gets a CSA or a userspace-initiated channel switch), allowing
+ * the driver to prepare for the channel switch.
+ * @post_channel_switch: This is an optional callback that is called
+ * after a channel switch procedure is completed, allowing the
+ * driver to go back to a normal configuration.
*
* @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
* information in bss_conf is set up and the beacon can be retrieved. A
@@ -2838,6 +2942,26 @@ enum ieee80211_roc_type {
* @get_expected_throughput: extract the expected throughput towards the
* specified station. The returned value is expressed in Kbps. It returns 0
* if the RC algorithm does not have proper data to provide.
+ *
+ * @get_txpower: get current maximum tx power (in dBm) based on configuration
+ * and hardware limits.
+ *
+ * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver
+ * is responsible for continually initiating channel-switching operations
+ * and returning to the base channel for communication with the AP. The
+ * driver receives a channel-switch request template and the location of
+ * the switch-timing IE within the template as part of the invocation.
+ * The template is valid only within the call, and the driver can
+ * optionally copy the skb for further re-use.
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
+ * peers must be on the base channel when the call completes.
+ * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or
+ * response) has been received from a remote peer. The driver gets
+ * parameters parsed from the incoming frame and may use them to continue
+ * an ongoing channel-switch operation. In addition, a channel-switch
+ * response template is provided, together with the location of the
+ * switch-timing IE within the template. The skb can only be used within
+ * the function call.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -2897,8 +3021,11 @@ struct ieee80211_ops {
struct ieee80211_scan_ies *ies);
int (*sched_scan_stop)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
- void (*sw_scan_start)(struct ieee80211_hw *hw);
- void (*sw_scan_complete)(struct ieee80211_hw *hw);
+ void (*sw_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+ void (*sw_scan_complete)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int (*get_stats)(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
@@ -2932,6 +3059,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 changed);
+ void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int (*conf_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -2959,6 +3089,7 @@ struct ieee80211_ops {
void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
void (*channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
@@ -3025,7 +3156,8 @@ struct ieee80211_ops {
int n_vifs,
enum ieee80211_chanctx_switch_mode mode);
- void (*restart_complete)(struct ieee80211_hw *hw);
+ void (*reconfig_complete)(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
#if IS_ENABLED(CONFIG_IPV6)
void (*ipv6_addr_change)(struct ieee80211_hw *hw,
@@ -3035,14 +3167,54 @@ struct ieee80211_ops {
void (*channel_switch_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef);
+ int (*pre_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
+
+ int (*post_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
+ int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+ int (*tdls_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+ void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
};
/**
- * ieee80211_alloc_hw - Allocate a new hardware device
+ * ieee80211_alloc_hw_nm - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac80211 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee80211_hw, the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ * @requested_name: Requested name for this device.
+ * NULL is a valid value and means to use the default naming (phy%d)
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
+struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ const struct ieee80211_ops *ops,
+ const char *requested_name);
+
+/**
+ * ieee80211_alloc_hw - Allocate a new hardware device
*
* This must be called once for each hardware device. The returned pointer
* must be used to refer to this device when calling other functions.
@@ -3055,8 +3227,12 @@ struct ieee80211_ops {
*
* Return: A pointer to the new hardware device, or %NULL on error.
*/
+static inline
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
- const struct ieee80211_ops *ops);
+ const struct ieee80211_ops *ops)
+{
+ return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL);
+}
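[Editor's sketch, not part of this patch: a hedged example of allocating the hardware with a requested phy name via the new ieee80211_alloc_hw_nm(). "example_priv", "example_ops" and the name string are illustrative only.]

struct example_priv {
	int dummy;	/* placeholder private data */
};

/* a real driver fills in the mandatory callbacks (tx, start, stop, ...) */
static const struct ieee80211_ops example_ops = {};

static struct ieee80211_hw *example_alloc(void)
{
	/* passing NULL instead of "phy-example" keeps the default phy%d naming */
	return ieee80211_alloc_hw_nm(sizeof(struct example_priv),
				     &example_ops, "phy-example");
}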
/**
* ieee80211_register_hw - Register hardware device
@@ -3443,6 +3619,26 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_noskb - transmit status callback without skb
+ *
+ * This function can be used as a replacement for ieee80211_tx_status
+ * in drivers that cannot reliably map tx status information back to
+ * specific skbs.
+ *
+ * Calls to this function for a single hardware must be synchronized
+ * against each other. Calls to this function, ieee80211_tx_status_ni()
+ * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @sta: the receiver station to which this packet is sent
+ * (NULL for multicast packets)
+ * @info: tx status information
+ */
+void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info);
+
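[Editor's sketch, not part of this patch: a hedged example of reporting tx status when the driver cannot map the status back to a specific skb. The field values and function name are illustrative.]

static void example_report_tx(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta, bool acked)
{
	struct ieee80211_tx_info info = {};

	if (acked)
		info.flags |= IEEE80211_TX_STAT_ACK;
	info.status.rates[0].idx = -1;	/* tx rate unknown to the driver */

	ieee80211_tx_status_noskb(hw, sta, &info);
}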
+/**
* ieee80211_tx_status_ni - transmit status callback (in process context)
*
* Like ieee80211_tx_status() but can be called in process context.
@@ -3655,7 +3851,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
/**
* ieee80211_probereq_get - retrieve a Probe Request template
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @src_addr: source MAC address
* @ssid: SSID buffer
* @ssid_len: length of SSID
* @tailroom: tailroom to reserve at end of SKB for IEs
@@ -3666,7 +3862,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
* Return: The Probe Request template. %NULL on error.
*/
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+ const u8 *src_addr,
const u8 *ssid, size_t ssid_len,
size_t tailroom);
@@ -4172,6 +4368,22 @@ void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
void *data);
/**
+ * ieee80211_iterate_stations_atomic - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them.
+ * This function requires the iterator callback function to be atomic.
+ *
+ * @hw: the hardware struct whose stations should be iterated over
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
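[Editor's sketch, not part of this patch: a hedged example of the new station iterator with an atomic (non-sleeping) callback. The function names are illustrative.]

static void example_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;		/* must not sleep here */
}

static unsigned int example_num_stations(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations_atomic(hw, example_count_sta, &count);
	return count;
}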
+/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
* Drivers and mac80211 use this to add work onto the mac80211 workqueue.
@@ -4480,6 +4692,14 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
gfp_t gfp);
/**
+ * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @gfp: context flags
+ */
+void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
+
+/**
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
@@ -4637,6 +4857,10 @@ struct rate_control_ops {
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
void *priv_sta);
+ void (*tx_status_noskb)(void *priv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_info *info);
void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb);
@@ -4888,4 +5112,69 @@ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
enum nl80211_tdls_operation oper,
u16 reason_code, gfp_t gfp);
+
+/**
+ * ieee80211_reserve_tid - request to reserve a specific TID
+ *
+ * There is sometimes a need (such as in TDLS) for blocking the driver from
+ * using a specific TID so that the FW can use it for certain operations such
+ * as sending PTI requests. To make sure that the driver doesn't use that TID,
+ * this function must be called as it flushes out packets on this TID and marks
+ * it as blocked, so that any transmit for the station on this TID will be
+ * redirected to the alternative TID in the same AC.
+ *
+ * Note that this function blocks and may call back into the driver, so it
+ * should be called without driver locks held. Also note this function should
+ * only be called from the driver's @sta_state callback.
+ *
+ * @sta: the station to reserve the TID for
+ * @tid: the TID to reserve
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
+
+/**
+ * ieee80211_unreserve_tid - request to unreserve a specific TID
+ *
+ * Once there is no longer any need to reserve a certain TID, this function
+ * should be called; packets will then no longer have their TID modified to
+ * prevent use of this TID in the driver.
+ *
+ * Note that this function blocks and acquires a lock, so it should be called
+ * without driver locks held. Also note this function should only be called
+ * from the driver's @sta_state callback.
+ *
+ * @sta: the station
+ * @tid: the TID to unreserve
+ */
+void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
+
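[Editor's sketch, not part of this patch: a hedged example pairing ieee80211_reserve_tid() and ieee80211_unreserve_tid() around an operation for which the firmware needs the TID. The TID value and function name are illustrative; per the documentation above this would run from the driver's @sta_state callback with no driver locks held.]

static int example_block_tid(struct ieee80211_sta *sta)
{
	int ret;

	ret = ieee80211_reserve_tid(sta, 4);
	if (ret)
		return ret;

	/* ... firmware may now use TID 4 for its own traffic ... */

	ieee80211_unreserve_tid(sta, 4);
	return 0;
}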
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ *
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
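[Editor's sketch, not part of this patch: a hedged example of splitting an IE buffer so that a few leading elements go into the head of a frame and the remainder after it. The element ID list is illustrative.]

static size_t example_split(const u8 *ies, size_t ielen)
{
	static const u8 before_ids[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_EXT_SUPP_RATES,
	};

	/* returns the offset at which the remainder of the buffer starts */
	return ieee80211_ie_split(ies, ielen, before_ids,
				  ARRAY_SIZE(before_ids), 0);
}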
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 2e67cdd19cdc..c823d910b46c 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -12,14 +12,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
#include <net/af_ieee802154.h>
+#include <linux/ieee802154.h>
#include <linux/skbuff.h>
/* General MAC frame format:
@@ -35,13 +33,13 @@
*/
/* indicates that the Short Address changed */
-#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001
+#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001
/* indicates that the IEEE Address changed */
-#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002
+#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002
/* indicates that the PAN ID changed */
-#define IEEE802515_AFILT_PANID_CHANGED 0x00000004
+#define IEEE802154_AFILT_PANID_CHANGED 0x00000004
/* indicates that PAN Coordinator status changed */
-#define IEEE802515_AFILT_PANC_CHANGED 0x00000008
+#define IEEE802154_AFILT_PANC_CHANGED 0x00000008
struct ieee802154_hw_addr_filt {
__le16 pan_id; /* Each independent PAN selects a unique
@@ -55,7 +53,14 @@ struct ieee802154_hw_addr_filt {
u8 pan_coord;
};
-struct ieee802154_dev {
+struct ieee802154_vif {
+ int type;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+struct ieee802154_hw {
/* filled by the driver */
int extra_tx_headroom;
u32 flags;
@@ -65,6 +70,7 @@ struct ieee802154_dev {
struct ieee802154_hw_addr_filt hw_filt;
void *priv;
struct wpan_phy *phy;
+ size_t vif_data_size;
};
/* Checksum is in hardware and is omitted from a packet
@@ -76,28 +82,43 @@ struct ieee802154_dev {
* however, so you are advised to review these flags carefully.
*/
-/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
-#define IEEE802154_HW_OMIT_CKSUM 0x00000001
+/* Indicates that the transmitter will add the FCS on its own. */
+#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001
/* Indicates that receiver will autorespond with ACK frames. */
-#define IEEE802154_HW_AACK 0x00000002
+#define IEEE802154_HW_AACK 0x00000002
/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER 0x00000004
+#define IEEE802154_HW_TXPOWER 0x00000004
/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT 0x00000008
+#define IEEE802154_HW_LBT 0x00000008
/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE 0x00000010
+#define IEEE802154_HW_CCA_MODE 0x00000010
/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
+#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
* settings. */
-#define IEEE802154_HW_CSMA_PARAMS 0x00000040
+#define IEEE802154_HW_CSMA_PARAMS 0x00000040
/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES 0x00000080
+#define IEEE802154_HW_FRAME_RETRIES 0x00000080
+/* Indicates that transceiver will support hardware address filter setting. */
+#define IEEE802154_HW_AFILT 0x00000100
+/* Indicates that transceiver will support promiscuous mode setting. */
+#define IEEE802154_HW_PROMISCUOUS 0x00000200
+/* Indicates that receiver omits FCS. */
+#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
+/* Indicates that receiver will not filter frames with bad checksum. */
+#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
+
+/* Indicates that the receiver omits the FCS and the transmitter adds it on its own. */
+#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
+ IEEE802154_HW_RX_OMIT_CKSUM)
/* This groups the most common CSMA support fields into one. */
#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
IEEE802154_HW_CCA_ED_LEVEL | \
- IEEE802154_HW_CSMA_PARAMS | \
+ IEEE802154_HW_CSMA_PARAMS)
+
+/* This groups the most common ARET support fields into one. */
+#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
IEEE802154_HW_FRAME_RETRIES)
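[Editor's sketch, not part of this patch: a hedged example of a driver advertising its capabilities through the renamed flags, including the new grouped IEEE802154_HW_ARET. The capability set is illustrative.]

static void example_set_hw_caps(struct ieee802154_hw *hw)
{
	/* transmitter adds the FCS itself, hw address filtering, full ARET */
	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
		    IEEE802154_HW_AFILT |
		    IEEE802154_HW_ARET;
}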
/* struct ieee802154_ops - callbacks from mac802154 to the driver
@@ -112,12 +133,24 @@ struct ieee802154_dev {
* stop: Handler that 802.15.4 module calls for device cleanup.
* This function is called after the last interface is removed.
*
- * xmit: Handler that 802.15.4 module calls for each transmitted frame.
+ * xmit_sync:
+ * Handler that 802.15.4 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
+ * The low-level driver should send the frame based on available
+ * configuration. This is called by a workqueue and useful for
+ * synchronous 802.15.4 drivers.
+ * This function should return zero or negative errno.
+ *
+ * WARNING:
+ * This will be deprecated soon. Drivers with synchronous xmit
+ * callbacks are no longer accepted.
+ *
+ * xmit_async:
+ * Handler that 802.15.4 module calls for each transmitted frame.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration.
- * This function should return zero or negative errno. Called with
- * pib_lock held.
+ * This function should return zero or negative errno.
*
* ed: Handler that 802.15.4 module calls for Energy Detection.
* This function should place the value for detected energy
@@ -159,40 +192,75 @@ struct ieee802154_dev {
* set_frame_retries
* Sets the retransmission attempt limit. Called with pib_lock held.
* Returns either zero, or negative errno.
+ *
+ * set_promiscuous_mode
+ * Enables or disables promiscuous mode.
*/
struct ieee802154_ops {
struct module *owner;
- int (*start)(struct ieee802154_dev *dev);
- void (*stop)(struct ieee802154_dev *dev);
- int (*xmit)(struct ieee802154_dev *dev,
- struct sk_buff *skb);
- int (*ed)(struct ieee802154_dev *dev, u8 *level);
- int (*set_channel)(struct ieee802154_dev *dev,
- int page,
- int channel);
- int (*set_hw_addr_filt)(struct ieee802154_dev *dev,
- struct ieee802154_hw_addr_filt *filt,
+ int (*start)(struct ieee802154_hw *hw);
+ void (*stop)(struct ieee802154_hw *hw);
+ int (*xmit_sync)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*xmit_async)(struct ieee802154_hw *hw,
+ struct sk_buff *skb);
+ int (*ed)(struct ieee802154_hw *hw, u8 *level);
+ int (*set_channel)(struct ieee802154_hw *hw, u8 page,
+ u8 channel);
+ int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr);
- int (*set_txpower)(struct ieee802154_dev *dev, int db);
- int (*set_lbt)(struct ieee802154_dev *dev, bool on);
- int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
- int (*set_cca_ed_level)(struct ieee802154_dev *dev,
+ int (*set_txpower)(struct ieee802154_hw *hw, int db);
+ int (*set_lbt)(struct ieee802154_hw *hw, bool on);
+ int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw,
s32 level);
- int (*set_csma_params)(struct ieee802154_dev *dev,
+ int (*set_csma_params)(struct ieee802154_hw *hw,
u8 min_be, u8 max_be, u8 retries);
- int (*set_frame_retries)(struct ieee802154_dev *dev,
+ int (*set_frame_retries)(struct ieee802154_hw *hw,
s8 retries);
+ int (*set_promiscuous_mode)(struct ieee802154_hw *hw,
+ const bool on);
};
-/* Basic interface to register ieee802154 device */
-struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
-void ieee802154_free_device(struct ieee802154_dev *dev);
-int ieee802154_register_device(struct ieee802154_dev *dev);
-void ieee802154_unregister_device(struct ieee802154_dev *dev);
+/**
+ * ieee802154_be64_to_le64 - copies and converts be64 to le64
+ * @le64_dst: le64 destination pointer
+ * @be64_src: be64 source pointer
+ */
+static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
+{
+ __le64 tmp = (__force __le64)swab64p(be64_src);
+
+ memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+}
-void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb,
+/**
+ * ieee802154_le64_to_be64 - copies and converts le64 to be64
+ * @be64_dst: be64 destination pointer
+ * @le64_src: le64 source pointer
+ */
+static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
+{
+ __be64 tmp = (__force __be64)swab64p(le64_src);
+
+ memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+}
+
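[Editor's sketch, not part of this patch: a hedged example of converting an extended address given in big-endian byte order (as usually printed or configured) into the __le64 form used internally. The address bytes are made up.]

static __le64 example_extended_addr(void)
{
	const u8 addr_be[IEEE802154_EXTENDED_ADDR_LEN] = {
		0x00, 0x12, 0x4b, 0x00, 0x01, 0x02, 0x03, 0x04 };
	__le64 addr_le;

	ieee802154_be64_to_le64(&addr_le, addr_be);
	return addr_le;
}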
+/* Basic interface to register an ieee802154 hardware device */
+struct ieee802154_hw *
+ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+void ieee802154_free_hw(struct ieee802154_hw *hw);
+int ieee802154_register_hw(struct ieee802154_hw *hw);
+void ieee802154_unregister_hw(struct ieee802154_hw *hw);
+
+void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
+void ieee802154_wake_queue(struct ieee802154_hw *hw);
+void ieee802154_stop_queue(struct ieee802154_hw *hw);
+void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling);
+
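[Editor's sketch, not part of this patch: a hedged example of the renamed allocation/registration flow for an 802.15.4 driver. "example_priv" and "example_ops" are illustrative; a real ops table would provide start/stop/xmit_async and friends.]

struct example_priv {
	int dummy;
};

static const struct ieee802154_ops example_ops = {
	.owner = THIS_MODULE,
	/* real callbacks go here */
};

static int example_probe(void)
{
	struct ieee802154_hw *hw;
	int ret;

	hw = ieee802154_alloc_hw(sizeof(struct example_priv), &example_ops);
	if (!hw)
		return -ENOMEM;

	ret = ieee802154_register_hw(hw);
	if (ret)
		ieee802154_free_hw(hw);

	return ret;
}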
#endif /* NET_MAC802154_H */
diff --git a/include/net/mpls.h b/include/net/mpls.h
new file mode 100644
index 000000000000..5b3b5addfb08
--- /dev/null
+++ b/include/net/mpls.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_H
+#define _NET_MPLS_H 1
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#define MPLS_HLEN 4
+
+static inline bool eth_p_mpls(__be16 eth_type)
+{
+ return eth_type == htons(ETH_P_MPLS_UC) ||
+ eth_type == htons(ETH_P_MPLS_MC);
+}
+
+/*
+ * For non-MPLS skbs this will correspond to the network header.
+ * For MPLS skbs it will be before the network_header as the MPLS
+ * label stack lies between the end of the mac header and the network
+ * header. That is, for MPLS skbs the end of the mac header
+ * is the top of the MPLS label stack.
+ */
+static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+{
+ return skb_mac_header(skb) + skb->mac_len;
+}
+#endif
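[Editor's sketch, not part of this patch: a hedged example combining eth_p_mpls() and skb_mpls_header() to read the 20-bit label value from the top label stack entry. The shift/mask constants follow the standard LSE layout and are shown only for illustration.]

static u32 example_top_label(struct sk_buff *skb, __be16 eth_type)
{
	__be32 lse;

	if (!eth_p_mpls(eth_type))
		return 0;

	/* the top of the label stack sits right after the mac header */
	memcpy(&lse, skb_mpls_header(skb), MPLS_HLEN);
	return (ntohl(lse) >> 12) & 0xfffff;	/* 20-bit label value */
}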
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f60558d0254c..eb070b3674a1 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -69,7 +69,7 @@ struct neigh_parms {
struct net *net;
#endif
struct net_device *dev;
- struct neigh_parms *next;
+ struct list_head list;
int (*neigh_setup)(struct neighbour *);
void (*neigh_cleanup)(struct neighbour *);
struct neigh_table *tbl;
@@ -203,6 +203,7 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
+ struct list_head parms_list;
int gc_interval;
int gc_thresh1;
int gc_thresh2;
@@ -219,6 +220,13 @@ struct neigh_table {
struct pneigh_entry **phash_buckets;
};
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_DN_TABLE = 2,
+ NEIGH_NR_TABLES,
+};
+
static inline int neigh_parms_family(struct neigh_parms *p)
{
return p->tbl->family;
@@ -239,8 +247,8 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
-void neigh_table_init(struct neigh_table *tbl);
-int neigh_table_clear(struct neigh_table *tbl);
+void neigh_table_init(int index, struct neigh_table *tbl);
+int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
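[Editor's sketch, not part of this patch: a hedged example of the new indexed registration for a neighbour table, here for ARP. arp_tbl is the existing table object declared in net/arp.h; the call site is illustrative.]

static void example_arp_init(void)
{
	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
}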
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e0d64667a4b3..2e8756b8c775 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,6 +26,7 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <linux/ns_common.h>
struct user_namespace;
struct proc_dir_entry;
@@ -60,7 +61,7 @@ struct net {
struct user_namespace *user_ns; /* Owning user namespace */
- unsigned int proc_inum;
+ struct ns_common ns;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c8a7db605e03..f0daed2b54d1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -92,12 +92,18 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- /* If we were expected by an expectation, this will be it */
- struct nf_conn *master;
-
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
+#ifdef CONFIG_NET_NS
+ struct net *ct_net;
+#endif
+ /* all members below initialized via memset */
+ u8 __nfct_init_offset[0];
+
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+
#if defined(CONFIG_NF_CONNTRACK_MARK)
u_int32_t mark;
#endif
@@ -108,9 +114,6 @@ struct nf_conn {
/* Extensions */
struct nf_ct_ext *ext;
-#ifdef CONFIG_NET_NS
- struct net *ct_net;
-#endif
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
new file mode 100644
index 000000000000..73b729543309
--- /dev/null
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -0,0 +1,12 @@
+#ifndef _NF_NAT_REDIRECT_H_
+#define _NF_NAT_REDIRECT_H_
+
+unsigned int
+nf_nat_redirect_ipv4(struct sk_buff *skb,
+ const struct nf_nat_ipv4_multi_range_compat *mr,
+ unsigned int hooknum);
+unsigned int
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ unsigned int hooknum);
+
+#endif /* _NF_NAT_REDIRECT_H_ */
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
new file mode 100644
index 000000000000..511fb79f6dad
--- /dev/null
+++ b/include/net/netfilter/nf_tables_bridge.h
@@ -0,0 +1,7 @@
+#ifndef _NET_NF_TABLES_BRIDGE_H
+#define _NET_NF_TABLES_BRIDGE_H
+
+int nft_bridge_iphdr_validate(struct sk_buff *skb);
+int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
+
+#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h
new file mode 100644
index 000000000000..a2d67546afab
--- /dev/null
+++ b/include/net/netfilter/nft_redir.h
@@ -0,0 +1,21 @@
+#ifndef _NFT_REDIR_H_
+#define _NFT_REDIR_H_
+
+struct nft_redir {
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
+ u16 flags;
+};
+
+extern const struct nla_policy nft_redir_policy[];
+
+int nft_redir_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data);
+
+#endif /* _NFT_REDIR_H_ */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 7b903e1bdbbb..64158353ecb2 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
#define nla_for_each_nested(pos, nla, rem) \
nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
+/**
+ * nla_is_last - Test if attribute is last in stream
+ * @nla: attribute to test
+ * @rem: bytes remaining in stream
+ */
+static inline bool nla_is_last(const struct nlattr *nla, int rem)
+{
+ return nla->nla_len == rem;
+}
+
#endif
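[Editor's sketch, not part of this patch: a hedged example of using the new nla_is_last() helper while walking a nested attribute stream, e.g. to treat the final attribute specially. The function name is illustrative.]

static void example_walk(const struct nlattr *nested)
{
	struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, nested, rem) {
		if (nla_is_last(attr, rem))
			pr_debug("last attribute, type %d\n", nla_type(attr));
	}
}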
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 9da798256f0e..730d82ad6ee5 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -50,8 +50,8 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
- struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2];
- struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
+ struct hlist_head policy_inexact[XFRM_POLICY_MAX];
+ struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct xfrm_policy_hthresh policy_hthresh;
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
index d9a5cf7ac1c4..0ae101eef0f4 100644
--- a/include/net/nfc/digital.h
+++ b/include/net/nfc/digital.h
@@ -225,6 +225,19 @@ struct nfc_digital_dev {
u8 curr_protocol;
u8 curr_rf_tech;
u8 curr_nfc_dep_pni;
+ u8 did;
+
+ u8 local_payload_max;
+ u8 remote_payload_max;
+
+ struct sk_buff *chaining_skb;
+ struct digital_data_exch *data_exch;
+
+ int atn_count;
+ int nack_count;
+
+ struct sk_buff *saved_skb;
+ unsigned int saved_skb_len;
u16 target_fsc;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 7ee8f4cc610b..14bd0e1c47fa 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -57,10 +57,14 @@ struct nfc_hci_ops {
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+ int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
};
/* Pipes */
#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81
#define NFC_HCI_LINK_MGMT_PIPE 0x00
#define NFC_HCI_ADMIN_PIPE 0x01
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 9eca9ae2280c..e7257a4653b4 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -28,6 +28,8 @@
#ifndef __NCI_H
#define __NCI_H
+#include <net/nfc/nfc.h>
+
/* NCI constants */
#define NCI_MAX_NUM_MAPPING_CONFIGS 10
#define NCI_MAX_NUM_RF_CONFIGS 10
@@ -73,6 +75,8 @@
#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
+#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80
+
/* NCI RF Technologies */
#define NCI_NFC_RF_TECHNOLOGY_A 0x00
#define NCI_NFC_RF_TECHNOLOGY_B 0x01
@@ -106,6 +110,17 @@
/* NCI Configuration Parameter Tags */
#define NCI_PN_ATR_REQ_GEN_BYTES 0x29
+#define NCI_LN_ATR_RES_GEN_BYTES 0x61
+#define NCI_LA_SEL_INFO 0x32
+#define NCI_LF_PROTOCOL_TYPE 0x50
+#define NCI_LF_CON_BITR_F 0x54
+
+/* NCI Configuration Parameters masks */
+#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20
+#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40
+#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02
+#define NCI_LF_CON_BITR_F_212 0x02
+#define NCI_LF_CON_BITR_F_424 0x04
/* NCI Reset types */
#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
@@ -314,26 +329,31 @@ struct nci_core_intf_error_ntf {
struct rf_tech_specific_params_nfca_poll {
__u16 sens_res;
__u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
- __u8 nfcid1[10];
+ __u8 nfcid1[NFC_NFCID1_MAXSIZE];
__u8 sel_res_len; /* 0 or 1 Bytes */
__u8 sel_res;
} __packed;
struct rf_tech_specific_params_nfcb_poll {
__u8 sensb_res_len;
- __u8 sensb_res[12]; /* 11 or 12 Bytes */
+ __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */
} __packed;
struct rf_tech_specific_params_nfcf_poll {
__u8 bit_rate;
__u8 sensf_res_len;
- __u8 sensf_res[18]; /* 16 or 18 Bytes */
+ __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */
} __packed;
struct rf_tech_specific_params_nfcv_poll {
__u8 res_flags;
__u8 dsfid;
- __u8 uid[8]; /* 8 Bytes */
+ __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */
+} __packed;
+
+struct rf_tech_specific_params_nfcf_listen {
+ __u8 local_nfcid2_len;
+ __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */
} __packed;
struct nci_rf_discover_ntf {
@@ -365,7 +385,12 @@ struct activation_params_nfcb_poll_iso_dep {
struct activation_params_poll_nfc_dep {
__u8 atr_res_len;
- __u8 atr_res[63];
+ __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */
+};
+
+struct activation_params_listen_nfc_dep {
+ __u8 atr_req_len;
+ __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */
};
struct nci_rf_intf_activated_ntf {
@@ -382,6 +407,7 @@ struct nci_rf_intf_activated_ntf {
struct rf_tech_specific_params_nfcb_poll nfcb_poll;
struct rf_tech_specific_params_nfcf_poll nfcf_poll;
struct rf_tech_specific_params_nfcv_poll nfcv_poll;
+ struct rf_tech_specific_params_nfcf_listen nfcf_listen;
} rf_tech_specific_params;
__u8 data_exch_rf_tech_and_mode;
@@ -393,6 +419,7 @@ struct nci_rf_intf_activated_ntf {
struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
struct activation_params_poll_nfc_dep poll_nfc_dep;
+ struct activation_params_listen_nfc_dep listen_nfc_dep;
} activation_params;
} __packed;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 75d10e625c49..9e51bb4d841e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -4,6 +4,7 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2013 Intel Corporation. All rights reserved.
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Written by Ilan Elias <ilane@ti.com>
*
@@ -49,6 +50,8 @@ enum nci_state {
NCI_W4_ALL_DISCOVERIES,
NCI_W4_HOST_SELECT,
NCI_POLL_ACTIVE,
+ NCI_LISTEN_ACTIVE,
+ NCI_LISTEN_SLEEP,
};
/* NCI timeouts */
@@ -69,6 +72,12 @@ struct nci_ops {
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
+ int (*discover_se)(struct nci_dev *ndev);
+ int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*enable_se)(struct nci_dev *ndev, u32 se_idx);
+ int (*se_io)(struct nci_dev *ndev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context);
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6c583e244de2..12adb817c27a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Authors:
* Lauro Ramos Venancio <lauro.venancio@openbossa.org>
@@ -87,6 +88,7 @@ struct nfc_ops {
#define NFC_TARGET_IDX_ANY -1
#define NFC_MAX_GT_LEN 48
#define NFC_ATR_RES_GT_OFFSET 15
+#define NFC_ATR_REQ_GT_OFFSET 14
/**
* struct nfc_target - NFC target descriptiom
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b23548e04098..6dbd406ca41b 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -1,126 +1,122 @@
+#ifndef __NL802154_H
+#define __NL802154_H
/*
- * nl802154.h
+ * 802.15.4 netlink interface public header
*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
+ * Copyright 2014 Alexander Aring <aar@pengutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
-#ifndef IEEE802154_NL_H
-#define IEEE802154_NL_H
+#define NL802154_GENL_NAME "nl802154"
-struct net_device;
-struct ieee802154_addr;
+enum nl802154_commands {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_CMD_UNSPEC,
-/**
- * ieee802154_nl_assoc_indic - Notify userland of an association request.
- * @dev: The network device on which this association request was
- * received.
- * @addr: The address of the device requesting association.
- * @cap: The capability information field from the device.
- *
- * This informs a userland coordinator of a device requesting to
- * associate with the PAN controlled by the coordinator.
- *
- * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document.
- */
-int ieee802154_nl_assoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 cap);
-
-/**
- * ieee802154_nl_assoc_confirm - Notify userland of association.
- * @dev: The device which has completed association.
- * @short_addr: The short address assigned to the device.
- * @status: The status of the association.
- *
- * Inform userland of the result of an association request. If the
- * association request included asking the coordinator to allocate
- * a short address then it is returned in @short_addr.
- *
- * Note: This is in section 7.3.2 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_assoc_confirm(struct net_device *dev,
- __le16 short_addr, u8 status);
-
-/**
- * ieee802154_nl_disassoc_indic - Notify userland of disassociation.
- * @dev: The device on which disassociation was indicated.
- * @addr: The device which is disassociating.
- * @reason: The reason for the disassociation.
- *
- * Inform userland that a device has disassociated from the network.
- *
- * Note: This is in section 7.3.3 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_disassoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 reason);
-
-/**
- * ieee802154_nl_disassoc_confirm - Notify userland of disassociation
- * completion.
- * @dev: The device on which disassociation was ordered.
- * @status: The result of the disassociation.
- *
- * Inform userland of the result of requesting that a device
- * disassociate, or the result of requesting that we disassociate from
- * a PAN managed by another coordinator.
- *
- * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_disassoc_confirm(struct net_device *dev,
- u8 status);
-
-/**
- * ieee802154_nl_scan_confirm - Notify userland of completion of scan.
- * @dev: The device which was instructed to scan.
- * @status: The status of the scan operation.
- * @scan_type: What type of scan was performed.
- * @unscanned: Any channels that the device was unable to scan.
- * @edl: The energy levels (if a passive scan).
- *
- *
- * Note: This is in section 7.1.11 of the IEEE 802.15.4 document.
- * Note: This API does not permit the return of an active scan result.
- */
-int ieee802154_nl_scan_confirm(struct net_device *dev,
- u8 status, u8 scan_type, u32 unscanned, u8 page,
- u8 *edl/*, struct list_head *pan_desc_list */);
-
-/**
- * ieee802154_nl_beacon_indic - Notify userland of a received beacon.
- * @dev: The device on which a beacon was received.
- * @panid: The PAN of the coordinator.
- * @coord_addr: The short address of the coordinator on that PAN.
- *
- * Note: This is in section 7.1.5 of the IEEE 802.15.4 document.
- * Note: This API does not provide extended information such as what
- * channel the PAN is on or what the LQI of the beacon frame was on
- * receipt.
- * Note: This API cannot indicate a beacon frame for a coordinator
- * operating in long addressing mode.
- */
-int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
- __le16 coord_addr);
+ NL802154_CMD_GET_WPAN_PHY, /* can dump */
+ NL802154_CMD_SET_WPAN_PHY,
+ NL802154_CMD_NEW_WPAN_PHY,
+ NL802154_CMD_DEL_WPAN_PHY,
-/**
- * ieee802154_nl_start_confirm - Notify userland of completion of start.
- * @dev: The device which was instructed to scan.
- * @status: The status of the scan operation.
- *
- * Note: This is in section 7.1.14 of the IEEE 802.15.4 document.
- */
-int ieee802154_nl_start_confirm(struct net_device *dev, u8 status);
+ NL802154_CMD_GET_INTERFACE, /* can dump */
+ NL802154_CMD_SET_INTERFACE,
+ NL802154_CMD_NEW_INTERFACE,
+ NL802154_CMD_DEL_INTERFACE,
+
+ NL802154_CMD_SET_CHANNEL,
+
+ NL802154_CMD_SET_PAN_ID,
+ NL802154_CMD_SET_SHORT_ADDR,
+
+ NL802154_CMD_SET_TX_POWER,
+ NL802154_CMD_SET_CCA_MODE,
+ NL802154_CMD_SET_CCA_ED_LEVEL,
+
+ NL802154_CMD_SET_MAX_FRAME_RETRIES,
+
+ NL802154_CMD_SET_BACKOFF_EXPONENT,
+ NL802154_CMD_SET_MAX_CSMA_BACKOFFS,
+
+ NL802154_CMD_SET_LBT_MODE,
+
+ /* add new commands above here */
+
+ /* used to define NL802154_CMD_MAX below */
+ __NL802154_CMD_AFTER_LAST,
+ NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
+};
+
+enum nl802154_attrs {
+/* don't change the order or add anything between, this is ABI! */
+/* currently we don't ship this file via uapi, so ignore the above */
+ NL802154_ATTR_UNSPEC,
+
+ NL802154_ATTR_WPAN_PHY,
+ NL802154_ATTR_WPAN_PHY_NAME,
+
+ NL802154_ATTR_IFINDEX,
+ NL802154_ATTR_IFNAME,
+ NL802154_ATTR_IFTYPE,
+
+ NL802154_ATTR_WPAN_DEV,
+
+ NL802154_ATTR_PAGE,
+ NL802154_ATTR_CHANNEL,
+
+ NL802154_ATTR_PAN_ID,
+ NL802154_ATTR_SHORT_ADDR,
+
+ NL802154_ATTR_TX_POWER,
+
+ NL802154_ATTR_CCA_MODE,
+ NL802154_ATTR_CCA_MODE3_AND,
+ NL802154_ATTR_CCA_ED_LEVEL,
+
+ NL802154_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_ATTR_MAX_BE,
+ NL802154_ATTR_MIN_BE,
+ NL802154_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_ATTR_LBT_MODE,
+
+ NL802154_ATTR_GENERATION,
+
+ NL802154_ATTR_CHANNELS_SUPPORTED,
+ NL802154_ATTR_SUPPORTED_CHANNEL,
+
+ NL802154_ATTR_EXTENDED_ADDR,
+
+ /* add attributes here, update the policy in nl802154.c */
+
+ __NL802154_ATTR_AFTER_LAST,
+ NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_iftype {
+ /* for backwards compatibility TODO */
+ NL802154_IFTYPE_UNSPEC = -1,
+
+ NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_MONITOR,
+ NL802154_IFTYPE_COORD,
+
+ /* keep last */
+ NUM_NL802154_IFTYPES,
+ NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
+};
-#endif
+#endif /* __NL802154_H */
diff --git a/include/net/ping.h b/include/net/ping.h
index 026479b61a2d..f074060bc5de 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -82,7 +82,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-void ping_rcv(struct sk_buff *skb);
+bool ping_rcv(struct sk_buff *skb);
#ifdef CONFIG_PROC_FS
struct ping_seq_afinfo {
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index dad7ab20a8cb..b776d72d84be 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -136,6 +136,17 @@ struct regulatory_request {
* otherwise initiating radiation is not allowed. This will enable the
* relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
* option
+ * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
+ * all interfaces on this wiphy reside on allowed channels. If this flag
+ * is not set, upon a regdomain change, the interfaces are given a grace
+ * period (currently 60 seconds) to disconnect or move to an allowed
+ * channel. Interfaces on forbidden channels are forcibly disconnected.
+ * Currently these types of interfaces are supported for enforcement:
+ * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
+ * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
+ * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
+ * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
+ * includes any modes unsupported for enforcement checking.
*/
enum ieee80211_regulatory_flags {
REGULATORY_CUSTOM_REG = BIT(0),
@@ -144,6 +155,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
+ REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
};
struct ieee80211_freq_range {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d17ed6fb2f70..3d282cbb66bf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -219,7 +219,6 @@ struct tcf_proto_ops {
void (*destroy)(struct tcf_proto*);
unsigned long (*get)(struct tcf_proto*, u32 handle);
- void (*put)(struct tcf_proto*, unsigned long);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 72a31db47ded..487ef34bbd63 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
const struct sctp_chunk *,
__u32 tsn);
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
- const struct msghdr *, size_t msg_len);
+ struct msghdr *, size_t msg_len);
struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
const struct sctp_chunk *,
const __u8 *,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4ff3f67be62c..2bb2fcf5b11f 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -531,7 +531,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
- struct msghdr *, int len);
+ struct iov_iter *);
void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
@@ -647,8 +647,8 @@ struct sctp_chunk {
void sctp_chunk_hold(struct sctp_chunk *);
void sctp_chunk_put(struct sctp_chunk *);
-int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
- struct iovec *data);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
@@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
sctp_scope_t sctp_scope(const union sctp_addr *);
int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
-int sctp_addr_is_valid(const union sctp_addr *addr);
int sctp_is_ep_boundall(struct sock *sk);
diff --git a/include/net/sock.h b/include/net/sock.h
index 7ff44e062a38..2210fec65669 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -273,6 +273,7 @@ struct cg_proto;
* @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting
* @sk_rxhash: flow hash received from netif layer
+ * @sk_incoming_cpu: record cpu processing incoming packets
* @sk_txhash: computed flow hash for use on transmit
* @sk_filter: socket filtering instructions
* @sk_protinfo: private area, net family specific, when not using slab
@@ -350,6 +351,12 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
+ u16 sk_incoming_cpu;
+ /* 16bit hole
+	 * Warning: sk_incoming_cpu can be set from softirq,
+ * Do not use this hole without fully understanding possible issues.
+ */
+
__u32 sk_txhash;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
@@ -833,6 +840,11 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return sk->sk_backlog_rcv(sk, skb);
}
+static inline void sk_incoming_cpu_update(struct sock *sk)
+{
+ sk->sk_incoming_cpu = raw_smp_processor_id();
+}
+
static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
@@ -1581,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
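A minimal sketch of the intended use of the new sock_kzfree_s() helper (the surrounding function is made up): it mirrors sock_kfree_s() but clears the memory first, which matters for key material charged to a socket.

#include <net/sock.h>

/* Hypothetical caller: free per-socket key material without leaving the
 * key bytes behind in the freed allocation. */
static void example_free_key(struct sock *sk, u8 *key, int keylen)
{
	sock_kzfree_s(sk, key, keylen);
}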
/*
@@ -1865,29 +1878,6 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
return 0;
}
-static inline int skb_copy_to_page(struct sock *sk, char __user *from,
- struct sk_buff *skb, struct page *page,
- int off, int copy)
-{
- if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from,
- page_address(page) + off,
- copy, 0, &err);
- if (err)
- return err;
- skb->csum = csum_block_add(skb->csum, csum, skb->len);
- } else if (copy_from_user(page_address(page) + off, from, copy))
- return -EFAULT;
-
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
- sk->sk_wmem_queued += copy;
- sk_mem_charge(sk, copy);
- return 0;
-}
-
/**
* sk_wmem_alloc_get - returns write allocations
* @sk: socket
@@ -2269,16 +2259,6 @@ bool sk_ns_capable(const struct sock *sk,
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);
-/*
- * Enable debug/info messages
- */
-extern int net_msg_warn;
-#define NETDEBUG(fmt, args...) \
- do { if (net_msg_warn) printk(fmt,##args); } while (0)
-
-#define LIMIT_NETDEBUG(fmt, args...) \
- do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
-
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
new file mode 100644
index 000000000000..8a6d1641fd9b
--- /dev/null
+++ b/include/net/switchdev.h
@@ -0,0 +1,37 @@
+/*
+ * include/net/switchdev.h - Switch device API
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _LINUX_SWITCHDEV_H_
+#define _LINUX_SWITCHDEV_H_
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NET_SWITCHDEV
+
+int netdev_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid);
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
+
+#else
+
+static inline int netdev_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netdev_switch_port_stp_update(struct net_device *dev,
+ u8 state)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_SWITCHDEV_H_ */
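A short, hedged usage sketch of the new switchdev API (the helper below is illustrative, not from the patch); note how the !CONFIG_NET_SWITCHDEV stubs above make -EOPNOTSUPP the common "not a switch port" answer:

#include <net/switchdev.h>

/* Hypothetical caller: report the switch ID of a port netdevice, or do
 * nothing when the device is not backed by a switchdev-capable driver. */
static void example_show_switch_id(struct net_device *dev)
{
	struct netdev_phys_item_id psid;
	int err;

	err = netdev_switch_parent_id_get(dev, &psid);
	if (err) {
		/* -EOPNOTSUPP: no switchdev support compiled in, or the
		 * driver does not implement the operation. */
		return;
	}
	netdev_info(dev, "switch id length %u\n", psid.id_len);
}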
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
new file mode 100644
index 000000000000..93b70ade1ff3
--- /dev/null
+++ b/include/net/tc_act/tc_vlan.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_VLAN_H
+#define __NET_TC_VLAN_H
+
+#include <net/act_api.h>
+
+#define VLAN_F_POP 0x1
+#define VLAN_F_PUSH 0x2
+
+struct tcf_vlan {
+ struct tcf_common common;
+ int tcfv_action;
+ u16 tcfv_push_vid;
+ __be16 tcfv_push_proto;
+};
+#define to_vlan(a) \
+ container_of(a->priv, struct tcf_vlan, common)
+
+#endif /* __NET_TC_VLAN_H */
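For context, a hedged sketch of how an action implementation typically recovers its private state through the to_vlan() helper (the accessor name is made up; only the container_of() pattern comes from the header):

#include <net/tc_act/tc_vlan.h>

/* Hypothetical accessor: fetch the VLAN ID this vlan action would push. */
static u16 example_vlan_act_push_vid(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);

	return v->tcfv_push_vid;
}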
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4062b4f0d121..f50f29faf76f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -55,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
-/*
+/*
* Never offer a window over 32767 without using window scaling. Some
- * poor stacks do signed 16bit maths!
+ * poor stacks do signed 16bit maths!
*/
#define MAX_TCP_WINDOW 32767U
@@ -70,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
-/* Maximal reordering. */
-#define TCP_MAX_REORDERING 127
-
/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U
@@ -167,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/*
* TCP option
*/
-
+
#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
#define TCPOPT_MSS 2 /* Segment size negotiating */
@@ -252,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
+extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
@@ -492,17 +490,16 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
-#endif
-
__u32 cookie_init_timestamp(struct request_sock *req);
-bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
- bool *ecn_ok);
+bool cookie_timestamp_decode(struct tcp_options_received *opt);
+bool cookie_ecn_ok(const struct tcp_options_received *opt,
+ const struct net *net, const struct dst_entry *dst);
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
-#ifdef CONFIG_SYN_COOKIES
+
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
@@ -1104,16 +1101,16 @@ static inline int tcp_win_from_space(int space)
space - (space>>sysctl_tcp_adv_win_scale);
}
-/* Note: caller must be prepared to deal with negative returns */
+/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
return tcp_win_from_space(sk->sk_rcvbuf -
atomic_read(&sk->sk_rmem_alloc));
-}
+}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk->sk_rcvbuf);
+ return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_openreq_init(struct request_sock *req,
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 2caadabcd07b..ae7c8d1fbcad 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -19,7 +19,9 @@ extern struct udp_table udplite_table;
static __inline__ int udplite_getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
- return memcpy_fromiovecend(to, (struct iovec *) from, offset, len);
+ struct msghdr *msg = from;
+ /* XXX: stripping const */
+ return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
}
/* Designate sk as UDP-Lite socket */
@@ -40,7 +42,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
* checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
* with a zero checksum field are illegal. */
if (uh->check == 0) {
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n");
+ net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
return 1;
}
@@ -52,8 +54,8 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
/*
* Coverage length violates RFC 3828: log and discard silently.
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n",
- cscov, skb->len);
+ net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
+ cscov, skb->len);
return 1;
} else if (cscov < skb->len) {
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
deleted file mode 100644
index 10ab0fc6d4f7..000000000000
--- a/include/net/wpan-phy.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#ifndef WPAN_PHY_H
-#define WPAN_PHY_H
-
-#include <linux/netdevice.h>
-#include <linux/mutex.h>
-#include <linux/bug.h>
-
-/* According to the IEEE 802.15.4 stadard the upper most significant bits of
- * the 32-bit channel bitmaps shall be used as an integer value to specify 32
- * possible channel pages. The lower 27 bits of the channel bit map shall be
- * used as a bit mask to specify channel numbers within a channel page.
- */
-#define WPAN_NUM_CHANNELS 27
-#define WPAN_NUM_PAGES 32
-
-struct wpan_phy {
- struct mutex pib_lock;
-
- /*
- * This is a PIB according to 802.15.4-2011.
- * We do not provide timing-related variables, as they
- * aren't used outside of driver
- */
- u8 current_channel;
- u8 current_page;
- u32 channels_supported[32];
- s8 transmit_power;
- u8 cca_mode;
- u8 min_be;
- u8 max_be;
- u8 csma_retries;
- s8 frame_retries;
-
- bool lbt;
- s32 cca_ed_level;
-
- struct device dev;
- int idx;
-
- struct net_device *(*add_iface)(struct wpan_phy *phy,
- const char *name, int type);
- void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
-
- int (*set_txpower)(struct wpan_phy *phy, int db);
- int (*set_lbt)(struct wpan_phy *phy, bool on);
- int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
- int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
- int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
- u8 retries);
- int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
-
- char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
-};
-
-#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
-
-struct wpan_phy *wpan_phy_alloc(size_t priv_size);
-static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
-{
- phy->dev.parent = dev;
-}
-int wpan_phy_register(struct wpan_phy *phy);
-void wpan_phy_unregister(struct wpan_phy *phy);
-void wpan_phy_free(struct wpan_phy *phy);
-/* Same semantics as for class_for_each_device */
-int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data);
-
-static inline void *wpan_phy_priv(struct wpan_phy *phy)
-{
- BUG_ON(!phy);
- return &phy->priv;
-}
-
-struct wpan_phy *wpan_phy_find(const char *str);
-
-static inline void wpan_phy_put(struct wpan_phy *phy)
-{
- put_device(&phy->dev);
-}
-
-static inline const char *wpan_phy_name(struct wpan_phy *phy)
-{
- return dev_name(&phy->dev);
-}
-#endif
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a2bf41e0bde9..2d83cfd7e6ce 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,11 +38,12 @@
#include <linux/workqueue.h>
struct ib_ucontext;
+struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
size_t length;
- int offset;
+ unsigned long address;
int page_size;
int writable;
int hugetlb;
@@ -50,17 +51,43 @@ struct ib_umem {
struct pid *pid;
struct mm_struct *mm;
unsigned long diff;
+ struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
};
+/* Returns the offset of the umem start relative to the first page. */
+static inline int ib_umem_offset(struct ib_umem *umem)
+{
+ return umem->address & ((unsigned long)umem->page_size - 1);
+}
+
+/* Returns the first page of an ODP umem. */
+static inline unsigned long ib_umem_start(struct ib_umem *umem)
+{
+ return umem->address - ib_umem_offset(umem);
+}
+
+/* Returns the address of the page after the last one of an ODP umem. */
+static inline unsigned long ib_umem_end(struct ib_umem *umem)
+{
+ return PAGE_ALIGN(umem->address + umem->length);
+}
+
+static inline size_t ib_umem_num_pages(struct ib_umem *umem)
+{
+ return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
+}
+
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
size_t size, int access, int dmasync);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
+int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -73,7 +100,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
-
+static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ size_t length) {
+ return -EINVAL;
+}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
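A worked example of the new geometry helpers (sketch only; the printing function is hypothetical): with a 4 KiB page_size, a umem registered at address 0x10234 with length 0x2000 yields offset 0x234, start 0x10000, end 0x13000 and three pages.

#include <linux/kernel.h>
#include <rdma/ib_umem.h>

/* Hypothetical debug helper dumping the umem geometry computed above. */
static void example_dump_umem(struct ib_umem *umem)
{
	pr_info("umem: offset %d start %#lx end %#lx pages %zu\n",
		ib_umem_offset(umem), ib_umem_start(umem),
		ib_umem_end(umem), ib_umem_num_pages(umem));
}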
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
new file mode 100644
index 000000000000..3da0b167041b
--- /dev/null
+++ b/include/rdma/ib_umem_odp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_UMEM_ODP_H
+#define IB_UMEM_ODP_H
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <linux/interval_tree.h>
+
+struct umem_odp_node {
+ u64 __subtree_last;
+ struct rb_node rb;
+};
+
+struct ib_umem_odp {
+ /*
+ * An array of the pages included in the on-demand paging umem.
+ * Indices of pages that are currently not mapped into the device will
+ * contain NULL.
+ */
+ struct page **page_list;
+ /*
+ * An array of the same size as page_list, with DMA addresses mapped
+	 * for the pages in page_list. The lower two bits designate
+ * access permissions. See ODP_READ_ALLOWED_BIT and
+ * ODP_WRITE_ALLOWED_BIT.
+ */
+ dma_addr_t *dma_list;
+ /*
+ * The umem_mutex protects the page_list and dma_list fields of an ODP
+ * umem, allowing only a single thread to map/unmap pages. The mutex
+ * also protects access to the mmu notifier counters.
+ */
+ struct mutex umem_mutex;
+ void *private; /* for the HW driver to use. */
+
+ /* When false, use the notifier counter in the ucontext struct. */
+ bool mn_counters_active;
+ int notifiers_seq;
+ int notifiers_count;
+
+ /* A linked list of umems that don't have private mmu notifier
+ * counters yet. */
+ struct list_head no_private_counters;
+ struct ib_umem *umem;
+
+ /* Tree tracking */
+ struct umem_odp_node interval_tree;
+
+ struct completion notifier_completion;
+ int dying;
+};
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
+
+void ib_umem_odp_release(struct ib_umem *umem);
+
+/*
+ * The lower 2 bits of the DMA address signal the R/W permissions for
+ * the entry. To upgrade the permissions, provide the appropriate
+ * bitmask to the map_dma_pages function.
+ *
+ * Be aware that upgrading a mapped address might result in change of
+ * the DMA address for the page.
+ */
+#define ODP_READ_ALLOWED_BIT (1<<0ULL)
+#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
+
+#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
+
+int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
+ u64 access_mask, unsigned long current_seq);
+
+void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+ u64 bound);
+
+void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
+void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
+typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+ void *cookie);
+/*
+ * Call the callback on each ib_umem in the range. Returns the logical or of
+ * the return values of the functions called.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
+ umem_call_back cb, void *cookie);
+
+struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
+ u64 start, u64 last);
+struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
+ u64 start, u64 last);
+
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+ unsigned long mmu_seq)
+{
+ /*
+ * This code is strongly based on the KVM code from
+ * mmu_notifier_retry. Should be called with
+ * the relevant locks taken (item->odp_data->umem_mutex
+ * and the ucontext umem_mutex semaphore locked for read).
+ */
+
+ /* Do not allow page faults while the new ib_umem hasn't seen a state
+ * with zero notifiers yet, and doesn't have its own valid set of
+ * private counters. */
+ if (!item->odp_data->mn_counters_active)
+ return 1;
+
+ if (unlikely(item->odp_data->notifiers_count))
+ return 1;
+ if (item->odp_data->notifiers_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+static inline int ib_umem_odp_get(struct ib_ucontext *context,
+ struct ib_umem *umem)
+{
+ return -EINVAL;
+}
+
+static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+#endif /* IB_UMEM_ODP_H */
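As a hedged sketch of how the permission bits in dma_list are meant to be consumed (the helper is not from the patch and assumes CONFIG_INFINIBAND_ON_DEMAND_PAGING, with umem_mutex held by the caller):

#include <rdma/ib_umem_odp.h>

/* Hypothetical check: is the i-th page of an ODP umem currently mapped
 * with write permission? A zero entry means the page is not mapped. */
static bool example_odp_page_writable(struct ib_umem *umem, size_t i)
{
	dma_addr_t dma = umem->odp_data->dma_list[i];

	return dma && (dma & ODP_WRITE_ALLOWED_BIT);
}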
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 470a011d6fa4..0d74f1de99aa 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -51,6 +51,7 @@
#include <uapi/linux/if_ether.h>
#include <linux/atomic.h>
+#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
extern struct workqueue_struct *ib_wq;
@@ -123,7 +124,8 @@ enum ib_device_cap_flags {
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
- IB_DEVICE_SIGNATURE_HANDOVER = (1<<30)
+ IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
+ IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
};
enum ib_signature_prot_cap {
@@ -143,6 +145,27 @@ enum ib_atomic_cap {
IB_ATOMIC_GLOB
};
+enum ib_odp_general_cap_bits {
+ IB_ODP_SUPPORT = 1 << 0,
+};
+
+enum ib_odp_transport_cap_bits {
+ IB_ODP_SUPPORT_SEND = 1 << 0,
+ IB_ODP_SUPPORT_RECV = 1 << 1,
+ IB_ODP_SUPPORT_WRITE = 1 << 2,
+ IB_ODP_SUPPORT_READ = 1 << 3,
+ IB_ODP_SUPPORT_ATOMIC = 1 << 4,
+};
+
+struct ib_odp_caps {
+ uint64_t general_caps;
+ struct {
+ uint32_t rc_odp_caps;
+ uint32_t uc_odp_caps;
+ uint32_t ud_odp_caps;
+ } per_transport_caps;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -186,6 +209,7 @@ struct ib_device_attr {
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
+ struct ib_odp_caps odp_caps;
};
enum ib_mtu {
@@ -1073,7 +1097,8 @@ enum ib_access_flags {
IB_ACCESS_REMOTE_READ = (1<<2),
IB_ACCESS_REMOTE_ATOMIC = (1<<3),
IB_ACCESS_MW_BIND = (1<<4),
- IB_ZERO_BASED = (1<<5)
+ IB_ZERO_BASED = (1<<5),
+ IB_ACCESS_ON_DEMAND = (1<<6),
};
struct ib_phys_buf {
@@ -1115,6 +1140,8 @@ struct ib_fmr_attr {
u8 page_shift;
};
+struct ib_umem;
+
struct ib_ucontext {
struct ib_device *device;
struct list_head pd_list;
@@ -1127,6 +1154,24 @@ struct ib_ucontext {
struct list_head xrcd_list;
struct list_head rule_list;
int closing;
+
+ struct pid *tgid;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct rb_root umem_tree;
+ /*
+	 * Protects the umem_tree and its entries, as well as odp_mrs_count and
+ * mmu notifiers registration.
+ */
+ struct rw_semaphore umem_rwsem;
+ void (*invalidate_range)(struct ib_umem *umem,
+ unsigned long start, unsigned long end);
+
+ struct mmu_notifier mn;
+ atomic_t notifier_count;
+ /* A list of umems that don't have private mmu notifier counters yet. */
+ struct list_head no_private_counters;
+ int odp_mrs_count;
+#endif
};
struct ib_uobject {
@@ -1662,7 +1707,10 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
- return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
+ size_t copy_sz;
+
+ copy_sz = min_t(size_t, len, udata->outlen);
+ return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
}
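The effect of the clamping above, shown as a hedged sketch (the response layout is invented): a provider whose response struct has grown no longer writes past the buffer an older userspace supplied; at most udata->outlen bytes reach the user.

#include <rdma/ib_verbs.h>

/* Hypothetical provider reply: the trailing field is silently truncated
 * for callers that allocated only the original, shorter response. */
static int example_fill_reply(struct ib_udata *udata)
{
	struct { u32 a; u32 b; u64 added_later; } resp = { 1, 2, 3 };

	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}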
/**
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 832dcc9f86ec..9d87a37aecad 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,17 +161,12 @@ struct expander_device {
};
/* ---------- SATA device ---------- */
-enum ata_command_set {
- ATA_COMMAND_SET = 0,
- ATAPI_COMMAND_SET = 1,
-};
-
#define ATA_RESP_FIS_SIZE 24
struct sata_device {
- enum ata_command_set command_set;
- struct smp_resp rps_resp; /* report_phy_sata_resp */
- u8 port_no; /* port number, if this is a PM (Port) */
+ unsigned int class;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+ u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
struct ata_host ata_host;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index c8a462ef9a4e..e939d2b3757a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -554,7 +554,7 @@ struct Scsi_Host {
* __devices is protected by the host_lock, but you should
* usually use scsi_device_lookup / shost_for_each_device
* to access it and don't care about locking yourself.
- * In the rare case of beeing in irq context you can use
+ * In the rare case of being in irq context you can use
* their __ prefixed variants with the lock held. NEVER
* access this list directly from a driver.
*/
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ae6c3b8ed2f5..396e8f73670a 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -42,12 +42,11 @@ struct snd_compr_ops;
* @buffer_size: size of the above buffer
* @fragment_size: size of buffer fragment in bytes
* @fragments: number of such fragments
- * @hw_pointer: offset of last location in buffer where DSP copied data
- * @app_pointer: offset of last location in buffer where app wrote data
* @total_bytes_available: cumulative number of bytes made available in
* the ring buffer
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
+ * @private_data: driver private data pointer
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
@@ -94,6 +93,8 @@ struct snd_compr_stream {
* This can be called in during stream creation only to set codec params
* and the stream properties
* @get_params: retrieve the codec parameters, mandatory
+ * @set_metadata: Set the metadata values for a stream
+ * @get_metadata: retrieves the requested metadata values from the stream
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 58916573db58..218235030ebc 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -28,8 +28,23 @@
struct input_dev;
/**
- * Jack types which can be reported. These values are used as a
- * bitmask.
+ * enum snd_jack_types - Jack types which can be reported
+ * @SND_JACK_HEADPHONE: Headphone
+ * @SND_JACK_MICROPHONE: Microphone
+ * @SND_JACK_HEADSET: Headset
+ * @SND_JACK_LINEOUT: Line out
+ * @SND_JACK_MECHANICAL: Mechanical switch
+ * @SND_JACK_VIDEOOUT: Video out
+ * @SND_JACK_AVOUT: AV (Audio Video) out
+ * @SND_JACK_LINEIN: Line in
+ * @SND_JACK_BTN_0: Button 0
+ * @SND_JACK_BTN_1: Button 1
+ * @SND_JACK_BTN_2: Button 2
+ * @SND_JACK_BTN_3: Button 3
+ * @SND_JACK_BTN_4: Button 4
+ * @SND_JACK_BTN_5: Button 5
+ *
+ * These values are used as a bitmask.
*
* Note that this must be kept in sync with the lookup table in
* sound/core/jack.c.
@@ -90,6 +105,13 @@ static inline void snd_jack_set_parent(struct snd_jack *jack,
{
}
+static inline int snd_jack_set_key(struct snd_jack *jack,
+ enum snd_jack_types type,
+ int keytype)
+{
+ return 0;
+}
+
static inline void snd_jack_report(struct snd_jack *jack, int status)
{
}
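A hedged sketch tying the documented jack types and the new snd_jack_set_key() stub together (card handling and key choice are assumptions, not from the patch):

#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>

/* Hypothetical driver fragment: create a headset jack with one button
 * and map that button to KEY_PLAYPAUSE. With CONFIG_SND_JACK disabled,
 * both calls fall back to the inline stubs above. */
static int example_add_headset_jack(struct snd_card *card,
				    struct snd_jack **jack)
{
	int err;

	err = snd_jack_new(card, "Headset",
			   SND_JACK_HEADSET | SND_JACK_BTN_0, jack);
	if (err < 0)
		return err;

	return snd_jack_set_key(*jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
}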
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
new file mode 100644
index 000000000000..afdb416898e0
--- /dev/null
+++ b/include/sound/omap-hdmi-audio.h
@@ -0,0 +1,43 @@
+/*
+ * hdmi-audio.c -- OMAP4+ DSS HDMI audio support library
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <video/omapdss.h>
+
+#ifndef __OMAP_HDMI_AUDIO_H__
+#define __OMAP_HDMI_AUDIO_H__
+
+struct omap_hdmi_audio_ops {
+ int (*audio_startup)(struct device *dev,
+ void (*abort_cb)(struct device *dev));
+ int (*audio_shutdown)(struct device *dev);
+ int (*audio_start)(struct device *dev);
+ void (*audio_stop)(struct device *dev);
+ int (*audio_config)(struct device *dev,
+ struct omap_dss_audio *dss_audio);
+};
+
+/* HDMI audio initialization data */
+struct omap_hdmi_audio_pdata {
+ struct device *dev;
+ enum omapdss_version dss_version;
+ phys_addr_t audio_dma_addr;
+
+ const struct omap_hdmi_audio_ops *ops;
+};
+
+#endif /* __OMAP_HDMI_AUDIO_H__ */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8bb00a27e219..1e7f74acc2ec 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -418,7 +418,10 @@ struct snd_pcm_substream {
struct snd_info_entry *proc_status_entry;
struct snd_info_entry *proc_prealloc_entry;
struct snd_info_entry *proc_prealloc_max_entry;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ struct snd_info_entry *proc_xrun_injection_entry;
#endif
+#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
};
@@ -505,6 +508,7 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
int snd_pcm_start(struct snd_pcm_substream *substream);
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status);
int snd_pcm_drain_done(struct snd_pcm_substream *substream);
+int snd_pcm_stop_xrun(struct snd_pcm_substream *substream);
#ifdef CONFIG_PM
int snd_pcm_suspend(struct snd_pcm_substream *substream);
int snd_pcm_suspend_all(struct snd_pcm *pcm);
@@ -535,6 +539,12 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* PCM library
*/
+/**
+ * snd_pcm_stream_linked - Check whether the substream is linked with others
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is linked with others.
+ */
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
return substream->group != &substream->self_group;
@@ -545,6 +555,16 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+
+/**
+ * snd_pcm_stream_lock_irqsave - Lock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() but with the local
+ * This locks the PCM stream like snd_pcm_stream_lock() but with the local
+ * IRQ disabled (only when nonatomic is false). In the nonatomic case, this is
+ * identical to snd_pcm_stream_lock().
#define snd_pcm_stream_lock_irqsave(substream, flags) \
do { \
typecheck(unsigned long, flags); \
@@ -553,9 +573,25 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
+/**
+ * snd_pcm_group_for_each_entry - iterate over the linked substreams
+ * @s: the iterator
+ * @substream: the substream
+ *
+ * Iterate over all the substreams linked to the given @substream.
+ * When @substream isn't linked with any others, this returns @substream
+ * itself once.
+ */
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
+/**
+ * snd_pcm_running - Check whether the substream is in a running state
+ * @substream: substream to check
+ *
+ * Returns true if the given substream is in the state RUNNING, or in the
+ * state DRAINING for playback.
+ */
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
@@ -563,45 +599,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
+/**
+ * bytes_to_samples - Unit conversion of the size from bytes to samples
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->sample_bits;
}
+/**
+ * bytes_to_frames - Unit conversion of the size from bytes to frames
+ * @runtime: PCM runtime instance
+ * @size: size in bytes
+ */
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * 8 / runtime->frame_bits;
}
+/**
+ * samples_to_bytes - Unit conversion of the size from samples to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in samples
+ */
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
return size * runtime->sample_bits / 8;
}
+/**
+ * frames_to_bytes - Unit conversion of the size from frames to bytes
+ * @runtime: PCM runtime instance
+ * @size: size in frames
+ */
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
return size * runtime->frame_bits / 8;
}
+/**
+ * frame_aligned - Check whether the byte size is aligned to frames
+ * @runtime: PCM runtime instance
+ * @bytes: size in bytes
+ */
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
return bytes % runtime->byte_align == 0;
}
+/**
+ * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->buffer_size);
}
+/**
+ * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
+ * @substream: PCM substream
+ */
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return frames_to_bytes(runtime, runtime->period_size);
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_playback_avail - Get the available (writable) space for playback
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -613,8 +685,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
return avail;
}
-/*
- * result is: 0 ... (boundary - 1)
+/**
+ * snd_pcm_capture_avail - Get the available (readable) space for capture
+ * @runtime: PCM runtime instance
+ *
+ * Result is between 0 ... (boundary - 1)
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -624,11 +699,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
return avail;
}
+/**
+ * snd_pcm_playback_hw_avail - Get the queued space for playback
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_playback_avail(runtime);
}
+/**
+ * snd_pcm_capture_hw_avail - Get the free space for capture
+ * @runtime: PCM runtime instance
+ */
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
return runtime->buffer_size - snd_pcm_capture_avail(runtime);
@@ -708,6 +791,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream)
return snd_pcm_capture_avail(runtime) == 0;
}
+/**
+ * snd_pcm_trigger_done - Mark the master substream
+ * @substream: the pcm substream instance
+ * @master: the linked master substream
+ *
+ * When multiple substreams of the same card are linked and the hardware
+ * supports the single-shot operation, the driver calls this in the loop
+ * in snd_pcm_group_for_each_entry() for marking the substream as "done".
+ * Then most trigger operations are performed only on the given master
+ * substream.
+ *
+ * The trigger_master mark is cleared at timestamp updates at the end
+ * of trigger operations.
+ */
static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream,
struct snd_pcm_substream *master)
{
@@ -750,18 +847,59 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL];
}
-#define params_channels(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min)
-#define params_rate(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_RATE)->min)
-#define params_period_size(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min)
-#define params_periods(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIODS)->min)
-#define params_buffer_size(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min)
-#define params_buffer_bytes(p) \
- (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min)
+/**
+ * params_channels - Get the number of channels from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min;
+}
+
+/**
+ * params_rate - Get the sample rate from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min;
+}
+
+/**
+ * params_period_size - Get the period size (in frames) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min;
+}
+
+/**
+ * params_periods - Get the number of periods from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min;
+}
+
+/**
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min;
+}
+
+/**
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
+ * @p: hw params
+ */
+static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
+{
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min;
+}
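A minimal hw_params sketch using the new typed accessors (the driver context and names are assumed, not part of the patch):

#include <sound/core.h>
#include <sound/pcm.h>

/* Hypothetical callback: read the negotiated configuration through the
 * inline accessors instead of poking hw_param_interval_c() directly. */
static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	dev_dbg(substream->pcm->card->dev, "%u Hz, %u ch, %u bytes\n",
		params_rate(params), params_channels(params),
		params_buffer_bytes(params));
	return 0;
}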
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v);
void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c);
@@ -883,6 +1021,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
unsigned int rates_b);
+/**
+ * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
+ * @substream: PCM substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream,
struct snd_dma_buffer *bufp)
{
@@ -908,6 +1054,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
void snd_pcm_timer_init(struct snd_pcm_substream *substream);
void snd_pcm_timer_done(struct snd_pcm_substream *substream);
+/**
+ * snd_pcm_gettime - Fill the timespec depending on the timestamp mode
+ * @runtime: PCM runtime instance
+ * @tv: timespec to fill
+ */
static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
struct timespec *tv)
{
@@ -944,7 +1095,6 @@ int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
unsigned long offset);
-#if 0 /* for kernel-doc */
/**
* snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
* @substream: the substream to allocate the buffer to
@@ -957,8 +1107,13 @@ struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
* Return: 1 if the buffer was changed, 0 if not changed, or a negative error
* code.
*/
-static int snd_pcm_lib_alloc_vmalloc_buffer
- (struct snd_pcm_substream *substream, size_t size);
+static inline int snd_pcm_lib_alloc_vmalloc_buffer
+ (struct snd_pcm_substream *substream, size_t size)
+{
+ return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+
/**
* snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
* @substream: the substream to allocate the buffer to
@@ -970,15 +1125,12 @@ static int snd_pcm_lib_alloc_vmalloc_buffer
* Return: 1 if the buffer was changed, 0 if not changed, or a negative error
* code.
*/
-static int snd_pcm_lib_alloc_vmalloc_32_buffer
- (struct snd_pcm_substream *substream, size_t size);
-#endif
-#define snd_pcm_lib_alloc_vmalloc_buffer(subs, size) \
- _snd_pcm_lib_alloc_vmalloc_buffer \
- (subs, size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)
-#define snd_pcm_lib_alloc_vmalloc_32_buffer(subs, size) \
- _snd_pcm_lib_alloc_vmalloc_buffer \
- (subs, size, GFP_KERNEL | GFP_DMA32 | __GFP_ZERO)
+static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
+ (struct snd_pcm_substream *substream, size_t size)
+{
+ return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
+ GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+}
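And a hedged example of calling the wrapper from a driver hw_params callback (again an assumption about the surrounding driver, not part of the patch):

#include <sound/pcm.h>

/* Hypothetical hw_params: back the substream with a vmalloc'ed buffer
 * sized from the negotiated parameters. */
static int example_vmalloc_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(params));
}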
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
@@ -998,18 +1150,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
+/**
+ * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
+ * @substream: PCM substream
+ * @ofs: byte offset
+ */
static inline void *
snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
{
return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
}
+/**
+ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the contig.
+ * page from the given size
+ * @substream: PCM substream
+ * @ofs: byte offset
+ * @size: byte size to examine
+ */
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
unsigned int ofs, unsigned int size)
@@ -1017,13 +1186,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size);
}
-/* handle mmap counter - PCM mmap callback should handle this counter properly */
+/**
+ * snd_pcm_mmap_data_open - increase the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
atomic_inc(&substream->mmap_count);
}
+/**
+ * snd_pcm_mmap_data_close - decrease the mmap counter
+ * @area: VMA
+ *
+ * PCM mmap callback should handle this counter properly
+ */
static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area)
{
struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
@@ -1043,6 +1223,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_vmalloc NULL
+/**
+ * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
+ * @dma: DMA number
+ * @max: pointer to store the max size
+ */
static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
{
*max = dma < 4 ? 64 * 1024 : 128 * 1024;
@@ -1095,7 +1280,11 @@ struct snd_pcm_chmap {
void *private_data; /* optional: private data pointer */
};
-/* get the PCM substream assigned to the given chmap info */
+/**
+ * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
+ * @info: chmap information
+ * @idx: the substream number index
+ */
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
{
@@ -1122,7 +1311,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
unsigned long private_value,
struct snd_pcm_chmap **info_ret);
-/* Strong-typed conversion of pcm_format to bitwise */
+/**
+ * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
+ * @pcm_format: PCM format
+ */
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
return 1ULL << (__force int) pcm_format;
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index d76412b84b48..83284cae464c 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -36,14 +36,14 @@
#define RSND_SSI_CLK_PIN_SHARE (1 << 31)
#define RSND_SSI_NO_BUSIF (1 << 30) /* SSI+DMA without BUSIF */
-#define RSND_SSI(_dma_id, _pio_irq, _flags) \
-{ .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags }
+#define RSND_SSI(_dma_id, _irq, _flags) \
+{ .dma_id = _dma_id, .irq = _irq, .flags = _flags }
#define RSND_SSI_UNUSED \
-{ .dma_id = -1, .pio_irq = -1, .flags = 0 }
+{ .dma_id = -1, .irq = -1, .flags = 0 }
struct rsnd_ssi_platform_info {
int dma_id;
- int pio_irq;
+ int irq;
u32 flags;
};
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index a5352712194b..120d9610054e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -23,6 +23,10 @@ struct rt5645_platform_data {
unsigned int hp_det_gpio;
bool gpio_hp_det_active_high;
+
+ /* true if codec's jd function is used */
+ bool en_jd_func;
+ unsigned int jd_mode;
};
#endif
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index 082670e3a353..d9eb7d861cd0 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -27,6 +27,16 @@ struct rt5677_platform_data {
bool lout3_diff;
/* DMIC2 clock source selection */
enum rt5677_dmic2_clk dmic2_clk_pin;
+
+ /* configures GPIO, 0 - floating, 1 - pulldown, 2 - pullup */
+ u8 gpio_config[6];
+
+ /* jd1 can select 0 ~ 3 as OFF, GPIO1, GPIO2 and GPIO3 respectively */
+ unsigned int jd1_gpio;
+ /* jd2 and jd3 can select 0 ~ 3 as
+ OFF, GPIO4, GPIO5 and GPIO6 respectively */
+ unsigned int jd2_gpio;
+ unsigned int jd3_gpio;
};
#endif
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 2398521f0998..eea5400fe373 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -108,9 +108,13 @@ int snd_seq_event_port_detach(int client, int port);
#ifdef CONFIG_MODULES
void snd_seq_autoload_lock(void);
void snd_seq_autoload_unlock(void);
+void snd_seq_autoload_init(void);
+#define snd_seq_autoload_exit() snd_seq_autoload_lock()
#else
#define snd_seq_autoload_lock()
#define snd_seq_autoload_unlock()
+#define snd_seq_autoload_init()
+#define snd_seq_autoload_exit()
#endif
#endif /* __SOUND_SEQ_KERNEL_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e8b3080d196a..2df96b1384c7 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -206,7 +206,6 @@ struct snd_soc_dai_driver {
/* DAI description */
const char *name;
unsigned int id;
- int ac97_control;
unsigned int base;
/* DAI driver callbacks */
@@ -216,6 +215,8 @@ struct snd_soc_dai_driver {
int (*resume)(struct snd_soc_dai *dai);
/* compress dai */
bool compress_dai;
+ /* DAI is also used for the control bus */
+ bool bus_control;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -241,7 +242,6 @@ struct snd_soc_dai {
const char *name;
int id;
struct device *dev;
- void *ac97_pdata; /* platform_data for the ac97 codec */
/* driver ops */
struct snd_soc_dai_driver *driver;
@@ -268,7 +268,6 @@ struct snd_soc_dai {
unsigned int sample_bits;
/* parent platform/codec */
- struct snd_soc_platform *platform;
struct snd_soc_codec *codec;
struct snd_soc_component *component;
@@ -276,8 +275,6 @@ struct snd_soc_dai {
unsigned int tx_mask;
unsigned int rx_mask;
- struct snd_soc_card *card;
-
struct list_head list;
};
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 3a4d7da67b8d..89823cfe6f04 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -435,7 +435,7 @@ void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
-void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
+void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
@@ -508,9 +508,9 @@ struct snd_soc_dapm_path {
/* status */
u32 connect:1; /* source and sink widgets are connected */
- u32 walked:1; /* path has been walked */
u32 walking:1; /* path is in the process of being walked */
u32 weak:1; /* path ignored for power management */
+ u32 is_supply:1; /* At least one of the connected widgets is a supply */
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -544,11 +544,13 @@ struct snd_soc_dapm_widget {
unsigned char active:1; /* active stream on DAC, ADC's */
unsigned char connected:1; /* connected codec pin */
unsigned char new:1; /* cnew complete */
- unsigned char ext:1; /* has external widgets */
unsigned char force:1; /* force state */
unsigned char ignore_suspend:1; /* kept enabled over suspend */
unsigned char new_power:1; /* power from this run */
unsigned char power_checked:1; /* power checked this run */
+ unsigned char is_supply:1; /* Widget is a supply type widget */
+ unsigned char is_sink:1; /* Widget is a sink type widget */
+ unsigned char is_source:1; /* Widget is a source type widget */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -567,6 +569,7 @@ struct snd_soc_dapm_widget {
struct list_head sinks;
/* used during DAPM updates */
+ struct list_head work_list;
struct list_head power_list;
struct list_head dirty;
int inputs;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 7ba7130037a0..b4fca9aed2a2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -36,6 +36,11 @@
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
.rshift = shift_right, .max = xmax, .platform_max = xmax, \
.invert = xinvert, .autodisable = xautodisable})
+#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
+ ((unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, .shift = shift_left, \
+ .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
+ .sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
@@ -171,11 +176,9 @@
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
SNDRV_CTL_ELEM_ACCESS_READWRITE, \
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \
- .put = snd_soc_put_volsw_s8, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .min = xmin, .max = xmax, \
- .platform_max = xmax} }
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
+ .put = snd_soc_put_volsw, \
+ .private_value = SOC_DOUBLE_S_VALUE(xreg, 0, 8, xmin, xmax, 7, 0, 0) }
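For illustration, a hedged control definition using the reworked macro (register address, range and TLV scale are invented values):

#include <sound/soc.h>
#include <sound/tlv.h>

/* Hypothetical volume on a signed 8-bit register in 0.5 dB steps,
 * handled entirely by the generic volsw helpers now. */
static const DECLARE_TLV_DB_SCALE(example_tlv, -6400, 50, 0);

static const struct snd_kcontrol_new example_controls[] = {
	SOC_SINGLE_S8_TLV("Example Volume", 0x10, -128, 127, example_tlv),
};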
#define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \
{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
.items = xitems, .texts = xtexts, \
@@ -366,8 +369,6 @@ struct snd_soc_jack_gpio;
typedef int (*hw_write_t)(void *,const char* ,int);
-extern struct snd_ac97_bus_ops *soc_ac97_ops;
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
@@ -409,13 +410,9 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *cmpnt_drv,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
-int snd_soc_cache_sync(struct snd_soc_codec *codec);
int snd_soc_cache_init(struct snd_soc_codec *codec);
int snd_soc_cache_exit(struct snd_soc_codec *codec);
-int snd_soc_cache_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value);
-int snd_soc_cache_read(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int *value);
+
int snd_soc_platform_read(struct snd_soc_platform *platform,
unsigned int reg);
int snd_soc_platform_write(struct snd_soc_platform *platform,
@@ -500,14 +497,28 @@ int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
-int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
- struct snd_ac97_bus_ops *ops, int num);
-void snd_soc_free_ac97_codec(struct snd_soc_codec *codec);
+#ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
struct platform_device *pdev);
+extern struct snd_ac97_bus_ops *soc_ac97_ops;
+#else
+static inline int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
+{
+ return 0;
+}
+#endif
+
/*
*Controls
*/
@@ -545,12 +556,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
@@ -780,24 +785,18 @@ struct snd_soc_codec {
struct device *dev;
const struct snd_soc_codec_driver *driver;
- struct mutex mutex;
struct list_head list;
struct list_head card_list;
/* runtime */
- struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int cache_bypass:1; /* Suppress access to the cache */
unsigned int suspended:1; /* Codec is in suspend PM state */
- unsigned int ac97_registered:1; /* Codec has been AC97 registered */
- unsigned int ac97_created:1; /* Codec has been created by SoC */
unsigned int cache_init:1; /* codec cache has been initialized */
- u32 cache_sync; /* Cache needs to be synced to hardware */
/* codec IO */
void *control_data; /* codec control (i2c/3wire) data */
hw_write_t hw_write;
void *reg_cache;
- struct mutex cache_rw_mutex;
/* component */
struct snd_soc_component component;
@@ -860,8 +859,6 @@ struct snd_soc_platform_driver {
int (*probe)(struct snd_soc_platform *);
int (*remove)(struct snd_soc_platform *);
- int (*suspend)(struct snd_soc_dai *dai);
- int (*resume)(struct snd_soc_dai *dai);
struct snd_soc_component_driver component_driver;
/* pcm creation and destruction */
@@ -886,7 +883,7 @@ struct snd_soc_platform_driver {
struct snd_soc_dai_link_component {
const char *name;
- const struct device_node *of_node;
+ struct device_node *of_node;
const char *dai_name;
};
@@ -894,8 +891,6 @@ struct snd_soc_platform {
struct device *dev;
const struct snd_soc_platform_driver *driver;
- unsigned int suspended:1; /* platform is suspended */
-
struct list_head list;
struct snd_soc_component component;
@@ -990,7 +985,7 @@ struct snd_soc_codec_conf {
* DT/OF node, but not both.
*/
const char *dev_name;
- const struct device_node *of_node;
+ struct device_node *of_node;
/*
* optional map of kcontrol, widget and path name prefixes that are
@@ -1007,7 +1002,7 @@ struct snd_soc_aux_dev {
* DT/OF node, but not both.
*/
const char *codec_name;
- const struct device_node *codec_of_node;
+ struct device_node *codec_of_node;
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_component *component);
@@ -1264,6 +1259,17 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val);
+/**
+ * snd_soc_cache_sync() - Sync the register cache with the hardware
+ * @codec: CODEC to sync
+ *
+ * Note: This function will call regcache_sync()
+ */
+static inline int snd_soc_cache_sync(struct snd_soc_codec *codec)
+{
+ return regcache_sync(codec->component.regmap);
+}
+
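A hedged usage sketch for the snd_soc_cache_sync() wrapper above: since it simply forwards to regcache_sync() on the component regmap, a typical caller is a resume path that replays cached writes after the device lost power (example_codec_resume is a hypothetical name):

	static int example_codec_resume(struct snd_soc_codec *codec)
	{
		/* replay cached register writes once the device is powered again */
		return snd_soc_cache_sync(codec);
	}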
/* component IO */
int snd_soc_component_read(struct snd_soc_component *component,
unsigned int reg, unsigned int *val);
@@ -1277,6 +1283,45 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask, unsigned int value);
+#ifdef CONFIG_REGMAP
+
+void snd_soc_component_init_regmap(struct snd_soc_component *component,
+ struct regmap *regmap);
+void snd_soc_component_exit_regmap(struct snd_soc_component *component);
+
+/**
+ * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC
+ * @codec: The CODEC for which to initialize the regmap instance
+ * @regmap: The regmap instance that should be used by the CODEC
+ *
+ * This function allows deferred assignment of the regmap instance that is
+ * associated with the CODEC. Only use this if the regmap instance is not yet
+ * ready when the CODEC is registered. The function must also be called before
+ * the first IO attempt of the CODEC.
+ */
+static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec,
+ struct regmap *regmap)
+{
+ snd_soc_component_init_regmap(&codec->component, regmap);
+}
+
+/**
+ * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC
+ * @codec: The CODEC for which to de-initialize the regmap instance
+ *
+ * Calls regmap_exit() on the regmap instance associated to the CODEC and
+ * removes the regmap instance from the CODEC.
+ *
+ * This function should only be used if snd_soc_codec_init_regmap() was used to
+ * initialize the regmap instance.
+ */
+static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec)
+{
+ snd_soc_component_exit_regmap(&codec->component);
+}
+
+#endif
+
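The snd_soc_codec_init_regmap()/snd_soc_codec_exit_regmap() helpers above cover CODECs whose regmap is not ready at registration time. A hedged sketch of deferred assignment (example_priv, priv->i2c and example_regmap_config are hypothetical; another regmap bus would work equally well):

	static int example_codec_probe(struct snd_soc_codec *codec)
	{
		struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

		/* the register map only becomes accessible after power-up */
		priv->regmap = regmap_init_i2c(priv->i2c, &example_regmap_config);
		if (IS_ERR(priv->regmap))
			return PTR_ERR(priv->regmap);

		snd_soc_codec_init_regmap(codec, priv->regmap);
		return 0;
	}

	static int example_codec_remove(struct snd_soc_codec *codec)
	{
		/* pairs with snd_soc_codec_init_regmap(); calls regmap_exit() */
		snd_soc_codec_exit_regmap(codec);
		return 0;
	}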
/* device driver data */
static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
@@ -1451,6 +1496,9 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
struct device_node **framemaster);
int snd_soc_of_get_dai_name(struct device_node *of_node,
const char **dai_name);
+int snd_soc_of_get_dai_link_codecs(struct device *dev,
+ struct device_node *of_node,
+ struct snd_soc_dai_link *dai_link);
#include <sound/soc-dai.h>
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
index e475659bd3be..509efb050176 100644
--- a/include/sound/uda134x.h
+++ b/include/sound/uda134x.h
@@ -18,18 +18,6 @@ struct uda134x_platform_data {
struct l3_pins l3;
void (*power) (int);
int model;
- /*
- ALSA SOC usually puts the device in standby mode when it's not used
- for sometime. If you unset is_powered_on_standby the driver will
- turn off the ADC/DAC when this callback is invoked and turn it back
- on when needed. Unfortunately this will result in a very light bump
- (it can be audible only with good earphones). If this bothers you
- set is_powered_on_standby, you will have slightly higher power
- consumption. Please note that sending the L3 command for ADC is
- enough to make the bump, so it doesn't make difference if you
- completely take off power from the codec.
- */
- int is_powered_on_standby;
#define UDA134X_UDA1340 1
#define UDA134X_UDA1341 2
#define UDA134X_UDA1344 3
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index b04ee7e5a466..88cf39d96d0f 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -288,31 +288,6 @@ TRACE_EVENT(snd_soc_jack_notify,
TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
);
-TRACE_EVENT(snd_soc_cache_sync,
-
- TP_PROTO(struct snd_soc_codec *codec, const char *type,
- const char *status),
-
- TP_ARGS(codec, type, status),
-
- TP_STRUCT__entry(
- __string( name, codec->component.name)
- __string( status, status )
- __string( type, type )
- __field( int, id )
- ),
-
- TP_fast_assign(
- __assign_str(name, codec->component.name);
- __assign_str(status, status);
- __assign_str(type, type);
- __entry->id = codec->component.id;
- ),
-
- TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
- (int)__entry->id, __get_str(type), __get_str(status))
-);
-
#endif /* _TRACE_ASOC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index ff4bd1b35246..6cfb841fea7c 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -43,15 +43,13 @@ struct extent_status;
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
{ EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
- { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \
- { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" })
+ { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" })
#define show_mflags(flags) __print_flags(flags, "", \
{ EXT4_MAP_NEW, "N" }, \
{ EXT4_MAP_MAPPED, "M" }, \
{ EXT4_MAP_UNWRITTEN, "U" }, \
- { EXT4_MAP_BOUNDARY, "B" }, \
- { EXT4_MAP_FROM_CLUSTER, "C" })
+ { EXT4_MAP_BOUNDARY, "B" })
#define show_free_flags(flags) __print_flags(flags, "|", \
{ EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \
@@ -2452,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range,
TRACE_EVENT(ext4_es_shrink,
TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
- int skip_precached, int nr_skipped, int retried),
+ int nr_skipped, int retried),
- TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
+ TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, nr_shrunk )
__field( unsigned long long, scan_time )
- __field( int, skip_precached )
__field( int, nr_skipped )
__field( int, retried )
),
@@ -2469,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink,
__entry->dev = sb->s_dev;
__entry->nr_shrunk = nr_shrunk;
__entry->scan_time = div_u64(scan_time, 1000);
- __entry->skip_precached = skip_precached;
__entry->nr_skipped = nr_skipped;
__entry->retried = retried;
),
- TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
+ TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
"nr_skipped %d retried %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
- __entry->scan_time, __entry->skip_precached,
- __entry->nr_skipped, __entry->retried)
+ __entry->scan_time, __entry->nr_skipped, __entry->retried)
);
#endif /* _TRACE_EXT4_H */
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 94db6a2c3540..63116362543c 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -29,6 +29,8 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+struct host1x_bo;
+
DECLARE_EVENT_CLASS(host1x,
TP_PROTO(const char *name),
TP_ARGS(name),
@@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push,
);
TRACE_EVENT(host1x_cdma_push_gather,
- TP_PROTO(const char *name, u32 mem_id,
+ TP_PROTO(const char *name, struct host1x_bo *bo,
u32 words, u32 offset, void *cmdbuf),
- TP_ARGS(name, mem_id, words, offset, cmdbuf),
+ TP_ARGS(name, bo, words, offset, cmdbuf),
TP_STRUCT__entry(
__field(const char *, name)
- __field(u32, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, words)
__field(u32, offset)
__field(bool, cmdbuf)
@@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather,
}
__entry->cmdbuf = cmdbuf;
__entry->name = name;
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->words = words;
__entry->offset = offset;
),
- TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
- __entry->name, __entry->mem_id,
+ TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
+ __entry->name, __entry->bo,
__entry->words, __entry->offset,
__print_hex(__get_dynamic_array(cmdbuf),
__entry->cmdbuf ? __entry->words * 4 : 0))
@@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min,
);
TRACE_EVENT(host1x_syncpt_wait_check,
- TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
+ TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
+ u32 min),
- TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
+ TP_ARGS(bo, offset, syncpt_id, thresh, min),
TP_STRUCT__entry(
- __field(void *, mem_id)
+ __field(struct host1x_bo *, bo)
__field(u32, offset)
__field(u32, syncpt_id)
__field(u32, thresh)
@@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check,
),
TP_fast_assign(
- __entry->mem_id = mem_id;
+ __entry->bo = bo;
__entry->offset = offset;
__entry->syncpt_id = syncpt_id;
__entry->thresh = thresh;
__entry->min = min;
),
- TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
- __entry->mem_id, __entry->offset,
+ TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
+ __entry->bo, __entry->offset,
__entry->syncpt_id, __entry->thresh,
__entry->min)
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 171ca4ff6d99..b9c1dc6c825a 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -8,6 +8,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <net/tcp_states.h>
#include <linux/net.h>
#include <linux/tracepoint.h>
@@ -412,6 +413,16 @@ TRACE_EVENT(xs_tcp_data_recv,
__entry->copied, __entry->reclen, __entry->offset)
);
+#define show_rqstp_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << RQ_SECURE), "RQ_SECURE"}, \
+ { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \
+ { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \
+ { (1UL << RQ_DROPME), "RQ_DROPME"}, \
+ { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \
+ { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \
+ { (1UL << RQ_BUSY), "RQ_BUSY"})
+
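The show_rqstp_flags() macro above renders rq_flags symbolically through __print_flags(); a request with RQ_SECURE and RQ_USEDEFERRAL set would, for example, appear in the trace output as flags=RQ_SECURE|RQ_USEDEFERRAL (illustrative rendering, not output captured from a running system).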
TRACE_EVENT(svc_recv,
TP_PROTO(struct svc_rqst *rqst, int status),
@@ -421,16 +432,19 @@ TRACE_EVENT(svc_recv,
__field(struct sockaddr *, addr)
__field(__be32, xid)
__field(int, status)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = status > 0 ? rqst->rq_xid : 0;
__entry->status = status;
+ __entry->flags = rqst->rq_flags;
),
- TP_printk("addr=%pIScp xid=0x%x status=%d", __entry->addr,
- be32_to_cpu(__entry->xid), __entry->status)
+ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+ be32_to_cpu(__entry->xid), __entry->status,
+ show_rqstp_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_rqst_status,
@@ -444,18 +458,19 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
__field(__be32, xid)
__field(int, dropme)
__field(int, status)
+ __field(unsigned long, flags)
),
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = rqst->rq_xid;
- __entry->dropme = (int)rqst->rq_dropme;
__entry->status = status;
+ __entry->flags = rqst->rq_flags;
),
- TP_printk("addr=%pIScp rq_xid=0x%x dropme=%d status=%d",
- __entry->addr, be32_to_cpu(__entry->xid), __entry->dropme,
- __entry->status)
+ TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
+ __entry->addr, be32_to_cpu(__entry->xid),
+ __entry->status, show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_status, svc_process,
@@ -466,6 +481,99 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+#define show_svc_xprt_flags(flags) \
+ __print_flags(flags, "|", \
+ { (1UL << XPT_BUSY), "XPT_BUSY"}, \
+ { (1UL << XPT_CONN), "XPT_CONN"}, \
+ { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
+ { (1UL << XPT_DATA), "XPT_DATA"}, \
+ { (1UL << XPT_TEMP), "XPT_TEMP"}, \
+ { (1UL << XPT_DEAD), "XPT_DEAD"}, \
+ { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
+ { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
+ { (1UL << XPT_OLD), "XPT_OLD"}, \
+ { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
+ { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
+ { (1UL << XPT_LOCAL), "XPT_LOCAL"})
+
+TRACE_EVENT(svc_xprt_do_enqueue,
+ TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+
+ TP_ARGS(xprt, rqst),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(struct svc_rqst *, rqst)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->rqst = rqst;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote,
+ __entry->rqst ? __entry->rqst->rq_task->pid : 0,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(struct svc_xprt *xprt),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field_struct(struct sockaddr_storage, ss)
+ __field(unsigned long, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt,
+ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ __entry->flags = xprt ? xprt->xpt_flags : 0;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->ss,
+ show_svc_xprt_flags(__entry->flags))
+);
+
+TRACE_EVENT(svc_wake_up,
+ TP_PROTO(int pid),
+
+ TP_ARGS(pid),
+
+ TP_STRUCT__entry(
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ ),
+
+ TP_printk("pid=%d", __entry->pid)
+);
+
+TRACE_EVENT(svc_handle_xprt,
+ TP_PROTO(struct svc_xprt *xprt, int len),
+
+ TP_ARGS(xprt, len),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = xprt;
+ __entry->len = len;
+ ),
+
+ TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+ (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
+ show_svc_xprt_flags(__entry->xprt->xpt_flags))
+);
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index ea0796bdcf88..5c15c2a5c123 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -82,4 +82,9 @@
#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
#endif /* __ASM_GENERIC_SOCKET_H */
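SO_ATTACH_BPF takes the file descriptor of an eBPF program loaded via bpf(BPF_PROG_LOAD, ...), in contrast to SO_ATTACH_FILTER, which takes a classic struct sock_fprog. A hedged userspace sketch (prog_fd is assumed to be a valid BPF_PROG_TYPE_SOCKET_FILTER program fd):

	#include <sys/socket.h>

	/* attach an already-loaded eBPF socket filter to sock */
	static int attach_ebpf_filter(int sock, int prog_fd)
	{
		return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
				  &prog_fd, sizeof(prog_fd));
	}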
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 22749c134117..e016bd9b1a04 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -707,9 +707,11 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
__SYSCALL(__NR_memfd_create, sys_memfd_create)
#define __NR_bpf 280
__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 281
+__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
#undef __NR_syscalls
-#define __NR_syscalls 281
+#define __NR_syscalls 282
/*
* All syscalls below here should go away really,
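With __NR_execveat wired up above, the syscall can be invoked directly until a libc wrapper exists. A hedged sketch (my_execveat is a hypothetical helper; dirfd/path semantics follow the openat-style conventions):

	#include <unistd.h>
	#include <sys/syscall.h>

	static int my_execveat(int dirfd, const char *path,
			       char *const argv[], char *const envp[], int flags)
	{
		return syscall(__NR_execveat, dirfd, path, argv, envp, flags);
	}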
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a0db2d4aa5f0..86574b0005ff 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -286,6 +286,8 @@ struct drm_mode_get_property {
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
+ /* This is only used to count enum values, not blobs. The _blobs suffix is
+ * kept purely for historical reasons, i.e. backwards compatibility. */
__u32 count_enum_blobs;
};
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ff57f07c3249..250262265ee3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
+#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
typedef struct drm_i915_getparam {
int param;
@@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling {
* mmap mapping.
*/
__u32 swizzle_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping whilst bound.
+ */
+ __u32 phys_swizzle_mode;
};
struct drm_i915_gem_get_aperture {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8523f9bb72f2..00b100023c47 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
+header-y += android/
header-y += byteorder/
header-y += can/
header-y += caif/
@@ -37,27 +38,27 @@ header-y += aio_abi.h
header-y += apm_bios.h
header-y += arcfb.h
header-y += atalk.h
-header-y += atm.h
-header-y += atm_eni.h
-header-y += atm_he.h
-header-y += atm_idt77105.h
-header-y += atm_nicstar.h
-header-y += atm_tcp.h
-header-y += atm_zatm.h
header-y += atmapi.h
header-y += atmarp.h
header-y += atmbr2684.h
header-y += atmclip.h
header-y += atmdev.h
+header-y += atm_eni.h
+header-y += atm.h
+header-y += atm_he.h
+header-y += atm_idt77105.h
header-y += atmioc.h
header-y += atmlec.h
header-y += atmmpc.h
+header-y += atm_nicstar.h
header-y += atmppp.h
header-y += atmsap.h
header-y += atmsvc.h
+header-y += atm_tcp.h
+header-y += atm_zatm.h
header-y += audit.h
-header-y += auto_fs.h
header-y += auto_fs4.h
+header-y += auto_fs.h
header-y += auxvec.h
header-y += ax25.h
header-y += b1lli.h
@@ -67,8 +68,8 @@ header-y += bfs_fs.h
header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
-header-y += bpf.h
header-y += bpf_common.h
+header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
header-y += btrfs.h
@@ -93,21 +94,21 @@ header-y += cyclades.h
header-y += cycx_cfm.h
header-y += dcbnl.h
header-y += dccp.h
-header-y += dlm.h
+header-y += dlmconstants.h
header-y += dlm_device.h
+header-y += dlm.h
header-y += dlm_netlink.h
header-y += dlm_plock.h
-header-y += dlmconstants.h
header-y += dm-ioctl.h
header-y += dm-log-userspace.h
header-y += dn.h
header-y += dqblk_xfs.h
header-y += edd.h
header-y += efs_fs_sb.h
+header-y += elfcore.h
header-y += elf-em.h
header-y += elf-fdpic.h
header-y += elf.h
-header-y += elfcore.h
header-y += errno.h
header-y += errqueue.h
header-y += ethtool.h
@@ -131,15 +132,15 @@ header-y += fsl_hypervisor.h
header-y += fuse.h
header-y += futex.h
header-y += gameport.h
-header-y += gen_stats.h
header-y += genetlink.h
+header-y += gen_stats.h
header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
-header-y += hdlc.h
header-y += hdlcdrv.h
+header-y += hdlc.h
header-y += hdreg.h
-header-y += hid.h
header-y += hiddev.h
+header-y += hid.h
header-y += hidraw.h
header-y += hpet.h
header-y += hsr_netlink.h
@@ -151,7 +152,6 @@ header-y += i2o-dev.h
header-y += i8k.h
header-y += icmp.h
header-y += icmpv6.h
-header-y += if.h
header-y += if_addr.h
header-y += if_addrlabel.h
header-y += if_alg.h
@@ -165,6 +165,7 @@ header-y += if_ether.h
header-y += if_fc.h
header-y += if_fddi.h
header-y += if_frad.h
+header-y += if.h
header-y += if_hippi.h
header-y += if_infiniband.h
header-y += if_link.h
@@ -182,40 +183,41 @@ header-y += if_tunnel.h
header-y += if_vlan.h
header-y += if_x25.h
header-y += igmp.h
-header-y += in.h
header-y += in6.h
-header-y += in_route.h
header-y += inet_diag.h
+header-y += in.h
header-y += inotify.h
header-y += input.h
+header-y += in_route.h
header-y += ioctl.h
-header-y += ip.h
header-y += ip6_tunnel.h
-header-y += ip_vs.h
header-y += ipc.h
+header-y += ip.h
header-y += ipmi.h
header-y += ipmi_msgdefs.h
header-y += ipsec.h
header-y += ipv6.h
header-y += ipv6_route.h
+header-y += ip_vs.h
header-y += ipx.h
header-y += irda.h
header-y += irqnr.h
-header-y += isdn.h
header-y += isdn_divertif.h
-header-y += isdn_ppp.h
+header-y += isdn.h
header-y += isdnif.h
+header-y += isdn_ppp.h
header-y += iso_fs.h
-header-y += ivtv.h
header-y += ivtvfb.h
+header-y += ivtv.h
header-y += ixjuser.h
header-y += jffs2.h
header-y += joystick.h
-header-y += kd.h
+header-y += kcmp.h
header-y += kdev_t.h
-header-y += kernel-page-flags.h
-header-y += kernel.h
+header-y += kd.h
header-y += kernelcapi.h
+header-y += kernel.h
+header-y += kernel-page-flags.h
header-y += kexec.h
header-y += keyboard.h
header-y += keyctl.h
@@ -231,6 +233,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \
header-y += kvm_para.h
endif
+header-y += hw_breakpoint.h
header-y += l2tp.h
header-y += libc-compat.h
header-y += limits.h
@@ -243,6 +246,7 @@ header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += mdio.h
header-y += media.h
+header-y += media-bus-format.h
header-y += mei.h
header-y += memfd.h
header-y += mempolicy.h
@@ -255,43 +259,43 @@ header-y += mman.h
header-y += mmtimer.h
header-y += mpls.h
header-y += mqueue.h
-header-y += mroute.h
header-y += mroute6.h
+header-y += mroute.h
header-y += msdos_fs.h
header-y += msg.h
header-y += mtio.h
-header-y += n_r3964.h
header-y += nbd.h
-header-y += ncp.h
header-y += ncp_fs.h
+header-y += ncp.h
header-y += ncp_mount.h
header-y += ncp_no.h
header-y += neighbour.h
-header-y += net.h
-header-y += net_dropmon.h
-header-y += net_tstamp.h
header-y += netconf.h
header-y += netdevice.h
-header-y += netlink_diag.h
-header-y += netfilter.h
+header-y += net_dropmon.h
header-y += netfilter_arp.h
header-y += netfilter_bridge.h
header-y += netfilter_decnet.h
+header-y += netfilter.h
header-y += netfilter_ipv4.h
header-y += netfilter_ipv6.h
+header-y += net.h
+header-y += netlink_diag.h
header-y += netlink.h
header-y += netrom.h
+header-y += net_tstamp.h
header-y += nfc.h
-header-y += nfs.h
header-y += nfs2.h
header-y += nfs3.h
header-y += nfs4.h
header-y += nfs4_mount.h
+header-y += nfsacl.h
header-y += nfs_fs.h
+header-y += nfs.h
header-y += nfs_idmap.h
header-y += nfs_mount.h
-header-y += nfsacl.h
header-y += nl80211.h
+header-y += n_r3964.h
header-y += nubus.h
header-y += nvme.h
header-y += nvram.h
@@ -311,16 +315,16 @@ header-y += pfkeyv2.h
header-y += pg.h
header-y += phantom.h
header-y += phonet.h
+header-y += pktcdvd.h
header-y += pkt_cls.h
header-y += pkt_sched.h
-header-y += pktcdvd.h
header-y += pmu.h
header-y += poll.h
header-y += posix_types.h
header-y += ppdev.h
header-y += ppp-comp.h
-header-y += ppp-ioctl.h
header-y += ppp_defs.h
+header-y += ppp-ioctl.h
header-y += pps.h
header-y += prctl.h
header-y += psci.h
@@ -352,13 +356,13 @@ header-y += seccomp.h
header-y += securebits.h
header-y += selinux_netlink.h
header-y += sem.h
-header-y += serial.h
header-y += serial_core.h
+header-y += serial.h
header-y += serial_reg.h
header-y += serio.h
header-y += shm.h
-header-y += signal.h
header-y += signalfd.h
+header-y += signal.h
header-y += smiapp.h
header-y += snmp.h
header-y += sock_diag.h
@@ -367,8 +371,8 @@ header-y += sockios.h
header-y += som.h
header-y += sonet.h
header-y += sonypi.h
-header-y += sound.h
header-y += soundcard.h
+header-y += sound.h
header-y += stat.h
header-y += stddef.h
header-y += string.h
@@ -383,15 +387,17 @@ header-y += tcp.h
header-y += tcp_metrics.h
header-y += telephony.h
header-y += termios.h
+header-y += thermal.h
header-y += time.h
header-y += times.h
header-y += timex.h
header-y += tiocl.h
-header-y += tipc.h
header-y += tipc_config.h
+header-y += tipc_netlink.h
+header-y += tipc.h
header-y += toshiba.h
-header-y += tty.h
header-y += tty_flags.h
+header-y += tty.h
header-y += types.h
header-y += udf_fs_i.h
header-y += udp.h
@@ -427,6 +433,8 @@ header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
header-y += virtio_rng.h
+header-y += virtio_scsi.h
+header-y += virtio_types.h
header-y += vm_sockets.h
header-y += vt.h
header-y += wait.h
@@ -437,6 +445,5 @@ header-y += wireless.h
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
-header-y += hw_breakpoint.h
header-y += zorro.h
header-y += zorro_ids.h
diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild
new file mode 100644
index 000000000000..ca011eec252a
--- /dev/null
+++ b/include/uapi/linux/android/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += binder.h
diff --git a/drivers/staging/android/uapi/binder.h b/include/uapi/linux/android/binder.h
index dba4cef3a8d3..41420e341e75 100644
--- a/drivers/staging/android/uapi/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -20,6 +20,7 @@
#ifndef _UAPI_LINUX_BINDER_H
#define _UAPI_LINUX_BINDER_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d4dbef14d4df..12e26683c706 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -322,9 +322,15 @@ enum {
#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
-#define AUDIT_VERSION_BACKLOG_LIMIT 1
-#define AUDIT_VERSION_BACKLOG_WAIT_TIME 2
-#define AUDIT_VERSION_LATEST AUDIT_VERSION_BACKLOG_WAIT_TIME
+#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
+#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
+#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
+ AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME)
+
+/* deprecated: AUDIT_VERSION_* */
+#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
+#define AUDIT_VERSION_BACKLOG_LIMIT AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT
+#define AUDIT_VERSION_BACKLOG_WAIT_TIME AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME
/* Failure-to-log actions */
#define AUDIT_FAIL_SILENT 0
@@ -404,7 +410,10 @@ struct audit_status {
__u32 backlog_limit; /* waiting messages limit */
__u32 lost; /* messages lost */
__u32 backlog; /* messages waiting in queue */
- __u32 version; /* audit api version number */
+ union {
+ __u32 version; /* deprecated: audit api version num */
+ __u32 feature_bitmap; /* bitmap of kernel audit features */
+ };
__u32 backlog_wait_time;/* message queue wait timeout */
};
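Since version and feature_bitmap alias the same storage in the anonymous union, old userspace reading status.version keeps working while new code can test individual AUDIT_FEATURE_BITMAP_* bits. A hedged sketch (the helper name is illustrative):

	#include <linux/audit.h>

	static int backlog_limit_supported(const struct audit_status *s)
	{
		return !!(s->feature_bitmap & AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT);
	}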
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d18316f9e9c4..45da7ec7d274 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -82,7 +82,7 @@ enum bpf_cmd {
/* create or update key/value pair in a given map
* err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)
- * Using attr->map_fd, attr->key, attr->value
+ * Using attr->map_fd, attr->key, attr->value, attr->flags
* returns zero or negative error
*/
BPF_MAP_UPDATE_ELEM,
@@ -111,12 +111,20 @@ enum bpf_cmd {
enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC,
+ BPF_MAP_TYPE_HASH,
+ BPF_MAP_TYPE_ARRAY,
};
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
+ BPF_PROG_TYPE_SOCKET_FILTER,
};
+/* flags for BPF_MAP_UPDATE_ELEM command */
+#define BPF_ANY 0 /* create new element or update existing */
+#define BPF_NOEXIST 1 /* create new element if it didn't exist */
+#define BPF_EXIST 2 /* update existing element */
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -132,6 +140,7 @@ union bpf_attr {
__aligned_u64 value;
__aligned_u64 next_key;
};
+ __u64 flags;
};
struct { /* anonymous struct used by BPF_PROG_LOAD command */
@@ -150,6 +159,9 @@ union bpf_attr {
*/
enum bpf_func_id {
BPF_FUNC_unspec,
+ BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
+ BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
+ BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
__BPF_FUNC_MAX_ID,
};
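A hedged userspace sketch tying the new pieces together: updating a map element with the BPF_NOEXIST flag via the raw bpf(2) syscall (map_fd is assumed to come from a prior BPF_MAP_CREATE; older libcs may not define __NR_bpf):

	#include <linux/bpf.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	static int map_insert_new(int map_fd, const void *key, const void *value)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key    = (__u64)(unsigned long)key;
		attr.value  = (__u64)(unsigned long)value;
		attr.flags  = BPF_NOEXIST;	/* fail if the key already exists */

		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}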
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 2f47824e7a36..611e1c5893b4 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -157,6 +157,7 @@ struct btrfs_ioctl_dev_replace_status_params {
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR 0
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED 1
#define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED 2
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS 3
struct btrfs_ioctl_dev_replace_args {
__u64 cmd; /* in */
__u64 result; /* out */
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index c247446ab25a..1c508be9687f 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -71,6 +71,7 @@
#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */
/* (at least one error counter exceeds */
/* the protocol-defined level of 127) */
+#define CAN_ERR_CRTL_ACTIVE 0x40 /* recovered to error active state */
/* error in CAN protocol (type) / data[2] */
#define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 99b43056a6fe..5f66d9c2889d 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -534,6 +534,7 @@ struct ethtool_pauseparam {
* @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE;
* now deprecated
* @ETH_SS_FEATURES: Device feature names
+ * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -541,6 +542,7 @@ enum ethtool_stringset {
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
+ ETH_SS_RSS_HASH_FUNCS,
};
/**
@@ -884,6 +886,8 @@ struct ethtool_rxfh_indir {
* @key_size: On entry, the array size of the user buffer for the hash key,
* which may be zero. On return from %ETHTOOL_GRSSH, the size of the
* hardware hash key.
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
* @rsvd: Reserved for future extensions.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size __u32 elements, followed by hash key of @key_size
@@ -893,14 +897,16 @@ struct ethtool_rxfh_indir {
* size should be returned. For %ETHTOOL_SRSSH, an @indir_size of
* %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested
* and a @indir_size of zero means the indir table should be reset to default
- * values.
+ * values. An hfunc of zero means that hash function setting is not requested.
*/
struct ethtool_rxfh {
__u32 cmd;
__u32 rss_context;
__u32 indir_size;
__u32 key_size;
- __u32 rsvd[2];
+ __u8 hfunc;
+ __u8 rsvd8[3];
+ __u32 rsvd32;
__u32 rss_config[0];
};
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
@@ -1213,6 +1219,10 @@ enum ethtool_sfeatures_retval_bits {
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
@@ -1241,6 +1251,10 @@ enum ethtool_sfeatures_retval_bits {
#define ADVERTISED_40000baseCR4_Full (1 << 24)
#define ADVERTISED_40000baseSR4_Full (1 << 25)
#define ADVERTISED_40000baseLR4_Full (1 << 26)
+#define ADVERTISED_56000baseKR4_Full (1 << 27)
+#define ADVERTISED_56000baseCR4_Full (1 << 28)
+#define ADVERTISED_56000baseSR4_Full (1 << 29)
+#define ADVERTISED_56000baseLR4_Full (1 << 30)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
@@ -1248,12 +1262,16 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define SPEED_10000 10000
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#define SPEED_56000 56000
+
#define SPEED_UNKNOWN -1
/* Duplex, half or full. */
@@ -1343,6 +1361,10 @@ enum ethtool_sfeatures_retval_bits {
#define ETH_MODULE_SFF_8079_LEN 256
#define ETH_MODULE_SFF_8472 0x2
#define ETH_MODULE_SFF_8472_LEN 512
+#define ETH_MODULE_SFF_8636 0x3
+#define ETH_MODULE_SFF_8636_LEN 256
+#define ETH_MODULE_SFF_8436 0x4
+#define ETH_MODULE_SFF_8436_LEN 256
/* Reset flags */
/* The reset() operation must clear the flags for the components which
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 0a8e6badb29b..bb1cb73c927a 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -134,6 +134,7 @@ struct hv_start_fcopy {
struct hv_do_fcopy {
struct hv_fcopy_hdr hdr;
+ __u32 pad;
__u64 offset;
__u32 size;
__u8 data[DATA_FRAGMENT];
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index 0f9acce5b1ff..f2acd2fde1f3 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -32,6 +32,8 @@ struct af_alg_iv {
#define ALG_SET_KEY 1
#define ALG_SET_IV 2
#define ALG_SET_OP 3
+#define ALG_SET_AEAD_ASSOCLEN 4
+#define ALG_SET_AEAD_AUTHSIZE 5
/* Operations */
#define ALG_OP_DECRYPT 0
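ALG_SET_AEAD_AUTHSIZE is applied with setsockopt() on the AF_ALG tfm socket, passing the tag length as the option length with a NULL value; ALG_SET_AEAD_ASSOCLEN is conveyed per request as ancillary data on sendmsg(). A hedged sketch of the former (tfmfd is assumed to be a bound AF_ALG "aead" socket):

	#include <sys/socket.h>
	#include <linux/if_alg.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	static int set_aead_taglen(int tfmfd, unsigned int taglen)
	{
		/* tag length is carried in optlen; optval must be NULL */
		return setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, taglen);
	}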
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index da17e456908d..b03ee8f62d3c 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -105,6 +105,7 @@ struct __fdb_entry {
#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#define BRIDGE_MODE_UNDEF 0xFFFF /* mode undefined */
/* Bridge management nested attributes
* [IFLA_AF_SPEC] = {
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 0bdb77e16875..f7d0d2d7173a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -145,6 +145,7 @@ enum {
IFLA_CARRIER,
IFLA_PHYS_PORT_ID,
IFLA_CARRIER_CHANGES,
+ IFLA_PHYS_SWITCH_ID,
__IFLA_MAX
};
@@ -243,6 +244,8 @@ enum {
IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */
IFLA_BRPORT_LEARNING, /* mac learning */
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
+ IFLA_BRPORT_PROXYARP, /* proxy ARP */
+ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -329,6 +332,21 @@ enum macvlan_macaddr_mode {
#define MACVLAN_FLAG_NOPROMISC 1
+/* IPVLAN section */
+enum {
+ IFLA_IPVLAN_UNSPEC,
+ IFLA_IPVLAN_MODE,
+ __IFLA_IPVLAN_MAX
+};
+
+#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1)
+
+enum ipvlan_mode {
+ IPVLAN_MODE_L2 = 0,
+ IPVLAN_MODE_L3,
+ IPVLAN_MODE_MAX
+};
+
/* VXLAN section */
enum {
IFLA_VXLAN_UNSPEC,
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index e9502dd1ee2c..50ae24335444 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -22,21 +22,11 @@
/* Read queue size */
#define TUN_READQ_SIZE 500
-
-/* TUN device flags */
-#define TUN_TUN_DEV 0x0001
-#define TUN_TAP_DEV 0x0002
+/* TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. */
+#define TUN_TUN_DEV IFF_TUN
+#define TUN_TAP_DEV IFF_TAP
#define TUN_TYPE_MASK 0x000f
-#define TUN_FASYNC 0x0010
-#define TUN_NOCHECKSUM 0x0020
-#define TUN_NO_PI 0x0040
-/* This flag has no real effect */
-#define TUN_ONE_QUEUE 0x0080
-#define TUN_PERSIST 0x0100
-#define TUN_VNET_HDR 0x0200
-#define TUN_TAP_MQ 0x0400
-
/* Ioctl defines */
#define TUNSETNOCSUM _IOW('T', 200, int)
#define TUNSETDEBUG _IOW('T', 201, int)
@@ -58,6 +48,8 @@
#define TUNSETQUEUE _IOW('T', 217, int)
#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNGETVNETLE _IOR('T', 221, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
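TUNSETVNETLE/TUNGETVNETLE control whether virtio-net headers on the queue are little-endian regardless of host byte order. A hedged sketch (fd is assumed to be an open /dev/net/tun descriptor already configured with TUNSETIFF):

	#include <sys/ioctl.h>
	#include <linux/if_tun.h>

	static int tun_force_vnet_le(int fd)
	{
		int le = 1;

		return ioctl(fd, TUNSETVNETLE, &le);
	}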
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 280d9e092283..bd3cc11a431f 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -69,6 +69,7 @@ enum tunnel_encap_types {
#define TUNNEL_ENCAP_FLAG_CSUM (1<<0)
#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1)
+#define TUNNEL_ENCAP_FLAG_REMCSUM (1<<2)
/* SIT-mode i_flags */
#define SIT_ISATAP 0x0001
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index efa2666f4b8a..e863d088b9a5 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -164,6 +164,7 @@ enum {
DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_ACCEPT_RA_FROM_LOCAL,
+ DEVCONF_USE_OPTIMISTIC,
DEVCONF_MAX
};
diff --git a/include/linux/kcmp.h b/include/uapi/linux/kcmp.h
index 2dcd1b3aafc8..84df14b37360 100644
--- a/include/linux/kcmp.h
+++ b/include/uapi/linux/kcmp.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_KCMP_H
-#define _LINUX_KCMP_H
+#ifndef _UAPI_LINUX_KCMP_H
+#define _UAPI_LINUX_KCMP_H
/* Comparison type */
enum kcmp_type {
@@ -14,4 +14,4 @@ enum kcmp_type {
KCMP_TYPES,
};
-#endif /* _LINUX_KCMP_H */
+#endif /* _UAPI_LINUX_KCMP_H */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
new file mode 100644
index 000000000000..7acef41fc209
--- /dev/null
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_IOCTL_H_INCLUDED
+#define KFD_IOCTL_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define KFD_IOCTL_MAJOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 0
+
+struct kfd_ioctl_get_version_args {
+ uint32_t major_version; /* from KFD */
+ uint32_t minor_version; /* from KFD */
+};
+
+/* For kfd_ioctl_create_queue_args.queue_type. */
+#define KFD_IOC_QUEUE_TYPE_COMPUTE 0
+#define KFD_IOC_QUEUE_TYPE_SDMA 1
+#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
+
+#define KFD_MAX_QUEUE_PERCENTAGE 100
+#define KFD_MAX_QUEUE_PRIORITY 15
+
+struct kfd_ioctl_create_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+ uint64_t write_pointer_address; /* from KFD */
+ uint64_t read_pointer_address; /* from KFD */
+ uint64_t doorbell_offset; /* from KFD */
+
+ uint32_t ring_size; /* to KFD */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t queue_type; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+ uint32_t queue_id; /* from KFD */
+
+ uint64_t eop_buffer_address; /* to KFD */
+ uint64_t eop_buffer_size; /* to KFD */
+ uint64_t ctx_save_restore_address; /* to KFD */
+ uint64_t ctx_save_restore_size; /* to KFD */
+};
+
+struct kfd_ioctl_destroy_queue_args {
+ uint32_t queue_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_update_queue_args {
+ uint64_t ring_base_address; /* to KFD */
+
+ uint32_t queue_id; /* to KFD */
+ uint32_t ring_size; /* to KFD */
+ uint32_t queue_percentage; /* to KFD */
+ uint32_t queue_priority; /* to KFD */
+};
+
+/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
+#define KFD_IOC_CACHE_POLICY_COHERENT 0
+#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
+
+struct kfd_ioctl_set_memory_policy_args {
+ uint64_t alternate_aperture_base; /* to KFD */
+ uint64_t alternate_aperture_size; /* to KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t default_policy; /* to KFD */
+ uint32_t alternate_policy; /* to KFD */
+ uint32_t pad;
+};
+
+/*
+ * All counters are monotonic. They are used for profiling of compute jobs.
+ * The profiling is done by userspace.
+ *
+ * In case of GPU reset, the counter should not be affected.
+ */
+
+struct kfd_ioctl_get_clock_counters_args {
+ uint64_t gpu_clock_counter; /* from KFD */
+ uint64_t cpu_clock_counter; /* from KFD */
+ uint64_t system_clock_counter; /* from KFD */
+ uint64_t system_clock_freq; /* from KFD */
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+#define NUM_OF_SUPPORTED_GPUS 7
+
+struct kfd_process_device_apertures {
+ uint64_t lds_base; /* from KFD */
+ uint64_t lds_limit; /* from KFD */
+ uint64_t scratch_base; /* from KFD */
+ uint64_t scratch_limit; /* from KFD */
+ uint64_t gpuvm_base; /* from KFD */
+ uint64_t gpuvm_limit; /* from KFD */
+ uint32_t gpu_id; /* from KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_get_process_apertures_args {
+ struct kfd_process_device_apertures
+ process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
+
+ /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
+ uint32_t num_of_nodes;
+ uint32_t pad;
+};
+
+#define KFD_IOC_MAGIC 'K'
+
+#define KFD_IOC_GET_VERSION \
+ _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+
+#define KFD_IOC_CREATE_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+
+#define KFD_IOC_DESTROY_QUEUE \
+ _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+
+#define KFD_IOC_SET_MEMORY_POLICY \
+ _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+
+#define KFD_IOC_GET_CLOCK_COUNTERS \
+ _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+
+#define KFD_IOC_GET_PROCESS_APERTURES \
+ _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+
+#define KFD_IOC_UPDATE_QUEUE \
+ _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+
+#endif
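A hedged sketch of exercising the new interface from userspace by querying the ioctl version on the amdkfd device node (error handling trimmed for brevity):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int kfd_print_version(void)
	{
		struct kfd_ioctl_get_version_args args = {0};
		int fd = open("/dev/kfd", O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, KFD_IOC_GET_VERSION, &args) == 0)
			printf("KFD ioctl version %u.%u\n",
			       args.major_version, args.minor_version);
		close(fd);
		return 0;
	}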
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 60768822b140..a37fd1224f36 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -647,11 +647,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15
#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
-#define KVM_CAP_DEVICE_ASSIGNMENT 17
#define KVM_CAP_IOMMU 18
-#ifdef __KVM_HAVE_MSI
-#define KVM_CAP_DEVICE_MSI 20
-#endif
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
#define KVM_CAP_USER_NMI 22
@@ -663,10 +659,6 @@ struct kvm_ppc_smmu_info {
#endif
#define KVM_CAP_IRQ_ROUTING 25
#define KVM_CAP_IRQ_INJECT_STATUS 26
-#define KVM_CAP_DEVICE_DEASSIGNMENT 27
-#ifdef __KVM_HAVE_MSIX
-#define KVM_CAP_DEVICE_MSIX 28
-#endif
#define KVM_CAP_ASSIGN_DEV_IRQ 29
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
@@ -1107,9 +1099,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
-/* IA64 stack access */
-#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
-#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 77c60311a6c6..7d664ea85ebd 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -72,5 +72,6 @@
#define MTD_INODE_FS_MAGIC 0x11307854
#define ANON_INODE_FS_MAGIC 0x09041934
#define BTRFS_TEST_MAGIC 0x73727279
+#define NSFS_MAGIC 0x6e736673
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
new file mode 100644
index 000000000000..23b40908be30
--- /dev/null
+++ b/include/uapi/linux/media-bus-format.h
@@ -0,0 +1,125 @@
+/*
+ * Media Bus API header
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MEDIA_BUS_FORMAT_H
+#define __LINUX_MEDIA_BUS_FORMAT_H
+
+/*
+ * These bus formats uniquely identify data formats on the data bus. Format 0
+ * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where
+ * the data format is fixed. Additionally, "2X8" means that one pixel is
+ * transferred in two 8-bit samples, "BE" or "LE" specify in which order those
+ * samples are transferred over the bus: "LE" means that the least significant
+ * bits are transferred first, "BE" means that the most significant bits are
+ * transferred first, and "PADHI" and "PADLO" define which bits - low or high,
+ * in the incomplete high byte, are filled with padding bits.
+ *
+ * The bus formats are grouped by type, bus_width, bits per component, samples
+ * per pixel and order of subsamples. Numerical values are sorted using generic
+ * numerical sort order (8 thus comes before 10).
+ *
+ * As their value can't change when a new bus format is inserted in the
+ * enumeration, the bus formats are explicitly given a numerical value. The next
+ * free values for each category are listed below, update them when inserting
+ * new pixel codes.
+ */
+
+#define MEDIA_BUS_FMT_FIXED 0x0001
+
+/* RGB - next is 0x100e */
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
+#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
+#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
+#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
+#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
+#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
+#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
+#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
+
+/* YUV (including grey) - next is 0x2024 */
+#define MEDIA_BUS_FMT_Y8_1X8 0x2001
+#define MEDIA_BUS_FMT_UV8_1X8 0x2015
+#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
+#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003
+#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004
+#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005
+#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006
+#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007
+#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
+#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
+#define MEDIA_BUS_FMT_Y10_1X10 0x200a
+#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
+#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
+#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
+#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
+#define MEDIA_BUS_FMT_Y12_1X12 0x2013
+#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
+#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
+#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
+#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012
+#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014
+#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a
+#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
+#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
+#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
+#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
+#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
+#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
+#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
+#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
+
+/* Bayer - next is 0x3019 */
+#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
+#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
+#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
+#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014
+#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015
+#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016
+#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017
+#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018
+#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b
+#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c
+#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009
+#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006
+#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007
+#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e
+#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a
+#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f
+#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008
+#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010
+#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011
+#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012
+
+/* JPEG compressed formats - next is 0x4002 */
+#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
+
+/* Vendor specific formats - next is 0x5002 */
+
+/* S5C73M3 sensor specific interleaved UYVY and JPEG */
+#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001
+
+/* HSV - next is 0x6002 */
+#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+
+#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index a70375526578..f51c8001dbe5 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -51,16 +51,28 @@ struct msginfo {
};
/*
- * Scaling factor to compute msgmni:
- * the memory dedicated to msg queues (msgmni * msgmnb) should occupy
- * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c):
- * up to 8MB : msgmni = 16 (MSGMNI)
- * 4 GB : msgmni = 8K
- * more than 16 GB : msgmni = 32K (IPCMNI)
+ * MSGMNI, MSGMAX and MSGMNB are default values which can be
+ * modified by sysctl.
+ *
+ * MSGMNI is the upper limit for the number of messages queues per
+ * namespace.
+ * It has been chosen to be as large as possible without facilitating
+ * scenarios where userspace causes overflows when adjusting the limits via
+ * operations of the form "retrieve current limit; add X; update limit".
+ *
+ * MSGMNB is the default size of a new message queue. Non-root tasks can
+ * decrease the size with msgctl(IPC_SET), root tasks
+ * (actually: CAP_SYS_RESOURCE) can both increase and decrease the queue
+ * size. The optimal value is application dependent.
+ * 16384 is used because it was always used (since 0.99.10)
+ *
+ * MSGMAX is the maximum size of an individual message; it's a global
+ * (per-namespace) limit that applies for all message queues.
+ * It's set to 1/2 of MSGMNB, to ensure that at least two messages fit into
+ * the queue. This is also an arbitrary choice (since 2.6.0).
*/
-#define MSG_MEM_SCALE 32
-#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */
+#define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */
#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */
#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 4a1d7e96dfe3..f3d77f9f1e0b 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -35,11 +35,11 @@ enum {
*/
#define NTF_USE 0x01
-#define NTF_PROXY 0x08 /* == ATF_PUBL */
-#define NTF_ROUTER 0x80
-
#define NTF_SELF 0x02
#define NTF_MASTER 0x04
+#define NTF_PROXY 0x08 /* == ATF_PUBL */
+#define NTF_EXT_LEARNED 0x10
+#define NTF_ROUTER 0x80
/*
* Neighbor Cache Entry States.
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index ff354021bb69..edbc888ceb51 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -23,8 +23,9 @@ enum {
SOF_TIMESTAMPING_OPT_ID = (1<<7),
SOF_TIMESTAMPING_TX_SCHED = (1<<8),
SOF_TIMESTAMPING_TX_ACK = (1<<9),
+ SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_ACK,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
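A hedged sketch of requesting the new SOF_TIMESTAMPING_OPT_CMSG flag together with software TX timestamps via SO_TIMESTAMPING (whether this particular flag combination is appropriate depends on the application):

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	static int enable_tx_timestamps(int sock)
	{
		unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_SOFTWARE |
				   SOF_TIMESTAMPING_OPT_CMSG;

		return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
				  &val, sizeof(val));
	}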
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index ca03119111a2..5ab4e60894cf 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -256,11 +256,17 @@ enum {
IPSET_COUNTER_GT,
};
-struct ip_set_counter_match {
+/* Backward compatibility for set match v3 */
+struct ip_set_counter_match0 {
__u8 op;
__u64 value;
};
+struct ip_set_counter_match {
+ __aligned_u64 value;
+ __u8 op;
+};
+
/* Interface to iptables/ip6tables */
#define SO_IP_SET 83
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index f31fe7b660a5..832bc46db78b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -579,6 +579,7 @@ enum nft_exthdr_attributes {
* @NFT_META_CPU: cpu id through smp_processor_id()
* @NFT_META_IIFGROUP: packet input interface group
* @NFT_META_OIFGROUP: packet output interface group
+ * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -604,6 +605,7 @@ enum nft_meta_keys {
NFT_META_CPU,
NFT_META_IIFGROUP,
NFT_META_OIFGROUP,
+ NFT_META_CGROUP,
};
/**
@@ -838,6 +840,22 @@ enum nft_masq_attributes {
#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1)
/**
+ * enum nft_redir_attributes - nf_tables redirect expression netlink attributes
+ *
+ * @NFTA_REDIR_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_REDIR_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ * @NFTA_REDIR_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
+ */
+enum nft_redir_attributes {
+ NFTA_REDIR_UNSPEC,
+ NFTA_REDIR_REG_PROTO_MIN,
+ NFTA_REDIR_REG_PROTO_MAX,
+ NFTA_REDIR_FLAGS,
+ __NFTA_REDIR_MAX
+};
+#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
+
+/**
* enum nft_gen_attributes - nf_tables ruleset generation attributes
*
* @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h
index d6a1df1f2947..d4e02348384c 100644
--- a/include/uapi/linux/netfilter/xt_set.h
+++ b/include/uapi/linux/netfilter/xt_set.h
@@ -66,8 +66,8 @@ struct xt_set_info_target_v2 {
struct xt_set_info_match_v3 {
struct xt_set_info match_set;
- struct ip_set_counter_match packets;
- struct ip_set_counter_match bytes;
+ struct ip_set_counter_match0 packets;
+ struct ip_set_counter_match0 bytes;
__u32 flags;
};
@@ -81,4 +81,13 @@ struct xt_set_info_target_v3 {
__u32 timeout;
};
+/* Revision 4 match */
+
+struct xt_set_info_match_v4 {
+ struct xt_set_info match_set;
+ struct ip_set_counter_match packets;
+ struct ip_set_counter_match bytes;
+ __u32 flags;
+};
+
#endif /*_XT_SET_H*/
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 9b19b4461928..8119255feae4 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -116,6 +116,7 @@ enum nfc_commands {
NFC_EVENT_SE_TRANSACTION,
NFC_CMD_GET_SE,
NFC_CMD_SE_IO,
+ NFC_CMD_ACTIVATE_TARGET,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -196,15 +197,19 @@ enum nfc_sdp_attr {
};
#define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1)
-#define NFC_DEVICE_NAME_MAXSIZE 8
-#define NFC_NFCID1_MAXSIZE 10
-#define NFC_NFCID2_MAXSIZE 8
-#define NFC_NFCID3_MAXSIZE 10
-#define NFC_SENSB_RES_MAXSIZE 12
-#define NFC_SENSF_RES_MAXSIZE 18
-#define NFC_GB_MAXSIZE 48
-#define NFC_FIRMWARE_NAME_MAXSIZE 32
-#define NFC_ISO15693_UID_MAXSIZE 8
+#define NFC_DEVICE_NAME_MAXSIZE 8
+#define NFC_NFCID1_MAXSIZE 10
+#define NFC_NFCID2_MAXSIZE 8
+#define NFC_NFCID3_MAXSIZE 10
+#define NFC_SENSB_RES_MAXSIZE 12
+#define NFC_SENSF_RES_MAXSIZE 18
+#define NFC_ATR_REQ_MAXSIZE 64
+#define NFC_ATR_RES_MAXSIZE 64
+#define NFC_ATR_REQ_GB_MAXSIZE 48
+#define NFC_ATR_RES_GB_MAXSIZE 47
+#define NFC_GB_MAXSIZE 48
+#define NFC_FIRMWARE_NAME_MAXSIZE 32
+#define NFC_ISO15693_UID_MAXSIZE 8
/* NFC protocols */
#define NFC_PROTO_JEWEL 1
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 4b28dc07bcb1..b37bd5a1cb82 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -227,7 +227,11 @@
* the interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
- * by %NL80211_ATTR_IFINDEX.
+ * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
+ * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
+ * of disconnection indication should be sent to the station
+ * (Deauthentication or Disassociation frame and reason code for that
+ * frame).
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
@@ -639,7 +643,18 @@
* @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels
* independently of the userspace SME, send this event indicating
* %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the
- * attributes determining channel width.
+ * attributes determining channel width. This indication may also be
+ * sent when a remotely-initiated switch (e.g., when a STA receives a CSA
+ * from the remote AP) is completed;
+ *
+ * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch
+ * has been started on an interface, regardless of the initiator
+ * (ie. whether it was requested from a remote device or
+ * initiated on our own). It indicates that
+ * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ
+ * after %NL80211_ATTR_CH_SWITCH_COUNT TBTT's. The userspace may
+ * decide to react to this indication by requesting other
+ * interfaces to change channel as well.
*
* @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
* its %NL80211_ATTR_WDEV identifier. It must have been created with
@@ -738,6 +753,27 @@
* before removing a station entry entirely, or before disassociating
* or similar, cleanup will happen in the driver/device in this case.
*
+ * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to
+ * destination %NL80211_ATTR_MAC on the interface identified by
+ * %NL80211_ATTR_IFINDEX.
+ *
+ * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and
+ * bandwidth of a channel must be given.
+ * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the
+ * network is determined by the network interface.
+ *
+ * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer,
+ * identified by the %NL80211_ATTR_MAC parameter. A target channel is
+ * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining
+ * channel width/type. The target operating class is given via
+ * %NL80211_ATTR_OPER_CLASS.
+ * The driver is responsible for continually initiating channel-switching
+ * operations and returning to the base channel for communication with the
+ * AP.
+ * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS
+ * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel
+ * when this command completes.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -912,6 +948,16 @@ enum nl80211_commands {
NL80211_CMD_ADD_TX_TS,
NL80211_CMD_DEL_TX_TS,
+ NL80211_CMD_GET_MPP,
+
+ NL80211_CMD_JOIN_OCB,
+ NL80211_CMD_LEAVE_OCB,
+
+ NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
+
+ NL80211_CMD_TDLS_CHANNEL_SWITCH,
+ NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1606,9 +1652,9 @@ enum nl80211_commands {
* @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
* As specified in the &enum nl80211_tdls_peer_capability.
*
- * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
+ * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface
* creation then the new interface will be owned by the netlink socket
- * that created it and will be destroyed when the socket is closed
+ * that created it and will be destroyed when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1638,6 +1684,11 @@ enum nl80211_commands {
* @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see
* &enum nl80211_smps_mode.
*
+ * @NL80211_ATTR_OPER_CLASS: operating class
+ *
+ * @NL80211_ATTR_MAC_MASK: MAC address mask
+ *
+ * @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1973,7 +2024,7 @@ enum nl80211_attrs {
NL80211_ATTR_TDLS_PEER_CAPABILITY,
- NL80211_ATTR_IFACE_SOCKET_OWNER,
+ NL80211_ATTR_SOCKET_OWNER,
NL80211_ATTR_CSA_C_OFFSETS_TX,
NL80211_ATTR_MAX_CSA_COUNTERS,
@@ -1990,15 +2041,21 @@ enum nl80211_attrs {
NL80211_ATTR_SMPS_MODE,
+ NL80211_ATTR_OPER_CLASS,
+
+ NL80211_ATTR_MAC_MASK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
+ NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST,
NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
};
/* source-level API compatibility */
#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
+#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -2064,6 +2121,8 @@ enum nl80211_attrs {
* and therefore can't be created in the normal ways, use the
* %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
* commands to create and destroy one
+ * @NL80211_IFTYPE_OCB: Outside Context of a BSS
+ * This mode corresponds to the MIB variable dot11OCBActivated=true
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -2083,6 +2142,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_CLIENT,
NL80211_IFTYPE_P2P_GO,
NL80211_IFTYPE_P2P_DEVICE,
+ NL80211_IFTYPE_OCB,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -2631,6 +2691,11 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
+ * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
+ * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
+ * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
+ * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -2643,11 +2708,18 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_IR = 1<<7,
__NL80211_RRF_NO_IBSS = 1<<8,
NL80211_RRF_AUTO_BW = 1<<11,
+ NL80211_RRF_GO_CONCURRENT = 1<<12,
+ NL80211_RRF_NO_HT40MINUS = 1<<13,
+ NL80211_RRF_NO_HT40PLUS = 1<<14,
+ NL80211_RRF_NO_80MHZ = 1<<15,
+ NL80211_RRF_NO_160MHZ = 1<<16,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
#define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR
#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
+#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
+ NL80211_RRF_NO_HT40PLUS)
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -3379,6 +3451,8 @@ enum nl80211_ps_state {
* interval in which %NL80211_ATTR_CQM_TXE_PKTS and
* %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an
* %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting.
+ * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon
+ * loss event
* @__NL80211_ATTR_CQM_AFTER_LAST: internal
* @NL80211_ATTR_CQM_MAX: highest key attribute
*/
@@ -3391,6 +3465,7 @@ enum nl80211_attr_cqm {
NL80211_ATTR_CQM_TXE_RATE,
NL80211_ATTR_CQM_TXE_PKTS,
NL80211_ATTR_CQM_TXE_INTVL,
+ NL80211_ATTR_CQM_BEACON_LOSS_EVENT,
/* keep last */
__NL80211_ATTR_CQM_AFTER_LAST,
@@ -3403,9 +3478,7 @@ enum nl80211_attr_cqm {
* configured threshold
* @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
* configured threshold
- * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss.
- * (Note that deauth/disassoc will still follow if the AP is not
- * available. This event might get used as roaming event, etc.)
+ * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent)
*/
enum nl80211_cqm_rssi_threshold_event {
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
@@ -3545,6 +3618,25 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only,
* the TCP connection ran out of tokens to use for data to send to the
* service
+ * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
+ * is detected. This is a nested attribute that contains the
+ * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It
+ * specifies how the scan is performed (e.g. the interval and the
+ * channels to scan) as well as the scan results that will
+ * trigger a wake (i.e. the matchsets).
+ * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
+ * containing an array with information about what triggered the
+ * wake up. If no elements are present in the array, it means
+ * that the information is not available. If more than one
+ * element is present, it means that more than one match
+ * occurred.
+ * Each element in the array is a nested attribute that contains
+ * one optional %NL80211_ATTR_SSID attribute and one optional
+ * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of
+ * these attributes must be present. If
+ * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one
+ * frequency, it means that the match occurred in more than one
+ * channel.
* @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
* @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
*
@@ -3570,6 +3662,8 @@ enum nl80211_wowlan_triggers {
NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
+ NL80211_WOWLAN_TRIG_NET_DETECT,
+ NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS,
/* keep last */
NUM_NL80211_WOWLAN_TRIG,
@@ -4042,6 +4136,27 @@ enum nl80211_ap_sme_features {
* multiplexing powersave, ie. can turn off all but one chain
* and then wake the rest up as required after, for example,
* rts/cts handshake.
+ * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM
+ * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS
+ * command. Standard IEEE 802.11 TSPEC setup is not yet supported; it
+ * needs to be able to handle Block-Ack agreements and other things.
+ * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring
+ * the vif's MAC address upon creation.
+ * See 'macaddr' field in the vif_params (cfg80211.h).
+ * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when
+ * operating as a TDLS peer.
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address during scan (if the device is unassociated); the
+ * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC
+ * address mask/value will be used.
+ * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports
+ * using a random MAC address for every scan iteration during scheduled
+ * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
+ * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address for every scan iteration during "net detect", i.e.
+ * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
*/
enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -4070,6 +4185,12 @@ enum nl80211_feature_flags {
NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23,
NL80211_FEATURE_STATIC_SMPS = 1 << 24,
NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25,
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26,
+ NL80211_FEATURE_MAC_ON_CREATE = 1 << 27,
+ NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28,
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31,
};
/**
@@ -4118,11 +4239,21 @@ enum nl80211_connect_failed_reason {
* dangerous because will destroy stations performance as a lot of frames
* will be lost while scanning off-channel, therefore it must be used only
* when really needed
+ * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or
+ * for scheduled scan: a different one for every scan iteration). When the
+ * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and
+ * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only
+ * the masked bits will be preserved from the MAC address and the remainder
+ * randomised. If the attributes are not given full randomisation (46 bits,
+ * locally administered 1, multicast 0) is assumed.
+ * This flag must not be requested when the feature isn't supported, check
+ * the nl80211 feature flags for the device.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
NL80211_SCAN_FLAG_FLUSH = 1<<1,
NL80211_SCAN_FLAG_AP = 1<<2,
+ NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3,
};
/**
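
A minimal userspace sketch of the new scan randomisation path added above, assuming libnl-3 (any generic-netlink library would do); "wlan0", the MAC/mask values and the omitted error handling are illustrative only, not part of the nl80211 ABI:

#include <net/if.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Sketch: request a scan with a randomised source MAC address. */
static int scan_with_random_mac(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	unsigned char mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned char mask[6] = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 };
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TRIGGER_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
	nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, NL80211_SCAN_FLAG_RANDOM_ADDR);
	/* Only the bits set in the mask are preserved from @mac; the
	 * remaining bits are randomised by the kernel/driver. */
	nla_put(msg, NL80211_ATTR_MAC, 6, mac);
	nla_put(msg, NL80211_ATTR_MAC_MASK, 6, mask);

	return nl_send_auto(sk, msg);
}
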
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 29a7d8619d8d..26386cf3db44 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -181,6 +181,22 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+struct nvme_reservation_status {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[13];
+ struct {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 resv3[5];
+ __le64 hostid;
+ __le64 rkey;
+ } regctl_ds[];
+};
+
/* I/O commands */
enum nvme_opcode {
@@ -189,7 +205,12 @@ enum nvme_opcode {
nvme_cmd_read = 0x02,
nvme_cmd_write_uncor = 0x04,
nvme_cmd_compare = 0x05,
+ nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_resv_register = 0x0d,
+ nvme_cmd_resv_report = 0x0e,
+ nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_resv_release = 0x15,
};
struct nvme_common_command {
@@ -305,7 +326,11 @@ enum {
NVME_FEAT_IRQ_CONFIG = 0x09,
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
- NVME_FEAT_SW_PROGRESS = 0x0c,
+ NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_SW_PROGRESS = 0x80,
+ NVME_FEAT_HOST_ID = 0x81,
+ NVME_FEAT_RESV_MASK = 0x82,
+ NVME_FEAT_RESV_PERSIST = 0x83,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -440,9 +465,15 @@ enum {
NVME_SC_FUSED_MISSING = 0xa,
NVME_SC_INVALID_NS = 0xb,
NVME_SC_CMD_SEQ_ERROR = 0xc,
+ NVME_SC_SGL_INVALID_LAST = 0xd,
+ NVME_SC_SGL_INVALID_COUNT = 0xe,
+ NVME_SC_SGL_INVALID_DATA = 0xf,
+ NVME_SC_SGL_INVALID_METADATA = 0x10,
+ NVME_SC_SGL_INVALID_TYPE = 0x11,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
+ NVME_SC_RESERVATION_CONFLICT = 0x83,
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -454,7 +485,15 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
+ NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_INVALID_QUEUE = 0x10c,
+ NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
+ NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
+ NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
+ NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
NVME_SC_BAD_ATTRIBUTES = 0x180,
+ NVME_SC_INVALID_PI = 0x181,
+ NVME_SC_READ_ONLY = 0x182,
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -489,7 +528,7 @@ struct nvme_user_io {
__u16 appmask;
};
-struct nvme_admin_cmd {
+struct nvme_passthru_cmd {
__u8 opcode;
__u8 flags;
__u16 rsvd1;
@@ -510,8 +549,11 @@ struct nvme_admin_cmd {
__u32 result;
};
+#define nvme_admin_cmd nvme_passthru_cmd
+
#define NVME_IOCTL_ID _IO('N', 0x40)
#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
+#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
#endif /* _UAPI_LINUX_NVME_H */
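
The rename above keeps struct nvme_admin_cmd as an alias, so existing callers of NVME_IOCTL_ADMIN_CMD still build. A hedged sketch of an Identify Controller passthrough command; /dev/nvme0 and the 4096-byte buffer are illustrative, and error handling is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

/* Sketch: issue an Identify Controller admin command via passthrough. */
int identify_controller(void)
{
	int fd = open("/dev/nvme0", O_RDWR);
	void *buf = calloc(1, 4096);
	struct nvme_passthru_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode   = 0x06;           /* Identify */
	cmd.nsid     = 0;
	cmd.addr     = (uintptr_t)buf;
	cmd.data_len = 4096;
	cmd.cdw10    = 1;              /* CNS=1: identify controller */

	return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
}
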
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 435eabc5ffaa..3a6dcaa359b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -157,6 +157,11 @@ enum ovs_packet_cmd {
* notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
* %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
* specified there.
+ * @OVS_PACKET_ATTR_EGRESS_TUN_KEY: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
+ * output port is actually a tunnel port. Contains the output tunnel key
+ * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -167,6 +172,8 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
+ OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
+ attributes. */
__OVS_PACKET_ATTR_MAX
};
@@ -293,6 +300,9 @@ enum ovs_key_attr {
OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash
is not computed by the datapath. */
OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
+ OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
+ * The implementation may restrict
+ * the accepted length of the array. */
#ifdef __KERNEL__
OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
@@ -312,6 +322,8 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */
OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */
OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
+ OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
+ OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -340,6 +352,10 @@ struct ovs_key_ethernet {
__u8 eth_dst[ETH_ALEN];
};
+struct ovs_key_mpls {
+ __be32 mpls_lse;
+};
+
struct ovs_key_ipv4 {
__be32 ipv4_src;
__be32 ipv4_dst;
@@ -393,9 +409,9 @@ struct ovs_key_arp {
};
struct ovs_key_nd {
- __u32 nd_target[4];
- __u8 nd_sll[ETH_ALEN];
- __u8 nd_tll[ETH_ALEN];
+ __be32 nd_target[4];
+ __u8 nd_sll[ETH_ALEN];
+ __u8 nd_tll[ETH_ALEN];
};
/**
@@ -441,6 +457,8 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
+ OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error
+ * logging should be suppressed. */
__OVS_FLOW_ATTR_MAX
};
@@ -473,17 +491,34 @@ enum ovs_sample_attr {
* message should be sent. Required.
* @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is
* copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
+ * tunnel info.
*/
enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_UNSPEC,
OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
+ OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
+ * to get tunnel info. */
__OVS_USERSPACE_ATTR_MAX
};
#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
/**
+ * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument.
+ * @mpls_lse: MPLS label stack entry to push.
+ * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
+ *
+ * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
+ * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
+ */
+struct ovs_action_push_mpls {
+ __be32 mpls_lse;
+ __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
+};
+
+/**
* struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
* @vlan_tpid: Tag protocol identifier (TPID) to push.
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
@@ -534,6 +569,15 @@ struct ovs_action_hash {
* @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
+ * top of the packet's MPLS label stack. Set the ethertype of the
+ * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to
+ * indicate the new packet contents.
+ * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the
+ * packet's MPLS label stack. Set the encapsulating frame's ethertype to
+ * indicate the new packet contents. This could potentially still be
+ * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
+ * is no MPLS label stack, as determined by ethertype, no action is taken.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -550,6 +594,9 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */
OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
+ OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */
+ OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */
+
__OVS_ACTION_ATTR_MAX
};
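
For the new MPLS push action, a hedged sketch of how a controller might fill the argument struct; the label/TC/TTL values are illustrative, and the label-stack-entry layout (label << 12 | TC << 9 | S << 8 | TTL) follows RFC 3032 rather than anything OVS-specific:

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/openvswitch.h>

/* Sketch: argument for OVS_ACTION_ATTR_PUSH_MPLS.
 * Label 20, TC 0, bottom-of-stack set, TTL 64. */
static struct ovs_action_push_mpls make_push_mpls(void)
{
	struct ovs_action_push_mpls push = {
		.mpls_lse = htonl((20u << 12) | (1u << 8) | 64),
		.mpls_ethertype = htons(ETH_P_MPLS_UC),  /* unicast MPLS */
	};
	return push;
}
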
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index eb0f1a554d7b..9c9b8b4480cd 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -235,6 +235,7 @@ enum {
#define RTPROT_NTK 15 /* Netsukuku */
#define RTPROT_DHCP 16 /* DHCP client */
#define RTPROT_MROUTED 17 /* Multicast daemon */
+#define RTPROT_BABEL 42 /* Babel daemon */
/* rtm_scope
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index 541fce03b50c..dd73b908b2f3 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -63,10 +63,22 @@ struct seminfo {
int semaem;
};
-#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */
-#define SEMMSL 250 /* <= 8 000 max num of semaphores per id */
+/*
+ * SEMMNI, SEMMSL and SEMMNS are default values which can be
+ * modified by sysctl.
+ * The values have been chosen to be larger than necessary for any
+ * known configuration.
+ *
+ * SEMOPM should not be increased beyond 1000, otherwise there is the
+ * risk that semop()/semtimedop() fails due to kernel memory fragmentation when
+ * allocating the sop array.
+ */
+
+
+#define SEMMNI 32000 /* <= IPCMNI max # of semaphore identifiers */
+#define SEMMSL 32000 /* <= INT_MAX max num of semaphores per id */
#define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */
-#define SEMOPM 32 /* <= 1 000 max num of ops per semop call */
+#define SEMOPM 500 /* <= 1 000 max num of ops per semop call */
#define SEMVMX 32767 /* <= 32767 semaphore maximum value */
#define SEMAEM SEMVMX /* adjust on exit max value */
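
The raised SEMOPM default only changes how many operations one semop()/semtimedop() call may carry; the calling convention is unchanged. A minimal sketch with illustrative values (two semaphores, two ops per call):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Sketch: create one semaphore set and submit a small batch of ops in
 * a single semop() call. The per-call limit is SEMOPM operations. */
int sem_batch_demo(void)
{
	int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = +1, .sem_flg = 0 },
		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
	};

	if (semop(semid, ops, 2) < 0)
		perror("semop");
	return semid;
}
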
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 16ad8521af6a..c17218094f18 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -54,7 +54,8 @@
#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */
#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
-#define PORT_MAX_8250 28 /* max port ID */
+#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
+#define PORT_MAX_8250 29 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index df6c9ab6b0cd..53af3b790129 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -359,6 +359,7 @@
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
#define UART_OMAP_WER 0x17 /* Wake-up enable register */
+#define UART_OMAP_TX_LVL 0x1a /* TX FIFO level register */
/*
* These are the definitions for the MDR1 register
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index df40137f33dd..b22224100011 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -156,6 +156,7 @@ enum
UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */
UDP_MIB_SNDBUFERRORS, /* SndbufErrors */
UDP_MIB_CSUMERRORS, /* InCsumErrors */
+ UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */
__UDP_MIB_MAX
};
@@ -265,6 +266,10 @@ enum
LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */
LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */
LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */
+ LINUX_MIB_TCPHYSTARTTRAINDETECT, /* TCPHystartTrainDetect */
+ LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */
+ LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */
+ LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 56f121605c99..b057da2b87a4 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -7,3 +7,4 @@ header-y += tc_mirred.h
header-y += tc_nat.h
header-y += tc_pedit.h
header-y += tc_skbedit.h
+header-y += tc_vlan.h
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
new file mode 100644
index 000000000000..f7b8d448b960
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_VLAN_H
+#define __LINUX_TC_VLAN_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_VLAN 12
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+enum {
+ TCA_VLAN_UNSPEC,
+ TCA_VLAN_TM,
+ TCA_VLAN_PARMS,
+ TCA_VLAN_PUSH_VLAN_ID,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ __TCA_VLAN_MAX,
+};
+#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
+
+#endif
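
A hedged sketch of the fixed-size parameter block a user of the new vlan action would pass as TCA_VLAN_PARMS; the tc_gen fields come from linux/pkt_cls.h, TC_ACT_PIPE is only an illustrative action verdict, and the VLAN id itself is assumed to travel as a separate u16 TCA_VLAN_PUSH_VLAN_ID attribute:

#include <linux/pkt_cls.h>
#include <linux/tc_act/tc_vlan.h>

/* Sketch: parameter block for a "vlan push" action. The tc_gen macro
 * expands to the generic action fields (index, capab, action, ...),
 * which is why .action is a valid member here. */
static const struct tc_vlan vlan_push_parm = {
	.action   = TC_ACT_PIPE,
	.v_action = TCA_VLAN_ACT_PUSH,
};
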
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
new file mode 100644
index 000000000000..ac5535855982
--- /dev/null
+++ b/include/uapi/linux/thermal.h
@@ -0,0 +1,35 @@
+#ifndef _UAPI_LINUX_THERMAL_H
+#define _UAPI_LINUX_THERMAL_H
+
+#define THERMAL_NAME_LENGTH 20
+
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME "thermal_event"
+#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+
+/* Events supported by Thermal Netlink */
+enum events {
+ THERMAL_AUX0,
+ THERMAL_AUX1,
+ THERMAL_CRITICAL,
+ THERMAL_DEV_FAULT,
+};
+
+/* attributes of thermal_genl_family */
+enum {
+ THERMAL_GENL_ATTR_UNSPEC,
+ THERMAL_GENL_ATTR_EVENT,
+ __THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+ THERMAL_GENL_CMD_UNSPEC,
+ THERMAL_GENL_CMD_EVENT,
+ __THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_THERMAL_H */
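
A minimal listener sketch for the newly exported thermal event family, assuming libnl-3 (genl_ctrl_resolve_grp(), nl_socket_add_membership()); error handling is omitted:

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/thermal.h>

/* Sketch: join the thermal_event multicast group so that
 * THERMAL_GENL_CMD_EVENT notifications can be received. */
static struct nl_sock *thermal_listen(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, THERMAL_GENL_FAMILY_NAME,
				    THERMAL_GENL_MCAST_GROUP_NAME);
	nl_socket_add_membership(sk, grp);
	return sk;   /* caller then receives with nl_recvmsgs_default() */
}
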
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
new file mode 100644
index 000000000000..8d723824ad69
--- /dev/null
+++ b/include/uapi/linux/tipc_netlink.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_NETLINK_H_
+#define _LINUX_TIPC_NETLINK_H_
+
+#define TIPC_GENL_V2_NAME "TIPCv2"
+#define TIPC_GENL_V2_VERSION 0x1
+
+/* Netlink commands */
+enum {
+ TIPC_NL_UNSPEC,
+ TIPC_NL_LEGACY,
+ TIPC_NL_BEARER_DISABLE,
+ TIPC_NL_BEARER_ENABLE,
+ TIPC_NL_BEARER_GET,
+ TIPC_NL_BEARER_SET,
+ TIPC_NL_SOCK_GET,
+ TIPC_NL_PUBL_GET,
+ TIPC_NL_LINK_GET,
+ TIPC_NL_LINK_SET,
+ TIPC_NL_LINK_RESET_STATS,
+ TIPC_NL_MEDIA_GET,
+ TIPC_NL_MEDIA_SET,
+ TIPC_NL_NODE_GET,
+ TIPC_NL_NET_GET,
+ TIPC_NL_NET_SET,
+ TIPC_NL_NAME_TABLE_GET,
+
+ __TIPC_NL_CMD_MAX,
+ TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
+};
+
+/* Top level netlink attributes */
+enum {
+ TIPC_NLA_UNSPEC,
+ TIPC_NLA_BEARER, /* nest */
+ TIPC_NLA_SOCK, /* nest */
+ TIPC_NLA_PUBL, /* nest */
+ TIPC_NLA_LINK, /* nest */
+ TIPC_NLA_MEDIA, /* nest */
+ TIPC_NLA_NODE, /* nest */
+ TIPC_NLA_NET, /* nest */
+ TIPC_NLA_NAME_TABLE, /* nest */
+
+ __TIPC_NLA_MAX,
+ TIPC_NLA_MAX = __TIPC_NLA_MAX - 1
+};
+
+/* Bearer info */
+enum {
+ TIPC_NLA_BEARER_UNSPEC,
+ TIPC_NLA_BEARER_NAME, /* string */
+ TIPC_NLA_BEARER_PROP, /* nest */
+ TIPC_NLA_BEARER_DOMAIN, /* u32 */
+
+ __TIPC_NLA_BEARER_MAX,
+ TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
+};
+
+/* Socket info */
+enum {
+ TIPC_NLA_SOCK_UNSPEC,
+ TIPC_NLA_SOCK_ADDR, /* u32 */
+ TIPC_NLA_SOCK_REF, /* u32 */
+ TIPC_NLA_SOCK_CON, /* nest */
+ TIPC_NLA_SOCK_HAS_PUBL, /* flag */
+
+ __TIPC_NLA_SOCK_MAX,
+ TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
+};
+
+/* Link info */
+enum {
+ TIPC_NLA_LINK_UNSPEC,
+ TIPC_NLA_LINK_NAME, /* string */
+ TIPC_NLA_LINK_DEST, /* u32 */
+ TIPC_NLA_LINK_MTU, /* u32 */
+ TIPC_NLA_LINK_BROADCAST, /* flag */
+ TIPC_NLA_LINK_UP, /* flag */
+ TIPC_NLA_LINK_ACTIVE, /* flag */
+ TIPC_NLA_LINK_PROP, /* nest */
+ TIPC_NLA_LINK_STATS, /* nest */
+ TIPC_NLA_LINK_RX, /* u32 */
+ TIPC_NLA_LINK_TX, /* u32 */
+
+ __TIPC_NLA_LINK_MAX,
+ TIPC_NLA_LINK_MAX = __TIPC_NLA_LINK_MAX - 1
+};
+
+/* Media info */
+enum {
+ TIPC_NLA_MEDIA_UNSPEC,
+ TIPC_NLA_MEDIA_NAME, /* string */
+ TIPC_NLA_MEDIA_PROP, /* nest */
+
+ __TIPC_NLA_MEDIA_MAX,
+ TIPC_NLA_MEDIA_MAX = __TIPC_NLA_MEDIA_MAX - 1
+};
+
+/* Node info */
+enum {
+ TIPC_NLA_NODE_UNSPEC,
+ TIPC_NLA_NODE_ADDR, /* u32 */
+ TIPC_NLA_NODE_UP, /* flag */
+
+ __TIPC_NLA_NODE_MAX,
+ TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
+};
+
+/* Net info */
+enum {
+ TIPC_NLA_NET_UNSPEC,
+ TIPC_NLA_NET_ID, /* u32 */
+ TIPC_NLA_NET_ADDR, /* u32 */
+
+ __TIPC_NLA_NET_MAX,
+ TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1
+};
+
+/* Name table info */
+enum {
+ TIPC_NLA_NAME_TABLE_UNSPEC,
+ TIPC_NLA_NAME_TABLE_PUBL, /* nest */
+
+ __TIPC_NLA_NAME_TABLE_MAX,
+ TIPC_NLA_NAME_TABLE_MAX = __TIPC_NLA_NAME_TABLE_MAX - 1
+};
+
+/* Publication info */
+enum {
+ TIPC_NLA_PUBL_UNSPEC,
+
+ TIPC_NLA_PUBL_TYPE, /* u32 */
+ TIPC_NLA_PUBL_LOWER, /* u32 */
+ TIPC_NLA_PUBL_UPPER, /* u32 */
+ TIPC_NLA_PUBL_SCOPE, /* u32 */
+ TIPC_NLA_PUBL_NODE, /* u32 */
+ TIPC_NLA_PUBL_REF, /* u32 */
+ TIPC_NLA_PUBL_KEY, /* u32 */
+
+ __TIPC_NLA_PUBL_MAX,
+ TIPC_NLA_PUBL_MAX = __TIPC_NLA_PUBL_MAX - 1
+};
+
+/* Nest, connection info */
+enum {
+ TIPC_NLA_CON_UNSPEC,
+
+ TIPC_NLA_CON_FLAG, /* flag */
+ TIPC_NLA_CON_NODE, /* u32 */
+ TIPC_NLA_CON_SOCK, /* u32 */
+ TIPC_NLA_CON_TYPE, /* u32 */
+ TIPC_NLA_CON_INST, /* u32 */
+
+ __TIPC_NLA_CON_MAX,
+ TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1
+};
+
+/* Nest, link properties. Valid for link, media and bearer */
+enum {
+ TIPC_NLA_PROP_UNSPEC,
+
+ TIPC_NLA_PROP_PRIO, /* u32 */
+ TIPC_NLA_PROP_TOL, /* u32 */
+ TIPC_NLA_PROP_WIN, /* u32 */
+
+ __TIPC_NLA_PROP_MAX,
+ TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1
+};
+
+/* Nest, statistics info */
+enum {
+ TIPC_NLA_STATS_UNSPEC,
+
+ TIPC_NLA_STATS_RX_INFO, /* u32 */
+ TIPC_NLA_STATS_RX_FRAGMENTS, /* u32 */
+ TIPC_NLA_STATS_RX_FRAGMENTED, /* u32 */
+ TIPC_NLA_STATS_RX_BUNDLES, /* u32 */
+ TIPC_NLA_STATS_RX_BUNDLED, /* u32 */
+ TIPC_NLA_STATS_TX_INFO, /* u32 */
+ TIPC_NLA_STATS_TX_FRAGMENTS, /* u32 */
+ TIPC_NLA_STATS_TX_FRAGMENTED, /* u32 */
+ TIPC_NLA_STATS_TX_BUNDLES, /* u32 */
+ TIPC_NLA_STATS_TX_BUNDLED, /* u32 */
+ TIPC_NLA_STATS_MSG_PROF_TOT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_CNT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_TOT, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P0, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P1, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P2, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P3, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P4, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P5, /* u32 */
+ TIPC_NLA_STATS_MSG_LEN_P6, /* u32 */
+ TIPC_NLA_STATS_RX_STATES, /* u32 */
+ TIPC_NLA_STATS_RX_PROBES, /* u32 */
+ TIPC_NLA_STATS_RX_NACKS, /* u32 */
+ TIPC_NLA_STATS_RX_DEFERRED, /* u32 */
+ TIPC_NLA_STATS_TX_STATES, /* u32 */
+ TIPC_NLA_STATS_TX_PROBES, /* u32 */
+ TIPC_NLA_STATS_TX_NACKS, /* u32 */
+ TIPC_NLA_STATS_TX_ACKS, /* u32 */
+ TIPC_NLA_STATS_RETRANSMITTED, /* u32 */
+ TIPC_NLA_STATS_DUPLICATES, /* u32 */
+ TIPC_NLA_STATS_LINK_CONGS, /* u32 */
+ TIPC_NLA_STATS_MAX_QUEUE, /* u32 */
+ TIPC_NLA_STATS_AVG_QUEUE, /* u32 */
+
+ __TIPC_NLA_STATS_MAX,
+ TIPC_NLA_STATS_MAX = __TIPC_NLA_STATS_MAX - 1
+};
+
+#endif
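
A hedged sketch of how a dump request against the new TIPCv2 family might be built with libnl-3; the bearer name "eth:eth0" is illustrative and error handling is omitted:

#include <linux/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

/* Sketch: query bearer info via TIPC_NL_BEARER_GET with a nested
 * TIPC_NLA_BEARER / TIPC_NLA_BEARER_NAME attribute. */
static int tipc_bearer_get(struct nl_sock *sk)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *nest;
	int family = genl_ctrl_resolve(sk, TIPC_GENL_V2_NAME);

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, TIPC_NL_BEARER_GET, TIPC_GENL_V2_VERSION);

	nest = nla_nest_start(msg, TIPC_NLA_BEARER);
	nla_put_string(msg, TIPC_NLA_BEARER_NAME, "eth:eth0");
	nla_nest_end(msg, nest);

	return nl_send_auto(sk, msg);
}
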
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index eefcb483a2c0..fae4864737fa 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -6,27 +6,31 @@
* shared by the tty_port flags structures.
*
* Define ASYNCB_* for convenient use with {test,set,clear}_bit.
+ *
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
+ * [x] in the bit comments indicates the flag is defunct and no longer used.
*/
#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes
* on the callout port */
 #define ASYNCB_FOURPORT 1 /* Set OUT1, OUT2 per AST Fourport settings */
#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
-#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */
+#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */
#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */
#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
* autoconfiguration */
-#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */
-#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */
-#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */
+#define ASYNCB_SESSION_LOCKOUT 8 /* [x] Lock out cua opens based on session */
+#define ASYNCB_PGRP_LOCKOUT 9 /* [x] Lock out cua opens based on pgrp */
+#define ASYNCB_CALLOUT_NOHUP 10 /* [x] Don't do hangups for cua device */
#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */
#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */
#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */
#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety
* checks. Note: can be dangerous! */
-#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */
-#define ASYNCB_LAST_USER 15
+#define ASYNCB_AUTOPROBE 15 /* [x] Port was autoprobed by PCI/PNP code */
+#define ASYNCB_MAGIC_MULTIPLIER 16 /* Use special CLK or divisor */
+#define ASYNCB_LAST_USER 16
/* Internal flags used only by kernel */
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
@@ -57,8 +61,11 @@
#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY)
#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART)
#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE)
+#define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER)
#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1)
+#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \
+ ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE)
#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \
ASYNC_LOW_LATENCY)
#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 2f6f8cafe773..15273987093e 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -43,6 +43,8 @@
#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
/* Cropping bounds */
#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Native frame size */
+#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003
/* Current composing area */
#define V4L2_SEL_TGT_COMPOSE 0x0100
/* Default composing area */
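
The new selection target can be read back with VIDIOC_G_SELECTION; a minimal sketch (drivers advertise support via the V4L2_IN_CAP_NATIVE_SIZE / V4L2_OUT_CAP_NATIVE_SIZE capability bits added later in this series):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: read the device's native frame size on a capture node. */
int get_native_size(int fd, struct v4l2_rect *out)
{
	struct v4l2_selection sel;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_NATIVE_SIZE;

	if (ioctl(fd, VIDIOC_G_SELECTION, &sel) < 0)
		return -1;
	*out = sel.r;
	return 0;
}
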
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 1445e858854f..26db20647e6f 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -11,122 +11,10 @@
#ifndef __LINUX_V4L2_MEDIABUS_H
#define __LINUX_V4L2_MEDIABUS_H
+#include <linux/media-bus-format.h>
#include <linux/types.h>
#include <linux/videodev2.h>
-/*
- * These pixel codes uniquely identify data formats on the media bus. Mostly
- * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
- * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
- * data format is fixed. Additionally, "2X8" means that one pixel is transferred
- * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
- * transferred over the bus: "LE" means that the least significant bits are
- * transferred first, "BE" means that the most significant bits are transferred
- * first, and "PADHI" and "PADLO" define which bits - low or high, in the
- * incomplete high byte, are filled with padding bits.
- *
- * The pixel codes are grouped by type, bus_width, bits per component, samples
- * per pixel and order of subsamples. Numerical values are sorted using generic
- * numerical sort order (8 thus comes before 10).
- *
- * As their value can't change when a new pixel code is inserted in the
- * enumeration, the pixel codes are explicitly given a numerical value. The next
- * free values for each category are listed below, update them when inserting
- * new pixel codes.
- */
-enum v4l2_mbus_pixelcode {
- V4L2_MBUS_FMT_FIXED = 0x0001,
-
- /* RGB - next is 0x100e */
- V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001,
- V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002,
- V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003,
- V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 0x1004,
- V4L2_MBUS_FMT_BGR565_2X8_BE = 0x1005,
- V4L2_MBUS_FMT_BGR565_2X8_LE = 0x1006,
- V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007,
- V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008,
- V4L2_MBUS_FMT_RGB666_1X18 = 0x1009,
- V4L2_MBUS_FMT_RGB888_1X24 = 0x100a,
- V4L2_MBUS_FMT_RGB888_2X12_BE = 0x100b,
- V4L2_MBUS_FMT_RGB888_2X12_LE = 0x100c,
- V4L2_MBUS_FMT_ARGB8888_1X32 = 0x100d,
-
- /* YUV (including grey) - next is 0x2024 */
- V4L2_MBUS_FMT_Y8_1X8 = 0x2001,
- V4L2_MBUS_FMT_UV8_1X8 = 0x2015,
- V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002,
- V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003,
- V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004,
- V4L2_MBUS_FMT_YVYU8_1_5X8 = 0x2005,
- V4L2_MBUS_FMT_UYVY8_2X8 = 0x2006,
- V4L2_MBUS_FMT_VYUY8_2X8 = 0x2007,
- V4L2_MBUS_FMT_YUYV8_2X8 = 0x2008,
- V4L2_MBUS_FMT_YVYU8_2X8 = 0x2009,
- V4L2_MBUS_FMT_Y10_1X10 = 0x200a,
- V4L2_MBUS_FMT_UYVY10_2X10 = 0x2018,
- V4L2_MBUS_FMT_VYUY10_2X10 = 0x2019,
- V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b,
- V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c,
- V4L2_MBUS_FMT_Y12_1X12 = 0x2013,
- V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f,
- V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010,
- V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011,
- V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012,
- V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 0x2014,
- V4L2_MBUS_FMT_UYVY10_1X20 = 0x201a,
- V4L2_MBUS_FMT_VYUY10_1X20 = 0x201b,
- V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d,
- V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e,
- V4L2_MBUS_FMT_YUV10_1X30 = 0x2016,
- V4L2_MBUS_FMT_AYUV8_1X32 = 0x2017,
- V4L2_MBUS_FMT_UYVY12_2X12 = 0x201c,
- V4L2_MBUS_FMT_VYUY12_2X12 = 0x201d,
- V4L2_MBUS_FMT_YUYV12_2X12 = 0x201e,
- V4L2_MBUS_FMT_YVYU12_2X12 = 0x201f,
- V4L2_MBUS_FMT_UYVY12_1X24 = 0x2020,
- V4L2_MBUS_FMT_VYUY12_1X24 = 0x2021,
- V4L2_MBUS_FMT_YUYV12_1X24 = 0x2022,
- V4L2_MBUS_FMT_YVYU12_1X24 = 0x2023,
-
- /* Bayer - next is 0x3019 */
- V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001,
- V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013,
- V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002,
- V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014,
- V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 0x3015,
- V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 0x3016,
- V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 0x3017,
- V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 0x3018,
- V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b,
- V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c,
- V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009,
- V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 0x300d,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 0x3003,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 0x3004,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 0x3005,
- V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 0x3006,
- V4L2_MBUS_FMT_SBGGR10_1X10 = 0x3007,
- V4L2_MBUS_FMT_SGBRG10_1X10 = 0x300e,
- V4L2_MBUS_FMT_SGRBG10_1X10 = 0x300a,
- V4L2_MBUS_FMT_SRGGB10_1X10 = 0x300f,
- V4L2_MBUS_FMT_SBGGR12_1X12 = 0x3008,
- V4L2_MBUS_FMT_SGBRG12_1X12 = 0x3010,
- V4L2_MBUS_FMT_SGRBG12_1X12 = 0x3011,
- V4L2_MBUS_FMT_SRGGB12_1X12 = 0x3012,
-
- /* JPEG compressed formats - next is 0x4002 */
- V4L2_MBUS_FMT_JPEG_1X8 = 0x4001,
-
- /* Vendor specific formats - next is 0x5002 */
-
- /* S5C73M3 sensor specific interleaved UYVY and JPEG */
- V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 0x5001,
-
- /* HSV - next is 0x6002 */
- V4L2_MBUS_FMT_AHSV8888_1X32 = 0x6001,
-};
-
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
* @width: frame width
@@ -134,6 +22,8 @@ enum v4l2_mbus_pixelcode {
* @code: data format code (from enum v4l2_mbus_pixelcode)
* @field: used interlacing type (from enum v4l2_field)
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
+ * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
+ * @quantization: quantization of the data (from enum v4l2_quantization)
*/
struct v4l2_mbus_framefmt {
__u32 width;
@@ -141,7 +31,108 @@ struct v4l2_mbus_framefmt {
__u32 code;
__u32 field;
__u32 colorspace;
- __u32 reserved[7];
+ __u16 ycbcr_enc;
+ __u16 quantization;
+ __u32 reserved[6];
+};
+
+#ifndef __KERNEL__
+/*
+ * enum v4l2_mbus_pixelcode and its definitions are now deprecated, and
+ * MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be
+ * used instead.
+ *
+ * New defines should only be added to media-bus-format.h. The
+ * v4l2_mbus_pixelcode enum is frozen.
+ */
+
+#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \
+ V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name
+
+enum v4l2_mbus_pixelcode {
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8),
+
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32),
};
+#endif /* __KERNEL__ */
#endif
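
Because each legacy enumerator is now defined in terms of its MEDIA_BUS_FMT_ counterpart, the numeric ABI cannot drift. A small userspace-only compile-time check (the legacy enum is hidden under #ifndef __KERNEL__, so this only builds outside the kernel):

#include <linux/v4l2-mediabus.h>

/* The wrapper macro guarantees old and new names share one value, so
 * existing userspace keeps compiling with an unchanged ABI. */
_Static_assert(V4L2_MBUS_FMT_RGB888_1X24 == MEDIA_BUS_FMT_RGB888_1X24,
	       "legacy mbus code must keep its numeric value");
_Static_assert(V4L2_MBUS_FMT_SBGGR10_1X10 == MEDIA_BUS_FMT_SBGGR10_1X10,
	       "legacy mbus code must keep its numeric value");
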
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index a619cdd300ac..e0a7e3da498a 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -68,7 +68,7 @@ struct v4l2_subdev_crop {
* struct v4l2_subdev_mbus_code_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
@@ -81,7 +81,7 @@ struct v4l2_subdev_mbus_code_enum {
* struct v4l2_subdev_frame_size_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -109,7 +109,7 @@ struct v4l2_subdev_frame_interval {
* struct v4l2_subdev_frame_interval_enum - Frame interval enumeration
* @pad: pad number, as reported by the media API
* @index: frame interval index during enumeration
- * @code: format code (from enum v4l2_mbus_pixelcode)
+ * @code: format code (MEDIA_BUS_FMT_ definitions)
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
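
With the documentation now pointing at MEDIA_BUS_FMT_ values, enumeration on a subdevice node is unchanged; a minimal sketch (the pad number 0 is illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

/* Sketch: list the media bus codes exposed on pad 0 of a subdevice. */
void list_mbus_codes(int fd)
{
	struct v4l2_subdev_mbus_code_enum mbus;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&mbus, 0, sizeof(mbus));
		mbus.pad = 0;
		mbus.index = i;
		if (ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &mbus) < 0)
			break;           /* error ends the enumeration */
		printf("code[%u] = 0x%08x\n", i, mbus.code);
	}
}
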
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1c2f84fd4d99..d279c1b75cf7 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -178,30 +178,103 @@ enum v4l2_memory {
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
- /* ITU-R 601 -- broadcast NTSC/PAL */
+ /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
V4L2_COLORSPACE_SMPTE170M = 1,
- /* 1125-Line (US) HDTV */
+ /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */
V4L2_COLORSPACE_SMPTE240M = 2,
- /* HD and modern captures. */
+ /* Rec.709: used for HDTV */
V4L2_COLORSPACE_REC709 = 3,
- /* broken BT878 extents (601, luma range 16-253 instead of 16-235) */
+ /*
+ * Deprecated, do not use. No driver will ever return this. This was
+ * based on a misunderstanding of the bt878 datasheet.
+ */
V4L2_COLORSPACE_BT878 = 4,
- /* These should be useful. Assume 601 extents. */
+ /*
+ * NTSC 1953 colorspace. This only makes sense when dealing with
+ * really, really old NTSC recordings. Superseded by SMPTE 170M.
+ */
V4L2_COLORSPACE_470_SYSTEM_M = 5,
+
+ /*
+ * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when
+ * dealing with really old PAL/SECAM recordings. Superseded by
+ * SMPTE 170M.
+ */
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
- /* I know there will be cameras that send this. So, this is
- * unspecified chromaticities and full 0-255 on each of the
- * Y'CbCr components
+ /*
+ * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601
+ * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG.
*/
V4L2_COLORSPACE_JPEG = 7,
- /* For RGB colourspaces, this is probably a good start. */
+ /* For RGB colorspaces such as produced by most webcams. */
V4L2_COLORSPACE_SRGB = 8,
+
+ /* AdobeRGB colorspace */
+ V4L2_COLORSPACE_ADOBERGB = 9,
+
+ /* BT.2020 colorspace, used for UHDTV. */
+ V4L2_COLORSPACE_BT2020 = 10,
+};
+
+enum v4l2_ycbcr_encoding {
+ /*
+ * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
+ * various colorspaces:
+ *
+ * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
+ * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+ *
+ * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709
+ *
+ * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
+ *
+ * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
+ *
+ * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
+ */
+ V4L2_YCBCR_ENC_DEFAULT = 0,
+
+ /* ITU-R 601 -- SDTV */
+ V4L2_YCBCR_ENC_601 = 1,
+
+ /* Rec. 709 -- HDTV */
+ V4L2_YCBCR_ENC_709 = 2,
+
+ /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */
+ V4L2_YCBCR_ENC_XV601 = 3,
+
+ /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
+ V4L2_YCBCR_ENC_XV709 = 4,
+
+ /* sYCC (Y'CbCr encoding of sRGB) */
+ V4L2_YCBCR_ENC_SYCC = 5,
+
+ /* BT.2020 Non-constant Luminance Y'CbCr */
+ V4L2_YCBCR_ENC_BT2020 = 6,
+
+ /* BT.2020 Constant Luminance Y'CbcCrc */
+ V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7,
+
+ /* SMPTE 240M -- Obsolete HDTV */
+ V4L2_YCBCR_ENC_SMPTE240M = 8,
+};
+
+enum v4l2_quantization {
+ /*
+ * The default for R'G'B' quantization is always full range. For
+ * Y'CbCr the quantization is always limited range, except for
+ * SYCC, XV601, XV709 or JPEG: those are full range.
+ */
+ V4L2_QUANTIZATION_DEFAULT = 0,
+ V4L2_QUANTIZATION_FULL_RANGE = 1,
+ V4L2_QUANTIZATION_LIM_RANGE = 2,
};
enum v4l2_priority {
@@ -294,6 +367,8 @@ struct v4l2_pix_format {
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
+ __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
+ __u32 quantization; /* enum v4l2_quantization */
};
/* Pixel format FOURCC depth Description */
@@ -1249,6 +1324,7 @@ struct v4l2_input {
#define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* V I D E O O U T P U T S
@@ -1272,6 +1348,7 @@ struct v4l2_output {
#define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* C O N T R O L S
@@ -1777,6 +1854,8 @@ struct v4l2_plane_pix_format {
* @plane_fmt: per-plane information
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -1788,7 +1867,9 @@ struct v4l2_pix_format_mplane {
struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
__u8 num_planes;
__u8 flags;
- __u8 reserved[10];
+ __u8 ycbcr_enc;
+ __u8 quantization;
+ __u8 reserved[8];
} __attribute__ ((packed));
/**
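
A short sketch of how the new ycbcr_enc/quantization fields are used when negotiating a capture format: leave them at their defaults and let the driver report what it actually applies. The 1280x720 YUYV request is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: request a capture format; on return the driver has filled
 * in colorspace, ycbcr_enc and quantization with the values in use. */
int setup_capture_format(int fd)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 1280;
	fmt.fmt.pix.height = 720;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	fmt.fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	fmt.fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;

	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}
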
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 5e26f61b5df5..be40f7059e93 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -31,6 +31,7 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
+#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 9ad67b267584..247c8ba8544a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
/* Feature bits */
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
@@ -114,18 +115,18 @@ struct virtio_blk_config {
/* This is the first element of the read scatter-gather list. */
struct virtio_blk_outhdr {
/* VIRTIO_BLK_T* */
- __u32 type;
+ __virtio32 type;
/* io priority. */
- __u32 ioprio;
+ __virtio32 ioprio;
/* Sector (ie. 512 byte offset) */
- __u64 sector;
+ __virtio64 sector;
};
struct virtio_scsi_inhdr {
- __u32 errors;
- __u32 data_len;
- __u32 sense_len;
- __u32 residual;
+ __virtio32 errors;
+ __virtio32 data_len;
+ __virtio32 sense_len;
+ __virtio32 residual;
};
/* And this is the final byte of the write scatter-gather list. */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 3ce768c6910d..a6d0cdeaacd4 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -38,14 +38,16 @@
#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK 4
+/* Driver has finished configuring features */
+#define VIRTIO_CONFIG_S_FEATURES_OK 8
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
-/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
+/* Some virtio feature bits (currently bits 28 through 32) are reserved for the
* transport being used (eg. virtio_ring), the rest are per-device feature
* bits. */
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 32
+#define VIRTIO_TRANSPORT_F_END 33
/* Do we get callbacks when the ring is completely used, even if we've
* suppressed them? */
@@ -54,4 +56,7 @@
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27
+/* v1.0 compliant. */
+#define VIRTIO_F_VERSION_1 32
+
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
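
Since VIRTIO_F_VERSION_1 sits at bit 32, feature words can no longer be treated as 32-bit values; a minimal sketch of the test in 64-bit math:

#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_config.h>

/* With feature bits above 31, use a 64-bit word for the check. */
static inline bool virtio_is_modern(uint64_t features)
{
	return features & (1ULL << VIRTIO_F_VERSION_1);
}
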
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index ba260dd0b33a..b7fb108c9310 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -32,6 +32,7 @@
#ifndef _UAPI_LINUX_VIRTIO_CONSOLE_H
#define _UAPI_LINUX_VIRTIO_CONSOLE_H
#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -58,9 +59,9 @@ struct virtio_console_config {
* particular port.
*/
struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
+ __virtio32 id; /* Port number */
+ __virtio16 event; /* The kind of control event (see below) */
+ __virtio16 value; /* Extra information for the key */
};
/* Some events for control messages */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 172a7f00780c..b5f1677b291c 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_types.h>
#include <linux/if_ether.h>
/* The feature bitmap for virtio net */
@@ -84,17 +85,17 @@ struct virtio_net_hdr {
#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
__u8 gso_type;
- __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
- __u16 gso_size; /* Bytes to append to hdr_len per frame */
- __u16 csum_start; /* Position to start checksumming from */
- __u16 csum_offset; /* Offset after that to place checksum */
+ __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
+ __virtio16 csum_start; /* Position to start checksumming from */
+ __virtio16 csum_offset; /* Offset after that to place checksum */
};
/* This is the version of the header to use when the MRG_RXBUF
* feature has been negotiated. */
struct virtio_net_hdr_mrg_rxbuf {
struct virtio_net_hdr hdr;
- __u16 num_buffers; /* Number of merged rx buffers */
+ __virtio16 num_buffers; /* Number of merged rx buffers */
};
/*
@@ -149,7 +150,7 @@ typedef __u8 virtio_net_ctrl_ack;
* VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
*/
struct virtio_net_ctrl_mac {
- __u32 entries;
+ __virtio32 entries;
__u8 macs[][ETH_ALEN];
} __attribute__((packed));
@@ -193,7 +194,7 @@ struct virtio_net_ctrl_mac {
* specified.
*/
struct virtio_net_ctrl_mq {
- __u16 virtqueue_pairs;
+ __virtio16 virtqueue_pairs;
};
#define VIRTIO_NET_CTRL_MQ 4
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a99f9b7caa67..61c818a7fe70 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -32,6 +32,7 @@
*
* Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>
+#include <linux/virtio_types.h>
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
@@ -61,32 +62,32 @@
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
- __u64 addr;
+ __virtio64 addr;
/* Length. */
- __u32 len;
+ __virtio32 len;
/* The flags as indicated above. */
- __u16 flags;
+ __virtio16 flags;
/* We chain unused descriptors via this, too */
- __u16 next;
+ __virtio16 next;
};
struct vring_avail {
- __u16 flags;
- __u16 idx;
- __u16 ring[];
+ __virtio16 flags;
+ __virtio16 idx;
+ __virtio16 ring[];
};
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
/* Index of start of used descriptor chain. */
- __u32 id;
+ __virtio32 id;
/* Total length of the descriptor chain which was used (written to) */
- __u32 len;
+ __virtio32 len;
};
struct vring_used {
- __u16 flags;
- __u16 idx;
+ __virtio16 flags;
+ __virtio16 idx;
struct vring_used_elem ring[];
};
@@ -109,25 +110,25 @@ struct vring {
* struct vring_desc desc[num];
*
* // A ring of available descriptor heads with free-running index.
- * __u16 avail_flags;
- * __u16 avail_idx;
- * __u16 available[num];
- * __u16 used_event_idx;
+ * __virtio16 avail_flags;
+ * __virtio16 avail_idx;
+ * __virtio16 available[num];
+ * __virtio16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
- * __u16 used_flags;
- * __u16 used_idx;
+ * __virtio16 used_flags;
+ * __virtio16 used_idx;
* struct vring_used_elem used[num];
- * __u16 avail_event_idx;
+ * __virtio16 avail_event_idx;
* };
*/
/* We publish the used event index at the end of the available ring, and vice
* versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
-#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
+#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align)
@@ -135,15 +136,15 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
+ vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
static inline unsigned vring_size(unsigned int num, unsigned long align)
{
- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
+ align - 1) & ~(align - 1))
- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
+ + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
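
Only the element types change in vring_init()/vring_size(); the layout arithmetic is untouched. As a quick sanity check, the same math can be reproduced standalone, with the legacy sizes (16-byte descriptors, 2-byte __virtio16, 8-byte used elements) hard-coded as assumptions.

#include <stdio.h>
#include <stddef.h>

/* Legacy ring layout sizes, mirroring the structures above. */
#define DESC_SIZE      16u	/* struct vring_desc */
#define VIRTIO16_SIZE  2u	/* __virtio16 */
#define USED_ELEM_SIZE 8u	/* struct vring_used_elem */

static size_t demo_vring_size(unsigned int num, unsigned long align)
{
	size_t avail_end = DESC_SIZE * num + VIRTIO16_SIZE * (3 + num);

	/* round the used ring up to the alignment boundary */
	return ((avail_end + align - 1) & ~(align - 1))
	       + VIRTIO16_SIZE * 3 + USED_ELEM_SIZE * num;
}

int main(void)
{
	/* a 256-entry ring with 4 KiB alignment: prints 10246 */
	printf("vring_size(256, 4096) = %zu bytes\n",
	       demo_vring_size(256, 4096));
	return 0;
}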
diff --git a/include/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index de429d1f4357..42b9370771b0 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -27,83 +27,85 @@
#ifndef _LINUX_VIRTIO_SCSI_H
#define _LINUX_VIRTIO_SCSI_H
+#include <linux/virtio_types.h>
+
#define VIRTIO_SCSI_CDB_SIZE 32
#define VIRTIO_SCSI_SENSE_SIZE 96
/* SCSI command request, followed by data-out */
struct virtio_scsi_cmd_req {
- u8 lun[8]; /* Logical Unit Number */
- u64 tag; /* Command identifier */
- u8 task_attr; /* Task attribute */
- u8 prio; /* SAM command priority field */
- u8 crn;
- u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
+ __u8 lun[8]; /* Logical Unit Number */
+ __virtio64 tag; /* Command identifier */
+ __u8 task_attr; /* Task attribute */
+ __u8 prio; /* SAM command priority field */
+ __u8 crn;
+ __u8 cdb[VIRTIO_SCSI_CDB_SIZE];
+} __attribute__((packed));
/* SCSI command request, followed by protection information */
struct virtio_scsi_cmd_req_pi {
- u8 lun[8]; /* Logical Unit Number */
- u64 tag; /* Command identifier */
- u8 task_attr; /* Task attribute */
- u8 prio; /* SAM command priority field */
- u8 crn;
- u32 pi_bytesout; /* DataOUT PI Number of bytes */
- u32 pi_bytesin; /* DataIN PI Number of bytes */
- u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
+ __u8 lun[8]; /* Logical Unit Number */
+ __virtio64 tag; /* Command identifier */
+ __u8 task_attr; /* Task attribute */
+ __u8 prio; /* SAM command priority field */
+ __u8 crn;
+ __virtio32 pi_bytesout; /* DataOUT PI Number of bytes */
+ __virtio32 pi_bytesin; /* DataIN PI Number of bytes */
+ __u8 cdb[VIRTIO_SCSI_CDB_SIZE];
+} __attribute__((packed));
/* Response, followed by sense data and data-in */
struct virtio_scsi_cmd_resp {
- u32 sense_len; /* Sense data length */
- u32 resid; /* Residual bytes in data buffer */
- u16 status_qualifier; /* Status qualifier */
- u8 status; /* Command completion status */
- u8 response; /* Response values */
- u8 sense[VIRTIO_SCSI_SENSE_SIZE];
-} __packed;
+ __virtio32 sense_len; /* Sense data length */
+ __virtio32 resid; /* Residual bytes in data buffer */
+ __virtio16 status_qualifier; /* Status qualifier */
+ __u8 status; /* Command completion status */
+ __u8 response; /* Response values */
+ __u8 sense[VIRTIO_SCSI_SENSE_SIZE];
+} __attribute__((packed));
/* Task Management Request */
struct virtio_scsi_ctrl_tmf_req {
- u32 type;
- u32 subtype;
- u8 lun[8];
- u64 tag;
-} __packed;
+ __virtio32 type;
+ __virtio32 subtype;
+ __u8 lun[8];
+ __virtio64 tag;
+} __attribute__((packed));
struct virtio_scsi_ctrl_tmf_resp {
- u8 response;
-} __packed;
+ __u8 response;
+} __attribute__((packed));
/* Asynchronous notification query/subscription */
struct virtio_scsi_ctrl_an_req {
- u32 type;
- u8 lun[8];
- u32 event_requested;
-} __packed;
+ __virtio32 type;
+ __u8 lun[8];
+ __virtio32 event_requested;
+} __attribute__((packed));
struct virtio_scsi_ctrl_an_resp {
- u32 event_actual;
- u8 response;
-} __packed;
+ __virtio32 event_actual;
+ __u8 response;
+} __attribute__((packed));
struct virtio_scsi_event {
- u32 event;
- u8 lun[8];
- u32 reason;
-} __packed;
+ __virtio32 event;
+ __u8 lun[8];
+ __virtio32 reason;
+} __attribute__((packed));
struct virtio_scsi_config {
- u32 num_queues;
- u32 seg_max;
- u32 max_sectors;
- u32 cmd_per_lun;
- u32 event_info_size;
- u32 sense_size;
- u32 cdb_size;
- u16 max_channel;
- u16 max_target;
- u32 max_lun;
-} __packed;
+ __u32 num_queues;
+ __u32 seg_max;
+ __u32 max_sectors;
+ __u32 cmd_per_lun;
+ __u32 event_info_size;
+ __u32 sense_size;
+ __u32 cdb_size;
+ __u16 max_channel;
+ __u16 max_target;
+ __u32 max_lun;
+} __attribute__((packed));
/* Feature Bits */
#define VIRTIO_SCSI_F_INOUT 0
diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h
new file mode 100644
index 000000000000..e845e8c4cbee
--- /dev/null
+++ b/include/uapi/linux/virtio_types.h
@@ -0,0 +1,46 @@
+#ifndef _UAPI_LINUX_VIRTIO_TYPES_H
+#define _UAPI_LINUX_VIRTIO_TYPES_H
+/* Type definitions for virtio implementations.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 2014 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ */
+#include <linux/types.h>
+
+/*
+ * __virtio{16,32,64} have the following meaning:
+ * - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian
+ * - __le{16,32,64} for standard-compliant virtio devices
+ */
+
+typedef __u16 __bitwise__ __virtio16;
+typedef __u32 __bitwise__ __virtio32;
+typedef __u64 __bitwise__ __virtio64;
+
+#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */
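
Whether a __virtio{16,32,64} field is byte-swapped therefore depends on whether VIRTIO_F_VERSION_1 was negotiated. The following is a hedged userspace sketch of that rule for 16-bit fields only; the kernel's own accessors live elsewhere, and le16toh() is a glibc/BSD extension assumed to be available.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <endian.h>	/* le16toh(), glibc/BSD extension */

/*
 * Access rule for __virtio16: little-endian when VIRTIO_F_VERSION_1
 * was negotiated, guest-native endian for legacy devices.
 */
static uint16_t demo_virtio16_to_cpu(bool little_endian, uint16_t val)
{
	return little_endian ? le16toh(val) : val;
}

int main(void)
{
	uint16_t wire = 0x1234;

	printf("modern: 0x%04x\n", demo_virtio16_to_cpu(true, wire));
	printf("legacy: 0x%04x\n", demo_virtio16_to_cpu(false, wire));
	return 0;
}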
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index 4b59a26799a3..978578bd1895 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -84,7 +84,4 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
-
-#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
-
#endif /* _UAPI_LINUX_VT_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 26daf55ff76e..4275b961bf60 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,8 +90,9 @@ enum {
};
enum {
+ IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
- IB_USER_VERBS_EX_CMD_DESTROY_FLOW
+ IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
/*
@@ -201,6 +202,32 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
+enum {
+ IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
+};
+
+struct ib_uverbs_ex_query_device {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+ __u64 general_caps;
+ struct {
+ __u32 rc_odp_caps;
+ __u32 uc_odp_caps;
+ __u32 ud_odp_caps;
+ } per_transport_caps;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+ struct ib_uverbs_query_device_resp base;
+ __u32 comp_mask;
+ __u32 reserved;
+ struct ib_uverbs_odp_caps odp_caps;
+};
+
struct ib_uverbs_query_port {
__u64 response;
__u8 port_num;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 941d32f007dc..1f23cd635957 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -96,9 +96,10 @@ enum {
SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */
SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */
SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */
+ SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_BEBOB
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_OXFW
};
struct snd_hwdep_info {
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 1964026b5e09..22ed8cb7800b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -32,7 +32,7 @@
#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/**
- * struct snd_compressed_buffer: compressed buffer
+ * struct snd_compressed_buffer - compressed buffer
* @fragment_size: size of buffer fragment in bytes
* @fragments: number of such fragments
*/
@@ -42,7 +42,7 @@ struct snd_compressed_buffer {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_params: compressed stream params
+ * struct snd_compr_params - compressed stream params
* @buffer: buffer description
* @codec: codec parameters
* @no_wake_mode: don't wake on fragment elapsed
@@ -54,7 +54,7 @@ struct snd_compr_params {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_tstamp: timestamp descriptor
+ * struct snd_compr_tstamp - timestamp descriptor
* @byte_offset: Byte offset in ring buffer to DSP
* @copied_total: Total number of bytes copied from/to ring buffer to/by DSP
* @pcm_frames: Frames decoded or encoded by DSP. This field will evolve by
@@ -73,7 +73,7 @@ struct snd_compr_tstamp {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_avail: avail descriptor
+ * struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
* @tstamp: timestamp information
*/
@@ -88,7 +88,7 @@ enum snd_compr_direction {
};
/**
- * struct snd_compr_caps: caps descriptor
+ * struct snd_compr_caps - caps descriptor
* @codecs: pointer to array of codecs
* @direction: direction supported. Of type snd_compr_direction
* @min_fragment_size: minimum fragment supported by DSP
@@ -110,7 +110,7 @@ struct snd_compr_caps {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_compr_codec_caps: query capability of codec
+ * struct snd_compr_codec_caps - query capability of codec
* @codec: codec for which capability is queried
* @num_descriptors: number of codec descriptors
* @descriptor: array of codec capability descriptor
@@ -122,18 +122,19 @@ struct snd_compr_codec_caps {
} __attribute__((packed, aligned(4)));
/**
+ * enum sndrv_compress_encoder
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
* beginning of the track
*/
-enum {
+enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENCODER_PADDING = 1,
SNDRV_COMPRESS_ENCODER_DELAY = 2,
};
/**
- * struct snd_compr_metadata: compressed stream metadata
+ * struct snd_compr_metadata - compressed stream metadata
* @key: key id
* @value: key value
*/
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index af4bd136c75d..49122df3b56b 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -55,7 +55,8 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
#define SNDRV_FIREWIRE_TYPE_BEBOB 3
-/* AV/C, RME, MOTU, ... */
+#define SNDRV_FIREWIRE_TYPE_OXFW 4
+/* RME, MOTU, ... */
struct snd_firewire_get_info {
unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index d956c3593f65..b357f1a5e29c 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -74,14 +74,14 @@ struct hdspm_config {
#define SNDRV_HDSPM_IOCTL_GET_CONFIG \
_IOR('H', 0x41, struct hdspm_config)
-/**
+/*
* If there's a TCO (TimeCode Option) board installed,
* there are further options and status data available.
* The hdspm_ltc structure contains the current SMPTE
* timecode and some status information and can be
* obtained via SNDRV_HDSPM_IOCTL_GET_LTC or in the
* hdspm_status struct.
- **/
+ */
enum hdspm_ltc_format {
format_invalid,
@@ -113,11 +113,11 @@ struct hdspm_ltc {
#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc)
-/**
+/*
* The status data reflects the device's current state
* as determined by the card's configuration and
* connection status.
- **/
+ */
enum hdspm_sync {
hdspm_sync_no_lock = 0,
@@ -171,9 +171,9 @@ struct hdspm_status {
#define SNDRV_HDSPM_IOCTL_GET_STATUS \
_IOR('H', 0x47, struct hdspm_status)
-/**
+/*
* Get information about the card and its add-ons.
- **/
+ */
#define HDSPM_ADDON_TCO 1
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 069dfca9549a..6a84498ea513 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -166,13 +166,6 @@ enum omap_dss_display_state {
OMAP_DSS_DISPLAY_ACTIVE,
};
-enum omap_dss_audio_state {
- OMAP_DSS_AUDIO_DISABLED = 0,
- OMAP_DSS_AUDIO_ENABLED,
- OMAP_DSS_AUDIO_CONFIGURED,
- OMAP_DSS_AUDIO_PLAYING,
-};
-
struct omap_dss_audio {
struct snd_aes_iec958 *iec;
struct snd_cea_861_aud_if *cea;
@@ -635,19 +628,6 @@ struct omapdss_hdmi_ops {
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
-
- /*
- * Note: These functions might sleep. Do not call while
- * holding a spinlock/readlock.
- */
- int (*audio_enable)(struct omap_dss_device *dssdev);
- void (*audio_disable)(struct omap_dss_device *dssdev);
- bool (*audio_supported)(struct omap_dss_device *dssdev);
- int (*audio_config)(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio);
- /* Note: These functions may not sleep */
- int (*audio_start)(struct omap_dss_device *dssdev);
- void (*audio_stop)(struct omap_dss_device *dssdev);
};
struct omapdss_dsi_ops {
@@ -783,8 +763,6 @@ struct omap_dss_device {
enum omap_dss_display_state state;
- enum omap_dss_audio_state audio_state;
-
/* OMAP DSS output specific fields */
struct list_head list;
@@ -795,6 +773,9 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
+ /* the port number in the DT node */
+ int port_num;
+
/* dynamic fields */
struct omap_overlay_manager *manager;
@@ -858,24 +839,6 @@ struct omap_dss_driver {
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
-
- /*
- * For display drivers that support audio. This encompasses
- * HDMI and DisplayPort at the moment.
- */
- /*
- * Note: These functions might sleep. Do not call while
- * holding a spinlock/readlock.
- */
- int (*audio_enable)(struct omap_dss_device *dssdev);
- void (*audio_disable)(struct omap_dss_device *dssdev);
- bool (*audio_supported)(struct omap_dss_device *dssdev);
- int (*audio_config)(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio);
- /* Note: These functions may not sleep */
- int (*audio_start)(struct omap_dss_device *dssdev);
- void (*audio_stop)(struct omap_dss_device *dssdev);
-
};
enum omapdss_version omapdss_get_version(void);
@@ -918,7 +881,7 @@ int omapdss_register_output(struct omap_dss_device *output);
void omapdss_unregister_output(struct omap_dss_device *output);
struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
struct omap_dss_device *omap_dss_find_output(const char *name);
-struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
int omapdss_output_set_device(struct omap_dss_device *out,
struct omap_dss_device *dssdev);
int omapdss_output_unset_device(struct omap_dss_device *out);
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 14334d0161d5..131a6ccdba25 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,9 +53,6 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
-/* Xen also maps grant references at pfn = mfn */
-#define XENFEAT_grant_map_identity 12
-
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index e40fae9bf11a..bcce56439d64 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -479,6 +479,25 @@ struct gnttab_get_version {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+#define GNTTABOP_cache_flush 12
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1<<0)
+#define GNTTAB_CACHE_INVAL (1<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+ uint32_t op;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
+
+/*
* Bitfield values for update_pin_status.flags.
*/
/* Map the grant entry for access by I/O devices. */
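
A rough sketch of how a caller might fill in the new cache-flush request follows; the structure is repeated locally so the example stands alone, and the actual GNTTABOP_cache_flush hypercall is omitted.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t grant_ref_t;

/* Local copy of the new ABI structure, for illustration only. */
struct demo_gnttab_cache_flush {
	union {
		uint64_t dev_bus_addr;
		grant_ref_t ref;
	} a;
	uint16_t offset;
	uint16_t length;
#define DEMO_GNTTAB_CACHE_CLEAN       (1u << 0)
#define DEMO_GNTTAB_CACHE_INVAL       (1u << 1)
#define DEMO_GNTTAB_CACHE_SOURCE_GREF (1u << 31)
	uint32_t op;
};

int main(void)
{
	struct demo_gnttab_cache_flush cflush = { { 0 }, 0, 0, 0 };

	/* clean+invalidate 64 bytes at offset 128 of granted page ref 7 */
	cflush.a.ref = 7;
	cflush.offset = 128;
	cflush.length = 64;
	cflush.op = DEMO_GNTTAB_CACHE_CLEAN | DEMO_GNTTAB_CACHE_INVAL |
		    DEMO_GNTTAB_CACHE_SOURCE_GREF;

	printf("op=0x%08x offset=%u length=%u\n",
	       cflush.op, cflush.offset, cflush.length);
	return 0;
}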
diff --git a/init/main.c b/init/main.c
index ca380ec685de..61b993767db5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -51,6 +51,7 @@
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
+#include <linux/page_ext.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
@@ -77,6 +78,8 @@
#include <linux/context_tracking.h>
#include <linux/random.h>
#include <linux/list.h>
+#include <linux/integrity.h>
+#include <linux/proc_ns.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -484,6 +487,11 @@ void __init __weak thread_info_cache_init(void)
*/
static void __init mm_init(void)
{
+ /*
+ * page_ext requires contiguous pages,
+ * bigger than MAX_ORDER unless SPARSEMEM.
+ */
+ page_ext_init_flatmem();
mem_init();
kmem_cache_init();
percpu_init_late();
@@ -571,6 +579,10 @@ asmlinkage __visible void __init start_kernel(void)
local_irq_disable();
idr_init_cache();
rcu_init();
+
+ /* trace_printk() and trace points may be used after this */
+ trace_init();
+
context_tracking_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
@@ -621,6 +633,7 @@ asmlinkage __visible void __init start_kernel(void)
initrd_start = 0;
}
#endif
+ page_ext_init();
debug_objects_mem_init();
kmemleak_init();
setup_per_cpu_pageset();
@@ -653,6 +666,7 @@ asmlinkage __visible void __init start_kernel(void)
/* rootfs populating might need page-writeback */
page_writeback_init();
proc_root_init();
+ nsfs_init();
cgroup_init();
cpuset_init();
taskstats_init_early();
@@ -1024,8 +1038,11 @@ static noinline void __init kernel_init_freeable(void)
* Ok, we have completed the initial bootup, and
* we're essentially up and running. Get rid of the
* initmem segments and start the user-mode stuff..
+ *
+ * rootfs is available now, try loading the public keys
+ * and default modules
*/
- /* rootfs is available now, try loading default modules */
+ integrity_load_keys();
load_default_modules();
}
diff --git a/init/version.c b/init/version.c
index 1a4718e500fe..fe41a63efed6 100644
--- a/init/version.c
+++ b/init/version.c
@@ -35,7 +35,10 @@ struct uts_namespace init_uts_ns = {
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
- .proc_inum = PROC_UTS_INIT_INO,
+ .ns.inum = PROC_UTS_INIT_INO,
+#ifdef CONFIG_UTS_NS
+ .ns.ops = &utsns_operations,
+#endif
};
EXPORT_SYMBOL_GPL(init_uts_ns);
diff --git a/ipc/Makefile b/ipc/Makefile
index 9075e172e52c..86c7300ecdf5 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o
-obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o syscall.o
+obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o syscall.o
obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
obj_mq-$(CONFIG_COMPAT) += compat_mq.o
obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index e8075b247497..8ad93c29f511 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -62,29 +62,6 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
return err;
}
-static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table ipc_table;
- size_t lenp_bef = *lenp;
- int rc;
-
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-
- rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
-
- if (write && !rc && lenp_bef == *lenp)
- /*
- * Tunable has successfully been changed by hand. Disable its
- * automatic adjustment. This simply requires unregistering
- * the notifiers that trigger recalculation.
- */
- unregister_ipcns_notifier(current->nsproxy->ipc_ns);
-
- return rc;
-}
-
static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -96,54 +73,19 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
lenp, ppos);
}
-/*
- * Routine that is called when the file "auto_msgmni" has successfully been
- * written.
- * Two values are allowed:
- * 0: unregister msgmni's callback routine from the ipc namespace notifier
- * chain. This means that msgmni won't be recomputed anymore upon memory
- * add/remove or ipc namespace creation/removal.
- * 1: register back the callback routine.
- */
-static void ipc_auto_callback(int val)
-{
- if (!val)
- unregister_ipcns_notifier(current->nsproxy->ipc_ns);
- else {
- /*
- * Re-enable automatic recomputing only if not already
- * enabled.
- */
- recompute_msgmni(current->nsproxy->ipc_ns);
- cond_register_ipcns_notifier(current->nsproxy->ipc_ns);
- }
-}
-
-static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
+static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
- int oldval;
- int rc;
+ int dummy = 0;
memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
- oldval = *((int *)(ipc_table.data));
+ ipc_table.data = &dummy;
- rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
+ if (write)
+ pr_info_once("writing to auto_msgmni has no effect");
- if (write && !rc) {
- int newval = *((int *)(ipc_table.data));
- /*
- * The file "auto_msgmni" has correctly been set.
- * React by (un)registering the corresponding tunable, if the
- * value has changed.
- */
- if (newval != oldval)
- ipc_auto_callback(newval);
- }
-
- return rc;
+ return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
}
#else
@@ -151,8 +93,7 @@ static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
#define proc_ipc_dointvec NULL
#define proc_ipc_dointvec_minmax NULL
#define proc_ipc_dointvec_minmax_orphans NULL
-#define proc_ipc_callback_dointvec_minmax NULL
-#define proc_ipcauto_dointvec_minmax NULL
+#define proc_ipc_auto_msgmni NULL
#endif
static int zero;
@@ -204,11 +145,20 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmni,
.maxlen = sizeof(init_ipc_ns.msg_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_callback_dointvec_minmax,
+ .proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &int_max,
},
{
+ .procname = "auto_msgmni",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_ipc_auto_msgmni,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
.procname = "msgmnb",
.data = &init_ipc_ns.msg_ctlmnb,
.maxlen = sizeof(init_ipc_ns.msg_ctlmnb),
@@ -224,15 +174,6 @@ static struct ctl_table ipc_kern_table[] = {
.mode = 0644,
.proc_handler = proc_ipc_dointvec,
},
- {
- .procname = "auto_msgmni",
- .data = &init_ipc_ns.auto_msgmni,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_ipcauto_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
#ifdef CONFIG_CHECKPOINT_RESTORE
{
.procname = "sem_next_id",
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c
deleted file mode 100644
index b9b31a4f77e1..000000000000
--- a/ipc/ipcns_notifier.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * linux/ipc/ipcns_notifier.c
- * Copyright (C) 2007 BULL SA. Nadia Derbey
- *
- * Notification mechanism for ipc namespaces:
- * The callback routine registered in the memory chain invokes the ipcns
- * notifier chain with the IPCNS_MEMCHANGED event.
- * Each callback routine registered in the ipcns namespace recomputes msgmni
- * for the owning namespace.
- */
-
-#include <linux/msg.h>
-#include <linux/rcupdate.h>
-#include <linux/notifier.h>
-#include <linux/nsproxy.h>
-#include <linux/ipc_namespace.h>
-
-#include "util.h"
-
-
-
-static BLOCKING_NOTIFIER_HEAD(ipcns_chain);
-
-
-static int ipcns_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct ipc_namespace *ns;
-
- switch (action) {
- case IPCNS_MEMCHANGED: /* amount of lowmem has changed */
- case IPCNS_CREATED:
- case IPCNS_REMOVED:
- /*
- * It's time to recompute msgmni
- */
- ns = container_of(self, struct ipc_namespace, ipcns_nb);
- /*
- * No need to get a reference on the ns: the 1st job of
- * free_ipc_ns() is to unregister the callback routine.
- * blocking_notifier_chain_unregister takes the wr lock to do
- * it.
- * When this callback routine is called the rd lock is held by
- * blocking_notifier_call_chain.
- * So the ipc ns cannot be freed while we are here.
- */
- recompute_msgmni(ns);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-int register_ipcns_notifier(struct ipc_namespace *ns)
-{
- int rc;
-
- memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
- ns->ipcns_nb.notifier_call = ipcns_callback;
- ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
- rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb);
- if (!rc)
- ns->auto_msgmni = 1;
- return rc;
-}
-
-int cond_register_ipcns_notifier(struct ipc_namespace *ns)
-{
- int rc;
-
- memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
- ns->ipcns_nb.notifier_call = ipcns_callback;
- ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
- rc = blocking_notifier_chain_cond_register(&ipcns_chain,
- &ns->ipcns_nb);
- if (!rc)
- ns->auto_msgmni = 1;
- return rc;
-}
-
-void unregister_ipcns_notifier(struct ipc_namespace *ns)
-{
- blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb);
- ns->auto_msgmni = 0;
-}
-
-int ipcns_notify(unsigned long val)
-{
- return blocking_notifier_call_chain(&ipcns_chain, val, NULL);
-}
diff --git a/ipc/msg.c b/ipc/msg.c
index c5d8e3749985..a7261d5cbc89 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -989,43 +989,12 @@ SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
-/*
- * Scale msgmni with the available lowmem size: the memory dedicated to msg
- * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
- * Also take into account the number of nsproxies created so far.
- * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
- */
-void recompute_msgmni(struct ipc_namespace *ns)
-{
- struct sysinfo i;
- unsigned long allowed;
- int nb_ns;
-
- si_meminfo(&i);
- allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
- / MSGMNB;
- nb_ns = atomic_read(&nr_ipc_ns);
- allowed /= nb_ns;
-
- if (allowed < MSGMNI) {
- ns->msg_ctlmni = MSGMNI;
- return;
- }
-
- if (allowed > IPCMNI / nb_ns) {
- ns->msg_ctlmni = IPCMNI / nb_ns;
- return;
- }
-
- ns->msg_ctlmni = allowed;
-}
void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
-
- recompute_msgmni(ns);
+ ns->msg_ctlmni = MSGMNI;
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
@@ -1069,9 +1038,6 @@ void __init msg_init(void)
{
msg_init_ns(&init_ipc_ns);
- printk(KERN_INFO "msgmni has been set to %d\n",
- init_ipc_ns.msg_ctlmni);
-
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
IPC_MSG_IDS, sysvipc_msg_proc_show);
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 7e7095974d54..2b491590ebab 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -31,7 +31,10 @@ DEFINE_SPINLOCK(mq_lock);
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
.user_ns = &init_user_ns,
- .proc_inum = PROC_IPC_INIT_INO,
+ .ns.inum = PROC_IPC_INIT_INO,
+#ifdef CONFIG_IPC_NS
+ .ns.ops = &ipcns_operations,
+#endif
};
atomic_t nr_ipc_ns = ATOMIC_INIT(1);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index b54468e48e32..068caf18d565 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -26,16 +26,17 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
if (ns == NULL)
return ERR_PTR(-ENOMEM);
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err) {
kfree(ns);
return ERR_PTR(err);
}
+ ns->ns.ops = &ipcns_operations;
atomic_set(&ns->count, 1);
err = mq_init_ns(ns);
if (err) {
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
return ERR_PTR(err);
}
@@ -45,14 +46,6 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
msg_init_ns(ns);
shm_init_ns(ns);
- /*
- * msgmni has already been computed for the new ipc ns.
- * Thus, do the ipcns creation notification before registering that
- * new ipcns in the chain.
- */
- ipcns_notify(IPCNS_CREATED);
- register_ipcns_notifier(ns);
-
ns->user_ns = get_user_ns(user_ns);
return ns;
@@ -99,27 +92,13 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
static void free_ipc_ns(struct ipc_namespace *ns)
{
- /*
- * Unregistering the hotplug notifier at the beginning guarantees
- * that the ipc namespace won't be freed while we are inside the
- * callback routine. Since the blocking_notifier_chain_XXX routines
- * hold a rw lock on the notifier list, unregister_ipcns_notifier()
- * won't take the rw lock before blocking_notifier_call_chain() has
- * released the rd lock.
- */
- unregister_ipcns_notifier(ns);
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
atomic_dec(&nr_ipc_ns);
- /*
- * Do the ipcns removal notification after decrementing nr_ipc_ns in
- * order to have a correct value when recomputing msgmni.
- */
- ipcns_notify(IPCNS_REMOVED);
put_user_ns(ns->user_ns);
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
}
@@ -149,7 +128,12 @@ void put_ipc_ns(struct ipc_namespace *ns)
}
}
-static void *ipcns_get(struct task_struct *task)
+static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct ipc_namespace, ns);
+}
+
+static struct ns_common *ipcns_get(struct task_struct *task)
{
struct ipc_namespace *ns = NULL;
struct nsproxy *nsproxy;
@@ -160,17 +144,17 @@ static void *ipcns_get(struct task_struct *task)
ns = get_ipc_ns(nsproxy->ipc_ns);
task_unlock(task);
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void ipcns_put(void *ns)
+static void ipcns_put(struct ns_common *ns)
{
- return put_ipc_ns(ns);
+ return put_ipc_ns(to_ipc_ns(ns));
}
-static int ipcns_install(struct nsproxy *nsproxy, void *new)
+static int ipcns_install(struct nsproxy *nsproxy, struct ns_common *new)
{
- struct ipc_namespace *ns = new;
+ struct ipc_namespace *ns = to_ipc_ns(new);
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -182,18 +166,10 @@ static int ipcns_install(struct nsproxy *nsproxy, void *new)
return 0;
}
-static unsigned int ipcns_inum(void *vp)
-{
- struct ipc_namespace *ns = vp;
-
- return ns->proc_inum;
-}
-
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.type = CLONE_NEWIPC,
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
- .inum = ipcns_inum,
};
diff --git a/ipc/sem.c b/ipc/sem.c
index 53c3310f41c6..6115146563f9 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -326,10 +326,17 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
- /* spin_is_locked() is not a memory barrier */
- smp_mb();
+ /*
+ * The ipc object lock check must be visible on all
+ * cores before rechecking the complex count. Otherwise
+ * we can race with another thread that does:
+ * complex_count++;
+ * spin_unlock(sem_perm.lock);
+ */
+ smp_rmb();
- /* Now repeat the test of complex_count:
+ /*
+ * Now repeat the test of complex_count:
* It can't change anymore until we drop sem->lock.
* Thus: if is now 0, then it will stay 0.
*/
diff --git a/ipc/shm.c b/ipc/shm.c
index 01454796ba3c..19633b4a2350 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -219,7 +219,8 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
- user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
+ user_shm_unlock(i_size_read(file_inode(shm_file)),
+ shp->mlock_user);
fput(shm_file);
ipc_rcu_putref(shp, shm_rcu_free);
}
@@ -1229,6 +1230,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
int retval = -EINVAL;
#ifdef CONFIG_MMU
loff_t size = 0;
+ struct file *file;
struct vm_area_struct *next;
#endif
@@ -1245,7 +1247,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
* started at address shmaddr. It records its size and then unmaps
* it.
* - Then it unmaps all shm vmas that started at shmaddr and that
- * are within the initially determined size.
+ * are within the initially determined size and that are from the
+ * same shm segment from which we determined the size.
* Errors from do_munmap are ignored: the function only fails if
* it's called with invalid parameters or if it's called to unmap
* a part of a vma. Both calls in this function are for full vmas,
@@ -1271,8 +1274,14 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
-
- size = file_inode(vma->vm_file)->i_size;
+ /*
+ * Record the file of the shm segment being
+ * unmapped. With mremap(), someone could place
+ * page from another segment but with equal offsets
+ * in the range we are unmapping.
+ */
+ file = vma->vm_file;
+ size = i_size_read(file_inode(vma->vm_file));
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
/*
* We discovered the size of the shm segment, so
@@ -1298,8 +1307,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
/* finding a matching vma now does not alter retval */
if ((vma->vm_ops == &shm_vm_ops) &&
- (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
-
+ ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
+ (vma->vm_file == file))
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
vma = next;
}
diff --git a/ipc/util.c b/ipc/util.c
index 88adc329888c..106bed0378ab 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -71,44 +71,6 @@ struct ipc_proc_iface {
int (*show)(struct seq_file *, void *);
};
-static void ipc_memory_notifier(struct work_struct *work)
-{
- ipcns_notify(IPCNS_MEMCHANGED);
-}
-
-static int ipc_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
-
- switch (action) {
- case MEM_ONLINE: /* memory successfully brought online */
- case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
- /*
- * This is done by invoking the ipcns notifier chain with the
- * IPC_MEMCHANGED event.
- * In order not to keep the lock on the hotplug memory chain
- * for too long, queue a work item that will, when waken up,
- * activate the ipcns notification chain.
- */
- schedule_work(&ipc_memory_wq);
- break;
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block ipc_memory_nb = {
- .notifier_call = ipc_memory_callback,
- .priority = IPC_CALLBACK_PRI,
-};
-
/**
* ipc_init - initialise ipc subsystem
*
@@ -124,8 +86,6 @@ static int __init ipc_init(void)
sem_init();
msg_init();
shm_init();
- register_hotmemory_notifier(&ipc_memory_nb);
- register_ipcns_notifier(&init_ipc_ns);
return 0;
}
device_initcall(ipc_init);
diff --git a/kernel/audit.c b/kernel/audit.c
index 1f37f15117e5..f8f203e8018c 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -833,7 +833,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
s.backlog_limit = audit_backlog_limit;
s.lost = atomic_read(&audit_lost);
s.backlog = skb_queue_len(&audit_skb_queue);
- s.version = AUDIT_VERSION_LATEST;
+ s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
s.backlog_wait_time = audit_backlog_wait_time;
audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
break;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 80f29e015570..2e0c97427b33 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -174,9 +174,9 @@ static void insert_hash(struct audit_chunk *chunk)
struct fsnotify_mark *entry = &chunk->mark;
struct list_head *list;
- if (!entry->i.inode)
+ if (!entry->inode)
return;
- list = chunk_hash(entry->i.inode);
+ list = chunk_hash(entry->inode);
list_add_rcu(&chunk->hash, list);
}
@@ -188,7 +188,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
list_for_each_entry_rcu(p, list, hash) {
/* mark.inode may have gone NULL, but who cares? */
- if (p->mark.i.inode == inode) {
+ if (p->mark.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
@@ -231,7 +231,7 @@ static void untag_chunk(struct node *p)
new = alloc_chunk(size);
spin_lock(&entry->lock);
- if (chunk->dead || !entry->i.inode) {
+ if (chunk->dead || !entry->inode) {
spin_unlock(&entry->lock);
if (new)
free_chunk(new);
@@ -258,7 +258,7 @@ static void untag_chunk(struct node *p)
goto Fallback;
fsnotify_duplicate_mark(&new->mark, entry);
- if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
fsnotify_put_mark(&new->mark);
goto Fallback;
}
@@ -386,7 +386,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk_entry = &chunk->mark;
spin_lock(&old_entry->lock);
- if (!old_entry->i.inode) {
+ if (!old_entry->inode) {
/* old_entry is being shot, lets just lie */
spin_unlock(&old_entry->lock);
fsnotify_put_mark(old_entry);
@@ -395,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
}
fsnotify_duplicate_mark(chunk_entry, old_entry);
- if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
spin_unlock(&old_entry->lock);
fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
@@ -611,7 +611,7 @@ void audit_trim_trees(void)
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
/* this could be NULL if the watch is dying else where... */
- struct inode *inode = chunk->mark.i.inode;
+ struct inode *inode = chunk->mark.inode;
node->index |= 1U<<31;
if (iterate_mounts(compare_root, inode, root_mnt))
node->index &= ~(1U<<31);
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 0daf7f6ae7df..a5ae60f0b0a2 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,5 @@
obj-y := core.o
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o
ifdef CONFIG_TEST_BPF
obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
new file mode 100644
index 000000000000..9eb4d8a7cd87
--- /dev/null
+++ b/kernel/bpf/arraymap.c
@@ -0,0 +1,156 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ char value[0] __aligned(8);
+};
+
+/* Called from syscall */
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_array *array;
+ u32 elem_size, array_size;
+
+ /* check sanity of attributes */
+ if (attr->max_entries == 0 || attr->key_size != 4 ||
+ attr->value_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ elem_size = round_up(attr->value_size, 8);
+
+ /* check round_up into zero and u32 overflow */
+ if (elem_size == 0 ||
+ attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
+ return ERR_PTR(-ENOMEM);
+
+ array_size = sizeof(*array) + attr->max_entries * elem_size;
+
+ /* allocate all map elements and zero-initialize them */
+ array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
+ if (!array) {
+ array = vzalloc(array_size);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* copy mandatory map attributes */
+ array->map.key_size = attr->key_size;
+ array->map.value_size = attr->value_size;
+ array->map.max_entries = attr->max_entries;
+
+ array->elem_size = elem_size;
+
+ return &array->map;
+}
+
+/* Called from syscall or from eBPF program */
+static void *array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ u32 index = *(u32 *)key;
+
+ if (index >= array->map.max_entries)
+ return NULL;
+
+ return array->value + array->elem_size * index;
+}
+
+/* Called from syscall */
+static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ u32 index = *(u32 *)key;
+ u32 *next = (u32 *)next_key;
+
+ if (index >= array->map.max_entries) {
+ *next = 0;
+ return 0;
+ }
+
+ if (index == array->map.max_entries - 1)
+ return -ENOENT;
+
+ *next = index + 1;
+ return 0;
+}
+
+/* Called from syscall or from eBPF program */
+static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ u32 index = *(u32 *)key;
+
+ if (map_flags > BPF_EXIST)
+ /* unknown flags */
+ return -EINVAL;
+
+ if (index >= array->map.max_entries)
+ /* all elements were pre-allocated, cannot insert a new one */
+ return -E2BIG;
+
+ if (map_flags == BPF_NOEXIST)
+ /* all elements already exist */
+ return -EEXIST;
+
+ memcpy(array->value + array->elem_size * index, value, array->elem_size);
+ return 0;
+}
+
+/* Called from syscall or from eBPF program */
+static int array_map_delete_elem(struct bpf_map *map, void *key)
+{
+ return -EINVAL;
+}
+
+/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
+static void array_map_free(struct bpf_map *map)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+
+ /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
+ * so the programs (can be more than one that used this map) were
+ * disconnected from events. Wait for outstanding programs to complete
+ * and free the array
+ */
+ synchronize_rcu();
+
+ kvfree(array);
+}
+
+static struct bpf_map_ops array_ops = {
+ .map_alloc = array_map_alloc,
+ .map_free = array_map_free,
+ .map_get_next_key = array_map_get_next_key,
+ .map_lookup_elem = array_map_lookup_elem,
+ .map_update_elem = array_map_update_elem,
+ .map_delete_elem = array_map_delete_elem,
+};
+
+static struct bpf_map_type_list tl = {
+ .ops = &array_ops,
+ .type = BPF_MAP_TYPE_ARRAY,
+};
+
+static int __init register_array_map(void)
+{
+ bpf_register_map_type(&tl);
+ return 0;
+}
+late_initcall(register_array_map);
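
From userspace the array map is driven through the bpf(2) syscall. Below is a minimal sketch, assuming the <linux/bpf.h> uapi from this series and a libc that exposes __NR_bpf; error handling is kept to a bare minimum and the wrapper name is ours, not the library's.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	uint32_t key = 3;
	uint64_t value = 42, out = 0;
	int map_fd;

	/* create a 16-slot array of u64 values (array keys are always u32) */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(uint32_t);
	attr.value_size = sizeof(uint64_t);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* slots are pre-allocated, so BPF_NOEXIST would fail with -EEXIST */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&value;
	attr.flags = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		perror("BPF_MAP_UPDATE_ELEM");

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&out;
	if (!sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
		printf("slot %u = %llu\n", key, (unsigned long long)out);

	close(map_fd);
	return 0;
}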
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
new file mode 100644
index 000000000000..b3ba43674310
--- /dev/null
+++ b/kernel/bpf/hashtab.c
@@ -0,0 +1,367 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bpf.h>
+#include <linux/jhash.h>
+#include <linux/filter.h>
+#include <linux/vmalloc.h>
+
+struct bpf_htab {
+ struct bpf_map map;
+ struct hlist_head *buckets;
+ spinlock_t lock;
+ u32 count; /* number of elements in this hashtable */
+ u32 n_buckets; /* number of hash buckets */
+ u32 elem_size; /* size of each element in bytes */
+};
+
+/* each htab element is struct htab_elem + key + value */
+struct htab_elem {
+ struct hlist_node hash_node;
+ struct rcu_head rcu;
+ u32 hash;
+ char key[0] __aligned(8);
+};
+
+/* Called from syscall */
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_htab *htab;
+ int err, i;
+
+ htab = kzalloc(sizeof(*htab), GFP_USER);
+ if (!htab)
+ return ERR_PTR(-ENOMEM);
+
+ /* mandatory map attributes */
+ htab->map.key_size = attr->key_size;
+ htab->map.value_size = attr->value_size;
+ htab->map.max_entries = attr->max_entries;
+
+ /* check sanity of attributes.
+ * value_size == 0 may be allowed in the future to use map as a set
+ */
+ err = -EINVAL;
+ if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
+ htab->map.value_size == 0)
+ goto free_htab;
+
+ /* hash table size must be power of 2 */
+ htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
+
+ err = -E2BIG;
+ if (htab->map.key_size > MAX_BPF_STACK)
+ /* eBPF programs initialize keys on stack, so they cannot be
+ * larger than max stack size
+ */
+ goto free_htab;
+
+ err = -ENOMEM;
+ /* prevent zero size kmalloc and check for u32 overflow */
+ if (htab->n_buckets == 0 ||
+ htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
+ goto free_htab;
+
+ htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
+ GFP_USER | __GFP_NOWARN);
+
+ if (!htab->buckets) {
+ htab->buckets = vmalloc(htab->n_buckets * sizeof(struct hlist_head));
+ if (!htab->buckets)
+ goto free_htab;
+ }
+
+ for (i = 0; i < htab->n_buckets; i++)
+ INIT_HLIST_HEAD(&htab->buckets[i]);
+
+ spin_lock_init(&htab->lock);
+ htab->count = 0;
+
+ htab->elem_size = sizeof(struct htab_elem) +
+ round_up(htab->map.key_size, 8) +
+ htab->map.value_size;
+ return &htab->map;
+
+free_htab:
+ kfree(htab);
+ return ERR_PTR(err);
+}
+
+static inline u32 htab_map_hash(const void *key, u32 key_len)
+{
+ return jhash(key, key_len, 0);
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+ return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+ void *key, u32 key_size)
+{
+ struct htab_elem *l;
+
+ hlist_for_each_entry_rcu(l, head, hash_node)
+ if (l->hash == hash && !memcmp(&l->key, key, key_size))
+ return l;
+
+ return NULL;
+}
+
+/* Called from syscall or from eBPF program */
+static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct hlist_head *head;
+ struct htab_elem *l;
+ u32 hash, key_size;
+
+ /* Must be called with rcu_read_lock. */
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+
+ head = select_bucket(htab, hash);
+
+ l = lookup_elem_raw(head, hash, key, key_size);
+
+ if (l)
+ return l->key + round_up(map->key_size, 8);
+
+ return NULL;
+}
+
+/* Called from syscall */
+static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct hlist_head *head;
+ struct htab_elem *l, *next_l;
+ u32 hash, key_size;
+ int i;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+
+ head = select_bucket(htab, hash);
+
+ /* lookup the key */
+ l = lookup_elem_raw(head, hash, key, key_size);
+
+ if (!l) {
+ i = 0;
+ goto find_first_elem;
+ }
+
+ /* key was found, get next key in the same bucket */
+ next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+ struct htab_elem, hash_node);
+
+ if (next_l) {
+ /* if next elem in this hash list is non-zero, just return it */
+ memcpy(next_key, next_l->key, key_size);
+ return 0;
+ }
+
+ /* no more elements in this hash list, go to the next bucket */
+ i = hash & (htab->n_buckets - 1);
+ i++;
+
+find_first_elem:
+ /* iterate over buckets */
+ for (; i < htab->n_buckets; i++) {
+ head = select_bucket(htab, i);
+
+ /* pick first element in the bucket */
+ next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ struct htab_elem, hash_node);
+ if (next_l) {
+ /* if it's not empty, just return it */
+ memcpy(next_key, next_l->key, key_size);
+ return 0;
+ }
+ }
+
+ /* iterated over all buckets and all elements */
+ return -ENOENT;
+}
+
+/* Called from syscall or from eBPF program */
+static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct htab_elem *l_new, *l_old;
+ struct hlist_head *head;
+ unsigned long flags;
+ u32 key_size;
+ int ret;
+
+ if (map_flags > BPF_EXIST)
+ /* unknown flags */
+ return -EINVAL;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* allocate new element outside of lock */
+ l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
+ if (!l_new)
+ return -ENOMEM;
+
+ key_size = map->key_size;
+
+ memcpy(l_new->key, key, key_size);
+ memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
+
+ l_new->hash = htab_map_hash(l_new->key, key_size);
+
+ /* bpf_map_update_elem() can be called in_irq() */
+ spin_lock_irqsave(&htab->lock, flags);
+
+ head = select_bucket(htab, l_new->hash);
+
+ l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
+
+ if (!l_old && unlikely(htab->count >= map->max_entries)) {
+ /* if elem with this 'key' doesn't exist and we've reached
+ * max_entries limit, fail insertion of new elem
+ */
+ ret = -E2BIG;
+ goto err;
+ }
+
+ if (l_old && map_flags == BPF_NOEXIST) {
+ /* elem already exists */
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (!l_old && map_flags == BPF_EXIST) {
+ /* elem doesn't exist, cannot update it */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* add new element to the head of the list, so that concurrent
+ * search will find it before old elem
+ */
+ hlist_add_head_rcu(&l_new->hash_node, head);
+ if (l_old) {
+ hlist_del_rcu(&l_old->hash_node);
+ kfree_rcu(l_old, rcu);
+ } else {
+ htab->count++;
+ }
+ spin_unlock_irqrestore(&htab->lock, flags);
+
+ return 0;
+err:
+ spin_unlock_irqrestore(&htab->lock, flags);
+ kfree(l_new);
+ return ret;
+}
+
+/* Called from syscall or from eBPF program */
+static int htab_map_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct hlist_head *head;
+ struct htab_elem *l;
+ unsigned long flags;
+ u32 hash, key_size;
+ int ret = -ENOENT;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key_size = map->key_size;
+
+ hash = htab_map_hash(key, key_size);
+
+ spin_lock_irqsave(&htab->lock, flags);
+
+ head = select_bucket(htab, hash);
+
+ l = lookup_elem_raw(head, hash, key, key_size);
+
+ if (l) {
+ hlist_del_rcu(&l->hash_node);
+ htab->count--;
+ kfree_rcu(l, rcu);
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&htab->lock, flags);
+ return ret;
+}
+
+static void delete_all_elements(struct bpf_htab *htab)
+{
+ int i;
+
+ for (i = 0; i < htab->n_buckets; i++) {
+ struct hlist_head *head = select_bucket(htab, i);
+ struct hlist_node *n;
+ struct htab_elem *l;
+
+ hlist_for_each_entry_safe(l, n, head, hash_node) {
+ hlist_del_rcu(&l->hash_node);
+ htab->count--;
+ kfree(l);
+ }
+ }
+}
+
+/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
+static void htab_map_free(struct bpf_map *map)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+
+ /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
+ * so the programs (can be more than one that used this map) were
+ * disconnected from events. Wait for outstanding critical sections in
+ * these programs to complete
+ */
+ synchronize_rcu();
+
+ * some of the kfree_rcu() callbacks for elements of this map may not have
+ * executed. It's ok. Proceed to free residual elements and map itself
+ */
+ delete_all_elements(htab);
+ kvfree(htab->buckets);
+ kfree(htab);
+}
+
+static struct bpf_map_ops htab_ops = {
+ .map_alloc = htab_map_alloc,
+ .map_free = htab_map_free,
+ .map_get_next_key = htab_map_get_next_key,
+ .map_lookup_elem = htab_map_lookup_elem,
+ .map_update_elem = htab_map_update_elem,
+ .map_delete_elem = htab_map_delete_elem,
+};
+
+static struct bpf_map_type_list tl = {
+ .ops = &htab_ops,
+ .type = BPF_MAP_TYPE_HASH,
+};
+
+static int __init register_htab_map(void)
+{
+ bpf_register_map_type(&tl);
+ return 0;
+}
+late_initcall(register_htab_map);
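With BPF_MAP_TYPE_HASH registered above, user space can instantiate it through BPF_MAP_CREATE. A hedged sketch, reusing the headers from the previous snippet; the key/value sizes, max_entries and the create_hash_map() name are arbitrary choices for illustration:

    static int create_hash_map(void)
    {
            union bpf_attr attr = {
                    .map_type    = BPF_MAP_TYPE_HASH,
                    .key_size    = sizeof(__u32),
                    .value_size  = sizeof(__u64),
                    .max_entries = 1024,
            };

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }

The returned fd is what the lookup/update/delete commands, and thus the htab_ops table above, then operate on.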
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
new file mode 100644
index 000000000000..9e3414d85459
--- /dev/null
+++ b/kernel/bpf/helpers.c
@@ -0,0 +1,89 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bpf.h>
+#include <linux/rcupdate.h>
+
+/* If a kernel subsystem allows eBPF programs to call this function,
+ * inside its own verifier_ops->get_func_proto() callback it should return
+ * bpf_map_lookup_elem_proto, so that the verifier can properly check the arguments
+ *
+ * Different map implementations will rely on rcu in map methods
+ * lookup/update/delete, therefore eBPF programs must run under rcu lock
+ * if the program is allowed to access maps, so check rcu_read_lock_held in
+ * all three functions.
+ */
+static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ /* verifier checked that R1 contains a valid pointer to bpf_map
+ * and R2 points to a program stack and map->key_size bytes were
+ * initialized
+ */
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+ void *key = (void *) (unsigned long) r2;
+ void *value;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ value = map->ops->map_lookup_elem(map, key);
+
+ /* lookup() returns either pointer to element value or NULL
+ * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
+ */
+ return (unsigned long) value;
+}
+
+struct bpf_func_proto bpf_map_lookup_elem_proto = {
+ .func = bpf_map_lookup_elem,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
+};
+
+static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+ void *key = (void *) (unsigned long) r2;
+ void *value = (void *) (unsigned long) r3;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ return map->ops->map_update_elem(map, key, value, r4);
+}
+
+struct bpf_func_proto bpf_map_update_elem_proto = {
+ .func = bpf_map_update_elem,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
+ .arg3_type = ARG_PTR_TO_MAP_VALUE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+ void *key = (void *) (unsigned long) r2;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ return map->ops->map_delete_elem(map, key);
+}
+
+struct bpf_func_proto bpf_map_delete_elem_proto = {
+ .func = bpf_map_delete_elem,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
+};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ba61c8c16032..088ac0b1b106 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -169,7 +169,7 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_from_user(key, ukey, map->key_size) != 0)
goto free_key;
- err = -ESRCH;
+ err = -ENOENT;
rcu_read_lock();
value = map->ops->map_lookup_elem(map, key);
if (!value)
@@ -190,7 +190,7 @@ err_put:
return err;
}
-#define BPF_MAP_UPDATE_ELEM_LAST_FIELD value
+#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
static int map_update_elem(union bpf_attr *attr)
{
@@ -231,7 +231,7 @@ static int map_update_elem(union bpf_attr *attr)
* therefore all map accessors rely on this fact, so do the same here
*/
rcu_read_lock();
- err = map->ops->map_update_elem(map, key, value);
+ err = map->ops->map_update_elem(map, key, value, attr->flags);
rcu_read_unlock();
free_value:
diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c
index fcaddff4003e..0ceae1e6e8b5 100644
--- a/kernel/bpf/test_stub.c
+++ b/kernel/bpf/test_stub.c
@@ -18,26 +18,18 @@ struct bpf_context {
u64 arg2;
};
-static u64 test_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
- return 0;
-}
-
-static struct bpf_func_proto test_funcs[] = {
- [BPF_FUNC_unspec] = {
- .func = test_func,
- .gpl_only = true,
- .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_MAP_KEY,
- },
-};
-
static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
{
- if (func_id < 0 || func_id >= ARRAY_SIZE(test_funcs))
+ switch (func_id) {
+ case BPF_FUNC_map_lookup_elem:
+ return &bpf_map_lookup_elem_proto;
+ case BPF_FUNC_map_update_elem:
+ return &bpf_map_update_elem_proto;
+ case BPF_FUNC_map_delete_elem:
+ return &bpf_map_delete_elem_proto;
+ default:
return NULL;
- return &test_funcs[func_id];
+ }
}
static const struct bpf_context_access {
@@ -78,38 +70,8 @@ static struct bpf_prog_type_list tl_prog = {
.type = BPF_PROG_TYPE_UNSPEC,
};
-static struct bpf_map *test_map_alloc(union bpf_attr *attr)
-{
- struct bpf_map *map;
-
- map = kzalloc(sizeof(*map), GFP_USER);
- if (!map)
- return ERR_PTR(-ENOMEM);
-
- map->key_size = attr->key_size;
- map->value_size = attr->value_size;
- map->max_entries = attr->max_entries;
- return map;
-}
-
-static void test_map_free(struct bpf_map *map)
-{
- kfree(map);
-}
-
-static struct bpf_map_ops test_map_ops = {
- .map_alloc = test_map_alloc,
- .map_free = test_map_free,
-};
-
-static struct bpf_map_type_list tl_map = {
- .ops = &test_map_ops,
- .type = BPF_MAP_TYPE_UNSPEC,
-};
-
static int __init register_test_ops(void)
{
- bpf_register_map_type(&tl_map);
bpf_register_prog_type(&tl_prog);
return 0;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9f81818f2941..a28e09c7825d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -153,22 +153,19 @@ struct reg_state {
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
- STACK_SPILL, /* 1st byte of register spilled into stack */
- STACK_SPILL_PART, /* other 7 bytes of register spill */
+ STACK_SPILL, /* register spilled into stack */
STACK_MISC /* BPF program wrote some data into this slot */
};
-struct bpf_stack_slot {
- enum bpf_stack_slot_type stype;
- struct reg_state reg_st;
-};
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
/* state of the program:
* type of all registers and stack info
*/
struct verifier_state {
struct reg_state regs[MAX_BPF_REG];
- struct bpf_stack_slot stack[MAX_BPF_STACK];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
/* linked list of verifier states used to prune search */
@@ -259,10 +256,10 @@ static void print_verifier_state(struct verifier_env *env)
env->cur_state.regs[i].map_ptr->key_size,
env->cur_state.regs[i].map_ptr->value_size);
}
- for (i = 0; i < MAX_BPF_STACK; i++) {
- if (env->cur_state.stack[i].stype == STACK_SPILL)
+ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+ if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
verbose(" fp%d=%s", -MAX_BPF_STACK + i,
- reg_type_str[env->cur_state.stack[i].reg_st.type]);
+ reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
}
verbose("\n");
}
@@ -539,8 +536,10 @@ static int bpf_size_to_bytes(int bpf_size)
static int check_stack_write(struct verifier_state *state, int off, int size,
int value_regno)
{
- struct bpf_stack_slot *slot;
int i;
+ /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+ * so it's aligned access and [off, off + size) are within stack limits
+ */
if (value_regno >= 0 &&
(state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
@@ -548,30 +547,24 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
state->regs[value_regno].type == PTR_TO_CTX)) {
/* register containing pointer is being spilled into stack */
- if (size != 8) {
+ if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
- slot = &state->stack[MAX_BPF_STACK + off];
- slot->stype = STACK_SPILL;
/* save register state */
- slot->reg_st = state->regs[value_regno];
- for (i = 1; i < 8; i++) {
- slot = &state->stack[MAX_BPF_STACK + off + i];
- slot->stype = STACK_SPILL_PART;
- slot->reg_st.type = UNKNOWN_VALUE;
- slot->reg_st.map_ptr = NULL;
- }
- } else {
+ state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+ state->regs[value_regno];
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+ } else {
/* regular write of data into stack */
- for (i = 0; i < size; i++) {
- slot = &state->stack[MAX_BPF_STACK + off + i];
- slot->stype = STACK_MISC;
- slot->reg_st.type = UNKNOWN_VALUE;
- slot->reg_st.map_ptr = NULL;
- }
+ state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+ (struct reg_state) {};
+
+ for (i = 0; i < size; i++)
+ state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
}
return 0;
}
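For reference, the per-slot spill tracking above corresponds to an instruction pattern like the following; a sketch built from the eBPF insn macros in include/linux/filter.h, with the array name, register choice and -8 offset picked only for illustration:

    struct bpf_insn spill_fill[] = {
            BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), /* 8-byte spill of the ctx pointer -> STACK_SPILL */
            BPF_MOV64_IMM(BPF_REG_1, 0),                    /* clobber R1 */
            BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), /* fill it back; size must be BPF_REG_SIZE */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };

Spilling a pointer with anything narrower than BPF_DW is rejected with "invalid size of register spill", while stores of non-pointer data simply mark the touched bytes STACK_MISC.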
@@ -579,19 +572,18 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
static int check_stack_read(struct verifier_state *state, int off, int size,
int value_regno)
{
+ u8 *slot_type;
int i;
- struct bpf_stack_slot *slot;
- slot = &state->stack[MAX_BPF_STACK + off];
+ slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
- if (slot->stype == STACK_SPILL) {
- if (size != 8) {
+ if (slot_type[0] == STACK_SPILL) {
+ if (size != BPF_REG_SIZE) {
verbose("invalid size of register spill\n");
return -EACCES;
}
- for (i = 1; i < 8; i++) {
- if (state->stack[MAX_BPF_STACK + off + i].stype !=
- STACK_SPILL_PART) {
+ for (i = 1; i < BPF_REG_SIZE; i++) {
+ if (slot_type[i] != STACK_SPILL) {
verbose("corrupted spill memory\n");
return -EACCES;
}
@@ -599,12 +591,12 @@ static int check_stack_read(struct verifier_state *state, int off, int size,
if (value_regno >= 0)
/* restore register state from stack */
- state->regs[value_regno] = slot->reg_st;
+ state->regs[value_regno] =
+ state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
return 0;
} else {
for (i = 0; i < size; i++) {
- if (state->stack[MAX_BPF_STACK + off + i].stype !=
- STACK_MISC) {
+ if (slot_type[i] != STACK_MISC) {
verbose("invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
@@ -747,7 +739,7 @@ static int check_stack_boundary(struct verifier_env *env,
}
for (i = 0; i < access_size; i++) {
- if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) {
+ if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
verbose("invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
@@ -1180,6 +1172,70 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
+/* verify safety of LD_ABS|LD_IND instructions:
+ * - they can only appear in the programs where ctx == skb
+ * - since they are wrappers of function calls, they scratch R1-R5 registers,
+ * preserve R6-R9, and store return value into R0
+ *
+ * Implicit input:
+ * ctx == skb == R6 == CTX
+ *
+ * Explicit input:
+ * SRC == any register
+ * IMM == 32-bit immediate
+ *
+ * Output:
+ * R0 - 8/16/32-bit skb data converted to cpu endianness
+ */
+static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+{
+ struct reg_state *regs = env->cur_state.regs;
+ u8 mode = BPF_MODE(insn->code);
+ struct reg_state *reg;
+ int i, err;
+
+ if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
+ verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n");
+ return -EINVAL;
+ }
+
+ if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+ (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
+ verbose("BPF_LD_ABS uses reserved fields\n");
+ return -EINVAL;
+ }
+
+ /* check whether implicit source operand (register R6) is readable */
+ err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
+ if (err)
+ return err;
+
+ if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+ verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
+ return -EINVAL;
+ }
+
+ if (mode == BPF_IND) {
+ /* check explicit source operand */
+ err = check_reg_arg(regs, insn->src_reg, SRC_OP);
+ if (err)
+ return err;
+ }
+
+ /* reset caller saved regs to unreadable */
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ reg = regs + caller_saved[i];
+ reg->type = NOT_INIT;
+ reg->imm = 0;
+ }
+
+ /* mark destination R0 register as readable, since it contains
+ * the value fetched from the packet
+ */
+ regs[BPF_REG_0].type = UNKNOWN_VALUE;
+ return 0;
+}
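In instruction form, the shape check_ld_abs() accepts looks roughly like this; a sketch using the insn macros from include/linux/filter.h, where the array name and the byte offset (14 + 9, the IPv4 protocol field of an untagged Ethernet frame) are chosen purely for illustration:

    struct bpf_insn ld_abs_prog[] = {
            BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* R6 = ctx (skb), required before LD_ABS */
            BPF_LD_ABS(BPF_B, 14 + 9),           /* R0 = one byte of packet data */
            BPF_EXIT_INSN(),                     /* return R0 */
    };

Used from any program type other than BPF_PROG_TYPE_SOCKET_FILTER, or with R6 not holding the skb pointer, the verifier now rejects the program with the messages above.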
+
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
@@ -1417,12 +1473,33 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
}
for (i = 0; i < MAX_BPF_STACK; i++) {
- if (memcmp(&old->stack[i], &cur->stack[i],
- sizeof(old->stack[0])) != 0) {
- if (old->stack[i].stype == STACK_INVALID)
- continue;
+ if (old->stack_slot_type[i] == STACK_INVALID)
+ continue;
+ if (old->stack_slot_type[i] != cur->stack_slot_type[i])
+ /* Ex: old explored (safe) state has STACK_SPILL in
+ * this stack slot, but current has STACK_MISC ->
+ * these verifier states are not equivalent,
+ * return false to continue verification of this path
+ */
return false;
- }
+ if (i % BPF_REG_SIZE)
+ continue;
+ if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
+ &cur->spilled_regs[i / BPF_REG_SIZE],
+ sizeof(old->spilled_regs[0])))
+ /* when explored and current stack slot types are
+ * the same, check that stored pointer types
+ * are the same as well.
+ * Ex: explored safe path could have stored
+ * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
+ * but current path has stored:
+ * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
+ * such verifier states are not equivalent.
+ * return false to continue verification of this path
+ */
+ return false;
+ else
+ continue;
}
return true;
}
@@ -1664,8 +1741,10 @@ process_bpf_exit:
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
- verbose("LD_ABS is not supported yet\n");
- return -EINVAL;
+ err = check_ld_abs(env, insn);
+ if (err)
+ return err;
+
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 136eceadeed1..bb263d0caab3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -277,6 +277,10 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
if (!(cgrp->root->subsys_mask & (1 << ss->id)))
return NULL;
+ /*
+ * This function is used while updating css associations and thus
+ * can't test the csses directly. Use ->child_subsys_mask.
+ */
while (cgroup_parent(cgrp) &&
!(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
cgrp = cgroup_parent(cgrp);
@@ -284,6 +288,39 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
return cgroup_css(cgrp, ss);
}
+/**
+ * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ * The returned css must be put using css_put().
+ */
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+
+ do {
+ css = cgroup_css(cgrp, ss);
+
+ if (css && css_tryget_online(css))
+ goto out_unlock;
+ cgrp = cgroup_parent(cgrp);
+ } while (cgrp);
+
+ css = init_css_set.subsys[ss->id];
+ css_get(css);
+out_unlock:
+ rcu_read_unlock();
+ return css;
+}
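A hedged usage sketch, not taken from this patch: the caller gets a reference that must be dropped with css_put(), and the memory controller is only an example subsystem here:

    struct cgroup_subsys_state *css;

    css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
    /* ... use css; always valid, possibly the root css ... */
    css_put(css);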
+
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
@@ -1019,31 +1056,30 @@ static void cgroup_put(struct cgroup *cgrp)
}
/**
- * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
* @cgrp: the target cgroup
+ * @subtree_control: the new subtree_control mask to consider
*
* On the default hierarchy, a subsystem may request other subsystems to be
* enabled together through its ->depends_on mask. In such cases, more
* subsystems than specified in "cgroup.subtree_control" may be enabled.
*
- * This function determines which subsystems need to be enabled given the
- * current @cgrp->subtree_control and records it in
- * @cgrp->child_subsys_mask. The resulting mask is always a superset of
- * @cgrp->subtree_control and follows the usual hierarchy rules.
+ * This function calculates which subsystems need to be enabled if
+ * @subtree_control is to be applied to @cgrp. The returned mask is always
+ * a superset of @subtree_control and follows the usual hierarchy rules.
*/
-static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+static unsigned int cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
+ unsigned int subtree_control)
{
struct cgroup *parent = cgroup_parent(cgrp);
- unsigned int cur_ss_mask = cgrp->subtree_control;
+ unsigned int cur_ss_mask = subtree_control;
struct cgroup_subsys *ss;
int ssid;
lockdep_assert_held(&cgroup_mutex);
- if (!cgroup_on_dfl(cgrp)) {
- cgrp->child_subsys_mask = cur_ss_mask;
- return;
- }
+ if (!cgroup_on_dfl(cgrp))
+ return cur_ss_mask;
while (true) {
unsigned int new_ss_mask = cur_ss_mask;
@@ -1067,7 +1103,20 @@ static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
cur_ss_mask = new_ss_mask;
}
- cgrp->child_subsys_mask = cur_ss_mask;
+ return cur_ss_mask;
+}
+
+/**
+ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * @cgrp: the target cgroup
+ *
+ * Update @cgrp->child_subsys_mask according to the current
+ * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
+ */
+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+ cgrp->child_subsys_mask =
+ cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
}
/**
@@ -2641,7 +2690,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
loff_t off)
{
unsigned int enable = 0, disable = 0;
- unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
+ unsigned int css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
struct cgroup *cgrp, *child;
struct cgroup_subsys *ss;
char *tok;
@@ -2693,36 +2742,6 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
ret = -ENOENT;
goto out_unlock;
}
-
- /*
- * @ss is already enabled through dependency and
- * we'll just make it visible. Skip draining.
- */
- if (cgrp->child_subsys_mask & (1 << ssid))
- continue;
-
- /*
- * Because css offlining is asynchronous, userland
- * might try to re-enable the same controller while
- * the previous instance is still around. In such
- * cases, wait till it's gone using offline_waitq.
- */
- cgroup_for_each_live_child(child, cgrp) {
- DEFINE_WAIT(wait);
-
- if (!cgroup_css(child, ss))
- continue;
-
- cgroup_get(child);
- prepare_to_wait(&child->offline_waitq, &wait,
- TASK_UNINTERRUPTIBLE);
- cgroup_kn_unlock(of->kn);
- schedule();
- finish_wait(&child->offline_waitq, &wait);
- cgroup_put(child);
-
- return restart_syscall();
- }
} else if (disable & (1 << ssid)) {
if (!(cgrp->subtree_control & (1 << ssid))) {
disable &= ~(1 << ssid);
@@ -2758,19 +2777,48 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
* subsystems than specified may need to be enabled or disabled
* depending on subsystem dependencies.
*/
- cgrp->subtree_control |= enable;
- cgrp->subtree_control &= ~disable;
+ old_sc = cgrp->subtree_control;
+ old_ss = cgrp->child_subsys_mask;
+ new_sc = (old_sc | enable) & ~disable;
+ new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
- old_ctrl = cgrp->child_subsys_mask;
- cgroup_refresh_child_subsys_mask(cgrp);
- new_ctrl = cgrp->child_subsys_mask;
-
- css_enable = ~old_ctrl & new_ctrl;
- css_disable = old_ctrl & ~new_ctrl;
+ css_enable = ~old_ss & new_ss;
+ css_disable = old_ss & ~new_ss;
enable |= css_enable;
disable |= css_disable;
/*
+ * Because css offlining is asynchronous, userland might try to
+ * re-enable the same controller while the previous instance is
+ * still around. In such cases, wait till it's gone using
+ * offline_waitq.
+ */
+ for_each_subsys(ss, ssid) {
+ if (!(css_enable & (1 << ssid)))
+ continue;
+
+ cgroup_for_each_live_child(child, cgrp) {
+ DEFINE_WAIT(wait);
+
+ if (!cgroup_css(child, ss))
+ continue;
+
+ cgroup_get(child);
+ prepare_to_wait(&child->offline_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ cgroup_kn_unlock(of->kn);
+ schedule();
+ finish_wait(&child->offline_waitq, &wait);
+ cgroup_put(child);
+
+ return restart_syscall();
+ }
+ }
+
+ cgrp->subtree_control = new_sc;
+ cgrp->child_subsys_mask = new_ss;
+
+ /*
* Create new csses or make the existing ones visible. A css is
* created invisible if it's being implicitly enabled through
* dependency. An invisible css is made visible when the userland
@@ -2825,6 +2873,24 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
}
}
+ /*
+ * The effective csses of all the descendants (excluding @cgrp) may
+ * have changed. Subsystems can optionally subscribe to this event
+ * by implementing ->css_e_css_changed() which is invoked if any of
+ * the effective csses seen from the css's cgroup may have changed.
+ */
+ for_each_subsys(ss, ssid) {
+ struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
+ struct cgroup_subsys_state *css;
+
+ if (!ss->css_e_css_changed || !this_css)
+ continue;
+
+ css_for_each_descendant_pre(css, this_css)
+ if (css != this_css)
+ ss->css_e_css_changed(css);
+ }
+
kernfs_activate(cgrp->kn);
ret = 0;
out_unlock:
@@ -2832,9 +2898,8 @@ out_unlock:
return ret ?: nbytes;
err_undo_css:
- cgrp->subtree_control &= ~enable;
- cgrp->subtree_control |= disable;
- cgroup_refresh_child_subsys_mask(cgrp);
+ cgrp->subtree_control = old_sc;
+ cgrp->child_subsys_mask = old_ss;
for_each_subsys(ss, ssid) {
if (!(enable & (1 << ssid)))
@@ -4370,6 +4435,8 @@ static void css_release_work_fn(struct work_struct *work)
if (ss) {
/* css release path */
cgroup_idr_remove(&ss->css_idr, css->id);
+ if (ss->css_released)
+ ss->css_released(css);
} else {
/* cgroup release path */
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 723cfc9d0ad7..64b257f6bca2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -248,34 +248,34 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
- * There are two global mutexes guarding cpuset structures - cpuset_mutex
- * and callback_mutex. The latter may nest inside the former. We also
- * require taking task_lock() when dereferencing a task's cpuset pointer.
- * See "The task_lock() exception", at the end of this comment.
+ * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * callback_lock. We also require taking task_lock() when dereferencing a
+ * task's cpuset pointer. See "The task_lock() exception", at the end of this
+ * comment.
*
- * A task must hold both mutexes to modify cpusets. If a task holds
+ * A task must hold both locks to modify cpusets. If a task holds
* cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
- * is the only task able to also acquire callback_mutex and be able to
+ * is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
* just holding cpuset_mutex. While it is performing these checks, various
- * callback routines can briefly acquire callback_mutex to query cpusets.
- * Once it is ready to make the changes, it takes callback_mutex, blocking
+ * callback routines can briefly acquire callback_lock to query cpusets.
+ * Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
- * callback_mutex, as that would risk double tripping on callback_mutex
+ * callback_lock, as that would risk double tripping on callback_lock
* from one of the callbacks into the cpuset code from within
* __alloc_pages().
*
- * If a task is only holding callback_mutex, then it has read-only
+ * If a task is only holding callback_lock, then it has read-only
* access to cpusets.
*
* Now, the task_struct fields mems_allowed and mempolicy may be changed
* by another task; we use alloc_lock in the task_struct fields to protect
* them.
*
- * The cpuset_common_file_read() handlers only hold callback_mutex across
+ * The cpuset_common_file_read() handlers only hold callback_lock across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_MUTEX(callback_mutex);
+static DEFINE_SPINLOCK(callback_lock);
/*
* CPU / memory hotplug is handled asynchronously.
@@ -329,7 +329,7 @@ static struct file_system_type cpuset_fs_type = {
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
- * Call with callback_mutex held.
+ * Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
@@ -347,7 +347,7 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
- * Call with callback_mutex held.
+ * Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -359,7 +359,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
- * Called with callback_mutex/cpuset_mutex held
+ * Call with callback_lock or cpuset_mutex held.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)
@@ -886,9 +886,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
continue;
rcu_read_unlock();
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, new_cpus);
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -953,9 +953,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1142,9 +1142,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1165,7 +1165,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
- * Call with cpuset_mutex held. May take callback_mutex during call.
+ * Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_sem, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1212,9 +1212,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &cs->mems_allowed);
@@ -1305,9 +1305,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -1714,7 +1714,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
count = seq_get_buf(sf, &buf);
s = buf;
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
@@ -1741,7 +1741,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
seq_commit(sf, -1);
}
out_unlock:
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
return ret;
}
@@ -1958,12 +1958,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
if (cgroup_on_dfl(cs->css.cgroup)) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -1990,10 +1990,10 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
@@ -2032,7 +2032,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
if (cgroup_on_dfl(root_css->cgroup)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2043,7 +2043,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
@@ -2128,12 +2128,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2170,10 +2170,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
if (cpus_updated)
update_tasks_cpumask(cs);
@@ -2259,21 +2259,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
- mutex_lock(&callback_mutex);
+ spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
- mutex_unlock(&callback_mutex);
+ spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}
@@ -2366,11 +2366,13 @@ void __init cpuset_init_smp(void)
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
- mutex_lock(&callback_mutex);
+ unsigned long flags;
+
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_cpus(task_cs(tsk), pmask);
rcu_read_unlock();
- mutex_unlock(&callback_mutex);
+ spin_unlock_irqrestore(&callback_lock, flags);
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2416,12 +2418,13 @@ void cpuset_init_current_mems_allowed(void)
nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
nodemask_t mask;
+ unsigned long flags;
- mutex_lock(&callback_mutex);
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
- mutex_unlock(&callback_mutex);
+ spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
@@ -2440,7 +2443,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
/*
* nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
* mem_hardwall ancestor to the specified cpuset. Call holding
- * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
+ * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
* (an unusual configuration), then returns the root cpuset.
*/
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
@@ -2451,7 +2454,7 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
}
/**
- * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * cpuset_node_allowed - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
@@ -2463,13 +2466,6 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
* flag, yes.
* Otherwise, no.
*
- * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
- * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
- * might sleep, and might allow a node from an enclosing cpuset.
- *
- * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
- * cpusets, and never sleeps.
- *
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
* (in get_page_from_freelist()) refusing to consider the zones for
@@ -2482,13 +2478,12 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
* GFP_KERNEL allocations are not so marked, so can escape to the
* nearest enclosing hardwalled ancestor cpuset.
*
- * Scanning up parent cpusets requires callback_mutex. The
+ * Scanning up parent cpusets requires callback_lock. The
* __alloc_pages() routine only calls here with __GFP_HARDWALL bit
* _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
* current tasks mems_allowed came up empty on the first pass over
* the zonelist. So only GFP_KERNEL allocations, if all nodes in the
- * cpuset are short of memory, might require taking the callback_mutex
- * mutex.
+ * cpuset are short of memory, might require taking the callback_lock.
*
* The first call here from mm/page_alloc:get_page_from_freelist()
* has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
@@ -2505,20 +2500,15 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
* TIF_MEMDIE - any node ok
* GFP_KERNEL - any node in enclosing hardwalled cpuset ok
* GFP_USER - only nodes in current tasks mems allowed ok.
- *
- * Rule:
- * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
- * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
- * the code that might scan up ancestor cpusets and sleep.
*/
-int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+int __cpuset_node_allowed(int node, gfp_t gfp_mask)
{
struct cpuset *cs; /* current cpuset ancestors */
int allowed; /* is allocation in zone z allowed? */
+ unsigned long flags;
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
return 1;
- might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
if (node_isset(node, current->mems_allowed))
return 1;
/*
@@ -2534,55 +2524,17 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
return 1;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
- mutex_lock(&callback_mutex);
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();
- mutex_unlock(&callback_mutex);
+ spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
-/*
- * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
- * @node: is this an allowed node?
- * @gfp_mask: memory allocation flags
- *
- * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
- * set, yes, we can always allocate. If node is in our task's mems_allowed,
- * yes. If the task has been OOM killed and has access to memory reserves as
- * specified by the TIF_MEMDIE flag, yes.
- * Otherwise, no.
- *
- * The __GFP_THISNODE placement logic is really handled elsewhere,
- * by forcibly using a zonelist starting at a specified node, and by
- * (in get_page_from_freelist()) refusing to consider the zones for
- * any node on the zonelist except the first. By the time any such
- * calls get to this routine, we should just shut up and say 'yes'.
- *
- * Unlike the cpuset_node_allowed_softwall() variant, above,
- * this variant requires that the node be in the current task's
- * mems_allowed or that we're in interrupt. It does not scan up the
- * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
- * It never sleeps.
- */
-int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
-{
- if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
- return 1;
- if (node_isset(node, current->mems_allowed))
- return 1;
- /*
- * Allow tasks that have access to memory reserves because they have
- * been OOM killed to get memory anywhere.
- */
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
- return 1;
- return 0;
-}
-
/**
* cpuset_mem_spread_node() - On which node to begin search for a file page
* cpuset_slab_spread_node() - On which node to begin search for a slab page
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ed8f2cde34c5..cb346f26a22d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
}
flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
page_remove_rmap(page);
@@ -724,14 +724,14 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
int more = 0;
again:
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
if (!valid_vma(vma, is_register))
continue;
if (!prev && !more) {
/*
- * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+ * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
* reclaim. This is optimistic, no harm done if it fails.
*/
prev = kmalloc(sizeof(struct map_info),
@@ -755,7 +755,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
info->mm = vma->vm_mm;
info->vaddr = offset_to_vaddr(vma, offset);
}
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_read(mapping);
if (!more)
goto out;
diff --git a/kernel/exit.c b/kernel/exit.c
index 8714e5ded8b4..1ea4369890a3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -212,27 +212,6 @@ repeat:
}
/*
- * This checks not only the pgrp, but falls back on the pid if no
- * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
- * without this...
- *
- * The caller must hold rcu lock or the tasklist lock.
- */
-struct pid *session_of_pgrp(struct pid *pgrp)
-{
- struct task_struct *p;
- struct pid *sid = NULL;
-
- p = pid_task(pgrp, PIDTYPE_PGID);
- if (p == NULL)
- p = pid_task(pgrp, PIDTYPE_PID);
- if (p != NULL)
- sid = task_session(p);
-
- return sid;
-}
-
-/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
diff --git a/kernel/fork.c b/kernel/fork.c
index 9ca84189cfc2..4dc2ddade9f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -433,7 +433,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
if (tmp->vm_flags & VM_SHARED)
atomic_inc(&mapping->i_mmap_writable);
flush_dcache_mmap_lock(mapping);
@@ -445,7 +445,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
vma_interval_tree_insert_after(tmp, mpnt,
&mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
/*
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 3b7408759bdf..c92e44855ddd 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -32,10 +32,13 @@ config GCOV_KERNEL
Note that the debugfs filesystem has to be mounted to access
profiling data.
+config ARCH_HAS_GCOV_PROFILE_ALL
+ def_bool n
+
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
+ depends on ARCH_HAS_GCOV_PROFILE_ALL
default n
---help---
This options activates profiling for the entire kernel.
diff --git a/kernel/groups.c b/kernel/groups.c
index 451698f86cfa..664411f171b5 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/user_namespace.h>
#include <asm/uaccess.h>
/* init to 2 - one for init_task, one to ensure it is never freed */
@@ -213,6 +214,14 @@ out:
return i;
}
+bool may_setgroups(void)
+{
+ struct user_namespace *user_ns = current_user_ns();
+
+ return ns_capable(user_ns, CAP_SETGID) &&
+ userns_may_setgroups(user_ns);
+}
+
/*
* SMP: Our groups are copy-on-write. We can set them safely
* without another task interfering.
@@ -223,7 +232,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!ns_capable(current_user_ns(), CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 3ab9048483fa..cbf9fb899d92 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
void irq_work_tick(void)
{
- struct llist_head *raised = &__get_cpu_var(raised_list);
+ struct llist_head *raised = this_cpu_ptr(&raised_list);
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
- irq_work_run_list(&__get_cpu_var(lazy_list));
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
}
/*
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2abf9f6e9a61..9a8a01abbaed 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -600,7 +600,7 @@ kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
if (!kexec_on_panic) {
image->swap_page = kimage_alloc_control_pages(image, 0);
if (!image->swap_page) {
- pr_err(KERN_ERR "Could not allocate swap buffer\n");
+ pr_err("Could not allocate swap buffer\n");
goto out_free_control_pages;
}
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 831978cebf1d..06f58309fed2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1410,16 +1410,10 @@ static inline int check_kprobe_rereg(struct kprobe *p)
return ret;
}
-static int check_kprobe_address_safe(struct kprobe *p,
- struct module **probed_mod)
+int __weak arch_check_ftrace_location(struct kprobe *p)
{
- int ret = 0;
unsigned long ftrace_addr;
- /*
- * If the address is located on a ftrace nop, set the
- * breakpoint to the following instruction.
- */
ftrace_addr = ftrace_location((unsigned long)p->addr);
if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
@@ -1431,7 +1425,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
return -EINVAL;
#endif
}
+ return 0;
+}
+static int check_kprobe_address_safe(struct kprobe *p,
+ struct module **probed_mod)
+{
+ int ret;
+
+ ret = arch_check_ftrace_location(p);
+ if (ret)
+ return ret;
jump_label_lock();
preempt_disable();
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index ef42d0ab3115..49746c81ad8d 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -220,11 +220,10 @@ void exit_task_namespaces(struct task_struct *p)
SYSCALL_DEFINE2(setns, int, fd, int, nstype)
{
- const struct proc_ns_operations *ops;
struct task_struct *tsk = current;
struct nsproxy *new_nsproxy;
- struct proc_ns *ei;
struct file *file;
+ struct ns_common *ns;
int err;
file = proc_ns_fget(fd);
@@ -232,9 +231,8 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
return PTR_ERR(file);
err = -EINVAL;
- ei = get_proc_ns(file_inode(file));
- ops = ei->ns_ops;
- if (nstype && (ops->type != nstype))
+ ns = get_proc_ns(file_inode(file));
+ if (nstype && (ns->ops->type != nstype))
goto out;
new_nsproxy = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs);
@@ -243,7 +241,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
goto out;
}
- err = ops->install(new_nsproxy, ei->ns);
+ err = ns->ops->install(new_nsproxy, ns);
if (err) {
free_nsproxy(new_nsproxy);
goto out;
diff --git a/kernel/pid.c b/kernel/pid.c
index 82430c858d69..cd36a5e0d173 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -79,7 +79,10 @@ struct pid_namespace init_pid_ns = {
.level = 0,
.child_reaper = &init_task,
.user_ns = &init_user_ns,
- .proc_inum = PROC_PID_INIT_INO,
+ .ns.inum = PROC_PID_INIT_INO,
+#ifdef CONFIG_PID_NS
+ .ns.ops = &pidns_operations,
+#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bc6d6a89b6e6..a65ba137fd15 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -105,9 +105,10 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
if (ns->pid_cachep == NULL)
goto out_free_map;
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err)
goto out_free_map;
+ ns->ns.ops = &pidns_operations;
kref_init(&ns->kref);
ns->level = level;
@@ -142,7 +143,7 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
{
int i;
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
put_user_ns(ns->user_ns);
@@ -333,7 +334,12 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
return 0;
}
-static void *pidns_get(struct task_struct *task)
+static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct pid_namespace, ns);
+}
+
+static struct ns_common *pidns_get(struct task_struct *task)
{
struct pid_namespace *ns;
@@ -343,18 +349,18 @@ static void *pidns_get(struct task_struct *task)
get_pid_ns(ns);
rcu_read_unlock();
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void pidns_put(void *ns)
+static void pidns_put(struct ns_common *ns)
{
- put_pid_ns(ns);
+ put_pid_ns(to_pid_ns(ns));
}
-static int pidns_install(struct nsproxy *nsproxy, void *ns)
+static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct pid_namespace *active = task_active_pid_ns(current);
- struct pid_namespace *ancestor, *new = ns;
+ struct pid_namespace *ancestor, *new = to_pid_ns(ns);
if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -382,19 +388,12 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int pidns_inum(void *ns)
-{
- struct pid_namespace *pid_ns = ns;
- return pid_ns->proc_inum;
-}
-
const struct proc_ns_operations pidns_operations = {
.name = "pid",
.type = CLONE_NEWPID,
.get = pidns_get,
.put = pidns_put,
.install = pidns_install,
- .inum = pidns_inum,
};
static __init int pid_namespaces_init(void)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f900dc9f6822..02d6b6d28796 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1857,10 +1857,16 @@ asmlinkage __visible int printk(const char *fmt, ...)
int r;
va_start(args, fmt);
- preempt_disable();
+
+ /*
+ * If a caller overrides the per_cpu printk_func, then it needs
+ * to disable preemption when calling printk(). Otherwise
+ * the printk_func should be set to the default. No need to
+ * disable preemption here.
+ */
vprintk_func = this_cpu_read(printk_func);
r = vprintk_func(fmt, args);
- preempt_enable();
+
va_end(args);
return r;
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 00fe55cc5a82..b6e4c16377c7 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -25,6 +25,38 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
}
EXPORT_SYMBOL_GPL(print_stack_trace);
+int snprint_stack_trace(char *buf, size_t size,
+ struct stack_trace *trace, int spaces)
+{
+ int i;
+ unsigned long ip;
+ int generated;
+ int total = 0;
+
+ if (WARN_ON(!trace->entries))
+ return 0;
+
+ for (i = 0; i < trace->nr_entries; i++) {
+ ip = trace->entries[i];
+ generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
+ 1 + spaces, ' ', (void *) ip, (void *) ip);
+
+ total += generated;
+
+ /* Assume that generated isn't a negative number */
+ if (generated >= size) {
+ buf += size;
+ size = 0;
+ } else {
+ buf += generated;
+ size -= generated;
+ }
+ }
+
+ return total;
+}
+EXPORT_SYMBOL_GPL(snprint_stack_trace);
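A brief usage sketch of the new helper (sizes are arbitrary and the caller is assumed to be in a context where save_stack_trace() is allowed):

    unsigned long entries[16];
    char buf[512];
    struct stack_trace trace = {
            .max_entries = ARRAY_SIZE(entries),
            .entries     = entries,
            .skip        = 0,
    };

    save_stack_trace(&trace);
    snprint_stack_trace(buf, sizeof(buf), &trace, 1);

As with snprintf(), the return value is the length that would have been generated, so it can exceed the buffer size passed in.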
+
/*
* Architectures that do not implement save_stack_trace_tsk or
* save_stack_trace_regs get this weak alias and a once-per-bootup warning
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 02aa4185b17e..5adcb0ae3a58 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -169,6 +169,8 @@ cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
cond_syscall(sys_subpage_prot);
+cond_syscall(sys_s390_pci_mmio_read);
+cond_syscall(sys_s390_pci_mmio_write);
/* mmu depending weak syscall entries */
cond_syscall(sys_mprotect);
@@ -224,3 +226,6 @@ cond_syscall(sys_seccomp);
/* access BPF programs and maps */
cond_syscall(sys_bpf);
+
+/* execveat */
+cond_syscall(sys_execveat);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7c54ff79afd7..137c7f69b264 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -623,6 +623,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "tracepoint_printk",
+ .data = &tracepoint_printk,
+ .maxlen = sizeof(tracepoint_printk),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#endif
#ifdef CONFIG_KEXEC
{
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 2e949cc9c9f1..b79f39bda7e1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -792,7 +792,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
/* Initialize mult/shift and max_idle_ns */
__clocksource_updatefreq_scale(cs, scale, freq);
- /* Add clocksource to the clcoksource list */
+ /* Add clocksource to the clocksource list */
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1f4356037a7d..4d54b7540585 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -235,7 +235,7 @@ void tick_nohz_full_kick(void)
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
- irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+ irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
/*
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 65015ff2f07c..6390517e77d4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -741,6 +741,7 @@ u64 nsecs_to_jiffies64(u64 n)
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
+EXPORT_SYMBOL(nsecs_to_jiffies64);
/**
* nsecs_to_jiffies - Convert nsecs in u64 to jiffies
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 11b9cb36092b..483cecfa5c17 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1477,9 +1477,6 @@ static int blk_trace_remove_queue(struct request_queue *q)
if (atomic_dec_and_test(&blk_probes_ref))
blk_unregister_tracepoints();
- spin_lock_irq(&running_trace_lock);
- list_del(&bt->running_list);
- spin_unlock_irq(&running_trace_lock);
blk_trace_free(bt);
return 0;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1af4f8f2ab5d..2e767972e99c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -63,6 +63,10 @@ static bool __read_mostly tracing_selftest_running;
*/
bool __read_mostly tracing_selftest_disabled;
+/* Pipe tracepoints to printk */
+struct trace_iterator *tracepoint_print_iter;
+int tracepoint_printk;
+
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
@@ -193,6 +197,13 @@ static int __init set_trace_boot_clock(char *str)
}
__setup("trace_clock=", set_trace_boot_clock);
+static int __init set_tracepoint_printk(char *str)
+{
+ if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+ tracepoint_printk = 1;
+ return 1;
+}
+__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(cycle_t nsec)
{
@@ -2031,7 +2042,7 @@ void trace_printk_init_buffers(void)
pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
pr_warning("** **\n");
pr_warning("** This means that this is a DEBUG kernel and it is **\n");
- pr_warning("** unsafe for produciton use. **\n");
+ pr_warning("** unsafe for production use. **\n");
pr_warning("** **\n");
pr_warning("** If you see this message and you are not debugging **\n");
pr_warning("** the kernel, report this immediately to your vendor! **\n");
@@ -6898,6 +6909,19 @@ out:
return ret;
}
+void __init trace_init(void)
+{
+ if (tracepoint_printk) {
+ tracepoint_print_iter =
+ kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
+ if (WARN_ON(!tracepoint_print_iter))
+ tracepoint_printk = 0;
+ }
+ tracer_alloc_buffers();
+ init_ftrace_syscalls();
+ trace_event_init();
+}
+
__init static int clear_boot_tracer(void)
{
/*
@@ -6917,6 +6941,5 @@ __init static int clear_boot_tracer(void)
return 0;
}
-early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3255dfb054a0..8de48bac1ce2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1301,4 +1301,18 @@ int perf_ftrace_event_register(struct ftrace_event_call *call,
#define perf_ftrace_event_register NULL
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+void init_ftrace_syscalls(void);
+#else
+static inline void init_ftrace_syscalls(void) { }
+#endif
+
+#ifdef CONFIG_EVENT_TRACING
+void trace_event_init(void);
+#else
+static inline void __init trace_event_init(void) { }
+#endif
+
+extern struct trace_iterator *tracepoint_print_iter;
+
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d0e4f92b5eb6..366a78a3e61e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -212,8 +212,40 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+static DEFINE_SPINLOCK(tracepoint_iter_lock);
+
+static void output_printk(struct ftrace_event_buffer *fbuffer)
+{
+ struct ftrace_event_call *event_call;
+ struct trace_event *event;
+ unsigned long flags;
+ struct trace_iterator *iter = tracepoint_print_iter;
+
+ if (!iter)
+ return;
+
+ event_call = fbuffer->ftrace_file->event_call;
+ if (!event_call || !event_call->event.funcs ||
+ !event_call->event.funcs->trace)
+ return;
+
+ event = &fbuffer->ftrace_file->event_call->event;
+
+ spin_lock_irqsave(&tracepoint_iter_lock, flags);
+ trace_seq_init(&iter->seq);
+ iter->ent = fbuffer->entry;
+ event_call->event.funcs->trace(iter, 0, event);
+ trace_seq_putc(&iter->seq, 0);
+ printk("%s", iter->seq.buffer);
+
+ spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+}
+
void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
{
+ if (tracepoint_printk)
+ output_printk(fbuffer);
+
event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
fbuffer->flags, fbuffer->pc);
@@ -2480,8 +2512,14 @@ static __init int event_trace_init(void)
#endif
return 0;
}
-early_initcall(event_trace_memsetup);
-core_initcall(event_trace_enable);
+
+void __init trace_event_init(void)
+{
+ event_trace_memsetup();
+ init_ftrace_syscalls();
+ event_trace_enable();
+}
+
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index dfe00a4f3f3e..c6ee36fcbf90 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -514,7 +514,7 @@ unsigned long __init __weak arch_syscall_addr(int nr)
return (unsigned long)sys_call_table[nr];
}
-static int __init init_ftrace_syscalls(void)
+void __init init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
unsigned long addr;
@@ -524,7 +524,7 @@ static int __init init_ftrace_syscalls(void)
GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
- return -ENOMEM;
+ return;
}
for (i = 0; i < NR_syscalls; i++) {
@@ -536,10 +536,7 @@ static int __init init_ftrace_syscalls(void)
meta->syscall_nr = i;
syscalls_metadata[i] = meta;
}
-
- return 0;
}
-early_initcall(init_ftrace_syscalls);
#ifdef CONFIG_PERF_EVENTS
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 602e5bbbceff..d58cc4d8f0d1 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!ns_capable(current_user_ns(), CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
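/*
 * may_setgroups() itself is defined outside this hunk.  A minimal sketch of
 * what it presumably looks like, pairing the old CAP_SETGID check with the
 * new per-namespace setgroups knob (see userns_may_setgroups() in
 * kernel/user_namespace.c below); the exact body is an assumption, not part
 * of this patch.
 */
bool may_setgroups(void)
{
	struct user_namespace *user_ns = current_user_ns();

	return ns_capable(user_ns, CAP_SETGID) &&
		userns_may_setgroups(user_ns);
}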
diff --git a/kernel/user.c b/kernel/user.c
index 4efa39350e44..b069ccbfb0b0 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -50,7 +50,11 @@ struct user_namespace init_user_ns = {
.count = ATOMIC_INIT(3),
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
- .proc_inum = PROC_USER_INIT_INO,
+ .ns.inum = PROC_USER_INIT_INO,
+#ifdef CONFIG_USER_NS
+ .ns.ops = &userns_operations,
+#endif
+ .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
.persistent_keyring_register_sem =
__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index aa312b0dc3ec..4109f8320684 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -24,6 +24,7 @@
#include <linux/fs_struct.h>
static struct kmem_cache *user_ns_cachep __read_mostly;
+static DEFINE_MUTEX(userns_state_mutex);
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
@@ -86,11 +87,12 @@ int create_user_ns(struct cred *new)
if (!ns)
return -ENOMEM;
- ret = proc_alloc_inum(&ns->proc_inum);
+ ret = ns_alloc_inum(&ns->ns);
if (ret) {
kmem_cache_free(user_ns_cachep, ns);
return ret;
}
+ ns->ns.ops = &userns_operations;
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
@@ -99,6 +101,11 @@ int create_user_ns(struct cred *new)
ns->owner = owner;
ns->group = group;
+ /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
+ mutex_lock(&userns_state_mutex);
+ ns->flags = parent_ns->flags;
+ mutex_unlock(&userns_state_mutex);
+
set_cred_user_ns(new, ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -136,7 +143,7 @@ void free_user_ns(struct user_namespace *ns)
#ifdef CONFIG_PERSISTENT_KEYRINGS
key_put(ns->persistent_keyring_register);
#endif
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
} while (atomic_dec_and_test(&parent->count));
@@ -583,9 +590,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map,
return false;
}
-
-static DEFINE_MUTEX(id_map_mutex);
-
static ssize_t map_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos,
int cap_setid,
@@ -602,7 +606,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
ssize_t ret = -EINVAL;
/*
- * The id_map_mutex serializes all writes to any given map.
+ * The userns_state_mutex serializes all writes to any given map.
*
* Any map is only ever written once.
*
@@ -620,7 +624,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
* order and smp_rmb() is guaranteed that we don't have crazy
* architectures returning stale data.
*/
- mutex_lock(&id_map_mutex);
+ mutex_lock(&userns_state_mutex);
ret = -EPERM;
/* Only allow one successful write to the map */
@@ -640,7 +644,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (!page)
goto out;
- /* Only allow <= page size writes at the beginning of the file */
+ /* Only allow < page size writes at the beginning of the file */
ret = -EINVAL;
if ((*ppos != 0) || (count >= PAGE_SIZE))
goto out;
@@ -750,7 +754,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
*ppos = count;
ret = count;
out:
- mutex_unlock(&id_map_mutex);
+ mutex_unlock(&userns_state_mutex);
if (page)
free_page(page);
return ret;
@@ -812,16 +816,21 @@ static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
- /* Allow mapping to your own filesystem ids */
- if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
+ const struct cred *cred = file->f_cred;
+ /* Don't allow mappings that would allow anything that wouldn't
+ * be allowed without the establishment of unprivileged mappings.
+ */
+ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
+ uid_eq(ns->owner, cred->euid)) {
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
kuid_t uid = make_kuid(ns->parent, id);
- if (uid_eq(uid, file->f_cred->fsuid))
+ if (uid_eq(uid, cred->euid))
return true;
} else if (cap_setid == CAP_SETGID) {
kgid_t gid = make_kgid(ns->parent, id);
- if (gid_eq(gid, file->f_cred->fsgid))
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
+ gid_eq(gid, cred->egid))
return true;
}
}
@@ -841,7 +850,106 @@ static bool new_idmap_permitted(const struct file *file,
return false;
}
-static void *userns_get(struct task_struct *task)
+int proc_setgroups_show(struct seq_file *seq, void *v)
+{
+ struct user_namespace *ns = seq->private;
+ unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+
+ seq_printf(seq, "%s\n",
+ (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
+ "allow" : "deny");
+ return 0;
+}
+
+ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct user_namespace *ns = seq->private;
+ char kbuf[8], *pos;
+ bool setgroups_allowed;
+ ssize_t ret;
+
+ /* Only allow a very narrow range of strings to be written */
+ ret = -EINVAL;
+ if ((*ppos != 0) || (count >= sizeof(kbuf)))
+ goto out;
+
+ /* What was written? */
+ ret = -EFAULT;
+ if (copy_from_user(kbuf, buf, count))
+ goto out;
+ kbuf[count] = '\0';
+ pos = kbuf;
+
+ /* What is being requested? */
+ ret = -EINVAL;
+ if (strncmp(pos, "allow", 5) == 0) {
+ pos += 5;
+ setgroups_allowed = true;
+ }
+ else if (strncmp(pos, "deny", 4) == 0) {
+ pos += 4;
+ setgroups_allowed = false;
+ }
+ else
+ goto out;
+
+ /* Verify there is no trailing junk on the line */
+ pos = skip_spaces(pos);
+ if (*pos != '\0')
+ goto out;
+
+ ret = -EPERM;
+ mutex_lock(&userns_state_mutex);
+ if (setgroups_allowed) {
+ /* Enabling setgroups after setgroups has been disabled
+ * is not allowed.
+ */
+ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
+ goto out_unlock;
+ } else {
+ /* Permanently disabling setgroups after setgroups has
+ * been enabled by writing the gid_map is not allowed.
+ */
+ if (ns->gid_map.nr_extents != 0)
+ goto out_unlock;
+ ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
+ }
+ mutex_unlock(&userns_state_mutex);
+
+ /* Report a successful write */
+ *ppos = count;
+ ret = count;
+out:
+ return ret;
+out_unlock:
+ mutex_unlock(&userns_state_mutex);
+ goto out;
+}
+
+bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ bool allowed;
+
+ mutex_lock(&userns_state_mutex);
+ /* It is not safe to use setgroups until a gid mapping in
+ * the user namespace has been established.
+ */
+ allowed = ns->gid_map.nr_extents != 0;
+ /* Is setgroups allowed? */
+ allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
+ mutex_unlock(&userns_state_mutex);
+
+ return allowed;
+}
+
+static inline struct user_namespace *to_user_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct user_namespace, ns);
+}
+
+static struct ns_common *userns_get(struct task_struct *task)
{
struct user_namespace *user_ns;
@@ -849,17 +957,17 @@ static void *userns_get(struct task_struct *task)
user_ns = get_user_ns(__task_cred(task)->user_ns);
rcu_read_unlock();
- return user_ns;
+ return user_ns ? &user_ns->ns : NULL;
}
-static void userns_put(void *ns)
+static void userns_put(struct ns_common *ns)
{
- put_user_ns(ns);
+ put_user_ns(to_user_ns(ns));
}
-static int userns_install(struct nsproxy *nsproxy, void *ns)
+static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
- struct user_namespace *user_ns = ns;
+ struct user_namespace *user_ns = to_user_ns(ns);
struct cred *cred;
/* Don't allow gaining capabilities by reentering
@@ -888,19 +996,12 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
return commit_creds(cred);
}
-static unsigned int userns_inum(void *ns)
-{
- struct user_namespace *user_ns = ns;
- return user_ns->proc_inum;
-}
-
const struct proc_ns_operations userns_operations = {
.name = "user",
.type = CLONE_NEWUSER,
.get = userns_get,
.put = userns_put,
.install = userns_install,
- .inum = userns_inum,
};
static __init int user_namespaces_init(void)
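/*
 * A user-space sketch (not part of the patch itself) of how the new
 * /proc/<pid>/setgroups file interacts with gid_map: without CAP_SETGID in
 * the parent namespace, a gid_map write is only permitted once setgroups has
 * been set to "deny".  Error handling is omitted and the program layout is
 * illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_file(const char *path, const char *buf)
{
	int fd = open(path, O_WRONLY);

	write(fd, buf, strlen(buf));
	close(fd);
}

int main(void)
{
	char map[64];

	unshare(CLONE_NEWUSER);
	write_file("/proc/self/setgroups", "deny");
	snprintf(map, sizeof(map), "0 %u 1", (unsigned)getgid());
	write_file("/proc/self/gid_map", map);
	snprintf(map, sizeof(map), "0 %u 1", (unsigned)getuid());
	write_file("/proc/self/uid_map", map);
	return 0;
}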
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 883aaaa7de8a..831ea7108232 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -42,12 +42,14 @@ static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
if (!ns)
return ERR_PTR(-ENOMEM);
- err = proc_alloc_inum(&ns->proc_inum);
+ err = ns_alloc_inum(&ns->ns);
if (err) {
kfree(ns);
return ERR_PTR(err);
}
+ ns->ns.ops = &utsns_operations;
+
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
ns->user_ns = get_user_ns(user_ns);
@@ -84,11 +86,16 @@ void free_uts_ns(struct kref *kref)
ns = container_of(kref, struct uts_namespace, kref);
put_user_ns(ns->user_ns);
- proc_free_inum(ns->proc_inum);
+ ns_free_inum(&ns->ns);
kfree(ns);
}
-static void *utsns_get(struct task_struct *task)
+static inline struct uts_namespace *to_uts_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct uts_namespace, ns);
+}
+
+static struct ns_common *utsns_get(struct task_struct *task)
{
struct uts_namespace *ns = NULL;
struct nsproxy *nsproxy;
@@ -101,17 +108,17 @@ static void *utsns_get(struct task_struct *task)
}
task_unlock(task);
- return ns;
+ return ns ? &ns->ns : NULL;
}
-static void utsns_put(void *ns)
+static void utsns_put(struct ns_common *ns)
{
- put_uts_ns(ns);
+ put_uts_ns(to_uts_ns(ns));
}
-static int utsns_install(struct nsproxy *nsproxy, void *new)
+static int utsns_install(struct nsproxy *nsproxy, struct ns_common *new)
{
- struct uts_namespace *ns = new;
+ struct uts_namespace *ns = to_uts_ns(new);
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -123,18 +130,10 @@ static int utsns_install(struct nsproxy *nsproxy, void *new)
return 0;
}
-static unsigned int utsns_inum(void *vp)
-{
- struct uts_namespace *ns = vp;
-
- return ns->proc_inum;
-}
-
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
- .inum = utsns_inum,
};
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 09b685daee3d..6202b08f1933 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
struct worker_pool *pool = (void *)__pool;
struct work_struct *work;
- spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */
- spin_lock(&pool->lock);
+ spin_lock_irq(&pool->lock);
+ spin_lock(&wq_mayday_lock); /* for wq->maydays */
if (need_to_create_worker(pool)) {
/*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
send_mayday(work);
}
- spin_unlock(&pool->lock);
- spin_unlock_irq(&wq_mayday_lock);
+ spin_unlock(&wq_mayday_lock);
+ spin_unlock_irq(&pool->lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
@@ -2248,12 +2248,30 @@ repeat:
* Slurp in all works issued via this workqueue and
* process'em.
*/
- WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+ WARN_ON_ONCE(!list_empty(scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_pwq(work) == pwq)
move_linked_works(work, scheduled, &n);
- process_scheduled_works(rescuer);
+ if (!list_empty(scheduled)) {
+ process_scheduled_works(rescuer);
+
+ /*
+ * The above execution of rescued work items could
+ * have created more to rescue through
+ * pwq_activate_first_delayed() or chained
+ * queueing. Let's put @pwq back on the mayday list so
+ * that such back-to-back work items, which may be
+ * being used to relieve memory pressure, don't
+ * incur MAYDAY_INTERVAL delay in between.
+ */
+ if (need_to_create_worker(pool)) {
+ spin_lock(&wq_mayday_lock);
+ get_pwq(pwq);
+ list_move_tail(&pwq->mayday_node, &wq->maydays);
+ spin_unlock(&wq_mayday_lock);
+ }
+ }
/*
* Put the reference grabbed by send_mayday(). @pool won't
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d780351835e9..5f2ce616c046 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -227,6 +227,22 @@ config UNUSED_SYMBOLS
you really need it, and what the merge plan to the mainline kernel for
your module is.
+config PAGE_OWNER
+ bool "Track page owner"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select DEBUG_FS
+ select STACKTRACE
+ select PAGE_EXTENSION
+ help
+ This keeps track of which call chain is the owner of a page; it may
+ help to find bare alloc_page(s) leaks. Even if you include this
+ feature in your build, it is disabled by default. You should pass
+ "page_owner=on" as a boot parameter to enable it. It eats a fair
+ amount of memory if enabled. See tools/vm/page_owner_sort.c for a
+ user-space helper.
+
+ If unsure, say N.
+
config DEBUG_FS
bool "Debug Filesystem"
help
diff --git a/lib/Makefile b/lib/Makefile
index 923a191eaf71..3c3b30b9e020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
+ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
diff --git a/lib/audit.c b/lib/audit.c
index 1d726a22565b..b8fb5ee81e26 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -54,6 +54,9 @@ int audit_classify_syscall(int abi, unsigned syscall)
case __NR_socketcall:
return 4;
#endif
+#ifdef __NR_execveat
+ case __NR_execveat:
+#endif
case __NR_execve:
return 5;
default:
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b499ab6ada29..324ea9eab8c1 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -12,6 +12,8 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
+
+#include <asm/page.h>
#include <asm/uaccess.h>
/*
@@ -326,30 +328,32 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
}
EXPORT_SYMBOL(bitmap_clear);
-/*
- * bitmap_find_next_zero_area - find a contiguous aligned zero area
+/**
+ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is a multiple of that power of 2.
*/
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
- index = __ALIGN_MASK(index, align_mask);
+ index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
end = index + nr;
if (end > size)
@@ -361,7 +365,7 @@ again:
}
return index;
}
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
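/*
 * A minimal sketch (not from this patch) of the new @align_offset argument:
 * the caller wants "bit index + offset", rather than the bit index alone, to
 * land on the alignment boundary.  Names here are illustrative.
 */
static unsigned long find_aligned_run(unsigned long *bitmap,
				      unsigned long nbits, unsigned int nr,
				      unsigned long base_off)
{
	/* returned index plus base_off will be a multiple of 4 */
	return bitmap_find_next_zero_area_off(bitmap, nbits, 0, nr,
					      3 /* align to 4 bits */,
					      base_off);
}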
/*
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
@@ -584,6 +588,33 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap should be printed in list format
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified or hex digits grouped into comma-separated
+ * sets of 8 digits/set. Returns the number of characters written to buf.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits)
+{
+ ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+ int n = 0;
+
+ if (len > 1) {
+ n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) :
+ bitmap_scnprintf(buf, len, maskp, nmaskbits);
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ }
+ return n;
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
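/*
 * A sketch (not in this hunk) of the kind of sysfs ->show() callback
 * bitmap_print_to_pagebuf() is aimed at: sysfs hands the callback a
 * page-sized, page-aligned buffer, which is exactly what the helper assumes.
 * The attribute name is hypothetical.
 */
static ssize_t online_cpus_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return bitmap_print_to_pagebuf(true, buf,
				       cpumask_bits(cpu_online_mask),
				       nr_cpu_ids);
}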
+
+/**
* __bitmap_parselist - convert list format ASCII string to bitmap
* @buf: read nul-terminated user string from this buffer
* @buflen: buffer size in bytes. If string is smaller than this
diff --git a/lib/decompress.c b/lib/decompress.c
index 37f3c786348f..528ff932d8e4 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -44,8 +44,8 @@ struct compress_format {
};
static const struct compress_format compressed_formats[] __initconst = {
- { {037, 0213}, "gzip", gunzip },
- { {037, 0236}, "gzip", gunzip },
+ { {0x1f, 0x8b}, "gzip", gunzip },
+ { {0x1f, 0x9e}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 8290e0bef7ea..6dd0335ea61b 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
- if (origPtr > dbufSize)
+ if (origPtr >= dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
diff --git a/lib/devres.c b/lib/devres.c
index f4a195a6efe4..0f1dd2e9d2c1 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -23,7 +23,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
* Managed ioremap(). Map is automatically unmapped on driver detach.
*/
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
- unsigned long size)
+ resource_size_t size)
{
void __iomem **ptr, *addr;
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(devm_ioremap);
* detach.
*/
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
- unsigned long size)
+ resource_size_t size)
{
void __iomem **ptr, *addr;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index d7d501ea856d..f1cdeb024d17 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -40,10 +40,16 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
- if (attr->verbose > 0)
- printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
- if (attr->verbose > 1)
- dump_stack();
+ if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
+ printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
+ "name %pd, interval %lu, probability %lu, "
+ "space %d, times %d\n", attr->dname,
+ attr->probability, attr->interval,
+ atomic_read(&attr->space),
+ atomic_read(&attr->times));
+ if (attr->verbose > 1)
+ dump_stack();
+ }
}
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
@@ -202,6 +208,12 @@ struct dentry *fault_create_debugfs_attr(const char *name,
goto fail;
if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
goto fail;
+ if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval))
+ goto fail;
+ if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst))
+ goto fail;
if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
goto fail;
@@ -222,6 +234,7 @@ struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+ attr->dname = dget(dir);
return dir;
fail:
debugfs_remove_recursive(dir);
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f4bd57..000000000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
- * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
- * Copyright 2013 Thomas Graf <tgraf@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
- .hash = jhash,
- .hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
- return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
- return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
- setup_arch_fast_hash(&arch_hash_ops);
- return 0;
-}
-early_initcall(hashlib_init);
diff --git a/lib/iovec.c b/lib/iovec.c
index df3abd1eaa4a..2d99cb4a5006 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -29,31 +29,6 @@ EXPORT_SYMBOL(memcpy_fromiovec);
/*
* Copy kernel to iovec. Returns -EFAULT on error.
- *
- * Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
- while (len > 0) {
- if (iov->iov_len) {
- int copy = min_t(unsigned int, iov->iov_len, len);
- if (copy_to_user(iov->iov_base, kdata, copy))
- return -EFAULT;
- kdata += copy;
- len -= copy;
- iov->iov_len -= copy;
- iov->iov_base += copy;
- }
- iov++;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
-/*
- * Copy kernel to iovec. Returns -EFAULT on error.
*/
int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
diff --git a/lib/kobject.c b/lib/kobject.c
index 58751bb80a7c..03d4ab349fa7 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -976,7 +976,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
{
const struct kobj_ns_type_operations *ops = NULL;
- if (parent && parent->ktype->child_ns_type)
+ if (parent && parent->ktype && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 624a0b7c05ef..6c3c723e902b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
@@ -32,7 +32,7 @@
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
- return ht->p.mutex_is_held();
+ return ht->p.mutex_is_held(ht->p.parent);
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
return obj_hashfn(ht, rht_obj(ht, he), hsize);
}
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
{
struct bucket_table *tbl;
size_t size;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, flags);
+ tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (tbl == NULL)
tbl = vzalloc(size);
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
/**
* rhashtable_expand - Expand hash table while allowing concurrent lookups
* @ht: the hash table to expand
- * @flags: allocation flags
*
* A secondary bucket array is allocated and the hash entries are migrated
* while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_head *he;
@@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
return 0;
- new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+ new_tbl = bucket_table_alloc(old_tbl->size * 2);
if (new_tbl == NULL)
return -ENOMEM;
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
* @ht: the hash table to shrink
- * @flags: allocation flags
*
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
@@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
if (ht->shift <= ht->p.min_shift)
return 0;
- ntbl = bucket_table_alloc(tbl->size / 2, flags);
+ ntbl = bucket_table_alloc(tbl->size / 2);
if (ntbl == NULL)
return -ENOMEM;
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
 * rhashtable_insert - insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
*
 * Will automatically grow the table via rhashtable_expand() if the
* grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
- gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
ht->nelems++;
if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
- rhashtable_expand(ht, flags);
+ rhashtable_expand(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
* @ht: hash table
* @obj: pointer to hash head inside object
* @pprev: pointer to previous element
- * @flags: allocation flags (table expansion)
*
 * Identical to rhashtable_remove() but caller is already aware of the element
* in front of the element to be deleted. This is in particular useful for
* deletion when combined with walking or lookup.
*/
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev, gfp_t flags)
+ struct rhash_head __rcu **pprev)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
if (ht->p.shrink_decision &&
ht->p.shrink_decision(ht, tbl->size))
- rhashtable_shrink(ht, flags);
+ rhashtable_shrink(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
* rhashtable_remove - remove object from hash table
* @ht: hash table
* @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
*
 * Since the hash chain is singly linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
- gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
continue;
}
- rhashtable_remove_pprev(ht, he, pprev, flags);
+ rhashtable_remove_pprev(ht, he, pprev);
return true;
}
@@ -531,8 +524,10 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .head_offset = offsetof(struct test_obj, node),
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
- * .hashfn = arch_fast_hash,
+ * .hashfn = jhash,
+ * #ifdef CONFIG_PROVE_LOCKING
* .mutex_is_held = &my_mutex_is_held,
+ * #endif
* };
*
* Configuration Example 2: Variable length keys
@@ -550,9 +545,11 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
*
* struct rhashtable_params params = {
* .head_offset = offsetof(struct test_obj, node),
- * .hashfn = arch_fast_hash,
+ * .hashfn = jhash,
* .obj_hashfn = my_hash_fn,
+ * #ifdef CONFIG_PROVE_LOCKING
* .mutex_is_held = &my_mutex_is_held,
+ * #endif
* };
*/
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -572,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
if (params->nelem_hint)
size = rounded_hashtable_size(params);
- tbl = bucket_table_alloc(size, GFP_KERNEL);
+ tbl = bucket_table_alloc(size);
if (tbl == NULL)
return -ENOMEM;
@@ -613,10 +610,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
#define TEST_PTR ((void *) 0xdeadbeef)
#define TEST_NEXPANDS 4
-static int test_mutex_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int test_mutex_is_held(void *parent)
{
return 1;
}
+#endif
struct test_obj {
void *ptr;
@@ -654,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht)
return 0;
}
-static void test_bucket_stats(struct rhashtable *ht,
- struct bucket_table *tbl,
- bool quiet)
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
- unsigned int cnt, i, total = 0;
+ unsigned int cnt, rcu_cnt, i, total = 0;
struct test_obj *obj;
+ struct bucket_table *tbl;
+ tbl = rht_dereference_rcu(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
- cnt = 0;
+ rcu_cnt = cnt = 0;
if (!quiet)
pr_info(" [%#4x/%zu]", i, tbl->size);
@@ -674,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht,
pr_cont(" [%p],", obj);
}
+ rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+ rcu_cnt++;
+
+ if (rcu_cnt != cnt)
+ pr_warn("Test failed: Chain count mismatch %d != %d",
+ cnt, rcu_cnt);
+
if (!quiet)
pr_cont("\n [%#x] first element: %p, chain length: %u\n",
i, tbl->buckets[i], cnt);
@@ -681,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht,
pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
total, ht->nelems, TEST_ENTRIES);
+
+ if (total != ht->nelems || total != TEST_ENTRIES)
+ pr_warn("Test failed: Total count mismatch ^^^");
}
static int __init test_rhashtable(struct rhashtable *ht)
@@ -707,18 +716,17 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj->ptr = TEST_PTR;
obj->value = i * 2;
- rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+ rhashtable_insert(ht, &obj->node);
}
rcu_read_lock();
- tbl = rht_dereference_rcu(ht->tbl, ht);
- test_bucket_stats(ht, tbl, true);
+ test_bucket_stats(ht, true);
test_rht_lookup(ht);
rcu_read_unlock();
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table expansion iteration %u...\n", i);
- rhashtable_expand(ht, GFP_KERNEL);
+ rhashtable_expand(ht);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
@@ -728,7 +736,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table shrinkage iteration %u...\n", i);
- rhashtable_shrink(ht, GFP_KERNEL);
+ rhashtable_shrink(ht);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
@@ -736,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
rcu_read_unlock();
}
+ rcu_read_lock();
+ test_bucket_stats(ht, true);
+ rcu_read_unlock();
+
pr_info(" Deleting %d keys\n", TEST_ENTRIES);
for (i = 0; i < TEST_ENTRIES; i++) {
u32 key = i * 2;
@@ -743,7 +755,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj = rhashtable_lookup(ht, &key);
BUG_ON(!obj);
- rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+ rhashtable_remove(ht, &obj->node);
kfree(obj);
}
@@ -766,8 +778,10 @@ static int __init test_rht_init(void)
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
- .hashfn = arch_fast_hash,
+ .hashfn = jhash,
+#ifdef CONFIG_PROVE_LOCKING
.mutex_is_held = &test_mutex_is_held,
+#endif
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
};
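/*
 * A small usage sketch (not part of the patch) of the rhashtable API after
 * this change: insert/expand/shrink/remove no longer take a gfp_t, and plain
 * jhash replaces arch_fast_hash as the suggested hash function.  "my_obj" is
 * an illustrative structure.
 */
struct my_obj {
	int key;
	struct rhash_head node;
};

static void my_add(struct rhashtable *ht, struct my_obj *obj)
{
	rhashtable_insert(ht, &obj->node);	/* may grow the table itself */
}

static void my_del(struct rhashtable *ht, struct my_obj *obj)
{
	rhashtable_remove(ht, &obj->node);	/* may shrink the table itself */
}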
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5e256271b47b..7de89f4a36cf 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/quicklist.h>
+#include <linux/cma.h>
void show_mem(unsigned int filter)
{
@@ -38,7 +39,12 @@ void show_mem(unsigned int filter)
printk("%lu pages RAM\n", total);
printk("%lu pages HighMem/MovableOnly\n", highmem);
+#ifdef CONFIG_CMA
+ printk("%lu pages reserved\n", (reserved - totalcma_pages));
+ printk("%lu pages cma reserved\n", totalcma_pages);
+#else
printk("%lu pages reserved\n", reserved);
+#endif
#ifdef CONFIG_QUICKLIST
printk("%lu pages in pagetable cache\n",
quicklist_total_size());
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 23e070bcf72d..80d78c51f65f 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -124,7 +124,7 @@ static struct bpf_test tests[] = {
{ { 0, 0xfffffffd } }
},
{
- "DIV_KX",
+ "DIV_MOD_KX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 8),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
@@ -134,12 +134,18 @@ static struct bpf_test tests[] = {
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC | FLAG_NO_DATA,
{ },
- { { 0, 0x40000001 } }
+ { { 0, 0x20000000 } }
},
{
"AND_OR_LSH_K",
@@ -1756,6 +1762,49 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } }
},
+ {
+ "nmap reduced",
+ .u.insns_int = {
+ BPF_MOV64_REG(R6, R1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
+ BPF_MOV32_IMM(R0, 18),
+ BPF_STX_MEM(BPF_W, R10, R0, -64),
+ BPF_LDX_MEM(BPF_W, R7, R10, -64),
+ BPF_LD_IND(BPF_W, R7, 14),
+ BPF_STX_MEM(BPF_W, R10, R0, -60),
+ BPF_MOV32_IMM(R0, 280971478),
+ BPF_STX_MEM(BPF_W, R10, R0, -56),
+ BPF_LDX_MEM(BPF_W, R7, R10, -56),
+ BPF_LDX_MEM(BPF_W, R0, R10, -60),
+ BPF_ALU32_REG(BPF_SUB, R0, R7),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
+ BPF_MOV32_IMM(R0, 22),
+ BPF_STX_MEM(BPF_W, R10, R0, -56),
+ BPF_LDX_MEM(BPF_W, R7, R10, -56),
+ BPF_LD_IND(BPF_H, R7, 14),
+ BPF_STX_MEM(BPF_W, R10, R0, -52),
+ BPF_MOV32_IMM(R0, 17366),
+ BPF_STX_MEM(BPF_W, R10, R0, -48),
+ BPF_LDX_MEM(BPF_W, R7, R10, -48),
+ BPF_LDX_MEM(BPF_W, R0, R10, -52),
+ BPF_ALU32_REG(BPF_SUB, R0, R7),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
+ BPF_MOV32_IMM(R0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
+ { { 38, 256 } }
+ },
};
static struct net_device dev;
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 4b2443254de2..56badfc4810a 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -1,8 +1,18 @@
+config PAGE_EXTENSION
+ bool "Extend memmap on extra space for more information on page"
+ ---help---
+ Extend the memmap with extra space to store more information about
+ pages. This can be used by debugging features that need to attach an
+ extra field to every page. The extension saves memory by allocating
+ this extra memory only when the boot-time configuration asks for it.
+
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
depends on !KMEMCHECK
+ select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
diff --git a/mm/Makefile b/mm/Makefile
index b3c6ce932c64..4bf586e66378 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+obj-$(CONFIG_PAGE_OWNER) += page_owner.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZPOOL) += zpool.o
@@ -71,3 +72,4 @@ obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_CMA) += cma.o
obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
+obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
diff --git a/mm/cma.c b/mm/cma.c
index 8e9ec13d31db..a85ae28709a3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -33,6 +33,7 @@
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
+#include <linux/io.h>
struct cma {
unsigned long base_pfn;
@@ -63,6 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
return (1UL << (align_order - cma->order_per_bit)) - 1;
}
+static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+{
+ unsigned int alignment;
+
+ if (align_order <= cma->order_per_bit)
+ return 0;
+ alignment = 1UL << (align_order - cma->order_per_bit);
+ return ALIGN(cma->base_pfn, alignment) -
+ (cma->base_pfn >> cma->order_per_bit);
+}
+
static unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
@@ -313,6 +325,11 @@ int __init cma_declare_contiguous(phys_addr_t base,
}
}
+ /*
+ * kmemleak scans/reads tracked objects for pointers to other
+ * objects but this address isn't mapped and accessible
+ */
+ kmemleak_ignore(phys_to_virt(addr));
base = addr;
}
@@ -320,6 +337,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (ret)
goto err;
+ totalcma_pages += (size / PAGE_SIZE);
pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
return 0;
@@ -340,7 +358,7 @@ err:
*/
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
- unsigned long mask, pfn, start = 0;
+ unsigned long mask, offset, pfn, start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
@@ -355,13 +373,15 @@ struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
return NULL;
mask = cma_bitmap_aligned_mask(cma, align);
+ offset = cma_bitmap_aligned_offset(cma, align);
bitmap_maxno = cma_bitmap_maxno(cma);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
for (;;) {
mutex_lock(&cma->lock);
- bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
- bitmap_maxno, start, bitmap_count, mask);
+ bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
+ bitmap_maxno, start, bitmap_count, mask,
+ offset);
if (bitmap_no >= bitmap_maxno) {
mutex_unlock(&cma->lock);
break;
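/*
 * A usage sketch (not part of this patch): asking CMA for a block whose
 * physical address is 2MB aligned, which the offset handling above is meant
 * to keep correct even when the area's base_pfn is not itself aligned to the
 * request.  "my_cma" is a hypothetical area set up elsewhere via
 * cma_declare_contiguous().
 */
static struct page *my_grab_2mb(struct cma *my_cma)
{
	/* 512 4K pages, aligned to 2^9 pages = 2MB */
	return cma_alloc(my_cma, 512, 9);
}

static void my_drop_2mb(struct cma *my_cma, struct page *page)
{
	cma_release(my_cma, page, 512);
}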
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 789ff70c8a4a..5bf5906ce13b 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -2,23 +2,55 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
+static bool page_poisoning_enabled __read_mostly;
+
+static bool need_page_poisoning(void)
+{
+ if (!debug_pagealloc_enabled())
+ return false;
+
+ return true;
+}
+
+static void init_page_poisoning(void)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ page_poisoning_enabled = true;
+}
+
+struct page_ext_operations page_poisoning_ops = {
+ .need = need_page_poisoning,
+ .init = init_page_poisoning,
+};
+
static inline void set_page_poison(struct page *page)
{
- __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
+ __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
static inline void clear_page_poison(struct page *page)
{
- __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
+ __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
static inline bool page_poison(struct page *page)
{
- return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ page_ext = lookup_page_ext(page);
+ return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
static void poison_page(struct page *page)
@@ -93,8 +125,11 @@ static void unpoison_pages(struct page *page, int n)
unpoison_page(page + i);
}
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!page_poisoning_enabled)
+ return;
+
if (enable)
unpoison_pages(page, numpages);
else
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3bcfd81db45e..2ad7adf4f0a4 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -117,7 +117,11 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
__filemap_fdatawrite_range(mapping, offset, endbyte,
WB_SYNC_NONE);
- /* First and last FULL page! */
+ /*
+ * First and last FULL page! Partial pages are deliberately
+ * preserved on the expectation that it is better to preserve
+ * needed memory than to discard unneeded memory.
+ */
start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
end_index = (endbyte >> PAGE_CACHE_SHIFT);
diff --git a/mm/filemap.c b/mm/filemap.c
index 14b4642279f1..e8905bc3cbd7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -62,16 +62,16 @@
/*
* Lock ordering:
*
- * ->i_mmap_mutex (truncate_pagecache)
+ * ->i_mmap_rwsem (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
* ->i_mutex
- * ->i_mmap_mutex (truncate->unmap_mapping_range)
+ * ->i_mmap_rwsem (truncate->unmap_mapping_range)
*
* ->mmap_sem
- * ->i_mmap_mutex
+ * ->i_mmap_rwsem
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
@@ -85,7 +85,7 @@
* sb_lock (fs/fs-writeback.c)
* ->mapping->tree_lock (__sync_single_inode)
*
- * ->i_mmap_mutex
+ * ->i_mmap_rwsem
* ->anon_vma.lock (vma_adjust)
*
* ->anon_vma.lock
@@ -105,7 +105,7 @@
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
- * ->i_mmap_mutex
+ * ->i_mmap_rwsem
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index d8d9fe3f685c..0d105aeff82f 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -155,22 +155,14 @@ xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
EXPORT_SYMBOL_GPL(xip_file_read);
/*
- * __xip_unmap is invoked from xip_unmap and
- * xip_write
+ * __xip_unmap is invoked from xip_unmap and xip_write
*
* This function walks all vmas of the address_space and unmaps the
* __xip_sparse_page when found at pgoff.
*/
-static void
-__xip_unmap (struct address_space * mapping,
- unsigned long pgoff)
+static void __xip_unmap(struct address_space * mapping, unsigned long pgoff)
{
struct vm_area_struct *vma;
- struct mm_struct *mm;
- unsigned long address;
- pte_t *pte;
- pte_t pteval;
- spinlock_t *ptl;
struct page *page;
unsigned count;
int locked = 0;
@@ -182,11 +174,14 @@ __xip_unmap (struct address_space * mapping,
return;
retry:
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- mm = vma->vm_mm;
- address = vma->vm_start +
+ pte_t *pte, pteval;
+ spinlock_t *ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address = vma->vm_start +
((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
pte = page_check_address(page, mm, address, &ptl, 1);
if (pte) {
@@ -202,7 +197,7 @@ retry:
page_cache_release(page);
}
}
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_read(mapping);
if (locked) {
mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/fremap.c b/mm/fremap.c
index 72b8fa361433..2805d71cf476 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
if (pte_present(pte)) {
flush_cache_page(vma, addr, pte_pfn(pte));
- pte = ptep_clear_flush(vma, addr, ptep);
+ pte = ptep_clear_flush_notify(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
@@ -238,13 +238,13 @@ get_write_lock:
}
goto out_freed;
}
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
flush_dcache_mmap_lock(mapping);
vma->vm_flags |= VM_NONLINEAR;
vma_interval_tree_remove(vma, &mapping->i_mmap);
vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
flush_dcache_mmap_unlock(mapping);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
if (vma->vm_flags & VM_LOCKED) {
diff --git a/mm/gup.c b/mm/gup.c
index cd62c8c90d4a..0ca1df9075ab 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3,7 +3,6 @@
#include <linux/err.h>
#include <linux/spinlock.h>
-#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
@@ -12,6 +11,7 @@
#include <linux/sched.h>
#include <linux/rwsem.h>
+#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include "internal.h"
@@ -875,6 +875,49 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return 1;
}
+static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int write,
+ struct page **pages, int *nr)
+{
+ int refs;
+ struct page *head, *page, *tail;
+
+ if (write && !pgd_write(orig))
+ return 0;
+
+ refs = 0;
+ head = pgd_page(orig);
+ page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
+ }
+
+ return 1;
+}
+
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -902,6 +945,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
pages, nr))
return 0;
+ } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
+ /*
+ * architectures may use different formats for the hugetlbfs
+ * pmd and the THP pmd
+ */
+ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
+ PMD_SHIFT, next, write, pages, nr))
+ return 0;
} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
@@ -909,22 +960,26 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
return 1;
}
-static int gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
- pudp = pud_offset(pgdp, addr);
+ pudp = pud_offset(&pgd, addr);
do {
pud_t pud = ACCESS_ONCE(*pudp);
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (pud_huge(pud)) {
+ if (unlikely(pud_huge(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, write,
- pages, nr))
+ pages, nr))
+ return 0;
+ } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
+ if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
+ PUD_SHIFT, next, write, pages, nr))
return 0;
} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
@@ -970,10 +1025,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
+ pgd_t pgd = ACCESS_ONCE(*pgdp);
+
next = pgd_addr_end(addr, end);
- if (pgd_none(*pgdp))
+ if (pgd_none(pgd))
break;
- else if (!gup_pud_range(pgdp, addr, next, write, pages, &nr))
+ if (unlikely(pgd_huge(pgd))) {
+ if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
+ pages, &nr))
+ break;
+ } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+ PGDIR_SHIFT, next, write, pages, &nr))
+ break;
+ } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5b2c6875fc38..817a875f2b8c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -804,7 +804,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
return VM_FAULT_OOM;
- if (!(flags & FAULT_FLAG_WRITE) &&
+ if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
transparent_hugepage_use_zero_page()) {
spinlock_t *ptl;
pgtable_t pgtable;
@@ -1035,7 +1035,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page);
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1178,7 +1178,7 @@ alloc:
pmd_t entry;
entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1399,7 +1399,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* pgtable_trans_huge_withdraw after finishing pmdp related
* operations.
*/
- orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
+ orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd,
+ tlb->fullmm);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
if (is_huge_zero_pmd(orig_pmd)) {
@@ -1511,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
pmd_t entry;
ret = 1;
if (!prot_numa) {
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ entry = pmdp_get_and_clear_notify(mm, addr, pmd);
if (pmd_numa(entry))
entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
@@ -1643,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
* serialize against split_huge_page*.
*/
pmdp_splitting_flush(vma, address, pmd);
+
ret = 1;
spin_unlock(ptl);
}
@@ -2833,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
pmd_t _pmd;
int i;
- pmdp_clear_flush(vma, haddr, pmd);
+ pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30cd96879152..85032de5e20f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -582,7 +582,7 @@ retry_cpuset:
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
+ if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
if (avoid_reserve)
@@ -1457,7 +1457,7 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
return 0;
found:
- BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+ BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
/* Put them into a private list first because mem_map is not up yet */
list_add(&m->list, &huge_boot_pages);
m->hstate = h;
@@ -2083,7 +2083,7 @@ static void hugetlb_register_node(struct node *node)
* devices of nodes that have memory. All on-line nodes should have
* registered their associated device by this time.
*/
-static void hugetlb_register_all_nodes(void)
+static void __init hugetlb_register_all_nodes(void)
{
int nid;
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
}
set_huge_pte_at(dst, addr, dst_pte, entry);
} else {
- if (cow)
+ if (cow) {
huge_ptep_set_wrprotect(src, addr, src_pte);
+ mmu_notifier_invalidate_range(src, mmun_start,
+ mmun_end);
+ }
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
@@ -2726,9 +2729,9 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
* on its way out. We're lucky that the flag has such an appropriate
* name, and can in fact be safely cleared here. We could clear it
* before the __unmap_hugepage_range above, but all that's necessary
- * is to clear it before releasing the i_mmap_mutex. This works
+ * is to clear it before releasing the i_mmap_rwsem. This works
* because in the context this is called, the VMA is about to be
- * destroyed and the i_mmap_mutex is held.
+ * destroyed and the i_mmap_rwsem is held.
*/
vma->vm_flags &= ~VM_MAYSHARE;
}
@@ -2774,7 +2777,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* this mapping should be shared between all the VMAs,
* __unmap_hugepage_range() is called as the lock is already held
*/
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
@@ -2791,7 +2794,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
unmap_hugepage_range(iter_vma, address,
address + huge_page_size(h), page);
}
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
/*
@@ -2901,6 +2904,7 @@ retry_avoidcopy:
/* Break COW */
huge_ptep_clear_flush(vma, address, ptep);
+ mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
@@ -3348,7 +3352,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
flush_cache_range(vma, address, end);
mmu_notifier_invalidate_range_start(mm, start, end);
- mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
ptep = huge_pte_offset(mm, address);
@@ -3370,13 +3374,14 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
spin_unlock(ptl);
}
/*
- * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+ * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
- * once we release i_mmap_mutex, another task can do the final put_page
+ * once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
*/
flush_tlb_range(vma, start, end);
- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ mmu_notifier_invalidate_range(mm, start, end);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
return pages << h->order;
@@ -3525,7 +3530,7 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
* code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
* racing tasks could either miss the sharing (see huge_pte_offset) or select a
* bad pmd for sharing.
*/
@@ -3544,7 +3549,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
@@ -3572,7 +3577,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
return pte;
}
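The hugetlb hunks above are part of the i_mmap_mutex to i_mmap_rwsem conversion: interval-tree writers now take the semaphore exclusively, while read-only walkers can share it. As a minimal sketch, the new helpers are assumed to expand to plain rwsem operations (the real definitions live in include/linux/fs.h):

/* Assumed expansion of the i_mmap_rwsem helpers used throughout this diff. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_lock_read(struct address_space *mapping)
{
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}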
diff --git a/mm/ksm.c b/mm/ksm.c
index 6b2e337bc03c..d247efab5073 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 * this assures us that no O_DIRECT can happen after the check
* or in the middle of the check.
*/
- entry = ptep_clear_flush(vma, addr, ptep);
+ entry = ptep_clear_flush_notify(vma, addr, ptep);
/*
* Check that no O_DIRECT or similar I/O is in progress on the
* page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
page_add_anon_rmap(kpage, vma, addr);
flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush(vma, addr, ptep);
+ ptep_clear_flush_notify(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
page_remove_rmap(page);
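The KSM hunks above switch from ptep_clear_flush() to ptep_clear_flush_notify() so that secondary MMUs (IOMMUs, KVM and the like) are invalidated at the same point as the CPU TLB. Assuming the helper is the usual mmu_notifier wrapper, it behaves roughly like this sketch (the real helper is a macro in <linux/mmu_notifier.h>):

/* Simplified sketch of ptep_clear_flush_notify(); not the exact macro. */
static pte_t ptep_clear_flush_notify_sketch(struct vm_area_struct *vma,
					    unsigned long address, pte_t *ptep)
{
	unsigned long start = address & PAGE_MASK;
	pte_t pte;

	pte = ptep_clear_flush(vma, address, ptep);	/* clear the PTE and flush the TLB */
	mmu_notifier_invalidate_range(vma->vm_mm, start, start + PAGE_SIZE);
	return pte;
}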
diff --git a/mm/madvise.c b/mm/madvise.c
index 0938b30da4ab..a271adc93289 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -326,7 +326,7 @@ static long madvise_remove(struct vm_area_struct *vma,
*/
get_file(f);
up_read(&current->mm->mmap_sem);
- error = do_fallocate(f,
+ error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
fput(f);
diff --git a/mm/memblock.c b/mm/memblock.c
index 6ecb0d937fb5..252b77bdf65e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -715,16 +715,13 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
}
/**
- * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
- * @base: the base phys addr of the region
- * @size: the size of the region
*
- * This function isolates region [@base, @base + @size), and mark it with flag
- * MEMBLOCK_HOTPLUG.
+ * This function isolates region [@base, @base + @size), and sets/clears flag
*
 * Return 0 on success, -errno on failure.
*/
-int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
+static int __init_memblock memblock_setclr_flag(phys_addr_t base,
+ phys_addr_t size, int set, int flag)
{
struct memblock_type *type = &memblock.memory;
int i, ret, start_rgn, end_rgn;
@@ -734,37 +731,37 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
return ret;
for (i = start_rgn; i < end_rgn; i++)
- memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);
+ if (set)
+ memblock_set_region_flags(&type->regions[i], flag);
+ else
+ memblock_clear_region_flags(&type->regions[i], flag);
memblock_merge_regions(type);
return 0;
}
/**
- * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
+ * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
* @base: the base phys addr of the region
* @size: the size of the region
*
- * This function isolates region [@base, @base + @size), and clear flag
- * MEMBLOCK_HOTPLUG for the isolated regions.
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
+}
+
+/**
+ * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
*
 * Return 0 on success, -errno on failure.
*/
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
- struct memblock_type *type = &memblock.memory;
- int i, ret, start_rgn, end_rgn;
-
- ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
- if (ret)
- return ret;
-
- for (i = start_rgn; i < end_rgn; i++)
- memblock_clear_region_flags(&type->regions[i],
- MEMBLOCK_HOTPLUG);
-
- memblock_merge_regions(type);
- return 0;
+ return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
/**
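The memblock rework above folds the mark/clear pair into a single memblock_setclr_flag() helper, so each region flag only needs thin wrappers. A hypothetical additional flag (MEMBLOCK_EXAMPLE is made up purely for illustration) would be wired up the same way:

/* Hypothetical wrappers mirroring memblock_mark_hotplug()/memblock_clear_hotplug(). */
int __init_memblock memblock_mark_example(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_EXAMPLE);
}

int __init_memblock memblock_clear_example(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_EXAMPLE);
}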
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 85df503ec023..ef91e856c7e4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -296,7 +296,6 @@ struct mem_cgroup {
* Should the accounting and control be hierarchical, per subtree?
*/
bool use_hierarchy;
- unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
bool oom_lock;
atomic_t under_oom;
@@ -366,22 +365,11 @@ struct mem_cgroup {
/* WARNING: nodeinfo must be the last member here */
};
-/* internal only representation about the status of kmem accounting. */
-enum {
- KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
-};
-
#ifdef CONFIG_MEMCG_KMEM
-static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
-{
- set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
-}
-
static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
- return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
+ return memcg->kmemcg_id >= 0;
}
-
#endif
/* Stuffs for move charges at task migration. */
@@ -1571,7 +1559,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
*/
- if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+ if (fatal_signal_pending(current) || task_will_free_mem(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
@@ -1628,6 +1616,8 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
NULL, "Memory cgroup out of memory");
}
+#if MAX_NUMNODES > 1
+
/**
* test_mem_cgroup_node_reclaimable
* @memcg: the target memcg
@@ -1650,7 +1640,6 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
return false;
}
-#if MAX_NUMNODES > 1
/*
* Always updating the nodemask is not very good - even if we have an empty
@@ -2646,7 +2635,6 @@ static void memcg_register_cache(struct mem_cgroup *memcg,
if (!cachep)
return;
- css_get(&memcg->css);
list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
/*
@@ -2680,40 +2668,6 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
list_del(&cachep->memcg_params->list);
kmem_cache_destroy(cachep);
-
- /* drop the reference taken in memcg_register_cache */
- css_put(&memcg->css);
-}
-
-/*
- * During the creation a new cache, we need to disable our accounting mechanism
- * altogether. This is true even if we are not creating, but rather just
- * enqueing new caches to be created.
- *
- * This is because that process will trigger allocations; some visible, like
- * explicit kmallocs to auxiliary data structures, name strings and internal
- * cache structures; some well concealed, like INIT_WORK() that can allocate
- * objects during debug.
- *
- * If any allocation happens during memcg_kmem_get_cache, we will recurse back
- * to it. This may not be a bounded recursion: since the first cache creation
- * failed to complete (waiting on the allocation), we'll just try to create the
- * cache again, failing at the same point.
- *
- * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
- * memcg_kmem_skip_account. So we enclose anything that might allocate memory
- * inside the following two functions.
- */
-static inline void memcg_stop_kmem_account(void)
-{
- VM_BUG_ON(!current->mm);
- current->memcg_kmem_skip_account++;
-}
-
-static inline void memcg_resume_kmem_account(void)
-{
- VM_BUG_ON(!current->mm);
- current->memcg_kmem_skip_account--;
}
int __memcg_cleanup_cache_params(struct kmem_cache *s)
@@ -2747,9 +2701,7 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
mutex_lock(&memcg_slab_mutex);
list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
cachep = memcg_params_to_cache(params);
- kmem_cache_shrink(cachep);
- if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
- memcg_unregister_cache(cachep);
+ memcg_unregister_cache(cachep);
}
mutex_unlock(&memcg_slab_mutex);
}
@@ -2784,10 +2736,10 @@ static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
struct memcg_register_cache_work *cw;
cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
- if (cw == NULL) {
- css_put(&memcg->css);
+ if (!cw)
return;
- }
+
+ css_get(&memcg->css);
cw->memcg = memcg;
cw->cachep = cachep;
@@ -2810,20 +2762,16 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
* this point we can't allow ourselves back into memcg_kmem_get_cache,
* the safest choice is to do it like this, wrapping the whole function.
*/
- memcg_stop_kmem_account();
+ current->memcg_kmem_skip_account = 1;
__memcg_schedule_register_cache(memcg, cachep);
- memcg_resume_kmem_account();
+ current->memcg_kmem_skip_account = 0;
}
int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
{
unsigned int nr_pages = 1 << order;
- int res;
- res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
- if (!res)
- atomic_add(nr_pages, &cachep->memcg_params->nr_pages);
- return res;
+ return memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
}
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
@@ -2831,7 +2779,6 @@ void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
unsigned int nr_pages = 1 << order;
memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
- atomic_sub(nr_pages, &cachep->memcg_params->nr_pages);
}
/*
@@ -2847,8 +2794,7 @@ void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
* Can't be called in interrupt context or from kernel threads.
* This function needs to be called with rcu_read_lock() held.
*/
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
- gfp_t gfp)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
{
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
@@ -2856,25 +2802,16 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
VM_BUG_ON(!cachep->memcg_params);
VM_BUG_ON(!cachep->memcg_params->is_root_cache);
- if (!current->mm || current->memcg_kmem_skip_account)
+ if (current->memcg_kmem_skip_account)
return cachep;
- rcu_read_lock();
- memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-
+ memcg = get_mem_cgroup_from_mm(current->mm);
if (!memcg_kmem_is_active(memcg))
goto out;
memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
- if (likely(memcg_cachep)) {
- cachep = memcg_cachep;
- goto out;
- }
-
- /* The corresponding put will be done in the workqueue. */
- if (!css_tryget_online(&memcg->css))
- goto out;
- rcu_read_unlock();
+ if (likely(memcg_cachep))
+ return memcg_cachep;
/*
* If we are in a safe context (can wait, and not in interrupt
@@ -2889,12 +2826,17 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
* defer everything.
*/
memcg_schedule_register_cache(memcg, cachep);
- return cachep;
out:
- rcu_read_unlock();
+ css_put(&memcg->css);
return cachep;
}
+void __memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+ if (!is_root_cache(cachep))
+ css_put(&cachep->memcg_params->memcg->css);
+}
+
/*
* We need to verify if the allocation against current->mm->owner's memcg is
* possible for the given order. But the page is not allocated yet, so we'll
@@ -2917,34 +2859,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
*_memcg = NULL;
- /*
- * Disabling accounting is only relevant for some specific memcg
- * internal allocations. Therefore we would initially not have such
- * check here, since direct calls to the page allocator that are
- * accounted to kmemcg (alloc_kmem_pages and friends) only happen
- * outside memcg core. We are mostly concerned with cache allocations,
- * and by having this test at memcg_kmem_get_cache, we are already able
- * to relay the allocation to the root cache and bypass the memcg cache
- * altogether.
- *
- * There is one exception, though: the SLUB allocator does not create
- * large order caches, but rather service large kmallocs directly from
- * the page allocator. Therefore, the following sequence when backed by
- * the SLUB allocator:
- *
- * memcg_stop_kmem_account();
- * kmalloc(<large_number>)
- * memcg_resume_kmem_account();
- *
- * would effectively ignore the fact that we should skip accounting,
- * since it will drive us directly to this function without passing
- * through the cache selector memcg_kmem_get_cache. Such large
- * allocations are extremely rare but can happen, for instance, for the
- * cache arrays. We bring this test here.
- */
- if (!current->mm || current->memcg_kmem_skip_account)
- return true;
-
memcg = get_mem_cgroup_from_mm(current->mm);
if (!memcg_kmem_is_active(memcg)) {
@@ -2985,10 +2899,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
memcg_uncharge_kmem(memcg, 1 << order);
page->mem_cgroup = NULL;
}
-#else
-static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
-{
-}
#endif /* CONFIG_MEMCG_KMEM */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -3539,12 +3449,6 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
return 0;
/*
- * We are going to allocate memory for data shared by all memory
- * cgroups so let's stop accounting here.
- */
- memcg_stop_kmem_account();
-
- /*
* For simplicity, we won't allow this to be disabled. It also can't
* be changed if the cgroup has children already, or if tasks had
* already joined.
@@ -3570,25 +3474,22 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
goto out;
}
- memcg->kmemcg_id = memcg_id;
- INIT_LIST_HEAD(&memcg->memcg_slab_caches);
-
/*
- * We couldn't have accounted to this cgroup, because it hasn't got the
- * active bit set yet, so this should succeed.
+ * We couldn't have accounted to this cgroup, because it hasn't got
+ * activated yet, so this should succeed.
*/
err = page_counter_limit(&memcg->kmem, nr_pages);
VM_BUG_ON(err);
static_key_slow_inc(&memcg_kmem_enabled_key);
/*
- * Setting the active bit after enabling static branching will
+ * A memory cgroup is considered kmem-active as soon as it gets
+ * kmemcg_id. Setting the id after enabling static branching will
* guarantee no one starts accounting before all call sites are
* patched.
*/
- memcg_kmem_set_active(memcg);
+ memcg->kmemcg_id = memcg_id;
out:
- memcg_resume_kmem_account();
return err;
}
@@ -3791,11 +3692,6 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
}
#endif /* CONFIG_NUMA */
-static inline void mem_cgroup_lru_names_not_uptodate(void)
-{
- BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
-}
-
static int memcg_stat_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3803,6 +3699,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
struct mem_cgroup *mi;
unsigned int i;
+ BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
+
for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
continue;
@@ -4259,7 +4157,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
int ret;
- memcg->kmemcg_id = -1;
ret = memcg_propagate_kmem(memcg);
if (ret)
return ret;
@@ -4269,6 +4166,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
+ memcg_unregister_all_caches(memcg);
mem_cgroup_sockets_destroy(memcg);
}
#else
@@ -4724,17 +4622,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
free_percpu(memcg->stat);
- /*
- * We need to make sure that (at least for now), the jump label
- * destruction code runs outside of the cgroup lock. This is because
- * get_online_cpus(), which is called from the static_branch update,
- * can't be called inside the cgroup_lock. cpusets are the ones
- * enforcing this dependency, so if they ever change, we might as well.
- *
- * schedule_work() will guarantee this happens. Be careful if you need
- * to move this code around, and make sure it is outside
- * the cgroup_lock.
- */
disarm_static_keys(memcg);
kfree(memcg);
}
@@ -4804,6 +4691,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
vmpressure_init(&memcg->vmpressure);
INIT_LIST_HEAD(&memcg->event_list);
spin_lock_init(&memcg->event_list_lock);
+#ifdef CONFIG_MEMCG_KMEM
+ memcg->kmemcg_id = -1;
+ INIT_LIST_HEAD(&memcg->memcg_slab_caches);
+#endif
return &memcg->css;
@@ -4885,7 +4776,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
}
spin_unlock(&memcg->event_list_lock);
- memcg_unregister_all_caches(memcg);
vmpressure_cleanup(&memcg->vmpressure);
}
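After the memcontrol rework above, a memcg is considered kmem-active simply when kmemcg_id >= 0, and the per-allocation css reference is taken in __memcg_kmem_get_cache() (via get_mem_cgroup_from_mm()) and dropped again either on its out: path or later by __memcg_kmem_put_cache(). A condensed sketch of the caller-visible pairing, using the functions shown in the hunks (the wrapper function is only an illustration):

/* Sketch: how a slab allocation pairs the get/put around the actual alloc. */
static void *kmem_charged_alloc_sketch(struct kmem_cache *cachep, gfp_t gfp)
{
	struct kmem_cache *s;
	void *obj;

	s = __memcg_kmem_get_cache(cachep);	/* may return a per-memcg cache with a css ref held */
	obj = kmem_cache_alloc(s, gfp);
	__memcg_kmem_put_cache(s);		/* drops the ref again for non-root caches */
	return obj;
}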
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e5ee0ca7ae85..feb803bf3443 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -239,19 +239,14 @@ void shake_page(struct page *p, int access)
}
/*
- * Only call shrink_slab here (which would also shrink other caches) if
- * access is not potentially fatal.
+ * Only call shrink_node_slabs here (which would also shrink
+ * other caches) if access is not potentially fatal.
*/
if (access) {
int nr;
int nid = page_to_nid(p);
do {
- struct shrink_control shrink = {
- .gfp_mask = GFP_KERNEL,
- };
- node_set(nid, shrink.nodes_to_scan);
-
- nr = shrink_slab(&shrink, 1000, 1000);
+ nr = shrink_node_slabs(GFP_KERNEL, nid, 1000, 1000);
if (page_count(p) == 1)
break;
} while (nr > 10);
@@ -466,7 +461,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
struct task_struct *tsk;
struct address_space *mapping = page->mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_read(mapping);
read_lock(&tasklist_lock);
for_each_process(tsk) {
pgoff_t pgoff = page_to_pgoff(page);
@@ -488,7 +483,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
}
}
read_unlock(&tasklist_lock);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_read(mapping);
}
/*
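collect_procs_file() above now takes i_mmap_lock_read(), the shared side of the new rwsem, because it only walks the interval tree. The read-side pattern, as used here and in the unmap_mapping_range() hunk further down, looks like this sketch:

/* Read-side walk over a file's VMAs; tree writers are excluded, other readers are not. */
static void for_each_file_vma_sketch(struct address_space *mapping,
				     pgoff_t first, pgoff_t last)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
		/* inspect vma here; the interval tree must not be modified */
	}
	i_mmap_unlock_read(mapping);
}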
diff --git a/mm/memory.c b/mm/memory.c
index 0b3f6c71620d..d8aebc52265f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,10 +235,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- if (!tlb->end)
- return;
-
tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
@@ -258,6 +256,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
+ if (!tlb->end)
+ return;
+
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -1326,9 +1327,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* safe to do nothing in this case.
*/
if (vma->vm_file) {
- mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ i_mmap_lock_write(vma->vm_file->f_mapping);
__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
}
} else
unmap_page_range(tlb, vma, start, end, details);
@@ -2220,7 +2221,7 @@ gotten:
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
- ptep_clear_flush(vma, address, page_table);
+ ptep_clear_flush_notify(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
@@ -2377,12 +2378,12 @@ void unmap_mapping_range(struct address_space *mapping,
details.last_index = ULONG_MAX;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_read(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_read(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
@@ -2627,7 +2628,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_SIGBUS;
/* Use the zero-page for reads */
- if (!(flags & FAULT_FLAG_WRITE)) {
+ if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2995,6 +2996,12 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (set_page_dirty(fault_page))
dirtied = 1;
+ /*
+ * Take a local copy of the address_space - page.mapping may be zeroed
+ * by truncate after unlock_page(). The address_space itself remains
+ * pinned by vma->vm_file's reference. We rely on unlock_page()'s
+ * release semantics to prevent the compiler from undoing this copying.
+ */
mapping = fault_page->mapping;
unlock_page(fault_page);
if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
@@ -3365,6 +3372,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
+EXPORT_SYMBOL_GPL(handle_mm_fault);
#ifndef __PAGETABLE_PUD_FOLDED
/*
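The mmu_gather change above moves the !tlb->end early return up into tlb_flush_mmu() and adds an mmu_notifier_invalidate_range() call immediately after the CPU TLB flush, so secondary MMUs are invalidated over exactly the range the gather covered. Schematically (a sketch of the ordering only, not the full function):

/* Sketch of the flush ordering after this change. */
static void tlb_flush_mmu_tlbonly_sketch(struct mmu_gather *tlb)
{
	tlb_flush(tlb);						/* CPU TLBs first */
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);	/* then secondary MMUs */
	/* tlb_table_flush() follows here when CONFIG_HAVE_RCU_TABLE_FREE is set */
}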
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e58725aff7e9..f22c55947181 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -162,12 +162,6 @@ static const struct mempolicy_operations {
enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
-/* Check that the nodemask contains at least one populated zone */
-static int is_valid_nodemask(const nodemask_t *nodemask)
-{
- return nodes_intersects(*nodemask, node_states[N_MEMORY]);
-}
-
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
return pol->flags & MPOL_MODE_FLAGS;
@@ -202,7 +196,7 @@ static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
- if (!is_valid_nodemask(nodes))
+ if (nodes_empty(*nodes))
return -EINVAL;
pol->v.nodes = *nodes;
return 0;
@@ -234,7 +228,7 @@ static int mpol_set_nodemask(struct mempolicy *pol,
nodes = NULL; /* explicit local allocation */
else {
if (pol->flags & MPOL_F_RELATIVE_NODES)
- mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
+ mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
else
nodes_and(nsc->mask2, *nodes, nsc->mask1);
diff --git a/mm/migrate.c b/mm/migrate.c
index 01439953abf5..b1d02127e1be 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -746,7 +746,7 @@ static int fallback_migrate_page(struct address_space *mapping,
* MIGRATEPAGE_SUCCESS - success
*/
static int move_to_new_page(struct page *newpage, struct page *page,
- int remap_swapcache, enum migrate_mode mode)
+ int page_was_mapped, enum migrate_mode mode)
{
struct address_space *mapping;
int rc;
@@ -784,7 +784,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
newpage->mapping = NULL;
} else {
mem_cgroup_migrate(page, newpage, false);
- if (remap_swapcache)
+ if (page_was_mapped)
remove_migration_ptes(page, newpage);
page->mapping = NULL;
}
@@ -798,7 +798,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int force, enum migrate_mode mode)
{
int rc = -EAGAIN;
- int remap_swapcache = 1;
+ int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
if (!trylock_page(page)) {
@@ -870,7 +870,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* migrated but are not remapped when migration
* completes
*/
- remap_swapcache = 0;
} else {
goto out_unlock;
}
@@ -910,13 +909,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
/* Establish migration ptes or remove ptes */
- try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ if (page_mapped(page)) {
+ try_to_unmap(page,
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ page_was_mapped = 1;
+ }
skip_unmap:
if (!page_mapped(page))
- rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+ rc = move_to_new_page(newpage, page, page_was_mapped, mode);
- if (rc && remap_swapcache)
+ if (rc && page_was_mapped)
remove_migration_ptes(page, page);
/* Drop an anon_vma reference if we took one */
@@ -1017,6 +1020,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
{
int rc = 0;
int *result = NULL;
+ int page_was_mapped = 0;
struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
@@ -1047,12 +1051,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
- try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ if (page_mapped(hpage)) {
+ try_to_unmap(hpage,
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ page_was_mapped = 1;
+ }
if (!page_mapped(hpage))
- rc = move_to_new_page(new_hpage, hpage, 1, mode);
+ rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);
- if (rc != MIGRATEPAGE_SUCCESS)
+ if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
remove_migration_ptes(hpage, hpage);
if (anon_vma)
@@ -1854,7 +1862,7 @@ fail_putback:
*/
flush_cache_range(vma, mmun_start, mmun_end);
page_add_anon_rmap(new_page, vma, mmun_start);
- pmdp_clear_flush(vma, mmun_start, pmd);
+ pmdp_clear_flush_notify(vma, mmun_start, pmd);
set_pmd_at(mm, mmun_start, pmd, entry);
flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
@@ -1862,6 +1870,7 @@ fail_putback:
if (page_count(page) != 2) {
set_pmd_at(mm, mmun_start, pmd, orig_entry);
flush_tlb_range(vma, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(new_page);
goto fail_putback;
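The migration hunks above replace the old remap_swapcache flag with page_was_mapped and only install or remove migration PTEs when the page really had mappings. Both the base and the huge-page paths reduce to roughly this pattern (a sketch; rc, page, newpage and mode come from the enclosing function, and error handling is omitted):

	int page_was_mapped = 0;

	if (page_mapped(page)) {
		try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, page_was_mapped, mode);

	if (rc && page_was_mapped)
		remove_migration_ptes(page, page);	/* restore the original PTEs on failure */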
diff --git a/mm/mincore.c b/mm/mincore.c
index 725c80961048..c8c528b36641 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -137,8 +137,11 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
} else { /* pte is a swap entry */
swp_entry_t entry = pte_to_swp_entry(pte);
- if (is_migration_entry(entry)) {
- /* migration entries are always uptodate */
+ if (non_swap_entry(entry)) {
+ /*
+ * migration or hwpoison entries are always
+ * uptodate
+ */
*vec = 1;
} else {
#ifdef CONFIG_SWAP
diff --git a/mm/mmap.c b/mm/mmap.c
index b6c0a77fc1c8..7b36aa7cc89a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ error:
}
/*
- * Requires inode->i_mapping->i_mmap_mutex
+ * Requires inode->i_mapping->i_mmap_rwsem
*/
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping)
@@ -260,9 +260,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
if (file) {
struct address_space *mapping = file->f_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
__remove_shared_vm_struct(vma, file, mapping);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
}
@@ -674,14 +674,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
}
__vma_link(mm, vma, prev, rb_link, rb_parent);
__vma_link_file(vma);
if (mapping)
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
mm->map_count++;
validate_mm(mm);
@@ -796,7 +796,7 @@ again: remove_next = 1 + (end > next->vm_end);
next->vm_end);
}
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
if (insert) {
/*
* Put into interval tree now, so instantiated pages
@@ -883,7 +883,7 @@ again: remove_next = 1 + (end > next->vm_end);
anon_vma_unlock_write(anon_vma);
}
if (mapping)
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
if (root) {
uprobe_mmap(vma);
@@ -2362,6 +2362,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
}
#endif
+EXPORT_SYMBOL_GPL(find_extend_vma);
+
/*
* Ok - we have the memory areas we should free on the vma list,
* so release them, and do the vma updates.
@@ -2791,7 +2793,7 @@ void exit_mmap(struct mm_struct *mm)
/* Insert vm structure into process list sorted by address
* and into the inode's i_mmap tree. If vm_file is non-NULL
- * then i_mmap_mutex is taken here.
+ * then i_mmap_rwsem is taken here.
*/
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
@@ -3086,7 +3088,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
*/
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
BUG();
- mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
+ down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
}
}
@@ -3113,7 +3115,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
* vma in this mm is backed by the same anon_vma or address_space.
*
* We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
+ * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
* takes more than one of them in a row. Secondly we're protected
* against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
*
@@ -3182,7 +3184,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
* AS_MM_ALL_LOCKS can't change to 0 from under us
* because we hold the mm_all_locks_mutex.
*/
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
&mapping->flags))
BUG();
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2c8da9825fe3..3b9b3d0741b2 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -193,6 +193,16 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
id = srcu_read_lock(&srcu);
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ /*
+ * Call invalidate_range() here too, to avoid the need for the
+ * subsystem to register an invalidate_range_end() call-back
+ * when it already has invalidate_range(). Usually a
+ * subsystem registers either invalidate_range_start()/end() or
+ * invalidate_range(), so this adds no overhead
+ * (besides the pointer check).
+ */
+ if (mn->ops->invalidate_range)
+ mn->ops->invalidate_range(mn, mm, start, end);
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
}
@@ -200,6 +210,21 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
+void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct mmu_notifier *mn;
+ int id;
+
+ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range)
+ mn->ops->invalidate_range(mn, mm, start, end);
+ }
+ srcu_read_unlock(&srcu, id);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+
static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm,
int take_mmap_sem)
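With __mmu_notifier_invalidate_range() exported above, and invalidate_range() also being called from the invalidate_range_end() path, a subsystem that merely shadows the CPU page tables (for example an IOMMU doing hardware page-table sharing) can register that single callback. A minimal, hypothetical client (all my_* names are made up):

/* Hypothetical client implementing only ->invalidate_range. */
static void my_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* flush the external (device/IOMMU) TLB for [start, end) here */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range = my_invalidate_range,
};

static struct mmu_notifier my_mn = { .ops = &my_mn_ops };

static int my_attach(struct mm_struct *mm)
{
	return mmu_notifier_register(&my_mn, mm);	/* takes mmap_sem internally */
}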
diff --git a/mm/mremap.c b/mm/mremap.c
index b147f66f4c40..17fa018f5f39 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -99,7 +99,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
spinlock_t *old_ptl, *new_ptl;
/*
- * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
+ * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
* locks to ensure that rmap will always observe either the old or the
* new ptes. This is the easiest way to avoid races with
* truncate_pagecache(), page migration, etc...
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (need_rmap_locks) {
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
}
if (vma->anon_vma) {
anon_vma = vma->anon_vma;
@@ -156,7 +156,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (anon_vma)
anon_vma_unlock_write(anon_vma);
if (mapping)
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
#define LATENCY_LIMIT (64 * PAGE_SIZE)
@@ -288,7 +288,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
old_len = new_len;
old_addr = new_addr;
new_addr = -ENOMEM;
- }
+ } else if (vma->vm_file && vma->vm_file->f_op->mremap)
+ vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) {
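The move_vma() hunk above starts calling a new ->mremap() hook on the backing file after a file-backed VMA has been moved, so drivers that cache the mapping's location can update it. Assuming the prototype implied by the call site, int (*mremap)(struct file *, struct vm_area_struct *), whose return value is ignored there, a hypothetical user would look like:

/* Hypothetical driver hooking the new ->mremap() callback; names are made up. */
static int my_mremap(struct file *file, struct vm_area_struct *new_vma)
{
	/* refresh any driver state that recorded the old vma or addresses */
	return 0;
}

static const struct file_operations my_fops = {
	.mremap	= my_mremap,
};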
diff --git a/mm/nommu.c b/mm/nommu.c
index bd1808e194a7..b51eadf6d952 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -722,11 +722,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
/* add the VMA to the tree */
@@ -795,11 +795,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
- mutex_lock(&mapping->i_mmap_mutex);
+ i_mmap_lock_write(mapping);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_write(mapping);
}
/* remove from the MM's tree and list */
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
unsigned long len,
unsigned long capabilities)
{
- struct page *pages;
- unsigned long total, point, n;
+ unsigned long total, point;
void *base;
int ret, order;
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
order = get_order(len);
kdebug("alloc order %d for %lx", order, len);
- pages = alloc_pages(GFP_KERNEL, order);
- if (!pages)
- goto enomem;
-
total = 1 << order;
- atomic_long_add(total, &mmap_pages_allocated);
-
point = len >> PAGE_SHIFT;
- /* we allocated a power-of-2 sized page set, so we may want to trim off
- * the excess */
+ /* we don't want to allocate a power-of-2 sized page set */
if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
- while (total > point) {
- order = ilog2(total - point);
- n = 1 << order;
- kdebug("shave %lu/%lu @%lu", n, total - point, total);
- atomic_long_sub(n, &mmap_pages_allocated);
- total -= n;
- set_page_refcounted(pages + total);
- __free_pages(pages + total, order);
- }
+ total = point;
+ kdebug("try to alloc exact %lu pages", total);
+ base = alloc_pages_exact(len, GFP_KERNEL);
+ } else {
+ base = (void *)__get_free_pages(GFP_KERNEL, order);
}
- for (point = 1; point < total; point++)
- set_page_refcounted(&pages[point]);
+ if (!base)
+ goto enomem;
+
+ atomic_long_add(total, &mmap_pages_allocated);
- base = page_address(pages);
region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
region->vm_start = (unsigned long) base;
region->vm_end = region->vm_start + len;
@@ -2094,14 +2083,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
down_write(&nommu_region_sem);
- mutex_lock(&inode->i_mapping->i_mmap_mutex);
+ i_mmap_lock_read(inode->i_mapping);
/* search for VMAs that fall within the dead zone */
vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
/* found one - only interested if it's shared out of the page
* cache */
if (vma->vm_flags & VM_SHARED) {
- mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+ i_mmap_unlock_read(inode->i_mapping);
up_write(&nommu_region_sem);
return -ETXTBSY; /* not quite true, but near enough */
}
@@ -2113,8 +2102,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
* we don't check for any regions that start beyond the EOF as there
* shouldn't be any
*/
- vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
- 0, ULONG_MAX) {
+ vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
if (!(vma->vm_flags & VM_SHARED))
continue;
@@ -2129,7 +2117,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
}
}
- mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+ i_mmap_unlock_read(inode->i_mapping);
up_write(&nommu_region_sem);
return 0;
}
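The do_mmap_private() rewrite above drops the manual trimming loop: when trimming is allowed it asks for exactly len bytes with alloc_pages_exact() (which internally frees the unused tail of the power-of-2 block), otherwise it keeps the whole 2^order block from __get_free_pages(). The same choice in isolation, as a sketch:

/* Sketch: prefer an exact-sized allocation when enough pages would be trimmed. */
static void *alloc_backing_sketch(unsigned long len, unsigned long trim_threshold)
{
	int order = get_order(len);
	unsigned long total = 1UL << order;
	unsigned long point = len >> PAGE_SHIFT;

	if (trim_threshold && total - point >= trim_threshold)
		return alloc_pages_exact(len, GFP_KERNEL);
	return (void *)__get_free_pages(GFP_KERNEL, order);
}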
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3b014d326151..d503e9ce1c7b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
/* Check this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
- if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ if (!cpuset_zone_allowed(zone, gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
@@ -281,14 +281,9 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
if (oom_task_origin(task))
return OOM_SCAN_SELECT;
- if (task->flags & PF_EXITING && !force_kill) {
- /*
- * If this task is not being ptraced on exit, then wait for it
- * to finish before killing some other task unnecessarily.
- */
- if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
- return OOM_SCAN_ABORT;
- }
+ if (task_will_free_mem(task) && !force_kill)
+ return OOM_SCAN_ABORT;
+
return OOM_SCAN_OK;
}
@@ -443,7 +438,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just set TIF_MEMDIE so it can die quickly
*/
- if (p->flags & PF_EXITING) {
+ if (task_will_free_mem(p)) {
set_tsk_thread_flag(p, TIF_MEMDIE);
put_task_struct(p);
return;
@@ -649,7 +644,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
*/
- if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+ if (fatal_signal_pending(current) || task_will_free_mem(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
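The OOM-killer hunks above replace the raw PF_EXITING tests with task_will_free_mem(), so all three call sites share one notion of "this task is already exiting and will release its memory soon". As a rough approximation only (the real helper in include/linux/oom.h adds further checks on whether the exit will actually complete promptly), it behaves like:

/* Simplified approximation; see include/linux/oom.h for the real predicate. */
static bool task_will_free_mem_sketch(struct task_struct *task)
{
	return task->flags & PF_EXITING;
}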
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7198c065999..7633c503a116 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,7 @@
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
+#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
@@ -55,9 +56,10 @@
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
+#include <linux/page_owner.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -109,6 +111,7 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
+unsigned long totalcma_pages __read_mostly;
/*
* When calculating the number of globally allowed dirty pages, there
* is a certain number of per-zone reserves that should not be
@@ -424,6 +427,42 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
+bool _debug_pagealloc_enabled __read_mostly;
+bool _debug_guardpage_enabled __read_mostly;
+
+static int __init early_debug_pagealloc(char *buf)
+{
+ if (!buf)
+ return -EINVAL;
+
+ if (strcmp(buf, "on") == 0)
+ _debug_pagealloc_enabled = true;
+
+ return 0;
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
+static bool need_debug_guardpage(void)
+{
+ /* If we don't use debug_pagealloc, we don't need guard page */
+ if (!debug_pagealloc_enabled())
+ return false;
+
+ return true;
+}
+
+static void init_debug_guardpage(void)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ _debug_guardpage_enabled = true;
+}
+
+struct page_ext_operations debug_guardpage_ops = {
+ .need = need_debug_guardpage,
+ .init = init_debug_guardpage,
+};
static int __init debug_guardpage_minorder_setup(char *buf)
{
@@ -439,18 +478,44 @@ static int __init debug_guardpage_minorder_setup(char *buf)
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
-static inline void set_page_guard_flag(struct page *page)
+static inline void set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
{
- __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ if (!debug_guardpage_enabled())
+ return;
+
+ page_ext = lookup_page_ext(page);
+ __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+ INIT_LIST_HEAD(&page->lru);
+ set_page_private(page, order);
+ /* Guard pages are not available for any usage */
+ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
}
-static inline void clear_page_guard_flag(struct page *page)
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
{
- __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+ struct page_ext *page_ext;
+
+ if (!debug_guardpage_enabled())
+ return;
+
+ page_ext = lookup_page_ext(page);
+ __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
+ set_page_private(page, 0);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
-static inline void set_page_guard_flag(struct page *page) { }
-static inline void clear_page_guard_flag(struct page *page) { }
+struct page_ext_operations debug_guardpage_ops = { NULL, };
+static inline void set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
#endif
static inline void set_page_order(struct page *page, unsigned int order)
@@ -581,12 +646,7 @@ static inline void __free_one_page(struct page *page,
* merge with it and move up one order.
*/
if (page_is_guard(buddy)) {
- clear_page_guard_flag(buddy);
- set_page_private(buddy, 0);
- if (!is_migrate_isolate(migratetype)) {
- __mod_zone_freepage_state(zone, 1 << order,
- migratetype);
- }
+ clear_page_guard(zone, buddy, order, migratetype);
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
@@ -755,6 +815,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
if (bad)
return false;
+ reset_page_owner(page, order);
+
if (!PageHighMem(page)) {
debug_check_no_locks_freed(page_address(page),
PAGE_SIZE << order);
@@ -861,23 +923,18 @@ static inline void expand(struct zone *zone, struct page *page,
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (high < debug_guardpage_minorder()) {
+ if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ debug_guardpage_enabled() &&
+ high < debug_guardpage_minorder()) {
/*
* Mark as guard pages (or page), that will allow to
* merge back to allocator when buddy will be freed.
* Corresponding page table entries will not be touched,
* pages will stay not present in virtual address space
*/
- INIT_LIST_HEAD(&page[size].lru);
- set_page_guard_flag(&page[size]);
- set_page_private(&page[size], high);
- /* Guard pages are not available for any usage */
- __mod_zone_freepage_state(zone, -(1 << high),
- migratetype);
+ set_page_guard(zone, &page[size], high, migratetype);
continue;
}
-#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
@@ -935,6 +992,8 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
+ set_page_owner(page, order, gfp_flags);
+
return 0;
}
@@ -1507,8 +1566,11 @@ void split_page(struct page *page, unsigned int order)
split_page(virt_to_page(page[0].shadow), order);
#endif
- for (i = 1; i < (1 << order); i++)
+ set_page_owner(page, 0, 0);
+ for (i = 1; i < (1 << order); i++) {
set_page_refcounted(page + i);
+ set_page_owner(page + i, 0, 0);
+ }
}
EXPORT_SYMBOL_GPL(split_page);
@@ -1548,6 +1610,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
}
+ set_page_owner(page, order, 0);
return 1UL << order;
}
@@ -1990,7 +2053,7 @@ zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
- * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
@@ -2001,7 +2064,7 @@ zonelist_scan:
continue;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
- !cpuset_zone_allowed_softwall(zone, gfp_mask))
+ !cpuset_zone_allowed(zone, gfp_mask))
continue;
/*
* Distribute pages in proportion to the individual
@@ -2529,7 +2592,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
alloc_flags |= ALLOC_HARDER;
/*
* Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
- * comment for __cpuset_node_allowed_softwall().
+ * comment for __cpuset_node_allowed().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -4856,6 +4919,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
#endif
init_waitqueue_head(&pgdat->kswapd_wait);
init_waitqueue_head(&pgdat->pfmemalloc_wait);
+ pgdat_page_ext_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
@@ -4874,16 +4938,18 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
* and per-cpu initialisations
*/
memmap_pages = calc_memmap_size(size, realsize);
- if (freesize >= memmap_pages) {
- freesize -= memmap_pages;
- if (memmap_pages)
- printk(KERN_DEBUG
- " %s zone: %lu pages used for memmap\n",
- zone_names[j], memmap_pages);
- } else
- printk(KERN_WARNING
- " %s zone: %lu pages exceeds freesize %lu\n",
- zone_names[j], memmap_pages, freesize);
+ if (!is_highmem_idx(j)) {
+ if (freesize >= memmap_pages) {
+ freesize -= memmap_pages;
+ if (memmap_pages)
+ printk(KERN_DEBUG
+ " %s zone: %lu pages used for memmap\n",
+ zone_names[j], memmap_pages);
+ } else
+ printk(KERN_WARNING
+ " %s zone: %lu pages exceeds freesize %lu\n",
+ zone_names[j], memmap_pages, freesize);
+ }
/* Account for reserved pages */
if (j == 0 && freesize > dma_reserve) {
@@ -5521,7 +5587,7 @@ void __init mem_init_print_info(const char *str)
pr_info("Memory: %luK/%luK available "
"(%luK kernel code, %luK rwdata, %luK rodata, "
- "%luK init, %luK bss, %luK reserved"
+ "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef CONFIG_HIGHMEM
", %luK highmem"
#endif
@@ -5529,7 +5595,8 @@ void __init mem_init_print_info(const char *str)
nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
codesize >> 10, datasize >> 10, rosize >> 10,
(init_data_size + init_code_size) >> 10, bss_size >> 10,
- (physpages - totalram_pages) << (PAGE_SHIFT-10),
+ (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
+ totalcma_pages << (PAGE_SHIFT-10),
#ifdef CONFIG_HIGHMEM
totalhigh_pages << (PAGE_SHIFT-10),
#endif
@@ -6221,9 +6288,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
if (!PageLRU(page))
found++;
/*
- * If there are RECLAIMABLE pages, we need to check it.
- * But now, memory offline itself doesn't call shrink_slab()
- * and it still to be fixed.
+ * If there are RECLAIMABLE pages, we need to check
+ * them. But right now memory offline itself doesn't call
+ * shrink_node_slabs(), and that still needs to be fixed.
*/
/*
 * If the page is not RAM, page_count() should be 0.
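With the page_alloc changes above, the guard-page marker moves from page->debug_flags into the per-page page_ext entry and is gated by both CONFIG_DEBUG_PAGEALLOC and the new debug_pagealloc=on boot parameter. The matching predicate is assumed to mirror set_page_guard()/clear_page_guard():

/* Sketch of the matching test, mirroring the helpers above (assumed form). */
static inline bool page_is_guard_sketch(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}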
diff --git a/mm/page_ext.c b/mm/page_ext.c
new file mode 100644
index 000000000000..d86fd2f5353f
--- /dev/null
+++ b/mm/page_ext.c
@@ -0,0 +1,403 @@
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/bootmem.h>
+#include <linux/page_ext.h>
+#include <linux/memory.h>
+#include <linux/vmalloc.h>
+#include <linux/kmemleak.h>
+#include <linux/page_owner.h>
+
+/*
+ * struct page extension
+ *
+ * This is the feature to manage memory for extended data per page.
+ *
+ * Until now, we had to modify struct page itself to store extra data per page.
+ * This requires rebuilding the kernel, which is a really time-consuming process.
+ * And sometimes a rebuild is impossible due to third-party module dependencies.
+ * Finally, enlarging struct page could cause unwanted changes in system behaviour.
+ *
+ * This feature is intended to overcome the above-mentioned problems. It
+ * allocates memory for extended per-page data in a separate place rather than
+ * in struct page itself. This memory can be accessed by the accessor
+ * functions provided by this code. During the boot process, it checks whether
+ * allocating a huge chunk of memory is needed or not. If not, it avoids
+ * allocating memory at all. With this advantage, we can include this feature
+ * in the kernel by default and can avoid rebuilds and the related problems.
+ *
+ * To make this work well, there are two callbacks for clients. One is the
+ * need callback, which is mandatory if the user wants to avoid useless
+ * memory allocation at boot time. The other, the init callback, is optional
+ * and is used to do proper initialization after memory is allocated.
+ *
+ * The need callback is used to decide whether extended memory allocation is
+ * needed or not. Sometimes users want to deactivate some features for a
+ * given boot, in which case the extra memory would be unnecessary. To avoid
+ * allocating a huge chunk of memory in that case, each client declares its
+ * need for extra memory through the need callback. If one of the need
+ * callbacks returns true, it means that someone needs extra memory, so the
+ * page extension core should allocate memory for page extension. If
+ * none of the need callbacks return true, memory isn't needed at all for this
+ * boot and the page extension core can skip the allocation entirely. As a
+ * result, no memory is wasted.
+ *
+ * The init callback is used to do proper initialization after page extension
+ * is completely initialized. On sparse memory systems, the extra memory is
+ * allocated some time later than the memmap. In other words, the lifetime of
+ * the page extension memory isn't the same as that of the memmap for struct
+ * page. Therefore, clients can't store extra data until page extension is
+ * initialized, even if pages are already allocated and used freely. This could
+ * leave the per-page extra data in an inconsistent state, so, to prevent that,
+ * a client can use this callback to initialize the state correctly.
+ */
+
+static struct page_ext_operations *page_ext_ops[] = {
+ &debug_guardpage_ops,
+#ifdef CONFIG_PAGE_POISONING
+ &page_poisoning_ops,
+#endif
+#ifdef CONFIG_PAGE_OWNER
+ &page_owner_ops,
+#endif
+};
+
+static unsigned long total_usage;
+
+static bool __init invoke_need_callbacks(void)
+{
+ int i;
+ int entries = ARRAY_SIZE(page_ext_ops);
+
+ for (i = 0; i < entries; i++) {
+ if (page_ext_ops[i]->need && page_ext_ops[i]->need())
+ return true;
+ }
+
+ return false;
+}
+
+static void __init invoke_init_callbacks(void)
+{
+ int i;
+ int entries = ARRAY_SIZE(page_ext_ops);
+
+ for (i = 0; i < entries; i++) {
+ if (page_ext_ops[i]->init)
+ page_ext_ops[i]->init();
+ }
+}
+
+#if !defined(CONFIG_SPARSEMEM)
+
+
+void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
+{
+ pgdat->node_page_ext = NULL;
+}
+
+struct page_ext *lookup_page_ext(struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long offset;
+ struct page_ext *base;
+
+ base = NODE_DATA(page_to_nid(page))->node_page_ext;
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+ * allocated when feeding a range of pages to the allocator
+ * for the first time during bootup or memory hotplug.
+ */
+ if (unlikely(!base))
+ return NULL;
+#endif
+ offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ MAX_ORDER_NR_PAGES);
+ return base + offset;
+}
+
+static int __init alloc_node_page_ext(int nid)
+{
+ struct page_ext *base;
+ unsigned long table_size;
+ unsigned long nr_pages;
+
+ nr_pages = NODE_DATA(nid)->node_spanned_pages;
+ if (!nr_pages)
+ return 0;
+
+ /*
+ * Need extra space if node range is not aligned with
+ * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
+ * checks buddy's status, range could be out of exact node range.
+ */
+ if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
+ !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
+ nr_pages += MAX_ORDER_NR_PAGES;
+
+ table_size = sizeof(struct page_ext) * nr_pages;
+
+ base = memblock_virt_alloc_try_nid_nopanic(
+ table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ if (!base)
+ return -ENOMEM;
+ NODE_DATA(nid)->node_page_ext = base;
+ total_usage += table_size;
+ return 0;
+}
+
+void __init page_ext_init_flatmem(void)
+{
+
+ int nid, fail;
+
+ if (!invoke_need_callbacks())
+ return;
+
+ for_each_online_node(nid) {
+ fail = alloc_node_page_ext(nid);
+ if (fail)
+ goto fail;
+ }
+ pr_info("allocated %ld bytes of page_ext\n", total_usage);
+ invoke_init_callbacks();
+ return;
+
+fail:
+ pr_crit("allocation of page_ext failed.\n");
+ panic("Out of memory");
+}
+
+#else /* CONFIG_SPARSEMEM */
+
+struct page_ext *lookup_page_ext(struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+ struct mem_section *section = __pfn_to_section(pfn);
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+ * allocated when feeding a range of pages to the allocator
+ * for the first time during bootup or memory hotplug.
+ */
+ if (!section->page_ext)
+ return NULL;
+#endif
+ return section->page_ext + pfn;
+}
+
+static void *__meminit alloc_page_ext(size_t size, int nid)
+{
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
+ void *addr = NULL;
+
+ addr = alloc_pages_exact_nid(nid, size, flags);
+ if (addr) {
+ kmemleak_alloc(addr, size, 1, flags);
+ return addr;
+ }
+
+ if (node_state(nid, N_HIGH_MEMORY))
+ addr = vzalloc_node(size, nid);
+ else
+ addr = vzalloc(size);
+
+ return addr;
+}
+
+static int __meminit init_section_page_ext(unsigned long pfn, int nid)
+{
+ struct mem_section *section;
+ struct page_ext *base;
+ unsigned long table_size;
+
+ section = __pfn_to_section(pfn);
+
+ if (section->page_ext)
+ return 0;
+
+ table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+ base = alloc_page_ext(table_size, nid);
+
+ /*
+ * The value stored in section->page_ext is (base - pfn)
+ * and it does not point to the memory block allocated above,
+ * causing kmemleak false positives.
+ */
+ kmemleak_not_leak(base);
+
+ if (!base) {
+ pr_err("page ext allocation failure\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The passed "pfn" may not be aligned to SECTION. For the calculation
+ * we need to apply a mask.
+ */
+ pfn &= PAGE_SECTION_MASK;
+ section->page_ext = base - pfn;
+ total_usage += table_size;
+ return 0;
+}
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void free_page_ext(void *addr)
+{
+ if (is_vmalloc_addr(addr)) {
+ vfree(addr);
+ } else {
+ struct page *page = virt_to_page(addr);
+ size_t table_size;
+
+ table_size = sizeof(struct page_ext) * PAGES_PER_SECTION;
+
+ BUG_ON(PageReserved(page));
+ free_pages_exact(addr, table_size);
+ }
+}
+
+static void __free_page_ext(unsigned long pfn)
+{
+ struct mem_section *ms;
+ struct page_ext *base;
+
+ ms = __pfn_to_section(pfn);
+ if (!ms || !ms->page_ext)
+ return;
+ base = ms->page_ext + pfn;
+ free_page_ext(base);
+ ms->page_ext = NULL;
+}
+
+static int __meminit online_page_ext(unsigned long start_pfn,
+ unsigned long nr_pages,
+ int nid)
+{
+ unsigned long start, end, pfn;
+ int fail = 0;
+
+ start = SECTION_ALIGN_DOWN(start_pfn);
+ end = SECTION_ALIGN_UP(start_pfn + nr_pages);
+
+ if (nid == -1) {
+ /*
+ * In this case, "nid" already exists and contains valid memory.
+ * "start_pfn" passed to us is a pfn which is an arg for
+ * online__pages(), and start_pfn should exist.
+ */
+ nid = pfn_to_nid(start_pfn);
+ VM_BUG_ON(!node_state(nid, N_ONLINE));
+ }
+
+ for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
+ if (!pfn_present(pfn))
+ continue;
+ fail = init_section_page_ext(pfn, nid);
+ }
+ if (!fail)
+ return 0;
+
+ /* rollback */
+ for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
+ __free_page_ext(pfn);
+
+ return -ENOMEM;
+}
+
+static int __meminit offline_page_ext(unsigned long start_pfn,
+ unsigned long nr_pages, int nid)
+{
+ unsigned long start, end, pfn;
+
+ start = SECTION_ALIGN_DOWN(start_pfn);
+ end = SECTION_ALIGN_UP(start_pfn + nr_pages);
+
+ for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
+ __free_page_ext(pfn);
+ return 0;
+
+}
+
+static int __meminit page_ext_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mn = arg;
+ int ret = 0;
+
+ switch (action) {
+ case MEM_GOING_ONLINE:
+ ret = online_page_ext(mn->start_pfn,
+ mn->nr_pages, mn->status_change_nid);
+ break;
+ case MEM_OFFLINE:
+ offline_page_ext(mn->start_pfn,
+ mn->nr_pages, mn->status_change_nid);
+ break;
+ case MEM_CANCEL_ONLINE:
+ offline_page_ext(mn->start_pfn,
+ mn->nr_pages, mn->status_change_nid);
+ break;
+ case MEM_GOING_OFFLINE:
+ break;
+ case MEM_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+#endif
+
+void __init page_ext_init(void)
+{
+ unsigned long pfn;
+ int nid;
+
+ if (!invoke_need_callbacks())
+ return;
+
+ for_each_node_state(nid, N_MEMORY) {
+ unsigned long start_pfn, end_pfn;
+
+ start_pfn = node_start_pfn(nid);
+ end_pfn = node_end_pfn(nid);
+ /*
+		 * start_pfn and end_pfn may not be section-aligned, and the
+		 * page->flags of out-of-node pages are not initialized. So we
+		 * scan [start_pfn, the largest section-aligned pfn < end_pfn) here.
+ */
+ for (pfn = start_pfn; pfn < end_pfn;
+ pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
+
+ if (!pfn_valid(pfn))
+ continue;
+ /*
+			 * Nodes' pfn ranges can overlap.
+			 * Some architectures have a node layout such as
+			 * -------------pfn-------------->
+			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
+ */
+ if (pfn_to_nid(pfn) != nid)
+ continue;
+ if (init_section_page_ext(pfn, nid))
+ goto oom;
+ }
+ }
+ hotplug_memory_notifier(page_ext_callback, 0);
+ pr_info("allocated %ld bytes of page_ext\n", total_usage);
+ invoke_init_callbacks();
+ return;
+
+oom:
+ panic("Out of memory");
+}
+
+void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
+{
+}
+
+#endif
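
The sparse-memory variant above stores (base - section_start_pfn) in section->page_ext so that lookup_page_ext() can index the array with a raw pfn. A minimal stand-alone C sketch of the same offset trick; the names and the PAGES_PER_SECTION value here are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_SECTION 64UL              /* illustrative, not the kernel value */

struct page_ext { unsigned long flags; };
struct mem_section { struct page_ext *page_ext; };

/* Mirrors init_section_page_ext(): store base offset by the section start pfn. */
static void init_section(struct mem_section *sec, unsigned long pfn,
                         struct page_ext *base)
{
        pfn &= ~(PAGES_PER_SECTION - 1);    /* PAGE_SECTION_MASK */
        sec->page_ext = base - pfn;
}

/* Mirrors lookup_page_ext(): the raw pfn indexes the offset pointer directly. */
static struct page_ext *lookup(struct mem_section *sec, unsigned long pfn)
{
        return sec->page_ext + pfn;
}

int main(void)
{
        struct page_ext *base = calloc(PAGES_PER_SECTION, sizeof(*base));
        struct mem_section sec;
        unsigned long start = 3 * PAGES_PER_SECTION;

        init_section(&sec, start + 5, base);
        printf("slot %td\n", lookup(&sec, start + 7) - base);  /* prints "slot 7" */
        free(base);
        return 0;
}
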
diff --git a/mm/page_owner.c b/mm/page_owner.c
new file mode 100644
index 000000000000..9ab4a9b5bc09
--- /dev/null
+++ b/mm/page_owner.c
@@ -0,0 +1,311 @@
+#include <linux/debugfs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/bootmem.h>
+#include <linux/stacktrace.h>
+#include <linux/page_owner.h>
+#include "internal.h"
+
+static bool page_owner_disabled = true;
+bool page_owner_inited __read_mostly;
+
+static void init_early_allocated_pages(void);
+
+static int early_page_owner_param(char *buf)
+{
+ if (!buf)
+ return -EINVAL;
+
+ if (strcmp(buf, "on") == 0)
+ page_owner_disabled = false;
+
+ return 0;
+}
+early_param("page_owner", early_page_owner_param);
+
+static bool need_page_owner(void)
+{
+ if (page_owner_disabled)
+ return false;
+
+ return true;
+}
+
+static void init_page_owner(void)
+{
+ if (page_owner_disabled)
+ return;
+
+ page_owner_inited = true;
+ init_early_allocated_pages();
+}
+
+struct page_ext_operations page_owner_ops = {
+ .need = need_page_owner,
+ .init = init_page_owner,
+};
+
+void __reset_page_owner(struct page *page, unsigned int order)
+{
+ int i;
+ struct page_ext *page_ext;
+
+ for (i = 0; i < (1 << order); i++) {
+ page_ext = lookup_page_ext(page + i);
+ __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ }
+}
+
+void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+{
+ struct page_ext *page_ext;
+ struct stack_trace *trace;
+
+ page_ext = lookup_page_ext(page);
+
+ trace = &page_ext->trace;
+ trace->nr_entries = 0;
+ trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
+ trace->entries = &page_ext->trace_entries[0];
+ trace->skip = 3;
+ save_stack_trace(&page_ext->trace);
+
+ page_ext->order = order;
+ page_ext->gfp_mask = gfp_mask;
+
+ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+}
+
+static ssize_t
+print_page_owner(char __user *buf, size_t count, unsigned long pfn,
+ struct page *page, struct page_ext *page_ext)
+{
+ int ret;
+ int pageblock_mt, page_mt;
+ char *kbuf;
+
+ kbuf = kmalloc(count, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = snprintf(kbuf, count,
+ "Page allocated via order %u, mask 0x%x\n",
+ page_ext->order, page_ext->gfp_mask);
+
+ if (ret >= count)
+ goto err;
+
+ /* Print information relevant to grouping pages by mobility */
+ pageblock_mt = get_pfnblock_migratetype(page, pfn);
+ page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
+ ret += snprintf(kbuf + ret, count - ret,
+ "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
+ pfn,
+ pfn >> pageblock_order,
+ pageblock_mt,
+ pageblock_mt != page_mt ? "Fallback" : " ",
+ PageLocked(page) ? "K" : " ",
+ PageError(page) ? "E" : " ",
+ PageReferenced(page) ? "R" : " ",
+ PageUptodate(page) ? "U" : " ",
+ PageDirty(page) ? "D" : " ",
+ PageLRU(page) ? "L" : " ",
+ PageActive(page) ? "A" : " ",
+ PageSlab(page) ? "S" : " ",
+ PageWriteback(page) ? "W" : " ",
+ PageCompound(page) ? "C" : " ",
+ PageSwapCache(page) ? "B" : " ",
+ PageMappedToDisk(page) ? "M" : " ");
+
+ if (ret >= count)
+ goto err;
+
+ ret += snprint_stack_trace(kbuf + ret, count - ret,
+ &page_ext->trace, 0);
+ if (ret >= count)
+ goto err;
+
+ ret += snprintf(kbuf + ret, count - ret, "\n");
+ if (ret >= count)
+ goto err;
+
+ if (copy_to_user(buf, kbuf, ret))
+ ret = -EFAULT;
+
+ kfree(kbuf);
+ return ret;
+
+err:
+ kfree(kbuf);
+ return -ENOMEM;
+}
+
+static ssize_t
+read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ unsigned long pfn;
+ struct page *page;
+ struct page_ext *page_ext;
+
+ if (!page_owner_inited)
+ return -EINVAL;
+
+ page = NULL;
+ pfn = min_low_pfn + *ppos;
+
+ /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
+ while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
+ pfn++;
+
+ drain_all_pages(NULL);
+
+ /* Find an allocated page */
+ for (; pfn < max_pfn; pfn++) {
+ /*
+ * If the new page is in a new MAX_ORDER_NR_PAGES area,
+		 * validate that the area exists; skip it if not.
+ */
+ if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
+ pfn += MAX_ORDER_NR_PAGES - 1;
+ continue;
+ }
+
+ /* Check for holes within a MAX_ORDER area */
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+ if (PageBuddy(page)) {
+ unsigned long freepage_order = page_order_unsafe(page);
+
+ if (freepage_order < MAX_ORDER)
+ pfn += (1UL << freepage_order) - 1;
+ continue;
+ }
+
+ page_ext = lookup_page_ext(page);
+
+ /*
+ * Some pages could be missed by concurrent allocation or free,
+ * because we don't hold the zone lock.
+ */
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
+
+ /* Record the next PFN to read in the file offset */
+ *ppos = (pfn - min_low_pfn) + 1;
+
+ return print_page_owner(buf, count, pfn, page, page_ext);
+ }
+
+ return 0;
+}
+
+static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
+{
+ struct page *page;
+ struct page_ext *page_ext;
+ unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
+ unsigned long end_pfn = pfn + zone->spanned_pages;
+ unsigned long count = 0;
+
+ /* Scan block by block. First and last block may be incomplete */
+ pfn = zone->zone_start_pfn;
+
+ /*
+ * Walk the zone in pageblock_nr_pages steps. If a page block spans
+ * a zone boundary, it will be double counted between zones. This does
+ * not matter as the mixed block count will still be correct
+ */
+ for (; pfn < end_pfn; ) {
+ if (!pfn_valid(pfn)) {
+ pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ continue;
+ }
+
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
+ page = pfn_to_page(pfn);
+
+ for (; pfn < block_end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+
+ /*
+			 * It is safe to check the buddy flag and order here
+			 * because this runs single-threaded at init time.
+ */
+ if (PageBuddy(page)) {
+ pfn += (1UL << page_order(page)) - 1;
+ continue;
+ }
+
+ if (PageReserved(page))
+ continue;
+
+ page_ext = lookup_page_ext(page);
+
+			/* Maybe an overlapping zone */
+ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
+
+ /* Found early allocated page */
+ set_page_owner(page, 0, 0);
+ count++;
+ }
+ }
+
+ pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
+ pgdat->node_id, zone->name, count);
+}
+
+static void init_zones_in_node(pg_data_t *pgdat)
+{
+ struct zone *zone;
+ struct zone *node_zones = pgdat->node_zones;
+ unsigned long flags;
+
+ for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+ if (!populated_zone(zone))
+ continue;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ init_pages_in_zone(pgdat, zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+}
+
+static void init_early_allocated_pages(void)
+{
+ pg_data_t *pgdat;
+
+ drain_all_pages(NULL);
+ for_each_online_pgdat(pgdat)
+ init_zones_in_node(pgdat);
+}
+
+static const struct file_operations proc_page_owner_operations = {
+ .read = read_page_owner,
+};
+
+static int __init pageowner_init(void)
+{
+ struct dentry *dentry;
+
+ if (!page_owner_inited) {
+ pr_info("page_owner is disabled\n");
+ return 0;
+ }
+
+ dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
+ NULL, &proc_page_owner_operations);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+module_init(pageowner_init)
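
With the boot parameter page_owner=on set (see early_page_owner_param() above), read_page_owner() emits at most one allocation record per read() call and stores the next pfn in the file offset, so user space can simply loop on read(). A minimal reader sketch; the /sys/kernel/debug path assumes the conventional debugfs mount point rather than anything this file itself guarantees:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Each successful read() returns one "Page allocated via order ..." record. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
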
diff --git a/mm/percpu.c b/mm/percpu.c
index 014bab65e0ff..d39e2f4e335c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1591,7 +1591,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
if (cpu == NR_CPUS)
continue;
- PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+ PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
diff --git a/mm/rmap.c b/mm/rmap.c
index 45eba36fd673..45ba250babd8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -23,7 +23,7 @@
* inode->i_mutex (while writing or truncating, not reading or faulting)
* mm->mmap_sem
* page->flags PG_locked (lock_page)
- * mapping->i_mmap_mutex
+ * mapping->i_mmap_rwsem
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* zone->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1260,7 +1260,7 @@ out_mlock:
/*
* We need mmap_sem locking, Otherwise VM_LOCKED check makes
* unstable result and race. Plus, We can't wait here because
- * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
+ * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
* if trylock failed, the page remain in evictable lru and later
* vmscan could retry to move the page to unevictable lru if the
* page is actually mlocked.
@@ -1380,7 +1380,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
- pteval = ptep_clear_flush(vma, address, pte);
+ pteval = ptep_clear_flush_notify(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address)) {
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
- pgoff_t pgoff = page_to_pgoff(page);
+ pgoff_t pgoff;
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
@@ -1643,6 +1643,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
if (!anon_vma)
return ret;
+ pgoff = page_to_pgoff(page);
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
@@ -1676,7 +1677,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
- pgoff_t pgoff = page_to_pgoff(page);
+ pgoff_t pgoff;
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
@@ -1684,13 +1685,15 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
* The page lock not only makes sure that page->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the
* structure at mapping cannot be freed and reused yet,
- * so we can safely take mapping->i_mmap_mutex.
+ * so we can safely take mapping->i_mmap_rwsem.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!mapping)
return ret;
- mutex_lock(&mapping->i_mmap_mutex);
+
+ pgoff = page_to_pgoff(page);
+ i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
@@ -1711,9 +1714,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
goto done;
ret = rwc->file_nonlinear(page, mapping, rwc->arg);
-
done:
- mutex_unlock(&mapping->i_mmap_mutex);
+ i_mmap_unlock_read(mapping);
return ret;
}
diff --git a/mm/slab.c b/mm/slab.c
index 79e15f0a2a6e..65b5dcb6f671 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3015,7 +3015,7 @@ retry:
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
nid = zone_to_nid(zone);
- if (cpuset_zone_allowed_hardwall(zone, flags) &&
+ if (cpuset_zone_allowed(zone, flags) &&
get_node(cache, nid) &&
get_node(cache, nid)->free_objects) {
obj = ____cache_alloc_node(cache,
@@ -3182,6 +3182,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
memset(ptr, 0, cachep->object_size);
}
+ memcg_kmem_put_cache(cachep);
return ptr;
}
@@ -3247,6 +3248,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
memset(objp, 0, cachep->object_size);
}
+ memcg_kmem_put_cache(cachep);
return objp;
}
diff --git a/mm/slub.c b/mm/slub.c
index 386bbed76e94..fe376fe1f4fe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1233,13 +1233,17 @@ static inline void kfree_hook(const void *x)
kmemleak_free(x);
}
-static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+ gfp_t flags)
{
flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
might_sleep_if(flags & __GFP_WAIT);
- return should_failslab(s->object_size, flags, s->flags);
+ if (should_failslab(s->object_size, flags, s->flags))
+ return NULL;
+
+ return memcg_kmem_get_cache(s, flags);
}
static inline void slab_post_alloc_hook(struct kmem_cache *s,
@@ -1248,6 +1252,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
flags &= gfp_allowed_mask;
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+ memcg_kmem_put_cache(s);
}
static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1665,7 +1670,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
n = get_node(s, zone_to_nid(zone));
- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+ if (n && cpuset_zone_allowed(zone, flags) &&
n->nr_partial > s->min_partial) {
object = get_partial_node(s, n, c, flags);
if (object) {
@@ -2383,10 +2388,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
struct page *page;
unsigned long tid;
- if (slab_pre_alloc_hook(s, gfpflags))
+ s = slab_pre_alloc_hook(s, gfpflags);
+ if (!s)
return NULL;
-
- s = memcg_kmem_get_cache(s, gfpflags);
redo:
/*
* Must read kmem_cache cpu data via this cpu ptr. Preemption is
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 9f25af825dec..b6e3662fe339 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -17,6 +17,8 @@ void vmacache_flush_all(struct mm_struct *mm)
{
struct task_struct *g, *p;
+ count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
+
/*
* Single threaded tasks need not iterate the entire
* list of process. We can avoid the flushing as well
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8a18196fcdff..39c338896416 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2574,10 +2574,10 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
if (!counters)
return;
- /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
- smp_rmb();
if (v->flags & VM_UNINITIALIZED)
return;
+ /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+ smp_rmb();
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4636d9e822c1..bd9a72bc4a1b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -229,9 +229,10 @@ EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
-static unsigned long
-shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
- unsigned long nr_pages_scanned, unsigned long lru_pages)
+static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
+ struct shrinker *shrinker,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible)
{
unsigned long freed = 0;
unsigned long long delta;
@@ -255,9 +256,9 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
total_scan = nr;
- delta = (4 * nr_pages_scanned) / shrinker->seeks;
+ delta = (4 * nr_scanned) / shrinker->seeks;
delta *= freeable;
- do_div(delta, lru_pages + 1);
+ do_div(delta, nr_eligible + 1);
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -289,8 +290,8 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
total_scan = freeable * 2;
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
- nr_pages_scanned, lru_pages,
- freeable, delta, total_scan);
+ nr_scanned, nr_eligible,
+ freeable, delta, total_scan);
/*
* Normally, we should not scan less than batch_size objects in one
@@ -339,34 +340,37 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
return freed;
}
-/*
- * Call the shrink functions to age shrinkable caches
- *
- * Here we assume it costs one seek to replace a lru page and that it also
- * takes a seek to recreate a cache object. With this in mind we age equal
- * percentages of the lru and ageable caches. This should balance the seeks
- * generated by these structures.
+/**
+ * shrink_node_slabs - shrink slab caches of a given node
+ * @gfp_mask: allocation context
+ * @nid: node whose slab caches to target
+ * @nr_scanned: pressure numerator
+ * @nr_eligible: pressure denominator
*
- * If the vm encountered mapped pages on the LRU it increase the pressure on
- * slab to avoid swapping.
+ * Call the shrink functions to age shrinkable caches.
*
- * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
+ * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
+ * unaware shrinkers receive a node id of 0 instead.
*
- * `lru_pages' represents the number of on-LRU pages in all the zones which
- * are eligible for the caller's allocation attempt. It is used for balancing
- * slab reclaim versus page reclaim.
+ * @nr_scanned and @nr_eligible form a ratio that indicates how much of
+ * the available objects should be scanned. Page reclaim for example
+ * passes the number of pages scanned and the number of pages on the
+ * LRU lists that it considered on @nid, plus a bias in @nr_scanned
+ * when it encountered mapped pages. The ratio is further biased by
+ * the ->seeks setting of the shrink function, which indicates the
+ * cost to recreate an object relative to that of an LRU page.
*
- * Returns the number of slab objects which we shrunk.
+ * Returns the number of reclaimed slab objects.
*/
-unsigned long shrink_slab(struct shrink_control *shrinkctl,
- unsigned long nr_pages_scanned,
- unsigned long lru_pages)
+unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
+ unsigned long nr_scanned,
+ unsigned long nr_eligible)
{
struct shrinker *shrinker;
unsigned long freed = 0;
- if (nr_pages_scanned == 0)
- nr_pages_scanned = SWAP_CLUSTER_MAX;
+ if (nr_scanned == 0)
+ nr_scanned = SWAP_CLUSTER_MAX;
if (!down_read_trylock(&shrinker_rwsem)) {
/*
@@ -380,20 +384,17 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
}
list_for_each_entry(shrinker, &shrinker_list, list) {
- if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
- shrinkctl->nid = 0;
- freed += shrink_slab_node(shrinkctl, shrinker,
- nr_pages_scanned, lru_pages);
- continue;
- }
+ struct shrink_control sc = {
+ .gfp_mask = gfp_mask,
+ .nid = nid,
+ };
- for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
- if (node_online(shrinkctl->nid))
- freed += shrink_slab_node(shrinkctl, shrinker,
- nr_pages_scanned, lru_pages);
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+ sc.nid = 0;
- }
+ freed += shrink_slabs(&sc, shrinker, nr_scanned, nr_eligible);
}
+
up_read(&shrinker_rwsem);
out:
cond_resched();
@@ -1876,7 +1877,8 @@ enum scan_balance {
* nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
*/
static void get_scan_count(struct lruvec *lruvec, int swappiness,
- struct scan_control *sc, unsigned long *nr)
+ struct scan_control *sc, unsigned long *nr,
+ unsigned long *lru_pages)
{
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
u64 fraction[2];
@@ -2022,6 +2024,7 @@ out:
some_scanned = false;
/* Only use force_scan on second pass. */
for (pass = 0; !some_scanned && pass < 2; pass++) {
+ *lru_pages = 0;
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long size;
@@ -2048,14 +2051,19 @@ out:
case SCAN_FILE:
case SCAN_ANON:
/* Scan one type exclusively */
- if ((scan_balance == SCAN_FILE) != file)
+ if ((scan_balance == SCAN_FILE) != file) {
+ size = 0;
scan = 0;
+ }
break;
default:
/* Look ma, no brain */
BUG();
}
+
+ *lru_pages += size;
nr[lru] = scan;
+
/*
* Skip the second pass and don't force_scan,
* if we found something to scan.
@@ -2069,7 +2077,7 @@ out:
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
- struct scan_control *sc)
+ struct scan_control *sc, unsigned long *lru_pages)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long targets[NR_LRU_LISTS];
@@ -2080,7 +2088,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
struct blk_plug plug;
bool scan_adjusted;
- get_scan_count(lruvec, swappiness, sc, nr);
+ get_scan_count(lruvec, swappiness, sc, nr, lru_pages);
/* Record the original scan target for proportional adjustments later */
memcpy(targets, nr, sizeof(nr));
@@ -2258,7 +2266,8 @@ static inline bool should_continue_reclaim(struct zone *zone,
}
}
-static bool shrink_zone(struct zone *zone, struct scan_control *sc)
+static bool shrink_zone(struct zone *zone, struct scan_control *sc,
+ bool is_classzone)
{
unsigned long nr_reclaimed, nr_scanned;
bool reclaimable = false;
@@ -2269,6 +2278,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc)
.zone = zone,
.priority = sc->priority,
};
+ unsigned long zone_lru_pages = 0;
struct mem_cgroup *memcg;
nr_reclaimed = sc->nr_reclaimed;
@@ -2276,13 +2286,15 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc)
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
+ unsigned long lru_pages;
struct lruvec *lruvec;
int swappiness;
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
swappiness = mem_cgroup_swappiness(memcg);
- shrink_lruvec(lruvec, swappiness, sc);
+ shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
+ zone_lru_pages += lru_pages;
/*
* Direct reclaim and kswapd have to scan all memory
@@ -2302,6 +2314,25 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc)
memcg = mem_cgroup_iter(root, memcg, &reclaim);
} while (memcg);
+ /*
+ * Shrink the slab caches in the same proportion that
+ * the eligible LRU pages were scanned.
+ */
+ if (global_reclaim(sc) && is_classzone) {
+ struct reclaim_state *reclaim_state;
+
+ shrink_node_slabs(sc->gfp_mask, zone_to_nid(zone),
+ sc->nr_scanned - nr_scanned,
+ zone_lru_pages);
+
+ reclaim_state = current->reclaim_state;
+ if (reclaim_state) {
+ sc->nr_reclaimed +=
+ reclaim_state->reclaimed_slab;
+ reclaim_state->reclaimed_slab = 0;
+ }
+ }
+
vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
sc->nr_scanned - nr_scanned,
sc->nr_reclaimed - nr_reclaimed);
@@ -2376,12 +2407,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
struct zone *zone;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
- unsigned long lru_pages = 0;
- struct reclaim_state *reclaim_state = current->reclaim_state;
gfp_t orig_mask;
- struct shrink_control shrink = {
- .gfp_mask = sc->gfp_mask,
- };
enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
bool reclaimable = false;
@@ -2394,23 +2420,27 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (buffer_heads_over_limit)
sc->gfp_mask |= __GFP_HIGHMEM;
- nodes_clear(shrink.nodes_to_scan);
-
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- gfp_zone(sc->gfp_mask), sc->nodemask) {
+ requested_highidx, sc->nodemask) {
+ enum zone_type classzone_idx;
+
if (!populated_zone(zone))
continue;
+
+ classzone_idx = requested_highidx;
+ while (!populated_zone(zone->zone_pgdat->node_zones +
+ classzone_idx))
+ classzone_idx--;
+
/*
* Take care memory controller reclaiming has small influence
* to global LRU.
*/
if (global_reclaim(sc)) {
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ if (!cpuset_zone_allowed(zone,
+ GFP_KERNEL | __GFP_HARDWALL))
continue;
- lru_pages += zone_reclaimable_pages(zone);
- node_set(zone_to_nid(zone), shrink.nodes_to_scan);
-
if (sc->priority != DEF_PRIORITY &&
!zone_reclaimable(zone))
continue; /* Let kswapd poll it */
@@ -2449,7 +2479,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
- if (shrink_zone(zone, sc))
+ if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx))
reclaimable = true;
if (global_reclaim(sc) &&
@@ -2458,20 +2488,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
}
/*
- * Don't shrink slabs when reclaiming memory from over limit cgroups
- * but do shrink slab at least once when aborting reclaim for
- * compaction to avoid unevenly scanning file/anon LRU pages over slab
- * pages.
- */
- if (global_reclaim(sc)) {
- shrink_slab(&shrink, sc->nr_scanned, lru_pages);
- if (reclaim_state) {
- sc->nr_reclaimed += reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
- }
- }
-
- /*
* Restore to original mask to avoid the impact on the caller if we
* promoted it to __GFP_HIGHMEM.
*/
@@ -2735,6 +2751,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
};
struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
int swappiness = mem_cgroup_swappiness(memcg);
+ unsigned long lru_pages;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2750,7 +2767,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
* will pick up pages from other mem cgroup's as well. We hack
* the priority and make it zero.
*/
- shrink_lruvec(lruvec, swappiness, &sc);
+ shrink_lruvec(lruvec, swappiness, &sc, &lru_pages);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2931,15 +2948,10 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
static bool kswapd_shrink_zone(struct zone *zone,
int classzone_idx,
struct scan_control *sc,
- unsigned long lru_pages,
unsigned long *nr_attempted)
{
int testorder = sc->order;
unsigned long balance_gap;
- struct reclaim_state *reclaim_state = current->reclaim_state;
- struct shrink_control shrink = {
- .gfp_mask = sc->gfp_mask,
- };
bool lowmem_pressure;
/* Reclaim above the high watermark. */
@@ -2974,13 +2986,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
balance_gap, classzone_idx))
return true;
- shrink_zone(zone, sc);
- nodes_clear(shrink.nodes_to_scan);
- node_set(zone_to_nid(zone), shrink.nodes_to_scan);
-
- reclaim_state->reclaimed_slab = 0;
- shrink_slab(&shrink, sc->nr_scanned, lru_pages);
- sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+ shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
/* Account for the number of pages attempted to reclaim */
*nr_attempted += sc->nr_to_reclaim;
@@ -3041,7 +3047,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
count_vm_event(PAGEOUTRUN);
do {
- unsigned long lru_pages = 0;
unsigned long nr_attempted = 0;
bool raise_priority = true;
bool pgdat_needs_compaction = (order > 0);
@@ -3101,8 +3106,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (!populated_zone(zone))
continue;
- lru_pages += zone_reclaimable_pages(zone);
-
/*
* If any zone is currently balanced then kswapd will
* not call compaction as it is expected that the
@@ -3158,8 +3161,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* that that high watermark would be met at 100%
* efficiency.
*/
- if (kswapd_shrink_zone(zone, end_zone, &sc,
- lru_pages, &nr_attempted))
+ if (kswapd_shrink_zone(zone, end_zone,
+ &sc, &nr_attempted))
raise_priority = false;
}
@@ -3388,7 +3391,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
if (!populated_zone(zone))
return;
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
return;
pgdat = zone->zone_pgdat;
if (pgdat->kswapd_max_order < order) {
@@ -3611,10 +3614,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.may_swap = 1,
};
- struct shrink_control shrink = {
- .gfp_mask = sc.gfp_mask,
- };
- unsigned long nr_slab_pages0, nr_slab_pages1;
cond_resched();
/*
@@ -3633,44 +3632,10 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* priorities until we have enough memory freed.
*/
do {
- shrink_zone(zone, &sc);
+ shrink_zone(zone, &sc, true);
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
- nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
- if (nr_slab_pages0 > zone->min_slab_pages) {
- /*
- * shrink_slab() does not currently allow us to determine how
- * many pages were freed in this zone. So we take the current
- * number of slab pages and shake the slab until it is reduced
- * by the same nr_pages that we used for reclaiming unmapped
- * pages.
- */
- nodes_clear(shrink.nodes_to_scan);
- node_set(zone_to_nid(zone), shrink.nodes_to_scan);
- for (;;) {
- unsigned long lru_pages = zone_reclaimable_pages(zone);
-
- /* No reclaimable slab or very low memory pressure */
- if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
- break;
-
- /* Freed enough memory */
- nr_slab_pages1 = zone_page_state(zone,
- NR_SLAB_RECLAIMABLE);
- if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
- break;
- }
-
- /*
- * Update nr_reclaimed by the number of slab pages we
- * reclaimed from this zone.
- */
- nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
- if (nr_slab_pages1 < nr_slab_pages0)
- sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
- }
-
p->reclaim_state = NULL;
current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
lockdep_clear_current_reclaim_state();
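
The pressure calculation in shrink_slabs() above scales a shrinker's freeable count by the LRU scan ratio and the shrinker's ->seeks cost: delta = 4 * nr_scanned / seeks * freeable / (nr_eligible + 1). A stand-alone sketch of that arithmetic; the numbers are only illustrative, and seeks of 2 corresponds to the kernel's usual DEFAULT_SEEKS:

#include <stdio.h>

/* Mirrors the delta computation visible in shrink_slabs(). */
static unsigned long long scan_pressure(unsigned long nr_scanned,
                                        unsigned long nr_eligible,
                                        unsigned long freeable,
                                        unsigned int seeks)
{
        unsigned long long delta = (4ULL * nr_scanned) / seeks;

        delta *= freeable;
        return delta / (nr_eligible + 1);       /* the do_div() step */
}

int main(void)
{
        /*
         * Scanning 1/8 of 32768 eligible LRU pages against 10000 freeable
         * objects asks the shrinker to scan roughly a quarter of them.
         */
        printf("%llu\n", scan_pressure(4096, 32768, 10000, 2)); /* 2499 */
        return 0;
}
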
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1b12d390dc68..1284f89fca08 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -22,6 +22,8 @@
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
+#include <linux/page_ext.h>
+#include <linux/page_owner.h>
#include "internal.h"
@@ -898,6 +900,7 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
+ "vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
@@ -1017,6 +1020,104 @@ static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
return 0;
}
+#ifdef CONFIG_PAGE_OWNER
+static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ pg_data_t *pgdat,
+ struct zone *zone)
+{
+ struct page *page;
+ struct page_ext *page_ext;
+ unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
+ unsigned long end_pfn = pfn + zone->spanned_pages;
+ unsigned long count[MIGRATE_TYPES] = { 0, };
+ int pageblock_mt, page_mt;
+ int i;
+
+ /* Scan block by block. First and last block may be incomplete */
+ pfn = zone->zone_start_pfn;
+
+ /*
+ * Walk the zone in pageblock_nr_pages steps. If a page block spans
+ * a zone boundary, it will be double counted between zones. This does
+ * not matter as the mixed block count will still be correct
+ */
+ for (; pfn < end_pfn; ) {
+ if (!pfn_valid(pfn)) {
+ pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ continue;
+ }
+
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
+ page = pfn_to_page(pfn);
+ pageblock_mt = get_pfnblock_migratetype(page, pfn);
+
+ for (; pfn < block_end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+ if (PageBuddy(page)) {
+ pfn += (1UL << page_order(page)) - 1;
+ continue;
+ }
+
+ if (PageReserved(page))
+ continue;
+
+ page_ext = lookup_page_ext(page);
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
+
+ page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
+ if (pageblock_mt != page_mt) {
+ if (is_migrate_cma(pageblock_mt))
+ count[MIGRATE_MOVABLE]++;
+ else
+ count[pageblock_mt]++;
+
+ pfn = block_end_pfn;
+ break;
+ }
+ pfn += (1UL << page_ext->order) - 1;
+ }
+ }
+
+ /* Print counts */
+ seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+ for (i = 0; i < MIGRATE_TYPES; i++)
+ seq_printf(m, "%12lu ", count[i]);
+ seq_putc(m, '\n');
+}
+#endif /* CONFIG_PAGE_OWNER */
+
+/*
+ * Print out the number of pageblocks for each migratetype that contain pages
+ * of other types. This gives an indication of how well fallbacks are being
+ * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
+ * to determine what is going on
+ */
+static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
+{
+#ifdef CONFIG_PAGE_OWNER
+ int mtype;
+
+ if (!page_owner_inited)
+ return;
+
+ drain_all_pages(NULL);
+
+ seq_printf(m, "\n%-23s", "Number of mixed blocks ");
+ for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
+ seq_printf(m, "%12s ", migratetype_names[mtype]);
+ seq_putc(m, '\n');
+
+ walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
+#endif /* CONFIG_PAGE_OWNER */
+}
+
/*
* This prints out statistics in relation to grouping pages by mobility.
* It is expensive to collect so do not constantly read the file.
@@ -1034,6 +1135,7 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
seq_putc(m, '\n');
pagetypeinfo_showfree(m, pgdat);
pagetypeinfo_showblockcount(m, pgdat);
+ pagetypeinfo_showmixedcount(m, pgdat);
return 0;
}
diff --git a/mm/zbud.c b/mm/zbud.c
index ecf1dbef6983..4e387bea702e 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -132,7 +132,7 @@ static struct zbud_ops zbud_zpool_ops = {
static void *zbud_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
- return zbud_create_pool(gfp, &zbud_zpool_ops);
+ return zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
}
static void zbud_zpool_destroy(void *pool)
@@ -619,5 +619,5 @@ module_init(init_zbud);
module_exit(exit_zbud);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 839a48c3ca27..b72403927aa4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -155,8 +155,6 @@
* (reason above)
*/
#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
-#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
- ZS_SIZE_CLASS_DELTA + 1)
/*
* We do not maintain any list for completely empty or full pages
@@ -171,6 +169,11 @@ enum fullness_group {
};
/*
+ * number of size_classes
+ */
+static int zs_size_classes;
+
+/*
* We assign a page to ZS_ALMOST_EMPTY fullness group when:
* n <= N / f, where
* n = number of allocated objects
@@ -214,7 +217,7 @@ struct link_free {
};
struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
+ struct size_class **size_class;
gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
@@ -468,7 +471,7 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
if (newfg == currfg)
goto out;
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
remove_zspage(page, class, currfg);
insert_zspage(page, class, newfg);
set_zspage_mapping(page, class_idx, newfg);
@@ -629,6 +632,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
struct page *next_page;
struct link_free *link;
unsigned int i = 1;
+ void *vaddr;
/*
* page->index stores offset of first object starting
@@ -639,8 +643,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
if (page != first_page)
page->index = off;
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
+ vaddr = kmap_atomic(page);
+ link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
link->next = obj_location_to_handle(page, i++);
@@ -654,7 +658,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
*/
next_page = get_next_page(page);
link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
@@ -784,7 +788,7 @@ static inline int __zs_cpu_up(struct mapping_area *area)
*/
if (area->vm_buf)
return 0;
- area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
+ area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
if (!area->vm_buf)
return -ENOMEM;
return 0;
@@ -792,8 +796,7 @@ static inline int __zs_cpu_up(struct mapping_area *area)
static inline void __zs_cpu_down(struct mapping_area *area)
{
- if (area->vm_buf)
- free_page((unsigned long)area->vm_buf);
+ kfree(area->vm_buf);
area->vm_buf = NULL;
}
@@ -881,13 +884,26 @@ static struct notifier_block zs_cpu_nb = {
.notifier_call = zs_cpu_notifier
};
-static void zs_exit(void)
+static int zs_register_cpu_notifier(void)
{
- int cpu;
+ int cpu, uninitialized_var(ret);
-#ifdef CONFIG_ZPOOL
- zpool_unregister_driver(&zs_zpool_driver);
-#endif
+ cpu_notifier_register_begin();
+
+ __register_cpu_notifier(&zs_cpu_nb);
+ for_each_online_cpu(cpu) {
+ ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+ if (notifier_to_errno(ret))
+ break;
+ }
+
+ cpu_notifier_register_done();
+ return notifier_to_errno(ret);
+}
+
+static void zs_unregister_cpu_notifier(void)
+{
+ int cpu;
cpu_notifier_register_begin();
@@ -898,93 +914,129 @@ static void zs_exit(void)
cpu_notifier_register_done();
}
-static int zs_init(void)
+static void init_zs_size_classes(void)
{
- int cpu, ret;
+ int nr;
- cpu_notifier_register_begin();
+ nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
+ if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
+ nr += 1;
- __register_cpu_notifier(&zs_cpu_nb);
- for_each_online_cpu(cpu) {
- ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
- if (notifier_to_errno(ret)) {
- cpu_notifier_register_done();
- goto fail;
- }
- }
+ zs_size_classes = nr;
+}
- cpu_notifier_register_done();
+static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
+{
+ return pages_per_zspage * PAGE_SIZE / size;
+}
-#ifdef CONFIG_ZPOOL
- zpool_register_driver(&zs_zpool_driver);
-#endif
+static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
+{
+ if (prev->pages_per_zspage != pages_per_zspage)
+ return false;
- return 0;
-fail:
- zs_exit();
- return notifier_to_errno(ret);
+ if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
+ != get_maxobj_per_zspage(size, pages_per_zspage))
+ return false;
+
+ return true;
}
+unsigned long zs_get_total_pages(struct zs_pool *pool)
+{
+ return atomic_long_read(&pool->pages_allocated);
+}
+EXPORT_SYMBOL_GPL(zs_get_total_pages);
+
/**
- * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
*
- * This function must be called before anything when using
- * the zsmalloc allocator.
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
*
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
*/
-struct zs_pool *zs_create_pool(gfp_t flags)
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm)
{
- int i, ovhd_size;
- struct zs_pool *pool;
+ struct page *page;
+ unsigned long obj_idx, off;
- ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
- pool = kzalloc(ovhd_size, GFP_KERNEL);
- if (!pool)
- return NULL;
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
+ struct page *pages[2];
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int size;
- struct size_class *class;
+ BUG_ON(!handle);
- size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
- if (size > ZS_MAX_ALLOC_SIZE)
- size = ZS_MAX_ALLOC_SIZE;
+ /*
+ * Because we use per-cpu mapping areas shared among the
+ * pools/users, we can't allow mapping in interrupt context
+	 * because it can corrupt another user's mappings.
+ */
+ BUG_ON(in_interrupt());
- class = &pool->size_class[i];
- class->size = size;
- class->index = i;
- spin_lock_init(&class->lock);
- class->pages_per_zspage = get_pages_per_zspage(size);
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+ area = &get_cpu_var(zs_map_area);
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ area->vm_addr = kmap_atomic(page);
+ return area->vm_addr + off;
}
- pool->flags = flags;
+ /* this object spans two pages */
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
- return pool;
+ return __zs_map_object(area, pages, off, class->size);
}
-EXPORT_SYMBOL_GPL(zs_create_pool);
+EXPORT_SYMBOL_GPL(zs_map_object);
-void zs_destroy_pool(struct zs_pool *pool)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
- int i;
+ struct page *page;
+ unsigned long obj_idx, off;
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
- int fg;
- struct size_class *class = &pool->size_class[i];
+ unsigned int class_idx;
+ enum fullness_group fg;
+ struct size_class *class;
+ struct mapping_area *area;
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size %db, fullness group %d\n",
- class->size, fg);
- }
- }
+ BUG_ON(!handle);
+
+ obj_handle_to_location(handle, &page, &obj_idx);
+ get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+ area = this_cpu_ptr(&zs_map_area);
+ if (off + class->size <= PAGE_SIZE)
+ kunmap_atomic(area->vm_addr);
+ else {
+ struct page *pages[2];
+
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ __zs_unmap_object(area, pages, off, class->size);
}
- kfree(pool);
+ put_cpu_var(zs_map_area);
}
-EXPORT_SYMBOL_GPL(zs_destroy_pool);
+EXPORT_SYMBOL_GPL(zs_unmap_object);
/**
* zs_malloc - Allocate block of given size from pool.
@@ -999,8 +1051,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
unsigned long obj;
struct link_free *link;
- int class_idx;
struct size_class *class;
+ void *vaddr;
struct page *first_page, *m_page;
unsigned long m_objidx, m_offset;
@@ -1008,9 +1060,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
- class_idx = get_size_class_index(size);
- class = &pool->size_class[class_idx];
- BUG_ON(class_idx != class->index);
+ class = pool->size_class[get_size_class_index(size)];
spin_lock(&class->lock);
first_page = find_get_zspage(class);
@@ -1031,11 +1081,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
+ vaddr = kmap_atomic(m_page);
+ link = (struct link_free *)vaddr + m_offset / sizeof(*link);
first_page->freelist = link->next;
memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->inuse++;
/* Now move the zspage to another fullness group, if required */
@@ -1051,6 +1101,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
+ void *vaddr;
int class_idx;
struct size_class *class;
@@ -1063,16 +1114,16 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
first_page = get_first_page(f_page);
get_zspage_mapping(first_page, &class_idx, &fullness);
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
spin_lock(&class->lock);
/* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
+ vaddr = kmap_atomic(f_page);
+ link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
first_page->inuse--;
@@ -1088,100 +1139,137 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
EXPORT_SYMBOL_GPL(zs_free);
/**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
*
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
*
- * This function returns with preemption and page faults disabled.
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
*/
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm)
+struct zs_pool *zs_create_pool(gfp_t flags)
{
- struct page *page;
- unsigned long obj_idx, off;
+ int i;
+ struct zs_pool *pool;
+ struct size_class *prev_class = NULL;
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
- struct page *pages[2];
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
- BUG_ON(!handle);
+ pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
+ GFP_KERNEL);
+ if (!pool->size_class) {
+ kfree(pool);
+ return NULL;
+ }
/*
- * Because we use per-cpu mapping areas shared among the
- * pools/users, we can't allow mapping in interrupt context
- * because it can corrupt another users mappings.
+	 * Iterate in reverse, because the size_class we want to merge into
+	 * must have a size greater than or equal to the current size.
*/
- BUG_ON(in_interrupt());
+ for (i = zs_size_classes - 1; i >= 0; i--) {
+ int size;
+ int pages_per_zspage;
+ struct size_class *class;
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+ if (size > ZS_MAX_ALLOC_SIZE)
+ size = ZS_MAX_ALLOC_SIZE;
+ pages_per_zspage = get_pages_per_zspage(size);
- area = &get_cpu_var(zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
- return area->vm_addr + off;
+ /*
+		 * A size_class handles normal zsmalloc operations such as
+		 * alloc/free for that size. Although it is natural to have
+		 * one size_class per size, memory utilization can improve when
+		 * several different sizes share a single size_class, as long
+		 * as they have the same characteristics. So, where possible,
+		 * we make size_class point to the previous size_class.
+ */
+ if (prev_class) {
+ if (can_merge(prev_class, size, pages_per_zspage)) {
+ pool->size_class[i] = prev_class;
+ continue;
+ }
+ }
+
+ class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
+ if (!class)
+ goto err;
+
+ class->size = size;
+ class->index = i;
+ class->pages_per_zspage = pages_per_zspage;
+ spin_lock_init(&class->lock);
+ pool->size_class[i] = class;
+
+ prev_class = class;
}
- /* this object spans two pages */
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ pool->flags = flags;
- return __zs_map_object(area, pages, off, class->size);
+ return pool;
+
+err:
+ zs_destroy_pool(pool);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(zs_map_object);
+EXPORT_SYMBOL_GPL(zs_create_pool);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+void zs_destroy_pool(struct zs_pool *pool)
{
- struct page *page;
- unsigned long obj_idx, off;
+ int i;
- unsigned int class_idx;
- enum fullness_group fg;
- struct size_class *class;
- struct mapping_area *area;
+ for (i = 0; i < zs_size_classes; i++) {
+ int fg;
+ struct size_class *class = pool->size_class[i];
- BUG_ON(!handle);
+ if (!class)
+ continue;
- obj_handle_to_location(handle, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ if (class->index != i)
+ continue;
- area = this_cpu_ptr(&zs_map_area);
- if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
- else {
- struct page *pages[2];
+ for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+ if (class->fullness_list[fg]) {
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
+ class->size, fg);
+ }
+ }
+ kfree(class);
+ }
- pages[0] = page;
- pages[1] = get_next_page(page);
- BUG_ON(!pages[1]);
+ kfree(pool->size_class);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
- __zs_unmap_object(area, pages, off, class->size);
+static int __init zs_init(void)
+{
+ int ret = zs_register_cpu_notifier();
+
+ if (ret) {
+ zs_unregister_cpu_notifier();
+ return ret;
}
- put_cpu_var(zs_map_area);
+
+ init_zs_size_classes();
+
+#ifdef CONFIG_ZPOOL
+ zpool_register_driver(&zs_zpool_driver);
+#endif
+ return 0;
}
-EXPORT_SYMBOL_GPL(zs_unmap_object);
-unsigned long zs_get_total_pages(struct zs_pool *pool)
+static void __exit zs_exit(void)
{
- return atomic_long_read(&pool->pages_allocated);
+#ifdef CONFIG_ZPOOL
+ zpool_unregister_driver(&zs_zpool_driver);
+#endif
+ zs_unregister_cpu_notifier();
}
-EXPORT_SYMBOL_GPL(zs_get_total_pages);
module_init(zs_init);
module_exit(zs_exit);
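
The size-class merging introduced above reuses one struct size_class whenever two sizes would produce identically shaped zspages. A stand-alone sketch of that check, with the signatures flattened to plain integers for illustration (the kernel's can_merge() takes the previous struct size_class * instead):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same calculation as the patch: how many objects fit in one zspage. */
static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
        return pages_per_zspage * PAGE_SIZE / size;
}

/* Classes merge only if their zspages have the same shape and packing. */
static bool can_merge(int prev_size, int prev_pages, int size, int pages)
{
        if (prev_pages != pages)
                return false;
        return get_maxobj_per_zspage(prev_size, prev_pages) ==
               get_maxobj_per_zspage(size, pages);
}

int main(void)
{
        /* 4080- and 4096-byte classes both fit one object per 1-page zspage. */
        printf("%d\n", can_merge(4096, 1, 4080, 1));    /* 1 */
        /* 32- and 48-byte classes pack 128 vs 85 objects, so they stay separate. */
        printf("%d\n", can_merge(48, 1, 32, 1));        /* 0 */
        return 0;
}
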
diff --git a/mm/zswap.c b/mm/zswap.c
index ea064c1a09ba..0cfce9bc51e4 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -149,11 +149,10 @@ static int __init zswap_comp_init(void)
return 0;
}
-static void zswap_comp_exit(void)
+static void __init zswap_comp_exit(void)
{
/* free percpu transforms */
- if (zswap_comp_pcpu_tfms)
- free_percpu(zswap_comp_pcpu_tfms);
+ free_percpu(zswap_comp_pcpu_tfms);
}
/*********************************
@@ -206,7 +205,7 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
**********************************/
static struct kmem_cache *zswap_entry_cache;
-static int zswap_entry_cache_create(void)
+static int __init zswap_entry_cache_create(void)
{
zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
return zswap_entry_cache == NULL;
@@ -389,7 +388,7 @@ static struct notifier_block zswap_cpu_notifier_block = {
.notifier_call = zswap_cpu_notifier
};
-static int zswap_cpu_init(void)
+static int __init zswap_cpu_init(void)
{
unsigned long cpu;
@@ -951,5 +950,5 @@ error:
late_initcall(init_zswap);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 142eef55c9e2..32ffec6ef164 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -15,9 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Jon's code is based on 6lowpan implementation for Contiki which is:
@@ -171,37 +168,6 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb,
return 0;
}
-static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
- struct net_device *dev, skb_delivery_cb deliver_skb)
-{
- struct sk_buff *new;
- int stat;
-
- new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
- GFP_ATOMIC);
- kfree_skb(skb);
-
- if (!new)
- return -ENOMEM;
-
- skb_push(new, sizeof(struct ipv6hdr));
- skb_reset_network_header(new);
- skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
-
- new->protocol = htons(ETH_P_IPV6);
- new->pkt_type = PACKET_HOST;
- new->dev = dev;
-
- raw_dump_table(__func__, "raw skb data dump before receiving",
- new->data, new->len);
-
- stat = deliver_skb(new, dev);
-
- kfree_skb(new);
-
- return stat;
-}
-
/* Uncompress function for multicast destination address,
* when M bit is set.
*/
@@ -332,10 +298,12 @@ err:
/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
-int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
- const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
- const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
- u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
+int
+lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
+ const u8 *saddr, const u8 saddr_type,
+ const u8 saddr_len, const u8 *daddr,
+ const u8 daddr_type, const u8 daddr_len,
+ u8 iphc0, u8 iphc1)
{
struct ipv6hdr hdr = {};
u8 tmp, num_context = 0;
@@ -348,7 +316,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
if (iphc1 & LOWPAN_IPHC_CID) {
pr_debug("CID flag is set, increase header with one\n");
if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
- goto drop;
+ return -EINVAL;
}
hdr.version = 6;
@@ -360,7 +328,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
*/
case 0: /* 00b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
- goto drop;
+ return -EINVAL;
memcpy(&hdr.flow_lbl, &skb->data[0], 3);
skb_pull(skb, 3);
@@ -373,7 +341,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
*/
case 2: /* 10b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
- goto drop;
+ return -EINVAL;
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
@@ -383,7 +351,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
*/
case 1: /* 01b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
- goto drop;
+ return -EINVAL;
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
@@ -400,7 +368,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
/* Next header is carried inline */
if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
- goto drop;
+ return -EINVAL;
pr_debug("NH flag is set, next header carried inline: %02x\n",
hdr.nexthdr);
@@ -412,7 +380,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
} else {
if (lowpan_fetch_skb(skb, &hdr.hop_limit,
sizeof(hdr.hop_limit)))
- goto drop;
+ return -EINVAL;
}
/* Extract SAM to the tmp variable */
@@ -431,7 +399,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
/* Check on error of previous branch */
if (err)
- goto drop;
+ return -EINVAL;
/* Extract DAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
@@ -446,7 +414,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
tmp);
if (err)
- goto drop;
+ return -EINVAL;
}
} else {
err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
@@ -454,28 +422,23 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
pr_debug("dest: stateless compression mode %d dest %pI6c\n",
tmp, &hdr.daddr);
if (err)
- goto drop;
+ return -EINVAL;
}
/* UDP data uncompression */
if (iphc0 & LOWPAN_IPHC_NH_C) {
struct udphdr uh;
- struct sk_buff *new;
+ const int needed = sizeof(struct udphdr) + sizeof(hdr);
if (uncompress_udp_header(skb, &uh))
- goto drop;
+ return -EINVAL;
/* replace the compressed UDP head by the uncompressed UDP
* header
*/
- new = skb_copy_expand(skb, sizeof(struct udphdr),
- skb_tailroom(skb), GFP_ATOMIC);
- kfree_skb(skb);
-
- if (!new)
- return -ENOMEM;
-
- skb = new;
+ err = skb_cow(skb, needed);
+ if (unlikely(err))
+ return err;
skb_push(skb, sizeof(struct udphdr));
skb_reset_transport_header(skb);
@@ -485,6 +448,10 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
(u8 *)&uh, sizeof(uh));
hdr.nexthdr = UIP_PROTO_UDP;
+ } else {
+ err = skb_cow(skb, sizeof(hdr));
+ if (unlikely(err))
+ return err;
}
hdr.payload_len = htons(skb->len);
@@ -497,15 +464,15 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
hdr.hop_limit, &hdr.daddr);
- raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
+ skb_push(skb, sizeof(hdr));
+ skb_reset_network_header(skb);
+ skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
- return skb_deliver(skb, &hdr, dev, deliver_skb);
+ raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
-drop:
- kfree_skb(skb);
- return -EINVAL;
+ return 0;
}
-EXPORT_SYMBOL_GPL(lowpan_process_data);
+EXPORT_SYMBOL_GPL(lowpan_header_decompress);
static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
const struct in6_addr *ipaddr,
@@ -535,9 +502,17 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb)
{
- struct udphdr *uh = udp_hdr(skb);
+ struct udphdr *uh;
u8 tmp;
+ /* In the case of RAW sockets the transport header is not set by
+ * the ip6 stack, so we must set it ourselves
+ */
+ if (skb->transport_header == skb->network_header)
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+ uh = udp_hdr(skb);
+
if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT) &&
((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
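
The lowpan_header_decompress() change above drops the skb_copy_expand()/kfree_skb() round trip and instead grows headroom in place with skb_cow() before pushing the rebuilt headers. A minimal sketch of that prepend pattern, condensed from the hunk above; the helper name prepend_ipv6_header is illustrative only:

#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Sketch: ensure headroom, then push and fill the IPv6 header in place. */
static int prepend_ipv6_header(struct sk_buff *skb, const struct ipv6hdr *hdr)
{
	int err;

	/* skb_cow() unshares the skb and guarantees the requested headroom. */
	err = skb_cow(skb, sizeof(*hdr));
	if (unlikely(err))
		return err;

	skb_push(skb, sizeof(*hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));

	return 0;
}
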
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 0d441ec8763e..118956448cf6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <net/arp.h>
@@ -150,7 +151,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
- skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
+ __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
}
skb->dev = vlan->real_dev;
@@ -578,11 +579,12 @@ static int vlan_dev_init(struct net_device *dev)
(1<<__LINK_STATE_PRESENT);
dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
NETIF_F_ALL_FCOE;
- dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
+ dev->features |= real_dev->vlan_features | NETIF_F_LLTX |
+ NETIF_F_GSO_SOFTWARE;
dev->gso_max_size = real_dev->gso_max_size;
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
@@ -647,7 +649,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
features |= NETIF_F_RXCSUM;
features = netdev_intersect_features(features, real_dev->features);
- features |= old_features & NETIF_F_SOFT_FEATURES;
+ features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
features |= NETIF_F_LLTX;
return features;
@@ -669,6 +671,23 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
+static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+
+ if (ops->get_ts_info) {
+ return ops->get_ts_info(vlan->real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
+
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct vlan_pcpu_stats *p;
@@ -752,6 +771,7 @@ static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = vlan_ethtool_get_ts_info,
};
static const struct net_device_ops vlan_netdev_ops = {
diff --git a/net/Kconfig b/net/Kconfig
index 99815b5454bf..ff9ffc17fa0e 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -228,6 +228,7 @@ source "net/vmw_vsock/Kconfig"
source "net/netlink/Kconfig"
source "net/mpls/Kconfig"
source "net/hsr/Kconfig"
+source "net/switchdev/Kconfig"
config RPS
boolean
diff --git a/net/Makefile b/net/Makefile
index 7ed1970074b0..38704bdf941a 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -5,8 +5,6 @@
# Rewritten to use lists instead of if-statements.
#
-obj-y := nonet.o
-
obj-$(CONFIG_NET) := socket.o core/
tmp-$(CONFIG_COMPAT) := compat.o
@@ -73,3 +71,6 @@ obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_NET_MPLS_GSO) += mpls/
obj-$(CONFIG_HSR) += hsr/
+ifneq ($(CONFIG_NET_SWITCHDEV),)
+obj-y += switchdev/
+endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index c00897f65a31..0d0766ea5ab1 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1659,7 +1659,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
SOCK_DEBUG(sk, "SK %p: Copy user data (%Zd bytes).\n", sk, len);
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err) {
kfree_skb(skb);
err = -EFAULT;
@@ -1758,7 +1758,7 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
- err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, offset, msg, copied);
if (!err && msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_at *, sat, msg->msg_name);
diff --git a/net/atm/common.c b/net/atm/common.c
index 6a765156a3f6..b84057e41bd6 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -554,7 +554,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
msg->msg_flags |= MSG_TRUNC;
}
- error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ error = skb_copy_datagram_msg(skb, 0, msg, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
@@ -570,15 +570,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
}
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len)
+ size_t size)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
int eff, error;
- const void __user *buff;
- int size;
lock_sock(sk);
if (sock->state != SS_CONNECTED) {
@@ -589,12 +587,6 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
error = -EISCONN;
goto out;
}
- if (m->msg_iovlen != 1) {
- error = -ENOSYS; /* fix this later @@@ */
- goto out;
- }
- buff = m->msg_iov->iov_base;
- size = m->msg_iov->iov_len;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -607,7 +599,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
error = 0;
goto out;
}
- if (size < 0 || size > vcc->qos.txtp.max_sdu) {
+ if (size > vcc->qos.txtp.max_sdu) {
error = -EMSGSIZE;
goto out;
}
@@ -639,7 +631,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
- if (copy_from_user(skb_put(skb, size), buff, size)) {
+ if (copy_from_iter(skb_put(skb, size), size, &m->msg_iter) != size) {
kfree_skb(skb);
error = -EFAULT;
goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c35c3f48fc0f..ca049a7c9287 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1549,7 +1549,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_reserve(skb, size - len);
/* User data follows immediately after the AX.25 data */
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
kfree_skb(skb);
goto out;
@@ -1634,7 +1634,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_copy_datagram_msg(skb, 0, msg, copied);
if (msg->msg_name) {
ax25_digi digi;
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index c2e0d14433df..76617be1e797 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -53,7 +53,7 @@ struct skb_cb {
* The list contains struct lowpan_dev elements.
*/
static LIST_HEAD(bt_6lowpan_devices);
-static DEFINE_RWLOCK(devices_lock);
+static DEFINE_SPINLOCK(devices_lock);
/* If psm is set to 0 (default value), then 6lowpan is disabled.
* Other values are used to indicate a Protocol Service Multiplexer
@@ -67,6 +67,7 @@ static struct l2cap_chan *listen_chan;
struct lowpan_peer {
struct list_head list;
+ struct rcu_head rcu;
struct l2cap_chan *chan;
/* peer addresses in various formats */
@@ -93,13 +94,14 @@ static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
- list_add(&peer->list, &dev->peers);
+ list_add_rcu(&peer->list, &dev->peers);
atomic_inc(&dev->peer_count);
}
static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
- list_del(&peer->list);
+ list_del_rcu(&peer->list);
+ kfree_rcu(peer, rcu);
module_put(THIS_MODULE);
@@ -114,31 +116,37 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
bdaddr_t *ba, __u8 type)
{
- struct lowpan_peer *peer, *tmp;
+ struct lowpan_peer *peer;
BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
ba, type);
- list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(peer, &dev->peers, list) {
BT_DBG("dst addr %pMR dst type %d",
&peer->chan->dst, peer->chan->dst_type);
if (bacmp(&peer->chan->dst, ba))
continue;
- if (type == peer->chan->dst_type)
+ if (type == peer->chan->dst_type) {
+ rcu_read_unlock();
return peer;
+ }
}
+ rcu_read_unlock();
+
return NULL;
}
-static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
- struct l2cap_chan *chan)
+static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
+ struct l2cap_chan *chan)
{
- struct lowpan_peer *peer, *tmp;
+ struct lowpan_peer *peer;
- list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ list_for_each_entry_rcu(peer, &dev->peers, list) {
if (peer->chan == chan)
return peer;
}
@@ -146,12 +154,12 @@ static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
return NULL;
}
-static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
- struct l2cap_conn *conn)
+static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
+ struct l2cap_conn *conn)
{
- struct lowpan_peer *peer, *tmp;
+ struct lowpan_peer *peer;
- list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ list_for_each_entry_rcu(peer, &dev->peers, list) {
if (peer->chan->conn == conn)
return peer;
}
@@ -163,7 +171,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
struct in6_addr *daddr,
struct sk_buff *skb)
{
- struct lowpan_peer *peer, *tmp;
+ struct lowpan_peer *peer;
struct in6_addr *nexthop;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
int count = atomic_read(&dev->peer_count);
@@ -174,9 +182,13 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
* send the packet. If only one peer exists, then we can send the
* packet right away.
*/
- if (count == 1)
- return list_first_entry(&dev->peers, struct lowpan_peer,
- list);
+ if (count == 1) {
+ rcu_read_lock();
+ peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
+ list);
+ rcu_read_unlock();
+ return peer;
+ }
if (!rt) {
nexthop = &lowpan_cb(skb)->gw;
@@ -195,53 +207,57 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
BT_DBG("gw %pI6c", nexthop);
- list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(peer, &dev->peers, list) {
BT_DBG("dst addr %pMR dst type %d ip %pI6c",
&peer->chan->dst, peer->chan->dst_type,
&peer->peer_addr);
- if (!ipv6_addr_cmp(&peer->peer_addr, nexthop))
+ if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
+ rcu_read_unlock();
return peer;
+ }
}
+ rcu_read_unlock();
+
return NULL;
}
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
- struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *entry;
struct lowpan_peer *peer = NULL;
- unsigned long flags;
- read_lock_irqsave(&devices_lock, flags);
+ rcu_read_lock();
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
- peer = peer_lookup_conn(entry, conn);
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
+ peer = __peer_lookup_conn(entry, conn);
if (peer)
break;
}
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_unlock();
return peer;
}
static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
- struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *entry;
struct lowpan_dev *dev = NULL;
- unsigned long flags;
- read_lock_irqsave(&devices_lock, flags);
+ rcu_read_lock();
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
if (conn->hcon->hdev == entry->hdev) {
dev = entry;
break;
}
}
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_unlock();
return dev;
}
@@ -249,59 +265,49 @@ static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
struct sk_buff *skb_cp;
- int ret;
skb_cp = skb_copy(skb, GFP_ATOMIC);
if (!skb_cp)
- return -ENOMEM;
-
- ret = netif_rx(skb_cp);
- if (ret < 0) {
- BT_DBG("receive skb %d", ret);
return NET_RX_DROP;
- }
- return ret;
+ return netif_rx(skb_cp);
}
-static int process_data(struct sk_buff *skb, struct net_device *netdev,
- struct l2cap_chan *chan)
+static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
+ struct l2cap_chan *chan)
{
const u8 *saddr, *daddr;
u8 iphc0, iphc1;
struct lowpan_dev *dev;
struct lowpan_peer *peer;
- unsigned long flags;
dev = lowpan_dev(netdev);
- read_lock_irqsave(&devices_lock, flags);
- peer = peer_lookup_chan(dev, chan);
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_lock();
+ peer = __peer_lookup_chan(dev, chan);
+ rcu_read_unlock();
if (!peer)
- goto drop;
+ return -EINVAL;
saddr = peer->eui64_addr;
daddr = dev->netdev->dev_addr;
/* at least two bytes will be used for the encoding */
if (skb->len < 2)
- goto drop;
+ return -EINVAL;
if (lowpan_fetch_skb_u8(skb, &iphc0))
- goto drop;
+ return -EINVAL;
if (lowpan_fetch_skb_u8(skb, &iphc1))
- goto drop;
+ return -EINVAL;
- return lowpan_process_data(skb, netdev,
- saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
- daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
- iphc0, iphc1, give_skb_to_upper);
+ return lowpan_header_decompress(skb, netdev,
+ saddr, IEEE802154_ADDR_LONG,
+ EUI64_ADDR_LEN, daddr,
+ IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
+ iphc0, iphc1);
-drop:
- kfree_skb(skb);
- return -EINVAL;
}
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
@@ -316,6 +322,10 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
if (dev->type != ARPHRD_6LOWPAN)
goto drop;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto drop;
+
/* check that it's our buffer */
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
/* Copy the packet so that the IPv6 header is
@@ -340,8 +350,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
- kfree_skb(local_skb);
- kfree_skb(skb);
+ consume_skb(local_skb);
+ consume_skb(skb);
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
@@ -349,14 +359,27 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
if (!local_skb)
goto drop;
- ret = process_data(local_skb, dev, chan);
- if (ret != NET_RX_SUCCESS)
+ ret = iphc_decompress(local_skb, dev, chan);
+ if (ret < 0) {
+ kfree_skb(local_skb);
+ goto drop;
+ }
+
+ local_skb->protocol = htons(ETH_P_IPV6);
+ local_skb->pkt_type = PACKET_HOST;
+ local_skb->dev = dev;
+
+ if (give_skb_to_upper(local_skb, dev)
+ != NET_RX_SUCCESS) {
+ kfree_skb(local_skb);
goto drop;
+ }
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
- kfree_skb(skb);
+ consume_skb(local_skb);
+ consume_skb(skb);
break;
default:
break;
@@ -443,7 +466,6 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
if (ipv6_addr_is_multicast(&ipv6_daddr)) {
lowpan_cb(skb)->chan = NULL;
} else {
- unsigned long flags;
u8 addr_type;
/* Get destination BT device from skb.
@@ -454,19 +476,14 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
addr_type, &ipv6_daddr);
- read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_ba(dev, &addr, addr_type);
- read_unlock_irqrestore(&devices_lock, flags);
-
if (!peer) {
/* The packet might be sent to 6lowpan interface
* because of routing (either via default route
* or user set route) so get peer according to
* the destination address.
*/
- read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
- read_unlock_irqrestore(&devices_lock, flags);
if (!peer) {
BT_DBG("no such peer %pMR found", &addr);
return -ENOENT;
@@ -520,12 +537,12 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
*/
chan->data = skb;
- memset(&msg, 0, sizeof(msg));
- msg.msg_iov = (struct iovec *) &iv;
- msg.msg_iovlen = 1;
iv.iov_base = skb->data;
iv.iov_len = skb->len;
+ memset(&msg, 0, sizeof(msg));
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);
+
err = l2cap_chan_send(chan, &msg, skb->len);
if (err > 0) {
netdev->stats.tx_bytes += err;
@@ -549,14 +566,13 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
struct sk_buff *local_skb;
- struct lowpan_dev *entry, *tmp;
- unsigned long flags;
+ struct lowpan_dev *entry;
int err = 0;
- read_lock_irqsave(&devices_lock, flags);
+ rcu_read_lock();
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
- struct lowpan_peer *pentry, *ptmp;
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
+ struct lowpan_peer *pentry;
struct lowpan_dev *dev;
if (entry->netdev != netdev)
@@ -564,7 +580,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
dev = lowpan_dev(entry->netdev);
- list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
+ list_for_each_entry_rcu(pentry, &dev->peers, list) {
int ret;
local_skb = skb_clone(skb, GFP_ATOMIC);
@@ -581,7 +597,7 @@ static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
}
}
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_unlock();
return err;
}
@@ -591,17 +607,13 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
int err = 0;
bdaddr_t addr;
u8 addr_type;
- struct sk_buff *tmpskb;
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
- tmpskb = skb_unshare(skb, GFP_ATOMIC);
- if (!tmpskb) {
- kfree_skb(skb);
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
return NET_XMIT_DROP;
- }
- skb = tmpskb;
/* Return values from setup_header()
* <0 - error, packet is dropped
@@ -638,7 +650,26 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
return err < 0 ? NET_XMIT_DROP : err;
}
+static struct lock_class_key bt_tx_busylock;
+static struct lock_class_key bt_netdev_xmit_lock_key;
+
+static void bt_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key);
+}
+
+static int bt_dev_init(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
+ dev->qdisc_tx_busylock = &bt_tx_busylock;
+
+ return 0;
+}
+
static const struct net_device_ops netdev_ops = {
+ .ndo_init = bt_dev_init,
.ndo_start_xmit = bt_xmit,
};
@@ -783,7 +814,6 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
struct lowpan_dev *dev)
{
struct lowpan_peer *peer;
- unsigned long flags;
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
@@ -806,10 +836,10 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
*/
set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);
- write_lock_irqsave(&devices_lock, flags);
+ spin_lock(&devices_lock);
INIT_LIST_HEAD(&peer->list);
peer_add(dev, peer);
- write_unlock_irqrestore(&devices_lock, flags);
+ spin_unlock(&devices_lock);
/* Notifying peers about us needs to be done without locks held */
INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
@@ -822,7 +852,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
{
struct net_device *netdev;
int err = 0;
- unsigned long flags;
netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
NET_NAME_UNKNOWN, netdev_setup);
@@ -852,10 +881,10 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
(*dev)->hdev = chan->conn->hcon->hdev;
INIT_LIST_HEAD(&(*dev)->peers);
- write_lock_irqsave(&devices_lock, flags);
+ spin_lock(&devices_lock);
INIT_LIST_HEAD(&(*dev)->list);
- list_add(&(*dev)->list, &bt_6lowpan_devices);
- write_unlock_irqrestore(&devices_lock, flags);
+ list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
+ spin_unlock(&devices_lock);
return 0;
@@ -909,11 +938,10 @@ static void delete_netdev(struct work_struct *work)
static void chan_close_cb(struct l2cap_chan *chan)
{
- struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *entry;
struct lowpan_dev *dev = NULL;
struct lowpan_peer *peer;
int err = -ENOENT;
- unsigned long flags;
bool last = false, removed = true;
BT_DBG("chan %p conn %p", chan, chan->conn);
@@ -928,11 +956,11 @@ static void chan_close_cb(struct l2cap_chan *chan)
removed = false;
}
- write_lock_irqsave(&devices_lock, flags);
+ spin_lock(&devices_lock);
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
dev = lowpan_dev(entry->netdev);
- peer = peer_lookup_chan(dev, chan);
+ peer = __peer_lookup_chan(dev, chan);
if (peer) {
last = peer_del(dev, peer);
err = 0;
@@ -943,13 +971,12 @@ static void chan_close_cb(struct l2cap_chan *chan)
atomic_read(&chan->kref.refcount));
l2cap_chan_put(chan);
- kfree(peer);
break;
}
}
if (!err && last && dev && !atomic_read(&dev->peer_count)) {
- write_unlock_irqrestore(&devices_lock, flags);
+ spin_unlock(&devices_lock);
cancel_delayed_work_sync(&dev->notify_peers);
@@ -960,7 +987,7 @@ static void chan_close_cb(struct l2cap_chan *chan)
schedule_work(&entry->delete_netdev);
}
} else {
- write_unlock_irqrestore(&devices_lock, flags);
+ spin_unlock(&devices_lock);
}
return;
@@ -1023,7 +1050,6 @@ static const struct l2cap_ops bt_6lowpan_chan_ops = {
.suspend = chan_suspend_cb,
.get_sndtimeo = chan_get_sndtimeo_cb,
.alloc_skb = chan_alloc_skb_cb,
- .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
.teardown = l2cap_chan_no_teardown,
.defer = l2cap_chan_no_defer,
@@ -1103,6 +1129,8 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
pchan->state = BT_LISTEN;
pchan->src_type = BDADDR_LE_PUBLIC;
+ atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
+
BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
pchan->src_type);
@@ -1152,10 +1180,9 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
static void disconnect_all_peers(void)
{
- struct lowpan_dev *entry, *tmp_dev;
+ struct lowpan_dev *entry;
struct lowpan_peer *peer, *tmp_peer, *new_peer;
struct list_head peers;
- unsigned long flags;
INIT_LIST_HEAD(&peers);
@@ -1164,10 +1191,10 @@ static void disconnect_all_peers(void)
* with the same list at the same time.
*/
- read_lock_irqsave(&devices_lock, flags);
+ rcu_read_lock();
- list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
- list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
+ list_for_each_entry_rcu(peer, &entry->peers, list) {
new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
if (!new_peer)
break;
@@ -1179,26 +1206,36 @@ static void disconnect_all_peers(void)
}
}
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_unlock();
+ spin_lock(&devices_lock);
list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
l2cap_chan_close(peer->chan, ENOENT);
- kfree(peer);
+
+ list_del_rcu(&peer->list);
+ kfree_rcu(peer, rcu);
+
+ module_put(THIS_MODULE);
}
+ spin_unlock(&devices_lock);
}
-static int lowpan_psm_set(void *data, u64 val)
-{
+struct set_psm {
+ struct work_struct work;
u16 psm;
+};
- psm = val;
- if (psm == 0 || psm_6lowpan != psm)
+static void do_psm_set(struct work_struct *work)
+{
+ struct set_psm *set_psm = container_of(work, struct set_psm, work);
+
+ if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm)
/* Disconnect existing connections if 6lowpan is
* disabled (psm = 0), or if psm changes.
*/
disconnect_all_peers();
- psm_6lowpan = psm;
+ psm_6lowpan = set_psm->psm;
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
@@ -1207,6 +1244,22 @@ static int lowpan_psm_set(void *data, u64 val)
listen_chan = bt_6lowpan_listen();
+ kfree(set_psm);
+}
+
+static int lowpan_psm_set(void *data, u64 val)
+{
+ struct set_psm *set_psm;
+
+ set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL);
+ if (!set_psm)
+ return -ENOMEM;
+
+ set_psm->psm = val;
+ INIT_WORK(&set_psm->work, do_psm_set);
+
+ schedule_work(&set_psm->work);
+
return 0;
}
@@ -1288,19 +1341,18 @@ static ssize_t lowpan_control_write(struct file *fp,
static int lowpan_control_show(struct seq_file *f, void *ptr)
{
- struct lowpan_dev *entry, *tmp_dev;
- struct lowpan_peer *peer, *tmp_peer;
- unsigned long flags;
+ struct lowpan_dev *entry;
+ struct lowpan_peer *peer;
- read_lock_irqsave(&devices_lock, flags);
+ spin_lock(&devices_lock);
- list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
- list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
+ list_for_each_entry(entry, &bt_6lowpan_devices, list) {
+ list_for_each_entry(peer, &entry->peers, list)
seq_printf(f, "%pMR (type %u)\n",
&peer->chan->dst, peer->chan->dst_type);
}
- read_unlock_irqrestore(&devices_lock, flags);
+ spin_unlock(&devices_lock);
return 0;
}
@@ -1322,7 +1374,6 @@ static void disconnect_devices(void)
{
struct lowpan_dev *entry, *tmp, *new_dev;
struct list_head devices;
- unsigned long flags;
INIT_LIST_HEAD(&devices);
@@ -1331,9 +1382,9 @@ static void disconnect_devices(void)
* devices list.
*/
- read_lock_irqsave(&devices_lock, flags);
+ rcu_read_lock();
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
if (!new_dev)
break;
@@ -1341,10 +1392,10 @@ static void disconnect_devices(void)
new_dev->netdev = entry->netdev;
INIT_LIST_HEAD(&new_dev->list);
- list_add(&new_dev->list, &devices);
+ list_add_rcu(&new_dev->list, &devices);
}
- read_unlock_irqrestore(&devices_lock, flags);
+ rcu_read_unlock();
list_for_each_entry_safe(entry, tmp, &devices, list) {
ifdown(entry->netdev);
@@ -1359,17 +1410,15 @@ static int device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct lowpan_dev *entry, *tmp;
- unsigned long flags;
+ struct lowpan_dev *entry;
if (netdev->type != ARPHRD_6LOWPAN)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
- write_lock_irqsave(&devices_lock, flags);
- list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
- list) {
+ spin_lock(&devices_lock);
+ list_for_each_entry(entry, &bt_6lowpan_devices, list) {
if (entry->netdev == netdev) {
BT_DBG("Unregistered netdev %s %p",
netdev->name, netdev);
@@ -1378,7 +1427,7 @@ static int device_event(struct notifier_block *unused,
break;
}
}
- write_unlock_irqrestore(&devices_lock, flags);
+ spin_unlock(&devices_lock);
break;
}
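
Most of the net/bluetooth/6lowpan.c changes above follow one pattern: devices_lock becomes a plain spinlock taken only by writers, readers walk the lists under rcu_read_lock(), and removed peers are freed through kfree_rcu(). A minimal sketch of that pattern with illustrative names (struct item, items_lock and the helpers are not from this file):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	struct rcu_head rcu;
	int key;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);	/* serializes writers only */

static void item_add(struct item *it)
{
	spin_lock(&items_lock);
	list_add_rcu(&it->list, &items);
	spin_unlock(&items_lock);
}

static void item_del(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->list);
	spin_unlock(&items_lock);
	kfree_rcu(it, rcu);		/* actual free waits for a grace period */
}

static struct item *item_lookup(int key)
{
	struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, list) {
		if (it->key == key) {
			rcu_read_unlock();
			return it;
		}
	}
	rcu_read_unlock();

	return NULL;
}
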
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 600fb29288f4..29bcafc41adf 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -10,6 +10,7 @@ menuconfig BT
select CRYPTO
select CRYPTO_BLKCIPHER
select CRYPTO_AES
+ select CRYPTO_CMAC
select CRYPTO_ECB
select CRYPTO_SHA256
help
@@ -39,11 +40,10 @@ menuconfig BT
to Bluetooth kernel modules are provided in the BlueZ packages. For
more information, see <http://www.bluez.org/>.
-config BT_6LOWPAN
- tristate "Bluetooth 6LoWPAN support"
- depends on BT && 6LOWPAN
- help
- IPv6 compression over Bluetooth Low Energy.
+config BT_BREDR
+ bool "Bluetooth Classic (BR/EDR) features"
+ depends on BT
+ default y
source "net/bluetooth/rfcomm/Kconfig"
@@ -53,4 +53,15 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
+config BT_LE
+ bool "Bluetooth Low Energy (LE) features"
+ depends on BT
+ default y
+
+config BT_6LOWPAN
+ tristate "Bluetooth 6LoWPAN support"
+ depends on BT_LE && 6LOWPAN
+ help
+ IPv6 compression over Bluetooth Low Energy.
+
source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 886e9aa3ecf1..a5432a6a0ae6 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,6 +13,6 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
- a2mp.o amp.o
+ a2mp.o amp.o ecc.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 5dcade511fdb..cedfbda15dad 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -60,8 +60,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
memset(&msg, 0, sizeof(msg));
- msg.msg_iov = (struct iovec *) &iv;
- msg.msg_iovlen = 1;
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, total_len);
l2cap_chan_send(chan, &msg, total_len);
@@ -720,7 +719,6 @@ static const struct l2cap_ops a2mp_chan_ops = {
.resume = l2cap_chan_no_resume,
.set_shutdown = l2cap_chan_no_set_shutdown,
.get_sndtimeo = l2cap_chan_no_get_sndtimeo,
- .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
};
static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 339c74ad4553..012e3b03589d 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,7 +31,7 @@
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
-#define VERSION "2.19"
+#define VERSION "2.20"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
@@ -237,7 +237,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err == 0) {
sock_recv_ts_and_drops(msg, sk, skb);
@@ -328,7 +328,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
chunk = min_t(unsigned int, skb->len, size);
- if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
+ if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 2640d78f30b8..ee016f039100 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -134,6 +134,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
{
struct crypto_shash *tfm;
+ struct shash_desc *shash;
int ret;
if (!ksize)
@@ -148,18 +149,24 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
ret = crypto_shash_setkey(tfm, key, ksize);
if (ret) {
BT_DBG("crypto_ahash_setkey failed: err %d", ret);
- } else {
- char desc[sizeof(struct shash_desc) +
- crypto_shash_descsize(tfm)] CRYPTO_MINALIGN_ATTR;
- struct shash_desc *shash = (struct shash_desc *)desc;
-
- shash->tfm = tfm;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ goto failed;
+ }
- ret = crypto_shash_digest(shash, plaintext, psize,
- output);
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto failed;
}
+ shash->tfm = tfm;
+ shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_digest(shash, plaintext, psize, output);
+
+ kfree(shash);
+
+failed:
crypto_free_shash(tfm);
return ret;
}
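
The hmac_sha256() change above replaces the on-stack, variable-length shash_desc with a heap allocation sized by crypto_shash_descsize(). A hedged sketch of the resulting call sequence, assuming an "hmac(sha256)" transform is available; the helper name hmac_sha256_sketch is illustrative:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int hmac_sha256_sketch(const u8 *key, unsigned int klen,
			      const u8 *msg, unsigned int mlen, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, klen);
	if (ret)
		goto out_free_tfm;

	/* Descriptor size depends on the transform, so allocate it dynamically. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_digest(desc, msg, mlen, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return ret;
}
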
diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig
index 71791fc9f6b1..9b70317c49dc 100644
--- a/net/bluetooth/bnep/Kconfig
+++ b/net/bluetooth/bnep/Kconfig
@@ -1,6 +1,6 @@
config BT_BNEP
tristate "BNEP protocol support"
- depends on BT
+ depends on BT_BREDR
select CRC32
help
BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index 94cbf42ce155..939da0fbdd88 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,6 +1,6 @@
config BT_CMTP
tristate "CMTP protocol support"
- depends on BT && ISDN_CAPI
+ depends on BT_BREDR && ISDN_CAPI
help
CMTP (CAPI Message Transport Protocol) is a transport layer
for CAPI messages. CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/ecc.c b/net/bluetooth/ecc.c
new file mode 100644
index 000000000000..e1709f8467ac
--- /dev/null
+++ b/net/bluetooth/ecc.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/random.h>
+
+#include "ecc.h"
+
+/* 256-bit curve */
+#define ECC_BYTES 32
+
+#define MAX_TRIES 16
+
+/* Number of u64's needed */
+#define NUM_ECC_DIGITS (ECC_BYTES / 8)
+
+struct ecc_point {
+ u64 x[NUM_ECC_DIGITS];
+ u64 y[NUM_ECC_DIGITS];
+};
+
+typedef struct {
+ u64 m_low;
+ u64 m_high;
+} uint128_t;
+
+#define CURVE_P_32 { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, \
+ 0x0000000000000000ull, 0xFFFFFFFF00000001ull }
+
+#define CURVE_G_32 { \
+ { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull, \
+ 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull }, \
+ { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull, \
+ 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull } \
+}
+
+#define CURVE_N_32 { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, \
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull }
+
+static u64 curve_p[NUM_ECC_DIGITS] = CURVE_P_32;
+static struct ecc_point curve_g = CURVE_G_32;
+static u64 curve_n[NUM_ECC_DIGITS] = CURVE_N_32;
+
+static void vli_clear(u64 *vli)
+{
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++)
+ vli[i] = 0;
+}
+
+/* Returns true if vli == 0, false otherwise. */
+static bool vli_is_zero(const u64 *vli)
+{
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ if (vli[i])
+ return false;
+ }
+
+ return true;
+}
+
+/* Returns nonzero if bit 'bit' of vli is set. */
+static u64 vli_test_bit(const u64 *vli, unsigned int bit)
+{
+ return (vli[bit / 64] & ((u64) 1 << (bit % 64)));
+}
+
+/* Counts the number of 64-bit "digits" in vli. */
+static unsigned int vli_num_digits(const u64 *vli)
+{
+ int i;
+
+ /* Search from the end until we find a non-zero digit.
+ * We do it in reverse because we expect that most digits will
+ * be nonzero.
+ */
+ for (i = NUM_ECC_DIGITS - 1; i >= 0 && vli[i] == 0; i--);
+
+ return (i + 1);
+}
+
+/* Counts the number of bits required for vli. */
+static unsigned int vli_num_bits(const u64 *vli)
+{
+ unsigned int i, num_digits;
+ u64 digit;
+
+ num_digits = vli_num_digits(vli);
+ if (num_digits == 0)
+ return 0;
+
+ digit = vli[num_digits - 1];
+ for (i = 0; digit; i++)
+ digit >>= 1;
+
+ return ((num_digits - 1) * 64 + i);
+}
+
+/* Sets dest = src. */
+static void vli_set(u64 *dest, const u64 *src)
+{
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++)
+ dest[i] = src[i];
+}
+
+/* Returns sign of left - right. */
+static int vli_cmp(const u64 *left, const u64 *right)
+{
+ int i;
+
+ for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) {
+ if (left[i] > right[i])
+ return 1;
+ else if (left[i] < right[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Computes result = in << shift, returning carry. Can modify in place
+ * (if result == in). 0 < shift < 64.
+ */
+static u64 vli_lshift(u64 *result, const u64 *in,
+ unsigned int shift)
+{
+ u64 carry = 0;
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ u64 temp = in[i];
+
+ result[i] = (temp << shift) | carry;
+ carry = temp >> (64 - shift);
+ }
+
+ return carry;
+}
+
+/* Computes vli = vli >> 1. */
+static void vli_rshift1(u64 *vli)
+{
+ u64 *end = vli;
+ u64 carry = 0;
+
+ vli += NUM_ECC_DIGITS;
+
+ while (vli-- > end) {
+ u64 temp = *vli;
+ *vli = (temp >> 1) | carry;
+ carry = temp << 63;
+ }
+}
+
+/* Computes result = left + right, returning carry. Can modify in place. */
+static u64 vli_add(u64 *result, const u64 *left,
+ const u64 *right)
+{
+ u64 carry = 0;
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ u64 sum;
+
+ sum = left[i] + right[i] + carry;
+ if (sum != left[i])
+ carry = (sum < left[i]);
+
+ result[i] = sum;
+ }
+
+ return carry;
+}
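
The `if (sum != left[i])` guard in vli_add() above is what keeps the carry alive in the corner case where right[i] is all ones and the incoming carry is 1: the addition then wraps back to left[i], and recomputing the carry as (sum < left[i]) would wrongly clear it. A standalone demonstration of that single digit step (plain user-space C, not kernel code):

/* Build with: gcc -O2 carry_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

/* One step of the vli_add() inner loop. */
static uint64_t add_digit(uint64_t left, uint64_t right, uint64_t *carry)
{
	uint64_t sum = left + right + *carry;

	if (sum != left)	/* keep the old carry when right + carry wrapped to 0 */
		*carry = (sum < left);

	return sum;
}

int main(void)
{
	uint64_t carry = 1;	/* incoming carry from the previous digit */
	uint64_t sum = add_digit(5, UINT64_MAX, &carry);

	/* 5 + 0xffff...ff + 1 == 5 with a carry out of 1. */
	printf("sum=%llu carry=%llu\n",
	       (unsigned long long)sum, (unsigned long long)carry);
	return 0;
}
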
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+static u64 vli_sub(u64 *result, const u64 *left, const u64 *right)
+{
+ u64 borrow = 0;
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ u64 diff;
+
+ diff = left[i] - right[i] - borrow;
+ if (diff != left[i])
+ borrow = (diff > left[i]);
+
+ result[i] = diff;
+ }
+
+ return borrow;
+}
+
+static uint128_t mul_64_64(u64 left, u64 right)
+{
+ u64 a0 = left & 0xffffffffull;
+ u64 a1 = left >> 32;
+ u64 b0 = right & 0xffffffffull;
+ u64 b1 = right >> 32;
+ u64 m0 = a0 * b0;
+ u64 m1 = a0 * b1;
+ u64 m2 = a1 * b0;
+ u64 m3 = a1 * b1;
+ uint128_t result;
+
+ m2 += (m0 >> 32);
+ m2 += m1;
+
+ /* Overflow */
+ if (m2 < m1)
+ m3 += 0x100000000ull;
+
+ result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
+ result.m_high = m3 + (m2 >> 32);
+
+ return result;
+}
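
mul_64_64() above assembles a 128-bit product from four 32x32-bit partial products, carrying out of the middle column by hand. A standalone cross-check against the compiler-provided unsigned __int128 (assumed available on 64-bit gcc/clang; the kernel code itself does not rely on it):

/* Build with: gcc -O2 mul128_check.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t lo, hi; };

static struct u128 mul_64_64(uint64_t left, uint64_t right)
{
	uint64_t a0 = left & 0xffffffffull, a1 = left >> 32;
	uint64_t b0 = right & 0xffffffffull, b1 = right >> 32;
	uint64_t m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;
	struct u128 r;

	m2 += (m0 >> 32);
	m2 += m1;
	if (m2 < m1)			/* carry out of the middle column */
		m3 += 0x100000000ull;

	r.lo = (m0 & 0xffffffffull) | (m2 << 32);
	r.hi = m3 + (m2 >> 32);
	return r;
}

int main(void)
{
	uint64_t a = 0xdeadbeefcafebabeull, b = 0xfeedfacef00dd00dull;
	struct u128 r = mul_64_64(a, b);
	unsigned __int128 ref = (unsigned __int128)a * b;

	printf("match: %d\n",
	       r.lo == (uint64_t)ref && r.hi == (uint64_t)(ref >> 64));
	return 0;
}
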
+
+static uint128_t add_128_128(uint128_t a, uint128_t b)
+{
+ uint128_t result;
+
+ result.m_low = a.m_low + b.m_low;
+ result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
+
+ return result;
+}
+
+static void vli_mult(u64 *result, const u64 *left, const u64 *right)
+{
+ uint128_t r01 = { 0, 0 };
+ u64 r2 = 0;
+ unsigned int i, k;
+
+ /* Compute each digit of result in sequence, maintaining the
+ * carries.
+ */
+ for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) {
+ unsigned int min;
+
+ if (k < NUM_ECC_DIGITS)
+ min = 0;
+ else
+ min = (k + 1) - NUM_ECC_DIGITS;
+
+ for (i = min; i <= k && i < NUM_ECC_DIGITS; i++) {
+ uint128_t product;
+
+ product = mul_64_64(left[i], right[k - i]);
+
+ r01 = add_128_128(r01, product);
+ r2 += (r01.m_high < product.m_high);
+ }
+
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = r2;
+ r2 = 0;
+ }
+
+ result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low;
+}
+
+static void vli_square(u64 *result, const u64 *left)
+{
+ uint128_t r01 = { 0, 0 };
+ u64 r2 = 0;
+ int i, k;
+
+ for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) {
+ unsigned int min;
+
+ if (k < NUM_ECC_DIGITS)
+ min = 0;
+ else
+ min = (k + 1) - NUM_ECC_DIGITS;
+
+ for (i = min; i <= k && i <= k - i; i++) {
+ uint128_t product;
+
+ product = mul_64_64(left[i], left[k - i]);
+
+ if (i < k - i) {
+ r2 += product.m_high >> 63;
+ product.m_high = (product.m_high << 1) |
+ (product.m_low >> 63);
+ product.m_low <<= 1;
+ }
+
+ r01 = add_128_128(r01, product);
+ r2 += (r01.m_high < product.m_high);
+ }
+
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = r2;
+ r2 = 0;
+ }
+
+ result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low;
+}
+
+/* Computes result = (left + right) % mod.
+ * Assumes that left < mod and right < mod, result != mod.
+ */
+static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod)
+{
+ u64 carry;
+
+ carry = vli_add(result, left, right);
+
+ /* result > mod (result = mod + remainder), so subtract mod to
+ * get remainder.
+ */
+ if (carry || vli_cmp(result, mod) >= 0)
+ vli_sub(result, result, mod);
+}
+
+/* Computes result = (left - right) % mod.
+ * Assumes that left < mod and right < mod, result != mod.
+ */
+static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod)
+{
+ u64 borrow = vli_sub(result, left, right);
+
+ /* In this case, result == -diff == (max int) - diff.
+ * Since -x % d == d - x, we can get the correct result from
+ * result + mod (with overflow).
+ */
+ if (borrow)
+ vli_add(result, result, mod);
+}
+
+/* Computes result = product % curve_p
+ * from http://www.nsa.gov/ia/_files/nist-routines.pdf
+ */
+static void vli_mmod_fast(u64 *result, const u64 *product)
+{
+ u64 tmp[NUM_ECC_DIGITS];
+ int carry;
+
+ /* t */
+ vli_set(result, product);
+
+ /* s1 */
+ tmp[0] = 0;
+ tmp[1] = product[5] & 0xffffffff00000000ull;
+ tmp[2] = product[6];
+ tmp[3] = product[7];
+ carry = vli_lshift(tmp, tmp, 1);
+ carry += vli_add(result, result, tmp);
+
+ /* s2 */
+ tmp[1] = product[6] << 32;
+ tmp[2] = (product[6] >> 32) | (product[7] << 32);
+ tmp[3] = product[7] >> 32;
+ carry += vli_lshift(tmp, tmp, 1);
+ carry += vli_add(result, result, tmp);
+
+ /* s3 */
+ tmp[0] = product[4];
+ tmp[1] = product[5] & 0xffffffff;
+ tmp[2] = 0;
+ tmp[3] = product[7];
+ carry += vli_add(result, result, tmp);
+
+ /* s4 */
+ tmp[0] = (product[4] >> 32) | (product[5] << 32);
+ tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
+ tmp[2] = product[7];
+ tmp[3] = (product[6] >> 32) | (product[4] << 32);
+ carry += vli_add(result, result, tmp);
+
+ /* d1 */
+ tmp[0] = (product[5] >> 32) | (product[6] << 32);
+ tmp[1] = (product[6] >> 32);
+ tmp[2] = 0;
+ tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
+ carry -= vli_sub(result, result, tmp);
+
+ /* d2 */
+ tmp[0] = product[6];
+ tmp[1] = product[7];
+ tmp[2] = 0;
+ tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
+ carry -= vli_sub(result, result, tmp);
+
+ /* d3 */
+ tmp[0] = (product[6] >> 32) | (product[7] << 32);
+ tmp[1] = (product[7] >> 32) | (product[4] << 32);
+ tmp[2] = (product[4] >> 32) | (product[5] << 32);
+ tmp[3] = (product[6] << 32);
+ carry -= vli_sub(result, result, tmp);
+
+ /* d4 */
+ tmp[0] = product[7];
+ tmp[1] = product[4] & 0xffffffff00000000ull;
+ tmp[2] = product[5];
+ tmp[3] = product[6] & 0xffffffff00000000ull;
+ carry -= vli_sub(result, result, tmp);
+
+ if (carry < 0) {
+ do {
+ carry += vli_add(result, result, curve_p);
+ } while (carry < 0);
+ } else {
+ while (carry || vli_cmp(curve_p, result) != 1)
+ carry -= vli_sub(result, result, curve_p);
+ }
+}
+
+/* Computes result = (left * right) % curve_p. */
+static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right)
+{
+ u64 product[2 * NUM_ECC_DIGITS];
+
+ vli_mult(product, left, right);
+ vli_mmod_fast(result, product);
+}
+
+/* Computes result = left^2 % curve_p. */
+static void vli_mod_square_fast(u64 *result, const u64 *left)
+{
+ u64 product[2 * NUM_ECC_DIGITS];
+
+ vli_square(product, left);
+ vli_mmod_fast(result, product);
+}
+
+#define EVEN(vli) (!(vli[0] & 1))
+/* Computes result = (1 / input) % mod. All VLIs are the same size.
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
+ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
+ */
+static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod)
+{
+ u64 a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS];
+ u64 u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS];
+ u64 carry;
+ int cmp_result;
+
+ if (vli_is_zero(input)) {
+ vli_clear(result);
+ return;
+ }
+
+ vli_set(a, input);
+ vli_set(b, mod);
+ vli_clear(u);
+ u[0] = 1;
+ vli_clear(v);
+
+ while ((cmp_result = vli_cmp(a, b)) != 0) {
+ carry = 0;
+
+ if (EVEN(a)) {
+ vli_rshift1(a);
+
+ if (!EVEN(u))
+ carry = vli_add(u, u, mod);
+
+ vli_rshift1(u);
+ if (carry)
+ u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+ } else if (EVEN(b)) {
+ vli_rshift1(b);
+
+ if (!EVEN(v))
+ carry = vli_add(v, v, mod);
+
+ vli_rshift1(v);
+ if (carry)
+ v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+ } else if (cmp_result > 0) {
+ vli_sub(a, a, b);
+ vli_rshift1(a);
+
+ if (vli_cmp(u, v) < 0)
+ vli_add(u, u, mod);
+
+ vli_sub(u, u, v);
+ if (!EVEN(u))
+ carry = vli_add(u, u, mod);
+
+ vli_rshift1(u);
+ if (carry)
+ u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+ } else {
+ vli_sub(b, b, a);
+ vli_rshift1(b);
+
+ if (vli_cmp(v, u) < 0)
+ vli_add(v, v, mod);
+
+ vli_sub(v, v, u);
+ if (!EVEN(v))
+ carry = vli_add(v, v, mod);
+
+ vli_rshift1(v);
+ if (carry)
+ v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
+ }
+ }
+
+ vli_set(result, u);
+}
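
vli_mod_inv() above is the binary extended-Euclid inversion ladder from the cited report, built from halving, conditional add-of-modulus and subtract steps. A scalar re-implementation for a small odd modulus makes the ladder easier to follow without the 256-bit bookkeeping (standalone user-space C; the prime 65537 is an arbitrary choice):

/* Build with: gcc -O2 modinv_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

/* (x + mod) / 2 without overflow; both x and mod are odd here. */
static uint64_t half_plus_mod(uint64_t x, uint64_t mod)
{
	return (x >> 1) + (mod >> 1) + 1;
}

/* Inverse of 'in' modulo the odd value 'mod' (0 < in < mod, gcd(in, mod) == 1). */
static uint64_t mod_inv(uint64_t in, uint64_t mod)
{
	uint64_t a = in, b = mod, u = 1, v = 0;

	while (a != b) {
		if (!(a & 1)) {
			a >>= 1;
			u = (u & 1) ? half_plus_mod(u, mod) : u >> 1;
		} else if (!(b & 1)) {
			b >>= 1;
			v = (v & 1) ? half_plus_mod(v, mod) : v >> 1;
		} else if (a > b) {
			a = (a - b) >> 1;
			if (u < v)
				u += mod;
			u -= v;
			u = (u & 1) ? half_plus_mod(u, mod) : u >> 1;
		} else {
			b = (b - a) >> 1;
			if (v < u)
				v += mod;
			v -= u;
			v = (v & 1) ? half_plus_mod(v, mod) : v >> 1;
		}
	}
	return u;
}

int main(void)
{
	uint64_t p = 65537, x = 12345;
	uint64_t inv = mod_inv(x, p);

	/* (x * inv) % p should print 1. */
	printf("x=%llu inv=%llu (x*inv)%%p=%llu\n",
	       (unsigned long long)x, (unsigned long long)inv,
	       (unsigned long long)((x * inv) % p));
	return 0;
}
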
+
+/* ------ Point operations ------ */
+
+/* Returns true if the given point is the point at infinity, false otherwise. */
+static bool ecc_point_is_zero(const struct ecc_point *point)
+{
+ return (vli_is_zero(point->x) && vli_is_zero(point->y));
+}
+
+/* Point multiplication algorithm using Montgomery's ladder with co-Z
+ * coordinates. From http://eprint.iacr.org/2011/338.pdf
+ */
+
+/* Double in place */
+static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1)
+{
+ /* t1 = x, t2 = y, t3 = z */
+ u64 t4[NUM_ECC_DIGITS];
+ u64 t5[NUM_ECC_DIGITS];
+
+ if (vli_is_zero(z1))
+ return;
+
+ vli_mod_square_fast(t4, y1); /* t4 = y1^2 */
+ vli_mod_mult_fast(t5, x1, t4); /* t5 = x1*y1^2 = A */
+ vli_mod_square_fast(t4, t4); /* t4 = y1^4 */
+ vli_mod_mult_fast(y1, y1, z1); /* t2 = y1*z1 = z3 */
+ vli_mod_square_fast(z1, z1); /* t3 = z1^2 */
+
+ vli_mod_add(x1, x1, z1, curve_p); /* t1 = x1 + z1^2 */
+ vli_mod_add(z1, z1, z1, curve_p); /* t3 = 2*z1^2 */
+ vli_mod_sub(z1, x1, z1, curve_p); /* t3 = x1 - z1^2 */
+ vli_mod_mult_fast(x1, x1, z1); /* t1 = x1^2 - z1^4 */
+
+ vli_mod_add(z1, x1, x1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
+ vli_mod_add(x1, x1, z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
+ if (vli_test_bit(x1, 0)) {
+ u64 carry = vli_add(x1, x1, curve_p);
+ vli_rshift1(x1);
+ x1[NUM_ECC_DIGITS - 1] |= carry << 63;
+ } else {
+ vli_rshift1(x1);
+ }
+ /* t1 = 3/2*(x1^2 - z1^4) = B */
+
+ vli_mod_square_fast(z1, x1); /* t3 = B^2 */
+ vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - A */
+ vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */
+ vli_mod_sub(t5, t5, z1, curve_p); /* t5 = A - x3 */
+ vli_mod_mult_fast(x1, x1, t5); /* t1 = B * (A - x3) */
+ vli_mod_sub(t4, x1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */
+
+ vli_set(x1, z1);
+ vli_set(z1, y1);
+ vli_set(y1, t4);
+}
+
+/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
+static void apply_z(u64 *x1, u64 *y1, u64 *z)
+{
+ u64 t1[NUM_ECC_DIGITS];
+
+ vli_mod_square_fast(t1, z); /* z^2 */
+ vli_mod_mult_fast(x1, x1, t1); /* x1 * z^2 */
+ vli_mod_mult_fast(t1, t1, z); /* z^3 */
+ vli_mod_mult_fast(y1, y1, t1); /* y1 * z^3 */
+}
+
+/* P = (x1, y1) => 2P, (x2, y2) => P' */
+static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+ u64 *p_initial_z)
+{
+ u64 z[NUM_ECC_DIGITS];
+
+ vli_set(x2, x1);
+ vli_set(y2, y1);
+
+ vli_clear(z);
+ z[0] = 1;
+
+ if (p_initial_z)
+ vli_set(z, p_initial_z);
+
+ apply_z(x1, y1, z);
+
+ ecc_point_double_jacobian(x1, y1, z);
+
+ apply_z(x2, y2, z);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
+ * or P => P', Q => P + Q
+ */
+static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2)
+{
+ /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+ u64 t5[NUM_ECC_DIGITS];
+
+ vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */
+ vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */
+ vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */
+ vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */
+ vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */
+ vli_mod_square_fast(t5, y2); /* t5 = (y2 - y1)^2 = D */
+
+ vli_mod_sub(t5, t5, x1, curve_p); /* t5 = D - B */
+ vli_mod_sub(t5, t5, x2, curve_p); /* t5 = D - B - C = x3 */
+ vli_mod_sub(x2, x2, x1, curve_p); /* t3 = C - B */
+ vli_mod_mult_fast(y1, y1, x2); /* t2 = y1*(C - B) */
+ vli_mod_sub(x2, x1, t5, curve_p); /* t3 = B - x3 */
+ vli_mod_mult_fast(y2, y2, x2); /* t4 = (y2 - y1)*(B - x3) */
+ vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */
+
+ vli_set(x2, t5);
+}
+
+/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
+ * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
+ * or P => P - Q, Q => P + Q
+ */
+static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2)
+{
+ /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
+ u64 t5[NUM_ECC_DIGITS];
+ u64 t6[NUM_ECC_DIGITS];
+ u64 t7[NUM_ECC_DIGITS];
+
+ vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */
+ vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */
+ vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */
+ vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */
+ vli_mod_add(t5, y2, y1, curve_p); /* t4 = y2 + y1 */
+ vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */
+
+ vli_mod_sub(t6, x2, x1, curve_p); /* t6 = C - B */
+ vli_mod_mult_fast(y1, y1, t6); /* t2 = y1 * (C - B) */
+ vli_mod_add(t6, x1, x2, curve_p); /* t6 = B + C */
+ vli_mod_square_fast(x2, y2); /* t3 = (y2 - y1)^2 */
+ vli_mod_sub(x2, x2, t6, curve_p); /* t3 = x3 */
+
+ vli_mod_sub(t7, x1, x2, curve_p); /* t7 = B - x3 */
+ vli_mod_mult_fast(y2, y2, t7); /* t4 = (y2 - y1)*(B - x3) */
+ vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */
+
+ vli_mod_square_fast(t7, t5); /* t7 = (y2 + y1)^2 = F */
+ vli_mod_sub(t7, t7, t6, curve_p); /* t7 = x3' */
+ vli_mod_sub(t6, t7, x1, curve_p); /* t6 = x3' - B */
+ vli_mod_mult_fast(t6, t6, t5); /* t6 = (y2 + y1)*(x3' - B) */
+ vli_mod_sub(y1, t6, y1, curve_p); /* t2 = y3' */
+
+ vli_set(x1, t7);
+}
+
+static void ecc_point_mult(struct ecc_point *result,
+ const struct ecc_point *point, u64 *scalar,
+ u64 *initial_z, int num_bits)
+{
+ /* R0 and R1 */
+ u64 rx[2][NUM_ECC_DIGITS];
+ u64 ry[2][NUM_ECC_DIGITS];
+ u64 z[NUM_ECC_DIGITS];
+ int i, nb;
+
+ vli_set(rx[1], point->x);
+ vli_set(ry[1], point->y);
+
+ xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z);
+
+ for (i = num_bits - 2; i > 0; i--) {
+ nb = !vli_test_bit(scalar, i);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]);
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]);
+ }
+
+ nb = !vli_test_bit(scalar, 0);
+ xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]);
+
+ /* Find final 1/Z value. */
+ vli_mod_sub(z, rx[1], rx[0], curve_p); /* X1 - X0 */
+ vli_mod_mult_fast(z, z, ry[1 - nb]); /* Yb * (X1 - X0) */
+ vli_mod_mult_fast(z, z, point->x); /* xP * Yb * (X1 - X0) */
+ vli_mod_inv(z, z, curve_p); /* 1 / (xP * Yb * (X1 - X0)) */
+ vli_mod_mult_fast(z, z, point->y); /* yP / (xP * Yb * (X1 - X0)) */
+ vli_mod_mult_fast(z, z, rx[1 - nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */
+ /* End 1/Z calculation */
+
+ xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]);
+
+ apply_z(rx[0], ry[0], z);
+
+ vli_set(result->x, rx[0]);
+ vli_set(result->y, ry[0]);
+}
+
+static void ecc_bytes2native(const u8 bytes[ECC_BYTES],
+ u64 native[NUM_ECC_DIGITS])
+{
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ const u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
+
+ native[NUM_ECC_DIGITS - 1 - i] =
+ ((u64) digit[0] << 0) |
+ ((u64) digit[1] << 8) |
+ ((u64) digit[2] << 16) |
+ ((u64) digit[3] << 24) |
+ ((u64) digit[4] << 32) |
+ ((u64) digit[5] << 40) |
+ ((u64) digit[6] << 48) |
+ ((u64) digit[7] << 56);
+ }
+}
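
ecc_bytes2native() and its inverse treat each 32-byte buffer as one 256-bit integer stored least significant byte first, matching the LSB-first note in ecc.h. A tiny standalone round trip of a single 64-bit digit makes the byte order concrete (plain user-space C):

/* Build with: gcc -O2 lsb_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x0123456789abcdefull;
	uint8_t bytes[8];
	uint64_t back = 0;
	int i;

	for (i = 0; i < 8; i++)		/* native -> bytes, LSB first */
		bytes[i] = value >> (8 * i);

	for (i = 0; i < 8; i++)		/* bytes -> native */
		back |= (uint64_t)bytes[i] << (8 * i);

	/* bytes[0] holds 0xef, the least significant byte. */
	printf("bytes[0]=0x%02x round-trip ok: %d\n", bytes[0], back == value);
	return 0;
}
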
+
+static void ecc_native2bytes(const u64 native[NUM_ECC_DIGITS],
+ u8 bytes[ECC_BYTES])
+{
+ int i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
+
+ digit[0] = native[NUM_ECC_DIGITS - 1 - i] >> 0;
+ digit[1] = native[NUM_ECC_DIGITS - 1 - i] >> 8;
+ digit[2] = native[NUM_ECC_DIGITS - 1 - i] >> 16;
+ digit[3] = native[NUM_ECC_DIGITS - 1 - i] >> 24;
+ digit[4] = native[NUM_ECC_DIGITS - 1 - i] >> 32;
+ digit[5] = native[NUM_ECC_DIGITS - 1 - i] >> 40;
+ digit[6] = native[NUM_ECC_DIGITS - 1 - i] >> 48;
+ digit[7] = native[NUM_ECC_DIGITS - 1 - i] >> 56;
+ }
+}
+
+bool ecc_make_key(u8 public_key[64], u8 private_key[32])
+{
+ struct ecc_point pk;
+ u64 priv[NUM_ECC_DIGITS];
+ unsigned int tries = 0;
+
+ do {
+ if (tries++ >= MAX_TRIES)
+ return false;
+
+ get_random_bytes(priv, ECC_BYTES);
+
+ if (vli_is_zero(priv))
+ continue;
+
+ /* Make sure the private key is in the range [1, n-1]. */
+ if (vli_cmp(curve_n, priv) != 1)
+ continue;
+
+ ecc_point_mult(&pk, &curve_g, priv, NULL, vli_num_bits(priv));
+ } while (ecc_point_is_zero(&pk));
+
+ ecc_native2bytes(priv, private_key);
+ ecc_native2bytes(pk.x, public_key);
+ ecc_native2bytes(pk.y, &public_key[32]);
+
+ return true;
+}
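
ecc_make_key() above rejects a candidate and retries when the random value is zero or not strictly below the group order n. For P-256 the top 64 bits of n are 0xFFFFFFFF00000000, so the per-draw rejection probability is roughly

	(2^256 - n + 1) / 2^256  ~=  2^224 / 2^256  =  2^-32,

which is why MAX_TRIES = 16 is generous in practice. The point-at-infinity check in the loop condition is purely defensive: for a scalar in [1, n-1] the corresponding multiple of the generator is never the point at infinity.
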
+
+bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32],
+ u8 secret[32])
+{
+ u64 priv[NUM_ECC_DIGITS];
+ u64 rand[NUM_ECC_DIGITS];
+ struct ecc_point product, pk;
+
+ get_random_bytes(rand, ECC_BYTES);
+
+ ecc_bytes2native(public_key, pk.x);
+ ecc_bytes2native(&public_key[32], pk.y);
+ ecc_bytes2native(private_key, priv);
+
+ ecc_point_mult(&product, &pk, priv, rand, vli_num_bits(priv));
+
+ ecc_native2bytes(product.x, secret);
+
+ return !ecc_point_is_zero(&product);
+}
diff --git a/net/bluetooth/ecc.h b/net/bluetooth/ecc.h
new file mode 100644
index 000000000000..8d6a2f4d1905
--- /dev/null
+++ b/net/bluetooth/ecc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Create a public/private key pair.
+ * Outputs:
+ * public_key - Will be filled in with the public key.
+ * private_key - Will be filled in with the private key.
+ *
+ * Returns true if the key pair was generated successfully, false
+ * if an error occurred. The keys are stored with the LSB first.
+ */
+bool ecc_make_key(u8 public_key[64], u8 private_key[32]);
+
+/* Compute a shared secret given your secret key and someone else's
+ * public key.
+ * Note: It is recommended that you hash the result of ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Inputs:
+ * public_key - The public key of the remote party
+ * private_key - Your private key.
+ *
+ * Outputs:
+ * secret - Will be filled in with the shared secret value.
+ *
+ * Returns true if the shared secret was generated successfully, false
+ * if an error occurred. Both input and output parameters are stored
+ * with the LSB first.
+ */
+bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32],
+ u8 secret[32]);
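A hedged sketch of how a caller (for example the SMP code) might combine the two declarations above to run an ECDH exchange; the function name smp_generate_dhkey() and the error codes are made up for illustration and are not part of the patch:

#include <linux/types.h>
#include <linux/errno.h>
#include "ecc.h"

static int smp_generate_dhkey(const u8 remote_pk[64], u8 dhkey[32])
{
	u8 local_pk[64], local_sk[32];

	/* false means no valid private key was found within the retry
	 * limit (or the resulting public point was the point at infinity).
	 */
	if (!ecc_make_key(local_pk, local_sk))
		return -EIO;

	/* local_pk would be sent to the peer here, e.g. in an SMP
	 * Pairing Public Key PDU, before deriving the shared secret.
	 */

	/* false means the computation ended in the point at infinity. */
	if (!ecdh_shared_secret(remote_pk, local_sk, dhkey))
		return -EIO;

	return 0;
}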
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b9517bd17190..fe18825cc8a4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -141,10 +141,11 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
*/
if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
struct hci_dev *hdev = conn->hdev;
- struct hci_cp_read_clock_offset cp;
+ struct hci_cp_read_clock_offset clkoff_cp;
- cp.handle = cpu_to_le16(conn->handle);
- hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp);
+ clkoff_cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
+ &clkoff_cp);
}
conn->state = BT_DISCONN;
@@ -415,7 +416,7 @@ static void le_conn_timeout(struct work_struct *work)
* happen with broken hardware or if low duty cycle was used
* (which doesn't have a timeout of its own).
*/
- if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ if (conn->role == HCI_ROLE_SLAVE) {
u8 enable = 0x00;
hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
@@ -448,6 +449,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
conn->key_type = 0xff;
+ conn->rssi = HCI_RSSI_INVALID;
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
@@ -517,7 +519,7 @@ int hci_conn_del(struct hci_conn *conn)
/* Unacked frames */
hdev->acl_cnt += conn->sent;
} else if (conn->type == LE_LINK) {
- cancel_delayed_work_sync(&conn->le_conn_timeout);
+ cancel_delayed_work(&conn->le_conn_timeout);
if (hdev->le_pkts)
hdev->le_cnt += conn->sent;
@@ -544,6 +546,9 @@ int hci_conn_del(struct hci_conn *conn)
hci_conn_del_sysfs(conn);
+ if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
+ hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
+
hci_dev_put(hdev);
hci_conn_put(conn);
@@ -656,7 +661,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Update random address, but set require_privacy to false so
- * that we never connect with an unresolvable address.
+ * that we never connect with a non-resolvable address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cb05d7f16a34..5dcacf9607e4 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -200,31 +200,6 @@ static const struct file_operations blacklist_fops = {
.release = single_release,
};
-static int whitelist_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct bdaddr_list *b;
-
- hci_dev_lock(hdev);
- list_for_each_entry(b, &hdev->whitelist, list)
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static int whitelist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, whitelist_show, inode->i_private);
-}
-
-static const struct file_operations whitelist_fops = {
- .open = whitelist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
@@ -299,15 +274,13 @@ static const struct file_operations inquiry_cache_fops = {
static int link_keys_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
- struct list_head *p, *n;
+ struct link_key *key;
- hci_dev_lock(hdev);
- list_for_each_safe(p, n, &hdev->link_keys) {
- struct link_key *key = list_entry(p, struct link_key, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(key, &hdev->link_keys, list)
seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
HCI_LINK_KEY_SIZE, key->val, key->pin_len);
- }
- hci_dev_unlock(hdev);
+ rcu_read_unlock();
return 0;
}
@@ -433,6 +406,49 @@ static const struct file_operations force_sc_support_fops = {
.llseek = default_llseek,
};
+static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_lesc_support_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ return -EALREADY;
+
+ change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
+
+ return count;
+}
+
+static const struct file_operations force_lesc_support_fops = {
+ .open = simple_open,
+ .read = force_lesc_support_read,
+ .write = force_lesc_support_write,
+ .llseek = default_llseek,
+};
+
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
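The new force_lesc_support entry parses its input with strtobool(), so "Y"/"N", "y"/"n" and "1"/"0" are all accepted, and writing the value that is already set returns -EALREADY. A hedged userspace sketch of flipping the switch, assuming debugfs is mounted at /sys/kernel/debug and the adapter is hci0 (both are assumptions, the patch does not fix the path):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption based on where hdev->debugfs entries usually live. */
	const char *path = "/sys/kernel/debug/bluetooth/hci0/force_lesc_support";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "Y", 1) != 1)	/* "1" or "y" would work as well */
		perror("write");
	close(fd);
	return 0;
}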
@@ -773,16 +789,15 @@ static const struct file_operations white_list_fops = {
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
- struct list_head *p, *n;
+ struct smp_irk *irk;
- hci_dev_lock(hdev);
- list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
- struct smp_irk *irk = list_entry(p, struct smp_irk, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
&irk->bdaddr, irk->addr_type,
16, irk->val, &irk->rpa);
}
- hci_dev_unlock(hdev);
+ rcu_read_unlock();
return 0;
}
@@ -803,17 +818,15 @@ static const struct file_operations identity_resolving_keys_fops = {
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
- struct list_head *p, *n;
+ struct smp_ltk *ltk;
- hci_dev_lock(hdev);
- list_for_each_safe(p, n, &hdev->long_term_keys) {
- struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
&ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
__le64_to_cpu(ltk->rand), 16, ltk->val);
- }
- hci_dev_unlock(hdev);
+ rcu_read_unlock();
return 0;
}
@@ -1030,10 +1043,13 @@ static int device_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
struct hci_conn_params *p;
+ struct bdaddr_list *b;
hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->whitelist, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
list_for_each_entry(p, &hdev->le_conn_params, list) {
- seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
+ seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
p->auto_connect);
}
hci_dev_unlock(hdev);
@@ -1147,13 +1163,16 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = HCI_REQ_PEND;
- err = hci_req_run(&req, hci_req_sync_complete);
- if (err < 0)
- return ERR_PTR(err);
-
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ err = hci_req_run(&req, hci_req_sync_complete);
+ if (err < 0) {
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return ERR_PTR(err);
+ }
+
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
@@ -1211,10 +1230,16 @@ static int __hci_req_sync(struct hci_dev *hdev,
func(&req, opt);
+ add_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
err = hci_req_run(&req, hci_req_sync_complete);
if (err < 0) {
hdev->req_status = 0;
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
/* ENODATA means the HCI request command queue is empty.
* This can happen when a request with conditionals doesn't
* trigger any commands to be sent. This is normal behavior
@@ -1226,9 +1251,6 @@ static int __hci_req_sync(struct hci_dev *hdev,
return err;
}
- add_wait_queue(&hdev->req_wait_q, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
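The reordering in the two hunks above matters because hci_req_sync_complete() can set hdev->req_status and wake hdev->req_wait_q as soon as hci_req_run() has queued the commands. A hedged sketch of the window the old ordering left open, written as a comment since it describes control flow rather than new code:

	/* Old ordering (racy):
	 *
	 *	hci_req_run(&req, hci_req_sync_complete);
	 *		-> request completes immediately, req_wait_q is woken,
	 *		   but nothing is sleeping on it yet
	 *	add_wait_queue(&hdev->req_wait_q, &wait);
	 *	set_current_state(TASK_INTERRUPTIBLE);
	 *	schedule_timeout(timeout);	<- in the worst case sleeps for the
	 *					   whole timeout before noticing
	 *					   hdev->req_status
	 *
	 * New ordering: the caller is on req_wait_q and in TASK_INTERRUPTIBLE
	 * before hci_req_run() is called, so an early completion is never
	 * missed, and the error path undoes both steps before returning.
	 */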
@@ -1351,8 +1373,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
- struct hci_dev *hdev = req->hdev;
-
__le16 param;
__u8 flt_type;
@@ -1381,14 +1401,6 @@ static void bredr_setup(struct hci_request *req)
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-
- /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
- * but it does not support page scan related HCI commands.
- */
- if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
- }
}
static void le_setup(struct hci_request *req)
@@ -1696,6 +1708,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
+ if (hdev->commands[8] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+
+ /* Some older Broadcom based Bluetooth 1.2 controllers do not
+ * support the Read Page Scan Type command. Check support for
+ * this command in the bit mask of supported commands.
+ */
+ if (hdev->commands[13] & 0x01)
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
+
if (lmp_le_capable(hdev)) {
u8 events[8];
@@ -1713,6 +1735,28 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* Parameter Request
*/
+ /* If the controller supports Extended Scanner Filter
+ * Policies, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
+ events[1] |= 0x04; /* LE Direct Advertising
+ * Report
+ */
+
+ /* If the controller supports the LE Read Local P-256
+ * Public Key command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x02)
+ events[0] |= 0x80; /* LE Read Local P-256
+ * Public Key Complete
+ */
+
+ /* If the controller supports the LE Generate DHKey
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x04)
+ events[1] |= 0x01; /* LE Generate DHKey Complete */
+
hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
events);
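All of the checks above follow the same pattern: hdev->commands[] holds the Read Local Supported Commands bitmask, where octet N bit M indicates support for one specific HCI command (octet 34 bits 1 and 2 for the two LE commands used here). A hypothetical helper, not part of the patch, that makes the octet/bit pairing explicit:

static inline bool hci_cmd_supported(struct hci_dev *hdev, unsigned int octet,
				     unsigned int bit)
{
	/* hdev->commands[] is filled in from the Read Local Supported
	 * Commands response during controller init.
	 */
	return !!(hdev->commands[octet] & (1 << bit));
}

/* The two event mask updates above could then read:
 *
 *	if (hci_cmd_supported(hdev, 34, 1))	// LE Read Local P-256 Public Key
 *		events[0] |= 0x80;
 *	if (hci_cmd_supported(hdev, 34, 2))	// LE Generate DHKey
 *		events[1] |= 0x01;
 */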
@@ -1755,9 +1799,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Enable Secure Connections if supported and configured */
- if ((lmp_sc_capable(hdev) ||
- test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
- test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+ if (bredr_sc_enabled(hdev)) {
u8 support = 0x01;
hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
@@ -1811,10 +1853,10 @@ static int __hci_init(struct hci_dev *hdev)
&hdev->manufacturer);
debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+ debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
+ &device_list_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
&blacklist_fops);
- debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
- &whitelist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
@@ -1840,6 +1882,10 @@ static int __hci_init(struct hci_dev *hdev)
hdev, &force_sc_support_fops);
debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
hdev, &sc_only_mode_fops);
+ if (lmp_le_capable(hdev))
+ debugfs_create_file("force_lesc_support", 0644,
+ hdev->debugfs, hdev,
+ &force_lesc_support_fops);
}
if (lmp_sniff_capable(hdev)) {
@@ -1893,8 +1939,6 @@ static int __hci_init(struct hci_dev *hdev)
hdev, &adv_min_interval_fops);
debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
hdev, &adv_max_interval_fops);
- debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
- &device_list_fops);
debugfs_create_u16("discov_interleaved_timeout", 0644,
hdev->debugfs,
&hdev->discov_interleaved_timeout);
@@ -2138,7 +2182,7 @@ u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
- hci_remove_remote_oob_data(hdev, &data->bdaddr);
+ hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
if (!data->ssp_mode)
flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
@@ -2584,7 +2628,18 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (test_bit(HCI_MGMT, &hdev->dev_flags))
cancel_delayed_work_sync(&hdev->rpa_expired);
+ /* Avoid potential lockdep warnings from the *_flush() calls by
+ * ensuring the workqueue is empty up front.
+ */
+ drain_workqueue(hdev->workqueue);
+
hci_dev_lock(hdev);
+
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hdev->dev_type == HCI_BREDR)
+ mgmt_powered(hdev, 0);
+ }
+
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
@@ -2632,14 +2687,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
- if (hdev->dev_type == HCI_BREDR) {
- hci_dev_lock(hdev);
- mgmt_powered(hdev, 0);
- hci_dev_unlock(hdev);
- }
- }
-
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -2707,6 +2754,11 @@ int hci_dev_reset(__u16 dev)
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
+ /* Avoid potential lockdep warnings from the *_flush() calls by
+ * ensuring the workqueue is empty up front.
+ */
+ drain_workqueue(hdev->workqueue);
+
hci_dev_lock(hdev);
hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
@@ -3029,7 +3081,9 @@ static void hci_power_on(struct work_struct *work)
err = hci_dev_do_open(hdev);
if (err < 0) {
+ hci_dev_lock(hdev);
mgmt_set_powered_failed(hdev, err);
+ hci_dev_unlock(hdev);
return;
}
@@ -3112,35 +3166,31 @@ void hci_uuids_clear(struct hci_dev *hdev)
void hci_link_keys_clear(struct hci_dev *hdev)
{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &hdev->link_keys) {
- struct link_key *key;
-
- key = list_entry(p, struct link_key, list);
+ struct link_key *key;
- list_del(p);
- kfree(key);
+ list_for_each_entry_rcu(key, &hdev->link_keys, list) {
+ list_del_rcu(&key->list);
+ kfree_rcu(key, rcu);
}
}
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
- struct smp_ltk *k, *tmp;
+ struct smp_ltk *k;
- list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
- list_del(&k->list);
- kfree(k);
+ list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+ list_del_rcu(&k->list);
+ kfree_rcu(k, rcu);
}
}
void hci_smp_irks_clear(struct hci_dev *hdev)
{
- struct smp_irk *k, *tmp;
+ struct smp_irk *k;
- list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
- list_del(&k->list);
- kfree(k);
+ list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+ list_del_rcu(&k->list);
+ kfree_rcu(k, rcu);
}
}
@@ -3148,9 +3198,14 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct link_key *k;
- list_for_each_entry(k, &hdev->link_keys, list)
- if (bacmp(bdaddr, &k->bdaddr) == 0)
+ rcu_read_lock();
+ list_for_each_entry_rcu(k, &hdev->link_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr) == 0) {
+ rcu_read_unlock();
return k;
+ }
+ }
+ rcu_read_unlock();
return NULL;
}
@@ -3174,6 +3229,10 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
if (!conn)
return true;
+ /* BR/EDR key derived using SC from an LE link */
+ if (conn->type == LE_LINK)
+ return true;
+
/* Neither local nor remote side had no-bonding as requirement */
if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
return true;
@@ -3199,34 +3258,22 @@ static u8 ltk_role(u8 type)
return HCI_ROLE_SLAVE;
}
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
- u8 role)
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 role)
{
struct smp_ltk *k;
- list_for_each_entry(k, &hdev->long_term_keys, list) {
- if (k->ediv != ediv || k->rand != rand)
- continue;
-
- if (ltk_role(k->type) != role)
+ rcu_read_lock();
+ list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+ if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
continue;
- return k;
- }
-
- return NULL;
-}
-
-struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 addr_type, u8 role)
-{
- struct smp_ltk *k;
-
- list_for_each_entry(k, &hdev->long_term_keys, list)
- if (addr_type == k->bdaddr_type &&
- bacmp(bdaddr, &k->bdaddr) == 0 &&
- ltk_role(k->type) == role)
+ if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
+ rcu_read_unlock();
return k;
+ }
+ }
+ rcu_read_unlock();
return NULL;
}
@@ -3235,17 +3282,22 @@ struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
struct smp_irk *irk;
- list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
- if (!bacmp(&irk->rpa, rpa))
+ rcu_read_lock();
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
+ if (!bacmp(&irk->rpa, rpa)) {
+ rcu_read_unlock();
return irk;
+ }
}
- list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
if (smp_irk_matches(hdev, irk->val, rpa)) {
bacpy(&irk->rpa, rpa);
+ rcu_read_unlock();
return irk;
}
}
+ rcu_read_unlock();
return NULL;
}
@@ -3259,11 +3311,15 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
return NULL;
- list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
if (addr_type == irk->addr_type &&
- bacmp(bdaddr, &irk->bdaddr) == 0)
+ bacmp(bdaddr, &irk->bdaddr) == 0) {
+ rcu_read_unlock();
return irk;
+ }
}
+ rcu_read_unlock();
return NULL;
}
@@ -3284,7 +3340,7 @@ struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key)
return NULL;
- list_add(&key->list, &hdev->link_keys);
+ list_add_rcu(&key->list, &hdev->link_keys);
}
BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
@@ -3322,14 +3378,14 @@ struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct smp_ltk *key, *old_key;
u8 role = ltk_role(type);
- old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
+ old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
if (old_key)
key = old_key;
else {
key = kzalloc(sizeof(*key), GFP_KERNEL);
if (!key)
return NULL;
- list_add(&key->list, &hdev->long_term_keys);
+ list_add_rcu(&key->list, &hdev->long_term_keys);
}
bacpy(&key->bdaddr, bdaddr);
@@ -3358,7 +3414,7 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
bacpy(&irk->bdaddr, bdaddr);
irk->addr_type = addr_type;
- list_add(&irk->list, &hdev->identity_resolving_keys);
+ list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
}
memcpy(irk->val, val, 16);
@@ -3377,25 +3433,25 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
- list_del(&key->list);
- kfree(key);
+ list_del_rcu(&key->list);
+ kfree_rcu(key, rcu);
return 0;
}
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
- struct smp_ltk *k, *tmp;
+ struct smp_ltk *k;
int removed = 0;
- list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
continue;
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
- list_del(&k->list);
- kfree(k);
+ list_del_rcu(&k->list);
+ kfree_rcu(k, rcu);
removed++;
}
@@ -3404,16 +3460,16 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
- struct smp_irk *k, *tmp;
+ struct smp_irk *k;
- list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
continue;
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
- list_del(&k->list);
- kfree(k);
+ list_del_rcu(&k->list);
+ kfree_rcu(k, rcu);
}
}
@@ -3437,26 +3493,31 @@ static void hci_cmd_timeout(struct work_struct *work)
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
+ bdaddr_t *bdaddr, u8 bdaddr_type)
{
struct oob_data *data;
- list_for_each_entry(data, &hdev->remote_oob_data, list)
- if (bacmp(bdaddr, &data->bdaddr) == 0)
- return data;
+ list_for_each_entry(data, &hdev->remote_oob_data, list) {
+ if (bacmp(bdaddr, &data->bdaddr) != 0)
+ continue;
+ if (data->bdaddr_type != bdaddr_type)
+ continue;
+ return data;
+ }
return NULL;
}
-int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type)
{
struct oob_data *data;
- data = hci_find_remote_oob_data(hdev, bdaddr);
+ data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
if (!data)
return -ENOENT;
- BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+ BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
list_del(&data->list);
kfree(data);
@@ -3475,52 +3536,37 @@ void hci_remote_oob_data_clear(struct hci_dev *hdev)
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash, u8 *randomizer)
+ u8 bdaddr_type, u8 *hash192, u8 *rand192,
+ u8 *hash256, u8 *rand256)
{
struct oob_data *data;
- data = hci_find_remote_oob_data(hdev, bdaddr);
+ data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
if (!data) {
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
bacpy(&data->bdaddr, bdaddr);
+ data->bdaddr_type = bdaddr_type;
list_add(&data->list, &hdev->remote_oob_data);
}
- memcpy(data->hash192, hash, sizeof(data->hash192));
- memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
-
- memset(data->hash256, 0, sizeof(data->hash256));
- memset(data->randomizer256, 0, sizeof(data->randomizer256));
-
- BT_DBG("%s for %pMR", hdev->name, bdaddr);
-
- return 0;
-}
-
-int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 *hash192, u8 *randomizer192,
- u8 *hash256, u8 *randomizer256)
-{
- struct oob_data *data;
-
- data = hci_find_remote_oob_data(hdev, bdaddr);
- if (!data) {
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- bacpy(&data->bdaddr, bdaddr);
- list_add(&data->list, &hdev->remote_oob_data);
+ if (hash192 && rand192) {
+ memcpy(data->hash192, hash192, sizeof(data->hash192));
+ memcpy(data->rand192, rand192, sizeof(data->rand192));
+ } else {
+ memset(data->hash192, 0, sizeof(data->hash192));
+ memset(data->rand192, 0, sizeof(data->rand192));
}
- memcpy(data->hash192, hash192, sizeof(data->hash192));
- memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
-
- memcpy(data->hash256, hash256, sizeof(data->hash256));
- memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
+ if (hash256 && rand256) {
+ memcpy(data->hash256, hash256, sizeof(data->hash256));
+ memcpy(data->rand256, rand256, sizeof(data->rand256));
+ } else {
+ memset(data->hash256, 0, sizeof(data->hash256));
+ memset(data->rand256, 0, sizeof(data->rand256));
+ }
BT_DBG("%s for %pMR", hdev->name, bdaddr);
@@ -3913,17 +3959,29 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
}
/* In case of required privacy without resolvable private address,
- * use an unresolvable private address. This is useful for active
+ * use a non-resolvable private address. This is useful for active
* scanning and non-connectable advertising.
*/
if (require_privacy) {
- bdaddr_t urpa;
+ bdaddr_t nrpa;
- get_random_bytes(&urpa, 6);
- urpa.b[5] &= 0x3f; /* Clear two most significant bits */
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
*own_addr_type = ADDR_LE_DEV_RANDOM;
- set_random_addr(req, &urpa);
+ set_random_addr(req, &nrpa);
return 0;
}
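The loop above encodes the two rules for a non-resolvable private address: clear the top two bits of the most significant byte (b[5], since bdaddr_t stores the address LSB first) and never collide with the public address. A standalone plain-C illustration of the same rules, with /dev/urandom standing in for get_random_bytes() and an arbitrary example public address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t pub[6] = { 0x03, 0x02, 0x01, 0xdc, 0x1b, 0x00 };	/* example only */
	uint8_t nrpa[6];
	FILE *rnd = fopen("/dev/urandom", "rb");

	if (!rnd)
		return 1;

	do {
		if (fread(nrpa, 1, sizeof(nrpa), rnd) != sizeof(nrpa))
			return 1;
		nrpa[5] &= 0x3f;	/* clear the two most significant bits */
	} while (memcmp(nrpa, pub, sizeof(nrpa)) == 0);	/* must differ from the public address */

	fclose(rnd);
	printf("NRPA: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       nrpa[5], nrpa[4], nrpa[3], nrpa[2], nrpa[1], nrpa[0]);
	return 0;
}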
@@ -4220,6 +4278,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_remote_oob_data_clear(hdev);
hci_bdaddr_list_clear(&hdev->le_white_list);
hci_conn_params_clear_all(hdev);
+ hci_discovery_filter_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -4244,6 +4303,24 @@ int hci_resume_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_resume_dev);
+/* Reset HCI device */
+int hci_reset_dev(struct hci_dev *hdev)
+{
+ const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(3, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ memcpy(skb_put(skb, 3), hw_err, 3);
+
+ /* Send Hardware Error to upper stack */
+ return hci_recv_frame(hdev, skb);
+}
+EXPORT_SYMBOL(hci_reset_dev);
+
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
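The new exported hci_reset_dev() gives transport drivers a way to report a dead controller by injecting a synthetic Hardware Error event, which the core then handles through the new hci_hardware_error_evt() added to hci_event.c further down (currently it only logs the error code). A hedged sketch of a driver-side caller; struct my_data and my_transport_restart() are hypothetical names:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct my_data {
	struct hci_dev *hdev;
};

/* Hypothetical recovery path in a transport driver. */
static void my_transport_restart(struct my_data *data)
{
	int err;

	/* Queue a synthetic HCI Hardware Error event towards the core. */
	err = hci_reset_dev(data->hdev);
	if (err)
		BT_ERR("Failed to inject hardware error event (%d)", err);
}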
@@ -4477,7 +4554,7 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
BT_DBG("length %u", skb_queue_len(&req->cmd_q));
- /* If an error occured during request building, remove all HCI
+ /* If an error occurred during request building, remove all HCI
* commands queued on the HCI request queue.
*/
if (req->err) {
@@ -4546,7 +4623,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
return -ENOMEM;
}
- /* Stand-alone HCI commands must be flaged as
+ /* Stand-alone HCI commands must be flagged as
* single-command requests.
*/
bt_cb(skb)->req.start = true;
@@ -4566,7 +4643,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
- /* If an error occured during request building, there is no point in
+ /* If an error occurred during request building, there is no point in
* queueing the HCI command. We can simply return.
*/
if (req->err)
@@ -4661,8 +4738,12 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
skb_shinfo(skb)->frag_list = NULL;
- /* Queue all fragments atomically */
- spin_lock(&queue->lock);
+ /* Queue all fragments atomically. We need spin_lock_bh here
+ * because with 6LoWPAN links this function can be called from
+ * softirq context, and taking a normal spin lock there could
+ * cause deadlocks.
+ */
+ spin_lock_bh(&queue->lock);
__skb_queue_tail(queue, skb);
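The deadlock mentioned in the comment above is the usual softirq one: if this queue lock were taken with plain spin_lock() in process context and a softirq (here the 6LoWPAN receive path) ran on the same CPU and tried to take the same lock, it would spin on a lock its own CPU already holds. A comment-style sketch of why the _bh variant is enough:

	/*	spin_lock_bh(&queue->lock);	<- bottom halves are disabled on
	 *					   this CPU for the critical section
	 *	__skb_queue_tail(queue, skb);
	 *	...
	 *	spin_unlock_bh(&queue->lock);	<- BHs re-enabled; a pending
	 *					   softirq that wants the lock only
	 *					   runs after it has been released
	 */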
@@ -4679,7 +4760,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
__skb_queue_tail(queue, skb);
} while (list);
- spin_unlock(&queue->lock);
+ spin_unlock_bh(&queue->lock);
}
}
@@ -5556,7 +5637,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are send
- * during passive scanning. Not using an unresolvable address
+ * during passive scanning. Not using a non-resolvable address
* here is important so that peer devices using direct
* advertising with our address will be correctly reported
* by the controller.
@@ -5570,6 +5651,19 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
*/
filter_policy = update_white_list(req);
+ /* When the controller uses random resolvable addresses, which
+ * means that LE privacy is enabled, then controllers that support
+ * Extended Scanner Filter Policies can also handle directed
+ * advertising.
+ *
+ * So instead of using filter policies 0x00 (no whitelist)
+ * and 0x01 (whitelist enabled), use the new filter policies
+ * 0x02 (no whitelist) and 0x03 (whitelist enabled).
+ */
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
+ (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+ filter_policy |= 0x02;
+
memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = LE_SCAN_PASSIVE;
param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
@@ -5621,6 +5715,15 @@ void hci_update_background_scan(struct hci_dev *hdev)
if (hdev->discovery.state != DISCOVERY_STOPPED)
return;
+ /* Reset RSSI and UUID filters when starting background scanning
+ * since these filters are meant for service discovery only.
+ *
+ * The Start Discovery and Start Service Discovery operations
+ * ensure to set proper values for RSSI threshold and UUID
+ * filter list. So it is safe to just reset them here.
+ */
+ hci_discovery_filter_clear(hdev);
+
hci_req_init(&req, hdev);
if (list_empty(&hdev->pend_le_conns) &&
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8b0a2a6de419..39a5c8a01726 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -189,6 +189,9 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_RESET, &hdev->flags);
+ if (status)
+ return;
+
/* Reset all non-persistent flags */
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
@@ -205,6 +208,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hdev->le_scan_type = LE_SCAN_PASSIVE;
hdev->ssp_debug_mode = 0;
+
+ hci_bdaddr_list_clear(&hdev->le_white_list);
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -252,6 +257,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
__u8 param = *((__u8 *) sent);
@@ -263,6 +270,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_auth_enable_complete(hdev, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -438,6 +447,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
@@ -453,6 +464,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
@@ -466,6 +479,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (!status) {
if (sent->support)
hdev->features[1][0] |= LMP_HOST_SC;
@@ -481,6 +496,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -989,8 +1006,8 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
- mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
- NULL, NULL, rp->status);
+ mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
+ rp->status);
hci_dev_unlock(hdev);
}
@@ -1002,8 +1019,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
- mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
- rp->hash256, rp->randomizer256,
+ mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
+ rp->hash256, rp->rand256,
rp->status);
hci_dev_unlock(hdev);
}
@@ -1045,7 +1062,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- /* If we're doing connection initation as peripheral. Set a
+ /* If we're doing connection initiation as peripheral. Set a
* timeout in case something goes wrong.
*/
if (*sent) {
@@ -1130,6 +1147,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
+ hci_dev_lock(hdev);
+
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
@@ -1179,6 +1198,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
break;
}
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
@@ -1273,6 +1294,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
@@ -1286,6 +1309,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1576,9 +1601,15 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
struct discovery_state *discov = &hdev->discovery;
struct inquiry_entry *e;
- if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
- name_len, conn->dev_class);
+ /* Update the mgmt connected state if necessary. Be careful,
+ * however, with conn objects that exist but are not (yet) connected.
+ * Only those in BT_CONFIG or BT_CONNECTED states can be
+ * considered connected.
+ */
+ if (conn &&
+ (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
+ !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, conn, 0, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
return;
@@ -1943,6 +1974,29 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_switch_role *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (!status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn)
+ clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -2009,13 +2063,14 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
data.pscan_mode = info->pscan_mode;
memcpy(data.dev_class, info->dev_class, 3);
data.clock_offset = info->clock_offset;
- data.rssi = 0x00;
+ data.rssi = HCI_RSSI_INVALID;
data.ssp_mode = 0x00;
flags = hci_inquiry_cache_update(hdev, &data, false);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
- info->dev_class, 0, flags, NULL, 0, NULL, 0);
+ info->dev_class, HCI_RSSI_INVALID,
+ flags, NULL, 0, NULL, 0);
}
hci_dev_unlock(hdev);
@@ -2536,9 +2591,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
+ mgmt_device_connected(hdev, conn, 0, NULL, 0);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -2848,6 +2901,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_create_conn(hdev, ev->status);
break;
+ case HCI_OP_DISCONNECT:
+ hci_cs_disconnect(hdev, ev->status);
+ break;
+
case HCI_OP_ADD_SCO:
hci_cs_add_sco(hdev, ev->status);
break;
@@ -2876,24 +2933,24 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_setup_sync_conn(hdev, ev->status);
break;
- case HCI_OP_SNIFF_MODE:
- hci_cs_sniff_mode(hdev, ev->status);
+ case HCI_OP_CREATE_PHY_LINK:
+ hci_cs_create_phylink(hdev, ev->status);
break;
- case HCI_OP_EXIT_SNIFF_MODE:
- hci_cs_exit_sniff_mode(hdev, ev->status);
+ case HCI_OP_ACCEPT_PHY_LINK:
+ hci_cs_accept_phylink(hdev, ev->status);
break;
- case HCI_OP_DISCONNECT:
- hci_cs_disconnect(hdev, ev->status);
+ case HCI_OP_SNIFF_MODE:
+ hci_cs_sniff_mode(hdev, ev->status);
break;
- case HCI_OP_CREATE_PHY_LINK:
- hci_cs_create_phylink(hdev, ev->status);
+ case HCI_OP_EXIT_SNIFF_MODE:
+ hci_cs_exit_sniff_mode(hdev, ev->status);
break;
- case HCI_OP_ACCEPT_PHY_LINK:
- hci_cs_accept_phylink(hdev, ev->status);
+ case HCI_OP_SWITCH_ROLE:
+ hci_cs_switch_role(hdev, ev->status);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2923,6 +2980,13 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
+static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_hardware_error *ev = (void *) skb->data;
+
+ BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
+}
+
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
@@ -3148,6 +3212,38 @@ unlock:
hci_dev_unlock(hdev);
}
+static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
+{
+ if (key_type == HCI_LK_CHANGED_COMBINATION)
+ return;
+
+ conn->pin_length = pin_len;
+ conn->key_type = key_type;
+
+ switch (key_type) {
+ case HCI_LK_LOCAL_UNIT:
+ case HCI_LK_REMOTE_UNIT:
+ case HCI_LK_DEBUG_COMBINATION:
+ return;
+ case HCI_LK_COMBINATION:
+ if (pin_len == 16)
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ else
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+ break;
+ case HCI_LK_UNAUTH_COMBINATION_P192:
+ case HCI_LK_UNAUTH_COMBINATION_P256:
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+ break;
+ case HCI_LK_AUTH_COMBINATION_P192:
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ break;
+ case HCI_LK_AUTH_COMBINATION_P256:
+ conn->pending_sec_level = BT_SECURITY_FIPS;
+ break;
+ }
+}
+
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_req *ev = (void *) skb->data;
@@ -3174,6 +3270,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
+ clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
+
if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
@@ -3189,8 +3287,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
goto not_found;
}
- conn->key_type = key->type;
- conn->pin_length = key->pin_len;
+ conn_set_key(conn, key->type, key->pin_len);
}
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3220,16 +3317,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn) {
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- pin_len = conn->pin_length;
+ if (!conn)
+ goto unlock;
- if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
- conn->key_type = ev->key_type;
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
- hci_conn_drop(conn);
- }
+ set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
+ conn_set_key(conn, ev->key_type, conn->pin_length);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
@@ -3239,6 +3335,12 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!key)
goto unlock;
+ /* Update connection information since adding the key will have
+ * fixed up the type in the case of changed combination keys.
+ */
+ if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
+ conn_set_key(conn, key->type, key->pin_len);
+
mgmt_new_link_key(hdev, key, persistent);
/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
@@ -3248,15 +3350,16 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
if (key->type == HCI_LK_DEBUG_COMBINATION &&
!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
- list_del(&key->list);
- kfree(key);
- } else if (conn) {
- if (persistent)
- clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
- else
- set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
+ list_del_rcu(&key->list);
+ kfree_rcu(key, rcu);
+ goto unlock;
}
+ if (persistent)
+ clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
+ else
+ set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
+
unlock:
hci_dev_unlock(hdev);
}
@@ -3434,9 +3537,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0,
- conn->dev_class);
+ mgmt_device_connected(hdev, conn, 0, NULL, 0);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3693,7 +3794,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
cp.authentication = conn->auth_type;
- if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+ if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
(conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
cp.oob_data = 0x01;
else
@@ -3948,18 +4049,16 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
goto unlock;
- data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
+ data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
if (data) {
- if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+ if (bredr_sc_enabled(hdev)) {
struct hci_cp_remote_oob_ext_data_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
- memcpy(cp.randomizer192, data->randomizer192,
- sizeof(cp.randomizer192));
+ memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
- memcpy(cp.randomizer256, data->randomizer256,
- sizeof(cp.randomizer256));
+ memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
sizeof(cp), &cp);
@@ -3968,8 +4067,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
bacpy(&cp.bdaddr, &ev->bdaddr);
memcpy(cp.hash, data->hash192, sizeof(cp.hash));
- memcpy(cp.randomizer, data->randomizer192,
- sizeof(cp.randomizer));
+ memcpy(cp.rand, data->rand192, sizeof(cp.rand));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
sizeof(cp), &cp);
@@ -4214,8 +4312,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, &conn->dst, conn->type,
- conn->dst_type, 0, NULL, 0, NULL);
+ mgmt_device_connected(hdev, conn, 0, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -4269,25 +4366,26 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
}
/* This function requires the caller holds hdev->lock */
-static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
- u8 addr_type, u8 adv_type)
+static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type, u8 adv_type)
{
struct hci_conn *conn;
struct hci_conn_params *params;
/* If the event is not connectable don't proceed further */
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
- return;
+ return NULL;
/* Ignore if the device is blocked */
if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
- return;
+ return NULL;
/* Most controller will fail if we try to create new connections
* while we have an existing one in slave role.
*/
if (hdev->conn_hash.le_num_slave > 0)
- return;
+ return NULL;
/* If we're not connectable only connect devices that we have in
* our pend_le_conns list.
@@ -4295,7 +4393,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
addr, addr_type);
if (!params)
- return;
+ return NULL;
switch (params->auto_connect) {
case HCI_AUTO_CONN_DIRECT:
@@ -4304,7 +4402,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
* incoming connections from slave devices.
*/
if (adv_type != LE_ADV_DIRECT_IND)
- return;
+ return NULL;
break;
case HCI_AUTO_CONN_ALWAYS:
/* Devices advertising with ADV_IND or ADV_DIRECT_IND
@@ -4315,7 +4413,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
*/
break;
default:
- return;
+ return NULL;
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
@@ -4328,7 +4426,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
* count consistent once the connection is established.
*/
params->conn = hci_conn_get(conn);
- return;
+ return conn;
}
switch (PTR_ERR(conn)) {
@@ -4341,17 +4439,48 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
break;
default:
BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
+ return NULL;
}
+
+ return NULL;
}
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
- u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+ u8 bdaddr_type, bdaddr_t *direct_addr,
+ u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
struct discovery_state *d = &hdev->discovery;
struct smp_irk *irk;
+ struct hci_conn *conn;
bool match;
u32 flags;
+ /* If the direct address is present, then this report is from
+ * a LE Direct Advertising Report event. In that case it is
+ * important to see if the address is matching the local
+ * controller address.
+ */
+ if (direct_addr) {
+ /* Only resolvable random addresses are valid for these
+ * kind of reports and others can be ignored.
+ */
+ if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
+ return;
+
+ /* If the controller is not using resolvable random
+ * addresses, then this report can be ignored.
+ */
+ if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ return;
+
+ /* If the local IRK of the controller does not match
+ * with the resolvable random address provided, then
+ * this report can be ignored.
+ */
+ if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
+ return;
+ }
+
/* Check if we need to convert to identity address */
irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
if (irk) {
@@ -4360,7 +4489,14 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
}
/* Check if we have been requested to connect to this device */
- check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+ if (conn && type == LE_ADV_IND) {
+ /* Store report for later inclusion by
+ * mgmt_device_connected
+ */
+ memcpy(conn->le_adv_data, data, len);
+ conn->le_adv_data_len = len;
+ }
/* Passive scanning shouldn't trigger any device found events,
* except for devices marked as CONN_REPORT for which we do send
@@ -4481,7 +4617,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
rssi = ev->data[ev->length];
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
- ev->bdaddr_type, rssi, ev->data, ev->length);
+ ev->bdaddr_type, NULL, 0, rssi,
+ ev->data, ev->length);
ptr += sizeof(*ev) + ev->length + 1;
}
@@ -4505,10 +4642,20 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn == NULL)
goto not_found;
- ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
- if (ltk == NULL)
+ ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
+ if (!ltk)
goto not_found;
+ if (smp_ltk_is_sc(ltk)) {
+ /* With SC both EDiv and Rand are set to zero */
+ if (ev->ediv || ev->rand)
+ goto not_found;
+ } else {
+ /* For non-SC keys check that EDiv and Rand match */
+ if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
+ goto not_found;
+ }
+
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
@@ -4526,8 +4673,8 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
if (ltk->type == SMP_STK) {
set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
- list_del(&ltk->list);
- kfree(ltk);
+ list_del_rcu(&ltk->list);
+ kfree_rcu(ltk, rcu);
} else {
clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
}
@@ -4612,6 +4759,27 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
+static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
+
+ hci_dev_lock(hdev);
+
+ while (num_reports--) {
+ struct hci_ev_le_direct_adv_info *ev = ptr;
+
+ process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+ ev->bdaddr_type, &ev->direct_addr,
+ ev->direct_addr_type, ev->rssi, NULL, 0);
+
+ ptr += sizeof(*ev);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -4639,6 +4807,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_remote_conn_param_req_evt(hdev, skb);
break;
+ case HCI_EV_LE_DIRECT_ADV_REPORT:
+ hci_le_direct_adv_report_evt(hdev, skb);
+ break;
+
default:
break;
}
@@ -4735,6 +4907,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_cmd_status_evt(hdev, skb);
break;
+ case HCI_EV_HARDWARE_ERROR:
+ hci_hardware_error_evt(hdev, skb);
+ break;
+
case HCI_EV_ROLE_CHANGE:
hci_role_change_evt(hdev, skb);
break;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 115f149362ba..2c245fdf319a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -878,7 +878,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
@@ -947,7 +947,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
goto done;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
goto drop;
}
@@ -987,7 +987,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
} else {
- /* Stand-alone HCI commands must be flaged as
+ /* Stand-alone HCI commands must be flagged as
* single-command requests.
*/
bt_cb(skb)->req.start = true;
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 9332bc7aa851..bc8610b24077 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT && INPUT
+ depends on BT_BREDR && INPUT
select HID
help
HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1b7d605706aa..cc25d0b74b36 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -736,14 +736,10 @@ static int hidp_setup_hid(struct hidp_session *session,
struct hid_device *hid;
int err;
- session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
- if (!session->rd_data)
- return -ENOMEM;
+ session->rd_data = memdup_user(req->rd_data, req->rd_size);
+ if (IS_ERR(session->rd_data))
+ return PTR_ERR(session->rd_data);
- if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
- err = -EFAULT;
- goto fault;
- }
session->rd_size = req->rd_size;
hid = hid_allocate_device();
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b6f9777e057d..d04dc0095736 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -46,7 +46,6 @@
bool disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
@@ -424,6 +423,9 @@ struct l2cap_chan *l2cap_chan_create(void)
mutex_init(&chan->lock);
+ /* Set default lock nesting level */
+ atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -567,7 +569,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
__clear_chan_timer(chan);
- BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
+ BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
+ state_to_string(chan->state));
chan->ops->teardown(chan, err);
@@ -836,7 +839,10 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
if (!skb)
return;
- if (lmp_no_flush_capable(conn->hcon->hdev))
+ /* Use NO_FLUSH if supported or we have an LE link (which does
+ * not support auto-flushing packets) */
+ if (lmp_no_flush_capable(conn->hcon->hdev) ||
+ conn->hcon->type == LE_LINK)
flags = ACL_START_NO_FLUSH;
else
flags = ACL_START;
@@ -870,8 +876,13 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
return;
}
- if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
- lmp_no_flush_capable(hcon->hdev))
+ /* Use NO_FLUSH for LE links (where this is the only option) or
+ * if the BR/EDR link supports it and flushing has not been
+ * explicitly requested (through FLAG_FLUSHABLE).
+ */
+ if (hcon->type == LE_LINK ||
+ (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev)))
flags = ACL_START_NO_FLUSH;
else
flags = ACL_START;
@@ -1108,10 +1119,10 @@ static bool __amp_capable(struct l2cap_chan *chan)
struct hci_dev *hdev;
bool amp_available = false;
- if (!conn->hs_enabled)
+ if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
return false;
- if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
+ if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
return false;
read_lock(&hci_dev_list_lock);
@@ -2092,8 +2103,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
struct sk_buff **frag;
int sent = 0;
- if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
- msg->msg_iov, count))
+ if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
return -EFAULT;
sent += count;
@@ -2113,8 +2123,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
*frag = tmp;
- if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
- msg->msg_iov, count))
+ if (copy_from_iter(skb_put(*frag, count), count,
+ &msg->msg_iter) != count)
return -EFAULT;
sent += count;
@@ -3084,12 +3094,14 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+ return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+ (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+ return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+ (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
@@ -3318,7 +3330,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
break;
case L2CAP_CONF_EWS:
- if (!chan->conn->hs_enabled)
+ if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
return -ECONNREFUSED;
set_bit(FLAG_EXT_CTRL, &chan->flags);
@@ -3873,9 +3885,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
- mgmt_device_connected(hdev, &hcon->dst, hcon->type,
- hcon->dst_type, 0, NULL, 0,
- hcon->dev_class);
+ mgmt_device_connected(hdev, hcon, 0, NULL, 0);
hci_dev_unlock(hdev);
l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
@@ -4084,7 +4094,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
chan->num_conf_req++;
}
- /* Got Conf Rsp PENDING from remote side and asume we sent
+ /* Got Conf Rsp PENDING from remote side and assume we sent
Conf Rsp PENDING in the code above */
if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
@@ -4324,7 +4334,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (conn->hs_enabled)
+ if (conn->local_fixed_chan & L2CAP_FC_A2MP)
feat_mask |= L2CAP_FEAT_EXT_FLOW
| L2CAP_FEAT_EXT_WINDOW;
@@ -4335,14 +4345,10 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- if (conn->hs_enabled)
- l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
- else
- l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
-
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
+ rsp->data[0] = conn->local_fixed_chan;
+ memset(rsp->data + 1, 0, 7);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
buf);
} else {
@@ -4408,7 +4414,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
break;
case L2CAP_IT_FIXED_CHAN:
- conn->fixed_chan_mask = rsp->data[0];
+ conn->remote_fixed_chan = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -4432,7 +4438,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
if (cmd_len != sizeof(*req))
return -EPROTO;
- if (!conn->hs_enabled)
+ if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
return -EINVAL;
psm = le16_to_cpu(req->psm);
@@ -4862,7 +4868,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
- if (!conn->hs_enabled)
+ if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
return -EINVAL;
chan = l2cap_get_chan_by_dcid(conn, icid);
@@ -5217,9 +5223,10 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
u8 *data)
{
struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
+ struct hci_conn *hcon = conn->hcon;
u16 dcid, mtu, mps, credits, result;
struct l2cap_chan *chan;
- int err;
+ int err, sec_level;
if (cmd_len < sizeof(*rsp))
return -EPROTO;
@@ -5258,6 +5265,26 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
l2cap_chan_ready(chan);
break;
+ case L2CAP_CR_AUTHENTICATION:
+ case L2CAP_CR_ENCRYPTION:
+ /* If we already have MITM protection we can't do
+ * anything.
+ */
+ if (hcon->sec_level > BT_SECURITY_MEDIUM) {
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+ }
+
+ sec_level = hcon->sec_level + 1;
+ if (chan->sec_level < sec_level)
+ chan->sec_level = sec_level;
+
+ /* We'll need to send a new Connect Request */
+ clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
+
+ smp_conn_security(hcon, chan->sec_level);
+ break;
+
default:
l2cap_chan_del(chan, ECONNREFUSED);
break;
@@ -5390,7 +5417,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
- if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
+ if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
+ SMP_ALLOW_STK)) {
result = L2CAP_CR_AUTHENTICATION;
chan = NULL;
goto response_unlock;
@@ -5494,6 +5522,7 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
if (credits > max_credits) {
BT_ERR("LE credits overflow");
l2cap_send_disconn_req(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
/* Return 0 so that we don't trigger an unnecessary
* command reject packet.
@@ -6931,9 +6960,16 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->feat_mask = 0;
- if (hcon->type == ACL_LINK)
- conn->hs_enabled = test_bit(HCI_HS_ENABLED,
- &hcon->hdev->dev_flags);
+ conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
+
+ if (hcon->type == ACL_LINK &&
+ test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
+ conn->local_fixed_chan |= L2CAP_FC_A2MP;
+
+ if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
+ (bredr_sc_enabled(hcon->hdev) ||
+ test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
+ conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
mutex_init(&conn->chan_lock);
@@ -7330,7 +7366,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
l2cap_start_connection(chan);
else
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- } else if (chan->state == BT_CONNECT2) {
+ } else if (chan->state == BT_CONNECT2 &&
+ chan->mode != L2CAP_MODE_LE_FLOWCTL) {
struct l2cap_conn_rsp rsp;
__u16 res, stat;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 31f106e61ca2..f65caf41953f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -285,6 +285,12 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+ /* Listening channels need to use nested locking in order not to
+ * cause lockdep warnings when the created child channels end up
+ * being locked in the same thread as the parent channel.
+ */
+ atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
+
chan->state = BT_LISTEN;
sk->sk_state = BT_LISTEN;
@@ -301,7 +307,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
long timeo;
int err = 0;
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ lock_sock_nested(sk, L2CAP_NESTING_PARENT);
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
@@ -333,7 +339,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
release_sock(sk);
timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ lock_sock_nested(sk, L2CAP_NESTING_PARENT);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1096,6 +1102,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
chan = l2cap_pi(sk)->chan;
conn = chan->conn;
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
if (conn)
mutex_lock(&conn->chan_lock);
@@ -1153,12 +1161,16 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
- BT_DBG("parent %p", parent);
+ BT_DBG("parent %p state %s", parent,
+ state_to_string(parent->sk_state));
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ BT_DBG("child chan %p state %s", chan,
+ state_to_string(chan->state));
+
l2cap_chan_lock(chan);
__clear_chan_timer(chan);
l2cap_chan_close(chan, ECONNRESET);
@@ -1246,7 +1258,16 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
struct sock *sk = chan->data;
struct sock *parent;
- lock_sock(sk);
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ /* This callback can be called both for server (BT_LISTEN)
+ * sockets and "normal" ones. To avoid lockdep warnings with
+ * child socket locking (through l2cap_sock_cleanup_listen)
+ * we need separate nesting levels. The simplest
+ * way to accomplish this is to inherit the nesting level used
+ * for the channel.
+ */
+ lock_sock_nested(sk, atomic_read(&chan->nesting));
parent = bt_sk(sk)->parent;
@@ -1315,13 +1336,6 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
return skb;
}
-static int l2cap_sock_memcpy_fromiovec_cb(struct l2cap_chan *chan,
- unsigned char *kdata,
- struct iovec *iov, int len)
-{
- return memcpy_fromiovec(kdata, iov, len);
-}
-
static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
@@ -1406,7 +1420,6 @@ static const struct l2cap_ops l2cap_chan_ops = {
.set_shutdown = l2cap_sock_set_shutdown_cb,
.get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
- .memcpy_fromiovec = l2cap_sock_memcpy_fromiovec_cb,
};
static void l2cap_sock_destruct(struct sock *sk)
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index efb71b022ab6..693ce8bcd06e 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
#include "smp.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 7
+#define MGMT_REVISION 8
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -93,6 +93,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_CONFIG_INFO,
MGMT_OP_SET_EXTERNAL_CONFIG,
MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_OP_START_SERVICE_DISCOVERY,
};
static const u16 mgmt_events[] = {
@@ -134,8 +135,10 @@ struct pending_cmd {
u16 opcode;
int index;
void *param;
+ size_t param_len;
struct sock *sk;
void *user_data;
+ void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
/* HCI to MGMT error code conversion table */
@@ -574,6 +577,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_le_capable(hdev)) {
settings |= MGMT_SETTING_LE;
settings |= MGMT_SETTING_ADVERTISING;
+ settings |= MGMT_SETTING_SECURE_CONN;
settings |= MGMT_SETTING_PRIVACY;
}
@@ -1202,14 +1206,13 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->opcode = opcode;
cmd->index = hdev->id;
- cmd->param = kmalloc(len, GFP_KERNEL);
+ cmd->param = kmemdup(data, len, GFP_KERNEL);
if (!cmd->param) {
kfree(cmd);
return NULL;
}
- if (data)
- memcpy(cmd->param, data, len);
+ cmd->param_len = len;
cmd->sk = sk;
sock_hold(sk);
@@ -1469,6 +1472,32 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
+static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
+{
+ if (cmd->cmd_complete) {
+ u8 *status = data;
+
+ cmd->cmd_complete(cmd, *status);
+ mgmt_pending_remove(cmd);
+
+ return;
+ }
+
+ cmd_status_rsp(cmd, data);
+}
+
+static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
+{
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
+ cmd->param_len);
+}
+
+static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
+{
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
+ sizeof(struct mgmt_addr_info));
+}
+
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
if (!lmp_bredr_capable(hdev))
@@ -2170,12 +2199,14 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);
- return;
+ goto unlock;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@@ -2193,17 +2224,16 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
struct hci_request req;
- hci_dev_lock(hdev);
-
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
-
- hci_dev_unlock(hdev);
}
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -2725,10 +2755,40 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->addr.type == BDADDR_BREDR) {
+ /* If disconnection is requested, then look up the
+ * connection. If the remote device is connected, it
+ * will later be used to terminate the link.
+ *
+ * Setting it to NULL explicitly means that the link
+ * will not be terminated.
+ */
+ if (cp->disconnect)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = NULL;
+
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
} else {
u8 addr_type;
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
+ &cp->addr.bdaddr);
+ if (conn) {
+ /* Defer clearing the connection parameters
+ * until closing to give a chance of keeping
+ * them in case the device is re-paired.
+ */
+ set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+ /* If disconnection is not requested, then
+ * clear the connection variable so that the
+ * link is not terminated.
+ */
+ if (!cp->disconnect)
+ conn = NULL;
+ }
+
if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
@@ -2736,8 +2796,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
- hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
-
err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
}
@@ -2747,17 +2805,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (cp->disconnect) {
- if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
- else
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- &cp->addr.bdaddr);
- } else {
- conn = NULL;
- }
-
+ /* If the connection variable is set, then termination of the
+ * link is requested.
+ */
if (!conn) {
err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
&rp, sizeof(rp));
@@ -2772,6 +2822,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
+ cmd->cmd_complete = addr_cmd_complete;
+
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -2835,6 +2887,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
+ cmd->cmd_complete = generic_cmd_complete;
+
err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -2987,6 +3041,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
+ cmd->cmd_complete = addr_cmd_complete;
+
bacpy(&reply.bdaddr, &cp->addr.bdaddr);
reply.pin_len = cp->pin_len;
memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
@@ -3059,9 +3115,13 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->disconn_cfm_cb = NULL;
hci_conn_drop(conn);
- hci_conn_put(conn);
- mgmt_pending_remove(cmd);
+ /* The device is paired so there is no need to remove
+ * its connection parameters anymore.
+ */
+ clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+ hci_conn_put(conn);
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3070,8 +3130,10 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
struct pending_cmd *cmd;
cmd = find_pairing(conn);
- if (cmd)
- pairing_complete(cmd, status);
+ if (cmd) {
+ cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3081,10 +3143,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- pairing_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
@@ -3097,10 +3162,13 @@ static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
return;
cmd = find_pairing(conn);
- if (!cmd)
+ if (!cmd) {
BT_DBG("Unable to find a pending command");
- else
- pairing_complete(cmd, mgmt_status(status));
+ return;
+ }
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3197,6 +3265,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
+ cmd->cmd_complete = pairing_complete;
+
/* For LE, just connecting isn't a proof that the pairing finished */
if (cp->addr.type == BDADDR_BREDR) {
conn->connect_cfm_cb = pairing_complete_cb;
@@ -3212,8 +3282,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = hci_conn_get(conn);
if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
- hci_conn_security(conn, sec_level, auth_type, true))
- pairing_complete(cmd, 0);
+ hci_conn_security(conn, sec_level, auth_type, true)) {
+ cmd->cmd_complete(cmd, 0);
+ mgmt_pending_remove(cmd);
+ }
err = 0;
@@ -3255,7 +3327,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+ cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
+ mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
addr, sizeof(*addr));
@@ -3313,6 +3386,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
+ cmd->cmd_complete = addr_cmd_complete;
+
/* Continue with pairing via HCI */
if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
struct hci_cp_user_passkey_reply cp;
@@ -3537,7 +3612,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ if (bredr_sc_enabled(hdev))
err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
0, NULL);
else
@@ -3564,8 +3639,17 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_add_remote_oob_data *cp = data;
u8 status;
+ if (cp->addr.type != BDADDR_BREDR) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
- cp->hash, cp->randomizer);
+ cp->addr.type, cp->hash,
+ cp->rand, NULL, NULL);
if (err < 0)
status = MGMT_STATUS_FAILED;
else
@@ -3575,13 +3659,28 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
status, &cp->addr, sizeof(cp->addr));
} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
struct mgmt_cp_add_remote_oob_ext_data *cp = data;
+ u8 *rand192, *hash192;
u8 status;
- err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
- cp->hash192,
- cp->randomizer192,
- cp->hash256,
- cp->randomizer256);
+ if (cp->addr.type != BDADDR_BREDR) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
+ if (bdaddr_type_is_le(cp->addr.type)) {
+ rand192 = NULL;
+ hash192 = NULL;
+ } else {
+ rand192 = cp->rand192;
+ hash192 = cp->hash192;
+ }
+
+ err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
+ cp->addr.type, hash192, rand192,
+ cp->hash256, cp->rand256);
if (err < 0)
status = MGMT_STATUS_FAILED;
else
@@ -3595,6 +3694,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_INVALID_PARAMS);
}
+unlock:
hci_dev_unlock(hdev);
return err;
}
@@ -3608,14 +3708,26 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
+ if (cp->addr.type != BDADDR_BREDR)
+ return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+
hci_dev_lock(hdev);
- err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
+ if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
+ hci_remote_oob_data_clear(hdev);
+ status = MGMT_STATUS_SUCCESS;
+ goto done;
+ }
+
+ err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
if (err < 0)
status = MGMT_STATUS_INVALID_PARAMS;
else
status = MGMT_STATUS_SUCCESS;
+done:
err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
status, &cp->addr, sizeof(cp->addr));
@@ -3623,64 +3735,150 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+static bool trigger_discovery(struct hci_request *req, u8 *status)
{
- struct pending_cmd *cmd;
- u8 type;
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+ struct hci_cp_inquiry inq_cp;
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ u8 own_addr_type;
int err;
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ *status = mgmt_bredr_support(hdev);
+ if (*status)
+ return false;
- cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
- if (!cmd)
- return -ENOENT;
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ *status = MGMT_STATUS_BUSY;
+ return false;
+ }
- type = hdev->discovery.type;
+ hci_inquiry_cache_flush(hdev);
- err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
- &type, sizeof(type));
- mgmt_pending_remove(cmd);
+ memset(&inq_cp, 0, sizeof(inq_cp));
+ memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
+ inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
+ hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
+ break;
- return err;
+ case DISCOV_TYPE_LE:
+ case DISCOV_TYPE_INTERLEAVED:
+ *status = mgmt_le_support(hdev);
+ if (*status)
+ return false;
+
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ *status = MGMT_STATUS_NOT_SUPPORTED;
+ return false;
+ }
+
+ if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+ /* Don't let discovery abort an outgoing
+ * connection attempt that's using directed
+ * advertising.
+ */
+ if (hci_conn_hash_lookup_state(hdev, LE_LINK,
+ BT_CONNECT)) {
+ *status = MGMT_STATUS_REJECTED;
+ return false;
+ }
+
+ disable_advertising(req);
+ }
+
+ /* If controller is scanning, it means the background scanning
+ * is running. Thus, we should temporarily stop it in order to
+ * set the discovery scanning parameters.
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ hci_req_add_le_scan_disable(req);
+
+ memset(&param_cp, 0, sizeof(param_cp));
+
+ /* All active scans will be done with either a resolvable
+ * private address (when the privacy feature has been enabled)
+ * or a non-resolvable private address.
+ */
+ err = hci_update_random_address(req, true, &own_addr_type);
+ if (err < 0) {
+ *status = MGMT_STATUS_FAILED;
+ return false;
+ }
+
+ param_cp.type = LE_SCAN_ACTIVE;
+ param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
+ param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+ param_cp.own_address_type = own_addr_type;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+ break;
+
+ default:
+ *status = MGMT_STATUS_INVALID_PARAMS;
+ return false;
+ }
+
+ return true;
}
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
- unsigned long timeout = 0;
+ struct pending_cmd *cmd;
+ unsigned long timeout;
BT_DBG("status %d", status);
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
+
+ if (cmd) {
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+ }
+
if (status) {
- hci_dev_lock(hdev);
- mgmt_start_discovery_failed(hdev, status);
- hci_dev_unlock(hdev);
- return;
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
}
- hci_dev_lock(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
- hci_dev_unlock(hdev);
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
break;
-
case DISCOV_TYPE_INTERLEAVED:
timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
break;
-
case DISCOV_TYPE_BREDR:
+ timeout = 0;
break;
-
default:
BT_ERR("Invalid discovery type %d", hdev->discovery.type);
+ timeout = 0;
+ break;
}
- if (!timeout)
- return;
+ if (timeout)
+ queue_delayed_work(hdev->workqueue,
+ &hdev->le_scan_disable, timeout);
- queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
+unlock:
+ hci_dev_unlock(hdev);
}
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -3688,13 +3886,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_cp_start_discovery *cp = data;
struct pending_cmd *cmd;
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
- struct hci_cp_inquiry inq_cp;
struct hci_request req;
- /* General inquiry access code (GIAC) */
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- u8 status, own_addr_type;
+ u8 status;
int err;
BT_DBG("%s", hdev->name);
@@ -3702,176 +3895,188 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_POWERED);
- goto failed;
- }
-
- if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_BUSY);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED,
+ &cp->type, sizeof(cp->type));
goto failed;
}
- if (hdev->discovery.state != DISCOVERY_STOPPED) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_BUSY);
+ if (hdev->discovery.state != DISCOVERY_STOPPED ||
+ test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY, &cp->type,
+ sizeof(cp->type));
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
+ cmd->cmd_complete = generic_cmd_complete;
+
+ /* Clear the discovery filter first to free any previously
+ * allocated memory for the UUID list.
+ */
+ hci_discovery_filter_clear(hdev);
+
hdev->discovery.type = cp->type;
+ hdev->discovery.report_invalid_rssi = false;
hci_req_init(&req, hdev);
- switch (hdev->discovery.type) {
- case DISCOV_TYPE_BREDR:
- status = mgmt_bredr_support(hdev);
- if (status) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- status);
- mgmt_pending_remove(cmd);
- goto failed;
- }
+ if (!trigger_discovery(&req, &status)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_BUSY);
- mgmt_pending_remove(cmd);
- goto failed;
- }
+ err = hci_req_run(&req, start_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
- hci_inquiry_cache_flush(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- memset(&inq_cp, 0, sizeof(inq_cp));
- memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
- inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
- hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
- break;
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
- case DISCOV_TYPE_LE:
- case DISCOV_TYPE_INTERLEAVED:
- status = mgmt_le_support(hdev);
- if (status) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- status);
- mgmt_pending_remove(cmd);
- goto failed;
- }
+static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
+{
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
+}
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
- !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_NOT_SUPPORTED);
- mgmt_pending_remove(cmd);
- goto failed;
- }
+static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_start_service_discovery *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
+ u16 uuid_count, expected_len;
+ u8 status;
+ int err;
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
- /* Don't let discovery abort an outgoing
- * connection attempt that's using directed
- * advertising.
- */
- if (hci_conn_hash_lookup_state(hdev, LE_LINK,
- BT_CONNECT)) {
- err = cmd_status(sk, hdev->id,
- MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_REJECTED);
- mgmt_pending_remove(cmd);
- goto failed;
- }
+ BT_DBG("%s", hdev->name);
- disable_advertising(&req);
- }
+ hci_dev_lock(hdev);
- /* If controller is scanning, it means the background scanning
- * is running. Thus, we should temporarily stop it in order to
- * set the discovery scanning parameters.
- */
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
- hci_req_add_le_scan_disable(&req);
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED,
+ &cp->type, sizeof(cp->type));
+ goto failed;
+ }
- memset(&param_cp, 0, sizeof(param_cp));
+ if (hdev->discovery.state != DISCOVERY_STOPPED ||
+ test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_BUSY, &cp->type,
+ sizeof(cp->type));
+ goto failed;
+ }
- /* All active scans will be done with either a resolvable
- * private address (when privacy feature has been enabled)
- * or unresolvable private address.
- */
- err = hci_update_random_address(&req, true, &own_addr_type);
- if (err < 0) {
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_FAILED);
+ uuid_count = __le16_to_cpu(cp->uuid_count);
+ if (uuid_count > max_uuid_count) {
+ BT_ERR("service_discovery: too big uuid_count value %u",
+ uuid_count);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &cp->type,
+ sizeof(cp->type));
+ goto failed;
+ }
+
+ expected_len = sizeof(*cp) + uuid_count * 16;
+ if (expected_len != len) {
+ BT_ERR("service_discovery: expected %u bytes, got %u bytes",
+ expected_len, len);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &cp->type,
+ sizeof(cp->type));
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
+ hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ cmd->cmd_complete = service_discovery_cmd_complete;
+
+ /* Clear the discovery filter first to free any previously
+ * allocated memory for the UUID list.
+ */
+ hci_discovery_filter_clear(hdev);
+
+ hdev->discovery.type = cp->type;
+ hdev->discovery.rssi = cp->rssi;
+ hdev->discovery.uuid_count = uuid_count;
+
+ if (uuid_count > 0) {
+ hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
+ GFP_KERNEL);
+ if (!hdev->discovery.uuids) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ MGMT_STATUS_FAILED,
+ &cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
goto failed;
}
+ }
- param_cp.type = LE_SCAN_ACTIVE;
- param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
- param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
- param_cp.own_address_type = own_addr_type;
- hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
- break;
+ hci_req_init(&req, hdev);
- default:
- err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- MGMT_STATUS_INVALID_PARAMS);
+ if (!trigger_discovery(&req, &status)) {
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
goto failed;
}
err = hci_req_run(&req, start_discovery_complete);
- if (err < 0)
+ if (err < 0) {
mgmt_pending_remove(cmd);
- else
- hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+ goto failed;
+ }
+
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
failed:
hci_dev_unlock(hdev);
return err;
}
-static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
- int err;
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
- if (!cmd)
- return -ENOENT;
-
- err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
- &hdev->discovery.type, sizeof(hdev->discovery.type));
- mgmt_pending_remove(cmd);
-
- return err;
-}
-
-static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
-{
BT_DBG("status %d", status);
hci_dev_lock(hdev);
- if (status) {
- mgmt_stop_discovery_failed(hdev, status);
- goto unlock;
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ if (cmd) {
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
}
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ if (!status)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-unlock:
hci_dev_unlock(hdev);
}
@@ -3901,12 +4106,14 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
+ cmd->cmd_complete = generic_cmd_complete;
+
hci_req_init(&req, hdev);
hci_stop_discovery(&req);
@@ -4083,12 +4290,14 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
+ hci_dev_lock(hdev);
+
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
cmd_status_rsp, &mgmt_err);
- return;
+ goto unlock;
}
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
@@ -4103,6 +4312,9 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
if (match.sk)
sock_put(match.sk);
+
+unlock:
+ hci_dev_unlock(hdev);
}
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -4506,18 +4718,13 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_mode *cp = data;
struct pending_cmd *cmd;
- u8 val, status;
+ u8 val;
int err;
BT_DBG("request for %s", hdev->name);
- status = mgmt_bredr_support(hdev);
- if (status)
- return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- status);
-
- if (!lmp_sc_capable(hdev) &&
- !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
MGMT_STATUS_NOT_SUPPORTED);
@@ -4527,7 +4734,10 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (!hdev_is_powered(hdev)) {
+ if (!hdev_is_powered(hdev) ||
+ (!lmp_sc_capable(hdev) &&
+ !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
bool changed;
if (cp->val) {
@@ -4844,18 +5054,26 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
addr_type = ADDR_LE_DEV_RANDOM;
- if (key->master)
- type = SMP_LTK;
- else
- type = SMP_LTK_SLAVE;
-
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
+ type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
break;
case MGMT_LTK_AUTHENTICATED:
authenticated = 0x01;
+ type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
+ break;
+ case MGMT_LTK_P256_UNAUTH:
+ authenticated = 0x00;
+ type = SMP_LTK_P256;
break;
+ case MGMT_LTK_P256_AUTH:
+ authenticated = 0x01;
+ type = SMP_LTK_P256;
+ break;
+ case MGMT_LTK_P256_DEBUG:
+ authenticated = 0x00;
+ type = SMP_LTK_P256_DEBUG;
default:
continue;
}
@@ -4873,67 +5091,42 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
-struct cmd_conn_lookup {
- struct hci_conn *conn;
- bool valid_tx_power;
- u8 mgmt_status;
-};
-
-static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
+static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
- struct cmd_conn_lookup *match = data;
- struct mgmt_cp_get_conn_info *cp;
- struct mgmt_rp_get_conn_info rp;
struct hci_conn *conn = cmd->user_data;
+ struct mgmt_rp_get_conn_info rp;
- if (conn != match->conn)
- return;
-
- cp = (struct mgmt_cp_get_conn_info *) cmd->param;
+ memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
- memset(&rp, 0, sizeof(rp));
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
-
- if (!match->mgmt_status) {
+ if (status == MGMT_STATUS_SUCCESS) {
rp.rssi = conn->rssi;
-
- if (match->valid_tx_power) {
- rp.tx_power = conn->tx_power;
- rp.max_tx_power = conn->max_tx_power;
- } else {
- rp.tx_power = HCI_TX_POWER_INVALID;
- rp.max_tx_power = HCI_TX_POWER_INVALID;
- }
+ rp.tx_power = conn->tx_power;
+ rp.max_tx_power = conn->max_tx_power;
+ } else {
+ rp.rssi = HCI_RSSI_INVALID;
+ rp.tx_power = HCI_TX_POWER_INVALID;
+ rp.max_tx_power = HCI_TX_POWER_INVALID;
}
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
- match->mgmt_status, &rp, sizeof(rp));
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
+ &rp, sizeof(rp));
hci_conn_drop(conn);
hci_conn_put(conn);
-
- mgmt_pending_remove(cmd);
}
-static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
+static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
{
struct hci_cp_read_rssi *cp;
+ struct pending_cmd *cmd;
struct hci_conn *conn;
- struct cmd_conn_lookup match;
u16 handle;
+ u8 status;
- BT_DBG("status 0x%02x", status);
+ BT_DBG("status 0x%02x", hci_status);
hci_dev_lock(hdev);
- /* TX power data is valid in case request completed successfully,
- * otherwise we assume it's not valid. At the moment we assume that
- * either both or none of current and max values are valid to keep code
- * simple.
- */
- match.valid_tx_power = !status;
-
/* Commands sent in request are either Read RSSI or Read Transmit Power
* Level so we check which one was last sent to retrieve connection
* handle. Both commands have handle as first parameter so it's safe to
@@ -4946,29 +5139,29 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
if (!cp) {
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
- status = 0;
+ status = MGMT_STATUS_SUCCESS;
+ } else {
+ status = mgmt_status(hci_status);
}
if (!cp) {
- BT_ERR("invalid sent_cmd in response");
+ BT_ERR("invalid sent_cmd in conn_info response");
goto unlock;
}
handle = __le16_to_cpu(cp->handle);
conn = hci_conn_hash_lookup_handle(hdev, handle);
if (!conn) {
- BT_ERR("unknown handle (%d) in response", handle);
+ BT_ERR("unknown handle (%d) in conn_info response", handle);
goto unlock;
}
- match.conn = conn;
- match.mgmt_status = mgmt_status(status);
+ cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
+ if (!cmd)
+ goto unlock;
- /* Cache refresh is complete, now reply for mgmt request for given
- * connection only.
- */
- mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
- get_conn_info_complete, &match);
+ cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -5014,6 +5207,12 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
+ if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ goto unlock;
+ }
+
/* To avoid client trying to guess when to poll again for information we
* calculate conn info age as random value between min/max set in hdev.
*/
@@ -5069,6 +5268,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
hci_conn_hold(conn);
cmd->user_data = hci_conn_get(conn);
+ cmd->cmd_complete = conn_info_cmd_complete;
conn->conn_info_timestamp = jiffies;
} else {
@@ -5086,10 +5286,40 @@ unlock:
return err;
}
-static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
+static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
- struct mgmt_cp_get_clock_info *cp;
+ struct hci_conn *conn = cmd->user_data;
struct mgmt_rp_get_clock_info rp;
+ struct hci_dev *hdev;
+
+ memset(&rp, 0, sizeof(rp));
+ memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
+
+ if (status)
+ goto complete;
+
+ hdev = hci_dev_get(cmd->index);
+ if (hdev) {
+ rp.local_clock = cpu_to_le32(hdev->clock);
+ hci_dev_put(hdev);
+ }
+
+ if (conn) {
+ rp.piconet_clock = cpu_to_le32(conn->clock);
+ rp.accuracy = cpu_to_le16(conn->clock_accuracy);
+ }
+
+complete:
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
+
+ if (conn) {
+ hci_conn_drop(conn);
+ hci_conn_put(conn);
+ }
+}
+
+static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
+{
struct hci_cp_read_clock *hci_cp;
struct pending_cmd *cmd;
struct hci_conn *conn;
@@ -5113,29 +5343,8 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
if (!cmd)
goto unlock;
- cp = cmd->param;
-
- memset(&rp, 0, sizeof(rp));
- memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
-
- if (status)
- goto send_rsp;
-
- rp.local_clock = cpu_to_le32(hdev->clock);
-
- if (conn) {
- rp.piconet_clock = cpu_to_le32(conn->clock);
- rp.accuracy = cpu_to_le16(conn->clock_accuracy);
- }
-
-send_rsp:
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
- &rp, sizeof(rp));
+ cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
- if (conn) {
- hci_conn_drop(conn);
- hci_conn_put(conn);
- }
unlock:
hci_dev_unlock(hdev);
@@ -5191,6 +5400,8 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
+ cmd->cmd_complete = clock_info_cmd_complete;
+
hci_req_init(&req, hdev);
memset(&hci_cp, 0, sizeof(hci_cp));
@@ -5680,6 +5891,7 @@ static const struct mgmt_handler {
{ read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
{ set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
{ set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
+ { start_service_discovery, true, MGMT_START_SERVICE_DISCOVERY_SIZE },
};
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
@@ -5701,7 +5913,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
if (!buf)
return -ENOMEM;
- if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
+ if (memcpy_from_msg(buf, msg, msglen)) {
err = -EFAULT;
goto done;
}
@@ -5816,7 +6028,7 @@ void mgmt_index_removed(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
@@ -5885,6 +6097,11 @@ static int powered_update_hci(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
+ if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+ u8 sc = 0x01;
+ hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+ }
+
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
@@ -5934,8 +6151,7 @@ static int powered_update_hci(struct hci_dev *hdev)
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
struct cmd_lookup match = { NULL, hdev };
- u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
- u8 zero_cod[] = { 0, 0, 0 };
+ u8 status, zero_cod[] = { 0, 0, 0 };
int err;
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
@@ -5951,7 +6167,20 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
+
+ /* If the power off is because of hdev unregistration let us
+ * use the appropriate INVALID_INDEX status. Otherwise use
+ * NOT_POWERED. We cover both scenarios here since later in
+ * mgmt_index_removed() any hci_conn callbacks will have already
+ * been triggered, potentially causing misleading DISCONNECTED
+ * status responses.
+ */
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+ status = MGMT_STATUS_INVALID_INDEX;
+ else
+ status = MGMT_STATUS_NOT_POWERED;
+
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
@@ -6035,8 +6264,19 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
- if (ltk->authenticated)
- return MGMT_LTK_AUTHENTICATED;
+ switch (ltk->type) {
+ case SMP_LTK:
+ case SMP_LTK_SLAVE:
+ if (ltk->authenticated)
+ return MGMT_LTK_AUTHENTICATED;
+ return MGMT_LTK_UNAUTHENTICATED;
+ case SMP_LTK_P256:
+ if (ltk->authenticated)
+ return MGMT_LTK_P256_AUTH;
+ return MGMT_LTK_P256_UNAUTH;
+ case SMP_LTK_P256_DEBUG:
+ return MGMT_LTK_P256_DEBUG;
+ }
return MGMT_LTK_UNAUTHENTICATED;
}
@@ -6171,26 +6411,36 @@ static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
return eir_len;
}
-void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u32 flags, u8 *name, u8 name_len,
- u8 *dev_class)
+void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u32 flags, u8 *name, u8 name_len)
{
char buf[512];
struct mgmt_ev_device_connected *ev = (void *) buf;
u16 eir_len = 0;
- bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_bdaddr(link_type, addr_type);
+ bacpy(&ev->addr.bdaddr, &conn->dst);
+ ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
ev->flags = __cpu_to_le32(flags);
- if (name_len > 0)
- eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
- name, name_len);
+ /* We must ensure that the EIR Data fields are ordered and
+ * unique. Keep it simple for now and avoid the problem by not
+ * adding any BR/EDR data to the LE adv.
+ */
+ if (conn->le_adv_data_len > 0) {
+ memcpy(&ev->eir[eir_len],
+ conn->le_adv_data, conn->le_adv_data_len);
+ eir_len = conn->le_adv_data_len;
+ } else {
+ if (name_len > 0)
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
+ name, name_len);
- if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
- eir_len = eir_append_data(ev->eir, eir_len,
- EIR_CLASS_OF_DEV, dev_class, 3);
+ if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
+ eir_len = eir_append_data(ev->eir, eir_len,
+ EIR_CLASS_OF_DEV,
+ conn->dev_class, 3);
+ }
ev->eir_len = cpu_to_le16(eir_len);
@@ -6200,15 +6450,9 @@ void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
- struct mgmt_cp_disconnect *cp = cmd->param;
struct sock **sk = data;
- struct mgmt_rp_disconnect rp;
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
-
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
- sizeof(rp));
+ cmd->cmd_complete(cmd, 0);
*sk = cmd->sk;
sock_hold(*sk);
@@ -6220,16 +6464,10 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
struct hci_dev *hdev = data;
struct mgmt_cp_unpair_device *cp = cmd->param;
- struct mgmt_rp_unpair_device rp;
-
- memset(&rp, 0, sizeof(rp));
- bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
- rp.addr.type = cp->addr.type;
device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
- cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
-
+ cmd->cmd_complete(cmd, 0);
mgmt_pending_remove(cmd);
}
@@ -6290,7 +6528,6 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
{
u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
struct mgmt_cp_disconnect *cp;
- struct mgmt_rp_disconnect rp;
struct pending_cmd *cmd;
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
@@ -6308,12 +6545,7 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (cp->addr.type != bdaddr_type)
return;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = bdaddr_type;
-
- cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
- mgmt_status(status), &rp, sizeof(rp));
-
+ cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
}
@@ -6352,18 +6584,12 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status)
{
struct pending_cmd *cmd;
- struct mgmt_rp_pin_code_reply rp;
cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = BDADDR_BREDR;
-
- cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
-
+ cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
}
@@ -6371,18 +6597,12 @@ void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status)
{
struct pending_cmd *cmd;
- struct mgmt_rp_pin_code_reply rp;
cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = BDADDR_BREDR;
-
- cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- mgmt_status(status), &rp, sizeof(rp));
-
+ cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
}
@@ -6422,21 +6642,15 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 opcode)
{
struct pending_cmd *cmd;
- struct mgmt_rp_user_confirm_reply rp;
- int err;
cmd = mgmt_pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
- bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_bdaddr(link_type, addr_type);
- err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
- &rp, sizeof(rp));
-
+ cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
- return err;
+ return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -6500,8 +6714,10 @@ void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
- if (cmd)
- pairing_complete(cmd, status);
+ if (cmd) {
+ cmd->cmd_complete(cmd, status);
+ mgmt_pending_remove(cmd);
+ }
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
@@ -6693,8 +6909,8 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
}
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *randomizer192, u8 *hash256,
- u8 *randomizer256, u8 status)
+ u8 *rand192, u8 *hash256, u8 *rand256,
+ u8 status)
{
struct pending_cmd *cmd;
@@ -6708,17 +6924,14 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
mgmt_status(status));
} else {
- if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
- hash256 && randomizer256) {
+ if (bredr_sc_enabled(hdev) && hash256 && rand256) {
struct mgmt_rp_read_local_oob_ext_data rp;
memcpy(rp.hash192, hash192, sizeof(rp.hash192));
- memcpy(rp.randomizer192, randomizer192,
- sizeof(rp.randomizer192));
+ memcpy(rp.rand192, rand192, sizeof(rp.rand192));
memcpy(rp.hash256, hash256, sizeof(rp.hash256));
- memcpy(rp.randomizer256, randomizer256,
- sizeof(rp.randomizer256));
+ memcpy(rp.rand256, rand256, sizeof(rp.rand256));
cmd_complete(cmd->sk, hdev->id,
MGMT_OP_READ_LOCAL_OOB_DATA, 0,
@@ -6727,8 +6940,7 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash192, sizeof(rp.hash));
- memcpy(rp.randomizer, randomizer192,
- sizeof(rp.randomizer));
+ memcpy(rp.rand, rand192, sizeof(rp.rand));
cmd_complete(cmd->sk, hdev->id,
MGMT_OP_READ_LOCAL_OOB_DATA, 0,
@@ -6739,6 +6951,73 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
mgmt_pending_remove(cmd);
}
+static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
+{
+ int i;
+
+ for (i = 0; i < uuid_count; i++) {
+ if (!memcmp(uuid, uuids[i], 16))
+ return true;
+ }
+
+ return false;
+}
+
+static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
+{
+ u16 parsed = 0;
+
+ while (parsed < eir_len) {
+ u8 field_len = eir[0];
+ u8 uuid[16];
+ int i;
+
+ if (field_len == 0)
+ break;
+
+ if (eir_len - parsed < field_len + 1)
+ break;
+
+ switch (eir[1]) {
+ case EIR_UUID16_ALL:
+ case EIR_UUID16_SOME:
+ for (i = 0; i + 3 <= field_len; i += 2) {
+ memcpy(uuid, bluetooth_base_uuid, 16);
+ uuid[13] = eir[i + 3];
+ uuid[12] = eir[i + 2];
+ if (has_uuid(uuid, uuid_count, uuids))
+ return true;
+ }
+ break;
+ case EIR_UUID32_ALL:
+ case EIR_UUID32_SOME:
+ for (i = 0; i + 5 <= field_len; i += 4) {
+ memcpy(uuid, bluetooth_base_uuid, 16);
+ uuid[15] = eir[i + 5];
+ uuid[14] = eir[i + 4];
+ uuid[13] = eir[i + 3];
+ uuid[12] = eir[i + 2];
+ if (has_uuid(uuid, uuid_count, uuids))
+ return true;
+ }
+ break;
+ case EIR_UUID128_ALL:
+ case EIR_UUID128_SOME:
+ for (i = 0; i + 17 <= field_len; i += 16) {
+ memcpy(uuid, eir + i + 2, 16);
+ if (has_uuid(uuid, uuid_count, uuids))
+ return true;
+ }
+ break;
+ }
+
+ parsed += field_len + 1;
+ eir += field_len + 1;
+ }
+
+ return false;
+}
+
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
@@ -6746,6 +7025,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
char buf[512];
struct mgmt_ev_device_found *ev = (void *) buf;
size_t ev_size;
+ bool match;
/* Don't send events for a non-kernel initiated discovery. With
* LE one exception is if we have pend_le_reports > 0 in which
@@ -6758,6 +7038,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
return;
}
+ /* When using service discovery, check whether an RSSI threshold
+ * has been specified. If it has, all results with an RSSI smaller
+ * than the threshold will be dropped.
+ *
+ * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
+ * the results are also dropped.
+ */
+ if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+ (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
+ return;
+
/* Make sure that the buffer is big enough. The 5 extra bytes
* are for the potential CoD field.
*/
@@ -6766,20 +7058,79 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
+ /* In case of device discovery with BR/EDR devices (pre 1.2), the
+ * RSSI value was reported as 0 when not available. This behavior
+ * is kept when using device discovery since it is required for
+ * full backwards compatibility with the API.
+ *
+ * However, when using service discovery, the value 127 will be
+ * returned when the RSSI is not available.
+ */
+ if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
+ rssi = 0;
+
bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
ev->flags = cpu_to_le32(flags);
- if (eir_len > 0)
+ if (eir_len > 0) {
+ /* When using service discovery and a list of UUIDs is
+ * provided, results with no matching UUID should be
+ * dropped. In case there is a match, the result is
+ * kept and checking possible scan response data
+ * will be skipped.
+ */
+ if (hdev->discovery.uuid_count > 0)
+ match = eir_has_uuids(eir, eir_len,
+ hdev->discovery.uuid_count,
+ hdev->discovery.uuids);
+ else
+ match = true;
+
+ if (!match && !scan_rsp_len)
+ return;
+
+ /* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len);
+ } else {
+ /* When using service discovery and a list of UUIDs is
+ * provided, results with empty EIR or advertising data
+ * should be dropped since they do not match any UUID.
+ */
+ if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
+ return;
+
+ match = false;
+ }
if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- if (scan_rsp_len > 0)
+ if (scan_rsp_len > 0) {
+ /* When using service discovery and a list of UUIDs is
+ * provided, results with no matching UUID should be
+ * dropped if there is no previous match from the
+ * advertising data.
+ */
+ if (hdev->discovery.uuid_count > 0) {
+ if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
+ hdev->discovery.uuid_count,
+ hdev->discovery.uuids))
+ return;
+ }
+
+ /* Append scan response data to event */
memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
+ } else {
+ /* When using service discovery and a list of UUIDs is
+ * provided, results with an empty scan response and no
+ * previously matched advertising data should be dropped.
+ */
+ if (hdev->discovery.uuid_count > 0 && !match)
+ return;
+ }
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
@@ -6813,23 +7164,9 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
struct mgmt_ev_discovering ev;
- struct pending_cmd *cmd;
BT_DBG("%s discovering %u", hdev->name, discovering);
- if (discovering)
- cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
- else
- cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
-
- if (cmd != NULL) {
- u8 type = hdev->discovery.type;
-
- cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
- sizeof(type));
- mgmt_pending_remove(cmd);
- }
-
memset(&ev, 0, sizeof(ev));
ev.type = hdev->discovery.type;
ev.discovering = discovering;
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig
index 18d352ea2bc7..335df7515220 100644
--- a/net/bluetooth/rfcomm/Kconfig
+++ b/net/bluetooth/rfcomm/Kconfig
@@ -1,6 +1,6 @@
config BT_RFCOMM
tristate "RFCOMM protocol support"
- depends on BT
+ depends on BT_BREDR
help
RFCOMM provides connection oriented stream transport. RFCOMM
support is required for Dialup Networking, OBEX and other Bluetooth
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 410dd5e76c41..73f8c75abe6e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -78,8 +78,10 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
#define __get_type(b) ((b & 0xef))
#define __test_ea(b) ((b & 0x01))
-#define __test_cr(b) ((b & 0x02))
-#define __test_pf(b) ((b & 0x10))
+#define __test_cr(b) (!!(b & 0x02))
+#define __test_pf(b) (!!(b & 0x10))
+
+#define __session_dir(s) ((s)->initiator ? 0x00 : 0x01)
#define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01)
#define __ctrl(type, pf) (((type & 0xef) | (pf << 4)))
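The new __session_dir() helper centralizes the direction bit that was previously open-coded as !s->initiator at the __dlci() call sites below. Assuming the usual __dlci(dir, channel) encoding from rfcomm.h (DLCI = (channel << 1) | dir, not shown in this hunk), the mapping from a server channel to a DLCI looks roughly like this hypothetical illustration:

	/* Illustration only, not part of the patch */
	static u8 example_dlci(const struct rfcomm_session *s, u8 channel)
	{
		u8 dir = s->initiator ? 0x00 : 0x01;	/* __session_dir(s) */

		return (channel << 1) | dir;	/* e.g. channel 3 -> DLCI 6 or 7 */
	}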
@@ -388,7 +390,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
return err;
}
- dlci = __dlci(!s->initiator, channel);
+ dlci = __dlci(__session_dir(s), channel);
/* Check if DLCI already exists */
if (rfcomm_dlc_get(s, dlci))
@@ -543,7 +545,7 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel)
rfcomm_lock();
s = rfcomm_session_get(src, dst);
if (s) {
- dlci = __dlci(!s->initiator, channel);
+ dlci = __dlci(__session_dir(s), channel);
dlc = rfcomm_dlc_get(s, dlci);
}
rfcomm_unlock();
@@ -904,7 +906,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
hdr->len = __len8(sizeof(*mcc) + 1);
mcc = (void *) ptr; ptr += sizeof(*mcc);
- mcc->type = __mcc_type(cr, RFCOMM_NSC);
+ mcc->type = __mcc_type(0, RFCOMM_NSC);
mcc->len = __len8(1);
/* Type that we didn't like */
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8bbbb5ec468c..2348176401a0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -588,7 +588,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 7ee9e4ab00f8..30e5ea3f1ad3 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -285,7 +285,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
kfree_skb(skb);
return -EFAULT;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f09b6b65cf6b..b67749bb55bf 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -29,14 +29,34 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
+#include "ecc.h"
#include "smp.h"
+/* Low-level debug macros to be used for stuff that we don't want
+ * accidentally ending up in dmesg, i.e. the values of the various crypto keys
+ * and the inputs & outputs of crypto functions.
+ */
+#ifdef DEBUG
+#define SMP_DBG(fmt, ...) printk(KERN_DEBUG "%s: " fmt, __func__, \
+ ##__VA_ARGS__)
+#else
+#define SMP_DBG(fmt, ...) no_printk(KERN_DEBUG "%s: " fmt, __func__, \
+ ##__VA_ARGS__)
+#endif
+
#define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd)
+/* Keys which are not distributed with Secure Connections */
+#define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY)
+
#define SMP_TIMEOUT msecs_to_jiffies(30000)
-#define AUTH_REQ_MASK 0x07
-#define KEY_DIST_MASK 0x07
+#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \
+ 0x1f : 0x07)
+#define KEY_DIST_MASK 0x07
+
+/* Maximum message length that can be passed to aes_cmac */
+#define CMAC_MSG_MAX 80
enum {
SMP_FLAG_TK_VALID,
@@ -44,6 +64,12 @@ enum {
SMP_FLAG_MITM_AUTH,
SMP_FLAG_COMPLETE,
SMP_FLAG_INITIATOR,
+ SMP_FLAG_SC,
+ SMP_FLAG_REMOTE_PK,
+ SMP_FLAG_DEBUG_KEY,
+ SMP_FLAG_WAIT_USER,
+ SMP_FLAG_DHKEY_PENDING,
+ SMP_FLAG_OOB,
};
struct smp_chan {
@@ -57,6 +83,7 @@ struct smp_chan {
u8 rrnd[16]; /* SMP Pairing Random (remote) */
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
+ u8 rr[16];
u8 enc_key_size;
u8 remote_key_dist;
bdaddr_t id_addr;
@@ -67,9 +94,43 @@ struct smp_chan {
struct smp_ltk *ltk;
struct smp_ltk *slave_ltk;
struct smp_irk *remote_irk;
+ u8 *link_key;
unsigned long flags;
+ u8 method;
+ u8 passkey_round;
+
+ /* Secure Connections variables */
+ u8 local_pk[64];
+ u8 local_sk[32];
+ u8 remote_pk[64];
+ u8 dhkey[32];
+ u8 mackey[16];
struct crypto_blkcipher *tfm_aes;
+ struct crypto_hash *tfm_cmac;
+};
+
+/* These debug key values are defined in the SMP section of the core
+ * specification. debug_pk is the public debug key and debug_sk the
+ * private debug key.
+ */
+static const u8 debug_pk[64] = {
+ 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
+ 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
+ 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
+ 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
+
+ 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
+ 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
+ 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
+ 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
+};
+
+static const u8 debug_sk[32] = {
+ 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
+ 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
+ 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
+ 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
};
static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
@@ -80,14 +141,22 @@ static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
dst[len - 1 - i] = src[i];
}
-static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+/* The following functions map to the LE SC SMP crypto functions
+ * AES-CMAC, f4, f5, f6, g2 and h6.
+ */
+
+static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
+ size_t len, u8 mac[16])
{
- struct blkcipher_desc desc;
+ uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX];
+ struct hash_desc desc;
struct scatterlist sg;
- uint8_t tmp[16], data[16];
int err;
- if (tfm == NULL) {
+ if (len > CMAC_MSG_MAX)
+ return -EFBIG;
+
+ if (!tfm) {
BT_ERR("tfm %p", tfm);
return -EINVAL;
}
@@ -95,112 +164,237 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
desc.tfm = tfm;
desc.flags = 0;
- /* The most significant octet of key corresponds to k[0] */
+ crypto_hash_init(&desc);
+
+ /* Swap key and message from LSB to MSB */
swap_buf(k, tmp, 16);
+ swap_buf(m, msg_msb, len);
- err = crypto_blkcipher_setkey(tfm, tmp, 16);
+ SMP_DBG("msg (len %zu) %*phN", len, (int) len, m);
+ SMP_DBG("key %16phN", k);
+
+ err = crypto_hash_setkey(tfm, tmp, 16);
if (err) {
BT_ERR("cipher setkey failed: %d", err);
return err;
}
- /* Most significant octet of plaintextData corresponds to data[0] */
- swap_buf(r, data, 16);
+ sg_init_one(&sg, msg_msb, len);
- sg_init_one(&sg, data, 16);
+ err = crypto_hash_update(&desc, &sg, len);
+ if (err) {
+ BT_ERR("Hash update error %d", err);
+ return err;
+ }
- err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+ err = crypto_hash_final(&desc, mac_msb);
+ if (err) {
+ BT_ERR("Hash final error %d", err);
+ return err;
+ }
+
+ swap_buf(mac_msb, mac, 16);
+
+ SMP_DBG("mac %16phN", mac);
+
+ return 0;
+}
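For reference, the Secure Connections functions implemented below are defined in the core specification (Vol 3, Part H, Section 2.2) purely in terms of AES-CMAC. In the same ASCII notation used elsewhere in these comments:

	f4(U, V, X, Z)                  = AES-CMAC_X(U || V || Z)
	f5(W, N1, N2, A1, A2):            T      = AES-CMAC_SALT(W)
	                                  MacKey = AES-CMAC_T(0x00 || "btle" || N1 || N2 || A1 || A2 || 0x0100)
	                                  LTK    = AES-CMAC_T(0x01 || "btle" || N1 || N2 || A1 || A2 || 0x0100)
	f6(W, N1, N2, R, IOcap, A1, A2) = AES-CMAC_W(N1 || N2 || R || IOcap || A1 || A2)
	g2(U, V, X, Y)                  = AES-CMAC_X(U || V || Y) mod 2^32
	h6(W, keyID)                    = AES-CMAC_W(keyID)

The implementations build these messages least-significant-byte first and rely on aes_cmac() swapping to MSB order, which is why the memcpy() offsets below appear reversed relative to the definitions.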
+
+static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
+ const u8 x[16], u8 z, u8 res[16])
+{
+ u8 m[65];
+ int err;
+
+ SMP_DBG("u %32phN", u);
+ SMP_DBG("v %32phN", v);
+ SMP_DBG("x %16phN z %02x", x, z);
+
+ m[0] = z;
+ memcpy(m + 1, v, 32);
+ memcpy(m + 33, u, 32);
+
+ err = aes_cmac(tfm_cmac, x, m, sizeof(m), res);
if (err)
- BT_ERR("Encrypt data error %d", err);
+ return err;
- /* Most significant octet of encryptedData corresponds to data[0] */
- swap_buf(data, r, 16);
+ SMP_DBG("res %16phN", res);
return err;
}
-static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3])
+static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
+ u8 a1[7], u8 a2[7], u8 mackey[16], u8 ltk[16])
{
- u8 _res[16];
+ /* The btle, salt and length "magic" values are as defined in
+ * the SMP section of the Bluetooth core specification. In ASCII
+ * the btle value ends up being 'btle'. The salt is just a
+ * random number whereas length is the value 256 in little
+ * endian format.
+ */
+ const u8 btle[4] = { 0x65, 0x6c, 0x74, 0x62 };
+ const u8 salt[16] = { 0xbe, 0x83, 0x60, 0x5a, 0xdb, 0x0b, 0x37, 0x60,
+ 0x38, 0xa5, 0xf5, 0xaa, 0x91, 0x83, 0x88, 0x6c };
+ const u8 length[2] = { 0x00, 0x01 };
+ u8 m[53], t[16];
int err;
- /* r' = padding || r */
- memcpy(_res, r, 3);
- memset(_res + 3, 0, 13);
+ SMP_DBG("w %32phN", w);
+ SMP_DBG("n1 %16phN n2 %16phN", n1, n2);
+ SMP_DBG("a1 %7phN a2 %7phN", a1, a2);
- err = smp_e(tfm, irk, _res);
- if (err) {
- BT_ERR("Encrypt error");
+ err = aes_cmac(tfm_cmac, salt, w, 32, t);
+ if (err)
return err;
- }
- /* The output of the random address function ah is:
- * ah(h, r) = e(k, r') mod 2^24
- * The output of the security function e is then truncated to 24 bits
- * by taking the least significant 24 bits of the output of e as the
- * result of ah.
- */
- memcpy(res, _res, 3);
+ SMP_DBG("t %16phN", t);
+
+ memcpy(m, length, 2);
+ memcpy(m + 2, a2, 7);
+ memcpy(m + 9, a1, 7);
+ memcpy(m + 16, n2, 16);
+ memcpy(m + 32, n1, 16);
+ memcpy(m + 48, btle, 4);
+
+ m[52] = 0; /* Counter */
+
+ err = aes_cmac(tfm_cmac, t, m, sizeof(m), mackey);
+ if (err)
+ return err;
+
+ SMP_DBG("mackey %16phN", mackey);
+
+ m[52] = 1; /* Counter */
+
+ err = aes_cmac(tfm_cmac, t, m, sizeof(m), ltk);
+ if (err)
+ return err;
+
+ SMP_DBG("ltk %16phN", ltk);
return 0;
}
-bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr)
+static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
+ const u8 n1[16], u8 n2[16], const u8 r[16],
+ const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
+ u8 res[16])
{
- struct l2cap_chan *chan = hdev->smp_data;
- struct crypto_blkcipher *tfm;
- u8 hash[3];
+ u8 m[65];
int err;
- if (!chan || !chan->data)
- return false;
+ SMP_DBG("w %16phN", w);
+ SMP_DBG("n1 %16phN n2 %16phN", n1, n2);
+ SMP_DBG("r %16phN io_cap %3phN a1 %7phN a2 %7phN", r, io_cap, a1, a2);
- tfm = chan->data;
+ memcpy(m, a2, 7);
+ memcpy(m + 7, a1, 7);
+ memcpy(m + 14, io_cap, 3);
+ memcpy(m + 17, r, 16);
+ memcpy(m + 33, n2, 16);
+ memcpy(m + 49, n1, 16);
- BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
+ err = aes_cmac(tfm_cmac, w, m, sizeof(m), res);
+ if (err)
+ return err;
- err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+ BT_DBG("res %16phN", res);
+
+ return err;
+}
+
+static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
+ const u8 x[16], const u8 y[16], u32 *val)
+{
+ u8 m[80], tmp[16];
+ int err;
+
+ SMP_DBG("u %32phN", u);
+ SMP_DBG("v %32phN", v);
+ SMP_DBG("x %16phN y %16phN", x, y);
+
+ memcpy(m, y, 16);
+ memcpy(m + 16, v, 32);
+ memcpy(m + 48, u, 32);
+
+ err = aes_cmac(tfm_cmac, x, m, sizeof(m), tmp);
if (err)
- return false;
+ return err;
- return !memcmp(bdaddr->b, hash, 3);
+ *val = get_unaligned_le32(tmp);
+ *val %= 1000000;
+
+ SMP_DBG("val %06u", *val);
+
+ return 0;
}
-int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa)
+static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16],
+ const u8 key_id[4], u8 res[16])
{
- struct l2cap_chan *chan = hdev->smp_data;
- struct crypto_blkcipher *tfm;
int err;
- if (!chan || !chan->data)
- return -EOPNOTSUPP;
+ SMP_DBG("w %16phN key_id %4phN", w, key_id);
- tfm = chan->data;
+ err = aes_cmac(tfm_cmac, w, key_id, 4, res);
+ if (err)
+ return err;
- get_random_bytes(&rpa->b[3], 3);
+ SMP_DBG("res %16phN", res);
- rpa->b[5] &= 0x3f; /* Clear two most significant bits */
- rpa->b[5] |= 0x40; /* Set second most significant bit */
+ return err;
+}
- err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
- if (err < 0)
+/* The following functions map to the legacy SMP crypto functions e, c1,
+ * s1 and ah.
+ */
+
+static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+{
+ struct blkcipher_desc desc;
+ struct scatterlist sg;
+ uint8_t tmp[16], data[16];
+ int err;
+
+ if (!tfm) {
+ BT_ERR("tfm %p", tfm);
+ return -EINVAL;
+ }
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ /* The most significant octet of key corresponds to k[0] */
+ swap_buf(k, tmp, 16);
+
+ err = crypto_blkcipher_setkey(tfm, tmp, 16);
+ if (err) {
+ BT_ERR("cipher setkey failed: %d", err);
return err;
+ }
- BT_DBG("RPA %pMR", rpa);
+ /* Most significant octet of plaintextData corresponds to data[0] */
+ swap_buf(r, data, 16);
- return 0;
+ sg_init_one(&sg, data, 16);
+
+ err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+ if (err)
+ BT_ERR("Encrypt data error %d", err);
+
+ /* Most significant octet of encryptedData corresponds to data[0] */
+ swap_buf(data, r, 16);
+
+ return err;
}
-static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
- u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra,
- u8 res[16])
+static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+ const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat,
+ const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16])
{
- struct hci_dev *hdev = smp->conn->hcon->hdev;
u8 p1[16], p2[16];
int err;
- BT_DBG("%s", hdev->name);
-
memset(p1, 0, 16);
/* p1 = pres || preq || _rat || _iat */
@@ -218,7 +412,7 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
/* res = e(k, res) */
- err = smp_e(smp->tfm_aes, k, res);
+ err = smp_e(tfm_aes, k, res);
if (err) {
BT_ERR("Encrypt data error");
return err;
@@ -228,32 +422,103 @@ static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
/* res = e(k, res) */
- err = smp_e(smp->tfm_aes, k, res);
+ err = smp_e(tfm_aes, k, res);
if (err)
BT_ERR("Encrypt data error");
return err;
}
-static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16],
- u8 _r[16])
+static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+ const u8 r1[16], const u8 r2[16], u8 _r[16])
{
- struct hci_dev *hdev = smp->conn->hcon->hdev;
int err;
- BT_DBG("%s", hdev->name);
-
/* Just least significant octets from r1 and r2 are considered */
memcpy(_r, r2, 8);
memcpy(_r + 8, r1, 8);
- err = smp_e(smp->tfm_aes, k, _r);
+ err = smp_e(tfm_aes, k, _r);
if (err)
BT_ERR("Encrypt data error");
return err;
}
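In the same notation, the legacy pairing primitives being reshuffled here compute, per the core specification:

	c1(k, r, preq, pres, iat, ia, rat, ra) = e(k, e(k, r XOR p1) XOR p2)
	    with p1 = pres || preq || rat || iat and p2 = padding || ia || ra
	s1(k, r1, r2) = e(k, r1[63:0] || r2[63:0])

i.e. s1 concatenates the 64 least significant bits of r1 and r2, exactly as the two memcpy() calls above do on the LSB-first buffers.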
+static int smp_ah(struct crypto_blkcipher *tfm, const u8 irk[16],
+ const u8 r[3], u8 res[3])
+{
+ u8 _res[16];
+ int err;
+
+ /* r' = padding || r */
+ memcpy(_res, r, 3);
+ memset(_res + 3, 0, 13);
+
+ err = smp_e(tfm, irk, _res);
+ if (err) {
+ BT_ERR("Encrypt error");
+ return err;
+ }
+
+ /* The output of the random address function ah is:
+ * ah(h, r) = e(k, r') mod 2^24
+ * The output of the security function e is then truncated to 24 bits
+ * by taking the least significant 24 bits of the output of e as the
+ * result of ah.
+ */
+ memcpy(res, _res, 3);
+
+ return 0;
+}
+
+bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
+ const bdaddr_t *bdaddr)
+{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct crypto_blkcipher *tfm;
+ u8 hash[3];
+ int err;
+
+ if (!chan || !chan->data)
+ return false;
+
+ tfm = chan->data;
+
+ BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
+
+ err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+ if (err)
+ return false;
+
+ return !memcmp(bdaddr->b, hash, 3);
+}
+
+int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
+{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct crypto_blkcipher *tfm;
+ int err;
+
+ if (!chan || !chan->data)
+ return -EOPNOTSUPP;
+
+ tfm = chan->data;
+
+ get_random_bytes(&rpa->b[3], 3);
+
+ rpa->b[5] &= 0x3f; /* Clear two most significant bits */
+ rpa->b[5] |= 0x40; /* Set second most significant bit */
+
+ err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
+ if (err < 0)
+ return err;
+
+ BT_DBG("RPA %pMR", rpa);
+
+ return 0;
+}
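Taken together, these two helpers implement resolvable private addresses as defined in the specification: the upper half of the address is a 24-bit prand whose two most significant bits are 0b01, and the lower half is hash = ah(IRK, prand) = e(IRK, padding || prand) mod 2^24. smp_generate_rpa() fills in prand and hash (bdaddr_t is stored LSB first, so rpa->b[3..5] holds prand and rpa->b[0..2] the hash), while smp_irk_matches() recomputes the hash from a candidate IRK and compares it against the received address.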
+
static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
{
struct l2cap_chan *chan = conn->smp;
@@ -274,8 +539,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
memset(&msg, 0, sizeof(msg));
- msg.msg_iov = (struct iovec *) &iv;
- msg.msg_iovlen = 2;
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iv, 2, 1 + len);
l2cap_chan_send(chan, &msg, 1 + len);
@@ -288,17 +552,22 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT);
}
-static __u8 authreq_to_seclevel(__u8 authreq)
+static u8 authreq_to_seclevel(u8 authreq)
{
- if (authreq & SMP_AUTH_MITM)
- return BT_SECURITY_HIGH;
- else
+ if (authreq & SMP_AUTH_MITM) {
+ if (authreq & SMP_AUTH_SC)
+ return BT_SECURITY_FIPS;
+ else
+ return BT_SECURITY_HIGH;
+ } else {
return BT_SECURITY_MEDIUM;
+ }
}
static __u8 seclevel_to_authreq(__u8 sec_level)
{
switch (sec_level) {
+ case BT_SECURITY_FIPS:
case BT_SECURITY_HIGH:
return SMP_AUTH_MITM | SMP_AUTH_BONDING;
case BT_SECURITY_MEDIUM:
@@ -316,7 +585,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- u8 local_dist = 0, remote_dist = 0;
+ u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT;
if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
@@ -332,24 +601,52 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
local_dist |= SMP_DIST_ID_KEY;
+ if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ (authreq & SMP_AUTH_SC)) {
+ struct oob_data *oob_data;
+ u8 bdaddr_type;
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ local_dist |= SMP_DIST_LINK_KEY;
+ remote_dist |= SMP_DIST_LINK_KEY;
+ }
+
+ if (hcon->dst_type == ADDR_LE_DEV_PUBLIC)
+ bdaddr_type = BDADDR_LE_PUBLIC;
+ else
+ bdaddr_type = BDADDR_LE_RANDOM;
+
+ oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
+ bdaddr_type);
+ if (oob_data) {
+ set_bit(SMP_FLAG_OOB, &smp->flags);
+ oob_flag = SMP_OOB_PRESENT;
+ memcpy(smp->rr, oob_data->rand256, 16);
+ memcpy(smp->pcnf, oob_data->hash256, 16);
+ }
+
+ } else {
+ authreq &= ~SMP_AUTH_SC;
+ }
+
if (rsp == NULL) {
req->io_capability = conn->hcon->io_capability;
- req->oob_flag = SMP_OOB_NOT_PRESENT;
+ req->oob_flag = oob_flag;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
req->init_key_dist = local_dist;
req->resp_key_dist = remote_dist;
- req->auth_req = (authreq & AUTH_REQ_MASK);
+ req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
smp->remote_key_dist = remote_dist;
return;
}
rsp->io_capability = conn->hcon->io_capability;
- rsp->oob_flag = SMP_OOB_NOT_PRESENT;
+ rsp->oob_flag = oob_flag;
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
rsp->init_key_dist = req->init_key_dist & remote_dist;
rsp->resp_key_dist = req->resp_key_dist & local_dist;
- rsp->auth_req = (authreq & AUTH_REQ_MASK);
+ rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
smp->remote_key_dist = rsp->init_key_dist;
}
@@ -372,6 +669,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
{
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
+ struct hci_conn *hcon = conn->hcon;
bool complete;
BUG_ON(!smp);
@@ -379,34 +677,46 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
cancel_delayed_work_sync(&smp->security_timer);
complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
- mgmt_smp_complete(conn->hcon, complete);
+ mgmt_smp_complete(hcon, complete);
kfree(smp->csrk);
kfree(smp->slave_csrk);
+ kfree(smp->link_key);
crypto_free_blkcipher(smp->tfm_aes);
+ crypto_free_hash(smp->tfm_cmac);
+
+ /* Ensure that we don't leave any debug key around if debug key
+ * support hasn't been explicitly enabled.
+ */
+ if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG &&
+ !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) {
+ list_del_rcu(&smp->ltk->list);
+ kfree_rcu(smp->ltk, rcu);
+ smp->ltk = NULL;
+ }
/* If pairing failed clean up any keys we might have */
if (!complete) {
if (smp->ltk) {
- list_del(&smp->ltk->list);
- kfree(smp->ltk);
+ list_del_rcu(&smp->ltk->list);
+ kfree_rcu(smp->ltk, rcu);
}
if (smp->slave_ltk) {
- list_del(&smp->slave_ltk->list);
- kfree(smp->slave_ltk);
+ list_del_rcu(&smp->slave_ltk->list);
+ kfree_rcu(smp->slave_ltk, rcu);
}
if (smp->remote_irk) {
- list_del(&smp->remote_irk->list);
- kfree(smp->remote_irk);
+ list_del_rcu(&smp->remote_irk->list);
+ kfree_rcu(smp->remote_irk, rcu);
}
}
chan->data = NULL;
kfree(smp);
- hci_conn_drop(conn->hcon);
+ hci_conn_drop(hcon);
}
static void smp_failure(struct l2cap_conn *conn, u8 reason)
@@ -430,6 +740,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason)
#define REQ_PASSKEY 0x02
#define CFM_PASSKEY 0x03
#define REQ_OOB 0x04
+#define DSP_PASSKEY 0x05
#define OVERLAP 0xFF
static const u8 gen_method[5][5] = {
@@ -440,6 +751,14 @@ static const u8 gen_method[5][5] = {
{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};
+static const u8 sc_method[5][5] = {
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { JUST_WORKS, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+ { DSP_PASSKEY, DSP_PASSKEY, REQ_PASSKEY, JUST_WORKS, DSP_PASSKEY },
+ { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM },
+ { DSP_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+};
+
static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
{
/* If either side has unknown io_caps, use JUST_CFM (which gets
@@ -449,6 +768,9 @@ static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
remote_io > SMP_IO_KEYBOARD_DISPLAY)
return JUST_CFM;
+ if (test_bit(SMP_FLAG_SC, &smp->flags))
+ return sc_method[remote_io][local_io];
+
return gen_method[remote_io][local_io];
}
@@ -458,7 +780,6 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
- u8 method;
u32 passkey = 0;
int ret = 0;
@@ -475,26 +796,28 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
* table.
*/
if (!(auth & SMP_AUTH_MITM))
- method = JUST_CFM;
+ smp->method = JUST_CFM;
else
- method = get_auth_method(smp, local_io, remote_io);
+ smp->method = get_auth_method(smp, local_io, remote_io);
/* Don't confirm locally initiated pairing attempts */
- if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
- method = JUST_WORKS;
+ if (smp->method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR,
+ &smp->flags))
+ smp->method = JUST_WORKS;
/* Don't bother user space with no IO capabilities */
- if (method == JUST_CFM && hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
- method = JUST_WORKS;
+ if (smp->method == JUST_CFM &&
+ hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+ smp->method = JUST_WORKS;
/* If Just Works, Continue with Zero TK */
- if (method == JUST_WORKS) {
+ if (smp->method == JUST_WORKS) {
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
return 0;
}
/* Not Just Works/Confirm results in MITM Authentication */
- if (method != JUST_CFM) {
+ if (smp->method != JUST_CFM) {
set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
if (hcon->pending_sec_level < BT_SECURITY_HIGH)
hcon->pending_sec_level = BT_SECURITY_HIGH;
@@ -503,15 +826,15 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
/* If both devices have Keyboard-Display I/O, the master
* Confirms and the slave Enters the passkey.
*/
- if (method == OVERLAP) {
+ if (smp->method == OVERLAP) {
if (hcon->role == HCI_ROLE_MASTER)
- method = CFM_PASSKEY;
+ smp->method = CFM_PASSKEY;
else
- method = REQ_PASSKEY;
+ smp->method = REQ_PASSKEY;
}
/* Generate random passkey. */
- if (method == CFM_PASSKEY) {
+ if (smp->method == CFM_PASSKEY) {
memset(smp->tk, 0, sizeof(smp->tk));
get_random_bytes(&passkey, sizeof(passkey));
passkey %= 1000000;
@@ -520,12 +843,10 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
}
- hci_dev_lock(hcon->hdev);
-
- if (method == REQ_PASSKEY)
+ if (smp->method == REQ_PASSKEY)
ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
hcon->type, hcon->dst_type);
- else if (method == JUST_CFM)
+ else if (smp->method == JUST_CFM)
ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
hcon->type, hcon->dst_type,
passkey, 1);
@@ -534,8 +855,6 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
hcon->type, hcon->dst_type,
passkey, 0);
- hci_dev_unlock(hcon->hdev);
-
return ret;
}
@@ -547,7 +866,7 @@ static u8 smp_confirm(struct smp_chan *smp)
BT_DBG("conn %p", conn);
- ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp,
conn->hcon->init_addr_type, &conn->hcon->init_addr,
conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
cp.confirm_val);
@@ -578,7 +897,7 @@ static u8 smp_random(struct smp_chan *smp)
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
- ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp,
hcon->init_addr_type, &hcon->init_addr,
hcon->resp_addr_type, &hcon->resp_addr, confirm);
if (ret)
@@ -594,7 +913,7 @@ static u8 smp_random(struct smp_chan *smp)
__le64 rand = 0;
__le16 ediv = 0;
- smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk);
+ smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk);
memset(stk + smp->enc_key_size, 0,
SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -613,7 +932,7 @@ static u8 smp_random(struct smp_chan *smp)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
- smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk);
+ smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk);
memset(stk + smp->enc_key_size, 0,
SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -648,11 +967,13 @@ static void smp_notify_keys(struct l2cap_conn *conn)
mgmt_new_irk(hdev, smp->remote_irk);
/* Now that user space can be considered to know the
* identity address, track the connection based on it
- * from now on.
+ * from now on (assuming this is an LE link).
*/
- bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
- hcon->dst_type = smp->remote_irk->addr_type;
- queue_work(hdev->workqueue, &conn->id_addr_update_work);
+ if (hcon->type == LE_LINK) {
+ bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
+ hcon->dst_type = smp->remote_irk->addr_type;
+ queue_work(hdev->workqueue, &conn->id_addr_update_work);
+ }
/* When receiving an identity resolving key for
* a remote device that does not use a resolvable
@@ -665,16 +986,26 @@ static void smp_notify_keys(struct l2cap_conn *conn)
* just remove it.
*/
if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
- list_del(&smp->remote_irk->list);
- kfree(smp->remote_irk);
+ list_del_rcu(&smp->remote_irk->list);
+ kfree_rcu(smp->remote_irk, rcu);
smp->remote_irk = NULL;
}
}
- /* The LTKs and CSRKs should be persistent only if both sides
- * had the bonding bit set in their authentication requests.
- */
- persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING);
+ if (hcon->type == ACL_LINK) {
+ if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
+ persistent = false;
+ else
+ persistent = !test_bit(HCI_CONN_FLUSH_KEY,
+ &hcon->flags);
+ } else {
+ /* The LTKs and CSRKs should be persistent only if both sides
+ * had the bonding bit set in their authentication requests.
+ */
+ persistent = !!((req->auth_req & rsp->auth_req) &
+ SMP_AUTH_BONDING);
+ }
+
if (smp->csrk) {
smp->csrk->bdaddr_type = hcon->dst_type;
@@ -699,6 +1030,81 @@ static void smp_notify_keys(struct l2cap_conn *conn)
bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
}
+
+ if (smp->link_key) {
+ struct link_key *key;
+ u8 type;
+
+ if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags))
+ type = HCI_LK_DEBUG_COMBINATION;
+ else if (hcon->sec_level == BT_SECURITY_FIPS)
+ type = HCI_LK_AUTH_COMBINATION_P256;
+ else
+ type = HCI_LK_UNAUTH_COMBINATION_P256;
+
+ key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
+ smp->link_key, type, 0, &persistent);
+ if (key) {
+ mgmt_new_link_key(hdev, key, persistent);
+
+ /* Don't keep debug keys around if the relevant
+ * flag is not set.
+ */
+ if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) &&
+ key->type == HCI_LK_DEBUG_COMBINATION) {
+ list_del_rcu(&key->list);
+ kfree_rcu(key, rcu);
+ }
+ }
+ }
+}
+
+static void sc_add_ltk(struct smp_chan *smp)
+{
+ struct hci_conn *hcon = smp->conn->hcon;
+ u8 key_type, auth;
+
+ if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags))
+ key_type = SMP_LTK_P256_DEBUG;
+ else
+ key_type = SMP_LTK_P256;
+
+ if (hcon->pending_sec_level == BT_SECURITY_FIPS)
+ auth = 1;
+ else
+ auth = 0;
+
+ memset(smp->tk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+
+ smp->ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
+ key_type, auth, smp->tk, smp->enc_key_size,
+ 0, 0);
+}
+
+static void sc_generate_link_key(struct smp_chan *smp)
+{
+ /* These constants are as specified in the core specification.
+ * In ASCII they spell out to 'tmp1' and 'lebr'.
+ */
+ const u8 tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 };
+ const u8 lebr[4] = { 0x72, 0x62, 0x65, 0x6c };
+
+ smp->link_key = kzalloc(16, GFP_KERNEL);
+ if (!smp->link_key)
+ return;
+
+ if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) {
+ kfree(smp->link_key);
+ smp->link_key = NULL;
+ return;
+ }
+
+ if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) {
+ kfree(smp->link_key);
+ smp->link_key = NULL;
+ return;
+ }
}
static void smp_allow_key_dist(struct smp_chan *smp)
@@ -715,6 +1121,35 @@ static void smp_allow_key_dist(struct smp_chan *smp)
SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO);
}
+static void sc_generate_ltk(struct smp_chan *smp)
+{
+ /* These constants are as specified in the core specification.
+ * In ASCII they spell out to 'tmp2' and 'brle'.
+ */
+ const u8 tmp2[4] = { 0x32, 0x70, 0x6d, 0x74 };
+ const u8 brle[4] = { 0x65, 0x6c, 0x72, 0x62 };
+ struct hci_conn *hcon = smp->conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct link_key *key;
+
+ key = hci_find_link_key(hdev, &hcon->dst);
+ if (!key) {
+ BT_ERR("%s No Link Key found to generate LTK", hdev->name);
+ return;
+ }
+
+ if (key->type == HCI_LK_DEBUG_COMBINATION)
+ set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+
+ if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk))
+ return;
+
+ if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk))
+ return;
+
+ sc_add_ltk(smp);
+}
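Both helpers are instances of the cross-transport key derivation defined in the core specification, chained through h6 (the key IDs are stored LSB first above, hence the reversed byte arrays):

	LE LTK -> BR/EDR link key:  ILK  = h6(LTK, "tmp1"),  LK  = h6(ILK, "lebr")
	BR/EDR link key -> LE LTK:  ILTK = h6(LK, "tmp2"),   LTK = h6(ILTK, "brle")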
+
static void smp_distribute_keys(struct smp_chan *smp)
{
struct smp_cmd_pairing *req, *rsp;
@@ -743,6 +1178,16 @@ static void smp_distribute_keys(struct smp_chan *smp)
*keydist &= req->resp_key_dist;
}
+ if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+ if (hcon->type == LE_LINK && (*keydist & SMP_DIST_LINK_KEY))
+ sc_generate_link_key(smp);
+ if (hcon->type == ACL_LINK && (*keydist & SMP_DIST_ENC_KEY))
+ sc_generate_ltk(smp);
+
+ /* Clear the keys which are generated but not distributed */
+ *keydist &= ~SMP_SC_NO_DIST;
+ }
+
BT_DBG("keydist 0x%x", *keydist);
if (*keydist & SMP_DIST_ENC_KEY) {
@@ -854,6 +1299,14 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
return NULL;
}
+ smp->tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(smp->tfm_cmac)) {
+ BT_ERR("Unable to create CMAC crypto context");
+ crypto_free_blkcipher(smp->tfm_aes);
+ kfree(smp);
+ return NULL;
+ }
+
smp->conn = conn;
chan->data = smp;
@@ -866,6 +1319,213 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
return smp;
}
+static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
+{
+ struct hci_conn *hcon = smp->conn->hcon;
+ u8 *na, *nb, a[7], b[7];
+
+ if (hcon->out) {
+ na = smp->prnd;
+ nb = smp->rrnd;
+ } else {
+ na = smp->rrnd;
+ nb = smp->prnd;
+ }
+
+ memcpy(a, &hcon->init_addr, 6);
+ memcpy(b, &hcon->resp_addr, 6);
+ a[6] = hcon->init_addr_type;
+ b[6] = hcon->resp_addr_type;
+
+ return smp_f5(smp->tfm_cmac, smp->dhkey, na, nb, a, b, mackey, ltk);
+}
+
+static void sc_dhkey_check(struct smp_chan *smp)
+{
+ struct hci_conn *hcon = smp->conn->hcon;
+ struct smp_cmd_dhkey_check check;
+ u8 a[7], b[7], *local_addr, *remote_addr;
+ u8 io_cap[3], r[16];
+
+ memcpy(a, &hcon->init_addr, 6);
+ memcpy(b, &hcon->resp_addr, 6);
+ a[6] = hcon->init_addr_type;
+ b[6] = hcon->resp_addr_type;
+
+ if (hcon->out) {
+ local_addr = a;
+ remote_addr = b;
+ memcpy(io_cap, &smp->preq[1], 3);
+ } else {
+ local_addr = b;
+ remote_addr = a;
+ memcpy(io_cap, &smp->prsp[1], 3);
+ }
+
+ memset(r, 0, sizeof(r));
+
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ put_unaligned_le32(hcon->passkey_notify, r);
+
+ if (smp->method == REQ_OOB)
+ memcpy(r, smp->rr, 16);
+
+ smp_f6(smp->tfm_cmac, smp->mackey, smp->prnd, smp->rrnd, r, io_cap,
+ local_addr, remote_addr, check.e);
+
+ smp_send_cmd(smp->conn, SMP_CMD_DHKEY_CHECK, sizeof(check), &check);
+}
+
+static u8 sc_passkey_send_confirm(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_cmd_pairing_confirm cfm;
+ u8 r;
+
+ r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01);
+ r |= 0x80;
+
+ get_random_bytes(smp->prnd, sizeof(smp->prnd));
+
+ if (smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, r,
+ cfm.confirm_val))
+ return SMP_UNSPECIFIED;
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm);
+
+ return 0;
+}
+
+static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ u8 cfm[16], r;
+
+ /* Ignore the PDU if we've already done 20 rounds (0 - 19) */
+ if (smp->passkey_round >= 20)
+ return 0;
+
+ switch (smp_op) {
+ case SMP_CMD_PAIRING_RANDOM:
+ r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01);
+ r |= 0x80;
+
+ if (smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
+ smp->rrnd, r, cfm))
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(smp->pcnf, cfm, 16))
+ return SMP_CONFIRM_FAILED;
+
+ smp->passkey_round++;
+
+ if (smp->passkey_round == 20) {
+ /* Generate MacKey and LTK */
+ if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk))
+ return SMP_UNSPECIFIED;
+ }
+
+ /* The round is only complete when the initiator
+ * receives pairing random.
+ */
+ if (!hcon->out) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ if (smp->passkey_round == 20)
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ else
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ return 0;
+ }
+
+ /* Start the next round */
+ if (smp->passkey_round != 20)
+ return sc_passkey_round(smp, 0);
+
+ /* Passkey rounds are complete - start DHKey Check */
+ sc_dhkey_check(smp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+
+ break;
+
+ case SMP_CMD_PAIRING_CONFIRM:
+ if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+ return 0;
+ }
+
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+
+ if (hcon->out) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ return 0;
+ }
+
+ return sc_passkey_send_confirm(smp);
+
+ case SMP_CMD_PUBLIC_KEY:
+ default:
+ /* Initiating device starts the round */
+ if (!hcon->out)
+ return 0;
+
+ BT_DBG("%s Starting passkey round %u", hdev->name,
+ smp->passkey_round + 1);
+
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
+ return sc_passkey_send_confirm(smp);
+ }
+
+ return 0;
+}
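The 20 rounds above implement Passkey Entry for Secure Connections: round i commits to bit i of the 20-bit passkey, with each side sending

	C_i = f4(PK_own, PK_peer, N_i, r_i),  where r_i = 0x80 | bit_i(passkey)

and revealing N_i only afterwards, so neither side can learn the passkey faster than one bit per round. MacKey and LTK are computed via f5 only once all 20 confirm/random exchanges have succeeded.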
+
+static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ u8 smp_op;
+
+ clear_bit(SMP_FLAG_WAIT_USER, &smp->flags);
+
+ switch (mgmt_op) {
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ smp_failure(smp->conn, SMP_PASSKEY_ENTRY_FAILED);
+ return 0;
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ smp_failure(smp->conn, SMP_NUMERIC_COMP_FAILED);
+ return 0;
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ hcon->passkey_notify = le32_to_cpu(passkey);
+ smp->passkey_round = 0;
+
+ if (test_and_clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags))
+ smp_op = SMP_CMD_PAIRING_CONFIRM;
+ else
+ smp_op = 0;
+
+ if (sc_passkey_round(smp, smp_op))
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Initiator sends DHKey check first */
+ if (hcon->out) {
+ sc_dhkey_check(smp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
+ sc_dhkey_check(smp);
+ sc_add_ltk(smp);
+ }
+
+ return 0;
+}
+
int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
{
struct l2cap_conn *conn = hcon->l2cap_data;
@@ -891,6 +1551,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
smp = chan->data;
+ if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+ err = sc_user_reply(smp, mgmt_op, passkey);
+ goto unlock;
+ }
+
switch (mgmt_op) {
case MGMT_OP_USER_PASSKEY_REPLY:
value = le32_to_cpu(passkey);
@@ -926,6 +1591,46 @@ unlock:
return err;
}
+static void build_bredr_pairing_cmd(struct smp_chan *smp,
+ struct smp_cmd_pairing *req,
+ struct smp_cmd_pairing *rsp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ u8 local_dist = 0, remote_dist = 0;
+
+ if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) {
+ local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
+ remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
+ }
+
+ if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+ remote_dist |= SMP_DIST_ID_KEY;
+
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ local_dist |= SMP_DIST_ID_KEY;
+
+ if (!rsp) {
+ memset(req, 0, sizeof(*req));
+
+ req->init_key_dist = local_dist;
+ req->resp_key_dist = remote_dist;
+ req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+
+ smp->remote_key_dist = remote_dist;
+
+ return;
+ }
+
+ memset(rsp, 0, sizeof(*rsp));
+
+ rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ rsp->init_key_dist = req->init_key_dist & remote_dist;
+ rsp->resp_key_dist = req->resp_key_dist & local_dist;
+
+ smp->remote_key_dist = rsp->init_key_dist;
+}
+
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
@@ -952,16 +1657,50 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_UNSPECIFIED;
/* We didn't start the pairing, so match remote */
- auth = req->auth_req & AUTH_REQ_MASK;
+ auth = req->auth_req & AUTH_REQ_MASK(hdev);
if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
(auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
+ if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ return SMP_AUTH_REQUIREMENTS;
+
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
+ /* SMP over BR/EDR requires special treatment */
+ if (conn->hcon->type == ACL_LINK) {
+ /* We must have a BR/EDR SC link */
+ if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
+ !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ return SMP_CROSS_TRANSP_NOT_ALLOWED;
+
+ set_bit(SMP_FLAG_SC, &smp->flags);
+
+ build_bredr_pairing_cmd(smp, req, &rsp);
+
+ key_size = min(req->max_key_size, rsp.max_key_size);
+ if (check_enc_key_size(conn, key_size))
+ return SMP_ENC_KEY_SIZE;
+
+ /* Clear bits which are generated but not distributed */
+ smp->remote_key_dist &= ~SMP_SC_NO_DIST;
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+
+ smp_distribute_keys(smp);
+ return 0;
+ }
+
+ build_pairing_cmd(conn, req, &rsp, auth);
+
+ if (rsp.auth_req & SMP_AUTH_SC)
+ set_bit(SMP_FLAG_SC, &smp->flags);
+
if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
sec_level = BT_SECURITY_MEDIUM;
else
@@ -970,7 +1709,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (sec_level > conn->hcon->pending_sec_level)
conn->hcon->pending_sec_level = sec_level;
- /* If we need MITM check that it can be acheived */
+ /* If we need MITM check that it can be achieved */
if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
u8 method;
@@ -980,8 +1719,6 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_AUTH_REQUIREMENTS;
}
- build_pairing_cmd(conn, req, &rsp, auth);
-
key_size = min(req->max_key_size, rsp.max_key_size);
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
@@ -992,7 +1729,18 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
+ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+
+ if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+ SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY);
+ /* Clear bits which are generated but not distributed */
+ smp->remote_key_dist &= ~SMP_SC_NO_DIST;
+ /* Wait for Public Key from Initiating Device */
+ return 0;
+ } else {
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ }
/* Request setup of TK */
ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
@@ -1002,11 +1750,46 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
+static u8 sc_send_public_key(struct smp_chan *smp)
+{
+ struct hci_dev *hdev = smp->conn->hcon->hdev;
+
+ BT_DBG("");
+
+ if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) {
+ BT_DBG("Using debug keys");
+ memcpy(smp->local_pk, debug_pk, 64);
+ memcpy(smp->local_sk, debug_sk, 32);
+ set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+ } else {
+ while (true) {
+ /* Generate local key pair for Secure Connections */
+ if (!ecc_make_key(smp->local_pk, smp->local_sk))
+ return SMP_UNSPECIFIED;
+
+ /* This is unlikely, but we need to check that
+ * we didn't accidentally generate a debug key.
+ */
+ if (memcmp(smp->local_sk, debug_sk, 32))
+ break;
+ }
+ }
+
+ SMP_DBG("Local Public Key X: %32phN", smp->local_pk);
+ SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]);
+ SMP_DBG("Local Private Key: %32phN", smp->local_sk);
+
+ smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk);
+
+ return 0;
+}
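Unless the spec-defined debug key pair is forced via HCI_USE_DEBUG_KEYS, each side generates a fresh P-256 key pair here and exchanges the 64-byte public key (X || Y). The shared secret computed later from the peer's key is

	DHKey = P256(SK_own, PK_peer)

i.e. the X coordinate of the scalar multiplication, which both sides arrive at identically; the ecc_make_key()/ecdh_shared_secret() helpers from the new ecc code provide these two operations.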
+
static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
+ struct hci_dev *hdev = conn->hcon->hdev;
u8 key_size, auth;
int ret;
@@ -1026,9 +1809,33 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
- auth = rsp->auth_req & AUTH_REQ_MASK;
+ auth = rsp->auth_req & AUTH_REQ_MASK(hdev);
+
+ if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ return SMP_AUTH_REQUIREMENTS;
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
- /* If we need MITM check that it can be acheived */
+ /* Update remote key distribution in case the remote cleared
+ * some bits that we had enabled in our request.
+ */
+ smp->remote_key_dist &= rsp->resp_key_dist;
+
+ /* For BR/EDR this means we're done and can start phase 3 */
+ if (conn->hcon->type == ACL_LINK) {
+ /* Clear bits which are generated but not distributed */
+ smp->remote_key_dist &= ~SMP_SC_NO_DIST;
+ smp_distribute_keys(smp);
+ return 0;
+ }
+
+ if ((req->auth_req & SMP_AUTH_SC) && (auth & SMP_AUTH_SC))
+ set_bit(SMP_FLAG_SC, &smp->flags);
+ else if (conn->hcon->pending_sec_level > BT_SECURITY_HIGH)
+ conn->hcon->pending_sec_level = BT_SECURITY_HIGH;
+
+ /* If we need MITM check that it can be achieved */
if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
u8 method;
@@ -1040,14 +1847,18 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
get_random_bytes(smp->prnd, sizeof(smp->prnd));
- smp->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
-
/* Update remote key distribution in case the remote cleared
* some bits that we had enabled in our request.
*/
smp->remote_key_dist &= rsp->resp_key_dist;
+ if (test_bit(SMP_FLAG_SC, &smp->flags)) {
+ /* Clear bits which are generated but not distributed */
+ smp->remote_key_dist &= ~SMP_SC_NO_DIST;
+ SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY);
+ return sc_send_public_key(smp);
+ }
+
auth |= req->auth_req;
ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
@@ -1063,6 +1874,28 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
+static u8 sc_check_confirm(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+
+ BT_DBG("");
+
+ /* Public Key exchange must happen before any other steps */
+ if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags))
+ return SMP_UNSPECIFIED;
+
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
+
+ if (conn->hcon->out) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+ }
+
+ return 0;
+}
+
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_chan *chan = conn->smp;
@@ -1076,6 +1909,9 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
skb_pull(skb, sizeof(smp->pcnf));
+ if (test_bit(SMP_FLAG_SC, &smp->flags))
+ return sc_check_confirm(smp);
+
if (conn->hcon->out) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
@@ -1095,6 +1931,10 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
+ struct hci_conn *hcon = conn->hcon;
+ u8 *pkax, *pkbx, *na, *nb;
+ u32 passkey;
+ int err;
BT_DBG("conn %p", conn);
@@ -1104,7 +1944,75 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
skb_pull(skb, sizeof(smp->rrnd));
- return smp_random(smp);
+ if (!test_bit(SMP_FLAG_SC, &smp->flags))
+ return smp_random(smp);
+
+ if (hcon->out) {
+ pkax = smp->local_pk;
+ pkbx = smp->remote_pk;
+ na = smp->prnd;
+ nb = smp->rrnd;
+ } else {
+ pkax = smp->remote_pk;
+ pkbx = smp->local_pk;
+ na = smp->rrnd;
+ nb = smp->prnd;
+ }
+
+ if (smp->method == REQ_OOB) {
+ if (!hcon->out)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ goto mackey_and_ltk;
+ }
+
+ /* Passkey entry has special treatment */
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
+
+ if (hcon->out) {
+ u8 cfm[16];
+
+ err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
+ smp->rrnd, 0, cfm);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(smp->pcnf, cfm, 16))
+ return SMP_CONFIRM_FAILED;
+ } else {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ }
+
+mackey_and_ltk:
+ /* Generate MacKey and LTK */
+ err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ if (smp->method == JUST_WORKS || smp->method == REQ_OOB) {
+ if (hcon->out) {
+ sc_dhkey_check(smp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ }
+ return 0;
+ }
+
+ err = smp_g2(smp->tfm_cmac, pkax, pkbx, na, nb, &passkey);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
+ hcon->dst_type, passkey, 0);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ set_bit(SMP_FLAG_WAIT_USER, &smp->flags);
+
+ return 0;
}
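For the Numeric Comparison path the value handed to user space above is the one defined in the specification,

	Va = Vb = g2(PKax, PKbx, Na, Nb) mod 10^6

with the mod 10^6 reduction already folded into smp_g2(). Both sides display the same six-digit number and pairing proceeds only if the user confirms it on both ends.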
static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
@@ -1112,8 +2020,7 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
struct smp_ltk *key;
struct hci_conn *hcon = conn->hcon;
- key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
- hcon->role);
+ key = hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role);
if (!key)
return false;
@@ -1132,20 +2039,21 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
return true;
}
-bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
+bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
+ enum smp_key_pref key_pref)
{
if (sec_level == BT_SECURITY_LOW)
return true;
- /* If we're encrypted with an STK always claim insufficient
- * security. This way we allow the connection to be re-encrypted
- * with an LTK, even if the LTK provides the same level of
- * security. Only exception is if we don't have an LTK (e.g.
- * because of key distribution bits).
+ /* If we're encrypted with an STK but the caller prefers using
+ * LTK claim insufficient security. This way we allow the
+ * connection to be re-encrypted with an LTK, even if the LTK
+ * provides the same level of security. Only exception is if we
+ * don't have an LTK (e.g. because of key distribution bits).
*/
- if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) &&
- hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
- hcon->role))
+ if (key_pref == SMP_USE_LTK &&
+ test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) &&
+ hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role))
return false;
if (hcon->sec_level >= sec_level)
@@ -1159,6 +2067,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_security_req *rp = (void *) skb->data;
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
struct smp_chan *smp;
u8 sec_level, auth;
@@ -1170,14 +2079,17 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (hcon->role != HCI_ROLE_MASTER)
return SMP_CMD_NOTSUPP;
- auth = rp->auth_req & AUTH_REQ_MASK;
+ auth = rp->auth_req & AUTH_REQ_MASK(hdev);
+
+ if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
+ return SMP_AUTH_REQUIREMENTS;
if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
sec_level = BT_SECURITY_MEDIUM;
else
sec_level = authreq_to_seclevel(auth);
- if (smp_sufficient_security(hcon, sec_level))
+ if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
return 0;
if (sec_level > hcon->pending_sec_level)
@@ -1227,7 +2139,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
return 1;
- if (smp_sufficient_security(hcon, sec_level))
+ if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
return 1;
if (sec_level > hcon->pending_sec_level)
@@ -1253,6 +2165,9 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq = seclevel_to_authreq(sec_level);
+ if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags))
+ authreq |= SMP_AUTH_SC;
+
/* Require MITM if IO Capability allows or the security level
* requires it.
*/
@@ -1329,7 +2244,6 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
- hci_dev_lock(hdev);
authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK,
authenticated, smp->tk, smp->enc_key_size,
@@ -1337,7 +2251,6 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
smp->ltk = ltk;
if (!(smp->remote_key_dist & KEY_DIST_MASK))
smp_distribute_keys(smp);
- hci_dev_unlock(hdev);
return 0;
}
@@ -1384,8 +2297,6 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
skb_pull(skb, sizeof(*info));
- hci_dev_lock(hcon->hdev);
-
/* Strictly speaking the Core Specification (4.1) allows sending
* an empty address which would force us to rely on just the IRK
* as "identity information". However, since such
@@ -1413,8 +2324,6 @@ distribute:
if (!(smp->remote_key_dist & KEY_DIST_MASK))
smp_distribute_keys(smp);
- hci_dev_unlock(hcon->hdev);
-
return 0;
}
@@ -1423,7 +2332,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_sign_info *rp = (void *) skb->data;
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
- struct hci_dev *hdev = conn->hcon->hdev;
struct smp_csrk *csrk;
BT_DBG("conn %p", conn);
@@ -1436,7 +2344,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
- hci_dev_lock(hdev);
csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
if (csrk) {
csrk->master = 0x01;
@@ -1444,7 +2351,234 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
}
smp->csrk = csrk;
smp_distribute_keys(smp);
- hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static u8 sc_select_method(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_cmd_pairing *local, *remote;
+ u8 local_mitm, remote_mitm, local_io, remote_io, method;
+
+ if (test_bit(SMP_FLAG_OOB, &smp->flags))
+ return REQ_OOB;
+
+ /* The preq/prsp contain the raw Pairing Request/Response PDUs
+ * which are needed as inputs to some crypto functions. To get
+ * the "struct smp_cmd_pairing" from them we need to skip the
+ * first byte which contains the opcode.
+ */
+ if (hcon->out) {
+ local = (void *) &smp->preq[1];
+ remote = (void *) &smp->prsp[1];
+ } else {
+ local = (void *) &smp->prsp[1];
+ remote = (void *) &smp->preq[1];
+ }
+
+ local_io = local->io_capability;
+ remote_io = remote->io_capability;
+
+ local_mitm = (local->auth_req & SMP_AUTH_MITM);
+ remote_mitm = (remote->auth_req & SMP_AUTH_MITM);
+
+ /* If either side wants MITM, look up the method from the table,
+ * otherwise use JUST WORKS.
+ */
+ if (local_mitm || remote_mitm)
+ method = get_auth_method(smp, local_io, remote_io);
+ else
+ method = JUST_WORKS;
+
+ /* Don't confirm locally initiated pairing attempts */
+ if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ method = JUST_WORKS;
+
+ return method;
+}
+
+static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_public_key *key = (void *) skb->data;
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_cmd_pairing_confirm cfm;
+ int err;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*key))
+ return SMP_INVALID_PARAMS;
+
+ memcpy(smp->remote_pk, key, 64);
+
+ /* Non-initiating device sends its public key after receiving
+ * the key from the initiating device.
+ */
+ if (!hcon->out) {
+ err = sc_send_public_key(smp);
+ if (err)
+ return err;
+ }
+
+ SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk);
+ SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]);
+
+ if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey))
+ return SMP_UNSPECIFIED;
+
+ SMP_DBG("DHKey %32phN", smp->dhkey);
+
+ set_bit(SMP_FLAG_REMOTE_PK, &smp->flags);
+
+ smp->method = sc_select_method(smp);
+
+ BT_DBG("%s selected method 0x%02x", hdev->name, smp->method);
+
+ /* JUST_WORKS and JUST_CFM result in an unauthenticated key */
+ if (smp->method == JUST_WORKS || smp->method == JUST_CFM)
+ hcon->pending_sec_level = BT_SECURITY_MEDIUM;
+ else
+ hcon->pending_sec_level = BT_SECURITY_FIPS;
+
+ if (!memcmp(debug_pk, smp->remote_pk, 64))
+ set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+
+ if (smp->method == DSP_PASSKEY) {
+ get_random_bytes(&hcon->passkey_notify,
+ sizeof(hcon->passkey_notify));
+ hcon->passkey_notify %= 1000000;
+ hcon->passkey_entered = 0;
+ smp->passkey_round = 0;
+ if (mgmt_user_passkey_notify(hdev, &hcon->dst, hcon->type,
+ hcon->dst_type,
+ hcon->passkey_notify,
+ hcon->passkey_entered))
+ return SMP_UNSPECIFIED;
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ return sc_passkey_round(smp, SMP_CMD_PUBLIC_KEY);
+ }
+
+ if (smp->method == REQ_OOB) {
+ err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk,
+ smp->rr, 0, cfm.confirm_val);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ return SMP_CONFIRM_FAILED;
+
+ if (hcon->out)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+
+ return 0;
+ }
+
+ if (hcon->out)
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
+ if (smp->method == REQ_PASSKEY) {
+ if (mgmt_user_passkey_request(hdev, &hcon->dst, hcon->type,
+ hcon->dst_type))
+ return SMP_UNSPECIFIED;
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ set_bit(SMP_FLAG_WAIT_USER, &smp->flags);
+ return 0;
+ }
+
+ /* The Initiating device waits for the non-initiating device to
+ * send the confirm value.
+ */
+ if (conn->hcon->out)
+ return 0;
+
+ err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
+ 0, cfm.confirm_val);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+
+ return 0;
+}
+
+static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_dhkey_check *check = (void *) skb->data;
+ struct l2cap_chan *chan = conn->smp;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp = chan->data;
+ u8 a[7], b[7], *local_addr, *remote_addr;
+ u8 io_cap[3], r[16], e[16];
+ int err;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*check))
+ return SMP_INVALID_PARAMS;
+
+ memcpy(a, &hcon->init_addr, 6);
+ memcpy(b, &hcon->resp_addr, 6);
+ a[6] = hcon->init_addr_type;
+ b[6] = hcon->resp_addr_type;
+
+ if (hcon->out) {
+ local_addr = a;
+ remote_addr = b;
+ memcpy(io_cap, &smp->prsp[1], 3);
+ } else {
+ local_addr = b;
+ remote_addr = a;
+ memcpy(io_cap, &smp->preq[1], 3);
+ }
+
+ memset(r, 0, sizeof(r));
+
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ put_unaligned_le32(hcon->passkey_notify, r);
+
+ err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r,
+ io_cap, remote_addr, local_addr, e);
+ if (err)
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(check->e, e, 16))
+ return SMP_DHKEY_CHECK_FAILED;
+
+ if (!hcon->out) {
+ if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
+ set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
+ return 0;
+ }
+
+ /* Slave sends DHKey check as response to master */
+ sc_dhkey_check(smp);
+ }
+
+ sc_add_ltk(smp);
+
+ if (hcon->out) {
+ hci_le_start_enc(hcon, 0, 0, smp->tk);
+ hcon->enc_key_size = smp->enc_key_size;
+ }
+
+ return 0;
+}
+
+static int smp_cmd_keypress_notify(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct smp_cmd_keypress_notify *kp = (void *) skb->data;
+
+ BT_DBG("value 0x%02x", kp->value);
return 0;
}
@@ -1457,11 +2591,6 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
__u8 code, reason;
int err = 0;
- if (hcon->type != LE_LINK) {
- kfree_skb(skb);
- return 0;
- }
-
if (skb->len < 1)
return -EILSEQ;
@@ -1533,6 +2662,18 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
reason = smp_cmd_sign_info(conn, skb);
break;
+ case SMP_CMD_PUBLIC_KEY:
+ reason = smp_cmd_public_key(conn, skb);
+ break;
+
+ case SMP_CMD_DHKEY_CHECK:
+ reason = smp_cmd_dhkey_check(conn, skb);
+ break;
+
+ case SMP_CMD_KEYPRESS_NOTIFY:
+ reason = smp_cmd_keypress_notify(conn, skb);
+ break;
+
default:
BT_DBG("Unknown command code 0x%2.2x", code);
reason = SMP_CMD_NOTSUPP;
@@ -1568,6 +2709,74 @@ static void smp_teardown_cb(struct l2cap_chan *chan, int err)
l2cap_chan_put(chan);
}
+static void bredr_pairing(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_cmd_pairing req;
+ struct smp_chan *smp;
+
+ BT_DBG("chan %p", chan);
+
+ /* Only new pairings are interesting */
+ if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags))
+ return;
+
+ /* Don't bother if we're not encrypted */
+ if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
+ return;
+
+ /* Only master may initiate SMP over BR/EDR */
+ if (hcon->role != HCI_ROLE_MASTER)
+ return;
+
+ /* Secure Connections support must be enabled */
+ if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ return;
+
+ /* BR/EDR must use Secure Connections for SMP */
+ if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
+ !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ return;
+
+ /* If our LE support is not enabled don't do anything */
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ /* Don't bother if remote LE support is not enabled */
+ if (!lmp_host_le_capable(hcon))
+ return;
+
+ /* Remote must support SMP fixed chan for BR/EDR */
+ if (!(conn->remote_fixed_chan & L2CAP_FC_SMP_BREDR))
+ return;
+
+ /* Don't bother if SMP is already ongoing */
+ if (chan->data)
+ return;
+
+ smp = smp_chan_create(conn);
+ if (!smp) {
+ BT_ERR("%s unable to create SMP context for BR/EDR",
+ hdev->name);
+ return;
+ }
+
+ set_bit(SMP_FLAG_SC, &smp->flags);
+
+ BT_DBG("%s starting SMP over BR/EDR", hdev->name);
+
+ /* Prepare and send the BR/EDR SMP Pairing Request */
+ build_bredr_pairing_cmd(smp, &req, NULL);
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &req, sizeof(req));
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+}
+
static void smp_resume_cb(struct l2cap_chan *chan)
{
struct smp_chan *smp = chan->data;
@@ -1576,6 +2785,11 @@ static void smp_resume_cb(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
+ if (hcon->type == ACL_LINK) {
+ bredr_pairing(chan);
+ return;
+ }
+
if (!smp)
return;
@@ -1590,11 +2804,15 @@ static void smp_resume_cb(struct l2cap_chan *chan)
static void smp_ready_cb(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ struct hci_conn *hcon = conn->hcon;
BT_DBG("chan %p", chan);
conn->smp = chan;
l2cap_chan_hold(chan);
+
+ if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
+ bredr_pairing(chan);
}
static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1647,7 +2865,6 @@ static const struct l2cap_ops smp_chan_ops = {
.suspend = l2cap_chan_no_suspend,
.set_shutdown = l2cap_chan_no_set_shutdown,
.get_sndtimeo = l2cap_chan_no_get_sndtimeo,
- .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
};
static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
@@ -1668,6 +2885,13 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
chan->omtu = pchan->omtu;
chan->mode = pchan->mode;
+ /* Other L2CAP channels may request SMP routines in order to
+ * change the security level. This means that the SMP channel
+ * lock must be considered in its own category to avoid lockdep
+ * warnings.
+ */
+ atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
+
BT_DBG("created chan %p", chan);
return chan;
@@ -1689,56 +2913,58 @@ static const struct l2cap_ops smp_root_chan_ops = {
.resume = l2cap_chan_no_resume,
.set_shutdown = l2cap_chan_no_set_shutdown,
.get_sndtimeo = l2cap_chan_no_get_sndtimeo,
- .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
};
-int smp_register(struct hci_dev *hdev)
+static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
{
struct l2cap_chan *chan;
struct crypto_blkcipher *tfm_aes;
- BT_DBG("%s", hdev->name);
+ if (cid == L2CAP_CID_SMP_BREDR) {
+ tfm_aes = NULL;
+ goto create_chan;
+ }
- tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
if (IS_ERR(tfm_aes)) {
- int err = PTR_ERR(tfm_aes);
BT_ERR("Unable to create crypto context");
- return err;
+ return ERR_CAST(tfm_aes);
}
+create_chan:
chan = l2cap_chan_create();
if (!chan) {
crypto_free_blkcipher(tfm_aes);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
chan->data = tfm_aes;
- l2cap_add_scid(chan, L2CAP_CID_SMP);
+ l2cap_add_scid(chan, cid);
l2cap_chan_set_defaults(chan);
bacpy(&chan->src, &hdev->bdaddr);
- chan->src_type = BDADDR_LE_PUBLIC;
+ if (cid == L2CAP_CID_SMP)
+ chan->src_type = BDADDR_LE_PUBLIC;
+ else
+ chan->src_type = BDADDR_BREDR;
chan->state = BT_LISTEN;
chan->mode = L2CAP_MODE_BASIC;
chan->imtu = L2CAP_DEFAULT_MTU;
chan->ops = &smp_root_chan_ops;
- hdev->smp_data = chan;
+ /* Set correct nesting level for a parent/listening channel */
+ atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
- return 0;
+ return chan;
}
-void smp_unregister(struct hci_dev *hdev)
+static void smp_del_chan(struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = hdev->smp_data;
- struct crypto_blkcipher *tfm_aes;
-
- if (!chan)
- return;
+ struct crypto_blkcipher *tfm_aes;
- BT_DBG("%s chan %p", hdev->name, chan);
+ BT_DBG("chan %p", chan);
tfm_aes = chan->data;
if (tfm_aes) {
@@ -1746,6 +2972,52 @@ void smp_unregister(struct hci_dev *hdev)
crypto_free_blkcipher(tfm_aes);
}
- hdev->smp_data = NULL;
l2cap_chan_put(chan);
}
+
+int smp_register(struct hci_dev *hdev)
+{
+ struct l2cap_chan *chan;
+
+ BT_DBG("%s", hdev->name);
+
+ chan = smp_add_cid(hdev, L2CAP_CID_SMP);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ hdev->smp_data = chan;
+
+ if (!lmp_sc_capable(hdev) &&
+ !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+ return 0;
+
+ chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
+ if (IS_ERR(chan)) {
+ int err = PTR_ERR(chan);
+ chan = hdev->smp_data;
+ hdev->smp_data = NULL;
+ smp_del_chan(chan);
+ return err;
+ }
+
+ hdev->smp_bredr_data = chan;
+
+ return 0;
+}
+
+void smp_unregister(struct hci_dev *hdev)
+{
+ struct l2cap_chan *chan;
+
+ if (hdev->smp_bredr_data) {
+ chan = hdev->smp_bredr_data;
+ hdev->smp_bredr_data = NULL;
+ smp_del_chan(chan);
+ }
+
+ if (hdev->smp_data) {
+ chan = hdev->smp_data;
+ hdev->smp_data = NULL;
+ smp_del_chan(chan);
+ }
+}
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 86a683a8b491..3296bf42ae80 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -50,10 +50,13 @@ struct smp_cmd_pairing {
#define SMP_DIST_ENC_KEY 0x01
#define SMP_DIST_ID_KEY 0x02
#define SMP_DIST_SIGN 0x04
+#define SMP_DIST_LINK_KEY 0x08
#define SMP_AUTH_NONE 0x00
#define SMP_AUTH_BONDING 0x01
#define SMP_AUTH_MITM 0x04
+#define SMP_AUTH_SC 0x08
+#define SMP_AUTH_KEYPRESS 0x10
#define SMP_CMD_PAIRING_CONFIRM 0x03
struct smp_cmd_pairing_confirm {
@@ -102,7 +105,23 @@ struct smp_cmd_security_req {
__u8 auth_req;
} __packed;
-#define SMP_CMD_MAX 0x0b
+#define SMP_CMD_PUBLIC_KEY 0x0c
+struct smp_cmd_public_key {
+ __u8 x[32];
+ __u8 y[32];
+} __packed;
+
+#define SMP_CMD_DHKEY_CHECK 0x0d
+struct smp_cmd_dhkey_check {
+ __u8 e[16];
+} __packed;
+
+#define SMP_CMD_KEYPRESS_NOTIFY 0x0e
+struct smp_cmd_keypress_notify {
+ __u8 value;
+} __packed;
+
+#define SMP_CMD_MAX 0x0e
#define SMP_PASSKEY_ENTRY_FAILED 0x01
#define SMP_OOB_NOT_AVAIL 0x02
@@ -114,6 +133,10 @@ struct smp_cmd_security_req {
#define SMP_UNSPECIFIED 0x08
#define SMP_REPEATED_ATTEMPTS 0x09
#define SMP_INVALID_PARAMS 0x0a
+#define SMP_DHKEY_CHECK_FAILED 0x0b
+#define SMP_NUMERIC_COMP_FAILED 0x0c
+#define SMP_BREDR_PAIRING_IN_PROGRESS 0x0d
+#define SMP_CROSS_TRANSP_NOT_ALLOWED 0x0e
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
@@ -123,23 +146,48 @@ enum {
SMP_STK,
SMP_LTK,
SMP_LTK_SLAVE,
+ SMP_LTK_P256,
+ SMP_LTK_P256_DEBUG,
};
+static inline bool smp_ltk_is_sc(struct smp_ltk *key)
+{
+ switch (key->type) {
+ case SMP_LTK_P256:
+ case SMP_LTK_P256_DEBUG:
+ return true;
+ }
+
+ return false;
+}
+
static inline u8 smp_ltk_sec_level(struct smp_ltk *key)
{
- if (key->authenticated)
- return BT_SECURITY_HIGH;
+ if (key->authenticated) {
+ if (smp_ltk_is_sc(key))
+ return BT_SECURITY_FIPS;
+ else
+ return BT_SECURITY_HIGH;
+ }
return BT_SECURITY_MEDIUM;
}
+/* Key preferences for smp_sufficient security */
+enum smp_key_pref {
+ SMP_ALLOW_STK,
+ SMP_USE_LTK,
+};
+
/* SMP Commands */
-bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
+bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
+ enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
-bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr);
-int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa);
+bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
+ const bdaddr_t *bdaddr);
+int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
int smp_register(struct hci_dev *hdev);
void smp_unregister(struct hci_dev *hdev);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 6f6c95cfe8f2..cc36e59db7d7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -90,7 +90,7 @@ static void fdb_rcu_free(struct rcu_head *head)
* are then updated with the new information.
* Called under RTNL.
*/
-static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
+static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
int err;
struct net_bridge_port *p;
@@ -118,7 +118,7 @@ undo:
* the ports with needed information.
* Called under RTNL.
*/
-static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
+static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
struct net_bridge_port *p;
@@ -133,7 +133,7 @@ static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
if (f->is_static)
- fdb_del_hw(br, f->addr.addr);
+ fdb_del_hw_addr(br, f->addr.addr);
hlist_del_rcu(&f->hlist);
fdb_notify(br, f, RTM_DELNEIGH);
@@ -481,6 +481,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb->is_local = 0;
fdb->is_static = 0;
fdb->added_by_user = 0;
+ fdb->added_by_external_learn = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
@@ -514,7 +515,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
return -ENOMEM;
fdb->is_local = fdb->is_static = 1;
- fdb_add_hw(br, addr);
+ fdb_add_hw_addr(br, addr);
fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
}
@@ -613,7 +614,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
- ndm->ndm_flags = 0;
+ ndm->ndm_flags = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
ndm->ndm_type = 0;
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(fdb);
@@ -754,19 +755,19 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
fdb->is_local = 1;
if (!fdb->is_static) {
fdb->is_static = 1;
- fdb_add_hw(br, addr);
+ fdb_add_hw_addr(br, addr);
}
} else if (state & NUD_NOARP) {
fdb->is_local = 0;
if (!fdb->is_static) {
fdb->is_static = 1;
- fdb_add_hw(br, addr);
+ fdb_add_hw_addr(br, addr);
}
} else {
fdb->is_local = 0;
if (fdb->is_static) {
fdb->is_static = 0;
- fdb_del_hw(br, addr);
+ fdb_del_hw_addr(br, addr);
}
}
@@ -805,33 +806,17 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 nlh_flags)
+ const unsigned char *addr, u16 vid, u16 nlh_flags)
{
struct net_bridge_port *p;
int err = 0;
struct net_port_vlans *pv;
- unsigned short vid = VLAN_N_VID;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
return -EINVAL;
}
- if (tb[NDA_VLAN]) {
- if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
- pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
- return -EINVAL;
- }
-
- vid = nla_get_u16(tb[NDA_VLAN]);
-
- if (!vid || vid >= VLAN_VID_MASK) {
- pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
- vid);
- return -EINVAL;
- }
- }
-
if (is_zero_ether_addr(addr)) {
pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
return -EINVAL;
@@ -845,7 +830,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
pv = nbp_get_vlan_info(p);
- if (vid != VLAN_N_VID) {
+ if (vid) {
if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
pr_info("bridge: RTM_NEWNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
@@ -903,27 +888,12 @@ static int __br_fdb_delete(struct net_bridge_port *p,
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
struct net_bridge_port *p;
int err;
struct net_port_vlans *pv;
- unsigned short vid = VLAN_N_VID;
- if (tb[NDA_VLAN]) {
- if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
- pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
- return -EINVAL;
- }
-
- vid = nla_get_u16(tb[NDA_VLAN]);
-
- if (!vid || vid >= VLAN_VID_MASK) {
- pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
- vid);
- return -EINVAL;
- }
- }
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
@@ -932,7 +902,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
}
pv = nbp_get_vlan_info(p);
- if (vid != VLAN_N_VID) {
+ if (vid) {
if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
pr_info("bridge: RTM_DELNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
@@ -1014,3 +984,91 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
}
}
}
+
+int br_fdb_external_learn_add(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ struct hlist_head *head;
+ struct net_bridge_fdb_entry *fdb;
+ int err = 0;
+
+ rtnl_lock();
+
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: %s not a bridge port\n", dev->name);
+ err = -EINVAL;
+ goto err_rtnl_unlock;
+ }
+
+ br = p->br;
+
+ spin_lock_bh(&br->hash_lock);
+
+ head = &br->hash[br_mac_hash(addr, vid)];
+ fdb = fdb_find(head, addr, vid);
+ if (!fdb) {
+ fdb = fdb_create(head, p, addr, vid);
+ if (!fdb) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+ fdb->added_by_external_learn = 1;
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
+ } else if (fdb->added_by_external_learn) {
+ /* Refresh entry */
+ fdb->updated = fdb->used = jiffies;
+ } else if (!fdb->added_by_user) {
+ /* Take over SW learned entry */
+ fdb->added_by_external_learn = 1;
+ fdb->updated = jiffies;
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
+ }
+
+err_unlock:
+ spin_unlock_bh(&br->hash_lock);
+err_rtnl_unlock:
+ rtnl_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(br_fdb_external_learn_add);
+
+int br_fdb_external_learn_del(struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ struct hlist_head *head;
+ struct net_bridge_fdb_entry *fdb;
+ int err = 0;
+
+ rtnl_lock();
+
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: %s not a bridge port\n", dev->name);
+ err = -EINVAL;
+ goto err_rtnl_unlock;
+ }
+
+ br = p->br;
+
+ spin_lock_bh(&br->hash_lock);
+
+ head = &br->hash[br_mac_hash(addr, vid)];
+ fdb = fdb_find(head, addr, vid);
+ if (fdb && fdb->added_by_external_learn)
+ fdb_delete(br, fdb);
+ else
+ err = -ENOENT;
+
+ spin_unlock_bh(&br->hash_lock);
+err_rtnl_unlock:
+ rtnl_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(br_fdb_external_learn_del);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 44cb786b925a..f96933a823e3 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -184,6 +184,11 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
/* Do not flood unicast traffic to ports that turn it off */
if (unicast && !(p->flags & BR_FLOOD))
continue;
+
+ /* Do not flood to ports that enable proxy ARP */
+ if (p->flags & BR_PROXYARP)
+ continue;
+
prev = maybe_deliver(prev, p, skb, __packet_hook);
if (IS_ERR(prev))
goto out;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6fd5522df696..1f1de715197c 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -16,6 +16,8 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
+#include <linux/neighbour.h>
+#include <net/arp.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
@@ -57,6 +59,60 @@ static int br_pass_frame_up(struct sk_buff *skb)
netif_receive_skb);
}
+static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
+ u16 vid)
+{
+ struct net_device *dev = br->dev;
+ struct neighbour *n;
+ struct arphdr *parp;
+ u8 *arpptr, *sha;
+ __be32 sip, tip;
+
+ if (dev->flags & IFF_NOARP)
+ return;
+
+ if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
+ dev->stats.tx_dropped++;
+ return;
+ }
+ parp = arp_hdr(skb);
+
+ if (parp->ar_pro != htons(ETH_P_IP) ||
+ parp->ar_op != htons(ARPOP_REQUEST) ||
+ parp->ar_hln != dev->addr_len ||
+ parp->ar_pln != 4)
+ return;
+
+ arpptr = (u8 *)parp + sizeof(struct arphdr);
+ sha = arpptr;
+ arpptr += dev->addr_len; /* sha */
+ memcpy(&sip, arpptr, sizeof(sip));
+ arpptr += sizeof(sip);
+ arpptr += dev->addr_len; /* tha */
+ memcpy(&tip, arpptr, sizeof(tip));
+
+ if (ipv4_is_loopback(tip) ||
+ ipv4_is_multicast(tip))
+ return;
+
+ n = neigh_lookup(&arp_tbl, &tip, dev);
+ if (n) {
+ struct net_bridge_fdb_entry *f;
+
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_release(n);
+ return;
+ }
+
+ f = __br_fdb_get(br, n->ha, vid);
+ if (f)
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
+ sha, n->ha, sha);
+
+ neigh_release(n);
+ }
+}
+
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct sk_buff *skb)
{
@@ -98,6 +154,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
if (is_broadcast_ether_addr(dest)) {
+ if (p->flags & BR_PROXYARP &&
+ skb->protocol == htons(ETH_P_ARP))
+ br_do_proxy_arp(skb, br, vid);
+
skb2 = skb;
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1a4f32c09ad5..c190d22b6b3d 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -35,6 +35,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>
+#include <net/netfilter/br_netfilter.h>
#include <asm/uaccess.h>
#include "br_private.h"
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5ec470b851f..9f5eb55a4d3a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,7 +60,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
- nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)))
+ nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
+ nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)))
return -EMSGSIZE;
return 0;
@@ -333,6 +334,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
+ br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4d783d071305..aea3d1339b3f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -98,9 +98,10 @@ struct net_bridge_fdb_entry
unsigned long updated;
unsigned long used;
mac_addr addr;
- unsigned char is_local;
- unsigned char is_static;
- unsigned char added_by_user;
+ unsigned char is_local:1,
+ is_static:1,
+ added_by_user:1,
+ added_by_external_learn:1;
__u16 vlan_id;
};
@@ -163,15 +164,6 @@ struct net_bridge_port
struct rcu_head rcu;
unsigned long flags;
-#define BR_HAIRPIN_MODE 0x00000001
-#define BR_BPDU_GUARD 0x00000002
-#define BR_ROOT_BLOCK 0x00000004
-#define BR_MULTICAST_FAST_LEAVE 0x00000008
-#define BR_ADMIN_COST 0x00000010
-#define BR_LEARNING 0x00000020
-#define BR_FLOOD 0x00000040
-#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
-#define BR_PROMISC 0x00000080
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_own_query ip4_own_query;
@@ -403,9 +395,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, bool added_by_user);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr);
+ struct net_device *dev, const unsigned char *addr, u16 vid);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
- const unsigned char *addr, u16 nlh_flags);
+ const unsigned char *addr, u16 vid, u16 nlh_flags);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int idx);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 2b047bcf42a4..fb3ebe615513 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
#include <linux/rculist.h>
+#include <net/switchdev.h>
#include "br_private.h"
#include "br_private_stp.h"
@@ -38,7 +39,13 @@ void br_log_state(const struct net_bridge_port *p)
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
+ int err;
+
p->state = state;
+ err = netdev_switch_port_stp_update(p->dev, state);
+ if (err && err != -EOPNOTSUPP)
+ br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
+ (unsigned int) p->port_no, p->dev->name);
}
/* called under bridge lock */
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index e561cd59b8a6..2de5d91199e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -170,6 +170,7 @@ BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
+BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -213,6 +214,7 @@ static const struct brport_attribute *brport_attrs[] = {
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
#endif
+ &brport_attr_proxyarp,
NULL
};
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 150048fb99b0..97b8ddf57363 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -199,8 +199,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (skb->vlan_proto != proto) {
/* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
- skb = __vlan_put_tag(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
if (unlikely(!skb))
return false;
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 074c557ab505..19473a9371b8 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -13,6 +13,82 @@
#include <linux/module.h>
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_bridge.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+int nft_bridge_iphdr_validate(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return 0;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len)
+ return 0;
+ else if (len < (iph->ihl*4))
+ return 0;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(nft_bridge_iphdr_validate);
+
+int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
+{
+ struct ipv6hdr *hdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+
+ hdr = ipv6_hdr(skb);
+ if (hdr->version != 6)
+ return 0;
+
+ pkt_len = ntohs(hdr->payload_len);
+ if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
+
+static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+ if (nft_bridge_iphdr_validate(skb))
+ nft_set_pktinfo_ipv4(pkt, ops, skb, in, out);
+ else
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+}
+
+static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (nft_bridge_ip6hdr_validate(skb) &&
+ nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0)
+ return;
+#endif
+ nft_set_pktinfo(pkt, ops, skb, in, out);
+}
static unsigned int
nft_do_chain_bridge(const struct nf_hook_ops *ops,
@@ -23,7 +99,17 @@ nft_do_chain_bridge(const struct nf_hook_ops *ops,
{
struct nft_pktinfo pkt;
- nft_set_pktinfo(&pkt, ops, skb, in, out);
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ break;
+ case htons(ETH_P_IPV6):
+ nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+ break;
+ default:
+ nft_set_pktinfo(&pkt, ops, skb, in, out);
+ break;
+ }
return nft_do_chain(&pkt, ops);
}
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 48da2c54a69e..b0330aecbf97 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -14,6 +14,7 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/nf_tables_bridge.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
@@ -35,30 +36,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
skb_pull(nskb, ETH_HLEN);
}
-static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
-{
- struct iphdr *iph;
- u32 len;
-
- if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
- return 0;
-
- iph = ip_hdr(oldskb);
- if (iph->ihl < 5 || iph->version != 4)
- return 0;
-
- len = ntohs(iph->tot_len);
- if (oldskb->len < len)
- return 0;
- else if (len < (iph->ihl*4))
- return 0;
-
- if (!pskb_may_pull(oldskb, iph->ihl*4))
- return 0;
-
- return 1;
-}
-
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
@@ -66,7 +43,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
const struct tcphdr *oth;
struct tcphdr _oth;
- if (!nft_reject_iphdr_validate(oldskb))
+ if (!nft_bridge_iphdr_validate(oldskb))
return;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
@@ -101,7 +78,7 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
void *payload;
__wsum csum;
- if (!nft_reject_iphdr_validate(oldskb))
+ if (!nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@@ -146,25 +123,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
-static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
-{
- struct ipv6hdr *hdr;
- u32 pkt_len;
-
- if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
- return 0;
-
- hdr = ipv6_hdr(oldskb);
- if (hdr->version != 6)
- return 0;
-
- pkt_len = ntohs(hdr->payload_len);
- if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
- return 0;
-
- return 1;
-}
-
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb, int hook)
{
@@ -174,7 +132,7 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
unsigned int otcplen;
struct ipv6hdr *nip6h;
- if (!nft_reject_ip6hdr_validate(oldskb))
+ if (!nft_bridge_ip6hdr_validate(oldskb))
return;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
@@ -207,7 +165,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
unsigned int len;
void *payload;
- if (!nft_reject_ip6hdr_validate(oldskb))
+ if (!nft_bridge_ip6hdr_validate(oldskb))
return;
/* Include "As much of invoking packet as possible without the ICMPv6
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 43f750e88e19..769b185fefbd 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -293,7 +293,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
copylen = len;
}
- ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
+ ret = skb_copy_datagram_msg(skb, 0, m, copylen);
if (ret)
goto out_free;
@@ -418,7 +418,7 @@ unlock:
}
release_sock(sk);
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (memcpy_to_msg(msg, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
@@ -535,7 +535,7 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto err;
ret = -EINVAL;
- if (unlikely(msg->msg_iov->iov_base == NULL))
+ if (unlikely(msg->msg_iter.iov->iov_base == NULL))
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
@@ -566,7 +566,7 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
skb_reserve(skb, cf_sk->headroom);
- ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ ret = memcpy_from_msg(skb_put(skb, len), msg, len);
if (ret)
goto err;
@@ -641,7 +641,7 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
*/
size = min_t(int, size, skb_tailroom(skb));
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err) {
kfree_skb(skb);
goto out_err;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ce82337521f6..66e08040ced7 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -64,9 +64,6 @@
#include "af_can.h"
-static __initconst const char banner[] = KERN_INFO
- "can: controller area network core (" CAN_VERSION_STRING ")\n";
-
MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
@@ -524,7 +521,7 @@ static void can_rx_delete_receiver(struct rcu_head *rp)
/**
* can_rx_unregister - unsubscribe CAN frames from a specific interface
- * @dev: pointer to netdevice (NULL => unsubcribe from 'all' CAN devices list)
+ * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
* @can_id: CAN identifier
* @mask: CAN mask
* @func: callback function on filter match
@@ -896,7 +893,7 @@ static __init int can_init(void)
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
- printk(banner);
+ pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");
memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
diff --git a/net/can/bcm.c b/net/can/bcm.c
index dcb75c0e66c1..ee9ffd956552 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -78,8 +78,6 @@
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
#define CAN_BCM_VERSION CAN_VERSION
-static __initconst const char banner[] = KERN_INFO
- "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
@@ -441,7 +439,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
/* mark as used and throttled by default */
lastdata->can_dlc |= (RX_RECV|RX_THR);
- /* throtteling mode inactive ? */
+ /* throttling mode inactive ? */
if (!op->kt_ival2.tv64) {
/* send RX_CHANGED to the user immediately */
bcm_rx_changed(op, lastdata);
@@ -452,7 +450,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
if (hrtimer_active(&op->thrtimer))
return;
- /* first receiption with enabled throttling mode */
+ /* first reception with enabled throttling mode */
if (!op->kt_lastmsg.tv64)
goto rx_changed_settime;
@@ -480,7 +478,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct can_frame *rxdata)
{
/*
- * no one uses the MSBs of can_dlc for comparation,
+ * no one uses the MSBs of can_dlc for comparison,
* so we use it here to detect the first time of reception
*/
@@ -510,7 +508,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
}
/*
- * bcm_rx_starttimer - enable timeout monitoring for CAN frame receiption
+ * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
*/
static void bcm_rx_starttimer(struct bcm_op *op)
{
@@ -539,7 +537,7 @@ static void bcm_rx_timeout_tsklet(unsigned long data)
}
/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
*/
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
@@ -627,7 +625,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
}
/*
- * bcm_rx_handler - handle a CAN frame receiption
+ * bcm_rx_handler - handle a CAN frame reception
*/
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
@@ -858,8 +856,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
/* update can_frames content */
for (i = 0; i < msg_head->nframes; i++) {
- err = memcpy_fromiovec((u8 *)&op->frames[i],
- msg->msg_iov, CFSIZ);
+ err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
if (op->frames[i].can_dlc > 8)
err = -EINVAL;
@@ -894,8 +891,7 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
op->frames = &op->sframe;
for (i = 0; i < msg_head->nframes; i++) {
- err = memcpy_fromiovec((u8 *)&op->frames[i],
- msg->msg_iov, CFSIZ);
+ err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
if (op->frames[i].can_dlc > 8)
err = -EINVAL;
@@ -1024,9 +1020,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (msg_head->nframes) {
/* update can_frames content */
- err = memcpy_fromiovec((u8 *)op->frames,
- msg->msg_iov,
- msg_head->nframes * CFSIZ);
+ err = memcpy_from_msg((u8 *)op->frames, msg,
+ msg_head->nframes * CFSIZ);
if (err < 0)
return err;
@@ -1072,8 +1067,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
}
if (msg_head->nframes) {
- err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
- msg_head->nframes * CFSIZ);
+ err = memcpy_from_msg((u8 *)op->frames, msg,
+ msg_head->nframes * CFSIZ);
if (err < 0) {
if (op->frames != &op->sframe)
kfree(op->frames);
@@ -1209,7 +1204,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
can_skb_reserve(skb);
- err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
+ err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
if (err < 0) {
kfree_skb(skb);
return err;
@@ -1285,7 +1280,7 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
/* read message head information */
- ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
+ ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
if (ret < 0)
return ret;
@@ -1558,7 +1553,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb->len < size)
size = skb->len;
- err = memcpy_toiovec(msg->msg_iov, skb->data, size);
+ err = memcpy_to_msg(msg, skb->data, size);
if (err < 0) {
skb_free_datagram(sk, skb);
return err;
@@ -1615,7 +1610,7 @@ static int __init bcm_module_init(void)
{
int err;
- printk(banner);
+ pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
err = can_proto_register(&bcm_can_proto);
if (err < 0) {
diff --git a/net/can/gw.c b/net/can/gw.c
index 050a2110d43f..295f62e62eb3 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -361,7 +361,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
* The Controller Area Network controllers only accept CAN frames with
* correct CRCs - which are not visible in the controller registers.
* According to skbuff.h documentation the csum_start element for IP
- * checksums is undefined/unsued when ip_summed == CHECKSUM_UNNECESSARY.
+ * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
* Only CAN skbs can be processed here which already have this property.
*/
diff --git a/net/can/raw.c b/net/can/raw.c
index 081e81fd017f..00c13ef23661 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -56,8 +56,6 @@
#include <net/net_namespace.h>
#define CAN_RAW_VERSION CAN_VERSION
-static __initconst const char banner[] =
- KERN_INFO "can: raw protocol (rev " CAN_RAW_VERSION ")\n";
MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
@@ -703,7 +701,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto free_skb;
@@ -750,7 +748,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
else
size = skb->len;
- err = memcpy_toiovec(msg->msg_iov, skb->data, size);
+ err = memcpy_to_msg(msg, skb->data, size);
if (err < 0) {
skb_free_datagram(sk, skb);
return err;
@@ -810,7 +808,7 @@ static __init int raw_module_init(void)
{
int err;
- printk(banner);
+ pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
err = can_proto_register(&raw_can_proto);
if (err < 0)
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7e38b729696a..15845814a0f2 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -8,6 +8,7 @@
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
+#include <linux/ceph/messenger.h>
#include "crypto.h"
#include "auth_x.h"
@@ -293,6 +294,11 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
+ ceph_crypto_key_destroy(&au->session_key);
+ ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
+ if (ret)
+ return ret;
+
maxlen = sizeof(*msg_a) + sizeof(msg_b) +
ceph_x_encrypt_buflen(ticket_blob_len);
dout(" need len %d\n", maxlen);
@@ -302,8 +308,10 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
}
if (!au->buf) {
au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
- if (!au->buf)
+ if (!au->buf) {
+ ceph_crypto_key_destroy(&au->session_key);
return -ENOMEM;
+ }
}
au->service = th->service;
au->secret_id = th->secret_id;
@@ -329,7 +337,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
get_random_bytes(&au->nonce, sizeof(au->nonce));
msg_b.struct_v = 1;
msg_b.nonce = cpu_to_le64(au->nonce);
- ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
+ ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
p, end - p);
if (ret < 0)
goto out_buf;
@@ -560,6 +568,8 @@ static int ceph_x_create_authorizer(
auth->authorizer_buf_len = au->buf->vec.iov_len;
auth->authorizer_reply_buf = au->reply_buf;
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+ auth->sign_message = ac->ops->sign_message;
+ auth->check_message_signature = ac->ops->check_message_signature;
return 0;
}
@@ -588,17 +598,13 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len)
{
struct ceph_x_authorizer *au = (void *)a;
- struct ceph_x_ticket_handler *th;
int ret = 0;
struct ceph_x_authorize_reply reply;
void *preply = &reply;
void *p = au->reply_buf;
void *end = p + sizeof(au->reply_buf);
- th = get_ticket_handler(ac, au->service);
- if (IS_ERR(th))
- return PTR_ERR(th);
- ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
if (ret < 0)
return ret;
if (ret != sizeof(reply))
@@ -618,6 +624,7 @@ static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
{
struct ceph_x_authorizer *au = (void *)a;
+ ceph_crypto_key_destroy(&au->session_key);
ceph_buffer_put(au->buf);
kfree(au);
}
@@ -663,6 +670,59 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
memset(&th->validity, 0, sizeof(th->validity));
}
+static int calcu_signature(struct ceph_x_authorizer *au,
+ struct ceph_msg *msg, __le64 *sig)
+{
+ int ret;
+ char tmp_enc[40];
+ __le32 tmp[5] = {
+ 16u, msg->hdr.crc, msg->footer.front_crc,
+ msg->footer.middle_crc, msg->footer.data_crc,
+ };
+ ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+ tmp_enc, sizeof(tmp_enc));
+ if (ret < 0)
+ return ret;
+ *sig = *(__le64*)(tmp_enc + 4);
+ return 0;
+}
+
+static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ int ret;
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &msg->footer.sig);
+ if (ret < 0)
+ return ret;
+ msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
+ return 0;
+}
+
+static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ __le64 sig_check;
+ int ret;
+
+ if (!auth->authorizer)
+ return 0;
+ ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+ msg, &sig_check);
+ if (ret < 0)
+ return ret;
+ if (sig_check == msg->footer.sig)
+ return 0;
+ if (msg->footer.flags & CEPH_MSG_FOOTER_SIGNED)
+ dout("ceph_x_check_message_signature %p has signature %llx "
+ "expect %llx\n", msg, msg->footer.sig, sig_check);
+ else
+ dout("ceph_x_check_message_signature %p sender did not set "
+ "CEPH_MSG_FOOTER_SIGNED\n", msg);
+ return -EBADMSG;
+}
static const struct ceph_auth_client_ops ceph_x_ops = {
.name = "x",
@@ -677,6 +737,8 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
+ .sign_message = ceph_x_sign_message,
+ .check_message_signature = ceph_x_check_message_signature,
};
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 65ee72082d99..e8b7c6917d47 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
+ struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
u64 nonce;
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 621b5f65407f..add5f921a0ff 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -6,7 +6,7 @@
#include <linux/ceph/buffer.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/libceph.h> /* for ceph_kv{malloc,free} */
+#include <linux/ceph/libceph.h> /* for ceph_kvmalloc */
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
@@ -35,7 +35,7 @@ void ceph_buffer_release(struct kref *kref)
struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
dout("buffer_release %p\n", b);
- ceph_kvfree(b->vec.iov_base);
+ kvfree(b->vec.iov_base);
kfree(b);
}
EXPORT_SYMBOL(ceph_buffer_release);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 58fbfe134f93..5d5ab67f516d 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -184,14 +184,6 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
}
-void ceph_kvfree(const void *ptr)
-{
- if (is_vmalloc_addr(ptr))
- vfree(ptr);
- else
- kfree(ptr);
-}
-
static int parse_fsid(const char *str, struct ceph_fsid *fsid)
{
@@ -245,6 +237,8 @@ enum {
Opt_noshare,
Opt_crc,
Opt_nocrc,
+ Opt_cephx_require_signatures,
+ Opt_nocephx_require_signatures,
};
static match_table_t opt_tokens = {
@@ -263,6 +257,8 @@ static match_table_t opt_tokens = {
{Opt_noshare, "noshare"},
{Opt_crc, "crc"},
{Opt_nocrc, "nocrc"},
+ {Opt_cephx_require_signatures, "cephx_require_signatures"},
+ {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
{-1, NULL}
};
@@ -461,6 +457,12 @@ ceph_parse_options(char *options, const char *dev_name,
case Opt_nocrc:
opt->flags |= CEPH_OPT_NOCRC;
break;
+ case Opt_cephx_require_signatures:
+ opt->flags &= ~CEPH_OPT_NOMSGAUTH;
+ break;
+ case Opt_nocephx_require_signatures:
+ opt->flags |= CEPH_OPT_NOMSGAUTH;
+ break;
default:
BUG_ON(token);
@@ -504,6 +506,9 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
init_waitqueue_head(&client->auth_wq);
client->auth_err = 0;
+ if (!ceph_test_opt(client, NOMSGAUTH))
+ required_features |= CEPH_FEATURE_MSG_AUTH;
+
client->extra_mon_dispatch = NULL;
client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
supported_features;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 8d1653caffdb..33a2f201e460 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1196,8 +1196,18 @@ static void prepare_write_message_footer(struct ceph_connection *con)
dout("prepare_write_message_footer %p\n", con);
con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
- con->out_kvec[v].iov_len = sizeof(m->footer);
- con->out_kvec_bytes += sizeof(m->footer);
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ if (con->ops->sign_message)
+ con->ops->sign_message(con, m);
+ else
+ m->footer.sig = 0;
+ con->out_kvec[v].iov_len = sizeof(m->footer);
+ con->out_kvec_bytes += sizeof(m->footer);
+ } else {
+ m->old_footer.flags = m->footer.flags;
+ con->out_kvec[v].iov_len = sizeof(m->old_footer);
+ con->out_kvec_bytes += sizeof(m->old_footer);
+ }
con->out_kvec_left++;
con->out_more = m->more_to_follow;
con->out_msg_done = true;
@@ -2249,6 +2259,7 @@ static int read_partial_message(struct ceph_connection *con)
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
+ bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
u64 seq;
u32 crc;
@@ -2361,12 +2372,21 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- size = sizeof (m->footer);
+ if (need_sign)
+ size = sizeof(m->footer);
+ else
+ size = sizeof(m->old_footer);
+
end += size;
ret = read_partial(con, end, size, &m->footer);
if (ret <= 0)
return ret;
+ if (!need_sign) {
+ m->footer.flags = m->old_footer.flags;
+ m->footer.sig = 0;
+ }
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -2390,6 +2410,12 @@ static int read_partial_message(struct ceph_connection *con)
return -EBADMSG;
}
+ if (need_sign && con->ops->check_message_signature &&
+ con->ops->check_message_signature(con, m)) {
+ pr_err("read_partial_message %p signature check failed\n", m);
+ return -EBADMSG;
+ }
+
return 1; /* done! */
}
@@ -3288,7 +3314,7 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
static void ceph_msg_free(struct ceph_msg *m)
{
dout("%s %p\n", __func__, m);
- ceph_kvfree(m->front.iov_base);
+ kvfree(m->front.iov_base);
kmem_cache_free(ceph_msg_cache, m);
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6f164289bde8..53299c7b0ca4 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -292,6 +292,10 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
ceph_osd_data_release(&op->cls.request_data);
ceph_osd_data_release(&op->cls.response_data);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ ceph_osd_data_release(&op->xattr.osd_data);
+ break;
default:
break;
}
@@ -476,8 +480,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
op->extent.offset = offset;
op->extent.length = length;
@@ -545,6 +548,39 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
}
EXPORT_SYMBOL(osd_req_op_cls_init);
+int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode)
+{
+ struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+ struct ceph_pagelist *pagelist;
+ size_t payload_len;
+
+ BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
+
+ pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ if (!pagelist)
+ return -ENOMEM;
+
+ ceph_pagelist_init(pagelist);
+
+ payload_len = strlen(name);
+ op->xattr.name_len = payload_len;
+ ceph_pagelist_append(pagelist, name, payload_len);
+
+ op->xattr.value_len = size;
+ ceph_pagelist_append(pagelist, value, size);
+ payload_len += size;
+
+ op->xattr.cmp_op = cmp_op;
+ op->xattr.cmp_mode = cmp_mode;
+
+ ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
+ op->payload_len = payload_len;
+ return 0;
+}
+EXPORT_SYMBOL(osd_req_op_xattr_init);
+
void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
u64 cookie, u64 version, int flag)
@@ -626,7 +662,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_ZERO:
- case CEPH_OSD_OP_DELETE:
case CEPH_OSD_OP_TRUNCATE:
if (src->op == CEPH_OSD_OP_WRITE)
request_data_len = src->extent.length;
@@ -676,6 +711,19 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->alloc_hint.expected_write_size =
cpu_to_le64(src->alloc_hint.expected_write_size);
break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+ dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+ dst->xattr.cmp_op = src->xattr.cmp_op;
+ dst->xattr.cmp_mode = src->xattr.cmp_mode;
+ osd_data = &src->xattr.osd_data;
+ ceph_osdc_msg_data_add(req->r_request, osd_data);
+ request_data_len = osd_data->pagelist->length;
+ break;
+ case CEPH_OSD_OP_CREATE:
+ case CEPH_OSD_OP_DELETE:
+ break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
@@ -705,7 +753,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
struct ceph_vino vino,
- u64 off, u64 *plen, int num_ops,
+ u64 off, u64 *plen,
+ unsigned int which, int num_ops,
int opcode, int flags,
struct ceph_snap_context *snapc,
u32 truncate_seq,
@@ -716,13 +765,11 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
u64 objnum = 0;
u64 objoff = 0;
u64 objlen = 0;
- u32 object_size;
- u64 object_base;
int r;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
+ opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
@@ -738,29 +785,24 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
return ERR_PTR(r);
}
- object_size = le32_to_cpu(layout->fl_object_size);
- object_base = off - objoff;
- if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
- if (truncate_size <= object_base) {
- truncate_size = 0;
- } else {
- truncate_size -= object_base;
- if (truncate_size > object_size)
- truncate_size = object_size;
+ if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
+ osd_req_op_init(req, which, opcode);
+ } else {
+ u32 object_size = le32_to_cpu(layout->fl_object_size);
+ u32 object_base = off - objoff;
+ if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
+ if (truncate_size <= object_base) {
+ truncate_size = 0;
+ } else {
+ truncate_size -= object_base;
+ if (truncate_size > object_size)
+ truncate_size = object_size;
+ }
}
+ osd_req_op_extent_init(req, which, opcode, objoff, objlen,
+ truncate_size, truncate_seq);
}
- osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
- truncate_size, truncate_seq);
-
- /*
- * A second op in the ops array means the caller wants to
- * also issue a include a 'startsync' command so that the
- * osd will flush data quickly.
- */
- if (num_ops > 1)
- osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
-
req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
@@ -2626,7 +2668,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
vino.snap, off, *plen);
- req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, truncate_seq, truncate_size,
false);
@@ -2669,7 +2711,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
int page_align = off & ~PAGE_MASK;
BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
- req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
+ req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
snapc, truncate_seq, truncate_size,
@@ -2920,6 +2962,20 @@ static int invalidate_authorizer(struct ceph_connection *con)
return ceph_monc_validate_auth(&osdc->client->monc);
}
+static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_sign_message(auth, msg);
+}
+
+static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_auth_handshake *auth = &o->o_auth;
+ return ceph_auth_check_message_signature(auth, msg);
+}
+
static const struct ceph_connection_operations osd_con_ops = {
.get = get_osd_con,
.put = put_osd_con,
@@ -2928,5 +2984,7 @@ static const struct ceph_connection_operations osd_con_ops = {
.verify_authorizer_reply = verify_authorizer_reply,
.invalidate_authorizer = invalidate_authorizer,
.alloc_msg = alloc_msg,
+ .sign_message = sign_message,
+ .check_message_signature = check_message_signature,
.fault = osd_reset,
};
diff --git a/net/compat.c b/net/compat.c
index bc8aeefddf3f..3236b4167a32 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -31,81 +31,54 @@
#include <asm/uaccess.h>
#include <net/compat.h>
-static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
- struct compat_iovec __user *uiov32,
- int niov)
+ssize_t get_compat_msghdr(struct msghdr *kmsg,
+ struct compat_msghdr __user *umsg,
+ struct sockaddr __user **save_addr,
+ struct iovec **iov)
{
- int tot_len = 0;
-
- while (niov > 0) {
- compat_uptr_t buf;
- compat_size_t len;
-
- if (get_user(len, &uiov32->iov_len) ||
- get_user(buf, &uiov32->iov_base))
- return -EFAULT;
-
- if (len > INT_MAX - tot_len)
- len = INT_MAX - tot_len;
-
- tot_len += len;
- kiov->iov_base = compat_ptr(buf);
- kiov->iov_len = (__kernel_size_t) len;
- uiov32++;
- kiov++;
- niov--;
- }
- return tot_len;
-}
-
-int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
-{
- compat_uptr_t tmp1, tmp2, tmp3;
+ compat_uptr_t uaddr, uiov, tmp3;
+ compat_size_t nr_segs;
+ ssize_t err;
if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
- __get_user(tmp1, &umsg->msg_name) ||
+ __get_user(uaddr, &umsg->msg_name) ||
__get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
- __get_user(tmp2, &umsg->msg_iov) ||
- __get_user(kmsg->msg_iovlen, &umsg->msg_iovlen) ||
+ __get_user(uiov, &umsg->msg_iov) ||
+ __get_user(nr_segs, &umsg->msg_iovlen) ||
__get_user(tmp3, &umsg->msg_control) ||
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
- kmsg->msg_name = compat_ptr(tmp1);
- kmsg->msg_iov = compat_ptr(tmp2);
kmsg->msg_control = compat_ptr(tmp3);
- return 0;
-}
-/* I've named the args so it is easy to tell whose space the pointers are in. */
-int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
- struct sockaddr_storage *kern_address, int mode)
-{
- int tot_len;
+ if (save_addr)
+ *save_addr = compat_ptr(uaddr);
- if (kern_msg->msg_name && kern_msg->msg_namelen) {
- if (mode == VERIFY_READ) {
- int err = move_addr_to_kernel(kern_msg->msg_name,
- kern_msg->msg_namelen,
- kern_address);
+ if (uaddr && kmsg->msg_namelen) {
+ if (!save_addr) {
+ err = move_addr_to_kernel(compat_ptr(uaddr),
+ kmsg->msg_namelen,
+ kmsg->msg_name);
if (err < 0)
return err;
}
- kern_msg->msg_name = kern_address;
} else {
- kern_msg->msg_name = NULL;
- kern_msg->msg_namelen = 0;
+ kmsg->msg_name = NULL;
+ kmsg->msg_namelen = 0;
}
- tot_len = iov_from_user_compat_to_kern(kern_iov,
- (struct compat_iovec __user *)kern_msg->msg_iov,
- kern_msg->msg_iovlen);
- if (tot_len >= 0)
- kern_msg->msg_iov = kern_iov;
+ if (nr_segs > UIO_MAXIOV)
+ return -EMSGSIZE;
- return tot_len;
+ err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE,
+ compat_ptr(uiov), nr_segs,
+ UIO_FASTIOV, *iov, iov);
+ if (err >= 0)
+ iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
+ *iov, nr_segs, err);
+ return err;
}
/* Bleech... */
@@ -740,7 +713,7 @@ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, uns
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
- return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
@@ -756,7 +729,7 @@ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, uns
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
- return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags)
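get_compat_msghdr() above has to read each iovec element through compat_uptr_t/compat_size_t and convert pointers with compat_ptr(), because a 32-bit task's struct iovec is half the size of the native one; it then bounds the segment count against UIO_MAXIOV and hands the result to iov_iter_init(). A toy userspace illustration of the layout difference (the compat-style struct below is a stand-in, not the kernel definition):

#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

/* Stand-in for a 32-bit task's iovec: 32-bit user pointer, 32-bit length. */
struct compat_iovec_like {
	uint32_t iov_base;
	uint32_t iov_len;
};

int main(void)
{
	printf("native struct iovec : %zu bytes\n", sizeof(struct iovec));
	printf("compat-style iovec  : %zu bytes\n", sizeof(struct compat_iovec_like));
	return 0;
}

On a 64-bit build this prints 16 and 8, which is why the elements cannot simply be memcpy'd into a native iovec array.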
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fdbc9a81d4c2..df493d68330c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -49,6 +49,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/uio.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -309,16 +310,14 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
EXPORT_SYMBOL(skb_kill_datagram);
/**
- * skb_copy_datagram_iovec - Copy a datagram to an iovec.
+ * skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
- * @to: io vector to copy to
+ * @to: iovec iterator to copy to
* @len: amount of data to copy from buffer to iovec
- *
- * Note: the iovec is modified during the copy.
*/
-int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
- struct iovec *to, int len)
+int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
@@ -330,8 +329,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
if (copy > 0) {
if (copy > len)
copy = len;
- if (memcpy_toiovec(to, skb->data + offset, copy))
- goto fault;
+ if (copy_to_iter(skb->data + offset, copy, to) != copy)
+ goto short_copy;
if ((len -= copy) == 0)
return 0;
offset += copy;
@@ -346,18 +345,12 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
- int err;
- u8 *vaddr;
- struct page *page = skb_frag_page(frag);
-
if (copy > len)
copy = len;
- vaddr = kmap(page);
- err = memcpy_toiovec(to, vaddr + frag->page_offset +
- offset - start, copy);
- kunmap(page);
- if (err)
- goto fault;
+ if (copy_page_to_iter(skb_frag_page(frag),
+ frag->page_offset + offset -
+ start, copy, to) != copy)
+ goto short_copy;
if (!(len -= copy))
return 0;
offset += copy;
@@ -374,9 +367,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_iovec(frag_iter,
- offset - start,
- to, copy))
+ if (skb_copy_datagram_iter(frag_iter, offset - start,
+ to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
@@ -387,113 +379,33 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
if (!len)
return 0;
+ /* This is not really a user copy fault, but rather someone
+ * gave us a bogus length on the skb. We should probably
+ * print a warning here as it may indicate a kernel bug.
+ */
+
fault:
return -EFAULT;
-}
-EXPORT_SYMBOL(skb_copy_datagram_iovec);
-/**
- * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
- * @skb: buffer to copy
- * @offset: offset in the buffer to start copying from
- * @to: io vector to copy to
- * @to_offset: offset in the io vector to start copying to
- * @len: amount of data to copy from buffer to iovec
- *
- * Returns 0 or -EFAULT.
- * Note: the iovec is not modified during the copy.
- */
-int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
- const struct iovec *to, int to_offset,
- int len)
-{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
- struct sk_buff *frag_iter;
+short_copy:
+ if (iov_iter_count(to))
+ goto fault;
- /* Copy header. */
- if (copy > 0) {
- if (copy > len)
- copy = len;
- if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to_offset += copy;
- }
-
- /* Copy paged appendix. Hmm... why does this look so complicated? */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- WARN_ON(start > offset + len);
-
- end = start + skb_frag_size(frag);
- if ((copy = end - offset) > 0) {
- int err;
- u8 *vaddr;
- struct page *page = skb_frag_page(frag);
-
- if (copy > len)
- copy = len;
- vaddr = kmap(page);
- err = memcpy_toiovecend(to, vaddr + frag->page_offset +
- offset - start, to_offset, copy);
- kunmap(page);
- if (err)
- goto fault;
- if (!(len -= copy))
- return 0;
- offset += copy;
- to_offset += copy;
- }
- start = end;
- }
-
- skb_walk_frags(skb, frag_iter) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + frag_iter->len;
- if ((copy = end - offset) > 0) {
- if (copy > len)
- copy = len;
- if (skb_copy_datagram_const_iovec(frag_iter,
- offset - start,
- to, to_offset,
- copy))
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- to_offset += copy;
- }
- start = end;
- }
- if (!len)
- return 0;
-
-fault:
- return -EFAULT;
+ return 0;
}
-EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
+EXPORT_SYMBOL(skb_copy_datagram_iter);
/**
- * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying to
- * @from: io vector to copy to
- * @from_offset: offset in the io vector to start copying from
+ * @from: the copy source
* @len: amount of data to copy to buffer from iovec
*
* Returns 0 or -EFAULT.
- * Note: the iovec is not modified during the copy.
*/
-int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
- const struct iovec *from, int from_offset,
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+ struct iov_iter *from,
int len)
{
int start = skb_headlen(skb);
@@ -504,13 +416,11 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
if (copy > 0) {
if (copy > len)
copy = len;
- if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
- copy))
+ if (copy_from_iter(skb->data + offset, copy, from) != copy)
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
- from_offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -522,24 +432,19 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
- int err;
- u8 *vaddr;
- struct page *page = skb_frag_page(frag);
+ size_t copied;
if (copy > len)
copy = len;
- vaddr = kmap(page);
- err = memcpy_fromiovecend(vaddr + frag->page_offset +
- offset - start,
- from, from_offset, copy);
- kunmap(page);
- if (err)
+ copied = copy_page_from_iter(skb_frag_page(frag),
+ frag->page_offset + offset - start,
+ copy, from);
+ if (copied != copy)
goto fault;
if (!(len -= copy))
return 0;
offset += copy;
- from_offset += copy;
}
start = end;
}
@@ -553,16 +458,13 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_from_iovec(frag_iter,
- offset - start,
- from,
- from_offset,
- copy))
+ if (skb_copy_datagram_from_iter(frag_iter,
+ offset - start,
+ from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
- from_offset += copy;
}
start = end;
}
@@ -572,101 +474,82 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
fault:
return -EFAULT;
}
-EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+EXPORT_SYMBOL(skb_copy_datagram_from_iter);
/**
- * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ * zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
* @skb: buffer to copy
- * @from: io vector to copy from
- * @offset: offset in the io vector to start copying from
- * @count: amount of vectors to copy to buffer from
+ * @from: the source to copy from
*
* The function will first copy up to headlen, and then pin the userspace
* pages and build frags through them.
*
* Returns 0, -EFAULT or -EMSGSIZE.
- * Note: the iovec is not modified during the copy
*/
-int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
- int len = iov_length(from, count) - offset;
+ int len = iov_iter_count(from);
int copy = min_t(int, skb_headlen(skb), len);
- int size;
- int i = 0;
+ int frag = 0;
/* copy up to skb headlen */
- if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+ if (skb_copy_datagram_from_iter(skb, 0, from, copy))
return -EFAULT;
- if (len == copy)
- return 0;
-
- offset += copy;
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
+ while (iov_iter_count(from)) {
+ struct page *pages[MAX_SKB_FRAGS];
+ size_t start;
+ ssize_t copied;
unsigned long truesize;
+ int n = 0;
- /* Skip over from offset and copied */
- if (offset >= from->iov_len) {
- offset -= from->iov_len;
- ++from;
- continue;
- }
- len = from->iov_len - offset;
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
+ if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- release_pages(&page[i], num_pages, 0);
+
+ copied = iov_iter_get_pages(from, pages, ~0U,
+ MAX_SKB_FRAGS - frag, &start);
+ if (copied < 0)
return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
+
+ iov_iter_advance(from, copied);
+
+ truesize = PAGE_ALIGN(copied + start);
+ skb->data_len += copied;
+ skb->len += copied;
skb->truesize += truesize;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- skb_fill_page_desc(skb, i, page[i], off, size);
- base += size;
- len -= size;
- i++;
+ while (copied) {
+ int size = min_t(int, copied, PAGE_SIZE - start);
+ skb_fill_page_desc(skb, frag++, pages[n], start, size);
+ start = 0;
+ copied -= size;
+ n++;
}
- offset = 0;
- ++from;
}
return 0;
}
-EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+EXPORT_SYMBOL(zerocopy_sg_from_iter);
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
- u8 __user *to, int len,
+ struct iov_iter *to, int len,
__wsum *csump)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int pos = 0;
+ int n;
/* Copy header. */
if (copy > 0) {
- int err = 0;
if (copy > len)
copy = len;
- *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
- *csump, &err);
- if (err)
+ n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+ if (n != copy)
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
- to += copy;
pos = copy;
}
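The zerocopy_sg_from_iter() rewrite earlier in this hunk pins user pages with iov_iter_get_pages() and then slices the returned byte count into page-sized fragments, accounting truesize as PAGE_ALIGN(copied + start). A small runnable walk-through of that arithmetic with made-up numbers (userspace sketch, not kernel code):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t copied = 10000, start = 300;	/* bytes returned, offset in first page */
	size_t truesize = PAGE_ALIGN(copied + start);
	int frag = 0;

	printf("truesize accounted: %zu bytes (%zu pages)\n",
	       truesize, truesize / PAGE_SIZE);
	while (copied) {
		size_t size = copied < PAGE_SIZE - start ? copied : PAGE_SIZE - start;

		printf("frag %d: page offset %zu, length %zu\n", frag++, start, size);
		start = 0;		/* later pages are used from their beginning */
		copied -= size;
	}
	return 0;
}

Three fragments come out (3796 + 4096 + 2108 bytes), matching the three pages that PAGE_ALIGN charged to the socket's truesize.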
@@ -678,26 +561,22 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
- __wsum csum2;
- int err = 0;
- u8 *vaddr;
+ __wsum csum2 = 0;
struct page *page = skb_frag_page(frag);
+ u8 *vaddr = kmap(page);
if (copy > len)
copy = len;
- vaddr = kmap(page);
- csum2 = csum_and_copy_to_user(vaddr +
- frag->page_offset +
- offset - start,
- to, copy, 0, &err);
+ n = csum_and_copy_to_iter(vaddr + frag->page_offset +
+ offset - start, copy,
+ &csum2, to);
kunmap(page);
- if (err)
+ if (n != copy)
goto fault;
*csump = csum_block_add(*csump, csum2, pos);
if (!(len -= copy))
return 0;
offset += copy;
- to += copy;
pos += copy;
}
start = end;
@@ -722,7 +601,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
if ((len -= copy) == 0)
return 0;
offset += copy;
- to += copy;
pos += copy;
}
start = end;
@@ -775,20 +653,19 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
EXPORT_SYMBOL(__skb_checksum_complete);
/**
- * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
+ * skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
* @skb: skbuff
* @hlen: hardware length
- * @iov: io vector
+ * @msg: destination
*
* Caller _must_ check that skb will fit to this iovec.
*
* Returns: 0 - success.
* -EINVAL - checksum failure.
- * -EFAULT - fault during copy. Beware, in this case iovec
- * can be modified!
+ * -EFAULT - fault during copy.
*/
-int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen, struct iovec *iov)
+int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
+ int hlen, struct msghdr *msg)
{
__wsum csum;
int chunk = skb->len - hlen;
@@ -796,28 +673,20 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
if (!chunk)
return 0;
- /* Skip filled elements.
- * Pretty silly, look at memcpy_toiovec, though 8)
- */
- while (!iov->iov_len)
- iov++;
-
- if (iov->iov_len < chunk) {
+ if (iov_iter_count(&msg->msg_iter) < chunk) {
if (__skb_checksum_complete(skb))
goto csum_error;
- if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
+ if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
goto fault;
} else {
csum = csum_partial(skb->data, hlen, skb->csum);
- if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
+ if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
chunk, &csum))
goto fault;
if (csum_fold(csum))
goto csum_error;
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
netdev_rx_csum_fault(skb->dev);
- iov->iov_len -= chunk;
- iov->iov_base += chunk;
}
return 0;
csum_error:
@@ -825,7 +694,7 @@ csum_error:
fault:
return -EFAULT;
}
-EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
+EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
* datagram_poll - generic datagram poll
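In the checksum-and-copy path above, each fragment's partial sum is merged into the running 32-bit accumulator and the final csum_fold() collapses it to 16 bits; a zero fold result means the packet's checksum verified without a second pass over the data. A toy userspace version of the folding step, purely for illustration (not the kernel's csum_fold()):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator to 16 bits and complement it. */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t csum = 0x1a2b3c4d;		/* made-up partial sum */

	printf("folded: 0x%04x\n", fold(csum));
	return 0;
}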
diff --git a/net/core/dev.c b/net/core/dev.c
index 3acff0974560..f411c28d0a66 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -118,6 +118,7 @@
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
@@ -133,6 +134,7 @@
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
+#include <linux/hrtimer.h>
#include "net-sysfs.h"
@@ -1435,22 +1437,17 @@ EXPORT_SYMBOL(dev_close);
*/
void dev_disable_lro(struct net_device *dev)
{
- /*
- * If we're trying to disable lro on a vlan device
- * use the underlying physical device instead
- */
- if (is_vlan_dev(dev))
- dev = vlan_dev_real_dev(dev);
-
- /* the same for macvlan devices */
- if (netif_is_macvlan(dev))
- dev = macvlan_dev_real_dev(dev);
+ struct net_device *lower_dev;
+ struct list_head *iter;
dev->wanted_features &= ~NETIF_F_LRO;
netdev_update_features(dev);
if (unlikely(dev->features & NETIF_F_LRO))
netdev_WARN(dev, "failed to disable LRO!\n");
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter)
+ dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
@@ -2530,7 +2527,7 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
- if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
+ if (eth_p_mpls(type))
features &= skb->dev->mpls_features;
return features;
@@ -2647,12 +2644,8 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{
if (vlan_tx_tag_present(skb) &&
- !vlan_hw_offload_capable(features, skb->vlan_proto)) {
- skb = __vlan_put_tag(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (skb)
- skb->vlan_tci = 0;
- }
+ !vlan_hw_offload_capable(features, skb->vlan_proto))
+ skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
@@ -3304,7 +3297,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_lock(sd);
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
- if (skb_queue_len(&sd->input_pkt_queue)) {
+ if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
@@ -4179,7 +4172,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
struct sk_buff *skb = napi->skb;
if (!skb) {
- skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+ skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
napi->skb = skb;
}
return skb;
@@ -4316,20 +4309,28 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
local_irq_enable();
}
+static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ return sd->rps_ipi_list != NULL;
+#else
+ return false;
+#endif
+}
+
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
-#ifdef CONFIG_RPS
/* Check if we have pending ipi, its better to send them now,
* not waiting net_rx_action() end.
*/
- if (sd->rps_ipi_list) {
+ if (sd_has_rps_ipi_waiting(sd)) {
local_irq_disable();
net_rps_action_and_irq_enable(sd);
}
-#endif
+
napi->weight = weight_p;
local_irq_disable();
while (1) {
@@ -4356,7 +4357,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
* We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
- list_del(&napi->poll_list);
napi->state = 0;
rps_unlock(sd);
@@ -4376,7 +4376,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
* __napi_schedule - schedule for receive
* @n: entry to schedule
*
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
*/
void __napi_schedule(struct napi_struct *n)
{
@@ -4388,18 +4389,29 @@ void __napi_schedule(struct napi_struct *n)
}
EXPORT_SYMBOL(__napi_schedule);
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- BUG_ON(n->gro_list);
- list_del(&n->poll_list);
+ list_del_init(&n->poll_list);
smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
-void napi_complete(struct napi_struct *n)
+void napi_complete_done(struct napi_struct *n, int work_done)
{
unsigned long flags;
@@ -4410,12 +4422,28 @@ void napi_complete(struct napi_struct *n)
if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
return;
- napi_gro_flush(n, false);
- local_irq_save(flags);
- __napi_complete(n);
- local_irq_restore(flags);
+ if (n->gro_list) {
+ unsigned long timeout = 0;
+
+ if (work_done)
+ timeout = n->dev->gro_flush_timeout;
+
+ if (timeout)
+ hrtimer_start(&n->timer, ns_to_ktime(timeout),
+ HRTIMER_MODE_REL_PINNED);
+ else
+ napi_gro_flush(n, false);
+ }
+ if (likely(list_empty(&n->poll_list))) {
+ WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
+ } else {
+ /* If n->poll_list is not empty, we need to mask irqs */
+ local_irq_save(flags);
+ __napi_complete(n);
+ local_irq_restore(flags);
+ }
}
-EXPORT_SYMBOL(napi_complete);
+EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we dont take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
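napi_complete_done() above lets a driver report how much work its poll did, so the completion path can either flush GRO immediately (work_done == 0 or no gro_flush_timeout configured) or arm the new per-NAPI hrtimer and hold packets a little longer. A hypothetical driver poll routine showing the intended call pattern; foo_rx_clean() and struct foo_ring are invented for illustration, only the NAPI calls are real:

/* Sketch of a driver poll() using napi_complete_done(). */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int work_done = foo_rx_clean(ring, budget);	/* invented helper */

	if (work_done < budget)
		napi_complete_done(napi, work_done);	/* may arm the GRO hrtimer */

	return work_done;
}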
@@ -4469,10 +4497,23 @@ void napi_hash_del(struct napi_struct *napi)
}
EXPORT_SYMBOL_GPL(napi_hash_del);
+static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
+{
+ struct napi_struct *napi;
+
+ napi = container_of(timer, struct napi_struct, timer);
+ if (napi->gro_list)
+ napi_schedule(napi);
+
+ return HRTIMER_NORESTART;
+}
+
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
INIT_LIST_HEAD(&napi->poll_list);
+ hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ napi->timer.function = napi_watchdog;
napi->gro_count = 0;
napi->gro_list = NULL;
napi->skb = NULL;
@@ -4491,6 +4532,20 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
}
EXPORT_SYMBOL(netif_napi_add);
+void napi_disable(struct napi_struct *n)
+{
+ might_sleep();
+ set_bit(NAPI_STATE_DISABLE, &n->state);
+
+ while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
+ msleep(1);
+
+ hrtimer_cancel(&n->timer);
+
+ clear_bit(NAPI_STATE_DISABLE, &n->state);
+}
+EXPORT_SYMBOL(napi_disable);
+
void netif_napi_del(struct napi_struct *napi)
{
list_del_init(&napi->dev_list);
@@ -4507,29 +4562,28 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
void *have;
local_irq_disable();
+ list_splice_init(&sd->poll_list, &list);
+ local_irq_enable();
- while (!list_empty(&sd->poll_list)) {
+ while (!list_empty(&list)) {
struct napi_struct *n;
int work, weight;
- /* If softirq window is exhuasted then punt.
+ /* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies since which will allow
* an average latency of 1.5/HZ.
*/
if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
goto softnet_break;
- local_irq_enable();
- /* Even though interrupts have been re-enabled, this
- * access is safe because interrupts can only add new
- * entries to the tail of this list, and only ->poll()
- * calls can remove this head entry from the list.
- */
- n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
+ n = list_first_entry(&list, struct napi_struct, poll_list);
+ list_del_init(&n->poll_list);
have = netpoll_poll_lock(n);
@@ -4551,8 +4605,6 @@ static void net_rx_action(struct softirq_action *h)
budget -= work;
- local_irq_disable();
-
/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
@@ -4560,32 +4612,40 @@ static void net_rx_action(struct softirq_action *h)
*/
if (unlikely(work == weight)) {
if (unlikely(napi_disable_pending(n))) {
- local_irq_enable();
napi_complete(n);
- local_irq_disable();
} else {
if (n->gro_list) {
/* flush too old packets
* If HZ < 1000, flush all packets.
*/
- local_irq_enable();
napi_gro_flush(n, HZ >= 1000);
- local_irq_disable();
}
- list_move_tail(&n->poll_list, &sd->poll_list);
+ list_add_tail(&n->poll_list, &repoll);
}
}
netpoll_poll_unlock(have);
}
+
+ if (!sd_has_rps_ipi_waiting(sd) &&
+ list_empty(&list) &&
+ list_empty(&repoll))
+ return;
out:
+ local_irq_disable();
+
+ list_splice_tail_init(&sd->poll_list, &list);
+ list_splice_tail(&repoll, &list);
+ list_splice(&list, &sd->poll_list);
+ if (!list_empty(&sd->poll_list))
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+
net_rps_action_and_irq_enable(sd);
return;
softnet_break:
sd->time_squeeze++;
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
@@ -5786,7 +5846,7 @@ EXPORT_SYMBOL(dev_change_carrier);
* Get device physical port ID
*/
int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -5865,6 +5925,8 @@ static void rollback_registered_many(struct list_head *head)
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
+ struct sk_buff *skb = NULL;
+
/* Shutdown queueing discipline. */
dev_shutdown(dev);
@@ -5874,6 +5936,11 @@ static void rollback_registered_many(struct list_head *head)
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ if (!dev->rtnl_link_ops ||
+ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
+ GFP_KERNEL);
+
/*
* Flush the unicast and multicast chains
*/
@@ -5883,9 +5950,8 @@ static void rollback_registered_many(struct list_head *head)
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
- if (!dev->rtnl_link_ops ||
- dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+ if (skb)
+ rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index b6b230600b97..c0548d268e1a 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -278,8 +278,8 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
EXPORT_SYMBOL(__hw_addr_sync_dev);
/**
- * __hw_addr_unsync_dev - Remove synchonized addresses from device
- * @list: address list to remove syncronized addresses from
+ * __hw_addr_unsync_dev - Remove synchronized addresses from device
+ * @list: address list to remove synchronized addresses from
* @dev: device to sync
* @unsync: function to call if address should be removed
*
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 72e899a3efda..b94b1d293506 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -142,10 +142,12 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
case SIOCGIFHWADDR:
if (!dev->addr_len)
- memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+ memset(ifr->ifr_hwaddr.sa_data, 0,
+ sizeof(ifr->ifr_hwaddr.sa_data));
else
memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ min(sizeof(ifr->ifr_hwaddr.sa_data),
+ (size_t)dev->addr_len));
ifr->ifr_hwaddr.sa_family = dev->type;
return 0;
@@ -265,7 +267,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ min(sizeof(ifr->ifr_hwaddr.sa_data),
+ (size_t)dev->addr_len));
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return 0;
diff --git a/net/core/dst.c b/net/core/dst.c
index a028409ee438..e956ce6d1378 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -327,30 +327,6 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
-/**
- * __skb_dst_set_noref - sets skb dst, without a reference
- * @skb: buffer
- * @dst: dst entry
- * @force: if force is set, use noref version even for DST_NOCACHE entries
- *
- * Sets skb dst, assuming a reference was not taken on dst
- * skb_dst_drop() should not dst_release() this dst
- */
-void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force)
-{
- WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
- /* If dst not in cache, we must take a reference, because
- * dst_release() will destroy dst as soon as its refcount becomes zero
- */
- if (unlikely((dst->flags & DST_NOCACHE) && !force)) {
- dst_hold(dst);
- skb_dst_set(skb, dst);
- } else {
- skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
- }
-}
-EXPORT_SYMBOL(__skb_dst_set_noref);
-
/* Dirty hack. We did it in 2.2 (in __dst_free),
* we have _very_ good reasons not to repeat
* this mistake in 2.3, but we have no choice
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 06dfb293e5aa..550892cd6b3f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
+#include <linux/net.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -84,7 +85,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation",
[NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
- [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
@@ -100,6 +100,12 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_BUSY_POLL_BIT] = "busy-poll",
};
+static const char
+rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
+ [ETH_RSS_HASH_TOP_BIT] = "toeplitz",
+ [ETH_RSS_HASH_XOR_BIT] = "xor",
+};
+
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gfeatures cmd = {
@@ -185,6 +191,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
if (sset == ETH_SS_FEATURES)
return ARRAY_SIZE(netdev_features_strings);
+ if (sset == ETH_SS_RSS_HASH_FUNCS)
+ return ARRAY_SIZE(rss_hash_func_strings);
+
if (ops->get_sset_count && ops->get_strings)
return ops->get_sset_count(dev, sset);
else
@@ -199,6 +208,9 @@ static void __ethtool_get_strings(struct net_device *dev,
if (stringset == ETH_SS_FEATURES)
memcpy(data, netdev_features_strings,
sizeof(netdev_features_strings));
+ else if (stringset == ETH_SS_RSS_HASH_FUNCS)
+ memcpy(data, rss_hash_func_strings,
+ sizeof(rss_hash_func_strings));
else
/* ops->get_strings is valid because checked earlier */
ops->get_strings(dev, stringset, data);
@@ -574,6 +586,16 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
return 0;
}
+u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void netdev_rss_key_fill(void *buffer, size_t len)
+{
+ BUG_ON(len > sizeof(netdev_rss_key));
+ net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
+ memcpy(buffer, netdev_rss_key, len);
+}
+EXPORT_SYMBOL(netdev_rss_key_fill);
+
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -608,7 +630,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
if (!indir)
return -ENOMEM;
- ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL);
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
if (ret)
goto out;
@@ -669,7 +691,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
goto out;
}
- ret = ops->set_rxfh(dev, indir, NULL);
+ ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
out:
kfree(indir);
@@ -687,12 +709,11 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
u32 total_size;
u32 indir_bytes;
u32 *indir = NULL;
+ u8 dev_hfunc = 0;
u8 *hkey = NULL;
u8 *rss_config;
- if (!(dev->ethtool_ops->get_rxfh_indir_size ||
- dev->ethtool_ops->get_rxfh_key_size) ||
- !dev->ethtool_ops->get_rxfh)
+ if (!ops->get_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
@@ -700,16 +721,14 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (ops->get_rxfh_key_size)
dev_key_size = ops->get_rxfh_key_size(dev);
- if ((dev_key_size + dev_indir_size) == 0)
- return -EOPNOTSUPP;
-
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
user_indir_size = rxfh.indir_size;
user_key_size = rxfh.key_size;
/* Check that reserved fields are 0 for now */
- if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
+ if (rxfh.rss_context || rxfh.rsvd8[0] || rxfh.rsvd8[1] ||
+ rxfh.rsvd8[2] || rxfh.rsvd32)
return -EINVAL;
rxfh.indir_size = dev_indir_size;
@@ -717,13 +736,6 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
return -EFAULT;
- /* If the user buffer size is 0, this is just a query for the
- * device table size and key size. Otherwise, if the User size is
- * not equal to device table size or key size it's an error.
- */
- if (!user_indir_size && !user_key_size)
- return 0;
-
if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
(user_key_size && (user_key_size != dev_key_size)))
return -EINVAL;
@@ -740,14 +752,19 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (user_key_size)
hkey = rss_config + indir_bytes;
- ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
- if (!ret) {
- if (copy_to_user(useraddr +
- offsetof(struct ethtool_rxfh, rss_config[0]),
- rss_config, total_size))
- ret = -EFAULT;
- }
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey, &dev_hfunc);
+ if (ret)
+ goto out;
+ if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
+ &dev_hfunc, sizeof(rxfh.hfunc))) {
+ ret = -EFAULT;
+ } else if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh, rss_config[0]),
+ rss_config, total_size)) {
+ ret = -EFAULT;
+ }
+out:
kfree(rss_config);
return ret;
@@ -766,33 +783,31 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
u8 *rss_config;
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
- if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
- !ops->get_rxnfc || !ops->set_rxfh)
+ if (!ops->get_rxnfc || !ops->set_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
dev_indir_size = ops->get_rxfh_indir_size(dev);
if (ops->get_rxfh_key_size)
dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
- if ((dev_key_size + dev_indir_size) == 0)
- return -EOPNOTSUPP;
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
/* Check that reserved fields are 0 for now */
- if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
+ if (rxfh.rss_context || rxfh.rsvd8[0] || rxfh.rsvd8[1] ||
+ rxfh.rsvd8[2] || rxfh.rsvd32)
return -EINVAL;
- /* If either indir or hash key is valid, proceed further.
- * It is not valid to request that both be unchanged.
+ /* If either indir, hash key or function is valid, proceed further.
+ * Must request at least one change: indir size, hash key or function.
*/
if ((rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.indir_size != dev_indir_size) ||
(rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
- rxfh.key_size == 0))
+ rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE))
return -EINVAL;
if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
@@ -835,7 +850,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
}
- ret = ops->set_rxfh(dev, indir, hkey);
+ ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc);
out:
kfree(rss_config);
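netdev_rss_key_fill() above gives every driver the same lazily generated random RSS key instead of each device rolling its own, which keeps flow-to-queue mapping consistent across ports. A sketch of how a driver might consume it; struct foo_priv, FOO_RSS_KEY_SIZE and foo_write_rss_key() are invented, and the requested length must not exceed NETDEV_RSS_KEY_LEN (enforced by the BUG_ON above):

/* Sketch only: fill the device's hash key from the host-wide key. */
static void foo_setup_rss(struct foo_priv *priv)
{
	u8 key[FOO_RSS_KEY_SIZE];

	netdev_rss_key_fill(key, sizeof(key));		/* same key on every device */
	foo_write_rss_key(priv, key, sizeof(key));	/* invented register write */
}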
diff --git a/net/core/filter.c b/net/core/filter.c
index 647b12265e18..ec9baea10c16 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -44,6 +44,7 @@
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
+#include <linux/bpf.h>
/**
* sk_filter - run a packet through a socket filter
@@ -813,8 +814,12 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
static void __bpf_prog_release(struct bpf_prog *prog)
{
- bpf_release_orig_filter(prog);
- bpf_prog_free(prog);
+ if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
+ bpf_prog_put(prog);
+ } else {
+ bpf_release_orig_filter(prog);
+ bpf_prog_free(prog);
+ }
}
static void __sk_filter_release(struct sk_filter *fp)
@@ -1088,6 +1093,94 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
+#ifdef CONFIG_BPF_SYSCALL
+int sk_attach_bpf(u32 ufd, struct sock *sk)
+{
+ struct sk_filter *fp, *old_fp;
+ struct bpf_prog *prog;
+
+ if (sock_flag(sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+
+ prog = bpf_prog_get(ufd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
+ /* valid fd, but invalid program type */
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
+ fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ if (!fp) {
+ bpf_prog_put(prog);
+ return -ENOMEM;
+ }
+ fp->prog = prog;
+
+ atomic_set(&fp->refcnt, 0);
+
+ if (!sk_filter_charge(sk, fp)) {
+ __sk_filter_release(fp);
+ return -ENOMEM;
+ }
+
+ old_fp = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
+ rcu_assign_pointer(sk->sk_filter, fp);
+
+ if (old_fp)
+ sk_filter_uncharge(sk, old_fp);
+
+ return 0;
+}
+
+/* allow socket filters to call
+ * bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
+ */
+static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_map_lookup_elem:
+ return &bpf_map_lookup_elem_proto;
+ case BPF_FUNC_map_update_elem:
+ return &bpf_map_update_elem_proto;
+ case BPF_FUNC_map_delete_elem:
+ return &bpf_map_delete_elem_proto;
+ default:
+ return NULL;
+ }
+}
+
+static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
+{
+ /* skb fields cannot be accessed yet */
+ return false;
+}
+
+static struct bpf_verifier_ops sock_filter_ops = {
+ .get_func_proto = sock_filter_func_proto,
+ .is_valid_access = sock_filter_is_valid_access,
+};
+
+static struct bpf_prog_type_list tl = {
+ .ops = &sock_filter_ops,
+ .type = BPF_PROG_TYPE_SOCKET_FILTER,
+};
+
+static int __init register_sock_filter_ops(void)
+{
+ bpf_register_prog_type(&tl);
+ return 0;
+}
+late_initcall(register_sock_filter_ops);
+#else
+int sk_attach_bpf(u32 ufd, struct sock *sk)
+{
+ return -EOPNOTSUPP;
+}
+#endif
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
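sk_attach_bpf() above is the kernel side of attaching an already-loaded eBPF program to a socket by file descriptor; in this series the userspace entry point is the SO_ATTACH_BPF socket option. A hedged userspace sketch, assuming prog_fd came from a successful bpf(BPF_PROG_LOAD, ...) of a BPF_PROG_TYPE_SOCKET_FILTER program (the fallback #define is an assumption for older headers):

#include <sys/socket.h>
#include <stdio.h>

#ifndef SO_ATTACH_BPF
#define SO_ATTACH_BPF 50	/* assumed value, check your kernel headers */
#endif

/* Attach a loaded socket-filter program (prog_fd) to sock_fd. */
static int attach_bpf_filter(int sock_fd, int prog_fd)
{
	if (setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
		       &prog_fd, sizeof(prog_fd)) < 0) {
		perror("setsockopt(SO_ATTACH_BPF)");
		return -1;
	}
	return 0;
}

A program of any other type is rejected with -EINVAL, matching the prog_type check in sk_attach_bpf().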
diff --git a/net/core/iovec.c b/net/core/iovec.c
index e1ec45ab1e63..dcbe98b3726a 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -28,53 +28,6 @@
#include <net/sock.h>
/*
- * Verify iovec. The caller must ensure that the iovec is big enough
- * to hold the message iovec.
- *
- * Save time not doing access_ok. copy_*_user will make this work
- * in any case.
- */
-
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
-{
- int size, ct, err;
-
- if (m->msg_name && m->msg_namelen) {
- if (mode == VERIFY_READ) {
- void __user *namep;
- namep = (void __user __force *) m->msg_name;
- err = move_addr_to_kernel(namep, m->msg_namelen,
- address);
- if (err < 0)
- return err;
- }
- m->msg_name = address;
- } else {
- m->msg_name = NULL;
- m->msg_namelen = 0;
- }
-
- size = m->msg_iovlen * sizeof(struct iovec);
- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
- return -EFAULT;
-
- m->msg_iov = iov;
- err = 0;
-
- for (ct = 0; ct < m->msg_iovlen; ct++) {
- size_t len = iov[ct].iov_len;
-
- if (len > INT_MAX - err) {
- len = INT_MAX - err;
- iov[ct].iov_len = len;
- }
- err += len;
- }
-
- return err;
-}
-
-/*
* And now for the all-in-one: copy and checksum from a user iovec
* directly to a datagram
* Calls to csum_partial but the last must be in 32 bit chunks
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bd0767e6b2b3..49a9e3e06c08 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -21,7 +21,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <asm/types.h>
+#include <linux/types.h>
enum lw_bits {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ef31fef25e5a..8e38f17288d3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -56,7 +56,6 @@ static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
-static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
@@ -87,13 +86,8 @@ static const struct file_operations neigh_stat_seq_fops;
the most complicated procedure, which we allow is dev->hard_header.
It is supposed, that dev->hard_header is simplistic and does
not make callbacks to neighbour tables.
-
- The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
- list of neighbour tables. This list is used only in process context,
*/
-static DEFINE_RWLOCK(neigh_tbl_lock);
-
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
kfree_skb(skb);
@@ -773,7 +767,7 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
tbl->last_rand = jiffies;
- for (p = &tbl->parms; p; p = p->next)
+ list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
@@ -1446,7 +1440,7 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
{
struct neigh_parms *p;
- for (p = &tbl->parms; p; p = p->next) {
+ list_for_each_entry(p, &tbl->parms_list, list) {
if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
(!p->dev && !ifindex && net_eq(net, &init_net)))
return p;
@@ -1481,8 +1475,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
}
write_lock_bh(&tbl->lock);
- p->next = tbl->parms.next;
- tbl->parms.next = p;
+ list_add(&p->list, &tbl->parms.list);
write_unlock_bh(&tbl->lock);
neigh_parms_data_state_cleanall(p);
@@ -1501,24 +1494,15 @@ static void neigh_rcu_free_parms(struct rcu_head *head)
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
- struct neigh_parms **p;
-
if (!parms || parms == &tbl->parms)
return;
write_lock_bh(&tbl->lock);
- for (p = &tbl->parms.next; *p; p = &(*p)->next) {
- if (*p == parms) {
- *p = parms->next;
- parms->dead = 1;
- write_unlock_bh(&tbl->lock);
- if (parms->dev)
- dev_put(parms->dev);
- call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
- return;
- }
- }
+ list_del(&parms->list);
+ parms->dead = 1;
write_unlock_bh(&tbl->lock);
- neigh_dbg(1, "%s: not found\n", __func__);
+ if (parms->dev)
+ dev_put(parms->dev);
+ call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
@@ -1530,11 +1514,15 @@ static void neigh_parms_destroy(struct neigh_parms *parms)
static struct lock_class_key neigh_table_proxy_queue_class;
-static void neigh_table_init_no_netlink(struct neigh_table *tbl)
+static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
+
+void neigh_table_init(int index, struct neigh_table *tbl)
{
unsigned long now = jiffies;
unsigned long phsize;
+ INIT_LIST_HEAD(&tbl->parms_list);
+ list_add(&tbl->parms.list, &tbl->parms_list);
write_pnet(&tbl->parms.net, &init_net);
atomic_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
@@ -1574,34 +1562,14 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
-}
-
-void neigh_table_init(struct neigh_table *tbl)
-{
- struct neigh_table *tmp;
-
- neigh_table_init_no_netlink(tbl);
- write_lock(&neigh_tbl_lock);
- for (tmp = neigh_tables; tmp; tmp = tmp->next) {
- if (tmp->family == tbl->family)
- break;
- }
- tbl->next = neigh_tables;
- neigh_tables = tbl;
- write_unlock(&neigh_tbl_lock);
- if (unlikely(tmp)) {
- pr_err("Registering multiple tables for family %d\n",
- tbl->family);
- dump_stack();
- }
+ neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
-int neigh_table_clear(struct neigh_table *tbl)
+int neigh_table_clear(int index, struct neigh_table *tbl)
{
- struct neigh_table **tp;
-
+ neigh_tables[index] = NULL;
/* It is not clean... Fix it to unload IPv6 module safely */
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
@@ -1609,14 +1577,6 @@ int neigh_table_clear(struct neigh_table *tbl)
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
- write_lock(&neigh_tbl_lock);
- for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
- if (*tp == tbl) {
- *tp = tbl->next;
- break;
- }
- }
- write_unlock(&neigh_tbl_lock);
call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
neigh_hash_free_rcu);
@@ -1634,12 +1594,32 @@ int neigh_table_clear(struct neigh_table *tbl)
}
EXPORT_SYMBOL(neigh_table_clear);
+static struct neigh_table *neigh_find_table(int family)
+{
+ struct neigh_table *tbl = NULL;
+
+ switch (family) {
+ case AF_INET:
+ tbl = neigh_tables[NEIGH_ARP_TABLE];
+ break;
+ case AF_INET6:
+ tbl = neigh_tables[NEIGH_ND_TABLE];
+ break;
+ case AF_DECnet:
+ tbl = neigh_tables[NEIGH_DN_TABLE];
+ break;
+ }
+
+ return tbl;
+}
+
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *dst_attr;
struct neigh_table *tbl;
+ struct neighbour *neigh;
struct net_device *dev = NULL;
int err = -EINVAL;
@@ -1660,39 +1640,31 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
}
}
- read_lock(&neigh_tbl_lock);
- for (tbl = neigh_tables; tbl; tbl = tbl->next) {
- struct neighbour *neigh;
+ tbl = neigh_find_table(ndm->ndm_family);
+ if (tbl == NULL)
+ return -EAFNOSUPPORT;
- if (tbl->family != ndm->ndm_family)
- continue;
- read_unlock(&neigh_tbl_lock);
-
- if (nla_len(dst_attr) < tbl->key_len)
- goto out;
-
- if (ndm->ndm_flags & NTF_PROXY) {
- err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
- goto out;
- }
+ if (nla_len(dst_attr) < tbl->key_len)
+ goto out;
- if (dev == NULL)
- goto out;
+ if (ndm->ndm_flags & NTF_PROXY) {
+ err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
+ goto out;
+ }
- neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
- if (neigh == NULL) {
- err = -ENOENT;
- goto out;
- }
+ if (dev == NULL)
+ goto out;
- err = neigh_update(neigh, NULL, NUD_FAILED,
- NEIGH_UPDATE_F_OVERRIDE |
- NEIGH_UPDATE_F_ADMIN);
- neigh_release(neigh);
+ neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
+ if (neigh == NULL) {
+ err = -ENOENT;
goto out;
}
- read_unlock(&neigh_tbl_lock);
- err = -EAFNOSUPPORT;
+
+ err = neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE |
+ NEIGH_UPDATE_F_ADMIN);
+ neigh_release(neigh);
out:
return err;
@@ -1700,11 +1672,14 @@ out:
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
+ int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct neigh_table *tbl;
struct net_device *dev = NULL;
+ struct neighbour *neigh;
+ void *dst, *lladdr;
int err;
ASSERT_RTNL();
@@ -1728,70 +1703,60 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
goto out;
}
- read_lock(&neigh_tbl_lock);
- for (tbl = neigh_tables; tbl; tbl = tbl->next) {
- int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
- struct neighbour *neigh;
- void *dst, *lladdr;
+ tbl = neigh_find_table(ndm->ndm_family);
+ if (tbl == NULL)
+ return -EAFNOSUPPORT;
- if (tbl->family != ndm->ndm_family)
- continue;
- read_unlock(&neigh_tbl_lock);
+ if (nla_len(tb[NDA_DST]) < tbl->key_len)
+ goto out;
+ dst = nla_data(tb[NDA_DST]);
+ lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
- if (nla_len(tb[NDA_DST]) < tbl->key_len)
- goto out;
- dst = nla_data(tb[NDA_DST]);
- lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
+ if (ndm->ndm_flags & NTF_PROXY) {
+ struct pneigh_entry *pn;
- if (ndm->ndm_flags & NTF_PROXY) {
- struct pneigh_entry *pn;
+ err = -ENOBUFS;
+ pn = pneigh_lookup(tbl, net, dst, dev, 1);
+ if (pn) {
+ pn->flags = ndm->ndm_flags;
+ err = 0;
+ }
+ goto out;
+ }
- err = -ENOBUFS;
- pn = pneigh_lookup(tbl, net, dst, dev, 1);
- if (pn) {
- pn->flags = ndm->ndm_flags;
- err = 0;
- }
+ if (dev == NULL)
+ goto out;
+
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (neigh == NULL) {
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ err = -ENOENT;
goto out;
}
- if (dev == NULL)
+ neigh = __neigh_lookup_errno(tbl, dst, dev);
+ if (IS_ERR(neigh)) {
+ err = PTR_ERR(neigh);
+ goto out;
+ }
+ } else {
+ if (nlh->nlmsg_flags & NLM_F_EXCL) {
+ err = -EEXIST;
+ neigh_release(neigh);
goto out;
-
- neigh = neigh_lookup(tbl, dst, dev);
- if (neigh == NULL) {
- if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
- err = -ENOENT;
- goto out;
- }
-
- neigh = __neigh_lookup_errno(tbl, dst, dev);
- if (IS_ERR(neigh)) {
- err = PTR_ERR(neigh);
- goto out;
- }
- } else {
- if (nlh->nlmsg_flags & NLM_F_EXCL) {
- err = -EEXIST;
- neigh_release(neigh);
- goto out;
- }
-
- if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
- flags &= ~NEIGH_UPDATE_F_OVERRIDE;
}
- if (ndm->ndm_flags & NTF_USE) {
- neigh_event_send(neigh, NULL);
- err = 0;
- } else
- err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
- neigh_release(neigh);
- goto out;
+ if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
+ flags &= ~NEIGH_UPDATE_F_OVERRIDE;
}
- read_unlock(&neigh_tbl_lock);
- err = -EAFNOSUPPORT;
+ if (ndm->ndm_flags & NTF_USE) {
+ neigh_event_send(neigh, NULL);
+ err = 0;
+ } else
+ err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
+ neigh_release(neigh);
+
out:
return err;
}
@@ -1990,7 +1955,8 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
struct neigh_table *tbl;
struct ndtmsg *ndtmsg;
struct nlattr *tb[NDTA_MAX+1];
- int err;
+ bool found = false;
+ int err, tidx;
err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
nl_neightbl_policy);
@@ -2003,19 +1969,21 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
}
ndtmsg = nlmsg_data(nlh);
- read_lock(&neigh_tbl_lock);
- for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+
+ for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
+ tbl = neigh_tables[tidx];
+ if (!tbl)
+ continue;
if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
continue;
-
- if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
+ if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
+ found = true;
break;
+ }
}
- if (tbl == NULL) {
- err = -ENOENT;
- goto errout_locked;
- }
+ if (!found)
+ return -ENOENT;
/*
* We acquire tbl->lock to be nice to the periodic timers and
@@ -2126,8 +2094,6 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
errout_tbl_lock:
write_unlock_bh(&tbl->lock);
-errout_locked:
- read_unlock(&neigh_tbl_lock);
errout:
return err;
}
@@ -2142,10 +2108,13 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
- read_lock(&neigh_tbl_lock);
- for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
+ for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
struct neigh_parms *p;
+ tbl = neigh_tables[tidx];
+ if (!tbl)
+ continue;
+
if (tidx < tbl_skip || (family && tbl->family != family))
continue;
@@ -2154,7 +2123,9 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
NLM_F_MULTI) <= 0)
break;
- for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
+ nidx = 0;
+ p = list_next_entry(&tbl->parms, list);
+ list_for_each_entry_from(p, &tbl->parms_list, list) {
if (!net_eq(neigh_parms_net(p), net))
continue;
@@ -2174,7 +2145,6 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
neigh_skip = 0;
}
out:
- read_unlock(&neigh_tbl_lock);
cb->args[0] = tidx;
cb->args[1] = nidx;
@@ -2357,7 +2327,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
int proxy = 0;
int err;
- read_lock(&neigh_tbl_lock);
family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
/* check for full ndmsg structure presence, family member is
@@ -2369,8 +2338,11 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
- for (tbl = neigh_tables, t = 0; tbl;
- tbl = tbl->next, t++) {
+ for (t = 0; t < NEIGH_NR_TABLES; t++) {
+ tbl = neigh_tables[t];
+
+ if (!tbl)
+ continue;
if (t < s_t || (family && tbl->family != family))
continue;
if (t > s_t)
@@ -2383,7 +2355,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (err < 0)
break;
}
- read_unlock(&neigh_tbl_lock);
cb->args[0] = t;
return skb->len;
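With the fixed-size neigh_tables[] array above, a protocol registers its neighbour table at a well-known index instead of linking onto a globally locked list, and lookups by address family become a simple switch in neigh_find_table(). In essence the callers in this series now do (sketch of the new signatures only, using the existing ARP table as the example):

	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);	/* protocol init */
	...
	neigh_table_clear(NEIGH_ARP_TABLE, &arp_tbl);	/* protocol teardown */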
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9dd06699b09c..999341244434 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -12,6 +12,7 @@
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
@@ -325,6 +326,23 @@ static ssize_t tx_queue_len_store(struct device *dev,
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
+static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
+{
+ dev->gro_flush_timeout = val;
+ return 0;
+}
+
+static ssize_t gro_flush_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
+}
+NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
+
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -387,7 +405,7 @@ static ssize_t phys_port_id_show(struct device *dev,
return restart_syscall();
if (dev_isalive(netdev)) {
- struct netdev_phys_port_id ppid;
+ struct netdev_phys_item_id ppid;
ret = dev_get_phys_port_id(netdev, &ppid);
if (!ret)
@@ -399,6 +417,28 @@ static ssize_t phys_port_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(phys_port_id);
+static ssize_t phys_switch_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ struct netdev_phys_item_id ppid;
+
+ ret = netdev_switch_parent_id_get(netdev, &ppid);
+ if (!ret)
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+static DEVICE_ATTR_RO(phys_switch_id);
+
static struct attribute *net_class_attrs[] = {
&dev_attr_netdev_group.attr,
&dev_attr_type.attr,
@@ -422,7 +462,9 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_mtu.attr,
&dev_attr_flags.attr,
&dev_attr_tx_queue_len.attr,
+ &dev_attr_gro_flush_timeout.attr,
&dev_attr_phys_port_id.attr,
+ &dev_attr_phys_switch_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(net_class);
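The sysfs additions above expose the new per-device knobs: gro_flush_timeout (nanoseconds, writable with CAP_NET_ADMIN) and the read-only phys_switch_id. A small userspace sketch that sets the GRO timeout; the interface name and the 20000 ns value are examples only:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/gro_flush_timeout", "w");

	if (!f) {
		perror("gro_flush_timeout");
		return 1;
	}
	fprintf(f, "%d\n", 20000);	/* 20000 ns: briefly delay GRO flushes */
	return fclose(f) == 0 ? 0 : 1;
}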
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7f155175bba8..ce780c722e48 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -337,17 +337,17 @@ EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
- struct proc_ns *ei;
struct file *file;
+ struct ns_common *ns;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
- ei = get_proc_ns(file_inode(file));
- if (ei->ns_ops == &netns_operations)
- net = get_net(ei->ns);
+ ns = get_proc_ns(file_inode(file));
+ if (ns->ops == &netns_operations)
+ net = get_net(container_of(ns, struct net, ns));
else
net = ERR_PTR(-EINVAL);
@@ -386,12 +386,15 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
- return proc_alloc_inum(&net->proc_inum);
+#ifdef CONFIG_NET_NS
+ net->ns.ops = &netns_operations;
+#endif
+ return ns_alloc_inum(&net->ns);
}
static __net_exit void net_ns_net_exit(struct net *net)
{
- proc_free_inum(net->proc_inum);
+ ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
@@ -629,7 +632,7 @@ void unregister_pernet_device(struct pernet_operations *ops)
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
-static void *netns_get(struct task_struct *task)
+static struct ns_common *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
@@ -640,17 +643,22 @@ static void *netns_get(struct task_struct *task)
net = get_net(nsproxy->net_ns);
task_unlock(task);
- return net;
+ return net ? &net->ns : NULL;
}
-static void netns_put(void *ns)
+static inline struct net *to_net_ns(struct ns_common *ns)
{
- put_net(ns);
+ return container_of(ns, struct net, ns);
}
-static int netns_install(struct nsproxy *nsproxy, void *ns)
+static void netns_put(struct ns_common *ns)
{
- struct net *net = ns;
+ put_net(to_net_ns(ns));
+}
+
+static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+{
+ struct net *net = to_net_ns(ns);
if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
@@ -661,18 +669,11 @@ static int netns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
-static unsigned int netns_inum(void *ns)
-{
- struct net *net = ns;
- return net->proc_inum;
-}
-
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
- .inum = netns_inum,
};
#endif
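Editor's note: the get_net_ns_by_fd()/ns_common rework above is the kernel side of the namespace-fd interface that setns(2) exercises. A small sketch, assuming a readable /proc/1/ns/net (any /proc/<pid>/ns/net works given sufficient privileges):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/1/ns/net", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* CLONE_NEWNET tells the kernel the fd must refer to a network
	 * namespace; internally that is the ns->ops == &netns_operations
	 * check seen above.
	 */
	if (setns(fd, CLONE_NEWNET) < 0)
		perror("setns");
	close(fd);
	return 0;
}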
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e6645b4f330a..e0ad5d16c9c5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -79,8 +79,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (vlan_tx_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
- skb = __vlan_put_tag(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb = __vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
/* This is actually a packet drop, but we
* don't want the code that calls this
@@ -88,7 +87,6 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
*/
goto out;
}
- skb->vlan_tci = 0;
}
status = netdev_start_xmit(skb, dev, txq, false);
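Editor's note: __vlan_hwaccel_push_inside() replaces the open-coded __vlan_put_tag() plus vlan_tci clearing pair: it writes the out-of-band (hardware-accelerated) tag into the frame as a 4-byte 802.1Q header after the MAC addresses and clears skb->vlan_tci. The standalone byte-level model below replays only that transformation, outside the skb machinery, with made-up frame contents.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_ALEN 6

static size_t push_vlan(uint8_t *frame, size_t len, uint16_t tpid, uint16_t tci)
{
	/* make room for the 4-byte tag right after the two MAC addresses */
	memmove(frame + 2 * ETH_ALEN + 4, frame + 2 * ETH_ALEN,
		len - 2 * ETH_ALEN);
	uint16_t be_tpid = htons(tpid), be_tci = htons(tci);
	memcpy(frame + 2 * ETH_ALEN, &be_tpid, 2);
	memcpy(frame + 2 * ETH_ALEN + 2, &be_tci, 2);
	return len + 4;
}

int main(void)
{
	uint8_t frame[64] = {	/* dst MAC, src MAC, EtherType 0x0800 */
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0x52, 0x54, 0x00, 0x00, 0x00, 0x01,
		0x08, 0x00 };
	size_t len = push_vlan(frame, 14, 0x8100, 100);

	printf("frame grew to %zu bytes; bytes 12-15: %02x %02x %02x %02x\n",
	       len, frame[12], frame[13], frame[14], frame[15]);
	return 0;
}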
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 443256bdcddc..da934fc3faa8 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3728,8 +3728,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
/* Remove proc before if_list entry, because add_device uses
* list to determine if interface already exist, avoid race
* with proc_create_data() */
- if (pkt_dev->entry)
- proc_remove(pkt_dev->entry);
+ proc_remove(pkt_dev->entry);
/* And update the thread if_list */
_rem_dev_from_if_list(t, pkt_dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 88e8de3b59b0..9cf6fe9ddc0c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -43,6 +44,7 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
+#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -868,7 +870,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
- + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
+ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -952,7 +955,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
- struct netdev_phys_port_id ppid;
+ struct netdev_phys_item_id ppid;
err = dev_get_phys_port_id(dev, &ppid);
if (err) {
@@ -967,6 +970,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct netdev_phys_item_id psid;
+
+ err = netdev_switch_parent_id_get(dev, &psid);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
@@ -1039,6 +1060,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (rtnl_phys_port_id_fill(skb, dev))
goto nla_put_failure;
+ if (rtnl_phys_switch_id_fill(skb, dev))
+ goto nla_put_failure;
+
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
@@ -1196,8 +1220,9 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
- [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
+ [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2221,8 +2246,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
- gfp_t flags)
+struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ unsigned int change, gfp_t flags)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -2240,11 +2265,28 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
kfree_skb(skb);
goto errout;
}
- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
- return;
+ return skb;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ return NULL;
+}
+
+void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
+{
+ struct net *net = dev_net(dev);
+
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
+}
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+ gfp_t flags)
+{
+ struct sk_buff *skb;
+
+ skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
+ if (skb)
+ rtmsg_ifinfo_send(skb, dev, flags);
}
EXPORT_SYMBOL(rtmsg_ifinfo);
@@ -2313,7 +2355,7 @@ errout:
int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
{
int err = -EINVAL;
@@ -2326,6 +2368,11 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
return err;
}
+ if (vid) {
+ pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
+ return err;
+ }
+
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
@@ -2339,6 +2386,28 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);
+static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
+{
+ u16 vid = 0;
+
+ if (vlan_attr) {
+ if (nla_len(vlan_attr) != sizeof(u16)) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
+ return -EINVAL;
+ }
+
+ vid = nla_get_u16(vlan_attr);
+
+ if (!vid || vid >= VLAN_VID_MASK) {
+ pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
+ vid);
+ return -EINVAL;
+ }
+ }
+ *p_vid = vid;
+ return 0;
+}
+
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
@@ -2346,6 +2415,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
u8 *addr;
+ u16 vid;
int err;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2371,6 +2441,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
addr = nla_data(tb[NDA_LLADDR]);
+ err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+ if (err)
+ return err;
+
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
@@ -2379,7 +2453,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
- err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
+ err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
+ nlh->nlmsg_flags);
if (err)
goto out;
else
@@ -2390,9 +2465,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((ndm->ndm_flags & NTF_SELF)) {
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
+ vid,
nlh->nlmsg_flags);
else
- err = ndo_dflt_fdb_add(ndm, tb, dev, addr,
+ err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
if (!err) {
@@ -2410,7 +2486,7 @@ out:
int ndo_dflt_fdb_del(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
int err = -EINVAL;
@@ -2439,6 +2515,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
struct net_device *dev;
int err = -EINVAL;
__u8 *addr;
+ u16 vid;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
@@ -2466,6 +2543,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
addr = nla_data(tb[NDA_LLADDR]);
+ err = fdb_vid_parse(tb[NDA_VLAN], &vid);
+ if (err)
+ return err;
+
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
@@ -2475,7 +2556,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
const struct net_device_ops *ops = br_dev->netdev_ops;
if (ops->ndo_fdb_del)
- err = ops->ndo_fdb_del(ndm, tb, dev, addr);
+ err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
if (err)
goto out;
@@ -2486,9 +2567,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
/* Embedded bridge, macvlan, and any other device support */
if (ndm->ndm_flags & NTF_SELF) {
if (dev->netdev_ops->ndo_fdb_del)
- err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
+ err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
+ vid);
else
- err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
+ err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
if (!err) {
rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
@@ -2628,12 +2710,22 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
+ unsigned int attrnum, unsigned int flag)
+{
+ if (mask & flag)
+ return nla_put_u8(skb, attrnum, !!(flags & flag));
+ return 0;
+}
+
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u16 mode)
+ struct net_device *dev, u16 mode,
+ u32 flags, u32 mask)
{
struct nlmsghdr *nlh;
struct ifinfomsg *ifm;
struct nlattr *br_afspec;
+ struct nlattr *protinfo;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
@@ -2665,13 +2757,46 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
if (!br_afspec)
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
- nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
+ if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
nla_nest_cancel(skb, br_afspec);
goto nla_put_failure;
}
+
+ if (mode != BRIDGE_MODE_UNDEF) {
+ if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
+ nla_nest_cancel(skb, br_afspec);
+ goto nla_put_failure;
+ }
+ }
nla_nest_end(skb, br_afspec);
+ protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
+ if (!protinfo)
+ goto nla_put_failure;
+
+ if (brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_FAST_LEAVE,
+ BR_MULTICAST_FAST_LEAVE) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_LEARNING, BR_LEARNING) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
+ nla_nest_cancel(skb, protinfo);
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, protinfo);
+
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
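Editor's note: the fdb_vid_parse()/NDA_VLAN plumbing above lets userspace attach a VLAN id to RTM_NEWNEIGH/RTM_DELNEIGH requests (the iproute2 equivalent is roughly `bridge fdb add <mac> dev <port> vlan 100 self`). A hedged raw-netlink sketch follows; the ifindex of 2 and the MAC address are assumptions, and the kernel's ack is not read back.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

struct fdb_req {
	struct nlmsghdr	nh;
	struct ndmsg	ndm;
	char		attrs[64];
};

static void add_attr(struct nlmsghdr *nh, unsigned short type,
		     const void *data, int len)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	unsigned short vid = 100;	/* must be 1..4094, as fdb_vid_parse() enforces */
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct fdb_req req;
	int fd;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nh.nlmsg_type = RTM_NEWNEIGH;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	req.ndm.ndm_family = PF_BRIDGE;
	req.ndm.ndm_ifindex = 2;	/* assumed ifindex of the port */
	req.ndm.ndm_state = NUD_PERMANENT;
	req.ndm.ndm_flags = NTF_SELF;	/* address the port itself, not its master */

	add_attr(&req.nh, NDA_LLADDR, mac, sizeof(mac));
	add_attr(&req.nh, NDA_VLAN, &vid, sizeof(vid));	/* new in this series */

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || sendto(fd, &req, req.nh.nlmsg_len, 0,
			     (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("RTM_NEWNEIGH");
	if (fd >= 0)
		close(fd);
	return 0;
}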
diff --git a/net/core/scm.c b/net/core/scm.c
index b442e7e25e60..3b6899b7d810 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -129,8 +129,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
struct cmsghdr *cmsg;
int err;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
- {
+ for_each_cmsghdr(cmsg, msg) {
err = -EINVAL;
/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
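Editor's note: for_each_cmsghdr() is the kernel-side spelling of the familiar CMSG_FIRSTHDR()/CMSG_NXTHDR() walk. The userspace counterpart, shown here on a locally built SCM_RIGHTS control buffer so it runs without any socket traffic:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int fd = STDIN_FILENO;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* Build one SCM_RIGHTS control message carrying a file descriptor. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	/* Walk it the way __scm_send() now does with for_each_cmsghdr(). */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		printf("cmsg level=%d type=%d len=%zu\n",
		       cmsg->cmsg_level, cmsg->cmsg_type,
		       (size_t)cmsg->cmsg_len);
	return 0;
}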
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32e31c299631..ae13ef6b3ea7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -265,7 +265,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(&fclones->fclone_ref, 1);
- fclones->skb2.fclone = SKB_FCLONE_FREE;
+ fclones->skb2.fclone = SKB_FCLONE_CLONE;
fclones->skb2.pfmemalloc = pfmemalloc;
}
out:
@@ -336,59 +336,85 @@ struct netdev_alloc_cache {
unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+ gfp_t gfp_mask)
{
- struct netdev_alloc_cache *nc;
- void *data = NULL;
- int order;
- unsigned long flags;
+ const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
+ struct page *page = NULL;
+ gfp_t gfp = gfp_mask;
+
+ if (order) {
+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+ page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ nc->frag.size = PAGE_SIZE << (page ? order : 0);
+ }
- local_irq_save(flags);
- nc = this_cpu_ptr(&netdev_alloc_cache);
- if (unlikely(!nc->frag.page)) {
+ if (unlikely(!page))
+ page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+ nc->frag.page = page;
+
+ return page;
+}
+
+static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
+ struct page *page = nc->frag.page;
+ unsigned int size;
+ int offset;
+
+ if (unlikely(!page)) {
refill:
- for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
- gfp_t gfp = gfp_mask;
+ page = __page_frag_refill(nc, gfp_mask);
+ if (!page)
+ return NULL;
+
+ /* if size can vary use frag.size else just use PAGE_SIZE */
+ size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
- nc->frag.page = alloc_pages(gfp, order);
- if (likely(nc->frag.page))
- break;
- if (--order < 0)
- goto end;
- }
- nc->frag.size = PAGE_SIZE << order;
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
- atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
- &nc->frag.page->_count);
- nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
- nc->frag.offset = 0;
+ atomic_add(size - 1, &page->_count);
+
+ /* reset page count bias and offset to start of new frag */
+ nc->pagecnt_bias = size;
+ nc->frag.offset = size;
}
- if (nc->frag.offset + fragsz > nc->frag.size) {
- if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
- if (!atomic_sub_and_test(nc->pagecnt_bias,
- &nc->frag.page->_count))
- goto refill;
- /* OK, page count is 0, we can safely set it */
- atomic_set(&nc->frag.page->_count,
- NETDEV_PAGECNT_MAX_BIAS);
- } else {
- atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
- &nc->frag.page->_count);
- }
- nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
- nc->frag.offset = 0;
+ offset = nc->frag.offset - fragsz;
+ if (unlikely(offset < 0)) {
+ if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+ goto refill;
+
+ /* if size can vary use frag.size else just use PAGE_SIZE */
+ size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
+
+ /* OK, page count is 0, we can safely set it */
+ atomic_set(&page->_count, size);
+
+ /* reset page count bias and offset to start of new frag */
+ nc->pagecnt_bias = size;
+ offset = size - fragsz;
}
- data = page_address(nc->frag.page) + nc->frag.offset;
- nc->frag.offset += fragsz;
nc->pagecnt_bias--;
-end:
+ nc->frag.offset = offset;
+
+ return page_address(page) + offset;
+}
+
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+ unsigned long flags;
+ void *data;
+
+ local_irq_save(flags);
+ data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
local_irq_restore(flags);
return data;
}
@@ -406,11 +432,25 @@ void *netdev_alloc_frag(unsigned int fragsz)
}
EXPORT_SYMBOL(netdev_alloc_frag);
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+ return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+}
+
+void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
/**
- * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
- * @dev: network device to receive on
+ * __alloc_rx_skb - allocate an skbuff for rx
* @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
+ * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ * allocations in case we have to fallback to __alloc_skb()
+ * If SKB_ALLOC_NAPI is set, page fragment will be allocated
+ * from napi_cache instead of netdev_cache.
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has unspecified headroom built in. Users should allocate
@@ -419,11 +459,11 @@ EXPORT_SYMBOL(netdev_alloc_frag);
*
* %NULL is returned if there is no free memory.
*/
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length, gfp_t gfp_mask)
+static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
+ int flags)
{
struct sk_buff *skb = NULL;
- unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+ unsigned int fragsz = SKB_DATA_ALIGN(length) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
@@ -432,7 +472,9 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
- data = __netdev_alloc_frag(fragsz, gfp_mask);
+ data = (flags & SKB_ALLOC_NAPI) ?
+ __napi_alloc_frag(fragsz, gfp_mask) :
+ __netdev_alloc_frag(fragsz, gfp_mask);
if (likely(data)) {
skb = build_skb(data, fragsz);
@@ -440,17 +482,72 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
put_page(virt_to_head_page(data));
}
} else {
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+ skb = __alloc_skb(length, gfp_mask,
SKB_ALLOC_RX, NUMA_NO_NODE);
}
+ return skb;
+}
+
+/**
+ * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has NET_SKB_PAD headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ length += NET_SKB_PAD;
+ skb = __alloc_rx_skb(length, gfp_mask, 0);
+
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
}
+
return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
+/**
+ * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
+ * @napi: napi instance this buffer was allocated for
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
+ *
+ * Allocate a new sk_buff for use in NAPI receive. This buffer will
+ * attempt to allocate the head from a special reserved region used
+ * only for NAPI Rx allocation. By doing this we can save several
+ * CPU cycles by avoiding having to disable and re-enable IRQs.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ length += NET_SKB_PAD + NET_IP_ALIGN;
+ skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+
+ if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb->dev = napi->dev;
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(__napi_alloc_skb);
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize)
{
@@ -541,26 +638,27 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(skbuff_head_cache, skb);
- break;
+ return;
case SKB_FCLONE_ORIG:
fclones = container_of(skb, struct sk_buff_fclones, skb1);
- if (atomic_dec_and_test(&fclones->fclone_ref))
- kmem_cache_free(skbuff_fclone_cache, fclones);
- break;
- case SKB_FCLONE_CLONE:
- fclones = container_of(skb, struct sk_buff_fclones, skb2);
-
- /* The clone portion is available for
- * fast-cloning again.
+ /* We usually free the clone (TX completion) before original skb
+ * This test would have no chance to be true for the clone,
+ * while here, branch prediction will be good.
*/
- skb->fclone = SKB_FCLONE_FREE;
+ if (atomic_read(&fclones->fclone_ref) == 1)
+ goto fastpath;
+ break;
- if (atomic_dec_and_test(&fclones->fclone_ref))
- kmem_cache_free(skbuff_fclone_cache, fclones);
+ default: /* SKB_FCLONE_CLONE */
+ fclones = container_of(skb, struct sk_buff_fclones, skb2);
break;
}
+ if (!atomic_dec_and_test(&fclones->fclone_ref))
+ return;
+fastpath:
+ kmem_cache_free(skbuff_fclone_cache, fclones);
}
static void skb_release_head_state(struct sk_buff *skb)
@@ -872,15 +970,15 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct sk_buff_fclones *fclones = container_of(skb,
struct sk_buff_fclones,
skb1);
- struct sk_buff *n = &fclones->skb2;
+ struct sk_buff *n;
if (skb_orphan_frags(skb, gfp_mask))
return NULL;
if (skb->fclone == SKB_FCLONE_ORIG &&
- n->fclone == SKB_FCLONE_FREE) {
- n->fclone = SKB_FCLONE_CLONE;
- atomic_inc(&fclones->fclone_ref);
+ atomic_read(&fclones->fclone_ref) == 1) {
+ n = &fclones->skb2;
+ atomic_set(&fclones->fclone_ref, 2);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
@@ -3002,7 +3100,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
if (nskb->len == len + doffset)
goto perform_csum_check;
- if (!sg) {
+ if (!sg && !nskb->remcsum_offload) {
nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
@@ -3074,7 +3172,7 @@ skip_fraglist:
nskb->truesize += nskb->data_len;
perform_csum_check:
- if (!csum) {
+ if (!csum && !nskb->remcsum_offload) {
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
@@ -3088,6 +3186,16 @@ perform_csum_check:
* (see validate_xmit_skb_list() for example)
*/
segs->prev = tail;
+
+ /* Following permits correct backpressure, for protocols
+ * using skb_set_owner_w().
+ * Idea is to transfer ownership from head_skb to last segment.
+ */
+ if (head_skb->destructor == sock_wfree) {
+ swap(tail->truesize, head_skb->truesize);
+ swap(tail->destructor, head_skb->destructor);
+ swap(tail->sk, head_skb->sk);
+ }
return segs;
err:
@@ -4130,6 +4238,113 @@ err_free:
}
EXPORT_SYMBOL(skb_vlan_untag);
+int skb_ensure_writable(struct sk_buff *skb, int write_len)
+{
+ if (!pskb_may_pull(skb, write_len))
+ return -ENOMEM;
+
+ if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+ return 0;
+
+ return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(skb_ensure_writable);
+
+/* remove VLAN header from packet and update csum accordingly. */
+static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_hdr *vhdr;
+ unsigned int offset = skb->data - skb_mac_header(skb);
+ int err;
+
+ __skb_push(skb, offset);
+ err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
+ if (unlikely(err))
+ goto pull;
+
+ skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+
+ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+ *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, VLAN_HLEN);
+
+ vlan_set_encap_proto(skb, vhdr);
+ skb->mac_header += VLAN_HLEN;
+
+ if (skb_network_offset(skb) < ETH_HLEN)
+ skb_set_network_header(skb, ETH_HLEN);
+
+ skb_reset_mac_len(skb);
+pull:
+ __skb_pull(skb, offset);
+
+ return err;
+}
+
+int skb_vlan_pop(struct sk_buff *skb)
+{
+ u16 vlan_tci;
+ __be16 vlan_proto;
+ int err;
+
+ if (likely(vlan_tx_tag_present(skb))) {
+ skb->vlan_tci = 0;
+ } else {
+ if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
+
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (err)
+ return err;
+ }
+ /* move next vlan tag to hw accel tag */
+ if (likely((skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
+
+ vlan_proto = skb->protocol;
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (unlikely(err))
+ return err;
+
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+ return 0;
+}
+EXPORT_SYMBOL(skb_vlan_pop);
+
+int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+{
+ if (vlan_tx_tag_present(skb)) {
+ unsigned int offset = skb->data - skb_mac_header(skb);
+ int err;
+
+ /* __vlan_insert_tag expect skb->data pointing to mac header.
+ * So change skb->data before calling it and change back to
+ * original position later
+ */
+ __skb_push(skb, offset);
+ err = __vlan_insert_tag(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
+ if (err)
+ return err;
+ skb->protocol = skb->vlan_proto;
+ skb->mac_len += VLAN_HLEN;
+ __skb_pull(skb, offset);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
+ }
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+ return 0;
+}
+EXPORT_SYMBOL(skb_vlan_push);
+
/**
* alloc_skb_with_frags - allocate skb with page frags
*
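Editor's note: the reworked fragment allocator above switches from an offset that grows upward to one that counts down from the page size, so an allocation is a single subtraction and the refill condition is simply offset < 0. The standalone model below replays only that offset arithmetic; page refcounting (_count, pagecnt_bias) is deliberately omitted, and MODEL_PAGE_SIZE is an arbitrary stand-in for PAGE_SIZE << order.

#include <stdlib.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 32768u	/* stand-in for PAGE_SIZE << order */

struct frag_cache {
	unsigned char *page;
	unsigned int size;
	int offset;
};

static void *frag_alloc(struct frag_cache *nc, unsigned int fragsz)
{
	int offset;

	if (!nc->page) {
		nc->page = malloc(MODEL_PAGE_SIZE);
		if (!nc->page)
			return NULL;
		nc->size = MODEL_PAGE_SIZE;
		nc->offset = nc->size;	/* start at the end, carve downward */
	}

	offset = nc->offset - (int)fragsz;
	if (offset < 0) {
		/* This is where the kernel checks the page refcount and
		 * either reuses or refills the page; here we just grab a
		 * fresh one.
		 */
		free(nc->page);
		nc->page = malloc(MODEL_PAGE_SIZE);
		if (!nc->page)
			return NULL;
		nc->size = MODEL_PAGE_SIZE;
		offset = nc->size - fragsz;
	}

	nc->offset = offset;
	return nc->page + offset;
}

int main(void)
{
	struct frag_cache nc = { 0 };
	void *a = frag_alloc(&nc, 2048);
	void *b = frag_alloc(&nc, 2048);

	printf("frag a=%p b=%p (b sits %ld bytes below a)\n",
	       a, b, (long)((char *)a - (char *)b));
	return 0;
}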
diff --git a/net/core/sock.c b/net/core/sock.c
index 15e0c67b1069..1c7a33db1314 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -888,6 +888,19 @@ set_rcvbuf:
}
break;
+ case SO_ATTACH_BPF:
+ ret = -EINVAL;
+ if (optlen == sizeof(u32)) {
+ u32 ufd;
+
+ ret = -EFAULT;
+ if (copy_from_user(&ufd, optval, sizeof(ufd)))
+ break;
+
+ ret = sk_attach_bpf(ufd, sk);
+ }
+ break;
+
case SO_DETACH_FILTER:
ret = sk_detach_filter(sk);
break;
@@ -1213,6 +1226,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_max_pacing_rate;
break;
+ case SO_INCOMING_CPU:
+ v.val = sk->sk_incoming_cpu;
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -1517,6 +1534,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err = 0;
newsk->sk_priority = 0;
+ newsk->sk_incoming_cpu = raw_smp_processor_id();
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
@@ -1713,18 +1731,34 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
}
EXPORT_SYMBOL(sock_kmalloc);
-/*
- * Free an option memory block.
+/* Free an option memory block. Note, we actually want the inline
+ * here as this allows gcc to detect the nullify and fold away the
+ * condition entirely.
*/
-void sock_kfree_s(struct sock *sk, void *mem, int size)
+static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
+ const bool nullify)
{
if (WARN_ON_ONCE(!mem))
return;
- kfree(mem);
+ if (nullify)
+ kzfree(mem);
+ else
+ kfree(mem);
atomic_sub(size, &sk->sk_omem_alloc);
}
+
+void sock_kfree_s(struct sock *sk, void *mem, int size)
+{
+ __sock_kfree_s(sk, mem, size, false);
+}
EXPORT_SYMBOL(sock_kfree_s);
+void sock_kzfree_s(struct sock *sk, void *mem, int size)
+{
+ __sock_kfree_s(sk, mem, size, true);
+}
+EXPORT_SYMBOL(sock_kzfree_s);
+
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
I think, these locks should be removed for datagram sockets.
*/
@@ -2457,7 +2491,7 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
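Editor's note: SO_INCOMING_CPU (read side shown above in sock_getsockopt) reports the CPU that last handled packets for the socket, which lets an application pin worker threads accordingly. A minimal query sketch, assuming userspace headers new enough to define the constant; the TCP socket is created only so there is something to query.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int cpu = -1;
	socklen_t len = sizeof(cpu);

#ifdef SO_INCOMING_CPU
	if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
		printf("last softirq CPU for this socket: %d\n", cpu);
	else
		perror("SO_INCOMING_CPU");
#else
	(void)len;
	fprintf(stderr, "SO_INCOMING_CPU not defined in these headers\n");
#endif
	close(fd);
	return 0;
}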
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cf9cd13509a7..31baba2a71ce 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -26,6 +26,8 @@ static int zero = 0;
static int one = 1;
static int ushort_max = USHRT_MAX;
+static int net_msg_warn; /* Unused, but still a sysctl */
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -215,6 +217,18 @@ static int set_default_qdisc(struct ctl_table *table, int write,
}
#endif
+static int proc_do_rss_key(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table fake_table;
+ char buf[NETDEV_RSS_KEY_LEN * 3];
+
+ snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
+ fake_table.data = buf;
+ fake_table.maxlen = sizeof(buf);
+ return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+}
+
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -263,6 +277,13 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "netdev_rss_key",
+ .data = &netdev_rss_key,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_do_rss_key,
+ },
#ifdef CONFIG_BPF_JIT
{
.procname = "bpf_jit_enable",
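Editor's note: the new read-only sysctl renders the host RSS key as a hex string, so it can be read like any other /proc/sys entry:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/sys/net/core/netdev_rss_key", "r");

	if (!f) {
		perror("netdev_rss_key");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("host RSS key: %s", buf);
	fclose(f);
	return 0;
}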
diff --git a/net/core/utils.c b/net/core/utils.c
index efc76dd9dcd1..7b803884c162 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -33,9 +33,6 @@
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-int net_msg_warn __read_mostly = 1;
-EXPORT_SYMBOL(net_msg_warn);
-
DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10);
/*
* All net warning printk()s should be guarded by this function.
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index ba07824af4c0..bd9e718c2a20 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -218,7 +218,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
* different underlying data structure.
*/
for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
- u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
+ u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN);
av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index c67816647cce..e4c144fa706f 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -22,8 +22,8 @@
/*
* DCCP - specific warning and debugging macros.
*/
-#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
- __func__, ##a)
+#define DCCP_WARN(fmt, ...) \
+ net_warn_ratelimited("%s: " fmt, __func__, ##__VA_ARGS__)
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
__FILE__, __LINE__, __func__)
#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
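Editor's note: the DCCP_WARN change swaps the GNU named-variadic form (`a...`) for the standard `...`/`__VA_ARGS__` form, with `##` swallowing the trailing comma when no extra arguments are passed. A standalone illustration of that macro pattern (MY_WARN is a made-up name):

#include <stdio.h>

#define MY_WARN(fmt, ...) \
	fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

int main(void)
{
	MY_WARN("plain message\n");		/* no variadic arguments */
	MY_WARN("value is %d\n", 42);		/* with arguments */
	return 0;
}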
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 9733ddbc96cb..1704948e6a12 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -478,7 +478,7 @@ static struct dccp_feat_entry *
* @fn_list: feature-negotiation list to update
* @feat: one of %dccp_feature_numbers
* @local: whether local (1) or remote (0) @feat_num is meant
- * @needs_mandatory: whether to use Mandatory feature negotiation options
+ * @mandatory: whether to use Mandatory feature negotiation options
* @fval: pointer to NN/SP value to be inserted (will be copied)
*/
static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
@@ -1050,7 +1050,7 @@ static u8 dccp_feat_prefer(u8 preferred_value, u8 *array, u8 array_len)
/**
* dccp_feat_reconcile - Reconcile SP preference lists
- * @fval: SP list to reconcile into
+ * @fv: SP list to reconcile into
* @arr: received SP preference list
* @len: length of @arr in bytes
* @is_server: whether this side is the server (and @fv is the server's list)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 3c8ec7d4a34e..3bd14e885396 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -537,7 +537,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
case DCCP_PKT_DATAACK:
case DCCP_PKT_ACK:
/*
- * FIXME: we should be reseting the PARTOPEN (DELACK) timer
+ * FIXME: we should be resetting the PARTOPEN (DELACK) timer
* here but only if we haven't used the DELACK timer for
* something else, like sending a delayed ack for a TIMESTAMP
* echo, etc, for now were not clearing it, sending an extra
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6ca645c4b48e..e45b968613a4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -140,7 +140,6 @@ failure:
inet->inet_dport = 0;
goto out;
}
-
EXPORT_SYMBOL_GPL(dccp_v4_connect);
/*
@@ -376,7 +375,6 @@ void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
inet->inet_saddr,
inet->inet_daddr);
}
-
EXPORT_SYMBOL_GPL(dccp_v4_send_check);
static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
@@ -444,7 +442,6 @@ put_and_exit:
dccp_done(newsk);
goto exit;
}
-
EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
@@ -670,7 +667,6 @@ drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
-
EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -729,7 +725,6 @@ discard:
kfree_skb(skb);
return 0;
}
-
EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
/**
@@ -802,7 +797,6 @@ int dccp_invalid_packet(struct sk_buff *skb)
return 0;
}
-
EXPORT_SYMBOL_GPL(dccp_invalid_packet);
/* this is called when real data arrives */
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5ab6627cf370..e171b780b499 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
- struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+ struct cmsghdr *cmsg;
/*
* Assign an (opaque) qpolicy priority value to skb->priority.
@@ -717,8 +717,7 @@ static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
*/
skb->priority = 0;
- for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
-
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
@@ -781,7 +780,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out_release;
skb_reserve(skb, sk->sk_prot->max_header);
- rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
goto out_discard;
@@ -896,7 +895,7 @@ verify_sock_status:
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
- if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
+ if (skb_copy_datagram_msg(skb, 0, msg, len)) {
/* Exception. Bailout! */
len = -EFAULT;
break;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 25733d538147..810228646de3 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1760,7 +1760,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((chunk + copied) > size)
chunk = size - copied;
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (memcpy_to_msg(msg, skb->data, chunk)) {
rv = -EFAULT;
break;
}
@@ -2032,7 +2032,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
goto out;
}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index c8121ceddb9e..7ca7c3143da3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -591,7 +591,7 @@ static const struct file_operations dn_neigh_seq_fops = {
void __init dn_neigh_init(void)
{
- neigh_table_init(&dn_neigh_table);
+ neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table);
proc_create("decnet_neigh", S_IRUGO, init_net.proc_net,
&dn_neigh_seq_fops);
}
@@ -599,5 +599,5 @@ void __init dn_neigh_init(void)
void __exit dn_neigh_cleanup(void)
{
remove_proc_entry("decnet_neigh", init_net.proc_net);
- neigh_table_clear(&dn_neigh_table);
+ neigh_table_clear(NEIGH_DN_TABLE, &dn_neigh_table);
}
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index a585fd6352eb..5f8ac404535b 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -11,6 +11,17 @@ config NET_DSA
if NET_DSA
+config NET_DSA_HWMON
+ bool "Distributed Switch Architecture HWMON support"
+ default y
+ depends on HWMON && !(NET_DSA=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose thermal sensor data on switches supported
+ by the Distributed Switch Architecture.
+
+ Some of those switches contain thermal sensors. This data is available
+ via the hwmon sysfs interface and exposes the onboard sensors.
+
# tagging formats
config NET_DSA_TAG_BRCM
bool
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 6317b41c99b0..37317149f918 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -9,6 +9,9 @@
* (at your option) any later version.
*/
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -17,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/sysfs.h>
#include "dsa_priv.h"
char dsa_driver_version[] = "0.1";
@@ -71,6 +75,104 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
return ret;
}
+/* hwmon support ************************************************************/
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ds->drv->get_temp(ds, &temp);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+static DEVICE_ATTR_RO(temp1_input);
+
+static ssize_t temp1_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ds->drv->get_temp_limit(ds, &temp);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t temp1_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = kstrtoint(buf, 0, &temp);
+ if (ret < 0)
+ return ret;
+
+ ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+
+static ssize_t temp1_max_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ bool alarm;
+ int ret;
+
+ ret = ds->drv->get_temp_alarm(ds, &alarm);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", alarm);
+}
+static DEVICE_ATTR_RO(temp1_max_alarm);
+
+static struct attribute *dsa_hwmon_attrs[] = {
+ &dev_attr_temp1_input.attr, /* 0 */
+ &dev_attr_temp1_max.attr, /* 1 */
+ &dev_attr_temp1_max_alarm.attr, /* 2 */
+ NULL
+};
+
+static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct dsa_switch *ds = dev_get_drvdata(dev);
+ struct dsa_switch_driver *drv = ds->drv;
+ umode_t mode = attr->mode;
+
+ if (index == 1) {
+ if (!drv->get_temp_limit)
+ mode = 0;
+ else if (drv->set_temp_limit)
+ mode |= S_IWUSR;
+ } else if (index == 2 && !drv->get_temp_alarm) {
+ mode = 0;
+ }
+ return mode;
+}
+
+static const struct attribute_group dsa_hwmon_group = {
+ .attrs = dsa_hwmon_attrs,
+ .is_visible = dsa_hwmon_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(dsa_hwmon);
+
+#endif /* CONFIG_NET_DSA_HWMON */
/* basic switch operations **************************************************/
static struct dsa_switch *
@@ -90,12 +192,12 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
*/
drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
if (drv == NULL) {
- printk(KERN_ERR "%s[%d]: could not detect attached switch\n",
- dst->master_netdev->name, index);
+ netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
+ index);
return ERR_PTR(-EINVAL);
}
- printk(KERN_INFO "%s[%d]: detected a %s switch\n",
- dst->master_netdev->name, index, name);
+ netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
+ index, name);
/*
@@ -123,7 +225,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
if (!strcmp(name, "cpu")) {
if (dst->cpu_switch != -1) {
- printk(KERN_ERR "multiple cpu ports?!\n");
+ netdev_err(dst->master_netdev,
+ "multiple cpu ports?!\n");
ret = -EINVAL;
goto out;
}
@@ -218,16 +321,39 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]);
if (slave_dev == NULL) {
- printk(KERN_ERR "%s[%d]: can't create dsa "
- "slave device for port %d(%s)\n",
- dst->master_netdev->name,
- index, i, pd->port_names[i]);
+ netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
+ index, i, pd->port_names[i]);
continue;
}
ds->ports[i] = slave_dev;
}
+#ifdef CONFIG_NET_DSA_HWMON
+ /* If the switch provides a temperature sensor,
+ * register with hardware monitoring subsystem.
+ * Treat registration error as non-fatal and ignore it.
+ */
+ if (drv->get_temp) {
+ const char *netname = netdev_name(dst->master_netdev);
+ char hname[IFNAMSIZ + 1];
+ int i, j;
+
+ /* Create valid hwmon 'name' attribute */
+ for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
+ if (isalnum(netname[i]))
+ hname[j++] = netname[i];
+ }
+ hname[j] = '\0';
+ scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d",
+ hname, index);
+ ds->hwmon_dev = hwmon_device_register_with_groups(NULL,
+ ds->hwmon_name, ds, dsa_hwmon_groups);
+ if (IS_ERR(ds->hwmon_dev))
+ ds->hwmon_dev = NULL;
+ }
+#endif /* CONFIG_NET_DSA_HWMON */
+
return ds;
out_free:
@@ -239,6 +365,10 @@ out:
static void dsa_switch_destroy(struct dsa_switch *ds)
{
+#ifdef CONFIG_NET_DSA_HWMON
+ if (ds->hwmon_dev)
+ hwmon_device_unregister(ds->hwmon_dev);
+#endif
}
#ifdef CONFIG_PM_SLEEP
@@ -396,7 +526,8 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
/* First time routing table allocation */
if (!cd->rtable) {
- cd->rtable = kmalloc(pd->nr_chips * sizeof(s8), GFP_KERNEL);
+ cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8),
+ GFP_KERNEL);
if (!cd->rtable)
return -ENOMEM;
@@ -447,6 +578,7 @@ static int dsa_of_probe(struct platform_device *pdev)
const char *port_name;
int chip_index, port_index;
const unsigned int *sw_addr, *port_reg;
+ u32 eeprom_len;
int ret;
mdio = of_parse_phandle(np, "dsa,mii-bus", 0);
@@ -475,8 +607,8 @@ static int dsa_of_probe(struct platform_device *pdev)
if (pd->nr_chips > DSA_MAX_SWITCHES)
pd->nr_chips = DSA_MAX_SWITCHES;
- pd->chip = kzalloc(pd->nr_chips * sizeof(struct dsa_chip_data),
- GFP_KERNEL);
+ pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data),
+ GFP_KERNEL);
if (!pd->chip) {
ret = -ENOMEM;
goto out_free;
@@ -498,6 +630,9 @@ static int dsa_of_probe(struct platform_device *pdev)
if (cd->sw_addr > PHY_MAX_ADDR)
continue;
+ if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ cd->eeprom_len = eeprom_len;
+
for_each_available_child_of_node(child, port) {
port_reg = of_get_property(port, "reg", NULL);
if (!port_reg)
@@ -566,15 +701,13 @@ static inline void dsa_of_remove(struct platform_device *pdev)
static int dsa_probe(struct platform_device *pdev)
{
- static int dsa_version_printed;
struct dsa_platform_data *pd = pdev->dev.platform_data;
struct net_device *dev;
struct dsa_switch_tree *dst;
int i, ret;
- if (!dsa_version_printed++)
- printk(KERN_NOTICE "Distributed Switch Architecture "
- "driver version %s\n", dsa_driver_version);
+ pr_notice_once("Distributed Switch Architecture driver version %s\n",
+ dsa_driver_version);
if (pdev->dev.of_node) {
ret = dsa_of_probe(pdev);
@@ -618,9 +751,8 @@ static int dsa_probe(struct platform_device *pdev)
ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev);
if (IS_ERR(ds)) {
- printk(KERN_ERR "%s[%d]: couldn't create dsa switch "
- "instance (error %ld)\n", dev->name, i,
- PTR_ERR(ds));
+ netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
+ i, PTR_ERR(ds));
continue;
}
@@ -747,7 +879,6 @@ static struct platform_driver dsa_driver = {
.shutdown = dsa_shutdown,
.driver = {
.name = "dsa",
- .owner = THIS_MODULE,
.of_match_table = dsa_of_match_table,
.pm = &dsa_pm_ops,
},
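Editor's note: with NET_DSA_HWMON enabled, the switch sensor shows up as an ordinary hwmon device whose name attribute is "<netdev>_dsa<index>" as constructed above. Reading it is plain sysfs I/O; hwmon0 is an assumed instance number (match the name attribute in practice), and temp1_input is, as usual for hwmon, in millidegrees Celsius.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f || fscanf(f, "%ld", &mdeg) != 1) {
		perror("temp1_input");
		return 1;
	}
	printf("switch temperature: %ld.%03ld C\n",
	       mdeg / 1000, labs(mdeg) % 1000);
	fclose(f);
	return 0;
}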
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ab03e00ffe8f..515569ffde8a 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -249,6 +249,27 @@ static void dsa_slave_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
+static int dsa_slave_get_regs_len(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->get_regs_len)
+ return ds->drv->get_regs_len(ds, p->port);
+
+ return -EOPNOTSUPP;
+}
+
+static void
+dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->get_regs)
+ ds->drv->get_regs(ds, p->port, regs, _p);
+}
+
static int dsa_slave_nway_reset(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -271,6 +292,44 @@ static u32 dsa_slave_get_link(struct net_device *dev)
return -EOPNOTSUPP;
}
+static int dsa_slave_get_eeprom_len(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->pd->eeprom_len)
+ return ds->pd->eeprom_len;
+
+ if (ds->drv->get_eeprom_len)
+ return ds->drv->get_eeprom_len(ds);
+
+ return 0;
+}
+
+static int dsa_slave_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->get_eeprom)
+ return ds->drv->get_eeprom(ds, eeprom, data);
+
+ return -EOPNOTSUPP;
+}
+
+static int dsa_slave_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->set_eeprom)
+ return ds->drv->set_eeprom(ds, eeprom, data);
+
+ return -EOPNOTSUPP;
+}
+
static void dsa_slave_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
@@ -385,8 +444,13 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_settings = dsa_slave_get_settings,
.set_settings = dsa_slave_set_settings,
.get_drvinfo = dsa_slave_get_drvinfo,
+ .get_regs_len = dsa_slave_get_regs_len,
+ .get_regs = dsa_slave_get_regs,
.nway_reset = dsa_slave_nway_reset,
.get_link = dsa_slave_get_link,
+ .get_eeprom_len = dsa_slave_get_eeprom_len,
+ .get_eeprom = dsa_slave_get_eeprom,
+ .set_eeprom = dsa_slave_set_eeprom,
.get_strings = dsa_slave_get_strings,
.get_ethtool_stats = dsa_slave_get_ethtool_stats,
.get_sset_count = dsa_slave_get_sset_count,
@@ -448,7 +512,7 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
}
/* slave device setup *******************************************************/
-static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
+static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct net_device *slave_dev)
{
struct dsa_switch *ds = p->parent;
@@ -468,8 +532,8 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
*/
ret = of_phy_register_fixed_link(port_dn);
if (ret) {
- pr_err("failed to register fixed PHY\n");
- return;
+ netdev_err(slave_dev, "failed to register fixed PHY\n");
+ return ret;
}
phy_is_fixed = true;
phy_dn = port_dn;
@@ -491,12 +555,17 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
*/
if (!p->phy) {
p->phy = ds->slave_mii_bus->phy_map[p->port];
+ if (!p->phy)
+ return -ENODEV;
+
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
} else {
- pr_info("attached PHY at address %d [%s]\n",
- p->phy->addr, p->phy->drv->name);
+ netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
+ p->phy->addr, p->phy->drv->name);
}
+
+ return 0;
}
int dsa_slave_suspend(struct net_device *slave_dev)
@@ -589,12 +658,17 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
p->old_link = -1;
p->old_duplex = -1;
- dsa_slave_phy_setup(p, slave_dev);
+ ret = dsa_slave_phy_setup(p, slave_dev);
+ if (ret) {
+ free_netdev(slave_dev);
+ return NULL;
+ }
ret = register_netdev(slave_dev);
if (ret) {
- printk(KERN_ERR "%s: error %d registering interface %s\n",
- master->name, ret, slave_dev->name);
+ netdev_err(master, "error %d registering interface %s\n",
+ ret, slave_dev->name);
+ phy_disconnect(p->phy);
free_netdev(slave_dev);
return NULL;
}
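Editor's note: the new get_eeprom/set_eeprom and get_regs ethtool hooks are reachable through the standard SIOCETHTOOL ioctl (or simply `ethtool -e <dev>` / `ethtool -d <dev>`). A hedged sketch of a raw ETHTOOL_GEEPROM read; the interface name "lan0" and the 16-byte read length are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eeprom *ee;
	struct ifreq ifr;
	int fd, i;

	ee = calloc(1, sizeof(*ee) + 16);
	if (!ee)
		return 1;
	ee->cmd = ETHTOOL_GEEPROM;
	ee->offset = 0;
	ee->len = 16;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)ee;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GEEPROM");
		return 1;
	}
	for (i = 0; i < (int)ee->len; i++)
		printf("%02x ", ee->data[i]);
	printf("\n");
	return 0;
}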
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index ce90c8bdc658..2dab27063273 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -63,8 +63,6 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
dsa_header[3] = 0x00;
}
- skb->protocol = htons(ETH_P_DSA);
-
skb->dev = p->parent->dst->master_netdev;
dev_queue_xmit(skb);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 94fcce778679..9aeda596f7ec 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -76,8 +76,6 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
edsa_header[7] = 0x00;
}
- skb->protocol = htons(ETH_P_EDSA);
-
skb->dev = p->parent->dst->master_netdev;
dev_queue_xmit(skb);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 115fdca34077..e268f9db8893 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -57,8 +57,6 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
trailer[2] = 0x10;
trailer[3] = 0x00;
- nskb->protocol = htons(ETH_P_TRAILER);
-
nskb->dev = p->parent->dst->master_netdev;
dev_queue_xmit(nskb);
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 44136297b673..27eaa65e88e1 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -49,8 +49,8 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
+#include <linux/ieee802154.h>
#include <net/af_ieee802154.h>
-#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
@@ -58,12 +58,13 @@
#include "reassembly.h"
static LIST_HEAD(lowpan_devices);
+static int lowpan_open_count;
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
struct mutex dev_list_mtx; /* mutex for list ops */
- __be16 fragment_tag;
+ u16 fragment_tag;
};
struct lowpan_dev_record {
@@ -140,24 +141,33 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb,
struct sk_buff *skb_cp;
int stat = NET_RX_SUCCESS;
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+
rcu_read_lock();
list_for_each_entry_rcu(entry, &lowpan_devices, list)
if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
skb_cp = skb_copy(skb, GFP_ATOMIC);
if (!skb_cp) {
- stat = -ENOMEM;
- break;
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return NET_RX_DROP;
}
skb_cp->dev = entry->ldev;
stat = netif_rx(skb_cp);
+ if (stat == NET_RX_DROP)
+ break;
}
rcu_read_unlock();
+ consume_skb(skb);
+
return stat;
}
-static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+static int
+iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
u8 iphc0, iphc1;
struct ieee802154_addr_sa sa, da;
@@ -166,13 +176,13 @@ static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
/* at least two bytes will be used for the encoding */
if (skb->len < 2)
- goto drop;
+ return -EINVAL;
if (lowpan_fetch_skb_u8(skb, &iphc0))
- goto drop;
+ return -EINVAL;
if (lowpan_fetch_skb_u8(skb, &iphc1))
- goto drop;
+ return -EINVAL;
ieee802154_addr_to_sa(&sa, &hdr->source);
ieee802154_addr_to_sa(&da, &hdr->dest);
@@ -187,27 +197,9 @@ static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
else
dap = &da.hwaddr;
- return lowpan_process_data(skb, skb->dev, sap, sa.addr_type,
- IEEE802154_ADDR_LEN, dap, da.addr_type,
- IEEE802154_ADDR_LEN, iphc0, iphc1,
- lowpan_give_skb_to_devices);
-
-drop:
- kfree_skb(skb);
- return -EINVAL;
-}
-
-static int lowpan_set_address(struct net_device *dev, void *p)
-{
- struct sockaddr *sa = p;
-
- if (netif_running(dev))
- return -EBUSY;
-
- /* TODO: validate addr */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
-
- return 0;
+ return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
+ IEEE802154_ADDR_LEN, dap, da.addr_type,
+ IEEE802154_ADDR_LEN, iphc0, iphc1);
}
static struct sk_buff*
@@ -233,7 +225,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
&master_hdr->source, size);
if (rc < 0) {
kfree_skb(frag);
- return ERR_PTR(-rc);
+ return ERR_PTR(rc);
}
} else {
frag = ERR_PTR(-ENOMEM);
@@ -275,7 +267,8 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
skb->mac_len;
- frag_tag = lowpan_dev_info(dev)->fragment_tag++;
+ frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
+ lowpan_dev_info(dev)->fragment_tag++;
frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
frag_hdr[1] = dgram_size & 0xff;
@@ -294,7 +287,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
frag_len + skb_network_header_len(skb));
if (rc) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
- __func__, frag_tag);
+ __func__, ntohs(frag_tag));
goto err;
}
@@ -315,7 +308,7 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
frag_len);
if (rc) {
pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
- __func__, frag_tag, skb_offset);
+ __func__, ntohs(frag_tag), skb_offset);
goto err;
}
} while (skb_unprocessed > frag_cap);
@@ -410,13 +403,6 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
-static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
-}
-
static __le16 lowpan_get_pan_id(const struct net_device *dev)
{
struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -453,7 +439,6 @@ static void lowpan_set_lockdep_class_one(struct net_device *dev,
&lowpan_netdev_xmit_lock_key);
}
-
static int lowpan_dev_init(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
@@ -464,12 +449,10 @@ static int lowpan_dev_init(struct net_device *dev)
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
- .ndo_set_mac_address = lowpan_set_address,
};
static struct ieee802154_mlme_ops lowpan_mlme = {
.get_pan_id = lowpan_get_pan_id,
- .get_phy = lowpan_get_phy,
.get_short_addr = lowpan_get_short_addr,
.get_dsn = lowpan_get_dsn,
};
@@ -515,6 +498,9 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (!netif_running(dev))
goto drop_skb;
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto drop_skb;
+
if (dev->type != ARPHRD_IEEE802154)
goto drop_skb;
@@ -523,55 +509,67 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
/* check that it's our buffer */
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
- skb->protocol = htons(ETH_P_IPV6);
- skb->pkt_type = PACKET_HOST;
-
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(skb, 1);
-
- ret = lowpan_give_skb_to_devices(skb, NULL);
- if (ret == NET_RX_DROP)
- goto drop;
+ return lowpan_give_skb_to_devices(skb, NULL);
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
- ret = process_data(skb, &hdr);
- if (ret == NET_RX_DROP)
- goto drop;
- break;
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
if (ret == 1) {
- ret = process_data(skb, &hdr);
- if (ret == NET_RX_DROP)
- goto drop;
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
+ } else if (ret == -1) {
+ return NET_RX_DROP;
+ } else {
+ return NET_RX_SUCCESS;
}
- break;
case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
if (ret == 1) {
- ret = process_data(skb, &hdr);
- if (ret == NET_RX_DROP)
- goto drop;
+ ret = iphc_decompress(skb, &hdr);
+ if (ret < 0)
+ goto drop_skb;
+
+ return lowpan_give_skb_to_devices(skb, NULL);
+ } else if (ret == -1) {
+ return NET_RX_DROP;
+ } else {
+ return NET_RX_SUCCESS;
}
- break;
default:
break;
}
}
- return NET_RX_SUCCESS;
drop_skb:
kfree_skb(skb);
drop:
return NET_RX_DROP;
}
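(Illustrative aside, not part of the patch: the rewritten lowpan_rcv() above dispatches on the first byte of the frame. The numeric dispatch values below are the RFC 4944/6282 ones and are assumed to match the kernel's LOWPAN_DISPATCH_* macros; everything else in this standalone sketch is invented.)

#include <stdio.h>
#include <stdint.h>

static const char *classify(uint8_t dispatch)
{
	if (dispatch == 0x41)            /* uncompressed IPv6 */
		return "IPV6";
	switch (dispatch & 0xe0) {
	case 0x60:                       /* IPHC compressed datagram */
		return "IPHC";
	case 0xc0:                       /* first fragment */
		return "FRAG1";
	case 0xe0:                       /* subsequent fragment */
		return "FRAGN";
	default:
		return "unknown (dropped)";
	}
}

int main(void)
{
	uint8_t samples[] = { 0x41, 0x78, 0xc3, 0xe5, 0x00 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}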
+static struct packet_type lowpan_packet_type = {
+ .type = htons(ETH_P_IEEE802154),
+ .func = lowpan_rcv,
+};
+
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_device *real_dev;
struct lowpan_dev_record *entry;
+ int ret;
+
+ ASSERT_RTNL();
pr_debug("adding new link\n");
@@ -598,7 +596,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
entry->ldev = dev;
- /* Set the lowpan harware address to the wpan hardware address. */
+ /* Set the lowpan hardware address to the wpan hardware address. */
memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
@@ -606,9 +604,14 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
- register_netdevice(dev);
+ ret = register_netdevice(dev);
+ if (ret >= 0) {
+ if (!lowpan_open_count)
+ dev_add_pack(&lowpan_packet_type);
+ lowpan_open_count++;
+ }
- return 0;
+ return ret;
}
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
@@ -619,6 +622,10 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
ASSERT_RTNL();
+ lowpan_open_count--;
+ if (!lowpan_open_count)
+ dev_remove_pack(&lowpan_packet_type);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
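(Illustrative aside, not part of the patch: the lowpan_open_count handling added in newlink/dellink above is a plain "register the packet handler with the first device, unregister it with the last" refcount. Userspace sketch with invented stand-ins for dev_add_pack()/dev_remove_pack().)

#include <stdio.h>

static int lowpan_open_count;

static void dev_add_pack(void)    { puts("packet handler registered"); }
static void dev_remove_pack(void) { puts("packet handler removed"); }

static void lowpan_link_added(void)
{
	if (!lowpan_open_count)
		dev_add_pack();
	lowpan_open_count++;
}

static void lowpan_link_deleted(void)
{
	lowpan_open_count--;
	if (!lowpan_open_count)
		dev_remove_pack();
}

int main(void)
{
	lowpan_link_added();    /* first link: registers the handler */
	lowpan_link_added();    /* second link: refcount only */
	lowpan_link_deleted();
	lowpan_link_deleted();  /* last link: unregisters the handler */
	return 0;
}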
@@ -681,11 +688,6 @@ static struct notifier_block lowpan_dev_notifier = {
.notifier_call = lowpan_device_event,
};
-static struct packet_type lowpan_packet_type = {
- .type = htons(ETH_P_IEEE802154),
- .func = lowpan_rcv,
-};
-
static int __init lowpan_init_module(void)
{
int err = 0;
@@ -698,8 +700,6 @@ static int __init lowpan_init_module(void)
if (err < 0)
goto out_frag;
- dev_add_pack(&lowpan_packet_type);
-
err = register_netdevice_notifier(&lowpan_dev_notifier);
if (err < 0)
goto out_pack;
@@ -707,7 +707,6 @@ static int __init lowpan_init_module(void)
return 0;
out_pack:
- dev_remove_pack(&lowpan_packet_type);
lowpan_netlink_fini();
out_frag:
lowpan_net_frag_exit();
@@ -719,8 +718,6 @@ static void __exit lowpan_cleanup_module(void)
{
lowpan_netlink_fini();
- dev_remove_pack(&lowpan_packet_type);
-
lowpan_net_frag_exit();
unregister_netdevice_notifier(&lowpan_dev_notifier);
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 3914b1ed4274..9f6970f2a28b 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -2,8 +2,8 @@ obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
ieee802154_6lowpan-y := 6lowpan_rtnl.o reassembly.o
-ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o \
- header_ops.o
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
+ header_ops.o sysfs.o nl802154.o
af_802154-y := af_ieee802154.o raw.o dgram.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h
index 8330a09bfc95..343b63e6f953 100644
--- a/net/ieee802154/af802154.h
+++ b/net/ieee802154/af802154.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 29e0de63001b..d0a1282cdf43 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -103,6 +99,7 @@ static int ieee802154_sock_release(struct socket *sock)
}
return 0;
}
+
static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
@@ -235,7 +232,6 @@ static const struct proto_ops ieee802154_dgram_ops = {
#endif
};
-
/* Create a socket. Initialise the socket, blank the addresses
* set the state.
*/
@@ -324,7 +320,6 @@ drop:
return NET_RX_DROP;
}
-
static struct packet_type ieee802154_packet_type = {
.type = htons(ETH_P_IEEE802154),
.func = ieee802154_rcv,
@@ -358,6 +353,7 @@ err_dgram:
out:
return rc;
}
+
static void __exit af_ieee802154_remove(void)
{
dev_remove_pack(&ieee802154_packet_type);
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
new file mode 100644
index 000000000000..18bc7e738507
--- /dev/null
+++ b/net/ieee802154/core.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include <net/cfg802154.h>
+#include <net/rtnetlink.h>
+
+#include "ieee802154.h"
+#include "nl802154.h"
+#include "sysfs.h"
+#include "core.h"
+
+/* RCU-protected (and RTNL for writers) */
+LIST_HEAD(cfg802154_rdev_list);
+int cfg802154_rdev_list_generation;
+
+static int wpan_phy_match(struct device *dev, const void *data)
+{
+ return !strcmp(dev_name(dev), (const char *)data);
+}
+
+struct wpan_phy *wpan_phy_find(const char *str)
+{
+ struct device *dev;
+
+ if (WARN_ON(!str))
+ return NULL;
+
+ dev = class_find_device(&wpan_phy_class, NULL, str, wpan_phy_match);
+ if (!dev)
+ return NULL;
+
+ return container_of(dev, struct wpan_phy, dev);
+}
+EXPORT_SYMBOL(wpan_phy_find);
+
+struct wpan_phy_iter_data {
+ int (*fn)(struct wpan_phy *phy, void *data);
+ void *data;
+};
+
+static int wpan_phy_iter(struct device *dev, void *_data)
+{
+ struct wpan_phy_iter_data *wpid = _data;
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+
+ return wpid->fn(phy, wpid->data);
+}
+
+int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
+ void *data)
+{
+ struct wpan_phy_iter_data wpid = {
+ .fn = fn,
+ .data = data,
+ };
+
+ return class_for_each_device(&wpan_phy_class, NULL,
+ &wpid, wpan_phy_iter);
+}
+EXPORT_SYMBOL(wpan_phy_for_each);
+
+struct cfg802154_registered_device *
+cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx)
+{
+ struct cfg802154_registered_device *result = NULL, *rdev;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
+ if (rdev->wpan_phy_idx == wpan_phy_idx) {
+ result = rdev;
+ break;
+ }
+ }
+
+ return result;
+}
+
+struct wpan_phy *
+wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
+{
+ static atomic_t wpan_phy_counter = ATOMIC_INIT(0);
+ struct cfg802154_registered_device *rdev;
+ size_t alloc_size;
+
+ alloc_size = sizeof(*rdev) + priv_size;
+ rdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!rdev)
+ return NULL;
+
+ rdev->ops = ops;
+
+ rdev->wpan_phy_idx = atomic_inc_return(&wpan_phy_counter);
+
+ if (unlikely(rdev->wpan_phy_idx < 0)) {
+ /* ugh, wrapped! */
+ atomic_dec(&wpan_phy_counter);
+ kfree(rdev);
+ return NULL;
+ }
+
+ /* atomic_inc_return() makes it start at 1; adjust it to start at 0 */
+ rdev->wpan_phy_idx--;
+
+ mutex_init(&rdev->wpan_phy.pib_lock);
+
+ INIT_LIST_HEAD(&rdev->wpan_dev_list);
+ device_initialize(&rdev->wpan_phy.dev);
+ dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx);
+
+ rdev->wpan_phy.dev.class = &wpan_phy_class;
+ rdev->wpan_phy.dev.platform_data = rdev;
+
+ init_waitqueue_head(&rdev->dev_wait);
+
+ return &rdev->wpan_phy;
+}
+EXPORT_SYMBOL(wpan_phy_new);
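(Illustrative aside, not part of the patch: the index allocation in wpan_phy_new() above, redone with C11 atomics in userspace. atomic_fetch_add() returns the pre-increment value, so adding one mirrors the "starts at 1" behaviour of the kernel's atomic_inc_return(); a negative result signals a wrap and the slot is handed back. All names here are invented for the sketch.)

#include <stdio.h>
#include <stdatomic.h>

static atomic_int phy_counter;

static int alloc_phy_idx(void)
{
	int idx = atomic_fetch_add(&phy_counter, 1) + 1;

	if (idx < 0) {              /* counter wrapped: undo and fail */
		atomic_fetch_sub(&phy_counter, 1);
		return -1;
	}
	return idx - 1;             /* make the sequence start at 0 */
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("wpan-phy%d\n", alloc_phy_idx());
	return 0;
}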
+
+int wpan_phy_register(struct wpan_phy *phy)
+{
+ struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy);
+ int ret;
+
+ rtnl_lock();
+ ret = device_add(&phy->dev);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ list_add_rcu(&rdev->list, &cfg802154_rdev_list);
+ cfg802154_rdev_list_generation++;
+
+ /* TODO phy registered lock */
+ rtnl_unlock();
+
+ /* TODO nl802154 phy notify */
+
+ return 0;
+}
+EXPORT_SYMBOL(wpan_phy_register);
+
+void wpan_phy_unregister(struct wpan_phy *phy)
+{
+ struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy);
+
+ wait_event(rdev->dev_wait, ({
+ int __count;
+ rtnl_lock();
+ __count = rdev->opencount;
+ rtnl_unlock();
+ __count == 0; }));
+
+ rtnl_lock();
+ /* TODO nl802154 phy notify */
+ /* TODO phy registered lock */
+
+ WARN_ON(!list_empty(&rdev->wpan_dev_list));
+
+ /* First remove the hardware from everywhere; this makes
+ * it impossible to find from userspace.
+ */
+ list_del_rcu(&rdev->list);
+ synchronize_rcu();
+
+ cfg802154_rdev_list_generation++;
+
+ device_del(&phy->dev);
+
+ rtnl_unlock();
+}
+EXPORT_SYMBOL(wpan_phy_unregister);
+
+void wpan_phy_free(struct wpan_phy *phy)
+{
+ put_device(&phy->dev);
+}
+EXPORT_SYMBOL(wpan_phy_free);
+
+void cfg802154_dev_free(struct cfg802154_registered_device *rdev)
+{
+ kfree(rdev);
+}
+
+static void
+cfg802154_update_iface_num(struct cfg802154_registered_device *rdev,
+ int iftype, int num)
+{
+ ASSERT_RTNL();
+
+ rdev->num_running_ifaces += num;
+}
+
+static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
+ unsigned long state, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ struct cfg802154_registered_device *rdev;
+
+ if (!wpan_dev)
+ return NOTIFY_DONE;
+
+ rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy);
+
+ /* TODO WARN_ON unspec type */
+
+ switch (state) {
+ /* TODO NETDEV_DEVTYPE */
+ case NETDEV_REGISTER:
+ wpan_dev->identifier = ++rdev->wpan_dev_id;
+ list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
+ rdev->devlist_generation++;
+
+ wpan_dev->netdev = dev;
+ break;
+ case NETDEV_DOWN:
+ cfg802154_update_iface_num(rdev, wpan_dev->iftype, -1);
+
+ rdev->opencount--;
+ wake_up(&rdev->dev_wait);
+ break;
+ case NETDEV_UP:
+ cfg802154_update_iface_num(rdev, wpan_dev->iftype, 1);
+
+ rdev->opencount++;
+ break;
+ case NETDEV_UNREGISTER:
+ /* It is possible to get NETDEV_UNREGISTER
+ * multiple times. To detect that, check
+ * that the interface is still on the list
+ * of registered interfaces, and only then
+ * remove and clean it up.
+ */
+ if (!list_empty(&wpan_dev->list)) {
+ list_del_rcu(&wpan_dev->list);
+ rdev->devlist_generation++;
+ }
+ /* synchronize (so that we won't find this netdev
+ * from other code any more) and then clear the list
+ * head so that the above code can safely check for
+ * !list_empty() to avoid double-cleanup.
+ */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&wpan_dev->list);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cfg802154_netdev_notifier = {
+ .notifier_call = cfg802154_netdev_notifier_call,
+};
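(Illustrative aside, not part of the patch: why the NETDEV_UNREGISTER case above tolerates repeated notifications. After the first pass the entry is unlinked and its list head re-initialised, so list_empty() becomes true and a second pass skips the cleanup. Minimal userspace list to show the idea; all names are invented.)

#include <stdio.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

static void unregister(struct list_head *dev)
{
	if (!list_empty(dev)) {          /* first notification: clean up */
		list_del(dev);
		list_init(dev);          /* make a repeat a no-op */
		puts("cleaned up");
	} else {
		puts("already cleaned up, skipping");
	}
}

int main(void)
{
	struct list_head devices, dev;

	list_init(&devices);
	list_init(&dev);
	list_add(&dev, &devices);

	unregister(&dev);                /* does the work */
	unregister(&dev);                /* harmless repeat */
	return 0;
}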
+
+static int __init wpan_phy_class_init(void)
+{
+ int rc;
+
+ rc = wpan_phy_sysfs_init();
+ if (rc)
+ goto err;
+
+ rc = register_netdevice_notifier(&cfg802154_netdev_notifier);
+ if (rc)
+ goto err_nl;
+
+ rc = ieee802154_nl_init();
+ if (rc)
+ goto err_notifier;
+
+ rc = nl802154_init();
+ if (rc)
+ goto err_ieee802154_nl;
+
+ return 0;
+
+err_ieee802154_nl:
+ ieee802154_nl_exit();
+
+err_notifier:
+ unregister_netdevice_notifier(&cfg802154_netdev_notifier);
+err_nl:
+ wpan_phy_sysfs_exit();
+err:
+ return rc;
+}
+subsys_initcall(wpan_phy_class_init);
+
+static void __exit wpan_phy_class_exit(void)
+{
+ nl802154_exit();
+ ieee802154_nl_exit();
+ unregister_netdevice_notifier(&cfg802154_netdev_notifier);
+ wpan_phy_sysfs_exit();
+}
+module_exit(wpan_phy_class_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
+MODULE_AUTHOR("Dmitry Eremin-Solenikov");
+
diff --git a/net/ieee802154/core.h b/net/ieee802154/core.h
new file mode 100644
index 000000000000..f3e95580caee
--- /dev/null
+++ b/net/ieee802154/core.h
@@ -0,0 +1,46 @@
+#ifndef __IEEE802154_CORE_H
+#define __IEEE802154_CORE_H
+
+#include <net/cfg802154.h>
+
+struct cfg802154_registered_device {
+ const struct cfg802154_ops *ops;
+ struct list_head list;
+
+ /* wpan_phy index, internal only */
+ int wpan_phy_idx;
+
+ /* also protected by devlist_mtx */
+ int opencount;
+ wait_queue_head_t dev_wait;
+
+ /* protected by RTNL only */
+ int num_running_ifaces;
+
+ /* associated wpan interfaces, protected by rtnl or RCU */
+ struct list_head wpan_dev_list;
+ int devlist_generation, wpan_dev_id;
+
+ /* must be last because of the way we do wpan_phy_priv(),
+ * and it should at least be aligned to NETDEV_ALIGN
+ */
+ struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN);
+};
+
+static inline struct cfg802154_registered_device *
+wpan_phy_to_rdev(struct wpan_phy *wpan_phy)
+{
+ BUG_ON(!wpan_phy);
+ return container_of(wpan_phy, struct cfg802154_registered_device,
+ wpan_phy);
+}
+
+extern struct list_head cfg802154_rdev_list;
+extern int cfg802154_rdev_list_generation;
+
+/* free object */
+void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
+struct cfg802154_registered_device *
+cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
+
+#endif /* __IEEE802154_CORE_H */
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index ef2ad8aaef13..d1930b70c4aa 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -27,9 +23,9 @@
#include <linux/if_arp.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/ieee802154.h>
#include <net/sock.h>
#include <net/af_ieee802154.h>
-#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <asm/ioctls.h>
@@ -158,7 +154,6 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
-
}
return -ENOIOCTLCMD;
@@ -280,7 +275,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_skb;
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto out_skb;
@@ -324,7 +319,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
}
/* FIXME: skip headers if necessary ?! */
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index c09294e39ca6..a051b6993177 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -14,8 +14,9 @@
* Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
*/
+#include <linux/ieee802154.h>
+
#include <net/mac802154.h>
-#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
static int
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index 5d352f86979e..a5d7515b7f62 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -10,16 +10,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef IEEE_802154_LOCAL_H
#define IEEE_802154_LOCAL_H
int __init ieee802154_nl_init(void);
-void __exit ieee802154_nl_exit(void);
+void ieee802154_nl_exit(void);
#define IEEE802154_OP(_cmd, _func) \
{ \
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 9222966f5e6d..fa1464762d0d 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -1,5 +1,5 @@
/*
- * Netlink inteface for IEEE 802.15.4 stack
+ * Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -77,7 +73,7 @@ out:
}
struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
- int flags, u8 req)
+ int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -151,7 +147,6 @@ static const struct genl_multicast_group ieee802154_mcgrps[] = {
[IEEE802154_BEACON_MCGRP] = { .name = IEEE802154_MCAST_BEACON_NAME, },
};
-
int __init ieee802154_nl_init(void)
{
return genl_register_family_with_ops_groups(&nl802154_family,
@@ -159,7 +154,7 @@ int __init ieee802154_nl_init(void)
ieee802154_mcgrps);
}
-void __exit ieee802154_nl_exit(void)
+void ieee802154_nl_exit(void)
{
genl_unregister_family(&nl802154_family);
}
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index c6bfe22bfa5e..cd919493c976 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -1,5 +1,5 @@
/*
- * Netlink inteface for IEEE 802.15.4 stack
+ * Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -26,16 +22,15 @@
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
+#include <linux/ieee802154.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/nl802154.h>
#include <linux/export.h>
#include <net/af_ieee802154.h>
-#include <net/nl802154.h>
-#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
#include "ieee802154.h"
@@ -59,186 +54,7 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla)
return cpu_to_le16(nla_get_u16(nla));
}
-int ieee802154_nl_assoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr,
- u8 cap)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- if (addr->mode != IEEE802154_ADDR_LONG) {
- pr_err("%s: received non-long source address!\n", __func__);
- return -EINVAL;
- }
-
- msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr) ||
- nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR,
- addr->extended_addr) ||
- nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
- goto nla_put_failure;
-
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
-
-int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr,
- u8 status)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr) ||
- nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
- nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
- goto nla_put_failure;
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
-
-int ieee802154_nl_disassoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr,
- u8 reason)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr))
- goto nla_put_failure;
- if (addr->mode == IEEE802154_ADDR_LONG) {
- if (nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR,
- addr->extended_addr))
- goto nla_put_failure;
- } else {
- if (nla_put_shortaddr(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
- addr->short_addr))
- goto nla_put_failure;
- }
- if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
- goto nla_put_failure;
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
-
-int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr) ||
- nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
- goto nla_put_failure;
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
-
-int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
- __le16 coord_addr)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr) ||
- nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_SHORT_ADDR,
- coord_addr) ||
- nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
- goto nla_put_failure;
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
-
-int ieee802154_nl_scan_confirm(struct net_device *dev,
- u8 status, u8 scan_type,
- u32 unscanned, u8 page,
- u8 *edl/* , struct list_head *pan_desc_list */)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
- if (!msg)
- return -ENOBUFS;
-
- if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
- nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
- nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr) ||
- nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
- nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
- nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
- nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
- (edl &&
- nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
- goto nla_put_failure;
- return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
-
-int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
+static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
{
struct sk_buff *msg;
@@ -278,8 +94,9 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
goto out;
ops = ieee802154_mlme_ops(dev);
- phy = ops->get_phy(dev);
+ phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
+ get_device(&phy->dev);
short_addr = ops->get_short_addr(dev);
pan_id = ops->get_pan_id(dev);
@@ -296,7 +113,9 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
if (ops->get_mac_params) {
struct ieee802154_mac_params params;
+ rtnl_lock();
ops->get_mac_params(dev, &params);
+ rtnl_unlock();
if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
params.transmit_power) ||
@@ -347,7 +166,10 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
if (!dev)
return NULL;
- if (dev->type != ARPHRD_IEEE802154) {
+ /* Checking the MTU is currently a hack because lowpan and
+ * wpan share the same ARPHRD type.
+ */
+ if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
dev_put(dev);
return NULL;
}
@@ -481,7 +303,7 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
u8 channel, bcn_ord, sf_ord;
u8 page;
int pan_coord, blx, coord_realign;
- int ret = -EOPNOTSUPP;
+ int ret = -EBUSY;
if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
!info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
@@ -497,9 +319,15 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
- if (!ieee802154_mlme_ops(dev)->start_req)
+
+ if (netif_running(dev))
goto out;
+ if (!ieee802154_mlme_ops(dev)->start_req) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
addr.mode = IEEE802154_ADDR_SHORT;
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
@@ -518,15 +346,21 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
else
page = 0;
-
if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
dev_put(dev);
return -EINVAL;
}
+ rtnl_lock();
ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
bcn_ord, sf_ord, pan_coord, blx, coord_realign);
+ rtnl_unlock();
+
+ /* FIXME: add validation for unused parameters to be sane
+ * for SoftMAC
+ */
+ ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
out:
dev_put(dev);
@@ -562,7 +396,6 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
else
page = 0;
-
ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
page, duration);
@@ -616,7 +449,11 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
for_each_netdev(net, dev) {
- if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
+ /* Checking the MTU is currently a hack because lowpan and
+ * wpan share the same ARPHRD type.
+ */
+ if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
+ dev->mtu != IEEE802154_MTU)
goto cont;
if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -666,22 +503,10 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
!info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
goto out;
- phy = ops->get_phy(dev);
-
- if ((!phy->set_lbt && info->attrs[IEEE802154_ATTR_LBT_ENABLED]) ||
- (!phy->set_cca_mode && info->attrs[IEEE802154_ATTR_CCA_MODE]) ||
- (!phy->set_cca_ed_level &&
- info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) ||
- (!phy->set_csma_params &&
- (info->attrs[IEEE802154_ATTR_CSMA_RETRIES] ||
- info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] ||
- info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])) ||
- (!phy->set_frame_retries &&
- info->attrs[IEEE802154_ATTR_FRAME_RETRIES])) {
- rc = -EOPNOTSUPP;
- goto out_phy;
- }
+ phy = dev->ieee802154_ptr->wpan_phy;
+ get_device(&phy->dev);
+ rtnl_lock();
ops->get_mac_params(dev, &params);
if (info->attrs[IEEE802154_ATTR_TXPOWER])
@@ -709,20 +534,18 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
params.frame_retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);
rc = ops->set_mac_params(dev, &params);
+ rtnl_unlock();
wpan_phy_put(phy);
dev_put(dev);
- return rc;
-out_phy:
- wpan_phy_put(phy);
+ return 0;
+
out:
dev_put(dev);
return rc;
}
-
-
static int
ieee802154_llsec_parse_key_id(struct genl_info *info,
struct ieee802154_llsec_key_id *desc)
@@ -938,8 +761,6 @@ out:
return rc;
}
-
-
struct llsec_dump_data {
struct sk_buff *skb;
int s_idx, s_idx2;
@@ -962,7 +783,11 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
int rc;
for_each_netdev(net, dev) {
- if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
+ /* Checking the MTU is currently a hack because lowpan and
+ * wpan share the same ARPHRD type.
+ */
+ if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
+ dev->mtu != IEEE802154_MTU)
goto skip;
data.ops = ieee802154_mlme_ops(dev);
@@ -1012,8 +837,6 @@ ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
return rc;
}
-
-
static int
ieee802154_llsec_parse_key(struct genl_info *info,
struct ieee802154_llsec_key *key)
@@ -1158,8 +981,6 @@ int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
}
-
-
static int
llsec_parse_dev(struct genl_info *info,
struct ieee802154_llsec_device *dev)
@@ -1290,8 +1111,6 @@ int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
}
-
-
static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -1406,8 +1225,6 @@ int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
}
-
-
static int
llsec_parse_seclevel(struct genl_info *info,
struct ieee802154_llsec_seclevel *sl)
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 972baf83411a..7baf98b14611 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -1,5 +1,5 @@
/*
- * Netlink inteface for IEEE 802.15.4 stack
+ * Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -27,13 +23,15 @@
#include <linux/if_arp.h>
#include <net/netlink.h>
#include <net/genetlink.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/rtnetlink.h> /* for rtnl_{un,}lock */
#include <linux/nl802154.h>
#include "ieee802154.h"
+#include "rdev-ops.h"
+#include "core.h"
static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct wpan_phy *phy)
@@ -96,7 +94,6 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
return -EINVAL; /* phy name should be null-terminated */
-
phy = wpan_phy_find(name);
if (!phy)
return -ENODEV;
@@ -207,11 +204,6 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
if (!msg)
goto out_dev;
- if (!phy->add_iface) {
- rc = -EINVAL;
- goto nla_put_failure;
- }
-
if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
IEEE802154_ADDR_LEN) {
@@ -227,11 +219,13 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
}
}
- dev = phy->add_iface(phy, devname, type);
+ dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
+ type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
}
+ dev_hold(dev);
if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
struct sockaddr addr;
@@ -261,7 +255,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
dev_unregister:
rtnl_lock(); /* del_iface must be called with RTNL lock */
- phy->del_iface(phy, dev);
+ rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
dev_put(dev);
rtnl_unlock();
nla_put_failure:
@@ -292,8 +286,9 @@ int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- phy = ieee802154_mlme_ops(dev)->get_phy(dev);
+ phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
+ get_device(&phy->dev);
rc = -EINVAL;
/* phy name is optional, but should be checked if it's given */
@@ -323,13 +318,8 @@ int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
if (!msg)
goto out_dev;
- if (!phy->del_iface) {
- rc = -EINVAL;
- goto nla_put_failure;
- }
-
rtnl_lock();
- phy->del_iface(phy, dev);
+ rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
/* We don't have device anymore */
dev_put(dev);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
new file mode 100644
index 000000000000..889647744697
--- /dev/null
+++ b/net/ieee802154/nl802154.c
@@ -0,0 +1,957 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Based on: net/wireless/nl80211.c
+ */
+
+#include <linux/rtnetlink.h>
+
+#include <net/cfg802154.h>
+#include <net/genetlink.h>
+#include <net/mac802154.h>
+#include <net/netlink.h>
+#include <net/nl802154.h>
+#include <net/sock.h>
+
+#include "nl802154.h"
+#include "rdev-ops.h"
+#include "core.h"
+
+static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+/* the netlink family */
+static struct genl_family nl802154_fam = {
+ .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */
+ .name = NL802154_GENL_NAME, /* have users key off the name instead */
+ .hdrsize = 0, /* no private header */
+ .version = 1, /* no particular meaning now */
+ .maxattr = NL802154_ATTR_MAX,
+ .netnsok = true,
+ .pre_doit = nl802154_pre_doit,
+ .post_doit = nl802154_post_doit,
+};
+
+/* multicast groups */
+enum nl802154_multicast_groups {
+ NL802154_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group nl802154_mcgrps[] = {
+ [NL802154_MCGRP_CONFIG] = { .name = "config", },
+};
+
+/* returns ERR_PTR values */
+static struct wpan_dev *
+__cfg802154_wpan_dev_from_attrs(struct net *netns, struct nlattr **attrs)
+{
+ struct cfg802154_registered_device *rdev;
+ struct wpan_dev *result = NULL;
+ bool have_ifidx = attrs[NL802154_ATTR_IFINDEX];
+ bool have_wpan_dev_id = attrs[NL802154_ATTR_WPAN_DEV];
+ u64 wpan_dev_id;
+ int wpan_phy_idx = -1;
+ int ifidx = -1;
+
+ ASSERT_RTNL();
+
+ if (!have_ifidx && !have_wpan_dev_id)
+ return ERR_PTR(-EINVAL);
+
+ if (have_ifidx)
+ ifidx = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]);
+ if (have_wpan_dev_id) {
+ wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]);
+ wpan_phy_idx = wpan_dev_id >> 32;
+ }
+
+ list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
+ struct wpan_dev *wpan_dev;
+
+ /* TODO netns compare */
+
+ if (have_wpan_dev_id && rdev->wpan_phy_idx != wpan_phy_idx)
+ continue;
+
+ list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
+ if (have_ifidx && wpan_dev->netdev &&
+ wpan_dev->netdev->ifindex == ifidx) {
+ result = wpan_dev;
+ break;
+ }
+ if (have_wpan_dev_id &&
+ wpan_dev->identifier == (u32)wpan_dev_id) {
+ result = wpan_dev;
+ break;
+ }
+ }
+
+ if (result)
+ break;
+ }
+
+ if (result)
+ return result;
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct cfg802154_registered_device *
+__cfg802154_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
+{
+ struct cfg802154_registered_device *rdev = NULL, *tmp;
+ struct net_device *netdev;
+
+ ASSERT_RTNL();
+
+ if (!attrs[NL802154_ATTR_WPAN_PHY] &&
+ !attrs[NL802154_ATTR_IFINDEX] &&
+ !attrs[NL802154_ATTR_WPAN_DEV])
+ return ERR_PTR(-EINVAL);
+
+ if (attrs[NL802154_ATTR_WPAN_PHY])
+ rdev = cfg802154_rdev_by_wpan_phy_idx(
+ nla_get_u32(attrs[NL802154_ATTR_WPAN_PHY]));
+
+ if (attrs[NL802154_ATTR_WPAN_DEV]) {
+ u64 wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]);
+ struct wpan_dev *wpan_dev;
+ bool found = false;
+
+ tmp = cfg802154_rdev_by_wpan_phy_idx(wpan_dev_id >> 32);
+ if (tmp) {
+ /* make sure wpan_dev exists */
+ list_for_each_entry(wpan_dev, &tmp->wpan_dev_list, list) {
+ if (wpan_dev->identifier != (u32)wpan_dev_id)
+ continue;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ tmp = NULL;
+
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+ rdev = tmp;
+ }
+ }
+
+ if (attrs[NL802154_ATTR_IFINDEX]) {
+ int ifindex = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]);
+
+ netdev = __dev_get_by_index(netns, ifindex);
+ if (netdev) {
+ if (netdev->ieee802154_ptr)
+ tmp = wpan_phy_to_rdev(
+ netdev->ieee802154_ptr->wpan_phy);
+ else
+ tmp = NULL;
+
+ /* not wireless device -- return error */
+ if (!tmp)
+ return ERR_PTR(-EINVAL);
+
+ /* mismatch -- return error */
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+
+ rdev = tmp;
+ }
+ }
+
+ if (!rdev)
+ return ERR_PTR(-ENODEV);
+
+ /* TODO netns compare */
+
+ return rdev;
+}
+
+/* This function returns a pointer to the registered device
+ * that the passed genl_info item refers to.
+ *
+ * The result of this can be a PTR_ERR and hence must
+ * be checked with IS_ERR() for errors.
+ */
+static struct cfg802154_registered_device *
+cfg802154_get_dev_from_info(struct net *netns, struct genl_info *info)
+{
+ return __cfg802154_rdev_from_attrs(netns, info->attrs);
+}
+
+/* policy for the attributes */
+static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
+ [NL802154_ATTR_WPAN_PHY] = { .type = NLA_U32 },
+ [NL802154_ATTR_WPAN_PHY_NAME] = { .type = NLA_NUL_STRING,
+ .len = 20-1 },
+
+ [NL802154_ATTR_IFINDEX] = { .type = NLA_U32 },
+ [NL802154_ATTR_IFTYPE] = { .type = NLA_U32 },
+ [NL802154_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
+
+ [NL802154_ATTR_WPAN_DEV] = { .type = NLA_U64 },
+
+ [NL802154_ATTR_PAGE] = { .type = NLA_U8, },
+ [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+
+ [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+
+ [NL802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+
+ [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
+
+ [NL802154_ATTR_PAN_ID] = { .type = NLA_U16, },
+ [NL802154_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+ [NL802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
+
+ [NL802154_ATTR_MIN_BE] = { .type = NLA_U8, },
+ [NL802154_ATTR_MAX_BE] = { .type = NLA_U8, },
+ [NL802154_ATTR_MAX_CSMA_BACKOFFS] = { .type = NLA_U8, },
+
+ [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
+
+ [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+};
+
+/* message building helper */
+static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, u8 cmd)
+{
+ /* since there is no private header just add the generic one */
+ return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd);
+}
+
+static int
+nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
+ struct sk_buff *msg)
+{
+ struct nlattr *nl_page;
+ unsigned long page;
+
+ nl_page = nla_nest_start(msg, NL802154_ATTR_CHANNELS_SUPPORTED);
+ if (!nl_page)
+ return -ENOBUFS;
+
+ for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
+ if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
+ rdev->wpan_phy.channels_supported[page]))
+ return -ENOBUFS;
+ }
+ nla_nest_end(msg, nl_page);
+
+ return 0;
+}
+
+static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
+ enum nl802154_commands cmd,
+ struct sk_buff *msg, u32 portid, u32 seq,
+ int flags)
+{
+ void *hdr;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
+ nla_put_string(msg, NL802154_ATTR_WPAN_PHY_NAME,
+ wpan_phy_name(&rdev->wpan_phy)) ||
+ nla_put_u32(msg, NL802154_ATTR_GENERATION,
+ cfg802154_rdev_list_generation))
+ goto nla_put_failure;
+
+ if (cmd != NL802154_CMD_NEW_WPAN_PHY)
+ goto finish;
+
+ /* DUMP PHY PIB */
+
+ /* current channel settings */
+ if (nla_put_u8(msg, NL802154_ATTR_PAGE,
+ rdev->wpan_phy.current_page) ||
+ nla_put_u8(msg, NL802154_ATTR_CHANNEL,
+ rdev->wpan_phy.current_channel))
+ goto nla_put_failure;
+
+ /* supported channels array */
+ if (nl802154_send_wpan_phy_channels(rdev, msg))
+ goto nla_put_failure;
+
+ /* cca mode */
+ if (nla_put_u8(msg, NL802154_ATTR_CCA_MODE,
+ rdev->wpan_phy.cca_mode))
+ goto nla_put_failure;
+
+ if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
+ rdev->wpan_phy.transmit_power))
+ goto nla_put_failure;
+
+finish:
+ return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+struct nl802154_dump_wpan_phy_state {
+ s64 filter_wpan_phy;
+ long start;
+
+};
+
+static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nl802154_dump_wpan_phy_state *state)
+{
+ struct nlattr **tb = nl802154_fam.attrbuf;
+ int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize,
+ tb, nl802154_fam.maxattr, nl802154_policy);
+
+ /* TODO: check whether we can handle the error here;
+ * we have no backward compatibility to maintain
+ */
+ if (ret)
+ return 0;
+
+ if (tb[NL802154_ATTR_WPAN_PHY])
+ state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
+ if (tb[NL802154_ATTR_WPAN_DEV])
+ state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32;
+ if (tb[NL802154_ATTR_IFINDEX]) {
+ struct net_device *netdev;
+ struct cfg802154_registered_device *rdev;
+ int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]);
+
+ /* TODO netns */
+ netdev = __dev_get_by_index(&init_net, ifidx);
+ if (!netdev)
+ return -ENODEV;
+ if (netdev->ieee802154_ptr) {
+ rdev = wpan_phy_to_rdev(
+ netdev->ieee802154_ptr->wpan_phy);
+ state->filter_wpan_phy = rdev->wpan_phy_idx;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nl802154_dump_wpan_phy(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx = 0, ret;
+ struct nl802154_dump_wpan_phy_state *state = (void *)cb->args[0];
+ struct cfg802154_registered_device *rdev;
+
+ rtnl_lock();
+ if (!state) {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ rtnl_unlock();
+ return -ENOMEM;
+ }
+ state->filter_wpan_phy = -1;
+ ret = nl802154_dump_wpan_phy_parse(skb, cb, state);
+ if (ret) {
+ kfree(state);
+ rtnl_unlock();
+ return ret;
+ }
+ cb->args[0] = (long)state;
+ }
+
+ list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
+ /* TODO net ns compare */
+ if (++idx <= state->start)
+ continue;
+ if (state->filter_wpan_phy != -1 &&
+ state->filter_wpan_phy != rdev->wpan_phy_idx)
+ continue;
+ /* attempt to fit multiple wpan_phy data chunks into the skb */
+ ret = nl802154_send_wpan_phy(rdev,
+ NL802154_CMD_NEW_WPAN_PHY,
+ skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ if (ret < 0) {
+ if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
+ !skb->len && cb->min_dump_alloc < 4096) {
+ cb->min_dump_alloc = 4096;
+ rtnl_unlock();
+ return 1;
+ }
+ idx--;
+ break;
+ }
+ break;
+ }
+ rtnl_unlock();
+
+ state->start = idx;
+
+ return skb->len;
+}
+
+static int nl802154_dump_wpan_phy_done(struct netlink_callback *cb)
+{
+ kfree((void *)cb->args[0]);
+ return 0;
+}
+
+static int nl802154_get_wpan_phy(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nl802154_send_wpan_phy(rdev, NL802154_CMD_NEW_WPAN_PHY, msg,
+ info->snd_portid, info->snd_seq, 0) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static inline u64 wpan_dev_id(struct wpan_dev *wpan_dev)
+{
+ return (u64)wpan_dev->identifier |
+ ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32);
+}
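(Illustrative aside, not part of the patch: the NL802154_ATTR_WPAN_DEV value built by wpan_dev_id() above packs the per-phy device identifier into the low 32 bits and the wpan_phy index into the high 32 bits; the lookup code earlier in this file unpacks it with ">> 32" and a (u32) cast. Standalone sketch with invented sample values.)

#include <stdio.h>
#include <stdint.h>

static uint64_t pack_wpan_dev_id(uint32_t phy_idx, uint32_t identifier)
{
	return (uint64_t)identifier | ((uint64_t)phy_idx << 32);
}

int main(void)
{
	uint64_t id = pack_wpan_dev_id(2, 7);   /* phy 2, wpan_dev 7 */

	printf("phy_idx=%u identifier=%u\n",
	       (uint32_t)(id >> 32), (uint32_t)id);
	return 0;
}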
+
+static int
+nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
+ struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ struct net_device *dev = wpan_dev->netdev;
+ void *hdr;
+
+ hdr = nl802154hdr_put(msg, portid, seq, flags,
+ NL802154_CMD_NEW_INTERFACE);
+ if (!hdr)
+ return -1;
+
+ if (dev &&
+ (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_string(msg, NL802154_ATTR_IFNAME, dev->name)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
+ nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
+ nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) ||
+ nla_put_u32(msg, NL802154_ATTR_GENERATION,
+ rdev->devlist_generation ^
+ (cfg802154_rdev_list_generation << 2)))
+ goto nla_put_failure;
+
+ /* address settings */
+ if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR,
+ wpan_dev->extended_addr) ||
+ nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR,
+ wpan_dev->short_addr) ||
+ nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id))
+ goto nla_put_failure;
+
+ /* ARET handling */
+ if (nla_put_s8(msg, NL802154_ATTR_MAX_FRAME_RETRIES,
+ wpan_dev->frame_retries) ||
+ nla_put_u8(msg, NL802154_ATTR_MAX_BE, wpan_dev->max_be) ||
+ nla_put_u8(msg, NL802154_ATTR_MAX_CSMA_BACKOFFS,
+ wpan_dev->csma_retries) ||
+ nla_put_u8(msg, NL802154_ATTR_MIN_BE, wpan_dev->min_be))
+ goto nla_put_failure;
+
+ /* listen before transmit */
+ if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
+ goto nla_put_failure;
+
+ return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int wp_idx = 0;
+ int if_idx = 0;
+ int wp_start = cb->args[0];
+ int if_start = cb->args[1];
+ struct cfg802154_registered_device *rdev;
+ struct wpan_dev *wpan_dev;
+
+ rtnl_lock();
+ list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
+ /* TODO netns compare */
+ if (wp_idx < wp_start) {
+ wp_idx++;
+ continue;
+ }
+ if_idx = 0;
+
+ list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
+ if (if_idx < if_start) {
+ if_idx++;
+ continue;
+ }
+ if (nl802154_send_iface(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ rdev, wpan_dev) < 0) {
+ goto out;
+ }
+ if_idx++;
+ }
+
+ wp_idx++;
+ }
+out:
+ rtnl_unlock();
+
+ cb->args[0] = wp_idx;
+ cb->args[1] = if_idx;
+
+ return skb->len;
+}
+
+static int nl802154_get_interface(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct wpan_dev *wdev = info->user_ptr[1];
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nl802154_send_iface(msg, info->snd_portid, info->snd_seq, 0,
+ rdev, wdev) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ enum nl802154_iftype type = NL802154_IFTYPE_UNSPEC;
+ __le64 extended_addr = cpu_to_le64(0x0000000000000000ULL);
+
+ /* TODO avoid failing a new interface
+ * creation due to pending removal?
+ */
+
+ if (!info->attrs[NL802154_ATTR_IFNAME])
+ return -EINVAL;
+
+ if (info->attrs[NL802154_ATTR_IFTYPE]) {
+ type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
+ if (type > NL802154_IFTYPE_MAX)
+ return -EINVAL;
+ }
+
+ /* TODO add nla_get_le64 to netlink */
+ if (info->attrs[NL802154_ATTR_EXTENDED_ADDR])
+ extended_addr = (__force __le64)nla_get_u64(
+ info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
+
+ if (!rdev->ops->add_virtual_intf)
+ return -EOPNOTSUPP;
+
+ return rdev_add_virtual_intf(rdev,
+ nla_data(info->attrs[NL802154_ATTR_IFNAME]),
+ type, extended_addr);
+}
+
+static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct wpan_dev *wpan_dev = info->user_ptr[1];
+
+ if (!rdev->ops->del_virtual_intf)
+ return -EOPNOTSUPP;
+
+ /* If we remove a wpan device without a netdev then clear
+ * user_ptr[1] so that nl802154_post_doit won't dereference it
+ * to check if it needs to do dev_put(). Otherwise it crashes
+ * since the wpan_dev has been freed, unlike with a netdev where
+ * we need the dev_put() for the netdev to really be freed.
+ */
+ if (!wpan_dev->netdev)
+ info->user_ptr[1] = NULL;
+
+ return rdev_del_virtual_intf(rdev, wpan_dev);
+}
+
+static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ u8 channel, page;
+
+ if (!info->attrs[NL802154_ATTR_PAGE] ||
+ !info->attrs[NL802154_ATTR_CHANNEL])
+ return -EINVAL;
+
+ page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
+ channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
+
+ /* check 802.15.4 constraints */
+ if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+ return -EINVAL;
+
+ return rdev_set_channel(rdev, page, channel);
+}
+
+static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ __le16 pan_id;
+
+ /* changing this while running would conflict with tx/rx calls */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* don't change address fields on monitor */
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EINVAL;
+
+ if (!info->attrs[NL802154_ATTR_PAN_ID])
+ return -EINVAL;
+
+ pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
+
+ return rdev_set_pan_id(rdev, wpan_dev, pan_id);
+}
+
+static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ __le16 short_addr;
+
+ /* changing this while running would conflict with tx/rx calls */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* don't change address fields on monitor */
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EINVAL;
+
+ if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+ return -EINVAL;
+
+ short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
+
+ return rdev_set_short_addr(rdev, wpan_dev, short_addr);
+}
+
+static int
+nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ u8 min_be, max_be;
+
+ /* should be set on netif open inside phy settings */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_MIN_BE] ||
+ !info->attrs[NL802154_ATTR_MAX_BE])
+ return -EINVAL;
+
+ min_be = nla_get_u8(info->attrs[NL802154_ATTR_MIN_BE]);
+ max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
+
+ /* check 802.15.4 constraints */
+ if (max_be < 3 || max_be > 8 || min_be > max_be)
+ return -EINVAL;
+
+ return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
+}
+
+static int
+nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ u8 max_csma_backoffs;
+
+ /* would conflict with other settings on a running interface */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS])
+ return -EINVAL;
+
+ max_csma_backoffs = nla_get_u8(
+ info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
+
+ /* check 802.15.4 constraints */
+ if (max_csma_backoffs > 5)
+ return -EINVAL;
+
+ return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
+}
+
+static int
+nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ s8 max_frame_retries;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES])
+ return -EINVAL;
+
+ max_frame_retries = nla_get_s8(
+ info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
+
+ /* check 802.15.4 constraints */
+ if (max_frame_retries < -1 || max_frame_retries > 7)
+ return -EINVAL;
+
+ return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
+}
+
+static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ bool mode;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_LBT_MODE])
+ return -EINVAL;
+
+ mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+ return rdev_set_lbt_mode(rdev, wpan_dev, mode);
+}
+
+#define NL802154_FLAG_NEED_WPAN_PHY 0x01
+#define NL802154_FLAG_NEED_NETDEV 0x02
+#define NL802154_FLAG_NEED_RTNL 0x04
+#define NL802154_FLAG_CHECK_NETDEV_UP 0x08
+#define NL802154_FLAG_NEED_NETDEV_UP (NL802154_FLAG_NEED_NETDEV |\
+ NL802154_FLAG_CHECK_NETDEV_UP)
+#define NL802154_FLAG_NEED_WPAN_DEV 0x10
+#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\
+ NL802154_FLAG_CHECK_NETDEV_UP)
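(Illustrative aside, not part of the patch: how the composite flags above behave. An op marked NL802154_FLAG_NEED_NETDEV_UP satisfies both the "need a netdev" and the "netdev must be up" tests in nl802154_pre_doit(). The constants below reuse the values above; the rest of the sketch is invented.)

#include <stdio.h>

#define FLAG_NEED_WPAN_PHY    0x01
#define FLAG_NEED_NETDEV      0x02
#define FLAG_NEED_RTNL        0x04
#define FLAG_CHECK_NETDEV_UP  0x08
#define FLAG_NEED_NETDEV_UP   (FLAG_NEED_NETDEV | FLAG_CHECK_NETDEV_UP)

int main(void)
{
	unsigned int internal_flags = FLAG_NEED_NETDEV_UP | FLAG_NEED_RTNL;

	printf("needs netdev: %s\n",
	       internal_flags & FLAG_NEED_NETDEV ? "yes" : "no");
	printf("netdev must be up: %s\n",
	       internal_flags & FLAG_CHECK_NETDEV_UP ? "yes" : "no");
	printf("needs rtnl: %s\n",
	       internal_flags & FLAG_NEED_RTNL ? "yes" : "no");
	return 0;
}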
+
+static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev;
+ struct wpan_dev *wpan_dev;
+ struct net_device *dev;
+ bool rtnl = ops->internal_flags & NL802154_FLAG_NEED_RTNL;
+
+ if (rtnl)
+ rtnl_lock();
+
+ if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_PHY) {
+ rdev = cfg802154_get_dev_from_info(genl_info_net(info), info);
+ if (IS_ERR(rdev)) {
+ if (rtnl)
+ rtnl_unlock();
+ return PTR_ERR(rdev);
+ }
+ info->user_ptr[0] = rdev;
+ } else if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV ||
+ ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) {
+ ASSERT_RTNL();
+ wpan_dev = __cfg802154_wpan_dev_from_attrs(genl_info_net(info),
+ info->attrs);
+ if (IS_ERR(wpan_dev)) {
+ if (rtnl)
+ rtnl_unlock();
+ return PTR_ERR(wpan_dev);
+ }
+
+ dev = wpan_dev->netdev;
+ rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy);
+
+ if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV) {
+ if (!dev) {
+ if (rtnl)
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ info->user_ptr[1] = dev;
+ } else {
+ info->user_ptr[1] = wpan_dev;
+ }
+
+ if (dev) {
+ if (ops->internal_flags & NL802154_FLAG_CHECK_NETDEV_UP &&
+ !netif_running(dev)) {
+ if (rtnl)
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+
+ dev_hold(dev);
+ }
+
+ info->user_ptr[0] = rdev;
+ }
+
+ return 0;
+}
+
+static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ if (info->user_ptr[1]) {
+ if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) {
+ struct wpan_dev *wpan_dev = info->user_ptr[1];
+
+ if (wpan_dev->netdev)
+ dev_put(wpan_dev->netdev);
+ } else {
+ dev_put(info->user_ptr[1]);
+ }
+ }
+
+ if (ops->internal_flags & NL802154_FLAG_NEED_RTNL)
+ rtnl_unlock();
+}
+
+static const struct genl_ops nl802154_ops[] = {
+ {
+ .cmd = NL802154_CMD_GET_WPAN_PHY,
+ .doit = nl802154_get_wpan_phy,
+ .dumpit = nl802154_dump_wpan_phy,
+ .done = nl802154_dump_wpan_phy_done,
+ .policy = nl802154_policy,
+ /* can be retrieved by unprivileged users */
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_GET_INTERFACE,
+ .doit = nl802154_get_interface,
+ .dumpit = nl802154_dump_interface,
+ .policy = nl802154_policy,
+ /* can be retrieved by unprivileged users */
+ .internal_flags = NL802154_FLAG_NEED_WPAN_DEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_NEW_INTERFACE,
+ .doit = nl802154_new_interface,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_DEL_INTERFACE,
+ .doit = nl802154_del_interface,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_DEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_CHANNEL,
+ .doit = nl802154_set_channel,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_PAN_ID,
+ .doit = nl802154_set_pan_id,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_SHORT_ADDR,
+ .doit = nl802154_set_short_addr,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_BACKOFF_EXPONENT,
+ .doit = nl802154_set_backoff_exponent,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_MAX_CSMA_BACKOFFS,
+ .doit = nl802154_set_max_csma_backoffs,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_MAX_FRAME_RETRIES,
+ .doit = nl802154_set_max_frame_retries,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_LBT_MODE,
+ .doit = nl802154_set_lbt_mode,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
+};
+
+/* initialisation/exit functions */
+int nl802154_init(void)
+{
+ return genl_register_family_with_ops_groups(&nl802154_fam, nl802154_ops,
+ nl802154_mcgrps);
+}
+
+void nl802154_exit(void)
+{
+ genl_unregister_family(&nl802154_fam);
+}
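
Taken together, the NL802154_FLAG_* bits and nl802154_pre_doit()/nl802154_post_doit() give every .doit handler a uniform calling convention: user_ptr[0] is the rdev; user_ptr[1] is the net_device (NEED_NETDEV, with a reference held until post_doit) or the wpan_dev (NEED_WPAN_DEV); the rtnl lock is held when NEED_RTNL is set; and CHECK_NETDEV_UP turns a down interface into -ENETDOWN before the handler runs. A minimal sketch of a further setter following that convention — NL802154_CMD_SET_FOO, NL802154_ATTR_FOO and rdev_set_foo() are hypothetical and not part of this patch:

static int nl802154_set_foo(struct sk_buff *skb, struct genl_info *info)
{
        /* filled in by nl802154_pre_doit() for NEED_NETDEV ops */
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        u8 foo;

        if (netif_running(dev))
                return -EBUSY;

        if (!info->attrs[NL802154_ATTR_FOO])
                return -EINVAL;

        foo = nla_get_u8(info->attrs[NL802154_ATTR_FOO]);

        return rdev_set_foo(rdev, wpan_dev, foo);
}

/* and the matching nl802154_ops[] entry:
 *      {
 *              .cmd = NL802154_CMD_SET_FOO,
 *              .doit = nl802154_set_foo,
 *              .policy = nl802154_policy,
 *              .flags = GENL_ADMIN_PERM,
 *              .internal_flags = NL802154_FLAG_NEED_NETDEV |
 *                                NL802154_FLAG_NEED_RTNL,
 *      },
 */
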
diff --git a/net/ieee802154/nl802154.h b/net/ieee802154/nl802154.h
new file mode 100644
index 000000000000..3846a89d0958
--- /dev/null
+++ b/net/ieee802154/nl802154.h
@@ -0,0 +1,7 @@
+#ifndef __IEEE802154_NL802154_H
+#define __IEEE802154_NL802154_H
+
+int nl802154_init(void);
+void nl802154_exit(void);
+
+#endif /* __IEEE802154_NL802154_H */
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 3a703ab88348..35c432668454 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#include <linux/kernel.h>
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 9d1f64806f02..1674b115c891 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -154,7 +150,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto out_skb;
@@ -195,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
@@ -225,7 +221,6 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-
void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk;
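
The two replacements above are part of a tree-wide conversion: memcpy_from_msg() and skb_copy_datagram_msg() take the struct msghdr directly, so protocol code no longer reaches into msg->msg_iov. A small sketch of the helper usage as applied here (wrapper names are illustrative only):

/* send side: was memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size) */
static int example_fill_skb(struct sk_buff *skb, struct msghdr *msg, size_t size)
{
        return memcpy_from_msg(skb_put(skb, size), msg, size);
}

/* receive side: was skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied) */
static int example_copy_to_msg(struct sk_buff *skb, struct msghdr *msg,
                               size_t copied)
{
        return skb_copy_datagram_msg(skb, 0, msg, copied);
}
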
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
new file mode 100644
index 000000000000..aff54fbd9264
--- /dev/null
+++ b/net/ieee802154/rdev-ops.h
@@ -0,0 +1,89 @@
+#ifndef __CFG802154_RDEV_OPS
+#define __CFG802154_RDEV_OPS
+
+#include <net/cfg802154.h>
+
+#include "core.h"
+
+static inline struct net_device *
+rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
+ const char *name, int type)
+{
+ return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name,
+ type);
+}
+
+static inline void
+rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
+ struct net_device *dev)
+{
+ rdev->ops->del_virtual_intf_deprecated(&rdev->wpan_phy, dev);
+}
+
+static inline int
+rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
+ enum nl802154_iftype type, __le64 extended_addr)
+{
+ return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type,
+ extended_addr);
+}
+
+static inline int
+rdev_del_virtual_intf(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev)
+{
+ return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline int
+rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
+{
+ return rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
+}
+
+static inline int
+rdev_set_pan_id(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, __le16 pan_id)
+{
+ return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+}
+
+static inline int
+rdev_set_short_addr(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, __le16 short_addr)
+{
+ return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+}
+
+static inline int
+rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, u8 min_be, u8 max_be)
+{
+ return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
+ min_be, max_be);
+}
+
+static inline int
+rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, u8 max_csma_backoffs)
+{
+ return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
+ max_csma_backoffs);
+}
+
+static inline int
+rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, s8 max_frame_retries)
+{
+ return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
+ max_frame_retries);
+}
+
+static inline int
+rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, bool mode)
+{
+ return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+}
+
+#endif /* __CFG802154_RDEV_OPS */
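
These rdev_*() inlines are thin dispatchers into the ops table a MAC/driver registers with cfg802154. A minimal sketch of the other side, for illustration only: the callback names and argument types are inferred from the rdev->ops-> accesses in this header, and the real table (struct cfg802154_ops, in include/net/cfg802154.h, not shown in this hunk) is filled in by net/mac802154 elsewhere in this series.

static int example_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
{
        /* a real driver would program the transceiver before updating state */
        wpan_phy->current_page = page;
        wpan_phy->current_channel = channel;
        return 0;
}

static const struct cfg802154_ops example_ops = {
        .set_channel = example_set_channel,
        /* .set_pan_id, .set_short_addr, .set_backoff_exponent,
         * .set_max_csma_backoffs, .set_max_frame_retries, .set_lbt_mode, ...
         * with the signatures used by the wrappers above
         */
};
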
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index 7cfcd6885225..9d980ed3ffe2 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -33,7 +33,7 @@
static const char lowpan_frags_cache_name[] = "lowpan-frags";
struct lowpan_frag_info {
- __be16 d_tag;
+ u16 d_tag;
u16 d_size;
u8 d_offset;
};
@@ -48,7 +48,7 @@ static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
struct sk_buff *prev, struct net_device *dev);
-static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
+static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
const struct ieee802154_addr *saddr,
const struct ieee802154_addr *daddr)
{
@@ -330,11 +330,13 @@ static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
{
bool fail;
u8 pattern = 0, low = 0;
+ __be16 d_tag = 0;
fail = lowpan_fetch_skb(skb, &pattern, 1);
fail |= lowpan_fetch_skb(skb, &low, 1);
frag_info->d_size = (pattern & 7) << 8 | low;
- fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);
+ fail |= lowpan_fetch_skb(skb, &d_tag, 2);
+ frag_info->d_tag = ntohs(d_tag);
if (frag_type == LOWPAN_DISPATCH_FRAGN) {
fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h
index 74e4a7c98191..836b16fa001f 100644
--- a/net/ieee802154/reassembly.h
+++ b/net/ieee802154/reassembly.h
@@ -4,7 +4,7 @@
#include <net/inet_frag.h>
struct lowpan_create_arg {
- __be16 tag;
+ u16 tag;
u16 d_size;
const struct ieee802154_addr *src;
const struct ieee802154_addr *dst;
@@ -15,7 +15,7 @@ struct lowpan_create_arg {
struct lowpan_frag_queue {
struct inet_frag_queue q;
- __be16 tag;
+ u16 tag;
u16 d_size;
struct ieee802154_addr saddr;
struct ieee802154_addr daddr;
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
new file mode 100644
index 000000000000..1613b9c65dfa
--- /dev/null
+++ b/net/ieee802154/sysfs.c
@@ -0,0 +1,128 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Based on: net/wireless/sysfs.c
+ */
+
+#include <linux/device.h>
+
+#include <net/cfg802154.h>
+
+#include "core.h"
+#include "sysfs.h"
+
+static inline struct cfg802154_registered_device *
+dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct cfg802154_registered_device,
+ wpan_phy.dev);
+}
+
+#define SHOW_FMT(name, fmt, member) \
+static ssize_t name ## _show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \
+} \
+static DEVICE_ATTR_RO(name)
+
+SHOW_FMT(index, "%d", wpan_phy_idx);
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wpan_phy *wpan_phy = &dev_to_rdev(dev)->wpan_phy;
+
+ return sprintf(buf, "%s\n", dev_name(&wpan_phy->dev));
+}
+static DEVICE_ATTR_RO(name);
+
+#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
+static ssize_t name ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
+ int ret; \
+ \
+ mutex_lock(&phy->pib_lock); \
+ ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
+ mutex_unlock(&phy->pib_lock); \
+ return ret; \
+} \
+static DEVICE_ATTR_RO(name)
+
+#define MASTER_SHOW(field, format_string) \
+ MASTER_SHOW_COMPLEX(field, format_string, phy->field)
+
+MASTER_SHOW(current_channel, "%d");
+MASTER_SHOW(current_page, "%d");
+MASTER_SHOW(transmit_power, "%d +- 1 dB");
+MASTER_SHOW(cca_mode, "%d");
+
+static ssize_t channels_supported_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+ int ret;
+ int i, len = 0;
+
+ mutex_lock(&phy->pib_lock);
+ for (i = 0; i < 32; i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len,
+ "%#09x\n", phy->channels_supported[i]);
+ if (ret < 0)
+ break;
+ len += ret;
+ }
+ mutex_unlock(&phy->pib_lock);
+ return len;
+}
+static DEVICE_ATTR_RO(channels_supported);
+
+static void wpan_phy_release(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+
+ cfg802154_dev_free(rdev);
+}
+
+static struct attribute *pmib_attrs[] = {
+ &dev_attr_index.attr,
+ &dev_attr_name.attr,
+ /* below will be removed soon */
+ &dev_attr_current_channel.attr,
+ &dev_attr_current_page.attr,
+ &dev_attr_channels_supported.attr,
+ &dev_attr_transmit_power.attr,
+ &dev_attr_cca_mode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(pmib);
+
+struct class wpan_phy_class = {
+ .name = "ieee802154",
+ .dev_release = wpan_phy_release,
+ .dev_groups = pmib_groups,
+};
+
+int wpan_phy_sysfs_init(void)
+{
+ return class_register(&wpan_phy_class);
+}
+
+void wpan_phy_sysfs_exit(void)
+{
+ class_unregister(&wpan_phy_class);
+}
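
For reference, SHOW_FMT(index, "%d", wpan_phy_idx) above expands to an ordinary read-only device attribute, roughly:

static ssize_t index_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", dev_to_rdev(dev)->wpan_phy_idx);
}
static DEVICE_ATTR_RO(index);   /* provides dev_attr_index, mode 0444 */

dev_attr_index.attr is what pmib_attrs[] puts into the attribute group, so the value shows up as a sysfs file named "index" under the phy's entry in /sys/class/ieee802154/.
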
diff --git a/net/ieee802154/sysfs.h b/net/ieee802154/sysfs.h
new file mode 100644
index 000000000000..aa42e39ecbec
--- /dev/null
+++ b/net/ieee802154/sysfs.h
@@ -0,0 +1,9 @@
+#ifndef __IEEE802154_SYSFS_H
+#define __IEEE802154_SYSFS_H
+
+int wpan_phy_sysfs_init(void);
+void wpan_phy_sysfs_exit(void);
+
+extern struct class wpan_phy_class;
+
+#endif /* __IEEE802154_SYSFS_H */
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
deleted file mode 100644
index 4955e0fe5883..000000000000
--- a/net/ieee802154/wpan-class.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (C) 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-
-#include <net/wpan-phy.h>
-
-#include "ieee802154.h"
-
-#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
-static ssize_t name ## _show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
- int ret; \
- \
- mutex_lock(&phy->pib_lock); \
- ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
- mutex_unlock(&phy->pib_lock); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(name);
-
-#define MASTER_SHOW(field, format_string) \
- MASTER_SHOW_COMPLEX(field, format_string, phy->field)
-
-MASTER_SHOW(current_channel, "%d");
-MASTER_SHOW(current_page, "%d");
-MASTER_SHOW(transmit_power, "%d +- 1 dB");
-MASTER_SHOW(cca_mode, "%d");
-
-static ssize_t channels_supported_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
- int ret;
- int i, len = 0;
-
- mutex_lock(&phy->pib_lock);
- for (i = 0; i < 32; i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len,
- "%#09x\n", phy->channels_supported[i]);
- if (ret < 0)
- break;
- len += ret;
- }
- mutex_unlock(&phy->pib_lock);
- return len;
-}
-static DEVICE_ATTR_RO(channels_supported);
-
-static struct attribute *pmib_attrs[] = {
- &dev_attr_current_channel.attr,
- &dev_attr_current_page.attr,
- &dev_attr_channels_supported.attr,
- &dev_attr_transmit_power.attr,
- &dev_attr_cca_mode.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(pmib);
-
-static void wpan_phy_release(struct device *d)
-{
- struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
-
- kfree(phy);
-}
-
-static struct class wpan_phy_class = {
- .name = "ieee802154",
- .dev_release = wpan_phy_release,
- .dev_groups = pmib_groups,
-};
-
-static DEFINE_MUTEX(wpan_phy_mutex);
-static int wpan_phy_idx;
-
-static int wpan_phy_match(struct device *dev, const void *data)
-{
- return !strcmp(dev_name(dev), (const char *)data);
-}
-
-struct wpan_phy *wpan_phy_find(const char *str)
-{
- struct device *dev;
-
- if (WARN_ON(!str))
- return NULL;
-
- dev = class_find_device(&wpan_phy_class, NULL, str, wpan_phy_match);
- if (!dev)
- return NULL;
-
- return container_of(dev, struct wpan_phy, dev);
-}
-EXPORT_SYMBOL(wpan_phy_find);
-
-struct wpan_phy_iter_data {
- int (*fn)(struct wpan_phy *phy, void *data);
- void *data;
-};
-
-static int wpan_phy_iter(struct device *dev, void *_data)
-{
- struct wpan_phy_iter_data *wpid = _data;
- struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
-
- return wpid->fn(phy, wpid->data);
-}
-
-int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
- void *data)
-{
- struct wpan_phy_iter_data wpid = {
- .fn = fn,
- .data = data,
- };
-
- return class_for_each_device(&wpan_phy_class, NULL,
- &wpid, wpan_phy_iter);
-}
-EXPORT_SYMBOL(wpan_phy_for_each);
-
-static int wpan_phy_idx_valid(int idx)
-{
- return idx >= 0;
-}
-
-struct wpan_phy *wpan_phy_alloc(size_t priv_size)
-{
- struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
- GFP_KERNEL);
-
- if (!phy)
- goto out;
- mutex_lock(&wpan_phy_mutex);
- phy->idx = wpan_phy_idx++;
- if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
- wpan_phy_idx--;
- mutex_unlock(&wpan_phy_mutex);
- kfree(phy);
- goto out;
- }
- mutex_unlock(&wpan_phy_mutex);
-
- mutex_init(&phy->pib_lock);
-
- device_initialize(&phy->dev);
- dev_set_name(&phy->dev, "wpan-phy%d", phy->idx);
-
- phy->dev.class = &wpan_phy_class;
-
- phy->current_channel = -1; /* not initialised */
- phy->current_page = 0; /* for compatibility */
-
- return phy;
-
-out:
- return NULL;
-}
-EXPORT_SYMBOL(wpan_phy_alloc);
-
-int wpan_phy_register(struct wpan_phy *phy)
-{
- return device_add(&phy->dev);
-}
-EXPORT_SYMBOL(wpan_phy_register);
-
-void wpan_phy_unregister(struct wpan_phy *phy)
-{
- device_del(&phy->dev);
-}
-EXPORT_SYMBOL(wpan_phy_unregister);
-
-void wpan_phy_free(struct wpan_phy *phy)
-{
- put_device(&phy->dev);
-}
-EXPORT_SYMBOL(wpan_phy_free);
-
-static int __init wpan_phy_class_init(void)
-{
- int rc;
-
- rc = class_register(&wpan_phy_class);
- if (rc)
- goto err;
-
- rc = ieee802154_nl_init();
- if (rc)
- goto err_nl;
-
- return 0;
-err_nl:
- class_unregister(&wpan_phy_class);
-err:
- return rc;
-}
-subsys_initcall(wpan_phy_class_init);
-
-static void __exit wpan_phy_class_exit(void)
-{
- ieee802154_nl_exit();
- class_unregister(&wpan_phy_class);
-}
-module_exit(wpan_phy_class_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
-MODULE_AUTHOR("Dmitry Eremin-Solenikov");
-
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e682b48e0709..bd2901604842 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -322,6 +322,15 @@ config NET_FOU
network mechanisms and optimizations for UDP (such as ECMP
and RSS) can be leveraged to provide better service.
+config NET_FOU_IP_TUNNELS
+ bool "IP: FOU encapsulation of IP tunnels"
+ depends on NET_IPIP || NET_IPGRE || IPV6_SIT
+ select NET_FOU
+ ---help---
+ Allow configuration of FOU or GUE encapsulation for IP tunnels.
+ When this option is enabled, IP tunnels can be configured to use
+ FOU or GUE encapsulation.
+
config GENEVE
tristate "Generic Network Virtualization Encapsulation (Geneve)"
depends on INET
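
On the NET_FOU_IP_TUNNELS option above: it is what lets the existing ipip/gre/sit drivers reach the encap ops added in fou.c further down. With it enabled and an iproute2 new enough to expose FOU, a FOU-encapsulated IPIP tunnel is configured along the lines of "ip fou add port 5555 ipproto 4" followed by "ip link add ... type ipip remote <peer> ... encap fou encap-sport auto encap-dport 5555" (or "encap gue" for GUE); the exact command syntax is an iproute2 detail and may differ.
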
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e67da4e6c324..a44773c8346c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1222,7 +1222,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
SKB_GSO_TCPV6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
- SKB_GSO_MPLS |
+ SKB_GSO_TUNNEL_REMCSUM |
0)))
goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 16acb59d665e..205e1472aa78 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1292,7 +1292,7 @@ static int arp_proc_init(void);
void __init arp_init(void)
{
- neigh_table_init(&arp_tbl);
+ neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
dev_add_pack(&arp_packet_type);
arp_proc_init();
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 4715f25dfe03..5160c710f2eb 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -50,7 +50,7 @@
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/atomic.h>
-#include <asm/bug.h>
+#include <linux/bug.h>
#include <asm/unaligned.h>
/* List of available DOI definitions */
@@ -72,6 +72,7 @@ struct cipso_v4_map_cache_bkt {
u32 size;
struct list_head list;
};
+
struct cipso_v4_map_cache_entry {
u32 hash;
unsigned char *key;
@@ -82,7 +83,8 @@ struct cipso_v4_map_cache_entry {
u32 activity;
struct list_head list;
};
-static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL;
+
+static struct cipso_v4_map_cache_bkt *cipso_v4_cache;
/* Restricted bitmap (tag #1) flags */
int cipso_v4_rbm_optfmt = 0;
@@ -539,7 +541,7 @@ doi_add_return:
/**
* cipso_v4_doi_free - Frees a DOI definition
- * @entry: the entry's RCU field
+ * @doi_def: the DOI definition
*
* Description:
* This function frees all of the memory associated with a DOI definition.
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 360b565918c4..60173d4d3a0e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -392,8 +392,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
if (elen <= 0)
goto out;
- if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ err = skb_cow_data(skb, 0, &trailer);
+ if (err < 0)
goto out;
+
nfrags = err;
assoclen = sizeof(*esph);
@@ -601,12 +603,12 @@ static int esp_init_authenc(struct xfrm_state *x)
BUG_ON(!aalg_desc);
err = -EINVAL;
- if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+ if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
crypto_aead_authsize(aead)) {
- NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
- x->aalg->alg_name,
- crypto_aead_authsize(aead),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ pr_info("ESP: %s digestsize %u != %hu\n",
+ x->aalg->alg_name,
+ crypto_aead_authsize(aead),
+ aalg_desc->uinfo.auth.icv_fullbits / 8);
goto free_key;
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e9cb2588e416..18bcaf2ff2fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1143,8 +1143,9 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
put_child(tp, cindex, (struct rt_trie_node *)tn);
} else {
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
- tp = tn;
}
+
+ tp = tn;
}
if (tp && tp->pos + tp->bits > 32)
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 606c520ffd5a..b986298a7ba3 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -38,21 +38,17 @@ static inline struct fou *fou_from_sock(struct sock *sk)
return sk->sk_user_data;
}
-static int fou_udp_encap_recv_deliver(struct sk_buff *skb,
- u8 protocol, size_t len)
+static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
struct iphdr *iph = ip_hdr(skb);
/* Remove 'len' bytes from the packet (UDP header and
- * FOU header if present), modify the protocol to the one
- * we found, and then call rcv_encap.
+ * FOU header if present).
*/
iph->tot_len = htons(ntohs(iph->tot_len) - len);
__skb_pull(skb, len);
skb_postpull_rcsum(skb, udp_hdr(skb), len);
skb_reset_transport_header(skb);
-
- return -protocol;
}
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
@@ -62,16 +58,56 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
if (!fou)
return 1;
- return fou_udp_encap_recv_deliver(skb, fou->protocol,
- sizeof(struct udphdr));
+ fou_recv_pull(skb, sizeof(struct udphdr));
+
+ return -fou->protocol;
+}
+
+static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
+ void *data, size_t hdrlen, u8 ipproto)
+{
+ __be16 *pd = data;
+ size_t start = ntohs(pd[0]);
+ size_t offset = ntohs(pd[1]);
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+ __wsum delta;
+
+ if (skb->remcsum_offload) {
+ /* Already processed in GRO path */
+ skb->remcsum_offload = 0;
+ return guehdr;
+ }
+
+ if (!pskb_may_pull(skb, plen))
+ return NULL;
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
+ __skb_checksum_complete(skb);
+
+ delta = remcsum_adjust((void *)guehdr + hdrlen,
+ skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+
+ return guehdr;
+}
+
+static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
+{
+ /* No support yet */
+ kfree_skb(skb);
+ return 0;
}
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
struct fou *fou = fou_from_sock(sk);
- size_t len;
+ size_t len, optlen, hdrlen;
struct guehdr *guehdr;
- struct udphdr *uh;
+ void *data;
+ u16 doffset = 0;
if (!fou)
return 1;
@@ -80,25 +116,58 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (!pskb_may_pull(skb, len))
goto drop;
- uh = udp_hdr(skb);
- guehdr = (struct guehdr *)&uh[1];
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+ optlen = guehdr->hlen << 2;
+ len += optlen;
- len += guehdr->hlen << 2;
if (!pskb_may_pull(skb, len))
goto drop;
- uh = udp_hdr(skb);
- guehdr = (struct guehdr *)&uh[1];
+ /* guehdr may change after pull */
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
- if (guehdr->version != 0)
- goto drop;
+ hdrlen = sizeof(struct guehdr) + optlen;
- if (guehdr->flags) {
- /* No support yet */
+ if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
goto drop;
+
+ ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
+
+ /* Pull csum through the guehdr now. This can be used if
+ * there is a remote checksum offload.
+ */
+ skb_postpull_rcsum(skb, udp_hdr(skb), len);
+
+ data = &guehdr[1];
+
+ if (guehdr->flags & GUE_FLAG_PRIV) {
+ __be32 flags = *(__be32 *)(data + doffset);
+
+ doffset += GUE_LEN_PRIV;
+
+ if (flags & GUE_PFLAG_REMCSUM) {
+ guehdr = gue_remcsum(skb, guehdr, data + doffset,
+ hdrlen, guehdr->proto_ctype);
+ if (!guehdr)
+ goto drop;
+
+ data = &guehdr[1];
+
+ doffset += GUE_PLEN_REMCSUM;
+ }
}
- return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len);
+ if (unlikely(guehdr->control))
+ return gue_control_message(skb, guehdr);
+
+ __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
+ skb_reset_transport_header(skb);
+
+ return -guehdr->proto_ctype;
+
drop:
kfree_skb(skb);
return 0;
@@ -149,6 +218,41 @@ out_unlock:
return err;
}
+static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
+ struct guehdr *guehdr, void *data,
+ size_t hdrlen, u8 ipproto)
+{
+ __be16 *pd = data;
+ size_t start = ntohs(pd[0]);
+ size_t offset = ntohs(pd[1]);
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+ __wsum delta;
+
+ if (skb->remcsum_offload)
+ return guehdr;
+
+ if (!NAPI_GRO_CB(skb)->csum_valid)
+ return NULL;
+
+ /* Pull checksum that will be written */
+ if (skb_gro_header_hard(skb, off + plen)) {
+ guehdr = skb_gro_header_slow(skb, off + plen, off);
+ if (!guehdr)
+ return NULL;
+ }
+
+ delta = remcsum_adjust((void *)guehdr + hdrlen,
+ NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ skb->remcsum_offload = 1;
+
+ return guehdr;
+}
+
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
@@ -156,38 +260,64 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
const struct net_offload *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
- u8 proto;
struct guehdr *guehdr;
- unsigned int hlen, guehlen;
- unsigned int off;
+ size_t len, optlen, hdrlen, off;
+ void *data;
+ u16 doffset = 0;
int flush = 1;
off = skb_gro_offset(skb);
- hlen = off + sizeof(*guehdr);
+ len = off + sizeof(*guehdr);
+
guehdr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- guehdr = skb_gro_header_slow(skb, hlen, off);
+ if (skb_gro_header_hard(skb, len)) {
+ guehdr = skb_gro_header_slow(skb, len, off);
if (unlikely(!guehdr))
goto out;
}
- proto = guehdr->next_hdr;
+ optlen = guehdr->hlen << 2;
+ len += optlen;
- rcu_read_lock();
- offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[proto]);
- if (WARN_ON(!ops || !ops->callbacks.gro_receive))
- goto out_unlock;
+ if (skb_gro_header_hard(skb, len)) {
+ guehdr = skb_gro_header_slow(skb, len, off);
+ if (unlikely(!guehdr))
+ goto out;
+ }
- guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+ if (unlikely(guehdr->control) || guehdr->version != 0 ||
+ validate_gue_flags(guehdr, optlen))
+ goto out;
- hlen = off + guehlen;
- if (skb_gro_header_hard(skb, hlen)) {
- guehdr = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!guehdr))
- goto out_unlock;
+ hdrlen = sizeof(*guehdr) + optlen;
+
+ /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
+ * this is needed if there is a remote checksum offload.
+ */
+ skb_gro_postpull_rcsum(skb, guehdr, hdrlen);
+
+ data = &guehdr[1];
+
+ if (guehdr->flags & GUE_FLAG_PRIV) {
+ __be32 flags = *(__be32 *)(data + doffset);
+
+ doffset += GUE_LEN_PRIV;
+
+ if (flags & GUE_PFLAG_REMCSUM) {
+ guehdr = gue_gro_remcsum(skb, off, guehdr,
+ data + doffset, hdrlen,
+ guehdr->proto_ctype);
+ if (!guehdr)
+ goto out;
+
+ data = &guehdr[1];
+
+ doffset += GUE_PLEN_REMCSUM;
+ }
}
+ skb_gro_pull(skb, hdrlen);
+
flush = 0;
for (p = *head; p; p = p->next) {
@@ -199,7 +329,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
guehdr2 = (struct guehdr *)(p->data + off);
/* Compare base GUE header to be equal (covers
- * hlen, version, next_hdr, and flags.
+ * hlen, version, proto_ctype, and flags.
*/
if (guehdr->word != guehdr2->word) {
NAPI_GRO_CB(p)->same_flow = 0;
@@ -214,10 +344,11 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
}
}
- skb_gro_pull(skb, guehlen);
-
- /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
- skb_gro_postpull_rcsum(skb, guehdr, guehlen);
+ rcu_read_lock();
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[guehdr->proto_ctype]);
+ if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+ goto out_unlock;
pp = ops->callbacks.gro_receive(head, skb);
@@ -238,7 +369,7 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff)
u8 proto;
int err = -ENOENT;
- proto = guehdr->next_hdr;
+ proto = guehdr->proto_ctype;
guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
@@ -489,6 +620,200 @@ static const struct genl_ops fou_nl_ops[] = {
},
};
+size_t fou_encap_hlen(struct ip_tunnel_encap *e)
+{
+ return sizeof(struct udphdr);
+}
+EXPORT_SYMBOL(fou_encap_hlen);
+
+size_t gue_encap_hlen(struct ip_tunnel_encap *e)
+{
+ size_t len;
+ bool need_priv = false;
+
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+
+ if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
+ len += GUE_PLEN_REMCSUM;
+ need_priv = true;
+ }
+
+ len += need_priv ? GUE_LEN_PRIV : 0;
+
+ return len;
+}
+EXPORT_SYMBOL(gue_encap_hlen);
+
+static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ struct flowi4 *fl4, u8 *protocol, __be16 sport)
+{
+ struct udphdr *uh;
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+
+ uh = udp_hdr(skb);
+
+ uh->dest = e->dport;
+ uh->source = sport;
+ uh->len = htons(skb->len);
+ uh->check = 0;
+ udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
+ fl4->saddr, fl4->daddr, skb->len);
+
+ *protocol = IPPROTO_UDP;
+}
+
+int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
+ int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ __be16 sport;
+
+ skb = iptunnel_handle_offloads(skb, csum, type);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
+ skb, 0, 0, false);
+ fou_build_udp(skb, e, fl4, protocol, sport);
+
+ return 0;
+}
+EXPORT_SYMBOL(fou_build_header);
+
+int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
+ int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ struct guehdr *guehdr;
+ size_t hdrlen, optlen = 0;
+ __be16 sport;
+ void *data;
+ bool need_priv = false;
+
+ if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ csum = false;
+ optlen += GUE_PLEN_REMCSUM;
+ type |= SKB_GSO_TUNNEL_REMCSUM;
+ need_priv = true;
+ }
+
+ optlen += need_priv ? GUE_LEN_PRIV : 0;
+
+ skb = iptunnel_handle_offloads(skb, csum, type);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* Get source port (based on flow hash) before skb_push */
+ sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
+ skb, 0, 0, false);
+
+ hdrlen = sizeof(struct guehdr) + optlen;
+
+ skb_push(skb, hdrlen);
+
+ guehdr = (struct guehdr *)skb->data;
+
+ guehdr->control = 0;
+ guehdr->version = 0;
+ guehdr->hlen = optlen >> 2;
+ guehdr->flags = 0;
+ guehdr->proto_ctype = *protocol;
+
+ data = &guehdr[1];
+
+ if (need_priv) {
+ __be32 *flags = data;
+
+ guehdr->flags |= GUE_FLAG_PRIV;
+ *flags = 0;
+ data += GUE_LEN_PRIV;
+
+ if (type & SKB_GSO_TUNNEL_REMCSUM) {
+ u16 csum_start = skb_checksum_start_offset(skb);
+ __be16 *pd = data;
+
+ if (csum_start < hdrlen)
+ return -EINVAL;
+
+ csum_start -= hdrlen;
+ pd[0] = htons(csum_start);
+ pd[1] = htons(csum_start + skb->csum_offset);
+
+ if (!skb_is_gso(skb)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->encapsulation = 0;
+ }
+
+ *flags |= GUE_PFLAG_REMCSUM;
+ data += GUE_PLEN_REMCSUM;
+ }
+
+ }
+
+ fou_build_udp(skb, e, fl4, protocol, sport);
+
+ return 0;
+}
+EXPORT_SYMBOL(gue_build_header);
+
+#ifdef CONFIG_NET_FOU_IP_TUNNELS
+
+static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
+ .encap_hlen = fou_encap_hlen,
+ .build_header = fou_build_header,
+};
+
+static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
+ .encap_hlen = gue_encap_hlen,
+ .build_header = gue_build_header,
+};
+
+static int ip_tunnel_encap_add_fou_ops(void)
+{
+ int ret;
+
+ ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
+ if (ret < 0) {
+ pr_err("can't add fou ops\n");
+ return ret;
+ }
+
+ ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
+ if (ret < 0) {
+ pr_err("can't add gue ops\n");
+ ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ip_tunnel_encap_del_fou_ops(void)
+{
+ ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
+ ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
+}
+
+#else
+
+static int ip_tunnel_encap_add_fou_ops(void)
+{
+ return 0;
+}
+
+static void ip_tunnel_encap_del_fou_ops(void)
+{
+}
+
+#endif
+
static int __init fou_init(void)
{
int ret;
@@ -496,6 +821,14 @@ static int __init fou_init(void)
ret = genl_register_family_with_ops(&fou_nl_family,
fou_nl_ops);
+ if (ret < 0)
+ goto exit;
+
+ ret = ip_tunnel_encap_add_fou_ops();
+ if (ret < 0)
+ genl_unregister_family(&fou_nl_family);
+
+exit:
return ret;
}
@@ -503,6 +836,8 @@ static void __exit fou_fini(void)
{
struct fou *fou, *next;
+ ip_tunnel_encap_del_fou_ops();
+
genl_unregister_family(&fou_nl_family);
/* Close all the FOU sockets */
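
The code above manipulates the GUE header purely through struct guehdr, whose definition lives in include/net/gue.h (added elsewhere in this series, not shown in this hunk). Reconstructed from the field accesses in this file, the layout it assumes is roughly:

struct guehdr {                         /* sketch; see include/net/gue.h */
        union {
                struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
                        __u8    hlen:5,         /* option length in 32-bit words */
                                control:1,      /* control vs. data message */
                                version:2;      /* must be 0 */
#else
                        __u8    version:2,
                                control:1,
                                hlen:5;
#endif
                        __u8    proto_ctype;    /* inner IP protocol (or control type) */
                        __be16  flags;          /* e.g. GUE_FLAG_PRIV */
                };
                __be32  word;   /* compared as a single word in gue_gro_receive() */
        };
};

When GUE_FLAG_PRIV is set, the options begin with a __be32 of private flags (GUE_LEN_PRIV bytes); GUE_PFLAG_REMCSUM then adds two __be16 values (GUE_PLEN_REMCSUM bytes) giving the checksum start and checksum offset relative to the end of the GUE header, which is exactly what gue_build_header() writes and gue_remcsum()/gue_gro_remcsum() consume.
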
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index dedb21e99914..95e47c97585e 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -104,7 +104,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
memcpy(geneveh->options, options, options_len);
}
-/* Transmit a fully formated Geneve frame.
+/* Transmit a fully formatted Geneve frame.
*
* When calling this function, the skb->data should point
* to the geneve header which is fully formed.
@@ -131,15 +131,9 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb)))) {
- err = -ENOMEM;
- return err;
- }
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb))
+ return -ENOMEM;
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -165,6 +159,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
}
}
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+
+ if (sa_family == AF_INET)
+ udp_del_offload(&gs->udp_offloads);
+}
+
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
@@ -293,6 +296,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
geneve_rcv_t *rcv, void *data,
bool no_share, bool ipv6)
{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
gs = geneve_socket_create(net, port, rcv, data, ipv6);
@@ -302,15 +306,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
if (no_share) /* Return error if sharing is not allowed. */
return ERR_PTR(-EINVAL);
+ spin_lock(&gn->sock_lock);
gs = geneve_find_sock(net, port);
- if (gs) {
- if (gs->rcv == rcv)
- atomic_inc(&gs->refcnt);
- else
+ if (gs && ((gs->rcv != rcv) ||
+ !atomic_add_unless(&gs->refcnt, 1, 0)))
gs = ERR_PTR(-EBUSY);
- } else {
+ spin_unlock(&gn->sock_lock);
+
+ if (!gs)
gs = ERR_PTR(-EINVAL);
- }
return gs;
}
@@ -318,9 +322,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
void geneve_sock_release(struct geneve_sock *gs)
{
+ struct net *net = sock_net(gs->sock->sk);
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+
if (!atomic_dec_and_test(&gs->refcnt))
return;
+ spin_lock(&gn->sock_lock);
+ hlist_del_rcu(&gs->hlist);
+ geneve_notify_del_rx_port(gs);
+ spin_unlock(&gn->sock_lock);
+
queue_work(geneve_wq, &gs->del_work);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);
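
The hunk above closes a lookup/teardown race: an existing geneve socket is now found and its refcount taken under gn->sock_lock, and the take succeeds only if the count has not already dropped to zero (atomic_add_unless(..., 1, 0)), while geneve_sock_release() unhashes the socket and removes the UDP offload under the same lock before queueing the actual teardown. The generic shape of that pattern, as a sketch with hypothetical names:

struct shared_sock {
        atomic_t refcnt;
        /* ... hashed in a per-netns list protected by sock_lock ... */
};

static struct shared_sock *shared_sock_get(spinlock_t *sock_lock,
                                           struct shared_sock *found)
{
        struct shared_sock *ret = ERR_PTR(-EBUSY);

        spin_lock(sock_lock);
        /* take the reference only if the object is not already dying */
        if (found && atomic_add_unless(&found->refcnt, 1, 0))
                ret = found;
        spin_unlock(sock_lock);

        return ret;
}
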
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index bb5947b0ce2d..51973ddc05a6 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -247,6 +247,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
rcu_read_unlock();
+
+ skb_set_inner_mac_header(skb, nhoff + grehlen);
+
return err;
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5882f584910e..36f5584d93c5 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(icmp_err_convert);
*/
struct icmp_control {
- void (*handler)(struct sk_buff *skb);
+ bool (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
@@ -746,7 +746,7 @@ static bool icmp_tag_validation(int proto)
* ICMP_PARAMETERPROB.
*/
-static void icmp_unreach(struct sk_buff *skb)
+static bool icmp_unreach(struct sk_buff *skb)
{
const struct iphdr *iph;
struct icmphdr *icmph;
@@ -784,8 +784,8 @@ static void icmp_unreach(struct sk_buff *skb)
*/
switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
default:
- LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
- &iph->daddr);
+ net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
+ &iph->daddr);
break;
case 2:
goto out;
@@ -798,8 +798,8 @@ static void icmp_unreach(struct sk_buff *skb)
}
break;
case ICMP_SR_FAILED:
- LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"),
- &iph->daddr);
+ net_dbg_ratelimited("%pI4: Source Route Failed\n",
+ &iph->daddr);
break;
default:
break;
@@ -839,10 +839,10 @@ static void icmp_unreach(struct sk_buff *skb)
icmp_socket_deliver(skb, info);
out:
- return;
+ return true;
out_err:
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
- goto out;
+ return false;
}
@@ -850,17 +850,20 @@ out_err:
* Handle ICMP_REDIRECT.
*/
-static void icmp_redirect(struct sk_buff *skb)
+static bool icmp_redirect(struct sk_buff *skb)
{
if (skb->len < sizeof(struct iphdr)) {
ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
- return;
+ return false;
}
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return;
+ if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
+ /* there ought to be a stat */
+ return false;
+ }
icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+ return true;
}
/*
@@ -875,7 +878,7 @@ static void icmp_redirect(struct sk_buff *skb)
* See also WRT handling of options once they are done and working.
*/
-static void icmp_echo(struct sk_buff *skb)
+static bool icmp_echo(struct sk_buff *skb)
{
struct net *net;
@@ -891,6 +894,8 @@ static void icmp_echo(struct sk_buff *skb)
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
}
+ /* should there be an ICMP stat for ignored echoes? */
+ return true;
}
/*
@@ -900,7 +905,7 @@ static void icmp_echo(struct sk_buff *skb)
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
-static void icmp_timestamp(struct sk_buff *skb)
+static bool icmp_timestamp(struct sk_buff *skb)
{
struct timespec tv;
struct icmp_bxm icmp_param;
@@ -927,15 +932,17 @@ static void icmp_timestamp(struct sk_buff *skb)
icmp_param.data_len = 0;
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
-out:
- return;
+ return true;
+
out_err:
ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
- goto out;
+ return false;
}
-static void icmp_discard(struct sk_buff *skb)
+static bool icmp_discard(struct sk_buff *skb)
{
+ /* pretend it was a success */
+ return true;
}
/*
@@ -946,6 +953,7 @@ int icmp_rcv(struct sk_buff *skb)
struct icmphdr *icmph;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
+ bool success;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
@@ -1012,7 +1020,12 @@ int icmp_rcv(struct sk_buff *skb)
}
}
- icmp_pointers[icmph->type].handler(skb);
+ success = icmp_pointers[icmph->type].handler(skb);
+
+ if (success) {
+ consume_skb(skb);
+ return 0;
+ }
drop:
kfree_skb(skb);
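
The point of the void-to-bool conversion above is that icmp_rcv() can now distinguish a successfully handled message (freed with consume_skb(), so it is not reported as a drop by drop-monitoring tools) from a failed one (kfree_skb()). A handler plugged into the icmp_control table therefore takes this shape; icmp_foo is hypothetical:

static bool icmp_foo(struct sk_buff *skb)
{
        /* same kind of sanity check icmp_redirect() above performs */
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                return false;   /* icmp_rcv() will kfree_skb() the packet */

        /* ... act on the message ... */
        return true;            /* icmp_rcv() will consume_skb() it */
}

/* wired up via an icmp_control entry: { .handler = icmp_foo, .error = 1 } */
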
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index bb15d0e03d4f..666cf364df86 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -112,17 +112,17 @@
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
-#define IGMP_V1_Router_Present_Timeout (400*HZ)
-#define IGMP_V2_Router_Present_Timeout (400*HZ)
-#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
-#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
-#define IGMP_Query_Response_Interval (10*HZ)
-#define IGMP_Query_Robustness_Variable 2
+#define IGMP_V1_ROUTER_PRESENT_TIMEOUT (400*HZ)
+#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ)
+#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ)
+#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ)
+#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ)
+#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2
-#define IGMP_Initial_Report_Delay (1)
+#define IGMP_INITIAL_REPORT_DELAY (1)
-/* IGMP_Initial_Report_Delay is not from IGMP specs!
+/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs!
* IGMP specs require to report membership immediately after
* joining a group, but we delay the first report by a
* small interval. It seems more natural and still does not
@@ -878,15 +878,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
if (ih->code == 0) {
/* Alas, old v1 router presents here. */
- max_delay = IGMP_Query_Response_Interval;
+ max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
in_dev->mr_v1_seen = jiffies +
- IGMP_V1_Router_Present_Timeout;
+ IGMP_V1_ROUTER_PRESENT_TIMEOUT;
group = 0;
} else {
/* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
- IGMP_V2_Router_Present_Timeout;
+ IGMP_V2_ROUTER_PRESENT_TIMEOUT;
}
/* cancel the interface change timer */
in_dev->mr_ifc_count = 0;
@@ -898,7 +898,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
return true; /* ignore bogus packet; freed by caller */
} else if (IGMP_V1_SEEN(in_dev)) {
/* This is a v3 query with v1 queriers present */
- max_delay = IGMP_Query_Response_Interval;
+ max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
group = 0;
} else if (IGMP_V2_SEEN(in_dev)) {
/* this is a v3 query with v2 queriers present;
@@ -1217,7 +1217,7 @@ static void igmp_group_added(struct ip_mc_list *im)
return;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
spin_lock_bh(&im->lock);
- igmp_start_timer(im, IGMP_Initial_Report_Delay);
+ igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
spin_unlock_bh(&im->lock);
return;
}
@@ -1540,7 +1540,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
#ifdef CONFIG_IP_MULTICAST
-int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable;
+int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE;
#endif
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
@@ -2686,11 +2686,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (v == SEQ_START_TOKEN) {
- seq_printf(seq,
- "%3s %6s "
- "%10s %10s %6s %6s\n", "Idx",
- "Device", "MCA",
- "SRC", "INC", "EXC");
+ seq_puts(seq, "Idx Device MCA SRC INC EXC\n");
} else {
seq_printf(seq,
"%3d %6.6s 0x%08x "
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 19419b60cb37..e7920352646a 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -458,6 +458,6 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
". Dropping fragment.\n";
if (PTR_ERR(q) == -ENOBUFS)
- LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+ net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2811cc18701a..e5b6d0ddcb58 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -80,7 +80,7 @@ struct ipq {
struct inet_peer *peer;
};
-static inline u8 ip4_frag_ecn(u8 tos)
+static u8 ip4_frag_ecn(u8 tos)
{
return 1 << (tos & INET_ECN_MASK);
}
@@ -148,7 +148,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}
-static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
+static void ip4_frag_free(struct inet_frag_queue *q)
{
struct ipq *qp;
@@ -160,7 +160,7 @@ static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
/* Destruction primitives. */
-static __inline__ void ipq_put(struct ipq *ipq)
+static void ipq_put(struct ipq *ipq)
{
inet_frag_put(&ipq->q, &ip4_frags);
}
@@ -236,7 +236,7 @@ out:
/* Find the correct entry in the "incomplete datagrams" queue for
* this IP datagram, and create new one, if nothing is found.
*/
-static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
struct inet_frag_queue *q;
struct ip4_create_arg arg;
@@ -256,7 +256,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
}
/* Is the fragment too far ahead to be part of ipq? */
-static inline int ip_frag_too_far(struct ipq *qp)
+static int ip_frag_too_far(struct ipq *qp)
{
struct inet_peer *peer = qp->peer;
unsigned int max = sysctl_ipfrag_max_dist;
@@ -618,8 +618,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
return 0;
out_nomem:
- LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
- qp);
+ net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
err = -ENOMEM;
goto out_fail;
out_oversize:
@@ -795,16 +794,16 @@ static void __init ip4_frags_ctl_register(void)
register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
-static inline int ip4_frags_ns_ctl_register(struct net *net)
+static int ip4_frags_ns_ctl_register(struct net *net)
{
return 0;
}
-static inline void ip4_frags_ns_ctl_unregister(struct net *net)
+static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}
-static inline void __init ip4_frags_ctl_register(void)
+static void __init ip4_frags_ctl_register(void)
{
}
#endif
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 12055fdbe716..4f4bf5b99686 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
- if (IS_ERR(skb))
- goto out;
-
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
* to gre header.
*/
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ skb_reset_mac_header(skb);
} else {
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
@@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
+ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+ if (IS_ERR(skb))
+ goto out;
+
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;
@@ -789,7 +790,7 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT,
t->encap.dport) ||
nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
- t->encap.dport))
+ t->encap.flags))
goto nla_put_failure;
return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index bc6471d4abcd..b50861b22b6b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -662,12 +662,10 @@ slow_path:
if (len < left) {
len &= ~7;
}
- /*
- * Allocate buffer.
- */
- if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
- NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
+ /* Allocate buffer */
+ skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
+ if (!skb2) {
err = -ENOMEM;
goto fail;
}
@@ -754,14 +752,16 @@ EXPORT_SYMBOL(ip_fragment);
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
- struct iovec *iov = from;
+ struct msghdr *msg = from;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (memcpy_fromiovecend(to, iov, offset, len) < 0)
+ /* XXX: stripping const */
+ if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0)
return -EFAULT;
} else {
__wsum csum = 0;
- if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
+ /* XXX: stripping const */
+ if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0)
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, odd);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9daf2177dc00..8a89c738b7a3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -192,7 +192,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
int err, val;
struct cmsghdr *cmsg;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
#if IS_ENABLED(CONFIG_IPV6)
@@ -399,6 +399,22 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
kfree_skb(skb);
}
+static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
+ const struct sk_buff *skb,
+ int ee_origin)
+{
+ struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+
+ if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
+ (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+ (!skb->dev))
+ return false;
+
+ info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
+ info->ipi_ifindex = skb->dev->ifindex;
+ return true;
+}
+
/*
* Handle MSG_ERRQUEUE
*/
@@ -414,6 +430,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int err;
int copied;
+ WARN_ON_ONCE(sk->sk_family == AF_INET6);
+
err = -EAGAIN;
skb = sock_dequeue_err_skb(sk);
if (skb == NULL)
@@ -424,7 +442,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
@@ -444,7 +462,9 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
sin->sin_family = AF_UNSPEC;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
+
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+ ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
@@ -1049,7 +1069,7 @@ e_inval:
}
/**
- * ipv4_pktinfo_prepare - transfert some info from rtable to skb
+ * ipv4_pktinfo_prepare - transfer some info from rtable to skb
* @sk: socket
* @skb: buffer
*
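
The new ipv4_pktinfo_prepare_errqueue() above fills in the packet-info fields for timestamping messages read from the error queue, provided the socket opted in with SOF_TIMESTAMPING_OPT_CMSG, so that the IP_PKTINFO control message (if the socket has it enabled) reflects the original packet. A userspace sketch of that opt-in; flag names are from <linux/net_tstamp.h>, SO_TIMESTAMPING comes from the socket headers (<asm/socket.h> on older libcs), and error handling is omitted:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_tx_timestamp_cmsg(int fd)
{
        int val = SOF_TIMESTAMPING_TX_SOFTWARE |
                  SOF_TIMESTAMPING_SOFTWARE |
                  SOF_TIMESTAMPING_OPT_CMSG;

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}
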
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 0bb8e141eacc..d3e447936720 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -56,7 +56,6 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
-#include <net/gue.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -491,17 +490,56 @@ EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
static int ip_encap_hlen(struct ip_tunnel_encap *e)
{
- switch (e->type) {
- case TUNNEL_ENCAP_NONE:
+ const struct ip_tunnel_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
return 0;
- case TUNNEL_ENCAP_FOU:
- return sizeof(struct udphdr);
- case TUNNEL_ENCAP_GUE:
- return sizeof(struct udphdr) + sizeof(struct guehdr);
- default:
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
return -EINVAL;
- }
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
+ unsigned int num)
+{
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
+ return !cmpxchg((const struct ip_tunnel_encap_ops **)
+ &iptun_encaps[num],
+ NULL, ops) ? 0 : -1;
}
+EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
+
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
+ unsigned int num)
+{
+ int ret;
+
+ if (num >= MAX_IPTUN_ENCAP_OPS)
+ return -ERANGE;
+
+ ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
+ &iptun_encaps[num],
+ ops, NULL) == ops) ? 0 : -1;
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap)
@@ -526,63 +564,25 @@ int ip_tunnel_encap_setup(struct ip_tunnel *t,
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
-static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- size_t hdr_len, u8 *protocol, struct flowi4 *fl4)
-{
- struct udphdr *uh;
- __be16 sport;
- bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
- int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-
- skb = iptunnel_handle_offloads(skb, csum, type);
-
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- /* Get length and hash before making space in skb */
-
- sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
- skb, 0, 0, false);
-
- skb_push(skb, hdr_len);
-
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
-
- if (e->type == TUNNEL_ENCAP_GUE) {
- struct guehdr *guehdr = (struct guehdr *)&uh[1];
-
- guehdr->version = 0;
- guehdr->hlen = 0;
- guehdr->flags = 0;
- guehdr->next_hdr = *protocol;
- }
-
- uh->dest = e->dport;
- uh->source = sport;
- uh->len = htons(skb->len);
- uh->check = 0;
- udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
- fl4->saddr, fl4->daddr, skb->len);
-
- *protocol = IPPROTO_UDP;
-
- return 0;
-}
-
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
u8 *protocol, struct flowi4 *fl4)
{
- switch (t->encap.type) {
- case TUNNEL_ENCAP_NONE:
+ const struct ip_tunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
return 0;
- case TUNNEL_ENCAP_FOU:
- case TUNNEL_ENCAP_GUE:
- return fou_build_header(skb, &t->encap, t->encap_hlen,
- protocol, fl4);
- default:
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
return -EINVAL;
- }
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl4);
+ rcu_read_unlock();
+
+ return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap);
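
For context, a minimal sketch of how an encapsulation provider would plug into the iptun_encaps[] array introduced above. The ip_tunnel_encap_ops callback signatures, the add/del helpers and the TUNNEL_ENCAP_FOU slot are taken from the hunks in this patch; the callback bodies, the module boilerplate and the assumption that the declarations live in net/ip_tunnels.h are illustrative only, not the real net/ipv4/fou.c code.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

static int example_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);	/* plain UDP encapsulation */
}

static int example_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
				u8 *protocol, struct flowi4 *fl4)
{
	/* a real provider pushes its UDP header here and rewrites
	 * *protocol to IPPROTO_UDP before returning
	 */
	return 0;
}

static const struct ip_tunnel_encap_ops example_encap_ops = {
	.encap_hlen	= example_encap_hlen,
	.build_header	= example_build_header,
};

static int __init example_encap_init(void)
{
	/* reusing the FOU slot purely for illustration;
	 * -ERANGE for an out-of-range slot, -1 if the slot is already taken
	 */
	return ip_tunnel_encap_add_ops(&example_encap_ops, TUNNEL_ENCAP_FOU);
}
module_init(example_encap_init);

static void __exit example_encap_exit(void)
{
	ip_tunnel_encap_del_ops(&example_encap_ops, TUNNEL_ENCAP_FOU);
}
module_exit(example_encap_exit);

MODULE_LICENSE("GPL");
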
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 648fa1490ea7..7fa18bc7e47f 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -115,7 +115,7 @@
*/
int ic_set_manually __initdata = 0; /* IPconfig parameters set manually */
-static int ic_enable __initdata = 0; /* IP config enabled? */
+static int ic_enable __initdata; /* IP config enabled? */
/* Protocol choice */
int ic_proto_enabled __initdata = 0
@@ -130,7 +130,7 @@ int ic_proto_enabled __initdata = 0
#endif
;
-static int ic_host_name_set __initdata = 0; /* Host name set by us? */
+static int ic_host_name_set __initdata; /* Host name set by us? */
__be32 ic_myaddr = NONE; /* My IP address */
static __be32 ic_netmask = NONE; /* Netmask for local subnet */
@@ -160,17 +160,17 @@ static u8 ic_domain[64]; /* DNS (not NIS) domain name */
static char user_dev_name[IFNAMSIZ] __initdata = { 0, };
/* Protocols supported by available interfaces */
-static int ic_proto_have_if __initdata = 0;
+static int ic_proto_have_if __initdata;
/* MTU for boot device */
-static int ic_dev_mtu __initdata = 0;
+static int ic_dev_mtu __initdata;
#ifdef IPCONFIG_DYNAMIC
static DEFINE_SPINLOCK(ic_recv_lock);
-static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */
+static volatile int ic_got_reply __initdata; /* Proto(s) that replied */
#endif
#ifdef IPCONFIG_DHCP
-static int ic_dhcp_msgtype __initdata = 0; /* DHCP msg type received */
+static int ic_dhcp_msgtype __initdata; /* DHCP msg type received */
#endif
@@ -186,8 +186,8 @@ struct ic_device {
__be32 xid;
};
-static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
-static struct net_device *ic_dev __initdata = NULL; /* Selected device */
+static struct ic_device *ic_first_dev __initdata; /* List of open device */
+static struct net_device *ic_dev __initdata; /* Selected device */
static bool __init ic_is_init_dev(struct net_device *dev)
{
@@ -498,7 +498,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
struct arphdr *rarp;
unsigned char *rarp_ptr;
__be32 sip, tip;
- unsigned char *sha, *tha; /* s for "source", t for "target" */
+ unsigned char *tha; /* t for "target" */
struct ic_device *d;
if (!net_eq(dev_net(dev), &init_net))
@@ -549,7 +549,6 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
goto drop_unlock; /* should never happen */
/* Extract variable-width fields */
- sha = rarp_ptr;
rarp_ptr += dev->addr_len;
memcpy(&sip, rarp_ptr, 4);
rarp_ptr += 4;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 37096d64730e..40403114f00a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -465,7 +465,7 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
tunnel->encap.dport) ||
nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
- tunnel->encap.dport))
+ tunnel->encap.flags))
goto nla_put_failure;
return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 4c019d5c3f57..59f883d9cadf 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -113,6 +113,15 @@ config NFT_MASQ_IPV4
This is the expression that provides IPv4 masquerading support for
nf_tables.
+config NFT_REDIR_IPV4
+ tristate "IPv4 redirect support for nf_tables"
+ depends on NF_TABLES_IPV4
+ depends on NFT_REDIR
+ select NF_NAT_REDIRECT
+ help
+ This is the expression that provides IPv4 redirect support for
+ nf_tables.
+
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
depends on NF_CONNTRACK_SNMP
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index f4cef5af0969..7fe6c703528f 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
+obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index ccfc78db12ee..d059182c1466 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -10,6 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -74,12 +75,12 @@ static void dump_arp_packet(struct nf_log_buf *m,
ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
}
-void nf_log_arp_packet(struct net *net, u_int8_t pf,
- unsigned int hooknum, const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct nf_loginfo *loginfo,
- const char *prefix)
+static void nf_log_arp_packet(struct net *net, u_int8_t pf,
+ unsigned int hooknum, const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
{
struct nf_log_buf *m;
@@ -130,8 +131,17 @@ static int __init nf_log_arp_init(void)
if (ret < 0)
return ret;
- nf_log_register(NFPROTO_ARP, &nf_arp_logger);
+ ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger);
+ if (ret < 0) {
+ pr_err("failed to register logger\n");
+ goto err1;
+ }
+
return 0;
+
+err1:
+ unregister_pernet_subsys(&nf_log_arp_net_ops);
+ return ret;
}
static void __exit nf_log_arp_exit(void)
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 078bdca1b607..75101980eeee 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -366,8 +367,17 @@ static int __init nf_log_ipv4_init(void)
if (ret < 0)
return ret;
- nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
+ ret = nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
+ if (ret < 0) {
+ pr_err("failed to register logger\n");
+ goto err1;
+ }
+
return 0;
+
+err1:
+ unregister_pernet_subsys(&nf_log_ipv4_net_ops);
+ return ret;
}
static void __exit nf_log_ipv4_exit(void)
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 1baaa83dfe5c..536da7bc598a 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -11,6 +11,7 @@
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
+#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_reject.h>
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
new file mode 100644
index 000000000000..ff2d23d8c87a
--- /dev/null
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_redirect.h>
+#include <net/netfilter/nft_redir.h>
+
+static void nft_redir_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_redir *priv = nft_expr_priv(expr);
+ struct nf_nat_ipv4_multi_range_compat mr;
+ unsigned int verdict;
+
+ memset(&mr, 0, sizeof(mr));
+ if (priv->sreg_proto_min) {
+ mr.range[0].min.all = (__force __be16)
+ data[priv->sreg_proto_min].data[0];
+ mr.range[0].max.all = (__force __be16)
+ data[priv->sreg_proto_max].data[0];
+ mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ mr.range[0].flags |= priv->flags;
+
+ verdict = nf_nat_redirect_ipv4(pkt->skb, &mr, pkt->ops->hooknum);
+ data[NFT_REG_VERDICT].verdict = verdict;
+}
+
+static struct nft_expr_type nft_redir_ipv4_type;
+static const struct nft_expr_ops nft_redir_ipv4_ops = {
+ .type = &nft_redir_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+ .eval = nft_redir_ipv4_eval,
+ .init = nft_redir_init,
+ .dump = nft_redir_dump,
+ .validate = nft_redir_validate,
+};
+
+static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "redir",
+ .ops = &nft_redir_ipv4_ops,
+ .policy = nft_redir_policy,
+ .maxattr = NFTA_REDIR_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_redir_ipv4_module_init(void)
+{
+ return nft_register_expr(&nft_redir_ipv4_type);
+}
+
+static void __exit nft_redir_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_redir_ipv4_type);
+}
+
+module_init(nft_redir_ipv4_module_init);
+module_exit(nft_redir_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index ed33299c56d1..d729542bd1b7 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -19,9 +19,9 @@
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/nft_reject.h>
-void nft_reject_ipv4_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
@@ -36,7 +36,6 @@ void nft_reject_ipv4_eval(const struct nft_expr *expr,
data[NFT_REG_VERDICT].verdict = NF_DROP;
}
-EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
static struct nft_expr_type nft_reject_ipv4_type;
static const struct nft_expr_ops nft_reject_ipv4_ops = {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5d740cccf69e..c0d82f78d364 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -662,7 +662,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
* Fetch the ICMP header provided by the userland.
* iovec is modified! The ICMP header is consumed.
*/
- if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
+ if (memcpy_from_msg(user_icmph, msg, icmph_len))
return -EFAULT;
if (family == AF_INET) {
@@ -811,7 +811,8 @@ back_from_confirm:
pfh.icmph.checksum = 0;
pfh.icmph.un.echo.id = inet->inet_sport;
pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
- pfh.iov = msg->msg_iov;
+ /* XXX: stripping const */
+ pfh.iov = (struct iovec *)msg->msg_iter.iov;
pfh.wcheck = 0;
pfh.family = AF_INET;
@@ -869,7 +870,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
/* Don't bother checking the checksum */
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
@@ -949,7 +950,7 @@ EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
* All we need to do is get the socket.
*/
-void ping_rcv(struct sk_buff *skb)
+bool ping_rcv(struct sk_buff *skb)
{
struct sock *sk;
struct net *net = dev_net(skb->dev);
@@ -968,11 +969,11 @@ void ping_rcv(struct sk_buff *skb)
pr_debug("rcv on socket %p\n", sk);
ping_queue_rcv_skb(sk, skb_get(skb));
sock_put(sk);
- return;
+ return true;
}
pr_debug("no socket, dropping\n");
- /* We're called from icmp_rcv(). kfree_skb() is done there. */
+ return false;
}
EXPORT_SYMBOL_GPL(ping_rcv);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8e3eb39f84e7..8f9cd200ce20 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -181,6 +181,7 @@ static const struct snmp_mib snmp4_udp_list[] = {
SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS),
SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_SENTINEL
};
@@ -287,6 +288,10 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV),
SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS),
SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT),
+ SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT),
+ SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
+ SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
+ SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
SNMP_MIB_SENTINEL
};
@@ -296,12 +301,12 @@ static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
int j;
if (count) {
- seq_printf(seq, "\nIcmpMsg:");
+ seq_puts(seq, "\nIcmpMsg:");
for (j = 0; j < count; ++j)
seq_printf(seq, " %sType%u",
type[j] & 0x100 ? "Out" : "In",
type[j] & 0xff);
- seq_printf(seq, "\nIcmpMsg:");
+ seq_puts(seq, "\nIcmpMsg:");
for (j = 0; j < count; ++j)
seq_printf(seq, " %lu", vals[j]);
}
@@ -342,7 +347,7 @@ static void icmp_put(struct seq_file *seq)
seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " In%s", icmpmibmap[i].name);
- seq_printf(seq, " OutMsgs OutErrors");
+ seq_puts(seq, " OutMsgs OutErrors");
for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 739db3100c23..0bb68df5055d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -79,6 +79,16 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
+#include <linux/uio.h>
+
+struct raw_frag_vec {
+ struct msghdr *msg;
+ union {
+ struct icmphdr icmph;
+ char c[1];
+ } hdr;
+ int hlen;
+};
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -420,53 +430,57 @@ error:
return err;
}
-static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg)
+static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4)
{
- struct iovec *iov;
- u8 __user *type = NULL;
- u8 __user *code = NULL;
- int probed = 0;
- unsigned int i;
+ int err;
- if (!msg->msg_iov)
+ if (fl4->flowi4_proto != IPPROTO_ICMP)
return 0;
- for (i = 0; i < msg->msg_iovlen; i++) {
- iov = &msg->msg_iov[i];
- if (!iov)
- continue;
-
- switch (fl4->flowi4_proto) {
- case IPPROTO_ICMP:
- /* check if one-byte field is readable or not. */
- if (iov->iov_base && iov->iov_len < 1)
- break;
-
- if (!type) {
- type = iov->iov_base;
- /* check if code field is readable or not. */
- if (iov->iov_len > 1)
- code = type + 1;
- } else if (!code)
- code = iov->iov_base;
-
- if (type && code) {
- if (get_user(fl4->fl4_icmp_type, type) ||
- get_user(fl4->fl4_icmp_code, code))
- return -EFAULT;
- probed = 1;
- }
- break;
- default:
- probed = 1;
- break;
- }
- if (probed)
- break;
- }
+ /* We only need the first two bytes. */
+ rfv->hlen = 2;
+
+ err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen);
+ if (err)
+ return err;
+
+ fl4->fl4_icmp_type = rfv->hdr.icmph.type;
+ fl4->fl4_icmp_code = rfv->hdr.icmph.code;
+
return 0;
}
+static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb)
+{
+ struct raw_frag_vec *rfv = from;
+
+ if (offset < rfv->hlen) {
+ int copy = min(rfv->hlen - offset, len);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ memcpy(to, rfv->hdr.c + offset, copy);
+ else
+ skb->csum = csum_block_add(
+ skb->csum,
+ csum_partial_copy_nocheck(rfv->hdr.c + offset,
+ to, copy, 0),
+ odd);
+
+ odd = 0;
+ offset += copy;
+ to += copy;
+ len -= copy;
+
+ if (!len)
+ return 0;
+ }
+
+ offset -= rfv->hlen;
+
+ return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
+}
+
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
@@ -480,6 +494,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
u8 tos;
int err;
struct ip_options_data opt_copy;
+ struct raw_frag_vec rfv;
err = -EMSGSIZE;
if (len > 0xFFFF)
@@ -585,7 +600,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
daddr, saddr, 0, 0);
if (!inet->hdrincl) {
- err = raw_probe_proto_opt(&fl4, msg);
+ rfv.msg = msg;
+ rfv.hlen = 0;
+
+ err = raw_probe_proto_opt(&rfv, &fl4);
if (err)
goto done;
}
@@ -607,7 +625,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
back_from_confirm:
if (inet->hdrincl)
- err = raw_send_hdrinc(sk, &fl4, msg->msg_iov, len,
+ /* XXX: stripping const */
+ err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len,
&rt, msg->msg_flags);
else {
@@ -616,8 +635,8 @@ back_from_confirm:
if (!ipc.addr)
ipc.addr = fl4.daddr;
lock_sock(sk);
- err = ip_append_data(sk, &fl4, ip_generic_getfrag,
- msg->msg_iov, len, 0,
+ err = ip_append_data(sk, &fl4, raw_getfrag,
+ &rfv, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
@@ -718,7 +737,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
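
The probe above only ever needs the first two bytes of a user-supplied ICMP message, because struct icmphdr begins with the one-byte type and code fields that the flow key wants. A small userspace sketch of that layout, with an arbitrary echo-request payload (not kernel code):

#include <stdio.h>
#include <string.h>

struct icmp_probe {		/* leading layout of struct icmphdr */
	unsigned char type;
	unsigned char code;
	/* checksum, id, sequence ... irrelevant for the route lookup */
};

int main(void)
{
	unsigned char payload[] = { 8, 0, 0xde, 0xad };	/* ICMP_ECHO, code 0 */
	struct icmp_probe p;

	memcpy(&p, payload, 2);		/* mirrors rfv->hlen == 2 above */
	printf("fl4_icmp_type=%u fl4_icmp_code=%u\n", p.type, p.code);
	return 0;
}
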
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 32b98d0207b4..45fe60c5238e 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -19,10 +19,6 @@
#include <net/tcp.h>
#include <net/route.h>
-/* Timestamps: lowest bits store TCP options */
-#define TSBITS 6
-#define TSMASK (((__u32)1 << TSBITS) - 1)
-
extern int sysctl_tcp_syncookies;
static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
@@ -30,6 +26,30 @@ static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
+/* TCP Timestamp: 6 lowest bits of timestamp sent in the cookie SYN-ACK
+ * stores TCP options:
+ *
+ * MSB LSB
+ * | 31 ... 6 | 5 | 4 | 3 2 1 0 |
+ * | Timestamp | ECN | SACK | WScale |
+ *
+ * When we receive a valid cookie-ACK, we look at the echoed tsval (if
+ * any) to figure out which TCP options we should use for the rebuilt
+ * connection.
+ *
+ * A WScale setting of '0xf' (which is an invalid scaling value)
+ * means that original syn did not include the TCP window scaling option.
+ */
+#define TS_OPT_WSCALE_MASK 0xf
+#define TS_OPT_SACK BIT(4)
+#define TS_OPT_ECN BIT(5)
+/* There is no TS_OPT_TIMESTAMP:
+ * if ACK contains timestamp option, we already know it was
+ * requested/supported by the syn/synack exchange.
+ */
+#define TSBITS 6
+#define TSMASK (((__u32)1 << TSBITS) - 1)
+
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
ipv4_cookie_scratch);
@@ -67,9 +87,11 @@ __u32 cookie_init_timestamp(struct request_sock *req)
ireq = inet_rsk(req);
- options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
- options |= ireq->sack_ok << 4;
- options |= ireq->ecn_ok << 5;
+ options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ if (ireq->sack_ok)
+ options |= TS_OPT_SACK;
+ if (ireq->ecn_ok)
+ options |= TS_OPT_ECN;
ts = ts_now & ~TSMASK;
ts |= options;
@@ -219,16 +241,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
* additional tcp options in the timestamp.
* This extracts these options from the timestamp echo.
*
- * The lowest 4 bits store snd_wscale.
- * next 2 bits indicate SACK and ECN support.
- *
- * return false if we decode an option that should not be.
+ * return false if we decode a tcp option that is disabled
+ * on the host.
*/
-bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
- struct net *net, bool *ecn_ok)
+bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
{
/* echoed timestamp, lowest bits contain options */
- u32 options = tcp_opt->rcv_tsecr & TSMASK;
+ u32 options = tcp_opt->rcv_tsecr;
if (!tcp_opt->saw_tstamp) {
tcp_clear_options(tcp_opt);
@@ -238,22 +257,35 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
if (!sysctl_tcp_timestamps)
return false;
- tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
- *ecn_ok = (options >> 5) & 1;
- if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn)
- return false;
+ tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
if (tcp_opt->sack_ok && !sysctl_tcp_sack)
return false;
- if ((options & 0xf) == 0xf)
+ if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
return true; /* no window scaling */
tcp_opt->wscale_ok = 1;
- tcp_opt->snd_wscale = options & 0xf;
+ tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
+
return sysctl_tcp_window_scaling != 0;
}
-EXPORT_SYMBOL(cookie_check_timestamp);
+EXPORT_SYMBOL(cookie_timestamp_decode);
+
+bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
+ const struct net *net, const struct dst_entry *dst)
+{
+ bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN;
+
+ if (!ecn_ok)
+ return false;
+
+ if (net->ipv4.sysctl_tcp_ecn)
+ return true;
+
+ return dst_feature(dst, RTAX_FEATURE_ECN);
+}
+EXPORT_SYMBOL(cookie_ecn_ok);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
{
@@ -269,14 +301,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct rtable *rt;
__u8 rcv_wscale;
- bool ecn_ok = false;
struct flowi4 fl4;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
- if (tcp_synq_no_recent_overflow(sk) ||
- (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) {
+ if (tcp_synq_no_recent_overflow(sk))
+ goto out;
+
+ mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
+ if (mss == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
@@ -287,7 +321,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, 0, NULL);
- if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+ if (!cookie_timestamp_decode(&tcp_opt))
goto out;
ret = NULL;
@@ -305,7 +339,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_loc_addr = ip_hdr(skb)->daddr;
ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
ireq->ir_mark = inet_request_mark(sk, skb);
- ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
@@ -354,6 +387,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
dst_metric(&rt->dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
+ ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
ret = get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being setup
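
A compact illustration of the timestamp bit layout documented above, written as standalone userspace C with made-up option values (wscale 7, SACK on, ECN off); it performs the same masking that cookie_init_timestamp() and cookie_timestamp_decode() apply to the low TSBITS bits:

#include <stdio.h>

#define TS_OPT_WSCALE_MASK	0xf
#define TS_OPT_SACK		(1u << 4)
#define TS_OPT_ECN		(1u << 5)
#define TSBITS			6
#define TSMASK			(((unsigned)1 << TSBITS) - 1)

int main(void)
{
	unsigned ts_now = 0x075bcd15;		/* arbitrary clock sample */
	unsigned options = 7;			/* snd_wscale */
	options |= TS_OPT_SACK;			/* peer offered SACK */

	unsigned tsval = (ts_now & ~TSMASK) | options;	/* sent in the SYN-ACK */

	/* the echoed tsval on the final ACK hands the options back */
	printf("wscale=%u sack=%u ecn=%u\n",
	       tsval & TS_OPT_WSCALE_MASK,
	       !!(tsval & TS_OPT_SACK),
	       !!(tsval & TS_OPT_ECN));
	return 0;
}
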
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b3c53c8b331e..e0ee384a448f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -496,6 +496,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_max_reordering",
+ .data = &sysctl_tcp_max_reordering,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "tcp_dsack",
.data = &sysctl_tcp_dsack,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c2bcb8dd5d..3075723c729b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,47 +835,29 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
int large_allowed)
{
struct tcp_sock *tp = tcp_sk(sk);
- u32 xmit_size_goal, old_size_goal;
-
- xmit_size_goal = mss_now;
-
- if (large_allowed && sk_can_gso(sk)) {
- u32 gso_size, hlen;
-
- /* Maybe we should/could use sk->sk_prot->max_header here ? */
- hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
- inet_csk(sk)->icsk_ext_hdr_len +
- tp->tcp_header_len;
-
- /* Goal is to send at least one packet per ms,
- * not one big TSO packet every 100 ms.
- * This preserves ACK clocking and is consistent
- * with tcp_tso_should_defer() heuristic.
- */
- gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
- gso_size = max_t(u32, gso_size,
- sysctl_tcp_min_tso_segs * mss_now);
-
- xmit_size_goal = min_t(u32, gso_size,
- sk->sk_gso_max_size - 1 - hlen);
-
- xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-
- /* We try hard to avoid divides here */
- old_size_goal = tp->xmit_size_goal_segs * mss_now;
-
- if (likely(old_size_goal <= xmit_size_goal &&
- old_size_goal + mss_now > xmit_size_goal)) {
- xmit_size_goal = old_size_goal;
- } else {
- tp->xmit_size_goal_segs =
- min_t(u16, xmit_size_goal / mss_now,
- sk->sk_gso_max_segs);
- xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
- }
+ u32 new_size_goal, size_goal, hlen;
+
+ if (!large_allowed || !sk_can_gso(sk))
+ return mss_now;
+
+ /* Maybe we should/could use sk->sk_prot->max_header here ? */
+ hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+ inet_csk(sk)->icsk_ext_hdr_len +
+ tp->tcp_header_len;
+
+ new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+ new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
+
+ /* We try hard to avoid divides here */
+ size_goal = tp->gso_segs * mss_now;
+ if (unlikely(new_size_goal < size_goal ||
+ new_size_goal >= size_goal + mss_now)) {
+ tp->gso_segs = min_t(u16, new_size_goal / mss_now,
+ sk->sk_gso_max_segs);
+ size_goal = tp->gso_segs * mss_now;
}
- return max(xmit_size_goal, mss_now);
+ return max(size_goal, mss_now);
}
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
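
The rewritten helper above only touches tp->gso_segs when the goal drifts by at least one MSS. A worked example in standalone C with assumed numbers (mss 1448, 64KB GSO size cap, 72 bytes of headers, gso_max_segs 64) and with the receive-window clamp left out:

#include <stdio.h>

int main(void)
{
	unsigned mss = 1448, hlen = 72;
	unsigned gso_max_size = 65536, gso_max_segs = 64;
	unsigned gso_segs = 0;			/* cached, as in tp->gso_segs */

	unsigned new_size_goal = gso_max_size - 1 - hlen;	/* 65463 */
	unsigned size_goal = gso_segs * mss;			/* 0 on first call */

	/* only recompute when off by at least one MSS, to avoid divides */
	if (new_size_goal < size_goal || new_size_goal >= size_goal + mss) {
		gso_segs = new_size_goal / mss;			/* 45 */
		if (gso_segs > gso_max_segs)
			gso_segs = gso_max_segs;
		size_goal = gso_segs * mss;			/* 65160 */
	}

	printf("size_goal=%u bytes (%u segments)\n", size_goal, gso_segs);
	return 0;
}
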
@@ -1085,7 +1067,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
- struct iovec *iov;
+ const struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags, err, copied = 0;
@@ -1136,8 +1118,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
- iovlen = msg->msg_iovlen;
- iov = msg->msg_iov;
+ iovlen = msg->msg_iter.nr_segs;
+ iov = msg->msg_iter.iov;
copied = 0;
err = -EPIPE;
@@ -1349,7 +1331,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
if (len > 0) {
if (!(flags & MSG_TRUNC))
- err = memcpy_toiovec(msg->msg_iov, &c, 1);
+ err = memcpy_to_msg(msg, &c, 1);
len = 1;
} else
msg->msg_flags |= MSG_TRUNC;
@@ -1377,7 +1359,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
/* XXX -- need to support SO_PEEK_OFF */
skb_queue_walk(&sk->sk_write_queue, skb) {
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+ err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
if (err)
break;
@@ -1729,7 +1711,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
user_recv = current;
tp->ucopy.task = user_recv;
- tp->ucopy.iov = msg->msg_iov;
+ tp->ucopy.msg = msg;
}
tp->ucopy.len = len;
@@ -1833,8 +1815,7 @@ do_prequeue:
}
if (!(flags & MSG_TRUNC)) {
- err = skb_copy_datagram_iovec(skb, offset,
- msg->msg_iov, used);
+ err = skb_copy_datagram_msg(skb, offset, msg, used);
if (err) {
/* Exception. Bailout! */
if (!copied)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index b1c5970d47a1..27ead0dd16bc 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -1,5 +1,5 @@
/*
- * Plugable TCP congestion control support and newReno
+ * Pluggable TCP congestion control support and newReno
* congestion control.
* Based on ideas from I/O scheduler support and Web100.
*
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 20de0118c98e..6b6002416a73 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -363,16 +363,28 @@ static void hystart_update(struct sock *sk, u32 delay)
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
- if (!(ca->found & hystart_detect)) {
+ if (ca->found & hystart_detect)
+ return;
+
+ if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock();
/* first detection parameter - ack-train detection */
if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
ca->last_ack = now;
- if ((s32)(now - ca->round_start) > ca->delay_min >> 4)
+ if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
ca->found |= HYSTART_ACK_TRAIN;
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTTRAINDETECT);
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTTRAINCWND,
+ tp->snd_cwnd);
+ tp->snd_ssthresh = tp->snd_cwnd;
+ }
}
+ }
+ if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
@@ -381,15 +393,16 @@ static void hystart_update(struct sock *sk, u32 delay)
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
- HYSTART_DELAY_THRESH(ca->delay_min>>4))
+ HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
ca->found |= HYSTART_DELAY;
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTDELAYDETECT);
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTDELAYCWND,
+ tp->snd_cwnd);
+ tp->snd_ssthresh = tp->snd_cwnd;
+ }
}
- /*
- * Either one of two conditions are met,
- * we exit from slow start immediately.
- */
- if (ca->found & hystart_detect)
- tp->snd_ssthresh = tp->snd_cwnd;
}
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d107ee246a1d..075ab4d5af5e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,6 +81,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
+int sysctl_tcp_max_reordering __read_mostly = 300;
EXPORT_SYMBOL(sysctl_tcp_reordering);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
@@ -833,7 +834,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
if (metric > tp->reordering) {
int mib_idx;
- tp->reordering = min(TCP_MAX_REORDERING, metric);
+ tp->reordering = min(sysctl_tcp_max_reordering, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
@@ -4367,7 +4368,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto err_free;
- if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+ if (memcpy_from_msg(skb_put(skb, size), msg, size))
goto err_free;
TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4420,7 +4421,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
__set_current_state(TASK_RUNNING);
local_bh_enable();
- if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
+ if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
eaten = (chunk == skb->len);
@@ -4940,10 +4941,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
local_bh_enable();
if (skb_csum_unnecessary(skb))
- err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
+ err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
else
- err = skb_copy_and_csum_datagram_iovec(skb, hlen,
- tp->ucopy.iov);
+ err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg);
if (!err) {
tp->ucopy.len -= chunk;
@@ -5030,7 +5030,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
/* step 3: check security and precedence [ignored] */
/* step 4: Check for a SYN
- * RFC 5691 4.2 : Send a challenge ack
+ * RFC 5961 4.2 : Send a challenge ack
*/
if (th->syn) {
syn_challenge:
@@ -5853,12 +5853,12 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
struct inet_request_sock *ireq = inet_rsk(req);
if (family == AF_INET)
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
- &ireq->ir_rmt_addr, port);
+ net_dbg_ratelimited("drop open request from %pI4/%u\n",
+ &ireq->ir_rmt_addr, port);
#if IS_ENABLED(CONFIG_IPV6)
else if (family == AF_INET6)
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
- &ireq->ir_v6_rmt_addr, port);
+ net_dbg_ratelimited("drop open request from %pI6/%u\n",
+ &ireq->ir_v6_rmt_addr, port);
#endif
}
@@ -5867,7 +5867,7 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
* If we receive a SYN packet with these bits set, it means a
* network is playing bad games with TOS bits. In order to
* avoid possible false congestion notifications, we disable
- * TCP ECN negociation.
+ * TCP ECN negotiation.
*
* Exception: tcp_ca wants ECN. This is required for DCTCP
* congestion control; it requires setting ECT on all packets,
@@ -5877,20 +5877,22 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
*/
static void tcp_ecn_create_request(struct request_sock *req,
const struct sk_buff *skb,
- const struct sock *listen_sk)
+ const struct sock *listen_sk,
+ const struct dst_entry *dst)
{
const struct tcphdr *th = tcp_hdr(skb);
const struct net *net = sock_net(listen_sk);
bool th_ecn = th->ece && th->cwr;
- bool ect, need_ecn;
+ bool ect, need_ecn, ecn_ok;
if (!th_ecn)
return;
ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
need_ecn = tcp_ca_needs_ecn(listen_sk);
+ ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
- if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn)
+ if (!ect && !need_ecn && ecn_ok)
inet_rsk(req)->ecn_ok = 1;
else if (ect && need_ecn)
inet_rsk(req)->ecn_ok = 1;
@@ -5955,13 +5957,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
- if (!want_cookie || tmp_opt.tstamp_ok)
- tcp_ecn_create_request(req, skb, sk);
-
- if (want_cookie) {
- isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
- req->cookie_ts = tmp_opt.tstamp_ok;
- } else if (!isn) {
+ if (!want_cookie && !isn) {
/* VJ's idea. We save last timestamp seen
* from the destination in peer table, when entering
* state TIME-WAIT, and check against it before
@@ -6009,6 +6005,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_free;
}
+ tcp_ecn_create_request(req, skb, sk, dst);
+
+ if (want_cookie) {
+ isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+ req->cookie_ts = tmp_opt.tstamp_ok;
+ if (!tmp_opt.tstamp_ok)
+ inet_rsk(req)->ecn_ok = 0;
+ }
+
tcp_rsk(req)->snt_isn = isn;
tcp_openreq_init_rwin(req, sk, dst);
fastopen = !want_cookie &&
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 147be2024290..a3f72d7fc06c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -623,6 +623,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
+ net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
hash_location = tcp_parse_md5sig_option(th);
if (!sk && hash_location) {
@@ -633,7 +634,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
* Incoming packet is checked with md5 hash with finding key,
* no RST generated if md5 hash doesn't match.
*/
- sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
+ sk1 = __inet_lookup_listener(net,
&tcp_hashinfo, ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), inet_iif(skb));
@@ -681,7 +682,6 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
- net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
@@ -1432,6 +1432,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst = sk->sk_rx_dst;
sock_rps_save_rxhash(sk, skb);
+ sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
dst->ops->check(dst, 0) == NULL) {
@@ -1453,6 +1454,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (nsk != sk) {
sock_rps_save_rxhash(nsk, skb);
+ sk_mark_napi_id(sk, skb);
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
@@ -1664,7 +1666,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
- sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
skb->dev = NULL;
bh_lock_sock_nested(sk);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 5b90f2f447a5..9d7930ba8e0f 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -94,9 +94,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
- SKB_GSO_MPLS |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
+ SKB_GSO_TUNNEL_REMCSUM |
0) ||
!(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a3d453b94747..7f18262e2326 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -333,10 +333,19 @@ static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+ tcp_ca_needs_ecn(sk);
+
+ if (!use_ecn) {
+ const struct dst_entry *dst = __sk_dst_get(sk);
+
+ if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
+ use_ecn = true;
+ }
tp->ecn_flags = 0;
- if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
- tcp_ca_needs_ecn(sk)) {
+
+ if (use_ecn) {
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
if (tcp_ca_needs_ecn(sk))
@@ -1515,6 +1524,27 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
+
+/* Return how many segs we'd like on a TSO packet,
+ * to send one TSO packet per ms
+ */
+static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
+{
+ u32 bytes, segs;
+
+ bytes = min(sk->sk_pacing_rate >> 10,
+ sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
+
+ /* Goal is to send at least one packet per ms,
+ * not one big TSO packet every 100 ms.
+ * This preserves ACK clocking and is consistent
+ * with tcp_tso_should_defer() heuristic.
+ */
+ segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);
+
+ return min_t(u32, segs, sk->sk_gso_max_segs);
+}
+
/* Returns the portion of skb which can be sent right away */
static unsigned int tcp_mss_split_point(const struct sock *sk,
const struct sk_buff *skb,
@@ -1553,7 +1583,7 @@ static unsigned int tcp_mss_split_point(const struct sock *sk,
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
const struct sk_buff *skb)
{
- u32 in_flight, cwnd;
+ u32 in_flight, cwnd, halfcwnd;
/* Don't be strict about the congestion window for the final FIN. */
if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
@@ -1562,10 +1592,14 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
in_flight = tcp_packets_in_flight(tp);
cwnd = tp->snd_cwnd;
- if (in_flight < cwnd)
- return (cwnd - in_flight);
+ if (in_flight >= cwnd)
+ return 0;
- return 0;
+ /* For better scheduling, ensure we have at least
+ * 2 GSO packets in flight.
+ */
+ halfcwnd = max(cwnd >> 1, 1U);
+ return min(halfcwnd, cwnd - in_flight);
}
/* Initialize TSO state of a skb.
@@ -1718,7 +1752,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
* This algorithm is from John Heffner.
*/
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
- bool *is_cwnd_limited)
+ bool *is_cwnd_limited, u32 max_segs)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1748,8 +1782,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
limit = min(send_win, cong_win);
/* If a full-sized TSO skb can be sent, do it. */
- if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
- tp->xmit_size_goal_segs * tp->mss_cache))
+ if (limit >= max_segs * tp->mss_cache)
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -1946,6 +1979,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int cwnd_quota;
int result;
bool is_cwnd_limited = false;
+ u32 max_segs;
sent_pkts = 0;
@@ -1959,6 +1993,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
}
+ max_segs = tcp_tso_autosize(sk, mss_now);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -1991,10 +2026,23 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
break;
} else {
if (!push_one &&
- tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
+ tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
+ max_segs))
break;
}
+ limit = mss_now;
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
+ limit = tcp_mss_split_point(sk, skb, mss_now,
+ min_t(unsigned int,
+ cwnd_quota,
+ max_segs),
+ nonagle);
+
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
+ break;
+
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* This allows for :
@@ -2005,8 +2053,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
* of queued bytes to ensure line rate.
* One example is wifi aggregation (802.11 AMPDU)
*/
- limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
- sk->sk_pacing_rate >> 10);
+ limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
+ limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
set_bit(TSQ_THROTTLED, &tp->tsq_flags);
@@ -2019,18 +2067,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
break;
}
- limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
- limit = tcp_mss_split_point(sk, skb, mss_now,
- min_t(unsigned int,
- cwnd_quota,
- sk->sk_gso_max_segs),
- nonagle);
-
- if (skb->len > limit &&
- unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
- break;
-
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
@@ -2998,9 +3034,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_request *fo = tp->fastopen_req;
- int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
- struct sk_buff *syn_data = NULL, *data;
+ int syn_loss = 0, space, err = 0;
unsigned long last_syn_loss = 0;
+ struct sk_buff *syn_data;
tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
@@ -3031,48 +3067,40 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
/* limit to order-0 allocations */
space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
- syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
- sk->sk_allocation);
- if (syn_data == NULL)
+ syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
+ if (!syn_data)
+ goto fallback;
+ syn_data->ip_summed = CHECKSUM_PARTIAL;
+ memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
+ if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
+ fo->data->msg_iter.iov, 0, space))) {
+ kfree_skb(syn_data);
goto fallback;
+ }
- for (i = 0; i < iovlen && syn_data->len < space; ++i) {
- struct iovec *iov = &fo->data->msg_iov[i];
- unsigned char __user *from = iov->iov_base;
- int len = iov->iov_len;
+ /* No more data pending in inet_wait_for_connect() */
+ if (space == fo->size)
+ fo->data = NULL;
+ fo->copied = space;
- if (syn_data->len + len > space)
- len = space - syn_data->len;
- else if (i + 1 == iovlen)
- /* No more data pending in inet_wait_for_connect() */
- fo->data = NULL;
+ tcp_connect_queue_skb(sk, syn_data);
- if (skb_add_data(syn_data, from, len))
- goto fallback;
- }
+ err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
- /* Queue a data-only packet after the regular SYN for retransmission */
- data = pskb_copy(syn_data, sk->sk_allocation);
- if (data == NULL)
- goto fallback;
- TCP_SKB_CB(data)->seq++;
- TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
- TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
- tcp_connect_queue_skb(sk, data);
- fo->copied = data->len;
-
- /* syn_data is about to be sent, we need to take current time stamps
- * for the packets that are in write queue : SYN packet and DATA
- */
- skb_mstamp_get(&syn->skb_mstamp);
- data->skb_mstamp = syn->skb_mstamp;
+ syn->skb_mstamp = syn_data->skb_mstamp;
- if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
+ /* Now full SYN+DATA was cloned and sent (or not),
+ * remove the SYN from the original skb (syn_data)
+ * we keep in write queue in case of a retransmit, as we
+ * also have the SYN packet (with no data) in the same queue.
+ */
+ TCP_SKB_CB(syn_data)->seq++;
+ TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
+ if (!err) {
tp->syn_data = (fo->copied > 0);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
goto done;
}
- syn_data = NULL;
fallback:
/* Send a regular SYN with Fast Open cookie request option */
@@ -3081,7 +3109,6 @@ fallback:
err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
if (err)
tp->syn_fastopen = 0;
- kfree_skb(syn_data);
done:
fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
return err;
@@ -3101,13 +3128,10 @@ int tcp_connect(struct sock *sk)
return 0;
}
- buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
- if (unlikely(buff == NULL))
+ buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+ if (unlikely(!buff))
return -ENOBUFS;
- /* Reserve space for headers. */
- skb_reserve(buff, MAX_TCP_HEADER);
-
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
tp->retrans_stamp = tcp_time_stamp;
tcp_connect_queue_skb(sk, buff);
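
To put a number on the tcp_tso_autosize() helper added above, a hedged userspace-only walk-through with assumed values: pacing rate 12.5 MB/s (100 Mbit/s), mss 1448, tcp_min_tso_segs 2, gso_max_segs 65535, and a rough 64KB-minus-headers cap standing in for the MAX_TCP_HEADER term:

#include <stdio.h>

int main(void)
{
	unsigned long long pacing_rate = 12500000;	/* bytes/sec, 100 Mbit/s */
	unsigned mss = 1448, min_tso_segs = 2, gso_max_segs = 65535;
	unsigned gso_cap = 65536 - 1 - 320;	/* stand-in for gso_max_size - 1 - MAX_TCP_HEADER */

	unsigned bytes = pacing_rate >> 10;	/* ~1 ms worth of data: 12207 */
	if (bytes > gso_cap)
		bytes = gso_cap;

	unsigned segs = bytes / mss;		/* 8 */
	if (segs < min_tso_segs)
		segs = min_tso_segs;
	if (segs > gso_max_segs)
		segs = gso_max_segs;

	printf("TSO burst: %u segments (~%u bytes, ~1 ms at this rate)\n",
	       segs, segs * mss);
	return 0;
}
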
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 9b21ae8b2e31..1829c7fbc77e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -374,17 +374,19 @@ void tcp_retransmit_timer(struct sock *sk)
*/
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
- &inet->inet_daddr,
- ntohs(inet->inet_dport), inet->inet_num,
- tp->snd_una, tp->snd_nxt);
+ net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
+ &inet->inet_daddr,
+ ntohs(inet->inet_dport),
+ inet->inet_num,
+ tp->snd_una, tp->snd_nxt);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
- &sk->sk_v6_daddr,
- ntohs(inet->inet_dport), inet->inet_num,
- tp->snd_una, tp->snd_nxt);
+ net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
+ &sk->sk_v6_daddr,
+ ntohs(inet->inet_dport),
+ inet->inet_num,
+ tp->snd_una, tp->snd_nxt);
}
#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cd0db5471bb5..13b4dcf86ef6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -144,7 +144,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
- sk_nulls_for_each(sk2, node, &hslot->head)
+ sk_nulls_for_each(sk2, node, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(bitmap || udp_sk(sk2)->udp_port_hash == num) &&
@@ -152,14 +152,13 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
- !uid_eq(uid, sock_i_uid(sk2))) &&
- (*saddr_comp)(sk, sk2)) {
- if (bitmap)
- __set_bit(udp_sk(sk2)->udp_port_hash >> log,
- bitmap);
- else
+ !uid_eq(uid, sock_i_uid(sk2))) &&
+ saddr_comp(sk, sk2)) {
+ if (!bitmap)
return 1;
+ __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
}
+ }
return 0;
}
@@ -168,10 +167,10 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
* can insert/delete a socket with local_port == num
*/
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
- struct udp_hslot *hslot2,
- struct sock *sk,
- int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2))
+ struct udp_hslot *hslot2,
+ struct sock *sk,
+ int (*saddr_comp)(const struct sock *sk1,
+ const struct sock *sk2))
{
struct sock *sk2;
struct hlist_nulls_node *node;
@@ -179,7 +178,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
int res = 0;
spin_lock(&hslot2->lock);
- udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
+ udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(udp_sk(sk2)->udp_port_hash == num) &&
@@ -187,11 +186,12 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(!sk2->sk_reuseport || !sk->sk_reuseport ||
- !uid_eq(uid, sock_i_uid(sk2))) &&
- (*saddr_comp)(sk, sk2)) {
+ !uid_eq(uid, sock_i_uid(sk2))) &&
+ saddr_comp(sk, sk2)) {
res = 1;
break;
}
+ }
spin_unlock(&hslot2->lock);
return res;
}
@@ -206,8 +206,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2),
+ int (*saddr_comp)(const struct sock *sk1,
+ const struct sock *sk2),
unsigned int hash2_nulladdr)
{
struct udp_hslot *hslot, *hslot2;
@@ -336,38 +336,45 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
-static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
- unsigned short hnum,
- __be16 sport, __be32 daddr, __be16 dport, int dif)
+static inline int compute_score(struct sock *sk, struct net *net,
+ __be32 saddr, unsigned short hnum, __be16 sport,
+ __be32 daddr, __be16 dport, int dif)
{
- int score = -1;
+ int score;
+ struct inet_sock *inet;
- if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
- !ipv6_only_sock(sk)) {
- struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net) ||
+ udp_sk(sk)->udp_port_hash != hnum ||
+ ipv6_only_sock(sk))
+ return -1;
- score = (sk->sk_family == PF_INET ? 2 : 1);
- if (inet->inet_rcv_saddr) {
- if (inet->inet_rcv_saddr != daddr)
- return -1;
- score += 4;
- }
- if (inet->inet_daddr) {
- if (inet->inet_daddr != saddr)
- return -1;
- score += 4;
- }
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score += 4;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score += 4;
- }
+ score = (sk->sk_family == PF_INET) ? 2 : 1;
+ inet = inet_sk(sk);
+
+ if (inet->inet_rcv_saddr) {
+ if (inet->inet_rcv_saddr != daddr)
+ return -1;
+ score += 4;
+ }
+
+ if (inet->inet_daddr) {
+ if (inet->inet_daddr != saddr)
+ return -1;
+ score += 4;
+ }
+
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
+ return -1;
+ score += 4;
+ }
+
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score += 4;
}
+
return score;
}
@@ -378,33 +385,39 @@ static inline int compute_score2(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif)
{
- int score = -1;
+ int score;
+ struct inet_sock *inet;
+
+ if (!net_eq(sock_net(sk), net) ||
+ ipv6_only_sock(sk))
+ return -1;
- if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
- struct inet_sock *inet = inet_sk(sk);
+ inet = inet_sk(sk);
- if (inet->inet_rcv_saddr != daddr)
+ if (inet->inet_rcv_saddr != daddr ||
+ inet->inet_num != hnum)
+ return -1;
+
+ score = (sk->sk_family == PF_INET) ? 2 : 1;
+
+ if (inet->inet_daddr) {
+ if (inet->inet_daddr != saddr)
return -1;
- if (inet->inet_num != hnum)
+ score += 4;
+ }
+
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
return -1;
+ score += 4;
+ }
- score = (sk->sk_family == PF_INET ? 2 : 1);
- if (inet->inet_daddr) {
- if (inet->inet_daddr != saddr)
- return -1;
- score += 4;
- }
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score += 4;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score += 4;
- }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score += 4;
}
+
return score;
}
@@ -1036,7 +1049,7 @@ back_from_confirm:
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
- skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
+ skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
@@ -1051,7 +1064,7 @@ back_from_confirm:
/* ... which is an evident application bug. --ANK */
release_sock(sk);
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
+ net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
@@ -1067,7 +1080,7 @@ back_from_confirm:
do_append_data:
up->len += ulen;
- err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
+ err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
@@ -1133,7 +1146,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
if (unlikely(!up->pending)) {
release_sock(sk);
- LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
+ net_dbg_ratelimited("udp cork app bug 3\n");
return -EINVAL;
}
@@ -1281,12 +1294,11 @@ try_again:
}
if (skb_csum_unnecessary(skb))
- err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+ msg, copied);
else {
- err = skb_copy_and_csum_datagram_iovec(skb,
- sizeof(struct udphdr),
- msg->msg_iov);
+ err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
+ msg);
if (err == -EINVAL)
goto csum_copy_err;
@@ -1445,6 +1457,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (inet_sk(sk)->inet_daddr) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
@@ -1546,8 +1559,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
- LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
- UDP_SKB_CB(skb)->cscov, skb->len);
+ net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
@@ -1557,8 +1570,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
- LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
- UDP_SKB_CB(skb)->cscov, up->pcrlen);
+ net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
+ UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
@@ -1647,7 +1660,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
- struct udp_table *udptable)
+ struct udp_table *udptable,
+ int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct hlist_nulls_node *node;
@@ -1656,6 +1670,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
int dif = skb->dev->ifindex;
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+ bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1674,6 +1689,7 @@ start_lookup:
dif, hnum)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
+ inner_flushed = true;
count = 0;
}
stack[count++] = sk;
@@ -1695,7 +1711,10 @@ start_lookup:
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
- kfree_skb(skb);
+ if (!inner_flushed)
+ UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+ proto == IPPROTO_UDPLITE);
+ consume_skb(skb);
}
return 0;
}
@@ -1777,14 +1796,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (ret > 0)
return -ret;
return 0;
- } else {
- if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
- return __udp4_lib_mcast_deliver(net, skb, uh,
- saddr, daddr, udptable);
-
- sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
}
+ if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+ return __udp4_lib_mcast_deliver(net, skb, uh,
+ saddr, daddr, udptable, proto);
+
+ sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk != NULL) {
int ret;
@@ -1822,11 +1840,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
return 0;
short_packet:
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
- proto == IPPROTO_UDPLITE ? "Lite" : "",
- &saddr, ntohs(uh->source),
- ulen, skb->len,
- &daddr, ntohs(uh->dest));
+ net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
+ proto == IPPROTO_UDPLITE ? "Lite" : "",
+ &saddr, ntohs(uh->source),
+ ulen, skb->len,
+ &daddr, ntohs(uh->dest));
goto drop;
csum_error:
@@ -1834,10 +1852,10 @@ csum_error:
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
- proto == IPPROTO_UDPLITE ? "Lite" : "",
- &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
- ulen);
+ net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
+ proto == IPPROTO_UDPLITE ? "Lite" : "",
+ &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
+ ulen);
UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
@@ -2027,7 +2045,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
} else {
up->corkflag = 0;
lock_sock(sk);
- (*push_pending_frames)(sk);
+ push_pending_frames(sk);
release_sock(sk);
}
break;
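
The udp.c hunks above repeatedly swap LIMIT_NETDEBUG() for net_dbg_ratelimited(), i.e. debug messages that a remote peer can trigger are throttled rather than printed unconditionally. As a rough illustration of that pattern (not the kernel implementation), here is a minimal user-space sketch of an interval-plus-burst rate limiter; dbg_ratelimited() and its limits are invented names for the example.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Allow at most `burst` messages per `interval_ms` window; drop the rest.
 * This mirrors the idea behind the ratelimited conversions: diagnostics
 * triggered by incoming packets must not let a sender flood the log.
 */
static bool ratelimit_ok(int interval_ms, int burst)
{
    static struct timespec window_start;
    static int printed;
    struct timespec now;
    long elapsed_ms;

    clock_gettime(CLOCK_MONOTONIC, &now);
    elapsed_ms = (now.tv_sec - window_start.tv_sec) * 1000 +
                 (now.tv_nsec - window_start.tv_nsec) / 1000000;

    if (elapsed_ms > interval_ms) {
        window_start = now;
        printed = 0;
    }
    return printed++ < burst;
}

static void dbg_ratelimited(const char *fmt, ...)
{
    va_list ap;

    if (!ratelimit_ok(5000 /* ms */, 10 /* messages per window */))
        return;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    for (int i = 0; i < 1000; i++)
        dbg_ratelimited("short packet %d\n", i); /* only ~10 lines appear */
    return 0;
}
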
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6480cea7aa53..d3e537ef6b7f 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
netdev_features_t features),
- __be16 new_protocol)
+ __be16 new_protocol, bool is_ipv6)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
@@ -39,7 +39,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t enc_features;
int udp_offset, outer_hlen;
unsigned int oldlen;
- bool need_csum;
+ bool need_csum = !!(skb_shinfo(skb)->gso_type &
+ SKB_GSO_UDP_TUNNEL_CSUM);
+ bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
+ bool offload_csum = false, dont_encap = (need_csum || remcsum);
oldlen = (u16)~skb->len;
@@ -52,10 +55,13 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
skb_set_network_header(skb, skb_inner_network_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
skb->protocol = new_protocol;
+ skb->encap_hdr_csum = need_csum;
+ skb->remcsum_offload = remcsum;
- need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
- if (need_csum)
- skb->encap_hdr_csum = 1;
+ /* Try to offload checksum if possible */
+ offload_csum = !!(need_csum &&
+ (skb->dev->features &
+ (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & features;
@@ -72,11 +78,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
do {
struct udphdr *uh;
int len;
-
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
+ __be32 delta;
+
+ if (dont_encap) {
+ skb->encapsulation = 0;
+ skb->ip_summed = CHECKSUM_NONE;
+ } else {
+ /* Only set up inner headers if we might be offloading
+ * inner checksum.
+ */
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
skb->mac_len = mac_len;
+ skb->protocol = protocol;
skb_push(skb, outer_hlen);
skb_reset_mac_header(skb);
@@ -86,19 +102,36 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
uh = udp_hdr(skb);
uh->len = htons(len);
- if (need_csum) {
- __be32 delta = htonl(oldlen + len);
+ if (!need_csum)
+ continue;
- uh->check = ~csum_fold((__force __wsum)
- ((__force u32)uh->check +
- (__force u32)delta));
+ delta = htonl(oldlen + len);
+
+ uh->check = ~csum_fold((__force __wsum)
+ ((__force u32)uh->check +
+ (__force u32)delta));
+ if (offload_csum) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ } else if (remcsum) {
+ /* Need to calculate checksum from scratch,
+ * inner checksums are never when doing
+ * remote_checksum_offload.
+ */
+
+ skb->csum = skb_checksum(skb, udp_offset,
+ skb->len - udp_offset,
+ 0);
+ uh->check = csum_fold(skb->csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
uh->check = gso_make_checksum(skb, ~uh->check);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
-
- skb->protocol = protocol;
} while ((skb = skb->next));
out:
return segs;
@@ -134,7 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
}
segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
- protocol);
+ protocol, is_ipv6);
out_unlock:
rcu_read_unlock();
@@ -172,9 +205,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
+ SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_IPIP |
- SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
- SKB_GSO_MPLS) ||
+ SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
!(type & (SKB_GSO_UDP))))
goto out;
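
In the udp_offload.c hunks, each generated segment keeps the outer UDP checksum valid by folding the length delta into the existing uh->check with one's-complement arithmetic; only the remote-checksum-offload case recomputes from scratch via skb_checksum(). The standalone program below demonstrates that incremental-update arithmetic (in the spirit of RFC 1624) and checks it against a full recomputation; all names in it are invented for the example and it is not kernel code.

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold32(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Full one's-complement checksum over a buffer of 16-bit words. */
static uint16_t csum_full(const uint16_t *w, int nwords)
{
    uint32_t sum = 0;

    while (nwords--)
        sum += *w++;
    return (uint16_t)~csum_fold32(sum);
}

/* RFC 1624: HC' = ~(~HC + ~m + m') -- patch checksum HC when the summed
 * 16-bit word m is replaced by m', without touching the rest of the data.
 */
static uint16_t csum_update(uint16_t hc, uint16_t m, uint16_t m_new)
{
    uint32_t sum = (uint16_t)~hc;

    sum += (uint16_t)~m;
    sum += m_new;
    return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
    uint16_t pkt[4] = { 0x1234, 0x0042, 0xbeef, 0x0010 };
    uint16_t check = csum_full(pkt, 4);

    /* A length word changes from 0x0010 to 0x0200, as it would when a
     * large datagram is resegmented; patch the checksum incrementally.
     */
    uint16_t patched = csum_update(check, pkt[3], 0x0200);

    pkt[3] = 0x0200;
    printf("incremental %04x, recomputed %04x\n", patched, csum_full(pkt, 4));
    return 0;
}
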
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0169ccf5aa4f..f7c8bbeb27b7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1170,6 +1170,9 @@ enum {
IPV6_SADDR_RULE_PRIVACY,
IPV6_SADDR_RULE_ORCHID,
IPV6_SADDR_RULE_PREFIX,
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ IPV6_SADDR_RULE_NOT_OPTIMISTIC,
+#endif
IPV6_SADDR_RULE_MAX
};
@@ -1197,6 +1200,15 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
+static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+{
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+#else
+ return false;
+#endif
+}
+
static int ipv6_get_saddr_eval(struct net *net,
struct ipv6_saddr_score *score,
struct ipv6_saddr_dst *dst,
@@ -1257,10 +1269,16 @@ static int ipv6_get_saddr_eval(struct net *net,
score->scopedist = ret;
break;
case IPV6_SADDR_RULE_PREFERRED:
+ {
/* Rule 3: Avoid deprecated and optimistic addresses */
+ u8 avoid = IFA_F_DEPRECATED;
+
+ if (!ipv6_use_optimistic_addr(score->ifa->idev))
+ avoid |= IFA_F_OPTIMISTIC;
ret = ipv6_saddr_preferred(score->addr_type) ||
- !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
+ !(score->ifa->flags & avoid);
break;
+ }
#ifdef CONFIG_IPV6_MIP6
case IPV6_SADDR_RULE_HOA:
{
@@ -1306,6 +1324,14 @@ static int ipv6_get_saddr_eval(struct net *net,
ret = score->ifa->prefix_len;
score->matchlen = ret;
break;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
+ /* Optimistic addresses still have lower precedence than other
+ * preferred addresses.
+ */
+ ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
+ break;
+#endif
default:
ret = 0;
}
@@ -1385,10 +1411,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
score->addr_type & IPV6_ADDR_MULTICAST)) {
- LIMIT_NETDEBUG(KERN_DEBUG
- "ADDRCONF: unspecified / multicast address "
- "assigned as unicast address on %s",
- dev->name);
+ net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+ dev->name);
continue;
}
@@ -2315,8 +2339,8 @@ ok:
else
stored_lft = 0;
if (!update_lft && !create && stored_lft) {
- const u32 minimum_lft = min(
- stored_lft, (u32)MIN_VALID_LIFETIME);
+ const u32 minimum_lft = min_t(u32,
+ stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
/* RFC4862 Section 5.5.3e:
@@ -2519,7 +2543,8 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
if (!dev)
return -ENODEV;
- if ((idev = __in6_dev_get(dev)) == NULL)
+ idev = __in6_dev_get(dev);
+ if (idev == NULL)
return -ENXIO;
read_lock_bh(&idev->lock);
@@ -2666,7 +2691,8 @@ static void init_loopback(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = ipv6_find_idev(dev)) == NULL) {
+ idev = ipv6_find_idev(dev);
+ if (idev == NULL) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2789,7 +2815,8 @@ static void addrconf_sit_config(struct net_device *dev)
* our v4 addrs in the tunnel
*/
- if ((idev = ipv6_find_idev(dev)) == NULL) {
+ idev = ipv6_find_idev(dev);
+ if (idev == NULL) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -2813,7 +2840,8 @@ static void addrconf_gre_config(struct net_device *dev)
ASSERT_RTNL();
- if ((idev = ipv6_find_idev(dev)) == NULL) {
+ idev = ipv6_find_idev(dev);
+ if (idev == NULL) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
@@ -3222,8 +3250,15 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
* Optimistic nodes can start receiving
* Frames right away
*/
- if (ifp->flags & IFA_F_OPTIMISTIC)
+ if (ifp->flags & IFA_F_OPTIMISTIC) {
ip6_ins_rt(ifp->rt);
+ if (ipv6_use_optimistic_addr(idev)) {
+ /* Because optimistic nodes can use this address,
+ * notify listeners. If DAD fails, RTM_DELADDR is sent.
+ */
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+ }
addrconf_dad_kick(ifp);
out:
@@ -4330,6 +4365,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+ array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
@@ -5156,6 +5192,14 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "use_optimistic",
+ .data = &ipv6_devconf.use_optimistic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+
+ },
#endif
#ifdef CONFIG_IPV6_MROUTE
{
@@ -5336,10 +5380,8 @@ static void __net_exit addrconf_exit_net(struct net *net)
__addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
__addrconf_sysctl_unregister(net->ipv6.devconf_all);
#endif
- if (!net_eq(net, &init_net)) {
- kfree(net->ipv6.devconf_dflt);
- kfree(net->ipv6.devconf_all);
- }
+ kfree(net->ipv6.devconf_dflt);
+ kfree(net->ipv6.devconf_all);
}
static struct pernet_operations addrconf_ops = {
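
The addrconf.c hunks add a use_optimistic devconf knob and a new, lowest-precedence source-address selection rule, so optimistic (still DAD-pending) addresses can be chosen yet still lose ties to fully preferred ones. Below is a much-simplified sketch of that ordered-rule comparison; struct candidate, pick() and the three rules are illustrative stand-ins, not the kernel's ipv6_saddr_score machinery.

#include <stdbool.h>
#include <stdio.h>

struct candidate {
    bool preferred;       /* not deprecated / not an avoided optimistic addr */
    int  prefix_match;    /* longest common prefix with the destination      */
    bool not_optimistic;  /* the new, lowest-precedence tie-breaker          */
};

typedef int (*rule_fn)(const struct candidate *c);

static int rule_preferred(const struct candidate *c)      { return c->preferred; }
static int rule_prefix(const struct candidate *c)         { return c->prefix_match; }
static int rule_not_optimistic(const struct candidate *c) { return c->not_optimistic; }

/* Rules are tried in order; the first one whose scores differ decides.
 * Putting the optimistic rule last means an optimistic address can win
 * only when every higher-precedence rule is a tie.
 */
static const rule_fn rules[] = { rule_preferred, rule_prefix, rule_not_optimistic };

static const struct candidate *pick(const struct candidate *a,
                                    const struct candidate *b)
{
    for (unsigned int i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
        int sa = rules[i](a), sb = rules[i](b);

        if (sa != sb)
            return sa > sb ? a : b;
    }
    return a; /* complete tie: keep the current best */
}

int main(void)
{
    struct candidate stable     = { .preferred = true, .prefix_match = 48, .not_optimistic = true  };
    struct candidate optimistic = { .preferred = true, .prefix_match = 48, .not_optimistic = false };

    printf("winner: not_optimistic=%d\n", pick(&stable, &optimistic)->not_optimistic);
    return 0;
}
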
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 6d16eb0e0c7f..a6727add2624 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -272,10 +272,9 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
ipv6_rearrange_destopt(iph, exthdr.opth);
case NEXTHDR_HOP:
if (!zero_out_mutable_opts(exthdr.opth)) {
- LIMIT_NETDEBUG(
- KERN_WARNING "overrun %sopts\n",
- nexthdr == NEXTHDR_HOP ?
- "hop" : "dest");
+ net_dbg_ratelimited("overrun %sopts\n",
+ nexthdr == NEXTHDR_HOP ?
+ "hop" : "dest");
return -EINVAL;
}
break;
@@ -354,7 +353,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ahp = x->data;
ahash = ahp->ahash;
- if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ err = skb_cow_data(skb, 0, &trailer);
+ if (err < 0)
goto out;
nfrags = err;
@@ -560,8 +560,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, ah_hlen))
goto out;
-
- if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+ err = skb_cow_data(skb, 0, &trailer);
+ if (err < 0)
goto out;
nfrags = err;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2cdc38338be3..100c589a2a6c 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
kfree_skb(skb);
}
+static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb)
+{
+ int ifindex = skb->dev ? skb->dev->ifindex : -1;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ IP6CB(skb)->iif = ifindex;
+ else
+ PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex;
+}
+
/*
* Handle MSG_ERRQUEUE
*/
@@ -351,7 +361,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
@@ -388,8 +398,12 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_port = 0;
- if (np->rxopt.all)
+ if (np->rxopt.all) {
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
+ serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
+ ip6_datagram_prepare_pktinfo_errqueue(skb);
ip6_datagram_recv_common_ctl(sk, msg, skb);
+ }
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
@@ -445,7 +459,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free_skb;
@@ -491,7 +505,10 @@ void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
&src_info.ipi6_addr);
}
- put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
+
+ if (src_info.ipi6_ifindex >= 0)
+ put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO,
+ sizeof(src_info), &src_info);
}
}
@@ -640,7 +657,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
int len;
int err = 0;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
int addr_type;
if (!CMSG_OK(msg, cmsg)) {
@@ -893,8 +910,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
break;
}
default:
- LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
- cmsg->cmsg_type);
+ net_dbg_ratelimited("invalid cmsg type: %d\n",
+ cmsg->cmsg_type);
err = -EINVAL;
goto exit_f;
}
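
The datagram.c hunk converts an open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() walk into for_each_cmsghdr(). The same iteration pattern exists in user space; the fragment below is a sketch that assumes the socket already has IPV6_RECVPKTINFO enabled via setsockopt() and that msg was filled by recvmsg(). It walks the ancillary data and reports the IPV6_PKTINFO interface index.

#define _GNU_SOURCE
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Walk ancillary data on a received datagram and report IPV6_PKTINFO.
 * This is the user-space mirror of the CMSG_FIRSTHDR/CMSG_NXTHDR loop
 * that for_each_cmsghdr() wraps on the kernel side.
 */
static void dump_pktinfo(struct msghdr *msg)
{
    for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
         cmsg = CMSG_NXTHDR(msg, cmsg)) {
        if (cmsg->cmsg_level == IPPROTO_IPV6 &&
            cmsg->cmsg_type == IPV6_PKTINFO) {
            struct in6_pktinfo info;

            memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
            printf("packet arrived on ifindex %u\n", info.ipi6_ifindex);
        }
    }
}
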
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 83fc3a385a26..e48f2c7c5c59 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -286,8 +286,8 @@ static int esp_input_done2(struct sk_buff *skb, int err)
err = -EINVAL;
padlen = nexthdr[0];
if (padlen + 2 + alen >= elen) {
- LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
- "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
+ net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
+ padlen + 2, elen - alen);
goto out;
}
@@ -345,7 +345,8 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
- if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (nfrags < 0) {
ret = -EINVAL;
goto out;
}
@@ -544,12 +545,12 @@ static int esp_init_authenc(struct xfrm_state *x)
BUG_ON(!aalg_desc);
err = -EINVAL;
- if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
+ if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
crypto_aead_authsize(aead)) {
- NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
- x->aalg->alg_name,
- crypto_aead_authsize(aead),
- aalg_desc->uinfo.auth.icv_fullbits/8);
+ pr_info("ESP: %s digestsize %u != %hu\n",
+ x->aalg->alg_name,
+ crypto_aead_authsize(aead),
+ aalg_desc->uinfo.auth.icv_fullbits / 8);
goto free_key;
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index bfde361b6134..a7bbbe45570b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -47,7 +47,7 @@
#include <net/xfrm.h>
#endif
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* Parsing tlv encoded headers.
@@ -184,7 +184,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
int ret;
if (opt->dsthao) {
- LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
+ net_dbg_ratelimited("hao duplicated\n");
goto discard;
}
opt->dsthao = opt->dst1;
@@ -193,14 +193,14 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
if (hao->length != 16) {
- LIMIT_NETDEBUG(
- KERN_DEBUG "hao invalid option length = %d\n", hao->length);
+ net_dbg_ratelimited("hao invalid option length = %d\n",
+ hao->length);
goto discard;
}
if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
- LIMIT_NETDEBUG(
- KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr);
+ net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
+ &hao->addr);
goto discard;
}
@@ -551,8 +551,8 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
return true;
}
- LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
- nh[optoff + 1]);
+ net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
+ nh[optoff + 1]);
kfree_skb(skb);
return false;
}
@@ -566,8 +566,8 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
u32 pkt_len;
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
- LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
- nh[optoff+1]);
+ net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
+ nh[optoff+1]);
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
goto drop;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 97ae70077a4f..d674152b6ede 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -243,7 +243,8 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *icmp6h;
int err = 0;
- if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ skb = skb_peek(&sk->sk_write_queue);
+ if (skb == NULL)
goto out;
icmp6h = icmp6_hdr(skb);
@@ -338,7 +339,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
* anycast.
*/
if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
- LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n");
+ net_dbg_ratelimited("icmp6_send: acast source\n");
dst_release(dst);
return ERR_PTR(-EINVAL);
}
@@ -452,7 +453,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
* and anycast addresses will be checked later.
*/
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
- LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n");
+ net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n");
return;
}
@@ -460,7 +461,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
* Never answer to a ICMP packet.
*/
if (is_ineligible(skb)) {
- LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n");
+ net_dbg_ratelimited("icmp6_send: no reply to icmp error\n");
return;
}
@@ -509,7 +510,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
len = skb->len - msg.offset;
len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
- LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
+ net_dbg_ratelimited("icmp: len problem\n");
goto out_dst_release;
}
@@ -679,6 +680,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
const struct in6_addr *saddr, *daddr;
struct icmp6hdr *hdr;
u8 type;
+ bool success = false;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
@@ -706,9 +708,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
daddr = &ipv6_hdr(skb)->daddr;
if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
- LIMIT_NETDEBUG(KERN_DEBUG
- "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
- saddr, daddr);
+ net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n",
+ saddr, daddr);
goto csum_error;
}
@@ -727,7 +728,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
break;
case ICMPV6_ECHO_REPLY:
- ping_rcv(skb);
+ success = ping_rcv(skb);
break;
case ICMPV6_PKT_TOOBIG:
@@ -781,7 +782,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
if (type & ICMPV6_INFOMSG_MASK)
break;
- LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
+ net_dbg_ratelimited("icmpv6: msg of unknown type\n");
/*
* error of unknown type.
@@ -791,7 +792,14 @@ static int icmpv6_rcv(struct sk_buff *skb)
icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
}
- kfree_skb(skb);
+ /* until the v6 path can be better sorted assume failure and
+ * preserve the status quo behaviour for the rest of the paths to here
+ */
+ if (success)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+
return 0;
csum_error:
@@ -1009,4 +1017,3 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
return table;
}
#endif
-
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 3dd7d4ebd7cd..2f780cba6e12 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -654,7 +654,11 @@ release:
goto done;
err = -ENOMEM;
- if (sfl1 == NULL || (err = mem_check(sk)) != 0)
+ if (sfl1 == NULL)
+ goto done;
+
+ err = mem_check(sk);
+ if (err != 0)
goto done;
fl1 = fl_intern(net, fl, freq.flr_label);
@@ -769,10 +773,9 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
- if (v == SEQ_START_TOKEN)
- seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
- "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
- else {
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n");
+ } else {
struct ip6_flowlabel *fl = v;
seq_printf(seq,
"%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0e32d2e1bdbf..13cda4c6313b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -902,7 +902,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
struct net_device_stats *stats = &t->dev->stats;
int ret;
- if (!ip6_tnl_xmit_ctl(t))
+ if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
switch (skb->protocol) {
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a3084ab5df6c..aacdcb4dc762 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -220,7 +220,8 @@ resubmit:
nexthdr = skb_network_header(skb)[nhoff];
raw = raw6_local_deliver(skb, nexthdr);
- if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
+ ipprot = rcu_dereference(inet6_protos[nexthdr]);
+ if (ipprot != NULL) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 01e12d0d8fcc..46d452a56d3e 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -79,7 +79,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
SKB_GSO_SIT |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
- SKB_GSO_MPLS |
+ SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_TCPV6 |
0)))
goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8e950c250ada..ce69a12ae48c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -747,13 +747,11 @@ slow_path:
if (len < left) {
len &= ~7;
}
- /*
- * Allocate buffer.
- */
- if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
- hroom + troom, GFP_ATOMIC)) == NULL) {
- NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
+ /* Allocate buffer */
+ frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+ hroom + troom, GFP_ATOMIC);
+ if (!frag) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
@@ -900,7 +898,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
if (*dst == NULL)
*dst = ip6_route_output(net, sk, fl6);
- if ((err = (*dst)->error))
+ err = (*dst)->error;
+ if (err)
goto out_err_release;
if (ipv6_addr_any(&fl6->saddr)) {
@@ -948,7 +947,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
*dst = ip6_route_output(net, sk, &fl_gw6);
- if ((err = (*dst)->error))
+ err = (*dst)->error;
+ if (err)
goto out_err_release;
}
}
@@ -1056,7 +1056,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
* device, so create one single skb packet containing complete
* udp datagram
*/
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ if (skb == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
@@ -1536,7 +1537,8 @@ int ip6_push_pending_frames(struct sock *sk)
unsigned char proto = fl6->flowi6_proto;
int err = 0;
- if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
+ skb = __skb_dequeue(&sk->sk_write_queue);
+ if (skb == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9cb94cfa0ae7..92b3da571980 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -183,6 +183,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
unsigned int hash = HASH(remote, local);
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ struct in6_addr any;
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
@@ -190,6 +191,22 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
(t->dev->flags & IFF_UP))
return t;
}
+
+ memset(&any, 0, sizeof(any));
+ hash = HASH(&any, local);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+
+ hash = HASH(remote, &any);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+
t = rcu_dereference(ip6n->tnls_wc[0]);
if (t && (t->dev->flags & IFF_UP))
return t;
@@ -474,6 +491,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
int rel_msg = 0;
u8 rel_type = ICMPV6_DEST_UNREACH;
u8 rel_code = ICMPV6_ADDR_UNREACH;
+ u8 tproto;
__u32 rel_info = 0;
__u16 len;
int err = -ENOENT;
@@ -483,11 +501,12 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
processing of the error. */
rcu_read_lock();
- if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
- &ipv6h->saddr)) == NULL)
+ t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
+ if (t == NULL)
goto out;
- if (t->parms.proto != ipproto && t->parms.proto != 0)
+ tproto = ACCESS_ONCE(t->parms.proto);
+ if (tproto != ipproto && tproto != 0)
goto out;
err = 0;
@@ -531,7 +550,8 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
- if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
+ len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
+ if (len > mtu) {
rel_type = ICMPV6_PKT_TOOBIG;
rel_code = 0;
rel_info = mtu;
@@ -788,15 +808,16 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
{
struct ip6_tnl *t;
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ u8 tproto;
int err;
rcu_read_lock();
-
- if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
- &ipv6h->daddr)) != NULL) {
+ t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+ if (t != NULL) {
struct pcpu_sw_netstats *tstats;
- if (t->parms.proto != ipproto && t->parms.proto != 0) {
+ tproto = ACCESS_ONCE(t->parms.proto);
+ if (tproto != ipproto && tproto != 0) {
rcu_read_unlock();
goto discard;
}
@@ -902,24 +923,28 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr)
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
struct net *net = t->net;
- if (p->flags & IP6_TNL_F_CAP_XMIT) {
+ if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
+ ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+ (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
struct net_device *ldev = NULL;
rcu_read_lock();
if (p->link)
ldev = dev_get_by_index_rcu(net, p->link);
- if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
+ if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
pr_warn("%s xmit: Local address not yet configured!\n",
p->name);
- else if (!ipv6_addr_is_multicast(&p->raddr) &&
- unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+ else if (!ipv6_addr_is_multicast(raddr) &&
+ unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
p->name);
else
@@ -968,8 +993,34 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
u8 proto;
int err = -1;
- if (!fl6->flowi6_mark)
+ /* NBMA tunnel */
+ if (ipv6_addr_any(&t->parms.raddr)) {
+ struct in6_addr *addr6;
+ struct neighbour *neigh;
+ int addr_type;
+
+ if (!skb_dst(skb))
+ goto tx_err_link_failure;
+
+ neigh = dst_neigh_lookup(skb_dst(skb),
+ &ipv6_hdr(skb)->daddr);
+ if (!neigh)
+ goto tx_err_link_failure;
+
+ addr6 = (struct in6_addr *)&neigh->primary_key;
+ addr_type = ipv6_addr_type(addr6);
+
+ if (addr_type == IPV6_ADDR_ANY)
+ addr6 = &ipv6_hdr(skb)->daddr;
+
+ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+ neigh_release(neigh);
+ } else if (!fl6->flowi6_mark)
dst = ip6_tnl_dst_check(t);
+
+ if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
+ goto tx_err_link_failure;
+
if (!dst) {
ndst = ip6_route_output(net, NULL, fl6);
@@ -1018,7 +1069,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb;
- if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
+ new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (!new_skb)
goto tx_err_dst_release;
if (skb->sk)
@@ -1075,10 +1127,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
struct flowi6 fl6;
__u8 dsfield;
__u32 mtu;
+ u8 tproto;
int err;
- if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
- !ip6_tnl_xmit_ctl(t))
+ tproto = ACCESS_ONCE(t->parms.proto);
+ if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
@@ -1117,10 +1170,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
struct flowi6 fl6;
__u8 dsfield;
__u32 mtu;
+ u8 tproto;
int err;
- if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
- !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
+ tproto = ACCESS_ONCE(t->parms.proto);
+ if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
+ ip6_tnl_addr_conflict(t, ipv6h))
return -1;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
@@ -1282,6 +1337,14 @@ static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
return err;
}
+static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+ /* for default tnl0 device allow to change only the proto */
+ t->parms.proto = p->proto;
+ netdev_state_change(t->dev);
+ return 0;
+}
+
static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
@@ -1381,7 +1444,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
ip6_tnl_parm_from_user(&p1, &p);
t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
- if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+ if (cmd == SIOCCHGTUNNEL) {
if (t != NULL) {
if (t->dev != dev) {
err = -EEXIST;
@@ -1389,8 +1452,10 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
} else
t = netdev_priv(dev);
-
- err = ip6_tnl_update(t, &p1);
+ if (dev == ip6n->fb_tnl_dev)
+ err = ip6_tnl0_update(t, &p1);
+ else
+ err = ip6_tnl_update(t, &p1);
}
if (t) {
err = 0;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index bcda14de7f84..ace10d0b3aac 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -95,6 +95,7 @@ vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
unsigned int hash = HASH(remote, local);
struct ip6_tnl *t;
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ struct in6_addr any;
for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
@@ -102,6 +103,22 @@ vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
(t->dev->flags & IFF_UP))
return t;
}
+
+ memset(&any, 0, sizeof(any));
+ hash = HASH(&any, local);
+ for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+
+ hash = HASH(remote, &any);
+ for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+
t = rcu_dereference(ip6n->tnls_wc[0]);
if (t && (t->dev->flags & IFF_UP))
return t;
@@ -287,8 +304,8 @@ static int vti6_rcv(struct sk_buff *skb)
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
rcu_read_lock();
- if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
- &ipv6h->daddr)) != NULL) {
+ t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+ if (t != NULL) {
if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
rcu_read_unlock();
goto discard;
@@ -412,6 +429,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct net_device_stats *stats = &t->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev;
+ struct xfrm_state *x;
int err = -1;
if (!dst)
@@ -425,7 +443,12 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
goto tx_err_link_failure;
}
- if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr))
+ x = dst->xfrm;
+ if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
+ goto tx_err_link_failure;
+
+ if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
+ (const struct in6_addr *)&x->id.daddr))
goto tx_err_link_failure;
tdev = dst->dev;
@@ -480,7 +503,7 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
ipv6h = ipv6_hdr(skb);
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
- !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+ vti6_addr_conflict(t, ipv6h))
goto tx_err;
xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 1a01d79b8698..722669754bbf 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2094,7 +2094,7 @@ static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
struct mfc6_cache *cache_proxy;
- /* For an (*,G) entry, we only check that the incomming
+ /* For an (*,G) entry, we only check that the incoming
* interface is part of the static tree.
*/
cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index e1a9583bb419..66980d8d98d1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -110,12 +110,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
- opt = xchg(&inet6_sk(sk)->opt, opt);
- } else {
- spin_lock(&sk->sk_dst_lock);
- opt = xchg(&inet6_sk(sk)->opt, opt);
- spin_unlock(&sk->sk_dst_lock);
}
+ opt = xchg(&inet6_sk(sk)->opt, opt);
sk_dst_reset(sk);
return opt;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ed2c4e400b46..5ce107c8aab3 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2824,11 +2824,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
if (v == SEQ_START_TOKEN) {
- seq_printf(seq,
- "%3s %6s "
- "%32s %32s %6s %6s\n", "Idx",
- "Device", "Multicast Address",
- "Source Address", "INC", "EXC");
+ seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
} else {
seq_printf(seq,
"%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f61429d391d3..b9779d441b12 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -97,16 +97,17 @@ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
return -1;
if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
- LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
- mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
+ net_dbg_ratelimited("mip6: MH message too short: %d vs >=%d\n",
+ mh->ip6mh_hdrlen,
+ mip6_mh_len(mh->ip6mh_type));
mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
skb_network_header_len(skb));
return -1;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
- LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
- mh->ip6mh_proto);
+ net_dbg_ratelimited("mip6: MH invalid payload proto = %d\n",
+ mh->ip6mh_proto);
mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
skb_network_header_len(skb));
return -1;
@@ -288,7 +289,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
* XXX: packet if HAO exists.
*/
if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
- LIMIT_NETDEBUG(KERN_WARNING "mip6: hao exists already, override\n");
+ net_dbg_ratelimited("mip6: hao exists already, override\n");
return offset;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 4cb45c1079a2..682866777d53 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -162,7 +162,8 @@ static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data)
memcpy(opt+2, data, data_len);
data_len += 2;
opt += data_len;
- if ((space -= data_len) > 0)
+ space -= data_len;
+ if (space > 0)
memset(opt, 0, space);
}
@@ -656,8 +657,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (skb && ipv6_chk_addr(dev_net(dev), &ipv6_hdr(skb)->saddr, dev, 1))
saddr = &ipv6_hdr(skb)->saddr;
-
- if ((probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES)) < 0) {
+ probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
+ if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID)) {
ND_PRINTK(1, dbg,
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
@@ -1763,7 +1764,7 @@ int __init ndisc_init(void)
/*
* Initialize the neighbour table
*/
- neigh_table_init(&nd_tbl);
+ neigh_table_init(NEIGH_ND_TABLE, &nd_tbl);
#ifdef CONFIG_SYSCTL
err = neigh_sysctl_register(NULL, &nd_tbl.parms,
@@ -1796,6 +1797,6 @@ void ndisc_cleanup(void)
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(&nd_tbl.parms);
#endif
- neigh_table_clear(&nd_tbl);
+ neigh_table_clear(NEIGH_ND_TABLE, &nd_tbl);
unregister_pernet_subsys(&ndisc_net_ops);
}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d38e6a8d8b9f..398377a9d018 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -36,7 +36,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
err = dst->error;
if (err) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
- LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
+ net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
dst_release(dst);
return err;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6af874fc187f..a069822936e6 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -91,6 +91,15 @@ config NFT_MASQ_IPV6
This is the expression that provides IPv6 masquerading support for
nf_tables.
+config NFT_REDIR_IPV6
+ tristate "IPv6 redirect support for nf_tables"
+ depends on NF_TABLES_IPV6
+ depends on NFT_REDIR
+ select NF_NAT_REDIRECT
+ help
+ This is the expression that provides IPv6 redirect support for
+ nf_tables.
+
endif # NF_NAT_IPV6
config IP6_NF_IPTABLES
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index fbb25f01143c..c36e0a5490de 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
+obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 7b17a0be93e7..ddf07e6f59d7 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -398,8 +399,17 @@ static int __init nf_log_ipv6_init(void)
if (ret < 0)
return ret;
- nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
+ ret = nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
+ if (ret < 0) {
+ pr_err("failed to register logger\n");
+ goto err1;
+ }
+
return 0;
+
+err1:
+ unregister_pernet_subsys(&nf_log_ipv6_net_ops);
+ return ret;
}
static void __exit nf_log_ipv6_exit(void)
@@ -412,6 +422,6 @@ module_init(nf_log_ipv6_init);
module_exit(nf_log_ipv6_exit);
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
+MODULE_DESCRIPTION("Netfilter IPv6 packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 015eb8a80766..d05b36440e8b 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -11,6 +11,7 @@
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
+#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/ipv6/nf_reject.h>
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
new file mode 100644
index 000000000000..2433a6bfb191
--- /dev/null
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nft_redir.h>
+#include <net/netfilter/nf_nat_redirect.h>
+
+static void nft_redir_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_redir *priv = nft_expr_priv(expr);
+ struct nf_nat_range range;
+ unsigned int verdict;
+
+ memset(&range, 0, sizeof(range));
+ if (priv->sreg_proto_min) {
+ range.min_proto.all = (__force __be16)
+ data[priv->sreg_proto_min].data[0];
+ range.max_proto.all = (__force __be16)
+ data[priv->sreg_proto_max].data[0];
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ range.flags |= priv->flags;
+
+ verdict = nf_nat_redirect_ipv6(pkt->skb, &range, pkt->ops->hooknum);
+ data[NFT_REG_VERDICT].verdict = verdict;
+}
+
+static struct nft_expr_type nft_redir_ipv6_type;
+static const struct nft_expr_ops nft_redir_ipv6_ops = {
+ .type = &nft_redir_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+ .eval = nft_redir_ipv6_eval,
+ .init = nft_redir_init,
+ .dump = nft_redir_dump,
+ .validate = nft_redir_validate,
+};
+
+static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "redir",
+ .ops = &nft_redir_ipv6_ops,
+ .policy = nft_redir_policy,
+ .maxattr = NFTA_REDIR_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_redir_ipv6_module_init(void)
+{
+ return nft_register_expr(&nft_redir_ipv6_type);
+}
+
+static void __exit nft_redir_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_redir_ipv6_type);
+}
+
+module_init(nft_redir_ipv6_module_init);
+module_exit(nft_redir_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 0bc19fa87821..f73285924144 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -19,9 +19,9 @@
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
-void nft_reject_ipv6_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
@@ -38,7 +38,6 @@ void nft_reject_ipv6_eval(const struct nft_expr *expr,
data[NFT_REG_VERDICT].verdict = NF_DROP;
}
-EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
static struct nft_expr_type nft_reject_ipv6_type;
static const struct nft_expr_ops nft_reject_ipv6_ops = {
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 5b7a1ed2aba9..2d3148378a1f 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -163,7 +163,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
pfh.icmph.checksum = 0;
pfh.icmph.un.echo.id = inet->inet_sport;
pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
- pfh.iov = msg->msg_iov;
+ /* XXX: stripping const */
+ pfh.iov = (struct iovec *)msg->msg_iter.iov;
pfh.wcheck = 0;
pfh.family = AF_INET6;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1752cd0b4882..679253d0af84 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -136,6 +136,7 @@ static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 896af8807979..ee25631f8c29 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -486,13 +486,13 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
}
if (skb_csum_unnecessary(skb)) {
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
} else {
- err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
+ err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
if (err == -EINVAL)
goto csum_copy_err;
}
@@ -548,7 +548,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
if (!rp->checksum)
goto send;
- if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ skb = skb_peek(&sk->sk_write_queue);
+ if (!skb)
goto out;
offset = rp->offset;
@@ -671,65 +672,62 @@ error:
return err;
}
-static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
+struct raw6_frag_vec {
+ struct msghdr *msg;
+ int hlen;
+ char c[4];
+};
+
+static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
{
- struct iovec *iov;
- u8 __user *type = NULL;
- u8 __user *code = NULL;
- u8 len = 0;
- int probed = 0;
- int i;
-
- if (!msg->msg_iov)
- return 0;
+ int err = 0;
+ switch (fl6->flowi6_proto) {
+ case IPPROTO_ICMPV6:
+ rfv->hlen = 2;
+ err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
+ if (!err) {
+ fl6->fl6_icmp_type = rfv->c[0];
+ fl6->fl6_icmp_code = rfv->c[1];
+ }
+ break;
+ case IPPROTO_MH:
+ rfv->hlen = 4;
+ err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
+ if (!err)
+ fl6->fl6_mh_type = rfv->c[2];
+ }
+ return err;
+}
- for (i = 0; i < msg->msg_iovlen; i++) {
- iov = &msg->msg_iov[i];
- if (!iov)
- continue;
+static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb)
+{
+ struct raw6_frag_vec *rfv = from;
- switch (fl6->flowi6_proto) {
- case IPPROTO_ICMPV6:
- /* check if one-byte field is readable or not. */
- if (iov->iov_base && iov->iov_len < 1)
- break;
-
- if (!type) {
- type = iov->iov_base;
- /* check if code field is readable or not. */
- if (iov->iov_len > 1)
- code = type + 1;
- } else if (!code)
- code = iov->iov_base;
-
- if (type && code) {
- if (get_user(fl6->fl6_icmp_type, type) ||
- get_user(fl6->fl6_icmp_code, code))
- return -EFAULT;
- probed = 1;
- }
- break;
- case IPPROTO_MH:
- if (iov->iov_base && iov->iov_len < 1)
- break;
- /* check if type field is readable or not. */
- if (iov->iov_len > 2 - len) {
- u8 __user *p = iov->iov_base;
- if (get_user(fl6->fl6_mh_type, &p[2 - len]))
- return -EFAULT;
- probed = 1;
- } else
- len += iov->iov_len;
+ if (offset < rfv->hlen) {
+ int copy = min(rfv->hlen - offset, len);
- break;
- default:
- probed = 1;
- break;
- }
- if (probed)
- break;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ memcpy(to, rfv->c + offset, copy);
+ else
+ skb->csum = csum_block_add(
+ skb->csum,
+ csum_partial_copy_nocheck(rfv->c + offset,
+ to, copy, 0),
+ odd);
+
+ odd = 0;
+ offset += copy;
+ to += copy;
+ len -= copy;
+
+ if (!len)
+ return 0;
}
- return 0;
+
+ offset -= rfv->hlen;
+
+ return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
@@ -744,6 +742,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
+ struct raw6_frag_vec rfv;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
@@ -847,7 +846,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
- err = rawv6_probe_proto_opt(&fl6, msg);
+ rfv.msg = msg;
+ rfv.hlen = 0;
+ err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
@@ -885,10 +886,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
back_from_confirm:
if (inet->hdrincl)
- err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
+ /* XXX: stripping const */
+ err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
- err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
+ err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
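
The raw.c rework peeks the first few payload bytes once (to pick the ICMPv6 type/code or the MH type for the flow) and then serves those cached bytes back to the fragmenter through raw6_getfrag() before handing off to ip_generic_getfrag(). Here is a simplified, user-space sketch of that "cached header first, then delegate" copy callback; the checksum handling is omitted and every name is illustrative.

#include <string.h>

struct frag_vec {
    const char *cache;   /* peeked header bytes             */
    int hlen;            /* how many bytes are in the cache */
    const char *payload; /* rest of the message             */
};

static int generic_getfrag(const char *src, char *to, int offset, int len)
{
    memcpy(to, src + offset, len);
    return 0;
}

static int frag_getfrag(struct frag_vec *v, char *to, int offset, int len)
{
    if (offset < v->hlen) {
        int copy = v->hlen - offset;

        if (copy > len)
            copy = len;
        memcpy(to, v->cache + offset, copy);

        offset += copy;
        to += copy;
        len -= copy;
        if (!len)
            return 0;
    }
    /* Past the cached header: hand the remainder to the generic copier,
     * re-based so that offset 0 is the first byte after the header.
     */
    return generic_getfrag(v->payload, to, offset - v->hlen, len);
}

int main(void)
{
    struct frag_vec v = { .cache = "\x80\x00", .hlen = 2, .payload = "ping-data" };
    char buf[16];

    frag_getfrag(&v, buf, 0, 6);   /* copies 2 cached bytes + "ping" */
    return memcmp(buf + 2, "ping", 4) != 0;
}
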
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 1a157ca2ebc1..d7d70e69973b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,7 +69,7 @@ struct ip6frag_skb_cb {
#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
-static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
+static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
@@ -178,7 +178,7 @@ static void ip6_frag_expire(unsigned long data)
ip6_expire_frag_queue(net, fq, &ip6_frags);
}
-static __inline__ struct frag_queue *
+static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
const struct in6_addr *dst, u8 ecn)
{
@@ -429,7 +429,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct sk_buff *clone;
int i, plen = 0;
- if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (clone == NULL)
goto out_oom;
clone->next = head->next;
head->next = clone;
@@ -684,21 +685,21 @@ static void ip6_frags_sysctl_unregister(void)
unregister_net_sysctl_table(ip6_ctl_header);
}
#else
-static inline int ip6_frags_ns_sysctl_register(struct net *net)
+static int ip6_frags_ns_sysctl_register(struct net *net)
{
return 0;
}
-static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
+static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}
-static inline int ip6_frags_sysctl_register(void)
+static int ip6_frags_sysctl_register(void)
{
return 0;
}
-static inline void ip6_frags_sysctl_unregister(void)
+static void ip6_frags_sysctl_unregister(void)
{
}
#endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a318dd89b6d9..c91083156edb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -772,23 +772,22 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
}
#endif
-#define BACKTRACK(__net, saddr) \
-do { \
- if (rt == __net->ipv6.ip6_null_entry) { \
- struct fib6_node *pn; \
- while (1) { \
- if (fn->fn_flags & RTN_TL_ROOT) \
- goto out; \
- pn = fn->parent; \
- if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
- fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
- else \
- fn = pn; \
- if (fn->fn_flags & RTN_RTINFO) \
- goto restart; \
- } \
- } \
-} while (0)
+static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
+ struct in6_addr *saddr)
+{
+ struct fib6_node *pn;
+ while (1) {
+ if (fn->fn_flags & RTN_TL_ROOT)
+ return NULL;
+ pn = fn->parent;
+ if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
+ fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
+ else
+ fn = pn;
+ if (fn->fn_flags & RTN_RTINFO)
+ return fn;
+ }
+}
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_table *table,
@@ -804,8 +803,11 @@ restart:
rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
- BACKTRACK(net, &fl6->saddr);
-out:
+ if (rt == net->ipv6.ip6_null_entry) {
+ fn = fib6_backtrack(fn, &fl6->saddr);
+ if (fn)
+ goto restart;
+ }
dst_use(&rt->dst, jiffies);
read_unlock_bh(&table->tb6_lock);
return rt;
@@ -915,33 +917,48 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
struct flowi6 *fl6, int flags)
{
- struct fib6_node *fn;
+ struct fib6_node *fn, *saved_fn;
struct rt6_info *rt, *nrt;
int strict = 0;
int attempts = 3;
int err;
- int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
strict |= flags & RT6_LOOKUP_F_IFACE;
+ if (net->ipv6.devconf_all->forwarding == 0)
+ strict |= RT6_LOOKUP_F_REACHABLE;
-relookup:
+redo_fib6_lookup_lock:
read_lock_bh(&table->tb6_lock);
-restart_2:
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
+ saved_fn = fn;
-restart:
- rt = rt6_select(fn, oif, strict | reachable);
+redo_rt6_select:
+ rt = rt6_select(fn, oif, strict);
if (rt->rt6i_nsiblings)
- rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
- BACKTRACK(net, &fl6->saddr);
- if (rt == net->ipv6.ip6_null_entry ||
- rt->rt6i_flags & RTF_CACHE)
- goto out;
+ rt = rt6_multipath_select(rt, fl6, oif, strict);
+ if (rt == net->ipv6.ip6_null_entry) {
+ fn = fib6_backtrack(fn, &fl6->saddr);
+ if (fn)
+ goto redo_rt6_select;
+ else if (strict & RT6_LOOKUP_F_REACHABLE) {
+ /* also consider unreachable route */
+ strict &= ~RT6_LOOKUP_F_REACHABLE;
+ fn = saved_fn;
+ goto redo_rt6_select;
+ } else {
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ goto out2;
+ }
+ }
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
+ if (rt->rt6i_flags & RTF_CACHE)
+ goto out2;
+
if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
@@ -967,15 +984,8 @@ restart:
* released someone could insert this route. Relookup.
*/
ip6_rt_put(rt);
- goto relookup;
+ goto redo_fib6_lookup_lock;
-out:
- if (reachable) {
- reachable = 0;
- goto restart_2;
- }
- dst_hold(&rt->dst);
- read_unlock_bh(&table->tb6_lock);
out2:
rt->dst.lastuse = jiffies;
rt->dst.__use++;
@@ -1235,10 +1245,12 @@ restart:
rt = net->ipv6.ip6_null_entry;
else if (rt->dst.error) {
rt = net->ipv6.ip6_null_entry;
- goto out;
+ } else if (rt == net->ipv6.ip6_null_entry) {
+ fn = fib6_backtrack(fn, &fl6->saddr);
+ if (fn)
+ goto restart;
}
- BACKTRACK(net, &fl6->saddr);
-out:
+
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
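
The route.c change replaces the goto-based BACKTRACK() macro with a fib6_backtrack() helper that walks back up the FIB tree until it reaches a node carrying route info, letting the callers redo their route selection from there. A minimal sketch of that walk-up follows; the node flags are simplified stand-ins, and the subtree re-lookup for source routing that the real helper performs is omitted.

#include <stdbool.h>
#include <stddef.h>

struct node {
    struct node *parent;
    bool is_root;      /* stand-in for RTN_TL_ROOT */
    bool has_rtinfo;   /* stand-in for RTN_RTINFO  */
};

static struct node *backtrack(struct node *fn)
{
    for (;;) {
        if (fn->is_root)
            return NULL;   /* ran out of tree: the lookup really failed */
        fn = fn->parent;
        if (fn->has_rtinfo)
            return fn;     /* caller redoes route selection from here */
    }
}

int main(void)
{
    struct node root = { .is_root = true };
    struct node mid  = { .parent = &root, .has_rtinfo = true };
    struct node leaf = { .parent = &mid };

    return backtrack(&leaf) == &mid ? 0 : 1;
}
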
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a24557a1c1d8..213546bd6d5d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1241,7 +1241,8 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
goto done;
err = -ENOENT;
- if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
+ t = ipip6_tunnel_locate(net, &p, 0);
+ if (t == NULL)
goto done;
err = -EPERM;
if (t == netdev_priv(sitn->fb_tunnel_dev))
@@ -1711,7 +1712,7 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
tunnel->encap.dport) ||
nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
- tunnel->encap.dport))
+ tunnel->encap.flags))
goto nla_put_failure;
return 0;
@@ -1836,8 +1837,8 @@ static int __net_init sit_init_net(struct net *net)
goto err_dev_free;
ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
-
- if ((err = register_netdev(sitn->fb_tunnel_dev)))
+ err = register_netdev(sitn->fb_tunnel_dev);
+ if (err)
goto err_reg_dev;
t = netdev_priv(sitn->fb_tunnel_dev);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2f25cb6347ca..7337fc7947e2 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -166,13 +166,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct dst_entry *dst;
__u8 rcv_wscale;
- bool ecn_ok = false;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
- if (tcp_synq_no_recent_overflow(sk) ||
- (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) {
+ if (tcp_synq_no_recent_overflow(sk))
+ goto out;
+
+ mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
+ if (mss == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
@@ -183,7 +185,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, 0, NULL);
- if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+ if (!cookie_timestamp_decode(&tcp_opt))
goto out;
ret = NULL;
@@ -220,7 +222,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->expires = 0UL;
req->num_retrans = 0;
- ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
@@ -261,6 +262,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
+ ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
ret = get_cookie_sock(sk, skb, req, dst);
out:
@@ -269,4 +271,3 @@ out_free:
reqsk_free(req);
return NULL;
}
-
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index dc495ae2ead0..5ff87805258e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -787,16 +787,16 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.queue_hash_add = inet6_csk_reqsk_queue_hash_add,
};
-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
- u32 tsval, u32 tsecr, int oif,
- struct tcp_md5sig_key *key, int rst, u8 tclass,
- u32 label)
+static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr,
+ int oif, struct tcp_md5sig_key *key, int rst,
+ u8 tclass, u32 label)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
struct sock *ctl_sk = net->ipv6.tcp_sk;
unsigned int tot_len = sizeof(struct tcphdr);
struct dst_entry *dst;
@@ -946,7 +946,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
(th->doff << 2);
oif = sk ? sk->sk_bound_dev_if : 0;
- tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
+ tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
#ifdef CONFIG_TCP_MD5SIG
release_sk1:
@@ -957,13 +957,13 @@ release_sk1:
#endif
}
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
- u32 win, u32 tsval, u32 tsecr, int oif,
+static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key, u8 tclass,
u32 label)
{
- tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
- label);
+ tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
+ tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -971,7 +971,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
@@ -986,10 +986,10 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
- tcp_rsk(req)->rcv_nxt,
- req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+ tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0);
}
@@ -1296,6 +1296,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst = sk->sk_rx_dst;
sock_rps_save_rxhash(sk, skb);
+ sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
@@ -1325,6 +1326,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
*/
if (nsk != sk) {
sock_rps_save_rxhash(nsk, skb);
+ sk_mark_napi_id(sk, skb);
if (tcp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb)
@@ -1457,7 +1459,7 @@ process:
if (sk_filter(sk, skb))
goto discard_and_relse;
- sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
skb->dev = NULL;
bh_lock_sock_nested(sk);
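
The tcp_v6_send_response()/tcp_v6_send_ack() hunks thread a struct sock * through so the network namespace can come from sock_net(sk) when a socket is available, falling back to the device behind the skb's dst when it is not (as in socket-less resets). A rough sketch of that "explicit context if present, derived context otherwise" pattern, using made-up context types rather than the kernel's struct net:

    #include <stdio.h>

    struct ctx { const char *name; };          /* made-up stand-in for struct net */

    static struct ctx derived_ctx = { "derived-from-device" };
    static struct ctx socket_ctx  = { "from-socket" };

    struct sock_like { struct ctx *ctx; };

    /* stand-in for dev_net(skb_dst(skb)->dev) */
    static struct ctx *ctx_from_packet(void)
    {
        return &derived_ctx;
    }

    /* prefer the socket's context, fall back to the packet's */
    static struct ctx *pick_ctx(struct sock_like *sk)
    {
        return sk ? sk->ctx : ctx_from_packet();
    }

    int main(void)
    {
        struct sock_like sk = { &socket_ctx };

        printf("%s / %s\n", pick_ctx(&sk)->name, pick_ctx(NULL)->name);
        return 0;
    }
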
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f6ba535b6feb..189dc4ae3eca 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -148,72 +148,85 @@ static inline int compute_score(struct sock *sk, struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
- int score = -1;
+ int score;
+ struct inet_sock *inet;
- if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
- sk->sk_family == PF_INET6) {
- struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net) ||
+ udp_sk(sk)->udp_port_hash != hnum ||
+ sk->sk_family != PF_INET6)
+ return -1;
- score = 0;
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score++;
- }
- if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
- score++;
- }
- if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
- return -1;
- score++;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score++;
- }
+ score = 0;
+ inet = inet_sk(sk);
+
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
+ return -1;
+ score++;
+ }
+
+ if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+ score++;
+ }
+
+ if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
+ return -1;
+ score++;
}
+
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score++;
+ }
+
return score;
}
#define SCORE2_MAX (1 + 1 + 1)
static inline int compute_score2(struct sock *sk, struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, unsigned short hnum,
- int dif)
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned short hnum, int dif)
{
- int score = -1;
+ int score;
+ struct inet_sock *inet;
- if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
- sk->sk_family == PF_INET6) {
- struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net) ||
+ udp_sk(sk)->udp_port_hash != hnum ||
+ sk->sk_family != PF_INET6)
+ return -1;
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+
+ score = 0;
+ inet = inet_sk(sk);
+
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
return -1;
- score = 0;
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score++;
- }
- if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
- return -1;
- score++;
- }
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score++;
- }
+ score++;
}
+
+ if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+ if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
+ return -1;
+ score++;
+ }
+
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score++;
+ }
+
return score;
}
-
/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
@@ -357,7 +370,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
+ sk = skb_steal_sock(skb);
+ if (unlikely(sk))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
@@ -424,10 +438,10 @@ try_again:
}
if (skb_csum_unnecessary(skb))
- err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+ msg, copied);
else {
- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
+ err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
if (err == -EINVAL)
goto csum_copy_err;
}
@@ -577,6 +591,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
@@ -659,15 +674,13 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
- LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
- " %d while full coverage %d requested\n",
- UDP_SKB_CB(skb)->cscov, skb->len);
+ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
- LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
- "too small, need min %d\n",
- UDP_SKB_CB(skb)->cscov, up->pcrlen);
+ net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
+ UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
@@ -760,9 +773,9 @@ static void udp6_csum_zero_error(struct sk_buff *skb)
/* RFC 2460 section 8.1 says that we SHOULD log
* this error. Well, it is reasonable.
*/
- LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
- &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
- &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
+ net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+ &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
+ &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
@@ -771,7 +784,7 @@ static void udp6_csum_zero_error(struct sk_buff *skb)
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
- struct udp_table *udptable)
+ struct udp_table *udptable, int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
@@ -781,6 +794,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
int dif = inet6_iif(skb);
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+ bool inner_flushed = false;
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
@@ -803,6 +817,7 @@ start_lookup:
(uh->check || udp_sk(sk)->no_check6_rx)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
+ inner_flushed = true;
count = 0;
}
stack[count++] = sk;
@@ -821,7 +836,10 @@ start_lookup:
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
- kfree_skb(skb);
+ if (!inner_flushed)
+ UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+ proto == IPPROTO_UDPLITE);
+ consume_skb(skb);
}
return 0;
}
@@ -873,7 +891,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
- saddr, daddr, udptable);
+ saddr, daddr, udptable, proto);
/* Unicast */
@@ -925,14 +943,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
return 0;
short_packet:
- LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
- proto == IPPROTO_UDPLITE ? "-Lite" : "",
- saddr,
- ntohs(uh->source),
- ulen,
- skb->len,
- daddr,
- ntohs(uh->dest));
+ net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
+ proto == IPPROTO_UDPLITE ? "-Lite" : "",
+ saddr, ntohs(uh->source),
+ ulen, skb->len,
+ daddr, ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
@@ -1025,7 +1040,8 @@ static int udp_v6_push_pending_frames(struct sock *sk)
fl6 = &inet->cork.fl.u.ip6;
/* Grab the skbuff where UDP header space exists. */
- if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ skb = skb_peek(&sk->sk_write_queue);
+ if (skb == NULL)
goto out;
/*
@@ -1284,7 +1300,7 @@ back_from_confirm:
/* ... which is an evident application bug. --ANK */
release_sock(sk);
- LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
+ net_dbg_ratelimited("udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
@@ -1296,7 +1312,7 @@ do_append_data:
dontfrag = np->dontfrag;
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
- err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
+ err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
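
The compute_score()/compute_score2() rewrite above flattens a deeply nested "match and accumulate" body into guard clauses: disqualify early with -1, otherwise add one point per matched field. The same file also converts LIMIT_NETDEBUG users to net_dbg_ratelimited() and, in the multicast path, accounts otherwise-ignored datagrams under UDP_MIB_IGNOREDMULTI before consume_skb(). A small stand-alone sketch of the guard-clause scoring shape, with invented fields rather than the real socket and inet structures:

    #include <stdio.h>

    struct key { int net, port, bound_if; };    /* invented, not the kernel types */

    /* -1 means "cannot match at all"; a higher score is a tighter match */
    static int compute_score(const struct key *sk, const struct key *pkt)
    {
        int score;

        if (sk->net != pkt->net)            /* hard mismatch: bail out early */
            return -1;

        score = 0;

        if (sk->port) {                     /* optional field: must match if set */
            if (sk->port != pkt->port)
                return -1;
            score++;
        }

        if (sk->bound_if) {
            if (sk->bound_if != pkt->bound_if)
                return -1;
            score++;
        }

        return score;
    }

    int main(void)
    {
        struct key sk  = { 1, 53, 0 };
        struct key pkt = { 1, 53, 2 };

        printf("score=%d\n", compute_score(&sk, &pkt));  /* prints score=1 */
        return 0;
    }
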
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b8f543f6ac6..b6aa8ed18257 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,11 +42,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
+ SKB_GSO_TUNNEL_REMCSUM |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
- SKB_GSO_SIT |
- SKB_GSO_MPLS) ||
+ SKB_GSO_SIT) ||
!(type & (SKB_GSO_UDP))))
goto out;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 1b095ca37aa4..f11ad1d95e0e 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -306,7 +306,7 @@ void ipxitf_down(struct ipx_interface *intrfc)
spin_unlock_bh(&ipx_interfaces_lock);
}
-static __inline__ void __ipxitf_put(struct ipx_interface *intrfc)
+static void __ipxitf_put(struct ipx_interface *intrfc)
{
if (atomic_dec_and_test(&intrfc->refcnt))
__ipxitf_down(intrfc);
@@ -1745,8 +1745,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN);
}
- rc = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len,
- flags & MSG_DONTWAIT);
+ rc = ipxrtr_route_packet(sk, usipx, msg, len, flags & MSG_DONTWAIT);
if (rc >= 0)
rc = len;
out:
@@ -1808,8 +1807,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
- copied);
+ rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied);
if (rc)
goto out_free;
if (skb->tstamp.tv64)
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index e15c16a517e7..c1d247ebe916 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -45,7 +45,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
}
i = list_entry(v, struct ipx_interface, node);
- seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
+ seq_printf(seq, "%08X ", ntohl(i->if_netnum));
seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
i->if_node[0], i->if_node[1], i->if_node[2],
i->if_node[3], i->if_node[4], i->if_node[5]);
@@ -87,10 +87,10 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
rt = list_entry(v, struct ipx_route, node);
- seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
+ seq_printf(seq, "%08X ", ntohl(rt->ir_net));
if (rt->ir_routed)
- seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
- (long unsigned int)ntohl(rt->ir_intrfc->if_netnum),
+ seq_printf(seq, "%08X %02X%02X%02X%02X%02X%02X\n",
+ ntohl(rt->ir_intrfc->if_netnum),
rt->ir_router_node[0], rt->ir_router_node[1],
rt->ir_router_node[2], rt->ir_router_node[3],
rt->ir_router_node[4], rt->ir_router_node[5]);
@@ -194,19 +194,19 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
s = v;
ipxs = ipx_sk(s);
#ifdef CONFIG_IPX_INTERN
- seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
- (unsigned long)ntohl(ipxs->intrfc->if_netnum),
+ seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
+ ntohl(ipxs->intrfc->if_netnum),
ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3],
ipxs->node[4], ipxs->node[5], ntohs(ipxs->port));
#else
- seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum),
+ seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum),
ntohs(ipxs->port));
#endif /* CONFIG_IPX_INTERN */
if (s->sk_state != TCP_ESTABLISHED)
seq_printf(seq, "%-28s", "Not_Connected");
else {
- seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ",
- (unsigned long)ntohl(ipxs->dest_addr.net),
+ seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
+ ntohl(ipxs->dest_addr.net),
ipxs->dest_addr.node[0], ipxs->dest_addr.node[1],
ipxs->dest_addr.node[2], ipxs->dest_addr.node[3],
ipxs->dest_addr.node[4], ipxs->dest_addr.node[5],
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 67e7ad3d46b1..3e2a32a9f3bd 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -165,7 +165,7 @@ int ipxrtr_route_skb(struct sk_buff *skb)
* Route an outgoing frame from a socket.
*/
int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock)
+ struct msghdr *msg, size_t len, int noblock)
{
struct sk_buff *skb;
struct ipx_sock *ipxs = ipx_sk(sk);
@@ -229,7 +229,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN);
ipx->ipx_dest.sock = usipx->sipx_port;
- rc = memcpy_fromiovec(skb_put(skb, len), iov, len);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc) {
kfree_skb(skb);
goto out_put;
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index ad7c03dedaab..0dafcc561ed6 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -9,14 +9,12 @@
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>
+#include <net/ipx.h>
#ifndef CONFIG_SYSCTL
#error This file should not be compiled without CONFIG_SYSCTL defined
#endif
-/* From af_ipx.c */
-extern int sysctl_ipx_pprop_broadcasting;
-
static struct ctl_table ipx_table[] = {
{
.procname = "ipx_pprop_broadcasting",
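
The IPX hunks switch from the iovec-based helpers (memcpy_fromiovec, skb_copy_datagram_iovec) to the msghdr-based ones (memcpy_from_msg, skb_copy_datagram_msg), print network numbers with %08X after ntohl() instead of casting to unsigned long, and replace the local extern for sysctl_ipx_pprop_broadcasting with the <net/ipx.h> include. As a rough userspace analogue of what a gather-copy helper does, the sketch below walks a struct iovec array into a flat buffer; it is illustrative only and not the kernel's msghdr iterator machinery.

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* copy up to len bytes gathered from an iovec array into dst */
    static size_t gather_copy(void *dst, const struct iovec *iov, int iovcnt,
                              size_t len)
    {
        size_t done = 0;
        char *out = dst;
        int i;

        for (i = 0; i < iovcnt && done < len; i++) {
            size_t chunk = iov[i].iov_len;

            if (chunk > len - done)
                chunk = len - done;
            memcpy(out + done, iov[i].iov_base, chunk);
            done += chunk;
        }
        return done;
    }

    int main(void)
    {
        char a[] = "IPX ", b[] = "payload";
        struct iovec iov[2] = {
            { a, sizeof(a) - 1 },
            { b, sizeof(b) - 1 },
        };
        char buf[32] = { 0 };

        gather_copy(buf, iov, 2, sizeof(buf) - 1);
        printf("%s\n", buf);   /* "IPX payload" */
        return 0;
    }
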
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 3f3a6cbdceb7..568edc72d737 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -84,14 +84,12 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
struct sock *sk;
int err;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
self = instance;
sk = instance;
err = sock_queue_rcv_skb(sk, skb);
if (err) {
- IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__);
+ pr_debug("%s(), error: no more mem!\n", __func__);
self->rx_flow = FLOW_STOP;
/* When we return error, TTP will need to requeue the skb */
@@ -115,7 +113,7 @@ static void irda_disconnect_indication(void *instance, void *sap,
self = instance;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
/* Don't care about it, but let's not leak it */
if(skb)
@@ -123,8 +121,8 @@ static void irda_disconnect_indication(void *instance, void *sap,
sk = instance;
if (sk == NULL) {
- IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n",
- __func__, self);
+ pr_debug("%s(%p) : BUG : sk is NULL\n",
+ __func__, self);
return;
}
@@ -180,7 +178,7 @@ static void irda_connect_confirm(void *instance, void *sap,
self = instance;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
sk = instance;
if (sk == NULL) {
@@ -201,16 +199,16 @@ static void irda_connect_confirm(void *instance, void *sap,
switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
- IRDA_ERROR("%s: max_sdu_size must be 0\n",
- __func__);
+ net_err_ratelimited("%s: max_sdu_size must be 0\n",
+ __func__);
return;
}
self->max_data_size = irttp_get_max_seg_size(self->tsap);
break;
case SOCK_SEQPACKET:
if (max_sdu_size == 0) {
- IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
- __func__);
+ net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
+ __func__);
return;
}
self->max_data_size = max_sdu_size;
@@ -219,8 +217,8 @@ static void irda_connect_confirm(void *instance, void *sap,
self->max_data_size = irttp_get_max_seg_size(self->tsap);
}
- IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
- self->max_data_size);
+ pr_debug("%s(), max_data_size=%d\n", __func__,
+ self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
@@ -244,7 +242,7 @@ static void irda_connect_indication(void *instance, void *sap,
self = instance;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
sk = instance;
if (sk == NULL) {
@@ -262,8 +260,8 @@ static void irda_connect_indication(void *instance, void *sap,
switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
- IRDA_ERROR("%s: max_sdu_size must be 0\n",
- __func__);
+ net_err_ratelimited("%s: max_sdu_size must be 0\n",
+ __func__);
kfree_skb(skb);
return;
}
@@ -271,8 +269,8 @@ static void irda_connect_indication(void *instance, void *sap,
break;
case SOCK_SEQPACKET:
if (max_sdu_size == 0) {
- IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
- __func__);
+ net_err_ratelimited("%s: max_sdu_size cannot be 0\n",
+ __func__);
kfree_skb(skb);
return;
}
@@ -282,8 +280,8 @@ static void irda_connect_indication(void *instance, void *sap,
self->max_data_size = irttp_get_max_seg_size(self->tsap);
}
- IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
- self->max_data_size);
+ pr_debug("%s(), max_data_size=%d\n", __func__,
+ self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
@@ -301,12 +299,10 @@ static void irda_connect_response(struct irda_sock *self)
{
struct sk_buff *skb;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL);
if (skb == NULL) {
- IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n",
- __func__);
+ pr_debug("%s() Unable to allocate sk_buff!\n",
+ __func__);
return;
}
@@ -327,26 +323,24 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
struct irda_sock *self;
struct sock *sk;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
self = instance;
sk = instance;
BUG_ON(sk == NULL);
switch (flow) {
case FLOW_STOP:
- IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n",
- __func__);
+ pr_debug("%s(), IrTTP wants us to slow down\n",
+ __func__);
self->tx_flow = flow;
break;
case FLOW_START:
self->tx_flow = flow;
- IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
- __func__);
+ pr_debug("%s(), IrTTP wants us to start again\n",
+ __func__);
wake_up_interruptible(sk_sleep(sk));
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
+ pr_debug("%s(), Unknown flow command!\n", __func__);
/* Unknown flow command, better stop */
self->tx_flow = flow;
break;
@@ -368,11 +362,11 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
self = priv;
if (!self) {
- IRDA_WARNING("%s: lost myself!\n", __func__);
+ net_warn_ratelimited("%s: lost myself!\n", __func__);
return;
}
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
/* We probably don't need to make any more queries */
iriap_close(self->iriap);
@@ -380,8 +374,8 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
/* Check if request succeeded */
if (result != IAS_SUCCESS) {
- IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__,
- result);
+ pr_debug("%s(), IAS query failed! (%d)\n", __func__,
+ result);
self->errno = result; /* We really need it later */
@@ -413,11 +407,9 @@ static void irda_selective_discovery_indication(discinfo_t *discovery,
{
struct irda_sock *self;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
self = priv;
if (!self) {
- IRDA_WARNING("%s: lost myself!\n", __func__);
+ net_warn_ratelimited("%s: lost myself!\n", __func__);
return;
}
@@ -440,8 +432,6 @@ static void irda_discovery_timeout(u_long priv)
{
struct irda_sock *self;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
self = (struct irda_sock *) priv;
BUG_ON(self == NULL);
@@ -465,7 +455,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
notify_t notify;
if (self->tsap) {
- IRDA_DEBUG(0, "%s: busy!\n", __func__);
+ pr_debug("%s: busy!\n", __func__);
return -EBUSY;
}
@@ -483,8 +473,8 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
&notify);
if (self->tsap == NULL) {
- IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n",
- __func__);
+ pr_debug("%s(), Unable to allocate TSAP!\n",
+ __func__);
return -ENOMEM;
}
/* Remember which TSAP selector we actually got */
@@ -505,7 +495,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
notify_t notify;
if (self->lsap) {
- IRDA_WARNING("%s(), busy!\n", __func__);
+ net_warn_ratelimited("%s(), busy!\n", __func__);
return -EBUSY;
}
@@ -517,7 +507,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
if (self->lsap == NULL) {
- IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__);
+ pr_debug("%s(), Unable to allocate LSAP!\n", __func__);
return -ENOMEM;
}
@@ -538,11 +528,11 @@ static int irda_open_lsap(struct irda_sock *self, int pid)
*/
static int irda_find_lsap_sel(struct irda_sock *self, char *name)
{
- IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name);
+ pr_debug("%s(%p, %s)\n", __func__, self, name);
if (self->iriap) {
- IRDA_WARNING("%s(): busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s(): busy with a previous query\n",
+ __func__);
return -EBUSY;
}
@@ -577,8 +567,8 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
/* Get the remote TSAP selector */
switch (self->ias_result->type) {
case IAS_INTEGER:
- IRDA_DEBUG(4, "%s() int=%d\n",
- __func__, self->ias_result->t.integer);
+ pr_debug("%s() int=%d\n",
+ __func__, self->ias_result->t.integer);
if (self->ias_result->t.integer != -1)
self->dtsap_sel = self->ias_result->t.integer;
@@ -587,7 +577,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
break;
default:
self->dtsap_sel = 0;
- IRDA_DEBUG(0, "%s(), bad type!\n", __func__);
+ pr_debug("%s(), bad type!\n", __func__);
break;
}
if (self->ias_result)
@@ -625,7 +615,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
__u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */
__u8 dtsap_sel = 0x0; /* TSAP associated with it */
- IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name);
+ pr_debug("%s(), name=%s\n", __func__, name);
/* Ask lmp for the current discovery log
* Note : we have to use irlmp_get_discoveries(), as opposed
@@ -646,8 +636,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
/* Try the address in the log */
self->daddr = discoveries[i].daddr;
self->saddr = 0x0;
- IRDA_DEBUG(1, "%s(), trying daddr = %08x\n",
- __func__, self->daddr);
+ pr_debug("%s(), trying daddr = %08x\n",
+ __func__, self->daddr);
/* Query remote LM-IAS for this service */
err = irda_find_lsap_sel(self, name);
@@ -655,8 +645,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
case 0:
/* We found the requested service */
if(daddr != DEV_ADDR_ANY) {
- IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n",
- __func__, name);
+ pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n",
+ __func__, name);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
return -ENOTUNIQ;
@@ -670,7 +660,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
break;
default:
/* Something bad did happen :-( */
- IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
+ pr_debug("%s(), unexpected IAS query failure\n",
+ __func__);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
return -EHOSTUNREACH;
@@ -681,8 +672,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
/* Check out what we found */
if(daddr == DEV_ADDR_ANY) {
- IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
- __func__, name);
+ pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n",
+ __func__, name);
self->daddr = DEV_ADDR_ANY;
return -EADDRNOTAVAIL;
}
@@ -692,8 +683,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
self->saddr = 0x0;
self->dtsap_sel = dtsap_sel;
- IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n",
- __func__, name, self->daddr);
+ pr_debug("%s(), discovered requested service ''%s'' at address %08x\n",
+ __func__, name, self->daddr);
return 0;
}
@@ -725,8 +716,8 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
saddr.sir_addr = self->saddr;
}
- IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
- IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr);
+ pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
+ pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr);
/* uaddr_len come to us uninitialised */
*uaddr_len = sizeof (struct sockaddr_irda);
@@ -746,8 +737,6 @@ static int irda_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int err = -EOPNOTSUPP;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
lock_sock(sk);
if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
@@ -779,7 +768,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct irda_sock *self = irda_sk(sk);
int err;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
if (addr_len != sizeof(struct sockaddr_irda))
return -EINVAL;
@@ -792,7 +781,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
self->pid = addr->sir_lsap_sel;
err = -EOPNOTSUPP;
if (self->pid & 0x80) {
- IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
goto out;
}
err = irda_open_lsap(self, self->pid);
@@ -845,8 +835,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int err;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
if (err)
return err;
@@ -911,7 +899,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
new->tsap = irttp_dup(self->tsap, new);
err = -EPERM; /* value does not seem to make sense. -arnd */
if (!new->tsap) {
- IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
+ pr_debug("%s(), dup failed!\n", __func__);
kfree_skb(skb);
goto out;
}
@@ -971,7 +959,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
struct irda_sock *self = irda_sk(sk);
int err;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
lock_sock(sk);
/* Don't allow connect for Ultra sockets */
@@ -1007,13 +995,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
/* Try to find one suitable */
err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
if (err) {
- IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__);
+ pr_debug("%s(), auto-connect failed!\n", __func__);
goto out;
}
} else {
/* Use the one provided by the user */
self->daddr = addr->sir_addr;
- IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr);
+ pr_debug("%s(), daddr = %08x\n", __func__, self->daddr);
/* If we don't have a valid service name, we assume the
* user want to connect on a specific LSAP. Prevent
@@ -1023,7 +1011,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
/* Query remote LM-IAS using service name */
err = irda_find_lsap_sel(self, addr->sir_name);
if (err) {
- IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
+ pr_debug("%s(), connect failed!\n", __func__);
goto out;
}
} else {
@@ -1048,7 +1036,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
self->saddr, self->daddr, NULL,
self->max_sdu_size_rx, NULL);
if (err) {
- IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
+ pr_debug("%s(), connect failed!\n", __func__);
goto out;
}
@@ -1098,8 +1086,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
struct sock *sk;
struct irda_sock *self;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (net != &init_net)
return -EAFNOSUPPORT;
@@ -1119,7 +1105,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
return -ENOMEM;
self = irda_sk(sk);
- IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self);
+ pr_debug("%s() : self is %p\n", __func__, self);
init_waitqueue_head(&self->query_wait);
@@ -1181,7 +1167,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
*/
static void irda_destroy_socket(struct irda_sock *self)
{
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
/* Unregister with IrLMP */
irlmp_unregister_client(self->ckey);
@@ -1218,8 +1204,6 @@ static int irda_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (sk == NULL)
return 0;
@@ -1286,7 +1270,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int err = -EPIPE;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ pr_debug("%s(), len=%zd\n", __func__, len);
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
@@ -1322,8 +1306,8 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Check that we don't send out too big frames */
if (len > self->max_data_size) {
- IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
- __func__, len, self->max_data_size);
+ pr_debug("%s(), Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
len = self->max_data_size;
}
@@ -1335,7 +1319,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_reserve(skb, self->max_header_size + 16);
skb_reset_transport_header(skb);
skb_put(skb, len);
- err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (err) {
kfree_skb(skb);
goto out_err;
@@ -1347,7 +1331,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
*/
err = irttp_data_request(self->tsap, skb);
if (err) {
- IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
+ pr_debug("%s(), err=%d\n", __func__, err);
goto out_err;
}
@@ -1378,8 +1362,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
size_t copied;
int err;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
@@ -1389,12 +1371,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
copied = skb->len;
if (copied > size) {
- IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
- __func__, copied, size);
+ pr_debug("%s(), Received truncated frame (%zd < %zd)!\n",
+ __func__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_copy_datagram_msg(skb, 0, msg, copied);
skb_free_datagram(sk, skb);
@@ -1406,7 +1388,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
- IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
+ pr_debug("%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
@@ -1428,8 +1410,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
int target, err;
long timeo;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
if ((err = sock_error(sk)) < 0)
return err;
@@ -1486,7 +1466,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
}
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (memcpy_to_msg(msg, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (copied == 0)
copied = -EFAULT;
@@ -1501,15 +1481,15 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len) {
- IRDA_DEBUG(1, "%s(), back on q!\n",
- __func__);
+ pr_debug("%s(), back on q!\n",
+ __func__);
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
kfree_skb(skb);
} else {
- IRDA_DEBUG(0, "%s() questionable!?\n", __func__);
+ pr_debug("%s() questionable!?\n", __func__);
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1525,7 +1505,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
*/
if (self->rx_flow == FLOW_STOP) {
if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) {
- IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__);
+ pr_debug("%s(), Starting IrTTP\n", __func__);
self->rx_flow = FLOW_START;
irttp_flow_request(self->tsap, FLOW_START);
}
@@ -1549,7 +1529,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ pr_debug("%s(), len=%zd\n", __func__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
@@ -1573,9 +1553,8 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
* service, so we have no fragmentation and no coalescence
*/
if (len > self->max_data_size) {
- IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
- __func__, len, self->max_data_size);
+ pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
len = self->max_data_size;
}
@@ -1588,9 +1567,9 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
skb_reserve(skb, self->max_header_size);
skb_reset_transport_header(skb);
- IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
+ pr_debug("%s(), appending user data\n", __func__);
skb_put(skb, len);
- err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (err) {
kfree_skb(skb);
goto out;
@@ -1602,7 +1581,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
*/
err = irttp_udata_request(self->tsap, skb);
if (err) {
- IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
+ pr_debug("%s(), err=%d\n", __func__, err);
goto out;
}
@@ -1631,7 +1610,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ pr_debug("%s(), len=%zd\n", __func__, len);
err = -EINVAL;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
@@ -1659,7 +1638,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
pid = addr->sir_lsap_sel;
if (pid & 0x80) {
- IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
err = -EOPNOTSUPP;
goto out;
}
@@ -1668,8 +1648,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
* port. Jean II */
if ((self->lsap == NULL) ||
(sk->sk_state != TCP_ESTABLISHED)) {
- IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n",
- __func__);
+ pr_debug("%s(), socket not bound to Ultra PID.\n",
+ __func__);
err = -ENOTCONN;
goto out;
}
@@ -1682,9 +1662,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
* service, so we have no fragmentation and no coalescence
*/
if (len > self->max_data_size) {
- IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
- __func__, len, self->max_data_size);
+ pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n",
+ __func__, len, self->max_data_size);
len = self->max_data_size;
}
@@ -1697,9 +1676,9 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
skb_reserve(skb, self->max_header_size);
skb_reset_transport_header(skb);
- IRDA_DEBUG(4, "%s(), appending user data\n", __func__);
+ pr_debug("%s(), appending user data\n", __func__);
skb_put(skb, len);
- err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (err) {
kfree_skb(skb);
goto out;
@@ -1708,7 +1687,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
err = irlmp_connless_data_request((bound ? self->lsap : NULL),
skb, pid);
if (err)
- IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
+ pr_debug("%s(), err=%d\n", __func__, err);
out:
release_sock(sk);
return err ? : len;
@@ -1723,7 +1702,7 @@ static int irda_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- IRDA_DEBUG(1, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
lock_sock(sk);
@@ -1762,8 +1741,6 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
struct irda_sock *self = irda_sk(sk);
unsigned int mask;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
@@ -1771,13 +1748,13 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
if (sk->sk_err)
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
- IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
+ pr_debug("%s(), POLLHUP\n", __func__);
mask |= POLLHUP;
}
/* Readable? */
if (!skb_queue_empty(&sk->sk_receive_queue)) {
- IRDA_DEBUG(4, "Socket is readable\n");
+ pr_debug("Socket is readable\n");
mask |= POLLIN | POLLRDNORM;
}
@@ -1785,7 +1762,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
switch (sk->sk_type) {
case SOCK_STREAM:
if (sk->sk_state == TCP_CLOSE) {
- IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__);
+ pr_debug("%s(), POLLHUP\n", __func__);
mask |= POLLHUP;
}
@@ -1823,7 +1800,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sock *sk = sock->sk;
int err;
- IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd);
+ pr_debug("%s(), cmd=%#x\n", __func__, cmd);
err = -EINVAL;
switch (cmd) {
@@ -1864,7 +1841,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFMETRIC:
break;
default:
- IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__);
+ pr_debug("%s(), doing device ioctl!\n", __func__);
err = -ENOIOCTLCMD;
}
@@ -1900,7 +1877,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
struct ias_attrib * ias_attr; /* Attribute in IAS object */
int opt, free_ias = 0, err = 0;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
if (level != SOL_IRLMP)
return -ENOPROTOOPT;
@@ -2100,7 +2077,8 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
/* Check is the user space own the object */
if(ias_attr->value->owner != IAS_USER_ATTR) {
- IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__);
+ pr_debug("%s(), attempting to delete a kernel attribute\n",
+ __func__);
kfree(ias_opt);
err = -EPERM;
goto out;
@@ -2123,12 +2101,12 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
/* Only possible for a seqpacket service (TTP with SAR) */
if (sk->sk_type != SOCK_SEQPACKET) {
- IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n",
- __func__, opt);
+ pr_debug("%s(), setting max_sdu_size = %d\n",
+ __func__, opt);
self->max_sdu_size_rx = opt;
} else {
- IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
- __func__);
+ net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n",
+ __func__);
err = -ENOPROTOOPT;
goto out;
}
@@ -2256,7 +2234,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
int err = 0;
int offset, total;
- IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ pr_debug("%s(%p)\n", __func__, self);
if (level != SOL_IRLMP)
return -ENOPROTOOPT;
@@ -2439,8 +2417,8 @@ bed:
/* Check that we can proceed with IAP */
if (self->iriap) {
- IRDA_WARNING("%s: busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s: busy with a previous query\n",
+ __func__);
kfree(ias_opt);
err = -EBUSY;
goto out;
@@ -2544,7 +2522,8 @@ bed:
/* Wait until a node is discovered */
if (!self->cachedaddr) {
- IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__);
+ pr_debug("%s(), nothing discovered yet, going to sleep...\n",
+ __func__);
/* Set watchdog timer to expire in <val> ms. */
self->errno = 0;
@@ -2560,14 +2539,14 @@ bed:
/* If watchdog is still activated, kill it! */
del_timer(&(self->watchdog));
- IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__);
+ pr_debug("%s(), ...waking up !\n", __func__);
if (err != 0)
goto out;
}
else
- IRDA_DEBUG(1, "%s(), found immediately !\n",
- __func__);
+ pr_debug("%s(), found immediately !\n",
+ __func__);
/* Tell IrLMP that we have been notified */
irlmp_update_client(self->ckey, self->mask.word,
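
The af_irda.c conversion replaces the subsystem-private IRDA_DEBUG/IRDA_WARNING/IRDA_ERROR macros with the generic pr_debug(), net_warn_ratelimited() and net_err_ratelimited(), dropping the per-call verbosity level. A rough userspace approximation of a ratelimited warning helper is sketched below; the real kernel helpers sit on top of printk ratelimiting and dynamic debug, which this does not model.

    #include <stdio.h>
    #include <time.h>

    /* allow at most `burst` messages per `interval` seconds */
    static int ratelimit(time_t interval, int burst)
    {
        static time_t window_start;
        static int printed;
        time_t now = time(NULL);

        if (now - window_start >= interval) {
            window_start = now;
            printed = 0;
        }
        if (printed >= burst)
            return 0;
        printed++;
        return 1;
    }

    #define warn_ratelimited(fmt, ...)                           \
        do {                                                     \
            if (ratelimit(5, 10))                                \
                fprintf(stderr, "warning: " fmt, ##__VA_ARGS__); \
        } while (0)

    int main(void)
    {
        int i;

        for (i = 0; i < 100; i++)
            warn_ratelimited("event %d\n", i);   /* only the first 10 print */
        return 0;
    }
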
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 6786e7f193d2..364d70aed068 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -112,8 +112,6 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
{
discovery_t *discovery;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/*
* If log is missing this means that IrLAP was unable to perform the
* discovery, so restart discovery again with just the half timeout
@@ -159,8 +157,6 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
int i = 0; /* How many we expired */
IRDA_ASSERT(log != NULL, return;);
- IRDA_DEBUG(4, "%s()\n", __func__);
-
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
@@ -232,10 +228,10 @@ void irlmp_dump_discoveries(hashbin_t *log)
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
- IRDA_DEBUG(0, "Discovery:\n");
- IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr);
- IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr);
- IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info);
+ pr_debug("Discovery:\n");
+ pr_debug(" daddr=%08x\n", discovery->data.daddr);
+ pr_debug(" saddr=%08x\n", discovery->data.saddr);
+ pr_debug(" nickname=%s\n", discovery->data.info);
discovery = (discovery_t *) hashbin_get_next(log);
}
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 4490a675b1bb..3af219545f6d 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -69,7 +69,8 @@ static int __init ircomm_init(void)
{
ircomm = hashbin_new(HB_LOCK);
if (ircomm == NULL) {
- IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__);
+ net_err_ratelimited("%s(), can't allocate hashbin!\n",
+ __func__);
return -ENOMEM;
}
@@ -83,15 +84,13 @@ static int __init ircomm_init(void)
}
#endif /* CONFIG_PROC_FS */
- IRDA_MESSAGE("IrCOMM protocol (Dag Brattli)\n");
+ net_info_ratelimited("IrCOMM protocol (Dag Brattli)\n");
return 0;
}
static void __exit ircomm_cleanup(void)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close);
#ifdef CONFIG_PROC_FS
@@ -110,8 +109,8 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line)
struct ircomm_cb *self = NULL;
int ret;
- IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __func__ ,
- service_type);
+ pr_debug("%s(), service_type=0x%02x\n", __func__ ,
+ service_type);
IRDA_ASSERT(ircomm != NULL, return NULL;);
@@ -154,8 +153,6 @@ EXPORT_SYMBOL(ircomm_open);
*/
static int __ircomm_close(struct ircomm_cb *self)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Disconnect link if any */
ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL);
@@ -190,8 +187,6 @@ int ircomm_close(struct ircomm_cb *self)
IRDA_ASSERT(self != NULL, return -EIO;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;);
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
entry = hashbin_remove(ircomm, self->line, NULL);
IRDA_ASSERT(entry == self, return -1;);
@@ -215,8 +210,6 @@ int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
struct ircomm_info info;
int ret;
- IRDA_DEBUG(2 , "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -242,8 +235,6 @@ EXPORT_SYMBOL(ircomm_connect_request);
void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
struct ircomm_info *info)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/*
* If there are any data hiding in the control channel, we must
* deliver it first. The side effect is that the control channel
@@ -254,7 +245,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
info->qos, info->max_data_size,
info->max_header_size, skb);
else {
- IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
+ pr_debug("%s(), missing handler\n", __func__);
}
}
@@ -271,8 +262,6 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL);
return ret;
@@ -289,15 +278,13 @@ EXPORT_SYMBOL(ircomm_connect_response);
void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
struct ircomm_info *info)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
if (self->notify.connect_confirm )
self->notify.connect_confirm(self->notify.instance,
self, info->qos,
info->max_data_size,
info->max_header_size, skb);
else {
- IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
+ pr_debug("%s(), missing handler\n", __func__);
}
}
@@ -311,8 +298,6 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb)
{
int ret;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -EFAULT;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
IRDA_ASSERT(skb != NULL, return -EFAULT;);
@@ -332,14 +317,12 @@ EXPORT_SYMBOL(ircomm_data_request);
*/
void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(skb->len > 0, return;);
if (self->notify.data_indication)
self->notify.data_indication(self->notify.instance, self, skb);
else {
- IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
+ pr_debug("%s(), missing handler\n", __func__);
}
}
@@ -364,8 +347,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
* fine
*/
if (unlikely(skb->len < (clen + 1))) {
- IRDA_DEBUG(2, "%s() throwing away illegal frame\n",
- __func__ );
+ pr_debug("%s() throwing away illegal frame\n",
+ __func__);
return;
}
@@ -383,8 +366,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb)
if (skb->len)
ircomm_data_indication(self, skb);
else {
- IRDA_DEBUG(4, "%s(), data was control info only!\n",
- __func__ );
+ pr_debug("%s(), data was control info only!\n",
+ __func__);
}
}
@@ -398,8 +381,6 @@ int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb)
{
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -EFAULT;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;);
IRDA_ASSERT(skb != NULL, return -EFAULT;);
@@ -420,8 +401,6 @@ EXPORT_SYMBOL(ircomm_control_request);
static void ircomm_control_indication(struct ircomm_cb *self,
struct sk_buff *skb, int clen)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Use udata for delivering data on the control channel */
if (self->notify.udata_indication) {
struct sk_buff *ctrl_skb;
@@ -441,7 +420,7 @@ static void ircomm_control_indication(struct ircomm_cb *self,
* see ircomm_tty_control_indication(). */
dev_kfree_skb(ctrl_skb);
} else {
- IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
+ pr_debug("%s(), missing handler\n", __func__);
}
}
@@ -456,8 +435,6 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata)
struct ircomm_info info;
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
@@ -477,15 +454,13 @@ EXPORT_SYMBOL(ircomm_disconnect_request);
void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
struct ircomm_info *info)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(info != NULL, return;);
if (self->notify.disconnect_indication) {
self->notify.disconnect_indication(self->notify.instance, self,
info->reason, skb);
} else {
- IRDA_DEBUG(0, "%s(), missing handler\n", __func__ );
+ pr_debug("%s(), missing handler\n", __func__);
}
}
@@ -497,8 +472,6 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
*/
void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index b172c6522328..b0730ac9f388 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -54,8 +54,7 @@ const char *const ircomm_state[] = {
"IRCOMM_CONN",
};
-#ifdef CONFIG_IRDA_DEBUG
-static const char *const ircomm_event[] = {
+static const char *const ircomm_event[] __maybe_unused = {
"IRCOMM_CONNECT_REQUEST",
"IRCOMM_CONNECT_RESPONSE",
"IRCOMM_TTP_CONNECT_INDICATION",
@@ -73,7 +72,6 @@ static const char *const ircomm_event[] = {
"IRCOMM_CONTROL_REQUEST",
"IRCOMM_CONTROL_INDICATION",
};
-#endif /* CONFIG_IRDA_DEBUG */
static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb, struct ircomm_info *info) =
@@ -106,8 +104,8 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
ircomm_connect_indication(self, skb, info);
break;
default:
- IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ ,
- ircomm_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_event[event]);
ret = -EINVAL;
}
return ret;
@@ -136,8 +134,8 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
ircomm_disconnect_indication(self, skb, info);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event: %s\n", __func__ ,
- ircomm_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_event[event]);
ret = -EINVAL;
}
return ret;
@@ -169,8 +167,8 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
ircomm_disconnect_indication(self, skb, info);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ ,
- ircomm_event[event]);
+ pr_debug("%s(), unknown event = %s\n", __func__ ,
+ ircomm_event[event]);
ret = -EINVAL;
}
return ret;
@@ -211,8 +209,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
ret = self->issue.disconnect_request(self, skb, info);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ ,
- ircomm_event[event]);
+ pr_debug("%s(), unknown event = %s\n", __func__ ,
+ ircomm_event[event]);
ret = -EINVAL;
}
return ret;
@@ -227,8 +225,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb, struct ircomm_info *info)
{
- IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_state[self->state], ircomm_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_state[self->state], ircomm_event[event]);
return (*state[self->state])(self, event, skb, info);
}
@@ -243,6 +241,6 @@ void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state)
{
self->state = state;
- IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __func__ ,
- ircomm_state[self->state], self->service_type);
+ pr_debug("%s: next state=%s, service type=%d\n", __func__ ,
+ ircomm_state[self->state], self->service_type);
}
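
In ircomm_event.c the event-name string table that used to be wrapped in #ifdef CONFIG_IRDA_DEBUG is instead annotated __maybe_unused, so it always compiles but raises no "defined but not used" warning when every pr_debug() caller is compiled out. A minimal sketch of the attribute, with __maybe_unused spelled out as the plain GCC attribute it expands to:

    #include <stdio.h>

    #define __maybe_unused __attribute__((unused))

    /* always present, but no warning when no debug caller references it */
    static const char *const event_names[] __maybe_unused = {
        "CONNECT_REQUEST",
        "CONNECT_RESPONSE",
        "DISCONNECT_REQUEST",
    };

    int main(void)
    {
    #ifdef DEBUG
        printf("%s\n", event_names[0]);
    #endif
        return 0;
    }
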
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 6536114adf37..e4cc847bb933 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -52,8 +52,6 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self,
{
int ret = 0;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
/* Don't forget to refcount it - should be NULL anyway */
if(userdata)
skb_get(userdata);
@@ -74,8 +72,6 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
{
struct sk_buff *tx_skb;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
/* Any userdata supplied? */
if (userdata == NULL) {
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
@@ -107,8 +103,6 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
struct sk_buff *tx_skb;
int ret;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
if (!userdata) {
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (!tx_skb)
@@ -144,13 +138,11 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
cb = (struct irda_skb_cb *) skb->cb;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
line = cb->line;
self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
if (!self) {
- IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ );
+ pr_debug("%s(), didn't find myself\n", __func__);
return;
}
@@ -160,7 +152,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb)
self->pkt_count--;
if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) {
- IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ );
+ pr_debug("%s(), asking TTY to start again!\n", __func__);
self->flow_status = FLOW_START;
if (self->notify.flow_indication)
self->notify.flow_indication(self->notify.instance,
@@ -187,7 +179,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
cb->line = self->line;
- IRDA_DEBUG(4, "%s(), sending frame\n", __func__ );
+ pr_debug("%s(), sending frame\n", __func__);
/* Don't forget to refcount it - see ircomm_tty_do_softint() */
skb_get(skb);
@@ -196,7 +188,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
skb->destructor = ircomm_lmp_flow_control;
if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
- IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ );
+ pr_debug("%s(), asking TTY to slow down!\n", __func__);
self->flow_status = FLOW_STOP;
if (self->notify.flow_indication)
self->notify.flow_indication(self->notify.instance,
@@ -204,7 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
}
ret = irlmp_data_request(self->lsap, skb);
if (ret) {
- IRDA_ERROR("%s(), failed\n", __func__);
+ net_err_ratelimited("%s(), failed\n", __func__);
/* irlmp_data_request already free the packet */
}
@@ -222,8 +214,6 @@ static int ircomm_lmp_data_indication(void *instance, void *sap,
{
struct ircomm_cb *self = (struct ircomm_cb *) instance;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
@@ -252,8 +242,6 @@ static void ircomm_lmp_connect_confirm(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *) instance;
struct ircomm_info info;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -285,8 +273,6 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *)instance;
struct ircomm_info info;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -315,8 +301,6 @@ static void ircomm_lmp_disconnect_indication(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *) instance;
struct ircomm_info info;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -338,8 +322,6 @@ int ircomm_open_lsap(struct ircomm_cb *self)
{
notify_t notify;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
/* Register callbacks */
irda_notify_init(&notify);
notify.data_indication = ircomm_lmp_data_indication;
@@ -351,7 +333,7 @@ int ircomm_open_lsap(struct ircomm_cb *self)
self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0);
if (!self->lsap) {
- IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ );
+ pr_debug("%sfailed to allocate tsap\n", __func__);
return -1;
}
self->slsap_sel = self->lsap->slsap_sel;
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index f80b1a6a244b..3c4caa60c926 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -61,12 +61,12 @@ static int ircomm_param_dte(void *instance, irda_param_t *param, int get);
static int ircomm_param_dce(void *instance, irda_param_t *param, int get);
static int ircomm_param_poll(void *instance, irda_param_t *param, int get);
-static pi_minor_info_t pi_minor_call_table_common[] = {
+static const pi_minor_info_t pi_minor_call_table_common[] = {
{ ircomm_param_service_type, PV_INT_8_BITS },
{ ircomm_param_port_type, PV_INT_8_BITS },
{ ircomm_param_port_name, PV_STRING }
};
-static pi_minor_info_t pi_minor_call_table_non_raw[] = {
+static const pi_minor_info_t pi_minor_call_table_non_raw[] = {
{ ircomm_param_data_rate, PV_INT_32_BITS | PV_BIG_ENDIAN },
{ ircomm_param_data_format, PV_INT_8_BITS },
{ ircomm_param_flow_control, PV_INT_8_BITS },
@@ -74,13 +74,13 @@ static pi_minor_info_t pi_minor_call_table_non_raw[] = {
{ ircomm_param_enq_ack, PV_INT_16_BITS },
{ ircomm_param_line_status, PV_INT_8_BITS }
};
-static pi_minor_info_t pi_minor_call_table_9_wire[] = {
+static const pi_minor_info_t pi_minor_call_table_9_wire[] = {
{ ircomm_param_dte, PV_INT_8_BITS },
{ ircomm_param_dce, PV_INT_8_BITS },
{ ircomm_param_poll, PV_NO_VALUE },
};
-static pi_major_info_t pi_major_call_table[] = {
+static const pi_major_info_t pi_major_call_table[] = {
{ pi_minor_call_table_common, 3 },
{ pi_minor_call_table_non_raw, 6 },
{ pi_minor_call_table_9_wire, 3 }
@@ -101,8 +101,6 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
struct sk_buff *skb;
int count;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -130,7 +128,8 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
count = irda_param_insert(self, pi, skb_tail_pointer(skb),
skb_tailroom(skb), &ircomm_param_info);
if (count < 0) {
- IRDA_WARNING("%s(), no room for parameter!\n", __func__);
+ net_warn_ratelimited("%s(), no room for parameter!\n",
+ __func__);
spin_unlock_irqrestore(&self->spinlock, flags);
return -1;
}
@@ -138,7 +137,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
spin_unlock_irqrestore(&self->spinlock, flags);
- IRDA_DEBUG(2, "%s(), skb->len=%d\n", __func__ , skb->len);
+ pr_debug("%s(), skb->len=%d\n", __func__ , skb->len);
if (flush) {
/* ircomm_tty_do_softint will take care of the rest */
@@ -172,12 +171,11 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
/* Find all common service types */
service_type &= self->service_type;
if (!service_type) {
- IRDA_DEBUG(2,
- "%s(), No common service type to use!\n", __func__ );
+ pr_debug("%s(), No common service type to use!\n", __func__);
return -1;
}
- IRDA_DEBUG(0, "%s(), services in common=%02x\n", __func__ ,
- service_type);
+ pr_debug("%s(), services in common=%02x\n", __func__ ,
+ service_type);
/*
* Now choose a preferred service type of those available
@@ -191,8 +189,8 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
else if (service_type & IRCOMM_3_WIRE_RAW)
self->settings.service_type = IRCOMM_3_WIRE_RAW;
- IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __func__ ,
- self->settings.service_type);
+ pr_debug("%s(), resulting service type=0x%02x\n", __func__ ,
+ self->settings.service_type);
/*
* Now the line is ready for some communication. Check if we are a
@@ -234,8 +232,8 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get)
else {
self->settings.port_type = (__u8) param->pv.i;
- IRDA_DEBUG(0, "%s(), port type=%d\n", __func__ ,
- self->settings.port_type);
+ pr_debug("%s(), port type=%d\n", __func__ ,
+ self->settings.port_type);
}
return 0;
}
@@ -254,9 +252,9 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get)
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (get) {
- IRDA_DEBUG(0, "%s(), not imp!\n", __func__ );
+ pr_debug("%s(), not imp!\n", __func__);
} else {
- IRDA_DEBUG(0, "%s(), port-name=%s\n", __func__ , param->pv.c);
+ pr_debug("%s(), port-name=%s\n", __func__ , param->pv.c);
strncpy(self->settings.port_name, param->pv.c, 32);
}
@@ -281,7 +279,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get)
else
self->settings.data_rate = param->pv.i;
- IRDA_DEBUG(2, "%s(), data rate = %d\n", __func__ , param->pv.i);
+ pr_debug("%s(), data rate = %d\n", __func__ , param->pv.i);
return 0;
}
@@ -327,7 +325,7 @@ static int ircomm_param_flow_control(void *instance, irda_param_t *param,
else
self->settings.flow_control = (__u8) param->pv.i;
- IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __func__ , (__u8) param->pv.i);
+ pr_debug("%s(), flow control = 0x%02x\n", __func__ , (__u8)param->pv.i);
return 0;
}
@@ -353,8 +351,8 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get)
self->settings.xonxoff[1] = (__u16) param->pv.i >> 8;
}
- IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ ,
- param->pv.i & 0xff, param->pv.i >> 8);
+ pr_debug("%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ ,
+ param->pv.i & 0xff, param->pv.i >> 8);
return 0;
}
@@ -380,8 +378,8 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
self->settings.enqack[1] = (__u16) param->pv.i >> 8;
}
- IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ ,
- param->pv.i & 0xff, param->pv.i >> 8);
+ pr_debug("%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ ,
+ param->pv.i & 0xff, param->pv.i >> 8);
return 0;
}
@@ -395,7 +393,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get)
static int ircomm_param_line_status(void *instance, irda_param_t *param,
int get)
{
- IRDA_DEBUG(2, "%s(), not impl.\n", __func__ );
+ pr_debug("%s(), not impl.\n", __func__);
return 0;
}
@@ -456,7 +454,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
__u8 dce;
- IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __func__ , (__u8) param->pv.i);
+ pr_debug("%s(), dce = 0x%02x\n", __func__ , (__u8)param->pv.i);
dce = (__u8) param->pv.i;
@@ -468,7 +466,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
/* Check if any of the settings have changed */
if (dce & 0x0f) {
if (dce & IRCOMM_DELTA_CTS) {
- IRDA_DEBUG(2, "%s(), CTS\n", __func__ );
+ pr_debug("%s(), CTS\n", __func__);
}
}
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index d362d711b79c..4b81e0934770 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -76,8 +76,6 @@ int ircomm_open_tsap(struct ircomm_cb *self)
{
notify_t notify;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* Register callbacks */
irda_notify_init(&notify);
notify.data_indication = ircomm_ttp_data_indication;
@@ -91,7 +89,7 @@ int ircomm_open_tsap(struct ircomm_cb *self)
self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
&notify);
if (!self->tsap) {
- IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __func__ );
+ pr_debug("%sfailed to allocate tsap\n", __func__);
return -1;
}
self->slsap_sel = self->tsap->stsap_sel;
@@ -119,8 +117,6 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self,
{
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* Don't forget to refcount it - should be NULL anyway */
if(userdata)
skb_get(userdata);
@@ -143,8 +139,6 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self,
{
int ret;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* Don't forget to refcount it - should be NULL anyway */
if(userdata)
skb_get(userdata);
@@ -171,7 +165,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self,
IRDA_ASSERT(skb != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), clen=%d\n", __func__ , clen);
+ pr_debug("%s(), clen=%d\n", __func__ , clen);
/*
* Insert clen field, currently we either send data only, or control
@@ -188,7 +182,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self,
ret = irttp_data_request(self->tsap, skb);
if (ret) {
- IRDA_ERROR("%s(), failed\n", __func__);
+ net_err_ratelimited("%s(), failed\n", __func__);
/* irttp_data_request already free the packet */
}
@@ -206,8 +200,6 @@ static int ircomm_ttp_data_indication(void *instance, void *sap,
{
struct ircomm_cb *self = (struct ircomm_cb *) instance;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
@@ -229,16 +221,14 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *) instance;
struct ircomm_info info;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(qos != NULL, goto out;);
if (max_sdu_size != TTP_SAR_DISABLE) {
- IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n",
- __func__);
+ net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n",
+ __func__);
goto out;
}
@@ -270,16 +260,14 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *)instance;
struct ircomm_info info;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(qos != NULL, goto out;);
if (max_sdu_size != TTP_SAR_DISABLE) {
- IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n",
- __func__);
+ net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n",
+ __func__);
goto out;
}
@@ -329,8 +317,6 @@ static void ircomm_ttp_disconnect_indication(void *instance, void *sap,
struct ircomm_cb *self = (struct ircomm_cb *) instance;
struct ircomm_info info;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
@@ -354,8 +340,6 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap,
{
struct ircomm_cb *self = (struct ircomm_cb *) instance;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 61ceb4cdb4a2..40695b9751c1 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -147,7 +147,8 @@ static int __init ircomm_tty_init(void)
return -ENOMEM;
ircomm_tty = hashbin_new(HB_LOCK);
if (ircomm_tty == NULL) {
- IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__);
+ net_err_ratelimited("%s(), can't allocate hashbin!\n",
+ __func__);
put_tty_driver(driver);
return -ENOMEM;
}
@@ -163,8 +164,8 @@ static int __init ircomm_tty_init(void)
driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(driver, &ops);
if (tty_register_driver(driver)) {
- IRDA_ERROR("%s(): Couldn't register serial driver\n",
- __func__);
+ net_err_ratelimited("%s(): Couldn't register serial driver\n",
+ __func__);
put_tty_driver(driver);
return -1;
}
@@ -173,8 +174,6 @@ static int __init ircomm_tty_init(void)
static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
{
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -195,12 +194,10 @@ static void __exit ircomm_tty_cleanup(void)
{
int ret;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
ret = tty_unregister_driver(driver);
if (ret) {
- IRDA_ERROR("%s(), failed to unregister driver\n",
- __func__);
+ net_err_ratelimited("%s(), failed to unregister driver\n",
+ __func__);
return;
}
@@ -219,14 +216,12 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
notify_t notify;
int ret = -ENODEV;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
/* Check if already open */
if (test_and_set_bit(ASYNCB_INITIALIZED, &self->port.flags)) {
- IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ );
+ pr_debug("%s(), already open so break out!\n", __func__);
return 0;
}
@@ -256,7 +251,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
/* Connect IrCOMM link with remote device */
ret = ircomm_tty_attach_cable(self);
if (ret < 0) {
- IRDA_ERROR("%s(), error attaching cable!\n", __func__);
+ net_err_ratelimited("%s(), error attaching cable!\n", __func__);
goto err;
}
@@ -281,8 +276,6 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
int do_clocal = 0;
unsigned long flags;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/*
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
@@ -297,12 +290,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
if (tty->termios.c_cflag & CBAUD)
tty_port_raise_dtr_rts(port);
port->flags |= ASYNC_NORMAL_ACTIVE;
- IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
+ pr_debug("%s(), O_NONBLOCK requested!\n", __func__);
return 0;
}
if (tty->termios.c_cflag & CLOCAL) {
- IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ );
+ pr_debug("%s(), doing CLOCAL!\n", __func__);
do_clocal = 1;
}
@@ -316,8 +309,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
retval = 0;
add_wait_queue(&port->open_wait, &wait);
- IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
- __FILE__, __LINE__, tty->driver->name, port->count);
+ pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
spin_lock_irqsave(&port->lock, flags);
port->count--;
@@ -354,8 +347,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
break;
}
- IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
- __FILE__, __LINE__, tty->driver->name, port->count);
+ pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
schedule();
}
@@ -369,8 +362,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
port->blocked_open--;
spin_unlock_irqrestore(&port->lock, flags);
- IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
- __FILE__, __LINE__, tty->driver->name, port->count);
+ pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+ __FILE__, __LINE__, tty->driver->name, port->count);
if (!retval)
port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -389,10 +382,8 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
if (!self) {
/* No, so make new instance */
self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
- if (self == NULL) {
- IRDA_ERROR("%s(), kmalloc failed!\n", __func__);
+ if (self == NULL)
return -ENOMEM;
- }
tty_port_init(&self->port);
self->port.ops = &ircomm_port_ops;
@@ -440,16 +431,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
unsigned long flags;
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* ++ is not atomic, so this should be protected - Jean II */
spin_lock_irqsave(&self->port.lock, flags);
self->port.count++;
spin_unlock_irqrestore(&self->port.lock, flags);
tty_port_tty_set(&self->port, tty);
- IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
- self->line, self->port.count);
+ pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+ self->line, self->port.count);
/* Not really used by us, but lets do it anyway */
self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
@@ -469,8 +458,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
if (wait_event_interruptible(self->port.close_wait,
!test_bit(ASYNCB_CLOSING, &self->port.flags))) {
- IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n",
- __func__);
+ net_warn_ratelimited("%s - got signal while blocking on ASYNC_CLOSING!\n",
+ __func__);
return -ERESTARTSYS;
}
@@ -488,9 +477,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */
/* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */
self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */
- IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ );
+ pr_debug("%s(), IrCOMM device\n", __func__);
} else {
- IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ );
+ pr_debug("%s(), IrLPT device\n", __func__);
self->service_type = IRCOMM_3_WIRE_RAW;
self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */
}
@@ -501,9 +490,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
ret = ircomm_tty_block_til_ready(self, tty, filp);
if (ret) {
- IRDA_DEBUG(2,
- "%s(), returning after block_til_ready with %d\n", __func__ ,
- ret);
+ pr_debug("%s(), returning after block_til_ready with %d\n",
+ __func__, ret);
return ret;
}
@@ -521,8 +509,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
struct tty_port *port = &self->port;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -572,8 +558,6 @@ static void ircomm_tty_do_softint(struct work_struct *work)
unsigned long flags;
struct sk_buff *skb, *ctrl_skb;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (!self || self->magic != IRCOMM_TTY_MAGIC)
return;
@@ -639,8 +623,8 @@ static int ircomm_tty_write(struct tty_struct *tty,
int len = 0;
int size;
- IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count,
- tty->hw_stopped);
+ pr_debug("%s(), count=%d, hw_stopped=%d\n", __func__ , count,
+ tty->hw_stopped);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
@@ -662,7 +646,7 @@ static int ircomm_tty_write(struct tty_struct *tty,
* we don't mess up the original "safe skb" (see tx_data_size).
* Jean II */
if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) {
- IRDA_DEBUG(1, "%s() : not initialised\n", __func__);
+ pr_debug("%s() : not initialised\n", __func__);
#ifdef IRCOMM_NO_TX_BEFORE_INIT
/* We didn't consume anything, TTY will retry */
return 0;
@@ -791,7 +775,7 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
ret = self->max_data_size;
spin_unlock_irqrestore(&self->spinlock, flags);
}
- IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret);
+ pr_debug("%s(), ret=%d\n", __func__ , ret);
return ret;
}
@@ -808,8 +792,6 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long orig_jiffies, poll_time;
unsigned long flags;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -843,8 +825,6 @@ static void ircomm_tty_throttle(struct tty_struct *tty)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -874,8 +854,6 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -889,7 +867,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
ircomm_param_request(self, IRCOMM_DTE, TRUE);
- IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ );
+ pr_debug("%s(), FLOW_START\n", __func__);
}
ircomm_flow_request(self->ircomm, FLOW_START);
}
@@ -926,8 +904,6 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
if (!test_and_clear_bit(ASYNCB_INITIALIZED, &self->port.flags))
return;
@@ -970,8 +946,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
struct tty_port *port = &self->port;
unsigned long flags;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -999,7 +973,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
*/
static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch)
{
- IRDA_DEBUG(0, "%s(), not impl\n", __func__ );
+ pr_debug("%s(), not impl\n", __func__);
}
/*
@@ -1043,8 +1017,6 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
struct tty_struct *tty;
int status;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -1056,15 +1028,13 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
/*wake_up_interruptible(&self->delta_msr_wait);*/
}
if ((self->port.flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) {
- IRDA_DEBUG(2,
- "%s(), ircomm%d CD now %s...\n", __func__ , self->line,
- (status & IRCOMM_CD) ? "on" : "off");
+ pr_debug("%s(), ircomm%d CD now %s...\n", __func__ , self->line,
+ (status & IRCOMM_CD) ? "on" : "off");
if (status & IRCOMM_CD) {
wake_up_interruptible(&self->port.open_wait);
} else {
- IRDA_DEBUG(2,
- "%s(), Doing serial hangup..\n", __func__ );
+ pr_debug("%s(), Doing serial hangup..\n", __func__);
if (tty)
tty_hangup(tty);
@@ -1075,8 +1045,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
if (tty && tty_port_cts_enabled(&self->port)) {
if (tty->hw_stopped) {
if (status & IRCOMM_CTS) {
- IRDA_DEBUG(2,
- "%s(), CTS tx start...\n", __func__ );
+ pr_debug("%s(), CTS tx start...\n", __func__);
tty->hw_stopped = 0;
/* Wake up processes blocked on open */
@@ -1087,8 +1056,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self)
}
} else {
if (!(status & IRCOMM_CTS)) {
- IRDA_DEBUG(2,
- "%s(), CTS tx stop...\n", __func__ );
+ pr_debug("%s(), CTS tx stop...\n", __func__);
tty->hw_stopped = 1;
}
}
@@ -1109,15 +1077,13 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
struct tty_struct *tty;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
tty = tty_port_tty_get(&self->port);
if (!tty) {
- IRDA_DEBUG(0, "%s(), no tty!\n", __func__ );
+ pr_debug("%s(), no tty!\n", __func__);
return 0;
}
@@ -1128,7 +1094,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
* params, we can just as well declare the hardware for running.
*/
if (tty->hw_stopped && (self->flow == FLOW_START)) {
- IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ );
+ pr_debug("%s(), polling for line settings!\n", __func__);
ircomm_param_request(self, IRCOMM_POLL, TRUE);
/* We can just as well declare the hardware for running */
@@ -1161,8 +1127,6 @@ static int ircomm_tty_control_indication(void *instance, void *sap,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
int clen;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
@@ -1197,7 +1161,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
switch (cmd) {
case FLOW_START:
- IRDA_DEBUG(2, "%s(), hw start!\n", __func__ );
+ pr_debug("%s(), hw start!\n", __func__);
if (tty)
tty->hw_stopped = 0;
@@ -1206,7 +1170,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
break;
default: /* If we get here, something is very wrong, better stop */
case FLOW_STOP:
- IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ );
+ pr_debug("%s(), hw stopped!\n", __func__);
if (tty)
tty->hw_stopped = 1;
break;
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 2ee87bf387cc..61137f8b5293 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -89,8 +89,7 @@ const char *const ircomm_tty_state[] = {
"*** ERROR *** ",
};
-#ifdef CONFIG_IRDA_DEBUG
-static const char *const ircomm_tty_event[] = {
+static const char *const ircomm_tty_event[] __maybe_unused = {
"IRCOMM_TTY_ATTACH_CABLE",
"IRCOMM_TTY_DETACH_CABLE",
"IRCOMM_TTY_DATA_REQUEST",
@@ -106,7 +105,6 @@ static const char *const ircomm_tty_event[] = {
"IRCOMM_TTY_GOT_LSAPSEL",
"*** ERROR ****",
};
-#endif /* CONFIG_IRDA_DEBUG */
static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
struct sk_buff *skb, struct ircomm_tty_info *info) =
@@ -130,14 +128,12 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
{
struct tty_struct *tty;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
/* Check if somebody has already connected to us */
if (ircomm_is_connected(self->ircomm)) {
- IRDA_DEBUG(0, "%s(), already connected!\n", __func__ );
+ pr_debug("%s(), already connected!\n", __func__);
return 0;
}
@@ -163,8 +159,6 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
*/
void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
{
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -212,8 +206,6 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
__u8 oct_seq[6];
__u16 hints;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -313,17 +305,17 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
* Set default values, but only if the application for some reason
* haven't set them already
*/
- IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ ,
- self->settings.data_rate);
+ pr_debug("%s(), data-rate = %d\n", __func__ ,
+ self->settings.data_rate);
if (!self->settings.data_rate)
self->settings.data_rate = 9600;
- IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ ,
- self->settings.data_format);
+ pr_debug("%s(), data-format = %d\n", __func__ ,
+ self->settings.data_format);
if (!self->settings.data_format)
self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
- IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ ,
- self->settings.flow_control);
+ pr_debug("%s(), flow-control = %d\n", __func__ ,
+ self->settings.flow_control);
/*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
/* Do not set delta values for the initial parameters */
@@ -367,8 +359,6 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery,
struct ircomm_tty_cb *self;
struct ircomm_tty_info info;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Important note :
* We need to drop all passive discoveries.
* The LSAP management of IrComm is deficient and doesn't deal
@@ -404,8 +394,6 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
struct tty_struct *tty;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -436,8 +424,6 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -447,13 +433,13 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
/* Check if request succeeded */
if (result != IAS_SUCCESS) {
- IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ );
+ pr_debug("%s(), got NULL value!\n", __func__);
return;
}
switch (value->type) {
case IAS_OCT_SEQ:
- IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ );
+ pr_debug("%s(), got octet sequence\n", __func__);
irda_param_extract_all(self, value->t.oct_seq, value->len,
&ircomm_param_info);
@@ -463,21 +449,21 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
break;
case IAS_INTEGER:
/* Got LSAP selector */
- IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ ,
- value->t.integer);
+ pr_debug("%s(), got lsapsel = %d\n", __func__ ,
+ value->t.integer);
if (value->t.integer == -1) {
- IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ );
+ pr_debug("%s(), invalid value!\n", __func__);
} else
self->dlsap_sel = value->t.integer;
ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL);
break;
case IAS_MISSING:
- IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ );
+ pr_debug("%s(), got IAS_MISSING\n", __func__);
break;
default:
- IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ );
+ pr_debug("%s(), got unknown type!\n", __func__);
break;
}
irias_delete_value(value);
@@ -497,8 +483,6 @@ void ircomm_tty_connect_confirm(void *instance, void *sap,
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -528,8 +512,6 @@ void ircomm_tty_connect_indication(void *instance, void *sap,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
int clen;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -559,8 +541,6 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
{
struct tty_struct *tty;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -578,10 +558,10 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
*/
if (tty_port_cts_enabled(&self->port) &&
((self->settings.dce & IRCOMM_CTS) == 0)) {
- IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ );
+ pr_debug("%s(), waiting for CTS ...\n", __func__);
goto put;
} else {
- IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ );
+ pr_debug("%s(), starting hardware!\n", __func__);
tty->hw_stopped = 0;
@@ -621,8 +601,6 @@ static void ircomm_tty_watchdog_timer_expired(void *data)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -642,8 +620,8 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
return (*state[self->state])(self, event, skb, info);
}
@@ -660,8 +638,8 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ ,
- ircomm_tty_state[self->state], self->service_type);
+ pr_debug("%s: next state=%s, service type=%d\n", __func__ ,
+ ircomm_tty_state[self->state], self->service_type);
*/
self->state = state;
}
@@ -679,8 +657,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
{
int ret = 0;
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_ATTACH_CABLE:
/* Try to discover any remote devices */
@@ -694,8 +672,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
self->saddr = info->saddr;
if (self->iriap) {
- IRDA_WARNING("%s(), busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
return -EBUSY;
}
@@ -723,8 +701,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
@@ -743,8 +721,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
{
int ret = 0;
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_DISCOVERY_INDICATION:
@@ -752,8 +730,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
self->saddr = info->saddr;
if (self->iriap) {
- IRDA_WARNING("%s(), busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
return -EBUSY;
}
@@ -796,8 +774,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
@@ -816,14 +794,14 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
{
int ret = 0;
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_GOT_PARAMETERS:
if (self->iriap) {
- IRDA_WARNING("%s(), busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
return -EBUSY;
}
@@ -854,8 +832,8 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
@@ -874,8 +852,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
{
int ret = 0;
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_GOT_LSAPSEL:
@@ -903,8 +881,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
@@ -923,8 +901,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
{
int ret = 0;
- IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ ,
- ircomm_tty_state[self->state], ircomm_tty_event[event]);
+ pr_debug("%s: state=%s, event=%s\n", __func__ ,
+ ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_CONNECT_CONFIRM:
@@ -957,8 +935,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
@@ -995,13 +973,13 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
self->settings.dce = IRCOMM_DELTA_CD;
ircomm_tty_check_modem_status(self);
} else {
- IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ );
+ pr_debug("%s(), hanging up!\n", __func__);
tty_port_tty_hangup(&self->port, false);
}
break;
default:
- IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ ,
- ircomm_tty_event[event]);
+ pr_debug("%s(), unknown event: %s\n", __func__ ,
+ ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index ce943853c38d..75ccdbd0728e 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -56,8 +56,6 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self,
unsigned int cflag, cval;
int baud;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (!self->ircomm)
return;
@@ -93,7 +91,8 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self,
self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
/* This got me. Bummer. Jean II */
if (self->service_type == IRCOMM_3_WIRE_RAW)
- IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__);
+ net_warn_ratelimited("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n",
+ __func__);
} else {
self->port.flags &= ~ASYNC_CTS_FLOW;
self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
@@ -149,8 +148,6 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned int cflag = tty->termios.c_cflag;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if ((cflag == old_termios->c_cflag) &&
(RELEVANT_IFLAG(tty->termios.c_iflag) ==
RELEVANT_IFLAG(old_termios->c_iflag)))
@@ -198,8 +195,6 @@ int ircomm_tty_tiocmget(struct tty_struct *tty)
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned int result;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
@@ -223,8 +218,6 @@ int ircomm_tty_tiocmset(struct tty_struct *tty,
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
@@ -265,8 +258,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
if (!retinfo)
return -EFAULT;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
memset(&info, 0, sizeof(info));
info.line = self->line;
info.flags = self->port.flags;
@@ -301,8 +292,6 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
struct serial_struct new_serial;
struct ircomm_tty_cb old_state, *state;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
@@ -375,8 +364,6 @@ int ircomm_tty_ioctl(struct tty_struct *tty,
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
@@ -392,11 +379,11 @@ int ircomm_tty_ioctl(struct tty_struct *tty,
ret = ircomm_tty_set_serial_info(self, (struct serial_struct __user *) arg);
break;
case TIOCMIWAIT:
- IRDA_DEBUG(0, "(), TIOCMIWAIT, not impl!\n");
+ pr_debug("(), TIOCMIWAIT, not impl!\n");
break;
case TIOCGICOUNT:
- IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ );
+ pr_debug("%s(), TIOCGICOUNT not impl!\n", __func__);
#if 0
save_flags(flags); cli();
cnow = driver->icount;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 9e0d909390fd..856736656a30 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -63,14 +63,14 @@ int __init irda_device_init( void)
{
dongles = hashbin_new(HB_NOLOCK);
if (dongles == NULL) {
- IRDA_WARNING("IrDA: Can't allocate dongles hashbin!\n");
+ net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
spin_lock_init(&dongles->hb_spinlock);
tasks = hashbin_new(HB_LOCK);
if (tasks == NULL) {
- IRDA_WARNING("IrDA: Can't allocate tasks hashbin!\n");
+ net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n");
hashbin_delete(dongles, NULL);
return -ENOMEM;
}
@@ -84,14 +84,12 @@ int __init irda_device_init( void)
static void leftover_dongle(void *arg)
{
struct dongle_reg *reg = arg;
- IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
- reg->type);
+ net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n",
+ reg->type);
}
void irda_device_cleanup(void)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete);
hashbin_delete(dongles, leftover_dongle);
@@ -107,7 +105,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
{
struct irlap_cb *self;
- IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
+ pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
self = (struct irlap_cb *) dev->atalk_ptr;
@@ -127,7 +125,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT);
else
irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT);
- IRDA_DEBUG( 4, "Media busy!\n");
+ pr_debug("Media busy!\n");
} else {
self->media_busy = FALSE;
irlap_stop_mbusy_timer(self);
@@ -147,11 +145,9 @@ int irda_device_is_receiving(struct net_device *dev)
struct if_irda_req req;
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (!dev->netdev_ops->ndo_do_ioctl) {
- IRDA_ERROR("%s: do_ioctl not impl. by device driver\n",
- __func__);
+ net_err_ratelimited("%s: do_ioctl not impl. by device driver\n",
+ __func__);
return -1;
}
@@ -192,8 +188,6 @@ static int irda_task_kick(struct irda_task *task)
int count = 0;
int timeout;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(task != NULL, return -1;);
IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;);
@@ -201,15 +195,15 @@ static int irda_task_kick(struct irda_task *task)
do {
timeout = task->function(task);
if (count++ > 100) {
- IRDA_ERROR("%s: error in task handler!\n",
- __func__);
+ net_err_ratelimited("%s: error in task handler!\n",
+ __func__);
irda_task_delete(task);
return TRUE;
}
} while ((timeout == 0) && (task->state != IRDA_TASK_DONE));
if (timeout < 0) {
- IRDA_ERROR("%s: Error executing task!\n", __func__);
+ net_err_ratelimited("%s: Error executing task!\n", __func__);
irda_task_delete(task);
return TRUE;
}
@@ -241,8 +235,8 @@ static int irda_task_kick(struct irda_task *task)
irda_task_timer_expired);
finished = FALSE;
} else {
- IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n",
- __func__);
+ pr_debug("%s(), not finished, and no timeout!\n",
+ __func__);
finished = FALSE;
}
@@ -259,8 +253,6 @@ static void irda_task_timer_expired(void *data)
{
struct irda_task *task;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
task = data;
irda_task_kick(task);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e1b37f5a2691..4a7ae32afa09 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -43,9 +43,8 @@
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
-#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
-static const char *const ias_charset_types[] = {
+static const char *const ias_charset_types[] __maybe_unused = {
"CS_ASCII",
"CS_ISO_8859_1",
"CS_ISO_8859_2",
@@ -58,7 +57,6 @@ static const char *const ias_charset_types[] = {
"CS_ISO_8859_9",
"CS_UNICODE"
};
-#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static void *service_handle;
@@ -110,8 +108,8 @@ int __init iriap_init(void)
/* Object repository - defined in irias_object.c */
irias_objects = hashbin_new(HB_LOCK);
if (!irias_objects) {
- IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n",
- __func__);
+ net_warn_ratelimited("%s: Can't allocate irias_objects hashbin!\n",
+ __func__);
hashbin_delete(iriap, NULL);
return -ENOMEM;
}
@@ -145,7 +143,7 @@ int __init iriap_init(void)
*/
server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!server) {
- IRDA_DEBUG(0, "%s(), unable to open server\n", __func__);
+ pr_debug("%s(), unable to open server\n", __func__);
return -1;
}
iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
@@ -177,13 +175,9 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
{
struct iriap_cb *self;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
self = kzalloc(sizeof(*self), GFP_ATOMIC);
- if (!self) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ if (!self)
return NULL;
- }
/*
* Initialize instance
@@ -223,8 +217,6 @@ EXPORT_SYMBOL(iriap_open);
*/
static void __iriap_close(struct iriap_cb *self)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -247,8 +239,6 @@ void iriap_close(struct iriap_cb *self)
{
struct iriap_cb *entry;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -268,8 +258,6 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
{
notify_t notify;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
irda_notify_init(&notify);
notify.connect_confirm = iriap_connect_confirm;
notify.connect_indication = iriap_connect_indication;
@@ -283,7 +271,8 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
if (self->lsap == NULL) {
- IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__);
+ net_err_ratelimited("%s: Unable to allocated LSAP!\n",
+ __func__);
return -1;
}
self->slsap_sel = self->lsap->slsap_sel;
@@ -303,8 +292,8 @@ static void iriap_disconnect_indication(void *instance, void *sap,
{
struct iriap_cb *self;
- IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__,
- irlmp_reason_str(reason), reason);
+ pr_debug("%s(), reason=%s [%d]\n", __func__,
+ irlmp_reason_str(reason), reason);
self = instance;
@@ -320,7 +309,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
dev_kfree_skb(skb);
if (self->mode == IAS_CLIENT) {
- IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__);
+ pr_debug("%s(), disconnect as client\n", __func__);
iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION,
@@ -333,7 +322,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
if (self->confirm)
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
} else {
- IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__);
+ pr_debug("%s(), disconnect as server\n", __func__);
iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
iriap_close(self);
@@ -347,16 +336,13 @@ static void iriap_disconnect_request(struct iriap_cb *self)
{
struct sk_buff *tx_skb;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (tx_skb == NULL) {
- IRDA_DEBUG(0,
- "%s(), Could not allocate an sk_buff of length %d\n",
- __func__, LMP_MAX_HEADER);
+ pr_debug("%s(), Could not allocate an sk_buff of length %d\n",
+ __func__, LMP_MAX_HEADER);
return;
}
@@ -461,14 +447,14 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
len = get_unaligned_be16(fp + n);
n += 2;
- IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len);
+ pr_debug("%s(), len=%d\n", __func__, len);
/* Get object ID, MSB first */
obj_id = get_unaligned_be16(fp + n);
n += 2;
type = fp[n++];
- IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type);
+ pr_debug("%s(), Value type = %d\n", __func__, type);
switch (type) {
case IAS_INTEGER:
@@ -477,7 +463,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
value = irias_new_integer_value(tmp_cpu32);
/* Legal values restricted to 0x01-0x6f, page 15 irttp */
- IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer);
+ pr_debug("%s(), lsap=%d\n", __func__, value->t.integer);
break;
case IAS_STRING:
charset = fp[n++];
@@ -496,11 +482,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
/* case CS_ISO_8859_9: */
/* case CS_UNICODE: */
default:
- IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
- __func__, charset,
- charset < ARRAY_SIZE(ias_charset_types) ?
- ias_charset_types[charset] :
- "(unknown)");
+ pr_debug("%s(), charset [%d] %s, not supported\n",
+ __func__, charset,
+ charset < ARRAY_SIZE(ias_charset_types) ?
+ ias_charset_types[charset] :
+ "(unknown)");
/* Aborting, close connection! */
iriap_disconnect_request(self);
@@ -508,12 +494,12 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
/* break; */
}
value_len = fp[n++];
- IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
+ pr_debug("%s(), strlen=%d\n", __func__, value_len);
/* Make sure the string is null-terminated */
if (n + value_len < skb->len)
fp[n + value_len] = 0x00;
- IRDA_DEBUG(4, "Got string %s\n", fp+n);
+ pr_debug("Got string %s\n", fp+n);
/* Will truncate to IAS_MAX_STRING bytes */
value = irias_new_string_value(fp+n);
@@ -539,7 +525,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
if (self->confirm)
self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
else {
- IRDA_DEBUG(0, "%s(), missing handler!\n", __func__);
+ pr_debug("%s(), missing handler!\n", __func__);
irias_delete_value(value);
}
}
@@ -561,8 +547,6 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
__be16 tmp_be16;
__u8 *fp;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(value != NULL, return;);
@@ -623,12 +607,12 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
break;
case IAS_MISSING:
- IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __func__);
+ pr_debug("%s: sending IAS_MISSING\n", __func__);
skb_put(tx_skb, 1);
fp[n++] = value->type;
break;
default:
- IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__);
+ pr_debug("%s(), type not implemented!\n", __func__);
break;
}
iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb);
@@ -655,8 +639,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
__u8 *fp;
int n;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -678,20 +660,20 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
- IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
+ pr_debug("LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
- IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
+ pr_debug("LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&irias_missing);
return;
}
- IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
+ pr_debug("LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
- IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
+ pr_debug("LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN,
&irias_missing);
@@ -714,8 +696,6 @@ void iriap_send_ack(struct iriap_cb *self)
struct sk_buff *tx_skb;
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -745,7 +725,7 @@ void iriap_connect_request(struct iriap_cb *self)
self->saddr, self->daddr,
NULL, NULL);
if (ret < 0) {
- IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
+ pr_debug("%s(), connect failed!\n", __func__);
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
}
}
@@ -793,8 +773,6 @@ static void iriap_connect_indication(void *instance, void *sap,
{
struct iriap_cb *self, *new;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(skb != NULL, return;);
@@ -804,14 +782,14 @@ static void iriap_connect_indication(void *instance, void *sap,
/* Start new server */
new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!new) {
- IRDA_DEBUG(0, "%s(), open failed\n", __func__);
+ pr_debug("%s(), open failed\n", __func__);
goto out;
}
/* Now attach up the new "socket" */
new->lsap = irlmp_dup(self->lsap, new);
if (!new->lsap) {
- IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
+ pr_debug("%s(), dup failed!\n", __func__);
goto out;
}
@@ -841,8 +819,6 @@ static int iriap_data_indication(void *instance, void *sap,
__u8 *frame;
__u8 opcode;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(skb != NULL, return 0;);
@@ -853,21 +829,20 @@ static int iriap_data_indication(void *instance, void *sap,
if (self->mode == IAS_SERVER) {
/* Call server */
- IRDA_DEBUG(4, "%s(), Calling server!\n", __func__);
+ pr_debug("%s(), Calling server!\n", __func__);
iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb);
goto out;
}
opcode = frame[0];
if (~opcode & IAP_LST) {
- IRDA_WARNING("%s:, IrIAS multiframe commands or "
- "results is not implemented yet!\n",
- __func__);
+ net_warn_ratelimited("%s:, IrIAS multiframe commands or results is not implemented yet!\n",
+ __func__);
goto out;
}
/* Check for ack frames since they don't contain any data */
if (opcode & IAP_ACK) {
- IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__);
+ pr_debug("%s() Got ack frame!\n", __func__);
goto out;
}
@@ -875,7 +850,7 @@ static int iriap_data_indication(void *instance, void *sap,
switch (opcode) {
case GET_INFO_BASE:
- IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n");
+ pr_debug("IrLMP GetInfoBaseDetails not implemented!\n");
break;
case GET_VALUE_BY_CLASS:
iriap_do_call_event(self, IAP_RECV_F_LST, NULL);
@@ -885,7 +860,7 @@ static int iriap_data_indication(void *instance, void *sap,
iriap_getvaluebyclass_confirm(self, skb);
break;
case IAS_CLASS_UNKNOWN:
- IRDA_DEBUG(1, "%s(), No such class!\n", __func__);
+ pr_debug("%s(), No such class!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
@@ -898,7 +873,7 @@ static int iriap_data_indication(void *instance, void *sap,
self->priv);
break;
case IAS_ATTRIB_UNKNOWN:
- IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__);
+ pr_debug("%s(), No such attribute!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
@@ -913,8 +888,8 @@ static int iriap_data_indication(void *instance, void *sap,
}
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__,
- opcode);
+ pr_debug("%s(), Unknown op-code: %02x\n", __func__,
+ opcode);
break;
}
@@ -935,8 +910,6 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
__u8 *fp;
__u8 opcode;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -945,16 +918,16 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
opcode = fp[0];
if (~opcode & 0x80) {
- IRDA_WARNING("%s: IrIAS multiframe commands or results "
- "is not implemented yet!\n", __func__);
+ net_warn_ratelimited("%s: IrIAS multiframe commands or results is not implemented yet!\n",
+ __func__);
return;
}
opcode &= 0x7f; /* Mask away LST bit */
switch (opcode) {
case GET_INFO_BASE:
- IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n",
- __func__);
+ net_warn_ratelimited("%s: GetInfoBaseDetails not implemented yet!\n",
+ __func__);
break;
case GET_VALUE_BY_CLASS:
iriap_getvaluebyclass_indication(self, skb);
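
A minimal sketch of the logging style these hunks converge on (hypothetical helper, not taken from this file): IRDA_DEBUG(level, ...) becomes pr_debug() with the level argument dropped, and pr_debug() only emits output when DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, while the formerly unconditional IRDA_WARNING()/IRDA_ERROR() calls become the rate-limited net_warn_ratelimited()/net_err_ratelimited().

#include <linux/kernel.h>	/* pr_debug() */
#include <linux/net.h>		/* net_warn_ratelimited(), net_err_ratelimited() */
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical receive helper showing the three substitutions used above. */
static int example_rx(const u8 *frame, unsigned int len)
{
	if (len == 0) {
		/* was IRDA_ERROR(): still always compiled in, now rate-limited */
		net_err_ratelimited("%s(), empty frame!\n", __func__);
		return -EINVAL;
	}

	if (frame[0] & 0x80) {
		/* was IRDA_WARNING(): unconditional, but rate-limited */
		net_warn_ratelimited("%s(), unsupported opcode %02x\n",
				     __func__, frame[0]);
		return -EINVAL;
	}

	/* was IRDA_DEBUG(4, ...): level dropped, compiled out unless
	 * DEBUG or CONFIG_DYNAMIC_DEBUG is enabled for this file */
	pr_debug("%s(), opcode=%02x\n", __func__, frame[0]);
	return 0;
}
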
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index 703774e29e32..e6098b2e048a 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -187,7 +187,7 @@ static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
case IAP_LM_DISCONNECT_INDICATION:
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
break;
}
}
@@ -219,7 +219,7 @@ static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_client_state(self, S_DISCONNECT);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
break;
}
}
@@ -243,7 +243,7 @@ static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_client_state(self, S_DISCONNECT);
break;
default:
- IRDA_DEBUG(0, "state_s_call: Unknown event %d\n", event);
+ pr_debug("state_s_call: Unknown event %d\n", event);
break;
}
}
@@ -271,7 +271,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_call_state(self, S_OUTSTANDING);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
break;
}
}
@@ -285,7 +285,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
/*
@@ -307,7 +307,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_call_state(self, S_WAIT_FOR_CALL);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event);
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
break;
}
}
@@ -320,7 +320,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
/*
@@ -332,7 +332,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
@@ -345,7 +345,7 @@ static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
/**************************************************************************
@@ -368,10 +368,8 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
switch (event) {
case IAP_LM_CONNECT_INDICATION:
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
- if (tx_skb == NULL) {
- IRDA_WARNING("%s: unable to malloc!\n", __func__);
+ if (tx_skb == NULL)
return;
- }
/* Reserve space for MUX_CONTROL and LAP header */
skb_reserve(tx_skb, LMP_MAX_HEADER);
@@ -388,7 +386,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_r_connect_state(self, R_RECEIVING);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event);
+ pr_debug("%s(), unknown event %d\n", __func__, event);
break;
}
}
@@ -399,8 +397,6 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
switch (event) {
case IAP_LM_DISCONNECT_INDICATION:
/* Abort call */
@@ -408,7 +404,7 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
iriap_next_r_connect_state(self, R_WAITING);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
+ pr_debug("%s(), unknown event!\n", __func__);
break;
}
}
@@ -423,13 +419,13 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), Not implemented\n", __func__);
+ pr_debug("%s(), Not implemented\n", __func__);
}
/*
@@ -441,8 +437,6 @@ static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
switch (event) {
case IAP_RECV_F_LST:
iriap_next_r_connect_state(self, R_EXECUTE);
@@ -450,7 +444,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
iriap_call_indication(self, skb);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
+ pr_debug("%s(), unknown event!\n", __func__);
break;
}
}
@@ -464,8 +458,6 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -485,7 +477,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
irlmp_data_request(self->lsap, skb);
break;
default:
- IRDA_DEBUG(0, "%s(), unknown event!\n", __func__);
+ pr_debug("%s(), unknown event!\n", __func__);
break;
}
}
@@ -493,7 +485,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event,
static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event);
+ pr_debug("%s(), event=%d\n", __func__, event);
switch (event) {
case IAP_RECV_F_LST:
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index f07ed9fd5792..53b86d0e1630 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -48,20 +48,18 @@ struct ias_object *irias_new_object( char *name, int id)
{
struct ias_object *obj;
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
if (obj == NULL) {
- IRDA_WARNING("%s(), Unable to allocate object!\n",
- __func__);
+ net_warn_ratelimited("%s(), Unable to allocate object!\n",
+ __func__);
return NULL;
}
obj->magic = IAS_OBJECT_MAGIC;
obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
if (!obj->name) {
- IRDA_WARNING("%s(), Unable to allocate name!\n",
- __func__);
+ net_warn_ratelimited("%s(), Unable to allocate name!\n",
+ __func__);
kfree(obj);
return NULL;
}
@@ -73,8 +71,8 @@ struct ias_object *irias_new_object( char *name, int id)
obj->attribs = hashbin_new(HB_LOCK);
if (obj->attribs == NULL) {
- IRDA_WARNING("%s(), Unable to allocate attribs!\n",
- __func__);
+ net_warn_ratelimited("%s(), Unable to allocate attribs!\n",
+ __func__);
kfree(obj->name);
kfree(obj);
return NULL;
@@ -134,8 +132,8 @@ int irias_delete_object(struct ias_object *obj)
/* Remove from list */
node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj);
if (!node)
- IRDA_DEBUG( 0, "%s(), object already removed!\n",
- __func__);
+ pr_debug("%s(), object already removed!\n",
+ __func__);
/* Destroy */
__irias_delete_object(obj);
@@ -269,8 +267,8 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
/* Find object */
obj = hashbin_lock_find(irias_objects, 0, obj_name);
if (obj == NULL) {
- IRDA_WARNING("%s: Unable to find object: %s\n", __func__,
- obj_name);
+ net_warn_ratelimited("%s: Unable to find object: %s\n",
+ __func__, obj_name);
return -1;
}
@@ -280,15 +278,15 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
/* Find attribute */
attrib = hashbin_find(obj->attribs, 0, attrib_name);
if (attrib == NULL) {
- IRDA_WARNING("%s: Unable to find attribute: %s\n",
- __func__, attrib_name);
+ net_warn_ratelimited("%s: Unable to find attribute: %s\n",
+ __func__, attrib_name);
spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
return -1;
}
if ( attrib->value->type != new_value->type) {
- IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n",
- __func__);
+ pr_debug("%s(), changing value type not allowed!\n",
+ __func__);
spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
return -1;
}
@@ -322,8 +320,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
return;
}
@@ -333,8 +331,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
/* Insert value */
attrib->value = irias_new_integer_value(value);
if (!attrib->name || !attrib->value) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
if (attrib->value)
irias_delete_value(attrib->value);
kfree(attrib->name);
@@ -366,8 +364,8 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
return;
}
@@ -376,8 +374,8 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
attrib->value = irias_new_octseq_value( octets, len);
if (!attrib->name || !attrib->value) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
if (attrib->value)
irias_delete_value(attrib->value);
kfree(attrib->name);
@@ -408,8 +406,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
return;
}
@@ -418,8 +416,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
attrib->value = irias_new_string_value(value);
if (!attrib->name || !attrib->value) {
- IRDA_WARNING("%s: Unable to allocate attribute!\n",
- __func__);
+ net_warn_ratelimited("%s: Unable to allocate attribute!\n",
+ __func__);
if (attrib->value)
irias_delete_value(attrib->value);
kfree(attrib->name);
@@ -442,10 +440,8 @@ struct ias_value *irias_new_integer_value(int integer)
struct ias_value *value;
value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
- if (value == NULL) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ if (value == NULL)
return NULL;
- }
value->type = IAS_INTEGER;
value->len = 4;
@@ -467,16 +463,14 @@ struct ias_value *irias_new_string_value(char *string)
struct ias_value *value;
value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
- if (value == NULL) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ if (value == NULL)
return NULL;
- }
value->type = IAS_STRING;
value->charset = CS_ASCII;
value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
if (!value->t.string) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__);
kfree(value);
return NULL;
}
@@ -498,10 +492,8 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
struct ias_value *value;
value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
- if (value == NULL) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ if (value == NULL)
return NULL;
- }
value->type = IAS_OCT_SEQ;
/* Check length */
@@ -511,7 +503,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC);
if (value->t.oct_seq == NULL){
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__);
kfree(value);
return NULL;
}
@@ -523,10 +515,8 @@ struct ias_value *irias_new_missing_value(void)
struct ias_value *value;
value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
- if (value == NULL) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
+ if (value == NULL)
return NULL;
- }
value->type = IAS_MISSING;
@@ -541,8 +531,6 @@ struct ias_value *irias_new_missing_value(void)
*/
void irias_delete_value(struct ias_value *value)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(value != NULL, return;);
switch (value->type) {
@@ -559,7 +547,7 @@ void irias_delete_value(struct ias_value *value)
kfree(value->t.oct_seq);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__);
+ pr_debug("%s(), Unknown value type!\n", __func__);
break;
}
kfree(value);
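
In the allocation paths above the "Unable to kmalloc!" warnings are dropped outright rather than converted: the slab and page allocators already print a warning and a stack trace on failure unless __GFP_NOWARN is passed, so a per-call message adds nothing. A sketch of the resulting error path, using a stand-in structure instead of struct ias_value:

#include <linux/slab.h>		/* kzalloc(), GFP_ATOMIC */

struct example_value {			/* stand-in for struct ias_value */
	int type;
};

static struct example_value *example_new_value(void)
{
	struct example_value *value;

	value = kzalloc(sizeof(*value), GFP_ATOMIC);
	if (!value)	/* no message: the allocator already warned */
		return NULL;

	value->type = 0;
	return value;
}
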
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 42cf1390ce9c..c5837a40c78e 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -72,8 +72,6 @@ static void irlan_client_kick_timer_expired(void *data)
{
struct irlan_cb *self = (struct irlan_cb *) data;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -91,8 +89,6 @@ static void irlan_client_kick_timer_expired(void *data)
static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
irlan_client_kick_timer_expired);
}
@@ -105,8 +101,6 @@ static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
*/
void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
{
- IRDA_DEBUG(1, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -117,7 +111,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
if ((self->client.state != IRLAN_IDLE) ||
(self->provider.access_type == ACCESS_DIRECT))
{
- IRDA_DEBUG(0, "%s(), already awake!\n", __func__ );
+ pr_debug("%s(), already awake!\n", __func__);
return;
}
@@ -126,7 +120,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
self->daddr = daddr;
if (self->disconnect_reason == LM_USER_REQUEST) {
- IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ );
+ pr_debug("%s(), still stopped by user\n", __func__);
return;
}
@@ -153,8 +147,6 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
struct irlan_cb *self;
__u32 saddr, daddr;
- IRDA_DEBUG(1, "%s()\n", __func__ );
-
IRDA_ASSERT(discovery != NULL, return;);
/*
@@ -175,8 +167,8 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
if (self) {
IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;);
- IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ ,
- daddr);
+ pr_debug("%s(), Found instance (%08x)!\n", __func__ ,
+ daddr);
irlan_client_wakeup(self, saddr, daddr);
}
@@ -195,8 +187,6 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
{
struct irlan_cb *self;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
self = instance;
IRDA_ASSERT(self != NULL, return -1;);
@@ -206,7 +196,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
/* Ready for a new command */
- IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ );
+ pr_debug("%s(), clearing tx_busy\n", __func__);
self->client.tx_busy = FALSE;
/* Check if we have some queued commands waiting to be sent */
@@ -223,7 +213,7 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
struct tsap_cb *tsap;
struct sk_buff *skb;
- IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
self = instance;
tsap = sap;
@@ -255,8 +245,6 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
struct tsap_cb *tsap;
notify_t notify;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -275,7 +263,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
if (!tsap) {
- IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
+ pr_debug("%s(), Got no tsap!\n", __func__);
return;
}
self->client.tsap_ctrl = tsap;
@@ -295,8 +283,6 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
{
struct irlan_cb *self;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
self = instance;
IRDA_ASSERT(self != NULL, return;);
@@ -323,34 +309,34 @@ static void print_ret_code(__u8 code)
printk(KERN_INFO "Success\n");
break;
case 1:
- IRDA_WARNING("IrLAN: Insufficient resources\n");
+ net_warn_ratelimited("IrLAN: Insufficient resources\n");
break;
case 2:
- IRDA_WARNING("IrLAN: Invalid command format\n");
+ net_warn_ratelimited("IrLAN: Invalid command format\n");
break;
case 3:
- IRDA_WARNING("IrLAN: Command not supported\n");
+ net_warn_ratelimited("IrLAN: Command not supported\n");
break;
case 4:
- IRDA_WARNING("IrLAN: Parameter not supported\n");
+ net_warn_ratelimited("IrLAN: Parameter not supported\n");
break;
case 5:
- IRDA_WARNING("IrLAN: Value not supported\n");
+ net_warn_ratelimited("IrLAN: Value not supported\n");
break;
case 6:
- IRDA_WARNING("IrLAN: Not open\n");
+ net_warn_ratelimited("IrLAN: Not open\n");
break;
case 7:
- IRDA_WARNING("IrLAN: Authentication required\n");
+ net_warn_ratelimited("IrLAN: Authentication required\n");
break;
case 8:
- IRDA_WARNING("IrLAN: Invalid password\n");
+ net_warn_ratelimited("IrLAN: Invalid password\n");
break;
case 9:
- IRDA_WARNING("IrLAN: Protocol error\n");
+ net_warn_ratelimited("IrLAN: Protocol error\n");
break;
case 255:
- IRDA_WARNING("IrLAN: Asynchronous status\n");
+ net_warn_ratelimited("IrLAN: Asynchronous status\n");
break;
}
}
@@ -374,13 +360,13 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
IRDA_ASSERT(skb != NULL, return;);
- IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len);
+ pr_debug("%s() skb->len=%d\n", __func__ , (int)skb->len);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
if (!skb) {
- IRDA_ERROR("%s(), Got NULL skb!\n", __func__);
+ net_err_ratelimited("%s(), Got NULL skb!\n", __func__);
return;
}
frame = skb->data;
@@ -405,7 +391,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
/* How many parameters? */
count = frame[1];
- IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count);
+ pr_debug("%s(), got %d parameters\n", __func__ , count);
ptr = frame+2;
@@ -413,7 +399,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
for (i=0; i<count;i++) {
ret = irlan_extract_param(ptr, name, value, &val_len);
if (ret < 0) {
- IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
+ pr_debug("%s(), IrLAN, Error!\n", __func__);
break;
}
ptr += ret;
@@ -437,7 +423,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
__u8 *bytes;
int i;
- IRDA_DEBUG(4, "%s(), parm=%s\n", __func__ , param);
+ pr_debug("%s(), parm=%s\n", __func__ , param);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -475,13 +461,13 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
else if (strcmp(value, "HOSTED") == 0)
self->client.access_type = ACCESS_HOSTED;
else {
- IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ );
+ pr_debug("%s(), unknown access type!\n", __func__);
}
}
/* IRLAN version */
if (strcmp(param, "IRLAN_VER") == 0) {
- IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0],
- (__u8) value[1]);
+ pr_debug("IrLAN version %d.%d\n", (__u8)value[0],
+ (__u8)value[1]);
self->version[0] = value[0];
self->version[1] = value[1];
@@ -490,37 +476,37 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
/* Which remote TSAP to use for data channel */
if (strcmp(param, "DATA_CHAN") == 0) {
self->dtsap_sel_data = value[0];
- IRDA_DEBUG(4, "Data TSAP = %02x\n", self->dtsap_sel_data);
+ pr_debug("Data TSAP = %02x\n", self->dtsap_sel_data);
return;
}
if (strcmp(param, "CON_ARB") == 0) {
memcpy(&tmp_cpu, value, 2); /* Align value */
le16_to_cpus(&tmp_cpu); /* Convert to host order */
self->client.recv_arb_val = tmp_cpu;
- IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ ,
- self->client.recv_arb_val);
+ pr_debug("%s(), receive arb val=%d\n", __func__ ,
+ self->client.recv_arb_val);
}
if (strcmp(param, "MAX_FRAME") == 0) {
memcpy(&tmp_cpu, value, 2); /* Align value */
le16_to_cpus(&tmp_cpu); /* Convert to host order */
self->client.max_frame = tmp_cpu;
- IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ ,
- self->client.max_frame);
+ pr_debug("%s(), max frame=%d\n", __func__ ,
+ self->client.max_frame);
}
/* RECONNECT_KEY, in case the link goes down! */
if (strcmp(param, "RECONNECT_KEY") == 0) {
- IRDA_DEBUG(4, "Got reconnect key: ");
+ pr_debug("Got reconnect key: ");
/* for (i = 0; i < val_len; i++) */
/* printk("%02x", value[i]); */
memcpy(self->client.reconnect_key, value, val_len);
self->client.key_len = val_len;
- IRDA_DEBUG(4, "\n");
+ pr_debug("\n");
}
/* FILTER_ENTRY, have we got an ethernet address? */
if (strcmp(param, "FILTER_ENTRY") == 0) {
bytes = value;
- IRDA_DEBUG(4, "Ethernet address = %pM\n", bytes);
+ pr_debug("Ethernet address = %pM\n", bytes);
for (i = 0; i < 6; i++)
self->dev->dev_addr[i] = bytes[i];
}
@@ -537,8 +523,6 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
{
struct irlan_cb *self;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(priv != NULL, return;);
self = priv;
@@ -550,7 +534,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
/* Check if request succeeded */
if (result != IAS_SUCCESS) {
- IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ );
+ pr_debug("%s(), got NULL value!\n", __func__);
irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL,
NULL);
return;
@@ -568,7 +552,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
irias_delete_value(value);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ );
+ pr_debug("%s(), unknown type!\n", __func__);
break;
}
irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL);
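
The net_*_ratelimited() calls introduced above are thin wrappers: they print through the ordinary pr_warn()/pr_err() only while the shared net_ratelimit() budget allows it, so a chatty or misbehaving peer cannot flood the log. Roughly what net_warn_ratelimited() expands to (simplified; the real definition lives in include/linux/net.h):

#include <linux/net.h>		/* net_ratelimit() */
#include <linux/printk.h>	/* pr_warn() */
#include <linux/types.h>

/* Approximation of net_warn_ratelimited(fmt, ...), shown as a function. */
static void example_warn_ret_code(u8 code)
{
	if (net_ratelimit())
		pr_warn("IrLAN: unexpected return code %02x\n", code);
}
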
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
index 8d5a8ebc444f..cc93fabbbb19 100644
--- a/net/irda/irlan/irlan_client_event.c
+++ b/net/irda/irlan/irlan_client_event.c
@@ -92,16 +92,14 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
switch (event) {
case IRLAN_DISCOVERY_INDICATION:
if (self->client.iriap) {
- IRDA_WARNING("%s(), busy with a previous query\n",
- __func__);
+ net_warn_ratelimited("%s(), busy with a previous query\n",
+ __func__);
return -EBUSY;
}
@@ -114,10 +112,10 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
"IrLAN", "IrDA:TinyTP:LsapSel");
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -136,8 +134,6 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -154,7 +150,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_CONN);
break;
case IRLAN_IAS_PROVIDER_NOT_AVAIL:
- IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ );
+ pr_debug("%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__);
irlan_next_client_state(self, IRLAN_IDLE);
/* Give the client a kick! */
@@ -167,10 +163,10 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -189,8 +185,6 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch (event) {
@@ -204,10 +198,10 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -224,8 +218,6 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch (event) {
@@ -244,10 +236,10 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -266,8 +258,6 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -281,10 +271,10 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -305,8 +295,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
{
struct qos_info qos;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -344,7 +332,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_DATA);
break;
default:
- IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ );
+ pr_debug("%s(), unknown access type!\n", __func__);
break;
}
break;
@@ -353,10 +341,10 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
@@ -376,8 +364,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -390,10 +376,10 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -407,8 +393,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
{
struct qos_info qos;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -429,7 +413,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
} else if (self->client.recv_arb_val >
self->provider.send_arb_val)
{
- IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ );
+ pr_debug("%s(), lost the battle :-(\n", __func__);
}
break;
case IRLAN_DATA_CONNECT_INDICATION:
@@ -440,10 +424,10 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
case IRLAN_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ );
+ pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -462,8 +446,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -476,7 +458,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_client_state(self, IRLAN_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -494,8 +476,6 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (skb)
dev_kfree_skb(skb);
@@ -511,8 +491,6 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (skb)
dev_kfree_skb(skb);
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 5a2d0a695529..481bbc2a4349 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -118,8 +118,6 @@ static int __init irlan_init(void)
struct irlan_cb *new;
__u16 hints;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
#ifdef CONFIG_PROC_FS
{ struct proc_dir_entry *proc;
proc = proc_create("irlan", 0, proc_irda, &irlan_fops);
@@ -130,7 +128,6 @@ static int __init irlan_init(void)
}
#endif /* CONFIG_PROC_FS */
- IRDA_DEBUG(4, "%s()\n", __func__ );
hints = irlmp_service_to_hint(S_LAN);
/* Register with IrLMP as a client */
@@ -173,8 +170,6 @@ static void __exit irlan_cleanup(void)
{
struct irlan_cb *self, *next;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
irlmp_unregister_client(ckey);
irlmp_unregister_service(skey);
@@ -201,8 +196,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
struct net_device *dev;
struct irlan_cb *self;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Create network device with irlan */
dev = alloc_irlandev(eth ? "eth%d" : "irlan%d");
if (!dev)
@@ -245,8 +238,8 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
irlan_next_provider_state(self, IRLAN_IDLE);
if (register_netdev(dev)) {
- IRDA_DEBUG(2, "%s(), register_netdev() failed!\n",
- __func__ );
+ pr_debug("%s(), register_netdev() failed!\n",
+ __func__);
self = NULL;
free_netdev(dev);
} else {
@@ -266,8 +259,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
*/
static void __irlan_close(struct irlan_cb *self)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
ASSERT_RTNL();
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -314,8 +305,6 @@ static void irlan_connect_indication(void *instance, void *sap,
struct irlan_cb *self;
struct tsap_cb *tsap;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
self = instance;
tsap = sap;
@@ -326,7 +315,7 @@ static void irlan_connect_indication(void *instance, void *sap,
self->max_sdu_size = max_sdu_size;
self->max_header_size = max_header_size;
- IRDA_DEBUG(0, "%s: We are now connected!\n", __func__);
+ pr_debug("%s: We are now connected!\n", __func__);
del_timer(&self->watchdog_timer);
@@ -370,7 +359,7 @@ static void irlan_connect_confirm(void *instance, void *sap,
/* TODO: we could set the MTU depending on the max_sdu_size */
- IRDA_DEBUG(0, "%s: We are now connected!\n", __func__);
+ pr_debug("%s: We are now connected!\n", __func__);
del_timer(&self->watchdog_timer);
/*
@@ -403,7 +392,7 @@ static void irlan_disconnect_indication(void *instance,
struct irlan_cb *self;
struct tsap_cb *tsap;
- IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason);
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
self = instance;
tsap = sap;
@@ -415,29 +404,30 @@ static void irlan_disconnect_indication(void *instance,
IRDA_ASSERT(tsap == self->tsap_data, return;);
- IRDA_DEBUG(2, "IrLAN, data channel disconnected by peer!\n");
+ pr_debug("IrLAN, data channel disconnected by peer!\n");
/* Save reason so we know if we should try to reconnect or not */
self->disconnect_reason = reason;
switch (reason) {
case LM_USER_REQUEST: /* User request */
- IRDA_DEBUG(2, "%s(), User requested\n", __func__ );
+ pr_debug("%s(), User requested\n", __func__);
break;
case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */
- IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __func__ );
+ pr_debug("%s(), Unexpected IrLAP disconnect\n", __func__);
break;
case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */
- IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __func__ );
+ pr_debug("%s(), IrLAP connect failed\n", __func__);
break;
case LM_LAP_RESET: /* IrLAP reset */
- IRDA_DEBUG(2, "%s(), IrLAP reset\n", __func__ );
+ pr_debug("%s(), IrLAP reset\n", __func__);
break;
case LM_INIT_DISCONNECT:
- IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __func__ );
+ pr_debug("%s(), IrLMP connect failed\n", __func__);
break;
default:
- IRDA_ERROR("%s(), Unknown disconnect reason\n", __func__);
+ net_err_ratelimited("%s(), Unknown disconnect reason\n",
+ __func__);
break;
}
@@ -459,8 +449,6 @@ void irlan_open_data_tsap(struct irlan_cb *self)
struct tsap_cb *tsap;
notify_t notify;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -481,7 +469,7 @@ void irlan_open_data_tsap(struct irlan_cb *self)
tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
if (!tsap) {
- IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
+ pr_debug("%s(), Got no tsap!\n", __func__);
return;
}
self->tsap_data = tsap;
@@ -495,8 +483,6 @@ void irlan_open_data_tsap(struct irlan_cb *self)
void irlan_close_tsaps(struct irlan_cb *self)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -585,8 +571,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
{
struct sk_buff *skb;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
if (irda_lock(&self->client.tx_busy) == FALSE)
return -EBUSY;
@@ -604,7 +588,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
dev_kfree_skb(skb);
return -1;
}
- IRDA_DEBUG(2, "%s(), sending ...\n", __func__ );
+ pr_debug("%s(), sending ...\n", __func__);
return irttp_data_request(self->client.tsap_ctrl, skb);
}
@@ -617,8 +601,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
*/
static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Queue command */
skb_queue_tail(&self->client.txq, skb);
@@ -637,8 +619,6 @@ void irlan_get_provider_info(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -670,8 +650,6 @@ void irlan_open_data_channel(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -705,8 +683,6 @@ void irlan_close_data_channel(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -746,8 +722,6 @@ static void irlan_open_unicast_addr(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -788,8 +762,6 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -832,8 +804,6 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -877,8 +847,6 @@ static void irlan_get_unicast_addr(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -917,8 +885,6 @@ void irlan_get_media_char(struct irlan_cb *self)
struct sk_buff *skb;
__u8 *frame;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -1005,7 +971,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
int n=0;
if (skb == NULL) {
- IRDA_DEBUG(2, "%s(), Got NULL skb\n", __func__ );
+ pr_debug("%s(), Got NULL skb\n", __func__);
return 0;
}
@@ -1022,7 +988,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
IRDA_ASSERT(value_len > 0, return 0;);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ );
+ pr_debug("%s(), Unknown parameter type!\n", __func__);
return 0;
}
@@ -1031,7 +997,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
/* Make space for data */
if (skb_tailroom(skb) < (param_len+value_len+3)) {
- IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __func__ );
+ pr_debug("%s(), No more space at end of skb\n", __func__);
return 0;
}
skb_put(skb, param_len+value_len+3);
@@ -1078,13 +1044,11 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
__u16 val_len;
int n=0;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* get length of parameter name (1 byte) */
name_len = buf[n++];
if (name_len > 254) {
- IRDA_DEBUG(2, "%s(), name_len > 254\n", __func__ );
+ pr_debug("%s(), name_len > 254\n", __func__);
return -RSP_INVALID_COMMAND_FORMAT;
}
@@ -1101,7 +1065,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
le16_to_cpus(&val_len); n+=2;
if (val_len >= 1016) {
- IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
+ pr_debug("%s(), parameter length to long\n", __func__);
return -RSP_INVALID_COMMAND_FORMAT;
}
*len = val_len;
@@ -1111,8 +1075,8 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
value[val_len] = '\0';
n+=val_len;
- IRDA_DEBUG(4, "Parameter: %s ", name);
- IRDA_DEBUG(4, "Value: %s\n", value);
+ pr_debug("Parameter: %s ", name);
+ pr_debug("Value: %s\n", value);
return n;
}
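
The irlan_extract_param() hunks above walk a simple length-prefixed layout: one byte of name length, the name, a two-byte little-endian value length, then the value. A simplified restatement of that walk (hypothetical helper; it omits the bounds checks on name_len and val_len that the real function performs):

#include <linux/string.h>	/* memcpy() */
#include <linux/types.h>
#include <asm/unaligned.h>	/* get_unaligned_le16() */

static int example_extract_param(const u8 *buf, char *name, char *value,
				 u16 *len)
{
	u8 name_len = buf[0];
	u16 val_len = get_unaligned_le16(buf + 1 + name_len);

	memcpy(name, buf + 1, name_len);
	name[name_len] = '\0';

	memcpy(value, buf + 1 + name_len + 2, val_len);
	value[val_len] = '\0';

	*len = val_len;
	return 1 + name_len + 2 + val_len;	/* bytes consumed */
}
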
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index dc13f1a45f2f..fcfbe579434a 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -110,8 +110,6 @@ static int irlan_eth_open(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Ready to play! */
netif_stop_queue(dev); /* Wait until data link is ready */
@@ -137,8 +135,6 @@ static int irlan_eth_close(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Stop device */
netif_stop_queue(dev);
@@ -231,8 +227,8 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
return 0;
}
if (skb->len < ETH_HLEN) {
- IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
- __func__, skb->len);
+ pr_debug("%s() : IrLAN frame too short (%d)\n",
+ __func__, skb->len);
dev->stats.rx_dropped++;
dev_kfree_skb(skb);
return 0;
@@ -281,9 +277,9 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
IRDA_ASSERT(dev != NULL, return;);
- IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__,
- flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
- netif_running(dev));
+ pr_debug("%s() : flow %s ; running %d\n", __func__,
+ flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
+ netif_running(dev));
switch (flow) {
case FLOW_STOP:
@@ -310,32 +306,30 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Check if data channel has been connected yet */
if (self->client.state != IRLAN_DATA) {
- IRDA_DEBUG(1, "%s(), delaying!\n", __func__);
+ pr_debug("%s(), delaying!\n", __func__);
return;
}
if (dev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
- IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
+ net_warn_ratelimited("Promiscuous mode not implemented by IrLAN!\n");
} else if ((dev->flags & IFF_ALLMULTI) ||
netdev_mc_count(dev) > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
- IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
+ pr_debug("%s(), Setting multicast filter\n", __func__);
/* hardware_set_filter(NULL); */
irlan_set_multicast_filter(self, TRUE);
} else if (!netdev_mc_empty(dev)) {
- IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
+ pr_debug("%s(), Setting multicast filter\n", __func__);
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
irlan_set_multicast_filter(self, TRUE);
} else {
- IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__);
+ pr_debug("%s(), Clearing multicast filter\n", __func__);
irlan_set_multicast_filter(self, FALSE);
}
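
The multicast hunk above keeps the usual .ndo_set_rx_mode decision ladder and only swaps the log calls. For reference, the same ladder in a stripped-down sketch (hypothetical driver; the hardware-programming steps are reduced to a debug print, and the literal 16 stands in for a filter-slot limit such as HW_MAX_ADDRS):

#include <linux/netdevice.h>
#include <linux/printk.h>

static void example_set_rx_mode(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC) {
		/* accept every frame */
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   netdev_mc_count(dev) > 16) {
		/* too many groups for the filter: accept all multicast */
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, dev)
			pr_debug("add %pM to the multicast filter\n", ha->addr);
	} else {
		/* no multicast listeners: clear the filter */
	}
}
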
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index 43f16040a6fe..9a1cc11c16f6 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -40,7 +40,7 @@ const char * const irlan_state[] = {
void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
{
- IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]);
+ pr_debug("%s(), %s\n", __func__ , irlan_state[state]);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -50,7 +50,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state)
void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state)
{
- IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]);
+ pr_debug("%s(), %s\n", __func__ , irlan_state[state]);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index 7977be7caf0f..e755e90b2f26 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -43,7 +43,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_DIRECTED) &&
(self->provider.filter_operation == DYNAMIC))
{
- IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n");
+ pr_debug("Giving peer a dynamic Ethernet address\n");
self->provider.mac_address[0] = 0x40;
self->provider.mac_address[1] = 0x00;
self->provider.mac_address[2] = 0x00;
@@ -73,7 +73,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_DIRECTED) &&
(self->provider.filter_mode == FILTER))
{
- IRDA_DEBUG(0, "Directed filter on\n");
+ pr_debug("Directed filter on\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -81,7 +81,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_DIRECTED) &&
(self->provider.filter_mode == NONE))
{
- IRDA_DEBUG(0, "Directed filter off\n");
+ pr_debug("Directed filter off\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -90,7 +90,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_BROADCAST) &&
(self->provider.filter_mode == FILTER))
{
- IRDA_DEBUG(0, "Broadcast filter on\n");
+ pr_debug("Broadcast filter on\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -98,7 +98,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_BROADCAST) &&
(self->provider.filter_mode == NONE))
{
- IRDA_DEBUG(0, "Broadcast filter off\n");
+ pr_debug("Broadcast filter off\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -106,7 +106,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_MULTICAST) &&
(self->provider.filter_mode == FILTER))
{
- IRDA_DEBUG(0, "Multicast filter on\n");
+ pr_debug("Multicast filter on\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -114,7 +114,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_MULTICAST) &&
(self->provider.filter_mode == NONE))
{
- IRDA_DEBUG(0, "Multicast filter off\n");
+ pr_debug("Multicast filter off\n");
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x00;
return;
@@ -122,7 +122,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
if ((self->provider.filter_type == IRLAN_MULTICAST) &&
(self->provider.filter_operation == GET))
{
- IRDA_DEBUG(0, "Multicast filter get\n");
+ pr_debug("Multicast filter get\n");
skb->data[0] = 0x00; /* Success? */
skb->data[1] = 0x02;
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
@@ -132,7 +132,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
skb->data[0] = 0x00; /* Command not supported */
skb->data[1] = 0x00;
- IRDA_DEBUG(0, "Not implemented!\n");
+ pr_debug("Not implemented!\n");
}
/*
@@ -143,18 +143,15 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
*/
void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
- IRDA_DEBUG(4, "%s, %s\n", param, value);
+ pr_debug("%s, %s\n", param, value);
/*
* This is experimental!! DB.
*/
if (strcmp(param, "MODE") == 0) {
- IRDA_DEBUG(0, "%s()\n", __func__ );
self->use_udata = TRUE;
return;
}
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 4664855222f4..15c292cf2644 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -70,8 +70,6 @@ static int irlan_provider_data_indication(void *instance, void *sap,
struct irlan_cb *self;
__u8 code;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
self = instance;
IRDA_ASSERT(self != NULL, return -1;);
@@ -82,32 +80,32 @@ static int irlan_provider_data_indication(void *instance, void *sap,
code = skb->data[0];
switch(code) {
case CMD_GET_PROVIDER_INFO:
- IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n");
+ pr_debug("Got GET_PROVIDER_INFO command!\n");
irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
break;
case CMD_GET_MEDIA_CHAR:
- IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n");
+ pr_debug("Got GET_MEDIA_CHAR command!\n");
irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
break;
case CMD_OPEN_DATA_CHANNEL:
- IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n");
+ pr_debug("Got OPEN_DATA_CHANNEL command!\n");
irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
break;
case CMD_FILTER_OPERATION:
- IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n");
+ pr_debug("Got FILTER_OPERATION command!\n");
irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
break;
case CMD_RECONNECT_DATA_CHAN:
- IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ );
- IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
+ pr_debug("%s(), Got RECONNECT_DATA_CHAN command\n", __func__);
+ pr_debug("%s(), NOT IMPLEMENTED\n", __func__);
break;
case CMD_CLOSE_DATA_CHAN:
- IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n");
- IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
+ pr_debug("Got CLOSE_DATA_CHAN command!\n");
+ pr_debug("%s(), NOT IMPLEMENTED\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
+ pr_debug("%s(), Unknown command!\n", __func__);
break;
}
return 0;
@@ -128,8 +126,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
struct irlan_cb *self;
struct tsap_cb *tsap;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
self = instance;
tsap = sap;
@@ -179,7 +175,7 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap,
struct irlan_cb *self;
struct tsap_cb *tsap;
- IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
+ pr_debug("%s(), reason=%d\n", __func__ , reason);
self = instance;
tsap = sap;
@@ -233,7 +229,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
- IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len);
+ pr_debug("%s(), skb->len=%d\n", __func__ , (int)skb->len);
IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
@@ -255,7 +251,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
/* How many parameters? */
count = frame[1];
- IRDA_DEBUG(4, "Got %d parameters\n", count);
+ pr_debug("Got %d parameters\n", count);
ptr = frame+2;
@@ -263,7 +259,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
for (i=0; i<count;i++) {
ret = irlan_extract_param(ptr, name, value, &val_len);
if (ret < 0) {
- IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
+ pr_debug("%s(), IrLAN, Error!\n", __func__);
break;
}
ptr+=ret;
@@ -288,8 +284,6 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
{
struct sk_buff *skb;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -320,7 +314,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
irlan_insert_string_param(skb, "MEDIA", "802.5");
break;
default:
- IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ );
+ pr_debug("%s(), unknown media type!\n", __func__);
break;
}
irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
@@ -344,7 +338,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ );
+ pr_debug("%s(), Unknown access type\n", __func__);
break;
}
irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
@@ -364,7 +358,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
irlan_filter_request(self, skb);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
+ pr_debug("%s(), Unknown command!\n", __func__);
break;
}
@@ -382,8 +376,6 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
struct tsap_cb *tsap;
notify_t notify;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -403,7 +395,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
tsap = irttp_open_tsap(LSAP_ANY, 1, &notify);
if (!tsap) {
- IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
+ pr_debug("%s(), Got no tsap!\n", __func__);
return -1;
}
self->provider.tsap_ctrl = tsap;
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c
index 01a9d7c993ee..9c4f7f51d6b5 100644
--- a/net/irda/irlan/irlan_provider_event.c
+++ b/net/irda/irlan/irlan_provider_event.c
@@ -72,8 +72,6 @@ void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -82,7 +80,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_provider_state( self, IRLAN_INFO);
break;
default:
- IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -101,8 +99,6 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
{
int ret;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -147,7 +143,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_provider_state(self, IRLAN_IDLE);
break;
default:
- IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -166,8 +162,6 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
switch(event) {
@@ -186,7 +180,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_provider_state(self, IRLAN_IDLE);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
@@ -205,8 +199,6 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -221,7 +213,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
irlan_next_provider_state(self, IRLAN_IDLE);
break;
default:
- IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event);
+ pr_debug("%s(), Unknown event %d\n", __func__ , event);
break;
}
if (skb)
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index a778df55f5d6..7f2cafddfb6e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -60,8 +60,7 @@ static void __irlap_close(struct irlap_cb *self);
static void irlap_init_qos_capabilities(struct irlap_cb *self,
struct qos_info *qos_user);
-#ifdef CONFIG_IRDA_DEBUG
-static const char *const lap_reasons[] = {
+static const char *const lap_reasons[] __maybe_unused = {
"ERROR, NOT USED",
"LAP_DISC_INDICATION",
"LAP_NO_RESPONSE",
@@ -71,7 +70,6 @@ static const char *const lap_reasons[] = {
"LAP_PRIMARY_CONFLICT",
"ERROR, NOT USED",
};
-#endif /* CONFIG_IRDA_DEBUG */
int __init irlap_init(void)
{
@@ -85,8 +83,8 @@ int __init irlap_init(void)
/* Allocate master array */
irlap = hashbin_new(HB_LOCK);
if (irlap == NULL) {
- IRDA_ERROR("%s: can't allocate irlap hashbin!\n",
- __func__);
+ net_err_ratelimited("%s: can't allocate irlap hashbin!\n",
+ __func__);
return -ENOMEM;
}
@@ -111,8 +109,6 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
{
struct irlap_cb *self;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* Initialize the irlap structure. */
self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL);
if (self == NULL)
@@ -213,8 +209,6 @@ void irlap_close(struct irlap_cb *self)
{
struct irlap_cb *lap;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -229,7 +223,7 @@ void irlap_close(struct irlap_cb *self)
/* Be sure that we manage to remove ourself from the hash */
lap = hashbin_remove(irlap, self->saddr, NULL);
if (!lap) {
- IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__);
+ pr_debug("%s(), Didn't find myself!\n", __func__);
return;
}
__irlap_close(lap);
@@ -244,8 +238,6 @@ EXPORT_SYMBOL(irlap_close);
*/
void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -263,8 +255,6 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
*/
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL);
}
@@ -278,7 +268,7 @@ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata)
void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
struct qos_info *qos_user, int sniff)
{
- IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr);
+ pr_debug("%s(), daddr=0x%08x\n", __func__, daddr);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -305,8 +295,6 @@ void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
*/
void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -342,8 +330,6 @@ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
return;);
skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
@@ -389,8 +375,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
return;);
skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
@@ -415,8 +399,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb)
#ifdef CONFIG_IRDA_ULTRA
void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -435,8 +417,6 @@ void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb)
*/
void irlap_disconnect_request(struct irlap_cb *self)
{
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -456,7 +436,7 @@ void irlap_disconnect_request(struct irlap_cb *self)
irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
break;
default:
- IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__);
+ pr_debug("%s(), disconnect pending!\n", __func__);
self->disconnect_pending = TRUE;
break;
}
@@ -470,7 +450,7 @@ void irlap_disconnect_request(struct irlap_cb *self)
*/
void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
- IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]);
+ pr_debug("%s(), reason=%s\n", __func__, lap_reasons[reason]);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -480,7 +460,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
switch (reason) {
case LAP_RESET_INDICATION:
- IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
+ pr_debug("%s(), Sending reset request!\n", __func__);
irlap_do_event(self, RESET_REQUEST, NULL, NULL);
break;
case LAP_NO_RESPONSE: /* FALLTHROUGH */
@@ -491,7 +471,8 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
reason, NULL);
break;
default:
- IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason);
+ net_err_ratelimited("%s: Unknown reason %d\n",
+ __func__, reason);
}
}
@@ -509,7 +490,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(discovery != NULL, return;);
- IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots);
+ pr_debug("%s(), nslots = %d\n", __func__, discovery->nslots);
IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
(discovery->nslots == 8) || (discovery->nslots == 16),
@@ -517,8 +498,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
/* Discovery is only possible in NDM mode */
if (self->state != LAP_NDM) {
- IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n",
- __func__);
+ pr_debug("%s(), discovery only possible in NDM mode\n",
+ __func__);
irlap_discovery_confirm(self, NULL);
/* Note : in theory, if we are not in NDM, we could postpone
* the discovery like we do for connection request.
@@ -540,8 +521,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
self->discovery_log = hashbin_new(HB_NOLOCK);
if (self->discovery_log == NULL) {
- IRDA_WARNING("%s(), Unable to allocate discovery log!\n",
- __func__);
+ net_warn_ratelimited("%s(), Unable to allocate discovery log!\n",
+ __func__);
return;
}
@@ -596,8 +577,6 @@ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
*/
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(discovery != NULL, return;);
@@ -625,10 +604,10 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
{
switch (quality_of_link) {
case STATUS_NO_ACTIVITY:
- IRDA_MESSAGE("IrLAP, no activity on link!\n");
+ net_info_ratelimited("IrLAP, no activity on link!\n");
break;
case STATUS_NOISY:
- IRDA_MESSAGE("IrLAP, noisy link!\n");
+ net_info_ratelimited("IrLAP, noisy link!\n");
break;
default:
break;
@@ -642,8 +621,6 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link)
*/
void irlap_reset_indication(struct irlap_cb *self)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -658,7 +635,6 @@ void irlap_reset_indication(struct irlap_cb *self)
*/
void irlap_reset_confirm(void)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
}
/*
@@ -758,7 +734,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
/* nr as expected? */
if (nr == self->vs) {
- IRDA_DEBUG(4, "%s(), expected!\n", __func__);
+ pr_debug("%s(), expected!\n", __func__);
return NR_EXPECTED;
}
@@ -786,8 +762,6 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr)
*/
void irlap_initiate_connection_state(struct irlap_cb *self)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -869,7 +843,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
{
struct sk_buff *skb;
- IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed);
+ pr_debug("%s(), setting speed to %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -912,7 +886,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self,
* user may not have set all of them.
*/
if (qos_user) {
- IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__);
+ pr_debug("%s(), Found user specified QoS!\n", __func__);
if (qos_user->baud_rate.bits)
self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
@@ -942,8 +916,6 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self,
*/
void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -1005,8 +977,6 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self)
*/
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -1085,12 +1055,12 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
self->N1 = sysctl_warn_noreply_time * 1000 /
self->qos_rx.max_turn_time.value;
- IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);
+ pr_debug("Setting N1 = %d\n", self->N1);
/* Set N2 to match our own disconnect time */
self->N2 = self->qos_tx.link_disc_time.value * 1000 /
self->qos_rx.max_turn_time.value;
- IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
+ pr_debug("Setting N2 = %d\n", self->N2);
}
#ifdef CONFIG_PROC_FS
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index ccd214f9d196..0e1b4d79f745 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -78,8 +78,7 @@ static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
struct sk_buff *, struct irlap_info *);
-#ifdef CONFIG_IRDA_DEBUG
-static const char *const irlap_event[] = {
+static const char *const irlap_event[] __maybe_unused = {
"DISCOVERY_REQUEST",
"CONNECT_REQUEST",
"CONNECT_RESPONSE",
@@ -119,7 +118,6 @@ static const char *const irlap_event[] = {
"BACKOFF_TIMER_EXPIRED",
"MEDIA_BUSY_TIMER_EXPIRED",
};
-#endif /* CONFIG_IRDA_DEBUG */
const char *const irlap_state[] = {
"LAP_NDM",
@@ -218,7 +216,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
} else
self->fast_RR = FALSE;
- IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies);
+ pr_debug("%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies);
#endif /* CONFIG_IRDA_FAST_RR */
if (timeout == 0)
@@ -242,8 +240,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
if (!self || self->magic != LAP_MAGIC)
return;
- IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__,
- irlap_event[event], irlap_state[self->state]);
+ pr_debug("%s(), event = %s, state = %s\n", __func__,
+ irlap_event[event], irlap_state[self->state]);
ret = (*state[self->state])(self, event, skb, info);
@@ -260,8 +258,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
* try to disconnect link if we send any data frames, since
* that will change the state away form XMIT
*/
- IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
- skb_queue_len(&self->txq));
+ pr_debug("%s() : queue len = %d\n", __func__,
+ skb_queue_len(&self->txq));
if (!skb_queue_empty(&self->txq)) {
/* Prevent race conditions with irlap_data_request() */
@@ -340,8 +338,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
/* Note : this will never happen, because we test
* media busy in irlap_connect_request() and
* postpone the event... - Jean II */
- IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n",
- __func__);
+ pr_debug("%s(), CONNECT_REQUEST: media busy!\n",
+ __func__);
/* Always switch state before calling upper layers */
irlap_next_state(self, LAP_NDM);
@@ -367,16 +365,16 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
irlap_connect_indication(self, skb);
} else {
- IRDA_DEBUG(0, "%s(), SNRM frame does not "
- "contain an I field!\n", __func__);
+ pr_debug("%s(), SNRM frame does not contain an I field!\n",
+ __func__);
}
break;
case DISCOVERY_REQUEST:
IRDA_ASSERT(info != NULL, return -1;);
if (self->media_busy) {
- IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n",
- __func__);
+ pr_debug("%s(), DISCOVERY_REQUEST: media busy!\n",
+ __func__);
/* irlap->log.condition = MEDIA_BUSY; */
/* This will make IrLMP try again */
@@ -442,7 +440,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
* those cases...
* Jean II
*/
- IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__);
+ pr_debug("%s(), Receiving final discovery request, missed the discovery slots :-(\n",
+ __func__);
/* Last discovery request -> in the log */
irlap_discovery_indication(self, info->discovery);
@@ -520,8 +519,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
case RECV_UI_FRAME:
/* Only accept broadcast frames in NDM mode */
if (info->caddr != CBROADCAST) {
- IRDA_DEBUG(0, "%s(), not a broadcast frame!\n",
- __func__);
+ pr_debug("%s(), not a broadcast frame!\n",
+ __func__);
} else
irlap_unitdata_indication(self, skb);
break;
@@ -537,11 +536,11 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
irlap_send_test_frame(self, CBROADCAST, info->daddr, skb);
break;
case RECV_TEST_RSP:
- IRDA_DEBUG(0, "%s() not implemented!\n", __func__);
+ pr_debug("%s() not implemented!\n", __func__);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
- irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
ret = -1;
break;
@@ -568,13 +567,12 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
IRDA_ASSERT(info != NULL, return -1;);
IRDA_ASSERT(info->discovery != NULL, return -1;);
- IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__,
- info->discovery->data.daddr);
+ pr_debug("%s(), daddr=%08x\n", __func__,
+ info->discovery->data.daddr);
if (!self->discovery_log) {
- IRDA_WARNING("%s: discovery log is gone! "
- "maybe the discovery timeout has been set"
- " too short?\n", __func__);
+ net_warn_ratelimited("%s: discovery log is gone! maybe the discovery timeout has been set too short?\n",
+ __func__);
break;
}
hashbin_insert(self->discovery_log,
@@ -599,7 +597,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
IRDA_ASSERT(info != NULL, return -1;);
- IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s);
+ pr_debug("%s(), Receiving discovery request (s = %d) while performing discovery :-(\n",
+ __func__, info->s);
/* Last discovery request ? */
if (info->s == 0xff)
@@ -613,8 +612,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
* timing requirements.
*/
if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
- IRDA_DEBUG(2, "%s(), device is slow to answer, "
- "waiting some more!\n", __func__);
+ pr_debug("%s(), device is slow to answer, waiting some more!\n",
+ __func__);
irlap_start_slot_timer(self, msecs_to_jiffies(10));
self->add_wait = TRUE;
return ret;
@@ -650,8 +649,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
- irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
ret = -1;
break;
@@ -672,15 +671,13 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
discovery_t *discovery_rsp;
int ret=0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
switch (event) {
case QUERY_TIMER_EXPIRED:
- IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n",
- __func__, jiffies);
+ pr_debug("%s(), QUERY_TIMER_EXPIRED <%ld>\n",
+ __func__, jiffies);
irlap_next_state(self, LAP_NDM);
break;
case RECV_DISCOVERY_XID_CMD:
@@ -718,8 +715,8 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
ret = -1;
break;
@@ -739,7 +736,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]);
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -799,20 +796,20 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
break;
case RECV_DISCOVERY_XID_CMD:
- IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n",
- __func__);
+ pr_debug("%s(), event RECV_DISCOVER_XID_CMD!\n",
+ __func__);
irlap_next_state(self, LAP_NDM);
break;
case DISCONNECT_REQUEST:
- IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__);
+ pr_debug("%s(), Disconnect request!\n", __func__);
irlap_send_dm_frame(self);
irlap_next_state( self, LAP_NDM);
irlap_disconnect_indication(self, LAP_DISC_INDICATION);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
ret = -1;
break;
@@ -833,8 +830,6 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -862,7 +857,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
self->retry_count++;
break;
case RECV_SNRM_CMD:
- IRDA_DEBUG(4, "%s(), SNRM battle!\n", __func__);
+ pr_debug("%s(), SNRM battle!\n", __func__);
IRDA_ASSERT(skb != NULL, return 0;);
IRDA_ASSERT(info != NULL, return 0;);
@@ -949,8 +944,8 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
irlap_disconnect_indication(self, LAP_DISC_INDICATION);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, %s\n", __func__,
+ event, irlap_event[event]);
ret = -1;
break;
@@ -967,7 +962,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info)
{
- IRDA_DEBUG( 0, "%s(), Unknown event\n", __func__);
+ pr_debug("%s(), Unknown event\n", __func__);
return -1;
}
@@ -1030,8 +1025,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
* speed and turn-around-time.
*/
if((!nextfit) && (skb->len > self->bytes_left)) {
- IRDA_DEBUG(0, "%s(), Not allowed to transmit"
- " more bytes!\n", __func__);
+ pr_debug("%s(), Not allowed to transmit more bytes!\n",
+ __func__);
/* Requeue the skb */
skb_queue_head(&self->txq, skb_get(skb));
/*
@@ -1082,8 +1077,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
self->fast_RR = FALSE;
#endif /* CONFIG_IRDA_FAST_RR */
} else {
- IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n",
- __func__);
+ pr_debug("%s(), Unable to send! remote busy?\n",
+ __func__);
skb_queue_head(&self->txq, skb_get(skb));
/*
@@ -1094,8 +1089,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
case POLL_TIMER_EXPIRED:
- IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n",
- __func__, jiffies);
+ pr_debug("%s(), POLL_TIMER_EXPIRED <%ld>\n",
+ __func__, jiffies);
irlap_send_rr_frame(self, CMD_FRAME);
/* Return to NRM properly - Jean II */
self->window = self->window_size;
@@ -1120,8 +1115,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
* when we return... - Jean II */
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __func__, irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
ret = -EINVAL;
break;
@@ -1139,8 +1134,6 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1174,7 +1167,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event);
+ pr_debug("%s(), Unknown event %d\n", __func__, event);
ret = -1;
break;
@@ -1296,9 +1289,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state */
irlap_next_state(self, LAP_NRM_P);
} else {
- IRDA_DEBUG(4,
- "%s(), missing or duplicate frame!\n",
- __func__);
+ pr_debug("%s(), missing or duplicate frame!\n",
+ __func__);
/* Update Nr received */
irlap_update_nr_received(self, info->nr);
@@ -1367,8 +1359,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
if ((ns_status == NS_UNEXPECTED) &&
(nr_status == NR_UNEXPECTED))
{
- IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n",
- __func__);
+ pr_debug("%s(), unexpected nr and ns!\n",
+ __func__);
if (info->pf) {
/* Resend rejected frames */
irlap_resend_rejected_frames(self, CMD_FRAME);
@@ -1408,9 +1400,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
}
break;
}
- IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__);
- IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n",
- __func__, irlap_event[event], ns_status, nr_status);
+ pr_debug("%s(), Not implemented!\n", __func__);
+ pr_debug("%s(), event=%s, ns_status=%d, nr_status=%d\n",
+ __func__, irlap_event[event], ns_status, nr_status);
break;
case RECV_UI_FRAME:
/* Poll bit cleared? */
@@ -1421,7 +1413,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
del_timer(&self->final_timer);
irlap_data_indication(self, skb, TRUE);
irlap_next_state(self, LAP_XMIT_P);
- IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]);
+ pr_debug("%s: RECV_UI_FRAME: next state %s\n",
+ __func__, irlap_state[self->state]);
irlap_start_poll_timer(self, self->poll_timeout);
}
break;
@@ -1464,10 +1457,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Update Nr received */
irlap_update_nr_received(self, info->nr);
- IRDA_DEBUG(4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, "
- "vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va,
- self->vs, self->vr);
+ pr_debug("RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
+ self->retry_count, info->nr, self->va,
+ self->vs, self->vr);
/* Resend rejected frames */
irlap_resend_rejected_frames(self, CMD_FRAME);
@@ -1475,8 +1467,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state(self, LAP_NRM_P);
} else if (ret == NR_INVALID) {
- IRDA_DEBUG(1, "%s(), Received RR with "
- "invalid nr !\n", __func__);
+ pr_debug("%s(), Received RR with invalid nr !\n",
+ __func__);
irlap_next_state(self, LAP_RESET_WAIT);
@@ -1512,8 +1504,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
* we only do this once for each frame.
*/
if (irda_device_is_receiving(self->netdev) && !self->add_wait) {
- IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a "
- "frame! Waiting a little bit more!\n");
+ pr_debug("FINAL_TIMER_EXPIRED when receiving a frame! Waiting a little bit more!\n");
irlap_start_final_timer(self, msecs_to_jiffies(300));
/*
@@ -1530,18 +1521,18 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
if (self->retry_count < self->N2) {
if (skb_peek(&self->wx_list) == NULL) {
/* Retry sending the pf bit to the secondary */
- IRDA_DEBUG(4, "nrm_p: resending rr");
+ pr_debug("nrm_p: resending rr");
irlap_wait_min_turn_around(self, &self->qos_tx);
irlap_send_rr_frame(self, CMD_FRAME);
} else {
- IRDA_DEBUG(4, "nrm_p: resend frames");
+ pr_debug("nrm_p: resend frames");
irlap_resend_rejected_frames(self, CMD_FRAME);
}
irlap_start_final_timer(self, self->final_timeout);
self->retry_count++;
- IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:"
- " retry_count=%d\n", self->retry_count);
+ pr_debug("irlap_state_nrm_p: FINAL_TIMER_EXPIRED: retry_count=%d\n",
+ self->retry_count);
/* Early warning event. I'm using a pretty liberal
* interpretation of the spec and generate an event
@@ -1581,7 +1572,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_final_timer(self, 2 * self->final_timeout);
break;
case RECV_RD_RSP:
- IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__);
+ pr_debug("%s(), RECV_RD_RSP\n", __func__);
irlap_flush_all_queues(self);
irlap_next_state(self, LAP_XMIT_P);
@@ -1589,8 +1580,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
irlap_disconnect_request(self);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %s\n",
- __func__, irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
ret = -1;
break;
@@ -1610,7 +1601,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);
+ pr_debug("%s(), event = %s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1636,8 +1627,8 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_PCLOSE);
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
- irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
ret = -1;
break;
@@ -1657,7 +1648,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]);
+ pr_debug("%s(), event = %s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1715,7 +1706,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
* state
*/
if (!info) {
- IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__);
+ pr_debug("%s(), RECV_SNRM_CMD\n", __func__);
irlap_initiate_connection_state(self);
irlap_wait_min_turn_around(self, &self->qos_tx);
irlap_send_ua_response_frame(self, &self->qos_rx);
@@ -1723,14 +1714,13 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_wd_timer(self, self->wd_timeout);
irlap_next_state(self, LAP_NDM);
} else {
- IRDA_DEBUG(0,
- "%s(), SNRM frame contained an I field!\n",
- __func__);
+ pr_debug("%s(), SNRM frame contained an I field!\n",
+ __func__);
}
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %s\n",
- __func__, irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlap_event[event]);
ret = -1;
break;
@@ -1750,7 +1740,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]);
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -ENODEV;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -1786,8 +1776,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
* speed and turn-around-time.
*/
if((!nextfit) && (skb->len > self->bytes_left)) {
- IRDA_DEBUG(0, "%s(), Not allowed to transmit"
- " more bytes!\n", __func__);
+ pr_debug("%s(), Not allowed to transmit more bytes!\n",
+ __func__);
/* Requeue the skb */
skb_queue_head(&self->txq, skb_get(skb));
@@ -1833,7 +1823,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
ret = -EPROTO;
}
} else {
- IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__);
+ pr_debug("%s(), Unable to send!\n", __func__);
skb_queue_head(&self->txq, skb_get(skb));
ret = -EPROTO;
}
@@ -1849,8 +1839,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
* when we return... - Jean II */
break;
default:
- IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__,
- irlap_event[event]);
+ pr_debug("%s(), Unknown event %s\n", __func__,
+ irlap_event[event]);
ret = -EINVAL;
break;
@@ -1872,7 +1862,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
int nr_status;
int ret = 0;
- IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]);
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;);
@@ -1880,10 +1870,9 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
switch (event) {
case RECV_I_CMD: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
- IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, "
- "vr=%d, pf=%d\n", __func__,
- irlap_event[event], info->nr,
- self->vs, info->ns, self->vr, info->pf);
+ pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n",
+ __func__, irlap_event[event], info->nr,
+ self->vs, info->ns, self->vr, info->pf);
self->retry_count = 0;
@@ -1983,7 +1972,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
{
if (info->pf) {
- IRDA_DEBUG(4, "RECV_I_RSP: frame(s) lost\n");
+ pr_debug("RECV_I_RSP: frame(s) lost\n");
self->vr = (self->vr + 1) % 8;
@@ -2020,10 +2009,10 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
}
if (ret == NR_INVALID) {
- IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n");
+ pr_debug("NRM_S, NR_INVALID not implemented!\n");
}
if (ret == NS_INVALID) {
- IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n");
+ pr_debug("NRM_S, NS_INVALID not implemented!\n");
}
break;
case RECV_UI_FRAME:
@@ -2112,22 +2101,21 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
/* Keep state */
irlap_next_state(self, LAP_NRM_S);
} else {
- IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n",
- __func__);
+ pr_debug("%s(), invalid nr not implemented!\n",
+ __func__);
}
break;
case RECV_SNRM_CMD:
/* SNRM frame is not allowed to contain an I-field */
if (!info) {
del_timer(&self->wd_timer);
- IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__);
+ pr_debug("%s(), received SNRM cmd\n", __func__);
irlap_next_state(self, LAP_RESET_CHECK);
irlap_reset_indication(self);
} else {
- IRDA_DEBUG(0,
- "%s(), SNRM frame contained an I-field!\n",
- __func__);
+ pr_debug("%s(), SNRM frame contained an I-field!\n",
+ __func__);
}
break;
@@ -2159,8 +2147,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
* which explain why we use (self->N2 / 2) here !!!
* Jean II
*/
- IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__,
- self->retry_count);
+ pr_debug("%s(), retry_count = %d\n", __func__,
+ self->retry_count);
if (self->retry_count < (self->N2 / 2)) {
/* No retry, just wait for primary */
@@ -2212,8 +2200,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
irlap_send_test_frame(self, self->caddr, info->daddr, skb);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
ret = -EINVAL;
break;
@@ -2227,8 +2215,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -ENODEV;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -2284,8 +2270,8 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
break; /* stay in SCLOSE */
}
- IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
break;
}
@@ -2299,7 +2285,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]);
+ pr_debug("%s(), event=%s\n", __func__, irlap_event[event]);
IRDA_ASSERT(self != NULL, return -ENODEV;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
@@ -2320,8 +2306,8 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state(self, LAP_SCLOSE);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
- event, irlap_event[event]);
+ pr_debug("%s(), Unknown event %d, (%s)\n", __func__,
+ event, irlap_event[event]);
ret = -EINVAL;
break;
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index a37998c6273d..b936b1251a66 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -103,8 +103,8 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
irlap_insert_info(self, skb);
if (unlikely(self->mode & IRDA_MODE_MONITOR)) {
- IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__,
- self->netdev->name);
+ pr_debug("%s(): %s is in monitor mode\n", __func__,
+ self->netdev->name);
dev_kfree_skb(skb);
return;
}
@@ -182,8 +182,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
/* Check if the new connection address is valid */
if ((info->caddr == 0x00) || (info->caddr == 0xfe)) {
- IRDA_DEBUG(3, "%s(), invalid connection address!\n",
- __func__);
+ pr_debug("%s(), invalid connection address!\n",
+ __func__);
return;
}
@@ -193,8 +193,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
/* Only accept if addressed directly to us */
if (info->saddr != self->saddr) {
- IRDA_DEBUG(2, "%s(), not addressed to us!\n",
- __func__);
+ pr_debug("%s(), not addressed to us!\n",
+ __func__);
return;
}
irlap_do_event(self, RECV_SNRM_CMD, skb, info);
@@ -216,7 +216,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
struct ua_frame *frame;
int ret;
- IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies);
+ pr_debug("%s() <%ld>\n", __func__, jiffies);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -291,8 +291,6 @@ void irlap_send_disc_frame(struct irlap_cb *self)
struct sk_buff *tx_skb = NULL;
struct disc_frame *frame;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -322,8 +320,8 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
__u32 bcast = BROADCAST;
__u8 *info;
- IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__,
- s, S, command);
+ pr_debug("%s(), s=%d, S=%d, command=%d\n", __func__,
+ s, S, command);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -415,13 +413,11 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
__u8 *discovery_info;
char *text;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
- IRDA_ERROR("%s: frame too short!\n", __func__);
+ net_err_ratelimited("%s: frame too short!\n", __func__);
return;
}
@@ -432,13 +428,13 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
/* Make sure frame is addressed to us */
if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
- IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n",
- __func__);
+ pr_debug("%s(), frame is not addressed to us!\n",
+ __func__);
return;
}
if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
- IRDA_WARNING("%s: kmalloc failed!\n", __func__);
+ net_warn_ratelimited("%s: kmalloc failed!\n", __func__);
return;
}
@@ -446,15 +442,15 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
discovery->data.saddr = self->saddr;
discovery->timestamp = jiffies;
- IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__,
- discovery->data.daddr);
+ pr_debug("%s(), daddr=%08x\n", __func__,
+ discovery->data.daddr);
discovery_info = skb_pull(skb, sizeof(struct xid_frame));
/* Get info returned from peer */
discovery->data.hints[0] = discovery_info[0];
if (discovery_info[0] & HINT_EXTENSION) {
- IRDA_DEBUG(4, "EXTENSION\n");
+ pr_debug("EXTENSION\n");
discovery->data.hints[1] = discovery_info[1];
discovery->data.charset = discovery_info[2];
text = (char *) &discovery_info[3];
@@ -492,7 +488,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
char *text;
if (!pskb_may_pull(skb, sizeof(struct xid_frame))) {
- IRDA_ERROR("%s: frame too short!\n", __func__);
+ net_err_ratelimited("%s: frame too short!\n", __func__);
return;
}
@@ -503,8 +499,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
/* Make sure frame is addressed to us */
if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) {
- IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n",
- __func__);
+ pr_debug("%s(), frame is not addressed to us!\n",
+ __func__);
return;
}
@@ -536,8 +532,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
/* Check if things are sane at this point... */
if((discovery_info == NULL) ||
!pskb_may_pull(skb, 3)) {
- IRDA_ERROR("%s: discovery frame too short!\n",
- __func__);
+ net_err_ratelimited("%s: discovery frame too short!\n",
+ __func__);
return;
}
@@ -545,10 +541,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
* We now have some discovery info to deliver!
*/
discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
- if (!discovery) {
- IRDA_WARNING("%s: unable to malloc!\n", __func__);
+ if (!discovery)
return;
- }
discovery->data.daddr = info->daddr;
discovery->data.saddr = self->saddr;
@@ -658,7 +652,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
{
info->nr = skb->data[1] >> 5;
- IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies);
+ pr_debug("%s(), nr=%d, %ld\n", __func__, info->nr, jiffies);
if (command)
irlap_do_event(self, RECV_RNR_CMD, skb, info);
@@ -669,8 +663,6 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb,
static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
struct irlap_info *info, int command)
{
- IRDA_DEBUG(0, "%s()\n", __func__);
-
info->nr = skb->data[1] >> 5;
/* Check if this is a command or a response frame */
@@ -683,8 +675,6 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb,
static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
struct irlap_info *info, int command)
{
- IRDA_DEBUG(0, "%s()\n", __func__);
-
info->nr = skb->data[1] >> 5;
/* Check if this is a command or a response frame */
@@ -697,8 +687,6 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb,
static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb,
struct irlap_info *info, int command)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Check if this is a command or a response frame */
if (command)
irlap_do_event(self, RECV_DISC_CMD, skb, info);
@@ -756,7 +744,7 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb)
irlap_send_i_frame( self, tx_skb, CMD_FRAME);
} else {
- IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__);
+ pr_debug("%s(), sending unreliable frame\n", __func__);
irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
self->window -= 1;
}
@@ -809,7 +797,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
irlap_next_state(self, LAP_NRM_P);
irlap_send_i_frame(self, tx_skb, CMD_FRAME);
} else {
- IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__);
+ pr_debug("%s(), sending unreliable frame\n", __func__);
if (self->ack_required) {
irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
@@ -836,7 +824,9 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
* See max_line_capacities[][] in qos.c for details. Jean II */
transmission_time -= (self->final_timeout * self->bytes_left
/ self->line_capacity);
- IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time);
+ pr_debug("%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n",
+ __func__, self->final_timeout, self->bytes_left,
+ self->line_capacity, transmission_time);
/* We are allowed to transmit a maximum number of bytes again. */
self->bytes_left = self->line_capacity;
@@ -997,7 +987,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
/* tx_skb = skb_clone( skb, GFP_ATOMIC); */
tx_skb = skb_copy(skb, GFP_ATOMIC);
if (!tx_skb) {
- IRDA_DEBUG(0, "%s(), unable to copy\n", __func__);
+ pr_debug("%s(), unable to copy\n", __func__);
return;
}
@@ -1020,7 +1010,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
*/
while (!skb_queue_empty(&self->txq)) {
- IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__);
+ pr_debug("%s(), sending additional frames!\n", __func__);
if (self->window > 0) {
skb = skb_dequeue( &self->txq);
IRDA_ASSERT(skb != NULL, return;);
@@ -1060,7 +1050,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
/* tx_skb = skb_clone( skb, GFP_ATOMIC); */
tx_skb = skb_copy(skb, GFP_ATOMIC);
if (!tx_skb) {
- IRDA_DEBUG(0, "%s(), unable to copy\n", __func__);
+ pr_debug("%s(), unable to copy\n", __func__);
return;
}
@@ -1083,8 +1073,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
__u8 caddr, int command)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -1143,8 +1131,6 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self,
static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
struct irlap_info *info)
{
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
info->pf = skb->data[1] & PF_BIT; /* Final bit */
irlap_do_event(self, RECV_UI_FRAME, skb, info);
@@ -1162,15 +1148,13 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
__u8 *frame;
int w, x, y, z;
- IRDA_DEBUG(0, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(info != NULL, return;);
if (!pskb_may_pull(skb, 4)) {
- IRDA_ERROR("%s: frame too short!\n", __func__);
+ net_err_ratelimited("%s: frame too short!\n", __func__);
return;
}
@@ -1186,21 +1170,16 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb,
z = frame[3] & 0x08;
if (w) {
- IRDA_DEBUG(0, "Rejected control field is undefined or not "
- "implemented.\n");
+ pr_debug("Rejected control field is undefined or not implemented\n");
}
if (x) {
- IRDA_DEBUG(0, "Rejected control field was invalid because it "
- "contained a non permitted I field.\n");
+ pr_debug("Rejected control field was invalid because it contained a non permitted I field\n");
}
if (y) {
- IRDA_DEBUG(0, "Received I field exceeded the maximum negotiated "
- "for the existing connection or exceeded the maximum "
- "this station supports if no connection exists.\n");
+ pr_debug("Received I field exceeded the maximum negotiated for the existing connection or exceeded the maximum this station supports if no connection exists\n");
}
if (z) {
- IRDA_DEBUG(0, "Rejected control field control field contained an "
- "invalid Nr count.\n");
+ pr_debug("Rejected control field control field contained an invalid Nr count\n");
}
irlap_do_event(self, RECV_FRMR_RSP, skb, info);
}
@@ -1256,10 +1235,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
{
struct test_frame *frame;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (!pskb_may_pull(skb, sizeof(*frame))) {
- IRDA_ERROR("%s: frame too short!\n", __func__);
+ net_err_ratelimited("%s: frame too short!\n", __func__);
return;
}
frame = (struct test_frame *) skb->data;
@@ -1267,8 +1244,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
/* Broadcast frames must carry saddr and daddr fields */
if (info->caddr == CBROADCAST) {
if (skb->len < sizeof(struct test_frame)) {
- IRDA_DEBUG(0, "%s() test frame too short!\n",
- __func__);
+ pr_debug("%s() test frame too short!\n",
+ __func__);
return;
}
@@ -1328,13 +1305,13 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
* share and non linear skbs. This should never happen, so
* we don't need to be clever about it. Jean II */
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IRDA_ERROR("%s: can't clone shared skb!\n", __func__);
+ net_err_ratelimited("%s: can't clone shared skb!\n", __func__);
goto err;
}
/* Check if frame is large enough for parsing */
if (!pskb_may_pull(skb, 2)) {
- IRDA_ERROR("%s: frame too short!\n", __func__);
+ net_err_ratelimited("%s: frame too short!\n", __func__);
goto err;
}
@@ -1348,8 +1325,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
/* First we check if this frame has a valid connection address */
if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) {
- IRDA_DEBUG(0, "%s(), wrong connection address!\n",
- __func__);
+ pr_debug("%s(), wrong connection address!\n",
+ __func__);
goto out;
}
/*
@@ -1383,8 +1360,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
irlap_recv_srej_frame(self, skb, &info, command);
break;
default:
- IRDA_WARNING("%s: Unknown S-frame %02x received!\n",
- __func__, info.control);
+ net_warn_ratelimited("%s: Unknown S-frame %02x received!\n",
+ __func__, info.control);
break;
}
goto out;
@@ -1421,8 +1398,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
irlap_recv_ui_frame(self, skb, &info);
break;
default:
- IRDA_WARNING("%s: Unknown frame %02x received!\n",
- __func__, info.control);
+ net_warn_ratelimited("%s: Unknown frame %02x received!\n",
+ __func__, info.control);
break;
}
out:
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index a5f28d421ea8..a26c401ef4a4 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -83,7 +83,6 @@ const char *irlmp_reason_str(LM_REASON reason)
*/
int __init irlmp_init(void)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
/* Initialize the irlmp structure. */
irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
if (irlmp == NULL)
@@ -170,10 +169,8 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
/* Allocate new instance of a LSAP connection */
self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
- if (self == NULL) {
- IRDA_ERROR("%s: can't allocate memory\n", __func__);
+ if (self == NULL)
return NULL;
- }
self->magic = LMP_LSAP_MAGIC;
self->slsap_sel = slsap_sel;
@@ -209,8 +206,6 @@ EXPORT_SYMBOL(irlmp_open_lsap);
*/
static void __irlmp_close_lsap(struct lsap_cb *self)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -269,9 +264,8 @@ void irlmp_close_lsap(struct lsap_cb *self)
NULL);
}
if (!lsap) {
- IRDA_DEBUG(0,
- "%s(), Looks like somebody has removed me already!\n",
- __func__);
+ pr_debug("%s(), Looks like somebody has removed me already!\n",
+ __func__);
return;
}
__irlmp_close_lsap(self);
@@ -297,10 +291,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
* Allocate new instance of a LSAP connection
*/
lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
- if (lap == NULL) {
- IRDA_ERROR("%s: unable to kmalloc\n", __func__);
+ if (lap == NULL)
return;
- }
lap->irlap = irlap;
lap->magic = LMP_LAP_MAGIC;
@@ -311,7 +303,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
#endif
lap->lsaps = hashbin_new(HB_LOCK);
if (lap->lsaps == NULL) {
- IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__);
+ net_warn_ratelimited("%s(), unable to kmalloc lsaps\n",
+ __func__);
kfree(lap);
return;
}
@@ -343,8 +336,6 @@ void irlmp_unregister_link(__u32 saddr)
{
struct lap_cb *link;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* We must remove ourselves from the hashbin *first*. This ensure
* that no more LSAPs will be open on this link and no discovery
* will be triggered anymore. Jean II */
@@ -386,9 +377,8 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
IRDA_ASSERT(self != NULL, return -EBADR;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;);
- IRDA_DEBUG(2,
- "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
- __func__, self->slsap_sel, dlsap_sel, saddr, daddr);
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
+ __func__, self->slsap_sel, dlsap_sel, saddr, daddr);
if (test_bit(0, &self->connected)) {
ret = -EISCONN;
@@ -432,7 +422,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
if (daddr != DEV_ADDR_ANY)
discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
else {
- IRDA_DEBUG(2, "%s(), no daddr\n", __func__);
+ pr_debug("%s(), no daddr\n", __func__);
discovery = (discovery_t *)
hashbin_get_first(irlmp->cachelog);
}
@@ -445,7 +435,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
}
lap = hashbin_lock_find(irlmp->links, saddr, NULL);
if (lap == NULL) {
- IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __func__);
+ pr_debug("%s(), Unable to find a usable link!\n", __func__);
ret = -EHOSTUNREACH;
goto err;
}
@@ -460,14 +450,15 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
* disconnected yet (waiting for timeout in LAP).
* Maybe we could give LAP a bit of help in this case.
*/
- IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__);
+ pr_debug("%s(), sorry, but I'm waiting for LAP to timeout!\n",
+ __func__);
ret = -EAGAIN;
goto err;
}
/* LAP is already connected to a different node, and LAP
* can only talk to one node at a time */
- IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__);
+ pr_debug("%s(), sorry, but link is busy!\n", __func__);
ret = -EBUSY;
goto err;
}
@@ -528,8 +519,8 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self->lap != NULL, return;);
- IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
- __func__, self->slsap_sel, self->dlsap_sel);
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
/* Note : self->lap is set in irlmp_link_data_indication(),
* (case CONNECT_CMD:) because we have no way to set it here.
@@ -569,8 +560,8 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
/* We set the connected bit and move the lsap to the connected list
* in the state machine itself. Jean II */
- IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
- __func__, self->slsap_sel, self->dlsap_sel);
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
/* Make room for MUX control header (3 bytes) */
IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;);
@@ -596,8 +587,6 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
int lap_header_size;
int max_seg_size;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -609,8 +598,8 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
max_header_size = LMP_HEADER + lap_header_size;
- IRDA_DEBUG(2, "%s(), max_header_size=%d\n",
- __func__, max_header_size);
+ pr_debug("%s(), max_header_size=%d\n",
+ __func__, max_header_size);
/* Hide LMP_CONTROL_HEADER header from layer above */
skb_pull(skb, LMP_CONTROL_HEADER);
@@ -636,16 +625,14 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
struct lsap_cb *new;
unsigned long flags;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
/* Only allowed to duplicate unconnected LSAP's, and only LSAPs
* that have received a connect indication. Jean II */
if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) ||
(orig->lap == NULL)) {
- IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n",
- __func__);
+ pr_debug("%s(), invalid LSAP (wrong state)\n",
+ __func__);
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
flags);
return NULL;
@@ -654,7 +641,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
/* Allocate a new instance */
new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
if (!new) {
- IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
+ pr_debug("%s(), unable to kmalloc\n", __func__);
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
flags);
return NULL;
@@ -700,7 +687,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
* and us that might mess up the hashbins below. This fixes it.
* Jean II */
if (! test_and_clear_bit(0, &self->connected)) {
- IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
+ pr_debug("%s(), already disconnected!\n", __func__);
dev_kfree_skb(userdata);
return -1;
}
@@ -754,20 +741,20 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
{
struct lsap_cb *lsap;
- IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__,
- irlmp_reason_str(reason), reason);
+ pr_debug("%s(), reason=%s [%d]\n", __func__,
+ irlmp_reason_str(reason), reason);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
- IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
- __func__, self->slsap_sel, self->dlsap_sel);
+ pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ __func__, self->slsap_sel, self->dlsap_sel);
/* Already disconnected ?
* There is a race condition between irlmp_disconnect_request()
* and us that might mess up the hashbins below. This fixes it.
* Jean II */
if (! test_and_clear_bit(0, &self->connected)) {
- IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
+ pr_debug("%s(), already disconnected!\n", __func__);
return;
}
@@ -800,7 +787,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
self->notify.disconnect_indication(self->notify.instance,
self, reason, skb);
} else {
- IRDA_DEBUG(0, "%s(), no handler\n", __func__);
+ pr_debug("%s(), no handler\n", __func__);
}
}
@@ -852,8 +839,8 @@ void irlmp_do_discovery(int nslots)
/* Make sure the value is sane */
if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
- IRDA_WARNING("%s: invalid value for number of slots!\n",
- __func__);
+ net_warn_ratelimited("%s: invalid value for number of slots!\n",
+ __func__);
nslots = sysctl_discovery_slots = 8;
}
@@ -971,8 +958,6 @@ irlmp_notify_client(irlmp_client_t *client,
int number; /* Number of nodes in the log */
int i;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
/* Check if client wants or not partial/selective log (optimisation) */
if (!client->disco_callback)
return;
@@ -1022,8 +1007,6 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
irlmp_client_t *client;
irlmp_client_t *client_next;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(log != NULL, return;);
if (!(HASHBIN_GET_SIZE(log)))
@@ -1057,8 +1040,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
irlmp_client_t *client_next;
int i;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(expiries != NULL, return;);
/* For each client - notify callback may touch client list */
@@ -1091,8 +1072,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number)
*/
discovery_t *irlmp_get_discovery_response(void)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(irlmp != NULL, return NULL;);
put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints);
@@ -1169,8 +1148,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
{
int ret;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(userdata != NULL, return -1;);
/* Make room for MUX header */
@@ -1193,8 +1170,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata)
*/
void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -1220,8 +1195,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
struct sk_buff *clone_skb;
struct lap_cb *lap;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(userdata != NULL, return -1;);
/* Make room for MUX and PID header */
@@ -1271,8 +1244,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata,
#ifdef CONFIG_IRDA_ULTRA
void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -1314,7 +1285,7 @@ void irlmp_status_indication(struct lap_cb *self,
curr->notify.status_indication(curr->notify.instance,
link, lock);
else
- IRDA_DEBUG(2, "%s(), no handler\n", __func__);
+ pr_debug("%s(), no handler\n", __func__);
curr = next;
}
@@ -1342,7 +1313,7 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
/* Get the number of lsap. That's the only safe way to know
* that we have looped around... - Jean II */
lsap_todo = HASHBIN_GET_SIZE(self->lsaps);
- IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo);
+ pr_debug("%s() : %d lsaps to scan\n", __func__, lsap_todo);
/* Poll lsap in order until the queue is full or until we
* tried them all.
@@ -1361,14 +1332,16 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
/* Uh-oh... Paranoia */
if(curr == NULL)
break;
- IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap));
+ pr_debug("%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n",
+ __func__, curr, next, self->flow_next, lsap_todo,
+ IRLAP_GET_TX_QUEUE_LEN(self->irlap));
/* Inform lsap user that it can send one more packet. */
if (curr->notify.flow_indication != NULL)
curr->notify.flow_indication(curr->notify.instance,
curr, flow);
else
- IRDA_DEBUG(1, "%s(), no handler\n", __func__);
+ pr_debug("%s(), no handler\n", __func__);
}
}
@@ -1389,32 +1362,30 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
* since we currently only support 2 hint bytes
*/
service = kmalloc(16, GFP_ATOMIC);
- if (!service) {
- IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
+ if (!service)
return NULL;
- }
if (!hint[0]) {
- IRDA_DEBUG(1, "<None>\n");
+ pr_debug("<None>\n");
kfree(service);
return NULL;
}
if (hint[0] & HINT_PNP)
- IRDA_DEBUG(1, "PnP Compatible ");
+ pr_debug("PnP Compatible ");
if (hint[0] & HINT_PDA)
- IRDA_DEBUG(1, "PDA/Palmtop ");
+ pr_debug("PDA/Palmtop ");
if (hint[0] & HINT_COMPUTER)
- IRDA_DEBUG(1, "Computer ");
+ pr_debug("Computer ");
if (hint[0] & HINT_PRINTER) {
- IRDA_DEBUG(1, "Printer ");
+ pr_debug("Printer ");
service[i++] = S_PRINTER;
}
if (hint[0] & HINT_MODEM)
- IRDA_DEBUG(1, "Modem ");
+ pr_debug("Modem ");
if (hint[0] & HINT_FAX)
- IRDA_DEBUG(1, "Fax ");
+ pr_debug("Fax ");
if (hint[0] & HINT_LAN) {
- IRDA_DEBUG(1, "LAN Access ");
+ pr_debug("LAN Access ");
service[i++] = S_LAN;
}
/*
@@ -1424,22 +1395,22 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
*/
if (hint[0] & HINT_EXTENSION) {
if (hint[1] & HINT_TELEPHONY) {
- IRDA_DEBUG(1, "Telephony ");
+ pr_debug("Telephony ");
service[i++] = S_TELEPHONY;
}
if (hint[1] & HINT_FILE_SERVER)
- IRDA_DEBUG(1, "File Server ");
+ pr_debug("File Server ");
if (hint[1] & HINT_COMM) {
- IRDA_DEBUG(1, "IrCOMM ");
+ pr_debug("IrCOMM ");
service[i++] = S_COMM;
}
if (hint[1] & HINT_OBEX) {
- IRDA_DEBUG(1, "IrOBEX ");
+ pr_debug("IrOBEX ");
service[i++] = S_OBEX;
}
}
- IRDA_DEBUG(1, "\n");
+ pr_debug("\n");
/* So that client can be notified about any discovery */
service[i++] = S_ANY;
@@ -1492,14 +1463,13 @@ void *irlmp_register_service(__u16 hints)
{
irlmp_service_t *service;
- IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints);
+ pr_debug("%s(), hints = %04x\n", __func__, hints);
/* Make a new registration */
service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
- if (!service) {
- IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__);
+ if (!service)
return NULL;
- }
+
service->hints.word = hints;
hashbin_insert(irlmp->services, (irda_queue_t *) service,
(long) service, NULL);
@@ -1522,15 +1492,13 @@ int irlmp_unregister_service(void *handle)
irlmp_service_t *service;
unsigned long flags;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
if (!handle)
return -1;
/* Caller may call with invalid handle (it's legal) - Jean II */
service = hashbin_lock_find(irlmp->services, (long) handle, NULL);
if (!service) {
- IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__);
+ pr_debug("%s(), Unknown service!\n", __func__);
return -1;
}
@@ -1567,15 +1535,12 @@ void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
{
irlmp_client_t *client;
- IRDA_DEBUG(1, "%s()\n", __func__);
IRDA_ASSERT(irlmp != NULL, return NULL;);
/* Make a new registration */
client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
- if (!client) {
- IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__);
+ if (!client)
return NULL;
- }
/* Register the details */
client->hint_mask.word = hint_mask;
@@ -1609,7 +1574,7 @@ int irlmp_update_client(void *handle, __u16 hint_mask,
client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
if (!client) {
- IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
+ pr_debug("%s(), Unknown client!\n", __func__);
return -1;
}
@@ -1632,19 +1597,17 @@ int irlmp_unregister_client(void *handle)
{
struct irlmp_client *client;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
if (!handle)
return -1;
/* Caller may call with invalid handle (it's legal) - Jean II */
client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
if (!client) {
- IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
+ pr_debug("%s(), Unknown client!\n", __func__);
return -1;
}
- IRDA_DEBUG(4, "%s(), removing client!\n", __func__);
+ pr_debug("%s(), removing client!\n", __func__);
hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
kfree(client);
@@ -1673,8 +1636,6 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;);
- IRDA_DEBUG(4, "%s()\n", __func__);
-
#ifdef CONFIG_IRDA_ULTRA
/* Accept all bindings to the connectionless LSAP */
if (slsap_sel == LSAP_CONNLESS)
@@ -1708,8 +1669,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
goto errlsap;);
if ((self->slsap_sel == slsap_sel)) {
- IRDA_DEBUG(4, "Source LSAP selector=%02x in use\n",
- self->slsap_sel);
+ pr_debug("Source LSAP selector=%02x in use\n",
+ self->slsap_sel);
goto errlsap;
}
self = (struct lsap_cb*) hashbin_get_next(lap->lsaps);
@@ -1733,8 +1694,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel)
while (self != NULL) {
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;);
if ((self->slsap_sel == slsap_sel)) {
- IRDA_DEBUG(4, "Source LSAP selector=%02x in use (unconnected)\n",
- self->slsap_sel);
+ pr_debug("Source LSAP selector=%02x in use (unconnected)\n",
+ self->slsap_sel);
goto erruncon;
}
self = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
@@ -1799,8 +1760,8 @@ static __u8 irlmp_find_free_slsap(void)
/* Make sure we terminate the loop */
if (wrapped++) {
- IRDA_ERROR("%s: no more free LSAPs !\n",
- __func__);
+ net_err_ratelimited("%s: no more free LSAPs !\n",
+ __func__);
return 0;
}
}
@@ -1814,8 +1775,8 @@ static __u8 irlmp_find_free_slsap(void)
/* Got it ! */
lsap_sel = irlmp->last_lsap_sel;
- IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n",
- __func__, lsap_sel);
+ pr_debug("%s(), found free lsap_sel=%02x\n",
+ __func__, lsap_sel);
return lsap_sel;
}
@@ -1833,26 +1794,27 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
switch (lap_reason) {
case LAP_DISC_INDICATION: /* Received a disconnect request from peer */
- IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__);
+ pr_debug("%s(), LAP_DISC_INDICATION\n", __func__);
reason = LM_USER_REQUEST;
break;
case LAP_NO_RESPONSE: /* To many retransmits without response */
- IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__);
+ pr_debug("%s(), LAP_NO_RESPONSE\n", __func__);
reason = LM_LAP_DISCONNECT;
break;
case LAP_RESET_INDICATION:
- IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__);
+ pr_debug("%s(), LAP_RESET_INDICATION\n", __func__);
reason = LM_LAP_RESET;
break;
case LAP_FOUND_NONE:
case LAP_MEDIA_BUSY:
case LAP_PRIMARY_CONFLICT:
- IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__);
+ pr_debug("%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n",
+ __func__);
reason = LM_CONNECT_FAILURE;
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n",
- __func__, lap_reason);
+ pr_debug("%s(), Unknown IrLAP disconnect reason %d!\n",
+ __func__, lap_reason);
reason = LM_LAP_DISCONNECT;
break;
}
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 9505a7d06f1a..e306cf2c1e04 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -48,8 +48,7 @@ const char *const irlsap_state[] = {
"LSAP_SETUP_PEND",
};
-#ifdef CONFIG_IRDA_DEBUG
-static const char *const irlmp_event[] = {
+static const char *const irlmp_event[] __maybe_unused = {
"LM_CONNECT_REQUEST",
"LM_CONNECT_CONFIRM",
"LM_CONNECT_RESPONSE",
@@ -75,7 +74,6 @@ static const char *const irlmp_event[] = {
"LM_LAP_DISCOVERY_CONFIRM",
"LM_LAP_IDLE_TIMEOUT",
};
-#endif /* CONFIG_IRDA_DEBUG */
/* LAP Connection control proto declarations */
static void irlmp_state_standby (struct lap_cb *, IRLMP_EVENT,
@@ -120,7 +118,7 @@ static inline void irlmp_next_lap_state(struct lap_cb *self,
IRLMP_STATE state)
{
/*
- IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __func__, irlmp_state[state]);
+ pr_debug("%s(), LMP LAP = %s\n", __func__, irlmp_state[state]);
*/
self->lap_state = state;
}
@@ -130,7 +128,7 @@ static inline void irlmp_next_lsap_state(struct lsap_cb *self,
{
/*
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]);
+ pr_debug("%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]);
*/
self->lsap_state = state;
}
@@ -142,8 +140,8 @@ int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
- IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n",
- __func__, irlmp_event[event], irlsap_state[ self->lsap_state]);
+ pr_debug("%s(), EVENT = %s, STATE = %s\n",
+ __func__, irlmp_event[event], irlsap_state[self->lsap_state]);
return (*lsap_state[self->lsap_state]) (self, event, skb);
}
@@ -160,17 +158,15 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
- IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __func__,
- irlmp_event[event],
- irlmp_state[self->lap_state]);
+ pr_debug("%s(), EVENT = %s, STATE = %s\n", __func__,
+ irlmp_event[event],
+ irlmp_state[self->lap_state]);
(*lap_state[self->lap_state]) (self, event, skb);
}
void irlmp_discovery_timer_expired(void *data)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* We always cleanup the log (active & passive discovery) */
irlmp_do_expiry();
@@ -184,8 +180,6 @@ void irlmp_watchdog_timer_expired(void *data)
{
struct lsap_cb *self = (struct lsap_cb *) data;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -196,8 +190,6 @@ void irlmp_idle_timer_expired(void *data)
{
struct lap_cb *self = (struct lap_cb *) data;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
@@ -256,7 +248,6 @@ irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin,
static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self->irlap != NULL, return;);
switch (event) {
@@ -276,7 +267,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
irlap_connect_response(self->irlap, skb);
break;
case LM_LAP_CONNECT_REQUEST:
- IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __func__);
+ pr_debug("%s() LS_CONNECT_REQUEST\n", __func__);
irlmp_next_lap_state(self, LAP_U_CONNECT);
@@ -284,14 +275,14 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
irlap_connect_request(self->irlap, self->daddr, NULL, 0);
break;
case LM_LAP_DISCONNECT_INDICATION:
- IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n",
- __func__);
+ pr_debug("%s(), Error LM_LAP_DISCONNECT_INDICATION\n",
+ __func__);
irlmp_next_lap_state(self, LAP_STANDBY);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
- __func__, irlmp_event[event]);
+ pr_debug("%s(), Unknown event %s\n",
+ __func__, irlmp_event[event]);
break;
}
}
@@ -306,7 +297,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(2, "%s(), event=%s\n", __func__, irlmp_event[event]);
+ pr_debug("%s(), event=%s\n", __func__, irlmp_event[event]);
switch (event) {
case LM_LAP_CONNECT_INDICATION:
@@ -326,7 +317,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
* the lsaps may already have gone. This avoid getting stuck
* forever in LAP_ACTIVE state - Jean II */
if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
- IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__);
+ pr_debug("%s() NO LSAPs !\n", __func__);
irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
}
break;
@@ -344,12 +335,12 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
* the lsaps may already have gone. This avoid getting stuck
* forever in LAP_ACTIVE state - Jean II */
if (HASHBIN_GET_SIZE(self->lsaps) == 0) {
- IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__);
+ pr_debug("%s() NO LSAPs !\n", __func__);
irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
}
break;
case LM_LAP_DISCONNECT_INDICATION:
- IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__);
+ pr_debug("%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__);
irlmp_next_lap_state(self, LAP_STANDBY);
/* Send disconnect event to all LSAPs using this link */
@@ -357,7 +348,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
LM_LAP_DISCONNECT_INDICATION);
break;
case LM_LAP_DISCONNECT_REQUEST:
- IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__);
+ pr_debug("%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__);
/* One of the LSAP did timeout or was closed, if it was
* the last one, try to get out of here - Jean II */
@@ -366,7 +357,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
}
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
+ pr_debug("%s(), Unknown event %s\n",
__func__, irlmp_event[event]);
break;
}
@@ -381,11 +372,9 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
switch (event) {
case LM_LAP_CONNECT_REQUEST:
- IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __func__);
+ pr_debug("%s(), LS_CONNECT_REQUEST\n", __func__);
/*
* IrLAP may have a pending disconnect. We tried to close
@@ -467,7 +456,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
irlmp_do_expiry();
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s\n",
+ pr_debug("%s(), Unknown event %s\n",
__func__, irlmp_event[event]);
break;
}
@@ -490,8 +479,6 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
{
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -505,11 +492,11 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
break;
#endif /* CONFIG_IRDA_ULTRA */
case LM_CONNECT_REQUEST:
- IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __func__);
+ pr_debug("%s(), LM_CONNECT_REQUEST\n", __func__);
if (self->conn_skb) {
- IRDA_WARNING("%s: busy with another request!\n",
- __func__);
+ net_warn_ratelimited("%s: busy with another request!\n",
+ __func__);
return -EBUSY;
}
/* Don't forget to refcount it (see irlmp_connect_request()) */
@@ -525,8 +512,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
break;
case LM_CONNECT_INDICATION:
if (self->conn_skb) {
- IRDA_WARNING("%s: busy with another request!\n",
- __func__);
+ net_warn_ratelimited("%s: busy with another request!\n",
+ __func__);
return -EBUSY;
}
/* Don't forget to refcount it (see irlap_driver_rcv()) */
@@ -551,8 +538,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
@@ -570,8 +557,6 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
struct lsap_cb *lsap;
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -603,7 +588,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
case LM_WATCHDOG_TIMEOUT:
/* May happen, who knows...
* Jean II */
- IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
/* Disconnect, get out... - Jean II */
self->lap = NULL;
@@ -613,8 +598,8 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
default:
/* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
* are *not* yet bound to the IrLAP link. Jean II */
- IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
@@ -632,8 +617,6 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
struct sk_buff *tx_skb;
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
@@ -642,17 +625,17 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
/* Keep state */
break;
case LM_CONNECT_RESPONSE:
- IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, "
- "no indication issued yet\n", __func__);
+ pr_debug("%s(), LM_CONNECT_RESPONSE, no indication issued yet\n",
+ __func__);
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
- IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, "
- "not yet bound to IrLAP connection\n", __func__);
+ pr_debug("%s(), LM_DISCONNECT_REQUEST, not yet bound to IrLAP connection\n",
+ __func__);
/* Keep state */
break;
case LM_LAP_CONNECT_CONFIRM:
- IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __func__);
+ pr_debug("%s(), LS_CONNECT_CONFIRM\n", __func__);
irlmp_next_lsap_state(self, LSAP_CONNECT);
tx_skb = self->conn_skb;
@@ -666,7 +649,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
/* Will happen in some rare cases because of a race condition.
* Just make sure we don't stay there forever...
* Jean II */
- IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
/* Go back to disconnected mode, keep the socket waiting */
self->lap = NULL;
@@ -679,8 +662,8 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
default:
/* LM_LAP_DISCONNECT_INDICATION : Should never happen, we
* are *not* yet bound to the IrLAP link. Jean II */
- IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
@@ -698,8 +681,6 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
LM_REASON reason;
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
IRDA_ASSERT(self->lap != NULL, return -1;);
@@ -721,13 +702,13 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_udata_indication(self, skb);
break;
case LM_CONNECT_REQUEST:
- IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, "
- "error, LSAP already connected\n", __func__);
+ pr_debug("%s(), LM_CONNECT_REQUEST, error, LSAP already connected\n",
+ __func__);
/* Keep state */
break;
case LM_CONNECT_RESPONSE:
- IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, "
- "error, LSAP already connected\n", __func__);
+ pr_debug("%s(), LM_CONNECT_RESPONSE, error, LSAP already connected\n",
+ __func__);
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
@@ -739,8 +720,8 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
/* Try to close the LAP connection if its still there */
if (self->lap) {
- IRDA_DEBUG(4, "%s(), trying to close IrLAP\n",
- __func__);
+ pr_debug("%s(), trying to close IrLAP\n",
+ __func__);
irlmp_do_lap_event(self->lap,
LM_LAP_DISCONNECT_REQUEST,
NULL);
@@ -764,14 +745,14 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
reason = skb->data[3];
/* Try to close the LAP connection */
- IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__);
+ pr_debug("%s(), trying to close IrLAP\n", __func__);
irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
irlmp_disconnect_indication(self, reason, skb);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
@@ -793,8 +774,6 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
- IRDA_DEBUG(4, "%s()\n", __func__);
-
switch (event) {
case LM_CONNECT_CONFIRM:
irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY);
@@ -814,7 +793,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
reason = skb->data[3];
/* Try to close the LAP connection */
- IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__);
+ pr_debug("%s(), trying to close IrLAP\n", __func__);
irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
irlmp_disconnect_indication(self, reason, skb);
@@ -832,7 +811,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, skb);
break;
case LM_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__);
+ pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__);
IRDA_ASSERT(self->lap != NULL, return -1;);
irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
@@ -841,8 +820,8 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
@@ -863,8 +842,6 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
LM_REASON reason;
int ret = 0;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(irlmp != NULL, return -1;);
@@ -883,7 +860,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_SETUP);
break;
case LM_WATCHDOG_TIMEOUT:
- IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __func__);
+ pr_debug("%s() : WATCHDOG_TIMEOUT !\n", __func__);
IRDA_ASSERT(self->lap != NULL, return -1;);
irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
@@ -901,8 +878,8 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, NULL);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
- __func__, irlmp_event[event], self->slsap_sel);
+ pr_debug("%s(), Unknown event %s on LSAP %#02x\n",
+ __func__, irlmp_event[event], self->slsap_sel);
break;
}
return ret;
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 062e63b1c5c4..38b0f994bc7b 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
skb->data[1] = slsap;
if (expedited) {
- IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__);
+ pr_debug("%s(), sending expedited data\n", __func__);
irlap_data_request(self->irlap, skb, TRUE);
} else
irlap_data_request(self->irlap, skb, FALSE);
@@ -60,8 +60,6 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
{
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -95,8 +93,6 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
__u8 dlsap_sel; /* Destination LSAP address */
__u8 *fp;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
IRDA_ASSERT(skb->len > 2, return;);
@@ -115,9 +111,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
* it in a different way than other established connections.
*/
if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
- IRDA_DEBUG(3, "%s(), incoming connection, "
- "source LSAP=%d, dest LSAP=%d\n",
- __func__, slsap_sel, dlsap_sel);
+ pr_debug("%s(), incoming connection, source LSAP=%d, dest LSAP=%d\n",
+ __func__, slsap_sel, dlsap_sel);
/* Try to find LSAP among the unconnected LSAPs */
lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
@@ -125,7 +120,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
/* Maybe LSAP was already connected, so try one more time */
if (!lsap) {
- IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__);
+ pr_debug("%s(), incoming connection for LSAP already connected\n",
+ __func__);
lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
self->lsaps);
}
@@ -134,14 +130,14 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
self->lsaps);
if (lsap == NULL) {
- IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
- IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
- __func__, slsap_sel, dlsap_sel);
+ pr_debug("IrLMP, Sorry, no LSAP for received frame!\n");
+ pr_debug("%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
+ __func__, slsap_sel, dlsap_sel);
if (fp[0] & CONTROL_BIT) {
- IRDA_DEBUG(2, "%s(), received control frame %02x\n",
- __func__, fp[2]);
+ pr_debug("%s(), received control frame %02x\n",
+ __func__, fp[2]);
} else {
- IRDA_DEBUG(2, "%s(), received data frame\n", __func__);
+ pr_debug("%s(), received data frame\n", __func__);
}
return;
}
@@ -159,20 +155,20 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
break;
case DISCONNECT:
- IRDA_DEBUG(4, "%s(), Disconnect indication!\n",
- __func__);
+ pr_debug("%s(), Disconnect indication!\n",
+ __func__);
irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
skb);
break;
case ACCESSMODE_CMD:
- IRDA_DEBUG(0, "Access mode cmd not implemented!\n");
+ pr_debug("Access mode cmd not implemented!\n");
break;
case ACCESSMODE_CNF:
- IRDA_DEBUG(0, "Access mode cnf not implemented!\n");
+ pr_debug("Access mode cnf not implemented!\n");
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n",
- __func__, fp[2]);
+ pr_debug("%s(), Unknown control frame %02x\n",
+ __func__, fp[2]);
break;
}
} else if (unreliable) {
@@ -206,8 +202,6 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
__u8 *fp;
unsigned long flags;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
IRDA_ASSERT(skb->len > 2, return;);
@@ -223,14 +217,14 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
pid = fp[2];
if (pid & 0x80) {
- IRDA_DEBUG(0, "%s(), extension in PID not supp!\n",
- __func__);
+ pr_debug("%s(), extension in PID not supp!\n",
+ __func__);
return;
}
/* Check if frame is addressed to the connectionless LSAP */
if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
- IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__);
+ pr_debug("%s(), dropping frame!\n", __func__);
return;
}
@@ -254,7 +248,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
if (lsap)
irlmp_connless_data_indication(lsap, skb);
else {
- IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__);
+ pr_debug("%s(), found no matching LSAP!\n", __func__);
}
}
#endif /* CONFIG_IRDA_ULTRA */
@@ -270,8 +264,6 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap,
LAP_REASON reason,
struct sk_buff *skb)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(lap != NULL, return;);
IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
@@ -296,8 +288,6 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
__u32 daddr, struct qos_info *qos,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* Copy QoS settings for this session */
self->qos = qos;
@@ -317,8 +307,6 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
struct sk_buff *skb)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
IRDA_ASSERT(qos != NULL, return;);
@@ -383,8 +371,6 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
*/
void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
{
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 303a68d92731..c5e35b85c477 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -42,16 +42,6 @@
#include <net/irda/irttp.h> /* irttp_init */
#include <net/irda/irda_device.h> /* irda_device_init */
-/*
- * Module parameters
- */
-#ifdef CONFIG_IRDA_DEBUG
-unsigned int irda_debug = IRDA_DEBUG_LEVEL;
-module_param_named(debug, irda_debug, uint, 0);
-MODULE_PARM_DESC(debug, "IRDA debugging level");
-EXPORT_SYMBOL(irda_debug);
-#endif
-
/* Packet type handler.
* Tell the kernel how IrDA packets should be handled.
*/
@@ -90,8 +80,6 @@ static int __init irda_init(void)
{
int ret = 0;
- IRDA_DEBUG(0, "%s()\n", __func__);
-
/* Lower layer of the stack */
irlmp_init();
irlap_init();
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index a37b81fe0479..e15c40e86660 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -41,7 +41,7 @@ static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *i
ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);
- IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname);
+ pr_debug("%s(): Looking for %s\n", __func__, ifname);
return dev_get_by_name(net, ifname);
}
@@ -57,7 +57,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);
- IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode);
+ pr_debug("%s(): Switching to mode: %d\n", __func__, mode);
dev = ifname_to_netdev(&init_net, info);
if (!dev)
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 7152624ed5f1..acbe61c7e683 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -233,8 +233,6 @@ static __u32 hash( const char* name)
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
/*
* Check if queue is empty.
*/
@@ -267,7 +265,7 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
irda_queue_t *ret;
- IRDA_DEBUG( 4, "dequeue_first()\n");
+ pr_debug("dequeue_first()\n");
/*
* Set return value
@@ -308,7 +306,7 @@ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element
{
irda_queue_t *ret;
- IRDA_DEBUG( 4, "dequeue_general()\n");
+ pr_debug("dequeue_general()\n");
/*
* Set return value
@@ -452,8 +450,6 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
unsigned long flags = 0;
int bin;
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
IRDA_ASSERT( hashbin != NULL, return;);
IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);
@@ -565,8 +561,6 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
unsigned long flags = 0;
irda_queue_t* entry;
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
IRDA_ASSERT( hashbin != NULL, return NULL;);
IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
@@ -658,8 +652,6 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
int bin;
long hashv;
- IRDA_DEBUG( 4, "%s()\n", __func__);
-
IRDA_ASSERT( hashbin != NULL, return NULL;);
IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
IRDA_ASSERT( entry != NULL, return NULL;);
@@ -719,7 +711,7 @@ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
int bin;
irda_queue_t* entry;
- IRDA_DEBUG( 4, "hashbin_find()\n");
+ pr_debug("hashbin_find()\n");
IRDA_ASSERT( hashbin != NULL, return NULL;);
IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d6a59651767a..873da5e7d428 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -126,15 +126,6 @@ static struct ctl_table irda_table[] = {
.mode = 0644,
.proc_handler = do_devname,
},
-#ifdef CONFIG_IRDA_DEBUG
- {
- .procname = "debug",
- .data = &irda_debug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
-#endif
#ifdef CONFIG_IRDA_FAST_RR
{
.procname = "fast_poll_increase",
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 85372cfa7b9f..b6ab41d5b3a3 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -71,11 +71,13 @@ static void irttp_status_indication(void *instance,
LINK_STATUS link, LOCK_STATUS lock);
/* Information for parsing parameters in IrTTP */
-static pi_minor_info_t pi_minor_call_table[] = {
+static const pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
-static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
+static const pi_major_info_t pi_major_call_table[] = {
+ { pi_minor_call_table, 2 }
+};
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
@@ -96,8 +98,8 @@ int __init irttp_init(void)
irttp->tsaps = hashbin_new(HB_LOCK);
if (!irttp->tsaps) {
- IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n",
- __func__);
+ net_err_ratelimited("%s: can't allocate IrTTP hashbin!\n",
+ __func__);
kfree(irttp);
return -ENOMEM;
}
@@ -166,7 +168,7 @@ static void irttp_todo_expired(unsigned long data)
if (!self || self->magic != TTP_TSAP_MAGIC)
return;
- IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
+ pr_debug("%s(instance=%p)\n", __func__, self);
/* Try to make some progress, especially on Tx side - Jean II */
irttp_run_rx_queue(self);
@@ -207,8 +209,6 @@ static void irttp_flush_queues(struct tsap_cb *self)
{
struct sk_buff *skb;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -240,8 +240,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
IRDA_ASSERT(self != NULL, return NULL;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
- IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__,
- self->rx_sdu_size);
+ pr_debug("%s(), self->rx_sdu_size=%d\n", __func__,
+ self->rx_sdu_size);
skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size);
if (!skb)
@@ -264,9 +264,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
dev_kfree_skb(frag);
}
- IRDA_DEBUG(2,
- "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
- __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
+ pr_debug("%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n",
+ __func__, n, self->rx_sdu_size, self->rx_max_sdu_size);
/* Note : irttp_run_rx_queue() calculate self->rx_sdu_size
* by summing the size of all fragments, so we should always
* have n == self->rx_sdu_size, except in cases where we
@@ -295,8 +294,6 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
struct sk_buff *frag;
__u8 *frame;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
@@ -305,7 +302,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
* Split frame into a number of segments
*/
while (skb->len > self->max_seg_size) {
- IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__);
+ pr_debug("%s(), fragmenting ...\n", __func__);
/* Make new segment */
frag = alloc_skb(self->max_seg_size+self->max_header_size,
@@ -330,7 +327,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self,
skb_queue_tail(&self->tx_queue, frag);
}
/* Queue what is left of the original skb */
- IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__);
+ pr_debug("%s(), queuing last segment\n", __func__);
frame = skb_push(skb, TTP_HEADER);
frame[0] = 0x00; /* Clear more bit */
@@ -361,7 +358,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
else
self->tx_max_sdu_size = param->pv.i;
- IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i);
+ pr_debug("%s(), MaxSduSize=%d\n", __func__, param->pv.i);
return 0;
}
@@ -402,15 +399,13 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
* JeanII */
if ((stsap_sel != LSAP_ANY) &&
((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
- IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
+ pr_debug("%s(), invalid tsap!\n", __func__);
return NULL;
}
self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
- if (self == NULL) {
- IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__);
+ if (self == NULL)
return NULL;
- }
/* Initialize internal objects */
irttp_init_tsap(self);
@@ -440,7 +435,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
*/
lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
if (lsap == NULL) {
- IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
+ pr_debug("%s: unable to allocate LSAP!!\n", __func__);
__irttp_close_tsap(self);
return NULL;
}
@@ -451,7 +446,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
* the stsap_sel we have might not be valid anymore
*/
self->stsap_sel = lsap->slsap_sel;
- IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
+ pr_debug("%s(), stsap_sel=%02x\n", __func__, self->stsap_sel);
self->notify = *notify;
self->lsap = lsap;
@@ -509,8 +504,6 @@ int irttp_close_tsap(struct tsap_cb *self)
{
struct tsap_cb *tsap;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -518,8 +511,8 @@ int irttp_close_tsap(struct tsap_cb *self)
if (self->connected) {
/* Check if disconnect is not pending */
if (!test_bit(0, &self->disconnect_pend)) {
- IRDA_WARNING("%s: TSAP still connected!\n",
- __func__);
+ net_warn_ratelimited("%s: TSAP still connected!\n",
+ __func__);
irttp_disconnect_request(self, NULL, P_NORMAL);
}
self->close_pend = TRUE;
@@ -558,8 +551,6 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* Take shortcut on zero byte packets */
if (skb->len == 0) {
ret = 0;
@@ -568,13 +559,14 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
/* Check that nothing bad happens */
if (!self->connected) {
- IRDA_WARNING("%s(), Not connected\n", __func__);
+ net_warn_ratelimited("%s(), Not connected\n", __func__);
ret = -ENOTCONN;
goto err;
}
if (skb->len > self->max_seg_size) {
- IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
+ net_err_ratelimited("%s(), UData is too large for IrLAP!\n",
+ __func__);
ret = -EMSGSIZE;
goto err;
}
@@ -606,8 +598,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
- IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
- skb_queue_len(&self->tx_queue));
+ pr_debug("%s() : queue len = %d\n", __func__,
+ skb_queue_len(&self->tx_queue));
/* Take shortcut on zero byte packets */
if (skb->len == 0) {
@@ -617,7 +609,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
/* Check that nothing bad happens */
if (!self->connected) {
- IRDA_WARNING("%s: Not connected\n", __func__);
+ net_warn_ratelimited("%s: Not connected\n", __func__);
ret = -ENOTCONN;
goto err;
}
@@ -627,8 +619,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
* inside an IrLAP frame
*/
if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
- IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
- __func__);
+ net_err_ratelimited("%s: SAR disabled, and data is too large for IrLAP!\n",
+ __func__);
ret = -EMSGSIZE;
goto err;
}
@@ -640,8 +632,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
if ((self->tx_max_sdu_size != 0) &&
(self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
(skb->len > self->tx_max_sdu_size)) {
- IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
- __func__);
+ net_err_ratelimited("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
+ __func__);
ret = -EMSGSIZE;
goto err;
}
@@ -719,9 +711,9 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
unsigned long flags;
int n;
- IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
- __func__,
- self->send_credit, skb_queue_len(&self->tx_queue));
+ pr_debug("%s() : send_credit = %d, queue_len = %d\n",
+ __func__,
+ self->send_credit, skb_queue_len(&self->tx_queue));
/* Get exclusive access to the tx queue, otherwise don't touch it */
if (irda_lock(&self->tx_queue_lock) == FALSE)
@@ -826,9 +818,9 @@ static inline void irttp_give_credit(struct tsap_cb *self)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
- __func__,
- self->send_credit, self->avail_credit, self->remote_credit);
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n",
+ __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
/* Give credit to peer */
tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
@@ -876,8 +868,6 @@ static int irttp_udata_indication(void *instance, void *sap,
struct tsap_cb *self;
int err;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(self != NULL, return -1;);
@@ -993,8 +983,6 @@ static void irttp_status_indication(void *instance,
{
struct tsap_cb *self;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(self != NULL, return;);
@@ -1011,7 +999,7 @@ static void irttp_status_indication(void *instance,
self->notify.status_indication(self->notify.instance,
link, lock);
else
- IRDA_DEBUG(2, "%s(), no handler\n", __func__);
+ pr_debug("%s(), no handler\n", __func__);
}
/*
@@ -1029,7 +1017,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);
+ pr_debug("%s(instance=%p)\n", __func__, self);
/* We are "polled" directly from LAP, and the LAP want to fill
* its Tx window. We want to do our best to send it data, so that
@@ -1067,18 +1055,16 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
*/
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
switch (flow) {
case FLOW_STOP:
- IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
+ pr_debug("%s(), flow stop\n", __func__);
self->rx_sdu_busy = TRUE;
break;
case FLOW_START:
- IRDA_DEBUG(1, "%s(), flow start\n", __func__);
+ pr_debug("%s(), flow start\n", __func__);
self->rx_sdu_busy = FALSE;
/* Client say he can accept more data, try to free our
@@ -1087,7 +1073,7 @@ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
break;
default:
- IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
+ pr_debug("%s(), Unknown flow command!\n", __func__);
}
}
EXPORT_SYMBOL(irttp_flow_request);
@@ -1107,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
__u8 *frame;
__u8 n;
- IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
+ pr_debug("%s(), max_sdu_size=%d\n", __func__, max_sdu_size);
IRDA_ASSERT(self != NULL, return -EBADR;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
@@ -1205,8 +1191,6 @@ static void irttp_connect_confirm(void *instance, void *sap,
__u8 plen;
__u8 n;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(self != NULL, return;);
@@ -1221,15 +1205,15 @@ static void irttp_connect_confirm(void *instance, void *sap,
* negotiated QoS for the link.
*/
if (qos) {
- IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
- qos->baud_rate.bits);
- IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
- qos->baud_rate.value);
+ pr_debug("IrTTP, Negotiated BAUD_RATE: %02x\n",
+ qos->baud_rate.bits);
+ pr_debug("IrTTP, Negotiated BAUD_RATE: %d bps.\n",
+ qos->baud_rate.value);
}
n = skb->data[0] & 0x7f;
- IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);
+ pr_debug("%s(), Initial send_credit=%d\n", __func__, n);
self->send_credit = n;
self->tx_max_sdu_size = 0;
@@ -1249,8 +1233,8 @@ static void irttp_connect_confirm(void *instance, void *sap,
/* Any errors in the parameter list? */
if (ret < 0) {
- IRDA_WARNING("%s: error extracting parameters\n",
- __func__);
+ net_warn_ratelimited("%s: error extracting parameters\n",
+ __func__);
dev_kfree_skb(skb);
/* Do not accept this connection attempt */
@@ -1260,11 +1244,11 @@ static void irttp_connect_confirm(void *instance, void *sap,
skb_pull(skb, IRDA_MIN(skb->len, plen+1));
}
- IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
- self->send_credit, self->avail_credit, self->remote_credit);
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
- IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
- self->tx_max_sdu_size);
+ pr_debug("%s(), MaxSduSize=%d\n", __func__,
+ self->tx_max_sdu_size);
if (self->notify.connect_confirm) {
self->notify.connect_confirm(self->notify.instance, self, qos,
@@ -1302,7 +1286,7 @@ static void irttp_connect_indication(void *instance, void *sap,
self->max_seg_size = max_seg_size - TTP_HEADER;
self->max_header_size = max_header_size+TTP_HEADER;
- IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
+ pr_debug("%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);
/* Need to update dtsap_sel if its equal to LSAP_ANY */
self->dtsap_sel = lsap->dlsap_sel;
@@ -1326,8 +1310,8 @@ static void irttp_connect_indication(void *instance, void *sap,
/* Any errors in the parameter list? */
if (ret < 0) {
- IRDA_WARNING("%s: error extracting parameters\n",
- __func__);
+ net_warn_ratelimited("%s: error extracting parameters\n",
+ __func__);
dev_kfree_skb(skb);
/* Do not accept this connection attempt */
@@ -1364,8 +1348,8 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
- IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
- self->stsap_sel);
+ pr_debug("%s(), Source TSAP selector=%02x\n", __func__,
+ self->stsap_sel);
/* Any userdata supplied? */
if (userdata == NULL) {
@@ -1446,14 +1430,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
struct tsap_cb *new;
unsigned long flags;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
/* Protect our access to the old tsap instance */
spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
/* Find the old instance */
if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
- IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
+ pr_debug("%s(), unable to find TSAP\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
@@ -1461,7 +1443,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
/* Allocate a new instance */
new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
- IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
+ pr_debug("%s(), unable to kmalloc\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
@@ -1473,7 +1455,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
/* Try to dup the LSAP (may fail if we were too slow) */
new->lsap = irlmp_dup(orig->lsap, new);
if (!new->lsap) {
- IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
+ pr_debug("%s(), dup failed!\n", __func__);
kfree(new);
return NULL;
}
@@ -1508,7 +1490,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
/* Already disconnected? */
if (!self->connected) {
- IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
+ pr_debug("%s(), already disconnected!\n", __func__);
if (userdata)
dev_kfree_skb(userdata);
return -1;
@@ -1520,8 +1502,8 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* for following a disconnect_indication() (i.e. net_bh).
* Jean II */
if (test_and_set_bit(0, &self->disconnect_pend)) {
- IRDA_DEBUG(0, "%s(), disconnect already pending\n",
- __func__);
+ pr_debug("%s(), disconnect already pending\n",
+ __func__);
if (userdata)
dev_kfree_skb(userdata);
@@ -1540,7 +1522,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* disconnecting right now since the data will
* not have any usable connection to be sent on
*/
- IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
+ pr_debug("%s(): High priority!!()\n", __func__);
irttp_flush_queues(self);
} else if (priority == P_NORMAL) {
/*
@@ -1561,7 +1543,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* be sent at the LMP level (so even if the peer has its Tx queue
* full of data). - Jean II */
- IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
+ pr_debug("%s(), Disconnecting ...\n", __func__);
self->connected = FALSE;
if (!userdata) {
@@ -1597,8 +1579,6 @@ static void irttp_disconnect_indication(void *instance, void *sap,
{
struct tsap_cb *self;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
self = instance;
IRDA_ASSERT(self != NULL, return;);
@@ -1657,7 +1637,7 @@ static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
* give an error back
*/
if (err) {
- IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);
+ pr_debug("%s() requeueing skb!\n", __func__);
/* Make sure we take a break */
self->rx_sdu_busy = TRUE;
@@ -1682,8 +1662,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
struct sk_buff *skb;
int more = 0;
- IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
- self->send_credit, self->avail_credit, self->remote_credit);
+ pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__,
+ self->send_credit, self->avail_credit, self->remote_credit);
/* Get exclusive access to the rx queue, otherwise don't touch it */
if (irda_lock(&self->rx_queue_lock) == FALSE)
@@ -1722,8 +1702,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
* limits of the maximum size of the rx_sdu
*/
if (self->rx_sdu_size <= self->rx_max_sdu_size) {
- IRDA_DEBUG(4, "%s(), queueing frag\n",
- __func__);
+ pr_debug("%s(), queueing frag\n",
+ __func__);
skb_queue_tail(&self->rx_fragments, skb);
} else {
/* Free the part of the SDU that is too big */
@@ -1752,7 +1732,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
/* Now we can deliver the reassembled skb */
irttp_do_data_indication(self, skb);
} else {
- IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);
+ pr_debug("%s(), Truncated frame\n", __func__);
/* Free the part of the SDU that is too big */
dev_kfree_skb(skb);
diff --git a/net/irda/parameters.c b/net/irda/parameters.c
index 6d0869716bf6..16ce32ffe004 100644
--- a/net/irda/parameters.c
+++ b/net/irda/parameters.c
@@ -52,7 +52,7 @@ static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi,
static int irda_param_unpack(__u8 *buf, char *fmt, ...);
/* Parameter value call table. Must match PV_TYPE */
-static PV_HANDLER pv_extract_table[] = {
+static const PV_HANDLER pv_extract_table[] = {
irda_extract_integer, /* Handler for any length integers */
irda_extract_integer, /* Handler for 8 bits integers */
irda_extract_integer, /* Handler for 16 bits integers */
@@ -62,7 +62,7 @@ static PV_HANDLER pv_extract_table[] = {
irda_extract_no_value /* Handler for no value parameters */
};
-static PV_HANDLER pv_insert_table[] = {
+static const PV_HANDLER pv_insert_table[] = {
irda_insert_integer, /* Handler for any length integers */
irda_insert_integer, /* Handler for 8 bits integers */
irda_insert_integer, /* Handler for 16 bits integers */
@@ -146,24 +146,24 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
*/
if (p.pl == 0) {
if (p.pv.i < 0xff) {
- IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__);
+ pr_debug("%s(), using 1 byte\n", __func__);
p.pl = 1;
} else if (p.pv.i < 0xffff) {
- IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__);
+ pr_debug("%s(), using 2 bytes\n", __func__);
p.pl = 2;
} else {
- IRDA_DEBUG(2, "%s(), using 4 bytes\n", __func__);
+ pr_debug("%s(), using 4 bytes\n", __func__);
p.pl = 4; /* Default length */
}
}
/* Check if buffer is long enough for insertion */
if (len < (2+p.pl)) {
- IRDA_WARNING("%s: buffer too short for insertion!\n",
- __func__);
+ net_warn_ratelimited("%s: buffer too short for insertion!\n",
+ __func__);
return -1;
}
- IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
- p.pi, p.pl, p.pv.i);
+ pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
+ p.pi, p.pl, p.pv.i);
switch (p.pl) {
case 1:
n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i);
@@ -184,8 +184,8 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi,
break;
default:
- IRDA_WARNING("%s: length %d not supported\n",
- __func__, p.pl);
+ net_warn_ratelimited("%s: length %d not supported\n",
+ __func__, p.pl);
/* Skip parameter */
return -1;
}
@@ -214,9 +214,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
/* Check if buffer is long enough for parsing */
if (len < (2+p.pl)) {
- IRDA_WARNING("%s: buffer too short for parsing! "
- "Need %d bytes, but len is only %d\n",
- __func__, p.pl, len);
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
return -1;
}
@@ -226,9 +225,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
* PV_INTEGER means that the handler is flexible.
*/
if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
- IRDA_ERROR("%s: invalid parameter length! "
- "Expected %d bytes, but value had %d bytes!\n",
- __func__, type & PV_MASK, p.pl);
+ net_err_ratelimited("%s: invalid parameter length! Expected %d bytes, but value had %d bytes!\n",
+ __func__, type & PV_MASK, p.pl);
/* Most parameters are bit/byte fields or little endian,
* so it's ok to only extract a subset of it (the subset
@@ -265,15 +263,15 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
le32_to_cpus(&p.pv.i);
break;
default:
- IRDA_WARNING("%s: length %d not supported\n",
- __func__, p.pl);
+ net_warn_ratelimited("%s: length %d not supported\n",
+ __func__, p.pl);
/* Skip parameter */
return p.pl+2;
}
- IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
- p.pi, p.pl, p.pv.i);
+ pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__,
+ p.pi, p.pl, p.pv.i);
/* Call handler for this parameter */
err = (*func)(self, &p, PV_PUT);
if (err < 0)
@@ -292,21 +290,18 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
irda_param_t p;
int err;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
p.pi = pi; /* In case handler needs to know */
p.pl = buf[1]; /* Extract length of value */
if (p.pl > 32)
p.pl = 32;
- IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
- p.pi, p.pl);
+ pr_debug("%s(), pi=%#x, pl=%d\n", __func__,
+ p.pi, p.pl);
/* Check if buffer is long enough for parsing */
if (len < (2+p.pl)) {
- IRDA_WARNING("%s: buffer too short for parsing! "
- "Need %d bytes, but len is only %d\n",
- __func__, p.pl, len);
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
return -1;
}
@@ -314,8 +309,8 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
* checked that the buffer is long enough */
strncpy(str, buf+2, p.pl);
- IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__,
- (__u8) str[0], (__u8) str[1]);
+ pr_debug("%s(), str=0x%02x 0x%02x\n",
+ __func__, (__u8)str[0], (__u8)str[1]);
/* Null terminate string */
str[p.pl] = '\0';
@@ -343,13 +338,12 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi,
/* Check if buffer is long enough for parsing */
if (len < (2+p.pl)) {
- IRDA_WARNING("%s: buffer too short for parsing! "
- "Need %d bytes, but len is only %d\n",
- __func__, p.pl, len);
+ net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n",
+ __func__, p.pl, len);
return -1;
}
- IRDA_DEBUG(0, "%s(), not impl\n", __func__);
+ pr_debug("%s(), not impl\n", __func__);
return p.pl+2; /* Extracted pl+2 bytes */
}
@@ -455,7 +449,7 @@ static int irda_param_unpack(__u8 *buf, char *fmt, ...)
int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
pi_param_info_t *info)
{
- pi_minor_info_t *pi_minor_info;
+ const pi_minor_info_t *pi_minor_info;
__u8 pi_minor;
__u8 pi_major;
int type;
@@ -472,8 +466,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
if ((pi_major > info->len-1) ||
(pi_minor > info->tables[pi_major].len-1))
{
- IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
- __func__, pi);
+ pr_debug("%s(), no handler for parameter=0x%02x\n",
+ __func__, pi);
/* Skip this parameter */
return -1;
@@ -487,7 +481,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
/* Check if handler has been implemented */
if (!pi_minor_info->func) {
- IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi);
+ net_info_ratelimited("%s: no handler for pi=%#x\n",
+ __func__, pi);
/* Skip this parameter */
return -1;
}
@@ -509,7 +504,7 @@ EXPORT_SYMBOL(irda_param_insert);
static int irda_param_extract(void *self, __u8 *buf, int len,
pi_param_info_t *info)
{
- pi_minor_info_t *pi_minor_info;
+ const pi_minor_info_t *pi_minor_info;
__u8 pi_minor;
__u8 pi_major;
int type;
@@ -526,8 +521,8 @@ static int irda_param_extract(void *self, __u8 *buf, int len,
if ((pi_major > info->len-1) ||
(pi_minor > info->tables[pi_major].len-1))
{
- IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
- __func__, buf[0]);
+ pr_debug("%s(), no handler for parameter=0x%02x\n",
+ __func__, buf[0]);
/* Skip this parameter */
return 2 + buf[n + 1]; /* Continue */
@@ -539,13 +534,13 @@ static int irda_param_extract(void *self, __u8 *buf, int len,
/* Find expected data type for this parameter identifier (pi)*/
type = pi_minor_info->type;
- IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__,
- pi_major, pi_minor, type);
+ pr_debug("%s(), pi=[%d,%d], type=%d\n", __func__,
+ pi_major, pi_minor, type);
/* Check if handler has been implemented */
if (!pi_minor_info->func) {
- IRDA_MESSAGE("%s: no handler for pi=%#x\n",
- __func__, buf[n]);
+ net_info_ratelimited("%s: no handler for pi=%#x\n",
+ __func__, buf[n]);
/* Skip this parameter */
return 2 + buf[n + 1]; /* Continue */
}
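
The parameters.c hunks above are a straight logging conversion: the IrDA-private IRDA_DEBUG()/IRDA_WARNING()/IRDA_ERROR()/IRDA_MESSAGE() macros are replaced by the generic pr_debug() and the ratelimited net_warn_ratelimited()/net_err_ratelimited()/net_info_ratelimited() helpers, and the parameter-table pointers gain a const qualifier. As a minimal sketch of what a converted caller looks like (illustrative only, not code from the patch; example_check_len is a made-up name):

#include <linux/kernel.h>	/* pr_debug() */
#include <linux/net.h>		/* net_warn_ratelimited() */

static int example_check_len(int len, int needed)
{
	/* Silent unless DEBUG or CONFIG_DYNAMIC_DEBUG enables this site. */
	pr_debug("%s(), len=%d, needed=%d\n", __func__, len, needed);

	if (len < needed) {
		/* Printed at KERN_WARNING, throttled by net_ratelimit(). */
		net_warn_ratelimited("%s: buffer too short (%d < %d)\n",
				     __func__, len, needed);
		return -1;
	}
	return 0;
}

The ratelimiting matters here because these warnings fire on malformed frames from the peer, which a flaky link (or a hostile one) could otherwise use to flood the log.
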
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 11a7cc0cbc28..25ba8509ad3e 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -122,7 +122,7 @@ static __u32 max_line_capacities[10][4] = {
{ 800000, 400000, 160000, 80000 }, /* 16000000 bps */
};
-static pi_minor_info_t pi_minor_call_table_type_0[] = {
+static const pi_minor_info_t pi_minor_call_table_type_0[] = {
{ NULL, 0 },
/* 01 */{ irlap_param_baud_rate, PV_INTEGER | PV_LITTLE_ENDIAN },
{ NULL, 0 },
@@ -134,7 +134,7 @@ static pi_minor_info_t pi_minor_call_table_type_0[] = {
/* 08 */{ irlap_param_link_disconnect, PV_INT_8_BITS }
};
-static pi_minor_info_t pi_minor_call_table_type_1[] = {
+static const pi_minor_info_t pi_minor_call_table_type_1[] = {
{ NULL, 0 },
{ NULL, 0 },
/* 82 */{ irlap_param_max_turn_time, PV_INT_8_BITS },
@@ -144,7 +144,7 @@ static pi_minor_info_t pi_minor_call_table_type_1[] = {
/* 86 */{ irlap_param_min_turn_time, PV_INT_8_BITS },
};
-static pi_major_info_t pi_major_call_table[] = {
+static const pi_major_info_t pi_major_call_table[] = {
{ pi_minor_call_table_type_0, 9 },
{ pi_minor_call_table_type_1, 7 },
};
@@ -200,8 +200,8 @@ static int msb_index (__u16 word)
* able to check precisely what's going on. If a end user sees this,
* it's very likely the peer. - Jean II */
if (word == 0) {
- IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
- __func__);
+ net_warn_ratelimited("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
+ __func__);
/* The only safe choice (we don't know the array size) */
word = 0x1;
}
@@ -342,8 +342,6 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
__u32 line_capacity;
int index;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/*
* Make sure the mintt is sensible.
* Main culprit : Ericsson T39. - Jean II
@@ -351,8 +349,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
if (sysctl_min_tx_turn_time > qos->min_turn_time.value) {
int i;
- IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n",
- __func__, sysctl_min_tx_turn_time);
+ net_warn_ratelimited("%s(), Detected buggy peer, adjust mtt to %dus!\n",
+ __func__, sysctl_min_tx_turn_time);
/* We don't really need bits, but easier this way */
i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
@@ -368,9 +366,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
if ((qos->baud_rate.value < 115200) &&
(qos->max_turn_time.value < 500))
{
- IRDA_DEBUG(0,
- "%s(), adjusting max turn time from %d to 500 ms\n",
- __func__, qos->max_turn_time.value);
+ pr_debug("%s(), adjusting max turn time from %d to 500 ms\n",
+ __func__, qos->max_turn_time.value);
qos->max_turn_time.value = 500;
}
@@ -385,8 +382,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
while ((qos->data_size.value > line_capacity) && (index > 0)) {
qos->data_size.value = data_sizes[index--];
- IRDA_DEBUG(2, "%s(), reducing data size to %d\n",
- __func__, qos->data_size.value);
+ pr_debug("%s(), reducing data size to %d\n",
+ __func__, qos->data_size.value);
}
#else /* Use method described in section 6.6.11 of IrLAP */
while (irlap_requested_line_capacity(qos) > line_capacity) {
@@ -395,15 +392,15 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
/* Must be able to send at least one frame */
if (qos->window_size.value > 1) {
qos->window_size.value--;
- IRDA_DEBUG(2, "%s(), reducing window size to %d\n",
- __func__, qos->window_size.value);
+ pr_debug("%s(), reducing window size to %d\n",
+ __func__, qos->window_size.value);
} else if (index > 1) {
qos->data_size.value = data_sizes[index--];
- IRDA_DEBUG(2, "%s(), reducing data size to %d\n",
- __func__, qos->data_size.value);
+ pr_debug("%s(), reducing data size to %d\n",
+ __func__, qos->data_size.value);
} else {
- IRDA_WARNING("%s(), nothing more we can do!\n",
- __func__);
+ net_warn_ratelimited("%s(), nothing more we can do!\n",
+ __func__);
}
}
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
@@ -440,20 +437,20 @@ int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb)
irlap_adjust_qos_settings(&self->qos_tx);
- IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n",
- self->qos_tx.baud_rate.value);
- IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n",
- self->qos_tx.data_size.value);
- IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n",
- self->qos_tx.window_size.value);
- IRDA_DEBUG(2, "Setting XBOFS to %d\n",
- self->qos_tx.additional_bofs.value);
- IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n",
- self->qos_tx.max_turn_time.value);
- IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n",
- self->qos_tx.min_turn_time.value);
- IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n",
- self->qos_tx.link_disc_time.value);
+ pr_debug("Setting BAUD_RATE to %d bps.\n",
+ self->qos_tx.baud_rate.value);
+ pr_debug("Setting DATA_SIZE to %d bytes\n",
+ self->qos_tx.data_size.value);
+ pr_debug("Setting WINDOW_SIZE to %d\n",
+ self->qos_tx.window_size.value);
+ pr_debug("Setting XBOFS to %d\n",
+ self->qos_tx.additional_bofs.value);
+ pr_debug("Setting MAX_TURN_TIME to %d ms.\n",
+ self->qos_tx.max_turn_time.value);
+ pr_debug("Setting MIN_TURN_TIME to %d usecs.\n",
+ self->qos_tx.min_turn_time.value);
+ pr_debug("Setting LINK_DISC to %d secs.\n",
+ self->qos_tx.link_disc_time.value);
return ret;
}
@@ -537,17 +534,17 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get)
if (get) {
param->pv.i = self->qos_rx.baud_rate.bits;
- IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n",
- __func__, param->pv.i);
+ pr_debug("%s(), baud rate = 0x%02x\n",
+ __func__, param->pv.i);
} else {
/*
* Stations must agree on baud rate, so calculate
* intersection
*/
- IRDA_DEBUG(2, "Requested BAUD_RATE: 0x%04x\n", (__u16) param->pv.i);
+ pr_debug("Requested BAUD_RATE: 0x%04x\n", (__u16)param->pv.i);
final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits;
- IRDA_DEBUG(2, "Final BAUD_RATE: 0x%04x\n", final);
+ pr_debug("Final BAUD_RATE: 0x%04x\n", final);
self->qos_tx.baud_rate.bits = final;
self->qos_rx.baud_rate.bits = final;
}
@@ -578,10 +575,10 @@ static int irlap_param_link_disconnect(void *instance, irda_param_t *param,
* Stations must agree on link disconnect/threshold
* time.
*/
- IRDA_DEBUG(2, "LINK_DISC: %02x\n", (__u8) param->pv.i);
+ pr_debug("LINK_DISC: %02x\n", (__u8)param->pv.i);
final = (__u8) param->pv.i & self->qos_rx.link_disc_time.bits;
- IRDA_DEBUG(2, "Final LINK_DISC: %02x\n", final);
+ pr_debug("Final LINK_DISC: %02x\n", final);
self->qos_tx.link_disc_time.bits = final;
self->qos_rx.link_disc_time.bits = final;
}
@@ -710,8 +707,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
__u32 line_capacity;
int i,j;
- IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n",
- __func__, speed, max_turn_time);
+ pr_debug("%s(), speed=%d, max_turn_time=%d\n",
+ __func__, speed, max_turn_time);
i = value_index(speed, baud_rates, 10);
j = value_index(max_turn_time, max_turn_times, 4);
@@ -721,8 +718,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time)
line_capacity = max_line_capacities[i][j];
- IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n",
- __func__, line_capacity);
+ pr_debug("%s(), line capacity=%d bytes\n",
+ __func__, line_capacity);
return line_capacity;
}
@@ -737,8 +734,8 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos)
irlap_min_turn_time_in_bytes(qos->baud_rate.value,
qos->min_turn_time.value);
- IRDA_DEBUG(2, "%s(), requested line capacity=%d\n",
- __func__, line_capacity);
+ pr_debug("%s(), requested line capacity=%d\n",
+ __func__, line_capacity);
return line_capacity;
}
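
The qos.c hunks also const-qualify the parameter dispatch tables (pi_minor_call_table_type_0/1 and pi_major_call_table), matching the const pi_minor_info_t * pointers introduced in parameters.c above, so the tables can be placed in read-only data. The pattern, sketched with simplified stand-in types rather than the real pi_minor_info_t/pi_major_info_t:

/* Sketch with hypothetical types; the real ones live in the IrDA headers. */
typedef int (*param_handler_t)(void *self, void *param, int get);

struct minor_info {
	param_handler_t	func;
	int		type;
};

struct major_info {
	const struct minor_info	*table;	/* member must be const-qualified too */
	int			len;
};

static const struct minor_info minor_table[] = {
	{ NULL,	0 },
	{ NULL,	0x01 },		/* handler slot, left NULL for brevity */
};

static const struct major_info major_table[] = {
	{ minor_table, 2 },	/* both arrays end up in .rodata */
};

The important detail is the matching change in parameters.c above: the local pi_minor_info_t * pointers had to become const pi_minor_info_t * first, otherwise assigning from a const table would discard the qualifier and trigger compiler warnings.
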
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index fd0995b1323a..40a0f993bf13 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -106,17 +106,17 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
* Nothing to worry about, but we set the default number of
* BOF's
*/
- IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__);
+ pr_debug("%s(), wrong magic in skb!\n", __func__);
xbofs = 10;
} else
xbofs = cb->xbofs + cb->xbofs_delay;
- IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs);
+ pr_debug("%s(), xbofs=%d\n", __func__, xbofs);
/* Check that we never use more than 115 + 48 xbofs */
if (xbofs > 163) {
- IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__,
- xbofs);
+ pr_debug("%s(), too many xbofs (%d)\n", __func__,
+ xbofs);
xbofs = 163;
}
@@ -134,8 +134,8 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
* transmitted after this point is 5.
*/
if(n >= (buffsize-5)) {
- IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n",
- __func__, n);
+ net_err_ratelimited("%s(), tx buffer overflow (n=%d)\n",
+ __func__, n);
return n;
}
@@ -286,8 +286,8 @@ async_unwrap_bof(struct net_device *dev,
case INSIDE_FRAME:
/* Not supposed to happen, the previous frame is not
* finished - Jean II */
- IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n",
- __func__);
+ pr_debug("%s(), Discarding incomplete frame\n",
+ __func__);
stats->rx_errors++;
stats->rx_missed_errors++;
irda_device_set_media_busy(dev, TRUE);
@@ -360,7 +360,7 @@ async_unwrap_eof(struct net_device *dev,
/* Wrong CRC, discard frame! */
irda_device_set_media_busy(dev, TRUE);
- IRDA_DEBUG(1, "%s(), crc error\n", __func__);
+ pr_debug("%s(), crc error\n", __func__);
stats->rx_errors++;
stats->rx_crc_errors++;
}
@@ -386,7 +386,7 @@ async_unwrap_ce(struct net_device *dev,
break;
case LINK_ESCAPE:
- IRDA_WARNING("%s: state not defined\n", __func__);
+ net_warn_ratelimited("%s: state not defined\n", __func__);
break;
case BEGIN_FRAME:
@@ -420,8 +420,8 @@ async_unwrap_other(struct net_device *dev,
rx_buff->fcs = irda_fcs(rx_buff->fcs, byte);
#endif
} else {
- IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
- __func__);
+ pr_debug("%s(), Rx buffer overflow, aborting\n",
+ __func__);
rx_buff->state = OUTSIDE_FRAME;
}
break;
@@ -439,8 +439,8 @@ async_unwrap_other(struct net_device *dev,
#endif
rx_buff->state = INSIDE_FRAME;
} else {
- IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n",
- __func__);
+ pr_debug("%s(), Rx buffer overflow, aborting\n",
+ __func__);
rx_buff->state = OUTSIDE_FRAME;
}
break;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a089b6b91650..2e9953b2db84 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1070,9 +1070,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
txmsg.class = 0;
/* iterate over control messages */
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
- cmsg = CMSG_NXTHDR(msg, cmsg)) {
-
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg)) {
err = -EINVAL;
goto out;
@@ -1122,7 +1120,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
if (iucv->transport == AF_IUCV_TRANS_HIPER)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
goto fail;
}
@@ -1355,7 +1353,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
- if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
+ if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
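
The af_iucv.c sendmsg hunk swaps the open-coded control-message walk for the for_each_cmsghdr() helper from include/linux/socket.h; the loop body, including the CMSG_OK() validation, is unchanged. The helper is just a wrapper around the same iteration, roughly:

/* Roughly what for_each_cmsghdr() expands to (see include/linux/socket.h). */
#define for_each_cmsghdr(cmsg, msg)			\
	for (cmsg = CMSG_FIRSTHDR(msg);			\
	     cmsg;					\
	     cmsg = CMSG_NXTHDR(msg, cmsg))

The other two hunks in this file are part of the tree-wide msghdr conversion discussed after the af_key.c diff below.
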
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1847ec4e3930..f8ac939d52b4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3611,7 +3611,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
goto out;
err = -EFAULT;
- if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len))
+ if (memcpy_from_msg(skb_put(skb,len), msg, len))
goto out;
hdr = pfkey_get_base_msg(skb, &err);
@@ -3654,7 +3654,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
}
skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free;
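
The af_key.c hunks (and the matching ones in af_iucv.c above and the l2tp/llc files below) are part of the tree-wide move away from passing msg->msg_iov around: memcpy_fromiovec(ptr, msg->msg_iov, len) becomes memcpy_from_msg(ptr, msg, len), and skb_copy_datagram_iovec(skb, off, msg->msg_iov, len) becomes skb_copy_datagram_msg(skb, off, msg, len). At the time of this conversion the new helpers were thin wrappers that simply reach into the msghdr, roughly:

/* Approximate shape of the helpers when introduced; the authoritative
 * definitions are in include/linux/skbuff.h. */
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return memcpy_fromiovec(data, msg->msg_iov, len);
}

static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iovec(from, offset, msg->msg_iov, size);
}

Hiding msg_iov behind these helpers is what later allows the iovec to be replaced by an iov_iter without touching every protocol again.
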
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index edb78e69efe4..781b3a226ba7 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -126,6 +126,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
.ndo_get_stats64 = l2tp_eth_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
};
static void l2tp_eth_dev_setup(struct net_device *dev)
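
The one-line l2tp_eth.c change hooks the stock eth_mac_addr() helper into ndo_set_mac_address, so changing the MAC of an l2tpeth device works instead of failing with -EOPNOTSUPP. Conceptually the helper validates the requested address and copies it into dev->dev_addr; a simplified sketch with a hypothetical stand-in (the real code in net/ethernet/eth.c additionally refuses the change while the interface is up unless the device advertises IFF_LIVE_ADDR_CHANGE):

#include <linux/etherdevice.h>	/* is_valid_ether_addr(), ETH_ALEN */
#include <linux/netdevice.h>

/* Hypothetical stand-in illustrating the shape of eth_mac_addr(). */
static int example_set_mac(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;	/* reject multicast and all-zero MACs */

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}
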
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 369a9822488c..05dfc8aa36af 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -441,7 +441,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
*((__be32 *) skb_put(skb, 4)) = 0;
/* Copy user data into skb */
- rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc < 0) {
kfree_skb(skb);
goto error;
@@ -528,7 +528,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0edb263cc002..8611f1b63141 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -619,7 +619,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
back_from_confirm:
lock_sock(sk);
- err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
+ err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
@@ -672,7 +672,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
copied = len;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b704a9356208..cc7a828fc914 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -208,7 +208,7 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+ err = skb_copy_datagram_msg(skb, 0, msg, len);
if (likely(err == 0))
err = len;
@@ -346,8 +346,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
skb_put(skb, 2);
/* Copy user data into skb */
- error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
- total_len);
+ error = memcpy_from_msg(skb_put(skb, total_len), m, total_len);
if (error < 0) {
kfree_skb(skb);
goto error_put_sess_tun;
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 3cdaa046c1bc..fc60d9d738b5 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -73,6 +73,7 @@ static void __lapb_remove_cb(struct lapb_cb *lapb)
lapb_put(lapb);
}
}
+EXPORT_SYMBOL(lapb_register);
/*
* Add a socket to the bound sockets list.
@@ -195,6 +196,7 @@ out:
write_unlock_bh(&lapb_list_lock);
return rc;
}
+EXPORT_SYMBOL(lapb_unregister);
int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
{
@@ -227,6 +229,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
out:
return rc;
}
+EXPORT_SYMBOL(lapb_getparms);
int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
{
@@ -262,6 +265,7 @@ out_put:
out:
return rc;
}
+EXPORT_SYMBOL(lapb_setparms);
int lapb_connect_request(struct net_device *dev)
{
@@ -290,6 +294,7 @@ out_put:
out:
return rc;
}
+EXPORT_SYMBOL(lapb_connect_request);
int lapb_disconnect_request(struct net_device *dev)
{
@@ -334,6 +339,7 @@ out_put:
out:
return rc;
}
+EXPORT_SYMBOL(lapb_disconnect_request);
int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
{
@@ -355,6 +361,7 @@ out_put:
out:
return rc;
}
+EXPORT_SYMBOL(lapb_data_request);
int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
{
@@ -369,6 +376,7 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
return rc;
}
+EXPORT_SYMBOL(lapb_data_received);
void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
{
@@ -415,15 +423,6 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
return used;
}
-EXPORT_SYMBOL(lapb_register);
-EXPORT_SYMBOL(lapb_unregister);
-EXPORT_SYMBOL(lapb_getparms);
-EXPORT_SYMBOL(lapb_setparms);
-EXPORT_SYMBOL(lapb_connect_request);
-EXPORT_SYMBOL(lapb_disconnect_request);
-EXPORT_SYMBOL(lapb_data_request);
-EXPORT_SYMBOL(lapb_data_received);
-
static int __init lapb_init(void)
{
return 0;
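
The lapb_iface.c diff is purely mechanical: the block of EXPORT_SYMBOL() statements that used to sit at the bottom of the file is deleted and each export is moved directly under the function it exports, which is the preferred kernel style (checkpatch complains when an EXPORT_SYMBOL() does not immediately follow its definition). In sketch form, with a hypothetical function:

#include <linux/export.h>

int example_register(void)		/* hypothetical helper */
{
	return 0;
}
EXPORT_SYMBOL(example_register);	/* kept adjacent to the definition */
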
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index bb9cbc17d926..2c0b83ce43bd 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -819,8 +819,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
used = len;
if (!(flags & MSG_TRUNC)) {
- int rc = skb_copy_datagram_iovec(skb, offset,
- msg->msg_iov, used);
+ int rc = skb_copy_datagram_msg(skb, offset, msg, used);
if (rc) {
/* Exception. Bailout! */
if (!copied)
@@ -922,7 +921,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->dev = llc->dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
- rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied);
+ rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
if (rc)
goto out;
if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
diff --git a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c
index 818a9428823b..2467573b5f84 100644
--- a/net/llc/llc_c_st.c
+++ b/net/llc/llc_c_st.c
@@ -33,7 +33,7 @@
* LLC_CONN_STATE_AWAIT_REJ states
*/
/* State transitions for LLC_CONN_EV_DISC_REQ event */
-static llc_conn_action_t llc_common_actions_1[] = {
+static const llc_conn_action_t llc_common_actions_1[] = {
[0] = llc_conn_ac_send_disc_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -50,7 +50,7 @@ static struct llc_conn_state_trans llc_common_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RESET_REQ event */
-static llc_conn_action_t llc_common_actions_2[] = {
+static const llc_conn_action_t llc_common_actions_2[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -67,7 +67,7 @@ static struct llc_conn_state_trans llc_common_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_common_actions_3[] = {
+static const llc_conn_action_t llc_common_actions_3[] = {
[0] = llc_conn_ac_stop_all_timers,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -87,7 +87,7 @@ static struct llc_conn_state_trans llc_common_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_common_actions_4[] = {
+static const llc_conn_action_t llc_common_actions_4[] = {
[0] = llc_conn_ac_stop_all_timers,
[1] = llc_conn_ac_send_ua_rsp_f_set_p,
[2] = llc_conn_ac_disc_ind,
@@ -103,7 +103,7 @@ static struct llc_conn_state_trans llc_common_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
-static llc_conn_action_t llc_common_actions_5[] = {
+static const llc_conn_action_t llc_common_actions_5[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -122,7 +122,7 @@ static struct llc_conn_state_trans llc_common_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
-static llc_conn_action_t llc_common_actions_6[] = {
+static const llc_conn_action_t llc_common_actions_6[] = {
[0] = llc_conn_ac_disc_ind,
[1] = llc_conn_ac_stop_all_timers,
[2] = llc_conn_disc,
@@ -137,7 +137,7 @@ static struct llc_conn_state_trans llc_common_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */
-static llc_conn_action_t llc_common_actions_7a[] = {
+static const llc_conn_action_t llc_common_actions_7a[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -153,7 +153,7 @@ static struct llc_conn_state_trans llc_common_state_trans_7a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */
-static llc_conn_action_t llc_common_actions_7b[] = {
+static const llc_conn_action_t llc_common_actions_7b[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -169,7 +169,7 @@ static struct llc_conn_state_trans llc_common_state_trans_7b = {
};
/* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */
-static llc_conn_action_t llc_common_actions_8a[] = {
+static const llc_conn_action_t llc_common_actions_8a[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -185,7 +185,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */
-static llc_conn_action_t llc_common_actions_8b[] = {
+static const llc_conn_action_t llc_common_actions_8b[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -201,7 +201,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_BAD_PDU event */
-static llc_conn_action_t llc_common_actions_8c[] = {
+static const llc_conn_action_t llc_common_actions_8c[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -217,7 +217,7 @@ static struct llc_conn_state_trans llc_common_state_trans_8c = {
};
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
-static llc_conn_action_t llc_common_actions_9[] = {
+static const llc_conn_action_t llc_common_actions_9[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -234,12 +234,12 @@ static struct llc_conn_state_trans llc_common_state_trans_9 = {
/* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */
#if 0
-static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = {
+static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_common_actions_10[] = {
+static const llc_conn_action_t llc_common_actions_10[] = {
[0] = llc_conn_ac_send_frmr_rsp_f_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -256,12 +256,12 @@ static struct llc_conn_state_trans llc_common_state_trans_10 = {
#endif
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = {
+static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_common_actions_11a[] = {
+static const llc_conn_action_t llc_common_actions_11a[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -278,12 +278,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11a = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = {
+static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_common_actions_11b[] = {
+static const llc_conn_action_t llc_common_actions_11b[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -300,12 +300,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11b = {
};
/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = {
+static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_common_actions_11c[] = {
+static const llc_conn_action_t llc_common_actions_11c[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -322,12 +322,12 @@ static struct llc_conn_state_trans llc_common_state_trans_11c = {
};
/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = {
+static const llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_common_actions_11d[] = {
+static const llc_conn_action_t llc_common_actions_11d[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_stop_other_timers,
@@ -351,7 +351,7 @@ static struct llc_conn_state_trans llc_common_state_trans_end;
/* LLC_CONN_STATE_ADM transitions */
/* State transitions for LLC_CONN_EV_CONN_REQ event */
-static llc_conn_action_t llc_adm_actions_1[] = {
+static const llc_conn_action_t llc_adm_actions_1[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_set_retry_cnt_0,
@@ -367,7 +367,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_adm_actions_2[] = {
+static const llc_conn_action_t llc_adm_actions_2[] = {
[0] = llc_conn_ac_send_ua_rsp_f_set_p,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -386,7 +386,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_adm_actions_3[] = {
+static const llc_conn_action_t llc_adm_actions_3[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_disc,
[2] = NULL,
@@ -400,7 +400,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_adm_actions_4[] = {
+static const llc_conn_action_t llc_adm_actions_4[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_1,
[1] = llc_conn_disc,
[2] = NULL,
@@ -414,7 +414,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_XXX_YYY event */
-static llc_conn_action_t llc_adm_actions_5[] = {
+static const llc_conn_action_t llc_adm_actions_5[] = {
[0] = llc_conn_disc,
[1] = NULL,
};
@@ -445,7 +445,7 @@ static struct llc_conn_state_trans *llc_adm_state_transitions[] = {
/* LLC_CONN_STATE_SETUP transitions */
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_setup_actions_1[] = {
+static const llc_conn_action_t llc_setup_actions_1[] = {
[0] = llc_conn_ac_send_ua_rsp_f_set_p,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -461,13 +461,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = llc_conn_ev_qlfy_set_status_conn,
[2] = NULL,
};
-static llc_conn_action_t llc_setup_actions_2[] = {
+static const llc_conn_action_t llc_setup_actions_2[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -485,13 +485,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_s_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_conn,
[2] = NULL,
};
-static llc_conn_action_t llc_setup_actions_3[] = {
+static const llc_conn_action_t llc_setup_actions_3[] = {
[0] = llc_conn_ac_set_p_flag_0,
[1] = llc_conn_ac_set_remote_busy_0,
[2] = llc_conn_ac_conn_confirm,
@@ -506,12 +506,12 @@ static struct llc_conn_state_trans llc_setup_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_set_status_disc,
[1] = NULL,
};
-static llc_conn_action_t llc_setup_actions_4[] = {
+static const llc_conn_action_t llc_setup_actions_4[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_ac_conn_confirm,
@@ -527,12 +527,12 @@ static struct llc_conn_state_trans llc_setup_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = {
[0] = llc_conn_ev_qlfy_set_status_disc,
[1] = NULL,
};
-static llc_conn_action_t llc_setup_actions_5[] = {
+static const llc_conn_action_t llc_setup_actions_5[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_conn_confirm,
[2] = llc_conn_disc,
@@ -547,13 +547,13 @@ static struct llc_conn_state_trans llc_setup_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = llc_conn_ev_qlfy_s_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_setup_actions_7[] = {
+static const llc_conn_action_t llc_setup_actions_7[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -568,14 +568,14 @@ static struct llc_conn_state_trans llc_setup_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = {
+static const llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = llc_conn_ev_qlfy_s_flag_eq_0,
[2] = llc_conn_ev_qlfy_set_status_failed,
[3] = NULL,
};
-static llc_conn_action_t llc_setup_actions_8[] = {
+static const llc_conn_action_t llc_setup_actions_8[] = {
[0] = llc_conn_ac_conn_confirm,
[1] = llc_conn_disc,
[2] = NULL,
@@ -609,14 +609,14 @@ static struct llc_conn_state_trans *llc_setup_state_transitions[] = {
/* LLC_CONN_STATE_NORMAL transitions */
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_1[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = llc_conn_ev_qlfy_last_frame_eq_0,
[3] = NULL,
};
-static llc_conn_action_t llc_normal_actions_1[] = {
+static const llc_conn_action_t llc_normal_actions_1[] = {
[0] = llc_conn_ac_send_i_as_ack,
[1] = llc_conn_ac_start_ack_tmr_if_not_running,
[2] = NULL,
@@ -630,14 +630,14 @@ static struct llc_conn_state_trans llc_normal_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = llc_conn_ev_qlfy_last_frame_eq_1,
[3] = NULL,
};
-static llc_conn_action_t llc_normal_actions_2[] = {
+static const llc_conn_action_t llc_normal_actions_2[] = {
[0] = llc_conn_ac_send_i_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = NULL,
@@ -651,14 +651,14 @@ static struct llc_conn_state_trans llc_normal_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
[1] = llc_conn_ev_qlfy_set_status_remote_busy,
[2] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_normal_actions_2_1[1];
+static const llc_conn_action_t llc_normal_actions_2_1[1];
static struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
@@ -668,12 +668,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_2_1 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_3[] = {
+static const llc_conn_action_t llc_normal_actions_3[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rnr_xxx_x_set_0,
[2] = llc_conn_ac_set_data_flag_0,
@@ -688,12 +688,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_4[] = {
+static const llc_conn_action_t llc_normal_actions_4[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rnr_xxx_x_set_0,
[2] = llc_conn_ac_set_data_flag_0,
@@ -708,12 +708,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_5a[] = {
+static const llc_conn_action_t llc_normal_actions_5a[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_xxx_x_set_0,
[2] = llc_conn_ac_upd_nr_received,
@@ -731,12 +731,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_5b[] = {
+static const llc_conn_action_t llc_normal_actions_5b[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_xxx_x_set_0,
[2] = llc_conn_ac_upd_nr_received,
@@ -754,12 +754,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_5c[] = {
+static const llc_conn_action_t llc_normal_actions_5c[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_xxx_x_set_0,
[2] = llc_conn_ac_upd_nr_received,
@@ -777,12 +777,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_5c = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_6a[] = {
+static const llc_conn_action_t llc_normal_actions_6a[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_xxx_x_set_0,
[2] = llc_conn_ac_upd_nr_received,
@@ -798,12 +798,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_6a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_6b[] = {
+static const llc_conn_action_t llc_normal_actions_6b[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_xxx_x_set_0,
[2] = llc_conn_ac_upd_nr_received,
@@ -819,7 +819,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_6b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_normal_actions_7[] = {
+static const llc_conn_action_t llc_normal_actions_7[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rej_rsp_f_set_1,
[2] = llc_conn_ac_upd_nr_received,
@@ -835,12 +835,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_8[] = {
+static const llc_conn_action_t llc_normal_actions_8[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_upd_p_flag,
@@ -858,7 +858,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
@@ -871,12 +871,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_9a[] = {
+static const llc_conn_action_t llc_normal_actions_9a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_data_ind,
@@ -892,12 +892,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_9a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_9b[] = {
+static const llc_conn_action_t llc_normal_actions_9b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_data_ind,
@@ -913,7 +913,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_9b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_normal_actions_10[] = {
+static const llc_conn_action_t llc_normal_actions_10[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_send_ack_rsp_f_set_1,
[2] = llc_conn_ac_rst_sendack_flag,
@@ -930,7 +930,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_10 = {
};
/* State transitions for * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_normal_actions_11a[] = {
+static const llc_conn_action_t llc_normal_actions_11a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -945,7 +945,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_11a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_normal_actions_11b[] = {
+static const llc_conn_action_t llc_normal_actions_11b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -960,12 +960,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_11b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_11c[] = {
+static const llc_conn_action_t llc_normal_actions_11c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_inc_tx_win_size,
@@ -981,7 +981,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_11c = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_normal_actions_12[] = {
+static const llc_conn_action_t llc_normal_actions_12[] = {
[0] = llc_conn_ac_send_ack_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_adjust_npta_by_rr,
@@ -998,7 +998,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_12 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_normal_actions_13a[] = {
+static const llc_conn_action_t llc_normal_actions_13a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1013,7 +1013,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_13a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_normal_actions_13b[] = {
+static const llc_conn_action_t llc_normal_actions_13b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1028,12 +1028,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_13b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_13c[] = {
+static const llc_conn_action_t llc_normal_actions_13c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1048,7 +1048,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_13c = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_normal_actions_14[] = {
+static const llc_conn_action_t llc_normal_actions_14[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_adjust_npta_by_rnr,
@@ -1065,12 +1065,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_14 = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_15a[] = {
+static const llc_conn_action_t llc_normal_actions_15a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -1088,12 +1088,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_15a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_15b[] = {
+static const llc_conn_action_t llc_normal_actions_15b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -1111,12 +1111,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_15b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_16a[] = {
+static const llc_conn_action_t llc_normal_actions_16a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_dec_tx_win_size,
@@ -1133,12 +1133,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_16a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_16b[] = {
+static const llc_conn_action_t llc_normal_actions_16b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_dec_tx_win_size,
@@ -1155,7 +1155,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_16b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_normal_actions_17[] = {
+static const llc_conn_action_t llc_normal_actions_17[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_dec_tx_win_size,
@@ -1172,12 +1172,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_17 = {
};
/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_18[] = {
+static const llc_conn_action_t llc_normal_actions_18[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = NULL,
@@ -1191,12 +1191,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_18 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_19[] = {
+static const llc_conn_action_t llc_normal_actions_19[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rr_cmd_p_set_1,
[2] = llc_conn_ac_rst_vs,
@@ -1213,13 +1213,13 @@ static struct llc_conn_state_trans llc_normal_state_trans_19 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_normal_actions_20a[] = {
+static const llc_conn_action_t llc_normal_actions_20a[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rr_cmd_p_set_1,
[2] = llc_conn_ac_rst_vs,
@@ -1236,13 +1236,13 @@ static struct llc_conn_state_trans llc_normal_state_trans_20a = {
};
/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_normal_actions_20b[] = {
+static const llc_conn_action_t llc_normal_actions_20b[] = {
[0] = llc_conn_ac_rst_sendack_flag,
[1] = llc_conn_ac_send_rr_cmd_p_set_1,
[2] = llc_conn_ac_rst_vs,
@@ -1259,12 +1259,12 @@ static struct llc_conn_state_trans llc_normal_state_trans_20b = {
};
/* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */
-static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = {
+static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_normal_actions_21[] = {
+static const llc_conn_action_t llc_normal_actions_21[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = NULL,
@@ -1342,13 +1342,13 @@ static struct llc_conn_state_trans *llc_normal_state_transitions[] = {
/* LLC_CONN_STATE_BUSY transitions */
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_1[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_1[] = {
+static const llc_conn_action_t llc_busy_actions_1[] = {
[0] = llc_conn_ac_send_i_xxx_x_set_0,
[1] = llc_conn_ac_start_ack_tmr_if_not_running,
[2] = NULL,
@@ -1362,13 +1362,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_1,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_2[] = {
+static const llc_conn_action_t llc_busy_actions_2[] = {
[0] = llc_conn_ac_send_i_xxx_x_set_0,
[1] = llc_conn_ac_start_ack_tmr_if_not_running,
[2] = NULL,
@@ -1382,14 +1382,14 @@ static struct llc_conn_state_trans llc_busy_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
[1] = llc_conn_ev_qlfy_set_status_remote_busy,
[2] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_busy_actions_2_1[1];
+static const llc_conn_action_t llc_busy_actions_2_1[1];
static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
@@ -1399,13 +1399,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_1,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_3[] = {
+static const llc_conn_action_t llc_busy_actions_3[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_start_rej_timer,
[2] = NULL,
@@ -1419,13 +1419,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_1,
[1] = llc_conn_ev_qlfy_p_flag_eq_1,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_4[] = {
+static const llc_conn_action_t llc_busy_actions_4[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_start_rej_timer,
[2] = NULL,
@@ -1439,13 +1439,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_5[] = {
+static const llc_conn_action_t llc_busy_actions_5[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -1458,13 +1458,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_1,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_6[] = {
+static const llc_conn_action_t llc_busy_actions_6[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -1477,13 +1477,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_2,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_7[] = {
+static const llc_conn_action_t llc_busy_actions_7[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -1496,13 +1496,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_2,
[1] = llc_conn_ev_qlfy_p_flag_eq_1,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_8[] = {
+static const llc_conn_action_t llc_busy_actions_8[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -1515,12 +1515,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_8 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_9a[] = {
+static const llc_conn_action_t llc_busy_actions_9a[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_p_flag,
[2] = llc_conn_ac_upd_nr_received,
@@ -1537,12 +1537,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_9a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_9b[] = {
+static const llc_conn_action_t llc_busy_actions_9b[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_p_flag,
[2] = llc_conn_ac_upd_nr_received,
@@ -1559,12 +1559,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_9b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_10a[] = {
+static const llc_conn_action_t llc_busy_actions_10a[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
@@ -1579,12 +1579,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_10a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_10b[] = {
+static const llc_conn_action_t llc_busy_actions_10b[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
@@ -1599,7 +1599,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_10b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_busy_actions_11[] = {
+static const llc_conn_action_t llc_busy_actions_11[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
@@ -1614,7 +1614,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_11 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_busy_actions_12[] = {
+static const llc_conn_action_t llc_busy_actions_12[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
@@ -1632,12 +1632,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_12 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_13a[] = {
+static const llc_conn_action_t llc_busy_actions_13a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_upd_p_flag,
@@ -1657,12 +1657,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_13a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_13b[] = {
+static const llc_conn_action_t llc_busy_actions_13b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_upd_p_flag,
@@ -1682,12 +1682,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_13b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_14a[] = {
+static const llc_conn_action_t llc_busy_actions_14a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
@@ -1705,12 +1705,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_14a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_14b[] = {
+static const llc_conn_action_t llc_busy_actions_14b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
@@ -1728,7 +1728,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_14b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_busy_actions_15a[] = {
+static const llc_conn_action_t llc_busy_actions_15a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -1743,7 +1743,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_15a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_busy_actions_15b[] = {
+static const llc_conn_action_t llc_busy_actions_15b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -1758,12 +1758,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_15b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_15c[] = {
+static const llc_conn_action_t llc_busy_actions_15c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -1778,7 +1778,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_15c = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_busy_actions_16[] = {
+static const llc_conn_action_t llc_busy_actions_16[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -1793,7 +1793,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_16 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_busy_actions_17a[] = {
+static const llc_conn_action_t llc_busy_actions_17a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1808,7 +1808,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_17a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_busy_actions_17b[] = {
+static const llc_conn_action_t llc_busy_actions_17b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1823,12 +1823,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_17b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_17c[] = {
+static const llc_conn_action_t llc_busy_actions_17c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1843,7 +1843,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_17c = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_busy_actions_18[] = {
+static const llc_conn_action_t llc_busy_actions_18[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -1858,12 +1858,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_18 = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_19a[] = {
+static const llc_conn_action_t llc_busy_actions_19a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -1880,12 +1880,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_19a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_19b[] = {
+static const llc_conn_action_t llc_busy_actions_19b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -1902,12 +1902,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_19b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_20a[] = {
+static const llc_conn_action_t llc_busy_actions_20a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_resend_i_xxx_x_set_0,
@@ -1923,12 +1923,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_20a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_20b[] = {
+static const llc_conn_action_t llc_busy_actions_20b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_resend_i_xxx_x_set_0,
@@ -1944,7 +1944,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_20b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_busy_actions_21[] = {
+static const llc_conn_action_t llc_busy_actions_21[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
@@ -1961,12 +1961,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_21 = {
};
/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_22[] = {
+static const llc_conn_action_t llc_busy_actions_22[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = NULL,
@@ -1980,12 +1980,12 @@ static struct llc_conn_state_trans llc_busy_state_trans_22 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_busy_actions_23[] = {
+static const llc_conn_action_t llc_busy_actions_23[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_rst_vs,
[2] = llc_conn_ac_start_p_timer,
@@ -2001,13 +2001,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_23 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_24a[] = {
+static const llc_conn_action_t llc_busy_actions_24a[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -2023,13 +2023,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_24a = {
};
/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_24b[] = {
+static const llc_conn_action_t llc_busy_actions_24b[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -2045,13 +2045,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_24b = {
};
/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_25[] = {
+static const llc_conn_action_t llc_busy_actions_25[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -2068,13 +2068,13 @@ static struct llc_conn_state_trans llc_busy_state_trans_25 = {
};
/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = {
+static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_busy_actions_26[] = {
+static const llc_conn_action_t llc_busy_actions_26[] = {
[0] = llc_conn_ac_set_data_flag_1,
[1] = NULL,
};
@@ -2155,13 +2155,13 @@ static struct llc_conn_state_trans *llc_busy_state_transitions[] = {
/* LLC_CONN_STATE_REJ transitions */
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_reject_actions_1[] = {
+static const llc_conn_action_t llc_reject_actions_1[] = {
[0] = llc_conn_ac_send_i_xxx_x_set_0,
[1] = NULL,
};
@@ -2174,13 +2174,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
[1] = llc_conn_ev_qlfy_p_flag_eq_1,
[2] = NULL,
};
-static llc_conn_action_t llc_reject_actions_2[] = {
+static const llc_conn_action_t llc_reject_actions_2[] = {
[0] = llc_conn_ac_send_i_xxx_x_set_0,
[1] = NULL,
};
@@ -2193,14 +2193,14 @@ static struct llc_conn_state_trans llc_reject_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
[1] = llc_conn_ev_qlfy_set_status_remote_busy,
[2] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_reject_actions_2_1[1];
+static const llc_conn_action_t llc_reject_actions_2_1[1];
static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
.ev = llc_conn_ev_data_req,
@@ -2211,12 +2211,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_3[] = {
+static const llc_conn_action_t llc_reject_actions_3[] = {
[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_set_data_flag_2,
[2] = NULL,
@@ -2230,12 +2230,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_4[] = {
+static const llc_conn_action_t llc_reject_actions_4[] = {
[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_set_data_flag_2,
[2] = NULL,
@@ -2249,7 +2249,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_reject_actions_5a[] = {
+static const llc_conn_action_t llc_reject_actions_5a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_p_flag,
[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
@@ -2264,7 +2264,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_5a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_reject_actions_5b[] = {
+static const llc_conn_action_t llc_reject_actions_5b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_p_flag,
[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
@@ -2279,12 +2279,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_5b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_5c[] = {
+static const llc_conn_action_t llc_reject_actions_5c[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_p_flag,
[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
@@ -2299,7 +2299,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_5c = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_reject_actions_6[] = {
+static const llc_conn_action_t llc_reject_actions_6[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = NULL,
@@ -2313,12 +2313,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_7a[] = {
+static const llc_conn_action_t llc_reject_actions_7a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_upd_p_flag,
@@ -2338,12 +2338,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_7a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_7b[] = {
+static const llc_conn_action_t llc_reject_actions_7b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_upd_p_flag,
@@ -2362,12 +2362,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_7b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_8a[] = {
+static const llc_conn_action_t llc_reject_actions_8a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_ack_xxx_x_set_0,
@@ -2384,12 +2384,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_8b[] = {
+static const llc_conn_action_t llc_reject_actions_8b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_ack_xxx_x_set_0,
@@ -2406,7 +2406,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_reject_actions_9[] = {
+static const llc_conn_action_t llc_reject_actions_9[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_ack_rsp_f_set_1,
@@ -2423,7 +2423,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_9 = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_reject_actions_10a[] = {
+static const llc_conn_action_t llc_reject_actions_10a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -2438,7 +2438,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_10a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_reject_actions_10b[] = {
+static const llc_conn_action_t llc_reject_actions_10b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -2453,12 +2453,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_10b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_10c[] = {
+static const llc_conn_action_t llc_reject_actions_10c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -2473,7 +2473,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_10c = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_reject_actions_11[] = {
+static const llc_conn_action_t llc_reject_actions_11[] = {
[0] = llc_conn_ac_send_ack_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_clear_remote_busy,
@@ -2488,7 +2488,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_11 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_reject_actions_12a[] = {
+static const llc_conn_action_t llc_reject_actions_12a[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -2503,7 +2503,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_12a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_reject_actions_12b[] = {
+static const llc_conn_action_t llc_reject_actions_12b[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -2518,12 +2518,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_12b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_12c[] = {
+static const llc_conn_action_t llc_reject_actions_12c[] = {
[0] = llc_conn_ac_upd_p_flag,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -2538,7 +2538,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_12c = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_reject_actions_13[] = {
+static const llc_conn_action_t llc_reject_actions_13[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_set_remote_busy,
@@ -2553,12 +2553,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_13 = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_14a[] = {
+static const llc_conn_action_t llc_reject_actions_14a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -2575,12 +2575,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_14a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_14b[] = {
+static const llc_conn_action_t llc_reject_actions_14b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_p_flag,
@@ -2597,12 +2597,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_14b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_15a[] = {
+static const llc_conn_action_t llc_reject_actions_15a[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_resend_i_xxx_x_set_0,
@@ -2618,12 +2618,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_15a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_15b[] = {
+static const llc_conn_action_t llc_reject_actions_15b[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_resend_i_xxx_x_set_0,
@@ -2639,7 +2639,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_15b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_reject_actions_16[] = {
+static const llc_conn_action_t llc_reject_actions_16[] = {
[0] = llc_conn_ac_set_vs_nr,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_resend_i_rsp_f_set_1,
@@ -2655,12 +2655,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_16 = {
};
/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_17[] = {
+static const llc_conn_action_t llc_reject_actions_17[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = NULL,
@@ -2674,13 +2674,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_17 = {
};
/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_reject_actions_18[] = {
+static const llc_conn_action_t llc_reject_actions_18[] = {
[0] = llc_conn_ac_send_rej_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_start_rej_timer,
@@ -2696,12 +2696,12 @@ static struct llc_conn_state_trans llc_reject_state_trans_18 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_reject_actions_19[] = {
+static const llc_conn_action_t llc_reject_actions_19[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_start_rej_timer,
@@ -2718,13 +2718,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_19 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_reject_actions_20a[] = {
+static const llc_conn_action_t llc_reject_actions_20a[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_start_rej_timer,
@@ -2741,13 +2741,13 @@ static struct llc_conn_state_trans llc_reject_state_trans_20a = {
};
/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = {
+static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_0,
[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[2] = NULL,
};
-static llc_conn_action_t llc_reject_actions_20b[] = {
+static const llc_conn_action_t llc_reject_actions_20b[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_start_rej_timer,
@@ -2826,13 +2826,13 @@ static struct llc_conn_state_trans *llc_reject_state_transitions[] = {
/* LLC_CONN_STATE_AWAIT transitions */
/* State transitions for LLC_CONN_EV_DATA_REQ event */
-static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
+static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_await_actions_1_0[1];
+static const llc_conn_action_t llc_await_actions_1_0[1];
static struct llc_conn_state_trans llc_await_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
@@ -2842,7 +2842,7 @@ static struct llc_conn_state_trans llc_await_state_trans_1_0 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_action_t llc_await_actions_1[] = {
+static const llc_conn_action_t llc_await_actions_1[] = {
[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_set_data_flag_0,
[2] = NULL,
@@ -2856,7 +2856,7 @@ static struct llc_conn_state_trans llc_await_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_actions_2[] = {
+static const llc_conn_action_t llc_await_actions_2[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -2875,7 +2875,7 @@ static struct llc_conn_state_trans llc_await_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_actions_3a[] = {
+static const llc_conn_action_t llc_await_actions_3a[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -2891,7 +2891,7 @@ static struct llc_conn_state_trans llc_await_state_trans_3a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_actions_3b[] = {
+static const llc_conn_action_t llc_await_actions_3b[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -2907,7 +2907,7 @@ static struct llc_conn_state_trans llc_await_state_trans_3b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_actions_4[] = {
+static const llc_conn_action_t llc_await_actions_4[] = {
[0] = llc_conn_ac_send_rej_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -2924,7 +2924,7 @@ static struct llc_conn_state_trans llc_await_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_5[] = {
+static const llc_conn_action_t llc_await_actions_5[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_stop_p_timer,
@@ -2943,7 +2943,7 @@ static struct llc_conn_state_trans llc_await_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_6a[] = {
+static const llc_conn_action_t llc_await_actions_6a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_xxx_x_set_0,
@@ -2960,7 +2960,7 @@ static struct llc_conn_state_trans llc_await_state_trans_6a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_6b[] = {
+static const llc_conn_action_t llc_await_actions_6b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_xxx_x_set_0,
@@ -2977,7 +2977,7 @@ static struct llc_conn_state_trans llc_await_state_trans_6b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_7[] = {
+static const llc_conn_action_t llc_await_actions_7[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_rsp_f_set_1,
@@ -2994,7 +2994,7 @@ static struct llc_conn_state_trans llc_await_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_8a[] = {
+static const llc_conn_action_t llc_await_actions_8a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3011,7 +3011,7 @@ static struct llc_conn_state_trans llc_await_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_8b[] = {
+static const llc_conn_action_t llc_await_actions_8b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3028,7 +3028,7 @@ static struct llc_conn_state_trans llc_await_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_9a[] = {
+static const llc_conn_action_t llc_await_actions_9a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3043,7 +3043,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_9b[] = {
+static const llc_conn_action_t llc_await_actions_9b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3058,7 +3058,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_9c[] = {
+static const llc_conn_action_t llc_await_actions_9c[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3073,7 +3073,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9c = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_9d[] = {
+static const llc_conn_action_t llc_await_actions_9d[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3088,7 +3088,7 @@ static struct llc_conn_state_trans llc_await_state_trans_9d = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_10a[] = {
+static const llc_conn_action_t llc_await_actions_10a[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3104,7 +3104,7 @@ static struct llc_conn_state_trans llc_await_state_trans_10a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_10b[] = {
+static const llc_conn_action_t llc_await_actions_10b[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3120,7 +3120,7 @@ static struct llc_conn_state_trans llc_await_state_trans_10b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_11[] = {
+static const llc_conn_action_t llc_await_actions_11[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3136,7 +3136,7 @@ static struct llc_conn_state_trans llc_await_state_trans_11 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_12a[] = {
+static const llc_conn_action_t llc_await_actions_12a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -3151,7 +3151,7 @@ static struct llc_conn_state_trans llc_await_state_trans_12a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_actions_12b[] = {
+static const llc_conn_action_t llc_await_actions_12b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -3166,7 +3166,7 @@ static struct llc_conn_state_trans llc_await_state_trans_12b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_actions_13[] = {
+static const llc_conn_action_t llc_await_actions_13[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3182,12 +3182,12 @@ static struct llc_conn_state_trans llc_await_state_trans_13 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = {
+static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_await_actions_14[] = {
+static const llc_conn_action_t llc_await_actions_14[] = {
[0] = llc_conn_ac_send_rr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -3255,13 +3255,13 @@ static struct llc_conn_state_trans *llc_await_state_transitions[] = {
/* LLC_CONN_STATE_AWAIT_BUSY transitions */
/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
-static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = {
+static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_await_busy_actions_1_0[1];
+static const llc_conn_action_t llc_await_busy_actions_1_0[1];
static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
@@ -3271,12 +3271,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = {
+static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_1,
[1] = NULL,
};
-static llc_conn_action_t llc_await_busy_actions_1[] = {
+static const llc_conn_action_t llc_await_busy_actions_1[] = {
[0] = llc_conn_ac_send_rej_xxx_x_set_0,
[1] = llc_conn_ac_start_rej_timer,
[2] = NULL,
@@ -3290,12 +3290,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_0,
[1] = NULL,
};
-static llc_conn_action_t llc_await_busy_actions_2[] = {
+static const llc_conn_action_t llc_await_busy_actions_2[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -3308,12 +3308,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
-static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_data_flag_eq_2,
[1] = NULL,
};
-static llc_conn_action_t llc_await_busy_actions_3[] = {
+static const llc_conn_action_t llc_await_busy_actions_3[] = {
[0] = llc_conn_ac_send_rr_xxx_x_set_0,
[1] = NULL,
};
@@ -3326,7 +3326,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_busy_actions_4[] = {
+static const llc_conn_action_t llc_await_busy_actions_4[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3345,7 +3345,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_busy_actions_5a[] = {
+static const llc_conn_action_t llc_await_busy_actions_5a[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3361,7 +3361,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_busy_actions_5b[] = {
+static const llc_conn_action_t llc_await_busy_actions_5b[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3377,7 +3377,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_5b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_busy_actions_6[] = {
+static const llc_conn_action_t llc_await_busy_actions_6[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3393,7 +3393,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_7[] = {
+static const llc_conn_action_t llc_await_busy_actions_7[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_inc_vr_by_1,
[2] = llc_conn_ac_data_ind,
@@ -3414,7 +3414,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_8a[] = {
+static const llc_conn_action_t llc_await_busy_actions_8a[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_inc_vr_by_1,
[2] = llc_conn_ac_data_ind,
@@ -3432,7 +3432,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_8b[] = {
+static const llc_conn_action_t llc_await_busy_actions_8b[] = {
[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_inc_vr_by_1,
[2] = llc_conn_ac_data_ind,
@@ -3450,7 +3450,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_9[] = {
+static const llc_conn_action_t llc_await_busy_actions_9[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_inc_vr_by_1,
[2] = llc_conn_ac_data_ind,
@@ -3468,7 +3468,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_9 = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_10a[] = {
+static const llc_conn_action_t llc_await_busy_actions_10a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3485,7 +3485,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_10a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_10b[] = {
+static const llc_conn_action_t llc_await_busy_actions_10b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3502,7 +3502,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_10b = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_11a[] = {
+static const llc_conn_action_t llc_await_busy_actions_11a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3517,7 +3517,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_11b[] = {
+static const llc_conn_action_t llc_await_busy_actions_11b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3532,7 +3532,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_11c[] = {
+static const llc_conn_action_t llc_await_busy_actions_11c[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3547,7 +3547,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11c = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_11d[] = {
+static const llc_conn_action_t llc_await_busy_actions_11d[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3562,7 +3562,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_11d = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_12a[] = {
+static const llc_conn_action_t llc_await_busy_actions_12a[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3578,7 +3578,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_12a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_12b[] = {
+static const llc_conn_action_t llc_await_busy_actions_12b[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3594,7 +3594,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_12b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_13[] = {
+static const llc_conn_action_t llc_await_busy_actions_13[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3610,7 +3610,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_13 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_14a[] = {
+static const llc_conn_action_t llc_await_busy_actions_14a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -3625,7 +3625,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_14a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_busy_actions_14b[] = {
+static const llc_conn_action_t llc_await_busy_actions_14b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -3640,7 +3640,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_14b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_busy_actions_15[] = {
+static const llc_conn_action_t llc_await_busy_actions_15[] = {
[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3656,12 +3656,12 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_15 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = {
+static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_await_busy_actions_16[] = {
+static const llc_conn_action_t llc_await_busy_actions_16[] = {
[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
[1] = llc_conn_ac_start_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -3731,13 +3731,13 @@ static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = {
/* ----------------- LLC_CONN_STATE_AWAIT_REJ transitions --------------- */
/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
-static llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = {
+static const llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_await_reject_actions_1_0[1];
+static const llc_conn_action_t llc_await_reject_actions_1_0[1];
static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
.ev = llc_conn_ev_data_req,
@@ -3747,7 +3747,7 @@ static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = {
};
/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
-static llc_conn_action_t llc_await_rejct_actions_1[] = {
+static const llc_conn_action_t llc_await_rejct_actions_1[] = {
[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
[1] = llc_conn_ac_set_data_flag_2,
[2] = NULL
@@ -3761,7 +3761,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_rejct_actions_2a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_2a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = NULL
@@ -3775,7 +3775,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_rejct_actions_2b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_2b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = NULL
@@ -3789,7 +3789,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_rejct_actions_3[] = {
+static const llc_conn_action_t llc_await_rejct_actions_3[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -3804,7 +3804,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_4[] = {
+static const llc_conn_action_t llc_await_rejct_actions_4[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_stop_p_timer,
@@ -3824,7 +3824,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_5a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_5a[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_xxx_x_set_0,
@@ -3842,7 +3842,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_5b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_5b[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_xxx_x_set_0,
@@ -3860,7 +3860,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = {
};
/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_6[] = {
+static const llc_conn_action_t llc_await_rejct_actions_6[] = {
[0] = llc_conn_ac_inc_vr_by_1,
[1] = llc_conn_ac_data_ind,
[2] = llc_conn_ac_send_rr_rsp_f_set_1,
@@ -3878,7 +3878,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_7a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_7a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3895,7 +3895,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_7b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_7b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3912,7 +3912,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = {
};
/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
-static llc_conn_action_t llc_await_rejct_actions_7c[] = {
+static const llc_conn_action_t llc_await_rejct_actions_7c[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -3929,7 +3929,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_8a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_8a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3944,7 +3944,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = {
};
/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_8b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_8b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3959,7 +3959,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_8c[] = {
+static const llc_conn_action_t llc_await_rejct_actions_8c[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3974,7 +3974,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_8d[] = {
+static const llc_conn_action_t llc_await_rejct_actions_8d[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_clear_remote_busy,
@@ -3989,7 +3989,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = {
};
/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_9a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_9a[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -4005,7 +4005,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = {
};
/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_9b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_9b[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -4021,7 +4021,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_10[] = {
+static const llc_conn_action_t llc_await_rejct_actions_10[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_stop_p_timer,
@@ -4037,7 +4037,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_11a[] = {
+static const llc_conn_action_t llc_await_rejct_actions_11a[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -4052,7 +4052,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
-static llc_conn_action_t llc_await_rejct_actions_11b[] = {
+static const llc_conn_action_t llc_await_rejct_actions_11b[] = {
[0] = llc_conn_ac_upd_nr_received,
[1] = llc_conn_ac_upd_vs,
[2] = llc_conn_ac_set_remote_busy,
@@ -4067,7 +4067,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = {
};
/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
-static llc_conn_action_t llc_await_rejct_actions_12[] = {
+static const llc_conn_action_t llc_await_rejct_actions_12[] = {
[0] = llc_conn_ac_send_rr_rsp_f_set_1,
[1] = llc_conn_ac_upd_nr_received,
[2] = llc_conn_ac_upd_vs,
@@ -4083,12 +4083,12 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = {
};
/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = {
+static const llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_await_rejct_actions_13[] = {
+static const llc_conn_action_t llc_await_rejct_actions_13[] = {
[0] = llc_conn_ac_send_rej_cmd_p_set_1,
[1] = llc_conn_ac_stop_p_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -4157,13 +4157,13 @@ static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = {
/* LLC_CONN_STATE_D_CONN transitions */
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
* cause_flag = 1 */
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_conflict,
[2] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_1[] = {
+static const llc_conn_action_t llc_d_conn_actions_1[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_ac_disc_confirm,
@@ -4181,13 +4181,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_1 = {
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_0,
[1] = llc_conn_ev_qlfy_set_status_conflict,
[2] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_1_1[] = {
+static const llc_conn_action_t llc_d_conn_actions_1_1[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_disc,
@@ -4204,14 +4204,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = {
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
* cause_flag = 1
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = llc_conn_ev_qlfy_cause_flag_eq_1,
[2] = llc_conn_ev_qlfy_set_status_disc,
[3] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_2[] = {
+static const llc_conn_action_t llc_d_conn_actions_2[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_disc_confirm,
[2] = llc_conn_disc,
@@ -4228,14 +4228,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_2 = {
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = llc_conn_ev_qlfy_cause_flag_eq_0,
[2] = llc_conn_ev_qlfy_set_status_disc,
[3] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_2_1[] = {
+static const llc_conn_action_t llc_d_conn_actions_2_1[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4249,7 +4249,7 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = {
};
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_d_conn_actions_3[] = {
+static const llc_conn_action_t llc_d_conn_actions_3[] = {
[0] = llc_conn_ac_send_ua_rsp_f_set_p,
[1] = NULL,
};
@@ -4264,13 +4264,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_3 = {
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
* cause_flag = 1
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_disc,
[2] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_4[] = {
+static const llc_conn_action_t llc_d_conn_actions_4[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_disc_confirm,
[2] = llc_conn_disc,
@@ -4287,13 +4287,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_4 = {
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_0,
[1] = llc_conn_ev_qlfy_set_status_disc,
[2] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_4_1[] = {
+static const llc_conn_action_t llc_d_conn_actions_4_1[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4310,13 +4310,13 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = {
* State transition for
* LLC_CONN_EV_DATA_CONN_REQ event
*/
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_d_conn_actions_5[1];
+static const llc_conn_action_t llc_d_conn_actions_5[1];
static struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
.ev = llc_conn_ev_data_req,
@@ -4326,12 +4326,12 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_5 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_6[] = {
+static const llc_conn_action_t llc_d_conn_actions_6[] = {
[0] = llc_conn_ac_send_disc_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -4346,14 +4346,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = llc_conn_ev_qlfy_cause_flag_eq_1,
[2] = llc_conn_ev_qlfy_set_status_failed,
[3] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_7[] = {
+static const llc_conn_action_t llc_d_conn_actions_7[] = {
[0] = llc_conn_ac_disc_confirm,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4367,14 +4367,14 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */
-static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = {
+static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = llc_conn_ev_qlfy_cause_flag_eq_0,
[2] = llc_conn_ev_qlfy_set_status_failed,
[3] = NULL,
};
-static llc_conn_action_t llc_d_conn_actions_8[] = {
+static const llc_conn_action_t llc_d_conn_actions_8[] = {
[0] = llc_conn_disc,
[1] = NULL,
};
@@ -4411,7 +4411,7 @@ static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = {
/* LLC_CONN_STATE_RESET transitions */
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_rst_actions_1[] = {
+static const llc_conn_action_t llc_rst_actions_1[] = {
[0] = llc_conn_ac_set_vs_0,
[1] = llc_conn_ac_set_vr_0,
[2] = llc_conn_ac_set_s_flag_1,
@@ -4429,14 +4429,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_1 = {
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
* cause_flag = 1
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = llc_conn_ev_qlfy_cause_flag_eq_1,
[2] = llc_conn_ev_qlfy_set_status_conn,
[3] = NULL,
};
-static llc_conn_action_t llc_rst_actions_2[] = {
+static const llc_conn_action_t llc_rst_actions_2[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -4457,14 +4457,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_2 = {
/* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = {
[0] = llc_conn_ev_qlfy_p_flag_eq_f,
[1] = llc_conn_ev_qlfy_cause_flag_eq_0,
[2] = llc_conn_ev_qlfy_set_status_rst_done,
[3] = NULL,
};
-static llc_conn_action_t llc_rst_actions_2_1[] = {
+static const llc_conn_action_t llc_rst_actions_2_1[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_ac_set_vs_0,
[2] = llc_conn_ac_set_vr_0,
@@ -4483,13 +4483,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_2_1 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = {
[0] = llc_conn_ev_qlfy_s_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_rst_done,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_3[] = {
+static const llc_conn_action_t llc_rst_actions_3[] = {
[0] = llc_conn_ac_set_p_flag_0,
[1] = llc_conn_ac_set_remote_busy_0,
[2] = NULL,
@@ -4505,12 +4505,12 @@ static struct llc_conn_state_trans llc_rst_state_trans_3 = {
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
* cause_flag = 1
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_disc,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_4[] = {
+static const llc_conn_action_t llc_rst_actions_4[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_ac_disc_ind,
[2] = llc_conn_ac_stop_ack_timer,
@@ -4528,13 +4528,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_4 = {
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_0,
[1] = llc_conn_ev_qlfy_set_status_refuse,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_4_1[] = {
+static const llc_conn_action_t llc_rst_actions_4_1[] = {
[0] = llc_conn_ac_send_dm_rsp_f_set_p,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_disc,
@@ -4551,13 +4551,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_4_1 = {
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
* cause_flag = 1
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_1,
[1] = llc_conn_ev_qlfy_set_status_disc,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_5[] = {
+static const llc_conn_action_t llc_rst_actions_5[] = {
[0] = llc_conn_ac_disc_ind,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_disc,
@@ -4574,13 +4574,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_5 = {
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event,
* cause_flag = 0
*/
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = {
[0] = llc_conn_ev_qlfy_cause_flag_eq_0,
[1] = llc_conn_ev_qlfy_set_status_refuse,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_5_1[] = {
+static const llc_conn_action_t llc_rst_actions_5_1[] = {
[0] = llc_conn_ac_stop_ack_timer,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4594,13 +4594,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_5_1 = {
};
/* State transitions for DATA_CONN_REQ event */
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_rst_actions_6[1];
+static const llc_conn_action_t llc_rst_actions_6[1];
static struct llc_conn_state_trans llc_rst_state_trans_6 = {
.ev = llc_conn_ev_data_req,
@@ -4610,13 +4610,13 @@ static struct llc_conn_state_trans llc_rst_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = llc_conn_ev_qlfy_s_flag_eq_0,
[2] = NULL,
};
-static llc_conn_action_t llc_rst_actions_7[] = {
+static const llc_conn_action_t llc_rst_actions_7[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -4631,14 +4631,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = llc_conn_ev_qlfy_s_flag_eq_0,
[2] = llc_conn_ev_qlfy_cause_flag_eq_1,
[3] = llc_conn_ev_qlfy_set_status_failed,
[4] = NULL,
};
-static llc_conn_action_t llc_rst_actions_8[] = {
+static const llc_conn_action_t llc_rst_actions_8[] = {
[0] = llc_conn_ac_disc_ind,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4652,14 +4652,14 @@ static struct llc_conn_state_trans llc_rst_state_trans_8 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = {
+static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = llc_conn_ev_qlfy_s_flag_eq_0,
[2] = llc_conn_ev_qlfy_cause_flag_eq_0,
[3] = llc_conn_ev_qlfy_set_status_failed,
[4] = NULL,
};
-static llc_conn_action_t llc_rst_actions_8_1[] = {
+static const llc_conn_action_t llc_rst_actions_8_1[] = {
[0] = llc_conn_ac_disc_ind,
[1] = llc_conn_disc,
[2] = NULL,
@@ -4698,7 +4698,7 @@ static struct llc_conn_state_trans *llc_rst_state_transitions[] = {
/* LLC_CONN_STATE_ERROR transitions */
/* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_error_actions_1[] = {
+static const llc_conn_action_t llc_error_actions_1[] = {
[0] = llc_conn_ac_set_vs_0,
[1] = llc_conn_ac_set_vr_0,
[2] = llc_conn_ac_send_ua_rsp_f_set_p,
@@ -4718,7 +4718,7 @@ static struct llc_conn_state_trans llc_error_state_trans_1 = {
};
/* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_error_actions_2[] = {
+static const llc_conn_action_t llc_error_actions_2[] = {
[0] = llc_conn_ac_send_ua_rsp_f_set_p,
[1] = llc_conn_ac_disc_ind,
[2] = llc_conn_ac_stop_ack_timer,
@@ -4734,7 +4734,7 @@ static struct llc_conn_state_trans llc_error_state_trans_2 = {
};
/* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */
-static llc_conn_action_t llc_error_actions_3[] = {
+static const llc_conn_action_t llc_error_actions_3[] = {
[0] = llc_conn_ac_disc_ind,
[1] = llc_conn_ac_stop_ack_timer,
[2] = llc_conn_disc,
@@ -4749,7 +4749,7 @@ static struct llc_conn_state_trans llc_error_state_trans_3 = {
};
/* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */
-static llc_conn_action_t llc_error_actions_4[] = {
+static const llc_conn_action_t llc_error_actions_4[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_set_retry_cnt_0,
@@ -4765,7 +4765,7 @@ static struct llc_conn_state_trans llc_error_state_trans_4 = {
};
/* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */
-static llc_conn_action_t llc_error_actions_5[] = {
+static const llc_conn_action_t llc_error_actions_5[] = {
[0] = llc_conn_ac_resend_frmr_rsp_f_set_p,
[1] = NULL,
};
@@ -4786,12 +4786,12 @@ static struct llc_conn_state_trans llc_error_state_trans_6 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = {
+static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_error_actions_7[] = {
+static const llc_conn_action_t llc_error_actions_7[] = {
[0] = llc_conn_ac_resend_frmr_rsp_f_set_0,
[1] = llc_conn_ac_start_ack_timer,
[2] = llc_conn_ac_inc_retry_cnt_by_1,
@@ -4806,12 +4806,12 @@ static struct llc_conn_state_trans llc_error_state_trans_7 = {
};
/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
-static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = {
+static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = {
[0] = llc_conn_ev_qlfy_retry_cnt_gte_n2,
[1] = NULL,
};
-static llc_conn_action_t llc_error_actions_8[] = {
+static const llc_conn_action_t llc_error_actions_8[] = {
[0] = llc_conn_ac_send_sabme_cmd_p_set_x,
[1] = llc_conn_ac_set_s_flag_0,
[2] = llc_conn_ac_start_ack_timer,
@@ -4828,13 +4828,13 @@ static struct llc_conn_state_trans llc_error_state_trans_8 = {
};
/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
-static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = {
+static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = {
[0] = llc_conn_ev_qlfy_set_status_refuse,
[1] = NULL,
};
/* just one member, NULL, .bss zeroes it */
-static llc_conn_action_t llc_error_actions_9[1];
+static const llc_conn_action_t llc_error_actions_9[1];
static struct llc_conn_state_trans llc_error_state_trans_9 = {
.ev = llc_conn_ev_data_req,
@@ -4866,7 +4866,7 @@ static struct llc_conn_state_trans *llc_error_state_transitions[] = {
/* LLC_CONN_STATE_TEMP transitions */
/* State transitions for LLC_CONN_EV_DISC_REQ event */
-static llc_conn_action_t llc_temp_actions_1[] = {
+static const llc_conn_action_t llc_temp_actions_1[] = {
[0] = llc_conn_ac_stop_all_timers,
[1] = llc_conn_ac_send_disc_cmd_p_set_x,
[2] = llc_conn_disc,
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 42dc2e45c921..81a61fce3afb 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -406,7 +406,7 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
struct sk_buff *skb)
{
struct llc_conn_state_trans **next_trans;
- llc_conn_ev_qfyr_t *next_qualifier;
+ const llc_conn_ev_qfyr_t *next_qualifier;
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
struct llc_sock *llc = llc_sk(sk);
struct llc_conn_state *curr_state =
@@ -454,7 +454,7 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
struct sk_buff *skb)
{
int rc = 0;
- llc_conn_action_t *next_action;
+ const llc_conn_action_t *next_action;
for (next_action = trans->ev_actions;
next_action && *next_action; next_action++) {
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 25c31c0a3fdb..6daf391b3e84 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <net/llc_if.h>
#include <net/llc_sap.h>
#include <net/llc_s_ev.h>
diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c
index 135f7d80069e..308c616883a4 100644
--- a/net/llc/llc_s_st.c
+++ b/net/llc/llc_s_st.c
@@ -29,7 +29,7 @@ static struct llc_sap_state_trans llc_sap_state_trans_end;
/* state LLC_SAP_STATE_INACTIVE transition for
* LLC_SAP_EV_ACTIVATION_REQ event
*/
-static llc_sap_action_t llc_sap_inactive_state_actions_1[] = {
+static const llc_sap_action_t llc_sap_inactive_state_actions_1[] = {
[0] = llc_sap_action_report_status,
[1] = NULL,
};
@@ -47,7 +47,7 @@ static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_UI event */
-static llc_sap_action_t llc_sap_active_state_actions_1[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_1[] = {
[0] = llc_sap_action_unitdata_ind,
[1] = NULL,
};
@@ -59,7 +59,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_1 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_UNITDATA_REQ event */
-static llc_sap_action_t llc_sap_active_state_actions_2[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_2[] = {
[0] = llc_sap_action_send_ui,
[1] = NULL,
};
@@ -71,7 +71,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_2 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_XID_REQ event */
-static llc_sap_action_t llc_sap_active_state_actions_3[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_3[] = {
[0] = llc_sap_action_send_xid_c,
[1] = NULL,
};
@@ -83,7 +83,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_3 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_C event */
-static llc_sap_action_t llc_sap_active_state_actions_4[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_4[] = {
[0] = llc_sap_action_send_xid_r,
[1] = NULL,
};
@@ -95,7 +95,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_4 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_XID_R event */
-static llc_sap_action_t llc_sap_active_state_actions_5[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_5[] = {
[0] = llc_sap_action_xid_ind,
[1] = NULL,
};
@@ -107,7 +107,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_5 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_TEST_REQ event */
-static llc_sap_action_t llc_sap_active_state_actions_6[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_6[] = {
[0] = llc_sap_action_send_test_c,
[1] = NULL,
};
@@ -119,7 +119,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_6 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_C event */
-static llc_sap_action_t llc_sap_active_state_actions_7[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_7[] = {
[0] = llc_sap_action_send_test_r,
[1] = NULL,
};
@@ -131,7 +131,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_7 = {
};
/* state LLC_SAP_STATE_ACTIVE transition for LLC_SAP_EV_RX_TEST_R event */
-static llc_sap_action_t llc_sap_active_state_actions_8[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_8[] = {
[0] = llc_sap_action_test_ind,
[1] = NULL,
};
@@ -145,7 +145,7 @@ static struct llc_sap_state_trans llc_sap_active_state_trans_8 = {
/* state LLC_SAP_STATE_ACTIVE transition for
* LLC_SAP_EV_DEACTIVATION_REQ event
*/
-static llc_sap_action_t llc_sap_active_state_actions_9[] = {
+static const llc_sap_action_t llc_sap_active_state_actions_9[] = {
[0] = llc_sap_action_report_status,
[1] = NULL,
};
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 06033f6c845f..d0e1e804ebd7 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -146,7 +146,7 @@ static int llc_exec_sap_trans_actions(struct llc_sap *sap,
struct sk_buff *skb)
{
int rc = 0;
- llc_sap_action_t *next_action = trans->ev_actions;
+ const llc_sap_action_t *next_action = trans->ev_actions;
for (; next_action && *next_action; next_action++)
if ((*next_action)(sap, skb))
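The llc hunks above only add const to the NULL-terminated action and qualifier tables; the dispatch loops in llc_conn.c and llc_sap.c (visible in the hunks just before this point) walk them read-only, so constifying the arrays changes nothing for the walkers and lets the tables live in read-only data. A minimal standalone sketch of that table-and-walker pattern, using hypothetical names rather than the kernel's:

#include <stdio.h>

typedef int (*action_t)(int arg);

static int act_a(int arg) { printf("act_a(%d)\n", arg); return 0; }
static int act_b(int arg) { printf("act_b(%d)\n", arg); return 0; }

/* NULL-terminated, read-only action table, like the llc_*_actions_* arrays */
static const action_t demo_actions[] = {
	act_a,
	act_b,
	NULL,
};

static int run_actions(const action_t *next, int arg)
{
	int rc = 0;

	/* same walk as llc_exec_conn_trans_actions()/llc_exec_sap_trans_actions() */
	for (; next && *next; next++)
		if ((*next)(arg))
			rc = 1;
	return rc;
}

int main(void)
{
	return run_actions(demo_actions, 42);
}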
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index aeb6a483b3bc..75cc6801a431 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL_HT
---help---
This option enables the 'minstrel_ht' TX rate control algorithm
+config MAC80211_RC_MINSTREL_VHT
+ bool "Minstrel 802.11ac support" if EXPERT
+ depends on MAC80211_RC_MINSTREL_HT
+ default n
+ ---help---
+ This option enables VHT in the 'minstrel_ht' TX rate control algorithm
+
choice
prompt "Default rate control algorithm"
depends on MAC80211_HAS_RC
@@ -169,6 +176,17 @@ config MAC80211_HT_DEBUG
Do not select this option.
+config MAC80211_OCB_DEBUG
+ bool "Verbose OCB debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ Selecting this option causes mac80211 to print out
+ very verbose OCB debugging messages. It should not
+ be selected on production systems as those messages
+ are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_IBSS_DEBUG
bool "Verbose IBSS debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 7273d2796dd1..e53671b1105e 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -27,7 +27,8 @@ mac80211-y := \
event.o \
chan.o \
trace.o mlme.o \
- tdls.o
+ tdls.o \
+ ocb.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index d6986f3aa5c4..a360c15cc978 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,11 +149,6 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}
-static inline int ieee80211_ac_from_tid(int tid)
-{
- return ieee802_1d_to_ac[tid & 7];
-}
-
/*
* When multiple aggregation sessions on multiple stations
* are being created/destroyed simultaneously, we need to
@@ -514,6 +509,10 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
struct tid_ampdu_tx *tid_tx;
int ret = 0;
+ if (WARN(sta->reserved_tid == tid,
+ "Requested to start BA session on reserved tid=%d", tid))
+ return -EINVAL;
+
trace_api_start_tx_ba_session(pubsta, tid);
if (WARN_ON_ONCE(!local->ops->ampdu_action))
@@ -770,6 +769,9 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
goto unlock;
}
+ WARN(sta->reserved_tid == tid,
+ "Requested to stop BA session on reserved tid=%d", tid);
+
if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
/* already in progress stopping it */
ret = 0;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 343da1e35025..e75d5c53e97b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -20,6 +20,7 @@
#include "cfg.h"
#include "rate.h"
#include "mesh.h"
+#include "wme.h"
static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
const char *name,
@@ -190,7 +191,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
* receive the key. When wpa_supplicant has roamed
* using FT, it attempts to set the key before
* association has completed, this rejects that attempt
- * so it will set the key again after assocation.
+ * so it will set the key again after association.
*
* TODO: accept the key if we have a station entry and
* add it to the device after the station.
@@ -229,6 +230,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case NUM_NL80211_IFTYPES:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_OCB:
/* shouldn't happen */
WARN_ON_ONCE(1);
break;
@@ -1040,6 +1042,13 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
}
+ /* mark TDLS channel switch support, if the AP allows it */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+ !sdata->u.mgd.tdls_chan_switch_prohibited &&
+ params->ext_capab_len >= 4 &&
+ params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
+ set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
+
if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
sta->sta.uapsd_queues = params->uapsd_queues;
sta->sta.max_sp = params->max_sp;
@@ -1225,14 +1234,14 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct ieee80211_sub_if_data *sdata;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (mac)
- return sta_info_destroy_addr_bss(sdata, mac);
+ if (params->mac)
+ return sta_info_destroy_addr_bss(sdata, params->mac);
sta_info_flush(sdata);
return 0;
@@ -1516,6 +1525,57 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp,
+ struct mpath_info *pinfo)
+{
+ memset(pinfo, 0, sizeof(*pinfo));
+ memcpy(mpp, mpath->mpp, ETH_ALEN);
+
+ pinfo->generation = mpp_paths_generation;
+}
+
+static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev,
+ u8 *dst, u8 *mpp, struct mpath_info *pinfo)
+
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct mesh_path *mpath;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ rcu_read_lock();
+ mpath = mpp_path_lookup(sdata, dst);
+ if (!mpath) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+ memcpy(dst, mpath->dst, ETH_ALEN);
+ mpp_set_pinfo(mpath, mpp, pinfo);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int ieee80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *dst, u8 *mpp,
+ struct mpath_info *pinfo)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct mesh_path *mpath;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ rcu_read_lock();
+ mpath = mpp_path_lookup_by_idx(sdata, idx);
+ if (!mpath) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+ memcpy(dst, mpath->dst, ETH_ALEN);
+ mpp_set_pinfo(mpath, mpp, pinfo);
+ rcu_read_unlock();
+ return 0;
+}
+
static int ieee80211_get_mesh_config(struct wiphy *wiphy,
struct net_device *dev,
struct mesh_config *conf)
@@ -1966,6 +2026,17 @@ static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev));
}
+static int ieee80211_join_ocb(struct wiphy *wiphy, struct net_device *dev,
+ struct ocb_setup *setup)
+{
+ return ieee80211_ocb_join(IEEE80211_DEV_TO_SUB_IF(dev), setup);
+}
+
+static int ieee80211_leave_ocb(struct wiphy *wiphy, struct net_device *dev)
+{
+ return ieee80211_ocb_leave(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
int rate[IEEE80211_NUM_BANDS])
{
@@ -2081,6 +2152,9 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ if (local->ops->get_txpower)
+ return drv_get_txpower(local, sdata, dbm);
+
if (!local->use_chanctx)
*dbm = local->hw.conf.power_level;
else
@@ -2850,11 +2924,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
if (sdata->reserved_ready)
return 0;
- err = ieee80211_vif_use_reserved_context(sdata);
- if (err)
- return err;
-
- return 0;
+ return ieee80211_vif_use_reserved_context(sdata);
}
if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
@@ -2868,7 +2938,6 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
return err;
ieee80211_bss_info_change_notify(sdata, changed);
- cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
if (sdata->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
@@ -2876,6 +2945,12 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
sdata->csa_block_tx = false;
}
+ err = drv_post_channel_switch(sdata);
+ if (err)
+ return err;
+
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
+
return 0;
}
@@ -3053,9 +3128,11 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_channel_switch ch_switch;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
- int err, changed = 0;
+ u32 changed = 0;
+ int err;
sdata_assert_lock(sdata);
lockdep_assert_held(&local->mtx);
@@ -3088,6 +3165,16 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
+ ch_switch.timestamp = 0;
+ ch_switch.device_timestamp = 0;
+ ch_switch.block_tx = params->block_tx;
+ ch_switch.chandef = params->chandef;
+ ch_switch.count = params->count;
+
+ err = drv_pre_channel_switch(sdata, &ch_switch);
+ if (err)
+ goto out;
+
err = ieee80211_vif_reserve_chanctx(sdata, &params->chandef,
chanctx->mode,
params->radar_required);
@@ -3115,6 +3202,9 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef,
+ params->count);
+
if (changed) {
ieee80211_bss_info_change_notify(sdata, changed);
drv_channel_switch_beacon(sdata, &params->chandef);
@@ -3431,6 +3521,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_NL80211_FRAME_TX;
+ info->band = band;
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
skb->priority = 7;
@@ -3438,7 +3529,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
nullfunc->qos_ctrl = cpu_to_le16(7);
local_bh_disable();
- ieee80211_xmit(sdata, skb, band);
+ ieee80211_xmit(sdata, skb);
local_bh_enable();
rcu_read_unlock();
@@ -3521,6 +3612,76 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
return ret;
}
+static int ieee80211_add_tx_ts(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer, u8 up,
+ u16 admitted_time)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ int ac = ieee802_1d_to_ac[up];
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (!(sdata->wmm_acm & BIT(up)))
+ return -EINVAL;
+
+ if (ifmgd->tx_tspec[ac].admitted_time)
+ return -EBUSY;
+
+ if (admitted_time) {
+ ifmgd->tx_tspec[ac].admitted_time = 32 * admitted_time;
+ ifmgd->tx_tspec[ac].tsid = tsid;
+ ifmgd->tx_tspec[ac].up = up;
+ }
+
+ return 0;
+}
+
+static int ieee80211_del_tx_ts(struct wiphy *wiphy, struct net_device *dev,
+ u8 tsid, const u8 *peer)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+
+ /* skip unused entries */
+ if (!tx_tspec->admitted_time)
+ continue;
+
+ if (tx_tspec->tsid != tsid)
+ continue;
+
+ /* due to this new packets will be reassigned to non-ACM ACs */
+ tx_tspec->up = -1;
+
+ /* Make sure that all packets have been sent to avoid to
+ * restore the QoS params on packets that are still on the
+ * queues.
+ */
+ synchronize_net();
+ ieee80211_flush_queues(local, sdata);
+
+ /* restore the normal QoS parameters
+ * (unconditionally to avoid races)
+ */
+ tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE;
+ tx_tspec->downgraded = false;
+ ieee80211_sta_handle_tspec_ac_params(sdata);
+
+ /* finally clear all the data */
+ memset(tx_tspec, 0, sizeof(*tx_tspec));
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3547,11 +3708,15 @@ const struct cfg80211_ops mac80211_config_ops = {
.change_mpath = ieee80211_change_mpath,
.get_mpath = ieee80211_get_mpath,
.dump_mpath = ieee80211_dump_mpath,
+ .get_mpp = ieee80211_get_mpp,
+ .dump_mpp = ieee80211_dump_mpp,
.update_mesh_config = ieee80211_update_mesh_config,
.get_mesh_config = ieee80211_get_mesh_config,
.join_mesh = ieee80211_join_mesh,
.leave_mesh = ieee80211_leave_mesh,
#endif
+ .join_ocb = ieee80211_join_ocb,
+ .leave_ocb = ieee80211_leave_ocb,
.change_bss = ieee80211_change_bss,
.set_txq_params = ieee80211_set_txq_params,
.set_monitor_channel = ieee80211_set_monitor_channel,
@@ -3587,6 +3752,8 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_rekey_data = ieee80211_set_rekey_data,
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
+ .tdls_channel_switch = ieee80211_tdls_channel_switch,
+ .tdls_cancel_channel_switch = ieee80211_tdls_cancel_channel_switch,
.probe_client = ieee80211_probe_client,
.set_noack_map = ieee80211_set_noack_map,
#ifdef CONFIG_PM
@@ -3597,4 +3764,6 @@ const struct cfg80211_ops mac80211_config_ops = {
.channel_switch = ieee80211_channel_switch,
.set_qos_map = ieee80211_set_qos_map,
.set_ap_chanwidth = ieee80211_set_ap_chanwidth,
+ .add_tx_ts = ieee80211_add_tx_ts,
+ .del_tx_ts = ieee80211_del_tx_ts,
};
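The new ieee80211_add_tx_ts() above stores 32 * admitted_time because the admitted (medium) time carried over the air is counted in 32 microsecond units, while the local tx_tspec field keeps plain microseconds (see the "in usecs, unlike over the air" comment later in the ieee80211_i.h hunk). A small sketch of that conversion, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the conversion in ieee80211_add_tx_ts():
 * the over-the-air admitted time is in 32 us units, the local field is
 * kept in plain microseconds.
 */
static uint32_t admitted_time_us(uint16_t admitted_time_32us)
{
	return 32u * admitted_time_32us;
}

int main(void)
{
	/* e.g. an admitted time of 1000 units is 32000 us per service interval */
	printf("%u us\n", admitted_time_us(1000));
	return 0;
}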
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 4c74e8da64b9..da1c12c34487 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -270,6 +270,7 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
width = vif->bss_conf.chandef.width;
break;
case NL80211_IFTYPE_UNSPECIFIED:
@@ -674,6 +675,7 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
break;
default:
WARN_ON_ONCE(1);
@@ -909,6 +911,7 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
ieee80211_queue_work(&sdata->local->hw,
&sdata->csa_finalize_work);
break;
@@ -929,6 +932,21 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
}
}
+static void
+ieee80211_vif_update_chandef(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_sub_if_data *vlan;
+
+ sdata->vif.bss_conf.chandef = *chandef;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return;
+
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ vlan->vif.bss_conf.chandef = *chandef;
+}
+
static int
ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
{
@@ -991,7 +1009,11 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
changed = BSS_CHANGED_BANDWIDTH;
- sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
+ ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
+
+ ieee80211_recalc_smps_chanctx(local, new_ctx);
+ ieee80211_recalc_radar_chanctx(local, new_ctx);
+ ieee80211_recalc_chanctx_min_def(local, new_ctx);
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);
@@ -1333,7 +1355,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
sdata->reserved_chandef.width)
changed = BSS_CHANGED_BANDWIDTH;
- sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
+ ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
if (changed)
ieee80211_bss_info_change_notify(sdata,
changed);
@@ -1504,7 +1526,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
goto out;
}
- sdata->vif.bss_conf.chandef = *chandef;
+ ieee80211_vif_update_chandef(sdata, chandef);
ret = ieee80211_assign_vif_chanctx(sdata, ctx);
if (ret) {
@@ -1634,7 +1656,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
}
break;
case IEEE80211_CHANCTX_WILL_BE_REPLACED:
- /* TODO: Perhaps the bandwith change could be treated as a
+ /* TODO: Perhaps the bandwidth change could be treated as a
* reservation itself? */
ret = -EBUSY;
goto out;
@@ -1646,7 +1668,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
break;
}
- sdata->vif.bss_conf.chandef = *chandef;
+ ieee80211_vif_update_chandef(sdata, chandef);
ieee80211_recalc_chanctx_chantype(local, ctx);
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 493d68061f0c..1956b3115dd5 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -2,6 +2,12 @@
#define __MAC80211_DEBUG_H
#include <net/cfg80211.h>
+#ifdef CONFIG_MAC80211_OCB_DEBUG
+#define MAC80211_OCB_DEBUG 1
+#else
+#define MAC80211_OCB_DEBUG 0
+#endif
+
#ifdef CONFIG_MAC80211_IBSS_DEBUG
#define MAC80211_IBSS_DEBUG 1
#else
@@ -131,6 +137,10 @@ do { \
_sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \
sdata, fmt, ##__VA_ARGS__)
+#define ocb_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_OCB_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
#define ibss_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_IBSS_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 1521cabad3d6..5523b94c7c90 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -300,10 +300,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&sdata->local->key_mtx);
- if (sdata->debugfs.default_unicast_key) {
- debugfs_remove(sdata->debugfs.default_unicast_key);
- sdata->debugfs.default_unicast_key = NULL;
- }
+ debugfs_remove(sdata->debugfs.default_unicast_key);
+ sdata->debugfs.default_unicast_key = NULL;
if (sdata->default_unicast_key) {
key = key_mtx_dereference(sdata->local,
@@ -314,10 +312,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
sdata->vif.debugfs_dir, buf);
}
- if (sdata->debugfs.default_multicast_key) {
- debugfs_remove(sdata->debugfs.default_multicast_key);
- sdata->debugfs.default_multicast_key = NULL;
- }
+ debugfs_remove(sdata->debugfs.default_multicast_key);
+ sdata->debugfs.default_multicast_key = NULL;
if (sdata->default_multicast_key) {
key = key_mtx_dereference(sdata->local,
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index bafe48916229..94c70091bbd7 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -74,7 +74,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
int res = scnprintf(buf, sizeof(buf),
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
TEST(PS_DRIVER), TEST(AUTHORIZED),
TEST(SHORT_PREAMBLE),
@@ -82,10 +82,11 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
TEST(WDS), TEST(CLEAR_PS_FILT),
TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
- TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
- TEST(INSERTED), TEST(RATE_CONTROL),
- TEST(TOFFSET_KNOWN), TEST(MPSP_OWNER),
- TEST(MPSP_RECIPIENT));
+ TEST(TDLS_PEER_AUTH), TEST(TDLS_INITIATOR),
+ TEST(TDLS_CHAN_SWITCH), TEST(TDLS_OFF_CHANNEL),
+ TEST(4ADDR_EVENT), TEST(INSERTED),
+ TEST(RATE_CONTROL), TEST(TOFFSET_KNOWN),
+ TEST(MPSP_OWNER), TEST(MPSP_RECIPIENT));
#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 196d48c68134..2ebc9ead9695 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -214,7 +214,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
BSS_CHANGED_BEACON_ENABLED) &&
sdata->vif.type != NL80211_IFTYPE_AP &&
sdata->vif.type != NL80211_IFTYPE_ADHOC &&
- sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+ sdata->vif.type != NL80211_IFTYPE_OCB))
return;
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
@@ -379,23 +380,26 @@ static inline int drv_sched_scan_stop(struct ieee80211_local *local,
return ret;
}
-static inline void drv_sw_scan_start(struct ieee80211_local *local)
+static inline void drv_sw_scan_start(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *mac_addr)
{
might_sleep();
- trace_drv_sw_scan_start(local);
+ trace_drv_sw_scan_start(local, sdata, mac_addr);
if (local->ops->sw_scan_start)
- local->ops->sw_scan_start(&local->hw);
+ local->ops->sw_scan_start(&local->hw, &sdata->vif, mac_addr);
trace_drv_return_void(local);
}
-static inline void drv_sw_scan_complete(struct ieee80211_local *local)
+static inline void drv_sw_scan_complete(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_sw_scan_complete(local);
+ trace_drv_sw_scan_complete(local, sdata);
if (local->ops->sw_scan_complete)
- local->ops->sw_scan_complete(&local->hw);
+ local->ops->sw_scan_complete(&local->hw, &sdata->vif);
trace_drv_return_void(local);
}
@@ -620,6 +624,21 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_sta_rate_tbl_update(local, sdata, sta);
+ if (local->ops->sta_rate_tbl_update)
+ local->ops->sta_rate_tbl_update(&local->hw, &sdata->vif, sta);
+
+ trace_drv_return_void(local);
+}
+
static inline int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -631,6 +650,12 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
+ if (WARN_ONCE(params->cw_min == 0 ||
+ params->cw_min > params->cw_max,
+ "%s: invalid CW_min/CW_max: %d/%d\n",
+ sdata->name, params->cw_min, params->cw_max))
+ return -EINVAL;
+
trace_drv_conf_tx(local, sdata, ac, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, &sdata->vif,
@@ -764,12 +789,13 @@ static inline void drv_flush(struct ieee80211_local *local,
}
static inline void drv_channel_switch(struct ieee80211_local *local,
- struct ieee80211_channel_switch *ch_switch)
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch)
{
might_sleep();
- trace_drv_channel_switch(local, ch_switch);
- local->ops->channel_switch(&local->hw, ch_switch);
+ trace_drv_channel_switch(local, sdata, ch_switch);
+ local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch);
trace_drv_return_void(local);
}
@@ -1144,13 +1170,15 @@ static inline void drv_stop_ap(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline void drv_restart_complete(struct ieee80211_local *local)
+static inline void
+drv_reconfig_complete(struct ieee80211_local *local,
+ enum ieee80211_reconfig_type reconfig_type)
{
might_sleep();
- trace_drv_restart_complete(local);
- if (local->ops->restart_complete)
- local->ops->restart_complete(&local->hw);
+ trace_drv_reconfig_complete(local, reconfig_type);
+ if (local->ops->reconfig_complete)
+ local->ops->reconfig_complete(&local->hw, reconfig_type);
trace_drv_return_void(local);
}
@@ -1196,6 +1224,40 @@ drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
}
}
+static inline int
+drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct ieee80211_local *local = sdata->local;
+ int ret = 0;
+
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_pre_channel_switch(local, sdata, ch_switch);
+ if (local->ops->pre_channel_switch)
+ ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif,
+ ch_switch);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
+static inline int
+drv_post_channel_switch(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ int ret = 0;
+
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_post_channel_switch(local, sdata);
+ if (local->ops->post_channel_switch)
+ ret = local->ops->post_channel_switch(&local->hw, &sdata->vif);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
static inline int drv_join_ibss(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
@@ -1238,4 +1300,71 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
return ret;
}
+static inline int drv_get_txpower(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata, int *dbm)
+{
+ int ret;
+
+ if (!local->ops->get_txpower)
+ return -EOPNOTSUPP;
+
+ ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm);
+ trace_drv_get_txpower(local, sdata, *dbm, ret);
+
+ return ret;
+}
+
+static inline int
+drv_tdls_channel_switch(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+ int ret;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ if (!local->ops->tdls_channel_switch)
+ return -EOPNOTSUPP;
+
+ trace_drv_tdls_channel_switch(local, sdata, sta, oper_class, chandef);
+ ret = local->ops->tdls_channel_switch(&local->hw, &sdata->vif, sta,
+ oper_class, chandef, tmpl_skb,
+ ch_sw_tm_ie);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
+static inline void
+drv_tdls_cancel_channel_switch(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ if (!local->ops->tdls_cancel_channel_switch)
+ return;
+
+ trace_drv_tdls_cancel_channel_switch(local, sdata, sta);
+ local->ops->tdls_cancel_channel_switch(&local->hw, &sdata->vif, sta);
+ trace_drv_return_void(local);
+}
+
+static inline void
+drv_tdls_recv_channel_switch(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tdls_ch_sw_params *params)
+{
+ trace_drv_tdls_recv_channel_switch(local, sdata, params);
+ if (local->ops->tdls_recv_channel_switch)
+ local->ops->tdls_recv_channel_switch(&local->hw, &sdata->vif,
+ params);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
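The drv_*() wrappers added above (drv_get_txpower(), drv_pre_channel_switch(), the TDLS channel-switch hooks) share one shape: bail out early when the optional driver op is absent, otherwise trace, call the op, and trace the result. A minimal sketch of that optional-callback pattern under assumed, non-kernel types (demo_local, demo_ops, and the printf tracepoint stand-in are invented here):

#include <errno.h>
#include <stdio.h>

struct demo_ops {
	int (*get_txpower)(void *hw, int *dbm);
};

struct demo_local {
	void *hw;
	const struct demo_ops *ops;
};

/* Same shape as the new drv_get_txpower(): skip drivers that do not
 * implement the op, otherwise call it and report the outcome.
 */
static int demo_drv_get_txpower(struct demo_local *local, int *dbm)
{
	int ret;

	if (!local->ops->get_txpower)
		return -EOPNOTSUPP;	/* optional op not provided by the driver */

	ret = local->ops->get_txpower(local->hw, dbm);
	printf("trace: get_txpower -> %d (%d dBm)\n", ret, *dbm);	/* tracepoint stand-in */
	return ret;
}

static int fake_get_txpower(void *hw, int *dbm)
{
	(void)hw;
	*dbm = 20;	/* pretend the hardware reports 20 dBm */
	return 0;
}

int main(void)
{
	const struct demo_ops ops = { .get_txpower = fake_get_txpower };
	struct demo_local local = { .hw = NULL, .ops = &ops };
	int dbm = 0;

	return demo_drv_get_txpower(&local, &dbm);
}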
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8c68da30595d..cc6e964d9837 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -131,7 +131,7 @@ enum ieee80211_bss_corrupt_data_flags {
*
* These are bss flags that are attached to a bss in the
* @valid_data field of &struct ieee80211_bss. They show which parts
- * of the data structure were recieved as a result of an un-corrupted
+ * of the data structure were received as a result of an un-corrupted
* beacon/probe response.
*/
enum ieee80211_bss_valid_data_flags {
@@ -399,6 +399,24 @@ struct ieee80211_mgd_assoc_data {
u8 ie[];
};
+struct ieee80211_sta_tx_tspec {
+ /* timestamp of the first packet in the time slice */
+ unsigned long time_slice_start;
+
+ u32 admitted_time; /* in usecs, unlike over the air */
+ u8 tsid;
+ s8 up; /* signed to be able to invalidate with -1 during teardown */
+
+ /* consumed TX time in microseconds in the time slice */
+ u32 consumed_tx_time;
+ enum {
+ TX_TSPEC_ACTION_NONE = 0,
+ TX_TSPEC_ACTION_DOWNGRADE,
+ TX_TSPEC_ACTION_STOP_DOWNGRADE,
+ } action;
+ bool downgraded;
+};
+
struct ieee80211_if_managed {
struct timer_list timer;
struct timer_list conn_mon_timer;
@@ -434,6 +452,8 @@ struct ieee80211_if_managed {
unsigned int flags;
+ bool csa_waiting_bcn;
+
bool beacon_crc_valid;
u32 beacon_crc;
@@ -505,8 +525,23 @@ struct ieee80211_if_managed {
struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */
+ /* TDLS support */
u8 tdls_peer[ETH_ALEN] __aligned(2);
struct delayed_work tdls_peer_del_work;
+ struct sk_buff *orig_teardown_skb; /* The original teardown skb */
+ struct sk_buff *teardown_skb; /* A copy to send through the AP */
+ spinlock_t teardown_lock; /* To lock changing teardown_skb */
+ bool tdls_chan_switch_prohibited;
+
+ /* WMM-AC TSPEC support */
+ struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS];
+ /* Use a separate work struct so that we can do something here
+ * while the sdata->work is flushing the queues, for example.
+ * otherwise, in scenarios where we hardly get any traffic out
+ * on the BE queue, but there's a lot of VO traffic, we might
+ * get stuck in a downgraded situation and flush takes forever.
+ */
+ struct delayed_work tx_tspec_wk;
};
struct ieee80211_if_ibss {
@@ -547,6 +582,25 @@ struct ieee80211_if_ibss {
};
/**
+ * struct ieee80211_if_ocb - OCB mode state
+ *
+ * @housekeeping_timer: timer for periodic invocation of a housekeeping task
+ * @wrkq_flags: OCB deferred task action
+ * @incomplete_lock: delayed STA insertion lock
+ * @incomplete_stations: list of STAs waiting for delayed insertion
+ * @joined: indication if the interface is connected to an OCB network
+ */
+struct ieee80211_if_ocb {
+ struct timer_list housekeeping_timer;
+ unsigned long wrkq_flags;
+
+ spinlock_t incomplete_lock;
+ struct list_head incomplete_stations;
+
+ bool joined;
+};
+
+/**
* struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
*
* these declarations define the interface, which enables
@@ -839,6 +893,7 @@ struct ieee80211_sub_if_data {
struct ieee80211_if_managed mgd;
struct ieee80211_if_ibss ibss;
struct ieee80211_if_mesh mesh;
+ struct ieee80211_if_ocb ocb;
u32 mntr_flags;
} u;
@@ -938,6 +993,7 @@ enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
+ IEEE80211_SDATA_QUEUE_TDLS_CHSW = 5,
};
enum {
@@ -955,6 +1011,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
+ IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
IEEE80211_QUEUE_STOP_REASONS,
};
@@ -1181,7 +1238,7 @@ struct ieee80211_local {
unsigned long scanning;
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
- struct cfg80211_scan_request *scan_req;
+ struct cfg80211_scan_request __rcu *scan_req;
struct ieee80211_scan_request *hw_scan_req;
struct cfg80211_chan_def scan_chandef;
enum ieee80211_band hw_scan_band;
@@ -1191,7 +1248,8 @@ struct ieee80211_local {
struct work_struct sched_scan_stopped_work;
struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
- struct cfg80211_sched_scan_request *sched_scan_req;
+ struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ u8 scan_addr[ETH_ALEN];
unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
@@ -1307,6 +1365,9 @@ struct ieee80211_local {
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
struct cfg80211_chan_def monitor_chandef;
+
+ /* extended capabilities provided by mac80211 */
+ u8 ext_capa[8];
};
static inline struct ieee80211_sub_if_data *
@@ -1342,6 +1403,9 @@ struct ieee802_11_elems {
size_t total_len;
/* pointers to IEs */
+ const struct ieee80211_tdls_lnkie *lnk_id;
+ const struct ieee80211_ch_switch_timing *ch_sw_timing;
+ const u8 *ext_capab;
const u8 *ssid;
const u8 *supp_rates;
const u8 *ds_params;
@@ -1376,6 +1440,7 @@ struct ieee802_11_elems {
const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
/* length of them, respectively */
+ u8 ext_capab_len;
u8 ssid_len;
u8 supp_rates_len;
u8 tim_len;
@@ -1454,6 +1519,7 @@ void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
__le16 fc, bool acked);
void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1471,6 +1537,15 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
+/* OCB code */
+void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *addr, u32 supp_rates);
+void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata);
+int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
+ struct ocb_setup *setup);
+int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata);
+
/* mesh code */
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1562,8 +1637,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 info_flags);
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
struct sk_buff_head *skbs);
+struct sk_buff *
+ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 info_flags);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -1690,8 +1771,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify);
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- enum ieee80211_band band);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
@@ -1757,6 +1837,13 @@ static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
return true;
}
+extern const int ieee802_1d_to_ac[8];
+
+static inline int ieee80211_ac_from_tid(int tid)
+{
+ return ieee802_1d_to_ac[tid & 7];
+}
+
void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
void ieee80211_dynamic_ps_timer(unsigned long data);
@@ -1766,7 +1853,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr, bool ack);
+ struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
unsigned long queues,
@@ -1795,6 +1882,9 @@ void ieee80211_add_pending_skbs(struct ieee80211_local *local,
struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
+void __ieee80211_flush_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int queues);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
@@ -1811,12 +1901,14 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u8 bands_used, u32 *rate_masks,
struct cfg80211_chan_def *chandef);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
- u8 *dst, u32 ratemask,
+ const u8 *src, const u8 *dst,
+ u32 ratemask,
struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
bool directed);
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
+void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
+ const u8 *src, const u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
u32 ratemask, bool directed, u32 tx_flags,
@@ -1832,8 +1924,10 @@ int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata);
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset);
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset);
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap);
@@ -1920,6 +2014,14 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper);
void ieee80211_tdls_peer_del_work(struct work_struct *wk);
+int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef);
+void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr);
+void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
extern const struct ethtool_ops ieee80211_ethtool_ops;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 653f5eb07a27..417355390873 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -259,6 +259,15 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
list_for_each_entry(nsdata, &local->interfaces, list) {
if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
/*
+ * Only OCB and monitor mode may coexist
+ */
+ if ((sdata->vif.type == NL80211_IFTYPE_OCB &&
+ nsdata->vif.type != NL80211_IFTYPE_MONITOR) ||
+ (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+ nsdata->vif.type == NL80211_IFTYPE_OCB))
+ return -EBUSY;
+
+ /*
* Allow only a single IBSS interface to be up at any
* time. This is restricted because beacon distribution
* cannot work properly if both are in the same IBSS.
@@ -511,6 +520,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
+ sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
break;
}
case NL80211_IFTYPE_AP:
@@ -521,6 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_OCB:
/* no special treatment */
break;
case NL80211_IFTYPE_UNSPECIFIED:
@@ -631,6 +642,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
netif_carrier_off(dev);
break;
case NL80211_IFTYPE_WDS:
@@ -844,6 +856,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
sdata_lock(sdata);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ sdata->u.mgd.csa_waiting_bcn = false;
if (sdata->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -1195,6 +1209,8 @@ static void ieee80211_iface_work(struct work_struct *work)
WLAN_BACK_RECIPIENT, 0,
false);
mutex_unlock(&local->sta_mtx);
+ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
+ ieee80211_process_tdls_channel_switch(sdata, skb);
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK) {
int len = skb->len;
@@ -1285,6 +1301,9 @@ static void ieee80211_iface_work(struct work_struct *work)
break;
ieee80211_mesh_work(sdata);
break;
+ case NL80211_IFTYPE_OCB:
+ ieee80211_ocb_work(sdata);
+ break;
default:
break;
}
@@ -1304,6 +1323,9 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type)
{
+ static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff};
+
/* clear type-dependent union */
memset(&sdata->u, 0, sizeof(sdata->u));
@@ -1355,6 +1377,10 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
ieee80211_sta_setup_sdata(sdata);
break;
+ case NL80211_IFTYPE_OCB:
+ sdata->vif.bss_conf.bssid = bssid_wildcard;
+ ieee80211_ocb_setup_sdata(sdata);
+ break;
case NL80211_IFTYPE_ADHOC:
sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
ieee80211_ibss_setup_sdata(sdata);
@@ -1402,6 +1428,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_OCB:
/*
* Could maybe also all others here?
* Just not sure how that interacts
@@ -1417,6 +1444,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_OCB:
/*
* Could probably support everything
* but WDS here (WDS do_open can fail
@@ -1675,7 +1703,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
}
ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
- memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+ if (params && is_valid_ether_addr(params->macaddr))
+ memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN);
+ else
+ memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
/* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4712150dc210..0bb7038121ac 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -94,8 +94,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
might_sleep();
- if (key->flags & KEY_FLAG_TAINTED)
+ if (key->flags & KEY_FLAG_TAINTED) {
+ /* If we get here, it's during resume and the key is
+ * tainted so shouldn't be used/programmed any more.
+ * However, its flags may still indicate that it was
+ * programmed into the device (since we're in resume)
+ * so clear that flag now to avoid trying to remove
+ * it again later.
+ */
+ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
return -EINVAL;
+ }
if (!key->local->ops->set_key)
goto out_unsupported;
@@ -647,7 +656,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
key = key_mtx_dereference(local, sta->gtk[i]);
if (!key)
continue;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0de7c93bf62b..6ab99da38db9 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -478,13 +478,9 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
},
};
-static const u8 extended_capabilities[] = {
- 0, 0, 0, 0, 0, 0, 0,
- WLAN_EXT_CAPA8_OPMODE_NOTIF,
-};
-
-struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
- const struct ieee80211_ops *ops)
+struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ const struct ieee80211_ops *ops,
+ const char *requested_name)
{
struct ieee80211_local *local;
int priv_size, i;
@@ -524,7 +520,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
*/
priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
- wiphy = wiphy_new(&mac80211_config_ops, priv_size);
+ wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name);
if (!wiphy)
return NULL;
@@ -539,10 +535,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- wiphy->extended_capabilities = extended_capabilities;
- wiphy->extended_capabilities_mask = extended_capabilities;
- wiphy->extended_capabilities_len = ARRAY_SIZE(extended_capabilities);
-
if (ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -550,6 +542,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
NL80211_FEATURE_SAE |
NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_VIF_TXPOWER |
+ NL80211_FEATURE_MAC_ON_CREATE |
NL80211_FEATURE_USERSPACE_MPM;
if (!ops->hw_scan)
@@ -591,6 +584,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask;
+ local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF;
+
+ wiphy->extended_capabilities = local->ext_capa;
+ wiphy->extended_capabilities_mask = local->ext_capa;
+ wiphy->extended_capabilities_len =
+ ARRAY_SIZE(local->ext_capa);
+
INIT_LIST_HEAD(&local->interfaces);
__hw_addr_init(&local->mc_list);
@@ -651,7 +651,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
return &local->hw;
}
-EXPORT_SYMBOL(ieee80211_alloc_hw);
+EXPORT_SYMBOL(ieee80211_alloc_hw_nm);
static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
{
@@ -764,6 +764,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.offchannel_tx_hw_queue >= local->hw.queues))
return -EINVAL;
+ if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) &&
+ (!local->ops->tdls_channel_switch ||
+ !local->ops->tdls_cancel_channel_switch ||
+ !local->ops->tdls_recv_channel_switch))
+ return -EOPNOTSUPP;
+
#ifdef CONFIG_PM
if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
@@ -787,13 +793,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
return -EINVAL;
- /* DFS currently not supported with channel context drivers */
+ /* DFS is not supported with multi-channel combinations yet */
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
comb = &local->hw.wiphy->iface_combinations[i];
- if (comb->radar_detect_widths)
+ if (comb->radar_detect_widths &&
+ comb->num_different_channels > 1)
return -EINVAL;
}
}
@@ -958,6 +965,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+ /* mac80211 supports eCSA, if the driver supports STA CSA at all */
+ if (local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)
+ local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
+
local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
result = wiphy_register(local->hw.wiphy);
@@ -1019,7 +1030,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
/* add one default STA interface if supported */
- if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
+ !(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) {
result = ieee80211_if_add(local, "wlan%d", NULL,
NL80211_IFTYPE_STATION, NULL);
if (result)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index f39a19f9090f..50c8473cf9dc 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -270,6 +270,8 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst, const u8 *mpp);
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx);
+struct mesh_path *
+mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx);
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
@@ -317,6 +319,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
extern int mesh_paths_generation;
+extern int mpp_paths_generation;
#ifdef CONFIG_MAC80211_MESH
static inline
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a6699dceae7c..b890e225a8f1 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -44,6 +44,7 @@ static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
+int mpp_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
* as readers. RCU provides sufficient protection only when reading the table
@@ -410,6 +411,33 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
}
/**
+ * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
+ * @idx: index
+ * @sdata: local subif, or NULL for all entries
+ *
+ * Returns: pointer to the proxy path structure, or NULL if not found.
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *
+mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
+{
+ struct mesh_table *tbl = rcu_dereference(mpp_paths);
+ struct mpath_node *node;
+ int i;
+ int j = 0;
+
+ for_each_mesh_entry(tbl, node, i) {
+ if (sdata && node->mpath->sdata != sdata)
+ continue;
+ if (j++ == idx)
+ return node->mpath;
+ }
+
+ return NULL;
+}
+
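The "Locking:" note above is the important part for callers; a minimal sketch of an index-based dump loop under RCU (the loop itself is illustrative, only the locking rule comes from the comment above):

	/* Sketch: walk proxy paths by index inside one RCU read section */
	static void example_dump_mpp(struct ieee80211_sub_if_data *sdata)
	{
		struct mesh_path *mpath;
		int idx = 0;

		rcu_read_lock();
		while ((mpath = mpp_path_lookup_by_idx(sdata, idx++)) != NULL)
			;	/* report the proxied destination to the caller */
		rcu_read_unlock();
	}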
+/**
* mesh_path_add_gate - add the given mpath to a mesh gate to our path table
* @mpath: gate path to add to table
*/
@@ -691,6 +719,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
+
+ mpp_paths_generation++;
+
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 93af0f1c9d99..2c36c4765f47 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -174,6 +174,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!(ht_cap->cap_info &
cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
ret = IEEE80211_STA_DISABLE_40MHZ;
+ vht_chandef = *chandef;
goto out;
}
@@ -552,13 +553,17 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
cap = vht_cap.cap;
if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_80P80MHZ) {
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+ cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
+ bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
}
if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_160MHZ) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
/*
@@ -775,11 +780,30 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
WLAN_EID_QOS_CAPA,
WLAN_EID_RRM_ENABLED_CAPABILITIES,
WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_FAST_BSS_TRANSITION, /* reassoc only */
+ WLAN_EID_RIC_DATA, /* reassoc only */
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
};
- noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
- before_ht, ARRAY_SIZE(before_ht),
- offset);
+ static const u8 after_ric[] = {
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_HT_CAPABILITY,
+ WLAN_EID_BSS_COEX_2040,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_QOS_TRAFFIC_CAPA,
+ WLAN_EID_TIM_BCAST_REQ,
+ WLAN_EID_INTERWORKING,
+ /* 60GHz doesn't happen right now */
+ WLAN_EID_VHT_CAPABILITY,
+ WLAN_EID_OPMODE_NOTIF,
+ };
+
+ noffset = ieee80211_ie_split_ric(assoc_data->ie,
+ assoc_data->ie_len,
+ before_ht,
+ ARRAY_SIZE(before_ht),
+ after_ric,
+ ARRAY_SIZE(after_ric),
+ offset);
pos = skb_put(skb, noffset - offset);
memcpy(pos, assoc_data->ie + offset, noffset - offset);
offset = noffset;
@@ -813,6 +837,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
WLAN_EID_TIM_BCAST_REQ,
WLAN_EID_INTERWORKING,
};
+
+ /* RIC already taken above, so no need to handle here anymore */
noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
before_vht, ARRAY_SIZE(before_vht),
offset);
@@ -1001,14 +1027,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
/* XXX: shouldn't really modify cfg80211-owned data! */
ifmgd->associated->channel = sdata->csa_chandef.chan;
- sdata->vif.csa_active = false;
-
- /* XXX: wait for a beacon first? */
- if (sdata->csa_block_tx) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_block_tx = false;
- }
+ ifmgd->csa_waiting_bcn = true;
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
@@ -1019,6 +1038,37 @@ out:
sdata_unlock(sdata);
}
+static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ int ret;
+
+ sdata_assert_lock(sdata);
+
+ WARN_ON(!sdata->vif.csa_active);
+
+ if (sdata->csa_block_tx) {
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_block_tx = false;
+ }
+
+ cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
+
+ sdata->vif.csa_active = false;
+ ifmgd->csa_waiting_bcn = false;
+
+ ret = drv_post_channel_switch(sdata);
+ if (ret) {
+ sdata_info(sdata,
+ "driver post channel switch failed, disconnecting\n");
+ ieee80211_queue_work(&local->hw,
+ &ifmgd->csa_connection_drop_work);
+ return;
+ }
+}
+
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -1046,7 +1096,8 @@ static void ieee80211_chswitch_timer(unsigned long data)
static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
- u64 timestamp, struct ieee802_11_elems *elems,
+ u64 timestamp, u32 device_timestamp,
+ struct ieee802_11_elems *elems,
bool beacon)
{
struct ieee80211_local *local = sdata->local;
@@ -1056,6 +1107,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_chanctx *chanctx;
enum ieee80211_band current_band;
struct ieee80211_csa_ie csa_ie;
+ struct ieee80211_channel_switch ch_switch;
int res;
sdata_assert_lock(sdata);
@@ -1110,21 +1162,31 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (local->use_chanctx) {
- u32 num_chanctx = 0;
- list_for_each_entry(chanctx, &local->chanctx_list, list)
- num_chanctx++;
+ if (local->use_chanctx &&
+ !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+ sdata_info(sdata,
+ "driver doesn't support chan-switch with channel contexts\n");
+ ieee80211_queue_work(&local->hw,
+ &ifmgd->csa_connection_drop_work);
+ mutex_unlock(&local->chanctx_mtx);
+ mutex_unlock(&local->mtx);
+ return;
+ }
+
+ ch_switch.timestamp = timestamp;
+ ch_switch.device_timestamp = device_timestamp;
+ ch_switch.block_tx = csa_ie.mode;
+ ch_switch.chandef = csa_ie.chandef;
+ ch_switch.count = csa_ie.count;
- if (num_chanctx > 1 ||
- !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
- sdata_info(sdata,
- "not handling chan-switch with channel contexts\n");
- ieee80211_queue_work(&local->hw,
- &ifmgd->csa_connection_drop_work);
- mutex_unlock(&local->chanctx_mtx);
- mutex_unlock(&local->mtx);
- return;
- }
+ if (drv_pre_channel_switch(sdata, &ch_switch)) {
+ sdata_info(sdata,
+ "preparing for channel switch failed, disconnecting\n");
+ ieee80211_queue_work(&local->hw,
+ &ifmgd->csa_connection_drop_work);
+ mutex_unlock(&local->chanctx_mtx);
+ mutex_unlock(&local->mtx);
+ return;
}
res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef,
@@ -1150,16 +1212,12 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
mutex_unlock(&local->mtx);
+ cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+ csa_ie.count);
+
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
- struct ieee80211_channel_switch ch_switch = {
- .timestamp = timestamp,
- .block_tx = csa_ie.mode,
- .chandef = csa_ie.chandef,
- .count = csa_ie.count,
- };
-
- drv_channel_switch(local, &ch_switch);
+ drv_channel_switch(local, sdata, &ch_switch);
return;
}
@@ -1580,6 +1638,95 @@ void ieee80211_dfs_cac_timer_work(struct work_struct *work)
mutex_unlock(&sdata->local->mtx);
}
+static bool
+__ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ bool ret;
+ int ac;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ return false;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+ int non_acm_ac;
+ unsigned long now = jiffies;
+
+ if (tx_tspec->action == TX_TSPEC_ACTION_NONE &&
+ tx_tspec->admitted_time &&
+ time_after(now, tx_tspec->time_slice_start + HZ)) {
+ tx_tspec->consumed_tx_time = 0;
+ tx_tspec->time_slice_start = now;
+
+ if (tx_tspec->downgraded)
+ tx_tspec->action =
+ TX_TSPEC_ACTION_STOP_DOWNGRADE;
+ }
+
+ switch (tx_tspec->action) {
+ case TX_TSPEC_ACTION_STOP_DOWNGRADE:
+ /* take the original parameters */
+ if (drv_conf_tx(local, sdata, ac, &sdata->tx_conf[ac]))
+ sdata_err(sdata,
+ "failed to set TX queue parameters for queue %d\n",
+ ac);
+ tx_tspec->action = TX_TSPEC_ACTION_NONE;
+ tx_tspec->downgraded = false;
+ ret = true;
+ break;
+ case TX_TSPEC_ACTION_DOWNGRADE:
+ if (time_after(now, tx_tspec->time_slice_start + HZ)) {
+ tx_tspec->action = TX_TSPEC_ACTION_NONE;
+ ret = true;
+ break;
+ }
+ /* downgrade next lower non-ACM AC */
+ for (non_acm_ac = ac + 1;
+ non_acm_ac < IEEE80211_NUM_ACS;
+ non_acm_ac++)
+ if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac)))
+ break;
+ /* The loop will result in using BK even if it requires
+ * admission control; such a configuration makes no sense,
+ * but we have to transmit somehow - the AC selection
+ * does the same thing.
+ */
+ if (drv_conf_tx(local, sdata, ac,
+ &sdata->tx_conf[non_acm_ac]))
+ sdata_err(sdata,
+ "failed to set TX queue parameters for queue %d\n",
+ ac);
+ tx_tspec->action = TX_TSPEC_ACTION_NONE;
+ ret = true;
+ schedule_delayed_work(&ifmgd->tx_tspec_wk,
+ tx_tspec->time_slice_start + HZ - now + 1);
+ break;
+ case TX_TSPEC_ACTION_NONE:
+ /* nothing now */
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
+{
+ if (__ieee80211_sta_handle_tspec_ac_params(sdata))
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
+}
+
+static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.tx_tspec_wk.work);
+ ieee80211_sta_handle_tspec_ac_params(sdata);
+}
+
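The downgrade case above searches for the next lower AC whose ACM bit is clear; a standalone sketch of that search, assuming wmm_acm carries one bit per 802.1D user priority so that BIT(7 - 2 * ac) probes one priority of each AC (VO bit 7, VI bit 5, BE bit 3, BK bit 1):

	/* Sketch, assumptions as noted above; clamps to BK as a last resort,
	 * which is the behaviour the in-code comment describes.
	 */
	static int example_pick_non_acm_ac(unsigned int wmm_acm, int ac)
	{
		int non_acm_ac;

		for (non_acm_ac = ac + 1; non_acm_ac < 4; non_acm_ac++)
			if (!(wmm_acm & (1u << (7 - 2 * non_acm_ac))))
				return non_acm_ac;
		return 3;	/* BK */
	}

	/* e.g. with ACM required on VO and VI only (wmm_acm == 0xf0), a
	 * downgrade of AC_VO traffic ends up on AC_BE (2).
	 */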
/* MLME */
static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -1664,12 +1811,14 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.uapsd = uapsd;
mlme_dbg(sdata,
- "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
+ "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
queue, aci, acm,
params.aifs, params.cw_min, params.cw_max,
- params.txop, params.uapsd);
+ params.txop, params.uapsd,
+ ifmgd->tx_tspec[queue].downgraded);
sdata->tx_conf[queue] = params;
- if (drv_conf_tx(local, sdata, queue, &params))
+ if (!ifmgd->tx_tspec[queue].downgraded &&
+ drv_conf_tx(local, sdata, queue, &params))
sdata_err(sdata,
"failed to set TX queue parameters for queue %d\n",
queue);
@@ -1924,6 +2073,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_vif_release_channel(sdata);
sdata->vif.csa_active = false;
+ ifmgd->csa_waiting_bcn = false;
if (sdata->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -1931,6 +2081,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
mutex_unlock(&local->mtx);
+ /* existing TX TSPEC sessions no longer exist */
+ memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
+ cancel_delayed_work_sync(&ifmgd->tx_tspec_wk);
+
sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
}
@@ -1983,9 +2137,46 @@ out:
mutex_unlock(&local->mtx);
}
+static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_hdr *hdr,
+ u16 tx_time)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ u16 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ int ac = ieee80211_ac_from_tid(tid);
+ struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+ unsigned long now = jiffies;
+
+ if (likely(!tx_tspec->admitted_time))
+ return;
+
+ if (time_after(now, tx_tspec->time_slice_start + HZ)) {
+ tx_tspec->consumed_tx_time = 0;
+ tx_tspec->time_slice_start = now;
+
+ if (tx_tspec->downgraded) {
+ tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE;
+ schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
+ }
+ }
+
+ if (tx_tspec->downgraded)
+ return;
+
+ tx_tspec->consumed_tx_time += tx_time;
+
+ if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) {
+ tx_tspec->downgraded = true;
+ tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE;
+ schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
+ }
+}
+
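How the admitted-time budget above plays out over one time slice, assuming admitted_time and the per-frame tx_time are kept in the same airtime unit and the slice resets every second (HZ):

	/* Standalone illustration: with 4000 units admitted per slice and about
	 * 520 units of airtime per acknowledged MPDU, the 8th confirmation
	 * pushes consumed_tx_time past the budget and arms the downgrade.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int admitted = 4000, consumed = 0, per_frame = 520, n = 0;

		while (consumed < admitted) {
			consumed += per_frame;	/* as in the accumulation above */
			n++;
		}
		printf("downgrade after %u frames (%u consumed)\n", n, consumed);
		return 0;	/* prints: downgrade after 8 frames (4160 consumed) */
	}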
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr, bool ack)
+ struct ieee80211_hdr *hdr, bool ack, u16 tx_time)
{
+ ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time);
+
if (!ieee80211_is_data(hdr->frame_control))
return;
@@ -2040,7 +2231,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
else
ssid_len = ssid[1];
- ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
+ ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
+ ssid + 2, ssid_len, NULL,
0, (u32) -1, true, 0,
ifmgd->associated->channel, false);
rcu_read_unlock();
@@ -2048,8 +2240,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(sdata, ifmgd->probe_timeout);
- if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- ieee80211_flush_queues(sdata->local, sdata);
}
static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -2078,9 +2268,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
"detected beacon loss from AP (missed %d beacons) - probing\n",
beacon_loss_count);
- ieee80211_cqm_rssi_notify(&sdata->vif,
- NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
- GFP_KERNEL);
+ ieee80211_cqm_beacon_loss_notify(&sdata->vif, GFP_KERNEL);
}
/*
@@ -2145,7 +2333,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
else
ssid_len = ssid[1];
- skb = ieee80211_build_probe_req(sdata, cbss->bssid,
+ skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid,
(u32) -1, cbss->channel,
ssid + 2, ssid_len,
NULL, 0, true);
@@ -2172,6 +2360,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
true, frame_buf);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
+ ifmgd->csa_waiting_bcn = false;
if (sdata->csa_block_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -2618,6 +2807,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
ifmgd->aid = aid;
+ ifmgd->tdls_chan_switch_prohibited =
+ elems.ext_capab && elems.ext_capab_len >= 5 &&
+ (elems.ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED);
/*
* Some APs are erroneously not including some information in their
@@ -3196,6 +3388,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
}
+ if (ifmgd->csa_waiting_bcn)
+ ieee80211_chswitch_post_beacon(sdata);
+
if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
return;
ifmgd->beacon_crc = ncrc;
@@ -3204,6 +3399,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
+ rx_status->device_timestamp,
&elems, true);
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
@@ -3335,8 +3531,9 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
break;
ieee80211_sta_process_chanswitch(sdata,
- rx_status->mactime,
- &elems, false);
+ rx_status->mactime,
+ rx_status->device_timestamp,
+ &elems, false);
} else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
ies_len = skb->len -
offsetof(struct ieee80211_mgmt,
@@ -3357,8 +3554,9 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
&mgmt->u.action.u.ext_chan_switch.data;
ieee80211_sta_process_chanswitch(sdata,
- rx_status->mactime,
- &elems, false);
+ rx_status->mactime,
+ rx_status->device_timestamp,
+ &elems, false);
}
break;
}
@@ -3456,7 +3654,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
* Direct probe is sent to broadcast address as some APs
* will not answer to direct packet in unassociated state.
*/
- ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
+ ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
+ ssidie + 2, ssidie[1],
NULL, 0, (u32) -1, true, 0,
auth_data->bss->channel, false);
rcu_read_unlock();
@@ -3665,11 +3864,12 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
struct ieee80211_sub_if_data *sdata =
(struct ieee80211_sub_if_data *) data;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (local->quiescing)
return;
- if (sdata->vif.csa_active)
+ if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
sdata->u.mgd.connection_loss = false;
@@ -3687,7 +3887,7 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
if (local->quiescing)
return;
- if (sdata->vif.csa_active)
+ if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
@@ -3799,6 +3999,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
(unsigned long) sdata);
+ INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
+ ieee80211_sta_handle_tspec_ac_params_wk);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
@@ -3810,6 +4012,11 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
else
ifmgd->req_smps = IEEE80211_SMPS_OFF;
+
+ /* Setup TDLS data */
+ spin_lock_init(&ifmgd->teardown_lock);
+ ifmgd->teardown_skb = NULL;
+ ifmgd->orig_teardown_skb = NULL;
}
/* scan finished notification */
@@ -4672,6 +4879,13 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
}
if (ifmgd->auth_data)
ieee80211_destroy_auth_data(sdata, false);
+ spin_lock_bh(&ifmgd->teardown_lock);
+ if (ifmgd->teardown_skb) {
+ kfree_skb(ifmgd->teardown_skb);
+ ifmgd->teardown_skb = NULL;
+ ifmgd->orig_teardown_skb = NULL;
+ }
+ spin_unlock_bh(&ifmgd->teardown_lock);
del_timer_sync(&ifmgd->timer);
sdata_unlock(sdata);
}
@@ -4687,3 +4901,13 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
+
+void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ trace_api_cqm_beacon_loss_notify(sdata->local, sdata);
+
+ cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp);
+}
+EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify);
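Drivers that detect beacon loss in firmware can report it through the same new export; a minimal sketch of such a call site (the handler name and calling context are assumptions, the call itself matches the signature added above):

	static void example_fw_beacon_loss_event(struct ieee80211_vif *vif)
	{
		/* forward the firmware event; GFP_ATOMIC would be used from
		 * atomic context instead
		 */
		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}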
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
new file mode 100644
index 000000000000..358d5f9d8207
--- /dev/null
+++ b/net/mac80211/ocb.c
@@ -0,0 +1,250 @@
+/*
+ * OCB mode implementation
+ *
+ * Copyright: (c) 2014 Czech Technical University in Prague
+ * (c) 2014 Volkswagen Group Research
+ * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
+ * Funded by: Volkswagen Group Research
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+
+#include "ieee80211_i.h"
+#include "driver-ops.h"
+#include "rate.h"
+
+#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ)
+#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ)
+#define IEEE80211_OCB_MAX_STA_ENTRIES 128
+
+/**
+ * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks
+ * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks
+ *
+ * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb
+ */
+enum ocb_deferred_task_flags {
+ OCB_WORK_HOUSEKEEPING,
+};
+
+void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *addr,
+ u32 supp_rates)
+{
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
+ struct sta_info *sta;
+ int band;
+
+ /* XXX: Consider removing the least recently used entry and
+ * allowing a new one to be added.

+ */
+ if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) {
+ net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n",
+ sdata->name, addr);
+ return;
+ }
+
+ ocb_dbg(sdata, "Adding new OCB station %pM\n", addr);
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (WARN_ON_ONCE(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+ band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
+ rcu_read_unlock();
+
+ sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+ if (!sta)
+ return;
+
+ sta->last_rx = jiffies;
+
+ /* Add only mandatory rates for now */
+ sband = local->hw.wiphy->bands[band];
+ sta->sta.supp_rates[band] =
+ ieee80211_mandatory_rates(sband, scan_width);
+
+ spin_lock(&ifocb->incomplete_lock);
+ list_add(&sta->list, &ifocb->incomplete_stations);
+ spin_unlock(&ifocb->incomplete_lock);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+}
+
+static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta)
+ __acquires(RCU)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u8 addr[ETH_ALEN];
+
+ memcpy(addr, sta->sta.addr, ETH_ALEN);
+
+ ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n",
+ addr, sdata->name);
+
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+ rate_control_rate_init(sta);
+
+ /* If it fails, maybe we raced another insertion? */
+ if (sta_info_insert_rcu(sta))
+ return sta_info_get(sdata, addr);
+ return sta;
+}
+
+static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+
+ ocb_dbg(sdata, "Running ocb housekeeping\n");
+
+ ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT);
+
+ mod_timer(&ifocb->housekeeping_timer,
+ round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL));
+}
+
+void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+ struct sta_info *sta;
+
+ if (ifocb->joined != true)
+ return;
+
+ sdata_lock(sdata);
+
+ spin_lock_bh(&ifocb->incomplete_lock);
+ while (!list_empty(&ifocb->incomplete_stations)) {
+ sta = list_first_entry(&ifocb->incomplete_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&ifocb->incomplete_lock);
+
+ ieee80211_ocb_finish_sta(sta);
+ rcu_read_unlock();
+ spin_lock_bh(&ifocb->incomplete_lock);
+ }
+ spin_unlock_bh(&ifocb->incomplete_lock);
+
+ if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags))
+ ieee80211_ocb_housekeeping(sdata);
+
+ sdata_unlock(sdata);
+}
+
+static void ieee80211_ocb_housekeeping_timer(unsigned long data)
+{
+ struct ieee80211_sub_if_data *sdata = (void *)data;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+
+ set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
+
+ ieee80211_queue_work(&local->hw, &sdata->work);
+}
+
+void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+
+ setup_timer(&ifocb->housekeeping_timer,
+ ieee80211_ocb_housekeeping_timer,
+ (unsigned long)sdata);
+ INIT_LIST_HEAD(&ifocb->incomplete_stations);
+ spin_lock_init(&ifocb->incomplete_lock);
+}
+
+int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
+ struct ocb_setup *setup)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+ u32 changed = BSS_CHANGED_OCB;
+ int err;
+
+ if (ifocb->joined == true)
+ return -EINVAL;
+
+ sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+ sdata->smps_mode = IEEE80211_SMPS_OFF;
+ sdata->needed_rx_chains = sdata->local->rx_chains;
+
+ mutex_lock(&sdata->local->mtx);
+ err = ieee80211_vif_use_channel(sdata, &setup->chandef,
+ IEEE80211_CHANCTX_SHARED);
+ mutex_unlock(&sdata->local->mtx);
+ if (err)
+ return err;
+
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ ifocb->joined = true;
+
+ set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+
+ netif_carrier_on(sdata->dev);
+ return 0;
+}
+
+int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ ifocb->joined = false;
+ sta_info_flush(sdata);
+
+ spin_lock_bh(&ifocb->incomplete_lock);
+ while (!list_empty(&ifocb->incomplete_stations)) {
+ sta = list_first_entry(&ifocb->incomplete_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&ifocb->incomplete_lock);
+
+ sta_info_free(local, sta);
+ spin_lock_bh(&ifocb->incomplete_lock);
+ }
+ spin_unlock_bh(&ifocb->incomplete_lock);
+
+ netif_carrier_off(sdata->dev);
+ clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB);
+
+ mutex_lock(&sdata->local->mtx);
+ ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
+
+ skb_queue_purge(&sdata->skb_queue);
+
+ del_timer_sync(&sdata->u.ocb.housekeeping_timer);
+ /* If the timer fired while we waited for it, it will have
+ * requeued the work. Now the work will be running again
+ * but will not rearm the timer again because it checks
+ * whether we are connected to the network or not -- at this
+ * point we shouldn't be anymore.
+ */
+
+ return 0;
+}
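Joining and leaving OCB mode is driven from the cfg80211 side with nothing more than the operating channel definition carried in struct ocb_setup; a rough sketch of such a caller (the op shape is an assumption modelled on how the other interface types are wired up):

	static int example_join_ocb(struct wiphy *wiphy, struct net_device *dev,
				    struct ocb_setup *setup)
	{
		struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

		/* ieee80211_ocb_join() only consumes setup->chandef */
		return ieee80211_ocb_join(sdata, setup);
	}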
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 6081329784dd..d53355b011f5 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -385,7 +385,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
*rate = alt_rate;
return;
}
- } else {
+ } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
/* handle legacy rates */
if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
return;
@@ -446,7 +446,8 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
*
* XXX: Should this check all retry rates?
*/
- if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ if (!(rates[0].flags &
+ (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) {
u32 basic_rates = vif->bss_conf.basic_rates;
s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
@@ -696,6 +697,7 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta,
struct ieee80211_sta_rates *rates)
{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sta_rates *old;
/*
@@ -709,6 +711,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
if (old)
kfree_rcu(old, rcu_head);
+ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+
return 0;
}
EXPORT_SYMBOL(rate_control_set_rates);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 18babe302832..38652f09feaf 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -37,13 +37,35 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
return;
- ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
+ if (ref->ops->tx_status)
+ ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
+ else
+ ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
}
+static inline void
+rate_control_tx_status_noskb(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta,
+ struct ieee80211_tx_info *info)
+{
+ struct rate_control_ref *ref = local->rate_ctrl;
+ struct ieee80211_sta *ista = &sta->sta;
+ void *priv_sta = sta->rate_ctrl_priv;
+
+ if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+ return;
+
+ if (WARN_ON_ONCE(!ref->ops->tx_status_noskb))
+ return;
+
+ ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+}
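With the skb-less status path in place, a rate-control algorithm can register either callback; rate_control_tx_status() prefers .tx_status and falls back to .tx_status_noskb. A minimal ops sketch (the algorithm name and handler body are placeholders; the callback signature matches the call above and the minstrel conversion below):

	static void example_rc_tx_status_noskb(void *priv,
					       struct ieee80211_supported_band *sband,
					       struct ieee80211_sta *sta,
					       void *priv_sta,
					       struct ieee80211_tx_info *info)
	{
		/* update per-rate statistics from info->status.rates[] only */
	}

	static const struct rate_control_ops example_rc_ops = {
		.name			= "example-rc",
		.tx_status_noskb	= example_rc_tx_status_noskb,
		/* .get_rate, .rate_init, .alloc, ... as before */
	};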
static inline void rate_control_rate_init(struct sta_info *sta)
{
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2baa7ed8789d..d51f6b1c549b 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -191,7 +191,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* (1) if any success probabilitiy >= 95%, out of those rates
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
- * highest success probability is choosen as max_prob_rate */
+ * highest success probability is chosen as max_prob_rate */
if (mrs->probability >= MINSTREL_FRAC(95, 100)) {
if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp)
tmp_prob_rate = i;
@@ -223,11 +223,10 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
static void
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
+ struct ieee80211_tx_info *info)
{
struct minstrel_priv *mp = priv;
struct minstrel_sta_info *mi = priv_sta;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *ar = info->status.rates;
int i, ndx;
int success;
@@ -674,7 +673,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
const struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
- .tx_status = minstrel_tx_status,
+ .tx_status_noskb = minstrel_tx_status,
.get_rate = minstrel_get_rate,
.rate_init = minstrel_rate_init,
.alloc = minstrel_alloc,
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 408fd8ab4eef..80452cfd2dc5 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -10,6 +10,7 @@
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
+#include <linux/moduleparam.h>
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
@@ -34,12 +35,17 @@
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+#define BW_20 0
+#define BW_40 1
+#define BW_80 2
+
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define GROUP_IDX(_streams, _sgi, _ht40) \
+ MINSTREL_HT_GROUP_0 + \
MINSTREL_MAX_STREAMS * 2 * _ht40 + \
- MINSTREL_MAX_STREAMS * _sgi + \
+ MINSTREL_MAX_STREAMS * _sgi + \
_streams - 1
/* MCS rate information for an MCS group */
@@ -47,6 +53,7 @@
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.flags = \
+ IEEE80211_TX_RC_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
@@ -61,6 +68,47 @@
} \
}
+#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
+ (MINSTREL_VHT_GROUP_0 + \
+ MINSTREL_MAX_STREAMS * 2 * (_bw) + \
+ MINSTREL_MAX_STREAMS * (_sgi) + \
+ (_streams) - 1)
+
+#define BW2VBPS(_bw, r3, r2, r1) \
+ (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
+
+#define VHT_GROUP(_streams, _sgi, _bw) \
+ [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
+ .streams = _streams, \
+ .flags = \
+ IEEE80211_TX_RC_VHT_MCS | \
+ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
+ (_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \
+ _bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 117, 54, 26)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 234, 108, 52)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 351, 162, 78)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 468, 216, 104)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 702, 324, 156)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 936, 432, 208)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1053, 486, 234)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1170, 540, 260)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1404, 648, 312)), \
+ MCS_DURATION(_streams, _sgi, \
+ BW2VBPS(_bw, 1560, 720, 346)) \
+ } \
+}
+
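The three numbers fed to BW2VBPS() for each MCS are the data bits per OFDM symbol at 80, 40 and 20 MHz respectively; a standalone worked example of what that means for airtime, assuming the roughly 1200-byte average packet length minstrel uses elsewhere and 4 us symbols:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bits  = 1200 * 8;			/* average MPDU */
		unsigned int bps   = 117;	/* BW2VBPS(BW_80, 117, 54, 26): MCS0, 1 stream, 80 MHz */
		unsigned int nsyms = (bits + bps - 1) / bps;	/* round up: 83 symbols */

		printf("%u symbols, ~%u us of raw data airtime\n", nsyms, nsyms * 4);
		return 0;	/* prints: 83 symbols, ~332 us of raw data airtime */
	}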
#define CCK_DURATION(_bitrate, _short, _len) \
(1000 * (10 /* SIFS */ + \
(_short ? 72 + 24 : 144 + 48) + \
@@ -76,53 +124,96 @@
CCK_ACK_DURATION(55, _short), \
CCK_ACK_DURATION(110, _short)
-#define CCK_GROUP \
- [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \
- .streams = 0, \
- .duration = { \
- CCK_DURATION_LIST(false), \
- CCK_DURATION_LIST(true) \
- } \
+#define CCK_GROUP \
+ [MINSTREL_CCK_GROUP] = { \
+ .streams = 0, \
+ .flags = 0, \
+ .duration = { \
+ CCK_DURATION_LIST(false), \
+ CCK_DURATION_LIST(true) \
+ } \
}
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+static bool minstrel_vht_only = true;
+module_param(minstrel_vht_only, bool, 0644);
+MODULE_PARM_DESC(minstrel_vht_only,
+ "Use only VHT rates when VHT is supported by sta.");
+#endif
+
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
*
* Sortorder has to be fixed for GROUP_IDX macro to be applicable:
- * HT40 -> SGI -> #streams
+ * BW -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
- MCS_GROUP(1, 0, 0),
- MCS_GROUP(2, 0, 0),
+ MCS_GROUP(1, 0, BW_20),
+ MCS_GROUP(2, 0, BW_20),
#if MINSTREL_MAX_STREAMS >= 3
- MCS_GROUP(3, 0, 0),
+ MCS_GROUP(3, 0, BW_20),
#endif
- MCS_GROUP(1, 1, 0),
- MCS_GROUP(2, 1, 0),
+ MCS_GROUP(1, 1, BW_20),
+ MCS_GROUP(2, 1, BW_20),
#if MINSTREL_MAX_STREAMS >= 3
- MCS_GROUP(3, 1, 0),
+ MCS_GROUP(3, 1, BW_20),
#endif
- MCS_GROUP(1, 0, 1),
- MCS_GROUP(2, 0, 1),
+ MCS_GROUP(1, 0, BW_40),
+ MCS_GROUP(2, 0, BW_40),
#if MINSTREL_MAX_STREAMS >= 3
- MCS_GROUP(3, 0, 1),
+ MCS_GROUP(3, 0, BW_40),
#endif
- MCS_GROUP(1, 1, 1),
- MCS_GROUP(2, 1, 1),
+ MCS_GROUP(1, 1, BW_40),
+ MCS_GROUP(2, 1, BW_40),
#if MINSTREL_MAX_STREAMS >= 3
- MCS_GROUP(3, 1, 1),
+ MCS_GROUP(3, 1, BW_40),
#endif
- /* must be last */
- CCK_GROUP
-};
+ CCK_GROUP,
+
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+ VHT_GROUP(1, 0, BW_20),
+ VHT_GROUP(2, 0, BW_20),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 0, BW_20),
+#endif
-#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
+ VHT_GROUP(1, 1, BW_20),
+ VHT_GROUP(2, 1, BW_20),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 1, BW_20),
+#endif
+
+ VHT_GROUP(1, 0, BW_40),
+ VHT_GROUP(2, 0, BW_40),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 0, BW_40),
+#endif
+
+ VHT_GROUP(1, 1, BW_40),
+ VHT_GROUP(2, 1, BW_40),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 1, BW_40),
+#endif
+
+ VHT_GROUP(1, 0, BW_80),
+ VHT_GROUP(2, 0, BW_80),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 0, BW_80),
+#endif
+
+ VHT_GROUP(1, 1, BW_80),
+ VHT_GROUP(2, 1, BW_80),
+#if MINSTREL_MAX_STREAMS >= 3
+ VHT_GROUP(3, 1, BW_80),
+#endif
+#endif
+};
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
@@ -130,16 +221,64 @@ static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
/*
+ * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
+ * e.g. for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1
+ *
+ * Returns the valid mcs map for struct minstrel_mcs_group_data.supported
+ */
+static u16
+minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
+{
+ u16 mask = 0;
+
+ if (bw == BW_20) {
+ if (nss != 3 && nss != 6)
+ mask = BIT(9);
+ } else if (bw == BW_80) {
+ if (nss == 3 || nss == 7)
+ mask = BIT(6);
+ else if (nss == 6)
+ mask = BIT(9);
+ } else {
+ WARN_ON(bw != BW_40);
+ }
+
+ switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mask |= 0x300;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mask |= 0x200;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ break;
+ default:
+ mask = 0x3ff;
+ }
+
+ return 0x3ff & ~mask;
+}
+
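A worked instance of the masking above, mirrored in a standalone helper (illustration only): one spatial stream at 20 MHz with the peer advertising support for MCS 0-9.

	static unsigned short example_valid_vht_rates_1ss_20mhz(void)
	{
		unsigned short mask = 1u << 9;	/* MCS9 invalid for 1ss / 20 MHz */

		/* IEEE80211_VHT_MCS_SUPPORT_0_9 adds nothing to the mask */
		return 0x3ff & ~mask;		/* 0x1ff: MCS 0-8 stay usable */
	}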
+/*
* Look up an MCS group index based on mac80211 rate information
*/
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
- return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
+ return GROUP_IDX((rate->idx / 8) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
+static int
+minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
+{
+ return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate),
+ !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
+ !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) +
+ 2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH));
+}
+
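A worked instance of the index computed by the lookup above, assuming MINSTREL_MAX_STREAMS is 3:

	/* VHT_GROUP_IDX(2, 1, BW_80)
	 *   = MINSTREL_VHT_GROUP_0 + 3 * 2 * 2 + 3 * 1 + (2 - 1)
	 *   = MINSTREL_VHT_GROUP_0 + 16
	 * i.e. the 2-stream / short-GI / 80 MHz group is the 17th entry in the
	 * VHT block of minstrel_mcs_groups[] above.
	 */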
static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
@@ -149,6 +288,9 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (rate->flags & IEEE80211_TX_RC_MCS) {
group = minstrel_ht_get_group_idx(rate);
idx = rate->idx % 8;
+ } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ group = minstrel_vht_get_group_idx(rate);
+ idx = ieee80211_rate_get_vht_mcs(rate);
} else {
group = MINSTREL_CCK_GROUP;
@@ -240,8 +382,8 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
* MCS groups, CCK rates do not provide aggregation and are therefore at last.
*/
static void
-minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
- u8 *tp_list)
+minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
+ u16 *tp_list)
{
int cur_group, cur_idx, cur_thr, cur_prob;
int tmp_group, tmp_idx, tmp_thr, tmp_prob;
@@ -275,7 +417,7 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
* Find and set the topmost probability rate per sta and per group
*/
static void
-minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
+minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mr;
@@ -318,8 +460,8 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
*/
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
- u8 tmp_mcs_tp_rate[MAX_THR_RATES],
- u8 tmp_cck_tp_rate[MAX_THR_RATES])
+ u16 tmp_mcs_tp_rate[MAX_THR_RATES],
+ u16 tmp_cck_tp_rate[MAX_THR_RATES])
{
unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
int i;
@@ -383,8 +525,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mr;
int group, i, j;
- u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
- u8 tmp_cck_tp_rate[MAX_THR_RATES], index;
+ u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
+ u16 tmp_cck_tp_rate[MAX_THR_RATES], index;
if (mi->ampdu_packets > 0) {
mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
@@ -482,7 +624,8 @@ minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rat
if (!rate->count)
return false;
- if (rate->flags & IEEE80211_TX_RC_MCS)
+ if (rate->flags & IEEE80211_TX_RC_MCS ||
+ rate->flags & IEEE80211_TX_RC_VHT_MCS)
return true;
return rate->idx == mp->cck_rates[0] ||
@@ -514,7 +657,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
}
static void
-minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary)
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
int group, orig_group;
@@ -544,6 +687,9 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
u16 tid;
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
return;
@@ -554,20 +700,16 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
- if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
- return;
-
ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
+ struct ieee80211_tx_info *info)
{
struct minstrel_ht_sta_priv *msp = priv_sta;
struct minstrel_ht_sta *mi = &msp->ht;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *ar = info->status.rates;
struct minstrel_rate_stats *rate, *rate2;
struct minstrel_priv *mp = priv;
@@ -575,7 +717,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
int i;
if (!msp->is_ht)
- return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
+ return mac80211_minstrel.tx_status_noskb(priv, sband, sta,
+ &msp->legacy, info);
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -636,9 +779,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
update = true;
minstrel_ht_update_stats(mp, mi);
- if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
- mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
- minstrel_aggr_check(sta, skb);
}
if (update)
@@ -711,7 +851,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
struct minstrel_rate_stats *mr;
u8 idx;
- u16 flags;
+ u16 flags = group->flags;
mr = minstrel_get_ratestats(mi, index);
if (!mr->retry_updated)
@@ -727,13 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
ratetbl->rate[offset].count_rts = mr->retry_count_rtscts;
}
- if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+ if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP)
idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
- flags = 0;
- } else {
+ else if (flags & IEEE80211_TX_RC_VHT_MCS)
+ idx = ((group->streams - 1) << 4) |
+ ((index % MCS_GROUP_RATES) & 0xF);
+ else
idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
- flags = IEEE80211_TX_RC_MCS | group->flags;
- }
if (offset > 0) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
@@ -880,6 +1020,10 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
if (!msp->is_ht)
return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
+ minstrel_aggr_check(sta, txrc->skb);
+
info->flags |= mi->tx_flags;
minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
@@ -913,13 +1057,15 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
rate->idx = mp->cck_rates[idx];
- rate->flags = 0;
- return;
+ } else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
+ ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
+ sample_group->streams);
+ } else {
+ rate->idx = sample_idx % MCS_GROUP_RATES +
+ (sample_group->streams - 1) * 8;
}
- rate->idx = sample_idx % MCS_GROUP_RATES +
- (sample_group->streams - 1) * 8;
- rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
+ rate->flags = sample_group->flags;
}
static void
@@ -959,6 +1105,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct minstrel_ht_sta *mi = &msp->ht;
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
u16 sta_cap = sta->ht_cap.cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ int use_vht;
int n_supported = 0;
int ack_dur;
int stbc;
@@ -968,8 +1116,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (!sta->ht_cap.ht_supported)
goto use_legacy;
- BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
- MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);
+ BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
+
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+ if (vht_cap->vht_supported)
+ use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
+ else
+#endif
+ use_vht = 0;
msp->is_ht = true;
memset(mi, 0, sizeof(*mi));
@@ -994,22 +1148,28 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
}
mi->sample_tries = 4;
- stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
- IEEE80211_HT_CAP_RX_STBC_SHIFT;
- mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
+ /* TODO tx_flags for vht - ATM the RC API is not fine-grained enough */
+ if (!use_vht) {
+ stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
- if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
- mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+ if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
+ mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+ }
for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
+ u32 gflags = minstrel_mcs_groups[i].flags;
+ int bw, nss;
+
mi->groups[i].supported = 0;
if (i == MINSTREL_CCK_GROUP) {
minstrel_ht_update_cck(mp, mi, sband, sta);
continue;
}
- if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
- if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ if (gflags & IEEE80211_TX_RC_SHORT_GI) {
+ if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
continue;
} else {
@@ -1018,17 +1178,51 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
}
}
- if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
+ if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
sta->bandwidth < IEEE80211_STA_RX_BW_40)
continue;
+ nss = minstrel_mcs_groups[i].streams;
+
/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
- minstrel_mcs_groups[i].streams > 1)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC && nss > 1)
+ continue;
+
+ /* HT rate */
+ if (gflags & IEEE80211_TX_RC_MCS) {
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+ if (use_vht && minstrel_vht_only)
+ continue;
+#endif
+ mi->groups[i].supported = mcs->rx_mask[nss - 1];
+ if (mi->groups[i].supported)
+ n_supported++;
continue;
+ }
+
+ /* VHT rate */
+ if (!vht_cap->vht_supported ||
+ WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) ||
+ WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH))
+ continue;
+
+ if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
+ if (sta->bandwidth < IEEE80211_STA_RX_BW_80 ||
+ ((gflags & IEEE80211_TX_RC_SHORT_GI) &&
+ !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
+ continue;
+ }
+ }
+
+ if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = BW_40;
+ else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = BW_80;
+ else
+ bw = BW_20;
- mi->groups[i].supported =
- mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
+ mi->groups[i].supported = minstrel_get_valid_vht_rates(bw, nss,
+ vht_cap->vht_mcs.tx_mcs_map);
if (mi->groups[i].supported)
n_supported++;
@@ -1146,7 +1340,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
- .tx_status = minstrel_ht_tx_status,
+ .tx_status_noskb = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
.rate_init = minstrel_ht_rate_init,
.rate_update = minstrel_ht_rate_update,
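The minstrel_ht hunks above switch the rate encoding from HT-only to per-group: CCK entries go through mp->cck_rates[], VHT entries pack (NSS - 1) into the upper nibble and the MCS into the lower nibble, and HT entries keep the old mcs + (streams - 1) * 8 layout, with the TX flags now taken straight from group->flags. A minimal standalone sketch of that index arithmetic, assuming the MCS_GROUP_RATES value of 10 introduced in the header change below (illustration only, not kernel code):

#include <stdio.h>

#define MCS_GROUP_RATES 10      /* 10 rates per group once VHT is enabled */

/* HT: hardware MCS index is the group rate plus 8 per extra spatial stream */
static unsigned int ht_idx(unsigned int group_rate, unsigned int streams)
{
        return group_rate % MCS_GROUP_RATES + (streams - 1) * 8;
}

/* VHT: NSS - 1 in the upper nibble, MCS in the lower nibble */
static unsigned int vht_idx(unsigned int group_rate, unsigned int streams)
{
        return ((streams - 1) << 4) | ((group_rate % MCS_GROUP_RATES) & 0xF);
}

int main(void)
{
        /* e.g. the third rate of a 2-stream group */
        printf("HT:  MCS %u\n", ht_idx(2, 2));                  /* MCS 10 */
        printf("VHT: 0x%02x (NSS 2, MCS 2)\n", vht_idx(2, 2));  /* 0x12 */
        return 0;
}

The VHT packing appears to be the same one ieee80211_rate_set_vht() applies in the sampling hunk above.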
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 01570e0e014b..f2217d6aa0c2 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -13,10 +13,32 @@
* The number of streams can be changed to 2 to reduce code
* size and memory footprint.
*/
-#define MINSTREL_MAX_STREAMS 3
-#define MINSTREL_STREAM_GROUPS 4
+#define MINSTREL_MAX_STREAMS 3
+#define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+#define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
+#else
+#define MINSTREL_VHT_STREAM_GROUPS 0
+#endif
-#define MCS_GROUP_RATES 8
+#define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \
+ MINSTREL_HT_STREAM_GROUPS)
+#define MINSTREL_VHT_GROUPS_NB (MINSTREL_MAX_STREAMS * \
+ MINSTREL_VHT_STREAM_GROUPS)
+#define MINSTREL_CCK_GROUPS_NB 1
+#define MINSTREL_GROUPS_NB (MINSTREL_HT_GROUPS_NB + \
+ MINSTREL_VHT_GROUPS_NB + \
+ MINSTREL_CCK_GROUPS_NB)
+
+#define MINSTREL_HT_GROUP_0 0
+#define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB)
+#define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1)
+
+#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
+#define MCS_GROUP_RATES 10
+#else
+#define MCS_GROUP_RATES 8
+#endif
struct mcs_group {
u32 flags;
@@ -31,11 +53,11 @@ struct minstrel_mcs_group_data {
u8 column;
/* bitfield of supported MCS rates of this group */
- u8 supported;
+ u16 supported;
/* sorted rate set within a MCS group*/
- u8 max_group_tp_rate[MAX_THR_RATES];
- u8 max_group_prob_rate;
+ u16 max_group_tp_rate[MAX_THR_RATES];
+ u16 max_group_prob_rate;
/* MCS rate statistics */
struct minstrel_rate_stats rates[MCS_GROUP_RATES];
@@ -52,8 +74,8 @@ struct minstrel_ht_sta {
unsigned int avg_ampdu_len;
/* overall sorted rate set */
- u8 max_tp_rate[MAX_THR_RATES];
- u8 max_prob_rate;
+ u16 max_tp_rate[MAX_THR_RATES];
+ u16 max_prob_rate;
/* time of last status update */
unsigned long stats_update;
@@ -80,7 +102,7 @@ struct minstrel_ht_sta {
u8 cck_supported_short;
/* MCS rate group info and statistics */
- struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1];
+ struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB];
};
struct minstrel_ht_sta_priv {
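Taken together, the macros above grow the group table from 13 entries (12 HT + 1 CCK) to 31 when CONFIG_MAC80211_RC_MINSTREL_VHT is set (12 HT + 1 CCK + 18 VHT), and a global rate index of the form group * MCS_GROUP_RATES + rate can then exceed 255, which is presumably why the u8 rate fields above become u16. A quick sketch of that arithmetic under those assumptions:

#include <assert.h>
#include <stdio.h>

#define MINSTREL_MAX_STREAMS            3
#define MINSTREL_HT_STREAM_GROUPS       4       /* BW(=2) * SGI(=2) */
#define MINSTREL_VHT_STREAM_GROUPS      6       /* BW(=3) * SGI(=2) */
#define MCS_GROUP_RATES                 10

int main(void)
{
        int ht  = MINSTREL_MAX_STREAMS * MINSTREL_HT_STREAM_GROUPS;     /* 12 */
        int vht = MINSTREL_MAX_STREAMS * MINSTREL_VHT_STREAM_GROUPS;    /* 18 */
        int nb  = ht + 1 + vht;                                         /* 31 groups */

        /* highest possible global rate index */
        int max_idx = nb * MCS_GROUP_RATES - 1;                         /* 309 */

        printf("groups=%d, max rate index=%d\n", nb, max_idx);
        assert(max_idx > 255);  /* no longer fits in the old u8 fields */
        return 0;
}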
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index d537bec93754..20c676b8e5b6 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -18,19 +18,23 @@
static char *
minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
- unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
const struct mcs_group *mg;
unsigned int j, tp, prob, eprob;
char htmode = '2';
char gimode = 'L';
+ u32 gflags;
if (!mi->groups[i].supported)
return p;
mg = &minstrel_mcs_groups[i];
- if (mg->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ gflags = mg->flags;
+
+ if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
htmode = '4';
- if (mg->flags & IEEE80211_TX_RC_SHORT_GI)
+ else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ htmode = '8';
+ if (gflags & IEEE80211_TX_RC_SHORT_GI)
gimode = 'S';
for (j = 0; j < MCS_GROUP_RATES; j++) {
@@ -41,10 +45,12 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
if (!(mi->groups[i].supported & BIT(j)))
continue;
- if (i == max_mcs)
- p += sprintf(p, "CCK/%cP ", j < 4 ? 'L' : 'S');
+ if (gflags & IEEE80211_TX_RC_MCS)
+ p += sprintf(p, " HT%c0/%cGI ", htmode, gimode);
+ else if (gflags & IEEE80211_TX_RC_VHT_MCS)
+ p += sprintf(p, "VHT%c0/%cGI ", htmode, gimode);
else
- p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
+ p += sprintf(p, " CCK/%cP ", j < 4 ? 'L' : 'S');
*(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -52,11 +58,14 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
*(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' ';
*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
- if (i == max_mcs) {
- int r = bitrates[j % 4];
- p += sprintf(p, " %2u.%1uM", r / 10, r % 10);
+ if (gflags & IEEE80211_TX_RC_MCS) {
+ p += sprintf(p, " MCS%-2u ", (mg->streams - 1) * 8 + j);
+ } else if (gflags & IEEE80211_TX_RC_VHT_MCS) {
+ p += sprintf(p, " MCS%-1u/%1u", j, mg->streams);
} else {
- p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j);
+ int r = bitrates[j % 4];
+
+ p += sprintf(p, " %2u.%1uM ", r / 10, r % 10);
}
tp = mr->cur_tp / 10;
@@ -85,7 +94,6 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
struct minstrel_ht_sta *mi = &msp->ht;
struct minstrel_debugfs_info *ms;
unsigned int i;
- unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
char *p;
int ret;
@@ -96,18 +104,19 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
return ret;
}
- ms = kmalloc(8192, GFP_KERNEL);
+ ms = kmalloc(32768, GFP_KERNEL);
if (!ms)
return -ENOMEM;
file->private_data = ms;
p = ms->buf;
- p += sprintf(p, "type rate tpt eprob *prob "
+ p += sprintf(p, " type rate tpt eprob *prob "
"ret *ok(*cum) ok( cum)\n");
-
- p = minstrel_ht_stats_dump(mi, max_mcs, p);
- for (i = 0; i < max_mcs; i++)
+ p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
+ for (i = 0; i < MINSTREL_CCK_GROUP; i++)
+ p = minstrel_ht_stats_dump(mi, i, p);
+ for (i++; i < ARRAY_SIZE(mi->groups); i++)
p = minstrel_ht_stats_dump(mi, i, p);
p += sprintf(p, "\nTotal packet count:: ideal %d "
@@ -119,7 +128,7 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
ms->len = p - ms->buf;
- WARN_ON(ms->len + sizeof(*ms) > 8192);
+ WARN_ON(ms->len + sizeof(*ms) > 32768);
return nonseekable_open(inode, file);
}
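The reworked dump loop above prints the CCK group first and then walks the remaining groups, resuming one slot past MINSTREL_CCK_GROUP in the second for loop so the CCK entry is not printed twice. A tiny sketch of the resulting visit order, assuming the group indexes implied by the header change (MINSTREL_CCK_GROUP = 12, 31 groups in total):

#include <stdio.h>

#define MINSTREL_CCK_GROUP      12      /* assumed index, per the header above */
#define GROUPS_NB               31

int main(void)
{
        int i;

        printf("%d ", MINSTREL_CCK_GROUP);      /* CCK stats come first */
        for (i = 0; i < MINSTREL_CCK_GROUP; i++)
                printf("%d ", i);               /* then the HT groups */
        for (i++; i < GROUPS_NB; i++)
                printf("%d ", i);               /* then the VHT groups */
        printf("\n");                           /* 12 0 1 ... 11 13 ... 30 */
        return 0;
}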
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a37f9af634cb..683b10f46505 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -39,7 +39,8 @@
* only useful for monitoring.
*/
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int rtap_vendor_space)
{
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
@@ -52,20 +53,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
}
}
+ __pskb_pull(skb, rtap_vendor_space);
+
return skb;
}
-static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
+static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
+ unsigned int rtap_vendor_space)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_hdr *hdr;
+
+ hdr = (void *)(skb->data + rtap_vendor_space);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_FAILED_PLCP_CRC |
RX_FLAG_AMPDU_IS_ZEROLEN))
return true;
- if (unlikely(skb->len < 16 + present_fcs_len))
+ if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
return true;
if (ieee80211_is_ctl(hdr->frame_control) &&
@@ -77,8 +83,9 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
}
static int
-ieee80211_rx_radiotap_space(struct ieee80211_local *local,
- struct ieee80211_rx_status *status)
+ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
+ struct ieee80211_rx_status *status,
+ struct sk_buff *skb)
{
int len;
@@ -121,6 +128,21 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 2 * hweight8(status->chains);
}
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
+
+ /* vendor presence bitmap */
+ len += 4;
+ /* alignment for fixed 6-byte vendor data header */
+ len = ALIGN(len, 2);
+ /* vendor data header */
+ len += 6;
+ if (WARN_ON(rtap->align == 0))
+ rtap->align = 1;
+ len = ALIGN(len, rtap->align);
+ len += rtap->len + rtap->pad;
+ }
+
return len;
}
@@ -144,13 +166,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
u16 channel_flags = 0;
int mpdulen, chain;
unsigned long chains = status->chains;
+ struct ieee80211_vendor_radiotap rtap = {};
+
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
+ /* rtap.len and rtap.pad are undone immediately */
+ skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
+ }
mpdulen = skb->len;
if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
mpdulen += FCS_LEN;
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
- memset(rthdr, 0, rtap_len);
+ memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
@@ -172,6 +201,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
}
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
+ BIT(IEEE80211_RADIOTAP_EXT);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = rtap.present;
+ }
+
put_unaligned_le32(it_present_val, it_present);
pos = (void *)(it_present + 1);
@@ -366,6 +403,22 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos++ = status->chain_signal[chain];
*pos++ = chain;
}
+
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ /* ensure 2 byte alignment for the vendor field as required */
+ if ((pos - (u8 *)rthdr) & 1)
+ *pos++ = 0;
+ *pos++ = rtap.oui[0];
+ *pos++ = rtap.oui[1];
+ *pos++ = rtap.oui[2];
+ *pos++ = rtap.subns;
+ put_unaligned_le16(rtap.len, pos);
+ pos += 2;
+ /* align the actual payload as requested */
+ while ((pos - (u8 *)rthdr) & (rtap.align - 1))
+ *pos++ = 0;
+ /* data (and possible padding) already follows */
+ }
}
/*
@@ -379,10 +432,17 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
struct ieee80211_sub_if_data *sdata;
- int needed_headroom;
+ int rt_hdrlen, needed_headroom;
struct sk_buff *skb, *skb2;
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
+ unsigned int rtap_vendor_space = 0;
+
+ if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
+ struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
+
+ rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
+ }
/*
* First, we may need to make a copy of the skb because
@@ -396,25 +456,27 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
- /* ensure hdr->frame_control is in skb head */
- if (!pskb_may_pull(origskb, 2)) {
+ /* ensure hdr->frame_control and vendor radiotap data are in skb head */
+ if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
dev_kfree_skb(origskb);
return NULL;
}
if (!local->monitors) {
- if (should_drop_frame(origskb, present_fcs_len)) {
+ if (should_drop_frame(origskb, present_fcs_len,
+ rtap_vendor_space)) {
dev_kfree_skb(origskb);
return NULL;
}
- return remove_monitor_info(local, origskb);
+ return remove_monitor_info(local, origskb, rtap_vendor_space);
}
/* room for the radiotap header based on driver features */
- needed_headroom = ieee80211_rx_radiotap_space(local, status);
+ rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
+ needed_headroom = rt_hdrlen - rtap_vendor_space;
- if (should_drop_frame(origskb, present_fcs_len)) {
+ if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
/* only need to expand headroom if necessary */
skb = origskb;
origskb = NULL;
@@ -438,15 +500,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
*/
skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
- origskb = remove_monitor_info(local, origskb);
+ origskb = remove_monitor_info(local, origskb,
+ rtap_vendor_space);
if (!skb)
return origskb;
}
/* prepend radiotap information */
- ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
- true);
+ ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -985,7 +1047,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
@@ -994,10 +1056,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
* Drop duplicate 802.11 retransmissions
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
- if (rx->skb->len >= 24 && rx->sta &&
- !ieee80211_is_ctl(hdr->frame_control) &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
- !is_multicast_ether_addr(hdr->addr1)) {
+
+ if (rx->skb->len < 24)
+ return RX_CONTINUE;
+
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return RX_CONTINUE;
+
+ if (rx->sta) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
@@ -1011,6 +1079,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
}
}
+ return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+
if (unlikely(rx->skb->len < 16)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
return RX_DROP_MONITOR;
@@ -1032,6 +1108,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
+ rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
/*
* accept port control frames from the AP even when it's not
@@ -1272,6 +1349,12 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->last_rx_rate_vht_nss = status->vht_nss;
}
}
+ } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
+ u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
+ NL80211_IFTYPE_OCB);
+ /* OCB uses wild-card BSSID */
+ if (is_broadcast_ether_addr(bssid))
+ sta->last_rx = jiffies;
} else if (!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx when if they are found to
@@ -1678,14 +1761,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
- goto out;
-
if (is_multicast_ether_addr(hdr->addr1)) {
rx->local->dot11MulticastReceivedFrameCount++;
- goto out;
+ goto out_no_led;
}
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ goto out;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -1776,9 +1859,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
+ ieee80211_led_rx(rx->local);
+ out_no_led:
if (rx->sta)
rx->sta->rx_packets++;
- ieee80211_led_rx(rx->local);
return RX_CONTINUE;
}
@@ -2250,6 +2334,27 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (!ieee80211_frame_allowed(rx, fc))
return RX_DROP_MONITOR;
+ /* directly handle TDLS channel switch requests/responses */
+ if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
+ cpu_to_be16(ETH_P_TDLS))) {
+ struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
+
+ if (pskb_may_pull(rx->skb,
+ offsetof(struct ieee80211_tdls_data, u)) &&
+ tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
+ tf->category == WLAN_CATEGORY_TDLS &&
+ (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
+ tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
+
+ return RX_QUEUED;
+ }
+ }
+
if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
unlikely(port_control) && sdata->bss) {
sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
@@ -2820,6 +2925,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
if (!ieee80211_vif_is_mesh(&sdata->vif) &&
sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ sdata->vif.type != NL80211_IFTYPE_OCB &&
sdata->vif.type != NL80211_IFTYPE_STATION)
return RX_DROP_MONITOR;
@@ -2884,8 +2990,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
if (!local->cooked_mntrs)
goto out_free_skb;
+ /* vendor data has long since been removed at this point */
+ status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
/* room for the radiotap header based on driver features */
- needed_headroom = ieee80211_rx_radiotap_space(local, status);
+ needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
if (skb_headroom(skb) < needed_headroom &&
pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
@@ -3038,6 +3146,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
goto rxh_next; \
} while (0);
+ CALL_RXH(ieee80211_rx_h_check_dup)
CALL_RXH(ieee80211_rx_h_check)
ieee80211_rx_reorder_ampdu(rx, &reorder_release);
@@ -3130,6 +3239,33 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
BIT(rate_idx));
}
break;
+ case NL80211_IFTYPE_OCB:
+ if (!bssid)
+ return false;
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ return false;
+ } else if (!is_broadcast_ether_addr(bssid)) {
+ ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n");
+ return false;
+ } else if (!multicast &&
+ !ether_addr_equal(sdata->dev->dev_addr,
+ hdr->addr1)) {
+ /* if we are in promisc mode we also accept
+ * packets not destined for us
+ */
+ if (!(sdata->dev->flags & IFF_PROMISC))
+ return false;
+ rx->flags &= ~IEEE80211_RX_RA_MATCH;
+ } else if (!rx->sta) {
+ int rate_idx;
+ if (status->flag & RX_FLAG_HT)
+ rate_idx = 0; /* TODO: HT rates */
+ else
+ rate_idx = status->rate_idx;
+ ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
+ BIT(rate_idx));
+ }
+ break;
case NL80211_IFTYPE_MESH_POINT:
if (!multicast &&
!ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
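The rx.c changes above let drivers leave vendor radiotap data at the head of the skb; ieee80211_rx_radiotap_hdrlen() then grows the header by a second presence word, the fixed 6-byte vendor namespace header and the pre-padded vendor payload, with the two alignment steps shown in the hunk. A standalone sketch of just that length computation (struct vendor_rtap is a stand-in for ieee80211_vendor_radiotap, and the numbers in main() are made up):

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct vendor_rtap {            /* stand-in for ieee80211_vendor_radiotap */
        unsigned int align;     /* required payload alignment, power of two */
        unsigned int len;       /* vendor payload length */
        unsigned int pad;       /* padding the driver already added */
};

static unsigned int rtap_vendor_len(unsigned int len, const struct vendor_rtap *r)
{
        len += 4;                       /* vendor presence bitmap */
        len = ALIGN_UP(len, 2);         /* fixed 6-byte vendor data header */
        len += 6;
        len = ALIGN_UP(len, r->align ? r->align : 1);
        len += r->len + r->pad;         /* payload already follows in the skb */
        return len;
}

int main(void)
{
        struct vendor_rtap r = { .align = 4, .len = 11, .pad = 1 };

        /* e.g. 25 bytes of standard radiotap fields accumulated so far */
        printf("total radiotap length: %u\n", rtap_vendor_len(25, &r));
        return 0;
}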
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index af0d094b2f2f..ae842678b629 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -184,9 +184,21 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
return;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
- /* ignore ProbeResp to foreign address */
- if ((!sdata1 || !ether_addr_equal(mgmt->da, sdata1->vif.addr)) &&
- (!sdata2 || !ether_addr_equal(mgmt->da, sdata2->vif.addr)))
+ struct cfg80211_scan_request *scan_req;
+ struct cfg80211_sched_scan_request *sched_scan_req;
+
+ scan_req = rcu_dereference(local->scan_req);
+ sched_scan_req = rcu_dereference(local->sched_scan_req);
+
+ /* ignore ProbeResp to foreign address unless scanning
+ * with randomised address
+ */
+ if (!(sdata1 &&
+ (ether_addr_equal(mgmt->da, sdata1->vif.addr) ||
+ scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) &&
+ !(sdata2 &&
+ (ether_addr_equal(mgmt->da, sdata2->vif.addr) ||
+ sched_scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)))
return;
elements = mgmt->u.probe_resp.variable;
@@ -234,11 +246,14 @@ ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
/* return false if no more work */
static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
{
- struct cfg80211_scan_request *req = local->scan_req;
+ struct cfg80211_scan_request *req;
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
int i, ielen, n_chans;
+ req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
+
if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
return false;
@@ -281,6 +296,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
bands_used, req->rates, &chandef);
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
+ ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
+ ether_addr_copy(local->hw_scan_req->req.mac_addr_mask,
+ req->mac_addr_mask);
return true;
}
@@ -290,6 +308,8 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
struct ieee80211_local *local = hw_to_local(hw);
bool hw_scan = local->ops->hw_scan;
bool was_scanning = local->scanning;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_sub_if_data *scan_sdata;
lockdep_assert_held(&local->mtx);
@@ -322,9 +342,15 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
kfree(local->hw_scan_req);
local->hw_scan_req = NULL;
- if (local->scan_req != local->int_scan_req)
- cfg80211_scan_done(local->scan_req, aborted);
- local->scan_req = NULL;
+ scan_req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
+
+ if (scan_req != local->int_scan_req)
+ cfg80211_scan_done(scan_req, aborted);
+ RCU_INIT_POINTER(local->scan_req, NULL);
+
+ scan_sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
RCU_INIT_POINTER(local->scan_sdata, NULL);
local->scanning = 0;
@@ -335,7 +361,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
if (!hw_scan) {
ieee80211_configure_filter(local);
- drv_sw_scan_complete(local);
+ drv_sw_scan_complete(local, scan_sdata);
ieee80211_offchannel_return(local);
}
@@ -361,7 +387,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
}
EXPORT_SYMBOL(ieee80211_scan_completed);
-static int ieee80211_start_sw_scan(struct ieee80211_local *local)
+static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
if (local->use_chanctx)
@@ -380,7 +407,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
* nullfunc frames and probe requests will be dropped in
* ieee80211_tx_h_check_assoc().
*/
- drv_sw_scan_start(local);
+ drv_sw_scan_start(local, sdata, local->scan_addr);
local->leave_oper_channel_time = jiffies;
local->next_scan_state = SCAN_DECISION;
@@ -440,23 +467,26 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
{
int i;
struct ieee80211_sub_if_data *sdata;
+ struct cfg80211_scan_request *scan_req;
enum ieee80211_band band = local->hw.conf.chandef.chan->band;
u32 tx_flags;
+ scan_req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
+
tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
- if (local->scan_req->no_cck)
+ if (scan_req->no_cck)
tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
- for (i = 0; i < local->scan_req->n_ssids; i++)
+ for (i = 0; i < scan_req->n_ssids; i++)
ieee80211_send_probe_req(
- sdata, NULL,
- local->scan_req->ssids[i].ssid,
- local->scan_req->ssids[i].ssid_len,
- local->scan_req->ie, local->scan_req->ie_len,
- local->scan_req->rates[band], false,
+ sdata, local->scan_addr, NULL,
+ scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
+ scan_req->ie, scan_req->ie_len,
+ scan_req->rates[band], false,
tx_flags, local->hw.conf.chandef.chan, true);
/*
@@ -480,7 +510,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_can_scan(local, sdata)) {
/* wait for the work to finish/time out */
- local->scan_req = req;
+ rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
return 0;
}
@@ -530,9 +560,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
*/
}
- local->scan_req = req;
+ rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(local->scan_addr,
+ req->mac_addr,
+ req->mac_addr_mask);
+ else
+ memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN);
+
if (local->ops->hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
@@ -549,7 +586,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
/* Notify driver scan is starting, keep order of operations
* same as normal software scan, in case that matters. */
- drv_sw_scan_start(local);
+ drv_sw_scan_start(local, sdata, local->scan_addr);
ieee80211_configure_filter(local); /* accept probe-responses */
@@ -558,7 +595,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if ((req->channels[0]->flags &
IEEE80211_CHAN_NO_IR) ||
- !local->scan_req->n_ssids) {
+ !req->n_ssids) {
next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
} else {
ieee80211_scan_state_send_probe(local, &next_delay);
@@ -579,8 +616,9 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->ops->hw_scan) {
WARN_ON(!ieee80211_prep_hw_scan(local));
rc = drv_hw_scan(local, sdata, local->hw_scan_req);
- } else
- rc = ieee80211_start_sw_scan(local);
+ } else {
+ rc = ieee80211_start_sw_scan(local, sdata);
+ }
if (rc) {
kfree(local->hw_scan_req);
@@ -617,6 +655,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
struct ieee80211_channel *next_chan;
enum mac80211_scan_state next_scan_state;
+ struct cfg80211_scan_request *scan_req;
/*
* check if at least one STA interface is associated,
@@ -641,7 +680,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
}
mutex_unlock(&local->iflist_mtx);
- next_chan = local->scan_req->channels[local->scan_channel_idx];
+ scan_req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
+
+ next_chan = scan_req->channels[local->scan_channel_idx];
/*
* we're currently scanning a different channel, let's
@@ -656,7 +698,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
local->leave_oper_channel_time + HZ / 8);
if (associated && !tx_empty) {
- if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+ if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
next_scan_state = SCAN_ABORT;
else
next_scan_state = SCAN_SUSPEND;
@@ -677,14 +719,18 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
int skip;
struct ieee80211_channel *chan;
enum nl80211_bss_scan_width oper_scan_width;
+ struct cfg80211_scan_request *scan_req;
+
+ scan_req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
skip = 0;
- chan = local->scan_req->channels[local->scan_channel_idx];
+ chan = scan_req->channels[local->scan_channel_idx];
local->scan_chandef.chan = chan;
local->scan_chandef.center_freq1 = chan->center_freq;
local->scan_chandef.center_freq2 = 0;
- switch (local->scan_req->scan_width) {
+ switch (scan_req->scan_width) {
case NL80211_BSS_CHAN_WIDTH_5:
local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
break;
@@ -698,7 +744,7 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
oper_scan_width = cfg80211_chandef_to_scan_width(
&local->_oper_chandef);
if (chan == local->_oper_chandef.chan &&
- oper_scan_width == local->scan_req->scan_width)
+ oper_scan_width == scan_req->scan_width)
local->scan_chandef = local->_oper_chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -727,8 +773,7 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
*
* In any case, it is not necessary for a passive scan.
*/
- if (chan->flags & IEEE80211_CHAN_NO_IR ||
- !local->scan_req->n_ssids) {
+ if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
return;
@@ -777,6 +822,7 @@ void ieee80211_scan_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, scan_work.work);
struct ieee80211_sub_if_data *sdata;
+ struct cfg80211_scan_request *scan_req;
unsigned long next_delay = 0;
bool aborted;
@@ -784,6 +830,8 @@ void ieee80211_scan_work(struct work_struct *work)
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
+ scan_req = rcu_dereference_protected(local->scan_req,
+ lockdep_is_held(&local->mtx));
/* When scanning on-channel, the first-callback means completed. */
if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
@@ -796,20 +844,19 @@ void ieee80211_scan_work(struct work_struct *work)
goto out_complete;
}
- if (!sdata || !local->scan_req)
+ if (!sdata || !scan_req)
goto out;
- if (local->scan_req && !local->scanning) {
- struct cfg80211_scan_request *req = local->scan_req;
+ if (!local->scanning) {
int rc;
- local->scan_req = NULL;
+ RCU_INIT_POINTER(local->scan_req, NULL);
RCU_INIT_POINTER(local->scan_sdata, NULL);
- rc = __ieee80211_start_scan(sdata, req);
+ rc = __ieee80211_start_scan(sdata, scan_req);
if (rc) {
/* need to complete scan in cfg80211 */
- local->scan_req = req;
+ rcu_assign_pointer(local->scan_req, scan_req);
aborted = true;
goto out_complete;
} else
@@ -829,7 +876,7 @@ void ieee80211_scan_work(struct work_struct *work)
switch (local->next_scan_state) {
case SCAN_DECISION:
/* if no more bands/channels left, complete scan */
- if (local->scan_channel_idx >= local->scan_req->n_channels) {
+ if (local->scan_channel_idx >= scan_req->n_channels) {
aborted = false;
goto out_complete;
}
@@ -1043,7 +1090,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
rcu_assign_pointer(local->sched_scan_sdata, sdata);
- local->sched_scan_req = req;
+ rcu_assign_pointer(local->sched_scan_req, req);
}
kfree(ie);
@@ -1052,7 +1099,7 @@ out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
- local->sched_scan_req = NULL;
+ RCU_INIT_POINTER(local->sched_scan_req, NULL);
}
return ret;
@@ -1090,7 +1137,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
}
/* We don't want to restart sched scan anymore. */
- local->sched_scan_req = NULL;
+ RCU_INIT_POINTER(local->sched_scan_req, NULL);
if (rcu_access_pointer(local->sched_scan_sdata)) {
ret = drv_sched_scan_stop(local, sdata);
@@ -1125,7 +1172,7 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
/* If sched scan was aborted by the driver. */
- local->sched_scan_req = NULL;
+ RCU_INIT_POINTER(local->sched_scan_req, NULL);
mutex_unlock(&local->mtx);
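With NL80211_SCAN_FLAG_RANDOM_ADDR, the software-scan path above sends probe requests from local->scan_addr, derived from req->mac_addr and req->mac_addr_mask: address bits covered by set mask bits are kept, the rest are randomized. A userspace sketch of that masking, as one plausible reading of get_random_mask_addr() rather than the kernel helper itself:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* keep the bits selected by mask from addr, randomize the rest */
static void random_mask_addr(unsigned char *out, const unsigned char *addr,
                             const unsigned char *mask)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                out[i] = (addr[i] & mask[i]) | (rand() & 0xff & ~mask[i]);
}

int main(void)
{
        unsigned char addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
        unsigned char out[ETH_ALEN];
        int i;

        srand(time(NULL));
        random_mask_addr(out, addr, mask);      /* OUI kept, NIC part random */
        for (i = 0; i < ETH_ALEN; i++)
                printf("%02x%s", out[i], i < ETH_ALEN - 1 ? ":" : "\n");
        return 0;
}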
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index de494df3bab8..a42f5b2b024d 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -351,6 +351,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sta_state = IEEE80211_STA_NONE;
+ /* Mark TID as unreserved */
+ sta->reserved_tid = IEEE80211_TID_UNRESERVED;
+
ktime_get_ts(&uptime);
sta->last_connected = uptime.tv_sec;
ewma_init(&sta->avg_signal, 1024, 8);
@@ -501,7 +504,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
/* make the station visible */
sta_info_hash_add(local, sta);
- list_add_rcu(&sta->list, &local->sta_list);
+ list_add_tail_rcu(&sta->list, &local->sta_list);
/* notify driver */
err = sta_info_insert_drv_state(local, sdata, sta);
@@ -847,6 +850,15 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
if (WARN_ON(ret))
return ret;
+ /*
+ * for TDLS peers, make sure to return to the base channel before
+ * removal.
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
+ drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
+ clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
+ }
+
list_del_rcu(&sta->list);
drv_sta_pre_rcu_remove(local, sta->sdata, sta);
@@ -1249,7 +1261,8 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
return;
}
- ieee80211_xmit(sdata, skb, chanctx_conf->def.chan->band);
+ info->band = chanctx_conf->def.chan->band;
+ ieee80211_xmit(sdata, skb);
rcu_read_unlock();
}
@@ -1531,7 +1544,7 @@ void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
break;
case 0:
/* XXX: what is a good value? */
- n_frames = 8;
+ n_frames = 128;
break;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bcda2ac7d844..4f052bb2a5ad 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -49,6 +49,9 @@
* packets. This means the link is enabled.
* @WLAN_STA_TDLS_INITIATOR: We are the initiator of the TDLS link with this
* station.
+ * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
+ * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
+ * TDLS peer
* @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
* keeping station in power-save mode, reply when the driver
* unblocks the station.
@@ -78,6 +81,8 @@ enum ieee80211_sta_info_flags {
WLAN_STA_TDLS_PEER,
WLAN_STA_TDLS_PEER_AUTH,
WLAN_STA_TDLS_INITIATOR,
+ WLAN_STA_TDLS_CHAN_SWITCH,
+ WLAN_STA_TDLS_OFF_CHANNEL,
WLAN_STA_UAPSD,
WLAN_STA_SP,
WLAN_STA_4ADDR_EVENT,
@@ -249,6 +254,9 @@ struct ieee80211_tx_latency_stat {
u32 bin_count;
};
+/* Value to indicate no TID reservation */
+#define IEEE80211_TID_UNRESERVED 0xff
+
/**
* struct sta_info - STA information
*
@@ -337,6 +345,7 @@ struct ieee80211_tx_latency_stat {
* AP only.
* @cipher_scheme: optional cipher scheme for this station
* @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
+ * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
*/
struct sta_info {
/* General information, mostly static */
@@ -454,6 +463,8 @@ struct sta_info {
/* TDLS timeout data */
unsigned long last_tdls_pkt_time;
+ u8 reserved_tid;
+
/* keep last! */
struct ieee80211_sta sta;
};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 89290e33dafe..bb146f377ee4 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -390,6 +390,46 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
}
}
+/*
+ * Handles the TX status of TDLS teardown frames.
+ * If the frame wasn't ACKed by the peer, it will be re-sent through the AP.
+ */
+static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 flags)
+{
+ struct sk_buff *teardown_skb;
+ struct sk_buff *orig_teardown_skb;
+ bool is_teardown = false;
+
+ /* Get the teardown data we need and free the lock */
+ spin_lock(&sdata->u.mgd.teardown_lock);
+ teardown_skb = sdata->u.mgd.teardown_skb;
+ orig_teardown_skb = sdata->u.mgd.orig_teardown_skb;
+ if ((skb == orig_teardown_skb) && teardown_skb) {
+ sdata->u.mgd.teardown_skb = NULL;
+ sdata->u.mgd.orig_teardown_skb = NULL;
+ is_teardown = true;
+ }
+ spin_unlock(&sdata->u.mgd.teardown_lock);
+
+ if (is_teardown) {
+ /* This mechanism relies on being able to get ACKs */
+ WARN_ON(!(local->hw.flags &
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS));
+
+ /* Check if peer has ACKed */
+ if (flags & IEEE80211_TX_STAT_ACK) {
+ dev_kfree_skb_any(teardown_skb);
+ } else {
+ tdls_dbg(sdata,
+ "TDLS Resending teardown through AP\n");
+
+ ieee80211_subif_start_xmit(teardown_skb, skb->dev);
+ }
+ }
+}
+
static void ieee80211_report_used_skb(struct ieee80211_local *local,
struct sk_buff *skb, bool dropped)
{
@@ -426,8 +466,19 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
if (!sdata) {
skb->dev = NULL;
} else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
- ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control,
- acked);
+ unsigned int hdr_size =
+ ieee80211_hdrlen(hdr->frame_control);
+
+ /* Check to see if packet is a TDLS teardown packet */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ (ieee80211_get_tdls_action(skb, hdr_size) ==
+ WLAN_TDLS_TEARDOWN))
+ ieee80211_tdls_td_tx_handle(local, sdata, skb,
+ info->flags);
+ else
+ ieee80211_mgd_conn_tx_status(sdata,
+ hdr->frame_control,
+ acked);
} else if (ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
cfg80211_probe_status(sdata->dev, hdr->addr1,
@@ -541,10 +592,9 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
#define STA_LOST_TDLS_PKT_THRESHOLD 10
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
-static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
+static void ieee80211_lost_packet(struct sta_info *sta,
+ struct ieee80211_tx_info *info)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -571,24 +621,13 @@ static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
sta->lost_packets = 0;
}
-void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ int *retry_count)
{
- struct sk_buff *skb2;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- __le16 fc;
- struct ieee80211_supported_band *sband;
- struct ieee80211_sub_if_data *sdata;
- struct net_device *prev_dev = NULL;
- struct sta_info *sta, *tmp;
- int retry_count = -1, i;
int rates_idx = -1;
- bool send_to_cooked;
- bool acked;
- struct ieee80211_bar *bar;
- int rtap_len;
- int shift = 0;
+ int count = -1;
+ int i;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -606,12 +645,91 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
break;
}
- retry_count += info->status.rates[i].count;
+ count += info->status.rates[i].count;
}
rates_idx = i - 1;
- if (retry_count < 0)
- retry_count = 0;
+ if (count < 0)
+ count = 0;
+
+ *retry_count = count;
+ return rates_idx;
+}
+
+void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_supported_band *sband;
+ int retry_count;
+ int rates_idx;
+ bool acked;
+
+ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
+
+ sband = hw->wiphy->bands[info->band];
+
+ acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ if (pubsta) {
+ struct sta_info *sta;
+
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ if (!acked)
+ sta->tx_retry_failed++;
+ sta->tx_retry_count += retry_count;
+
+ if (acked) {
+ sta->last_rx = jiffies;
+
+ if (sta->lost_packets)
+ sta->lost_packets = 0;
+
+ /* Track when last TDLS packet was ACKed */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
+ sta->last_tdls_pkt_time = jiffies;
+ } else {
+ ieee80211_lost_packet(sta, info);
+ }
+
+ rate_control_tx_status_noskb(local, sband, sta, info);
+ }
+
+ if (acked) {
+ local->dot11TransmittedFrameCount++;
+ if (!pubsta)
+ local->dot11MulticastTransmittedFrameCount++;
+ if (retry_count > 0)
+ local->dot11RetryCount++;
+ if (retry_count > 1)
+ local->dot11MultipleRetryCount++;
+ } else {
+ local->dot11FailedCount++;
+ }
+}
+EXPORT_SYMBOL(ieee80211_tx_status_noskb);
+
+void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct sk_buff *skb2;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ __le16 fc;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_sub_if_data *sdata;
+ struct net_device *prev_dev = NULL;
+ struct sta_info *sta, *tmp;
+ int retry_count;
+ int rates_idx;
+ bool send_to_cooked;
+ bool acked;
+ struct ieee80211_bar *bar;
+ int rtap_len;
+ int shift = 0;
+
+ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
rcu_read_lock();
@@ -704,7 +822,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
- ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data, acked);
+ ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
+ acked, info->status.tx_time);
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
@@ -715,7 +834,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
sta->last_tdls_pkt_time = jiffies;
} else {
- ieee80211_lost_packet(sta, skb);
+ ieee80211_lost_packet(sta, info);
}
}
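ieee80211_tx_get_rates() above factors the retry accounting out of ieee80211_tx_status() so that the new ieee80211_tx_status_noskb() path can share it: it sums status.rates[i].count until the rate table ends, returns the index of the last rate tried, and clamps the retry count at zero. A reduced sketch of that accumulation, with the break conditions that are elided in the hunk simplified to a plain invalid-index check:

#include <stdio.h>

#define TX_MAX_RATES 4

struct tx_rate {
        signed char idx;        /* -1 marks the end of the table */
        unsigned char count;    /* attempts at this rate */
};

static int tx_get_rates(const struct tx_rate *rates, int *retry_count)
{
        int count = -1;         /* the first attempt is not a retry */
        int i;

        for (i = 0; i < TX_MAX_RATES; i++) {
                if (rates[i].idx < 0)
                        break;
                count += rates[i].count;
        }

        *retry_count = count < 0 ? 0 : count;
        return i - 1;           /* index of the last rate used */
}

int main(void)
{
        struct tx_rate rates[TX_MAX_RATES] = {
                { .idx = 5, .count = 3 }, { .idx = 2, .count = 2 },
                { .idx = -1 }, { .idx = -1 },
        };
        int retries;
        int last = tx_get_rates(rates, &retries);

        printf("last rate index %d, %d retries\n", last, retries); /* 1, 4 */
        return 0;
}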
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 4ea25dec0698..55ddd77b865d 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -35,19 +35,101 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk)
mutex_unlock(&local->mtx);
}
-static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+ struct sk_buff *skb)
{
u8 *pos = (void *)skb_put(skb, 7);
+ bool chan_switch = local->hw.wiphy->features &
+ NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
*pos++ = WLAN_EID_EXT_CAPABILITY;
*pos++ = 5; /* len */
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = 0x0;
- *pos++ = 0x0;
+ *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
}
+static u8
+ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u16 start, u16 end,
+ u16 spacing)
+{
+ u8 subband_cnt = 0, ch_cnt = 0;
+ struct ieee80211_channel *ch;
+ struct cfg80211_chan_def chandef;
+ int i, subband_start;
+
+ for (i = start; i <= end; i += spacing) {
+ if (!ch_cnt)
+ subband_start = i;
+
+ ch = ieee80211_get_channel(sdata->local->hw.wiphy, i);
+ if (ch) {
+ /* we will be active on the channel */
+ u32 flags = IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_IR;
+ cfg80211_chandef_create(&chandef, ch,
+ NL80211_CHAN_HT20);
+ if (cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ &chandef, flags)) {
+ ch_cnt++;
+ continue;
+ }
+ }
+
+ if (ch_cnt) {
+ u8 *pos = skb_put(skb, 2);
+ *pos++ = ieee80211_frequency_to_channel(subband_start);
+ *pos++ = ch_cnt;
+
+ subband_cnt++;
+ ch_cnt = 0;
+ }
+ }
+
+ return subband_cnt;
+}
+
+static void
+ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ /*
+ * Add possible channels for TDLS. These are channels that are allowed
+ * to be active.
+ */
+ u8 subband_cnt;
+ u8 *pos = skb_put(skb, 2);
+
+ *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
+
+ /*
+ * 5GHz and 2GHz channel numbers can overlap. Ignore this for now, as
+ * this doesn't happen in real-world scenarios.
+ */
+
+ /* 2GHz, with 5MHz spacing */
+ subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5);
+
+ /* 5GHz, with 20MHz spacing */
+ subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20);
+
+ /* length */
+ *pos = 2 * subband_cnt;
+}
+
+static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_BSS_COEX_2040;
+ *pos++ = 1; /* len */
+
+ *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST;
+}
+
static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
u16 status_code)
{
@@ -190,6 +272,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_add_srates_ie(sdata, skb, false, band);
ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+ ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
if (extra_ies_len) {
@@ -209,7 +292,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
offset = noffset;
}
- ieee80211_tdls_add_ext_capab(skb);
+ ieee80211_tdls_add_ext_capab(local, skb);
/* add the QoS element if we support it */
if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -271,6 +354,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
+ if (ht_cap.ht_supported &&
+ (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ ieee80211_tdls_add_bss_coex_ie(skb);
+
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
@@ -362,11 +449,68 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
}
+static void
+ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, const u8 *peer,
+ bool initiator, const u8 *extra_ies,
+ size_t extra_ies_len, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_tdls_data *tf;
+ size_t offset = 0, noffset;
+ u8 *pos;
+
+ if (WARN_ON_ONCE(!chandef))
+ return;
+
+ tf = (void *)skb->data;
+ tf->u.chan_switch_req.target_channel =
+ ieee80211_frequency_to_channel(chandef->chan->center_freq);
+ tf->u.chan_switch_req.oper_class = oper_class;
+
+ if (extra_ies_len) {
+ static const u8 before_lnkie[] = {
+ WLAN_EID_SECONDARY_CHANNEL_OFFSET,
+ };
+ noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+ before_lnkie,
+ ARRAY_SIZE(before_lnkie),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, extra_ies + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+ /* add any remaining IEs */
+ if (extra_ies_len) {
+ noffset = extra_ies_len;
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, extra_ies + offset, noffset - offset);
+ }
+}
+
+static void
+ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, const u8 *peer,
+ u16 status_code, bool initiator,
+ const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ if (status_code == 0)
+ ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+}
+
static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, const u8 *peer,
u8 action_code, u16 status_code,
bool initiator, const u8 *extra_ies,
- size_t extra_ies_len)
+ size_t extra_ies_len, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
{
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
@@ -393,6 +537,18 @@ static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata,
if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
break;
+ case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
+ ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer,
+ initiator, extra_ies,
+ extra_ies_len,
+ oper_class, chandef);
+ break;
+ case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
+ ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer,
+ status_code,
+ initiator, extra_ies,
+ extra_ies_len);
+ break;
}
}
@@ -459,6 +615,19 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
skb_put(skb, sizeof(tf->u.discover_req));
tf->u.discover_req.dialog_token = dialog_token;
break;
+ case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.chan_switch_req));
+ break;
+ case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE;
+
+ skb_put(skb, sizeof(tf->u.chan_switch_resp));
+ tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code);
+ break;
default:
return -EINVAL;
}
@@ -502,32 +671,33 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int
-ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, u8 action_code,
- u8 dialog_token, u16 status_code,
- u32 peer_capability, bool initiator,
- const u8 *extra_ies, size_t extra_ies_len)
+static struct sk_buff *
+ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
+ const u8 *peer, u8 action_code,
+ u8 dialog_token, u16 status_code,
+ bool initiator, const u8 *extra_ies,
+ size_t extra_ies_len, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = NULL;
- bool send_direct;
- struct sta_info *sta;
+ struct sk_buff *skb;
int ret;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- max(sizeof(struct ieee80211_mgmt),
- sizeof(struct ieee80211_tdls_data)) +
- 50 + /* supported rates */
- 7 + /* ext capab */
- 26 + /* max(WMM-info, WMM-param) */
- 2 + max(sizeof(struct ieee80211_ht_cap),
- sizeof(struct ieee80211_ht_operation)) +
- extra_ies_len +
- sizeof(struct ieee80211_tdls_lnkie));
+ skb = netdev_alloc_skb(sdata->dev,
+ local->hw.extra_tx_headroom +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ 50 + /* supported rates */
+ 7 + /* ext capab */
+ 26 + /* max(WMM-info, WMM-param) */
+ 2 + max(sizeof(struct ieee80211_ht_cap),
+ sizeof(struct ieee80211_ht_operation)) +
+ 50 + /* supported channels */
+ 3 + /* 40/20 BSS coex */
+ extra_ies_len +
+ sizeof(struct ieee80211_tdls_lnkie));
if (!skb)
- return -ENOMEM;
+ return NULL;
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -537,16 +707,18 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_DISCOVERY_REQUEST:
- ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+ case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
+ case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
+ ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy,
+ sdata->dev, peer,
action_code, dialog_token,
status_code, skb);
- send_direct = false;
break;
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
- ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+ ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev,
+ peer, action_code,
dialog_token, status_code,
skb);
- send_direct = true;
break;
default:
ret = -ENOTSUPP;
@@ -556,14 +728,40 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
if (ret < 0)
goto fail;
+ ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
+ initiator, extra_ies, extra_ies_len, oper_class,
+ chandef);
+ return skb;
+
+fail:
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static int
+ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, u32 peer_capability,
+ bool initiator, const u8 *extra_ies,
+ size_t extra_ies_len, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sk_buff *skb = NULL;
+ struct sta_info *sta;
+ u32 flags = 0;
+ int ret = 0;
+
rcu_read_lock();
sta = sta_info_get(sdata, peer);
/* infer the initiator if we can, to support old userspace */
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
- if (sta)
+ if (sta) {
set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
+ sta->sta.tdls_initiator = false;
+ }
/* fall-through */
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_TDLS_DISCOVERY_REQUEST:
@@ -575,13 +773,17 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
* Make the last packet sent take effect for the initiator
* value.
*/
- if (sta)
+ if (sta) {
clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
+ sta->sta.tdls_initiator = true;
+ }
/* fall-through */
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
initiator = false;
break;
case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
+ case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
/* any value is ok */
break;
default:
@@ -596,9 +798,17 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
if (ret < 0)
goto fail;
- ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
- initiator, extra_ies, extra_ies_len);
- if (send_direct) {
+ skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code,
+ dialog_token, status_code,
+ initiator, extra_ies,
+ extra_ies_len, oper_class,
+ chandef);
+ if (!skb) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
ieee80211_tx_skb(sdata, skb);
return 0;
}
@@ -619,9 +829,44 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
break;
}
+ /*
+ * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
+ * Later, if no ACK is returned from the peer, we will re-send the teardown
+ * packet through the AP.
+ */
+ if ((action_code == WLAN_TDLS_TEARDOWN) &&
+ (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ struct sta_info *sta = NULL;
+ bool try_resend; /* Should we keep skb for possible resend */
+
+ /* If not sending directly to peer - no point in keeping skb */
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+ rcu_read_unlock();
+
+ spin_lock_bh(&sdata->u.mgd.teardown_lock);
+ if (try_resend && !sdata->u.mgd.teardown_skb) {
+ /* Mark it as requiring TX status callback */
+ flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_INTFL_MLME_CONN_TX;
+
+ /*
+ * skb is copied since mac80211 will later set
+ * properties that might not be the same as the AP,
+ * such as encryption, QoS, addresses, etc.
+ *
+ * No problem if skb_copy() fails, so no need to check.
+ */
+ sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC);
+ sdata->u.mgd.orig_teardown_skb = skb;
+ }
+ spin_unlock_bh(&sdata->u.mgd.teardown_lock);
+ }
+
/* disable bottom halves when entering the Tx path */
local_bh_disable();
- ret = ieee80211_subif_start_xmit(skb, dev);
+ __ieee80211_subif_start_xmit(skb, dev, flags);
local_bh_enable();
return ret;
@@ -672,7 +917,8 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
dialog_token, status_code,
peer_capability, initiator,
- extra_ies, extra_ies_len);
+ extra_ies, extra_ies_len, 0,
+ NULL);
if (ret < 0)
goto exit;
@@ -711,7 +957,8 @@ ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev,
ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
dialog_token, status_code,
peer_capability, initiator,
- extra_ies, extra_ies_len);
+ extra_ies, extra_ies_len, 0,
+ NULL);
if (ret < 0)
sdata_err(sdata, "Failed sending TDLS teardown packet %d\n",
ret);
@@ -781,7 +1028,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
status_code,
peer_capability,
initiator, extra_ies,
- extra_ies_len);
+ extra_ies_len, 0, NULL);
break;
default:
ret = -EOPNOTSUPP;
@@ -884,3 +1131,480 @@ void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp);
}
EXPORT_SYMBOL(ieee80211_tdls_oper_request);
+
+static void
+iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout)
+{
+ struct ieee80211_ch_switch_timing *ch_sw;
+
+ *buf++ = WLAN_EID_CHAN_SWITCH_TIMING;
+ *buf++ = sizeof(struct ieee80211_ch_switch_timing);
+
+ ch_sw = (void *)buf;
+ ch_sw->switch_time = cpu_to_le16(switch_time);
+ ch_sw->switch_timeout = cpu_to_le16(switch_timeout);
+}
+
+/* find switch timing IE in SKB ready for Tx */
+static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf;
+ const u8 *ie_start;
+
+ /*
+ * Get the offset for the new location of the switch timing IE.
+ * The SKB network header will now point to the "payload_type"
+ * element of the TDLS data frame struct.
+ */
+ tf = container_of(skb->data + skb_network_offset(skb),
+ struct ieee80211_tdls_data, payload_type);
+ ie_start = tf->u.chan_switch_req.variable;
+ return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start,
+ skb->len - (ie_start - skb->data));
+}
+
+static struct sk_buff *
+ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ u32 *ch_sw_tm_ie_offset)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) +
+ 2 + sizeof(struct ieee80211_ch_switch_timing)];
+ int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing);
+ u8 *pos = extra_ies;
+ struct sk_buff *skb;
+
+ /*
+ * if chandef points to a wide channel, add a Secondary Channel
+ * Offset information element
+ */
+ if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ struct ieee80211_sec_chan_offs_ie *sec_chan_ie;
+ bool ht40plus;
+
+ *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET;
+ *pos++ = sizeof(*sec_chan_ie);
+ sec_chan_ie = (void *)pos;
+
+ ht40plus = cfg80211_get_chandef_type(chandef) ==
+ NL80211_CHAN_HT40PLUS;
+ sec_chan_ie->sec_chan_offs = ht40plus ?
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE :
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ pos += sizeof(*sec_chan_ie);
+
+ extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie);
+ }
+
+ /* just set the values to 0, this is a template */
+ iee80211_tdls_add_ch_switch_timing(pos, 0, 0);
+
+ skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
+ WLAN_TDLS_CHANNEL_SWITCH_REQUEST,
+ 0, 0, !sta->sta.tdls_initiator,
+ extra_ies, extra_ies_len,
+ oper_class, chandef);
+ if (!skb)
+ return NULL;
+
+ skb = ieee80211_build_data_template(sdata, skb, 0);
+ if (IS_ERR(skb)) {
+ tdls_dbg(sdata, "Failed building TDLS channel switch frame\n");
+ return NULL;
+ }
+
+ if (ch_sw_tm_ie_offset) {
+ const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
+
+ if (!tm_ie) {
+ tdls_dbg(sdata, "No switch timing IE in TDLS switch\n");
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ *ch_sw_tm_ie_offset = tm_ie - skb->data;
+ }
+
+ tdls_dbg(sdata,
+ "TDLS channel switch request template for %pM ch %d width %d\n",
+ sta->sta.addr, chandef->chan->center_freq, chandef->width);
+ return skb;
+}
+
+int
+ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ struct sk_buff *skb = NULL;
+ u32 ch_sw_tm_ie;
+ int ret;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, addr);
+ if (!sta) {
+ tdls_dbg(sdata,
+ "Invalid TDLS peer %pM for channel switch request\n",
+ addr);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) {
+ tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n",
+ addr);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef,
+ &ch_sw_tm_ie);
+ if (!skb) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class,
+ chandef, skb, ch_sw_tm_ie);
+ if (!ret)
+ set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
+
+out:
+ mutex_unlock(&local->sta_mtx);
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+void
+ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, addr);
+ if (!sta) {
+ tdls_dbg(sdata,
+ "Invalid TDLS peer %pM for channel switch cancel\n",
+ addr);
+ goto out;
+ }
+
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
+ tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n",
+ addr);
+ goto out;
+ }
+
+ drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
+ clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
+
+out:
+ mutex_unlock(&local->sta_mtx);
+}
+
+static struct sk_buff *
+ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta,
+ u32 *ch_sw_tm_ie_offset)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct sk_buff *skb;
+ u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)];
+
+ /* initial timing values are always zero in the template */
+ iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0);
+
+ skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
+ WLAN_TDLS_CHANNEL_SWITCH_RESPONSE,
+ 0, 0, !sta->sta.tdls_initiator,
+ extra_ies, sizeof(extra_ies), 0, NULL);
+ if (!skb)
+ return NULL;
+
+ skb = ieee80211_build_data_template(sdata, skb, 0);
+ if (IS_ERR(skb)) {
+ tdls_dbg(sdata,
+ "Failed building TDLS channel switch resp frame\n");
+ return NULL;
+ }
+
+ if (ch_sw_tm_ie_offset) {
+ const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb);
+
+ if (!tm_ie) {
+ tdls_dbg(sdata,
+ "No switch timing IE in TDLS switch resp\n");
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ *ch_sw_tm_ie_offset = tm_ie - skb->data;
+ }
+
+ tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n",
+ sta->sta.addr);
+ return skb;
+}
+
+static int
+ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee802_11_elems elems;
+ struct sta_info *sta;
+ struct ieee80211_tdls_data *tf = (void *)skb->data;
+ bool local_initiator;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable);
+ struct ieee80211_tdls_ch_sw_params params = {};
+ int ret;
+
+ params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE;
+ params.timestamp = rx_status->device_timestamp;
+
+ if (skb->len < baselen) {
+ tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n",
+ skb->len);
+ return -EINVAL;
+ }
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, tf->sa);
+ if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
+ tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
+ tf->sa);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ params.sta = &sta->sta;
+ params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code);
+ if (params.status != 0) {
+ ret = 0;
+ goto call_drv;
+ }
+
+ ieee802_11_parse_elems(tf->u.chan_switch_resp.variable,
+ skb->len - baselen, false, &elems);
+ if (elems.parse_error) {
+ tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!elems.ch_sw_timing || !elems.lnk_id) {
+ tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* validate the initiator is set correctly */
+ local_initiator =
+ !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN);
+ if (local_initiator == sta->sta.tdls_initiator) {
+ tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
+ params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
+
+ params.tmpl_skb =
+ ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie);
+ if (!params.tmpl_skb) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+call_drv:
+ drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
+
+ tdls_dbg(sdata,
+ "TDLS channel switch response received from %pM status %d\n",
+ tf->sa, params.status);
+
+out:
+ mutex_unlock(&local->sta_mtx);
+ dev_kfree_skb_any(params.tmpl_skb);
+ return ret;
+}
+
+static int
+ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee802_11_elems elems;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *chan;
+ enum nl80211_channel_type chan_type;
+ int freq;
+ u8 target_channel, oper_class;
+ bool local_initiator;
+ struct sta_info *sta;
+ enum ieee80211_band band;
+ struct ieee80211_tdls_data *tf = (void *)skb->data;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable);
+ struct ieee80211_tdls_ch_sw_params params = {};
+ int ret = 0;
+
+ params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
+ params.timestamp = rx_status->device_timestamp;
+
+ if (skb->len < baselen) {
+ tdls_dbg(sdata, "TDLS channel switch req too short: %d\n",
+ skb->len);
+ return -EINVAL;
+ }
+
+ target_channel = tf->u.chan_switch_req.target_channel;
+ oper_class = tf->u.chan_switch_req.oper_class;
+
+ /*
+ * We can't easily infer the channel band. The operating class is
+ * ambiguous - there are multiple tables (US/Europe/JP/Global). The
+ * solution here is to treat channels with number >14 as 5GHz ones,
+ * and specifically check for the (oper_class, channel) combinations
+ * where this doesn't hold. These are thankfully unique according to
+ * IEEE802.11-2012.
+ * We consider only the 2GHz and 5GHz bands and 20MHz+ channels as
+ * valid here.
+ */
+ if ((oper_class == 112 || oper_class == 2 || oper_class == 3 ||
+ oper_class == 4 || oper_class == 5 || oper_class == 6) &&
+ target_channel < 14)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = target_channel < 14 ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+
+ freq = ieee80211_channel_to_frequency(target_channel, band);
+ if (freq == 0) {
+ tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n",
+ target_channel);
+ return -EINVAL;
+ }
+
+ chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ if (!chan) {
+ tdls_dbg(sdata,
+ "Unsupported channel for TDLS chan switch: %d\n",
+ target_channel);
+ return -EINVAL;
+ }
+
+ ieee802_11_parse_elems(tf->u.chan_switch_req.variable,
+ skb->len - baselen, false, &elems);
+ if (elems.parse_error) {
+ tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n");
+ return -EINVAL;
+ }
+
+ if (!elems.ch_sw_timing || !elems.lnk_id) {
+ tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, tf->sa);
+ if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
+ tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
+ tf->sa);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ params.sta = &sta->sta;
+
+ /* validate the initiator is set correctly */
+ local_initiator =
+ !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN);
+ if (local_initiator == sta->sta.tdls_initiator) {
+ tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!sta->sta.ht_cap.ht_supported) {
+ chan_type = NL80211_CHAN_NO_HT;
+ } else if (!elems.sec_chan_offs) {
+ chan_type = NL80211_CHAN_HT20;
+ } else {
+ switch (elems.sec_chan_offs->sec_chan_offs) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ chan_type = NL80211_CHAN_HT40MINUS;
+ break;
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ }
+ }
+
+ cfg80211_chandef_create(&chandef, chan, chan_type);
+ params.chandef = &chandef;
+
+ params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
+ params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
+
+ params.tmpl_skb =
+ ieee80211_tdls_ch_sw_resp_tmpl_get(sta,
+ &params.ch_sw_tm_ie);
+ if (!params.tmpl_skb) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
+
+ tdls_dbg(sdata,
+ "TDLS ch switch request received from %pM ch %d width %d\n",
+ tf->sa, params.chandef->chan->center_freq,
+ params.chandef->width);
+out:
+ mutex_unlock(&local->sta_mtx);
+ dev_kfree_skb_any(params.tmpl_skb);
+ return ret;
+}
+
+void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf = (void *)skb->data;
+ struct wiphy *wiphy = sdata->local->hw.wiphy;
+
+ /* make sure the driver supports it */
+ if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
+ return;
+
+ /* we want to access the entire packet */
+ if (skb_linearize(skb))
+ return;
+ /*
+ * The packet/size was already validated by mac80211 Rx path, only look
+ * at the action type.
+ */
+ switch (tf->action_code) {
+ case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
+ ieee80211_process_tdls_channel_switch_req(sdata, skb);
+ break;
+ case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
+ ieee80211_process_tdls_channel_switch_resp(sdata, skb);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
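The channel-switch request template built above reports the offset of the Channel Switch Timing IE (ch_sw_tm_ie) back to the driver, so the driver can fill in real timing values just before transmission. A minimal sketch of that consumer side, assuming a hypothetical driver helper and the IE layout written by iee80211_tdls_add_ch_switch_timing() (one byte EID, one byte length, then the little-endian timing fields):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

/*
 * Hypothetical driver-side helper (not part of this patch): patch the
 * real switch_time/switch_timeout into the template skb that mac80211
 * handed over, using the ch_sw_tm_ie offset it reported. The offset
 * points at the IE's EID byte, so the body starts two bytes later.
 */
static void drv_patch_ch_sw_timing(struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie,
				   u16 switch_time, u16 switch_timeout)
{
	struct ieee80211_ch_switch_timing *ch_sw =
		(void *)(tmpl_skb->data + ch_sw_tm_ie + 2);

	ch_sw->switch_time = cpu_to_le16(switch_time);
	ch_sw->switch_timeout = cpu_to_le16(switch_timeout);
}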
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 38fae7ebe984..8e461a02c6a8 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -16,6 +16,7 @@
#define STA_ENTRY __array(char, sta_addr, ETH_ALEN)
#define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN))
+#define STA_NAMED_ASSIGN(s) memcpy(__entry->sta_addr, (s)->addr, ETH_ALEN)
#define STA_PR_FMT " sta:%pM"
#define STA_PR_ARG __entry->sta_addr
@@ -595,14 +596,33 @@ DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop,
TP_ARGS(local, sdata)
);
-DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+TRACE_EVENT(drv_sw_scan_start,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *mac_addr),
+
+ TP_ARGS(local, sdata, mac_addr),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __array(char, mac_addr, ETH_ALEN)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->mac_addr, mac_addr, ETH_ALEN);
+ ),
+
+ TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT ", addr:%pM",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->mac_addr)
);
-DEFINE_EVENT(local_only_evt, drv_sw_scan_complete,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_sw_scan_complete,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
TRACE_EVENT(drv_get_stats,
@@ -826,6 +846,13 @@ DEFINE_EVENT(sta_event, drv_sta_pre_rcu_remove,
TP_ARGS(local, sdata, sta)
);
+DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+ TP_ARGS(local, sdata, sta)
+);
+
TRACE_EVENT(drv_conf_tx,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -987,29 +1014,34 @@ TRACE_EVENT(drv_flush,
TRACE_EVENT(drv_channel_switch,
TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
- TP_ARGS(local, ch_switch),
+ TP_ARGS(local, sdata, ch_switch),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
CHANDEF_ENTRY
__field(u64, timestamp)
+ __field(u32, device_timestamp)
__field(bool, block_tx)
__field(u8, count)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
CHANDEF_ASSIGN(&ch_switch->chandef)
__entry->timestamp = ch_switch->timestamp;
+ __entry->device_timestamp = ch_switch->device_timestamp;
__entry->block_tx = ch_switch->block_tx;
__entry->count = ch_switch->count;
),
TP_printk(
- LOCAL_PR_FMT " new " CHANDEF_PR_FMT " count:%d",
- LOCAL_PR_ARG, CHANDEF_PR_ARG, __entry->count
+ LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count
)
);
@@ -1557,9 +1589,26 @@ DEFINE_EVENT(local_sdata_evt, drv_stop_ap,
TP_ARGS(local, sdata)
);
-DEFINE_EVENT(local_only_evt, drv_restart_complete,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+TRACE_EVENT(drv_reconfig_complete,
+ TP_PROTO(struct ieee80211_local *local,
+ enum ieee80211_reconfig_type reconfig_type),
+ TP_ARGS(local, reconfig_type),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u8, reconfig_type)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->reconfig_type = reconfig_type;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " reconfig_type:%d",
+ LOCAL_PR_ARG, __entry->reconfig_type
+ )
+
);
#if IS_ENABLED(CONFIG_IPV6)
@@ -1780,6 +1829,12 @@ TRACE_EVENT(api_cqm_rssi_notify,
)
);
+DEFINE_EVENT(local_sdata_evt, api_cqm_beacon_loss_notify,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
TRACE_EVENT(api_scan_completed,
TP_PROTO(struct ieee80211_local *local, bool aborted),
@@ -2106,6 +2161,175 @@ TRACE_EVENT(drv_channel_switch_beacon,
)
);
+TRACE_EVENT(drv_pre_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+
+ TP_ARGS(local, sdata, ch_switch),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ __field(u64, timestamp)
+ __field(u32, device_timestamp)
+ __field(bool, block_tx)
+ __field(u8, count)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(&ch_switch->chandef)
+ __entry->timestamp = ch_switch->timestamp;
+ __entry->device_timestamp = ch_switch->device_timestamp;
+ __entry->block_tx = ch_switch->block_tx;
+ __entry->count = ch_switch->count;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
+ CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp
+ )
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+TRACE_EVENT(drv_get_txpower,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ int dbm, int ret),
+
+ TP_ARGS(local, sdata, dbm, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, dbm)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->dbm = dbm;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_tdls_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(local, sdata, sta, oper_class, chandef),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u8, oper_class)
+ CHANDEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->oper_class = oper_class;
+ CHANDEF_ASSIGN(chandef)
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " tdls channel switch to"
+ CHANDEF_PR_FMT " oper_class:%d " STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->oper_class,
+ STA_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_tdls_cancel_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sdata, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ " tdls cancel channel switch with " STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_tdls_recv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tdls_ch_sw_params *params),
+
+ TP_ARGS(local, sdata, params),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u8, action_code)
+ STA_ENTRY
+ CHANDEF_ENTRY
+ __field(u32, status)
+ __field(bool, peer_initiator)
+ __field(u32, timestamp)
+ __field(u16, switch_time)
+ __field(u16, switch_timeout)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_NAMED_ASSIGN(params->sta);
+ CHANDEF_ASSIGN(params->chandef)
+ __entry->peer_initiator = params->sta->tdls_initiator;
+ __entry->action_code = params->action_code;
+ __entry->status = params->status;
+ __entry->timestamp = params->timestamp;
+ __entry->switch_time = params->switch_time;
+ __entry->switch_timeout = params->switch_timeout;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " received tdls channel switch packet"
+ " action:%d status:%d time:%d switch time:%d switch"
+ " timeout:%d initiator: %d chan:" CHANDEF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->action_code, __entry->status,
+ __entry->timestamp, __entry->switch_time,
+ __entry->switch_timeout, __entry->peer_initiator,
+ CHANDEF_PR_ARG, STA_PR_ARG
+ )
+);
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#undef TRACE_SYSTEM
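The drv_tdls_* and other new tracepoints above are emitted from thin inline wrappers in driver-ops.h, which this hunk does not touch. A sketch of the usual wrapper shape for the cancel path, assuming the standard check_sdata_in_driver()/trace/callback/trace_drv_return_void() pattern used elsewhere in driver-ops.h:

/* sketch only -- the real wrapper lives in net/mac80211/driver-ops.h */
static inline void
drv_tdls_cancel_channel_switch(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_sta *sta)
{
	might_sleep();
	if (!check_sdata_in_driver(sdata))
		return;

	trace_drv_tdls_cancel_channel_switch(local, sdata, sta);
	if (local->ops->tdls_cancel_channel_switch)
		local->ops->tdls_cancel_channel_switch(&local->hw,
						       &sdata->vif, sta);
	trace_drv_return_void(local);
}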
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 900632a250ec..058686a721a1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -60,7 +60,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
rcu_read_unlock();
/* assume HW handles this */
- if (tx->rate.flags & IEEE80211_TX_RC_MCS)
+ if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
return 0;
/* uh huh? */
@@ -296,6 +296,9 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
*/
return TX_DROP;
+ if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
+ return TX_CONTINUE;
+
if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
return TX_CONTINUE;
@@ -1423,8 +1426,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
* Returns false if the frame couldn't be transmitted but was queued instead.
*/
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool txpending,
- enum ieee80211_band band)
+ struct sk_buff *skb, bool txpending)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
@@ -1449,8 +1451,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
return true;
}
- info->band = band;
-
/* set up hw_queue value early */
if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
!(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
@@ -1498,8 +1498,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- enum ieee80211_band band)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1534,7 +1533,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
}
ieee80211_set_qos_hdr(sdata, skb);
- ieee80211_tx(sdata, skb, false, band);
+ ieee80211_tx(sdata, skb, false);
}
static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
@@ -1754,7 +1753,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
sdata->vif.type))
goto fail_rcu;
- ieee80211_xmit(sdata, skb, chandef->chan->band);
+ info->band = chandef->chan->band;
+ ieee80211_xmit(sdata, skb);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -1784,23 +1784,26 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
}
/**
- * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
- * subinterfaces (wlan#, WDS, and VLAN interfaces)
- * @skb: packet to be sent
- * @dev: incoming interface
+ * ieee80211_build_hdr - build 802.11 header in the given frame
+ * @sdata: virtual interface to build the header for
+ * @skb: the skb to build the header in
+ * @info_flags: skb flags to set
+ *
+ * This function takes the skb with 802.3 header and reformats the header to
+ * the appropriate IEEE 802.11 header based on which interface the packet is
+ * being transmitted on.
+ *
+ * Note that this function also takes care of the TX status request and
+ * potential unsharing of the SKB - this needs to be interleaved with the
+ * header building.
*
- * Returns: NETDEV_TX_OK both on success and on failure. On failure skb will
- * be freed.
+ * The function requires the read-side RCU lock held
*
- * This function takes in an Ethernet header and encapsulates it with suitable
- * IEEE 802.11 header based on which interface the packet is coming in. The
- * encapsulated packet will then be passed to master interface, wlan#.11, for
- * transmission (through low-level driver).
+ * Returns: the (possibly reallocated) skb or an ERR_PTR() code
*/
-netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 info_flags)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info;
int head_need;
@@ -1816,25 +1819,17 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
bool wme_sta = false, authorized = false, tdls_auth = false;
bool tdls_peer = false, tdls_setup_frame = false;
bool multicast;
- u32 info_flags = 0;
u16 info_id = 0;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_sub_if_data *ap_sdata;
enum ieee80211_band band;
-
- if (unlikely(skb->len < ETH_HLEN))
- goto fail;
+ int ret;
/* convert Ethernet header to proper 802.11 header (based on
* operation mode) */
ethertype = (skb->data[12] << 8) | skb->data[13];
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
- rcu_read_lock();
-
- /* Measure frame arrival for Tx latency statistics calculation */
- ieee80211_tx_latency_start_msrmnt(local, skb);
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
@@ -1852,8 +1847,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
u.ap);
chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto fail_rcu;
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
band = chanctx_conf->def.chan->band;
if (sta)
break;
@@ -1861,8 +1858,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
case NL80211_IFTYPE_AP:
if (sdata->vif.type == NL80211_IFTYPE_AP)
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto fail_rcu;
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1949,8 +1948,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
}
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto fail_rcu;
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
band = chanctx_conf->def.chan->band;
break;
#endif
@@ -1980,8 +1981,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* of a link teardown after a TDLS sta is removed due to being
* unreachable.
*/
- if (tdls_peer && !tdls_auth && !tdls_setup_frame)
- goto fail_rcu;
+ if (tdls_peer && !tdls_auth && !tdls_setup_frame) {
+ ret = -EINVAL;
+ goto free;
+ }
/* send direct packets to authorized TDLS peers */
if (tdls_peer && tdls_auth) {
@@ -2009,8 +2012,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
hdrlen = 24;
}
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto fail_rcu;
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
+ band = chanctx_conf->def.chan->band;
+ break;
+ case NL80211_IFTYPE_OCB:
+ /* DA SA BSSID */
+ memcpy(hdr.addr1, skb->data, ETH_ALEN);
+ memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+ eth_broadcast_addr(hdr.addr3);
+ hdrlen = 24;
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
band = chanctx_conf->def.chan->band;
break;
case NL80211_IFTYPE_ADHOC:
@@ -2020,12 +2038,15 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
hdrlen = 24;
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto fail_rcu;
+ if (!chanctx_conf) {
+ ret = -ENOTCONN;
+ goto free;
+ }
band = chanctx_conf->def.chan->band;
break;
default:
- goto fail_rcu;
+ ret = -EINVAL;
+ goto free;
}
/*
@@ -2057,17 +2078,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* EAPOL frames from the local station.
*/
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
+ (sdata->vif.type != NL80211_IFTYPE_OCB) &&
!multicast && !authorized &&
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
!ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
- dev->name, hdr.addr1);
+ sdata->name, hdr.addr1);
#endif
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
- goto fail_rcu;
+ ret = -EPERM;
+ goto free;
}
if (unlikely(!multicast && skb->sk &&
@@ -2104,8 +2127,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
skb = skb_clone(skb, GFP_ATOMIC);
kfree_skb(tmp_skb);
- if (!skb)
- goto fail_rcu;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto free;
+ }
}
hdr.frame_control = fc;
@@ -2154,7 +2179,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
ieee80211_free_txskb(&local->hw, skb);
skb = NULL;
- goto fail_rcu;
+ return ERR_PTR(-ENOMEM);
}
}
@@ -2188,9 +2213,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
nh_pos += hdrlen;
h_pos += hdrlen;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/* Update skb pointers to various headers since this modified frame
* is going to go through Linux networking code that may potentially
* need things like pointer to IP header. */
@@ -2201,23 +2223,90 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
- dev->trans_start = jiffies;
-
info->flags = info_flags;
info->ack_frame_id = info_id;
+ info->band = band;
- ieee80211_xmit(sdata, skb, band);
- rcu_read_unlock();
+ return skb;
+ free:
+ kfree_skb(skb);
+ return ERR_PTR(ret);
+}
- return NETDEV_TX_OK;
+void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 info_flags)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* Measure frame arrival for Tx latency statistics calculation */
+ ieee80211_tx_latency_start_msrmnt(local, skb);
+
+ skb = ieee80211_build_hdr(sdata, skb, info_flags);
+ if (IS_ERR(skb))
+ goto out;
- fail_rcu:
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+
+ ieee80211_xmit(sdata, skb);
+ out:
rcu_read_unlock();
- fail:
- dev_kfree_skb(skb);
+}
+
+/**
+ * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
+ * @skb: packet to be sent
+ * @dev: incoming interface
+ *
+ * On failure skb will be freed.
+ */
+netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ __ieee80211_subif_start_xmit(skb, dev, 0);
return NETDEV_TX_OK;
}
+struct sk_buff *
+ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 info_flags)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_data tx = {
+ .local = sdata->local,
+ .sdata = sdata,
+ };
+
+ rcu_read_lock();
+
+ skb = ieee80211_build_hdr(sdata, skb, info_flags);
+ if (IS_ERR(skb))
+ goto out;
+
+ hdr = (void *)skb->data;
+ tx.sta = sta_info_get(sdata, hdr->addr1);
+ tx.skb = skb;
+
+ if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+ }
+
+out:
+ rcu_read_unlock();
+ return skb;
+}
/*
* ieee80211_clear_tx_pending may not be called in a context where
@@ -2257,8 +2346,8 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
dev_kfree_skb(skb);
return true;
}
- result = ieee80211_tx(sdata, skb, true,
- chanctx_conf->def.chan->band);
+ info->band = chanctx_conf->def.chan->band;
+ result = ieee80211_tx(sdata, skb, true);
} else {
struct sk_buff_head skbs;
@@ -2872,19 +2961,16 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
EXPORT_SYMBOL(ieee80211_nullfunc_get);
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+ const u8 *src_addr,
const u8 *ssid, size_t ssid_len,
size_t tailroom)
{
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_local *local;
+ struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_hdr_3addr *hdr;
struct sk_buff *skb;
size_t ie_ssid_len;
u8 *pos;
- sdata = vif_to_sdata(vif);
- local = sdata->local;
ie_ssid_len = 2 + ssid_len;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
@@ -2899,7 +2985,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(hdr->addr1);
- memcpy(hdr->addr2, vif->addr, ETH_ALEN);
+ memcpy(hdr->addr2, src_addr, ETH_ALEN);
eth_broadcast_addr(hdr->addr3);
pos = skb_put(skb, ie_ssid_len);
@@ -3018,6 +3104,97 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
+int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ int ret;
+ u32 queues;
+
+ lockdep_assert_held(&local->sta_mtx);
+
+ /* only some cases are supported right now */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(tid >= IEEE80211_NUM_UPS))
+ return -EINVAL;
+
+ if (sta->reserved_tid == tid) {
+ ret = 0;
+ goto out;
+ }
+
+ if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
+ sdata_err(sdata, "TID reservation already active\n");
+ ret = -EALREADY;
+ goto out;
+ }
+
+ ieee80211_stop_vif_queues(sdata->local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
+
+ synchronize_net();
+
+ /* Tear down BA sessions so we stop aggregating on this TID */
+ if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ __ieee80211_stop_tx_ba_session(sta, tid,
+ AGG_STOP_LOCAL_REQUEST);
+ }
+
+ queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
+ __ieee80211_flush_queues(local, sdata, queues);
+
+ sta->reserved_tid = tid;
+
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
+
+ if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+
+ ret = 0;
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_reserve_tid);
+
+void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+ lockdep_assert_held(&sdata->local->sta_mtx);
+
+ /* only some cases are supported right now */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (tid != sta->reserved_tid) {
+ sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
+ return;
+ }
+
+ sta->reserved_tid = IEEE80211_TID_UNRESERVED;
+}
+EXPORT_SYMBOL(ieee80211_unreserve_tid);
+
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
enum ieee80211_band band)
@@ -3039,6 +3216,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
* requirements are that we do not come into tx with bhs on.
*/
local_bh_disable();
- ieee80211_xmit(sdata, skb, band);
+ IEEE80211_SKB_CB(skb)->band = band;
+ ieee80211_xmit(sdata, skb);
local_bh_enable();
}
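ieee80211_reserve_tid() and ieee80211_unreserve_tid() added above are exported so a driver can keep one TID free of regular traffic (frames queued on a reserved TID are remapped in the wme.c hunk further down). A minimal usage sketch, assuming a hypothetical driver that reserves TID 4 from a context where mac80211 already holds its station mutex (the function lockdep-asserts this), for example its sta_state() callback:

#include <net/mac80211.h>

/* Hypothetical driver helper: claim TID 4 for a device-internal stream. */
static int drv_claim_stream_tid(struct ieee80211_sta *sta)
{
	int ret;

	ret = ieee80211_reserve_tid(sta, 4);
	if (ret)
		return ret;	/* e.g. -EALREADY if another TID is reserved */

	/* ... program the device-specific stream here ... */
	return 0;
}

static void drv_release_stream_tid(struct ieee80211_sta *sta)
{
	ieee80211_unreserve_tid(sta, 4);
}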
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3c61060a4d2b..974ebe70f5b0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -576,15 +576,19 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
return queues;
}
-void ieee80211_flush_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+void __ieee80211_flush_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ unsigned int queues)
{
- unsigned int queues;
-
if (!local->ops->flush)
return;
- queues = ieee80211_get_vif_queues(local, sdata);
+ /*
+ * If no queue was set, or if the HW doesn't support
+ * IEEE80211_HW_QUEUE_CONTROL - flush all queues
+ */
+ if (!queues || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+ queues = ieee80211_get_vif_queues(local, sdata);
ieee80211_stop_queues_by_reason(&local->hw, queues,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
@@ -597,6 +601,12 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
false);
}
+void ieee80211_flush_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ __ieee80211_flush_queues(local, sdata, 0);
+}
+
void ieee80211_stop_vif_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum queue_stop_reason reason)
@@ -693,6 +703,34 @@ void ieee80211_iterate_active_interfaces_rtnl(
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
+static void __iterate_stations(struct ieee80211_local *local,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data)
+{
+ struct sta_info *sta;
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (!sta->uploaded)
+ continue;
+
+ iterator(data, &sta->sta);
+ }
+}
+
+void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ rcu_read_lock();
+ __iterate_stations(local, iterator, data);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic);
+
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -803,6 +841,9 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
case WLAN_EID_CHAN_SWITCH_PARAM:
+ case WLAN_EID_EXT_CAPABILITY:
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ case WLAN_EID_LINK_ID:
/*
* not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
* that if the content gets bigger it might be needed more than once
@@ -822,6 +863,24 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem_parse_failed = false;
switch (id) {
+ case WLAN_EID_LINK_ID:
+ if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+ elem_parse_failed = true;
+ break;
+ }
+ elems->lnk_id = (void *)(pos - 2);
+ break;
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+ elem_parse_failed = true;
+ break;
+ }
+ elems->ch_sw_timing = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ elems->ext_capab = pos;
+ elems->ext_capab_len = elen;
+ break;
case WLAN_EID_SSID:
elems->ssid = pos;
elems->ssid_len = elen;
@@ -1073,6 +1132,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
struct ieee80211_chanctx_conf *chanctx_conf;
int ac;
bool use_11b, enable_qos;
+ bool is_ocb; /* Use different EDCA parameters if dot11OCBActivated=true */
int aCWmin, aCWmax;
if (!local->ops->conf_tx)
@@ -1097,6 +1157,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
*/
enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
+ is_ocb = (sdata->vif.type == NL80211_IFTYPE_OCB);
+
/* Set defaults according to 802.11-2007 Table 7-37 */
aCWmax = 1023;
if (use_11b)
@@ -1118,7 +1180,10 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
- qparam.aifs = 7;
+ if (is_ocb)
+ qparam.aifs = 9;
+ else
+ qparam.aifs = 7;
break;
/* never happens but let's not leave undefined */
default:
@@ -1126,21 +1191,32 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
- qparam.aifs = 3;
+ if (is_ocb)
+ qparam.aifs = 6;
+ else
+ qparam.aifs = 3;
break;
case IEEE80211_AC_VI:
qparam.cw_max = aCWmin;
qparam.cw_min = (aCWmin + 1) / 2 - 1;
- if (use_11b)
+ if (is_ocb)
+ qparam.txop = 0;
+ else if (use_11b)
qparam.txop = 6016/32;
else
qparam.txop = 3008/32;
- qparam.aifs = 2;
+
+ if (is_ocb)
+ qparam.aifs = 3;
+ else
+ qparam.aifs = 2;
break;
case IEEE80211_AC_VO:
qparam.cw_max = (aCWmin + 1) / 2 - 1;
qparam.cw_min = (aCWmin + 1) / 4 - 1;
- if (use_11b)
+ if (is_ocb)
+ qparam.txop = 0;
+ else if (use_11b)
qparam.txop = 3264/32;
else
qparam.txop = 1504/32;
@@ -1263,6 +1339,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
int ext_rates_len;
int shift;
u32 rate_flags;
+ bool have_80mhz = false;
*offset = 0;
@@ -1391,7 +1468,15 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
*offset = noffset;
}
- if (sband->vht_cap.vht_supported) {
+ /* Check if any channel in this sband supports at least 80 MHz */
+ for (i = 0; i < sband->n_channels; i++) {
+ if (!(sband->channels[i].flags & IEEE80211_CHAN_NO_80MHZ)) {
+ have_80mhz = true;
+ break;
+ }
+ }
+
+ if (sband->vht_cap.vht_supported && have_80mhz) {
if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
goto out_err;
pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
@@ -1447,7 +1532,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
};
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
- u8 *dst, u32 ratemask,
+ const u8 *src, const u8 *dst,
+ u32 ratemask,
struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
@@ -1472,8 +1558,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
else
chandef.chan = chan;
- skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
- ssid, ssid_len, 100 + ie_len);
+ skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
+ 100 + ie_len);
if (!skb)
return NULL;
@@ -1495,7 +1581,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return skb;
}
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
+void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
+ const u8 *src, const u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
u32 ratemask, bool directed, u32 tx_flags,
@@ -1503,7 +1590,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
{
struct sk_buff *skb;
- skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+ skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
ssid, ssid_len,
ie, ie_len, directed);
if (skb) {
@@ -1645,6 +1732,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
int res, i;
bool reconfig_due_to_wowlan = false;
struct ieee80211_sub_if_data *sched_scan_sdata;
+ struct cfg80211_sched_scan_request *sched_scan_req;
bool sched_scan_stopped = false;
#ifdef CONFIG_PM
@@ -1813,6 +1901,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
ieee80211_bss_info_change_notify(sdata, changed);
sdata_unlock(sdata);
break;
+ case NL80211_IFTYPE_OCB:
+ changed |= BSS_CHANGED_OCB;
+ ieee80211_bss_info_change_notify(sdata, changed);
+ break;
case NL80211_IFTYPE_ADHOC:
changed |= BSS_CHANGED_IBSS;
/* fall through */
@@ -1931,13 +2023,15 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_lock(&local->mtx);
sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
lockdep_is_held(&local->mtx));
- if (sched_scan_sdata && local->sched_scan_req)
+ sched_scan_req = rcu_dereference_protected(local->sched_scan_req,
+ lockdep_is_held(&local->mtx));
+ if (sched_scan_sdata && sched_scan_req)
/*
* Sched scan stopped, but we don't want to report it. Instead,
* we're trying to reschedule.
*/
if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
- local->sched_scan_req))
+ sched_scan_req))
sched_scan_stopped = true;
mutex_unlock(&local->mtx);
@@ -1949,7 +2043,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* We may want to change that later, however.
*/
if (!local->suspended || reconfig_due_to_wowlan)
- drv_restart_complete(local);
+ drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
if (!local->suspended)
return 0;
@@ -1960,6 +2054,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mb();
local->resuming = false;
+ if (!reconfig_due_to_wowlan)
+ drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
+
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
@@ -2052,42 +2149,36 @@ static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
return false;
}
-/**
- * ieee80211_ie_split - split an IE buffer according to ordering
- *
- * @ies: the IE buffer
- * @ielen: the length of the IE buffer
- * @ids: an array with element IDs that are allowed before
- * the split
- * @n_ids: the size of the element ID array
- * @offset: offset where to start splitting in the buffer
- *
- * This function splits an IE buffer by updating the @offset
- * variable to point to the location where the buffer should be
- * split.
- *
- * It assumes that the given IE buffer is well-formed, this
- * has to be guaranteed by the caller!
- *
- * It also assumes that the IEs in the buffer are ordered
- * correctly, if not the result of using this function will not
- * be ordered correctly either, i.e. it does no reordering.
- *
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
- */
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset)
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset)
{
size_t pos = offset;
- while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
- pos += 2 + ies[pos + 1];
+ while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) {
+ if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) {
+ pos += 2 + ies[pos + 1];
+
+ while (pos < ielen &&
+ !ieee80211_id_in_list(after_ric, n_after_ric,
+ ies[pos]))
+ pos += 2 + ies[pos + 1];
+ } else {
+ pos += 2 + ies[pos + 1];
+ }
+ }
return pos;
}
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset)
+{
+ return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
+}
+EXPORT_SYMBOL(ieee80211_ie_split);
+
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
{
size_t pos = offset;
@@ -2526,11 +2617,23 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, radar_detected_work);
struct cfg80211_chan_def chandef = local->hw.conf.chandef;
+ struct ieee80211_chanctx *ctx;
+ int num_chanctx = 0;
+
+ mutex_lock(&local->chanctx_mtx);
+ list_for_each_entry(ctx, &local->chanctx_list, list) {
+ if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
+ continue;
+
+ num_chanctx++;
+ chandef = ctx->conf.def;
+ }
+ mutex_unlock(&local->chanctx_mtx);
ieee80211_dfs_cac_cancel(local);
- if (local->use_chanctx)
- /* currently not handled */
+ if (num_chanctx > 1)
+ /* XXX: multi-channel is not supported yet */
WARN_ON(1);
else
cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
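ieee80211_iterate_stations_atomic() added above lets a driver walk all uploaded stations under RCU protection. A small usage sketch, assuming a hypothetical driver that just counts them; the iterator runs in atomic context and must not sleep:

#include <net/mac80211.h>

static void drv_count_sta_iter(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;
}

/* Hypothetical helper: number of stations currently uploaded to the device */
static unsigned int drv_count_stations(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations_atomic(hw, drv_count_sta_iter, &count);
	return count;
}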
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 671ce0d27a80..bc9e8fc48785 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -287,6 +287,8 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
/* fall through */
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
case NL80211_CHAN_WIDTH_40:
bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 9181fb6d6437..a4220e92f0cc 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -111,8 +111,6 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return newhdr + hdrlen;
- skb_set_network_header(skb, skb_network_offset(skb) +
- IEEE80211_WEP_IV_LEN);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 3b873989992c..9eb0aee9105b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -53,11 +53,49 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
+/**
+ * ieee80211_fix_reserved_tid - return the TID to use if this one is reserved
+ * @tid: the assumed-reserved TID
+ *
+ * Returns: the alternative TID to use, or 0 on error
+ */
+static inline u8 ieee80211_fix_reserved_tid(u8 tid)
+{
+ switch (tid) {
+ case 0:
+ return 3;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ case 3:
+ return 0;
+ case 4:
+ return 5;
+ case 5:
+ return 4;
+ case 6:
+ return 7;
+ case 7:
+ return 6;
+ }
+
+ return 0;
+}
+
static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+ struct sta_info *sta, struct sk_buff *skb)
{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
/* in case we are a client verify acm is not set for this ac */
- while (unlikely(sdata->wmm_acm & BIT(skb->priority))) {
+ while (sdata->wmm_acm & BIT(skb->priority)) {
+ int ac = ieee802_1d_to_ac[skb->priority];
+
+ if (ifmgd->tx_tspec[ac].admitted_time &&
+ skb->priority == ifmgd->tx_tspec[ac].up)
+ return ac;
+
if (wme_downgrade_ac(skb)) {
/*
* This should not really happen. The AP has marked all
@@ -69,6 +107,10 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
}
}
+ /* Check to see if this is a reserved TID */
+ if (sta && sta->reserved_tid == skb->priority)
+ skb->priority = ieee80211_fix_reserved_tid(skb->priority);
+
/* look up which queue to use for frames with this 1d tag */
return ieee802_1d_to_ac[skb->priority];
}
@@ -96,7 +138,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
- return ieee80211_downgrade_queue(sdata, skb);
+ return ieee80211_downgrade_queue(sdata, NULL, skb);
}
/* Indicate which queue to use. */
@@ -108,6 +150,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
const u8 *ra = NULL;
bool qos = false;
struct mac80211_qos_map *qos_map;
+ u16 ret;
if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
skb->priority = 0; /* required for correct WPA/11i MIC */
@@ -134,11 +177,20 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#endif
case NL80211_IFTYPE_STATION:
+ /* might be a TDLS station */
+ sta = sta_info_get(sdata, skb->data);
+ if (sta)
+ qos = sta->sta.wme;
+
ra = sdata->u.mgd.bssid;
break;
case NL80211_IFTYPE_ADHOC:
ra = skb->data;
break;
+ case NL80211_IFTYPE_OCB:
+ /* all stations are required to support WME */
+ qos = true;
+ break;
default:
break;
}
@@ -148,27 +200,29 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (sta)
qos = sta->sta.wme;
}
- rcu_read_unlock();
if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
- return IEEE80211_AC_BE;
+ ret = IEEE80211_AC_BE;
+ goto out;
}
if (skb->protocol == sdata->control_port_protocol) {
skb->priority = 7;
- return ieee80211_downgrade_queue(sdata, skb);
+ goto downgrade;
}
/* use the data classifier to determine what 802.1d tag the
* data frame has */
- rcu_read_lock();
qos_map = rcu_dereference(sdata->qos_map);
skb->priority = cfg80211_classify8021d(skb, qos_map ?
&qos_map->qos_map : NULL);
- rcu_read_unlock();
- return ieee80211_downgrade_queue(sdata, skb);
+ downgrade:
+ ret = ieee80211_downgrade_queue(sdata, sta, skb);
+ out:
+ rcu_read_unlock();
+ return ret;
}
/**
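The remapping in ieee80211_fix_reserved_tid() above always swaps a user priority with the other priority that falls into the same access category (assuming the usual 802.1D-to-AC table: BE = {0, 3}, BK = {1, 2}, VI = {4, 5}, VO = {6, 7}), so a frame moved off a reserved TID still uses the same hardware queue. A short sketch of that invariant, assuming ieee802_1d_to_ac[] is visible via mac80211.h now that the extern is dropped from wme.h below:

#include <net/mac80211.h>	/* assumption: declares ieee802_1d_to_ac[] */

/* Sketch: the substitute TID maps to the same AC as the reserved one */
static bool tid_remap_keeps_ac(u8 tid, u8 remapped)
{
	return ieee802_1d_to_ac[tid] == ieee802_1d_to_ac[remapped];
}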
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 7fea4bb8acbc..80151edc5195 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -13,8 +13,6 @@
#include <linux/netdevice.h>
#include "ieee80211_i.h"
-extern const int ieee802_1d_to_ac[8];
-
u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 983527a4c1ab..12398fde02e8 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -209,8 +209,6 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, IEEE80211_TKIP_IV_LEN);
memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen);
- skb_set_network_header(skb, skb_network_offset(skb) +
- IEEE80211_TKIP_IV_LEN);
pos += hdrlen;
/* the HW only needs room for the IV, but not the actual IV */
@@ -434,8 +432,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN);
memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen);
- skb_set_network_header(skb, skb_network_offset(skb) +
- IEEE80211_CCMP_HDR_LEN);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
@@ -575,7 +571,6 @@ ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
pos = skb_push(skb, cs->hdr_len);
memmove(pos, pos + cs->hdr_len, hdrlen);
- skb_set_network_header(skb, skb_network_offset(skb) + cs->hdr_len);
return TX_CONTINUE;
}
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index 1818a99b3081..aa462b480a39 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -16,5 +16,5 @@ config MAC802154
been tested yet!
If you plan to use HardMAC IEEE 802.15.4 devices, you can
- say N here. Alternatievly you can say M to compile it as
+ say N here. Alternatively you can say M to compile it as
module.
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 9723d6f3f3e5..702d8b466317 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_MAC802154) += mac802154.o
-mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \
- monitor.o wpan.o llsec.o
+mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \
+ iface.o llsec.o util.o cfg.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
new file mode 100644
index 000000000000..c035708ada16
--- /dev/null
+++ b/net/mac802154/cfg.c
@@ -0,0 +1,210 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Based on: net/mac80211/cfg.c
+ */
+
+#include <net/rtnetlink.h>
+#include <net/cfg802154.h>
+
+#include "ieee802154_i.h"
+#include "driver-ops.h"
+#include "cfg.h"
+
+static struct net_device *
+ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy,
+ const char *name, int type)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ struct net_device *dev;
+
+ rtnl_lock();
+ dev = ieee802154_if_add(local, name, type,
+ cpu_to_le64(0x0000000000000000ULL));
+ rtnl_unlock();
+
+ return dev;
+}
+
+static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
+ struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+ ieee802154_if_remove(sdata);
+}
+
+static int
+ieee802154_add_iface(struct wpan_phy *phy, const char *name,
+ enum nl802154_iftype type, __le64 extended_addr)
+{
+ struct ieee802154_local *local = wpan_phy_priv(phy);
+ struct net_device *err;
+
+ err = ieee802154_if_add(local, name, type, extended_addr);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ return 0;
+}
+
+static int
+ieee802154_del_iface(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev)
+{
+ ieee802154_if_remove(IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev));
+
+ return 0;
+}
+
+static int
+ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ ASSERT_RTNL();
+
+ /* check if the phy supports this setting */
+ if (!(wpan_phy->channels_supported[page] & BIT(channel)))
+ return -EINVAL;
+
+ ret = drv_set_channel(local, page, channel);
+ if (!ret) {
+ wpan_phy->current_page = page;
+ wpan_phy->current_channel = channel;
+ }
+
+ return ret;
+}
+
+static int
+ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ __le16 pan_id)
+{
+ ASSERT_RTNL();
+
+ /* TODO
+ * I am not sure whether we should reject the broadcast pan_id here.
+ * Broadcast is a valid setting, comment from 802.15.4:
+ * If this value is 0xffff, the device is not associated.
+ *
+ * This could be useful to simply deassociate a device.
+ */
+ if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+ return -EINVAL;
+
+ wpan_dev->pan_id = pan_id;
+ return 0;
+}
+
+static int
+ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ u8 min_be, u8 max_be)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ ASSERT_RTNL();
+
+ if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
+ return -EOPNOTSUPP;
+
+ wpan_dev->min_be = min_be;
+ wpan_dev->max_be = max_be;
+ return 0;
+}
+
+static int
+ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ __le16 short_addr)
+{
+ ASSERT_RTNL();
+
+ /* TODO
+ * I am not sure whether we should reject the broadcast short_addr here.
+ * Broadcast is a valid setting, comment from 802.15.4:
+ * A value of 0xfffe indicates that the device has
+ * associated but has not been allocated an address. A
+ * value of 0xffff indicates that the device does not
+ * have a short address.
+ *
+ * I think we should allow setting these values but
+ * not allow socket communication with them.
+ */
+ if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+ short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+ return -EINVAL;
+
+ wpan_dev->short_addr = short_addr;
+ return 0;
+}
+
+static int
+ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ u8 max_csma_backoffs)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ ASSERT_RTNL();
+
+ if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
+ return -EOPNOTSUPP;
+
+ wpan_dev->csma_retries = max_csma_backoffs;
+ return 0;
+}
+
+static int
+ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ s8 max_frame_retries)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ ASSERT_RTNL();
+
+ if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES))
+ return -EOPNOTSUPP;
+
+ wpan_dev->frame_retries = max_frame_retries;
+ return 0;
+}
+
+static int
+ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ bool mode)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ ASSERT_RTNL();
+
+ if (!(local->hw.flags & IEEE802154_HW_LBT))
+ return -EOPNOTSUPP;
+
+ wpan_dev->lbt = mode;
+ return 0;
+}
+
+const struct cfg802154_ops mac802154_config_ops = {
+ .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
+ .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+ .add_virtual_intf = ieee802154_add_iface,
+ .del_virtual_intf = ieee802154_del_iface,
+ .set_channel = ieee802154_set_channel,
+ .set_pan_id = ieee802154_set_pan_id,
+ .set_short_addr = ieee802154_set_short_addr,
+ .set_backoff_exponent = ieee802154_set_backoff_exponent,
+ .set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
+ .set_max_frame_retries = ieee802154_set_max_frame_retries,
+ .set_lbt_mode = ieee802154_set_lbt_mode,
+};
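These cfg802154 ops only validate against the hardware capability flags and cache the new values in the wpan_dev; the cached state is pushed to the hardware when the interface is brought up (see mac802154_wpan_open() in iface.c further down). The ops table itself is handed to the cfg802154 core when the phy is allocated, as done by ieee802154_alloc_hw() in main.c below; a trimmed sketch of that wiring:

	/* wpan_phy_new() stores the ops pointer in the new wpan_phy, so
	 * nl802154 requests for this phy are dispatched through
	 * mac802154_config_ops.
	 */
	phy = wpan_phy_new(&mac802154_config_ops, priv_size);
	if (!phy)
		return NULL;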
diff --git a/net/mac802154/cfg.h b/net/mac802154/cfg.h
new file mode 100644
index 000000000000..e2718f981e82
--- /dev/null
+++ b/net/mac802154/cfg.h
@@ -0,0 +1,9 @@
+/* mac802154 configuration hooks for cfg802154
+ */
+
+#ifndef __CFG_H
+#define __CFG_H
+
+extern const struct cfg802154_ops mac802154_config_ops;
+
+#endif /* __CFG_H */
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
new file mode 100644
index 000000000000..f21e864613d0
--- /dev/null
+++ b/net/mac802154/driver-ops.h
@@ -0,0 +1,222 @@
+#ifndef __MAC802154_DRIVER_OPS
+#define __MAC802154_DRIVER_OPS
+
+#include <linux/types.h>
+#include <linux/rtnetlink.h>
+
+#include <net/mac802154.h>
+
+#include "ieee802154_i.h"
+
+static inline int
+drv_xmit_async(struct ieee802154_local *local, struct sk_buff *skb)
+{
+ return local->ops->xmit_async(&local->hw, skb);
+}
+
+static inline int
+drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb)
+{
+ /* don't allow other operations while sync xmit */
+ ASSERT_RTNL();
+
+ might_sleep();
+
+ return local->ops->xmit_sync(&local->hw, skb);
+}
+
+static inline int drv_start(struct ieee802154_local *local)
+{
+ might_sleep();
+
+ local->started = true;
+ smp_mb();
+
+ return local->ops->start(&local->hw);
+}
+
+static inline void drv_stop(struct ieee802154_local *local)
+{
+ might_sleep();
+
+ local->ops->stop(&local->hw);
+
+ /* sync away all work on the tasklet before clearing started */
+ tasklet_disable(&local->tasklet);
+ tasklet_enable(&local->tasklet);
+
+ barrier();
+
+ local->started = false;
+}
+
+static inline int
+drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
+{
+ might_sleep();
+
+ return local->ops->set_channel(&local->hw, page, channel);
+}
+
+static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
+{
+ might_sleep();
+
+ if (!local->ops->set_txpower) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_txpower(&local->hw, dbm);
+}
+
+static inline int drv_set_cca_mode(struct ieee802154_local *local, u8 cca_mode)
+{
+ might_sleep();
+
+ if (!local->ops->set_cca_mode) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_cca_mode(&local->hw, cca_mode);
+}
+
+static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
+{
+ might_sleep();
+
+ if (!local->ops->set_lbt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_lbt(&local->hw, mode);
+}
+
+static inline int
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
+{
+ might_sleep();
+
+ if (!local->ops->set_cca_ed_level) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_cca_ed_level(&local->hw, ed_level);
+}
+
+static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
+{
+ struct ieee802154_hw_addr_filt filt;
+
+ might_sleep();
+
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.pan_id = pan_id;
+
+ return local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_PANID_CHANGED);
+}
+
+static inline int
+drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
+{
+ struct ieee802154_hw_addr_filt filt;
+
+ might_sleep();
+
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.ieee_addr = extended_addr;
+
+ return local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_IEEEADDR_CHANGED);
+}
+
+static inline int
+drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
+{
+ struct ieee802154_hw_addr_filt filt;
+
+ might_sleep();
+
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.short_addr = short_addr;
+
+ return local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_SADDR_CHANGED);
+}
+
+static inline int
+drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
+{
+ struct ieee802154_hw_addr_filt filt;
+
+ might_sleep();
+
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.pan_coord = is_coord;
+
+ return local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_PANC_CHANGED);
+}
+
+static inline int
+drv_set_csma_params(struct ieee802154_local *local, u8 min_be, u8 max_be,
+ u8 max_csma_backoffs)
+{
+ might_sleep();
+
+ if (!local->ops->set_csma_params) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_csma_params(&local->hw, min_be, max_be,
+ max_csma_backoffs);
+}
+
+static inline int
+drv_set_max_frame_retries(struct ieee802154_local *local, s8 max_frame_retries)
+{
+ might_sleep();
+
+ if (!local->ops->set_frame_retries) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_frame_retries(&local->hw, max_frame_retries);
+}
+
+static inline int
+drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
+{
+ might_sleep();
+
+ if (!local->ops->set_promiscuous_mode) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return local->ops->set_promiscuous_mode(&local->hw, on);
+}
+
+#endif /* __MAC802154_DRIVER_OPS */
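Every drv_*() wrapper above follows the same shape: might_sleep(), a NULL check on the optional driver callback (WARN_ON(1) plus -EOPNOTSUPP), then the call into the driver with &local->hw as the first argument. For orientation, a hedged sketch of the minimum a hardware driver has to provide: only the callbacks that ieee802154_alloc_hw() in main.c insists on (start, stop, ed, set_channel and at least one of xmit_sync/xmit_async). The foo_* names are placeholders, not an existing driver:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

static int foo_start(struct ieee802154_hw *hw)
{
	/* power up the transceiver and enable the receiver */
	return 0;
}

static void foo_stop(struct ieee802154_hw *hw)
{
	/* disable the receiver and power down */
}

static int foo_xmit_sync(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* blocking transmit; reached via drv_xmit_sync() under RTNL */
	return 0;
}

static int foo_ed(struct ieee802154_hw *hw, u8 *level)
{
	*level = 0;	/* report the energy-detect result */
	return 0;
}

static int foo_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	/* retune the radio; reached via drv_set_channel() */
	return 0;
}

static const struct ieee802154_ops foo_ops = {
	.owner		= THIS_MODULE,
	.start		= foo_start,
	.stop		= foo_stop,
	.xmit_sync	= foo_xmit_sync,
	.ed		= foo_ed,
	.set_channel	= foo_set_channel,
};

Optional callbacks such as set_hw_addr_filt or set_txpower can be added as the hardware allows; the wrappers above return -EOPNOTSUPP when they are absent.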
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
deleted file mode 100644
index b36b2b996578..000000000000
--- a/net/mac802154/ieee802154_dev.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (C) 2007-2012 Siemens AG
- *
- * Written by:
- * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- *
- * Based on the code from 'linux-zigbee.sourceforge.net' project.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-
-#include <net/netlink.h>
-#include <linux/nl802154.h>
-#include <net/mac802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/route.h>
-#include <net/wpan-phy.h>
-
-#include "mac802154.h"
-
-int mac802154_slave_open(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct mac802154_sub_if_data *subif;
- struct mac802154_priv *ipriv = priv->hw;
- int res = 0;
-
- ASSERT_RTNL();
-
- if (priv->type == IEEE802154_DEV_WPAN) {
- mutex_lock(&priv->hw->slaves_mtx);
- list_for_each_entry(subif, &priv->hw->slaves, list) {
- if (subif != priv && subif->type == priv->type &&
- subif->running) {
- mutex_unlock(&priv->hw->slaves_mtx);
- return -EBUSY;
- }
- }
- mutex_unlock(&priv->hw->slaves_mtx);
- }
-
- mutex_lock(&priv->hw->slaves_mtx);
- priv->running = true;
- mutex_unlock(&priv->hw->slaves_mtx);
-
- if (ipriv->open_count++ == 0) {
- res = ipriv->ops->start(&ipriv->hw);
- WARN_ON(res);
- if (res)
- goto err;
- }
-
- if (ipriv->ops->ieee_addr) {
- __le64 addr = ieee802154_devaddr_from_raw(dev->dev_addr);
-
- res = ipriv->ops->ieee_addr(&ipriv->hw, addr);
- WARN_ON(res);
- if (res)
- goto err;
- mac802154_dev_set_ieee_addr(dev);
- }
-
- netif_start_queue(dev);
- return 0;
-err:
- priv->hw->open_count--;
-
- return res;
-}
-
-int mac802154_slave_close(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct mac802154_priv *ipriv = priv->hw;
-
- ASSERT_RTNL();
-
- netif_stop_queue(dev);
-
- mutex_lock(&priv->hw->slaves_mtx);
- priv->running = false;
- mutex_unlock(&priv->hw->slaves_mtx);
-
- if (!--ipriv->open_count)
- ipriv->ops->stop(&ipriv->hw);
-
- return 0;
-}
-
-static int
-mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv;
- struct mac802154_priv *ipriv;
- int err;
-
- ipriv = wpan_phy_priv(phy);
-
- priv = netdev_priv(dev);
- priv->dev = dev;
- priv->hw = ipriv;
-
- dev->needed_headroom = ipriv->hw.extra_tx_headroom;
-
- SET_NETDEV_DEV(dev, &ipriv->phy->dev);
-
- mutex_lock(&ipriv->slaves_mtx);
- if (!ipriv->running) {
- mutex_unlock(&ipriv->slaves_mtx);
- return -ENODEV;
- }
- mutex_unlock(&ipriv->slaves_mtx);
-
- err = register_netdev(dev);
- if (err < 0)
- return err;
-
- rtnl_lock();
- mutex_lock(&ipriv->slaves_mtx);
- list_add_tail_rcu(&priv->list, &ipriv->slaves);
- mutex_unlock(&ipriv->slaves_mtx);
- rtnl_unlock();
-
- return 0;
-}
-
-static void
-mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
-{
- struct mac802154_sub_if_data *sdata;
-
- ASSERT_RTNL();
-
- sdata = netdev_priv(dev);
-
- BUG_ON(sdata->hw->phy != phy);
-
- mutex_lock(&sdata->hw->slaves_mtx);
- list_del_rcu(&sdata->list);
- mutex_unlock(&sdata->hw->slaves_mtx);
-
- synchronize_rcu();
- unregister_netdevice(sdata->dev);
-}
-
-static struct net_device *
-mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
-{
- struct net_device *dev;
- int err = -ENOMEM;
-
- switch (type) {
- case IEEE802154_DEV_MONITOR:
- dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
- name, NET_NAME_UNKNOWN,
- mac802154_monitor_setup);
- break;
- case IEEE802154_DEV_WPAN:
- dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
- name, NET_NAME_UNKNOWN,
- mac802154_wpan_setup);
- break;
- default:
- dev = NULL;
- err = -EINVAL;
- break;
- }
- if (!dev)
- goto err;
-
- err = mac802154_netdev_register(phy, dev);
- if (err)
- goto err_free;
-
- dev_hold(dev); /* we return an incremented device refcount */
- return dev;
-
-err_free:
- free_netdev(dev);
-err:
- return ERR_PTR(err);
-}
-
-static int mac802154_set_txpower(struct wpan_phy *phy, int db)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_txpower(&priv->hw, db);
-}
-
-static int mac802154_set_lbt(struct wpan_phy *phy, bool on)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_lbt(&priv->hw, on);
-}
-
-static int mac802154_set_cca_mode(struct wpan_phy *phy, u8 mode)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_cca_mode(&priv->hw, mode);
-}
-
-static int mac802154_set_cca_ed_level(struct wpan_phy *phy, s32 level)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_cca_ed_level(&priv->hw, level);
-}
-
-static int mac802154_set_csma_params(struct wpan_phy *phy, u8 min_be,
- u8 max_be, u8 retries)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_csma_params(&priv->hw, min_be, max_be, retries);
-}
-
-static int mac802154_set_frame_retries(struct wpan_phy *phy, s8 retries)
-{
- struct mac802154_priv *priv = wpan_phy_priv(phy);
-
- return priv->ops->set_frame_retries(&priv->hw, retries);
-}
-
-struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
-{
- struct wpan_phy *phy;
- struct mac802154_priv *priv;
- size_t priv_size;
-
- if (!ops || !ops->xmit || !ops->ed || !ops->start ||
- !ops->stop || !ops->set_channel) {
- pr_err("undefined IEEE802.15.4 device operations\n");
- return NULL;
- }
-
- /* Ensure 32-byte alignment of our private data and hw private data.
- * We use the wpan_phy priv data for both our mac802154_priv and for
- * the driver's private data
- *
- * in memory it'll be like this:
- *
- * +-----------------------+
- * | struct wpan_phy |
- * +-----------------------+
- * | struct mac802154_priv |
- * +-----------------------+
- * | driver's private data |
- * +-----------------------+
- *
- * Due to ieee802154 layer isn't aware of driver and MAC structures,
- * so lets allign them here.
- */
-
- priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len;
-
- phy = wpan_phy_alloc(priv_size);
- if (!phy) {
- pr_err("failure to allocate master IEEE802.15.4 device\n");
- return NULL;
- }
-
- priv = wpan_phy_priv(phy);
- priv->phy = phy;
- priv->hw.phy = priv->phy;
- priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
- priv->ops = ops;
-
- INIT_LIST_HEAD(&priv->slaves);
- mutex_init(&priv->slaves_mtx);
-
- return &priv->hw;
-}
-EXPORT_SYMBOL(ieee802154_alloc_device);
-
-void ieee802154_free_device(struct ieee802154_dev *hw)
-{
- struct mac802154_priv *priv = mac802154_to_priv(hw);
-
- BUG_ON(!list_empty(&priv->slaves));
-
- mutex_destroy(&priv->slaves_mtx);
-
- wpan_phy_free(priv->phy);
-}
-EXPORT_SYMBOL(ieee802154_free_device);
-
-int ieee802154_register_device(struct ieee802154_dev *dev)
-{
- struct mac802154_priv *priv = mac802154_to_priv(dev);
- int rc = -ENOSYS;
-
- if (dev->flags & IEEE802154_HW_TXPOWER) {
- if (!priv->ops->set_txpower)
- goto out;
-
- priv->phy->set_txpower = mac802154_set_txpower;
- }
-
- if (dev->flags & IEEE802154_HW_LBT) {
- if (!priv->ops->set_lbt)
- goto out;
-
- priv->phy->set_lbt = mac802154_set_lbt;
- }
-
- if (dev->flags & IEEE802154_HW_CCA_MODE) {
- if (!priv->ops->set_cca_mode)
- goto out;
-
- priv->phy->set_cca_mode = mac802154_set_cca_mode;
- }
-
- if (dev->flags & IEEE802154_HW_CCA_ED_LEVEL) {
- if (!priv->ops->set_cca_ed_level)
- goto out;
-
- priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
- }
-
- if (dev->flags & IEEE802154_HW_CSMA_PARAMS) {
- if (!priv->ops->set_csma_params)
- goto out;
-
- priv->phy->set_csma_params = mac802154_set_csma_params;
- }
-
- if (dev->flags & IEEE802154_HW_FRAME_RETRIES) {
- if (!priv->ops->set_frame_retries)
- goto out;
-
- priv->phy->set_frame_retries = mac802154_set_frame_retries;
- }
-
- priv->dev_workqueue =
- create_singlethread_workqueue(wpan_phy_name(priv->phy));
- if (!priv->dev_workqueue) {
- rc = -ENOMEM;
- goto out;
- }
-
- wpan_phy_set_dev(priv->phy, priv->hw.parent);
-
- priv->phy->add_iface = mac802154_add_iface;
- priv->phy->del_iface = mac802154_del_iface;
-
- rc = wpan_phy_register(priv->phy);
- if (rc < 0)
- goto out_wq;
-
- rtnl_lock();
-
- mutex_lock(&priv->slaves_mtx);
- priv->running = MAC802154_DEVICE_RUN;
- mutex_unlock(&priv->slaves_mtx);
-
- rtnl_unlock();
-
- return 0;
-
-out_wq:
- destroy_workqueue(priv->dev_workqueue);
-out:
- return rc;
-}
-EXPORT_SYMBOL(ieee802154_register_device);
-
-void ieee802154_unregister_device(struct ieee802154_dev *dev)
-{
- struct mac802154_priv *priv = mac802154_to_priv(dev);
- struct mac802154_sub_if_data *sdata, *next;
-
- flush_workqueue(priv->dev_workqueue);
- destroy_workqueue(priv->dev_workqueue);
-
- rtnl_lock();
-
- mutex_lock(&priv->slaves_mtx);
- priv->running = MAC802154_DEVICE_STOPPED;
- mutex_unlock(&priv->slaves_mtx);
-
- list_for_each_entry_safe(sdata, next, &priv->slaves, list) {
- mutex_lock(&sdata->hw->slaves_mtx);
- list_del(&sdata->list);
- mutex_unlock(&sdata->hw->slaves_mtx);
-
- unregister_netdevice(sdata->dev);
- }
-
- rtnl_unlock();
-
- wpan_phy_unregister(priv->phy);
-}
-EXPORT_SYMBOL(ieee802154_unregister_device);
-
-MODULE_DESCRIPTION("IEEE 802.15.4 implementation");
-MODULE_LICENSE("GPL v2");
diff --git a/net/mac802154/mac802154.h b/net/mac802154/ieee802154_i.h
index 762a6f849c6b..bebd70ffc7a3 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/ieee802154_i.h
@@ -10,29 +10,28 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
-#ifndef MAC802154_H
-#define MAC802154_H
+#ifndef __IEEE802154_I_H
+#define __IEEE802154_I_H
#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <net/cfg802154.h>
#include <net/mac802154.h>
+#include <net/nl802154.h>
#include <net/ieee802154_netdev.h>
#include "llsec.h"
/* mac802154 device private data */
-struct mac802154_priv {
- struct ieee802154_dev hw;
- struct ieee802154_ops *ops;
+struct ieee802154_local {
+ struct ieee802154_hw hw;
+ const struct ieee802154_ops *ops;
/* ieee802154 phy */
struct wpan_phy *phy;
@@ -46,23 +45,29 @@ struct mac802154_priv {
*
* So atomic readers can use any of this protection methods.
*/
- struct list_head slaves;
- struct mutex slaves_mtx;
+ struct list_head interfaces;
+ struct mutex iflist_mtx;
/* This one is used for scanning and other jobs not to be interfered
* with serial driver.
*/
- struct workqueue_struct *dev_workqueue;
+ struct workqueue_struct *workqueue;
- /* SoftMAC device is registered and running. One can add subinterfaces.
- * This flag should be modified under slaves_mtx and RTNL, so you can
- * read them using any of protection methods.
- */
- bool running;
+ struct hrtimer ifs_timer;
+
+ bool started;
+
+ struct tasklet_struct tasklet;
+ struct sk_buff_head skb_queue;
+};
+
+enum {
+ IEEE802154_RX_MSG = 1,
};
-#define MAC802154_DEVICE_STOPPED 0x00
-#define MAC802154_DEVICE_RUN 0x01
+enum ieee802154_sdata_state_bits {
+ SDATA_STATE_RUNNING,
+};
/* Slave interface definition.
*
@@ -70,72 +75,74 @@ struct mac802154_priv {
* Each ieee802154 device/transceiver may have several slaves and able
* to be associated with several networks at the same time.
*/
-struct mac802154_sub_if_data {
+struct ieee802154_sub_if_data {
struct list_head list; /* the ieee802154_priv->slaves list */
- struct mac802154_priv *hw;
+ struct wpan_dev wpan_dev;
+
+ struct ieee802154_local *local;
struct net_device *dev;
- int type;
- bool running;
+ unsigned long state;
+ char name[IFNAMSIZ];
spinlock_t mib_lock;
- __le16 pan_id;
- __le16 short_addr;
- __le64 extended_addr;
-
- u8 chan;
- u8 page;
-
- struct ieee802154_mac_params mac_params;
-
- /* MAC BSN field */
- u8 bsn;
- /* MAC DSN field */
- u8 dsn;
-
/* protects sec from concurrent access by netlink. access by
* encrypt/decrypt/header_create safe without additional protection.
*/
struct mutex sec_mtx;
struct mac802154_llsec sec;
+ /* must be last, dynamically sized area in this! */
+ struct ieee802154_vif vif;
};
-#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
-
#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
-extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
-extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
-
-int mac802154_slave_open(struct net_device *dev);
-int mac802154_slave_close(struct net_device *dev);
+/* utility functions/constants */
+extern const void *const mac802154_wpan_phy_privid; /* for wpan_phy privid */
+
+static inline struct ieee802154_local *
+hw_to_local(struct ieee802154_hw *hw)
+{
+ return container_of(hw, struct ieee802154_local, hw);
+}
+
+static inline struct ieee802154_sub_if_data *
+IEEE802154_DEV_TO_SUB_IF(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline struct ieee802154_sub_if_data *
+IEEE802154_WPAN_DEV_TO_SUB_IF(struct wpan_dev *wpan_dev)
+{
+ return container_of(wpan_dev, struct ieee802154_sub_if_data, wpan_dev);
+}
+
+static inline bool
+ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
+{
+ return test_bit(SDATA_STATE_RUNNING, &sdata->state);
+}
-void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
-void mac802154_monitor_setup(struct net_device *dev);
-
-void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb);
-void mac802154_wpan_setup(struct net_device *dev);
+extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
-netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
- u8 page, u8 chan);
+netdev_tx_t
+ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t
+ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
+enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
/* MIB callbacks */
void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
-void mac802154_dev_set_ieee_addr(struct net_device *dev);
__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
u8 mac802154_dev_get_dsn(const struct net_device *dev);
-int mac802154_set_mac_params(struct net_device *dev,
- const struct ieee802154_mac_params *params);
-void mac802154_get_mac_params(struct net_device *dev,
- struct ieee802154_mac_params *params);
-
int mac802154_get_params(struct net_device *dev,
struct ieee802154_llsec_params *params);
int mac802154_set_params(struct net_device *dev,
@@ -169,4 +176,13 @@ void mac802154_get_table(struct net_device *dev,
struct ieee802154_llsec_table **t);
void mac802154_unlock_table(struct net_device *dev);
-#endif /* MAC802154_H */
+/* interface handling */
+int ieee802154_iface_init(void);
+void ieee802154_iface_exit(void);
+void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata);
+struct net_device *
+ieee802154_if_add(struct ieee802154_local *local, const char *name,
+ enum nl802154_iftype type, __le64 extended_addr);
+void ieee802154_remove_interfaces(struct ieee802154_local *local);
+
+#endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
new file mode 100644
index 000000000000..9ae893057dd7
--- /dev/null
+++ b/net/mac802154/iface.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/ieee802154.h>
+
+#include <net/nl802154.h>
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/cfg802154.h>
+
+#include "ieee802154_i.h"
+#include "driver-ops.h"
+
+static int mac802154_wpan_update_llsec(struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ int rc = 0;
+
+ if (ops->llsec) {
+ struct ieee802154_llsec_params params;
+ int changed = 0;
+
+ params.pan_id = wpan_dev->pan_id;
+ changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+ params.hwaddr = wpan_dev->extended_addr;
+ changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+ rc = ops->llsec->set_params(dev, &params, changed);
+ }
+
+ return rc;
+}
+
+static int
+mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct sockaddr_ieee802154 *sa =
+ (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+ int err = -ENOIOCTLCMD;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&sdata->mib_lock);
+
+ switch (cmd) {
+ case SIOCGIFADDR:
+ {
+ u16 pan_id, short_addr;
+
+ pan_id = le16_to_cpu(wpan_dev->pan_id);
+ short_addr = le16_to_cpu(wpan_dev->short_addr);
+ if (pan_id == IEEE802154_PANID_BROADCAST ||
+ short_addr == IEEE802154_ADDR_BROADCAST) {
+ err = -EADDRNOTAVAIL;
+ break;
+ }
+
+ sa->family = AF_IEEE802154;
+ sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+ sa->addr.pan_id = pan_id;
+ sa->addr.short_addr = short_addr;
+
+ err = 0;
+ break;
+ }
+ case SIOCSIFADDR:
+ if (netif_running(dev)) {
+ spin_unlock_bh(&sdata->mib_lock);
+ return -EBUSY;
+ }
+
+ dev_warn(&dev->dev,
+ "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n");
+ if (sa->family != AF_IEEE802154 ||
+ sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
+ sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
+ err = -EINVAL;
+ break;
+ }
+
+ wpan_dev->pan_id = cpu_to_le16(sa->addr.pan_id);
+ wpan_dev->short_addr = cpu_to_le16(sa->addr.short_addr);
+
+ err = mac802154_wpan_update_llsec(dev);
+ break;
+ }
+
+ spin_unlock_bh(&sdata->mib_lock);
+ return err;
+}
+
+static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct sockaddr *addr = p;
+ __le64 extended_addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
+ if (!ieee802154_is_valid_extended_addr(extended_addr))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ sdata->wpan_dev.extended_addr = extended_addr;
+
+ return mac802154_wpan_update_llsec(dev);
+}
+
+static int mac802154_slave_open(struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_sub_if_data *subif;
+ struct ieee802154_local *local = sdata->local;
+ int res = 0;
+
+ ASSERT_RTNL();
+
+ if (sdata->vif.type == NL802154_IFTYPE_NODE) {
+ mutex_lock(&sdata->local->iflist_mtx);
+ list_for_each_entry(subif, &sdata->local->interfaces, list) {
+ if (subif != sdata &&
+ subif->vif.type == sdata->vif.type &&
+ ieee802154_sdata_running(subif)) {
+ mutex_unlock(&sdata->local->iflist_mtx);
+ return -EBUSY;
+ }
+ }
+ mutex_unlock(&sdata->local->iflist_mtx);
+ }
+
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+ if (!local->open_count) {
+ res = drv_start(local);
+ WARN_ON(res);
+ if (res)
+ goto err;
+ }
+
+ local->open_count++;
+ netif_start_queue(dev);
+ return 0;
+err:
+ /* might already be clear but that doesn't matter */
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+ return res;
+}
+
+static int mac802154_wpan_open(struct net_device *dev)
+{
+ int rc;
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_local *local = sdata->local;
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct wpan_phy *phy = sdata->local->phy;
+
+ rc = mac802154_slave_open(dev);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&phy->pib_lock);
+
+ if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+ rc = drv_set_promiscuous_mode(local,
+ wpan_dev->promiscuous_mode);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_AFILT) {
+ rc = drv_set_pan_id(local, wpan_dev->pan_id);
+ if (rc < 0)
+ goto out;
+
+ rc = drv_set_extended_addr(local, wpan_dev->extended_addr);
+ if (rc < 0)
+ goto out;
+
+ rc = drv_set_short_addr(local, wpan_dev->short_addr);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_LBT) {
+ rc = drv_set_lbt_mode(local, wpan_dev->lbt);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) {
+ rc = drv_set_csma_params(local, wpan_dev->min_be,
+ wpan_dev->max_be,
+ wpan_dev->csma_retries);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) {
+ rc = drv_set_max_frame_retries(local, wpan_dev->frame_retries);
+ if (rc < 0)
+ goto out;
+ }
+
+ mutex_unlock(&phy->pib_lock);
+ return 0;
+
+out:
+ mutex_unlock(&phy->pib_lock);
+ return rc;
+}
+
+static int mac802154_slave_close(struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_local *local = sdata->local;
+
+ ASSERT_RTNL();
+
+ hrtimer_cancel(&local->ifs_timer);
+
+ netif_stop_queue(dev);
+ local->open_count--;
+
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+ if (!local->open_count)
+ drv_stop(local);
+
+ return 0;
+}
+
+static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata,
+ struct ieee802154_hdr *hdr,
+ const struct ieee802154_mac_cb *cb)
+{
+ struct ieee802154_llsec_params params;
+ u8 level;
+
+ mac802154_llsec_get_params(&sdata->sec, &params);
+
+ if (!params.enabled && cb->secen_override && cb->secen)
+ return -EINVAL;
+ if (!params.enabled ||
+ (cb->secen_override && !cb->secen) ||
+ !params.out_level)
+ return 0;
+ if (cb->seclevel_override && !cb->seclevel)
+ return -EINVAL;
+
+ level = cb->seclevel_override ? cb->seclevel : params.out_level;
+
+ hdr->fc.security_enabled = 1;
+ hdr->sec.level = level;
+ hdr->sec.key_id_mode = params.out_key.mode;
+ if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
+ hdr->sec.short_src = params.out_key.short_source;
+ else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
+ hdr->sec.extended_src = params.out_key.extended_source;
+ hdr->sec.key_id = params.out_key.id;
+
+ return 0;
+}
+
+static int mac802154_header_create(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ const void *daddr,
+ const void *saddr,
+ unsigned len)
+{
+ struct ieee802154_hdr hdr;
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct ieee802154_mac_cb *cb = mac_cb(skb);
+ int hlen;
+
+ if (!daddr)
+ return -EINVAL;
+
+ memset(&hdr.fc, 0, sizeof(hdr.fc));
+ hdr.fc.type = cb->type;
+ hdr.fc.security_enabled = cb->secen;
+ hdr.fc.ack_request = cb->ackreq;
+ hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+
+ if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
+ return -EINVAL;
+
+ if (!saddr) {
+ spin_lock_bh(&sdata->mib_lock);
+
+ if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
+ wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
+ wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
+ hdr.source.mode = IEEE802154_ADDR_LONG;
+ hdr.source.extended_addr = wpan_dev->extended_addr;
+ } else {
+ hdr.source.mode = IEEE802154_ADDR_SHORT;
+ hdr.source.short_addr = wpan_dev->short_addr;
+ }
+
+ hdr.source.pan_id = wpan_dev->pan_id;
+
+ spin_unlock_bh(&sdata->mib_lock);
+ } else {
+ hdr.source = *(const struct ieee802154_addr *)saddr;
+ }
+
+ hdr.dest = *(const struct ieee802154_addr *)daddr;
+
+ hlen = ieee802154_hdr_push(skb, &hdr);
+ if (hlen < 0)
+ return -EINVAL;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = hlen;
+
+ if (len > ieee802154_max_payload(&hdr))
+ return -EMSGSIZE;
+
+ return hlen;
+}
+
+static int
+mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ struct ieee802154_hdr hdr;
+ struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
+
+ if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
+ pr_debug("malformed packet\n");
+ return 0;
+ }
+
+ *addr = hdr.source;
+ return sizeof(*addr);
+}
+
+static struct header_ops mac802154_header_ops = {
+ .create = mac802154_header_create,
+ .parse = mac802154_header_parse,
+};
+
+static const struct net_device_ops mac802154_wpan_ops = {
+ .ndo_open = mac802154_wpan_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = ieee802154_subif_start_xmit,
+ .ndo_do_ioctl = mac802154_wpan_ioctl,
+ .ndo_set_mac_address = mac802154_wpan_mac_addr,
+};
+
+static const struct net_device_ops mac802154_monitor_ops = {
+ .ndo_open = mac802154_wpan_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = ieee802154_monitor_start_xmit,
+};
+
+static void mac802154_wpan_free(struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+ mac802154_llsec_destroy(&sdata->sec);
+
+ free_netdev(dev);
+}
+
+static void ieee802154_if_setup(struct net_device *dev)
+{
+ dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);
+
+ dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
+ dev->needed_tailroom = 2 + 16; /* FCS + MIC */
+ dev->mtu = IEEE802154_MTU;
+ dev->tx_queue_len = 300;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+}
+
+static int
+ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
+ enum nl802154_iftype type)
+{
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+
+ /* set some type-dependent values */
+ sdata->vif.type = type;
+ sdata->wpan_dev.iftype = type;
+
+ get_random_bytes(&wpan_dev->bsn, 1);
+ get_random_bytes(&wpan_dev->dsn, 1);
+
+ /* defaults per 802.15.4-2011 */
+ wpan_dev->min_be = 3;
+ wpan_dev->max_be = 5;
+ wpan_dev->csma_retries = 4;
+ /* for compatibility, actual default is 3 */
+ wpan_dev->frame_retries = -1;
+
+ wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
+ wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+
+ switch (type) {
+ case NL802154_IFTYPE_NODE:
+ ieee802154_be64_to_le64(&wpan_dev->extended_addr,
+ sdata->dev->dev_addr);
+
+ sdata->dev->header_ops = &mac802154_header_ops;
+ sdata->dev->destructor = mac802154_wpan_free;
+ sdata->dev->netdev_ops = &mac802154_wpan_ops;
+ sdata->dev->ml_priv = &mac802154_mlme_wpan;
+ wpan_dev->promiscuous_mode = false;
+
+ spin_lock_init(&sdata->mib_lock);
+ mutex_init(&sdata->sec_mtx);
+
+ mac802154_llsec_init(&sdata->sec);
+ break;
+ case NL802154_IFTYPE_MONITOR:
+ sdata->dev->destructor = free_netdev;
+ sdata->dev->netdev_ops = &mac802154_monitor_ops;
+ wpan_dev->promiscuous_mode = true;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+struct net_device *
+ieee802154_if_add(struct ieee802154_local *local, const char *name,
+ enum nl802154_iftype type, __le64 extended_addr)
+{
+ struct net_device *ndev = NULL;
+ struct ieee802154_sub_if_data *sdata = NULL;
+ int ret = -ENOMEM;
+
+ ASSERT_RTNL();
+
+ ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name,
+ NET_NAME_UNKNOWN, ieee802154_if_setup);
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ ndev->needed_headroom = local->hw.extra_tx_headroom;
+
+ ret = dev_alloc_name(ndev, ndev->name);
+ if (ret < 0)
+ goto err;
+
+ ieee802154_le64_to_be64(ndev->perm_addr,
+ &local->hw.phy->perm_extended_addr);
+ switch (type) {
+ case NL802154_IFTYPE_NODE:
+ ndev->type = ARPHRD_IEEE802154;
+ if (ieee802154_is_valid_extended_addr(extended_addr))
+ ieee802154_le64_to_be64(ndev->dev_addr, &extended_addr);
+ else
+ memcpy(ndev->dev_addr, ndev->perm_addr,
+ IEEE802154_EXTENDED_ADDR_LEN);
+ break;
+ case NL802154_IFTYPE_MONITOR:
+ ndev->type = ARPHRD_IEEE802154_MONITOR;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* TODO check this */
+ SET_NETDEV_DEV(ndev, &local->phy->dev);
+ sdata = netdev_priv(ndev);
+ ndev->ieee802154_ptr = &sdata->wpan_dev;
+ memcpy(sdata->name, ndev->name, IFNAMSIZ);
+ sdata->dev = ndev;
+ sdata->wpan_dev.wpan_phy = local->hw.phy;
+ sdata->local = local;
+
+ /* setup type-dependent data */
+ ret = ieee802154_setup_sdata(sdata, type);
+ if (ret)
+ goto err;
+
+ ret = register_netdevice(ndev);
+ if (ret < 0)
+ goto err;
+
+ mutex_lock(&local->iflist_mtx);
+ list_add_tail_rcu(&sdata->list, &local->interfaces);
+ mutex_unlock(&local->iflist_mtx);
+
+ return ndev;
+
+err:
+ free_netdev(ndev);
+ return ERR_PTR(ret);
+}
+
+void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata)
+{
+ ASSERT_RTNL();
+
+ mutex_lock(&sdata->local->iflist_mtx);
+ list_del_rcu(&sdata->list);
+ mutex_unlock(&sdata->local->iflist_mtx);
+
+ synchronize_rcu();
+ unregister_netdevice(sdata->dev);
+}
+
+void ieee802154_remove_interfaces(struct ieee802154_local *local)
+{
+ struct ieee802154_sub_if_data *sdata, *tmp;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+ list_del(&sdata->list);
+
+ unregister_netdevice(sdata->dev);
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
+
+static int netdev_notify(struct notifier_block *nb,
+ unsigned long state, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ieee802154_sub_if_data *sdata;
+
+ if (state != NETDEV_CHANGENAME)
+ return NOTIFY_DONE;
+
+ if (!dev->ieee802154_ptr || !dev->ieee802154_ptr->wpan_phy)
+ return NOTIFY_DONE;
+
+ if (dev->ieee802154_ptr->wpan_phy->privid != mac802154_wpan_phy_privid)
+ return NOTIFY_DONE;
+
+ sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ memcpy(sdata->name, dev->name, IFNAMSIZ);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mac802154_netdev_notifier = {
+ .notifier_call = netdev_notify,
+};
+
+int ieee802154_iface_init(void)
+{
+ return register_netdevice_notifier(&mac802154_netdev_notifier);
+}
+
+void ieee802154_iface_exit(void)
+{
+ unregister_netdevice_notifier(&mac802154_netdev_notifier);
+}
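ieee802154_if_add() and ieee802154_if_remove() both assert that the RTNL is held, which is why the deprecated cfg802154 hooks in cfg.c take the lock themselves while the nl802154 paths arrive with it already held. A hedged sketch (internal to mac802154, since neither function is exported) of creating and tearing down a monitor interface from a context that does not yet hold the RTNL:

#include <net/rtnetlink.h>
#include "ieee802154_i.h"

static int monitor_iface_sketch(struct ieee802154_local *local)
{
	struct net_device *ndev;

	rtnl_lock();
	ndev = ieee802154_if_add(local, "monitor%d", NL802154_IFTYPE_MONITOR,
				 cpu_to_le64(0x0000000000000000ULL));
	rtnl_unlock();
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	/* ... use the interface ... */

	rtnl_lock();
	ieee802154_if_remove(IEEE802154_DEV_TO_SUB_IF(ndev));
	rtnl_unlock();

	return 0;
}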
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 457058142098..dcf73958133a 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -17,10 +17,10 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/completion.h>
-#include <net/ieee802154.h>
+#include <linux/ieee802154.h>
#include <crypto/algapi.h>
-#include "mac802154.h"
+#include "ieee802154_i.h"
#include "llsec.h"
static void llsec_key_put(struct mac802154_llsec_key *key);
@@ -75,8 +75,6 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
}
}
-
-
int mac802154_llsec_get_params(struct mac802154_llsec *sec,
struct ieee802154_llsec_params *params)
{
@@ -117,8 +115,6 @@ int mac802154_llsec_set_params(struct mac802154_llsec *sec,
return 0;
}
-
-
static struct mac802154_llsec_key*
llsec_key_alloc(const struct ieee802154_llsec_key *template)
{
@@ -294,8 +290,6 @@ int mac802154_llsec_key_del(struct mac802154_llsec *sec,
return -ENOENT;
}
-
-
static bool llsec_dev_use_shortaddr(__le16 short_addr)
{
return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
@@ -304,12 +298,12 @@ static bool llsec_dev_use_shortaddr(__le16 short_addr)
static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
- return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
+ return ((__force u16)short_addr) << 16 | (__force u16)pan_id;
}
static u64 llsec_dev_hash_long(__le64 hwaddr)
{
- return (__force u64) hwaddr;
+ return (__force u64)hwaddr;
}
static struct mac802154_llsec_device*
@@ -411,8 +405,6 @@ int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
return 0;
}
-
-
static struct mac802154_llsec_device_key*
llsec_devkey_find(struct mac802154_llsec_device *dev,
const struct ieee802154_llsec_key_id *key)
@@ -475,8 +467,6 @@ int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
return 0;
}
-
-
static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
const struct ieee802154_llsec_seclevel *sl)
@@ -532,8 +522,6 @@ int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
return 0;
}
-
-
static int llsec_recover_addr(struct mac802154_llsec *sec,
struct ieee802154_addr *addr)
{
@@ -609,7 +597,6 @@ found:
return llsec_key_get(key);
}
-
static void llsec_geniv(u8 iv[16], __le64 addr,
const struct ieee802154_sechdr *sec)
{
@@ -786,8 +773,6 @@ fail:
return rc;
}
-
-
static struct mac802154_llsec_device*
llsec_lookup_dev(struct mac802154_llsec *sec,
const struct ieee802154_addr *addr)
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index bf809131eef7..6aacb1816889 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -24,14 +20,14 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
+#include <linux/ieee802154.h>
-#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
#include <net/mac802154.h>
-#include <net/nl802154.h>
-#include "mac802154.h"
+#include "ieee802154_i.h"
+#include "driver-ops.h"
static int mac802154_mlme_start_req(struct net_device *dev,
struct ieee802154_addr *addr,
@@ -43,11 +39,12 @@ static int mac802154_mlme_start_req(struct net_device *dev,
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
int rc = 0;
+ ASSERT_RTNL();
+
BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
mac802154_dev_set_pan_id(dev, addr->pan_id);
mac802154_dev_set_short_addr(dev, addr->short_addr);
- mac802154_dev_set_ieee_addr(dev);
mac802154_dev_set_page_channel(dev, page, channel);
if (ops->llsec) {
@@ -69,21 +66,71 @@ static int mac802154_mlme_start_req(struct net_device *dev,
rc = ops->llsec->set_params(dev, &params, changed);
}
- /* FIXME: add validation for unused parameters to be sane
- * for SoftMAC
- */
- ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
-
return rc;
}
-static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+static int mac802154_set_mac_params(struct net_device *dev,
+ const struct ieee802154_mac_params *params)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_local *local = sdata->local;
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ int ret;
+
+ ASSERT_RTNL();
+
+ /* PHY */
+ wpan_dev->wpan_phy->transmit_power = params->transmit_power;
+ wpan_dev->wpan_phy->cca_mode = params->cca_mode;
+ wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level;
+
+ /* MAC */
+ wpan_dev->min_be = params->min_be;
+ wpan_dev->max_be = params->max_be;
+ wpan_dev->csma_retries = params->csma_retries;
+ wpan_dev->frame_retries = params->frame_retries;
+ wpan_dev->lbt = params->lbt;
+
+ if (local->hw.flags & IEEE802154_HW_TXPOWER) {
+ ret = drv_set_tx_power(local, params->transmit_power);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
+ ret = drv_set_cca_mode(local, params->cca_mode);
+ if (ret < 0)
+ return ret;
+ }
- BUG_ON(dev->type != ARPHRD_IEEE802154);
+ if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) {
+ ret = drv_set_cca_ed_level(local, params->cca_ed_level);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
- return to_phy(get_device(&priv->hw->phy->dev));
+static void mac802154_get_mac_params(struct net_device *dev,
+ struct ieee802154_mac_params *params)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+
+ ASSERT_RTNL();
+
+ /* PHY */
+ params->transmit_power = wpan_dev->wpan_phy->transmit_power;
+ params->cca_mode = wpan_dev->wpan_phy->cca_mode;
+ params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level;
+
+ /* MAC */
+ params->min_be = wpan_dev->min_be;
+ params->max_be = wpan_dev->max_be;
+ params->csma_retries = wpan_dev->csma_retries;
+ params->frame_retries = wpan_dev->frame_retries;
+ params->lbt = wpan_dev->lbt;
}
static struct ieee802154_llsec_ops mac802154_llsec_ops = {
@@ -102,12 +149,7 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = {
.unlock_table = mac802154_unlock_table,
};
-struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
- .get_phy = mac802154_get_phy,
-};
-
struct ieee802154_mlme_ops mac802154_mlme_wpan = {
- .get_phy = mac802154_get_phy,
.start_req = mac802154_mlme_start_req,
.get_pan_id = mac802154_dev_get_pan_id,
.get_short_addr = mac802154_dev_get_short_addr,
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
new file mode 100644
index 000000000000..8500378c8318
--- /dev/null
+++ b/net/mac802154/main.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * Written by:
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <net/netlink.h>
+#include <net/nl802154.h>
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/route.h>
+#include <net/cfg802154.h>
+
+#include "ieee802154_i.h"
+#include "cfg.h"
+
+static void ieee802154_tasklet_handler(unsigned long data)
+{
+ struct ieee802154_local *local = (struct ieee802154_local *)data;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&local->skb_queue))) {
+ switch (skb->pkt_type) {
+ case IEEE802154_RX_MSG:
+ /* Clear skb->pkt_type in order to not confuse kernel
+ * netstack.
+ */
+ skb->pkt_type = 0;
+ ieee802154_rx(&local->hw, skb);
+ break;
+ default:
+ WARN(1, "mac802154: Packet is of unknown type %d\n",
+ skb->pkt_type);
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+struct ieee802154_hw *
+ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
+{
+ struct wpan_phy *phy;
+ struct ieee802154_local *local;
+ size_t priv_size;
+
+ if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+ !ops->start || !ops->stop || !ops->set_channel) {
+ pr_err("undefined IEEE802.15.4 device operations\n");
+ return NULL;
+ }
+
+ /* Ensure 32-byte alignment of our private data and hw private data.
+ * We use the wpan_phy priv data for both our ieee802154_local and for
+ * the driver's private data
+ *
+ * in memory it'll be like this:
+ *
+ * +-------------------------+
+ * | struct wpan_phy |
+ * +-------------------------+
+ * | struct ieee802154_local |
+ * +-------------------------+
+ * | driver's private data |
+ * +-------------------------+
+ *
+ * Because the ieee802154 layer isn't aware of the driver and MAC
+ * structures, align them here.
+ */
+
+ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;
+
+ phy = wpan_phy_new(&mac802154_config_ops, priv_size);
+ if (!phy) {
+ pr_err("failure to allocate master IEEE802.15.4 device\n");
+ return NULL;
+ }
+
+ phy->privid = mac802154_wpan_phy_privid;
+
+ local = wpan_phy_priv(phy);
+ local->phy = phy;
+ local->hw.phy = local->phy;
+ local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
+ local->ops = ops;
+
+ INIT_LIST_HEAD(&local->interfaces);
+ mutex_init(&local->iflist_mtx);
+
+ tasklet_init(&local->tasklet,
+ ieee802154_tasklet_handler,
+ (unsigned long)local);
+
+ skb_queue_head_init(&local->skb_queue);
+
+ return &local->hw;
+}
+EXPORT_SYMBOL(ieee802154_alloc_hw);
+
+void ieee802154_free_hw(struct ieee802154_hw *hw)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ BUG_ON(!list_empty(&local->interfaces));
+
+ mutex_destroy(&local->iflist_mtx);
+
+ wpan_phy_free(local->phy);
+}
+EXPORT_SYMBOL(ieee802154_free_hw);
+
+static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+{
+ /* TODO warn on empty symbol_duration
+ * Should be done when all drivers set this value.
+ */
+
+ wpan_phy->lifs_period = IEEE802154_LIFS_PERIOD *
+ wpan_phy->symbol_duration;
+ wpan_phy->sifs_period = IEEE802154_SIFS_PERIOD *
+ wpan_phy->symbol_duration;
+}
+
+int ieee802154_register_hw(struct ieee802154_hw *hw)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+ struct net_device *dev;
+ int rc = -ENOSYS;
+
+ local->workqueue =
+ create_singlethread_workqueue(wpan_phy_name(local->phy));
+ if (!local->workqueue) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ hrtimer_init(&local->ifs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ local->ifs_timer.function = ieee802154_xmit_ifs_timer;
+
+ wpan_phy_set_dev(local->phy, local->hw.parent);
+
+ ieee802154_setup_wpan_phy_pib(local->phy);
+
+ rc = wpan_phy_register(local->phy);
+ if (rc < 0)
+ goto out_wq;
+
+ rtnl_lock();
+
+ dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE,
+ cpu_to_le64(0x0000000000000000ULL));
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ rc = PTR_ERR(dev);
+ goto out_wq;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+
+out_wq:
+ destroy_workqueue(local->workqueue);
+out:
+ return rc;
+}
+EXPORT_SYMBOL(ieee802154_register_hw);
+
+void ieee802154_unregister_hw(struct ieee802154_hw *hw)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ tasklet_kill(&local->tasklet);
+ flush_workqueue(local->workqueue);
+ destroy_workqueue(local->workqueue);
+
+ rtnl_lock();
+
+ ieee802154_remove_interfaces(local);
+
+ rtnl_unlock();
+
+ wpan_phy_unregister(local->phy);
+}
+EXPORT_SYMBOL(ieee802154_unregister_hw);
+
+static int __init ieee802154_init(void)
+{
+ return ieee802154_iface_init();
+}
+
+static void __exit ieee802154_exit(void)
+{
+ ieee802154_iface_exit();
+
+ rcu_barrier();
+}
+
+subsys_initcall(ieee802154_init);
+module_exit(ieee802154_exit);
+
+MODULE_DESCRIPTION("IEEE 802.15.4 subsystem");
+MODULE_LICENSE("GPL v2");
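From a driver's point of view the four exported entry points above pair up as alloc/free and register/unregister. A sketch of the usual probe/remove flow, assuming the hypothetical foo_ops table from the driver-ops.h sketch and a placeholder private structure:

#include <linux/device.h>
#include <net/mac802154.h>

struct foo_priv {
	u32 regs;	/* placeholder for bus/register state */
};

extern const struct ieee802154_ops foo_ops;	/* e.g. the sketch after driver-ops.h */

static struct ieee802154_hw *foo_hw;

static int foo_probe(struct device *parent)
{
	struct ieee802154_hw *hw;
	int ret;

	hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	hw->parent = parent;	/* becomes the wpan_phy's parent device */
	/* hw->priv is the driver-private area carved out behind ieee802154_local */

	ret = ieee802154_register_hw(hw);	/* registers the phy and a default wpan%d */
	if (ret) {
		ieee802154_free_hw(hw);
		return ret;
	}

	foo_hw = hw;
	return 0;
}

static void foo_remove(void)
{
	ieee802154_unregister_hw(foo_hw);
	ieee802154_free_hw(foo_hw);
}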
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 868a040fd422..5cf019a57fd7 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Sergey Lapin <slapin@ossfans.org>
@@ -25,208 +21,100 @@
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
-#include <net/wpan-phy.h>
-
-#include "mac802154.h"
-
-struct phy_chan_notify_work {
- struct work_struct work;
- struct net_device *dev;
-};
-
-struct hw_addr_filt_notify_work {
- struct work_struct work;
- struct net_device *dev;
- unsigned long changed;
-};
-
-static struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return priv->hw;
-}
-
-static void hw_addr_notify(struct work_struct *work)
-{
- struct hw_addr_filt_notify_work *nw = container_of(work,
- struct hw_addr_filt_notify_work, work);
- struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
- int res;
+#include <net/cfg802154.h>
- res = hw->ops->set_hw_addr_filt(&hw->hw,
- &hw->hw.hw_filt,
- nw->changed);
- if (res)
- pr_debug("failed changed mask %lx\n", nw->changed);
-
- kfree(nw);
-}
-
-static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct hw_addr_filt_notify_work *work;
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return;
-
- INIT_WORK(&work->work, hw_addr_notify);
- work->dev = dev;
- work->changed = changed;
- queue_work(priv->hw->dev_workqueue, &work->work);
-}
+#include "ieee802154_i.h"
+#include "driver-ops.h"
void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- spin_lock_bh(&priv->mib_lock);
- priv->short_addr = val;
- spin_unlock_bh(&priv->mib_lock);
-
- if ((priv->hw->ops->set_hw_addr_filt) &&
- (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) {
- priv->hw->hw.hw_filt.short_addr = priv->short_addr;
- set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED);
- }
+ spin_lock_bh(&sdata->mib_lock);
+ sdata->wpan_dev.short_addr = val;
+ spin_unlock_bh(&sdata->mib_lock);
}
__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
__le16 ret;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- spin_lock_bh(&priv->mib_lock);
- ret = priv->short_addr;
- spin_unlock_bh(&priv->mib_lock);
+ spin_lock_bh(&sdata->mib_lock);
+ ret = sdata->wpan_dev.short_addr;
+ spin_unlock_bh(&sdata->mib_lock);
return ret;
}
-void mac802154_dev_set_ieee_addr(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct mac802154_priv *mac = priv->hw;
-
- priv->extended_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
-
- if (mac->ops->set_hw_addr_filt &&
- mac->hw.hw_filt.ieee_addr != priv->extended_addr) {
- mac->hw.hw_filt.ieee_addr = priv->extended_addr;
- set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
- }
-}
-
__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
__le16 ret;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- spin_lock_bh(&priv->mib_lock);
- ret = priv->pan_id;
- spin_unlock_bh(&priv->mib_lock);
+ spin_lock_bh(&sdata->mib_lock);
+ ret = sdata->wpan_dev.pan_id;
+ spin_unlock_bh(&sdata->mib_lock);
return ret;
}
void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- spin_lock_bh(&priv->mib_lock);
- priv->pan_id = val;
- spin_unlock_bh(&priv->mib_lock);
-
- if ((priv->hw->ops->set_hw_addr_filt) &&
- (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) {
- priv->hw->hw.hw_filt.pan_id = priv->pan_id;
- set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED);
- }
+ spin_lock_bh(&sdata->mib_lock);
+ sdata->wpan_dev.pan_id = val;
+ spin_unlock_bh(&sdata->mib_lock);
}
u8 mac802154_dev_get_dsn(const struct net_device *dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- return priv->dsn++;
-}
-
-static void phy_chan_notify(struct work_struct *work)
-{
- struct phy_chan_notify_work *nw = container_of(work,
- struct phy_chan_notify_work, work);
- struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
- struct mac802154_sub_if_data *priv = netdev_priv(nw->dev);
- int res;
-
- mutex_lock(&priv->hw->phy->pib_lock);
- res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
- if (res) {
- pr_debug("set_channel failed\n");
- } else {
- priv->hw->phy->current_channel = priv->chan;
- priv->hw->phy->current_page = priv->page;
- }
- mutex_unlock(&priv->hw->phy->pib_lock);
-
- kfree(nw);
+ return sdata->wpan_dev.dsn++;
}
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct phy_chan_notify_work *work;
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ struct ieee802154_local *local = sdata->local;
+ int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- spin_lock_bh(&priv->mib_lock);
- priv->page = page;
- priv->chan = chan;
- spin_unlock_bh(&priv->mib_lock);
-
- mutex_lock(&priv->hw->phy->pib_lock);
- if (priv->hw->phy->current_channel != priv->chan ||
- priv->hw->phy->current_page != priv->page) {
- mutex_unlock(&priv->hw->phy->pib_lock);
-
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return;
-
- INIT_WORK(&work->work, phy_chan_notify);
- work->dev = dev;
- queue_work(priv->hw->dev_workqueue, &work->work);
+ res = drv_set_channel(local, page, chan);
+ if (res) {
+ pr_debug("set_channel failed\n");
} else {
- mutex_unlock(&priv->hw->phy->pib_lock);
+ mutex_lock(&local->phy->pib_lock);
+ local->phy->current_channel = chan;
+ local->phy->current_page = page;
+ mutex_unlock(&local->phy->pib_lock);
}
}
-
int mac802154_get_params(struct net_device *dev,
struct ieee802154_llsec_params *params)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_get_params(&priv->sec, params);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_get_params(&sdata->sec, params);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
@@ -235,31 +123,30 @@ int mac802154_set_params(struct net_device *dev,
const struct ieee802154_llsec_params *params,
int changed)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_set_params(&priv->sec, params, changed);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_set_params(&sdata->sec, params, changed);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
-
int mac802154_add_key(struct net_device *dev,
const struct ieee802154_llsec_key_id *id,
const struct ieee802154_llsec_key *key)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_key_add(&priv->sec, id, key);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_key_add(&sdata->sec, id, key);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
@@ -267,61 +154,59 @@ int mac802154_add_key(struct net_device *dev,
int mac802154_del_key(struct net_device *dev,
const struct ieee802154_llsec_key_id *id)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_key_del(&priv->sec, id);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_key_del(&sdata->sec, id);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
-
int mac802154_add_dev(struct net_device *dev,
const struct ieee802154_llsec_device *llsec_dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_dev_add(&priv->sec, llsec_dev);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_dev_add(&sdata->sec, llsec_dev);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_dev_del(&priv->sec, dev_addr);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_dev_del(&sdata->sec, dev_addr);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
-
int mac802154_add_devkey(struct net_device *dev,
__le64 device_addr,
const struct ieee802154_llsec_device_key *key)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_devkey_add(&sdata->sec, device_addr, key);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
@@ -330,30 +215,29 @@ int mac802154_del_devkey(struct net_device *dev,
__le64 device_addr,
const struct ieee802154_llsec_device_key *key)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_devkey_del(&sdata->sec, device_addr, key);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
-
int mac802154_add_seclevel(struct net_device *dev,
const struct ieee802154_llsec_seclevel *sl)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_seclevel_add(&priv->sec, sl);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
@@ -361,43 +245,42 @@ int mac802154_add_seclevel(struct net_device *dev,
int mac802154_del_seclevel(struct net_device *dev,
const struct ieee802154_llsec_seclevel *sl)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
int res;
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
- res = mac802154_llsec_seclevel_del(&priv->sec, sl);
- mutex_unlock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
+ res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
+ mutex_unlock(&sdata->sec_mtx);
return res;
}
-
void mac802154_lock_table(struct net_device *dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_lock(&priv->sec_mtx);
+ mutex_lock(&sdata->sec_mtx);
}
void mac802154_get_table(struct net_device *dev,
struct ieee802154_llsec_table **t)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- *t = &priv->sec.table;
+ *t = &sdata->sec.table;
}
void mac802154_unlock_table(struct net_device *dev)
{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
BUG_ON(dev->type != ARPHRD_IEEE802154);
- mutex_unlock(&priv->sec_mtx);
+ mutex_unlock(&sdata->sec_mtx);
}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
deleted file mode 100644
index a68230e2b25f..000000000000
--- a/net/mac802154/monitor.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- * Sergey Lapin <slapin@ossfans.org>
- * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
- * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/crc-ccitt.h>
-
-#include <net/ieee802154.h>
-#include <net/mac802154.h>
-#include <net/netlink.h>
-#include <net/wpan-phy.h>
-#include <linux/nl802154.h>
-
-#include "mac802154.h"
-
-static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv;
- u8 chan, page;
-
- priv = netdev_priv(dev);
-
- /* FIXME: locking */
- chan = priv->hw->phy->current_channel;
- page = priv->hw->phy->current_page;
-
- if (chan == MAC802154_CHAN_NONE) /* not initialized */
- return NETDEV_TX_OK;
-
- if (WARN_ON(page >= WPAN_NUM_PAGES) ||
- WARN_ON(chan >= WPAN_NUM_CHANNELS))
- return NETDEV_TX_OK;
-
- skb->skb_iif = dev->ifindex;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- return mac802154_tx(priv->hw, skb, page, chan);
-}
-
-
-void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
-{
- struct sk_buff *skb2;
- struct mac802154_sub_if_data *sdata;
- u16 crc = crc_ccitt(0, skb->data, skb->len);
- u8 *data;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &priv->slaves, list) {
- if (sdata->type != IEEE802154_DEV_MONITOR ||
- !netif_running(sdata->dev))
- continue;
-
- skb2 = skb_clone(skb, GFP_ATOMIC);
- skb2->dev = sdata->dev;
- skb2->pkt_type = PACKET_HOST;
- data = skb_put(skb2, 2);
- data[0] = crc & 0xff;
- data[1] = crc >> 8;
-
- netif_rx_ni(skb2);
- }
- rcu_read_unlock();
-}
-
-static const struct net_device_ops mac802154_monitor_ops = {
- .ndo_open = mac802154_slave_open,
- .ndo_stop = mac802154_slave_close,
- .ndo_start_xmit = mac802154_monitor_xmit,
-};
-
-void mac802154_monitor_setup(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv;
-
- dev->addr_len = 0;
- dev->hard_header_len = 0;
- dev->needed_tailroom = 2; /* room for FCS */
- dev->mtu = IEEE802154_MTU;
- dev->tx_queue_len = 10;
- dev->type = ARPHRD_IEEE802154_MONITOR;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
- dev->watchdog_timeo = 0;
-
- dev->destructor = free_netdev;
- dev->netdev_ops = &mac802154_monitor_ops;
- dev->ml_priv = &mac802154_mlme_reduced;
-
- priv = netdev_priv(dev);
- priv->type = IEEE802154_DEV_MONITOR;
-
- priv->chan = MAC802154_CHAN_NONE; /* not initialized */
- priv->page = 0;
-}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index a14cf9ede171..c0d67b2b4132 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -23,92 +19,284 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/crc-ccitt.h>
+#include <asm/unaligned.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
+#include <net/nl802154.h>
-#include "mac802154.h"
+#include "ieee802154_i.h"
-/* The IEEE 802.15.4 standard defines 4 MAC packet types:
- * - beacon frame
- * - MAC command frame
- * - acknowledgement frame
- * - data frame
- *
- * and only the data frame should be pushed to the upper layers, other types
- * are just internal MAC layer management information. So only data packets
- * are going to be sent to the networking queue, all other will be processed
- * right here by using the device workqueue.
- */
-struct rx_work {
- struct sk_buff *skb;
- struct work_struct work;
- struct ieee802154_dev *dev;
- u8 lqi;
-};
+static int ieee802154_deliver_skb(struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = htons(ETH_P_IEEE802154);
-static void
-mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
+ return netif_receive_skb(skb);
+}
+
+static int
+ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
- struct mac802154_priv *priv = mac802154_to_priv(hw);
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ __le16 span, sshort;
+ int rc;
- mac_cb(skb)->lqi = lqi;
- skb->protocol = htons(ETH_P_IEEE802154);
- skb_reset_mac_header(skb);
+ pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
- if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
- u16 crc;
+ spin_lock_bh(&sdata->mib_lock);
- if (skb->len < 2) {
- pr_debug("got invalid frame\n");
- goto fail;
- }
- crc = crc_ccitt(0, skb->data, skb->len);
- if (crc) {
- pr_debug("CRC mismatch\n");
- goto fail;
- }
- skb_trim(skb, skb->len - 2); /* CRC */
+ span = wpan_dev->pan_id;
+ sshort = wpan_dev->short_addr;
+
+ switch (mac_cb(skb)->dest.mode) {
+ case IEEE802154_ADDR_NONE:
+ if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
+ /* FIXME: check if we are PAN coordinator */
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ /* ACK comes with both addresses empty */
+ skb->pkt_type = PACKET_HOST;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (mac_cb(skb)->dest.pan_id != span &&
+ mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr)
+ skb->pkt_type = PACKET_HOST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (mac_cb(skb)->dest.pan_id != span &&
+ mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (mac_cb(skb)->dest.short_addr == sshort)
+ skb->pkt_type = PACKET_HOST;
+ else if (mac_cb(skb)->dest.short_addr ==
+ cpu_to_le16(IEEE802154_ADDR_BROADCAST))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ default:
+ spin_unlock_bh(&sdata->mib_lock);
+ pr_debug("invalid dest mode\n");
+ goto fail;
}
- mac802154_monitors_rx(priv, skb);
- mac802154_wpans_rx(priv, skb);
+ spin_unlock_bh(&sdata->mib_lock);
- return;
+ skb->dev = sdata->dev;
+
+ rc = mac802154_llsec_decrypt(&sdata->sec, skb);
+ if (rc) {
+ pr_debug("decryption failed: %i\n", rc);
+ goto fail;
+ }
+
+ sdata->dev->stats.rx_packets++;
+ sdata->dev->stats.rx_bytes += skb->len;
+
+ switch (mac_cb(skb)->type) {
+ case IEEE802154_FC_TYPE_DATA:
+ return ieee802154_deliver_skb(skb);
+ default:
+ pr_warn("ieee802154: bad frame received (type = %d)\n",
+ mac_cb(skb)->type);
+ goto fail;
+ }
fail:
kfree_skb(skb);
+ return NET_RX_DROP;
}
-static void mac802154_rx_worker(struct work_struct *work)
+static void
+ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr)
{
- struct rx_work *rw = container_of(work, struct rx_work, work);
+ if (addr->mode == IEEE802154_ADDR_NONE)
+ pr_debug("%s not present\n", name);
+
+ pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
+ if (addr->mode == IEEE802154_ADDR_SHORT) {
+ pr_debug("%s is short: %04x\n", name,
+ le16_to_cpu(addr->short_addr));
+ } else {
+ u64 hw = swab64((__force u64)addr->extended_addr);
- mac802154_subif_rx(rw->dev, rw->skb, rw->lqi);
- kfree(rw);
+ pr_debug("%s is hardware: %8phC\n", name, &hw);
+ }
}
-void
-ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
+static int
+ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
- struct mac802154_priv *priv = mac802154_to_priv(dev);
- struct rx_work *work;
+ int hlen;
+ struct ieee802154_mac_cb *cb = mac_cb_init(skb);
- if (!skb)
- return;
+ skb_reset_mac_header(skb);
+
+ hlen = ieee802154_hdr_pull(skb, hdr);
+ if (hlen < 0)
+ return -EINVAL;
+
+ skb->mac_len = hlen;
+
+ pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
+ hdr->seq);
+
+ cb->type = hdr->fc.type;
+ cb->ackreq = hdr->fc.ack_request;
+ cb->secen = hdr->fc.security_enabled;
+
+ ieee802154_print_addr("destination", &hdr->dest);
+ ieee802154_print_addr("source", &hdr->source);
+
+ cb->source = hdr->source;
+ cb->dest = hdr->dest;
+
+ if (hdr->fc.security_enabled) {
+ u64 key;
+
+ pr_debug("seclevel %i\n", hdr->sec.level);
+
+ switch (hdr->sec.key_id_mode) {
+ case IEEE802154_SCF_KEY_IMPLICIT:
+ pr_debug("implicit key\n");
+ break;
+
+ case IEEE802154_SCF_KEY_INDEX:
+ pr_debug("key %02x\n", hdr->sec.key_id);
+ break;
+
+ case IEEE802154_SCF_KEY_SHORT_INDEX:
+ pr_debug("key %04x:%04x %02x\n",
+ le32_to_cpu(hdr->sec.short_src) >> 16,
+ le32_to_cpu(hdr->sec.short_src) & 0xffff,
+ hdr->sec.key_id);
+ break;
+
+ case IEEE802154_SCF_KEY_HW_INDEX:
+ key = swab64((__force u64)hdr->sec.extended_src);
+ pr_debug("key source %8phC %02x\n", &key,
+ hdr->sec.key_id);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void
+__ieee802154_rx_handle_packet(struct ieee802154_local *local,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct ieee802154_sub_if_data *sdata;
+ struct ieee802154_hdr hdr;
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
+ ret = ieee802154_parse_frame_start(skb, &hdr);
+ if (ret) {
+ pr_debug("got invalid frame\n");
+ kfree_skb(skb);
return;
+ }
+
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL802154_IFTYPE_NODE ||
+ !netif_running(sdata->dev))
+ continue;
+
+ ieee802154_subif_frame(sdata, skb, &hdr);
+ skb = NULL;
+ break;
+ }
+
+ if (skb)
+ kfree_skb(skb);
+}
+
+static void
+ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
+{
+ struct sk_buff *skb2;
+ struct ieee802154_sub_if_data *sdata;
+
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_IEEE802154);
+
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL802154_IFTYPE_MONITOR)
+ continue;
+
+ if (!ieee802154_sdata_running(sdata))
+ continue;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ skb2->dev = sdata->dev;
+ ieee802154_deliver_skb(skb2);
+
+ sdata->dev->stats.rx_packets++;
+ sdata->dev->stats.rx_bytes += skb->len;
+ }
+ }
+}
+
+void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+ u16 crc;
- INIT_WORK(&work->work, mac802154_rx_worker);
- work->skb = skb;
- work->dev = dev;
- work->lqi = lqi;
+ WARN_ON_ONCE(softirq_count() == 0);
- queue_work(priv->dev_workqueue, &work->work);
+ /* TODO: When a transceiver omits the checksum here, we
+ * add our own calculated one. This is currently an ugly
+ * solution because the monitor needs a CRC here.
+ */
+ if (local->hw.flags & IEEE802154_HW_RX_OMIT_CKSUM) {
+ crc = crc_ccitt(0, skb->data, skb->len);
+ put_unaligned_le16(crc, skb_put(skb, 2));
+ }
+
+ rcu_read_lock();
+
+ ieee802154_monitors_rx(local, skb);
+
+ /* Check whether the transceiver validates the checksum itself.
+ * If it does not, validate the checksum here.
+ */
+ if (local->hw.flags & IEEE802154_HW_RX_DROP_BAD_CKSUM) {
+ crc = crc_ccitt(0, skb->data, skb->len);
+ if (crc) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return;
+ }
+ }
+ /* remove crc */
+ skb_trim(skb, skb->len - 2);
+
+ __ieee802154_rx_handle_packet(local, skb);
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee802154_rx);
+
+void
+ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ mac_cb(skb)->lqi = lqi;
+ skb->pkt_type = IEEE802154_RX_MSG;
+ skb_queue_tail(&local->skb_queue, skb);
+ tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee802154_rx_irqsafe);
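A receive-path sketch (illustrative only; foo_* is hypothetical): a transceiver driver whose RX completion runs in hard-IRQ context builds an skb from the frame buffer and hands it to ieee802154_rx_irqsafe(), which defers processing to the tasklet added above; ieee802154_rx() is the variant for drivers already running in softirq context.

static void foo_rx_irq(struct ieee802154_hw *hw, const u8 *buf, u8 len, u8 lqi)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), buf, len);

	/* queues the skb and schedules the mac802154 tasklet */
	ieee802154_rx_irqsafe(hw, skb, lqi);
}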
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index fdf4c0e67259..c62e95695c78 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Sergey Lapin <slapin@ossfans.org>
@@ -24,106 +20,98 @@
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
+#include <asm/unaligned.h>
+#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
-#include "mac802154.h"
+#include "ieee802154_i.h"
+#include "driver-ops.h"
/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
* packets through the workqueue.
*/
-struct xmit_work {
+struct ieee802154_xmit_cb {
struct sk_buff *skb;
struct work_struct work;
- struct mac802154_priv *priv;
- u8 chan;
- u8 page;
+ struct ieee802154_local *local;
};
-static void mac802154_xmit_worker(struct work_struct *work)
+static struct ieee802154_xmit_cb ieee802154_xmit_cb;
+
+static void ieee802154_xmit_worker(struct work_struct *work)
{
- struct xmit_work *xw = container_of(work, struct xmit_work, work);
- struct mac802154_sub_if_data *sdata;
+ struct ieee802154_xmit_cb *cb =
+ container_of(work, struct ieee802154_xmit_cb, work);
+ struct ieee802154_local *local = cb->local;
+ struct sk_buff *skb = cb->skb;
+ struct net_device *dev = skb->dev;
int res;
- mutex_lock(&xw->priv->phy->pib_lock);
- if (xw->priv->phy->current_channel != xw->chan ||
- xw->priv->phy->current_page != xw->page) {
- res = xw->priv->ops->set_channel(&xw->priv->hw,
- xw->page,
- xw->chan);
- if (res) {
- pr_debug("set_channel failed\n");
- goto out;
- }
+ rtnl_lock();
- xw->priv->phy->current_channel = xw->chan;
- xw->priv->phy->current_page = xw->page;
- }
+ /* check if ifdown occurred while the work was scheduled */
+ if (!netif_running(dev))
+ goto err_tx;
- res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
+ res = drv_xmit_sync(local, skb);
if (res)
- pr_debug("transmission failed\n");
+ goto err_tx;
-out:
- mutex_unlock(&xw->priv->phy->pib_lock);
+ ieee802154_xmit_complete(&local->hw, skb, false);
- /* Restart the netif queue on each sub_if_data object. */
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &xw->priv->slaves, list)
- netif_wake_queue(sdata->dev);
- rcu_read_unlock();
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
- dev_kfree_skb(xw->skb);
+ rtnl_unlock();
- kfree(xw);
+ return;
+
+err_tx:
+ /* Restart the netif queue on each sub_if_data object. */
+ ieee802154_wake_queue(&local->hw);
+ rtnl_unlock();
+ kfree_skb(skb);
+ netdev_dbg(dev, "transmission failed\n");
}
-netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
- u8 page, u8 chan)
+static netdev_tx_t
+ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
- struct xmit_work *work;
- struct mac802154_sub_if_data *sdata;
-
- if (!(priv->phy->channels_supported[page] & (1 << chan))) {
- WARN_ON(1);
- goto err_tx;
- }
-
- mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
+ struct net_device *dev = skb->dev;
+ int ret;
- if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+ if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
u16 crc = crc_ccitt(0, skb->data, skb->len);
- u8 *data = skb_put(skb, 2);
- data[0] = crc & 0xff;
- data[1] = crc >> 8;
+ put_unaligned_le16(crc, skb_put(skb, 2));
}
- if (skb_cow_head(skb, priv->hw.extra_tx_headroom))
+ if (skb_cow_head(skb, local->hw.extra_tx_headroom))
goto err_tx;
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- kfree_skb(skb);
- return NETDEV_TX_BUSY;
- }
-
/* Stop the netif queue on each sub_if_data object. */
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &priv->slaves, list)
- netif_stop_queue(sdata->dev);
- rcu_read_unlock();
+ ieee802154_stop_queue(&local->hw);
+
+ /* async has priority, sync is the fallback */
+ if (local->ops->xmit_async) {
+ ret = drv_xmit_async(local, skb);
+ if (ret) {
+ ieee802154_wake_queue(&local->hw);
+ goto err_tx;
+ }
- INIT_WORK(&work->work, mac802154_xmit_worker);
- work->skb = skb;
- work->priv = priv;
- work->page = page;
- work->chan = chan;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ } else {
+ INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
+ ieee802154_xmit_cb.skb = skb;
+ ieee802154_xmit_cb.local = local;
- queue_work(priv->dev_workqueue, &work->work);
+ queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+ }
return NETDEV_TX_OK;
@@ -131,3 +119,31 @@ err_tx:
kfree_skb(skb);
return NETDEV_TX_OK;
}
+
+netdev_tx_t
+ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+ skb->skb_iif = dev->ifindex;
+
+ return ieee802154_tx(sdata->local, skb);
+}
+
+netdev_tx_t
+ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+ int rc;
+
+ rc = mac802154_llsec_encrypt(&sdata->sec, skb);
+ if (rc) {
+ netdev_warn(dev, "encryption failed: %i\n", rc);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ skb->skb_iif = dev->ifindex;
+
+ return ieee802154_tx(sdata->local, skb);
+}
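A transmit-path sketch (illustrative only; foo_* is hypothetical): the fallback above means a driver may expose only a blocking xmit_sync() callback and still work, since mac802154 then funnels frames through the workqueue; a driver that can transmit without sleeping provides xmit_async() instead and later reports completion via ieee802154_xmit_complete().

/* blocking transmit -- may sleep, called from the worker above */
static int foo_xmit_sync(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* program the frame into the transceiver and wait for TX done */
	return 0;
}

static const struct ieee802154_ops foo_ops = {
	/* .start, .stop, .ed, .set_channel etc. omitted for brevity */
	.xmit_sync = foo_xmit_sync,
	/* .xmit_async = foo_xmit_async, preferred when available */
};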
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
new file mode 100644
index 000000000000..5fc979027919
--- /dev/null
+++ b/net/mac802154/util.c
@@ -0,0 +1,84 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Based on: net/mac80211/util.c
+ */
+
+#include "ieee802154_i.h"
+
+/* privid for wpan_phys to determine whether they belong to us or not */
+const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
+
+void ieee802154_wake_queue(struct ieee802154_hw *hw)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+ struct ieee802154_sub_if_data *sdata;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!sdata->dev)
+ continue;
+
+ netif_wake_queue(sdata->dev);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee802154_wake_queue);
+
+void ieee802154_stop_queue(struct ieee802154_hw *hw)
+{
+ struct ieee802154_local *local = hw_to_local(hw);
+ struct ieee802154_sub_if_data *sdata;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!sdata->dev)
+ continue;
+
+ netif_stop_queue(sdata->dev);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee802154_stop_queue);
+
+enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
+{
+ struct ieee802154_local *local =
+ container_of(timer, struct ieee802154_local, ifs_timer);
+
+ ieee802154_wake_queue(&local->hw);
+
+ return HRTIMER_NORESTART;
+}
+
+void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+ bool ifs_handling)
+{
+ if (ifs_handling) {
+ struct ieee802154_local *local = hw_to_local(hw);
+
+ if (skb->len > 18)
+ hrtimer_start(&local->ifs_timer,
+ ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ else
+ hrtimer_start(&local->ifs_timer,
+ ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ consume_skb(skb);
+ } else {
+ ieee802154_wake_queue(hw);
+ consume_skb(skb);
+ }
+}
+EXPORT_SYMBOL(ieee802154_xmit_complete);
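A completion sketch (illustrative only; foo_* is hypothetical): an async-capable driver reports TX completion from its interrupt handler, and passing ifs_handling=true lets the hrtimer above enforce the SIFS/LIFS gap before the queues are woken again.

static void foo_tx_done_irq(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* true: arm the SIFS/LIFS timer before waking the netif queues */
	ieee802154_xmit_complete(hw, skb, true);
}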
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
deleted file mode 100644
index 4ab86a57dca5..000000000000
--- a/net/mac802154/wpan.c
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * Copyright 2007-2012 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- * Sergey Lapin <slapin@ossfans.org>
- * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
- * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-
-#include <net/rtnetlink.h>
-#include <linux/nl802154.h>
-#include <net/af_ieee802154.h>
-#include <net/mac802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/ieee802154.h>
-#include <net/wpan-phy.h>
-
-#include "mac802154.h"
-
-static int mac802154_wpan_update_llsec(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
- int rc = 0;
-
- if (ops->llsec) {
- struct ieee802154_llsec_params params;
- int changed = 0;
-
- params.pan_id = priv->pan_id;
- changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
-
- params.hwaddr = priv->extended_addr;
- changed |= IEEE802154_LLSEC_PARAM_HWADDR;
-
- rc = ops->llsec->set_params(dev, &params, changed);
- }
-
- return rc;
-}
-
-static int
-mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct sockaddr_ieee802154 *sa =
- (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
- int err = -ENOIOCTLCMD;
-
- spin_lock_bh(&priv->mib_lock);
-
- switch (cmd) {
- case SIOCGIFADDR:
- {
- u16 pan_id, short_addr;
-
- pan_id = le16_to_cpu(priv->pan_id);
- short_addr = le16_to_cpu(priv->short_addr);
- if (pan_id == IEEE802154_PANID_BROADCAST ||
- short_addr == IEEE802154_ADDR_BROADCAST) {
- err = -EADDRNOTAVAIL;
- break;
- }
-
- sa->family = AF_IEEE802154;
- sa->addr.addr_type = IEEE802154_ADDR_SHORT;
- sa->addr.pan_id = pan_id;
- sa->addr.short_addr = short_addr;
-
- err = 0;
- break;
- }
- case SIOCSIFADDR:
- dev_warn(&dev->dev,
- "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n");
- if (sa->family != AF_IEEE802154 ||
- sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
- sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
- sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
- sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
- err = -EINVAL;
- break;
- }
-
- priv->pan_id = cpu_to_le16(sa->addr.pan_id);
- priv->short_addr = cpu_to_le16(sa->addr.short_addr);
-
- err = mac802154_wpan_update_llsec(dev);
- break;
- }
-
- spin_unlock_bh(&priv->mib_lock);
- return err;
-}
-
-static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (netif_running(dev))
- return -EBUSY;
-
- /* FIXME: validate addr */
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- mac802154_dev_set_ieee_addr(dev);
- return mac802154_wpan_update_llsec(dev);
-}
-
-int mac802154_set_mac_params(struct net_device *dev,
- const struct ieee802154_mac_params *params)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
-
- mutex_lock(&priv->hw->slaves_mtx);
- priv->mac_params = *params;
- mutex_unlock(&priv->hw->slaves_mtx);
-
- return 0;
-}
-
-void mac802154_get_mac_params(struct net_device *dev,
- struct ieee802154_mac_params *params)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
-
- mutex_lock(&priv->hw->slaves_mtx);
- *params = priv->mac_params;
- mutex_unlock(&priv->hw->slaves_mtx);
-}
-
-static int mac802154_wpan_open(struct net_device *dev)
-{
- int rc;
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct wpan_phy *phy = priv->hw->phy;
-
- rc = mac802154_slave_open(dev);
- if (rc < 0)
- return rc;
-
- mutex_lock(&phy->pib_lock);
-
- if (phy->set_txpower) {
- rc = phy->set_txpower(phy, priv->mac_params.transmit_power);
- if (rc < 0)
- goto out;
- }
-
- if (phy->set_lbt) {
- rc = phy->set_lbt(phy, priv->mac_params.lbt);
- if (rc < 0)
- goto out;
- }
-
- if (phy->set_cca_mode) {
- rc = phy->set_cca_mode(phy, priv->mac_params.cca_mode);
- if (rc < 0)
- goto out;
- }
-
- if (phy->set_cca_ed_level) {
- rc = phy->set_cca_ed_level(phy, priv->mac_params.cca_ed_level);
- if (rc < 0)
- goto out;
- }
-
- if (phy->set_csma_params) {
- rc = phy->set_csma_params(phy, priv->mac_params.min_be,
- priv->mac_params.max_be,
- priv->mac_params.csma_retries);
- if (rc < 0)
- goto out;
- }
-
- if (phy->set_frame_retries) {
- rc = phy->set_frame_retries(phy,
- priv->mac_params.frame_retries);
- if (rc < 0)
- goto out;
- }
-
- mutex_unlock(&phy->pib_lock);
- return 0;
-
-out:
- mutex_unlock(&phy->pib_lock);
- return rc;
-}
-
-static int mac802154_set_header_security(struct mac802154_sub_if_data *priv,
- struct ieee802154_hdr *hdr,
- const struct ieee802154_mac_cb *cb)
-{
- struct ieee802154_llsec_params params;
- u8 level;
-
- mac802154_llsec_get_params(&priv->sec, &params);
-
- if (!params.enabled && cb->secen_override && cb->secen)
- return -EINVAL;
- if (!params.enabled ||
- (cb->secen_override && !cb->secen) ||
- !params.out_level)
- return 0;
- if (cb->seclevel_override && !cb->seclevel)
- return -EINVAL;
-
- level = cb->seclevel_override ? cb->seclevel : params.out_level;
-
- hdr->fc.security_enabled = 1;
- hdr->sec.level = level;
- hdr->sec.key_id_mode = params.out_key.mode;
- if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
- hdr->sec.short_src = params.out_key.short_source;
- else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
- hdr->sec.extended_src = params.out_key.extended_source;
- hdr->sec.key_id = params.out_key.id;
-
- return 0;
-}
-
-static int mac802154_header_create(struct sk_buff *skb,
- struct net_device *dev,
- unsigned short type,
- const void *daddr,
- const void *saddr,
- unsigned len)
-{
- struct ieee802154_hdr hdr;
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
- struct ieee802154_mac_cb *cb = mac_cb(skb);
- int hlen;
-
- if (!daddr)
- return -EINVAL;
-
- memset(&hdr.fc, 0, sizeof(hdr.fc));
- hdr.fc.type = cb->type;
- hdr.fc.security_enabled = cb->secen;
- hdr.fc.ack_request = cb->ackreq;
- hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
-
- if (mac802154_set_header_security(priv, &hdr, cb) < 0)
- return -EINVAL;
-
- if (!saddr) {
- spin_lock_bh(&priv->mib_lock);
-
- if (priv->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
- priv->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
- priv->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
- hdr.source.mode = IEEE802154_ADDR_LONG;
- hdr.source.extended_addr = priv->extended_addr;
- } else {
- hdr.source.mode = IEEE802154_ADDR_SHORT;
- hdr.source.short_addr = priv->short_addr;
- }
-
- hdr.source.pan_id = priv->pan_id;
-
- spin_unlock_bh(&priv->mib_lock);
- } else {
- hdr.source = *(const struct ieee802154_addr *)saddr;
- }
-
- hdr.dest = *(const struct ieee802154_addr *)daddr;
-
- hlen = ieee802154_hdr_push(skb, &hdr);
- if (hlen < 0)
- return -EINVAL;
-
- skb_reset_mac_header(skb);
- skb->mac_len = hlen;
-
- if (len > ieee802154_max_payload(&hdr))
- return -EMSGSIZE;
-
- return hlen;
-}
-
-static int
-mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
-{
- struct ieee802154_hdr hdr;
- struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
-
- if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
- pr_debug("malformed packet\n");
- return 0;
- }
-
- *addr = hdr.source;
- return sizeof(*addr);
-}
-
-static netdev_tx_t
-mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv;
- u8 chan, page;
- int rc;
-
- priv = netdev_priv(dev);
-
- spin_lock_bh(&priv->mib_lock);
- chan = priv->chan;
- page = priv->page;
- spin_unlock_bh(&priv->mib_lock);
-
- if (chan == MAC802154_CHAN_NONE ||
- page >= WPAN_NUM_PAGES ||
- chan >= WPAN_NUM_CHANNELS) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- rc = mac802154_llsec_encrypt(&priv->sec, skb);
- if (rc) {
- pr_warn("encryption failed: %i\n", rc);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- skb->skb_iif = dev->ifindex;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- return mac802154_tx(priv->hw, skb, page, chan);
-}
-
-static struct header_ops mac802154_header_ops = {
- .create = mac802154_header_create,
- .parse = mac802154_header_parse,
-};
-
-static const struct net_device_ops mac802154_wpan_ops = {
- .ndo_open = mac802154_wpan_open,
- .ndo_stop = mac802154_slave_close,
- .ndo_start_xmit = mac802154_wpan_xmit,
- .ndo_do_ioctl = mac802154_wpan_ioctl,
- .ndo_set_mac_address = mac802154_wpan_mac_addr,
-};
-
-static void mac802154_wpan_free(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv = netdev_priv(dev);
-
- mac802154_llsec_destroy(&priv->sec);
-
- free_netdev(dev);
-}
-
-void mac802154_wpan_setup(struct net_device *dev)
-{
- struct mac802154_sub_if_data *priv;
-
- dev->addr_len = IEEE802154_ADDR_LEN;
- memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-
- dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
- dev->header_ops = &mac802154_header_ops;
- dev->needed_tailroom = 2 + 16; /* FCS + MIC */
- dev->mtu = IEEE802154_MTU;
- dev->tx_queue_len = 300;
- dev->type = ARPHRD_IEEE802154;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
- dev->watchdog_timeo = 0;
-
- dev->destructor = mac802154_wpan_free;
- dev->netdev_ops = &mac802154_wpan_ops;
- dev->ml_priv = &mac802154_mlme_wpan;
-
- priv = netdev_priv(dev);
- priv->type = IEEE802154_DEV_WPAN;
-
- priv->chan = MAC802154_CHAN_NONE;
- priv->page = 0;
-
- spin_lock_init(&priv->mib_lock);
- mutex_init(&priv->sec_mtx);
-
- get_random_bytes(&priv->bsn, 1);
- get_random_bytes(&priv->dsn, 1);
-
- /* defaults per 802.15.4-2011 */
- priv->mac_params.min_be = 3;
- priv->mac_params.max_be = 5;
- priv->mac_params.csma_retries = 4;
- priv->mac_params.frame_retries = -1; /* for compatibility, actual default is 3 */
-
- priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
- priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
-
- mac802154_llsec_init(&priv->sec);
-}
-
-static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
-{
- return netif_rx_ni(skb);
-}
-
-static int
-mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
- const struct ieee802154_hdr *hdr)
-{
- __le16 span, sshort;
- int rc;
-
- pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
-
- spin_lock_bh(&sdata->mib_lock);
-
- span = sdata->pan_id;
- sshort = sdata->short_addr;
-
- switch (mac_cb(skb)->dest.mode) {
- case IEEE802154_ADDR_NONE:
- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
- /* FIXME: check if we are PAN coordinator */
- skb->pkt_type = PACKET_OTHERHOST;
- else
- /* ACK comes with both addresses empty */
- skb->pkt_type = PACKET_HOST;
- break;
- case IEEE802154_ADDR_LONG:
- if (mac_cb(skb)->dest.pan_id != span &&
- mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
- skb->pkt_type = PACKET_OTHERHOST;
- else if (mac_cb(skb)->dest.extended_addr == sdata->extended_addr)
- skb->pkt_type = PACKET_HOST;
- else
- skb->pkt_type = PACKET_OTHERHOST;
- break;
- case IEEE802154_ADDR_SHORT:
- if (mac_cb(skb)->dest.pan_id != span &&
- mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
- skb->pkt_type = PACKET_OTHERHOST;
- else if (mac_cb(skb)->dest.short_addr == sshort)
- skb->pkt_type = PACKET_HOST;
- else if (mac_cb(skb)->dest.short_addr ==
- cpu_to_le16(IEEE802154_ADDR_BROADCAST))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_OTHERHOST;
- break;
- default:
- spin_unlock_bh(&sdata->mib_lock);
- pr_debug("invalid dest mode\n");
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-
- spin_unlock_bh(&sdata->mib_lock);
-
- skb->dev = sdata->dev;
-
- rc = mac802154_llsec_decrypt(&sdata->sec, skb);
- if (rc) {
- pr_debug("decryption failed: %i\n", rc);
- goto fail;
- }
-
- sdata->dev->stats.rx_packets++;
- sdata->dev->stats.rx_bytes += skb->len;
-
- switch (mac_cb(skb)->type) {
- case IEEE802154_FC_TYPE_DATA:
- return mac802154_process_data(sdata->dev, skb);
- default:
- pr_warn("ieee802154: bad frame received (type = %d)\n",
- mac_cb(skb)->type);
- goto fail;
- }
-
-fail:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static void mac802154_print_addr(const char *name,
- const struct ieee802154_addr *addr)
-{
- if (addr->mode == IEEE802154_ADDR_NONE)
- pr_debug("%s not present\n", name);
-
- pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
- if (addr->mode == IEEE802154_ADDR_SHORT) {
- pr_debug("%s is short: %04x\n", name,
- le16_to_cpu(addr->short_addr));
- } else {
- u64 hw = swab64((__force u64) addr->extended_addr);
-
- pr_debug("%s is hardware: %8phC\n", name, &hw);
- }
-}
-
-static int mac802154_parse_frame_start(struct sk_buff *skb,
- struct ieee802154_hdr *hdr)
-{
- int hlen;
- struct ieee802154_mac_cb *cb = mac_cb_init(skb);
-
- hlen = ieee802154_hdr_pull(skb, hdr);
- if (hlen < 0)
- return -EINVAL;
-
- skb->mac_len = hlen;
-
- pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
- hdr->seq);
-
- cb->type = hdr->fc.type;
- cb->ackreq = hdr->fc.ack_request;
- cb->secen = hdr->fc.security_enabled;
-
- mac802154_print_addr("destination", &hdr->dest);
- mac802154_print_addr("source", &hdr->source);
-
- cb->source = hdr->source;
- cb->dest = hdr->dest;
-
- if (hdr->fc.security_enabled) {
- u64 key;
-
- pr_debug("seclevel %i\n", hdr->sec.level);
-
- switch (hdr->sec.key_id_mode) {
- case IEEE802154_SCF_KEY_IMPLICIT:
- pr_debug("implicit key\n");
- break;
-
- case IEEE802154_SCF_KEY_INDEX:
- pr_debug("key %02x\n", hdr->sec.key_id);
- break;
-
- case IEEE802154_SCF_KEY_SHORT_INDEX:
- pr_debug("key %04x:%04x %02x\n",
- le32_to_cpu(hdr->sec.short_src) >> 16,
- le32_to_cpu(hdr->sec.short_src) & 0xffff,
- hdr->sec.key_id);
- break;
-
- case IEEE802154_SCF_KEY_HW_INDEX:
- key = swab64((__force u64) hdr->sec.extended_src);
- pr_debug("key source %8phC %02x\n", &key,
- hdr->sec.key_id);
- break;
- }
- }
-
- return 0;
-}
-
-void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
-{
- int ret;
- struct mac802154_sub_if_data *sdata;
- struct ieee802154_hdr hdr;
-
- ret = mac802154_parse_frame_start(skb, &hdr);
- if (ret) {
- pr_debug("got invalid frame\n");
- kfree_skb(skb);
- return;
- }
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &priv->slaves, list) {
- if (sdata->type != IEEE802154_DEV_WPAN ||
- !netif_running(sdata->dev))
- continue;
-
- mac802154_subif_frame(sdata, skb, &hdr);
- skb = NULL;
- break;
- }
- rcu_read_unlock();
-
- if (skb)
- kfree_skb(skb);
-}
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index e3545f21a099..ca27837974fe 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -34,8 +34,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP |
- SKB_GSO_MPLS)))
+ SKB_GSO_IPIP)))
goto out;
/* Setup inner SKB. */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index ae5096ab65eb..b02660fa9eb0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -411,6 +411,13 @@ config NF_NAT_TFTP
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
+config NF_NAT_REDIRECT
+ tristate "IPv4/IPv6 redirect support"
+ depends on NF_NAT
+ help
+ This is the kernel functionality to redirect packets to the
+ local machine through NAT.
+
config NETFILTER_SYNPROXY
tristate
@@ -505,6 +512,15 @@ config NFT_MASQ
This option adds the "masquerade" expression that you can use
to perform NAT in the masquerade flavour.
+config NFT_REDIR
+ depends on NF_TABLES
+ depends on NF_CONNTRACK
+ depends on NF_NAT
+ tristate "Netfilter nf_tables redirect support"
+ help
+ This option adds the "redirect" expression that you can use
+ to perform NAT in the redirect flavour.
+
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
@@ -835,6 +851,7 @@ config NETFILTER_XT_TARGET_RATEEST
config NETFILTER_XT_TARGET_REDIRECT
tristate "REDIRECT target support"
depends on NF_NAT
+ select NF_NAT_REDIRECT
---help---
REDIRECT is a special case of NAT: all incoming connections are
mapped onto the incoming interface's address, causing the packets to
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a9571be3f791..89f73a9e9874 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -51,6 +51,7 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
+obj-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
+obj-$(CONFIG_NFT_REDIR) += nft_redir.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 024a2e25c8a4..fea9ef566427 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
+#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index fee7c64e4dd1..974ff386db0f 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -147,16 +147,22 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#else
#define __CIDR(cidr, i) (cidr)
#endif
+
+/* cidr + 1 is stored in net_prefixes to support /0 */
+#define SCIDR(cidr, i) (__CIDR(cidr, i) + 1)
+
#ifdef IP_SET_HASH_WITH_NETS_PACKED
-/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr, i) (__CIDR(cidr, i) + 1)
+/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
+#define GCIDR(cidr, i) (__CIDR(cidr, i) + 1)
+#define NCIDR(cidr) (cidr)
#else
-#define CIDR(cidr, i) (__CIDR(cidr, i))
+#define GCIDR(cidr, i) (__CIDR(cidr, i))
+#define NCIDR(cidr) (cidr - 1)
#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
-#ifdef IP_SET_HASH_WITH_MULTI
+#ifdef IP_SET_HASH_WITH_NET0
#define NLEN(family) (SET_HOST_MASK(family) + 1)
#else
#define NLEN(family) SET_HOST_MASK(family)
@@ -292,24 +298,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
int i, j;
/* Add in increasing prefix order, so larger cidr first */
- for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
+ for (i = 0, j = -1; i < nets_length && h->nets[i].cidr[n]; i++) {
if (j != -1)
continue;
else if (h->nets[i].cidr[n] < cidr)
j = i;
else if (h->nets[i].cidr[n] == cidr) {
- h->nets[i].nets[n]++;
+ h->nets[cidr - 1].nets[n]++;
return;
}
}
if (j != -1) {
- for (; i > j; i--) {
+ for (; i > j; i--)
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
- h->nets[i].nets[n] = h->nets[i - 1].nets[n];
- }
}
h->nets[i].cidr[n] = cidr;
- h->nets[i].nets[n] = 1;
+ h->nets[cidr - 1].nets[n] = 1;
}
static void
@@ -320,16 +324,12 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
- if (h->nets[i].nets[n] > 1 || i == net_end ||
- h->nets[i + 1].nets[n] == 0) {
- h->nets[i].nets[n]--;
+ h->nets[cidr - 1].nets[n]--;
+ if (h->nets[cidr - 1].nets[n] > 0)
return;
- }
- for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+ for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
- h->nets[j].nets[n] = h->nets[j + 1].nets[n];
- }
- h->nets[j].nets[n] = 0;
+ h->nets[j].cidr[n] = 0;
return;
}
}
@@ -486,7 +486,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
- mtype_del_cidr(h, CIDR(data->cidr, k),
+ mtype_del_cidr(h, SCIDR(data->cidr, k),
nets_length, k);
#endif
ip_set_ext_destroy(set, data);
@@ -633,29 +633,6 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
- if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set)) {
- rcu_read_lock_bh();
- t = rcu_dereference_bh(h->table);
- key = HKEY(value, h->initval, t->htable_bits);
- n = hbucket(t,key);
- if (n->pos) {
- /* Choosing the first entry in the array to replace */
- j = 0;
- goto reuse_slot;
- }
- rcu_read_unlock_bh();
- }
- if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
- /* FIXME: when set is full, we slow down here */
- mtype_expire(set, h, NLEN(set->family), set->dsize);
-
- if (h->elements >= h->maxelem) {
- if (net_ratelimit())
- pr_warn("Set %s is full, maxelem %u reached\n",
- set->name, h->maxelem);
- return -IPSET_ERR_HASH_FULL;
- }
-
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
@@ -680,15 +657,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
j != AHASH_MAX(h) + 1)
j = i;
}
+ if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) {
+ /* Choosing the first entry in the array to replace */
+ j = 0;
+ goto reuse_slot;
+ }
+ if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
+ /* FIXME: when set is full, we slow down here */
+ mtype_expire(set, h, NLEN(set->family), set->dsize);
+
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warn("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
+ ret = -IPSET_ERR_HASH_FULL;
+ goto out;
+ }
+
reuse_slot:
if (j != AHASH_MAX(h) + 1) {
/* Fill out reused slot */
data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++) {
- mtype_del_cidr(h, CIDR(data->cidr, i),
+ mtype_del_cidr(h, SCIDR(data->cidr, i),
NLEN(set->family), i);
- mtype_add_cidr(h, CIDR(d->cidr, i),
+ mtype_add_cidr(h, SCIDR(d->cidr, i),
NLEN(set->family), i);
}
#endif
@@ -705,7 +699,7 @@ reuse_slot:
data = ahash_data(n, n->pos++, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+ mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family),
i);
#endif
h->elements++;
@@ -766,7 +760,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
- mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+ mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family),
j);
#endif
ip_set_ext_destroy(set, data);
@@ -827,15 +821,15 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
u8 nets_length = NLEN(set->family);
pr_debug("test by nets\n");
- for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+ for (; j < nets_length && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
mtype_data_reset_elem(d, &orig);
- mtype_data_netmask(d, h->nets[j].cidr[0], false);
- for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+ mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]), false);
+ for (k = 0; k < nets_length && h->nets[k].cidr[1] && !multi;
k++) {
- mtype_data_netmask(d, h->nets[k].cidr[1], true);
+ mtype_data_netmask(d, NCIDR(h->nets[k].cidr[1]), true);
#else
- mtype_data_netmask(d, h->nets[j].cidr[0]);
+ mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]));
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
@@ -883,7 +877,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* If we test an IP address and not a network address,
* try all possible network sizes */
for (i = 0; i < IPSET_NET_COUNT; i++)
- if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+ if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family))
break;
if (i == IPSET_NET_COUNT) {
ret = mtype_test_cidrs(set, d, ext, mext, flags);
@@ -1107,8 +1101,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
hsize = sizeof(*h);
#ifdef IP_SET_HASH_WITH_NETS
- hsize += sizeof(struct net_prefixes) *
- (set->family == NFPROTO_IPV4 ? 32 : 128);
+ hsize += sizeof(struct net_prefixes) * NLEN(set->family);
#endif
h = kzalloc(hsize, GFP_KERNEL);
if (!h)
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 35dd35873442..758b002130d9 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -115,6 +115,7 @@ iface_add(struct rb_root *root, const char **iface)
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_RBTREE
#define IP_SET_HASH_WITH_MULTI
+#define IP_SET_HASH_WITH_NET0
#define STREQ(a, b) (strcmp(a, b) == 0)
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index da00284b3571..ea8772afb6e7 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -46,6 +46,7 @@ struct hash_netnet4_elem {
__be64 ipcmp;
};
u8 nomatch;
+ u8 padding;
union {
u8 cidr[2];
u16 ccmp;
@@ -271,6 +272,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netnet6_elem {
union nf_inet_addr ip[2];
u8 nomatch;
+ u8 padding;
union {
u8 cidr[2];
u16 ccmp;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index b8053d675fc3..bfaa94c7baa7 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -53,6 +53,7 @@ struct hash_netportnet4_elem {
u8 cidr[2];
u16 ccmp;
};
+ u16 padding;
u8 nomatch:1;
u8 proto;
};
@@ -324,6 +325,7 @@ struct hash_netportnet6_elem {
u8 cidr[2];
u16 ccmp;
};
+ u16 padding;
u8 nomatch:1;
u8 proto;
};
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ac7ba689efe7..b8295a430a56 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -465,8 +465,7 @@ __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
static void ip_vs_service_free(struct ip_vs_service *svc)
{
- if (svc->stats.cpustats)
- free_percpu(svc->stats.cpustats);
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 1a82b29ce8ea..0df17caa8af6 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -37,8 +37,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
rcu_read_unlock();
return pe;
}
- if (pe->module)
- module_put(pe->module);
+ module_put(pe->module);
}
rcu_read_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 4dbcda6258bc..199760c71f39 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -104,8 +104,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
mutex_unlock(&ip_vs_sched_mutex);
return sched;
}
- if (sched->module)
- module_put(sched->module);
+ module_put(sched->module);
}
mutex_unlock(&ip_vs_sched_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7162c86fd50d..c47ffd7a0a70 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -820,8 +820,7 @@ ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
if (!p->pe_data) {
- if (p->pe->module)
- module_put(p->pe->module);
+ module_put(p->pe->module);
return -ENOMEM;
}
p->pe_data_len = pe_data_len;
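
The four ipvs hunks above (ip_vs_ctl.c, ip_vs_pe.c, ip_vs_sched.c, ip_vs_sync.c) all drop the same redundant pattern: guarding a release call with a NULL check when the callee, free_percpu() or module_put(), already tolerates a NULL argument. Below is a minimal userspace sketch of the idea; the struct and function names are illustrative only, and standard free() stands in for the NULL-tolerant kernel helpers (the C standard defines free(NULL) as a no-op).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative wrapper, not from the patch. */
struct service {
	char *stats;
};

static void service_free(struct service *svc)
{
	/*
	 * No "if (svc->stats)" guard needed: free(NULL) is a no-op,
	 * just as free_percpu(NULL) and module_put(NULL) are in the
	 * kernel, which is what lets the hunks above drop the checks.
	 */
	free(svc->stats);
	svc->stats = NULL;
}

int main(void)
{
	struct service svc = { .stats = malloc(32) };

	service_free(&svc);
	service_free(&svc);	/* second call is harmless: stats is NULL */
	printf("released twice without an explicit NULL check\n");
	return 0;
}
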
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bd90bf8107da..3aedbda7658a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -293,7 +293,6 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
&dest->addr.ip, &dest_dst->dst_saddr.ip,
atomic_read(&rt->dst.__refcnt));
}
- daddr = dest->addr.ip;
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.ip;
} else {
@@ -344,7 +343,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
skb_dst_drop(skb);
if (noref) {
if (!local)
- skb_dst_set_noref_force(skb, &rt->dst);
+ skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, dst_clone(&rt->dst));
} else
@@ -488,7 +487,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
skb_dst_drop(skb);
if (noref) {
if (!local)
- skb_dst_set_noref_force(skb, &rt->dst);
+ skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, dst_clone(&rt->dst));
} else
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5016a6929085..a11674806707 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -824,22 +824,19 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
}
- /*
- * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
- * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
- */
- memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
- offsetof(struct nf_conn, proto) -
- offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+ ct->status = 0;
/* Don't set timer yet: wait for confirmation */
setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
write_pnet(&ct->ct_net, net);
+ memset(&ct->__nfct_init_offset[0], 0,
+ offsetof(struct nf_conn, proto) -
+ offsetof(struct nf_conn, __nfct_init_offset[0]));
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
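
The __nf_conntrack_alloc() hunk above replaces the old memset anchored at tuplehash[IP_CT_DIR_MAX] with one anchored at a dedicated __nfct_init_offset[0] marker, performed after the fields that must survive (tuples, saved hash, timer, net pointer) are set up, with ct->status cleared explicitly. The sketch below shows the general offsetof-based partial clear on a made-up layout; struct obj is illustrative and not the real struct nf_conn, and the zero-length marker array is the same GNU C extension the kernel relies on.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Illustrative layout only. */
struct obj {
	unsigned long keep_a;		/* must survive re-init */
	unsigned long keep_b;
	char zero_start[0];		/* marker, like __nfct_init_offset[0] */
	int state;
	int refs;
	long proto_data;		/* first field NOT cleared */
};

static void obj_reinit(struct obj *o)
{
	/*
	 * One memset clears everything between the marker and proto_data,
	 * leaving keep_a/keep_b before it and proto_data after it intact.
	 */
	memset(&o->zero_start[0], 0,
	       offsetof(struct obj, proto_data) -
	       offsetof(struct obj, zero_start[0]));
}

int main(void)
{
	struct obj o = {
		.keep_a = 1, .keep_b = 2,
		.state = 7, .refs = 3, .proto_data = 42,
	};

	obj_reinit(&o);
	printf("keep_a=%lu state=%d refs=%d proto_data=%ld\n",
	       o.keep_a, o.state, o.refs, o.proto_data);	/* 1 0 0 42 */
	return 0;
}
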
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 3a3a60b126e0..1d69f5b9748f 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -728,7 +728,8 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
/* If the calling party is on the same side of the forward-to party,
* we don't need to track the second call */
-static int callforward_do_filter(const union nf_inet_addr *src,
+static int callforward_do_filter(struct net *net,
+ const union nf_inet_addr *src,
const union nf_inet_addr *dst,
u_int8_t family)
{
@@ -750,9 +751,9 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->ip;
- if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ if (!afinfo->route(net, (struct dst_entry **)&rt1,
flowi4_to_flowi(&fl1), false)) {
- if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ if (!afinfo->route(net, (struct dst_entry **)&rt2,
flowi4_to_flowi(&fl2), false)) {
if (rt_nexthop(rt1, fl1.daddr) ==
rt_nexthop(rt2, fl2.daddr) &&
@@ -774,9 +775,9 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->in6;
- if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ if (!afinfo->route(net, (struct dst_entry **)&rt1,
flowi6_to_flowi(&fl1), false)) {
- if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ if (!afinfo->route(net, (struct dst_entry **)&rt2,
flowi6_to_flowi(&fl2), false)) {
if (ipv6_addr_equal(rt6_nexthop(rt1),
rt6_nexthop(rt2)) &&
@@ -807,6 +808,7 @@ static int expect_callforwarding(struct sk_buff *skb,
__be16 port;
union nf_inet_addr addr;
struct nf_conntrack_expect *exp;
+ struct net *net = nf_ct_net(ct);
typeof(nat_callforwarding_hook) nat_callforwarding;
/* Read alternativeAddress */
@@ -816,7 +818,7 @@ static int expect_callforwarding(struct sk_buff *skb,
/* If the calling party is on the same side of the forward-to party,
* we don't need to track the second call */
if (callforward_filter &&
- callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
+ callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3,
nf_ct_l3num(ct))) {
pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 5b3eae7d4c9a..bd9d31537905 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -250,7 +250,7 @@ out:
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
-/* appropiate ct lock protecting must be taken by caller */
+/* appropriate ct lock protecting must be taken by caller */
static inline int unhelp(struct nf_conntrack_tuple_hash *i,
const struct nf_conntrack_helper *me)
{
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 6e3b9117db1f..43c926cae9c0 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -19,6 +19,9 @@
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
+#define nft_log_dereference(logger) \
+ rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex))
+
static struct nf_logger *__find_logger(int pf, const char *str_logger)
{
struct nf_logger *log;
@@ -28,8 +31,7 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
if (loggers[pf][i] == NULL)
continue;
- log = rcu_dereference_protected(loggers[pf][i],
- lockdep_is_held(&nf_log_mutex));
+ log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
@@ -45,8 +47,7 @@ void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
return;
mutex_lock(&nf_log_mutex);
- log = rcu_dereference_protected(net->nf.nf_loggers[pf],
- lockdep_is_held(&nf_log_mutex));
+ log = nft_log_dereference(net->nf.nf_loggers[pf]);
if (log == NULL)
rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
@@ -61,8 +62,7 @@ void nf_log_unset(struct net *net, const struct nf_logger *logger)
mutex_lock(&nf_log_mutex);
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
- log = rcu_dereference_protected(net->nf.nf_loggers[i],
- lockdep_is_held(&nf_log_mutex));
+ log = nft_log_dereference(net->nf.nf_loggers[i]);
if (log == logger)
RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL);
}
@@ -75,6 +75,7 @@ EXPORT_SYMBOL(nf_log_unset);
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
int i;
+ int ret = 0;
if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
return -EINVAL;
@@ -82,16 +83,25 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {
+ for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
+ if (rcu_access_pointer(loggers[i][logger->type])) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+ }
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
rcu_assign_pointer(loggers[i][logger->type], logger);
} else {
- /* register at end of list to honor first register win */
+ if (rcu_access_pointer(loggers[pf][logger->type])) {
+ ret = -EEXIST;
+ goto unlock;
+ }
rcu_assign_pointer(loggers[pf][logger->type], logger);
}
+unlock:
mutex_unlock(&nf_log_mutex);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(nf_log_register);
@@ -144,8 +154,7 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
struct nf_logger *logger;
int ret = -ENOENT;
- logger = loggers[pf][type];
- if (logger == NULL)
+ if (rcu_access_pointer(loggers[pf][type]) == NULL)
request_module("nf-logger-%u-%u", pf, type);
rcu_read_lock();
@@ -297,8 +306,7 @@ static int seq_show(struct seq_file *s, void *v)
int i;
struct net *net = seq_file_net(s);
- logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
- lockdep_is_held(&nf_log_mutex));
+ logger = nft_log_dereference(net->nf.nf_loggers[*pos]);
if (!logger)
seq_printf(s, "%2lld NONE (", *pos);
@@ -312,8 +320,7 @@ static int seq_show(struct seq_file *s, void *v)
if (loggers[*pos][i] == NULL)
continue;
- logger = rcu_dereference_protected(loggers[*pos][i],
- lockdep_is_held(&nf_log_mutex));
+ logger = nft_log_dereference(loggers[*pos][i]);
seq_printf(s, "%s", logger->name);
if (i == 0 && loggers[*pos][i + 1] != NULL)
seq_printf(s, ",");
@@ -387,8 +394,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
- logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
- lockdep_is_held(&nf_log_mutex));
+ logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
if (!logger)
table->data = "NONE";
else
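
Two things change in nf_log.c above: every mutex-protected read of an RCU logger pointer now goes through the nft_log_dereference() wrapper so the lockdep annotation is written once, and nf_log_register() now refuses to overwrite an already-registered logger, returning -EEXIST instead of silently keeping the first one. The toy userspace sketch below mirrors only the second part, the "check every slot, then fill" shape under a mutex; the registry, its size, and the names are illustrative, not the kernel's.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NPROTO 4

/* Toy registry mirroring the shape of nf_log_register(). */
static const char *loggers[NPROTO];
static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;

static int logger_register(int pf, const char *name)
{
	int i, ret = 0;

	pthread_mutex_lock(&reg_mutex);
	if (pf < 0) {			/* "unspecified": claim every slot */
		for (i = 0; i < NPROTO; i++) {
			if (loggers[i]) {	/* any occupied slot aborts */
				ret = -EEXIST;
				goto unlock;
			}
		}
		for (i = 0; i < NPROTO; i++)
			loggers[i] = name;
	} else {
		if (loggers[pf]) {
			ret = -EEXIST;
			goto unlock;
		}
		loggers[pf] = name;
	}
unlock:
	pthread_mutex_unlock(&reg_mutex);
	return ret;
}

int main(void)
{
	printf("%d\n", logger_register(1, "ipt_LOG"));		/* 0 */
	printf("%d\n", logger_register(1, "nfnetlink_log"));	/* -EEXIST */
	printf("%d\n", logger_register(-1, "catch-all"));	/* -EEXIST */
	return 0;
}
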
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
new file mode 100644
index 000000000000..97b75f9bfbcd
--- /dev/null
+++ b/net/netfilter/nf_nat_redirect.c
@@ -0,0 +1,127 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/types.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/addrconf.h>
+#include <net/checksum.h>
+#include <net/protocol.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_redirect.h>
+
+unsigned int
+nf_nat_redirect_ipv4(struct sk_buff *skb,
+ const struct nf_nat_ipv4_multi_range_compat *mr,
+ unsigned int hooknum)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ __be32 newdst;
+ struct nf_nat_range newrange;
+
+ NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_OUT);
+
+ ct = nf_ct_get(skb, &ctinfo);
+ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+ /* Local packets: make them go to loopback */
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst = htonl(0x7F000001);
+ } else {
+ struct in_device *indev;
+ struct in_ifaddr *ifa;
+
+ newdst = 0;
+
+ rcu_read_lock();
+ indev = __in_dev_get_rcu(skb->dev);
+ if (indev != NULL) {
+ ifa = indev->ifa_list;
+ newdst = ifa->ifa_local;
+ }
+ rcu_read_unlock();
+
+ if (!newdst)
+ return NF_DROP;
+ }
+
+ /* Transfer from original range. */
+ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+ memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+ newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.ip = newdst;
+ newrange.max_addr.ip = newdst;
+ newrange.min_proto = mr->range[0].min;
+ newrange.max_proto = mr->range[0].max;
+
+ /* Hand modified range to generic setup. */
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+
+static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
+unsigned int
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ unsigned int hooknum)
+{
+ struct nf_nat_range newrange;
+ struct in6_addr newdst;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst = loopback_addr;
+ } else {
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+ bool addr = false;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
+ if (idev != NULL) {
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ newdst = ifa->addr;
+ addr = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!addr)
+ return NF_DROP;
+ }
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.in6 = newdst;
+ newrange.max_addr.in6 = newdst;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 66e8425dbfe7..129a8daa4abf 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2477,7 +2477,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
int err;
- /* Verify existance before starting dump */
+ /* Verify existence before starting dump */
err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
if (err < 0)
return err;
@@ -3665,8 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb)
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
- if (nft_trans_chain_stats(trans))
- free_percpu(nft_trans_chain_stats(trans));
+ free_percpu(nft_trans_chain_stats(trans));
nft_trans_destroy(trans);
} else {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 5f1be5ba3559..11d85b3813f2 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -12,6 +12,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
@@ -337,9 +340,6 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
skb = nfnetlink_alloc_skb(net, pkt_size,
peer_portid, GFP_ATOMIC);
- if (!skb)
- pr_err("nfnetlink_log: can't even alloc %u bytes\n",
- pkt_size);
}
}
@@ -570,10 +570,8 @@ __build_packet_message(struct nfnl_log_net *log,
struct nlattr *nla;
int size = nla_attr_size(data_len);
- if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
- printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
- return -1;
- }
+ if (skb_tailroom(inst->skb) < nla_total_size(data_len))
+ goto nla_put_failure;
nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
nla->nla_type = NFULA_PAYLOAD;
@@ -1069,19 +1067,19 @@ static int __init nfnetlink_log_init(void)
netlink_register_notifier(&nfulnl_rtnl_notifier);
status = nfnetlink_subsys_register(&nfulnl_subsys);
if (status < 0) {
- pr_err("log: failed to create netlink socket\n");
+ pr_err("failed to create netlink socket\n");
goto cleanup_netlink_notifier;
}
status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
if (status < 0) {
- pr_err("log: failed to register logger\n");
+ pr_err("failed to register logger\n");
goto cleanup_subsys;
}
status = register_pernet_subsys(&nfnl_log_net_ops);
if (status < 0) {
- pr_err("log: failed to register pernet ops\n");
+ pr_err("failed to register pernet ops\n");
goto cleanup_logger;
}
return status;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 8892b7b6184a..1e316ce4cb5d 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -65,7 +65,7 @@ static int nft_hash_insert(const struct nft_set *set,
if (set->flags & NFT_SET_MAP)
nft_data_copy(he->data, &elem->data);
- rhashtable_insert(priv, &he->node, GFP_KERNEL);
+ rhashtable_insert(priv, &he->node);
return 0;
}
@@ -88,7 +88,7 @@ static void nft_hash_remove(const struct nft_set *set,
pprev = elem->cookie;
he = rht_dereference((*pprev), priv);
- rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL);
+ rhashtable_remove_pprev(priv, he, pprev);
synchronize_rcu();
kfree(he);
@@ -153,10 +153,12 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
return sizeof(struct rhashtable);
}
-static int lockdep_nfnl_lock_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int lockdep_nfnl_lock_is_held(void *parent)
{
return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
}
+#endif
static int nft_hash_init(const struct nft_set *set,
const struct nft_set_desc *desc,
@@ -171,7 +173,9 @@ static int nft_hash_init(const struct nft_set *set,
.hashfn = jhash,
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
+#ifdef CONFIG_PROVE_LOCKING
.mutex_is_held = lockdep_nfnl_lock_is_held,
+#endif
};
return rhashtable_init(priv, &params);
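
The nft_hash.c hunk (and the matching af_netlink.c hunk further down) compiles the .mutex_is_held callback only under CONFIG_PROVE_LOCKING, since the lockdep-only member of the parameter struct exists only in lockdep-enabled builds, and the callback gained a parent argument. The sketch below shows only the bare conditional-compilation shape, with an invented DEBUG_LOCKING macro standing in for the Kconfig option; it is not the rhashtable API.

#include <stdio.h>

/* DEBUG_LOCKING is an illustrative stand-in for CONFIG_PROVE_LOCKING. */
struct table_params {
	unsigned int buckets;
#ifdef DEBUG_LOCKING
	int (*mutex_is_held)(void *parent);
#endif
};

#ifdef DEBUG_LOCKING
static int my_lock_is_held(void *parent)
{
	(void)parent;
	return 1;	/* a real build would ask lockdep */
}
#endif

int main(void)
{
	struct table_params params = {
		.buckets = 64,
#ifdef DEBUG_LOCKING
		.mutex_is_held = my_lock_is_held,
#endif
	};

	printf("buckets=%u\n", params.buckets);
	return 0;
}
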
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 1e7c076ca63a..e99911eda915 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -165,6 +165,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
dest->data[0] = out->group;
break;
+ case NFT_META_CGROUP:
+ if (skb->sk == NULL)
+ break;
+
+ dest->data[0] = skb->sk->sk_classid;
+ break;
default:
WARN_ON(1);
goto err;
@@ -240,6 +246,7 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_CPU:
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
+ case NFT_META_CGROUP:
break;
default:
return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
new file mode 100644
index 000000000000..9e8093f28311
--- /dev/null
+++ b/net/netfilter/nft_redir.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_redir.h>
+
+const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
+ [NFTA_REDIR_REG_PROTO_MIN] = { .type = NLA_U32 },
+ [NFTA_REDIR_REG_PROTO_MAX] = { .type = NLA_U32 },
+ [NFTA_REDIR_FLAGS] = { .type = NLA_U32 },
+};
+EXPORT_SYMBOL_GPL(nft_redir_policy);
+
+int nft_redir_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_redir *priv = nft_expr_priv(expr);
+ int err;
+
+ err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+ priv->sreg_proto_min =
+ ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MIN]));
+
+ err = nft_validate_input_register(priv->sreg_proto_min);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
+ priv->sreg_proto_max =
+ ntohl(nla_get_be32(tb[NFTA_REDIR_REG_PROTO_MAX]));
+
+ err = nft_validate_input_register(priv->sreg_proto_max);
+ if (err < 0)
+ return err;
+ } else {
+ priv->sreg_proto_max = priv->sreg_proto_min;
+ }
+ }
+
+ if (tb[NFTA_REDIR_FLAGS]) {
+ priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));
+ if (priv->flags & ~NF_NAT_RANGE_MASK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_redir_init);
+
+int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_redir *priv = nft_expr_priv(expr);
+
+ if (priv->sreg_proto_min) {
+ if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MIN,
+ htonl(priv->sreg_proto_min)))
+ goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_REDIR_REG_PROTO_MAX,
+ htonl(priv->sreg_proto_max)))
+ goto nla_put_failure;
+ }
+
+ if (priv->flags != 0 &&
+ nla_put_be32(skb, NFTA_REDIR_FLAGS, htonl(priv->flags)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(nft_redir_dump);
+
+int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index ae8271652efa..3f83d38c4e5b 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -37,7 +37,8 @@ dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (!skb_make_writable(skb, sizeof(struct iphdr)))
return NF_DROP;
- ipv4_change_dsfield(ip_hdr(skb), (__u8)(~XT_DSCP_MASK),
+ ipv4_change_dsfield(ip_hdr(skb),
+ (__force __u8)(~XT_DSCP_MASK),
dinfo->dscp << XT_DSCP_SHIFT);
}
@@ -54,7 +55,8 @@ dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
- ipv6_change_dsfield(ipv6_hdr(skb), (__u8)(~XT_DSCP_MASK),
+ ipv6_change_dsfield(ipv6_hdr(skb),
+ (__force __u8)(~XT_DSCP_MASK),
dinfo->dscp << XT_DSCP_SHIFT);
}
return XT_CONTINUE;
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
index 22a10309297c..03f0b370e178 100644
--- a/net/netfilter/xt_REDIRECT.c
+++ b/net/netfilter/xt_REDIRECT.c
@@ -26,48 +26,12 @@
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
-
-static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+#include <net/netfilter/nf_nat_redirect.h>
static unsigned int
redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
- const struct nf_nat_range *range = par->targinfo;
- struct nf_nat_range newrange;
- struct in6_addr newdst;
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (par->hooknum == NF_INET_LOCAL_OUT)
- newdst = loopback_addr;
- else {
- struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
- bool addr = false;
-
- rcu_read_lock();
- idev = __in6_dev_get(skb->dev);
- if (idev != NULL) {
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- newdst = ifa->addr;
- addr = true;
- break;
- }
- }
- rcu_read_unlock();
-
- if (!addr)
- return NF_DROP;
- }
-
- newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.in6 = newdst;
- newrange.max_addr.in6 = newdst;
- newrange.min_proto = range->min_proto;
- newrange.max_proto = range->max_proto;
-
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+ return nf_nat_redirect_ipv6(skb, par->targinfo, par->hooknum);
}
static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
@@ -98,48 +62,7 @@ static int redirect_tg4_check(const struct xt_tgchk_param *par)
static unsigned int
redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- __be32 newdst;
- const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
- struct nf_nat_range newrange;
-
- NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
- par->hooknum == NF_INET_LOCAL_OUT);
-
- ct = nf_ct_get(skb, &ctinfo);
- NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
- /* Local packets: make them go to loopback */
- if (par->hooknum == NF_INET_LOCAL_OUT)
- newdst = htonl(0x7F000001);
- else {
- struct in_device *indev;
- struct in_ifaddr *ifa;
-
- newdst = 0;
-
- rcu_read_lock();
- indev = __in_dev_get_rcu(skb->dev);
- if (indev && (ifa = indev->ifa_list))
- newdst = ifa->ifa_local;
- rcu_read_unlock();
-
- if (!newdst)
- return NF_DROP;
- }
-
- /* Transfer from original range. */
- memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
- memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
- newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.ip = newdst;
- newrange.max_addr.ip = newdst;
- newrange.min_proto = mr->range[0].min;
- newrange.max_proto = mr->range[0].max;
-
- /* Hand modified range to generic setup. */
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+ return nf_nat_redirect_ipv4(skb, par->targinfo, par->hooknum);
}
static struct xt_target redirect_tg_reg[] __read_mostly = {
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index fbc66bb250d5..29ba6218a820 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -134,6 +134,7 @@ static bool add_hlist(struct hlist_head *head,
static unsigned int check_hlist(struct net *net,
struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
+ u16 zone,
bool *addit)
{
const struct nf_conntrack_tuple_hash *found;
@@ -147,8 +148,7 @@ static unsigned int check_hlist(struct net *net,
/* check the saved connections */
hlist_for_each_entry_safe(conn, n, head, node) {
- found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
- &conn->tuple);
+ found = nf_conntrack_find_get(net, zone, &conn->tuple);
if (found == NULL) {
hlist_del(&conn->node);
kmem_cache_free(connlimit_conn_cachep, conn);
@@ -201,7 +201,7 @@ static unsigned int
count_tree(struct net *net, struct rb_root *root,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr, const union nf_inet_addr *mask,
- u8 family)
+ u8 family, u16 zone)
{
struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
@@ -229,7 +229,7 @@ count_tree(struct net *net, struct rb_root *root,
} else {
/* same source network -> be counted! */
unsigned int count;
- count = check_hlist(net, &rbconn->hhead, tuple, &addit);
+ count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
tree_nodes_free(root, gc_nodes, gc_count);
if (!addit)
@@ -245,7 +245,7 @@ count_tree(struct net *net, struct rb_root *root,
continue;
/* only used for GC on hhead, retval and 'addit' ignored */
- check_hlist(net, &rbconn->hhead, tuple, &addit);
+ check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
if (hlist_empty(&rbconn->hhead))
gc_nodes[gc_count++] = rbconn;
}
@@ -290,7 +290,7 @@ static int count_them(struct net *net,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
- u_int8_t family)
+ u_int8_t family, u16 zone)
{
struct rb_root *root;
int count;
@@ -306,7 +306,7 @@ static int count_them(struct net *net,
spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
- count = count_tree(net, root, tuple, addr, mask, family);
+ count = count_tree(net, root, tuple, addr, mask, family, zone);
spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
@@ -324,13 +324,16 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int connections;
+ u16 zone = NF_CT_DEFAULT_ZONE;
ct = nf_ct_get(skb, &ctinfo);
- if (ct != NULL)
+ if (ct != NULL) {
tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- par->family, &tuple))
+ zone = nf_ct_zone(ct);
+ } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ par->family, &tuple)) {
goto hotdrop;
+ }
if (par->family == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -343,7 +346,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
connections = count_them(net, info->data, tuple_ptr, &addr,
- &info->mask, par->family);
+ &info->mask, par->family, zone);
if (connections == 0)
/* kmalloc failed, drop it entirely */
goto hotdrop;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index a9faae89f955..30dbe34915ae 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -43,25 +43,29 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
-static unsigned int ip_list_tot = 100;
-static unsigned int ip_pkt_list_tot = 20;
-static unsigned int ip_list_hash_size = 0;
-static unsigned int ip_list_perms = 0644;
-static unsigned int ip_list_uid = 0;
-static unsigned int ip_list_gid = 0;
+static unsigned int ip_list_tot __read_mostly = 100;
+static unsigned int ip_list_hash_size __read_mostly;
+static unsigned int ip_list_perms __read_mostly = 0644;
+static unsigned int ip_list_uid __read_mostly;
+static unsigned int ip_list_gid __read_mostly;
module_param(ip_list_tot, uint, 0400);
-module_param(ip_pkt_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
+/* retained for backwards compatibility */
+static unsigned int ip_pkt_list_tot __read_mostly;
+module_param(ip_pkt_list_tot, uint, 0400);
+MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+
+#define XT_RECENT_MAX_NSTAMPS 256
+
struct recent_entry {
struct list_head list;
struct list_head lru_list;
@@ -79,6 +83,7 @@ struct recent_table {
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
+ u8 nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[0];
};
@@ -90,7 +95,8 @@ struct recent_net {
#endif
};
-static int recent_net_id;
+static int recent_net_id __read_mostly;
+
static inline struct recent_net *recent_pernet(struct net *net)
{
return net_generic(net, recent_net_id);
@@ -171,12 +177,15 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
{
struct recent_entry *e;
+ unsigned int nstamps_max = t->nstamps_max_mask;
if (t->entries >= ip_list_tot) {
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
recent_entry_remove(t, e);
}
- e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot,
+
+ nstamps_max += 1;
+ e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max,
GFP_ATOMIC);
if (e == NULL)
return NULL;
@@ -197,7 +206,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
- e->index %= ip_pkt_list_tot;
+ e->index &= t->nstamps_max_mask;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
@@ -326,6 +335,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
kuid_t uid;
kgid_t gid;
#endif
+ unsigned int nstamp_mask;
unsigned int i;
int ret = -EINVAL;
size_t sz;
@@ -349,19 +359,33 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
return -EINVAL;
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
return -EINVAL;
- if (info->hit_count > ip_pkt_list_tot) {
- pr_info("hitcount (%u) is larger than "
- "packets to be remembered (%u)\n",
- info->hit_count, ip_pkt_list_tot);
+ if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
+ pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
+ info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
return -EINVAL;
+ if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
+ nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
+ else if (info->hit_count)
+ nstamp_mask = roundup_pow_of_two(info->hit_count) - 1;
+ else
+ nstamp_mask = 32 - 1;
+
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
+ if (info->hit_count > t->nstamps_max_mask) {
+ pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
+ info->hit_count, t->nstamps_max_mask + 1,
+ info->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
t->refcnt++;
ret = 0;
goto out;
@@ -377,6 +401,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
goto out;
}
t->refcnt = 1;
+ t->nstamps_max_mask = nstamp_mask;
memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
@@ -497,9 +522,12 @@ static void recent_seq_stop(struct seq_file *s, void *v)
static int recent_seq_show(struct seq_file *seq, void *v)
{
const struct recent_entry *e = v;
+ struct recent_iter_state *st = seq->private;
+ const struct recent_table *t = st->table;
unsigned int i;
- i = (e->index - 1) % ip_pkt_list_tot;
+ i = (e->index - 1) & t->nstamps_max_mask;
+
if (e->family == NFPROTO_IPV4)
seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
&e->addr.ip, e->ttl, e->stamps[i], e->index);
@@ -717,7 +745,9 @@ static int __init recent_mt_init(void)
{
int err;
- if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255)
+ BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS);
+
+ if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
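
The xt_recent changes above stop indexing the per-address timestamp array with a modulo of the module parameter and instead keep a per-table nstamps_max_mask, computed as roundup_pow_of_two(...) - 1, so the hot path can use a cheap bitwise AND. The standalone sketch below demonstrates why that works: for a power-of-two size, i & (size - 1) equals i % size. The open-coded roundup_pow2() helper is only a stand-in for the kernel's roundup_pow_of_two().

#include <stdio.h>

/* Round n up to the next power of two (mimics the kernel helper). */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested = 20;	/* e.g. the old ip_pkt_list_tot */
	unsigned int mask = roundup_pow2(requested) - 1;	/* 31 */
	unsigned int i;

	for (i = 0; i < 70; i += 13)
		printf("index %2u -> slot %2u (same as mod %2u)\n",
		       i, i & mask, i % (mask + 1));
	return 0;
}
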
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 5732cd64acc0..0d47afea9682 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -157,7 +157,7 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
/* Revision 3 match */
static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
+match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
{
switch (info->op) {
case IPSET_COUNTER_NONE:
@@ -192,14 +192,60 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
return ret;
- if (!match_counter(opt.ext.packets, &info->packets))
+ if (!match_counter0(opt.ext.packets, &info->packets))
return 0;
- return match_counter(opt.ext.bytes, &info->bytes);
+ return match_counter0(opt.ext.bytes, &info->bytes);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
+/* Revision 4 match */
+
+static bool
+match_counter(u64 counter, const struct ip_set_counter_match *info)
+{
+ switch (info->op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == info->value;
+ case IPSET_COUNTER_NE:
+ return counter != info->value;
+ case IPSET_COUNTER_LT:
+ return counter < info->value;
+ case IPSET_COUNTER_GT:
+ return counter > info->value;
+ }
+ return false;
+}
+
+static bool
+set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v4 *info = par->matchinfo;
+ ADT_OPT(opt, par->family, info->match_set.dim,
+ info->match_set.flags, info->flags, UINT_MAX);
+ int ret;
+
+ if (info->packets.op != IPSET_COUNTER_NONE ||
+ info->bytes.op != IPSET_COUNTER_NONE)
+ opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
+
+ ret = match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
+
+ if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
+ return ret;
+
+ if (!match_counter(opt.ext.packets, &info->packets))
+ return 0;
+ return match_counter(opt.ext.bytes, &info->bytes);
+}
+
+#define set_match_v4_checkentry set_match_v1_checkentry
+#define set_match_v4_destroy set_match_v1_destroy
+
/* Revision 0 interface: backward compatible with netfilter/iptables */
static unsigned int
@@ -573,6 +619,27 @@ static struct xt_match set_matches[] __read_mostly = {
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
+ /* new revision for counters support: update, match */
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 4,
+ .match = set_match_v4,
+ .matchsize = sizeof(struct xt_set_info_match_v4),
+ .checkentry = set_match_v4_checkentry,
+ .destroy = set_match_v4_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV6,
+ .revision = 4,
+ .match = set_match_v4,
+ .matchsize = sizeof(struct xt_set_info_match_v4),
+ .checkentry = set_match_v4_checkentry,
+ .destroy = set_match_v4_destroy,
+ .me = THIS_MODULE
+ },
};
static struct xt_target set_targets[] __read_mostly = {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0007b8180397..074cf3e91c6f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -114,14 +114,14 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
DEFINE_MUTEX(nl_sk_hash_lock);
EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
-static int lockdep_nl_sk_hash_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int lockdep_nl_sk_hash_is_held(void *parent)
{
-#ifdef CONFIG_LOCKDEP
if (debug_locks)
return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
-#endif
return 1;
}
+#endif
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
@@ -142,8 +142,7 @@ int netlink_add_tap(struct netlink_tap *nt)
list_add_rcu(&nt->list, &netlink_tap_all);
spin_unlock(&netlink_tap_lock);
- if (nt->module)
- __module_get(nt->module);
+ __module_get(nt->module);
return 0;
}
@@ -526,14 +525,14 @@ out:
return err;
}
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
struct page *p_start, *p_end;
/* First page is flushed through netlink_{get,set}_status */
p_start = pgvec_to_page(hdr + PAGE_SIZE);
- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
while (p_start <= p_end) {
flush_dcache_page(p_start);
p_start++;
@@ -551,9 +550,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
static void netlink_set_status(struct nl_mmap_hdr *hdr,
enum nl_mmap_status status)
{
+ smp_mb();
hdr->nm_status = status;
flush_dcache_page(pgvec_to_page(hdr));
- smp_wmb();
}
static struct nl_mmap_hdr *
@@ -715,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
struct nl_mmap_hdr *hdr;
struct sk_buff *skb;
unsigned int maxlen;
- bool excl = true;
int err = 0, len = 0;
- /* Netlink messages are validated by the receiver before processing.
- * In order to avoid userspace changing the contents of the message
- * after validation, the socket and the ring may only be used by a
- * single process, otherwise we fall back to copying.
- */
- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
- atomic_read(&nlk->mapped) > 1)
- excl = false;
-
mutex_lock(&nlk->pg_vec_lock);
ring = &nlk->tx_ring;
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
do {
+ unsigned int nm_len;
+
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
if (hdr == NULL) {
if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -740,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
schedule();
continue;
}
- if (hdr->nm_len > maxlen) {
+
+ nm_len = ACCESS_ONCE(hdr->nm_len);
+ if (nm_len > maxlen) {
err = -EINVAL;
goto out;
}
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, nm_len);
- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
- skb = alloc_skb_head(GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- sock_hold(sk);
- netlink_ring_setup_skb(skb, sk, ring, hdr);
- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
- __skb_put(skb, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
- atomic_inc(&ring->pending);
- } else {
- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
- if (skb == NULL) {
- err = -ENOBUFS;
- goto out;
- }
- __skb_put(skb, hdr->nm_len);
- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ skb = alloc_skb(nm_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOBUFS;
+ goto out;
}
+ __skb_put(skb, nm_len);
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
netlink_increment_head(ring);
@@ -814,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
hdr->nm_pid = NETLINK_CB(skb).creds.pid;
hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
- netlink_frame_flush_dcache(hdr);
+ netlink_frame_flush_dcache(hdr, hdr->nm_len);
netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
@@ -1092,7 +1071,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
nlk_sk(sk)->portid = portid;
sock_hold(sk);
- rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL);
+ rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
err = 0;
err:
mutex_unlock(&nl_sk_hash_lock);
@@ -1105,7 +1084,7 @@ static void netlink_remove(struct sock *sk)
mutex_lock(&nl_sk_hash_lock);
table = &nl_table[sk->sk_protocol];
- if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) {
+ if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
@@ -2306,7 +2285,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
}
if (netlink_tx_is_mmaped(sk) &&
- msg->msg_iov->iov_base == NULL) {
+ msg->msg_iter.iov->iov_base == NULL) {
err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
siocb);
goto out;
@@ -2326,7 +2305,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT;
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
kfree_skb(skb);
goto out;
}
@@ -2401,7 +2380,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
}
skb_reset_transport_header(data_skb);
- err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
@@ -3130,11 +3109,13 @@ static int __init netlink_proto_init(void)
.head_offset = offsetof(struct netlink_sock, node),
.key_offset = offsetof(struct netlink_sock, portid),
.key_len = sizeof(u32), /* portid */
- .hashfn = arch_fast_hash,
+ .hashfn = jhash,
.max_shift = 16, /* 64K */
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
+#ifdef CONFIG_PROVE_LOCKING
.mutex_is_held = lockdep_nl_sk_hash_is_held,
+#endif
};
if (err != 0)
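
In the af_netlink.c mmap-sendmsg hunk above, the frame length is read from the shared ring exactly once (nm_len = ACCESS_ONCE(hdr->nm_len)) and that snapshot is used for the bounds check, the cache flush, the allocation and the copy, so userspace cannot enlarge the header between validation and memcpy(). The userspace sketch below shows the same snapshot-then-validate-then-use discipline on an invented shared frame layout; it is not the netlink ring format.

#include <stdio.h>
#include <string.h>

#define FRAME_MAX 64

/* Invented descriptor shared with another, possibly hostile, writer. */
struct frame {
	volatile unsigned int len;	/* may change underneath us */
	unsigned char data[FRAME_MAX];
};

static int consume(const struct frame *f, unsigned char *out)
{
	/*
	 * Snapshot the shared length once, as ACCESS_ONCE(hdr->nm_len)
	 * does above; every later check and copy uses the local copy,
	 * so a concurrent writer cannot grow it after validation.
	 */
	unsigned int len = f->len;

	if (len > FRAME_MAX)
		return -1;
	memcpy(out, f->data, len);
	return (int)len;
}

int main(void)
{
	struct frame f = { .len = 5 };
	unsigned char buf[FRAME_MAX];

	memcpy(f.data, "hello", 5);
	printf("copied %d bytes\n", consume(&f, buf));
	return 0;
}
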
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1b06a1fcf3e8..69f1d5e9959f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1113,7 +1113,7 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_put(skb, len);
/* User data follows immediately after the NET/ROM transport header */
- if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
+ if (memcpy_from_msg(skb_transport_header(skb), msg, len)) {
kfree_skb(skb);
err = -EFAULT;
goto out;
@@ -1167,7 +1167,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ er = skb_copy_datagram_msg(skb, 0, msg, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index b60aa35c074f..f72be7433df3 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -17,6 +17,9 @@
#include "digital.h"
+#define DIGITAL_NFC_DEP_N_RETRY_NACK 2
+#define DIGITAL_NFC_DEP_N_RETRY_ATN 2
+
#define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4
#define DIGITAL_NFC_DEP_FRAME_DIR_IN 0xD5
@@ -32,20 +35,32 @@
#define DIGITAL_ATR_REQ_MIN_SIZE 16
#define DIGITAL_ATR_REQ_MAX_SIZE 64
-#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30
-#define DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B \
- (DIGITAL_LR_BITS_PAYLOAD_SIZE_254B >> 4)
+#define DIGITAL_DID_MAX 14
+
+#define DIGITAL_PAYLOAD_SIZE_MAX 254
+#define DIGITAL_PAYLOAD_BITS_TO_PP(s) (((s) & 0x3) << 4)
+#define DIGITAL_PAYLOAD_PP_TO_BITS(s) (((s) >> 4) & 0x3)
+#define DIGITAL_PAYLOAD_BITS_TO_FSL(s) ((s) & 0x3)
+#define DIGITAL_PAYLOAD_FSL_TO_BITS(s) ((s) & 0x3)
+
#define DIGITAL_GB_BIT 0x02
+#define DIGITAL_NFC_DEP_REQ_RES_HEADROOM 2 /* SoD: [SB (NFC-A)] + LEN */
+#define DIGITAL_NFC_DEP_REQ_RES_TAILROOM 2 /* EoD: 2-byte CRC */
+
#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
#define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10
+#define DIGITAL_NFC_DEP_PFB_MI_BIT 0x10
+#define DIGITAL_NFC_DEP_PFB_NACK_BIT 0x10
+#define DIGITAL_NFC_DEP_PFB_DID_BIT 0x04
#define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \
((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT)
-#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & 0x10)
+#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_MI_BIT)
+#define DIGITAL_NFC_DEP_NACK_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_NACK_BIT)
#define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08)
-#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04)
+#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_DID_BIT)
#define DIGITAL_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03)
#define DIGITAL_NFC_DEP_PFB_I_PDU 0x00
@@ -97,6 +112,34 @@ struct digital_dep_req_res {
static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
struct sk_buff *resp);
+static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp);
+
+static const u8 digital_payload_bits_map[4] = {
+ [0] = 64,
+ [1] = 128,
+ [2] = 192,
+ [3] = 254
+};
+
+static u8 digital_payload_bits_to_size(u8 payload_bits)
+{
+ if (payload_bits >= ARRAY_SIZE(digital_payload_bits_map))
+ return 0;
+
+ return digital_payload_bits_map[payload_bits];
+}
+
+static u8 digital_payload_size_to_bits(u8 payload_size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(digital_payload_bits_map); i++)
+ if (digital_payload_bits_map[i] == payload_size)
+ return i;
+
+ return 0xff;
+}
static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev,
struct sk_buff *skb)
@@ -129,6 +172,106 @@ static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
return 0;
}
+static struct sk_buff *
+digital_send_dep_data_prep(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ struct digital_dep_req_res *dep_req_res,
+ struct digital_data_exch *data_exch)
+{
+ struct sk_buff *new_skb;
+
+ if (skb->len > ddev->remote_payload_max) {
+ dep_req_res->pfb |= DIGITAL_NFC_DEP_PFB_MI_BIT;
+
+ new_skb = digital_skb_alloc(ddev, ddev->remote_payload_max);
+ if (!new_skb) {
+ kfree_skb(ddev->chaining_skb);
+ ddev->chaining_skb = NULL;
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ skb_reserve(new_skb, ddev->tx_headroom + NFC_HEADER_SIZE +
+ DIGITAL_NFC_DEP_REQ_RES_HEADROOM);
+ memcpy(skb_put(new_skb, ddev->remote_payload_max), skb->data,
+ ddev->remote_payload_max);
+ skb_pull(skb, ddev->remote_payload_max);
+
+ ddev->chaining_skb = skb;
+ ddev->data_exch = data_exch;
+ } else {
+ ddev->chaining_skb = NULL;
+ new_skb = skb;
+ }
+
+ return new_skb;
+}
+
+static struct sk_buff *
+digital_recv_dep_data_gather(struct nfc_digital_dev *ddev, u8 pfb,
+ struct sk_buff *resp,
+ int (*send_ack)(struct nfc_digital_dev *ddev,
+ struct digital_data_exch
+ *data_exch),
+ struct digital_data_exch *data_exch)
+{
+ struct sk_buff *new_skb;
+ int rc;
+
+ if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb) && (!ddev->chaining_skb)) {
+ ddev->chaining_skb =
+ nfc_alloc_recv_skb(8 * ddev->local_payload_max,
+ GFP_KERNEL);
+ if (!ddev->chaining_skb) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ }
+
+ if (ddev->chaining_skb) {
+ if (resp->len > skb_tailroom(ddev->chaining_skb)) {
+ new_skb = skb_copy_expand(ddev->chaining_skb,
+ skb_headroom(
+ ddev->chaining_skb),
+ 8 * ddev->local_payload_max,
+ GFP_KERNEL);
+ if (!new_skb) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ kfree_skb(ddev->chaining_skb);
+ ddev->chaining_skb = new_skb;
+ }
+
+ memcpy(skb_put(ddev->chaining_skb, resp->len), resp->data,
+ resp->len);
+
+ kfree_skb(resp);
+ resp = NULL;
+
+ if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) {
+ rc = send_ack(ddev, data_exch);
+ if (rc)
+ goto error;
+
+ return NULL;
+ }
+
+ resp = ddev->chaining_skb;
+ ddev->chaining_skb = NULL;
+ }
+
+ return resp;
+
+error:
+ kfree_skb(resp);
+
+ kfree_skb(ddev->chaining_skb);
+ ddev->chaining_skb = NULL;
+
+ return ERR_PTR(rc);
+}
+
static void digital_in_recv_psl_res(struct nfc_digital_dev *ddev, void *arg,
struct sk_buff *resp)
{
@@ -198,6 +341,8 @@ static int digital_in_send_psl_req(struct nfc_digital_dev *ddev,
{
struct sk_buff *skb;
struct digital_psl_req *psl_req;
+ int rc;
+ u8 payload_size, payload_bits;
skb = digital_skb_alloc(ddev, sizeof(*psl_req));
if (!skb)
@@ -211,14 +356,24 @@ static int digital_in_send_psl_req(struct nfc_digital_dev *ddev,
psl_req->cmd = DIGITAL_CMD_PSL_REQ;
psl_req->did = 0;
psl_req->brs = (0x2 << 3) | 0x2; /* 424F both directions */
- psl_req->fsl = DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B;
+
+ payload_size = min(ddev->local_payload_max, ddev->remote_payload_max);
+ payload_bits = digital_payload_size_to_bits(payload_size);
+ psl_req->fsl = DIGITAL_PAYLOAD_BITS_TO_FSL(payload_bits);
+
+ ddev->local_payload_max = payload_size;
+ ddev->remote_payload_max = payload_size;
digital_skb_push_dep_sod(ddev, skb);
ddev->skb_add_crc(skb);
- return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res,
- target);
+ rc = digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res,
+ target);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
}
static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
@@ -226,7 +381,7 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
{
struct nfc_target *target = arg;
struct digital_atr_res *atr_res;
- u8 gb_len;
+ u8 gb_len, payload_bits;
int rc;
if (IS_ERR(resp)) {
@@ -256,6 +411,14 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
atr_res = (struct digital_atr_res *)resp->data;
+ payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_res->pp);
+ ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits);
+
+ if (!ddev->remote_payload_max) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len);
if (rc)
goto exit;
@@ -286,6 +449,8 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
struct sk_buff *skb;
struct digital_atr_req *atr_req;
uint size;
+ int rc;
+ u8 payload_bits;
size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len;
@@ -314,7 +479,9 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
atr_req->bs = 0;
atr_req->br = 0;
- atr_req->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+ ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX;
+ payload_bits = digital_payload_size_to_bits(ddev->local_payload_max);
+ atr_req->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits);
if (gb_len) {
atr_req->pp |= DIGITAL_GB_BIT;
@@ -325,8 +492,113 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
ddev->skb_add_crc(skb);
- return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res,
- target);
+ rc = digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res,
+ target);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static int digital_in_send_ack(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch)
+{
+ struct digital_dep_req_res *dep_req;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_req = (struct digital_dep_req_res *)skb->data;
+
+ dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+ dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU |
+ ddev->curr_nfc_dep_pni;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ ddev->saved_skb = skb_get(skb);
+ ddev->saved_skb_len = skb->len;
+
+ rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+ if (rc) {
+ kfree_skb(skb);
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+ }
+
+ return rc;
+}
+
+static int digital_in_send_nack(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch)
+{
+ struct digital_dep_req_res *dep_req;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_req = (struct digital_dep_req_res *)skb->data;
+
+ dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+ dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU |
+ DIGITAL_NFC_DEP_PFB_NACK_BIT | ddev->curr_nfc_dep_pni;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static int digital_in_send_atn(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch)
+{
+ struct digital_dep_req_res *dep_req;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_req = (struct digital_dep_req_res *)skb->data;
+
+ dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+ dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
}
static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
@@ -355,12 +627,30 @@ static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
ddev->skb_add_crc(skb);
+ ddev->saved_skb = skb_get(skb);
+ ddev->saved_skb_len = skb->len;
+
rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
data_exch);
+ if (rc) {
+ kfree_skb(skb);
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+ }
return rc;
}
+static int digital_in_send_saved_skb(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch)
+{
+ skb_get(ddev->saved_skb);
+ skb_push(ddev->saved_skb, ddev->saved_skb_len);
+
+ return digital_in_send_cmd(ddev, ddev->saved_skb, 1500,
+ digital_in_recv_dep_res, data_exch);
+}
+
static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
struct sk_buff *resp)
{
@@ -373,25 +663,67 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
resp = NULL;
+
+ if (((rc != -ETIMEDOUT) || ddev->nack_count) &&
+ (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) {
+ ddev->atn_count = 0;
+
+ rc = digital_in_send_nack(ddev, data_exch);
+ if (rc)
+ goto error;
+
+ return;
+ } else if ((rc == -ETIMEDOUT) &&
+ (ddev->atn_count++ < DIGITAL_NFC_DEP_N_RETRY_ATN)) {
+ ddev->nack_count = 0;
+
+ rc = digital_in_send_atn(ddev, data_exch);
+ if (rc)
+ goto error;
+
+ return;
+ }
+
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
goto exit;
}
rc = ddev->skb_check_crc(resp);
if (rc) {
+ if ((resp->len >= 4) &&
+ (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) {
+ ddev->atn_count = 0;
+
+ rc = digital_in_send_nack(ddev, data_exch);
+ if (rc)
+ goto error;
+
+ kfree_skb(resp);
+
+ return;
+ }
+
PROTOCOL_ERR("14.4.1.6");
goto error;
}
- rc = digital_skb_pull_dep_sod(ddev, resp);
- if (rc) {
- PROTOCOL_ERR("14.4.1.2");
+ ddev->atn_count = 0;
+ ddev->nack_count = 0;
+
+ if (resp->len > ddev->local_payload_max) {
+ rc = -EMSGSIZE;
goto exit;
}
+ size = sizeof(struct digital_dep_req_res);
dep_res = (struct digital_dep_req_res *)resp->data;
- if (resp->len < sizeof(struct digital_dep_req_res) ||
- dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN ||
+ if (resp->len < size || dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN ||
dep_res->cmd != DIGITAL_CMD_DEP_RES) {
rc = -EIO;
goto error;
@@ -399,6 +731,24 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
pfb = dep_res->pfb;
+ if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) {
+ PROTOCOL_ERR("14.8.2.1");
+ rc = -EIO;
+ goto error;
+ }
+
+ if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (size > resp->len) {
+ rc = -EIO;
+ goto error;
+ }
+
+ skb_pull(resp, size);
+
switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) {
case DIGITAL_NFC_DEP_PFB_I_PDU:
if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) {
@@ -409,21 +759,71 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
ddev->curr_nfc_dep_pni =
DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
+ resp = digital_recv_dep_data_gather(ddev, pfb, resp,
+ digital_in_send_ack,
+ data_exch);
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto error;
+ }
+
+ /* If resp is NULL then we're still chaining so return and
+ * wait for the next part of the PDU. Else, the PDU is
+ * complete so pass it up.
+ */
+ if (!resp)
+ return;
+
rc = 0;
break;
case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+ if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) {
+ PROTOCOL_ERR("14.12.3.3");
+ rc = -EIO;
+ goto exit;
+ }
+
+ ddev->curr_nfc_dep_pni =
+ DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+
+ if (ddev->chaining_skb && !DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) {
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
+ rc = digital_in_send_dep_req(ddev, NULL,
+ ddev->chaining_skb,
+ ddev->data_exch);
+ if (rc)
+ goto error;
+
+ return;
+ }
+
pr_err("Received a ACK/NACK PDU\n");
- rc = -EIO;
- goto error;
+ rc = -EINVAL;
+ goto exit;
case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
- if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) {
- rc = -EINVAL;
- goto error;
+ if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { /* ATN */
+ rc = digital_in_send_saved_skb(ddev, data_exch);
+ if (rc) {
+ kfree_skb(ddev->saved_skb);
+ goto error;
+ }
+
+ return;
}
- rc = digital_in_send_rtox(ddev, data_exch, resp->data[3]);
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
+ rc = digital_in_send_rtox(ddev, data_exch, resp->data[0]);
if (rc)
goto error;
@@ -431,30 +831,18 @@ static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
return;
}
- if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) {
- pr_err("MI bit set. Chained PDU not supported\n");
- rc = -EIO;
- goto error;
- }
-
- size = sizeof(struct digital_dep_req_res);
-
- if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb))
- size++;
-
- if (size > resp->len) {
- rc = -EIO;
- goto error;
- }
-
- skb_pull(resp, size);
-
exit:
data_exch->cb(data_exch->cb_context, resp, rc);
error:
kfree(data_exch);
+ kfree_skb(ddev->chaining_skb);
+ ddev->chaining_skb = NULL;
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
if (rc)
kfree_skb(resp);
}
@@ -464,20 +852,47 @@ int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
struct digital_data_exch *data_exch)
{
struct digital_dep_req_res *dep_req;
+ struct sk_buff *chaining_skb, *tmp_skb;
+ int rc;
skb_push(skb, sizeof(struct digital_dep_req_res));
dep_req = (struct digital_dep_req_res *)skb->data;
+
dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
dep_req->cmd = DIGITAL_CMD_DEP_REQ;
dep_req->pfb = ddev->curr_nfc_dep_pni;
- digital_skb_push_dep_sod(ddev, skb);
+ ddev->atn_count = 0;
+ ddev->nack_count = 0;
- ddev->skb_add_crc(skb);
+ chaining_skb = ddev->chaining_skb;
+
+ tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_req, data_exch);
+ if (IS_ERR(tmp_skb))
+ return PTR_ERR(tmp_skb);
+
+ digital_skb_push_dep_sod(ddev, tmp_skb);
+
+ ddev->skb_add_crc(tmp_skb);
- return digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
- data_exch);
+ ddev->saved_skb = skb_get(tmp_skb);
+ ddev->saved_skb_len = tmp_skb->len;
+
+ rc = digital_in_send_cmd(ddev, tmp_skb, 1500, digital_in_recv_dep_res,
+ data_exch);
+ if (rc) {
+ if (tmp_skb != skb)
+ kfree_skb(tmp_skb);
+
+ kfree_skb(chaining_skb);
+ ddev->chaining_skb = NULL;
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+ }
+
+ return rc;
}
static void digital_tg_set_rf_tech(struct nfc_digital_dev *ddev, u8 rf_tech)
@@ -507,11 +922,106 @@ static void digital_tg_set_rf_tech(struct nfc_digital_dev *ddev, u8 rf_tech)
}
}
+static int digital_tg_send_ack(struct nfc_digital_dev *ddev,
+ struct digital_data_exch *data_exch)
+{
+ struct digital_dep_req_res *dep_res;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_res = (struct digital_dep_req_res *)skb->data;
+
+ dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+ dep_res->cmd = DIGITAL_CMD_DEP_RES;
+ dep_res->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU |
+ ddev->curr_nfc_dep_pni;
+
+ if (ddev->did) {
+ dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
+
+ memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
+ sizeof(ddev->did));
+ }
+
+ ddev->curr_nfc_dep_pni =
+ DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ ddev->saved_skb = skb_get(skb);
+ ddev->saved_skb_len = skb->len;
+
+ rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
+ data_exch);
+ if (rc) {
+ kfree_skb(skb);
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+ }
+
+ return rc;
+}
+
+static int digital_tg_send_atn(struct nfc_digital_dev *ddev)
+{
+ struct digital_dep_req_res *dep_res;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = digital_skb_alloc(ddev, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_push(skb, sizeof(struct digital_dep_req_res));
+
+ dep_res = (struct digital_dep_req_res *)skb->data;
+
+ dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+ dep_res->cmd = DIGITAL_CMD_DEP_RES;
+ dep_res->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU;
+
+ if (ddev->did) {
+ dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
+
+ memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
+ sizeof(ddev->did));
+ }
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
+ NULL);
+ if (rc)
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static int digital_tg_send_saved_skb(struct nfc_digital_dev *ddev)
+{
+ skb_get(ddev->saved_skb);
+ skb_push(ddev->saved_skb, ddev->saved_skb_len);
+
+ return digital_tg_send_cmd(ddev, ddev->saved_skb, 1500,
+ digital_tg_recv_dep_req, NULL);
+}
+
static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
struct sk_buff *resp)
{
int rc;
struct digital_dep_req_res *dep_req;
+ u8 pfb;
size_t size;
if (IS_ERR(resp)) {
@@ -532,6 +1042,11 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
goto exit;
}
+ if (resp->len > ddev->local_payload_max) {
+ rc = -EMSGSIZE;
+ goto exit;
+ }
+
size = sizeof(struct digital_dep_req_res);
dep_req = (struct digital_dep_req_res *)resp->data;
@@ -541,34 +1056,147 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
goto exit;
}
- if (DIGITAL_NFC_DEP_DID_BIT_SET(dep_req->pfb))
- size++;
+ pfb = dep_req->pfb;
- if (resp->len < size) {
+ if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) {
+ if (ddev->did && (ddev->did == resp->data[3])) {
+ size++;
+ } else {
+ rc = -EIO;
+ goto exit;
+ }
+ } else if (ddev->did) {
rc = -EIO;
goto exit;
}
- switch (DIGITAL_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
+ if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (size > resp->len) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ skb_pull(resp, size);
+
+ switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) {
case DIGITAL_NFC_DEP_PFB_I_PDU:
pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n");
- ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(dep_req->pfb);
+
+ if ((ddev->atn_count && (DIGITAL_NFC_DEP_PFB_PNI(pfb - 1) !=
+ ddev->curr_nfc_dep_pni)) ||
+ (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni)) {
+ PROTOCOL_ERR("14.12.3.4");
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (ddev->atn_count) {
+ ddev->atn_count = 0;
+
+ rc = digital_tg_send_saved_skb(ddev);
+ if (rc)
+ goto exit;
+
+ return;
+ }
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
+ resp = digital_recv_dep_data_gather(ddev, pfb, resp,
+ digital_tg_send_ack, NULL);
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ /* If resp is NULL then we're still chaining so return and
+ * wait for the next part of the PDU. Else, the PDU is
+ * complete so pass it up.
+ */
+ if (!resp)
+ return;
+
+ rc = 0;
break;
case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
- pr_err("Received a ACK/NACK PDU\n");
- rc = -EINVAL;
- goto exit;
+ if (!DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) { /* ACK */
+ if ((ddev->atn_count &&
+ (DIGITAL_NFC_DEP_PFB_PNI(pfb - 1) !=
+ ddev->curr_nfc_dep_pni)) ||
+ (DIGITAL_NFC_DEP_PFB_PNI(pfb) !=
+ ddev->curr_nfc_dep_pni) ||
+ !ddev->chaining_skb || !ddev->saved_skb) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (ddev->atn_count) {
+ ddev->atn_count = 0;
+
+ rc = digital_tg_send_saved_skb(ddev);
+ if (rc)
+ goto exit;
+
+ return;
+ }
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
+ rc = digital_tg_send_dep_res(ddev, ddev->chaining_skb);
+ if (rc)
+ goto exit;
+ } else { /* NACK */
+ if ((DIGITAL_NFC_DEP_PFB_PNI(pfb + 1) !=
+ ddev->curr_nfc_dep_pni) ||
+ !ddev->saved_skb) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ ddev->atn_count = 0;
+
+ rc = digital_tg_send_saved_skb(ddev);
+ if (rc) {
+ kfree_skb(ddev->saved_skb);
+ goto exit;
+ }
+ }
+
+ return;
case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
- pr_err("Received a SUPERVISOR PDU\n");
- rc = -EINVAL;
- goto exit;
- }
+ if (DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) {
+ rc = -EINVAL;
+ goto exit;
+ }
- skb_pull(resp, size);
+ rc = digital_tg_send_atn(ddev);
+ if (rc)
+ goto exit;
+
+ ddev->atn_count++;
+
+ kfree_skb(resp);
+ return;
+ }
rc = nfc_tm_data_received(ddev->nfc_dev, resp);
exit:
+ kfree_skb(ddev->chaining_skb);
+ ddev->chaining_skb = NULL;
+
+ ddev->atn_count = 0;
+
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+
if (rc)
kfree_skb(resp);
}
@@ -576,20 +1204,54 @@ exit:
int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb)
{
struct digital_dep_req_res *dep_res;
+ struct sk_buff *chaining_skb, *tmp_skb;
+ int rc;
skb_push(skb, sizeof(struct digital_dep_req_res));
+
dep_res = (struct digital_dep_req_res *)skb->data;
dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
dep_res->cmd = DIGITAL_CMD_DEP_RES;
dep_res->pfb = ddev->curr_nfc_dep_pni;
- digital_skb_push_dep_sod(ddev, skb);
+ if (ddev->did) {
+ dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
- ddev->skb_add_crc(skb);
+ memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
+ sizeof(ddev->did));
+ }
+
+ ddev->curr_nfc_dep_pni =
+ DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+
+ chaining_skb = ddev->chaining_skb;
+
+ tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_res, NULL);
+ if (IS_ERR(tmp_skb))
+ return PTR_ERR(tmp_skb);
+
+ digital_skb_push_dep_sod(ddev, tmp_skb);
+
+ ddev->skb_add_crc(tmp_skb);
+
+ ddev->saved_skb = skb_get(tmp_skb);
+ ddev->saved_skb_len = tmp_skb->len;
+
+ rc = digital_tg_send_cmd(ddev, tmp_skb, 1500, digital_tg_recv_dep_req,
+ NULL);
+ if (rc) {
+ if (tmp_skb != skb)
+ kfree_skb(tmp_skb);
+
+ kfree_skb(chaining_skb);
+ ddev->chaining_skb = NULL;
- return digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
- NULL);
+ kfree_skb(ddev->saved_skb);
+ ddev->saved_skb = NULL;
+ }
+
+ return rc;
}
static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev,
@@ -632,9 +1294,10 @@ static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did,
ddev->skb_add_crc(skb);
+ ddev->curr_nfc_dep_pni = 0;
+
rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete,
(void *)(unsigned long)rf_tech);
-
if (rc)
kfree_skb(skb);
@@ -647,7 +1310,7 @@ static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg,
int rc;
struct digital_psl_req *psl_req;
u8 rf_tech;
- u8 dsi;
+ u8 dsi, payload_size, payload_bits;
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -692,6 +1355,18 @@ static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg,
goto exit;
}
+ payload_bits = DIGITAL_PAYLOAD_FSL_TO_BITS(psl_req->fsl);
+ payload_size = digital_payload_bits_to_size(payload_bits);
+
+ if (!payload_size || (payload_size > min(ddev->local_payload_max,
+ ddev->remote_payload_max))) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ ddev->local_payload_max = payload_size;
+ ddev->remote_payload_max = payload_size;
+
rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech);
exit:
@@ -712,6 +1387,8 @@ static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev,
if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB)
offset++;
+ ddev->atn_count = 0;
+
if (resp->data[offset] == DIGITAL_CMD_PSL_REQ)
digital_tg_recv_psl_req(ddev, arg, resp);
else
@@ -723,7 +1400,7 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
{
struct digital_atr_res *atr_res;
struct sk_buff *skb;
- u8 *gb;
+ u8 *gb, payload_bits;
size_t gb_len;
int rc;
@@ -744,7 +1421,11 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
atr_res->cmd = DIGITAL_CMD_ATR_RES;
memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3));
atr_res->to = 8;
- atr_res->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+
+ ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX;
+ payload_bits = digital_payload_size_to_bits(ddev->local_payload_max);
+ atr_res->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits);
+
if (gb_len) {
skb_put(skb, gb_len);
@@ -756,12 +1437,12 @@ static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
ddev->skb_add_crc(skb);
+ ddev->curr_nfc_dep_pni = 0;
+
rc = digital_tg_send_cmd(ddev, skb, 999,
digital_tg_send_atr_res_complete, NULL);
- if (rc) {
+ if (rc)
kfree_skb(skb);
- return rc;
- }
return rc;
}
@@ -772,7 +1453,7 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
int rc;
struct digital_atr_req *atr_req;
size_t gb_len, min_size;
- u8 poll_tech_count;
+ u8 poll_tech_count, payload_bits;
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -815,11 +1496,22 @@ void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
atr_req = (struct digital_atr_req *)resp->data;
if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
- atr_req->cmd != DIGITAL_CMD_ATR_REQ) {
+ atr_req->cmd != DIGITAL_CMD_ATR_REQ ||
+ atr_req->did > DIGITAL_DID_MAX) {
rc = -EINVAL;
goto exit;
}
+ payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_req->pp);
+ ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits);
+
+ if (!ddev->remote_payload_max) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ ddev->did = atr_req->did;
+
rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED);
if (rc)
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 677d24bb70f8..91df487aa0a9 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -345,6 +345,9 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
pr_debug("\n");
+ if (hdev->gate2pipe[dest_gate] == NFC_HCI_DO_NOT_CREATE_PIPE)
+ return 0;
+
if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
return -EADDRINUSE;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 117708263ced..ef50e7716c4a 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -167,6 +167,48 @@ exit:
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
struct sk_buff *skb)
{
+ int r = 0;
+ u8 gate = nfc_hci_pipe2gate(hdev, pipe);
+ u8 local_gate, new_pipe;
+ u8 gate_opened = 0x00;
+
+ pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+
+ switch (cmd) {
+ case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
+ if (skb->len != 5) {
+ r = -EPROTO;
+ break;
+ }
+
+ local_gate = skb->data[3];
+ new_pipe = skb->data[4];
+ nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
+
+ /* save the new created pipe and bind with local gate,
+ * the description for skb->data[3] is destination gate id
+ * but since we received this cmd from host controller, we
+ * are the destination and it is our local gate
+ */
+ hdev->gate2pipe[local_gate] = new_pipe;
+ break;
+ case NFC_HCI_ANY_OPEN_PIPE:
+ /* if the pipe is already created, we allow remote host to
+ * open it
+ */
+ if (gate != 0xff)
+ nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK,
+ &gate_opened, 1);
+ break;
+ case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
+ nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
+ break;
+ default:
+ pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate);
+ r = -EINVAL;
+ break;
+ }
+
kfree_skb(skb);
}
@@ -717,6 +759,19 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
return 0;
}
+static int hci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->se_io)
+ return hdev->ops->se_io(hdev, se_idx, apdu,
+ apdu_length, cb, cb_context);
+
+ return 0;
+}
+
static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
{
mutex_lock(&hdev->msg_tx_mutex);
@@ -830,6 +885,7 @@ static struct nfc_ops hci_nfc_ops = {
.discover_se = hci_discover_se,
.enable_se = hci_enable_se,
.disable_se = hci_disable_se,
+ .se_io = hci_se_io,
};
struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index a3ad69a4c648..3621a902cb6e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -401,7 +401,8 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
u8 *miux_tlv = NULL, miux_tlv_length;
u8 *rw_tlv = NULL, rw_tlv_length, rw;
int err;
- u16 size = 0, miux;
+ u16 size = 0;
+ __be16 miux;
pr_debug("Sending CONNECT\n");
@@ -465,7 +466,8 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
u8 *miux_tlv = NULL, miux_tlv_length;
u8 *rw_tlv = NULL, rw_tlv_length, rw;
int err;
- u16 size = 0, miux;
+ u16 size = 0;
+ __be16 miux;
pr_debug("Sending CC\n");
@@ -665,7 +667,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
if (msg_data == NULL)
return -ENOMEM;
- if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+ if (memcpy_from_msg(msg_data, msg, len)) {
kfree(msg_data);
return -EFAULT;
}
@@ -731,7 +733,7 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
if (msg_data == NULL)
return -ENOMEM;
- if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
+ if (memcpy_from_msg(msg_data, msg, len)) {
kfree(msg_data);
return -EFAULT;
}
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 51e788797317..b18f07ccb504 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011 Intel Corporation. All rights reserved.
+ * Copyright (C) 2014 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1511,8 +1512,10 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
struct nfc_llcp_local *local;
local = nfc_llcp_find_local(dev);
- if (local == NULL)
+ if (local == NULL) {
+ kfree_skb(skb);
return -ENODEV;
+ }
__nfc_llcp_recv(local, skb);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 51f077a92fa9..e181e290427c 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -524,13 +524,13 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
static inline unsigned int llcp_accept_poll(struct sock *parent)
{
- struct nfc_llcp_sock *llcp_sock, *n, *parent_sock;
+ struct nfc_llcp_sock *llcp_sock, *parent_sock;
struct sock *sk;
parent_sock = nfc_llcp_sock(parent);
- list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
- accept_queue) {
+ list_for_each_entry(llcp_sock, &parent_sock->accept_queue,
+ accept_queue) {
sk = &llcp_sock->sk;
if (sk->sk_state == LLCP_CONNECTED)
@@ -832,7 +832,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
copied = min_t(unsigned int, rlen, len);
cskb = skb;
- if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+ if (skb_copy_datagram_msg(cskb, 0, msg, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 90b16cb40058..51feb5e63008 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -3,6 +3,7 @@
* NFC Controller (NFCC) and a Device Host (DH).
*
* Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Written by Ilan Elias <ilane@ti.com>
*
@@ -196,18 +197,24 @@ static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
}
+struct nci_rf_discover_param {
+ __u32 im_protocols;
+ __u32 tm_protocols;
+};
+
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
+ struct nci_rf_discover_param *param =
+ (struct nci_rf_discover_param *)opt;
struct nci_rf_disc_cmd cmd;
- __u32 protocols = opt;
cmd.num_disc_configs = 0;
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_JEWEL_MASK ||
- protocols & NFC_PROTO_MIFARE_MASK ||
- protocols & NFC_PROTO_ISO14443_MASK ||
- protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ (param->im_protocols & NFC_PROTO_JEWEL_MASK ||
+ param->im_protocols & NFC_PROTO_MIFARE_MASK ||
+ param->im_protocols & NFC_PROTO_ISO14443_MASK ||
+ param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_A_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -215,7 +222,7 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_ISO14443_B_MASK)) {
+ (param->im_protocols & NFC_PROTO_ISO14443_B_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_B_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -223,8 +230,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_FELICA_MASK ||
- protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ (param->im_protocols & NFC_PROTO_FELICA_MASK ||
+ param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_F_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -232,13 +239,25 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_ISO15693_MASK)) {
+ (param->im_protocols & NFC_PROTO_ISO15693_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_V_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS - 1) &&
+ (param->tm_protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_A_PASSIVE_LISTEN_MODE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_F_PASSIVE_LISTEN_MODE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
(1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
&cmd);
@@ -280,7 +299,7 @@ static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
struct nci_rf_deactivate_cmd cmd;
- cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
+ cmd.type = opt;
nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
sizeof(struct nci_rf_deactivate_cmd), &cmd);
@@ -441,6 +460,7 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_set_config_param param;
+ int rc;
param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
if ((param.val == NULL) || (param.len == 0))
@@ -451,14 +471,45 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
param.id = NCI_PN_ATR_REQ_GEN_BYTES;
+ rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
+ msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
+ if (rc)
+ return rc;
+
+ param.id = NCI_LN_ATR_RES_GEN_BYTES;
+
return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}
+static int nci_set_listen_parameters(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+ __u8 val;
+
+ val = NCI_LA_SEL_INFO_NFC_DEP_MASK;
+
+ rc = nci_set_config(ndev, NCI_LA_SEL_INFO, 1, &val);
+ if (rc)
+ return rc;
+
+ val = NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK;
+
+ rc = nci_set_config(ndev, NCI_LF_PROTOCOL_TYPE, 1, &val);
+ if (rc)
+ return rc;
+
+ val = NCI_LF_CON_BITR_F_212 | NCI_LF_CON_BITR_F_424;
+
+ return nci_set_config(ndev, NCI_LF_CON_BITR_F, 1, &val);
+}
+
static int nci_start_poll(struct nfc_dev *nfc_dev,
__u32 im_protocols, __u32 tm_protocols)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ struct nci_rf_discover_param param;
int rc;
if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
@@ -476,13 +527,14 @@ static int nci_start_poll(struct nfc_dev *nfc_dev,
(atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
pr_debug("target active or w4 select, implicitly deactivate\n");
- rc = nci_request(ndev, nci_rf_deactivate_req, 0,
+ rc = nci_request(ndev, nci_rf_deactivate_req,
+ NCI_DEACTIVATE_TYPE_IDLE_MODE,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
if (rc)
return -EBUSY;
}
- if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
rc = nci_set_local_general_bytes(nfc_dev);
if (rc) {
pr_err("failed to set local general bytes\n");
@@ -490,7 +542,15 @@ static int nci_start_poll(struct nfc_dev *nfc_dev,
}
}
- rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
+ if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ rc = nci_set_listen_parameters(nfc_dev);
+ if (rc)
+ pr_err("failed to set listen parameters\n");
+ }
+
+ param.im_protocols = im_protocols;
+ param.tm_protocols = tm_protocols;
+ rc = nci_request(ndev, nci_rf_discover_req, (unsigned long)&param,
msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
if (!rc)
@@ -509,7 +569,7 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
return;
}
- nci_request(ndev, nci_rf_deactivate_req, 0,
+ nci_request(ndev, nci_rf_deactivate_req, NCI_DEACTIVATE_TYPE_IDLE_MODE,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
@@ -594,7 +654,8 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
ndev->target_active_prot = 0;
if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
- nci_request(ndev, nci_rf_deactivate_req, 0,
+ nci_request(ndev, nci_rf_deactivate_req,
+ NCI_DEACTIVATE_TYPE_SLEEP_MODE,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
}
@@ -622,9 +683,24 @@ static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
static int nci_dep_link_down(struct nfc_dev *nfc_dev)
{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
pr_debug("entry\n");
- nci_deactivate_target(nfc_dev, NULL);
+ if (nfc_dev->rf_mode == NFC_RF_INITIATOR) {
+ nci_deactivate_target(nfc_dev, NULL);
+ } else {
+ if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE ||
+ atomic_read(&ndev->state) == NCI_DISCOVERY) {
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ }
+
+ rc = nfc_tm_deactivated(nfc_dev);
+ if (rc)
+ pr_err("error when signaling tm deactivation\n");
+ }
return 0;
}
@@ -658,18 +734,58 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
return rc;
}
+static int nci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
+ if (rc)
+ pr_err("unable to send data\n");
+
+ return rc;
+}
+
static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ if (ndev->ops->enable_se)
+ return ndev->ops->enable_se(ndev, se_idx);
+
return 0;
}
static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ if (ndev->ops->disable_se)
+ return ndev->ops->disable_se(ndev, se_idx);
+
return 0;
}
static int nci_discover_se(struct nfc_dev *nfc_dev)
{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ if (ndev->ops->discover_se)
+ return ndev->ops->discover_se(ndev);
+
+ return 0;
+}
+
+static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ if (ndev->ops->se_io)
+ return ndev->ops->se_io(ndev, se_idx, apdu,
+ apdu_length, cb, cb_context);
+
return 0;
}
@@ -683,9 +799,11 @@ static struct nfc_ops nci_nfc_ops = {
.activate_target = nci_activate_target,
.deactivate_target = nci_deactivate_target,
.im_transceive = nci_transceive,
+ .tm_send = nci_tm_send,
.enable_se = nci_enable_se,
.disable_se = nci_disable_se,
.discover_se = nci_discover_se,
+ .se_io = nci_se_io,
};
/* ---- Interface to NCI drivers ---- */
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 427ef2c7ab68..a2de2a8cb00e 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -3,6 +3,7 @@
* NFC Controller (NFCC) and a Device Host (DH).
*
* Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2014 Marvell International Ltd.
*
* Written by Ilan Elias <ilane@ti.com>
*
@@ -184,11 +185,16 @@ exit:
static void nci_add_rx_data_frag(struct nci_dev *ndev,
struct sk_buff *skb,
- __u8 pbf)
+ __u8 pbf, __u8 status)
{
int reassembly_len;
int err = 0;
+ if (status) {
+ err = status;
+ goto exit;
+ }
+
if (ndev->rx_data_reassembly) {
reassembly_len = ndev->rx_data_reassembly->len;
@@ -223,13 +229,24 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
}
exit:
- nci_data_exchange_complete(ndev, skb, err);
+ if (ndev->nfc_dev->rf_mode == NFC_RF_INITIATOR) {
+ nci_data_exchange_complete(ndev, skb, err);
+ } else if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
+ /* Data received in Target mode, forward to nfc core */
+ err = nfc_tm_data_received(ndev->nfc_dev, skb);
+ if (err)
+ pr_err("unable to handle received data\n");
+ } else {
+ pr_err("rf mode unknown\n");
+ kfree_skb(skb);
+ }
}
/* Rx Data packet */
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u8 pbf = nci_pbf(skb->data);
+ __u8 status = 0;
pr_debug("len %d\n", skb->len);
@@ -247,8 +264,9 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
ndev->target_active_prot == NFC_PROTO_ISO15693) {
/* frame I/F => remove the status byte */
pr_debug("frame I/F => remove the status byte\n");
+ status = skb->data[skb->len - 1];
skb_trim(skb, (skb->len - 1));
}
- nci_add_rx_data_frag(ndev, skb, pbf);
+ nci_add_rx_data_frag(ndev, skb, pbf, nci_to_errno(status));
}
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 205b35f666db..22e453cb787d 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -103,7 +103,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfca_poll *nfca_poll,
__u8 *data)
{
- nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
+ nfca_poll->sens_res = __le16_to_cpu(*((__le16 *)data));
data += 2;
nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
@@ -167,7 +167,19 @@ static __u8 *nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev,
return data;
}
-__u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol)
+static __u8 *nci_extract_rf_params_nfcf_passive_listen(struct nci_dev *ndev,
+ struct rf_tech_specific_params_nfcf_listen *nfcf_listen,
+ __u8 *data)
+{
+ nfcf_listen->local_nfcid2_len = min_t(__u8, *data++,
+ NFC_NFCID2_MAXSIZE);
+ memcpy(nfcf_listen->local_nfcid2, data, nfcf_listen->local_nfcid2_len);
+ data += nfcf_listen->local_nfcid2_len;
+
+ return data;
+}
+
+static __u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol)
{
if (ndev->ops->get_rfprotocol)
return ndev->ops->get_rfprotocol(ndev, rf_protocol);
@@ -401,17 +413,29 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct activation_params_poll_nfc_dep *poll;
+ struct activation_params_listen_nfc_dep *listen;
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
case NCI_NFC_F_PASSIVE_POLL_MODE:
poll = &ntf->activation_params.poll_nfc_dep;
- poll->atr_res_len = min_t(__u8, *data++, 63);
+ poll->atr_res_len = min_t(__u8, *data++,
+ NFC_ATR_RES_MAXSIZE - 2);
pr_debug("atr_res_len %d\n", poll->atr_res_len);
if (poll->atr_res_len > 0)
memcpy(poll->atr_res, data, poll->atr_res_len);
break;
+ case NCI_NFC_A_PASSIVE_LISTEN_MODE:
+ case NCI_NFC_F_PASSIVE_LISTEN_MODE:
+ listen = &ntf->activation_params.listen_nfc_dep;
+ listen->atr_req_len = min_t(__u8, *data++,
+ NFC_ATR_REQ_MAXSIZE - 2);
+ pr_debug("atr_req_len %d\n", listen->atr_req_len);
+ if (listen->atr_req_len > 0)
+ memcpy(listen->atr_req, data, listen->atr_req_len);
+ break;
+
default:
pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
ntf->activation_rf_tech_and_mode);
@@ -444,6 +468,48 @@ static void nci_target_auto_activated(struct nci_dev *ndev,
nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
}
+static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf)
+{
+ ndev->remote_gb_len = 0;
+
+ if (ntf->activation_params_len <= 0)
+ return NCI_STATUS_OK;
+
+ switch (ntf->activation_rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ case NCI_NFC_F_PASSIVE_POLL_MODE:
+ ndev->remote_gb_len = min_t(__u8,
+ (ntf->activation_params.poll_nfc_dep.atr_res_len
+ - NFC_ATR_RES_GT_OFFSET),
+ NFC_ATR_RES_GB_MAXSIZE);
+ memcpy(ndev->remote_gb,
+ (ntf->activation_params.poll_nfc_dep.atr_res
+ + NFC_ATR_RES_GT_OFFSET),
+ ndev->remote_gb_len);
+ break;
+
+ case NCI_NFC_A_PASSIVE_LISTEN_MODE:
+ case NCI_NFC_F_PASSIVE_LISTEN_MODE:
+ ndev->remote_gb_len = min_t(__u8,
+ (ntf->activation_params.listen_nfc_dep.atr_req_len
+ - NFC_ATR_REQ_GT_OFFSET),
+ NFC_ATR_REQ_GB_MAXSIZE);
+ memcpy(ndev->remote_gb,
+ (ntf->activation_params.listen_nfc_dep.atr_req
+ + NFC_ATR_REQ_GT_OFFSET),
+ ndev->remote_gb_len);
+ break;
+
+ default:
+ pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+ ntf->activation_rf_tech_and_mode);
+ return NCI_STATUS_RF_PROTOCOL_ERROR;
+ }
+
+ return NCI_STATUS_OK;
+}
+
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@@ -493,6 +559,16 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
&(ntf.rf_tech_specific_params.nfcv_poll), data);
break;
+ case NCI_NFC_A_PASSIVE_LISTEN_MODE:
+ /* no RF technology specific parameters */
+ break;
+
+ case NCI_NFC_F_PASSIVE_LISTEN_MODE:
+ data = nci_extract_rf_params_nfcf_passive_listen(ndev,
+ &(ntf.rf_tech_specific_params.nfcf_listen),
+ data);
+ break;
+
default:
pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
ntf.activation_rf_tech_and_mode);
@@ -546,32 +622,39 @@ exit:
/* store general bytes to be reported later in dep_link_up */
if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
- ndev->remote_gb_len = 0;
-
- if (ntf.activation_params_len > 0) {
- /* ATR_RES general bytes at offset 15 */
- ndev->remote_gb_len = min_t(__u8,
- (ntf.activation_params
- .poll_nfc_dep.atr_res_len
- - NFC_ATR_RES_GT_OFFSET),
- NFC_MAX_GT_LEN);
- memcpy(ndev->remote_gb,
- (ntf.activation_params.poll_nfc_dep
- .atr_res + NFC_ATR_RES_GT_OFFSET),
- ndev->remote_gb_len);
- }
+ err = nci_store_general_bytes_nfc_dep(ndev, &ntf);
+ if (err != NCI_STATUS_OK)
+ pr_err("unable to store general bytes\n");
}
}
- if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
- /* A single target was found and activated automatically */
- atomic_set(&ndev->state, NCI_POLL_ACTIVE);
- if (err == NCI_STATUS_OK)
- nci_target_auto_activated(ndev, &ntf);
- } else { /* ndev->state == NCI_W4_HOST_SELECT */
- /* A selected target was activated, so complete the request */
- atomic_set(&ndev->state, NCI_POLL_ACTIVE);
- nci_req_complete(ndev, err);
+ if (!(ntf.activation_rf_tech_and_mode & NCI_RF_TECH_MODE_LISTEN_MASK)) {
+ /* Poll mode */
+ if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
+ /* A single target was found and activated
+ * automatically */
+ atomic_set(&ndev->state, NCI_POLL_ACTIVE);
+ if (err == NCI_STATUS_OK)
+ nci_target_auto_activated(ndev, &ntf);
+ } else { /* ndev->state == NCI_W4_HOST_SELECT */
+ /* A selected target was activated, so complete the
+ * request */
+ atomic_set(&ndev->state, NCI_POLL_ACTIVE);
+ nci_req_complete(ndev, err);
+ }
+ } else {
+ /* Listen mode */
+ atomic_set(&ndev->state, NCI_LISTEN_ACTIVE);
+ if (err == NCI_STATUS_OK &&
+ ntf.rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) {
+ err = nfc_tm_activated(ndev->nfc_dev,
+ NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE,
+ ndev->remote_gb,
+ ndev->remote_gb_len);
+ if (err != NCI_STATUS_OK)
+ pr_err("error when signaling tm activation\n");
+ }
}
}
@@ -595,8 +678,21 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, -EIO);
- nci_clear_target_list(ndev);
- atomic_set(&ndev->state, NCI_IDLE);
+ switch (ntf->type) {
+ case NCI_DEACTIVATE_TYPE_IDLE_MODE:
+ nci_clear_target_list(ndev);
+ atomic_set(&ndev->state, NCI_IDLE);
+ break;
+ case NCI_DEACTIVATE_TYPE_SLEEP_MODE:
+ case NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE:
+ atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
+ break;
+ case NCI_DEACTIVATE_TYPE_DISCOVERY:
+ nci_clear_target_list(ndev);
+ atomic_set(&ndev->state, NCI_DISCOVERY);
+ break;
+ }
+
nci_req_complete(ndev, NCI_STATUS_OK);
}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 43cb1c17e267..44989fc8cddf 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -810,6 +810,31 @@ out:
return rc;
}
+static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ u32 device_idx, target_idx, protocol;
+ int rc;
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(device_idx);
+ if (!dev)
+ return -ENODEV;
+
+ target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
+ protocol = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
+
+ nfc_deactivate_target(dev, target_idx);
+ rc = nfc_activate_target(dev, target_idx, protocol);
+
+ nfc_put_device(dev);
+ return 0;
+}
+
static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
@@ -1285,6 +1310,51 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
return 0;
}
+static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct nfc_se *se;
+ int rc;
+
+ pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (!dev->dev_up) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (!dev->ops->se_io) {
+ rc = -EOPNOTSUPP;
+ goto error;
+ }
+
+ se = nfc_find_se(dev, se_idx);
+ if (!se) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (se->state != NFC_SE_ENABLED) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ rc = dev->ops->se_io(dev, se_idx, apdu,
+ apdu_length, cb, cb_context);
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
struct se_io_ctx {
u32 dev_idx;
u32 se_idx;
@@ -1367,7 +1437,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
ctx->dev_idx = dev_idx;
ctx->se_idx = se_idx;
- return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+ return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
}
static const struct genl_ops nfc_genl_ops[] = {
@@ -1455,6 +1525,11 @@ static const struct genl_ops nfc_genl_ops[] = {
.doit = nfc_genl_se_io,
.policy = nfc_genl_policy,
},
+ {
+ .cmd = NFC_CMD_ACTIVATE_TARGET,
+ .doit = nfc_genl_activate_target,
+ .policy = nfc_genl_policy,
+ },
};
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 11c3544ea546..373e138c0ab6 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -231,7 +231,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (skb == NULL)
return rc;
- rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc < 0) {
kfree_skb(skb);
return rc;
@@ -269,7 +269,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
copied = len;
}
- rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ rc = skb_copy_datagram_msg(skb, 0, msg, copied);
skb_free_datagram(sk, skb);
diff --git a/net/nonet.c b/net/nonet.c
deleted file mode 100644
index b1a73fda9c12..000000000000
--- a/net/nonet.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * net/nonet.c
- *
- * Dummy functions to allow us to configure network support entirely
- * out of the kernel.
- *
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) Matthew Wilcox 2003
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index ba3bb8203b99..b7d818c59423 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -4,7 +4,9 @@
config OPENVSWITCH
tristate "Open vSwitch"
+ depends on INET
select LIBCRC32C
+ select NET_MPLS_GSO
---help---
Open vSwitch is a multilayer Ethernet switch targeted at virtualized
environments. In addition to supporting a variety of features
@@ -29,11 +31,10 @@ config OPENVSWITCH
If unsure, say N.
config OPENVSWITCH_GRE
- bool "Open vSwitch GRE tunneling support"
- depends on INET
+ tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH
- depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m)
- default y
+ depends on NET_IPGRE_DEMUX
+ default OPENVSWITCH
---help---
If you say Y here, then the Open vSwitch will be able create GRE
vport.
@@ -43,11 +44,10 @@ config OPENVSWITCH_GRE
If unsure, say Y.
config OPENVSWITCH_VXLAN
- bool "Open vSwitch VXLAN tunneling support"
- depends on INET
+ tristate "Open vSwitch VXLAN tunneling support"
depends on OPENVSWITCH
- depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
- default y
+ depends on VXLAN
+ default OPENVSWITCH
---help---
If you say Y here, then the Open vSwitch will be able create vxlan vport.
@@ -56,11 +56,10 @@ config OPENVSWITCH_VXLAN
If unsure, say Y.
config OPENVSWITCH_GENEVE
- bool "Open vSwitch Geneve tunneling support"
- depends on INET
+ tristate "Open vSwitch Geneve tunneling support"
depends on OPENVSWITCH
- depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m)
- default y
+ depends on GENEVE
+ default OPENVSWITCH
---help---
If you say Y here, then the Open vSwitch will be able create geneve vport.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 9a33a273c375..91b9478413ef 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,14 +15,6 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
-ifneq ($(CONFIG_OPENVSWITCH_GENEVE),)
-openvswitch-y += vport-geneve.o
-endif
-
-ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
-openvswitch-y += vport-vxlan.o
-endif
-
-ifneq ($(CONFIG_OPENVSWITCH_GRE),)
-openvswitch-y += vport-gre.o
-endif
+obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
+obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o
+obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8c4229b11c34..764fdc39c63b 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -28,10 +28,12 @@
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
+
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/sctp/checksum.h>
#include "datapath.h"
@@ -67,7 +69,7 @@ static void action_fifo_init(struct action_fifo *fifo)
fifo->tail = 0;
}
-static bool action_fifo_is_empty(struct action_fifo *fifo)
+static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
return (fifo->head == fifo->tail);
}
@@ -90,7 +92,7 @@ static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
/* Return true if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
- struct sw_flow_key *key,
+ const struct sw_flow_key *key,
const struct nlattr *attr)
{
struct action_fifo *fifo;
@@ -107,100 +109,131 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
return da;
}
-static int make_writable(struct sk_buff *skb, int write_len)
+static void invalidate_flow_key(struct sw_flow_key *key)
+{
+ key->eth.type = htons(0);
+}
+
+static bool is_flow_key_valid(const struct sw_flow_key *key)
{
- if (!pskb_may_pull(skb, write_len))
+ return !!key->eth.type;
+}
+
+static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_action_push_mpls *mpls)
+{
+ __be32 *new_mpls_lse;
+ struct ethhdr *hdr;
+
+ /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
+ if (skb->encapsulation)
+ return -ENOTSUPP;
+
+ if (skb_cow_head(skb, MPLS_HLEN) < 0)
return -ENOMEM;
- if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
- return 0;
+ skb_push(skb, MPLS_HLEN);
+ memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
+ skb->mac_len);
+ skb_reset_mac_header(skb);
+
+ new_mpls_lse = (__be32 *)skb_mpls_header(skb);
+ *new_mpls_lse = mpls->mpls_lse;
- return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+ MPLS_HLEN, 0));
+
+ hdr = eth_hdr(skb);
+ hdr->h_proto = mpls->mpls_ethertype;
+
+ skb_set_inner_protocol(skb, skb->protocol);
+ skb->protocol = mpls->mpls_ethertype;
+
+ invalidate_flow_key(key);
+ return 0;
}
-/* remove VLAN header from packet and update csum accordingly. */
-static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const __be16 ethertype)
{
- struct vlan_hdr *vhdr;
+ struct ethhdr *hdr;
int err;
- err = make_writable(skb, VLAN_ETH_HLEN);
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(skb->data
- + (2 * ETH_ALEN), VLAN_HLEN, 0));
+ skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
- vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
- *current_tci = vhdr->h_vlan_TCI;
+ memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
+ skb->mac_len);
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
- __skb_pull(skb, VLAN_HLEN);
+ __skb_pull(skb, MPLS_HLEN);
+ skb_reset_mac_header(skb);
- vlan_set_encap_proto(skb, vhdr);
- skb->mac_header += VLAN_HLEN;
- if (skb_network_offset(skb) < ETH_HLEN)
- skb_set_network_header(skb, ETH_HLEN);
- skb_reset_mac_len(skb);
+ /* skb_mpls_header() is used to locate the ethertype
+ * field correctly in the presence of VLAN tags.
+ */
+ hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
+ hdr->h_proto = ethertype;
+ if (eth_p_mpls(skb->protocol))
+ skb->protocol = ethertype;
+ invalidate_flow_key(key);
return 0;
}
-static int pop_vlan(struct sk_buff *skb)
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const __be32 *mpls_lse)
{
- __be16 tci;
+ __be32 *stack;
int err;
- if (likely(vlan_tx_tag_present(skb))) {
- skb->vlan_tci = 0;
- } else {
- if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
- skb->len < VLAN_ETH_HLEN))
- return 0;
-
- err = __pop_vlan_tci(skb, &tci);
- if (err)
- return err;
- }
- /* move next vlan tag to hw accel tag */
- if (likely(skb->protocol != htons(ETH_P_8021Q) ||
- skb->len < VLAN_ETH_HLEN))
- return 0;
-
- err = __pop_vlan_tci(skb, &tci);
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
+ stack = (__be32 *)skb_mpls_header(skb);
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __be32 diff[] = { ~(*stack), *mpls_lse };
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+ ~skb->csum);
+ }
+
+ *stack = *mpls_lse;
+ key->mpls.top_lse = *mpls_lse;
return 0;
}
-static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 current_tag;
-
- /* push down current VLAN tag */
- current_tag = vlan_tx_tag_get(skb);
-
- if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
- return -ENOMEM;
+ int err;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(skb->csum, csum_partial(skb->data
- + (2 * ETH_ALEN), VLAN_HLEN, 0));
+ err = skb_vlan_pop(skb);
+ if (vlan_tx_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = 0;
+ return err;
+}
- }
- __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
- return 0;
+static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_action_push_vlan *vlan)
+{
+ if (vlan_tx_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = vlan->vlan_tci;
+ return skb_vlan_push(skb, vlan->vlan_tpid,
+ ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
-static int set_eth_addr(struct sk_buff *skb,
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_key_ethernet *eth_key)
{
int err;
- err = make_writable(skb, ETH_HLEN);
+ err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
@@ -211,11 +244,13 @@ static int set_eth_addr(struct sk_buff *skb,
ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+ ether_addr_copy(key->eth.src, eth_key->eth_src);
+ ether_addr_copy(key->eth.dst, eth_key->eth_dst);
return 0;
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
- __be32 *addr, __be32 new_addr)
+ __be32 *addr, __be32 new_addr)
{
int transport_len = skb->len - skb_transport_offset(skb);
@@ -298,42 +333,52 @@ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
nh->ttl = new_ttl;
}
-static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_ipv4 *ipv4_key)
{
struct iphdr *nh;
int err;
- err = make_writable(skb, skb_network_offset(skb) +
- sizeof(struct iphdr));
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct iphdr));
if (unlikely(err))
return err;
nh = ip_hdr(skb);
- if (ipv4_key->ipv4_src != nh->saddr)
+ if (ipv4_key->ipv4_src != nh->saddr) {
set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+ key->ipv4.addr.src = ipv4_key->ipv4_src;
+ }
- if (ipv4_key->ipv4_dst != nh->daddr)
+ if (ipv4_key->ipv4_dst != nh->daddr) {
set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+ key->ipv4.addr.dst = ipv4_key->ipv4_dst;
+ }
- if (ipv4_key->ipv4_tos != nh->tos)
+ if (ipv4_key->ipv4_tos != nh->tos) {
ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+ key->ip.tos = nh->tos;
+ }
- if (ipv4_key->ipv4_ttl != nh->ttl)
+ if (ipv4_key->ipv4_ttl != nh->ttl) {
set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+ key->ip.ttl = ipv4_key->ipv4_ttl;
+ }
return 0;
}
-static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_ipv6 *ipv6_key)
{
struct ipv6hdr *nh;
int err;
__be32 *saddr;
__be32 *daddr;
- err = make_writable(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr));
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct ipv6hdr));
if (unlikely(err))
return err;
@@ -341,9 +386,12 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
saddr = (__be32 *)&nh->saddr;
daddr = (__be32 *)&nh->daddr;
- if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
+ if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
ipv6_key->ipv6_src, true);
+ memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
+ sizeof(ipv6_key->ipv6_src));
+ }
if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
unsigned int offset = 0;
@@ -357,16 +405,22 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
ipv6_key->ipv6_dst, recalc_csum);
+ memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
+ sizeof(ipv6_key->ipv6_dst));
}
set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
+ key->ip.tos = ipv6_get_dsfield(nh);
+
set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
- nh->hop_limit = ipv6_key->ipv6_hlimit;
+ key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+ nh->hop_limit = ipv6_key->ipv6_hlimit;
+ key->ip.ttl = ipv6_key->ipv6_hlimit;
return 0;
}
-/* Must follow make_writable() since that can move the skb data. */
+/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
@@ -390,54 +444,64 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
}
}
-static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_udp *udp_port_key)
{
struct udphdr *uh;
int err;
- err = make_writable(skb, skb_transport_offset(skb) +
- sizeof(struct udphdr));
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
if (unlikely(err))
return err;
uh = udp_hdr(skb);
- if (udp_port_key->udp_src != uh->source)
+ if (udp_port_key->udp_src != uh->source) {
set_udp_port(skb, &uh->source, udp_port_key->udp_src);
+ key->tp.src = udp_port_key->udp_src;
+ }
- if (udp_port_key->udp_dst != uh->dest)
+ if (udp_port_key->udp_dst != uh->dest) {
set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
+ key->tp.dst = udp_port_key->udp_dst;
+ }
return 0;
}
-static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_tcp *tcp_port_key)
{
struct tcphdr *th;
int err;
- err = make_writable(skb, skb_transport_offset(skb) +
- sizeof(struct tcphdr));
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct tcphdr));
if (unlikely(err))
return err;
th = tcp_hdr(skb);
- if (tcp_port_key->tcp_src != th->source)
+ if (tcp_port_key->tcp_src != th->source) {
set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+ key->tp.src = tcp_port_key->tcp_src;
+ }
- if (tcp_port_key->tcp_dst != th->dest)
+ if (tcp_port_key->tcp_dst != th->dest) {
set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+ key->tp.dst = tcp_port_key->tcp_dst;
+ }
return 0;
}
-static int set_sctp(struct sk_buff *skb,
- const struct ovs_key_sctp *sctp_port_key)
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_sctp *sctp_port_key)
{
struct sctphdr *sh;
int err;
unsigned int sctphoff = skb_transport_offset(skb);
- err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+ err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
if (unlikely(err))
return err;
@@ -458,39 +522,35 @@ static int set_sctp(struct sk_buff *skb,
sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
skb_clear_hash(skb);
+ key->tp.src = sctp_port_key->sctp_src;
+ key->tp.dst = sctp_port_key->sctp_dst;
}
return 0;
}
-static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
- struct vport *vport;
-
- if (unlikely(!skb))
- return -ENOMEM;
+ struct vport *vport = ovs_vport_rcu(dp, out_port);
- vport = ovs_vport_rcu(dp, out_port);
- if (unlikely(!vport)) {
+ if (likely(vport))
+ ovs_vport_send(vport, skb);
+ else
kfree_skb(skb);
- return -ENODEV;
- }
-
- ovs_vport_send(vport, skb);
- return 0;
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr)
{
+ struct ovs_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
upcall.cmd = OVS_PACKET_CMD_ACTION;
- upcall.key = key;
upcall.userdata = NULL;
upcall.portid = 0;
+ upcall.egress_tun_info = NULL;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
@@ -502,15 +562,27 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
case OVS_USERSPACE_ATTR_PID:
upcall.portid = nla_get_u32(a);
break;
+
+ case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
+ /* Get egress tunnel info. */
+ struct vport *vport;
+
+ vport = ovs_vport_rcu(dp, nla_get_u32(a));
+ if (vport) {
+ int err;
+
+ err = ovs_vport_get_egress_tun_info(vport, skb,
+ &info);
+ if (!err)
+ upcall.egress_tun_info = &info;
+ }
+ break;
}
- }
- return ovs_dp_upcall(dp, skb, &upcall);
-}
+ } /* End of switch. */
+ }
-static bool last_action(const struct nlattr *a, int rem)
-{
- return a->nla_len == rem;
+ return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
@@ -547,7 +619,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
* user space. This skb will be consumed by its caller.
*/
if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
- last_action(a, rem)))
+ nla_is_last(a, rem)))
return output_userspace(dp, skb, key, a);
skb = skb_clone(skb, GFP_ATOMIC);
@@ -580,18 +652,20 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
key->ovs_flow_hash = hash;
}
-static int execute_set_action(struct sk_buff *skb,
- const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct nlattr *nested_attr)
{
int err = 0;
switch (nla_type(nested_attr)) {
case OVS_KEY_ATTR_PRIORITY:
skb->priority = nla_get_u32(nested_attr);
+ key->phy.priority = skb->priority;
break;
case OVS_KEY_ATTR_SKB_MARK:
skb->mark = nla_get_u32(nested_attr);
+ key->phy.skb_mark = skb->mark;
break;
case OVS_KEY_ATTR_TUNNEL_INFO:
@@ -599,27 +673,31 @@ static int execute_set_action(struct sk_buff *skb,
break;
case OVS_KEY_ATTR_ETHERNET:
- err = set_eth_addr(skb, nla_data(nested_attr));
+ err = set_eth_addr(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_IPV4:
- err = set_ipv4(skb, nla_data(nested_attr));
+ err = set_ipv4(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_IPV6:
- err = set_ipv6(skb, nla_data(nested_attr));
+ err = set_ipv6(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp(skb, nla_data(nested_attr));
+ err = set_tcp(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp(skb, nla_data(nested_attr));
+ err = set_udp(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_SCTP:
- err = set_sctp(skb, nla_data(nested_attr));
+ err = set_sctp(skb, key, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_MPLS:
+ err = set_mpls(skb, key, nla_data(nested_attr));
break;
}
@@ -631,13 +709,17 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
const struct nlattr *a, int rem)
{
struct deferred_action *da;
- int err;
- err = ovs_flow_key_update(skb, key);
- if (err)
- return err;
+ if (!is_flow_key_valid(key)) {
+ int err;
+
+ err = ovs_flow_key_update(skb, key);
+ if (err)
+ return err;
+ }
+ BUG_ON(!is_flow_key_valid(key));
- if (!last_action(a, rem)) {
+ if (!nla_is_last(a, rem)) {
/* Recirc action is not the last action
* of the action list, need to clone the skb.
*/
@@ -672,7 +754,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
/* Every output action needs a separate clone of 'skb', but the common
* case is just a single output action, so that doing a clone and
* then freeing the original skbuff is wasteful. So the following code
- * is slightly obscure just to avoid that. */
+ * is slightly obscure just to avoid that.
+ */
int prev_port = -1;
const struct nlattr *a;
int rem;
@@ -681,8 +764,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
a = nla_next(a, &rem)) {
int err = 0;
- if (prev_port != -1) {
- do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+ if (unlikely(prev_port != -1)) {
+ struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (out_skb)
+ do_output(dp, out_skb, prev_port);
+
prev_port = -1;
}
@@ -699,19 +786,25 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
execute_hash(skb, key, a);
break;
+ case OVS_ACTION_ATTR_PUSH_MPLS:
+ err = push_mpls(skb, key, nla_data(a));
+ break;
+
+ case OVS_ACTION_ATTR_POP_MPLS:
+ err = pop_mpls(skb, key, nla_get_be16(a));
+ break;
+
case OVS_ACTION_ATTR_PUSH_VLAN:
- err = push_vlan(skb, nla_data(a));
- if (unlikely(err)) /* skb already freed. */
- return err;
+ err = push_vlan(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_VLAN:
- err = pop_vlan(skb);
+ err = pop_vlan(skb, key);
break;
case OVS_ACTION_ATTR_RECIRC:
err = execute_recirc(dp, skb, key, a, rem);
- if (last_action(a, rem)) {
+ if (nla_is_last(a, rem)) {
/* If this is the last action, the skb has
* been consumed or freed.
* Return immediately.
@@ -721,7 +814,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
break;
case OVS_ACTION_ATTR_SET:
- err = execute_set_action(skb, nla_data(a));
+ err = execute_set_action(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_SAMPLE:
@@ -771,14 +864,12 @@ static void process_deferred_actions(struct datapath *dp)
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
int level = this_cpu_read(exec_actions_level);
- struct sw_flow_actions *acts;
int err;
- acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
-
this_cpu_inc(exec_actions_level);
OVS_CB(skb)->egress_tun_info = NULL;
err = do_execute_actions(dp, skb, key,
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f9e556b56086..332b5a031739 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -59,6 +59,7 @@
#include "vport-netdev.h"
int ovs_net_id __read_mostly;
+EXPORT_SYMBOL_GPL(ovs_net_id);
static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
@@ -130,27 +131,41 @@ int lockdep_ovsl_is_held(void)
else
return 1;
}
+EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
+ const struct sw_flow_key *,
const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
+ const struct sw_flow_key *,
const struct dp_upcall_info *);
-/* Must be called with rcu_read_lock or ovs_mutex. */
-static struct datapath *get_dp(struct net *net, int dp_ifindex)
+/* Must be called with rcu_read_lock. */
+static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
- struct datapath *dp = NULL;
- struct net_device *dev;
+ struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, dp_ifindex);
if (dev) {
struct vport *vport = ovs_internal_dev_get_vport(dev);
if (vport)
- dp = vport->dp;
+ return vport->dp;
}
+
+ return NULL;
+}
+
+/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
+ * returned dp pointer valid.
+ */
+static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
+{
+ struct datapath *dp;
+
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
+ rcu_read_lock();
+ dp = get_dp_rcu(net, dp_ifindex);
rcu_read_unlock();
return dp;
@@ -163,7 +178,7 @@ const char *ovs_dp_name(const struct datapath *dp)
return vport->ops->get_name(vport);
}
-static int get_dpifindex(struct datapath *dp)
+static int get_dpifindex(const struct datapath *dp)
{
struct vport *local;
int ifindex;
@@ -185,6 +200,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
+ ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -243,6 +259,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
+ struct sw_flow_actions *sf_acts;
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
@@ -256,10 +273,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
int error;
upcall.cmd = OVS_PACKET_CMD_MISS;
- upcall.key = key;
upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
- error = ovs_dp_upcall(dp, skb, &upcall);
+ upcall.egress_tun_info = NULL;
+ error = ovs_dp_upcall(dp, skb, key, &upcall);
if (unlikely(error))
kfree_skb(skb);
else
@@ -268,10 +285,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
goto out;
}
- OVS_CB(skb)->flow = flow;
+ ovs_flow_stats_update(flow, key->tp.flags, skb);
+ sf_acts = rcu_dereference(flow->sf_acts);
+ ovs_execute_actions(dp, skb, sf_acts, key);
- ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb);
- ovs_execute_actions(dp, skb, key);
stats_counter = &stats->n_hit;
out:
@@ -283,6 +300,7 @@ out:
}
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
@@ -294,9 +312,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
}
if (!skb_is_gso(skb))
- err = queue_userspace_packet(dp, skb, upcall_info);
+ err = queue_userspace_packet(dp, skb, key, upcall_info);
else
- err = queue_gso_packets(dp, skb, upcall_info);
+ err = queue_gso_packets(dp, skb, key, upcall_info);
if (err)
goto err;
@@ -313,39 +331,43 @@ err:
}
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info)
{
unsigned short gso_type = skb_shinfo(skb)->gso_type;
- struct dp_upcall_info later_info;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
+ struct ovs_skb_cb ovs_cb;
int err;
+ ovs_cb = *OVS_CB(skb);
segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+ *OVS_CB(skb) = ovs_cb;
if (IS_ERR(segs))
return PTR_ERR(segs);
if (segs == NULL)
return -EINVAL;
+ if (gso_type & SKB_GSO_UDP) {
+ /* The initial flow key extracted by ovs_flow_key_extract()
+ * in this case is for a first fragment, so we need to
+ * properly mark later fragments.
+ */
+ later_key = *key;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+ }
+
/* Queue all of the segments. */
skb = segs;
do {
- err = queue_userspace_packet(dp, skb, upcall_info);
+ *OVS_CB(skb) = ovs_cb;
+ if (gso_type & SKB_GSO_UDP && skb != segs)
+ key = &later_key;
+
+ err = queue_userspace_packet(dp, skb, key, upcall_info);
if (err)
break;
- if (skb == segs && gso_type & SKB_GSO_UDP) {
- /* The initial flow key extracted by ovs_flow_extract()
- * in this case is for a first fragment, so we need to
- * properly mark later fragments.
- */
- later_key = *upcall_info->key;
- later_key.ip.frag = OVS_FRAG_TYPE_LATER;
-
- later_info = *upcall_info;
- later_info.key = &later_key;
- upcall_info = &later_info;
- }
} while ((skb = skb->next));
/* Free all of the segments. */
@@ -360,46 +382,26 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
return err;
}
-static size_t key_attr_size(void)
-{
- return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
- + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
- + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
- + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
- + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
- + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
- + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
- + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
- + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
- + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
- + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
- + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
- + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
- + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */
- + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
- + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
- + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
- + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
- + nla_total_size(28); /* OVS_KEY_ATTR_ND */
-}
-
-static size_t upcall_msg_size(const struct nlattr *userdata,
+static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
unsigned int hdrlen)
{
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
- + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
+ + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
/* OVS_PACKET_ATTR_USERDATA */
- if (userdata)
- size += NLA_ALIGN(userdata->nla_len);
+ if (upcall_info->userdata)
+ size += NLA_ALIGN(upcall_info->userdata->nla_len);
+
+ /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
+ if (upcall_info->egress_tun_info)
+ size += nla_total_size(ovs_tun_key_attr_size());
return size;
}
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info)
{
struct ovs_header *upcall;
@@ -423,11 +425,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (!nskb)
return -ENOMEM;
- nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
+ nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;
- nskb->vlan_tci = 0;
skb = nskb;
}
@@ -450,7 +451,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
else
hlen = skb->len;
- len = upcall_msg_size(upcall_info->userdata, hlen);
+ len = upcall_msg_size(upcall_info, hlen);
user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
@@ -462,7 +463,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+ err = ovs_nla_put_flow(key, key, user_skb);
BUG_ON(err);
nla_nest_end(user_skb, nla);
@@ -471,6 +472,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));
+ if (upcall_info->egress_tun_info) {
+ nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+ err = ovs_nla_put_egress_tunnel_key(user_skb,
+ upcall_info->egress_tun_info);
+ BUG_ON(err);
+ nla_nest_end(user_skb, nla);
+ }
+
/* Only reserve room for attribute header, packet data is added
* in skb_zerocopy() */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -510,11 +519,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_actions *acts;
struct sk_buff *packet;
struct sw_flow *flow;
+ struct sw_flow_actions *sf_acts;
struct datapath *dp;
struct ethhdr *eth;
struct vport *input_vport;
int len;
int err;
+ bool log = !a[OVS_FLOW_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -548,29 +559,22 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
goto err_kfree_skb;
err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
- &flow->key);
+ &flow->key, log);
if (err)
goto err_flow_free;
- acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
- err = PTR_ERR(acts);
- if (IS_ERR(acts))
- goto err_flow_free;
-
err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
- &flow->key, 0, &acts);
+ &flow->key, &acts, log);
if (err)
goto err_flow_free;
rcu_assign_pointer(flow->sf_acts, acts);
-
OVS_CB(packet)->egress_tun_info = NULL;
- OVS_CB(packet)->flow = flow;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
rcu_read_lock();
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
goto err_unlock;
@@ -583,9 +587,10 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
OVS_CB(packet)->input_vport = input_vport;
+ sf_acts = rcu_dereference(flow->sf_acts);
local_bh_disable();
- err = ovs_execute_actions(dp, packet, &flow->key);
+ err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
local_bh_enable();
rcu_read_unlock();
@@ -628,7 +633,7 @@ static struct genl_family dp_packet_genl_family = {
.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
struct ovs_dp_megaflow_stats *mega_stats)
{
int i;
@@ -662,8 +667,8 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
return NLMSG_ALIGN(sizeof(struct ovs_header))
- + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
- + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+ + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -671,58 +676,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
}
/* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
- struct sk_buff *skb, u32 portid,
- u32 seq, u32 flags, u8 cmd)
+static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
+ struct sk_buff *skb)
{
- const int skb_orig_len = skb->len;
- struct nlattr *start;
- struct ovs_flow_stats stats;
- __be16 tcp_flags;
- unsigned long used;
- struct ovs_header *ovs_header;
struct nlattr *nla;
int err;
- ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
- if (!ovs_header)
- return -EMSGSIZE;
-
- ovs_header->dp_ifindex = dp_ifindex;
-
/* Fill flow key. */
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
- goto nla_put_failure;
+ return -EMSGSIZE;
err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
if (err)
- goto error;
+ return err;
+
nla_nest_end(skb, nla);
+ /* Fill flow mask. */
nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
if (!nla)
- goto nla_put_failure;
+ return -EMSGSIZE;
err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
if (err)
- goto error;
+ return err;
nla_nest_end(skb, nla);
+ return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
+ struct sk_buff *skb)
+{
+ struct ovs_flow_stats stats;
+ __be16 tcp_flags;
+ unsigned long used;
ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
if (used &&
nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
- goto nla_put_failure;
+ return -EMSGSIZE;
if (stats.n_packets &&
nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
- goto nla_put_failure;
+ return -EMSGSIZE;
if ((u8)ntohs(tcp_flags) &&
nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
- goto nla_put_failure;
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
+ struct sk_buff *skb, int skb_orig_len)
+{
+ struct nlattr *start;
+ int err;
/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
* this is the first flow to be dumped into 'skb'. This is unusual for
@@ -746,17 +760,47 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
nla_nest_end(skb, start);
else {
if (skb_orig_len)
- goto error;
+ return err;
nla_nest_cancel(skb, start);
}
- } else if (skb_orig_len)
- goto nla_put_failure;
+ } else if (skb_orig_len) {
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
+ struct sk_buff *skb, u32 portid,
+ u32 seq, u32 flags, u8 cmd)
+{
+ const int skb_orig_len = skb->len;
+ struct ovs_header *ovs_header;
+ int err;
+
+ ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
+ flags, cmd);
+ if (!ovs_header)
+ return -EMSGSIZE;
+
+ ovs_header->dp_ifindex = dp_ifindex;
+
+ err = ovs_flow_cmd_fill_match(flow, skb);
+ if (err)
+ goto error;
+
+ err = ovs_flow_cmd_fill_stats(flow, skb);
+ if (err)
+ goto error;
+
+ err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+ if (err)
+ goto error;
return genlmsg_end(skb, ovs_header);
-nla_put_failure:
- err = -EMSGSIZE;
error:
genlmsg_cancel(skb, ovs_header);
return err;
@@ -811,13 +855,18 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_actions *acts;
struct sw_flow_match match;
int error;
+ bool log = !a[OVS_FLOW_ATTR_PROBE];
/* Must have key and actions. */
error = -EINVAL;
- if (!a[OVS_FLOW_ATTR_KEY])
+ if (!a[OVS_FLOW_ATTR_KEY]) {
+ OVS_NLERR(log, "Flow key attr not present in new flow.");
goto error;
- if (!a[OVS_FLOW_ATTR_ACTIONS])
+ }
+ if (!a[OVS_FLOW_ATTR_ACTIONS]) {
+ OVS_NLERR(log, "Flow actions attr not present in new flow.");
goto error;
+ }
/* Most of the time we need to allocate a new flow, do it before
* locking.
@@ -830,24 +879,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
/* Extract key. */
ovs_match_init(&match, &new_flow->unmasked_key, &mask);
- error = ovs_nla_get_match(&match,
- a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+ error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+ a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
/* Validate actions. */
- acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
- error = PTR_ERR(acts);
- if (IS_ERR(acts))
- goto err_kfree_flow;
-
error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
- 0, &acts);
+ &acts, log);
if (error) {
- OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
- goto err_kfree_acts;
+ OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
+ goto err_kfree_flow;
}
reply = ovs_flow_cmd_alloc_info(acts, info, false);
@@ -899,6 +943,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* The unmasked key has to be the same for flow updates. */
if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
+ /* Look for any overlapping flow. */
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (!flow) {
error = -ENOENT;
@@ -938,23 +983,21 @@ error:
return error;
}
+/* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
const struct sw_flow_key *key,
- const struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask,
+ bool log)
{
struct sw_flow_actions *acts;
struct sw_flow_key masked_key;
int error;
- acts = ovs_nla_alloc_flow_actions(nla_len(a));
- if (IS_ERR(acts))
- return acts;
-
ovs_flow_mask_key(&masked_key, key, mask);
- error = ovs_nla_copy_actions(a, &masked_key, 0, &acts);
+ error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
if (error) {
- OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
- kfree(acts);
+ OVS_NLERR(log,
+ "Actions may not be safe on all matching packets");
return ERR_PTR(error);
}
@@ -973,29 +1016,31 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_actions *old_acts = NULL, *acts = NULL;
struct sw_flow_match match;
int error;
+ bool log = !a[OVS_FLOW_ATTR_PROBE];
/* Extract key. */
error = -EINVAL;
- if (!a[OVS_FLOW_ATTR_KEY])
+ if (!a[OVS_FLOW_ATTR_KEY]) {
+ OVS_NLERR(log, "Flow key attribute not present in set flow.");
goto error;
+ }
ovs_match_init(&match, &key, &mask);
- error = ovs_nla_get_match(&match,
- a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+ error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+ a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto error;
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
- acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask);
+ acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
+ log);
if (IS_ERR(acts)) {
error = PTR_ERR(acts);
goto error;
}
- }
- /* Can allocate before locking if have acts. */
- if (acts) {
+ /* Can allocate before locking if have acts. */
reply = ovs_flow_cmd_alloc_info(acts, info, false);
if (IS_ERR(reply)) {
error = PTR_ERR(reply);
@@ -1070,14 +1115,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct sw_flow_match match;
int err;
+ bool log = !a[OVS_FLOW_ATTR_PROBE];
if (!a[OVS_FLOW_ATTR_KEY]) {
- OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
+ OVS_NLERR(log,
+ "Flow get message rejected, Key attribute missing.");
return -EINVAL;
}
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log);
if (err)
return err;
@@ -1118,10 +1165,12 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct sw_flow_match match;
int err;
+ bool log = !a[OVS_FLOW_ATTR_PROBE];
if (likely(a[OVS_FLOW_ATTR_KEY])) {
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+ err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+ log);
if (unlikely(err))
return err;
}
@@ -1179,7 +1228,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct datapath *dp;
rcu_read_lock();
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
rcu_read_unlock();
return -ENODEV;
@@ -1211,8 +1260,10 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+ [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+ [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
};
static const struct genl_ops dp_flow_genl_ops[] = {
@@ -1313,7 +1364,7 @@ static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
- struct ovs_header *ovs_header,
+ const struct ovs_header *ovs_header,
struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
struct datapath *dp;
@@ -1341,7 +1392,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
dp->user_features = 0;
}
-static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
if (a[OVS_DP_ATTR_USER_FEATURES])
dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
@@ -1442,7 +1493,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(&dp->table, false);
+ ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -1474,8 +1525,6 @@ static void __dp_destroy(struct datapath *dp)
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
/* RCU destroy the flow table */
- ovs_flow_tbl_destroy(&dp->table, true);
-
call_rcu(&dp->rcu, destroy_dp_rcu);
}
@@ -1707,7 +1756,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
- struct ovs_header *ovs_header,
+ const struct ovs_header *ovs_header,
struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
struct datapath *dp;
@@ -1764,6 +1813,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
+restart:
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
@@ -1795,8 +1845,11 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
vport = new_vport(&parms);
err = PTR_ERR(vport);
- if (IS_ERR(vport))
+ if (IS_ERR(vport)) {
+ if (err == -EAGAIN)
+ goto restart;
goto exit_unlock_free;
+ }
err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
info->snd_seq, 0, OVS_VPORT_CMD_NEW);
@@ -1939,7 +1992,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
int i, j = 0;
rcu_read_lock();
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
rcu_read_unlock();
return -ENODEV;
@@ -2112,12 +2165,18 @@ static int __init dp_init(void)
if (err)
goto error_netns_exit;
+ err = ovs_netdev_init();
+ if (err)
+ goto error_unreg_notifier;
+
err = dp_register_genl();
if (err < 0)
- goto error_unreg_notifier;
+ goto error_unreg_netdev;
return 0;
+error_unreg_netdev:
+ ovs_netdev_exit();
error_unreg_notifier:
unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
@@ -2137,6 +2196,7 @@ error:
static void dp_cleanup(void)
{
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+ ovs_netdev_exit();
unregister_netdevice_notifier(&ovs_dp_device_notifier);
unregister_pernet_device(&ovs_net_ops);
rcu_barrier();
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 974135439c5c..3ece94563079 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -94,14 +94,12 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
- * @flow: The flow associated with this packet. May be %NULL if no flow.
* @egress_tun_info: Tunnel information about this packet on egress path.
* NULL if the packet is not being tunneled.
* @input_vport: The original vport the packet came in on. This value is cached
* when a packet is received by OVS.
*/
struct ovs_skb_cb {
- struct sw_flow *flow;
struct ovs_tunnel_info *egress_tun_info;
struct vport *input_vport;
};
@@ -110,18 +108,18 @@ struct ovs_skb_cb {
/**
* struct dp_upcall_info - metadata to include with a packet to send to userspace
* @cmd: One of %OVS_PACKET_CMD_*.
- * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull.
* @userdata: If nonnull, its variable-length value is passed to userspace as
* %OVS_PACKET_ATTR_USERDATA.
- * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no
- * packet is sent and the packet is accounted in the datapath's @n_lost
+ * @portid: Netlink portid to which packet should be sent. If @portid is 0
+ * then no packet is sent and the packet is accounted in the datapath's @n_lost
* counter.
+ * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
*/
struct dp_upcall_info {
- u8 cmd;
- const struct sw_flow_key *key;
+ const struct ovs_tunnel_info *egress_tun_info;
const struct nlattr *userdata;
u32 portid;
+ u8 cmd;
};
/**
@@ -151,7 +149,7 @@ int lockdep_ovsl_is_held(void);
#define rcu_dereference_ovsl(p) \
rcu_dereference_check(p, lockdep_ovsl_is_held())
-static inline struct net *ovs_dp_get_net(struct datapath *dp)
+static inline struct net *ovs_dp_get_net(const struct datapath *dp)
{
return read_pnet(&dp->net);
}
@@ -187,23 +185,23 @@ extern struct genl_family dp_vport_genl_family;
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
- const struct dp_upcall_info *);
+ const struct sw_flow_key *, const struct dp_upcall_info *);
const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_key *);
+ const struct sw_flow_actions *, struct sw_flow_key *);
void ovs_dp_notify_wq(struct work_struct *work);
int action_fifos_init(void);
void action_fifos_exit(void);
-#define OVS_NLERR(fmt, ...) \
+#define OVS_NLERR(logging_allowed, fmt, ...) \
do { \
- if (net_ratelimit()) \
- pr_info("netlink: " fmt, ##__VA_ARGS__); \
+ if (logging_allowed && net_ratelimit()) \
+ pr_info("netlink: " fmt "\n", ##__VA_ARGS__); \
} while (0)
#endif /* datapath.h */
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2b78789ea7c5..70bef2ab7f2b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -32,6 +32,7 @@
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
@@ -42,6 +43,7 @@
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
+#include <net/mpls.h>
#include <net/ndisc.h>
#include "datapath.h"
@@ -64,7 +66,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct flow_stats *stats;
int node = numa_node_id();
@@ -480,6 +482,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
return -ENOMEM;
skb_reset_network_header(skb);
+ skb_reset_mac_len(skb);
__skb_push(skb, skb->data - skb_mac_header(skb));
/* Network layer. */
@@ -584,6 +587,33 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv4, 0, sizeof(key->ipv4));
}
+ } else if (eth_p_mpls(key->eth.type)) {
+ size_t stack_len = MPLS_HLEN;
+
+ /* In the presence of an MPLS label stack the end of the L2
+ * header and the beginning of the L3 header differ.
+ *
+ * Advance network_header to the beginning of the L3
+ * header. mac_len corresponds to the end of the L2 header.
+ */
+ while (1) {
+ __be32 lse;
+
+ error = check_header(skb, skb->mac_len + stack_len);
+ if (unlikely(error))
+ return 0;
+
+ memcpy(&lse, skb_network_header(skb), MPLS_HLEN);
+
+ if (stack_len == MPLS_HLEN)
+ memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);
+
+ skb_set_network_header(skb, skb->mac_len + stack_len);
+ if (lse & htonl(MPLS_LS_S_MASK))
+ break;
+
+ stack_len += MPLS_HLEN;
+ }
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
@@ -649,7 +679,7 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
return key_extract(skb, key);
}
-int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
/* Extract metadata from packet. */
@@ -682,12 +712,12 @@ int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
struct sk_buff *skb,
- struct sw_flow_key *key)
+ struct sw_flow_key *key, bool log)
{
int err;
/* Extract metadata from netlink attributes. */
- err = ovs_nla_get_flow_metadata(attr, key);
+ err = ovs_nla_get_flow_metadata(attr, key, log);
if (err)
return err;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 71813318c8c7..a8b30f334388 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -37,8 +37,8 @@ struct sk_buff;
/* Used to memset ovs_key_ipv4_tunnel padding. */
#define OVS_TUNNEL_KEY_SIZE \
- (offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) + \
- FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl))
+ (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) + \
+ FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
struct ovs_key_ipv4_tunnel {
__be64 tun_id;
@@ -47,11 +47,13 @@ struct ovs_key_ipv4_tunnel {
__be16 tun_flags;
u8 ipv4_tos;
u8 ipv4_ttl;
+ __be16 tp_src;
+ __be16 tp_dst;
} __packed __aligned(4); /* Minimize padding. */
struct ovs_tunnel_info {
struct ovs_key_ipv4_tunnel tunnel;
- struct geneve_opt *options;
+ const struct geneve_opt *options;
u8 options_len;
};
@@ -64,27 +66,59 @@ struct ovs_tunnel_info {
FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
opt_len))
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
- const struct iphdr *iph,
- __be64 tun_id, __be16 tun_flags,
- struct geneve_opt *opts,
- u8 opts_len)
+static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
+ __be32 saddr, __be32 daddr,
+ u8 tos, u8 ttl,
+ __be16 tp_src,
+ __be16 tp_dst,
+ __be64 tun_id,
+ __be16 tun_flags,
+ const struct geneve_opt *opts,
+ u8 opts_len)
{
tun_info->tunnel.tun_id = tun_id;
- tun_info->tunnel.ipv4_src = iph->saddr;
- tun_info->tunnel.ipv4_dst = iph->daddr;
- tun_info->tunnel.ipv4_tos = iph->tos;
- tun_info->tunnel.ipv4_ttl = iph->ttl;
+ tun_info->tunnel.ipv4_src = saddr;
+ tun_info->tunnel.ipv4_dst = daddr;
+ tun_info->tunnel.ipv4_tos = tos;
+ tun_info->tunnel.ipv4_ttl = ttl;
tun_info->tunnel.tun_flags = tun_flags;
- /* clear struct padding. */
- memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
- sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
+ /* For the tunnel types on top of IPsec, the tp_src and tp_dst of
+ * the upper tunnel are used.
+ * E.g. for GRE over IPsec, the tp_src and tp_dst are zero.
+ */
+ tun_info->tunnel.tp_src = tp_src;
+ tun_info->tunnel.tp_dst = tp_dst;
+
+ /* Clear struct padding. */
+ if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
+ memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
+ 0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
tun_info->options = opts;
tun_info->options_len = opts_len;
}
+static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
+ const struct iphdr *iph,
+ __be16 tp_src,
+ __be16 tp_dst,
+ __be64 tun_id,
+ __be16 tun_flags,
+ const struct geneve_opt *opts,
+ u8 opts_len)
+{
+ __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
+ iph->tos, iph->ttl,
+ tp_src, tp_dst,
+ tun_id, tun_flags,
+ opts, opts_len);
+}
+
+#define OVS_SW_FLOW_KEY_METADATA_SIZE \
+ (offsetof(struct sw_flow_key, recirc_id) + \
+ FIELD_SIZEOF(struct sw_flow_key, recirc_id))
+
struct sw_flow_key {
u8 tun_opts[255];
u8 tun_opts_len;
@@ -102,12 +136,17 @@ struct sw_flow_key {
__be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
__be16 type; /* Ethernet frame type. */
} eth;
- struct {
- u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
- u8 tos; /* IP ToS. */
- u8 ttl; /* IP TTL/hop limit. */
- u8 frag; /* One of OVS_FRAG_TYPE_*. */
- } ip;
+ union {
+ struct {
+ __be32 top_lse; /* top label stack entry */
+ } mpls;
+ struct {
+ u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
+ u8 tos; /* IP ToS. */
+ u8 ttl; /* IP TTL/hop limit. */
+ u8 frag; /* One of OVS_FRAG_TYPE_*. */
+ } ip;
+ };
struct {
__be16 src; /* TCP/UDP/SCTP source port. */
__be16 dst; /* TCP/UDP/SCTP destination port. */
@@ -205,18 +244,19 @@ struct arp_eth_header {
} __packed;
void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
- struct sk_buff *);
+ const struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
unsigned long *used, __be16 *tcp_flags);
void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb,
+int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+ struct sk_buff *skb,
struct sw_flow_key *key);
/* Extract key from packet coming from userspace. */
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
struct sk_buff *skb,
- struct sw_flow_key *key);
+ struct sw_flow_key *key, bool log);
#endif /* flow.h */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 089b195c064a..9645a21d9eaa 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -46,24 +46,22 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
+#include <net/mpls.h>
#include "flow_netlink.h"
-static void update_range__(struct sw_flow_match *match,
- size_t offset, size_t size, bool is_mask)
+static void update_range(struct sw_flow_match *match,
+ size_t offset, size_t size, bool is_mask)
{
- struct sw_flow_key_range *range = NULL;
+ struct sw_flow_key_range *range;
size_t start = rounddown(offset, sizeof(long));
size_t end = roundup(offset + size, sizeof(long));
if (!is_mask)
range = &match->range;
- else if (match->mask)
+ else
range = &match->mask->range;
- if (!range)
- return;
-
if (range->start == range->end) {
range->start = start;
range->end = end;
@@ -79,22 +77,20 @@ static void update_range__(struct sw_flow_match *match,
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
do { \
- update_range__(match, offsetof(struct sw_flow_key, field), \
- sizeof((match)->key->field), is_mask); \
- if (is_mask) { \
- if ((match)->mask) \
- (match)->mask->key.field = value; \
- } else { \
+ update_range(match, offsetof(struct sw_flow_key, field), \
+ sizeof((match)->key->field), is_mask); \
+ if (is_mask) \
+ (match)->mask->key.field = value; \
+ else \
(match)->key->field = value; \
- } \
} while (0)
#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
do { \
- update_range__(match, offset, len, is_mask); \
+ update_range(match, offset, len, is_mask); \
if (is_mask) \
memcpy((u8 *)&(match)->mask->key + offset, value_p, \
- len); \
+ len); \
else \
memcpy((u8 *)(match)->key + offset, value_p, len); \
} while (0)
@@ -103,22 +99,20 @@ static void update_range__(struct sw_flow_match *match,
SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
value_p, len, is_mask)
-#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
- do { \
- update_range__(match, offsetof(struct sw_flow_key, field), \
- sizeof((match)->key->field), is_mask); \
- if (is_mask) { \
- if ((match)->mask) \
- memset((u8 *)&(match)->mask->key.field, value,\
- sizeof((match)->mask->key.field)); \
- } else { \
+#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
+ do { \
+ update_range(match, offsetof(struct sw_flow_key, field), \
+ sizeof((match)->key->field), is_mask); \
+ if (is_mask) \
+ memset((u8 *)&(match)->mask->key.field, value, \
+ sizeof((match)->mask->key.field)); \
+ else \
memset((u8 *)&(match)->key->field, value, \
sizeof((match)->key->field)); \
- } \
} while (0)
static bool match_validate(const struct sw_flow_match *match,
- u64 key_attrs, u64 mask_attrs)
+ u64 key_attrs, u64 mask_attrs, bool log)
{
u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
u64 mask_allowed = key_attrs; /* At most allow all key attributes */
@@ -134,7 +128,8 @@ static bool match_validate(const struct sw_flow_match *match,
| (1 << OVS_KEY_ATTR_ICMP)
| (1 << OVS_KEY_ATTR_ICMPV6)
| (1 << OVS_KEY_ATTR_ARP)
- | (1 << OVS_KEY_ATTR_ND));
+ | (1 << OVS_KEY_ATTR_ND)
+ | (1 << OVS_KEY_ATTR_MPLS));
/* Always allowed mask fields. */
mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
@@ -145,10 +140,16 @@ static bool match_validate(const struct sw_flow_match *match,
if (match->key->eth.type == htons(ETH_P_ARP)
|| match->key->eth.type == htons(ETH_P_RARP)) {
key_expected |= 1 << OVS_KEY_ATTR_ARP;
- if (match->mask && (match->mask->key.tp.src == htons(0xff)))
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
}
+ if (eth_p_mpls(match->key->eth.type)) {
+ key_expected |= 1 << OVS_KEY_ATTR_MPLS;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
+ }
+
if (match->key->eth.type == htons(ETH_P_IP)) {
key_expected |= 1 << OVS_KEY_ATTR_IPV4;
if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
@@ -220,7 +221,7 @@ static bool match_validate(const struct sw_flow_match *match,
htons(NDISC_NEIGHBOUR_SOLICITATION) ||
match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
key_expected |= 1 << OVS_KEY_ATTR_ND;
- if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
+ if (match->mask && (match->mask->key.tp.src == htons(0xff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ND;
}
}
@@ -229,21 +230,65 @@ static bool match_validate(const struct sw_flow_match *match,
if ((key_attrs & key_expected) != key_expected) {
/* Key attributes check failed. */
- OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
- (unsigned long long)key_attrs, (unsigned long long)key_expected);
+ OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
+ (unsigned long long)key_attrs,
+ (unsigned long long)key_expected);
return false;
}
if ((mask_attrs & mask_allowed) != mask_attrs) {
/* Mask attributes check failed. */
- OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
- (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
+ OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
+ (unsigned long long)mask_attrs,
+ (unsigned long long)mask_allowed);
return false;
}
return true;
}
+size_t ovs_tun_key_attr_size(void)
+{
+ /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
+ * updating this function.
+ */
+ return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
+ + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
+ + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
+ + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
+ + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
+ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+ + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
+ + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
+}
+
+size_t ovs_key_attr_size(void)
+{
+ /* Whenever adding new OVS_KEY_ FIELDS, we should consider
+ * updating this function.
+ */
+ BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
+
+ return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
+ + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
+ + ovs_tun_key_attr_size()
+ + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
+ + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
+ + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
+ + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
+ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
+ + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
+ + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
+ + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
+ + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
+ + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
+ + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
+ + nla_total_size(28); /* OVS_KEY_ATTR_ND */
+}
+
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_ENCAP] = -1,
@@ -266,6 +311,7 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
[OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
[OVS_KEY_ATTR_TUNNEL] = -1,
+ [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls),
};
static bool is_all_zero(const u8 *fp, size_t size)
@@ -284,7 +330,7 @@ static bool is_all_zero(const u8 *fp, size_t size)
static int __parse_flow_nlattrs(const struct nlattr *attr,
const struct nlattr *a[],
- u64 *attrsp, bool nz)
+ u64 *attrsp, bool log, bool nz)
{
const struct nlattr *nla;
u64 attrs;
@@ -296,21 +342,20 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
int expected_len;
if (type > OVS_KEY_ATTR_MAX) {
- OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+ OVS_NLERR(log, "Key type %d is out of range max %d",
type, OVS_KEY_ATTR_MAX);
return -EINVAL;
}
if (attrs & (1 << type)) {
- OVS_NLERR("Duplicate key attribute (type %d).\n", type);
+ OVS_NLERR(log, "Duplicate key (type %d).", type);
return -EINVAL;
}
expected_len = ovs_key_lens[type];
if (nla_len(nla) != expected_len && expected_len != -1) {
- OVS_NLERR("Key attribute has unexpected length (type=%d"
- ", length=%d, expected=%d).\n", type,
- nla_len(nla), expected_len);
+ OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
+ type, nla_len(nla), expected_len);
return -EINVAL;
}
@@ -320,7 +365,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
}
}
if (rem) {
- OVS_NLERR("Message has %d unknown bytes.\n", rem);
+ OVS_NLERR(log, "Message has %d unknown bytes.", rem);
return -EINVAL;
}
@@ -329,28 +374,84 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
}
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u64 *attrsp)
+ const struct nlattr *a[], u64 *attrsp,
+ bool log)
{
- return __parse_flow_nlattrs(attr, a, attrsp, true);
+ return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}
static int parse_flow_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u64 *attrsp)
+ const struct nlattr *a[], u64 *attrsp,
+ bool log)
{
- return __parse_flow_nlattrs(attr, a, attrsp, false);
+ return __parse_flow_nlattrs(attr, a, attrsp, log, false);
+}
+
+static int genev_tun_opt_from_nlattr(const struct nlattr *a,
+ struct sw_flow_match *match, bool is_mask,
+ bool log)
+{
+ unsigned long opt_key_offset;
+
+ if (nla_len(a) > sizeof(match->key->tun_opts)) {
+ OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
+ nla_len(a), sizeof(match->key->tun_opts));
+ return -EINVAL;
+ }
+
+ if (nla_len(a) % 4 != 0) {
+ OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
+ nla_len(a));
+ return -EINVAL;
+ }
+
+ /* We need to record the length of the options passed
+ * down, otherwise packets with the same format but
+ * additional options will be silently matched.
+ */
+ if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
+ false);
+ } else {
+ /* This is somewhat unusual because it looks at
+ * both the key and mask while parsing the
+ * attributes (and by extension assumes the key
+ * is parsed first). Normally, we would verify
+ * that each is the correct length and that the
+ * attributes line up in the validate function.
+ * However, that is difficult because this is
+ * variable length and we won't have the
+ * information later.
+ */
+ if (match->key->tun_opts_len != nla_len(a)) {
+ OVS_NLERR(log, "Geneve option len %d != mask len %d",
+ match->key->tun_opts_len, nla_len(a));
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
+ }
+
+ opt_key_offset = (unsigned long)GENEVE_OPTS((struct sw_flow_key *)0,
+ nla_len(a));
+ SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
+ nla_len(a), is_mask);
+ return 0;
}
static int ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct sw_flow_match *match, bool is_mask)
+ struct sw_flow_match *match, bool is_mask,
+ bool log)
{
struct nlattr *a;
int rem;
bool ttl = false;
__be16 tun_flags = 0;
- unsigned long opt_key_offset;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
+ int err;
+
static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
@@ -359,20 +460,21 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
+ [OVS_TUNNEL_KEY_ATTR_TP_SRC] = sizeof(u16),
+ [OVS_TUNNEL_KEY_ATTR_TP_DST] = sizeof(u16),
[OVS_TUNNEL_KEY_ATTR_OAM] = 0,
[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
};
if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
- OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
- type, OVS_TUNNEL_KEY_ATTR_MAX);
+ OVS_NLERR(log, "Tunnel attr %d out of range max %d",
+ type, OVS_TUNNEL_KEY_ATTR_MAX);
return -EINVAL;
}
if (ovs_tunnel_key_lens[type] != nla_len(a) &&
ovs_tunnel_key_lens[type] != -1) {
- OVS_NLERR("IPv4 tunnel attribute type has unexpected "
- " length (type=%d, length=%d, expected=%d).\n",
+ OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
type, nla_len(a), ovs_tunnel_key_lens[type]);
return -EINVAL;
}
@@ -406,62 +508,26 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
case OVS_TUNNEL_KEY_ATTR_CSUM:
tun_flags |= TUNNEL_CSUM;
break;
+ case OVS_TUNNEL_KEY_ATTR_TP_SRC:
+ SW_FLOW_KEY_PUT(match, tun_key.tp_src,
+ nla_get_be16(a), is_mask);
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TP_DST:
+ SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
+ nla_get_be16(a), is_mask);
+ break;
case OVS_TUNNEL_KEY_ATTR_OAM:
tun_flags |= TUNNEL_OAM;
break;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
- tun_flags |= TUNNEL_OPTIONS_PRESENT;
- if (nla_len(a) > sizeof(match->key->tun_opts)) {
- OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n",
- nla_len(a),
- sizeof(match->key->tun_opts));
- return -EINVAL;
- }
-
- if (nla_len(a) % 4 != 0) {
- OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n",
- nla_len(a));
- return -EINVAL;
- }
-
- /* We need to record the length of the options passed
- * down, otherwise packets with the same format but
- * additional options will be silently matched.
- */
- if (!is_mask) {
- SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
- false);
- } else {
- /* This is somewhat unusual because it looks at
- * both the key and mask while parsing the
- * attributes (and by extension assumes the key
- * is parsed first). Normally, we would verify
- * that each is the correct length and that the
- * attributes line up in the validate function.
- * However, that is difficult because this is
- * variable length and we won't have the
- * information later.
- */
- if (match->key->tun_opts_len != nla_len(a)) {
- OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).",
- match->key->tun_opts_len,
- nla_len(a));
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff,
- true);
- }
+ err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
+ if (err)
+ return err;
- opt_key_offset = (unsigned long)GENEVE_OPTS(
- (struct sw_flow_key *)0,
- nla_len(a));
- SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset,
- nla_data(a), nla_len(a),
- is_mask);
+ tun_flags |= TUNNEL_OPTIONS_PRESENT;
break;
default:
- OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n",
+ OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
type);
return -EINVAL;
}
@@ -470,18 +536,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
if (rem > 0) {
- OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
+ OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
+ rem);
return -EINVAL;
}
if (!is_mask) {
if (!match->key->tun_key.ipv4_dst) {
- OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+ OVS_NLERR(log, "IPv4 tunnel dst address is zero");
return -EINVAL;
}
if (!ttl) {
- OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+ OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
return -EINVAL;
}
}
@@ -514,6 +581,12 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
if ((output->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
+ if (output->tp_src &&
+ nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
+ return -EMSGSIZE;
+ if (output->tp_dst &&
+ nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
+ return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
@@ -525,7 +598,6 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
return 0;
}
-
static int ipv4_tun_to_nlattr(struct sk_buff *skb,
const struct ovs_key_ipv4_tunnel *output,
const struct geneve_opt *tun_opts,
@@ -546,8 +618,17 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
return 0;
}
+int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
+ const struct ovs_tunnel_info *egress_tun_info)
+{
+ return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
+ egress_tun_info->options,
+ egress_tun_info->options_len);
+}
+
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
- const struct nlattr **a, bool is_mask)
+ const struct nlattr **a, bool is_mask,
+ bool log)
{
if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
@@ -572,10 +653,13 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
- if (is_mask)
+ if (is_mask) {
in_port = 0xffffffff; /* Always exact match in_port. */
- else if (in_port >= DP_MAX_PORTS)
+ } else if (in_port >= DP_MAX_PORTS) {
+ OVS_NLERR(log, "Port %d exceeds max allowable %d",
+ in_port, DP_MAX_PORTS);
return -EINVAL;
+ }
SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
@@ -591,7 +675,7 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
}
if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
- is_mask))
+ is_mask, log))
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
@@ -599,12 +683,12 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
}
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
- const struct nlattr **a, bool is_mask)
+ const struct nlattr **a, bool is_mask,
+ bool log)
{
int err;
- u64 orig_attrs = attrs;
- err = metadata_from_nlattrs(match, &attrs, a, is_mask);
+ err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
if (err)
return err;
@@ -625,17 +709,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (!(tci & htons(VLAN_TAG_PRESENT))) {
if (is_mask)
- OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+ OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
else
- OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
+ OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
- } else if (!is_mask)
- SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+ }
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
__be16 eth_type;
@@ -645,8 +728,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
/* Always exact match EtherType. */
eth_type = htons(0xffff);
} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
- OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
- ntohs(eth_type), ETH_P_802_3_MIN);
+ OVS_NLERR(log, "EtherType %x is less than min %x",
+ ntohs(eth_type), ETH_P_802_3_MIN);
return -EINVAL;
}
@@ -661,8 +744,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
- OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
- ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
+ OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
+ ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, ip.proto,
@@ -685,13 +768,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
- OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
- ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
+ OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
+ ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
}
if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
- OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n",
+ OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x).\n",
ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
return -EINVAL;
}
@@ -723,7 +806,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
- OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+ OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
arp_key->arp_op);
return -EINVAL;
}
@@ -742,6 +825,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
attrs &= ~(1 << OVS_KEY_ATTR_ARP);
}
+ if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
+ const struct ovs_key_mpls *mpls_key;
+
+ mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
+ SW_FLOW_KEY_PUT(match, mpls.top_lse,
+ mpls_key->mpls_lse, is_mask);
+
+ attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
+ }
+
if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
const struct ovs_key_tcp *tcp_key;
@@ -752,15 +845,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
}
if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
- if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
- SW_FLOW_KEY_PUT(match, tp.flags,
- nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
- is_mask);
- } else {
- SW_FLOW_KEY_PUT(match, tp.flags,
- nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
- is_mask);
- }
+ SW_FLOW_KEY_PUT(match, tp.flags,
+ nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
+ is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
}
@@ -819,8 +906,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
attrs &= ~(1 << OVS_KEY_ATTR_ND);
}
- if (attrs != 0)
+ if (attrs != 0) {
+ OVS_NLERR(log, "Unknown key attributes %llx",
+ (unsigned long long)attrs);
return -EINVAL;
+ }
return 0;
}
@@ -858,10 +948,14 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val)
* of this flow.
* @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
* attribute specifies the mask field of the wildcarded flow.
+ * @log: Boolean to allow kernel error logging. Normally true, but when
+ * probing for feature compatibility this should be passed in as false to
+ * suppress unnecessary error logging.
*/
int ovs_nla_get_match(struct sw_flow_match *match,
- const struct nlattr *key,
- const struct nlattr *mask)
+ const struct nlattr *nla_key,
+ const struct nlattr *nla_mask,
+ bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
const struct nlattr *encap;
@@ -871,7 +965,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
bool encap_valid = false;
int err;
- err = parse_flow_nlattrs(key, a, &key_attrs);
+ err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
if (err)
return err;
@@ -882,7 +976,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
(key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
- OVS_NLERR("Invalid Vlan frame.\n");
+ OVS_NLERR(log, "Invalid Vlan frame.");
return -EINVAL;
}
@@ -893,61 +987,68 @@ int ovs_nla_get_match(struct sw_flow_match *match,
encap_valid = true;
if (tci & htons(VLAN_TAG_PRESENT)) {
- err = parse_flow_nlattrs(encap, a, &key_attrs);
+ err = parse_flow_nlattrs(encap, a, &key_attrs, log);
if (err)
return err;
} else if (!tci) {
/* Corner case for truncated 802.1Q header. */
if (nla_len(encap)) {
- OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
+ OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
return -EINVAL;
}
} else {
- OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
+ OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
return -EINVAL;
}
}
- err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+ err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
if (err)
return err;
- if (match->mask && !mask) {
- /* Create an exact match mask. We need to set to 0xff all the
- * 'match->mask' fields that have been touched in 'match->key'.
- * We cannot simply memset 'match->mask', because padding bytes
- * and fields not specified in 'match->key' should be left to 0.
- * Instead, we use a stream of netlink attributes, copied from
- * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
- * of filling 'match->mask' appropriately.
- */
- newmask = kmemdup(key, nla_total_size(nla_len(key)),
- GFP_KERNEL);
- if (!newmask)
- return -ENOMEM;
+ if (match->mask) {
+ if (!nla_mask) {
+ /* Create an exact match mask. We need to set to 0xff
+ * all the 'match->mask' fields that have been touched
+ * in 'match->key'. We cannot simply memset
+ * 'match->mask', because padding bytes and fields not
+ * specified in 'match->key' should be left to 0.
+ * Instead, we use a stream of netlink attributes,
+ * copied from 'key' and set to 0xff.
+ * ovs_key_from_nlattrs() will take care of filling
+ * 'match->mask' appropriately.
+ */
+ newmask = kmemdup(nla_key,
+ nla_total_size(nla_len(nla_key)),
+ GFP_KERNEL);
+ if (!newmask)
+ return -ENOMEM;
- mask_set_nlattr(newmask, 0xff);
+ mask_set_nlattr(newmask, 0xff);
- /* The userspace does not send tunnel attributes that are 0,
- * but we should not wildcard them nonetheless.
- */
- if (match->key->tun_key.ipv4_dst)
- SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);
+ /* The userspace does not send tunnel attributes that
+ * are 0, but we should not wildcard them nonetheless.
+ */
+ if (match->key->tun_key.ipv4_dst)
+ SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
+ 0xff, true);
- mask = newmask;
- }
+ nla_mask = newmask;
+ }
- if (mask) {
- err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+ err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
if (err)
goto free_newmask;
+ /* Always match on tci. */
+ SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
+
if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
__be16 eth_type = 0;
__be16 tci = 0;
if (!encap_valid) {
- OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
+ OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
err = -EINVAL;
goto free_newmask;
}
@@ -959,12 +1060,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
if (eth_type == htons(0xffff)) {
mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
encap = a[OVS_KEY_ATTR_ENCAP];
- err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+ err = parse_flow_mask_nlattrs(encap, a,
+ &mask_attrs, log);
if (err)
goto free_newmask;
} else {
- OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
- ntohs(eth_type));
+ OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
+ ntohs(eth_type));
err = -EINVAL;
goto free_newmask;
}
@@ -973,18 +1075,19 @@ int ovs_nla_get_match(struct sw_flow_match *match,
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
if (!(tci & htons(VLAN_TAG_PRESENT))) {
- OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
+ OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
+ ntohs(tci));
err = -EINVAL;
goto free_newmask;
}
}
- err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+ err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
if (err)
goto free_newmask;
}
- if (!match_validate(match, key_attrs, mask_attrs))
+ if (!match_validate(match, key_attrs, mask_attrs, log))
err = -EINVAL;
free_newmask:
@@ -997,6 +1100,9 @@ free_newmask:
* @key: Receives extracted in_port, priority, tun_key and skb_mark.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
+ * @log: Boolean to allow kernel error logging. Normally true, but when
+ * probing for feature compatibility this should be passed in as false to
+ * suppress unnecessary error logging.
*
* This parses a series of Netlink attributes that form a flow key, which must
* take the same form accepted by flow_from_nlattrs(), but only enough of it to
@@ -1005,14 +1111,15 @@ free_newmask:
*/
int ovs_nla_get_flow_metadata(const struct nlattr *attr,
- struct sw_flow_key *key)
+ struct sw_flow_key *key,
+ bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
struct sw_flow_match match;
u64 attrs = 0;
int err;
- err = parse_flow_nlattrs(attr, a, &attrs);
+ err = parse_flow_nlattrs(attr, a, &attrs, log);
if (err)
return -EINVAL;
@@ -1021,7 +1128,7 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr,
key->phy.in_port = DP_MAX_PORTS;
- return metadata_from_nlattrs(&match, &attrs, a, false);
+ return metadata_from_nlattrs(&match, &attrs, a, false, log);
}
int ovs_nla_put_flow(const struct sw_flow_key *swkey,
@@ -1147,6 +1254,14 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
arp_key->arp_op = htons(output->ip.proto);
ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
+ } else if (eth_p_mpls(swkey->eth.type)) {
+ struct ovs_key_mpls *mpls_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
+ if (!nla)
+ goto nla_put_failure;
+ mpls_key = nla_data(nla);
+ mpls_key->mpls_lse = output->mpls.top_lse;
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1233,12 +1348,14 @@ nla_put_failure:
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
{
struct sw_flow_actions *sfa;
- if (size > MAX_ACTIONS_BUFSIZE)
+ if (size > MAX_ACTIONS_BUFSIZE) {
+ OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
return ERR_PTR(-EINVAL);
+ }
sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
if (!sfa)
@@ -1256,7 +1373,7 @@ void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
}
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
- int attr_len)
+ int attr_len, bool log)
{
struct sw_flow_actions *acts;
@@ -1276,7 +1393,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
new_acts_size = MAX_ACTIONS_BUFSIZE;
}
- acts = ovs_nla_alloc_flow_actions(new_acts_size);
+ acts = nla_alloc_flow_actions(new_acts_size, log);
if (IS_ERR(acts))
return (void *)acts;
@@ -1291,11 +1408,11 @@ out:
}
static struct nlattr *__add_action(struct sw_flow_actions **sfa,
- int attrtype, void *data, int len)
+ int attrtype, void *data, int len, bool log)
{
struct nlattr *a;
- a = reserve_sfa_size(sfa, nla_attr_size(len));
+ a = reserve_sfa_size(sfa, nla_attr_size(len), log);
if (IS_ERR(a))
return a;
@@ -1310,24 +1427,22 @@ static struct nlattr *__add_action(struct sw_flow_actions **sfa,
}
static int add_action(struct sw_flow_actions **sfa, int attrtype,
- void *data, int len)
+ void *data, int len, bool log)
{
struct nlattr *a;
- a = __add_action(sfa, attrtype, data, len);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ a = __add_action(sfa, attrtype, data, len, log);
- return 0;
+ return PTR_ERR_OR_ZERO(a);
}
static inline int add_nested_action_start(struct sw_flow_actions **sfa,
- int attrtype)
+ int attrtype, bool log)
{
int used = (*sfa)->actions_len;
int err;
- err = add_action(sfa, attrtype, NULL, 0);
+ err = add_action(sfa, attrtype, NULL, 0, log);
if (err)
return err;
@@ -1343,9 +1458,15 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa,
a->nla_len = sfa->actions_len - st_offset;
}
+static int __ovs_nla_copy_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ int depth, struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci, bool log);
+
static int validate_and_copy_sample(const struct nlattr *attr,
const struct sw_flow_key *key, int depth,
- struct sw_flow_actions **sfa)
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci, bool log)
{
const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
const struct nlattr *probability, *actions;
@@ -1371,18 +1492,19 @@ static int validate_and_copy_sample(const struct nlattr *attr,
return -EINVAL;
/* validation done, copy sample action. */
- start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
if (start < 0)
return start;
err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
- nla_data(probability), sizeof(u32));
+ nla_data(probability), sizeof(u32), log);
if (err)
return err;
- st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
+ st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
if (st_acts < 0)
return st_acts;
- err = ovs_nla_copy_actions(actions, key, depth + 1, sfa);
+ err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
+ eth_type, vlan_tci, log);
if (err)
return err;
@@ -1392,10 +1514,10 @@ static int validate_and_copy_sample(const struct nlattr *attr,
return 0;
}
-static int validate_tp_port(const struct sw_flow_key *flow_key)
+static int validate_tp_port(const struct sw_flow_key *flow_key,
+ __be16 eth_type)
{
- if ((flow_key->eth.type == htons(ETH_P_IP) ||
- flow_key->eth.type == htons(ETH_P_IPV6)) &&
+ if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
(flow_key->tp.src || flow_key->tp.dst))
return 0;
@@ -1419,7 +1541,7 @@ void ovs_match_init(struct sw_flow_match *match,
}
static int validate_and_copy_set_tun(const struct nlattr *attr,
- struct sw_flow_actions **sfa)
+ struct sw_flow_actions **sfa, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
@@ -1428,7 +1550,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
int err, start;
ovs_match_init(&match, &key, NULL);
- err = ipv4_tun_from_nlattr(nla_data(attr), &match, false);
+ err = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
if (err)
return err;
@@ -1457,12 +1579,12 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
};
- start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
if (start < 0)
return start;
a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
- sizeof(*tun_info) + key.tun_opts_len);
+ sizeof(*tun_info) + key.tun_opts_len, log);
if (IS_ERR(a))
return PTR_ERR(a);
@@ -1490,7 +1612,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key,
struct sw_flow_actions **sfa,
- bool *set_tun)
+ bool *set_tun, __be16 eth_type, bool log)
{
const struct nlattr *ovs_key = nla_data(a);
int key_type = nla_type(ovs_key);
@@ -1515,14 +1637,17 @@ static int validate_set(const struct nlattr *a,
break;
case OVS_KEY_ATTR_TUNNEL:
+ if (eth_p_mpls(eth_type))
+ return -EINVAL;
+
*set_tun = true;
- err = validate_and_copy_set_tun(a, sfa);
+ err = validate_and_copy_set_tun(a, sfa, log);
if (err)
return err;
break;
case OVS_KEY_ATTR_IPV4:
- if (flow_key->eth.type != htons(ETH_P_IP))
+ if (eth_type != htons(ETH_P_IP))
return -EINVAL;
if (!flow_key->ip.proto)
@@ -1538,7 +1663,7 @@ static int validate_set(const struct nlattr *a,
break;
case OVS_KEY_ATTR_IPV6:
- if (flow_key->eth.type != htons(ETH_P_IPV6))
+ if (eth_type != htons(ETH_P_IPV6))
return -EINVAL;
if (!flow_key->ip.proto)
@@ -1560,19 +1685,24 @@ static int validate_set(const struct nlattr *a,
if (flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
- return validate_tp_port(flow_key);
+ return validate_tp_port(flow_key, eth_type);
case OVS_KEY_ATTR_UDP:
if (flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
- return validate_tp_port(flow_key);
+ return validate_tp_port(flow_key, eth_type);
+
+ case OVS_KEY_ATTR_MPLS:
+ if (!eth_p_mpls(eth_type))
+ return -EINVAL;
+ break;
case OVS_KEY_ATTR_SCTP:
if (flow_key->ip.proto != IPPROTO_SCTP)
return -EINVAL;
- return validate_tp_port(flow_key);
+ return validate_tp_port(flow_key, eth_type);
default:
return -EINVAL;
@@ -1586,6 +1716,7 @@ static int validate_userspace(const struct nlattr *attr)
static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
+ [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
};
struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
int error;
@@ -1603,12 +1734,12 @@ static int validate_userspace(const struct nlattr *attr)
}
static int copy_action(const struct nlattr *from,
- struct sw_flow_actions **sfa)
+ struct sw_flow_actions **sfa, bool log)
{
int totlen = NLA_ALIGN(from->nla_len);
struct nlattr *to;
- to = reserve_sfa_size(sfa, from->nla_len);
+ to = reserve_sfa_size(sfa, from->nla_len, log);
if (IS_ERR(to))
return PTR_ERR(to);
@@ -1616,12 +1747,13 @@ static int copy_action(const struct nlattr *from,
return 0;
}
-int ovs_nla_copy_actions(const struct nlattr *attr,
- const struct sw_flow_key *key,
- int depth,
- struct sw_flow_actions **sfa)
+static int __ovs_nla_copy_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ int depth, struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci, bool log)
{
const struct nlattr *a;
+ bool out_tnl_port = false;
int rem, err;
if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1633,6 +1765,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+ [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
+ [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
[OVS_ACTION_ATTR_POP_VLAN] = 0,
[OVS_ACTION_ATTR_SET] = (u32)-1,
@@ -1662,6 +1796,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
+ out_tnl_port = false;
+
break;
case OVS_ACTION_ATTR_HASH: {
@@ -1678,6 +1814,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
}
case OVS_ACTION_ATTR_POP_VLAN:
+ vlan_tci = htons(0);
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
@@ -1686,29 +1823,77 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
return -EINVAL;
if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
return -EINVAL;
+ vlan_tci = vlan->vlan_tci;
break;
case OVS_ACTION_ATTR_RECIRC:
break;
+ case OVS_ACTION_ATTR_PUSH_MPLS: {
+ const struct ovs_action_push_mpls *mpls = nla_data(a);
+
+ /* The networking stack does not allow simultaneous tunnel
+ * and MPLS GSO.
+ */
+ if (out_tnl_port)
+ return -EINVAL;
+
+ if (!eth_p_mpls(mpls->mpls_ethertype))
+ return -EINVAL;
+ /* Prohibit push MPLS other than to a white list
+ * for packets that have a known tag order.
+ */
+ if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+ (eth_type != htons(ETH_P_IP) &&
+ eth_type != htons(ETH_P_IPV6) &&
+ eth_type != htons(ETH_P_ARP) &&
+ eth_type != htons(ETH_P_RARP) &&
+ !eth_p_mpls(eth_type)))
+ return -EINVAL;
+ eth_type = mpls->mpls_ethertype;
+ break;
+ }
+
+ case OVS_ACTION_ATTR_POP_MPLS:
+ if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+ !eth_p_mpls(eth_type))
+ return -EINVAL;
+
+ /* Disallow subsequent L2.5+ set and mpls_pop actions
+ * as there is no check here to ensure that the new
+ * eth_type is valid and thus set actions could
+ * write off the end of the packet or otherwise
+ * corrupt it.
+ *
+ * Support for these actions is planned using packet
+ * recirculation.
+ */
+ eth_type = htons(0);
+ break;
+
case OVS_ACTION_ATTR_SET:
- err = validate_set(a, key, sfa, &skip_copy);
+ err = validate_set(a, key, sfa,
+ &out_tnl_port, eth_type, log);
if (err)
return err;
+
+ skip_copy = out_tnl_port;
break;
case OVS_ACTION_ATTR_SAMPLE:
- err = validate_and_copy_sample(a, key, depth, sfa);
+ err = validate_and_copy_sample(a, key, depth, sfa,
+ eth_type, vlan_tci, log);
if (err)
return err;
skip_copy = true;
break;
default:
+ OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
}
if (!skip_copy) {
- err = copy_action(a, sfa);
+ err = copy_action(a, sfa, log);
if (err)
return err;
}
@@ -1720,6 +1905,24 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
return 0;
}
+int ovs_nla_copy_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa, bool log)
+{
+ int err;
+
+ *sfa = nla_alloc_flow_actions(nla_len(attr), log);
+ if (IS_ERR(*sfa))
+ return PTR_ERR(*sfa);
+
+ err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
+ key->eth.tci, log);
+ if (err)
+ kfree(*sfa);
+
+ return err;
+}
+
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
const struct nlattr *a;
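The OVS_NLERR() calls converted above now take a leading bool so error logging can be suppressed while probing for feature support (see the @log kernel-doc added to ovs_nla_get_match()). The updated macro definition is not part of this excerpt; a minimal sketch consistent with the new call sites, which may differ from the in-tree definition in prefix or rate-limiting details, would be:

/* Sketch only: log-gated, rate-limited error reporting matching the
 * OVS_NLERR(log, fmt, ...) call sites above.  The newline is appended
 * here, which is why the converted messages drop their trailing "\n".
 */
#define OVS_NLERR(logging_allowed, fmt, ...)				\
do {									\
	if (logging_allowed && net_ratelimit())				\
		pr_info("netlink: " fmt "\n", ##__VA_ARGS__);		\
} while (0)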
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 206e45add888..577f12be3459 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -37,24 +37,28 @@
#include "flow.h"
+size_t ovs_tun_key_attr_size(void);
+size_t ovs_key_attr_size(void);
+
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key, struct sw_flow_mask *mask);
int ovs_nla_put_flow(const struct sw_flow_key *,
const struct sw_flow_key *, struct sk_buff *);
-int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *);
+int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
+ bool log);
-int ovs_nla_get_match(struct sw_flow_match *match,
- const struct nlattr *,
- const struct nlattr *);
+int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
+ const struct nlattr *mask, bool log);
+int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
+ const struct ovs_tunnel_info *);
int ovs_nla_copy_actions(const struct nlattr *attr,
- const struct sw_flow_key *key, int depth,
- struct sw_flow_actions **sfa);
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa, bool log);
int ovs_nla_put_actions(const struct nlattr *attr,
int len, struct sk_buff *skb);
-struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
#endif /* flow_netlink.h */
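The new prototypes above make the @log convention part of the flow-netlink API. A hypothetical caller-side sketch of the probing use case described in the kernel-doc (the helper name and call site are illustrative, not part of the patch):

/* Parse a candidate action list with log == false so that expected
 * failures during a feature probe stay out of the kernel log; the
 * normal flow-install path would pass true instead.
 */
static bool probe_supports_actions(const struct nlattr *actions,
				   const struct sw_flow_key *key)
{
	struct sw_flow_actions *acts;

	if (ovs_nla_copy_actions(actions, key, &acts, false))
		return false;

	ovs_nla_free_flow_actions(acts);
	return true;
}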
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index cf2d853646f0..5899bf161c61 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -25,7 +25,7 @@
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
@@ -107,7 +107,7 @@ err:
return ERR_PTR(-ENOMEM);
}
-int ovs_flow_tbl_count(struct flow_table *table)
+int ovs_flow_tbl_count(const struct flow_table *table)
{
return table->count;
}
@@ -250,11 +250,14 @@ skip_flows:
__table_instance_destroy(ti);
}
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+/* No need for locking; this function is called from RCU callback or
+ * error path.
+ */
+void ovs_flow_tbl_destroy(struct flow_table *table)
{
- struct table_instance *ti = ovsl_dereference(table->ti);
+ struct table_instance *ti = rcu_dereference_raw(table->ti);
- table_instance_destroy(ti, deferred);
+ table_instance_destroy(ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -363,7 +366,7 @@ static u32 flow_hash(const struct sw_flow_key *key, int key_start,
/* Make sure number of hash bytes are multiple of u32. */
BUILD_BUG_ON(sizeof(long) % sizeof(u32));
- return arch_fast_hash2(hash_key, hash_u32s, 0);
+ return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
@@ -398,7 +401,7 @@ static bool flow_cmp_masked_key(const struct sw_flow *flow,
}
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- struct sw_flow_match *match)
+ const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
@@ -409,7 +412,7 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
- struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask)
{
struct sw_flow *flow;
struct hlist_head *head;
@@ -457,7 +460,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
- struct sw_flow_match *match)
+ const struct sw_flow_match *match)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
@@ -560,7 +563,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
- struct sw_flow_mask *new)
+ const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
mask = flow_mask_find(tbl, new);
@@ -583,7 +586,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_mask *mask)
+ const struct sw_flow_mask *mask)
{
struct table_instance *new_ti = NULL;
struct table_instance *ti;
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 5918bff7f3f6..309fa6415689 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -61,12 +61,12 @@ struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
-int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
+int ovs_flow_tbl_count(const struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table);
int ovs_flow_tbl_flush(struct flow_table *flow_table);
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_mask *mask);
+ const struct sw_flow_mask *mask);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int ovs_flow_tbl_num_masks(const struct flow_table *table);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
@@ -77,9 +77,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
- struct sw_flow_match *match);
+ const struct sw_flow_match *match);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
- struct sw_flow_match *match);
+ const struct sw_flow_match *match);
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
const struct sw_flow_mask *mask);
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 106a9d80b663..347fa2325b22 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -17,6 +17,7 @@
#include <linux/rculist.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
+#include <linux/module.h>
#include <net/geneve.h>
#include <net/icmp.h>
@@ -28,6 +29,8 @@
#include "datapath.h"
#include "vport.h"
+static struct vport_ops ovs_geneve_vport_ops;
+
/**
* struct geneve_port - Keeps track of open UDP ports
* @gs: The socket created for this port number.
@@ -65,7 +68,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
}
/* Convert 24 bit VNI to 64 bit tunnel ID. */
-static __be64 vni_to_tunnel_id(__u8 *vni)
+static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
return (vni[0] << 16) | (vni[1] << 8) | vni[2];
@@ -94,7 +97,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
key = vni_to_tunnel_id(geneveh->vni);
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags,
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
+ udp_hdr(skb)->source, udp_hdr(skb)->dest,
+ key, flags,
geneveh->options, opts_len);
ovs_vport_receive(vport, skb, &tun_info);
@@ -225,11 +230,46 @@ static const char *geneve_get_name(const struct vport *vport)
return geneve_port->name;
}
-const struct vport_ops ovs_geneve_vport_ops = {
+static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+ struct ovs_tunnel_info *egress_tun_info)
+{
+ struct geneve_port *geneve_port = geneve_vport(vport);
+ struct net *net = ovs_dp_get_net(vport->dp);
+ __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
+ __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
+
+ /* Get tp_src and tp_dst; refer to geneve_build_header().
+ */
+ return ovs_tunnel_get_egress_info(egress_tun_info,
+ ovs_dp_get_net(vport->dp),
+ OVS_CB(skb)->egress_tun_info,
+ IPPROTO_UDP, skb->mark, sport, dport);
+}
+
+static struct vport_ops ovs_geneve_vport_ops = {
.type = OVS_VPORT_TYPE_GENEVE,
.create = geneve_tnl_create,
.destroy = geneve_tnl_destroy,
.get_name = geneve_get_name,
.get_options = geneve_get_options,
.send = geneve_tnl_send,
+ .owner = THIS_MODULE,
+ .get_egress_tun_info = geneve_get_egress_tun_info,
};
+
+static int __init ovs_geneve_tnl_init(void)
+{
+ return ovs_vport_ops_register(&ovs_geneve_vport_ops);
+}
+
+static void __exit ovs_geneve_tnl_exit(void)
+{
+ ovs_vport_ops_unregister(&ovs_geneve_vport_ops);
+}
+
+module_init(ovs_geneve_tnl_init);
+module_exit(ovs_geneve_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: Geneve swiching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-5");
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 108b82da2fd9..6b69df545b1d 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -29,6 +29,7 @@
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
@@ -45,6 +46,8 @@
#include "datapath.h"
#include "vport.h"
+static struct vport_ops ovs_gre_vport_ops;
+
/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
@@ -105,7 +108,7 @@ static int gre_rcv(struct sk_buff *skb,
return PACKET_REJECT;
key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
filter_tnl_flags(tpi->flags), NULL, 0);
ovs_vport_receive(vport, skb, &tun_info);
@@ -172,14 +175,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
goto err_free_rt;
}
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb)))) {
- err = -ENOMEM;
- goto err_free_rt;
- }
- skb->vlan_tci = 0;
+ skb = vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_free_rt;
}
/* Push Tunnel header. */
@@ -281,10 +280,38 @@ static void gre_tnl_destroy(struct vport *vport)
gre_exit();
}
-const struct vport_ops ovs_gre_vport_ops = {
+static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+ struct ovs_tunnel_info *egress_tun_info)
+{
+ return ovs_tunnel_get_egress_info(egress_tun_info,
+ ovs_dp_get_net(vport->dp),
+ OVS_CB(skb)->egress_tun_info,
+ IPPROTO_GRE, skb->mark, 0, 0);
+}
+
+static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
.destroy = gre_tnl_destroy,
.get_name = gre_get_name,
.send = gre_tnl_send,
+ .get_egress_tun_info = gre_get_egress_tun_info,
+ .owner = THIS_MODULE,
};
+
+static int __init ovs_gre_tnl_init(void)
+{
+ return ovs_vport_ops_register(&ovs_gre_vport_ops);
+}
+
+static void __exit ovs_gre_tnl_exit(void)
+{
+ ovs_vport_ops_unregister(&ovs_gre_vport_ops);
+}
+
+module_init(ovs_gre_tnl_init);
+module_exit(ovs_gre_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: GRE switching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-3");
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 84516126e5f3..6a55f7105505 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -36,6 +36,8 @@ struct internal_dev {
struct vport *vport;
};
+static struct vport_ops ovs_internal_vport_ops;
+
static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
return netdev_priv(netdev);
@@ -222,6 +224,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
struct net_device *netdev = netdev_vport_priv(vport)->dev;
int len;
+ if (unlikely(!(netdev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ return 0;
+ }
+
len = skb->len;
skb_dst_drop(skb);
@@ -238,7 +245,7 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
return len;
}
-const struct vport_ops ovs_internal_vport_ops = {
+static struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
@@ -261,10 +268,21 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
int ovs_internal_dev_rtnl_link_register(void)
{
- return rtnl_link_register(&internal_dev_link_ops);
+ int err;
+
+ err = rtnl_link_register(&internal_dev_link_ops);
+ if (err < 0)
+ return err;
+
+ err = ovs_vport_ops_register(&ovs_internal_vport_ops);
+ if (err < 0)
+ rtnl_link_unregister(&internal_dev_link_ops);
+
+ return err;
}
void ovs_internal_dev_rtnl_link_unregister(void)
{
+ ovs_vport_ops_unregister(&ovs_internal_vport_ops);
rtnl_link_unregister(&internal_dev_link_ops);
}
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index d21f77d875ba..4776282c6417 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -33,6 +33,8 @@
#include "vport-internal_dev.h"
#include "vport-netdev.h"
+static struct vport_ops ovs_netdev_vport_ops;
+
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
@@ -75,7 +77,7 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
}
-static struct net_device *get_dpdev(struct datapath *dp)
+static struct net_device *get_dpdev(const struct datapath *dp)
{
struct vport *local;
@@ -224,10 +226,20 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev)
return NULL;
}
-const struct vport_ops ovs_netdev_vport_ops = {
+static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
.get_name = ovs_netdev_get_name,
.send = netdev_send,
};
+
+int __init ovs_netdev_init(void)
+{
+ return ovs_vport_ops_register(&ovs_netdev_vport_ops);
+}
+
+void ovs_netdev_exit(void)
+{
+ ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
+}
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 8df01c1127e5..6f7038e79c52 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -41,4 +41,7 @@ netdev_vport_priv(const struct vport *vport)
const char *ovs_netdev_get_name(const struct vport *);
void ovs_netdev_detach_dev(struct vport *);
+int __init ovs_netdev_init(void);
+void ovs_netdev_exit(void);
+
#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 2735e01dca73..38f95a52241b 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -24,6 +24,7 @@
#include <linux/net.h>
#include <linux/rculist.h>
#include <linux/udp.h>
+#include <linux/module.h>
#include <net/icmp.h>
#include <net/ip.h>
@@ -50,6 +51,8 @@ struct vxlan_port {
char name[IFNAMSIZ];
};
+static struct vport_ops ovs_vxlan_vport_ops;
+
static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
{
return vport_priv(vport);
@@ -66,7 +69,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
/* Save outer tunnel values */
iph = ip_hdr(skb);
key = cpu_to_be64(ntohl(vx_vni) >> 8);
- ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0);
+ ovs_flow_tun_info_init(&tun_info, iph,
+ udp_hdr(skb)->source, udp_hdr(skb)->dest,
+ key, TUNNEL_KEY, NULL, 0);
ovs_vport_receive(vport, skb, &tun_info);
}
@@ -186,17 +191,55 @@ error:
return err;
}
+static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+ struct ovs_tunnel_info *egress_tun_info)
+{
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ __be16 src_port;
+ int port_min;
+ int port_max;
+
+ inet_get_local_port_range(net, &port_min, &port_max);
+ src_port = udp_flow_src_port(net, skb, 0, 0, true);
+
+ return ovs_tunnel_get_egress_info(egress_tun_info, net,
+ OVS_CB(skb)->egress_tun_info,
+ IPPROTO_UDP, skb->mark,
+ src_port, dst_port);
+}
+
static const char *vxlan_get_name(const struct vport *vport)
{
struct vxlan_port *vxlan_port = vxlan_vport(vport);
return vxlan_port->name;
}
-const struct vport_ops ovs_vxlan_vport_ops = {
+static struct vport_ops ovs_vxlan_vport_ops = {
.type = OVS_VPORT_TYPE_VXLAN,
.create = vxlan_tnl_create,
.destroy = vxlan_tnl_destroy,
.get_name = vxlan_get_name,
.get_options = vxlan_get_options,
.send = vxlan_tnl_send,
+ .get_egress_tun_info = vxlan_get_egress_tun_info,
+ .owner = THIS_MODULE,
};
+
+static int __init ovs_vxlan_tnl_init(void)
+{
+ return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+}
+
+static void __exit ovs_vxlan_tnl_exit(void)
+{
+ ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+}
+
+module_init(ovs_vxlan_tnl_init);
+module_exit(ovs_vxlan_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: VXLAN switching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-4");
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6015802ebe6f..9584526c0778 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -28,6 +28,7 @@
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
+#include <linux/module.h>
#include "datapath.h"
#include "vport.h"
@@ -36,22 +37,7 @@
static void ovs_vport_record_error(struct vport *,
enum vport_err_type err_type);
-/* List of statically compiled vport implementations. Don't forget to also
- * add yours to the list at the bottom of vport.h. */
-static const struct vport_ops *vport_ops_list[] = {
- &ovs_netdev_vport_ops,
- &ovs_internal_vport_ops,
-
-#ifdef CONFIG_OPENVSWITCH_GRE
- &ovs_gre_vport_ops,
-#endif
-#ifdef CONFIG_OPENVSWITCH_VXLAN
- &ovs_vxlan_vport_ops,
-#endif
-#ifdef CONFIG_OPENVSWITCH_GENEVE
- &ovs_geneve_vport_ops,
-#endif
-};
+static LIST_HEAD(vport_ops_list);
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
@@ -82,12 +68,38 @@ void ovs_vport_exit(void)
kfree(dev_table);
}
-static struct hlist_head *hash_bucket(struct net *net, const char *name)
+static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
+int ovs_vport_ops_register(struct vport_ops *ops)
+{
+ int err = -EEXIST;
+ struct vport_ops *o;
+
+ ovs_lock();
+ list_for_each_entry(o, &vport_ops_list, list)
+ if (ops->type == o->type)
+ goto errout;
+
+ list_add_tail(&ops->list, &vport_ops_list);
+ err = 0;
+errout:
+ ovs_unlock();
+ return err;
+}
+EXPORT_SYMBOL_GPL(ovs_vport_ops_register);
+
+void ovs_vport_ops_unregister(struct vport_ops *ops)
+{
+ ovs_lock();
+ list_del(&ops->list);
+ ovs_unlock();
+}
+EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
+
/**
* ovs_vport_locate - find a port that has already been created
*
@@ -95,7 +107,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name)
*
* Must be called with ovs or RCU read lock.
*/
-struct vport *ovs_vport_locate(struct net *net, const char *name)
+struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
@@ -153,6 +165,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
return vport;
}
+EXPORT_SYMBOL_GPL(ovs_vport_alloc);
/**
* ovs_vport_free - uninitialize and free vport
@@ -173,6 +186,18 @@ void ovs_vport_free(struct vport *vport)
free_percpu(vport->percpu_stats);
kfree(vport);
}
+EXPORT_SYMBOL_GPL(ovs_vport_free);
+
+static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
+{
+ struct vport_ops *ops;
+
+ list_for_each_entry(ops, &vport_ops_list, list)
+ if (ops->type == parms->type)
+ return ops;
+
+ return NULL;
+}
/**
* ovs_vport_add - add vport device (for kernel callers)
@@ -184,31 +209,40 @@ void ovs_vport_free(struct vport *vport)
*/
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
+ struct vport_ops *ops;
struct vport *vport;
- int err = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
- if (vport_ops_list[i]->type == parms->type) {
- struct hlist_head *bucket;
+ ops = ovs_vport_lookup(parms);
+ if (ops) {
+ struct hlist_head *bucket;
- vport = vport_ops_list[i]->create(parms);
- if (IS_ERR(vport)) {
- err = PTR_ERR(vport);
- goto out;
- }
+ if (!try_module_get(ops->owner))
+ return ERR_PTR(-EAFNOSUPPORT);
- bucket = hash_bucket(ovs_dp_get_net(vport->dp),
- vport->ops->get_name(vport));
- hlist_add_head_rcu(&vport->hash_node, bucket);
+ vport = ops->create(parms);
+ if (IS_ERR(vport)) {
+ module_put(ops->owner);
return vport;
}
+
+ bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+ vport->ops->get_name(vport));
+ hlist_add_head_rcu(&vport->hash_node, bucket);
+ return vport;
}
- err = -EAFNOSUPPORT;
+ /* Unlock to attempt module load and return -EAGAIN if load
+ * was successful as we need to restart the port addition
+ * workflow.
+ */
+ ovs_unlock();
+ request_module("vport-type-%d", parms->type);
+ ovs_lock();
-out:
- return ERR_PTR(err);
+ if (!ovs_vport_lookup(parms))
+ return ERR_PTR(-EAFNOSUPPORT);
+ else
+ return ERR_PTR(-EAGAIN);
}
/**
@@ -242,6 +276,8 @@ void ovs_vport_del(struct vport *vport)
hlist_del_rcu(&vport->hash_node);
vport->ops->destroy(vport);
+
+ module_put(vport->ops->owner);
}
/**
@@ -344,7 +380,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
*
* Must be called with ovs_mutex.
*/
-int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
+int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
struct vport_portids *old, *vport_portids;
@@ -435,7 +471,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
* skb->data should point to the Ethernet header.
*/
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *tun_info)
+ const struct ovs_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
struct sw_flow_key key;
@@ -457,6 +493,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
}
ovs_dp_process_packet(skb, &key);
}
+EXPORT_SYMBOL_GPL(ovs_vport_receive);
/**
* ovs_vport_send - send a packet on a device
@@ -535,3 +572,65 @@ void ovs_vport_deferred_free(struct vport *vport)
call_rcu(&vport->rcu, free_vport_rcu);
}
+EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
+
+int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+ struct net *net,
+ const struct ovs_tunnel_info *tun_info,
+ u8 ipproto,
+ u32 skb_mark,
+ __be16 tp_src,
+ __be16 tp_dst)
+{
+ const struct ovs_key_ipv4_tunnel *tun_key;
+ struct rtable *rt;
+ struct flowi4 fl;
+
+ if (unlikely(!tun_info))
+ return -EINVAL;
+
+ tun_key = &tun_info->tunnel;
+
+ /* Route lookup to get the source IP address.
+ * This may need to change if the corresponding logic in the
+ * vport ops changes.
+ */
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = tun_key->ipv4_dst;
+ fl.saddr = tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
+ fl.flowi4_mark = skb_mark;
+ fl.flowi4_proto = ipproto;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+
+ /* Generate egress_tun_info based on tun_info,
+ * saddr, tp_src and tp_dst
+ */
+ __ovs_flow_tun_info_init(egress_tun_info,
+ fl.saddr, tun_key->ipv4_dst,
+ tun_key->ipv4_tos,
+ tun_key->ipv4_ttl,
+ tp_src, tp_dst,
+ tun_key->tun_id,
+ tun_key->tun_flags,
+ tun_info->options,
+ tun_info->options_len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
+
+int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+ struct ovs_tunnel_info *info)
+{
+ /* get_egress_tun_info() is only implemented on tunnel ports. */
+ if (unlikely(!vport->ops->get_egress_tun_info))
+ return -EINVAL;
+
+ return vport->ops->get_egress_tun_info(vport, skb, info);
+}
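ovs_vport_add() above now returns -EAGAIN after a successful request_module() so that the caller re-runs the lookup. A hypothetical caller-side sketch of that contract (the wrapper is illustrative; the actual datapath code may structure the retry differently):

static struct vport *vport_add_retry(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (IS_ERR(vport) && PTR_ERR(vport) == -EAGAIN)
		/* Module was just loaded; the ops list is now populated. */
		vport = ovs_vport_add(parms);

	return vport;
}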
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 8942125de3a6..99c8e71d9e6c 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -45,19 +45,29 @@ void ovs_vport_exit(void);
struct vport *ovs_vport_add(const struct vport_parms *);
void ovs_vport_del(struct vport *);
-struct vport *ovs_vport_locate(struct net *net, const char *name);
+struct vport *ovs_vport_locate(const struct net *net, const char *name);
void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
int ovs_vport_set_options(struct vport *, struct nlattr *options);
int ovs_vport_get_options(const struct vport *, struct sk_buff *);
-int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids);
+int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
int ovs_vport_send(struct vport *, struct sk_buff *);
+int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+ struct net *net,
+ const struct ovs_tunnel_info *tun_info,
+ u8 ipproto,
+ u32 skb_mark,
+ __be16 tp_src,
+ __be16 tp_dst);
+int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+ struct ovs_tunnel_info *info);
+
/* The following definitions are for implementers of vport devices: */
struct vport_err_stats {
@@ -146,6 +156,8 @@ struct vport_parms {
* @get_name: Get the device's name.
* @send: Send a packet on the device. Returns the length of the packet sent,
* zero for dropped packets or negative for error.
+ * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
+ * a packet.
*/
struct vport_ops {
enum ovs_vport_type type;
@@ -161,6 +173,11 @@ struct vport_ops {
const char *(*get_name)(const struct vport *);
int (*send)(struct vport *, struct sk_buff *);
+ int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
+ struct ovs_tunnel_info *);
+
+ struct module *owner;
+ struct list_head list;
};
enum vport_err_type {
@@ -207,15 +224,7 @@ static inline struct vport *vport_from_priv(void *priv)
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
- struct ovs_tunnel_info *);
-
-/* List of statically compiled vport implementations. Don't forget to also
- * add yours to the list at the top of vport.c. */
-extern const struct vport_ops ovs_netdev_vport_ops;
-extern const struct vport_ops ovs_internal_vport_ops;
-extern const struct vport_ops ovs_gre_vport_ops;
-extern const struct vport_ops ovs_vxlan_vport_ops;
-extern const struct vport_ops ovs_geneve_vport_ops;
+ const struct ovs_tunnel_info *);
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
@@ -224,4 +233,7 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
}
+int ovs_vport_ops_register(struct vport_ops *ops);
+void ovs_vport_ops_unregister(struct vport_ops *ops);
+
#endif /* vport.h */
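The two egress-tunnel helpers declared above are meant to be used together with ovs_nla_put_egress_tunnel_key() from flow_netlink.c. A hypothetical sketch of the upcall-side flow (attribute nesting and error paths elided; the function name is illustrative):

static int put_egress_tunnel_key(struct sk_buff *user_skb,
				 struct vport *out_vport,
				 struct sk_buff *pkt)
{
	struct ovs_tunnel_info info;
	int err;

	err = ovs_vport_get_egress_tun_info(out_vport, pkt, &info);
	if (err)
		return err;

	return ovs_nla_put_egress_tunnel_key(user_skb, &info);
}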
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 07c04a841ba0..e52a44785681 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1676,7 +1676,7 @@ retry:
if (len < hhlen)
skb_reset_network_header(skb);
}
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err)
goto out_free;
goto retry;
@@ -2095,6 +2095,18 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
sock_wfree(skb);
}
+static bool ll_header_truncated(const struct net_device *dev, int len)
+{
+ /* net device doesn't like empty head */
+ if (unlikely(len <= dev->hard_header_len)) {
+ net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+ current->comm, len, dev->hard_header_len);
+ return true;
+ }
+
+ return false;
+}
+
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, int size_max,
__be16 proto, unsigned char *addr, int hlen)
@@ -2170,12 +2182,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
if (unlikely(err < 0))
return -EINVAL;
} else if (dev->hard_header_len) {
- /* net device doesn't like empty head */
- if (unlikely(tp_len <= dev->hard_header_len)) {
- pr_err("packet size is too short (%d < %d)\n",
- tp_len, dev->hard_header_len);
+ if (ll_header_truncated(dev, tp_len))
return -EINVAL;
- }
skb_push(skb, dev->hard_header_len);
err = skb_store_bits(skb, 0, data,
@@ -2400,6 +2408,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
unsigned short gso_type = 0;
int hlen, tlen;
int extra_len = 0;
+ ssize_t n;
/*
* Get and verify the address.
@@ -2438,19 +2447,21 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
len -= vnet_hdr_len;
- err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
- vnet_hdr_len);
- if (err < 0)
+ err = -EFAULT;
+ n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
+ if (n != vnet_hdr_len)
goto out_unlock;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
- vnet_hdr.hdr_len))
- vnet_hdr.hdr_len = vnet_hdr.csum_start +
- vnet_hdr.csum_offset + 2;
+ (__virtio16_to_cpu(false, vnet_hdr.csum_start) +
+ __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 >
+ __virtio16_to_cpu(false, vnet_hdr.hdr_len)))
+ vnet_hdr.hdr_len = __cpu_to_virtio16(false,
+ __virtio16_to_cpu(false, vnet_hdr.csum_start) +
+ __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2);
err = -EINVAL;
- if (vnet_hdr.hdr_len > len)
+ if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len)
goto out_unlock;
if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
@@ -2492,7 +2503,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
+ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
+ __virtio16_to_cpu(false, vnet_hdr.hdr_len),
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
@@ -2500,12 +2512,17 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb_set_network_header(skb, reserve);
err = -EINVAL;
- if (sock->type == SOCK_DGRAM &&
- (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
- goto out_free;
+ if (sock->type == SOCK_DGRAM) {
+ offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ if (unlikely(offset < 0))
+ goto out_free;
+ } else {
+ if (ll_header_truncated(dev, len))
+ goto out_free;
+ }
/* Returns -EFAULT on error */
- err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
+ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
if (err)
goto out_free;
@@ -2534,14 +2551,16 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
- vnet_hdr.csum_offset)) {
+ u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start);
+ u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset);
+ if (!skb_partial_csum_set(skb, s, o)) {
err = -EINVAL;
goto out_free;
}
}
- skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
+ skb_shinfo(skb)->gso_size =
+ __virtio16_to_cpu(false, vnet_hdr.gso_size);
skb_shinfo(skb)->gso_type = gso_type;
/* Header must be checked, and gso_segs computed. */
@@ -2912,8 +2931,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
- vnet_hdr.hdr_len = skb_headlen(skb);
- vnet_hdr.gso_size = sinfo->gso_size;
+ vnet_hdr.hdr_len =
+ __cpu_to_virtio16(false, skb_headlen(skb));
+ vnet_hdr.gso_size =
+ __cpu_to_virtio16(false, sinfo->gso_size);
if (sinfo->gso_type & SKB_GSO_TCPV4)
vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -2931,14 +2952,15 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- vnet_hdr.csum_start = skb_checksum_start_offset(skb);
- vnet_hdr.csum_offset = skb->csum_offset;
+ vnet_hdr.csum_start = __cpu_to_virtio16(false,
+ skb_checksum_start_offset(skb));
+ vnet_hdr.csum_offset = __cpu_to_virtio16(false,
+ skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
- err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
- vnet_hdr_len);
+ err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
if (err < 0)
goto out_free;
}
@@ -2953,7 +2975,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto out_free;
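The af_packet hunks above stop touching the virtio-net header fields directly and funnel every 16-bit value through the virtio byte-order helpers with little_endian fixed to false (the legacy, pre-virtio-1.0 layout). A minimal sketch of that accessor pattern, assuming the linux/virtio_byteorder.h helpers these hunks call; the helper names are illustrative:

#include <linux/virtio_byteorder.h>

static inline u16 legacy_virtio16_get(__virtio16 v)
{
	/* Read a legacy (non-virtio-1.0) 16-bit header field. */
	return __virtio16_to_cpu(false, v);
}

static inline __virtio16 legacy_virtio16_set(u16 v)
{
	/* Store a legacy (non-virtio-1.0) 16-bit header field. */
	return __cpu_to_virtio16(false, v);
}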
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 5a940dbd74a3..32ab87d34828 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -426,16 +426,17 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
if (!out_dev) {
- LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n",
- pn_sockaddr_get_addr(&sa));
+ net_dbg_ratelimited("No Phonet route to %02X\n",
+ pn_sockaddr_get_addr(&sa));
goto out;
}
__skb_push(skb, sizeof(struct phonethdr));
skb->dev = out_dev;
if (out_dev == dev) {
- LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n",
- pn_sockaddr_get_addr(&sa), dev->name);
+ net_dbg_ratelimited("Phonet loop to %02X on %s\n",
+ pn_sockaddr_get_addr(&sa),
+ dev->name);
goto out_dev;
}
/* Some drivers (e.g. TUN) do not allocate HW header space */
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 290352c0e6b4..26054b4b467c 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -109,7 +109,7 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
return err;
skb_reserve(skb, MAX_PHONET_HEADER);
- err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
+ err = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
if (err < 0) {
kfree_skb(skb);
return err;
@@ -150,7 +150,7 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
copylen = len;
}
- rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
+ rval = skb_copy_datagram_msg(skb, 0, msg, copylen);
if (rval) {
rval = -EFAULT;
goto out;
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index e9a83a637185..fa8237fdc57b 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -203,8 +203,7 @@ static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb->len;
err = pep_write(sk, skb);
if (err) {
- LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n",
- dev->name, err);
+ net_dbg_ratelimited("%s: TX error (%d)\n", dev->name, err);
dev->stats.tx_aborted_errors++;
dev->stats.tx_errors++;
} else {
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 70a547ea5177..5d3f2b7507d4 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -272,8 +272,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
hdr = pnp_hdr(skb);
if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
- LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
- (unsigned int)hdr->data[0]);
+ net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
+ (unsigned int)hdr->data[0]);
return -EOPNOTSUPP;
}
@@ -304,8 +304,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
break;
default:
- LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
- (unsigned int)hdr->data[1]);
+ net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
+ (unsigned int)hdr->data[1]);
return -EOPNOTSUPP;
}
if (wake)
@@ -451,8 +451,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
break;
default:
- LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
- hdr->message_id);
+ net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
+ hdr->message_id);
err = -EINVAL;
}
out:
@@ -1141,7 +1141,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
return err;
skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err < 0)
goto outfree;
@@ -1296,7 +1296,7 @@ copy:
else
len = skb->len;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
+ err = skb_copy_datagram_msg(skb, 0, msg, len);
if (!err)
err = (flags & MSG_TRUNC) ? skb->len : len;
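The phonet conversions above replace the old LIMIT_NETDEBUG() macro with net_dbg_ratelimited(), the generic ratelimited debug printout. A minimal userspace sketch of the same idea, assuming a fixed window of 10 messages per 5 seconds (the kernel's default ratelimit) and a made-up dbg_ratelimited() helper:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Toy rate limiter: at most `burst` messages per `interval` seconds. */
struct ratelimit {
    time_t window_start;
    int interval;
    int burst;
    int printed;
    int missed;
};

static bool ratelimit_ok(struct ratelimit *rl)
{
    time_t now = time(NULL);

    if (now - rl->window_start >= rl->interval) {
        if (rl->missed)
            fprintf(stderr, "ratelimit: %d messages suppressed\n", rl->missed);
        rl->window_start = now;
        rl->printed = 0;
        rl->missed = 0;
    }
    if (rl->printed < rl->burst) {
        rl->printed++;
        return true;
    }
    rl->missed++;
    return false;
}

static void dbg_ratelimited(struct ratelimit *rl, const char *fmt, ...)
{
    va_list ap;

    if (!ratelimit_ok(rl))
        return;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    struct ratelimit rl = { .window_start = time(NULL), .interval = 5, .burst = 10 };

    for (int i = 0; i < 100; i++)   /* only the first 10 lines are printed */
        dbg_ratelimited(&rl, "No Phonet route to %02X\n", 0x2A);
    return 0;
}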
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 7280ab8810c2..c36d713229e0 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -316,8 +316,7 @@ int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index d67de453c35a..1b981a4e42c2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -472,15 +472,12 @@ static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache
return head;
}
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_ib_incoming *ibinc;
struct rds_page_frag *frag;
- struct iovec *iov = first_iov;
unsigned long to_copy;
unsigned long frag_off = 0;
- unsigned long iov_off = 0;
int copied = 0;
int ret;
u32 len;
@@ -489,37 +486,25 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
len = be32_to_cpu(inc->i_hdr.h_len);
- while (copied < size && copied < len) {
+ while (iov_iter_count(to) && copied < len) {
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ RDS_FRAG_SIZE - frag_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
-
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
- frag->f_sg.offset + frag_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(sg_page(&frag->f_sg),
+ frag->f_sg.offset + frag_off,
+ to_copy,
+ to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
frag_off += to_copy;
copied += to_copy;
}
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 04ce3b193f79..cbe6674e31ee 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -325,8 +325,7 @@ int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
gfp_t page_gfp, int prefill);
void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index aa8bf6786008..a66d1794b2d0 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -303,15 +303,12 @@ void rds_iw_inc_free(struct rds_incoming *inc)
BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_iw_incoming *iwinc;
struct rds_page_frag *frag;
- struct iovec *iov = first_iov;
unsigned long to_copy;
unsigned long frag_off = 0;
- unsigned long iov_off = 0;
int copied = 0;
int ret;
u32 len;
@@ -320,37 +317,25 @@ int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
len = be32_to_cpu(inc->i_hdr.h_len);
- while (copied < size && copied < len) {
+ while (iov_iter_count(to) && copied < len) {
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ RDS_FRAG_SIZE - frag_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %lu] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- frag->f_page, frag->f_offset, frag_off);
-
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(frag->f_page,
- frag->f_offset + frag_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(frag->f_page,
+ frag->f_offset + frag_off,
+ to_copy,
+ to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
frag_off += to_copy;
copied += to_copy;
}
diff --git a/net/rds/message.c b/net/rds/message.c
index aba232f9f308..5a21e6f5986f 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -264,75 +264,54 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
return rm;
}
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
- size_t total_len)
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
unsigned long to_copy;
- unsigned long iov_off;
unsigned long sg_off;
- struct iovec *iov;
struct scatterlist *sg;
int ret = 0;
- rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
+ rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
/*
* now allocate and copy in the data payload.
*/
sg = rm->data.op_sg;
- iov = first_iov;
- iov_off = 0;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
- while (total_len) {
+ while (iov_iter_count(from)) {
if (!sg_page(sg)) {
- ret = rds_page_remainder_alloc(sg, total_len,
+ ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
GFP_HIGHUSER);
if (ret)
- goto out;
+ return ret;
rm->data.op_nents++;
sg_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
- to_copy = min_t(size_t, to_copy, total_len);
-
- rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
- "sg [%p, %u, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- (void *)sg_page(sg), sg->offset, sg->length, sg_off);
+ to_copy = min_t(unsigned long, iov_iter_count(from),
+ sg->length - sg_off);
- ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret)
- goto out;
+ rds_stats_add(s_copy_from_user, to_copy);
+ ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+ to_copy, from);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
- total_len -= to_copy;
sg_off += to_copy;
if (sg_off == sg->length)
sg++;
}
-out:
return ret;
}
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
- struct iovec *first_iov, size_t size)
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_message *rm;
- struct iovec *iov;
struct scatterlist *sg;
unsigned long to_copy;
- unsigned long iov_off;
unsigned long vec_off;
int copied;
int ret;
@@ -341,36 +320,21 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
rm = container_of(inc, struct rds_message, m_inc);
len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
- iov = first_iov;
- iov_off = 0;
sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
- while (copied < size && copied < len) {
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ while (iov_iter_count(to) && copied < len) {
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ sg->length - vec_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
- "sg [%p, %u, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- sg_page(sg), sg->offset, sg->length, vec_off);
-
- ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
+ to_copy, to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
vec_off += to_copy;
copied += to_copy;
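Throughout the RDS changes above, the hand-rolled iovec walking (the iov/iov_off bookkeeping) is replaced by an iov_iter that copy_page_to_iter()/copy_from_iter() advance on their own. A toy userspace cursor, purely illustrative and not the real struct iov_iter, shows the bookkeeping the iterator takes over:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Toy cursor over an iovec array, advanced by every copy -- a sketch of
 * what iov_iter does for the kernel code above, not the real thing. */
struct iter {
    const struct iovec *iov;
    size_t nr_segs;
    size_t seg;     /* current segment */
    size_t off;     /* offset inside the current segment */
    size_t count;   /* bytes left in the whole iterator */
};

static size_t iter_count(const struct iter *it)
{
    return it->count;
}

/* Copy up to len bytes from buf into the iterator, advancing it. */
static size_t copy_to_iter_toy(const void *buf, size_t len, struct iter *it)
{
    const char *src = buf;
    size_t done = 0;

    while (done < len && it->count && it->seg < it->nr_segs) {
        size_t room = it->iov[it->seg].iov_len - it->off;
        size_t chunk = len - done < room ? len - done : room;

        memcpy((char *)it->iov[it->seg].iov_base + it->off, src + done, chunk);
        done += chunk;
        it->off += chunk;
        it->count -= chunk;
        if (it->off == it->iov[it->seg].iov_len) {
            it->seg++;
            it->off = 0;
        }
    }
    return done;
}

int main(void)
{
    char a[4], b[8];
    struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct iter it = { iov, 2, 0, 0, sizeof(a) + sizeof(b) };

    copy_to_iter_toy("hello world!", 12, &it);
    printf("%.4s%.8s, %zu bytes left\n", a, b, iter_count(&it));
    return 0;
}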
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 48f8ffc60f8f..c2a5eef41343 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -431,8 +431,7 @@ struct rds_transport {
int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
int (*recv)(struct rds_connection *conn);
- int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+ int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
void (*inc_free)(struct rds_incoming *inc);
int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -657,8 +656,7 @@ rds_conn_connecting(struct rds_connection *conn)
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
- size_t total_len);
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
__be16 dport, u64 seq);
@@ -667,8 +665,7 @@ int rds_message_add_extension(struct rds_header *hdr,
int rds_message_next_extension(struct rds_header *hdr,
unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
- struct iovec *first_iov, size_t size);
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index bd82522534fc..f9ec1acd801c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -414,6 +414,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
goto out;
while (1) {
+ struct iov_iter save;
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
@@ -449,8 +450,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
- ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
- size);
+ save = msg->msg_iter;
+ ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
if (ret < 0)
break;
@@ -463,6 +464,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
+ msg->msg_iter = save;
continue;
}
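The rds_recvmsg() hunks above snapshot msg->msg_iter before copying and roll it back if the incoming message was raced away by another consumer. Reduced to its essentials, with a made-up cursor type standing in for the iterator:

#include <stdio.h>
#include <string.h>

/* Sketch of the save/restore pattern: snapshot a cursor before a copy that
 * may have to be discarded, and roll back if the attempt is abandoned
 * (here a dummy "race" on the first try). */
struct cursor { const char *data; size_t pos; };

static size_t copy_some(struct cursor *c, char *dst, size_t n)
{
    size_t avail = strlen(c->data) - c->pos;
    size_t take = n < avail ? n : avail;

    memcpy(dst, c->data + c->pos, take);
    c->pos += take;
    return take;
}

int main(void)
{
    struct cursor cur = { "abcdef", 0 };
    char buf[8];

    for (int attempt = 0; attempt < 2; attempt++) {
        struct cursor save = cur;   /* like: save = msg->msg_iter */
        size_t n = copy_some(&cur, buf, 4);

        if (attempt == 0) {         /* pretend another consumer raced us */
            cur = save;             /* like: msg->msg_iter = save */
            continue;
        }
        buf[n] = '\0';
        printf("delivered \"%s\", cursor now at %zu\n", buf, cur.pos);
    }
    return 0;
}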
diff --git a/net/rds/send.c b/net/rds/send.c
index 0a64541020b0..42f65d4305c8 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -826,7 +826,7 @@ static int rds_rm_size(struct msghdr *msg, int data_len)
int cmsg_groups = 0;
int retval;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
@@ -878,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg;
int ret = 0;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
@@ -982,7 +982,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
ret = -ENOMEM;
goto out;
}
- ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+ ret = rds_message_copy_from_user(rm, &msg->msg_iter);
if (ret)
goto out;
}
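for_each_cmsghdr() is a thin wrapper around the CMSG_FIRSTHDR()/CMSG_NXTHDR() walk it replaces in the hunks above. The same macros exist in userspace; a small self-contained example that builds one control message by hand and walks it:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Walk every control message attached to a msghdr with the standard
 * CMSG_* macros -- the loop for_each_cmsghdr() wraps on the kernel side. */
static void dump_cmsgs(struct msghdr *msg)
{
    struct cmsghdr *cmsg;

    for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
        printf("cmsg: level=%d type=%d len=%zu\n",
               cmsg->cmsg_level, cmsg->cmsg_type, (size_t)cmsg->cmsg_len);
}

int main(void)
{
    union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;
    int one = 1;

    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &one, sizeof(one));

    dump_cmsgs(&msg);
    return 0;
}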
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 65637491f728..0dbdd37162da 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -69,8 +69,7 @@ void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk);
int rds_tcp_recv(struct rds_connection *conn);
void rds_tcp_inc_free(struct rds_incoming *inc);
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
/* tcp_send.c */
void rds_tcp_xmit_prepare(struct rds_connection *conn);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 9ae6e0a264ec..fbc5ef88bc0e 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -59,50 +59,30 @@ void rds_tcp_inc_free(struct rds_incoming *inc)
/*
* this is pretty lame, but, whatever.
*/
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_tcp_incoming *tinc;
- struct iovec *iov, tmp;
struct sk_buff *skb;
- unsigned long to_copy, skb_off;
int ret = 0;
- if (size == 0)
+ if (!iov_iter_count(to))
goto out;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
- iov = first_iov;
- tmp = *iov;
skb_queue_walk(&tinc->ti_skb_list, skb) {
- skb_off = 0;
- while (skb_off < skb->len) {
- while (tmp.iov_len == 0) {
- iov++;
- tmp = *iov;
- }
-
- to_copy = min(tmp.iov_len, size);
+ unsigned long to_copy, skb_off;
+ for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
+ to_copy = iov_iter_count(to);
to_copy = min(to_copy, skb->len - skb_off);
- rdsdebug("ret %d size %zu skb %p skb_off %lu "
- "skblen %d iov_base %p iov_len %zu cpy %lu\n",
- ret, size, skb, skb_off, skb->len,
- tmp.iov_base, tmp.iov_len, to_copy);
-
- /* modifies tmp as it copies */
- if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
- to_copy)) {
- ret = -EFAULT;
- goto out;
- }
+ if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
+ return -EFAULT;
rds_stats_add(s_copy_to_user, to_copy);
- size -= to_copy;
ret += to_copy;
- skb_off += to_copy;
- if (size == 0)
+
+ if (!iov_iter_count(to))
goto out;
}
}
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 2a4717967502..3f4a0bbeed3d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -183,7 +183,6 @@ static struct platform_driver rfkill_gpio_driver = {
.remove = rfkill_gpio_remove,
.driver = {
.name = "rfkill_gpio",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(rfkill_acpi_match),
},
};
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index cf5b145902e5..50cd26a48e87 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -142,7 +142,6 @@ static struct platform_driver rfkill_regulator_driver = {
.remove = rfkill_regulator_remove,
.driver = {
.name = "rfkill-regulator",
- .owner = THIS_MODULE,
},
};
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a85c1a086ae4..43bac7c4dd9e 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1121,7 +1121,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
skb_put(skb, len);
- err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+ err = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (err) {
kfree_skb(skb);
return err;
@@ -1249,7 +1249,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}
- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_copy_datagram_msg(skb, 0, msg, copied);
if (msg->msg_name) {
struct sockaddr_rose *srose;
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 0b4b9a79f5ab..e1a9373e5979 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -45,7 +45,7 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
if (msg->msg_controllen == 0)
return -EINVAL;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
@@ -531,14 +531,12 @@ static int rxrpc_send_data(struct kiocb *iocb,
struct rxrpc_skb_priv *sp;
unsigned char __user *from;
struct sk_buff *skb;
- struct iovec *iov;
+ const struct iovec *iov;
struct sock *sk = &rx->sk;
long timeo;
bool more;
int ret, ioc, segment, copied;
- _enter(",,,{%zu},%zu", msg->msg_iovlen, len);
-
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
/* this should be in poll */
@@ -547,8 +545,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
return -EPIPE;
- iov = msg->msg_iov;
- ioc = msg->msg_iovlen - 1;
+ iov = msg->msg_iter.iov;
+ ioc = msg->msg_iter.nr_segs - 1;
from = iov->iov_base;
segment = iov->iov_len;
iov++;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index e9aaa65c0778..4575485ad1b4 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -180,7 +180,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
if (copy > len - copied)
copy = len - copied;
- ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);
+ ret = skb_copy_datagram_msg(skb, offset, msg, copy);
if (ret < 0)
goto copy_error;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a1a8e29e5fc9..c54c9d9d1ffb 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -22,8 +22,9 @@ menuconfig NET_SCHED
This code is considered to be experimental.
To administer these schedulers, you'll need the user-level utilities
- from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
- That package also contains some documentation; for more, check out
+ from the package iproute2+tc at
+ <https://www.kernel.org/pub/linux/utils/net/iproute2/>. That package
+ also contains some documentation; for more, check out
<http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
This Quality of Service (QoS) support will enable you to use
@@ -336,7 +337,7 @@ config NET_SCH_PLUG
of virtual machines by allowing the generated network output to be rolled
back if needed.
- For more information, please refer to http://wiki.xensource.com/xenwiki/Remus
+ For more information, please refer to <http://wiki.xenproject.org/wiki/Remus>
Say Y here if you are using this kernel for Xen dom0 and
want to protect Xen guests with Remus.
@@ -686,6 +687,17 @@ config NET_ACT_CSUM
To compile this code as a module, choose M here: the
module will be called act_csum.
+config NET_ACT_VLAN
+ tristate "Vlan manipulation"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to push or pop VLAN headers.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_vlan.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 0a869a11f3e6..679f24ae7f93 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
+obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index d6bcbd9f7791..7fffc2272701 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -1,5 +1,5 @@
/*
- * net/sched/gact.c Generic actions
+ * net/sched/act_gact.c Generic actions
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8a64a0734aee..cbc8dd7dd48a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
/*
- * net/sched/ipt.c iptables target interface
+ * net/sched/act_ipt.c iptables target interface
*
*TODO: Add other tables. For now we only support the ipv4 table targets
*
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index eb48306033d9..5953517ec059 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -1,5 +1,5 @@
/*
- * net/sched/mirred.c packet mirroring and redirect actions
+ * net/sched/act_mirred.c packet mirroring and redirect actions
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 5f9bcb2e080b..59649d588d79 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -1,5 +1,5 @@
/*
- * net/sched/pedit.c Generic packet editor
+ * net/sched/act_pedit.c Generic packet editor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 69791ca77a05..9a1c42a43f92 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -1,5 +1,5 @@
/*
- * net/sched/police.c Input police filter.
+ * net/sched/act_police.c Input police filter
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 992c2317ce88..6a8d9488613a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -1,5 +1,5 @@
/*
- * net/sched/simp.c Simple example of an action
+ * net/sched/act_simple.c Simple example of an action
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
new file mode 100644
index 000000000000..d735ecf0b1a7
--- /dev/null
+++ b/net/sched/act_vlan.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_vlan.h>
+
+#define VLAN_TAB_MASK 15
+
+static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_vlan *v = a->priv;
+ int action;
+ int err;
+
+ spin_lock(&v->tcf_lock);
+ v->tcf_tm.lastuse = jiffies;
+ bstats_update(&v->tcf_bstats, skb);
+ action = v->tcf_action;
+
+ switch (v->tcfv_action) {
+ case TCA_VLAN_ACT_POP:
+ err = skb_vlan_pop(skb);
+ if (err)
+ goto drop;
+ break;
+ case TCA_VLAN_ACT_PUSH:
+ err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid);
+ if (err)
+ goto drop;
+ break;
+ default:
+ BUG();
+ }
+
+ goto unlock;
+
+drop:
+ action = TC_ACT_SHOT;
+ v->tcf_qstats.drops++;
+unlock:
+ spin_unlock(&v->tcf_lock);
+ return action;
+}
+
+static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
+ [TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
+ [TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
+ [TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
+};
+
+static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
+{
+ struct nlattr *tb[TCA_VLAN_MAX + 1];
+ struct tc_vlan *parm;
+ struct tcf_vlan *v;
+ int action;
+ __be16 push_vid = 0;
+ __be16 push_proto = 0;
+ int ret = 0;
+ int err;
+
+ if (!nla)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_VLAN_PARMS])
+ return -EINVAL;
+ parm = nla_data(tb[TCA_VLAN_PARMS]);
+ switch (parm->v_action) {
+ case TCA_VLAN_ACT_POP:
+ break;
+ case TCA_VLAN_ACT_PUSH:
+ if (!tb[TCA_VLAN_PUSH_VLAN_ID])
+ return -EINVAL;
+ push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
+ if (push_vid >= VLAN_VID_MASK)
+ return -ERANGE;
+
+ if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
+ push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
+ switch (push_proto) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+ } else {
+ push_proto = htons(ETH_P_8021Q);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ action = parm->v_action;
+
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+ if (ret)
+ return ret;
+
+ ret = ACT_P_CREATED;
+ } else {
+ if (bind)
+ return 0;
+ tcf_hash_release(a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ v = to_vlan(a);
+
+ spin_lock_bh(&v->tcf_lock);
+
+ v->tcfv_action = action;
+ v->tcfv_push_vid = push_vid;
+ v->tcfv_push_proto = push_proto;
+
+ v->tcf_action = parm->action;
+
+ spin_unlock_bh(&v->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(a);
+ return ret;
+}
+
+static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_vlan *v = a->priv;
+ struct tc_vlan opt = {
+ .index = v->tcf_index,
+ .refcnt = v->tcf_refcnt - ref,
+ .bindcnt = v->tcf_bindcnt - bind,
+ .action = v->tcf_action,
+ .v_action = v->tcfv_action,
+ };
+ struct tcf_t t;
+
+ if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
+ (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
+ nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto)))
+ goto nla_put_failure;
+
+ t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
+ if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t))
+ goto nla_put_failure;
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct tc_action_ops act_vlan_ops = {
+ .kind = "vlan",
+ .type = TCA_ACT_VLAN,
+ .owner = THIS_MODULE,
+ .act = tcf_vlan,
+ .dump = tcf_vlan_dump,
+ .init = tcf_vlan_init,
+};
+
+static int __init vlan_init_module(void)
+{
+ return tcf_register_action(&act_vlan_ops, VLAN_TAB_MASK);
+}
+
+static void __exit vlan_cleanup_module(void)
+{
+ tcf_unregister_action(&act_vlan_ops);
+}
+
+module_init(vlan_init_module);
+module_exit(vlan_cleanup_module);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("vlan manipulation actions");
+MODULE_LICENSE("GPL v2");
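The new act_vlan action delegates the actual header rewriting to skb_vlan_push()/skb_vlan_pop(). As a rough sketch of what a push amounts to at the frame level, assuming a plain untagged Ethernet frame in a flat buffer with spare room (the real helpers also handle offloaded tags and checksum fixups):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>  /* htons */

#define ETH_ALEN    6
#define VLAN_HLEN   4

/* Illustrative only: insert an 802.1Q tag right after the two MAC
 * addresses. The buffer must have VLAN_HLEN bytes of spare capacity. */
static size_t vlan_push(uint8_t *frame, size_t len, uint16_t tpid, uint16_t vid)
{
    memmove(frame + 2 * ETH_ALEN + VLAN_HLEN,
            frame + 2 * ETH_ALEN,
            len - 2 * ETH_ALEN);

    uint16_t tpid_be = htons(tpid);          /* e.g. 0x8100 for 802.1Q */
    uint16_t tci_be  = htons(vid & 0x0fff);  /* PCP/DEI left at zero */
    memcpy(frame + 2 * ETH_ALEN, &tpid_be, 2);
    memcpy(frame + 2 * ETH_ALEN + 2, &tci_be, 2);
    return len + VLAN_HLEN;
}

int main(void)
{
    uint8_t frame[64] = {
        /* dst MAC */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        /* src MAC */ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01,
        /* ethertype IPv4 */ 0x08, 0x00,
    };
    size_t len = vlan_push(frame, 14, 0x8100, 5);

    for (size_t i = 0; i < len; i++)
        printf("%02x%c", frame[i], i + 1 == len ? '\n' : ' ');
    return 0;
}

Userspace configures the action over the TCA_VLAN_* netlink attributes parsed in tcf_vlan_init(); iproute2's tc exposes the same attributes as a vlan push/pop action.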
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index cd61280941e5..5aed341406c2 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -72,10 +72,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
return l;
}
-static void basic_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int basic_init(struct tcf_proto *tp)
{
struct basic_head *head;
@@ -113,18 +109,12 @@ static void basic_destroy(struct tcf_proto *tp)
static int basic_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct basic_head *head = rtnl_dereference(tp->root);
- struct basic_filter *t, *f = (struct basic_filter *) arg;
-
- list_for_each_entry(t, &head->flist, link)
- if (t == f) {
- list_del_rcu(&t->link);
- tcf_unbind_filter(tp, &t->res);
- call_rcu(&t->rcu, basic_delete_filter);
- return 0;
- }
+ struct basic_filter *f = (struct basic_filter *) arg;
- return -ENOENT;
+ list_del_rcu(&f->link);
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, basic_delete_filter);
+ return 0;
}
static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
@@ -188,10 +178,9 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
- err = -ENOBUFS;
fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
- if (fnew == NULL)
- goto errout;
+ if (!fnew)
+ return -ENOBUFS;
tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
err = -EINVAL;
@@ -293,7 +282,6 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = {
.init = basic_init,
.destroy = basic_destroy,
.get = basic_get,
- .put = basic_put,
.change = basic_change,
.delete = basic_delete,
.walk = basic_walk,
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index eed49d1d0878..84c8219c3e1c 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -109,19 +109,12 @@ static void __cls_bpf_delete_prog(struct rcu_head *rcu)
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct cls_bpf_head *head = rtnl_dereference(tp->root);
- struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;
+ struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;
- list_for_each_entry(prog, &head->plist, link) {
- if (prog == todel) {
- list_del_rcu(&prog->link);
- tcf_unbind_filter(tp, &prog->res);
- call_rcu(&prog->rcu, __cls_bpf_delete_prog);
- return 0;
- }
- }
-
- return -ENOENT;
+ list_del_rcu(&prog->link);
+ tcf_unbind_filter(tp, &prog->res);
+ call_rcu(&prog->rcu, __cls_bpf_delete_prog);
+ return 0;
}
static void cls_bpf_destroy(struct tcf_proto *tp)
@@ -148,7 +141,7 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
if (head == NULL)
return 0UL;
- list_for_each_entry_rcu(prog, &head->plist, link) {
+ list_for_each_entry(prog, &head->plist, link) {
if (prog->handle == handle) {
ret = (unsigned long) prog;
break;
@@ -158,10 +151,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
return ret;
}
-static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
struct cls_bpf_prog *prog,
unsigned long base, struct nlattr **tb,
@@ -344,7 +333,7 @@ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog;
- list_for_each_entry_rcu(prog, &head->plist, link) {
+ list_for_each_entry(prog, &head->plist, link) {
if (arg->count < arg->skip)
goto skip;
if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
@@ -363,7 +352,6 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.init = cls_bpf_init,
.destroy = cls_bpf_destroy,
.get = cls_bpf_get,
- .put = cls_bpf_put,
.change = cls_bpf_change,
.delete = cls_bpf_delete,
.walk = cls_bpf_walk,
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d61a801222c1..221697ab0247 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -67,10 +67,6 @@ static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
return 0UL;
}
-static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int cls_cgroup_init(struct tcf_proto *tp)
{
return 0;
@@ -117,11 +113,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS;
tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
- if (head)
- new->handle = head->handle;
- else
- new->handle = handle;
-
+ new->handle = handle;
new->tp = tp;
err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
cgroup_policy);
@@ -185,7 +177,6 @@ static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long
struct sk_buff *skb, struct tcmsg *t)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
- unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
t->tcm_handle = head->handle;
@@ -206,7 +197,7 @@ static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, b);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -217,7 +208,6 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.classify = cls_cgroup_classify,
.destroy = cls_cgroup_destroy,
.get = cls_cgroup_get,
- .put = cls_cgroup_put,
.delete = cls_cgroup_delete,
.walk = cls_cgroup_walk,
.dump = cls_cgroup_dump,
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 4ac515f2a6ce..15d68f24a521 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -426,10 +426,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
goto err2;
/* Copy fold into fnew */
- fnew->handle = fold->handle;
- fnew->keymask = fold->keymask;
fnew->tp = fold->tp;
-
fnew->handle = fold->handle;
fnew->nkeys = fold->nkeys;
fnew->keymask = fold->keymask;
@@ -578,16 +575,12 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f;
- list_for_each_entry_rcu(f, &head->filters, list)
+ list_for_each_entry(f, &head->filters, list)
if (f->handle == handle)
return (unsigned long)f;
return 0;
}
-static void flow_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
@@ -645,7 +638,7 @@ static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, nest);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -654,7 +647,7 @@ static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f;
- list_for_each_entry_rcu(f, &head->filters, list) {
+ list_for_each_entry(f, &head->filters, list) {
if (arg->count < arg->skip)
goto skip;
if (arg->fn(tp, (unsigned long)f, arg) < 0) {
@@ -674,7 +667,6 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = {
.change = flow_change,
.delete = flow_delete,
.get = flow_get,
- .put = flow_put,
.dump = flow_dump,
.walk = flow_walk,
.owner = THIS_MODULE,
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index dbfdfd1f1a9f..a5269f76004c 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -111,10 +111,6 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
return 0;
}
-static void fw_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int fw_init(struct tcf_proto *tp)
{
return 0;
@@ -360,7 +356,6 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *)fh;
- unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
if (f == NULL)
@@ -401,7 +396,7 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, b);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -411,7 +406,6 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
.init = fw_init,
.destroy = fw_destroy,
.get = fw_get,
- .put = fw_put,
.change = fw_change,
.delete = fw_delete,
.walk = fw_walk,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 109a329b7198..2ecd24688554 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -256,10 +256,6 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
return 0;
}
-static void route4_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int route4_init(struct tcf_proto *tp)
{
return 0;
@@ -597,7 +593,6 @@ static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct route4_filter *f = (struct route4_filter *)fh;
- unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
@@ -639,7 +634,7 @@ static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, b);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -649,7 +644,6 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.init = route4_init,
.destroy = route4_destroy,
.get = route4_get,
- .put = route4_put,
.change = route4_change,
.delete = route4_delete,
.walk = route4_walk,
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 6bb55f277a5a..edd8ade3fbc1 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -271,10 +271,6 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
return 0;
}
-static void rsvp_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static int rsvp_init(struct tcf_proto *tp)
{
struct rsvp_head *data;
@@ -657,7 +653,6 @@ static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
{
struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s;
- unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
struct tc_rsvp_pinfo pinfo;
@@ -698,7 +693,7 @@ static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, b);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -708,7 +703,6 @@ static struct tcf_proto_ops RSVP_OPS __read_mostly = {
.init = rsvp_init,
.destroy = rsvp_destroy,
.get = rsvp_get,
- .put = rsvp_put,
.change = rsvp_change,
.delete = rsvp_delete,
.walk = rsvp_walk,
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 30f10fb07f4a..bd49bf547a47 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -116,13 +116,6 @@ static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}
-
-static void tcindex_put(struct tcf_proto *tp, unsigned long f)
-{
- pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
-}
-
-
static int tcindex_init(struct tcf_proto *tp)
{
struct tcindex_data *p;
@@ -496,11 +489,10 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
- unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
- tp, fh, skb, t, p, r, b);
+ pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p\n",
+ tp, fh, skb, t, p, r);
pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
nest = nla_nest_start(skb, TCA_OPTIONS);
@@ -550,7 +542,7 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
return skb->len;
nla_put_failure:
- nlmsg_trim(skb, b);
+ nla_nest_cancel(skb, nest);
return -1;
}
@@ -560,7 +552,6 @@ static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
.init = tcindex_init,
.destroy = tcindex_destroy,
.get = tcindex_get,
- .put = tcindex_put,
.change = tcindex_change,
.delete = tcindex_delete,
.walk = tcindex_walk,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 0472909bb014..09487afbfd51 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -299,10 +299,6 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
return (unsigned long)u32_lookup_key(ht, handle);
}
-static void u32_put(struct tcf_proto *tp, unsigned long f)
-{
-}
-
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
int i = 0x800;
@@ -1021,7 +1017,6 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.init = u32_init,
.destroy = u32_destroy,
.get = u32_get,
- .put = u32_put,
.change = u32_change,
.delete = u32_delete,
.walk = u32_walk,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index cbd7e1fd23b4..9b05924cc386 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -481,12 +481,11 @@ begin:
if (likely(rate))
do_div(len, rate);
/* Since socket rate can change later,
- * clamp the delay to 125 ms.
- * TODO: maybe segment the too big skb, as in commit
- * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+ * clamp the delay to 1 second.
+ * Really, providers of too-big packets should be fixed!
*/
- if (unlikely(len > 125 * NSEC_PER_MSEC)) {
- len = 125 * NSEC_PER_MSEC;
+ if (unlikely(len > NSEC_PER_SEC)) {
+ len = NSEC_PER_SEC;
q->stat_pkts_too_long++;
}
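For reference, the value being clamped above is the time one skb spends on the wire at the socket's pacing rate, in nanoseconds; only the clamp is visible in the hunk, so the surrounding computation is paraphrased in this small sketch:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Paraphrase of the fq pacing math (assumption: only the clamp appears in
 * the hunk above; the length-to-nanoseconds conversion is reconstructed). */
static uint64_t fq_pacing_delay_ns(uint64_t skb_bytes, uint64_t rate_bytes_per_sec)
{
    uint64_t len = skb_bytes * NSEC_PER_SEC;

    if (rate_bytes_per_sec)
        len /= rate_bytes_per_sec;
    if (len > NSEC_PER_SEC)     /* was 125 * NSEC_PER_MSEC before this change */
        len = NSEC_PER_SEC;
    return len;
}

int main(void)
{
    /* A 64 KB GSO packet at 100 kB/s takes about 0.66 s on the wire. */
    printf("%llu ns\n", (unsigned long long)fq_pacing_delay_ns(65536, 100000));
    return 0;
}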
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index b9ca32ebc1de..1e52decb7b59 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -94,7 +94,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
TC_H_MIN(skb->priority) <= q->flows_cnt)
return TC_H_MIN(skb->priority);
- filter = rcu_dereference(q->filter_list);
+ filter = rcu_dereference_bh(q->filter_list);
if (!filter)
return fq_codel_hash(q, skb) + 1;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b34331967e02..179f1c8c0d8b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -139,33 +139,20 @@ struct netem_sched_data {
/* Time stamp put into socket buffer control block
* Only valid when skbs are in our internal t(ime)fifo queue.
+ *
+ * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
+ * and skb->next & skb->prev are scratch space for a qdisc,
+ * we save skb->tstamp value in skb->cb[] before destroying it.
*/
struct netem_skb_cb {
psched_time_t time_to_send;
ktime_t tstamp_save;
};
-/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
- * to hold a rb_node structure.
- *
- * If struct sk_buff layout is changed, the following checks will complain.
- */
-static struct rb_node *netem_rb_node(struct sk_buff *skb)
-{
- BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
- BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
- offsetof(struct sk_buff, next) + sizeof(skb->next));
- BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
- offsetof(struct sk_buff, prev) + sizeof(skb->prev));
- BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
- sizeof(skb->prev) +
- sizeof(skb->tstamp));
- return (struct rb_node *)&skb->next;
-}
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
- return (struct sk_buff *)rb;
+ return container_of(rb, struct sk_buff, rbnode);
}
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
@@ -403,8 +390,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
else
p = &parent->rb_left;
}
- rb_link_node(netem_rb_node(nskb), parent, p);
- rb_insert_color(netem_rb_node(nskb), &q->t_root);
+ rb_link_node(&nskb->rbnode, parent, p);
+ rb_insert_color(&nskb->rbnode, &q->t_root);
sch->q.qlen++;
}
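netem_rb_to_skb() above now recovers the sk_buff from its embedded rbnode with container_of() instead of type-punning skb->next, dropping the BUILD_BUG_ON layout checks. The macro is just pointer arithmetic over offsetof(); a standalone illustration with a stand-in structure:

#include <stddef.h>
#include <stdio.h>

/* container_of() in miniature: recover the enclosing structure from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node { struct rb_node *left, *right; };

struct fake_skb {            /* stand-in for struct sk_buff */
    long tstamp;
    struct rb_node rbnode;
    int len;
};

static struct fake_skb *rb_to_skb(struct rb_node *rb)
{
    return container_of(rb, struct fake_skb, rbnode);
}

int main(void)
{
    struct fake_skb skb = { .tstamp = 42, .len = 1500 };
    struct rb_node *rb = &skb.rbnode;

    printf("len=%d tstamp=%ld\n", rb_to_skb(rb)->len, rb_to_skb(rb)->tstamp);
    return 0;
}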
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 158701da2d31..a3380917f197 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -164,7 +164,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
*/
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo,
- struct msghdr *msgh, int msg_len)
+ struct iov_iter *from)
{
int max, whole, i, offset, over, err;
int len, first_len;
@@ -172,6 +172,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
struct sctp_chunk *chunk;
struct sctp_datamsg *msg;
struct list_head *pos, *temp;
+ size_t msg_len = iov_iter_count(from);
__u8 frag;
msg = sctp_datamsg_new(GFP_KERNEL);
@@ -279,12 +280,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
goto errout;
}
- err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
+ err = sctp_user_addto_chunk(chunk, len, from);
if (err < 0)
goto errout_chunk_free;
- offset += len;
-
/* Put the chunk->skb back into the form expected by send. */
__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
- (__u8 *)chunk->skb->data);
@@ -317,7 +316,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
goto errout;
}
- err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);
+ err = sctp_user_addto_chunk(chunk, over, from);
/* Put the chunk->skb back into the form expected by send. */
__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 42dffd428389..fc5e45b8a832 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
sk = chunk->skb->sk;
/* Allocate the new skb. */
- nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
if (!nskb)
goto nomem;
/* Make sure the outbound skb has enough header room reserved. */
- skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
/* Set the owning socket so that we know where to get the
* destination IP address.
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 34229ee7f379..0697eda5aed8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -417,7 +417,7 @@ static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
- "REM_ADDR_RTX START\n");
+ "REM_ADDR_RTX START STATE\n");
return (void *)pos;
}
@@ -490,14 +490,20 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
* Note: We don't have a way to tally this at the moment
* so lets just leave it as zero for the moment
*/
- seq_printf(seq, "0 ");
+ seq_puts(seq, "0 ");
/*
* remote address start time (START). This is also not
* currently implemented, but we can record it with a
* jiffies marker in a subsequent patch
*/
- seq_printf(seq, "0");
+ seq_puts(seq, "0 ");
+
+ /*
+ * The current state of this destination. I.e.
+ * SCTP_ACTIVE, SCTP_INACTIVE, ...
+ */
+ seq_printf(seq, "%d", tsp->state);
seq_printf(seq, "\n");
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9f32741abb1c..e49e231cef52 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1001,7 +1001,7 @@ no_mem:
/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
- const struct msghdr *msg,
+ struct msghdr *msg,
size_t paylen)
{
struct sctp_chunk *retval;
@@ -1018,7 +1018,7 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
if (!payload)
goto err_payload;
- err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
+ err = memcpy_from_msg(payload, msg, paylen);
if (err < 0)
goto err_copy;
}
@@ -1491,26 +1491,26 @@ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
* chunk is not big enough.
* Returns a kernel err value.
*/
-int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
- struct iovec *data)
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from)
{
- __u8 *target;
- int err = 0;
+ void *target;
+ ssize_t copied;
/* Make room in chunk for data. */
target = skb_put(chunk->skb, len);
/* Copy data (whole iovec) into chunk */
- if ((err = memcpy_fromiovecend(target, data, off, len)))
- goto out;
+ copied = copy_from_iter(target, len, from);
+ if (copied != len)
+ return -EFAULT;
/* Adjust the chunk length field. */
chunk->chunk_hdr->length =
htons(ntohs(chunk->chunk_hdr->length) + len);
chunk->chunk_end = skb_tail_pointer(chunk->skb);
-out:
- return err;
+ return 0;
}
/* Helper function to assign a TSN if needed. This assumes that both
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 634a2abb5f3a..2625eccb77d5 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -162,7 +162,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
- *((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
+ skb_shinfo(chunk->skb)->destructor_arg = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
@@ -1947,7 +1947,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
/* Break the message into multiple chunks of maximum size. */
- datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
+ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
if (IS_ERR(datamsg)) {
err = PTR_ERR(datamsg);
goto out_free;
@@ -2095,7 +2095,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
if (copied > len)
copied = len;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
event = sctp_skb2event(skb);
@@ -6592,8 +6592,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
struct cmsghdr *cmsg;
struct msghdr *my_msg = (struct msghdr *)msg;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
- cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
+ for_each_cmsghdr(cmsg, my_msg) {
if (!CMSG_OK(my_msg, cmsg))
return -EINVAL;
@@ -6870,14 +6869,10 @@ static void sctp_wake_up_waiters(struct sock *sk,
*/
static void sctp_wfree(struct sk_buff *skb)
{
- struct sctp_association *asoc;
- struct sctp_chunk *chunk;
- struct sock *sk;
+ struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
+ struct sctp_association *asoc = chunk->asoc;
+ struct sock *sk = asoc->base.sk;
- /* Get the saved chunk pointer. */
- chunk = *((struct sctp_chunk **)(skb->cb));
- asoc = chunk->asoc;
- sk = asoc->base.sk;
asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index d49dc2ed30ad..ce469d648ffb 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -205,9 +205,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
goto out_free;
- if (!sctp_ulpevent_is_notification(event))
+ if (!sctp_ulpevent_is_notification(event)) {
sk_mark_napi_id(sk, skb);
-
+ sk_incoming_cpu_update(sk);
+ }
/* Check if the user wishes to receive this event. */
if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
goto out_free;
diff --git a/net/socket.c b/net/socket.c
index fe20c319a0bb..70bbde65e4ca 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -113,7 +113,6 @@ unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -151,7 +150,6 @@ static const struct file_operations socket_file_ops = {
.compat_ioctl = compat_sock_ioctl,
#endif
.mmap = sock_mmap,
- .open = sock_no_open, /* special open code to disallow open via /proc */
.release = sock_close,
.fasync = sock_fasync,
.sendpage = sock_sendpage,
@@ -559,23 +557,6 @@ static struct socket *sock_alloc(void)
return sock;
}
-/*
- * In theory you can't get an open on this inode, but /proc provides
- * a back door. Remember to keep it shut otherwise you'll let the
- * creepy crawlies in.
- */
-
-static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
-{
- return -ENXIO;
-}
-
-const struct file_operations bad_sock_fops = {
- .owner = THIS_MODULE,
- .open = sock_no_open,
- .llseek = noop_llseek,
-};
-
/**
* sock_release - close a socket
* @sock: socket to close
@@ -651,7 +632,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
}
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, bool nosec)
{
struct kiocb iocb;
struct sock_iocb siocb;
@@ -659,25 +641,22 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
init_sync_kiocb(&iocb, NULL);
iocb.private = &siocb;
- ret = __sock_sendmsg(&iocb, sock, msg, size);
+ ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) :
+ __sock_sendmsg(&iocb, sock, msg, size);
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&iocb);
return ret;
}
+
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+{
+ return do_sock_sendmsg(sock, msg, size, false);
+}
EXPORT_SYMBOL(sock_sendmsg);
static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
{
- struct kiocb iocb;
- struct sock_iocb siocb;
- int ret;
-
- init_sync_kiocb(&iocb, NULL);
- iocb.private = &siocb;
- ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
- if (-EIOCBQUEUED == ret)
- ret = wait_on_sync_kiocb(&iocb);
- return ret;
+ return do_sock_sendmsg(sock, msg, size, true);
}
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
@@ -691,8 +670,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
* the following is safe, since for compiler definitions of kvec and
* iovec are identical, yielding the same in-core layout and alignment
*/
- msg->msg_iov = (struct iovec *)vec;
- msg->msg_iovlen = num;
+ iov_iter_init(&msg->msg_iter, WRITE, (struct iovec *)vec, num, size);
result = sock_sendmsg(sock, msg, size);
set_fs(oldfs);
return result;
@@ -855,7 +833,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
* the following is safe, since for compiler definitions of kvec and
* iovec are identical, yielding the same in-core layout and alignment
*/
- msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
+ iov_iter_init(&msg->msg_iter, READ, (struct iovec *)vec, num, size);
result = sock_recvmsg(sock, msg, size, flags);
set_fs(oldfs);
return result;
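Where the kernel paths above now seed msg->msg_iter with iov_iter_init(), a user program still describes the same scatter/gather payload with an iovec array in struct msghdr; for contrast, a minimal sendmsg() example over a socketpair:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Gather two separate buffers into one datagram with sendmsg(). */
int main(void)
{
    int sv[2];
    char hdr[] = "hdr:", body[] = "payload";
    struct iovec iov[2] = {
        { hdr, sizeof(hdr) - 1 },
        { body, sizeof(body) - 1 },
    };
    struct msghdr msg = { 0 };
    char rx[32];

    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
        return 1;
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;
    if (sendmsg(sv[0], &msg, 0) < 0)
        return 1;

    ssize_t n = read(sv[1], rx, sizeof(rx) - 1);
    rx[n > 0 ? n : 0] = '\0';
    printf("received: %s\n", rx);   /* "hdr:payload" arrives as one datagram */
    return 0;
}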
@@ -915,8 +893,7 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
msg->msg_namelen = 0;
msg->msg_control = NULL;
msg->msg_controllen = 0;
- msg->msg_iov = (struct iovec *)iov;
- msg->msg_iovlen = nr_segs;
+ iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, size);
msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
@@ -955,8 +932,7 @@ static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
msg->msg_namelen = 0;
msg->msg_control = NULL;
msg->msg_controllen = 0;
- msg->msg_iov = (struct iovec *)iov;
- msg->msg_iovlen = nr_segs;
+ iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, size);
msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
if (sock->type == SOCK_SEQPACKET)
msg->msg_flags |= MSG_EOR;
@@ -1800,8 +1776,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
iov.iov_base = buff;
iov.iov_len = len;
msg.msg_name = NULL;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
+ iov_iter_init(&msg.msg_iter, WRITE, &iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
@@ -1858,10 +1833,9 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_iovlen = 1;
- msg.msg_iov = &iov;
iov.iov_len = size;
iov.iov_base = ubuf;
+ iov_iter_init(&msg.msg_iter, READ, &iov, 1, size);
/* Save some cycles and don't copy the address if not needed */
msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
/* We assume all kernel code knows the size of sockaddr_storage */
@@ -1988,13 +1962,27 @@ struct used_address {
unsigned int name_len;
};
-static int copy_msghdr_from_user(struct msghdr *kmsg,
- struct msghdr __user *umsg)
+static ssize_t copy_msghdr_from_user(struct msghdr *kmsg,
+ struct user_msghdr __user *umsg,
+ struct sockaddr __user **save_addr,
+ struct iovec **iov)
{
- if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ struct sockaddr __user *uaddr;
+ struct iovec __user *uiov;
+ size_t nr_segs;
+ ssize_t err;
+
+ if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
+ __get_user(uaddr, &umsg->msg_name) ||
+ __get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
+ __get_user(uiov, &umsg->msg_iov) ||
+ __get_user(nr_segs, &umsg->msg_iovlen) ||
+ __get_user(kmsg->msg_control, &umsg->msg_control) ||
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
- if (kmsg->msg_name == NULL)
+ if (!uaddr)
kmsg->msg_namelen = 0;
if (kmsg->msg_namelen < 0)
@@ -2002,10 +1990,35 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
- return 0;
+
+ if (save_addr)
+ *save_addr = uaddr;
+
+ if (uaddr && kmsg->msg_namelen) {
+ if (!save_addr) {
+ err = move_addr_to_kernel(uaddr, kmsg->msg_namelen,
+ kmsg->msg_name);
+ if (err < 0)
+ return err;
+ }
+ } else {
+ kmsg->msg_name = NULL;
+ kmsg->msg_namelen = 0;
+ }
+
+ if (nr_segs > UIO_MAXIOV)
+ return -EMSGSIZE;
+
+ err = rw_copy_check_uvector(save_addr ? READ : WRITE,
+ uiov, nr_segs,
+ UIO_FASTIOV, *iov, iov);
+ if (err >= 0)
+ iov_iter_init(&kmsg->msg_iter, save_addr ? READ : WRITE,
+ *iov, nr_segs, err);
+ return err;
}
-static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags,
struct used_address *used_address)
{
@@ -2017,34 +2030,15 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
__attribute__ ((aligned(sizeof(__kernel_size_t))));
/* 20 is size of ipv6_pktinfo */
unsigned char *ctl_buf = ctl;
- int err, ctl_len, total_len;
-
- err = -EFAULT;
- if (MSG_CMSG_COMPAT & flags) {
- if (get_compat_msghdr(msg_sys, msg_compat))
- return -EFAULT;
- } else {
- err = copy_msghdr_from_user(msg_sys, msg);
- if (err)
- return err;
- }
+ int ctl_len, total_len;
+ ssize_t err;
- if (msg_sys->msg_iovlen > UIO_FASTIOV) {
- err = -EMSGSIZE;
- if (msg_sys->msg_iovlen > UIO_MAXIOV)
- goto out;
- err = -ENOMEM;
- iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
- GFP_KERNEL);
- if (!iov)
- goto out;
- }
+ msg_sys->msg_name = &address;
- /* This will also move the address data into kernel space */
- if (MSG_CMSG_COMPAT & flags) {
- err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
- } else
- err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
+ if (MSG_CMSG_COMPAT & flags)
+ err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov);
+ else
+ err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
if (err < 0)
goto out_freeiov;
total_len = err;
@@ -2115,7 +2109,6 @@ out_freectl:
out_freeiov:
if (iov != iovstack)
kfree(iov);
-out:
return err;
}
@@ -2123,7 +2116,7 @@ out:
* BSD sendmsg interface
*/
-long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
@@ -2140,7 +2133,7 @@ out:
return err;
}
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags)
{
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
@@ -2177,7 +2170,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
while (datagrams < vlen) {
if (MSG_CMSG_COMPAT & flags) {
- err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+ err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry,
&msg_sys, flags, &used_address);
if (err < 0)
break;
@@ -2185,7 +2178,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
++compat_entry;
} else {
err = ___sys_sendmsg(sock,
- (struct msghdr __user *)entry,
+ (struct user_msghdr __user *)entry,
&msg_sys, flags, &used_address);
if (err < 0)
break;
@@ -2215,7 +2208,7 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
return __sys_sendmmsg(fd, mmsg, vlen, flags);
}
-static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
struct msghdr *msg_sys, unsigned int flags, int nosec)
{
struct compat_msghdr __user *msg_compat =
@@ -2223,44 +2216,22 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
unsigned long cmsg_ptr;
- int err, total_len, len;
+ int total_len, len;
+ ssize_t err;
/* kernel mode address */
struct sockaddr_storage addr;
/* user mode address pointers */
struct sockaddr __user *uaddr;
- int __user *uaddr_len;
-
- if (MSG_CMSG_COMPAT & flags) {
- if (get_compat_msghdr(msg_sys, msg_compat))
- return -EFAULT;
- } else {
- err = copy_msghdr_from_user(msg_sys, msg);
- if (err)
- return err;
- }
+ int __user *uaddr_len = COMPAT_NAMELEN(msg);
- if (msg_sys->msg_iovlen > UIO_FASTIOV) {
- err = -EMSGSIZE;
- if (msg_sys->msg_iovlen > UIO_MAXIOV)
- goto out;
- err = -ENOMEM;
- iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
- GFP_KERNEL);
- if (!iov)
- goto out;
- }
+ msg_sys->msg_name = &addr;
- /* Save the user-mode address (verify_iovec will change the
- * kernel msghdr to use the kernel address space)
- */
- uaddr = (__force void __user *)msg_sys->msg_name;
- uaddr_len = COMPAT_NAMELEN(msg);
if (MSG_CMSG_COMPAT & flags)
- err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+ err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov);
else
- err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+ err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
if (err < 0)
goto out_freeiov;
total_len = err;
@@ -2303,7 +2274,6 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
out_freeiov:
if (iov != iovstack)
kfree(iov);
-out:
return err;
}
@@ -2311,7 +2281,7 @@ out:
* BSD recvmsg interface
*/
-long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
+long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags)
{
int fput_needed, err;
struct msghdr msg_sys;
@@ -2328,7 +2298,7 @@ out:
return err;
}
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
unsigned int, flags)
{
if (flags & MSG_CMSG_COMPAT)
@@ -2373,7 +2343,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
* No need to ask LSM for more than the first datagram.
*/
if (MSG_CMSG_COMPAT & flags) {
- err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+ err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
@@ -2382,7 +2352,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
++compat_entry;
} else {
err = ___sys_recvmsg(sock,
- (struct msghdr __user *)entry,
+ (struct user_msghdr __user *)entry,
&msg_sys, flags & ~MSG_WAITFORONE,
datagrams);
if (err < 0)
@@ -2571,13 +2541,13 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
(int __user *)a[4]);
break;
case SYS_SENDMSG:
- err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
+ err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]);
break;
case SYS_SENDMMSG:
err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
break;
case SYS_RECVMSG:
- err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
+ err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]);
break;
case SYS_RECVMMSG:
err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3],
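For orientation, the net/socket.c changes above drop the msg_iov/msg_iovlen pair in favour of the iov_iter embedded in struct msghdr. Below is a minimal sketch of how an in-kernel caller describes a payload after this conversion; example_send() and its variables are illustrative, and only iov_iter_init(), sock_sendmsg() and the kvec-to-iovec cast used by kernel_sendmsg() come from the patch itself.

#include <linux/fs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative caller: the payload is described once through msg.msg_iter
 * instead of being assigned to msg_iov/msg_iovlen. */
static int example_send(struct socket *sock, void *data, size_t len)
{
	struct kvec vec = { .iov_base = data, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	/* same cast as kernel_sendmsg(): kvec and iovec share one layout */
	iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)&vec, 1, len);
	return sock_sendmsg(sock, &msg, len);
}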
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index de856ddf5fed..224a82f24d3c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -886,7 +886,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
u32 priv_len, maj_stat;
int pad, saved_len, remaining_len, offset;
- rqstp->rq_splice_ok = false;
+ clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
priv_len = svc_getnl(&buf->head[0]);
if (rqstp->rq_deferred) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 066362141133..33fb105d4352 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
@@ -1067,30 +1068,15 @@ void qword_add(char **bpp, int *lp, char *str)
{
char *bp = *bpp;
int len = *lp;
- char c;
+ int ret;
if (len < 0) return;
- while ((c=*str++) && len)
- switch(c) {
- case ' ':
- case '\t':
- case '\n':
- case '\\':
- if (len >= 4) {
- *bp++ = '\\';
- *bp++ = '0' + ((c & 0300)>>6);
- *bp++ = '0' + ((c & 0070)>>3);
- *bp++ = '0' + ((c & 0007)>>0);
- }
- len -= 4;
- break;
- default:
- *bp++ = c;
- len--;
- }
- if (c || len <1) len = -1;
+ ret = string_escape_str(str, &bp, len, ESCAPE_OCTAL, "\\ \n\t");
+ if (ret < 0 || ret == len)
+ len = -1;
else {
+ len -= ret;
*bp++ = ' ';
len--;
}
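The qword_add() rewrite above hands the open-coded escaping loop to string_escape_str(); the transformation itself is a backslash followed by three octal digits for the four characters that would break the cache's space-separated format. A standalone sketch of that transformation (plain C, not the kernel helper's API), assuming the same escape set:

#include <stddef.h>
#include <string.h>

/* Escape '\\', ' ', '\n' and '\t' as "\ooo"; copy everything else
 * through. Returns the number of bytes written to dst. */
static size_t escape_octal(const char *src, char *dst, size_t dstsz)
{
	size_t out = 0;
	unsigned char c;

	while ((c = (unsigned char)*src++) != '\0') {
		if (strchr("\\ \n\t", c)) {
			if (out + 4 > dstsz)
				break;
			dst[out++] = '\\';
			dst[out++] = '0' + ((c >> 6) & 7);
			dst[out++] = '0' + ((c >> 3) & 7);
			dst[out++] = '0' + (c & 7);
		} else {
			if (out + 1 > dstsz)
				break;
			dst[out++] = c;
		}
	}
	return out;
}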
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2783fd80c229..91eaef1844c8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -191,7 +191,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
return err;
for_each_online_cpu(cpu) {
- BUG_ON(pidx > maxpools);
+ BUG_ON(pidx >= maxpools);
m->to_pool[cpu] = pidx;
m->pool_to[pidx] = cpu;
pidx++;
@@ -476,15 +476,11 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_threads);
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
spin_lock_init(&pool->sp_lock);
}
- if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
- serv->sv_shutdown = svc_rpcb_cleanup;
-
return serv;
}
@@ -505,13 +501,15 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
unsigned int npools = svc_pool_map_get();
serv = __svc_create(prog, bufsize, npools, shutdown);
+ if (!serv)
+ goto out_err;
- if (serv != NULL) {
- serv->sv_function = func;
- serv->sv_module = mod;
- }
-
+ serv->sv_function = func;
+ serv->sv_module = mod;
return serv;
+out_err:
+ svc_pool_map_put();
+ return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
@@ -615,12 +613,14 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
goto out_enomem;
serv->sv_nrthreads++;
+ __set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_lock_init(&rqstp->rq_lock);
+ rqstp->rq_server = serv;
+ rqstp->rq_pool = pool;
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads++;
- list_add(&rqstp->rq_all, &pool->sp_all_threads);
+ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
spin_unlock_bh(&pool->sp_lock);
- rqstp->rq_server = serv;
- rqstp->rq_pool = pool;
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
@@ -685,7 +685,8 @@ found_pool:
* so we don't try to kill it again.
*/
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- list_del_init(&rqstp->rq_all);
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+ list_del_rcu(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock);
@@ -783,10 +784,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--;
- list_del(&rqstp->rq_all);
+ if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
+ list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
- kfree(rqstp);
+ kfree_rcu(rqstp, rq_rcu_head);
/* Release the server */
if (serv)
@@ -1086,10 +1088,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto err_short_len;
/* Will be turned off only in gss privacy case: */
- rqstp->rq_splice_ok = true;
+ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* Will be turned off only when NFSv4 Sessions are used */
- rqstp->rq_usedeferral = true;
- rqstp->rq_dropme = false;
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1189,7 +1191,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (rqstp->rq_dropme) {
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags)) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bbb3b044b877..c69358b3cf7f 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -220,9 +220,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
*/
static void svc_xprt_received(struct svc_xprt *xprt)
{
- WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
- if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
+ if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+ WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
return;
+ }
+
/* As soon as we clear busy, the xprt could be closed and
* 'put', so we need a reference to call svc_xprt_do_enqueue with:
*/
@@ -310,25 +312,6 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
}
EXPORT_SYMBOL_GPL(svc_print_addr);
-/*
- * Queue up an idle server thread. Must have pool->sp_lock held.
- * Note: this is really a stack rather than a queue, so that we only
- * use as many different threads as we need, and the rest don't pollute
- * the cache.
- */
-static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_add(&rqstp->rq_list, &pool->sp_threads);
-}
-
-/*
- * Dequeue an nfsd thread. Must have pool->sp_lock held.
- */
-static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
-{
- list_del(&rqstp->rq_list);
-}
-
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
@@ -341,11 +324,12 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
- struct svc_rqst *rqstp;
+ struct svc_rqst *rqstp = NULL;
int cpu;
+ bool queued = false;
if (!svc_xprt_has_something_to_do(xprt))
- return;
+ goto out;
/* Mark transport as busy. It will remain in this state until
* the provider calls svc_xprt_received. We update XPT_BUSY
@@ -355,43 +339,69 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
/* Don't enqueue transport while already enqueued */
dprintk("svc: transport %p busy, not enqueued\n", xprt);
- return;
+ goto out;
}
cpu = get_cpu();
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_stats.packets++;
-
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: transport %p served by daemon %p\n",
- xprt, rqstp);
- svc_thread_dequeue(pool, rqstp);
- if (rqstp->rq_xprt)
- printk(KERN_ERR
- "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
- rqstp, rqstp->rq_xprt);
- /* Note the order of the following 3 lines:
- * We want to assign xprt to rqstp->rq_xprt only _after_
- * we've woken up the process, so that we don't race with
- * the lockless check in svc_get_next_xprt().
+ atomic_long_inc(&pool->sp_stats.packets);
+
+redo_search:
+ /* find a thread for this xprt */
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* Do a lockless check first */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+
+ /*
+ * Once the xprt has been queued, it can only be dequeued by
+ * the task that intends to service it. All we can do at that
+ * point is to try to wake this thread back up so that it can
+ * do so.
*/
- svc_xprt_get(xprt);
+ if (!queued) {
+ spin_lock_bh(&rqstp->rq_lock);
+ if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
+ /* already busy, move on... */
+ spin_unlock_bh(&rqstp->rq_lock);
+ continue;
+ }
+
+ /* this one will do */
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+ spin_unlock_bh(&rqstp->rq_lock);
+ }
+ rcu_read_unlock();
+
+ atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
- rqstp->rq_xprt = xprt;
- pool->sp_stats.threads_woken++;
- } else {
+ put_cpu();
+ goto out;
+ }
+ rcu_read_unlock();
+
+ /*
+ * We didn't find an idle thread to use, so we need to queue the xprt.
+ * Do so and then search again. If we find one, we can't hook this one
+ * up to it directly but we can wake the thread up in the hopes that it
+ * will pick it up once it searches for an xprt to service.
+ */
+ if (!queued) {
+ queued = true;
dprintk("svc: transport %p put into queue\n", xprt);
+ spin_lock_bh(&pool->sp_lock);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
pool->sp_stats.sockets_queued++;
+ spin_unlock_bh(&pool->sp_lock);
+ goto redo_search;
}
-
- spin_unlock_bh(&pool->sp_lock);
+ rqstp = NULL;
put_cpu();
+out:
+ trace_svc_xprt_do_enqueue(xprt, rqstp);
}
/*
@@ -408,22 +418,28 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
/*
- * Dequeue the first transport. Must be called with the pool->sp_lock held.
+ * Dequeue the first transport, if there is one.
*/
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
- struct svc_xprt *xprt;
+ struct svc_xprt *xprt = NULL;
if (list_empty(&pool->sp_sockets))
- return NULL;
-
- xprt = list_entry(pool->sp_sockets.next,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ goto out;
- dprintk("svc: transport %p dequeued, inuse=%d\n",
- xprt, atomic_read(&xprt->xpt_ref.refcount));
+ spin_lock_bh(&pool->sp_lock);
+ if (likely(!list_empty(&pool->sp_sockets))) {
+ xprt = list_first_entry(&pool->sp_sockets,
+ struct svc_xprt, xpt_ready);
+ list_del_init(&xprt->xpt_ready);
+ svc_xprt_get(xprt);
+ dprintk("svc: transport %p dequeued, inuse=%d\n",
+ xprt, atomic_read(&xprt->xpt_ref.refcount));
+ }
+ spin_unlock_bh(&pool->sp_lock);
+out:
+ trace_svc_xprt_dequeue(xprt);
return xprt;
}
@@ -484,34 +500,36 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
}
/*
- * External function to wake up a server waiting for data
- * This really only makes sense for services like lockd
- * which have exactly one thread anyway.
+ * Some svc_serv's will have occasional work to do, even when an xprt is not
+ * waiting to be serviced. This function is there to "kick" a task in one of
+ * those services so that it can wake up and do that work. Note that we only
+ * bother with pool 0 as we don't need to wake up more than one thread for
+ * this purpose.
*/
void svc_wake_up(struct svc_serv *serv)
{
struct svc_rqst *rqstp;
- unsigned int i;
struct svc_pool *pool;
- for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
+ pool = &serv->sv_pools[0];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_threads)) {
- rqstp = list_entry(pool->sp_threads.next,
- struct svc_rqst,
- rq_list);
- dprintk("svc: daemon %p woken up.\n", rqstp);
- /*
- svc_thread_dequeue(pool, rqstp);
- rqstp->rq_xprt = NULL;
- */
- wake_up_process(rqstp->rq_task);
- } else
- pool->sp_task_pending = 1;
- spin_unlock_bh(&pool->sp_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ /* skip any that aren't queued */
+ if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+ rcu_read_unlock();
+ dprintk("svc: daemon %p woken up.\n", rqstp);
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ return;
}
+ rcu_read_unlock();
+
+ /* No free entries available */
+ set_bit(SP_TASK_PENDING, &pool->sp_flags);
+ smp_wmb();
+ trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);
@@ -622,75 +640,86 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
return 0;
}
+static bool
+rqst_should_sleep(struct svc_rqst *rqstp)
+{
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ /* did someone call svc_wake_up? */
+ if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+ return false;
+
+ /* was a socket queued? */
+ if (!list_empty(&pool->sp_sockets))
+ return false;
+
+ /* are we shutting down? */
+ if (signalled() || kthread_should_stop())
+ return false;
+
+ /* are we freezing? */
+ if (freezing(current))
+ return false;
+
+ return true;
+}
+
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
struct svc_xprt *xprt;
struct svc_pool *pool = rqstp->rq_pool;
long time_left = 0;
+ /* rq_xprt should be clear on entry */
+ WARN_ON_ONCE(rqstp->rq_xprt);
+
/* Normally we will wait up to 5 seconds for any required
* cache information to be provided.
*/
rqstp->rq_chandle.thread_wait = 5*HZ;
- spin_lock_bh(&pool->sp_lock);
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
- svc_xprt_get(xprt);
/* As there is a shortage of threads and this request
* had to be queued, don't allow the thread to wait so
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
- pool->sp_task_pending = 0;
- } else {
- if (pool->sp_task_pending) {
- pool->sp_task_pending = 0;
- xprt = ERR_PTR(-EAGAIN);
- goto out;
- }
- /*
- * We have to be able to interrupt this wait
- * to bring down the daemons ...
- */
- set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+ return xprt;
+ }
- /* No data pending. Go to sleep */
- svc_thread_enqueue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
+ /*
+ * We have to be able to interrupt this wait
+ * to bring down the daemons ...
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ clear_bit(RQ_BUSY, &rqstp->rq_flags);
+ smp_mb();
- if (!(signalled() || kthread_should_stop())) {
- time_left = schedule_timeout(timeout);
- __set_current_state(TASK_RUNNING);
+ if (likely(rqst_should_sleep(rqstp)))
+ time_left = schedule_timeout(timeout);
+ else
+ __set_current_state(TASK_RUNNING);
- try_to_freeze();
+ try_to_freeze();
- xprt = rqstp->rq_xprt;
- if (xprt != NULL)
- return xprt;
- } else
- __set_current_state(TASK_RUNNING);
+ spin_lock_bh(&rqstp->rq_lock);
+ set_bit(RQ_BUSY, &rqstp->rq_flags);
+ spin_unlock_bh(&rqstp->rq_lock);
- spin_lock_bh(&pool->sp_lock);
- if (!time_left)
- pool->sp_stats.threads_timedout++;
+ xprt = rqstp->rq_xprt;
+ if (xprt != NULL)
+ return xprt;
- xprt = rqstp->rq_xprt;
- if (!xprt) {
- svc_thread_dequeue(pool, rqstp);
- spin_unlock_bh(&pool->sp_lock);
- dprintk("svc: server %p, no data yet\n", rqstp);
- if (signalled() || kthread_should_stop())
- return ERR_PTR(-EINTR);
- else
- return ERR_PTR(-EAGAIN);
- }
- }
-out:
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ if (!time_left)
+ atomic_long_inc(&pool->sp_stats.threads_timedout);
+
+ if (signalled() || kthread_should_stop())
+ return ERR_PTR(-EINTR);
+ return ERR_PTR(-EAGAIN);
}
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -719,7 +748,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
dprintk("svc_recv: found XPT_CLOSE\n");
svc_delete_xprt(xprt);
/* Leave XPT_BUSY set on the dead xprt: */
- return 0;
+ goto out;
}
if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
@@ -750,6 +779,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
}
/* clear XPT_BUSY: */
svc_xprt_received(xprt);
+out:
+ trace_svc_handle_xprt(xprt, len);
return len;
}
@@ -797,7 +828,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
clear_bit(XPT_OLD, &xprt->xpt_flags);
- rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp);
+ if (xprt->xpt_ops->xpo_secure_port(rqstp))
+ set_bit(RQ_SECURE, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_SECURE, &rqstp->rq_flags);
rqstp->rq_chandle.defer = svc_defer;
rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
@@ -895,7 +929,6 @@ static void svc_age_temp_xprts(unsigned long closure)
continue;
list_del_init(le);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- set_bit(XPT_DETACHED, &xprt->xpt_flags);
dprintk("queuing xprt %p for closing\n", xprt);
/* a thread will dequeue and close it soon */
@@ -935,8 +968,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
xprt->xpt_ops->xpo_detach(xprt);
spin_lock_bh(&serv->sv_lock);
- if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
- list_del_init(&xprt->xpt_list);
+ list_del_init(&xprt->xpt_list);
WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
@@ -1080,7 +1112,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
struct svc_deferred_req *dr;
- if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
+ if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
return NULL; /* if more than a page, give up FIXME */
if (rqstp->rq_deferred) {
dr = rqstp->rq_deferred;
@@ -1109,7 +1141,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
}
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
- rqstp->rq_dropme = true;
+ set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit;
return &dr->handle;
@@ -1311,10 +1343,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
- pool->sp_stats.packets,
+ (unsigned long)atomic_long_read(&pool->sp_stats.packets),
pool->sp_stats.sockets_queued,
- pool->sp_stats.threads_woken,
- pool->sp_stats.threads_timedout);
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+ (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
return 0;
}
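Several sunrpc hunks in this patch (svc.c, svc_xprt.c, svcsock.c) convert per-request booleans such as rq_secure, rq_splice_ok and rq_dropme into bits of rqstp->rq_flags, so state can be flipped with atomic bit ops instead of under the pool lock. The bit definitions live in include/linux/sunrpc/svc.h, which is not part of this excerpt; the enum below is an assumed reconstruction to make the test_bit()/set_bit() calls readable, and example_set_secure() is illustrative only.

#include <linux/bitops.h>
#include <linux/types.h>

/* Assumed names/order; the authoritative enum is in sunrpc/svc.h. */
enum {
	RQ_SECURE,	/* request arrived on a secure port */
	RQ_LOCAL,	/* request came over the loopback interface */
	RQ_USEDEFERRAL,	/* request deferral is allowed */
	RQ_DROPME,	/* drop the current reply */
	RQ_SPLICE_OK,	/* cleared only in the gss privacy case */
	RQ_VICTIM,	/* thread has been selected for shutdown */
	RQ_BUSY,	/* thread is busy processing a request */
};

/* The pattern used throughout: atomic bit ops replace plain bool stores. */
static inline void example_set_secure(unsigned long *rq_flags, bool secure)
{
	if (secure)
		set_bit(RQ_SECURE, rq_flags);
	else
		clear_bit(RQ_SECURE, rq_flags);
}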
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f9c052d508f0..cc331b6cf573 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1145,7 +1145,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
- rqstp->rq_local = !!test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags);
+ if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+ set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ else
+ clear_bit(RQ_LOCAL, &rqstp->rq_flags);
p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
calldir = p[1];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 290af97bf6f9..1cb61242e55e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -617,9 +617,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
fraglen = min_t(int, buf->len - len, tail->iov_len);
tail->iov_len -= fraglen;
buf->len -= fraglen;
- if (tail->iov_len && buf->len == len) {
+ if (tail->iov_len) {
xdr->p = tail->iov_base + tail->iov_len;
- /* xdr->end, xdr->iov should be set already */
+ WARN_ON_ONCE(!xdr->end);
+ WARN_ON_ONCE(!xdr->iov);
return;
}
WARN_ON_ONCE(fraglen);
@@ -631,11 +632,11 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
old = new + fraglen;
xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
- if (buf->page_len && buf->len == len) {
+ if (buf->page_len) {
xdr->p = page_address(*xdr->page_ptr);
xdr->end = (void *)xdr->p + PAGE_SIZE;
xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
- /* xdr->iov should already be NULL */
+ WARN_ON_ONCE(xdr->iov);
return;
}
if (fraglen) {
diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig
new file mode 100644
index 000000000000..155754588fd6
--- /dev/null
+++ b/net/switchdev/Kconfig
@@ -0,0 +1,13 @@
+#
+# Configuration for Switch device support
+#
+
+config NET_SWITCHDEV
+ boolean "Switch (and switch-ish) device support (EXPERIMENTAL)"
+ depends on INET
+ ---help---
+ This module provides glue between core networking code and device
+ drivers in order to support hardware switch chips in a very generic
+ sense of the word "switch". This includes devices supporting L2/L3 but
+ also various flow offloading chips, including switches embedded into
+ SR-IOV NICs.
diff --git a/net/switchdev/Makefile b/net/switchdev/Makefile
new file mode 100644
index 000000000000..5ed63ed324d0
--- /dev/null
+++ b/net/switchdev/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Switch device API
+#
+
+obj-$(CONFIG_NET_SWITCHDEV) += switchdev.o
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
new file mode 100644
index 000000000000..d162b21b14bd
--- /dev/null
+++ b/net/switchdev/switchdev.c
@@ -0,0 +1,52 @@
+/*
+ * net/switchdev/switchdev.c - Switch device API
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+/**
+ * netdev_switch_parent_id_get - Get ID of a switch
+ * @dev: port device
+ * @psid: switch ID
+ *
+ * Get ID of a switch this port is part of.
+ */
+int netdev_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_switch_parent_id_get)
+ return -EOPNOTSUPP;
+ return ops->ndo_switch_parent_id_get(dev, psid);
+}
+EXPORT_SYMBOL(netdev_switch_parent_id_get);
+
+/**
+ * netdev_switch_port_stp_update - Notify switch device port of STP
+ * state change
+ * @dev: port device
+ * @state: port STP state
+ *
+ * Notify switch device port of bridge port STP state change.
+ */
+int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_switch_port_stp_update)
+ return -EOPNOTSUPP;
+ WARN_ON(!ops->ndo_switch_parent_id_get);
+ return ops->ndo_switch_port_stp_update(dev, state);
+}
+EXPORT_SYMBOL(netdev_switch_port_stp_update);
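To show where the two helpers above plug in: a hypothetical port driver would fill the new ndo hooks in its net_device_ops. Everything prefixed foo_ below is made up for illustration; only struct netdev_phys_item_id and the ndo_switch_* member names come from this series (and those members exist only under CONFIG_NET_SWITCHDEV).

#include <linux/netdevice.h>
#include <linux/string.h>

struct foo_port {
	u64 switch_id;			/* shared by all ports of one ASIC */
};

static int foo_parent_id_get(struct net_device *dev,
			     struct netdev_phys_item_id *psid)
{
	struct foo_port *port = netdev_priv(dev);

	psid->id_len = sizeof(port->switch_id);
	memcpy(psid->id, &port->switch_id, psid->id_len);
	return 0;
}

static int foo_port_stp_update(struct net_device *dev, u8 state)
{
	/* program the BR_STATE_* value into the hardware port table */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	/* ... the driver's usual ndo ops ... */
	.ndo_switch_parent_id_get	= foo_parent_id_get,
	.ndo_switch_port_stp_update	= foo_port_stp_update,
};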
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index b8a13caad59a..333e4592772c 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o
tipc-y += addr.o bcast.o bearer.o config.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o name_table.o net.o \
- netlink.o node.o node_subscr.o \
- socket.o log.o eth_media.o server.o
+ netlink.o node.o socket.o log.o eth_media.o \
+ server.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index b8670bf262e2..96ceefeb9daf 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -217,12 +217,13 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
*/
static void bclink_retransmit_pkt(u32 after, u32 to)
{
- struct sk_buff *buf;
+ struct sk_buff *skb;
- buf = bcl->first_out;
- while (buf && less_eq(buf_seqno(buf), after))
- buf = buf->next;
- tipc_link_retransmit(bcl, buf, mod(to - after));
+ skb_queue_walk(&bcl->outqueue, skb) {
+ if (more(buf_seqno(skb), after))
+ break;
+ }
+ tipc_link_retransmit(bcl, skb, mod(to - after));
}
/**
@@ -232,8 +233,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
*/
void tipc_bclink_wakeup_users(void)
{
- while (skb_queue_len(&bclink->link.waiting_sks))
- tipc_sk_rcv(skb_dequeue(&bclink->link.waiting_sks));
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
+ tipc_sk_rcv(skb);
+
}
/**
@@ -245,14 +249,14 @@ void tipc_bclink_wakeup_users(void)
*/
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
- struct sk_buff *crs;
+ struct sk_buff *skb, *tmp;
struct sk_buff *next;
unsigned int released = 0;
tipc_bclink_lock();
/* Bail out if tx queue is empty (no clean up is required) */
- crs = bcl->first_out;
- if (!crs)
+ skb = skb_peek(&bcl->outqueue);
+ if (!skb)
goto exit;
/* Determine which messages need to be acknowledged */
@@ -271,43 +275,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
* Bail out if specified sequence number does not correspond
* to a message that has been sent and not yet acknowledged
*/
- if (less(acked, buf_seqno(crs)) ||
+ if (less(acked, buf_seqno(skb)) ||
less(bcl->fsm_msg_cnt, acked) ||
less_eq(acked, n_ptr->bclink.acked))
goto exit;
}
/* Skip over packets that node has previously acknowledged */
- while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
- crs = crs->next;
+ skb_queue_walk(&bcl->outqueue, skb) {
+ if (more(buf_seqno(skb), n_ptr->bclink.acked))
+ break;
+ }
/* Update packets that node is now acknowledging */
+ skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+ if (more(buf_seqno(skb), acked))
+ break;
- while (crs && less_eq(buf_seqno(crs), acked)) {
- next = crs->next;
-
- if (crs != bcl->next_out)
- bcbuf_decr_acks(crs);
- else {
- bcbuf_set_acks(crs, 0);
+ next = tipc_skb_queue_next(&bcl->outqueue, skb);
+ if (skb != bcl->next_out) {
+ bcbuf_decr_acks(skb);
+ } else {
+ bcbuf_set_acks(skb, 0);
bcl->next_out = next;
bclink_set_last_sent();
}
- if (bcbuf_acks(crs) == 0) {
- bcl->first_out = next;
- bcl->out_queue_size--;
- kfree_skb(crs);
+ if (bcbuf_acks(skb) == 0) {
+ __skb_unlink(skb, &bcl->outqueue);
+ kfree_skb(skb);
released = 1;
}
- crs = next;
}
n_ptr->bclink.acked = acked;
/* Try resolving broadcast link congestion, if necessary */
-
if (unlikely(bcl->next_out)) {
- tipc_link_push_queue(bcl);
+ tipc_link_push_packets(bcl);
bclink_set_last_sent();
}
if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
@@ -327,19 +331,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
struct sk_buff *buf;
/* Ignore "stale" link state info */
-
if (less_eq(last_sent, n_ptr->bclink.last_in))
return;
/* Update link synchronization state; quit if in sync */
-
bclink_update_last_sent(n_ptr, last_sent);
if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
return;
/* Update out-of-sync state; quit if loss is still unconfirmed */
-
if ((++n_ptr->bclink.oos_state) == 1) {
if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
return;
@@ -347,15 +348,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
}
/* Don't NACK if one has been recently sent (or seen) */
-
if (n_ptr->bclink.oos_state & 0x1)
return;
/* Send NACK */
-
buf = tipc_buf_acquire(INT_H_SIZE);
if (buf) {
struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+ u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
@@ -363,9 +364,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
msg_set_mc_netid(msg, tipc_net_id);
msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
- msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
- ? buf_seqno(n_ptr->bclink.deferred_head) - 1
- : n_ptr->bclink.last_sent);
+ msg_set_bcgap_to(msg, to);
tipc_bclink_lock();
tipc_bearer_send(MAX_BEARERS, buf, NULL);
@@ -402,20 +401,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
* and to identified node local sockets
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_bclink_xmit(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff_head *list)
{
int rc = 0;
int bc = 0;
- struct sk_buff *clbuf;
+ struct sk_buff *skb;
/* Prepare clone of message for local node */
- clbuf = tipc_msg_reassemble(buf);
- if (unlikely(!clbuf)) {
- kfree_skb_list(buf);
+ skb = tipc_msg_reassemble(list);
+ if (unlikely(!skb)) {
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -423,11 +422,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
if (likely(bclink)) {
tipc_bclink_lock();
if (likely(bclink->bcast_nodes.count)) {
- rc = __tipc_link_xmit(bcl, buf);
+ rc = __tipc_link_xmit(bcl, list);
if (likely(!rc)) {
+ u32 len = skb_queue_len(&bcl->outqueue);
+
bclink_set_last_sent();
bcl->stats.queue_sz_counts++;
- bcl->stats.accu_queue_sz += bcl->out_queue_size;
+ bcl->stats.accu_queue_sz += len;
}
bc = 1;
}
@@ -435,13 +436,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
}
if (unlikely(!bc))
- kfree_skb_list(buf);
+ __skb_queue_purge(list);
/* Deliver message clone */
if (likely(!rc))
- tipc_sk_mcast_rcv(clbuf);
+ tipc_sk_mcast_rcv(skb);
else
- kfree_skb(clbuf);
+ kfree_skb(skb);
return rc;
}
@@ -462,7 +463,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
* Unicast an ACK periodically, ensuring that
* all nodes in the cluster don't ACK at the same time
*/
-
if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
tipc_link_proto_xmit(node->active_links[node->addr & 1],
STATE_MSG, 0, 0, 0, 0, 0);
@@ -484,7 +484,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
int deferred = 0;
/* Screen out unwanted broadcast messages */
-
if (msg_mc_netid(msg) != tipc_net_id)
goto exit;
@@ -497,7 +496,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
goto unlock;
/* Handle broadcast protocol message */
-
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
if (msg_type(msg) != STATE_MSG)
goto unlock;
@@ -518,14 +516,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
}
/* Handle in-sequence broadcast message */
-
seqno = msg_seqno(msg);
next_in = mod(node->bclink.last_in + 1);
if (likely(seqno == next_in)) {
receive:
/* Deliver message to destination */
-
if (likely(msg_isdata(msg))) {
tipc_bclink_lock();
bclink_accept_pkt(node, seqno);
@@ -574,7 +570,6 @@ receive:
buf = NULL;
/* Determine new synchronization state */
-
tipc_node_lock(node);
if (unlikely(!tipc_node_is_up(node)))
goto unlock;
@@ -582,33 +577,26 @@ receive:
if (node->bclink.last_in == node->bclink.last_sent)
goto unlock;
- if (!node->bclink.deferred_head) {
+ if (skb_queue_empty(&node->bclink.deferred_queue)) {
node->bclink.oos_state = 1;
goto unlock;
}
- msg = buf_msg(node->bclink.deferred_head);
+ msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
seqno = msg_seqno(msg);
next_in = mod(next_in + 1);
if (seqno != next_in)
goto unlock;
/* Take in-sequence message from deferred queue & deliver it */
-
- buf = node->bclink.deferred_head;
- node->bclink.deferred_head = buf->next;
- buf->next = NULL;
- node->bclink.deferred_size--;
+ buf = __skb_dequeue(&node->bclink.deferred_queue);
goto receive;
}
/* Handle out-of-sequence broadcast message */
-
if (less(next_in, seqno)) {
- deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
- &node->bclink.deferred_tail,
+ deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
buf);
- node->bclink.deferred_size += deferred;
bclink_update_last_sent(node, seqno);
buf = NULL;
}
@@ -767,6 +755,118 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
tipc_bclink_unlock();
}
+static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
+ struct tipc_stats *stats)
+{
+ int i;
+ struct nlattr *nest;
+
+ struct nla_map {
+ __u32 key;
+ __u32 val;
+ };
+
+ struct nla_map map[] = {
+ {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
+ {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
+ {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
+ {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
+ {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
+ {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
+ {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
+ {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
+ {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
+ {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
+ {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
+ {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
+ {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
+ {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
+ {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
+ {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
+ {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
+ {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
+ {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
+ (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
+ };
+
+ nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (nla_put_u32(skb, map[i].key, map[i].val))
+ goto msg_full;
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+msg_full:
+ nla_nest_cancel(skb, nest);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+{
+ int err;
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ if (!bcl)
+ return 0;
+
+ tipc_bclink_lock();
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_LINK_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ if (!attrs)
+ goto msg_full;
+
+ /* The broadcast link is always up */
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
+ goto attr_msg_full;
+
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
+ goto attr_msg_full;
+ if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+ goto attr_msg_full;
+
+ prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
+ goto prop_msg_full;
+ nla_nest_end(msg->skb, prop);
+
+ err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
+ if (err)
+ goto attr_msg_full;
+
+ tipc_bclink_unlock();
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ tipc_bclink_unlock();
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
@@ -851,7 +951,9 @@ int tipc_bclink_init(void)
sprintf(bcbearer->media.name, "tipc-broadcast");
spin_lock_init(&bclink->lock);
- __skb_queue_head_init(&bcl->waiting_sks);
+ __skb_queue_head_init(&bcl->outqueue);
+ __skb_queue_head_init(&bcl->deferred_queue);
+ skb_queue_head_init(&bcl->waiting_sks);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
__skb_queue_head_init(&bclink->node.waiting_sks);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index e7b0f85a82bc..644d79129fba 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -37,6 +37,8 @@
#ifndef _TIPC_BCAST_H
#define _TIPC_BCAST_H
+#include "netlink.h"
+
#define MAX_NODES 4096
#define WSIZE 32
#define TIPC_BCLINK_RESET 1
@@ -98,6 +100,8 @@ int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit);
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
uint tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff_head *list);
void tipc_bclink_wakeup_users(void);
+int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
+
#endif
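The bcast.c changes above replace the hand-rolled first_out/next pointer chain with a regular sk_buff_head (bcl->outqueue), so retransmission and acknowledgement now use the standard skb queue walkers. A minimal sketch of the resulting traversal pattern; the function is illustrative, while more(), buf_seqno() and skb_queue_walk() are existing helpers (the first two come from TIPC's internal headers).

#include <linux/skbuff.h>

/* Count packets in the broadcast outqueue that 'acked' does not cover,
 * using TIPC's wrapping sequence-number comparison. */
static u32 example_count_unacked(struct sk_buff_head *outqueue, u32 acked)
{
	struct sk_buff *skb;
	u32 cnt = 0;

	skb_queue_walk(outqueue, skb) {
		if (more(buf_seqno(skb), acked))
			cnt++;
	}
	return cnt;
}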
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 264474394f9f..463db5b15b8b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bearer.c: TIPC bearer code
*
- * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 1996-2006, 2013-2014, Ericsson AB
* Copyright (c) 2004-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
@@ -37,6 +37,7 @@
#include "core.h"
#include "config.h"
#include "bearer.h"
+#include "link.h"
#include "discover.h"
#define MAX_ADDR_STR 60
@@ -49,6 +50,23 @@ static struct tipc_media * const media_info_array[] = {
NULL
};
+static const struct nla_policy
+tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_BEARER_NAME] = {
+ .type = NLA_STRING,
+ .len = TIPC_MAX_BEARER_NAME
+ },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+};
+
+static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
+ [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
+ [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
+};
+
struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
@@ -627,3 +645,430 @@ void tipc_bearer_stop(void)
}
}
}
+
+/* Caller should hold rtnl_lock to protect the bearer */
+static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
+ struct tipc_bearer *bearer)
+{
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_BEARER_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
+ goto attr_msg_full;
+
+ prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window))
+ goto prop_msg_full;
+
+ nla_nest_end(msg->skb, prop);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int i = cb->args[0];
+ struct tipc_bearer *bearer;
+ struct tipc_nl_msg msg;
+
+ if (i == MAX_BEARERS)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ for (i = 0; i < MAX_BEARERS; i++) {
+ bearer = rtnl_dereference(bearer_list[i]);
+ if (!bearer)
+ continue;
+
+ err = __tipc_nl_add_bearer(&msg, bearer);
+ if (err)
+ break;
+ }
+ rtnl_unlock();
+
+ cb->args[0] = i;
+ return skb->len;
+}
+
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct sk_buff *rep;
+ struct tipc_bearer *bearer;
+ struct tipc_nl_msg msg;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ rtnl_lock();
+ bearer = tipc_bearer_find(name);
+ if (!bearer) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __tipc_nl_add_bearer(&msg, bearer);
+ if (err)
+ goto err_out;
+ rtnl_unlock();
+
+ return genlmsg_reply(rep, info);
+err_out:
+ rtnl_unlock();
+ nlmsg_free(rep);
+
+ return err;
+}
+
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_bearer *bearer;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ bearer = tipc_bearer_find(name);
+ if (!bearer) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ bearer_disable(bearer, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *bearer;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ u32 domain;
+ u32 prio;
+
+ prio = TIPC_MEDIA_LINK_PRI;
+ domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ bearer = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ if (attrs[TIPC_NLA_BEARER_DOMAIN])
+ domain = nla_get_u32(attrs[TIPC_NLA_BEARER_DOMAIN]);
+
+ if (attrs[TIPC_NLA_BEARER_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+ props);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_PRIO])
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ }
+
+ rtnl_lock();
+ err = tipc_enable_bearer(bearer, domain, prio);
+ if (err) {
+ rtnl_unlock();
+ return err;
+ }
+ rtnl_unlock();
+
+ return 0;
+}
+
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_bearer *b;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ b = tipc_bearer_find(name);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (attrs[TIPC_NLA_BEARER_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+ props);
+ if (err) {
+ rtnl_unlock();
+ return err;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL])
+ b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ if (props[TIPC_NLA_PROP_PRIO])
+ b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (props[TIPC_NLA_PROP_WIN])
+ b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ }
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
+ struct tipc_media *media)
+{
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
+ goto attr_msg_full;
+
+ prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP);
+ if (!prop)
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window))
+ goto prop_msg_full;
+
+ nla_nest_end(msg->skb, prop);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int i = cb->args[0];
+ struct tipc_nl_msg msg;
+
+ if (i == MAX_MEDIA)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ for (; media_info_array[i] != NULL; i++) {
+ err = __tipc_nl_add_media(&msg, media_info_array[i]);
+ if (err)
+ break;
+ }
+ rtnl_unlock();
+
+ cb->args[0] = i;
+ return skb->len;
+}
+
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_nl_msg msg;
+ struct tipc_media *media;
+ struct sk_buff *rep;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_MEDIA_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ rtnl_lock();
+ media = tipc_media_find(name);
+ if (!media) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __tipc_nl_add_media(&msg, media);
+ if (err)
+ goto err_out;
+ rtnl_unlock();
+
+ return genlmsg_reply(rep, info);
+err_out:
+ rtnl_unlock();
+ nlmsg_free(rep);
+
+ return err;
+}
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_media *m;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_MEDIA_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+ rtnl_lock();
+ m = tipc_media_find(name);
+ if (!m) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (attrs[TIPC_NLA_MEDIA_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
+ props);
+ if (err) {
+ rtnl_unlock();
+ return err;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL])
+ m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ if (props[TIPC_NLA_PROP_PRIO])
+ m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (props[TIPC_NLA_PROP_WIN])
+ m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ }
+ rtnl_unlock();
+
+ return 0;
+}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78fccc49de23..2c1230ac5dfe 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,7 +1,7 @@
/*
* net/tipc/bearer.h: Include file for TIPC bearer code
*
- * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 1996-2006, 2013-2014, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -38,6 +38,8 @@
#define _TIPC_BEARER_H
#include "bcast.h"
+#include "netlink.h"
+#include <net/genetlink.h>
#define MAX_BEARERS 2
#define MAX_MEDIA 2
@@ -163,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[];
* TIPC routines available to supported media types
*/
-void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
int tipc_disable_bearer(const char *name);
@@ -176,6 +178,16 @@ extern struct tipc_media eth_media_info;
extern struct tipc_media ib_media_info;
#endif
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index f773b148722f..84602137ce20 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -41,6 +41,7 @@
#include <linux/tipc.h>
#include <linux/tipc_config.h>
+#include <linux/tipc_netlink.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -191,6 +192,7 @@ struct tipc_skb_cb {
struct sk_buff *tail;
bool deferred;
bool wakeup_pending;
+ bool bundling;
u16 chain_sz;
u16 chain_imp;
};
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1db162aa64a5..23bcc1132365 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -36,10 +36,12 @@
#include "core.h"
#include "link.h"
+#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"
+#include "netlink.h"
#include <linux/pkt_sched.h>
@@ -50,6 +52,30 @@ static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
+static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_LINK_NAME] = {
+ .type = NLA_STRING,
+ .len = TIPC_MAX_LINK_NAME
+ },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
+};
+
+/* Properties valid for media, bearer and link */
+static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+ [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
+};
+
/*
* Out-of-range value for link session numbers
*/
@@ -123,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr)
l_ptr->max_pkt_probes = 0;
}
-static u32 link_next_sent(struct tipc_link *l_ptr)
-{
- if (l_ptr->next_out)
- return buf_seqno(l_ptr->next_out);
- return mod(l_ptr->next_out_no);
-}
-
-static u32 link_last_sent(struct tipc_link *l_ptr)
-{
- return mod(link_next_sent(l_ptr) - 1);
-}
-
/*
* Simple non-static link routines (i.e. referenced outside this file)
*/
@@ -157,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
*/
static void link_timeout(struct tipc_link *l_ptr)
{
+ struct sk_buff *skb;
+
tipc_node_lock(l_ptr->owner);
/* update counters used in statistical profiling of send traffic */
- l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+ l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
l_ptr->stats.queue_sz_counts++;
- if (l_ptr->first_out) {
- struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+ skb = skb_peek(&l_ptr->outqueue);
+ if (skb) {
+ struct tipc_msg *msg = buf_msg(skb);
u32 length = msg_size(msg);
if ((msg_user(msg) == MSG_FRAGMENTER) &&
@@ -192,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr)
}
/* do all other link processing performed on a periodic basis */
-
link_state_event(l_ptr, TIMEOUT_EVT);
if (l_ptr->next_out)
- tipc_link_push_queue(l_ptr);
+ tipc_link_push_packets(l_ptr);
tipc_node_unlock(l_ptr->owner);
}
@@ -224,9 +240,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
char addr_string[16];
u32 peer = n_ptr->addr;
- if (n_ptr->link_cnt >= 2) {
+ if (n_ptr->link_cnt >= MAX_BEARERS) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Attempt to establish third link to %s\n", addr_string);
+ pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
+ n_ptr->link_cnt, addr_string, MAX_BEARERS);
return NULL;
}
@@ -274,7 +291,9 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
link_init_max_pkt(l_ptr);
l_ptr->next_out_no = 1;
- __skb_queue_head_init(&l_ptr->waiting_sks);
+ __skb_queue_head_init(&l_ptr->outqueue);
+ __skb_queue_head_init(&l_ptr->deferred_queue);
+ skb_queue_head_init(&l_ptr->waiting_sks);
link_reset_statistics(l_ptr);
@@ -339,7 +358,7 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
return false;
TIPC_SKB_CB(buf)->chain_sz = chain_sz;
TIPC_SKB_CB(buf)->chain_imp = imp;
- __skb_queue_tail(&link->waiting_sks, buf);
+ skb_queue_tail(&link->waiting_sks, buf);
link->stats.link_congs++;
return true;
}
@@ -352,30 +371,19 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
*/
static void link_prepare_wakeup(struct tipc_link *link)
{
- struct sk_buff_head *wq = &link->waiting_sks;
- struct sk_buff *buf;
- uint pend_qsz = link->out_queue_size;
+ uint pend_qsz = skb_queue_len(&link->outqueue);
+ struct sk_buff *skb, *tmp;
- for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) {
- if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp])
+ skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
+ if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
break;
- pend_qsz += TIPC_SKB_CB(buf)->chain_sz;
- __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq));
+ pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
+ skb_unlink(skb, &link->waiting_sks);
+ skb_queue_tail(&link->owner->waiting_sks, skb);
}
}
/**
- * link_release_outqueue - purge link's outbound message queue
- * @l_ptr: pointer to link
- */
-static void link_release_outqueue(struct tipc_link *l_ptr)
-{
- kfree_skb_list(l_ptr->first_out);
- l_ptr->first_out = NULL;
- l_ptr->out_queue_size = 0;
-}
-
-/**
* tipc_link_reset_fragments - purge link's inbound message fragments queue
* @l_ptr: pointer to link
*/
@@ -391,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
*/
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
- kfree_skb_list(l_ptr->oldest_deferred_in);
- kfree_skb_list(l_ptr->first_out);
+ __skb_queue_purge(&l_ptr->deferred_queue);
+ __skb_queue_purge(&l_ptr->outqueue);
tipc_link_reset_fragments(l_ptr);
- kfree_skb(l_ptr->proto_msg_queue);
- l_ptr->proto_msg_queue = NULL;
}
void tipc_link_reset(struct tipc_link *l_ptr)
@@ -427,25 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr)
}
/* Clean up all queues: */
- link_release_outqueue(l_ptr);
- kfree_skb(l_ptr->proto_msg_queue);
- l_ptr->proto_msg_queue = NULL;
- kfree_skb_list(l_ptr->oldest_deferred_in);
+ __skb_queue_purge(&l_ptr->outqueue);
+ __skb_queue_purge(&l_ptr->deferred_queue);
if (!skb_queue_empty(&l_ptr->waiting_sks)) {
skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
owner->action_flags |= TIPC_WAKEUP_USERS;
}
- l_ptr->retransm_queue_head = 0;
- l_ptr->retransm_queue_size = 0;
- l_ptr->last_out = NULL;
- l_ptr->first_out = NULL;
l_ptr->next_out = NULL;
l_ptr->unacked_window = 0;
l_ptr->checkpoint = 1;
l_ptr->next_out_no = 1;
- l_ptr->deferred_inqueue_sz = 0;
- l_ptr->oldest_deferred_in = NULL;
- l_ptr->newest_deferred_in = NULL;
l_ptr->fsm_msg_cnt = 0;
l_ptr->stale_count = 0;
link_reset_statistics(l_ptr);
@@ -667,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
* - For all other messages we discard the buffer and return -EHOSTUNREACH
* - For TIPC internal messages we also reset the link
*/
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *skb = skb_peek(list);
+ struct tipc_msg *msg = buf_msg(skb);
uint imp = tipc_msg_tot_importance(msg);
u32 oport = msg_tot_origport(msg);
@@ -682,30 +680,30 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
goto drop;
if (unlikely(msg_reroute_cnt(msg)))
goto drop;
- if (TIPC_SKB_CB(buf)->wakeup_pending)
+ if (TIPC_SKB_CB(skb)->wakeup_pending)
return -ELINKCONG;
- if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
+ if (link_schedule_user(link, oport, skb_queue_len(list), imp))
return -ELINKCONG;
drop:
- kfree_skb_list(buf);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
/**
* __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
* @link: link to use
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
+ *
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
* user data messages) or -EHOSTUNREACH (all other messages/senders)
* Only the socket functions tipc_send_stream() and tipc_send_packet() need
* to act on the return value, since they may need to do more send attempts.
*/
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_msg *msg = buf_msg(skb_peek(list));
uint psz = msg_size(msg);
- uint qsz = link->out_queue_size;
uint sndlim = link->queue_limit[0];
uint imp = tipc_msg_tot_importance(msg);
uint mtu = link->max_pkt;
@@ -713,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
uint seqno = link->next_out_no;
uint bc_last_in = link->owner->bclink.last_in;
struct tipc_media_addr *addr = &link->media_addr;
- struct sk_buff *next = buf->next;
+ struct sk_buff_head *outqueue = &link->outqueue;
+ struct sk_buff *skb, *tmp;
/* Match queue limits against msg importance: */
- if (unlikely(qsz >= link->queue_limit[imp]))
- return tipc_link_cong(link, buf);
+ if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
+ return tipc_link_cong(link, list);
/* Has valid packet limit been used ? */
if (unlikely(psz > mtu)) {
- kfree_skb_list(buf);
+ __skb_queue_purge(list);
return -EMSGSIZE;
}
/* Prepare each packet for sending, and add to outqueue: */
- while (buf) {
- next = buf->next;
- msg = buf_msg(buf);
+ skb_queue_walk_safe(list, skb, tmp) {
+ __skb_unlink(skb, list);
+ msg = buf_msg(skb);
msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
msg_set_bcast_ack(msg, bc_last_in);
- if (!link->first_out) {
- link->first_out = buf;
- } else if (qsz < sndlim) {
- link->last_out->next = buf;
- } else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
+ if (skb_queue_len(outqueue) < sndlim) {
+ __skb_queue_tail(outqueue, skb);
+ tipc_bearer_send(link->bearer_id, skb, addr);
+ link->next_out = NULL;
+ link->unacked_window = 0;
+ } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
link->stats.sent_bundled++;
- buf = next;
- next = buf->next;
continue;
- } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
+ } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
+ link->addr)) {
link->stats.sent_bundled++;
link->stats.sent_bundles++;
- link->last_out->next = buf;
if (!link->next_out)
- link->next_out = buf;
+ link->next_out = skb_peek_tail(outqueue);
} else {
- link->last_out->next = buf;
+ __skb_queue_tail(outqueue, skb);
if (!link->next_out)
- link->next_out = buf;
- }
-
- /* Send packet if possible: */
- if (likely(++qsz <= sndlim)) {
- tipc_bearer_send(link->bearer_id, buf, addr);
- link->next_out = next;
- link->unacked_window = 0;
+ link->next_out = skb;
}
seqno++;
- link->last_out = buf;
- buf = next;
}
link->next_out_no = seqno;
- link->out_queue_size = qsz;
return 0;
}
+static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ __skb_queue_head_init(list);
+ __skb_queue_tail(list, skb);
+}
+
+static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
+{
+ struct sk_buff_head head;
+
+ skb2list(skb, &head);
+ return __tipc_link_xmit(link, &head);
+}
+
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+{
+ struct sk_buff_head head;
+
+ skb2list(skb, &head);
+ return tipc_link_xmit(&head, dnode, selector);
+}
+
/**
* tipc_link_xmit() is the general link level function for message sending
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
* @dsz: amount of user data to be sent
* @dnode: address of destination node
* @selector: a number used for deterministic link selection
* Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
+int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
{
struct tipc_link *link = NULL;
struct tipc_node *node;
@@ -788,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
tipc_node_lock(node);
link = node->active_links[selector & 1];
if (link)
- rc = __tipc_link_xmit(link, buf);
+ rc = __tipc_link_xmit(link, list);
tipc_node_unlock(node);
}
if (link)
return rc;
- if (likely(in_own_node(dnode)))
- return tipc_sk_rcv(buf);
+ if (likely(in_own_node(dnode))) {
+ /* As a node-local message chain never contains more than one
+ * buffer, we just need to dequeue the single SKB from the
+ * head of the list.
+ */
+ return tipc_sk_rcv(__skb_dequeue(list));
+ }
+ __skb_queue_purge(list);
- kfree_skb_list(buf);
return rc;
}
@@ -812,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
*/
static void tipc_link_sync_xmit(struct tipc_link *link)
{
- struct sk_buff *buf;
+ struct sk_buff *skb;
struct tipc_msg *msg;
- buf = tipc_buf_acquire(INT_H_SIZE);
- if (!buf)
+ skb = tipc_buf_acquire(INT_H_SIZE);
+ if (!skb)
return;
- msg = buf_msg(buf);
+ msg = buf_msg(skb);
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
msg_set_last_bcast(msg, link->owner->bclink.acked);
- __tipc_link_xmit(link, buf);
+ __tipc_link_xmit_skb(link, skb);
}
/*
@@ -842,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
kfree_skb(buf);
}
+struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ if (skb_queue_is_last(list, skb))
+ return NULL;
+ return skb->next;
+}
+
/*
- * tipc_link_push_packet: Push one unsent packet to the media
+ * tipc_link_push_packets - push unsent packets to bearer
+ *
+ * Push out the unsent messages of a link where congestion
+ * has abated.
+ *
+ * Called with node locked
*/
-static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
-{
- struct sk_buff *buf = l_ptr->first_out;
- u32 r_q_size = l_ptr->retransm_queue_size;
- u32 r_q_head = l_ptr->retransm_queue_head;
-
- /* Step to position where retransmission failed, if any, */
- /* consider that buffers may have been released in meantime */
- if (r_q_size && buf) {
- u32 last = lesser(mod(r_q_head + r_q_size),
- link_last_sent(l_ptr));
- u32 first = buf_seqno(buf);
-
- while (buf && less(first, r_q_head)) {
- first = mod(first + 1);
- buf = buf->next;
- }
- l_ptr->retransm_queue_head = r_q_head = first;
- l_ptr->retransm_queue_size = r_q_size = mod(last - first);
- }
-
- /* Continue retransmission now, if there is anything: */
- if (r_q_size && buf) {
- msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
- msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
- l_ptr->retransm_queue_head = mod(++r_q_head);
- l_ptr->retransm_queue_size = --r_q_size;
- l_ptr->stats.retransmitted++;
- return 0;
- }
-
- /* Send deferred protocol message, if any: */
- buf = l_ptr->proto_msg_queue;
- if (buf) {
- msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
- msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
- l_ptr->unacked_window = 0;
- kfree_skb(buf);
- l_ptr->proto_msg_queue = NULL;
- return 0;
- }
+void tipc_link_push_packets(struct tipc_link *l_ptr)
+{
+ struct sk_buff_head *outqueue = &l_ptr->outqueue;
+ struct sk_buff *skb = l_ptr->next_out;
+ struct tipc_msg *msg;
+ u32 next, first;
- /* Send one deferred data message, if send window not full: */
- buf = l_ptr->next_out;
- if (buf) {
- struct tipc_msg *msg = buf_msg(buf);
- u32 next = msg_seqno(msg);
- u32 first = buf_seqno(l_ptr->first_out);
+ skb_queue_walk_from(outqueue, skb) {
+ msg = buf_msg(skb);
+ next = msg_seqno(msg);
+ first = buf_seqno(skb_peek(outqueue));
if (mod(next - first) < l_ptr->queue_limit[0]) {
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->bearer_id, buf,
- &l_ptr->media_addr);
if (msg_user(msg) == MSG_BUNDLER)
- msg_set_type(msg, BUNDLE_CLOSED);
- l_ptr->next_out = buf->next;
- return 0;
+ TIPC_SKB_CB(skb)->bundling = false;
+ tipc_bearer_send(l_ptr->bearer_id, skb,
+ &l_ptr->media_addr);
+ l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
+ } else {
+ break;
}
}
- return 1;
-}
-
-/*
- * push_queue(): push out the unsent messages of a link where
- * congestion has abated. Node is locked
- */
-void tipc_link_push_queue(struct tipc_link *l_ptr)
-{
- u32 res;
-
- do {
- res = tipc_link_push_packet(l_ptr);
- } while (!res);
}
void tipc_link_reset_all(struct tipc_node *node)
@@ -984,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
}
}
-void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
u32 retransmits)
{
struct tipc_msg *msg;
- if (!buf)
+ if (!skb)
return;
- msg = buf_msg(buf);
+ msg = buf_msg(skb);
/* Detect repeated retransmit failures */
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
if (++l_ptr->stale_count > 100) {
- link_retransmit_failure(l_ptr, buf);
+ link_retransmit_failure(l_ptr, skb);
return;
}
} else {
@@ -1005,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
l_ptr->stale_count = 1;
}
- while (retransmits && (buf != l_ptr->next_out) && buf) {
- msg = buf_msg(buf);
+ skb_queue_walk_from(&l_ptr->outqueue, skb) {
+ if (!retransmits || skb == l_ptr->next_out)
+ break;
+ msg = buf_msg(skb);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
- buf = buf->next;
+ tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
retransmits--;
l_ptr->stats.retransmitted++;
}
-
- l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
-/**
- * link_insert_deferred_queue - insert deferred messages back into receive chain
- */
-static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+static void link_retrieve_defq(struct tipc_link *link,
+ struct sk_buff_head *list)
{
u32 seq_no;
- if (l_ptr->oldest_deferred_in == NULL)
- return buf;
+ if (skb_queue_empty(&link->deferred_queue))
+ return;
- seq_no = buf_seqno(l_ptr->oldest_deferred_in);
- if (seq_no == mod(l_ptr->next_in_no)) {
- l_ptr->newest_deferred_in->next = buf;
- buf = l_ptr->oldest_deferred_in;
- l_ptr->oldest_deferred_in = NULL;
- l_ptr->deferred_inqueue_sz = 0;
- }
- return buf;
+ seq_no = buf_seqno(skb_peek(&link->deferred_queue));
+ if (seq_no == mod(link->next_in_no))
+ skb_queue_splice_tail_init(&link->deferred_queue, list);
}
/**
@@ -1096,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf)
/**
* tipc_rcv - process TIPC packets/messages arriving from off-node
- * @head: pointer to message buffer chain
+ * @skb: TIPC packet
* @b_ptr: pointer to bearer message arrived on
*
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
- while (head) {
- struct tipc_node *n_ptr;
- struct tipc_link *l_ptr;
- struct sk_buff *crs;
- struct sk_buff *buf = head;
- struct tipc_msg *msg;
- u32 seq_no;
- u32 ackd;
- u32 released = 0;
+ struct sk_buff_head head;
+ struct tipc_node *n_ptr;
+ struct tipc_link *l_ptr;
+ struct sk_buff *skb1, *tmp;
+ struct tipc_msg *msg;
+ u32 seq_no;
+ u32 ackd;
+ u32 released;
- head = head->next;
- buf->next = NULL;
+ skb2list(skb, &head);
+ while ((skb = __skb_dequeue(&head))) {
/* Ensure message is well-formed */
- if (unlikely(!link_recv_buf_validate(buf)))
+ if (unlikely(!link_recv_buf_validate(skb)))
goto discard;
/* Ensure message data is a single contiguous unit */
- if (unlikely(skb_linearize(buf)))
+ if (unlikely(skb_linearize(skb)))
goto discard;
/* Handle arrival of a non-unicast link message */
- msg = buf_msg(buf);
+ msg = buf_msg(skb);
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_rcv(buf, b_ptr);
+ tipc_disc_rcv(skb, b_ptr);
else
- tipc_bclink_rcv(buf);
+ tipc_bclink_rcv(skb);
continue;
}
@@ -1171,22 +1137,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if (n_ptr->bclink.recv_permitted)
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
- crs = l_ptr->first_out;
- while ((crs != l_ptr->next_out) &&
- less_eq(buf_seqno(crs), ackd)) {
- struct sk_buff *next = crs->next;
- kfree_skb(crs);
- crs = next;
- released++;
- }
- if (released) {
- l_ptr->first_out = crs;
- l_ptr->out_queue_size -= released;
+ released = 0;
+ skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
+ if (skb1 == l_ptr->next_out ||
+ more(buf_seqno(skb1), ackd))
+ break;
+ __skb_unlink(skb1, &l_ptr->outqueue);
+ kfree_skb(skb1);
+ released = 1;
}
/* Try sending any messages link endpoint has pending */
if (unlikely(l_ptr->next_out))
- tipc_link_push_queue(l_ptr);
+ tipc_link_push_packets(l_ptr);
if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
link_prepare_wakeup(l_ptr);
@@ -1196,8 +1159,8 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
if (msg_user(msg) == LINK_PROTOCOL) {
- tipc_link_proto_rcv(l_ptr, buf);
- head = link_insert_deferred_queue(l_ptr, head);
+ tipc_link_proto_rcv(l_ptr, skb);
+ link_retrieve_defq(l_ptr, &head);
tipc_node_unlock(n_ptr);
continue;
}
@@ -1207,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if (link_working_working(l_ptr)) {
/* Re-insert buffer in front of queue */
- buf->next = head;
- head = buf;
+ __skb_queue_head(&head, skb);
tipc_node_unlock(n_ptr);
continue;
}
@@ -1217,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Link is now in state WORKING_WORKING */
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
- link_handle_out_of_seq_msg(l_ptr, buf);
- head = link_insert_deferred_queue(l_ptr, head);
+ link_handle_out_of_seq_msg(l_ptr, skb);
+ link_retrieve_defq(l_ptr, &head);
tipc_node_unlock(n_ptr);
continue;
}
l_ptr->next_in_no++;
- if (unlikely(l_ptr->oldest_deferred_in))
- head = link_insert_deferred_queue(l_ptr, head);
+ if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+ link_retrieve_defq(l_ptr, &head);
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
l_ptr->stats.sent_acks++;
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
- if (tipc_link_prepare_input(l_ptr, &buf)) {
+ if (tipc_link_prepare_input(l_ptr, &skb)) {
tipc_node_unlock(n_ptr);
continue;
}
tipc_node_unlock(n_ptr);
- msg = buf_msg(buf);
- if (tipc_link_input(l_ptr, buf) != 0)
+
+ if (tipc_link_input(l_ptr, skb) != 0)
goto discard;
continue;
unlock_discard:
tipc_node_unlock(n_ptr);
discard:
- kfree_skb(buf);
+ kfree_skb(skb);
}
}
@@ -1326,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
*
* Returns increase in queue length (i.e. 0 or 1)
*/
-u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff *buf)
+u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
- struct sk_buff *queue_buf;
- struct sk_buff **prev;
- u32 seq_no = buf_seqno(buf);
-
- buf->next = NULL;
+ struct sk_buff *skb1;
+ u32 seq_no = buf_seqno(skb);
/* Empty queue ? */
- if (*head == NULL) {
- *head = *tail = buf;
+ if (skb_queue_empty(list)) {
+ __skb_queue_tail(list, skb);
return 1;
}
/* Last ? */
- if (less(buf_seqno(*tail), seq_no)) {
- (*tail)->next = buf;
- *tail = buf;
+ if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
+ __skb_queue_tail(list, skb);
return 1;
}
/* Locate insertion point in queue, then insert; discard if duplicate */
- prev = head;
- queue_buf = *head;
- for (;;) {
- u32 curr_seqno = buf_seqno(queue_buf);
+ skb_queue_walk(list, skb1) {
+ u32 curr_seqno = buf_seqno(skb1);
if (seq_no == curr_seqno) {
- kfree_skb(buf);
+ kfree_skb(skb);
return 0;
}
if (less(seq_no, curr_seqno))
break;
-
- prev = &queue_buf->next;
- queue_buf = queue_buf->next;
}
- buf->next = queue_buf;
- *prev = buf;
+ __skb_queue_before(list, skb1, skb);
return 1;
}
@@ -1397,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
return;
}
- if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
- &l_ptr->newest_deferred_in, buf)) {
- l_ptr->deferred_inqueue_sz++;
+ if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
l_ptr->stats.deferred_recv++;
TIPC_SKB_CB(buf)->deferred = true;
- if ((l_ptr->deferred_inqueue_sz % 16) == 1)
+ if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
- } else
+ } else {
l_ptr->stats.duplicates++;
+ }
}
/*
@@ -1419,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
u32 msg_size = sizeof(l_ptr->proto_msg);
int r_flag;
- /* Discard any previous message that was deferred due to congestion */
- if (l_ptr->proto_msg_queue) {
- kfree_skb(l_ptr->proto_msg_queue);
- l_ptr->proto_msg_queue = NULL;
- }
-
/* Don't send protocol message during link changeover */
if (l_ptr->exp_msg_count)
return;
@@ -1447,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
if (l_ptr->next_out)
next_sent = buf_seqno(l_ptr->next_out);
msg_set_next_sent(msg, next_sent);
- if (l_ptr->oldest_deferred_in) {
- u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
+ if (!skb_queue_empty(&l_ptr->deferred_queue)) {
+ u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
gap = mod(rec - mod(l_ptr->next_in_no));
}
msg_set_seq_gap(msg, gap);
@@ -1636,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
}
if (msg_seq_gap(msg)) {
l_ptr->stats.recv_nacks++;
- tipc_link_retransmit(l_ptr, l_ptr->first_out,
+ tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
msg_seq_gap(msg));
}
break;
@@ -1655,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
u32 selector)
{
struct tipc_link *tunnel;
- struct sk_buff *buf;
+ struct sk_buff *skb;
u32 length = msg_size(msg);
tunnel = l_ptr->owner->active_links[selector & 1];
@@ -1664,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
return;
}
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
- buf = tipc_buf_acquire(length + INT_H_SIZE);
- if (!buf) {
+ skb = tipc_buf_acquire(length + INT_H_SIZE);
+ if (!skb) {
pr_warn("%sunable to send tunnel msg\n", link_co_err);
return;
}
- skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
- __tipc_link_xmit(tunnel, buf);
+ skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
+ __tipc_link_xmit_skb(tunnel, skb);
}
@@ -1683,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
*/
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
- u32 msgcount = l_ptr->out_queue_size;
- struct sk_buff *crs = l_ptr->first_out;
+ u32 msgcount = skb_queue_len(&l_ptr->outqueue);
struct tipc_link *tunnel = l_ptr->owner->active_links[0];
struct tipc_msg tunnel_hdr;
+ struct sk_buff *skb;
int split_bundles;
if (!tunnel)
@@ -1697,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
- if (!l_ptr->first_out) {
- struct sk_buff *buf;
-
- buf = tipc_buf_acquire(INT_H_SIZE);
- if (buf) {
- skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
+ if (skb_queue_empty(&l_ptr->outqueue)) {
+ skb = tipc_buf_acquire(INT_H_SIZE);
+ if (skb) {
+ skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE);
- __tipc_link_xmit(tunnel, buf);
+ __tipc_link_xmit_skb(tunnel, skb);
} else {
pr_warn("%sunable to send changeover msg\n",
link_co_err);
@@ -1715,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
split_bundles = (l_ptr->owner->active_links[0] !=
l_ptr->owner->active_links[1]);
- while (crs) {
- struct tipc_msg *msg = buf_msg(crs);
+ skb_queue_walk(&l_ptr->outqueue, skb) {
+ struct tipc_msg *msg = buf_msg(skb);
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
struct tipc_msg *m = msg_get_wrapped(msg);
@@ -1734,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
msg_link_selector(msg));
}
- crs = crs->next;
}
}
@@ -1750,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
struct tipc_link *tunnel)
{
- struct sk_buff *iter;
+ struct sk_buff *skb;
struct tipc_msg tunnel_hdr;
tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
- msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
+ msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
- iter = l_ptr->first_out;
- while (iter) {
- struct sk_buff *outbuf;
- struct tipc_msg *msg = buf_msg(iter);
+ skb_queue_walk(&l_ptr->outqueue, skb) {
+ struct sk_buff *outskb;
+ struct tipc_msg *msg = buf_msg(skb);
u32 length = msg_size(msg);
if (msg_user(msg) == MSG_BUNDLER)
@@ -1768,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
- outbuf = tipc_buf_acquire(length + INT_H_SIZE);
- if (outbuf == NULL) {
+ outskb = tipc_buf_acquire(length + INT_H_SIZE);
+ if (outskb == NULL) {
pr_warn("%sunable to send duplicate msg\n",
link_co_err);
return;
}
- skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
+ skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
length);
- __tipc_link_xmit(tunnel, outbuf);
+ __tipc_link_xmit_skb(tunnel, outskb);
if (!tipc_link_is_up(l_ptr))
return;
- iter = iter->next;
}
}
@@ -2375,3 +2314,435 @@ static void link_print(struct tipc_link *l_ptr, const char *str)
else
pr_cont("\n");
}
+
+/* Parse and validate nested (link) properties valid for media, bearer and link
+ */
+int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
+{
+ int err;
+
+ err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
+ tipc_nl_prop_policy);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_PRIO]) {
+ u32 prio;
+
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (prio > TIPC_MAX_LINK_PRI)
+ return -EINVAL;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ u32 tol;
+
+ tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
+ return -EINVAL;
+ }
+
+ if (props[TIPC_NLA_PROP_WIN]) {
+ u32 win;
+
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ int res = 0;
+ int bearer_id;
+ char *name;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ tipc_node_lock(node);
+
+ link = node->links[bearer_id];
+ if (!link) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[TIPC_NLA_LINK_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
+ props);
+ if (err) {
+ res = err;
+ goto out;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ u32 tol;
+
+ tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ link_set_supervision_props(link, tol);
+ tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
+ }
+ if (props[TIPC_NLA_PROP_PRIO]) {
+ u32 prio;
+
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ link->priority = prio;
+ tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
+ }
+ if (props[TIPC_NLA_PROP_WIN]) {
+ u32 win;
+
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ tipc_link_set_queue_limits(link, win);
+ }
+ }
+
+out:
+ tipc_node_unlock(node);
+
+ return res;
+}
+
+static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
+{
+ int i;
+ struct nlattr *stats;
+
+ struct nla_map {
+ u32 key;
+ u32 val;
+ };
+
+ struct nla_map map[] = {
+ {TIPC_NLA_STATS_RX_INFO, s->recv_info},
+ {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
+ {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
+ {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
+ {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
+ {TIPC_NLA_STATS_TX_INFO, s->sent_info},
+ {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
+ {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
+ {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
+ {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
+ {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
+ s->msg_length_counts : 1},
+ {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
+ {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
+ {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
+ {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
+ {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
+ {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
+ {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
+ {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
+ {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
+ {TIPC_NLA_STATS_RX_STATES, s->recv_states},
+ {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
+ {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
+ {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
+ {TIPC_NLA_STATS_TX_STATES, s->sent_states},
+ {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
+ {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
+ {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
+ {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
+ {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
+ {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
+ {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
+ {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0}
+ };
+
+ stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
+ if (!stats)
+ return -EMSGSIZE;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (nla_put_u32(skb, map[i].key, map[i].val))
+ goto msg_full;
+
+ nla_nest_end(skb, stats);
+
+ return 0;
+msg_full:
+ nla_nest_cancel(skb, stats);
+
+ return -EMSGSIZE;
+}
+
+/* Caller should hold appropriate locks to protect the link */
+static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
+{
+ int err;
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_LINK_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
+ tipc_cluster_mask(tipc_own_addr)))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
+ goto attr_msg_full;
+
+ if (tipc_link_is_up(link))
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
+ goto attr_msg_full;
+ if (tipc_link_is_active(link))
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
+ goto attr_msg_full;
+
+ prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
+ link->queue_limit[TIPC_LOW_IMPORTANCE]))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
+ goto prop_msg_full;
+ nla_nest_end(msg->skb, prop);
+
+ err = __tipc_nl_add_stats(msg->skb, &link->stats);
+ if (err)
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+/* Caller should hold node lock */
+static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
+ struct tipc_node *node,
+ u32 *prev_link)
+{
+ u32 i;
+ int err;
+
+ for (i = *prev_link; i < MAX_BEARERS; i++) {
+ *prev_link = i;
+
+ if (!node->links[i])
+ continue;
+
+ err = __tipc_nl_add_link(msg, node->links[i]);
+ if (err)
+ return err;
+ }
+ *prev_link = 0;
+
+ return 0;
+}
+
+int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct tipc_node *node;
+ struct tipc_nl_msg msg;
+ u32 prev_node = cb->args[0];
+ u32 prev_link = cb->args[1];
+ int done = cb->args[2];
+ int err;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+
+ if (prev_node) {
+ node = tipc_node_find(prev_node);
+ if (!node) {
+ /* We never set seq or call nl_dump_check_consistent(),
+ * which means that setting prev_seq here will cause the
+ * consistency check to fail in the netlink callback
+ * handler, resulting in the last NLMSG_DONE message
+ * having the NLM_F_DUMP_INTR flag set.
+ */
+ cb->prev_seq = 1;
+ goto out;
+ }
+
+ list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
+ tipc_node_lock(node);
+ err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+ tipc_node_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ } else {
+ err = tipc_nl_add_bc_link(&msg);
+ if (err)
+ goto out;
+
+ list_for_each_entry_rcu(node, &tipc_node_list, list) {
+ tipc_node_lock(node);
+ err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+ tipc_node_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ }
+ done = 1;
+out:
+ rcu_read_unlock();
+
+ cb->args[0] = prev_node;
+ cb->args[1] = prev_link;
+ cb->args[2] = done;
+
+ return skb->len;
+}
+
+int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *ans_skb;
+ struct tipc_nl_msg msg;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ char *name;
+ int bearer_id;
+ int err;
+
+ if (!info->attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!ans_skb)
+ return -ENOMEM;
+
+ msg.skb = ans_skb;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ tipc_node_lock(node);
+ link = node->links[bearer_id];
+ if (!link) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __tipc_nl_add_link(&msg, link);
+ if (err)
+ goto err_out;
+
+ tipc_node_unlock(node);
+
+ return genlmsg_reply(ans_skb, info);
+
+err_out:
+ tipc_node_unlock(node);
+ nlmsg_free(ans_skb);
+
+ return err;
+}
+
+int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *link_name;
+ unsigned int bearer_id;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ if (strcmp(link_name, tipc_bclink_name) == 0) {
+ err = tipc_bclink_reset_stats();
+ if (err)
+ return err;
+ return 0;
+ }
+
+ node = tipc_link_find_owner(link_name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ tipc_node_lock(node);
+
+ link = node->links[bearer_id];
+ if (!link) {
+ tipc_node_unlock(node);
+ return -EINVAL;
+ }
+
+ link_reset_statistics(link);
+
+ tipc_node_unlock(node);
+
+ return 0;
+}
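
tipc_nl_link_dump() above survives being called repeatedly by the netlink core by stashing its (node, link) cursor in cb->args[]. A stripped-down sketch of that bookkeeping, with the per-item emission elided and invented names, looks roughly like this:

/* Illustrative sketch of the resumable-dump bookkeeping used by
 * tipc_nl_link_dump() above. cb->args[] persists across calls for the
 * same dump, so the handler records its cursor there plus a "done" flag
 * once the walk has finished.
 */
static int example_resumable_dump(struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	u32 prev = cb->args[0];		/* cursor left by the previous call */
	int done = cb->args[1];

	if (done)
		return 0;		/* 0 terminates the dump (NLMSG_DONE) */

	/* ... fill skb with as many items as fit, advancing prev;
	 *     set done = 1 only when the last item has been emitted ...
	 */

	cb->args[0] = prev;
	cb->args[1] = done;
	return skb->len;		/* >0 makes the core call us again */
}
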
diff --git a/net/tipc/link.h b/net/tipc/link.h
index b567a3427fda..55812e87ca1e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -37,6 +37,7 @@
#ifndef _TIPC_LINK_H
#define _TIPC_LINK_H
+#include <net/genetlink.h>
#include "msg.h"
#include "node.h"
@@ -118,20 +119,13 @@ struct tipc_stats {
* @max_pkt: current maximum packet size for this link
* @max_pkt_target: desired maximum packet size for this link
* @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
- * @out_queue_size: # of messages in outbound message queue
- * @first_out: ptr to first outbound message in queue
- * @last_out: ptr to last outbound message in queue
+ * @outqueue: outbound message queue
* @next_out_no: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
* @next_in_no: next sequence number to expect for inbound messages
- * @deferred_inqueue_sz: # of messages in inbound message queue
- * @oldest_deferred_in: ptr to first inbound message in queue
- * @newest_deferred_in: ptr to last inbound message in queue
+ * @deferred_queue: deferred queue holding OOS b'cast messages received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
- * @proto_msg_queue: ptr to (single) outbound control message
- * @retransm_queue_size: number of messages to retransmit
- * @retransm_queue_head: sequence number of first message to retransmit
* @next_out: ptr to first unsent outbound message in queue
* @waiting_sks: linked list of sockets waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
@@ -175,24 +169,17 @@ struct tipc_link {
u32 max_pkt_probes;
/* Sending */
- u32 out_queue_size;
- struct sk_buff *first_out;
- struct sk_buff *last_out;
+ struct sk_buff_head outqueue;
u32 next_out_no;
u32 last_retransmitted;
u32 stale_count;
/* Reception */
u32 next_in_no;
- u32 deferred_inqueue_sz;
- struct sk_buff *oldest_deferred_in;
- struct sk_buff *newest_deferred_in;
+ struct sk_buff_head deferred_queue;
u32 unacked_window;
/* Congestion handling */
- struct sk_buff *proto_msg_queue;
- u32 retransm_queue_size;
- u32 retransm_queue_head;
struct sk_buff *next_out;
struct sk_buff_head waiting_sks;
@@ -226,18 +213,26 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
+int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
void tipc_link_bundle_rcv(struct sk_buff *buf);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct tipc_link *l_ptr);
-u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff *buf);
+void tipc_link_push_packets(struct tipc_link *l_ptr);
+u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
void tipc_link_retransmit(struct tipc_link *l_ptr,
struct sk_buff *start, u32 retransmits);
+struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
+ const struct sk_buff *skb);
+
+int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
/*
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
@@ -252,18 +247,14 @@ static inline u32 mod(u32 x)
return x & 0xffffu;
}
-static inline int between(u32 lower, u32 upper, u32 n)
+static inline int less_eq(u32 left, u32 right)
{
- if ((lower < n) && (n < upper))
- return 1;
- if ((upper < lower) && ((n > lower) || (n < upper)))
- return 1;
- return 0;
+ return mod(right - left) < 32768u;
}
-static inline int less_eq(u32 left, u32 right)
+static inline int more(u32 left, u32 right)
{
- return mod(right - left) < 32768u;
+ return !less_eq(left, right);
}
static inline int less(u32 left, u32 right)
@@ -302,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
static inline int link_congested(struct tipc_link *l_ptr)
{
- return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
+ return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
}
#endif
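
The reshuffled helpers above implement an ordering on 16-bit sequence numbers: less_eq(a, b) holds when b is no more than 2^15 - 1 steps ahead of a modulo 2^16, which keeps comparisons correct across wrap-around. A few worked cases, illustrative only and using BUG_ON purely as an executable assertion:

/* Worked examples for the modulo-2^16 sequence comparisons above
 * (illustrative, not part of the patch).
 */
static void example_seqno_checks(void)
{
	/* No wrap-around: mod(200 - 100) = 100 < 32768 */
	BUG_ON(!less_eq(100, 200));
	/* Across wrap-around: mod(5 - 65530) = 11 < 32768, so 65530 <= 5 */
	BUG_ON(!less_eq(65530, 5));
	/* more() is the strict negation: 5 is "ahead of" 65530 */
	BUG_ON(!more(5, 65530));
	BUG_ON(!less(65530, 5));
}
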
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 74745a47d72a..a687b30a699c 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -91,7 +91,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
* @*headbuf: in: NULL for first frag, otherwise value returned from prev call
* out: set when successful non-complete reassembly, otherwise NULL
* @*buf: in: the buffer to append. Always defined
- * out: head buf after sucessful complete reassembly, otherwise NULL
+ * out: head buf after successful complete reassembly, otherwise NULL
* Returns 1 when reassembly complete, otherwise 0
*/
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
@@ -162,15 +162,16 @@ err:
/**
* tipc_msg_build - create buffer chain containing specified header and data
* @mhdr: Message header, to be prepended to data
- * @iov: User data
+ * @m: User message
 * @offset: Position in iov to start copying from
* @dsz: Total length of user data
* @pktmax: Max packet size that can be used
- * @chain: Buffer or chain of buffers to be returned to caller
+ * @list: Buffer or chain of buffers to be returned to caller
+ *
* Returns message data size or errno: -ENOMEM, -EFAULT
*/
-int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
- int offset, int dsz, int pktmax , struct sk_buff **chain)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
int msz = mhsz + dsz;
@@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
int pktrem = pktmax;
int drem = dsz;
struct tipc_msg pkthdr;
- struct sk_buff *buf, *prev;
+ struct sk_buff *skb;
char *pktpos;
int rc;
- uint chain_sz = 0;
+
msg_set_size(mhdr, msz);
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
- buf = tipc_buf_acquire(msz);
- *chain = buf;
- if (unlikely(!buf))
+ skb = tipc_buf_acquire(msz);
+ if (unlikely(!skb))
return -ENOMEM;
- skb_copy_to_linear_data(buf, mhdr, mhsz);
- pktpos = buf->data + mhsz;
- TIPC_SKB_CB(buf)->chain_sz = 1;
- if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
+ __skb_queue_tail(list, skb);
+ skb_copy_to_linear_data(skb, mhdr, mhsz);
+ pktpos = skb->data + mhsz;
+ if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
+ dsz))
return dsz;
rc = -EFAULT;
goto error;
@@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
msg_set_fragm_no(&pkthdr, pktno);
/* Prepare first fragment */
- *chain = buf = tipc_buf_acquire(pktmax);
- if (!buf)
+ skb = tipc_buf_acquire(pktmax);
+ if (!skb)
return -ENOMEM;
- chain_sz = 1;
- pktpos = buf->data;
- skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+ __skb_queue_tail(list, skb);
+ pktpos = skb->data;
+ skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
pktpos += INT_H_SIZE;
pktrem -= INT_H_SIZE;
- skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+ skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
pktpos += mhsz;
pktrem -= mhsz;
@@ -223,7 +224,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
if (drem < pktrem)
pktrem = drem;
- if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
+ if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
rc = -EFAULT;
goto error;
}
@@ -238,43 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
- prev = buf;
- buf = tipc_buf_acquire(pktsz);
- if (!buf) {
+ skb = tipc_buf_acquire(pktsz);
+ if (!skb) {
rc = -ENOMEM;
goto error;
}
- chain_sz++;
- prev->next = buf;
+ __skb_queue_tail(list, skb);
msg_set_type(&pkthdr, FRAGMENT);
msg_set_size(&pkthdr, pktsz);
msg_set_fragm_no(&pkthdr, ++pktno);
- skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
- pktpos = buf->data + INT_H_SIZE;
+ skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
+ pktpos = skb->data + INT_H_SIZE;
pktrem = pktsz - INT_H_SIZE;
} while (1);
- TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
- msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+ msg_set_type(buf_msg(skb), LAST_FRAGMENT);
return dsz;
error:
- kfree_skb_list(*chain);
- *chain = NULL;
+ __skb_queue_purge(list);
+ __skb_queue_head_init(list);
return rc;
}
/**
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @bbuf: the existing buffer ("bundle")
- * @buf: buffer to be appended
+ * @list: the buffer chain of the existing buffer ("bundle")
+ * @skb: buffer to be appended
* @mtu: max allowable size for the bundle buffer
* Consumes buffer if successful
* Returns true if bundling could be performed, otherwise false
*/
-bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
{
- struct tipc_msg *bmsg = buf_msg(bbuf);
- struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *bskb = skb_peek_tail(list);
+ struct tipc_msg *bmsg = buf_msg(bskb);
+ struct tipc_msg *msg = buf_msg(skb);
unsigned int bsz = msg_size(bmsg);
unsigned int msz = msg_size(msg);
u32 start = align(bsz);
@@ -289,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
return false;
if (likely(msg_user(bmsg) != MSG_BUNDLER))
return false;
- if (likely(msg_type(bmsg) != BUNDLE_OPEN))
+ if (likely(!TIPC_SKB_CB(bskb)->bundling))
return false;
- if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
+ if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
if (unlikely(max < (start + msz)))
return false;
- skb_put(bbuf, pad + msz);
- skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
+ skb_put(bskb, pad + msz);
+ skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
msg_set_size(bmsg, start + msz);
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
- bbuf->next = buf->next;
- kfree_skb(buf);
+ kfree_skb(skb);
return true;
}
/**
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @buf: buffer to be appended and replaced
- * @mtu: max allowable size for the bundle buffer, inclusive header
+ * @list: the buffer chain
+ * @skb: buffer to be appended and replaced
+ * @mtu: max allowable size for the bundle buffer, inclusive header
* @dnode: destination node for message. (Not always present in header)
* Replaces buffer if successful
- * Returns true if sucess, otherwise false
+ * Returns true if success, otherwise false
*/
-bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
+ u32 mtu, u32 dnode)
{
- struct sk_buff *bbuf;
+ struct sk_buff *bskb;
struct tipc_msg *bmsg;
- struct tipc_msg *msg = buf_msg(*buf);
+ struct tipc_msg *msg = buf_msg(skb);
u32 msz = msg_size(msg);
u32 max = mtu - INT_H_SIZE;
@@ -330,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
if (msz > (max / 2))
return false;
- bbuf = tipc_buf_acquire(max);
- if (!bbuf)
+ bskb = tipc_buf_acquire(max);
+ if (!bskb)
return false;
- skb_trim(bbuf, INT_H_SIZE);
- bmsg = buf_msg(bbuf);
- tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
+ skb_trim(bskb, INT_H_SIZE);
+ bmsg = buf_msg(bskb);
+ tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- bbuf->next = (*buf)->next;
- tipc_msg_bundle(bbuf, *buf, mtu);
- *buf = bbuf;
- return true;
+ TIPC_SKB_CB(bskb)->bundling = true;
+ __skb_queue_tail(list, bskb);
+ return tipc_msg_bundle(list, skb, mtu);
}
/**
@@ -429,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message
*/
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
- struct sk_buff *buf = chain;
- struct sk_buff *frag = buf;
+ struct sk_buff *skb;
+ struct sk_buff *frag = NULL;
struct sk_buff *head = NULL;
int hdr_sz;
/* Copy header if single buffer */
- if (!buf->next) {
- hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
- return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+ if (skb_queue_len(list) == 1) {
+ skb = skb_peek(list);
+ hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+ return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
}
/* Clone all fragments and reassemble */
- while (buf) {
- frag = skb_clone(buf, GFP_ATOMIC);
+ skb_queue_walk(list, skb) {
+ frag = skb_clone(skb, GFP_ATOMIC);
if (!frag)
goto error;
frag->next = NULL;
@@ -452,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
break;
if (!head)
goto error;
- buf = buf->next;
}
return frag;
error:
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 0ea7b695ac4d..d5c83d7ecb47 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -464,11 +464,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
#define FRAGMENT 1
#define LAST_FRAGMENT 2
-/* Bundling protocol message types
- */
-#define BUNDLE_OPEN 0
-#define BUNDLE_CLOSED 1
-
/*
* Link management protocol message types
*/
@@ -739,13 +734,14 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
+bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode);
+bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
+ u32 mtu, u32 dnode);
-int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
- int offset, int dsz, int mtu , struct sk_buff **chain);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ int dsz, int mtu, struct sk_buff_head *list);
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
#endif
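
The signature changes above mean callers now own an sk_buff_head, let tipc_msg_build() fill it from a struct msghdr, and hand the whole chain to tipc_link_xmit(). A hedged sketch of that calling pattern follows; the function and local names are invented, and the real callers live in socket.c, which is not part of this excerpt:

/* Illustrative sketch only: the list-based build-and-send pattern implied
 * by the prototypes above. example_send() and its locals are invented
 * names, not code from the patch.
 */
static int example_send(struct tipc_msg *mhdr, struct msghdr *m, int dsz,
			int mtu, u32 dnode, u32 selector)
{
	struct sk_buff_head chain;
	int rc;

	__skb_queue_head_init(&chain);

	/* Returns the message data size, or a negative errno */
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &chain);
	if (rc < 0)
		return rc;

	/* Consumes the chain except when returning -ELINKCONG */
	return tipc_link_xmit(&chain, dnode, selector);
}
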
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 376d2bb51d8d..ba6083dca95b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -38,39 +38,6 @@
#include "link.h"
#include "name_distr.h"
-/**
- * struct publ_list - list of publications made by this node
- * @list: circular list of publications
- * @list_size: number of entries in list
- */
-struct publ_list {
- struct list_head list;
- u32 size;
-};
-
-static struct publ_list publ_zone = {
- .list = LIST_HEAD_INIT(publ_zone.list),
- .size = 0,
-};
-
-static struct publ_list publ_cluster = {
- .list = LIST_HEAD_INIT(publ_cluster.list),
- .size = 0,
-};
-
-static struct publ_list publ_node = {
- .list = LIST_HEAD_INIT(publ_node.list),
- .size = 0,
-};
-
-static struct publ_list *publ_lists[] = {
- NULL,
- &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
- &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
- &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
-};
-
-
int sysctl_tipc_named_timeout __read_mostly = 2000;
/**
@@ -114,9 +81,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
return buf;
}
-void named_cluster_distribute(struct sk_buff *buf)
+void named_cluster_distribute(struct sk_buff *skb)
{
- struct sk_buff *obuf;
+ struct sk_buff *oskb;
struct tipc_node *node;
u32 dnode;
@@ -127,15 +94,15 @@ void named_cluster_distribute(struct sk_buff *buf)
continue;
if (!tipc_node_active_links(node))
continue;
- obuf = skb_copy(buf, GFP_ATOMIC);
- if (!obuf)
+ oskb = skb_copy(skb, GFP_ATOMIC);
+ if (!oskb)
break;
- msg_set_destnode(buf_msg(obuf), dnode);
- tipc_link_xmit(obuf, dnode, dnode);
+ msg_set_destnode(buf_msg(oskb), dnode);
+ tipc_link_xmit_skb(oskb, dnode, dnode);
}
rcu_read_unlock();
- kfree_skb(buf);
+ kfree_skb(skb);
}
/**
@@ -146,8 +113,8 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
struct sk_buff *buf;
struct distr_item *item;
- list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
- publ_lists[publ->scope]->size++;
+ list_add_tail_rcu(&publ->local_list,
+ &tipc_nametbl->publ_list[publ->scope]);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -172,7 +139,6 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
struct distr_item *item;
list_del(&publ->local_list);
- publ_lists[publ->scope]->size--;
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -190,32 +156,28 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
/**
* named_distribute - prepare name info for bulk distribution to another node
- * @msg_list: list of messages (buffers) to be returned from this function
+ * @list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
*/
-static void named_distribute(struct list_head *msg_list, u32 dnode,
- struct publ_list *pls)
+static void named_distribute(struct sk_buff_head *list, u32 dnode,
+ struct list_head *pls)
{
struct publication *publ;
- struct sk_buff *buf = NULL;
+ struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- uint dsz = pls->size * ITEM_SIZE;
uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
- uint rem = dsz;
- uint msg_rem = 0;
+ uint msg_rem = msg_dsz;
- list_for_each_entry(publ, &pls->list, local_list) {
+ list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */
- if (!buf) {
- msg_rem = min_t(uint, rem, msg_dsz);
- rem -= msg_rem;
- buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
- if (!buf) {
+ if (!skb) {
+ skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+ if (!skb) {
pr_warn("Bulk publication failure\n");
return;
}
- item = (struct distr_item *)msg_data(buf_msg(buf));
+ item = (struct distr_item *)msg_data(buf_msg(skb));
}
/* Pack publication into message: */
@@ -225,10 +187,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
/* Append full buffer to list: */
if (!msg_rem) {
- list_add_tail((struct list_head *)buf, msg_list);
- buf = NULL;
+ __skb_queue_tail(list, skb);
+ skb = NULL;
+ msg_rem = msg_dsz;
}
}
+ if (skb) {
+ msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
+ skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
+ __skb_queue_tail(list, skb);
+ }
}
/**
@@ -236,36 +204,68 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
*/
void tipc_named_node_up(u32 dnode)
{
- LIST_HEAD(msg_list);
- struct sk_buff *buf_chain;
-
- read_lock_bh(&tipc_nametbl_lock);
- named_distribute(&msg_list, dnode, &publ_cluster);
- named_distribute(&msg_list, dnode, &publ_zone);
- read_unlock_bh(&tipc_nametbl_lock);
-
- /* Convert circular list to linear list and send: */
- buf_chain = (struct sk_buff *)msg_list.next;
- ((struct sk_buff *)msg_list.prev)->next = NULL;
- tipc_link_xmit(buf_chain, dnode, dnode);
+ struct sk_buff_head head;
+
+ __skb_queue_head_init(&head);
+
+ rcu_read_lock();
+ named_distribute(&head, dnode,
+ &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
+ named_distribute(&head, dnode,
+ &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
+ rcu_read_unlock();
+
+ tipc_link_xmit(&head, dnode, dnode);
+}
+
+static void tipc_publ_subscribe(struct publication *publ, u32 addr)
+{
+ struct tipc_node *node;
+
+ if (in_own_node(addr))
+ return;
+
+ node = tipc_node_find(addr);
+ if (!node) {
+ pr_warn("Node subscription rejected, unknown node 0x%x\n",
+ addr);
+ return;
+ }
+
+ tipc_node_lock(node);
+ list_add_tail(&publ->nodesub_list, &node->publ_list);
+ tipc_node_unlock(node);
+}
+
+static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
+{
+ struct tipc_node *node;
+
+ node = tipc_node_find(addr);
+ if (!node)
+ return;
+
+ tipc_node_lock(node);
+ list_del_init(&publ->nodesub_list);
+ tipc_node_unlock(node);
}
/**
- * named_purge_publ - remove publication associated with a failed node
+ * tipc_publ_purge - remove publication associated with a failed node
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
-static void named_purge_publ(struct publication *publ)
+static void tipc_publ_purge(struct publication *publ, u32 addr)
{
struct publication *p;
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
if (p)
- tipc_nodesub_unsubscribe(&p->subscr);
- write_unlock_bh(&tipc_nametbl_lock);
+ tipc_publ_unsubscribe(p, addr);
+ spin_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
pr_err("Unable to remove publication from failed node\n"
@@ -274,7 +274,15 @@ static void named_purge_publ(struct publication *publ)
publ->key);
}
- kfree(p);
+ kfree_rcu(p, rcu);
+}
+
+void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
+{
+ struct publication *publ, *tmp;
+
+ list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
+ tipc_publ_purge(publ, addr);
}
/**
@@ -294,9 +302,7 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
TIPC_CLUSTER_SCOPE, node,
ntohl(i->ref), ntohl(i->key));
if (publ) {
- tipc_nodesub_subscribe(&publ->subscr, node, publ,
- (net_ev_handler)
- named_purge_publ);
+ tipc_publ_subscribe(publ, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
@@ -304,8 +310,8 @@ static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
node, ntohl(i->ref),
ntohl(i->key));
if (publ) {
- tipc_nodesub_unsubscribe(&publ->subscr);
- kfree(publ);
+ tipc_publ_unsubscribe(publ, node);
+ kfree_rcu(publ, rcu);
return true;
}
} else {
@@ -370,14 +376,14 @@ void tipc_named_rcv(struct sk_buff *buf)
u32 count = msg_data_sz(msg) / ITEM_SIZE;
u32 node = msg_orignode(msg);
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
while (count--) {
if (!tipc_update_nametbl(item, node, msg_type(msg)))
tipc_named_add_backlog(item, msg_type(msg), node);
item++;
}
tipc_named_process_backlog();
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
kfree_skb(buf);
}
@@ -393,11 +399,12 @@ void tipc_named_reinit(void)
struct publication *publ;
int scope;
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
- list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+ list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope],
+ local_list)
publ->node = tipc_own_addr;
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
}
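
The reworked named_distribute() above drops the up-front size calculation (pls->size * ITEM_SIZE) and instead fills one MTU-sized buffer after another, trimming the final, partially filled buffer before queuing it. A minimal standalone sketch of that packing arithmetic follows (not kernel code; the ITEM_SIZE and MTU values are illustrative assumptions):

/* Standalone sketch: models how publications are packed into MTU-sized
 * bulk messages, with the last partial buffer trimmed to its real payload.
 */
#include <stdio.h>

#define ITEM_SIZE 20            /* bytes per packed publication item (assumed) */

int main(void)
{
	unsigned int mtu = 1500;    /* link MTU toward the peer node (assumed) */
	unsigned int npubl = 100;   /* publications to distribute */

	/* Round the payload down to a whole number of items, as named_distribute() does. */
	unsigned int msg_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE;
	unsigned int per_msg = msg_dsz / ITEM_SIZE;

	unsigned int full = npubl / per_msg;
	unsigned int rest = npubl % per_msg;

	printf("%u full message(s) of %u items, plus a trimmed message of %u items\n",
	       full, per_msg, rest);
	return 0;
}
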
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index b9e75feb3434..cef55cedcfb2 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -74,5 +74,6 @@ void tipc_named_node_up(u32 dnode);
void tipc_named_rcv(struct sk_buff *buf);
void tipc_named_reinit(void);
void tipc_named_process_backlog(void);
+void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 3a6a0a7c0759..c8df0223371a 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,8 +1,8 @@
/*
* net/tipc/name_table.c: TIPC name table code
*
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,12 @@
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
+static const struct nla_policy
+tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
+ [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
+};
+
/**
* struct name_info - name sequence publication info
* @node_list: circular list of publications made by own node
@@ -86,6 +92,7 @@ struct sub_seq {
* @ns_list: links to adjacent name sequences in hash chain
* @subscriptions: list of subscriptions for this 'type'
* @lock: spinlock controlling access to publication lists of all sub-sequences
+ * @rcu: RCU callback head used for deferred freeing
*/
struct name_seq {
u32 type;
@@ -95,21 +102,11 @@ struct name_seq {
struct hlist_node ns_list;
struct list_head subscriptions;
spinlock_t lock;
+ struct rcu_head rcu;
};
-/**
- * struct name_table - table containing all existing port name publications
- * @types: pointer to fixed-sized array of name sequence lists,
- * accessed via hashing on 'type'; name sequence lists are *not* sorted
- * @local_publ_count: number of publications issued by this node
- */
-struct name_table {
- struct hlist_head *types;
- u32 local_publ_count;
-};
-
-static struct name_table table;
-DEFINE_RWLOCK(tipc_nametbl_lock);
+struct name_table *tipc_nametbl;
+DEFINE_SPINLOCK(tipc_nametbl_lock);
static int hash(int x)
{
@@ -136,9 +133,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
publ->node = node;
publ->ref = port_ref;
publ->key = key;
- INIT_LIST_HEAD(&publ->local_list);
INIT_LIST_HEAD(&publ->pport_list);
- INIT_LIST_HEAD(&publ->subscr.nodesub_list);
return publ;
}
@@ -173,22 +168,10 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
nseq->alloc = 1;
INIT_HLIST_NODE(&nseq->ns_list);
INIT_LIST_HEAD(&nseq->subscriptions);
- hlist_add_head(&nseq->ns_list, seq_head);
+ hlist_add_head_rcu(&nseq->ns_list, seq_head);
return nseq;
}
-/*
- * nameseq_delete_empty - deletes a name sequence structure if now unused
- */
-static void nameseq_delete_empty(struct name_seq *seq)
-{
- if (!seq->first_free && list_empty(&seq->subscriptions)) {
- hlist_del_init(&seq->ns_list);
- kfree(seq->sseqs);
- kfree(seq);
- }
-}
-
/**
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
@@ -469,8 +452,8 @@ static struct name_seq *nametbl_find_seq(u32 type)
struct hlist_head *seq_head;
struct name_seq *ns;
- seq_head = &table.types[hash(type)];
- hlist_for_each_entry(ns, seq_head, ns_list) {
+ seq_head = &tipc_nametbl->seq_hlist[hash(type)];
+ hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
if (ns->type == type)
return ns;
}
@@ -481,7 +464,9 @@ static struct name_seq *nametbl_find_seq(u32 type)
struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port, u32 key)
{
+ struct publication *publ;
struct name_seq *seq = nametbl_find_seq(type);
+ int index = hash(type);
if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
(lower > upper)) {
@@ -491,12 +476,16 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
}
if (!seq)
- seq = tipc_nameseq_create(type, &table.types[hash(type)]);
+ seq = tipc_nameseq_create(type,
+ &tipc_nametbl->seq_hlist[index]);
if (!seq)
return NULL;
- return tipc_nameseq_insert_publ(seq, type, lower, upper,
+ spin_lock_bh(&seq->lock);
+ publ = tipc_nameseq_insert_publ(seq, type, lower, upper,
scope, node, port, key);
+ spin_unlock_bh(&seq->lock);
+ return publ;
}
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
@@ -508,8 +497,16 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
if (!seq)
return NULL;
+ spin_lock_bh(&seq->lock);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
- nameseq_delete_empty(seq);
+ if (!seq->first_free && list_empty(&seq->subscriptions)) {
+ hlist_del_init_rcu(&seq->ns_list);
+ kfree(seq->sseqs);
+ spin_unlock_bh(&seq->lock);
+ kfree_rcu(seq, rcu);
+ return publ;
+ }
+ spin_unlock_bh(&seq->lock);
return publ;
}
@@ -538,14 +535,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
if (!tipc_in_scope(*destnode, tipc_own_addr))
return 0;
- read_lock_bh(&tipc_nametbl_lock);
+ rcu_read_lock();
seq = nametbl_find_seq(type);
if (unlikely(!seq))
goto not_found;
+ spin_lock_bh(&seq->lock);
sseq = nameseq_find_subseq(seq, instance);
if (unlikely(!sseq))
- goto not_found;
- spin_lock_bh(&seq->lock);
+ goto no_match;
info = sseq->info;
/* Closest-First Algorithm */
@@ -595,7 +592,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
no_match:
spin_unlock_bh(&seq->lock);
not_found:
- read_unlock_bh(&tipc_nametbl_lock);
+ rcu_read_unlock();
*destnode = node;
return ref;
}
@@ -621,13 +618,12 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
struct name_info *info;
int res = 0;
- read_lock_bh(&tipc_nametbl_lock);
+ rcu_read_lock();
seq = nametbl_find_seq(type);
if (!seq)
goto exit;
spin_lock_bh(&seq->lock);
-
sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
sseq_stop = seq->sseqs + seq->first_free;
for (; sseq != sseq_stop; sseq++) {
@@ -645,10 +641,9 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
if (info->cluster_list_size != info->node_list_size)
res = 1;
}
-
spin_unlock_bh(&seq->lock);
exit:
- read_unlock_bh(&tipc_nametbl_lock);
+ rcu_read_unlock();
return res;
}
@@ -661,22 +656,23 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *publ;
struct sk_buff *buf = NULL;
- if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
+ spin_lock_bh(&tipc_nametbl_lock);
+ if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
pr_warn("Publication failed, local publication limit reached (%u)\n",
TIPC_MAX_PUBLICATIONS);
+ spin_unlock_bh(&tipc_nametbl_lock);
return NULL;
}
- write_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
tipc_own_addr, port_ref, key);
if (likely(publ)) {
- table.local_publ_count++;
+ tipc_nametbl->local_publ_count++;
buf = tipc_named_publish(publ);
/* Any pending external events? */
tipc_named_process_backlog();
}
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
if (buf)
named_cluster_distribute(buf);
@@ -689,27 +685,28 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
- struct sk_buff *buf;
+ struct sk_buff *skb = NULL;
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (likely(publ)) {
- table.local_publ_count--;
- buf = tipc_named_withdraw(publ);
+ tipc_nametbl->local_publ_count--;
+ skb = tipc_named_withdraw(publ);
/* Any pending external events? */
tipc_named_process_backlog();
- write_unlock_bh(&tipc_nametbl_lock);
list_del_init(&publ->pport_list);
- kfree(publ);
+ kfree_rcu(publ, rcu);
+ } else {
+ pr_err("Unable to remove local publication\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ type, lower, ref, key);
+ }
+ spin_unlock_bh(&tipc_nametbl_lock);
- if (buf)
- named_cluster_distribute(buf);
+ if (skb) {
+ named_cluster_distribute(skb);
return 1;
}
- write_unlock_bh(&tipc_nametbl_lock);
- pr_err("Unable to remove local publication\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- type, lower, ref, key);
return 0;
}
@@ -719,12 +716,14 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
u32 type = s->seq.type;
+ int index = hash(type);
struct name_seq *seq;
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(type);
if (!seq)
- seq = tipc_nameseq_create(type, &table.types[hash(type)]);
+ seq = tipc_nameseq_create(type,
+ &tipc_nametbl->seq_hlist[index]);
if (seq) {
spin_lock_bh(&seq->lock);
tipc_nameseq_subscribe(seq, s);
@@ -733,7 +732,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
pr_warn("Failed to create subscription for {%u,%u,%u}\n",
s->seq.type, s->seq.lower, s->seq.upper);
}
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
}
/**
@@ -743,18 +742,23 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
struct name_seq *seq;
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(s->seq.type);
if (seq != NULL) {
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
- spin_unlock_bh(&seq->lock);
- nameseq_delete_empty(seq);
+ if (!seq->first_free && list_empty(&seq->subscriptions)) {
+ hlist_del_init_rcu(&seq->ns_list);
+ kfree(seq->sseqs);
+ spin_unlock_bh(&seq->lock);
+ kfree_rcu(seq, rcu);
+ } else {
+ spin_unlock_bh(&seq->lock);
+ }
}
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
}
-
/**
* subseq_list - print specified sub-sequence contents into the given buffer
*/
@@ -876,8 +880,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
lowbound = 0;
upbound = ~0;
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
- seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_head, ns_list) {
+ seq_head = &tipc_nametbl->seq_hlist[i];
+ hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
ret += nameseq_list(seq, buf + ret, len - ret,
depth, seq->type,
lowbound, upbound, i);
@@ -892,8 +896,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
}
ret += nametbl_header(buf + ret, len - ret, depth);
i = hash(type);
- seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_head, ns_list) {
+ seq_head = &tipc_nametbl->seq_hlist[i];
+ hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
if (seq->type == type) {
ret += nameseq_list(seq, buf + ret, len - ret,
depth, type,
@@ -925,11 +929,11 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
pb = TLV_DATA(rep_tlv);
pb_len = ULTRA_STRING_MAX_LEN;
argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
- read_lock_bh(&tipc_nametbl_lock);
+ rcu_read_lock();
str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
ntohl(argv->type),
ntohl(argv->lowbound), ntohl(argv->upbound));
- read_unlock_bh(&tipc_nametbl_lock);
+ rcu_read_unlock();
str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -939,12 +943,18 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
int tipc_nametbl_init(void)
{
- table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
- GFP_ATOMIC);
- if (!table.types)
+ int i;
+
+ tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
+ if (!tipc_nametbl)
return -ENOMEM;
- table.local_publ_count = 0;
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
+ INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);
+
+ INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
+ INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
+ INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
return 0;
}
@@ -959,17 +969,19 @@ static void tipc_purge_publications(struct name_seq *seq)
struct sub_seq *sseq;
struct name_info *info;
- if (!seq->sseqs) {
- nameseq_delete_empty(seq);
- return;
- }
+ spin_lock_bh(&seq->lock);
sseq = seq->sseqs;
info = sseq->info;
list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
publ->ref, publ->key);
- kfree(publ);
+ kfree_rcu(publ, rcu);
}
+ hlist_del_init_rcu(&seq->ns_list);
+ kfree(seq->sseqs);
+ spin_unlock_bh(&seq->lock);
+
+ kfree_rcu(seq, rcu);
}
void tipc_nametbl_stop(void)
@@ -977,21 +989,202 @@ void tipc_nametbl_stop(void)
u32 i;
struct name_seq *seq;
struct hlist_head *seq_head;
- struct hlist_node *safe;
/* Verify name table is empty and purge any lingering
* publications, then release the name table
*/
- write_lock_bh(&tipc_nametbl_lock);
+ spin_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
- if (hlist_empty(&table.types[i]))
+ if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
continue;
- seq_head = &table.types[i];
- hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ seq_head = &tipc_nametbl->seq_hlist[i];
+ hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
tipc_purge_publications(seq);
}
}
- kfree(table.types);
- table.types = NULL;
- write_unlock_bh(&tipc_nametbl_lock);
+ spin_unlock_bh(&tipc_nametbl_lock);
+
+ synchronize_net();
+ kfree(tipc_nametbl);
+
+}
+
+static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
+ struct name_seq *seq,
+ struct sub_seq *sseq, u32 *last_publ)
+{
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *publ;
+ struct publication *p;
+
+ if (*last_publ) {
+ list_for_each_entry(p, &sseq->info->zone_list, zone_list)
+ if (p->key == *last_publ)
+ break;
+ if (p->key != *last_publ)
+ return -EPIPE;
+ } else {
+ p = list_first_entry(&sseq->info->zone_list, struct publication,
+ zone_list);
+ }
+
+ list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
+ *last_publ = p->key;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
+ &tipc_genl_v2_family, NLM_F_MULTI,
+ TIPC_NL_NAME_TABLE_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
+ if (!attrs)
+ goto msg_full;
+
+ publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
+ if (!publ)
+ goto attr_msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
+ goto publ_msg_full;
+
+ nla_nest_end(msg->skb, publ);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+ }
+ *last_publ = 0;
+
+ return 0;
+
+publ_msg_full:
+ nla_nest_cancel(msg->skb, publ);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
+ u32 *last_lower, u32 *last_publ)
+{
+ struct sub_seq *sseq;
+ struct sub_seq *sseq_start;
+ int err;
+
+ if (*last_lower) {
+ sseq_start = nameseq_find_subseq(seq, *last_lower);
+ if (!sseq_start)
+ return -EPIPE;
+ } else {
+ sseq_start = seq->sseqs;
+ }
+
+ for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+ err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
+ if (err) {
+ *last_lower = sseq->lower;
+ return err;
+ }
+ }
+ *last_lower = 0;
+
+ return 0;
+}
+
+static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
+ u32 *last_lower, u32 *last_publ)
+{
+ struct hlist_head *seq_head;
+ struct name_seq *seq = NULL;
+ int err;
+ int i;
+
+ if (*last_type)
+ i = hash(*last_type);
+ else
+ i = 0;
+
+ for (; i < TIPC_NAMETBL_SIZE; i++) {
+ seq_head = &tipc_nametbl->seq_hlist[i];
+
+ if (*last_type) {
+ seq = nametbl_find_seq(*last_type);
+ if (!seq)
+ return -EPIPE;
+ } else {
+ hlist_for_each_entry_rcu(seq, seq_head, ns_list)
+ break;
+ if (!seq)
+ continue;
+ }
+
+ hlist_for_each_entry_from_rcu(seq, ns_list) {
+ spin_lock_bh(&seq->lock);
+ err = __tipc_nl_subseq_list(msg, seq, last_lower,
+ last_publ);
+
+ if (err) {
+ *last_type = seq->type;
+ spin_unlock_bh(&seq->lock);
+ return err;
+ }
+ spin_unlock_bh(&seq->lock);
+ }
+ *last_type = 0;
+ }
+ return 0;
+}
+
+int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int done = cb->args[3];
+ u32 last_type = cb->args[0];
+ u32 last_lower = cb->args[1];
+ u32 last_publ = cb->args[2];
+ struct tipc_nl_msg msg;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+ err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ);
+ if (!err) {
+ done = 1;
+ } else if (err != -EMSGSIZE) {
+ /* We never set seq or call nl_dump_check_consistent(), which
+ * means that setting prev_seq here will cause the consistency
+ * check to fail in the netlink callback handler, resulting in
+ * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set
+ * if we got an error.
+ */
+ cb->prev_seq = 1;
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = last_type;
+ cb->args[1] = last_lower;
+ cb->args[2] = last_publ;
+ cb->args[3] = done;
+
+ return skb->len;
}
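
The new tipc_nl_name_table_dump() walk is resumable: the position reached in the table is saved in cb->args[] (last type, last lower bound, last publication key), so the next dump invocation continues where the previous message filled up, or returns -EPIPE when the saved cursor no longer exists. A minimal standalone sketch of that cursor-resumption pattern (not kernel code; the sizes and cursor layout are illustrative):

/* Standalone sketch: models a netlink-style dump that saves its progress
 * in a callback cursor and resumes from it on the next invocation.
 */
#include <stdio.h>

#define N_ITEMS  10
#define PER_CALL 3              /* entries that fit into one dump message (assumed) */

struct dump_cb {
	unsigned int args[2];       /* args[0] = done flag, args[1] = resume cursor */
};

/* Emit up to PER_CALL entries, saving the resume point in cb->args[1]. */
static int dump(struct dump_cb *cb)
{
	unsigned int i, emitted = 0;

	if (cb->args[0])            /* previous call finished the walk */
		return 0;

	for (i = cb->args[1]; i < N_ITEMS; i++) {
		if (emitted == PER_CALL) {
			cb->args[1] = i;    /* continue from here next time */
			return emitted;
		}
		printf("entry %u\n", i);
		emitted++;
	}
	cb->args[0] = 1;            /* walk complete */
	return emitted;
}

int main(void)
{
	struct dump_cb cb = { { 0, 0 } };

	while (dump(&cb) > 0)
		;
	return 0;
}
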
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index f02f48b9a216..5f0dee92010d 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,7 +1,7 @@
/*
* net/tipc/name_table.h: Include file for TIPC name table code
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -37,15 +37,15 @@
#ifndef _TIPC_NAME_TABLE_H
#define _TIPC_NAME_TABLE_H
-#include "node_subscr.h"
-
struct tipc_subscription;
struct tipc_port_list;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
*/
-#define TIPC_ZM_SRV 3 /* zone master service name type */
+#define TIPC_ZM_SRV 3 /* zone master service name type */
+#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1)
+#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
/**
* struct publication - info about a published (name or) name sequence
@@ -56,12 +56,13 @@ struct tipc_port_list;
* @node: network address of publishing port's node
* @ref: publishing port
* @key: publication key
- * @subscr: subscription to "node down" event (for off-node publications only)
+ * @nodesub_list: subscription to "node down" event (off-node publication only)
* @local_list: adjacent entries in list of publications made by this node
* @pport_list: adjacent entries in list of publications made by this port
* @node_list: adjacent matching name seq publications with >= node scope
* @cluster_list: adjacent matching name seq publications with >= cluster scope
* @zone_list: adjacent matching name seq publications with >= zone scope
+ * @rcu: RCU callback head used for deferred freeing
*
* Note that the node list, cluster list, and zone list are circular lists.
*/
@@ -73,16 +74,31 @@ struct publication {
u32 node;
u32 ref;
u32 key;
- struct tipc_node_subscr subscr;
+ struct list_head nodesub_list;
struct list_head local_list;
struct list_head pport_list;
struct list_head node_list;
struct list_head cluster_list;
struct list_head zone_list;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct name_table - table containing all existing port name publications
+ * @seq_hlist: name sequence hash lists
+ * @publ_list: publication lists
+ * @local_publ_count: number of publications issued by this node
+ */
+struct name_table {
+ struct hlist_head seq_hlist[TIPC_NAMETBL_SIZE];
+ struct list_head publ_list[TIPC_PUBL_SCOPE_NUM];
+ u32 local_publ_count;
};
+extern spinlock_t tipc_nametbl_lock;
+extern struct name_table *tipc_nametbl;
-extern rwlock_t tipc_nametbl_lock;
+int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 93b9944a6a8b..cf13df3cde8f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -42,6 +42,11 @@
#include "node.h"
#include "config.h"
+static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+};
+
/*
* The TIPC locking policy is designed to ensure a very fine locking
* granularity, permitting complete parallel access to individual
@@ -138,3 +143,104 @@ void tipc_net_stop(void)
pr_info("Left network mode\n");
}
+
+static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
+{
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_NET_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int done = cb->args[0];
+ struct tipc_nl_msg msg;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ err = __tipc_nl_add_net(&msg);
+ if (err)
+ goto out;
+
+ done = 1;
+out:
+ cb->args[0] = done;
+
+ return skb->len;
+}
+
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy);
+ if (err)
+ return err;
+
+ if (attrs[TIPC_NLA_NET_ID]) {
+ u32 val;
+
+ /* Can't change net id once TIPC has joined a network */
+ if (tipc_own_addr)
+ return -EPERM;
+
+ val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
+ if (val < 1 || val > 9999)
+ return -EINVAL;
+
+ tipc_net_id = val;
+ }
+
+ if (attrs[TIPC_NLA_NET_ADDR]) {
+ u32 addr;
+
+ /* Can't change net addr once TIPC has joined a network */
+ if (tipc_own_addr)
+ return -EPERM;
+
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (!tipc_addr_node_valid(addr))
+ return -EINVAL;
+
+ rtnl_lock();
+ tipc_net_start(addr);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 59ef3388be2c..a81c1b9eb150 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -1,7 +1,7 @@
/*
* net/tipc/net.h: Include file for TIPC network routing code
*
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -37,7 +37,13 @@
#ifndef _TIPC_NET_H
#define _TIPC_NET_H
+#include <net/genetlink.h>
+
int tipc_net_start(u32 addr);
+
void tipc_net_stop(void);
+int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+
#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index ad844d365340..b891e3905bc4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -1,7 +1,7 @@
/*
* net/tipc/netlink.c: TIPC configuration handling
*
- * Copyright (c) 2005-2006, Ericsson AB
+ * Copyright (c) 2005-2006, 2014, Ericsson AB
* Copyright (c) 2005-2007, Wind River Systems
* All rights reserved.
*
@@ -36,6 +36,12 @@
#include "core.h"
#include "config.h"
+#include "socket.h"
+#include "name_table.h"
+#include "bearer.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
#include <net/genetlink.h>
static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -68,6 +74,19 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
+ [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TIPC_NLA_BEARER] = { .type = NLA_NESTED, },
+ [TIPC_NLA_SOCK] = { .type = NLA_NESTED, },
+ [TIPC_NLA_PUBL] = { .type = NLA_NESTED, },
+ [TIPC_NLA_LINK] = { .type = NLA_NESTED, },
+ [TIPC_NLA_MEDIA] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NODE] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NET] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, }
+};
+
+/* Legacy ASCII API */
static struct genl_family tipc_genl_family = {
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_NAME,
@@ -76,6 +95,7 @@ static struct genl_family tipc_genl_family = {
.maxattr = 0,
};
+/* Legacy ASCII API */
static struct genl_ops tipc_genl_ops[] = {
{
.cmd = TIPC_GENL_CMD,
@@ -83,12 +103,122 @@ static struct genl_ops tipc_genl_ops[] = {
},
};
+/* Users of the legacy API (tipc-config) can't handle operations being added
+ * to this family, so the new API is handled by a separate genl family.
+ */
+struct genl_family tipc_genl_v2_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TIPC_GENL_V2_NAME,
+ .version = TIPC_GENL_V2_VERSION,
+ .hdrsize = 0,
+ .maxattr = TIPC_NLA_MAX,
+};
+
+static const struct genl_ops tipc_genl_v2_ops[] = {
+ {
+ .cmd = TIPC_NL_BEARER_DISABLE,
+ .doit = tipc_nl_bearer_disable,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_ENABLE,
+ .doit = tipc_nl_bearer_enable,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_GET,
+ .doit = tipc_nl_bearer_get,
+ .dumpit = tipc_nl_bearer_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_SET,
+ .doit = tipc_nl_bearer_set,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_SOCK_GET,
+ .dumpit = tipc_nl_sk_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_PUBL_GET,
+ .dumpit = tipc_nl_publ_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_LINK_GET,
+ .doit = tipc_nl_link_get,
+ .dumpit = tipc_nl_link_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_LINK_SET,
+ .doit = tipc_nl_link_set,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_LINK_RESET_STATS,
+ .doit = tipc_nl_link_reset_stats,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_MEDIA_GET,
+ .doit = tipc_nl_media_get,
+ .dumpit = tipc_nl_media_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_MEDIA_SET,
+ .doit = tipc_nl_media_set,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_NODE_GET,
+ .dumpit = tipc_nl_node_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_NET_GET,
+ .dumpit = tipc_nl_net_dump,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_NET_SET,
+ .doit = tipc_nl_net_set,
+ .policy = tipc_nl_policy,
+ },
+ {
+ .cmd = TIPC_NL_NAME_TABLE_GET,
+ .dumpit = tipc_nl_name_table_dump,
+ .policy = tipc_nl_policy,
+ }
+};
+
+int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
+{
+ u32 maxattr = tipc_genl_v2_family.maxattr;
+
+ *attr = tipc_genl_v2_family.attrbuf;
+ if (!*attr)
+ return -EOPNOTSUPP;
+
+ return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy);
+}
+
int tipc_netlink_start(void)
{
int res;
res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
if (res) {
+ pr_err("Failed to register legacy interface\n");
+ return res;
+ }
+
+ res = genl_register_family_with_ops(&tipc_genl_v2_family,
+ tipc_genl_v2_ops);
+ if (res) {
pr_err("Failed to register netlink interface\n");
return res;
}
@@ -98,4 +228,5 @@ int tipc_netlink_start(void)
void tipc_netlink_stop(void)
{
genl_unregister_family(&tipc_genl_family);
+ genl_unregister_family(&tipc_genl_v2_family);
}
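
The registration above leaves the legacy single-command family untouched and adds a second family whose ops table pairs every new TIPC_NL_* command with a doit and/or dumpit handler. A standalone sketch of that command-to-handler dispatch pattern (not kernel code; the command names and handlers are illustrative):

/* Standalone sketch: models an ops table that routes command numbers to
 * a "doit" (single request) or "dumpit" (multi-part dump) handler, the
 * way tipc_genl_v2_ops[] does for the new API.
 */
#include <stdio.h>
#include <stddef.h>

enum { CMD_BEARER_GET = 1, CMD_NET_GET, CMD_NAME_TABLE_GET };

struct op {
	int cmd;
	int (*doit)(void);          /* handle a single request */
	int (*dumpit)(void);        /* handle a multi-part dump */
};

static int bearer_get(void)   { printf("bearer get\n");      return 0; }
static int net_dump(void)     { printf("net dump\n");        return 0; }
static int nametbl_dump(void) { printf("name table dump\n"); return 0; }

static const struct op ops[] = {
	{ CMD_BEARER_GET,     bearer_get, NULL },
	{ CMD_NET_GET,        NULL,       net_dump },
	{ CMD_NAME_TABLE_GET, NULL,       nametbl_dump },
};

static int dispatch(int cmd, int dump)
{
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		if (ops[i].cmd != cmd)
			continue;
		if (dump && ops[i].dumpit)
			return ops[i].dumpit();
		if (!dump && ops[i].doit)
			return ops[i].doit();
	}
	return -1;                  /* unknown command or unsupported mode */
}

int main(void)
{
	dispatch(CMD_BEARER_GET, 0);
	dispatch(CMD_NAME_TABLE_GET, 1);
	return 0;
}
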
diff --git a/net/tipc/node_subscr.h b/net/tipc/netlink.h
index d91b8cc81e3d..1425c6869de0 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/netlink.h
@@ -1,8 +1,7 @@
/*
- * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
+ * net/tipc/netlink.h: Include file for TIPC netlink code
*
- * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2014, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,30 +33,16 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _TIPC_NODE_SUBSCR_H
-#define _TIPC_NODE_SUBSCR_H
+#ifndef _TIPC_NETLINK_H
+#define _TIPC_NETLINK_H
-#include "addr.h"
+extern struct genl_family tipc_genl_v2_family;
+int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
-typedef void (*net_ev_handler) (void *usr_handle);
-
-/**
- * struct tipc_node_subscr - "node down" subscription entry
- * @node: ptr to node structure of interest (or NULL, if none)
- * @handle_node_down: routine to invoke when node fails
- * @usr_handle: argument to pass to routine when node fails
- * @nodesub_list: adjacent entries in list of subscriptions for the node
- */
-struct tipc_node_subscr {
- struct tipc_node *node;
- net_ev_handler handle_node_down;
- void *usr_handle;
- struct list_head nodesub_list;
+struct tipc_nl_msg {
+ struct sk_buff *skb;
+ u32 portid;
+ u32 seq;
};
-void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
- void *usr_handle, net_ev_handler handle_down);
-void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
-void tipc_nodesub_notify(struct list_head *nsub_list);
-
#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 5781634e957d..8d353ec77a66 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -58,6 +58,12 @@ struct tipc_sock_conn {
struct list_head list;
};
+static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
+ [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
+};
+
/*
* A trivial power-of-two bitmask technique is used for speed, since this
* operation is done for every incoming TIPC packet. The number of hash table
@@ -107,9 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr)
spin_lock_init(&n_ptr->lock);
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
- INIT_LIST_HEAD(&n_ptr->nsub);
+ INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
- __skb_queue_head_init(&n_ptr->waiting_sks);
+ skb_queue_head_init(&n_ptr->waiting_sks);
+ __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
@@ -375,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.recv_permitted) {
- kfree_skb_list(n_ptr->bclink.deferred_head);
- n_ptr->bclink.deferred_size = 0;
+ __skb_queue_purge(&n_ptr->bclink.deferred_queue);
if (n_ptr->bclink.reasm_buf) {
kfree_skb(n_ptr->bclink.reasm_buf);
@@ -568,7 +574,7 @@ void tipc_node_unlock(struct tipc_node *node)
skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
if (flags & TIPC_NOTIFY_NODE_DOWN) {
- list_replace_init(&node->nsub, &nsub_list);
+ list_replace_init(&node->publ_list, &nsub_list);
list_replace_init(&node->conn_sks, &conn_sks);
}
node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
@@ -585,7 +591,7 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_node_abort_sock_conns(&conn_sks);
if (!list_empty(&nsub_list))
- tipc_nodesub_notify(&nsub_list);
+ tipc_publ_notify(&nsub_list, addr);
if (flags & TIPC_WAKEUP_BCAST_USERS)
tipc_bclink_wakeup_users();
@@ -601,3 +607,93 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
link_id, addr);
}
+
+/* Caller should hold node lock for the passed node */
+static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
+{
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+ NLM_F_MULTI, TIPC_NL_NODE_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
+ goto attr_msg_full;
+ if (tipc_node_is_up(node))
+ if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int done = cb->args[0];
+ int last_addr = cb->args[1];
+ struct tipc_node *node;
+ struct tipc_nl_msg msg;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+
+ if (last_addr && !tipc_node_find(last_addr)) {
+ rcu_read_unlock();
+ /* We never set seq or call nl_dump_check_consistent(), which
+ * means that setting prev_seq here will cause the consistency
+ * check to fail in the netlink callback handler, resulting in
+ * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set
+ * if the node state changed while we released the lock.
+ */
+ cb->prev_seq = 1;
+ return -EPIPE;
+ }
+
+ list_for_each_entry_rcu(node, &tipc_node_list, list) {
+ if (last_addr) {
+ if (node->addr == last_addr)
+ last_addr = 0;
+ else
+ continue;
+ }
+
+ tipc_node_lock(node);
+ err = __tipc_nl_add_node(&msg, node);
+ if (err) {
+ last_addr = node->addr;
+ tipc_node_unlock(node);
+ goto out;
+ }
+
+ tipc_node_unlock(node);
+ }
+ done = 1;
+out:
+ cb->args[0] = done;
+ cb->args[1] = last_addr;
+ rcu_read_unlock();
+
+ return skb->len;
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 04e91458bb29..cbe0e950f1cc 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,7 +1,7 @@
/*
* net/tipc/node.h: Include file for TIPC node management routines
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
* Copyright (c) 2005, 2010-2014, Wind River Systems
* All rights reserved.
*
@@ -37,7 +37,6 @@
#ifndef _TIPC_NODE_H
#define _TIPC_NODE_H
-#include "node_subscr.h"
#include "addr.h"
#include "net.h"
#include "bearer.h"
@@ -72,9 +71,7 @@ enum {
* @last_in: sequence # of last in-sequence b'cast message received from node
* @last_sent: sequence # of last b'cast message sent by node
* @oos_state: state tracker for handling OOS b'cast messages
- * @deferred_size: number of OOS b'cast messages in deferred queue
- * @deferred_head: oldest OOS b'cast message received from node
- * @deferred_tail: newest OOS b'cast message received from node
+ * @deferred_queue: deferred queue of OOS b'cast messages received from node
* @reasm_buf: broadcast reassembly queue head from node
* @recv_permitted: true if node is allowed to receive b'cast messages
*/
@@ -84,8 +81,7 @@ struct tipc_node_bclink {
u32 last_sent;
u32 oos_state;
u32 deferred_size;
- struct sk_buff *deferred_head;
- struct sk_buff *deferred_tail;
+ struct sk_buff_head deferred_queue;
struct sk_buff *reasm_buf;
bool recv_permitted;
};
@@ -104,7 +100,7 @@ struct tipc_node_bclink {
* @link_cnt: number of links to node
* @signature: node instance identifier
* @link_id: local and remote bearer ids of changing link, if any
- * @nsub: list of "node down" subscriptions monitoring node
+ * @publ_list: list of publications
* @rcu: rcu struct for tipc_node
*/
struct tipc_node {
@@ -121,7 +117,7 @@ struct tipc_node {
int working_links;
u32 signature;
u32 link_id;
- struct list_head nsub;
+ struct list_head publ_list;
struct sk_buff_head waiting_sks;
struct list_head conn_sks;
struct rcu_head rcu;
@@ -145,6 +141,8 @@ void tipc_node_unlock(struct tipc_node *node);
int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(u32 dnode, u32 port);
+int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
+
static inline void tipc_node_lock(struct tipc_node *node)
{
spin_lock_bh(&node->lock);
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
deleted file mode 100644
index 2d13eea8574a..000000000000
--- a/net/tipc/node_subscr.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * net/tipc/node_subscr.c: TIPC "node down" subscription handling
- *
- * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, 2010-2011, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "node_subscr.h"
-#include "node.h"
-
-/**
- * tipc_nodesub_subscribe - create "node down" subscription for specified node
- */
-void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
- void *usr_handle, net_ev_handler handle_down)
-{
- if (in_own_node(addr)) {
- node_sub->node = NULL;
- return;
- }
-
- node_sub->node = tipc_node_find(addr);
- if (!node_sub->node) {
- pr_warn("Node subscription rejected, unknown node 0x%x\n",
- addr);
- return;
- }
- node_sub->handle_node_down = handle_down;
- node_sub->usr_handle = usr_handle;
-
- tipc_node_lock(node_sub->node);
- list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
- tipc_node_unlock(node_sub->node);
-}
-
-/**
- * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
- */
-void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
-{
- if (!node_sub->node)
- return;
-
- tipc_node_lock(node_sub->node);
- list_del_init(&node_sub->nodesub_list);
- tipc_node_unlock(node_sub->node);
-}
-
-/**
- * tipc_nodesub_notify - notify subscribers that a node is unreachable
- *
- * Note: node is locked by caller
- */
-void tipc_nodesub_notify(struct list_head *nsub_list)
-{
- struct tipc_node_subscr *ns, *safe;
- net_ev_handler handle_node_down;
-
- list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
- handle_node_down = ns->handle_node_down;
- if (handle_node_down) {
- ns->handle_node_down = NULL;
- handle_node_down(ns->usr_handle);
- }
- }
-}
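
With node_subscr.c removed, a publication no longer carries its own callback-based "node down" subscription; the publishing node keeps the publication on its publ_list, and tipc_publ_notify() simply walks that list and purges every entry when the node fails. A minimal standalone sketch of that walk-and-purge idea (not kernel code; the structures below are illustrative, not the kernel's):

/* Standalone sketch: a node holds a plain list of publications, and a
 * single notify routine purges them all on node failure, replacing the
 * old per-publication callback subscriptions.
 */
#include <stdio.h>
#include <stdlib.h>

struct publ {
	unsigned int key;
	struct publ *next;          /* models the nodesub_list linkage */
};

struct node {
	unsigned int addr;
	struct publ *publ_list;     /* publications issued toward this node */
};

static void publ_purge(struct publ *p, unsigned int addr)
{
	printf("purging publication %u from failed node 0x%x\n", p->key, addr);
	free(p);
}

/* Walk the failed node's publication list and purge every entry,
 * as tipc_publ_notify() does with list_for_each_entry_safe(). */
static void publ_notify(struct node *n)
{
	struct publ *p = n->publ_list;

	while (p) {
		struct publ *next = p->next;

		publ_purge(p, n->addr);
		p = next;
	}
	n->publ_list = NULL;
}

int main(void)
{
	struct node n = { 0x1001001, NULL };
	unsigned int k;

	for (k = 1; k <= 3; k++) {
		struct publ *p = malloc(sizeof(*p));

		p->key = k;
		p->next = n.publ_list;
		n.publ_list = p;
	}
	publ_notify(&n);
	return 0;
}
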
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 51bddc236a15..4731cad99d1c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -121,6 +121,14 @@ static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static struct proto tipc_proto_kern;
+static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
+ [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
+ [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
+};
+
/*
* Revised TIPC socket locking policy:
*
@@ -236,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)
*/
static void tsk_rej_rx_queue(struct sock *sk)
{
- struct sk_buff *buf;
+ struct sk_buff *skb;
u32 dnode;
- while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
- if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit(buf, dnode, 0);
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
+ if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+ tipc_link_xmit_skb(skb, dnode, 0);
}
}
@@ -454,7 +462,7 @@ static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk;
- struct sk_buff *buf;
+ struct sk_buff *skb;
u32 dnode;
/*
@@ -473,11 +481,11 @@ static int tipc_release(struct socket *sock)
*/
dnode = tsk_peer_node(tsk);
while (sock->state != SS_DISCONNECTING) {
- buf = __skb_dequeue(&sk->sk_receive_queue);
- if (buf == NULL)
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb == NULL)
break;
- if (TIPC_SKB_CB(buf)->handle != NULL)
- kfree_skb(buf);
+ if (TIPC_SKB_CB(skb)->handle != NULL)
+ kfree_skb(skb);
else {
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
@@ -485,8 +493,8 @@ static int tipc_release(struct socket *sock)
tsk->connected = 0;
tipc_node_remove_conn(dnode, tsk->ref);
}
- if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit(buf, dnode, 0);
+ if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+ tipc_link_xmit_skb(skb, dnode, 0);
}
}
@@ -494,12 +502,12 @@ static int tipc_release(struct socket *sock)
tipc_sk_ref_discard(tsk->ref);
k_cancel_timer(&tsk->timer);
if (tsk->connected) {
- buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
SHORT_H_SIZE, 0, dnode, tipc_own_addr,
tsk_peer_port(tsk),
tsk->ref, TIPC_ERR_NO_PORT);
- if (buf)
- tipc_link_xmit(buf, dnode, tsk->ref);
+ if (skb)
+ tipc_link_xmit_skb(skb, dnode, tsk->ref);
tipc_node_remove_conn(dnode, tsk->ref);
}
k_term_timer(&tsk->timer);
@@ -692,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
* tipc_sendmcast - send multicast message
* @sock: socket structure
* @seq: destination address
- * @iov: message data to send
+ * @msg: message to send
* @dsz: total length of message data
* @timeo: timeout to wait for wakeup
*
@@ -700,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
* Returns the number of bytes sent on success, or errno
*/
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
- struct iovec *iov, size_t dsz, long timeo)
+ struct msghdr *msg, size_t dsz, long timeo)
{
struct sock *sk = sock->sk;
struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
- struct sk_buff *buf;
+ struct sk_buff_head head;
uint mtu;
int rc;
@@ -719,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
new_mtu:
mtu = tipc_bclink_get_mtu();
- rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+ __skb_queue_head_init(&head);
+ rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
if (unlikely(rc < 0))
return rc;
do {
- rc = tipc_bclink_xmit(buf);
+ rc = tipc_bclink_xmit(&head);
if (likely(rc >= 0)) {
rc = dsz;
break;
@@ -736,7 +745,7 @@ new_mtu:
tipc_sk(sk)->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
- kfree_skb_list(buf);
+ __skb_queue_purge(&head);
} while (!rc);
return rc;
}
@@ -818,39 +827,6 @@ exit:
return TIPC_OK;
}
-/**
- * dest_name_check - verify user is permitted to send to specified port name
- * @dest: destination address
- * @m: descriptor for message to be sent
- *
- * Prevents restricted configuration commands from being issued by
- * unauthorized users.
- *
- * Returns 0 if permission is granted, otherwise errno
- */
-static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
-{
- struct tipc_cfg_msg_hdr hdr;
-
- if (unlikely(dest->addrtype == TIPC_ADDR_ID))
- return 0;
- if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
- return 0;
- if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
- return 0;
- if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
- return -EACCES;
-
- if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
- return -EMSGSIZE;
- if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
- return -EFAULT;
- if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
- return -EACCES;
-
- return 0;
-}
-
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
struct sock *sk = sock->sk;
@@ -897,13 +873,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct iovec *iov = m->msg_iov;
u32 dnode, dport;
- struct sk_buff *buf;
+ struct sk_buff_head head;
+ struct sk_buff *skb;
struct tipc_name_seq *seq = &dest->addr.nameseq;
u32 mtu;
long timeo;
- int rc = -EINVAL;
+ int rc;
if (unlikely(!dest))
return -EDESTADDRREQ;
@@ -936,14 +912,11 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
tsk->conn_instance = dest->addr.name.name.instance;
}
}
- rc = dest_name_check(dest, m);
- if (rc)
- goto exit;
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
if (dest->addrtype == TIPC_ADDR_MCAST) {
- rc = tipc_sendmcast(sock, seq, iov, dsz, timeo);
+ rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
goto exit;
} else if (dest->addrtype == TIPC_ADDR_NAME) {
u32 type = dest->addr.name.name.type;
@@ -974,13 +947,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
new_mtu:
mtu = tipc_node_get_mtu(dnode, tsk->ref);
- rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+ __skb_queue_head_init(&head);
+ rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
if (rc < 0)
goto exit;
do {
- TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong;
- rc = tipc_link_xmit(buf, dnode, tsk->ref);
+ skb = skb_peek(&head);
+ TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
+ rc = tipc_link_xmit(&head, dnode, tsk->ref);
if (likely(rc >= 0)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
@@ -994,7 +969,7 @@ new_mtu:
tsk->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
- kfree_skb_list(buf);
+ __skb_queue_purge(&head);
} while (!rc);
exit:
if (iocb)
@@ -1051,7 +1026,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff *buf;
+ struct sk_buff_head head;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
u32 ref = tsk->ref;
int rc = -EINVAL;
@@ -1086,12 +1061,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
next:
mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
- rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
+ __skb_queue_head_init(&head);
+ rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
if (unlikely(rc < 0))
goto exit;
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_link_xmit(buf, dnode, ref);
+ rc = tipc_link_xmit(&head, dnode, ref);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
@@ -1109,7 +1085,7 @@ next:
}
rc = tipc_wait_for_sndpkt(sock, &timeo);
if (rc)
- kfree_skb_list(buf);
+ __skb_queue_purge(&head);
} while (!rc);
exit:
if (iocb)
@@ -1254,20 +1230,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
- struct sk_buff *buf = NULL;
+ struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
u32 dnode = tsk_peer_node(tsk);
if (!tsk->connected)
return;
- buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
+ skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
- if (!buf)
+ if (!skb)
return;
- msg = buf_msg(buf);
+ msg = buf_msg(skb);
msg_set_msgcnt(msg, ack);
- tipc_link_xmit(buf, dnode, msg_link_selector(msg));
+ tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1372,8 +1348,7 @@ restart:
sz = buf_len;
m->msg_flags |= MSG_TRUNC;
}
- res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
- m->msg_iov, sz);
+ res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
if (res)
goto exit;
res = sz;
@@ -1473,8 +1448,8 @@ restart:
needed = (buf_len - sz_copied);
sz_to_copy = (sz <= needed) ? sz : needed;
- res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
- m->msg_iov, sz_to_copy);
+ res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
+ m, sz_to_copy);
if (res)
goto exit;
@@ -1556,7 +1531,7 @@ static void tipc_data_ready(struct sock *sk)
* @tsk: TIPC socket
* @msg: message
*
- * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
*/
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
{
@@ -1723,20 +1698,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
/**
* tipc_backlog_rcv - handle incoming message from backlog queue
* @sk: socket
- * @buf: message
+ * @skb: message
*
* Caller must hold socket lock, but not port lock.
*
* Returns 0
*/
-static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int rc;
u32 onode;
struct tipc_sock *tsk = tipc_sk(sk);
- uint truesize = buf->truesize;
+ uint truesize = skb->truesize;
- rc = filter_rcv(sk, buf);
+ rc = filter_rcv(sk, skb);
if (likely(!rc)) {
if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
@@ -1744,25 +1719,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
return 0;
}
- if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
+ if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
return 0;
- tipc_link_xmit(buf, onode, 0);
+ tipc_link_xmit_skb(skb, onode, 0);
return 0;
}
/**
* tipc_sk_rcv - handle incoming message
- * @buf: buffer containing arriving message
+ * @skb: buffer containing arriving message
* Consumes buffer
* Returns 0 if success, or errno: -EHOSTUNREACH
*/
-int tipc_sk_rcv(struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *skb)
{
struct tipc_sock *tsk;
struct sock *sk;
- u32 dport = msg_destport(buf_msg(buf));
+ u32 dport = msg_destport(buf_msg(skb));
int rc = TIPC_OK;
uint limit;
u32 dnode;
@@ -1770,7 +1745,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
/* Validate destination and message */
tsk = tipc_sk_get(dport);
if (unlikely(!tsk)) {
- rc = tipc_msg_eval(buf, &dnode);
+ rc = tipc_msg_eval(skb, &dnode);
goto exit;
}
sk = &tsk->sk;
@@ -1779,12 +1754,12 @@ int tipc_sk_rcv(struct sk_buff *buf)
spin_lock_bh(&sk->sk_lock.slock);
if (!sock_owned_by_user(sk)) {
- rc = filter_rcv(sk, buf);
+ rc = filter_rcv(sk, skb);
} else {
if (sk->sk_backlog.len == 0)
atomic_set(&tsk->dupl_rcvcnt, 0);
- limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
- if (sk_add_backlog(sk, buf, limit))
+ limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
+ if (sk_add_backlog(sk, skb, limit))
rc = -TIPC_ERR_OVERLOAD;
}
spin_unlock_bh(&sk->sk_lock.slock);
@@ -1792,10 +1767,10 @@ int tipc_sk_rcv(struct sk_buff *buf)
if (likely(!rc))
return 0;
exit:
- if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
+ if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
return -EHOSTUNREACH;
- tipc_link_xmit(buf, dnode, 0);
+ tipc_link_xmit_skb(skb, dnode, 0);
return (rc < 0) ? -EHOSTUNREACH : 0;
}
@@ -2053,7 +2028,7 @@ static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct sk_buff *buf;
+ struct sk_buff *skb;
u32 dnode;
int res;
@@ -2068,23 +2043,23 @@ static int tipc_shutdown(struct socket *sock, int how)
restart:
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
- buf = __skb_dequeue(&sk->sk_receive_queue);
- if (buf) {
- if (TIPC_SKB_CB(buf)->handle != NULL) {
- kfree_skb(buf);
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb) {
+ if (TIPC_SKB_CB(skb)->handle != NULL) {
+ kfree_skb(skb);
goto restart;
}
- if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN))
- tipc_link_xmit(buf, dnode, tsk->ref);
+ if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
+ tipc_link_xmit_skb(skb, dnode, tsk->ref);
tipc_node_remove_conn(dnode, tsk->ref);
} else {
dnode = tsk_peer_node(tsk);
- buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE,
0, dnode, tipc_own_addr,
tsk_peer_port(tsk),
tsk->ref, TIPC_CONN_SHUTDOWN);
- tipc_link_xmit(buf, dnode, tsk->ref);
+ tipc_link_xmit_skb(skb, dnode, tsk->ref);
}
tsk->connected = 0;
sock->state = SS_DISCONNECTING;
@@ -2113,7 +2088,7 @@ static void tipc_sk_timeout(unsigned long ref)
{
struct tipc_sock *tsk;
struct sock *sk;
- struct sk_buff *buf = NULL;
+ struct sk_buff *skb = NULL;
u32 peer_port, peer_node;
tsk = tipc_sk_get(ref);
@@ -2131,20 +2106,20 @@ static void tipc_sk_timeout(unsigned long ref)
if (tsk->probing_state == TIPC_CONN_PROBING) {
/* Previous probe not answered -> self abort */
- buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
SHORT_H_SIZE, 0, tipc_own_addr,
peer_node, ref, peer_port,
TIPC_ERR_NO_PORT);
} else {
- buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
0, peer_node, tipc_own_addr,
peer_port, ref, TIPC_OK);
tsk->probing_state = TIPC_CONN_PROBING;
k_start_timer(&tsk->timer, tsk->probing_interval);
}
bh_unlock_sock(sk);
- if (buf)
- tipc_link_xmit(buf, peer_node, ref);
+ if (skb)
+ tipc_link_xmit_skb(skb, peer_node, ref);
exit:
tipc_sk_put(tsk);
}
@@ -2802,3 +2777,233 @@ void tipc_socket_stop(void)
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
+{
+ u32 peer_node;
+ u32 peer_port;
+ struct nlattr *nest;
+
+ peer_node = tsk_peer_node(tsk);
+ peer_port = tsk_peer_port(tsk);
+
+ nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+
+ if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
+ goto msg_full;
+
+ if (tsk->conn_type != 0) {
+ if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
+ goto msg_full;
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+msg_full:
+ nla_nest_cancel(skb, nest);
+
+ return -EMSGSIZE;
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk)
+{
+ int err;
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
+ if (!hdr)
+ goto msg_cancel;
+
+ attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
+ if (!attrs)
+ goto genlmsg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
+ goto attr_msg_cancel;
+
+ if (tsk->connected) {
+ err = __tipc_nl_add_sk_con(skb, tsk);
+ if (err)
+ goto attr_msg_cancel;
+ } else if (!list_empty(&tsk->publications)) {
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+ goto attr_msg_cancel;
+ }
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+genlmsg_cancel:
+ genlmsg_cancel(skb, hdr);
+msg_cancel:
+ return -EMSGSIZE;
+}
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ struct tipc_sock *tsk;
+ u32 prev_ref = cb->args[0];
+ u32 ref = prev_ref;
+
+ tsk = tipc_sk_get_next(&ref);
+ for (; tsk; tsk = tipc_sk_get_next(&ref)) {
+ lock_sock(&tsk->sk);
+ err = __tipc_nl_add_sk(skb, cb, tsk);
+ release_sock(&tsk->sk);
+ tipc_sk_put(tsk);
+ if (err)
+ break;
+
+ prev_ref = ref;
+ }
+
+ cb->args[0] = prev_ref;
+
+ return skb->len;
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct publication *publ)
+{
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
+ if (!hdr)
+ goto msg_cancel;
+
+ attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
+ if (!attrs)
+ goto genlmsg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
+ goto attr_msg_cancel;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+genlmsg_cancel:
+ genlmsg_cancel(skb, hdr);
+msg_cancel:
+ return -EMSGSIZE;
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 *last_publ)
+{
+ int err;
+ struct publication *p;
+
+ if (*last_publ) {
+ list_for_each_entry(p, &tsk->publications, pport_list) {
+ if (p->key == *last_publ)
+ break;
+ }
+ if (p->key != *last_publ) {
+ /* We never set seq or call nl_dump_check_consistent(), which
+ * means that setting prev_seq here will cause the consistency
+ * check in the netlink callback handler to fail, resulting in
+ * the last NLMSG_DONE message having the NLM_F_DUMP_INTR flag
+ * set.
+ */
+ cb->prev_seq = 1;
+ *last_publ = 0;
+ return -EPIPE;
+ }
+ } else {
+ p = list_first_entry(&tsk->publications, struct publication,
+ pport_list);
+ }
+
+ list_for_each_entry_from(p, &tsk->publications, pport_list) {
+ err = __tipc_nl_add_sk_publ(skb, cb, p);
+ if (err) {
+ *last_publ = p->key;
+ return err;
+ }
+ }
+ *last_publ = 0;
+
+ return 0;
+}
+
+int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ u32 tsk_ref = cb->args[0];
+ u32 last_publ = cb->args[1];
+ u32 done = cb->args[2];
+ struct tipc_sock *tsk;
+
+ if (!tsk_ref) {
+ struct nlattr **attrs;
+ struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
+
+ err = tipc_nlmsg_parse(cb->nlh, &attrs);
+ if (err)
+ return err;
+
+ err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
+ attrs[TIPC_NLA_SOCK],
+ tipc_nl_sock_policy);
+ if (err)
+ return err;
+
+ if (!sock[TIPC_NLA_SOCK_REF])
+ return -EINVAL;
+
+ tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+ }
+
+ if (done)
+ return 0;
+
+ tsk = tipc_sk_get(tsk_ref);
+ if (!tsk)
+ return -EINVAL;
+
+ lock_sock(&tsk->sk);
+ err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
+ if (!err)
+ done = 1;
+ release_sock(&tsk->sk);
+ tipc_sk_put(tsk);
+
+ cb->args[0] = tsk_ref;
+ cb->args[1] = last_publ;
+ cb->args[2] = done;
+
+ return skb->len;
+}
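The dump callbacks added above carry their position between invocations in cb->args[], so a dump that overflows one skb is resumed at the next socket or publication rather than restarted. A toy, userspace-only sketch of that resume-by-position idea (no netlink involved; the function and variable names are invented for the illustration):

#include <stdio.h>

/* Toy model of a netlink dump: each pass may emit at most 'budget'
 * records, and the position is carried between passes in 'args',
 * much like cb->args[] in the callbacks above. */
static int dump_pass(const unsigned int *keys, int nkeys,
		     unsigned int *args, int budget)
{
	int emitted = 0;
	int i = (int)args[0];		/* index of the next record */

	while (i < nkeys && emitted < budget) {
		printf("record key=%u\n", keys[i]);
		i++;
		emitted++;
	}
	args[0] = (unsigned int)i;	/* remember where to resume */
	return emitted;
}

int main(void)
{
	unsigned int keys[] = { 11, 22, 33, 44, 55 };
	unsigned int args[1] = { 0 };

	/* Keep "filling skbs" of two records until a pass comes up short. */
	while (dump_pass(keys, 5, args, 2) == 2)
		printf("-- skb full, resuming --\n");
	return 0;
}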
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index baa43d03901e..d34089387006 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -36,6 +36,7 @@
#define _TIPC_SOCK_H
#include <net/sock.h>
+#include <net/genetlink.h>
#define TIPC_CONNACK_INTV 256
#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
@@ -47,5 +48,7 @@ void tipc_sk_mcast_rcv(struct sk_buff *buf);
void tipc_sk_reinit(void);
int tipc_sk_ref_table_init(u32 requested_size, u32 start);
void tipc_sk_ref_table_stop(void);
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 31b5cb232a43..0344206b984f 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -305,7 +305,6 @@ static int subscr_subscribe(struct tipc_subscr *s,
kfree(sub);
return -EINVAL;
}
- INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
sub->subscriber = subscriber;
sub->swap = swap;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e96884380732..8e1b10274b02 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1516,7 +1516,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
skb_put(skb, len - data_len);
skb->data_len = data_len;
skb->len = len;
- err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
+ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
if (err)
goto out_free;
@@ -1694,8 +1694,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
skb_put(skb, size - data_len);
skb->data_len = data_len;
skb->len = size;
- err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
- sent, size);
+ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
if (err) {
kfree_skb(skb);
goto out_err;
@@ -1825,7 +1824,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
- err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
+ err = skb_copy_datagram_msg(skb, skip, msg, size);
if (err)
goto out_free;
@@ -2030,8 +2029,8 @@ again:
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
- if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
- msg->msg_iov, chunk)) {
+ if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
+ msg, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 85d232bed87d..1d0e39c9a3e2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1013,7 +1013,7 @@ static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
}
- err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
+ err = transport->dgram_enqueue(vsk, remote_addr, msg, len);
out:
release_sock(sk);
@@ -1617,7 +1617,7 @@ static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
*/
written = transport->stream_enqueue(
- vsk, msg->msg_iov,
+ vsk, msg,
len - total_written);
if (written < 0) {
err = -ENOMEM;
@@ -1739,7 +1739,7 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
break;
read = transport->stream_dequeue(
- vsk, msg->msg_iov,
+ vsk, msg,
len - copied, flags);
if (read < 0) {
err = -ENOMEM;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 9bb63ffec4f2..02d2e5229240 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1697,7 +1697,7 @@ static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
static int vmci_transport_dgram_enqueue(
struct vsock_sock *vsk,
struct sockaddr_vm *remote_addr,
- struct iovec *iov,
+ struct msghdr *msg,
size_t len)
{
int err;
@@ -1714,7 +1714,7 @@ static int vmci_transport_dgram_enqueue(
if (!dg)
return -ENOMEM;
- memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);
+ memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
dg->dst = vmci_make_handle(remote_addr->svm_cid,
remote_addr->svm_port);
@@ -1773,8 +1773,7 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
}
/* Place the datagram payload in the user's iovec. */
- err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
- payload_len);
+ err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
if (err)
goto out;
@@ -1836,22 +1835,23 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
static ssize_t vmci_transport_stream_dequeue(
struct vsock_sock *vsk,
- struct iovec *iov,
+ struct msghdr *msg,
size_t len,
int flags)
{
if (flags & MSG_PEEK)
- return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0);
+ return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
else
- return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0);
+ return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);
}
static ssize_t vmci_transport_stream_enqueue(
struct vsock_sock *vsk,
- struct iovec *iov,
+ struct msghdr *msg,
size_t len)
{
- return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
+ /* XXX: stripping const */
+ return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0);
}
static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 29c8675f9a11..22ba971741e5 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
- bool "cfg80211 wireless extensions compatibility"
+ bool
depends on CFG80211
select WEXT_CORE
help
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index a761670af31d..4c9e39f04ef8 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 72d81e2154d5..7aaf7415dc4c 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -115,7 +115,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
EXPORT_SYMBOL(cfg80211_chandef_valid);
static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
- int *pri40, int *pri80)
+ u32 *pri40, u32 *pri80)
{
int tmp;
@@ -366,6 +366,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
break;
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
@@ -602,7 +603,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
- u32 width, control_freq;
+ u32 width, control_freq, cap;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -642,7 +643,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
case NL80211_CHAN_WIDTH_80:
if (!vht_cap->vht_supported)
@@ -653,7 +655,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
case NL80211_CHAN_WIDTH_160:
if (!vht_cap->vht_supported)
return false;
- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
width = 160;
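The two hunks above replace flag-style tests with mask-and-compare because the supported-channel-width portion of the VHT capability word is a two-bit field, not a set of independent flags. A small standalone illustration of the difference; the constants are local stand-ins that mirror the kernel's values, so treat this as a sketch rather than kernel code:

#include <stdio.h>

/* Local copies of the relevant capability encodings, for illustration. */
#define VHT_CAP_SUPP_CHAN_WIDTH_160MHZ          0x00000004
#define VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define VHT_CAP_SUPP_CHAN_WIDTH_MASK            0x0000000c

int main(void)
{
	/* Field value 2: the device supports both 160 MHz and 80+80 MHz. */
	unsigned int cap = VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;

	/* Old style: treating the field as a flag misses 160 MHz support
	 * for this encoding, so the check wrongly rejects the device. */
	printf("flag test:  %d\n", !!(cap & VHT_CAP_SUPP_CHAN_WIDTH_160MHZ));

	/* New style: extract the field, then compare it against every
	 * encoding that actually implies 160 MHz support. */
	unsigned int width = cap & VHT_CAP_SUPP_CHAN_WIDTH_MASK;
	printf("field test: %d\n",
	       width == VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
	       width == VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
	return 0;
}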
@@ -892,6 +896,13 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
*radar_detect |= BIT(wdev->chandef.width);
}
return;
+ case NL80211_IFTYPE_OCB:
+ if (wdev->chandef.chan) {
+ *chan = wdev->chandef.chan;
+ *chanmode = CHAN_MODE_SHARED;
+ return;
+ }
+ break;
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f52a4cd7017c..53dda7728f86 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -86,11 +86,11 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
return &rdev->wiphy;
}
-int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
- char *newname)
+static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
+ const char *newname)
{
struct cfg80211_registered_device *rdev2;
- int wiphy_idx, taken = -1, result, digits;
+ int wiphy_idx, taken = -1, digits;
ASSERT_RTNL();
@@ -109,15 +109,28 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
+ /* Ensure another device does not already have this name. */
+ list_for_each_entry(rdev2, &cfg80211_rdev_list, list)
+ if (strcmp(newname, wiphy_name(&rdev2->wiphy)) == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
+ char *newname)
+{
+ int result;
+
+ ASSERT_RTNL();
/* Ignore nop renames */
- if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
+ if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0)
return 0;
- /* Ensure another device does not already have this name. */
- list_for_each_entry(rdev2, &cfg80211_rdev_list, list)
- if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0)
- return -EINVAL;
+ result = cfg80211_dev_check_name(rdev, newname);
+ if (result < 0)
+ return result;
result = device_rename(&rdev->wiphy.dev, newname);
if (result)
@@ -309,7 +322,8 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
/* exported functions */
-struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
+struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ const char *requested_name)
{
static atomic_t wiphy_counter = ATOMIC_INIT(0);
@@ -346,7 +360,31 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
rdev->wiphy_idx--;
/* give it a proper name */
- dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+ if (requested_name && requested_name[0]) {
+ int rv;
+
+ rtnl_lock();
+ rv = cfg80211_dev_check_name(rdev, requested_name);
+
+ if (rv < 0) {
+ rtnl_unlock();
+ goto use_default_name;
+ }
+
+ rv = dev_set_name(&rdev->wiphy.dev, "%s", requested_name);
+ rtnl_unlock();
+ if (rv)
+ goto use_default_name;
+ } else {
+use_default_name:
+ /* NOTE: This is *probably* safe without holding the RTNL because
+ * of the restrictions on phy names. This call could still fail if
+ * some other part of the kernel (re)named a device phyX. We should
+ * perhaps add locking, check the return value, and pick a
+ * different name if this one already exists.
+ */
+ dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+ }
INIT_LIST_HEAD(&rdev->wdev_list);
INIT_LIST_HEAD(&rdev->beacon_registrations);
@@ -406,7 +444,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
return &rdev->wiphy;
}
-EXPORT_SYMBOL(wiphy_new);
+EXPORT_SYMBOL(wiphy_new_nm);
static int wiphy_verify_combinations(struct wiphy *wiphy)
{
@@ -503,6 +541,24 @@ int wiphy_register(struct wiphy *wiphy)
!wiphy->wowlan->tcp))
return -EINVAL;
#endif
+ if (WARN_ON((wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) &&
+ (!rdev->ops->tdls_channel_switch ||
+ !rdev->ops->tdls_cancel_channel_switch)))
+ return -EINVAL;
+
+ /*
+ * if a wiphy has unsupported modes for regulatory channel enforcement,
+ * opt out of enforcement checking
+ */
+ if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_AP_VLAN) |
+ BIT(NL80211_IFTYPE_MONITOR)))
+ wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
if (WARN_ON(wiphy->coalesce &&
(!wiphy->coalesce->n_rules ||
@@ -831,7 +887,22 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_P2P_GO:
__cfg80211_stop_ap(rdev, dev, true);
break;
- default:
+ case NL80211_IFTYPE_OCB:
+ __cfg80211_leave_ocb(rdev, dev);
+ break;
+ case NL80211_IFTYPE_WDS:
+ /* must be handled by mac80211/driver, has no APIs */
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* cannot happen, has no netdev */
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MONITOR:
+ /* nothing to do */
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ /* invalid */
break;
}
}
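wiphy_new_nm() above tries the caller's requested name first, rejects it if another registered device already uses it, and otherwise falls back to the numbered phy%d scheme. A minimal userspace sketch of that selection logic; the device list and names below are invented for the example and nothing here is cfg80211 API:

#include <stdio.h>
#include <string.h>

static const char *registered[] = { "phy0", "wlan-test" };

/* Reject a requested name that an existing device already owns,
 * loosely mirroring the duplicate-name check in the hunk above. */
static int name_taken(const char *name)
{
	for (size_t i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
		if (strcmp(registered[i], name) == 0)
			return 1;
	return 0;
}

static void pick_name(const char *requested, int idx, char *out, size_t len)
{
	if (requested && requested[0] && !name_taken(requested))
		snprintf(out, len, "%s", requested);
	else
		snprintf(out, len, "phy%d", idx);	/* default scheme */
}

int main(void)
{
	char name[32];

	pick_name("wlan-test", 1, name, sizeof(name));	/* taken -> phy1 */
	printf("%s\n", name);
	pick_name("radar0", 2, name, sizeof(name));	/* free  -> radar0 */
	printf("%s\n", name);
	return 0;
}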
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7e3a3cef7df9..faa5b1609aae 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -111,6 +111,7 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
rdev->wiphy.wowlan_config->tcp->sock)
sock_release(rdev->wiphy.wowlan_config->tcp->sock);
kfree(rdev->wiphy.wowlan_config->tcp);
+ kfree(rdev->wiphy.wowlan_config->nd_config);
kfree(rdev->wiphy.wowlan_config);
#endif
}
@@ -290,6 +291,18 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct cfg80211_chan_def *chandef);
+/* OCB */
+int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ocb_setup *setup);
+int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ocb_setup *setup);
+int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
+int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
+
/* AP */
int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool notify);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5839c85075f1..7ca4b5133123 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -209,7 +209,7 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
}
/* policy for the attributes */
-static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
+static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
.len = 20-1 },
@@ -388,13 +388,14 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
[NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
[NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
- [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
+ [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
[NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG },
[NL80211_ATTR_TSID] = { .type = NLA_U8 },
[NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+ [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
};
/* policy for the key attributes */
@@ -428,6 +429,7 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG },
[NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG },
[NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED },
+ [NL80211_WOWLAN_TRIG_NET_DETECT] = { .type = NLA_NESTED },
};
static const struct nla_policy
@@ -884,7 +886,12 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
if (!wdev->current_bss)
return -ENOLINK;
break;
- default:
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_WDS:
+ case NUM_NL80211_IFTYPES:
return -EINVAL;
}
@@ -1083,6 +1090,8 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
return -ENOBUFS;
+ /* TODO: send wowlan net detect */
+
nla_nest_end(msg, nl_wowlan);
return 0;
@@ -1514,8 +1523,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
CMD(channel_switch, CHANNEL_SWITCH);
CMD(set_qos_map, SET_QOS_MAP);
- if (rdev->wiphy.flags &
- WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)
+ if (rdev->wiphy.features &
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
CMD(add_tx_ts, ADD_TX_TS);
}
/* add into the if now */
@@ -2308,7 +2317,8 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
static int nl80211_send_chandef(struct sk_buff *msg,
const struct cfg80211_chan_def *chandef)
{
- WARN_ON(!cfg80211_chandef_valid(chandef));
+ if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+ return -EINVAL;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
chandef->chan->center_freq))
@@ -2336,12 +2346,16 @@ static int nl80211_send_chandef(struct sk_buff *msg,
static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+ struct wireless_dev *wdev, bool removal)
{
struct net_device *dev = wdev->netdev;
+ u8 cmd = NL80211_CMD_NEW_INTERFACE;
void *hdr;
- hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_INTERFACE);
+ if (removal)
+ cmd = NL80211_CMD_DEL_INTERFACE;
+
+ hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -1;
@@ -2408,7 +2422,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
}
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- rdev, wdev) < 0) {
+ rdev, wdev, false) < 0) {
goto out;
}
if_idx++;
@@ -2436,7 +2450,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
- rdev, wdev) < 0) {
+ rdev, wdev, false) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -2582,7 +2596,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct vif_params params;
struct wireless_dev *wdev;
- struct sk_buff *msg;
+ struct sk_buff *msg, *event;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
u32 flags;
@@ -2605,7 +2619,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
!(rdev->wiphy.interface_modes & (1 << type)))
return -EOPNOTSUPP;
- if (type == NL80211_IFTYPE_P2P_DEVICE && info->attrs[NL80211_ATTR_MAC]) {
+ if ((type == NL80211_IFTYPE_P2P_DEVICE ||
+ rdev->wiphy.features & NL80211_FEATURE_MAC_ON_CREATE) &&
+ info->attrs[NL80211_ATTR_MAC]) {
nla_memcpy(params.macaddr, info->attrs[NL80211_ATTR_MAC],
ETH_ALEN);
if (!is_valid_ether_addr(params.macaddr))
@@ -2634,12 +2650,15 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev = rdev_add_virtual_intf(rdev,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
- if (IS_ERR(wdev)) {
+ if (WARN_ON(!wdev)) {
+ nlmsg_free(msg);
+ return -EPROTO;
+ } else if (IS_ERR(wdev)) {
nlmsg_free(msg);
return PTR_ERR(wdev);
}
- if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
+ if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
wdev->owner_nlportid = info->snd_portid;
switch (type) {
@@ -2675,11 +2694,25 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
}
if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
- rdev, wdev) < 0) {
+ rdev, wdev, false) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
+ event = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (event) {
+ if (nl80211_send_iface(event, 0, 0, 0,
+ rdev, wdev, false) < 0) {
+ nlmsg_free(event);
+ goto out;
+ }
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
+ event, 0, NL80211_MCGRP_CONFIG,
+ GFP_KERNEL);
+ }
+
+out:
return genlmsg_reply(msg, info);
}
@@ -2687,10 +2720,18 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
+ struct sk_buff *msg;
+ int status;
if (!rdev->ops->del_virtual_intf)
return -EOPNOTSUPP;
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) {
+ nlmsg_free(msg);
+ msg = NULL;
+ }
+
/*
* If we remove a wireless device without a netdev then clear
* user_ptr[1] so that nl80211_post_doit won't dereference it
@@ -2701,7 +2742,15 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
if (!wdev->netdev)
info->user_ptr[1] = NULL;
- return rdev_del_virtual_intf(rdev, wdev);
+ status = rdev_del_virtual_intf(rdev, wdev);
+ if (status >= 0 && msg)
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
+ msg, 0, NL80211_MCGRP_CONFIG,
+ GFP_KERNEL);
+ else
+ nlmsg_free(msg);
+
+ return status;
}
static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -4398,10 +4447,12 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- u8 *mac_addr = NULL;
+ struct station_del_parameters params;
+
+ memset(&params, 0, sizeof(params));
if (info->attrs[NL80211_ATTR_MAC])
- mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
@@ -4412,7 +4463,28 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->del_station)
return -EOPNOTSUPP;
- return rdev_del_station(rdev, dev, mac_addr);
+ if (info->attrs[NL80211_ATTR_MGMT_SUBTYPE]) {
+ params.subtype =
+ nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]);
+ if (params.subtype != IEEE80211_STYPE_DISASSOC >> 4 &&
+ params.subtype != IEEE80211_STYPE_DEAUTH >> 4)
+ return -EINVAL;
+ } else {
+ /* Default to Deauthentication frame */
+ params.subtype = IEEE80211_STYPE_DEAUTH >> 4;
+ }
+
+ if (info->attrs[NL80211_ATTR_REASON_CODE]) {
+ params.reason_code =
+ nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ if (params.reason_code == 0)
+ return -EINVAL; /* 0 is reserved */
+ } else {
+ /* Default to reason code 2 */
+ params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
+ }
+
+ return rdev_del_station(rdev, dev, &params);
}
static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
@@ -4423,7 +4495,7 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
void *hdr;
struct nlattr *pinfoattr;
- hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
+ hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_MPATH);
if (!hdr)
return -1;
@@ -4624,6 +4696,96 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
return rdev_del_mpath(rdev, dev, dst);
}
+static int nl80211_get_mpp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int err;
+ struct net_device *dev = info->user_ptr[1];
+ struct mpath_info pinfo;
+ struct sk_buff *msg;
+ u8 *dst = NULL;
+ u8 mpp[ETH_ALEN];
+
+ memset(&pinfo, 0, sizeof(pinfo));
+
+ if (!info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (!rdev->ops->get_mpp)
+ return -EOPNOTSUPP;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
+ err = rdev_get_mpp(rdev, dev, dst, mpp, &pinfo);
+ if (err)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0,
+ dev, dst, mpp, &pinfo) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int nl80211_dump_mpp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct mpath_info pinfo;
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ u8 dst[ETH_ALEN];
+ u8 mpp[ETH_ALEN];
+ int path_idx = cb->args[2];
+ int err;
+
+ err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ if (err)
+ return err;
+
+ if (!rdev->ops->dump_mpp) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ while (1) {
+ err = rdev_dump_mpp(rdev, wdev->netdev, path_idx, dst,
+ mpp, &pinfo);
+ if (err == -ENOENT)
+ break;
+ if (err)
+ goto out_err;
+
+ if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ wdev->netdev, dst, mpp,
+ &pinfo) < 0)
+ goto out;
+
+ path_idx++;
+ }
+
+ out:
+ cb->args[2] = path_idx;
+ err = skb->len;
+ out_err:
+ nl80211_finish_wdev_dump(rdev);
+ return err;
+}
+
static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -5260,11 +5422,11 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
struct nlattr *nl_reg_rule;
- char *alpha2 = NULL;
- int rem_reg_rules = 0, r = 0;
+ char *alpha2;
+ int rem_reg_rules, r;
u32 num_rules = 0, rule_idx = 0, size_of_regd;
enum nl80211_dfs_regions dfs_region = NL80211_DFS_UNSET;
- struct ieee80211_regdomain *rd = NULL;
+ struct ieee80211_regdomain *rd;
if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
return -EINVAL;
@@ -5358,6 +5520,43 @@ static int validate_scan_freqs(struct nlattr *freqs)
return n_channels;
}
+static int nl80211_parse_random_mac(struct nlattr **attrs,
+ u8 *mac_addr, u8 *mac_addr_mask)
+{
+ int i;
+
+ if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
+ memset(mac_addr, 0, ETH_ALEN);
+ memset(mac_addr_mask, 0, ETH_ALEN);
+ mac_addr[0] = 0x2;
+ mac_addr_mask[0] = 0x3;
+
+ return 0;
+ }
+
+ /* need both or none */
+ if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_MAC_MASK])
+ return -EINVAL;
+
+ memcpy(mac_addr, nla_data(attrs[NL80211_ATTR_MAC]), ETH_ALEN);
+ memcpy(mac_addr_mask, nla_data(attrs[NL80211_ATTR_MAC_MASK]), ETH_ALEN);
+
+ /* the mask must fix the multicast bit, and the address itself must not be multicast */
+ if (!is_multicast_ether_addr(mac_addr_mask) ||
+ is_multicast_ether_addr(mac_addr))
+ return -EINVAL;
+
+ /*
+ * allow users to pass a MAC address that has bits set outside
+ * of the mask, but don't bother drivers with having to deal
+ * with such bits
+ */
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] &= mac_addr_mask[i];
+
+ return 0;
+}
+
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
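For the random-MAC scan support added above, nl80211_parse_random_mac() defaults to address 02:00:00:00:00:00 with mask 03:00:00:00:00:00 (fixing only the locally-administered and unicast bits) and otherwise clears any address bits that fall outside the supplied mask. A standalone sketch of that masking step, using plain C with no kernel headers; only the default values are taken from the hunk above:

#include <stdio.h>

#define ETH_ALEN 6

/* Mirror of the masking loop above: keep only the address bits the
 * mask marks as fixed; everything else is left for the driver or
 * hardware to randomize. */
static void apply_mac_mask(unsigned char addr[ETH_ALEN],
			   const unsigned char mask[ETH_ALEN])
{
	for (int i = 0; i < ETH_ALEN; i++)
		addr[i] &= mask[i];
}

int main(void)
{
	/* Default mask: only the locally-administered/unicast bits are fixed. */
	unsigned char mask[ETH_ALEN] = { 0x03, 0, 0, 0, 0, 0 };
	/* Pretend userspace passed an address with stray bits set. */
	unsigned char addr[ETH_ALEN] = { 0x06, 0xaa, 0xbb, 0, 0, 0 };

	apply_mac_mask(addr, mask);	/* -> 02:00:00:00:00:00 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}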
@@ -5535,6 +5734,25 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
err = -EOPNOTSUPP;
goto out_free;
}
+
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ if (!(wiphy->features &
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (wdev->current_bss) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = nl80211_parse_random_mac(info->attrs,
+ request->mac_addr,
+ request->mac_addr_mask);
+ if (err)
+ goto out_free;
+ }
}
request->no_cck =
@@ -5561,14 +5779,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static int nl80211_start_sched_scan(struct sk_buff *skb,
- struct genl_info *info)
+static struct cfg80211_sched_scan_request *
+nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct nlattr **attrs)
{
struct cfg80211_sched_scan_request *request;
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
- struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
@@ -5576,38 +5792,32 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
- if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
- !rdev->ops->sched_scan_start)
- return -EOPNOTSUPP;
-
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
+ if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
+ return ERR_PTR(-EINVAL);
- if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
- return -EINVAL;
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
- interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
if (interval == 0)
- return -EINVAL;
-
- wiphy = &rdev->wiphy;
+ return ERR_PTR(-EINVAL);
- if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+ if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
- info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
+ attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
} else {
n_channels = ieee80211_get_num_supported_channels(wiphy);
}
- if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
- nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+ if (attrs[NL80211_ATTR_SCAN_SSIDS])
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
tmp)
n_ssids++;
if (n_ssids > wiphy->max_sched_scan_ssids)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/*
* First, count the number of 'real' matchsets. Due to an issue with
@@ -5618,9 +5828,9 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
* older userspace that treated a matchset with only the RSSI as the
* global RSSI for all other matchsets - if there are other matchsets.
*/
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
struct nlattr *rssi;
@@ -5628,7 +5838,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
nla_data(attr), nla_len(attr),
nl80211_match_policy);
if (err)
- return err;
+ return ERR_PTR(err);
/* add other standalone attributes here */
if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
n_match_sets++;
@@ -5645,30 +5855,23 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
n_match_sets = 1;
if (n_match_sets > wiphy->max_match_sets)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- if (info->attrs[NL80211_ATTR_IE])
- ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ if (attrs[NL80211_ATTR_IE])
+ ie_len = nla_len(attrs[NL80211_ATTR_IE]);
else
ie_len = 0;
if (ie_len > wiphy->max_sched_scan_ie_len)
- return -EINVAL;
-
- if (rdev->sched_scan_req) {
- err = -EINPROGRESS;
- goto out;
- }
+ return ERR_PTR(-EINVAL);
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
- if (!request) {
- err = -ENOMEM;
- goto out;
- }
+ if (!request)
+ return ERR_PTR(-ENOMEM);
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
@@ -5693,10 +5896,10 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->n_match_sets = n_match_sets;
i = 0;
- if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+ if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
+ attrs[NL80211_ATTR_SCAN_FREQUENCIES],
tmp) {
struct ieee80211_channel *chan;
@@ -5742,8 +5945,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->n_channels = i;
i = 0;
- if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
- nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+ if (attrs[NL80211_ATTR_SCAN_SSIDS]) {
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -5757,9 +5960,9 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
}
i = 0;
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
struct nlattr *ssid, *rssi;
@@ -5799,7 +6002,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
}
/* there was no other matchset, so the RSSI one is alone */
- if (i == 0)
+ if (i == 0 && n_match_sets)
request->match_sets[0].rssi_thold = default_match_rssi;
request->min_rssi_thold = INT_MAX;
@@ -5814,36 +6017,88 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (ie_len) {
request->ie_len = ie_len;
memcpy((void *)request->ie,
- nla_data(info->attrs[NL80211_ATTR_IE]),
+ nla_data(attrs[NL80211_ATTR_IE]),
request->ie_len);
}
- if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
+ if (attrs[NL80211_ATTR_SCAN_FLAGS]) {
request->flags = nla_get_u32(
- info->attrs[NL80211_ATTR_SCAN_FLAGS]);
+ attrs[NL80211_ATTR_SCAN_FLAGS]);
if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
!(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) {
err = -EOPNOTSUPP;
goto out_free;
}
+
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ u32 flg = NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR;
+
+ if (!wdev) /* must be net-detect */
+ flg = NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+
+ if (!(wiphy->features & flg)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (wdev && wdev->current_bss) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = nl80211_parse_random_mac(attrs, request->mac_addr,
+ request->mac_addr_mask);
+ if (err)
+ goto out_free;
+ }
}
- request->dev = dev;
- request->wiphy = &rdev->wiphy;
request->interval = interval;
request->scan_start = jiffies;
- err = rdev_sched_scan_start(rdev, dev, request);
- if (!err) {
- rdev->sched_scan_req = request;
- nl80211_send_sched_scan(rdev, dev,
- NL80211_CMD_START_SCHED_SCAN);
- goto out;
- }
+ return request;
out_free:
kfree(request);
-out:
+ return ERR_PTR(err);
+}
+
+static int nl80211_start_sched_scan(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
+ !rdev->ops->sched_scan_start)
+ return -EOPNOTSUPP;
+
+ if (rdev->sched_scan_req)
+ return -EINPROGRESS;
+
+ rdev->sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev,
+ info->attrs);
+ err = PTR_ERR_OR_ZERO(rdev->sched_scan_req);
+ if (err)
+ goto out_err;
+
+ err = rdev_sched_scan_start(rdev, dev, rdev->sched_scan_req);
+ if (err)
+ goto out_free;
+
+ rdev->sched_scan_req->dev = dev;
+ rdev->sched_scan_req->wiphy = &rdev->wiphy;
+
+ nl80211_send_sched_scan(rdev, dev,
+ NL80211_CMD_START_SCHED_SCAN);
+ return 0;
+
+out_free:
+ kfree(rdev->sched_scan_req);
+out_err:
+ rdev->sched_scan_req = NULL;
return err;
}
@@ -5923,7 +6178,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
* function is called under RTNL lock, so this should not be a problem.
*/
static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
- u8 radar_detect_width = 0;
int err;
bool need_new_beacon = false;
int len, i;
@@ -6059,10 +6313,8 @@ skip_beacons:
if (err < 0)
return err;
- if (err > 0) {
- radar_detect_width = BIT(params.chandef.width);
+ if (err > 0)
params.radar_required = true;
- }
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
@@ -6311,8 +6563,6 @@ static int nl80211_dump_survey(struct sk_buff *skb,
}
while (1) {
- struct ieee80211_channel *chan;
-
res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
if (res == -ENOENT)
break;
@@ -6325,9 +6575,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
goto out;
}
- chan = ieee80211_get_channel(&rdev->wiphy,
- survey.channel->center_freq);
- if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+ if (survey.channel->flags & IEEE80211_CHAN_DISABLED) {
survey_idx++;
continue;
}
@@ -8159,6 +8407,28 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+static int nl80211_join_ocb(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct ocb_setup setup = {};
+ int err;
+
+ err = nl80211_parse_chandef(rdev, info, &setup.chandef);
+ if (err)
+ return err;
+
+ return cfg80211_join_ocb(rdev, dev, &setup);
+}
+
+static int nl80211_leave_ocb(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+
+ return cfg80211_leave_ocb(rdev, dev);
+}
+
static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8542,6 +8812,39 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
return 0;
}
+static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev,
+ const struct wiphy_wowlan_support *wowlan,
+ struct nlattr *attr,
+ struct cfg80211_wowlan *trig)
+{
+ struct nlattr **tb;
+ int err;
+
+ tb = kzalloc(NUM_NL80211_ATTR * sizeof(*tb), GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+
+ if (!(wowlan->flags & WIPHY_WOWLAN_NET_DETECT)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = nla_parse(tb, NL80211_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_policy);
+ if (err)
+ goto out;
+
+ trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb);
+ err = PTR_ERR_OR_ZERO(trig->nd_config);
+ if (err)
+ trig->nd_config = NULL;
+
+out:
+ kfree(tb);
+ return err;
+}
+
static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8687,6 +8990,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
goto error;
}
+ if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) {
+ err = nl80211_parse_wowlan_nd(
+ rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT],
+ &new_triggers);
+ if (err)
+ goto error;
+ }
+
ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
if (!ntrig) {
err = -ENOMEM;
@@ -9444,7 +9755,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
u16 admitted_time = 0;
int err;
- if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION))
+ if (!(rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION))
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] ||
@@ -9460,12 +9771,10 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* WMM uses TIDs 0-7 even for TSPEC */
- if (tsid < IEEE80211_FIRST_TSPEC_TSID) {
- if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION))
- return -EINVAL;
- } else {
+ if (tsid >= IEEE80211_FIRST_TSPEC_TSID) {
/* TODO: handle 802.11 TSPEC/admission control
- * need more attributes for that (e.g. BA session requirement)
+ * need more attributes for that (e.g. BA session requirement);
+ * change the WMM admission test above to allow both then
*/
return -EINVAL;
}
@@ -9521,6 +9830,98 @@ static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_tdls_channel_switch(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_chan_def chandef = {};
+ const u8 *addr;
+ u8 oper_class;
+ int err;
+
+ if (!rdev->ops->tdls_channel_switch ||
+ !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
+ return -EOPNOTSUPP;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC] ||
+ !info->attrs[NL80211_ATTR_OPER_CLASS])
+ return -EINVAL;
+
+ err = nl80211_parse_chandef(rdev, info, &chandef);
+ if (err)
+ return err;
+
+ /*
+ * Don't allow wide channels on the 2.4 GHz band, as per IEEE 802.11-2012
+ * section 10.22.6.2.1. Disallow 5/10 MHz channels as well for now, since
+ * the specification does not cover them.
+ */
+ if (chandef.chan->band == IEEE80211_BAND_2GHZ &&
+ chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+ chandef.width != NL80211_CHAN_WIDTH_20)
+ return -EINVAL;
+
+ /* we will be active on the TDLS link */
+ if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype))
+ return -EINVAL;
+
+ /* don't allow switching to DFS channels */
+ if (cfg80211_chandef_dfs_required(wdev->wiphy, &chandef, wdev->iftype))
+ return -EINVAL;
+
+ addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ oper_class = nla_get_u8(info->attrs[NL80211_ATTR_OPER_CLASS]);
+
+ wdev_lock(wdev);
+ err = rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const u8 *addr;
+
+ if (!rdev->ops->tdls_channel_switch ||
+ !rdev->ops->tdls_cancel_channel_switch ||
+ !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
+ return -EOPNOTSUPP;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ wdev_lock(wdev);
+ rdev_tdls_cancel_channel_switch(rdev, dev, addr);
+ wdev_unlock(wdev);
+
+ return 0;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -9782,6 +10183,15 @@ static const struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_GET_MPP,
+ .doit = nl80211_get_mpp,
+ .dumpit = nl80211_dump_mpp,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_SET_MPATH,
.doit = nl80211_set_mpath,
.policy = nl80211_policy,
@@ -10095,6 +10505,22 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_JOIN_OCB,
+ .doit = nl80211_join_ocb,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_LEAVE_OCB,
+ .doit = nl80211_leave_ocb,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
#ifdef CONFIG_PM
{
.cmd = NL80211_CMD_GET_WOWLAN,
@@ -10294,6 +10720,22 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH,
+ .doit = nl80211_tdls_channel_switch,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
+ .doit = nl80211_tdls_cancel_channel_switch,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
/* notification functions */
@@ -11325,55 +11767,155 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
}
EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
-void cfg80211_cqm_rssi_notify(struct net_device *dev,
- enum nl80211_cqm_rssi_threshold_event rssi_event,
- gfp_t gfp)
+static struct sk_buff *cfg80211_prepare_cqm(struct net_device *dev,
+ const char *mac, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct wiphy *wiphy = wdev->wiphy;
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct sk_buff *msg;
- struct nlattr *pinfoattr;
- void *hdr;
-
- trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ void **cb;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
- return;
+ return NULL;
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
- if (!hdr) {
+ cb = (void **)msg->cb;
+
+ cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!cb[0]) {
nlmsg_free(msg);
- return;
+ return NULL;
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
- pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
- if (!pinfoattr)
+ if (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac))
goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
- rssi_event))
+ cb[1] = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!cb[1])
goto nla_put_failure;
- nla_nest_end(msg, pinfoattr);
+ cb[2] = rdev;
- genlmsg_end(msg, hdr);
+ return msg;
+ nla_put_failure:
+ nlmsg_free(msg);
+ return NULL;
+}
+
+static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp)
+{
+ void **cb = (void **)msg->cb;
+ struct cfg80211_registered_device *rdev = cb[2];
+
+ nla_nest_end(msg, cb[1]);
+ genlmsg_end(msg, cb[0]);
+
+ memset(msg->cb, 0, sizeof(msg->cb));
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, gfp);
+}
+
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ enum nl80211_cqm_rssi_threshold_event rssi_event,
+ gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
+
+ if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW &&
+ rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
+ return;
+
+ msg = cfg80211_prepare_cqm(dev, NULL, gfp);
+ if (!msg)
+ return;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ rssi_event))
+ goto nla_put_failure;
+
+ cfg80211_send_cqm(msg, gfp);
+
return;
nla_put_failure:
- genlmsg_cancel(msg, hdr);
nlmsg_free(msg);
}
EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
+void cfg80211_cqm_txe_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets,
+ u32 rate, u32 intvl, gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ msg = cfg80211_prepare_cqm(dev, peer, gfp);
+ if (!msg)
+ return;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl))
+ goto nla_put_failure;
+
+ cfg80211_send_cqm(msg, gfp);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
+
+void cfg80211_cqm_pktloss_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets, gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
+
+ msg = cfg80211_prepare_cqm(dev, peer, gfp);
+ if (!msg)
+ return;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
+ goto nla_put_failure;
+
+ cfg80211_send_cqm(msg, gfp);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
+
+void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ msg = cfg80211_prepare_cqm(dev, NULL, gfp);
+ if (!msg)
+ return;
+
+ if (nla_put_flag(msg, NL80211_ATTR_CQM_BEACON_LOSS_EVENT))
+ goto nla_put_failure;
+
+ cfg80211_send_cqm(msg, gfp);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_cqm_beacon_loss_notify);
+
static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp)
@@ -11491,7 +12033,9 @@ EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- gfp_t gfp)
+ gfp_t gfp,
+ enum nl80211_commands notif,
+ u8 count)
{
struct sk_buff *msg;
void *hdr;
@@ -11500,7 +12044,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY);
+ hdr = nl80211hdr_put(msg, 0, 0, 0, notif);
if (!hdr) {
nlmsg_free(msg);
return;
@@ -11512,6 +12056,10 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
if (nl80211_send_chandef(msg, chandef))
goto nla_put_failure;
+ if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) &&
+ (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count)))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -11534,70 +12082,27 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
trace_cfg80211_ch_switch_notify(dev, chandef);
- if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
- wdev->iftype != NL80211_IFTYPE_P2P_GO &&
- wdev->iftype != NL80211_IFTYPE_ADHOC &&
- wdev->iftype != NL80211_IFTYPE_MESH_POINT))
- return;
-
wdev->chandef = *chandef;
wdev->preset_chandef = *chandef;
- nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
+ nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
-void cfg80211_cqm_txe_notify(struct net_device *dev,
- const u8 *peer, u32 num_packets,
- u32 rate, u32 intvl, gfp_t gfp)
+void cfg80211_ch_switch_started_notify(struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u8 count)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct sk_buff *msg;
- struct nlattr *pinfoattr;
- void *hdr;
-
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
- if (!msg)
- return;
-
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
- if (!hdr) {
- nlmsg_free(msg);
- return;
- }
-
- if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
- goto nla_put_failure;
-
- pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
- if (!pinfoattr)
- goto nla_put_failure;
- if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets))
- goto nla_put_failure;
-
- if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate))
- goto nla_put_failure;
-
- if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl))
- goto nla_put_failure;
-
- nla_nest_end(msg, pinfoattr);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef);
- genlmsg_end(msg, hdr);
-
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
- NL80211_MCGRP_MLME, gfp);
- return;
-
- nla_put_failure:
- genlmsg_cancel(msg, hdr);
- nlmsg_free(msg);
+ nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
+ NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count);
}
-EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
+EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
void
nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -11647,54 +12152,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
-void cfg80211_cqm_pktloss_notify(struct net_device *dev,
- const u8 *peer, u32 num_packets, gfp_t gfp)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct wiphy *wiphy = wdev->wiphy;
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct sk_buff *msg;
- struct nlattr *pinfoattr;
- void *hdr;
-
- trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
- if (!msg)
- return;
-
- hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
- if (!hdr) {
- nlmsg_free(msg);
- return;
- }
-
- if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
- goto nla_put_failure;
-
- pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
- if (!pinfoattr)
- goto nla_put_failure;
-
- if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
- goto nla_put_failure;
-
- nla_nest_end(msg, pinfoattr);
-
- genlmsg_end(msg, hdr);
-
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
- NL80211_MCGRP_MLME, gfp);
- return;
-
- nla_put_failure:
- genlmsg_cancel(msg, hdr);
- nlmsg_free(msg);
-}
-EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
-
void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
u64 cookie, bool acked, gfp_t gfp)
{
@@ -11782,6 +12239,67 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
EXPORT_SYMBOL(cfg80211_report_obss_beacon);
#ifdef CONFIG_PM
+static int cfg80211_net_detect_results(struct sk_buff *msg,
+ struct cfg80211_wowlan_wakeup *wakeup)
+{
+ struct cfg80211_wowlan_nd_info *nd = wakeup->net_detect;
+ struct nlattr *nl_results, *nl_match, *nl_freqs;
+ int i, j;
+
+ nl_results = nla_nest_start(
+ msg, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS);
+ if (!nl_results)
+ return -EMSGSIZE;
+
+ for (i = 0; i < nd->n_matches; i++) {
+ struct cfg80211_wowlan_nd_match *match = nd->matches[i];
+
+ nl_match = nla_nest_start(msg, i);
+ if (!nl_match)
+ break;
+
+ /* The SSID attribute is optional in nl80211, but for
+ * simplicity reasons it's always present in the
+ * cfg80211 structure. If a driver can't pass the
+ * SSID, that needs to be changed. A zero length SSID
+ * is still a valid SSID (wildcard), so it cannot be
+ * used for this purpose.
+ */
+ if (nla_put(msg, NL80211_ATTR_SSID, match->ssid.ssid_len,
+ match->ssid.ssid)) {
+ nla_nest_cancel(msg, nl_match);
+ goto out;
+ }
+
+ if (match->n_channels) {
+ nl_freqs = nla_nest_start(
+ msg, NL80211_ATTR_SCAN_FREQUENCIES);
+ if (!nl_freqs) {
+ nla_nest_cancel(msg, nl_match);
+ goto out;
+ }
+
+ for (j = 0; j < match->n_channels; j++) {
+ if (nla_put_u32(msg,
+ NL80211_ATTR_WIPHY_FREQ,
+ match->channels[j])) {
+ nla_nest_cancel(msg, nl_freqs);
+ nla_nest_cancel(msg, nl_match);
+ goto out;
+ }
+ }
+
+ nla_nest_end(msg, nl_freqs);
+ }
+
+ nla_nest_end(msg, nl_match);
+ }
+
+out:
+ nla_nest_end(msg, nl_results);
+ return 0;
+}
+
void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
struct cfg80211_wowlan_wakeup *wakeup,
gfp_t gfp)
@@ -11876,6 +12394,10 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
goto free_msg;
}
+ if (wakeup->net_detect &&
+ cfg80211_net_detect_results(msg, wakeup))
+ goto free_msg;
+
nla_nest_end(msg, reasons);
}
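The CQM notifiers above all reduce to the same prepare/put/send shape around the new cfg80211_prepare_cqm()/cfg80211_send_cqm() helpers. A rough sketch of that pattern (NL80211_ATTR_CQM_FOO and the foo naming are hypothetical; only the helper calls come from this patch):

/* Sketch only: NL80211_ATTR_CQM_FOO is a made-up attribute. The helpers are
 * assumed to behave like the open-coded versions removed above: prepare
 * allocates the skb, writes the NL80211_CMD_NOTIFY_CQM header and opens the
 * NL80211_ATTR_CQM nest; send closes the nest and multicasts to MLME listeners.
 */
void cfg80211_cqm_foo_notify(struct net_device *dev, u32 value, gfp_t gfp)
{
	struct sk_buff *msg;

	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
	if (!msg)
		return;

	if (nla_put_u32(msg, NL80211_ATTR_CQM_FOO, value))
		goto nla_put_failure;

	cfg80211_send_cqm(msg, gfp);
	return;

 nla_put_failure:
	nlmsg_free(msg);
}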
diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c
new file mode 100644
index 000000000000..c00d4a792319
--- /dev/null
+++ b/net/wireless/ocb.c
@@ -0,0 +1,88 @@
+/*
+ * OCB mode implementation
+ *
+ * Copyright: (c) 2014 Czech Technical University in Prague
+ * (c) 2014 Volkswagen Group Research
+ * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
+ * Funded by: Volkswagen Group Research
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "nl80211.h"
+#include "core.h"
+#include "rdev-ops.h"
+
+int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ocb_setup *setup)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(!setup->chandef.chan))
+ return -EINVAL;
+
+ err = rdev_join_ocb(rdev, dev, setup);
+ if (!err)
+ wdev->chandef = setup->chandef;
+
+ return err;
+}
+
+int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ocb_setup *setup)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_join_ocb(rdev, dev, setup);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->leave_ocb)
+ return -EOPNOTSUPP;
+
+ err = rdev_leave_ocb(rdev, dev);
+ if (!err)
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
+
+ return err;
+}
+
+int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_leave_ocb(rdev, dev);
+ wdev_unlock(wdev);
+
+ return err;
+}
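These join/leave entry points reach the driver through rdev_join_ocb()/rdev_leave_ocb() (added to rdev-ops.h below), which dispatch to the new join_ocb/leave_ocb members of struct cfg80211_ops. A minimal, hypothetical driver-side wiring; the drv_* names are made up, only the ops fields and prototypes are taken from this patch:

/* Hypothetical driver glue; prototypes match the rdev->ops->join_ocb /
 * ->leave_ocb calls in rdev-ops.h below.
 */
static int drv_join_ocb(struct wiphy *wiphy, struct net_device *dev,
			struct ocb_setup *setup)
{
	/* configure the hardware for the channel in setup->chandef */
	return 0;
}

static int drv_leave_ocb(struct wiphy *wiphy, struct net_device *dev)
{
	/* stop OCB operation */
	return 0;
}

static const struct cfg80211_ops drv_cfg80211_ops = {
	.join_ocb	= drv_join_ocb,
	.leave_ocb	= drv_leave_ocb,
	/* ... remaining ops ... */
};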
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index f6d457d6a558..35cfb7134bdb 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -178,11 +178,12 @@ static inline int rdev_add_station(struct cfg80211_registered_device *rdev,
}
static inline int rdev_del_station(struct cfg80211_registered_device *rdev,
- struct net_device *dev, u8 *mac)
+ struct net_device *dev,
+ struct station_del_parameters *params)
{
int ret;
- trace_rdev_del_station(&rdev->wiphy, dev, mac);
- ret = rdev->ops->del_station(&rdev->wiphy, dev, mac);
+ trace_rdev_del_station(&rdev->wiphy, dev, params);
+ ret = rdev->ops->del_station(&rdev->wiphy, dev, params);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -263,6 +264,18 @@ static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev,
}
+static inline int rdev_get_mpp(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u8 *dst, u8 *mpp,
+ struct mpath_info *pinfo)
+{
+ int ret;
+
+ trace_rdev_get_mpp(&rdev->wiphy, dev, dst, mpp);
+ ret = rdev->ops->get_mpp(&rdev->wiphy, dev, dst, mpp, pinfo);
+ trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
+ return ret;
+}
+
static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev,
struct net_device *dev, int idx, u8 *dst,
u8 *next_hop, struct mpath_info *pinfo)
@@ -271,7 +284,20 @@ static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev,
int ret;
trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop);
ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop,
- pinfo);
+ pinfo);
+ trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
+ return ret;
+}
+
+static inline int rdev_dump_mpp(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, int idx, u8 *dst,
+ u8 *mpp, struct mpath_info *pinfo)
+{
+ int ret;
+
+ trace_rdev_dump_mpp(&rdev->wiphy, dev, idx, dst, mpp);
+ ret = rdev->ops->dump_mpp(&rdev->wiphy, dev, idx, dst, mpp, pinfo);
trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
return ret;
}
@@ -322,6 +348,27 @@ static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int rdev_join_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ocb_setup *setup)
+{
+ int ret;
+ trace_rdev_join_ocb(&rdev->wiphy, dev, setup);
+ ret = rdev->ops->join_ocb(&rdev->wiphy, dev, setup);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline int rdev_leave_ocb(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ int ret;
+ trace_rdev_leave_ocb(&rdev->wiphy, dev);
+ ret = rdev->ops->leave_ocb(&rdev->wiphy, dev);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int rdev_change_bss(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct bss_parameters *params)
@@ -946,4 +993,28 @@ rdev_del_tx_ts(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_tdls_channel_switch(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *addr,
+ u8 oper_class, struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ trace_rdev_tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class,
+ chandef);
+ ret = rdev->ops->tdls_channel_switch(&rdev->wiphy, dev, addr,
+ oper_class, chandef);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline void
+rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *addr)
+{
+ trace_rdev_tdls_cancel_channel_switch(&rdev->wiphy, dev, addr);
+ rdev->ops->tdls_cancel_channel_switch(&rdev->wiphy, dev, addr);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b725a31a4751..7b8309840d4e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -56,6 +56,7 @@
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
+#include "rdev-ops.h"
#include "regdb.h"
#include "nl80211.h"
@@ -66,6 +67,12 @@
#define REG_DBG_PRINT(args...)
#endif
+/*
+ * Grace period we give before making sure all current interfaces reside on
+ * channels allowed by the current regulatory domain.
+ */
+#define REG_ENFORCE_GRACE_MS 60000
+
/**
* enum reg_request_treatment - regulatory request treatment
*
@@ -210,6 +217,9 @@ struct reg_beacon {
struct ieee80211_channel chan;
};
+static void reg_check_chans_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work);
+
static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);
@@ -573,8 +583,9 @@ static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
return get_cfg80211_regdom();
}
-unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
- const struct ieee80211_reg_rule *rule)
+static unsigned int
+reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule)
{
const struct ieee80211_freq_range *freq_range = &rule->freq_range;
const struct ieee80211_freq_range *freq_range_tmp;
@@ -622,6 +633,27 @@ unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
return end_freq - start_freq;
}
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule)
+{
+ unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule);
+
+ if (rule->flags & NL80211_RRF_NO_160MHZ)
+ bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80));
+ if (rule->flags & NL80211_RRF_NO_80MHZ)
+ bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40));
+
+ /*
+ * HT40+/HT40- limits are handled per-channel. Only limit BW if both
+ * are not allowed.
+ */
+ if (rule->flags & NL80211_RRF_NO_HT40MINUS &&
+ rule->flags & NL80211_RRF_NO_HT40PLUS)
+ bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20));
+
+ return bw;
+}
+
/* Sanity check on a regulatory rule */
static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
{
@@ -946,6 +978,16 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_OFDM;
if (rd_flags & NL80211_RRF_NO_OUTDOOR)
channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
+ if (rd_flags & NL80211_RRF_GO_CONCURRENT)
+ channel_flags |= IEEE80211_CHAN_GO_CONCURRENT;
+ if (rd_flags & NL80211_RRF_NO_HT40MINUS)
+ channel_flags |= IEEE80211_CHAN_NO_HT40MINUS;
+ if (rd_flags & NL80211_RRF_NO_HT40PLUS)
+ channel_flags |= IEEE80211_CHAN_NO_HT40PLUS;
+ if (rd_flags & NL80211_RRF_NO_80MHZ)
+ channel_flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (rd_flags & NL80211_RRF_NO_160MHZ)
+ channel_flags |= IEEE80211_CHAN_NO_160MHZ;
return channel_flags;
}
@@ -1486,6 +1528,102 @@ static void reg_call_notifier(struct wiphy *wiphy,
wiphy->reg_notifier(wiphy, request);
}
+static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct ieee80211_channel *ch;
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ bool ret = true;
+
+ wdev_lock(wdev);
+
+ if (!wdev->netdev || !netif_running(wdev->netdev))
+ goto out;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (!wdev->beacon_interval)
+ goto out;
+
+ ret = cfg80211_reg_can_beacon(wiphy,
+ &wdev->chandef, wdev->iftype);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!wdev->ssid_len)
+ goto out;
+
+ ret = cfg80211_reg_can_beacon(wiphy,
+ &wdev->chandef, wdev->iftype);
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (!wdev->current_bss ||
+ !wdev->current_bss->pub.channel)
+ goto out;
+
+ ch = wdev->current_bss->pub.channel;
+ if (rdev->ops->get_channel &&
+ !rdev_get_channel(rdev, wdev, &chandef))
+ ret = cfg80211_chandef_usable(wiphy, &chandef,
+ IEEE80211_CHAN_DISABLED);
+ else
+ ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* no enforcement required */
+ break;
+ default:
+ /* others not implemented for now */
+ WARN_ON(1);
+ break;
+ }
+
+out:
+ wdev_unlock(wdev);
+ return ret;
+}
+
+static void reg_leave_invalid_chans(struct wiphy *wiphy)
+{
+ struct wireless_dev *wdev;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(wdev, &rdev->wdev_list, list)
+ if (!reg_wdev_chan_valid(wiphy, wdev))
+ cfg80211_leave(rdev, wdev);
+}
+
+static void reg_check_chans_work(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ REG_DBG_PRINT("Verifying active interfaces after reg change\n");
+ rtnl_lock();
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+ if (!(rdev->wiphy.regulatory_flags &
+ REGULATORY_IGNORE_STALE_KICKOFF))
+ reg_leave_invalid_chans(&rdev->wiphy);
+
+ rtnl_unlock();
+}
+
+static void reg_check_channels(void)
+{
+ /*
+ * Give usermode a chance to do something nicer (move to another
+ * channel, orderly disconnection), before forcing a disconnection.
+ */
+ mod_delayed_work(system_power_efficient_wq,
+ &reg_check_chans,
+ msecs_to_jiffies(REG_ENFORCE_GRACE_MS));
+}
+
static void wiphy_update_regulatory(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
@@ -1525,6 +1663,8 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
wiphy = &rdev->wiphy;
wiphy_update_regulatory(wiphy, initiator);
}
+
+ reg_check_channels();
}
static void handle_channel_custom(struct wiphy *wiphy,
@@ -1565,10 +1705,23 @@ static void handle_channel_custom(struct wiphy *wiphy,
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ chan->dfs_state_entered = jiffies;
+ chan->dfs_state = NL80211_DFS_USABLE;
+
+ chan->beacon_found = false;
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
chan->max_reg_power = chan->max_power =
(int) MBM_TO_DBM(power_rule->max_eirp);
+
+ if (chan->flags & IEEE80211_CHAN_RADAR) {
+ if (reg_rule->dfs_cac_ms)
+ chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+ else
+ chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+ }
+
+ chan->max_power = chan->max_reg_power;
}
static void handle_band_custom(struct wiphy *wiphy,
@@ -1760,7 +1913,7 @@ static enum reg_request_treatment
reg_process_hint_driver(struct wiphy *wiphy,
struct regulatory_request *driver_request)
{
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *regd, *tmp;
enum reg_request_treatment treatment;
treatment = __reg_process_hint_driver(driver_request);
@@ -1780,7 +1933,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
reg_free_request(driver_request);
return REG_REQ_IGNORE;
}
+
+ tmp = get_wiphy_regdom(wiphy);
rcu_assign_pointer(wiphy->regd, regd);
+ rcu_free_regdom(tmp);
}
@@ -1839,11 +1995,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
return REG_REQ_IGNORE;
return REG_REQ_ALREADY_SET;
}
- /*
- * Two consecutive Country IE hints on the same wiphy.
- * This should be picked up early by the driver/stack
- */
- if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
+
+ if (regdom_changes(country_ie_request->alpha2))
return REG_REQ_OK;
return REG_REQ_ALREADY_SET;
}
@@ -1931,8 +2084,10 @@ static void reg_process_hint(struct regulatory_request *reg_request)
/* This is required so that the orig_* parameters are saved */
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
- wiphy->regulatory_flags & REGULATORY_STRICT_REG)
+ wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
wiphy_update_regulatory(wiphy, reg_request->initiator);
+ reg_check_channels();
+ }
return;
@@ -2813,6 +2968,7 @@ void regulatory_exit(void)
cancel_work_sync(&reg_work);
cancel_delayed_work_sync(&reg_timeout);
+ cancel_delayed_work_sync(&reg_check_chans);
/* Lock to suppress warnings */
rtnl_lock();
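The enforcement added to reg.c works in two steps: every regulatory update calls reg_check_channels(), which (re)arms a delayed work after the REG_ENFORCE_GRACE_MS grace period, and reg_check_chans_work() then calls cfg80211_leave() for any interface whose channel fails reg_wdev_chan_valid(), unless the wiphy opted out. A driver that tracks regulatory changes itself can opt out at setup time; a sketch (the drv_* function is hypothetical, the flag is the one tested in reg_check_chans_work() above):

/* Hypothetical probe-time snippet; REGULATORY_IGNORE_STALE_KICKOFF is the
 * flag checked by reg_check_chans_work() above.
 */
static void drv_setup_wiphy(struct wiphy *wiphy)
{
	/* the driver reacts to regulatory updates itself, so skip the
	 * stale-channel kick-off after the grace period
	 */
	wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
}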
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dc1668ff543b..0ab3711c79a0 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -80,9 +80,18 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
if (!request)
return -ENOMEM;
- if (wdev->conn->params.channel)
+ if (wdev->conn->params.channel) {
+ enum ieee80211_band band = wdev->conn->params.channel->band;
+ struct ieee80211_supported_band *sband =
+ wdev->wiphy->bands[band];
+
+ if (!sband) {
+ kfree(request);
+ return -EINVAL;
+ }
request->channels[0] = wdev->conn->params.channel;
- else {
+ request->rates[band] = (1 << sband->n_bitrates) - 1;
+ } else {
int i = 0, j;
enum ieee80211_band band;
struct ieee80211_supported_band *bands;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 625a6e6d1168..ad38910f7036 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -600,6 +600,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss,
TP_ARGS(wiphy, netdev)
);
+DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ocb,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+ TP_ARGS(wiphy, netdev)
+);
+
DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
TP_ARGS(wiphy, netdev)
@@ -680,9 +685,34 @@ DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac))
);
-DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_station,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
- TP_ARGS(wiphy, netdev, mac)
+DECLARE_EVENT_CLASS(station_del,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct station_del_parameters *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(sta_mac)
+ __field(u8, subtype)
+ __field(u16, reason_code)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(sta_mac, params->mac);
+ __entry->subtype = params->subtype;
+ __entry->reason_code = params->reason_code;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
+ ", subtype: %u, reason_code: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
+ __entry->subtype, __entry->reason_code)
+);
+
+DEFINE_EVENT(station_del, rdev_del_station,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct station_del_parameters *params),
+ TP_ARGS(wiphy, netdev, params)
);
DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station,
@@ -801,6 +831,51 @@ TRACE_EVENT(rdev_dump_mpath,
MAC_PR_ARG(next_hop))
);
+TRACE_EVENT(rdev_get_mpp,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ u8 *dst, u8 *mpp),
+ TP_ARGS(wiphy, netdev, dst, mpp),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(dst)
+ MAC_ENTRY(mpp)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(dst, dst);
+ MAC_ASSIGN(mpp, mpp);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT
+ ", mpp: " MAC_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG,
+ MAC_PR_ARG(dst), MAC_PR_ARG(mpp))
+);
+
+TRACE_EVENT(rdev_dump_mpp,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+ u8 *dst, u8 *mpp),
+ TP_ARGS(wiphy, netdev, idx, dst, mpp),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(dst)
+ MAC_ENTRY(mpp)
+ __field(int, idx)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(dst, dst);
+ MAC_ASSIGN(mpp, mpp);
+ __entry->idx = idx;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
+ MAC_PR_FMT ", mpp: " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst),
+ MAC_PR_ARG(mpp))
+);
+
TRACE_EVENT(rdev_return_int_mpath_info,
TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo),
TP_ARGS(wiphy, ret, pinfo),
@@ -1246,6 +1321,22 @@ TRACE_EVENT(rdev_join_ibss,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid)
);
+TRACE_EVENT(rdev_join_ocb,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const struct ocb_setup *setup),
+ TP_ARGS(wiphy, netdev, setup),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG)
+);
+
TRACE_EVENT(rdev_set_wiphy_params,
TP_PROTO(struct wiphy *wiphy, u32 changed),
TP_ARGS(wiphy, changed),
@@ -1941,6 +2032,48 @@ TRACE_EVENT(rdev_del_tx_ts,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid)
);
+TRACE_EVENT(rdev_tdls_channel_switch,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *addr, u8 oper_class,
+ struct cfg80211_chan_def *chandef),
+ TP_ARGS(wiphy, netdev, addr, oper_class, chandef),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(addr)
+ __field(u8, oper_class)
+ CHAN_DEF_ENTRY
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(addr, addr);
+ CHAN_DEF_ASSIGN(chandef);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT
+ " oper class %d, " CHAN_DEF_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr),
+ __entry->oper_class, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(rdev_tdls_cancel_channel_switch,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *addr),
+ TP_ARGS(wiphy, netdev, addr),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(addr)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(addr, addr);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -2264,6 +2397,22 @@ TRACE_EVENT(cfg80211_ch_switch_notify,
NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
);
+TRACE_EVENT(cfg80211_ch_switch_started_notify,
+ TP_PROTO(struct net_device *netdev,
+ struct cfg80211_chan_def *chandef),
+ TP_ARGS(netdev, chandef),
+ TP_STRUCT__entry(
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ ),
+ TP_fast_assign(
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef);
+ ),
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
TRACE_EVENT(cfg80211_radar_event,
TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
TP_ARGS(wiphy, chandef),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 5e233a577d0f..d0ac795445b7 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -442,7 +442,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
break;
case cpu_to_le16(0):
if (iftype != NL80211_IFTYPE_ADHOC &&
- iftype != NL80211_IFTYPE_STATION)
+ iftype != NL80211_IFTYPE_STATION &&
+ iftype != NL80211_IFTYPE_OCB)
return -1;
break;
}
@@ -519,6 +520,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
break;
+ case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_ADHOC:
/* DA SA BSSID */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -937,6 +939,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
if (dev->ieee80211_ptr->use_4addr)
break;
/* fall through */
+ case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_ADHOC:
dev->priv_flags |= IFF_DONT_BRIDGE;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5ad4418ef093..d9149b68b9bc 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1170,7 +1170,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
skb_put(skb, len);
- rc = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
+ rc = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (rc)
goto out_kfree_skb;
@@ -1335,7 +1335,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
/* Currently, each datagram always contains a complete record */
msg->msg_flags |= MSG_EOR;
- rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ rc = skb_copy_datagram_msg(skb, 0, msg, copied);
if (rc)
goto out_free_dgram;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 88bf289abdc9..cee479bc655c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -55,6 +55,7 @@ static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);
+static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
@@ -561,7 +562,7 @@ static void xfrm_hash_resize(struct work_struct *work)
mutex_lock(&hash_resize_mutex);
total = 0;
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
if (xfrm_bydst_should_resize(net, dir, &total))
xfrm_bydst_resize(net, dir);
}
@@ -601,7 +602,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
write_lock_bh(&net->xfrm.xfrm_policy_lock);
/* reset the bydst and inexact table in all directions */
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
hmask = net->xfrm.policy_bydst[dir].hmask;
odst = net->xfrm.policy_bydst[dir].table;
@@ -779,8 +780,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
hlist_add_behind(&policy->bydst, newpos);
else
hlist_add_head(&policy->bydst, chain);
- xfrm_pol_hold(policy);
- net->xfrm.policy_count[dir]++;
+ __xfrm_policy_link(policy, dir);
atomic_inc(&net->xfrm.flow_cache_genid);
/* After previous checking, family can either be AF_INET or AF_INET6 */
@@ -799,7 +799,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
- list_add(&policy->walk.all, &net->xfrm.policy_all);
write_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
@@ -1247,17 +1246,10 @@ out:
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
- struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
- pol->family, dir);
list_add(&pol->walk.all, &net->xfrm.policy_all);
- hlist_add_head(&pol->bydst, chain);
- hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
-
- if (xfrm_bydst_should_resize(net, dir, NULL))
- schedule_work(&net->xfrm.policy_hash_work);
}
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -1265,17 +1257,31 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
{
struct net *net = xp_net(pol);
- if (hlist_unhashed(&pol->bydst))
+ if (list_empty(&pol->walk.all))
return NULL;
- hlist_del_init(&pol->bydst);
- hlist_del(&pol->byidx);
- list_del(&pol->walk.all);
+ /* Socket policies are not hashed. */
+ if (!hlist_unhashed(&pol->bydst)) {
+ hlist_del(&pol->bydst);
+ hlist_del(&pol->byidx);
+ }
+
+ list_del_init(&pol->walk.all);
net->xfrm.policy_count[dir]--;
return pol;
}
+static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
+{
+ __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
+}
+
+static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
+{
+ __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
+}
+
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
@@ -1307,7 +1313,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
if (pol) {
pol->curlft.add_time = get_seconds();
pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
- __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
+ xfrm_sk_policy_link(pol, dir);
}
if (old_pol) {
if (pol)
@@ -1316,7 +1322,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
/* Unlinking succeeds always. This is the only function
* allowed to delete or replace socket policy.
*/
- __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
+ xfrm_sk_policy_unlink(old_pol, dir);
}
write_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -1349,7 +1355,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
write_lock_bh(&net->xfrm.xfrm_policy_lock);
- __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
+ xfrm_sk_policy_link(newp, dir);
write_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
@@ -1878,7 +1884,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
static void xfrm_policy_queue_process(unsigned long arg)
{
- int err = 0;
struct sk_buff *skb;
struct sock *sk;
struct dst_entry *dst;
@@ -1941,7 +1946,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- err = dst_output(skb);
+ dst_output(skb);
}
out:
@@ -2966,10 +2971,11 @@ static int __net_init xfrm_policy_init(struct net *net)
goto out_byidx;
net->xfrm.policy_idx_hmask = hmask;
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy_hash *htab;
net->xfrm.policy_count[dir] = 0;
+ net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
htab = &net->xfrm.policy_bydst[dir];
@@ -3021,7 +3027,7 @@ static void xfrm_policy_fini(struct net *net)
WARN_ON(!list_empty(&net->xfrm.policy_all));
- for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
struct xfrm_policy_hash *htab;
WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
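The net effect of the xfrm_policy.c changes above is that per-socket policies no longer live in the bydst/byidx hash tables at all: they are tracked only on net->xfrm.policy_all and counted in the upper half of policy_count, while the hash tables and their resize work cover only the main directions. A sketch of the resulting layout (XFRM_POLICY_MAX is 3, for in/out/fwd; the helper name is hypothetical):

/* policy_count[0 .. XFRM_POLICY_MAX-1]                   main policies, hashed
 * policy_count[XFRM_POLICY_MAX .. 2*XFRM_POLICY_MAX-1]   socket policies,
 *                                                        linked on walk.all only
 */
static inline bool xfrm_policy_is_hashed(const struct xfrm_policy *pol)
{
	/* hypothetical helper; this is the test __xfrm_policy_unlink() now
	 * uses before touching the hash lists
	 */
	return !hlist_unhashed(&pol->bydst);
}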
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e812e988c111..8128594ab379 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -824,13 +824,15 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
ret = xfrm_mark_put(skb, &x->mark);
if (ret)
goto out;
- if (x->replay_esn) {
+ if (x->replay_esn)
ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
x->replay_esn);
- if (ret)
- goto out;
- }
+ else
+ ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+ &x->replay);
+ if (ret)
+ goto out;
if (x->security)
ret = copy_sec_ctx(x->security, skb);
out:
@@ -2569,6 +2571,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(x->tfcpad));
if (x->replay_esn)
l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
+ else
+ l += nla_total_size(sizeof(struct xfrm_replay_state));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 634391797856..b5b3600dcdf5 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -2,11 +2,32 @@
obj- := dummy.o
# List of programs to build
-hostprogs-y := test_verifier
+hostprogs-y := test_verifier test_maps
+hostprogs-y += sock_example
+hostprogs-y += sockex1
+hostprogs-y += sockex2
test_verifier-objs := test_verifier.o libbpf.o
+test_maps-objs := test_maps.o libbpf.o
+sock_example-objs := sock_example.o libbpf.o
+sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
+sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
+always += sockex1_kern.o
+always += sockex2_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
+
+HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
+HOSTLOADLIBES_sockex1 += -lelf
+HOSTLOADLIBES_sockex2 += -lelf
+
+# point this to your LLVM backend with bpf support
+LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
+
+%.o: %.c
+ clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+ -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
new file mode 100644
index 000000000000..ca0333146006
--- /dev/null
+++ b/samples/bpf/bpf_helpers.h
@@ -0,0 +1,40 @@
+#ifndef __BPF_HELPERS_H
+#define __BPF_HELPERS_H
+
+/* helper macro to place programs, maps, license in
+ * different sections in elf_bpf file. Section names
+ * are interpreted by elf_bpf loader
+ */
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+/* helper functions called from eBPF programs written in C */
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+ (void *) BPF_FUNC_map_lookup_elem;
+static int (*bpf_map_update_elem)(void *map, void *key, void *value,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_map_update_elem;
+static int (*bpf_map_delete_elem)(void *map, void *key) =
+ (void *) BPF_FUNC_map_delete_elem;
+
+/* llvm builtin functions that eBPF C program may use to
+ * emit BPF_LD_ABS and BPF_LD_IND instructions
+ */
+struct sk_buff;
+unsigned long long load_byte(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.byte");
+unsigned long long load_half(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.half");
+unsigned long long load_word(void *skb,
+ unsigned long long off) asm("llvm.bpf.load.word");
+
+/* a helper structure used by eBPF C program
+ * to describe map attributes to elf_bpf loader
+ */
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+};
+
+#endif
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
new file mode 100644
index 000000000000..1831d236382b
--- /dev/null
+++ b/samples/bpf/bpf_load.c
@@ -0,0 +1,203 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "libbpf.h"
+#include "bpf_helpers.h"
+#include "bpf_load.h"
+
+static char license[128];
+static bool processed_sec[128];
+int map_fd[MAX_MAPS];
+int prog_fd[MAX_PROGS];
+int prog_cnt;
+
+static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
+{
+ int fd;
+ bool is_socket = strncmp(event, "socket", 6) == 0;
+
+ if (!is_socket)
+ /* tracing events tbd */
+ return -1;
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER,
+ prog, size, license);
+
+ if (fd < 0) {
+ printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
+ return -1;
+ }
+
+ prog_fd[prog_cnt++] = fd;
+
+ return 0;
+}
+
+static int load_maps(struct bpf_map_def *maps, int len)
+{
+ int i;
+
+ for (i = 0; i < len / sizeof(struct bpf_map_def); i++) {
+
+ map_fd[i] = bpf_create_map(maps[i].type,
+ maps[i].key_size,
+ maps[i].value_size,
+ maps[i].max_entries);
+ if (map_fd[i] < 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
+ GElf_Shdr *shdr, Elf_Data **data)
+{
+ Elf_Scn *scn;
+
+ scn = elf_getscn(elf, i);
+ if (!scn)
+ return 1;
+
+ if (gelf_getshdr(scn, shdr) != shdr)
+ return 2;
+
+ *shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
+ if (!*shname || !shdr->sh_size)
+ return 3;
+
+ *data = elf_getdata(scn, 0);
+ if (!*data || elf_getdata(scn, *data) != NULL)
+ return 4;
+
+ return 0;
+}
+
+static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
+ GElf_Shdr *shdr, struct bpf_insn *insn)
+{
+ int i, nrels;
+
+ nrels = shdr->sh_size / shdr->sh_entsize;
+
+ for (i = 0; i < nrels; i++) {
+ GElf_Sym sym;
+ GElf_Rel rel;
+ unsigned int insn_idx;
+
+ gelf_getrel(data, i, &rel);
+
+ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+
+ gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);
+
+ if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
+ printf("invalid relo for insn[%d].code 0x%x\n",
+ insn_idx, insn[insn_idx].code);
+ return 1;
+ }
+ insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+ insn[insn_idx].imm = map_fd[sym.st_value / sizeof(struct bpf_map_def)];
+ }
+
+ return 0;
+}
+
+int load_bpf_file(char *path)
+{
+ int fd, i;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr, shdr_prog;
+ Elf_Data *data, *data_prog, *symbols = NULL;
+ char *shname, *shname_prog;
+
+ if (elf_version(EV_CURRENT) == EV_NONE)
+ return 1;
+
+ fd = open(path, O_RDONLY, 0);
+ if (fd < 0)
+ return 1;
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+
+ if (!elf)
+ return 1;
+
+ if (gelf_getehdr(elf, &ehdr) != &ehdr)
+ return 1;
+
+ /* scan over all elf sections to get license and map info */
+ for (i = 1; i < ehdr.e_shnum; i++) {
+
+ if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
+ continue;
+
+ if (0) /* helpful for llvm debugging */
+ printf("section %d:%s data %p size %zd link %d flags %d\n",
+ i, shname, data->d_buf, data->d_size,
+ shdr.sh_link, (int) shdr.sh_flags);
+
+ if (strcmp(shname, "license") == 0) {
+ processed_sec[i] = true;
+ memcpy(license, data->d_buf, data->d_size);
+ } else if (strcmp(shname, "maps") == 0) {
+ processed_sec[i] = true;
+ if (load_maps(data->d_buf, data->d_size))
+ return 1;
+ } else if (shdr.sh_type == SHT_SYMTAB) {
+ symbols = data;
+ }
+ }
+
+ /* load programs that need map fixup (relocations) */
+ for (i = 1; i < ehdr.e_shnum; i++) {
+
+ if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
+ continue;
+ if (shdr.sh_type == SHT_REL) {
+ struct bpf_insn *insns;
+
+ if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
+ &shdr_prog, &data_prog))
+ continue;
+
+ insns = (struct bpf_insn *) data_prog->d_buf;
+
+ processed_sec[shdr.sh_info] = true;
+ processed_sec[i] = true;
+
+ if (parse_relo_and_apply(data, symbols, &shdr, insns))
+ continue;
+
+ if (memcmp(shname_prog, "events/", 7) == 0 ||
+ memcmp(shname_prog, "socket", 6) == 0)
+ load_and_attach(shname_prog, insns, data_prog->d_size);
+ }
+ }
+
+ /* load programs that don't use maps */
+ for (i = 1; i < ehdr.e_shnum; i++) {
+
+ if (processed_sec[i])
+ continue;
+
+ if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
+ continue;
+
+ if (memcmp(shname, "events/", 7) == 0 ||
+ memcmp(shname, "socket", 6) == 0)
+ load_and_attach(shname, data->d_buf, data->d_size);
+ }
+
+ close(fd);
+ return 0;
+}
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
new file mode 100644
index 000000000000..27789a34f5e6
--- /dev/null
+++ b/samples/bpf/bpf_load.h
@@ -0,0 +1,24 @@
+#ifndef __BPF_LOAD_H
+#define __BPF_LOAD_H
+
+#define MAX_MAPS 32
+#define MAX_PROGS 32
+
+extern int map_fd[MAX_MAPS];
+extern int prog_fd[MAX_PROGS];
+
+/* parses elf file compiled by llvm .c->.o
+ * . parses 'maps' section and creates maps via BPF syscall
+ * . parses 'license' section and passes it to syscall
+ * . parses elf relocations for BPF maps and adjusts BPF_LD_IMM64 insns by
+ * storing map_fd into insn->imm and marking such insns as BPF_PSEUDO_MAP_FD
+ * . loads eBPF programs via BPF syscall
+ *
+ * One ELF file can contain multiple BPF programs which will be loaded
+ * and their FDs stored in the prog_fd array
+ *
+ * returns zero on success
+ */
+int load_bpf_file(char *path);
+
+#endif
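The comment above documents the contract of load_bpf_file(); the sockex1_user.c/sockex2_user.c samples added further down show the full flow, but the essential call sequence is roughly the following sketch ("prog.o" is a placeholder for an object built by the Makefile's clang/llc rule; error handling is trimmed):

#include <assert.h>
#include <sys/socket.h>
#include "libbpf.h"
#include "bpf_load.h"

/* Sketch: load all programs/maps from an ELF object and attach the first
 * program to a raw socket on "lo".
 */
static int attach_first_prog(void)
{
	int sock;

	if (load_bpf_file("prog.o"))		/* fills map_fd[] and prog_fd[] */
		return -1;

	sock = open_raw_sock("lo");		/* helper added to libbpf.c below */
	assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
			  prog_fd, sizeof(prog_fd[0])) == 0);
	return sock;
}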
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index ff6504420738..46d50b7ddf79 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -7,6 +7,10 @@
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <errno.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <linux/if_packet.h>
+#include <arpa/inet.h>
#include "libbpf.h"
static __u64 ptr_to_u64(void *ptr)
@@ -27,12 +31,13 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
-int bpf_update_elem(int fd, void *key, void *value)
+int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
union bpf_attr attr = {
.map_fd = fd,
.key = ptr_to_u64(key),
.value = ptr_to_u64(value),
+ .flags = flags,
};
return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
@@ -92,3 +97,27 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
+
+int open_raw_sock(const char *name)
+{
+ struct sockaddr_ll sll;
+ int sock;
+
+ sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
+ if (sock < 0) {
+ printf("cannot create raw socket\n");
+ return -1;
+ }
+
+ memset(&sll, 0, sizeof(sll));
+ sll.sll_family = AF_PACKET;
+ sll.sll_ifindex = if_nametoindex(name);
+ sll.sll_protocol = htons(ETH_P_ALL);
+ if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
+ printf("bind to %s: %s\n", name, strerror(errno));
+ close(sock);
+ return -1;
+ }
+
+ return sock;
+}
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 8a31babeca5d..58c5fe1bdba1 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -6,7 +6,7 @@ struct bpf_insn;
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int max_entries);
-int bpf_update_elem(int fd, void *key, void *value);
+int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);
@@ -15,7 +15,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
const char *license);
-#define LOG_BUF_SIZE 8192
+#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -99,6 +99,16 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
@@ -169,4 +179,7 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = 0 })
+/* create RAW socket and bind to interface 'name' */
+int open_raw_sock(const char *name);
+
#endif
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
new file mode 100644
index 000000000000..c8ad0404416f
--- /dev/null
+++ b/samples/bpf/sock_example.c
@@ -0,0 +1,101 @@
+/* eBPF example program:
+ * - creates arraymap in kernel with key 4 bytes and value 8 bytes
+ *
+ * - loads eBPF program:
+ * r0 = skb->data[ETH_HLEN + offsetof(struct iphdr, protocol)];
+ * *(u32*)(fp - 4) = r0;
+ * // assuming packet is IPv4, lookup ip->proto in a map
+ * value = bpf_map_lookup_elem(map_fd, fp - 4);
+ * if (value)
+ * (*(u64*)value) += 1;
+ *
+ * - attaches this program to eth0 raw socket
+ *
+ * - every second user space reads map[tcp], map[udp], map[icmp] to see
+ * how many packets of given protocol were seen on eth0
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <stddef.h>
+#include "libbpf.h"
+
+static int test_sock(void)
+{
+ int sock = -1, map_fd, prog_fd, i, key;
+ long long value = 0, tcp_cnt, udp_cnt, icmp_cnt;
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
+ 256);
+ if (map_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ goto cleanup;
+ }
+
+ struct bpf_insn prog[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+ BPF_EXIT_INSN(),
+ };
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
+ "GPL");
+ if (prog_fd < 0) {
+ printf("failed to load prog '%s'\n", strerror(errno));
+ goto cleanup;
+ }
+
+ sock = open_raw_sock("lo");
+
+ if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) < 0) {
+ printf("setsockopt %s\n", strerror(errno));
+ goto cleanup;
+ }
+
+ for (i = 0; i < 10; i++) {
+ key = IPPROTO_TCP;
+ assert(bpf_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
+
+ key = IPPROTO_UDP;
+ assert(bpf_lookup_elem(map_fd, &key, &udp_cnt) == 0);
+
+ key = IPPROTO_ICMP;
+ assert(bpf_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
+
+ printf("TCP %lld UDP %lld ICMP %lld packets\n",
+ tcp_cnt, udp_cnt, icmp_cnt);
+ sleep(1);
+ }
+
+cleanup:
+ /* maps, programs, raw sockets will auto cleanup on process exit */
+ return 0;
+}
+
+int main(void)
+{
+ FILE *f;
+
+ f = popen("ping -c5 localhost", "r");
+ (void)f;
+
+ return test_sock();
+}
diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c
new file mode 100644
index 000000000000..066892662915
--- /dev/null
+++ b/samples/bpf/sockex1_kern.c
@@ -0,0 +1,25 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(long),
+ .max_entries = 256,
+};
+
+SEC("socket1")
+int bpf_prog1(struct sk_buff *skb)
+{
+ int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
+ long *value;
+
+ value = bpf_map_lookup_elem(&my_map, &index);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c
new file mode 100644
index 000000000000..34a443ff3831
--- /dev/null
+++ b/samples/bpf/sockex1_user.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+int main(int ac, char **argv)
+{
+ char filename[256];
+ FILE *f;
+ int i, sock;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ sock = open_raw_sock("lo");
+
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd,
+ sizeof(prog_fd[0])) == 0);
+
+ f = popen("ping -c5 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ long long tcp_cnt, udp_cnt, icmp_cnt;
+ int key;
+
+ key = IPPROTO_TCP;
+ assert(bpf_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
+
+ key = IPPROTO_UDP;
+ assert(bpf_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
+
+ key = IPPROTO_ICMP;
+ assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
+
+ printf("TCP %lld UDP %lld ICMP %lld packets\n",
+ tcp_cnt, udp_cnt, icmp_cnt);
+ sleep(1);
+ }
+
+ return 0;
+}
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
new file mode 100644
index 000000000000..6f0135f0f217
--- /dev/null
+++ b/samples/bpf/sockex2_kern.c
@@ -0,0 +1,215 @@
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_keys {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u16 thoff;
+ __u8 ip_proto;
+};
+
+static inline int proto_ports_offset(__u64 proto)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ return 0;
+ case IPPROTO_AH:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline int ip_is_fragment(struct sk_buff *ctx, __u64 nhoff)
+{
+ return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+ & (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off)
+{
+ __u64 w0 = load_word(ctx, off);
+ __u64 w1 = load_word(ctx, off + 4);
+ __u64 w2 = load_word(ctx, off + 8);
+ __u64 w3 = load_word(ctx, off + 12);
+
+ return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+ struct flow_keys *flow)
+{
+ __u64 verlen;
+
+ if (unlikely(ip_is_fragment(skb, nhoff)))
+ *ip_proto = 0;
+ else
+ *ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+ if (*ip_proto != IPPROTO_GRE) {
+ flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+ flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+ }
+
+ verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+ if (likely(verlen == 0x45))
+ nhoff += 20;
+ else
+ nhoff += (verlen & 0xF) << 2;
+
+ return nhoff;
+}
+
+static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+ struct flow_keys *flow)
+{
+ *ip_proto = load_byte(skb,
+ nhoff + offsetof(struct ipv6hdr, nexthdr));
+ flow->src = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, saddr));
+ flow->dst = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, daddr));
+ nhoff += sizeof(struct ipv6hdr);
+
+ return nhoff;
+}
+
+static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow)
+{
+ __u64 nhoff = ETH_HLEN;
+ __u64 ip_proto;
+ __u64 proto = load_half(skb, 12);
+ int poff;
+
+ if (proto == ETH_P_8021AD) {
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == ETH_P_8021Q) {
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (likely(proto == ETH_P_IP))
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ else if (proto == ETH_P_IPV6)
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ else
+ return false;
+
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ };
+
+ __u64 gre_flags = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, flags));
+ __u64 gre_proto = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, proto));
+
+ if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+ break;
+
+ proto = gre_proto;
+ nhoff += 4;
+ if (gre_flags & GRE_CSUM)
+ nhoff += 4;
+ if (gre_flags & GRE_KEY)
+ nhoff += 4;
+ if (gre_flags & GRE_SEQ)
+ nhoff += 4;
+
+ if (proto == ETH_P_8021Q) {
+ proto = load_half(skb,
+ nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == ETH_P_IP)
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ else if (proto == ETH_P_IPV6)
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ else
+ return false;
+ break;
+ }
+ case IPPROTO_IPIP:
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ break;
+ case IPPROTO_IPV6:
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ break;
+ default:
+ break;
+ }
+
+ flow->ip_proto = ip_proto;
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ nhoff += poff;
+ flow->ports = load_word(skb, nhoff);
+ }
+
+ flow->thoff = (__u16) nhoff;
+
+ return true;
+}
+
+struct bpf_map_def SEC("maps") hash_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__be32),
+ .value_size = sizeof(long),
+ .max_entries = 1024,
+};
+
+SEC("socket2")
+int bpf_prog2(struct sk_buff *skb)
+{
+ struct flow_keys flow;
+ long *value;
+ u32 key;
+
+ if (!flow_dissector(skb, &flow))
+ return 0;
+
+ key = flow.dst;
+ value = bpf_map_lookup_elem(&hash_map, &key);
+ if (value) {
+ __sync_fetch_and_add(value, 1);
+ } else {
+ long val = 1;
+
+ bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
new file mode 100644
index 000000000000..d2d5f5a790d3
--- /dev/null
+++ b/samples/bpf/sockex2_user.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+int main(int ac, char **argv)
+{
+ char filename[256];
+ FILE *f;
+ int i, sock;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ sock = open_raw_sock("lo");
+
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd,
+ sizeof(prog_fd[0])) == 0);
+
+ f = popen("ping -c5 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ int key = 0, next_key;
+ long long value;
+
+ while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
+ bpf_lookup_elem(map_fd[0], &next_key, &value);
+ printf("ip %s count %lld\n",
+ inet_ntoa((struct in_addr){htonl(next_key)}),
+ value);
+ key = next_key;
+ }
+ sleep(1);
+ }
+ return 0;
+}
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
new file mode 100644
index 000000000000..e286b42307f3
--- /dev/null
+++ b/samples/bpf/test_maps.c
@@ -0,0 +1,291 @@
+/*
+ * Testsuite for eBPF maps
+ *
+ * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include "libbpf.h"
+
+/* sanity tests for map API */
+static void test_hashmap_sanity(int i, void *data)
+{
+ long long key, next_key, value;
+ int map_fd;
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 2);
+ if (map_fd < 0) {
+ printf("failed to create hashmap '%s'\n", strerror(errno));
+ exit(1);
+ }
+
+ key = 1;
+ value = 1234;
+ /* insert key=1 element */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
+ value = 0;
+ /* BPF_NOEXIST means: add new element if it doesn't exist */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+ /* key=1 already exists */
+ errno == EEXIST);
+
+ assert(bpf_update_elem(map_fd, &key, &value, -1) == -1 && errno == EINVAL);
+
+ /* check that key=1 can be found */
+ assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234);
+
+ key = 2;
+ /* check that key=2 is not found */
+ assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+ /* BPF_EXIST means: update existing element */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 &&
+ /* key=2 is not there */
+ errno == ENOENT);
+
+ /* insert key=2 element */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+
+ /* key=1 and key=2 were inserted, check that key=0 cannot be inserted
+ * due to max_entries limit
+ */
+ key = 0;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+ errno == E2BIG);
+
+ /* check that key = 0 doesn't exist */
+ assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+ /* iterate over two elements */
+ assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+ next_key == 2);
+ assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+ next_key == 1);
+ assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+ errno == ENOENT);
+
+ /* delete both elements */
+ key = 1;
+ assert(bpf_delete_elem(map_fd, &key) == 0);
+ key = 2;
+ assert(bpf_delete_elem(map_fd, &key) == 0);
+ assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+ key = 0;
+ /* check that map is empty */
+ assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 &&
+ errno == ENOENT);
+ close(map_fd);
+}
+
+static void test_arraymap_sanity(int i, void *data)
+{
+ int key, next_key, map_fd;
+ long long value;
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 2);
+ if (map_fd < 0) {
+ printf("failed to create arraymap '%s'\n", strerror(errno));
+ exit(1);
+ }
+
+ key = 1;
+ value = 1234;
+ /* insert key=1 element */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
+ value = 0;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+ errno == EEXIST);
+
+ /* check that key=1 can be found */
+ assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 1234);
+
+ key = 0;
+ /* check that key=0 is also found and zero initialized */
+ assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0);
+
+
+ /* key=0 and key=1 were inserted, check that key=2 cannot be inserted
+ * due to max_entries limit
+ */
+ key = 2;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == -1 &&
+ errno == E2BIG);
+
+ /* check that key = 2 doesn't exist */
+ assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+ /* iterate over two elements */
+ assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+ next_key == 0);
+ assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+ next_key == 1);
+ assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+ errno == ENOENT);
+
+ /* delete shouldn't succeed */
+ key = 1;
+ assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL);
+
+ close(map_fd);
+}
+
+#define MAP_SIZE (32 * 1024)
+static void test_map_large(void)
+{
+ struct bigkey {
+ int a;
+ char b[116];
+ long long c;
+ } key;
+ int map_fd, i, value;
+
+ /* allocate 4Mbyte of memory */
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ MAP_SIZE);
+ if (map_fd < 0) {
+ printf("failed to create large map '%s'\n", strerror(errno));
+ exit(1);
+ }
+
+ for (i = 0; i < MAP_SIZE; i++) {
+ key = (struct bigkey) {.c = i};
+ value = i;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+ }
+ key.c = -1;
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+ errno == E2BIG);
+
+ /* iterate through all elements */
+ for (i = 0; i < MAP_SIZE; i++)
+ assert(bpf_get_next_key(map_fd, &key, &key) == 0);
+ assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+
+ key.c = 0;
+ assert(bpf_lookup_elem(map_fd, &key, &value) == 0 && value == 0);
+ key.a = 1;
+ assert(bpf_lookup_elem(map_fd, &key, &value) == -1 && errno == ENOENT);
+
+ close(map_fd);
+}
+
+/* fork N children and wait for them to complete */
+static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data)
+{
+ pid_t pid[tasks];
+ int i;
+
+ for (i = 0; i < tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ fn(i, data);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+}
+
+static void test_map_stress(void)
+{
+ run_parallel(100, test_hashmap_sanity, NULL);
+ run_parallel(100, test_arraymap_sanity, NULL);
+}
+
+#define TASKS 1024
+#define DO_UPDATE 1
+#define DO_DELETE 0
+static void do_work(int fn, void *data)
+{
+ int map_fd = ((int *)data)[0];
+ int do_update = ((int *)data)[1];
+ int i;
+ int key, value;
+
+ for (i = fn; i < MAP_SIZE; i += TASKS) {
+ key = value = i;
+ if (do_update)
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
+ else
+ assert(bpf_delete_elem(map_fd, &key) == 0);
+ }
+}
+
+static void test_map_parallel(void)
+{
+ int i, map_fd, key = 0, value = 0;
+ int data[2];
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ MAP_SIZE);
+ if (map_fd < 0) {
+ printf("failed to create map for parallel test '%s'\n",
+ strerror(errno));
+ exit(1);
+ }
+
+ data[0] = map_fd;
+ data[1] = DO_UPDATE;
+ /* use the same map_fd in children to add elements to this map
+ * child_0 adds key=0, key=1024, key=2048, ...
+ * child_1 adds key=1, key=1025, key=2049, ...
+ * child_1023 adds key=1023, ...
+ */
+ run_parallel(TASKS, do_work, data);
+
+ /* check that key=0 is already there */
+ assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
+ errno == EEXIST);
+
+ /* check that all elements were inserted */
+ key = -1;
+ for (i = 0; i < MAP_SIZE; i++)
+ assert(bpf_get_next_key(map_fd, &key, &key) == 0);
+ assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+
+ /* another check for all elements */
+ for (i = 0; i < MAP_SIZE; i++) {
+ key = MAP_SIZE - i - 1;
+ assert(bpf_lookup_elem(map_fd, &key, &value) == 0 &&
+ value == key);
+ }
+
+	/* now let's delete all elements in parallel */
+ data[1] = DO_DELETE;
+ run_parallel(TASKS, do_work, data);
+
+ /* nothing should be left */
+ key = -1;
+ assert(bpf_get_next_key(map_fd, &key, &key) == -1 && errno == ENOENT);
+}
+
+int main(void)
+{
+ test_hashmap_sanity(0, NULL);
+ test_arraymap_sanity(0, NULL);
+ test_map_large();
+ test_map_parallel();
+ test_map_stress();
+ printf("test_maps: OK\n");
+ return 0;
+}
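The sanity tests above exercise the three update flags; a condensed sketch of the expected semantics for a hash map, assuming the same libbpf wrappers used by this sample (illustration only, not part of the patch):

	long long k = 1, v = 42;

	bpf_update_elem(map_fd, &k, &v, BPF_NOEXIST); /* ok: key absent, inserted       */
	bpf_update_elem(map_fd, &k, &v, BPF_NOEXIST); /* fails: -1, errno == EEXIST     */
	bpf_update_elem(map_fd, &k, &v, BPF_EXIST);   /* ok: key present, value updated */
	bpf_update_elem(map_fd, &k, &v, BPF_ANY);     /* ok: insert or update           */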
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index eb4bec0ad8af..b96175e90363 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -261,7 +261,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup = {2},
@@ -417,7 +417,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.errstr = "fd 0 is not pointing to valid bpf_map",
@@ -430,7 +430,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
@@ -445,7 +445,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
BPF_EXIT_INSN(),
@@ -461,7 +461,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
@@ -548,7 +548,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.fixup = {24},
@@ -602,6 +602,45 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
},
+ {
+ "jump test 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
@@ -620,7 +659,7 @@ static int create_map(void)
long long key, value = 0;
int map_fd;
- map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024);
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
}
@@ -630,7 +669,7 @@ static int create_map(void)
static int test(void)
{
- int prog_fd, i;
+ int prog_fd, i, pass_cnt = 0, err_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_insn *prog = tests[i].insns;
@@ -657,21 +696,25 @@ static int test(void)
printf("FAIL\nfailed to load prog '%s'\n",
strerror(errno));
printf("%s", bpf_log_buf);
+ err_cnt++;
goto fail;
}
} else {
if (prog_fd >= 0) {
printf("FAIL\nunexpected success to load\n");
printf("%s", bpf_log_buf);
+ err_cnt++;
goto fail;
}
if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
printf("FAIL\nunexpected error message: %s",
bpf_log_buf);
+ err_cnt++;
goto fail;
}
}
+ pass_cnt++;
printf("OK\n");
fail:
if (map_fd >= 0)
@@ -679,6 +722,7 @@ fail:
close(prog_fd);
}
+ printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
return 0;
}
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
new file mode 100644
index 000000000000..e9cc689033fe
--- /dev/null
+++ b/scripts/checkkconfigsymbols.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+
+"""Find Kconfig identifiers that are referenced but not defined."""
+
+# (c) 2014 Valentin Rothberg <valentinrothberg@gmail.com>
+# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
+#
+# Licensed under the terms of the GNU GPL License version 2
+
+
+import os
+import re
+from subprocess import Popen, PIPE, STDOUT
+
+
+# regex expressions
+OPERATORS = r"&|\(|\)|\||\!"
+FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
+DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
+EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
+STMT = r"^\s*(?:if|select|depends\s+on)\s+" + EXPR
+SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
+
+# regex objects
+REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
+REGEX_FEATURE = re.compile(r"(" + FEATURE + r")")
+REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
+REGEX_KCONFIG_DEF = re.compile(DEF)
+REGEX_KCONFIG_EXPR = re.compile(EXPR)
+REGEX_KCONFIG_STMT = re.compile(STMT)
+REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
+REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
+
+
+def main():
+ """Main function of this module."""
+ source_files = []
+ kconfig_files = []
+ defined_features = set()
+ referenced_features = dict() # {feature: [files]}
+
+ # use 'git ls-files' to get the worklist
+ pop = Popen("git ls-files", stdout=PIPE, stderr=STDOUT, shell=True)
+ (stdout, _) = pop.communicate() # wait until finished
+ if len(stdout) > 0 and stdout[-1] == "\n":
+ stdout = stdout[:-1]
+
+ for gitfile in stdout.rsplit("\n"):
+ if ".git" in gitfile or "ChangeLog" in gitfile or \
+ ".log" in gitfile or os.path.isdir(gitfile):
+ continue
+ if REGEX_FILE_KCONFIG.match(gitfile):
+ kconfig_files.append(gitfile)
+ else:
+ # all non-Kconfig files are checked for consistency
+ source_files.append(gitfile)
+
+ for sfile in source_files:
+ parse_source_file(sfile, referenced_features)
+
+ for kfile in kconfig_files:
+ parse_kconfig_file(kfile, defined_features, referenced_features)
+
+ print "Undefined symbol used\tFile list"
+ for feature in sorted(referenced_features):
+ # filter some false positives
+ if feature == "FOO" or feature == "BAR" or \
+ feature == "FOO_BAR" or feature == "XXX":
+ continue
+ if feature not in defined_features:
+ if feature.endswith("_MODULE"):
+ # avoid false positives for kernel modules
+ if feature[:-len("_MODULE")] in defined_features:
+ continue
+ files = referenced_features.get(feature)
+ print "%s\t%s" % (feature, ", ".join(files))
+
+
+def parse_source_file(sfile, referenced_features):
+ """Parse @sfile for referenced Kconfig features."""
+ lines = []
+ with open(sfile, "r") as stream:
+ lines = stream.readlines()
+
+ for line in lines:
+ if not "CONFIG_" in line:
+ continue
+ features = REGEX_SOURCE_FEATURE.findall(line)
+ for feature in features:
+ if not REGEX_FILTER_FEATURES.search(feature):
+ continue
+ sfiles = referenced_features.get(feature, set())
+ sfiles.add(sfile)
+ referenced_features[feature] = sfiles
+
+
+def get_features_in_line(line):
+ """Return mentioned Kconfig features in @line."""
+ return REGEX_FEATURE.findall(line)
+
+
+def parse_kconfig_file(kfile, defined_features, referenced_features):
+ """Parse @kfile and update feature definitions and references."""
+ lines = []
+ skip = False
+
+ with open(kfile, "r") as stream:
+ lines = stream.readlines()
+
+ for i in range(len(lines)):
+ line = lines[i]
+ line = line.strip('\n')
+ line = line.split("#")[0] # ignore comments
+
+ if REGEX_KCONFIG_DEF.match(line):
+ feature_def = REGEX_KCONFIG_DEF.findall(line)
+ defined_features.add(feature_def[0])
+ skip = False
+ elif REGEX_KCONFIG_HELP.match(line):
+ skip = True
+ elif skip:
+ # ignore content of help messages
+ pass
+ elif REGEX_KCONFIG_STMT.match(line):
+ features = get_features_in_line(line)
+ # multi-line statements
+ while line.endswith("\\"):
+ i += 1
+ line = lines[i]
+ line = line.strip('\n')
+ features.extend(get_features_in_line(line))
+ for feature in set(features):
+ paths = referenced_features.get(feature, set())
+ paths.add(kfile)
+ referenced_features[feature] = paths
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/checkkconfigsymbols.sh b/scripts/checkkconfigsymbols.sh
deleted file mode 100755
index ccb3391882d1..000000000000
--- a/scripts/checkkconfigsymbols.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/sh
-# Find Kconfig variables used in source code but never defined in Kconfig
-# Copyright (C) 2007, Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
-
-# Tested with dash.
-paths="$@"
-[ -z "$paths" ] && paths=.
-
-# Doing this once at the beginning saves a lot of time, on a cache-hot tree.
-Kconfigs="`find . -name 'Kconfig' -o -name 'Kconfig*[^~]'`"
-
-printf "File list \tundefined symbol used\n"
-find $paths -name '*.[chS]' -o -name 'Makefile' -o -name 'Makefile*[^~]'| while read i
-do
- # Output the bare Kconfig variable and the filename; the _MODULE part at
- # the end is not removed here (would need perl an not-hungry regexp for that).
- sed -ne 's!^.*\<\(UML_\)\?CONFIG_\([0-9A-Za-z_]\+\).*!\2 '$i'!p' < $i
-done | \
-# Smart "sort|uniq" implemented in awk and tuned to collect the names of all
-# files which use a given symbol
-awk '{map[$1, count[$1]++] = $2; }
-END {
- for (combIdx in map) {
- split(combIdx, separate, SUBSEP);
- # The value may have been removed.
- if (! ( (separate[1], separate[2]) in map ) )
- continue;
- symb=separate[1];
- printf "%s ", symb;
- #Use gawk extension to delete the names vector
- delete names;
- #Portably delete the names vector
- #split("", names);
- for (i=0; i < count[symb]; i++) {
- names[map[symb, i]] = 1;
- # Unfortunately, we may still encounter symb, i in the
- # outside iteration.
- delete map[symb, i];
- }
- i=0;
- for (name in names) {
- if (i > 0)
- printf ", %s", name;
- else
- printf "%s", name;
- i++;
- }
- printf "\n";
- }
-}' |
-while read symb files; do
- # Remove the _MODULE suffix when checking the variable name. This should
- # be done only on tristate symbols, actually, but Kconfig parsing is
- # beyond the purpose of this script.
- symb_bare=`echo $symb | sed -e 's/_MODULE//'`
- if ! grep -q "\<$symb_bare\>" $Kconfigs; then
- printf "$files: \t$symb\n"
- fi
-done|sort
diff --git a/scripts/coccinelle/api/platform_no_drv_owner.cocci b/scripts/coccinelle/api/platform_no_drv_owner.cocci
new file mode 100644
index 000000000000..e065b9e714fc
--- /dev/null
+++ b/scripts/coccinelle/api/platform_no_drv_owner.cocci
@@ -0,0 +1,106 @@
+/// Remove .owner field if calls are used which set it automatically
+///
+// Confidence: High
+// Copyright: (C) 2014 Wolfram Sang. GPL v2.
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+@match1@
+declarer name module_platform_driver;
+declarer name module_platform_driver_probe;
+identifier __driver;
+@@
+(
+ module_platform_driver(__driver);
+|
+ module_platform_driver_probe(__driver, ...);
+)
+
+@fix1 depends on match1 && patch && !context && !org && !report@
+identifier match1.__driver;
+@@
+ static struct platform_driver __driver = {
+ .driver = {
+- .owner = THIS_MODULE,
+ }
+ };
+
+@match2@
+identifier __driver;
+@@
+(
+ platform_driver_register(&__driver)
+|
+ platform_driver_probe(&__driver, ...)
+|
+ platform_create_bundle(&__driver, ...)
+)
+
+@fix2 depends on match2 && patch && !context && !org && !report@
+identifier match2.__driver;
+@@
+ static struct platform_driver __driver = {
+ .driver = {
+- .owner = THIS_MODULE,
+ }
+ };
+
+// ----------------------------------------------------------------------------
+
+@fix1_context depends on match1 && !patch && (context || org || report)@
+identifier match1.__driver;
+position j0;
+@@
+
+ static struct platform_driver __driver = {
+ .driver = {
+* .owner@j0 = THIS_MODULE,
+ }
+ };
+
+@fix2_context depends on match2 && !patch && (context || org || report)@
+identifier match2.__driver;
+position j0;
+@@
+
+ static struct platform_driver __driver = {
+ .driver = {
+* .owner@j0 = THIS_MODULE,
+ }
+ };
+
+// ----------------------------------------------------------------------------
+
+@script:python fix1_org depends on org@
+j0 << fix1_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.org.print_todo(j0[0], msg)
+
+@script:python fix2_org depends on org@
+j0 << fix2_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.org.print_todo(j0[0], msg)
+
+// ----------------------------------------------------------------------------
+
+@script:python fix1_report depends on report@
+j0 << fix1_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.report.print_report(j0[0], msg)
+
+@script:python fix2_report depends on report@
+j0 << fix2_context.j0;
+@@
+
+msg = "No need to set .owner here. The core will do it."
+coccilib.report.print_report(j0[0], msg)
+
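The semantic patch above removes a .owner assignment that the registration helpers already fill in; a hedged before/after illustration in C (the driver name is hypothetical):

	/* Before: .owner set by hand although module_platform_driver() sets it */
	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,	/* line removed by the rule above */
		},
	};
	module_platform_driver(foo_driver);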
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
index 685d80e1bb0e..2cf23f002d3f 100644
--- a/scripts/kconfig/list.h
+++ b/scripts/kconfig/list.h
@@ -34,7 +34,7 @@ struct list_head {
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
@@ -43,7 +43,7 @@ struct list_head {
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
@@ -55,7 +55,7 @@ struct list_head {
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 001facfa5b74..3d1984e59a30 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -404,7 +404,7 @@ do_file(char const *const fname)
}
if (w2(ghdr->e_machine) == EM_S390) {
reltype = R_390_64;
- mcount_adjust_64 = -8;
+ mcount_adjust_64 = -14;
}
if (w2(ghdr->e_machine) == EM_MIPS) {
reltype = R_MIPS_64;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d4b665610d67..56ea99a12ab7 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -243,7 +243,7 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "s390" && $bits == 64) {
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
- $mcount_adjust = -8;
+ $mcount_adjust = -14;
$alignment = 8;
$type = ".quad";
$ld .= " -m elf64_s390";
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 8d4fbff8b87c..5e3bd72b299a 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/sched.h>
-#include <linux/rbtree.h>
+#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/key-type.h>
#include <linux/digsig.h>
@@ -63,7 +63,7 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
return -EOPNOTSUPP;
}
-int integrity_init_keyring(const unsigned int id)
+int __init integrity_init_keyring(const unsigned int id)
{
const struct cred *cred = current_cred();
int err = 0;
@@ -84,3 +84,37 @@ int integrity_init_keyring(const unsigned int id)
}
return err;
}
+
+int __init integrity_load_x509(const unsigned int id, char *path)
+{
+ key_ref_t key;
+ char *data;
+ int rc;
+
+ if (!keyring[id])
+ return -EINVAL;
+
+ rc = integrity_read_file(path, &data);
+ if (rc < 0)
+ return rc;
+
+ key = key_create_or_update(make_key_ref(keyring[id], 1),
+ "asymmetric",
+ NULL,
+ data,
+ rc,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_TRUSTED);
+ if (IS_ERR(key)) {
+ rc = PTR_ERR(key);
+ pr_err("Problem loading X.509 certificate (%d): %s\n",
+ rc, path);
+ } else {
+ pr_notice("Loaded X.509 cert '%s': %s\n",
+ key_ref_to_ptr(key)->description, path);
+ key_ref_put(key);
+ }
+ kfree(data);
+ return 0;
+}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index c5ee1a7c5e8a..f589c9a05da2 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -162,9 +162,14 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
(const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
- /* we probably want to replace rsa with hmac here */
- evm_update_evmxattr(dentry, xattr_name, xattr_value,
- xattr_value_len);
+ /* Replace RSA with HMAC if not mounted readonly and
+ * not immutable
+ */
+ if (!IS_RDONLY(dentry->d_inode) &&
+ !IS_IMMUTABLE(dentry->d_inode))
+ evm_update_evmxattr(dentry, xattr_name,
+ xattr_value,
+ xattr_value_len);
}
break;
default:
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index a521edf4cbd6..dbb6d141c3db 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -19,14 +19,14 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
static DEFINE_RWLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
-int iint_initialized;
-
/*
* __integrity_iint_find - return the iint associated with an inode
*/
@@ -166,7 +166,89 @@ static int __init integrity_iintcache_init(void)
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
0, SLAB_PANIC, init_once);
- iint_initialized = 1;
return 0;
}
security_initcall(integrity_iintcache_init);
+
+
+/*
+ * integrity_kernel_read - read data from the file
+ *
+ * This is a function for reading file content instead of kernel_read().
+ * It does not perform locking checks to ensure it cannot be blocked.
+ * It does not perform security checks because it is irrelevant for IMA.
+ *
+ */
+int integrity_kernel_read(struct file *file, loff_t offset,
+ char *addr, unsigned long count)
+{
+ mm_segment_t old_fs;
+ char __user *buf = (char __user *)addr;
+ ssize_t ret;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = __vfs_read(file, buf, count, &offset);
+ set_fs(old_fs);
+
+ return ret;
+}
+
+/*
+ * integrity_read_file - read entire file content into the buffer
+ *
+ * This function opens a file, allocates a buffer of the required
+ * size, reads the entire file content into the buffer and closes the file.
+ *
+ * It is used only by init code.
+ *
+ */
+int __init integrity_read_file(const char *path, char **data)
+{
+ struct file *file;
+ loff_t size;
+ char *buf;
+ int rc = -EINVAL;
+
+ file = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+
+ size = i_size_read(file_inode(file));
+ if (size <= 0)
+ goto out;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = integrity_kernel_read(file, 0, buf, size);
+ if (rc < 0)
+ kfree(buf);
+ else if (rc != size)
+ rc = -EIO;
+ else
+ *data = buf;
+out:
+ fput(file);
+ return rc;
+}
+
+/*
+ * integrity_load_keys - load integrity keys hook
+ *
+ * This hook is called from init/main.c:kernel_init_freeable()
+ * when the rootfs is ready.
+ */
+void __init integrity_load_keys(void)
+{
+ ima_load_x509();
+}
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index e099875643c5..57515bc915c0 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -10,7 +10,7 @@ config IMA
select CRYPTO_HASH_INFO
select TCG_TPM if HAS_IOMEM && !UML
select TCG_TIS if TCG_TPM && X86
- select TCG_IBMVTPM if TCG_TPM && PPC64
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
@@ -131,3 +131,28 @@ config IMA_TRUSTED_KEYRING
help
This option requires that all keys added to the .ima
keyring be signed by a key on the system trusted keyring.
+
+config IMA_LOAD_X509
+ bool "Load X509 certificate onto the '.ima' trusted keyring"
+ depends on IMA_TRUSTED_KEYRING
+ default n
+ help
+ File signature verification is based on the public keys
+ loaded on the .ima trusted keyring. These public keys are
+ X509 certificates signed by a trusted key on the
+ .system keyring. This option enables X509 certificate
+ loading from the kernel onto the '.ima' trusted keyring.
+
+config IMA_X509_PATH
+ string "IMA X509 certificate path"
+ depends on IMA_LOAD_X509
+ default "/etc/keys/x509_ima.der"
+ help
+	  This option defines the IMA X509 certificate path.
+
+config IMA_APPRAISE_SIGNED_INIT
+ bool "Require signed user-space initialization"
+ depends on IMA_LOAD_X509
+ default n
+ help
+ This option requires user-space init to be signed.
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index f92be1b14089..b8a27c5052d4 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -173,8 +173,7 @@ int ima_get_action(struct inode *inode, int mask, int function)
{
int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
- if (!ima_appraise)
- flags &= ~IMA_APPRAISE;
+ flags &= ima_policy_flag;
return ima_match_policy(inode, function, mask, flags);
}
@@ -325,11 +324,11 @@ const char *ima_d_path(struct path *path, char **pathbuf)
{
char *pathname = NULL;
- *pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ *pathbuf = __getname();
if (*pathbuf) {
pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
if (IS_ERR(pathname)) {
- kfree(*pathbuf);
+ __putname(*pathbuf);
*pathbuf = NULL;
pathname = NULL;
}
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 78d66dae15f4..686355fea7fd 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -67,36 +67,6 @@ MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;
-/**
- * ima_kernel_read - read file content
- *
- * This is a function for reading file content instead of kernel_read().
- * It does not perform locking checks to ensure it cannot be blocked.
- * It does not perform security checks because it is irrelevant for IMA.
- *
- */
-static int ima_kernel_read(struct file *file, loff_t offset,
- char *addr, unsigned long count)
-{
- mm_segment_t old_fs;
- char __user *buf = addr;
- ssize_t ret = -EINVAL;
-
- if (!(file->f_mode & FMODE_READ))
- return -EBADF;
-
- old_fs = get_fs();
- set_fs(get_ds());
- if (file->f_op->read)
- ret = file->f_op->read(file, buf, count, &offset);
- else if (file->f_op->aio_read)
- ret = do_sync_read(file, buf, count, &offset);
- else if (file->f_op->read_iter)
- ret = new_sync_read(file, buf, count, &offset);
- set_fs(old_fs);
- return ret;
-}
-
int __init ima_init_crypto(void)
{
long rc;
@@ -324,7 +294,8 @@ static int ima_calc_file_hash_atfm(struct file *file,
}
/* read buffer */
rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
- rc = ima_kernel_read(file, offset, rbuf[active], rbuf_len);
+ rc = integrity_kernel_read(file, offset, rbuf[active],
+ rbuf_len);
if (rc != rbuf_len)
goto out3;
@@ -414,7 +385,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
while (offset < i_size) {
int rbuf_len;
- rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
+ rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
if (rbuf_len < 0) {
rc = rbuf_len;
break;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index da92fcc08d15..461215e5fd31 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -118,6 +118,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ char *template_name;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
bool is_ima_template = false;
@@ -128,6 +129,9 @@ static int ima_measurements_show(struct seq_file *m, void *v)
if (e == NULL)
return -1;
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
/*
* 1st: PCRIndex
* PCR used is always the same (config option) in
@@ -139,14 +143,14 @@ static int ima_measurements_show(struct seq_file *m, void *v)
ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(e->template_desc->name);
+ namelen = strlen(template_name);
ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, e->template_desc->name, namelen);
+ ima_putc(m, template_name, namelen);
/* 5th: template length (except for 'ima' template) */
- if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
+ if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
is_ima_template = true;
if (!is_ima_template)
@@ -200,6 +204,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ char *template_name;
int i;
/* get entry */
@@ -207,6 +212,9 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
if (e == NULL)
return -1;
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
/* 1st: PCR used (config option) */
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
@@ -214,7 +222,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
/* 3th: template name */
- seq_printf(m, " %s", e->template_desc->name);
+ seq_printf(m, " %s", template_name);
/* 4th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {
@@ -288,7 +296,12 @@ static struct dentry *runtime_measurements_count;
static struct dentry *violations;
static struct dentry *ima_policy;
-static atomic_t policy_opencount = ATOMIC_INIT(1);
+enum ima_fs_flags {
+ IMA_FS_BUSY,
+};
+
+static unsigned long ima_fs_flags;
+
/*
* ima_open_policy: sequentialize access to the policy file
*/
@@ -297,9 +310,9 @@ static int ima_open_policy(struct inode *inode, struct file *filp)
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
return -EACCES;
- if (atomic_dec_and_test(&policy_opencount))
- return 0;
- return -EBUSY;
+ if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
+ return -EBUSY;
+ return 0;
}
/*
@@ -311,10 +324,16 @@ static int ima_open_policy(struct inode *inode, struct file *filp)
*/
static int ima_release_policy(struct inode *inode, struct file *file)
{
+ const char *cause = valid_policy ? "completed" : "failed";
+
+ pr_info("IMA: policy update %s\n", cause);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", cause, !valid_policy, 0);
+
if (!valid_policy) {
ima_delete_rules();
valid_policy = 1;
- atomic_set(&policy_opencount, 1);
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
return 0;
}
ima_update_policy();
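The open/release pair above serializes writers to the policy file with a single flag bit instead of an atomic open count; a minimal sketch of that pattern with hypothetical names (illustration only, not part of the patch):

	static unsigned long example_flags;

	static int example_open(struct inode *inode, struct file *filp)
	{
		if (test_and_set_bit(0, &example_flags))
			return -EBUSY;		/* another writer already has it open */
		return 0;
	}

	static int example_release(struct inode *inode, struct file *filp)
	{
		clear_bit(0, &example_flags);	/* let the next writer in */
		return 0;
	}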
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 9164fc8cac84..5e4c29d174ee 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -24,6 +24,12 @@
#include <crypto/hash_info.h>
#include "ima.h"
+#ifdef CONFIG_IMA_X509_PATH
+#define IMA_X509_PATH CONFIG_IMA_X509_PATH
+#else
+#define IMA_X509_PATH "/etc/keys/x509_ima.der"
+#endif
+
/* name for boot aggregate entry */
static const char *boot_aggregate_name = "boot_aggregate";
int ima_used_chip;
@@ -91,6 +97,17 @@ err_out:
return result;
}
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void)
+{
+ int unset_flags = ima_policy_flag & IMA_APPRAISE;
+
+ ima_policy_flag &= ~unset_flags;
+ integrity_load_x509(INTEGRITY_KEYRING_IMA, IMA_X509_PATH);
+ ima_policy_flag |= unset_flags;
+}
+#endif
+
int __init ima_init(void)
{
u8 pcr_i[TPM_DIGEST_SIZE];
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 62f59eca32d3..eeee00dce729 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -143,7 +143,7 @@ void ima_file_free(struct file *file)
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
- if (!iint_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
return;
iint = integrity_iint_find(inode);
@@ -246,7 +246,8 @@ out_digsig:
rc = -EACCES;
kfree(xattr_value);
out_free:
- kfree(pathbuf);
+ if (pathbuf)
+ __putname(pathbuf);
out:
mutex_unlock(&inode->i_mutex);
if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index cdc620b2152f..d1eefb9d65fb 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -100,7 +100,13 @@ static struct ima_rule_entry default_appraise_rules[] = {
{.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
+#else
+ /* force signature */
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID,
+ .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
+#endif
};
static LIST_HEAD(ima_default_rules);
@@ -356,19 +362,8 @@ void __init ima_init_policy(void)
*/
void ima_update_policy(void)
{
- static const char op[] = "policy_update";
- const char *cause = "already-exists";
- int result = 1;
- int audit_info = 0;
-
- if (ima_rules == &ima_default_rules) {
- ima_rules = &ima_policy_rules;
- ima_update_policy_flag();
- cause = "complete";
- result = 0;
- }
- integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
- NULL, op, cause, result, audit_info);
+ ima_rules = &ima_policy_rules;
+ ima_update_policy_flag();
}
enum {
@@ -686,13 +681,12 @@ ssize_t ima_parse_add_rule(char *rule)
ssize_t result, len;
int audit_info = 0;
- /* Prevent installed policy from changing */
- if (ima_rules != &ima_default_rules) {
- integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
- NULL, op, "already-exists",
- -EACCES, audit_info);
- return -EACCES;
- }
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+ p += strspn(p, " \t");
+
+ if (*p == '#' || *p == '\0')
+ return len;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
@@ -703,14 +697,6 @@ ssize_t ima_parse_add_rule(char *rule)
INIT_LIST_HEAD(&entry->list);
- p = strsep(&rule, "\n");
- len = strlen(p) + 1;
-
- if (*p == '#') {
- kfree(entry);
- return len;
- }
-
result = ima_parse_rule(p, entry);
if (result) {
kfree(entry);
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index e854862c9337..0b7404ebfa80 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -24,6 +24,7 @@ static struct ima_template_desc defined_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "", .fmt = ""}, /* placeholder for a custom format */
};
static struct ima_template_field supported_fields[] = {
@@ -41,19 +42,28 @@ static struct ima_template_field supported_fields[] = {
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields);
static int __init ima_template_setup(char *str)
{
struct ima_template_desc *template_desc;
int template_len = strlen(str);
+ if (ima_template)
+ return 1;
+
/*
* Verify that a template with the supplied name exists.
* If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
*/
template_desc = lookup_template_desc(str);
- if (!template_desc)
+ if (!template_desc) {
+ pr_err("template %s not found, using %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
return 1;
+ }
/*
* Verify whether the current hash algorithm is supported
@@ -70,6 +80,25 @@ static int __init ima_template_setup(char *str)
}
__setup("ima_template=", ima_template_setup);
+static int __init ima_template_fmt_setup(char *str)
+{
+ int num_templates = ARRAY_SIZE(defined_templates);
+
+ if (ima_template)
+ return 1;
+
+ if (template_desc_init_fields(str, NULL, NULL) < 0) {
+ pr_err("format string '%s' not valid, using template %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ defined_templates[num_templates - 1].fmt = str;
+ ima_template = defined_templates + num_templates - 1;
+ return 1;
+}
+__setup("ima_template_fmt=", ima_template_fmt_setup);
+
static struct ima_template_desc *lookup_template_desc(const char *name)
{
int i;
@@ -113,43 +142,46 @@ static int template_desc_init_fields(const char *template_fmt,
struct ima_template_field ***fields,
int *num_fields)
{
- char *c, *template_fmt_copy, *template_fmt_ptr;
+ const char *template_fmt_ptr;
+ struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
int template_num_fields = template_fmt_size(template_fmt);
- int i, result = 0;
+ int i, len;
- if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
+ pr_err("format string '%s' contains too many fields\n",
+ template_fmt);
return -EINVAL;
-
- /* copying is needed as strsep() modifies the original buffer */
- template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
- if (template_fmt_copy == NULL)
- return -ENOMEM;
-
- *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
- if (*fields == NULL) {
- result = -ENOMEM;
- goto out;
}
- template_fmt_ptr = template_fmt_copy;
- for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
- i < template_num_fields; i++) {
- struct ima_template_field *f = lookup_template_field(c);
+ for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
+ i++, template_fmt_ptr += len + 1) {
+ char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
+
+ len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
+ if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
+ pr_err("Invalid field with length %d\n", len);
+ return -EINVAL;
+ }
- if (!f) {
- result = -ENOENT;
- goto out;
+ memcpy(tmp_field_id, template_fmt_ptr, len);
+ tmp_field_id[len] = '\0';
+ found_fields[i] = lookup_template_field(tmp_field_id);
+ if (!found_fields[i]) {
+ pr_err("field '%s' not found\n", tmp_field_id);
+ return -ENOENT;
}
- (*fields)[i] = f;
}
- *num_fields = i;
-out:
- if (result < 0) {
- kfree(*fields);
- *fields = NULL;
+
+ if (fields && num_fields) {
+ *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+ memcpy(*fields, found_fields, i * sizeof(*fields));
+ *num_fields = i;
}
- kfree(template_fmt_copy);
- return result;
+
+ return 0;
}
struct ima_template_desc *ima_template_desc_current(void)
@@ -163,8 +195,15 @@ struct ima_template_desc *ima_template_desc_current(void)
int __init ima_init_template(void)
{
struct ima_template_desc *template = ima_template_desc_current();
+ int result;
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
- return template_desc_init_fields(template->fmt,
- &(template->fields),
- &(template->num_fields));
+ return result;
}
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 9d1c2ebfe12a..0fc9519fefa9 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -120,6 +120,10 @@ struct integrity_iint_cache {
*/
struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+int integrity_kernel_read(struct file *file, loff_t offset,
+ char *addr, unsigned long count);
+int __init integrity_read_file(const char *path, char **data);
+
#define INTEGRITY_KEYRING_EVM 0
#define INTEGRITY_KEYRING_MODULE 1
#define INTEGRITY_KEYRING_IMA 2
@@ -130,7 +134,8 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
const char *digest, int digestlen);
-int integrity_init_keyring(const unsigned int id);
+int __init integrity_init_keyring(const unsigned int id);
+int __init integrity_load_x509(const unsigned int id, char *path);
#else
static inline int integrity_digsig_verify(const unsigned int id,
@@ -144,6 +149,7 @@ static inline int integrity_init_keyring(const unsigned int id)
{
return 0;
}
+
#endif /* CONFIG_INTEGRITY_SIGNATURE */
#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
@@ -157,6 +163,14 @@ static inline int asymmetric_verify(struct key *keyring, const char *sig,
}
#endif
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void);
+#else
+static inline void ima_load_x509(void)
+{
+}
+#endif
+
#ifdef CONFIG_INTEGRITY_AUDIT
/* declarations */
void integrity_audit_msg(int audit_msgno, struct inode *inode,
@@ -170,6 +184,3 @@ static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
{
}
#endif
-
-/* set during initialization */
-extern int iint_initialized;
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index db9675db1026..7bed4ad7cd76 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -1017,10 +1017,13 @@ static int __init init_encrypted(void)
ret = encrypted_shash_alloc();
if (ret < 0)
return ret;
+ ret = aes_get_sizes();
+ if (ret < 0)
+ goto out;
ret = register_key_type(&key_type_encrypted);
if (ret < 0)
goto out;
- return aes_get_sizes();
+ return 0;
out:
encrypted_shash_release();
return ret;
diff --git a/security/keys/key.c b/security/keys/key.c
index e17ba6aefdc0..aee2ec5a18fc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -276,12 +276,10 @@ struct key *key_alloc(struct key_type *type, const char *desc,
if (!key)
goto no_memory_2;
- if (desc) {
- key->index_key.desc_len = desclen;
- key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
- if (!key->description)
- goto no_memory_3;
- }
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
+ if (!key->description)
+ goto no_memory_3;
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c603b20356ad..6da7532893a1 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -95,8 +95,6 @@
#include "audit.h"
#include "avc_ss.h"
-extern struct security_operations *security_ops;
-
/* SECMARK reference count */
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 5b970ffde024..1158430f5bb9 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -142,8 +142,7 @@ int smk_access(struct smack_known *subject, struct smack_known *object,
* Tasks cannot be assigned the internet label.
* An internet subject can access any object.
*/
- if (object == &smack_known_web ||
- subject == &smack_known_web)
+ if (object == &smack_known_web || subject == &smack_known_web)
goto out_audit;
/*
* A star object can be accessed by any subject.
@@ -157,10 +156,11 @@ int smk_access(struct smack_known *subject, struct smack_known *object,
if (subject->smk_known == object->smk_known)
goto out_audit;
/*
- * A hat subject can read any object.
- * A floor object can be read by any subject.
+ * A hat subject can read or lock any object.
+ * A floor object can be read or locked by any subject.
*/
- if ((request & MAY_ANYREAD) == request) {
+ if ((request & MAY_ANYREAD) == request ||
+ (request & MAY_LOCK) == request) {
if (object == &smack_known_floor)
goto out_audit;
if (subject == &smack_known_hat)
@@ -452,10 +452,9 @@ char *smk_parse_smack(const char *string, int len)
return NULL;
smack = kzalloc(i + 1, GFP_KERNEL);
- if (smack != NULL) {
- strncpy(smack, string, i + 1);
- smack[i] = '\0';
- }
+ if (smack != NULL)
+ strncpy(smack, string, i);
+
return smack;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 433ae61e7f42..f1b17a476e12 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -53,6 +53,7 @@
#define SMK_SENDING 2
LIST_HEAD(smk_ipv6_port_list);
+static struct kmem_cache *smack_inode_cache;
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
static void smk_bu_mode(int mode, char *s)
@@ -240,7 +241,7 @@ struct inode_smack *new_inode_smack(struct smack_known *skp)
{
struct inode_smack *isp;
- isp = kzalloc(sizeof(struct inode_smack), GFP_NOFS);
+ isp = kmem_cache_zalloc(smack_inode_cache, GFP_NOFS);
if (isp == NULL)
return NULL;
@@ -767,7 +768,7 @@ static int smack_inode_alloc_security(struct inode *inode)
*/
static void smack_inode_free_security(struct inode *inode)
{
- kfree(inode->i_security);
+ kmem_cache_free(smack_inode_cache, inode->i_security);
inode->i_security = NULL;
}
@@ -4264,10 +4265,16 @@ static __init int smack_init(void)
if (!security_module_enable(&smack_ops))
return 0;
+ smack_inode_cache = KMEM_CACHE(inode_smack, 0);
+ if (!smack_inode_cache)
+ return -ENOMEM;
+
tsp = new_task_smack(&smack_known_floor, &smack_known_floor,
GFP_KERNEL);
- if (tsp == NULL)
+ if (tsp == NULL) {
+ kmem_cache_destroy(smack_inode_cache);
return -ENOMEM;
+ }
printk(KERN_INFO "Smack: Initializing.\n");
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 401107b85d30..23c371ecfb6b 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -243,13 +243,7 @@ static int onyx_snd_capture_source_info(struct snd_kcontrol *kcontrol,
{
static const char * const texts[] = { "Line-In", "Microphone" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int onyx_snd_capture_source_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index cf3c6303b7e3..364c7c4416e8 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -478,15 +478,9 @@ static struct snd_kcontrol_new drc_switch_control = {
static int tas_snd_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Line-In", "Microphone" };
+ static const char * const texts[] = { "Line-In", "Microphone" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int tas_snd_capture_source_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index a80d5ea87ccd..4e2b4fbf2496 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -79,8 +79,7 @@ static void i2sbus_release_dev(struct device *dev)
if (i2sdev->out.dbdma) iounmap(i2sdev->out.dbdma);
if (i2sdev->in.dbdma) iounmap(i2sdev->in.dbdma);
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
- if (i2sdev->allocated_resource[i])
- release_and_free_resource(i2sdev->allocated_resource[i]);
+ release_and_free_resource(i2sdev->allocated_resource[i]);
free_dbdma_descriptor_ring(i2sdev, &i2sdev->out.dbdma_ring);
free_dbdma_descriptor_ring(i2sdev, &i2sdev->in.dbdma_ring);
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++)
@@ -323,8 +322,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
if (dev->out.dbdma) iounmap(dev->out.dbdma);
if (dev->in.dbdma) iounmap(dev->in.dbdma);
for (i=0;i<3;i++)
- if (dev->allocated_resource[i])
- release_and_free_resource(dev->allocated_resource[i]);
+ release_and_free_resource(dev->allocated_resource[i]);
mutex_destroy(&dev->lock);
kfree(dev);
return 0;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 3a10df6688ee..38590b322c54 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -241,7 +241,6 @@ static struct platform_driver pxa2xx_ac97_driver = {
.remove = pxa2xx_ac97_remove,
.driver = {
.name = "pxa2xx-ac97",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &pxa2xx_ac97_pm_ops,
#endif
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index a61d7a9a995e..01f8fdc42b1b 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -200,9 +200,7 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
} else {
printk(KERN_ERR "DMA error on channel %d (DCSR=%#x)\n",
dma_ch, dcsr);
- snd_pcm_stream_lock(substream);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stop_xrun(substream);
}
}
EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 31061e3521d4..558618802000 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -242,7 +242,7 @@ static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- clk_enable(dac->sample_clk);
+ clk_prepare_enable(dac->sample_clk);
retval = dw_dma_cyclic_start(dac->dma.chan);
if (retval)
goto out;
@@ -254,7 +254,7 @@ static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd)
dw_dma_cyclic_stop(dac->dma.chan);
dac_writel(dac, DATA, 0);
dac_writel(dac, CTRL, 0);
- clk_disable(dac->sample_clk);
+ clk_disable_unprepare(dac->sample_clk);
break;
default:
retval = -EINVAL;
@@ -429,7 +429,7 @@ static int atmel_abdac_probe(struct platform_device *pdev)
retval = PTR_ERR(sample_clk);
goto out_put_pclk;
}
- clk_enable(pclk);
+ clk_prepare_enable(pclk);
retval = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1,
SNDRV_DEFAULT_STR1, THIS_MODULE,
@@ -528,7 +528,7 @@ out_free_card:
snd_card_free(card);
out_put_sample_clk:
clk_put(sample_clk);
- clk_disable(pclk);
+ clk_disable_unprepare(pclk);
out_put_pclk:
clk_put(pclk);
return retval;
@@ -541,8 +541,8 @@ static int atmel_abdac_suspend(struct device *pdev)
struct atmel_abdac *dac = card->private_data;
dw_dma_cyclic_stop(dac->dma.chan);
- clk_disable(dac->sample_clk);
- clk_disable(dac->pclk);
+ clk_disable_unprepare(dac->sample_clk);
+ clk_disable_unprepare(dac->pclk);
return 0;
}
@@ -552,8 +552,8 @@ static int atmel_abdac_resume(struct device *pdev)
struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_abdac *dac = card->private_data;
- clk_enable(dac->pclk);
- clk_enable(dac->sample_clk);
+ clk_prepare_enable(dac->pclk);
+ clk_prepare_enable(dac->sample_clk);
if (test_bit(DMA_READY, &dac->flags))
dw_dma_cyclic_start(dac->dma.chan);
@@ -572,7 +572,7 @@ static int atmel_abdac_remove(struct platform_device *pdev)
struct atmel_abdac *dac = get_dac(card);
clk_put(dac->sample_clk);
- clk_disable(dac->pclk);
+ clk_disable_unprepare(dac->pclk);
clk_put(dac->pclk);
dma_release_channel(dac->dma.chan);
@@ -588,7 +588,6 @@ static struct platform_driver atmel_abdac_driver = {
.remove = atmel_abdac_remove,
.driver = {
.name = "atmel_abdac",
- .owner = THIS_MODULE,
.pm = ATMEL_ABDAC_PM_OPS,
},
};
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index b59427d5a697..4f6b14d704f3 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -773,7 +773,7 @@ static int atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
return err;
}
retval = snd_pcm_new(chip->card, chip->card->shortname,
- chip->pdev->id, playback, capture, &pcm);
+ 0, playback, capture, &pcm);
if (retval)
return retval;
@@ -944,7 +944,7 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "no peripheral clock\n");
return PTR_ERR(pclk);
}
- clk_enable(pclk);
+ clk_prepare_enable(pclk);
retval = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1,
SNDRV_DEFAULT_STR1, THIS_MODULE,
@@ -1122,7 +1122,7 @@ err_ioremap:
err_request_irq:
snd_card_free(card);
err_snd_card_new:
- clk_disable(pclk);
+ clk_disable_unprepare(pclk);
clk_put(pclk);
return retval;
}
@@ -1139,7 +1139,7 @@ static int atmel_ac97c_suspend(struct device *pdev)
if (test_bit(DMA_TX_READY, &chip->flags))
dw_dma_cyclic_stop(chip->dma.tx_chan);
}
- clk_disable(chip->pclk);
+ clk_disable_unprepare(chip->pclk);
return 0;
}
@@ -1149,7 +1149,7 @@ static int atmel_ac97c_resume(struct device *pdev)
struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
- clk_enable(chip->pclk);
+ clk_prepare_enable(chip->pclk);
if (cpu_is_at32ap7000()) {
if (test_bit(DMA_RX_READY, &chip->flags))
dw_dma_cyclic_start(chip->dma.rx_chan);
@@ -1177,7 +1177,7 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
ac97c_writel(chip, COMR, 0);
ac97c_writel(chip, MR, 0);
- clk_disable(chip->pclk);
+ clk_disable_unprepare(chip->pclk);
clk_put(chip->pclk);
iounmap(chip->regs);
free_irq(chip->irq, chip);
@@ -1203,7 +1203,6 @@ static struct platform_driver atmel_ac97c_driver = {
.remove = atmel_ac97c_remove,
.driver = {
.name = "atmel_ac97c",
- .owner = THIS_MODULE,
.pm = ATMEL_AC97C_PM_OPS,
},
};
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 394a38909f6b..4daf2f58261c 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -14,6 +14,9 @@ snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_timer.o pcm_misc.o \
pcm_memory.o memalloc.o
snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
+# for trace-points
+CFLAGS_pcm_lib.o := -I$(src)
+
snd-pcm-dmaengine-objs := pcm_dmaengine.o
snd-rawmidi-objs := rawmidi.o
diff --git a/sound/core/control.c b/sound/core/control.c
index b9611344ff9e..bb96a467e88d 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -141,6 +141,16 @@ static int snd_ctl_release(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * snd_ctl_notify - Send notification to user-space for a control change
+ * @card: the card to send notification
+ * @mask: the event mask, SNDRV_CTL_EVENT_*
+ * @id: the ctl element id to send notification
+ *
+ * This function adds an event record with the given id and mask, appends
+ * it to the list and wakes up user-space for notification. This can be
+ * called in atomic context.
+ */
void snd_ctl_notify(struct snd_card *card, unsigned int mask,
struct snd_ctl_elem_id *id)
{
@@ -179,7 +189,6 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
}
read_unlock(&card->ctl_files_rwlock);
}
-
EXPORT_SYMBOL(snd_ctl_notify);
/**
@@ -261,7 +270,6 @@ struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
kctl.private_data = private_data;
return snd_ctl_new(&kctl, access);
}
-
EXPORT_SYMBOL(snd_ctl_new1);
/**
@@ -280,7 +288,6 @@ void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
kfree(kcontrol);
}
}
-
EXPORT_SYMBOL(snd_ctl_free_one);
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
@@ -376,7 +383,6 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
snd_ctl_free_one(kcontrol);
return err;
}
-
EXPORT_SYMBOL(snd_ctl_add);
/**
@@ -471,7 +477,6 @@ int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
snd_ctl_free_one(kcontrol);
return 0;
}
-
EXPORT_SYMBOL(snd_ctl_remove);
/**
@@ -499,7 +504,6 @@ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
up_write(&card->controls_rwsem);
return ret;
}
-
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
@@ -568,7 +572,7 @@ int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
ret = -ENOENT;
goto unlock;
}
- index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
+ index_offset = snd_ctl_get_ioff(kctl, id);
vd = &kctl->vd[index_offset];
ret = 0;
if (active) {
@@ -617,7 +621,6 @@ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
up_write(&card->controls_rwsem);
return 0;
}
-
EXPORT_SYMBOL(snd_ctl_rename_id);
/**
@@ -645,7 +648,6 @@ struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numi
}
return NULL;
}
-
EXPORT_SYMBOL(snd_ctl_find_numid);
/**
@@ -687,7 +689,6 @@ struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
}
return NULL;
}
-
EXPORT_SYMBOL(snd_ctl_find_id);
static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
@@ -1526,19 +1527,28 @@ static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *
return 0;
}
+/**
+ * snd_ctl_register_ioctl - register the device-specific control-ioctls
+ * @fcn: ioctl callback function
+ *
+ * Called from each device manager such as pcm.c, hwdep.c, etc.
+ */
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls);
}
-
EXPORT_SYMBOL(snd_ctl_register_ioctl);
#ifdef CONFIG_COMPAT
+/**
+ * snd_ctl_register_ioctl_compat - register the device-specific 32bit compat
+ * control-ioctls
+ * @fcn: ioctl callback function
+ */
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls);
}
-
EXPORT_SYMBOL(snd_ctl_register_ioctl_compat);
#endif
@@ -1566,19 +1576,26 @@ static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
return -EINVAL;
}
+/**
+ * snd_ctl_unregister_ioctl - de-register the device-specific control-ioctls
+ * @fcn: ioctl callback function to unregister
+ */
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls);
}
-
EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
#ifdef CONFIG_COMPAT
+/**
+ * snd_ctl_unregister_ioctl_compat - de-register the device-specific compat 32bit
+ * control-ioctls
+ * @fcn: ioctl callback function to unregister
+ */
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls);
}
-
EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat);
#endif
@@ -1702,6 +1719,16 @@ int snd_ctl_create(struct snd_card *card)
/*
* Frequently used control callbacks/helpers
*/
+
+/**
+ * snd_ctl_boolean_mono_info - Helper function for a standard boolean info
+ * callback with a mono channel
+ * @kcontrol: the kcontrol instance
+ * @uinfo: info to store
+ *
+ * This is a function that can be used as an info callback for a standard
+ * boolean control with a single mono channel.
+ */
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -1711,9 +1738,17 @@ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
uinfo->value.integer.max = 1;
return 0;
}
-
EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
+/**
+ * snd_ctl_boolean_stereo_info - Helper function for a standard boolean info
+ * callback with two (stereo) channels
+ * @kcontrol: the kcontrol instance
+ * @uinfo: info to store
+ *
+ * This is a function that can be used as an info callback for a standard
+ * boolean control with two (stereo) channels.
+ */
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -1723,7 +1758,6 @@ int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
uinfo->value.integer.max = 1;
return 0;
}
-
EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
/**
@@ -1745,8 +1779,13 @@ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
info->count = channels;
info->value.enumerated.items = items;
+ if (!items)
+ return 0;
if (info->value.enumerated.item >= items)
info->value.enumerated.item = items - 1;
+ WARN(strlen(names[info->value.enumerated.item]) >= sizeof(info->value.enumerated.name),
+ "ALSA: too long item name '%s'\n",
+ names[info->value.enumerated.item]);
strlcpy(info->value.enumerated.name,
names[info->value.enumerated.item],
sizeof(info->value.enumerated.name));
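The hunk above also hardens snd_ctl_enum_info() against an empty item list and warns on over-long item names; several drivers later in this diff (mts64, vx_mixer) are converted to call it instead of open-coding the info callback. A minimal sketch of such a callback; the control texts and function name are illustrative:

#include <linux/kernel.h>
#include <sound/core.h>
#include <sound/control.h>

static int my_mode_info(struct snd_kcontrol *kcontrol,
                        struct snd_ctl_elem_info *uinfo)
{
        static const char * const texts[] = { "Off", "On", "Auto" };

        /* fills type/count/items, clamps the selected item, copies its name */
        return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}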
diff --git a/sound/core/init.c b/sound/core/init.c
index 7bdfd19e24a8..074875d68c15 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -438,17 +438,6 @@ int snd_card_disconnect(struct snd_card *card)
EXPORT_SYMBOL(snd_card_disconnect);
-/**
- * snd_card_free - frees given soundcard structure
- * @card: soundcard structure
- *
- * This function releases the soundcard structure and the all assigned
- * devices automatically. That is, you don't have to release the devices
- * by yourself.
- *
- * Return: Zero. Frees all associated devices and frees the control
- * interface associated to given soundcard.
- */
static int snd_card_do_free(struct snd_card *card)
{
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -469,6 +458,15 @@ static int snd_card_do_free(struct snd_card *card)
return 0;
}
+/**
+ * snd_card_free_when_closed - Disconnect the card and free it eventually
+ * @card: soundcard structure
+ *
+ * Unlike snd_card_free(), this function doesn't try to release the card
+ * resources immediately, but disconnects the card first. If the card
+ * is still in use, the function returns before freeing the resources.
+ * The card resources will be freed when the refcount gets to zero.
+ */
int snd_card_free_when_closed(struct snd_card *card)
{
int ret = snd_card_disconnect(card);
@@ -479,6 +477,19 @@ int snd_card_free_when_closed(struct snd_card *card)
}
EXPORT_SYMBOL(snd_card_free_when_closed);
+/**
+ * snd_card_free - frees given soundcard structure
+ * @card: soundcard structure
+ *
+ * This function releases the soundcard structure and all the assigned
+ * devices automatically. That is, you don't have to release the devices
+ * by yourself.
+ *
+ * This function waits until all the resources are properly released.
+ *
+ * Return: Zero. Frees all associated devices and frees the control
+ * interface associated to given soundcard.
+ */
int snd_card_free(struct snd_card *card)
{
struct completion released;
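The reshuffled kerneldoc above spells out the two teardown paths. A sketch of where each typically belongs; the my_* names are illustrative and not from the patch:

#include <sound/core.h>

/* Surprise removal (hot-unplug): user-space may still hold open handles. */
static void my_disconnect(struct snd_card *card)
{
        /* disconnect now, free when the last reference goes away */
        snd_card_free_when_closed(card);
}

/* Orderly driver removal: block until everything is released. */
static void my_remove(struct snd_card *card)
{
        snd_card_free(card);
}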
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index c6ff94ab1ad6..cfc56c806964 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -220,6 +220,10 @@ static char *snd_pcm_format_names[] = {
FORMAT(DSD_U32_BE),
};
+/**
+ * snd_pcm_format_name - Return a name string for the given PCM format
+ * @format: PCM format
+ */
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
if ((__force unsigned int)format >= ARRAY_SIZE(snd_pcm_format_names))
@@ -481,6 +485,19 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+static void snd_pcm_xrun_injection_write(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct snd_pcm_substream *substream = entry->private_data;
+ struct snd_pcm_runtime *runtime;
+
+ snd_pcm_stream_lock_irq(substream);
+ runtime = substream->runtime;
+ if (runtime && runtime->status->state == SNDRV_PCM_STATE_RUNNING)
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irq(substream);
+}
+
static void snd_pcm_xrun_debug_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
@@ -612,6 +629,22 @@ static int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream)
}
substream->proc_status_entry = entry;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ entry = snd_info_create_card_entry(card, "xrun_injection",
+ substream->proc_root);
+ if (entry) {
+ entry->private_data = substream;
+ entry->c.text.read = NULL;
+ entry->c.text.write = snd_pcm_xrun_injection_write;
+ entry->mode = S_IFREG | S_IWUSR;
+ if (snd_info_register(entry) < 0) {
+ snd_info_free_entry(entry);
+ entry = NULL;
+ }
+ }
+ substream->proc_xrun_injection_entry = entry;
+#endif /* CONFIG_SND_PCM_XRUN_DEBUG */
+
return 0;
}
@@ -625,6 +658,10 @@ static int snd_pcm_substream_proc_done(struct snd_pcm_substream *substream)
substream->proc_sw_params_entry = NULL;
snd_info_free_entry(substream->proc_status_entry);
substream->proc_status_entry = NULL;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ snd_info_free_entry(substream->proc_xrun_injection_entry);
+ substream->proc_xrun_injection_entry = NULL;
+#endif
snd_info_free_entry(substream->proc_root);
substream->proc_root = NULL;
return 0;
@@ -709,7 +746,6 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
}
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_new_stream);
static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
@@ -1157,6 +1193,15 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
return 0;
}
+/**
+ * snd_pcm_notify - Add/remove the notify list
+ * @notify: PCM notify list
+ * @nfree: 0 = register, 1 = unregister
+ *
+ * This adds the given notifier to the global list so that the callback is
+ * called for each registered PCM device. This exists only for PCM OSS
+ * emulation, so far.
+ */
int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
{
struct snd_pcm *pcm;
@@ -1179,7 +1224,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
mutex_unlock(&register_mutex);
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_notify);
#ifdef CONFIG_PROC_FS
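The new xrun_injection node above is a write-only text entry in the card's proc tree. A sketch of the same snd_info registration pattern for a hypothetical trigger node; the my_* names are illustrative and error handling is reduced to the essentials:

#include <linux/stat.h>
#include <sound/core.h>
#include <sound/info.h>

static void my_trigger_write(struct snd_info_entry *entry,
                             struct snd_info_buffer *buffer)
{
        /* invoked whenever user-space writes to the proc node */
}

static int my_proc_init(struct snd_card *card, void *private_data)
{
        struct snd_info_entry *entry;

        entry = snd_info_create_card_entry(card, "my_trigger",
                                           card->proc_root);
        if (!entry)
                return -ENOMEM;
        entry->private_data = private_data;
        entry->c.text.write = my_trigger_write;
        entry->mode = S_IFREG | S_IWUSR;        /* write-only, root */
        if (snd_info_register(entry) < 0) {
                snd_info_free_entry(entry);
                return -ENOMEM;
        }
        return 0;
}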
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index dfc28542a007..ec9e7866177f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -32,6 +32,15 @@
#include <sound/pcm_params.h>
#include <sound/timer.h>
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+#define CREATE_TRACE_POINTS
+#include "pcm_trace.h"
+#else
+#define trace_hwptr(substream, pos, in_interrupt)
+#define trace_xrun(substream)
+#define trace_hw_ptr_error(substream, reason)
+#endif
+
/*
* fill ring buffer with silence
* runtime->silence_start: starting pointer to silence area
@@ -146,10 +155,6 @@ EXPORT_SYMBOL(snd_pcm_debug_name);
#define XRUN_DEBUG_BASIC (1<<0)
#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
-#define XRUN_DEBUG_PERIODUPDATE (1<<3) /* full period update info */
-#define XRUN_DEBUG_HWPTRUPDATE (1<<4) /* full hwptr update info */
-#define XRUN_DEBUG_LOG (1<<5) /* show last 10 positions on err */
-#define XRUN_DEBUG_LOGONCE (1<<6) /* do above only once */
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
@@ -168,6 +173,7 @@ static void xrun(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ trace_xrun(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
@@ -180,97 +186,19 @@ static void xrun(struct snd_pcm_substream *substream)
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
-#define hw_ptr_error(substream, fmt, args...) \
+#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
do { \
+ trace_hw_ptr_error(substream, reason); \
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
- xrun_log_show(substream); \
- pr_err_ratelimited("ALSA: PCM: " fmt, ##args); \
+ pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
+ (in_interrupt) ? 'Q' : 'P', ##args); \
dump_stack_on_xrun(substream); \
} \
} while (0)
-#define XRUN_LOG_CNT 10
-
-struct hwptr_log_entry {
- unsigned int in_interrupt;
- unsigned long jiffies;
- snd_pcm_uframes_t pos;
- snd_pcm_uframes_t period_size;
- snd_pcm_uframes_t buffer_size;
- snd_pcm_uframes_t old_hw_ptr;
- snd_pcm_uframes_t hw_ptr_base;
-};
-
-struct snd_pcm_hwptr_log {
- unsigned int idx;
- unsigned int hit: 1;
- struct hwptr_log_entry entries[XRUN_LOG_CNT];
-};
-
-static void xrun_log(struct snd_pcm_substream *substream,
- snd_pcm_uframes_t pos, int in_interrupt)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_pcm_hwptr_log *log = runtime->hwptr_log;
- struct hwptr_log_entry *entry;
-
- if (log == NULL) {
- log = kzalloc(sizeof(*log), GFP_ATOMIC);
- if (log == NULL)
- return;
- runtime->hwptr_log = log;
- } else {
- if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
- return;
- }
- entry = &log->entries[log->idx];
- entry->in_interrupt = in_interrupt;
- entry->jiffies = jiffies;
- entry->pos = pos;
- entry->period_size = runtime->period_size;
- entry->buffer_size = runtime->buffer_size;
- entry->old_hw_ptr = runtime->status->hw_ptr;
- entry->hw_ptr_base = runtime->hw_ptr_base;
- log->idx = (log->idx + 1) % XRUN_LOG_CNT;
-}
-
-static void xrun_log_show(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_hwptr_log *log = substream->runtime->hwptr_log;
- struct hwptr_log_entry *entry;
- char name[16];
- unsigned int idx;
- int cnt;
-
- if (log == NULL)
- return;
- if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
- return;
- snd_pcm_debug_name(substream, name, sizeof(name));
- for (cnt = 0, idx = log->idx; cnt < XRUN_LOG_CNT; cnt++) {
- entry = &log->entries[idx];
- if (entry->period_size == 0)
- break;
- pr_info("hwptr log: %s: %sj=%lu, pos=%ld/%ld/%ld, "
- "hwptr=%ld/%ld\n",
- name, entry->in_interrupt ? "[Q] " : "",
- entry->jiffies,
- (unsigned long)entry->pos,
- (unsigned long)entry->period_size,
- (unsigned long)entry->buffer_size,
- (unsigned long)entry->old_hw_ptr,
- (unsigned long)entry->hw_ptr_base);
- idx++;
- idx %= XRUN_LOG_CNT;
- }
- log->hit = 1;
-}
-
#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
#define hw_ptr_error(substream, fmt, args...) do { } while (0)
-#define xrun_log(substream, pos, in_interrupt) do { } while (0)
-#define xrun_log_show(substream) do { } while (0)
#endif
@@ -343,17 +271,15 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
if (printk_ratelimit()) {
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
- xrun_log_show(substream);
pcm_err(substream->pcm,
- "XRUN: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
name, pos, runtime->buffer_size,
runtime->period_size);
}
pos = 0;
}
pos -= pos % runtime->min_align;
- if (xrun_debug(substream, XRUN_DEBUG_LOG))
- xrun_log(substream, pos, in_interrupt);
+ trace_hwptr(substream, pos, in_interrupt);
hw_base = runtime->hw_ptr_base;
new_hw_ptr = hw_base + pos;
if (in_interrupt) {
@@ -388,22 +314,6 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
delta = new_hw_ptr - old_hw_ptr;
if (delta < 0)
delta += runtime->boundary;
- if (xrun_debug(substream, in_interrupt ?
- XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
- char name[16];
- snd_pcm_debug_name(substream, name, sizeof(name));
- pcm_dbg(substream->pcm,
- "%s_update: %s: pos=%u/%u/%u, hwptr=%ld/%ld/%ld/%ld\n",
- in_interrupt ? "period" : "hwptr",
- name,
- (unsigned int)pos,
- (unsigned int)runtime->period_size,
- (unsigned int)runtime->buffer_size,
- (unsigned long)delta,
- (unsigned long)old_hw_ptr,
- (unsigned long)new_hw_ptr,
- (unsigned long)runtime->hw_ptr_base);
- }
if (runtime->no_period_wakeup) {
snd_pcm_sframes_t xrun_threshold;
@@ -431,13 +341,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
/* something must be really wrong */
if (delta >= runtime->buffer_size + runtime->period_size) {
- hw_ptr_error(substream,
- "Unexpected hw_pointer value %s"
- "(stream=%i, pos=%ld, new_hw_ptr=%ld, "
- "old_hw_ptr=%ld)\n",
- in_interrupt ? "[Q] " : "[P]",
- substream->stream, (long)pos,
- (long)new_hw_ptr, (long)old_hw_ptr);
+ hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
+ "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
+ substream->stream, (long)pos,
+ (long)new_hw_ptr, (long)old_hw_ptr);
return 0;
}
@@ -474,11 +381,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
delta--;
}
/* align hw_base to buffer_size */
- hw_ptr_error(substream,
- "hw_ptr skipping! %s"
- "(pos=%ld, delta=%ld, period=%ld, "
- "jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
- in_interrupt ? "[Q] " : "",
+ hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
+ "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
(long)pos, (long)hdelta,
(long)runtime->period_size, jdelta,
((hdelta * HZ) / runtime->rate), hw_base,
@@ -490,11 +394,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
}
no_jiffies_check:
if (delta > runtime->period_size + runtime->period_size / 2) {
- hw_ptr_error(substream,
- "Lost interrupts? %s"
- "(stream=%i, delta=%ld, new_hw_ptr=%ld, "
- "old_hw_ptr=%ld)\n",
- in_interrupt ? "[Q] " : "",
+ hw_ptr_error(substream, in_interrupt,
+ "Lost interrupts?",
+ "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
substream->stream, (long)delta,
(long)new_hw_ptr,
(long)old_hw_ptr);
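pcm_lib.c now reports hwptr updates, XRUNs and hw_ptr errors through tracepoints instead of the removed hwptr log. The wiring follows the usual tracepoint recipe: exactly one compilation unit defines CREATE_TRACE_POINTS before including the local trace header (hence the -I$(src) added to the core Makefile earlier), and the call sites compile away to nothing when the debug option is off. A sketch with illustrative config, file and event names:

/* my_driver.c: the one place that instantiates the tracepoints */
#ifdef CONFIG_MY_DRIVER_DEBUG
#define CREATE_TRACE_POINTS
#include "my_trace.h"           /* local header holding the TRACE_EVENT()s */
#else
/* stub out the call sites when tracing support is not built in */
#define trace_my_event(dev, pos)
#endif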
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 166d59cdc86b..095d9572ad2b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -35,9 +35,6 @@
#include <sound/timer.h>
#include <sound/minors.h>
#include <asm/io.h>
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-#include <dma-coherence.h>
-#endif
/*
* Compatibility
@@ -77,6 +74,14 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
+/**
+ * snd_pcm_stream_lock - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream's spinlock or mutex depending on the nonatomic
+ * flag of the given substream. This also takes the global link rw lock
+ * (or rw sem) to avoid races with linked streams.
+ */
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
if (substream->pcm->nonatomic) {
@@ -89,6 +94,12 @@ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
+/**
+ * snd_pcm_stream_unlock - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
+ */
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
if (substream->pcm->nonatomic) {
@@ -101,6 +112,14 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
+/**
+ * snd_pcm_stream_lock_irq - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
+ * IRQ (only when nonatomic is false). In the nonatomic case, this is
+ * identical to snd_pcm_stream_lock().
+ */
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
if (!substream->pcm->nonatomic)
@@ -109,6 +128,12 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+/**
+ * snd_pcm_stream_unlock_irq - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This is the counterpart to snd_pcm_stream_lock_irq().
+ */
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
snd_pcm_stream_unlock(substream);
@@ -127,6 +152,13 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+/**
+ * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This is the counterpart to snd_pcm_stream_lock_irqsave().
+ */
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags)
{
@@ -195,6 +227,21 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
return err;
}
+static bool hw_support_mmap(struct snd_pcm_substream *substream)
+{
+ if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
+ return false;
+ /* check architectures that return -EINVAL from dma_mmap_coherent() */
+ /* FIXME: this should be some global flag */
+#if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
+ defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
+ if (!substream->ops->mmap &&
+ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+ return false;
+#endif
+ return true;
+}
+
#undef RULES_DEBUG
#ifdef RULES_DEBUG
@@ -372,8 +419,12 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
}
hw = &substream->runtime->hw;
- if (!params->info)
+ if (!params->info) {
params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
+ if (!hw_support_mmap(substream))
+ params->info &= ~(SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID);
+ }
if (!params->fifo_size) {
m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
@@ -849,14 +900,19 @@ static int snd_pcm_action_single(struct action_ops *ops,
return res;
}
-/* call in mutex-protected context */
-static int snd_pcm_action_mutex(struct action_ops *ops,
- struct snd_pcm_substream *substream,
- int state)
+/*
+ * Note: call with stream lock
+ */
+static int snd_pcm_action(struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
{
int res;
- if (snd_pcm_stream_linked(substream)) {
+ if (!snd_pcm_stream_linked(substream))
+ return snd_pcm_action_single(ops, substream, state);
+
+ if (substream->pcm->nonatomic) {
if (!mutex_trylock(&substream->group->mutex)) {
mutex_unlock(&substream->self_group.mutex);
mutex_lock(&substream->group->mutex);
@@ -865,24 +921,6 @@ static int snd_pcm_action_mutex(struct action_ops *ops,
res = snd_pcm_action_group(ops, substream, state, 1);
mutex_unlock(&substream->group->mutex);
} else {
- res = snd_pcm_action_single(ops, substream, state);
- }
- return res;
-}
-
-/*
- * Note: call with stream lock
- */
-static int snd_pcm_action(struct action_ops *ops,
- struct snd_pcm_substream *substream,
- int state)
-{
- int res;
-
- if (substream->pcm->nonatomic)
- return snd_pcm_action_mutex(ops, substream, state);
-
- if (snd_pcm_stream_linked(substream)) {
if (!spin_trylock(&substream->group->lock)) {
spin_unlock(&substream->self_group.lock);
spin_lock(&substream->group->lock);
@@ -890,34 +928,10 @@ static int snd_pcm_action(struct action_ops *ops,
}
res = snd_pcm_action_group(ops, substream, state, 1);
spin_unlock(&substream->group->lock);
- } else {
- res = snd_pcm_action_single(ops, substream, state);
}
return res;
}
-static int snd_pcm_action_lock_mutex(struct action_ops *ops,
- struct snd_pcm_substream *substream,
- int state)
-{
- int res;
-
- down_read(&snd_pcm_link_rwsem);
- if (snd_pcm_stream_linked(substream)) {
- mutex_lock(&substream->group->mutex);
- mutex_lock(&substream->self_group.mutex);
- res = snd_pcm_action_group(ops, substream, state, 1);
- mutex_unlock(&substream->self_group.mutex);
- mutex_unlock(&substream->group->mutex);
- } else {
- mutex_lock(&substream->self_group.mutex);
- res = snd_pcm_action_single(ops, substream, state);
- mutex_unlock(&substream->self_group.mutex);
- }
- up_read(&snd_pcm_link_rwsem);
- return res;
-}
-
/*
* Note: don't use any locks before
*/
@@ -927,22 +941,9 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops,
{
int res;
- if (substream->pcm->nonatomic)
- return snd_pcm_action_lock_mutex(ops, substream, state);
-
- read_lock_irq(&snd_pcm_link_rwlock);
- if (snd_pcm_stream_linked(substream)) {
- spin_lock(&substream->group->lock);
- spin_lock(&substream->self_group.lock);
- res = snd_pcm_action_group(ops, substream, state, 1);
- spin_unlock(&substream->self_group.lock);
- spin_unlock(&substream->group->lock);
- } else {
- spin_lock(&substream->self_group.lock);
- res = snd_pcm_action_single(ops, substream, state);
- spin_unlock(&substream->self_group.lock);
- }
- read_unlock_irq(&snd_pcm_link_rwlock);
+ snd_pcm_stream_lock_irq(substream);
+ res = snd_pcm_action(ops, substream, state);
+ snd_pcm_stream_unlock_irq(substream);
return res;
}
@@ -1051,10 +1052,10 @@ static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
struct snd_pcm_runtime *runtime = substream->runtime;
if (runtime->status->state != state) {
snd_pcm_trigger_tstamp(substream);
+ runtime->status->state = state;
if (substream->timer)
snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP,
&runtime->trigger_tstamp);
- runtime->status->state = state;
}
wake_up(&runtime->sleep);
wake_up(&runtime->tsleep);
@@ -1097,6 +1098,28 @@ int snd_pcm_drain_done(struct snd_pcm_substream *substream)
SNDRV_PCM_STATE_SETUP);
}
+/**
+ * snd_pcm_stop_xrun - stop the running streams as XRUN
+ * @substream: the PCM substream instance
+ *
+ * This stops the given running substream (and all linked substreams) as XRUN.
+ * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
+ *
+ * Return: Zero if successful, or a negative error code.
+ */
+int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ snd_pcm_stream_lock_irqsave(substream, flags);
+ if (snd_pcm_running(substream))
+ ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
+
/*
* pause callbacks
*/
@@ -1203,11 +1226,11 @@ static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
+ runtime->status->suspended_state = runtime->status->state;
+ runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
if (substream->timer)
snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND,
&runtime->trigger_tstamp);
- runtime->status->suspended_state = runtime->status->state;
- runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
wake_up(&runtime->sleep);
wake_up(&runtime->tsleep);
}
@@ -1310,10 +1333,10 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_trigger_tstamp(substream);
+ runtime->status->state = runtime->status->suspended_state;
if (substream->timer)
snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME,
&runtime->trigger_tstamp);
- runtime->status->state = runtime->status->suspended_state;
}
static struct action_ops snd_pcm_action_resume = {
@@ -2070,7 +2093,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
- if (hw->info & SNDRV_PCM_INFO_MMAP) {
+ if (hw_support_mmap(substream)) {
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
@@ -3249,20 +3272,6 @@ static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
void *vaddr = substream->runtime->dma_area + ofs;
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
- if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
- return virt_to_page(CAC_ADDR(vaddr));
-#endif
-#if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE)
- if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) {
- dma_addr_t addr = substream->runtime->dma_addr + ofs;
- addr -= get_dma_offset(substream->dma_buffer.dev.dev);
- /* assume dma_handle set via pfn_to_phys() in
- * mm/dma-noncoherent.c
- */
- return pfn_to_page(addr >> PAGE_SHIFT);
- }
-#endif
return virt_to_page(vaddr);
}
@@ -3307,16 +3316,18 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
.fault = snd_pcm_mmap_data_fault,
};
-#ifndef ARCH_HAS_DMA_MMAP_COHERENT
-/* This should be defined / handled globally! */
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-#define ARCH_HAS_DMA_MMAP_COHERENT
-#endif
-#endif
-
/*
* mmap the DMA buffer on RAM
*/
+
+/**
+ * snd_pcm_lib_default_mmap - Default PCM data mmap function
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
+ * this function is invoked implicitly.
+ */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
@@ -3329,7 +3340,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
area->vm_end - area->vm_start, area->vm_page_prot);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef ARCH_HAS_DMA_MMAP_COHERENT
+#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
if (!substream->ops->page &&
substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
return dma_mmap_coherent(substream->dma_buffer.dev.dev,
@@ -3337,11 +3348,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
substream->runtime->dma_area,
substream->runtime->dma_addr,
area->vm_end - area->vm_start);
-#elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
- if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV &&
- !plat_device_is_coherent(substream->dma_buffer.dev.dev))
- area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-#endif /* ARCH_HAS_DMA_MMAP_COHERENT */
+#endif /* CONFIG_X86 */
/* mmap with fault handler */
area->vm_ops = &snd_pcm_vm_ops_data_fault;
return 0;
@@ -3352,6 +3359,15 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
* mmap the DMA buffer on I/O memory area
*/
#if SNDRV_PCM_INFO_MMAP_IOMEM
+/**
+ * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * When your hardware uses the iomapped pages as the hardware buffer and
+ * wants to mmap it, pass this function as mmap pcm_ops. Note that this
+ * is supposed to work only on limited architectures.
+ */
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
diff --git a/sound/core/pcm_trace.h b/sound/core/pcm_trace.h
new file mode 100644
index 000000000000..b63b654da5ff
--- /dev/null
+++ b/sound/core/pcm_trace.h
@@ -0,0 +1,110 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM snd_pcm
+#define TRACE_INCLUDE_FILE pcm_trace
+
+#if !defined(_PCM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _PCM_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hwptr,
+ TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_uframes_t pos, bool irq),
+ TP_ARGS(substream, pos, irq),
+ TP_STRUCT__entry(
+ __field( bool, in_interrupt )
+ __field( unsigned int, card )
+ __field( unsigned int, device )
+ __field( unsigned int, number )
+ __field( unsigned int, stream )
+ __field( snd_pcm_uframes_t, pos )
+ __field( snd_pcm_uframes_t, period_size )
+ __field( snd_pcm_uframes_t, buffer_size )
+ __field( snd_pcm_uframes_t, old_hw_ptr )
+ __field( snd_pcm_uframes_t, hw_ptr_base )
+ ),
+ TP_fast_assign(
+ __entry->in_interrupt = (irq);
+ __entry->card = (substream)->pcm->card->number;
+ __entry->device = (substream)->pcm->device;
+ __entry->number = (substream)->number;
+ __entry->stream = (substream)->stream;
+ __entry->pos = (pos);
+ __entry->period_size = (substream)->runtime->period_size;
+ __entry->buffer_size = (substream)->runtime->buffer_size;
+ __entry->old_hw_ptr = (substream)->runtime->status->hw_ptr;
+ __entry->hw_ptr_base = (substream)->runtime->hw_ptr_base;
+ ),
+ TP_printk("pcmC%dD%d%c/sub%d: %s: pos=%lu, old=%lu, base=%lu, period=%lu, buf=%lu",
+ __entry->card, __entry->device,
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->number,
+ __entry->in_interrupt ? "IRQ" : "POS",
+ (unsigned long)__entry->pos,
+ (unsigned long)__entry->old_hw_ptr,
+ (unsigned long)__entry->hw_ptr_base,
+ (unsigned long)__entry->period_size,
+ (unsigned long)__entry->buffer_size)
+);
+
+TRACE_EVENT(xrun,
+ TP_PROTO(struct snd_pcm_substream *substream),
+ TP_ARGS(substream),
+ TP_STRUCT__entry(
+ __field( unsigned int, card )
+ __field( unsigned int, device )
+ __field( unsigned int, number )
+ __field( unsigned int, stream )
+ __field( snd_pcm_uframes_t, period_size )
+ __field( snd_pcm_uframes_t, buffer_size )
+ __field( snd_pcm_uframes_t, old_hw_ptr )
+ __field( snd_pcm_uframes_t, hw_ptr_base )
+ ),
+ TP_fast_assign(
+ __entry->card = (substream)->pcm->card->number;
+ __entry->device = (substream)->pcm->device;
+ __entry->number = (substream)->number;
+ __entry->stream = (substream)->stream;
+ __entry->period_size = (substream)->runtime->period_size;
+ __entry->buffer_size = (substream)->runtime->buffer_size;
+ __entry->old_hw_ptr = (substream)->runtime->status->hw_ptr;
+ __entry->hw_ptr_base = (substream)->runtime->hw_ptr_base;
+ ),
+ TP_printk("pcmC%dD%d%c/sub%d: XRUN: old=%lu, base=%lu, period=%lu, buf=%lu",
+ __entry->card, __entry->device,
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->number,
+ (unsigned long)__entry->old_hw_ptr,
+ (unsigned long)__entry->hw_ptr_base,
+ (unsigned long)__entry->period_size,
+ (unsigned long)__entry->buffer_size)
+);
+
+TRACE_EVENT(hw_ptr_error,
+ TP_PROTO(struct snd_pcm_substream *substream, const char *why),
+ TP_ARGS(substream, why),
+ TP_STRUCT__entry(
+ __field( unsigned int, card )
+ __field( unsigned int, device )
+ __field( unsigned int, number )
+ __field( unsigned int, stream )
+ __field( const char *, reason )
+ ),
+ TP_fast_assign(
+ __entry->card = (substream)->pcm->card->number;
+ __entry->device = (substream)->pcm->device;
+ __entry->number = (substream)->number;
+ __entry->stream = (substream)->stream;
+ __entry->reason = (why);
+ ),
+ TP_printk("pcmC%dD%d%c/sub%d: ERROR: %s",
+ __entry->card, __entry->device,
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->number, __entry->reason)
+);
+
+#endif /* _PCM_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index b9184d20c39f..b0e32e161dd1 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -403,14 +403,11 @@ free_devinfo(void *private)
{
struct seq_oss_devinfo *dp = (struct seq_oss_devinfo *)private;
- if (dp->timer)
- snd_seq_oss_timer_delete(dp->timer);
+ snd_seq_oss_timer_delete(dp->timer);
- if (dp->writeq)
- snd_seq_oss_writeq_delete(dp->writeq);
+ snd_seq_oss_writeq_delete(dp->writeq);
- if (dp->readq)
- snd_seq_oss_readq_delete(dp->readq);
+ snd_seq_oss_readq_delete(dp->readq);
kfree(dp);
}
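The seq_oss_init cleanup works because the *_delete() helpers accept a NULL pointer, following the same convention as kfree(), so the callers can drop their own NULL checks. A sketch of that convention for a hypothetical object:

#include <linux/slab.h>

struct my_obj {
        void *buf;
};

/* NULL-tolerant destructor: callers never need an "if (obj)" guard */
static void my_obj_delete(struct my_obj *obj)
{
        if (!obj)
                return;
        kfree(obj->buf);
        kfree(obj);
}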
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index 712110561082..7e0aabb808a6 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -86,7 +86,6 @@ static int __init alsa_seq_init(void)
{
int err;
- snd_seq_autoload_lock();
if ((err = client_init_data()) < 0)
goto error;
@@ -110,8 +109,8 @@ static int __init alsa_seq_init(void)
if ((err = snd_seq_system_client_init()) < 0)
goto error;
+ snd_seq_autoload_init();
error:
- snd_seq_autoload_unlock();
return err;
}
@@ -131,6 +130,8 @@ static void __exit alsa_seq_exit(void)
/* release event memory */
snd_sequencer_memory_done();
+
+ snd_seq_autoload_exit();
}
module_init(alsa_seq_init)
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index 91a786a783e1..0631bdadd12b 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -56,6 +56,7 @@ MODULE_LICENSE("GPL");
#define DRIVER_LOADED (1<<0)
#define DRIVER_REQUESTED (1<<1)
#define DRIVER_LOCKED (1<<2)
+#define DRIVER_REQUESTING (1<<3)
struct ops_list {
char id[ID_LEN]; /* driver id */
@@ -127,42 +128,82 @@ static void snd_seq_device_info(struct snd_info_entry *entry,
#ifdef CONFIG_MODULES
/* avoid auto-loading during module_init() */
-static int snd_seq_in_init;
+static atomic_t snd_seq_in_init = ATOMIC_INIT(1); /* blocked as default */
void snd_seq_autoload_lock(void)
{
- snd_seq_in_init++;
+ atomic_inc(&snd_seq_in_init);
}
void snd_seq_autoload_unlock(void)
{
- snd_seq_in_init--;
+ atomic_dec(&snd_seq_in_init);
}
-#endif
-void snd_seq_device_load_drivers(void)
+static void autoload_drivers(void)
{
-#ifdef CONFIG_MODULES
- struct ops_list *ops;
+ /* avoid reentrance */
+ if (atomic_inc_return(&snd_seq_in_init) == 1) {
+ struct ops_list *ops;
+
+ mutex_lock(&ops_mutex);
+ list_for_each_entry(ops, &opslist, list) {
+ if ((ops->driver & DRIVER_REQUESTING) &&
+ !(ops->driver & DRIVER_REQUESTED)) {
+ ops->used++;
+ mutex_unlock(&ops_mutex);
+ ops->driver |= DRIVER_REQUESTED;
+ request_module("snd-%s", ops->id);
+ mutex_lock(&ops_mutex);
+ ops->used--;
+ }
+ }
+ mutex_unlock(&ops_mutex);
+ }
+ atomic_dec(&snd_seq_in_init);
+}
- /* Calling request_module during module_init()
- * may cause blocking.
- */
- if (snd_seq_in_init)
- return;
+static void call_autoload(struct work_struct *work)
+{
+ autoload_drivers();
+}
- mutex_lock(&ops_mutex);
- list_for_each_entry(ops, &opslist, list) {
- if (! (ops->driver & DRIVER_LOADED) &&
- ! (ops->driver & DRIVER_REQUESTED)) {
- ops->used++;
- mutex_unlock(&ops_mutex);
- ops->driver |= DRIVER_REQUESTED;
- request_module("snd-%s", ops->id);
- mutex_lock(&ops_mutex);
- ops->used--;
- }
+static DECLARE_WORK(autoload_work, call_autoload);
+
+static void try_autoload(struct ops_list *ops)
+{
+ if (!ops->driver) {
+ ops->driver |= DRIVER_REQUESTING;
+ schedule_work(&autoload_work);
}
+}
+
+static void queue_autoload_drivers(void)
+{
+ struct ops_list *ops;
+
+ mutex_lock(&ops_mutex);
+ list_for_each_entry(ops, &opslist, list)
+ try_autoload(ops);
mutex_unlock(&ops_mutex);
+}
+
+void snd_seq_autoload_init(void)
+{
+ atomic_dec(&snd_seq_in_init);
+#ifdef CONFIG_SND_SEQUENCER_MODULE
+ /* initial autoload only when snd-seq is a module */
+ queue_autoload_drivers();
+#endif
+}
+#else
+#define try_autoload(ops) /* NOP */
+#endif
+
+void snd_seq_device_load_drivers(void)
+{
+#ifdef CONFIG_MODULES
+ queue_autoload_drivers();
+ flush_work(&autoload_work);
#endif
}
@@ -214,13 +255,14 @@ int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize,
ops->num_devices++;
mutex_unlock(&ops->reg_mutex);
- unlock_driver(ops);
-
if ((err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops)) < 0) {
snd_seq_device_free(dev);
return err;
}
+ try_autoload(ops);
+ unlock_driver(ops);
+
if (result)
*result = dev;
@@ -318,16 +360,12 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
entry->init_device == NULL || entry->free_device == NULL)
return -EINVAL;
- snd_seq_autoload_lock();
ops = find_driver(id, 1);
- if (ops == NULL) {
- snd_seq_autoload_unlock();
+ if (ops == NULL)
return -ENOMEM;
- }
if (ops->driver & DRIVER_LOADED) {
pr_warn("ALSA: seq: driver_register: driver '%s' already exists\n", id);
unlock_driver(ops);
- snd_seq_autoload_unlock();
return -EBUSY;
}
@@ -344,7 +382,6 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
mutex_unlock(&ops->reg_mutex);
unlock_driver(ops);
- snd_seq_autoload_unlock();
return 0;
}
@@ -554,6 +591,9 @@ static int __init alsa_seq_device_init(void)
static void __exit alsa_seq_device_exit(void)
{
+#ifdef CONFIG_MODULES
+ cancel_work_sync(&autoload_work);
+#endif
remove_drivers();
#ifdef CONFIG_PROC_FS
snd_info_free_entry(info_entry);
@@ -570,6 +610,7 @@ EXPORT_SYMBOL(snd_seq_device_new);
EXPORT_SYMBOL(snd_seq_device_register_driver);
EXPORT_SYMBOL(snd_seq_device_unregister_driver);
#ifdef CONFIG_MODULES
+EXPORT_SYMBOL(snd_seq_autoload_init);
EXPORT_SYMBOL(snd_seq_autoload_lock);
EXPORT_SYMBOL(snd_seq_autoload_unlock);
#endif
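The seq_device rework defers request_module() to a workqueue because it may block and must not run from the registration path or from module_init(). A stripped-down sketch of the defer/flush/cancel pattern used above; the module name and the my_* identifiers are illustrative:

#include <linux/workqueue.h>
#include <linux/kmod.h>

static void my_autoload_fn(struct work_struct *work)
{
        /* safe to block here, unlike in the caller's context */
        request_module("snd-my-device");
}

static DECLARE_WORK(my_autoload_work, my_autoload_fn);

static void my_request_autoload(void)
{
        schedule_work(&my_autoload_work);       /* defer the blocking call */
}

static void my_flush_autoload(void)
{
        flush_work(&my_autoload_work);          /* wait for it when needed */
}

static void my_core_exit(void)
{
        cancel_work_sync(&my_autoload_work);    /* never leave work pending */
}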
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 0a418503ec41..84fffabdd129 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -39,8 +39,7 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
if (! sgbuf)
return -EINVAL;
- if (dmab->area)
- vunmap(dmab->area);
+ vunmap(dmab->area);
dmab->area = NULL;
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 38ad1a0dd3f7..f1333060bf1c 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -355,8 +355,13 @@ int snd_unregister_device(int type, struct snd_card *card, int dev)
EXPORT_SYMBOL(snd_unregister_device);
-/* get the assigned device to the given type and device number;
- * the caller needs to release it via put_device() after using it
+/**
+ * snd_get_device - get the assigned device to the given type and device number
+ * @type: the device type, SNDRV_DEVICE_TYPE_XXX
+ * @card: the card instance
+ * @dev: the device index
+ *
+ * The caller needs to release it via put_device() after using it.
*/
struct device *snd_get_device(int type, struct snd_card *card, int dev)
{
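The promoted kerneldoc stresses the refcounting contract: whatever snd_get_device() returns must be released with put_device(). A sketch of a caller, assuming the function and the SNDRV_DEVICE_TYPE_* constants are visible through <sound/core.h>; my_inspect_device() is illustrative:

#include <linux/device.h>
#include <sound/core.h>

static void my_inspect_device(struct snd_card *card)
{
        struct device *dev;

        dev = snd_get_device(SNDRV_DEVICE_TYPE_PCM_PLAYBACK, card, 0);
        if (!dev)
                return;
        /* ... use dev ... */
        put_device(dev);        /* drop the reference taken above */
}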
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 2a16c86a60b3..7ea53399404d 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1220,7 +1220,6 @@ static struct platform_driver loopback_driver = {
.remove = loopback_remove,
.driver = {
.name = SND_LOOPBACK_DRIVER,
- .owner = THIS_MODULE,
.pm = LOOPBACK_PM_OPS,
},
};
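The many '.owner = THIS_MODULE' deletions in this diff rely on platform_driver_register() being a wrapper macro that passes THIS_MODULE on the caller's behalf, so the field no longer needs to be filled in by hand. A minimal sketch of a driver declared without it; the my_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        return 0;
}

static int my_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver my_driver = {
        .probe  = my_probe,
        .remove = my_remove,
        .driver = {
                .name   = "my-driver",
                /* no .owner: platform_driver_register() sets it */
        },
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");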
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index fab90bd2bd51..5d0dfb787cec 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1162,7 +1162,6 @@ static struct platform_driver snd_dummy_driver = {
.remove = snd_dummy_remove,
.driver = {
.name = SND_DUMMY_DRIVER,
- .owner = THIS_MODULE,
.pm = SND_DUMMY_PM_OPS,
},
};
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index 33ed76530d0b..bcca825a1c8d 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1335,7 +1335,6 @@ static struct platform_driver snd_ml403_ac97cr_driver = {
.remove = snd_ml403_ac97cr_remove,
.driver = {
.name = SND_ML403_AC97CR_DRIVER,
- .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index 83014b83a44e..fed7e7e2177b 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -140,7 +140,6 @@ static struct platform_driver snd_mpu401_driver = {
.remove = snd_mpu401_remove,
.driver = {
.name = SND_MPU401_DRIVER,
- .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index 4b66c7f22af7..15769447688f 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -759,7 +759,6 @@ static struct platform_driver snd_mtpav_driver = {
.remove = snd_mtpav_remove,
.driver = {
.name = SND_MTPAV_DRIVER,
- .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index f5fd448dbc57..2a008a9ccf85 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -604,21 +604,11 @@ static struct snd_kcontrol_new mts64_ctl_smpte_time_frames = {
static int snd_mts64_ctl_smpte_fps_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[5] = { "24",
- "25",
- "29.97",
- "30D",
- "30" };
+ static const char * const texts[5] = {
+ "24", "25", "29.97", "30D", "30"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item > 4)
- uinfo->value.enumerated.item = 4;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int snd_mts64_ctl_smpte_fps_get(struct snd_kcontrol *kctl,
@@ -1040,7 +1030,6 @@ static struct platform_driver snd_mts64_driver = {
.remove = snd_mts64_remove,
.driver = {
.name = PLATFORM_DRIVER,
- .owner = THIS_MODULE,
}
};
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 36808cdab06f..2adc7548ffca 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -221,7 +221,6 @@ static void pcsp_shutdown(struct platform_device *dev)
static struct platform_driver pcsp_platform_driver = {
.driver = {
.name = "pcspkr",
- .owner = THIS_MODULE,
.pm = PCSP_PM_OPS,
},
.probe = pcsp_probe,
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index 78ccfa455527..464385a480e4 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -829,7 +829,6 @@ static struct platform_driver snd_portman_driver = {
.remove = snd_portman_remove,
.driver = {
.name = PLATFORM_DRIVER,
- .owner = THIS_MODULE,
}
};
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index 9ad4414fa25c..13a34e3c6382 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -994,7 +994,6 @@ static struct platform_driver snd_serial_driver = {
.remove = snd_serial_remove,
.driver = {
.name = SND_SERIAL_DRIVER,
- .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/virmidi.c b/sound/drivers/virmidi.c
index b178724295f3..33ef13a72e69 100644
--- a/sound/drivers/virmidi.c
+++ b/sound/drivers/virmidi.c
@@ -99,30 +99,33 @@ static int snd_virmidi_probe(struct platform_device *devptr)
if (midi_devs[dev] > MAX_MIDI_DEVICES) {
snd_printk(KERN_WARNING
- "too much midi devices for virmidi %d: "
- "force to use %d\n", dev, MAX_MIDI_DEVICES);
+ "too much midi devices for virmidi %d: force to use %d\n",
+ dev, MAX_MIDI_DEVICES);
midi_devs[dev] = MAX_MIDI_DEVICES;
}
for (idx = 0; idx < midi_devs[dev]; idx++) {
struct snd_rawmidi *rmidi;
struct snd_virmidi_dev *rdev;
- if ((err = snd_virmidi_new(card, idx, &rmidi)) < 0)
+
+ err = snd_virmidi_new(card, idx, &rmidi);
+ if (err < 0)
goto __nodev;
rdev = rmidi->private_data;
vmidi->midi[idx] = rmidi;
strcpy(rmidi->name, "Virtual Raw MIDI");
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
}
-
+
strcpy(card->driver, "VirMIDI");
strcpy(card->shortname, "VirMIDI");
sprintf(card->longname, "Virtual MIDI Card %i", dev + 1);
- if ((err = snd_card_register(card)) == 0) {
+ err = snd_card_register(card);
+ if (!err) {
platform_set_drvdata(devptr, card);
return 0;
}
- __nodev:
+__nodev:
snd_card_free(card);
return err;
}
@@ -140,7 +143,6 @@ static struct platform_driver snd_virmidi_driver = {
.remove = snd_virmidi_remove,
.driver = {
.name = SND_VIRMIDI_DRIVER,
- .owner = THIS_MODULE,
},
};
@@ -157,13 +159,15 @@ static int __init alsa_card_virmidi_init(void)
{
int i, cards, err;
- if ((err = platform_driver_register(&snd_virmidi_driver)) < 0)
+ err = platform_driver_register(&snd_virmidi_driver);
+ if (err < 0)
return err;
cards = 0;
for (i = 0; i < SNDRV_CARDS; i++) {
struct platform_device *device;
- if (! enable[i])
+
+ if (!enable[i])
continue;
device = platform_device_register_simple(SND_VIRMIDI_DRIVER,
i, NULL, 0);
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index e8cc16993903..fc05a37fd017 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -416,6 +416,7 @@ int vx_send_rih(struct vx_core *chip, int cmd)
/**
* snd_vx_boot_xilinx - boot up the xilinx interface
+ * @chip: VX core instance
* @boot: the boot record to load
*/
int snd_vx_load_boot_image(struct vx_core *chip, const struct firmware *boot)
@@ -538,6 +539,8 @@ EXPORT_SYMBOL(snd_vx_threaded_irq_handler);
/**
* snd_vx_irq_handler - interrupt handler
+ * @irq: irq number
+ * @dev: VX core instance
*/
irqreturn_t snd_vx_irq_handler(int irq, void *dev)
{
@@ -649,6 +652,8 @@ static void vx_proc_init(struct vx_core *chip)
/**
* snd_vx_dsp_boot - load the DSP boot
+ * @chip: VX core instance
+ * @boot: firmware data
*/
int snd_vx_dsp_boot(struct vx_core *chip, const struct firmware *boot)
{
@@ -669,6 +674,8 @@ EXPORT_SYMBOL(snd_vx_dsp_boot);
/**
* snd_vx_dsp_load - load the DSP image
+ * @chip: VX core instance
+ * @dsp: firmware data
*/
int snd_vx_dsp_load(struct vx_core *chip, const struct firmware *dsp)
{
@@ -768,7 +775,10 @@ EXPORT_SYMBOL(snd_vx_resume);
/**
* snd_vx_create - constructor for struct vx_core
+ * @card: card instance
* @hw: hardware specific record
+ * @ops: VX ops pointer
+ * @extra_size: extra byte size to allocate appending to chip
*
* this function allocates the instance and prepare for the hardware
* initialization.
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index 3b6823fc0606..be9477e30739 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -471,30 +471,18 @@ static struct snd_kcontrol_new vx_control_output_level = {
*/
static int vx_audio_src_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts_mic[3] = {
+ static const char * const texts_mic[3] = {
"Digital", "Line", "Mic"
};
- static char *texts_vx2[2] = {
+ static const char * const texts_vx2[2] = {
"Digital", "Analog"
};
struct vx_core *chip = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- if (chip->type >= VX_TYPE_VXPOCKET) {
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name,
- texts_mic[uinfo->value.enumerated.item]);
- } else {
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- texts_vx2[uinfo->value.enumerated.item]);
- }
- return 0;
+ if (chip->type >= VX_TYPE_VXPOCKET)
+ return snd_ctl_enum_info(uinfo, 1, 3, texts_mic);
+ else
+ return snd_ctl_enum_info(uinfo, 1, 2, texts_vx2);
}
static int vx_audio_src_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -539,18 +527,11 @@ static struct snd_kcontrol_new vx_control_audio_src = {
*/
static int vx_clock_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {
+ static const char * const texts[3] = {
"Auto", "Internal", "External"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int vx_clock_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 46dff64908c8..ecec547782b2 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -13,28 +13,34 @@ config SND_FIREWIRE_LIB
select SND_RAWMIDI
config SND_DICE
- tristate "DICE-based DACs (EXPERIMENTAL)"
+ tristate "DICE-based DACs support"
select SND_HWDEP
select SND_FIREWIRE_LIB
help
Say Y here to include support for many DACs based on the DICE
- chip family (DICE-II/Jr/Mini) from TC Applied Technologies.
-
- At the moment, this driver supports playback only. If you
- want to use devices that support capturing, use FFADO instead.
+ chip family (DICE-II/Jr/Mini) which TC Applied Technologies produces.
To compile this driver as a module, choose M here: the module
will be called snd-dice.
-config SND_FIREWIRE_SPEAKERS
- tristate "FireWire speakers"
+config SND_OXFW
+ tristate "Oxford Semiconductor FW970/971 chipset support"
select SND_FIREWIRE_LIB
+ select SND_HWDEP
help
- Say Y here to include support for the Griffin FireWave Surround
- and the LaCie FireWire Speakers.
+ Say Y here to include support for FireWire devices based on
+ Oxford Semiconductor FW970/971 chipset.
+ * Griffin Firewave
+ * LaCie Firewire Speakers
+ * Behringer F-Control Audio 202
+ * Mackie(Loud) Onyx-i series (former models)
+ * Mackie(Loud) Onyx Satellite
+ * Mackie(Loud) Tapco Link.Firewire
+ * Mackie(Loud) d.2 pro/d.4 pro
+ * Mackie(Loud) U.420/U.420d
To compile this driver as a module, choose M here: the module
- will be called snd-firewire-speakers.
+ will be called snd-oxfw.
config SND_ISIGHT
tristate "Apple iSight microphone"
diff --git a/sound/firewire/Makefile b/sound/firewire/Makefile
index fad8d49306ab..8b37f084b2ab 100644
--- a/sound/firewire/Makefile
+++ b/sound/firewire/Makefile
@@ -1,13 +1,12 @@
snd-firewire-lib-objs := lib.o iso-resources.o packets-buffer.o \
fcp.o cmp.o amdtp.o
-snd-dice-objs := dice.o
-snd-firewire-speakers-objs := speakers.o
+snd-oxfw-objs := oxfw.o
snd-isight-objs := isight.o
snd-scs1x-objs := scs1x.o
obj-$(CONFIG_SND_FIREWIRE_LIB) += snd-firewire-lib.o
-obj-$(CONFIG_SND_DICE) += snd-dice.o
-obj-$(CONFIG_SND_FIREWIRE_SPEAKERS) += snd-firewire-speakers.o
+obj-$(CONFIG_SND_DICE) += dice/
+obj-$(CONFIG_SND_OXFW) += oxfw/
obj-$(CONFIG_SND_ISIGHT) += snd-isight.o
obj-$(CONFIG_SND_SCS1X) += snd-scs1x.o
obj-$(CONFIG_SND_FIREWORKS) += fireworks/
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 95fc2eaf11dc..3badc70124ab 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -1006,11 +1006,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s)
struct snd_pcm_substream *pcm;
pcm = ACCESS_ONCE(s->pcm);
- if (pcm) {
- snd_pcm_stream_lock_irq(pcm);
- if (snd_pcm_running(pcm))
- snd_pcm_stop(pcm, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irq(pcm);
- }
+ if (pcm)
+ snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 4823c08196ac..e6e8926275b0 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -23,7 +23,7 @@
* corresponds to the end of event in the packet. Out of IEC 61883.
* @CIP_WRONG_DBS: Only for in-stream. The value of dbs is wrong in in-packets.
* The value of data_block_quadlets is used instead of reported value.
- * @SKIP_DBC_ZERO_CHECK: Only for in-stream. Packets with zero in dbc is
+ * @CIP_SKIP_DBC_ZERO_CHECK: Only for in-stream. Packets with zero in dbc are
* skipped for detecting discontinuity.
* @CIP_SKIP_INIT_DBC_CHECK: Only for in-stream. The value of dbc in first
* packet is not continuous from an initial value.
@@ -43,7 +43,27 @@ enum cip_flags {
};
/**
- * enum cip_sfc - a stream's sample rate
+ * enum cip_sfc - supported Sampling Frequency Codes (SFCs)
+ * @CIP_SFC_32000: 32,000 data blocks
+ * @CIP_SFC_44100: 44,100 data blocks
+ * @CIP_SFC_48000: 48,000 data blocks
+ * @CIP_SFC_88200: 88,200 data blocks
+ * @CIP_SFC_96000: 96,000 data blocks
+ * @CIP_SFC_176400: 176,400 data blocks
+ * @CIP_SFC_192000: 192,000 data blocks
+ * @CIP_SFC_COUNT: the number of supported SFCs
+ *
+ * These values are used to show the nominal Sampling Frequency Code in the
+ * Format Dependent Field (FDF) of an AMDTP packet header. In IEC 61883-6:2002,
+ * this code means the number of events per second. Actually the code
+ * represents the number of data blocks transferred per second in an AMDTP
+ * stream.
+ *
+ * In IEC 61883-6:2005, some extensions were added to support more types of
+ * data such as 'One Bit LInear Audio', therefore the meaning of SFC became
+ * different depending on the types.
+ *
+ * Currently our implementation is compatible with IEC 61883-6:2002.
*/
enum cip_sfc {
CIP_SFC_32000 = 0,
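To make the SFC description above concrete, here is a hedged sketch of mapping a nominal PCM rate to one of these codes. The table and helper names are invented for illustration and are not part of this patch:

#include <linux/errno.h>
#include "amdtp.h"

/* Nominal rates in enum cip_sfc order (illustrative copy, names invented). */
static const unsigned int example_sfc_rates[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};

/* Look up the SFC for a PCM rate; -EINVAL if the rate has no FDF encoding. */
static int example_rate_to_sfc(unsigned int rate, enum cip_sfc *sfc)
{
	unsigned int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (example_sfc_rates[i] == rate) {
			*sfc = i;
			return 0;
		}
	}

	return -EINVAL;
}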
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index e13eef99c27a..dfbcd233178c 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -52,7 +52,7 @@ extern const unsigned int snd_bebob_rate_table[SND_BEBOB_STRM_FMT_ENTRIES];
#define SND_BEBOB_CLOCK_INTERNAL "Internal"
struct snd_bebob_clock_spec {
unsigned int num;
- char *const *labels;
+ const char *const *labels;
int (*get)(struct snd_bebob *bebob, unsigned int *id);
};
struct snd_bebob_rate_spec {
@@ -61,7 +61,7 @@ struct snd_bebob_rate_spec {
};
struct snd_bebob_meter_spec {
unsigned int num;
- char *const *labels;
+ const char *const *labels;
int (*get)(struct snd_bebob *bebob, u32 *target, unsigned int size);
};
struct snd_bebob_spec {
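The two hunks above only widen the pointer type; the reason becomes visible once the label arrays themselves are declared fully const in the follow-up hunks. A small illustrative fragment, with invented names, not part of the patch:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/* Fully const label array, as the later hunks declare them. */
static const char *const example_clk_labels[] = {
	"Internal", "S/PDIF",
};

struct example_clock_spec {
	unsigned int num;
	const char *const *labels;	/* was: char *const *labels */
};

/*
 * With the old member type this assignment would discard the const
 * qualifier of the string pointers and provoke a compiler warning;
 * with the const-qualified member it is accepted as-is.
 */
static const struct example_clock_spec example_spec = {
	.num	= ARRAY_SIZE(example_clk_labels),
	.labels	= example_clk_labels,
};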
diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
index 3b052ed0fbf5..fc67c1b7cb5b 100644
--- a/sound/firewire/bebob/bebob_focusrite.c
+++ b/sound/firewire/bebob/bebob_focusrite.c
@@ -103,10 +103,10 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value)
&data, sizeof(__be32), 0);
}
-static char *const saffirepro_10_clk_src_labels[] = {
+static const char *const saffirepro_10_clk_src_labels[] = {
SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
};
-static char *const saffirepro_26_clk_src_labels[] = {
+static const char *const saffirepro_26_clk_src_labels[] = {
SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock"
};
/* Value maps between registers and labels for SaffirePro 10/26. */
@@ -195,7 +195,7 @@ end:
}
struct snd_bebob_spec saffire_le_spec;
-static char *const saffire_both_clk_src_labels[] = {
+static const char *const saffire_both_clk_src_labels[] = {
SND_BEBOB_CLOCK_INTERNAL, "S/PDIF"
};
static int
@@ -210,12 +210,12 @@ saffire_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
return err;
};
-static char *const saffire_le_meter_labels[] = {
+static const char *const saffire_le_meter_labels[] = {
ANA_IN, ANA_IN, DIG_IN,
ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT,
STM_IN, STM_IN
};
-static char *const saffire_meter_labels[] = {
+static const char *const saffire_meter_labels[] = {
ANA_IN, ANA_IN,
STM_IN, STM_IN, STM_IN, STM_IN, STM_IN,
};
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 70faa3a32526..a422aaa3bb0c 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -340,7 +340,7 @@ end:
}
/* Clock source control for special firmware */
-static char *const special_clk_labels[] = {
+static const char *const special_clk_labels[] = {
SND_BEBOB_CLOCK_INTERNAL " with Digital Mute", "Digital",
"Word Clock", SND_BEBOB_CLOCK_INTERNAL};
static int special_clk_get(struct snd_bebob *bebob, unsigned int *id)
@@ -352,17 +352,8 @@ static int special_clk_get(struct snd_bebob *bebob, unsigned int *id)
static int special_clk_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *einf)
{
- einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_clk_labels);
-
- if (einf->value.enumerated.item >= einf->value.enumerated.items)
- einf->value.enumerated.item = einf->value.enumerated.items - 1;
-
- strcpy(einf->value.enumerated.name,
- special_clk_labels[einf->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(einf, 1, ARRAY_SIZE(special_clk_labels),
+ special_clk_labels);
}
static int special_clk_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *uval)
@@ -438,23 +429,15 @@ static struct snd_kcontrol_new special_sync_ctl = {
};
/* Digital input interface control for special firmware */
-static char *const special_dig_in_iface_labels[] = {
+static const char *const special_dig_in_iface_labels[] = {
"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
};
static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *einf)
{
- einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
-
- if (einf->value.enumerated.item >= einf->value.enumerated.items)
- einf->value.enumerated.item = einf->value.enumerated.items - 1;
-
- strcpy(einf->value.enumerated.name,
- special_dig_in_iface_labels[einf->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(einf, 1,
+ ARRAY_SIZE(special_dig_in_iface_labels),
+ special_dig_in_iface_labels);
}
static int special_dig_in_iface_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *uval)
@@ -539,23 +522,15 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
};
/* Digital output interface control for special firmware */
-static char *const special_dig_out_iface_labels[] = {
+static const char *const special_dig_out_iface_labels[] = {
"S/PDIF Optical and Coaxial", "ADAT Optical"
};
static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
struct snd_ctl_elem_info *einf)
{
- einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- einf->count = 1;
- einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
-
- if (einf->value.enumerated.item >= einf->value.enumerated.items)
- einf->value.enumerated.item = einf->value.enumerated.items - 1;
-
- strcpy(einf->value.enumerated.name,
- special_dig_out_iface_labels[einf->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(einf, 1,
+ ARRAY_SIZE(special_dig_out_iface_labels),
+ special_dig_out_iface_labels);
}
static int special_dig_out_iface_ctl_get(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *uval)
@@ -631,7 +606,7 @@ end:
}
/* Hardware metering for special firmware */
-static char *const special_meter_labels[] = {
+static const char *const special_meter_labels[] = {
ANA_IN, ANA_IN, ANA_IN, ANA_IN,
SPDIF_IN,
ADAT_IN, ADAT_IN, ADAT_IN, ADAT_IN,
@@ -671,30 +646,30 @@ end:
}
/* last 4 bytes are omitted because it's clock info. */
-static char *const fw410_meter_labels[] = {
+static const char *const fw410_meter_labels[] = {
ANA_IN, DIG_IN,
ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT, DIG_OUT,
HP_OUT
};
-static char *const audiophile_meter_labels[] = {
+static const char *const audiophile_meter_labels[] = {
ANA_IN, DIG_IN,
ANA_OUT, ANA_OUT, DIG_OUT,
HP_OUT, AUX_OUT,
};
-static char *const solo_meter_labels[] = {
+static const char *const solo_meter_labels[] = {
ANA_IN, DIG_IN,
STRM_IN, STRM_IN,
ANA_OUT, DIG_OUT
};
/* no clock info */
-static char *const ozonic_meter_labels[] = {
+static const char *const ozonic_meter_labels[] = {
ANA_IN, ANA_IN,
STRM_IN, STRM_IN,
ANA_OUT, ANA_OUT
};
/* TODO: need testers. These positions are based on the author's assumption. */
-static char *const nrv10_meter_labels[] = {
+static const char *const nrv10_meter_labels[] = {
ANA_IN, ANA_IN, ANA_IN, ANA_IN,
DIG_IN,
ANA_OUT, ANA_OUT, ANA_OUT, ANA_OUT,
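The conversions in this file all follow the same pattern: the boilerplate in an enumerated control's .info callback collapses into a single snd_ctl_enum_info() call, which fills in the element type, count, item clamp and name copy itself. A minimal example with invented control and label names:

#include <linux/kernel.h>
#include <sound/control.h>

static const char *const example_labels[] = {
	"Internal", "S/PDIF", "Word Clock",
};

/* .info callback of an enumerated control: one channel, N labelled items. */
static int example_ctl_info(struct snd_kcontrol *kctl,
			    struct snd_ctl_elem_info *einf)
{
	return snd_ctl_enum_info(einf, 1, ARRAY_SIZE(example_labels),
				 example_labels);
}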
diff --git a/sound/firewire/bebob/bebob_terratec.c b/sound/firewire/bebob/bebob_terratec.c
index 9940611f2e1b..ad635004d699 100644
--- a/sound/firewire/bebob/bebob_terratec.c
+++ b/sound/firewire/bebob/bebob_terratec.c
@@ -8,7 +8,7 @@
#include "./bebob.h"
-static char *const phase88_rack_clk_src_labels[] = {
+static const char *const phase88_rack_clk_src_labels[] = {
SND_BEBOB_CLOCK_INTERNAL, "Digital In", "Word Clock"
};
static int
@@ -34,7 +34,7 @@ end:
return err;
}
-static char *const phase24_series_clk_src_labels[] = {
+static const char *const phase24_series_clk_src_labels[] = {
SND_BEBOB_CLOCK_INTERNAL, "Digital In"
};
static int
diff --git a/sound/firewire/bebob/bebob_yamaha.c b/sound/firewire/bebob/bebob_yamaha.c
index 9b7e798180ff..ef1fe3823a9c 100644
--- a/sound/firewire/bebob/bebob_yamaha.c
+++ b/sound/firewire/bebob/bebob_yamaha.c
@@ -28,7 +28,7 @@
* recommend users to close ffado-mixer at 192.0kHz if the mixer is not needed.
*/
-static char *const clk_src_labels[] = {SND_BEBOB_CLOCK_INTERNAL, "SPDIF"};
+static const char *const clk_src_labels[] = {SND_BEBOB_CLOCK_INTERNAL, "SPDIF"};
static int
clk_src_get(struct snd_bebob *bebob, unsigned int *id)
{
diff --git a/sound/firewire/cmp.c b/sound/firewire/cmp.c
index ba8df5a1be39..ae3bc1940efa 100644
--- a/sound/firewire/cmp.c
+++ b/sound/firewire/cmp.c
@@ -114,6 +114,7 @@ static int pcr_modify(struct cmp_connection *c,
* cmp_connection_init - initializes a connection manager
* @c: the connection manager to initialize
* @unit: a unit of the target device
+ * @direction: input or output
* @pcr_index: the index of the iPCR/oPCR on the target device
*/
int cmp_connection_init(struct cmp_connection *c,
@@ -154,6 +155,7 @@ EXPORT_SYMBOL(cmp_connection_init);
/**
* cmp_connection_check_used - check whether the connection is already established
* @c: the connection manager to be checked
+ * @used: the pointer to store the result of checking the connection
*/
int cmp_connection_check_used(struct cmp_connection *c, bool *used)
{
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
deleted file mode 100644
index e3a04d69c853..000000000000
--- a/sound/firewire/dice.c
+++ /dev/null
@@ -1,1511 +0,0 @@
-/*
- * TC Applied Technologies Digital Interface Communications Engine driver
- *
- * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- * Licensed under the terms of the GNU General Public License, version 2.
- */
-
-#include <linux/compat.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <sound/control.h>
-#include <sound/core.h>
-#include <sound/firewire.h>
-#include <sound/hwdep.h>
-#include <sound/info.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include "amdtp.h"
-#include "iso-resources.h"
-#include "lib.h"
-#include "dice-interface.h"
-
-
-struct dice {
- struct snd_card *card;
- struct fw_unit *unit;
- spinlock_t lock;
- struct mutex mutex;
- unsigned int global_offset;
- unsigned int rx_offset;
- unsigned int clock_caps;
- unsigned int rx_channels[3];
- unsigned int rx_midi_ports[3];
- struct fw_address_handler notification_handler;
- int owner_generation;
- int dev_lock_count; /* > 0 driver, < 0 userspace */
- bool dev_lock_changed;
- bool global_enabled;
- struct completion clock_accepted;
- wait_queue_head_t hwdep_wait;
- u32 notification_bits;
- struct fw_iso_resources resources;
- struct amdtp_stream stream;
-};
-
-MODULE_DESCRIPTION("DICE driver");
-MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
-MODULE_LICENSE("GPL v2");
-
-static const unsigned int dice_rates[] = {
- /* mode 0 */
- [0] = 32000,
- [1] = 44100,
- [2] = 48000,
- /* mode 1 */
- [3] = 88200,
- [4] = 96000,
- /* mode 2 */
- [5] = 176400,
- [6] = 192000,
-};
-
-static unsigned int rate_to_index(unsigned int rate)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
- if (dice_rates[i] == rate)
- return i;
-
- return 0;
-}
-
-static unsigned int rate_index_to_mode(unsigned int rate_index)
-{
- return ((int)rate_index - 1) / 2;
-}
-
-static void dice_lock_changed(struct dice *dice)
-{
- dice->dev_lock_changed = true;
- wake_up(&dice->hwdep_wait);
-}
-
-static int dice_try_lock(struct dice *dice)
-{
- int err;
-
- spin_lock_irq(&dice->lock);
-
- if (dice->dev_lock_count < 0) {
- err = -EBUSY;
- goto out;
- }
-
- if (dice->dev_lock_count++ == 0)
- dice_lock_changed(dice);
- err = 0;
-
-out:
- spin_unlock_irq(&dice->lock);
-
- return err;
-}
-
-static void dice_unlock(struct dice *dice)
-{
- spin_lock_irq(&dice->lock);
-
- if (WARN_ON(dice->dev_lock_count <= 0))
- goto out;
-
- if (--dice->dev_lock_count == 0)
- dice_lock_changed(dice);
-
-out:
- spin_unlock_irq(&dice->lock);
-}
-
-static inline u64 global_address(struct dice *dice, unsigned int offset)
-{
- return DICE_PRIVATE_SPACE + dice->global_offset + offset;
-}
-
-// TODO: rx index
-static inline u64 rx_address(struct dice *dice, unsigned int offset)
-{
- return DICE_PRIVATE_SPACE + dice->rx_offset + offset;
-}
-
-static int dice_owner_set(struct dice *dice)
-{
- struct fw_device *device = fw_parent_device(dice->unit);
- __be64 *buffer;
- int err, errors = 0;
-
- buffer = kmalloc(2 * 8, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- for (;;) {
- buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
- buffer[1] = cpu_to_be64(
- ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
- dice->notification_handler.offset);
-
- dice->owner_generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
- err = snd_fw_transaction(dice->unit,
- TCODE_LOCK_COMPARE_SWAP,
- global_address(dice, GLOBAL_OWNER),
- buffer, 2 * 8,
- FW_FIXED_GENERATION |
- dice->owner_generation);
-
- if (err == 0) {
- if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
- dev_err(&dice->unit->device,
- "device is already in use\n");
- err = -EBUSY;
- }
- break;
- }
- if (err != -EAGAIN || ++errors >= 3)
- break;
-
- msleep(20);
- }
-
- kfree(buffer);
-
- return err;
-}
-
-static int dice_owner_update(struct dice *dice)
-{
- struct fw_device *device = fw_parent_device(dice->unit);
- __be64 *buffer;
- int err;
-
- if (dice->owner_generation == -1)
- return 0;
-
- buffer = kmalloc(2 * 8, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
- buffer[1] = cpu_to_be64(
- ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
- dice->notification_handler.offset);
-
- dice->owner_generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
- err = snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
- global_address(dice, GLOBAL_OWNER),
- buffer, 2 * 8,
- FW_FIXED_GENERATION | dice->owner_generation);
-
- if (err == 0) {
- if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
- dev_err(&dice->unit->device,
- "device is already in use\n");
- err = -EBUSY;
- }
- } else if (err == -EAGAIN) {
- err = 0; /* try again later */
- }
-
- kfree(buffer);
-
- if (err < 0)
- dice->owner_generation = -1;
-
- return err;
-}
-
-static void dice_owner_clear(struct dice *dice)
-{
- struct fw_device *device = fw_parent_device(dice->unit);
- __be64 *buffer;
-
- buffer = kmalloc(2 * 8, GFP_KERNEL);
- if (!buffer)
- return;
-
- buffer[0] = cpu_to_be64(
- ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
- dice->notification_handler.offset);
- buffer[1] = cpu_to_be64(OWNER_NO_OWNER);
- snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
- global_address(dice, GLOBAL_OWNER),
- buffer, 2 * 8, FW_QUIET |
- FW_FIXED_GENERATION | dice->owner_generation);
-
- kfree(buffer);
-
- dice->owner_generation = -1;
-}
-
-static int dice_enable_set(struct dice *dice)
-{
- __be32 value;
- int err;
-
- value = cpu_to_be32(1);
- err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
- global_address(dice, GLOBAL_ENABLE),
- &value, 4,
- FW_FIXED_GENERATION | dice->owner_generation);
- if (err < 0)
- return err;
-
- dice->global_enabled = true;
-
- return 0;
-}
-
-static void dice_enable_clear(struct dice *dice)
-{
- __be32 value;
-
- if (!dice->global_enabled)
- return;
-
- value = 0;
- snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
- global_address(dice, GLOBAL_ENABLE),
- &value, 4, FW_QUIET |
- FW_FIXED_GENERATION | dice->owner_generation);
-
- dice->global_enabled = false;
-}
-
-static void dice_notification(struct fw_card *card, struct fw_request *request,
- int tcode, int destination, int source,
- int generation, unsigned long long offset,
- void *data, size_t length, void *callback_data)
-{
- struct dice *dice = callback_data;
- u32 bits;
- unsigned long flags;
-
- if (tcode != TCODE_WRITE_QUADLET_REQUEST) {
- fw_send_response(card, request, RCODE_TYPE_ERROR);
- return;
- }
- if ((offset & 3) != 0) {
- fw_send_response(card, request, RCODE_ADDRESS_ERROR);
- return;
- }
-
- bits = be32_to_cpup(data);
-
- spin_lock_irqsave(&dice->lock, flags);
- dice->notification_bits |= bits;
- spin_unlock_irqrestore(&dice->lock, flags);
-
- fw_send_response(card, request, RCODE_COMPLETE);
-
- if (bits & NOTIFY_CLOCK_ACCEPTED)
- complete(&dice->clock_accepted);
- wake_up(&dice->hwdep_wait);
-}
-
-static int dice_rate_constraint(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct dice *dice = rule->private;
- const struct snd_interval *channels =
- hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_interval *rate =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval allowed_rates = {
- .min = UINT_MAX, .max = 0, .integer = 1
- };
- unsigned int i, mode;
-
- for (i = 0; i < ARRAY_SIZE(dice_rates); ++i) {
- mode = rate_index_to_mode(i);
- if ((dice->clock_caps & (1 << i)) &&
- snd_interval_test(channels, dice->rx_channels[mode])) {
- allowed_rates.min = min(allowed_rates.min,
- dice_rates[i]);
- allowed_rates.max = max(allowed_rates.max,
- dice_rates[i]);
- }
- }
-
- return snd_interval_refine(rate, &allowed_rates);
-}
-
-static int dice_channels_constraint(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct dice *dice = rule->private;
- const struct snd_interval *rate =
- hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_interval allowed_channels = {
- .min = UINT_MAX, .max = 0, .integer = 1
- };
- unsigned int i, mode;
-
- for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
- if ((dice->clock_caps & (1 << i)) &&
- snd_interval_test(rate, dice_rates[i])) {
- mode = rate_index_to_mode(i);
- allowed_channels.min = min(allowed_channels.min,
- dice->rx_channels[mode]);
- allowed_channels.max = max(allowed_channels.max,
- dice->rx_channels[mode]);
- }
-
- return snd_interval_refine(channels, &allowed_channels);
-}
-
-static int dice_open(struct snd_pcm_substream *substream)
-{
- static const struct snd_pcm_hardware hardware = {
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER,
- .formats = AMDTP_OUT_PCM_FORMAT_BITS,
- .channels_min = UINT_MAX,
- .channels_max = 0,
- .buffer_bytes_max = 16 * 1024 * 1024,
- .period_bytes_min = 1,
- .period_bytes_max = UINT_MAX,
- .periods_min = 1,
- .periods_max = UINT_MAX,
- };
- struct dice *dice = substream->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int i;
- int err;
-
- err = dice_try_lock(dice);
- if (err < 0)
- goto error;
-
- runtime->hw = hardware;
-
- for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
- if (dice->clock_caps & (1 << i))
- runtime->hw.rates |=
- snd_pcm_rate_to_rate_bit(dice_rates[i]);
- snd_pcm_limit_hw_rates(runtime);
-
- for (i = 0; i < 3; ++i)
- if (dice->rx_channels[i]) {
- runtime->hw.channels_min = min(runtime->hw.channels_min,
- dice->rx_channels[i]);
- runtime->hw.channels_max = max(runtime->hw.channels_max,
- dice->rx_channels[i]);
- }
-
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- dice_rate_constraint, dice,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
- if (err < 0)
- goto err_lock;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- dice_channels_constraint, dice,
- SNDRV_PCM_HW_PARAM_RATE, -1);
- if (err < 0)
- goto err_lock;
-
- err = amdtp_stream_add_pcm_hw_constraints(&dice->stream, runtime);
- if (err < 0)
- goto err_lock;
-
- return 0;
-
-err_lock:
- dice_unlock(dice);
-error:
- return err;
-}
-
-static int dice_close(struct snd_pcm_substream *substream)
-{
- struct dice *dice = substream->private_data;
-
- dice_unlock(dice);
-
- return 0;
-}
-
-static int dice_stream_start_packets(struct dice *dice)
-{
- int err;
-
- if (amdtp_stream_running(&dice->stream))
- return 0;
-
- err = amdtp_stream_start(&dice->stream, dice->resources.channel,
- fw_parent_device(dice->unit)->max_speed);
- if (err < 0)
- return err;
-
- err = dice_enable_set(dice);
- if (err < 0) {
- amdtp_stream_stop(&dice->stream);
- return err;
- }
-
- return 0;
-}
-
-static int dice_stream_start(struct dice *dice)
-{
- __be32 channel;
- int err;
-
- if (!dice->resources.allocated) {
- err = fw_iso_resources_allocate(&dice->resources,
- amdtp_stream_get_max_payload(&dice->stream),
- fw_parent_device(dice->unit)->max_speed);
- if (err < 0)
- goto error;
-
- channel = cpu_to_be32(dice->resources.channel);
- err = snd_fw_transaction(dice->unit,
- TCODE_WRITE_QUADLET_REQUEST,
- rx_address(dice, RX_ISOCHRONOUS),
- &channel, 4, 0);
- if (err < 0)
- goto err_resources;
- }
-
- err = dice_stream_start_packets(dice);
- if (err < 0)
- goto err_rx_channel;
-
- return 0;
-
-err_rx_channel:
- channel = cpu_to_be32((u32)-1);
- snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
- rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
-err_resources:
- fw_iso_resources_free(&dice->resources);
-error:
- return err;
-}
-
-static void dice_stream_stop_packets(struct dice *dice)
-{
- if (amdtp_stream_running(&dice->stream)) {
- dice_enable_clear(dice);
- amdtp_stream_stop(&dice->stream);
- }
-}
-
-static void dice_stream_stop(struct dice *dice)
-{
- __be32 channel;
-
- dice_stream_stop_packets(dice);
-
- if (!dice->resources.allocated)
- return;
-
- channel = cpu_to_be32((u32)-1);
- snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
- rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
-
- fw_iso_resources_free(&dice->resources);
-}
-
-static int dice_change_rate(struct dice *dice, unsigned int clock_rate)
-{
- __be32 value;
- int err;
-
- reinit_completion(&dice->clock_accepted);
-
- value = cpu_to_be32(clock_rate | CLOCK_SOURCE_ARX1);
- err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
- global_address(dice, GLOBAL_CLOCK_SELECT),
- &value, 4, 0);
- if (err < 0)
- return err;
-
- if (!wait_for_completion_timeout(&dice->clock_accepted,
- msecs_to_jiffies(100)))
- dev_warn(&dice->unit->device, "clock change timed out\n");
-
- return 0;
-}
-
-static int dice_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct dice *dice = substream->private_data;
- unsigned int rate_index, mode, rate, channels, i;
- int err;
-
- mutex_lock(&dice->mutex);
- dice_stream_stop(dice);
- mutex_unlock(&dice->mutex);
-
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- if (err < 0)
- return err;
-
- rate = params_rate(hw_params);
- rate_index = rate_to_index(rate);
- err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
- if (err < 0)
- return err;
-
- /*
- * At 176.4/192.0 kHz, Dice has a quirk to transfer two PCM frames in
- * one data block of AMDTP packet. Thus sampling transfer frequency is
- * a half of PCM sampling frequency, i.e. PCM frames at 192.0 kHz are
- * transferred on AMDTP packets at 96 kHz. Two successive samples of a
- * channel are stored consecutively in the packet. This quirk is called
- * as 'Dual Wire'.
- * For this quirk, blocking mode is required and PCM buffer size should
- * be aligned to SYT_INTERVAL.
- */
- channels = params_channels(hw_params);
- if (rate_index > 4) {
- if (channels > AMDTP_MAX_CHANNELS_FOR_PCM / 2) {
- err = -ENOSYS;
- return err;
- }
-
- rate /= 2;
- channels *= 2;
- dice->stream.double_pcm_frames = true;
- } else {
- dice->stream.double_pcm_frames = false;
- }
-
- mode = rate_index_to_mode(rate_index);
- amdtp_stream_set_parameters(&dice->stream, rate, channels,
- dice->rx_midi_ports[mode]);
- if (rate_index > 4) {
- channels /= 2;
-
- for (i = 0; i < channels; i++) {
- dice->stream.pcm_positions[i] = i * 2;
- dice->stream.pcm_positions[i + channels] = i * 2 + 1;
- }
- }
-
- amdtp_stream_set_pcm_format(&dice->stream,
- params_format(hw_params));
-
- return 0;
-}
-
-static int dice_hw_free(struct snd_pcm_substream *substream)
-{
- struct dice *dice = substream->private_data;
-
- mutex_lock(&dice->mutex);
- dice_stream_stop(dice);
- mutex_unlock(&dice->mutex);
-
- return snd_pcm_lib_free_vmalloc_buffer(substream);
-}
-
-static int dice_prepare(struct snd_pcm_substream *substream)
-{
- struct dice *dice = substream->private_data;
- int err;
-
- mutex_lock(&dice->mutex);
-
- if (amdtp_streaming_error(&dice->stream))
- dice_stream_stop_packets(dice);
-
- err = dice_stream_start(dice);
- if (err < 0) {
- mutex_unlock(&dice->mutex);
- return err;
- }
-
- mutex_unlock(&dice->mutex);
-
- amdtp_stream_pcm_prepare(&dice->stream);
-
- return 0;
-}
-
-static int dice_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct dice *dice = substream->private_data;
- struct snd_pcm_substream *pcm;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pcm = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pcm = NULL;
- break;
- default:
- return -EINVAL;
- }
- amdtp_stream_pcm_trigger(&dice->stream, pcm);
-
- return 0;
-}
-
-static snd_pcm_uframes_t dice_pointer(struct snd_pcm_substream *substream)
-{
- struct dice *dice = substream->private_data;
-
- return amdtp_stream_pcm_pointer(&dice->stream);
-}
-
-static int dice_create_pcm(struct dice *dice)
-{
- static struct snd_pcm_ops ops = {
- .open = dice_open,
- .close = dice_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = dice_hw_params,
- .hw_free = dice_hw_free,
- .prepare = dice_prepare,
- .trigger = dice_trigger,
- .pointer = dice_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
- };
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(dice->card, "DICE", 0, 1, 0, &pcm);
- if (err < 0)
- return err;
- pcm->private_data = dice;
- strcpy(pcm->name, dice->card->shortname);
- pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->ops = &ops;
-
- return 0;
-}
-
-static long dice_hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
- long count, loff_t *offset)
-{
- struct dice *dice = hwdep->private_data;
- DEFINE_WAIT(wait);
- union snd_firewire_event event;
-
- spin_lock_irq(&dice->lock);
-
- while (!dice->dev_lock_changed && dice->notification_bits == 0) {
- prepare_to_wait(&dice->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
- spin_unlock_irq(&dice->lock);
- schedule();
- finish_wait(&dice->hwdep_wait, &wait);
- if (signal_pending(current))
- return -ERESTARTSYS;
- spin_lock_irq(&dice->lock);
- }
-
- memset(&event, 0, sizeof(event));
- if (dice->dev_lock_changed) {
- event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
- event.lock_status.status = dice->dev_lock_count > 0;
- dice->dev_lock_changed = false;
-
- count = min(count, (long)sizeof(event.lock_status));
- } else {
- event.dice_notification.type = SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION;
- event.dice_notification.notification = dice->notification_bits;
- dice->notification_bits = 0;
-
- count = min(count, (long)sizeof(event.dice_notification));
- }
-
- spin_unlock_irq(&dice->lock);
-
- if (copy_to_user(buf, &event, count))
- return -EFAULT;
-
- return count;
-}
-
-static unsigned int dice_hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
- poll_table *wait)
-{
- struct dice *dice = hwdep->private_data;
- unsigned int events;
-
- poll_wait(file, &dice->hwdep_wait, wait);
-
- spin_lock_irq(&dice->lock);
- if (dice->dev_lock_changed || dice->notification_bits != 0)
- events = POLLIN | POLLRDNORM;
- else
- events = 0;
- spin_unlock_irq(&dice->lock);
-
- return events;
-}
-
-static int dice_hwdep_get_info(struct dice *dice, void __user *arg)
-{
- struct fw_device *dev = fw_parent_device(dice->unit);
- struct snd_firewire_get_info info;
-
- memset(&info, 0, sizeof(info));
- info.type = SNDRV_FIREWIRE_TYPE_DICE;
- info.card = dev->card->index;
- *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
- *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
- strlcpy(info.device_name, dev_name(&dev->device),
- sizeof(info.device_name));
-
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
-
- return 0;
-}
-
-static int dice_hwdep_lock(struct dice *dice)
-{
- int err;
-
- spin_lock_irq(&dice->lock);
-
- if (dice->dev_lock_count == 0) {
- dice->dev_lock_count = -1;
- err = 0;
- } else {
- err = -EBUSY;
- }
-
- spin_unlock_irq(&dice->lock);
-
- return err;
-}
-
-static int dice_hwdep_unlock(struct dice *dice)
-{
- int err;
-
- spin_lock_irq(&dice->lock);
-
- if (dice->dev_lock_count == -1) {
- dice->dev_lock_count = 0;
- err = 0;
- } else {
- err = -EBADFD;
- }
-
- spin_unlock_irq(&dice->lock);
-
- return err;
-}
-
-static int dice_hwdep_release(struct snd_hwdep *hwdep, struct file *file)
-{
- struct dice *dice = hwdep->private_data;
-
- spin_lock_irq(&dice->lock);
- if (dice->dev_lock_count == -1)
- dice->dev_lock_count = 0;
- spin_unlock_irq(&dice->lock);
-
- return 0;
-}
-
-static int dice_hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct dice *dice = hwdep->private_data;
-
- switch (cmd) {
- case SNDRV_FIREWIRE_IOCTL_GET_INFO:
- return dice_hwdep_get_info(dice, (void __user *)arg);
- case SNDRV_FIREWIRE_IOCTL_LOCK:
- return dice_hwdep_lock(dice);
- case SNDRV_FIREWIRE_IOCTL_UNLOCK:
- return dice_hwdep_unlock(dice);
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-#ifdef CONFIG_COMPAT
-static int dice_hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return dice_hwdep_ioctl(hwdep, file, cmd,
- (unsigned long)compat_ptr(arg));
-}
-#else
-#define dice_hwdep_compat_ioctl NULL
-#endif
-
-static int dice_create_hwdep(struct dice *dice)
-{
- static const struct snd_hwdep_ops ops = {
- .read = dice_hwdep_read,
- .release = dice_hwdep_release,
- .poll = dice_hwdep_poll,
- .ioctl = dice_hwdep_ioctl,
- .ioctl_compat = dice_hwdep_compat_ioctl,
- };
- struct snd_hwdep *hwdep;
- int err;
-
- err = snd_hwdep_new(dice->card, "DICE", 0, &hwdep);
- if (err < 0)
- return err;
- strcpy(hwdep->name, "DICE");
- hwdep->iface = SNDRV_HWDEP_IFACE_FW_DICE;
- hwdep->ops = ops;
- hwdep->private_data = dice;
- hwdep->exclusive = true;
-
- return 0;
-}
-
-static int dice_proc_read_mem(struct dice *dice, void *buffer,
- unsigned int offset_q, unsigned int quadlets)
-{
- unsigned int i;
- int err;
-
- err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
- DICE_PRIVATE_SPACE + 4 * offset_q,
- buffer, 4 * quadlets, 0);
- if (err < 0)
- return err;
-
- for (i = 0; i < quadlets; ++i)
- be32_to_cpus(&((u32 *)buffer)[i]);
-
- return 0;
-}
-
-static const char *str_from_array(const char *const strs[], unsigned int count,
- unsigned int i)
-{
- if (i < count)
- return strs[i];
- else
- return "(unknown)";
-}
-
-static void dice_proc_fixup_string(char *s, unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i += 4)
- cpu_to_le32s((u32 *)(s + i));
-
- for (i = 0; i < size - 2; ++i) {
- if (s[i] == '\0')
- return;
- if (s[i] == '\\' && s[i + 1] == '\\') {
- s[i + 2] = '\0';
- return;
- }
- }
- s[size - 1] = '\0';
-}
-
-static void dice_proc_read(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
-{
- static const char *const section_names[5] = {
- "global", "tx", "rx", "ext_sync", "unused2"
- };
- static const char *const clock_sources[] = {
- "aes1", "aes2", "aes3", "aes4", "aes", "adat", "tdif",
- "wc", "arx1", "arx2", "arx3", "arx4", "internal"
- };
- static const char *const rates[] = {
- "32000", "44100", "48000", "88200", "96000", "176400", "192000",
- "any low", "any mid", "any high", "none"
- };
- struct dice *dice = entry->private_data;
- u32 sections[ARRAY_SIZE(section_names) * 2];
- struct {
- u32 number;
- u32 size;
- } tx_rx_header;
- union {
- struct {
- u32 owner_hi, owner_lo;
- u32 notification;
- char nick_name[NICK_NAME_SIZE];
- u32 clock_select;
- u32 enable;
- u32 status;
- u32 extended_status;
- u32 sample_rate;
- u32 version;
- u32 clock_caps;
- char clock_source_names[CLOCK_SOURCE_NAMES_SIZE];
- } global;
- struct {
- u32 iso;
- u32 number_audio;
- u32 number_midi;
- u32 speed;
- char names[TX_NAMES_SIZE];
- u32 ac3_caps;
- u32 ac3_enable;
- } tx;
- struct {
- u32 iso;
- u32 seq_start;
- u32 number_audio;
- u32 number_midi;
- char names[RX_NAMES_SIZE];
- u32 ac3_caps;
- u32 ac3_enable;
- } rx;
- struct {
- u32 clock_source;
- u32 locked;
- u32 rate;
- u32 adat_user_data;
- } ext_sync;
- } buf;
- unsigned int quadlets, stream, i;
-
- if (dice_proc_read_mem(dice, sections, 0, ARRAY_SIZE(sections)) < 0)
- return;
- snd_iprintf(buffer, "sections:\n");
- for (i = 0; i < ARRAY_SIZE(section_names); ++i)
- snd_iprintf(buffer, " %s: offset %u, size %u\n",
- section_names[i],
- sections[i * 2], sections[i * 2 + 1]);
-
- quadlets = min_t(u32, sections[1], sizeof(buf.global) / 4);
- if (dice_proc_read_mem(dice, &buf.global, sections[0], quadlets) < 0)
- return;
- snd_iprintf(buffer, "global:\n");
- snd_iprintf(buffer, " owner: %04x:%04x%08x\n",
- buf.global.owner_hi >> 16,
- buf.global.owner_hi & 0xffff, buf.global.owner_lo);
- snd_iprintf(buffer, " notification: %08x\n", buf.global.notification);
- dice_proc_fixup_string(buf.global.nick_name, NICK_NAME_SIZE);
- snd_iprintf(buffer, " nick name: %s\n", buf.global.nick_name);
- snd_iprintf(buffer, " clock select: %s %s\n",
- str_from_array(clock_sources, ARRAY_SIZE(clock_sources),
- buf.global.clock_select & CLOCK_SOURCE_MASK),
- str_from_array(rates, ARRAY_SIZE(rates),
- (buf.global.clock_select & CLOCK_RATE_MASK)
- >> CLOCK_RATE_SHIFT));
- snd_iprintf(buffer, " enable: %u\n", buf.global.enable);
- snd_iprintf(buffer, " status: %slocked %s\n",
- buf.global.status & STATUS_SOURCE_LOCKED ? "" : "un",
- str_from_array(rates, ARRAY_SIZE(rates),
- (buf.global.status &
- STATUS_NOMINAL_RATE_MASK)
- >> CLOCK_RATE_SHIFT));
- snd_iprintf(buffer, " ext status: %08x\n", buf.global.extended_status);
- snd_iprintf(buffer, " sample rate: %u\n", buf.global.sample_rate);
- snd_iprintf(buffer, " version: %u.%u.%u.%u\n",
- (buf.global.version >> 24) & 0xff,
- (buf.global.version >> 16) & 0xff,
- (buf.global.version >> 8) & 0xff,
- (buf.global.version >> 0) & 0xff);
- if (quadlets >= 90) {
- snd_iprintf(buffer, " clock caps:");
- for (i = 0; i <= 6; ++i)
- if (buf.global.clock_caps & (1 << i))
- snd_iprintf(buffer, " %s", rates[i]);
- for (i = 0; i <= 12; ++i)
- if (buf.global.clock_caps & (1 << (16 + i)))
- snd_iprintf(buffer, " %s", clock_sources[i]);
- snd_iprintf(buffer, "\n");
- dice_proc_fixup_string(buf.global.clock_source_names,
- CLOCK_SOURCE_NAMES_SIZE);
- snd_iprintf(buffer, " clock source names: %s\n",
- buf.global.clock_source_names);
- }
-
- if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
- return;
- quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
- for (stream = 0; stream < tx_rx_header.number; ++stream) {
- if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
- stream * tx_rx_header.size,
- quadlets) < 0)
- break;
- snd_iprintf(buffer, "tx %u:\n", stream);
- snd_iprintf(buffer, " iso channel: %d\n", (int)buf.tx.iso);
- snd_iprintf(buffer, " audio channels: %u\n",
- buf.tx.number_audio);
- snd_iprintf(buffer, " midi ports: %u\n", buf.tx.number_midi);
- snd_iprintf(buffer, " speed: S%u\n", 100u << buf.tx.speed);
- if (quadlets >= 68) {
- dice_proc_fixup_string(buf.tx.names, TX_NAMES_SIZE);
- snd_iprintf(buffer, " names: %s\n", buf.tx.names);
- }
- if (quadlets >= 70) {
- snd_iprintf(buffer, " ac3 caps: %08x\n",
- buf.tx.ac3_caps);
- snd_iprintf(buffer, " ac3 enable: %08x\n",
- buf.tx.ac3_enable);
- }
- }
-
- if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
- return;
- quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
- for (stream = 0; stream < tx_rx_header.number; ++stream) {
- if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
- stream * tx_rx_header.size,
- quadlets) < 0)
- break;
- snd_iprintf(buffer, "rx %u:\n", stream);
- snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso);
- snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start);
- snd_iprintf(buffer, " audio channels: %u\n",
- buf.rx.number_audio);
- snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi);
- if (quadlets >= 68) {
- dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE);
- snd_iprintf(buffer, " names: %s\n", buf.rx.names);
- }
- if (quadlets >= 70) {
- snd_iprintf(buffer, " ac3 caps: %08x\n",
- buf.rx.ac3_caps);
- snd_iprintf(buffer, " ac3 enable: %08x\n",
- buf.rx.ac3_enable);
- }
- }
-
- quadlets = min_t(u32, sections[7], sizeof(buf.ext_sync) / 4);
- if (quadlets >= 4) {
- if (dice_proc_read_mem(dice, &buf.ext_sync,
- sections[6], 4) < 0)
- return;
- snd_iprintf(buffer, "ext status:\n");
- snd_iprintf(buffer, " clock source: %s\n",
- str_from_array(clock_sources,
- ARRAY_SIZE(clock_sources),
- buf.ext_sync.clock_source));
- snd_iprintf(buffer, " locked: %u\n", buf.ext_sync.locked);
- snd_iprintf(buffer, " rate: %s\n",
- str_from_array(rates, ARRAY_SIZE(rates),
- buf.ext_sync.rate));
- snd_iprintf(buffer, " adat user data: ");
- if (buf.ext_sync.adat_user_data & ADAT_USER_DATA_NO_DATA)
- snd_iprintf(buffer, "-\n");
- else
- snd_iprintf(buffer, "%x\n",
- buf.ext_sync.adat_user_data);
- }
-}
-
-static void dice_create_proc(struct dice *dice)
-{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(dice->card, "dice", &entry))
- snd_info_set_text_ops(entry, dice, dice_proc_read);
-}
-
-static void dice_card_free(struct snd_card *card)
-{
- struct dice *dice = card->private_data;
-
- amdtp_stream_destroy(&dice->stream);
- fw_core_remove_address_handler(&dice->notification_handler);
- mutex_destroy(&dice->mutex);
-}
-
-#define OUI_WEISS 0x001c6a
-
-#define DICE_CATEGORY_ID 0x04
-#define WEISS_CATEGORY_ID 0x00
-
-static int dice_interface_check(struct fw_unit *unit)
-{
- static const int min_values[10] = {
- 10, 0x64 / 4,
- 10, 0x18 / 4,
- 10, 0x18 / 4,
- 0, 0,
- 0, 0,
- };
- struct fw_device *device = fw_parent_device(unit);
- struct fw_csr_iterator it;
- int key, value, vendor = -1, model = -1, err;
- unsigned int category, i;
- __be32 pointers[ARRAY_SIZE(min_values)];
- __be32 tx_data[4];
- __be32 version;
-
- /*
- * Check that GUID and unit directory are constructed according to DICE
- * rules, i.e., that the specifier ID is the GUID's OUI, and that the
- * GUID chip ID consists of the 8-bit category ID, the 10-bit product
- * ID, and a 22-bit serial number.
- */
- fw_csr_iterator_init(&it, unit->directory);
- while (fw_csr_iterator_next(&it, &key, &value)) {
- switch (key) {
- case CSR_SPECIFIER_ID:
- vendor = value;
- break;
- case CSR_MODEL:
- model = value;
- break;
- }
- }
- if (vendor == OUI_WEISS)
- category = WEISS_CATEGORY_ID;
- else
- category = DICE_CATEGORY_ID;
- if (device->config_rom[3] != ((vendor << 8) | category) ||
- device->config_rom[4] >> 22 != model)
- return -ENODEV;
-
- /*
- * Check that the sub address spaces exist and are located inside the
- * private address space. The minimum values are chosen so that all
- * minimally required registers are included.
- */
- err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
- DICE_PRIVATE_SPACE,
- pointers, sizeof(pointers), 0);
- if (err < 0)
- return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(pointers); ++i) {
- value = be32_to_cpu(pointers[i]);
- if (value < min_values[i] || value >= 0x40000)
- return -ENODEV;
- }
-
- /* We support playback only. Let capture devices be handled by FFADO. */
- err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
- DICE_PRIVATE_SPACE +
- be32_to_cpu(pointers[2]) * 4,
- tx_data, sizeof(tx_data), 0);
- if (err < 0 || (tx_data[0] && tx_data[3]))
- return -ENODEV;
-
- /*
- * Check that the implemented DICE driver specification major version
- * number matches.
- */
- err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
- DICE_PRIVATE_SPACE +
- be32_to_cpu(pointers[0]) * 4 + GLOBAL_VERSION,
- &version, 4, 0);
- if (err < 0)
- return -ENODEV;
- if ((version & cpu_to_be32(0xff000000)) != cpu_to_be32(0x01000000)) {
- dev_err(&unit->device,
- "unknown DICE version: 0x%08x\n", be32_to_cpu(version));
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int highest_supported_mode_rate(struct dice *dice, unsigned int mode)
-{
- int i;
-
- for (i = ARRAY_SIZE(dice_rates) - 1; i >= 0; --i)
- if ((dice->clock_caps & (1 << i)) &&
- rate_index_to_mode(i) == mode)
- return i;
-
- return -1;
-}
-
-static int dice_read_mode_params(struct dice *dice, unsigned int mode)
-{
- __be32 values[2];
- int rate_index, err;
-
- rate_index = highest_supported_mode_rate(dice, mode);
- if (rate_index < 0) {
- dice->rx_channels[mode] = 0;
- dice->rx_midi_ports[mode] = 0;
- return 0;
- }
-
- err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
- if (err < 0)
- return err;
-
- err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
- rx_address(dice, RX_NUMBER_AUDIO),
- values, 2 * 4, 0);
- if (err < 0)
- return err;
-
- dice->rx_channels[mode] = be32_to_cpu(values[0]);
- dice->rx_midi_ports[mode] = be32_to_cpu(values[1]);
-
- return 0;
-}
-
-static int dice_read_params(struct dice *dice)
-{
- __be32 pointers[6];
- __be32 value;
- int mode, err;
-
- err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
- DICE_PRIVATE_SPACE,
- pointers, sizeof(pointers), 0);
- if (err < 0)
- return err;
-
- dice->global_offset = be32_to_cpu(pointers[0]) * 4;
- dice->rx_offset = be32_to_cpu(pointers[4]) * 4;
-
- /* some very old firmwares don't tell about their clock support */
- if (be32_to_cpu(pointers[1]) * 4 >= GLOBAL_CLOCK_CAPABILITIES + 4) {
- err = snd_fw_transaction(
- dice->unit, TCODE_READ_QUADLET_REQUEST,
- global_address(dice, GLOBAL_CLOCK_CAPABILITIES),
- &value, 4, 0);
- if (err < 0)
- return err;
- dice->clock_caps = be32_to_cpu(value);
- } else {
- /* this should be supported by any device */
- dice->clock_caps = CLOCK_CAP_RATE_44100 |
- CLOCK_CAP_RATE_48000 |
- CLOCK_CAP_SOURCE_ARX1 |
- CLOCK_CAP_SOURCE_INTERNAL;
- }
-
- for (mode = 2; mode >= 0; --mode) {
- err = dice_read_mode_params(dice, mode);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static void dice_card_strings(struct dice *dice)
-{
- struct snd_card *card = dice->card;
- struct fw_device *dev = fw_parent_device(dice->unit);
- char vendor[32], model[32];
- unsigned int i;
- int err;
-
- strcpy(card->driver, "DICE");
-
- strcpy(card->shortname, "DICE");
- BUILD_BUG_ON(NICK_NAME_SIZE < sizeof(card->shortname));
- err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
- global_address(dice, GLOBAL_NICK_NAME),
- card->shortname, sizeof(card->shortname), 0);
- if (err >= 0) {
- /* DICE strings are returned in "always-wrong" endianness */
- BUILD_BUG_ON(sizeof(card->shortname) % 4 != 0);
- for (i = 0; i < sizeof(card->shortname); i += 4)
- swab32s((u32 *)&card->shortname[i]);
- card->shortname[sizeof(card->shortname) - 1] = '\0';
- }
-
- strcpy(vendor, "?");
- fw_csr_string(dev->config_rom + 5, CSR_VENDOR, vendor, sizeof(vendor));
- strcpy(model, "?");
- fw_csr_string(dice->unit->directory, CSR_MODEL, model, sizeof(model));
- snprintf(card->longname, sizeof(card->longname),
- "%s %s (serial %u) at %s, S%d",
- vendor, model, dev->config_rom[4] & 0x3fffff,
- dev_name(&dice->unit->device), 100 << dev->max_speed);
-
- strcpy(card->mixername, "DICE");
-}
-
-static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
-{
- struct snd_card *card;
- struct dice *dice;
- __be32 clock_sel;
- int err;
-
- err = dice_interface_check(unit);
- if (err < 0)
- return err;
-
- err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
- sizeof(*dice), &card);
- if (err < 0)
- return err;
-
- dice = card->private_data;
- dice->card = card;
- spin_lock_init(&dice->lock);
- mutex_init(&dice->mutex);
- dice->unit = unit;
- init_completion(&dice->clock_accepted);
- init_waitqueue_head(&dice->hwdep_wait);
-
- dice->notification_handler.length = 4;
- dice->notification_handler.address_callback = dice_notification;
- dice->notification_handler.callback_data = dice;
- err = fw_core_add_address_handler(&dice->notification_handler,
- &fw_high_memory_region);
- if (err < 0)
- goto err_mutex;
-
- err = dice_owner_set(dice);
- if (err < 0)
- goto err_notification_handler;
-
- err = dice_read_params(dice);
- if (err < 0)
- goto err_owner;
-
- err = fw_iso_resources_init(&dice->resources, unit);
- if (err < 0)
- goto err_owner;
- dice->resources.channels_mask = 0x00000000ffffffffuLL;
-
- err = amdtp_stream_init(&dice->stream, unit, AMDTP_OUT_STREAM,
- CIP_BLOCKING);
- if (err < 0)
- goto err_resources;
-
- card->private_free = dice_card_free;
-
- dice_card_strings(dice);
-
- err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
- global_address(dice, GLOBAL_CLOCK_SELECT),
- &clock_sel, 4, 0);
- if (err < 0)
- goto error;
- clock_sel &= cpu_to_be32(~CLOCK_SOURCE_MASK);
- clock_sel |= cpu_to_be32(CLOCK_SOURCE_ARX1);
- err = snd_fw_transaction(unit, TCODE_WRITE_QUADLET_REQUEST,
- global_address(dice, GLOBAL_CLOCK_SELECT),
- &clock_sel, 4, 0);
- if (err < 0)
- goto error;
-
- err = dice_create_pcm(dice);
- if (err < 0)
- goto error;
-
- err = dice_create_hwdep(dice);
- if (err < 0)
- goto error;
-
- dice_create_proc(dice);
-
- err = snd_card_register(card);
- if (err < 0)
- goto error;
-
- dev_set_drvdata(&unit->device, dice);
-
- return 0;
-
-err_resources:
- fw_iso_resources_destroy(&dice->resources);
-err_owner:
- dice_owner_clear(dice);
-err_notification_handler:
- fw_core_remove_address_handler(&dice->notification_handler);
-err_mutex:
- mutex_destroy(&dice->mutex);
-error:
- snd_card_free(card);
- return err;
-}
-
-static void dice_remove(struct fw_unit *unit)
-{
- struct dice *dice = dev_get_drvdata(&unit->device);
-
- amdtp_stream_pcm_abort(&dice->stream);
-
- snd_card_disconnect(dice->card);
-
- mutex_lock(&dice->mutex);
-
- dice_stream_stop(dice);
- dice_owner_clear(dice);
-
- mutex_unlock(&dice->mutex);
-
- snd_card_free_when_closed(dice->card);
-}
-
-static void dice_bus_reset(struct fw_unit *unit)
-{
- struct dice *dice = dev_get_drvdata(&unit->device);
-
- /*
- * On a bus reset, the DICE firmware disables streaming and then goes
- * off contemplating its own navel for hundreds of milliseconds before
- * it can react to any of our attempts to reenable streaming. This
- * means that we lose synchronization anyway, so we force our streams
- * to stop so that the application can restart them in an orderly
- * manner.
- */
- amdtp_stream_pcm_abort(&dice->stream);
-
- mutex_lock(&dice->mutex);
-
- dice->global_enabled = false;
- dice_stream_stop_packets(dice);
-
- dice_owner_update(dice);
-
- fw_iso_resources_update(&dice->resources);
-
- mutex_unlock(&dice->mutex);
-}
-
-#define DICE_INTERFACE 0x000001
-
-static const struct ieee1394_device_id dice_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_VERSION,
- .version = DICE_INTERFACE,
- },
- { }
-};
-MODULE_DEVICE_TABLE(ieee1394, dice_id_table);
-
-static struct fw_driver dice_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = KBUILD_MODNAME,
- .bus = &fw_bus_type,
- },
- .probe = dice_probe,
- .update = dice_bus_reset,
- .remove = dice_remove,
- .id_table = dice_id_table,
-};
-
-static int __init alsa_dice_init(void)
-{
- return driver_register(&dice_driver.driver);
-}
-
-static void __exit alsa_dice_exit(void)
-{
- driver_unregister(&dice_driver.driver);
-}
-
-module_init(alsa_dice_init);
-module_exit(alsa_dice_exit);
diff --git a/sound/firewire/dice/Makefile b/sound/firewire/dice/Makefile
new file mode 100644
index 000000000000..9ef228ef7baf
--- /dev/null
+++ b/sound/firewire/dice/Makefile
@@ -0,0 +1,3 @@
+snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
+ dice-pcm.o dice-hwdep.o dice.o
+obj-$(CONFIG_SND_DICE) += snd-dice.o
diff --git a/sound/firewire/dice/dice-hwdep.c b/sound/firewire/dice/dice-hwdep.c
new file mode 100644
index 000000000000..a4dc02a86f12
--- /dev/null
+++ b/sound/firewire/dice/dice-hwdep.c
@@ -0,0 +1,190 @@
+/*
+ * dice_hwdep.c - a part of driver for DICE based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2014 Takashi Sakamoto <o-takashi@sakamocchi.jp>
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
+ long count, loff_t *offset)
+{
+ struct snd_dice *dice = hwdep->private_data;
+ DEFINE_WAIT(wait);
+ union snd_firewire_event event;
+
+ spin_lock_irq(&dice->lock);
+
+ while (!dice->dev_lock_changed && dice->notification_bits == 0) {
+ prepare_to_wait(&dice->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&dice->lock);
+ schedule();
+ finish_wait(&dice->hwdep_wait, &wait);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irq(&dice->lock);
+ }
+
+ memset(&event, 0, sizeof(event));
+ if (dice->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = dice->dev_lock_count > 0;
+ dice->dev_lock_changed = false;
+
+ count = min_t(long, count, sizeof(event.lock_status));
+ } else {
+ event.dice_notification.type =
+ SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION;
+ event.dice_notification.notification = dice->notification_bits;
+ dice->notification_bits = 0;
+
+ count = min_t(long, count, sizeof(event.dice_notification));
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ if (copy_to_user(buf, &event, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ poll_table *wait)
+{
+ struct snd_dice *dice = hwdep->private_data;
+ unsigned int events;
+
+ poll_wait(file, &dice->hwdep_wait, wait);
+
+ spin_lock_irq(&dice->lock);
+ if (dice->dev_lock_changed || dice->notification_bits != 0)
+ events = POLLIN | POLLRDNORM;
+ else
+ events = 0;
+ spin_unlock_irq(&dice->lock);
+
+ return events;
+}
+
+static int hwdep_get_info(struct snd_dice *dice, void __user *arg)
+{
+ struct fw_device *dev = fw_parent_device(dice->unit);
+ struct snd_firewire_get_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.type = SNDRV_FIREWIRE_TYPE_DICE;
+ info.card = dev->card->index;
+ *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
+ *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
+ strlcpy(info.device_name, dev_name(&dev->device),
+ sizeof(info.device_name));
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int hwdep_lock(struct snd_dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count == 0) {
+ dice->dev_lock_count = -1;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ return err;
+}
+
+static int hwdep_unlock(struct snd_dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count == -1) {
+ dice->dev_lock_count = 0;
+ err = 0;
+ } else {
+ err = -EBADFD;
+ }
+
+ spin_unlock_irq(&dice->lock);
+
+ return err;
+}
+
+static int hwdep_release(struct snd_hwdep *hwdep, struct file *file)
+{
+ struct snd_dice *dice = hwdep->private_data;
+
+ spin_lock_irq(&dice->lock);
+ if (dice->dev_lock_count == -1)
+ dice->dev_lock_count = 0;
+ spin_unlock_irq(&dice->lock);
+
+ return 0;
+}
+
+static int hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct snd_dice *dice = hwdep->private_data;
+
+ switch (cmd) {
+ case SNDRV_FIREWIRE_IOCTL_GET_INFO:
+ return hwdep_get_info(dice, (void __user *)arg);
+ case SNDRV_FIREWIRE_IOCTL_LOCK:
+ return hwdep_lock(dice);
+ case SNDRV_FIREWIRE_IOCTL_UNLOCK:
+ return hwdep_unlock(dice);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static int hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return hwdep_ioctl(hwdep, file, cmd,
+ (unsigned long)compat_ptr(arg));
+}
+#else
+#define hwdep_compat_ioctl NULL
+#endif
+
+int snd_dice_create_hwdep(struct snd_dice *dice)
+{
+ static const struct snd_hwdep_ops ops = {
+ .read = hwdep_read,
+ .release = hwdep_release,
+ .poll = hwdep_poll,
+ .ioctl = hwdep_ioctl,
+ .ioctl_compat = hwdep_compat_ioctl,
+ };
+ struct snd_hwdep *hwdep;
+ int err;
+
+ err = snd_hwdep_new(dice->card, "DICE", 0, &hwdep);
+ if (err < 0)
+ return err;
+ strcpy(hwdep->name, "DICE");
+ hwdep->iface = SNDRV_HWDEP_IFACE_FW_DICE;
+ hwdep->ops = ops;
+ hwdep->private_data = dice;
+ hwdep->exclusive = true;
+
+ return 0;
+}
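For reference, the hwdep node registered above can be queried from userspace with the SNDRV_FIREWIRE_IOCTL_GET_INFO ioctl. A hedged userspace sketch; the device path is an assumption, so use the hwdep node that actually belongs to the card:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/firewire.h>	/* UAPI header installed by the kernel */

int main(void)
{
	struct snd_firewire_get_info info;
	/* Assumed path: hwdep device 0 of card 0. */
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, SNDRV_FIREWIRE_IOCTL_GET_INFO, &info) == 0)
		printf("type=%u card=%u name=%s\n",
		       info.type, info.card, info.device_name);

	close(fd);
	return 0;
}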
diff --git a/sound/firewire/dice-interface.h b/sound/firewire/dice/dice-interface.h
index 27b044f84c81..27b044f84c81 100644
--- a/sound/firewire/dice-interface.h
+++ b/sound/firewire/dice/dice-interface.h
diff --git a/sound/firewire/dice/dice-midi.c b/sound/firewire/dice/dice-midi.c
new file mode 100644
index 000000000000..fe43ce791f84
--- /dev/null
+++ b/sound/firewire/dice/dice-midi.c
@@ -0,0 +1,157 @@
+/*
+ * dice_midi.c - a part of driver for DICE based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+#include "dice.h"
+
+static int midi_open(struct snd_rawmidi_substream *substream)
+{
+ struct snd_dice *dice = substream->rmidi->private_data;
+ int err;
+
+ err = snd_dice_stream_lock_try(dice);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&dice->mutex);
+
+ dice->substreams_counter++;
+ err = snd_dice_stream_start_duplex(dice, 0);
+
+ mutex_unlock(&dice->mutex);
+
+ if (err < 0)
+ snd_dice_stream_lock_release(dice);
+
+ return err;
+}
+
+static int midi_close(struct snd_rawmidi_substream *substream)
+{
+ struct snd_dice *dice = substream->rmidi->private_data;
+
+ mutex_lock(&dice->mutex);
+
+ dice->substreams_counter--;
+ snd_dice_stream_stop_duplex(dice);
+
+ mutex_unlock(&dice->mutex);
+
+ snd_dice_stream_lock_release(dice);
+ return 0;
+}
+
+static void midi_capture_trigger(struct snd_rawmidi_substream *substrm, int up)
+{
+ struct snd_dice *dice = substrm->rmidi->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dice->lock, flags);
+
+ if (up)
+ amdtp_stream_midi_trigger(&dice->tx_stream,
+ substrm->number, substrm);
+ else
+ amdtp_stream_midi_trigger(&dice->tx_stream,
+ substrm->number, NULL);
+
+ spin_unlock_irqrestore(&dice->lock, flags);
+}
+
+static void midi_playback_trigger(struct snd_rawmidi_substream *substrm, int up)
+{
+ struct snd_dice *dice = substrm->rmidi->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dice->lock, flags);
+
+ if (up)
+ amdtp_stream_midi_trigger(&dice->rx_stream,
+ substrm->number, substrm);
+ else
+ amdtp_stream_midi_trigger(&dice->rx_stream,
+ substrm->number, NULL);
+
+ spin_unlock_irqrestore(&dice->lock, flags);
+}
+
+static struct snd_rawmidi_ops capture_ops = {
+ .open = midi_open,
+ .close = midi_close,
+ .trigger = midi_capture_trigger,
+};
+
+static struct snd_rawmidi_ops playback_ops = {
+ .open = midi_open,
+ .close = midi_close,
+ .trigger = midi_playback_trigger,
+};
+
+static void set_midi_substream_names(struct snd_dice *dice,
+ struct snd_rawmidi_str *str)
+{
+ struct snd_rawmidi_substream *subs;
+
+ list_for_each_entry(subs, &str->substreams, list) {
+ snprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d", dice->card->shortname, subs->number + 1);
+ }
+}
+
+int snd_dice_create_midi(struct snd_dice *dice)
+{
+ struct snd_rawmidi *rmidi;
+ struct snd_rawmidi_str *str;
+ unsigned int i, midi_in_ports, midi_out_ports;
+ int err;
+
+ midi_in_ports = midi_out_ports = 0;
+ for (i = 0; i < 3; i++) {
+ midi_in_ports = max(dice->tx_midi_ports[i], midi_in_ports);
+ midi_out_ports = max(dice->rx_midi_ports[i], midi_out_ports);
+ }
+
+ if (midi_in_ports + midi_out_ports == 0)
+ return 0;
+
+ /* create midi ports */
+ err = snd_rawmidi_new(dice->card, dice->card->driver, 0,
+ midi_out_ports, midi_in_ports,
+ &rmidi);
+ if (err < 0)
+ return err;
+
+ snprintf(rmidi->name, sizeof(rmidi->name),
+ "%s MIDI", dice->card->shortname);
+ rmidi->private_data = dice;
+
+ if (midi_in_ports > 0) {
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;
+
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+ &capture_ops);
+
+ str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT];
+
+ set_midi_substream_names(dice, str);
+ }
+
+ if (midi_out_ports > 0) {
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT;
+
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
+ &playback_ops);
+
+ str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT];
+
+ set_midi_substream_names(dice, str);
+ }
+
+ if ((midi_out_ports > 0) && (midi_in_ports > 0))
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;
+
+ return 0;
+}
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
new file mode 100644
index 000000000000..f77714511f8b
--- /dev/null
+++ b/sound/firewire/dice/dice-pcm.c
@@ -0,0 +1,422 @@
+/*
+ * dice_pcm.c - a part of driver for DICE based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2014 Takashi Sakamoto <o-takashi@sakamocchi.jp>
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+static int dice_rate_constraint(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_pcm_substream *substream = rule->private;
+ struct snd_dice *dice = substream->private_data;
+
+ const struct snd_interval *c =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval rates = {
+ .min = UINT_MAX, .max = 0, .integer = 1
+ };
+ unsigned int i, rate, mode, *pcm_channels;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ pcm_channels = dice->tx_channels;
+ else
+ pcm_channels = dice->rx_channels;
+
+ for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
+ rate = snd_dice_rates[i];
+ if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
+ continue;
+
+ if (!snd_interval_test(c, pcm_channels[mode]))
+ continue;
+
+ rates.min = min(rates.min, rate);
+ rates.max = max(rates.max, rate);
+ }
+
+ return snd_interval_refine(r, &rates);
+}
+
+static int dice_channels_constraint(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_pcm_substream *substream = rule->private;
+ struct snd_dice *dice = substream->private_data;
+
+ const struct snd_interval *r =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *c =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval channels = {
+ .min = UINT_MAX, .max = 0, .integer = 1
+ };
+ unsigned int i, rate, mode, *pcm_channels;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ pcm_channels = dice->tx_channels;
+ else
+ pcm_channels = dice->rx_channels;
+
+ for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
+ rate = snd_dice_rates[i];
+ if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
+ continue;
+
+ if (!snd_interval_test(r, rate))
+ continue;
+
+ channels.min = min(channels.min, pcm_channels[mode]);
+ channels.max = max(channels.max, pcm_channels[mode]);
+ }
+
+ return snd_interval_refine(c, &channels);
+}
+
+static void limit_channels_and_rates(struct snd_dice *dice,
+ struct snd_pcm_runtime *runtime,
+ unsigned int *pcm_channels)
+{
+ struct snd_pcm_hardware *hw = &runtime->hw;
+ unsigned int i, rate, mode;
+
+ hw->channels_min = UINT_MAX;
+ hw->channels_max = 0;
+
+ for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
+ rate = snd_dice_rates[i];
+ if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
+ continue;
+ hw->rates |= snd_pcm_rate_to_rate_bit(rate);
+
+ if (pcm_channels[mode] == 0)
+ continue;
+ hw->channels_min = min(hw->channels_min, pcm_channels[mode]);
+ hw->channels_max = max(hw->channels_max, pcm_channels[mode]);
+ }
+
+ snd_pcm_limit_hw_rates(runtime);
+}
+
+static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
+{
+ hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
+ hw->periods_max = UINT_MAX;
+
+ hw->period_bytes_min = 4 * hw->channels_max; /* bytes per frame */
+
+ /* Just to avoid allocating too many pages. */
+ hw->period_bytes_max = hw->period_bytes_min * 2048;
+ hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
+}
+
+static int init_hw_info(struct snd_dice *dice,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hardware *hw = &runtime->hw;
+ struct amdtp_stream *stream;
+ unsigned int *pcm_channels;
+ int err;
+
+ hw->info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_JOINT_DUPLEX |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ hw->formats = AMDTP_IN_PCM_FORMAT_BITS;
+ stream = &dice->tx_stream;
+ pcm_channels = dice->tx_channels;
+ } else {
+ hw->formats = AMDTP_OUT_PCM_FORMAT_BITS;
+ stream = &dice->rx_stream;
+ pcm_channels = dice->rx_channels;
+ }
+
+ limit_channels_and_rates(dice, runtime, pcm_channels);
+ limit_period_and_buffer(hw);
+
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ dice_rate_constraint, substream,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ dice_channels_constraint, substream,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+
+ err = amdtp_stream_add_pcm_hw_constraints(stream, runtime);
+end:
+ return err;
+}
+
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+ unsigned int source, rate;
+ bool internal;
+ int err;
+
+ err = snd_dice_stream_lock_try(dice);
+ if (err < 0)
+ goto end;
+
+ err = init_hw_info(dice, substream);
+ if (err < 0)
+ goto err_locked;
+
+ err = snd_dice_transaction_get_clock_source(dice, &source);
+ if (err < 0)
+ goto err_locked;
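+ /* Only the clock sources listed below are treated as external; anything else counts as internal. */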
+ switch (source) {
+ case CLOCK_SOURCE_AES1:
+ case CLOCK_SOURCE_AES2:
+ case CLOCK_SOURCE_AES3:
+ case CLOCK_SOURCE_AES4:
+ case CLOCK_SOURCE_AES_ANY:
+ case CLOCK_SOURCE_ADAT:
+ case CLOCK_SOURCE_TDIF:
+ case CLOCK_SOURCE_WC:
+ internal = false;
+ break;
+ default:
+ internal = true;
+ break;
+ }
+
+ /*
+ * When the clock source is not internal, or any PCM stream is already
+ * running, the available sampling rate is limited to the current one.
+ */
+ if (!internal ||
+ amdtp_stream_pcm_running(&dice->tx_stream) ||
+ amdtp_stream_pcm_running(&dice->rx_stream)) {
+ err = snd_dice_transaction_get_rate(dice, &rate);
+ if (err < 0)
+ goto err_locked;
+ substream->runtime->hw.rate_min = rate;
+ substream->runtime->hw.rate_max = rate;
+ }
+
+ snd_pcm_set_sync(substream);
+end:
+ return err;
+err_locked:
+ snd_dice_stream_lock_release(dice);
+ return err;
+}
+
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ snd_dice_stream_lock_release(dice);
+
+ return 0;
+}
+
+static int capture_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
+ mutex_lock(&dice->mutex);
+ dice->substreams_counter++;
+ mutex_unlock(&dice->mutex);
+ }
+
+ amdtp_stream_set_pcm_format(&dice->tx_stream,
+ params_format(hw_params));
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+static int playback_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
+ mutex_lock(&dice->mutex);
+ dice->substreams_counter++;
+ mutex_unlock(&dice->mutex);
+ }
+
+ amdtp_stream_set_pcm_format(&dice->rx_stream,
+ params_format(hw_params));
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int capture_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ mutex_lock(&dice->mutex);
+
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ dice->substreams_counter--;
+
+ snd_dice_stream_stop_duplex(dice);
+
+ mutex_unlock(&dice->mutex);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int playback_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ mutex_lock(&dice->mutex);
+
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ dice->substreams_counter--;
+
+ snd_dice_stream_stop_duplex(dice);
+
+ mutex_unlock(&dice->mutex);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+ int err;
+
+ mutex_lock(&dice->mutex);
+ err = snd_dice_stream_start_duplex(dice, substream->runtime->rate);
+ mutex_unlock(&dice->mutex);
+ if (err >= 0)
+ amdtp_stream_pcm_prepare(&dice->tx_stream);
+
+ return err;
+}
+static int playback_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+ int err;
+
+ mutex_lock(&dice->mutex);
+ err = snd_dice_stream_start_duplex(dice, substream->runtime->rate);
+ mutex_unlock(&dice->mutex);
+ if (err >= 0)
+ amdtp_stream_pcm_prepare(&dice->rx_stream);
+
+ return err;
+}
+
+static int capture_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ amdtp_stream_pcm_trigger(&dice->tx_stream, substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ amdtp_stream_pcm_trigger(&dice->tx_stream, NULL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+static int playback_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ amdtp_stream_pcm_trigger(&dice->rx_stream, substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ amdtp_stream_pcm_trigger(&dice->rx_stream, NULL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ return amdtp_stream_pcm_pointer(&dice->tx_stream);
+}
+static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+
+ return amdtp_stream_pcm_pointer(&dice->rx_stream);
+}
+
+int snd_dice_create_pcm(struct snd_dice *dice)
+{
+ static struct snd_pcm_ops capture_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = capture_hw_params,
+ .hw_free = capture_hw_free,
+ .prepare = capture_prepare,
+ .trigger = capture_trigger,
+ .pointer = capture_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+ static struct snd_pcm_ops playback_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = playback_hw_params,
+ .hw_free = playback_hw_free,
+ .prepare = playback_prepare,
+ .trigger = playback_trigger,
+ .pointer = playback_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+ struct snd_pcm *pcm;
+ unsigned int i, capture, playback;
+ int err;
+
+ capture = playback = 0;
+ for (i = 0; i < 3; i++) {
+ if (dice->tx_channels[i] > 0)
+ capture = 1;
+ if (dice->rx_channels[i] > 0)
+ playback = 1;
+ }
+
+ err = snd_pcm_new(dice->card, "DICE", 0, playback, capture, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = dice;
+ strcpy(pcm->name, dice->card->shortname);
+
+ if (capture > 0)
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+
+ if (playback > 0)
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
+
+ return 0;
+}
diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c
new file mode 100644
index 000000000000..f5c1d1bced59
--- /dev/null
+++ b/sound/firewire/dice/dice-proc.c
@@ -0,0 +1,252 @@
+/*
+ * dice_proc.c - a part of driver for Dice based devices
+ *
+ * Copyright (c) Clemens Ladisch
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+static int dice_proc_read_mem(struct snd_dice *dice, void *buffer,
+ unsigned int offset_q, unsigned int quadlets)
+{
+ unsigned int i;
+ int err;
+
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE + 4 * offset_q,
+ buffer, 4 * quadlets, 0);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < quadlets; ++i)
+ be32_to_cpus(&((u32 *)buffer)[i]);
+
+ return 0;
+}
+
+static const char *str_from_array(const char *const strs[], unsigned int count,
+ unsigned int i)
+{
+ if (i < count)
+ return strs[i];
+
+ return "(unknown)";
+}
+
+static void dice_proc_fixup_string(char *s, unsigned int size)
+{
+ unsigned int i;
+
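+ /* Undo the per-quadlet byte order conversion and terminate at a doubled backslash. */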
+ for (i = 0; i < size; i += 4)
+ cpu_to_le32s((u32 *)(s + i));
+
+ for (i = 0; i < size - 2; ++i) {
+ if (s[i] == '\0')
+ return;
+ if (s[i] == '\\' && s[i + 1] == '\\') {
+ s[i + 2] = '\0';
+ return;
+ }
+ }
+ s[size - 1] = '\0';
+}
+
+static void dice_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ static const char *const section_names[5] = {
+ "global", "tx", "rx", "ext_sync", "unused2"
+ };
+ static const char *const clock_sources[] = {
+ "aes1", "aes2", "aes3", "aes4", "aes", "adat", "tdif",
+ "wc", "arx1", "arx2", "arx3", "arx4", "internal"
+ };
+ static const char *const rates[] = {
+ "32000", "44100", "48000", "88200", "96000", "176400", "192000",
+ "any low", "any mid", "any high", "none"
+ };
+ struct snd_dice *dice = entry->private_data;
+ u32 sections[ARRAY_SIZE(section_names) * 2];
+ struct {
+ u32 number;
+ u32 size;
+ } tx_rx_header;
+ union {
+ struct {
+ u32 owner_hi, owner_lo;
+ u32 notification;
+ char nick_name[NICK_NAME_SIZE];
+ u32 clock_select;
+ u32 enable;
+ u32 status;
+ u32 extended_status;
+ u32 sample_rate;
+ u32 version;
+ u32 clock_caps;
+ char clock_source_names[CLOCK_SOURCE_NAMES_SIZE];
+ } global;
+ struct {
+ u32 iso;
+ u32 number_audio;
+ u32 number_midi;
+ u32 speed;
+ char names[TX_NAMES_SIZE];
+ u32 ac3_caps;
+ u32 ac3_enable;
+ } tx;
+ struct {
+ u32 iso;
+ u32 seq_start;
+ u32 number_audio;
+ u32 number_midi;
+ char names[RX_NAMES_SIZE];
+ u32 ac3_caps;
+ u32 ac3_enable;
+ } rx;
+ struct {
+ u32 clock_source;
+ u32 locked;
+ u32 rate;
+ u32 adat_user_data;
+ } ext_sync;
+ } buf;
+ unsigned int quadlets, stream, i;
+
+ if (dice_proc_read_mem(dice, sections, 0, ARRAY_SIZE(sections)) < 0)
+ return;
+ snd_iprintf(buffer, "sections:\n");
+ for (i = 0; i < ARRAY_SIZE(section_names); ++i)
+ snd_iprintf(buffer, " %s: offset %u, size %u\n",
+ section_names[i],
+ sections[i * 2], sections[i * 2 + 1]);
+
+ quadlets = min_t(u32, sections[1], sizeof(buf.global) / 4);
+ if (dice_proc_read_mem(dice, &buf.global, sections[0], quadlets) < 0)
+ return;
+ snd_iprintf(buffer, "global:\n");
+ snd_iprintf(buffer, " owner: %04x:%04x%08x\n",
+ buf.global.owner_hi >> 16,
+ buf.global.owner_hi & 0xffff, buf.global.owner_lo);
+ snd_iprintf(buffer, " notification: %08x\n", buf.global.notification);
+ dice_proc_fixup_string(buf.global.nick_name, NICK_NAME_SIZE);
+ snd_iprintf(buffer, " nick name: %s\n", buf.global.nick_name);
+ snd_iprintf(buffer, " clock select: %s %s\n",
+ str_from_array(clock_sources, ARRAY_SIZE(clock_sources),
+ buf.global.clock_select & CLOCK_SOURCE_MASK),
+ str_from_array(rates, ARRAY_SIZE(rates),
+ (buf.global.clock_select & CLOCK_RATE_MASK)
+ >> CLOCK_RATE_SHIFT));
+ snd_iprintf(buffer, " enable: %u\n", buf.global.enable);
+ snd_iprintf(buffer, " status: %slocked %s\n",
+ buf.global.status & STATUS_SOURCE_LOCKED ? "" : "un",
+ str_from_array(rates, ARRAY_SIZE(rates),
+ (buf.global.status &
+ STATUS_NOMINAL_RATE_MASK)
+ >> CLOCK_RATE_SHIFT));
+ snd_iprintf(buffer, " ext status: %08x\n", buf.global.extended_status);
+ snd_iprintf(buffer, " sample rate: %u\n", buf.global.sample_rate);
+ snd_iprintf(buffer, " version: %u.%u.%u.%u\n",
+ (buf.global.version >> 24) & 0xff,
+ (buf.global.version >> 16) & 0xff,
+ (buf.global.version >> 8) & 0xff,
+ (buf.global.version >> 0) & 0xff);
+ if (quadlets >= 90) {
+ snd_iprintf(buffer, " clock caps:");
+ for (i = 0; i <= 6; ++i)
+ if (buf.global.clock_caps & (1 << i))
+ snd_iprintf(buffer, " %s", rates[i]);
+ for (i = 0; i <= 12; ++i)
+ if (buf.global.clock_caps & (1 << (16 + i)))
+ snd_iprintf(buffer, " %s", clock_sources[i]);
+ snd_iprintf(buffer, "\n");
+ dice_proc_fixup_string(buf.global.clock_source_names,
+ CLOCK_SOURCE_NAMES_SIZE);
+ snd_iprintf(buffer, " clock source names: %s\n",
+ buf.global.clock_source_names);
+ }
+
+ if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
+ return;
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
+ for (stream = 0; stream < tx_rx_header.number; ++stream) {
+ if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
+ stream * tx_rx_header.size,
+ quadlets) < 0)
+ break;
+ snd_iprintf(buffer, "tx %u:\n", stream);
+ snd_iprintf(buffer, " iso channel: %d\n", (int)buf.tx.iso);
+ snd_iprintf(buffer, " audio channels: %u\n",
+ buf.tx.number_audio);
+ snd_iprintf(buffer, " midi ports: %u\n", buf.tx.number_midi);
+ snd_iprintf(buffer, " speed: S%u\n", 100u << buf.tx.speed);
+ if (quadlets >= 68) {
+ dice_proc_fixup_string(buf.tx.names, TX_NAMES_SIZE);
+ snd_iprintf(buffer, " names: %s\n", buf.tx.names);
+ }
+ if (quadlets >= 70) {
+ snd_iprintf(buffer, " ac3 caps: %08x\n",
+ buf.tx.ac3_caps);
+ snd_iprintf(buffer, " ac3 enable: %08x\n",
+ buf.tx.ac3_enable);
+ }
+ }
+
+ if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
+ return;
+ quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
+ for (stream = 0; stream < tx_rx_header.number; ++stream) {
+ if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
+ stream * tx_rx_header.size,
+ quadlets) < 0)
+ break;
+ snd_iprintf(buffer, "rx %u:\n", stream);
+ snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso);
+ snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start);
+ snd_iprintf(buffer, " audio channels: %u\n",
+ buf.rx.number_audio);
+ snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi);
+ if (quadlets >= 68) {
+ dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE);
+ snd_iprintf(buffer, " names: %s\n", buf.rx.names);
+ }
+ if (quadlets >= 70) {
+ snd_iprintf(buffer, " ac3 caps: %08x\n",
+ buf.rx.ac3_caps);
+ snd_iprintf(buffer, " ac3 enable: %08x\n",
+ buf.rx.ac3_enable);
+ }
+ }
+
+ quadlets = min_t(u32, sections[7], sizeof(buf.ext_sync) / 4);
+ if (quadlets >= 4) {
+ if (dice_proc_read_mem(dice, &buf.ext_sync,
+ sections[6], 4) < 0)
+ return;
+ snd_iprintf(buffer, "ext status:\n");
+ snd_iprintf(buffer, " clock source: %s\n",
+ str_from_array(clock_sources,
+ ARRAY_SIZE(clock_sources),
+ buf.ext_sync.clock_source));
+ snd_iprintf(buffer, " locked: %u\n", buf.ext_sync.locked);
+ snd_iprintf(buffer, " rate: %s\n",
+ str_from_array(rates, ARRAY_SIZE(rates),
+ buf.ext_sync.rate));
+ snd_iprintf(buffer, " adat user data: ");
+ if (buf.ext_sync.adat_user_data & ADAT_USER_DATA_NO_DATA)
+ snd_iprintf(buffer, "-\n");
+ else
+ snd_iprintf(buffer, "%x\n",
+ buf.ext_sync.adat_user_data);
+ }
+}
+
+void snd_dice_create_proc(struct snd_dice *dice)
+{
+ struct snd_info_entry *entry;
+
+ if (!snd_card_proc_new(dice->card, "dice", &entry))
+ snd_info_set_text_ops(entry, dice, dice_proc_read);
+}
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
new file mode 100644
index 000000000000..fa9cf761b610
--- /dev/null
+++ b/sound/firewire/dice/dice-stream.c
@@ -0,0 +1,407 @@
+/*
+ * dice_stream.c - a part of driver for DICE based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2014 Takashi Sakamoto <o-takashi@sakamocchi.jp>
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+#define CALLBACK_TIMEOUT 200
+
+const unsigned int snd_dice_rates[SND_DICE_RATES_COUNT] = {
+ /* mode 0 */
+ [0] = 32000,
+ [1] = 44100,
+ [2] = 48000,
+ /* mode 1 */
+ [3] = 88200,
+ [4] = 96000,
+ /* mode 2 */
+ [5] = 176400,
+ [6] = 192000,
+};
+
+int snd_dice_stream_get_rate_mode(struct snd_dice *dice, unsigned int rate,
+ unsigned int *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(snd_dice_rates); i++) {
+ if (!(dice->clock_caps & BIT(i)))
+ continue;
+ if (snd_dice_rates[i] != rate)
+ continue;
+
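+ /* (i - 1) / 2 maps indices 0-2 to mode 0, 3-4 to mode 1, 5-6 to mode 2. */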
+ *mode = (i - 1) / 2;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void release_resources(struct snd_dice *dice,
+ struct fw_iso_resources *resources)
+{
+ __be32 channel;
+
+ /* Reset channel number */
+ channel = cpu_to_be32((u32)-1);
+ if (resources == &dice->tx_resources)
+ snd_dice_transaction_write_tx(dice, TX_ISOCHRONOUS,
+ &channel, 4);
+ else
+ snd_dice_transaction_write_rx(dice, RX_ISOCHRONOUS,
+ &channel, 4);
+
+ fw_iso_resources_free(resources);
+}
+
+static int keep_resources(struct snd_dice *dice,
+ struct fw_iso_resources *resources,
+ unsigned int max_payload_bytes)
+{
+ __be32 channel;
+ int err;
+
+ err = fw_iso_resources_allocate(resources, max_payload_bytes,
+ fw_parent_device(dice->unit)->max_speed);
+ if (err < 0)
+ goto end;
+
+ /* Set channel number */
+ channel = cpu_to_be32(resources->channel);
+ if (resources == &dice->tx_resources)
+ err = snd_dice_transaction_write_tx(dice, TX_ISOCHRONOUS,
+ &channel, 4);
+ else
+ err = snd_dice_transaction_write_rx(dice, RX_ISOCHRONOUS,
+ &channel, 4);
+ if (err < 0)
+ release_resources(dice, resources);
+end:
+ return err;
+}
+
+static void stop_stream(struct snd_dice *dice, struct amdtp_stream *stream)
+{
+ amdtp_stream_pcm_abort(stream);
+ amdtp_stream_stop(stream);
+
+ if (stream == &dice->tx_stream)
+ release_resources(dice, &dice->tx_resources);
+ else
+ release_resources(dice, &dice->rx_resources);
+}
+
+static int start_stream(struct snd_dice *dice, struct amdtp_stream *stream,
+ unsigned int rate)
+{
+ struct fw_iso_resources *resources;
+ unsigned int i, mode, pcm_chs, midi_ports;
+ int err;
+
+ err = snd_dice_stream_get_rate_mode(dice, rate, &mode);
+ if (err < 0)
+ goto end;
+ if (stream == &dice->tx_stream) {
+ resources = &dice->tx_resources;
+ pcm_chs = dice->tx_channels[mode];
+ midi_ports = dice->tx_midi_ports[mode];
+ } else {
+ resources = &dice->rx_resources;
+ pcm_chs = dice->rx_channels[mode];
+ midi_ports = dice->rx_midi_ports[mode];
+ }
+
+ /*
+ * At 176.4/192.0 kHz, DICE has a quirk of transferring two PCM frames
+ * in one data block of an AMDTP packet. The transfer frequency is thus
+ * half of the PCM sampling frequency; e.g. PCM frames at 192.0 kHz are
+ * transferred in AMDTP packets at 96.0 kHz. Two successive samples of a
+ * channel are stored consecutively in the packet. This quirk is known
+ * as 'Dual Wire'.
+ * For this quirk, blocking mode is required and the PCM buffer size
+ * should be aligned to SYT_INTERVAL.
+ */
+ if (mode > 1) {
+ rate /= 2;
+ pcm_chs *= 2;
+ stream->double_pcm_frames = true;
+ } else {
+ stream->double_pcm_frames = false;
+ }
+
+ amdtp_stream_set_parameters(stream, rate, pcm_chs, midi_ports);
+ if (mode > 1) {
+ pcm_chs /= 2;
+
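+ /* In dual-wire mode each PCM channel occupies two adjacent positions in a data block. */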
+ for (i = 0; i < pcm_chs; i++) {
+ stream->pcm_positions[i] = i * 2;
+ stream->pcm_positions[i + pcm_chs] = i * 2 + 1;
+ }
+ }
+
+ err = keep_resources(dice, resources,
+ amdtp_stream_get_max_payload(stream));
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to keep isochronous resources\n");
+ goto end;
+ }
+
+ err = amdtp_stream_start(stream, resources->channel,
+ fw_parent_device(dice->unit)->max_speed);
+ if (err < 0)
+ release_resources(dice, resources);
+end:
+ return err;
+}
+
+static int get_sync_mode(struct snd_dice *dice, enum cip_flags *sync_mode)
+{
+ u32 source;
+ int err;
+
+ err = snd_dice_transaction_get_clock_source(dice, &source);
+ if (err < 0)
+ goto end;
+
+ switch (source) {
+ /* So-called 'SYT Match' modes: sync to the SYT value of received packets */
+ case CLOCK_SOURCE_ARX4: /* in 4th stream */
+ case CLOCK_SOURCE_ARX3: /* in 3rd stream */
+ case CLOCK_SOURCE_ARX2: /* in 2nd stream */
+ err = -ENOSYS;
+ break;
+ case CLOCK_SOURCE_ARX1: /* in 1st stream, which this driver uses */
+ *sync_mode = 0;
+ break;
+ default:
+ *sync_mode = CIP_SYNC_TO_DEVICE;
+ break;
+ }
+end:
+ return err;
+}
+
+int snd_dice_stream_start_duplex(struct snd_dice *dice, unsigned int rate)
+{
+ struct amdtp_stream *master, *slave;
+ unsigned int curr_rate;
+ enum cip_flags sync_mode;
+ int err = 0;
+
+ if (dice->substreams_counter == 0)
+ goto end;
+
+ err = get_sync_mode(dice, &sync_mode);
+ if (err < 0)
+ goto end;
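+ /* The stream which carries the sync source becomes the master; the other follows it. */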
+ if (sync_mode == CIP_SYNC_TO_DEVICE) {
+ master = &dice->tx_stream;
+ slave = &dice->rx_stream;
+ } else {
+ master = &dice->rx_stream;
+ slave = &dice->tx_stream;
+ }
+
+ /* Stop the master stream if either stream hit a packet queueing error. */
+ if (amdtp_streaming_error(master) || amdtp_streaming_error(slave))
+ stop_stream(dice, master);
+
+ /* Stop stream if rate is different. */
+ err = snd_dice_transaction_get_rate(dice, &curr_rate);
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to get sampling rate\n");
+ goto end;
+ }
+ if (rate == 0)
+ rate = curr_rate;
+ if (rate != curr_rate)
+ stop_stream(dice, master);
+
+ if (!amdtp_stream_running(master)) {
+ stop_stream(dice, slave);
+ snd_dice_transaction_clear_enable(dice);
+
+ amdtp_stream_set_sync(sync_mode, master, slave);
+
+ err = snd_dice_transaction_set_rate(dice, rate);
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to set sampling rate\n");
+ goto end;
+ }
+
+ /* Start both streams. */
+ err = start_stream(dice, master, rate);
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to start AMDTP master stream\n");
+ goto end;
+ }
+ err = start_stream(dice, slave, rate);
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to start AMDTP slave stream\n");
+ stop_stream(dice, master);
+ goto end;
+ }
+ err = snd_dice_transaction_set_enable(dice);
+ if (err < 0) {
+ dev_err(&dice->unit->device,
+ "fail to enable interface\n");
+ stop_stream(dice, master);
+ stop_stream(dice, slave);
+ goto end;
+ }
+
+ /* Wait for the first callbacks. */
+ if (!amdtp_stream_wait_callback(master, CALLBACK_TIMEOUT) ||
+ !amdtp_stream_wait_callback(slave, CALLBACK_TIMEOUT)) {
+ snd_dice_transaction_clear_enable(dice);
+ stop_stream(dice, master);
+ stop_stream(dice, slave);
+ err = -ETIMEDOUT;
+ }
+ }
+end:
+ return err;
+}
+
+void snd_dice_stream_stop_duplex(struct snd_dice *dice)
+{
+ if (dice->substreams_counter > 0)
+ return;
+
+ snd_dice_transaction_clear_enable(dice);
+
+ stop_stream(dice, &dice->tx_stream);
+ stop_stream(dice, &dice->rx_stream);
+}
+
+static int init_stream(struct snd_dice *dice, struct amdtp_stream *stream)
+{
+ int err;
+ struct fw_iso_resources *resources;
+ enum amdtp_stream_direction dir;
+
+ if (stream == &dice->tx_stream) {
+ resources = &dice->tx_resources;
+ dir = AMDTP_IN_STREAM;
+ } else {
+ resources = &dice->rx_resources;
+ dir = AMDTP_OUT_STREAM;
+ }
+
+ err = fw_iso_resources_init(resources, dice->unit);
+ if (err < 0)
+ goto end;
+ resources->channels_mask = 0x00000000ffffffffuLL;
+
+ err = amdtp_stream_init(stream, dice->unit, dir, CIP_BLOCKING);
+ if (err < 0) {
+ amdtp_stream_destroy(stream);
+ fw_iso_resources_destroy(resources);
+ }
+end:
+ return err;
+}
+
+static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
+{
+ amdtp_stream_destroy(stream);
+
+ if (stream == &dice->tx_stream)
+ fw_iso_resources_destroy(&dice->tx_resources);
+ else
+ fw_iso_resources_destroy(&dice->rx_resources);
+}
+
+int snd_dice_stream_init_duplex(struct snd_dice *dice)
+{
+ int err;
+
+ dice->substreams_counter = 0;
+
+ err = init_stream(dice, &dice->tx_stream);
+ if (err < 0)
+ goto end;
+
+ err = init_stream(dice, &dice->rx_stream);
+end:
+ return err;
+}
+
+void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
+{
+ snd_dice_transaction_clear_enable(dice);
+
+ stop_stream(dice, &dice->tx_stream);
+ destroy_stream(dice, &dice->tx_stream);
+
+ stop_stream(dice, &dice->rx_stream);
+ destroy_stream(dice, &dice->rx_stream);
+
+ dice->substreams_counter = 0;
+}
+
+void snd_dice_stream_update_duplex(struct snd_dice *dice)
+{
+ /*
+ * On a bus reset, the DICE firmware disables streaming and then goes
+ * off contemplating its own navel for hundreds of milliseconds before
+ * it can react to any of our attempts to reenable streaming. This
+ * means that we lose synchronization anyway, so we force our streams
+ * to stop so that the application can restart them in an orderly
+ * manner.
+ */
+ dice->global_enabled = false;
+
+ stop_stream(dice, &dice->rx_stream);
+ stop_stream(dice, &dice->tx_stream);
+
+ fw_iso_resources_update(&dice->rx_resources);
+ fw_iso_resources_update(&dice->tx_resources);
+}
+
+static void dice_lock_changed(struct snd_dice *dice)
+{
+ dice->dev_lock_changed = true;
+ wake_up(&dice->hwdep_wait);
+}
+
+int snd_dice_stream_lock_try(struct snd_dice *dice)
+{
+ int err;
+
+ spin_lock_irq(&dice->lock);
+
+ if (dice->dev_lock_count < 0) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (dice->dev_lock_count++ == 0)
+ dice_lock_changed(dice);
+ err = 0;
+out:
+ spin_unlock_irq(&dice->lock);
+ return err;
+}
+
+void snd_dice_stream_lock_release(struct snd_dice *dice)
+{
+ spin_lock_irq(&dice->lock);
+
+ if (WARN_ON(dice->dev_lock_count <= 0))
+ goto out;
+
+ if (--dice->dev_lock_count == 0)
+ dice_lock_changed(dice);
+out:
+ spin_unlock_irq(&dice->lock);
+}
diff --git a/sound/firewire/dice/dice-transaction.c b/sound/firewire/dice/dice-transaction.c
new file mode 100644
index 000000000000..aee746187665
--- /dev/null
+++ b/sound/firewire/dice/dice-transaction.c
@@ -0,0 +1,382 @@
+/*
+ * dice_transaction.c - a part of driver for Dice based devices
+ *
+ * Copyright (c) Clemens Ladisch
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+#define NOTIFICATION_TIMEOUT_MS 100
+
+static u64 get_subaddr(struct snd_dice *dice, enum snd_dice_addr_type type,
+ u64 offset)
+{
+ switch (type) {
+ case SND_DICE_ADDR_TYPE_TX:
+ offset += dice->tx_offset;
+ break;
+ case SND_DICE_ADDR_TYPE_RX:
+ offset += dice->rx_offset;
+ break;
+ case SND_DICE_ADDR_TYPE_SYNC:
+ offset += dice->sync_offset;
+ break;
+ case SND_DICE_ADDR_TYPE_RSRV:
+ offset += dice->rsrv_offset;
+ break;
+ case SND_DICE_ADDR_TYPE_GLOBAL:
+ default:
+ offset += dice->global_offset;
+ break;
+ }
+ offset += DICE_PRIVATE_SPACE;
+ return offset;
+}
+
+int snd_dice_transaction_write(struct snd_dice *dice,
+ enum snd_dice_addr_type type,
+ unsigned int offset, void *buf, unsigned int len)
+{
+ return snd_fw_transaction(dice->unit,
+ (len == 4) ? TCODE_WRITE_QUADLET_REQUEST :
+ TCODE_WRITE_BLOCK_REQUEST,
+ get_subaddr(dice, type, offset), buf, len, 0);
+}
+
+int snd_dice_transaction_read(struct snd_dice *dice,
+ enum snd_dice_addr_type type, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_fw_transaction(dice->unit,
+ (len == 4) ? TCODE_READ_QUADLET_REQUEST :
+ TCODE_READ_BLOCK_REQUEST,
+ get_subaddr(dice, type, offset), buf, len, 0);
+}
+
+static int get_clock_info(struct snd_dice *dice, __be32 *info)
+{
+ return snd_dice_transaction_read_global(dice, GLOBAL_CLOCK_SELECT,
+ info, 4);
+}
+
+static int set_clock_info(struct snd_dice *dice,
+ unsigned int rate, unsigned int source)
+{
+ unsigned int retries = 3;
+ unsigned int i;
+ __be32 info;
+ u32 mask;
+ u32 clock;
+ int err;
+retry:
+ err = get_clock_info(dice, &info);
+ if (err < 0)
+ goto end;
+
+ clock = be32_to_cpu(info);
+ if (source != UINT_MAX) {
+ mask = CLOCK_SOURCE_MASK;
+ clock &= ~mask;
+ clock |= source;
+ }
+ if (rate != UINT_MAX) {
+ for (i = 0; i < ARRAY_SIZE(snd_dice_rates); i++) {
+ if (snd_dice_rates[i] == rate)
+ break;
+ }
+ if (i == ARRAY_SIZE(snd_dice_rates)) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ mask = CLOCK_RATE_MASK;
+ clock &= ~mask;
+ clock |= i << CLOCK_RATE_SHIFT;
+ }
+ info = cpu_to_be32(clock);
+
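+ /* Re-arm the completion; the notification handler signals it on NOTIFY_CLOCK_ACCEPTED. */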
+ if (completion_done(&dice->clock_accepted))
+ reinit_completion(&dice->clock_accepted);
+
+ err = snd_dice_transaction_write_global(dice, GLOBAL_CLOCK_SELECT,
+ &info, 4);
+ if (err < 0)
+ goto end;
+
+ /* A timeout means the request was invalid, probably due to a bus reset. */
+ if (wait_for_completion_timeout(&dice->clock_accepted,
+ msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0) {
+ if (retries-- == 0) {
+ err = -ETIMEDOUT;
+ goto end;
+ }
+
+ err = snd_dice_transaction_reinit(dice);
+ if (err < 0)
+ goto end;
+
+ msleep(500); /* arbitrary */
+ goto retry;
+ }
+end:
+ return err;
+}
+
+int snd_dice_transaction_get_clock_source(struct snd_dice *dice,
+ unsigned int *source)
+{
+ __be32 info;
+ int err;
+
+ err = get_clock_info(dice, &info);
+ if (err >= 0)
+ *source = be32_to_cpu(info) & CLOCK_SOURCE_MASK;
+
+ return err;
+}
+
+int snd_dice_transaction_get_rate(struct snd_dice *dice, unsigned int *rate)
+{
+ __be32 info;
+ unsigned int index;
+ int err;
+
+ err = get_clock_info(dice, &info);
+ if (err < 0)
+ goto end;
+
+ index = (be32_to_cpu(info) & CLOCK_RATE_MASK) >> CLOCK_RATE_SHIFT;
+ if (index >= SND_DICE_RATES_COUNT) {
+ err = -ENOSYS;
+ goto end;
+ }
+
+ *rate = snd_dice_rates[index];
+end:
+ return err;
+}
+int snd_dice_transaction_set_rate(struct snd_dice *dice, unsigned int rate)
+{
+ return set_clock_info(dice, rate, UINT_MAX);
+}
+
+int snd_dice_transaction_set_enable(struct snd_dice *dice)
+{
+ __be32 value;
+ int err = 0;
+
+ if (dice->global_enabled)
+ goto end;
+
+ value = cpu_to_be32(1);
+ err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ get_subaddr(dice, SND_DICE_ADDR_TYPE_GLOBAL,
+ GLOBAL_ENABLE),
+ &value, 4,
+ FW_FIXED_GENERATION | dice->owner_generation);
+ if (err < 0)
+ goto end;
+
+ dice->global_enabled = true;
+end:
+ return err;
+}
+
+void snd_dice_transaction_clear_enable(struct snd_dice *dice)
+{
+ __be32 value;
+
+ value = 0;
+ snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+ get_subaddr(dice, SND_DICE_ADDR_TYPE_GLOBAL,
+ GLOBAL_ENABLE),
+ &value, 4, FW_QUIET |
+ FW_FIXED_GENERATION | dice->owner_generation);
+
+ dice->global_enabled = false;
+}
+
+static void dice_notification(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, unsigned long long offset,
+ void *data, size_t length, void *callback_data)
+{
+ struct snd_dice *dice = callback_data;
+ u32 bits;
+ unsigned long flags;
+
+ if (tcode != TCODE_WRITE_QUADLET_REQUEST) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ return;
+ }
+ if ((offset & 3) != 0) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ bits = be32_to_cpup(data);
+
+ spin_lock_irqsave(&dice->lock, flags);
+ dice->notification_bits |= bits;
+ spin_unlock_irqrestore(&dice->lock, flags);
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+
+ if (bits & NOTIFY_CLOCK_ACCEPTED)
+ complete(&dice->clock_accepted);
+ wake_up(&dice->hwdep_wait);
+}
+
+static int register_notification_address(struct snd_dice *dice, bool retry)
+{
+ struct fw_device *device = fw_parent_device(dice->unit);
+ __be64 *buffer;
+ unsigned int retries;
+ int err;
+
+ retries = (retry) ? 3 : 0;
+
+ buffer = kmalloc(2 * 8, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ for (;;) {
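+ /* Compare-and-swap: buffer[0] holds the expected value (no owner), buffer[1] the new one. */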
+ buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
+ buffer[1] = cpu_to_be64(
+ ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+ dice->notification_handler.offset);
+
+ dice->owner_generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ err = snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+ get_subaddr(dice,
+ SND_DICE_ADDR_TYPE_GLOBAL,
+ GLOBAL_OWNER),
+ buffer, 2 * 8,
+ FW_FIXED_GENERATION |
+ dice->owner_generation);
+ if (err == 0) {
+ /* success */
+ if (buffer[0] == cpu_to_be64(OWNER_NO_OWNER))
+ break;
+ /* The address seems to be already registered. */
+ if (buffer[0] == buffer[1])
+ break;
+
+ dev_err(&dice->unit->device,
+ "device is already in use\n");
+ err = -EBUSY;
+ }
+ if (err != -EAGAIN || retries-- == 0)
+ break;
+
+ msleep(20);
+ }
+
+ kfree(buffer);
+
+ if (err < 0)
+ dice->owner_generation = -1;
+
+ return err;
+}
+
+static void unregister_notification_address(struct snd_dice *dice)
+{
+ struct fw_device *device = fw_parent_device(dice->unit);
+ __be64 *buffer;
+
+ buffer = kmalloc(2 * 8, GFP_KERNEL);
+ if (buffer == NULL)
+ return;
+
+ buffer[0] = cpu_to_be64(
+ ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+ dice->notification_handler.offset);
+ buffer[1] = cpu_to_be64(OWNER_NO_OWNER);
+ snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+ get_subaddr(dice, SND_DICE_ADDR_TYPE_GLOBAL,
+ GLOBAL_OWNER),
+ buffer, 2 * 8, FW_QUIET |
+ FW_FIXED_GENERATION | dice->owner_generation);
+
+ kfree(buffer);
+
+ dice->owner_generation = -1;
+}
+
+void snd_dice_transaction_destroy(struct snd_dice *dice)
+{
+ struct fw_address_handler *handler = &dice->notification_handler;
+
+ if (handler->callback_data == NULL)
+ return;
+
+ unregister_notification_address(dice);
+
+ fw_core_remove_address_handler(handler);
+ handler->callback_data = NULL;
+}
+
+int snd_dice_transaction_reinit(struct snd_dice *dice)
+{
+ struct fw_address_handler *handler = &dice->notification_handler;
+
+ if (handler->callback_data == NULL)
+ return -EINVAL;
+
+ return register_notification_address(dice, false);
+}
+
+int snd_dice_transaction_init(struct snd_dice *dice)
+{
+ struct fw_address_handler *handler = &dice->notification_handler;
+ __be32 *pointers;
+ int err;
+
+ /* Read the pointer table the same way dice_interface_check() does. */
+ pointers = kmalloc(sizeof(__be32) * 10, GFP_KERNEL);
+ if (pointers == NULL)
+ return -ENOMEM;
+
+ /* Get offsets for sub-addresses */
+ err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE,
+ pointers, sizeof(__be32) * 10, 0);
+ if (err < 0)
+ goto end;
+
+ /* Allocate an address in the local node's address space for the callback. */
+ handler->length = 4;
+ handler->address_callback = dice_notification;
+ handler->callback_data = dice;
+ err = fw_core_add_address_handler(handler, &fw_high_memory_region);
+ if (err < 0) {
+ handler->callback_data = NULL;
+ goto end;
+ }
+
+ /* Register the address space */
+ err = register_notification_address(dice, true);
+ if (err < 0) {
+ fw_core_remove_address_handler(handler);
+ handler->callback_data = NULL;
+ goto end;
+ }
+
+ dice->global_offset = be32_to_cpu(pointers[0]) * 4;
+ dice->tx_offset = be32_to_cpu(pointers[2]) * 4;
+ dice->rx_offset = be32_to_cpu(pointers[4]) * 4;
+ dice->sync_offset = be32_to_cpu(pointers[6]) * 4;
+ dice->rsrv_offset = be32_to_cpu(pointers[8]) * 4;
+
+ /* Set up later. */
+ if (be32_to_cpu(pointers[1]) * 4 >= GLOBAL_CLOCK_CAPABILITIES + 4)
+ dice->clock_caps = 1;
+end:
+ kfree(pointers);
+ return err;
+}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
new file mode 100644
index 000000000000..90d8f40ff727
--- /dev/null
+++ b/sound/firewire/dice/dice.c
@@ -0,0 +1,361 @@
+/*
+ * TC Applied Technologies Digital Interface Communications Engine driver
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "dice.h"
+
+MODULE_DESCRIPTION("DICE driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+
+#define OUI_WEISS 0x001c6a
+
+#define DICE_CATEGORY_ID 0x04
+#define WEISS_CATEGORY_ID 0x00
+
+static int dice_interface_check(struct fw_unit *unit)
+{
+ static const int min_values[10] = {
+ 10, 0x64 / 4,
+ 10, 0x18 / 4,
+ 10, 0x18 / 4,
+ 0, 0,
+ 0, 0,
+ };
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_csr_iterator it;
+ int key, val, vendor = -1, model = -1, err;
+ unsigned int category, i;
+ __be32 *pointers, value;
+ __be32 version;
+
+ pointers = kmalloc_array(ARRAY_SIZE(min_values), sizeof(__be32),
+ GFP_KERNEL);
+ if (pointers == NULL)
+ return -ENOMEM;
+
+ /*
+ * Check that GUID and unit directory are constructed according to DICE
+ * rules, i.e., that the specifier ID is the GUID's OUI, and that the
+ * GUID chip ID consists of the 8-bit category ID, the 10-bit product
+ * ID, and a 22-bit serial number.
+ */
+ fw_csr_iterator_init(&it, unit->directory);
+ while (fw_csr_iterator_next(&it, &key, &val)) {
+ switch (key) {
+ case CSR_SPECIFIER_ID:
+ vendor = val;
+ break;
+ case CSR_MODEL:
+ model = val;
+ break;
+ }
+ }
+ if (vendor == OUI_WEISS)
+ category = WEISS_CATEGORY_ID;
+ else
+ category = DICE_CATEGORY_ID;
+ if (device->config_rom[3] != ((vendor << 8) | category) ||
+ device->config_rom[4] >> 22 != model) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * Check that the sub address spaces exist and are located inside the
+ * private address space. The minimum values are chosen so that all
+ * minimally required registers are included.
+ */
+ err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
+ DICE_PRIVATE_SPACE, pointers,
+ sizeof(__be32) * ARRAY_SIZE(min_values), 0);
+ if (err < 0) {
+ err = -ENODEV;
+ goto end;
+ }
+ for (i = 0; i < ARRAY_SIZE(min_values); ++i) {
+ value = be32_to_cpu(pointers[i]);
+ if (value < min_values[i] || value >= 0x40000) {
+ err = -ENODEV;
+ goto end;
+ }
+ }
+
+ /*
+ * Check that the implemented DICE driver specification major version
+ * number matches.
+ */
+ err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
+ DICE_PRIVATE_SPACE +
+ be32_to_cpu(pointers[0]) * 4 + GLOBAL_VERSION,
+ &version, 4, 0);
+ if (err < 0) {
+ err = -ENODEV;
+ goto end;
+ }
+ if ((version & cpu_to_be32(0xff000000)) != cpu_to_be32(0x01000000)) {
+ dev_err(&unit->device,
+ "unknown DICE version: 0x%08x\n", be32_to_cpu(version));
+ err = -ENODEV;
+ goto end;
+ }
+end:
+ return err;
+}
+
+static int highest_supported_mode_rate(struct snd_dice *dice,
+ unsigned int mode, unsigned int *rate)
+{
+ unsigned int i, m;
+
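+ /* Scan rates from highest to lowest and stop at the first one within this mode. */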
+ for (i = ARRAY_SIZE(snd_dice_rates); i > 0; i--) {
+ *rate = snd_dice_rates[i - 1];
+ if (snd_dice_stream_get_rate_mode(dice, *rate, &m) < 0)
+ continue;
+ if (mode == m)
+ break;
+ }
+ if (i == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dice_read_mode_params(struct snd_dice *dice, unsigned int mode)
+{
+ __be32 values[2];
+ unsigned int rate;
+ int err;
+
+ if (highest_supported_mode_rate(dice, mode, &rate) < 0) {
+ dice->tx_channels[mode] = 0;
+ dice->tx_midi_ports[mode] = 0;
+ dice->rx_channels[mode] = 0;
+ dice->rx_midi_ports[mode] = 0;
+ return 0;
+ }
+
+ err = snd_dice_transaction_set_rate(dice, rate);
+ if (err < 0)
+ return err;
+
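+ /* The audio channel count and the MIDI port count are adjacent quadlets; read both at once. */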
+ err = snd_dice_transaction_read_tx(dice, TX_NUMBER_AUDIO,
+ values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ dice->tx_channels[mode] = be32_to_cpu(values[0]);
+ dice->tx_midi_ports[mode] = be32_to_cpu(values[1]);
+
+ err = snd_dice_transaction_read_rx(dice, RX_NUMBER_AUDIO,
+ values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ dice->rx_channels[mode] = be32_to_cpu(values[0]);
+ dice->rx_midi_ports[mode] = be32_to_cpu(values[1]);
+
+ return 0;
+}
+
+static int dice_read_params(struct snd_dice *dice)
+{
+ __be32 value;
+ int mode, err;
+
+ /* Some very old firmware versions don't report their clock capabilities. */
+ if (dice->clock_caps > 0) {
+ err = snd_dice_transaction_read_global(dice,
+ GLOBAL_CLOCK_CAPABILITIES,
+ &value, 4);
+ if (err < 0)
+ return err;
+ dice->clock_caps = be32_to_cpu(value);
+ } else {
+ /* this should be supported by any device */
+ dice->clock_caps = CLOCK_CAP_RATE_44100 |
+ CLOCK_CAP_RATE_48000 |
+ CLOCK_CAP_SOURCE_ARX1 |
+ CLOCK_CAP_SOURCE_INTERNAL;
+ }
+
+ for (mode = 2; mode >= 0; --mode) {
+ err = dice_read_mode_params(dice, mode);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void dice_card_strings(struct snd_dice *dice)
+{
+ struct snd_card *card = dice->card;
+ struct fw_device *dev = fw_parent_device(dice->unit);
+ char vendor[32], model[32];
+ unsigned int i;
+ int err;
+
+ strcpy(card->driver, "DICE");
+
+ strcpy(card->shortname, "DICE");
+ BUILD_BUG_ON(NICK_NAME_SIZE < sizeof(card->shortname));
+ err = snd_dice_transaction_read_global(dice, GLOBAL_NICK_NAME,
+ card->shortname,
+ sizeof(card->shortname));
+ if (err >= 0) {
+ /* DICE strings are returned in "always-wrong" endianness */
+ BUILD_BUG_ON(sizeof(card->shortname) % 4 != 0);
+ for (i = 0; i < sizeof(card->shortname); i += 4)
+ swab32s((u32 *)&card->shortname[i]);
+ card->shortname[sizeof(card->shortname) - 1] = '\0';
+ }
+
+ strcpy(vendor, "?");
+ fw_csr_string(dev->config_rom + 5, CSR_VENDOR, vendor, sizeof(vendor));
+ strcpy(model, "?");
+ fw_csr_string(dice->unit->directory, CSR_MODEL, model, sizeof(model));
+ snprintf(card->longname, sizeof(card->longname),
+ "%s %s (serial %u) at %s, S%d",
+ vendor, model, dev->config_rom[4] & 0x3fffff,
+ dev_name(&dice->unit->device), 100 << dev->max_speed);
+
+ strcpy(card->mixername, "DICE");
+}
+
+static void dice_card_free(struct snd_card *card)
+{
+ struct snd_dice *dice = card->private_data;
+
+ snd_dice_transaction_destroy(dice);
+ mutex_destroy(&dice->mutex);
+}
+
+static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+{
+ struct snd_card *card;
+ struct snd_dice *dice;
+ int err;
+
+ err = dice_interface_check(unit);
+ if (err < 0)
+ goto end;
+
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
+ sizeof(*dice), &card);
+ if (err < 0)
+ goto end;
+
+ dice = card->private_data;
+ dice->card = card;
+ dice->unit = unit;
+ card->private_free = dice_card_free;
+
+ spin_lock_init(&dice->lock);
+ mutex_init(&dice->mutex);
+ init_completion(&dice->clock_accepted);
+ init_waitqueue_head(&dice->hwdep_wait);
+
+ err = snd_dice_transaction_init(dice);
+ if (err < 0)
+ goto error;
+
+ err = dice_read_params(dice);
+ if (err < 0)
+ goto error;
+
+ dice_card_strings(dice);
+
+ err = snd_dice_create_pcm(dice);
+ if (err < 0)
+ goto error;
+
+ err = snd_dice_create_hwdep(dice);
+ if (err < 0)
+ goto error;
+
+ snd_dice_create_proc(dice);
+
+ err = snd_dice_create_midi(dice);
+ if (err < 0)
+ goto error;
+
+ err = snd_dice_stream_init_duplex(dice);
+ if (err < 0)
+ goto error;
+
+ err = snd_card_register(card);
+ if (err < 0) {
+ snd_dice_stream_destroy_duplex(dice);
+ goto error;
+ }
+
+ dev_set_drvdata(&unit->device, dice);
+end:
+ return err;
+error:
+ snd_card_free(card);
+ return err;
+}
+
+static void dice_remove(struct fw_unit *unit)
+{
+ struct snd_dice *dice = dev_get_drvdata(&unit->device);
+
+ snd_card_disconnect(dice->card);
+
+ snd_dice_stream_destroy_duplex(dice);
+
+ snd_card_free_when_closed(dice->card);
+}
+
+static void dice_bus_reset(struct fw_unit *unit)
+{
+ struct snd_dice *dice = dev_get_drvdata(&unit->device);
+
+ /* The owner register was re-initialized by the bus reset; register the handler address again. */
+ snd_dice_transaction_reinit(dice);
+
+ mutex_lock(&dice->mutex);
+ snd_dice_stream_update_duplex(dice);
+ mutex_unlock(&dice->mutex);
+}
+
+#define DICE_INTERFACE 0x000001
+
+static const struct ieee1394_device_id dice_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_VERSION,
+ .version = DICE_INTERFACE,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(ieee1394, dice_id_table);
+
+static struct fw_driver dice_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .bus = &fw_bus_type,
+ },
+ .probe = dice_probe,
+ .update = dice_bus_reset,
+ .remove = dice_remove,
+ .id_table = dice_id_table,
+};
+
+static int __init alsa_dice_init(void)
+{
+ return driver_register(&dice_driver.driver);
+}
+
+static void __exit alsa_dice_exit(void)
+{
+ driver_unregister(&dice_driver.driver);
+}
+
+module_init(alsa_dice_init);
+module_exit(alsa_dice_exit);
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
new file mode 100644
index 000000000000..ecf5dc862235
--- /dev/null
+++ b/sound/firewire/dice/dice.h
@@ -0,0 +1,189 @@
+/*
+ * dice.h - a part of driver for Dice based devices
+ *
+ * Copyright (c) Clemens Ladisch
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#ifndef SOUND_DICE_H_INCLUDED
+#define SOUND_DICE_H_INCLUDED
+
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/firewire.h>
+#include <sound/hwdep.h>
+#include <sound/info.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/rawmidi.h>
+
+#include "../amdtp.h"
+#include "../iso-resources.h"
+#include "../lib.h"
+#include "dice-interface.h"
+
+struct snd_dice {
+ struct snd_card *card;
+ struct fw_unit *unit;
+ spinlock_t lock;
+ struct mutex mutex;
+
+ /* Offsets for sub-addresses */
+ unsigned int global_offset;
+ unsigned int rx_offset;
+ unsigned int tx_offset;
+ unsigned int sync_offset;
+ unsigned int rsrv_offset;
+
+ unsigned int clock_caps;
+ unsigned int tx_channels[3];
+ unsigned int rx_channels[3];
+ unsigned int tx_midi_ports[3];
+ unsigned int rx_midi_ports[3];
+
+ struct fw_address_handler notification_handler;
+ int owner_generation;
+ u32 notification_bits;
+
+ /* For uapi */
+ int dev_lock_count; /* > 0 driver, < 0 userspace */
+ bool dev_lock_changed;
+ wait_queue_head_t hwdep_wait;
+
+ /* For streaming */
+ struct fw_iso_resources tx_resources;
+ struct fw_iso_resources rx_resources;
+ struct amdtp_stream tx_stream;
+ struct amdtp_stream rx_stream;
+ bool global_enabled;
+ struct completion clock_accepted;
+ unsigned int substreams_counter;
+};
+
+enum snd_dice_addr_type {
+ SND_DICE_ADDR_TYPE_PRIVATE,
+ SND_DICE_ADDR_TYPE_GLOBAL,
+ SND_DICE_ADDR_TYPE_TX,
+ SND_DICE_ADDR_TYPE_RX,
+ SND_DICE_ADDR_TYPE_SYNC,
+ SND_DICE_ADDR_TYPE_RSRV,
+};
+
+int snd_dice_transaction_write(struct snd_dice *dice,
+ enum snd_dice_addr_type type,
+ unsigned int offset,
+ void *buf, unsigned int len);
+int snd_dice_transaction_read(struct snd_dice *dice,
+ enum snd_dice_addr_type type, unsigned int offset,
+ void *buf, unsigned int len);
+
+static inline int snd_dice_transaction_write_global(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_write(dice,
+ SND_DICE_ADDR_TYPE_GLOBAL, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_read_global(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_read(dice,
+ SND_DICE_ADDR_TYPE_GLOBAL, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_write_tx(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_write(dice, SND_DICE_ADDR_TYPE_TX, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_read_tx(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_read(dice, SND_DICE_ADDR_TYPE_TX, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_write_rx(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_write(dice, SND_DICE_ADDR_TYPE_RX, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_read_rx(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_read(dice, SND_DICE_ADDR_TYPE_RX, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_write_sync(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_write(dice, SND_DICE_ADDR_TYPE_SYNC, offset,
+ buf, len);
+}
+static inline int snd_dice_transaction_read_sync(struct snd_dice *dice,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ return snd_dice_transaction_read(dice, SND_DICE_ADDR_TYPE_SYNC, offset,
+ buf, len);
+}
+
+int snd_dice_transaction_get_clock_source(struct snd_dice *dice,
+ unsigned int *source);
+int snd_dice_transaction_set_rate(struct snd_dice *dice, unsigned int rate);
+int snd_dice_transaction_get_rate(struct snd_dice *dice, unsigned int *rate);
+int snd_dice_transaction_set_enable(struct snd_dice *dice);
+void snd_dice_transaction_clear_enable(struct snd_dice *dice);
+int snd_dice_transaction_init(struct snd_dice *dice);
+int snd_dice_transaction_reinit(struct snd_dice *dice);
+void snd_dice_transaction_destroy(struct snd_dice *dice);
+
+#define SND_DICE_RATES_COUNT 7
+extern const unsigned int snd_dice_rates[SND_DICE_RATES_COUNT];
+
+int snd_dice_stream_get_rate_mode(struct snd_dice *dice,
+ unsigned int rate, unsigned int *mode);
+
+int snd_dice_stream_start_duplex(struct snd_dice *dice, unsigned int rate);
+void snd_dice_stream_stop_duplex(struct snd_dice *dice);
+int snd_dice_stream_init_duplex(struct snd_dice *dice);
+void snd_dice_stream_destroy_duplex(struct snd_dice *dice);
+void snd_dice_stream_update_duplex(struct snd_dice *dice);
+
+int snd_dice_stream_lock_try(struct snd_dice *dice);
+void snd_dice_stream_lock_release(struct snd_dice *dice);
+
+int snd_dice_create_pcm(struct snd_dice *dice);
+
+int snd_dice_create_hwdep(struct snd_dice *dice);
+
+void snd_dice_create_proc(struct snd_dice *dice);
+
+int snd_dice_create_midi(struct snd_dice *dice);
+
+#endif
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 7ac94439e758..48d6dca471c6 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -131,14 +131,8 @@ static void isight_samples(struct isight *isight,
static void isight_pcm_abort(struct isight *isight)
{
- unsigned long flags;
-
- if (ACCESS_ONCE(isight->pcm_active)) {
- snd_pcm_stream_lock_irqsave(isight->pcm, flags);
- if (snd_pcm_running(isight->pcm))
- snd_pcm_stop(isight->pcm, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(isight->pcm, flags);
- }
+ if (ACCESS_ONCE(isight->pcm_active))
+ snd_pcm_stop_xrun(isight->pcm);
}
static void isight_dropped_samples(struct isight *isight, unsigned int total)
diff --git a/sound/firewire/oxfw/Makefile b/sound/firewire/oxfw/Makefile
new file mode 100644
index 000000000000..a926850864f6
--- /dev/null
+++ b/sound/firewire/oxfw/Makefile
@@ -0,0 +1,3 @@
+snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
+ oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
+obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
diff --git a/sound/firewire/oxfw/oxfw-command.c b/sound/firewire/oxfw/oxfw-command.c
new file mode 100644
index 000000000000..12ef3253bc89
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-command.c
@@ -0,0 +1,153 @@
+/*
+ * oxfw_command.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+
+int avc_stream_set_format(struct fw_unit *unit, enum avc_general_plug_dir dir,
+ unsigned int pid, u8 *format, unsigned int len)
+{
+ u8 *buf;
+ int err;
+
+ buf = kmalloc(len + 10, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf[0] = 0x00; /* CONTROL */
+ buf[1] = 0xff; /* UNIT */
+ buf[2] = 0xbf; /* EXTENDED STREAM FORMAT INFORMATION */
+ buf[3] = 0xc0; /* SINGLE subfunction */
+ buf[4] = dir; /* Plug Direction */
+ buf[5] = 0x00; /* UNIT */
+ buf[6] = 0x00; /* PCR (Isochronous Plug) */
+ buf[7] = 0xff & pid; /* Plug ID */
+ buf[8] = 0xff; /* Padding */
+ buf[9] = 0xff; /* Support status in response */
+ memcpy(buf + 10, format, len);
+
+ /* do transaction and check buf[1-8] are the same against command */
+ err = fcp_avc_transaction(unit, buf, len + 10, buf, len + 10,
+ BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7) | BIT(8));
+ if ((err > 0) && (err < len + 10))
+ err = -EIO;
+ else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
+ err = -ENOSYS;
+ else if (buf[0] == 0x0a) /* REJECTED */
+ err = -EINVAL;
+ else
+ err = 0;
+
+ kfree(buf);
+
+ return err;
+}
+
+int avc_stream_get_format(struct fw_unit *unit,
+ enum avc_general_plug_dir dir, unsigned int pid,
+ u8 *buf, unsigned int *len, unsigned int eid)
+{
+ unsigned int subfunc;
+ int err;
+
+ if (eid == 0xff)
+ subfunc = 0xc0; /* SINGLE */
+ else
+ subfunc = 0xc1; /* LIST */
+
+ buf[0] = 0x01; /* STATUS */
+ buf[1] = 0xff; /* UNIT */
+ buf[2] = 0xbf; /* EXTENDED STREAM FORMAT INFORMATION */
+ buf[3] = subfunc; /* SINGLE or LIST */
+ buf[4] = dir; /* Plug Direction */
+ buf[5] = 0x00; /* Unit */
+ buf[6] = 0x00; /* PCR (Isochronous Plug) */
+ buf[7] = 0xff & pid; /* Plug ID */
+ buf[8] = 0xff; /* Padding */
+ buf[9] = 0xff; /* support status in response */
+ buf[10] = 0xff & eid; /* entry ID for LIST subfunction */
+ buf[11] = 0xff; /* padding */
+
+ /* do transaction and check buf[1-7] are the same against command */
+ err = fcp_avc_transaction(unit, buf, 12, buf, *len,
+ BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) |
+ BIT(6) | BIT(7));
+ if ((err > 0) && (err < 10))
+ err = -EIO;
+ else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
+ err = -ENOSYS;
+ else if (buf[0] == 0x0a) /* REJECTED */
+ err = -EINVAL;
+ else if (buf[0] == 0x0b) /* IN TRANSITION */
+ err = -EAGAIN;
+ /* LIST subfunction has entry ID */
+ else if ((subfunc == 0xc1) && (buf[10] != eid))
+ err = -EIO;
+ if (err < 0)
+ goto end;
+
+ /* keep just stream format information */
+ if (subfunc == 0xc0) {
+ memmove(buf, buf + 10, err - 10);
+ *len = err - 10;
+ } else {
+ memmove(buf, buf + 11, err - 11);
+ *len = err - 11;
+ }
+
+ err = 0;
+end:
+ return err;
+}
+
+int avc_general_inquiry_sig_fmt(struct fw_unit *unit, unsigned int rate,
+ enum avc_general_plug_dir dir,
+ unsigned short pid)
+{
+ unsigned int sfc;
+ u8 *buf;
+ int err;
+
+ for (sfc = 0; sfc < CIP_SFC_COUNT; sfc++) {
+ if (amdtp_rate_table[sfc] == rate)
+ break;
+ }
+ if (sfc == CIP_SFC_COUNT)
+ return -EINVAL;
+
+ buf = kzalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf[0] = 0x02; /* SPECIFIC INQUIRY */
+ buf[1] = 0xff; /* UNIT */
+ if (dir == AVC_GENERAL_PLUG_DIR_IN)
+ buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */
+ else
+ buf[2] = 0x18; /* OUTPUT PLUG SIGNAL FORMAT */
+ buf[3] = 0xff & pid; /* plug id */
+ buf[4] = 0x90; /* EOH_1, Form_1, FMT. AM824 */
+ buf[5] = 0x07 & sfc; /* FDF-hi. AM824, frequency */
+ buf[6] = 0xff; /* FDF-mid. AM824, SYT hi (not used) */
+ buf[7] = 0xff; /* FDF-low. AM824, SYT lo (not used) */
+
+ /* do the transaction and check that buf[1..5] in the response match the command */
+ err = fcp_avc_transaction(unit, buf, 8, buf, 8,
+ BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5));
+ if ((err > 0) && (err < 8))
+ err = -EIO;
+ else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
+ err = -ENOSYS;
+ if (err < 0)
+ goto end;
+
+ err = 0;
+end:
+ kfree(buf);
+ return err;
+}
diff --git a/sound/firewire/oxfw/oxfw-control.c b/sound/firewire/oxfw/oxfw-control.c
new file mode 100644
index 000000000000..02a1cb90f20d
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-control.c
@@ -0,0 +1,283 @@
+/*
+ * oxfw_control.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+
+enum control_action { CTL_READ, CTL_WRITE };
+enum control_attribute {
+ CTL_MIN = 0x02,
+ CTL_MAX = 0x03,
+ CTL_CURRENT = 0x10,
+};
+
+static int oxfw_mute_command(struct snd_oxfw *oxfw, bool *value,
+ enum control_action action)
+{
+ u8 *buf;
+ u8 response_ok;
+ int err;
+
+ buf = kmalloc(11, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (action == CTL_READ) {
+ buf[0] = 0x01; /* AV/C, STATUS */
+ response_ok = 0x0c; /* STABLE */
+ } else {
+ buf[0] = 0x00; /* AV/C, CONTROL */
+ response_ok = 0x09; /* ACCEPTED */
+ }
+ buf[1] = 0x08; /* audio unit 0 */
+ buf[2] = 0xb8; /* FUNCTION BLOCK */
+ buf[3] = 0x81; /* function block type: feature */
+ buf[4] = oxfw->device_info->mute_fb_id; /* function block ID */
+ buf[5] = 0x10; /* control attribute: current */
+ buf[6] = 0x02; /* selector length */
+ buf[7] = 0x00; /* audio channel number */
+ buf[8] = 0x01; /* control selector: mute */
+ buf[9] = 0x01; /* control data length */
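+ /* mute value: 0x70 means muted, 0x60 means unmuted (see the read path below) */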
+ if (action == CTL_READ)
+ buf[10] = 0xff;
+ else
+ buf[10] = *value ? 0x70 : 0x60;
+
+ err = fcp_avc_transaction(oxfw->unit, buf, 11, buf, 11, 0x3fe);
+ if (err < 0)
+ goto error;
+ if (err < 11) {
+ dev_err(&oxfw->unit->device, "short FCP response\n");
+ err = -EIO;
+ goto error;
+ }
+ if (buf[0] != response_ok) {
+ dev_err(&oxfw->unit->device, "mute command failed\n");
+ err = -EIO;
+ goto error;
+ }
+ if (action == CTL_READ)
+ *value = buf[10] == 0x70;
+
+ err = 0;
+
+error:
+ kfree(buf);
+
+ return err;
+}
+
+static int oxfw_volume_command(struct snd_oxfw *oxfw, s16 *value,
+ unsigned int channel,
+ enum control_attribute attribute,
+ enum control_action action)
+{
+ u8 *buf;
+ u8 response_ok;
+ int err;
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (action == CTL_READ) {
+ buf[0] = 0x01; /* AV/C, STATUS */
+ response_ok = 0x0c; /* STABLE */
+ } else {
+ buf[0] = 0x00; /* AV/C, CONTROL */
+ response_ok = 0x09; /* ACCEPTED */
+ }
+ buf[1] = 0x08; /* audio unit 0 */
+ buf[2] = 0xb8; /* FUNCTION BLOCK */
+ buf[3] = 0x81; /* function block type: feature */
+ buf[4] = oxfw->device_info->volume_fb_id; /* function block ID */
+ buf[5] = attribute; /* control attribute */
+ buf[6] = 0x02; /* selector length */
+ buf[7] = channel; /* audio channel number */
+ buf[8] = 0x02; /* control selector: volume */
+ buf[9] = 0x02; /* control data length */
+ if (action == CTL_READ) {
+ buf[10] = 0xff;
+ buf[11] = 0xff;
+ } else {
+ buf[10] = *value >> 8;
+ buf[11] = *value;
+ }
+
+ err = fcp_avc_transaction(oxfw->unit, buf, 12, buf, 12, 0x3fe);
+ if (err < 0)
+ goto error;
+ if (err < 12) {
+ dev_err(&oxfw->unit->device, "short FCP response\n");
+ err = -EIO;
+ goto error;
+ }
+ if (buf[0] != response_ok) {
+ dev_err(&oxfw->unit->device, "volume command failed\n");
+ err = -EIO;
+ goto error;
+ }
+ if (action == CTL_READ)
+ *value = (buf[10] << 8) | buf[11];
+
+ err = 0;
+
+error:
+ kfree(buf);
+
+ return err;
+}
+
+static int oxfw_mute_get(struct snd_kcontrol *control,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_oxfw *oxfw = control->private_data;
+
+ value->value.integer.value[0] = !oxfw->mute;
+
+ return 0;
+}
+
+static int oxfw_mute_put(struct snd_kcontrol *control,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_oxfw *oxfw = control->private_data;
+ bool mute;
+ int err;
+
+ mute = !value->value.integer.value[0];
+
+ if (mute == oxfw->mute)
+ return 0;
+
+ err = oxfw_mute_command(oxfw, &mute, CTL_WRITE);
+ if (err < 0)
+ return err;
+ oxfw->mute = mute;
+
+ return 1;
+}
+
+static int oxfw_volume_info(struct snd_kcontrol *control,
+ struct snd_ctl_elem_info *info)
+{
+ struct snd_oxfw *oxfw = control->private_data;
+
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = oxfw->device_info->mixer_channels;
+ info->value.integer.min = oxfw->volume_min;
+ info->value.integer.max = oxfw->volume_max;
+
+ return 0;
+}
+
+static const u8 channel_map[6] = { 0, 1, 4, 5, 2, 3 };
+
+static int oxfw_volume_get(struct snd_kcontrol *control,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_oxfw *oxfw = control->private_data;
+ unsigned int i;
+
+ for (i = 0; i < oxfw->device_info->mixer_channels; ++i)
+ value->value.integer.value[channel_map[i]] = oxfw->volume[i];
+
+ return 0;
+}
+
+static int oxfw_volume_put(struct snd_kcontrol *control,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_oxfw *oxfw = control->private_data;
+ unsigned int i, changed_channels;
+ bool equal_values = true;
+ s16 volume;
+ int err;
+
+ for (i = 0; i < oxfw->device_info->mixer_channels; ++i) {
+ if (value->value.integer.value[i] < oxfw->volume_min ||
+ value->value.integer.value[i] > oxfw->volume_max)
+ return -EINVAL;
+ if (value->value.integer.value[i] !=
+ value->value.integer.value[0])
+ equal_values = false;
+ }
+
+ changed_channels = 0;
+ for (i = 0; i < oxfw->device_info->mixer_channels; ++i)
+ if (value->value.integer.value[channel_map[i]] !=
+ oxfw->volume[i])
+ changed_channels |= 1 << (i + 1);
+
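+ /*
+ * Audio channel number 0 addresses all channels at once, so a uniform
+ * update can be collapsed into a single write below.
+ */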
+ if (equal_values && changed_channels != 0)
+ changed_channels = 1 << 0;
+
+ for (i = 0; i <= oxfw->device_info->mixer_channels; ++i) {
+ volume = value->value.integer.value[channel_map[i ? i - 1 : 0]];
+ if (changed_channels & (1 << i)) {
+ err = oxfw_volume_command(oxfw, &volume, i,
+ CTL_CURRENT, CTL_WRITE);
+ if (err < 0)
+ return err;
+ }
+ if (i > 0)
+ oxfw->volume[i - 1] = volume;
+ }
+
+ return changed_channels != 0;
+}
+
+int snd_oxfw_create_mixer(struct snd_oxfw *oxfw)
+{
+ static const struct snd_kcontrol_new controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = oxfw_mute_get,
+ .put = oxfw_mute_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Volume",
+ .info = oxfw_volume_info,
+ .get = oxfw_volume_get,
+ .put = oxfw_volume_put,
+ },
+ };
+ unsigned int i, first_ch;
+ int err;
+
+ err = oxfw_volume_command(oxfw, &oxfw->volume_min,
+ 0, CTL_MIN, CTL_READ);
+ if (err < 0)
+ return err;
+ err = oxfw_volume_command(oxfw, &oxfw->volume_max,
+ 0, CTL_MAX, CTL_READ);
+ if (err < 0)
+ return err;
+
+ err = oxfw_mute_command(oxfw, &oxfw->mute, CTL_READ);
+ if (err < 0)
+ return err;
+
+ first_ch = oxfw->device_info->mixer_channels == 1 ? 0 : 1;
+ for (i = 0; i < oxfw->device_info->mixer_channels; ++i) {
+ err = oxfw_volume_command(oxfw, &oxfw->volume[i],
+ first_ch + i, CTL_CURRENT, CTL_READ);
+ if (err < 0)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(controls); ++i) {
+ err = snd_ctl_add(oxfw->card,
+ snd_ctl_new1(&controls[i], oxfw));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/sound/firewire/oxfw/oxfw-hwdep.c b/sound/firewire/oxfw/oxfw-hwdep.c
new file mode 100644
index 000000000000..ff2687ad0460
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-hwdep.c
@@ -0,0 +1,190 @@
+/*
+ * oxfw_hwdep.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+/*
+ * This code provides three functionalities:
+ *
+ * 1. get FireWire node information
+ * 2. get notification about starting/stopping streams
+ * 3. lock/unlock streams
+ */
+
+#include "oxfw.h"
+
+static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ loff_t *offset)
+{
+ struct snd_oxfw *oxfw = hwdep->private_data;
+ DEFINE_WAIT(wait);
+ union snd_firewire_event event;
+
+ spin_lock_irq(&oxfw->lock);
+
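+ /* Sleep until the lock status changes; the spinlock is released while waiting. */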
+ while (!oxfw->dev_lock_changed) {
+ prepare_to_wait(&oxfw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&oxfw->lock);
+ schedule();
+ finish_wait(&oxfw->hwdep_wait, &wait);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irq(&oxfw->lock);
+ }
+
+ memset(&event, 0, sizeof(event));
+ if (oxfw->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = (oxfw->dev_lock_count > 0);
+ oxfw->dev_lock_changed = false;
+
+ count = min_t(long, count, sizeof(event.lock_status));
+ }
+
+ spin_unlock_irq(&oxfw->lock);
+
+ if (copy_to_user(buf, &event, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ poll_table *wait)
+{
+ struct snd_oxfw *oxfw = hwdep->private_data;
+ unsigned int events;
+
+ poll_wait(file, &oxfw->hwdep_wait, wait);
+
+ spin_lock_irq(&oxfw->lock);
+ if (oxfw->dev_lock_changed)
+ events = POLLIN | POLLRDNORM;
+ else
+ events = 0;
+ spin_unlock_irq(&oxfw->lock);
+
+ return events;
+}
+
+static int hwdep_get_info(struct snd_oxfw *oxfw, void __user *arg)
+{
+ struct fw_device *dev = fw_parent_device(oxfw->unit);
+ struct snd_firewire_get_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.type = SNDRV_FIREWIRE_TYPE_OXFW;
+ info.card = dev->card->index;
+ *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
+ *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
+ strlcpy(info.device_name, dev_name(&dev->device),
+ sizeof(info.device_name));
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
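+/*
+ * A negative dev_lock_count means the device is locked by userspace through
+ * this hwdep interface; positive values count kernel-side stream users.
+ */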
+static int hwdep_lock(struct snd_oxfw *oxfw)
+{
+ int err;
+
+ spin_lock_irq(&oxfw->lock);
+
+ if (oxfw->dev_lock_count == 0) {
+ oxfw->dev_lock_count = -1;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irq(&oxfw->lock);
+
+ return err;
+}
+
+static int hwdep_unlock(struct snd_oxfw *oxfw)
+{
+ int err;
+
+ spin_lock_irq(&oxfw->lock);
+
+ if (oxfw->dev_lock_count == -1) {
+ oxfw->dev_lock_count = 0;
+ err = 0;
+ } else {
+ err = -EBADFD;
+ }
+
+ spin_unlock_irq(&oxfw->lock);
+
+ return err;
+}
+
+static int hwdep_release(struct snd_hwdep *hwdep, struct file *file)
+{
+ struct snd_oxfw *oxfw = hwdep->private_data;
+
+ spin_lock_irq(&oxfw->lock);
+ if (oxfw->dev_lock_count == -1)
+ oxfw->dev_lock_count = 0;
+ spin_unlock_irq(&oxfw->lock);
+
+ return 0;
+}
+
+static int hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct snd_oxfw *oxfw = hwdep->private_data;
+
+ switch (cmd) {
+ case SNDRV_FIREWIRE_IOCTL_GET_INFO:
+ return hwdep_get_info(oxfw, (void __user *)arg);
+ case SNDRV_FIREWIRE_IOCTL_LOCK:
+ return hwdep_lock(oxfw);
+ case SNDRV_FIREWIRE_IOCTL_UNLOCK:
+ return hwdep_unlock(oxfw);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static int hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return hwdep_ioctl(hwdep, file, cmd,
+ (unsigned long)compat_ptr(arg));
+}
+#else
+#define hwdep_compat_ioctl NULL
+#endif
+
+int snd_oxfw_create_hwdep(struct snd_oxfw *oxfw)
+{
+ static const struct snd_hwdep_ops hwdep_ops = {
+ .read = hwdep_read,
+ .release = hwdep_release,
+ .poll = hwdep_poll,
+ .ioctl = hwdep_ioctl,
+ .ioctl_compat = hwdep_compat_ioctl,
+ };
+ struct snd_hwdep *hwdep;
+ int err;
+
+ err = snd_hwdep_new(oxfw->card, oxfw->card->driver, 0, &hwdep);
+ if (err < 0)
+ goto end;
+ strcpy(hwdep->name, oxfw->card->driver);
+ hwdep->iface = SNDRV_HWDEP_IFACE_FW_OXFW;
+ hwdep->ops = hwdep_ops;
+ hwdep->private_data = oxfw;
+ hwdep->exclusive = true;
+end:
+ return err;
+}
diff --git a/sound/firewire/oxfw/oxfw-midi.c b/sound/firewire/oxfw/oxfw-midi.c
new file mode 100644
index 000000000000..540a30338516
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-midi.c
@@ -0,0 +1,207 @@
+/*
+ * oxfw_midi.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+
+static int midi_capture_open(struct snd_rawmidi_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->rmidi->private_data;
+ int err;
+
+ err = snd_oxfw_stream_lock_try(oxfw);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&oxfw->mutex);
+
+ oxfw->capture_substreams++;
+ err = snd_oxfw_stream_start_simplex(oxfw, &oxfw->tx_stream, 0, 0);
+
+ mutex_unlock(&oxfw->mutex);
+
+ if (err < 0)
+ snd_oxfw_stream_lock_release(oxfw);
+
+ return err;
+}
+
+static int midi_playback_open(struct snd_rawmidi_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->rmidi->private_data;
+ int err;
+
+ err = snd_oxfw_stream_lock_try(oxfw);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&oxfw->mutex);
+
+ oxfw->playback_substreams++;
+ err = snd_oxfw_stream_start_simplex(oxfw, &oxfw->rx_stream, 0, 0);
+
+ mutex_unlock(&oxfw->mutex);
+
+ if (err < 0)
+ snd_oxfw_stream_lock_release(oxfw);
+
+ return err;
+}
+
+static int midi_capture_close(struct snd_rawmidi_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->rmidi->private_data;
+
+ mutex_lock(&oxfw->mutex);
+
+ oxfw->capture_substreams--;
+ snd_oxfw_stream_stop_simplex(oxfw, &oxfw->tx_stream);
+
+ mutex_unlock(&oxfw->mutex);
+
+ snd_oxfw_stream_lock_release(oxfw);
+ return 0;
+}
+
+static int midi_playback_close(struct snd_rawmidi_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->rmidi->private_data;
+
+ mutex_lock(&oxfw->mutex);
+
+ oxfw->playback_substreams--;
+ snd_oxfw_stream_stop_simplex(oxfw, &oxfw->rx_stream);
+
+ mutex_unlock(&oxfw->mutex);
+
+ snd_oxfw_stream_lock_release(oxfw);
+ return 0;
+}
+
+static void midi_capture_trigger(struct snd_rawmidi_substream *substrm, int up)
+{
+ struct snd_oxfw *oxfw = substrm->rmidi->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oxfw->lock, flags);
+
+ if (up)
+ amdtp_stream_midi_trigger(&oxfw->tx_stream,
+ substrm->number, substrm);
+ else
+ amdtp_stream_midi_trigger(&oxfw->tx_stream,
+ substrm->number, NULL);
+
+ spin_unlock_irqrestore(&oxfw->lock, flags);
+}
+
+static void midi_playback_trigger(struct snd_rawmidi_substream *substrm, int up)
+{
+ struct snd_oxfw *oxfw = substrm->rmidi->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oxfw->lock, flags);
+
+ if (up)
+ amdtp_stream_midi_trigger(&oxfw->rx_stream,
+ substrm->number, substrm);
+ else
+ amdtp_stream_midi_trigger(&oxfw->rx_stream,
+ substrm->number, NULL);
+
+ spin_unlock_irqrestore(&oxfw->lock, flags);
+}
+
+static struct snd_rawmidi_ops midi_capture_ops = {
+ .open = midi_capture_open,
+ .close = midi_capture_close,
+ .trigger = midi_capture_trigger,
+};
+
+static struct snd_rawmidi_ops midi_playback_ops = {
+ .open = midi_playback_open,
+ .close = midi_playback_close,
+ .trigger = midi_playback_trigger,
+};
+
+static void set_midi_substream_names(struct snd_oxfw *oxfw,
+ struct snd_rawmidi_str *str)
+{
+ struct snd_rawmidi_substream *subs;
+
+ list_for_each_entry(subs, &str->substreams, list) {
+ snprintf(subs->name, sizeof(subs->name),
+ "%s MIDI %d",
+ oxfw->card->shortname, subs->number + 1);
+ }
+}
+
+int snd_oxfw_create_midi(struct snd_oxfw *oxfw)
+{
+ struct snd_oxfw_stream_formation formation;
+ struct snd_rawmidi *rmidi;
+ struct snd_rawmidi_str *str;
+ u8 *format;
+ int i, err;
+
+ /* If any stream format has a MIDI conformant data channel, add one MIDI port */
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ format = oxfw->tx_stream_formats[i];
+ if (format != NULL) {
+ err = snd_oxfw_stream_parse_format(format, &formation);
+ if (err >= 0 && formation.midi > 0)
+ oxfw->midi_input_ports = 1;
+ }
+
+ format = oxfw->rx_stream_formats[i];
+ if (format != NULL) {
+ err = snd_oxfw_stream_parse_format(format, &formation);
+ if (err >= 0 && formation.midi > 0)
+ oxfw->midi_output_ports = 1;
+ }
+ }
+ if ((oxfw->midi_input_ports == 0) && (oxfw->midi_output_ports == 0))
+ return 0;
+
+ /* create midi ports */
+ err = snd_rawmidi_new(oxfw->card, oxfw->card->driver, 0,
+ oxfw->midi_output_ports, oxfw->midi_input_ports,
+ &rmidi);
+ if (err < 0)
+ return err;
+
+ snprintf(rmidi->name, sizeof(rmidi->name),
+ "%s MIDI", oxfw->card->shortname);
+ rmidi->private_data = oxfw;
+
+ if (oxfw->midi_input_ports > 0) {
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;
+
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+ &midi_capture_ops);
+
+ str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT];
+
+ set_midi_substream_names(oxfw, str);
+ }
+
+ if (oxfw->midi_output_ports > 0) {
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT;
+
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
+ &midi_playback_ops);
+
+ str = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT];
+
+ set_midi_substream_names(oxfw, str);
+ }
+
+ if ((oxfw->midi_output_ports > 0) && (oxfw->midi_input_ports > 0))
+ rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;
+
+ return 0;
+}
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
new file mode 100644
index 000000000000..9bc556b15a92
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -0,0 +1,424 @@
+/*
+ * oxfw_pcm.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+
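+/*
+ * The hw_rule_* callbacks constrain the PCM rate and the channel count
+ * against each other, based on the stream formats cached at probe time.
+ */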
+static int hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ u8 **formats = rule->private;
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ const struct snd_interval *c =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval t = {
+ .min = UINT_MAX, .max = 0, .integer = 1
+ };
+ struct snd_oxfw_stream_formation formation;
+ unsigned int i, err;
+
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ if (formats[i] == NULL)
+ continue;
+
+ err = snd_oxfw_stream_parse_format(formats[i], &formation);
+ if (err < 0)
+ continue;
+ if (!snd_interval_test(c, formation.pcm))
+ continue;
+
+ t.min = min(t.min, formation.rate);
+ t.max = max(t.max, formation.rate);
+
+ }
+ return snd_interval_refine(r, &t);
+}
+
+static int hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ u8 **formats = rule->private;
+ struct snd_interval *c =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ const struct snd_interval *r =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_oxfw_stream_formation formation;
+ unsigned int i, j, err;
+ unsigned int count, list[SND_OXFW_STREAM_FORMAT_ENTRIES] = {0};
+
+ count = 0;
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ if (formats[i] == NULL)
+ break;
+
+ err = snd_oxfw_stream_parse_format(formats[i], &formation);
+ if (err < 0)
+ continue;
+ if (!snd_interval_test(r, formation.rate))
+ continue;
+ if (list[count] == formation.pcm)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(list); j++) {
+ if (list[j] == formation.pcm)
+ break;
+ }
+ if (j == ARRAY_SIZE(list)) {
+ list[count] = formation.pcm;
+ if (++count == ARRAY_SIZE(list))
+ break;
+ }
+ }
+
+ return snd_interval_list(c, count, list, 0);
+}
+
+static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats)
+{
+ struct snd_oxfw_stream_formation formation;
+ unsigned int i, err;
+
+ hw->channels_min = UINT_MAX;
+ hw->channels_max = 0;
+
+ hw->rate_min = UINT_MAX;
+ hw->rate_max = 0;
+ hw->rates = 0;
+
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ if (formats[i] == NULL)
+ break;
+
+ err = snd_oxfw_stream_parse_format(formats[i], &formation);
+ if (err < 0)
+ continue;
+
+ hw->channels_min = min(hw->channels_min, formation.pcm);
+ hw->channels_max = max(hw->channels_max, formation.pcm);
+
+ hw->rate_min = min(hw->rate_min, formation.rate);
+ hw->rate_max = max(hw->rate_max, formation.rate);
+ hw->rates |= snd_pcm_rate_to_rate_bit(formation.rate);
+ }
+}
+
+static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
+{
+ hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
+ hw->periods_max = UINT_MAX;
+
+ hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */
+
+ /* Just to avoid allocating too many pages. */
+ hw->period_bytes_max = hw->period_bytes_min * 2048;
+ hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
+}
+
+static int init_hw_params(struct snd_oxfw *oxfw,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u8 **formats;
+ struct amdtp_stream *stream;
+ int err;
+
+ runtime->hw.info = SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_JOINT_DUPLEX |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ runtime->hw.formats = AMDTP_IN_PCM_FORMAT_BITS;
+ stream = &oxfw->tx_stream;
+ formats = oxfw->tx_stream_formats;
+ } else {
+ runtime->hw.formats = AMDTP_OUT_PCM_FORMAT_BITS;
+ stream = &oxfw->rx_stream;
+ formats = oxfw->rx_stream_formats;
+ }
+
+ limit_channels_and_rates(&runtime->hw, formats);
+ limit_period_and_buffer(&runtime->hw);
+
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ hw_rule_channels, formats,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ hw_rule_rate, formats,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (err < 0)
+ goto end;
+
+ err = amdtp_stream_add_pcm_hw_constraints(stream, runtime);
+end:
+ return err;
+}
+
+static int limit_to_current_params(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ struct snd_oxfw_stream_formation formation;
+ enum avc_general_plug_dir dir;
+ int err;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = AVC_GENERAL_PLUG_DIR_OUT;
+ else
+ dir = AVC_GENERAL_PLUG_DIR_IN;
+
+ err = snd_oxfw_stream_get_current_formation(oxfw, dir, &formation);
+ if (err < 0)
+ goto end;
+
+ substream->runtime->hw.channels_min = formation.pcm;
+ substream->runtime->hw.channels_max = formation.pcm;
+ substream->runtime->hw.rate_min = formation.rate;
+ substream->runtime->hw.rate_max = formation.rate;
+end:
+ return err;
+}
+
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ int err;
+
+ err = snd_oxfw_stream_lock_try(oxfw);
+ if (err < 0)
+ goto end;
+
+ err = init_hw_params(oxfw, substream);
+ if (err < 0)
+ goto err_locked;
+
+ /*
+ * When any PCM stream is already running, the available sampling
+ * rate is limited to the current value.
+ */
+ if (amdtp_stream_pcm_running(&oxfw->tx_stream) ||
+ amdtp_stream_pcm_running(&oxfw->rx_stream)) {
+ err = limit_to_current_params(substream);
+ if (err < 0)
+ goto err_locked;
+ }
+
+ snd_pcm_set_sync(substream);
+end:
+ return err;
+err_locked:
+ snd_oxfw_stream_lock_release(oxfw);
+ return err;
+}
+
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ snd_oxfw_stream_lock_release(oxfw);
+ return 0;
+}
+
+static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
+ mutex_lock(&oxfw->mutex);
+ oxfw->capture_substreams++;
+ mutex_unlock(&oxfw->mutex);
+ }
+
+ amdtp_stream_set_pcm_format(&oxfw->tx_stream, params_format(hw_params));
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
+ mutex_lock(&oxfw->mutex);
+ oxfw->playback_substreams++;
+ mutex_unlock(&oxfw->mutex);
+ }
+
+ amdtp_stream_set_pcm_format(&oxfw->rx_stream, params_format(hw_params));
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ mutex_lock(&oxfw->mutex);
+
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ oxfw->capture_substreams--;
+
+ snd_oxfw_stream_stop_simplex(oxfw, &oxfw->tx_stream);
+
+ mutex_unlock(&oxfw->mutex);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+static int pcm_playback_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ mutex_lock(&oxfw->mutex);
+
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ oxfw->playback_substreams--;
+
+ snd_oxfw_stream_stop_simplex(oxfw, &oxfw->rx_stream);
+
+ mutex_unlock(&oxfw->mutex);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ mutex_lock(&oxfw->mutex);
+ err = snd_oxfw_stream_start_simplex(oxfw, &oxfw->tx_stream,
+ runtime->rate, runtime->channels);
+ mutex_unlock(&oxfw->mutex);
+ if (err < 0)
+ goto end;
+
+ amdtp_stream_pcm_prepare(&oxfw->tx_stream);
+end:
+ return err;
+}
+static int pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ mutex_lock(&oxfw->mutex);
+ err = snd_oxfw_stream_start_simplex(oxfw, &oxfw->rx_stream,
+ runtime->rate, runtime->channels);
+ mutex_unlock(&oxfw->mutex);
+ if (err < 0)
+ goto end;
+
+ amdtp_stream_pcm_prepare(&oxfw->rx_stream);
+end:
+ return err;
+}
+
+static int pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ struct snd_pcm_substream *pcm;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ pcm = substream;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ pcm = NULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ amdtp_stream_pcm_trigger(&oxfw->tx_stream, pcm);
+ return 0;
+}
+static int pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+ struct snd_pcm_substream *pcm;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ pcm = substream;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ pcm = NULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ amdtp_stream_pcm_trigger(&oxfw->rx_stream, pcm);
+ return 0;
+}
+
+static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstm)
+{
+ struct snd_oxfw *oxfw = sbstm->private_data;
+
+ return amdtp_stream_pcm_pointer(&oxfw->tx_stream);
+}
+static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstm)
+{
+ struct snd_oxfw *oxfw = sbstm->private_data;
+
+ return amdtp_stream_pcm_pointer(&oxfw->rx_stream);
+}
+
+int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
+{
+ static struct snd_pcm_ops capture_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_capture_hw_params,
+ .hw_free = pcm_capture_hw_free,
+ .prepare = pcm_capture_prepare,
+ .trigger = pcm_capture_trigger,
+ .pointer = pcm_capture_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+ static struct snd_pcm_ops playback_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_playback_hw_params,
+ .hw_free = pcm_playback_hw_free,
+ .prepare = pcm_playback_prepare,
+ .trigger = pcm_playback_trigger,
+ .pointer = pcm_playback_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
+ struct snd_pcm *pcm;
+ unsigned int cap = 0;
+ int err;
+
+ if (oxfw->has_output)
+ cap = 1;
+
+ err = snd_pcm_new(oxfw->card, oxfw->card->driver, 0, 1, cap, &pcm);
+ if (err < 0)
+ return err;
+
+ pcm->private_data = oxfw;
+ strcpy(pcm->name, oxfw->card->shortname);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
+ if (cap > 0)
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+
+ return 0;
+}
diff --git a/sound/firewire/oxfw/oxfw-proc.c b/sound/firewire/oxfw/oxfw-proc.c
new file mode 100644
index 000000000000..604808e5526d
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-proc.c
@@ -0,0 +1,113 @@
+/*
+ * oxfw_proc.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "./oxfw.h"
+
+static void proc_read_formation(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct snd_oxfw *oxfw = entry->private_data;
+ struct snd_oxfw_stream_formation formation, curr;
+ u8 *format;
+ char flag;
+ unsigned int i, err;
+
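+ /* An asterisk marks the format currently configured on the plug. */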
+ /* Show input. */
+ err = snd_oxfw_stream_get_current_formation(oxfw,
+ AVC_GENERAL_PLUG_DIR_IN,
+ &curr);
+ if (err < 0)
+ return;
+
+ snd_iprintf(buffer, "Input Stream to device:\n");
+ snd_iprintf(buffer, "\tRate\tPCM\tMIDI\n");
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ format = oxfw->rx_stream_formats[i];
+ if (format == NULL)
+ continue;
+
+ err = snd_oxfw_stream_parse_format(format, &formation);
+ if (err < 0)
+ continue;
+
+ if (memcmp(&formation, &curr, sizeof(curr)) == 0)
+ flag = '*';
+ else
+ flag = ' ';
+
+ snd_iprintf(buffer, "%c\t%d\t%d\t%d\n", flag,
+ formation.rate, formation.pcm, formation.midi);
+ }
+
+ if (!oxfw->has_output)
+ return;
+
+ /* Show output. */
+ err = snd_oxfw_stream_get_current_formation(oxfw,
+ AVC_GENERAL_PLUG_DIR_OUT,
+ &curr);
+ if (err < 0)
+ return;
+
+ snd_iprintf(buffer, "Output Stream from device:\n");
+ snd_iprintf(buffer, "\tRate\tPCM\tMIDI\n");
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ format = oxfw->tx_stream_formats[i];
+ if (format == NULL)
+ continue;
+
+ err = snd_oxfw_stream_parse_format(format, &formation);
+ if (err < 0)
+ continue;
+
+ if (memcmp(&formation, &curr, sizeof(curr)) == 0)
+ flag = '*';
+ else
+ flag = ' ';
+
+ snd_iprintf(buffer, "%c\t%d\t%d\t%d\n", flag,
+ formation.rate, formation.pcm, formation.midi);
+ }
+}
+
+static void add_node(struct snd_oxfw *oxfw, struct snd_info_entry *root,
+ const char *name,
+ void (*op)(struct snd_info_entry *e,
+ struct snd_info_buffer *b))
+{
+ struct snd_info_entry *entry;
+
+ entry = snd_info_create_card_entry(oxfw->card, name, root);
+ if (entry == NULL)
+ return;
+
+ snd_info_set_text_ops(entry, oxfw, op);
+ if (snd_info_register(entry) < 0)
+ snd_info_free_entry(entry);
+}
+
+void snd_oxfw_proc_init(struct snd_oxfw *oxfw)
+{
+ struct snd_info_entry *root;
+
+ /*
+ * All nodes are automatically removed at snd_card_disconnect()
+ * by following the linked list.
+ */
+ root = snd_info_create_card_entry(oxfw->card, "firewire",
+ oxfw->card->proc_root);
+ if (root == NULL)
+ return;
+ root->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ if (snd_info_register(root) < 0) {
+ snd_info_free_entry(root);
+ return;
+ }
+
+ add_node(oxfw, root, "formation", proc_read_formation);
+}
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
new file mode 100644
index 000000000000..b77cf80f1678
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -0,0 +1,685 @@
+/*
+ * oxfw_stream.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) 2014 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+#include <linux/delay.h>
+
+#define AVC_GENERIC_FRAME_MAXIMUM_BYTES 512
+#define CALLBACK_TIMEOUT 200
+
+/*
+ * According to the datasheet of Oxford Semiconductor:
+ * OXFW970: 32.0/44.1/48.0/96.0 kHz, 8 audio channels I/O
+ * OXFW971: 32.0/44.1/48.0/88.2/96.0/192.0 kHz, 16 audio channels I/O, MIDI I/O
+ */
+static const unsigned int oxfw_rate_table[] = {
+ [0] = 32000,
+ [1] = 44100,
+ [2] = 48000,
+ [3] = 88200,
+ [4] = 96000,
+ [5] = 192000,
+};
+
+/*
+ * See Table 5.7 – Sampling frequency for Multi-bit Audio
+ * in AV/C Stream Format Information Specification 1.1 (Apr 2005, 1394TA)
+ */
+static const unsigned int avc_stream_rate_table[] = {
+ [0] = 0x02,
+ [1] = 0x03,
+ [2] = 0x04,
+ [3] = 0x0a,
+ [4] = 0x05,
+ [5] = 0x07,
+};
+
+static int set_rate(struct snd_oxfw *oxfw, unsigned int rate)
+{
+ int err;
+
+ err = avc_general_set_sig_fmt(oxfw->unit, rate,
+ AVC_GENERAL_PLUG_DIR_IN, 0);
+ if (err < 0)
+ goto end;
+
+ if (oxfw->has_output)
+ err = avc_general_set_sig_fmt(oxfw->unit, rate,
+ AVC_GENERAL_PLUG_DIR_OUT, 0);
+end:
+ return err;
+}
+
+static int set_stream_format(struct snd_oxfw *oxfw, struct amdtp_stream *s,
+ unsigned int rate, unsigned int pcm_channels)
+{
+ u8 **formats;
+ struct snd_oxfw_stream_formation formation;
+ enum avc_general_plug_dir dir;
+ unsigned int i, err, len;
+
+ if (s == &oxfw->tx_stream) {
+ formats = oxfw->tx_stream_formats;
+ dir = AVC_GENERAL_PLUG_DIR_OUT;
+ } else {
+ formats = oxfw->rx_stream_formats;
+ dir = AVC_GENERAL_PLUG_DIR_IN;
+ }
+
+ /* Seek a stream format that matches the requirements. */
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ err = snd_oxfw_stream_parse_format(formats[i], &formation);
+ if (err < 0)
+ return err;
+
+ if ((formation.rate == rate) && (formation.pcm == pcm_channels))
+ break;
+ }
+ if (i == SND_OXFW_STREAM_FORMAT_ENTRIES)
+ return -EINVAL;
+
+ /* If the formats were assumed, just change the rate. */
+ if (oxfw->assumed)
+ return set_rate(oxfw, rate);
+
+ /* Calculate format length. */
+ len = 5 + formats[i][4] * 2;
+
+ err = avc_stream_set_format(oxfw->unit, dir, 0, formats[i], len);
+ if (err < 0)
+ return err;
+
+ /* Some requests issued just after changing the format cause the device to freeze. */
+ msleep(100);
+
+ return 0;
+}
+
+static void stop_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
+{
+ amdtp_stream_pcm_abort(stream);
+ amdtp_stream_stop(stream);
+
+ if (stream == &oxfw->tx_stream)
+ cmp_connection_break(&oxfw->out_conn);
+ else
+ cmp_connection_break(&oxfw->in_conn);
+}
+
+static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream,
+ unsigned int rate, unsigned int pcm_channels)
+{
+ u8 **formats;
+ struct cmp_connection *conn;
+ struct snd_oxfw_stream_formation formation;
+ unsigned int i, midi_ports;
+ int err;
+
+ if (stream == &oxfw->rx_stream) {
+ formats = oxfw->rx_stream_formats;
+ conn = &oxfw->in_conn;
+ } else {
+ formats = oxfw->tx_stream_formats;
+ conn = &oxfw->out_conn;
+ }
+
+ /* Get stream format */
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ if (formats[i] == NULL)
+ break;
+
+ err = snd_oxfw_stream_parse_format(formats[i], &formation);
+ if (err < 0)
+ goto end;
+ if (rate != formation.rate)
+ continue;
+ if (pcm_channels == 0 || pcm_channels == formation.pcm)
+ break;
+ }
+ if (i == SND_OXFW_STREAM_FORMAT_ENTRIES) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ pcm_channels = formation.pcm;
+ midi_ports = DIV_ROUND_UP(formation.midi, 8);
+
+ /* The stream should have at least one PCM channel */
+ if (pcm_channels == 0) {
+ err = -EINVAL;
+ goto end;
+ }
+ amdtp_stream_set_parameters(stream, rate, pcm_channels, midi_ports);
+
+ err = cmp_connection_establish(conn,
+ amdtp_stream_get_max_payload(stream));
+ if (err < 0)
+ goto end;
+
+ err = amdtp_stream_start(stream,
+ conn->resources.channel,
+ conn->speed);
+ if (err < 0) {
+ cmp_connection_break(conn);
+ goto end;
+ }
+
+ /* Wait for the first packet */
+ err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT);
+ if (err < 0)
+ stop_stream(oxfw, stream);
+end:
+ return err;
+}
+
+static int check_connection_used_by_others(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream)
+{
+ struct cmp_connection *conn;
+ bool used;
+ int err;
+
+ if (stream == &oxfw->tx_stream)
+ conn = &oxfw->out_conn;
+ else
+ conn = &oxfw->in_conn;
+
+ err = cmp_connection_check_used(conn, &used);
+ if ((err >= 0) && used && !amdtp_stream_running(stream)) {
+ dev_err(&oxfw->unit->device,
+ "Connection established by others: %cPCR[%d]\n",
+ (conn->direction == CMP_OUTPUT) ? 'o' : 'i',
+ conn->pcr_index);
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+int snd_oxfw_stream_init_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream)
+{
+ struct cmp_connection *conn;
+ enum cmp_direction c_dir;
+ enum amdtp_stream_direction s_dir;
+ int err;
+
+ if (stream == &oxfw->tx_stream) {
+ conn = &oxfw->out_conn;
+ c_dir = CMP_OUTPUT;
+ s_dir = AMDTP_IN_STREAM;
+ } else {
+ conn = &oxfw->in_conn;
+ c_dir = CMP_INPUT;
+ s_dir = AMDTP_OUT_STREAM;
+ }
+
+ err = cmp_connection_init(conn, oxfw->unit, c_dir, 0);
+ if (err < 0)
+ goto end;
+
+ err = amdtp_stream_init(stream, oxfw->unit, s_dir, CIP_NONBLOCKING);
+ if (err < 0) {
+ amdtp_stream_destroy(stream);
+ cmp_connection_destroy(conn);
+ goto end;
+ }
+
+ /* OXFW starts to transmit packets with non-zero dbc. */
+ if (stream == &oxfw->tx_stream)
+ oxfw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+end:
+ return err;
+}
+
+int snd_oxfw_stream_start_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream,
+ unsigned int rate, unsigned int pcm_channels)
+{
+ struct amdtp_stream *opposite;
+ struct snd_oxfw_stream_formation formation;
+ enum avc_general_plug_dir dir;
+ unsigned int substreams, opposite_substreams;
+ int err = 0;
+
+ if (stream == &oxfw->tx_stream) {
+ substreams = oxfw->capture_substreams;
+ opposite = &oxfw->rx_stream;
+ opposite_substreams = oxfw->playback_substreams;
+ dir = AVC_GENERAL_PLUG_DIR_OUT;
+ } else {
+ substreams = oxfw->playback_substreams;
+ opposite_substreams = oxfw->capture_substreams;
+
+ if (oxfw->has_output)
+ opposite = &oxfw->rx_stream;
+ else
+ opposite = NULL;
+
+ dir = AVC_GENERAL_PLUG_DIR_IN;
+ }
+
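+ /* Nothing to do until at least one substream in this direction is opened. */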
+ if (substreams == 0)
+ goto end;
+
+ /*
+ * Considering JACK/FFADO streaming:
+ * TODO: This can be removed when hwdep functionality becomes popular.
+ */
+ err = check_connection_used_by_others(oxfw, stream);
+ if (err < 0)
+ goto end;
+
+ /* packet queueing error */
+ if (amdtp_streaming_error(stream))
+ stop_stream(oxfw, stream);
+
+ err = snd_oxfw_stream_get_current_formation(oxfw, dir, &formation);
+ if (err < 0)
+ goto end;
+ if (rate == 0)
+ rate = formation.rate;
+ if (pcm_channels == 0)
+ pcm_channels = formation.pcm;
+
+ if ((formation.rate != rate) || (formation.pcm != pcm_channels)) {
+ if (opposite != NULL) {
+ err = check_connection_used_by_others(oxfw, opposite);
+ if (err < 0)
+ goto end;
+ stop_stream(oxfw, opposite);
+ }
+ stop_stream(oxfw, stream);
+
+ err = set_stream_format(oxfw, stream, rate, pcm_channels);
+ if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to set stream format: %d\n", err);
+ goto end;
+ }
+
+ /* Start opposite stream if needed. */
+ if (opposite && !amdtp_stream_running(opposite) &&
+ (opposite_substreams > 0)) {
+ err = start_stream(oxfw, opposite, rate, 0);
+ if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to restart stream: %d\n", err);
+ goto end;
+ }
+ }
+ }
+
+ /* Start requested stream. */
+ if (!amdtp_stream_running(stream)) {
+ err = start_stream(oxfw, stream, rate, pcm_channels);
+ if (err < 0)
+ dev_err(&oxfw->unit->device,
+ "fail to start stream: %d\n", err);
+ }
+end:
+ return err;
+}
+
+void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream)
+{
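+ /* Keep the stream running while any PCM/MIDI substream still uses it. */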
+ if (((stream == &oxfw->tx_stream) && (oxfw->capture_substreams > 0)) ||
+ ((stream == &oxfw->rx_stream) && (oxfw->playback_substreams > 0)))
+ return;
+
+ stop_stream(oxfw, stream);
+}
+
+void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream)
+{
+ struct cmp_connection *conn;
+
+ if (stream == &oxfw->tx_stream)
+ conn = &oxfw->out_conn;
+ else
+ conn = &oxfw->in_conn;
+
+ stop_stream(oxfw, stream);
+
+ amdtp_stream_destroy(stream);
+ cmp_connection_destroy(conn);
+}
+
+void snd_oxfw_stream_update_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream)
+{
+ struct cmp_connection *conn;
+
+ if (stream == &oxfw->tx_stream)
+ conn = &oxfw->out_conn;
+ else
+ conn = &oxfw->in_conn;
+
+ if (cmp_connection_update(conn) < 0)
+ stop_stream(oxfw, stream);
+ else
+ amdtp_stream_update(stream);
+}
+
+int snd_oxfw_stream_get_current_formation(struct snd_oxfw *oxfw,
+ enum avc_general_plug_dir dir,
+ struct snd_oxfw_stream_formation *formation)
+{
+ u8 *format;
+ unsigned int len;
+ int err;
+
+ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
+ format = kmalloc(len, GFP_KERNEL);
+ if (format == NULL)
+ return -ENOMEM;
+
+ err = avc_stream_get_format_single(oxfw->unit, dir, 0, format, &len);
+ if (err < 0)
+ goto end;
+ if (len < 3) {
+ err = -EIO;
+ goto end;
+ }
+
+ err = snd_oxfw_stream_parse_format(format, formation);
+end:
+ kfree(format);
+ return err;
+}
+
+/*
+ * See Table 6.16 - AM824 Stream Format
+ * Figure 6.19 - format_information field for AM824 Compound
+ * in AV/C Stream Format Information Specification 1.1 (Apr 2005, 1394TA)
+ * Also 'Clause 12 AM824 sequence adaption layers' in IEC 61883-6:2005
+ */
+int snd_oxfw_stream_parse_format(u8 *format,
+ struct snd_oxfw_stream_formation *formation)
+{
+ unsigned int i, e, channels, type;
+
+ memset(formation, 0, sizeof(struct snd_oxfw_stream_formation));
+
+ /*
+ * This module supports only the following format hierarchy:
+ * Root: Audio and Music (0x90)
+ * Level 1: AM824 Compound (0x40)
+ */
+ if ((format[0] != 0x90) || (format[1] != 0x40))
+ return -ENOSYS;
+
+ /* check the sampling rate */
+ for (i = 0; i < ARRAY_SIZE(avc_stream_rate_table); i++) {
+ if (format[2] == avc_stream_rate_table[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(avc_stream_rate_table))
+ return -ENOSYS;
+
+ formation->rate = oxfw_rate_table[i];
+
+ for (e = 0; e < format[4]; e++) {
+ channels = format[5 + e * 2];
+ type = format[6 + e * 2];
+
+ switch (type) {
+ /* IEC 60958 Conformant, currently handled as MBLA */
+ case 0x00:
+ /* Multi Bit Linear Audio (Raw) */
+ case 0x06:
+ formation->pcm += channels;
+ break;
+ /* MIDI Conformant */
+ case 0x0d:
+ formation->midi = channels;
+ break;
+ /* IEC 61937-3 to 7 */
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ /* Multi Bit Linear Audio */
+ case 0x07: /* DVD-Audio */
+ case 0x0c: /* High Precision */
+ /* One Bit Audio */
+ case 0x08: /* (Plain) Raw */
+ case 0x09: /* (Plain) SACD */
+ case 0x0a: /* (Encoded) Raw */
+ case 0x0b: /* (Encoded) SACD */
+ /* SMPTE Time-Code conformant */
+ case 0x0e:
+ /* Sample Count */
+ case 0x0f:
+ /* Ancillary Data */
+ case 0x10:
+ /* Synchronization Stream (Stereo Raw audio) */
+ case 0x40:
+ /* Don't care */
+ case 0xff:
+ default:
+ return -ENOSYS; /* not supported */
+ }
+ }
+
+ if (formation->pcm > AMDTP_MAX_CHANNELS_FOR_PCM ||
+ formation->midi > AMDTP_MAX_CHANNELS_FOR_MIDI)
+ return -ENOSYS;
+
+ return 0;
+}
+
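+/*
+ * When the unit does not implement the LIST subfunction, build the format
+ * table from the current format alone: keep it as entry 0, then probe each
+ * other rate with SPECIFIC INQUIRY and duplicate the entry with that rate.
+ */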
+static int
+assume_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir,
+ unsigned int pid, u8 *buf, unsigned int *len,
+ u8 **formats)
+{
+ struct snd_oxfw_stream_formation formation;
+ unsigned int i, eid;
+ int err;
+
+ /* get format at current sampling rate */
+ err = avc_stream_get_format_single(oxfw->unit, dir, pid, buf, len);
+ if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to get current stream format for isoc %s plug %d:%d\n",
+ (dir == AVC_GENERAL_PLUG_DIR_IN) ? "in" : "out",
+ pid, err);
+ goto end;
+ }
+
+ /* parse and set stream format */
+ eid = 0;
+ err = snd_oxfw_stream_parse_format(buf, &formation);
+ if (err < 0)
+ goto end;
+
+ formats[eid] = kmalloc(*len, GFP_KERNEL);
+ if (formats[eid] == NULL) {
+ err = -ENOMEM;
+ goto end;
+ }
+ memcpy(formats[eid], buf, *len);
+
+ /* apply the format for each available sampling rate */
+ for (i = 0; i < ARRAY_SIZE(oxfw_rate_table); i++) {
+ if (formation.rate == oxfw_rate_table[i])
+ continue;
+
+ err = avc_general_inquiry_sig_fmt(oxfw->unit,
+ oxfw_rate_table[i],
+ dir, pid);
+ if (err < 0)
+ continue;
+
+ eid++;
+ formats[eid] = kmalloc(*len, GFP_KERNEL);
+ if (formats[eid] == NULL) {
+ err = -ENOMEM;
+ goto end;
+ }
+ memcpy(formats[eid], buf, *len);
+ formats[eid][2] = avc_stream_rate_table[i];
+ }
+
+ err = 0;
+ oxfw->assumed = true;
+end:
+ return err;
+}
+
+static int fill_stream_formats(struct snd_oxfw *oxfw,
+ enum avc_general_plug_dir dir,
+ unsigned short pid)
+{
+ u8 *buf, **formats;
+ unsigned int len, eid = 0;
+ struct snd_oxfw_stream_formation dummy;
+ int err;
+
+ buf = kmalloc(AVC_GENERIC_FRAME_MAXIMUM_BYTES, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (dir == AVC_GENERAL_PLUG_DIR_OUT)
+ formats = oxfw->tx_stream_formats;
+ else
+ formats = oxfw->rx_stream_formats;
+
+ /* get first entry */
+ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
+ err = avc_stream_get_format_list(oxfw->unit, dir, 0, buf, &len, 0);
+ if (err == -ENOSYS) {
+ /* LIST subfunction is not implemented */
+ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
+ err = assume_stream_formats(oxfw, dir, pid, buf, &len,
+ formats);
+ goto end;
+ } else if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to get stream format %d for isoc %s plug %d:%d\n",
+ eid, (dir == AVC_GENERAL_PLUG_DIR_IN) ? "in" : "out",
+ pid, err);
+ goto end;
+ }
+
+ /* LIST subfunction is implemented */
+ while (eid < SND_OXFW_STREAM_FORMAT_ENTRIES) {
+ /* The format is too short. */
+ if (len < 3) {
+ err = -EIO;
+ break;
+ }
+
+ /* parse and set stream format */
+ err = snd_oxfw_stream_parse_format(buf, &dummy);
+ if (err < 0)
+ break;
+
+ formats[eid] = kmalloc(len, GFP_KERNEL);
+ if (formats[eid] == NULL) {
+ err = -ENOMEM;
+ break;
+ }
+ memcpy(formats[eid], buf, len);
+
+ /* get next entry */
+ len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
+ err = avc_stream_get_format_list(oxfw->unit, dir, 0,
+ buf, &len, ++eid);
+ /* No more entries remain. */
+ if (err == -EINVAL) {
+ err = 0;
+ break;
+ } else if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to get stream format %d for isoc %s plug %d:%d\n",
+ eid, (dir == AVC_GENERAL_PLUG_DIR_IN) ? "in" :
+ "out",
+ pid, err);
+ break;
+ }
+ }
+end:
+ kfree(buf);
+ return err;
+}
+
+int snd_oxfw_stream_discover(struct snd_oxfw *oxfw)
+{
+ u8 plugs[AVC_PLUG_INFO_BUF_BYTES];
+ int err;
+
+ /* the number of plugs for isoc in/out, ext in/out */
+ err = avc_general_get_plug_info(oxfw->unit, 0x1f, 0x07, 0x00, plugs);
+ if (err < 0) {
+ dev_err(&oxfw->unit->device,
+ "fail to get info for isoc/external in/out plugs: %d\n",
+ err);
+ goto end;
+ } else if ((plugs[0] == 0) && (plugs[1] == 0)) {
+ err = -ENOSYS;
+ goto end;
+ }
+
+ /* use oPCR[0] if it exists */
+ if (plugs[1] > 0) {
+ err = fill_stream_formats(oxfw, AVC_GENERAL_PLUG_DIR_OUT, 0);
+ if (err < 0)
+ goto end;
+ oxfw->has_output = true;
+ }
+
+ /* use iPCR[0] if it exists */
+ if (plugs[0] > 0)
+ err = fill_stream_formats(oxfw, AVC_GENERAL_PLUG_DIR_IN, 0);
+end:
+ return err;
+}
+
+void snd_oxfw_stream_lock_changed(struct snd_oxfw *oxfw)
+{
+ oxfw->dev_lock_changed = true;
+ wake_up(&oxfw->hwdep_wait);
+}
+
+int snd_oxfw_stream_lock_try(struct snd_oxfw *oxfw)
+{
+ int err;
+
+ spin_lock_irq(&oxfw->lock);
+
+ /* user land has locked this device */
+ if (oxfw->dev_lock_count < 0) {
+ err = -EBUSY;
+ goto end;
+ }
+
+ /* this is the first time */
+ if (oxfw->dev_lock_count++ == 0)
+ snd_oxfw_stream_lock_changed(oxfw);
+ err = 0;
+end:
+ spin_unlock_irq(&oxfw->lock);
+ return err;
+}
+
+void snd_oxfw_stream_lock_release(struct snd_oxfw *oxfw)
+{
+ spin_lock_irq(&oxfw->lock);
+
+ if (WARN_ON(oxfw->dev_lock_count <= 0))
+ goto end;
+ if (--oxfw->dev_lock_count == 0)
+ snd_oxfw_stream_lock_changed(oxfw);
+end:
+ spin_unlock_irq(&oxfw->lock);
+}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
new file mode 100644
index 000000000000..cf1d0b55e827
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw.c
@@ -0,0 +1,317 @@
+/*
+ * oxfw.c - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include "oxfw.h"
+
+#define OXFORD_FIRMWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x50000)
+/* 0x970?vvvv or 0x971?vvvv, where vvvv = firmware version */
+
+#define OXFORD_HARDWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x90020)
+#define OXFORD_HARDWARE_ID_OXFW970 0x39443841
+#define OXFORD_HARDWARE_ID_OXFW971 0x39373100
+
+#define VENDOR_LOUD 0x000ff2
+#define VENDOR_GRIFFIN 0x001292
+#define VENDOR_BEHRINGER 0x001564
+#define VENDOR_LACIE 0x00d04b
+
+#define SPECIFIER_1394TA 0x00a02d
+#define VERSION_AVC 0x010001
+
+MODULE_DESCRIPTION("Oxford Semiconductor FW970/971 driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("snd-firewire-speakers");
+
+static bool detect_loud_models(struct fw_unit *unit)
+{
+ const char *const models[] = {
+ "Onyxi",
+ "Onyx-i",
+ "d.Pro",
+ "Mackie Onyx Satellite",
+ "Tapco LINK.firewire 4x6",
+ "U.420"};
+ char model[32];
+ unsigned int i;
+ int err;
+
+ err = fw_csr_string(unit->directory, CSR_MODEL,
+ model, sizeof(model));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(models); i++) {
+ if (strcmp(models[i], model) == 0)
+ break;
+ }
+
+ return (i < ARRAY_SIZE(models));
+}
+
+static int name_card(struct snd_oxfw *oxfw)
+{
+ struct fw_device *fw_dev = fw_parent_device(oxfw->unit);
+ char vendor[24];
+ char model[32];
+ const char *d, *v, *m;
+ u32 firmware;
+ int err;
+
+ /* get vendor name from root directory */
+ err = fw_csr_string(fw_dev->config_rom + 5, CSR_VENDOR,
+ vendor, sizeof(vendor));
+ if (err < 0)
+ goto end;
+
+ /* get model name from unit directory */
+ err = fw_csr_string(oxfw->unit->directory, CSR_MODEL,
+ model, sizeof(model));
+ if (err < 0)
+ goto end;
+
+ err = snd_fw_transaction(oxfw->unit, TCODE_READ_QUADLET_REQUEST,
+ OXFORD_FIRMWARE_ID_ADDRESS, &firmware, 4, 0);
+ if (err < 0)
+ goto end;
+ be32_to_cpus(&firmware);
+
+ /* to apply card definitions */
+ if (oxfw->device_info) {
+ d = oxfw->device_info->driver_name;
+ v = oxfw->device_info->vendor_name;
+ m = oxfw->device_info->model_name;
+ } else {
+ d = "OXFW";
+ v = vendor;
+ m = model;
+ }
+
+ strcpy(oxfw->card->driver, d);
+ strcpy(oxfw->card->mixername, m);
+ strcpy(oxfw->card->shortname, m);
+
+ snprintf(oxfw->card->longname, sizeof(oxfw->card->longname),
+ "%s %s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
+ v, m, firmware >> 20, firmware & 0xffff,
+ fw_dev->config_rom[3], fw_dev->config_rom[4],
+ dev_name(&oxfw->unit->device), 100 << fw_dev->max_speed);
+end:
+ return err;
+}
+
+static void oxfw_card_free(struct snd_card *card)
+{
+ struct snd_oxfw *oxfw = card->private_data;
+ unsigned int i;
+
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ kfree(oxfw->tx_stream_formats[i]);
+ kfree(oxfw->rx_stream_formats[i]);
+ }
+
+ mutex_destroy(&oxfw->mutex);
+}
+
+static int oxfw_probe(struct fw_unit *unit,
+ const struct ieee1394_device_id *id)
+{
+ struct snd_card *card;
+ struct snd_oxfw *oxfw;
+ int err;
+
+ if ((id->vendor_id == VENDOR_LOUD) && !detect_loud_models(unit))
+ return -ENODEV;
+
+ err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
+ sizeof(*oxfw), &card);
+ if (err < 0)
+ return err;
+
+ card->private_free = oxfw_card_free;
+ oxfw = card->private_data;
+ oxfw->card = card;
+ mutex_init(&oxfw->mutex);
+ oxfw->unit = unit;
+ oxfw->device_info = (const struct device_info *)id->driver_data;
+ spin_lock_init(&oxfw->lock);
+ init_waitqueue_head(&oxfw->hwdep_wait);
+
+ err = snd_oxfw_stream_discover(oxfw);
+ if (err < 0)
+ goto error;
+
+ err = name_card(oxfw);
+ if (err < 0)
+ goto error;
+
+ err = snd_oxfw_create_pcm(oxfw);
+ if (err < 0)
+ goto error;
+
+ if (oxfw->device_info) {
+ err = snd_oxfw_create_mixer(oxfw);
+ if (err < 0)
+ goto error;
+ }
+
+ snd_oxfw_proc_init(oxfw);
+
+ err = snd_oxfw_create_midi(oxfw);
+ if (err < 0)
+ goto error;
+
+ err = snd_oxfw_create_hwdep(oxfw);
+ if (err < 0)
+ goto error;
+
+ err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->rx_stream);
+ if (err < 0)
+ goto error;
+ if (oxfw->has_output) {
+ err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->tx_stream);
+ if (err < 0)
+ goto error;
+ }
+
+ err = snd_card_register(card);
+ if (err < 0) {
+ snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+ if (oxfw->has_output)
+ snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+ goto error;
+ }
+ dev_set_drvdata(&unit->device, oxfw);
+
+ return 0;
+error:
+ snd_card_free(card);
+ return err;
+}
+
+static void oxfw_bus_reset(struct fw_unit *unit)
+{
+ struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
+
+ fcp_bus_reset(oxfw->unit);
+
+ mutex_lock(&oxfw->mutex);
+
+ snd_oxfw_stream_update_simplex(oxfw, &oxfw->rx_stream);
+ if (oxfw->has_output)
+ snd_oxfw_stream_update_simplex(oxfw, &oxfw->tx_stream);
+
+ mutex_unlock(&oxfw->mutex);
+}
+
+static void oxfw_remove(struct fw_unit *unit)
+{
+ struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
+
+ snd_card_disconnect(oxfw->card);
+
+ snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+ if (oxfw->has_output)
+ snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+
+ snd_card_free_when_closed(oxfw->card);
+}
+
+static const struct device_info griffin_firewave = {
+ .driver_name = "FireWave",
+ .vendor_name = "Griffin",
+ .model_name = "FireWave",
+ .mixer_channels = 6,
+ .mute_fb_id = 0x01,
+ .volume_fb_id = 0x02,
+};
+
+static const struct device_info lacie_speakers = {
+ .driver_name = "FWSpeakers",
+ .vendor_name = "LaCie",
+ .model_name = "FireWire Speakers",
+ .mixer_channels = 1,
+ .mute_fb_id = 0x01,
+ .volume_fb_id = 0x01,
+};
+
+static const struct ieee1394_device_id oxfw_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VENDOR_GRIFFIN,
+ .model_id = 0x00f970,
+ .specifier_id = SPECIFIER_1394TA,
+ .version = VERSION_AVC,
+ .driver_data = (kernel_ulong_t)&griffin_firewave,
+ },
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VENDOR_LACIE,
+ .model_id = 0x00f970,
+ .specifier_id = SPECIFIER_1394TA,
+ .version = VERSION_AVC,
+ .driver_data = (kernel_ulong_t)&lacie_speakers,
+ },
+ /* Behringer,F-Control Audio 202 */
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = VENDOR_BEHRINGER,
+ .model_id = 0x00fc22,
+ },
+ /*
+ * Any Mackie(Loud) models (name string/model id):
+ * Onyx-i series (former models): 0x081216
+ * Mackie Onyx Satellite: 0x00200f
+ * Tapco LINK.firewire 4x6: 0x000460
+ * d.2 pro: Unknown
+ * d.4 pro: Unknown
+ * U.420: Unknown
+ * U.420d: Unknown
+ */
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VENDOR_LOUD,
+ .specifier_id = SPECIFIER_1394TA,
+ .version = VERSION_AVC,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
+
+static struct fw_driver oxfw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .bus = &fw_bus_type,
+ },
+ .probe = oxfw_probe,
+ .update = oxfw_bus_reset,
+ .remove = oxfw_remove,
+ .id_table = oxfw_id_table,
+};
+
+static int __init snd_oxfw_init(void)
+{
+ return driver_register(&oxfw_driver.driver);
+}
+
+static void __exit snd_oxfw_exit(void)
+{
+ driver_unregister(&oxfw_driver.driver);
+}
+
+module_init(snd_oxfw_init);
+module_exit(snd_oxfw_exit);
diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h
new file mode 100644
index 000000000000..cace5ad4fe76
--- /dev/null
+++ b/sound/firewire/oxfw/oxfw.h
@@ -0,0 +1,146 @@
+/*
+ * oxfw.h - a part of driver for OXFW970/971 based devices
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/info.h>
+#include <sound/rawmidi.h>
+#include <sound/firewire.h>
+#include <sound/hwdep.h>
+
+#include "../lib.h"
+#include "../fcp.h"
+#include "../packets-buffer.h"
+#include "../iso-resources.h"
+#include "../amdtp.h"
+#include "../cmp.h"
+
+struct device_info {
+ const char *driver_name;
+ const char *vendor_name;
+ const char *model_name;
+ unsigned int mixer_channels;
+ u8 mute_fb_id;
+ u8 volume_fb_id;
+};
+
+/* This is an arbitrary number for convenience. */
+#define SND_OXFW_STREAM_FORMAT_ENTRIES 10
+struct snd_oxfw {
+ struct snd_card *card;
+ struct fw_unit *unit;
+ const struct device_info *device_info;
+ struct mutex mutex;
+ spinlock_t lock;
+
+ bool has_output;
+ u8 *tx_stream_formats[SND_OXFW_STREAM_FORMAT_ENTRIES];
+ u8 *rx_stream_formats[SND_OXFW_STREAM_FORMAT_ENTRIES];
+ bool assumed;
+ struct cmp_connection out_conn;
+ struct cmp_connection in_conn;
+ struct amdtp_stream tx_stream;
+ struct amdtp_stream rx_stream;
+ unsigned int capture_substreams;
+ unsigned int playback_substreams;
+
+ unsigned int midi_input_ports;
+ unsigned int midi_output_ports;
+
+ bool mute;
+ s16 volume[6];
+ s16 volume_min;
+ s16 volume_max;
+
+ int dev_lock_count;
+ bool dev_lock_changed;
+ wait_queue_head_t hwdep_wait;
+};
+
+/*
+ * AV/C Stream Format Information Specification 1.1 Working Draft
+ * (Apr 2005, 1394TA)
+ */
+int avc_stream_set_format(struct fw_unit *unit, enum avc_general_plug_dir dir,
+ unsigned int pid, u8 *format, unsigned int len);
+int avc_stream_get_format(struct fw_unit *unit,
+ enum avc_general_plug_dir dir, unsigned int pid,
+ u8 *buf, unsigned int *len, unsigned int eid);
+static inline int
+avc_stream_get_format_single(struct fw_unit *unit,
+ enum avc_general_plug_dir dir, unsigned int pid,
+ u8 *buf, unsigned int *len)
+{
+ return avc_stream_get_format(unit, dir, pid, buf, len, 0xff);
+}
+static inline int
+avc_stream_get_format_list(struct fw_unit *unit,
+ enum avc_general_plug_dir dir, unsigned int pid,
+ u8 *buf, unsigned int *len,
+ unsigned int eid)
+{
+ return avc_stream_get_format(unit, dir, pid, buf, len, eid);
+}
+
+/*
+ * AV/C Digital Interface Command Set General Specification 4.2
+ * (Sep 2004, 1394TA)
+ */
+int avc_general_inquiry_sig_fmt(struct fw_unit *unit, unsigned int rate,
+ enum avc_general_plug_dir dir,
+ unsigned short pid);
+
+int snd_oxfw_stream_init_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream);
+int snd_oxfw_stream_start_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream,
+ unsigned int rate, unsigned int pcm_channels);
+void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream);
+void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream);
+void snd_oxfw_stream_update_simplex(struct snd_oxfw *oxfw,
+ struct amdtp_stream *stream);
+
+struct snd_oxfw_stream_formation {
+ unsigned int rate;
+ unsigned int pcm;
+ unsigned int midi;
+};
+int snd_oxfw_stream_parse_format(u8 *format,
+ struct snd_oxfw_stream_formation *formation);
+int snd_oxfw_stream_get_current_formation(struct snd_oxfw *oxfw,
+ enum avc_general_plug_dir dir,
+ struct snd_oxfw_stream_formation *formation);
+
+int snd_oxfw_stream_discover(struct snd_oxfw *oxfw);
+
+void snd_oxfw_stream_lock_changed(struct snd_oxfw *oxfw);
+int snd_oxfw_stream_lock_try(struct snd_oxfw *oxfw);
+void snd_oxfw_stream_lock_release(struct snd_oxfw *oxfw);
+
+int snd_oxfw_create_pcm(struct snd_oxfw *oxfw);
+
+int snd_oxfw_create_mixer(struct snd_oxfw *oxfw);
+
+void snd_oxfw_proc_init(struct snd_oxfw *oxfw);
+
+int snd_oxfw_create_midi(struct snd_oxfw *oxfw);
+
+int snd_oxfw_create_hwdep(struct snd_oxfw *oxfw);
diff --git a/sound/firewire/speakers.c b/sound/firewire/speakers.c
deleted file mode 100644
index 768d40ddfebb..000000000000
--- a/sound/firewire/speakers.c
+++ /dev/null
@@ -1,792 +0,0 @@
-/*
- * OXFW970-based speakers driver
- *
- * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- * Licensed under the terms of the GNU General Public License, version 2.
- */
-
-#include <linux/device.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <sound/control.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include "cmp.h"
-#include "fcp.h"
-#include "amdtp.h"
-#include "lib.h"
-
-#define OXFORD_FIRMWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x50000)
-/* 0x970?vvvv or 0x971?vvvv, where vvvv = firmware version */
-
-#define OXFORD_HARDWARE_ID_ADDRESS (CSR_REGISTER_BASE + 0x90020)
-#define OXFORD_HARDWARE_ID_OXFW970 0x39443841
-#define OXFORD_HARDWARE_ID_OXFW971 0x39373100
-
-#define VENDOR_GRIFFIN 0x001292
-#define VENDOR_LACIE 0x00d04b
-
-#define SPECIFIER_1394TA 0x00a02d
-#define VERSION_AVC 0x010001
-
-struct device_info {
- const char *driver_name;
- const char *short_name;
- const char *long_name;
- int (*pcm_constraints)(struct snd_pcm_runtime *runtime);
- unsigned int mixer_channels;
- u8 mute_fb_id;
- u8 volume_fb_id;
-};
-
-struct fwspk {
- struct snd_card *card;
- struct fw_unit *unit;
- const struct device_info *device_info;
- struct mutex mutex;
- struct cmp_connection connection;
- struct amdtp_stream stream;
- bool mute;
- s16 volume[6];
- s16 volume_min;
- s16 volume_max;
-};
-
-MODULE_DESCRIPTION("FireWire speakers driver");
-MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
-MODULE_LICENSE("GPL v2");
-
-static int firewave_rate_constraint(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- static unsigned int stereo_rates[] = { 48000, 96000 };
- struct snd_interval *channels =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- struct snd_interval *rate =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-
- /* two channels work only at 48/96 kHz */
- if (snd_interval_max(channels) < 6)
- return snd_interval_list(rate, 2, stereo_rates, 0);
- return 0;
-}
-
-static int firewave_channels_constraint(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- static const struct snd_interval all_channels = { .min = 6, .max = 6 };
- struct snd_interval *rate =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
-
- /* 32/44.1 kHz work only with all six channels */
- if (snd_interval_max(rate) < 48000)
- return snd_interval_refine(channels, &all_channels);
- return 0;
-}
-
-static int firewave_constraints(struct snd_pcm_runtime *runtime)
-{
- static unsigned int channels_list[] = { 2, 6 };
- static struct snd_pcm_hw_constraint_list channels_list_constraint = {
- .count = 2,
- .list = channels_list,
- };
- int err;
-
- runtime->hw.rates = SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_96000;
- runtime->hw.channels_max = 6;
-
- err = snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- &channels_list_constraint);
- if (err < 0)
- return err;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- firewave_rate_constraint, NULL,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
- if (err < 0)
- return err;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- firewave_channels_constraint, NULL,
- SNDRV_PCM_HW_PARAM_RATE, -1);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int lacie_speakers_constraints(struct snd_pcm_runtime *runtime)
-{
- runtime->hw.rates = SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_88200 |
- SNDRV_PCM_RATE_96000;
-
- return 0;
-}
-
-static int fwspk_open(struct snd_pcm_substream *substream)
-{
- static const struct snd_pcm_hardware hardware = {
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER,
- .formats = AMDTP_OUT_PCM_FORMAT_BITS,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 4 * 1024 * 1024,
- .period_bytes_min = 1,
- .period_bytes_max = UINT_MAX,
- .periods_min = 1,
- .periods_max = UINT_MAX,
- };
- struct fwspk *fwspk = substream->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- int err;
-
- runtime->hw = hardware;
-
- err = fwspk->device_info->pcm_constraints(runtime);
- if (err < 0)
- return err;
- err = snd_pcm_limit_hw_rates(runtime);
- if (err < 0)
- return err;
-
- err = amdtp_stream_add_pcm_hw_constraints(&fwspk->stream, runtime);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int fwspk_close(struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
-static void fwspk_stop_stream(struct fwspk *fwspk)
-{
- if (amdtp_stream_running(&fwspk->stream)) {
- amdtp_stream_stop(&fwspk->stream);
- cmp_connection_break(&fwspk->connection);
- }
-}
-
-static int fwspk_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct fwspk *fwspk = substream->private_data;
- int err;
-
- mutex_lock(&fwspk->mutex);
- fwspk_stop_stream(fwspk);
- mutex_unlock(&fwspk->mutex);
-
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- if (err < 0)
- goto error;
-
- amdtp_stream_set_parameters(&fwspk->stream,
- params_rate(hw_params),
- params_channels(hw_params),
- 0);
-
- amdtp_stream_set_pcm_format(&fwspk->stream,
- params_format(hw_params));
-
- err = avc_general_set_sig_fmt(fwspk->unit, params_rate(hw_params),
- AVC_GENERAL_PLUG_DIR_IN, 0);
- if (err < 0) {
- dev_err(&fwspk->unit->device, "failed to set sample rate\n");
- goto err_buffer;
- }
-
- return 0;
-
-err_buffer:
- snd_pcm_lib_free_vmalloc_buffer(substream);
-error:
- return err;
-}
-
-static int fwspk_hw_free(struct snd_pcm_substream *substream)
-{
- struct fwspk *fwspk = substream->private_data;
-
- mutex_lock(&fwspk->mutex);
- fwspk_stop_stream(fwspk);
- mutex_unlock(&fwspk->mutex);
-
- return snd_pcm_lib_free_vmalloc_buffer(substream);
-}
-
-static int fwspk_prepare(struct snd_pcm_substream *substream)
-{
- struct fwspk *fwspk = substream->private_data;
- int err;
-
- mutex_lock(&fwspk->mutex);
-
- if (amdtp_streaming_error(&fwspk->stream))
- fwspk_stop_stream(fwspk);
-
- if (!amdtp_stream_running(&fwspk->stream)) {
- err = cmp_connection_establish(&fwspk->connection,
- amdtp_stream_get_max_payload(&fwspk->stream));
- if (err < 0)
- goto err_mutex;
-
- err = amdtp_stream_start(&fwspk->stream,
- fwspk->connection.resources.channel,
- fwspk->connection.speed);
- if (err < 0)
- goto err_connection;
- }
-
- mutex_unlock(&fwspk->mutex);
-
- amdtp_stream_pcm_prepare(&fwspk->stream);
-
- return 0;
-
-err_connection:
- cmp_connection_break(&fwspk->connection);
-err_mutex:
- mutex_unlock(&fwspk->mutex);
-
- return err;
-}
-
-static int fwspk_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct fwspk *fwspk = substream->private_data;
- struct snd_pcm_substream *pcm;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pcm = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pcm = NULL;
- break;
- default:
- return -EINVAL;
- }
- amdtp_stream_pcm_trigger(&fwspk->stream, pcm);
- return 0;
-}
-
-static snd_pcm_uframes_t fwspk_pointer(struct snd_pcm_substream *substream)
-{
- struct fwspk *fwspk = substream->private_data;
-
- return amdtp_stream_pcm_pointer(&fwspk->stream);
-}
-
-static int fwspk_create_pcm(struct fwspk *fwspk)
-{
- static struct snd_pcm_ops ops = {
- .open = fwspk_open,
- .close = fwspk_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fwspk_hw_params,
- .hw_free = fwspk_hw_free,
- .prepare = fwspk_prepare,
- .trigger = fwspk_trigger,
- .pointer = fwspk_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
- };
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(fwspk->card, "OXFW970", 0, 1, 0, &pcm);
- if (err < 0)
- return err;
- pcm->private_data = fwspk;
- strcpy(pcm->name, fwspk->device_info->short_name);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ops);
- return 0;
-}
-
-enum control_action { CTL_READ, CTL_WRITE };
-enum control_attribute {
- CTL_MIN = 0x02,
- CTL_MAX = 0x03,
- CTL_CURRENT = 0x10,
-};
-
-static int fwspk_mute_command(struct fwspk *fwspk, bool *value,
- enum control_action action)
-{
- u8 *buf;
- u8 response_ok;
- int err;
-
- buf = kmalloc(11, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (action == CTL_READ) {
- buf[0] = 0x01; /* AV/C, STATUS */
- response_ok = 0x0c; /* STABLE */
- } else {
- buf[0] = 0x00; /* AV/C, CONTROL */
- response_ok = 0x09; /* ACCEPTED */
- }
- buf[1] = 0x08; /* audio unit 0 */
- buf[2] = 0xb8; /* FUNCTION BLOCK */
- buf[3] = 0x81; /* function block type: feature */
- buf[4] = fwspk->device_info->mute_fb_id; /* function block ID */
- buf[5] = 0x10; /* control attribute: current */
- buf[6] = 0x02; /* selector length */
- buf[7] = 0x00; /* audio channel number */
- buf[8] = 0x01; /* control selector: mute */
- buf[9] = 0x01; /* control data length */
- if (action == CTL_READ)
- buf[10] = 0xff;
- else
- buf[10] = *value ? 0x70 : 0x60;
-
- err = fcp_avc_transaction(fwspk->unit, buf, 11, buf, 11, 0x3fe);
- if (err < 0)
- goto error;
- if (err < 11) {
- dev_err(&fwspk->unit->device, "short FCP response\n");
- err = -EIO;
- goto error;
- }
- if (buf[0] != response_ok) {
- dev_err(&fwspk->unit->device, "mute command failed\n");
- err = -EIO;
- goto error;
- }
- if (action == CTL_READ)
- *value = buf[10] == 0x70;
-
- err = 0;
-
-error:
- kfree(buf);
-
- return err;
-}
-
-static int fwspk_volume_command(struct fwspk *fwspk, s16 *value,
- unsigned int channel,
- enum control_attribute attribute,
- enum control_action action)
-{
- u8 *buf;
- u8 response_ok;
- int err;
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (action == CTL_READ) {
- buf[0] = 0x01; /* AV/C, STATUS */
- response_ok = 0x0c; /* STABLE */
- } else {
- buf[0] = 0x00; /* AV/C, CONTROL */
- response_ok = 0x09; /* ACCEPTED */
- }
- buf[1] = 0x08; /* audio unit 0 */
- buf[2] = 0xb8; /* FUNCTION BLOCK */
- buf[3] = 0x81; /* function block type: feature */
- buf[4] = fwspk->device_info->volume_fb_id; /* function block ID */
- buf[5] = attribute; /* control attribute */
- buf[6] = 0x02; /* selector length */
- buf[7] = channel; /* audio channel number */
- buf[8] = 0x02; /* control selector: volume */
- buf[9] = 0x02; /* control data length */
- if (action == CTL_READ) {
- buf[10] = 0xff;
- buf[11] = 0xff;
- } else {
- buf[10] = *value >> 8;
- buf[11] = *value;
- }
-
- err = fcp_avc_transaction(fwspk->unit, buf, 12, buf, 12, 0x3fe);
- if (err < 0)
- goto error;
- if (err < 12) {
- dev_err(&fwspk->unit->device, "short FCP response\n");
- err = -EIO;
- goto error;
- }
- if (buf[0] != response_ok) {
- dev_err(&fwspk->unit->device, "volume command failed\n");
- err = -EIO;
- goto error;
- }
- if (action == CTL_READ)
- *value = (buf[10] << 8) | buf[11];
-
- err = 0;
-
-error:
- kfree(buf);
-
- return err;
-}
-
-static int fwspk_mute_get(struct snd_kcontrol *control,
- struct snd_ctl_elem_value *value)
-{
- struct fwspk *fwspk = control->private_data;
-
- value->value.integer.value[0] = !fwspk->mute;
-
- return 0;
-}
-
-static int fwspk_mute_put(struct snd_kcontrol *control,
- struct snd_ctl_elem_value *value)
-{
- struct fwspk *fwspk = control->private_data;
- bool mute;
- int err;
-
- mute = !value->value.integer.value[0];
-
- if (mute == fwspk->mute)
- return 0;
-
- err = fwspk_mute_command(fwspk, &mute, CTL_WRITE);
- if (err < 0)
- return err;
- fwspk->mute = mute;
-
- return 1;
-}
-
-static int fwspk_volume_info(struct snd_kcontrol *control,
- struct snd_ctl_elem_info *info)
-{
- struct fwspk *fwspk = control->private_data;
-
- info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- info->count = fwspk->device_info->mixer_channels;
- info->value.integer.min = fwspk->volume_min;
- info->value.integer.max = fwspk->volume_max;
-
- return 0;
-}
-
-static const u8 channel_map[6] = { 0, 1, 4, 5, 2, 3 };
-
-static int fwspk_volume_get(struct snd_kcontrol *control,
- struct snd_ctl_elem_value *value)
-{
- struct fwspk *fwspk = control->private_data;
- unsigned int i;
-
- for (i = 0; i < fwspk->device_info->mixer_channels; ++i)
- value->value.integer.value[channel_map[i]] = fwspk->volume[i];
-
- return 0;
-}
-
-static int fwspk_volume_put(struct snd_kcontrol *control,
- struct snd_ctl_elem_value *value)
-{
- struct fwspk *fwspk = control->private_data;
- unsigned int i, changed_channels;
- bool equal_values = true;
- s16 volume;
- int err;
-
- for (i = 0; i < fwspk->device_info->mixer_channels; ++i) {
- if (value->value.integer.value[i] < fwspk->volume_min ||
- value->value.integer.value[i] > fwspk->volume_max)
- return -EINVAL;
- if (value->value.integer.value[i] !=
- value->value.integer.value[0])
- equal_values = false;
- }
-
- changed_channels = 0;
- for (i = 0; i < fwspk->device_info->mixer_channels; ++i)
- if (value->value.integer.value[channel_map[i]] !=
- fwspk->volume[i])
- changed_channels |= 1 << (i + 1);
-
- if (equal_values && changed_channels != 0)
- changed_channels = 1 << 0;
-
- for (i = 0; i <= fwspk->device_info->mixer_channels; ++i) {
- volume = value->value.integer.value[channel_map[i ? i - 1 : 0]];
- if (changed_channels & (1 << i)) {
- err = fwspk_volume_command(fwspk, &volume, i,
- CTL_CURRENT, CTL_WRITE);
- if (err < 0)
- return err;
- }
- if (i > 0)
- fwspk->volume[i - 1] = volume;
- }
-
- return changed_channels != 0;
-}
-
-static int fwspk_create_mixer(struct fwspk *fwspk)
-{
- static const struct snd_kcontrol_new controls[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .info = snd_ctl_boolean_mono_info,
- .get = fwspk_mute_get,
- .put = fwspk_mute_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .info = fwspk_volume_info,
- .get = fwspk_volume_get,
- .put = fwspk_volume_put,
- },
- };
- unsigned int i, first_ch;
- int err;
-
- err = fwspk_volume_command(fwspk, &fwspk->volume_min,
- 0, CTL_MIN, CTL_READ);
- if (err < 0)
- return err;
- err = fwspk_volume_command(fwspk, &fwspk->volume_max,
- 0, CTL_MAX, CTL_READ);
- if (err < 0)
- return err;
-
- err = fwspk_mute_command(fwspk, &fwspk->mute, CTL_READ);
- if (err < 0)
- return err;
-
- first_ch = fwspk->device_info->mixer_channels == 1 ? 0 : 1;
- for (i = 0; i < fwspk->device_info->mixer_channels; ++i) {
- err = fwspk_volume_command(fwspk, &fwspk->volume[i],
- first_ch + i, CTL_CURRENT, CTL_READ);
- if (err < 0)
- return err;
- }
-
- for (i = 0; i < ARRAY_SIZE(controls); ++i) {
- err = snd_ctl_add(fwspk->card,
- snd_ctl_new1(&controls[i], fwspk));
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static u32 fwspk_read_firmware_version(struct fw_unit *unit)
-{
- __be32 data;
- int err;
-
- err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
- OXFORD_FIRMWARE_ID_ADDRESS, &data, 4, 0);
- return err >= 0 ? be32_to_cpu(data) : 0;
-}
-
-static void fwspk_card_free(struct snd_card *card)
-{
- struct fwspk *fwspk = card->private_data;
-
- amdtp_stream_destroy(&fwspk->stream);
- cmp_connection_destroy(&fwspk->connection);
- fw_unit_put(fwspk->unit);
- mutex_destroy(&fwspk->mutex);
-}
-
-static int fwspk_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *id)
-{
- struct fw_device *fw_dev = fw_parent_device(unit);
- struct snd_card *card;
- struct fwspk *fwspk;
- u32 firmware;
- int err;
-
- err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
- sizeof(*fwspk), &card);
- if (err < 0)
- return err;
-
- fwspk = card->private_data;
- fwspk->card = card;
- mutex_init(&fwspk->mutex);
- fwspk->unit = fw_unit_get(unit);
- fwspk->device_info = (const struct device_info *)id->driver_data;
-
- err = cmp_connection_init(&fwspk->connection, unit, CMP_INPUT, 0);
- if (err < 0)
- goto err_unit;
-
- err = amdtp_stream_init(&fwspk->stream, unit, AMDTP_OUT_STREAM,
- CIP_NONBLOCKING);
- if (err < 0)
- goto err_connection;
-
- card->private_free = fwspk_card_free;
-
- strcpy(card->driver, fwspk->device_info->driver_name);
- strcpy(card->shortname, fwspk->device_info->short_name);
- firmware = fwspk_read_firmware_version(unit);
- snprintf(card->longname, sizeof(card->longname),
- "%s (OXFW%x %04x), GUID %08x%08x at %s, S%d",
- fwspk->device_info->long_name,
- firmware >> 20, firmware & 0xffff,
- fw_dev->config_rom[3], fw_dev->config_rom[4],
- dev_name(&unit->device), 100 << fw_dev->max_speed);
- strcpy(card->mixername, "OXFW970");
-
- err = fwspk_create_pcm(fwspk);
- if (err < 0)
- goto error;
-
- err = fwspk_create_mixer(fwspk);
- if (err < 0)
- goto error;
-
- err = snd_card_register(card);
- if (err < 0)
- goto error;
-
- dev_set_drvdata(&unit->device, fwspk);
-
- return 0;
-
-err_connection:
- cmp_connection_destroy(&fwspk->connection);
-err_unit:
- fw_unit_put(fwspk->unit);
- mutex_destroy(&fwspk->mutex);
-error:
- snd_card_free(card);
- return err;
-}
-
-static void fwspk_bus_reset(struct fw_unit *unit)
-{
- struct fwspk *fwspk = dev_get_drvdata(&unit->device);
-
- fcp_bus_reset(fwspk->unit);
-
- if (cmp_connection_update(&fwspk->connection) < 0) {
- amdtp_stream_pcm_abort(&fwspk->stream);
- mutex_lock(&fwspk->mutex);
- fwspk_stop_stream(fwspk);
- mutex_unlock(&fwspk->mutex);
- return;
- }
-
- amdtp_stream_update(&fwspk->stream);
-}
-
-static void fwspk_remove(struct fw_unit *unit)
-{
- struct fwspk *fwspk = dev_get_drvdata(&unit->device);
-
- amdtp_stream_pcm_abort(&fwspk->stream);
- snd_card_disconnect(fwspk->card);
-
- mutex_lock(&fwspk->mutex);
- fwspk_stop_stream(fwspk);
- mutex_unlock(&fwspk->mutex);
-
- snd_card_free_when_closed(fwspk->card);
-}
-
-static const struct device_info griffin_firewave = {
- .driver_name = "FireWave",
- .short_name = "FireWave",
- .long_name = "Griffin FireWave Surround",
- .pcm_constraints = firewave_constraints,
- .mixer_channels = 6,
- .mute_fb_id = 0x01,
- .volume_fb_id = 0x02,
-};
-
-static const struct device_info lacie_speakers = {
- .driver_name = "FWSpeakers",
- .short_name = "FireWire Speakers",
- .long_name = "LaCie FireWire Speakers",
- .pcm_constraints = lacie_speakers_constraints,
- .mixer_channels = 1,
- .mute_fb_id = 0x01,
- .volume_fb_id = 0x01,
-};
-
-static const struct ieee1394_device_id fwspk_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID |
- IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .vendor_id = VENDOR_GRIFFIN,
- .model_id = 0x00f970,
- .specifier_id = SPECIFIER_1394TA,
- .version = VERSION_AVC,
- .driver_data = (kernel_ulong_t)&griffin_firewave,
- },
- {
- .match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_MODEL_ID |
- IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .vendor_id = VENDOR_LACIE,
- .model_id = 0x00f970,
- .specifier_id = SPECIFIER_1394TA,
- .version = VERSION_AVC,
- .driver_data = (kernel_ulong_t)&lacie_speakers,
- },
- { }
-};
-MODULE_DEVICE_TABLE(ieee1394, fwspk_id_table);
-
-static struct fw_driver fwspk_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = KBUILD_MODNAME,
- .bus = &fw_bus_type,
- },
- .probe = fwspk_probe,
- .update = fwspk_bus_reset,
- .remove = fwspk_remove,
- .id_table = fwspk_id_table,
-};
-
-static int __init alsa_fwspk_init(void)
-{
- return driver_register(&fwspk_driver.driver);
-}
-
-static void __exit alsa_fwspk_exit(void)
-{
- driver_unregister(&fwspk_driver.driver);
-}
-
-module_init(alsa_fwspk_init);
-module_exit(alsa_fwspk_exit);
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index f3735e64791c..67dbfde837ab 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -465,17 +465,10 @@ static int snd_akm4xxx_stereo_volume_put(struct snd_kcontrol *kcontrol,
static int snd_akm4xxx_deemphasis_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {
+ static const char * const texts[4] = {
"44.1kHz", "Off", "48kHz", "32kHz",
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item >= 4)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_akm4xxx_deemphasis_get(struct snd_kcontrol *kcontrol,
@@ -570,22 +563,13 @@ static int ak4xxx_capture_source_info(struct snd_kcontrol *kcontrol,
{
struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol);
int mixer_ch = AK_GET_SHIFT(kcontrol->private_value);
- const char **input_names;
- unsigned int num_names, idx;
+ unsigned int num_names;
num_names = ak4xxx_capture_num_inputs(ak, mixer_ch);
if (!num_names)
return -EINVAL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = num_names;
- idx = uinfo->value.enumerated.item;
- if (idx >= num_names)
- return -EINVAL;
- input_names = ak->adc_info[mixer_ch].input_names;
- strlcpy(uinfo->value.enumerated.name, input_names[idx],
- sizeof(uinfo->value.enumerated.name));
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_names,
+ ak->adc_info[mixer_ch].input_names);
}
static int ak4xxx_capture_source_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index f0fd98e695e3..01a07986f4a3 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -731,18 +731,12 @@ int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
static int snd_ad1816a_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[8] = {
+ static const char * const texts[8] = {
"Line", "Mix", "CD", "Synth", "Video",
"Mic", "Phone",
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 2;
- uinfo->value.enumerated.items = 7;
- if (uinfo->value.enumerated.item > 6)
- uinfo->value.enumerated.item = 6;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 2, 7, texts);
}
static int snd_ad1816a_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index b3b4f15e45ba..b5450143407b 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -614,8 +614,7 @@ static int snd_es1688_free(struct snd_es1688 *chip)
{
if (chip->hardware != ES1688_HW_UNDEF)
snd_es1688_init(chip, 0);
- if (chip->res_port)
- release_and_free_resource(chip->res_port);
+ release_and_free_resource(chip->res_port);
if (chip->irq >= 0)
free_irq(chip->irq, (void *) chip);
if (chip->dma8 >= 0) {
@@ -762,18 +761,12 @@ int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip,
static int snd_es1688_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[9] = {
+ static const char * const texts[8] = {
"Mic", "Mic Master", "CD", "AOUT",
"Mic1", "Mix", "Line", "Master"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item > 7)
- uinfo->value.enumerated.item = 7;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 8, texts);
}
static int snd_es1688_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 6faaac60161a..b481bb8c31bc 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -156,6 +156,7 @@ struct snd_es18xx {
#define ES18XX_I2S 0x0200 /* I2S mixer control */
#define ES18XX_MUTEREC 0x0400 /* Record source can be muted */
#define ES18XX_CONTROL 0x0800 /* Has control ports */
+#define ES18XX_GPO_2BIT 0x1000 /* GPO0,1 controlled by PM port */
/* Power Management */
#define ES18XX_PM 0x07
@@ -964,44 +965,28 @@ static int snd_es18xx_capture_close(struct snd_pcm_substream *substream)
static int snd_es18xx_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts5Source[5] = {
+ static const char * const texts5Source[5] = {
"Mic", "CD", "Line", "Master", "Mix"
};
- static char *texts8Source[8] = {
+ static const char * const texts8Source[8] = {
"Mic", "Mic Master", "CD", "AOUT",
"Mic1", "Mix", "Line", "Master"
};
struct snd_es18xx *chip = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
switch (chip->version) {
case 0x1868:
case 0x1878:
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name,
- texts5Source[uinfo->value.enumerated.item]);
- break;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts5Source);
case 0x1887:
case 0x1888:
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item > 4)
- uinfo->value.enumerated.item = 4;
- strcpy(uinfo->value.enumerated.name, texts5Source[uinfo->value.enumerated.item]);
- break;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts5Source);
 	case 0x1869: /* DS somewhat contradictory for 1869: could be 5 or 8 */
case 0x1879:
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item > 7)
- uinfo->value.enumerated.item = 7;
- strcpy(uinfo->value.enumerated.name, texts8Source[uinfo->value.enumerated.item]);
- break;
+ return snd_ctl_enum_info(uinfo, 1, 8, texts8Source);
default:
return -EINVAL;
}
- return 0;
}
static int snd_es18xx_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1136,11 +1121,14 @@ static int snd_es18xx_reg_read(struct snd_es18xx *chip, unsigned char reg)
return snd_es18xx_read(chip, reg);
}
-#define ES18XX_SINGLE(xname, xindex, reg, shift, mask, invert) \
+#define ES18XX_SINGLE(xname, xindex, reg, shift, mask, flags) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
.info = snd_es18xx_info_single, \
.get = snd_es18xx_get_single, .put = snd_es18xx_put_single, \
- .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }
+ .private_value = reg | (shift << 8) | (mask << 16) | (flags << 24) }
+
+#define ES18XX_FL_INVERT (1 << 0)
+#define ES18XX_FL_PMPORT (1 << 1)
static int snd_es18xx_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
@@ -1159,10 +1147,14 @@ static int snd_es18xx_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_e
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
- int invert = (kcontrol->private_value >> 24) & 0xff;
+ int invert = (kcontrol->private_value >> 24) & ES18XX_FL_INVERT;
+ int pm_port = (kcontrol->private_value >> 24) & ES18XX_FL_PMPORT;
int val;
-
- val = snd_es18xx_reg_read(chip, reg);
+
+ if (pm_port)
+ val = inb(chip->port + ES18XX_PM);
+ else
+ val = snd_es18xx_reg_read(chip, reg);
ucontrol->value.integer.value[0] = (val >> shift) & mask;
if (invert)
ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
@@ -1175,7 +1167,8 @@ static int snd_es18xx_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_e
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
- int invert = (kcontrol->private_value >> 24) & 0xff;
+ int invert = (kcontrol->private_value >> 24) & ES18XX_FL_INVERT;
+ int pm_port = (kcontrol->private_value >> 24) & ES18XX_FL_PMPORT;
unsigned char val;
val = (ucontrol->value.integer.value[0] & mask);
@@ -1183,6 +1176,15 @@ static int snd_es18xx_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_e
val = mask - val;
mask <<= shift;
val <<= shift;
+ if (pm_port) {
+ unsigned char cur = inb(chip->port + ES18XX_PM);
+
+ if ((cur & mask) == val)
+ return 0;
+ outb((cur & ~mask) | val, chip->port + ES18XX_PM);
+ return 1;
+ }
+
return snd_es18xx_reg_bits(chip, reg, mask, val) != val;
}
@@ -1304,7 +1306,7 @@ static struct snd_kcontrol_new snd_es18xx_opt_speaker =
ES18XX_SINGLE("Beep Playback Volume", 0, 0x3c, 0, 7, 0);
static struct snd_kcontrol_new snd_es18xx_opt_1869[] = {
-ES18XX_SINGLE("Capture Switch", 0, 0x1c, 4, 1, 1),
+ES18XX_SINGLE("Capture Switch", 0, 0x1c, 4, 1, ES18XX_FL_INVERT),
ES18XX_SINGLE("Video Playback Switch", 0, 0x7f, 0, 1, 0),
ES18XX_DOUBLE("Mono Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0),
ES18XX_DOUBLE("Mono Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0)
@@ -1363,6 +1365,11 @@ static struct snd_kcontrol_new snd_es18xx_hw_volume_controls[] = {
ES18XX_SINGLE("Hardware Master Volume Split", 0, 0x64, 7, 1, 0),
};
+static struct snd_kcontrol_new snd_es18xx_opt_gpo_2bit[] = {
+ES18XX_SINGLE("GPO0 Switch", 0, ES18XX_PM, 0, 1, ES18XX_FL_PMPORT),
+ES18XX_SINGLE("GPO1 Switch", 0, ES18XX_PM, 1, 1, ES18XX_FL_PMPORT),
+};
+
static int snd_es18xx_config_read(struct snd_es18xx *chip, unsigned char reg)
{
int data;
@@ -1629,10 +1636,10 @@ static int snd_es18xx_probe(struct snd_es18xx *chip,
switch (chip->version) {
case 0x1868:
- chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_CONTROL;
+ chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_CONTROL | ES18XX_GPO_2BIT;
break;
case 0x1869:
- chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_MONO | ES18XX_MUTEREC | ES18XX_CONTROL | ES18XX_HWV;
+ chip->caps = ES18XX_PCM2 | ES18XX_SPATIALIZER | ES18XX_RECMIX | ES18XX_NEW_RATE | ES18XX_AUXB | ES18XX_MONO | ES18XX_MUTEREC | ES18XX_CONTROL | ES18XX_HWV | ES18XX_GPO_2BIT;
break;
case 0x1878:
chip->caps = ES18XX_DUPLEX_MONO | ES18XX_DUPLEX_SAME | ES18XX_I2S | ES18XX_CONTROL;
@@ -1642,7 +1649,7 @@ static int snd_es18xx_probe(struct snd_es18xx *chip,
break;
case 0x1887:
case 0x1888:
- chip->caps = ES18XX_PCM2 | ES18XX_RECMIX | ES18XX_AUXB | ES18XX_DUPLEX_SAME;
+ chip->caps = ES18XX_PCM2 | ES18XX_RECMIX | ES18XX_AUXB | ES18XX_DUPLEX_SAME | ES18XX_GPO_2BIT;
break;
default:
snd_printk(KERN_ERR "[0x%lx] unsupported chip ES%x\n",
@@ -1944,6 +1951,15 @@ static int snd_es18xx_mixer(struct snd_card *card)
return err;
}
}
+ if (chip->caps & ES18XX_GPO_2BIT) {
+ for (idx = 0; idx < ARRAY_SIZE(snd_es18xx_opt_gpo_2bit); idx++) {
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_es18xx_opt_gpo_2bit[idx],
+ chip));
+ if (err < 0)
+ return err;
+ }
+ }
return 0;
}
diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c
index 031dc69b7470..17e49a071af4 100644
--- a/sound/isa/msnd/msnd_pinnacle_mixer.c
+++ b/sound/isa/msnd/msnd_pinnacle_mixer.c
@@ -55,20 +55,13 @@
static int snd_msndmix_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {
+ static const char * const texts[3] = {
"Analog", "MASS", "SPDIF",
};
struct snd_msnd *chip = snd_kcontrol_chip(kcontrol);
unsigned items = test_bit(F_HAVEDIGITAL, &chip->flags) ? 3 : 2;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = items;
- if (uinfo->value.enumerated.item >= items)
- uinfo->value.enumerated.item = items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, items, texts);
}
static int snd_msndmix_get_mux(struct snd_kcontrol *kcontrol,
diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c
index 4e3fcfb15ad4..95b39beb61c1 100644
--- a/sound/isa/sb/emu8000_synth.c
+++ b/sound/isa/sb/emu8000_synth.c
@@ -105,8 +105,7 @@ static int snd_emu8000_delete_device(struct snd_seq_device *dev)
snd_device_free(dev->card, hw->pcm);
if (hw->emu)
snd_emux_free(hw->emu);
- if (hw->memhdr)
- snd_util_memhdr_free(hw->memhdr);
+ snd_util_memhdr_free(hw->memhdr);
hw->emu = NULL;
hw->memhdr = NULL;
return 0;
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 0bbcd4714d28..72b10f4f3e70 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -702,17 +702,11 @@ static int snd_sb16_get_dma_mode(struct snd_sb *chip)
static int snd_sb16_dma_control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {
+ static const char * const texts[3] = {
"Auto", "Playback", "Capture"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_sb16_dma_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/isa/sb/sb_common.c b/sound/isa/sb/sb_common.c
index 3ef990602cdd..f22b4480828e 100644
--- a/sound/isa/sb/sb_common.c
+++ b/sound/isa/sb/sb_common.c
@@ -184,8 +184,7 @@ static int snd_sbdsp_probe(struct snd_sb * chip)
static int snd_sbdsp_free(struct snd_sb *chip)
{
- if (chip->res_port)
- release_and_free_resource(chip->res_port);
+ release_and_free_resource(chip->res_port);
if (chip->irq >= 0)
free_irq(chip->irq, (void *) chip);
#ifdef CONFIG_ISA
diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c
index 1ff78ec9f0ac..e403334a19ad 100644
--- a/sound/isa/sb/sb_mixer.c
+++ b/sound/isa/sb/sb_mixer.c
@@ -182,17 +182,11 @@ static int snd_sbmixer_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_
static int snd_dt019x_input_sw_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[5] = {
+ static const char * const texts[5] = {
"CD", "Mic", "Line", "Synth", "Master"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item > 4)
- uinfo->value.enumerated.item = 4;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int snd_dt019x_input_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -275,18 +269,11 @@ static int snd_dt019x_input_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl
static int snd_als4k_mono_capture_route_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[3] = {
+ static const char * const texts[3] = {
"L chan only", "R chan only", "L ch/2 + R ch/2"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_als4k_mono_capture_route_get(struct snd_kcontrol *kcontrol,
@@ -335,17 +322,11 @@ static int snd_als4k_mono_capture_route_put(struct snd_kcontrol *kcontrol,
static int snd_sb8mixer_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[3] = {
+ static const char * const texts[3] = {
"Mic", "CD", "Line"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 360b08b03e1d..347bb1bda110 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1993,25 +1993,20 @@ EXPORT_SYMBOL(snd_wss_timer);
static int snd_wss_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {
+ static const char * const texts[4] = {
"Line", "Aux", "Mic", "Mix"
};
- static char *opl3sa_texts[4] = {
+ static const char * const opl3sa_texts[4] = {
"Line", "CD", "Mic", "Mix"
};
- static char *gusmax_texts[4] = {
+ static const char * const gusmax_texts[4] = {
"Line", "Synth", "Mic", "Mix"
};
- char **ptexts = texts;
+ const char * const *ptexts = texts;
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
if (snd_BUG_ON(!chip->card))
return -EINVAL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 2;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
if (!strcmp(chip->card->driver, "GUS MAX"))
ptexts = gusmax_texts;
switch (chip->hardware) {
@@ -2023,8 +2018,7 @@ static int snd_wss_info_mux(struct snd_kcontrol *kcontrol,
ptexts = opl3sa_texts;
break;
}
- strcpy(uinfo->value.enumerated.name, ptexts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 2, 4, ptexts);
}
static int snd_wss_get_mux(struct snd_kcontrol *kcontrol,
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 23441b9e6148..ede449f0b50d 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -929,7 +929,6 @@ static struct platform_driver hal2_driver = {
.remove = hal2_remove,
.driver = {
.name = "sgihal2",
- .owner = THIS_MODULE,
}
};
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 04bb06c03ec8..f07aa3993f83 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -201,17 +201,10 @@ static int sgio2audio_gain_put(struct snd_kcontrol *kcontrol,
static int sgio2audio_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[3] = {
+ static const char * const texts[3] = {
"Cam Mic", "Mic", "Line"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= 3)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int sgio2audio_source_get(struct snd_kcontrol *kcontrol,
@@ -970,7 +963,6 @@ static struct platform_driver sgio2audio_driver = {
.remove = snd_sgio2audio_remove,
.driver = {
.name = "sgio2audio",
- .owner = THIS_MODULE,
}
};
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index c2d45a5848bc..3f653618614d 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -729,7 +729,6 @@ static struct platform_driver amiga_audio_driver = {
.remove = __exit_p(amiga_audio_remove),
.driver = {
.name = "amiga-audio",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/oss/uart401.c b/sound/oss/uart401.c
index 279bc565ac7e..dae4d4344407 100644
--- a/sound/oss/uart401.c
+++ b/sound/oss/uart401.c
@@ -412,13 +412,10 @@ void unload_uart401(struct address_info *hw_config)
if (!devc->share_irq)
free_irq(devc->irq, devc);
- if (devc)
- {
- kfree(midi_devs[devc->my_dev]->converter);
- kfree(midi_devs[devc->my_dev]);
- kfree(devc);
- devc = NULL;
- }
+ kfree(midi_devs[devc->my_dev]->converter);
+ kfree(midi_devs[devc->my_dev]);
+ kfree(devc);
+
/* This kills midi_devs[x] */
sound_unload_mididev(hw_config->slots[4]);
}
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index 4b20be79c1dd..29604a239c44 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -776,15 +776,9 @@ static int
snd_harmony_captureroute_info(struct snd_kcontrol *kc,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "Line", "Mic" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ static const char * const texts[2] = { "Line", "Mic" };
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 14ad54b7928c..5ee2f17c287c 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -463,14 +463,8 @@ static int snd_ac97_info_enum_double(struct snd_kcontrol *kcontrol,
{
struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
- uinfo->value.enumerated.items = e->mask;
-
- if (uinfo->value.enumerated.item > e->mask - 1)
- uinfo->value.enumerated.item = e->mask - 1;
- strcpy(uinfo->value.enumerated.name, e->texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2,
+ e->mask, e->texts);
}
static int snd_ac97_get_enum_double(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 991762215417..ceaac1c41906 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -33,7 +33,8 @@
static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97,
const char *name);
static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name,
- const unsigned int *tlv, const char **slaves);
+ const unsigned int *tlv,
+ const char * const *slaves);
/*
* Chip specific initialization
@@ -81,22 +82,11 @@ static int ac97_update_bits_page(struct snd_ac97 *ac97, unsigned short reg, unsi
/*
* shared line-in/mic controls
*/
-static int ac97_enum_text_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo,
- const char **texts, unsigned int nums)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = nums;
- if (uinfo->value.enumerated.item > nums - 1)
- uinfo->value.enumerated.item = nums - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
-}
-
static int ac97_surround_jack_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[] = { "Shared", "Independent" };
- return ac97_enum_text_info(kcontrol, uinfo, texts, 2);
+ static const char * const texts[] = { "Shared", "Independent" };
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int ac97_surround_jack_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -123,9 +113,9 @@ static int ac97_surround_jack_mode_put(struct snd_kcontrol *kcontrol, struct snd
static int ac97_channel_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[] = { "2ch", "4ch", "6ch", "8ch" };
- return ac97_enum_text_info(kcontrol, uinfo, texts,
- kcontrol->private_value);
+ static const char * const texts[] = { "2ch", "4ch", "6ch", "8ch" };
+
+ return snd_ctl_enum_info(uinfo, 1, kcontrol->private_value, texts);
}
static int ac97_channel_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -240,17 +230,11 @@ static inline int alc850_is_aux_back_surround(struct snd_ac97 *ac97)
static int snd_ac97_ymf7x3_info_speaker(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {
+ static const char * const texts[3] = {
"Standard", "Small", "Smaller"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_ac97_ymf7x3_get_speaker(struct snd_kcontrol *kcontrol,
@@ -293,15 +277,9 @@ static const struct snd_kcontrol_new snd_ac97_ymf7x3_controls_speaker =
static int snd_ac97_ymf7x3_spdif_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "AC-Link", "A/D Converter" };
+ static const char * const texts[2] = { "AC-Link", "A/D Converter" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ac97_ymf7x3_spdif_source_get(struct snd_kcontrol *kcontrol,
@@ -401,15 +379,9 @@ static int patch_yamaha_ymf743(struct snd_ac97 *ac97)
There is also a bit to mute S/PDIF output in a vendor-specific register. */
static int snd_ac97_ymf753_spdif_output_pin_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = { "Disabled", "Pin 43", "Pin 48" };
+ static const char * const texts[3] = { "Disabled", "Pin 43", "Pin 48" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_ac97_ymf753_spdif_output_pin_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1103,16 +1075,11 @@ static int patch_sigmatel_stac9756(struct snd_ac97 * ac97)
static int snd_ac97_stac9758_output_jack_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[5] = { "Input/Disabled", "Front Output",
+ static const char * const texts[5] = {
+ "Input/Disabled", "Front Output",
"Rear Output", "Center/LFE Output", "Mixer Output" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item > 4)
- uinfo->value.enumerated.item = 4;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int snd_ac97_stac9758_output_jack_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1147,16 +1114,11 @@ static int snd_ac97_stac9758_output_jack_put(struct snd_kcontrol *kcontrol, stru
static int snd_ac97_stac9758_input_jack_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[7] = { "Mic2 Jack", "Mic1 Jack", "Line In Jack",
+ static const char * const texts[7] = {
+ "Mic2 Jack", "Mic1 Jack", "Line In Jack",
"Front Jack", "Rear Jack", "Center/LFE Jack", "Mute" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 7;
- if (uinfo->value.enumerated.item > 6)
- uinfo->value.enumerated.item = 6;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 7, texts);
}
static int snd_ac97_stac9758_input_jack_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1181,15 +1143,11 @@ static int snd_ac97_stac9758_input_jack_put(struct snd_kcontrol *kcontrol, struc
static int snd_ac97_stac9758_phonesel_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = { "None", "Front Jack", "Rear Jack" };
+ static const char * const texts[3] = {
+ "None", "Front Jack", "Rear Jack"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_ac97_stac9758_phonesel_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1804,15 +1762,9 @@ static int patch_ad1886(struct snd_ac97 * ac97)
static int snd_ac97_ad198x_spdif_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "AC-Link", "A/D Converter" };
+ static const char * const texts[2] = { "AC-Link", "A/D Converter" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ac97_ad198x_spdif_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1994,15 +1946,9 @@ static int snd_ac97_ad1888_lohpsel_put(struct snd_kcontrol *kcontrol, struct snd
static int snd_ac97_ad1888_downmix_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {"Off", "6 -> 4", "6 -> 2"};
+ static const char * const texts[3] = {"Off", "6 -> 4", "6 -> 2"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_ac97_ad1888_downmix_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2153,16 +2099,11 @@ static int patch_ad1980(struct snd_ac97 * ac97)
static int snd_ac97_ad1985_vrefout_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {"High-Z", "3.7 V", "2.25 V", "0 V"};
+ static const char * const texts[4] = {
+ "High-Z", "3.7 V", "2.25 V", "0 V"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_ac97_ad1985_vrefout_get(struct snd_kcontrol *kcontrol,
@@ -2756,20 +2697,18 @@ static const struct snd_kcontrol_new snd_ac97_controls_alc655[] = {
static int alc655_iec958_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts_655[3] = { "PCM", "Analog In", "IEC958 In" };
- static char *texts_658[4] = { "PCM", "Analog1 In", "Analog2 In", "IEC958 In" };
+ static const char * const texts_655[3] = {
+ "PCM", "Analog In", "IEC958 In"
+ };
+ static const char * const texts_658[4] = {
+ "PCM", "Analog1 In", "Analog2 In", "IEC958 In"
+ };
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ac97->spec.dev_flags ? 4 : 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- ac97->spec.dev_flags ?
- texts_658[uinfo->value.enumerated.item] :
- texts_655[uinfo->value.enumerated.item]);
- return 0;
+ if (ac97->spec.dev_flags)
+ return snd_ctl_enum_info(uinfo, 1, 4, texts_658);
+ else
+ return snd_ctl_enum_info(uinfo, 1, 3, texts_655);
}
static int alc655_iec958_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -3055,15 +2994,9 @@ static int patch_cm9738(struct snd_ac97 * ac97)
static int snd_ac97_cmedia_spdif_playback_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Analog", "Digital" };
+ static const char * const texts[] = { "Analog", "Digital" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ac97_cmedia_spdif_playback_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -3235,15 +3168,9 @@ static const struct snd_kcontrol_new snd_ac97_cm9761_controls[] = {
static int cm9761_spdif_out_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "AC-Link", "ADC", "SPDIF-In" };
+ static const char * const texts[] = { "AC-Link", "ADC", "SPDIF-In" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int cm9761_spdif_out_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -3270,7 +3197,9 @@ static int cm9761_spdif_out_source_put(struct snd_kcontrol *kcontrol, struct snd
ucontrol->value.enumerated.item[0] == 1 ? 0x2 : 0);
}
-static const char *cm9761_dac_clock[] = { "AC-Link", "SPDIF-In", "Both" };
+static const char * const cm9761_dac_clock[] = {
+ "AC-Link", "SPDIF-In", "Both"
+};
static const struct ac97_enum cm9761_dac_clock_enum =
AC97_ENUM_SINGLE(AC97_CM9761_SPDIF_CTRL, 9, 3, cm9761_dac_clock);
@@ -3384,7 +3313,9 @@ static int patch_cm9761(struct snd_ac97 *ac97)
#define AC97_CM9780_MULTI_CHAN 0x66
#define AC97_CM9780_SPDIF 0x6c
-static const char *cm9780_ch_select[] = { "Front", "Side", "Center/LFE", "Rear" };
+static const char * const cm9780_ch_select[] = {
+ "Front", "Side", "Center/LFE", "Rear"
+};
static const struct ac97_enum cm9780_ch_select_enum =
AC97_ENUM_SINGLE(AC97_CM9780_MULTI_CHAN, 6, 4, cm9780_ch_select);
static const struct snd_kcontrol_new cm9780_controls[] = {
@@ -3430,7 +3361,7 @@ AC97_SINGLE("Downmix LFE and Center to Front", 0x5a, 12, 1, 0),
AC97_SINGLE("Downmix Surround to Front", 0x5a, 11, 1, 0),
};
-static const char *slave_vols_vt1616[] = {
+static const char * const slave_vols_vt1616[] = {
"Front Playback Volume",
"Surround Playback Volume",
"Center Playback Volume",
@@ -3438,7 +3369,7 @@ static const char *slave_vols_vt1616[] = {
NULL
};
-static const char *slave_sws_vt1616[] = {
+static const char * const slave_sws_vt1616[] = {
"Front Playback Switch",
"Surround Playback Switch",
"Center Playback Switch",
@@ -3459,10 +3390,11 @@ static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97,
/* create a virtual master control and add slaves */
static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name,
- const unsigned int *tlv, const char **slaves)
+ const unsigned int *tlv,
+ const char * const *slaves)
{
struct snd_kcontrol *kctl;
- const char **s;
+ const char * const *s;
int err;
kctl = snd_ctl_make_virtual_master(name, tlv);
@@ -3552,11 +3484,12 @@ static int snd_ac97_vt1617a_smart51_info(struct snd_kcontrol *kcontrol,
* is SM51EN *AND* it's Bit14, not Bit15 so the table is very
* counter-intuitive */
- static const char* texts[] = { "LineIn Mic1", "LineIn Mic1 Mic3",
+ static const char * const texts[] = {"LineIn Mic1", "LineIn Mic1 Mic3",
"Surr LFE/C Mic3", "LineIn LFE/C Mic3",
"LineIn Mic2", "LineIn Mic2 Mic1",
"Surr LFE Mic1", "Surr LFE Mic1 Mic2"};
- return ac97_enum_text_info(kcontrol, uinfo, texts, 8);
+
+ return snd_ctl_enum_info(uinfo, 1, 8, texts);
}
static int snd_ac97_vt1617a_smart51_get(struct snd_kcontrol *kcontrol,
@@ -3685,7 +3618,7 @@ static int patch_vt1617a(struct snd_ac97 * ac97)
struct vt1618_uaj_item {
unsigned short mask;
unsigned short shift;
- const char *items[4];
+ const char * const items[4];
};
/* This list reflects the vt1618 docs for Vendor Defined Register 0x60. */
@@ -3720,9 +3653,8 @@ static struct vt1618_uaj_item vt1618_uaj[3] = {
static int snd_ac97_vt1618_UAJ_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- return ac97_enum_text_info(kcontrol, uinfo,
- vt1618_uaj[kcontrol->private_value].items,
- 4);
+ return snd_ctl_enum_info(uinfo, 1, 4,
+ vt1618_uaj[kcontrol->private_value].items);
}
/* All of the vt1618 Universal Audio Jack twiddlers are on
@@ -3767,9 +3699,9 @@ static int snd_ac97_vt1618_UAJ_put(struct snd_kcontrol *kcontrol,
static int snd_ac97_vt1618_aux_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char *txt_aux[] = {"Aux In", "Back Surr Out"};
+ static const char * const txt_aux[] = {"Aux In", "Back Surr Out"};
- return ac97_enum_text_info(kcontrol, uinfo, txt_aux, 2);
+ return snd_ctl_enum_info(uinfo, 1, 2, txt_aux);
}
static int snd_ac97_vt1618_aux_get(struct snd_kcontrol *kcontrol,
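The ac97_patch.c hunks above all collapse the same hand-rolled enumerated-info boilerplate (set type/count/items, clamp the item, strcpy the name) into a single snd_ctl_enum_info() call. A minimal sketch of the resulting callback shape, using a hypothetical two-entry control for illustration:

#include <linux/kernel.h>
#include <sound/control.h>

/* snd_ctl_enum_info() fills uinfo->type, count and items, clamps the
 * requested item and copies its name - the same steps the removed
 * lines performed by hand. */
static int example_route_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[] = { "Analog", "Digital" };

	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}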
diff --git a/sound/pci/ac97/ac97_patch.h b/sound/pci/ac97/ac97_patch.h
index 47bf8dfe8276..d1ce151fe722 100644
--- a/sound/pci/ac97/ac97_patch.h
+++ b/sound/pci/ac97/ac97_patch.h
@@ -49,7 +49,7 @@ struct ac97_enum {
unsigned char shift_l;
unsigned char shift_r;
unsigned short mask;
- const char **texts;
+ const char * const *texts;
};
#define AC97_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xmask, xtexts) \
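With the info helper taking const char * const * text tables, the texts member of struct ac97_enum changes to the matching pointer-to-const-pointer-to-const type. A short illustration of what that type permits (assumed example, not from the patch):

static const char * const texts[] = { "AC-Link", "ADC", "SPDIF-In" };
static const char * const *p = texts;	/* walking the table is fine */

/* Neither the pointers nor the characters can be modified through p:
 *	p[0] = "other";		would not compile
 *	p[0][0] = 'x';		would not compile
 */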
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 5017176bfaa1..e9273fb2a505 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1,6 +1,6 @@
/*
* Asihpi soundcard
- * Copyright (c) by AudioScience Inc <alsa@audioscience.com>
+ * Copyright (c) by AudioScience Inc <support@audioscience.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,6 @@
#include "hpioctl.h"
#include "hpicmn.h"
-
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/jiffies.h>
@@ -47,7 +46,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("AudioScience inc. <support@audioscience.com>");
-MODULE_DESCRIPTION("AudioScience ALSA ASI5000 ASI6000 ASI87xx ASI89xx "
+MODULE_DESCRIPTION("AudioScience ALSA ASI5xxx ASI6xxx ASI87xx ASI89xx "
HPI_VER_STRING);
#if defined CONFIG_SND_DEBUG_VERBOSE
@@ -87,11 +86,11 @@ MODULE_PARM_DESC(enable_hpi_hwdep,
#ifdef KERNEL_ALSA_BUILD
static char *build_info = "Built using headers from kernel source";
module_param(build_info, charp, S_IRUGO);
-MODULE_PARM_DESC(build_info, "built using headers from kernel source");
+MODULE_PARM_DESC(build_info, "Built using headers from kernel source");
#else
static char *build_info = "Built within ALSA source";
module_param(build_info, charp, S_IRUGO);
-MODULE_PARM_DESC(build_info, "built within ALSA source");
+MODULE_PARM_DESC(build_info, "Built within ALSA source");
#endif
/* set to 1 to dump every control from adapter to log */
@@ -110,7 +109,7 @@ static int adapter_fs = DEFAULT_SAMPLERATE;
struct clk_source {
int source;
int index;
- char *name;
+ const char *name;
};
struct clk_cache {
@@ -125,6 +124,16 @@ struct snd_card_asihpi {
struct pci_dev *pci;
struct hpi_adapter *hpi;
+ /* In low latency mode there is only one stream, a pointer to its
+ * private data is stored here on trigger and cleared on stop.
+ * The interrupt handler uses it as a parameter when calling
+ * snd_card_asihpi_timer_function().
+ */
+ struct snd_card_asihpi_pcm *llmode_streampriv;
+ struct tasklet_struct t;
+ void (*pcm_start)(struct snd_pcm_substream *substream);
+ void (*pcm_stop)(struct snd_pcm_substream *substream);
+
u32 h_mixer;
struct clk_cache cc;
@@ -289,21 +298,17 @@ static void print_hwparams(struct snd_pcm_substream *substream,
{
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
- snd_printd("%s HWPARAMS\n", name);
- snd_printd(" samplerate %d Hz\n", params_rate(p));
- snd_printd(" channels %d\n", params_channels(p));
- snd_printd(" format %d\n", params_format(p));
- snd_printd(" subformat %d\n", params_subformat(p));
- snd_printd(" buffer %d B\n", params_buffer_bytes(p));
- snd_printd(" period %d B\n", params_period_bytes(p));
- snd_printd(" access %d\n", params_access(p));
- snd_printd(" period_size %d\n", params_period_size(p));
- snd_printd(" periods %d\n", params_periods(p));
- snd_printd(" buffer_size %d\n", params_buffer_size(p));
- snd_printd(" %d B/s\n", params_rate(p) *
- params_channels(p) *
+ snd_printdd("%s HWPARAMS\n", name);
+ snd_printdd(" samplerate=%dHz channels=%d format=%d subformat=%d\n",
+ params_rate(p), params_channels(p),
+ params_format(p), params_subformat(p));
+ snd_printdd(" buffer=%dB period=%dB period_size=%dB periods=%d\n",
+ params_buffer_bytes(p), params_period_bytes(p),
+ params_period_size(p), params_periods(p));
+ snd_printdd(" buffer_size=%d access=%d data_rate=%dB/s\n",
+ params_buffer_size(p), params_access(p),
+ params_rate(p) * params_channels(p) *
snd_pcm_format_width(params_format(p)) / 8);
-
}
static snd_pcm_format_t hpi_to_alsa_formats[] = {
@@ -375,7 +380,7 @@ static void snd_card_asihpi_pcm_samplerates(struct snd_card_asihpi *asihpi,
HPI_SOURCENODE_CLOCK_SOURCE, 0, 0, 0,
HPI_CONTROL_SAMPLECLOCK, &h_control);
if (err) {
- snd_printk(KERN_ERR
+ dev_err(&asihpi->pci->dev,
"No local sampleclock, err %d\n", err);
}
@@ -481,7 +486,7 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(params), runtime->dma_addr);
if (err == 0) {
snd_printdd(
- "stream_host_buffer_attach succeeded %u %lu\n",
+ "stream_host_buffer_attach success %u %lu\n",
params_buffer_bytes(params),
(unsigned long)runtime->dma_addr);
} else {
@@ -491,12 +496,7 @@ static int snd_card_asihpi_pcm_hw_params(struct snd_pcm_substream *substream,
}
err = hpi_stream_get_info_ex(dpcm->h_stream, NULL,
- &dpcm->hpi_buffer_attached,
- NULL, NULL, NULL);
-
- snd_printdd("stream_host_buffer_attach status 0x%x\n",
- dpcm->hpi_buffer_attached);
-
+ &dpcm->hpi_buffer_attached, NULL, NULL, NULL);
}
bytes_per_sec = params_rate(params) * params_channels(params);
width = snd_pcm_format_width(params_format(params));
@@ -538,7 +538,7 @@ static void snd_card_asihpi_pcm_timer_start(struct snd_pcm_substream *
int expiry;
expiry = HZ / 200;
- /*? (dpcm->period_bytes * HZ / dpcm->bytes_per_sec); */
+
expiry = max(expiry, 1); /* don't let it be zero! */
dpcm->timer.expires = jiffies + expiry;
dpcm->respawn_timer = 1;
@@ -554,6 +554,48 @@ static void snd_card_asihpi_pcm_timer_stop(struct snd_pcm_substream *substream)
del_timer(&dpcm->timer);
}
+static void snd_card_asihpi_pcm_int_start(struct snd_pcm_substream *substream)
+{
+ struct snd_card_asihpi_pcm *dpcm;
+ struct snd_card_asihpi *card;
+
+ BUG_ON(!substream);
+
+ dpcm = (struct snd_card_asihpi_pcm *)substream->runtime->private_data;
+ card = snd_pcm_substream_chip(substream);
+
+ BUG_ON(in_interrupt());
+ tasklet_disable(&card->t);
+ card->llmode_streampriv = dpcm;
+ tasklet_enable(&card->t);
+
+ hpi_handle_error(hpi_adapter_set_property(card->hpi->adapter->index,
+ HPI_ADAPTER_PROPERTY_IRQ_RATE,
+ card->update_interval_frames, 0));
+}
+
+static void snd_card_asihpi_pcm_int_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_card_asihpi_pcm *dpcm;
+ struct snd_card_asihpi *card;
+
+ BUG_ON(!substream);
+
+ dpcm = (struct snd_card_asihpi_pcm *)substream->runtime->private_data;
+ card = snd_pcm_substream_chip(substream);
+
+ hpi_handle_error(hpi_adapter_set_property(card->hpi->adapter->index,
+ HPI_ADAPTER_PROPERTY_IRQ_RATE, 0, 0));
+
+ if (in_interrupt())
+ card->llmode_streampriv = NULL;
+ else {
+ tasklet_disable(&card->t);
+ card->llmode_streampriv = NULL;
+ tasklet_enable(&card->t);
+ }
+}
+
static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
int cmd)
{
@@ -564,10 +606,10 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
- snd_printdd("%s trigger\n", name);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
+ snd_printdd("%s trigger start\n", name);
snd_pcm_group_for_each_entry(s, substream) {
struct snd_pcm_runtime *runtime = s->runtime;
struct snd_card_asihpi_pcm *ds = runtime->private_data;
@@ -588,7 +630,7 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
* data??
*/
unsigned int preload = ds->period_bytes * 1;
- snd_printddd("%d preload x%x\n", s->number, preload);
+ snd_printddd("%d preload %d\n", s->number, preload);
hpi_handle_error(hpi_outstream_write_buf(
ds->h_stream,
&runtime->dma_area[0],
@@ -611,16 +653,16 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
} else
break;
}
- snd_printdd("start\n");
/* start the master stream */
- snd_card_asihpi_pcm_timer_start(substream);
+ card->pcm_start(substream);
if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ||
!card->can_dma)
hpi_handle_error(hpi_stream_start(dpcm->h_stream));
break;
case SNDRV_PCM_TRIGGER_STOP:
- snd_card_asihpi_pcm_timer_stop(substream);
+ snd_printdd("%s trigger stop\n", name);
+ card->pcm_stop(substream);
snd_pcm_group_for_each_entry(s, substream) {
if (snd_pcm_substream_chip(s) != card)
continue;
@@ -638,7 +680,6 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
} else
break;
}
- snd_printdd("stop\n");
/* _prepare and _hwparams reset the stream */
hpi_handle_error(hpi_stream_stop(dpcm->h_stream));
@@ -651,13 +692,13 @@ static int snd_card_asihpi_trigger(struct snd_pcm_substream *substream,
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- snd_printdd("pause release\n");
+ snd_printdd("%s trigger pause release\n", name);
+ card->pcm_start(substream);
hpi_handle_error(hpi_stream_start(dpcm->h_stream));
- snd_card_asihpi_pcm_timer_start(substream);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- snd_printdd("pause\n");
- snd_card_asihpi_pcm_timer_stop(substream);
+ snd_printdd("%s trigger pause push\n", name);
+ card->pcm_stop(substream);
hpi_handle_error(hpi_stream_stop(dpcm->h_stream));
break;
default:
@@ -729,9 +770,8 @@ static void snd_card_asihpi_timer_function(unsigned long data)
u32 buffer_size, bytes_avail, samples_played, on_card_bytes;
char name[16];
- snd_pcm_debug_name(substream, name, sizeof(name));
- snd_printdd("%s snd_card_asihpi_timer_function\n", name);
+ snd_pcm_debug_name(substream, name, sizeof(name));
/* find minimum newdata and buffer pos in group */
snd_pcm_group_for_each_entry(s, substream) {
@@ -769,10 +809,7 @@ static void snd_card_asihpi_timer_function(unsigned long data)
s->number);
ds->drained_count++;
if (ds->drained_count > 20) {
- unsigned long flags;
- snd_pcm_stream_lock_irqsave(s, flags);
- snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(s, flags);
+ snd_pcm_stop_xrun(s);
continue;
}
} else {
@@ -794,19 +831,21 @@ static void snd_card_asihpi_timer_function(unsigned long data)
newdata);
}
- snd_printdd("hw_ptr 0x%04lX, appl_ptr 0x%04lX\n",
+ snd_printddd(
+ "timer1, %s, %d, S=%d, elap=%d, rw=%d, dsp=%d, left=%d, aux=%d, space=%d, hw_ptr=%ld, appl_ptr=%ld\n",
+ name, s->number, state,
+ ds->pcm_buf_elapsed_dma_ofs,
+ ds->pcm_buf_host_rw_ofs,
+ pcm_buf_dma_ofs,
+ (int)bytes_avail,
+
+ (int)on_card_bytes,
+ buffer_size-bytes_avail,
(unsigned long)frames_to_bytes(runtime,
runtime->status->hw_ptr),
(unsigned long)frames_to_bytes(runtime,
- runtime->control->appl_ptr));
-
- snd_printdd("%d S=%d, "
- "rw=0x%04X, dma=0x%04X, left=0x%04X, "
- "aux=0x%04X space=0x%04X\n",
- s->number, state,
- ds->pcm_buf_host_rw_ofs, pcm_buf_dma_ofs,
- (int)bytes_avail,
- (int)on_card_bytes, buffer_size-bytes_avail);
+ runtime->control->appl_ptr)
+ );
loops++;
}
pcm_buf_dma_ofs = min_buf_pos;
@@ -824,16 +863,18 @@ static void snd_card_asihpi_timer_function(unsigned long data)
next_jiffies = max(next_jiffies, 1U);
dpcm->timer.expires = jiffies + next_jiffies;
- snd_printdd("jif %d buf pos 0x%04X newdata 0x%04X xfer 0x%04X\n",
+ snd_printddd("timer2, jif=%d, buf_pos=%d, newdata=%d, xfer=%d\n",
next_jiffies, pcm_buf_dma_ofs, newdata, xfercount);
snd_pcm_group_for_each_entry(s, substream) {
struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
+ runtime = s->runtime;
/* don't link Cap and Play */
if (substream->stream != s->stream)
continue;
+ /* Store dma offset for use by pointer callback */
ds->pcm_buf_dma_ofs = pcm_buf_dma_ofs;
if (xfercount &&
@@ -856,7 +897,7 @@ static void snd_card_asihpi_timer_function(unsigned long data)
}
if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- snd_printddd("P%d write1 0x%04X 0x%04X\n",
+ snd_printddd("write1, P=%d, xfer=%d, buf_ofs=%d\n",
s->number, xfer1, buf_ofs);
hpi_handle_error(
hpi_outstream_write_buf(
@@ -866,7 +907,7 @@ static void snd_card_asihpi_timer_function(unsigned long data)
if (xfer2) {
pd = s->runtime->dma_area;
- snd_printddd("P%d write2 0x%04X 0x%04X\n",
+ snd_printddd("write2, P=%d, xfer=%d, buf_ofs=%d\n",
s->number,
xfercount - xfer1, buf_ofs);
hpi_handle_error(
@@ -876,7 +917,7 @@ static void snd_card_asihpi_timer_function(unsigned long data)
&ds->format));
}
} else {
- snd_printddd("C%d read1 0x%04x\n",
+ snd_printddd("read1, C=%d, xfer=%d\n",
s->number, xfer1);
hpi_handle_error(
hpi_instream_read_buf(
@@ -884,7 +925,7 @@ static void snd_card_asihpi_timer_function(unsigned long data)
pd, xfer1));
if (xfer2) {
pd = s->runtime->dma_area;
- snd_printddd("C%d read2 0x%04x\n",
+ snd_printddd("read2, C=%d, xfer=%d\n",
s->number, xfer2);
hpi_handle_error(
hpi_instream_read_buf(
@@ -892,16 +933,38 @@ static void snd_card_asihpi_timer_function(unsigned long data)
pd, xfer2));
}
}
+ /* ? host_rw_ofs always ahead of elapsed_dma_ofs by preload size? */
ds->pcm_buf_host_rw_ofs += xfercount;
ds->pcm_buf_elapsed_dma_ofs += xfercount;
snd_pcm_period_elapsed(s);
}
}
- if (dpcm->respawn_timer)
+ if (!card->hpi->interrupt_mode && dpcm->respawn_timer)
add_timer(&dpcm->timer);
}
+static void snd_card_asihpi_int_task(unsigned long data)
+{
+ struct hpi_adapter *a = (struct hpi_adapter *)data;
+ struct snd_card_asihpi *asihpi;
+
+ WARN_ON(!a || !a->snd_card || !a->snd_card->private_data);
+ asihpi = (struct snd_card_asihpi *)a->snd_card->private_data;
+ if (asihpi->llmode_streampriv)
+ snd_card_asihpi_timer_function(
+ (unsigned long)asihpi->llmode_streampriv);
+}
+
+static void snd_card_asihpi_isr(struct hpi_adapter *a)
+{
+ struct snd_card_asihpi *asihpi;
+
+ WARN_ON(!a || !a->snd_card || !a->snd_card->private_data);
+ asihpi = (struct snd_card_asihpi *)a->snd_card->private_data;
+ tasklet_schedule(&asihpi->t);
+}
+
/***************************** PLAYBACK OPS ****************/
static int snd_card_asihpi_playback_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
@@ -937,7 +1000,7 @@ snd_card_asihpi_playback_pointer(struct snd_pcm_substream *substream)
snd_pcm_debug_name(substream, name, sizeof(name));
ptr = bytes_to_frames(runtime, dpcm->pcm_buf_dma_ofs % dpcm->buffer_bytes);
- snd_printddd("%s pointer = 0x%04lx\n", name, (unsigned long)ptr);
+ snd_printddd("%s, pointer=%ld\n", name, (unsigned long)ptr);
return ptr;
}
@@ -1009,13 +1072,22 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
runtime->private_free = snd_card_asihpi_runtime_free;
memset(&snd_card_asihpi_playback, 0, sizeof(snd_card_asihpi_playback));
- snd_card_asihpi_playback.buffer_bytes_max = BUFFER_BYTES_MAX;
- snd_card_asihpi_playback.period_bytes_min = PERIOD_BYTES_MIN;
- /*?snd_card_asihpi_playback.period_bytes_min =
- card->out_max_chans * 4096; */
- snd_card_asihpi_playback.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
- snd_card_asihpi_playback.periods_min = PERIODS_MIN;
- snd_card_asihpi_playback.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN;
+ if (!card->hpi->interrupt_mode) {
+ snd_card_asihpi_playback.buffer_bytes_max = BUFFER_BYTES_MAX;
+ snd_card_asihpi_playback.period_bytes_min = PERIOD_BYTES_MIN;
+ snd_card_asihpi_playback.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
+ snd_card_asihpi_playback.periods_min = PERIODS_MIN;
+ snd_card_asihpi_playback.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN;
+ } else {
+ size_t pbmin = card->update_interval_frames *
+ card->out_max_chans;
+ snd_card_asihpi_playback.buffer_bytes_max = BUFFER_BYTES_MAX;
+ snd_card_asihpi_playback.period_bytes_min = pbmin;
+ snd_card_asihpi_playback.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
+ snd_card_asihpi_playback.periods_min = PERIODS_MIN;
+ snd_card_asihpi_playback.periods_max = BUFFER_BYTES_MAX / pbmin;
+ }
+
/* snd_card_asihpi_playback.fifo_size = 0; */
snd_card_asihpi_playback.channels_max = card->out_max_chans;
snd_card_asihpi_playback.channels_min = card->out_min_chans;
@@ -1050,7 +1122,7 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
card->update_interval_frames);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- card->update_interval_frames * 2, UINT_MAX);
+ card->update_interval_frames, UINT_MAX);
snd_printdd("playback open\n");
@@ -1085,9 +1157,10 @@ snd_card_asihpi_capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_card_asihpi_pcm *dpcm = runtime->private_data;
+ char name[16];
+ snd_pcm_debug_name(substream, name, sizeof(name));
- snd_printddd("capture pointer %d=%d\n",
- substream->number, dpcm->pcm_buf_dma_ofs);
+ snd_printddd("%s, pointer=%d\n", name, dpcm->pcm_buf_dma_ofs);
/* NOTE Unlike playback can't use actual samples_played
for the capture position, because those samples aren't yet in
the local buffer available for reading.
@@ -1115,8 +1188,6 @@ static int snd_card_asihpi_capture_prepare(struct snd_pcm_substream *substream)
return 0;
}
-
-
static u64 snd_card_asihpi_capture_formats(struct snd_card_asihpi *asihpi,
u32 h_stream)
{
@@ -1183,11 +1254,21 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
runtime->private_free = snd_card_asihpi_runtime_free;
memset(&snd_card_asihpi_capture, 0, sizeof(snd_card_asihpi_capture));
- snd_card_asihpi_capture.buffer_bytes_max = BUFFER_BYTES_MAX;
- snd_card_asihpi_capture.period_bytes_min = PERIOD_BYTES_MIN;
- snd_card_asihpi_capture.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
- snd_card_asihpi_capture.periods_min = PERIODS_MIN;
- snd_card_asihpi_capture.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN;
+ if (!card->hpi->interrupt_mode) {
+ snd_card_asihpi_capture.buffer_bytes_max = BUFFER_BYTES_MAX;
+ snd_card_asihpi_capture.period_bytes_min = PERIOD_BYTES_MIN;
+ snd_card_asihpi_capture.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
+ snd_card_asihpi_capture.periods_min = PERIODS_MIN;
+ snd_card_asihpi_capture.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN;
+ } else {
+ size_t pbmin = card->update_interval_frames *
+ card->out_max_chans;
+ snd_card_asihpi_capture.buffer_bytes_max = BUFFER_BYTES_MAX;
+ snd_card_asihpi_capture.period_bytes_min = pbmin;
+ snd_card_asihpi_capture.period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN;
+ snd_card_asihpi_capture.periods_min = PERIODS_MIN;
+ snd_card_asihpi_capture.periods_max = BUFFER_BYTES_MAX / pbmin;
+ }
/* snd_card_asihpi_capture.fifo_size = 0; */
snd_card_asihpi_capture.channels_max = card->in_max_chans;
snd_card_asihpi_capture.channels_min = card->in_min_chans;
@@ -1212,7 +1293,7 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
card->update_interval_frames);
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
- card->update_interval_frames * 2, UINT_MAX);
+ card->update_interval_frames, UINT_MAX);
snd_pcm_set_sync(substream);
@@ -1296,8 +1377,9 @@ static const char * const asihpi_tuner_band_names[] = {
"TV PAL I",
"TV PAL DK",
"TV SECAM",
+ "TV DAB",
};
-
+/* Number of strings must match the enumerations for HPI_TUNER_BAND in hpi.h */
compile_time_assert(
(ARRAY_SIZE(asihpi_tuner_band_names) ==
(HPI_TUNER_BAND_LAST+1)),
@@ -1317,9 +1399,11 @@ static const char * const asihpi_src_names[] = {
"Analog",
"Adapter",
"RTP",
- "Internal"
+ "Internal",
+ "AVB",
+ "BLU-Link"
};
-
+/* Number of strings must match the enumerations for HPI_SOURCENODES in hpi.h */
compile_time_assert(
(ARRAY_SIZE(asihpi_src_names) ==
(HPI_SOURCENODE_LAST_INDEX-HPI_SOURCENODE_NONE+1)),
@@ -1335,8 +1419,11 @@ static const char * const asihpi_dst_names[] = {
"Net",
"Analog",
"RTP",
+ "AVB",
+ "Internal",
+ "BLU-Link"
};
-
+/* Number of strings must match the enumerations for HPI_DESTNODES in hpi.h */
compile_time_assert(
(ARRAY_SIZE(asihpi_dst_names) ==
(HPI_DESTNODE_LAST_INDEX-HPI_DESTNODE_NONE+1)),
@@ -1351,7 +1438,7 @@ static inline int ctl_add(struct snd_card *card, struct snd_kcontrol_new *ctl,
if (err < 0)
return err;
else if (mixer_dump)
- snd_printk(KERN_INFO "added %s(%d)\n", ctl->name, ctl->index);
+ dev_info(&asihpi->pci->dev, "added %s(%d)\n", ctl->name, ctl->index);
return 0;
}
@@ -1625,18 +1712,7 @@ static const char * const asihpi_aesebu_format_names[] = {
static int snd_asihpi_aesebu_format_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strcpy(uinfo->value.enumerated.name,
- asihpi_aesebu_format_names[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, asihpi_aesebu_format_names);
}
static int snd_asihpi_aesebu_format_get(struct snd_kcontrol *kcontrol,
@@ -1863,22 +1939,7 @@ static int snd_asihpi_tuner_band_info(struct snd_kcontrol *kcontrol,
if (num_bands < 0)
return num_bands;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = num_bands;
-
- if (num_bands > 0) {
- if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strcpy(uinfo->value.enumerated.name,
- asihpi_tuner_band_names[
- tuner_bands[uinfo->value.enumerated.item]]);
-
- }
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_bands, asihpi_tuner_band_names);
}
static int snd_asihpi_tuner_band_get(struct snd_kcontrol *kcontrol,
@@ -2253,7 +2314,7 @@ static int snd_asihpi_cmode_info(struct snd_kcontrol *kcontrol,
u32 h_control = kcontrol->private_value;
u16 mode;
int i;
- u16 mode_map[6];
+ const char *mapped_names[6];
int valid_modes = 0;
/* HPI channel mode values can be from 1 to 6
@@ -2262,24 +2323,14 @@ static int snd_asihpi_cmode_info(struct snd_kcontrol *kcontrol,
for (i = 0; i < HPI_CHANNEL_MODE_LAST; i++)
if (!hpi_channel_mode_query_mode(
h_control, i, &mode)) {
- mode_map[valid_modes] = mode;
+ mapped_names[valid_modes] = mode_names[mode];
valid_modes++;
}
if (!valid_modes)
return -EINVAL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = valid_modes;
-
- if (uinfo->value.enumerated.item >= valid_modes)
- uinfo->value.enumerated.item = valid_modes - 1;
-
- strcpy(uinfo->value.enumerated.name,
- mode_names[mode_map[uinfo->value.enumerated.item]]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, valid_modes, mapped_names);
}
static int snd_asihpi_cmode_get(struct snd_kcontrol *kcontrol,
@@ -2328,13 +2379,18 @@ static int snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
/*------------------------------------------------------------
Sampleclock source controls
------------------------------------------------------------*/
-static char *sampleclock_sources[MAX_CLOCKSOURCES] = {
+static const char * const sampleclock_sources[] = {
"N/A", "Local PLL", "Digital Sync", "Word External", "Word Header",
"SMPTE", "Digital1", "Auto", "Network", "Invalid",
- "Prev Module",
+ "Prev Module", "BLU-Link",
"Digital2", "Digital3", "Digital4", "Digital5",
"Digital6", "Digital7", "Digital8"};
+ /* Number of strings must match expected enumerated values */
+ compile_time_assert(
+ (ARRAY_SIZE(sampleclock_sources) == MAX_CLOCKSOURCES),
+ assert_sampleclock_sources_size);
+
static int snd_asihpi_clksrc_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -2482,15 +2538,19 @@ static int snd_asihpi_clkrate_get(struct snd_kcontrol *kcontrol,
static int snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
struct hpi_control *hpi_ctl)
{
- struct snd_card *card = asihpi->card;
+ struct snd_card *card;
struct snd_kcontrol_new snd_control;
- struct clk_cache *clkcache = &asihpi->cc;
+ struct clk_cache *clkcache;
u32 hSC = hpi_ctl->h_control;
int has_aes_in = 0;
int i, j;
u16 source;
+ if (snd_BUG_ON(!asihpi))
+ return -EINVAL;
+ card = asihpi->card;
+ clkcache = &asihpi->cc;
snd_control.private_value = hpi_ctl->h_control;
clkcache->has_local = 0;
@@ -2592,7 +2652,7 @@ static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
if (err) {
if (err == HPI_ERROR_CONTROL_DISABLED) {
if (mixer_dump)
- snd_printk(KERN_INFO
+ dev_info(&asihpi->pci->dev,
"Disabled HPI Control(%d)\n",
idx);
continue;
@@ -2657,9 +2717,8 @@ static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
case HPI_CONTROL_COMPANDER:
default:
if (mixer_dump)
- snd_printk(KERN_INFO
- "Untranslated HPI Control"
- "(%d) %d %d %d %d %d\n",
+ dev_info(&asihpi->pci->dev,
+ "Untranslated HPI Control (%d) %d %d %d %d %d\n",
idx,
hpi_ctl.control_type,
hpi_ctl.src_node_type,
@@ -2674,7 +2733,7 @@ static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
if (HPI_ERROR_INVALID_OBJ_INDEX != err)
hpi_handle_error(err);
- snd_printk(KERN_INFO "%d mixer controls found\n", idx);
+ dev_info(&asihpi->pci->dev, "%d mixer controls found\n", idx);
return 0;
}
@@ -2837,8 +2896,7 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
&card);
if (err < 0)
return err;
- snd_printk(KERN_WARNING
- "**** WARNING **** Adapter index %d->ALSA index %d\n",
+ dev_warn(&pci_dev->dev, "Adapter index %d->ALSA index %d\n",
adapter_index, card->number);
}
@@ -2846,9 +2904,7 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
asihpi->card = card;
asihpi->pci = pci_dev;
asihpi->hpi = hpi;
-
- snd_printk(KERN_INFO "adapter ID=%4X index=%d\n",
- asihpi->hpi->adapter->type, adapter_index);
+ hpi->snd_card = card;
err = hpi_adapter_get_property(adapter_index,
HPI_ADAPTER_PROPERTY_CAPS1,
@@ -2868,8 +2924,16 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
if (err)
asihpi->update_interval_frames = 512;
- if (!asihpi->can_dma)
- asihpi->update_interval_frames *= 2;
+ if (hpi->interrupt_mode) {
+ asihpi->pcm_start = snd_card_asihpi_pcm_int_start;
+ asihpi->pcm_stop = snd_card_asihpi_pcm_int_stop;
+ tasklet_init(&asihpi->t, snd_card_asihpi_int_task,
+ (unsigned long)hpi);
+ hpi->interrupt_callback = snd_card_asihpi_isr;
+ } else {
+ asihpi->pcm_start = snd_card_asihpi_pcm_timer_start;
+ asihpi->pcm_stop = snd_card_asihpi_pcm_timer_stop;
+ }
hpi_handle_error(hpi_instream_open(adapter_index,
0, &h_stream));
@@ -2879,6 +2943,9 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
hpi_handle_error(hpi_instream_close(h_stream));
+ if (!asihpi->can_dma)
+ asihpi->update_interval_frames *= 2;
+
err = hpi_adapter_get_property(adapter_index,
HPI_ADAPTER_PROPERTY_CURCHANNELS,
&asihpi->in_max_chans, &asihpi->out_max_chans);
@@ -2896,20 +2963,21 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
asihpi->in_min_chans = 1;
}
- snd_printk(KERN_INFO "Has dma:%d, grouping:%d, mrx:%d\n",
+ dev_info(&pci_dev->dev, "Has dma:%d, grouping:%d, mrx:%d, uif:%d\n",
asihpi->can_dma,
asihpi->support_grouping,
- asihpi->support_mrx
+ asihpi->support_mrx,
+ asihpi->update_interval_frames
);
err = snd_card_asihpi_pcm_new(asihpi, 0);
if (err < 0) {
- snd_printk(KERN_ERR "pcm_new failed\n");
+ dev_err(&pci_dev->dev, "pcm_new failed\n");
goto __nodev;
}
err = snd_card_asihpi_mixer_new(asihpi);
if (err < 0) {
- snd_printk(KERN_ERR "mixer_new failed\n");
+ dev_err(&pci_dev->dev, "mixer_new failed\n");
goto __nodev;
}
@@ -2936,13 +3004,12 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
err = snd_card_register(card);
if (!err) {
- hpi->snd_card = card;
dev++;
return 0;
}
__nodev:
snd_card_free(card);
- snd_printk(KERN_ERR "snd_asihpi_probe error %d\n", err);
+ dev_err(&pci_dev->dev, "snd_asihpi_probe error %d\n", err);
return err;
}
@@ -2950,6 +3017,16 @@ __nodev:
static void snd_asihpi_remove(struct pci_dev *pci_dev)
{
struct hpi_adapter *hpi = pci_get_drvdata(pci_dev);
+ struct snd_card_asihpi *asihpi = hpi->snd_card->private_data;
+
+ /* Stop interrupts */
+ if (hpi->interrupt_mode) {
+ hpi->interrupt_callback = NULL;
+ hpi_handle_error(hpi_adapter_set_property(hpi->adapter->index,
+ HPI_ADAPTER_PROPERTY_IRQ_RATE, 0, 0));
+ tasklet_kill(&asihpi->t);
+ }
+
snd_card_free(hpi->snd_card);
hpi->snd_card = NULL;
asihpi_adapter_remove(pci_dev);
@@ -2971,10 +3048,6 @@ static struct pci_driver driver = {
.id_table = asihpi_pci_tbl,
.probe = snd_asihpi_probe,
.remove = snd_asihpi_remove,
-#ifdef CONFIG_PM_SLEEP
-/* .suspend = snd_asihpi_suspend,
- .resume = snd_asihpi_resume, */
-#endif
};
static int __init snd_asihpi_init(void)
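asihpi.c now selects between timer-driven and interrupt-driven period handling once at probe time, stores the choice in the new pcm_start/pcm_stop function pointers, and defers interrupt work to a tasklet that reuses the existing timer function. A condensed sketch of that pattern with made-up names (the my_card_* identifiers are hypothetical, not the driver's API):

#include <linux/interrupt.h>
#include <sound/pcm.h>

struct my_card {
	struct tasklet_struct t;
	void (*pcm_start)(struct snd_pcm_substream *substream);
	void (*pcm_stop)(struct snd_pcm_substream *substream);
	void *llmode_streampriv;	/* stream data set at trigger time */
};

static void my_card_pcm_timer_start(struct snd_pcm_substream *s);
static void my_card_pcm_timer_stop(struct snd_pcm_substream *s);
static void my_card_pcm_int_start(struct snd_pcm_substream *s);
static void my_card_pcm_int_stop(struct snd_pcm_substream *s);
static void my_card_timer_function(unsigned long data);

/* Bottom half: runs the same period-update code the kernel timer used
 * to run, but only while a low-latency stream is active. */
static void my_card_int_task(unsigned long data)
{
	struct my_card *card = (struct my_card *)data;

	if (card->llmode_streampriv)
		my_card_timer_function((unsigned long)card->llmode_streampriv);
}

static void my_card_choose_mode(struct my_card *card, int interrupt_mode)
{
	if (interrupt_mode) {
		card->pcm_start = my_card_pcm_int_start;
		card->pcm_stop = my_card_pcm_int_stop;
		tasklet_init(&card->t, my_card_int_task, (unsigned long)card);
	} else {
		card->pcm_start = my_card_pcm_timer_start;
		card->pcm_stop = my_card_pcm_timer_stop;
	}
}

The trigger callback then calls card->pcm_start()/card->pcm_stop() and no longer needs to know which mode the adapter is running in.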
diff --git a/sound/pci/asihpi/hpi.h b/sound/pci/asihpi/hpi.h
index 20887241a3ae..4466bd2c5272 100644
--- a/sound/pci/asihpi/hpi.h
+++ b/sound/pci/asihpi/hpi.h
@@ -196,8 +196,10 @@ enum HPI_SOURCENODES {
packets of RTP audio samples from other devices. */
HPI_SOURCENODE_RTP_DESTINATION = 112,
HPI_SOURCENODE_INTERNAL = 113, /**< node internal to the device. */
+ HPI_SOURCENODE_AVB = 114, /**< AVB input stream */
+ HPI_SOURCENODE_BLULINK = 115, /**< BLU-link input channel */
/* !!!Update this AND hpidebug.h if you add a new sourcenode type!!! */
- HPI_SOURCENODE_LAST_INDEX = 113 /**< largest ID */
+ HPI_SOURCENODE_LAST_INDEX = 115 /**< largest ID */
/* AX6 max sourcenode types = 15 */
};
@@ -224,8 +226,11 @@ enum HPI_DESTNODES {
/** RTP stream output node - This node is a source for
packets of RTP audio samples that are sent to other devices. */
HPI_DESTNODE_RTP_SOURCE = 208,
+ HPI_DESTNODE_AVB = 209, /**< AVB output stream */
+ HPI_DESTNODE_INTERNAL = 210, /**< node internal to the device. */
+ HPI_DESTNODE_BLULINK = 211, /**< BLU-link output channel. */
/* !!!Update this AND hpidebug.h if you add a new destnode type!!! */
- HPI_DESTNODE_LAST_INDEX = 208 /**< largest ID */
+ HPI_DESTNODE_LAST_INDEX = 211 /**< largest ID */
/* AX6 max destnode types = 15 */
};
@@ -752,7 +757,8 @@ enum HPI_TUNER_BAND {
HPI_TUNER_BAND_TV_PAL_I = 7, /**< PAL-I TV band*/
HPI_TUNER_BAND_TV_PAL_DK = 8, /**< PAL-D/K TV band*/
HPI_TUNER_BAND_TV_SECAM_L = 9, /**< SECAM-L TV band*/
- HPI_TUNER_BAND_LAST = 9 /**< the index of the last tuner band. */
+ HPI_TUNER_BAND_DAB = 10,
+ HPI_TUNER_BAND_LAST = 10 /**< the index of the last tuner band. */
};
/** Tuner mode attributes
@@ -842,8 +848,10 @@ enum HPI_SAMPLECLOCK_SOURCES {
HPI_SAMPLECLOCK_SOURCE_NETWORK = 8,
/** From previous adjacent module (ASI2416 only)*/
HPI_SAMPLECLOCK_SOURCE_PREV_MODULE = 10,
+/** Blu link sample clock*/
+ HPI_SAMPLECLOCK_SOURCE_BLULINK = 11,
/*! Update this if you add a new clock source.*/
- HPI_SAMPLECLOCK_SOURCE_LAST = 10
+ HPI_SAMPLECLOCK_SOURCE_LAST = 11
};
/** Equalizer filter types. Used by HPI_ParametricEq_SetBand()
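Every new source/dest node, tuner band and clock source added in hpi.h has to be mirrored by a string in the asihpi name tables, which is what the compile_time_assert() guards in asihpi.c enforce. The stock kernel macro can express the same check; for example, a guard for the tuner band table could read (illustrative alternative, not what the driver uses):

#include <linux/bug.h>
#include <linux/kernel.h>

/* Break the build if HPI_TUNER_BAND_LAST grows without a matching
 * entry in asihpi_tuner_band_names[]. */
static inline void check_tuner_band_names(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(asihpi_tuner_band_names) !=
		     HPI_TUNER_BAND_LAST + 1);
}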
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 4f2873880b16..8d5abfa4e24b 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -1,7 +1,7 @@
/******************************************************************************
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -163,6 +163,9 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
static void delete_adapter_obj(struct hpi_adapter_obj *pao);
+static int adapter_irq_query_and_clear(struct hpi_adapter_obj *pao,
+ u32 message);
+
static void outstream_host_buffer_allocate(struct hpi_adapter_obj *pao,
struct hpi_message *phm, struct hpi_response *phr);
@@ -283,7 +286,6 @@ static void adapter_message(struct hpi_adapter_obj *pao,
case HPI_ADAPTER_DELETE:
adapter_delete(pao, phm, phr);
break;
-
default:
hw_message(pao, phm, phr);
break;
@@ -673,6 +675,12 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
HPI_DEBUG_LOG(INFO, "bootload DSP OK\n");
+ pao->irq_query_and_clear = adapter_irq_query_and_clear;
+ pao->instream_host_buffer_status =
+ phw->p_interface_buffer->instream_host_buffer_status;
+ pao->outstream_host_buffer_status =
+ phw->p_interface_buffer->outstream_host_buffer_status;
+
return hpi_add_adapter(pao);
}
@@ -713,6 +721,21 @@ static void delete_adapter_obj(struct hpi_adapter_obj *pao)
/*****************************************************************************/
/* Adapter functions */
+static int adapter_irq_query_and_clear(struct hpi_adapter_obj *pao,
+ u32 message)
+{
+ struct hpi_hw_obj *phw = pao->priv;
+ u32 hsr = 0;
+
+ hsr = ioread32(phw->prHSR);
+ if (hsr & C6205_HSR_INTSRC) {
+ /* reset the interrupt from the DSP */
+ iowrite32(C6205_HSR_INTSRC, phw->prHSR);
+ return HPI_IRQ_MIXER;
+ }
+
+ return HPI_IRQ_NONE;
+}
/*****************************************************************************/
/* OutStream Host buffer functions */
@@ -1331,17 +1354,21 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
if (boot_code_id[1] != 0) {
/* DSP 1 is a C6713 */
/* CLKX0 <- '1' release the C6205 bootmode pulldowns */
- boot_loader_write_mem32(pao, 0, (0x018C0024L), 0x00002202);
+ boot_loader_write_mem32(pao, 0, 0x018C0024, 0x00002202);
hpios_delay_micro_seconds(100);
/* Reset the 6713 #1 - revB */
boot_loader_write_mem32(pao, 0, C6205_BAR0_TIMER1_CTL, 0);
-
- /* dummy read every 4 words for 6205 advisory 1.4.4 */
- boot_loader_read_mem32(pao, 0, 0);
-
+ /* value of bit 3 is unknown after DSP reset, other bits should be 0 */
+ if (0 != (boot_loader_read_mem32(pao, 0,
+ (C6205_BAR0_TIMER1_CTL)) & ~8))
+ return HPI6205_ERROR_6205_REG;
hpios_delay_micro_seconds(100);
+
/* Release C6713 from reset - revB */
boot_loader_write_mem32(pao, 0, C6205_BAR0_TIMER1_CTL, 4);
+ if (4 != (boot_loader_read_mem32(pao, 0,
+ (C6205_BAR0_TIMER1_CTL)) & ~8))
+ return HPI6205_ERROR_6205_REG;
hpios_delay_micro_seconds(100);
}
@@ -2089,7 +2116,7 @@ static u16 message_response_sequence(struct hpi_adapter_obj *pao,
return 0;
}
- /* Assume buffer of type struct bus_master_interface
+ /* Assume buffer of type struct bus_master_interface_62
is allocated "noncacheable" */
if (!wait_dsp_ack(phw, H620_HIF_IDLE, HPI6205_TIMEOUT)) {
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index bc86cb726d79..48380ce2c81b 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -554,17 +554,21 @@ struct hpi_pci {
struct pci_dev *pci_dev;
};
+/** Adapter specification resource */
+struct hpi_adapter_specification {
+ u32 type;
+ u8 modules[4];
+};
+
struct hpi_resource {
union {
const struct hpi_pci *pci;
const char *net_if;
+ struct hpi_adapter_specification adapter_spec;
+ const void *sw_if;
} r;
-#ifndef HPI64BIT /* keep structure size constant */
- u32 pad_to64;
-#endif
u16 bus_type; /* HPI_BUS_PNPISA, _PCI, _USB etc */
u16 padding;
-
};
/** Format info used inside struct hpi_message
@@ -582,7 +586,7 @@ struct hpi_msg_format {
struct hpi_msg_data {
struct hpi_msg_format format;
u8 *pb_data;
-#ifndef HPI64BIT
+#ifndef CONFIG_64BIT
u32 padding;
#endif
u32 data_size;
@@ -595,7 +599,7 @@ struct hpi_data_legacy32 {
u32 data_size;
};
-#ifdef HPI64BIT
+#ifdef CONFIG_64BIT
/* Compatibility version of struct hpi_data*/
struct hpi_data_compat32 {
struct hpi_msg_format format;
@@ -682,8 +686,8 @@ union hpi_adapterx_msg {
u16 value;
} test_assert;
struct {
- u32 yes;
- } irq_query;
+ u32 message;
+ } irq;
u32 pad[3];
};
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index 7ed5c26c3737..c7751243dc42 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -1,7 +1,7 @@
/******************************************************************************
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -206,6 +206,14 @@ static unsigned int control_cache_alloc_check(struct hpi_control_cache *pC)
struct hpi_control_cache_info *info =
(struct hpi_control_cache_info *)
&p_master_cache[byte_count];
+ u16 control_index = info->control_index;
+
+ if (control_index >= pC->control_count) {
+ HPI_DEBUG_LOG(INFO,
+ "adap %d control index %d out of range, cache not ready?\n",
+ pC->adap_idx, control_index);
+ return 0;
+ }
if (!info->size_in32bit_words) {
if (!i) {
@@ -225,10 +233,10 @@ static unsigned int control_cache_alloc_check(struct hpi_control_cache *pC)
}
if (info->control_type) {
- pC->p_info[info->control_index] = info;
+ pC->p_info[control_index] = info;
cached++;
} else { /* dummy cache entry */
- pC->p_info[info->control_index] = NULL;
+ pC->p_info[control_index] = NULL;
}
byte_count += info->size_in32bit_words * 4;
@@ -309,35 +317,18 @@ static const struct pad_ofs_size pad_desc[] = {
/** CheckControlCache checks the cache and fills the struct hpi_response
* accordingly. It returns one if a cache hit occurred, zero otherwise.
*/
-short hpi_check_control_cache(struct hpi_control_cache *p_cache,
+short hpi_check_control_cache_single(struct hpi_control_cache_single *pC,
struct hpi_message *phm, struct hpi_response *phr)
{
- short found = 1;
- struct hpi_control_cache_info *pI;
- struct hpi_control_cache_single *pC;
size_t response_size;
- if (!find_control(phm->obj_index, p_cache, &pI)) {
- HPI_DEBUG_LOG(VERBOSE,
- "HPICMN find_control() failed for adap %d\n",
- phm->adapter_index);
- return 0;
- }
-
- phr->error = 0;
- phr->specific_error = 0;
- phr->version = 0;
+ short found = 1;
/* set the default response size */
response_size =
sizeof(struct hpi_response_header) +
sizeof(struct hpi_control_res);
- /* pC is the default cached control strucure. May be cast to
- something else in the following switch statement.
- */
- pC = (struct hpi_control_cache_single *)pI;
-
- switch (pI->control_type) {
+ switch (pC->u.i.control_type) {
case HPI_CONTROL_METER:
if (phm->u.c.attribute == HPI_METER_PEAK) {
@@ -467,7 +458,7 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
break;
case HPI_CONTROL_PAD:{
struct hpi_control_cache_pad *p_pad;
- p_pad = (struct hpi_control_cache_pad *)pI;
+ p_pad = (struct hpi_control_cache_pad *)pC;
if (!(p_pad->field_valid_flags & (1 <<
HPI_CTL_ATTR_INDEX(phm->u.c.
@@ -531,7 +522,8 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
HPI_DEBUG_LOG(VERBOSE, "%s Adap %d, Ctl %d, Type %d, Attr %d\n",
found ? "Cached" : "Uncached", phm->adapter_index,
- pI->control_index, pI->control_type, phm->u.c.attribute);
+ pC->u.i.control_index, pC->u.i.control_type,
+ phm->u.c.attribute);
if (found) {
phr->size = (u16)response_size;
@@ -543,34 +535,36 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
return found;
}
-/** Updates the cache with Set values.
-
-Only update if no error.
-Volume and Level return the limited values in the response, so use these
-Multiplexer does so use sent values
-*/
-void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *p_cache,
+short hpi_check_control_cache(struct hpi_control_cache *p_cache,
struct hpi_message *phm, struct hpi_response *phr)
{
- struct hpi_control_cache_single *pC;
struct hpi_control_cache_info *pI;
- if (phr->error)
- return;
-
if (!find_control(phm->obj_index, p_cache, &pI)) {
HPI_DEBUG_LOG(VERBOSE,
"HPICMN find_control() failed for adap %d\n",
phm->adapter_index);
- return;
+ return 0;
}
- /* pC is the default cached control strucure.
- May be cast to something else in the following switch statement.
- */
- pC = (struct hpi_control_cache_single *)pI;
+ phr->error = 0;
+ phr->specific_error = 0;
+ phr->version = 0;
+
+ return hpi_check_control_cache_single((struct hpi_control_cache_single
+ *)pI, phm, phr);
+}
+
+/** Updates the cache with Set values.
- switch (pI->control_type) {
+Only update if no error.
+Volume and Level return the limited values in the response, so use these
+Multiplexer does so use sent values
+*/
+void hpi_cmn_control_cache_sync_to_msg_single(struct hpi_control_cache_single
+ *pC, struct hpi_message *phm, struct hpi_response *phr)
+{
+ switch (pC->u.i.control_type) {
case HPI_CONTROL_VOLUME:
if (phm->u.c.attribute == HPI_VOLUME_GAIN) {
pC->u.vol.an_log[0] = phr->u.c.an_log_value[0];
@@ -625,6 +619,30 @@ void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *p_cache,
}
}
+void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *p_cache,
+ struct hpi_message *phm, struct hpi_response *phr)
+{
+ struct hpi_control_cache_single *pC;
+ struct hpi_control_cache_info *pI;
+
+ if (phr->error)
+ return;
+
+ if (!find_control(phm->obj_index, p_cache, &pI)) {
+ HPI_DEBUG_LOG(VERBOSE,
+ "HPICMN find_control() failed for adap %d\n",
+ phm->adapter_index);
+ return;
+ }
+
+ /* pC is the default cached control structure.
+ May be cast to something else in the following switch statement.
+ */
+ pC = (struct hpi_control_cache_single *)pI;
+
+ hpi_cmn_control_cache_sync_to_msg_single(pC, phm, phr);
+}
+
/** Allocate control cache.
\return Cache pointer, or NULL if allocation fails.
@@ -637,12 +655,13 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32 control_count,
if (!p_cache)
return NULL;
- p_cache->p_info = kcalloc(control_count, sizeof(*p_cache->p_info),
- GFP_KERNEL);
+ p_cache->p_info =
+ kcalloc(control_count, sizeof(*p_cache->p_info), GFP_KERNEL);
if (!p_cache->p_info) {
kfree(p_cache);
return NULL;
}
+
p_cache->cache_size_in_bytes = size_in_bytes;
p_cache->control_count = control_count;
p_cache->p_cache = p_dsp_control_buffer;
diff --git a/sound/pci/asihpi/hpicmn.h b/sound/pci/asihpi/hpicmn.h
index e44121283047..46629c2d101b 100644
--- a/sound/pci/asihpi/hpicmn.h
+++ b/sound/pci/asihpi/hpicmn.h
@@ -1,7 +1,7 @@
/**
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,11 @@
struct hpi_adapter_obj;
/* a function that takes an adapter obj and returns an int */
-typedef int adapter_int_func(struct hpi_adapter_obj *pao);
+typedef int adapter_int_func(struct hpi_adapter_obj *pao, u32 message);
+
+#define HPI_IRQ_NONE (0)
+#define HPI_IRQ_MESSAGE (1)
+#define HPI_IRQ_MIXER (2)
struct hpi_adapter_obj {
struct hpi_pci pci; /* PCI info - bus#,dev#,address etc */
@@ -33,6 +37,9 @@ struct hpi_adapter_obj {
u16 dsp_crashed;
u16 has_control_cache;
void *priv;
+ adapter_int_func *irq_query_and_clear;
+ struct hpi_hostbuffer_status *instream_host_buffer_status;
+ struct hpi_hostbuffer_status *outstream_host_buffer_status;
};
struct hpi_control_cache {
@@ -55,13 +62,21 @@ void hpi_delete_adapter(struct hpi_adapter_obj *pao);
short hpi_check_control_cache(struct hpi_control_cache *pC,
struct hpi_message *phm, struct hpi_response *phr);
+
+short hpi_check_control_cache_single(struct hpi_control_cache_single *pC,
+ struct hpi_message *phm, struct hpi_response *phr);
+
struct hpi_control_cache *hpi_alloc_control_cache(const u32
number_of_controls, const u32 size_in_bytes, u8 *pDSP_control_buffer);
+
void hpi_free_control_cache(struct hpi_control_cache *p_cache);
void hpi_cmn_control_cache_sync_to_msg(struct hpi_control_cache *pC,
struct hpi_message *phm, struct hpi_response *phr);
+void hpi_cmn_control_cache_sync_to_msg_single(struct hpi_control_cache_single
+ *pC, struct hpi_message *phm, struct hpi_response *phr);
+
u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr);
hpi_handler_func HPI_COMMON;
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 032d563e3708..7eb617175fde 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -1,7 +1,7 @@
/******************************************************************************
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -37,11 +37,15 @@ static u16 gwSSX2_bypass;
static void hpi_init_message(struct hpi_message *phm, u16 object,
u16 function)
{
- memset(phm, 0, sizeof(*phm));
+ u16 size;
+
if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
- phm->size = msg_size[object];
+ size = msg_size[object];
else
- phm->size = sizeof(*phm);
+ size = sizeof(*phm);
+
+ memset(phm, 0, size);
+ phm->size = size;
if (gwSSX2_bypass)
phm->type = HPI_TYPE_SSX2BYPASS_MESSAGE;
@@ -60,12 +64,16 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
u16 error)
{
- memset(phr, 0, sizeof(*phr));
- phr->type = HPI_TYPE_RESPONSE;
+ u16 size;
+
if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
- phr->size = res_size[object];
+ size = res_size[object];
else
- phr->size = sizeof(*phr);
+ size = sizeof(*phr);
+
+ memset(phr, 0, sizeof(*phr));
+ phr->size = size;
+ phr->type = HPI_TYPE_RESPONSE;
phr->object = object;
phr->function = function;
phr->error = error;
@@ -86,7 +94,7 @@ void hpi_init_message_response(struct hpi_message *phm,
static void hpi_init_messageV1(struct hpi_message_header *phm, u16 size,
u16 object, u16 function)
{
- memset(phm, 0, sizeof(*phm));
+ memset(phm, 0, size);
if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
phm->size = size;
phm->type = HPI_TYPE_REQUEST;
@@ -100,7 +108,9 @@ static void hpi_init_messageV1(struct hpi_message_header *phm, u16 size,
void hpi_init_responseV1(struct hpi_response_header *phr, u16 size,
u16 object, u16 function)
{
- memset(phr, 0, sizeof(*phr));
+ (void)object;
+ (void)function;
+ memset(phr, 0, size);
phr->size = size;
phr->version = 1;
phr->type = HPI_TYPE_RESPONSE;
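Several of the hpimsginit.c helpers now compute the object-specific size up front and memset() only that many bytes instead of always clearing the whole message union. The reduced pattern, with a made-up header type for illustration:

#include <linux/string.h>
#include <linux/types.h>

struct msg_hdr {
	u16 size;
	u16 type;
	/* object-specific payload follows in the real structures */
};

/* Zero exactly the bytes this object's layout occupies, then record
 * that size in the header so no more than needed is sent. */
static void init_msg(struct msg_hdr *m, u16 size)
{
	memset(m, 0, size);
	m->size = size;
}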
diff --git a/sound/pci/asihpi/hpimsgx.c b/sound/pci/asihpi/hpimsgx.c
index d4790ddc225c..736f45337fc7 100644
--- a/sound/pci/asihpi/hpimsgx.c
+++ b/sound/pci/asihpi/hpimsgx.c
@@ -1,7 +1,7 @@
/******************************************************************************
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +35,7 @@ static struct pci_device_id asihpi_pci_tbl[] = {
static struct hpios_spinlock msgx_lock;
static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
+static int logging_enabled = 1;
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
*pci_info)
@@ -312,7 +313,9 @@ static void instream_message(struct hpi_message *phm,
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
- HPI_DEBUG_MESSAGE(DEBUG, phm);
+
+ if (logging_enabled)
+ HPI_DEBUG_MESSAGE(DEBUG, phm);
if (phm->type != HPI_TYPE_REQUEST) {
hpi_init_response(phr, phm->object, phm->function,
@@ -352,8 +355,14 @@ void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
hw_entry_point(phm, phr);
break;
}
- HPI_DEBUG_RESPONSE(phr);
+ if (logging_enabled)
+ HPI_DEBUG_RESPONSE(phr);
+
+ if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
+ hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
+ logging_enabled = 0;
+ }
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
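hpi_send_recv_ex() now latches verbose logging off after the first fatal DSP communication error and drops the debug level to errors only, so a dead adapter does not flood the log. The same latch in isolation, wrapped in a hypothetical helper (the driver does this inline):

static int logging_enabled = 1;

static void log_response_throttled(struct hpi_response *phr)
{
	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	/* After a fatal communication error every further message would
	 * just repeat the failure, so silence routine logging for good. */
	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}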
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7f0272032fbb..6aa677e60555 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -1,7 +1,8 @@
/*******************************************************************************
-
AudioScience HPI driver
- Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
+ Common Linux HPI ioctl and module probe/remove functions
+
+ Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
@@ -12,11 +13,6 @@
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-Common Linux HPI ioctl and module probe/remove functions
*******************************************************************************/
#define SOURCEFILE_NAME "hpioctl.c"
@@ -29,6 +25,7 @@ Common Linux HPI ioctl and module probe/remove functions
#include "hpicmn.h"
#include <linux/fs.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <asm/uaccess.h>
@@ -307,10 +304,38 @@ out:
return err;
}
+static int asihpi_irq_count;
+
+static irqreturn_t asihpi_isr(int irq, void *dev_id)
+{
+ struct hpi_adapter *a = dev_id;
+ int handled;
+
+ if (!a->adapter->irq_query_and_clear) {
+ pr_err("asihpi_isr ASI%04X:%d no handler\n", a->adapter->type,
+ a->adapter->index);
+ return IRQ_NONE;
+ }
+
+ handled = a->adapter->irq_query_and_clear(a->adapter, 0);
+
+ if (!handled)
+ return IRQ_NONE;
+
+ asihpi_irq_count++;
+ /* printk(KERN_INFO "asihpi_isr %d ASI%04X:%d irq handled\n",
+ asihpi_irq_count, a->adapter->type, a->adapter->index); */
+
+ if (a->interrupt_callback)
+ a->interrupt_callback(a);
+
+ return IRQ_HANDLED;
+}
+
int asihpi_adapter_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
- int idx, nm;
+ int idx, nm, low_latency_mode = 0, irq_supported = 0;
int adapter_index;
unsigned int memlen;
struct hpi_message hm;
@@ -388,8 +413,38 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
hm.adapter_index = adapter.adapter->index;
hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
- if (hr.error)
+ if (hr.error) {
+ HPI_DEBUG_LOG(ERROR, "HPI_ADAPTER_OPEN failed, aborting\n");
goto err;
+ }
+
+ /* Check if current mode == Low Latency mode */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_MODE);
+ hm.adapter_index = adapter.adapter->index;
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+
+ if (!hr.error
+ && hr.u.ax.mode.adapter_mode == HPI_ADAPTER_MODE_LOW_LATENCY)
+ low_latency_mode = 1;
+ else
+ dev_info(&pci_dev->dev,
+ "Adapter at index %d is not in low latency mode\n",
+ adapter.adapter->index);
+
+ /* Check if IRQs are supported */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_GET_PROPERTY);
+ hm.adapter_index = adapter.adapter->index;
+ hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ;
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+ if (hr.error || !hr.u.ax.property_get.parameter1) {
+ dev_info(&pci_dev->dev,
+ "IRQs not supported by adapter at index %d\n",
+ adapter.adapter->index);
+ } else {
+ irq_supported = 1;
+ }
/* WARNING can't init mutex in 'adapter'
* and then copy it to adapters[] ?!?!
@@ -398,6 +453,44 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
mutex_init(&adapters[adapter_index].mutex);
pci_set_drvdata(pci_dev, &adapters[adapter_index]);
+ if (low_latency_mode && irq_supported) {
+ if (!adapter.adapter->irq_query_and_clear) {
+ dev_err(&pci_dev->dev,
+ "no IRQ handler for adapter %d, aborting\n",
+ adapter.adapter->index);
+ goto err;
+ }
+
+ /* Disable IRQ generation on DSP side by setting the rate to 0 */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_SET_PROPERTY);
+ hm.adapter_index = adapter.adapter->index;
+ hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
+ hm.u.ax.property_set.parameter1 = 0;
+ hm.u.ax.property_set.parameter2 = 0;
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+ if (hr.error) {
+ HPI_DEBUG_LOG(ERROR,
+ "HPI_ADAPTER_GET_MODE failed, aborting\n");
+ goto err;
+ }
+
+ /* Note: request_irq calls asihpi_isr here */
+ if (request_irq(pci_dev->irq, asihpi_isr, IRQF_SHARED,
+ "asihpi", &adapters[adapter_index])) {
+ dev_err(&pci_dev->dev, "request_irq(%d) failed\n",
+ pci_dev->irq);
+ goto err;
+ }
+
+ adapters[adapter_index].interrupt_mode = 1;
+
+ dev_info(&pci_dev->dev, "using irq %d\n", pci_dev->irq);
+ adapters[adapter_index].irq = pci_dev->irq;
+ } else {
+ dev_info(&pci_dev->dev, "using polled mode\n");
+ }
+
dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
adapter.adapter->type, adapter_index);
@@ -431,6 +524,15 @@ void asihpi_adapter_remove(struct pci_dev *pci_dev)
pa = pci_get_drvdata(pci_dev);
pci = pa->adapter->pci;
+ /* Disable IRQ generation on DSP side */
+ hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
+ HPI_ADAPTER_SET_PROPERTY);
+ hm.adapter_index = pa->adapter->index;
+ hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
+ hm.u.ax.property_set.parameter1 = 0;
+ hm.u.ax.property_set.parameter2 = 0;
+ hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
+
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_DELETE);
hm.adapter_index = pa->adapter->index;
@@ -442,8 +544,10 @@ void asihpi_adapter_remove(struct pci_dev *pci_dev)
iounmap(pci.ap_mem_base[idx]);
}
- if (pa->p_buffer)
- vfree(pa->p_buffer);
+ if (pa->irq)
+ free_irq(pa->irq, pa);
+
+ vfree(pa->p_buffer);
if (1)
dev_info(&pci_dev->dev,
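hpioctl.c registers its handler with IRQF_SHARED, so asihpi_isr() must be able to say "not mine": it returns IRQ_NONE when irq_query_and_clear() reports nothing pending and IRQ_HANDLED after kicking the deferred work. A stripped-down version of that contract, with hypothetical my_dev_* names:

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct t;
	/* ... */
};

static int my_dev_irq_query_and_clear(struct my_dev *dev);

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* On a shared line the interrupt may belong to another device;
	 * the query-and-clear callback tells us whether it was ours. */
	if (!my_dev_irq_query_and_clear(dev))
		return IRQ_NONE;

	/* Keep the hard-IRQ path short; defer the period bookkeeping. */
	tasklet_schedule(&dev->t);
	return IRQ_HANDLED;
}

Probe and remove pair the registration as request_irq(pci_dev->irq, my_isr, IRQF_SHARED, "my_drv", dev) and free_irq(pci_dev->irq, dev), matching the hunks above for the adapters[] entry.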
diff --git a/sound/pci/asihpi/hpios.h b/sound/pci/asihpi/hpios.h
index d3fbd0d76c37..4e383601b9cf 100644
--- a/sound/pci/asihpi/hpios.h
+++ b/sound/pci/asihpi/hpios.h
@@ -41,10 +41,6 @@ HPI Operating System Specific macros for Linux Kernel driver
#define HPI_NO_OS_FILE_OPS
-#ifdef CONFIG_64BIT
-#define HPI64BIT
-#endif
-
/** Details of a memory area allocated with pci_alloc_consistent
Need all info for parameters to pci_free_consistent
*/
@@ -155,6 +151,10 @@ struct hpi_adapter {
struct hpi_adapter_obj *adapter;
struct snd_card *snd_card;
+ int irq;
+ int interrupt_mode;
+ void (*interrupt_callback) (struct hpi_adapter *);
+
/* mutex prevents contention for one card
between multiple user programs (via ioctl) */
struct mutex mutex;
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 7895c5d300c7..9c1c4452a8ee 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -688,9 +688,7 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
if (! dma->substream || ! dma->running)
return;
dev_dbg(chip->card->dev, "XRUN detected (DMA %d)\n", dma->ops->type);
- snd_pcm_stream_lock(dma->substream);
- snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(dma->substream);
+ snd_pcm_stop_xrun(dma->substream);
}
/*
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 3c3241309a30..b2f63e0727de 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -638,9 +638,7 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
if (! dma->substream || ! dma->running)
return;
dev_dbg(chip->card->dev, "XRUN detected (DMA %d)\n", dma->ops->type);
- snd_pcm_stream_lock(dma->substream);
- snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(dma->substream);
+ snd_pcm_stop_xrun(dma->substream);
}
/*
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index 21ce31f636bc..996369134ea8 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -48,11 +48,10 @@ static void vortex_fix_latency(struct pci_dev *vortex)
{
int rc;
if (!(rc = pci_write_config_byte(vortex, 0x40, 0xff))) {
- pr_info( CARD_NAME
- ": vortex latency is 0xff\n");
+ dev_info(&vortex->dev, "vortex latency is 0xff\n");
} else {
- pr_warn( CARD_NAME
- ": could not set vortex latency: pci error 0x%x\n", rc);
+ dev_warn(&vortex->dev,
+ "could not set vortex latency: pci error 0x%x\n", rc);
}
}
@@ -70,11 +69,10 @@ static void vortex_fix_agp_bridge(struct pci_dev *via)
if (!(rc = pci_read_config_byte(via, 0x42, &value))
&& ((value & 0x10)
|| !(rc = pci_write_config_byte(via, 0x42, value | 0x10)))) {
- pr_info( CARD_NAME
- ": bridge config is 0x%x\n", value | 0x10);
+ dev_info(&via->dev, "bridge config is 0x%x\n", value | 0x10);
} else {
- pr_warn( CARD_NAME
- ": could not set vortex latency: pci error 0x%x\n", rc);
+ dev_warn(&via->dev,
+ "could not set vortex latency: pci error 0x%x\n", rc);
}
}
@@ -97,7 +95,8 @@ static void snd_vortex_workaround(struct pci_dev *vortex, int fix)
PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL);
}
if (via) {
- pr_info( CARD_NAME ": Activating latency workaround...\n");
+ dev_info(&vortex->dev,
+ "Activating latency workaround...\n");
vortex_fix_latency(vortex);
vortex_fix_agp_bridge(via);
}
@@ -153,7 +152,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
return err;
if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
- pr_err( "error to set DMA mask\n");
+ dev_err(card->dev, "error to set DMA mask\n");
pci_disable_device(pci);
return -ENXIO;
}
@@ -182,7 +181,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
chip->mmio = pci_ioremap_bar(pci, 0);
if (!chip->mmio) {
- pr_err( "MMIO area remap failed.\n");
+ dev_err(card->dev, "MMIO area remap failed.\n");
err = -ENOMEM;
goto ioremap_out;
}
@@ -191,14 +190,14 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
* This must be done before we do request_irq otherwise we can get spurious
* interrupts that we do not handle properly and make a mess of things */
if ((err = vortex_core_init(chip)) != 0) {
- pr_err( "hw core init failed\n");
+ dev_err(card->dev, "hw core init failed\n");
goto core_out;
}
if ((err = request_irq(pci->irq, vortex_interrupt,
IRQF_SHARED, KBUILD_MODNAME,
chip)) != 0) {
- pr_err( "cannot grab irq\n");
+ dev_err(card->dev, "cannot grab irq\n");
goto irq_out;
}
chip->irq = pci->irq;
@@ -315,7 +314,7 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_VORTEX_SYNTH,
sizeof(snd_vortex_synth_arg_t), &wave) < 0
|| wave == NULL) {
- snd_printk(KERN_ERR "Can't initialize Aureal wavetable synth\n");
+ dev_err(card->dev, "Can't initialize Aureal wavetable synth\n");
} else {
snd_vortex_synth_arg_t *arg;
@@ -342,11 +341,11 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
chip->rev = pci->revision;
#ifdef CHIP_AU8830
if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
- pr_alert(
- "vortex: The revision (%x) of your card has not been seen before.\n",
+ dev_alert(card->dev,
+ "The revision (%x) of your card has not been seen before.\n",
chip->rev);
- pr_alert(
- "vortex: Please email the results of 'lspci -vv' to openvortex-dev@nongnu.org.\n");
+ dev_alert(card->dev,
+ "Please email the results of 'lspci -vv' to openvortex-dev@nongnu.org.\n");
snd_card_free(card);
err = -ENODEV;
return err;
diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h
index 466a5c8e8354..3a8fefefea77 100644
--- a/sound/pci/au88x0/au88x0.h
+++ b/sound/pci/au88x0/au88x0.h
@@ -243,7 +243,7 @@ static int vortex_core_init(vortex_t * card);
static int vortex_core_shutdown(vortex_t * card);
static void vortex_enable_int(vortex_t * card);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
-static int vortex_alsafmt_aspfmt(int alsafmt);
+static int vortex_alsafmt_aspfmt(int alsafmt, vortex_t *v);
/* Connection stuff. */
static void vortex_connect_default(vortex_t * vortex, int en);
@@ -278,7 +278,7 @@ static void vortex_mix_setvolumebyte(vortex_t * vortex, unsigned char mix,
static void vortex_Vort3D_enable(vortex_t * v);
static void vortex_Vort3D_disable(vortex_t * v);
static void vortex_Vort3D_connect(vortex_t * vortex, int en);
-static void vortex_Vort3D_InitializeSource(a3dsrc_t * a, int en);
+static void vortex_Vort3D_InitializeSource(a3dsrc_t *a, int en, vortex_t *v);
#endif
/* Driver stuff. */
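Helpers such as vortex_alsafmt_aspfmt() and vortex_Vort3D_InitializeSource() previously had no vortex_t context, so switching their pr_* messages to dev_* means threading the pointer through the prototypes, as shown above. A minimal sketch of the pattern, with illustrative names (my_chip, my_helper) rather than the driver's:

#include <linux/device.h>
#include <linux/printk.h>

struct my_chip {
	struct device *dev;	/* e.g. card->dev */
};

/* Before: no context available, only a bare pr_err(). */
static int my_helper_old(int fmt)
{
	if (fmt < 0) {
		pr_err("my_driver: unsupported format %d\n", fmt);
		return -EINVAL;
	}
	return 0;
}

/* After: the chip is passed in so the message is tied to the device. */
static int my_helper_new(int fmt, struct my_chip *chip)
{
	if (fmt < 0) {
		dev_err(chip->dev, "unsupported format %d\n", fmt);
		return -EINVAL;
	}
	return 0;
}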
diff --git a/sound/pci/au88x0/au88x0_a3d.c b/sound/pci/au88x0/au88x0_a3d.c
index 30f760e3d2c0..ab0f87312911 100644
--- a/sound/pci/au88x0/au88x0_a3d.c
+++ b/sound/pci/au88x0/au88x0_a3d.c
@@ -484,12 +484,13 @@ static void a3dsrc_ZeroState(a3dsrc_t * a)
}
/* Reset entire A3D engine */
-static void a3dsrc_ZeroStateA3D(a3dsrc_t * a)
+static void a3dsrc_ZeroStateA3D(a3dsrc_t *a, vortex_t *v)
{
int i, var, var2;
if ((a->vortex) == NULL) {
- pr_err( "vortex: ZeroStateA3D: ERROR: a->vortex is NULL\n");
+ dev_err(v->card->dev,
+ "ZeroStateA3D: ERROR: a->vortex is NULL\n");
return;
}
@@ -601,7 +602,7 @@ static void vortex_Vort3D_enable(vortex_t *v)
Vort3DRend_Initialize(v, XT_HEADPHONE);
for (i = 0; i < NR_A3D; i++) {
vortex_A3dSourceHw_Initialize(v, i % 4, i >> 2);
- a3dsrc_ZeroStateA3D(&(v->a3d[0]));
+ a3dsrc_ZeroStateA3D(&v->a3d[0], v);
}
/* Register ALSA controls */
vortex_a3d_register_controls(v);
@@ -628,15 +629,15 @@ static void vortex_Vort3D_connect(vortex_t * v, int en)
v->mixxtlk[0] =
vortex_adb_checkinout(v, v->fixed_res, en, VORTEX_RESOURCE_MIXIN);
if (v->mixxtlk[0] < 0) {
- pr_warn
- ("vortex: vortex_Vort3D: ERROR: not enough free mixer resources.\n");
+ dev_warn(v->card->dev,
+ "vortex_Vort3D: ERROR: not enough free mixer resources.\n");
return;
}
v->mixxtlk[1] =
vortex_adb_checkinout(v, v->fixed_res, en, VORTEX_RESOURCE_MIXIN);
if (v->mixxtlk[1] < 0) {
- pr_warn
- ("vortex: vortex_Vort3D: ERROR: not enough free mixer resources.\n");
+ dev_warn(v->card->dev,
+ "vortex_Vort3D: ERROR: not enough free mixer resources.\n");
return;
}
#endif
@@ -676,11 +677,11 @@ static void vortex_Vort3D_connect(vortex_t * v, int en)
}
/* Initialize one single A3D source. */
-static void vortex_Vort3D_InitializeSource(a3dsrc_t * a, int en)
+static void vortex_Vort3D_InitializeSource(a3dsrc_t *a, int en, vortex_t *v)
{
if (a->vortex == NULL) {
- pr_warn
- ("vortex: Vort3D_InitializeSource: A3D source not initialized\n");
+ dev_warn(v->card->dev,
+ "Vort3D_InitializeSource: A3D source not initialized\n");
return;
}
if (en) {
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 72e81286b70e..4667c3232b7f 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -285,8 +285,8 @@ vortex_mixer_addWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: mixAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
- pr_err(
- "vortex_mixer_addWTD: lifeboat overflow\n");
+ dev_err(vortex->card->dev,
+ "vortex_mixer_addWTD: lifeboat overflow\n");
return 0;
}
}
@@ -303,7 +303,7 @@ vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
eax = hwread(vortex->mmio, VORTEX_MIXER_SR);
if (((1 << ch) & eax) == 0) {
- pr_err( "mix ALARM %x\n", eax);
+ dev_err(vortex->card->dev, "mix ALARM %x\n", eax);
return 0;
}
ebp = VORTEX_MIXER_CHNBASE + (ch << 2);
@@ -324,8 +324,8 @@ vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
//printk(KERN_INFO "vortex: mixdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != mix) {
if ((esi) > 0xf) {
- pr_err(
- "vortex: mixdelWTD: error lifeboat overflow\n");
+ dev_err(vortex->card->dev,
+ "mixdelWTD: error lifeboat overflow\n");
return 0;
}
esp14 = ebx;
@@ -492,7 +492,7 @@ vortex_src_persist_convratio(vortex_t * vortex, unsigned char src, int ratio)
hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), ratio);
temp = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2));
if ((++lifeboat) > 0x9) {
- pr_err( "Vortex: Src cvr fail\n");
+ dev_err(vortex->card->dev, "Src cvr fail\n");
break;
}
}
@@ -684,8 +684,8 @@ vortex_src_addWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: srcAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
- pr_err(
- "vortex_src_addWTD: lifeboat overflow\n");
+ dev_err(vortex->card->dev,
+ "vortex_src_addWTD: lifeboat overflow\n");
return 0;
}
}
@@ -703,7 +703,7 @@ vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
eax = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR);
if (((1 << ch) & eax) == 0) {
- pr_err( "src alarm\n");
+ dev_err(vortex->card->dev, "src alarm\n");
return 0;
}
ebp = VORTEX_SRC_CHNBASE + (ch << 2);
@@ -724,8 +724,8 @@ vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
//printk(KERN_INFO "vortex: srcdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != src) {
if ((esi) > 0xf) {
- pr_warn
- ("vortex: srcdelWTD: error, lifeboat overflow\n");
+ dev_warn(vortex->card->dev,
+ "srcdelWTD: error, lifeboat overflow\n");
return 0;
}
esp14 = ebx;
@@ -819,8 +819,8 @@ vortex_fifo_setadbctrl(vortex_t * vortex, int fifo, int stereo, int priority,
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
- pr_err(
- "Vortex: vortex_fifo_setadbctrl fail\n");
+ dev_err(vortex->card->dev,
+ "vortex_fifo_setadbctrl fail\n");
break;
}
}
@@ -915,7 +915,8 @@ vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority,
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
- pr_err( "Vortex: vortex_fifo_setwtctrl fail\n");
+ dev_err(vortex->card->dev,
+ "vortex_fifo_setwtctrl fail\n");
break;
}
}
@@ -1042,7 +1043,7 @@ static void vortex_fifo_init(vortex_t * vortex)
for (x = NR_ADB - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1));
if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1))
- pr_err( "bad adb fifo reset!");
+ dev_err(vortex->card->dev, "bad adb fifo reset!");
vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
@@ -1053,9 +1054,9 @@ static void vortex_fifo_init(vortex_t * vortex)
for (x = NR_WT - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, FIFO_U0);
if (hwread(vortex->mmio, addr) != FIFO_U0)
- pr_err(
- "bad wt fifo reset (0x%08x, 0x%08x)!\n",
- addr, hwread(vortex->mmio, addr));
+ dev_err(vortex->card->dev,
+ "bad wt fifo reset (0x%08x, 0x%08x)!\n",
+ addr, hwread(vortex->mmio, addr));
vortex_fifo_clearwtdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
@@ -1213,8 +1214,9 @@ static int vortex_adbdma_bufshift(vortex_t * vortex, int adbdma)
if (dma->period_virt >= dma->nr_periods)
dma->period_virt -= dma->nr_periods;
if (delta != 1)
- pr_info( "vortex: %d virt=%d, real=%d, delta=%d\n",
- adbdma, dma->period_virt, dma->period_real, delta);
+ dev_info(vortex->card->dev,
+ "%d virt=%d, real=%d, delta=%d\n",
+ adbdma, dma->period_virt, dma->period_real, delta);
return delta;
}
@@ -1482,8 +1484,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
dma->period_real = page;
if (delta != 1)
- pr_warn( "vortex: wt virt = %d, delta = %d\n",
- dma->period_virt, delta);
+ dev_warn(vortex->card->dev, "wt virt = %d, delta = %d\n",
+ dma->period_virt, delta);
return delta;
}
@@ -1667,9 +1669,9 @@ vortex_adb_addroutes(vortex_t * vortex, unsigned char channel,
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (temp << 2)) & ADB_MASK;
if ((lifeboat++) > ADB_MASK) {
- pr_err(
- "vortex_adb_addroutes: unending route! 0x%x\n",
- *route);
+ dev_err(vortex->card->dev,
+ "vortex_adb_addroutes: unending route! 0x%x\n",
+ *route);
return;
}
}
@@ -1703,9 +1705,9 @@ vortex_adb_delroutes(vortex_t * vortex, unsigned char channel,
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (prev << 2)) & ADB_MASK;
if (((lifeboat++) > ADB_MASK) || (temp == ADB_MASK)) {
- pr_err(
- "vortex_adb_delroutes: route not found! 0x%x\n",
- route0);
+ dev_err(vortex->card->dev,
+ "vortex_adb_delroutes: route not found! 0x%x\n",
+ route0);
return;
}
}
@@ -2045,7 +2047,9 @@ vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
}
}
}
- pr_err( "vortex: FATAL: ResManager: resource type %d exhausted.\n", restype);
+ dev_err(vortex->card->dev,
+ "FATAL: ResManager: resource type %d exhausted.\n",
+ restype);
return -ENOMEM;
}
@@ -2173,11 +2177,13 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
memset(stream->resources, 0,
sizeof(unsigned char) *
VORTEX_RESOURCE_LAST);
- pr_err( "vortex: out of A3D sources. Sorry\n");
+ dev_err(vortex->card->dev,
+ "out of A3D sources. Sorry\n");
return -EBUSY;
}
/* (De)Initialize A3D hardware source. */
- vortex_Vort3D_InitializeSource(&(vortex->a3d[a3d]), en);
+ vortex_Vort3D_InitializeSource(&vortex->a3d[a3d], en,
+ vortex);
}
/* Make SPDIF out exclusive to "spdif" device when in use. */
if ((stream->type == VORTEX_PCM_SPDIF) && (en)) {
@@ -2421,7 +2427,7 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
hwread(vortex->mmio, VORTEX_IRQ_SOURCE);
// Is at least one IRQ flag set?
if (source == 0) {
- pr_err( "vortex: missing irq source\n");
+ dev_err(vortex->card->dev, "missing irq source\n");
return IRQ_NONE;
}
@@ -2429,19 +2435,19 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
// Attend every interrupt source.
if (unlikely(source & IRQ_ERR_MASK)) {
if (source & IRQ_FATAL) {
- pr_err( "vortex: IRQ fatal error\n");
+ dev_err(vortex->card->dev, "IRQ fatal error\n");
}
if (source & IRQ_PARITY) {
- pr_err( "vortex: IRQ parity error\n");
+ dev_err(vortex->card->dev, "IRQ parity error\n");
}
if (source & IRQ_REG) {
- pr_err( "vortex: IRQ reg error\n");
+ dev_err(vortex->card->dev, "IRQ reg error\n");
}
if (source & IRQ_FIFO) {
- pr_err( "vortex: IRQ fifo error\n");
+ dev_err(vortex->card->dev, "IRQ fifo error\n");
}
if (source & IRQ_DMA) {
- pr_err( "vortex: IRQ dma error\n");
+ dev_err(vortex->card->dev, "IRQ dma error\n");
}
handled = 1;
}
@@ -2489,7 +2495,7 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
}
if (!handled) {
- pr_err( "vortex: unknown irq source %x\n", source);
+ dev_err(vortex->card->dev, "unknown irq source %x\n", source);
}
return IRQ_RETVAL(handled);
}
@@ -2546,7 +2552,7 @@ vortex_codec_write(struct snd_ac97 * codec, unsigned short addr, unsigned short
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
- pr_err( "vortex: ac97 codec stuck busy\n");
+ dev_err(card->card->dev, "ac97 codec stuck busy\n");
return;
}
}
@@ -2572,7 +2578,7 @@ static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
- pr_err( "vortex: ac97 codec stuck busy\n");
+ dev_err(card->card->dev, "ac97 codec stuck busy\n");
return 0xffff;
}
}
@@ -2586,7 +2592,8 @@ static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short
udelay(100);
data = hwread(card->mmio, VORTEX_CODEC_IO);
if (lifeboat++ > POLL_COUNT) {
- pr_err( "vortex: ac97 address never arrived\n");
+ dev_err(card->card->dev,
+ "ac97 address never arrived\n");
return 0xffff;
}
} while ((data & VORTEX_CODEC_ADDMASK) !=
@@ -2683,7 +2690,7 @@ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode)
static int vortex_core_init(vortex_t *vortex)
{
- pr_info( "Vortex: init.... ");
+ dev_info(vortex->card->dev, "init started\n");
/* Hardware Init. */
hwwrite(vortex->mmio, VORTEX_CTRL, 0xffffffff);
msleep(5);
@@ -2728,7 +2735,7 @@ static int vortex_core_init(vortex_t *vortex)
//vortex_enable_timer_int(vortex);
//vortex_disable_timer_int(vortex);
- pr_info( "done.\n");
+ dev_info(vortex->card->dev, "init.... done.\n");
spin_lock_init(&vortex->lock);
return 0;
@@ -2737,7 +2744,7 @@ static int vortex_core_init(vortex_t *vortex)
static int vortex_core_shutdown(vortex_t * vortex)
{
- pr_info( "Vortex: shutdown...");
+ dev_info(vortex->card->dev, "shutdown started\n");
#ifndef CHIP_AU8820
vortex_eq_free(vortex);
vortex_Vort3D_disable(vortex);
@@ -2759,13 +2766,13 @@ static int vortex_core_shutdown(vortex_t * vortex)
msleep(5);
hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffff);
- pr_info( "done.\n");
+ dev_info(vortex->card->dev, "shutdown.... done.\n");
return 0;
}
/* Alsa support. */
-static int vortex_alsafmt_aspfmt(int alsafmt)
+static int vortex_alsafmt_aspfmt(int alsafmt, vortex_t *v)
{
int fmt;
@@ -2793,7 +2800,8 @@ static int vortex_alsafmt_aspfmt(int alsafmt)
break;
default:
fmt = 0x8;
- pr_err( "vortex: format unsupported %d\n", alsafmt);
+ dev_err(v->card->dev,
+ "format unsupported %d\n", alsafmt);
break;
}
return fmt;
diff --git a/sound/pci/au88x0/au88x0_eq.c b/sound/pci/au88x0/au88x0_eq.c
index 9404ba73eaf6..9585c5c63b96 100644
--- a/sound/pci/au88x0/au88x0_eq.c
+++ b/sound/pci/au88x0/au88x0_eq.c
@@ -845,7 +845,8 @@ snd_vortex_peaks_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *u
vortex_Eqlzr_GetAllPeaks(vortex, peaks, &count);
if (count != 20) {
- pr_err( "vortex: peak count error 20 != %d \n", count);
+ dev_err(vortex->card->dev,
+ "peak count error 20 != %d\n", count);
return -1;
}
for (i = 0; i < 20; i++)
diff --git a/sound/pci/au88x0/au88x0_game.c b/sound/pci/au88x0/au88x0_game.c
index 72daf6cf8169..151815b857a0 100644
--- a/sound/pci/au88x0/au88x0_game.c
+++ b/sound/pci/au88x0/au88x0_game.c
@@ -98,7 +98,8 @@ static int vortex_gameport_register(vortex_t *vortex)
vortex->gameport = gp = gameport_allocate_port();
if (!gp) {
- pr_err( "vortex: cannot allocate memory for gameport\n");
+ dev_err(vortex->card->dev,
+ "cannot allocate memory for gameport\n");
return -ENOMEM;
}
diff --git a/sound/pci/au88x0/au88x0_mpu401.c b/sound/pci/au88x0/au88x0_mpu401.c
index 328c1943c0c3..1025e55ca854 100644
--- a/sound/pci/au88x0/au88x0_mpu401.c
+++ b/sound/pci/au88x0/au88x0_mpu401.c
@@ -73,7 +73,7 @@ static int snd_vortex_midi(vortex_t *vortex)
/* Check if anything is OK. */
temp = hwread(vortex->mmio, VORTEX_MIDI_DATA);
if (temp != MPU401_ACK /*0xfe */ ) {
- pr_err( "midi port doesn't acknowledge!\n");
+ dev_err(vortex->card->dev, "midi port doesn't acknowledge!\n");
return -ENODEV;
}
/* Enable MPU401 interrupts. */
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 5adc6b92ffab..a6d6d8d0867a 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -227,7 +227,7 @@ snd_vortex_pcm_hw_params(struct snd_pcm_substream *substream,
err =
snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0) {
- pr_err( "Vortex: pcm page alloc failed!\n");
+ dev_err(chip->card->dev, "Vortex: pcm page alloc failed!\n");
return err;
}
/*
@@ -332,7 +332,7 @@ static int snd_vortex_pcm_prepare(struct snd_pcm_substream *substream)
dir = 1;
else
dir = 0;
- fmt = vortex_alsafmt_aspfmt(runtime->format);
+ fmt = vortex_alsafmt_aspfmt(runtime->format, chip);
spin_lock_irq(&chip->lock);
if (VORTEX_PCM_TYPE(substream->pcm) != VORTEX_PCM_WT) {
vortex_adbdma_setmode(chip, dma, 1, dir, fmt,
@@ -371,7 +371,7 @@ static int snd_vortex_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
#ifndef CHIP_AU8810
else {
- pr_info( "vortex: wt start %d\n", dma);
+ dev_info(chip->card->dev, "wt start %d\n", dma);
vortex_wtdma_startfifo(chip, dma);
}
#endif
@@ -384,7 +384,7 @@ static int snd_vortex_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
vortex_adbdma_stopfifo(chip, dma);
#ifndef CHIP_AU8810
else {
- pr_info( "vortex: wt stop %d\n", dma);
+ dev_info(chip->card->dev, "wt stop %d\n", dma);
vortex_wtdma_stopfifo(chip, dma);
}
#endif
diff --git a/sound/pci/au88x0/au88x0_synth.c b/sound/pci/au88x0/au88x0_synth.c
index f094bac24291..78e12f4796f3 100644
--- a/sound/pci/au88x0/au88x0_synth.c
+++ b/sound/pci/au88x0/au88x0_synth.c
@@ -90,7 +90,7 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
hwwrite(vortex->mmio, WT_PARM(wt, 2), 0);
temp = hwread(vortex->mmio, WT_PARM(wt, 3));
- pr_debug( "vortex: WT PARM3: %x\n", temp);
+ dev_dbg(vortex->card->dev, "WT PARM3: %x\n", temp);
//hwwrite(vortex->mmio, WT_PARM(wt, 3), temp);
hwwrite(vortex->mmio, WT_DELAY(wt, 0), 0);
@@ -98,7 +98,8 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
hwwrite(vortex->mmio, WT_DELAY(wt, 2), 0);
hwwrite(vortex->mmio, WT_DELAY(wt, 3), 0);
- pr_debug( "vortex: WT GMODE: %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
+ dev_dbg(vortex->card->dev, "WT GMODE: %x\n",
+ hwread(vortex->mmio, WT_GMODE(wt)));
hwwrite(vortex->mmio, WT_PARM(wt, 2), 0xffffffff);
hwwrite(vortex->mmio, WT_PARM(wt, 3), 0xcff1c810);
@@ -106,7 +107,8 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
voice->parm0 = voice->parm1 = 0xcfb23e2f;
hwwrite(vortex->mmio, WT_PARM(wt, 0), voice->parm0);
hwwrite(vortex->mmio, WT_PARM(wt, 1), voice->parm1);
- pr_debug( "vortex: WT GMODE 2 : %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
+ dev_dbg(vortex->card->dev, "WT GMODE 2 : %x\n",
+ hwread(vortex->mmio, WT_GMODE(wt)));
return 0;
}
@@ -196,14 +198,15 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
if ((reg == 5) || ((reg >= 7) && (reg <= 10)) || (reg == 0xc)) {
if (wt >= (NR_WT / NR_WT_PB)) {
- pr_warn
- ("vortex: WT SetReg: bank out of range. reg=0x%x, wt=%d\n",
- reg, wt);
+ dev_warn(vortex->card->dev,
+ "WT SetReg: bank out of range. reg=0x%x, wt=%d\n",
+ reg, wt);
return 0;
}
} else {
if (wt >= NR_WT) {
- pr_err( "vortex: WT SetReg: voice out of range\n");
+ dev_err(vortex->card->dev,
+ "WT SetReg: voice out of range\n");
return 0;
}
}
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 3878cf5de9a4..e1cf01949fda 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -725,19 +725,10 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
static int snd_aw2_control_switch_capture_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = {
+ static const char * const texts[2] = {
"Analog", "Digital"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) {
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- }
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_aw2_control_switch_capture_get(struct snd_kcontrol *kcontrol,
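snd_ctl_enum_info() fills uinfo->type, count and items, clamps the requested item and copies the matching name, which is exactly what the removed open-coded bodies did by hand. A hedged sketch of an enumerated-control info callback built on it (control name and texts are illustrative):

#include <linux/kernel.h>
#include <sound/control.h>

static int my_route_info(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[] = { "Analog", "Digital" };

	/* one channel, ARRAY_SIZE(texts) items */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}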
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 5a69e26cb2fb..fdbb9c05c77b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1034,11 +1034,6 @@ snd_azf3328_info_mixer_enum(struct snd_kcontrol *kcontrol,
const char * const *p = NULL;
snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = (reg.reg == IDX_MIXER_REC_SELECT) ? 2 : 1;
- uinfo->value.enumerated.items = reg.enum_c;
- if (uinfo->value.enumerated.item > reg.enum_c - 1U)
- uinfo->value.enumerated.item = reg.enum_c - 1U;
if (reg.reg == IDX_MIXER_ADVCTL2) {
switch(reg.lchan_shift) {
case 8: /* modem out sel */
@@ -1051,12 +1046,12 @@ snd_azf3328_info_mixer_enum(struct snd_kcontrol *kcontrol,
p = texts4;
break;
}
- } else
- if (reg.reg == IDX_MIXER_REC_SELECT)
+ } else if (reg.reg == IDX_MIXER_REC_SELECT)
p = texts3;
- strcpy(uinfo->value.enumerated.name, p[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo,
+ (reg.reg == IDX_MIXER_REC_SELECT) ? 2 : 1,
+ reg.enum_c, p);
}
static int
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index 27de0de90018..68c0eb0a2807 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -185,17 +185,11 @@ static int snd_ca0106_shared_spdif_put(struct snd_kcontrol *kcontrol,
static int snd_ca0106_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[6] = {
+ static const char * const texts[6] = {
"IEC958 out", "i2s mixer out", "IEC958 in", "i2s in", "AC97 in", "SRC out"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 6;
- if (uinfo->value.enumerated.item > 5)
- uinfo->value.enumerated.item = 5;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 6, texts);
}
static int snd_ca0106_capture_source_get(struct snd_kcontrol *kcontrol,
@@ -228,17 +222,11 @@ static int snd_ca0106_capture_source_put(struct snd_kcontrol *kcontrol,
static int snd_ca0106_i2c_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[6] = {
+ static const char * const texts[4] = {
"Phone", "Mic", "Line in", "Aux"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_ca0106_i2c_capture_source_get(struct snd_kcontrol *kcontrol,
@@ -273,29 +261,17 @@ static int snd_ca0106_i2c_capture_source_put(struct snd_kcontrol *kcontrol,
static int snd_ca0106_capture_line_in_side_out_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "Side out", "Line in" };
+ static const char * const texts[2] = { "Side out", "Line in" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ca0106_capture_mic_line_in_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "Line in", "Mic in" };
+ static const char * const texts[2] = { "Line in", "Mic in" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ca0106_capture_mic_line_in_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 454659074390..977a59855fa6 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -438,7 +438,9 @@ atc_pcm_playback_position(struct ct_atc *atc, struct ct_atc_pcm *apcm)
position = src->ops->get_ca(src);
if (position < apcm->vm_block->addr) {
- snd_printdd("ctxfi: bad ca - ca=0x%08x, vba=0x%08x, vbs=0x%08x\n", position, apcm->vm_block->addr, apcm->vm_block->size);
+ dev_dbg(atc->card->dev,
+ "bad ca - ca=0x%08x, vba=0x%08x, vbs=0x%08x\n",
+ position, apcm->vm_block->addr, apcm->vm_block->size);
position = apcm->vm_block->addr;
}
@@ -1145,7 +1147,6 @@ static int atc_release_resources(struct ct_atc *atc)
int i;
struct daio_mgr *daio_mgr = NULL;
struct dao *dao = NULL;
- struct dai *dai = NULL;
struct daio *daio = NULL;
struct sum_mgr *sum_mgr = NULL;
struct src_mgr *src_mgr = NULL;
@@ -1172,9 +1173,6 @@ static int atc_release_resources(struct ct_atc *atc)
dao = container_of(daio, struct dao, daio);
dao->ops->clear_left_input(dao);
dao->ops->clear_right_input(dao);
- } else {
- dai = container_of(daio, struct dai, daio);
- /* some thing to do for dai ... */
}
daio_mgr->put_daio(daio_mgr, daio);
}
@@ -1299,7 +1297,7 @@ static int atc_identify_card(struct ct_atc *atc, unsigned int ssid)
atc->model = CT20K2_UNKNOWN;
}
atc->model_name = ct_subsys_name[atc->model];
- snd_printd("ctxfi: chip %s model %s (%04x:%04x) is found\n",
+ dev_info(atc->card->dev, "chip %s model %s (%04x:%04x) is found\n",
atc->chip_name, atc->model_name,
vendor_id, device_id);
return 0;
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index c1c3f8816fff..9b87dd28de83 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -528,8 +528,6 @@ static int get_daio_rsc(struct daio_mgr *mgr,
struct daio **rdaio)
{
int err;
- struct dai *dai = NULL;
- struct dao *dao = NULL;
unsigned long flags;
*rdaio = NULL;
@@ -544,27 +542,30 @@ static int get_daio_rsc(struct daio_mgr *mgr,
return err;
}
+ err = -ENOMEM;
/* Allocate mem for daio resource */
if (desc->type <= DAIO_OUT_MAX) {
- dao = kzalloc(sizeof(*dao), GFP_KERNEL);
- if (!dao) {
- err = -ENOMEM;
+ struct dao *dao = kzalloc(sizeof(*dao), GFP_KERNEL);
+ if (!dao)
goto error;
- }
+
err = dao_rsc_init(dao, desc, mgr);
- if (err)
+ if (err) {
+ kfree(dao);
goto error;
+ }
*rdaio = &dao->daio;
} else {
- dai = kzalloc(sizeof(*dai), GFP_KERNEL);
- if (!dai) {
- err = -ENOMEM;
+ struct dai *dai = kzalloc(sizeof(*dai), GFP_KERNEL);
+ if (!dai)
goto error;
- }
+
err = dai_rsc_init(dai, desc, mgr);
- if (err)
+ if (err) {
+ kfree(dai);
goto error;
+ }
*rdaio = &dai->daio;
}
@@ -575,11 +576,6 @@ static int get_daio_rsc(struct daio_mgr *mgr,
return 0;
error:
- if (dao)
- kfree(dao);
- else if (dai)
- kfree(dai);
-
spin_lock_irqsave(&mgr->mgr_lock, flags);
daio_mgr_put_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c
index 03fb909085af..a5d460453d7b 100644
--- a/sound/pci/ctxfi/cttimer.c
+++ b/sound/pci/ctxfi/cttimer.c
@@ -421,12 +421,12 @@ struct ct_timer *ct_timer_new(struct ct_atc *atc)
atimer->atc = atc;
hw = atc->hw;
if (!use_system_timer && hw->set_timer_irq) {
- snd_printd(KERN_INFO "ctxfi: Use xfi-native timer\n");
+ dev_info(atc->card->dev, "Use xfi-native timer\n");
atimer->ops = &ct_xfitimer_ops;
hw->irq_callback_data = atimer;
hw->irq_callback = ct_timer_interrupt;
} else {
- snd_printd(KERN_INFO "ctxfi: Use system timer\n");
+ dev_info(atc->card->dev, "Use system timer\n");
atimer->ops = &ct_systimer_ops;
}
return atimer;
diff --git a/sound/pci/echoaudio/darla20_dsp.c b/sound/pci/echoaudio/darla20_dsp.c
index 20c7cbc89bb3..febee5bda877 100644
--- a/sound/pci/echoaudio/darla20_dsp.c
+++ b/sound/pci/echoaudio/darla20_dsp.c
@@ -33,12 +33,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Darla20\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA20))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw: could not initialize DSP comm page\n");
return err;
}
@@ -57,7 +57,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
diff --git a/sound/pci/echoaudio/darla24_dsp.c b/sound/pci/echoaudio/darla24_dsp.c
index 6da6663e9176..7b4f6fd51b09 100644
--- a/sound/pci/echoaudio/darla24_dsp.c
+++ b/sound/pci/echoaudio/darla24_dsp.c
@@ -33,12 +33,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Darla24\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA24))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw: could not initialize DSP comm page\n");
return err;
}
@@ -56,7 +56,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -128,15 +127,17 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
clock = GD24_8000;
break;
default:
- DE_ACT(("set_sample_rate: Error, invalid sample rate %d\n",
- rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: Error, invalid sample rate %d\n",
+ rate);
return -EINVAL;
}
if (wait_handshake(chip))
return -EIO;
- DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
+ dev_dbg(chip->card->dev,
+ "set_sample_rate: %d clock %d\n", rate, clock);
chip->sample_rate = rate;
/* Override the sample rate if this card is set to Echo sync. */
diff --git a/sound/pci/echoaudio/echo3g_dsp.c b/sound/pci/echoaudio/echo3g_dsp.c
index 3cdc2ee2d1dd..ae11ce11b1c2 100644
--- a/sound/pci/echoaudio/echo3g_dsp.c
+++ b/sound/pci/echoaudio/echo3g_dsp.c
@@ -46,12 +46,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
int err;
local_irq_enable();
- DE_INIT(("init_hw() - Echo3G\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != ECHO3G))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -98,7 +98,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
ECHOCAPS_HAS_DIGITAL_MODE_ADAT;
- DE_INIT(("init_hw done\n"));
return err;
}
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 631aaa4046ad..21228adaa70c 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -48,13 +48,16 @@ static int get_firmware(const struct firmware **fw_entry,
#ifdef CONFIG_PM_SLEEP
if (chip->fw_cache[fw_index]) {
- DE_ACT(("firmware requested: %s is cached\n", card_fw[fw_index].data));
+ dev_dbg(chip->card->dev,
+ "firmware requested: %s is cached\n",
+ card_fw[fw_index].data);
*fw_entry = chip->fw_cache[fw_index];
return 0;
}
#endif
- DE_ACT(("firmware requested: %s\n", card_fw[fw_index].data));
+ dev_dbg(chip->card->dev,
+ "firmware requested: %s\n", card_fw[fw_index].data);
snprintf(name, sizeof(name), "ea/%s", card_fw[fw_index].data);
err = request_firmware(fw_entry, name, pci_device(chip));
if (err < 0)
@@ -69,13 +72,13 @@ static int get_firmware(const struct firmware **fw_entry,
-static void free_firmware(const struct firmware *fw_entry)
+static void free_firmware(const struct firmware *fw_entry,
+ struct echoaudio *chip)
{
#ifdef CONFIG_PM_SLEEP
- DE_ACT(("firmware not released (kept in cache)\n"));
+ dev_dbg(chip->card->dev, "firmware not released (kept in cache)\n");
#else
release_firmware(fw_entry);
- DE_ACT(("firmware released\n"));
#endif
}
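free_firmware() gains a chip argument only so its cached-firmware message can go through dev_dbg(); with CONFIG_PM_SLEEP the blob stays cached for resume and is released later by free_firmware_cache(), otherwise it is released immediately. A small hedged sketch of that conditional-release idea (my_chip and my_free_fw are illustrative names):

#include <linux/device.h>
#include <linux/firmware.h>

struct my_chip {
	struct device *dev;
};

static void my_free_fw(const struct firmware *fw, struct my_chip *chip)
{
#ifdef CONFIG_PM_SLEEP
	/* kept cached for resume; released in one place later */
	dev_dbg(chip->dev, "firmware kept in cache\n");
#else
	release_firmware(fw);
#endif
}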
@@ -89,10 +92,9 @@ static void free_firmware_cache(struct echoaudio *chip)
for (i = 0; i < 8 ; i++)
if (chip->fw_cache[i]) {
release_firmware(chip->fw_cache[i]);
- DE_ACT(("release_firmware(%d)\n", i));
+ dev_dbg(chip->card->dev, "release_firmware(%d)\n", i);
}
- DE_ACT(("firmware_cache released\n"));
#endif
}
@@ -286,7 +288,7 @@ static int pcm_open(struct snd_pcm_substream *substream,
/* Set up hw capabilities and constraints */
memcpy(&pipe->hw, &pcm_hardware_skel, sizeof(struct snd_pcm_hardware));
- DE_HWP(("max_channels=%d\n", max_channels));
+ dev_dbg(chip->card->dev, "max_channels=%d\n", max_channels);
pipe->constr.list = channels_list;
pipe->constr.mask = 0;
for (i = 0; channels_list[i] <= max_channels; i++);
@@ -336,7 +338,7 @@ static int pcm_open(struct snd_pcm_substream *substream,
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
PAGE_SIZE, &pipe->sgpage)) < 0) {
- DE_HWP(("s-g list allocation failed\n"));
+ dev_err(chip->card->dev, "s-g list allocation failed\n");
return err;
}
@@ -350,7 +352,6 @@ static int pcm_analog_in_open(struct snd_pcm_substream *substream)
struct echoaudio *chip = snd_pcm_substream_chip(substream);
int err;
- DE_ACT(("pcm_analog_in_open\n"));
if ((err = pcm_open(substream, num_analog_busses_in(chip) -
substream->number)) < 0)
return err;
@@ -367,9 +368,9 @@ static int pcm_analog_in_open(struct snd_pcm_substream *substream)
atomic_inc(&chip->opencount);
if (atomic_read(&chip->opencount) > 1 && chip->rate_set)
chip->can_set_rate=0;
- DE_HWP(("pcm_analog_in_open cs=%d oc=%d r=%d\n",
+ dev_dbg(chip->card->dev, "pcm_analog_in_open cs=%d oc=%d r=%d\n",
chip->can_set_rate, atomic_read(&chip->opencount),
- chip->sample_rate));
+ chip->sample_rate);
return 0;
}
@@ -385,7 +386,6 @@ static int pcm_analog_out_open(struct snd_pcm_substream *substream)
#else
max_channels = num_analog_busses_out(chip);
#endif
- DE_ACT(("pcm_analog_out_open\n"));
if ((err = pcm_open(substream, max_channels - substream->number)) < 0)
return err;
if ((err = snd_pcm_hw_rule_add(substream->runtime, 0,
@@ -403,9 +403,9 @@ static int pcm_analog_out_open(struct snd_pcm_substream *substream)
atomic_inc(&chip->opencount);
if (atomic_read(&chip->opencount) > 1 && chip->rate_set)
chip->can_set_rate=0;
- DE_HWP(("pcm_analog_out_open cs=%d oc=%d r=%d\n",
+ dev_dbg(chip->card->dev, "pcm_analog_out_open cs=%d oc=%d r=%d\n",
chip->can_set_rate, atomic_read(&chip->opencount),
- chip->sample_rate));
+ chip->sample_rate);
return 0;
}
@@ -418,7 +418,6 @@ static int pcm_digital_in_open(struct snd_pcm_substream *substream)
struct echoaudio *chip = snd_pcm_substream_chip(substream);
int err, max_channels;
- DE_ACT(("pcm_digital_in_open\n"));
max_channels = num_digital_busses_in(chip) - substream->number;
mutex_lock(&chip->mode_mutex);
if (chip->digital_mode == DIGITAL_MODE_ADAT)
@@ -460,7 +459,6 @@ static int pcm_digital_out_open(struct snd_pcm_substream *substream)
struct echoaudio *chip = snd_pcm_substream_chip(substream);
int err, max_channels;
- DE_ACT(("pcm_digital_out_open\n"));
max_channels = num_digital_busses_out(chip) - substream->number;
mutex_lock(&chip->mode_mutex);
if (chip->digital_mode == DIGITAL_MODE_ADAT)
@@ -507,18 +505,17 @@ static int pcm_close(struct snd_pcm_substream *substream)
/* Nothing to do here. Audio is already off and pipe will be
* freed by its callback
*/
- DE_ACT(("pcm_close\n"));
atomic_dec(&chip->opencount);
oc = atomic_read(&chip->opencount);
- DE_ACT(("pcm_close oc=%d cs=%d rs=%d\n", oc,
- chip->can_set_rate, chip->rate_set));
+ dev_dbg(chip->card->dev, "pcm_close oc=%d cs=%d rs=%d\n", oc,
+ chip->can_set_rate, chip->rate_set);
if (oc < 2)
chip->can_set_rate = 1;
if (oc == 0)
chip->rate_set = 0;
- DE_ACT(("pcm_close2 oc=%d cs=%d rs=%d\n", oc,
- chip->can_set_rate,chip->rate_set));
+ dev_dbg(chip->card->dev, "pcm_close2 oc=%d cs=%d rs=%d\n", oc,
+ chip->can_set_rate, chip->rate_set);
return 0;
}
@@ -542,7 +539,7 @@ static int init_engine(struct snd_pcm_substream *substream,
*/
spin_lock_irq(&chip->lock);
if (pipe->index >= 0) {
- DE_HWP(("hwp_ie free(%d)\n", pipe->index));
+ dev_dbg(chip->card->dev, "hwp_ie free(%d)\n", pipe->index);
err = free_pipes(chip, pipe);
snd_BUG_ON(err);
chip->substream[pipe->index] = NULL;
@@ -551,16 +548,17 @@ static int init_engine(struct snd_pcm_substream *substream,
err = allocate_pipes(chip, pipe, pipe_index, interleave);
if (err < 0) {
spin_unlock_irq(&chip->lock);
- DE_ACT((KERN_NOTICE "allocate_pipes(%d) err=%d\n",
- pipe_index, err));
+ dev_err(chip->card->dev, "allocate_pipes(%d) err=%d\n",
+ pipe_index, err);
return err;
}
spin_unlock_irq(&chip->lock);
- DE_ACT((KERN_NOTICE "allocate_pipes()=%d\n", pipe_index));
+ dev_dbg(chip->card->dev, "allocate_pipes()=%d\n", pipe_index);
- DE_HWP(("pcm_hw_params (bufsize=%dB periods=%d persize=%dB)\n",
+ dev_dbg(chip->card->dev,
+ "pcm_hw_params (bufsize=%dB periods=%d persize=%dB)\n",
params_buffer_bytes(hw_params), params_periods(hw_params),
- params_period_bytes(hw_params)));
+ params_period_bytes(hw_params));
err = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (err < 0) {
@@ -615,7 +613,6 @@ static int init_engine(struct snd_pcm_substream *substream,
spin_lock_irq(&chip->lock);
set_sample_rate(chip, hw_params->rate_num / hw_params->rate_den);
spin_unlock_irq(&chip->lock);
- DE_HWP(("pcm_hw_params ok\n"));
return 0;
}
@@ -679,14 +676,13 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
spin_lock_irq(&chip->lock);
if (pipe->index >= 0) {
- DE_HWP(("pcm_hw_free(%d)\n", pipe->index));
+ dev_dbg(chip->card->dev, "pcm_hw_free(%d)\n", pipe->index);
free_pipes(chip, pipe);
chip->substream[pipe->index] = NULL;
pipe->index = -1;
}
spin_unlock_irq(&chip->lock);
- DE_HWP(("pcm_hw_freed\n"));
snd_pcm_lib_free_pages(substream);
return 0;
}
@@ -700,8 +696,8 @@ static int pcm_prepare(struct snd_pcm_substream *substream)
struct audioformat format;
int pipe_index = ((struct audiopipe *)runtime->private_data)->index;
- DE_HWP(("Prepare rate=%d format=%d channels=%d\n",
- runtime->rate, runtime->format, runtime->channels));
+ dev_dbg(chip->card->dev, "Prepare rate=%d format=%d channels=%d\n",
+ runtime->rate, runtime->format, runtime->channels);
format.interleave = runtime->channels;
format.data_are_bigendian = 0;
format.mono_to_stereo = 0;
@@ -721,8 +717,9 @@ static int pcm_prepare(struct snd_pcm_substream *substream)
format.bits_per_sample = 32;
break;
default:
- DE_HWP(("Prepare error: unsupported format %d\n",
- runtime->format));
+ dev_err(chip->card->dev,
+ "Prepare error: unsupported format %d\n",
+ runtime->format);
return -EINVAL;
}
@@ -757,10 +754,8 @@ static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&chip->lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
- DE_ACT(("pcm_trigger resume\n"));
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- DE_ACT(("pcm_trigger start\n"));
for (i = 0; i < DSP_MAXPIPES; i++) {
if (channelmask & (1 << i)) {
pipe = chip->substream[i]->runtime->private_data;
@@ -782,9 +777,7 @@ static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
chip->pipe_cyclic_mask);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
- DE_ACT(("pcm_trigger suspend\n"));
case SNDRV_PCM_TRIGGER_STOP:
- DE_ACT(("pcm_trigger stop\n"));
for (i = 0; i < DSP_MAXPIPES; i++) {
if (channelmask & (1 << i)) {
pipe = chip->substream[i]->runtime->private_data;
@@ -794,7 +787,6 @@ static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
err = stop_transport(chip, channelmask);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- DE_ACT(("pcm_trigger pause\n"));
for (i = 0; i < DSP_MAXPIPES; i++) {
if (channelmask & (1 << i)) {
pipe = chip->substream[i]->runtime->private_data;
@@ -931,7 +923,6 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
return err;
- DE_INIT(("Analog PCM ok\n"));
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital inputs, no outputs */
@@ -944,7 +935,6 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
return err;
- DE_INIT(("Digital PCM ok\n"));
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#else /* ECHOCARD_HAS_VMIXER */
@@ -966,7 +956,6 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
return err;
- DE_INIT(("Analog PCM ok\n"));
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital i/o */
@@ -981,7 +970,6 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
return err;
- DE_INIT(("Digital PCM ok\n"));
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#endif /* ECHOCARD_HAS_VMIXER */
@@ -1416,21 +1404,14 @@ static struct snd_kcontrol_new snd_echo_vmixer = {
static int snd_echo_digital_mode_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *names[4] = {
+ static const char * const names[4] = {
"S/PDIF Coaxial", "S/PDIF Optical", "ADAT Optical",
"S/PDIF Cdrom"
};
struct echoaudio *chip;
chip = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = chip->num_digital_modes;
- uinfo->count = 1;
- if (uinfo->value.enumerated.item >= chip->num_digital_modes)
- uinfo->value.enumerated.item = chip->num_digital_modes - 1;
- strcpy(uinfo->value.enumerated.name, names[
- chip->digital_mode_list[uinfo->value.enumerated.item]]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, chip->num_digital_modes, names);
}
static int snd_echo_digital_mode_get(struct snd_kcontrol *kcontrol,
@@ -1481,7 +1462,8 @@ static int snd_echo_digital_mode_put(struct snd_kcontrol *kcontrol,
snd_ctl_notify(chip->card,
SNDRV_CTL_EVENT_MASK_VALUE,
&chip->clock_src_ctl->id);
- DE_ACT(("SDM() =%d\n", changed));
+ dev_dbg(chip->card->dev,
+ "SDM() =%d\n", changed);
}
if (changed >= 0)
changed = 1; /* No errors */
@@ -1509,16 +1491,9 @@ static struct snd_kcontrol_new snd_echo_digital_mode_switch = {
static int snd_echo_spdif_mode_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *names[2] = {"Consumer", "Professional"};
+ static const char * const names[2] = {"Consumer", "Professional"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = 2;
- uinfo->count = 1;
- if (uinfo->value.enumerated.item)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- names[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, names);
}
static int snd_echo_spdif_mode_get(struct snd_kcontrol *kcontrol,
@@ -1566,21 +1541,14 @@ static struct snd_kcontrol_new snd_echo_spdif_mode_switch = {
static int snd_echo_clock_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *names[8] = {
+ static const char * const names[8] = {
"Internal", "Word", "Super", "S/PDIF", "ADAT", "ESync",
"ESync96", "MTC"
};
struct echoaudio *chip;
chip = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = chip->num_clock_sources;
- uinfo->count = 1;
- if (uinfo->value.enumerated.item >= chip->num_clock_sources)
- uinfo->value.enumerated.item = chip->num_clock_sources - 1;
- strcpy(uinfo->value.enumerated.name, names[
- chip->clock_source_list[uinfo->value.enumerated.item]]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, chip->num_clock_sources, names);
}
static int snd_echo_clock_source_get(struct snd_kcontrol *kcontrol,
@@ -1622,7 +1590,8 @@ static int snd_echo_clock_source_put(struct snd_kcontrol *kcontrol,
}
if (changed < 0)
- DE_ACT(("seticlk val%d err 0x%x\n", dclock, changed));
+ dev_dbg(chip->card->dev,
+ "seticlk val%d err 0x%x\n", dclock, changed);
return changed;
}
@@ -1879,7 +1848,7 @@ static irqreturn_t snd_echo_interrupt(int irq, void *dev_id)
#ifdef ECHOCARD_HAS_MIDI
if (st > 0 && chip->midi_in) {
snd_rawmidi_receive(chip->midi_in, chip->midi_buffer, st);
- DE_MID(("rawmidi_iread=%d\n", st));
+ dev_dbg(chip->card->dev, "rawmidi_iread=%d\n", st);
}
#endif
return IRQ_HANDLED;
@@ -1894,10 +1863,8 @@ static irqreturn_t snd_echo_interrupt(int irq, void *dev_id)
static int snd_echo_free(struct echoaudio *chip)
{
- DE_INIT(("Stop DSP...\n"));
if (chip->comm_page)
rest_in_peace(chip);
- DE_INIT(("Stopped.\n"));
if (chip->irq >= 0)
free_irq(chip->irq, chip);
@@ -1908,17 +1875,14 @@ static int snd_echo_free(struct echoaudio *chip)
if (chip->dsp_registers)
iounmap(chip->dsp_registers);
- if (chip->iores)
- release_and_free_resource(chip->iores);
+ release_and_free_resource(chip->iores);
- DE_INIT(("MMIO freed.\n"));
pci_disable_device(chip->pci);
/* release chip data */
free_firmware_cache(chip);
kfree(chip);
- DE_INIT(("Chip freed.\n"));
return 0;
}
@@ -1928,7 +1892,6 @@ static int snd_echo_dev_free(struct snd_device *device)
{
struct echoaudio *chip = device->device_data;
- DE_INIT(("snd_echo_dev_free()...\n"));
return snd_echo_free(chip);
}
@@ -1961,7 +1924,7 @@ static int snd_echo_create(struct snd_card *card,
pci_disable_device(pci);
return -ENOMEM;
}
- DE_INIT(("chip=%p\n", chip));
+ dev_dbg(card->dev, "chip=%p\n", chip);
spin_lock_init(&chip->lock);
chip->card = card;
chip->pci = pci;
@@ -1998,8 +1961,8 @@ static int snd_echo_create(struct snd_card *card,
return -EBUSY;
}
chip->irq = pci->irq;
- DE_INIT(("pci=%p irq=%d subdev=%04x Init hardware...\n",
- chip->pci, chip->irq, chip->pci->subsystem_device));
+ dev_dbg(card->dev, "pci=%p irq=%d subdev=%04x Init hardware...\n",
+ chip->pci, chip->irq, chip->pci->subsystem_device);
/* Create the DSP comm page - this is the area of memory used for most
of the communication with the DSP, which accesses it via bus mastering */
@@ -2017,11 +1980,10 @@ static int snd_echo_create(struct snd_card *card,
if (err >= 0)
err = set_mixer_defaults(chip);
if (err < 0) {
- DE_INIT(("init_hw err=%d\n", err));
+ dev_err(card->dev, "init_hw err=%d\n", err);
snd_echo_free(chip);
return err;
}
- DE_INIT(("Card init OK\n"));
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
snd_echo_free(chip);
@@ -2051,7 +2013,6 @@ static int snd_echo_probe(struct pci_dev *pci,
return -ENOENT;
}
- DE_INIT(("Echoaudio driver starting...\n"));
i = 0;
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
0, &card);
@@ -2204,7 +2165,6 @@ static int snd_echo_suspend(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct echoaudio *chip = dev_get_drvdata(dev);
- DE_INIT(("suspend start\n"));
snd_pcm_suspend_all(chip->analog_pcm);
snd_pcm_suspend_all(chip->digital_pcm);
@@ -2231,7 +2191,6 @@ static int snd_echo_suspend(struct device *dev)
pci_save_state(pci);
pci_disable_device(pci);
- DE_INIT(("suspend done\n"));
return 0;
}
@@ -2245,7 +2204,6 @@ static int snd_echo_resume(struct device *dev)
u32 pipe_alloc_mask;
int err;
- DE_INIT(("resume start\n"));
pci_restore_state(pci);
commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
if (commpage_bak == NULL)
@@ -2256,11 +2214,10 @@ static int snd_echo_resume(struct device *dev)
err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
if (err < 0) {
kfree(commpage_bak);
- DE_INIT(("resume init_hw err=%d\n", err));
+ dev_err(dev, "resume init_hw err=%d\n", err);
snd_echo_free(chip);
return err;
}
- DE_INIT(("resume init OK\n"));
/* Temporarily set chip->pipe_alloc_mask=0 otherwise
* restore_dsp_settings() fails.
@@ -2273,7 +2230,6 @@ static int snd_echo_resume(struct device *dev)
kfree(commpage_bak);
return err;
}
- DE_INIT(("resume restore OK\n"));
memcpy(&commpage->audio_format, &commpage_bak->audio_format,
sizeof(commpage->audio_format));
@@ -2290,7 +2246,7 @@ static int snd_echo_resume(struct device *dev)
return -EBUSY;
}
chip->irq = pci->irq;
- DE_INIT(("resume irq=%d\n", chip->irq));
+ dev_dbg(dev, "resume irq=%d\n", chip->irq);
#ifdef ECHOCARD_HAS_MIDI
if (chip->midi_input_enabled)
@@ -2299,7 +2255,6 @@ static int snd_echo_resume(struct device *dev)
snd_echo_midi_output_trigger(chip->midi_out, 1);
#endif
- DE_INIT(("resume done\n"));
return 0;
}
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index b86b88da81cd..32515227a692 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -295,34 +295,6 @@
#define PIPE_STATE_PENDING 3 /* Pipe has pending start */
-/* Debug initialization */
-#ifdef CONFIG_SND_DEBUG
-#define DE_INIT(x) snd_printk x
-#else
-#define DE_INIT(x)
-#endif
-
-/* Debug hw_params callbacks */
-#ifdef CONFIG_SND_DEBUG
-#define DE_HWP(x) snd_printk x
-#else
-#define DE_HWP(x)
-#endif
-
-/* Debug normal activity (open, start, stop...) */
-#ifdef CONFIG_SND_DEBUG
-#define DE_ACT(x) snd_printk x
-#else
-#define DE_ACT(x)
-#endif
-
-/* Debug midi activity */
-#ifdef CONFIG_SND_DEBUG
-#define DE_MID(x) snd_printk x
-#else
-#define DE_MID(x)
-#endif
-
struct audiopipe {
volatile u32 *dma_counter; /* Commpage register that contains
@@ -468,7 +440,8 @@ static int wait_handshake(struct echoaudio *chip);
static int send_vector(struct echoaudio *chip, u32 command);
static int get_firmware(const struct firmware **fw_entry,
struct echoaudio *chip, const short fw_index);
-static void free_firmware(const struct firmware *fw_entry);
+static void free_firmware(const struct firmware *fw_entry,
+ struct echoaudio *chip);
#ifdef ECHOCARD_HAS_MIDI
static int enable_midi_input(struct echoaudio *chip, char enable);
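The DE_INIT/DE_HWP/DE_ACT/DE_MID wrappers deleted above only hid snd_printk() behind CONFIG_SND_DEBUG. dev_dbg() gives the same effect without a driver-local macro: the call compiles away unless DEBUG or dynamic debug is enabled, and the message is attributed to the card's device. A short illustrative example of the replacement style (the function and values are made up):

#include <linux/device.h>

static void my_trace_open(struct device *dev, int opencount, int rate)
{
	/* silent unless DEBUG or dynamic debug enables it */
	dev_dbg(dev, "open count=%d rate=%d\n", opencount, rate);
}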
diff --git a/sound/pci/echoaudio/echoaudio_3g.c b/sound/pci/echoaudio/echoaudio_3g.c
index 658db44ef746..2fa66dc3e675 100644
--- a/sound/pci/echoaudio/echoaudio_3g.c
+++ b/sound/pci/echoaudio/echoaudio_3g.c
@@ -51,7 +51,7 @@ static int check_asic_status(struct echoaudio *chip)
}
box_status = le32_to_cpu(chip->comm_page->ext_box_status);
- DE_INIT(("box_status=%x\n", box_status));
+ dev_dbg(chip->card->dev, "box_status=%x\n", box_status);
if (box_status == E3G_ASIC_NOT_LOADED)
return -ENODEV;
@@ -76,7 +76,8 @@ static int write_control_reg(struct echoaudio *chip, u32 ctl, u32 frq,
if (wait_handshake(chip))
return -EIO;
- DE_ACT(("WriteControlReg: Setting 0x%x, 0x%x\n", ctl, frq));
+ dev_dbg(chip->card->dev,
+ "WriteControlReg: Setting 0x%x, 0x%x\n", ctl, frq);
ctl = cpu_to_le32(ctl);
frq = cpu_to_le32(frq);
@@ -89,7 +90,7 @@ static int write_control_reg(struct echoaudio *chip, u32 ctl, u32 frq,
return send_vector(chip, DSP_VC_WRITE_CONTROL_REG);
}
- DE_ACT(("WriteControlReg: not written, no change\n"));
+ dev_dbg(chip->card->dev, "WriteControlReg: not written, no change\n");
return 0;
}
@@ -258,8 +259,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
/* Only set the clock for internal mode. */
if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
- DE_ACT(("set_sample_rate: Cannot set sample rate - "
- "clock not set to CLK_CLOCKININTERNAL\n"));
+ dev_warn(chip->card->dev,
+ "Cannot set sample rate - clock not set to CLK_CLOCKININTERNAL\n");
/* Save the rate anyhow */
chip->comm_page->sample_rate = cpu_to_le32(rate);
chip->sample_rate = rate;
@@ -313,7 +314,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */
chip->sample_rate = rate;
- DE_ACT(("SetSampleRate: %d clock %x\n", rate, control_reg));
+ dev_dbg(chip->card->dev,
+ "SetSampleRate: %d clock %x\n", rate, control_reg);
/* Tell the DSP about it - DSP reads both control reg & freq reg */
return write_control_reg(chip, control_reg, frq_reg, 0);
@@ -326,7 +328,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
{
u32 control_reg, clocks_from_dsp;
- DE_ACT(("set_input_clock:\n"));
/* Mask off the clock select bits */
control_reg = le32_to_cpu(chip->comm_page->control_register) &
@@ -335,13 +336,11 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
switch (clock) {
case ECHO_CLOCK_INTERNAL:
- DE_ACT(("Set Echo3G clock to INTERNAL\n"));
chip->input_clock = ECHO_CLOCK_INTERNAL;
return set_sample_rate(chip, chip->sample_rate);
case ECHO_CLOCK_SPDIF:
if (chip->digital_mode == DIGITAL_MODE_ADAT)
return -EAGAIN;
- DE_ACT(("Set Echo3G clock to SPDIF\n"));
control_reg |= E3G_SPDIF_CLOCK;
if (clocks_from_dsp & E3G_CLOCK_DETECT_BIT_SPDIF96)
control_reg |= E3G_DOUBLE_SPEED_MODE;
@@ -351,12 +350,10 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
case ECHO_CLOCK_ADAT:
if (chip->digital_mode != DIGITAL_MODE_ADAT)
return -EAGAIN;
- DE_ACT(("Set Echo3G clock to ADAT\n"));
control_reg |= E3G_ADAT_CLOCK;
control_reg &= ~E3G_DOUBLE_SPEED_MODE;
break;
case ECHO_CLOCK_WORD:
- DE_ACT(("Set Echo3G clock to WORD\n"));
control_reg |= E3G_WORD_CLOCK;
if (clocks_from_dsp & E3G_CLOCK_DETECT_BIT_WORD96)
control_reg |= E3G_DOUBLE_SPEED_MODE;
@@ -364,7 +361,8 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
control_reg &= ~E3G_DOUBLE_SPEED_MODE;
break;
default:
- DE_ACT(("Input clock 0x%x not supported for Echo3G\n", clock));
+ dev_err(chip->card->dev,
+ "Input clock 0x%x not supported for Echo3G\n", clock);
return -EINVAL;
}
@@ -392,7 +390,8 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
incompatible_clock = TRUE;
break;
default:
- DE_ACT(("Digital mode not supported: %d\n", mode));
+ dev_err(chip->card->dev,
+ "Digital mode not supported: %d\n", mode);
return -EINVAL;
}
@@ -427,6 +426,6 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
return err;
chip->digital_mode = mode;
- DE_ACT(("set_digital_mode(%d)\n", chip->digital_mode));
+ dev_dbg(chip->card->dev, "set_digital_mode(%d)\n", chip->digital_mode);
return incompatible_clock;
}
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 5a6a217b82e0..1a9427aabe1d 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -80,7 +80,7 @@ static int send_vector(struct echoaudio *chip, u32 command)
udelay(1);
}
- DE_ACT((KERN_ERR "timeout on send_vector\n"));
+ dev_err(chip->card->dev, "timeout on send_vector\n");
return -EBUSY;
}
@@ -104,7 +104,7 @@ static int write_dsp(struct echoaudio *chip, u32 data)
}
chip->bad_board = TRUE; /* Set TRUE until DSP re-loaded */
- DE_ACT((KERN_ERR "write_dsp: Set bad_board to TRUE\n"));
+ dev_dbg(chip->card->dev, "write_dsp: Set bad_board to TRUE\n");
return -EIO;
}
@@ -127,7 +127,7 @@ static int read_dsp(struct echoaudio *chip, u32 *data)
}
chip->bad_board = TRUE; /* Set TRUE until DSP re-loaded */
- DE_INIT((KERN_ERR "read_dsp: Set bad_board to TRUE\n"));
+ dev_err(chip->card->dev, "read_dsp: Set bad_board to TRUE\n");
return -EIO;
}
@@ -154,8 +154,9 @@ static int read_sn(struct echoaudio *chip)
return -EIO;
}
}
- DE_INIT(("Read serial number %08x %08x %08x %08x %08x\n",
- sn[0], sn[1], sn[2], sn[3], sn[4]));
+ dev_dbg(chip->card->dev,
+ "Read serial number %08x %08x %08x %08x %08x\n",
+ sn[0], sn[1], sn[2], sn[3], sn[4]);
return 0;
}
@@ -205,13 +206,12 @@ static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic)
goto la_error;
}
- DE_INIT(("ASIC loaded\n"));
- free_firmware(fw);
+ free_firmware(fw, chip);
return 0;
la_error:
- DE_INIT(("failed on write_dsp\n"));
- free_firmware(fw);
+ dev_err(chip->card->dev, "failed on write_dsp\n");
+ free_firmware(fw, chip);
return -EIO;
}
@@ -241,8 +241,9 @@ static int install_resident_loader(struct echoaudio *chip)
loader is already installed, host flag 5 will be on. */
status = get_dsp_register(chip, CHI32_STATUS_REG);
if (status & CHI32_STATUS_REG_HF5) {
- DE_INIT(("Resident loader already installed; status is 0x%x\n",
- status));
+ dev_dbg(chip->card->dev,
+ "Resident loader already installed; status is 0x%x\n",
+ status);
return 0;
}
@@ -283,12 +284,14 @@ static int install_resident_loader(struct echoaudio *chip)
/* Write the count to the DSP */
if (write_dsp(chip, words)) {
- DE_INIT(("install_resident_loader: Failed to write word count!\n"));
+ dev_err(chip->card->dev,
+ "install_resident_loader: Failed to write word count!\n");
goto irl_error;
}
/* Write the DSP address */
if (write_dsp(chip, address)) {
- DE_INIT(("install_resident_loader: Failed to write DSP address!\n"));
+ dev_err(chip->card->dev,
+ "install_resident_loader: Failed to write DSP address!\n");
goto irl_error;
}
/* Write out this block of code to the DSP */
@@ -297,7 +300,8 @@ static int install_resident_loader(struct echoaudio *chip)
data = ((u32)code[index] << 16) + code[index + 1];
if (write_dsp(chip, data)) {
- DE_INIT(("install_resident_loader: Failed to write DSP code\n"));
+ dev_err(chip->card->dev,
+ "install_resident_loader: Failed to write DSP code\n");
goto irl_error;
}
index += 2;
@@ -312,16 +316,16 @@ static int install_resident_loader(struct echoaudio *chip)
}
if (i == 200) {
- DE_INIT(("Resident loader failed to set HF5\n"));
+ dev_err(chip->card->dev, "Resident loader failed to set HF5\n");
goto irl_error;
}
- DE_INIT(("Resident loader successfully installed\n"));
- free_firmware(fw);
+ dev_dbg(chip->card->dev, "Resident loader successfully installed\n");
+ free_firmware(fw, chip);
return 0;
irl_error:
- free_firmware(fw);
+ free_firmware(fw, chip);
return -EIO;
}
@@ -334,14 +338,14 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
int index, words, i;
if (chip->dsp_code == code) {
- DE_INIT(("DSP is already loaded!\n"));
+ dev_warn(chip->card->dev, "DSP is already loaded!\n");
return 0;
}
chip->bad_board = TRUE; /* Set TRUE until DSP loaded */
chip->dsp_code = NULL; /* Current DSP code not loaded */
chip->asic_loaded = FALSE; /* Loading the DSP code will reset the ASIC */
- DE_INIT(("load_dsp: Set bad_board to TRUE\n"));
+ dev_dbg(chip->card->dev, "load_dsp: Set bad_board to TRUE\n");
/* If this board requires a resident loader, install it. */
#ifdef DSP_56361
@@ -351,7 +355,8 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
/* Send software reset command */
if (send_vector(chip, DSP_VC_RESET) < 0) {
- DE_INIT(("LoadDsp: send_vector DSP_VC_RESET failed, Critical Failure\n"));
+ dev_err(chip->card->dev,
+ "LoadDsp: send_vector DSP_VC_RESET failed, Critical Failure\n");
return -EIO;
}
/* Delay 10us */
@@ -366,7 +371,8 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
}
if (i == 1000) {
- DE_INIT(("load_dsp: Timeout waiting for CHI32_STATUS_REG_HF3\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: Timeout waiting for CHI32_STATUS_REG_HF3\n");
return -EIO;
}
@@ -403,29 +409,34 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
index += 2;
if (write_dsp(chip, words) < 0) {
- DE_INIT(("load_dsp: failed to write number of DSP words\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: failed to write number of DSP words\n");
return -EIO;
}
if (write_dsp(chip, address) < 0) {
- DE_INIT(("load_dsp: failed to write DSP address\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: failed to write DSP address\n");
return -EIO;
}
if (write_dsp(chip, mem_type) < 0) {
- DE_INIT(("load_dsp: failed to write DSP memory type\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: failed to write DSP memory type\n");
return -EIO;
}
/* Code */
for (i = 0; i < words; i++, index+=2) {
data = ((u32)code[index] << 16) + code[index + 1];
if (write_dsp(chip, data) < 0) {
- DE_INIT(("load_dsp: failed to write DSP data\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: failed to write DSP data\n");
return -EIO;
}
}
}
if (write_dsp(chip, 0) < 0) { /* We're done!!! */
- DE_INIT(("load_dsp: Failed to write final zero\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: Failed to write final zero\n");
return -EIO;
}
udelay(10);
@@ -438,12 +449,14 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
get_dsp_register(chip, CHI32_CONTROL_REG) & ~0x1b00);
if (write_dsp(chip, DSP_FNC_SET_COMMPAGE_ADDR) < 0) {
- DE_INIT(("load_dsp: Failed to write DSP_FNC_SET_COMMPAGE_ADDR\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: Failed to write DSP_FNC_SET_COMMPAGE_ADDR\n");
return -EIO;
}
if (write_dsp(chip, chip->comm_page_phys) < 0) {
- DE_INIT(("load_dsp: Failed to write comm page address\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: Failed to write comm page address\n");
return -EIO;
}
@@ -452,19 +465,20 @@ static int load_dsp(struct echoaudio *chip, u16 *code)
We don't actually use the serial number but we have to
get it as part of the DSP init voodoo. */
if (read_sn(chip) < 0) {
- DE_INIT(("load_dsp: Failed to read serial number\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: Failed to read serial number\n");
return -EIO;
}
chip->dsp_code = code; /* Show which DSP code loaded */
chip->bad_board = FALSE; /* DSP OK */
- DE_INIT(("load_dsp: OK!\n"));
return 0;
}
udelay(100);
}
- DE_INIT(("load_dsp: DSP load timed out waiting for HF4\n"));
+ dev_err(chip->card->dev,
+ "load_dsp: DSP load timed out waiting for HF4\n");
return -EIO;
}
@@ -491,7 +505,7 @@ static int load_firmware(struct echoaudio *chip)
if (err < 0)
return err;
err = load_dsp(chip, (u16 *)fw->data);
- free_firmware(fw);
+ free_firmware(fw, chip);
if (err < 0)
return err;
@@ -658,7 +672,6 @@ static void get_audio_meters(struct echoaudio *chip, long *meters)
static int restore_dsp_rettings(struct echoaudio *chip)
{
int i, o, err;
- DE_INIT(("restore_dsp_settings\n"));
if ((err = check_asic_status(chip)) < 0)
return err;
@@ -755,7 +768,6 @@ static int restore_dsp_rettings(struct echoaudio *chip)
if (send_vector(chip, DSP_VC_UPDATE_FLAGS) < 0)
return -EIO;
- DE_INIT(("restore_dsp_rettings done\n"));
return 0;
}
@@ -835,7 +847,8 @@ static void set_audio_format(struct echoaudio *chip, u16 pipe_index,
break;
}
}
- DE_ACT(("set_audio_format[%d] = %x\n", pipe_index, dsp_format));
+ dev_dbg(chip->card->dev,
+ "set_audio_format[%d] = %x\n", pipe_index, dsp_format);
chip->comm_page->audio_format[pipe_index] = cpu_to_le16(dsp_format);
}
@@ -848,7 +861,6 @@ Same thing for pause_ and stop_ -trasport below. */
static int start_transport(struct echoaudio *chip, u32 channel_mask,
u32 cyclic_mask)
{
- DE_ACT(("start_transport %x\n", channel_mask));
if (wait_handshake(chip))
return -EIO;
@@ -866,7 +878,7 @@ static int start_transport(struct echoaudio *chip, u32 channel_mask,
return 0;
}
- DE_ACT(("start_transport: No pipes to start!\n"));
+ dev_err(chip->card->dev, "start_transport: No pipes to start!\n");
return -EINVAL;
}
@@ -874,7 +886,6 @@ static int start_transport(struct echoaudio *chip, u32 channel_mask,
static int pause_transport(struct echoaudio *chip, u32 channel_mask)
{
- DE_ACT(("pause_transport %x\n", channel_mask));
if (wait_handshake(chip))
return -EIO;
@@ -893,7 +904,7 @@ static int pause_transport(struct echoaudio *chip, u32 channel_mask)
return 0;
}
- DE_ACT(("pause_transport: No pipes to stop!\n"));
+ dev_warn(chip->card->dev, "pause_transport: No pipes to stop!\n");
return 0;
}
@@ -901,7 +912,6 @@ static int pause_transport(struct echoaudio *chip, u32 channel_mask)
static int stop_transport(struct echoaudio *chip, u32 channel_mask)
{
- DE_ACT(("stop_transport %x\n", channel_mask));
if (wait_handshake(chip))
return -EIO;
@@ -920,7 +930,7 @@ static int stop_transport(struct echoaudio *chip, u32 channel_mask)
return 0;
}
- DE_ACT(("stop_transport: No pipes to stop!\n"));
+ dev_warn(chip->card->dev, "stop_transport: No pipes to stop!\n");
return 0;
}
@@ -937,7 +947,6 @@ static inline int is_pipe_allocated(struct echoaudio *chip, u16 pipe_index)
stopped and unallocated. */
static int rest_in_peace(struct echoaudio *chip)
{
- DE_ACT(("rest_in_peace() open=%x\n", chip->pipe_alloc_mask));
/* Stops all active pipes (just to be sure) */
stop_transport(chip, chip->active_mask);
@@ -965,7 +974,8 @@ static int init_dsp_comm_page(struct echoaudio *chip)
{
/* Check if the compiler added extra padding inside the structure */
if (offsetof(struct comm_page, midi_output) != 0xbe0) {
- DE_INIT(("init_dsp_comm_page() - Invalid struct comm_page structure\n"));
+ dev_err(chip->card->dev,
+ "init_dsp_comm_page() - Invalid struct comm_page structure\n");
return -EPERM;
}
@@ -999,7 +1009,6 @@ static int init_dsp_comm_page(struct echoaudio *chip)
*/
static int init_line_levels(struct echoaudio *chip)
{
- DE_INIT(("init_line_levels\n"));
memset(chip->output_gain, ECHOGAIN_MUTED, sizeof(chip->output_gain));
memset(chip->input_gain, ECHOGAIN_MUTED, sizeof(chip->input_gain));
memset(chip->monitor_gain, ECHOGAIN_MUTED, sizeof(chip->monitor_gain));
@@ -1051,7 +1060,8 @@ static int allocate_pipes(struct echoaudio *chip, struct audiopipe *pipe,
u32 channel_mask;
char is_cyclic;
- DE_ACT(("allocate_pipes: ch=%d int=%d\n", pipe_index, interleave));
+ dev_dbg(chip->card->dev,
+ "allocate_pipes: ch=%d int=%d\n", pipe_index, interleave);
if (chip->bad_board)
return -EIO;
@@ -1061,7 +1071,8 @@ static int allocate_pipes(struct echoaudio *chip, struct audiopipe *pipe,
for (channel_mask = i = 0; i < interleave; i++)
channel_mask |= 1 << (pipe_index + i);
if (chip->pipe_alloc_mask & channel_mask) {
- DE_ACT(("allocate_pipes: channel already open\n"));
+ dev_err(chip->card->dev,
+ "allocate_pipes: channel already open\n");
return -EAGAIN;
}
@@ -1078,7 +1089,6 @@ static int allocate_pipes(struct echoaudio *chip, struct audiopipe *pipe,
it moves data. The DMA counter is in units of bytes, not samples. */
pipe->dma_counter = &chip->comm_page->position[pipe_index];
*pipe->dma_counter = 0;
- DE_ACT(("allocate_pipes: ok\n"));
return pipe_index;
}
@@ -1089,7 +1099,6 @@ static int free_pipes(struct echoaudio *chip, struct audiopipe *pipe)
u32 channel_mask;
int i;
- DE_ACT(("free_pipes: Pipe %d\n", pipe->index));
if (snd_BUG_ON(!is_pipe_allocated(chip, pipe->index)))
return -EINVAL;
if (snd_BUG_ON(pipe->state != PIPE_STATE_STOPPED))
@@ -1131,7 +1140,7 @@ static int sglist_add_mapping(struct echoaudio *chip, struct audiopipe *pipe,
list[head].size = cpu_to_le32(length);
pipe->sglist_head++;
} else {
- DE_ACT(("SGlist: too many fragments\n"));
+ dev_err(chip->card->dev, "SGlist: too many fragments\n");
return -ENOMEM;
}
return 0;
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index afa273330e8a..23a099425834 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -46,7 +46,8 @@ static int check_asic_status(struct echoaudio *chip)
/* The DSP will return a value to indicate whether or not the
ASIC is currently loaded */
if (read_dsp(chip, &asic_status) < 0) {
- DE_INIT(("check_asic_status: failed on read_dsp\n"));
+ dev_err(chip->card->dev,
+ "check_asic_status: failed on read_dsp\n");
chip->asic_loaded = FALSE;
return -EIO;
}
@@ -68,7 +69,7 @@ static int write_control_reg(struct echoaudio *chip, u32 value, char force)
else
value &= ~GML_DIGITAL_IN_AUTO_MUTE;
- DE_ACT(("write_control_reg: 0x%x\n", value));
+ dev_dbg(chip->card->dev, "write_control_reg: 0x%x\n", value);
/* Write the control register */
value = cpu_to_le32(value);
@@ -91,7 +92,7 @@ If the auto-mute is disabled, the digital inputs are enabled regardless of
what the input clock is set or what is connected. */
static int set_input_auto_mute(struct echoaudio *chip, int automute)
{
- DE_ACT(("set_input_auto_mute %d\n", automute));
+ dev_dbg(chip->card->dev, "set_input_auto_mute %d\n", automute);
chip->digital_in_automute = automute;
@@ -194,7 +195,7 @@ static int set_professional_spdif(struct echoaudio *chip, char prof)
if ((err = write_control_reg(chip, control_reg, FALSE)))
return err;
chip->professional_spdif = prof;
- DE_ACT(("set_professional_spdif to %s\n",
- prof ? "Professional" : "Consumer"));
+ dev_dbg(chip->card->dev, "set_professional_spdif to %s\n",
+ prof ? "Professional" : "Consumer");
return 0;
}
diff --git a/sound/pci/echoaudio/gina20_dsp.c b/sound/pci/echoaudio/gina20_dsp.c
index d1615a0579d1..5dafe9260cb4 100644
--- a/sound/pci/echoaudio/gina20_dsp.c
+++ b/sound/pci/echoaudio/gina20_dsp.c
@@ -37,12 +37,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Gina20\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != GINA20))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -62,7 +62,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -149,7 +148,6 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
- DE_ACT(("set_input_clock:\n"));
switch (clock) {
case ECHO_CLOCK_INTERNAL:
@@ -158,7 +156,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
set_sample_rate(chip, chip->sample_rate);
chip->input_clock = clock;
- DE_ACT(("Set Gina clock to INTERNAL\n"));
break;
case ECHO_CLOCK_SPDIF:
chip->comm_page->gd_clock_state = GD_CLOCK_SPDIFIN;
@@ -166,7 +163,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
clear_handshake(chip);
send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE);
chip->clock_state = GD_CLOCK_SPDIFIN;
- DE_ACT(("Set Gina20 clock to SPDIF\n"));
chip->input_clock = clock;
break;
default:
@@ -208,7 +204,6 @@ static int update_flags(struct echoaudio *chip)
static int set_professional_spdif(struct echoaudio *chip, char prof)
{
- DE_ACT(("set_professional_spdif %d\n", prof));
if (prof)
chip->comm_page->flags |=
cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF);
diff --git a/sound/pci/echoaudio/gina24_dsp.c b/sound/pci/echoaudio/gina24_dsp.c
index 98f7cfa81b5f..6971766938bf 100644
--- a/sound/pci/echoaudio/gina24_dsp.c
+++ b/sound/pci/echoaudio/gina24_dsp.c
@@ -41,12 +41,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Gina24\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != GINA24))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -78,7 +78,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -155,7 +154,6 @@ static int load_asic(struct echoaudio *chip)
control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
err = write_control_reg(chip, control_reg, TRUE);
}
- DE_INIT(("load_asic() done\n"));
return err;
}
@@ -171,8 +169,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
/* Only set the clock for internal mode. */
if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
- DE_ACT(("set_sample_rate: Cannot set sample rate - "
- "clock not set to CLK_CLOCKININTERNAL\n"));
+ dev_warn(chip->card->dev,
+ "Cannot set sample rate - clock not set to CLK_CLOCKININTERNAL\n");
/* Save the rate anyhow */
chip->comm_page->sample_rate = cpu_to_le32(rate);
chip->sample_rate = rate;
@@ -217,7 +215,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
clock = GML_8KHZ;
break;
default:
- DE_ACT(("set_sample_rate: %d invalid!\n", rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: %d invalid!\n", rate);
return -EINVAL;
}
@@ -225,7 +224,7 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */
chip->sample_rate = rate;
- DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
+ dev_dbg(chip->card->dev, "set_sample_rate: %d clock %d\n", rate, clock);
return write_control_reg(chip, control_reg, FALSE);
}
@@ -236,7 +235,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
{
u32 control_reg, clocks_from_dsp;
- DE_ACT(("set_input_clock:\n"));
/* Mask off the clock select bits */
control_reg = le32_to_cpu(chip->comm_page->control_register) &
@@ -245,13 +243,11 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
switch (clock) {
case ECHO_CLOCK_INTERNAL:
- DE_ACT(("Set Gina24 clock to INTERNAL\n"));
chip->input_clock = ECHO_CLOCK_INTERNAL;
return set_sample_rate(chip, chip->sample_rate);
case ECHO_CLOCK_SPDIF:
if (chip->digital_mode == DIGITAL_MODE_ADAT)
return -EAGAIN;
- DE_ACT(("Set Gina24 clock to SPDIF\n"));
control_reg |= GML_SPDIF_CLOCK;
if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
control_reg |= GML_DOUBLE_SPEED_MODE;
@@ -261,21 +257,19 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
case ECHO_CLOCK_ADAT:
if (chip->digital_mode != DIGITAL_MODE_ADAT)
return -EAGAIN;
- DE_ACT(("Set Gina24 clock to ADAT\n"));
control_reg |= GML_ADAT_CLOCK;
control_reg &= ~GML_DOUBLE_SPEED_MODE;
break;
case ECHO_CLOCK_ESYNC:
- DE_ACT(("Set Gina24 clock to ESYNC\n"));
control_reg |= GML_ESYNC_CLOCK;
control_reg &= ~GML_DOUBLE_SPEED_MODE;
break;
case ECHO_CLOCK_ESYNC96:
- DE_ACT(("Set Gina24 clock to ESYNC96\n"));
control_reg |= GML_ESYNC_CLOCK | GML_DOUBLE_SPEED_MODE;
break;
default:
- DE_ACT(("Input clock 0x%x not supported for Gina24\n", clock));
+ dev_err(chip->card->dev,
+ "Input clock 0x%x not supported for Gina24\n", clock);
return -EINVAL;
}
@@ -304,7 +298,8 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
incompatible_clock = TRUE;
break;
default:
- DE_ACT(("Digital mode not supported: %d\n", mode));
+ dev_err(chip->card->dev,
+ "Digital mode not supported: %d\n", mode);
return -EINVAL;
}
@@ -344,6 +339,7 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
return err;
chip->digital_mode = mode;
- DE_ACT(("set_digital_mode to %d\n", chip->digital_mode));
+ dev_dbg(chip->card->dev,
+ "set_digital_mode to %d\n", chip->digital_mode);
return incompatible_clock;
}
diff --git a/sound/pci/echoaudio/indigo_dsp.c b/sound/pci/echoaudio/indigo_dsp.c
index 5e85f14fe5a8..54edd67edff7 100644
--- a/sound/pci/echoaudio/indigo_dsp.c
+++ b/sound/pci/echoaudio/indigo_dsp.c
@@ -38,12 +38,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Indigo\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -60,7 +60,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -109,7 +108,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
control_reg = MIA_32000;
break;
default:
- DE_ACT(("set_sample_rate: %d invalid!\n", rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: %d invalid!\n", rate);
return -EINVAL;
}
@@ -147,7 +147,8 @@ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
index = output * num_pipes_out(chip) + pipe;
chip->comm_page->vmixer[index] = gain;
- DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
+ dev_dbg(chip->card->dev,
+ "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain);
return 0;
}
diff --git a/sound/pci/echoaudio/indigo_express_dsp.c b/sound/pci/echoaudio/indigo_express_dsp.c
index 2e4ab3e34a74..ceda2d7046ac 100644
--- a/sound/pci/echoaudio/indigo_express_dsp.c
+++ b/sound/pci/echoaudio/indigo_express_dsp.c
@@ -61,7 +61,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
control_reg |= clock;
if (control_reg != old_control_reg) {
- DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
+ dev_dbg(chip->card->dev,
+ "set_sample_rate: %d clock %d\n", rate, clock);
chip->comm_page->control_register = cpu_to_le32(control_reg);
chip->sample_rate = rate;
clear_handshake(chip);
@@ -89,7 +90,8 @@ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
index = output * num_pipes_out(chip) + pipe;
chip->comm_page->vmixer[index] = gain;
- DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
+ dev_dbg(chip->card->dev,
+ "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain);
return 0;
}
diff --git a/sound/pci/echoaudio/indigodj_dsp.c b/sound/pci/echoaudio/indigodj_dsp.c
index 68f3c8ccc1bf..2cf5cc092d7a 100644
--- a/sound/pci/echoaudio/indigodj_dsp.c
+++ b/sound/pci/echoaudio/indigodj_dsp.c
@@ -38,12 +38,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Indigo DJ\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_DJ))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -60,7 +60,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -109,7 +108,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
control_reg = MIA_32000;
break;
default:
- DE_ACT(("set_sample_rate: %d invalid!\n", rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: %d invalid!\n", rate);
return -EINVAL;
}
@@ -147,7 +147,8 @@ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
index = output * num_pipes_out(chip) + pipe;
chip->comm_page->vmixer[index] = gain;
- DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
+ dev_dbg(chip->card->dev,
+ "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain);
return 0;
}
diff --git a/sound/pci/echoaudio/indigodjx_dsp.c b/sound/pci/echoaudio/indigodjx_dsp.c
index bb9632c752a9..5252863f1681 100644
--- a/sound/pci/echoaudio/indigodjx_dsp.c
+++ b/sound/pci/echoaudio/indigodjx_dsp.c
@@ -35,13 +35,13 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Indigo DJx\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_DJX))
return -ENODEV;
err = init_dsp_comm_page(chip);
if (err < 0) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -59,7 +59,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
diff --git a/sound/pci/echoaudio/indigoio_dsp.c b/sound/pci/echoaudio/indigoio_dsp.c
index beb9a5b69892..4e81787627e0 100644
--- a/sound/pci/echoaudio/indigoio_dsp.c
+++ b/sound/pci/echoaudio/indigoio_dsp.c
@@ -38,12 +38,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Indigo IO\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IO))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -60,7 +60,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -118,7 +117,8 @@ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
index = output * num_pipes_out(chip) + pipe;
chip->comm_page->vmixer[index] = gain;
- DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
+ dev_dbg(chip->card->dev,
+ "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain);
return 0;
}
diff --git a/sound/pci/echoaudio/indigoiox_dsp.c b/sound/pci/echoaudio/indigoiox_dsp.c
index 394c6e76bcbc..6de3f9bc6d26 100644
--- a/sound/pci/echoaudio/indigoiox_dsp.c
+++ b/sound/pci/echoaudio/indigoiox_dsp.c
@@ -35,13 +35,13 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Indigo IOx\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
return -ENODEV;
err = init_dsp_comm_page(chip);
if (err < 0) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -59,7 +59,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
diff --git a/sound/pci/echoaudio/layla20_dsp.c b/sound/pci/echoaudio/layla20_dsp.c
index 53ce94605044..f2024a3565af 100644
--- a/sound/pci/echoaudio/layla20_dsp.c
+++ b/sound/pci/echoaudio/layla20_dsp.c
@@ -40,12 +40,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Layla20\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA20))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -64,7 +64,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -121,7 +120,8 @@ static int check_asic_status(struct echoaudio *chip)
/* The DSP will return a value to indicate whether or not
the ASIC is currently loaded */
if (read_dsp(chip, &asic_status) < 0) {
- DE_ACT(("check_asic_status: failed on read_dsp\n"));
+ dev_err(chip->card->dev,
+ "check_asic_status: failed on read_dsp\n");
return -EIO;
}
@@ -164,8 +164,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
/* Only set the clock for internal mode. Do not return failure,
simply treat it as a non-event. */
if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
- DE_ACT(("set_sample_rate: Cannot set sample rate - "
- "clock not set to CLK_CLOCKININTERNAL\n"));
+ dev_warn(chip->card->dev,
+ "Cannot set sample rate - clock not set to CLK_CLOCKININTERNAL\n");
chip->comm_page->sample_rate = cpu_to_le32(rate);
chip->sample_rate = rate;
return 0;
@@ -174,7 +174,7 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
if (wait_handshake(chip))
return -EIO;
- DE_ACT(("set_sample_rate(%d)\n", rate));
+ dev_dbg(chip->card->dev, "set_sample_rate(%d)\n", rate);
chip->sample_rate = rate;
chip->comm_page->sample_rate = cpu_to_le32(rate);
clear_handshake(chip);
@@ -188,29 +188,25 @@ static int set_input_clock(struct echoaudio *chip, u16 clock_source)
u16 clock;
u32 rate;
- DE_ACT(("set_input_clock:\n"));
rate = 0;
switch (clock_source) {
case ECHO_CLOCK_INTERNAL:
- DE_ACT(("Set Layla20 clock to INTERNAL\n"));
rate = chip->sample_rate;
clock = LAYLA20_CLOCK_INTERNAL;
break;
case ECHO_CLOCK_SPDIF:
- DE_ACT(("Set Layla20 clock to SPDIF\n"));
clock = LAYLA20_CLOCK_SPDIF;
break;
case ECHO_CLOCK_WORD:
- DE_ACT(("Set Layla20 clock to WORD\n"));
clock = LAYLA20_CLOCK_WORD;
break;
case ECHO_CLOCK_SUPER:
- DE_ACT(("Set Layla20 clock to SUPER\n"));
clock = LAYLA20_CLOCK_SUPER;
break;
default:
- DE_ACT(("Input clock 0x%x not supported for Layla24\n",
- clock_source));
+ dev_err(chip->card->dev,
+ "Input clock 0x%x not supported for Layla24\n",
+ clock_source);
return -EINVAL;
}
chip->input_clock = clock_source;
@@ -229,7 +225,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock_source)
static int set_output_clock(struct echoaudio *chip, u16 clock)
{
- DE_ACT(("set_output_clock: %d\n", clock));
switch (clock) {
case ECHO_CLOCK_SUPER:
clock = LAYLA20_OUTPUT_CLOCK_SUPER;
@@ -238,7 +233,7 @@ static int set_output_clock(struct echoaudio *chip, u16 clock)
clock = LAYLA20_OUTPUT_CLOCK_WORD;
break;
default:
- DE_ACT(("set_output_clock wrong clock\n"));
+ dev_err(chip->card->dev, "set_output_clock wrong clock\n");
return -EINVAL;
}
@@ -283,7 +278,6 @@ static int update_flags(struct echoaudio *chip)
static int set_professional_spdif(struct echoaudio *chip, char prof)
{
- DE_ACT(("set_professional_spdif %d\n", prof));
if (prof)
chip->comm_page->flags |=
cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF);
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index 8c041647f285..4f11e81f6c0e 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -40,12 +40,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Layla24\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA24))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -69,7 +69,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
if ((err = init_line_levels(chip)) < 0)
return err;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -117,7 +116,6 @@ static int load_asic(struct echoaudio *chip)
if (chip->asic_loaded)
return 1;
- DE_INIT(("load_asic\n"));
/* Give the DSP a few milliseconds to settle down */
mdelay(10);
@@ -151,7 +149,6 @@ static int load_asic(struct echoaudio *chip)
err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
TRUE);
- DE_INIT(("load_asic() done\n"));
return err;
}
@@ -167,8 +164,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
/* Only set the clock for internal mode. */
if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
- DE_ACT(("set_sample_rate: Cannot set sample rate - "
- "clock not set to CLK_CLOCKININTERNAL\n"));
+ dev_warn(chip->card->dev,
+ "Cannot set sample rate - clock not set to CLK_CLOCKININTERNAL\n");
/* Save the rate anyhow */
chip->comm_page->sample_rate = cpu_to_le32(rate);
chip->sample_rate = rate;
@@ -241,7 +238,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP ? */
chip->sample_rate = rate;
- DE_ACT(("set_sample_rate: %d clock %d\n", rate, control_reg));
+ dev_dbg(chip->card->dev,
+ "set_sample_rate: %d clock %d\n", rate, control_reg);
return write_control_reg(chip, control_reg, FALSE);
}
@@ -260,7 +258,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
/* Pick the new clock */
switch (clock) {
case ECHO_CLOCK_INTERNAL:
- DE_ACT(("Set Layla24 clock to INTERNAL\n"));
chip->input_clock = ECHO_CLOCK_INTERNAL;
return set_sample_rate(chip, chip->sample_rate);
case ECHO_CLOCK_SPDIF:
@@ -269,7 +266,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
control_reg |= GML_SPDIF_CLOCK;
/* Layla24 doesn't support 96KHz S/PDIF */
control_reg &= ~GML_DOUBLE_SPEED_MODE;
- DE_ACT(("Set Layla24 clock to SPDIF\n"));
break;
case ECHO_CLOCK_WORD:
control_reg |= GML_WORD_CLOCK;
@@ -277,17 +273,16 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
control_reg |= GML_DOUBLE_SPEED_MODE;
else
control_reg &= ~GML_DOUBLE_SPEED_MODE;
- DE_ACT(("Set Layla24 clock to WORD\n"));
break;
case ECHO_CLOCK_ADAT:
if (chip->digital_mode != DIGITAL_MODE_ADAT)
return -EAGAIN;
control_reg |= GML_ADAT_CLOCK;
control_reg &= ~GML_DOUBLE_SPEED_MODE;
- DE_ACT(("Set Layla24 clock to ADAT\n"));
break;
default:
- DE_ACT(("Input clock 0x%x not supported for Layla24\n", clock));
+ dev_err(chip->card->dev,
+ "Input clock 0x%x not supported for Layla24\n", clock);
return -EINVAL;
}
@@ -353,7 +348,8 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
asic = FW_LAYLA24_2A_ASIC;
break;
default:
- DE_ACT(("Digital mode not supported: %d\n", mode));
+ dev_err(chip->card->dev,
+ "Digital mode not supported: %d\n", mode);
return -EINVAL;
}
@@ -393,6 +389,6 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
return err;
chip->digital_mode = mode;
- DE_ACT(("set_digital_mode to %d\n", mode));
+ dev_dbg(chip->card->dev, "set_digital_mode to %d\n", mode);
return incompatible_clock;
}
diff --git a/sound/pci/echoaudio/mia_dsp.c b/sound/pci/echoaudio/mia_dsp.c
index 6ebfa6e7ab9e..fdad079ad9a1 100644
--- a/sound/pci/echoaudio/mia_dsp.c
+++ b/sound/pci/echoaudio/mia_dsp.c
@@ -41,12 +41,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Mia\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != MIA))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -66,7 +66,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -126,7 +125,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
control_reg = MIA_32000;
break;
default:
- DE_ACT(("set_sample_rate: %d invalid!\n", rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: %d invalid!\n", rate);
return -EINVAL;
}
@@ -153,7 +153,7 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
- DE_ACT(("set_input_clock(%d)\n", clock));
+ dev_dbg(chip->card->dev, "set_input_clock(%d)\n", clock);
if (snd_BUG_ON(clock != ECHO_CLOCK_INTERNAL &&
clock != ECHO_CLOCK_SPDIF))
return -EINVAL;
@@ -181,7 +181,8 @@ static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
index = output * num_pipes_out(chip) + pipe;
chip->comm_page->vmixer[index] = gain;
- DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
+ dev_dbg(chip->card->dev,
+ "set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain);
return 0;
}
@@ -211,7 +212,7 @@ static int update_flags(struct echoaudio *chip)
static int set_professional_spdif(struct echoaudio *chip, char prof)
{
- DE_ACT(("set_professional_spdif %d\n", prof));
+ dev_dbg(chip->card->dev, "set_professional_spdif %d\n", prof);
if (prof)
chip->comm_page->flags |=
cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF);
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c
index 7f4dfae0323a..d913749d154a 100644
--- a/sound/pci/echoaudio/midi.c
+++ b/sound/pci/echoaudio/midi.c
@@ -36,7 +36,7 @@
/* Start and stop Midi input */
static int enable_midi_input(struct echoaudio *chip, char enable)
{
- DE_MID(("enable_midi_input(%d)\n", enable));
+ dev_dbg(chip->card->dev, "enable_midi_input(%d)\n", enable);
if (wait_handshake(chip))
return -EIO;
@@ -74,7 +74,7 @@ static int write_midi(struct echoaudio *chip, u8 *data, int bytes)
chip->comm_page->midi_out_free_count = 0;
clear_handshake(chip);
send_vector(chip, DSP_VC_MIDI_WRITE);
- DE_MID(("write_midi: %d\n", bytes));
+ dev_dbg(chip->card->dev, "write_midi: %d\n", bytes);
return bytes;
}
@@ -157,7 +157,6 @@ static int snd_echo_midi_input_open(struct snd_rawmidi_substream *substream)
struct echoaudio *chip = substream->rmidi->private_data;
chip->midi_in = substream;
- DE_MID(("rawmidi_iopen\n"));
return 0;
}
@@ -183,7 +182,6 @@ static int snd_echo_midi_input_close(struct snd_rawmidi_substream *substream)
struct echoaudio *chip = substream->rmidi->private_data;
chip->midi_in = NULL;
- DE_MID(("rawmidi_iclose\n"));
return 0;
}
@@ -196,7 +194,6 @@ static int snd_echo_midi_output_open(struct snd_rawmidi_substream *substream)
chip->tinuse = 0;
chip->midi_full = 0;
chip->midi_out = substream;
- DE_MID(("rawmidi_oopen\n"));
return 0;
}
@@ -209,7 +206,6 @@ static void snd_echo_midi_output_write(unsigned long data)
int bytes, sent, time;
unsigned char buf[MIDI_OUT_BUFFER_SIZE - 1];
- DE_MID(("snd_echo_midi_output_write\n"));
/* No interrupts are involved: we have to check at regular intervals
if the card's output buffer has room for new data. */
sent = bytes = 0;
@@ -218,7 +214,7 @@ static void snd_echo_midi_output_write(unsigned long data)
if (!snd_rawmidi_transmit_empty(chip->midi_out)) {
bytes = snd_rawmidi_transmit_peek(chip->midi_out, buf,
MIDI_OUT_BUFFER_SIZE - 1);
- DE_MID(("Try to send %d bytes...\n", bytes));
+ dev_dbg(chip->card->dev, "Try to send %d bytes...\n", bytes);
sent = write_midi(chip, buf, bytes);
if (sent < 0) {
dev_err(chip->card->dev,
@@ -227,12 +223,12 @@ static void snd_echo_midi_output_write(unsigned long data)
sent = 9000;
chip->midi_full = 1;
} else if (sent > 0) {
- DE_MID(("%d bytes sent\n", sent));
+ dev_dbg(chip->card->dev, "%d bytes sent\n", sent);
snd_rawmidi_transmit_ack(chip->midi_out, sent);
} else {
/* Buffer is full. DSP's internal buffer is 64 (128 ?)
bytes long. Let's wait until half of them are sent */
- DE_MID(("Full\n"));
+ dev_dbg(chip->card->dev, "Full\n");
sent = 32;
chip->midi_full = 1;
}
@@ -244,7 +240,8 @@ static void snd_echo_midi_output_write(unsigned long data)
sent */
time = (sent << 3) / 25 + 1; /* 8/25=0.32ms to send a byte */
mod_timer(&chip->timer, jiffies + (time * HZ + 999) / 1000);
- DE_MID(("Timer armed(%d)\n", ((time * HZ + 999) / 1000)));
+ dev_dbg(chip->card->dev,
+ "Timer armed(%d)\n", ((time * HZ + 999) / 1000));
}
spin_unlock_irqrestore(&chip->lock, flags);
}
@@ -256,7 +253,7 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
{
struct echoaudio *chip = substream->rmidi->private_data;
- DE_MID(("snd_echo_midi_output_trigger(%d)\n", up));
+ dev_dbg(chip->card->dev, "snd_echo_midi_output_trigger(%d)\n", up);
spin_lock_irq(&chip->lock);
if (up) {
if (!chip->tinuse) {
@@ -270,7 +267,7 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
chip->tinuse = 0;
spin_unlock_irq(&chip->lock);
del_timer_sync(&chip->timer);
- DE_MID(("Timer removed\n"));
+ dev_dbg(chip->card->dev, "Timer removed\n");
return;
}
}
@@ -287,7 +284,6 @@ static int snd_echo_midi_output_close(struct snd_rawmidi_substream *substream)
struct echoaudio *chip = substream->rmidi->private_data;
chip->midi_out = NULL;
- DE_MID(("rawmidi_oclose\n"));
return 0;
}
@@ -327,6 +323,5 @@ static int snd_echo_midi_create(struct snd_card *card,
chip->rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT |
SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
- DE_INIT(("MIDI ok\n"));
return 0;
}
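
One constant in the midi.c context above is worth decoding: MIDI runs at 31250 baud with 10 bits per byte on the wire (start bit, 8 data bits, stop bit), i.e. 0.32 ms = 8/25 ms per byte, so sending sent bytes takes sent * 8 / 25 ms. That is exactly the (sent << 3) / 25 expression used to re-arm the timer; the extra +1 millisecond and the (time * HZ + 999) / 1000 conversion simply round up to whole jiffies.
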
diff --git a/sound/pci/echoaudio/mona_dsp.c b/sound/pci/echoaudio/mona_dsp.c
index 6e6a7eb555b8..843c7a5e5105 100644
--- a/sound/pci/echoaudio/mona_dsp.c
+++ b/sound/pci/echoaudio/mona_dsp.c
@@ -41,12 +41,12 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
- DE_INIT(("init_hw() - Mona\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
- DE_INIT(("init_hw - could not initialize DSP comm page\n"));
+ dev_err(chip->card->dev,
+ "init_hw - could not initialize DSP comm page\n");
return err;
}
@@ -71,7 +71,6 @@ static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
return err;
chip->bad_board = FALSE;
- DE_INIT(("init_hw done\n"));
return err;
}
@@ -202,8 +201,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
/* Only set the clock for internal mode. */
if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
- DE_ACT(("set_sample_rate: Cannot set sample rate - "
- "clock not set to CLK_CLOCKININTERNAL\n"));
+ dev_dbg(chip->card->dev,
+ "Cannot set sample rate - clock not set to CLK_CLOCKININTERNAL\n");
/* Save the rate anyhow */
chip->comm_page->sample_rate = cpu_to_le32(rate);
chip->sample_rate = rate;
@@ -279,7 +278,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
clock = GML_8KHZ;
break;
default:
- DE_ACT(("set_sample_rate: %d invalid!\n", rate));
+ dev_err(chip->card->dev,
+ "set_sample_rate: %d invalid!\n", rate);
return -EINVAL;
}
@@ -287,7 +287,8 @@ static int set_sample_rate(struct echoaudio *chip, u32 rate)
chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP */
chip->sample_rate = rate;
- DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
+ dev_dbg(chip->card->dev,
+ "set_sample_rate: %d clock %d\n", rate, clock);
return write_control_reg(chip, control_reg, force_write);
}
@@ -299,7 +300,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
u32 control_reg, clocks_from_dsp;
int err;
- DE_ACT(("set_input_clock:\n"));
/* Prevent two simultaneous calls to switch_asic() */
if (atomic_read(&chip->opencount))
@@ -312,7 +312,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
switch (clock) {
case ECHO_CLOCK_INTERNAL:
- DE_ACT(("Set Mona clock to INTERNAL\n"));
chip->input_clock = ECHO_CLOCK_INTERNAL;
return set_sample_rate(chip, chip->sample_rate);
case ECHO_CLOCK_SPDIF:
@@ -324,7 +323,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
spin_lock_irq(&chip->lock);
if (err < 0)
return err;
- DE_ACT(("Set Mona clock to SPDIF\n"));
control_reg |= GML_SPDIF_CLOCK;
if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
control_reg |= GML_DOUBLE_SPEED_MODE;
@@ -332,7 +330,6 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
control_reg &= ~GML_DOUBLE_SPEED_MODE;
break;
case ECHO_CLOCK_WORD:
- DE_ACT(("Set Mona clock to WORD\n"));
spin_unlock_irq(&chip->lock);
err = switch_asic(chip, clocks_from_dsp &
GML_CLOCK_DETECT_BIT_WORD96);
@@ -346,14 +343,15 @@ static int set_input_clock(struct echoaudio *chip, u16 clock)
control_reg &= ~GML_DOUBLE_SPEED_MODE;
break;
case ECHO_CLOCK_ADAT:
- DE_ACT(("Set Mona clock to ADAT\n"));
+ dev_dbg(chip->card->dev, "Set Mona clock to ADAT\n");
if (chip->digital_mode != DIGITAL_MODE_ADAT)
return -EAGAIN;
control_reg |= GML_ADAT_CLOCK;
control_reg &= ~GML_DOUBLE_SPEED_MODE;
break;
default:
- DE_ACT(("Input clock 0x%x not supported for Mona\n", clock));
+ dev_err(chip->card->dev,
+ "Input clock 0x%x not supported for Mona\n", clock);
return -EINVAL;
}
@@ -381,7 +379,8 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
incompatible_clock = TRUE;
break;
default:
- DE_ACT(("Digital mode not supported: %d\n", mode));
+ dev_err(chip->card->dev,
+ "Digital mode not supported: %d\n", mode);
return -EINVAL;
}
@@ -422,6 +421,6 @@ static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
return err;
chip->digital_mode = mode;
- DE_ACT(("set_digital_mode to %d\n", mode));
+ dev_dbg(chip->card->dev, "set_digital_mode to %d\n", mode);
return incompatible_clock;
}
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 229269788023..b4458a630a7c 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1289,10 +1289,8 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
}
if (emu->emu1010.firmware_thread)
kthread_stop(emu->emu1010.firmware_thread);
- if (emu->firmware)
- release_firmware(emu->firmware);
- if (emu->dock_fw)
- release_firmware(emu->dock_fw);
+ release_firmware(emu->firmware);
+ release_firmware(emu->dock_fw);
if (emu->irq >= 0)
free_irq(emu->irq, emu);
/* remove reserved page */
@@ -1301,8 +1299,7 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
(struct snd_util_memblk *)emu->reserved_page);
emu->reserved_page = NULL;
}
- if (emu->memhdr)
- snd_util_memhdr_free(emu->memhdr);
+ snd_util_memhdr_free(emu->memhdr);
if (emu->silent_page.area)
snd_dma_free_pages(&emu->silent_page);
if (emu->ptb_pages.area)
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index e223de1408c0..15933f92f63a 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -180,7 +180,7 @@ MODULE_PARM_DESC(enable, "Enable the EMU10K1X soundcard.");
/* From 0x50 - 0x5f, last samples captured */
-/**
+/*
* The hardware has 3 channels for playback and 1 for capture.
* - channel 0 is the front channel
* - channel 1 is the rear channel
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 745f0627c634..eb5c0aba41c1 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -777,8 +777,7 @@ static void snd_emu10k1_ctl_private_free(struct snd_kcontrol *kctl)
kctl->private_value = 0;
list_del(&ctl->list);
kfree(ctl);
- if (kctl->tlv.p)
- kfree(kctl->tlv.p);
+ kfree(kctl->tlv.p);
}
static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
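
The emu10k1 teardown hunks above rely on release_firmware() and kfree() being defined as no-ops for a NULL argument (and, as the removed guard implies, snd_util_memhdr_free() behaves the same way), so the if (...) checks dropped by the patch were redundant. The idiom, sketched outside the driver:

    #include <linux/firmware.h>
    #include <linux/slab.h>

    static void teardown_example(const struct firmware *fw, void *buf)
    {
            release_firmware(fw);   /* no-op when fw is NULL               */
            kfree(buf);             /* kfree(NULL) is explicitly permitted */
    }
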
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index c5ae2a24d8a5..1de33025669a 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -83,7 +83,7 @@ static int snd_emu10k1_spdif_get_mask(struct snd_kcontrol *kcontrol,
* Items labels in enum mixer controls assigning source data to
* each destination
*/
-static char *emu1010_src_texts[] = {
+static const char * const emu1010_src_texts[] = {
"Silence",
"Dock Mic A",
"Dock Mic B",
@@ -141,7 +141,7 @@ static char *emu1010_src_texts[] = {
/* 1616(m) cardbus */
-static char *emu1616_src_texts[] = {
+static const char * const emu1616_src_texts[] = {
"Silence",
"Dock Mic A",
"Dock Mic B",
@@ -393,23 +393,11 @@ static int snd_emu1010_input_output_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
- char **items;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1616) {
- uinfo->value.enumerated.items = 49;
- items = emu1616_src_texts;
- } else {
- uinfo->value.enumerated.items = 53;
- items = emu1010_src_texts;
- }
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- items[uinfo->value.enumerated.item]);
- return 0;
+ if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1616)
+ return snd_ctl_enum_info(uinfo, 1, 49, emu1616_src_texts);
+ else
+ return snd_ctl_enum_info(uinfo, 1, 53, emu1010_src_texts);
}
static int snd_emu1010_output_source_get(struct snd_kcontrol *kcontrol,
@@ -699,19 +687,11 @@ static struct snd_kcontrol_new snd_emu1010_dac_pads[] = {
static int snd_emu1010_internal_clock_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {
+ static const char * const texts[4] = {
"44100", "48000", "SPDIF", "ADAT"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
-
-
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_emu1010_internal_clock_get(struct snd_kcontrol *kcontrol,
@@ -830,21 +810,15 @@ static int snd_audigy_i2c_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
#if 0
- static char *texts[4] = {
+ static const char * const texts[4] = {
"Unknown1", "Unknown2", "Mic", "Line"
};
#endif
- static char *texts[2] = {
+ static const char * const texts[2] = {
"Mic", "Line"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_audigy_i2c_capture_source_get(struct snd_kcontrol *kcontrol,
@@ -997,15 +971,9 @@ static struct snd_kcontrol_new snd_audigy_i2c_volume_ctls[] = {
#if 0
static int snd_audigy_spdif_output_rate_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"44100", "48000", "96000"};
+ static const char * const texts[] = {"44100", "48000", "96000"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_audigy_spdif_output_rate_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index a4fe7f0c9458..7ef3898a7806 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -757,18 +757,12 @@ static int snd_p16v_volume_put(struct snd_kcontrol *kcontrol,
static int snd_p16v_capture_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[8] = {
+ static const char * const texts[8] = {
"SPDIF", "I2S", "SRC48", "SRCMulti_SPDIF", "SRCMulti_I2S",
"CDIF", "FX", "AC97"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item > 7)
- uinfo->value.enumerated.item = 7;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 8, texts);
}
static int snd_p16v_capture_source_get(struct snd_kcontrol *kcontrol,
@@ -805,15 +799,9 @@ static int snd_p16v_capture_source_put(struct snd_kcontrol *kcontrol,
static int snd_p16v_capture_channel_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = { "0", "1", "2", "3", };
+ static const char * const texts[4] = { "0", "1", "2", "3", };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_p16v_capture_channel_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 639962443ccc..0fc46eb4e251 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1045,18 +1045,12 @@ static int snd_es1938_new_pcm(struct es1938 *chip, int device)
static int snd_es1938_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[8] = {
+ static const char * const texts[8] = {
"Mic", "Mic Master", "CD", "AOUT",
"Mic1", "Mix", "Line", "Master"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item > 7)
- uinfo->value.enumerated.item = 7;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 8, texts);
}
static int snd_es1938_get_mux(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index a9956a7c5677..6039700f8579 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1710,7 +1710,8 @@ static void es1968_measure_clock(struct es1968 *chip)
int i, apu;
unsigned int pa, offset, t;
struct esm_memory *memory;
- struct timeval start_time, stop_time;
+ ktime_t start_time, stop_time;
+ ktime_t diff;
if (chip->clock == 0)
chip->clock = 48000; /* default clock value */
@@ -1761,12 +1762,12 @@ static void es1968_measure_clock(struct es1968 *chip)
snd_es1968_bob_inc(chip, ESM_BOB_FREQ);
__apu_set_register(chip, apu, 5, pa & 0xffff);
snd_es1968_trigger_apu(chip, apu, ESM_APU_16BITLINEAR);
- do_gettimeofday(&start_time);
+ start_time = ktime_get();
spin_unlock_irq(&chip->reg_lock);
msleep(50);
spin_lock_irq(&chip->reg_lock);
offset = __apu_get_register(chip, apu, 5);
- do_gettimeofday(&stop_time);
+ stop_time = ktime_get();
snd_es1968_trigger_apu(chip, apu, 0); /* stop */
snd_es1968_bob_dec(chip);
chip->in_measurement = 0;
@@ -1777,12 +1778,8 @@ static void es1968_measure_clock(struct es1968 *chip)
offset &= 0xfffe;
offset += chip->measure_count * (CLOCK_MEASURE_BUFSIZE/2);
- t = stop_time.tv_sec - start_time.tv_sec;
- t *= 1000000;
- if (stop_time.tv_usec < start_time.tv_usec)
- t -= start_time.tv_usec - stop_time.tv_usec;
- else
- t += stop_time.tv_usec - start_time.tv_usec;
+ diff = ktime_sub(stop_time, start_time);
+ t = ktime_to_us(diff);
if (t == 0) {
dev_err(chip->card->dev, "?? calculation error..\n");
} else {
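
The es1968 clock-measurement hunk above swaps do_gettimeofday() plus hand-written tv_sec/tv_usec arithmetic for the monotonic ktime API, which cannot jump on wall-clock changes and needs no sign handling. The elapsed-time pattern reduced to its core, with msleep() standing in for whatever work is being timed:

    #include <linux/ktime.h>
    #include <linux/delay.h>

    static s64 elapsed_us_example(void)
    {
            ktime_t start = ktime_get();    /* monotonic clock */

            msleep(50);                     /* the timed work */

            /* ktime_sub() + ktime_to_us() replace the tv_sec/tv_usec math */
            return ktime_to_us(ktime_sub(ktime_get(), start));
    }
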
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index c5038303a126..d167afffce5f 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -958,17 +958,11 @@ static int snd_fm801_put_double(struct snd_kcontrol *kcontrol,
static int snd_fm801_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[5] = {
+ static const char * const texts[5] = {
"AC97 Primary", "FM", "I2S", "PCM", "AC97 Secondary"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item > 4)
- uinfo->value.enumerated.item = 4;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static int snd_fm801_get_mux(struct snd_kcontrol *kcontrol,
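
The emumixer, p16v, es1938 and fm801 hunks above all collapse open-coded ALSA enum-info callbacks into snd_ctl_enum_info(), which fills in the element type, channel count and item count and copies the (clamped) item name in one call; the text tables become const char * const in the same pass. A sketch of a converted callback, with made-up item names:

    #include <linux/kernel.h>
    #include <sound/core.h>
    #include <sound/control.h>

    static int example_mux_info(struct snd_kcontrol *kcontrol,
                                struct snd_ctl_elem_info *uinfo)
    {
            static const char * const texts[] = { "Off", "Low", "High" };

            /* replaces the type/count/items/strcpy() boilerplate */
            return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
    }
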
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index fcc5e478c9a1..1ede82200ee5 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -441,6 +441,13 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_parse_pin_defcfg);
+/**
+ * snd_hda_get_input_pin_attr - Get the input pin attribute from pin config
+ * @def_conf: pin configuration value
+ *
+ * Guess the input pin attribute (INPUT_PIN_ATTR_XXX) from the given
+ * default pin configuration value.
+ */
int snd_hda_get_input_pin_attr(unsigned int def_conf)
{
unsigned int loc = get_defcfg_location(def_conf);
@@ -464,12 +471,15 @@ EXPORT_SYMBOL_GPL(snd_hda_get_input_pin_attr);
/**
* hda_get_input_pin_label - Give a label for the given input pin
+ * @codec: the HDA codec
+ * @item: pin config item to refer to
+ * @pin: the pin NID
+ * @check_location: flag to add the jack location prefix
*
- * When check_location is true, the function checks the pin location
+ * When @check_location is true, the function checks the pin location
* for mic and line-in pins, and set an appropriate prefix like "Front",
* "Rear", "Internal".
*/
-
static const char *hda_get_input_pin_label(struct hda_codec *codec,
const struct auto_pin_cfg_item *item,
hda_nid_t pin, bool check_location)
@@ -550,6 +560,9 @@ static int check_mic_location_need(struct hda_codec *codec,
/**
* hda_get_autocfg_input_label - Get a label for the given input
+ * @codec: the HDA codec
+ * @cfg: the parsed pin configuration
+ * @input: the input index number
*
* Get a label for the given input pin defined by the autocfg item.
* Unlike hda_get_input_pin_label(), this function checks all inputs
@@ -677,6 +690,12 @@ static int fill_audio_out_name(struct hda_codec *codec, hda_nid_t nid,
/**
* snd_hda_get_pin_label - Get a label for the given I/O pin
+ * @codec: the HDA codec
+ * @nid: pin NID
+ * @cfg: the parsed pin configuration
+ * @label: the string buffer to store
+ * @maxlen: the max length of string buffer (including termination)
+ * @indexp: the pointer to return the index number (for multiple ctls)
*
* Get a label for the given pin. This function works for both input and
* output pins. When @cfg is given as non-NULL, the function tries to get
@@ -748,6 +767,14 @@ int snd_hda_get_pin_label(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_get_pin_label);
+/**
+ * snd_hda_add_verbs - Add verbs to the init list
+ * @codec: the HDA codec
+ * @list: zero-terminated verb list to add
+ *
+ * Append the given verb list to the execution list. The verbs will be
+ * performed at init and resume time via snd_hda_apply_verbs().
+ */
int snd_hda_add_verbs(struct hda_codec *codec,
const struct hda_verb *list)
{
@@ -760,6 +787,10 @@ int snd_hda_add_verbs(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_add_verbs);
+/**
+ * snd_hda_apply_verbs - Execute the init verb lists
+ * @codec: the HDA codec
+ */
void snd_hda_apply_verbs(struct hda_codec *codec)
{
int i;
@@ -770,6 +801,11 @@ void snd_hda_apply_verbs(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_apply_verbs);
+/**
+ * snd_hda_apply_pincfgs - Set each pin config in the given list
+ * @codec: the HDA codec
+ * @cfg: NULL-terminated pin config table
+ */
void snd_hda_apply_pincfgs(struct hda_codec *codec,
const struct hda_pintbl *cfg)
{
@@ -837,6 +873,11 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
}
}
+/**
+ * snd_hda_apply_fixup - Apply the fixup chain with the given action
+ * @codec: the HDA codec
+ * @action: fixup action (HDA_FIXUP_ACT_XXX)
+ */
void snd_hda_apply_fixup(struct hda_codec *codec, int action)
{
if (codec->fixup_list)
@@ -855,6 +896,12 @@ static bool pin_config_match(struct hda_codec *codec,
return true;
}
+/**
+ * snd_hda_pick_pin_fixup - Pick up a fixup matching with the pin quirk list
+ * @codec: the HDA codec
+ * @pin_quirk: zero-terminated pin quirk list
+ * @fixlist: the fixup list
+ */
void snd_hda_pick_pin_fixup(struct hda_codec *codec,
const struct snd_hda_pin_quirk *pin_quirk,
const struct hda_fixup *fixlist)
@@ -881,6 +928,21 @@ void snd_hda_pick_pin_fixup(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_pick_pin_fixup);
+/**
+ * snd_hda_pick_fixup - Pick up a fixup matching with PCI/codec SSID or model string
+ * @codec: the HDA codec
+ * @models: NULL-terminated model string list
+ * @quirk: zero-terminated PCI/codec SSID quirk list
+ * @fixlist: the fixup list
+ *
+ * Pick up a fixup entry matching with the given model string or SSID.
+ * If a fixup was already set beforehand, the function doesn't do anything.
+ * When the special model string "nofixup" is given, no fixup is applied either.
+ *
+ * The function first tries to find a matching model name, if given.
+ * If nothing matches, it looks up the PCI SSID.
+ * If still nothing matches, it looks up the codec SSID.
+ */
void snd_hda_pick_fixup(struct hda_codec *codec,
const struct hda_model_fixup *models,
const struct snd_pci_quirk *quirk,
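Editor's sketch of the verb-list API documented above; the NID, verbs and coefficient values are made up for illustration, and the appended verbs are executed later through snd_hda_apply_verbs():

static const struct hda_verb example_init_verbs[] = {
	/* hypothetical NID/verb/parameter triples */
	{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
	{ 0x20, AC_VERB_SET_PROC_COEF, 0x4a30 },
	{ } /* zero-terminator */
};

static int example_apply_init_verbs(struct hda_codec *codec)
{
	/* the list is run at init and resume time via snd_hda_apply_verbs() */
	return snd_hda_add_verbs(codec, example_init_verbs);
}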
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 8c6c50afc0b7..1e7de08e77cb 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -175,6 +175,11 @@ static int snd_hda_do_attach(struct hda_beep *beep)
return 0;
}
+/**
+ * snd_hda_enable_beep_device - Turn on/off beep sound
+ * @codec: the HDA codec
+ * @enable: flag to turn on/off
+ */
int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
{
struct hda_beep *beep = codec->beep;
@@ -191,6 +196,20 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
}
EXPORT_SYMBOL_GPL(snd_hda_enable_beep_device);
+/**
+ * snd_hda_attach_beep_device - Attach a beep input device
+ * @codec: the HDA codec
+ * @nid: beep NID
+ *
+ * Attach a beep object to the given widget. If beep hint is turned off
+ * explicitly or beep_mode of the codec is turned off, this does nothing.
+ *
+ * The attached beep device has to be registered via
+ * snd_hda_register_beep_device() and released via snd_hda_detach_beep_device()
+ * appropriately.
+ *
+ * Currently, only one beep device is allowed for each codec.
+ */
int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
{
struct hda_beep *beep;
@@ -228,6 +247,10 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
}
EXPORT_SYMBOL_GPL(snd_hda_attach_beep_device);
+/**
+ * snd_hda_detach_beep_device - Detach the beep device
+ * @codec: the HDA codec
+ */
void snd_hda_detach_beep_device(struct hda_codec *codec)
{
struct hda_beep *beep = codec->beep;
@@ -240,6 +263,10 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_detach_beep_device);
+/**
+ * snd_hda_register_beep_device - Register the beep device
+ * @codec: the HDA codec
+ */
int snd_hda_register_beep_device(struct hda_codec *codec)
{
struct hda_beep *beep = codec->beep;
@@ -269,6 +296,12 @@ static bool ctl_has_mute(struct snd_kcontrol *kcontrol)
}
/* get/put callbacks for beep mute mixer switches */
+
+/**
+ * snd_hda_mixer_amp_switch_get_beep - Get callback for beep controls
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
+ */
int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -283,6 +316,11 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_get_beep);
+/**
+ * snd_hda_mixer_amp_switch_put_beep - Put callback for beep controls
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
+ */
int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
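Editor's sketch of the beep lifecycle described in the comments above; the beep NID is hypothetical and error handling is kept to the minimum:

static int example_setup_beep(struct hda_codec *codec)
{
	int err;

	err = snd_hda_attach_beep_device(codec, 0x1d); /* hypothetical NID */
	if (err < 0)
		return err;
	/* the input device itself is created later by
	 * snd_hda_register_beep_device() and torn down via
	 * snd_hda_detach_beep_device() */
	return snd_hda_enable_beep_device(codec, 1);
}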
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 15e0089492f7..2fe86d2e1b09 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -77,6 +77,10 @@ static struct hda_vendor_id hda_vendor_ids[] = {
static DEFINE_MUTEX(preset_mutex);
static LIST_HEAD(hda_preset_tables);
+/**
+ * snd_hda_add_codec_preset - Add a codec preset to the chain
+ * @preset: codec preset table to add
+ */
int snd_hda_add_codec_preset(struct hda_codec_preset_list *preset)
{
mutex_lock(&preset_mutex);
@@ -86,6 +90,10 @@ int snd_hda_add_codec_preset(struct hda_codec_preset_list *preset)
}
EXPORT_SYMBOL_GPL(snd_hda_add_codec_preset);
+/**
+ * snd_hda_delete_codec_preset - Delete a codec preset from the chain
+ * @preset: codec preset table to delete
+ */
int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset)
{
mutex_lock(&preset_mutex);
@@ -338,8 +346,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
unsigned int parm;
parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
- if (parm == -1)
+ if (parm == -1) {
+ *start_id = 0;
return 0;
+ }
*start_id = (parm >> 16) & 0x7fff;
return (int)(parm & 0x7fff);
}
@@ -416,7 +426,6 @@ static int read_and_add_raw_conns(struct hda_codec *codec, hda_nid_t nid)
* snd_hda_get_conn_list - get connection list
* @codec: the HDA codec
* @nid: NID to parse
- * @len: number of connection list entries
* @listp: the pointer to store NID list
*
* Parses the connection list of the given widget and stores the pointer
@@ -827,8 +836,7 @@ static void snd_hda_bus_free(struct hda_bus *bus)
WARN_ON(!list_empty(&bus->codec_list));
if (bus->workq)
flush_workqueue(bus->workq);
- if (bus->unsol)
- kfree(bus->unsol);
+ kfree(bus->unsol);
if (bus->ops.private_free)
bus->ops.private_free(bus);
if (bus->workq)
@@ -966,14 +974,12 @@ find_codec_preset(struct hda_codec *codec)
mutex_unlock(&preset_mutex);
if (mod_requested < HDA_MODREQ_MAX_COUNT) {
- char name[32];
if (!mod_requested)
- snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
- codec->vendor_id);
+ request_module("snd-hda-codec-id:%08x",
+ codec->vendor_id);
else
- snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
- (codec->vendor_id >> 16) & 0xffff);
- request_module(name);
+ request_module("snd-hda-codec-id:%04x*",
+ (codec->vendor_id >> 16) & 0xffff);
mod_requested++;
goto again;
}
@@ -1190,7 +1196,16 @@ unsigned int snd_hda_codec_get_pincfg(struct hda_codec *codec, hda_nid_t nid)
}
EXPORT_SYMBOL_GPL(snd_hda_codec_get_pincfg);
-/* remember the current pinctl target value */
+/**
+ * snd_hda_codec_set_pin_target - remember the current pinctl target value
+ * @codec: the HDA codec
+ * @nid: pin NID
+ * @val: assigned pinctl value
+ *
+ * This function stores the given value to a pinctl target value in the
+ * pincfg table. This isn't always the same as the actually written value,
+ * but can be referred to at any time via snd_hda_codec_get_pin_target().
+ */
int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
unsigned int val)
{
@@ -1204,7 +1219,11 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_set_pin_target);
-/* return the current pinctl target value */
+/**
+ * snd_hda_codec_get_pin_target - return the current pinctl target value
+ * @codec: the HDA codec
+ * @nid: pin NID
+ */
int snd_hda_codec_get_pin_target(struct hda_codec *codec, hda_nid_t nid)
{
struct hda_pincfg *pin;
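Editor's sketch of the pinctl target cache documented above, using the standard AC_PINCTL_OUT_EN bit; the pin NID comes from the caller and the example is hypothetical:

static void example_remember_pinctl(struct hda_codec *codec, hda_nid_t pin)
{
	/* store the intended pinctl value; this doesn't touch the hardware */
	snd_hda_codec_set_pin_target(codec, pin, AC_PINCTL_OUT_EN);

	/* read it back later, e.g. when rewriting the pin at resume */
	if (snd_hda_codec_get_pin_target(codec, pin) != AC_PINCTL_OUT_EN)
		codec_dbg(codec, "pin 0x%x not in the pincfg table\n", pin);
}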
@@ -1576,6 +1595,13 @@ int snd_hda_codec_new(struct hda_bus *bus,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_new);
+/**
+ * snd_hda_codec_update_widgets - Refresh widget caps and pin defaults
+ * @codec: the HDA codec
+ *
+ * Forcibly refresh all the widget caps and the init pin configurations of
+ * the given codec.
+ */
int snd_hda_codec_update_widgets(struct hda_codec *codec)
{
hda_nid_t fg;
@@ -2006,6 +2032,7 @@ EXPORT_SYMBOL_GPL(query_amp_caps);
* @codec: the HD-audio codec
* @nid: the NID to query
* @dir: either #HDA_INPUT or #HDA_OUTPUT
+ * @bits: bit mask to check the result
*
* Check whether the widget has the given amp capability for the direction.
*/
@@ -2025,7 +2052,7 @@ EXPORT_SYMBOL_GPL(snd_hda_check_amp_caps);
* snd_hda_override_amp_caps - Override the AMP capabilities
* @codec: the CODEC to clean up
* @nid: the NID to clean up
- * @direction: either #HDA_INPUT or #HDA_OUTPUT
+ * @dir: either #HDA_INPUT or #HDA_OUTPUT
* @caps: the capability bits to set
*
* Override the cached AMP caps bits value by the given one.
@@ -2241,7 +2268,17 @@ int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_amp_stereo);
-/* Works like snd_hda_codec_amp_update() but it writes the value only at
+/**
+ * snd_hda_codec_amp_init - initialize the AMP value
+ * @codec: the HDA codec
+ * @nid: NID to read the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Works like snd_hda_codec_amp_update() but it writes the value only at
* the first access. If the amp was already initialized / updated beforehand,
* this does nothing.
*/
@@ -2252,6 +2289,17 @@ int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_amp_init);
+/**
+ * snd_hda_codec_amp_init_stereo - initialize the stereo AMP value
+ * @codec: the HDA codec
+ * @nid: NID to read the AMP value
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Call snd_hda_codec_amp_init() for both stereo channels.
+ */
int snd_hda_codec_amp_init_stereo(struct hda_codec *codec, hda_nid_t nid,
int dir, int idx, int mask, int val)
{
@@ -2322,6 +2370,8 @@ static u32 get_amp_max_value(struct hda_codec *codec, hda_nid_t nid, int dir,
/**
* snd_hda_mixer_amp_volume_info - Info callback for a standard AMP mixer
+ * @kcontrol: referred ctl element
+ * @uinfo: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -2383,6 +2433,8 @@ update_amp_value(struct hda_codec *codec, hda_nid_t nid,
/**
* snd_hda_mixer_amp_volume_get - Get callback for a standard AMP mixer volume
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -2408,6 +2460,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_volume_get);
/**
* snd_hda_mixer_amp_volume_put - Put callback for a standard AMP mixer volume
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -2438,6 +2492,10 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_volume_put);
/**
* snd_hda_mixer_amp_volume_put - TLV callback for a standard AMP mixer volume
+ * @kcontrol: ctl element
+ * @op_flag: operation flag
+ * @size: byte size of input TLV
+ * @_tlv: TLV data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -2636,7 +2694,10 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
snd_array_free(&codec->nids);
}
-/* pseudo device locking
+/**
+ * snd_hda_lock_devices - pseudo device locking
+ * @bus: the BUS
+ *
* toggle card->shutdown to allow/disallow the device access (as a hack)
*/
int snd_hda_lock_devices(struct hda_bus *bus)
@@ -2673,6 +2734,10 @@ int snd_hda_lock_devices(struct hda_bus *bus)
}
EXPORT_SYMBOL_GPL(snd_hda_lock_devices);
+/**
+ * snd_hda_unlock_devices - pseudo device unlocking
+ * @bus: the BUS
+ */
void snd_hda_unlock_devices(struct hda_bus *bus)
{
struct snd_card *card = bus->card;
@@ -2859,7 +2924,7 @@ static int add_slave(struct hda_codec *codec,
}
/**
- * snd_hda_add_vmaster - create a virtual master control and add slaves
+ * __snd_hda_add_vmaster - create a virtual master control and add slaves
* @codec: HD-audio codec
* @name: vmaster control name
* @tlv: TLV data (optional)
@@ -2927,16 +2992,8 @@ static int vmaster_mute_mode_info(struct snd_kcontrol *kcontrol,
static const char * const texts[] = {
"On", "Off", "Follow Master"
};
- unsigned int index;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- index = uinfo->value.enumerated.item;
- if (index >= 3)
- index = 2;
- strcpy(uinfo->value.enumerated.name, texts[index]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int vmaster_mute_mode_get(struct snd_kcontrol *kcontrol,
@@ -2970,10 +3027,15 @@ static struct snd_kcontrol_new vmaster_mute_mode = {
.put = vmaster_mute_mode_put,
};
-/*
- * Add a mute-LED hook with the given vmaster switch kctl
- * "Mute-LED Mode" control is automatically created and associated with
- * the given hook.
+/**
+ * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED
+ * @codec: the HDA codec
+ * @hook: the vmaster hook object
+ * @expose_enum_ctl: flag to create an enum ctl
+ *
+ * Add a mute-LED hook with the given vmaster switch kctl.
+ * When @expose_enum_ctl is set, "Mute-LED Mode" control is automatically
+ * created and associated with the given hook.
*/
int snd_hda_add_vmaster_hook(struct hda_codec *codec,
struct hda_vmaster_mute_hook *hook,
@@ -2995,9 +3057,12 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_add_vmaster_hook);
-/*
- * Call the hook with the current value for synchronization
- * Should be called in init callback
+/**
+ * snd_hda_sync_vmaster_hook - Sync vmaster hook
+ * @hook: the vmaster hook
+ *
+ * Call the hook with the current value for synchronization.
+ * Should be called in init callback.
*/
void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook)
{
@@ -3022,6 +3087,8 @@ EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook);
/**
* snd_hda_mixer_amp_switch_info - Info callback for a standard AMP mixer switch
+ * @kcontrol: referred ctl element
+ * @uinfo: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -3041,6 +3108,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_info);
/**
* snd_hda_mixer_amp_switch_get - Get callback for a standard AMP mixer switch
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -3067,6 +3136,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_get);
/**
* snd_hda_mixer_amp_switch_put - Put callback for a standard AMP mixer switch
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_COMPOSE_AMP_VAL*() or related macros.
@@ -3110,6 +3181,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_put);
/**
* snd_hda_mixer_bind_switch_get - Get callback for a bound volume control
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_MUTE*() macros.
@@ -3133,6 +3206,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_switch_get);
/**
* snd_hda_mixer_bind_switch_put - Put callback for a bound volume control
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_MUTE*() macros.
@@ -3163,6 +3238,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_switch_put);
/**
* snd_hda_mixer_bind_ctls_info - Info callback for a generic bound control
+ * @kcontrol: referred ctl element
+ * @uinfo: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
@@ -3186,6 +3263,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_info);
/**
* snd_hda_mixer_bind_ctls_get - Get callback for a generic bound control
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
@@ -3209,6 +3288,8 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_get);
/**
* snd_hda_mixer_bind_ctls_put - Put callback for a generic bound control
+ * @kcontrol: ctl element
+ * @ucontrol: pointer to get/store the data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
@@ -3238,6 +3319,10 @@ EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_put);
/**
* snd_hda_mixer_bind_tlv - TLV callback for a generic bound control
+ * @kcontrol: ctl element
+ * @op_flag: operation flag
+ * @size: byte size of input TLV
+ * @tlv: TLV data
*
* The control element is supposed to have the private_value field
* set up via HDA_BIND_VOL() macro.
@@ -3579,7 +3664,11 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_create_dig_out_ctls);
-/* get the hda_spdif_out entry from the given NID
+/**
+ * snd_hda_spdif_out_of_nid - get the hda_spdif_out entry from the given NID
+ * @codec: the HDA codec
+ * @nid: widget NID
+ *
* call within spdif_mutex lock
*/
struct hda_spdif_out *snd_hda_spdif_out_of_nid(struct hda_codec *codec,
@@ -3596,6 +3685,13 @@ struct hda_spdif_out *snd_hda_spdif_out_of_nid(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_spdif_out_of_nid);
+/**
+ * snd_hda_spdif_ctls_unassign - Unassign the given SPDIF ctl
+ * @codec: the HDA codec
+ * @idx: the SPDIF ctl index
+ *
+ * Unassign the widget from the given SPDIF control.
+ */
void snd_hda_spdif_ctls_unassign(struct hda_codec *codec, int idx)
{
struct hda_spdif_out *spdif;
@@ -3607,6 +3703,14 @@ void snd_hda_spdif_ctls_unassign(struct hda_codec *codec, int idx)
}
EXPORT_SYMBOL_GPL(snd_hda_spdif_ctls_unassign);
+/**
+ * snd_hda_spdif_ctls_assign - Assign the SPDIF controls to the given NID
+ * @codec: the HDA codec
+ * @idx: the SPDIF ctl idx
+ * @nid: widget NID
+ *
+ * Assign the widget to the SPDIF control with the given index.
+ */
void snd_hda_spdif_ctls_assign(struct hda_codec *codec, int idx, hda_nid_t nid)
{
struct hda_spdif_out *spdif;
@@ -3926,6 +4030,16 @@ void snd_hda_codec_flush_cache(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_codec_flush_cache);
+/**
+ * snd_hda_codec_set_power_to_all - Set the power state to all widgets
+ * @codec: the HDA codec
+ * @fg: function group (not used now)
+ * @power_state: the power state to set (AC_PWRST_*)
+ *
+ * Set the given power state to all widgets that have the power control.
+ * If the codec has power_filter set, it evaluates the power state and
+ * filters it out if it's unchanged as D3.
+ */
void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state)
{
@@ -3990,7 +4104,15 @@ static unsigned int hda_sync_power_state(struct hda_codec *codec,
return state;
}
-/* don't power down the widget if it controls eapd and EAPD_BTLENABLE is set */
+/**
+ * snd_hda_codec_eapd_power_filter - A power filter callback for EAPD
+ * @codec: the HDA codec
+ * @nid: widget NID
+ * @power_state: power state to evaluate
+ *
+ * Don't power down the widget if it controls eapd and EAPD_BTLENABLE is set.
+ * This can be used as a codec power_filter callback.
+ */
unsigned int snd_hda_codec_eapd_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state)
@@ -4315,6 +4437,7 @@ static struct hda_rate_tbl rate_bits[] = {
* @channels: the number of channels
* @format: the PCM format (SNDRV_PCM_FORMAT_XXX)
* @maxbps: the max. bps
+ * @spdif_ctls: HD-audio SPDIF status bits (0 if irrelevant)
*
* Calculate the format bitset from the given rate, channels and th PCM format.
*
@@ -4650,6 +4773,17 @@ static int set_pcm_default_values(struct hda_codec *codec,
/*
* codec prepare/cleanup entries
*/
+/**
+ * snd_hda_codec_prepare - Prepare a stream
+ * @codec: the HDA codec
+ * @hinfo: PCM information
+ * @stream: stream tag to assign
+ * @format: format id to assign
+ * @substream: PCM substream to assign
+ *
+ * Calls the prepare callback set by the codec with the given arguments.
+ * Clean up the inactive streams when successful.
+ */
int snd_hda_codec_prepare(struct hda_codec *codec,
struct hda_pcm_stream *hinfo,
unsigned int stream,
@@ -4666,6 +4800,14 @@ int snd_hda_codec_prepare(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_codec_prepare);
+/**
+ * snd_hda_codec_cleanup - Clean up a stream
+ * @codec: the HDA codec
+ * @hinfo: PCM information
+ * @substream: PCM substream
+ *
+ * Calls the cleanup callback set by the codec with the given arguments.
+ */
void snd_hda_codec_cleanup(struct hda_codec *codec,
struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
@@ -4990,6 +5132,7 @@ static void __snd_hda_power_down(struct hda_codec *codec)
* snd_hda_power_save - Power-up/down/sync the codec
* @codec: HD-audio codec
* @delta: the counter delta to change
+ * @d3wait: sync for D3 transition complete
*
* Change the power-up counter via @delta, and power up or down the hardware
* appropriately. For the power-down, queue to the delayed action.
@@ -5065,6 +5208,10 @@ EXPORT_SYMBOL_GPL(snd_hda_check_amp_list_power);
/**
* snd_hda_ch_mode_info - Info callback helper for the channel mode enum
+ * @codec: the HDA codec
+ * @uinfo: pointer to get/store the data
+ * @chmode: channel mode array
+ * @num_chmodes: channel mode array size
*/
int snd_hda_ch_mode_info(struct hda_codec *codec,
struct snd_ctl_elem_info *uinfo,
@@ -5084,6 +5231,11 @@ EXPORT_SYMBOL_GPL(snd_hda_ch_mode_info);
/**
* snd_hda_ch_mode_get - Get callback helper for the channel mode enum
+ * @codec: the HDA codec
+ * @ucontrol: pointer to get/store the data
+ * @chmode: channel mode array
+ * @num_chmodes: channel mode array size
+ * @max_channels: max number of channels
*/
int snd_hda_ch_mode_get(struct hda_codec *codec,
struct snd_ctl_elem_value *ucontrol,
@@ -5105,6 +5257,11 @@ EXPORT_SYMBOL_GPL(snd_hda_ch_mode_get);
/**
* snd_hda_ch_mode_put - Put callback helper for the channel mode enum
+ * @codec: the HDA codec
+ * @ucontrol: pointer to get/store the data
+ * @chmode: channel mode array
+ * @num_chmodes: channel mode array size
+ * @max_channelsp: pointer to store the max channels
*/
int snd_hda_ch_mode_put(struct hda_codec *codec,
struct snd_ctl_elem_value *ucontrol,
@@ -5133,6 +5290,8 @@ EXPORT_SYMBOL_GPL(snd_hda_ch_mode_put);
/**
* snd_hda_input_mux_info_info - Info callback helper for the input-mux enum
+ * @imux: imux helper object
+ * @uinfo: pointer to get/store the data
*/
int snd_hda_input_mux_info(const struct hda_input_mux *imux,
struct snd_ctl_elem_info *uinfo)
@@ -5154,6 +5313,11 @@ EXPORT_SYMBOL_GPL(snd_hda_input_mux_info);
/**
* snd_hda_input_mux_info_put - Put callback helper for the input-mux enum
+ * @codec: the HDA codec
+ * @imux: imux helper object
+ * @ucontrol: pointer to get/store the data
+ * @nid: input mux NID
+ * @cur_val: pointer to get/store the current imux value
*/
int snd_hda_input_mux_put(struct hda_codec *codec,
const struct hda_input_mux *imux,
@@ -5178,7 +5342,13 @@ int snd_hda_input_mux_put(struct hda_codec *codec,
EXPORT_SYMBOL_GPL(snd_hda_input_mux_put);
-/*
+/**
+ * snd_hda_enum_helper_info - Helper for simple enum ctls
+ * @kcontrol: ctl element
+ * @uinfo: pointer to get/store the data
+ * @num_items: number of enum items
+ * @texts: enum item string array
+ *
* process kcontrol info callback of a simple string enum array
* when @num_items is 0 or @texts is NULL, assume a boolean enum array
*/
@@ -5195,14 +5365,7 @@ int snd_hda_enum_helper_info(struct snd_kcontrol *kcontrol,
texts = texts_default;
}
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = num_items;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
EXPORT_SYMBOL_GPL(snd_hda_enum_helper_info);
@@ -5274,6 +5437,8 @@ EXPORT_SYMBOL_GPL(snd_hda_bus_reboot_notify);
/**
* snd_hda_multi_out_dig_open - open the digital out in the exclusive mode
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
*/
int snd_hda_multi_out_dig_open(struct hda_codec *codec,
struct hda_multi_out *mout)
@@ -5290,6 +5455,11 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_dig_open);
/**
* snd_hda_multi_out_dig_prepare - prepare the digital out stream
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
+ * @stream_tag: stream tag to assign
+ * @format: format id to assign
+ * @substream: PCM substream to assign
*/
int snd_hda_multi_out_dig_prepare(struct hda_codec *codec,
struct hda_multi_out *mout,
@@ -5306,6 +5476,8 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_dig_prepare);
/**
* snd_hda_multi_out_dig_cleanup - clean-up the digital out stream
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
*/
int snd_hda_multi_out_dig_cleanup(struct hda_codec *codec,
struct hda_multi_out *mout)
@@ -5319,6 +5491,8 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_dig_cleanup);
/**
* snd_hda_multi_out_dig_close - release the digital out stream
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
*/
int snd_hda_multi_out_dig_close(struct hda_codec *codec,
struct hda_multi_out *mout)
@@ -5332,6 +5506,10 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_dig_close);
/**
* snd_hda_multi_out_analog_open - open analog outputs
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
+ * @substream: PCM substream to assign
+ * @hinfo: PCM information to assign
*
* Open analog outputs and set up the hw-constraints.
* If the digital outputs can be opened as slave, open the digital
@@ -5382,6 +5560,11 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_analog_open);
/**
* snd_hda_multi_out_analog_prepare - Preapre the analog outputs.
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
+ * @stream_tag: stream tag to assign
+ * @format: format id to assign
+ * @substream: PCM substream to assign
*
* Set up the i/o for analog out.
* When the digital out is available, copy the front out to digital out, too.
@@ -5459,6 +5642,8 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_analog_prepare);
/**
* snd_hda_multi_out_analog_cleanup - clean up the setting for analog out
+ * @codec: the HDA codec
+ * @mout: hda_multi_out object
*/
int snd_hda_multi_out_analog_cleanup(struct hda_codec *codec,
struct hda_multi_out *mout)
@@ -5490,6 +5675,8 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_analog_cleanup);
/**
* snd_hda_get_default_vref - Get the default (mic) VREF pin bits
+ * @codec: the HDA codec
+ * @pin: referred pin NID
*
* Guess the suitable VREF pin bits to be set as the pin-control value.
* Note: the function doesn't set the AC_PINCTL_IN_EN bit.
@@ -5515,7 +5702,12 @@ unsigned int snd_hda_get_default_vref(struct hda_codec *codec, hda_nid_t pin)
}
EXPORT_SYMBOL_GPL(snd_hda_get_default_vref);
-/* correct the pin ctl value for matching with the pin cap */
+/**
+ * snd_hda_correct_pin_ctl - correct the pin ctl value for matching with the pin cap
+ * @codec: the HDA codec
+ * @pin: referred pin NID
+ * @val: pin ctl value to audit
+ */
unsigned int snd_hda_correct_pin_ctl(struct hda_codec *codec,
hda_nid_t pin, unsigned int val)
{
@@ -5566,6 +5758,19 @@ unsigned int snd_hda_correct_pin_ctl(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_correct_pin_ctl);
+/**
+ * _snd_hda_set_pin_ctl - Helper to set a pin ctl value
+ * @codec: the HDA codec
+ * @pin: referred pin NID
+ * @val: pin control value to set
+ * @cached: access over codec pinctl cache or direct write
+ *
+ * This function is a helper to set a pin ctl value more safely.
+ * It corrects the pin ctl value via snd_hda_correct_pin_ctl(), stores the
+ * value in the pin target array via snd_hda_codec_set_pin_target(), then
+ * actually writes the value via either snd_hda_codec_update_cache() or
+ * snd_hda_codec_write(), depending on the @cached flag.
+ */
int _snd_hda_set_pin_ctl(struct hda_codec *codec, hda_nid_t pin,
unsigned int val, bool cached)
{
@@ -5582,6 +5787,11 @@ EXPORT_SYMBOL_GPL(_snd_hda_set_pin_ctl);
/**
* snd_hda_add_imux_item - Add an item to input_mux
+ * @codec: the HDA codec
+ * @imux: imux helper object
+ * @label: the name of imux item to assign
+ * @index: index number of imux item to assign
+ * @type_idx: pointer to store the resultant label index
*
* When the same label is used already in the existing items, the number
* suffix is appended to the label. This label index number is stored
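Editor's sketch of the pin-control helper documented in this file (not part of the patch): enabling a hypothetical output pin through the cached path so that the value is re-applied on resume.

static void example_enable_output_pin(struct hda_codec *codec, hda_nid_t pin)
{
	/* the helper corrects the value against the pin caps, records it as
	 * the pinctl target and writes it via the codec cache (cached=true) */
	_snd_hda_set_pin_ctl(codec, pin, AC_PINCTL_OUT_EN, true);
}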
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index e1cd34d9011d..0e6d7534f491 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -371,7 +371,7 @@ error:
return ret;
}
-/**
+/*
* SNDRV_PCM_RATE_* and AC_PAR_PCM values don't match, print correct rates with
* hdmi-specific routine.
*/
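Editor's note: the /** marker is reserved for kernel-doc, which the comment above is not (it has no function-name line), hence the downgrade to a plain /* comment. For comparison, a minimal kernel-doc shape with a hypothetical function name:

/* plain comment: free-form note, ignored by scripts/kernel-doc */

/**
 * example_func - one-line summary parsed by scripts/kernel-doc
 * @arg: description of the single parameter
 *
 * Longer description follows after a blank comment line.
 */
static inline int example_func(int arg) { return arg; }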
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 64220c08bd98..63b69f750d8e 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -40,7 +40,12 @@
#include "hda_generic.h"
-/* initialize hda_gen_spec struct */
+/**
+ * snd_hda_gen_spec_init - initialize hda_gen_spec struct
+ * @spec: hda_gen_spec object to initialize
+ *
+ * Initialize the given hda_gen_spec object.
+ */
int snd_hda_gen_spec_init(struct hda_gen_spec *spec)
{
snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
@@ -51,6 +56,17 @@ int snd_hda_gen_spec_init(struct hda_gen_spec *spec)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_spec_init);
+/**
+ * snd_hda_gen_add_kctl - Add a new kctl_new struct from the template
+ * @spec: hda_gen_spec object
+ * @name: name string to override the template, NULL if unchanged
+ * @temp: template for the new kctl
+ *
+ * Add a new kctl (actually snd_kcontrol_new to be instantiated later)
+ * element based on the given snd_kcontrol_new template @temp and the
+ * name string @name to the list in @spec.
+ * Returns the newly created object or NULL on error.
+ */
struct snd_kcontrol_new *
snd_hda_gen_add_kctl(struct hda_gen_spec *spec, const char *name,
const struct snd_kcontrol_new *temp)
@@ -259,8 +275,14 @@ static struct nid_path *get_nid_path(struct hda_codec *codec,
return NULL;
}
-/* get the path between the given NIDs;
- * passing 0 to either @pin or @dac behaves as a wildcard
+/**
+ * snd_hda_get_nid_path - get the path between the given NIDs
+ * @codec: the HDA codec
+ * @from_nid: the NID where the path starts from
+ * @to_nid: the NID where the path ends at
+ *
+ * Returns the found nid_path object or NULL if no path is found.
+ * Passing 0 to either @from_nid or @to_nid behaves as a wildcard.
*/
struct nid_path *snd_hda_get_nid_path(struct hda_codec *codec,
hda_nid_t from_nid, hda_nid_t to_nid)
@@ -269,8 +291,14 @@ struct nid_path *snd_hda_get_nid_path(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_get_nid_path);
-/* get the index number corresponding to the path instance;
- * the index starts from 1, for easier checking the invalid value
+/**
+ * snd_hda_get_path_idx - get the index number corresponding to the path
+ * instance
+ * @codec: the HDA codec
+ * @path: nid_path object
+ *
+ * The returned index starts from 1, i.e. the actual array index with offset 1,
+ * and zero is handled as an invalid path.
*/
int snd_hda_get_path_idx(struct hda_codec *codec, struct nid_path *path)
{
@@ -287,7 +315,12 @@ int snd_hda_get_path_idx(struct hda_codec *codec, struct nid_path *path)
}
EXPORT_SYMBOL_GPL(snd_hda_get_path_idx);
-/* get the path instance corresponding to the given index number */
+/**
+ * snd_hda_get_path_from_idx - get the path instance corresponding to the
+ * given index number
+ * @codec: the HDA codec
+ * @idx: the path index
+ */
struct nid_path *snd_hda_get_path_from_idx(struct hda_codec *codec, int idx)
{
struct hda_gen_spec *spec = codec->spec;
@@ -415,7 +448,18 @@ static bool __parse_nid_path(struct hda_codec *codec,
return true;
}
-/* parse the widget path from the given nid to the target nid;
+/**
+ * snd_hda_parse_nid_path - parse the widget path from the given nid to
+ * the target nid
+ * @codec: the HDA codec
+ * @from_nid: the NID where the path starts from
+ * @to_nid: the NID where the path ends at
+ * @anchor_nid: the anchor indication
+ * @path: the path object to store the result
+ *
+ * Returns true if a matching path is found.
+ *
+ * The parsing behavior depends on parameters:
* when @from_nid is 0, try to find an empty DAC;
* when @anchor_nid is set to a positive value, only paths through the widget
* with the given value are evaluated.
@@ -436,9 +480,15 @@ bool snd_hda_parse_nid_path(struct hda_codec *codec, hda_nid_t from_nid,
}
EXPORT_SYMBOL_GPL(snd_hda_parse_nid_path);
-/*
- * parse the path between the given NIDs and add to the path list.
- * if no valid path is found, return NULL
+/**
+ * snd_hda_add_new_path - parse the path between the given NIDs and
+ * add to the path list
+ * @codec: the HDA codec
+ * @from_nid: the NID where the path starts from
+ * @to_nid: the NID where the path ends at
+ * @anchor_nid: the anchor indication, see snd_hda_parse_nid_path()
+ *
+ * If no valid path is found, returns NULL.
*/
struct nid_path *
snd_hda_add_new_path(struct hda_codec *codec, hda_nid_t from_nid,
@@ -724,8 +774,14 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
}
}
-/* activate or deactivate the given path
- * if @add_aamix is set, enable the input from aa-mix NID as well (if any)
+/**
+ * snd_hda_activate_path - activate or deactivate the given path
+ * @codec: the HDA codec
+ * @path: the path to activate/deactivate
+ * @enable: flag to activate or not
+ * @add_aamix: enable the input from aamix NID
+ *
+ * If @add_aamix is set, enable the input from aa-mix NID as well (if any).
*/
void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
bool enable, bool add_aamix)
@@ -1038,11 +1094,24 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
break;
*index = ch;
return "Headphone";
+ case AUTO_PIN_LINE_OUT:
+ /* This deals with the case where we have two DACs and
+ * one LO, one HP and one Speaker */
+ if (!ch && cfg->speaker_outs && cfg->hp_outs) {
+ bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
+ bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
+ if (hp_lo_shared && spk_lo_shared)
+ return spec->vmaster_mute.hook ? "PCM" : "Master";
+ if (hp_lo_shared)
+ return "Headphone+LO";
+ if (spk_lo_shared)
+ return "Speaker+LO";
+ }
}
/* for a single channel output, we don't have to name the channel */
if (cfg->line_outs == 1 && !spec->multi_ios)
- return "PCM";
+ return "Line Out";
if (ch >= ARRAY_SIZE(channel_name)) {
snd_BUG();
@@ -3870,7 +3939,12 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
}
}
-/* Toggle outputs muting */
+/**
+ * snd_hda_gen_update_outputs - Toggle outputs muting
+ * @codec: the HDA codec
+ *
+ * Update the mute status of all outputs based on the current jack states.
+ */
void snd_hda_gen_update_outputs(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
@@ -3931,7 +4005,11 @@ static void call_update_outputs(struct hda_codec *codec)
snd_ctl_sync_vmaster(spec->vmaster_mute.sw_kctl, false);
}
-/* standard HP-automute helper */
+/**
+ * snd_hda_gen_hp_automute - standard HP-automute helper
+ * @codec: the HDA codec
+ * @jack: jack object, NULL for the whole
+ */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
@@ -3952,7 +4030,11 @@ void snd_hda_gen_hp_automute(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_gen_hp_automute);
-/* standard line-out-automute helper */
+/**
+ * snd_hda_gen_line_automute - standard line-out-automute helper
+ * @codec: the HDA codec
+ * @jack: jack object, NULL for the whole
+ */
void snd_hda_gen_line_automute(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
@@ -3973,7 +4055,11 @@ void snd_hda_gen_line_automute(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_gen_line_automute);
-/* standard mic auto-switch helper */
+/**
+ * snd_hda_gen_mic_autoswitch - standard mic auto-switch helper
+ * @codec: the HDA codec
+ * @jack: jack object, NULL for the whole
+ */
void snd_hda_gen_mic_autoswitch(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
@@ -4305,7 +4391,13 @@ static int check_auto_mic_availability(struct hda_codec *codec)
return 0;
}
-/* power_filter hook; make inactive widgets into power down */
+/**
+ * snd_hda_gen_path_power_filter - power_filter hook to make inactive widgets
+ * into power down
+ * @codec: the HDA codec
+ * @nid: NID to evaluate
+ * @power_state: target power state
+ */
unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
hda_nid_t nid,
unsigned int power_state)
@@ -4341,8 +4433,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
}
}
-/*
- * Parse the given BIOS configuration and set up the hda_gen_spec
+/**
+ * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
+ * set up the hda_gen_spec
+ * @codec: the HDA codec
+ * @cfg: Parsed pin configuration
*
* return 1 if successful, 0 if the proper config is not found,
* or a negative error code
@@ -4524,10 +4619,16 @@ static const char * const slave_pfxs[] = {
"CLFE", "Bass Speaker", "PCM",
"Speaker Front", "Speaker Surround", "Speaker CLFE", "Speaker Side",
"Headphone Front", "Headphone Surround", "Headphone CLFE",
- "Headphone Side",
+ "Headphone Side", "Headphone+LO", "Speaker+LO",
NULL,
};
+/**
+ * snd_hda_gen_build_controls - Build controls from the parsed results
+ * @codec: the HDA codec
+ *
+ * Pass this to build_controls patch_ops.
+ */
int snd_hda_gen_build_controls(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
@@ -5005,7 +5106,12 @@ static void fill_pcm_stream_name(char *str, size_t len, const char *sfx,
strlcat(str, sfx, len);
}
-/* build PCM streams based on the parsed results */
+/**
+ * snd_hda_gen_build_pcms - build PCM streams based on the parsed results
+ * @codec: the HDA codec
+ *
+ * Pass this to build_pcms patch_ops.
+ */
int snd_hda_gen_build_pcms(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
@@ -5300,9 +5406,11 @@ static void clear_unsol_on_unused_pins(struct hda_codec *codec)
}
}
-/*
- * initialize the generic spec;
- * this can be put as patch_ops.init function
+/**
+ * snd_hda_gen_init - initialize the generic spec
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops init function.
*/
int snd_hda_gen_init(struct hda_codec *codec)
{
@@ -5338,9 +5446,11 @@ int snd_hda_gen_init(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_init);
-/*
- * free the generic spec;
- * this can be put as patch_ops.free function
+/**
+ * snd_hda_gen_free - free the generic spec
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops free function.
*/
void snd_hda_gen_free(struct hda_codec *codec)
{
@@ -5352,9 +5462,12 @@ void snd_hda_gen_free(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_gen_free);
#ifdef CONFIG_PM
-/*
- * check the loopback power save state;
- * this can be put as patch_ops.check_power_status function
+/**
+ * snd_hda_gen_check_power_status - check the loopback power save state
+ * @codec: the HDA codec
+ * @nid: NID to inspect
+ *
+ * This can be put as patch_ops check_power_status function.
*/
int snd_hda_gen_check_power_status(struct hda_codec *codec, hda_nid_t nid)
{
@@ -5380,6 +5493,12 @@ static const struct hda_codec_ops generic_patch_ops = {
#endif
};
+/**
+ * snd_hda_parse_generic_codec - Generic codec parser
+ * @codec: the HDA codec
+ *
+ * This should be called from the HDA codec core.
+ */
int snd_hda_parse_generic_codec(struct hda_codec *codec)
{
struct hda_gen_spec *spec;
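Editor's sketch tying the newly documented path helpers together: a hypothetical flow that builds and enables a DAC-to-pin path with the generic parser API (the NIDs are supplied by the caller; anchor_nid 0 means no anchor).

static bool example_enable_output_path(struct hda_codec *codec,
				       hda_nid_t dac, hda_nid_t pin)
{
	struct nid_path *path;

	/* parse DAC -> pin and append the result to the spec's path list */
	path = snd_hda_add_new_path(codec, dac, pin, 0);
	if (!path)
		return false;

	/* real indices start at 1, zero means an invalid path */
	if (!snd_hda_get_path_idx(codec, path))
		return false;

	/* set up the connections and amps along the path, aamix input off */
	snd_hda_activate_path(codec, path, true, false);
	return true;
}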
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 243ffad40605..2bf0b568e3de 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -196,8 +196,8 @@ MODULE_PARM_DESC(align_buffer_size,
"Force buffer and period sizes to be multiple of 128 bytes.");
#ifdef CONFIG_X86
-static bool hda_snoop = true;
-module_param_named(snoop, hda_snoop, bool, 0444);
+static int hda_snoop = -1;
+module_param_named(snoop, hda_snoop, bint, 0444);
MODULE_PARM_DESC(snoop, "Enable/disable snooping");
#else
#define hda_snoop true
@@ -272,43 +272,56 @@ enum {
AZX_NUM_DRIVERS, /* keep this as last entry */
};
+#define azx_get_snoop_type(chip) \
+ (((chip)->driver_caps & AZX_DCAPS_SNOOP_MASK) >> 10)
+#define AZX_DCAPS_SNOOP_TYPE(type) ((AZX_SNOOP_TYPE_ ## type) << 10)
+
+/* quirks for old Intel chipsets */
+#define AZX_DCAPS_INTEL_ICH \
+ (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE)
+
/* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_NOPM \
- (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
- AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
+ (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
+ AZX_DCAPS_REVERSE_ASSIGN | AZX_DCAPS_SNOOP_TYPE(SCH))
#define AZX_DCAPS_INTEL_PCH \
(AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
#define AZX_DCAPS_INTEL_HASWELL \
- (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_ALIGN_BUFSIZE | \
- AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME | \
- AZX_DCAPS_I915_POWERWELL)
+ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
+ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
+ AZX_DCAPS_SNOOP_TYPE(SCH))
/* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
#define AZX_DCAPS_INTEL_BROADWELL \
- (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_ALIGN_BUFSIZE | \
- AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_PM_RUNTIME | \
- AZX_DCAPS_I915_POWERWELL)
+ (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
+ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
+ AZX_DCAPS_SNOOP_TYPE(SCH))
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
- (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
- AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
+ AZX_DCAPS_SNOOP_TYPE(ATI))
/* quirks for ATI/AMD HDMI */
#define AZX_DCAPS_PRESET_ATI_HDMI \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\
AZX_DCAPS_NO_MSI64)
+/* quirks for ATI HDMI with snoop off */
+#define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
- (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
- AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
- AZX_DCAPS_CORBRP_SELF_CLEAR)
+ (AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
+ AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+ AZX_DCAPS_SNOOP_TYPE(NVIDIA))
#define AZX_DCAPS_PRESET_CTHDA \
- (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
+ AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
/*
* VGA-switcher support
@@ -437,6 +450,8 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg,
static void azx_init_pci(struct azx *chip)
{
+ int snoop_type = azx_get_snoop_type(chip);
+
/* Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
* TCSEL == Traffic Class Select Register, which sets PCI express QOS
* Ensuring these bits are 0 clears playback static on some HD Audio
@@ -451,7 +466,7 @@ static void azx_init_pci(struct azx *chip)
/* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
* we need to enable snoop.
*/
- if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
+ if (snoop_type == AZX_SNOOP_TYPE_ATI) {
dev_dbg(chip->card->dev, "Setting ATI snoop: %d\n",
azx_snoop(chip));
update_pci_byte(chip->pci,
@@ -460,7 +475,7 @@ static void azx_init_pci(struct azx *chip)
}
/* For NVIDIA HDA, enable snoop */
- if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
+ if (snoop_type == AZX_SNOOP_TYPE_NVIDIA) {
dev_dbg(chip->card->dev, "Setting Nvidia snoop: %d\n",
azx_snoop(chip));
update_pci_byte(chip->pci,
@@ -475,7 +490,7 @@ static void azx_init_pci(struct azx *chip)
}
/* Enable SCH/PCH snoop if needed */
- if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
+ if (snoop_type == AZX_SNOOP_TYPE_SCH) {
unsigned short snoop;
pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if ((!azx_snoop(chip) && !(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)) ||
@@ -1129,8 +1144,7 @@ static int azx_free(struct azx *chip)
pci_disable_device(chip->pci);
kfree(chip->azx_dev);
#ifdef CONFIG_SND_HDA_PATCH_LOADER
- if (chip->fw)
- release_firmware(chip->fw);
+ release_firmware(chip->fw);
#endif
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(false);
@@ -1358,35 +1372,33 @@ static void check_msi(struct azx *chip)
/* check the snoop mode availability */
static void azx_check_snoop_available(struct azx *chip)
{
- bool snoop = chip->snoop;
+ int snoop = hda_snoop;
- switch (chip->driver_type) {
- case AZX_DRIVER_VIA:
+ if (snoop >= 0) {
+ dev_info(chip->card->dev, "Force to %s mode by module option\n",
+ snoop ? "snoop" : "non-snoop");
+ chip->snoop = snoop;
+ return;
+ }
+
+ snoop = true;
+ if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE &&
+ chip->driver_type == AZX_DRIVER_VIA) {
/* force to non-snoop mode for a new VIA controller
* when BIOS is set
*/
- if (snoop) {
- u8 val;
- pci_read_config_byte(chip->pci, 0x42, &val);
- if (!(val & 0x80) && chip->pci->revision == 0x30)
- snoop = false;
- }
- break;
- case AZX_DRIVER_ATIHDMI_NS:
- /* new ATI HDMI requires non-snoop */
- snoop = false;
- break;
- case AZX_DRIVER_CTHDA:
- case AZX_DRIVER_CMEDIA:
- snoop = false;
- break;
+ u8 val;
+ pci_read_config_byte(chip->pci, 0x42, &val);
+ if (!(val & 0x80) && chip->pci->revision == 0x30)
+ snoop = false;
}
- if (snoop != chip->snoop) {
- dev_info(chip->card->dev, "Force to %s mode\n",
- snoop ? "snoop" : "non-snoop");
- chip->snoop = snoop;
- }
+ if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)
+ snoop = false;
+
+ chip->snoop = snoop;
+ if (!snoop)
+ dev_info(chip->card->dev, "Force to non-snoop mode\n");
}
static void azx_probe_work(struct work_struct *work)
@@ -1446,7 +1458,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
check_probe_mask(chip, dev);
chip->single_cmd = single_cmd;
- chip->snoop = hda_snoop;
azx_check_snoop_available(chip);
if (bdl_pos_adj[dev] < 0) {
@@ -1554,10 +1565,8 @@ static int azx_first_init(struct azx *chip)
if (align_buffer_size >= 0)
chip->align_buffer_size = !!align_buffer_size;
else {
- if (chip->driver_caps & AZX_DCAPS_BUFSIZE)
+ if (chip->driver_caps & AZX_DCAPS_NO_ALIGN_BUFSIZE)
chip->align_buffer_size = 0;
- else if (chip->driver_caps & AZX_DCAPS_ALIGN_BUFSIZE)
- chip->align_buffer_size = 1;
else
chip->align_buffer_size = 1;
}
@@ -2044,36 +2053,35 @@ static const struct pci_device_id azx_ids[] = {
/* Braswell */
{ PCI_DEVICE(0x8086, 0x2284),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
- /* ICH */
+ /* ICH6 */
{ PCI_DEVICE(0x8086, 0x2668),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH6 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH7 */
{ PCI_DEVICE(0x8086, 0x27d8),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH7 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ESB2 */
{ PCI_DEVICE(0x8086, 0x269a),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ESB2 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH8 */
{ PCI_DEVICE(0x8086, 0x284b),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH8 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH9 */
{ PCI_DEVICE(0x8086, 0x293e),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH9 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH9 */
{ PCI_DEVICE(0x8086, 0x293f),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH9 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH10 */
{ PCI_DEVICE(0x8086, 0x3a3e),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH10 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
+ /* ICH10 */
{ PCI_DEVICE(0x8086, 0x3a6e),
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
- AZX_DCAPS_BUFSIZE }, /* ICH10 */
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_INTEL_ICH },
/* Generic Intel */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
- .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_BUFSIZE },
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_NO_ALIGN_BUFSIZE },
/* ATI SB 450/600/700/800/900 */
{ PCI_DEVICE(0x1002, 0x437b),
.driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
@@ -2128,13 +2136,13 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1002, 0xaa98),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x9902),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaaa0),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaaa8),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0xaab0),
- .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288),
.driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
@@ -2181,7 +2189,7 @@ static const struct pci_device_id azx_ids[] = {
/* CM8888 */
{ PCI_DEVICE(0x13f6, 0x5011),
.driver_data = AZX_DRIVER_CMEDIA |
- AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB },
+ AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_SNOOP_OFF },
/* Vortex86MX */
{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
/* VMware HDAudio */
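Editor's note on the snoop rework above: the controller's snoop type is now packed into DCAPS bits 10-11 instead of three separate flag bits. A small sketch of the encode/decode round trip using the macros introduced in this hunk:

static int example_snoop_type_roundtrip(void)
{
	/* encode: AZX_SNOOP_TYPE_ATI is shifted into bits 10-11 */
	unsigned int caps = AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SNOOP_TYPE(ATI);

	/* decode: what azx_get_snoop_type(chip) does on chip->driver_caps */
	return (caps & AZX_DCAPS_SNOOP_MASK) >> 10; /* == AZX_SNOOP_TYPE_ATI */
}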
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index f56765ae73a7..e664307617bd 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -20,6 +20,16 @@
#include "hda_auto_parser.h"
#include "hda_jack.h"
+/**
+ * is_jack_detectable - Check whether the given pin is jack-detectable
+ * @codec: the HDA codec
+ * @nid: pin NID
+ *
+ * Check whether the given pin is capable of reporting the jack detection.
+ * The jack detection might not work for various reasons, e.g. the jack
+ * detection is prohibited at the codec level, the pin config has
+ * AC_DEFCFG_MISC_NO_PRESENCE bit, no unsol support, etc.
+ */
bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
{
if (codec->no_jack_detect)
@@ -57,6 +67,8 @@ static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid)
/**
* snd_hda_jack_tbl_get - query the jack-table entry for the given NID
+ * @codec: the HDA codec
+ * @nid: pin NID to refer to
*/
struct hda_jack_tbl *
snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
@@ -75,6 +87,8 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get);
/**
* snd_hda_jack_tbl_get_from_tag - query the jack-table entry for the given tag
+ * @codec: the HDA codec
+ * @tag: tag value to refer to
*/
struct hda_jack_tbl *
snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag)
@@ -93,6 +107,8 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_from_tag);
/**
* snd_hda_jack_tbl_new - create a jack-table entry for the given NID
+ * @codec: the HDA codec
+ * @nid: pin NID to assign
*/
static struct hda_jack_tbl *
snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
@@ -162,6 +178,7 @@ static void jack_detect_update(struct hda_codec *codec,
/**
* snd_hda_set_dirty_all - Mark all the cached as dirty
+ * @codec: the HDA codec
*
* This function sets the dirty flag to all entries of jack table.
* It's called from the resume path in hda_codec.c.
@@ -218,6 +235,9 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state);
/**
* snd_hda_jack_detect_enable - enable the jack-detection
+ * @codec: the HDA codec
+ * @nid: pin NID to enable
+ * @func: callback function to register
*
* In the case of error, the return value will be a pointer embedded with
* errno. Check and handle the return value appropriately with standard
@@ -258,6 +278,14 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
}
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback);
+/**
+ * snd_hda_jack_detect_enable - Enable the jack detection on the given pin
+ * @codec: the HDA codec
+ * @nid: pin NID to enable jack detection
+ *
+ * Enable the jack detection with the default callback. Returns zero if
+ * successful or a negative error code.
+ */
int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid)
{
return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback(codec, nid, NULL));
@@ -266,6 +294,9 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
/**
* snd_hda_jack_set_gating_jack - Set gating jack.
+ * @codec: the HDA codec
+ * @gated_nid: gated pin NID
+ * @gating_nid: gating pin NID
*
* Indicates the gated jack is only valid when the gating jack is plugged.
*/
@@ -287,6 +318,7 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_set_gating_jack);
/**
* snd_hda_jack_report_sync - sync the states of all jacks and report if changed
+ * @codec: the HDA codec
*/
void snd_hda_jack_report_sync(struct hda_codec *codec)
{
@@ -349,6 +381,11 @@ static void hda_free_jack_priv(struct snd_jack *jack)
/**
* snd_hda_jack_add_kctl - Add a kctl for the given pin
+ * @codec: the HDA codec
+ * @nid: pin NID to assign
+ * @name: string name for the jack
+ * @idx: index number for the jack
+ * @phantom_jack: flag to treat the jack as a phantom jack
*
* This assigns a jack-detection kctl to the given pin. The kcontrol
* will have the given name and index.
@@ -391,6 +428,15 @@ static int __snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
return 0;
}
+/**
+ * snd_hda_jack_add_kctl - Add a jack kctl for the given pin
+ * @codec: the HDA codec
+ * @nid: pin NID
+ * @name: the name string for the jack ctl
+ * @idx: the ctl index for the jack ctl
+ *
+ * This is a simple helper calling __snd_hda_jack_add_kctl().
+ */
int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
const char *name, int idx)
{
@@ -456,6 +502,8 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
/**
* snd_hda_jack_add_kctls - Add kctls for all pins included in the given pincfg
+ * @codec: the HDA codec
+ * @cfg: pin config table to parse
*/
int snd_hda_jack_add_kctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
@@ -531,6 +579,11 @@ static void call_jack_callback(struct hda_codec *codec,
}
}
+/**
+ * snd_hda_jack_unsol_event - Handle an unsolicited event
+ * @codec: the HDA codec
+ * @res: the unsolicited event data
+ */
void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
{
struct hda_jack_tbl *event;
@@ -546,6 +599,13 @@ void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
}
EXPORT_SYMBOL_GPL(snd_hda_jack_unsol_event);
+/**
+ * snd_hda_jack_poll_all - Poll all jacks
+ * @codec: the HDA codec
+ *
+ * Poll all detectable jacks with dirty flag, update the status, call
+ * callbacks and call snd_hda_jack_report_sync() if any changes are found.
+ */
void snd_hda_jack_poll_all(struct hda_codec *codec)
{
struct hda_jack_tbl *jack = codec->jacktbl.list;
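Editor's sketch of registering a jack-detection callback with the API documented above; the pin NID comes from the caller and the callback simply delegates to the generic HP automute helper:

static void example_jack_callback(struct hda_codec *codec,
				  struct hda_jack_callback *jack)
{
	/* re-evaluate output muting whenever the jack state changes */
	snd_hda_gen_hp_automute(codec, jack);
}

static int example_enable_jack(struct hda_codec *codec, hda_nid_t pin)
{
	if (!is_jack_detectable(codec, pin))
		return 0;
	/* the helper returns an ERR_PTR-encoded pointer on failure */
	return PTR_ERR_OR_ZERO(
		snd_hda_jack_detect_enable_callback(codec, pin,
						    example_jack_callback));
}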
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 13cb375454f6..b279e327a23b 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -72,6 +72,11 @@ enum {
int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid);
+/**
+ * snd_hda_jack_detect - Detect the jack
+ * @codec: the HDA codec
+ * @nid: pin NID to check jack detection
+ */
static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
{
return snd_hda_jack_detect_state(codec, nid) != HDA_JACK_NOT_PRESENT;
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 5016014e57f2..aa484fdf4338 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -152,9 +152,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
/* bits 0-7 are used for indicating driver type */
#define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */
#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
-#define AZX_DCAPS_ATI_SNOOP (1 << 10) /* ATI snoop enable */
-#define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */
-#define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */
+#define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */
+#define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */
#define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */
#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
@@ -163,8 +162,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
-#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
-#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
+#define AZX_DCAPS_NO_ALIGN_BUFSIZE (1 << 21) /* no buffer size alignment */
+/* 22 unused */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
@@ -173,6 +172,13 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+enum {
+ AZX_SNOOP_TYPE_NONE,
+ AZX_SNOOP_TYPE_SCH,
+ AZX_SNOOP_TYPE_ATI,
+ AZX_SNOOP_TYPE_NVIDIA,
+};
+
/* HD Audio class code */
#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
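Since the snoop type now lives in a two-bit field at bits 10-11 (AZX_DCAPS_SNOOP_MASK), a driver table can encode and decode it with a pair of shifts. The macro names below are an illustrative assumption, not part of the hunk above:

/* sketch: encode/decode the 2-bit snoop type within the dcaps word */
#define EXAMPLE_DCAPS_SNOOP_TYPE(type)	((AZX_SNOOP_TYPE_ ## type) << 10)
#define EXAMPLE_DCAPS_SNOOP_GET(dcaps)	(((dcaps) & AZX_DCAPS_SNOOP_MASK) >> 10)

/* e.g. a chip entry could then carry AZX_DCAPS_SYNC_WRITE |
 * EXAMPLE_DCAPS_SNOOP_TYPE(ATI) instead of a dedicated flag bit.
 */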
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 9b49f156a12e..bef721592c3a 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -417,8 +417,13 @@ static DEVICE_ATTR_RW(user_pin_configs);
static DEVICE_ATTR_WO(reconfig);
static DEVICE_ATTR_WO(clear);
-/*
- * Look for hint string
+/**
+ * snd_hda_get_hint - Look for hint string
+ * @codec: the HDA codec
+ * @key: the hint key string
+ *
+ * Looks for a hint key/value pair matching the given key string and
+ * returns the value string. Returns NULL if nothing is found.
*/
const char *snd_hda_get_hint(struct hda_codec *codec, const char *key)
{
@@ -427,6 +432,15 @@ const char *snd_hda_get_hint(struct hda_codec *codec, const char *key)
}
EXPORT_SYMBOL_GPL(snd_hda_get_hint);
+/**
+ * snd_hda_get_bool_hint - Get a boolean hint value
+ * @codec: the HDA codec
+ * @key: the hint key string
+ *
+ * Looks for a hint key/value pair matching the given key string and
+ * returns a boolean value parsed from the hint value. Returns a
+ * negative value if no matching key is found.
+ */
int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
{
const char *p;
@@ -453,6 +467,16 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
}
EXPORT_SYMBOL_GPL(snd_hda_get_bool_hint);
+/**
+ * snd_hda_get_int_hint - Get an integer hint value
+ * @codec: the HDA codec
+ * @key: the hint key string
+ * @valp: pointer to store the value
+ *
+ * Looks for a hint key/value pair matching the given key string and
+ * stores the parsed integer value in @valp. Returns zero on success,
+ * or a negative error code if no matching key is found.
+ */
int snd_hda_get_int_hint(struct hda_codec *codec, const char *key, int *valp)
{
const char *p;
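As a usage illustration for the three getters documented above (the hint key names and the function are arbitrary examples, not part of the patch):

static int example_read_hints(struct hda_codec *codec)
{
	const char *p;
	int delay = 0;

	/* string hint: returns NULL when the key is absent */
	p = snd_hda_get_hint(codec, "init_delay_mode");
	if (p && !strcmp(p, "long"))
		delay = 100;

	/* boolean hint: a negative return means the key is absent */
	if (snd_hda_get_bool_hint(codec, "slow_init") > 0)
		delay += 10;

	/* integer hint: overwrites @delay only when the key is present */
	snd_hda_get_int_hint(codec, "init_delay", &delay);
	return delay;
}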
@@ -690,8 +714,11 @@ static int get_line_from_fw(char *buf, int size, size_t *fw_size_p,
return 1;
}
-/*
- * load a "patch" firmware file and parse it
+/**
+ * snd_hda_load_patch - load a "patch" firmware file and parse it
+ * @bus: HD-audio bus
+ * @fw_size: the firmware byte size
+ * @fw_buf: the firmware data
*/
int snd_hda_load_patch(struct hda_bus *bus, size_t fw_size, const void *fw_buf)
{
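For context, a hedged sketch of how a caller might hand a firmware blob to snd_hda_load_patch(); request_firmware()/release_firmware() are the standard kernel firmware API, and the file name is an arbitrary example:

#include <linux/firmware.h>

static int example_apply_patch(struct hda_bus *bus, struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "hda-patch.fw", dev);
	if (err < 0)
		return err;

	/* parse and apply the patch; prototype comes from the HDA headers */
	err = snd_hda_load_patch(bus, fw->size, fw->data);
	release_firmware(fw);
	return err;
}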
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 06275f8807a8..c81b715d6c98 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -332,6 +332,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+ SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
@@ -351,6 +352,7 @@ static const struct hda_model_fixup ad1986a_fixup_models[] = {
{ .id = AD1986A_FIXUP_LAPTOP, .name = "laptop" },
{ .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-imic" },
{ .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-eapd" }, /* alias */
+ { .id = AD1986A_FIXUP_EAPD, .name = "eapd" },
{}
};
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4f7ffa8c4a0d..e0383eea9880 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -2417,7 +2417,7 @@ static int dspxfr_one_seg(struct hda_codec *codec,
* @reloc: Relocation address for loading single-segment overlays, or 0 for
* no relocation
* @sample_rate: sampling rate of the stream used for DSP download
- * @number_channels: channels of the stream used for DSP download
+ * @channels: channels of the stream used for DSP download
* @ovly: TRUE if overlay format is required
*
* Returns zero or a negative error code.
@@ -2556,10 +2556,7 @@ static void dspload_post_setup(struct hda_codec *codec)
}
/**
- * Download DSP from a DSP Image Fast Load structure. This structure is a
- * linear, non-constant sized element array of structures, each of which
- * contain the count of the data to be loaded, the data itself, and the
- * corresponding starting chip address of the starting data location.
+ * dspload_image - Download DSP from a DSP Image Fast Load structure.
*
* @codec: the HDA codec
* @fls: pointer to a fast load image
@@ -2570,6 +2567,10 @@ static void dspload_post_setup(struct hda_codec *codec)
* @router_chans: number of audio router channels to be allocated (0 means use
* internal defaults; max is 32)
*
+ * Download DSP from a DSP Image Fast Load structure. This structure is a
+ * linear, non-constant sized element array of structures, each of which
+ * contains the count of the data to be loaded, the data itself, and the
+ * corresponding starting chip address of the data.
* Returns zero or a negative error code.
*/
static int dspload_image(struct hda_codec *codec,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b118a5be18df..a722067c491c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -96,6 +96,8 @@ struct alc_spec {
hda_nid_t cap_mute_led_nid;
unsigned int gpio_led; /* used for alc269_fixup_hp_gpio_led() */
+ unsigned int gpio_mute_led_mask;
+ unsigned int gpio_mic_led_mask;
hda_nid_t headset_mic_pin;
hda_nid_t headphone_mic_pin;
@@ -3310,41 +3312,45 @@ static void alc269_fixup_hp_mute_led_mic2(struct hda_codec *codec,
}
}
-/* turn on/off mute LED per vmaster hook */
-static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
+/* update LED status via GPIO */
+static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
+ bool enabled)
{
- struct hda_codec *codec = private_data;
struct alc_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
+ if (spec->mute_led_polarity)
+ enabled = !enabled;
+
if (enabled)
- spec->gpio_led &= ~0x08;
+ spec->gpio_led &= ~mask;
else
- spec->gpio_led |= 0x08;
+ spec->gpio_led |= mask;
if (spec->gpio_led != oldval)
snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
spec->gpio_led);
}
-/* turn on/off mic-mute LED per capture hook */
-static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+/* turn on/off mute LED via GPIO per vmaster hook */
+static void alc_fixup_gpio_mute_hook(void *private_data, int enabled)
{
+ struct hda_codec *codec = private_data;
struct alc_spec *spec = codec->spec;
- unsigned int oldval = spec->gpio_led;
- if (!ucontrol)
- return;
+ alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+}
- if (ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1])
- spec->gpio_led &= ~0x10;
- else
- spec->gpio_led |= 0x10;
- if (spec->gpio_led != oldval)
- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- spec->gpio_led);
+/* turn on/off mic-mute LED via GPIO per capture hook */
+static void alc_fixup_gpio_mic_mute_hook(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (ucontrol)
+ alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1]);
}
static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
@@ -3358,9 +3364,33 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
};
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc269_fixup_hp_gpio_mic_mute_hook;
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
+ spec->gpio_led = 0;
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x08;
+ spec->gpio_mic_led_mask = 0x10;
+ snd_hda_add_verbs(codec, gpio_init);
+ }
+}
+
+static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_verb gpio_init[] = {
+ { 0x01, AC_VERB_SET_GPIO_MASK, 0x22 },
+ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x22 },
+ {}
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
spec->gpio_led = 0;
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x02;
+ spec->gpio_mic_led_mask = 0x20;
snd_hda_add_verbs(codec, gpio_init);
}
}
@@ -3402,9 +3432,11 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
};
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
spec->gpio_led = 0;
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x08;
spec->cap_mute_led_nid = 0x18;
snd_hda_add_verbs(codec, gpio_init);
codec->power_filter = led_power_filter;
@@ -3423,9 +3455,11 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
};
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
spec->gpio_led = 0;
+ spec->mute_led_polarity = 0;
+ spec->gpio_mute_led_mask = 0x08;
spec->cap_mute_led_nid = 0x18;
snd_hda_add_verbs(codec, gpio_init);
codec->power_filter = led_power_filter;
@@ -4300,6 +4334,7 @@ enum {
ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
ALC282_FIXUP_ASPIRE_V5_PINS,
ALC280_FIXUP_HP_GPIO4,
+ ALC286_FIXUP_HP_GPIO_LED,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -4769,6 +4804,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc280_fixup_hp_gpio4,
},
+ [ALC286_FIXUP_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc286_fixup_hp_gpio_led,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
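Wiring the new HP model up takes three pieces spread over the hunks in this file: an enum ID, a fixup table entry, and a PCI-SSID quirk entry. A condensed sketch of how they connect (the EXAMPLE_ names are placeholders for ALC286_FIXUP_HP_GPIO_LED and the real tables):

enum { EXAMPLE_ALC286_FIXUP_HP_GPIO_LED };		/* 1. fixup ID */

static const struct hda_fixup example_fixups[] = {
	[EXAMPLE_ALC286_FIXUP_HP_GPIO_LED] = {		/* 2. fixup entry */
		.type = HDA_FIXUP_FUNC,
		.v.func = alc286_fixup_hp_gpio_led,
	},
};

static const struct snd_pci_quirk example_fixup_tbl[] = {
	/* 3. PCI SSID 103c:2271 selects the fixup at probe time */
	SND_PCI_QUIRK(0x103c, 0x2271, "HP", EXAMPLE_ALC286_FIXUP_HP_GPIO_LED),
	{}
};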
@@ -4809,6 +4848,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4887,6 +4927,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
@@ -5697,22 +5738,6 @@ static void alc_fixup_bass_chmap(struct hda_codec *codec,
}
}
-/* turn on/off mute LED per vmaster hook */
-static void alc662_led_gpio1_mute_hook(void *private_data, int enabled)
-{
- struct hda_codec *codec = private_data;
- struct alc_spec *spec = codec->spec;
- unsigned int oldval = spec->gpio_led;
-
- if (enabled)
- spec->gpio_led |= 0x01;
- else
- spec->gpio_led &= ~0x01;
- if (spec->gpio_led != oldval)
- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- spec->gpio_led);
-}
-
/* avoid D3 for keeping GPIO up */
static unsigned int gpio_led_power_filter(struct hda_codec *codec,
hda_nid_t nid,
@@ -5735,8 +5760,10 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
};
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc662_led_gpio1_mute_hook;
+ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
spec->gpio_led = 0;
+ spec->mute_led_polarity = 1;
+ spec->gpio_mute_led_mask = 0x01;
snd_hda_add_verbs(codec, gpio_init);
codec->power_filter = gpio_led_power_filter;
}
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 3b3cf4ac9060..c9411dfff5a4 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -205,13 +205,7 @@ static int aureon_universe_inmux_info(struct snd_kcontrol *kcontrol,
static const char * const texts[3] =
{"Internal Aux", "Wavetable", "Rear Line-In"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int aureon_universe_inmux_get(struct snd_kcontrol *kcontrol,
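The same conversion repeats through the rest of this series: every hand-rolled enumerated .info callback collapses into one call to snd_ctl_enum_info(), which fills the element type, channel count, item count, and clamped item name in a single helper. A minimal sketch of the resulting pattern (the control texts and function name are illustrative only):

static int example_enum_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[] = { "Off", "On" };

	/* 1 channel, ARRAY_SIZE(texts) items, names taken from texts[] */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}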
@@ -1106,20 +1100,10 @@ static int wm_adc_mux_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_in
};
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 2;
- if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON71_UNIVERSE) {
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, universe_texts[uinfo->value.enumerated.item]);
- } else {
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- }
- return 0;
+ if (ice->eeprom.subvendor == VT1724_SUBDEVICE_AUREON71_UNIVERSE)
+ return snd_ctl_enum_info(uinfo, 2, 8, universe_texts);
+ else
+ return snd_ctl_enum_info(uinfo, 2, 5, texts);
}
static int wm_adc_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1167,16 +1151,10 @@ static int aureon_cs8415_mux_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
"CD",
"Coax"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
if (ice->eeprom.subvendor == VT1724_SUBDEVICE_PRODIGY71)
- strcpy(uinfo->value.enumerated.name, prodigy_texts[uinfo->value.enumerated.item]);
+ return snd_ctl_enum_info(uinfo, 1, 2, prodigy_texts);
else
- strcpy(uinfo->value.enumerated.name, aureon_texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, aureon_texts);
}
static int aureon_cs8415_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1392,15 +1370,7 @@ static int aureon_oversampling_info(struct snd_kcontrol *k, struct snd_ctl_elem_
{
static const char * const texts[2] = { "128x", "64x" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int aureon_oversampling_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index 817a1bc50a60..5cb587cf360e 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -580,13 +580,7 @@ static int snd_ice1712_ewx_io_sense_info(struct snd_kcontrol *kcontrol, struct s
static const char * const texts[2] = {
"+4dBu", "-10dBV",
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= 2)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_ice1712_ewx_io_sense_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -903,13 +897,7 @@ static int snd_ice1712_6fire_select_input_info(struct snd_kcontrol *kcontrol, st
static const char * const texts[4] = {
"Internal", "Front Input", "Rear Input", "Wave Table"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item >= 4)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_ice1712_6fire_select_input_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/ice1712/hoontech.c b/sound/pci/ice1712/hoontech.c
index 59e37c581691..a40001c1d9e8 100644
--- a/sound/pci/ice1712/hoontech.c
+++ b/sound/pci/ice1712/hoontech.c
@@ -309,11 +309,7 @@ static int snd_ice1712_value_init(struct snd_ice1712 *ice)
return err;
/* ak4524 controls */
- err = snd_ice1712_akm4xxx_build_controls(ice);
- if (err < 0)
- return err;
-
- return 0;
+ return snd_ice1712_akm4xxx_build_controls(ice);
}
static int snd_ice1712_ez8_init(struct snd_ice1712 *ice)
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 206ed2cbcef9..b039b46152c6 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -620,10 +620,9 @@ static int snd_ice1712_playback_ds_prepare(struct snd_pcm_substream *substream)
{
struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- u32 period_size, buf_size, rate, tmp, chn;
+ u32 period_size, rate, tmp, chn;
period_size = snd_pcm_lib_period_bytes(substream) - 1;
- buf_size = snd_pcm_lib_buffer_bytes(substream) - 1;
tmp = 0x0064;
if (snd_pcm_format_width(runtime->format) == 16)
tmp &= ~0x04;
@@ -1295,10 +1294,7 @@ static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device, struct snd
return err;
}
- err = snd_ice1712_build_pro_mixer(ice);
- if (err < 0)
- return err;
- return 0;
+ return snd_ice1712_build_pro_mixer(ice);
}
/*
@@ -1545,10 +1541,9 @@ static int snd_ice1712_ac97_mixer(struct snd_ice1712 *ice)
dev_warn(ice->card->dev,
"cannot initialize ac97 for consumer, skipped\n");
else {
- err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_mixer_digmix_route_ac97, ice));
- if (err < 0)
- return err;
- return 0;
+ return snd_ctl_add(ice->card,
+ snd_ctl_new1(&snd_ice1712_mixer_digmix_route_ac97,
+ ice));
}
}
@@ -1839,13 +1834,7 @@ static int snd_ice1712_pro_internal_clock_info(struct snd_kcontrol *kcontrol,
"96000", /* 12: 7 */
"IEC958 Input", /* 13: -- */
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 14;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 14, texts);
}
static int snd_ice1712_pro_internal_clock_get(struct snd_kcontrol *kcontrol,
@@ -1930,13 +1919,7 @@ static int snd_ice1712_pro_internal_clock_default_info(struct snd_kcontrol *kcon
"96000", /* 12: 7 */
/* "IEC958 Input", 13: -- */
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 13;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 13, texts);
}
static int snd_ice1712_pro_internal_clock_default_get(struct snd_kcontrol *kcontrol,
@@ -2057,15 +2040,8 @@ static int snd_ice1712_pro_route_info(struct snd_kcontrol *kcontrol,
"IEC958 In L", "IEC958 In R", /* 9-10 */
"Digital Mixer", /* 11 - optional */
};
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items =
- snd_ctl_get_ioffidx(kcontrol, &uinfo->id) < 2 ? 12 : 11;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ int num_items = snd_ctl_get_ioffidx(kcontrol, &uinfo->id) < 2 ? 12 : 11;
+ return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
static int snd_ice1712_pro_route_analog_get(struct snd_kcontrol *kcontrol,
@@ -2516,11 +2492,8 @@ static int snd_ice1712_build_controls(struct snd_ice1712 *ice)
err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_mixer_pro_volume_rate, ice));
if (err < 0)
return err;
- err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_ice1712_mixer_pro_peak, ice));
- if (err < 0)
- return err;
-
- return 0;
+ return snd_ctl_add(ice->card,
+ snd_ctl_new1(&snd_ice1712_mixer_pro_peak, ice));
}
static int snd_ice1712_free(struct snd_ice1712 *ice)
@@ -2905,8 +2878,7 @@ static int snd_ice1712_resume(struct device *dev)
outw(ice->pm_saved_spdif_ctrl, ICEMT(ice, ROUTE_SPDOUT));
outw(ice->pm_saved_route, ICEMT(ice, ROUTE_PSDOUT03));
- if (ice->ac97)
- snd_ac97_resume(ice->ac97);
+ snd_ac97_resume(ice->ac97);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 08cb08ac85e6..d73da157ea14 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -2049,13 +2049,7 @@ static int snd_vt1724_pro_route_info(struct snd_kcontrol *kcontrol,
"IEC958 In L", "IEC958 In R", /* 3-4 */
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
static inline int analog_route_shift(int idx)
@@ -2503,11 +2497,8 @@ static int snd_vt1724_build_controls(struct snd_ice1712 *ice)
return err;
}
- err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_mixer_pro_peak, ice));
- if (err < 0)
- return err;
-
- return 0;
+ return snd_ctl_add(ice->card,
+ snd_ctl_new1(&snd_vt1724_mixer_pro_peak, ice));
}
static int snd_vt1724_free(struct snd_ice1712 *ice)
@@ -2884,8 +2875,7 @@ static int snd_vt1724_resume(struct device *dev)
outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG));
outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK));
- if (ice->ac97)
- snd_ac97_resume(ice->ac97);
+ snd_ac97_resume(ice->ac97);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
diff --git a/sound/pci/ice1712/juli.c b/sound/pci/ice1712/juli.c
index 7a6c0786c55c..a1536c1a7ed4 100644
--- a/sound/pci/ice1712/juli.c
+++ b/sound/pci/ice1712/juli.c
@@ -475,11 +475,8 @@ static int juli_add_controls(struct snd_ice1712 *ice)
return err;
/* only capture SPDIF over AK4114 */
- err = snd_ak4114_build(spec->ak4114, NULL,
+ return snd_ak4114_build(spec->ak4114, NULL,
ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
- if (err < 0)
- return err;
- return 0;
}
/*
diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
index 63aa39f06f02..7de25c4807fd 100644
--- a/sound/pci/ice1712/maya44.c
+++ b/sound/pci/ice1712/maya44.c
@@ -359,15 +359,7 @@ static int maya_rec_src_info(struct snd_kcontrol *kcontrol,
{
static const char * const texts[] = { "Line", "Mic" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ARRAY_SIZE(texts);
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
static int maya_rec_src_get(struct snd_kcontrol *kcontrol,
@@ -411,15 +403,7 @@ static int maya_pb_route_info(struct snd_kcontrol *kcontrol,
"Input 1", "Input 2", "Input 3", "Input 4"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ARRAY_SIZE(texts);
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
static int maya_pb_route_shift(int idx)
diff --git a/sound/pci/ice1712/phase.c b/sound/pci/ice1712/phase.c
index 0011e04f36a2..e9ca89c9174b 100644
--- a/sound/pci/ice1712/phase.c
+++ b/sound/pci/ice1712/phase.c
@@ -723,17 +723,7 @@ static int phase28_oversampling_info(struct snd_kcontrol *k,
{
static const char * const texts[2] = { "128x", "64x" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items -
- 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int phase28_oversampling_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/ice1712/pontis.c b/sound/pci/ice1712/pontis.c
index 5555eb4b2400..5101f40f6fbd 100644
--- a/sound/pci/ice1712/pontis.c
+++ b/sound/pci/ice1712/pontis.c
@@ -417,13 +417,7 @@ static int cs_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_inf
"Optical", /* RXP1 */
"CD", /* RXP2 */
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int cs_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
index f3b491aa3e22..3919aed39ca0 100644
--- a/sound/pci/ice1712/prodigy192.c
+++ b/sound/pci/ice1712/prodigy192.c
@@ -284,15 +284,7 @@ static int stac9460_mic_sw_info(struct snd_kcontrol *kcontrol,
{
static const char * const texts[2] = { "Line In", "Mic" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
@@ -563,13 +555,7 @@ static int ak4114_input_sw_info(struct snd_kcontrol *kcontrol,
{
static const char * const texts[2] = { "Toslink", "Coax" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
@@ -772,10 +758,8 @@ static int prodigy192_init(struct snd_ice1712 *ice)
"AK4114 initialized with status %d\n", err);
} else
dev_dbg(ice->card->dev, "AK4114 not found\n");
- if (err < 0)
- return err;
- return 0;
+ return err;
}
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 2261d1e49150..2697402b5195 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -537,7 +537,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol,
static int wm_adc_mux_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char* texts[32] = {
+ static const char * const texts[32] = {
"NULL", WM_AIN1, WM_AIN2, WM_AIN1 "+" WM_AIN2,
WM_AIN3, WM_AIN1 "+" WM_AIN3, WM_AIN2 "+" WM_AIN3,
WM_AIN1 "+" WM_AIN2 "+" WM_AIN3,
@@ -560,14 +560,7 @@ static int wm_adc_mux_enum_info(struct snd_kcontrol *kcontrol,
WM_AIN1 "+" WM_AIN2 "+" WM_AIN3 "+" WM_AIN4 "+" WM_AIN5
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 32;
- if (uinfo->value.enumerated.item > 31)
- uinfo->value.enumerated.item = 31;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 32, texts);
}
static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c
index 2c2df4b74e01..6f55e02e5c84 100644
--- a/sound/pci/ice1712/quartet.c
+++ b/sound/pci/ice1712/quartet.c
@@ -46,7 +46,7 @@ struct qtet_kcontrol_private {
unsigned int bit;
void (*set_register)(struct snd_ice1712 *ice, unsigned int val);
unsigned int (*get_register)(struct snd_ice1712 *ice);
- unsigned char * const texts[2];
+ const char * const texts[2];
};
enum {
@@ -554,17 +554,7 @@ static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol,
{
static const char * const texts[3] =
{"Line In 1/2", "Mic", "Mic + Low-cut"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ARRAY_SIZE(texts);
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
static int qtet_ain12_sw_get(struct snd_kcontrol *kcontrol,
@@ -706,17 +696,8 @@ static int qtet_enum_info(struct snd_kcontrol *kcontrol,
{
struct qtet_kcontrol_private private =
qtet_privates[kcontrol->private_value];
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ARRAY_SIZE(private.texts);
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- private.texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(private.texts),
+ private.texts);
}
static int qtet_sw_get(struct snd_kcontrol *kcontrol,
@@ -852,11 +833,8 @@ static int qtet_add_controls(struct snd_ice1712 *ice)
if (err < 0)
return err;
/* only capture SPDIF over AK4113 */
- err = snd_ak4113_build(spec->ak4113,
+ return snd_ak4113_build(spec->ak4113,
ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
- if (err < 0)
- return err;
- return 0;
}
static inline int qtet_is_spdif_master(struct snd_ice1712 *ice)
diff --git a/sound/pci/ice1712/revo.c b/sound/pci/ice1712/revo.c
index 1112ec1953be..1d81ae677573 100644
--- a/sound/pci/ice1712/revo.c
+++ b/sound/pci/ice1712/revo.c
@@ -494,11 +494,13 @@ static int ap192_ak4114_init(struct snd_ice1712 *ice)
ap192_ak4114_write,
ak4114_init_vals, ak4114_init_txcsb,
ice, &spec->ak4114);
+ if (err < 0)
+ return err;
/* AK4114 in Revo cannot detect external rate correctly.
* No reason to stop capture stream due to incorrect checks */
spec->ak4114->check_flags = AK4114_CHECK_NO_RATE;
- return 0; /* error ignored; it's no fatal error */
+ return 0;
}
static int revo_init(struct snd_ice1712 *ice)
diff --git a/sound/pci/ice1712/se.c b/sound/pci/ice1712/se.c
index ffd894bb4507..1c5d5b22c7a0 100644
--- a/sound/pci/ice1712/se.c
+++ b/sound/pci/ice1712/se.c
@@ -452,14 +452,7 @@ static int se200pci_cont_enum_info(struct snd_kcontrol *kc,
c = se200pci_get_enum_count(n);
if (!c)
return -EINVAL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = c;
- if (uinfo->value.enumerated.item >= c)
- uinfo->value.enumerated.item = c - 1;
- strcpy(uinfo->value.enumerated.name,
- se200pci_cont[n].member[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, c, se200pci_cont[n].member);
}
static int se200pci_cont_volume_get(struct snd_kcontrol *kc,
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 9fe549b2efdf..59d21c9401d2 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -444,9 +444,9 @@ static char *stateName[] = {
"Invalid"
};
-static char *clockSourceTypeName[] = { "ADAT", "S/PDIF", "local" };
+static const char * const clockSourceTypeName[] = { "ADAT", "S/PDIF", "local" };
-static char *clockSourceName[] = {
+static const char * const clockSourceName[] = {
"ADAT at 44.1 kHz",
"ADAT at 48 kHz",
"S/PDIF at 44.1 kHz",
@@ -455,7 +455,7 @@ static char *clockSourceName[] = {
"local clock at 48 kHz"
};
-static char *channelName[] = {
+static const char * const channelName[] = {
"ADAT-1",
"ADAT-2",
"ADAT-3",
@@ -1844,14 +1844,9 @@ static int snd_korg1212_control_volume_put(struct snd_kcontrol *kcontrol,
static int snd_korg1212_control_route_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = (kcontrol->private_value >= 8) ? 2 : 1;
- uinfo->value.enumerated.items = kAudioChannels;
- if (uinfo->value.enumerated.item > kAudioChannels-1) {
- uinfo->value.enumerated.item = kAudioChannels-1;
- }
- strcpy(uinfo->value.enumerated.name, channelName[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo,
+ (kcontrol->private_value >= 8) ? 2 : 1,
+ kAudioChannels, channelName);
}
static int snd_korg1212_control_route_get(struct snd_kcontrol *kcontrol,
@@ -1961,14 +1956,7 @@ static int snd_korg1212_control_put(struct snd_kcontrol *kcontrol,
static int snd_korg1212_control_sync_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2) {
- uinfo->value.enumerated.item = 2;
- }
- strcpy(uinfo->value.enumerated.name, clockSourceTypeName[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, clockSourceTypeName);
}
static int snd_korg1212_control_sync_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index a75c8dc66dec..4cf4be5ef82a 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -719,7 +719,7 @@ static int lola_probe(struct pci_dev *pci,
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
0, &card);
if (err < 0) {
- dev_err(card->dev, "Error creating card!\n");
+ dev_err(&pci->dev, "Error creating card!\n");
return err;
}
diff --git a/sound/pci/lola/lola_mixer.c b/sound/pci/lola/lola_mixer.c
index 782f4d8299ae..e7fe15dd5a90 100644
--- a/sound/pci/lola/lola_mixer.c
+++ b/sound/pci/lola/lola_mixer.c
@@ -108,8 +108,7 @@ int lola_init_pins(struct lola *chip, int dir, int *nidp)
void lola_free_mixer(struct lola *chip)
{
- if (chip->mixer.array_saved)
- vfree(chip->mixer.array_saved);
+ vfree(chip->mixer.array_saved);
}
int lola_init_mixer_widget(struct lola *chip, int nid)
diff --git a/sound/pci/lx6464es/lx_defs.h b/sound/pci/lx6464es/lx_defs.h
index 49d36bdd512c..469bcc685edf 100644
--- a/sound/pci/lx6464es/lx_defs.h
+++ b/sound/pci/lx6464es/lx_defs.h
@@ -175,7 +175,7 @@ enum buffer_flags {
BF_ZERO = 0x00, /* no flags (init).*/
};
-/**
+/*
* Stream Flags definitions
*/
enum stream_flags {
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 581e1e74863c..9996a4dead0f 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -37,10 +37,11 @@
/**
 * wait for a value on a pseudo register, exit with a timeout
*
- * @param mgr pointer to miXart manager structure
- * @param offset unsigned pseudo_register base + offset of value
- * @param value value
- * @param timeout timeout in centisenconds
+ * @mgr: pointer to miXart manager structure
+ * @offset: unsigned pseudo_register base + offset of value
+ * @is_egal: wait for the equal value
+ * @value: value
+ * @timeout: timeout in centiseconds
*/
static int mixart_wait_nice_for_register_value(struct mixart_mgr *mgr,
u32 offset, int is_egal,
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index b854fc5e01f5..c6092e48ceb6 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -501,10 +501,10 @@ int pcxhr_get_external_clock(struct pcxhr_mgr *mgr,
/*
* start or stop playback/capture substream
*/
-static int pcxhr_set_stream_state(struct pcxhr_stream *stream)
+static int pcxhr_set_stream_state(struct snd_pcxhr *chip,
+ struct pcxhr_stream *stream)
{
int err;
- struct snd_pcxhr *chip;
struct pcxhr_rmh rmh;
int stream_mask, start;
@@ -512,8 +512,8 @@ static int pcxhr_set_stream_state(struct pcxhr_stream *stream)
start = 1;
else {
if (stream->status != PCXHR_STREAM_STATUS_SCHEDULE_STOP) {
- snd_printk(KERN_ERR "ERROR pcxhr_set_stream_state "
- "CANNOT be stopped\n");
+ dev_err(chip->card->dev,
+ "pcxhr_set_stream_state CANNOT be stopped\n");
return -EINVAL;
}
start = 0;
@@ -560,6 +560,7 @@ static int pcxhr_set_format(struct pcxhr_stream *stream)
struct pcxhr_rmh rmh;
unsigned int header;
+ chip = snd_pcm_substream_chip(stream->substream);
switch (stream->format) {
case SNDRV_PCM_FORMAT_U8:
header = HEADER_FMT_BASE_LIN;
@@ -582,11 +583,10 @@ static int pcxhr_set_format(struct pcxhr_stream *stream)
header = HEADER_FMT_BASE_FLOAT | HEADER_FMT_INTEL;
break;
default:
- snd_printk(KERN_ERR
- "error pcxhr_set_format() : unknown format\n");
+ dev_err(chip->card->dev,
+ "error pcxhr_set_format() : unknown format\n");
return -EINVAL;
}
- chip = snd_pcm_substream_chip(stream->substream);
sample_rate = chip->mgr->sample_rate;
if (sample_rate <= 32000 && sample_rate !=0) {
@@ -643,11 +643,11 @@ static int pcxhr_update_r_buffer(struct pcxhr_stream *stream)
is_capture = (subs->stream == SNDRV_PCM_STREAM_CAPTURE);
stream_num = is_capture ? 0 : subs->number;
- snd_printdd("pcxhr_update_r_buffer(pcm%c%d) : "
- "addr(%p) bytes(%zx) subs(%d)\n",
- is_capture ? 'c' : 'p',
- chip->chip_idx, (void *)(long)subs->runtime->dma_addr,
- subs->runtime->dma_bytes, subs->number);
+ dev_dbg(chip->card->dev,
+ "pcxhr_update_r_buffer(pcm%c%d) : addr(%p) bytes(%zx) subs(%d)\n",
+ is_capture ? 'c' : 'p',
+ chip->chip_idx, (void *)(long)subs->runtime->dma_addr,
+ subs->runtime->dma_bytes, subs->number);
pcxhr_init_rmh(&rmh, CMD_UPDATE_R_BUFFERS);
pcxhr_set_pipe_cmd_params(&rmh, is_capture, stream->pipe->first_audio,
@@ -687,7 +687,7 @@ static int pcxhr_pipe_sample_count(struct pcxhr_stream *stream,
*sample_count = ((snd_pcm_uframes_t)rmh.stat[0]) << 24;
*sample_count += (snd_pcm_uframes_t)rmh.stat[1];
}
- snd_printdd("PIPE_SAMPLE_COUNT = %lx\n", *sample_count);
+ dev_dbg(chip->card->dev, "PIPE_SAMPLE_COUNT = %lx\n", *sample_count);
return err;
}
#endif
@@ -711,8 +711,9 @@ static void pcxhr_start_linked_stream(struct pcxhr_mgr *mgr)
int playback_mask = 0;
#ifdef CONFIG_SND_DEBUG_VERBOSE
- struct timeval my_tv1, my_tv2;
- do_gettimeofday(&my_tv1);
+ ktime_t start_time, stop_time, diff_time;
+
+ start_time = ktime_get();
#endif
mutex_lock(&mgr->setup_mutex);
@@ -778,12 +779,12 @@ static void pcxhr_start_linked_stream(struct pcxhr_mgr *mgr)
for (j = 0; j < chip->nb_streams_capt; j++) {
stream = &chip->capture_stream[j];
if (pcxhr_stream_scheduled_get_pipe(stream, &pipe))
- err = pcxhr_set_stream_state(stream);
+ err = pcxhr_set_stream_state(chip, stream);
}
for (j = 0; j < chip->nb_streams_play; j++) {
stream = &chip->playback_stream[j];
if (pcxhr_stream_scheduled_get_pipe(stream, &pipe))
- err = pcxhr_set_stream_state(stream);
+ err = pcxhr_set_stream_state(chip, stream);
}
}
@@ -823,9 +824,10 @@ static void pcxhr_start_linked_stream(struct pcxhr_mgr *mgr)
mutex_unlock(&mgr->setup_mutex);
#ifdef CONFIG_SND_DEBUG_VERBOSE
- do_gettimeofday(&my_tv2);
+ stop_time = ktime_get();
+ diff_time = ktime_sub(stop_time, start_time);
dev_dbg(&mgr->pci->dev, "***TRIGGER START*** TIME = %ld (err = %x)\n",
- (long)(my_tv2.tv_usec - my_tv1.tv_usec), err);
+ (long)(ktime_to_ns(diff_time)), err);
#endif
}
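The do_gettimeofday() pair is replaced by the monotonic ktime API here and in pcxhr_core.c below; unlike wall-clock time, ktime_get() cannot jump backwards. A minimal sketch of the measurement idiom, converting the delta explicitly when microseconds are wanted (the function and message are illustrative only):

#include <linux/ktime.h>

static void example_measure(void)
{
	ktime_t start, stop;
	s64 elapsed_us;

	start = ktime_get();	/* monotonic clock */
	/* ... work to be timed ... */
	stop = ktime_get();

	/* ktime_sub() yields a ktime_t delta; pick the unit explicitly */
	elapsed_us = ktime_to_us(ktime_sub(stop, start));
	pr_debug("elapsed %lld us\n", elapsed_us);
}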
@@ -837,12 +839,12 @@ static int pcxhr_trigger(struct snd_pcm_substream *subs, int cmd)
{
struct pcxhr_stream *stream;
struct snd_pcm_substream *s;
+ struct snd_pcxhr *chip = snd_pcm_substream_chip(subs);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- snd_printdd("SNDRV_PCM_TRIGGER_START\n");
+ dev_dbg(chip->card->dev, "SNDRV_PCM_TRIGGER_START\n");
if (snd_pcm_stream_linked(subs)) {
- struct snd_pcxhr *chip = snd_pcm_substream_chip(subs);
snd_pcm_group_for_each_entry(s, subs) {
if (snd_pcm_substream_chip(s) != chip)
continue;
@@ -854,7 +856,7 @@ static int pcxhr_trigger(struct snd_pcm_substream *subs, int cmd)
pcxhr_start_linked_stream(chip->mgr);
} else {
stream = subs->runtime->private_data;
- snd_printdd("Only one Substream %c %d\n",
+ dev_dbg(chip->card->dev, "Only one Substream %c %d\n",
stream->pipe->is_capture ? 'C' : 'P',
stream->pipe->first_audio);
if (pcxhr_set_format(stream))
@@ -863,17 +865,17 @@ static int pcxhr_trigger(struct snd_pcm_substream *subs, int cmd)
return -EINVAL;
stream->status = PCXHR_STREAM_STATUS_SCHEDULE_RUN;
- if (pcxhr_set_stream_state(stream))
+ if (pcxhr_set_stream_state(chip, stream))
return -EINVAL;
stream->status = PCXHR_STREAM_STATUS_RUNNING;
}
break;
case SNDRV_PCM_TRIGGER_STOP:
- snd_printdd("SNDRV_PCM_TRIGGER_STOP\n");
+ dev_dbg(chip->card->dev, "SNDRV_PCM_TRIGGER_STOP\n");
snd_pcm_group_for_each_entry(s, subs) {
stream = s->runtime->private_data;
stream->status = PCXHR_STREAM_STATUS_SCHEDULE_STOP;
- if (pcxhr_set_stream_state(stream))
+ if (pcxhr_set_stream_state(chip, stream))
return -EINVAL;
snd_pcm_trigger_done(s, subs);
}
@@ -1636,7 +1638,7 @@ static int pcxhr_probe(struct pci_dev *pci,
0, &card);
if (err < 0) {
- dev_err(card->dev, "cannot allocate the card %d\n", i);
+ dev_err(&pci->dev, "cannot allocate the card %d\n", i);
pcxhr_free(mgr);
return err;
}
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index a584acb61c00..181f7729d409 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -910,8 +910,9 @@ int pcxhr_set_pipe_state(struct pcxhr_mgr *mgr, int playback_mask,
int audio_mask;
#ifdef CONFIG_SND_DEBUG_VERBOSE
- struct timeval my_tv1, my_tv2;
- do_gettimeofday(&my_tv1);
+ ktime_t start_time, stop_time, diff_time;
+
+ start_time = ktime_get();
#endif
audio_mask = (playback_mask |
(capture_mask << PCXHR_PIPE_STATE_CAPTURE_OFFSET));
@@ -960,9 +961,10 @@ int pcxhr_set_pipe_state(struct pcxhr_mgr *mgr, int playback_mask,
return err;
}
#ifdef CONFIG_SND_DEBUG_VERBOSE
- do_gettimeofday(&my_tv2);
+ stop_time = ktime_get();
+ diff_time = ktime_sub(stop_time, start_time);
dev_dbg(&mgr->pci->dev, "***SET PIPE STATE*** TIME = %ld (err = %x)\n",
- (long)(my_tv2.tv_usec - my_tv1.tv_usec), err);
+ (long)(ktime_to_ns(diff_time)), err);
#endif
return 0;
}
diff --git a/sound/pci/pcxhr/pcxhr_mixer.c b/sound/pci/pcxhr/pcxhr_mixer.c
index 95c9571780d8..63136c4f3f3d 100644
--- a/sound/pci/pcxhr/pcxhr_mixer.c
+++ b/sound/pci/pcxhr/pcxhr_mixer.c
@@ -660,14 +660,7 @@ static int pcxhr_audio_src_info(struct snd_kcontrol *kcontrol,
if (chip->mgr->board_has_mic)
i = 5; /* Mic and MicroMix available */
}
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = i;
- if (uinfo->value.enumerated.item > (i-1))
- uinfo->value.enumerated.item = i-1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, i, texts);
}
static int pcxhr_audio_src_get(struct snd_kcontrol *kcontrol,
@@ -756,14 +749,7 @@ static int pcxhr_clock_type_info(struct snd_kcontrol *kcontrol,
texts = textsPCXHR;
snd_BUG_ON(clock_items > (PCXHR_CLOCK_TYPE_MAX+1));
}
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = clock_items;
- if (uinfo->value.enumerated.item >= clock_items)
- uinfo->value.enumerated.item = clock_items-1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, clock_items, texts);
}
static int pcxhr_clock_type_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 4afd3cab775b..6c60dcd2e5a1 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1608,30 +1608,24 @@ snd_rme32_info_inputtype_control(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct rme32 *rme32 = snd_kcontrol_chip(kcontrol);
- static char *texts[4] = { "Optical", "Coaxial", "Internal", "XLR" };
+ static const char * const texts[4] = {
+ "Optical", "Coaxial", "Internal", "XLR"
+ };
+ int num_items;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
switch (rme32->pci->device) {
case PCI_DEVICE_ID_RME_DIGI32:
case PCI_DEVICE_ID_RME_DIGI32_8:
- uinfo->value.enumerated.items = 3;
+ num_items = 3;
break;
case PCI_DEVICE_ID_RME_DIGI32_PRO:
- uinfo->value.enumerated.items = 4;
+ num_items = 4;
break;
default:
snd_BUG();
- break;
- }
- if (uinfo->value.enumerated.item >
- uinfo->value.enumerated.items - 1) {
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
+ return -EINVAL;
}
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
static int
snd_rme32_get_inputtype_control(struct snd_kcontrol *kcontrol,
@@ -1695,20 +1689,12 @@ static int
snd_rme32_info_clockmode_control(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = { "AutoSync",
+ static const char * const texts[4] = { "AutoSync",
"Internal 32.0kHz",
"Internal 44.1kHz",
"Internal 48.0kHz" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3) {
- uinfo->value.enumerated.item = 3;
- }
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int
snd_rme32_get_clockmode_control(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 5a395c87c6fc..2f1a85185a16 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -1884,39 +1884,38 @@ snd_rme96_put_loopback_control(struct snd_kcontrol *kcontrol, struct snd_ctl_ele
static int
snd_rme96_info_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *_texts[5] = { "Optical", "Coaxial", "Internal", "XLR", "Analog" };
+ static const char * const _texts[5] = {
+ "Optical", "Coaxial", "Internal", "XLR", "Analog"
+ };
struct rme96 *rme96 = snd_kcontrol_chip(kcontrol);
- char *texts[5] = { _texts[0], _texts[1], _texts[2], _texts[3], _texts[4] };
+ const char *texts[5] = {
+ _texts[0], _texts[1], _texts[2], _texts[3], _texts[4]
+ };
+ int num_items;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
switch (rme96->pci->device) {
case PCI_DEVICE_ID_RME_DIGI96:
case PCI_DEVICE_ID_RME_DIGI96_8:
- uinfo->value.enumerated.items = 3;
+ num_items = 3;
break;
case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
- uinfo->value.enumerated.items = 4;
+ num_items = 4;
break;
case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
if (rme96->rev > 4) {
/* PST */
- uinfo->value.enumerated.items = 4;
+ num_items = 4;
texts[3] = _texts[4]; /* Analog instead of XLR */
} else {
/* PAD */
- uinfo->value.enumerated.items = 5;
+ num_items = 5;
}
break;
default:
snd_BUG();
- break;
- }
- if (uinfo->value.enumerated.item > uinfo->value.enumerated.items - 1) {
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+ return -EINVAL;
}
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
static int
snd_rme96_get_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2002,16 +2001,9 @@ snd_rme96_put_inputtype_control(struct snd_kcontrol *kcontrol, struct snd_ctl_el
static int
snd_rme96_info_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = { "AutoSync", "Internal", "Word" };
+ static const char * const texts[3] = { "AutoSync", "Internal", "Word" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2) {
- uinfo->value.enumerated.item = 2;
- }
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int
snd_rme96_get_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2041,16 +2033,11 @@ snd_rme96_put_clockmode_control(struct snd_kcontrol *kcontrol, struct snd_ctl_el
static int
snd_rme96_info_attenuation_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = { "0 dB", "-6 dB", "-12 dB", "-18 dB" };
+ static const char * const texts[4] = {
+ "0 dB", "-6 dB", "-12 dB", "-18 dB"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3) {
- uinfo->value.enumerated.item = 3;
- }
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int
snd_rme96_get_attenuation_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2081,16 +2068,9 @@ snd_rme96_put_attenuation_control(struct snd_kcontrol *kcontrol, struct snd_ctl_
static int
snd_rme96_info_montracks_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = { "1+2", "3+4", "5+6", "7+8" };
+ static const char * const texts[4] = { "1+2", "3+4", "5+6", "7+8" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3) {
- uinfo->value.enumerated.item = 3;
- }
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int
snd_rme96_get_montracks_control(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 7646ba1664eb..cf5a6c8b9a63 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1680,16 +1680,13 @@ static int hdsp_set_spdif_input(struct hdsp *hdsp, int in)
static int snd_hdsp_info_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {"Optical", "Coaxial", "Internal", "AES"};
+ static const char * const texts[4] = {
+ "Optical", "Coaxial", "Internal", "AES"
+ };
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = ((hdsp->io_type == H9632) ? 4 : 3);
- if (uinfo->value.enumerated.item > ((hdsp->io_type == H9632) ? 3 : 2))
- uinfo->value.enumerated.item = ((hdsp->io_type == H9632) ? 3 : 2);
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 4 : 3,
+ texts);
}
static int snd_hdsp_get_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1786,16 +1783,14 @@ static int snd_hdsp_put_toggle_setting(struct snd_kcontrol *kcontrol,
static int snd_hdsp_info_spdif_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"32000", "44100", "48000", "64000", "88200", "96000", "None", "128000", "176400", "192000"};
+ static const char * const texts[] = {
+ "32000", "44100", "48000", "64000", "88200", "96000",
+ "None", "128000", "176400", "192000"
+ };
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = (hdsp->io_type == H9632) ? 10 : 7;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
+ texts);
}
static int snd_hdsp_get_spdif_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1872,14 +1867,13 @@ static int snd_hdsp_get_system_sample_rate(struct snd_kcontrol *kcontrol, struct
static int snd_hdsp_info_autosync_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- static char *texts[] = {"32000", "44100", "48000", "64000", "88200", "96000", "None", "128000", "176400", "192000"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = (hdsp->io_type == H9632) ? 10 : 7 ;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ static const char * const texts[] = {
+ "32000", "44100", "48000", "64000", "88200", "96000",
+ "None", "128000", "176400", "192000"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
+ texts);
}
static int snd_hdsp_get_autosync_sample_rate(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1940,15 +1934,9 @@ static int hdsp_system_clock_mode(struct hdsp *hdsp)
static int snd_hdsp_info_system_clock_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"Master", "Slave" };
+ static const char * const texts[] = {"Master", "Slave" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_hdsp_get_system_clock_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2049,19 +2037,16 @@ static int hdsp_set_clock_source(struct hdsp *hdsp, int mode)
static int snd_hdsp_info_clock_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"AutoSync", "Internal 32.0 kHz", "Internal 44.1 kHz", "Internal 48.0 kHz", "Internal 64.0 kHz", "Internal 88.2 kHz", "Internal 96.0 kHz", "Internal 128 kHz", "Internal 176.4 kHz", "Internal 192.0 KHz" };
+ static const char * const texts[] = {
+ "AutoSync", "Internal 32.0 kHz", "Internal 44.1 kHz",
+ "Internal 48.0 kHz", "Internal 64.0 kHz", "Internal 88.2 kHz",
+ "Internal 96.0 kHz", "Internal 128 kHz", "Internal 176.4 kHz",
+ "Internal 192.0 KHz"
+ };
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- if (hdsp->io_type == H9632)
- uinfo->value.enumerated.items = 10;
- else
- uinfo->value.enumerated.items = 7;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, (hdsp->io_type == H9632) ? 10 : 7,
+ texts);
}
static int snd_hdsp_get_clock_source(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2165,15 +2150,9 @@ static int hdsp_set_da_gain(struct hdsp *hdsp, int mode)
static int snd_hdsp_info_da_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"Hi Gain", "+4 dBu", "-10 dbV"};
+ static const char * const texts[] = {"Hi Gain", "+4 dBu", "-10 dbV"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_da_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2250,15 +2229,9 @@ static int hdsp_set_ad_gain(struct hdsp *hdsp, int mode)
static int snd_hdsp_info_ad_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"-10 dBV", "+4 dBu", "Lo Gain"};
+ static const char * const texts[] = {"-10 dBV", "+4 dBu", "Lo Gain"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_ad_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2335,15 +2308,9 @@ static int hdsp_set_phone_gain(struct hdsp *hdsp, int mode)
static int snd_hdsp_info_phone_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"0 dB", "-6 dB", "-12 dB"};
+ static const char * const texts[] = {"0 dB", "-6 dB", "-12 dB"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_hdsp_get_phone_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2439,31 +2406,28 @@ static int hdsp_set_pref_sync_ref(struct hdsp *hdsp, int pref)
static int snd_hdsp_info_pref_sync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"Word", "IEC958", "ADAT1", "ADAT Sync", "ADAT2", "ADAT3" };
+ static const char * const texts[] = {
+ "Word", "IEC958", "ADAT1", "ADAT Sync", "ADAT2", "ADAT3"
+ };
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
+ int num_items;
switch (hdsp->io_type) {
case Digiface:
case H9652:
- uinfo->value.enumerated.items = 6;
+ num_items = 6;
break;
case Multiface:
- uinfo->value.enumerated.items = 4;
+ num_items = 4;
break;
case H9632:
- uinfo->value.enumerated.items = 3;
+ num_items = 3;
break;
default:
return -EINVAL;
}
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, num_items, texts);
}
static int snd_hdsp_get_pref_sync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2543,15 +2507,11 @@ static int hdsp_autosync_ref(struct hdsp *hdsp)
static int snd_hdsp_info_autosync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"Word", "ADAT Sync", "IEC958", "None", "ADAT1", "ADAT2", "ADAT3" };
+ static const char * const texts[] = {
+ "Word", "ADAT Sync", "IEC958", "None", "ADAT1", "ADAT2", "ADAT3"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 7;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 7, texts);
}
static int snd_hdsp_get_autosync_ref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -2738,14 +2698,9 @@ static int snd_hdsp_put_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
static int snd_hdsp_info_sync_check(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"No Lock", "Lock", "Sync" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ static const char * const texts[] = {"No Lock", "Lock", "Sync" };
+
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int hdsp_wc_sync_check(struct hdsp *hdsp)
@@ -3101,15 +3056,11 @@ static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ct
static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"};
+ static const char * const texts[] = {
+ "Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 5, texts);
}
@@ -3234,15 +3185,9 @@ static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl
static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"On", "Off"};
+ static const char * const texts[] = {"On", "Off"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
@@ -3291,15 +3236,9 @@ static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd
static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"On", "Off"};
+ static const char * const texts[] = {"On", "Off"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = {
@@ -5368,8 +5307,7 @@ static int snd_hdsp_free(struct hdsp *hdsp)
snd_hdsp_free_buffers(hdsp);
- if (hdsp->firmware)
- release_firmware(hdsp->firmware);
+ release_firmware(hdsp->firmware);
vfree(hdsp->fw_uploaded);
if (hdsp->iobase)
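The hunks above all apply one pattern: each open-coded enumerated-control info callback (set type and count, set items, clamp the item index, strcpy() the name) collapses into a single snd_ctl_enum_info() call, and the string tables become const char * const. A minimal sketch of the resulting shape, using a hypothetical foo_info_route() callback and made-up item names:

#include <linux/kernel.h>
#include <sound/control.h>

static int foo_info_route(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_info *uinfo)
{
	/* const table; ARRAY_SIZE() keeps the item count in sync */
	static const char * const texts[] = { "Analog", "SPDIF" };

	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}

snd_ctl_enum_info() clamps the requested item and performs a bounded copy of the name itself, which is why the hand-rolled clamping and strcpy() calls can simply go.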
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 52d86af3ef2d..3342705a5715 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1257,14 +1257,13 @@ static int hdspm_rate_multiplier(struct hdspm *hdspm, int rate)
/* check for external sample rate, returns the sample rate in Hz*/
static int hdspm_external_sample_rate(struct hdspm *hdspm)
{
- unsigned int status, status2, timecode;
+ unsigned int status, status2;
int syncref, rate = 0, rate_bits;
switch (hdspm->io_type) {
case AES32:
status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
status = hdspm_read(hdspm, HDSPM_statusRegister);
- timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
syncref = hdspm_autosync_ref(hdspm);
switch (syncref) {
@@ -2202,10 +2201,10 @@ static inline int hdspm_get_pll_freq(struct hdspm *hdspm)
return rate;
}
-/**
+/*
* Calculate the real sample rate from the
* current DDS value.
- **/
+ */
static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
{
unsigned int rate;
@@ -2271,9 +2270,9 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
}
-/**
+/*
* Returns the WordClock sample rate class for the given card.
- **/
+ */
static int hdspm_get_wc_sample_rate(struct hdspm *hdspm)
{
int status;
@@ -2296,9 +2295,9 @@ static int hdspm_get_wc_sample_rate(struct hdspm *hdspm)
}
-/**
+/*
* Returns the TCO sample rate class for the given card.
- **/
+ */
static int hdspm_get_tco_sample_rate(struct hdspm *hdspm)
{
int status;
@@ -2322,9 +2321,9 @@ static int hdspm_get_tco_sample_rate(struct hdspm *hdspm)
}
-/**
+/*
* Returns the SYNC_IN sample rate class for the given card.
- **/
+ */
static int hdspm_get_sync_in_sample_rate(struct hdspm *hdspm)
{
int status;
@@ -2344,9 +2343,9 @@ static int hdspm_get_sync_in_sample_rate(struct hdspm *hdspm)
return 0;
}
-/**
+/*
* Returns the AES sample rate class for the given card.
- **/
+ */
static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index)
{
int timecode;
@@ -2362,10 +2361,10 @@ static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index)
return 0;
}
-/**
+/*
* Returns the sample rate class for input source <idx> for
* 'new style' cards like the AIO and RayDAT.
- **/
+ */
static int hdspm_get_s1_sample_rate(struct hdspm *hdspm, unsigned int idx)
{
int status = hdspm_read(hdspm, HDSPM_RD_STATUS_2);
@@ -2513,10 +2512,10 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
}
-/**
+/*
* Returns the system clock mode for the given card.
* @returns 0 - master, 1 - slave
- **/
+ */
static int hdspm_system_clock_mode(struct hdspm *hdspm)
{
switch (hdspm->io_type) {
@@ -2535,10 +2534,10 @@ static int hdspm_system_clock_mode(struct hdspm *hdspm)
}
-/**
+/*
* Sets the system clock mode.
* @param mode 0 - master, 1 - slave
- **/
+ */
static void hdspm_set_system_clock_mode(struct hdspm *hdspm, int mode)
{
hdspm_set_toggle_setting(hdspm,
@@ -2645,18 +2644,7 @@ static int hdspm_set_clock_source(struct hdspm * hdspm, int mode)
static int snd_hdspm_info_clock_source(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 9;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strcpy(uinfo->value.enumerated.name,
- texts_freq[uinfo->value.enumerated.item+1]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 9, texts_freq + 1);
}
static int snd_hdspm_get_clock_source(struct snd_kcontrol *kcontrol,
@@ -2704,11 +2692,11 @@ static int snd_hdspm_put_clock_source(struct snd_kcontrol *kcontrol,
}
-/**
+/*
* Returns the current preferred sync reference setting.
* The semantics of the return value are depending on the
* card, please see the comments for clarification.
- **/
+ */
static int hdspm_pref_sync_ref(struct hdspm * hdspm)
{
switch (hdspm->io_type) {
@@ -2807,11 +2795,11 @@ static int hdspm_pref_sync_ref(struct hdspm * hdspm)
}
-/**
+/*
* Set the preferred sync reference to <pref>. The semantics
* of <pref> are depending on the card type, see the comments
* for clarification.
- **/
+ */
static int hdspm_set_pref_sync_ref(struct hdspm * hdspm, int pref)
{
int p = 0;
@@ -4113,9 +4101,9 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
-/**
+/*
* TCO controls
- **/
+ */
static void hdspm_tco_write(struct hdspm *hdspm)
{
unsigned int tc[4] = { 0, 0, 0, 0};
@@ -4873,18 +4861,15 @@ snd_hdspm_proc_read_madi(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct hdspm *hdspm = entry->private_data;
- unsigned int status, status2, control, freq;
+ unsigned int status, status2;
char *pref_sync_ref;
char *autosync_ref;
char *system_clock_mode;
- char *insel;
int x, x2;
status = hdspm_read(hdspm, HDSPM_statusRegister);
status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
- control = hdspm->control_register;
- freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
hdspm->card_name, hdspm->card->number + 1,
@@ -4947,17 +4932,6 @@ snd_hdspm_proc_read_madi(struct snd_info_entry *entry,
snd_iprintf(buffer, "Line out: %s\n",
(hdspm->control_register & HDSPM_LineOut) ? "on " : "off");
- switch (hdspm->control_register & HDSPM_InputMask) {
- case HDSPM_InputOptical:
- insel = "Optical";
- break;
- case HDSPM_InputCoaxial:
- insel = "Coaxial";
- break;
- default:
- insel = "Unknown";
- }
-
snd_iprintf(buffer,
"ClearTrackMarker = %s, Transmit in %s Channel Mode, "
"Auto Input %s\n",
@@ -5202,15 +5176,13 @@ snd_hdspm_proc_read_raydat(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct hdspm *hdspm = entry->private_data;
- unsigned int status1, status2, status3, control, i;
+ unsigned int status1, status2, status3, i;
unsigned int lock, sync;
status1 = hdspm_read(hdspm, HDSPM_RD_STATUS_1); /* s1 */
status2 = hdspm_read(hdspm, HDSPM_RD_STATUS_2); /* freq */
status3 = hdspm_read(hdspm, HDSPM_RD_STATUS_3); /* s2 */
- control = hdspm->control_register;
-
snd_iprintf(buffer, "STATUS1: 0x%08x\n", status1);
snd_iprintf(buffer, "STATUS2: 0x%08x\n", status2);
snd_iprintf(buffer, "STATUS3: 0x%08x\n", status3);
@@ -5431,7 +5403,7 @@ static irqreturn_t snd_hdspm_interrupt(int irq, void *dev_id)
HDSPM_midi2IRQPending | HDSPM_midi3IRQPending);
/* now = get_cycles(); */
- /**
+ /*
* LAT_2..LAT_0 period counter (win) counter (mac)
* 6 4096 ~256053425 ~514672358
* 5 2048 ~128024983 ~257373821
@@ -5440,7 +5412,7 @@ static irqreturn_t snd_hdspm_interrupt(int irq, void *dev_id)
* 2 256 ~16003039 ~32260176
* 1 128 ~7998738 ~16194507
* 0 64 ~3998231 ~8191558
- **/
+ */
/*
dev_info(hdspm->card->dev, "snd_hdspm_interrupt %llu @ %llx\n",
now-hdspm->last_interrupt, status & 0xFFC0);
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index fa9a2a8dce5a..6521521853b8 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -920,15 +920,9 @@ static int rme9652_set_adat1_input(struct snd_rme9652 *rme9652, int internal)
static int snd_rme9652_info_adat1_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = {"ADAT1", "Internal"};
+ static const char * const texts[2] = {"ADAT1", "Internal"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_rme9652_get_adat1_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -991,15 +985,9 @@ static int rme9652_set_spdif_input(struct snd_rme9652 *rme9652, int in)
static int snd_rme9652_info_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {"ADAT1", "Coaxial", "Internal"};
+ static const char * const texts[3] = {"ADAT1", "Coaxial", "Internal"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_rme9652_get_spdif_in(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1140,15 +1128,11 @@ static int rme9652_set_sync_mode(struct snd_rme9652 *rme9652, int mode)
static int snd_rme9652_info_sync_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {"AutoSync", "Master", "Word Clock"};
+ static const char * const texts[3] = {
+ "AutoSync", "Master", "Word Clock"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 3;
- if (uinfo->value.enumerated.item > 2)
- uinfo->value.enumerated.item = 2;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
static int snd_rme9652_get_sync_mode(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1231,16 +1215,14 @@ static int rme9652_set_sync_pref(struct snd_rme9652 *rme9652, int pref)
static int snd_rme9652_info_sync_pref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {"IEC958 In", "ADAT1 In", "ADAT2 In", "ADAT3 In"};
+ static const char * const texts[4] = {
+ "IEC958 In", "ADAT1 In", "ADAT2 In", "ADAT3 In"
+ };
struct snd_rme9652 *rme9652 = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = rme9652->ss_channels == RME9652_NCHANNELS ? 4 : 3;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1,
+ rme9652->ss_channels == RME9652_NCHANNELS ? 4 : 3,
+ texts);
}
static int snd_rme9652_get_sync_pref(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -1392,15 +1374,11 @@ static int snd_rme9652_get_spdif_rate(struct snd_kcontrol *kcontrol, struct snd_
static int snd_rme9652_info_adat_sync(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {"No Lock", "Lock", "No Lock Sync", "Lock Sync"};
+ static const char * const texts[4] = {
+ "No Lock", "Lock", "No Lock Sync", "Lock Sync"
+ };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 4, texts);
}
static int snd_rme9652_get_adat_sync(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 5b0d317cc9a6..313a7328bf9c 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -918,17 +918,11 @@ static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device,
static int snd_sonicvibes_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[7] = {
+ static const char * const texts[7] = {
"CD", "PCM", "Aux1", "Line", "Aux0", "Mic", "Mix"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 2;
- uinfo->value.enumerated.items = 7;
- if (uinfo->value.enumerated.item >= 7)
- uinfo->value.enumerated.item = 6;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 2, 7, texts);
}
static int snd_sonicvibes_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index da875dced2ef..57cd757acfe7 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3702,8 +3702,7 @@ static int snd_trident_free(struct snd_trident *trident)
free_irq(trident->irq, trident);
if (trident->tlb.buffer.area) {
outl(0, TRID_REG(trident, NX_TLBC));
- if (trident->tlb.memhdr)
- snd_util_memhdr_free(trident->tlb.memhdr);
+ snd_util_memhdr_free(trident->tlb.memhdr);
if (trident->tlb.silent_page.area)
snd_dma_free_pages(&trident->tlb.silent_page);
vfree(trident->tlb.shadow_entries);
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index ecedf4dbfa2a..e088467fb736 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1610,16 +1610,10 @@ static int snd_via8233_capture_source_info(struct snd_kcontrol *kcontrol,
/* formerly they were "Line" and "Mic", but it looks like that they
* have nothing to do with the actual physical connections...
*/
- static char *texts[2] = {
+ static const char * const texts[2] = {
"Input1", "Input2"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= 2)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snd_via8233_capture_source_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index 2d1570273e99..52c1a8d5b88a 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -92,6 +92,7 @@ static inline unsigned long vx2_reg_addr(struct vx_core *_chip, int reg)
/**
* snd_vx_inb - read a byte from the register
+ * @chip: VX core instance
* @offset: register enum
*/
static unsigned char vx2_inb(struct vx_core *chip, int offset)
@@ -101,6 +102,7 @@ static unsigned char vx2_inb(struct vx_core *chip, int offset)
/**
* snd_vx_outb - write a byte on the register
+ * @chip: VX core instance
* @offset: the register offset
* @val: the value to write
*/
@@ -114,6 +116,7 @@ static void vx2_outb(struct vx_core *chip, int offset, unsigned char val)
/**
* snd_vx_inl - read a 32bit word from the register
+ * @chip: VX core instance
* @offset: register enum
*/
static unsigned int vx2_inl(struct vx_core *chip, int offset)
@@ -123,6 +126,7 @@ static unsigned int vx2_inl(struct vx_core *chip, int offset)
/**
* snd_vx_outl - write a 32bit word on the register
+ * @chip: VX core instance
* @offset: the register enum
* @val: the value to write
*/
@@ -223,6 +227,7 @@ static int vx2_test_xilinx(struct vx_core *_chip)
/**
* vx_setup_pseudo_dma - set up the pseudo dma read/write mode.
+ * @chip: VX core instance
* @do_write: 0 = read, 1 = set up for DMA write
*/
static void vx2_setup_pseudo_dma(struct vx_core *chip, int do_write)
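The vx222 and vxpocket hunks here, like the hdspm ones above, are comment-only fixes: genuine kernel-doc blocks gain the @chip parameter line they were missing, while comments that merely start with the kernel-doc opener but are not kernel-doc are downgraded to plain block comments. A sketch of the documented form, with a hypothetical foo_inb() stub:

/**
 * foo_inb - read a byte from a chip register
 * @chip: VX core instance
 * @offset: register offset
 *
 * Every parameter in the prototype needs a matching @name line, like the
 * @chip entries being added in these hunks.
 */
static unsigned char foo_inb(struct vx_core *chip, int offset)
{
	return 0;	/* body elided; only the comment shape matters here */
}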
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 92ec11456e3a..b16f42deed67 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -174,6 +174,7 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl,
/**
* snd_vxpocket_assign_resources - initialize the hardware and card instance.
+ * @chip: VX core instance
* @port: i/o port for the card
* @irq: irq number for the card
*
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 8a431bcb056c..5a13b22748b2 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -887,8 +887,7 @@ static int snd_pmac_free(struct snd_pmac *chip)
}
}
- if (chip->pdev)
- pci_dev_put(chip->pdev);
+ pci_dev_put(chip->pdev);
of_node_put(chip->node);
kfree(chip);
return 0;
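The hdsp, trident and pmac hunks drop NULL checks around cleanup calls because release_firmware(), snd_util_memhdr_free(), pci_dev_put() and vfree() all accept a NULL argument and return quietly. A sketch of the simplified teardown path (struct foo_chip and foo_teardown() are hypothetical):

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

struct foo_chip {
	const struct firmware *firmware;
	struct pci_dev *pdev;
	void *buffer;
};

static void foo_teardown(struct foo_chip *chip)
{
	release_firmware(chip->firmware);	/* NULL-safe */
	pci_dev_put(chip->pdev);		/* NULL-safe */
	vfree(chip->buffer);			/* NULL-safe */
}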
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index 350a7c8f86dd..33c6be9fb388 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -168,7 +168,6 @@ static struct platform_driver snd_pmac_driver = {
.remove = snd_pmac_remove,
.driver = {
.name = SND_PMAC_DRIVER,
- .owner = THIS_MODULE,
.pm = SND_PMAC_PM_OPS,
},
};
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index b9ffc17a4799..24c8766a925d 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -795,16 +795,11 @@ static int snapper_set_capture_source(struct pmac_tumbler *mix)
static int snapper_info_capture_source(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = {
+ static const char * const texts[2] = {
"Line", "Mic"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
+
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
}
static int snapper_get_capture_source(struct snd_kcontrol *kcontrol,
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 47849eaf266d..f44dda610ed2 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -652,7 +652,6 @@ static struct platform_driver snd_aica_driver = {
.remove = snd_aica_remove,
.driver = {
.name = SND_AICA_DRIVER,
- .owner = THIS_MODULE,
},
};
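Scattered through the rest of this diff is the mechanical removal of ".owner = THIS_MODULE" from struct platform_driver initializers: platform_driver_register() is a macro that hands THIS_MODULE to __platform_driver_register(), which fills in driver.owner itself, so the explicit assignment is redundant. A sketch of the trimmed form (foo_probe(), foo_remove() and the device name are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_audio_driver = {
	.driver = {
		.name = "foo-audio",
		/* no .owner: platform_driver_register() sets it */
	},
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_platform_driver(foo_audio_driver);

MODULE_LICENSE("GPL");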
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index d1fb74dabbd1..abf9c0cab1e2 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -436,7 +436,6 @@ static struct platform_driver sh_dac_driver = {
.remove = snd_sh_dac_remove,
.driver = {
.name = "dac_audio",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index d88edfced8c4..865e090c8061 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,10 +1,14 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
-snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o
+snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
endif
+ifneq ($(CONFIG_SND_SOC_AC97_BUS),)
+snd-soc-core-objs += soc-ac97.o
+endif
+
obj-$(CONFIG_SND_SOC) += snd-soc-core.o
obj-$(CONFIG_SND_SOC) += codecs/
obj-$(CONFIG_SND_SOC) += generic/
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 6058c1fd5070..7752860f7230 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -263,7 +263,6 @@ MODULE_DEVICE_TABLE(of, axi_i2s_of_match);
static struct platform_driver axi_i2s_driver = {
.driver = {
.name = "axi-i2s",
- .owner = THIS_MODULE,
.of_match_table = axi_i2s_of_match,
},
.probe = axi_i2s_probe,
diff --git a/sound/soc/adi/axi-spdif.c b/sound/soc/adi/axi-spdif.c
index 198e3a4640f6..d7259d412892 100644
--- a/sound/soc/adi/axi-spdif.c
+++ b/sound/soc/adi/axi-spdif.c
@@ -258,7 +258,6 @@ MODULE_DEVICE_TABLE(of, axi_spdif_of_match);
static struct platform_driver axi_spdif_driver = {
.driver = {
.name = "axi-spdif",
- .owner = THIS_MODULE,
.of_match_table = axi_spdif_of_match,
},
.probe = axi_spdif_probe,
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 27e3fc4a536b..fb3878312bf8 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -52,12 +52,3 @@ config SND_AT91_SOC_SAM9X5_WM8731
help
Say Y if you want to add support for audio SoC on an
at91sam9x5 based board that is using WM8731 codec.
-
-config SND_AT91_SOC_AFEB9260
- tristate "SoC Audio support for AFEB9260 board"
- depends on ARCH_AT91 && ATMEL_SSC && ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
- select SND_ATMEL_SOC_PDC
- select SND_ATMEL_SOC_SSC
- select SND_SOC_TLV320AIC23_I2C
- help
- Say Y here to support sound on AFEB9260 board.
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index 5baabc8bde3a..466a821da98c 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -17,4 +17,3 @@ snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
-obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index b79a2a864513..33fb3bb133df 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -80,9 +80,7 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
/* stop RX and capture: will be enabled again at restart */
ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
- snd_pcm_stream_lock(substream);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stop_xrun(substream);
/* now drain RHR and read status to remove xrun condition */
ssc_readx(prtd->ssc->regs, SSC_RHR);
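The atmel-pcm-dma hunk above replaces the open-coded lock/stop/unlock sequence with snd_pcm_stop_xrun(), which takes the stream lock and moves the stream to the XRUN state in one call. Roughly (the handler name is invented):

#include <sound/pcm.h>

static void foo_handle_overrun(struct snd_pcm_substream *substream)
{
	/* same effect as stream_lock + snd_pcm_stop(XRUN) + stream_unlock */
	snd_pcm_stop_xrun(substream);
}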
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index f403f399808a..b1cc2a4a7fc0 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -310,7 +310,10 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
* transmit and receive, so if a value has already
* been set, it must match this value.
*/
- if (ssc_p->cmr_div == 0)
+ if (ssc_p->dir_mask !=
+ (SSC_DIR_MASK_PLAYBACK | SSC_DIR_MASK_CAPTURE))
+ ssc_p->cmr_div = div;
+ else if (ssc_p->cmr_div == 0)
ssc_p->cmr_div = div;
else
if (div != ssc_p->cmr_div)
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
index 4052268ce462..aa354e1c6ff7 100644
--- a/sound/soc/atmel/atmel_wm8904.c
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -181,7 +181,6 @@ static const struct of_device_id atmel_asoc_wm8904_dt_ids[] = {
static struct platform_driver atmel_asoc_wm8904_driver = {
.driver = {
.name = "atmel-wm8904-audio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_asoc_wm8904_dt_ids),
},
.probe = atmel_asoc_wm8904_probe,
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index bb1149126c54..66b66d0e7514 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -309,7 +309,6 @@ MODULE_DEVICE_TABLE(of, at91sam9g20ek_wm8731_dt_ids);
static struct platform_driver at91sam9g20ek_audio_driver = {
.driver = {
.name = "at91sam9g20ek-audio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(at91sam9g20ek_wm8731_dt_ids),
},
.probe = at91sam9g20ek_audio_probe,
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 3188036a18f0..ccdf547f4d8c 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -192,7 +192,6 @@ MODULE_DEVICE_TABLE(of, sam9x5_wm8731_of_match);
static struct platform_driver sam9x5_wm8731_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sam9x5_wm8731_of_match),
},
.probe = sam9x5_wm8731_driver_probe,
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
deleted file mode 100644
index 9579799ace54..000000000000
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * afeb9260.c -- SoC audio for AFEB9260
- *
- * Copyright (C) 2009 Sergey Lapin <slapin@ossfans.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include <linux/atmel-ssc.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <linux/gpio.h>
-
-#include "../codecs/tlv320aic23.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-#define CODEC_CLOCK 12000000
-
-static int afeb9260_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- int err;
-
- /* Set the codec system clock for DAC and ADC */
- err =
- snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_IN);
-
- if (err < 0) {
- printk(KERN_ERR "can't set codec system clock\n");
- return err;
- }
-
- return err;
-}
-
-static struct snd_soc_ops afeb9260_ops = {
- .hw_params = afeb9260_hw_params,
-};
-
-static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_LINE("Line In", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
-};
-
-static const struct snd_soc_dapm_route afeb9260_audio_map[] = {
- {"Headphone Jack", NULL, "LHPOUT"},
- {"Headphone Jack", NULL, "RHPOUT"},
-
- {"LLINEIN", NULL, "Line In"},
- {"RLINEIN", NULL, "Line In"},
-
- {"MICIN", NULL, "Mic Jack"},
-};
-
-
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link afeb9260_dai = {
- .name = "TLV320AIC23",
- .stream_name = "AIC23",
- .cpu_dai_name = "atmel-ssc-dai.0",
- .codec_dai_name = "tlv320aic23-hifi",
- .platform_name = "atmel_pcm-audio",
- .codec_name = "tlv320aic23-codec.0-001a",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
- SND_SOC_DAIFMT_CBM_CFM,
- .ops = &afeb9260_ops,
-};
-
-/* Audio machine driver */
-static struct snd_soc_card snd_soc_machine_afeb9260 = {
- .name = "AFEB9260",
- .owner = THIS_MODULE,
- .dai_link = &afeb9260_dai,
- .num_links = 1,
-
- .dapm_widgets = tlv320aic23_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
- .dapm_routes = afeb9260_audio_map,
- .num_dapm_routes = ARRAY_SIZE(afeb9260_audio_map),
-};
-
-static struct platform_device *afeb9260_snd_device;
-
-static int __init afeb9260_soc_init(void)
-{
- int err;
- struct device *dev;
-
- if (!(machine_is_afeb9260()))
- return -ENODEV;
-
-
- afeb9260_snd_device = platform_device_alloc("soc-audio", -1);
- if (!afeb9260_snd_device) {
- printk(KERN_ERR "ASoC: Platform device allocation failed\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(afeb9260_snd_device, &snd_soc_machine_afeb9260);
- err = platform_device_add(afeb9260_snd_device);
- if (err)
- goto err1;
-
- dev = &afeb9260_snd_device->dev;
-
- return 0;
-err1:
- platform_device_put(afeb9260_snd_device);
- return err;
-}
-
-static void __exit afeb9260_soc_exit(void)
-{
- platform_device_unregister(afeb9260_snd_device);
-}
-
-module_init(afeb9260_soc_init);
-module_exit(afeb9260_soc_exit);
-
-MODULE_AUTHOR("Sergey Lapin <slapin@ossfans.org>");
-MODULE_DESCRIPTION("ALSA SoC for AFEB9260");
-MODULE_LICENSE("GPL");
-
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index c8a2de103c5f..29a97d52e8ad 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -205,7 +205,7 @@ static int au1xac97c_dai_probe(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver au1xac97c_dai_driver = {
.name = "alchemy-ac97c",
- .ac97_control = 1,
+ .bus_control = true,
.probe = au1xac97c_dai_probe,
.playback = {
.rates = AC97_RATES,
@@ -334,7 +334,6 @@ static const struct dev_pm_ops au1xpscac97_pmops = {
static struct platform_driver au1xac97c_driver = {
.driver = {
.name = "alchemy-ac97c",
- .owner = THIS_MODULE,
.pm = AU1XPSCAC97_PMOPS,
},
.probe = au1xac97c_drvprobe,
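In the ASoC DAI drivers from here on, the AC'97-specific ".ac97_control = 1" flag becomes the generic ".bus_control = true" in struct snd_soc_dai_driver; the meaning is unchanged (the DAI also carries register I/O for its bus, as an AC'97 link does). A hedged sketch with invented names and example stream parameters:

#include <sound/pcm.h>
#include <sound/soc.h>

static struct snd_soc_dai_driver foo_ac97_dai = {
	.name		= "foo-ac97",
	.bus_control	= true,		/* was: .ac97_control = 1 */
	.playback	= {
		.stream_name	= "AC97 Playback",
		.channels_min	= 2,
		.channels_max	= 2,
		.rates		= SNDRV_PCM_RATE_8000_48000,
		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
	},
};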
diff --git a/sound/soc/au1x/db1000.c b/sound/soc/au1x/db1000.c
index 376d976bcc2d..452f404abfd2 100644
--- a/sound/soc/au1x/db1000.c
+++ b/sound/soc/au1x/db1000.c
@@ -51,7 +51,6 @@ static int db1000_audio_remove(struct platform_device *pdev)
static struct platform_driver db1000_audio_driver = {
.driver = {
.name = "db1000-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = db1000_audio_probe,
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index decba87a074c..a747ac0b399f 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -200,7 +200,6 @@ static int db1200_audio_remove(struct platform_device *pdev)
static struct platform_driver db1200_audio_driver = {
.driver = {
.name = "db1200-ac97",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.id_table = db1200_pids,
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 17a24d804734..b06b8d8128c6 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -363,7 +363,6 @@ static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
static struct platform_driver au1xpsc_pcm_driver = {
.driver = {
.name = "au1xpsc-pcm",
- .owner = THIS_MODULE,
},
.probe = au1xpsc_pcm_drvprobe,
.remove = au1xpsc_pcm_drvremove,
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index e920b60bf6c2..6ffaaff469c7 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -331,7 +331,6 @@ static int alchemy_pcm_drvremove(struct platform_device *pdev)
static struct platform_driver alchemy_pcmdma_driver = {
.driver = {
.name = "alchemy-pcm-dma",
- .owner = THIS_MODULE,
},
.probe = alchemy_pcm_drvprobe,
.remove = alchemy_pcm_drvremove,
diff --git a/sound/soc/au1x/i2sc.c b/sound/soc/au1x/i2sc.c
index b3f37f6edbcb..450c842c776c 100644
--- a/sound/soc/au1x/i2sc.c
+++ b/sound/soc/au1x/i2sc.c
@@ -310,7 +310,6 @@ static const struct dev_pm_ops au1xi2sc_pmops = {
static struct platform_driver au1xi2s_driver = {
.driver = {
.name = "alchemy-i2sc",
- .owner = THIS_MODULE,
.pm = AU1XI2SC_PMOPS,
},
.probe = au1xi2s_drvprobe,
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 84f31e1f9d24..bb53c7059005 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -343,7 +343,7 @@ static const struct snd_soc_dai_ops au1xpsc_ac97_dai_ops = {
};
static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
- .ac97_control = 1,
+ .bus_control = true,
.probe = au1xpsc_ac97_probe,
.playback = {
.rates = AC97_RATES,
@@ -490,7 +490,6 @@ static struct dev_pm_ops au1xpscac97_pmops = {
static struct platform_driver au1xpsc_ac97_driver = {
.driver = {
.name = "au1xpsc_ac97",
- .owner = THIS_MODULE,
.pm = AU1XPSCAC97_PMOPS,
},
.probe = au1xpsc_ac97_drvprobe,
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 814beffc56f2..e742ef668496 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -419,7 +419,6 @@ static struct dev_pm_ops au1xpsci2s_pmops = {
static struct platform_driver au1xpsc_i2s_driver = {
.driver = {
.name = "au1xpsc_i2s",
- .owner = THIS_MODULE,
.pm = AU1XPSCI2S_PMOPS,
},
.probe = au1xpsc_i2s_drvprobe,
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 2685fe4f8427..03fa1cbf8ec1 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -866,7 +866,6 @@ static struct platform_driver bcm2835_i2s_driver = {
.probe = bcm2835_i2s_probe,
.driver = {
.name = "bcm2835-i2s",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_i2s_of_match,
},
};
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index cdb8ee75ded9..238913e030e0 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -462,7 +462,6 @@ static int bf5xx_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver bf5xx_pcm_driver = {
.driver = {
.name = "bfin-ac97-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = bf5xx_soc_platform_probe,
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index e82eb373a731..a040cfe29fc0 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,7 +260,7 @@ static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
#endif
static struct snd_soc_dai_driver bfin_ac97_dai = {
- .ac97_control = 1,
+ .bus_control = true,
.suspend = bf5xx_ac97_suspend,
.resume = bf5xx_ac97_resume,
.playback = {
@@ -375,7 +375,6 @@ static int asoc_bfin_ac97_remove(struct platform_device *pdev)
static struct platform_driver asoc_bfin_ac97_driver = {
.driver = {
.name = "bfin-ac97",
- .owner = THIS_MODULE,
},
.probe = asoc_bfin_ac97_probe,
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index 8fcfc4ec3a51..5bf1501e5e3c 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -104,7 +104,6 @@ static int bf5xx_ad1836_driver_remove(struct platform_device *pdev)
static struct platform_driver bf5xx_ad1836_driver = {
.driver = {
.name = "bfin-snd-ad1836",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bf5xx_ad1836_driver_probe,
diff --git a/sound/soc/blackfin/bf5xx-ad1980.c b/sound/soc/blackfin/bf5xx-ad1980.c
index 3450e8f9080d..0fa81a523b8a 100644
--- a/sound/soc/blackfin/bf5xx-ad1980.c
+++ b/sound/soc/blackfin/bf5xx-ad1980.c
@@ -46,8 +46,6 @@
#include <linux/gpio.h>
#include <asm/portmux.h>
-#include "../codecs/ad1980.h"
-
#include "bf5xx-ac97.h"
static struct snd_soc_card bf5xx_board;
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index bcf591373a7a..d95477afcc67 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -354,7 +354,6 @@ static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver bfin_i2s_pcm_driver = {
.driver = {
.name = "bfin-i2s-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = bfin_i2s_soc_platform_probe,
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index 39d774839b3e..b69aeef6418e 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -379,7 +379,6 @@ static struct platform_driver bfin_i2s_driver = {
.remove = bf5xx_i2s_remove,
.driver = {
.name = "bfin-i2s",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/blackfin/bf6xx-i2s.c b/sound/soc/blackfin/bf6xx-i2s.c
index 5810a0603f2f..bd3b4d464145 100644
--- a/sound/soc/blackfin/bf6xx-i2s.c
+++ b/sound/soc/blackfin/bf6xx-i2s.c
@@ -229,7 +229,6 @@ static struct platform_driver bfin_i2s_driver = {
.remove = bfin_i2s_remove,
.driver = {
.name = "bfin-i2s",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/blackfin/bfin-eval-adau1373.c b/sound/soc/blackfin/bfin-eval-adau1373.c
index 4ef9683bcad8..523baf5820d7 100644
--- a/sound/soc/blackfin/bfin-eval-adau1373.c
+++ b/sound/soc/blackfin/bfin-eval-adau1373.c
@@ -169,7 +169,6 @@ static int bfin_eval_adau1373_remove(struct platform_device *pdev)
static struct platform_driver bfin_eval_adau1373_driver = {
.driver = {
.name = "bfin-eval-adau1373",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1373_probe,
diff --git a/sound/soc/blackfin/bfin-eval-adau1701.c b/sound/soc/blackfin/bfin-eval-adau1701.c
index 3b55081a96c0..f9e926dfd4ef 100644
--- a/sound/soc/blackfin/bfin-eval-adau1701.c
+++ b/sound/soc/blackfin/bfin-eval-adau1701.c
@@ -109,7 +109,6 @@ static int bfin_eval_adau1701_remove(struct platform_device *pdev)
static struct platform_driver bfin_eval_adau1701_driver = {
.driver = {
.name = "bfin-eval-adau1701",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1701_probe,
diff --git a/sound/soc/blackfin/bfin-eval-adau1x61.c b/sound/soc/blackfin/bfin-eval-adau1x61.c
index 3011906f9d3b..4229f76daec9 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x61.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x61.c
@@ -129,7 +129,6 @@ static int bfin_eval_adau1x61_probe(struct platform_device *pdev)
static struct platform_driver bfin_eval_adau1x61_driver = {
.driver = {
.name = "bfin-eval-adau1x61",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1x61_probe,
diff --git a/sound/soc/blackfin/bfin-eval-adau1x81.c b/sound/soc/blackfin/bfin-eval-adau1x81.c
index 5c380f6aed1a..3e01cbe53fc7 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x81.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x81.c
@@ -117,7 +117,6 @@ static int bfin_eval_adau1x81_probe(struct platform_device *pdev)
static struct platform_driver bfin_eval_adau1x81_driver = {
.driver = {
.name = "bfin-eval-adau1x81",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1x81_probe,
diff --git a/sound/soc/blackfin/bfin-eval-adav80x.c b/sound/soc/blackfin/bfin-eval-adav80x.c
index 3b1b61a44815..27eee66afdb2 100644
--- a/sound/soc/blackfin/bfin-eval-adav80x.c
+++ b/sound/soc/blackfin/bfin-eval-adav80x.c
@@ -141,7 +141,6 @@ MODULE_DEVICE_TABLE(platform, bfin_eval_adav80x_ids);
static struct platform_driver bfin_eval_adav80x_driver = {
.driver = {
.name = "bfin-eval-adav80x",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adav80x_probe,
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 5477c5475923..7b7fbcd49e5e 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -36,7 +36,8 @@ config SND_EP93XX_SOC_EDB93XX
tristate "SoC Audio support for Cirrus Logic EDB93xx boards"
depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A)
select SND_EP93XX_SOC_I2S
- select SND_SOC_CS4271
+ select SND_SOC_CS4271_I2C if I2C
+ select SND_SOC_CS4271_SPI if SPI_MASTER
help
Say Y or M here if you want to add support for I2S audio on the
Cirrus Logic EDB93xx boards.
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
index 4f900efc437c..85962657aabe 100644
--- a/sound/soc/cirrus/edb93xx.c
+++ b/sound/soc/cirrus/edb93xx.c
@@ -113,7 +113,6 @@ static int edb93xx_remove(struct platform_device *pdev)
static struct platform_driver edb93xx_driver = {
.driver = {
.name = "edb93xx-audio",
- .owner = THIS_MODULE,
},
.probe = edb93xx_probe,
.remove = edb93xx_remove,
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index f30dadf85b99..bbf7a9266a99 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -338,7 +338,7 @@ static const struct snd_soc_dai_ops ep93xx_ac97_dai_ops = {
static struct snd_soc_dai_driver ep93xx_ac97_dai = {
.name = "ep93xx-ac97",
.id = 0,
- .ac97_control = 1,
+ .bus_control = true,
.probe = ep93xx_ac97_dai_probe,
.playback = {
.stream_name = "AC97 Playback",
@@ -439,7 +439,6 @@ static struct platform_driver ep93xx_ac97_driver = {
.remove = ep93xx_ac97_remove,
.driver = {
.name = "ep93xx-ac97",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 943145f9d1b6..934f8aefdd90 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -451,7 +451,6 @@ static struct platform_driver ep93xx_i2s_driver = {
.remove = ep93xx_i2s_remove,
.driver = {
.name = "ep93xx-i2s",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/cirrus/simone.c b/sound/soc/cirrus/simone.c
index 822a19a89e74..1ec661834e5a 100644
--- a/sound/soc/cirrus/simone.c
+++ b/sound/soc/cirrus/simone.c
@@ -74,7 +74,6 @@ static int simone_remove(struct platform_device *pdev)
static struct platform_driver simone_driver = {
.driver = {
.name = "simone-audio",
- .owner = THIS_MODULE,
},
.probe = simone_probe,
.remove = simone_remove,
diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
index 5b68b106cfc2..98089df08df6 100644
--- a/sound/soc/cirrus/snappercl15.c
+++ b/sound/soc/cirrus/snappercl15.c
@@ -123,7 +123,6 @@ static int snappercl15_remove(struct platform_device *pdev)
static struct platform_driver snappercl15_driver = {
.driver = {
.name = "snappercl15-audio",
- .owner = THIS_MODULE,
},
.probe = snappercl15_probe,
.remove = snappercl15_remove,
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index e88a6b67f781..a2bf27f4baab 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1423,7 +1423,6 @@ static int pm860x_codec_remove(struct platform_device *pdev)
static struct platform_driver pm860x_codec_driver = {
.driver = {
.name = "88pm860x-codec",
- .owner = THIS_MODULE,
},
.probe = pm860x_codec_probe,
.remove = pm860x_codec_remove,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a68d1731a8fd..883c5778b309 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -50,7 +50,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CS42L73 if I2C
select SND_SOC_CS4265 if I2C
select SND_SOC_CS4270 if I2C
- select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
+ select SND_SOC_CS4271_I2C if I2C
+ select SND_SOC_CS4271_SPI if SPI_MASTER
select SND_SOC_CS42XX8_I2C if I2C
select SND_SOC_CX20442 if TTY
select SND_SOC_DA7210 if I2C
@@ -85,7 +86,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_RT5645 if I2C
select SND_SOC_RT5651 if I2C
select SND_SOC_RT5670 if I2C
- select SND_SOC_RT5677 if I2C
+ select SND_SOC_RT5677 if I2C && SPI_MASTER
select SND_SOC_SGTL5000 if I2C
select SND_SOC_SI476X if MFD_SI476X_CORE
select SND_SOC_SIRF_AUDIO_CODEC
@@ -101,6 +102,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
select SND_SOC_TAS2552 if I2C
select SND_SOC_TAS5086 if I2C
+ select SND_SOC_TFA9879 if I2C
select SND_SOC_TLV320AIC23_I2C if I2C
select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
select SND_SOC_TLV320AIC26 if SPI_MASTER
@@ -109,6 +111,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TLV320AIC3X if I2C
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
+ select SND_SOC_TS3A227E if I2C
select SND_SOC_TWL4030 if TWL4030_CORE
select SND_SOC_TWL6040 if TWL6040_CORE
select SND_SOC_UDA134X
@@ -223,6 +226,7 @@ config SND_SOC_AD193X_I2C
select SND_SOC_AD193X
config SND_SOC_AD1980
+ select REGMAP_AC97
tristate
config SND_SOC_AD73311
@@ -336,7 +340,8 @@ config SND_SOC_CS42L51
tristate
config SND_SOC_CS42L51_I2C
- tristate
+ tristate "Cirrus Logic CS42L51 CODEC (I2C)"
+ depends on I2C
select SND_SOC_CS42L51
config SND_SOC_CS42L52
@@ -370,8 +375,19 @@ config SND_SOC_CS4270_VD33_ERRATA
depends on SND_SOC_CS4270
config SND_SOC_CS4271
- tristate "Cirrus Logic CS4271 CODEC"
- depends on SND_SOC_I2C_AND_SPI
+ tristate
+
+config SND_SOC_CS4271_I2C
+ tristate "Cirrus Logic CS4271 CODEC (I2C)"
+ depends on I2C
+ select SND_SOC_CS4271
+ select REGMAP_I2C
+
+config SND_SOC_CS4271_SPI
+ tristate "Cirrus Logic CS4271 CODEC (SPI)"
+ depends on SPI_MASTER
+ select SND_SOC_CS4271
+ select REGMAP_SPI
config SND_SOC_CS42XX8
tristate
@@ -487,7 +503,8 @@ config SND_SOC_RT286
depends on I2C
config SND_SOC_RT5631
- tristate
+ tristate "Realtek ALC5631/RT5631 CODEC"
+ depends on I2C
config SND_SOC_RT5640
tristate
@@ -504,6 +521,10 @@ config SND_SOC_RT5670
config SND_SOC_RT5677
tristate
+config SND_SOC_RT5677_SPI
+ tristate
+ default SND_SOC_RT5677
+
#Freescale sgtl5000 codec
config SND_SOC_SGTL5000
tristate "Freescale SGTL5000 CODEC"
@@ -577,15 +598,21 @@ config SND_SOC_TAS5086
tristate "Texas Instruments TAS5086 speaker amplifier"
depends on I2C
+config SND_SOC_TFA9879
+ tristate "NXP Semiconductors TFA9879 amplifier"
+ depends on I2C
+
config SND_SOC_TLV320AIC23
tristate
config SND_SOC_TLV320AIC23_I2C
- tristate
+ tristate "Texas Instruments TLV320AIC23 audio CODEC - I2C"
+ depends on I2C
select SND_SOC_TLV320AIC23
config SND_SOC_TLV320AIC23_SPI
- tristate
+ tristate "Texas Instruments TLV320AIC23 audio CODEC - SPI"
+ depends on SPI_MASTER
select SND_SOC_TLV320AIC23
config SND_SOC_TLV320AIC26
@@ -607,6 +634,10 @@ config SND_SOC_TLV320AIC3X
config SND_SOC_TLV320DAC33
tristate
+config SND_SOC_TS3A227E
+ tristate "TI Headset/Mic detect and keypress chip"
+ depends on I2C
+
config SND_SOC_TWL4030
select MFD_TWL4030_AUDIO
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 5dce451661e4..bbdfd1e1c182 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -41,6 +41,8 @@ snd-soc-cs42l73-objs := cs42l73.o
snd-soc-cs4265-objs := cs4265.o
snd-soc-cs4270-objs := cs4270.o
snd-soc-cs4271-objs := cs4271.o
+snd-soc-cs4271-i2c-objs := cs4271-i2c.o
+snd-soc-cs4271-spi-objs := cs4271-spi.o
snd-soc-cs42xx8-objs := cs42xx8.o
snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
snd-soc-cx20442-objs := cx20442.o
@@ -80,6 +82,7 @@ snd-soc-rt5645-objs := rt5645.o
snd-soc-rt5651-objs := rt5651.o
snd-soc-rt5670-objs := rt5670.o
snd-soc-rt5677-objs := rt5677.o
+snd-soc-rt5677-spi-objs := rt5677-spi.o
snd-soc-sgtl5000-objs := sgtl5000.o
snd-soc-alc5623-objs := alc5623.o
snd-soc-alc5632-objs := alc5632.o
@@ -101,6 +104,7 @@ snd-soc-sta350-objs := sta350.o
snd-soc-sta529-objs := sta529.o
snd-soc-stac9766-objs := stac9766.o
snd-soc-tas5086-objs := tas5086.o
+snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
snd-soc-tlv320aic23-spi-objs := tlv320aic23-spi.o
@@ -109,6 +113,7 @@ snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o
snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-ts3a227e-objs := ts3a227e.o
snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
snd-soc-uda134x-objs := uda134x.o
@@ -217,6 +222,8 @@ obj-$(CONFIG_SND_SOC_CS42L73) += snd-soc-cs42l73.o
obj-$(CONFIG_SND_SOC_CS4265) += snd-soc-cs4265.o
obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
+obj-$(CONFIG_SND_SOC_CS4271_I2C) += snd-soc-cs4271-i2c.o
+obj-$(CONFIG_SND_SOC_CS4271_SPI) += snd-soc-cs4271-spi.o
obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o
obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
@@ -256,6 +263,7 @@ obj-$(CONFIG_SND_SOC_RT5645) += snd-soc-rt5645.o
obj-$(CONFIG_SND_SOC_RT5651) += snd-soc-rt5651.o
obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
+obj-$(CONFIG_SND_SOC_RT5677_SPI) += snd-soc-rt5677-spi.o
obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
obj-$(CONFIG_SND_SOC_SIGMADSP_I2C) += snd-soc-sigmadsp-i2c.o
@@ -274,6 +282,7 @@ obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
+obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o
@@ -282,6 +291,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC31XX) += snd-soc-tlv320aic31xx.o
obj-$(CONFIG_SND_SOC_TLV320AIC32X4) += snd-soc-tlv320aic32x4.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index fd43827bb856..7895689588da 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -126,13 +126,13 @@ struct ab8500_codec_drvdata_dbg {
/* Private data for AB8500 device-driver */
struct ab8500_codec_drvdata {
struct regmap *regmap;
+ struct mutex ctrl_lock;
/* Sidetone */
long *sid_fir_values;
enum sid_state sid_status;
/* ANC */
- struct mutex anc_lock;
long *anc_fir_values;
long *anc_iir_values;
enum anc_state anc_status;
@@ -1129,9 +1129,9 @@ static int sid_status_control_get(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
- mutex_lock(&codec->mutex);
+ mutex_lock(&drvdata->ctrl_lock);
ucontrol->value.integer.value[0] = drvdata->sid_status;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&drvdata->ctrl_lock);
return 0;
}
@@ -1154,7 +1154,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol,
return -EIO;
}
- mutex_lock(&codec->mutex);
+ mutex_lock(&drvdata->ctrl_lock);
sidconf = snd_soc_read(codec, AB8500_SIDFIRCONF);
if (((sidconf & BIT(AB8500_SIDFIRCONF_FIRSIDBUSY)) != 0)) {
@@ -1185,7 +1185,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol,
drvdata->sid_status = SID_FIR_CONFIGURED;
out:
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&drvdata->ctrl_lock);
dev_dbg(codec->dev, "%s: Exit\n", __func__);
@@ -1198,9 +1198,9 @@ static int anc_status_control_get(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
- mutex_lock(&codec->mutex);
+ mutex_lock(&drvdata->ctrl_lock);
ucontrol->value.integer.value[0] = drvdata->anc_status;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&drvdata->ctrl_lock);
return 0;
}
@@ -1217,7 +1217,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
dev_dbg(dev, "%s: Enter.\n", __func__);
- mutex_lock(&drvdata->anc_lock);
+ mutex_lock(&drvdata->ctrl_lock);
req = ucontrol->value.integer.value[0];
if (req >= ARRAY_SIZE(enum_anc_state)) {
@@ -1244,9 +1244,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
}
snd_soc_dapm_sync(&codec->dapm);
- mutex_lock(&codec->mutex);
anc_configure(codec, apply_fir, apply_iir);
- mutex_unlock(&codec->mutex);
if (apply_fir) {
if (drvdata->anc_status == ANC_IIR_CONFIGURED)
@@ -1265,7 +1263,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
snd_soc_dapm_sync(&codec->dapm);
cleanup:
- mutex_unlock(&drvdata->anc_lock);
+ mutex_unlock(&drvdata->ctrl_lock);
if (status < 0)
dev_err(dev, "%s: Unable to configure ANC! (status = %d)\n",
@@ -1294,14 +1292,15 @@ static int filter_control_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = snd_soc_codec_get_drvdata(codec);
struct filter_control *fc =
(struct filter_control *)kcontrol->private_value;
unsigned int i;
- mutex_lock(&codec->mutex);
+ mutex_lock(&drvdata->ctrl_lock);
for (i = 0; i < fc->count; i++)
ucontrol->value.integer.value[i] = fc->value[i];
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&drvdata->ctrl_lock);
return 0;
}
@@ -1310,14 +1309,15 @@ static int filter_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = snd_soc_codec_get_drvdata(codec);
struct filter_control *fc =
(struct filter_control *)kcontrol->private_value;
unsigned int i;
- mutex_lock(&codec->mutex);
+ mutex_lock(&drvdata->ctrl_lock);
for (i = 0; i < fc->count; i++)
fc->value[i] = ucontrol->value.integer.value[i];
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&drvdata->ctrl_lock);
return 0;
}
@@ -2545,7 +2545,7 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
(void)snd_soc_dapm_disable_pin(&codec->dapm, "ANC Configure Input");
- mutex_init(&drvdata->anc_lock);
+ mutex_init(&drvdata->ctrl_lock);
return status;
}
@@ -2609,7 +2609,6 @@ static int ab8500_codec_driver_remove(struct platform_device *pdev)
static struct platform_driver ab8500_codec_platform_driver = {
.driver = {
.name = "ab8500-codec",
- .owner = THIS_MODULE,
},
.probe = ab8500_codec_driver_probe,
.remove = ab8500_codec_driver_remove,
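The ab8500-codec hunks above replace every use of the ASoC core's codec->mutex with a single driver-owned ctrl_lock kept in the private data. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver's own:

#include <linux/mutex.h>
#include <sound/soc.h>

struct foo_drvdata {
	struct mutex ctrl_lock;	/* serializes the driver's custom kcontrols */
	long status;
};

static int foo_status_get(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct foo_drvdata *drvdata = dev_get_drvdata(codec->dev);

	/* take the driver's own lock instead of the core's codec->mutex */
	mutex_lock(&drvdata->ctrl_lock);
	ucontrol->value.integer.value[0] = drvdata->status;
	mutex_unlock(&drvdata->ctrl_lock);

	return 0;
}

As in the patch, the lock is initialized once with mutex_init(&drvdata->ctrl_lock) in the codec probe callback.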
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index bd9b1839c8b0..d0ac723eee32 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -37,10 +37,11 @@ static int ac97_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE;
- return snd_ac97_set_rate(codec->ac97, reg, substream->runtime->rate);
+ return snd_ac97_set_rate(ac97, reg, substream->runtime->rate);
}
#define STD_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
@@ -53,7 +54,6 @@ static const struct snd_soc_dai_ops ac97_dai_ops = {
static struct snd_soc_dai_driver ac97_dai = {
.name = "ac97-hifi",
- .ac97_control = 1,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 1,
@@ -71,6 +71,7 @@ static struct snd_soc_dai_driver ac97_dai = {
static int ac97_soc_probe(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97;
struct snd_ac97_bus *ac97_bus;
struct snd_ac97_template ac97_template;
int ret;
@@ -82,24 +83,31 @@ static int ac97_soc_probe(struct snd_soc_codec *codec)
return ret;
memset(&ac97_template, 0, sizeof(struct snd_ac97_template));
- ret = snd_ac97_mixer(ac97_bus, &ac97_template, &codec->ac97);
+ ret = snd_ac97_mixer(ac97_bus, &ac97_template, &ac97);
if (ret < 0)
return ret;
+ snd_soc_codec_set_drvdata(codec, ac97);
+
return 0;
}
#ifdef CONFIG_PM
static int ac97_soc_suspend(struct snd_soc_codec *codec)
{
- snd_ac97_suspend(codec->ac97);
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ snd_ac97_suspend(ac97);
return 0;
}
static int ac97_soc_resume(struct snd_soc_codec *codec)
{
- snd_ac97_resume(codec->ac97);
+
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ snd_ac97_resume(ac97);
return 0;
}
@@ -134,7 +142,6 @@ static int ac97_remove(struct platform_device *pdev)
static struct platform_driver ac97_codec_driver = {
.driver = {
.name = "ac97-codec",
- .owner = THIS_MODULE,
},
.probe = ac97_probe,
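The ac97.c change stores the snd_ac97 handle in the codec drvdata at probe time and re-fetches it in every callback, since the codec->ac97 field is gone. Condensed to one callback (the foo_ name is illustrative; the store side is the snd_soc_codec_set_drvdata() call shown above):

#include <sound/ac97_codec.h>
#include <sound/soc.h>

static int foo_soc_suspend(struct snd_soc_codec *codec)
{
	/* handle saved by probe via snd_soc_codec_set_drvdata(codec, ac97)
	 * right after snd_ac97_mixer() succeeded */
	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);

	snd_ac97_suspend(ac97);
	return 0;
}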
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 6844d0b2af68..387530b0b0fd 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -72,11 +72,13 @@ static const struct snd_kcontrol_new ad193x_snd_controls[] = {
};
static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = {
- SND_SOC_DAPM_DAC("DAC", "Playback", AD193X_DAC_CTRL0, 0, 1),
+ SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_PGA("DAC Output", AD193X_DAC_CTRL0, 0, 1, NULL, 0),
SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_SUPPLY("PLL_PWR", AD193X_PLL_CLK_CTRL0, 0, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC_PWR", AD193X_ADC_CTRL0, 0, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("SYSCLK", AD193X_PLL_CLK_CTRL0, 7, 0, NULL, 0),
+ SND_SOC_DAPM_VMID("VMID"),
SND_SOC_DAPM_OUTPUT("DAC1OUT"),
SND_SOC_DAPM_OUTPUT("DAC2OUT"),
SND_SOC_DAPM_OUTPUT("DAC3OUT"),
@@ -87,13 +89,15 @@ static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = {
static const struct snd_soc_dapm_route audio_paths[] = {
{ "DAC", NULL, "SYSCLK" },
+ { "DAC Output", NULL, "DAC" },
+ { "DAC Output", NULL, "VMID" },
{ "ADC", NULL, "SYSCLK" },
{ "DAC", NULL, "ADC_PWR" },
{ "ADC", NULL, "ADC_PWR" },
- { "DAC1OUT", NULL, "DAC" },
- { "DAC2OUT", NULL, "DAC" },
- { "DAC3OUT", NULL, "DAC" },
- { "DAC4OUT", NULL, "DAC" },
+ { "DAC1OUT", NULL, "DAC Output" },
+ { "DAC2OUT", NULL, "DAC Output" },
+ { "DAC3OUT", NULL, "DAC Output" },
+ { "DAC4OUT", NULL, "DAC Output" },
{ "ADC", NULL, "ADC1IN" },
{ "ADC", NULL, "ADC2IN" },
{ "SYSCLK", NULL, "PLL_PWR" },
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 304d3003339a..3cc69a626454 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -24,34 +24,86 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/soc.h>
-#include "ad1980.h"
+static const struct reg_default ad1980_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x0c, 0x8008 },
+ { 0x0e, 0x8008 },
+ { 0x10, 0x8808 },
+ { 0x12, 0x8808 },
+ { 0x16, 0x8808 },
+ { 0x18, 0x8808 },
+ { 0x1a, 0x0000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x28, 0x03c7 },
+ { 0x2c, 0xbb80 },
+ { 0x2e, 0xbb80 },
+ { 0x30, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x36, 0x8080 },
+ { 0x38, 0x8080 },
+ { 0x3a, 0x2000 },
+ { 0x60, 0x0000 },
+ { 0x62, 0x0000 },
+ { 0x72, 0x0000 },
+ { 0x74, 0x1001 },
+ { 0x76, 0x0000 },
+};
-/*
- * AD1980 register cache
- */
-static const u16 ad1980_reg[] = {
- 0x0090, 0x8000, 0x8000, 0x8000, /* 0 - 6 */
- 0x0000, 0x0000, 0x8008, 0x8008, /* 8 - e */
- 0x8808, 0x8808, 0x0000, 0x8808, /* 10 - 16 */
- 0x8808, 0x0000, 0x8000, 0x0000, /* 18 - 1e */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 20 - 26 */
- 0x03c7, 0x0000, 0xbb80, 0xbb80, /* 28 - 2e */
- 0xbb80, 0xbb80, 0x0000, 0x8080, /* 30 - 36 */
- 0x8080, 0x2000, 0x0000, 0x0000, /* 38 - 3e */
- 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */
- 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */
- 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */
- 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */
- 0x8080, 0x0000, 0x0000, 0x0000, /* 60 - 66 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */
- 0x0000, 0x0000, 0x1001, 0x0000, /* 70 - 76 */
- 0x0000, 0x0000, 0x4144, 0x5370 /* 78 - 7e */
+static bool ad1980_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_RESET ... AC97_MASTER_MONO:
+ case AC97_PHONE ... AC97_CD:
+ case AC97_AUX ... AC97_GENERAL_PURPOSE:
+ case AC97_POWERDOWN ... AC97_PCM_LR_ADC_RATE:
+ case AC97_SPDIF:
+ case AC97_CODEC_CLASS_REV:
+ case AC97_PCI_SVID:
+ case AC97_AD_CODEC_CFG:
+ case AC97_AD_JACK_SPDIF:
+ case AC97_AD_SERIAL_CFG:
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ad1980_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ return false;
+ default:
+ return ad1980_readable_reg(dev, reg);
+ }
+}
+
+static const struct regmap_config ad1980_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = regmap_ac97_default_volatile,
+ .readable_reg = ad1980_readable_reg,
+ .writeable_reg = ad1980_writeable_reg,
+
+ .reg_defaults = ad1980_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ad1980_reg_defaults),
};
static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line",
@@ -134,45 +186,8 @@ static const struct snd_soc_dapm_route ad1980_dapm_routes[] = {
{ "HP_OUT_R", NULL, "Playback" },
};
-static unsigned int ac97_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
-
- switch (reg) {
- case AC97_RESET:
- case AC97_INT_PAGING:
- case AC97_POWERDOWN:
- case AC97_EXTENDED_STATUS:
- case AC97_VENDOR_ID1:
- case AC97_VENDOR_ID2:
- return soc_ac97_ops->read(codec->ac97, reg);
- default:
- reg = reg >> 1;
-
- if (reg >= ARRAY_SIZE(ad1980_reg))
- return -EINVAL;
-
- return cache[reg];
- }
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- u16 *cache = codec->reg_cache;
-
- soc_ac97_ops->write(codec->ac97, reg, val);
- reg = reg >> 1;
- if (reg < ARRAY_SIZE(ad1980_reg))
- cache[reg] = val;
-
- return 0;
-}
-
static struct snd_soc_dai_driver ad1980_dai = {
.name = "ad1980-hifi",
- .ac97_control = 1,
.playback = {
.stream_name = "Playback",
.channels_min = 2,
@@ -189,108 +204,115 @@ static struct snd_soc_dai_driver ad1980_dai = {
static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
unsigned int retry_cnt = 0;
do {
if (try_warm && soc_ac97_ops->warm_reset) {
- soc_ac97_ops->warm_reset(codec->ac97);
- if (ac97_read(codec, AC97_RESET) == 0x0090)
+ soc_ac97_ops->warm_reset(ac97);
+ if (snd_soc_read(codec, AC97_RESET) == 0x0090)
return 1;
}
- soc_ac97_ops->reset(codec->ac97);
+ soc_ac97_ops->reset(ac97);
/*
* Set the 16-slot bit in register 74h, then every slot will have only
* 16 bits. This command is sent out in 20-bit mode, in which
* case the first nibble of data is eaten by the addr. (Tag is
* always 16 bit)
*/
- ac97_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
+ snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
- if (ac97_read(codec, AC97_RESET) == 0x0090)
+ if (snd_soc_read(codec, AC97_RESET) == 0x0090)
return 0;
} while (retry_cnt++ < 10);
- printk(KERN_ERR "AD1980 AC97 reset failed\n");
+ dev_err(codec->dev, "Failed to reset: AC97 link error\n");
+
return -EIO;
}
static int ad1980_soc_probe(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97;
+ struct regmap *regmap;
int ret;
u16 vendor_id2;
u16 ext_status;
- printk(KERN_INFO "AD1980 SoC Audio Codec\n");
-
- ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
- if (ret < 0) {
- printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
+ ac97 = snd_soc_new_ac97_codec(codec);
+ if (IS_ERR(ac97)) {
+ ret = PTR_ERR(ac97);
+ dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
return ret;
}
+ regmap = regmap_init_ac97(ac97, &ad1980_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_ac97;
+ }
+
+ snd_soc_codec_init_regmap(codec, regmap);
+ snd_soc_codec_set_drvdata(codec, ac97);
+
ret = ad1980_reset(codec, 0);
- if (ret < 0) {
- printk(KERN_ERR "Failed to reset AD1980: AC97 link error\n");
+ if (ret < 0)
goto reset_err;
- }
/* Read out vendor ID to make sure it is ad1980 */
- if (ac97_read(codec, AC97_VENDOR_ID1) != 0x4144) {
+ if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
ret = -ENODEV;
goto reset_err;
}
- vendor_id2 = ac97_read(codec, AC97_VENDOR_ID2);
+ vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
if (vendor_id2 != 0x5370) {
if (vendor_id2 != 0x5374) {
ret = -ENODEV;
goto reset_err;
} else {
- printk(KERN_WARNING "ad1980: "
- "Found AD1981 - only 2/2 IN/OUT Channels "
- "supported\n");
+ dev_warn(codec->dev,
+ "Found AD1981 - only 2/2 IN/OUT Channels supported\n");
}
}
/* unmute captures and playbacks volume */
- ac97_write(codec, AC97_MASTER, 0x0000);
- ac97_write(codec, AC97_PCM, 0x0000);
- ac97_write(codec, AC97_REC_GAIN, 0x0000);
- ac97_write(codec, AC97_CENTER_LFE_MASTER, 0x0000);
- ac97_write(codec, AC97_SURROUND_MASTER, 0x0000);
+ snd_soc_write(codec, AC97_MASTER, 0x0000);
+ snd_soc_write(codec, AC97_PCM, 0x0000);
+ snd_soc_write(codec, AC97_REC_GAIN, 0x0000);
+ snd_soc_write(codec, AC97_CENTER_LFE_MASTER, 0x0000);
+ snd_soc_write(codec, AC97_SURROUND_MASTER, 0x0000);
/* power on LFE/CENTER/Surround DACs */
- ext_status = ac97_read(codec, AC97_EXTENDED_STATUS);
- ac97_write(codec, AC97_EXTENDED_STATUS, ext_status&~0x3800);
-
- snd_soc_add_codec_controls(codec, ad1980_snd_ac97_controls,
- ARRAY_SIZE(ad1980_snd_ac97_controls));
+ ext_status = snd_soc_read(codec, AC97_EXTENDED_STATUS);
+ snd_soc_write(codec, AC97_EXTENDED_STATUS, ext_status&~0x3800);
return 0;
reset_err:
- snd_soc_free_ac97_codec(codec);
+ snd_soc_codec_exit_regmap(codec);
+err_free_ac97:
+ snd_soc_free_ac97_codec(ac97);
return ret;
}
static int ad1980_soc_remove(struct snd_soc_codec *codec)
{
- snd_soc_free_ac97_codec(codec);
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_codec_exit_regmap(codec);
+ snd_soc_free_ac97_codec(ac97);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
.probe = ad1980_soc_probe,
.remove = ad1980_soc_remove,
- .reg_cache_size = ARRAY_SIZE(ad1980_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = ad1980_reg,
- .reg_cache_step = 2,
- .write = ac97_write,
- .read = ac97_read,
+ .controls = ad1980_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(ad1980_snd_ac97_controls),
.dapm_widgets = ad1980_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
.dapm_routes = ad1980_dapm_routes,
@@ -312,7 +334,6 @@ static int ad1980_remove(struct platform_device *pdev)
static struct platform_driver ad1980_codec_driver = {
.driver = {
.name = "ad1980",
- .owner = THIS_MODULE,
},
.probe = ad1980_probe,
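ad1980.c drops its hand-rolled register cache and ac97_read/ac97_write in favour of an AC'97 regmap, after which plain snd_soc_read()/snd_soc_write() go through the regmap. The probe sequence from the hunks above, condensed (foo_soc_probe is an illustrative name; error paths trimmed to the essentials):

static int foo_soc_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;
	struct regmap *regmap;

	ac97 = snd_soc_new_ac97_codec(codec);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	/* regmap over the AC'97 link, using the patch's config table */
	regmap = regmap_init_ac97(ac97, &ad1980_regmap_config);
	if (IS_ERR(regmap)) {
		snd_soc_free_ac97_codec(ac97);
		return PTR_ERR(regmap);
	}

	snd_soc_codec_init_regmap(codec, regmap);
	snd_soc_codec_set_drvdata(codec, ac97);

	return 0;
}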
diff --git a/sound/soc/codecs/ad1980.h b/sound/soc/codecs/ad1980.h
deleted file mode 100644
index eb0af44ad3df..000000000000
--- a/sound/soc/codecs/ad1980.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * ad1980.h -- ad1980 Soc Audio driver
- *
- * WARNING:
- *
- * Because Analog Devices Inc. discontinued the ad1980 sound chip since
- * Sep. 2009, this ad1980 driver is not maintained, tested and supported
- * by ADI now.
- */
-
-#ifndef _AD1980_H
-#define _AD1980_H
-/* Bit definition of Power-Down Control/Status Register */
-#define ADC 0x0001
-#define DAC 0x0002
-#define ANL 0x0004
-#define REF 0x0008
-#define PR0 0x0100
-#define PR1 0x0200
-#define PR2 0x0400
-#define PR3 0x0800
-#define PR4 0x1000
-#define PR5 0x2000
-#define PR6 0x4000
-
-#endif
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index 5fac8adbc136..a9400aef60b5 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -76,7 +76,6 @@ static int ad73311_remove(struct platform_device *pdev)
static struct platform_driver ad73311_codec_driver = {
.driver = {
.name = "ad73311",
- .owner = THIS_MODULE,
},
.probe = ad73311_probe,
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 7c784ad3e8b2..783dcb57043a 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -551,7 +551,7 @@ static const struct snd_kcontrol_new adau1373_drc_controls[] = {
static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int pll_id = w->name[3] - '1';
unsigned int val;
@@ -823,7 +823,7 @@ static const struct snd_soc_dapm_widget adau1373_dapm_widgets[] = {
static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct snd_soc_codec *codec = source->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int dai;
const char *clk;
@@ -844,7 +844,7 @@ static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source,
static int adau1373_check_src(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct snd_soc_codec *codec = source->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int dai;
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index 370b742117ef..d4e219b6b98f 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -22,9 +22,14 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <asm/unaligned.h>
+
#include "sigmadsp.h"
#include "adau1701.h"
+#define ADAU1701_SAFELOAD_DATA(i) (0x0810 + (i))
+#define ADAU1701_SAFELOAD_ADDR(i) (0x0815 + (i))
+
#define ADAU1701_DSPCTRL 0x081c
#define ADAU1701_SEROCTL 0x081e
#define ADAU1701_SERICTL 0x081f
@@ -42,6 +47,7 @@
#define ADAU1701_DSPCTRL_CR (1 << 2)
#define ADAU1701_DSPCTRL_DAM (1 << 3)
#define ADAU1701_DSPCTRL_ADM (1 << 4)
+#define ADAU1701_DSPCTRL_IST (1 << 5)
#define ADAU1701_DSPCTRL_SR_48 0x00
#define ADAU1701_DSPCTRL_SR_96 0x01
#define ADAU1701_DSPCTRL_SR_192 0x02
@@ -102,7 +108,10 @@ struct adau1701 {
unsigned int pll_clkdiv;
unsigned int sysclk;
struct regmap *regmap;
+ struct i2c_client *client;
u8 pin_config[12];
+
+ struct sigmadsp *sigmadsp;
};
static const struct snd_kcontrol_new adau1701_controls[] = {
@@ -159,6 +168,7 @@ static bool adau1701_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case ADAU1701_DACSET:
+ case ADAU1701_DSPCTRL:
return true;
default:
return false;
@@ -238,12 +248,58 @@ static int adau1701_reg_read(void *context, unsigned int reg,
return 0;
}
-static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
+static int adau1701_safeload(struct sigmadsp *sigmadsp, unsigned int addr,
+ const uint8_t bytes[], size_t len)
+{
+ struct i2c_client *client = to_i2c_client(sigmadsp->dev);
+ struct adau1701 *adau1701 = i2c_get_clientdata(client);
+ unsigned int val;
+ unsigned int i;
+ uint8_t buf[10];
+ int ret;
+
+ ret = regmap_read(adau1701->regmap, ADAU1701_DSPCTRL, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADAU1701_DSPCTRL_IST)
+ msleep(50);
+
+ for (i = 0; i < len / 4; i++) {
+ put_unaligned_le16(ADAU1701_SAFELOAD_DATA(i), buf);
+ buf[2] = 0x00;
+ memcpy(buf + 3, bytes + i * 4, 4);
+ ret = i2c_master_send(client, buf, 7);
+ if (ret < 0)
+ return ret;
+ else if (ret != 7)
+ return -EIO;
+
+ put_unaligned_le16(ADAU1701_SAFELOAD_ADDR(i), buf);
+ put_unaligned_le16(addr + i, buf + 2);
+ ret = i2c_master_send(client, buf, 4);
+ if (ret < 0)
+ return ret;
+ else if (ret != 4)
+ return -EIO;
+ }
+
+ return regmap_update_bits(adau1701->regmap, ADAU1701_DSPCTRL,
+ ADAU1701_DSPCTRL_IST, ADAU1701_DSPCTRL_IST);
+}
+
+static const struct sigmadsp_ops adau1701_sigmadsp_ops = {
+ .safeload = adau1701_safeload,
+};
+
+static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv,
+ unsigned int rate)
{
struct adau1701 *adau1701 = snd_soc_codec_get_drvdata(codec);
- struct i2c_client *client = to_i2c_client(codec->dev);
int ret;
+ sigmadsp_reset(adau1701->sigmadsp);
+
if (clkdiv != ADAU1707_CLKDIV_UNSET &&
gpio_is_valid(adau1701->gpio_pll_mode[0]) &&
gpio_is_valid(adau1701->gpio_pll_mode[1])) {
@@ -284,7 +340,7 @@ static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
* know the correct PLL setup
*/
if (clkdiv != ADAU1707_CLKDIV_UNSET) {
- ret = process_sigma_firmware(client, ADAU1701_FIRMWARE);
+ ret = sigmadsp_setup(adau1701->sigmadsp, rate);
if (ret) {
dev_warn(codec->dev, "Failed to load firmware\n");
return ret;
@@ -385,7 +441,7 @@ static int adau1701_hw_params(struct snd_pcm_substream *substream,
* firmware upload.
*/
if (clkdiv != adau1701->pll_clkdiv) {
- ret = adau1701_reset(codec, clkdiv);
+ ret = adau1701_reset(codec, clkdiv, params_rate(params));
if (ret < 0)
return ret;
}
@@ -554,6 +610,14 @@ static int adau1701_set_sysclk(struct snd_soc_codec *codec, int clk_id,
return 0;
}
+static int adau1701_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct adau1701 *adau1701 = snd_soc_codec_get_drvdata(dai->codec);
+
+ return sigmadsp_restrict_params(adau1701->sigmadsp, substream);
+}
+
#define ADAU1701_RATES (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | \
SNDRV_PCM_RATE_192000)
@@ -564,6 +628,7 @@ static const struct snd_soc_dai_ops adau1701_dai_ops = {
.set_fmt = adau1701_set_dai_fmt,
.hw_params = adau1701_hw_params,
.digital_mute = adau1701_digital_mute,
+ .startup = adau1701_startup,
};
static struct snd_soc_dai_driver adau1701_dai = {
@@ -600,6 +665,10 @@ static int adau1701_probe(struct snd_soc_codec *codec)
unsigned int val;
struct adau1701 *adau1701 = snd_soc_codec_get_drvdata(codec);
+ ret = sigmadsp_attach(adau1701->sigmadsp, &codec->component);
+ if (ret)
+ return ret;
+
/*
* Let the pll_clkdiv variable default to something that won't happen
* at runtime. That way, we can postpone the firmware download from
@@ -609,7 +678,7 @@ static int adau1701_probe(struct snd_soc_codec *codec)
adau1701->pll_clkdiv = ADAU1707_CLKDIV_UNSET;
/* initialize with pre-configured PLL mode settings */
- ret = adau1701_reset(codec, adau1701->pll_clkdiv);
+ ret = adau1701_reset(codec, adau1701->pll_clkdiv, 0);
if (ret < 0)
return ret;
@@ -667,6 +736,7 @@ static int adau1701_i2c_probe(struct i2c_client *client,
if (!adau1701)
return -ENOMEM;
+ adau1701->client = client;
adau1701->regmap = devm_regmap_init(dev, NULL, client,
&adau1701_regmap);
if (IS_ERR(adau1701->regmap))
@@ -722,6 +792,12 @@ static int adau1701_i2c_probe(struct i2c_client *client,
adau1701->gpio_pll_mode[1] = gpio_pll_mode[1];
i2c_set_clientdata(client, adau1701);
+
+ adau1701->sigmadsp = devm_sigmadsp_init_i2c(client,
+ &adau1701_sigmadsp_ops, ADAU1701_FIRMWARE);
+ if (IS_ERR(adau1701->sigmadsp))
+ return PTR_ERR(adau1701->sigmadsp);
+
ret = snd_soc_register_codec(&client->dev, &adau1701_codec_drv,
&adau1701_dai, 1);
return ret;
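adau1701.c hands firmware loading over to the sigmadsp helper: the I2C probe binds a sigmadsp instance to the firmware file, and the reset path loads the program for the current rate through sigmadsp_setup() instead of process_sigma_firmware(). Two illustrative wrappers around the calls visible above (the foo_* names are not in the patch; the referenced symbols are):

static int foo_bind_firmware(struct adau1701 *adau1701,
			     struct i2c_client *client)
{
	/* one sigmadsp instance per device, tied to its firmware file */
	adau1701->sigmadsp = devm_sigmadsp_init_i2c(client,
			&adau1701_sigmadsp_ops, ADAU1701_FIRMWARE);

	return PTR_ERR_OR_ZERO(adau1701->sigmadsp);
}

static int foo_reload_firmware(struct adau1701 *adau1701, unsigned int rate)
{
	/* load the firmware variant that matches the current sample rate */
	return sigmadsp_setup(adau1701->sigmadsp, rate);
}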
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 91f60282fd2f..a1baeee160f4 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -255,7 +255,8 @@ static const struct snd_kcontrol_new adau1761_input_mux_control =
static int adau1761_dejitter_fixup(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct adau *adau = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
/* After any power changes have been made the dejitter circuit
* has to be reinitialized. */
@@ -702,11 +703,6 @@ static int adau1761_codec_probe(struct snd_soc_codec *codec)
ARRAY_SIZE(adau1761_dapm_routes));
if (ret)
return ret;
-
- ret = adau17x1_load_firmware(adau, codec->dev,
- ADAU1761_FIRMWARE);
- if (ret)
- dev_warn(codec->dev, "Failed to firmware\n");
}
ret = adau17x1_add_routes(codec);
@@ -775,16 +771,20 @@ int adau1761_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev))
{
struct snd_soc_dai_driver *dai_drv;
+ const char *firmware_name;
int ret;
- ret = adau17x1_probe(dev, regmap, type, switch_mode);
- if (ret)
- return ret;
-
- if (type == ADAU1361)
+ if (type == ADAU1361) {
dai_drv = &adau1361_dai_driver;
- else
+ firmware_name = NULL;
+ } else {
dai_drv = &adau1761_dai_driver;
+ firmware_name = ADAU1761_FIRMWARE;
+ }
+
+ ret = adau17x1_probe(dev, regmap, type, switch_mode, firmware_name);
+ if (ret)
+ return ret;
return snd_soc_register_codec(dev, &adau1761_codec_driver, dai_drv, 1);
}
@@ -798,6 +798,7 @@ const struct regmap_config adau1761_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(adau1761_reg_defaults),
.readable_reg = adau1761_readable_register,
.volatile_reg = adau17x1_volatile_register,
+ .precious_reg = adau17x1_precious_register,
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_GPL(adau1761_regmap_config);
diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c
index e9fc00fb13dd..35581f43fa6d 100644
--- a/sound/soc/codecs/adau1781.c
+++ b/sound/soc/codecs/adau1781.c
@@ -174,7 +174,7 @@ static const struct snd_kcontrol_new adau1781_mono_mixer_controls[] = {
static int adau1781_dejitter_fixup(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct adau *adau = snd_soc_codec_get_drvdata(codec);
/* After any power changes have been made the dejitter circuit
@@ -385,7 +385,6 @@ static int adau1781_codec_probe(struct snd_soc_codec *codec)
{
struct adau1781_platform_data *pdata = dev_get_platdata(codec->dev);
struct adau *adau = snd_soc_codec_get_drvdata(codec);
- const char *firmware;
int ret;
ret = adau17x1_add_widgets(codec);
@@ -422,25 +421,10 @@ static int adau1781_codec_probe(struct snd_soc_codec *codec)
return ret;
}
- switch (adau->type) {
- case ADAU1381:
- firmware = ADAU1381_FIRMWARE;
- break;
- case ADAU1781:
- firmware = ADAU1781_FIRMWARE;
- break;
- default:
- return -EINVAL;
- }
-
ret = adau17x1_add_routes(codec);
if (ret < 0)
return ret;
- ret = adau17x1_load_firmware(adau, codec->dev, firmware);
- if (ret)
- dev_warn(codec->dev, "Failed to load firmware\n");
-
return 0;
}
@@ -488,6 +472,7 @@ const struct regmap_config adau1781_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(adau1781_reg_defaults),
.readable_reg = adau1781_readable_register,
.volatile_reg = adau17x1_volatile_register,
+ .precious_reg = adau17x1_precious_register,
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL_GPL(adau1781_regmap_config);
@@ -495,9 +480,21 @@ EXPORT_SYMBOL_GPL(adau1781_regmap_config);
int adau1781_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev))
{
+ const char *firmware_name;
int ret;
- ret = adau17x1_probe(dev, regmap, type, switch_mode);
+ switch (type) {
+ case ADAU1381:
+ firmware_name = ADAU1381_FIRMWARE;
+ break;
+ case ADAU1781:
+ firmware_name = ADAU1781_FIRMWARE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = adau17x1_probe(dev, regmap, type, switch_mode, firmware_name);
if (ret)
return ret;
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 3e16c1c64115..fa2e690e51c8 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -61,7 +61,8 @@ static const struct snd_kcontrol_new adau17x1_controls[] = {
static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct adau *adau = snd_soc_codec_get_drvdata(w->codec);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
int ret;
if (SND_SOC_DAPM_EVENT_ON(event)) {
@@ -307,6 +308,7 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
struct adau *adau = snd_soc_codec_get_drvdata(codec);
unsigned int val, div, dsp_div;
unsigned int freq;
+ int ret;
if (adau->clk_src == ADAU17X1_CLK_SRC_PLL)
freq = adau->pll_freq;
@@ -356,6 +358,12 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dsp_div);
}
+ if (adau->sigmadsp) {
+ ret = adau17x1_setup_firmware(adau, params_rate(params));
+ if (ret < 0)
+ return ret;
+ }
+
if (adau->dai_fmt != SND_SOC_DAIFMT_RIGHT_J)
return 0;
@@ -661,12 +669,24 @@ static int adau17x1_set_dai_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
+static int adau17x1_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);
+
+ if (adau->sigmadsp)
+ return sigmadsp_restrict_params(adau->sigmadsp, substream);
+
+ return 0;
+}
+
const struct snd_soc_dai_ops adau17x1_dai_ops = {
.hw_params = adau17x1_hw_params,
.set_sysclk = adau17x1_set_dai_sysclk,
.set_fmt = adau17x1_set_dai_fmt,
.set_pll = adau17x1_set_dai_pll,
.set_tdm_slot = adau17x1_set_dai_tdm_slot,
+ .startup = adau17x1_startup,
};
EXPORT_SYMBOL_GPL(adau17x1_dai_ops);
@@ -687,8 +707,22 @@ int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec,
}
EXPORT_SYMBOL_GPL(adau17x1_set_micbias_voltage);
+bool adau17x1_precious_register(struct device *dev, unsigned int reg)
+{
+ /* SigmaDSP parameter memory */
+ if (reg < 0x400)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(adau17x1_precious_register);
+
bool adau17x1_readable_register(struct device *dev, unsigned int reg)
{
+ /* SigmaDSP parameter memory */
+ if (reg < 0x400)
+ return true;
+
switch (reg) {
case ADAU17X1_CLOCK_CONTROL:
case ADAU17X1_PLL_CONTROL:
@@ -745,8 +779,7 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg)
}
EXPORT_SYMBOL_GPL(adau17x1_volatile_register);
-int adau17x1_load_firmware(struct adau *adau, struct device *dev,
- const char *firmware)
+int adau17x1_setup_firmware(struct adau *adau, unsigned int rate)
{
int ret;
int dspsr;
@@ -758,7 +791,7 @@ int adau17x1_load_firmware(struct adau *adau, struct device *dev,
regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1);
regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf);
- ret = process_sigma_firmware_regmap(dev, adau->regmap, firmware);
+ ret = sigmadsp_setup(adau->sigmadsp, rate);
if (ret) {
regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0);
return ret;
@@ -767,7 +800,7 @@ int adau17x1_load_firmware(struct adau *adau, struct device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(adau17x1_load_firmware);
+EXPORT_SYMBOL_GPL(adau17x1_setup_firmware);
int adau17x1_add_widgets(struct snd_soc_codec *codec)
{
@@ -787,8 +820,21 @@ int adau17x1_add_widgets(struct snd_soc_codec *codec)
ret = snd_soc_dapm_new_controls(&codec->dapm,
adau17x1_dsp_dapm_widgets,
ARRAY_SIZE(adau17x1_dsp_dapm_widgets));
+ if (ret)
+ return ret;
+
+ if (!adau->sigmadsp)
+ return 0;
+
+ ret = sigmadsp_attach(adau->sigmadsp, &codec->component);
+ if (ret) {
+ dev_err(codec->dev, "Failed to attach firmware: %d\n",
+ ret);
+ return ret;
+ }
}
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(adau17x1_add_widgets);
@@ -829,7 +875,8 @@ int adau17x1_resume(struct snd_soc_codec *codec)
EXPORT_SYMBOL_GPL(adau17x1_resume);
int adau17x1_probe(struct device *dev, struct regmap *regmap,
- enum adau17x1_type type, void (*switch_mode)(struct device *dev))
+ enum adau17x1_type type, void (*switch_mode)(struct device *dev),
+ const char *firmware_name)
{
struct adau *adau;
@@ -846,6 +893,16 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, adau);
+ if (firmware_name) {
+ adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap, NULL,
+ firmware_name);
+ if (IS_ERR(adau->sigmadsp)) {
+ dev_warn(dev, "Could not find firmware file: %ld\n",
+ PTR_ERR(adau->sigmadsp));
+ adau->sigmadsp = NULL;
+ }
+ }
+
if (switch_mode)
switch_mode(dev);
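adau17x1.c now couples the SigmaDSP program to the stream: hw_params reloads the firmware for the negotiated rate and the new startup callback lets sigmadsp constrain the advertised rates. Reduced to the two DAI callbacks (foo_* names are placeholders; the real hw_params also does the clock and serial-port setup shown above):

static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);

	/* reload the DSP program matching the negotiated rate */
	if (adau->sigmadsp)
		return adau17x1_setup_firmware(adau, params_rate(params));

	return 0;
}

static int foo_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);

	/* only advertise rates the loaded firmware can actually run at */
	if (adau->sigmadsp)
		return sigmadsp_restrict_params(adau->sigmadsp, substream);

	return 0;
}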
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index e4a557fd7155..e13583e6ff56 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -4,6 +4,8 @@
#include <linux/regmap.h>
#include <linux/platform_data/adau17x1.h>
+#include "sigmadsp.h"
+
enum adau17x1_type {
ADAU1361,
ADAU1761,
@@ -42,22 +44,24 @@ struct adau {
bool dsp_bypass[2];
struct regmap *regmap;
+ struct sigmadsp *sigmadsp;
};
int adau17x1_add_widgets(struct snd_soc_codec *codec);
int adau17x1_add_routes(struct snd_soc_codec *codec);
int adau17x1_probe(struct device *dev, struct regmap *regmap,
- enum adau17x1_type type, void (*switch_mode)(struct device *dev));
+ enum adau17x1_type type, void (*switch_mode)(struct device *dev),
+ const char *firmware_name);
int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec,
enum adau17x1_micbias_voltage micbias);
bool adau17x1_readable_register(struct device *dev, unsigned int reg);
bool adau17x1_volatile_register(struct device *dev, unsigned int reg);
+bool adau17x1_precious_register(struct device *dev, unsigned int reg);
int adau17x1_resume(struct snd_soc_codec *codec);
extern const struct snd_soc_dai_ops adau17x1_dai_ops;
-int adau17x1_load_firmware(struct adau *adau, struct device *dev,
- const char *firmware);
+int adau17x1_setup_firmware(struct adau *adau, unsigned int rate);
bool adau17x1_has_dsp(struct adau *adau);
#define ADAU17X1_CLOCK_CONTROL 0x4000
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index ce3cdca9fc62..b67480f1b1aa 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -212,7 +212,7 @@ static const struct snd_soc_dapm_widget adav80x_dapm_widgets[] = {
static int adav80x_dapm_sysclk_check(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct snd_soc_codec *codec = source->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
const char *clk;
@@ -236,7 +236,7 @@ static int adav80x_dapm_sysclk_check(struct snd_soc_dapm_widget *source,
static int adav80x_dapm_pll_check(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
{
- struct snd_soc_codec *codec = source->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
return adav80x->pll_src == ADAV80X_PLL_SRC_XTAL;
diff --git a/sound/soc/codecs/ads117x.c b/sound/soc/codecs/ads117x.c
index 8f388edff586..1222282e93c3 100644
--- a/sound/soc/codecs/ads117x.c
+++ b/sound/soc/codecs/ads117x.c
@@ -78,7 +78,6 @@ static int ads117x_remove(struct platform_device *pdev)
static struct platform_driver ads117x_codec_driver = {
.driver = {
.name = "ads117x-codec",
- .owner = THIS_MODULE,
},
.probe = ads117x_probe,
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 30e297890fec..9130d916f2f4 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -373,33 +373,9 @@ static struct snd_soc_dai_driver ak4535_dai = {
.ops = &ak4535_dai_ops,
};
-static int ak4535_suspend(struct snd_soc_codec *codec)
-{
- ak4535_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int ak4535_resume(struct snd_soc_codec *codec)
{
snd_soc_cache_sync(codec);
- ak4535_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
-static int ak4535_probe(struct snd_soc_codec *codec)
-{
- /* power on device */
- ak4535_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- snd_soc_add_codec_controls(codec, ak4535_snd_controls,
- ARRAY_SIZE(ak4535_snd_controls));
- return 0;
-}
-
-/* power down chip */
-static int ak4535_remove(struct snd_soc_codec *codec)
-{
- ak4535_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -416,11 +392,12 @@ static const struct regmap_config ak4535_regmap = {
};
static struct snd_soc_codec_driver soc_codec_dev_ak4535 = {
- .probe = ak4535_probe,
- .remove = ak4535_remove,
- .suspend = ak4535_suspend,
.resume = ak4535_resume,
.set_bias_level = ak4535_set_bias_level,
+ .suspend_bias_off = true,
+
+ .controls = ak4535_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4535_snd_controls),
.dapm_widgets = ak4535_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(ak4535_dapm_widgets),
.dapm_routes = ak4535_audio_map,
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
index 79e9555766c0..16ce9f9fefa1 100644
--- a/sound/soc/codecs/ak4554.c
+++ b/sound/soc/codecs/ak4554.c
@@ -93,7 +93,6 @@ MODULE_DEVICE_TABLE(of, ak4554_of_match);
static struct platform_driver ak4554_driver = {
.driver = {
.name = "ak4554-adc-dac",
- .owner = THIS_MODULE,
.of_match_table = ak4554_of_match,
},
.probe = ak4554_soc_probe,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 7afe8f482088..70861c7b1631 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -505,39 +505,7 @@ static struct snd_soc_dai_driver ak4641_dai[] = {
},
};
-static int ak4641_suspend(struct snd_soc_codec *codec)
-{
- ak4641_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int ak4641_resume(struct snd_soc_codec *codec)
-{
- ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
-static int ak4641_probe(struct snd_soc_codec *codec)
-{
- /* power on device */
- ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int ak4641_remove(struct snd_soc_codec *codec)
-{
- ak4641_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-
static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
- .probe = ak4641_probe,
- .remove = ak4641_remove,
- .suspend = ak4641_suspend,
- .resume = ak4641_resume,
.controls = ak4641_snd_controls,
.num_controls = ARRAY_SIZE(ak4641_snd_controls),
.dapm_widgets = ak4641_dapm_widgets,
@@ -545,6 +513,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
.dapm_routes = ak4641_audio_map,
.num_dapm_routes = ARRAY_SIZE(ak4641_audio_map),
.set_bias_level = ak4641_set_bias_level,
+ .suspend_bias_off = true,
};
static const struct regmap_config ak4641_regmap = {
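ak4535 and ak4641 above (and alc5623/alc5632 further down) drop their hand-written probe/remove/suspend bias handlers in favour of the core's suspend_bias_off flag next to set_bias_level; the core already performs the standby-at-probe and off-at-remove transitions. The resulting codec driver structure is little more than this sketch (foo_* members are placeholders):

static struct snd_soc_codec_driver soc_codec_dev_foo = {
	/* core drops bias to OFF across suspend when this flag is set */
	.set_bias_level = foo_set_bias_level,
	.suspend_bias_off = true,

	.controls = foo_snd_controls,
	.num_controls = ARRAY_SIZE(foo_snd_controls),
};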
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 041712592e29..dde8b49c19ad 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -491,23 +491,7 @@ static int ak4642_resume(struct snd_soc_codec *codec)
return 0;
}
-
-static int ak4642_probe(struct snd_soc_codec *codec)
-{
- ak4642_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int ak4642_remove(struct snd_soc_codec *codec)
-{
- ak4642_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
- .probe = ak4642_probe,
- .remove = ak4642_remove,
.resume = ak4642_resume,
.set_bias_level = ak4642_set_bias_level,
.controls = ak4642_snd_controls,
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 998fa0c5a0b9..686cacb0e835 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -611,20 +611,7 @@ static struct snd_soc_dai_driver ak4671_dai = {
.ops = &ak4671_dai_ops,
};
-static int ak4671_probe(struct snd_soc_codec *codec)
-{
- return ak4671_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-}
-
-static int ak4671_remove(struct snd_soc_codec *codec)
-{
- ak4671_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_ak4671 = {
- .probe = ak4671_probe,
- .remove = ak4671_remove,
.set_bias_level = ak4671_set_bias_level,
.controls = ak4671_snd_controls,
.num_controls = ARRAY_SIZE(ak4671_snd_controls),
diff --git a/sound/soc/codecs/ak5386.c b/sound/soc/codecs/ak5386.c
index 8107a1cac876..afa95360826d 100644
--- a/sound/soc/codecs/ak5386.c
+++ b/sound/soc/codecs/ak5386.c
@@ -205,7 +205,6 @@ static struct platform_driver ak5386_driver = {
.remove = ak5386_remove,
.driver = {
.name = "ak5386",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ak5386_dt_ids),
},
};
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 9d0755aa1d16..bdf8c5ac8ca4 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -866,7 +866,6 @@ static int alc5623_suspend(struct snd_soc_codec *codec)
{
struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
- alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
regcache_cache_only(alc5623->regmap, true);
return 0;
@@ -887,15 +886,6 @@ static int alc5623_resume(struct snd_soc_codec *codec)
return ret;
}
- alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- /* charge alc5623 caps */
- if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
- alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- codec->dapm.bias_level = SND_SOC_BIAS_ON;
- alc5623_set_bias_level(codec, codec->dapm.bias_level);
- }
-
return 0;
}
@@ -906,9 +896,6 @@ static int alc5623_probe(struct snd_soc_codec *codec)
alc5623_reset(codec);
- /* power on device */
- alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
if (alc5623->add_ctrl) {
snd_soc_write(codec, ALC5623_ADD_CTRL_REG,
alc5623->add_ctrl);
@@ -964,19 +951,12 @@ static int alc5623_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int alc5623_remove(struct snd_soc_codec *codec)
-{
- alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
.probe = alc5623_probe,
- .remove = alc5623_remove,
.suspend = alc5623_suspend,
.resume = alc5623_resume,
.set_bias_level = alc5623_set_bias_level,
+ .suspend_bias_off = true,
};
static const struct regmap_config alc5623_regmap = {
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 85942ca36cbf..d1fdbc266631 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1038,23 +1038,15 @@ static struct snd_soc_dai_driver alc5632_dai = {
};
#ifdef CONFIG_PM
-static int alc5632_suspend(struct snd_soc_codec *codec)
-{
- alc5632_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int alc5632_resume(struct snd_soc_codec *codec)
{
struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
regcache_sync(alc5632->regmap);
- alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
#else
-#define alc5632_suspend NULL
#define alc5632_resume NULL
#endif
@@ -1062,9 +1054,6 @@ static int alc5632_probe(struct snd_soc_codec *codec)
{
struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
- /* power on device */
- alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
switch (alc5632->id) {
case 0x5c:
snd_soc_add_codec_controls(codec, alc5632_vol_snd_controls,
@@ -1077,19 +1066,12 @@ static int alc5632_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int alc5632_remove(struct snd_soc_codec *codec)
-{
- alc5632_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_device_alc5632 = {
.probe = alc5632_probe,
- .remove = alc5632_remove,
- .suspend = alc5632_suspend,
.resume = alc5632_resume,
.set_bias_level = alc5632_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = alc5632_snd_controls,
.num_controls = ARRAY_SIZE(alc5632_snd_controls),
.dapm_widgets = alc5632_dapm_widgets,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 0c05e7a7945f..9550d7433ad0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -61,6 +61,11 @@
#define ARIZONA_FLL_MIN_OUTDIV 2
#define ARIZONA_FLL_MAX_OUTDIV 7
+#define ARIZONA_FMT_DSP_MODE_A 0
+#define ARIZONA_FMT_DSP_MODE_B 1
+#define ARIZONA_FMT_I2S_MODE 2
+#define ARIZONA_FMT_LEFT_JUSTIFIED_MODE 3
+
#define arizona_fll_err(_fll, fmt, ...) \
dev_err(_fll->arizona->dev, "FLL%d: " fmt, _fll->id, ##__VA_ARGS__)
#define arizona_fll_warn(_fll, fmt, ...) \
@@ -648,7 +653,7 @@ SOC_ENUM_SINGLE_DECL(arizona_in_hpf_cut_enum,
EXPORT_SYMBOL_GPL(arizona_in_hpf_cut_enum);
static const char * const arizona_in_dmic_osr_text[] = {
- "1.536MHz", "3.072MHz", "6.144MHz",
+ "1.536MHz", "3.072MHz", "6.144MHz", "768kHz",
};
const struct soc_enum arizona_in_dmic_osr[] = {
@@ -946,10 +951,26 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
- mode = 0;
+ mode = ARIZONA_FMT_DSP_MODE_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK)
+ != SND_SOC_DAIFMT_CBM_CFM) {
+ arizona_aif_err(dai, "DSP_B not valid in slave mode\n");
+ return -EINVAL;
+ }
+ mode = ARIZONA_FMT_DSP_MODE_B;
break;
case SND_SOC_DAIFMT_I2S:
- mode = 2;
+ mode = ARIZONA_FMT_I2S_MODE;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK)
+ != SND_SOC_DAIFMT_CBM_CFM) {
+ arizona_aif_err(dai, "LEFT_J not valid in slave mode\n");
+ return -EINVAL;
+ }
+ mode = ARIZONA_FMT_LEFT_JUSTIFIED_MODE;
break;
default:
arizona_aif_err(dai, "Unsupported DAI format %d\n",
@@ -1164,13 +1185,13 @@ static void arizona_wm5102_set_dac_comp(struct snd_soc_codec *codec,
{ 0x80, 0x0 },
};
- mutex_lock(&codec->mutex);
+ mutex_lock(&arizona->dac_comp_lock);
dac_comp[1].def = arizona->dac_comp_coeff;
if (rate >= 176400)
dac_comp[2].def = arizona->dac_comp_enabled;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&arizona->dac_comp_lock);
regmap_multi_reg_write(arizona->regmap,
dac_comp,
@@ -1298,7 +1319,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
/* Force multiple of 2 channels for I2S mode */
val = snd_soc_read(codec, base + ARIZONA_AIF_FORMAT);
- if ((channels & 1) && (val & ARIZONA_AIF1_FMT_MASK)) {
+ val &= ARIZONA_AIF1_FMT_MASK;
+ if ((channels & 1) && (val == ARIZONA_FMT_I2S_MODE)) {
arizona_aif_dbg(dai, "Forcing stereo mode\n");
bclk_target /= channels;
bclk_target *= channels + 1;
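The arizona set_fmt change accepts DSP mode B and left-justified mode, but only when the codec is bit/frame master; in slave mode they are rejected with -EINVAL. Seen from a machine driver the constraint looks like this sketch (placeholder function name, assuming the usual snd_soc_pcm_runtime layout of this kernel):

static int foo_machine_set_fmt(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;

	/* DSP_B (or LEFT_J) must be paired with CBM_CFM for arizona */
	return snd_soc_dai_set_fmt(codec_dai,
				   SND_SOC_DAIFMT_DSP_B |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBM_CFM);
}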
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index c4cf0699e77f..5075bf0a7276 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -77,7 +77,6 @@ MODULE_DEVICE_TABLE(platform, bt_sco_driver_ids);
static struct platform_driver bt_sco_driver = {
.driver = {
.name = "bt-sco",
- .owner = THIS_MODULE,
},
.probe = bt_sco_probe,
.remove = bt_sco_remove,
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 537327c7f7f1..d6dedd4eab29 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -62,14 +62,10 @@ static int cq93vc_mute(struct snd_soc_dai *dai, int mute)
static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
- struct snd_soc_codec *codec = codec_dai->codec;
- struct davinci_vc *davinci_vc = codec->dev->platform_data;
-
switch (freq) {
case 22579200:
case 27000000:
case 33868800:
- davinci_vc->cq93vc.sysclk = freq;
return 0;
}
@@ -126,32 +122,6 @@ static struct snd_soc_dai_driver cq93vc_dai = {
.ops = &cq93vc_dai_ops,
};
-static int cq93vc_resume(struct snd_soc_codec *codec)
-{
- cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int cq93vc_probe(struct snd_soc_codec *codec)
-{
- struct davinci_vc *davinci_vc = codec->dev->platform_data;
-
- davinci_vc->cq93vc.codec = codec;
-
- /* Off, with power on */
- cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int cq93vc_remove(struct snd_soc_codec *codec)
-{
- cq93vc_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static struct regmap *cq93vc_get_regmap(struct device *dev)
{
struct davinci_vc *davinci_vc = dev->platform_data;
@@ -161,9 +131,6 @@ static struct regmap *cq93vc_get_regmap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
.set_bias_level = cq93vc_set_bias_level,
- .probe = cq93vc_probe,
- .remove = cq93vc_remove,
- .resume = cq93vc_resume,
.get_regmap = cq93vc_get_regmap,
.controls = cq93vc_snd_controls,
.num_controls = ARRAY_SIZE(cq93vc_snd_controls),
@@ -184,7 +151,6 @@ static int cq93vc_platform_remove(struct platform_device *pdev)
static struct platform_driver cq93vc_codec_driver = {
.driver = {
.name = "cq93vc-codec",
- .owner = THIS_MODULE,
},
.probe = cq93vc_platform_probe,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 4fdd47d700e3..ce6086835ebd 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -32,7 +32,6 @@
#include "cs4265.h"
struct cs4265_private {
- struct device *dev;
struct regmap *regmap;
struct gpio_desc *reset_gpio;
u8 format;
@@ -598,7 +597,6 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
GFP_KERNEL);
if (cs4265 == NULL)
return -ENOMEM;
- cs4265->dev = &i2c_client->dev;
cs4265->regmap = devm_regmap_init_i2c(i2c_client, &cs4265_regmap);
if (IS_ERR(cs4265->regmap)) {
diff --git a/sound/soc/codecs/cs4271-i2c.c b/sound/soc/codecs/cs4271-i2c.c
new file mode 100644
index 000000000000..b264da030340
--- /dev/null
+++ b/sound/soc/codecs/cs4271-i2c.c
@@ -0,0 +1,62 @@
+/*
+ * CS4271 I2C audio driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "cs4271.h"
+
+static int cs4271_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap_config config;
+
+ config = cs4271_regmap_config;
+ config.reg_bits = 8;
+ config.val_bits = 8;
+
+ return cs4271_probe(&client->dev,
+ devm_regmap_init_i2c(client, &config));
+}
+
+static int cs4271_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id cs4271_i2c_id[] = {
+ { "cs4271", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
+
+static struct i2c_driver cs4271_i2c_driver = {
+ .driver = {
+ .name = "cs4271",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cs4271_dt_ids),
+ },
+ .probe = cs4271_i2c_probe,
+ .remove = cs4271_i2c_remove,
+ .id_table = cs4271_i2c_id,
+};
+module_i2c_driver(cs4271_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS4271 I2C Driver");
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4271-spi.c b/sound/soc/codecs/cs4271-spi.c
new file mode 100644
index 000000000000..acd49d86e706
--- /dev/null
+++ b/sound/soc/codecs/cs4271-spi.c
@@ -0,0 +1,55 @@
+/*
+ * CS4271 SPI audio driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "cs4271.h"
+
+static int cs4271_spi_probe(struct spi_device *spi)
+{
+ struct regmap_config config;
+
+ config = cs4271_regmap_config;
+ config.reg_bits = 16;
+ config.val_bits = 8;
+ config.read_flag_mask = 0x21;
+ config.write_flag_mask = 0x20;
+
+ return cs4271_probe(&spi->dev, devm_regmap_init_spi(spi, &config));
+}
+
+static int cs4271_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_codec(&spi->dev);
+ return 0;
+}
+
+static struct spi_driver cs4271_spi_driver = {
+ .driver = {
+ .name = "cs4271",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cs4271_dt_ids),
+ },
+ .probe = cs4271_spi_probe,
+ .remove = cs4271_spi_remove,
+};
+module_spi_driver(cs4271_spi_driver);
+
+MODULE_DESCRIPTION("ASoC CS4271 SPI Driver");
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 93cec52f4733..79a4efcb894c 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -23,8 +23,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -32,6 +30,7 @@
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/cs4271.h>
+#include "cs4271.h"
#define CS4271_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
@@ -527,14 +526,15 @@ static int cs4271_soc_resume(struct snd_soc_codec *codec)
#endif /* CONFIG_PM */
#ifdef CONFIG_OF
-static const struct of_device_id cs4271_dt_ids[] = {
+const struct of_device_id cs4271_dt_ids[] = {
{ .compatible = "cirrus,cs4271", },
{ }
};
MODULE_DEVICE_TABLE(of, cs4271_dt_ids);
+EXPORT_SYMBOL_GPL(cs4271_dt_ids);
#endif
-static int cs4271_probe(struct snd_soc_codec *codec)
+static int cs4271_codec_probe(struct snd_soc_codec *codec)
{
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
@@ -587,7 +587,7 @@ static int cs4271_probe(struct snd_soc_codec *codec)
return 0;
}
-static int cs4271_remove(struct snd_soc_codec *codec)
+static int cs4271_codec_remove(struct snd_soc_codec *codec)
{
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
@@ -599,8 +599,8 @@ static int cs4271_remove(struct snd_soc_codec *codec)
};
static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
- .probe = cs4271_probe,
- .remove = cs4271_remove,
+ .probe = cs4271_codec_probe,
+ .remove = cs4271_codec_remove,
.suspend = cs4271_soc_suspend,
.resume = cs4271_soc_resume,
@@ -642,14 +642,8 @@ static int cs4271_common_probe(struct device *dev,
return 0;
}
-#if defined(CONFIG_SPI_MASTER)
-
-static const struct regmap_config cs4271_spi_regmap = {
- .reg_bits = 16,
- .val_bits = 8,
+const struct regmap_config cs4271_regmap_config = {
.max_register = CS4271_LASTREG,
- .read_flag_mask = 0x21,
- .write_flag_mask = 0x20,
.reg_defaults = cs4271_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs4271_reg_defaults),
@@ -657,140 +651,27 @@ static const struct regmap_config cs4271_spi_regmap = {
.volatile_reg = cs4271_volatile_reg,
};
+EXPORT_SYMBOL_GPL(cs4271_regmap_config);
-static int cs4271_spi_probe(struct spi_device *spi)
+int cs4271_probe(struct device *dev, struct regmap *regmap)
{
struct cs4271_private *cs4271;
int ret;
- ret = cs4271_common_probe(&spi->dev, &cs4271);
- if (ret < 0)
- return ret;
-
- spi_set_drvdata(spi, cs4271);
- cs4271->regmap = devm_regmap_init_spi(spi, &cs4271_spi_regmap);
- if (IS_ERR(cs4271->regmap))
- return PTR_ERR(cs4271->regmap);
-
- return snd_soc_register_codec(&spi->dev, &soc_codec_dev_cs4271,
- &cs4271_dai, 1);
-}
-
-static int cs4271_spi_remove(struct spi_device *spi)
-{
- snd_soc_unregister_codec(&spi->dev);
- return 0;
-}
-
-static struct spi_driver cs4271_spi_driver = {
- .driver = {
- .name = "cs4271",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(cs4271_dt_ids),
- },
- .probe = cs4271_spi_probe,
- .remove = cs4271_spi_remove,
-};
-#endif /* defined(CONFIG_SPI_MASTER) */
-
-#if IS_ENABLED(CONFIG_I2C)
-static const struct i2c_device_id cs4271_i2c_id[] = {
- {"cs4271", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
-static const struct regmap_config cs4271_i2c_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = CS4271_LASTREG,
-
- .reg_defaults = cs4271_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(cs4271_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
-
- .volatile_reg = cs4271_volatile_reg,
-};
-
-static int cs4271_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct cs4271_private *cs4271;
- int ret;
-
- ret = cs4271_common_probe(&client->dev, &cs4271);
+ ret = cs4271_common_probe(dev, &cs4271);
if (ret < 0)
return ret;
- i2c_set_clientdata(client, cs4271);
- cs4271->regmap = devm_regmap_init_i2c(client, &cs4271_i2c_regmap);
- if (IS_ERR(cs4271->regmap))
- return PTR_ERR(cs4271->regmap);
+ dev_set_drvdata(dev, cs4271);
+ cs4271->regmap = regmap;
- return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4271,
- &cs4271_dai, 1);
-}
-
-static int cs4271_i2c_remove(struct i2c_client *client)
-{
- snd_soc_unregister_codec(&client->dev);
- return 0;
-}
-
-static struct i2c_driver cs4271_i2c_driver = {
- .driver = {
- .name = "cs4271",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(cs4271_dt_ids),
- },
- .id_table = cs4271_i2c_id,
- .probe = cs4271_i2c_probe,
- .remove = cs4271_i2c_remove,
-};
-#endif /* IS_ENABLED(CONFIG_I2C) */
-
-/*
- * We only register our serial bus driver here without
- * assignment to particular chip. So if any of the below
- * fails, there is some problem with I2C or SPI subsystem.
- * In most cases this module will be compiled with support
- * of only one serial bus.
- */
-static int __init cs4271_modinit(void)
-{
- int ret;
-
-#if IS_ENABLED(CONFIG_I2C)
- ret = i2c_add_driver(&cs4271_i2c_driver);
- if (ret) {
- pr_err("Failed to register CS4271 I2C driver: %d\n", ret);
- return ret;
- }
-#endif
-
-#if defined(CONFIG_SPI_MASTER)
- ret = spi_register_driver(&cs4271_spi_driver);
- if (ret) {
- pr_err("Failed to register CS4271 SPI driver: %d\n", ret);
- return ret;
- }
-#endif
-
- return 0;
-}
-module_init(cs4271_modinit);
-
-static void __exit cs4271_modexit(void)
-{
-#if defined(CONFIG_SPI_MASTER)
- spi_unregister_driver(&cs4271_spi_driver);
-#endif
-
-#if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&cs4271_i2c_driver);
-#endif
+ return snd_soc_register_codec(dev, &soc_codec_dev_cs4271, &cs4271_dai,
+ 1);
}
-module_exit(cs4271_modexit);
+EXPORT_SYMBOL_GPL(cs4271_probe);
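+/*
+ * Illustrative sketch only: with cs4271_regmap_config and cs4271_probe()
+ * exported above, a bus glue file (for example a hypothetical cs4271-spi.c)
+ * would rebuild the bus-specific regmap fields that were removed here and
+ * hand the result to the common probe, roughly:
+ *
+ *     struct regmap_config config = cs4271_regmap_config;
+ *
+ *     config.reg_bits = 16;
+ *     config.val_bits = 8;
+ *     config.read_flag_mask = 0x21;
+ *     config.write_flag_mask = 0x20;
+ *     return cs4271_probe(&spi->dev,
+ *                         devm_regmap_init_spi(spi, &config));
+ */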
MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
MODULE_DESCRIPTION("Cirrus Logic CS4271 ALSA SoC Codec Driver");
diff --git a/sound/soc/codecs/cs4271.h b/sound/soc/codecs/cs4271.h
new file mode 100644
index 000000000000..9adad8eefdc9
--- /dev/null
+++ b/sound/soc/codecs/cs4271.h
@@ -0,0 +1,11 @@
+#ifndef _CS4271_PRIV_H
+#define _CS4271_PRIV_H
+
+#include <linux/regmap.h>
+
+extern const struct of_device_id cs4271_dt_ids[];
+extern const struct regmap_config cs4271_regmap_config;
+
+int cs4271_probe(struct device *dev, struct regmap *regmap);
+
+#endif
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 669c38fc3034..b3951524339f 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -153,15 +153,17 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
static int cs42l51_pdn_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
switch (event) {
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, CS42L51_POWER_CTL1,
+ snd_soc_update_bits(codec, CS42L51_POWER_CTL1,
CS42L51_POWER_CTL1_PDN,
CS42L51_POWER_CTL1_PDN);
break;
default:
case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(w->codec, CS42L51_POWER_CTL1,
+ snd_soc_update_bits(codec, CS42L51_POWER_CTL1,
CS42L51_POWER_CTL1_PDN, 0);
break;
}
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 2f8b94683e83..7c55537c69cf 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -584,7 +584,7 @@ static const struct snd_kcontrol_new cs42l73_snd_controls[] = {
static int cs42l73_spklo_spk_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMD:
@@ -600,7 +600,7 @@ static int cs42l73_spklo_spk_amp_event(struct snd_soc_dapm_widget *w,
static int cs42l73_ear_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMD:
@@ -618,7 +618,7 @@ static int cs42l73_ear_amp_event(struct snd_soc_dapm_widget *w,
static int cs42l73_hp_amp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMD:
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 8f95b0300f1a..0b10979513c4 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -429,7 +429,6 @@ static int __exit cx20442_platform_remove(struct platform_device *pdev)
static struct platform_driver cx20442_platform_driver = {
.driver = {
.name = "cx20442-codec",
- .owner = THIS_MODULE,
},
.probe = cx20442_platform_probe,
.remove = __exit_p(cx20442_platform_remove),
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index b2090b2a5e2d..fde53251c047 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -74,7 +74,6 @@ MODULE_ALIAS("platform:dmic-codec");
static struct platform_driver dmic_driver = {
.driver = {
.name = "dmic-codec",
- .owner = THIS_MODULE,
},
.probe = dmic_dev_probe,
.remove = dmic_dev_remove,
diff --git a/sound/soc/codecs/hdmi.c b/sound/soc/codecs/hdmi.c
index 1087fd5f9917..bd42ad34e004 100644
--- a/sound/soc/codecs/hdmi.c
+++ b/sound/soc/codecs/hdmi.c
@@ -47,6 +47,7 @@ static struct snd_soc_dai_driver hdmi_codec_dai = {
SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
+ .sig_bits = 24,
},
.capture = {
.stream_name = "Capture",
@@ -75,6 +76,7 @@ static struct snd_soc_codec_driver hdmi_codec = {
.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
.dapm_routes = hdmi_routes,
.num_dapm_routes = ARRAY_SIZE(hdmi_routes),
+ .ignore_pmdown_time = true,
};
static int hdmi_codec_probe(struct platform_device *pdev)
@@ -92,7 +94,6 @@ static int hdmi_codec_remove(struct platform_device *pdev)
static struct platform_driver hdmi_codec_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(hdmi_audio_codec_ids),
},
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index df7c01cf7072..933f4476d76c 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -364,7 +364,6 @@ static struct platform_driver jz4740_codec_driver = {
.remove = jz4740_codec_remove,
.driver = {
.name = "jz4740-codec",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index c1ae5764983f..c4dfde9bdf1c 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -1395,15 +1395,7 @@ static struct snd_soc_dai_driver lm49453_dai[] = {
},
};
-/* power down chip */
-static int lm49453_remove(struct snd_soc_codec *codec)
-{
- lm49453_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_lm49453 = {
- .remove = lm49453_remove,
.set_bias_level = lm49453_set_bias_level,
.controls = lm49453_snd_controls,
.num_controls = ARRAY_SIZE(lm49453_snd_controls),
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 2cd3e5427441..805b3f8cd39d 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -875,7 +875,7 @@ static const struct snd_kcontrol_new max98088_right_ADC_mixer_controls[] = {
static int max98088_mic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -905,7 +905,7 @@ static int max98088_mic_event(struct snd_soc_dapm_widget *w,
static int max98088_line_pga(struct snd_soc_dapm_widget *w,
int event, int line, u8 channel)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
u8 *state;
@@ -1887,25 +1887,6 @@ static void max98088_handle_pdata(struct snd_soc_codec *codec)
max98088_handle_eq_pdata(codec);
}
-#ifdef CONFIG_PM
-static int max98088_suspend(struct snd_soc_codec *codec)
-{
- max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int max98088_resume(struct snd_soc_codec *codec)
-{
- max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define max98088_suspend NULL
-#define max98088_resume NULL
-#endif
-
static int max98088_probe(struct snd_soc_codec *codec)
{
struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
@@ -1946,9 +1927,6 @@ static int max98088_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
- /* initialize registers cache to hardware default */
- max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
snd_soc_write(codec, M98088_REG_0F_IRQ_ENABLE, 0x00);
snd_soc_write(codec, M98088_REG_22_MIX_DAC,
@@ -1974,7 +1952,6 @@ static int max98088_remove(struct snd_soc_codec *codec)
{
struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
- max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
kfree(max98088->eq_texts);
return 0;
@@ -1983,9 +1960,9 @@ static int max98088_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
.probe = max98088_probe,
.remove = max98088_remove,
- .suspend = max98088_suspend,
- .resume = max98088_resume,
.set_bias_level = max98088_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = max98088_snd_controls,
.num_controls = ARRAY_SIZE(max98088_snd_controls),
.dapm_widgets = max98088_dapm_widgets,
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 88a65da3c53c..b112b1c2c394 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -806,7 +806,7 @@ static const struct snd_kcontrol_new max98091_snd_controls[] = {
static int max98090_micinput_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
unsigned int val = snd_soc_read(codec, w->reg);
@@ -1311,6 +1311,10 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
{"MIC1 Input", NULL, "MIC1"},
{"MIC2 Input", NULL, "MIC2"},
+ {"DMICL", NULL, "DMICL_ENA"},
+ {"DMICL", NULL, "DMICR_ENA"},
+ {"DMICR", NULL, "DMICL_ENA"},
+ {"DMICR", NULL, "DMICR_ENA"},
{"DMICL", NULL, "AHPF"},
{"DMICR", NULL, "AHPF"},
@@ -1368,8 +1372,6 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
{"DMIC Mux", "ADC", "ADCR"},
{"DMIC Mux", "DMIC", "DMICL"},
{"DMIC Mux", "DMIC", "DMICR"},
- {"DMIC Mux", "DMIC", "DMICL_ENA"},
- {"DMIC Mux", "DMIC", "DMICR_ENA"},
{"LBENL Mux", "Normal", "DMIC Mux"},
{"LBENL Mux", "Loopback", "LTENL Mux"},
@@ -1395,8 +1397,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
{"STENL Mux", "Sidetone Left", "DMICL"},
{"STENR Mux", "Sidetone Right", "ADCR"},
{"STENR Mux", "Sidetone Right", "DMICR"},
- {"DACL", "NULL", "STENL Mux"},
- {"DACR", "NULL", "STENL Mux"},
+ {"DACL", NULL, "STENL Mux"},
+ {"DACR", NULL, "STENR Mux"},
{"AIFINL", NULL, "SHDN"},
{"AIFINR", NULL, "SHDN"},
@@ -1826,27 +1828,155 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static const int comp_pclk_rates[] = {
- 11289600, 12288000, 12000000, 13000000, 19200000
-};
-
-static const int dmic_micclk[] = {
- 2, 2, 2, 2, 4, 2
-};
+static const int dmic_divisors[] = { 2, 3, 4, 5, 6, 8 };
static const int comp_lrclk_rates[] = {
8000, 16000, 32000, 44100, 48000, 96000
};
-static const int dmic_comp[6][6] = {
- {7, 8, 3, 3, 3, 3},
- {7, 8, 3, 3, 3, 3},
- {7, 8, 3, 3, 3, 3},
- {7, 8, 3, 1, 1, 1},
- {7, 8, 3, 1, 2, 2},
- {7, 8, 3, 3, 3, 3}
+struct dmic_table {
+ int pclk;
+ struct {
+ int freq;
+ int comp[6]; /* One each for 8, 16, 32, 44.1, 48, and 96 kHz */
+ } settings[6]; /* One for each dmic divisor. */
};
+static const struct dmic_table dmic_table[] = { /* One for each pclk freq. */
+ {
+ .pclk = 11289600,
+ .settings = {
+ { .freq = 2, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 1, .comp = { 7, 8, 2, 2, 2, 2 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 6, 6, 6, 6 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ },
+ },
+ {
+ .pclk = 12000000,
+ .settings = {
+ { .freq = 2, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 1, .comp = { 7, 8, 2, 2, 2, 2 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 5, 5, 6, 6 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ }
+ },
+ {
+ .pclk = 12288000,
+ .settings = {
+ { .freq = 2, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 1, .comp = { 7, 8, 2, 2, 2, 2 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 6, 6, 6, 6 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 3, 3, 3, 3 } },
+ }
+ },
+ {
+ .pclk = 13000000,
+ .settings = {
+ { .freq = 2, .comp = { 7, 8, 1, 1, 1, 1 } },
+ { .freq = 1, .comp = { 7, 8, 0, 0, 0, 0 } },
+ { .freq = 0, .comp = { 7, 8, 1, 1, 1, 1 } },
+ { .freq = 0, .comp = { 7, 8, 4, 4, 5, 5 } },
+ { .freq = 0, .comp = { 7, 8, 1, 1, 1, 1 } },
+ { .freq = 0, .comp = { 7, 8, 1, 1, 1, 1 } },
+ }
+ },
+ {
+ .pclk = 19200000,
+ .settings = {
+ { .freq = 2, .comp = { 0, 0, 0, 0, 0, 0 } },
+ { .freq = 1, .comp = { 7, 8, 1, 1, 1, 1 } },
+ { .freq = 0, .comp = { 7, 8, 5, 5, 6, 6 } },
+ { .freq = 0, .comp = { 7, 8, 2, 2, 3, 3 } },
+ { .freq = 0, .comp = { 7, 8, 1, 1, 2, 2 } },
+ { .freq = 0, .comp = { 7, 8, 5, 5, 6, 6 } },
+ }
+ },
+};
+
+static int max98090_find_divisor(int target_freq, int pclk)
+{
+ int current_diff = INT_MAX;
+ int test_diff = INT_MAX;
+ int divisor_index = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dmic_divisors); i++) {
+ test_diff = abs(target_freq - (pclk / dmic_divisors[i]));
+ if (test_diff < current_diff) {
+ current_diff = test_diff;
+ divisor_index = i;
+ }
+ }
+
+ return divisor_index;
+}
+
+static int max98090_find_closest_pclk(int pclk)
+{
+ int m1;
+ int m2;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dmic_table); i++) {
+ if (pclk == dmic_table[i].pclk)
+ return i;
+ if (pclk < dmic_table[i].pclk) {
+ if (i == 0)
+ return i;
+ m1 = pclk - dmic_table[i-1].pclk;
+ m2 = dmic_table[i].pclk - pclk;
+ if (m1 < m2)
+ return i - 1;
+ else
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max98090_configure_dmic(struct max98090_priv *max98090,
+ int target_dmic_clk, int pclk, int fs)
+{
+ int micclk_index;
+ int pclk_index;
+ int dmic_freq;
+ int dmic_comp;
+ int i;
+
+ pclk_index = max98090_find_closest_pclk(pclk);
+ if (pclk_index < 0)
+ return pclk_index;
+
+ micclk_index = max98090_find_divisor(target_dmic_clk, pclk);
+
+ for (i = 0; i < ARRAY_SIZE(comp_lrclk_rates) - 1; i++) {
+ if (fs <= (comp_lrclk_rates[i] + comp_lrclk_rates[i+1]) / 2)
+ break;
+ }
+
+ dmic_freq = dmic_table[pclk_index].settings[micclk_index].freq;
+ dmic_comp = dmic_table[pclk_index].settings[micclk_index].comp[i];
+
+ regmap_update_bits(max98090->regmap, M98090_REG_DIGITAL_MIC_ENABLE,
+ M98090_MICCLK_MASK,
+ micclk_index << M98090_MICCLK_SHIFT);
+
+ regmap_update_bits(max98090->regmap, M98090_REG_DIGITAL_MIC_CONFIG,
+ M98090_DMIC_COMP_MASK | M98090_DMIC_FREQ_MASK,
+ dmic_comp << M98090_DMIC_COMP_SHIFT |
+ dmic_freq << M98090_DMIC_FREQ_SHIFT);
+
+ return 0;
+}
+
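+/*
+ * Worked example of the selection above, assuming the default 2.5 MHz
+ * target from MAX98090_DEFAULT_DMIC_FREQ and pclk = 12288000: dividing by
+ * { 2, 3, 4, 5, 6, 8 } gives 6.144, 4.096, 3.072, 2.4576, 2.048 and
+ * 1.536 MHz, so the closest divisor is 5 (micclk_index 3). At fs = 48000
+ * the comp_lrclk_rates scan stops at index 4, so the 12288000 table entry
+ * yields dmic_freq = 0 and dmic_comp = 6 for the register updates above.
+ */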
static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -1854,7 +1984,6 @@ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_codec *codec = dai->codec;
struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
struct max98090_cdata *cdata;
- int i, j;
cdata = &max98090->dai[0];
max98090->bclk = snd_soc_params_to_bclk(params);
@@ -1893,27 +2022,8 @@ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
snd_soc_update_bits(codec, M98090_REG_FILTER_CONFIG,
M98090_DHF_MASK, M98090_DHF_MASK);
- /* Check for supported PCLK to LRCLK ratios */
- for (j = 0; j < ARRAY_SIZE(comp_pclk_rates); j++) {
- if (comp_pclk_rates[j] == max98090->sysclk) {
- break;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(comp_lrclk_rates) - 1; i++) {
- if (max98090->lrclk <= (comp_lrclk_rates[i] +
- comp_lrclk_rates[i + 1]) / 2) {
- break;
- }
- }
-
- snd_soc_update_bits(codec, M98090_REG_DIGITAL_MIC_ENABLE,
- M98090_MICCLK_MASK,
- dmic_micclk[j] << M98090_MICCLK_SHIFT);
-
- snd_soc_update_bits(codec, M98090_REG_DIGITAL_MIC_CONFIG,
- M98090_DMIC_COMP_MASK,
- dmic_comp[j][i] << M98090_DMIC_COMP_SHIFT);
+ max98090_configure_dmic(max98090, max98090->dmic_freq, max98090->pclk,
+ max98090->lrclk);
return 0;
}
@@ -1944,12 +2054,15 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
if ((freq >= 10000000) && (freq <= 20000000)) {
snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
M98090_PSCLK_DIV1);
+ max98090->pclk = freq;
} else if ((freq > 20000000) && (freq <= 40000000)) {
snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
M98090_PSCLK_DIV2);
+ max98090->pclk = freq >> 1;
} else if ((freq > 40000000) && (freq <= 60000000)) {
snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
M98090_PSCLK_DIV4);
+ max98090->pclk = freq >> 2;
} else {
dev_err(codec->dev, "Invalid master clock frequency\n");
return -EINVAL;
@@ -2324,6 +2437,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
/* Initialize private data */
max98090->sysclk = (unsigned)-1;
+ max98090->pclk = (unsigned)-1;
max98090->master = false;
cdata = &max98090->dai[0];
@@ -2463,6 +2577,11 @@ static int max98090_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, max98090);
max98090->pdata = i2c->dev.platform_data;
+ ret = of_property_read_u32(i2c->dev.of_node, "maxim,dmic-freq",
+ &max98090->dmic_freq);
+ if (ret < 0)
+ max98090->dmic_freq = MAX98090_DEFAULT_DMIC_FREQ;
+
max98090->regmap = devm_regmap_init_i2c(i2c, &max98090_regmap);
if (IS_ERR(max98090->regmap)) {
ret = PTR_ERR(max98090->regmap);
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index a5f6bada06da..21ff743f5af2 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -12,6 +12,12 @@
#define _MAX98090_H
/*
+ * The default operating frequency for a DMIC attached to the codec.
+ * This can be overridden by a device tree property.
+ */
+#define MAX98090_DEFAULT_DMIC_FREQ 2500000
+
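+/*
+ * For illustration: a board's device tree can select a different DMIC
+ * clock target through the property read in max98090_i2c_probe(), e.g.
+ * (hypothetical value):
+ *
+ *     maxim,dmic-freq = <3072000>;
+ */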
+/*
* MAX98090 Register Definitions
*/
@@ -1518,8 +1524,10 @@ struct max98090_priv {
struct max98090_pdata *pdata;
struct clk *mclk;
unsigned int sysclk;
+ unsigned int pclk;
unsigned int bclk;
unsigned int lrclk;
+ u32 dmic_freq;
struct max98090_cdata dai[1];
int jack_state;
struct delayed_work jack_work;
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 0ee6797d5083..8fba0c3db798 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/clk.h>
+#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -57,6 +58,7 @@ struct max98095_priv {
unsigned int mic2pre;
struct snd_soc_jack *headphone_jack;
struct snd_soc_jack *mic_jack;
+ struct mutex lock;
};
static const struct reg_default max98095_reg_def[] = {
@@ -864,7 +866,7 @@ static const struct snd_kcontrol_new max98095_right_ADC_mixer_controls[] = {
static int max98095_mic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
switch (event) {
@@ -894,7 +896,7 @@ static int max98095_mic_event(struct snd_soc_dapm_widget *w,
static int max98095_line_pga(struct snd_soc_dapm_widget *w,
int event, u8 channel)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
u8 *state;
@@ -942,7 +944,7 @@ static int max98095_pga_in2_event(struct snd_soc_dapm_widget *w,
static int max98095_lineout_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -1803,7 +1805,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);
- mutex_lock(&codec->mutex);
+ mutex_lock(&max98095->lock);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
m98095_eq_band(codec, channel, 0, coef_set->band1);
m98095_eq_band(codec, channel, 1, coef_set->band2);
@@ -1811,7 +1813,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
m98095_eq_band(codec, channel, 3, coef_set->band4);
m98095_eq_band(codec, channel, 4, coef_set->band5);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&max98095->lock);
/* Restore the original on/off state */
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
@@ -1957,12 +1959,12 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);
- mutex_lock(&codec->mutex);
+ mutex_lock(&max98095->lock);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
m98095_biquad_band(codec, channel, 0, coef_set->band1);
m98095_biquad_band(codec, channel, 1, coef_set->band2);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&max98095->lock);
/* Restore the original on/off state */
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
@@ -2317,9 +2319,6 @@ static int max98095_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, M98095_097_PWR_SYS, M98095_PWRSV);
- /* initialize registers cache to hardware default */
- max98095_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
snd_soc_write(codec, M98095_048_MIX_DAC_LR,
M98095_DAI1L_TO_DACL|M98095_DAI1R_TO_DACR);
@@ -2359,8 +2358,6 @@ static int max98095_remove(struct snd_soc_codec *codec)
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *client = to_i2c_client(codec->dev);
- max98095_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
if (max98095->headphone_jack || max98095->mic_jack)
max98095_jack_detect_disable(codec);
@@ -2395,6 +2392,8 @@ static int max98095_i2c_probe(struct i2c_client *i2c,
if (max98095 == NULL)
return -ENOMEM;
+ mutex_init(&max98095->lock);
+
max98095->regmap = devm_regmap_init_i2c(i2c, &max98095_regmap);
if (IS_ERR(max98095->regmap)) {
ret = PTR_ERR(max98095->regmap);
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 4fdf5aaa236f..10f8e47ce2c2 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -291,25 +291,6 @@ static struct snd_soc_dai_driver max9850_dai = {
.ops = &max9850_dai_ops,
};
-#ifdef CONFIG_PM
-static int max9850_suspend(struct snd_soc_codec *codec)
-{
- max9850_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int max9850_resume(struct snd_soc_codec *codec)
-{
- max9850_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define max9850_suspend NULL
-#define max9850_resume NULL
-#endif
-
static int max9850_probe(struct snd_soc_codec *codec)
{
/* enable zero-detect */
@@ -324,9 +305,8 @@ static int max9850_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_max9850 = {
.probe = max9850_probe,
- .suspend = max9850_suspend,
- .resume = max9850_resume,
.set_bias_level = max9850_set_bias_level,
+ .suspend_bias_off = true,
.controls = max9850_controls,
.num_controls = ARRAY_SIZE(max9850_controls),
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 71f775aad7c7..c1e441c2c8af 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -802,7 +802,6 @@ static int mc13783_codec_remove(struct platform_device *pdev)
static struct platform_driver mc13783_codec_driver = {
.driver = {
.name = "mc13783-codec",
- .owner = THIS_MODULE,
},
.remove = mc13783_codec_remove,
};
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index b6618c4a7597..7e73fa4b3183 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -162,7 +162,6 @@ static struct platform_driver pcm3008_codec_driver = {
.remove = pcm3008_codec_remove,
.driver = {
.name = "pcm3008-codec",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 4aa555cbcca8..2cd4fe463102 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/dmi.h>
#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -36,11 +37,13 @@
struct rt286_priv {
struct regmap *regmap;
+ struct snd_soc_codec *codec;
struct rt286_platform_data pdata;
struct i2c_client *i2c;
struct snd_soc_jack *jack;
struct delayed_work jack_detect_work;
int sys_clk;
+ int clk_id;
struct reg_default *index_cache;
};
@@ -188,7 +191,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value)
u8 data[4];
int ret, i;
- /*handle index registers*/
+ /* handle index registers */
if (reg <= 0xff) {
rt286_hw_write(client, RT286_COEF_INDEX, reg);
for (i = 0; i < INDEX_CACHE_SIZE; i++) {
@@ -231,7 +234,7 @@ static int rt286_hw_read(void *context, unsigned int reg, unsigned int *value)
__be32 be_reg;
unsigned int index, vid, buf = 0x0;
- /*handle index registers*/
+ /* handle index registers */
if (reg <= 0xff) {
rt286_hw_write(client, RT286_COEF_INDEX, reg);
reg = RT286_PROC_COEF;
@@ -298,7 +301,6 @@ static int rt286_support_power_controls[] = {
static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
{
unsigned int val, buf;
- int i;
*hp = false;
*mic = false;
@@ -309,67 +311,44 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
if (*hp) {
/* power on HV,VERF */
regmap_update_bits(rt286->regmap,
- RT286_POWER_CTRL1, 0x1001, 0x0);
+ RT286_DC_GAIN, 0x200, 0x200);
+
+ snd_soc_dapm_force_enable_pin(&rt286->codec->dapm,
+ "HV");
+ snd_soc_dapm_force_enable_pin(&rt286->codec->dapm,
+ "VREF");
/* power LDO1 */
- regmap_update_bits(rt286->regmap,
- RT286_POWER_CTRL2, 0x4, 0x4);
- regmap_write(rt286->regmap, RT286_SET_MIC1, 0x24);
- regmap_read(rt286->regmap, RT286_CBJ_CTRL2, &val);
+ snd_soc_dapm_force_enable_pin(&rt286->codec->dapm,
+ "LDO1");
+ snd_soc_dapm_sync(&rt286->codec->dapm);
- msleep(200);
- i = 40;
- while (((val & 0x0800) == 0) && (i > 0)) {
- regmap_read(rt286->regmap,
- RT286_CBJ_CTRL2, &val);
- i--;
- msleep(20);
- }
+ regmap_write(rt286->regmap, RT286_SET_MIC1, 0x24);
+ msleep(50);
- if (0x0400 == (val & 0x0700)) {
- *mic = false;
+ regmap_update_bits(rt286->regmap,
+ RT286_CBJ_CTRL1, 0xfcc0, 0xd400);
+ msleep(300);
+ regmap_read(rt286->regmap, RT286_CBJ_CTRL2, &val);
- regmap_write(rt286->regmap,
- RT286_SET_MIC1, 0x20);
- /* power off HV,VERF */
- regmap_update_bits(rt286->regmap,
- RT286_POWER_CTRL1, 0x1001, 0x1001);
- regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL3, 0xc000, 0x0000);
- regmap_update_bits(rt286->regmap,
- RT286_CBJ_CTRL1, 0x0030, 0x0000);
- regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL2, 0xc000, 0x0000);
- } else if ((0x0200 == (val & 0x0700)) ||
- (0x0100 == (val & 0x0700))) {
+ if (0x0070 == (val & 0x0070)) {
*mic = true;
- regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL3, 0xc000, 0x8000);
- regmap_update_bits(rt286->regmap,
- RT286_CBJ_CTRL1, 0x0030, 0x0020);
- regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL2, 0xc000, 0x8000);
} else {
- *mic = false;
+ regmap_update_bits(rt286->regmap,
+ RT286_CBJ_CTRL1, 0xfcc0, 0xe400);
+ msleep(300);
+ regmap_read(rt286->regmap,
+ RT286_CBJ_CTRL2, &val);
+ if (0x0070 == (val & 0x0070))
+ *mic = true;
+ else
+ *mic = false;
}
-
- regmap_update_bits(rt286->regmap,
- RT286_MISC_CTRL1,
- 0x0060, 0x0000);
- } else {
- regmap_update_bits(rt286->regmap,
- RT286_MISC_CTRL1,
- 0x0060, 0x0020);
regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL3,
- 0xc000, 0x8000);
- regmap_update_bits(rt286->regmap,
- RT286_CBJ_CTRL1,
- 0x0030, 0x0020);
- regmap_update_bits(rt286->regmap,
- RT286_A_BIAS_CTRL2,
- 0xc000, 0x8000);
+ RT286_DC_GAIN, 0x200, 0x0);
+ } else {
*mic = false;
+ regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20);
}
} else {
regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
@@ -378,6 +357,12 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
*mic = buf & 0x80000000;
}
+ snd_soc_dapm_disable_pin(&rt286->codec->dapm, "HV");
+ snd_soc_dapm_disable_pin(&rt286->codec->dapm, "VREF");
+ if (!*hp)
+ snd_soc_dapm_disable_pin(&rt286->codec->dapm, "LDO1");
+ snd_soc_dapm_sync(&rt286->codec->dapm);
+
return 0;
}
@@ -415,6 +400,17 @@ int rt286_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
}
EXPORT_SYMBOL_GPL(rt286_mic_detect);
+static int is_mclk_mode(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(source->codec);
+
+ if (rt286->clk_id == RT286_SCLK_S_MCLK)
+ return 1;
+ else
+ return 0;
+}
+
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -6350, 50, 0);
static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
@@ -568,7 +564,84 @@ static int rt286_adc_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt286_vref_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ RT286_CBJ_CTRL1, 0x0400, 0x0000);
+ mdelay(50);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, RT286_POWER_CTRL2, 0x38, 0x08);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, RT286_POWER_CTRL2, 0x38, 0x30);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt286_mic1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ RT286_A_BIAS_CTRL3, 0xc000, 0x8000);
+ snd_soc_update_bits(codec,
+ RT286_A_BIAS_CTRL2, 0xc000, 0x8000);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ RT286_A_BIAS_CTRL3, 0xc000, 0x0000);
+ snd_soc_update_bits(codec,
+ RT286_A_BIAS_CTRL2, 0xc000, 0x0000);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1,
+ 12, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1,
+ 0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2,
+ 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1,
+ 13, 1, rt286_ldo2_event, SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("MCLK MODE", RT286_PLL_CTRL1,
+ 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC1 Input Buffer", SND_SOC_NOPM,
+ 0, 0, rt286_mic1_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
/* Input Lines */
SND_SOC_DAPM_INPUT("DMIC1 Pin"),
SND_SOC_DAPM_INPUT("DMIC2 Pin"),
@@ -642,6 +715,25 @@ static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route rt286_dapm_routes[] = {
+ {"ADC 0", NULL, "MCLK MODE", is_mclk_mode},
+ {"ADC 1", NULL, "MCLK MODE", is_mclk_mode},
+ {"Front", NULL, "MCLK MODE", is_mclk_mode},
+ {"Surround", NULL, "MCLK MODE", is_mclk_mode},
+
+ {"HP Power", NULL, "LDO1"},
+ {"HP Power", NULL, "LDO2"},
+
+ {"MIC1", NULL, "LDO1"},
+ {"MIC1", NULL, "LDO2"},
+ {"MIC1", NULL, "HV"},
+ {"MIC1", NULL, "VREF"},
+ {"MIC1", NULL, "MIC1 Input Buffer"},
+
+ {"SPO", NULL, "LDO1"},
+ {"SPO", NULL, "LDO2"},
+ {"SPO", NULL, "HV"},
+ {"SPO", NULL, "VREF"},
+
{"DMIC1", NULL, "DMIC1 Pin"},
{"DMIC2", NULL, "DMIC2 Pin"},
{"DMIC1", NULL, "DMIC Receiver"},
@@ -880,6 +972,7 @@ static int rt286_set_dai_sysclk(struct snd_soc_dai *dai,
}
rt286->sys_clk = freq;
+ rt286->clk_id = clk_id;
return 0;
}
@@ -915,13 +1008,18 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_ON:
mdelay(10);
+ snd_soc_update_bits(codec,
+ RT286_CBJ_CTRL1, 0x0400, 0x0400);
+ snd_soc_update_bits(codec,
+ RT286_DC_GAIN, 0x200, 0x0);
+
break;
case SND_SOC_BIAS_STANDBY:
snd_soc_write(codec,
RT286_SET_AUDIO_POWER, AC_PWRST_D3);
snd_soc_update_bits(codec,
- RT286_DC_GAIN, 0x200, 0x0);
+ RT286_CBJ_CTRL1, 0x0400, 0x0000);
break;
default:
@@ -962,6 +1060,7 @@ static int rt286_probe(struct snd_soc_codec *codec)
{
struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(codec);
+ rt286->codec = codec;
codec->dapm.bias_level = SND_SOC_BIAS_OFF;
if (rt286->i2c->irq) {
@@ -1107,6 +1206,16 @@ static const struct acpi_device_id rt286_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, rt286_acpi_match);
+static struct dmi_system_id force_combo_jack_table[] = {
+ {
+ .ident = "Intel Wilson Beach",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Wilson Beach SDS")
+ }
+ },
+ { }
+};
+
static int rt286_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1142,6 +1251,9 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt286->pdata = *pdata;
+ if (dmi_check_system(force_combo_jack_table))
+ rt286->pdata.cbj_en = true;
+
regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
for (i = 0; i < RT286_POWER_REG_LEN; i++)
@@ -1152,7 +1264,6 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
if (!rt286->pdata.cbj_en) {
regmap_write(rt286->regmap, RT286_CBJ_CTRL2, 0x0000);
regmap_write(rt286->regmap, RT286_MIC1_DET_CTRL, 0x0816);
- regmap_write(rt286->regmap, RT286_MISC_CTRL1, 0x0000);
regmap_update_bits(rt286->regmap,
RT286_CBJ_CTRL1, 0xf000, 0xb000);
} else {
@@ -1169,10 +1280,12 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
mdelay(10);
- /*Power down LDO2*/
- regmap_update_bits(rt286->regmap, RT286_POWER_CTRL2, 0x8, 0x0);
+ regmap_write(rt286->regmap, RT286_MISC_CTRL1, 0x0000);
+ /* Power down LDO, VREF */
+ regmap_update_bits(rt286->regmap, RT286_POWER_CTRL2, 0xc, 0x0);
+ regmap_update_bits(rt286->regmap, RT286_POWER_CTRL1, 0x1001, 0x1001);
- /*Set depop parameter*/
+ /* Set depop parameter */
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL2, 0x403a, 0x401a);
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 1ba27db660a6..6d7b7ca7d530 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1612,29 +1612,6 @@ static int rt5631_probe(struct snd_soc_codec *codec)
return 0;
}
-static int rt5631_remove(struct snd_soc_codec *codec)
-{
- rt5631_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int rt5631_suspend(struct snd_soc_codec *codec)
-{
- rt5631_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int rt5631_resume(struct snd_soc_codec *codec)
-{
- rt5631_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define rt5631_suspend NULL
-#define rt5631_resume NULL
-#endif
-
#define RT5631_STEREO_RATES SNDRV_PCM_RATE_8000_96000
#define RT5631_FORMAT (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
@@ -1672,10 +1649,8 @@ static struct snd_soc_dai_driver rt5631_dai[] = {
static struct snd_soc_codec_driver soc_codec_dev_rt5631 = {
.probe = rt5631_probe,
- .remove = rt5631_remove,
- .suspend = rt5631_suspend,
- .resume = rt5631_resume,
.set_bias_level = rt5631_set_bias_level,
+ .suspend_bias_off = true,
.controls = rt5631_snd_controls,
.num_controls = ARRAY_SIZE(rt5631_snd_controls),
.dapm_widgets = rt5631_dapm_widgets,
@@ -1686,10 +1661,20 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5631 = {
static const struct i2c_device_id rt5631_i2c_id[] = {
{ "rt5631", 0 },
+ { "alc5631", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5631_i2c_id);
+#ifdef CONFIG_OF
+static struct of_device_id rt5631_i2c_dt_ids[] = {
+ { .compatible = "realtek,rt5631"},
+ { .compatible = "realtek,alc5631"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rt5631_i2c_dt_ids);
+#endif
+
static const struct regmap_config rt5631_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
@@ -1734,6 +1719,7 @@ static struct i2c_driver rt5631_i2c_driver = {
.driver = {
.name = "rt5631",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rt5631_i2c_dt_ids),
},
.probe = rt5631_i2c_probe,
.remove = rt5631_i2c_remove,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index d16331e0b64d..a7789a8726e3 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -554,6 +554,53 @@ static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
return 0;
}
+static int is_using_asrc(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int reg, shift, val;
+
+ switch (source->shift) {
+ case 0:
+ reg = RT5645_ASRC_3;
+ shift = 0;
+ break;
+ case 1:
+ reg = RT5645_ASRC_3;
+ shift = 4;
+ break;
+ case 3:
+ reg = RT5645_ASRC_2;
+ shift = 0;
+ break;
+ case 8:
+ reg = RT5645_ASRC_2;
+ shift = 4;
+ break;
+ case 9:
+ reg = RT5645_ASRC_2;
+ shift = 8;
+ break;
+ case 10:
+ reg = RT5645_ASRC_2;
+ shift = 12;
+ break;
+ default:
+ return 0;
+ }
+
+ val = (snd_soc_read(source->codec, reg) >> shift) & 0xf;
+ switch (val) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return 1;
+ default:
+ return 0;
+ }
+
+}
+
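+/*
+ * Example of the decode above: the "ADC STO1 ASRC" supply added in this
+ * patch sits at bit 3 of RT5645_ASRC_1, so source->shift == 3 selects
+ * RT5645_ASRC_2 bits [3:0]; a clock-select value of 1-4 there indicates an
+ * ASRC-derived clock, and only then is the filter route connected.
+ */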
/* Digital Mixer */
static const struct snd_kcontrol_new rt5645_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5645_STO1_ADC_MIXER,
@@ -1246,6 +1293,30 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5645_PWR_VOL,
RT5645_PWR_MIC_DET_BIT, 0, NULL, 0),
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY_S("I2S1 ASRC", 1, RT5645_ASRC_1,
+ 11, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S2 ASRC", 1, RT5645_ASRC_1,
+ 12, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC STO ASRC", 1, RT5645_ASRC_1,
+ 10, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO L ASRC", 1, RT5645_ASRC_1,
+ 9, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC MONO R ASRC", 1, RT5645_ASRC_1,
+ 8, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO1 ASRC", 1, RT5645_ASRC_1,
+ 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO L ASRC", 1, RT5645_ASRC_1,
+ 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO R ASRC", 1, RT5645_ASRC_1,
+ 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5645_ASRC_1,
+ 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC MONO L ASRC", 1, RT5645_ASRC_1,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC MONO R ASRC", 1, RT5645_ASRC_1,
+ 0, 0, NULL, 0),
+
/* Input Side */
/* micbias */
SND_SOC_DAPM_MICBIAS("micbias1", RT5645_PWR_ANLG2,
@@ -1504,6 +1575,17 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
+ { "adc stereo1 filter", NULL, "ADC STO1 ASRC", is_using_asrc },
+ { "adc stereo2 filter", NULL, "ADC STO2 ASRC", is_using_asrc },
+ { "adc mono left filter", NULL, "ADC MONO L ASRC", is_using_asrc },
+ { "adc mono right filter", NULL, "ADC MONO R ASRC", is_using_asrc },
+ { "dac mono left filter", NULL, "DAC MONO L ASRC", is_using_asrc },
+ { "dac mono right filter", NULL, "DAC MONO R ASRC", is_using_asrc },
+ { "dac stereo1 filter", NULL, "DAC STO ASRC", is_using_asrc },
+
+ { "I2S1", NULL, "I2S1 ASRC" },
+ { "I2S2", NULL, "I2S2 ASRC" },
+
{ "IN1P", NULL, "LDO2" },
{ "IN2P", NULL, "LDO2" },
@@ -1550,12 +1632,15 @@ static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "Stereo1 DMIC Mux", "DMIC1", "DMIC1" },
{ "Stereo1 DMIC Mux", "DMIC2", "DMIC2" },
+ { "Stereo1 DMIC Mux", NULL, "DMIC STO1 ASRC" },
{ "Mono DMIC L Mux", "DMIC1", "DMIC L1" },
{ "Mono DMIC L Mux", "DMIC2", "DMIC L2" },
+ { "Mono DMIC L Mux", NULL, "DMIC MONO L ASRC" },
{ "Mono DMIC R Mux", "DMIC1", "DMIC R1" },
{ "Mono DMIC R Mux", "DMIC2", "DMIC R2" },
+ { "Mono DMIC R Mux", NULL, "DMIC MONO R ASRC" },
{ "Stereo1 ADC L2 Mux", "DMIC", "Stereo1 DMIC Mux" },
{ "Stereo1 ADC L2 Mux", "DAC MIX", "DAC MIXL" },
@@ -2029,8 +2114,11 @@ static int rt5645_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
struct snd_soc_codec *codec = dai->codec;
unsigned int val = 0;
- if (rx_mask || tx_mask)
+ if (rx_mask || tx_mask) {
val |= (1 << 14);
+ snd_soc_update_bits(codec, RT5645_BASS_BACK,
+ RT5645_G_BB_BST_MASK, RT5645_G_BB_BST_25DB);
+ }
switch (slots) {
case 4:
@@ -2071,8 +2159,8 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
switch (level) {
- case SND_SOC_BIAS_STANDBY:
- if (SND_SOC_BIAS_OFF == codec->dapm.bias_level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) {
snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
RT5645_PWR_VREF1 | RT5645_PWR_MB |
RT5645_PWR_BG | RT5645_PWR_VREF2,
@@ -2087,15 +2175,24 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
}
break;
+ case SND_SOC_BIAS_STANDBY:
+ snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
+ RT5645_PWR_VREF1 | RT5645_PWR_MB |
+ RT5645_PWR_BG | RT5645_PWR_VREF2,
+ RT5645_PWR_VREF1 | RT5645_PWR_MB |
+ RT5645_PWR_BG | RT5645_PWR_VREF2);
+ snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
+ RT5645_PWR_FV1 | RT5645_PWR_FV2,
+ RT5645_PWR_FV1 | RT5645_PWR_FV2);
+ break;
+
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, RT5645_DEPOP_M2, 0x1100);
snd_soc_write(codec, RT5645_GEN_CTRL1, 0x0128);
- snd_soc_write(codec, RT5645_PWR_DIG1, 0x0000);
- snd_soc_write(codec, RT5645_PWR_DIG2, 0x0000);
- snd_soc_write(codec, RT5645_PWR_VOL, 0x0000);
- snd_soc_write(codec, RT5645_PWR_MIXER, 0x0000);
- snd_soc_write(codec, RT5645_PWR_ANLG1, 0x0000);
- snd_soc_write(codec, RT5645_PWR_ANLG2, 0x0000);
+ snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
+ RT5645_PWR_VREF1 | RT5645_PWR_MB |
+ RT5645_PWR_BG | RT5645_PWR_VREF2 |
+ RT5645_PWR_FV1 | RT5645_PWR_FV2, 0x0);
break;
default:
@@ -2106,8 +2203,7 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int rt5645_jack_detect(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack)
+static int rt5645_jack_detect(struct snd_soc_codec *codec)
{
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
int gpio_state, jack_type = 0;
@@ -2145,34 +2241,44 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec,
snd_soc_dapm_disable_pin(&codec->dapm, "micbias1");
snd_soc_dapm_disable_pin(&codec->dapm, "micbias2");
- snd_soc_dapm_disable_pin(&codec->dapm, "LDO2");
+ if (rt5645->pdata.jd_mode == 0)
+ snd_soc_dapm_disable_pin(&codec->dapm, "LDO2");
snd_soc_dapm_disable_pin(&codec->dapm, "Mic Det Power");
snd_soc_dapm_sync(&codec->dapm);
}
- snd_soc_jack_report(rt5645->jack, jack_type, SND_JACK_HEADSET);
-
+ snd_soc_jack_report(rt5645->hp_jack, jack_type, SND_JACK_HEADPHONE);
+ snd_soc_jack_report(rt5645->mic_jack, jack_type, SND_JACK_MICROPHONE);
return 0;
}
int rt5645_set_jack_detect(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack)
+ struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack)
{
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
- rt5645->jack = jack;
-
- rt5645_jack_detect(codec, rt5645->jack);
+ rt5645->hp_jack = hp_jack;
+ rt5645->mic_jack = mic_jack;
+ rt5645_jack_detect(codec);
return 0;
}
EXPORT_SYMBOL_GPL(rt5645_set_jack_detect);
+static void rt5645_jack_detect_work(struct work_struct *work)
+{
+ struct rt5645_priv *rt5645 =
+ container_of(work, struct rt5645_priv, jack_detect_work.work);
+
+ rt5645_jack_detect(rt5645->codec);
+}
+
static irqreturn_t rt5645_irq(int irq, void *data)
{
struct rt5645_priv *rt5645 = data;
- rt5645_jack_detect(rt5645->codec, rt5645->jack);
+ queue_delayed_work(system_power_efficient_wq,
+ &rt5645->jack_detect_work, msecs_to_jiffies(250));
return IRQ_HANDLED;
}
@@ -2187,6 +2293,13 @@ static int rt5645_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
+ /* for JD function */
+ if (rt5645->pdata.en_jd_func) {
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "JD Power");
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
+
return 0;
}
@@ -2420,6 +2533,51 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
+ if (rt5645->pdata.en_jd_func) {
+ regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
+ RT5645_IRQ_CLK_GATE_CTRL | RT5645_MICINDET_MANU,
+ RT5645_IRQ_CLK_GATE_CTRL | RT5645_MICINDET_MANU);
+ regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+ RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
+ regmap_update_bits(rt5645->regmap, RT5645_JD_CTRL3,
+ RT5645_JD_CBJ_EN | RT5645_JD_CBJ_POL,
+ RT5645_JD_CBJ_EN | RT5645_JD_CBJ_POL);
+ regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+ RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT);
+ }
+
+ if (rt5645->pdata.jd_mode) {
+ regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+ RT5645_IRQ_JD_1_1_EN, RT5645_IRQ_JD_1_1_EN);
+ regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
+ RT5645_JD_PSV_MODE, RT5645_JD_PSV_MODE);
+ regmap_update_bits(rt5645->regmap, RT5645_HPO_MIXER,
+ RT5645_IRQ_PSV_MODE, RT5645_IRQ_PSV_MODE);
+ regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+ RT5645_MIC2_OVCD_EN, RT5645_MIC2_OVCD_EN);
+ regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+ RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
+ switch (rt5645->pdata.jd_mode) {
+ case 1:
+ regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1,
+ RT5645_JD1_MODE_MASK,
+ RT5645_JD1_MODE_0);
+ break;
+ case 2:
+ regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1,
+ RT5645_JD1_MODE_MASK,
+ RT5645_JD1_MODE_1);
+ break;
+ case 3:
+ regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1,
+ RT5645_JD1_MODE_MASK,
+ RT5645_JD1_MODE_2);
+ break;
+ default:
+ break;
+ }
+ }
+
if (rt5645->i2c->irq) {
ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
@@ -2438,6 +2596,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
dev_err(&i2c->dev, "Fail gpio_direction hp_det_gpio\n");
}
+ INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
rt5645_dai, ARRAY_SIZE(rt5645_dai));
}
@@ -2449,6 +2609,8 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
if (i2c->irq)
free_irq(i2c->irq, rt5645);
+ cancel_delayed_work_sync(&rt5645->jack_detect_work);
+
if (gpio_is_valid(rt5645->pdata.hp_det_gpio))
gpio_free(rt5645->pdata.hp_det_gpio);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 50c62c5668ea..a815e36a2bdb 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -594,6 +594,7 @@
#define RT5645_M_DAC1_HM_SFT 14
#define RT5645_M_HPVOL_HM (0x1 << 13)
#define RT5645_M_HPVOL_HM_SFT 13
+#define RT5645_IRQ_PSV_MODE (0x1 << 12)
/* SPK Left Mixer Control (0x46) */
#define RT5645_G_RM_L_SM_L_MASK (0x3 << 14)
@@ -1348,6 +1349,12 @@
#define RT5645_PWR_CLK25M_SFT 4
#define RT5645_PWR_CLK25M_PD (0x0 << 4)
#define RT5645_PWR_CLK25M_PU (0x1 << 4)
+#define RT5645_IRQ_CLK_MCLK (0x0 << 3)
+#define RT5645_IRQ_CLK_INT (0x1 << 3)
+#define RT5645_JD1_MODE_MASK (0x3 << 0)
+#define RT5645_JD1_MODE_0 (0x0 << 0)
+#define RT5645_JD1_MODE_1 (0x1 << 0)
+#define RT5645_JD1_MODE_2 (0x2 << 0)
/* VAD Control 4 (0x9d) */
#define RT5645_VAD_SEL_MASK (0x3 << 8)
@@ -1636,6 +1643,7 @@
#define RT5645_OT_P_SFT 10
#define RT5645_OT_P_NOR (0x0 << 10)
#define RT5645_OT_P_INV (0x1 << 10)
+#define RT5645_IRQ_JD_1_1_EN (0x1 << 9)
/* IRQ Control 2 (0xbe) */
#define RT5645_IRQ_MB1_OC_MASK (0x1 << 15)
@@ -1853,6 +1861,7 @@
#define RT5645_M_BB_HPF_R_SFT 6
#define RT5645_G_BB_BST_MASK (0x3f)
#define RT5645_G_BB_BST_SFT 0
+#define RT5645_G_BB_BST_25DB 0x14
/* MP3 Plus Control 1 (0xd0) */
#define RT5645_M_MP3_L_MASK (0x1 << 15)
@@ -2116,6 +2125,10 @@ enum {
#define RT5645_RXDP2_SEL_ADC (0x1 << 3)
#define RT5645_RXDP2_SEL_SFT (3)
+/* General Control3 (0xfc) */
+#define RT5645_JD_PSV_MODE (0x1 << 12)
+#define RT5645_IRQ_CLK_GATE_CTRL (0x1 << 11)
+#define RT5645_MICINDET_MANU (0x1 << 7)
/* Vendor ID (0xfd) */
#define RT5645_VER_C 0x2
@@ -2167,7 +2180,9 @@ struct rt5645_priv {
struct rt5645_platform_data pdata;
struct regmap *regmap;
struct i2c_client *i2c;
- struct snd_soc_jack *jack;
+ struct snd_soc_jack *hp_jack;
+ struct snd_soc_jack *mic_jack;
+ struct delayed_work jack_detect_work;
int sysclk;
int sysclk_src;
@@ -2181,6 +2196,6 @@ struct rt5645_priv {
};
int rt5645_set_jack_detect(struct snd_soc_codec *codec,
- struct snd_soc_jack *jack);
+ struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack);
#endif /* __RT5645_H__ */
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 9bd8b4f63303..8a0833de1665 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
#include <linux/spi/spi.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -575,6 +576,18 @@ static int is_using_asrc(struct snd_soc_dapm_widget *source,
}
+static int can_use_asrc(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+
+ if (rt5670->sysclk > rt5670->lrck[RT5670_AIF1] * 384)
+ return 1;
+
+ return 0;
+}
+
/* Digital Mixer */
static const struct snd_kcontrol_new rt5670_sto1_adc_l_mix[] = {
SOC_DAPM_SINGLE("ADC1 Switch", RT5670_STO1_ADC_MIXER,
@@ -1281,6 +1294,14 @@ static const struct snd_soc_dapm_widget rt5670_dapm_widgets[] = {
9, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("DAC MONO R ASRC", 1, RT5670_ASRC_1,
8, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO1 ASRC", 1, RT5670_ASRC_1,
+ 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC STO2 ASRC", 1, RT5670_ASRC_1,
+ 6, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO L ASRC", 1, RT5670_ASRC_1,
+ 5, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC MONO R ASRC", 1, RT5670_ASRC_1,
+ 4, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5670_ASRC_1,
3, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("ADC STO2 ASRC", 1, RT5670_ASRC_1,
@@ -1595,29 +1616,40 @@ static const struct snd_soc_dapm_widget rt5670_dapm_widgets[] = {
/* PDM */
SND_SOC_DAPM_SUPPLY("PDM1 Power", RT5670_PWR_DIG2,
RT5670_PWR_PDM1_BIT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("PDM2 Power", RT5670_PWR_DIG2,
- RT5670_PWR_PDM2_BIT, 0, NULL, 0),
SND_SOC_DAPM_MUX("PDM1 L Mux", RT5670_PDM_OUT_CTRL,
RT5670_M_PDM1_L_SFT, 1, &rt5670_pdm1_l_mux),
SND_SOC_DAPM_MUX("PDM1 R Mux", RT5670_PDM_OUT_CTRL,
RT5670_M_PDM1_R_SFT, 1, &rt5670_pdm1_r_mux),
- SND_SOC_DAPM_MUX("PDM2 L Mux", RT5670_PDM_OUT_CTRL,
- RT5670_M_PDM2_L_SFT, 1, &rt5670_pdm2_l_mux),
- SND_SOC_DAPM_MUX("PDM2 R Mux", RT5670_PDM_OUT_CTRL,
- RT5670_M_PDM2_R_SFT, 1, &rt5670_pdm2_r_mux),
/* Output Lines */
SND_SOC_DAPM_OUTPUT("HPOL"),
SND_SOC_DAPM_OUTPUT("HPOR"),
SND_SOC_DAPM_OUTPUT("LOUTL"),
SND_SOC_DAPM_OUTPUT("LOUTR"),
+};
+
+static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PDM2 Power", RT5670_PWR_DIG2,
+ RT5670_PWR_PDM2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("PDM2 L Mux", RT5670_PDM_OUT_CTRL,
+ RT5670_M_PDM2_L_SFT, 1, &rt5670_pdm2_l_mux),
+ SND_SOC_DAPM_MUX("PDM2 R Mux", RT5670_PDM_OUT_CTRL,
+ RT5670_M_PDM2_R_SFT, 1, &rt5670_pdm2_r_mux),
SND_SOC_DAPM_OUTPUT("PDM1L"),
SND_SOC_DAPM_OUTPUT("PDM1R"),
SND_SOC_DAPM_OUTPUT("PDM2L"),
SND_SOC_DAPM_OUTPUT("PDM2R"),
};
+static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = {
+ SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("SPOLP"),
+ SND_SOC_DAPM_OUTPUT("SPOLN"),
+ SND_SOC_DAPM_OUTPUT("SPORP"),
+ SND_SOC_DAPM_OUTPUT("SPORN"),
+};
+
static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc },
{ "ADC Stereo2 Filter", NULL, "ADC STO2 ASRC", is_using_asrc },
@@ -1626,9 +1658,13 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "DAC Mono Left Filter", NULL, "DAC MONO L ASRC", is_using_asrc },
{ "DAC Mono Right Filter", NULL, "DAC MONO R ASRC", is_using_asrc },
{ "DAC Stereo1 Filter", NULL, "DAC STO ASRC", is_using_asrc },
+ { "Stereo1 DMIC Mux", NULL, "DMIC STO1 ASRC", can_use_asrc },
+ { "Stereo2 DMIC Mux", NULL, "DMIC STO2 ASRC", can_use_asrc },
+ { "Mono DMIC L Mux", NULL, "DMIC MONO L ASRC", can_use_asrc },
+ { "Mono DMIC R Mux", NULL, "DMIC MONO R ASRC", can_use_asrc },
- { "I2S1", NULL, "I2S1 ASRC" },
- { "I2S2", NULL, "I2S2 ASRC" },
+ { "I2S1", NULL, "I2S1 ASRC", can_use_asrc},
+ { "I2S2", NULL, "I2S2 ASRC", can_use_asrc},
{ "DMIC1", NULL, "DMIC L1" },
{ "DMIC1", NULL, "DMIC R1" },
@@ -1970,12 +2006,6 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "PDM1 R Mux", "Stereo DAC", "Stereo DAC MIXR" },
{ "PDM1 R Mux", "Mono DAC", "Mono DAC MIXR" },
{ "PDM1 R Mux", NULL, "PDM1 Power" },
- { "PDM2 L Mux", "Stereo DAC", "Stereo DAC MIXL" },
- { "PDM2 L Mux", "Mono DAC", "Mono DAC MIXL" },
- { "PDM2 L Mux", NULL, "PDM2 Power" },
- { "PDM2 R Mux", "Stereo DAC", "Stereo DAC MIXR" },
- { "PDM2 R Mux", "Mono DAC", "Mono DAC MIXR" },
- { "PDM2 R Mux", NULL, "PDM2 Power" },
{ "HP Amp", NULL, "HPO MIX" },
{ "HP Amp", NULL, "Mic Det Power" },
@@ -1993,13 +2023,30 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "LOUTR", NULL, "LOUT R Playback" },
{ "LOUTL", NULL, "Improve HP Amp Drv" },
{ "LOUTR", NULL, "Improve HP Amp Drv" },
+};
+static const struct snd_soc_dapm_route rt5670_specific_dapm_routes[] = {
+ { "PDM2 L Mux", "Stereo DAC", "Stereo DAC MIXL" },
+ { "PDM2 L Mux", "Mono DAC", "Mono DAC MIXL" },
+ { "PDM2 L Mux", NULL, "PDM2 Power" },
+ { "PDM2 R Mux", "Stereo DAC", "Stereo DAC MIXR" },
+ { "PDM2 R Mux", "Mono DAC", "Mono DAC MIXR" },
+ { "PDM2 R Mux", NULL, "PDM2 Power" },
{ "PDM1L", NULL, "PDM1 L Mux" },
{ "PDM1R", NULL, "PDM1 R Mux" },
{ "PDM2L", NULL, "PDM2 L Mux" },
{ "PDM2R", NULL, "PDM2 R Mux" },
};
+static const struct snd_soc_dapm_route rt5672_specific_dapm_routes[] = {
+ { "SPO Amp", NULL, "PDM1 L Mux" },
+ { "SPO Amp", NULL, "PDM1 R Mux" },
+ { "SPOLP", NULL, "SPO Amp" },
+ { "SPOLN", NULL, "SPO Amp" },
+ { "SPORP", NULL, "SPO Amp" },
+ { "SPORN", NULL, "SPO Amp" },
+};
+
static int rt5670_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -2287,6 +2334,8 @@ static int rt5670_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
static int rt5670_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+
switch (level) {
case SND_SOC_BIAS_PREPARE:
if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) {
@@ -2308,16 +2357,27 @@ static int rt5670_set_bias_level(struct snd_soc_codec *codec,
}
break;
case SND_SOC_BIAS_STANDBY:
- snd_soc_write(codec, RT5670_PWR_DIG1, 0x0000);
- snd_soc_write(codec, RT5670_PWR_DIG2, 0x0001);
- snd_soc_write(codec, RT5670_PWR_VOL, 0x0000);
- snd_soc_write(codec, RT5670_PWR_MIXER, 0x0001);
- snd_soc_write(codec, RT5670_PWR_ANLG1, 0x2800);
- snd_soc_write(codec, RT5670_PWR_ANLG2, 0x0004);
- snd_soc_update_bits(codec, RT5670_DIG_MISC, 0x1, 0x0);
+ snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
+ RT5670_PWR_VREF1 | RT5670_PWR_VREF2 |
+ RT5670_PWR_FV1 | RT5670_PWR_FV2, 0);
snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
RT5670_LDO_SEL_MASK, 0x1);
break;
+ case SND_SOC_BIAS_OFF:
+ if (rt5670->pdata.jd_mode)
+ snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
+ RT5670_PWR_VREF1 | RT5670_PWR_MB |
+ RT5670_PWR_BG | RT5670_PWR_VREF2 |
+ RT5670_PWR_FV1 | RT5670_PWR_FV2,
+ RT5670_PWR_MB | RT5670_PWR_BG);
+ else
+ snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
+ RT5670_PWR_VREF1 | RT5670_PWR_MB |
+ RT5670_PWR_BG | RT5670_PWR_VREF2 |
+ RT5670_PWR_FV1 | RT5670_PWR_FV2, 0);
+
+ snd_soc_update_bits(codec, RT5670_DIG_MISC, 0x1, 0x0);
+ break;
default:
break;
@@ -2331,6 +2391,29 @@ static int rt5670_probe(struct snd_soc_codec *codec)
{
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
+ switch (snd_soc_read(codec, RT5670_RESET) & RT5670_ID_MASK) {
+ case RT5670_ID_5670:
+ case RT5670_ID_5671:
+ snd_soc_dapm_new_controls(&codec->dapm,
+ rt5670_specific_dapm_widgets,
+ ARRAY_SIZE(rt5670_specific_dapm_widgets));
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5670_specific_dapm_routes,
+ ARRAY_SIZE(rt5670_specific_dapm_routes));
+ break;
+ case RT5670_ID_5672:
+ snd_soc_dapm_new_controls(&codec->dapm,
+ rt5672_specific_dapm_widgets,
+ ARRAY_SIZE(rt5672_specific_dapm_widgets));
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5672_specific_dapm_routes,
+ ARRAY_SIZE(rt5672_specific_dapm_routes));
+ break;
+ default:
+ dev_err(codec->dev,
+			"The driver is for RT5670, RT5671 or RT5672 only\n");
+ return -ENODEV;
+ }
rt5670->codec = codec;
return 0;
@@ -2452,10 +2535,20 @@ static const struct regmap_config rt5670_regmap = {
static const struct i2c_device_id rt5670_i2c_id[] = {
{ "rt5670", 0 },
+ { "rt5671", 0 },
+ { "rt5672", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5670_i2c_id);
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5670_acpi_match[] = {
+ { "10EC5670", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
+#endif
+
static int rt5670_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -2644,6 +2737,7 @@ static struct i2c_driver rt5670_i2c_driver = {
.driver = {
.name = "rt5670",
.owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(rt5670_acpi_match),
},
.probe = rt5670_i2c_probe,
.remove = rt5670_i2c_remove,
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index a0b5c855b492..d11b9c207e26 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -228,6 +228,12 @@
#define RT5670_R_VOL_MASK (0x3f)
#define RT5670_R_VOL_SFT 0
+/* SW Reset & Device ID (0x00) */
+#define RT5670_ID_MASK (0x3 << 1)
+#define RT5670_ID_5670 (0x0 << 1)
+#define RT5670_ID_5672 (0x1 << 1)
+#define RT5670_ID_5671 (0x2 << 1)
+
/* Combo Jack Control 1 (0x0a) */
#define RT5670_CBJ_BST1_MASK (0xf << 12)
#define RT5670_CBJ_BST1_SFT (12)
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
new file mode 100644
index 000000000000..ef6348cb9157
--- /dev/null
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -0,0 +1,130 @@
+/*
+ * rt5677-spi.c -- RT5677 ALSA SoC audio codec driver
+ *
+ * Copyright 2013 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_qos.h>
+#include <linux/sysfs.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+
+#include "rt5677-spi.h"
+
+static struct spi_device *g_spi;
+
+/**
+ * rt5677_spi_write - Write data to SPI.
+ * @txbuf: Data buffer to write.
+ * @len: Data length.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rt5677_spi_write(u8 *txbuf, size_t len)
+{
+ int status;
+
+ status = spi_write(g_spi, txbuf, len);
+
+ if (status)
+ dev_err(&g_spi->dev, "rt5677_spi_write error %d\n", status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_write);
+
+/**
+ * rt5677_spi_burst_write - Write data to the rt5677 DSP memory address via SPI.
+ * @addr: Start address in DSP memory.
+ * @fw: Firmware data to write; its size should be a multiple of 8 bytes.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rt5677_spi_burst_write(u32 addr, const struct firmware *fw)
+{
+ u8 spi_cmd = RT5677_SPI_CMD_BURST_WRITE;
+ u8 *write_buf;
+ unsigned int i, end, offset = 0;
+
+ write_buf = kmalloc(RT5677_SPI_BUF_LEN + 6, GFP_KERNEL);
+
+ if (write_buf == NULL)
+ return -ENOMEM;
+
+ while (offset < fw->size) {
+ if (offset + RT5677_SPI_BUF_LEN <= fw->size)
+ end = RT5677_SPI_BUF_LEN;
+ else
+ end = fw->size % RT5677_SPI_BUF_LEN;
+
+ write_buf[0] = spi_cmd;
+ write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
+ write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+ write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+ write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+ for (i = 0; i < end; i += 8) {
+ write_buf[i + 12] = fw->data[offset + i + 0];
+ write_buf[i + 11] = fw->data[offset + i + 1];
+ write_buf[i + 10] = fw->data[offset + i + 2];
+ write_buf[i + 9] = fw->data[offset + i + 3];
+ write_buf[i + 8] = fw->data[offset + i + 4];
+ write_buf[i + 7] = fw->data[offset + i + 5];
+ write_buf[i + 6] = fw->data[offset + i + 6];
+ write_buf[i + 5] = fw->data[offset + i + 7];
+ }
+
+ write_buf[end + 5] = spi_cmd;
+
+ rt5677_spi_write(write_buf, end + 6);
+
+ offset += RT5677_SPI_BUF_LEN;
+ }
+
+ kfree(write_buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_burst_write);
+
+static int rt5677_spi_probe(struct spi_device *spi)
+{
+ g_spi = spi;
+ return 0;
+}
+
+static struct spi_driver rt5677_spi_driver = {
+ .driver = {
+ .name = "rt5677",
+ .owner = THIS_MODULE,
+ },
+ .probe = rt5677_spi_probe,
+};
+module_spi_driver(rt5677_spi_driver);
+
+MODULE_DESCRIPTION("ASoC RT5677 SPI driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
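The index arithmetic in rt5677_spi_burst_write() above is easy to misread: each SPI transfer is one command byte, a big-endian 32-bit DSP address, the data payload, and a trailing command byte, and every 8-byte group of firmware data is stored in reverse byte order. A minimal sketch of packing one group under that assumed buffer layout (the helper name is made up for illustration):

#include <linux/types.h>

/* Sketch only: mirrors the inner loop of rt5677_spi_burst_write(), where
 * group g of the current chunk occupies buf[8 * g + 5] .. buf[8 * g + 12]
 * in reverse byte order, after the command byte and four address bytes.
 */
static void rt5677_spi_pack_group(u8 *buf, const u8 *data, unsigned int g)
{
	int i;

	for (i = 0; i < 8; i++)
		buf[8 * g + 5 + i] = data[8 * g + 7 - i];
}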
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
new file mode 100644
index 000000000000..ec41b2b3b2ca
--- /dev/null
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -0,0 +1,21 @@
+/*
+ * rt5677-spi.h -- RT5677 ALSA SoC audio codec driver
+ *
+ * Copyright 2013 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5677_SPI_H__
+#define __RT5677_SPI_H__
+
+#define RT5677_SPI_BUF_LEN 240
+#define RT5677_SPI_CMD_BURST_WRITE 0x05
+
+int rt5677_spi_write(u8 *txbuf, size_t len);
+int rt5677_spi_burst_write(u32 addr, const struct firmware *fw);
+
+#endif /* __RT5677_SPI_H__ */
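The header exports just these two helpers; the codec driver in the next file uses them to stream DSP firmware over SPI. A rough usage sketch, assuming only the prototypes above and the rt5677.c hunk below (the wrapper function itself is illustrative, not part of the interface):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/types.h>

#include "rt5677-spi.h"

/* Illustrative caller: fetch a firmware image and stream it to a DSP memory
 * address via the exported burst-write helper.  rt5677.c below passes
 * 0x50000000 and 0x60000000 as load addresses; the image size should be a
 * multiple of 8 bytes.
 */
static int rt5677_spi_load_image(struct device *dev, const char *name, u32 addr)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	ret = rt5677_spi_burst_write(addr, fw);
	release_firmware(fw);

	return ret;
}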
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 16aa4d99a713..81fe1464d268 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -20,6 +20,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/firmware.h>
#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -31,6 +32,7 @@
#include "rl6231.h"
#include "rt5677.h"
+#include "rt5677-spi.h"
#define RT5677_DEVICE_ID 0x6327
@@ -53,12 +55,13 @@ static const struct regmap_range_cfg rt5677_ranges[] = {
};
static const struct reg_default init_list[] = {
+ {RT5677_ASRC_12, 0x0018},
{RT5677_PR_BASE + 0x3d, 0x364d},
- {RT5677_PR_BASE + 0x17, 0x4fc0},
- {RT5677_PR_BASE + 0x13, 0x0312},
- {RT5677_PR_BASE + 0x1e, 0x0000},
- {RT5677_PR_BASE + 0x12, 0x0eaa},
- {RT5677_PR_BASE + 0x14, 0x018a},
+ {RT5677_PR_BASE + 0x17, 0x4fc0},
+ {RT5677_PR_BASE + 0x13, 0x0312},
+ {RT5677_PR_BASE + 0x1e, 0x0000},
+ {RT5677_PR_BASE + 0x12, 0x0eaa},
+ {RT5677_PR_BASE + 0x14, 0x018a},
};
#define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list)
@@ -171,7 +174,7 @@ static const struct reg_default rt5677_reg[] = {
{RT5677_ASRC_9 , 0x0000},
{RT5677_ASRC_10 , 0x0000},
{RT5677_ASRC_11 , 0x0000},
- {RT5677_ASRC_12 , 0x0008},
+ {RT5677_ASRC_12 , 0x0018},
{RT5677_ASRC_13 , 0x0000},
{RT5677_ASRC_14 , 0x0000},
{RT5677_ASRC_15 , 0x0000},
@@ -537,10 +540,232 @@ static bool rt5677_readable_register(struct device *dev, unsigned int reg)
}
}
+/**
+ * rt5677_dsp_mode_i2c_write_addr - Write value to address in DSP mode.
+ * @rt5677: Private Data.
+ * @addr: Address index.
+ * @value: Data to write.
+ * @opcode: Opcode to issue.
+ *
+ * Returns 0 for success or negative error code.
+ */
+static int rt5677_dsp_mode_i2c_write_addr(struct rt5677_priv *rt5677,
+ unsigned int addr, unsigned int value, unsigned int opcode)
+{
+ struct snd_soc_codec *codec = rt5677->codec;
+ int ret;
+
+ mutex_lock(&rt5677->dsp_cmd_lock);
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_ADDR_MSB,
+ addr >> 16);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set addr msb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_ADDR_LSB,
+ addr & 0xffff);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set addr lsb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_DATA_MSB,
+ value >> 16);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set data msb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_DATA_LSB,
+ value & 0xffff);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set data lsb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_OP_CODE,
+ opcode);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set op code value: %d\n", ret);
+ goto err;
+ }
+
+err:
+ mutex_unlock(&rt5677->dsp_cmd_lock);
+
+ return ret;
+}
+
+/**
+ * rt5677_dsp_mode_i2c_read_addr - Read value from address in DSP mode.
+ * @rt5677: Private Data.
+ * @addr: Address index.
+ * @value: Buffer for the value read back.
+ *
+ * Returns 0 for success or negative error code.
+ */
+static int rt5677_dsp_mode_i2c_read_addr(
+ struct rt5677_priv *rt5677, unsigned int addr, unsigned int *value)
+{
+ struct snd_soc_codec *codec = rt5677->codec;
+ int ret;
+ unsigned int msb, lsb;
+
+ mutex_lock(&rt5677->dsp_cmd_lock);
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_ADDR_MSB,
+ addr >> 16);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set addr msb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_ADDR_LSB,
+ addr & 0xffff);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set addr lsb value: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_write(rt5677->regmap_physical, RT5677_DSP_I2C_OP_CODE,
+ 0x0002);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set op code value: %d\n", ret);
+ goto err;
+ }
+
+ regmap_read(rt5677->regmap_physical, RT5677_DSP_I2C_DATA_MSB, &msb);
+ regmap_read(rt5677->regmap_physical, RT5677_DSP_I2C_DATA_LSB, &lsb);
+ *value = (msb << 16) | lsb;
+
+err:
+ mutex_unlock(&rt5677->dsp_cmd_lock);
+
+ return ret;
+}
+
+/**
+ * rt5677_dsp_mode_i2c_write - Write register in DSP mode.
+ * @rt5677: Private Data.
+ * @reg: Register index.
+ * @value: Register data to write.
+ *
+ * Returns 0 for success or negative error code.
+ */
+static int rt5677_dsp_mode_i2c_write(struct rt5677_priv *rt5677,
+ unsigned int reg, unsigned int value)
+{
+ return rt5677_dsp_mode_i2c_write_addr(rt5677, 0x18020000 + reg * 2,
+ value, 0x0001);
+}
+
+/**
+ * rt5677_dsp_mode_i2c_read - Read register in DSP mode.
+ * @rt5677: Private Data.
+ * @reg: Register index.
+ * @value: Buffer for the register data read back.
+ *
+ * Returns 0 for success or negative error code.
+ */
+static int rt5677_dsp_mode_i2c_read(
+ struct rt5677_priv *rt5677, unsigned int reg, unsigned int *value)
+{
+ int ret = rt5677_dsp_mode_i2c_read_addr(rt5677, 0x18020000 + reg * 2,
+ value);
+
+ *value &= 0xffff;
+
+ return ret;
+}
+
+static void rt5677_set_dsp_mode(struct snd_soc_codec *codec, bool on)
+{
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+
+ if (on) {
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x2, 0x2);
+ rt5677->is_dsp_mode = true;
+ } else {
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x2, 0x0);
+ rt5677->is_dsp_mode = false;
+ }
+}
+
+static int rt5677_set_dsp_vad(struct snd_soc_codec *codec, bool on)
+{
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ static bool activity;
+ int ret;
+
+ if (on && !activity) {
+ activity = true;
+
+ regcache_cache_only(rt5677->regmap, false);
+ regcache_cache_bypass(rt5677->regmap, true);
+
+ regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x1, 0x1);
+ regmap_update_bits(rt5677->regmap,
+ RT5677_PR_BASE + RT5677_BIAS_CUR4, 0x0f00, 0x0f00);
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
+ RT5677_LDO1_SEL_MASK, 0x0);
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
+ RT5677_PWR_LDO1, RT5677_PWR_LDO1);
+ regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK1,
+ RT5677_MCLK_SRC_MASK, RT5677_MCLK2_SRC);
+ regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
+ RT5677_PLL2_PR_SRC_MASK | RT5677_DSP_CLK_SRC_MASK,
+ RT5677_PLL2_PR_SRC_MCLK2 | RT5677_DSP_CLK_SRC_BYPASS);
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP2, 0x07ff);
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x07fd);
+ rt5677_set_dsp_mode(codec, true);
+
+ ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
+ codec->dev);
+ if (ret == 0) {
+ rt5677_spi_burst_write(0x50000000, rt5677->fw1);
+ release_firmware(rt5677->fw1);
+ }
+
+ ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
+ codec->dev);
+ if (ret == 0) {
+ rt5677_spi_burst_write(0x60000000, rt5677->fw2);
+ release_firmware(rt5677->fw2);
+ }
+
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x0);
+
+ regcache_cache_bypass(rt5677->regmap, false);
+ regcache_cache_only(rt5677->regmap, true);
+ } else if (!on && activity) {
+ activity = false;
+
+ regcache_cache_only(rt5677->regmap, false);
+ regcache_cache_bypass(rt5677->regmap, true);
+
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x1);
+ rt5677_set_dsp_mode(codec, false);
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x0001);
+
+ regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
+
+ regcache_cache_bypass(rt5677->regmap, false);
+ regcache_mark_dirty(rt5677->regmap);
+ regcache_sync(rt5677->regmap);
+ }
+
+ return 0;
+}
+
static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
static const DECLARE_TLV_DB_SCALE(st_vol_tlv, -4650, 150, 0);
@@ -556,6 +781,31 @@ static unsigned int bst_tlv[] = {
8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
};
+static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
+
+ return 0;
+}
+
+static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+
+ rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
+
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ rt5677_set_dsp_vad(codec, rt5677->dsp_vad_en);
+
+ return 0;
+}
+
static const struct snd_kcontrol_new rt5677_snd_controls[] = {
/* OUTPUT Control */
SOC_SINGLE("OUT1 Playback Switch", RT5677_LOUT1,
@@ -567,13 +817,13 @@ static const struct snd_kcontrol_new rt5677_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5677_DAC1_DIG_VOL,
- RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 87, 0, dac_vol_tlv),
SOC_DOUBLE_TLV("DAC2 Playback Volume", RT5677_DAC2_DIG_VOL,
- RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 87, 0, dac_vol_tlv),
SOC_DOUBLE_TLV("DAC3 Playback Volume", RT5677_DAC3_DIG_VOL,
- RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 87, 0, dac_vol_tlv),
SOC_DOUBLE_TLV("DAC4 Playback Volume", RT5677_DAC4_DIG_VOL,
- RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5677_L_VOL_SFT, RT5677_R_VOL_SFT, 87, 0, dac_vol_tlv),
/* IN1/IN2 Control */
SOC_SINGLE_TLV("IN1 Boost", RT5677_IN1, RT5677_BST_SFT1, 8, 0, bst_tlv),
@@ -592,19 +842,19 @@ static const struct snd_kcontrol_new rt5677_snd_controls[] = {
RT5677_L_MUTE_SFT, RT5677_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("ADC1 Capture Volume", RT5677_STO1_ADC_DIG_VOL,
- RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 127, 0,
+ RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 63, 0,
adc_vol_tlv),
SOC_DOUBLE_TLV("ADC2 Capture Volume", RT5677_STO2_ADC_DIG_VOL,
- RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 127, 0,
+ RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 63, 0,
adc_vol_tlv),
SOC_DOUBLE_TLV("ADC3 Capture Volume", RT5677_STO3_ADC_DIG_VOL,
- RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 127, 0,
+ RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 63, 0,
adc_vol_tlv),
SOC_DOUBLE_TLV("ADC4 Capture Volume", RT5677_STO4_ADC_DIG_VOL,
- RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 127, 0,
+ RT5677_STO1_ADC_L_VOL_SFT, RT5677_STO1_ADC_R_VOL_SFT, 63, 0,
adc_vol_tlv),
SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5677_MONO_ADC_DIG_VOL,
- RT5677_MONO_ADC_L_VOL_SFT, RT5677_MONO_ADC_R_VOL_SFT, 127, 0,
+ RT5677_MONO_ADC_L_VOL_SFT, RT5677_MONO_ADC_R_VOL_SFT, 63, 0,
adc_vol_tlv),
/* Sidetone Control */
@@ -627,6 +877,9 @@ static const struct snd_kcontrol_new rt5677_snd_controls[] = {
SOC_DOUBLE_TLV("Mono ADC Boost Volume", RT5677_ADC_BST_CTRL2,
RT5677_MONO_ADC_L_BST_SFT, RT5677_MONO_ADC_R_BST_SFT, 3, 0,
adc_bst_tlv),
+
+ SOC_SINGLE_EXT("DSP VAD Switch", SND_SOC_NOPM, 0, 1, 0,
+ rt5677_dsp_vad_get, rt5677_dsp_vad_put),
};
/**
@@ -1086,7 +1339,7 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_ib45_bypass_src_mux =
SOC_DAPM_ENUM("IB45 Bypass Source", rt5677_ib45_bypass_src_enum);
-/* Stereo ADC Source 2 */ /* MX-27 MX26 MX25 [11:10] */
+/* Stereo ADC Source 2 */ /* MX-27 MX26 MX25 [11:10] */
static const char * const rt5677_stereo_adc2_src[] = {
"DD MIX1", "DMIC", "Stereo DAC MIX"
};
@@ -1171,7 +1424,7 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_sto2_adc_lr_mux =
SOC_DAPM_ENUM("Stereo2 ADC LR Source", rt5677_stereo2_adc_lr_enum);
-/* Stereo1 ADC Source 1 */ /* MX-27 MX26 MX25 [13:12] */
+/* Stereo1 ADC Source 1 */ /* MX-27 MX26 MX25 [13:12] */
static const char * const rt5677_stereo_adc1_src[] = {
"DD MIX1", "ADC1/2", "Stereo DAC MIX"
};
@@ -1443,7 +1696,7 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_pdm2_r_mux =
SOC_DAPM_ENUM("PDM2 Source", rt5677_pdm2_r_enum);
-/* TDM IF1/2 SLB ADC1 Data Selection */ /* MX-3C MX-41 [5:4] MX-08 [1:0]*/
+/* TDM IF1/2 SLB ADC1 Data Selection */ /* MX-3C MX-41 [5:4] MX-08 [1:0] */
static const char * const rt5677_if12_adc1_src[] = {
"STO1 ADC MIX", "OB01", "VAD ADC"
};
@@ -1521,7 +1774,7 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_slb_adc3_mux =
SOC_DAPM_ENUM("SLB ADC3 Source", rt5677_slb_adc3_enum);
-/* TDM IF1/2 SLB ADC4 Data Selection */ /* MX-3C MX-41 [11:10] MX-08 [7:6] */
+/* TDM IF1/2 SLB ADC4 Data Selection */ /* MX-3C MX-41 [11:10] MX-08 [7:6] */
static const char * const rt5677_if12_adc4_src[] = {
"STO4 ADC MIX", "OB67", "OB01"
};
@@ -1547,7 +1800,7 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_slb_adc4_mux =
SOC_DAPM_ENUM("SLB ADC4 Source", rt5677_slb_adc4_enum);
-/* Interface3/4 ADC Data Input */ /* MX-2F [3:0] MX-30 [7:4]*/
+/* Interface3/4 ADC Data Input */ /* MX-2F [3:0] MX-30 [7:4] */
static const char * const rt5677_if34_adc_src[] = {
"STO1 ADC MIX", "STO2 ADC MIX", "STO3 ADC MIX", "STO4 ADC MIX",
"MONO ADC MIX", "OB01", "OB23", "VAD ADC"
@@ -1567,6 +1820,213 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5677_if4_adc_mux =
SOC_DAPM_ENUM("IF4 ADC Source", rt5677_if4_adc_enum);
+/* TDM IF1/2 ADC Data Selection */ /* MX-3B MX-40 [7:6][5:4][3:2][1:0] */
+static const char * const rt5677_if12_adc_swap_src[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_adc1_swap_enum, RT5677_TDM1_CTRL1,
+ RT5677_IF1_ADC1_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if1_adc1_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC1 Swap Source", rt5677_if1_adc1_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_adc2_swap_enum, RT5677_TDM1_CTRL1,
+ RT5677_IF1_ADC2_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if1_adc2_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC2 Swap Source", rt5677_if1_adc2_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_adc3_swap_enum, RT5677_TDM1_CTRL1,
+ RT5677_IF1_ADC3_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if1_adc3_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC3 Swap Source", rt5677_if1_adc3_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_adc4_swap_enum, RT5677_TDM1_CTRL1,
+ RT5677_IF1_ADC4_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if1_adc4_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC4 Swap Source", rt5677_if1_adc4_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_adc1_swap_enum, RT5677_TDM2_CTRL1,
+	RT5677_IF2_ADC1_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if2_adc1_swap_mux =
+	SOC_DAPM_ENUM("IF2 ADC1 Swap Source", rt5677_if2_adc1_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_adc2_swap_enum, RT5677_TDM2_CTRL1,
+ RT5677_IF2_ADC2_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if2_adc2_swap_mux =
+ SOC_DAPM_ENUM("IF2 ADC2 Swap Source", rt5677_if2_adc2_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_adc3_swap_enum, RT5677_TDM2_CTRL1,
+ RT5677_IF2_ADC3_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if2_adc3_swap_mux =
+ SOC_DAPM_ENUM("IF2 ADC3 Swap Source", rt5677_if2_adc3_swap_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_adc4_swap_enum, RT5677_TDM2_CTRL1,
+ RT5677_IF2_ADC4_SWAP_SFT, rt5677_if12_adc_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if2_adc4_swap_mux =
+ SOC_DAPM_ENUM("IF2 ADC4 Swap Source", rt5677_if2_adc4_swap_enum);
+
+/* TDM IF1 ADC Data Selection */ /* MX-3C [2:0] */
+static const char * const rt5677_if1_adc_tdm_swap_src[] = {
+ "1/2/3/4", "2/1/3/4", "2/3/1/4", "4/1/2/3", "1/3/2/4", "1/4/2/3",
+ "3/1/2/4", "3/4/1/2"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_adc_tdm_swap_enum, RT5677_TDM1_CTRL2,
+ RT5677_IF1_ADC_CTRL_SFT, rt5677_if1_adc_tdm_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if1_adc_tdm_swap_mux =
+ SOC_DAPM_ENUM("IF1 ADC TDM Swap Source", rt5677_if1_adc_tdm_swap_enum);
+
+/* TDM IF2 ADC Data Selection */ /* MX-41[2:0] */
+static const char * const rt5677_if2_adc_tdm_swap_src[] = {
+ "1/2/3/4", "2/1/3/4", "3/1/2/4", "4/1/2/3", "1/3/2/4", "1/4/2/3",
+ "2/3/1/4", "3/4/1/2"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_adc_tdm_swap_enum, RT5677_TDM2_CTRL2,
+ RT5677_IF2_ADC_CTRL_SFT, rt5677_if2_adc_tdm_swap_src);
+
+static const struct snd_kcontrol_new rt5677_if2_adc_tdm_swap_mux =
+ SOC_DAPM_ENUM("IF2 ADC TDM Swap Source", rt5677_if2_adc_tdm_swap_enum);
+
+/* TDM IF1/2 DAC Data Selection */ /* MX-3E[14:12][10:8][6:4][2:0]
+ MX-3F[14:12][10:8][6:4][2:0]
+ MX-43[14:12][10:8][6:4][2:0]
+ MX-44[14:12][10:8][6:4][2:0] */
+static const char * const rt5677_if12_dac_tdm_sel_src[] = {
+ "Slot0", "Slot1", "Slot2", "Slot3", "Slot4", "Slot5", "Slot6", "Slot7"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac0_tdm_sel_enum, RT5677_TDM1_CTRL4,
+ RT5677_IF1_DAC0_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac0_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC0 TDM Source", rt5677_if1_dac0_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac1_tdm_sel_enum, RT5677_TDM1_CTRL4,
+ RT5677_IF1_DAC1_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac1_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC1 TDM Source", rt5677_if1_dac1_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac2_tdm_sel_enum, RT5677_TDM1_CTRL4,
+ RT5677_IF1_DAC2_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac2_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC2 TDM Source", rt5677_if1_dac2_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac3_tdm_sel_enum, RT5677_TDM1_CTRL4,
+ RT5677_IF1_DAC3_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac3_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC3 TDM Source", rt5677_if1_dac3_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac4_tdm_sel_enum, RT5677_TDM1_CTRL5,
+ RT5677_IF1_DAC4_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac4_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC4 TDM Source", rt5677_if1_dac4_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac5_tdm_sel_enum, RT5677_TDM1_CTRL5,
+ RT5677_IF1_DAC5_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac5_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC5 TDM Source", rt5677_if1_dac5_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac6_tdm_sel_enum, RT5677_TDM1_CTRL5,
+ RT5677_IF1_DAC6_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac6_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC6 TDM Source", rt5677_if1_dac6_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if1_dac7_tdm_sel_enum, RT5677_TDM1_CTRL5,
+ RT5677_IF1_DAC7_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if1_dac7_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF1 DAC7 TDM Source", rt5677_if1_dac7_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac0_tdm_sel_enum, RT5677_TDM2_CTRL4,
+ RT5677_IF2_DAC0_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac0_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC0 TDM Source", rt5677_if2_dac0_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac1_tdm_sel_enum, RT5677_TDM2_CTRL4,
+ RT5677_IF2_DAC1_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac1_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC1 TDM Source", rt5677_if2_dac1_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac2_tdm_sel_enum, RT5677_TDM2_CTRL4,
+ RT5677_IF2_DAC2_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac2_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC2 TDM Source", rt5677_if2_dac2_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac3_tdm_sel_enum, RT5677_TDM2_CTRL4,
+ RT5677_IF2_DAC3_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac3_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC3 TDM Source", rt5677_if2_dac3_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac4_tdm_sel_enum, RT5677_TDM2_CTRL5,
+ RT5677_IF2_DAC4_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac4_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC4 TDM Source", rt5677_if2_dac4_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac5_tdm_sel_enum, RT5677_TDM2_CTRL5,
+ RT5677_IF2_DAC5_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac5_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC5 TDM Source", rt5677_if2_dac5_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac6_tdm_sel_enum, RT5677_TDM2_CTRL5,
+ RT5677_IF2_DAC6_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac6_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC6 TDM Source", rt5677_if2_dac6_tdm_sel_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5677_if2_dac7_tdm_sel_enum, RT5677_TDM2_CTRL5,
+ RT5677_IF2_DAC7_SFT, rt5677_if12_dac_tdm_sel_src);
+
+static const struct snd_kcontrol_new rt5677_if2_dac7_tdm_sel_mux =
+ SOC_DAPM_ENUM("IF2 DAC7 TDM Source", rt5677_if2_dac7_tdm_sel_enum);
+
static int rt5677_bst1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1678,6 +2138,77 @@ static int rt5677_set_micbias1_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5677_if1_adc_tdm_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_read(rt5677->regmap, RT5677_TDM1_CTRL2, &value);
+ if (value & RT5677_IF1_ADC_CTRL_MASK)
+ regmap_update_bits(rt5677->regmap, RT5677_TDM1_CTRL1,
+ RT5677_IF1_ADC_MODE_MASK,
+ RT5677_IF1_ADC_MODE_TDM);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5677_if2_adc_tdm_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_read(rt5677->regmap, RT5677_TDM2_CTRL2, &value);
+ if (value & RT5677_IF2_ADC_CTRL_MASK)
+ regmap_update_bits(rt5677->regmap, RT5677_TDM2_CTRL1,
+ RT5677_IF2_ADC_MODE_MASK,
+ RT5677_IF2_ADC_MODE_TDM);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (codec->dapm.bias_level != SND_SOC_BIAS_ON &&
+ !rt5677->is_vref_slow) {
+ mdelay(20);
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
+ RT5677_PWR_FV1 | RT5677_PWR_FV2,
+ RT5677_PWR_FV1 | RT5677_PWR_FV2);
+ rt5677->is_vref_slow = true;
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
@@ -1837,10 +2368,8 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_PGA("Stereo4 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("Sto2 ADC LR MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("Mono ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("IF1_ADC1", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("IF1_ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("IF1_ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF2 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
/* DSP */
SND_SOC_DAPM_MUX("IB9 Mux", SND_SOC_NOPM, 0, 0,
@@ -1963,6 +2492,17 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
&rt5677_if1_adc3_mux),
SND_SOC_DAPM_MUX("IF1 ADC4 Mux", SND_SOC_NOPM, 0, 0,
&rt5677_if1_adc4_mux),
+ SND_SOC_DAPM_MUX("IF1 ADC1 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_adc1_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 ADC2 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_adc2_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 ADC3 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_adc3_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 ADC4 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_adc4_swap_mux),
+ SND_SOC_DAPM_MUX_E("IF1 ADC TDM Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_adc_tdm_swap_mux, rt5677_if1_adc_tdm_event,
+ SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_MUX("IF2 ADC1 Mux", SND_SOC_NOPM, 0, 0,
&rt5677_if2_adc1_mux),
SND_SOC_DAPM_MUX("IF2 ADC2 Mux", SND_SOC_NOPM, 0, 0,
@@ -1971,6 +2511,17 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
&rt5677_if2_adc3_mux),
SND_SOC_DAPM_MUX("IF2 ADC4 Mux", SND_SOC_NOPM, 0, 0,
&rt5677_if2_adc4_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC1 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_adc1_swap_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC2 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_adc2_swap_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC3 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_adc3_swap_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC4 Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_adc4_swap_mux),
+ SND_SOC_DAPM_MUX_E("IF2 ADC TDM Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_adc_tdm_swap_mux, rt5677_if2_adc_tdm_event,
+ SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_MUX("IF3 ADC Mux", SND_SOC_NOPM, 0, 0,
&rt5677_if3_adc_mux),
SND_SOC_DAPM_MUX("IF4 ADC Mux", SND_SOC_NOPM, 0, 0,
@@ -1984,6 +2535,40 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_MUX("SLB ADC4 Mux", SND_SOC_NOPM, 0, 0,
&rt5677_slb_adc4_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC0 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac0_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac1_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac2_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC3 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac3_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC4 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac4_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC5 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac5_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC6 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac6_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF1 DAC7 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if1_dac7_tdm_sel_mux),
+
+ SND_SOC_DAPM_MUX("IF2 DAC0 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac0_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac1_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac2_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC3 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac3_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC4 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac4_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC5 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac5_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC6 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac6_tdm_sel_mux),
+ SND_SOC_DAPM_MUX("IF2 DAC7 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5677_if2_dac7_tdm_sel_mux),
+
/* Audio Interface */
SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
@@ -2022,7 +2607,7 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
rt5677_ob_7_mix, ARRAY_SIZE(rt5677_ob_7_mix)),
/* Output Side */
- /* DAC mixer before sound effect */
+ /* DAC mixer before sound effect */
SND_SOC_DAPM_MIXER("DAC1 MIXL", SND_SOC_NOPM, 0, 0,
rt5677_dac_l_mix, ARRAY_SIZE(rt5677_dac_l_mix)),
SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0,
@@ -2109,13 +2694,20 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_MUX("PDM2 R Mux", RT5677_PDM_OUT_CTRL, RT5677_M_PDM2_R_SFT,
1, &rt5677_pdm2_r_mux),
- SND_SOC_DAPM_PGA_S("LOUT1 amp", 1, RT5677_PWR_ANLG1, RT5677_PWR_LO1_BIT,
+ SND_SOC_DAPM_PGA_S("LOUT1 amp", 0, RT5677_PWR_ANLG1, RT5677_PWR_LO1_BIT,
0, NULL, 0),
- SND_SOC_DAPM_PGA_S("LOUT2 amp", 1, RT5677_PWR_ANLG1, RT5677_PWR_LO2_BIT,
+ SND_SOC_DAPM_PGA_S("LOUT2 amp", 0, RT5677_PWR_ANLG1, RT5677_PWR_LO2_BIT,
0, NULL, 0),
- SND_SOC_DAPM_PGA_S("LOUT3 amp", 1, RT5677_PWR_ANLG1, RT5677_PWR_LO3_BIT,
+ SND_SOC_DAPM_PGA_S("LOUT3 amp", 0, RT5677_PWR_ANLG1, RT5677_PWR_LO3_BIT,
0, NULL, 0),
+ SND_SOC_DAPM_PGA_S("LOUT1 vref", 1, SND_SOC_NOPM, 0, 0,
+ rt5677_vref_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_S("LOUT2 vref", 1, SND_SOC_NOPM, 0, 0,
+ rt5677_vref_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_S("LOUT3 vref", 1, SND_SOC_NOPM, 0, 0,
+ rt5677_vref_event, SND_SOC_DAPM_POST_PMU),
+
/* Output Lines */
SND_SOC_DAPM_OUTPUT("LOUT1"),
SND_SOC_DAPM_OUTPUT("LOUT2"),
@@ -2124,6 +2716,8 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("PDM1R"),
SND_SOC_DAPM_OUTPUT("PDM2L"),
SND_SOC_DAPM_OUTPUT("PDM2R"),
+
+ SND_SOC_DAPM_POST("vref", rt5677_vref_event),
};
static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
@@ -2354,11 +2948,42 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "IF1 ADC4 Mux", "OB67", "OB67" },
{ "IF1 ADC4 Mux", "OB01", "OB01 Bypass Mux" },
+ { "IF1 ADC1 Swap Mux", "L/R", "IF1 ADC1 Mux" },
+ { "IF1 ADC1 Swap Mux", "R/L", "IF1 ADC1 Mux" },
+ { "IF1 ADC1 Swap Mux", "L/L", "IF1 ADC1 Mux" },
+ { "IF1 ADC1 Swap Mux", "R/R", "IF1 ADC1 Mux" },
+
+ { "IF1 ADC2 Swap Mux", "L/R", "IF1 ADC2 Mux" },
+ { "IF1 ADC2 Swap Mux", "R/L", "IF1 ADC2 Mux" },
+ { "IF1 ADC2 Swap Mux", "L/L", "IF1 ADC2 Mux" },
+ { "IF1 ADC2 Swap Mux", "R/R", "IF1 ADC2 Mux" },
+
+ { "IF1 ADC3 Swap Mux", "L/R", "IF1 ADC3 Mux" },
+ { "IF1 ADC3 Swap Mux", "R/L", "IF1 ADC3 Mux" },
+ { "IF1 ADC3 Swap Mux", "L/L", "IF1 ADC3 Mux" },
+ { "IF1 ADC3 Swap Mux", "R/R", "IF1 ADC3 Mux" },
+
+ { "IF1 ADC4 Swap Mux", "L/R", "IF1 ADC4 Mux" },
+ { "IF1 ADC4 Swap Mux", "R/L", "IF1 ADC4 Mux" },
+ { "IF1 ADC4 Swap Mux", "L/L", "IF1 ADC4 Mux" },
+ { "IF1 ADC4 Swap Mux", "R/R", "IF1 ADC4 Mux" },
+
+ { "IF1 ADC", NULL, "IF1 ADC1 Swap Mux" },
+ { "IF1 ADC", NULL, "IF1 ADC2 Swap Mux" },
+ { "IF1 ADC", NULL, "IF1 ADC3 Swap Mux" },
+ { "IF1 ADC", NULL, "IF1 ADC4 Swap Mux" },
+
+ { "IF1 ADC TDM Swap Mux", "1/2/3/4", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "2/1/3/4", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "2/3/1/4", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "4/1/2/3", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "1/3/2/4", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "1/4/2/3", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "3/1/2/4", "IF1 ADC" },
+ { "IF1 ADC TDM Swap Mux", "3/4/1/2", "IF1 ADC" },
+
{ "AIF1TX", NULL, "I2S1" },
- { "AIF1TX", NULL, "IF1 ADC1 Mux" },
- { "AIF1TX", NULL, "IF1 ADC2 Mux" },
- { "AIF1TX", NULL, "IF1 ADC3 Mux" },
- { "AIF1TX", NULL, "IF1 ADC4 Mux" },
+ { "AIF1TX", NULL, "IF1 ADC TDM Swap Mux" },
{ "IF2 ADC1 Mux", "STO1 ADC MIX", "Stereo1 ADC MIX" },
{ "IF2 ADC1 Mux", "OB01", "OB01 Bypass Mux" },
@@ -2375,11 +3000,42 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "IF2 ADC4 Mux", "OB67", "OB67" },
{ "IF2 ADC4 Mux", "OB01", "OB01 Bypass Mux" },
+ { "IF2 ADC1 Swap Mux", "L/R", "IF2 ADC1 Mux" },
+ { "IF2 ADC1 Swap Mux", "R/L", "IF2 ADC1 Mux" },
+ { "IF2 ADC1 Swap Mux", "L/L", "IF2 ADC1 Mux" },
+ { "IF2 ADC1 Swap Mux", "R/R", "IF2 ADC1 Mux" },
+
+ { "IF2 ADC2 Swap Mux", "L/R", "IF2 ADC2 Mux" },
+ { "IF2 ADC2 Swap Mux", "R/L", "IF2 ADC2 Mux" },
+ { "IF2 ADC2 Swap Mux", "L/L", "IF2 ADC2 Mux" },
+ { "IF2 ADC2 Swap Mux", "R/R", "IF2 ADC2 Mux" },
+
+ { "IF2 ADC3 Swap Mux", "L/R", "IF2 ADC3 Mux" },
+ { "IF2 ADC3 Swap Mux", "R/L", "IF2 ADC3 Mux" },
+ { "IF2 ADC3 Swap Mux", "L/L", "IF2 ADC3 Mux" },
+ { "IF2 ADC3 Swap Mux", "R/R", "IF2 ADC3 Mux" },
+
+ { "IF2 ADC4 Swap Mux", "L/R", "IF2 ADC4 Mux" },
+ { "IF2 ADC4 Swap Mux", "R/L", "IF2 ADC4 Mux" },
+ { "IF2 ADC4 Swap Mux", "L/L", "IF2 ADC4 Mux" },
+ { "IF2 ADC4 Swap Mux", "R/R", "IF2 ADC4 Mux" },
+
+ { "IF2 ADC", NULL, "IF2 ADC1 Swap Mux" },
+ { "IF2 ADC", NULL, "IF2 ADC2 Swap Mux" },
+ { "IF2 ADC", NULL, "IF2 ADC3 Swap Mux" },
+ { "IF2 ADC", NULL, "IF2 ADC4 Swap Mux" },
+
+ { "IF2 ADC TDM Swap Mux", "1/2/3/4", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "2/1/3/4", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "3/1/2/4", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "4/1/2/3", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "1/3/2/4", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "1/4/2/3", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "2/3/1/4", "IF2 ADC" },
+ { "IF2 ADC TDM Swap Mux", "3/4/1/2", "IF2 ADC" },
+
{ "AIF2TX", NULL, "I2S2" },
- { "AIF2TX", NULL, "IF2 ADC1 Mux" },
- { "AIF2TX", NULL, "IF2 ADC2 Mux" },
- { "AIF2TX", NULL, "IF2 ADC3 Mux" },
- { "AIF2TX", NULL, "IF2 ADC4 Mux" },
+ { "AIF2TX", NULL, "IF2 ADC TDM Swap Mux" },
{ "IF3 ADC Mux", "STO1 ADC MIX", "Stereo1 ADC MIX" },
{ "IF3 ADC Mux", "STO2 ADC MIX", "Stereo2 ADC MIX" },
@@ -2569,14 +3225,86 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "IF1 DAC6", NULL, "I2S1" },
{ "IF1 DAC7", NULL, "I2S1" },
- { "IF1 DAC01", NULL, "IF1 DAC0" },
- { "IF1 DAC01", NULL, "IF1 DAC1" },
- { "IF1 DAC23", NULL, "IF1 DAC2" },
- { "IF1 DAC23", NULL, "IF1 DAC3" },
- { "IF1 DAC45", NULL, "IF1 DAC4" },
- { "IF1 DAC45", NULL, "IF1 DAC5" },
- { "IF1 DAC67", NULL, "IF1 DAC6" },
- { "IF1 DAC67", NULL, "IF1 DAC7" },
+ { "IF1 DAC0 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC0 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC0 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC0 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC0 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC0 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC0 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC0 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC1 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC1 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC1 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC1 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC1 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC1 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC1 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC1 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC2 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC2 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC2 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC2 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC2 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC2 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC2 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC2 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC3 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC3 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC3 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC3 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC3 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC3 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC3 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC3 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC4 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC4 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC4 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC4 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC4 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC4 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC4 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC4 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC5 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC5 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC5 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC5 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC5 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC5 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC5 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC5 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC6 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC6 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC6 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC6 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC6 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC6 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC6 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC6 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC7 Mux", "Slot0", "IF1 DAC0" },
+ { "IF1 DAC7 Mux", "Slot1", "IF1 DAC1" },
+ { "IF1 DAC7 Mux", "Slot2", "IF1 DAC2" },
+ { "IF1 DAC7 Mux", "Slot3", "IF1 DAC3" },
+ { "IF1 DAC7 Mux", "Slot4", "IF1 DAC4" },
+ { "IF1 DAC7 Mux", "Slot5", "IF1 DAC5" },
+ { "IF1 DAC7 Mux", "Slot6", "IF1 DAC6" },
+ { "IF1 DAC7 Mux", "Slot7", "IF1 DAC7" },
+
+ { "IF1 DAC01", NULL, "IF1 DAC0 Mux" },
+ { "IF1 DAC01", NULL, "IF1 DAC1 Mux" },
+ { "IF1 DAC23", NULL, "IF1 DAC2 Mux" },
+ { "IF1 DAC23", NULL, "IF1 DAC3 Mux" },
+ { "IF1 DAC45", NULL, "IF1 DAC4 Mux" },
+ { "IF1 DAC45", NULL, "IF1 DAC5 Mux" },
+ { "IF1 DAC67", NULL, "IF1 DAC6 Mux" },
+ { "IF1 DAC67", NULL, "IF1 DAC7 Mux" },
{ "IF2 DAC0", NULL, "AIF2RX" },
{ "IF2 DAC1", NULL, "AIF2RX" },
@@ -2595,14 +3323,86 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "IF2 DAC6", NULL, "I2S2" },
{ "IF2 DAC7", NULL, "I2S2" },
- { "IF2 DAC01", NULL, "IF2 DAC0" },
- { "IF2 DAC01", NULL, "IF2 DAC1" },
- { "IF2 DAC23", NULL, "IF2 DAC2" },
- { "IF2 DAC23", NULL, "IF2 DAC3" },
- { "IF2 DAC45", NULL, "IF2 DAC4" },
- { "IF2 DAC45", NULL, "IF2 DAC5" },
- { "IF2 DAC67", NULL, "IF2 DAC6" },
- { "IF2 DAC67", NULL, "IF2 DAC7" },
+ { "IF2 DAC0 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC0 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC0 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC0 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC0 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC0 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC0 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC0 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC1 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC1 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC1 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC1 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC1 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC1 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC1 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC1 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC2 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC2 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC2 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC2 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC2 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC2 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC2 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC2 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC3 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC3 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC3 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC3 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC3 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC3 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC3 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC3 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC4 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC4 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC4 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC4 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC4 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC4 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC4 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC4 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC5 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC5 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC5 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC5 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC5 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC5 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC5 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC5 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC6 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC6 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC6 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC6 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC6 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC6 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC6 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC6 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC7 Mux", "Slot0", "IF2 DAC0" },
+ { "IF2 DAC7 Mux", "Slot1", "IF2 DAC1" },
+ { "IF2 DAC7 Mux", "Slot2", "IF2 DAC2" },
+ { "IF2 DAC7 Mux", "Slot3", "IF2 DAC3" },
+ { "IF2 DAC7 Mux", "Slot4", "IF2 DAC4" },
+ { "IF2 DAC7 Mux", "Slot5", "IF2 DAC5" },
+ { "IF2 DAC7 Mux", "Slot6", "IF2 DAC6" },
+ { "IF2 DAC7 Mux", "Slot7", "IF2 DAC7" },
+
+ { "IF2 DAC01", NULL, "IF2 DAC0 Mux" },
+ { "IF2 DAC01", NULL, "IF2 DAC1 Mux" },
+ { "IF2 DAC23", NULL, "IF2 DAC2 Mux" },
+ { "IF2 DAC23", NULL, "IF2 DAC3 Mux" },
+ { "IF2 DAC45", NULL, "IF2 DAC4 Mux" },
+ { "IF2 DAC45", NULL, "IF2 DAC5 Mux" },
+ { "IF2 DAC67", NULL, "IF2 DAC6 Mux" },
+ { "IF2 DAC67", NULL, "IF2 DAC7 Mux" },
{ "IF3 DAC", NULL, "AIF3RX" },
{ "IF3 DAC", NULL, "I2S3" },
@@ -2806,9 +3606,13 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "LOUT2 amp", NULL, "DAC 2" },
{ "LOUT3 amp", NULL, "DAC 3" },
- { "LOUT1", NULL, "LOUT1 amp" },
- { "LOUT2", NULL, "LOUT2 amp" },
- { "LOUT3", NULL, "LOUT3 amp" },
+ { "LOUT1 vref", NULL, "LOUT1 amp" },
+ { "LOUT2 vref", NULL, "LOUT2 amp" },
+ { "LOUT3 vref", NULL, "LOUT3 amp" },
+
+ { "LOUT1", NULL, "LOUT1 vref" },
+ { "LOUT2", NULL, "LOUT2 vref" },
+ { "LOUT3", NULL, "LOUT3 vref" },
{ "PDM1L", NULL, "PDM1 L Mux" },
{ "PDM1R", NULL, "PDM1 R Mux" },
@@ -2837,7 +3641,8 @@ static int rt5677_hw_params(struct snd_pcm_substream *substream,
rt5677->lrck[dai->id] = params_rate(params);
pre_div = rl6231_get_clk_info(rt5677->sysclk, rt5677->lrck[dai->id]);
if (pre_div < 0) {
- dev_err(codec->dev, "Unsupported clock setting\n");
+ dev_err(codec->dev, "Unsupported clock setting: sysclk=%dHz lrck=%dHz\n",
+ rt5677->sysclk, rt5677->lrck[dai->id]);
return -EINVAL;
}
frame_size = snd_soc_params_to_frame_size(params);
@@ -3181,6 +3986,8 @@ static int rt5677_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
+ rt5677_set_dsp_vad(codec, false);
+
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
RT5677_LDO1_SEL_MASK | RT5677_LDO2_SEL_MASK,
0x0055);
@@ -3188,14 +3995,12 @@ static int rt5677_set_bias_level(struct snd_soc_codec *codec,
RT5677_PR_BASE + RT5677_BIAS_CUR4,
0x0f00, 0x0f00);
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
+ RT5677_PWR_FV1 | RT5677_PWR_FV2 |
RT5677_PWR_VREF1 | RT5677_PWR_MB |
RT5677_PWR_BG | RT5677_PWR_VREF2,
RT5677_PWR_VREF1 | RT5677_PWR_MB |
RT5677_PWR_BG | RT5677_PWR_VREF2);
- mdelay(20);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
- RT5677_PWR_FV1 | RT5677_PWR_FV2,
- RT5677_PWR_FV1 | RT5677_PWR_FV2);
+ rt5677->is_vref_slow = false;
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
RT5677_PWR_CORE, RT5677_PWR_CORE);
regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC,
@@ -3214,6 +4019,9 @@ static int rt5677_set_bias_level(struct snd_soc_codec *codec,
regmap_write(rt5677->regmap, RT5677_PWR_ANLG2, 0x0000);
regmap_update_bits(rt5677->regmap,
RT5677_PR_BASE + RT5677_BIAS_CUR4, 0x0f00, 0x0000);
+
+ if (rt5677->dsp_vad_en)
+ rt5677_set_dsp_vad(codec, true);
break;
default:
@@ -3309,6 +4117,78 @@ static int rt5677_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
return 0;
}
+/* Configures the GPIO as one of:
+ * 0 - floating
+ * 1 - pull down
+ * 2 - pull up
+ */
+static void rt5677_gpio_config(struct rt5677_priv *rt5677, unsigned offset,
+ int value)
+{
+ int shift;
+
+ switch (offset) {
+ case RT5677_GPIO1 ... RT5677_GPIO2:
+ shift = 2 * (1 - offset);
+ regmap_update_bits(rt5677->regmap,
+ RT5677_PR_BASE + RT5677_DIG_IN_PIN_ST_CTRL2,
+ 0x3 << shift,
+ (value & 0x3) << shift);
+ break;
+
+ case RT5677_GPIO3 ... RT5677_GPIO6:
+ shift = 2 * (9 - offset);
+ regmap_update_bits(rt5677->regmap,
+ RT5677_PR_BASE + RT5677_DIG_IN_PIN_ST_CTRL3,
+ 0x3 << shift,
+ (value & 0x3) << shift);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int rt5677_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+ struct regmap_irq_chip_data *data = rt5677->irq_data;
+ int irq;
+
+ if (offset >= RT5677_GPIO1 && offset <= RT5677_GPIO3) {
+ if ((rt5677->pdata.jd1_gpio == 1 && offset == RT5677_GPIO1) ||
+ (rt5677->pdata.jd1_gpio == 2 &&
+ offset == RT5677_GPIO2) ||
+ (rt5677->pdata.jd1_gpio == 3 &&
+ offset == RT5677_GPIO3)) {
+ irq = RT5677_IRQ_JD1;
+ } else {
+ return -ENXIO;
+ }
+ }
+
+ if (offset >= RT5677_GPIO4 && offset <= RT5677_GPIO6) {
+ if ((rt5677->pdata.jd2_gpio == 1 && offset == RT5677_GPIO4) ||
+ (rt5677->pdata.jd2_gpio == 2 &&
+ offset == RT5677_GPIO5) ||
+ (rt5677->pdata.jd2_gpio == 3 &&
+ offset == RT5677_GPIO6)) {
+ irq = RT5677_IRQ_JD2;
+ } else if ((rt5677->pdata.jd3_gpio == 1 &&
+ offset == RT5677_GPIO4) ||
+ (rt5677->pdata.jd3_gpio == 2 &&
+ offset == RT5677_GPIO5) ||
+ (rt5677->pdata.jd3_gpio == 3 &&
+ offset == RT5677_GPIO6)) {
+ irq = RT5677_IRQ_JD3;
+ } else {
+ return -ENXIO;
+ }
+ }
+
+ return regmap_irq_get_virq(data, irq);
+}
+
static struct gpio_chip rt5677_template_chip = {
.label = "rt5677",
.owner = THIS_MODULE,
@@ -3316,6 +4196,7 @@ static struct gpio_chip rt5677_template_chip = {
.set = rt5677_gpio_set,
.direction_input = rt5677_gpio_direction_in,
.get = rt5677_gpio_get,
+ .to_irq = rt5677_to_irq,
.can_sleep = 1,
};
@@ -3341,6 +4222,11 @@ static void rt5677_free_gpio(struct i2c_client *i2c)
gpiochip_remove(&rt5677->gpio_chip);
}
#else
+static void rt5677_gpio_config(struct rt5677_priv *rt5677, unsigned offset,
+ int value)
+{
+}
+
static void rt5677_init_gpio(struct i2c_client *i2c)
{
}
@@ -3353,6 +4239,7 @@ static void rt5677_free_gpio(struct i2c_client *i2c)
static int rt5677_probe(struct snd_soc_codec *codec)
{
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ int i;
rt5677->codec = codec;
@@ -3371,6 +4258,37 @@ static int rt5677_probe(struct snd_soc_codec *codec)
regmap_write(rt5677->regmap, RT5677_DIG_MISC, 0x0020);
regmap_write(rt5677->regmap, RT5677_PWR_DSP2, 0x0c00);
+ for (i = 0; i < RT5677_GPIO_NUM; i++)
+ rt5677_gpio_config(rt5677, i, rt5677->pdata.gpio_config[i]);
+
+ if (rt5677->irq_data) {
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL1, 0x8000,
+ 0x8000);
+ regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x0018,
+ 0x0008);
+
+ if (rt5677->pdata.jd1_gpio)
+ regmap_update_bits(rt5677->regmap, RT5677_JD_CTRL1,
+ RT5677_SEL_GPIO_JD1_MASK,
+ rt5677->pdata.jd1_gpio <<
+ RT5677_SEL_GPIO_JD1_SFT);
+
+ if (rt5677->pdata.jd2_gpio)
+ regmap_update_bits(rt5677->regmap, RT5677_JD_CTRL1,
+ RT5677_SEL_GPIO_JD2_MASK,
+ rt5677->pdata.jd2_gpio <<
+ RT5677_SEL_GPIO_JD2_SFT);
+
+ if (rt5677->pdata.jd3_gpio)
+ regmap_update_bits(rt5677->regmap, RT5677_JD_CTRL1,
+ RT5677_SEL_GPIO_JD3_MASK,
+ rt5677->pdata.jd3_gpio <<
+ RT5677_SEL_GPIO_JD3_SFT);
+ }
+
+ mutex_init(&rt5677->dsp_cmd_lock);
+ mutex_init(&rt5677->dsp_pri_lock);
+
return 0;
}
@@ -3390,8 +4308,11 @@ static int rt5677_suspend(struct snd_soc_codec *codec)
{
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
- regcache_cache_only(rt5677->regmap, true);
- regcache_mark_dirty(rt5677->regmap);
+ if (!rt5677->dsp_vad_en) {
+ regcache_cache_only(rt5677->regmap, true);
+ regcache_mark_dirty(rt5677->regmap);
+ }
+
if (gpio_is_valid(rt5677->pow_ldo2))
gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
@@ -3406,8 +4327,11 @@ static int rt5677_resume(struct snd_soc_codec *codec)
gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
msleep(10);
}
- regcache_cache_only(rt5677->regmap, false);
- regcache_sync(rt5677->regmap);
+
+ if (!rt5677->dsp_vad_en) {
+ regcache_cache_only(rt5677->regmap, false);
+ regcache_sync(rt5677->regmap);
+ }
return 0;
}
@@ -3416,6 +4340,51 @@ static int rt5677_resume(struct snd_soc_codec *codec)
#define rt5677_resume NULL
#endif
+static int rt5677_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i2c_client *client = context;
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(client);
+
+ if (rt5677->is_dsp_mode) {
+ if (reg > 0xff) {
+ mutex_lock(&rt5677->dsp_pri_lock);
+ rt5677_dsp_mode_i2c_write(rt5677, RT5677_PRIV_INDEX,
+ reg & 0xff);
+ rt5677_dsp_mode_i2c_read(rt5677, RT5677_PRIV_DATA, val);
+ mutex_unlock(&rt5677->dsp_pri_lock);
+ } else {
+ rt5677_dsp_mode_i2c_read(rt5677, reg, val);
+ }
+ } else {
+ regmap_read(rt5677->regmap_physical, reg, val);
+ }
+
+ return 0;
+}
+
+static int rt5677_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(client);
+
+ if (rt5677->is_dsp_mode) {
+ if (reg > 0xff) {
+ mutex_lock(&rt5677->dsp_pri_lock);
+ rt5677_dsp_mode_i2c_write(rt5677, RT5677_PRIV_INDEX,
+ reg & 0xff);
+ rt5677_dsp_mode_i2c_write(rt5677, RT5677_PRIV_DATA,
+ val);
+ mutex_unlock(&rt5677->dsp_pri_lock);
+ } else {
+ rt5677_dsp_mode_i2c_write(rt5677, reg, val);
+ }
+ } else {
+ regmap_write(rt5677->regmap_physical, reg, val);
+ }
+
+ return 0;
+}
+
#define RT5677_STEREO_RATES SNDRV_PCM_RATE_8000_96000
#define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
@@ -3541,6 +4510,20 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5677 = {
.num_dapm_routes = ARRAY_SIZE(rt5677_dapm_routes),
};
+static const struct regmap_config rt5677_regmap_physical = {
+ .name = "physical",
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .max_register = RT5677_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5677_ranges) *
+ RT5677_PR_SPACING),
+ .readable_reg = rt5677_readable_register,
+
+ .cache_type = REGCACHE_NONE,
+ .ranges = rt5677_ranges,
+ .num_ranges = ARRAY_SIZE(rt5677_ranges),
+};
+
static const struct regmap_config rt5677_regmap = {
.reg_bits = 8,
.val_bits = 16,
@@ -3550,6 +4533,8 @@ static const struct regmap_config rt5677_regmap = {
.volatile_reg = rt5677_volatile_register,
.readable_reg = rt5677_readable_register,
+ .reg_read = rt5677_read,
+ .reg_write = rt5677_write,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = rt5677_reg,
@@ -3590,9 +4575,77 @@ static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np)
(rt5677->pow_ldo2 != -ENOENT))
return rt5677->pow_ldo2;
+ of_property_read_u8_array(np, "realtek,gpio-config",
+ rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
+
+ of_property_read_u32(np, "realtek,jd1-gpio", &rt5677->pdata.jd1_gpio);
+ of_property_read_u32(np, "realtek,jd2-gpio", &rt5677->pdata.jd2_gpio);
+ of_property_read_u32(np, "realtek,jd3-gpio", &rt5677->pdata.jd3_gpio);
+
+ return 0;
+}
+
+static struct regmap_irq rt5677_irqs[] = {
+ [RT5677_IRQ_JD1] = {
+ .reg_offset = 0,
+ .mask = RT5677_EN_IRQ_GPIO_JD1,
+ },
+ [RT5677_IRQ_JD2] = {
+ .reg_offset = 0,
+ .mask = RT5677_EN_IRQ_GPIO_JD2,
+ },
+ [RT5677_IRQ_JD3] = {
+ .reg_offset = 0,
+ .mask = RT5677_EN_IRQ_GPIO_JD3,
+ },
+};
+
+static struct regmap_irq_chip rt5677_irq_chip = {
+ .name = "rt5677",
+ .irqs = rt5677_irqs,
+ .num_irqs = ARRAY_SIZE(rt5677_irqs),
+
+ .num_regs = 1,
+ .status_base = RT5677_IRQ_CTRL1,
+ .mask_base = RT5677_IRQ_CTRL1,
+ .mask_invert = 1,
+};
+
+static int rt5677_init_irq(struct i2c_client *i2c)
+{
+ int ret;
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(i2c);
+
+ if (!rt5677->pdata.jd1_gpio &&
+ !rt5677->pdata.jd2_gpio &&
+ !rt5677->pdata.jd3_gpio)
+ return 0;
+
+ if (!i2c->irq) {
+ dev_err(&i2c->dev, "No interrupt specified\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_add_irq_chip(rt5677->regmap, i2c->irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0,
+ &rt5677_irq_chip, &rt5677->irq_data);
+
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to register IRQ chip: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
+static void rt5677_free_irq(struct i2c_client *i2c)
+{
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(i2c);
+
+ if (rt5677->irq_data)
+ regmap_del_irq_chip(i2c->irq, rt5677->irq_data);
+}
+
static int rt5677_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -3638,7 +4691,16 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
msleep(10);
}
- rt5677->regmap = devm_regmap_init_i2c(i2c, &rt5677_regmap);
+ rt5677->regmap_physical = devm_regmap_init_i2c(i2c,
+ &rt5677_regmap_physical);
+ if (IS_ERR(rt5677->regmap_physical)) {
+ ret = PTR_ERR(rt5677->regmap_physical);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ rt5677->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt5677_regmap);
if (IS_ERR(rt5677->regmap)) {
ret = PTR_ERR(rt5677->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -3690,6 +4752,7 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
}
rt5677_init_gpio(i2c);
+ rt5677_init_irq(i2c);
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5677,
rt5677_dai, ARRAY_SIZE(rt5677_dai));
@@ -3698,6 +4761,7 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
static int rt5677_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
+ rt5677_free_irq(i2c);
rt5677_free_gpio(i2c);
return 0;
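The rt5677 hunks above switch the codec from a plain I2C regmap to a regmap driven by custom reg_read/reg_write callbacks, so register accesses can be rerouted through an index/data pair while the on-board DSP owns the bus. A minimal sketch of that no-bus regmap pattern follows; all foo_* names, the FOO_PRIV_* registers and the indirect_mode flag are illustrative assumptions, not code from this patch:

#include <linux/regmap.h>

#define FOO_PRIV_INDEX	0x6a	/* hypothetical index/data register pair */
#define FOO_PRIV_DATA	0x6c

struct foo_priv {			/* hypothetical driver state */
	struct regmap *regmap;		/* logical map, served by the callbacks */
	struct regmap *regmap_physical;	/* direct map, e.g. devm_regmap_init_i2c() */
	bool indirect_mode;
};

static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct foo_priv *foo = context;

	if (!foo->indirect_mode)
		return regmap_read(foo->regmap_physical, reg, val);

	/* indirect access: latch the index, then read the data register */
	regmap_write(foo->regmap_physical, FOO_PRIV_INDEX, reg);
	return regmap_read(foo->regmap_physical, FOO_PRIV_DATA, val);
}

static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct foo_priv *foo = context;

	if (!foo->indirect_mode)
		return regmap_write(foo->regmap_physical, reg, val);

	regmap_write(foo->regmap_physical, FOO_PRIV_INDEX, reg);
	return regmap_write(foo->regmap_physical, FOO_PRIV_DATA, val);
}

static const struct regmap_config foo_regmap_cfg = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.reg_read	= foo_reg_read,	/* used because no bus is passed below */
	.reg_write	= foo_reg_write,
	.cache_type	= REGCACHE_RBTREE,
};

/* in probe: passing a NULL bus makes regmap call the two callbacks above */
foo->regmap = devm_regmap_init(dev, NULL, foo, &foo_regmap_cfg);

The final call of course belongs inside a probe() function where foo and dev are in scope; the rt5677_i2c_probe() change above shows the same devm_regmap_init(dev, NULL, ...) call against the real driver.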
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index d4eb6d5e6746..c0a625f290cc 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -13,6 +13,7 @@
#define __RT5677_H__
#include <sound/rt5677.h>
+#include <linux/gpio/driver.h>
/* Info */
#define RT5677_RESET 0x00
@@ -305,10 +306,10 @@
#define RT5677_R_MUTE_SFT 7
#define RT5677_VOL_R_MUTE (0x1 << 6)
#define RT5677_VOL_R_SFT 6
-#define RT5677_L_VOL_MASK (0x3f << 8)
-#define RT5677_L_VOL_SFT 8
-#define RT5677_R_VOL_MASK (0x3f)
-#define RT5677_R_VOL_SFT 0
+#define RT5677_L_VOL_MASK (0x7f << 9)
+#define RT5677_L_VOL_SFT 9
+#define RT5677_R_VOL_MASK (0x7f << 1)
+#define RT5677_R_VOL_SFT 1
/* LOUT1 Control (0x01) */
#define RT5677_LOUT1_L_MUTE (0x1 << 15)
@@ -446,16 +447,16 @@
#define RT5677_SEL_DAC2_R_SRC_SFT 0
/* Stereo1 ADC Digital Volume Control (0x1c) */
-#define RT5677_STO1_ADC_L_VOL_MASK (0x7f << 8)
-#define RT5677_STO1_ADC_L_VOL_SFT 8
-#define RT5677_STO1_ADC_R_VOL_MASK (0x7f)
-#define RT5677_STO1_ADC_R_VOL_SFT 0
+#define RT5677_STO1_ADC_L_VOL_MASK (0x3f << 9)
+#define RT5677_STO1_ADC_L_VOL_SFT 9
+#define RT5677_STO1_ADC_R_VOL_MASK (0x3f << 1)
+#define RT5677_STO1_ADC_R_VOL_SFT 1
/* Mono ADC Digital Volume Control (0x1d) */
-#define RT5677_MONO_ADC_L_VOL_MASK (0x7f << 8)
-#define RT5677_MONO_ADC_L_VOL_SFT 8
-#define RT5677_MONO_ADC_R_VOL_MASK (0x7f)
-#define RT5677_MONO_ADC_R_VOL_SFT 0
+#define RT5677_MONO_ADC_L_VOL_MASK (0x3f << 9)
+#define RT5677_MONO_ADC_L_VOL_SFT 9
+#define RT5677_MONO_ADC_R_VOL_MASK (0x3f << 1)
+#define RT5677_MONO_ADC_R_VOL_SFT 1
/* Stereo 1/2 ADC Boost Gain Control (0x1e) */
#define RT5677_STO1_ADC_L_BST_MASK (0x3 << 14)
@@ -798,7 +799,21 @@
#define RT5677_PDM2_I2C_EXE (0x1 << 1)
#define RT5677_PDM2_I2C_BUSY (0x1 << 0)
-/* MX3C TDM1 control 1 (0x3c) */
+/* TDM1 control 1 (0x3b) */
+#define RT5677_IF1_ADC_MODE_MASK (0x1 << 12)
+#define RT5677_IF1_ADC_MODE_SFT 12
+#define RT5677_IF1_ADC_MODE_I2S (0x0 << 12)
+#define RT5677_IF1_ADC_MODE_TDM (0x1 << 12)
+#define RT5677_IF1_ADC1_SWAP_MASK (0x3 << 6)
+#define RT5677_IF1_ADC1_SWAP_SFT 6
+#define RT5677_IF1_ADC2_SWAP_MASK (0x3 << 4)
+#define RT5677_IF1_ADC2_SWAP_SFT 4
+#define RT5677_IF1_ADC3_SWAP_MASK (0x3 << 2)
+#define RT5677_IF1_ADC3_SWAP_SFT 2
+#define RT5677_IF1_ADC4_SWAP_MASK (0x3 << 0)
+#define RT5677_IF1_ADC4_SWAP_SFT 0
+
+/* TDM1 control 2 (0x3c) */
#define RT5677_IF1_ADC4_MASK (0x3 << 10)
#define RT5677_IF1_ADC4_SFT 10
#define RT5677_IF1_ADC3_MASK (0x3 << 8)
@@ -807,8 +822,44 @@
#define RT5677_IF1_ADC2_SFT 6
#define RT5677_IF1_ADC1_MASK (0x3 << 4)
#define RT5677_IF1_ADC1_SFT 4
-
-/* MX41 TDM2 control 1 (0x41) */
+#define RT5677_IF1_ADC_CTRL_MASK (0x7 << 0)
+#define RT5677_IF1_ADC_CTRL_SFT 0
+
+/* TDM1 control 4 (0x3e) */
+#define RT5677_IF1_DAC0_MASK (0x7 << 12)
+#define RT5677_IF1_DAC0_SFT 12
+#define RT5677_IF1_DAC1_MASK (0x7 << 8)
+#define RT5677_IF1_DAC1_SFT 8
+#define RT5677_IF1_DAC2_MASK (0x7 << 4)
+#define RT5677_IF1_DAC2_SFT 4
+#define RT5677_IF1_DAC3_MASK (0x7 << 0)
+#define RT5677_IF1_DAC3_SFT 0
+
+/* TDM1 control 5 (0x3f) */
+#define RT5677_IF1_DAC4_MASK (0x7 << 12)
+#define RT5677_IF1_DAC4_SFT 12
+#define RT5677_IF1_DAC5_MASK (0x7 << 8)
+#define RT5677_IF1_DAC5_SFT 8
+#define RT5677_IF1_DAC6_MASK (0x7 << 4)
+#define RT5677_IF1_DAC6_SFT 4
+#define RT5677_IF1_DAC7_MASK (0x7 << 0)
+#define RT5677_IF1_DAC7_SFT 0
+
+/* TDM2 control 1 (0x40) */
+#define RT5677_IF2_ADC_MODE_MASK (0x1 << 12)
+#define RT5677_IF2_ADC_MODE_SFT 12
+#define RT5677_IF2_ADC_MODE_I2S (0x0 << 12)
+#define RT5677_IF2_ADC_MODE_TDM (0x1 << 12)
+#define RT5677_IF2_ADC1_SWAP_MASK (0x3 << 6)
+#define RT5677_IF2_ADC1_SWAP_SFT 6
+#define RT5677_IF2_ADC2_SWAP_MASK (0x3 << 4)
+#define RT5677_IF2_ADC2_SWAP_SFT 4
+#define RT5677_IF2_ADC3_SWAP_MASK (0x3 << 2)
+#define RT5677_IF2_ADC3_SWAP_SFT 2
+#define RT5677_IF2_ADC4_SWAP_MASK (0x3 << 0)
+#define RT5677_IF2_ADC4_SWAP_SFT 0
+
+/* TDM2 control 2 (0x41) */
#define RT5677_IF2_ADC4_MASK (0x3 << 10)
#define RT5677_IF2_ADC4_SFT 10
#define RT5677_IF2_ADC3_MASK (0x3 << 8)
@@ -817,6 +868,28 @@
#define RT5677_IF2_ADC2_SFT 6
#define RT5677_IF2_ADC1_MASK (0x3 << 4)
#define RT5677_IF2_ADC1_SFT 4
+#define RT5677_IF2_ADC_CTRL_MASK (0x7 << 0)
+#define RT5677_IF2_ADC_CTRL_SFT 0
+
+/* TDM2 control 4 (0x43) */
+#define RT5677_IF2_DAC0_MASK (0x7 << 12)
+#define RT5677_IF2_DAC0_SFT 12
+#define RT5677_IF2_DAC1_MASK (0x7 << 8)
+#define RT5677_IF2_DAC1_SFT 8
+#define RT5677_IF2_DAC2_MASK (0x7 << 4)
+#define RT5677_IF2_DAC2_SFT 4
+#define RT5677_IF2_DAC3_MASK (0x7 << 0)
+#define RT5677_IF2_DAC3_SFT 0
+
+/* TDM2 control 5 (0x44) */
+#define RT5677_IF2_DAC4_MASK (0x7 << 12)
+#define RT5677_IF2_DAC4_SFT 12
+#define RT5677_IF2_DAC5_MASK (0x7 << 8)
+#define RT5677_IF2_DAC5_SFT 8
+#define RT5677_IF2_DAC6_MASK (0x7 << 4)
+#define RT5677_IF2_DAC6_SFT 4
+#define RT5677_IF2_DAC7_MASK (0x7 << 0)
+#define RT5677_IF2_DAC7_SFT 0
/* Digital Microphone Control 1 (0x50) */
#define RT5677_DMIC_1_EN_MASK (0x1 << 15)
@@ -1367,6 +1440,48 @@
#define RT5677_SEL_SRC_IB01 (0x1 << 0)
#define RT5677_SEL_SRC_IB01_SFT 0
+/* Jack Detect Control 1 (0xb5) */
+#define RT5677_SEL_GPIO_JD1_MASK (0x3 << 14)
+#define RT5677_SEL_GPIO_JD1_SFT 14
+#define RT5677_SEL_GPIO_JD2_MASK (0x3 << 12)
+#define RT5677_SEL_GPIO_JD2_SFT 12
+#define RT5677_SEL_GPIO_JD3_MASK (0x3 << 10)
+#define RT5677_SEL_GPIO_JD3_SFT 10
+
+/* IRQ Control 1 (0xbd) */
+#define RT5677_STA_GPIO_JD1 (0x1 << 15)
+#define RT5677_STA_GPIO_JD1_SFT 15
+#define RT5677_EN_IRQ_GPIO_JD1 (0x1 << 14)
+#define RT5677_EN_IRQ_GPIO_JD1_SFT 14
+#define RT5677_EN_GPIO_JD1_STICKY (0x1 << 13)
+#define RT5677_EN_GPIO_JD1_STICKY_SFT 13
+#define RT5677_INV_GPIO_JD1 (0x1 << 12)
+#define RT5677_INV_GPIO_JD1_SFT 12
+#define RT5677_STA_GPIO_JD2 (0x1 << 11)
+#define RT5677_STA_GPIO_JD2_SFT 11
+#define RT5677_EN_IRQ_GPIO_JD2 (0x1 << 10)
+#define RT5677_EN_IRQ_GPIO_JD2_SFT 10
+#define RT5677_EN_GPIO_JD2_STICKY (0x1 << 9)
+#define RT5677_EN_GPIO_JD2_STICKY_SFT 9
+#define RT5677_INV_GPIO_JD2 (0x1 << 8)
+#define RT5677_INV_GPIO_JD2_SFT 8
+#define RT5677_STA_MICBIAS1_OVCD (0x1 << 7)
+#define RT5677_STA_MICBIAS1_OVCD_SFT 7
+#define RT5677_EN_IRQ_MICBIAS1_OVCD (0x1 << 6)
+#define RT5677_EN_IRQ_MICBIAS1_OVCD_SFT 6
+#define RT5677_EN_MICBIAS1_OVCD_STICKY (0x1 << 5)
+#define RT5677_EN_MICBIAS1_OVCD_STICKY_SFT 5
+#define RT5677_INV_MICBIAS1_OVCD (0x1 << 4)
+#define RT5677_INV_MICBIAS1_OVCD_SFT 4
+#define RT5677_STA_GPIO_JD3 (0x1 << 3)
+#define RT5677_STA_GPIO_JD3_SFT 3
+#define RT5677_EN_IRQ_GPIO_JD3 (0x1 << 2)
+#define RT5677_EN_IRQ_GPIO_JD3_SFT 2
+#define RT5677_EN_GPIO_JD3_STICKY (0x1 << 1)
+#define RT5677_EN_GPIO_JD3_STICKY_SFT 1
+#define RT5677_INV_GPIO_JD3 (0x1 << 0)
+#define RT5677_INV_GPIO_JD3_SFT 0
+
/* GPIO status (0xbf) */
#define RT5677_GPIO6_STATUS_MASK (0x1 << 5)
#define RT5677_GPIO6_STATUS_SFT 5
@@ -1506,6 +1621,9 @@
#define RT5677_GPIO5_FUNC_GPIO (0x0 << 9)
#define RT5677_GPIO5_FUNC_DMIC (0x1 << 9)
+#define RT5677_FIRMWARE1 "rt5677_dsp_fw1.bin"
+#define RT5677_FIRMWARE2 "rt5677_dsp_fw2.bin"
+
/* System Clock Source */
enum {
RT5677_SCLK_S_MCLK,
@@ -1541,10 +1659,18 @@ enum {
RT5677_GPIO_NUM,
};
+enum {
+ RT5677_IRQ_JD1,
+ RT5677_IRQ_JD2,
+ RT5677_IRQ_JD3,
+};
+
struct rt5677_priv {
struct snd_soc_codec *codec;
struct rt5677_platform_data pdata;
- struct regmap *regmap;
+ struct regmap *regmap, *regmap_physical;
+ const struct firmware *fw1, *fw2;
+ struct mutex dsp_cmd_lock, dsp_pri_lock;
int sysclk;
int sysclk_src;
@@ -1558,6 +1684,10 @@ struct rt5677_priv {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio_chip;
#endif
+ bool dsp_vad_en;
+ struct regmap_irq_chip_data *irq_data;
+ bool is_dsp_mode;
+ bool is_vref_slow;
};
#endif /* __RT5677_H__ */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index dab9b15304af..29cf7ce610f4 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/clk.h>
+#include <linux/log2.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -121,6 +122,13 @@ struct ldo_regulator {
bool enabled;
};
+enum sgtl5000_micbias_resistor {
+ SGTL5000_MICBIAS_OFF = 0,
+ SGTL5000_MICBIAS_2K = 2,
+ SGTL5000_MICBIAS_4K = 4,
+ SGTL5000_MICBIAS_8K = 8,
+};
+
/* sgtl5000 private structure in codec */
struct sgtl5000_priv {
int sysclk; /* sysclk rate */
@@ -131,6 +139,8 @@ struct sgtl5000_priv {
struct regmap *regmap;
struct clk *mclk;
int revision;
+ u8 micbias_resistor;
+ u8 micbias_voltage;
};
/*
@@ -145,12 +155,14 @@ struct sgtl5000_priv {
static int mic_bias_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(w->codec);
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- /* change mic bias resistor to 4Kohm */
+ /* change mic bias resistor */
snd_soc_update_bits(w->codec, SGTL5000_CHIP_MIC_CTRL,
- SGTL5000_BIAS_R_MASK,
- SGTL5000_BIAS_R_4k << SGTL5000_BIAS_R_SHIFT);
+ SGTL5000_BIAS_R_MASK,
+ sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
break;
case SND_SOC_DAPM_PRE_PMD:
@@ -530,16 +542,16 @@ static int sgtl5000_set_dai_sysclk(struct snd_soc_dai *codec_dai,
/*
* set clock according to i2s frame clock,
- * sgtl5000 provide 2 clock sources.
- * 1. sys_mclk. sample freq can only configure to
+ * sgtl5000 provides 2 clock sources:
+ * 1. sys_mclk: sample freq can only be configured to
* 1/256, 1/384, 1/512 of sys_mclk.
- * 2. pll. can derive any audio clocks.
+ * 2. pll: can derive any audio clocks.
*
* clock setting rules:
- * 1. in slave mode, only sys_mclk can use.
- * 2. as constraint by sys_mclk, sample freq should
- * set to 32k, 44.1k and above.
- * 3. using sys_mclk prefer to pll to save power.
+ * 1. in slave mode, only sys_mclk can be used
+ * 2. as constrained by sys_mclk, sample freq should be set to 32 kHz, 44.1 kHz
+ * and above.
+ * 3. usage of sys_mclk is preferred over pll to save power.
*/
static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
{
@@ -549,8 +561,8 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
/*
* sample freq should be divided by frame clock,
- * if frame clock lower than 44.1khz, sample feq should set to
- * 32khz or 44.1khz.
+ * if frame clock is lower than 44.1 kHz, sample freq should be set to
+ * 32 kHz or 44.1 kHz.
*/
switch (frame_rate) {
case 8000:
@@ -603,9 +615,10 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
/*
* calculate the divider of mclk/sample_freq,
- * factor of freq =96k can only be 256, since mclk in range (12m,27m)
+ * factor of freq = 96 kHz can only be 256, since mclk is in the range
+ * of 8 MHz - 27 MHz
*/
- switch (sgtl5000->sysclk / sys_fs) {
+ switch (sgtl5000->sysclk / frame_rate) {
case 256:
clk_ctl |= SGTL5000_MCLK_FREQ_256FS <<
SGTL5000_MCLK_FREQ_SHIFT;
@@ -619,7 +632,7 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
SGTL5000_MCLK_FREQ_SHIFT;
break;
default:
- /* if mclk not satisify the divider, use pll */
+ /* if mclk does not satisfy the divider, use pll */
if (sgtl5000->master) {
clk_ctl |= SGTL5000_MCLK_FREQ_PLL <<
SGTL5000_MCLK_FREQ_SHIFT;
@@ -628,7 +641,7 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
"PLL not supported in slave mode\n");
dev_err(codec->dev, "%d ratio is not supported. "
"SYS_MCLK needs to be 256, 384 or 512 * fs\n",
- sgtl5000->sysclk / sys_fs);
+ sgtl5000->sysclk / frame_rate);
return -EINVAL;
}
}
@@ -795,7 +808,7 @@ static int ldo_regulator_enable(struct regulator_dev *dev)
SGTL5000_LINEREG_D_POWERUP,
SGTL5000_LINEREG_D_POWERUP);
- /* when internal ldo enabled, simple digital power can be disabled */
+ /* when internal ldo is enabled, simple digital power can be disabled */
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_LINREG_SIMPLE_POWERUP,
0);
@@ -1079,7 +1092,7 @@ static bool sgtl5000_readable(struct device *dev, unsigned int reg)
/*
* sgtl5000 has 3 internal power supplies:
* 1. VAG, normally set to vdda/2
- * 2. chargepump, set to different value
+ * 2. charge pump, set to different value
* according to voltage of vdda and vddio
* 3. line out VAG, normally set to vddio/2
*
@@ -1325,8 +1338,13 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
SGTL5000_HP_ZCD_EN |
SGTL5000_ADC_ZCD_EN);
- snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
+ snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_R_MASK,
+ sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
+ snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_VOLT_MASK,
+ sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
/*
* disable DAP
* TODO:
@@ -1416,10 +1434,10 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
{
struct sgtl5000_priv *sgtl5000;
int ret, reg, rev;
- unsigned int mclk;
+ struct device_node *np = client->dev.of_node;
+ u32 value;
- sgtl5000 = devm_kzalloc(&client->dev, sizeof(struct sgtl5000_priv),
- GFP_KERNEL);
+ sgtl5000 = devm_kzalloc(&client->dev, sizeof(*sgtl5000), GFP_KERNEL);
if (!sgtl5000)
return -ENOMEM;
@@ -1440,14 +1458,6 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
return ret;
}
- /* SGTL5000 SYS_MCLK should be between 8 and 27 MHz */
- mclk = clk_get_rate(sgtl5000->mclk);
- if (mclk < 8000000 || mclk > 27000000) {
- dev_err(&client->dev, "Invalid SYS_CLK frequency: %u.%03uMHz\n",
- mclk / 1000000, mclk / 1000 % 1000);
- return -EINVAL;
- }
-
ret = clk_prepare_enable(sgtl5000->mclk);
if (ret)
return ret;
@@ -1469,6 +1479,47 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
dev_info(&client->dev, "sgtl5000 revision 0x%x\n", rev);
sgtl5000->revision = rev;
+ if (np) {
+ if (!of_property_read_u32(np,
+ "micbias-resistor-k-ohms", &value)) {
+ switch (value) {
+ case SGTL5000_MICBIAS_OFF:
+ sgtl5000->micbias_resistor = 0;
+ break;
+ case SGTL5000_MICBIAS_2K:
+ sgtl5000->micbias_resistor = 1;
+ break;
+ case SGTL5000_MICBIAS_4K:
+ sgtl5000->micbias_resistor = 2;
+ break;
+ case SGTL5000_MICBIAS_8K:
+ sgtl5000->micbias_resistor = 3;
+ break;
+ default:
+ sgtl5000->micbias_resistor = 2;
+ dev_err(&client->dev,
+ "Unsuitable MicBias resistor\n");
+ }
+ } else {
+ /* default is 4Kohms */
+ sgtl5000->micbias_resistor = 2;
+ }
+ if (!of_property_read_u32(np,
+ "micbias-voltage-m-volts", &value)) {
+ /* 1250 mV maps to 0, in steps of 250 mV */
+ if ((value >= 1250) && (value <= 3000))
+ sgtl5000->micbias_voltage = (value / 250) - 5;
+ else {
+ sgtl5000->micbias_voltage = 0;
+ dev_err(&client->dev,
+ "Unsuitable MicBias resistor\n");
+ }
+ } else {
+ sgtl5000->micbias_voltage = 0;
+ }
+ }
+
i2c_set_clientdata(client, sgtl5000);
/* Ensure sgtl5000 will start with sane register values */
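For reference, the device-tree parsing added above maps "micbias-resistor-k-ohms" values of 0/2/4/8 to register codes 0-3, and "micbias-voltage-m-volts" values between 1250 and 3000 to (value / 250) - 5 in 250 mV steps. A tiny illustrative helper (not part of the patch) showing just that arithmetic:

#include <linux/types.h>

/* illustrative only: mirrors the micbias voltage mapping parsed above */
static u8 sgtl5000_micbias_mv_to_code(u32 mv)
{
	if (mv < 1250 || mv > 3000)
		return 0;		/* out of range: fall back to 1250 mV */
	return (mv / 250) - 5;		/* 1250 -> 0, 1500 -> 1, ..., 3000 -> 7 */
}
/* e.g. sgtl5000_micbias_mv_to_code(2500) == 5 */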
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index cdf882fa7716..3e72964280c6 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -261,7 +261,6 @@ MODULE_ALIAS("platform:si476x-codec");
static struct platform_driver si476x_platform_driver = {
.driver = {
.name = "si476x-codec",
- .owner = THIS_MODULE,
},
.probe = si476x_platform_probe,
.remove = si476x_platform_remove,
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
index 246081aae8ca..21ca3a5e9f66 100644
--- a/sound/soc/codecs/sigmadsp-i2c.c
+++ b/sound/soc/codecs/sigmadsp-i2c.c
@@ -6,29 +6,88 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/i2c.h>
#include <linux/export.h>
+#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
#include "sigmadsp.h"
-static int sigma_action_write_i2c(void *control_data,
- const struct sigma_action *sa, size_t len)
+static int sigmadsp_write_i2c(void *control_data,
+ unsigned int addr, const uint8_t data[], size_t len)
+{
+ uint8_t *buf;
+ int ret;
+
+ buf = kzalloc(2 + len, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_be16(addr, buf);
+ memcpy(buf + 2, data, len);
+
+ ret = i2c_master_send(control_data, buf, len + 2);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int sigmadsp_read_i2c(void *control_data,
+ unsigned int addr, uint8_t data[], size_t len)
{
- return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
- len);
+ struct i2c_client *client = control_data;
+ struct i2c_msg msgs[2];
+ uint8_t buf[2];
+ int ret;
+
+ put_unaligned_be16(addr, buf);
+
+ msgs[0].addr = client->addr;
+ msgs[0].len = sizeof(buf);
+ msgs[0].buf = buf;
+ msgs[0].flags = 0;
+
+ msgs[1].addr = client->addr;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ msgs[1].flags = I2C_M_RD;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ else if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+ return 0;
}
-int process_sigma_firmware(struct i2c_client *client, const char *name)
+/**
+ * devm_sigmadsp_init_i2c() - Initialize SigmaDSP instance
+ * @client: The parent I2C device
+ * @ops: The sigmadsp_ops to use for this instance
+ * @firmware_name: Name of the firmware file to load
+ *
+ * Allocates a SigmaDSP instance and loads the specified firmware file.
+ *
+ * Returns a pointer to a struct sigmadsp on success, or an ERR_PTR() on error.
+ */
+struct sigmadsp *devm_sigmadsp_init_i2c(struct i2c_client *client,
+ const struct sigmadsp_ops *ops, const char *firmware_name)
{
- struct sigma_firmware ssfw;
+ struct sigmadsp *sigmadsp;
+
+ sigmadsp = devm_sigmadsp_init(&client->dev, ops, firmware_name);
+ if (IS_ERR(sigmadsp))
+ return sigmadsp;
- ssfw.control_data = client;
- ssfw.write = sigma_action_write_i2c;
+ sigmadsp->control_data = client;
+ sigmadsp->write = sigmadsp_write_i2c;
+ sigmadsp->read = sigmadsp_read_i2c;
- return _process_sigma_firmware(&client->dev, &ssfw, name);
+ return sigmadsp;
}
-EXPORT_SYMBOL(process_sigma_firmware);
+EXPORT_SYMBOL_GPL(devm_sigmadsp_init_i2c);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("SigmaDSP I2C firmware loader");
diff --git a/sound/soc/codecs/sigmadsp-regmap.c b/sound/soc/codecs/sigmadsp-regmap.c
index f78ed8d2cfb2..912861be5b87 100644
--- a/sound/soc/codecs/sigmadsp-regmap.c
+++ b/sound/soc/codecs/sigmadsp-regmap.c
@@ -12,24 +12,48 @@
#include "sigmadsp.h"
-static int sigma_action_write_regmap(void *control_data,
- const struct sigma_action *sa, size_t len)
+static int sigmadsp_write_regmap(void *control_data,
+ unsigned int addr, const uint8_t data[], size_t len)
{
- return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
- sa->payload, len - 2);
+ return regmap_raw_write(control_data, addr,
+ data, len);
}
-int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
- const char *name)
+static int sigmadsp_read_regmap(void *control_data,
+ unsigned int addr, uint8_t data[], size_t len)
{
- struct sigma_firmware ssfw;
+ return regmap_raw_read(control_data, addr,
+ data, len);
+}
+
+/**
+ * devm_sigmadsp_init_regmap() - Initialize SigmaDSP instance
+ * @dev: The parent device
+ * @regmap: Regmap instance to use
+ * @ops: The sigmadsp_ops to use for this instance
+ * @firmware_name: Name of the firmware file to load
+ *
+ * Allocates a SigmaDSP instance and loads the specified firmware file.
+ *
+ * Returns a pointer to a struct sigmadsp on success, or an ERR_PTR() on error.
+ */
+struct sigmadsp *devm_sigmadsp_init_regmap(struct device *dev,
+ struct regmap *regmap, const struct sigmadsp_ops *ops,
+ const char *firmware_name)
+{
+ struct sigmadsp *sigmadsp;
+
+ sigmadsp = devm_sigmadsp_init(dev, ops, firmware_name);
+ if (IS_ERR(sigmadsp))
+ return sigmadsp;
- ssfw.control_data = regmap;
- ssfw.write = sigma_action_write_regmap;
+ sigmadsp->control_data = regmap;
+ sigmadsp->write = sigmadsp_write_regmap;
+ sigmadsp->read = sigmadsp_read_regmap;
- return _process_sigma_firmware(dev, &ssfw, name);
+ return sigmadsp;
}
-EXPORT_SYMBOL(process_sigma_firmware_regmap);
+EXPORT_SYMBOL_GPL(devm_sigmadsp_init_regmap);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("SigmaDSP regmap firmware loader");
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index f2de7e049bc6..d53680ac78e4 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -1,23 +1,74 @@
/*
* Load Analog Devices SigmaStudio firmware files
*
- * Copyright 2009-2011 Analog Devices Inc.
+ * Copyright 2009-2014 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/crc32.h>
-#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <sound/control.h>
+#include <sound/soc.h>
#include "sigmadsp.h"
#define SIGMA_MAGIC "ADISIGM"
+#define SIGMA_FW_CHUNK_TYPE_DATA 0
+#define SIGMA_FW_CHUNK_TYPE_CONTROL 1
+#define SIGMA_FW_CHUNK_TYPE_SAMPLERATES 2
+
+struct sigmadsp_control {
+ struct list_head head;
+ uint32_t samplerates;
+ unsigned int addr;
+ unsigned int num_bytes;
+ const char *name;
+ struct snd_kcontrol *kcontrol;
+ bool cached;
+ uint8_t cache[];
+};
+
+struct sigmadsp_data {
+ struct list_head head;
+ uint32_t samplerates;
+ unsigned int addr;
+ unsigned int length;
+ uint8_t data[];
+};
+
+struct sigma_fw_chunk {
+ __le32 length;
+ __le32 tag;
+ __le32 samplerates;
+} __packed;
+
+struct sigma_fw_chunk_data {
+ struct sigma_fw_chunk chunk;
+ __le16 addr;
+ uint8_t data[];
+} __packed;
+
+struct sigma_fw_chunk_control {
+ struct sigma_fw_chunk chunk;
+ __le16 type;
+ __le16 addr;
+ __le16 num_bytes;
+ const char name[];
+} __packed;
+
+struct sigma_fw_chunk_samplerate {
+ struct sigma_fw_chunk chunk;
+ __le32 samplerates[];
+} __packed;
+
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
@@ -28,12 +79,286 @@ enum {
SIGMA_ACTION_WRITEXBYTES = 0,
SIGMA_ACTION_WRITESINGLE,
SIGMA_ACTION_WRITESAFELOAD,
- SIGMA_ACTION_DELAY,
- SIGMA_ACTION_PLLWAIT,
- SIGMA_ACTION_NOOP,
SIGMA_ACTION_END,
};
+struct sigma_action {
+ u8 instr;
+ u8 len_hi;
+ __le16 len;
+ __be16 addr;
+ unsigned char payload[];
+} __packed;
+
+static int sigmadsp_write(struct sigmadsp *sigmadsp, unsigned int addr,
+ const uint8_t data[], size_t len)
+{
+ return sigmadsp->write(sigmadsp->control_data, addr, data, len);
+}
+
+static int sigmadsp_read(struct sigmadsp *sigmadsp, unsigned int addr,
+ uint8_t data[], size_t len)
+{
+ return sigmadsp->read(sigmadsp->control_data, addr, data, len);
+}
+
+static int sigmadsp_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ struct sigmadsp_control *ctrl = (void *)kcontrol->private_value;
+
+ info->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ info->count = ctrl->num_bytes;
+
+ return 0;
+}
+
+static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
+ struct sigmadsp_control *ctrl, void *data)
+{
+ /* safeload loads up to 20 bytes in an atomic operation */
+ if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
+ sigmadsp->ops->safeload)
+ return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
+ ctrl->num_bytes);
+ else
+ return sigmadsp_write(sigmadsp, ctrl->addr, data,
+ ctrl->num_bytes);
+}
+
+static int sigmadsp_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sigmadsp_control *ctrl = (void *)kcontrol->private_value;
+ struct sigmadsp *sigmadsp = snd_kcontrol_chip(kcontrol);
+ uint8_t *data;
+ int ret = 0;
+
+ mutex_lock(&sigmadsp->lock);
+
+ data = ucontrol->value.bytes.data;
+
+ if (!(kcontrol->vd[0].access & SNDRV_CTL_ELEM_ACCESS_INACTIVE))
+ ret = sigmadsp_ctrl_write(sigmadsp, ctrl, data);
+
+ if (ret == 0) {
+ memcpy(ctrl->cache, data, ctrl->num_bytes);
+ ctrl->cached = true;
+ }
+
+ mutex_unlock(&sigmadsp->lock);
+
+ return ret;
+}
+
+static int sigmadsp_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sigmadsp_control *ctrl = (void *)kcontrol->private_value;
+ struct sigmadsp *sigmadsp = snd_kcontrol_chip(kcontrol);
+ int ret = 0;
+
+ mutex_lock(&sigmadsp->lock);
+
+ if (!ctrl->cached) {
+ ret = sigmadsp_read(sigmadsp, ctrl->addr, ctrl->cache,
+ ctrl->num_bytes);
+ }
+
+ if (ret == 0) {
+ ctrl->cached = true;
+ memcpy(ucontrol->value.bytes.data, ctrl->cache,
+ ctrl->num_bytes);
+ }
+
+ mutex_unlock(&sigmadsp->lock);
+
+ return ret;
+}
+
+static void sigmadsp_control_free(struct snd_kcontrol *kcontrol)
+{
+ struct sigmadsp_control *ctrl = (void *)kcontrol->private_value;
+
+ ctrl->kcontrol = NULL;
+}
+
+static bool sigma_fw_validate_control_name(const char *name, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++) {
+ /* Normal ASCII characters are valid */
+ if (name[i] < ' ' || name[i] > '~')
+ return false;
+ }
+
+ return true;
+}
+
+static int sigma_fw_load_control(struct sigmadsp *sigmadsp,
+ const struct sigma_fw_chunk *chunk, unsigned int length)
+{
+ const struct sigma_fw_chunk_control *ctrl_chunk;
+ struct sigmadsp_control *ctrl;
+ unsigned int num_bytes;
+ size_t name_len;
+ char *name;
+ int ret;
+
+ if (length <= sizeof(*ctrl_chunk))
+ return -EINVAL;
+
+ ctrl_chunk = (const struct sigma_fw_chunk_control *)chunk;
+
+ name_len = length - sizeof(*ctrl_chunk);
+ if (name_len >= SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+ name_len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN - 1;
+
+ /* Make sure there are no non-displayable characters in the string */
+ if (!sigma_fw_validate_control_name(ctrl_chunk->name, name_len))
+ return -EINVAL;
+
+ num_bytes = le16_to_cpu(ctrl_chunk->num_bytes);
+ ctrl = kzalloc(sizeof(*ctrl) + num_bytes, GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ name = kzalloc(name_len + 1, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_free_ctrl;
+ }
+ memcpy(name, ctrl_chunk->name, name_len);
+ name[name_len] = '\0';
+ ctrl->name = name;
+
+ ctrl->addr = le16_to_cpu(ctrl_chunk->addr);
+ ctrl->num_bytes = num_bytes;
+ ctrl->samplerates = le32_to_cpu(chunk->samplerates);
+
+ list_add_tail(&ctrl->head, &sigmadsp->ctrl_list);
+
+ return 0;
+
+err_free_ctrl:
+ kfree(ctrl);
+
+ return ret;
+}
+
+static int sigma_fw_load_data(struct sigmadsp *sigmadsp,
+ const struct sigma_fw_chunk *chunk, unsigned int length)
+{
+ const struct sigma_fw_chunk_data *data_chunk;
+ struct sigmadsp_data *data;
+
+ if (length <= sizeof(*data_chunk))
+ return -EINVAL;
+
+ data_chunk = (struct sigma_fw_chunk_data *)chunk;
+
+ length -= sizeof(*data_chunk);
+
+ data = kzalloc(sizeof(*data) + length, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = le16_to_cpu(data_chunk->addr);
+ data->length = length;
+ data->samplerates = le32_to_cpu(chunk->samplerates);
+ memcpy(data->data, data_chunk->data, length);
+ list_add_tail(&data->head, &sigmadsp->data_list);
+
+ return 0;
+}
+
+static int sigma_fw_load_samplerates(struct sigmadsp *sigmadsp,
+ const struct sigma_fw_chunk *chunk, unsigned int length)
+{
+ const struct sigma_fw_chunk_samplerate *rate_chunk;
+ unsigned int num_rates;
+ unsigned int *rates;
+ unsigned int i;
+
+ rate_chunk = (const struct sigma_fw_chunk_samplerate *)chunk;
+
+ num_rates = (length - sizeof(*rate_chunk)) / sizeof(__le32);
+
+ if (num_rates > 32 || num_rates == 0)
+ return -EINVAL;
+
+ /* We only allow one samplerates block per file */
+ if (sigmadsp->rate_constraints.count)
+ return -EINVAL;
+
+ rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ for (i = 0; i < num_rates; i++)
+ rates[i] = le32_to_cpu(rate_chunk->samplerates[i]);
+
+ sigmadsp->rate_constraints.count = num_rates;
+ sigmadsp->rate_constraints.list = rates;
+
+ return 0;
+}
+
+static int sigmadsp_fw_load_v2(struct sigmadsp *sigmadsp,
+ const struct firmware *fw)
+{
+ struct sigma_fw_chunk *chunk;
+ unsigned int length, pos;
+ int ret;
+
+ /*
+ * Make sure that there is at least one chunk to avoid integer
+ * underflows later on. Empty firmware is still valid though.
+ */
+ if (fw->size < sizeof(*chunk) + sizeof(struct sigma_firmware_header))
+ return 0;
+
+ pos = sizeof(struct sigma_firmware_header);
+
+ while (pos < fw->size - sizeof(*chunk)) {
+ chunk = (struct sigma_fw_chunk *)(fw->data + pos);
+
+ length = le32_to_cpu(chunk->length);
+
+ if (length > fw->size - pos || length < sizeof(*chunk))
+ return -EINVAL;
+
+ switch (le32_to_cpu(chunk->tag)) {
+ case SIGMA_FW_CHUNK_TYPE_DATA:
+ ret = sigma_fw_load_data(sigmadsp, chunk, length);
+ break;
+ case SIGMA_FW_CHUNK_TYPE_CONTROL:
+ ret = sigma_fw_load_control(sigmadsp, chunk, length);
+ break;
+ case SIGMA_FW_CHUNK_TYPE_SAMPLERATES:
+ ret = sigma_fw_load_samplerates(sigmadsp, chunk, length);
+ break;
+ default:
+ dev_warn(sigmadsp->dev, "Unknown chunk type: %u\n",
+ le32_to_cpu(chunk->tag));
+ ret = 0;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /*
+ * This cannot overflow since, if length were larger than the
+ * maximum firmware size (0x4000000), we would have errored out earlier.
+ */
+ pos += ALIGN(length, sizeof(__le32));
+ }
+
+ return 0;
+}
+
static inline u32 sigma_action_len(struct sigma_action *sa)
{
return (sa->len_hi << 16) | le16_to_cpu(sa->len);
@@ -62,11 +387,11 @@ static size_t sigma_action_size(struct sigma_action *sa)
* Returns a negative error value in case of an error, 0 if processing of
* the firmware should be stopped after this action, 1 otherwise.
*/
-static int
-process_sigma_action(struct sigma_firmware *ssfw, struct sigma_action *sa)
+static int process_sigma_action(struct sigmadsp *sigmadsp,
+ struct sigma_action *sa)
{
size_t len = sigma_action_len(sa);
- int ret;
+ struct sigmadsp_data *data;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
@@ -75,13 +400,17 @@ process_sigma_action(struct sigma_firmware *ssfw, struct sigma_action *sa)
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- ret = ssfw->write(ssfw->control_data, sa, len);
- if (ret < 0)
+ if (len < 3)
return -EINVAL;
- break;
- case SIGMA_ACTION_DELAY:
- udelay(len);
- len = 0;
+
+ data = kzalloc(sizeof(*data) + len - 2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = be16_to_cpu(sa->addr);
+ data->length = len - 2;
+ memcpy(data->data, sa->payload, data->length);
+ list_add_tail(&data->head, &sigmadsp->data_list);
break;
case SIGMA_ACTION_END:
return 0;
@@ -92,22 +421,24 @@ process_sigma_action(struct sigma_firmware *ssfw, struct sigma_action *sa)
return 1;
}
-static int
-process_sigma_actions(struct sigma_firmware *ssfw)
+static int sigmadsp_fw_load_v1(struct sigmadsp *sigmadsp,
+ const struct firmware *fw)
{
struct sigma_action *sa;
- size_t size;
+ size_t size, pos;
int ret;
- while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
- sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+ pos = sizeof(struct sigma_firmware_header);
+
+ while (pos + sizeof(*sa) <= fw->size) {
+ sa = (struct sigma_action *)(fw->data + pos);
size = sigma_action_size(sa);
- ssfw->pos += size;
- if (ssfw->pos > ssfw->fw->size || size == 0)
+ pos += size;
+ if (pos > fw->size || size == 0)
break;
- ret = process_sigma_action(ssfw, sa);
+ ret = process_sigma_action(sigmadsp, sa);
pr_debug("%s: action returned %i\n", __func__, ret);
@@ -115,29 +446,47 @@ process_sigma_actions(struct sigma_firmware *ssfw)
return ret;
}
- if (ssfw->pos != ssfw->fw->size)
+ if (pos != fw->size)
return -EINVAL;
return 0;
}
-int _process_sigma_firmware(struct device *dev,
- struct sigma_firmware *ssfw, const char *name)
+static void sigmadsp_firmware_release(struct sigmadsp *sigmadsp)
{
- int ret;
- struct sigma_firmware_header *ssfw_head;
+ struct sigmadsp_control *ctrl, *_ctrl;
+ struct sigmadsp_data *data, *_data;
+
+ list_for_each_entry_safe(ctrl, _ctrl, &sigmadsp->ctrl_list, head) {
+ kfree(ctrl->name);
+ kfree(ctrl);
+ }
+
+ list_for_each_entry_safe(data, _data, &sigmadsp->data_list, head)
+ kfree(data);
+
+ INIT_LIST_HEAD(&sigmadsp->ctrl_list);
+ INIT_LIST_HEAD(&sigmadsp->data_list);
+}
+
+static void devm_sigmadsp_release(struct device *dev, void *res)
+{
+ sigmadsp_firmware_release((struct sigmadsp *)res);
+}
+
+static int sigmadsp_firmware_load(struct sigmadsp *sigmadsp, const char *name)
+{
+ const struct sigma_firmware_header *ssfw_head;
const struct firmware *fw;
+ int ret;
u32 crc;
- pr_debug("%s: loading firmware %s\n", __func__, name);
-
/* first load the blob */
- ret = request_firmware(&fw, name, dev);
+ ret = request_firmware(&fw, name, sigmadsp->dev);
if (ret) {
pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
- return ret;
+ goto done;
}
- ssfw->fw = fw;
/* then verify the header */
ret = -EINVAL;
@@ -149,13 +498,13 @@ int _process_sigma_firmware(struct device *dev,
* overflows later in the loading process.
*/
if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000) {
- dev_err(dev, "Failed to load firmware: Invalid size\n");
+ dev_err(sigmadsp->dev, "Failed to load firmware: Invalid size\n");
goto done;
}
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic))) {
- dev_err(dev, "Failed to load firmware: Invalid magic\n");
+ dev_err(sigmadsp->dev, "Failed to load firmware: Invalid magic\n");
goto done;
}
@@ -163,23 +512,303 @@ int _process_sigma_firmware(struct device *dev,
fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
if (crc != le32_to_cpu(ssfw_head->crc)) {
- dev_err(dev, "Failed to load firmware: Wrong crc checksum: expected %x got %x\n",
+ dev_err(sigmadsp->dev, "Failed to load firmware: Wrong crc checksum: expected %x got %x\n",
le32_to_cpu(ssfw_head->crc), crc);
goto done;
}
- ssfw->pos = sizeof(*ssfw_head);
+ switch (ssfw_head->version) {
+ case 1:
+ ret = sigmadsp_fw_load_v1(sigmadsp, fw);
+ break;
+ case 2:
+ ret = sigmadsp_fw_load_v2(sigmadsp, fw);
+ break;
+ default:
+ dev_err(sigmadsp->dev,
+ "Failed to load firmware: Invalid version %d. Supported firmware versions: 1, 2\n",
+ ssfw_head->version);
+ ret = -EINVAL;
+ break;
+ }
- /* finally process all of the actions */
- ret = process_sigma_actions(ssfw);
+ if (ret)
+ sigmadsp_firmware_release(sigmadsp);
- done:
+done:
release_firmware(fw);
- pr_debug("%s: loaded %s\n", __func__, name);
+ return ret;
+}
+
+static int sigmadsp_init(struct sigmadsp *sigmadsp, struct device *dev,
+ const struct sigmadsp_ops *ops, const char *firmware_name)
+{
+ sigmadsp->ops = ops;
+ sigmadsp->dev = dev;
+
+ INIT_LIST_HEAD(&sigmadsp->ctrl_list);
+ INIT_LIST_HEAD(&sigmadsp->data_list);
+ mutex_init(&sigmadsp->lock);
+
+ return sigmadsp_firmware_load(sigmadsp, firmware_name);
+}
+
+/**
+ * devm_sigmadsp_init() - Initialize SigmaDSP instance
+ * @dev: The parent device
+ * @ops: The sigmadsp_ops to use for this instance
+ * @firmware_name: Name of the firmware file to load
+ *
+ * Allocates a SigmaDSP instance and loads the specified firmware file.
+ *
+ * Returns a pointer to a struct sigmadsp on success, or an ERR_PTR() on error.
+ */
+struct sigmadsp *devm_sigmadsp_init(struct device *dev,
+ const struct sigmadsp_ops *ops, const char *firmware_name)
+{
+ struct sigmadsp *sigmadsp;
+ int ret;
+
+ sigmadsp = devres_alloc(devm_sigmadsp_release, sizeof(*sigmadsp),
+ GFP_KERNEL);
+ if (!sigmadsp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sigmadsp_init(sigmadsp, dev, ops, firmware_name);
+ if (ret) {
+ devres_free(sigmadsp);
+ return ERR_PTR(ret);
+ }
+
+ devres_add(dev, sigmadsp);
+
+ return sigmadsp;
+}
+EXPORT_SYMBOL_GPL(devm_sigmadsp_init);
+
+static int sigmadsp_rate_to_index(struct sigmadsp *sigmadsp, unsigned int rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < sigmadsp->rate_constraints.count; i++) {
+ if (sigmadsp->rate_constraints.list[i] == rate)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static unsigned int sigmadsp_get_samplerate_mask(struct sigmadsp *sigmadsp,
+ unsigned int samplerate)
+{
+ int samplerate_index;
+
+ if (samplerate == 0)
+ return 0;
+
+ if (sigmadsp->rate_constraints.count) {
+ samplerate_index = sigmadsp_rate_to_index(sigmadsp, samplerate);
+ if (samplerate_index < 0)
+ return 0;
+
+ return BIT(samplerate_index);
+ } else {
+ return ~0;
+ }
+}
+
+static bool sigmadsp_samplerate_valid(unsigned int supported,
+ unsigned int requested)
+{
+ /* All samplerates are supported */
+ if (!supported)
+ return true;
+
+ return supported & requested;
+}
+
+static int sigmadsp_alloc_control(struct sigmadsp *sigmadsp,
+ struct sigmadsp_control *ctrl, unsigned int samplerate_mask)
+{
+ struct snd_kcontrol_new template;
+ struct snd_kcontrol *kcontrol;
+
+ memset(&template, 0, sizeof(template));
+ template.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ template.name = ctrl->name;
+ template.info = sigmadsp_ctrl_info;
+ template.get = sigmadsp_ctrl_get;
+ template.put = sigmadsp_ctrl_put;
+ template.private_value = (unsigned long)ctrl;
+ template.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ if (!sigmadsp_samplerate_valid(ctrl->samplerates, samplerate_mask))
+ template.access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+
+ kcontrol = snd_ctl_new1(&template, sigmadsp);
+ if (!kcontrol)
+ return -ENOMEM;
+
+ kcontrol->private_free = sigmadsp_control_free;
+ ctrl->kcontrol = kcontrol;
+
+ return snd_ctl_add(sigmadsp->component->card->snd_card, kcontrol);
+}
+
+static void sigmadsp_activate_ctrl(struct sigmadsp *sigmadsp,
+ struct sigmadsp_control *ctrl, unsigned int samplerate_mask)
+{
+ struct snd_card *card = sigmadsp->component->card->snd_card;
+ struct snd_kcontrol_volatile *vd;
+ struct snd_ctl_elem_id id;
+ bool active;
+ bool changed = false;
+
+ active = sigmadsp_samplerate_valid(ctrl->samplerates, samplerate_mask);
+
+ down_write(&card->controls_rwsem);
+ if (!ctrl->kcontrol) {
+ up_write(&card->controls_rwsem);
+ return;
+ }
+
+ id = ctrl->kcontrol->id;
+ vd = &ctrl->kcontrol->vd[0];
+ if (active == (bool)(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)) {
+ vd->access ^= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ changed = true;
+ }
+ up_write(&card->controls_rwsem);
+
+ if (active && changed) {
+ mutex_lock(&sigmadsp->lock);
+ if (ctrl->cached)
+ sigmadsp_ctrl_write(sigmadsp, ctrl, ctrl->cache);
+ mutex_unlock(&sigmadsp->lock);
+ }
+
+ if (changed)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, &id);
+}
+
+/**
+ * sigmadsp_attach() - Attach a sigmadsp instance to an ASoC component
+ * @sigmadsp: The sigmadsp instance to attach
+ * @component: The component to attach to
+ *
+ * Typically called in the component's probe callback.
+ *
+ * Note, once this function has been called the firmware must not be released
+ * until after the ALSA snd_card that the component belongs to has been
+ * disconnected, even if sigmadsp_attach() returns an error.
+ */
+int sigmadsp_attach(struct sigmadsp *sigmadsp,
+ struct snd_soc_component *component)
+{
+ struct sigmadsp_control *ctrl;
+ unsigned int samplerate_mask;
+ int ret;
+
+ sigmadsp->component = component;
+
+ samplerate_mask = sigmadsp_get_samplerate_mask(sigmadsp,
+ sigmadsp->current_samplerate);
+
+ list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head) {
+ ret = sigmadsp_alloc_control(sigmadsp, ctrl, samplerate_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sigmadsp_attach);
+
+/**
+ * sigmadsp_setup() - Setup the DSP for the specified samplerate
+ * @sigmadsp: The sigmadsp instance to configure
+ * @samplerate: The samplerate the DSP should be configured for
+ *
+ * Loads the appropriate firmware program and parameter memory (if not already
+ * loaded) and enables the controls for the specified samplerate. Any control
+ * parameter changes that have been made previously will be restored.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int sigmadsp_setup(struct sigmadsp *sigmadsp, unsigned int samplerate)
+{
+ struct sigmadsp_control *ctrl;
+ unsigned int samplerate_mask;
+ struct sigmadsp_data *data;
+ int ret;
+
+ if (sigmadsp->current_samplerate == samplerate)
+ return 0;
+
+ samplerate_mask = sigmadsp_get_samplerate_mask(sigmadsp, samplerate);
+ if (samplerate_mask == 0)
+ return -EINVAL;
+
+ list_for_each_entry(data, &sigmadsp->data_list, head) {
+ if (!sigmadsp_samplerate_valid(data->samplerates,
+ samplerate_mask))
+ continue;
+ ret = sigmadsp_write(sigmadsp, data->addr, data->data,
+ data->length);
+ if (ret)
+ goto err;
+ }
+
+ list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head)
+ sigmadsp_activate_ctrl(sigmadsp, ctrl, samplerate_mask);
+
+ sigmadsp->current_samplerate = samplerate;
+
+ return 0;
+err:
+ sigmadsp_reset(sigmadsp);
return ret;
}
-EXPORT_SYMBOL_GPL(_process_sigma_firmware);
+EXPORT_SYMBOL_GPL(sigmadsp_setup);
+
+/**
+ * sigmadsp_reset() - Notify the sigmadsp instance that the DSP has been reset
+ * @sigmadsp: The sigmadsp instance to reset
+ *
+ * Should be called whenever the DSP has been reset and parameter and program
+ * memory need to be re-loaded.
+ */
+void sigmadsp_reset(struct sigmadsp *sigmadsp)
+{
+ struct sigmadsp_control *ctrl;
+
+ list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head)
+ sigmadsp_activate_ctrl(sigmadsp, ctrl, false);
+
+ sigmadsp->current_samplerate = 0;
+}
+EXPORT_SYMBOL_GPL(sigmadsp_reset);
+
+/**
+ * sigmadsp_restrict_params() - Applies DSP firmware specific constraints
+ * @sigmadsp: The sigmadsp instance
+ * @substream: The substream to restrict
+ *
+ * Applies samplerate constraints that may be required by the firmware. Should
+ * typically be called from the CODEC/component driver's startup callback.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int sigmadsp_restrict_params(struct sigmadsp *sigmadsp,
+ struct snd_pcm_substream *substream)
+{
+ if (sigmadsp->rate_constraints.count == 0)
+ return 0;
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &sigmadsp->rate_constraints);
+}
+EXPORT_SYMBOL_GPL(sigmadsp_restrict_params);
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp.h b/sound/soc/codecs/sigmadsp.h
index c47cd23e9827..614475cbb823 100644
--- a/sound/soc/codecs/sigmadsp.h
+++ b/sound/soc/codecs/sigmadsp.h
@@ -11,31 +11,56 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/list.h>
-struct sigma_action {
- u8 instr;
- u8 len_hi;
- __le16 len;
- __be16 addr;
- unsigned char payload[];
-} __packed;
+#include <sound/pcm.h>
-struct sigma_firmware {
- const struct firmware *fw;
- size_t pos;
+struct sigmadsp;
+struct snd_soc_component;
+struct snd_pcm_substream;
+
+struct sigmadsp_ops {
+ int (*safeload)(struct sigmadsp *sigmadsp, unsigned int addr,
+ const uint8_t *data, size_t len);
+};
+
+struct sigmadsp {
+ const struct sigmadsp_ops *ops;
+
+ struct list_head ctrl_list;
+ struct list_head data_list;
+
+ struct snd_pcm_hw_constraint_list rate_constraints;
+
+ unsigned int current_samplerate;
+ struct snd_soc_component *component;
+ struct device *dev;
+
+ struct mutex lock;
void *control_data;
- int (*write)(void *control_data, const struct sigma_action *sa,
- size_t len);
+ int (*write)(void *, unsigned int, const uint8_t *, size_t);
+ int (*read)(void *, unsigned int, uint8_t *, size_t);
};
-int _process_sigma_firmware(struct device *dev,
- struct sigma_firmware *ssfw, const char *name);
+struct sigmadsp *devm_sigmadsp_init(struct device *dev,
+ const struct sigmadsp_ops *ops, const char *firmware_name);
+void sigmadsp_reset(struct sigmadsp *sigmadsp);
+
+int sigmadsp_restrict_params(struct sigmadsp *sigmadsp,
+ struct snd_pcm_substream *substream);
struct i2c_client;
-extern int process_sigma_firmware(struct i2c_client *client, const char *name);
-extern int process_sigma_firmware_regmap(struct device *dev,
- struct regmap *regmap, const char *name);
+struct sigmadsp *devm_sigmadsp_init_regmap(struct device *dev,
+ struct regmap *regmap, const struct sigmadsp_ops *ops,
+ const char *firmware_name);
+struct sigmadsp *devm_sigmadsp_init_i2c(struct i2c_client *client,
+ const struct sigmadsp_ops *ops, const char *firmware_name);
+
+int sigmadsp_attach(struct sigmadsp *sigmadsp,
+ struct snd_soc_component *component);
+int sigmadsp_setup(struct sigmadsp *sigmadsp, unsigned int rate);
+void sigmadsp_reset(struct sigmadsp *sigmadsp);
#endif
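The header above is the complete client-facing API. A rough usage sketch for a hypothetical codec driver is shown below; everything prefixed foo_, the FOO_FIRMWARE name and the NULL ops (no safeload handler) are assumptions for illustration, not code from this series:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "sigmadsp.h"

#define FOO_FIRMWARE "foo_dsp.bin"	/* hypothetical firmware file */

struct foo_priv {
	struct sigmadsp *sigmadsp;
};

/* codec probe: register the ALSA controls described by the firmware */
static int foo_codec_probe(struct snd_soc_codec *codec)
{
	struct foo_priv *foo = snd_soc_codec_get_drvdata(codec);

	return sigmadsp_attach(foo->sigmadsp, &codec->component);
}

/* DAI startup: let the firmware restrict the allowed sample rates */
static int foo_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct foo_priv *foo = snd_soc_dai_get_drvdata(dai);

	return sigmadsp_restrict_params(foo->sigmadsp, substream);
}

/* hw_params: load program/parameter data matching the chosen rate */
static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct foo_priv *foo = snd_soc_dai_get_drvdata(dai);

	return sigmadsp_setup(foo->sigmadsp, params_rate(params));
}

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct foo_priv *foo;

	foo = devm_kzalloc(&i2c->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* parse and validate the firmware once; NULL ops means no safeload */
	foo->sigmadsp = devm_sigmadsp_init_i2c(i2c, NULL, FOO_FIRMWARE);
	if (IS_ERR(foo->sigmadsp))
		return PTR_ERR(foo->sigmadsp);

	i2c_set_clientdata(i2c, foo);
	/* ... register the codec and DAIs as usual ... */
	return 0;
}

If the DSP core is ever reset (for example when power is gated), calling sigmadsp_reset() afterwards tells the library that program and parameter memory must be rewritten on the next sigmadsp_setup().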
diff --git a/sound/soc/codecs/sirf-audio-codec.c b/sound/soc/codecs/sirf-audio-codec.c
index 06ba4923fd5a..0a8e43c98a07 100644
--- a/sound/soc/codecs/sirf-audio-codec.c
+++ b/sound/soc/codecs/sirf-audio-codec.c
@@ -120,7 +120,8 @@ static int atlas6_codec_enable_and_reset_event(struct snd_soc_dapm_widget *w,
{
#define ATLAS6_CODEC_ENABLE_BITS (1 << 29)
#define ATLAS6_CODEC_RESET_BITS (1 << 28)
- struct sirf_audio_codec *sirf_audio_codec = dev_get_drvdata(w->codec->dev);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct sirf_audio_codec *sirf_audio_codec = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
enable_and_reset_codec(sirf_audio_codec->regmap,
@@ -142,7 +143,8 @@ static int prima2_codec_enable_and_reset_event(struct snd_soc_dapm_widget *w,
{
#define PRIMA2_CODEC_ENABLE_BITS (1 << 27)
#define PRIMA2_CODEC_RESET_BITS (1 << 26)
- struct sirf_audio_codec *sirf_audio_codec = dev_get_drvdata(w->codec->dev);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct sirf_audio_codec *sirf_audio_codec = snd_soc_codec_get_drvdata(codec);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
enable_and_reset_codec(sirf_audio_codec->regmap,
@@ -565,7 +567,6 @@ static const struct dev_pm_ops sirf_audio_codec_pm_ops = {
static struct platform_driver sirf_audio_codec_driver = {
.driver = {
.name = "sirf-audio-codec",
- .owner = THIS_MODULE,
.of_match_table = sirf_audio_codec_of_match,
.pm = &sirf_audio_codec_pm_ops,
},
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index cf8fa40662f0..1f451a1946eb 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -867,25 +867,16 @@ static int sn95031_codec_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, SN95031_SSR2, 0x10);
snd_soc_write(codec, SN95031_SSR3, 0x40);
- snd_soc_add_codec_controls(codec, sn95031_snd_controls,
- ARRAY_SIZE(sn95031_snd_controls));
-
- return 0;
-}
-
-static int sn95031_codec_remove(struct snd_soc_codec *codec)
-{
- pr_debug("codec_remove called\n");
- sn95031_set_vaud_bias(codec, SND_SOC_BIAS_OFF);
-
return 0;
}
static struct snd_soc_codec_driver sn95031_codec = {
.probe = sn95031_codec_probe,
- .remove = sn95031_codec_remove,
.set_bias_level = sn95031_set_vaud_bias,
.idle_bias_off = true,
+
+ .controls = sn95031_snd_controls,
+ .num_controls = ARRAY_SIZE(sn95031_snd_controls),
.dapm_widgets = sn95031_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(sn95031_dapm_widgets),
.dapm_routes = sn95031_audio_map,
@@ -916,7 +907,6 @@ static int sn95031_device_remove(struct platform_device *pdev)
static struct platform_driver sn95031_codec_driver = {
.driver = {
.name = "sn95031",
- .owner = THIS_MODULE,
},
.probe = sn95031_device_probe,
.remove = sn95031_device_remove,
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index e3501f40c7b3..3ec41ccbf4e2 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -80,7 +80,6 @@ static struct platform_driver spdif_dir_driver = {
.remove = spdif_dir_remove,
.driver = {
.name = "spdif-dir",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spdif_dir_dt_ids),
},
};
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index e0df537dd4b7..ef634a9ad673 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -80,7 +80,6 @@ static struct platform_driver spdif_dit_driver = {
.remove = spdif_dit_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spdif_dit_dt_ids),
},
};
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 4b5c17f8507e..a984485108cd 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -69,6 +69,22 @@
#define SSM4567_DAC_FS_64000_96000 0x3
#define SSM4567_DAC_FS_128000_192000 0x4
+/* SAI_CTRL_1 */
+#define SSM4567_SAI_CTRL_1_BCLK BIT(6)
+#define SSM4567_SAI_CTRL_1_TDM_BLCKS_MASK (0x3 << 4)
+#define SSM4567_SAI_CTRL_1_TDM_BLCKS_32 (0x0 << 4)
+#define SSM4567_SAI_CTRL_1_TDM_BLCKS_48 (0x1 << 4)
+#define SSM4567_SAI_CTRL_1_TDM_BLCKS_64 (0x2 << 4)
+#define SSM4567_SAI_CTRL_1_FSYNC BIT(3)
+#define SSM4567_SAI_CTRL_1_LJ BIT(2)
+#define SSM4567_SAI_CTRL_1_TDM BIT(1)
+#define SSM4567_SAI_CTRL_1_PDM BIT(0)
+
+/* SAI_CTRL_2 */
+#define SSM4567_SAI_CTRL_2_AUTO_SLOT BIT(3)
+#define SSM4567_SAI_CTRL_2_TDM_SLOT_MASK 0x7
+#define SSM4567_SAI_CTRL_2_TDM_SLOT(x) (x)
+
struct ssm4567 {
struct regmap *regmap;
};
@@ -145,15 +161,24 @@ static const struct snd_kcontrol_new ssm4567_snd_controls[] = {
SOC_SINGLE_TLV("Master Playback Volume", SSM4567_REG_DAC_VOLUME, 0,
0xff, 1, ssm4567_vol_tlv),
SOC_SINGLE("DAC Low Power Mode Switch", SSM4567_REG_DAC_CTRL, 4, 1, 0),
+ SOC_SINGLE("DAC High Pass Filter Switch", SSM4567_REG_DAC_CTRL,
+ 5, 1, 0),
};
+static const struct snd_kcontrol_new ssm4567_amplifier_boost_control =
+ SOC_DAPM_SINGLE("Switch", SSM4567_REG_POWER_CTRL, 1, 1, 1);
+
static const struct snd_soc_dapm_widget ssm4567_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC", "HiFi Playback", SSM4567_REG_POWER_CTRL, 2, 1),
+ SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1,
+ &ssm4567_amplifier_boost_control),
SND_SOC_DAPM_OUTPUT("OUT"),
};
static const struct snd_soc_dapm_route ssm4567_routes[] = {
+ { "OUT", NULL, "Amplifier Boost" },
+ { "Amplifier Boost", "Switch", "DAC" },
{ "OUT", NULL, "DAC" },
};
@@ -192,6 +217,107 @@ static int ssm4567_mute(struct snd_soc_dai *dai, int mute)
SSM4567_DAC_MUTE, val);
}
+static int ssm4567_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int width)
+{
+ struct ssm4567 *ssm4567 = snd_soc_dai_get_drvdata(dai);
+ unsigned int blcks;
+ int slot;
+ int ret;
+
+ if (tx_mask == 0)
+ return -EINVAL;
+
+ if (rx_mask && rx_mask != tx_mask)
+ return -EINVAL;
+
+ slot = __ffs(tx_mask);
+ if (tx_mask != BIT(slot))
+ return -EINVAL;
+
+ switch (width) {
+ case 32:
+ blcks = SSM4567_SAI_CTRL_1_TDM_BLCKS_32;
+ break;
+ case 48:
+ blcks = SSM4567_SAI_CTRL_1_TDM_BLCKS_48;
+ break;
+ case 64:
+ blcks = SSM4567_SAI_CTRL_1_TDM_BLCKS_64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_2,
+ SSM4567_SAI_CTRL_2_AUTO_SLOT | SSM4567_SAI_CTRL_2_TDM_SLOT_MASK,
+ SSM4567_SAI_CTRL_2_TDM_SLOT(slot));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+ SSM4567_SAI_CTRL_1_TDM_BLCKS_MASK, blcks);
+}
+
+static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct ssm4567 *ssm4567 = snd_soc_dai_get_drvdata(dai);
+ unsigned int ctrl1 = 0;
+ bool invert_fclk;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ invert_fclk = false;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ ctrl1 |= SSM4567_SAI_CTRL_1_BCLK;
+ invert_fclk = false;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
+ invert_fclk = true;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ ctrl1 |= SSM4567_SAI_CTRL_1_BCLK;
+ invert_fclk = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ctrl1 |= SSM4567_SAI_CTRL_1_LJ;
+ invert_fclk = !invert_fclk;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ ctrl1 |= SSM4567_SAI_CTRL_1_TDM;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ ctrl1 |= SSM4567_SAI_CTRL_1_TDM | SSM4567_SAI_CTRL_1_LJ;
+ break;
+ case SND_SOC_DAIFMT_PDM:
+ ctrl1 |= SSM4567_SAI_CTRL_1_PDM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (invert_fclk)
+ ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
+
+ return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+}
+
static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
{
int ret = 0;
@@ -246,6 +372,8 @@ static int ssm4567_set_bias_level(struct snd_soc_codec *codec,
static const struct snd_soc_dai_ops ssm4567_dai_ops = {
.hw_params = ssm4567_hw_params,
.digital_mute = ssm4567_mute,
+ .set_fmt = ssm4567_set_dai_fmt,
+ .set_tdm_slot = ssm4567_set_tdm_slot,
};
static struct snd_soc_dai_driver ssm4567_dai = {
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 48740855566d..7e18200dd6a9 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -833,23 +833,6 @@ static struct snd_soc_dai_driver sta32x_dai = {
.ops = &sta32x_dai_ops,
};
-#ifdef CONFIG_PM
-static int sta32x_suspend(struct snd_soc_codec *codec)
-{
- sta32x_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int sta32x_resume(struct snd_soc_codec *codec)
-{
- sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define sta32x_suspend NULL
-#define sta32x_resume NULL
-#endif
-
static int sta32x_probe(struct snd_soc_codec *codec)
{
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
@@ -936,7 +919,6 @@ static int sta32x_remove(struct snd_soc_codec *codec)
struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
sta32x_watchdog_stop(sta32x);
- sta32x_set_bias_level(codec, SND_SOC_BIAS_OFF);
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
return 0;
@@ -955,9 +937,8 @@ static bool sta32x_reg_is_volatile(struct device *dev, unsigned int reg)
static const struct snd_soc_codec_driver sta32x_codec = {
.probe = sta32x_probe,
.remove = sta32x_remove,
- .suspend = sta32x_suspend,
- .resume = sta32x_resume,
.set_bias_level = sta32x_set_bias_level,
+ .suspend_bias_off = true,
.controls = sta32x_snd_controls,
.num_controls = ARRAY_SIZE(sta32x_snd_controls),
.dapm_widgets = sta32x_dapm_widgets,
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index cc97dd52aa9c..bda2ee18769e 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -912,23 +912,6 @@ static struct snd_soc_dai_driver sta350_dai = {
.ops = &sta350_dai_ops,
};
-#ifdef CONFIG_PM
-static int sta350_suspend(struct snd_soc_codec *codec)
-{
- sta350_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int sta350_resume(struct snd_soc_codec *codec)
-{
- sta350_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define sta350_suspend NULL
-#define sta350_resume NULL
-#endif
-
static int sta350_probe(struct snd_soc_codec *codec)
{
struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec);
@@ -1065,7 +1048,6 @@ static int sta350_remove(struct snd_soc_codec *codec)
{
struct sta350_priv *sta350 = snd_soc_codec_get_drvdata(codec);
- sta350_set_bias_level(codec, SND_SOC_BIAS_OFF);
regulator_bulk_disable(ARRAY_SIZE(sta350->supplies), sta350->supplies);
return 0;
@@ -1074,9 +1056,8 @@ static int sta350_remove(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver sta350_codec = {
.probe = sta350_probe,
.remove = sta350_remove,
- .suspend = sta350_suspend,
- .resume = sta350_resume,
.set_bias_level = sta350_set_bias_level,
+ .suspend_bias_off = true,
.controls = sta350_snd_controls,
.num_controls = ARRAY_SIZE(sta350_snd_controls),
.dapm_widgets = sta350_dapm_widgets,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 89c748dd3d6e..b0f436d10125 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -319,41 +319,10 @@ static struct snd_soc_dai_driver sta529_dai = {
.ops = &sta529_dai_ops,
};
-static int sta529_probe(struct snd_soc_codec *codec)
-{
- sta529_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-/* power down chip */
-static int sta529_remove(struct snd_soc_codec *codec)
-{
- sta529_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int sta529_suspend(struct snd_soc_codec *codec)
-{
- sta529_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int sta529_resume(struct snd_soc_codec *codec)
-{
- sta529_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
static const struct snd_soc_codec_driver sta529_codec_driver = {
- .probe = sta529_probe,
- .remove = sta529_remove,
.set_bias_level = sta529_set_bias_level,
- .suspend = sta529_suspend,
- .resume = sta529_resume,
+ .suspend_bias_off = true,
+
.controls = sta529_snd_controls,
.num_controls = ARRAY_SIZE(sta529_snd_controls),
};
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 53b810d23fea..6464caf72b21 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -139,18 +139,19 @@ static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
if (reg > AC97_STAC_PAGE0) {
stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
- soc_ac97_ops->write(codec->ac97, reg, val);
+ soc_ac97_ops->write(ac97, reg, val);
stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
return 0;
}
if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
return -EIO;
- soc_ac97_ops->write(codec->ac97, reg, val);
+ soc_ac97_ops->write(ac97, reg, val);
cache[reg / 2] = val;
return 0;
}
@@ -158,11 +159,12 @@ static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
unsigned int reg)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
u16 val = 0, *cache = codec->reg_cache;
if (reg > AC97_STAC_PAGE0) {
stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
- val = soc_ac97_ops->read(codec->ac97, reg - AC97_STAC_PAGE0);
+ val = soc_ac97_ops->read(ac97, reg - AC97_STAC_PAGE0);
stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
return val;
}
@@ -173,7 +175,7 @@ static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 ||
reg == AC97_VENDOR_ID2) {
- val = soc_ac97_ops->read(codec->ac97, reg);
+ val = soc_ac97_ops->read(ac97, reg);
return val;
}
return cache[reg / 2];
@@ -240,45 +242,41 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec,
static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
if (try_warm && soc_ac97_ops->warm_reset) {
- soc_ac97_ops->warm_reset(codec->ac97);
+ soc_ac97_ops->warm_reset(ac97);
if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
return 1;
}
- soc_ac97_ops->reset(codec->ac97);
+ soc_ac97_ops->reset(ac97);
if (soc_ac97_ops->warm_reset)
- soc_ac97_ops->warm_reset(codec->ac97);
+ soc_ac97_ops->warm_reset(ac97);
if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
return -EIO;
return 0;
}
-static int stac9766_codec_suspend(struct snd_soc_codec *codec)
-{
- stac9766_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int stac9766_codec_resume(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
u16 id, reset;
reset = 0;
/* give the codec an AC97 warm reset to start the link */
reset:
if (reset > 5) {
- printk(KERN_ERR "stac9766 failed to resume");
+ dev_err(codec->dev, "Failed to resume\n");
return -EIO;
}
- codec->ac97->bus->ops->warm_reset(codec->ac97);
- id = soc_ac97_ops->read(codec->ac97, AC97_VENDOR_ID2);
+ ac97->bus->ops->warm_reset(ac97);
+ id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
if (id != 0x4c13) {
stac9766_reset(codec, 0);
reset++;
goto reset;
}
- stac9766_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
@@ -294,7 +292,6 @@ static const struct snd_soc_dai_ops stac9766_dai_ops_digital = {
static struct snd_soc_dai_driver stac9766_dai[] = {
{
.name = "stac9766-hifi-analog",
- .ac97_control = 1,
/* stream capabilities */
.playback = {
@@ -316,7 +313,6 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
},
{
.name = "stac9766-hifi-IEC958",
- .ac97_control = 1,
/* stream capabilities */
.playback = {
@@ -334,46 +330,48 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
static int stac9766_codec_probe(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97;
int ret = 0;
- ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
- if (ret < 0)
- goto codec_err;
+ ac97 = snd_soc_new_ac97_codec(codec);
+ if (IS_ERR(ac97))
+ return PTR_ERR(ac97);
+
+ snd_soc_codec_set_drvdata(codec, ac97);
/* do a cold reset for the controller and then try
* a warm reset followed by an optional cold reset for codec */
stac9766_reset(codec, 0);
ret = stac9766_reset(codec, 1);
if (ret < 0) {
- printk(KERN_ERR "Failed to reset STAC9766: AC97 link error\n");
+ dev_err(codec->dev, "Failed to reset: AC97 link error\n");
goto codec_err;
}
- stac9766_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- snd_soc_add_codec_controls(codec, stac9766_snd_ac97_controls,
- ARRAY_SIZE(stac9766_snd_ac97_controls));
-
return 0;
codec_err:
- snd_soc_free_ac97_codec(codec);
+ snd_soc_free_ac97_codec(ac97);
return ret;
}
static int stac9766_codec_remove(struct snd_soc_codec *codec)
{
- snd_soc_free_ac97_codec(codec);
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_free_ac97_codec(ac97);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
+ .controls = stac9766_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(stac9766_snd_ac97_controls),
.write = stac9766_ac97_write,
.read = stac9766_ac97_read,
.set_bias_level = stac9766_set_bias_level,
+ .suspend_bias_off = true,
.probe = stac9766_codec_probe,
.remove = stac9766_codec_remove,
- .suspend = stac9766_codec_suspend,
.resume = stac9766_codec_resume,
.reg_cache_size = ARRAY_SIZE(stac9766_reg),
.reg_word_size = sizeof(u16),
@@ -396,7 +394,6 @@ static int stac9766_remove(struct platform_device *pdev)
static struct platform_driver stac9766_codec_driver = {
.driver = {
.name = "stac9766-codec",
- .owner = THIS_MODULE,
},
.probe = stac9766_probe,
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 0fde5df40f4d..ae23acdd2708 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -345,7 +345,6 @@ static const struct reg_default tas2552_init_regs[] = {
static int tas2552_codec_probe(struct snd_soc_codec *codec)
{
struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec);
- struct snd_soc_dapm_context *dapm = &codec->dapm;
int ret;
tas2552->codec = codec;
@@ -390,11 +389,6 @@ static int tas2552_codec_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, TAS2552_CFG_2, TAS2552_BOOST_EN |
TAS2552_APT_EN | TAS2552_LIM_EN);
- snd_soc_dapm_new_controls(dapm, tas2552_dapm_widgets,
- ARRAY_SIZE(tas2552_dapm_widgets));
- snd_soc_dapm_add_routes(dapm, tas2552_audio_map,
- ARRAY_SIZE(tas2552_audio_map));
-
return 0;
patch_fail:
@@ -462,6 +456,10 @@ static struct snd_soc_codec_driver soc_codec_dev_tas2552 = {
.resume = tas2552_resume,
.controls = tas2552_snd_controls,
.num_controls = ARRAY_SIZE(tas2552_snd_controls),
+ .dapm_widgets = tas2552_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2552_dapm_widgets),
+ .dapm_routes = tas2552_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2552_audio_map),
};
static const struct regmap_config tas2552_regmap_config = {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
new file mode 100644
index 000000000000..16f1b71edb55
--- /dev/null
+++ b/sound/soc/codecs/tfa9879.c
@@ -0,0 +1,328 @@
+/*
+ * tfa9879.c -- driver for NXP Semiconductors TFA9879
+ *
+ * Copyright (C) 2014 Axentia Technologies AB
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/pcm_params.h>
+
+#include "tfa9879.h"
+
+struct tfa9879_priv {
+ struct regmap *regmap;
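+ /* non-zero when the DAI format is right-justified (LSB-justified) */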
+ int lsb_justified;
+};
+
+static int tfa9879_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct tfa9879_priv *tfa9879 = snd_soc_codec_get_drvdata(codec);
+ int fs;
+ int i2s_set = 0;
+
+ switch (params_rate(params)) {
+ case 8000:
+ fs = TFA9879_I2S_FS_8000;
+ break;
+ case 11025:
+ fs = TFA9879_I2S_FS_11025;
+ break;
+ case 12000:
+ fs = TFA9879_I2S_FS_12000;
+ break;
+ case 16000:
+ fs = TFA9879_I2S_FS_16000;
+ break;
+ case 22050:
+ fs = TFA9879_I2S_FS_22050;
+ break;
+ case 24000:
+ fs = TFA9879_I2S_FS_24000;
+ break;
+ case 32000:
+ fs = TFA9879_I2S_FS_32000;
+ break;
+ case 44100:
+ fs = TFA9879_I2S_FS_44100;
+ break;
+ case 48000:
+ fs = TFA9879_I2S_FS_48000;
+ break;
+ case 64000:
+ fs = TFA9879_I2S_FS_64000;
+ break;
+ case 88200:
+ fs = TFA9879_I2S_FS_88200;
+ break;
+ case 96000:
+ fs = TFA9879_I2S_FS_96000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_width(params)) {
+ case 16:
+ i2s_set = TFA9879_I2S_SET_LSB_J_16;
+ break;
+ case 24:
+ i2s_set = TFA9879_I2S_SET_LSB_J_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
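+ /*
+ * In LSB-justified mode the data alignment depends on the sample
+ * width, so refresh the interface format for the current stream.
+ */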
+ if (tfa9879->lsb_justified)
+ snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1,
+ TFA9879_I2S_SET_MASK,
+ i2s_set << TFA9879_I2S_SET_SHIFT);
+
+ snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1,
+ TFA9879_I2S_FS_MASK,
+ fs << TFA9879_I2S_FS_SHIFT);
+ return 0;
+}
+
+static int tfa9879_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+
+ snd_soc_update_bits(codec, TFA9879_MISC_CONTROL,
+ TFA9879_S_MUTE_MASK,
+ !!mute << TFA9879_S_MUTE_SHIFT);
+
+ return 0;
+}
+
+static int tfa9879_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct tfa9879_priv *tfa9879 = snd_soc_codec_get_drvdata(codec);
+ int i2s_set;
+ int sck_pol;
+
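+ /* The TFA9879 is always a clock slave; only CBS_CFS is accepted. */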
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ sck_pol = TFA9879_SCK_POL_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ sck_pol = TFA9879_SCK_POL_INVERSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ tfa9879->lsb_justified = 0;
+ i2s_set = TFA9879_I2S_SET_I2S_24;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ tfa9879->lsb_justified = 0;
+ i2s_set = TFA9879_I2S_SET_MSB_J_24;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ tfa9879->lsb_justified = 1;
+ i2s_set = TFA9879_I2S_SET_LSB_J_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1,
+ TFA9879_SCK_POL_MASK,
+ sck_pol << TFA9879_SCK_POL_SHIFT);
+ snd_soc_update_bits(codec, TFA9879_SERIAL_INTERFACE_1,
+ TFA9879_I2S_SET_MASK,
+ i2s_set << TFA9879_I2S_SET_SHIFT);
+ return 0;
+}
+
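+/* Power-on defaults; probe also writes these back to put the chip in a known state. */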
+static struct reg_default tfa9879_regs[] = {
+ { TFA9879_DEVICE_CONTROL, 0x0000 }, /* 0x00 */
+ { TFA9879_SERIAL_INTERFACE_1, 0x0a18 }, /* 0x01 */
+ { TFA9879_PCM_IOM2_FORMAT_1, 0x0007 }, /* 0x02 */
+ { TFA9879_SERIAL_INTERFACE_2, 0x0a18 }, /* 0x03 */
+ { TFA9879_PCM_IOM2_FORMAT_2, 0x0007 }, /* 0x04 */
+ { TFA9879_EQUALIZER_A1, 0x59dd }, /* 0x05 */
+ { TFA9879_EQUALIZER_A2, 0xc63e }, /* 0x06 */
+ { TFA9879_EQUALIZER_B1, 0x651a }, /* 0x07 */
+ { TFA9879_EQUALIZER_B2, 0xe53e }, /* 0x08 */
+ { TFA9879_EQUALIZER_C1, 0x4616 }, /* 0x09 */
+ { TFA9879_EQUALIZER_C2, 0xd33e }, /* 0x0a */
+ { TFA9879_EQUALIZER_D1, 0x4df3 }, /* 0x0b */
+ { TFA9879_EQUALIZER_D2, 0xea3e }, /* 0x0c */
+ { TFA9879_EQUALIZER_E1, 0x5ee0 }, /* 0x0d */
+ { TFA9879_EQUALIZER_E2, 0xf93e }, /* 0x0e */
+ { TFA9879_BYPASS_CONTROL, 0x0093 }, /* 0x0f */
+ { TFA9879_DYNAMIC_RANGE_COMPR, 0x92ba }, /* 0x10 */
+ { TFA9879_BASS_TREBLE, 0x12a5 }, /* 0x11 */
+ { TFA9879_HIGH_PASS_FILTER, 0x0004 }, /* 0x12 */
+ { TFA9879_VOLUME_CONTROL, 0x10bd }, /* 0x13 */
+ { TFA9879_MISC_CONTROL, 0x0000 }, /* 0x14 */
+};
+
+static bool tfa9879_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == TFA9879_MISC_STATUS;
+}
+
+static const DECLARE_TLV_DB_SCALE(volume_tlv, -7050, 50, 1);
+static const DECLARE_TLV_DB_SCALE(tb_gain_tlv, -1800, 200, 0);
+static const char * const tb_freq_text[] = {
+ "Low", "Mid", "High"
+};
+static const struct soc_enum treble_freq_enum =
+ SOC_ENUM_SINGLE(TFA9879_BASS_TREBLE, TFA9879_F_TRBLE_SHIFT,
+ ARRAY_SIZE(tb_freq_text), tb_freq_text);
+static const struct soc_enum bass_freq_enum =
+ SOC_ENUM_SINGLE(TFA9879_BASS_TREBLE, TFA9879_F_BASS_SHIFT,
+ ARRAY_SIZE(tb_freq_text), tb_freq_text);
+
+static const struct snd_kcontrol_new tfa9879_controls[] = {
+ SOC_SINGLE_TLV("PCM Playback Volume", TFA9879_VOLUME_CONTROL,
+ TFA9879_VOL_SHIFT, 0xbd, 1, volume_tlv),
+ SOC_SINGLE_TLV("Treble Volume", TFA9879_BASS_TREBLE,
+ TFA9879_G_TRBLE_SHIFT, 18, 0, tb_gain_tlv),
+ SOC_SINGLE_TLV("Bass Volume", TFA9879_BASS_TREBLE,
+ TFA9879_G_BASS_SHIFT, 18, 0, tb_gain_tlv),
+ SOC_ENUM("Treble Corner Freq", treble_freq_enum),
+ SOC_ENUM("Bass Corner Freq", bass_freq_enum),
+};
+
+static const struct snd_soc_dapm_widget tfa9879_dapm_widgets[] = {
+SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_DAC("DAC", NULL, TFA9879_DEVICE_CONTROL, TFA9879_OPMODE_SHIFT, 0),
+SND_SOC_DAPM_OUTPUT("LINEOUT"),
+SND_SOC_DAPM_SUPPLY("POWER", TFA9879_DEVICE_CONTROL, TFA9879_POWERUP_SHIFT, 0,
+ NULL, 0),
+};
+
+static const struct snd_soc_dapm_route tfa9879_dapm_routes[] = {
+ { "DAC", NULL, "AIFINL" },
+ { "DAC", NULL, "AIFINR" },
+
+ { "LINEOUT", NULL, "DAC" },
+
+ { "DAC", NULL, "POWER" },
+};
+
+static const struct snd_soc_codec_driver tfa9879_codec = {
+ .controls = tfa9879_controls,
+ .num_controls = ARRAY_SIZE(tfa9879_controls),
+
+ .dapm_widgets = tfa9879_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tfa9879_dapm_widgets),
+ .dapm_routes = tfa9879_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tfa9879_dapm_routes),
+};
+
+static const struct regmap_config tfa9879_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .volatile_reg = tfa9879_volatile_reg,
+ .max_register = TFA9879_MISC_STATUS,
+ .reg_defaults = tfa9879_regs,
+ .num_reg_defaults = ARRAY_SIZE(tfa9879_regs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct snd_soc_dai_ops tfa9879_dai_ops = {
+ .hw_params = tfa9879_hw_params,
+ .digital_mute = tfa9879_digital_mute,
+ .set_fmt = tfa9879_set_fmt,
+};
+
+#define TFA9879_RATES SNDRV_PCM_RATE_8000_96000
+
+#define TFA9879_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_driver tfa9879_dai = {
+ .name = "tfa9879-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TFA9879_RATES,
+ .formats = TFA9879_FORMATS, },
+ .ops = &tfa9879_dai_ops,
+};
+
+static int tfa9879_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tfa9879_priv *tfa9879;
+ int i;
+
+ tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL);
+ if (!tfa9879)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tfa9879);
+
+ tfa9879->regmap = devm_regmap_init_i2c(i2c, &tfa9879_regmap);
+ if (IS_ERR(tfa9879->regmap))
+ return PTR_ERR(tfa9879->regmap);
+
+ /* Ensure the device is in reset state */
+ for (i = 0; i < ARRAY_SIZE(tfa9879_regs); i++)
+ regmap_write(tfa9879->regmap,
+ tfa9879_regs[i].reg, tfa9879_regs[i].def);
+
+ return snd_soc_register_codec(&i2c->dev, &tfa9879_codec,
+ &tfa9879_dai, 1);
+}
+
+static int tfa9879_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id tfa9879_i2c_id[] = {
+ { "tfa9879", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tfa9879_i2c_id);
+
+static struct i2c_driver tfa9879_i2c_driver = {
+ .driver = {
+ .name = "tfa9879",
+ .owner = THIS_MODULE,
+ },
+ .probe = tfa9879_i2c_probe,
+ .remove = tfa9879_i2c_remove,
+ .id_table = tfa9879_i2c_id,
+};
+
+module_i2c_driver(tfa9879_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC NXP Semiconductors TFA9879 driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tfa9879.h b/sound/soc/codecs/tfa9879.h
new file mode 100644
index 000000000000..3408c90c4628
--- /dev/null
+++ b/sound/soc/codecs/tfa9879.h
@@ -0,0 +1,202 @@
+/*
+ * tfa9879.h -- driver for NXP Semiconductors TFA9879
+ *
+ * Copyright (C) 2014 Axentia Technologies AB
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _TFA9879_H
+#define _TFA9879_H
+
+#define TFA9879_DEVICE_CONTROL 0x00
+#define TFA9879_SERIAL_INTERFACE_1 0x01
+#define TFA9879_PCM_IOM2_FORMAT_1 0x02
+#define TFA9879_SERIAL_INTERFACE_2 0x03
+#define TFA9879_PCM_IOM2_FORMAT_2 0x04
+#define TFA9879_EQUALIZER_A1 0x05
+#define TFA9879_EQUALIZER_A2 0x06
+#define TFA9879_EQUALIZER_B1 0x07
+#define TFA9879_EQUALIZER_B2 0x08
+#define TFA9879_EQUALIZER_C1 0x09
+#define TFA9879_EQUALIZER_C2 0x0a
+#define TFA9879_EQUALIZER_D1 0x0b
+#define TFA9879_EQUALIZER_D2 0x0c
+#define TFA9879_EQUALIZER_E1 0x0d
+#define TFA9879_EQUALIZER_E2 0x0e
+#define TFA9879_BYPASS_CONTROL 0x0f
+#define TFA9879_DYNAMIC_RANGE_COMPR 0x10
+#define TFA9879_BASS_TREBLE 0x11
+#define TFA9879_HIGH_PASS_FILTER 0x12
+#define TFA9879_VOLUME_CONTROL 0x13
+#define TFA9879_MISC_CONTROL 0x14
+#define TFA9879_MISC_STATUS 0x15
+
+/* TFA9879_DEVICE_CONTROL */
+#define TFA9879_INPUT_SEL_MASK 0x0010
+#define TFA9879_INPUT_SEL_SHIFT 4
+#define TFA9879_OPMODE_MASK 0x0008
+#define TFA9879_OPMODE_SHIFT 3
+#define TFA9879_RESET_MASK 0x0002
+#define TFA9879_RESET_SHIFT 1
+#define TFA9879_POWERUP_MASK 0x0001
+#define TFA9879_POWERUP_SHIFT 0
+
+/* TFA9879_SERIAL_INTERFACE */
+#define TFA9879_MONO_SEL_MASK 0x0c00
+#define TFA9879_MONO_SEL_SHIFT 10
+#define TFA9879_MONO_SEL_LEFT 0
+#define TFA9879_MONO_SEL_RIGHT 1
+#define TFA9879_MONO_SEL_BOTH 2
+#define TFA9879_I2S_FS_MASK 0x03c0
+#define TFA9879_I2S_FS_SHIFT 6
+#define TFA9879_I2S_FS_8000 0
+#define TFA9879_I2S_FS_11025 1
+#define TFA9879_I2S_FS_12000 2
+#define TFA9879_I2S_FS_16000 3
+#define TFA9879_I2S_FS_22050 4
+#define TFA9879_I2S_FS_24000 5
+#define TFA9879_I2S_FS_32000 6
+#define TFA9879_I2S_FS_44100 7
+#define TFA9879_I2S_FS_48000 8
+#define TFA9879_I2S_FS_64000 9
+#define TFA9879_I2S_FS_88200 10
+#define TFA9879_I2S_FS_96000 11
+#define TFA9879_I2S_SET_MASK 0x0038
+#define TFA9879_I2S_SET_SHIFT 3
+#define TFA9879_I2S_SET_MSB_J_24 2
+#define TFA9879_I2S_SET_I2S_24 3
+#define TFA9879_I2S_SET_LSB_J_16 4
+#define TFA9879_I2S_SET_LSB_J_18 5
+#define TFA9879_I2S_SET_LSB_J_20 6
+#define TFA9879_I2S_SET_LSB_J_24 7
+#define TFA9879_SCK_POL_MASK 0x0004
+#define TFA9879_SCK_POL_SHIFT 2
+#define TFA9879_SCK_POL_NORMAL 0
+#define TFA9879_SCK_POL_INVERSE 1
+#define TFA9879_I_MODE_MASK 0x0003
+#define TFA9879_I_MODE_SHIFT 0
+#define TFA9879_I_MODE_I2S 0
+#define TFA9879_I_MODE_PCM_IOM2_SHORT 1
+#define TFA9879_I_MODE_PCM_IOM2_LONG 2
+
+/* TFA9879_PCM_IOM2_FORMAT */
+#define TFA9879_PCM_FS_MASK 0x0800
+#define TFA9879_PCM_FS_SHIFT 11
+#define TFA9879_A_LAW_MASK 0x0400
+#define TFA9879_A_LAW_SHIFT 10
+#define TFA9879_PCM_COMP_MASK 0x0200
+#define TFA9879_PCM_COMP_SHIFT 9
+#define TFA9879_PCM_DL_MASK 0x0100
+#define TFA9879_PCM_DL_SHIFT 8
+#define TFA9879_D1_SLOT_MASK 0x00f0
+#define TFA9879_D1_SLOT_SHIFT 4
+#define TFA9879_D2_SLOT_MASK 0x000f
+#define TFA9879_D2_SLOT_SHIFT 0
+
+/* TFA9879_EQUALIZER_X1 */
+#define TFA9879_T1_MASK 0x8000
+#define TFA9879_T1_SHIFT 15
+#define TFA9879_K1M_MASK 0x7ff0
+#define TFA9879_K1M_SHIFT 4
+#define TFA9879_K1E_MASK 0x000f
+#define TFA9879_K1E_SHIFT 0
+
+/* TFA9879_EQUALIZER_X2 */
+#define TFA9879_T2_MASK 0x8000
+#define TFA9879_T2_SHIFT 15
+#define TFA9879_K2M_MASK 0x7800
+#define TFA9879_K2M_SHIFT 11
+#define TFA9879_K2E_MASK 0x0700
+#define TFA9879_K2E_SHIFT 8
+#define TFA9879_K0_MASK 0x00fe
+#define TFA9879_K0_SHIFT 1
+#define TFA9879_S_MASK 0x0001
+#define TFA9879_S_SHIFT 0
+
+/* TFA9879_BYPASS_CONTROL */
+#define TFA9879_L_OCP_MASK 0x00c0
+#define TFA9879_L_OCP_SHIFT 6
+#define TFA9879_L_OTP_MASK 0x0030
+#define TFA9879_L_OTP_SHIFT 4
+#define TFA9879_CLIPCTRL_MASK 0x0008
+#define TFA9879_CLIPCTRL_SHIFT 3
+#define TFA9879_HPF_BP_MASK 0x0004
+#define TFA9879_HPF_BP_SHIFT 2
+#define TFA9879_DRC_BP_MASK 0x0002
+#define TFA9879_DRC_BP_SHIFT 1
+#define TFA9879_EQ_BP_MASK 0x0001
+#define TFA9879_EQ_BP_SHIFT 0
+
+/* TFA9879_DYNAMIC_RANGE_COMPR */
+#define TFA9879_AT_LVL_MASK 0xf000
+#define TFA9879_AT_LVL_SHIFT 12
+#define TFA9879_AT_RATE_MASK 0x0f00
+#define TFA9879_AT_RATE_SHIFT 8
+#define TFA9879_RL_LVL_MASK 0x00f0
+#define TFA9879_RL_LVL_SHIFT 4
+#define TFA9879_RL_RATE_MASK 0x000f
+#define TFA9879_RL_RATE_SHIFT 0
+
+/* TFA9879_BASS_TREBLE */
+#define TFA9879_G_TRBLE_MASK 0x3e00
+#define TFA9879_G_TRBLE_SHIFT 9
+#define TFA9879_F_TRBLE_MASK 0x0180
+#define TFA9879_F_TRBLE_SHIFT 7
+#define TFA9879_G_BASS_MASK 0x007c
+#define TFA9879_G_BASS_SHIFT 2
+#define TFA9879_F_BASS_MASK 0x0003
+#define TFA9879_F_BASS_SHIFT 0
+
+/* TFA9879_HIGH_PASS_FILTER */
+#define TFA9879_HP_CTRL_MASK 0x00ff
+#define TFA9879_HP_CTRL_SHIFT 0
+
+/* TFA9879_VOLUME_CONTROL */
+#define TFA9879_ZR_CRSS_MASK 0x1000
+#define TFA9879_ZR_CRSS_SHIFT 12
+#define TFA9879_VOL_MASK 0x00ff
+#define TFA9879_VOL_SHIFT 0
+
+/* TFA9879_MISC_CONTROL */
+#define TFA9879_DE_PHAS_MASK 0x0c00
+#define TFA9879_DE_PHAS_SHIFT 10
+#define TFA9879_H_MUTE_MASK 0x0200
+#define TFA9879_H_MUTE_SHIFT 9
+#define TFA9879_S_MUTE_MASK 0x0100
+#define TFA9879_S_MUTE_SHIFT 8
+#define TFA9879_P_LIM_MASK 0x00ff
+#define TFA9879_P_LIM_SHIFT 0
+
+/* TFA9879_MISC_STATUS */
+#define TFA9879_PS_MASK 0x4000
+#define TFA9879_PS_SHIFT 14
+#define TFA9879_PORA_MASK 0x2000
+#define TFA9879_PORA_SHIFT 13
+#define TFA9879_AMP_MASK 0x0600
+#define TFA9879_AMP_SHIFT 9
+#define TFA9879_IBP_2_MASK 0x0100
+#define TFA9879_IBP_2_SHIFT 8
+#define TFA9879_OFP_2_MASK 0x0080
+#define TFA9879_OFP_2_SHIFT 7
+#define TFA9879_UFP_2_MASK 0x0040
+#define TFA9879_UFP_2_SHIFT 6
+#define TFA9879_IBP_1_MASK 0x0020
+#define TFA9879_IBP_1_SHIFT 5
+#define TFA9879_OFP_1_MASK 0x0010
+#define TFA9879_OFP_1_SHIFT 4
+#define TFA9879_UFP_1_MASK 0x0008
+#define TFA9879_UFP_1_SHIFT 3
+#define TFA9879_OCPOKA_MASK 0x0004
+#define TFA9879_OCPOKA_SHIFT 2
+#define TFA9879_OCPOKB_MASK 0x0002
+#define TFA9879_OCPOKB_SHIFT 1
+#define TFA9879_OTPOK_MASK 0x0001
+#define TFA9879_OTPOK_SHIFT 0
+
+#endif
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index d67167920c2f..cc17e7e5126e 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -540,19 +540,11 @@ static struct snd_soc_dai_driver tlv320aic23_dai = {
.ops = &tlv320aic23_dai_ops,
};
-static int tlv320aic23_suspend(struct snd_soc_codec *codec)
-{
- tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static int tlv320aic23_resume(struct snd_soc_codec *codec)
{
struct aic23 *aic23 = snd_soc_codec_get_drvdata(codec);
regcache_mark_dirty(aic23->regmap);
regcache_sync(aic23->regmap);
- tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
@@ -562,9 +554,6 @@ static int tlv320aic23_codec_probe(struct snd_soc_codec *codec)
/* Reset codec */
snd_soc_write(codec, TLV320AIC23_RESET, 0);
- /* power on device */
- tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
snd_soc_write(codec, TLV320AIC23_DIGT, TLV320AIC23_DEEMP_44K);
/* Unmute input */
@@ -589,18 +578,12 @@ static int tlv320aic23_codec_probe(struct snd_soc_codec *codec)
return 0;
}
-static int tlv320aic23_remove(struct snd_soc_codec *codec)
-{
- tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_tlv320aic23 = {
.probe = tlv320aic23_codec_probe,
- .remove = tlv320aic23_remove,
- .suspend = tlv320aic23_suspend,
.resume = tlv320aic23_resume,
.set_bias_level = tlv320aic23_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = tlv320aic23_snd_controls,
.num_controls = ARRAY_SIZE(tlv320aic23_snd_controls),
.dapm_widgets = tlv320aic23_dapm_widgets,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 145fe5b253d4..dc3223d6eca1 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -911,12 +911,13 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
}
aic31xx->p_div = i;
- for (i = 0; aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++) {
- if (i == ARRAY_SIZE(aic31xx_divs)) {
- dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
- __func__, freq);
- return -EINVAL;
- }
+ for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
+ aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
+ ;
+ if (i == ARRAY_SIZE(aic31xx_divs)) {
+ dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
+ __func__, freq);
+ return -EINVAL;
}
/* set clock on MCLK, BCLK, or GPIO1 as PLL input */
@@ -1056,18 +1057,6 @@ static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int aic31xx_suspend(struct snd_soc_codec *codec)
-{
- aic31xx_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int aic31xx_resume(struct snd_soc_codec *codec)
-{
- aic31xx_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int aic31xx_codec_probe(struct snd_soc_codec *codec)
{
int ret = 0;
@@ -1110,8 +1099,6 @@ static int aic31xx_codec_remove(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int i;
- /* power down chip */
- aic31xx_set_bias_level(codec, SND_SOC_BIAS_OFF);
for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
regulator_unregister_notifier(aic31xx->supplies[i].consumer,
@@ -1123,9 +1110,9 @@ static int aic31xx_codec_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_driver_aic31xx = {
.probe = aic31xx_codec_probe,
.remove = aic31xx_codec_remove,
- .suspend = aic31xx_suspend,
- .resume = aic31xx_resume,
.set_bias_level = aic31xx_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = aic31xx_snd_controls,
.num_controls = ARRAY_SIZE(aic31xx_snd_controls),
.dapm_widgets = aic31xx_dapm_widgets,
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 6ea662db2410..015467ed606b 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -597,18 +597,6 @@ static struct snd_soc_dai_driver aic32x4_dai = {
.symmetric_rates = 1,
};
-static int aic32x4_suspend(struct snd_soc_codec *codec)
-{
- aic32x4_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int aic32x4_resume(struct snd_soc_codec *codec)
-{
- aic32x4_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int aic32x4_probe(struct snd_soc_codec *codec)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
@@ -654,8 +642,6 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, AIC32X4_RMICPGANIN,
AIC32X4_RMICPGANIN_CM1R_10K);
- aic32x4_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/*
* Workaround: for an unknown reason, the ADC needs to be powered up
* and down for the first capture to work properly. It seems related to
@@ -669,18 +655,10 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
return 0;
}
-static int aic32x4_remove(struct snd_soc_codec *codec)
-{
- aic32x4_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
.probe = aic32x4_probe,
- .remove = aic32x4_remove,
- .suspend = aic32x4_suspend,
- .resume = aic32x4_resume,
.set_bias_level = aic32x4_set_bias_level,
+ .suspend_bias_off = true,
.controls = aic32x4_snd_controls,
.num_controls = ARRAY_SIZE(aic32x4_snd_controls),
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index f7c2a575a892..b7ebce054b4e 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -78,6 +78,8 @@ struct aic3x_priv {
struct aic3x_disable_nb disable_nb[AIC3X_NUM_SUPPLIES];
struct aic3x_setup_data *setup;
unsigned int sysclk;
+ unsigned int dai_fmt;
+ unsigned int tdm_delay;
struct list_head list;
int master;
int gpio_reset;
@@ -214,61 +216,78 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static const char *aic3x_left_dac_mux[] = { "DAC_L1", "DAC_L3", "DAC_L2" };
-static const char *aic3x_right_dac_mux[] = { "DAC_R1", "DAC_R3", "DAC_R2" };
-static const char *aic3x_left_hpcom_mux[] =
- { "differential of HPLOUT", "constant VCM", "single-ended" };
-static const char *aic3x_right_hpcom_mux[] =
- { "differential of HPROUT", "constant VCM", "single-ended",
- "differential of HPLCOM", "external feedback" };
-static const char *aic3x_linein_mode_mux[] = { "single-ended", "differential" };
-static const char *aic3x_adc_hpf[] =
- { "Disabled", "0.0045xFs", "0.0125xFs", "0.025xFs" };
-
-#define LDAC_ENUM 0
-#define RDAC_ENUM 1
-#define LHPCOM_ENUM 2
-#define RHPCOM_ENUM 3
-#define LINE1L_2_L_ENUM 4
-#define LINE1L_2_R_ENUM 5
-#define LINE1R_2_L_ENUM 6
-#define LINE1R_2_R_ENUM 7
-#define LINE2L_ENUM 8
-#define LINE2R_ENUM 9
-#define ADC_HPF_ENUM 10
-
-static const struct soc_enum aic3x_enum[] = {
- SOC_ENUM_SINGLE(DAC_LINE_MUX, 6, 3, aic3x_left_dac_mux),
- SOC_ENUM_SINGLE(DAC_LINE_MUX, 4, 3, aic3x_right_dac_mux),
- SOC_ENUM_SINGLE(HPLCOM_CFG, 4, 3, aic3x_left_hpcom_mux),
- SOC_ENUM_SINGLE(HPRCOM_CFG, 3, 5, aic3x_right_hpcom_mux),
- SOC_ENUM_SINGLE(LINE1L_2_LADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_SINGLE(LINE1L_2_RADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_SINGLE(LINE1R_2_LADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_SINGLE(LINE1R_2_RADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_SINGLE(LINE2L_2_LADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_SINGLE(LINE2R_2_RADC_CTRL, 7, 2, aic3x_linein_mode_mux),
- SOC_ENUM_DOUBLE(AIC3X_CODEC_DFILT_CTRL, 6, 4, 4, aic3x_adc_hpf),
-};
-
-static const char *aic3x_agc_level[] =
- { "-5.5dB", "-8dB", "-10dB", "-12dB", "-14dB", "-17dB", "-20dB", "-24dB" };
-static const struct soc_enum aic3x_agc_level_enum[] = {
- SOC_ENUM_SINGLE(LAGC_CTRL_A, 4, 8, aic3x_agc_level),
- SOC_ENUM_SINGLE(RAGC_CTRL_A, 4, 8, aic3x_agc_level),
-};
-
-static const char *aic3x_agc_attack[] = { "8ms", "11ms", "16ms", "20ms" };
-static const struct soc_enum aic3x_agc_attack_enum[] = {
- SOC_ENUM_SINGLE(LAGC_CTRL_A, 2, 4, aic3x_agc_attack),
- SOC_ENUM_SINGLE(RAGC_CTRL_A, 2, 4, aic3x_agc_attack),
-};
-
-static const char *aic3x_agc_decay[] = { "100ms", "200ms", "400ms", "500ms" };
-static const struct soc_enum aic3x_agc_decay_enum[] = {
- SOC_ENUM_SINGLE(LAGC_CTRL_A, 0, 4, aic3x_agc_decay),
- SOC_ENUM_SINGLE(RAGC_CTRL_A, 0, 4, aic3x_agc_decay),
-};
+static const char * const aic3x_left_dac_mux[] = {
+ "DAC_L1", "DAC_L3", "DAC_L2" };
+static SOC_ENUM_SINGLE_DECL(aic3x_left_dac_enum, DAC_LINE_MUX, 6,
+ aic3x_left_dac_mux);
+
+static const char * const aic3x_right_dac_mux[] = {
+ "DAC_R1", "DAC_R3", "DAC_R2" };
+static SOC_ENUM_SINGLE_DECL(aic3x_right_dac_enum, DAC_LINE_MUX, 4,
+ aic3x_right_dac_mux);
+
+static const char * const aic3x_left_hpcom_mux[] = {
+ "differential of HPLOUT", "constant VCM", "single-ended" };
+static SOC_ENUM_SINGLE_DECL(aic3x_left_hpcom_enum, HPLCOM_CFG, 4,
+ aic3x_left_hpcom_mux);
+
+static const char * const aic3x_right_hpcom_mux[] = {
+ "differential of HPROUT", "constant VCM", "single-ended",
+ "differential of HPLCOM", "external feedback" };
+static SOC_ENUM_SINGLE_DECL(aic3x_right_hpcom_enum, HPRCOM_CFG, 3,
+ aic3x_right_hpcom_mux);
+
+static const char * const aic3x_linein_mode_mux[] = {
+ "single-ended", "differential" };
+static SOC_ENUM_SINGLE_DECL(aic3x_line1l_2_l_enum, LINE1L_2_LADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+static SOC_ENUM_SINGLE_DECL(aic3x_line1l_2_r_enum, LINE1L_2_RADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+static SOC_ENUM_SINGLE_DECL(aic3x_line1r_2_l_enum, LINE1R_2_LADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+static SOC_ENUM_SINGLE_DECL(aic3x_line1r_2_r_enum, LINE1R_2_RADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+static SOC_ENUM_SINGLE_DECL(aic3x_line2l_2_ldac_enum, LINE2L_2_LADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+static SOC_ENUM_SINGLE_DECL(aic3x_line2r_2_rdac_enum, LINE2R_2_RADC_CTRL, 7,
+ aic3x_linein_mode_mux);
+
+static const char * const aic3x_adc_hpf[] = {
+ "Disabled", "0.0045xFs", "0.0125xFs", "0.025xFs" };
+static SOC_ENUM_DOUBLE_DECL(aic3x_adc_hpf_enum, AIC3X_CODEC_DFILT_CTRL, 6, 4,
+ aic3x_adc_hpf);
+
+static const char * const aic3x_agc_level[] = {
+ "-5.5dB", "-8dB", "-10dB", "-12dB",
+ "-14dB", "-17dB", "-20dB", "-24dB" };
+static SOC_ENUM_SINGLE_DECL(aic3x_lagc_level_enum, LAGC_CTRL_A, 4,
+ aic3x_agc_level);
+static SOC_ENUM_SINGLE_DECL(aic3x_ragc_level_enum, RAGC_CTRL_A, 4,
+ aic3x_agc_level);
+
+static const char * const aic3x_agc_attack[] = {
+ "8ms", "11ms", "16ms", "20ms" };
+static SOC_ENUM_SINGLE_DECL(aic3x_lagc_attack_enum, LAGC_CTRL_A, 2,
+ aic3x_agc_attack);
+static SOC_ENUM_SINGLE_DECL(aic3x_ragc_attack_enum, RAGC_CTRL_A, 2,
+ aic3x_agc_attack);
+
+static const char * const aic3x_agc_decay[] = {
+ "100ms", "200ms", "400ms", "500ms" };
+static SOC_ENUM_SINGLE_DECL(aic3x_lagc_decay_enum, LAGC_CTRL_A, 0,
+ aic3x_agc_decay);
+static SOC_ENUM_SINGLE_DECL(aic3x_ragc_decay_enum, RAGC_CTRL_A, 0,
+ aic3x_agc_decay);
+
+static const char * const aic3x_poweron_time[] = {
+ "0us", "10us", "100us", "1ms", "10ms", "50ms",
+ "100ms", "200ms", "400ms", "800ms", "2s", "4s" };
+static SOC_ENUM_SINGLE_DECL(aic3x_poweron_time_enum, HPOUT_POP_REDUCTION, 4,
+ aic3x_poweron_time);
+
+static const char * const aic3x_rampup_step[] = { "0ms", "1ms", "2ms", "4ms" };
+static SOC_ENUM_SINGLE_DECL(aic3x_rampup_step_enum, HPOUT_POP_REDUCTION, 2,
+ aic3x_rampup_step);
/*
* DAC digital volumes. From -63.5 to 0 dB in 0.5 dB steps
@@ -383,12 +402,12 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
* adjust PGA to max value when ADC is on and will never go back.
*/
SOC_DOUBLE_R("AGC Switch", LAGC_CTRL_A, RAGC_CTRL_A, 7, 0x01, 0),
- SOC_ENUM("Left AGC Target level", aic3x_agc_level_enum[0]),
- SOC_ENUM("Right AGC Target level", aic3x_agc_level_enum[1]),
- SOC_ENUM("Left AGC Attack time", aic3x_agc_attack_enum[0]),
- SOC_ENUM("Right AGC Attack time", aic3x_agc_attack_enum[1]),
- SOC_ENUM("Left AGC Decay time", aic3x_agc_decay_enum[0]),
- SOC_ENUM("Right AGC Decay time", aic3x_agc_decay_enum[1]),
+ SOC_ENUM("Left AGC Target level", aic3x_lagc_level_enum),
+ SOC_ENUM("Right AGC Target level", aic3x_ragc_level_enum),
+ SOC_ENUM("Left AGC Attack time", aic3x_lagc_attack_enum),
+ SOC_ENUM("Right AGC Attack time", aic3x_ragc_attack_enum),
+ SOC_ENUM("Left AGC Decay time", aic3x_lagc_decay_enum),
+ SOC_ENUM("Right AGC Decay time", aic3x_ragc_decay_enum),
/* De-emphasis */
SOC_DOUBLE("De-emphasis Switch", AIC3X_CODEC_DFILT_CTRL, 2, 0, 0x01, 0),
@@ -398,7 +417,11 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
0, 119, 0, adc_tlv),
SOC_DOUBLE_R("PGA Capture Switch", LADC_VOL, RADC_VOL, 7, 0x01, 1),
- SOC_ENUM("ADC HPF Cut-off", aic3x_enum[ADC_HPF_ENUM]),
+ SOC_ENUM("ADC HPF Cut-off", aic3x_adc_hpf_enum),
+
+ /* Pop reduction */
+ SOC_ENUM("Output Driver Power-On time", aic3x_poweron_time_enum),
+ SOC_ENUM("Output Driver Ramp-up step", aic3x_rampup_step_enum),
};
static const struct snd_kcontrol_new aic3x_mono_controls[] = {
@@ -425,19 +448,19 @@ static const struct snd_kcontrol_new aic3x_classd_amp_gain_ctrl =
/* Left DAC Mux */
static const struct snd_kcontrol_new aic3x_left_dac_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LDAC_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_left_dac_enum);
/* Right DAC Mux */
static const struct snd_kcontrol_new aic3x_right_dac_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[RDAC_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_right_dac_enum);
/* Left HPCOM Mux */
static const struct snd_kcontrol_new aic3x_left_hpcom_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LHPCOM_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_left_hpcom_enum);
/* Right HPCOM Mux */
static const struct snd_kcontrol_new aic3x_right_hpcom_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[RHPCOM_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_right_hpcom_enum);
/* Left Line Mixer */
static const struct snd_kcontrol_new aic3x_left_line_mixer_controls[] = {
@@ -529,23 +552,23 @@ static const struct snd_kcontrol_new aic3x_right_pga_mixer_controls[] = {
/* Left Line1 Mux */
static const struct snd_kcontrol_new aic3x_left_line1l_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE1L_2_L_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line1l_2_l_enum);
static const struct snd_kcontrol_new aic3x_right_line1l_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE1L_2_R_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line1l_2_r_enum);
/* Right Line1 Mux */
static const struct snd_kcontrol_new aic3x_right_line1r_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE1R_2_R_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line1r_2_r_enum);
static const struct snd_kcontrol_new aic3x_left_line1r_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE1R_2_L_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line1r_2_l_enum);
/* Left Line2 Mux */
static const struct snd_kcontrol_new aic3x_left_line2_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE2L_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line2l_2_ldac_enum);
/* Right Line2 Mux */
static const struct snd_kcontrol_new aic3x_right_line2_mux_controls =
-SOC_DAPM_ENUM("Route", aic3x_enum[LINE2R_ENUM]);
+SOC_DAPM_ENUM("Route", aic3x_line2r_2_rdac_enum);
static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
/* Left DAC to Left Outputs */
@@ -1009,6 +1032,25 @@ found:
return 0;
}
+static int aic3x_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+ int delay = 0;
+
+ /* TDM slot selection only valid in DSP_A/_B mode */
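+ /* DSP_A carries an implicit one bit-clock offset, hence the extra +1 */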
+ if (aic3x->dai_fmt == SND_SOC_DAIFMT_DSP_A)
+ delay += (aic3x->tdm_delay + 1);
+ else if (aic3x->dai_fmt == SND_SOC_DAIFMT_DSP_B)
+ delay += aic3x->tdm_delay;
+
+ /* Configure data delay */
+ snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
+
+ return 0;
+}
+
static int aic3x_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
@@ -1048,7 +1090,6 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct snd_soc_codec *codec = codec_dai->codec;
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
u8 iface_areg, iface_breg;
- int delay = 0;
iface_areg = snd_soc_read(codec, AIC3X_ASD_INTF_CTRLA) & 0x3f;
iface_breg = snd_soc_read(codec, AIC3X_ASD_INTF_CTRLB) & 0x3f;
@@ -1076,7 +1117,6 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
case (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF):
break;
case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF):
- delay = 1;
case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF):
iface_breg |= (0x01 << 6);
break;
@@ -1090,10 +1130,45 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
+ aic3x->dai_fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+
/* set iface */
snd_soc_write(codec, AIC3X_ASD_INTF_CTRLA, iface_areg);
snd_soc_write(codec, AIC3X_ASD_INTF_CTRLB, iface_breg);
- snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
+
+ return 0;
+}
+
+static int aic3x_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+ unsigned int lsb;
+
+ if (tx_mask != rx_mask) {
+ dev_err(codec->dev, "tx and rx masks must be symmetric\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(!tx_mask)) {
+ dev_err(codec->dev, "tx and rx masks need to be non 0\n");
+ return -EINVAL;
+ }
+
+ /* TDM based on DSP mode requires slots to be adjacent */
+ lsb = __ffs(tx_mask);
+ if ((lsb + 1) != __fls(tx_mask)) {
+ dev_err(codec->dev, "Invalid mask, slots must be adjacent\n");
+ return -EINVAL;
+ }
+
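+ /*
+ * Remember the bit-clock offset of the first active slot; it is
+ * programmed into the data delay register in aic3x_prepare().
+ */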
+ aic3x->tdm_delay = lsb * slot_width;
+
+ /* DOUT in high-impedance on inactive bit clocks */
+ snd_soc_update_bits(codec, AIC3X_ASD_INTF_CTRLA,
+ DOUT_TRISTATE, DOUT_TRISTATE);
return 0;
}
@@ -1212,9 +1287,11 @@ static int aic3x_set_bias_level(struct snd_soc_codec *codec,
static const struct snd_soc_dai_ops aic3x_dai_ops = {
.hw_params = aic3x_hw_params,
+ .prepare = aic3x_prepare,
.digital_mute = aic3x_mute,
.set_sysclk = aic3x_set_dai_sysclk,
.set_fmt = aic3x_set_dai_fmt,
+ .set_tdm_slot = aic3x_set_dai_tdm_slot,
};
static struct snd_soc_dai_driver aic3x_dai = {
@@ -1414,7 +1491,6 @@ static int aic3x_remove(struct snd_soc_codec *codec)
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
int i;
- aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
list_del(&aic3x->list);
for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
regulator_unregister_notifier(aic3x->supplies[i].consumer,
diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
index e521ac3ddde8..89fa692df206 100644
--- a/sound/soc/codecs/tlv320aic3x.h
+++ b/sound/soc/codecs/tlv320aic3x.h
@@ -169,6 +169,7 @@
/* Audio serial data interface control register A bits */
#define BIT_CLK_MASTER 0x80
#define WORD_CLK_MASTER 0x40
+#define DOUT_TRISTATE 0x20
/* Codec Datapath setup register 7 */
#define FSREF_44100 (1 << 7)
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index e21ed934bdbf..0fe2ced5b09f 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1436,8 +1436,6 @@ static int dac33_soc_remove(struct snd_soc_codec *codec)
{
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
- dac33_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
if (dac33->irq >= 0) {
free_irq(dac33->irq, dac33->codec);
destroy_workqueue(dac33->dac33_wq);
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
new file mode 100644
index 000000000000..1d1205702d23
--- /dev/null
+++ b/sound/soc/codecs/ts3a227e.c
@@ -0,0 +1,314 @@
+/*
+ * TS3A227E Autonomous Audio Accessory Detection and Configuration Switch
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+
+struct ts3a227e {
+ struct regmap *regmap;
+ struct snd_soc_jack *jack;
+ bool plugged;
+ bool mic_present;
+ unsigned int buttons_held;
+};
+
+/* Button values to be reported on the jack */
+static const int ts3a227e_buttons[] = {
+ SND_JACK_BTN_0,
+ SND_JACK_BTN_1,
+ SND_JACK_BTN_2,
+ SND_JACK_BTN_3,
+};
+
+#define TS3A227E_NUM_BUTTONS 4
+#define TS3A227E_JACK_MASK (SND_JACK_HEADPHONE | \
+ SND_JACK_MICROPHONE | \
+ SND_JACK_BTN_0 | \
+ SND_JACK_BTN_1 | \
+ SND_JACK_BTN_2 | \
+ SND_JACK_BTN_3)
+
+/* TS3A227E registers */
+#define TS3A227E_REG_DEVICE_ID 0x00
+#define TS3A227E_REG_INTERRUPT 0x01
+#define TS3A227E_REG_KP_INTERRUPT 0x02
+#define TS3A227E_REG_INTERRUPT_DISABLE 0x03
+#define TS3A227E_REG_SETTING_1 0x04
+#define TS3A227E_REG_SETTING_2 0x05
+#define TS3A227E_REG_SETTING_3 0x06
+#define TS3A227E_REG_SWITCH_CONTROL_1 0x07
+#define TS3A227E_REG_SWITCH_CONTROL_2 0x08
+#define TS3A227E_REG_SWITCH_STATUS_1 0x09
+#define TS3A227E_REG_SWITCH_STATUS_2 0x0a
+#define TS3A227E_REG_ACCESSORY_STATUS 0x0b
+#define TS3A227E_REG_ADC_OUTPUT 0x0c
+#define TS3A227E_REG_KP_THRESHOLD_1 0x0d
+#define TS3A227E_REG_KP_THRESHOLD_2 0x0e
+#define TS3A227E_REG_KP_THRESHOLD_3 0x0f
+
+/* TS3A227E_REG_INTERRUPT 0x01 */
+#define INS_REM_EVENT 0x01
+#define DETECTION_COMPLETE_EVENT 0x02
+
+/* TS3A227E_REG_KP_INTERRUPT 0x02 */
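+/* Each button uses a bit pair: the even bit reports press, the odd bit release */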
+#define PRESS_MASK(idx) (0x01 << (2 * (idx)))
+#define RELEASE_MASK(idx) (0x02 << (2 * (idx)))
+
+/* TS3A227E_REG_INTERRUPT_DISABLE 0x03 */
+#define INS_REM_INT_DISABLE 0x01
+#define DETECTION_COMPLETE_INT_DISABLE 0x02
+#define ADC_COMPLETE_INT_DISABLE 0x04
+#define INTB_DISABLE 0x08
+
+/* TS3A227E_REG_SETTING_2 0x05 */
+#define KP_ENABLE 0x04
+
+/* TS3A227E_REG_ACCESSORY_STATUS 0x0b */
+#define TYPE_3_POLE 0x01
+#define TYPE_4_POLE_OMTP 0x02
+#define TYPE_4_POLE_STANDARD 0x04
+#define JACK_INSERTED 0x08
+#define EITHER_MIC_MASK (TYPE_4_POLE_OMTP | TYPE_4_POLE_STANDARD)
+
+static const struct reg_default ts3a227e_reg_defaults[] = {
+ { TS3A227E_REG_DEVICE_ID, 0x10 },
+ { TS3A227E_REG_INTERRUPT, 0x00 },
+ { TS3A227E_REG_KP_INTERRUPT, 0x00 },
+ { TS3A227E_REG_INTERRUPT_DISABLE, 0x08 },
+ { TS3A227E_REG_SETTING_1, 0x23 },
+ { TS3A227E_REG_SETTING_2, 0x00 },
+ { TS3A227E_REG_SETTING_3, 0x0e },
+ { TS3A227E_REG_SWITCH_CONTROL_1, 0x00 },
+ { TS3A227E_REG_SWITCH_CONTROL_2, 0x00 },
+ { TS3A227E_REG_SWITCH_STATUS_1, 0x0c },
+ { TS3A227E_REG_SWITCH_STATUS_2, 0x00 },
+ { TS3A227E_REG_ACCESSORY_STATUS, 0x00 },
+ { TS3A227E_REG_ADC_OUTPUT, 0x00 },
+ { TS3A227E_REG_KP_THRESHOLD_1, 0x20 },
+ { TS3A227E_REG_KP_THRESHOLD_2, 0x40 },
+ { TS3A227E_REG_KP_THRESHOLD_3, 0x68 },
+};
+
+static bool ts3a227e_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TS3A227E_REG_DEVICE_ID ... TS3A227E_REG_KP_THRESHOLD_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ts3a227e_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TS3A227E_REG_INTERRUPT_DISABLE ... TS3A227E_REG_SWITCH_CONTROL_2:
+ case TS3A227E_REG_KP_THRESHOLD_1 ... TS3A227E_REG_KP_THRESHOLD_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ts3a227e_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TS3A227E_REG_INTERRUPT ... TS3A227E_REG_INTERRUPT_DISABLE:
+ case TS3A227E_REG_SETTING_2:
+ case TS3A227E_REG_SWITCH_STATUS_1 ... TS3A227E_REG_ADC_OUTPUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ts3a227e_jack_report(struct ts3a227e *ts3a227e)
+{
+ unsigned int i;
+ int report = 0;
+
+ if (!ts3a227e->jack)
+ return;
+
+ if (ts3a227e->plugged)
+ report = SND_JACK_HEADPHONE;
+ if (ts3a227e->mic_present)
+ report |= SND_JACK_MICROPHONE;
+ for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
+ if (ts3a227e->buttons_held & (1 << i))
+ report |= ts3a227e_buttons[i];
+ }
+ snd_soc_jack_report(ts3a227e->jack, report, TS3A227E_JACK_MASK);
+}
+
+static void ts3a227e_new_jack_state(struct ts3a227e *ts3a227e, unsigned acc_reg)
+{
+ bool plugged, mic_present;
+
+ plugged = !!(acc_reg & JACK_INSERTED);
+ mic_present = plugged && !!(acc_reg & EITHER_MIC_MASK);
+
+ ts3a227e->plugged = plugged;
+
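+ /* A change in mic presence invalidates any recorded button state. */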
+ if (mic_present != ts3a227e->mic_present) {
+ ts3a227e->mic_present = mic_present;
+ ts3a227e->buttons_held = 0;
+ if (mic_present) {
+ /* Enable key press detection. */
+ regmap_update_bits(ts3a227e->regmap,
+ TS3A227E_REG_SETTING_2,
+ KP_ENABLE, KP_ENABLE);
+ }
+ }
+}
+
+static irqreturn_t ts3a227e_interrupt(int irq, void *data)
+{
+ struct ts3a227e *ts3a227e = (struct ts3a227e *)data;
+ struct regmap *regmap = ts3a227e->regmap;
+ unsigned int int_reg, kp_int_reg, acc_reg, i;
+
+ /* Check for plug/unplug. */
+ regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+ if (int_reg & (DETECTION_COMPLETE_EVENT | INS_REM_EVENT)) {
+ regmap_read(regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+ ts3a227e_new_jack_state(ts3a227e, acc_reg);
+ }
+
+ /* Report any key events. */
+ regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+ for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
+ if (kp_int_reg & PRESS_MASK(i))
+ ts3a227e->buttons_held |= (1 << i);
+ if (kp_int_reg & RELEASE_MASK(i))
+ ts3a227e->buttons_held &= ~(1 << i);
+ }
+
+ ts3a227e_jack_report(ts3a227e);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ts3a227e_enable_jack_detect - Specify a jack for event reporting
+ *
+ * @component: component to register the jack with
+ * @jack: jack to use to report headset and button events on
+ *
+ * After this function has been called the headset insert/remove and button
+ * events 0-3 will be routed to the given jack. Jack can be null to stop
+ * reporting.
+ */
+int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *jack)
+{
+ struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ ts3a227e->jack = jack;
+ ts3a227e_jack_report(ts3a227e);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ts3a227e_enable_jack_detect);
+
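+/*
+ * Illustrative usage (not part of this driver): a machine driver would
+ * typically create a struct snd_soc_jack covering TS3A227E_JACK_MASK and
+ * pass it in, for example:
+ *
+ *	ts3a227e_enable_jack_detect(component, &headset_jack);
+ */
+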
+static struct snd_soc_component_driver ts3a227e_soc_driver;
+
+static const struct regmap_config ts3a227e_regmap_config = {
+ .val_bits = 8,
+ .reg_bits = 8,
+
+ .max_register = TS3A227E_REG_KP_THRESHOLD_3,
+ .readable_reg = ts3a227e_readable_reg,
+ .writeable_reg = ts3a227e_writeable_reg,
+ .volatile_reg = ts3a227e_volatile_reg,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = ts3a227e_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ts3a227e_reg_defaults),
+};
+
+static int ts3a227e_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct ts3a227e *ts3a227e;
+ struct device *dev = &i2c->dev;
+ int ret;
+
+ ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
+ if (ts3a227e == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, ts3a227e);
+
+ ts3a227e->regmap = devm_regmap_init_i2c(i2c, &ts3a227e_regmap_config);
+ if (IS_ERR(ts3a227e->regmap))
+ return PTR_ERR(ts3a227e->regmap);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL, ts3a227e_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "TS3A227E", ts3a227e);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d (%d)\n", i2c->irq, ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(&i2c->dev, &ts3a227e_soc_driver,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable interrupts except for ADC complete. */
+ regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_INTERRUPT_DISABLE,
+ INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
+ ADC_COMPLETE_INT_DISABLE);
+
+ return 0;
+}
+
+static const struct i2c_device_id ts3a227e_i2c_ids[] = {
+ { "ts3a227e", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ts3a227e_i2c_ids);
+
+static const struct of_device_id ts3a227e_of_match[] = {
+ { .compatible = "ti,ts3a227e", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ts3a227e_of_match);
+
+static struct i2c_driver ts3a227e_driver = {
+ .driver = {
+ .name = "ts3a227e",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ts3a227e_of_match),
+ },
+ .probe = ts3a227e_i2c_probe,
+ .id_table = ts3a227e_i2c_ids,
+};
+module_i2c_driver(ts3a227e_driver);
+
+MODULE_DESCRIPTION("ASoC ts3a227e driver");
+MODULE_AUTHOR("Dylan Reid <dgreid@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ts3a227e.h b/sound/soc/codecs/ts3a227e.h
new file mode 100644
index 000000000000..e2acf9c5bebe
--- /dev/null
+++ b/sound/soc/codecs/ts3a227e.h
@@ -0,0 +1,17 @@
+/*
+ * TS3A227E Autonomous Audio Accessory Detection and Configuration Switch
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TS3A227E_H
+#define _TS3A227E_H
+
+int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *jack);
+
+#endif
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index b6b0cb399599..44af3188afb9 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -2177,8 +2177,6 @@ static int twl4030_soc_remove(struct snd_soc_codec *codec)
struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
struct twl4030_codec_data *pdata = twl4030->pdata;
- twl4030_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
if (pdata && pdata->hs_extmute && gpio_is_valid(pdata->hs_extmute_gpio))
gpio_free(pdata->hs_extmute_gpio);
@@ -2220,7 +2218,6 @@ static struct platform_driver twl4030_codec_driver = {
.remove = twl4030_codec_remove,
.driver = {
.name = "twl4030-codec",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 0f6067f04e29..90f47f988b3f 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -1095,25 +1095,6 @@ static struct snd_soc_dai_driver twl6040_dai[] = {
},
};
-#ifdef CONFIG_PM
-static int twl6040_suspend(struct snd_soc_codec *codec)
-{
- twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int twl6040_resume(struct snd_soc_codec *codec)
-{
- twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define twl6040_suspend NULL
-#define twl6040_resume NULL
-#endif
-
static int twl6040_probe(struct snd_soc_codec *codec)
{
struct twl6040_data *priv;
@@ -1160,7 +1141,6 @@ static int twl6040_remove(struct snd_soc_codec *codec)
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
free_irq(priv->plug_irq, codec);
- twl6040_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1168,11 +1148,10 @@ static int twl6040_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
.probe = twl6040_probe,
.remove = twl6040_remove,
- .suspend = twl6040_suspend,
- .resume = twl6040_resume,
.read = twl6040_read,
.write = twl6040_write,
.set_bias_level = twl6040_set_bias_level,
+ .suspend_bias_off = true,
.ignore_pmdown_time = true,
.controls = twl6040_snd_controls,
@@ -1198,7 +1177,6 @@ static int twl6040_codec_remove(struct platform_device *pdev)
static struct platform_driver twl6040_codec_driver = {
.driver = {
.name = "twl6040-codec",
- .owner = THIS_MODULE,
},
.probe = twl6040_codec_probe,
.remove = twl6040_codec_remove,
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 32b2f78aa62c..f883308c00de 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -518,11 +518,6 @@ static int uda134x_soc_probe(struct snd_soc_codec *codec)
uda134x_reset(codec);
- if (pd->is_powered_on_standby)
- uda134x_set_bias_level(codec, SND_SOC_BIAS_ON);
- else
- uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
if (pd->model == UDA134X_UDA1341) {
widgets = uda1341_dapm_widgets;
num_widgets = ARRAY_SIZE(uda1341_dapm_widgets);
@@ -574,44 +569,21 @@ static int uda134x_soc_remove(struct snd_soc_codec *codec)
{
struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
- uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- uda134x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
kfree(uda134x);
return 0;
}
-#if defined(CONFIG_PM)
-static int uda134x_soc_suspend(struct snd_soc_codec *codec)
-{
- uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- uda134x_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int uda134x_soc_resume(struct snd_soc_codec *codec)
-{
- uda134x_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
- uda134x_set_bias_level(codec, SND_SOC_BIAS_ON);
- return 0;
-}
-#else
-#define uda134x_soc_suspend NULL
-#define uda134x_soc_resume NULL
-#endif /* CONFIG_PM */
-
static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
.probe = uda134x_soc_probe,
.remove = uda134x_soc_remove,
- .suspend = uda134x_soc_suspend,
- .resume = uda134x_soc_resume,
.reg_cache_size = sizeof(uda134x_reg),
.reg_word_size = sizeof(u8),
.reg_cache_default = uda134x_reg,
.reg_cache_step = 1,
.read = uda134x_read_reg_cache,
- .write = uda134x_write,
.set_bias_level = uda134x_set_bias_level,
+ .suspend_bias_off = true,
+
.dapm_widgets = uda134x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(uda134x_dapm_widgets),
.dapm_routes = uda134x_dapm_routes,
@@ -633,7 +605,6 @@ static int uda134x_codec_remove(struct platform_device *pdev)
static struct platform_driver uda134x_codec_driver = {
.driver = {
.name = "uda134x-codec",
- .owner = THIS_MODULE,
},
.probe = uda134x_codec_probe,
.remove = uda134x_codec_remove,
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index e62e70781ec2..dc7778b6dd7f 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -693,18 +693,6 @@ static struct snd_soc_dai_driver uda1380_dai[] = {
},
};
-static int uda1380_suspend(struct snd_soc_codec *codec)
-{
- uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int uda1380_resume(struct snd_soc_codec *codec)
-{
- uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int uda1380_probe(struct snd_soc_codec *codec)
{
struct uda1380_platform_data *pdata =codec->dev->platform_data;
@@ -739,8 +727,6 @@ static int uda1380_probe(struct snd_soc_codec *codec)
INIT_WORK(&uda1380->work, uda1380_flush_work);
- /* power on device */
- uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* set clock input */
switch (pdata->dac_clk) {
case UDA1380_DAC_CLK_SYSCLK:
@@ -766,8 +752,6 @@ static int uda1380_remove(struct snd_soc_codec *codec)
{
struct uda1380_platform_data *pdata =codec->dev->platform_data;
- uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
gpio_free(pdata->gpio_reset);
gpio_free(pdata->gpio_power);
@@ -777,11 +761,11 @@ static int uda1380_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
.probe = uda1380_probe,
.remove = uda1380_remove,
- .suspend = uda1380_suspend,
- .resume = uda1380_resume,
.read = uda1380_read_reg_cache,
.write = uda1380_write,
.set_bias_level = uda1380_set_bias_level,
+ .suspend_bias_off = true,
+
.reg_cache_size = ARRAY_SIZE(uda1380_reg),
.reg_word_size = sizeof(u16),
.reg_cache_default = uda1380_reg,
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index f3d4e88d0b7b..80fb1dc81f6c 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -452,7 +452,6 @@ static int wl1273_probe(struct snd_soc_codec *codec)
{
struct wl1273_core **core = codec->dev->platform_data;
struct wl1273_priv *wl1273;
- int r;
dev_dbg(codec->dev, "%s.\n", __func__);
@@ -470,12 +469,7 @@ static int wl1273_probe(struct snd_soc_codec *codec)
snd_soc_codec_set_drvdata(codec, wl1273);
- r = snd_soc_add_codec_controls(codec, wl1273_controls,
- ARRAY_SIZE(wl1273_controls));
- if (r)
- kfree(wl1273);
-
- return r;
+ return 0;
}
static int wl1273_remove(struct snd_soc_codec *codec)
@@ -492,6 +486,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wl1273 = {
.probe = wl1273_probe,
.remove = wl1273_remove,
+ .controls = wl1273_controls,
+ .num_controls = ARRAY_SIZE(wl1273_controls),
.dapm_widgets = wl1273_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wl1273_dapm_widgets),
.dapm_routes = wl1273_dapm_routes,
@@ -515,7 +511,6 @@ MODULE_ALIAS("platform:wl1273-codec");
static struct platform_driver wl1273_platform_driver = {
.driver = {
.name = "wl1273-codec",
- .owner = THIS_MODULE,
},
.probe = wl1273_platform_probe,
.remove = wl1273_platform_remove,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index f60234962527..f439ae052128 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -619,10 +619,10 @@ static int wm5102_out_comp_coeff_get(struct snd_kcontrol *kcontrol,
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
uint16_t data;
- mutex_lock(&codec->mutex);
+ mutex_lock(&arizona->dac_comp_lock);
data = cpu_to_be16(arizona->dac_comp_coeff);
memcpy(ucontrol->value.bytes.data, &data, sizeof(data));
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&arizona->dac_comp_lock);
return 0;
}
@@ -633,11 +633,11 @@ static int wm5102_out_comp_coeff_put(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- mutex_lock(&codec->mutex);
+ mutex_lock(&arizona->dac_comp_lock);
memcpy(&arizona->dac_comp_coeff, ucontrol->value.bytes.data,
sizeof(arizona->dac_comp_coeff));
arizona->dac_comp_coeff = be16_to_cpu(arizona->dac_comp_coeff);
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&arizona->dac_comp_lock);
return 0;
}
@@ -648,9 +648,9 @@ static int wm5102_out_comp_switch_get(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- mutex_lock(&codec->mutex);
+ mutex_lock(&arizona->dac_comp_lock);
ucontrol->value.integer.value[0] = arizona->dac_comp_enabled;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&arizona->dac_comp_lock);
return 0;
}
@@ -661,9 +661,9 @@ static int wm5102_out_comp_switch_put(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- mutex_lock(&codec->mutex);
+ mutex_lock(&arizona->dac_comp_lock);
arizona->dac_comp_enabled = ucontrol->value.integer.value[0];
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&arizona->dac_comp_lock);
return 0;
}
@@ -1900,6 +1900,8 @@ static int wm5102_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm5102);
+ mutex_init(&arizona->dac_comp_lock);
+
wm5102->core.arizona = arizona;
wm5102->core.num_inputs = 6;
@@ -1958,7 +1960,6 @@ static int wm5102_remove(struct platform_device *pdev)
static struct platform_driver wm5102_codec_driver = {
.driver = {
.name = "wm5102-codec",
- .owner = THIS_MODULE,
},
.probe = wm5102_probe,
.remove = wm5102_remove,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 2f2ec26d831c..4456b38a3ef5 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1738,7 +1738,6 @@ static int wm5110_remove(struct platform_device *pdev)
static struct platform_driver wm5110_codec_driver = {
.driver = {
.name = "wm5110-codec",
- .owner = THIS_MODULE,
},
.probe = wm5110_probe,
.remove = wm5110_remove,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 628ec774cf22..574579b98872 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1242,19 +1242,6 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int wm8350_suspend(struct snd_soc_codec *codec)
-{
- wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8350_resume(struct snd_soc_codec *codec)
-{
- wm8350_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
static void wm8350_hp_work(struct wm8350_data *priv,
struct wm8350_jack_data *jack,
u16 mask)
@@ -1565,9 +1552,6 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD,
wm8350_mic_handler, 0, "Microphone detect", priv);
-
- wm8350_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
@@ -1596,8 +1580,6 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
* wait for its completion */
flush_delayed_work(&codec->dapm.delayed_work);
- wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
return 0;
@@ -1613,10 +1595,9 @@ static struct regmap *wm8350_get_regmap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.probe = wm8350_codec_probe,
.remove = wm8350_codec_remove,
- .suspend = wm8350_suspend,
- .resume = wm8350_resume,
.get_regmap = wm8350_get_regmap,
.set_bias_level = wm8350_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8350_snd_controls,
.num_controls = ARRAY_SIZE(wm8350_snd_controls),
@@ -1641,7 +1622,6 @@ static int wm8350_remove(struct platform_device *pdev)
static struct platform_driver wm8350_codec_driver = {
.driver = {
.name = "wm8350-codec",
- .owner = THIS_MODULE,
},
.probe = wm8350_probe,
.remove = wm8350_remove,
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 72471bef2e9a..8ee446987aa9 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -58,12 +58,10 @@ static struct regulator_bulk_data power[] = {
/* codec private data */
struct wm8400_priv {
- struct snd_soc_codec *codec;
struct wm8400 *wm8400;
u16 fake_register;
unsigned int sysclk;
unsigned int pcmclk;
- struct work_struct work;
int fll_in, fll_out;
};
@@ -1278,30 +1276,6 @@ static struct snd_soc_dai_driver wm8400_dai = {
.ops = &wm8400_dai_ops,
};
-static int wm8400_suspend(struct snd_soc_codec *codec)
-{
- wm8400_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm8400_resume(struct snd_soc_codec *codec)
-{
- wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static void wm8400_probe_deferred(struct work_struct *work)
-{
- struct wm8400_priv *priv = container_of(work, struct wm8400_priv,
- work);
- struct snd_soc_codec *codec = priv->codec;
-
- /* charge output caps */
- wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-}
-
static int wm8400_codec_probe(struct snd_soc_codec *codec)
{
struct wm8400 *wm8400 = dev_get_platdata(codec->dev);
@@ -1316,7 +1290,6 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
snd_soc_codec_set_drvdata(codec, priv);
priv->wm8400 = wm8400;
- priv->codec = codec;
ret = devm_regulator_bulk_get(wm8400->dev,
ARRAY_SIZE(power), &power[0]);
@@ -1325,8 +1298,6 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
return ret;
}
- INIT_WORK(&priv->work, wm8400_probe_deferred);
-
wm8400_codec_reset(codec);
reg = snd_soc_read(codec, WM8400_POWER_MANAGEMENT_1);
@@ -1343,8 +1314,6 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8400_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
snd_soc_write(codec, WM8400_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
- if (!schedule_work(&priv->work))
- return -EINVAL;
return 0;
}
@@ -1369,10 +1338,9 @@ static struct regmap *wm8400_get_regmap(struct device *dev)
static struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.probe = wm8400_codec_probe,
.remove = wm8400_codec_remove,
- .suspend = wm8400_suspend,
- .resume = wm8400_resume,
.get_regmap = wm8400_get_regmap,
.set_bias_level = wm8400_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8400_snd_controls,
.num_controls = ARRAY_SIZE(wm8400_snd_controls),
@@ -1397,7 +1365,6 @@ static int wm8400_remove(struct platform_device *pdev)
static struct platform_driver wm8400_codec_driver = {
.driver = {
.name = "wm8400-codec",
- .owner = THIS_MODULE,
},
.probe = wm8400_probe,
.remove = wm8400_remove,
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index e11127f9069e..8736ad094b24 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -575,41 +575,17 @@ static struct snd_soc_dai_driver wm8510_dai = {
.symmetric_rates = 1,
};
-static int wm8510_suspend(struct snd_soc_codec *codec)
-{
- wm8510_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8510_resume(struct snd_soc_codec *codec)
-{
- wm8510_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8510_probe(struct snd_soc_codec *codec)
{
wm8510_reset(codec);
- /* power on device */
- wm8510_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-/* power down chip */
-static int wm8510_remove(struct snd_soc_codec *codec)
-{
- wm8510_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8510 = {
.probe = wm8510_probe,
- .remove = wm8510_remove,
- .suspend = wm8510_suspend,
- .resume = wm8510_resume,
.set_bias_level = wm8510_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8510_snd_controls,
.num_controls = ARRAY_SIZE(wm8510_snd_controls),
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index ec1f5740dbd0..b1cc94f5fc4b 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -372,23 +372,6 @@ static struct snd_soc_dai_driver wm8523_dai = {
.ops = &wm8523_dai_ops,
};
-#ifdef CONFIG_PM
-static int wm8523_suspend(struct snd_soc_codec *codec)
-{
- wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8523_resume(struct snd_soc_codec *codec)
-{
- wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8523_suspend NULL
-#define wm8523_resume NULL
-#endif
-
static int wm8523_probe(struct snd_soc_codec *codec)
{
struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec);
@@ -402,23 +385,13 @@ static int wm8523_probe(struct snd_soc_codec *codec)
WM8523_DACR_VU, WM8523_DACR_VU);
snd_soc_update_bits(codec, WM8523_DAC_CTRL3, WM8523_ZC, WM8523_ZC);
- wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int wm8523_remove(struct snd_soc_codec *codec)
-{
- wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8523 = {
.probe = wm8523_probe,
- .remove = wm8523_remove,
- .suspend = wm8523_suspend,
- .resume = wm8523_resume,
.set_bias_level = wm8523_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8523_controls,
.num_controls = ARRAY_SIZE(wm8523_controls),
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 911605ee25b0..0a887c5ec83a 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -882,8 +882,6 @@ static int wm8580_probe(struct snd_soc_codec *codec)
goto err_regulator_enable;
}
- wm8580_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
err_regulator_enable:
@@ -897,8 +895,6 @@ static int wm8580_remove(struct snd_soc_codec *codec)
{
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
- wm8580_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
return 0;
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 32187e739b4f..121e46d53779 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -350,19 +350,6 @@ static struct snd_soc_dai_driver wm8711_dai = {
.ops = &wm8711_ops,
};
-static int wm8711_suspend(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, WM8711_ACTIVE, 0x0);
- wm8711_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8711_resume(struct snd_soc_codec *codec)
-{
- wm8711_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8711_probe(struct snd_soc_codec *codec)
{
int ret;
@@ -373,8 +360,6 @@ static int wm8711_probe(struct snd_soc_codec *codec)
return ret;
}
- wm8711_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* Latch the update bits */
snd_soc_update_bits(codec, WM8711_LOUT1V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8711_ROUT1V, 0x0100, 0x0100);
@@ -383,19 +368,11 @@ static int wm8711_probe(struct snd_soc_codec *codec)
}
-/* power down chip */
-static int wm8711_remove(struct snd_soc_codec *codec)
-{
- wm8711_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8711 = {
.probe = wm8711_probe,
- .remove = wm8711_remove,
- .suspend = wm8711_suspend,
- .resume = wm8711_resume,
.set_bias_level = wm8711_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = wm8711_snd_controls,
.num_controls = ARRAY_SIZE(wm8711_snd_controls),
.dapm_widgets = wm8711_dapm_widgets,
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index 7b1a6d5c11c6..bb25a75f92a2 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -75,7 +75,6 @@ static int wm8727_remove(struct platform_device *pdev)
static struct platform_driver wm8727_codec_driver = {
.driver = {
.name = "wm8727",
- .owner = THIS_MODULE,
},
.probe = wm8727_probe,
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 38ff826f589a..55c7fb4fc786 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -212,40 +212,10 @@ static struct snd_soc_dai_driver wm8728_dai = {
.ops = &wm8728_dai_ops,
};
-static int wm8728_suspend(struct snd_soc_codec *codec)
-{
- wm8728_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm8728_resume(struct snd_soc_codec *codec)
-{
- wm8728_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int wm8728_probe(struct snd_soc_codec *codec)
-{
- /* power on device */
- wm8728_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int wm8728_remove(struct snd_soc_codec *codec)
-{
- wm8728_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8728 = {
- .probe = wm8728_probe,
- .remove = wm8728_remove,
- .suspend = wm8728_suspend,
- .resume = wm8728_resume,
.set_bias_level = wm8728_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = wm8728_snd_controls,
.num_controls = ARRAY_SIZE(wm8728_snd_controls),
.dapm_widgets = wm8728_dapm_widgets,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index eebb3280bfad..b9211b42f6e9 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -24,6 +24,7 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/of_device.h>
+#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -50,6 +51,8 @@ struct wm8731_priv {
int sysclk_type;
int playback_fs;
bool deemph;
+
+ struct mutex lock;
};
@@ -138,7 +141,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
if (deemph > 1)
return -EINVAL;
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8731->lock);
if (wm8731->deemph != deemph) {
wm8731->deemph = deemph;
@@ -146,7 +149,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
ret = 1;
}
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8731->lock);
return ret;
}
@@ -559,25 +562,6 @@ static struct snd_soc_dai_driver wm8731_dai = {
.symmetric_rates = 1,
};
-#ifdef CONFIG_PM
-static int wm8731_suspend(struct snd_soc_codec *codec)
-{
- wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm8731_resume(struct snd_soc_codec *codec)
-{
- wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define wm8731_suspend NULL
-#define wm8731_resume NULL
-#endif
-
static int wm8731_probe(struct snd_soc_codec *codec)
{
struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
@@ -633,8 +617,6 @@ static int wm8731_remove(struct snd_soc_codec *codec)
{
struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
- wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
return 0;
@@ -643,9 +625,9 @@ static int wm8731_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
.probe = wm8731_probe,
.remove = wm8731_remove,
- .suspend = wm8731_suspend,
- .resume = wm8731_resume,
.set_bias_level = wm8731_set_bias_level,
+ .suspend_bias_off = true,
+
.dapm_widgets = wm8731_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
.dapm_routes = wm8731_intercon,
@@ -680,11 +662,12 @@ static int wm8731_spi_probe(struct spi_device *spi)
struct wm8731_priv *wm8731;
int ret;
- wm8731 = devm_kzalloc(&spi->dev, sizeof(struct wm8731_priv),
- GFP_KERNEL);
+ wm8731 = devm_kzalloc(&spi->dev, sizeof(*wm8731), GFP_KERNEL);
if (wm8731 == NULL)
return -ENOMEM;
+ mutex_init(&wm8731->lock);
+
wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
if (IS_ERR(wm8731->regmap)) {
ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 744a422ecb05..ada9ac1ba2c6 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -277,17 +277,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "AIF", NULL, "ADCR" },
};
-static int wm8737_add_widgets(struct snd_soc_codec *codec)
-{
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets,
- ARRAY_SIZE(wm8737_dapm_widgets));
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
- return 0;
-}
-
/* codec mclk clock divider coefficients */
static const struct {
u32 mclk;
@@ -548,23 +537,6 @@ static struct snd_soc_dai_driver wm8737_dai = {
.ops = &wm8737_dai_ops,
};
-#ifdef CONFIG_PM
-static int wm8737_suspend(struct snd_soc_codec *codec)
-{
- wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8737_resume(struct snd_soc_codec *codec)
-{
- wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8737_suspend NULL
-#define wm8737_resume NULL
-#endif
-
static int wm8737_probe(struct snd_soc_codec *codec)
{
struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
@@ -593,10 +565,6 @@ static int wm8737_probe(struct snd_soc_codec *codec)
/* Bias level configuration will have done an extra enable */
regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
- snd_soc_add_codec_controls(codec, wm8737_snd_controls,
- ARRAY_SIZE(wm8737_snd_controls));
- wm8737_add_widgets(codec);
-
return 0;
err_enable:
@@ -605,18 +573,17 @@ err_get:
return ret;
}
-static int wm8737_remove(struct snd_soc_codec *codec)
-{
- wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
.probe = wm8737_probe,
- .remove = wm8737_remove,
- .suspend = wm8737_suspend,
- .resume = wm8737_resume,
.set_bias_level = wm8737_set_bias_level,
+ .suspend_bias_off = true,
+
+ .controls = wm8737_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8737_snd_controls),
+ .dapm_widgets = wm8737_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8737_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
};
static const struct of_device_id wm8737_of_match[] = {
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 67653a2db223..f6847fdd6ddd 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -686,18 +686,6 @@ static struct snd_soc_dai_driver wm8750_dai = {
.ops = &wm8750_dai_ops,
};
-static int wm8750_suspend(struct snd_soc_codec *codec)
-{
- wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8750_resume(struct snd_soc_codec *codec)
-{
- wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8750_probe(struct snd_soc_codec *codec)
{
int ret;
@@ -708,9 +696,6 @@ static int wm8750_probe(struct snd_soc_codec *codec)
return ret;
}
- /* charge output caps */
- wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* set the update bits */
snd_soc_update_bits(codec, WM8750_LDAC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8750_RDAC, 0x0100, 0x0100);
@@ -724,18 +709,10 @@ static int wm8750_probe(struct snd_soc_codec *codec)
return ret;
}
-static int wm8750_remove(struct snd_soc_codec *codec)
-{
- wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8750 = {
.probe = wm8750_probe,
- .remove = wm8750_remove,
- .suspend = wm8750_suspend,
- .resume = wm8750_resume,
.set_bias_level = wm8750_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8750_snd_controls,
.num_controls = ARRAY_SIZE(wm8750_snd_controls),
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 70952ceb278b..c13050b77931 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -408,24 +408,6 @@ static struct snd_soc_dai_driver wm8776_dai[] = {
},
};
-#ifdef CONFIG_PM
-static int wm8776_suspend(struct snd_soc_codec *codec)
-{
- wm8776_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm8776_resume(struct snd_soc_codec *codec)
-{
- wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8776_suspend NULL
-#define wm8776_resume NULL
-#endif
-
static int wm8776_probe(struct snd_soc_codec *codec)
{
int ret = 0;
@@ -436,8 +418,6 @@ static int wm8776_probe(struct snd_soc_codec *codec)
return ret;
}
- wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* Latch the update bits; right channel only since we always
* update both. */
snd_soc_update_bits(codec, WM8776_HPRVOL, 0x100, 0x100);
@@ -446,19 +426,10 @@ static int wm8776_probe(struct snd_soc_codec *codec)
return ret;
}
-/* power down chip */
-static int wm8776_remove(struct snd_soc_codec *codec)
-{
- wm8776_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8776 = {
.probe = wm8776_probe,
- .remove = wm8776_remove,
- .suspend = wm8776_suspend,
- .resume = wm8776_resume,
.set_bias_level = wm8776_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8776_snd_controls,
.num_controls = ARRAY_SIZE(wm8776_snd_controls),
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index 8092495605ce..fb55fd845d27 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -72,7 +72,6 @@ static int wm8782_remove(struct platform_device *pdev)
static struct platform_driver wm8782_codec_driver = {
.driver = {
.name = "wm8782",
- .owner = THIS_MODULE,
},
.probe = wm8782_probe,
.remove = wm8782_remove,
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 3addc5fe5cb2..1315f7642503 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -524,7 +524,6 @@ static int wm8804_remove(struct snd_soc_codec *codec)
int i;
wm8804 = snd_soc_codec_get_drvdata(codec);
- wm8804_set_bias_level(codec, SND_SOC_BIAS_OFF);
for (i = 0; i < ARRAY_SIZE(wm8804->supplies); ++i)
regulator_unregister_notifier(wm8804->supplies[i].consumer,
@@ -606,8 +605,6 @@ static int wm8804_probe(struct snd_soc_codec *codec)
goto err_reg_enable;
}
- wm8804_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
err_reg_enable:
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 44a5f1511f0f..3a0d4b7d692f 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1209,16 +1209,8 @@ static int wm8900_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int wm8900_remove(struct snd_soc_codec *codec)
-{
- wm8900_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8900 = {
.probe = wm8900_probe,
- .remove = wm8900_remove,
.suspend = wm8900_suspend,
.resume = wm8900_resume,
.set_bias_level = wm8900_set_bias_level,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index c038b3e04398..cc6b0ef98a34 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -26,6 +26,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
@@ -117,12 +118,12 @@ static const struct reg_default wm8903_reg_defaults[] = {
struct wm8903_priv {
struct wm8903_platform_data *pdata;
struct device *dev;
- struct snd_soc_codec *codec;
struct regmap *regmap;
int sysclk;
int irq;
+ struct mutex lock;
int fs;
int deemph;
@@ -457,7 +458,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
if (deemph > 1)
return -EINVAL;
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8903->lock);
if (wm8903->deemph != deemph) {
wm8903->deemph = deemph;
@@ -465,7 +466,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
ret = 1;
}
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8903->lock);
return ret;
}
@@ -1757,21 +1758,12 @@ static struct snd_soc_dai_driver wm8903_dai = {
.symmetric_rates = 1,
};
-static int wm8903_suspend(struct snd_soc_codec *codec)
-{
- wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static int wm8903_resume(struct snd_soc_codec *codec)
{
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
regcache_sync(wm8903->regmap);
- wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
@@ -1889,33 +1881,12 @@ static void wm8903_free_gpio(struct wm8903_priv *wm8903)
}
#endif
-static int wm8903_probe(struct snd_soc_codec *codec)
-{
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
- wm8903->codec = codec;
-
- /* power on device */
- wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-/* power down chip */
-static int wm8903_remove(struct snd_soc_codec *codec)
-{
- wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8903 = {
- .probe = wm8903_probe,
- .remove = wm8903_remove,
- .suspend = wm8903_suspend,
.resume = wm8903_resume,
.set_bias_level = wm8903_set_bias_level,
.seq_notifier = wm8903_seq_notifier,
+ .suspend_bias_off = true,
+
.controls = wm8903_snd_controls,
.num_controls = ARRAY_SIZE(wm8903_snd_controls),
.dapm_widgets = wm8903_dapm_widgets,
@@ -2023,6 +1994,8 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
GFP_KERNEL);
if (wm8903 == NULL)
return -ENOMEM;
+
+ mutex_init(&wm8903->lock);
wm8903->dev = &i2c->dev;
wm8903->regmap = devm_regmap_init_i2c(i2c, &wm8903_regmap);
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 52011043e54c..e4142b4309eb 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -695,17 +695,6 @@ static struct snd_soc_dai_driver wm8940_dai = {
.symmetric_rates = 1,
};
-static int wm8940_suspend(struct snd_soc_codec *codec)
-{
- return wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF);
-}
-
-static int wm8940_resume(struct snd_soc_codec *codec)
-{
- wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8940_probe(struct snd_soc_codec *codec)
{
struct wm8940_setup_data *pdata = codec->dev->platform_data;
@@ -736,18 +725,11 @@ static int wm8940_probe(struct snd_soc_codec *codec)
return ret;
}
-static int wm8940_remove(struct snd_soc_codec *codec)
-{
- wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8940 = {
.probe = wm8940_probe,
- .remove = wm8940_remove,
- .suspend = wm8940_suspend,
- .resume = wm8940_resume,
.set_bias_level = wm8940_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = wm8940_snd_controls,
.num_controls = ARRAY_SIZE(wm8940_snd_controls),
.dapm_widgets = wm8940_dapm_widgets,
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 09d91d9dc4ee..1173f7fef5a7 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -866,29 +866,6 @@ static struct snd_soc_dai_driver wm8955_dai = {
.ops = &wm8955_dai_ops,
};
-#ifdef CONFIG_PM
-static int wm8955_suspend(struct snd_soc_codec *codec)
-{
- struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
-
- wm8955_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- regcache_mark_dirty(wm8955->regmap);
-
- return 0;
-}
-
-static int wm8955_resume(struct snd_soc_codec *codec)
-{
- wm8955_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define wm8955_suspend NULL
-#define wm8955_resume NULL
-#endif
-
static int wm8955_probe(struct snd_soc_codec *codec)
{
struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
@@ -964,18 +941,10 @@ err_enable:
return ret;
}
-static int wm8955_remove(struct snd_soc_codec *codec)
-{
- wm8955_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8955 = {
.probe = wm8955_probe,
- .remove = wm8955_remove,
- .suspend = wm8955_suspend,
- .resume = wm8955_resume,
.set_bias_level = wm8955_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8955_snd_controls,
.num_controls = ARRAY_SIZE(wm8955_snd_controls),
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0dada7f0105e..3cbc82b33292 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -867,9 +867,9 @@ static void wm8958_enh_eq_loaded(const struct firmware *fw, void *context)
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "ENH_EQ", fw, true) == 0)) {
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8994->fw_lock);
wm8994->enh_eq = fw;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8994->fw_lock);
}
}
@@ -879,9 +879,9 @@ static void wm8958_mbc_vss_loaded(const struct firmware *fw, void *context)
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "MBC+VSS", fw, true) == 0)) {
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8994->fw_lock);
wm8994->mbc_vss = fw;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8994->fw_lock);
}
}
@@ -891,9 +891,9 @@ static void wm8958_mbc_loaded(const struct firmware *fw, void *context)
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
if (fw && (wm8958_dsp2_fw(codec, "MBC", fw, true) == 0)) {
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8994->fw_lock);
wm8994->mbc = fw;
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8994->fw_lock);
}
}
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 4dc4e85116cd..031a1ae71d94 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -125,9 +125,10 @@ struct wm8960_priv {
struct snd_soc_dapm_widget *out3;
bool deemph;
int playback_fs;
+ struct wm8960_data pdata;
};
-#define wm8960_reset(c) snd_soc_write(c, WM8960_RESET, 0)
+#define wm8960_reset(c) regmap_write(c, WM8960_RESET, 0)
/* enumerated controls */
static const char *wm8960_polarity[] = {"No Inversion", "Left Inverted",
@@ -440,8 +441,8 @@ static const struct snd_soc_dapm_route audio_paths_capless[] = {
static int wm8960_add_widgets(struct snd_soc_codec *codec)
{
- struct wm8960_data *pdata = codec->dev->platform_data;
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ struct wm8960_data *pdata = &wm8960->pdata;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_dapm_widget *w;
@@ -942,56 +943,15 @@ static struct snd_soc_dai_driver wm8960_dai = {
.symmetric_rates = 1,
};
-static int wm8960_suspend(struct snd_soc_codec *codec)
-{
- struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
-
- wm8960->set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8960_resume(struct snd_soc_codec *codec)
-{
- struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
-
- wm8960->set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8960_probe(struct snd_soc_codec *codec)
{
struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
- struct wm8960_data *pdata = dev_get_platdata(codec->dev);
- int ret;
-
- wm8960->set_bias_level = wm8960_set_bias_level_out3;
-
- if (!pdata) {
- dev_warn(codec->dev, "No platform data supplied\n");
- } else {
- if (pdata->capless)
- wm8960->set_bias_level = wm8960_set_bias_level_capless;
- }
-
- ret = wm8960_reset(codec);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to issue reset\n");
- return ret;
- }
-
- wm8960->set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ struct wm8960_data *pdata = &wm8960->pdata;
- /* Latch the update bits */
- snd_soc_update_bits(codec, WM8960_LINVOL, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_RINVOL, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_LADC, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_RADC, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_LDAC, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_RDAC, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_LOUT1, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_ROUT1, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_LOUT2, 0x100, 0x100);
- snd_soc_update_bits(codec, WM8960_ROUT2, 0x100, 0x100);
+ if (pdata->capless)
+ wm8960->set_bias_level = wm8960_set_bias_level_capless;
+ else
+ wm8960->set_bias_level = wm8960_set_bias_level_out3;
snd_soc_add_codec_controls(codec, wm8960_snd_controls,
ARRAY_SIZE(wm8960_snd_controls));
@@ -1000,21 +960,10 @@ static int wm8960_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int wm8960_remove(struct snd_soc_codec *codec)
-{
- struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
-
- wm8960->set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8960 = {
.probe = wm8960_probe,
- .remove = wm8960_remove,
- .suspend = wm8960_suspend,
- .resume = wm8960_resume,
.set_bias_level = wm8960_set_bias_level,
+ .suspend_bias_off = true,
};
static const struct regmap_config wm8960_regmap = {
@@ -1029,6 +978,18 @@ static const struct regmap_config wm8960_regmap = {
.volatile_reg = wm8960_volatile,
};
+static void wm8960_set_pdata_from_of(struct i2c_client *i2c,
+ struct wm8960_data *pdata)
+{
+ const struct device_node *np = i2c->dev.of_node;
+
+ if (of_property_read_bool(np, "wlf,capless"))
+ pdata->capless = true;
+
+ if (of_property_read_bool(np, "wlf,shared-lrclk"))
+ pdata->shared_lrclk = true;
+}
+
static int wm8960_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1045,7 +1006,18 @@ static int wm8960_i2c_probe(struct i2c_client *i2c,
if (IS_ERR(wm8960->regmap))
return PTR_ERR(wm8960->regmap);
- if (pdata && pdata->shared_lrclk) {
+ if (pdata)
+ memcpy(&wm8960->pdata, pdata, sizeof(struct wm8960_data));
+ else if (i2c->dev.of_node)
+ wm8960_set_pdata_from_of(i2c, &wm8960->pdata);
+
+ ret = wm8960_reset(wm8960->regmap);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to issue reset\n");
+ return ret;
+ }
+
+ if (wm8960->pdata.shared_lrclk) {
ret = regmap_update_bits(wm8960->regmap, WM8960_ADDCTL2,
0x4, 0x4);
if (ret != 0) {
@@ -1055,6 +1027,18 @@ static int wm8960_i2c_probe(struct i2c_client *i2c,
}
}
+ /* Latch the update bits */
+ regmap_update_bits(wm8960->regmap, WM8960_LINVOL, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_RINVOL, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_LADC, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_RADC, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_LDAC, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_RDAC, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_LOUT1, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_ROUT1, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_LOUT2, 0x100, 0x100);
+ regmap_update_bits(wm8960->regmap, WM8960_ROUT2, 0x100, 0x100);
+
i2c_set_clientdata(i2c, wm8960);
ret = snd_soc_register_codec(&i2c->dev,
@@ -1075,10 +1059,17 @@ static const struct i2c_device_id wm8960_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8960_i2c_id);
+static const struct of_device_id wm8960_of_match[] = {
+ { .compatible = "wlf,wm8960", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wm8960_of_match);
+
static struct i2c_driver wm8960_i2c_driver = {
.driver = {
.name = "wm8960",
.owner = THIS_MODULE,
+ .of_match_table = wm8960_of_match,
},
.probe = wm8960_i2c_probe,
.remove = wm8960_i2c_remove,
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 41d23e920ad5..eeffd05384b4 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -835,7 +835,6 @@ static struct snd_soc_dai_driver wm8961_dai = {
static int wm8961_probe(struct snd_soc_codec *codec)
{
- struct snd_soc_dapm_context *dapm = &codec->dapm;
u16 reg;
/* Enable class W */
@@ -871,50 +870,33 @@ static int wm8961_probe(struct snd_soc_codec *codec)
reg &= ~WM8961_MANUAL_MODE;
snd_soc_write(codec, WM8961_CLOCKING_3, reg);
- wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- snd_soc_add_codec_controls(codec, wm8961_snd_controls,
- ARRAY_SIZE(wm8961_snd_controls));
- snd_soc_dapm_new_controls(dapm, wm8961_dapm_widgets,
- ARRAY_SIZE(wm8961_dapm_widgets));
- snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
-
- return 0;
-}
-
-static int wm8961_remove(struct snd_soc_codec *codec)
-{
- wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
#ifdef CONFIG_PM
-static int wm8961_suspend(struct snd_soc_codec *codec)
-{
- wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
static int wm8961_resume(struct snd_soc_codec *codec)
{
snd_soc_cache_sync(codec);
- wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
#else
-#define wm8961_suspend NULL
#define wm8961_resume NULL
#endif
static struct snd_soc_codec_driver soc_codec_dev_wm8961 = {
.probe = wm8961_probe,
- .remove = wm8961_remove,
- .suspend = wm8961_suspend,
.resume = wm8961_resume,
.set_bias_level = wm8961_set_bias_level,
+ .suspend_bias_off = true,
+
+ .controls = wm8961_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8961_snd_controls),
+ .dapm_widgets = wm8961_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8961_dapm_widgets),
+ .dapm_routes = audio_paths,
+ .num_dapm_routes = ARRAY_SIZE(audio_paths),
};
static const struct regmap_config wm8961_regmap = {
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 77ea0012afca..d32d554f5b34 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -26,6 +26,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
@@ -67,6 +68,7 @@ struct wm8962_priv {
int fll_fref;
int fll_fout;
+ struct mutex dsp2_ena_lock;
u16 dsp2_ena;
struct delayed_work mic_work;
@@ -1570,7 +1572,7 @@ static int wm8962_dsp2_ena_put(struct snd_kcontrol *kcontrol,
int dsp2_running = snd_soc_read(codec, WM8962_DSP2_POWER_MANAGEMENT) &
WM8962_DSP2_ENA;
- mutex_lock(&codec->mutex);
+ mutex_lock(&wm8962->dsp2_ena_lock);
if (ucontrol->value.integer.value[0])
wm8962->dsp2_ena |= 1 << shift;
@@ -1590,7 +1592,7 @@ static int wm8962_dsp2_ena_put(struct snd_kcontrol *kcontrol,
}
out:
- mutex_unlock(&codec->mutex);
+ mutex_unlock(&wm8962->dsp2_ena_lock);
return ret;
}
@@ -3552,11 +3554,12 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
unsigned int reg;
int ret, i, irq_pol, trigger;
- wm8962 = devm_kzalloc(&i2c->dev, sizeof(struct wm8962_priv),
- GFP_KERNEL);
+ wm8962 = devm_kzalloc(&i2c->dev, sizeof(*wm8962), GFP_KERNEL);
if (wm8962 == NULL)
return -ENOMEM;
+ mutex_init(&wm8962->dsp2_ena_lock);
+
i2c_set_clientdata(i2c, wm8962);
INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work);
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 682e9eda1019..ff0e4646b934 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -568,18 +568,6 @@ static struct snd_soc_dai_driver wm8974_dai = {
.symmetric_rates = 1,
};
-static int wm8974_suspend(struct snd_soc_codec *codec)
-{
- wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8974_resume(struct snd_soc_codec *codec)
-{
- wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static const struct regmap_config wm8974_regmap = {
.reg_bits = 7,
.val_bits = 9,
@@ -599,24 +587,13 @@ static int wm8974_probe(struct snd_soc_codec *codec)
return ret;
}
- wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return ret;
-}
-
-/* power down chip */
-static int wm8974_remove(struct snd_soc_codec *codec)
-{
- wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
.probe = wm8974_probe,
- .remove = wm8974_remove,
- .suspend = wm8974_suspend,
- .resume = wm8974_resume,
.set_bias_level = wm8974_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8974_snd_controls,
.num_controls = ARRAY_SIZE(wm8974_snd_controls),
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index ee2ba574952b..cf7032911721 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -991,21 +991,11 @@ static int wm8978_probe(struct snd_soc_codec *codec)
for (i = 0; i < ARRAY_SIZE(update_reg); i++)
snd_soc_update_bits(codec, update_reg[i], 0x100, 0x100);
- wm8978_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-/* power down chip */
-static int wm8978_remove(struct snd_soc_codec *codec)
-{
- wm8978_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8978 = {
.probe = wm8978_probe,
- .remove = wm8978_remove,
.suspend = wm8978_suspend,
.resume = wm8978_resume,
.set_bias_level = wm8978_set_bias_level,
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index ac5defda8824..5d1cf08a72b8 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -967,29 +967,6 @@ static int wm8983_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#ifdef CONFIG_PM
-static int wm8983_suspend(struct snd_soc_codec *codec)
-{
- wm8983_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8983_resume(struct snd_soc_codec *codec)
-{
- wm8983_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8983_suspend NULL
-#define wm8983_resume NULL
-#endif
-
-static int wm8983_remove(struct snd_soc_codec *codec)
-{
- wm8983_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int wm8983_probe(struct snd_soc_codec *codec)
{
int ret;
@@ -1055,10 +1032,8 @@ static struct snd_soc_dai_driver wm8983_dai = {
static struct snd_soc_codec_driver soc_codec_dev_wm8983 = {
.probe = wm8983_probe,
- .remove = wm8983_remove,
- .suspend = wm8983_suspend,
- .resume = wm8983_resume,
.set_bias_level = wm8983_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8983_snd_controls,
.num_controls = ARRAY_SIZE(wm8983_snd_controls),
.dapm_widgets = wm8983_dapm_widgets,
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index ee380190399f..0b3b54c9971d 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -961,29 +961,6 @@ static int wm8985_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#ifdef CONFIG_PM
-static int wm8985_suspend(struct snd_soc_codec *codec)
-{
- wm8985_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8985_resume(struct snd_soc_codec *codec)
-{
- wm8985_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8985_suspend NULL
-#define wm8985_resume NULL
-#endif
-
-static int wm8985_remove(struct snd_soc_codec *codec)
-{
- wm8985_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int wm8985_probe(struct snd_soc_codec *codec)
{
size_t i;
@@ -1023,7 +1000,6 @@ static int wm8985_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8985_BIAS_CTRL, WM8985_BIASCUT,
WM8985_BIASCUT);
- wm8985_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
err_reg_enable:
@@ -1064,10 +1040,8 @@ static struct snd_soc_dai_driver wm8985_dai = {
static struct snd_soc_codec_driver soc_codec_dev_wm8985 = {
.probe = wm8985_probe,
- .remove = wm8985_remove,
- .suspend = wm8985_suspend,
- .resume = wm8985_resume,
.set_bias_level = wm8985_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8985_snd_controls,
.num_controls = ARRAY_SIZE(wm8985_snd_controls),
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index a5130d965146..e418199155a8 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -793,21 +793,6 @@ static struct snd_soc_dai_driver wm8988_dai = {
.symmetric_rates = 1,
};
-static int wm8988_suspend(struct snd_soc_codec *codec)
-{
- struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
-
- wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regcache_mark_dirty(wm8988->regmap);
- return 0;
-}
-
-static int wm8988_resume(struct snd_soc_codec *codec)
-{
- wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int wm8988_probe(struct snd_soc_codec *codec)
{
int ret = 0;
@@ -825,23 +810,13 @@ static int wm8988_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8988_ROUT2V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8988_RINVOL, 0x0100, 0x0100);
- wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int wm8988_remove(struct snd_soc_codec *codec)
-{
- wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8988 = {
.probe = wm8988_probe,
- .remove = wm8988_remove,
- .suspend = wm8988_suspend,
- .resume = wm8988_resume,
.set_bias_level = wm8988_set_bias_level,
+ .suspend_bias_off = true,
.controls = wm8988_snd_controls,
.num_controls = ARRAY_SIZE(wm8988_snd_controls),
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 03e43e3f395e..8a584229310a 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1271,18 +1271,6 @@ static struct snd_soc_dai_driver wm8990_dai = {
.ops = &wm8990_dai_ops,
};
-static int wm8990_suspend(struct snd_soc_codec *codec)
-{
- wm8990_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8990_resume(struct snd_soc_codec *codec)
-{
- wm8990_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
/*
* initialise the WM8990 driver
* register the mixer and dsp interfaces with the kernel
@@ -1309,19 +1297,11 @@ static int wm8990_probe(struct snd_soc_codec *codec)
return 0;
}
-/* power down chip */
-static int wm8990_remove(struct snd_soc_codec *codec)
-{
- wm8990_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8990 = {
.probe = wm8990_probe,
- .remove = wm8990_remove,
- .suspend = wm8990_suspend,
- .resume = wm8990_resume,
.set_bias_level = wm8990_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = wm8990_snd_controls,
.num_controls = ARRAY_SIZE(wm8990_snd_controls),
.dapm_widgets = wm8990_dapm_widgets,
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index d0be89731cdb..b0ac2c3e31b9 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -1227,32 +1227,6 @@ static int wm8991_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int wm8991_suspend(struct snd_soc_codec *codec)
-{
- wm8991_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8991_resume(struct snd_soc_codec *codec)
-{
- wm8991_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
-/* power down chip */
-static int wm8991_remove(struct snd_soc_codec *codec)
-{
- wm8991_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8991_probe(struct snd_soc_codec *codec)
-{
- wm8991_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
#define WM8991_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
@@ -1293,11 +1267,9 @@ static struct snd_soc_dai_driver wm8991_dai = {
};
static struct snd_soc_codec_driver soc_codec_dev_wm8991 = {
- .probe = wm8991_probe,
- .remove = wm8991_remove,
- .suspend = wm8991_suspend,
- .resume = wm8991_resume,
.set_bias_level = wm8991_set_bias_level,
+ .suspend_bias_off = true,
+
.controls = wm8991_snd_controls,
.num_controls = ARRAY_SIZE(wm8991_snd_controls),
.dapm_widgets = wm8991_dapm_widgets,
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 93b14eda355a..53c6fe359496 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1486,7 +1486,6 @@ static int wm8993_probe(struct snd_soc_codec *codec)
{
struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
wm8993->hubs_data.hp_startup_mode = 1;
wm8993->hubs_data.dcs_codes_l = -2;
@@ -1518,10 +1517,6 @@ static int wm8993_probe(struct snd_soc_codec *codec)
wm8993->pdata.micbias1_lvl,
wm8993->pdata.micbias2_lvl);
- ret = wm8993_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- if (ret != 0)
- return ret;
-
snd_soc_add_codec_controls(codec, wm8993_snd_controls,
ARRAY_SIZE(wm8993_snd_controls));
if (wm8993->pdata.num_retune_configs != 0) {
@@ -1550,12 +1545,6 @@ static int wm8993_probe(struct snd_soc_codec *codec)
}
-static int wm8993_remove(struct snd_soc_codec *codec)
-{
- wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
#ifdef CONFIG_PM
static int wm8993_suspend(struct snd_soc_codec *codec)
{
@@ -1629,7 +1618,6 @@ static const struct regmap_config wm8993_regmap = {
static struct snd_soc_codec_driver soc_codec_dev_wm8993 = {
.probe = wm8993_probe,
- .remove = wm8993_remove,
.suspend = wm8993_suspend,
.resume = wm8993_resume,
.set_bias_level = wm8993_set_bias_level,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 1fcb9f3f3097..1b97de2e4e67 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4391,8 +4391,6 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
struct wm8994 *control = wm8994->wm8994;
int i;
- wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
wm8994_free_irq(wm8994->wm8994, WM8994_IRQ_FLL1_LOCK + i,
&wm8994->fll_locked[i]);
@@ -4457,6 +4455,8 @@ static int wm8994_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm8994);
+ mutex_init(&wm8994->fw_lock);
+
wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
pm_runtime_enable(&pdev->dev);
@@ -4508,7 +4508,6 @@ static const struct dev_pm_ops wm8994_pm_ops = {
static struct platform_driver wm8994_codec_driver = {
.driver = {
.name = "wm8994-codec",
- .owner = THIS_MODULE,
.pm = &wm8994_pm_ops,
},
.probe = wm8994_probe,
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 6536f8d45ac6..dd73387b1cc4 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -13,6 +13,7 @@
#include <linux/firmware.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include "wm_hubs.h"
@@ -156,6 +157,7 @@ struct wm8994_priv {
unsigned int aif1clk_disable:1;
unsigned int aif2clk_disable:1;
+ struct mutex fw_lock;
int dsp_active;
const struct firmware *cur_fw;
const struct firmware *mbc;
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 1288edeb8c7d..c280f0a3a424 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2004,7 +2004,6 @@ static int wm8995_remove(struct snd_soc_codec *codec)
int i;
wm8995 = snd_soc_codec_get_drvdata(codec);
- wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
for (i = 0; i < ARRAY_SIZE(wm8995->supplies); ++i)
regulator_unregister_notifier(wm8995->supplies[i].consumer,
@@ -2078,8 +2077,6 @@ static int wm8995_probe(struct snd_soc_codec *codec)
goto err_reg_enable;
}
- wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* Latch volume updates (right only; we always do left then right). */
snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME,
WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU);
@@ -2102,13 +2099,6 @@ static int wm8995_probe(struct snd_soc_codec *codec)
wm8995_update_class_w(codec);
- snd_soc_add_codec_controls(codec, wm8995_snd_controls,
- ARRAY_SIZE(wm8995_snd_controls));
- snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets,
- ARRAY_SIZE(wm8995_dapm_widgets));
- snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon,
- ARRAY_SIZE(wm8995_intercon));
-
return 0;
err_reg_enable:
@@ -2205,6 +2195,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
.remove = wm8995_remove,
.set_bias_level = wm8995_set_bias_level,
.idle_bias_off = true,
+
+ .controls = wm8995_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8995_snd_controls),
+ .dapm_widgets = wm8995_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8995_dapm_widgets),
+ .dapm_routes = wm8995_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8995_intercon),
};
static struct regmap_config wm8995_regmap = {
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index ab33fe596519..7e8bfe27566b 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1165,7 +1165,6 @@ static int wm8997_remove(struct platform_device *pdev)
static struct platform_driver wm8997_codec_driver = {
.driver = {
.name = "wm8997-codec",
- .owner = THIS_MODULE,
},
.probe = wm8997_probe,
.remove = wm8997_remove,
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 0cdc9e2184ab..b1d946facd57 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1277,15 +1277,8 @@ static int wm9081_probe(struct snd_soc_codec *codec)
return 0;
}
-static int wm9081_remove(struct snd_soc_codec *codec)
-{
- wm9081_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm9081 = {
.probe = wm9081_probe,
- .remove = wm9081_remove,
.set_sysclk = wm9081_set_sysclk,
.set_bias_level = wm9081_set_bias_level,
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index a13f0725611a..6ffe8dc4f3fa 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -550,45 +550,15 @@ static int wm9090_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM9090_CLOCKING_1,
WM9090_TOCLK_ENA, WM9090_TOCLK_ENA);
- wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
wm9090_add_controls(codec);
return 0;
}
-#ifdef CONFIG_PM
-static int wm9090_suspend(struct snd_soc_codec *codec)
-{
- wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm9090_resume(struct snd_soc_codec *codec)
-{
- wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define wm9090_suspend NULL
-#define wm9090_resume NULL
-#endif
-
-static int wm9090_remove(struct snd_soc_codec *codec)
-{
- wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm9090 = {
.probe = wm9090_probe,
- .remove = wm9090_remove,
- .suspend = wm9090_suspend,
- .resume = wm9090_resume,
.set_bias_level = wm9090_set_bias_level,
+ .suspend_bias_off = true,
};
static const struct regmap_config wm9090_regmap = {
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index c0b7f45dfa37..3eddb18fefd1 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -203,13 +203,14 @@ static const struct snd_soc_dapm_route wm9705_audio_map[] = {
/* We use a register cache to enhance read performance. */
static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
switch (reg) {
case AC97_RESET:
case AC97_VENDOR_ID1:
case AC97_VENDOR_ID2:
- return soc_ac97_ops->read(codec->ac97, reg);
+ return soc_ac97_ops->read(ac97, reg);
default:
reg = reg >> 1;
@@ -223,9 +224,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
- soc_ac97_ops->write(codec->ac97, reg, val);
+ soc_ac97_ops->write(ac97, reg, val);
reg = reg >> 1;
if (reg < (ARRAY_SIZE(wm9705_reg)))
cache[reg] = val;
@@ -263,7 +265,6 @@ static const struct snd_soc_dai_ops wm9705_dai_ops = {
static struct snd_soc_dai_driver wm9705_dai[] = {
{
.name = "wm9705-hifi",
- .ac97_control = 1,
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
@@ -294,36 +295,41 @@ static struct snd_soc_dai_driver wm9705_dai[] = {
static int wm9705_reset(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
if (soc_ac97_ops->reset) {
- soc_ac97_ops->reset(codec->ac97);
+ soc_ac97_ops->reset(ac97);
if (ac97_read(codec, 0) == wm9705_reg[0])
return 0; /* Success */
}
+ dev_err(codec->dev, "Failed to reset: AC97 link error\n");
+
return -EIO;
}
#ifdef CONFIG_PM
static int wm9705_soc_suspend(struct snd_soc_codec *codec)
{
- soc_ac97_ops->write(codec->ac97, AC97_POWERDOWN, 0xffff);
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ soc_ac97_ops->write(ac97, AC97_POWERDOWN, 0xffff);
return 0;
}
static int wm9705_soc_resume(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
int i, ret;
u16 *cache = codec->reg_cache;
ret = wm9705_reset(codec);
- if (ret < 0) {
- printk(KERN_ERR "could not reset AC97 codec\n");
+ if (ret < 0)
return ret;
- }
for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) {
- soc_ac97_ops->write(codec->ac97, i, cache[i>>1]);
+ soc_ac97_ops->write(ac97, i, cache[i>>1]);
}
return 0;
@@ -335,31 +341,34 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
static int wm9705_soc_probe(struct snd_soc_codec *codec)
{
+ struct snd_ac97 *ac97;
int ret = 0;
- ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
- if (ret < 0) {
- printk(KERN_ERR "wm9705: failed to register AC97 codec\n");
+ ac97 = snd_soc_new_ac97_codec(codec);
+ if (IS_ERR(ac97)) {
+ ret = PTR_ERR(ac97);
+ dev_err(codec->dev, "Failed to register AC97 codec\n");
return ret;
}
+ snd_soc_codec_set_drvdata(codec, ac97);
+
ret = wm9705_reset(codec);
if (ret)
goto reset_err;
- snd_soc_add_codec_controls(codec, wm9705_snd_ac97_controls,
- ARRAY_SIZE(wm9705_snd_ac97_controls));
-
return 0;
reset_err:
- snd_soc_free_ac97_codec(codec);
+ snd_soc_free_ac97_codec(ac97);
return ret;
}
static int wm9705_soc_remove(struct snd_soc_codec *codec)
{
- snd_soc_free_ac97_codec(codec);
+ struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_free_ac97_codec(ac97);
return 0;
}
@@ -374,6 +383,9 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
.reg_word_size = sizeof(u16),
.reg_cache_step = 2,
.reg_cache_default = wm9705_reg,
+
+ .controls = wm9705_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9705_snd_ac97_controls),
.dapm_widgets = wm9705_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets),
.dapm_routes = wm9705_audio_map,
@@ -395,7 +407,6 @@ static int wm9705_remove(struct platform_device *pdev)
static struct platform_driver wm9705_codec_driver = {
.driver = {
.name = "wm9705-codec",
- .owner = THIS_MODULE,
},
.probe = wm9705_probe,
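The wm9705 conversion shows the updated AC97 helper API used by the remaining AC97 codecs below: snd_soc_new_ac97_codec() now returns a struct snd_ac97 * (or an ERR_PTR) instead of populating codec->ac97, so the handle is kept in driver data and passed to the bus ops explicitly. A condensed sketch of that probe/remove shape, with invented names:

#include <linux/err.h>
#include <sound/ac97_codec.h>
#include <sound/soc.h>

static int example_soc_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;

	ac97 = snd_soc_new_ac97_codec(codec);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	/* keep the handle where the read/write callbacks can find it */
	snd_soc_codec_set_drvdata(codec, ac97);

	return 0;
}

static int example_soc_remove(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);

	snd_soc_free_ac97_codec(ac97);
	return 0;
}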
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index c5eb746087b4..e04643d2bb24 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,12 @@
#include <sound/tlv.h>
#include "wm9712.h"
+struct wm9712_priv {
+ struct snd_ac97 *ac97;
+ unsigned int hp_mixer[2];
+ struct mutex lock;
+};
+
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg);
static int ac97_write(struct snd_soc_codec *codec,
@@ -48,12 +54,10 @@ static const u16 wm9712_reg[] = {
0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */
- 0x0000, 0x0000 /* virtual hp mixers */
};
-/* virtual HP mixers regs */
-#define HPL_MIXER 0x80
-#define HPR_MIXER 0x82
+#define HPL_MIXER 0x0
+#define HPR_MIXER 0x1
static const char *wm9712_alc_select[] = {"None", "Left", "Right", "Stereo"};
static const char *wm9712_alc_mux[] = {"Stereo", "Left", "Right", "None"};
@@ -157,75 +161,108 @@ SOC_SINGLE_TLV("Mic 2 Volume", AC97_MIC, 0, 31, 1, main_tlv),
SOC_SINGLE_TLV("Mic Boost Volume", AC97_MIC, 7, 1, 0, boost_tlv),
};
+static const unsigned int wm9712_mixer_mute_regs[] = {
+ AC97_VIDEO,
+ AC97_PCM,
+ AC97_LINE,
+ AC97_PHONE,
+ AC97_CD,
+ AC97_PC_BEEP,
+};
+
/* We have to create a fake left and right HP mixers because
* the codec only has a single control that is shared by both channels.
* This makes it impossible to determine the audio path.
*/
-static int mixer_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
+static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- u16 l, r, beep, line, phone, mic, pcm, aux;
-
- l = ac97_read(w->codec, HPL_MIXER);
- r = ac97_read(w->codec, HPR_MIXER);
- beep = ac97_read(w->codec, AC97_PC_BEEP);
- mic = ac97_read(w->codec, AC97_VIDEO);
- phone = ac97_read(w->codec, AC97_PHONE);
- line = ac97_read(w->codec, AC97_LINE);
- pcm = ac97_read(w->codec, AC97_PCM);
- aux = ac97_read(w->codec, AC97_CD);
-
- if (l & 0x1 || r & 0x1)
- ac97_write(w->codec, AC97_VIDEO, mic & 0x7fff);
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = ucontrol->value.enumerated.item[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int mixer, mask, shift, old;
+ struct snd_soc_dapm_update update;
+ bool change;
+
+ mixer = mc->shift >> 8;
+ shift = mc->shift & 0xff;
+ mask = 1 << shift;
+
+ mutex_lock(&wm9712->lock);
+ old = wm9712->hp_mixer[mixer];
+ if (ucontrol->value.enumerated.item[0])
+ wm9712->hp_mixer[mixer] |= mask;
else
- ac97_write(w->codec, AC97_VIDEO, mic | 0x8000);
+ wm9712->hp_mixer[mixer] &= ~mask;
+
+ change = old != wm9712->hp_mixer[mixer];
+ if (change) {
+ update.kcontrol = kcontrol;
+ update.reg = wm9712_mixer_mute_regs[shift];
+ update.mask = 0x8000;
+ if ((wm9712->hp_mixer[0] & mask) ||
+ (wm9712->hp_mixer[1] & mask))
+ update.val = 0x0;
+ else
+ update.val = 0x8000;
+
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, val,
+ &update);
+ }
- if (l & 0x2 || r & 0x2)
- ac97_write(w->codec, AC97_PCM, pcm & 0x7fff);
- else
- ac97_write(w->codec, AC97_PCM, pcm | 0x8000);
+ mutex_unlock(&wm9712->lock);
- if (l & 0x4 || r & 0x4)
- ac97_write(w->codec, AC97_LINE, line & 0x7fff);
- else
- ac97_write(w->codec, AC97_LINE, line | 0x8000);
+ return change;
+}
- if (l & 0x8 || r & 0x8)
- ac97_write(w->codec, AC97_PHONE, phone & 0x7fff);
- else
- ac97_write(w->codec, AC97_PHONE, phone | 0x8000);
+static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int shift, mixer;
- if (l & 0x10 || r & 0x10)
- ac97_write(w->codec, AC97_CD, aux & 0x7fff);
- else
- ac97_write(w->codec, AC97_CD, aux | 0x8000);
+ mixer = mc->shift >> 8;
+ shift = mc->shift & 0xff;
- if (l & 0x20 || r & 0x20)
- ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff);
- else
- ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000);
+ ucontrol->value.enumerated.item[0] =
+ (wm9712->hp_mixer[mixer] >> shift) & 1;
return 0;
}
+#define WM9712_HP_MIXER_CTRL(xname, xmixer, xshift) { \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = wm9712_hp_mixer_get, .put = wm9712_hp_mixer_put, \
+ .private_value = SOC_SINGLE_VALUE(SND_SOC_NOPM, \
+ (xmixer << 8) | xshift, 1, 0, 0) \
+}
+
/* Left Headphone Mixers */
static const struct snd_kcontrol_new wm9712_hpl_mixer_controls[] = {
- SOC_DAPM_SINGLE("PCBeep Bypass Switch", HPL_MIXER, 5, 1, 0),
- SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 4, 1, 0),
- SOC_DAPM_SINGLE("Phone Bypass Switch", HPL_MIXER, 3, 1, 0),
- SOC_DAPM_SINGLE("Line Bypass Switch", HPL_MIXER, 2, 1, 0),
- SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 1, 1, 0),
- SOC_DAPM_SINGLE("Mic Sidetone Switch", HPL_MIXER, 0, 1, 0),
+ WM9712_HP_MIXER_CTRL("PCBeep Bypass Switch", HPL_MIXER, 5),
+ WM9712_HP_MIXER_CTRL("Aux Playback Switch", HPL_MIXER, 4),
+ WM9712_HP_MIXER_CTRL("Phone Bypass Switch", HPL_MIXER, 3),
+ WM9712_HP_MIXER_CTRL("Line Bypass Switch", HPL_MIXER, 2),
+ WM9712_HP_MIXER_CTRL("PCM Playback Switch", HPL_MIXER, 1),
+ WM9712_HP_MIXER_CTRL("Mic Sidetone Switch", HPL_MIXER, 0),
};
/* Right Headphone Mixers */
static const struct snd_kcontrol_new wm9712_hpr_mixer_controls[] = {
- SOC_DAPM_SINGLE("PCBeep Bypass Switch", HPR_MIXER, 5, 1, 0),
- SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 4, 1, 0),
- SOC_DAPM_SINGLE("Phone Bypass Switch", HPR_MIXER, 3, 1, 0),
- SOC_DAPM_SINGLE("Line Bypass Switch", HPR_MIXER, 2, 1, 0),
- SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 1, 1, 0),
- SOC_DAPM_SINGLE("Mic Sidetone Switch", HPR_MIXER, 0, 1, 0),
+ WM9712_HP_MIXER_CTRL("PCBeep Bypass Switch", HPR_MIXER, 5),
+ WM9712_HP_MIXER_CTRL("Aux Playback Switch", HPR_MIXER, 4),
+ WM9712_HP_MIXER_CTRL("Phone Bypass Switch", HPR_MIXER, 3),
+ WM9712_HP_MIXER_CTRL("Line Bypass Switch", HPR_MIXER, 2),
+ WM9712_HP_MIXER_CTRL("PCM Playback Switch", HPR_MIXER, 1),
+ WM9712_HP_MIXER_CTRL("Mic Sidetone Switch", HPR_MIXER, 0),
};
/* Speaker Mixer */
@@ -299,12 +336,10 @@ SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
&wm9712_diff_sel_controls),
SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
-SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_INT_PAGING, 9, 1,
- &wm9712_hpl_mixer_controls[0], ARRAY_SIZE(wm9712_hpl_mixer_controls),
- mixer_event, SND_SOC_DAPM_POST_REG),
-SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_INT_PAGING, 8, 1,
- &wm9712_hpr_mixer_controls[0], ARRAY_SIZE(wm9712_hpr_mixer_controls),
- mixer_event, SND_SOC_DAPM_POST_REG),
+SND_SOC_DAPM_MIXER("Left HP Mixer", AC97_INT_PAGING, 9, 1,
+ &wm9712_hpl_mixer_controls[0], ARRAY_SIZE(wm9712_hpl_mixer_controls)),
+SND_SOC_DAPM_MIXER("Right HP Mixer", AC97_INT_PAGING, 8, 1,
+ &wm9712_hpr_mixer_controls[0], ARRAY_SIZE(wm9712_hpr_mixer_controls)),
SND_SOC_DAPM_MIXER("Phone Mixer", AC97_INT_PAGING, 6, 1,
&wm9712_phone_mixer_controls[0], ARRAY_SIZE(wm9712_phone_mixer_controls)),
SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_INT_PAGING, 7, 1,
@@ -450,12 +485,13 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg)
{
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
reg == AC97_REC_GAIN)
- return soc_ac97_ops->read(codec->ac97, reg);
+ return soc_ac97_ops->read(wm9712->ac97, reg);
else {
reg = reg >> 1;
@@ -469,10 +505,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec,
static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val)
{
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
- if (reg < 0x7c)
- soc_ac97_ops->write(codec->ac97, reg, val);
+ soc_ac97_ops->write(wm9712->ac97, reg, val);
reg = reg >> 1;
if (reg < (ARRAY_SIZE(wm9712_reg)))
cache[reg] = val;
@@ -532,7 +568,6 @@ static const struct snd_soc_dai_ops wm9712_dai_ops_aux = {
static struct snd_soc_dai_driver wm9712_dai[] = {
{
.name = "wm9712-hifi",
- .ac97_control = 1,
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
@@ -581,40 +616,35 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
{
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+
if (try_warm && soc_ac97_ops->warm_reset) {
- soc_ac97_ops->warm_reset(codec->ac97);
+ soc_ac97_ops->warm_reset(wm9712->ac97);
if (ac97_read(codec, 0) == wm9712_reg[0])
return 1;
}
- soc_ac97_ops->reset(codec->ac97);
+ soc_ac97_ops->reset(wm9712->ac97);
if (soc_ac97_ops->warm_reset)
- soc_ac97_ops->warm_reset(codec->ac97);
+ soc_ac97_ops->warm_reset(wm9712->ac97);
if (ac97_read(codec, 0) != wm9712_reg[0])
goto err;
return 0;
err:
- printk(KERN_ERR "WM9712 AC97 reset failed\n");
+ dev_err(codec->dev, "Failed to reset: AC97 link error\n");
return -EIO;
}
-static int wm9712_soc_suspend(struct snd_soc_codec *codec)
-{
- wm9712_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int wm9712_soc_resume(struct snd_soc_codec *codec)
{
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
int i, ret;
u16 *cache = codec->reg_cache;
ret = wm9712_reset(codec, 1);
- if (ret < 0) {
- printk(KERN_ERR "could not reset AC97 codec\n");
+ if (ret < 0)
return ret;
- }
wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -624,7 +654,7 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
if (i == AC97_INT_PAGING || i == AC97_POWERDOWN ||
(i > 0x58 && i != 0x5c))
continue;
- soc_ac97_ops->write(codec->ac97, i, cache[i>>1]);
+ soc_ac97_ops->write(wm9712->ac97, i, cache[i>>1]);
}
}
@@ -633,52 +663,53 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
static int wm9712_soc_probe(struct snd_soc_codec *codec)
{
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
- if (ret < 0) {
- printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
+ wm9712->ac97 = snd_soc_new_ac97_codec(codec);
+ if (IS_ERR(wm9712->ac97)) {
+ ret = PTR_ERR(wm9712->ac97);
+ dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
return ret;
}
ret = wm9712_reset(codec, 0);
- if (ret < 0) {
- printk(KERN_ERR "Failed to reset WM9712: AC97 link error\n");
+ if (ret < 0)
goto reset_err;
- }
/* set alc mux to none */
ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
- wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- snd_soc_add_codec_controls(codec, wm9712_snd_ac97_controls,
- ARRAY_SIZE(wm9712_snd_ac97_controls));
-
return 0;
reset_err:
- snd_soc_free_ac97_codec(codec);
+ snd_soc_free_ac97_codec(wm9712->ac97);
return ret;
}
static int wm9712_soc_remove(struct snd_soc_codec *codec)
{
- snd_soc_free_ac97_codec(codec);
+ struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+
+ snd_soc_free_ac97_codec(wm9712->ac97);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
.probe = wm9712_soc_probe,
.remove = wm9712_soc_remove,
- .suspend = wm9712_soc_suspend,
.resume = wm9712_soc_resume,
.read = ac97_read,
.write = ac97_write,
.set_bias_level = wm9712_set_bias_level,
+ .suspend_bias_off = true,
.reg_cache_size = ARRAY_SIZE(wm9712_reg),
.reg_word_size = sizeof(u16),
.reg_cache_step = 2,
.reg_cache_default = wm9712_reg,
+
+ .controls = wm9712_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9712_snd_ac97_controls),
.dapm_widgets = wm9712_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets),
.dapm_routes = wm9712_audio_map,
@@ -687,6 +718,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
static int wm9712_probe(struct platform_device *pdev)
{
+ struct wm9712_priv *wm9712;
+
+ wm9712 = devm_kzalloc(&pdev->dev, sizeof(*wm9712), GFP_KERNEL);
+ if (wm9712 == NULL)
+ return -ENOMEM;
+
+ mutex_init(&wm9712->lock);
+
+ platform_set_drvdata(pdev, wm9712);
+
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9712, wm9712_dai, ARRAY_SIZE(wm9712_dai));
}
@@ -700,7 +741,6 @@ static int wm9712_remove(struct platform_device *pdev)
static struct platform_driver wm9712_codec_driver = {
.driver = {
.name = "wm9712-codec",
- .owner = THIS_MODULE,
},
.probe = wm9712_probe,
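The removal of mixer_event() above is the interesting part of the wm9712 diff: the two virtual HP mixer registers disappear from the register cache, the per-channel switch state lives in hp_mixer[] in driver data, and a custom put handler updates the shared hardware mute bit while calling snd_soc_dapm_mixer_update_power() so DAPM still tracks the connection. Below is a stripped-down put handler following the same shape; the priv struct, mute-register table and register values are assumptions for illustration, not the driver's own.

#include <linux/mutex.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

struct example_priv {
	unsigned int hp_mixer[2];	/* one input bitmask per channel */
	struct mutex lock;		/* initialised at probe time */
};

/* one shared mute register per mixer input, indexed by the input shift */
static const unsigned int example_mute_regs[] = { 0x0a, 0x18, 0x0c };

static int example_hp_mixer_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm =
		snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
	struct example_priv *priv = snd_soc_codec_get_drvdata(codec);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int mixer = mc->shift >> 8;	/* which channel */
	unsigned int shift = mc->shift & 0xff;	/* which input */
	unsigned int mask = 1 << shift, old;
	struct snd_soc_dapm_update update;
	bool change;

	mutex_lock(&priv->lock);
	old = priv->hp_mixer[mixer];
	if (ucontrol->value.integer.value[0])
		priv->hp_mixer[mixer] |= mask;
	else
		priv->hp_mixer[mixer] &= ~mask;
	change = old != priv->hp_mixer[mixer];

	if (change) {
		/* the hardware mute bit is shared, so keep it cleared
		 * while either channel still uses this input */
		update.kcontrol = kcontrol;
		update.reg = example_mute_regs[shift];
		update.mask = 0x8000;
		if ((priv->hp_mixer[0] & mask) || (priv->hp_mixer[1] & mask))
			update.val = 0;
		else
			update.val = 0x8000;

		snd_soc_dapm_mixer_update_power(dapm, kcontrol,
				ucontrol->value.integer.value[0], &update);
	}
	mutex_unlock(&priv->lock);

	return change;
}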
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index bddee30a4bc7..71b9d5b0734d 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -30,7 +30,10 @@
#include "wm9713.h"
struct wm9713_priv {
+ struct snd_ac97 *ac97;
u32 pll_in; /* PLL input frequency */
+ unsigned int hp_mixer[2];
+ struct mutex lock;
};
static unsigned int ac97_read(struct snd_soc_codec *codec,
@@ -59,13 +62,10 @@ static const u16 wm9713_reg[] = {
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0006,
0x0001, 0x0000, 0x574d, 0x4c13,
- 0x0000, 0x0000, 0x0000
};
-/* virtual HP mixers regs */
-#define HPL_MIXER 0x80
-#define HPR_MIXER 0x82
-#define MICB_MUX 0x82
+#define HPL_MIXER 0
+#define HPR_MIXER 1
static const char *wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
static const char *wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
@@ -110,7 +110,7 @@ SOC_ENUM_SINGLE(AC97_REC_GAIN_MIC, 10, 8, wm9713_dac_inv), /* dac invert 2 15 */
SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_bass), /* bass control 16 */
SOC_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type), /* noise gate type 17 */
SOC_ENUM_SINGLE(AC97_3D_CONTROL, 12, 3, wm9713_mic_select), /* mic selection 18 */
-SOC_ENUM_SINGLE(MICB_MUX, 0, 2, wm9713_micb_select), /* mic selection 19 */
+SOC_ENUM_SINGLE_VIRT(2, wm9713_micb_select), /* mic selection 19 */
};
static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
@@ -234,6 +234,14 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
return 0;
}
+static const unsigned int wm9713_mixer_mute_regs[] = {
+ AC97_PC_BEEP,
+ AC97_MASTER_TONE,
+ AC97_PHONE,
+ AC97_REC_SEL,
+ AC97_PCM,
+ AC97_AUX,
+};
/* We have to create a fake left and right HP mixers because
* the codec only has a single control that is shared by both channels.
@@ -241,73 +249,95 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
* register map, thus we add a new (virtual) register to help determine the
* audio route within the device.
*/
-static int mixer_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- u16 l, r, beep, tone, phone, rec, pcm, aux;
-
- l = ac97_read(w->codec, HPL_MIXER);
- r = ac97_read(w->codec, HPR_MIXER);
- beep = ac97_read(w->codec, AC97_PC_BEEP);
- tone = ac97_read(w->codec, AC97_MASTER_TONE);
- phone = ac97_read(w->codec, AC97_PHONE);
- rec = ac97_read(w->codec, AC97_REC_SEL);
- pcm = ac97_read(w->codec, AC97_PCM);
- aux = ac97_read(w->codec, AC97_AUX);
-
- if (event & SND_SOC_DAPM_PRE_REG)
- return 0;
- if ((l & 0x1) || (r & 0x1))
- ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff);
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = ucontrol->value.enumerated.item[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int mixer, mask, shift, old;
+ struct snd_soc_dapm_update update;
+ bool change;
+
+ mixer = mc->shift >> 8;
+ shift = mc->shift & 0xff;
+ mask = (1 << shift);
+
+ mutex_lock(&wm9713->lock);
+ old = wm9713->hp_mixer[mixer];
+ if (ucontrol->value.enumerated.item[0])
+ wm9713->hp_mixer[mixer] |= mask;
else
- ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000);
+ wm9713->hp_mixer[mixer] &= ~mask;
+
+ change = old != wm9713->hp_mixer[mixer];
+ if (change) {
+ update.kcontrol = kcontrol;
+ update.reg = wm9713_mixer_mute_regs[shift];
+ update.mask = 0x8000;
+ if ((wm9713->hp_mixer[0] & mask) ||
+ (wm9713->hp_mixer[1] & mask))
+ update.val = 0x0;
+ else
+ update.val = 0x8000;
+
+ snd_soc_dapm_mixer_update_power(dapm, kcontrol, val,
+ &update);
+ }
- if ((l & 0x2) || (r & 0x2))
- ac97_write(w->codec, AC97_MASTER_TONE, tone & 0x7fff);
- else
- ac97_write(w->codec, AC97_MASTER_TONE, tone | 0x8000);
+ mutex_unlock(&wm9713->lock);
- if ((l & 0x4) || (r & 0x4))
- ac97_write(w->codec, AC97_PHONE, phone & 0x7fff);
- else
- ac97_write(w->codec, AC97_PHONE, phone | 0x8000);
+ return change;
+}
- if ((l & 0x8) || (r & 0x8))
- ac97_write(w->codec, AC97_REC_SEL, rec & 0x7fff);
- else
- ac97_write(w->codec, AC97_REC_SEL, rec | 0x8000);
+static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int mixer, shift;
- if ((l & 0x10) || (r & 0x10))
- ac97_write(w->codec, AC97_PCM, pcm & 0x7fff);
- else
- ac97_write(w->codec, AC97_PCM, pcm | 0x8000);
+ mixer = mc->shift >> 8;
+ shift = mc->shift & 0xff;
- if ((l & 0x20) || (r & 0x20))
- ac97_write(w->codec, AC97_AUX, aux & 0x7fff);
- else
- ac97_write(w->codec, AC97_AUX, aux | 0x8000);
+ ucontrol->value.enumerated.item[0] =
+ (wm9713->hp_mixer[mixer] >> shift) & 1;
return 0;
}
+#define WM9713_HP_MIXER_CTRL(xname, xmixer, xshift) { \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = wm9713_hp_mixer_get, .put = wm9713_hp_mixer_put, \
+ .private_value = SOC_DOUBLE_VALUE(SND_SOC_NOPM, \
+ xshift, xmixer, 1, 0, 0) \
+}
+
/* Left Headphone Mixers */
static const struct snd_kcontrol_new wm9713_hpl_mixer_controls[] = {
-SOC_DAPM_SINGLE("Beep Playback Switch", HPL_MIXER, 5, 1, 0),
-SOC_DAPM_SINGLE("Voice Playback Switch", HPL_MIXER, 4, 1, 0),
-SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 3, 1, 0),
-SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 2, 1, 0),
-SOC_DAPM_SINGLE("MonoIn Playback Switch", HPL_MIXER, 1, 1, 0),
-SOC_DAPM_SINGLE("Bypass Playback Switch", HPL_MIXER, 0, 1, 0),
+WM9713_HP_MIXER_CTRL("Beep Playback Switch", HPL_MIXER, 5),
+WM9713_HP_MIXER_CTRL("Voice Playback Switch", HPL_MIXER, 4),
+WM9713_HP_MIXER_CTRL("Aux Playback Switch", HPL_MIXER, 3),
+WM9713_HP_MIXER_CTRL("PCM Playback Switch", HPL_MIXER, 2),
+WM9713_HP_MIXER_CTRL("MonoIn Playback Switch", HPL_MIXER, 1),
+WM9713_HP_MIXER_CTRL("Bypass Playback Switch", HPL_MIXER, 0),
};
/* Right Headphone Mixers */
static const struct snd_kcontrol_new wm9713_hpr_mixer_controls[] = {
-SOC_DAPM_SINGLE("Beep Playback Switch", HPR_MIXER, 5, 1, 0),
-SOC_DAPM_SINGLE("Voice Playback Switch", HPR_MIXER, 4, 1, 0),
-SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 3, 1, 0),
-SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 2, 1, 0),
-SOC_DAPM_SINGLE("MonoIn Playback Switch", HPR_MIXER, 1, 1, 0),
-SOC_DAPM_SINGLE("Bypass Playback Switch", HPR_MIXER, 0, 1, 0),
+WM9713_HP_MIXER_CTRL("Beep Playback Switch", HPR_MIXER, 5),
+WM9713_HP_MIXER_CTRL("Voice Playback Switch", HPR_MIXER, 4),
+WM9713_HP_MIXER_CTRL("Aux Playback Switch", HPR_MIXER, 3),
+WM9713_HP_MIXER_CTRL("PCM Playback Switch", HPR_MIXER, 2),
+WM9713_HP_MIXER_CTRL("MonoIn Playback Switch", HPR_MIXER, 1),
+WM9713_HP_MIXER_CTRL("Bypass Playback Switch", HPR_MIXER, 0),
};
/* headphone capture mux */
@@ -429,12 +459,10 @@ SND_SOC_DAPM_MUX("Mic A Source", SND_SOC_NOPM, 0, 0,
&wm9713_mic_sel_mux_controls),
SND_SOC_DAPM_MUX("Mic B Source", SND_SOC_NOPM, 0, 0,
&wm9713_micb_sel_mux_controls),
-SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_EXTENDED_MID, 3, 1,
- &wm9713_hpl_mixer_controls[0], ARRAY_SIZE(wm9713_hpl_mixer_controls),
- mixer_event, SND_SOC_DAPM_POST_REG),
-SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_EXTENDED_MID, 2, 1,
- &wm9713_hpr_mixer_controls[0], ARRAY_SIZE(wm9713_hpr_mixer_controls),
- mixer_event, SND_SOC_DAPM_POST_REG),
+SND_SOC_DAPM_MIXER("Left HP Mixer", AC97_EXTENDED_MID, 3, 1,
+ &wm9713_hpl_mixer_controls[0], ARRAY_SIZE(wm9713_hpl_mixer_controls)),
+SND_SOC_DAPM_MIXER("Right HP Mixer", AC97_EXTENDED_MID, 2, 1,
+ &wm9713_hpr_mixer_controls[0], ARRAY_SIZE(wm9713_hpr_mixer_controls)),
SND_SOC_DAPM_MIXER("Mono Mixer", AC97_EXTENDED_MID, 0, 1,
&wm9713_mono_mixer_controls[0], ARRAY_SIZE(wm9713_mono_mixer_controls)),
SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_EXTENDED_MID, 1, 1,
@@ -647,12 +675,13 @@ static const struct snd_soc_dapm_route wm9713_audio_map[] = {
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg)
{
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
u16 *cache = codec->reg_cache;
if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
reg == AC97_CD)
- return soc_ac97_ops->read(codec->ac97, reg);
+ return soc_ac97_ops->read(wm9713->ac97, reg);
else {
reg = reg >> 1;
@@ -666,9 +695,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec,
static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val)
{
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
+
u16 *cache = codec->reg_cache;
- if (reg < 0x7c)
- soc_ac97_ops->write(codec->ac97, reg, val);
+ soc_ac97_ops->write(wm9713->ac97, reg, val);
reg = reg >> 1;
if (reg < (ARRAY_SIZE(wm9713_reg)))
cache[reg] = val;
@@ -689,7 +719,8 @@ struct _pll_div {
* to allow rounding later */
#define FIXED_PLL_SIZE ((1 << 22) * 10)
-static void pll_factors(struct _pll_div *pll_div, unsigned int source)
+static void pll_factors(struct snd_soc_codec *codec,
+ struct _pll_div *pll_div, unsigned int source)
{
u64 Kpart;
unsigned int K, Ndiv, Nmod, target;
@@ -724,7 +755,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int source)
Ndiv = target / source;
if ((Ndiv < 5) || (Ndiv > 12))
- printk(KERN_WARNING
+ dev_warn(codec->dev,
"WM9713 PLL N value %u out of recommended range!\n",
Ndiv);
@@ -768,7 +799,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
return 0;
}
- pll_factors(&pll_div, freq_in);
+ pll_factors(codec, &pll_div, freq_in);
if (pll_div.k == 0) {
reg = (pll_div.n << 12) | (pll_div.lf << 11) |
@@ -1049,7 +1080,6 @@ static const struct snd_soc_dai_ops wm9713_dai_ops_voice = {
static struct snd_soc_dai_driver wm9713_dai[] = {
{
.name = "wm9713-hifi",
- .ac97_control = 1,
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
@@ -1095,17 +1125,22 @@ static struct snd_soc_dai_driver wm9713_dai[] = {
int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
{
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
+
if (try_warm && soc_ac97_ops->warm_reset) {
- soc_ac97_ops->warm_reset(codec->ac97);
+ soc_ac97_ops->warm_reset(wm9713->ac97);
if (ac97_read(codec, 0) == wm9713_reg[0])
return 1;
}
- soc_ac97_ops->reset(codec->ac97);
+ soc_ac97_ops->reset(wm9713->ac97);
if (soc_ac97_ops->warm_reset)
- soc_ac97_ops->warm_reset(codec->ac97);
- if (ac97_read(codec, 0) != wm9713_reg[0])
+ soc_ac97_ops->warm_reset(wm9713->ac97);
+ if (ac97_read(codec, 0) != wm9713_reg[0]) {
+ dev_err(codec->dev, "Failed to reset: AC97 link error\n");
return -EIO;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(wm9713_reset);
@@ -1163,10 +1198,8 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
u16 *cache = codec->reg_cache;
ret = wm9713_reset(codec, 1);
- if (ret < 0) {
- printk(KERN_ERR "could not reset AC97 codec\n");
+ if (ret < 0)
return ret;
- }
wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1180,7 +1213,7 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
if (i == AC97_POWERDOWN || i == AC97_EXTENDED_MID ||
i == AC97_EXTENDED_MSTATUS || i > 0x66)
continue;
- soc_ac97_ops->write(codec->ac97, i, cache[i>>1]);
+ soc_ac97_ops->write(wm9713->ac97, i, cache[i>>1]);
}
}
@@ -1189,50 +1222,36 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
static int wm9713_soc_probe(struct snd_soc_codec *codec)
{
- struct wm9713_priv *wm9713;
+ struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
int ret = 0, reg;
- wm9713 = kzalloc(sizeof(struct wm9713_priv), GFP_KERNEL);
- if (wm9713 == NULL)
- return -ENOMEM;
- snd_soc_codec_set_drvdata(codec, wm9713);
-
- ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
- if (ret < 0)
- goto codec_err;
+ wm9713->ac97 = snd_soc_new_ac97_codec(codec);
+ if (IS_ERR(wm9713->ac97))
+ return PTR_ERR(wm9713->ac97);
/* do a cold reset for the controller and then try
* a warm reset followed by an optional cold reset for codec */
wm9713_reset(codec, 0);
ret = wm9713_reset(codec, 1);
- if (ret < 0) {
- printk(KERN_ERR "Failed to reset WM9713: AC97 link error\n");
+ if (ret < 0)
goto reset_err;
- }
-
- wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* unmute the adc - move to kcontrol */
reg = ac97_read(codec, AC97_CD) & 0x7fff;
ac97_write(codec, AC97_CD, reg);
- snd_soc_add_codec_controls(codec, wm9713_snd_ac97_controls,
- ARRAY_SIZE(wm9713_snd_ac97_controls));
-
return 0;
reset_err:
- snd_soc_free_ac97_codec(codec);
-codec_err:
- kfree(wm9713);
+ snd_soc_free_ac97_codec(wm9713->ac97);
return ret;
}
static int wm9713_soc_remove(struct snd_soc_codec *codec)
{
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
- snd_soc_free_ac97_codec(codec);
- kfree(wm9713);
+
+ snd_soc_free_ac97_codec(wm9713->ac97);
return 0;
}
@@ -1248,6 +1267,9 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
.reg_word_size = sizeof(u16),
.reg_cache_step = 2,
.reg_cache_default = wm9713_reg,
+
+ .controls = wm9713_snd_ac97_controls,
+ .num_controls = ARRAY_SIZE(wm9713_snd_ac97_controls),
.dapm_widgets = wm9713_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets),
.dapm_routes = wm9713_audio_map,
@@ -1256,6 +1278,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
static int wm9713_probe(struct platform_device *pdev)
{
+ struct wm9713_priv *wm9713;
+
+ wm9713 = devm_kzalloc(&pdev->dev, sizeof(*wm9713), GFP_KERNEL);
+ if (wm9713 == NULL)
+ return -ENOMEM;
+
+ mutex_init(&wm9713->lock);
+
+ platform_set_drvdata(pdev, wm9713);
+
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9713, wm9713_dai, ARRAY_SIZE(wm9713_dai));
}
@@ -1269,7 +1301,6 @@ static int wm9713_remove(struct platform_device *pdev)
static struct platform_driver wm9713_codec_driver = {
.driver = {
.name = "wm9713-codec",
- .owner = THIS_MODULE,
},
.probe = wm9713_probe,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 67124783558a..720d6e852986 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -169,11 +170,12 @@ static struct wm_adsp_buf *wm_adsp_buf_alloc(const void *src, size_t len,
if (buf == NULL)
return NULL;
- buf->buf = kmemdup(src, len, GFP_KERNEL | GFP_DMA);
+ buf->buf = vmalloc(len);
if (!buf->buf) {
- kfree(buf);
+ vfree(buf);
return NULL;
}
+ memcpy(buf->buf, src, len);
if (list)
list_add_tail(&buf->list, list);
@@ -188,7 +190,7 @@ static void wm_adsp_buf_free(struct list_head *list)
struct wm_adsp_buf,
list);
list_del(&buf->list);
- kfree(buf->buf);
+ vfree(buf->buf);
kfree(buf);
}
}
@@ -684,38 +686,24 @@ static int wm_adsp_load(struct wm_adsp *dsp)
}
if (reg) {
- size_t to_write = PAGE_SIZE;
- size_t remain = le32_to_cpu(region->len);
- const u8 *data = region->data;
-
- while (remain > 0) {
- if (remain < PAGE_SIZE)
- to_write = remain;
-
- buf = wm_adsp_buf_alloc(data,
- to_write,
- &buf_list);
- if (!buf) {
- adsp_err(dsp, "Out of memory\n");
- ret = -ENOMEM;
- goto out_fw;
- }
-
- ret = regmap_raw_write_async(regmap, reg,
- buf->buf,
- to_write);
- if (ret != 0) {
- adsp_err(dsp,
- "%s.%d: Failed to write %zd bytes at %d in %s: %d\n",
- file, regions,
- to_write, offset,
- region_name, ret);
- goto out_fw;
- }
+ buf = wm_adsp_buf_alloc(region->data,
+ le32_to_cpu(region->len),
+ &buf_list);
+ if (!buf) {
+ adsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
- data += to_write;
- reg += to_write / 2;
- remain -= to_write;
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
+ if (ret != 0) {
+ adsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+ file, regions,
+ le32_to_cpu(region->len), offset,
+ region_name, ret);
+ goto out_fw;
}
}
@@ -1065,8 +1053,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
be32_to_cpu(adsp1_alg[i].zm));
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
region->type = WMFW_ADSP1_DM;
region->alg = be32_to_cpu(adsp1_alg[i].alg.id);
region->base = be32_to_cpu(adsp1_alg[i].dm);
@@ -1083,8 +1073,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
}
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
region->type = WMFW_ADSP1_ZM;
region->alg = be32_to_cpu(adsp1_alg[i].alg.id);
region->base = be32_to_cpu(adsp1_alg[i].zm);
@@ -1113,8 +1105,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
be32_to_cpu(adsp2_alg[i].zm));
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
region->type = WMFW_ADSP2_XM;
region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
region->base = be32_to_cpu(adsp2_alg[i].xm);
@@ -1131,8 +1125,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
}
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
region->type = WMFW_ADSP2_YM;
region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
region->base = be32_to_cpu(adsp2_alg[i].ym);
@@ -1149,8 +1145,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
}
region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
region->type = WMFW_ADSP2_ZM;
region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
region->base = be32_to_cpu(adsp2_alg[i].zm);
@@ -1595,13 +1593,6 @@ static void wm_adsp2_boot_work(struct work_struct *work)
if (ret != 0)
goto err;
- ret = regmap_update_bits_async(dsp->regmap,
- dsp->base + ADSP2_CONTROL,
- ADSP2_CORE_ENA,
- ADSP2_CORE_ENA);
- if (ret != 0)
- goto err;
-
dsp->running = true;
return;
@@ -1651,8 +1642,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
ret = regmap_update_bits(dsp->regmap,
dsp->base + ADSP2_CONTROL,
- ADSP2_START,
- ADSP2_START);
+ ADSP2_CORE_ENA | ADSP2_START,
+ ADSP2_CORE_ENA | ADSP2_START);
if (ret != 0)
goto err;
break;
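Two things happen in the wm_adsp hunks above: the firmware download scratch buffers switch from kmemdup(GFP_KERNEL | GFP_DMA) to vmalloc(), since regmap_raw_write_async() does not need physically contiguous memory, and each region is now sent as a single async write instead of PAGE_SIZE chunks. A sketch of that buffer helper, under the assumption that the container itself comes from kzalloc() (struct and function names invented):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

struct example_buf {
	struct list_head list;
	void *buf;
};

static struct example_buf *example_buf_alloc(const void *src, size_t len,
					     struct list_head *list)
{
	struct example_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;

	/* vmalloc is enough here: the payload only has to be virtually
	 * contiguous for the async regmap write */
	buf->buf = vmalloc(len);
	if (!buf->buf) {
		kfree(buf);	/* kzalloc'd container pairs with kfree */
		return NULL;
	}
	memcpy(buf->buf, src, len);

	if (list)
		list_add_tail(&buf->list, list);

	return buf;
}

static void example_buf_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct example_buf *buf = list_first_entry(list,
				struct example_buf, list);

		list_del(&buf->list);
		vfree(buf->buf);	/* vmalloc'd payload pairs with vfree */
		kfree(buf);
	}
}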
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index a50010e2891f..158cb3d1db70 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -445,7 +445,6 @@ static struct platform_driver davinci_evm_driver = {
.remove = davinci_evm_remove,
.driver = {
.name = "davinci_evm",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = of_match_ptr(davinci_evm_dt_ids),
},
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 7682af31d6e6..15fb28fc8e1b 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -770,7 +770,6 @@ static struct platform_driver davinci_mcbsp_driver = {
.remove = davinci_i2s_remove,
.driver = {
.name = "davinci-mcbsp",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0eed9b1b24e1..30b94d4f9c5d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -70,6 +70,7 @@ struct davinci_mcasp {
void __iomem *base;
u32 fifo_base;
struct device *dev;
+ struct snd_pcm_substream *substreams[2];
/* McASP specific data */
int tdm_slots;
@@ -80,6 +81,7 @@ struct davinci_mcasp {
u8 bclk_div;
u16 bclk_lrclk_ratio;
int streams;
+ u32 irq_request[2];
int sysclk_freq;
bool bclk_master;
@@ -90,6 +92,9 @@ struct davinci_mcasp {
bool dat_port;
+ /* Used for constraint setting on the second stream */
+ u32 channels;
+
#ifdef CONFIG_PM_SLEEP
struct davinci_mcasp_context context;
#endif
@@ -154,9 +159,16 @@ static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp)
static void mcasp_start_rx(struct davinci_mcasp *mcasp)
{
+ if (mcasp->rxnumevt) { /* enable FIFO */
+ u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+
+ mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+ mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
+ }
+
+ /* Start clocks */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
-
/*
* When ASYNC == 0 the transmit and receive sections operate
* synchronously from the transmit clock and frame sync. We need to make
@@ -167,74 +179,69 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
}
+ /* Activate serializer(s) */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXBUF_REG, 0);
-
- mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
- mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXBUF_REG, 0);
-
+ /* Release RX state machine */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
+ /* Release Frame Sync generator */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
-
if (mcasp_is_synchronous(mcasp))
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+
+ /* enable receive IRQs */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
+ mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
}
static void mcasp_start_tx(struct davinci_mcasp *mcasp)
{
- u8 offset = 0, i;
u32 cnt;
+ if (mcasp->txnumevt) { /* enable FIFO */
+ u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+
+ mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+ mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
+ }
+
+ /* Start clocks */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
+ /* Activate serializer(s) */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0);
- mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
- mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0);
- for (i = 0; i < mcasp->num_serializer; i++) {
- if (mcasp->serial_dir[i] == TX_MODE) {
- offset = i;
- break;
- }
- }
-
- /* wait for TX ready */
+ /* wait for XDATA to be cleared */
cnt = 0;
- while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(offset)) &
- TXSTATE) && (cnt < 100000))
+ while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
+ ~XRDATA) && (cnt < 100000))
cnt++;
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0);
+ /* Release TX state machine */
+ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
+ /* Release Frame Sync generator */
+ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+
+ /* enable transmit IRQs */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
+ mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
}
static void davinci_mcasp_start(struct davinci_mcasp *mcasp, int stream)
{
- u32 reg;
-
mcasp->streams++;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (mcasp->txnumevt) { /* enable FIFO */
- reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
- mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
- mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
- }
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
mcasp_start_tx(mcasp);
- } else {
- if (mcasp->rxnumevt) { /* enable FIFO */
- reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
- mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
- mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
- }
+ else
mcasp_start_rx(mcasp);
- }
}
static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
{
+ /* disable IRQ sources */
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
+ mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
+
/*
* In synchronous mode stop the TX clocks if no other stream is
* running
@@ -244,12 +251,22 @@ static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0);
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
+
+ if (mcasp->rxnumevt) { /* disable FIFO */
+ u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+
+ mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+ }
}
static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
{
u32 val = 0;
+ /* disable IRQ sources */
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
+ mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
+
/*
* In synchronous mode keep TX clocks running if the capture stream is
* still running.
@@ -259,27 +276,92 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val);
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
+
+ if (mcasp->txnumevt) { /* disable FIFO */
+ u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+
+ mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+ }
}
static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream)
{
- u32 reg;
-
mcasp->streams--;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (mcasp->txnumevt) { /* disable FIFO */
- reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
- mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
- }
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
mcasp_stop_tx(mcasp);
- } else {
- if (mcasp->rxnumevt) { /* disable FIFO */
- reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
- mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
- }
+ else
mcasp_stop_rx(mcasp);
+}
+
+static irqreturn_t davinci_mcasp_tx_irq_handler(int irq, void *data)
+{
+ struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
+ struct snd_pcm_substream *substream;
+ u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK];
+ u32 handled_mask = 0;
+ u32 stat;
+
+ stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG);
+ if (stat & XUNDRN & irq_mask) {
+ dev_warn(mcasp->dev, "Transmit buffer underflow\n");
+ handled_mask |= XUNDRN;
+
+ substream = mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK];
+ if (substream) {
+ snd_pcm_stream_lock_irq(substream);
+ if (snd_pcm_running(substream))
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irq(substream);
+ }
}
+
+ if (!handled_mask)
+ dev_warn(mcasp->dev, "unhandled tx event. txstat: 0x%08x\n",
+ stat);
+
+ if (stat & XRERR)
+ handled_mask |= XRERR;
+
+ /* Ack the handled event only */
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, handled_mask);
+
+ return IRQ_RETVAL(handled_mask);
+}
+
+static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
+{
+ struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
+ struct snd_pcm_substream *substream;
+ u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE];
+ u32 handled_mask = 0;
+ u32 stat;
+
+ stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG);
+ if (stat & ROVRN & irq_mask) {
+ dev_warn(mcasp->dev, "Receive buffer overflow\n");
+ handled_mask |= ROVRN;
+
+ substream = mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE];
+ if (substream) {
+ snd_pcm_stream_lock_irq(substream);
+ if (snd_pcm_running(substream))
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock_irq(substream);
+ }
+ }
+
+ if (!handled_mask)
+ dev_warn(mcasp->dev, "unhandled rx event. rxstat: 0x%08x\n",
+ stat);
+
+ if (stat & XRERR)
+ handled_mask |= XRERR;
+
+ /* Ack the handled event only */
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, handled_mask);
+
+ return IRQ_RETVAL(handled_mask);
}
static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
@@ -500,8 +582,17 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
* both left and right channels), so it has to be divided by number of
* tdm-slots (for I2S - divided by 2).
*/
- if (mcasp->bclk_lrclk_ratio)
- word_length = mcasp->bclk_lrclk_ratio / mcasp->tdm_slots;
+ if (mcasp->bclk_lrclk_ratio) {
+ u32 slot_length = mcasp->bclk_lrclk_ratio / mcasp->tdm_slots;
+
+ /*
+ * When we have more bclk than is needed for the data, we need to
+ * use rotation to move the received samples into the correct
+ * alignment.
+ */
+ rx_rotate = (slot_length - word_length) / 4;
+ word_length = slot_length;
+ }
/* mapping of the XSSZ bit-field as described in the datasheet */
fmt = (word_length >> 1) - 1;
@@ -635,19 +726,29 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
return 0;
}
-static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
+static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
+ int channels)
{
int i, active_slots;
+ int total_slots;
+ int active_serializers;
u32 mask = 0;
u32 busel = 0;
- if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) {
- dev_err(mcasp->dev, "tdm slot %d not supported\n",
- mcasp->tdm_slots);
- return -EINVAL;
- }
+ total_slots = mcasp->tdm_slots;
+
+ /*
+ * If more than one serializer is needed, then use them with
+ * their specified tdm_slots count. Otherwise, one serializer
+ * can cope with the transaction using as many slots as there are
+ * channels in the stream, which requires channel symmetry.
+ */
+ active_serializers = (channels + total_slots - 1) / total_slots;
+ if (active_serializers == 1)
+ active_slots = channels;
+ else
+ active_slots = total_slots;
- active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
for (i = 0; i < active_slots; i++)
mask |= (1 << i);
@@ -659,12 +760,12 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
- FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
+ FSXMOD(total_slots), FSXMOD(0x1FF));
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
- FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
+ FSRMOD(total_slots), FSRMOD(0x1FF));
return 0;
}
@@ -778,7 +879,8 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
ret = mcasp_dit_hw_param(mcasp, params_rate(params));
else
- ret = mcasp_i2s_hw_param(mcasp, substream->stream);
+ ret = mcasp_i2s_hw_param(mcasp, substream->stream,
+ channels);
if (ret)
return ret;
@@ -826,6 +928,9 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
davinci_config_channel_size(mcasp, word_length);
+ if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
+ mcasp->channels = channels;
+
return 0;
}
@@ -854,7 +959,65 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ u32 max_channels = 0;
+ int i, dir;
+
+ mcasp->substreams[substream->stream] = substream;
+
+ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
+ return 0;
+
+ /*
+ * Limit the maximum allowed channels for the first stream:
+ * number of serializers for the direction * tdm slots per serializer
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = TX_MODE;
+ else
+ dir = RX_MODE;
+
+ for (i = 0; i < mcasp->num_serializer; i++) {
+ if (mcasp->serial_dir[i] == dir)
+ max_channels++;
+ }
+ max_channels *= mcasp->tdm_slots;
+ /*
+ * If the already active stream has fewer channels than the calculated
+ * limit based on the serializers * tdm_slots, we need to use that as
+ * a constraint for the second stream.
+ * Otherwise (first stream, or fewer allowed channels) we use the
+ * calculated constraint.
+ */
+ if (mcasp->channels && mcasp->channels < max_channels)
+ max_channels = mcasp->channels;
+
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ 2, max_channels);
+ return 0;
+}
+
+static void davinci_mcasp_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+
+ mcasp->substreams[substream->stream] = NULL;
+
+ if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
+ return;
+
+ if (!cpu_dai->active)
+ mcasp->channels = 0;
+}
+
static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
+ .startup = davinci_mcasp_startup,
+ .shutdown = davinci_mcasp_shutdown,
.trigger = davinci_mcasp_trigger,
.hw_params = davinci_mcasp_hw_params,
.set_fmt = davinci_mcasp_set_dai_fmt,
@@ -971,6 +1134,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
},
.ops = &davinci_mcasp_dai_ops,
+ .symmetric_samplebits = 1,
},
{
.name = "davinci-mcasp.1",
@@ -1194,6 +1358,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
struct resource *mem, *ioarea, *res, *dat;
struct davinci_mcasp_pdata *pdata;
struct davinci_mcasp *mcasp;
+ char *irq_name;
+ int irq;
int ret;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -1235,6 +1401,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (IS_ERR_VALUE(ret)) {
dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1246,7 +1413,21 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
}
mcasp->op_mode = pdata->op_mode;
- mcasp->tdm_slots = pdata->tdm_slots;
+ /* sanity check for tdm slots parameter */
+ if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
+ if (pdata->tdm_slots < 2) {
+ dev_err(&pdev->dev, "invalid tdm slots: %d\n",
+ pdata->tdm_slots);
+ mcasp->tdm_slots = 2;
+ } else if (pdata->tdm_slots > 32) {
+ dev_err(&pdev->dev, "invalid tdm slots: %d\n",
+ pdata->tdm_slots);
+ mcasp->tdm_slots = 32;
+ } else {
+ mcasp->tdm_slots = pdata->tdm_slots;
+ }
+ }
+
mcasp->num_serializer = pdata->num_serializer;
#ifdef CONFIG_PM_SLEEP
mcasp->context.xrsr_regs = devm_kzalloc(&pdev->dev,
@@ -1260,6 +1441,36 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp->dev = &pdev->dev;
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n",
+ dev_name(&pdev->dev));
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_rx_irq_handler,
+ IRQF_ONESHOT, irq_name, mcasp);
+ if (ret) {
+ dev_err(&pdev->dev, "RX IRQ request failed\n");
+ goto err;
+ }
+
+ mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
+ }
+
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n",
+ dev_name(&pdev->dev));
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_tx_irq_handler,
+ IRQF_ONESHOT, irq_name, mcasp);
+ if (ret) {
+ dev_err(&pdev->dev, "TX IRQ request failed\n");
+ goto err;
+ }
+
+ mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
+ }
+
dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (dat)
mcasp->dat_port = true;
@@ -1391,7 +1602,6 @@ static struct platform_driver davinci_mcasp_driver = {
.remove = davinci_mcasp_remove,
.driver = {
.name = "davinci-mcasp",
- .owner = THIS_MODULE,
.of_match_table = mcasp_dt_ids,
},
};
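The new McASP interrupt handlers follow the standard ALSA xrun pattern: check the status register, warn, and stop the running substream under the stream lock so user space sees an XRUN instead of silently corrupted timing. Because they are registered with devm_request_threaded_irq() (NULL primary handler, IRQF_ONESHOT), taking snd_pcm_stream_lock_irq() in them is safe; the same check-and-stop sequence is what snd_pcm_stop_xrun() wraps, which the fsl_dma change further down switches to. A bare-bones handler built on that helper, assuming the substream pointer is passed as the IRQ cookie (unlike the driver above, which looks it up from its private data):

#include <linux/interrupt.h>
#include <sound/pcm.h>

static irqreturn_t example_underrun_irq(int irq, void *data)
{
	struct snd_pcm_substream *substream = data;

	/* a real handler would first check the controller's status
	 * register for the underrun/overrun bit and ack only that */
	snd_pcm_stop_xrun(substream);

	return IRQ_HANDLED;
}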
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 98fbc451892a..79dc511180bf 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -253,6 +253,13 @@
#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
/*
+ * DAVINCI_MCASP_TXSTAT_REG - Transmitter Status Register Bits
+ * DAVINCI_MCASP_RXSTAT_REG - Receiver Status Register Bits
+ */
+#define XRERR BIT(8) /* Transmit/Receive error */
+#define XRDATA BIT(5) /* Transmit/Receive data ready */
+
+/*
* DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits
*/
#define MUTENA(val) (val)
@@ -279,6 +286,16 @@
#define TXDATADMADIS BIT(0)
/*
+ * DAVINCI_MCASP_EVTCTLR_REG - Receiver Interrupt Control Register Bits
+ */
+#define ROVRN BIT(0)
+
+/*
+ * DAVINCI_MCASP_EVTCTLX_REG - Transmitter Interrupt Control Register Bits
+ */
+#define XUNDRN BIT(0)
+
+/*
* DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
*/
#define FIFO_ENABLE BIT(16)
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index 77aef05588c3..5bee04279ebe 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -267,7 +267,6 @@ static struct platform_driver davinci_vcif_driver = {
.remove = davinci_vcif_remove,
.driver = {
.name = "davinci-vcif",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index e961388e6e9c..b93168d4f648 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -338,31 +338,34 @@ static int dw_i2s_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no i2s resource defined\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "i2s region already claimed\n");
- return -EBUSY;
- }
-
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_warn(&pdev->dev, "kzalloc fail\n");
return -ENOMEM;
}
- dev->i2s_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!dev->i2s_base) {
- dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
+ dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
+ if (!dw_i2s_dai) {
+ dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
return -ENOMEM;
}
+ dw_i2s_dai->ops = &dw_i2s_dai_ops;
+ dw_i2s_dai->suspend = dw_i2s_suspend;
+ dw_i2s_dai->resume = dw_i2s_resume;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no i2s resource defined\n");
+ return -ENODEV;
+ }
+
+ dev->i2s_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->i2s_base)) {
+ dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
+ return PTR_ERR(dev->i2s_base);
+ }
+
cap = pdata->cap;
dev->capability = cap;
dev->i2s_clk_cfg = pdata->i2s_clk_cfg;
@@ -388,13 +391,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
if (ret < 0)
goto err_clk_put;
- dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
- if (!dw_i2s_dai) {
- dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
- ret = -ENOMEM;
- goto err_clk_disable;
- }
-
if (cap & DWC_I2S_PLAY) {
dev_dbg(&pdev->dev, " designware: play supported\n");
dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
@@ -411,10 +407,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
dw_i2s_dai->capture.rates = pdata->snd_rates;
}
- dw_i2s_dai->ops = &dw_i2s_dai_ops;
- dw_i2s_dai->suspend = dw_i2s_suspend;
- dw_i2s_dai->resume = dw_i2s_resume;
-
dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, dev);
ret = snd_soc_register_component(&pdev->dev, &dw_i2s_component,
@@ -449,7 +441,6 @@ static struct platform_driver dw_i2s_driver = {
.remove = dw_i2s_remove,
.driver = {
.name = "designware-i2s",
- .owner = THIS_MODULE,
},
};
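The designware probe cleanup above replaces the open-coded platform_get_resource() + devm_request_mem_region() + devm_ioremap() sequence with devm_ioremap_resource(), which claims and maps the region in one call and returns an ERR_PTR on failure (including a NULL resource). The canonical shape, shown as a minimal illustrative probe body:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* also covers res == NULL */

	/* base is mapped and the region is claimed for this device */
	return 0;
}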
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index eb093d5b85c4..9ce70fc67b09 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -105,7 +105,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
int ret;
int int_port = 0, ext_port;
struct device_node *np = pdev->dev.of_node;
- struct device_node *ssi_np, *codec_np;
+ struct device_node *ssi_np = NULL, *codec_np = NULL;
eukrea_tlv320.dev = &pdev->dev;
if (np) {
@@ -217,8 +217,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
err:
if (ret)
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
- if (np)
- of_node_put(ssi_np);
+ of_node_put(ssi_np);
return ret;
}
@@ -239,7 +238,6 @@ MODULE_DEVICE_TABLE(of, imx_tlv320_dt_ids);
static struct platform_driver eukrea_tlv320_driver = {
.driver = {
.name = "eukrea_tlv320",
- .owner = THIS_MODULE,
.of_match_table = imx_tlv320_dt_ids,
},
.probe = eukrea_tlv320_probe,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 007c772f3cef..3f6959c8e2f7 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -51,6 +51,7 @@ struct codec_priv {
* @sysclk_freq[2]: SYSCLK rates for set_sysclk()
* @sysclk_dir[2]: SYSCLK directions for set_sysclk()
* @sysclk_id[2]: SYSCLK ids for set_sysclk()
+ * @slot_width: Slot width of each frame
*
* Note: [1] for tx and [0] for rx
*/
@@ -58,6 +59,7 @@ struct cpu_priv {
unsigned long sysclk_freq[2];
u32 sysclk_dir[2];
u32 sysclk_id[2];
+ u32 slot_width;
};
/**
@@ -125,7 +127,12 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
priv->sample_rate = params_rate(params);
priv->sample_format = params_format(params);
- if (priv->card.set_bias_level)
+ /*
+ * If the codec DAI is the clock master and all configuration has already
+ * been done in set_bias_level(), bypass the remaining settings in
+ * hw_params().
+ * Note: (dai_fmt & CBM_CFM) matches both CBM_CFM and CBM_CFS.
+ */
+ if (priv->card.set_bias_level && priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM)
return 0;
/* Specific configurations of DAIs starts from here */
@@ -137,6 +144,15 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ if (cpu_priv->slot_width) {
+ ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
+ cpu_priv->slot_width);
+ if (ret) {
+ dev_err(dev, "failed to set TDM slot for cpu dai\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -448,6 +464,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT;
priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT;
+ priv->cpu_priv.slot_width = 32;
priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
} else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
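
The fsl-asoc-card hunks add a per-card slot width programmed through snd_soc_dai_set_tdm_slot() and skip the CPU-side setup entirely when the codec is clock master. A rough sketch of that hw_params flow, with hypothetical names (foo_card_priv is not from the patch):

#include <sound/soc.h>

struct foo_card_priv {
	unsigned int dai_fmt;
	u32 slot_width;		/* 0 means "derive from the sample width" */
};

static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct foo_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
	int ret;

	/* codec drives the clocks: nothing left to configure on the CPU DAI */
	if (priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM)
		return 0;

	if (priv->slot_width) {
		/* 2 slots, TX/RX masks 0x3, each slot priv->slot_width bits wide */
		ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
					       priv->slot_width);
		if (ret)
			return ret;
	}
	return 0;
}
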
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index a609aafc994d..93d7e56c6066 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -151,14 +151,7 @@ static const struct snd_pcm_hardware fsl_dma_hardware = {
*/
static void fsl_dma_abort_stream(struct snd_pcm_substream *substream)
{
- unsigned long flags;
-
- snd_pcm_stream_lock_irqsave(substream, flags);
-
- if (snd_pcm_running(substream))
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
-
- snd_pcm_stream_unlock_irqrestore(substream, flags);
+ snd_pcm_stop_xrun(substream);
}
/**
@@ -971,7 +964,6 @@ MODULE_DEVICE_TABLE(of, fsl_soc_dma_ids);
static struct platform_driver fsl_soc_dma_driver = {
.driver = {
.name = "fsl-pcm-audio",
- .owner = THIS_MODULE,
.of_match_table = fsl_soc_dma_ids,
},
.probe = fsl_soc_dma_probe,
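
The fsl_dma change collapses the locked "stop on XRUN" sequence into the snd_pcm_stop_xrun() helper, which takes the stream lock and checks the running state itself. The equivalence, sketched:

#include <sound/pcm.h>

static void foo_abort_stream(struct snd_pcm_substream *substream)
{
	/*
	 * Open-coded equivalent of the call below:
	 *
	 *	snd_pcm_stream_lock_irqsave(substream, flags);
	 *	if (snd_pcm_running(substream))
	 *		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
	 */
	snd_pcm_stop_xrun(substream);
}
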
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a645e296199e..1c08ab13637c 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -513,10 +513,15 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
u32 width = snd_pcm_format_width(params_format(params));
u32 channels = params_channels(params);
u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+ u32 slot_width = width;
u32 bclk, mask, val;
int ret;
- bclk = params_rate(params) * esai_priv->slot_width * esai_priv->slots;
+ /* Override slot_width if it has been specifically set */
+ if (esai_priv->slot_width)
+ slot_width = esai_priv->slot_width;
+
+ bclk = params_rate(params) * slot_width * esai_priv->slots;
ret = fsl_esai_set_bclk(dai, tx, bclk);
if (ret)
@@ -538,7 +543,7 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), mask, val);
mask = ESAI_xCR_xSWS_MASK | (tx ? ESAI_xCR_PADC : 0);
- val = ESAI_xCR_xSWS(esai_priv->slot_width, width) | (tx ? ESAI_xCR_PADC : 0);
+ val = ESAI_xCR_xSWS(slot_width, width) | (tx ? ESAI_xCR_PADC : 0);
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
@@ -780,9 +785,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
return ret;
}
- /* Set a default slot size */
- esai_priv->slot_width = 32;
-
/* Set a default slot number */
esai_priv->slots = 2;
@@ -855,7 +857,6 @@ static struct platform_driver fsl_esai_driver = {
.probe = fsl_esai_probe,
.driver = {
.name = "fsl-esai-dai",
- .owner = THIS_MODULE,
.of_match_table = fsl_esai_dt_ids,
},
};
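
In the ESAI hunk the bit clock is now derived from an effective slot width that defaults to the sample width and is only overridden when set_tdm_slot() supplied one. A small sketch of that calculation (hypothetical helper):

#include <sound/pcm.h>
#include <sound/pcm_params.h>

static unsigned int foo_bclk_rate(struct snd_pcm_hw_params *params,
				  unsigned int slots,
				  unsigned int configured_slot_width)
{
	unsigned int slot_width = snd_pcm_format_width(params_format(params));

	/* honour an explicit slot width from set_tdm_slot(), if any */
	if (configured_slot_width)
		slot_width = configured_slot_width;

	/* e.g. 48000 Hz * 32 bits * 2 slots = 3.072 MHz */
	return params_rate(params) * slot_width * slots;
}
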
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 7eeb1dd8ce27..032d2d33619c 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -678,7 +678,6 @@ static struct platform_driver fsl_sai_driver = {
.probe = fsl_sai_probe,
.driver = {
.name = "fsl-sai",
- .owner = THIS_MODULE,
.of_match_table = fsl_sai_ids,
},
};
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 9b791621294c..af0429421fc8 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1283,7 +1283,6 @@ MODULE_DEVICE_TABLE(of, fsl_spdif_dt_ids);
static struct platform_driver fsl_spdif_driver = {
.driver = {
.name = "fsl-spdif-dai",
- .owner = THIS_MODULE,
.of_match_table = fsl_spdif_dt_ids,
},
.probe = fsl_spdif_probe,
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index e6955170dc42..a65f17d57ffb 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -67,8 +67,6 @@
/**
* FSLSSI_I2S_FORMATS: audio formats supported by the SSI
*
- * This driver currently only supports the SSI running in I2S slave mode.
- *
* The SSI has a limitation in that the samples must be in the same byte
* order as the host CPU. This is because when multiple bytes are written
* to the STX register, the bytes and bits must be written in the same
@@ -1099,7 +1097,7 @@ static const struct snd_soc_component_driver fsl_ssi_component = {
};
static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
@@ -1363,7 +1361,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
return PTR_ERR(ssi_private->regs);
}
- ssi_private->irq = irq_of_parse_and_map(np, 0);
+ ssi_private->irq = platform_get_irq(pdev, 0);
if (!ssi_private->irq) {
dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
return -ENXIO;
@@ -1389,7 +1387,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
if (ssi_private->soc->imx) {
ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
if (ret)
- goto error_irqmap;
+ return ret;
}
ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
@@ -1412,7 +1410,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
if (ret)
- goto error_asoc_register;
+ goto error_irq;
/*
* If codec-handle property is missing from SSI node, we assume
@@ -1460,10 +1458,6 @@ error_asoc_register:
if (ssi_private->soc->imx)
fsl_ssi_imx_clean(pdev, ssi_private);
-error_irqmap:
- if (ssi_private->use_dma)
- irq_dispose_mapping(ssi_private->irq);
-
return ret;
}
@@ -1480,16 +1474,12 @@ static int fsl_ssi_remove(struct platform_device *pdev)
if (ssi_private->soc->imx)
fsl_ssi_imx_clean(pdev, ssi_private);
- if (ssi_private->use_dma)
- irq_dispose_mapping(ssi_private->irq);
-
return 0;
}
static struct platform_driver fsl_ssi_driver = {
.driver = {
.name = "fsl-ssi-dai",
- .owner = THIS_MODULE,
.of_match_table = fsl_ssi_ids,
},
.probe = fsl_ssi_probe,
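
The SSI probe now takes its interrupt from platform_get_irq(), which removes the need to dispose of an OF IRQ mapping by hand. platform_get_irq() reports failure as a negative errno, so a typical probe (hypothetical, not the patch itself) checks it like this:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* e.g. -ENXIO or -EPROBE_DEFER */

	/* devm-managed, so no irq_dispose_mapping()/free_irq() on remove */
	ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
			       dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;

	return 0;
}
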
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 46f9beb6b273..d9050d946ae7 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -356,7 +356,6 @@ static struct platform_driver imx_audmux_driver = {
.id_table = imx_audmux_ids,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = imx_audmux_dt_ids,
}
};
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index a2fd7321b5a9..6bf5bce01a92 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -159,7 +159,6 @@ static int imx_mc13783_remove(struct platform_device *pdev)
static struct platform_driver imx_mc13783_audio_driver = {
.driver = {
.name = "imx_mc13783",
- .owner = THIS_MODULE,
},
.probe = imx_mc13783_probe,
.remove = imx_mc13783_remove
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 1cb22dd034eb..b99e0b5e00e9 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -175,10 +175,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
fail:
if (data && !IS_ERR(data->codec_clk))
clk_put(data->codec_clk);
- if (ssi_np)
- of_node_put(ssi_np);
- if (codec_np)
- of_node_put(codec_np);
+ of_node_put(ssi_np);
+ of_node_put(codec_np);
return ret;
}
@@ -202,7 +200,6 @@ MODULE_DEVICE_TABLE(of, imx_sgtl5000_dt_ids);
static struct platform_driver imx_sgtl5000_driver = {
.driver = {
.name = "imx-sgtl5000",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = imx_sgtl5000_dt_ids,
},
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index e1dc40143600..e94704f1b9ee 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -74,8 +74,7 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
end:
- if (spdif_np)
- of_node_put(spdif_np);
+ of_node_put(spdif_np);
return ret;
}
@@ -89,7 +88,6 @@ MODULE_DEVICE_TABLE(of, imx_spdif_dt_ids);
static struct platform_driver imx_spdif_driver = {
.driver = {
.name = "imx-spdif",
- .owner = THIS_MODULE,
.of_match_table = imx_spdif_dt_ids,
},
.probe = imx_spdif_audio_probe,
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index ab2fdd76b693..fa801e17c51e 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -382,7 +382,7 @@ static struct snd_soc_dai_driver imx_ssi_dai = {
static struct snd_soc_dai_driver imx_ac97_dai = {
.probe = imx_ssi_dai_probe,
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
@@ -647,7 +647,6 @@ static struct platform_driver imx_ssi_driver = {
.driver = {
.name = "imx-ssi",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 3a3d17ce6ba4..4caacb05a623 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -281,10 +281,8 @@ static int imx_wm8962_probe(struct platform_device *pdev)
clk_fail:
clk_disable_unprepare(data->codec_clk);
fail:
- if (ssi_np)
- of_node_put(ssi_np);
- if (codec_np)
- of_node_put(codec_np);
+ of_node_put(ssi_np);
+ of_node_put(codec_np);
return ret;
}
@@ -309,7 +307,6 @@ MODULE_DEVICE_TABLE(of, imx_wm8962_dt_ids);
static struct platform_driver imx_wm8962_driver = {
.driver = {
.name = "imx-wm8962",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = imx_wm8962_dt_ids,
},
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index f2b5d756b1f3..0b82e209b6e3 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -327,9 +327,6 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
goto capture_alloc_err;
}
- if (rtd->codec->ac97)
- rtd->codec->ac97->private_data = psc_dma;
-
return 0;
capture_alloc_err:
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 24eafa2cfbf4..08d2a8069b0a 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -237,7 +237,7 @@ static const struct snd_soc_dai_ops psc_ac97_digital_ops = {
static struct snd_soc_dai_driver psc_ac97_dai[] = {
{
.name = "mpc5200-psc-ac97.0",
- .ac97_control = 1,
+ .bus_control = true,
.probe = psc_ac97_probe,
.playback = {
.stream_name = "AC97 Playback",
@@ -257,7 +257,7 @@ static struct snd_soc_dai_driver psc_ac97_dai[] = {
},
{
.name = "mpc5200-psc-ac97.1",
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 SPDIF",
.channels_min = 1,
@@ -282,7 +282,6 @@ static const struct snd_soc_component_driver psc_ac97_component = {
static int psc_ac97_of_probe(struct platform_device *op)
{
int rc;
- struct snd_ac97 ac97;
struct mpc52xx_psc __iomem *regs;
rc = mpc5200_audio_dma_create(op);
@@ -304,7 +303,6 @@ static int psc_ac97_of_probe(struct platform_device *op)
psc_dma = dev_get_drvdata(&op->dev);
regs = psc_dma->psc_regs;
- ac97.private_data = psc_dma;
psc_dma->imr = 0;
out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
@@ -340,7 +338,6 @@ static struct platform_driver psc_ac97_driver = {
.remove = psc_ac97_of_remove,
.driver = {
.name = "mpc5200-psc-ac97",
- .owner = THIS_MODULE,
.of_match_table = psc_ac97_match,
},
};
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 5d07e8a74a21..51fb0c00fe73 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -229,7 +229,6 @@ static struct platform_driver psc_i2s_driver = {
.remove = psc_i2s_of_remove,
.driver = {
.name = "mpc5200-psc-i2s",
- .owner = THIS_MODULE,
.of_match_table = psc_i2s_match,
},
};
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index fa756d05b2f7..9621b9140df6 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -389,7 +389,6 @@ static struct platform_driver mpc8610_hpcd_driver = {
* in lowercase letters.
*/
.name = "snd-soc-mpc8610hpcd",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/fsl/mx27vis-aic32x4.c b/sound/soc/fsl/mx27vis-aic32x4.c
index f4c3bda5e69e..b1ced7b8d80c 100644
--- a/sound/soc/fsl/mx27vis-aic32x4.c
+++ b/sound/soc/fsl/mx27vis-aic32x4.c
@@ -229,7 +229,6 @@ static int mx27vis_aic32x4_remove(struct platform_device *pdev)
static struct platform_driver mx27vis_aic32x4_audio_driver = {
.driver = {
.name = "mx27vis",
- .owner = THIS_MODULE,
},
.probe = mx27vis_aic32x4_probe,
.remove = mx27vis_aic32x4_remove,
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index f75c3cf0e6de..71c1a7dc3aeb 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -398,7 +398,6 @@ static struct platform_driver p1022_ds_driver = {
* in lowercase letters.
*/
.name = "snd-soc-p1022ds",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c
index 9d89bb028621..ee29048424be 100644
--- a/sound/soc/fsl/p1022_rdk.c
+++ b/sound/soc/fsl/p1022_rdk.c
@@ -348,7 +348,6 @@ static struct platform_driver p1022_rdk_driver = {
* in lowercase letters.
*/
.name = "snd-soc-p1022rdk",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 3665f612819d..c44459d24c50 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -124,7 +124,6 @@ static struct platform_driver pcm030_fabric_driver = {
.remove = pcm030_fabric_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = pcm030_audio_match,
},
};
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index d1b7293c133e..fb9240fdc9b7 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -29,7 +29,9 @@ struct simple_card_data {
} *dai_props;
unsigned int mclk_fs;
int gpio_hp_det;
+ int gpio_hp_det_invert;
int gpio_mic_det;
+ int gpio_mic_det_invert;
struct snd_soc_dai_link dai_link[]; /* dynamically allocated */
};
@@ -148,6 +150,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
simple_card_hp_jack_pins);
simple_card_hp_jack_gpio.gpio = priv->gpio_hp_det;
+ simple_card_hp_jack_gpio.invert = priv->gpio_hp_det_invert;
snd_soc_jack_add_gpios(&simple_card_hp_jack, 1,
&simple_card_hp_jack_gpio);
}
@@ -159,6 +162,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
ARRAY_SIZE(simple_card_mic_jack_pins),
simple_card_mic_jack_pins);
simple_card_mic_jack_gpio.gpio = priv->gpio_mic_det;
+ simple_card_mic_jack_gpio.invert = priv->gpio_mic_det_invert;
snd_soc_jack_add_gpios(&simple_card_mic_jack, 1,
&simple_card_mic_jack_gpio);
}
@@ -226,6 +230,52 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
return 0;
}
+static int asoc_simple_card_parse_daifmt(struct device_node *node,
+ struct simple_card_data *priv,
+ struct device_node *codec,
+ char *prefix, int idx)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
+ struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
+ struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, prefix,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ if (strlen(prefix) && !bitclkmaster && !framemaster) {
+ /*
+ * No dai-link level master setting was found at the sound node level
+ * either; revert to legacy DT parsing and take the settings from the
+ * codec node.
+ */
+ dev_dbg(dev, "Revert to legacy daifmt parsing\n");
+
+ cpu_dai->fmt = codec_dai->fmt =
+ snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
+ (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
+ } else {
+ if (codec == bitclkmaster)
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
+ else
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
+
+ cpu_dai->fmt = daifmt;
+ codec_dai->fmt = daifmt;
+ }
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ return 0;
+}
+
static int asoc_simple_card_dai_link_of(struct device_node *node,
struct simple_card_data *priv,
int idx,
@@ -234,10 +284,8 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
- struct device_node *np = NULL;
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
+ struct device_node *cpu = NULL;
+ struct device_node *codec = NULL;
char *name;
char prop[128];
char *prefix = "";
@@ -247,85 +295,36 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
if (is_top_level_node)
prefix = "simple-audio-card,";
- daifmt = snd_soc_of_parse_daifmt(node, prefix,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
snprintf(prop, sizeof(prop), "%scpu", prefix);
- np = of_get_child_by_name(node, prop);
- if (!np) {
+ cpu = of_get_child_by_name(node, prop);
+
+ snprintf(prop, sizeof(prop), "%scodec", prefix);
+ codec = of_get_child_by_name(node, prop);
+
+ if (!cpu || !codec) {
ret = -EINVAL;
dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
goto dai_link_of_err;
}
- ret = asoc_simple_card_sub_parse_of(np, &dai_props->cpu_dai,
+ ret = asoc_simple_card_parse_daifmt(node, priv,
+ codec, prefix, idx);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_sub_parse_of(cpu, &dai_props->cpu_dai,
&dai_link->cpu_of_node,
&dai_link->cpu_dai_name,
&cpu_args);
if (ret < 0)
goto dai_link_of_err;
- dai_props->cpu_dai.fmt = daifmt;
- switch (((np == bitclkmaster) << 4) | (np == framemaster)) {
- case 0x11:
- dai_props->cpu_dai.fmt |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- case 0x10:
- dai_props->cpu_dai.fmt |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- case 0x01:
- dai_props->cpu_dai.fmt |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- default:
- dai_props->cpu_dai.fmt |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- }
-
- of_node_put(np);
- snprintf(prop, sizeof(prop), "%scodec", prefix);
- np = of_get_child_by_name(node, prop);
- if (!np) {
- ret = -EINVAL;
- dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
- goto dai_link_of_err;
- }
-
- ret = asoc_simple_card_sub_parse_of(np, &dai_props->codec_dai,
+ ret = asoc_simple_card_sub_parse_of(codec, &dai_props->codec_dai,
&dai_link->codec_of_node,
&dai_link->codec_dai_name, NULL);
if (ret < 0)
goto dai_link_of_err;
- if (strlen(prefix) && !bitclkmaster && !framemaster) {
- /*
- * No DAI link level and master setting was found
- * from sound node level, revert back to legacy DT
- * parsing and take the settings from codec node.
- */
- dev_dbg(dev, "%s: Revert to legacy daifmt parsing\n",
- __func__);
- dai_props->cpu_dai.fmt = dai_props->codec_dai.fmt =
- snd_soc_of_parse_daifmt(np, NULL, NULL, NULL) |
- (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
- } else {
- dai_props->codec_dai.fmt = daifmt;
- switch (((np == bitclkmaster) << 4) | (np == framemaster)) {
- case 0x11:
- dai_props->codec_dai.fmt |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- case 0x10:
- dai_props->codec_dai.fmt |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- case 0x01:
- dai_props->codec_dai.fmt |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- default:
- dai_props->codec_dai.fmt |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- }
- }
-
if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name) {
ret = -EINVAL;
goto dai_link_of_err;
@@ -368,12 +367,9 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
dai_link->cpu_dai_name = NULL;
dai_link_of_err:
- if (np)
- of_node_put(np);
- if (bitclkmaster)
- of_node_put(bitclkmaster);
- if (framemaster)
- of_node_put(framemaster);
+ of_node_put(cpu);
+ of_node_put(codec);
+
return ret;
}
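
The new asoc_simple_card_parse_daifmt() above replaces the old shift-and-switch tables with a direct comparison of the codec node against the bit clock and frame masters returned by snd_soc_of_parse_daifmt(). Reduced to its core, the selection is just (hypothetical helper):

#include <linux/of.h>
#include <sound/soc.h>

/* pick the SND_SOC_DAIFMT_CB?_CF? bits as seen from the codec side */
static unsigned int foo_master_fmt(struct device_node *codec,
				   struct device_node *bitclkmaster,
				   struct device_node *framemaster)
{
	if (codec == bitclkmaster)
		return (codec == framemaster) ?
			SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;

	return (codec == framemaster) ?
		SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
}
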
@@ -381,6 +377,7 @@ static int asoc_simple_card_parse_of(struct device_node *node,
struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
+ enum of_gpio_flags flags;
u32 val;
int ret;
@@ -436,13 +433,15 @@ static int asoc_simple_card_parse_of(struct device_node *node,
return ret;
}
- priv->gpio_hp_det = of_get_named_gpio(node,
- "simple-audio-card,hp-det-gpio", 0);
+ priv->gpio_hp_det = of_get_named_gpio_flags(node,
+ "simple-audio-card,hp-det-gpio", 0, &flags);
+ priv->gpio_hp_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
if (priv->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
- priv->gpio_mic_det = of_get_named_gpio(node,
- "simple-audio-card,mic-det-gpio", 0);
+ priv->gpio_mic_det = of_get_named_gpio_flags(node,
+ "simple-audio-card,mic-det-gpio", 0, &flags);
+ priv->gpio_mic_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
if (priv->gpio_mic_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -457,18 +456,13 @@ static int asoc_simple_card_unref(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct snd_soc_dai_link *dai_link;
- struct device_node *np;
int num_links;
for (num_links = 0, dai_link = card->dai_link;
num_links < card->num_links;
num_links++, dai_link++) {
- np = (struct device_node *) dai_link->cpu_of_node;
- if (np)
- of_node_put(np);
- np = (struct device_node *) dai_link->codec_of_node;
- if (np)
- of_node_put(np);
+ of_node_put(dai_link->cpu_of_node);
+ of_node_put(dai_link->codec_of_node);
}
return 0;
}
@@ -590,7 +584,6 @@ MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
static struct platform_driver asoc_simple_card = {
.driver = {
.name = "asoc-simple-card",
- .owner = THIS_MODULE,
.of_match_table = asoc_simple_of_match,
},
.probe = asoc_simple_card_probe,
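
simple-card also reads its jack detect GPIOs with of_get_named_gpio_flags() now, so an OF_GPIO_ACTIVE_LOW flag becomes the jack GPIO's invert setting. A condensed sketch of that parsing, assuming a hypothetical foo_priv structure:

#include <linux/of_gpio.h>
#include <linux/errno.h>

struct foo_priv {
	int gpio_hp_det;
	int gpio_hp_det_invert;
};

static int foo_parse_hp_gpio(struct device_node *node, struct foo_priv *priv)
{
	enum of_gpio_flags flags;

	priv->gpio_hp_det = of_get_named_gpio_flags(node,
			"simple-audio-card,hp-det-gpio", 0, &flags);
	if (priv->gpio_hp_det == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* remembered later as snd_soc_jack_gpio.invert */
	priv->gpio_hp_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
	return 0;
}
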
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f5b4a9c79cdf..e989ecf046c9 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -3,6 +3,7 @@ config SND_MFLD_MACHINE
depends on INTEL_SCU_IPC
select SND_SOC_SN95031
select SND_SST_MFLD_PLATFORM
+ select SND_SST_IPC_PCI
help
This adds support for ASoC machine driver for Intel(R) MID Medfield platform
used as an ALSA device in the audio subsystem in Intel(R) MID devices
@@ -12,10 +13,23 @@ config SND_MFLD_MACHINE
config SND_SST_MFLD_PLATFORM
tristate
+config SND_SST_IPC
+ tristate
+
+config SND_SST_IPC_PCI
+ tristate
+ select SND_SST_IPC
+
+config SND_SST_IPC_ACPI
+ tristate
+ select SND_SST_IPC
+ depends on ACPI
+
config SND_SOC_INTEL_SST
tristate "ASoC support for Intel(R) Smart Sound Technology"
select SND_SOC_INTEL_SST_ACPI if ACPI
depends on (X86 || COMPILE_TEST)
+ depends on DW_DMAC_CORE
help
This adds support for Intel(R) Smart Sound Technology (SST).
Say Y if you have such a device
@@ -32,7 +46,8 @@ config SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
- depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+ depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
+ I2C_DESIGNWARE_PLATFORM
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
@@ -61,7 +76,8 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
- depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC
+ depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
+ I2C_DESIGNWARE_PLATFORM
select SND_SOC_INTEL_HASWELL
select SND_COMPRESS_OFFLOAD
select SND_SOC_RT286
@@ -70,3 +86,27 @@ config SND_SOC_INTEL_BROADWELL_MACH
Ultrabook platforms.
Say Y if you have such a device
If unsure select "N".
+
+config SND_SOC_INTEL_BYTCR_RT5640_MACH
+ tristate "ASoC Audio DSP Support for MID BYT Platform"
+ depends on X86
+ select SND_SOC_RT5640
+ select SND_SST_MFLD_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for the ASoC machine driver for the Intel(R) MID Baytrail platform
+ used as an ALSA device in the audio subsystem on Intel(R) MID devices
+ Say Y if you have such a device
+ If unsure select "N".
+
+config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
+ tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
+ depends on X86_INTEL_LPSS
+ select SND_SOC_RT5670
+ select SND_SST_MFLD_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5672 audio codec.
+ Say Y if you have such a device
+ If unsure select "N".
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index f841786dad15..e928ec385300 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -26,8 +26,15 @@ snd-soc-sst-haswell-objs := haswell.o
snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-bytcr-dpcm-rt5640-objs := bytcr_dpcm_rt5640.o
+snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
+obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-dpcm-rt5640.o
+obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
+
+# DSP driver
+obj-$(CONFIG_SND_SST_IPC) += sst/
diff --git a/sound/soc/intel/broadwell.c b/sound/soc/intel/broadwell.c
index 0e550f14028f..7cf95d5d5d80 100644
--- a/sound/soc/intel/broadwell.c
+++ b/sound/soc/intel/broadwell.c
@@ -19,6 +19,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <sound/jack.h>
#include <sound/pcm_params.h>
#include "sst-dsp.h"
@@ -26,8 +27,26 @@
#include "../codecs/rt286.h"
+static struct snd_soc_jack broadwell_headset;
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin broadwell_headset_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+
+static const struct snd_kcontrol_new broadwell_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+};
+
static const struct snd_soc_dapm_widget broadwell_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
SND_SOC_DAPM_MIC("DMIC1", NULL),
@@ -42,7 +61,7 @@ static const struct snd_soc_dapm_route broadwell_rt286_map[] = {
{"Speaker", NULL, "SPOL"},
/* HP jack connectors - unknown if we have jack detect */
- {"Headphones", NULL, "HPO Pin"},
+ {"Headphone Jack", NULL, "HPO Pin"},
/* other jacks */
{"MIC1", NULL, "Mic Jack"},
@@ -57,6 +76,27 @@ static const struct snd_soc_dapm_route broadwell_rt286_map[] = {
{"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
};
+static int broadwell_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ int ret = 0;
+ ret = snd_soc_jack_new(codec, "Headset",
+ SND_JACK_HEADSET | SND_JACK_BTN_0, &broadwell_headset);
+
+ if (ret)
+ return ret;
+
+ ret = snd_soc_jack_add_pins(&broadwell_headset,
+ ARRAY_SIZE(broadwell_headset_pins),
+ broadwell_headset_pins);
+ if (ret)
+ return ret;
+
+ rt286_mic_detect(codec, &broadwell_headset);
+ return 0;
+}
+
+
static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -116,7 +156,7 @@ static int broadwell_rtd_init(struct snd_soc_pcm_runtime *rtd)
}
/* always connected - check HP for jack detect */
- snd_soc_dapm_enable_pin(dapm, "Headphones");
+ snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
snd_soc_dapm_enable_pin(dapm, "Speaker");
snd_soc_dapm_enable_pin(dapm, "Mic Jack");
snd_soc_dapm_enable_pin(dapm, "Line Jack");
@@ -131,7 +171,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
/* Front End DAI links */
{
.name = "System PCM",
- .stream_name = "System Playback",
+ .stream_name = "System Playback/Capture",
.cpu_dai_name = "System Pin",
.platform_name = "haswell-pcm-audio",
.dynamic = 1,
@@ -140,6 +180,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.init = broadwell_rtd_init,
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_playback = 1,
+ .dpcm_capture = 1,
},
{
.name = "Offload0",
@@ -174,18 +215,6 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
},
- {
- .name = "Capture PCM",
- .stream_name = "Capture",
- .cpu_dai_name = "Capture Pin",
- .platform_name = "haswell-pcm-audio",
- .dynamic = 1,
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_capture = 1,
- },
-
/* Back End DAI links */
{
/* SSP0 - Codec */
@@ -196,6 +225,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.no_pcm = 1,
.codec_name = "i2c-INT343A:00",
.codec_dai_name = "rt286-aif1",
+ .init = broadwell_rt286_codec_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ignore_suspend = 1,
@@ -213,6 +243,8 @@ static struct snd_soc_card broadwell_rt286 = {
.owner = THIS_MODULE,
.dai_link = broadwell_rt286_dais,
.num_links = ARRAY_SIZE(broadwell_rt286_dais),
+ .controls = broadwell_controls,
+ .num_controls = ARRAY_SIZE(broadwell_controls),
.dapm_widgets = broadwell_widgets,
.num_dapm_widgets = ARRAY_SIZE(broadwell_widgets),
.dapm_routes = broadwell_rt286_map,
@@ -238,7 +270,6 @@ static struct platform_driver broadwell_audio = {
.remove = broadwell_audio_remove,
.driver = {
.name = "broadwell-audio",
- .owner = THIS_MODULE,
},
};
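
The Broadwell machine driver now builds a headset jack in the codec init callback and hands it to the RT286 driver for mic detection. A minimal sketch of that jack setup, with hypothetical names and using the jack API as it exists in this kernel series:

#include <sound/soc.h>
#include <sound/jack.h>

static struct snd_soc_jack foo_headset;

static struct snd_soc_jack_pin foo_headset_pins[] = {
	{ .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE },
	{ .pin = "Mic Jack",       .mask = SND_JACK_MICROPHONE },
};

static int foo_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	int ret;

	/* in this series the jack object is created against the codec */
	ret = snd_soc_jack_new(rtd->codec, "Headset",
			       SND_JACK_HEADSET | SND_JACK_BTN_0,
			       &foo_headset);
	if (ret)
		return ret;

	/* map jack events onto the DAPM pins declared by the card */
	return snd_soc_jack_add_pins(&foo_headset,
				     ARRAY_SIZE(foo_headset_pins),
				     foo_headset_pins);
}
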
diff --git a/sound/soc/intel/byt-max98090.c b/sound/soc/intel/byt-max98090.c
index d52681e7225e..9832afe7d22c 100644
--- a/sound/soc/intel/byt-max98090.c
+++ b/sound/soc/intel/byt-max98090.c
@@ -181,7 +181,6 @@ static struct platform_driver byt_max98090_driver = {
.remove = byt_max98090_remove,
.driver = {
.name = "byt-max98090",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/intel/byt-rt5640.c b/sound/soc/intel/byt-rt5640.c
index e03abdf21c1b..0cba7830c5e9 100644
--- a/sound/soc/intel/byt-rt5640.c
+++ b/sound/soc/intel/byt-rt5640.c
@@ -224,7 +224,6 @@ static struct platform_driver byt_rt5640_audio = {
.probe = byt_rt5640_probe,
.driver = {
.name = "byt-rt5640",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
new file mode 100644
index 000000000000..f5d0fc1ab10c
--- /dev/null
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -0,0 +1,230 @@
+/*
+ * byt_cr_dpcm_rt5640.c - ASoC Machine driver for Intel Byt CR platform
+ *
+ * Copyright (C) 2014 Intel Corp
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../codecs/rt5640.h"
+#include "sst-atom-controls.h"
+
+static const struct snd_soc_dapm_widget byt_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+};
+
+static const struct snd_soc_dapm_route byt_audio_map[] = {
+ {"IN2P", NULL, "Headset Mic"},
+ {"IN2N", NULL, "Headset Mic"},
+ {"Headset Mic", NULL, "MICBIAS1"},
+ {"IN1P", NULL, "MICBIAS1"},
+ {"LDO2", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Ext Spk", NULL, "SPOLP"},
+ {"Ext Spk", NULL, "SPOLN"},
+ {"Ext Spk", NULL, "SPORP"},
+ {"Ext Spk", NULL, "SPORN"},
+
+ {"AIF1 Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx"},
+ {"codec_in1", NULL, "ssp2 Rx"},
+ {"ssp2 Rx", NULL, "AIF1 Capture"},
+};
+
+static const struct snd_kcontrol_new byt_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
+};
+
+static int byt_aif1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ snd_soc_dai_set_bclk_ratio(codec_dai, 50);
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_PLL1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec clock %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5640_PLL1_S_BCLK1,
+ params_rate(params) * 50,
+ params_rate(params) * 512);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_pcm_stream byt_dai_params = {
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+};
+
+static int byt_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /* The DSP will convert the FE rate to 48 kHz, stereo, 24 bit */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP2 to 24-bit */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
+static unsigned int rates_48000[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+ .count = ARRAY_SIZE(rates_48000),
+ .list = rates_48000,
+};
+
+static int byt_aif1_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_48000);
+}
+
+static struct snd_soc_ops byt_aif1_ops = {
+ .startup = byt_aif1_startup,
+};
+
+static struct snd_soc_ops byt_be_ssp2_ops = {
+ .hw_params = byt_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link byt_dailink[] = {
+ [MERR_DPCM_AUDIO] = {
+ .name = "Baytrail Audio Port",
+ .stream_name = "Baytrail Audio",
+ .cpu_dai_name = "media-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &byt_aif1_ops,
+ },
+ [MERR_DPCM_COMPR] = {
+ .name = "Baytrail Compressed Port",
+ .stream_name = "Baytrail Compress",
+ .cpu_dai_name = "compress-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ },
+ /* back ends */
+ {
+ .name = "SSP2-Codec",
+ .be_id = 1,
+ .cpu_dai_name = "ssp2-port",
+ .platform_name = "sst-mfld-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "rt5640-aif1",
+ .codec_name = "i2c-10EC5640:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .be_hw_params_fixup = byt_codec_fixup,
+ .ignore_suspend = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &byt_be_ssp2_ops,
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_byt = {
+ .name = "baytrailcraudio",
+ .dai_link = byt_dailink,
+ .num_links = ARRAY_SIZE(byt_dailink),
+ .dapm_widgets = byt_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(byt_dapm_widgets),
+ .dapm_routes = byt_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(byt_audio_map),
+ .controls = byt_mc_controls,
+ .num_controls = ARRAY_SIZE(byt_mc_controls),
+};
+
+static int snd_byt_mc_probe(struct platform_device *pdev)
+{
+ int ret_val = 0;
+
+ /* register the soc card */
+ snd_soc_card_byt.dev = &pdev->dev;
+
+ ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_byt);
+ if (ret_val) {
+ dev_err(&pdev->dev, "devm_snd_soc_register_card failed %d\n", ret_val);
+ return ret_val;
+ }
+ platform_set_drvdata(pdev, &snd_soc_card_byt);
+ return ret_val;
+}
+
+static struct platform_driver snd_byt_mc_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bytt100_rt5640",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_byt_mc_probe,
+};
+
+module_platform_driver(snd_byt_mc_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
+MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bytrt5640-audio");
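
The Baytrail CR machine driver clocks the RT5640 from its bit clock: with a 50*fs BCLK ratio the codec PLL multiplies BCLK up to the 512*fs system clock set just above. A worked example of those numbers for a 48 kHz stream (plain user-space C, only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int rate = 48000;
	unsigned int bclk = rate * 50;		/* 2,400,000 Hz  - PLL input  */
	unsigned int sysclk = rate * 512;	/* 24,576,000 Hz - PLL output */

	printf("BCLK = %u Hz, SYSCLK = %u Hz\n", bclk, sysclk);
	return 0;
}
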
diff --git a/sound/soc/intel/cht_bsw_rt5672.c b/sound/soc/intel/cht_bsw_rt5672.c
new file mode 100644
index 000000000000..9b8b561171b7
--- /dev/null
+++ b/sound/soc/intel/cht_bsw_rt5672.c
@@ -0,0 +1,285 @@
+/*
+ * cht_bsw_rt5672.c - ASoC Machine driver for Intel Cherryview-based platforms
+ * Cherrytrail and Braswell, with RT5672 codec.
+ *
+ * Copyright (C) 2014 Intel Corp
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * Mengdong Lin <mengdong.lin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../codecs/rt5670.h"
+#include "sst-atom-controls.h"
+
+/* The platform clock #3 outputs a 19.2 MHz clock to the codec as I2S MCLK */
+#define CHT_PLAT_CLK_3_HZ 19200000
+#define CHT_CODEC_DAI "rt5670-aif1"
+
+static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
+{
+ int i;
+
+ for (i = 0; i < card->num_rtd; i++) {
+ struct snd_soc_pcm_runtime *rtd;
+
+ rtd = card->rtd + i;
+ if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI,
+ strlen(CHT_CODEC_DAI)))
+ return rtd->codec_dai;
+ }
+ return NULL;
+}
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+
+ codec_dai = cht_get_codec_dai(card);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
+ return -EIO;
+ }
+
+ if (!SND_SOC_DAPM_EVENT_OFF(event))
+ return 0;
+
+ /* Set codec sysclk source to its internal clock because codec PLL will
+ * be off when idle and MCLK will also be off by ACPI when codec is
+ * runtime suspended. Codec needs clock for jack detection and button
+ * press.
+ */
+ snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_RCCLK,
+ 0, SND_SOC_CLOCK_IN);
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route cht_audio_map[] = {
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN1N", NULL, "Headset Mic"},
+ {"DMIC L1", NULL, "Int Mic"},
+ {"DMIC R1", NULL, "Int Mic"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"Ext Spk", NULL, "SPOLP"},
+ {"Ext Spk", NULL, "SPOLN"},
+ {"Ext Spk", NULL, "SPORP"},
+ {"Ext Spk", NULL, "SPORN"},
+ {"AIF1 Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx"},
+ {"codec_in1", NULL, "ssp2 Rx"},
+ {"ssp2 Rx", NULL, "AIF1 Capture"},
+ {"Headphone", NULL, "Platform Clock"},
+ {"Headset Mic", NULL, "Platform Clock"},
+ {"Int Mic", NULL, "Platform Clock"},
+ {"Ext Spk", NULL, "Platform Clock"},
+};
+
+static const struct snd_kcontrol_new cht_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
+};
+
+static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ /* set codec PLL source to the 19.2MHz platform clock (MCLK) */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_MCLK,
+ CHT_PLAT_CLK_3_HZ, params_rate(params) * 512);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
+ /* set codec sysclk source to PLL */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_PLL1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ int ret;
+ struct snd_soc_dai *codec_dai = runtime->codec_dai;
+
+ /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
+ if (ret < 0) {
+ dev_err(runtime->dev, "can't set codec TDM slot %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /* The DSP will convert the FE rate to 48 kHz, stereo, 24 bit */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP2 to 24-bit */
+ snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK],
+ SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
+static unsigned int rates_48000[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+ .count = ARRAY_SIZE(rates_48000),
+ .list = rates_48000,
+};
+
+static int cht_aif1_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_48000);
+}
+
+static struct snd_soc_ops cht_aif1_ops = {
+ .startup = cht_aif1_startup,
+};
+
+static struct snd_soc_ops cht_be_ssp2_ops = {
+ .hw_params = cht_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link cht_dailink[] = {
+ /* Front End DAI links */
+ [MERR_DPCM_AUDIO] = {
+ .name = "Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "media-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ .ignore_suspend = 1,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &cht_aif1_ops,
+ },
+ [MERR_DPCM_COMPR] = {
+ .name = "Compressed Port",
+ .stream_name = "Compress",
+ .cpu_dai_name = "compress-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP2 - Codec */
+ .name = "SSP2-Codec",
+ .be_id = 1,
+ .cpu_dai_name = "ssp2-port",
+ .platform_name = "sst-mfld-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "rt5670-aif1",
+ .codec_name = "i2c-10EC5670:00",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .init = cht_codec_init,
+ .be_hw_params_fixup = cht_codec_fixup,
+ .ignore_suspend = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &cht_be_ssp2_ops,
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_cht = {
+ .name = "cherrytrailcraudio",
+ .dai_link = cht_dailink,
+ .num_links = ARRAY_SIZE(cht_dailink),
+ .dapm_widgets = cht_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cht_dapm_widgets),
+ .dapm_routes = cht_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cht_audio_map),
+ .controls = cht_mc_controls,
+ .num_controls = ARRAY_SIZE(cht_mc_controls),
+};
+
+static int snd_cht_mc_probe(struct platform_device *pdev)
+{
+ int ret_val = 0;
+
+ /* register the soc card */
+ snd_soc_card_cht.dev = &pdev->dev;
+ ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
+ if (ret_val) {
+ dev_err(&pdev->dev,
+ "snd_soc_register_card failed %d\n", ret_val);
+ return ret_val;
+ }
+ platform_set_drvdata(pdev, &snd_soc_card_cht);
+ return ret_val;
+}
+
+static struct platform_driver snd_cht_mc_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cht-bsw-rt5672",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_cht_mc_probe,
+};
+
+module_platform_driver(snd_cht_mc_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Cherrytrail & Braswell Machine driver");
+MODULE_AUTHOR("Subhransu S. Prusty, Mengdong Lin");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cht-bsw-rt5672");
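
Both new machine drivers pin their SSP2 back end to 48 kHz, stereo, S24_LE through a be_hw_params_fixup. The same fixup can be written with the pcm_params helpers; a hypothetical version (hw_param_mask() addresses the same format mask the patch indexes by hand):

#include <sound/soc.h>
#include <sound/pcm_params.h>

static int foo_be_fixup(struct snd_soc_pcm_runtime *rtd,
			struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);

	/* collapse the intervals to a single value: 48 kHz, 2 channels */
	rate->min = rate->max = 48000;
	channels->min = channels->max = 2;

	/* mark S24_LE as a supported sample format for the back end */
	snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
		     SNDRV_PCM_FORMAT_S24_LE);
	return 0;
}
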
diff --git a/sound/soc/intel/haswell.c b/sound/soc/intel/haswell.c
index 3981982674ac..35edf51a52aa 100644
--- a/sound/soc/intel/haswell.c
+++ b/sound/soc/intel/haswell.c
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
/* Front End DAI links */
{
.name = "System",
- .stream_name = "System Playback",
+ .stream_name = "System Playback/Capture",
.cpu_dai_name = "System Pin",
.platform_name = "haswell-pcm-audio",
.dynamic = 1,
@@ -118,6 +118,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.init = haswell_rtd_init,
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_playback = 1,
+ .dpcm_capture = 1,
},
{
.name = "Offload0",
@@ -152,17 +153,6 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
},
- {
- .name = "Capture",
- .stream_name = "Capture",
- .cpu_dai_name = "Capture Pin",
- .platform_name = "haswell-pcm-audio",
- .dynamic = 1,
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_capture = 1,
- },
/* Back End DAI links */
{
@@ -209,7 +199,6 @@ static struct platform_driver haswell_audio = {
.probe = haswell_audio_probe,
.driver = {
.name = "haswell-audio",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/intel/mfld_machine.c b/sound/soc/intel/mfld_machine.c
index 031d78783fc8..90b7a57713a0 100644
--- a/sound/soc/intel/mfld_machine.c
+++ b/sound/soc/intel/mfld_machine.c
@@ -420,7 +420,6 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_mfld_mc_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "msic_audio",
},
.probe = snd_mfld_mc_probe,
diff --git a/sound/soc/intel/sst-acpi.c b/sound/soc/intel/sst-acpi.c
index 03d0a166b635..b3d84560fbb5 100644
--- a/sound/soc/intel/sst-acpi.c
+++ b/sound/soc/intel/sst-acpi.c
@@ -275,7 +275,6 @@ static struct platform_driver sst_acpi_driver = {
.remove = sst_acpi_remove,
.driver = {
.name = "sst-acpi",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(sst_acpi_match),
},
};
diff --git a/sound/soc/intel/sst-atom-controls.c b/sound/soc/intel/sst-atom-controls.c
index 7104a34181a9..90aa5c0476f3 100644
--- a/sound/soc/intel/sst-atom-controls.c
+++ b/sound/soc/intel/sst-atom-controls.c
@@ -15,6 +15,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
+ * In the DPCM driver model, when a particular FE/BE/mixer/pipe is active
+ * we forward its settings and parameters; the remaining values are kept in
+ * the driver and forwarded when DAPM enables the corresponding widget.
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -81,6 +84,183 @@ static int sst_fill_and_send_cmd(struct sst_data *drv,
return ret;
}
+/**
+ * tx map value is a bitfield where each bit represents a FW channel
+ *
+ * 3 2 1 0 # 0 = codec0, 1 = codec1
+ * RLRLRLRL # 3, 4 = reserved
+ *
+ * e.g. slot 0 rx map = 00001100b -> data from slot 0 goes into codec_in1 L,R
+ */
+static u8 sst_ssp_tx_map[SST_MAX_TDM_SLOTS] = {
+ 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default rx map */
+};
+
+/**
+ * rx map value is a bitfield where each bit represents a slot
+ *
+ * 76543210 # 0 = slot 0, 1 = slot 1
+ *
+ * e.g. codec1_0 tx map = 00000101b -> data from codec_out1_0 goes into slot 0, 2
+ */
+static u8 sst_ssp_rx_map[SST_MAX_TDM_SLOTS] = {
+ 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default tx map */
+};
+
+/**
+ * NOTE: this is invoked with lock held
+ */
+static int sst_send_slot_map(struct sst_data *drv)
+{
+ struct sst_param_sba_ssp_slot_map cmd;
+
+ SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+ cmd.header.command_id = SBA_SET_SSP_SLOT_MAP;
+ cmd.header.length = sizeof(struct sst_param_sba_ssp_slot_map)
+ - sizeof(struct sst_dsp_header);
+
+ cmd.param_id = SBA_SET_SSP_SLOT_MAP;
+ cmd.param_len = sizeof(cmd.rx_slot_map) + sizeof(cmd.tx_slot_map)
+ + sizeof(cmd.ssp_index);
+ cmd.ssp_index = SSP_CODEC;
+
+ memcpy(cmd.rx_slot_map, &sst_ssp_tx_map[0], sizeof(cmd.rx_slot_map));
+ memcpy(cmd.tx_slot_map, &sst_ssp_rx_map[0], sizeof(cmd.tx_slot_map));
+
+ return sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS,
+ SST_FLAG_BLOCKED, SST_TASK_SBA, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+}
+
+int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = e->max;
+
+ if (uinfo->value.enumerated.item > e->max - 1)
+ uinfo->value.enumerated.item = e->max - 1;
+ strcpy(uinfo->value.enumerated.name,
+ e->texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+/**
+ * sst_slot_get - get the status of the interleaver/deinterleaver control
+ *
+ * Searches the map where the control status is stored, and gets the
+ * channel/slot which is currently set for this enumerated control. Since it is
+ * an enumerated control, there is only one possible value.
+ */
+static int sst_slot_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sst_enum *e = (void *)kcontrol->private_value;
+ struct snd_soc_component *c = snd_kcontrol_chip(kcontrol);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+ unsigned int ctl_no = e->reg;
+ unsigned int is_tx = e->tx;
+ unsigned int val, mux;
+ u8 *map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map;
+
+ mutex_lock(&drv->lock);
+ val = 1 << ctl_no;
+ /* search which slot/channel has this bit set - there should be only one */
+ for (mux = e->max; mux > 0; mux--)
+ if (map[mux - 1] & val)
+ break;
+
+ ucontrol->value.enumerated.item[0] = mux;
+ mutex_unlock(&drv->lock);
+
+ dev_dbg(c->dev, "%s - %s map = %#x\n",
+ is_tx ? "tx channel" : "rx slot",
+ e->texts[mux], mux ? map[mux - 1] : -1);
+ return 0;
+}
+
+/* sst_check_and_send_slot_map - helper for checking power state and sending
+ * slot map cmd
+ *
+ * called with lock held
+ */
+static int sst_check_and_send_slot_map(struct sst_data *drv, struct snd_kcontrol *kcontrol)
+{
+ struct sst_enum *e = (void *)kcontrol->private_value;
+ int ret = 0;
+
+ if (e->w && e->w->power)
+ ret = sst_send_slot_map(drv);
+ else
+ dev_err(&drv->pdev->dev, "Slot control: %s doesn't have DAPM widget!!!\n",
+ kcontrol->id.name);
+ return ret;
+}
+
+/**
+ * sst_slot_put - set the status of interleaver/deinterleaver control
+ *
+ * (de)interleaver controls are defined in opposite sense to be user-friendly
+ *
+ * Instead of the enum value being the value written to the register, it is the
+ * register address; and the kcontrol number (register num) is the value written
+ * to the register. This is so that there can be only one value for each
+ * slot/channel since there is only one control for each slot/channel.
+ *
+ * This means that whenever an enum is set, we need to clear the bit
+ * for that kcontrol_no for all the interleaver OR deinterleaver registers
+ */
+static int sst_slot_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+ struct sst_enum *e = (void *)kcontrol->private_value;
+ int i, ret = 0;
+ unsigned int ctl_no = e->reg;
+ unsigned int is_tx = e->tx;
+ unsigned int slot_channel_no;
+ unsigned int val, mux;
+ u8 *map;
+
+ map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map;
+
+ val = 1 << ctl_no;
+ mux = ucontrol->value.enumerated.item[0];
+ if (mux > e->max - 1)
+ return -EINVAL;
+
+ mutex_lock(&drv->lock);
+ /* first clear all registers of this bit */
+ for (i = 0; i < e->max; i++)
+ map[i] &= ~val;
+
+ if (mux == 0) {
+ /* kctl set to 'none' and we reset the bits so send IPC */
+ ret = sst_check_and_send_slot_map(drv, kcontrol);
+
+ mutex_unlock(&drv->lock);
+ return ret;
+ }
+
+ /* offset by one to take "None" into account */
+ slot_channel_no = mux - 1;
+ map[slot_channel_no] |= val;
+
+ dev_dbg(c->dev, "%s %s map = %#x\n",
+ is_tx ? "tx channel" : "rx slot",
+ e->texts[mux], map[slot_channel_no]);
+
+ ret = sst_check_and_send_slot_map(drv, kcontrol);
+
+ mutex_unlock(&drv->lock);
+ return ret;
+}
+
static int sst_send_algo_cmd(struct sst_data *drv,
struct sst_algo_control *bc)
{
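
The interleaver/deinterleaver controls above keep their state in per-slot bitmasks: a put first clears the control's bit in every slot and then sets it in the chosen one, so a channel can only ever feed a single slot. Stripped of the IPC and locking, the bookkeeping amounts to (stand-alone illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define FOO_MAX_SLOTS 8

/* one byte per slot; bit N set means "channel N is routed to this slot" */
static uint8_t foo_map[FOO_MAX_SLOTS];

static void foo_route_channel(unsigned int channel, unsigned int slot_plus_1)
{
	uint8_t bit = 1u << channel;
	int i;

	/* a channel may feed only one slot: clear its bit everywhere first */
	for (i = 0; i < FOO_MAX_SLOTS; i++)
		foo_map[i] &= ~bit;

	/* slot_plus_1 == 0 means "None": leave the channel unrouted */
	if (slot_plus_1)
		foo_map[slot_plus_1 - 1] |= bit;
}

int main(void)
{
	foo_route_channel(0, 1);	/* channel 0 -> slot 0 */
	foo_route_channel(1, 1);	/* channel 1 -> slot 0 */
	printf("slot 0 map = %#x\n", foo_map[0]);	/* prints 0x3 */
	return 0;
}
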
@@ -104,6 +284,34 @@ static int sst_send_algo_cmd(struct sst_data *drv,
return ret;
}
+/**
+ * sst_find_and_send_pipe_algo - send all the algo parameters for a pipe
+ *
+ * The algos which are in each pipeline are sent to the firmware one by one
+ *
+ * Called with lock held
+ */
+static int sst_find_and_send_pipe_algo(struct sst_data *drv,
+ const char *pipe, struct sst_ids *ids)
+{
+ int ret = 0;
+ struct sst_algo_control *bc;
+ struct sst_module *algo = NULL;
+
+ dev_dbg(&drv->pdev->dev, "Enter: widget=%s\n", pipe);
+
+ list_for_each_entry(algo, &ids->algo_list, node) {
+ bc = (void *)algo->kctl->private_value;
+
+ dev_dbg(&drv->pdev->dev, "Found algo control name=%s pipe=%s\n",
+ algo->kctl->id.name, pipe);
+ ret = sst_send_algo_cmd(drv, bc);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
static int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -162,6 +370,743 @@ static int sst_algo_control_set(struct snd_kcontrol *kcontrol,
return ret;
}
+static int sst_gain_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = mc->stereo ? 2 : 1;
+ uinfo->value.integer.min = mc->min;
+ uinfo->value.integer.max = mc->max;
+
+ return 0;
+}
+
+/**
+ * sst_send_gain_cmd - send the gain algorithm IPC to the FW
+ * @gv: the stored value of gain (also contains rampduration)
+ * @mute: flag that indicates whether this was called from the
+ * digital_mute callback or directly. If called from the
+ * digital_mute callback, module will be muted/unmuted based on this
+ * flag. The flag is always 0 if called directly.
+ *
+ * Called with sst_data.lock held
+ *
+ * The user-set gain value is sent only if the user-controllable 'mute' control
+ * is OFF (indicated by gv->mute). Otherwise, the mute value (MIN value) is
+ * sent.
+ */
+static int sst_send_gain_cmd(struct sst_data *drv, struct sst_gain_value *gv,
+ u16 task_id, u16 loc_id, u16 module_id, int mute)
+{
+ struct sst_cmd_set_gain_dual cmd;
+
+ dev_dbg(&drv->pdev->dev, "Enter\n");
+
+ cmd.header.command_id = MMX_SET_GAIN;
+ SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+ cmd.gain_cell_num = 1;
+
+ if (mute || gv->mute) {
+ cmd.cell_gains[0].cell_gain_left = SST_GAIN_MIN_VALUE;
+ cmd.cell_gains[0].cell_gain_right = SST_GAIN_MIN_VALUE;
+ } else {
+ cmd.cell_gains[0].cell_gain_left = gv->l_gain;
+ cmd.cell_gains[0].cell_gain_right = gv->r_gain;
+ }
+
+ SST_FILL_DESTINATION(2, cmd.cell_gains[0].dest,
+ loc_id, module_id);
+ cmd.cell_gains[0].gain_time_constant = gv->ramp_duration;
+
+ cmd.header.length = sizeof(struct sst_cmd_set_gain_dual)
+ - sizeof(struct sst_dsp_header);
+
+ /* we are with lock held, so call the unlocked api to send */
+ return sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS,
+ SST_FLAG_BLOCKED, task_id, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+}
+
+static int sst_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+ struct sst_gain_value *gv = mc->gain_val;
+
+ switch (mc->type) {
+ case SST_GAIN_TLV:
+ ucontrol->value.integer.value[0] = gv->l_gain;
+ ucontrol->value.integer.value[1] = gv->r_gain;
+ break;
+
+ case SST_GAIN_MUTE:
+ ucontrol->value.integer.value[0] = gv->mute ? 1 : 0;
+ break;
+
+ case SST_GAIN_RAMP_DURATION:
+ ucontrol->value.integer.value[0] = gv->ramp_duration;
+ break;
+
+ default:
+ dev_err(component->dev, "Invalid Input- gain type:%d\n",
+ mc->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sst_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt);
+ struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+ struct sst_gain_value *gv = mc->gain_val;
+
+ mutex_lock(&drv->lock);
+
+ switch (mc->type) {
+ case SST_GAIN_TLV:
+ gv->l_gain = ucontrol->value.integer.value[0];
+ gv->r_gain = ucontrol->value.integer.value[1];
+ dev_dbg(cmpnt->dev, "%s: Volume %d, %d\n",
+ mc->pname, gv->l_gain, gv->r_gain);
+ break;
+
+ case SST_GAIN_MUTE:
+ gv->mute = !!ucontrol->value.integer.value[0];
+ dev_dbg(cmpnt->dev, "%s: Mute %d\n", mc->pname, gv->mute);
+ break;
+
+ case SST_GAIN_RAMP_DURATION:
+ gv->ramp_duration = ucontrol->value.integer.value[0];
+		dev_dbg(cmpnt->dev, "%s: Ramp Delay %d\n",
+ mc->pname, gv->ramp_duration);
+ break;
+
+ default:
+ mutex_unlock(&drv->lock);
+ dev_err(cmpnt->dev, "Invalid Input- gain type:%d\n",
+ mc->type);
+ return -EINVAL;
+ }
+
+ if (mc->w && mc->w->power)
+ ret = sst_send_gain_cmd(drv, gv, mc->task_id,
+ mc->pipe_id | mc->instance_id, mc->module_id, 0);
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int sst_set_pipe_gain(struct sst_ids *ids,
+ struct sst_data *drv, int mute);
+
+static int sst_send_pipe_module_params(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol)
+{
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+ struct sst_ids *ids = w->priv;
+
+ mutex_lock(&drv->lock);
+ sst_find_and_send_pipe_algo(drv, w->name, ids);
+ sst_set_pipe_gain(ids, drv, 0);
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int sst_generic_modules_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ return sst_send_pipe_module_params(w, k);
+ return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10, 10, 0);
+
+/* Lookup table to convert MIXER SW bit positions to SWM input ids */
+static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = {
+ [SST_IP_CODEC0] = SST_SWM_IN_CODEC0,
+ [SST_IP_CODEC1] = SST_SWM_IN_CODEC1,
+ [SST_IP_LOOP0] = SST_SWM_IN_SPROT_LOOP,
+ [SST_IP_LOOP1] = SST_SWM_IN_MEDIA_LOOP1,
+ [SST_IP_LOOP2] = SST_SWM_IN_MEDIA_LOOP2,
+ [SST_IP_PCM0] = SST_SWM_IN_PCM0,
+ [SST_IP_PCM1] = SST_SWM_IN_PCM1,
+ [SST_IP_MEDIA0] = SST_SWM_IN_MEDIA0,
+ [SST_IP_MEDIA1] = SST_SWM_IN_MEDIA1,
+ [SST_IP_MEDIA2] = SST_SWM_IN_MEDIA2,
+ [SST_IP_MEDIA3] = SST_SWM_IN_MEDIA3,
+};
+
+/**
+ * fill_swm_input - fill in the SWM input ids given the register
+ * @cmpnt: ASoC component
+ * @swm_input: array of SWM input ids to fill
+ * @reg: bit-field indicating which mixer inputs are ON
+ *
+ * Use the lookup table to get the input-id for each set bit and fill it in
+ * the structure.
+ */
+static int fill_swm_input(struct snd_soc_component *cmpnt,
+ struct swm_input_ids *swm_input, unsigned int reg)
+{
+ uint i, is_set, nb_inputs = 0;
+ u16 input_loc_id;
+
+ dev_dbg(cmpnt->dev, "reg: %#x\n", reg);
+ for (i = 0; i < SST_SWM_INPUT_COUNT; i++) {
+ is_set = reg & BIT(i);
+ if (!is_set)
+ continue;
+
+ input_loc_id = swm_mixer_input_ids[i];
+ SST_FILL_DESTINATION(2, swm_input->input_id,
+ input_loc_id, SST_DEFAULT_MODULE_ID);
+ nb_inputs++;
+ swm_input++;
+ dev_dbg(cmpnt->dev, "input id: %#x, nb_inputs: %d\n",
+ input_loc_id, nb_inputs);
+
+ if (nb_inputs == SST_CMD_SWM_MAX_INPUTS) {
+			dev_warn(cmpnt->dev, "SET_SWM cmd max inputs reached\n");
+ break;
+ }
+ }
+ return nb_inputs;
+}
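+
+/*
+ * Example (illustrative): if reg has only the SST_IP_CODEC0 and SST_IP_CODEC1
+ * bits set, fill_swm_input() fills two swm_input_ids entries with the location
+ * ids SST_SWM_IN_CODEC0 and SST_SWM_IN_CODEC1 (module SST_DEFAULT_MODULE_ID)
+ * and returns nb_inputs = 2.
+ */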
+
+
+/* Called with drv->lock held */
+static int sst_set_pipe_gain(struct sst_ids *ids,
+ struct sst_data *drv, int mute)
+{
+ int ret = 0;
+ struct sst_gain_mixer_control *mc;
+ struct sst_gain_value *gv;
+ struct sst_module *gain = NULL;
+
+ list_for_each_entry(gain, &ids->gain_list, node) {
+ struct snd_kcontrol *kctl = gain->kctl;
+
+ dev_dbg(&drv->pdev->dev, "control name=%s\n", kctl->id.name);
+ mc = (void *)kctl->private_value;
+ gv = mc->gain_val;
+
+ ret = sst_send_gain_cmd(drv, gv, mc->task_id,
+ mc->pipe_id | mc->instance_id, mc->module_id, mute);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct sst_cmd_set_swm cmd;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt);
+ struct sst_ids *ids = w->priv;
+ bool set_mixer = false;
+ struct soc_mixer_control *mc;
+ int val = 0;
+ int i = 0;
+
+ dev_dbg(cmpnt->dev, "widget = %s\n", w->name);
+ /*
+ * Identify which mixer input is on and send the bitmap of the
+ * inputs as an IPC to the DSP.
+ */
+ for (i = 0; i < w->num_kcontrols; i++) {
+ if (dapm_kcontrol_get_value(w->kcontrols[i])) {
+ mc = (struct soc_mixer_control *)(w->kcontrols[i])->private_value;
+ val |= 1 << mc->shift;
+ }
+ }
+ dev_dbg(cmpnt->dev, "val = %#x\n", val);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ case SND_SOC_DAPM_POST_PMD:
+ set_mixer = true;
+ break;
+ case SND_SOC_DAPM_POST_REG:
+ if (w->power)
+ set_mixer = true;
+ break;
+ default:
+ set_mixer = false;
+ }
+
+	if (!set_mixer)
+ return 0;
+
+ if (SND_SOC_DAPM_EVENT_ON(event) ||
+ event == SND_SOC_DAPM_POST_REG)
+ cmd.switch_state = SST_SWM_ON;
+ else
+ cmd.switch_state = SST_SWM_OFF;
+
+ SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+ /* MMX_SET_SWM == SBA_SET_SWM */
+ cmd.header.command_id = SBA_SET_SWM;
+
+ SST_FILL_DESTINATION(2, cmd.output_id,
+ ids->location_id, SST_DEFAULT_MODULE_ID);
+ cmd.nb_inputs = fill_swm_input(cmpnt, &cmd.input[0], val);
+ cmd.header.length = offsetof(struct sst_cmd_set_swm, input)
+ - sizeof(struct sst_dsp_header)
+ + (cmd.nb_inputs * sizeof(cmd.input[0]));
+
+ return sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+ ids->task_id, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+}
+
+/* SBA mixers - 16 inputs */
+#define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name) \
+ static const struct snd_kcontrol_new kctl_name[] = { \
+ SOC_DAPM_SINGLE("codec_in0 Switch", SND_SOC_NOPM, SST_IP_CODEC0, 1, 0), \
+ SOC_DAPM_SINGLE("codec_in1 Switch", SND_SOC_NOPM, SST_IP_CODEC1, 1, 0), \
+ SOC_DAPM_SINGLE("sprot_loop_in Switch", SND_SOC_NOPM, SST_IP_LOOP0, 1, 0), \
+ SOC_DAPM_SINGLE("media_loop1_in Switch", SND_SOC_NOPM, SST_IP_LOOP1, 1, 0), \
+ SOC_DAPM_SINGLE("media_loop2_in Switch", SND_SOC_NOPM, SST_IP_LOOP2, 1, 0), \
+ SOC_DAPM_SINGLE("pcm0_in Switch", SND_SOC_NOPM, SST_IP_PCM0, 1, 0), \
+ SOC_DAPM_SINGLE("pcm1_in Switch", SND_SOC_NOPM, SST_IP_PCM1, 1, 0), \
+ }
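+
+/*
+ * Illustrative use: SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls)
+ * expands to a static array of seven switch kcontrols ("codec_in0 Switch",
+ * "codec_in1 Switch", ...) that is plugged into an SST_SWM_MIXER widget as
+ * its kcontrol_news; the shift of each switch (SST_IP_*) selects the bit
+ * collected in sst_swm_mixer_event().
+ */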
+
+#define SST_SBA_MIXER_GRAPH_MAP(mix_name) \
+ { mix_name, "codec_in0 Switch", "codec_in0" }, \
+ { mix_name, "codec_in1 Switch", "codec_in1" }, \
+ { mix_name, "sprot_loop_in Switch", "sprot_loop_in" }, \
+ { mix_name, "media_loop1_in Switch", "media_loop1_in" }, \
+ { mix_name, "media_loop2_in Switch", "media_loop2_in" }, \
+ { mix_name, "pcm0_in Switch", "pcm0_in" }, \
+ { mix_name, "pcm1_in Switch", "pcm1_in" }
+
+#define SST_MMX_DECLARE_MIX_CONTROLS(kctl_name) \
+ static const struct snd_kcontrol_new kctl_name[] = { \
+ SOC_DAPM_SINGLE("media0_in Switch", SND_SOC_NOPM, SST_IP_MEDIA0, 1, 0), \
+ SOC_DAPM_SINGLE("media1_in Switch", SND_SOC_NOPM, SST_IP_MEDIA1, 1, 0), \
+ SOC_DAPM_SINGLE("media2_in Switch", SND_SOC_NOPM, SST_IP_MEDIA2, 1, 0), \
+ SOC_DAPM_SINGLE("media3_in Switch", SND_SOC_NOPM, SST_IP_MEDIA3, 1, 0), \
+ }
+
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media0_controls);
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media1_controls);
+
+/* 18 SBA mixers */
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm1_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm2_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_sprot_l0_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l1_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_voip_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls);
+
+/*
+ * sst_handle_vb_timer - Start/Stop the DSP scheduler
+ *
+ * The DSP expects the first command to be SBA_VB_START, so send it at the
+ * first startup, and the last command to be SBA_IDLE, so send it at the last
+ * shutdown.
+ *
+ * Keep an internal refcount so that the command is sent only on the first
+ * start and the last stop. Since the SST driver does its own refcounting,
+ * always invoke its power ops.
+ */
+int sst_handle_vb_timer(struct snd_soc_dai *dai, bool enable)
+{
+ int ret = 0;
+ struct sst_cmd_generic cmd;
+ struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
+ static int timer_usage;
+
+ if (enable)
+ cmd.header.command_id = SBA_VB_START;
+ else
+ cmd.header.command_id = SBA_IDLE;
+ dev_dbg(dai->dev, "enable=%u, usage=%d\n", enable, timer_usage);
+
+ SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+ cmd.header.length = 0;
+
+ if (enable) {
+ ret = sst->ops->power(sst->dev, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ mutex_lock(&drv->lock);
+ if (enable)
+ timer_usage++;
+ else
+ timer_usage--;
+
+ /*
+ * Send the command only if this call is the first enable or last
+ * disable
+ */
+ if ((enable && (timer_usage == 1)) ||
+ (!enable && (timer_usage == 0))) {
+ ret = sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_CMD,
+ SST_FLAG_BLOCKED, SST_TASK_SBA, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+ if (ret && enable) {
+ timer_usage--;
+ enable = false;
+ }
+ }
+ mutex_unlock(&drv->lock);
+
+ if (!enable)
+ sst->ops->power(sst->dev, false);
+ return ret;
+}
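+
+/*
+ * Example (illustrative): with two front-end streams starting back to back,
+ * the first sst_handle_vb_timer(dai, true) sends SBA_VB_START and leaves
+ * timer_usage at 1; the second only bumps it to 2. The matching disables
+ * decrement the count and only the last one (timer_usage back to 0) sends
+ * SBA_IDLE. The SST power ops are invoked on every call since the SST driver
+ * refcounts them itself.
+ */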
+
+/**
+ * sst_ssp_configs - SSP configuration for the media use case
+ */
+static const struct sst_ssp_config sst_ssp_configs = {
+ .ssp_id = SSP_CODEC,
+ .bits_per_slot = 24,
+ .slots = 4,
+ .ssp_mode = SSP_MODE_MASTER,
+ .pcm_mode = SSP_PCM_MODE_NETWORK,
+ .duplex = SSP_DUPLEX,
+ .ssp_protocol = SSP_MODE_PCM,
+ .fs_width = 1,
+ .fs_frequency = SSP_FS_48_KHZ,
+ .active_slot_map = 0xF,
+ .start_delay = 0,
+};
+
+int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable)
+{
+ struct sst_cmd_sba_hw_set_ssp cmd;
+ struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
+ const struct sst_ssp_config *config;
+
+ dev_info(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
+
+ SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+ cmd.header.command_id = SBA_HW_SET_SSP;
+ cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp)
+ - sizeof(struct sst_dsp_header);
+
+ config = &sst_ssp_configs;
+ dev_dbg(dai->dev, "ssp_id: %u\n", config->ssp_id);
+
+ if (enable)
+ cmd.switch_state = SST_SWITCH_ON;
+ else
+ cmd.switch_state = SST_SWITCH_OFF;
+
+ cmd.selection = config->ssp_id;
+ cmd.nb_bits_per_slots = config->bits_per_slot;
+ cmd.nb_slots = config->slots;
+ cmd.mode = config->ssp_mode | (config->pcm_mode << 1);
+ cmd.duplex = config->duplex;
+ cmd.active_tx_slot_map = config->active_slot_map;
+ cmd.active_rx_slot_map = config->active_slot_map;
+ cmd.frame_sync_frequency = config->fs_frequency;
+ cmd.frame_sync_polarity = SSP_FS_ACTIVE_HIGH;
+ cmd.data_polarity = 1;
+ cmd.frame_sync_width = config->fs_width;
+ cmd.ssp_protocol = config->ssp_protocol;
+ cmd.start_delay = config->start_delay;
+ cmd.reserved1 = cmd.reserved2 = 0xFF;
+
+ return sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+ SST_TASK_SBA, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+}
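+
+/*
+ * With the sst_ssp_configs values above, the resulting SBA_HW_SET_SSP command
+ * (illustrative summary) configures the codec SSP as a 4-slot, 24-bit,
+ * network-mode PCM master at 48 kHz frame sync with all four TX/RX slots
+ * active.
+ */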
+
+static int sst_set_be_modules(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ int ret = 0;
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+
+ dev_dbg(c->dev, "Enter: widget=%s\n", w->name);
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = sst_send_slot_map(drv);
+ if (ret)
+ return ret;
+ ret = sst_send_pipe_module_params(w, k);
+ }
+ return ret;
+}
+
+static int sst_set_media_path(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ int ret = 0;
+ struct sst_cmd_set_media_path cmd;
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+ struct sst_ids *ids = w->priv;
+
+ dev_dbg(c->dev, "widget=%s\n", w->name);
+ dev_dbg(c->dev, "task=%u, location=%#x\n",
+ ids->task_id, ids->location_id);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ cmd.switch_state = SST_PATH_ON;
+ else
+ cmd.switch_state = SST_PATH_OFF;
+
+ SST_FILL_DESTINATION(2, cmd.header.dst,
+ ids->location_id, SST_DEFAULT_MODULE_ID);
+
+ /* MMX_SET_MEDIA_PATH == SBA_SET_MEDIA_PATH */
+ cmd.header.command_id = MMX_SET_MEDIA_PATH;
+ cmd.header.length = sizeof(struct sst_cmd_set_media_path)
+ - sizeof(struct sst_dsp_header);
+
+ ret = sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+ ids->task_id, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+ if (ret)
+ return ret;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = sst_send_pipe_module_params(w, k);
+ return ret;
+}
+
+static int sst_set_media_loop(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ int ret = 0;
+ struct sst_cmd_sba_set_media_loop_map cmd;
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct sst_data *drv = snd_soc_component_get_drvdata(c);
+ struct sst_ids *ids = w->priv;
+
+ dev_dbg(c->dev, "Enter:widget=%s\n", w->name);
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ cmd.switch_state = SST_SWITCH_ON;
+ else
+ cmd.switch_state = SST_SWITCH_OFF;
+
+ SST_FILL_DESTINATION(2, cmd.header.dst,
+ ids->location_id, SST_DEFAULT_MODULE_ID);
+
+ cmd.header.command_id = SBA_SET_MEDIA_LOOP_MAP;
+ cmd.header.length = sizeof(struct sst_cmd_sba_set_media_loop_map)
+ - sizeof(struct sst_dsp_header);
+	cmd.param.part.cfg.rate = 2;		/* 48 kHz */
+
+	cmd.param.part.cfg.format = ids->format;	/* stereo/mono */
+	cmd.param.part.cfg.s_length = 1;	/* 24-bit left justified */
+ cmd.map = 0; /* Algo sequence: Gain - DRP - FIR - IIR */
+
+ ret = sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+ SST_TASK_SBA, 0, &cmd,
+ sizeof(cmd.header) + cmd.header.length);
+ if (ret)
+ return ret;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = sst_send_pipe_module_params(w, k);
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+ SST_AIF_IN("codec_in0", sst_set_be_modules),
+ SST_AIF_IN("codec_in1", sst_set_be_modules),
+ SST_AIF_OUT("codec_out0", sst_set_be_modules),
+ SST_AIF_OUT("codec_out1", sst_set_be_modules),
+
+ /* Media Paths */
+ /* MediaX IN paths are set via ALLOC, so no SET_MEDIA_PATH command */
+ SST_PATH_INPUT("media0_in", SST_TASK_MMX, SST_SWM_IN_MEDIA0, sst_generic_modules_event),
+ SST_PATH_INPUT("media1_in", SST_TASK_MMX, SST_SWM_IN_MEDIA1, NULL),
+ SST_PATH_INPUT("media2_in", SST_TASK_MMX, SST_SWM_IN_MEDIA2, sst_set_media_path),
+ SST_PATH_INPUT("media3_in", SST_TASK_MMX, SST_SWM_IN_MEDIA3, NULL),
+ SST_PATH_OUTPUT("media0_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_set_media_path),
+ SST_PATH_OUTPUT("media1_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_set_media_path),
+
+ /* SBA PCM Paths */
+ SST_PATH_INPUT("pcm0_in", SST_TASK_SBA, SST_SWM_IN_PCM0, sst_set_media_path),
+ SST_PATH_INPUT("pcm1_in", SST_TASK_SBA, SST_SWM_IN_PCM1, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm0_out", SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm1_out", SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_set_media_path),
+ SST_PATH_OUTPUT("pcm2_out", SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_set_media_path),
+
+ /* SBA Loops */
+ SST_PATH_INPUT("sprot_loop_in", SST_TASK_SBA, SST_SWM_IN_SPROT_LOOP, NULL),
+ SST_PATH_INPUT("media_loop1_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP1, NULL),
+ SST_PATH_INPUT("media_loop2_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP2, NULL),
+ SST_PATH_MEDIA_LOOP_OUTPUT("sprot_loop_out", SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, SST_FMT_MONO, sst_set_media_loop),
+ SST_PATH_MEDIA_LOOP_OUTPUT("media_loop1_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, SST_FMT_MONO, sst_set_media_loop),
+ SST_PATH_MEDIA_LOOP_OUTPUT("media_loop2_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, SST_FMT_STEREO, sst_set_media_loop),
+
+ /* Media Mixers */
+ SST_SWM_MIXER("media0_out mix 0", SND_SOC_NOPM, SST_TASK_MMX, SST_SWM_OUT_MEDIA0,
+ sst_mix_media0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media1_out mix 0", SND_SOC_NOPM, SST_TASK_MMX, SST_SWM_OUT_MEDIA1,
+ sst_mix_media1_controls, sst_swm_mixer_event),
+
+ /* SBA PCM mixers */
+ SST_SWM_MIXER("pcm0_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM0,
+ sst_mix_pcm0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("pcm1_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM1,
+ sst_mix_pcm1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("pcm2_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM2,
+ sst_mix_pcm2_controls, sst_swm_mixer_event),
+
+ /* SBA Loop mixers */
+ SST_SWM_MIXER("sprot_loop_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP,
+ sst_mix_sprot_l0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media_loop1_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1,
+ sst_mix_media_l1_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("media_loop2_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2,
+ sst_mix_media_l2_controls, sst_swm_mixer_event),
+
+ /* SBA Backend mixers */
+ SST_SWM_MIXER("codec_out0 mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_CODEC0,
+ sst_mix_codec0_controls, sst_swm_mixer_event),
+ SST_SWM_MIXER("codec_out1 mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_CODEC1,
+ sst_mix_codec1_controls, sst_swm_mixer_event),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ {"media0_in", NULL, "Compress Playback"},
+ {"media1_in", NULL, "Headset Playback"},
+ {"media2_in", NULL, "pcm0_out"},
+
+ {"media0_out mix 0", "media0_in Switch", "media0_in"},
+ {"media0_out mix 0", "media1_in Switch", "media1_in"},
+ {"media0_out mix 0", "media2_in Switch", "media2_in"},
+ {"media0_out mix 0", "media3_in Switch", "media3_in"},
+ {"media1_out mix 0", "media0_in Switch", "media0_in"},
+ {"media1_out mix 0", "media1_in Switch", "media1_in"},
+ {"media1_out mix 0", "media2_in Switch", "media2_in"},
+ {"media1_out mix 0", "media3_in Switch", "media3_in"},
+
+ {"media0_out", NULL, "media0_out mix 0"},
+ {"media1_out", NULL, "media1_out mix 0"},
+ {"pcm0_in", NULL, "media0_out"},
+ {"pcm1_in", NULL, "media1_out"},
+
+ {"Headset Capture", NULL, "pcm1_out"},
+ {"Headset Capture", NULL, "pcm2_out"},
+ {"pcm0_out", NULL, "pcm0_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm0_out mix 0"),
+ {"pcm1_out", NULL, "pcm1_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm1_out mix 0"),
+ {"pcm2_out", NULL, "pcm2_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("pcm2_out mix 0"),
+
+ {"media_loop1_in", NULL, "media_loop1_out"},
+ {"media_loop1_out", NULL, "media_loop1_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("media_loop1_out mix 0"),
+ {"media_loop2_in", NULL, "media_loop2_out"},
+ {"media_loop2_out", NULL, "media_loop2_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("media_loop2_out mix 0"),
+ {"sprot_loop_in", NULL, "sprot_loop_out"},
+ {"sprot_loop_out", NULL, "sprot_loop_out mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("sprot_loop_out mix 0"),
+
+ {"codec_out0", NULL, "codec_out0 mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"),
+ {"codec_out1", NULL, "codec_out1 mix 0"},
+ SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"),
+
+};
+static const char * const slot_names[] = {
+ "none",
+ "slot 0", "slot 1", "slot 2", "slot 3",
+ "slot 4", "slot 5", "slot 6", "slot 7", /* not supported by FW */
+};
+
+static const char * const channel_names[] = {
+ "none",
+ "codec_out0_0", "codec_out0_1", "codec_out1_0", "codec_out1_1",
+ "codec_out2_0", "codec_out2_1", "codec_out3_0", "codec_out3_1", /* not supported by FW */
+};
+
+#define SST_INTERLEAVER(xpname, slot_name, slotno) \
+ SST_SSP_SLOT_CTL(xpname, "tx interleaver", slot_name, slotno, true, \
+ channel_names, sst_slot_get, sst_slot_put)
+
+#define SST_DEINTERLEAVER(xpname, channel_name, channel_no) \
+ SST_SSP_SLOT_CTL(xpname, "rx deinterleaver", channel_name, channel_no, false, \
+ slot_names, sst_slot_get, sst_slot_put)
+
+static const struct snd_kcontrol_new sst_slot_controls[] = {
+ SST_INTERLEAVER("codec_out", "slot 0", 0),
+ SST_INTERLEAVER("codec_out", "slot 1", 1),
+ SST_INTERLEAVER("codec_out", "slot 2", 2),
+ SST_INTERLEAVER("codec_out", "slot 3", 3),
+ SST_DEINTERLEAVER("codec_in", "codec_in0_0", 0),
+ SST_DEINTERLEAVER("codec_in", "codec_in0_1", 1),
+ SST_DEINTERLEAVER("codec_in", "codec_in1_0", 2),
+ SST_DEINTERLEAVER("codec_in", "codec_in1_1", 3),
+};
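+
+/*
+ * The macros above expand (via SST_SSP_SLOT_CTL) into enum controls named,
+ * e.g., "codec_out tx interleaver slot 0" (choosing among channel_names) and
+ * "codec_in rx deinterleaver codec_in0_0" (choosing among slot_names); their
+ * get/put handlers are sst_slot_get()/sst_slot_put().
+ */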
+
+/* Gain helper with min/max set */
+#define SST_GAIN(name, path_id, task_id, instance, gain_var) \
+ SST_GAIN_KCONTROLS(name, "Gain", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \
+ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \
+ sst_gain_get, sst_gain_put, \
+ SST_MODULE_ID_GAIN_CELL, path_id, instance, task_id, \
+ sst_gain_tlv_common, gain_var)
+
+#define SST_VOLUME(name, path_id, task_id, instance, gain_var) \
+ SST_GAIN_KCONTROLS(name, "Volume", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \
+ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \
+ sst_gain_get, sst_gain_put, \
+ SST_MODULE_ID_VOLUME, path_id, instance, task_id, \
+ sst_gain_tlv_common, gain_var)
+
+static struct sst_gain_value sst_gains[];
+
+static const struct snd_kcontrol_new sst_gain_controls[] = {
+ SST_GAIN("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[0]),
+ SST_GAIN("media1_in", SST_PATH_INDEX_MEDIA1_IN, SST_TASK_MMX, 0, &sst_gains[1]),
+ SST_GAIN("media2_in", SST_PATH_INDEX_MEDIA2_IN, SST_TASK_MMX, 0, &sst_gains[2]),
+ SST_GAIN("media3_in", SST_PATH_INDEX_MEDIA3_IN, SST_TASK_MMX, 0, &sst_gains[3]),
+
+ SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[4]),
+ SST_GAIN("pcm1_in", SST_PATH_INDEX_PCM1_IN, SST_TASK_SBA, 0, &sst_gains[5]),
+ SST_GAIN("pcm1_out", SST_PATH_INDEX_PCM1_OUT, SST_TASK_SBA, 0, &sst_gains[6]),
+ SST_GAIN("pcm2_out", SST_PATH_INDEX_PCM2_OUT, SST_TASK_SBA, 0, &sst_gains[7]),
+
+ SST_GAIN("codec_in0", SST_PATH_INDEX_CODEC_IN0, SST_TASK_SBA, 0, &sst_gains[8]),
+ SST_GAIN("codec_in1", SST_PATH_INDEX_CODEC_IN1, SST_TASK_SBA, 0, &sst_gains[9]),
+ SST_GAIN("codec_out0", SST_PATH_INDEX_CODEC_OUT0, SST_TASK_SBA, 0, &sst_gains[10]),
+ SST_GAIN("codec_out1", SST_PATH_INDEX_CODEC_OUT1, SST_TASK_SBA, 0, &sst_gains[11]),
+ SST_GAIN("media_loop1_out", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_TASK_SBA, 0, &sst_gains[12]),
+ SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[13]),
+ SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[14]),
+ SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[15]),
+};
+
+#define SST_GAIN_NUM_CONTROLS 3
+/* The SST_GAIN macro above creates three ALSA controls for each instance
+ * invoked - gain, mute and ramp duration - which share the same gain cell
+ * (sst_gain_value) to keep track of the data.
+ * To calculate the number of gain cell instances, divide by 3 in the
+ * calculation below for the gain cell memory.
+ * This avoids a hard-coded number and issues when adding new controls.
+ */
+static struct sst_gain_value sst_gains[ARRAY_SIZE(sst_gain_controls)/SST_GAIN_NUM_CONTROLS];
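+
+/*
+ * Worked example: the sst_gain_controls[] table above makes 16 SST_GAIN/
+ * SST_VOLUME invocations, each expanding to 3 kcontrols, i.e. 48 array
+ * entries; ARRAY_SIZE(sst_gain_controls)/SST_GAIN_NUM_CONTROLS therefore
+ * yields 16 gain cells, matching the indices sst_gains[0]..sst_gains[15]
+ * used above.
+ */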
+
static const struct snd_kcontrol_new sst_algo_controls[] = {
SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 272, SST_MODULE_ID_FIR_24,
SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
@@ -198,21 +1143,280 @@ static int sst_algo_control_init(struct device *dev)
return 0;
}
-int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+static bool is_sst_dapm_widget(struct snd_soc_dapm_widget *w)
+{
+ switch (w->id) {
+ case snd_soc_dapm_pga:
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_input:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_mixer:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * sst_send_pipe_gains - send gains for the front-end DAIs
+ * @dai: front-end DAI
+ * @stream: direction (playback/capture)
+ * @mute: non-zero to mute, zero to unmute
+ *
+ * The gains in the pipes connected to the front-ends are muted/unmuted
+ * automatically via the digital_mute() DAI callback. This function sends the
+ * gains for the front-end pipes.
+ */
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
+{
+ struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_path *p = NULL;
+
+ dev_dbg(dai->dev, "enter, dai-name=%s dir=%d\n", dai->name, stream);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ dev_dbg(dai->dev, "Stream name=%s\n",
+ dai->playback_widget->name);
+ w = dai->playback_widget;
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (p->connected && !p->connected(w, p->sink))
+ continue;
+
+ if (p->connect && p->sink->power &&
+ is_sst_dapm_widget(p->sink)) {
+ struct sst_ids *ids = p->sink->priv;
+
+ dev_dbg(dai->dev, "send gains for widget=%s\n",
+ p->sink->name);
+ mutex_lock(&drv->lock);
+ sst_set_pipe_gain(ids, drv, mute);
+ mutex_unlock(&drv->lock);
+ }
+ }
+ } else {
+ dev_dbg(dai->dev, "Stream name=%s\n",
+ dai->capture_widget->name);
+ w = dai->capture_widget;
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (p->connected && !p->connected(w, p->sink))
+ continue;
+
+ if (p->connect && p->source->power &&
+ is_sst_dapm_widget(p->source)) {
+ struct sst_ids *ids = p->source->priv;
+
+ dev_dbg(dai->dev, "send gain for widget=%s\n",
+ p->source->name);
+ mutex_lock(&drv->lock);
+ sst_set_pipe_gain(ids, drv, mute);
+ mutex_unlock(&drv->lock);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * sst_fill_module_list - populate the list of modules/gains for a pipe
+ * @kctl: kcontrol of the algo/gain module
+ * @w: pipe widget the module belongs to
+ * @type: SST_MODULE_GAIN or SST_MODULE_ALGO
+ *
+ * Fills the widget pointer in the kcontrol private data, and also fills the
+ * kcontrol pointer in the widget private data.
+ *
+ * The widget pointer is used to send the algo/gain in the .put() handler if
+ * the widget is powered on.
+ *
+ * The kcontrol pointer is used to send the algo/gain in the widget power
+ * ON/OFF event handler. Each widget (pipe) has multiple algos stored in the
+ * algo_list.
+ */
+static int sst_fill_module_list(struct snd_kcontrol *kctl,
+ struct snd_soc_dapm_widget *w, int type)
{
+ struct sst_module *module = NULL;
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct sst_ids *ids = w->priv;
int ret = 0;
+
+ module = devm_kzalloc(c->dev, sizeof(*module), GFP_KERNEL);
+ if (!module)
+ return -ENOMEM;
+
+ if (type == SST_MODULE_GAIN) {
+ struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+
+ mc->w = w;
+ module->kctl = kctl;
+ list_add_tail(&module->node, &ids->gain_list);
+ } else if (type == SST_MODULE_ALGO) {
+ struct sst_algo_control *bc = (void *)kctl->private_value;
+
+ bc->w = w;
+ module->kctl = kctl;
+ list_add_tail(&module->node, &ids->algo_list);
+ } else {
+ dev_err(c->dev, "invoked for unknown type %d module %s",
+ type, kctl->id.name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * sst_fill_widget_module_info - fill list of gains/algos for the pipe
+ * @w: pipe modelled as a DAPM widget
+ * @platform: ASoC platform the card controls belong to
+ *
+ * Fill the list of gains/algos for the widget by looking at all the card
+ * controls and comparing the name of the widget with the first part of the
+ * control name. The first part of the control name contains the pipe name
+ * (widget name).
+ */
+static int sst_fill_widget_module_info(struct snd_soc_dapm_widget *w,
+ struct snd_soc_platform *platform)
+{
+ struct snd_kcontrol *kctl;
+ int index, ret = 0;
+ struct snd_card *card = platform->component.card->snd_card;
+ char *idx;
+
+ down_read(&card->controls_rwsem);
+
+ list_for_each_entry(kctl, &card->controls, list) {
+ idx = strstr(kctl->id.name, " ");
+ if (idx == NULL)
+ continue;
+ index = strlen(kctl->id.name) - strlen(idx);
+
+ if (strstr(kctl->id.name, "Volume") &&
+ !strncmp(kctl->id.name, w->name, index))
+ ret = sst_fill_module_list(kctl, w, SST_MODULE_GAIN);
+
+ else if (strstr(kctl->id.name, "params") &&
+ !strncmp(kctl->id.name, w->name, index))
+ ret = sst_fill_module_list(kctl, w, SST_MODULE_ALGO);
+
+ else if (strstr(kctl->id.name, "Switch") &&
+ !strncmp(kctl->id.name, w->name, index) &&
+ strstr(kctl->id.name, "Gain")) {
+ struct sst_gain_mixer_control *mc =
+ (void *)kctl->private_value;
+
+ mc->w = w;
+
+ } else if (strstr(kctl->id.name, "interleaver") &&
+ !strncmp(kctl->id.name, w->name, index)) {
+ struct sst_enum *e = (void *)kctl->private_value;
+
+ e->w = w;
+
+ } else if (strstr(kctl->id.name, "deinterleaver") &&
+ !strncmp(kctl->id.name, w->name, index)) {
+
+ struct sst_enum *e = (void *)kctl->private_value;
+
+ e->w = w;
+ }
+
+ if (ret < 0) {
+ up_read(&card->controls_rwsem);
+ return ret;
+ }
+ }
+
+ up_read(&card->controls_rwsem);
+ return 0;
+}
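+
+/*
+ * Example (illustrative): for the widget "pcm1_out", the control
+ * "pcm1_out Gain 0 Volume" declared above matches on the "Volume" substring
+ * and the widget-name prefix, so it lands on the widget's gain_list, while
+ * controls whose names contain "params" with the same prefix land on the
+ * algo_list.
+ */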
+
+/**
+ * sst_fill_linked_widgets - fill the parent pointer for the linked widget
+ * @platform: ASoC platform holding the card widgets
+ * @ids: sst_ids carrying the parent widget name to look up
+ */
+static void sst_fill_linked_widgets(struct snd_soc_platform *platform,
+ struct sst_ids *ids)
+{
+ struct snd_soc_dapm_widget *w;
+ unsigned int len = strlen(ids->parent_wname);
+
+ list_for_each_entry(w, &platform->component.card->widgets, list) {
+ if (!strncmp(ids->parent_wname, w->name, len)) {
+ ids->parent_w = w;
+ break;
+ }
+ }
+}
+
+/**
+ * sst_map_modules_to_pipe - fill algo/gains list for all pipes
+ * @platform: ASoC platform whose widgets are scanned
+ */
+static int sst_map_modules_to_pipe(struct snd_soc_platform *platform)
+{
+ struct snd_soc_dapm_widget *w;
+ int ret = 0;
+
+ list_for_each_entry(w, &platform->component.card->widgets, list) {
+ if (is_sst_dapm_widget(w) && (w->priv)) {
+ struct sst_ids *ids = w->priv;
+
+ dev_dbg(platform->dev, "widget type=%d name=%s\n",
+ w->id, w->name);
+ INIT_LIST_HEAD(&ids->algo_list);
+ INIT_LIST_HEAD(&ids->gain_list);
+ ret = sst_fill_widget_module_info(w, platform);
+
+ if (ret < 0)
+ return ret;
+
+ /* fill linked widgets */
+ if (ids->parent_wname != NULL)
+ sst_fill_linked_widgets(platform, ids);
+ }
+ }
+ return 0;
+}
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+{
+ int i, ret = 0;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(&platform->component);
struct sst_data *drv = snd_soc_platform_get_drvdata(platform);
+ unsigned int gains = ARRAY_SIZE(sst_gain_controls)/3;
drv->byte_stream = devm_kzalloc(platform->dev,
SST_MAX_BIN_BYTES, GFP_KERNEL);
if (!drv->byte_stream)
return -ENOMEM;
- /*Initialize algo control params*/
+ snd_soc_dapm_new_controls(dapm, sst_dapm_widgets,
+ ARRAY_SIZE(sst_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, intercon,
+ ARRAY_SIZE(intercon));
+ snd_soc_dapm_new_widgets(dapm->card);
+
+ for (i = 0; i < gains; i++) {
+ sst_gains[i].mute = SST_GAIN_MUTE_DEFAULT;
+ sst_gains[i].l_gain = SST_GAIN_VOLUME_DEFAULT;
+ sst_gains[i].r_gain = SST_GAIN_VOLUME_DEFAULT;
+ sst_gains[i].ramp_duration = SST_GAIN_RAMP_DURATION_DEFAULT;
+ }
+
+ ret = snd_soc_add_platform_controls(platform, sst_gain_controls,
+ ARRAY_SIZE(sst_gain_controls));
+ if (ret)
+ return ret;
+
+ /* Initialize algo control params */
ret = sst_algo_control_init(platform->dev);
if (ret)
return ret;
ret = snd_soc_add_platform_controls(platform, sst_algo_controls,
ARRAY_SIZE(sst_algo_controls));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_add_platform_controls(platform, sst_slot_controls,
+ ARRAY_SIZE(sst_slot_controls));
+ if (ret)
+ return ret;
+
+ ret = sst_map_modules_to_pipe(platform);
+
return ret;
}
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h
index a73e894b175c..dfebfdd5eb2a 100644
--- a/sound/soc/intel/sst-atom-controls.h
+++ b/sound/soc/intel/sst-atom-controls.h
@@ -23,6 +23,9 @@
#ifndef __SST_ATOM_CONTROLS_H__
#define __SST_ATOM_CONTROLS_H__
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
enum {
MERR_DPCM_AUDIO = 0,
MERR_DPCM_COMPR,
@@ -360,16 +363,416 @@ struct sst_dsp_header {
struct sst_cmd_generic {
struct sst_dsp_header header;
} __packed;
+
+struct swm_input_ids {
+ struct sst_destination_id input_id;
+} __packed;
+
+struct sst_cmd_set_swm {
+ struct sst_dsp_header header;
+ struct sst_destination_id output_id;
+ u16 switch_state;
+ u16 nb_inputs;
+ struct swm_input_ids input[SST_CMD_SWM_MAX_INPUTS];
+} __packed;
+
+struct sst_cmd_set_media_path {
+ struct sst_dsp_header header;
+ u16 switch_state;
+} __packed;
+
+struct pcm_cfg {
+ u8 s_length:2;
+ u8 rate:3;
+ u8 format:3;
+} __packed;
+
+struct sst_cmd_set_speech_path {
+ struct sst_dsp_header header;
+ u16 switch_state;
+ struct {
+ u16 rsvd:8;
+ struct pcm_cfg cfg;
+ } config;
+} __packed;
+
+struct gain_cell {
+ struct sst_destination_id dest;
+ s16 cell_gain_left;
+ s16 cell_gain_right;
+ u16 gain_time_constant;
+} __packed;
+
+#define NUM_GAIN_CELLS 1
+struct sst_cmd_set_gain_dual {
+ struct sst_dsp_header header;
+ u16 gain_cell_num;
+ struct gain_cell cell_gains[NUM_GAIN_CELLS];
+} __packed;
struct sst_cmd_set_params {
struct sst_destination_id dst;
u16 command_id;
char params[0];
} __packed;
+
+
+struct sst_cmd_sba_vb_start {
+ struct sst_dsp_header header;
+} __packed;
+
+union sba_media_loop_params {
+ struct {
+ u16 rsvd:8;
+ struct pcm_cfg cfg;
+ } part;
+ u16 full;
+} __packed;
+
+struct sst_cmd_sba_set_media_loop_map {
+ struct sst_dsp_header header;
+ u16 switch_state;
+ union sba_media_loop_params param;
+ u16 map;
+} __packed;
+
+struct sst_cmd_tone_stop {
+ struct sst_dsp_header header;
+ u16 switch_state;
+} __packed;
+
+enum sst_ssp_mode {
+ SSP_MODE_MASTER = 0,
+ SSP_MODE_SLAVE = 1,
+};
+
+enum sst_ssp_pcm_mode {
+ SSP_PCM_MODE_NORMAL = 0,
+ SSP_PCM_MODE_NETWORK = 1,
+};
+
+enum sst_ssp_duplex {
+ SSP_DUPLEX = 0,
+ SSP_RX = 1,
+ SSP_TX = 2,
+};
+
+enum sst_ssp_fs_frequency {
+ SSP_FS_8_KHZ = 0,
+ SSP_FS_16_KHZ = 1,
+ SSP_FS_44_1_KHZ = 2,
+ SSP_FS_48_KHZ = 3,
+};
+
+enum sst_ssp_fs_polarity {
+ SSP_FS_ACTIVE_LOW = 0,
+ SSP_FS_ACTIVE_HIGH = 1,
+};
+
+enum sst_ssp_protocol {
+ SSP_MODE_PCM = 0,
+ SSP_MODE_I2S = 1,
+};
+
+enum sst_ssp_port_id {
+ SSP_MODEM = 0,
+ SSP_BT = 1,
+ SSP_FM = 2,
+ SSP_CODEC = 3,
+};
+
+struct sst_cmd_sba_hw_set_ssp {
+ struct sst_dsp_header header;
+ u16 selection; /* 0:SSP0(def), 1:SSP1, 2:SSP2 */
+
+ u16 switch_state;
+
+ u16 nb_bits_per_slots:6; /* 0-32 bits, 24 (def) */
+ u16 nb_slots:4; /* 0-8: slots per frame */
+ u16 mode:3; /* 0:Master, 1: Slave */
+ u16 duplex:3;
+
+ u16 active_tx_slot_map:8; /* Bit map, 0:off, 1:on */
+ u16 reserved1:8;
+
+ u16 active_rx_slot_map:8; /* Bit map 0: Off, 1:On */
+ u16 reserved2:8;
+
+ u16 frame_sync_frequency;
+
+ u16 frame_sync_polarity:8;
+ u16 data_polarity:8;
+
+ u16 frame_sync_width; /* 1 to N clocks */
+ u16 ssp_protocol:8;
+ u16 start_delay:8; /* Start delay in terms of clock ticks */
+} __packed;
+
+#define SST_MAX_TDM_SLOTS 8
+
+struct sst_param_sba_ssp_slot_map {
+ struct sst_dsp_header header;
+
+ u16 param_id;
+ u16 param_len;
+ u16 ssp_index;
+
+ u8 rx_slot_map[SST_MAX_TDM_SLOTS];
+ u8 tx_slot_map[SST_MAX_TDM_SLOTS];
+} __packed;
+
+enum {
+ SST_PROBE_EXTRACTOR = 0,
+ SST_PROBE_INJECTOR = 1,
+};
+
+/**** widget defines *****/
+
+#define SST_MODULE_GAIN 1
+#define SST_MODULE_ALGO 2
+
+#define SST_FMT_MONO 0
+#define SST_FMT_STEREO 3
+
+/* physical SSP numbers */
+enum {
+ SST_SSP0 = 0,
+ SST_SSP1,
+ SST_SSP2,
+ SST_SSP_LAST = SST_SSP2,
+};
+
+#define SST_NUM_SSPS (SST_SSP_LAST + 1) /* physical SSPs */
+#define SST_MAX_SSP_MUX 2 /* single SSP muxed between pipes */
+#define SST_MAX_SSP_DOMAINS 2 /* domains present in each pipe */
+
+struct sst_module {
+ struct snd_kcontrol *kctl;
+ struct list_head node;
+};
+
+struct sst_ssp_config {
+ u8 ssp_id;
+ u8 bits_per_slot;
+ u8 slots;
+ u8 ssp_mode;
+ u8 pcm_mode;
+ u8 duplex;
+ u8 ssp_protocol;
+ u8 fs_frequency;
+ u8 active_slot_map;
+ u8 start_delay;
+ u16 fs_width;
+};
+
+struct sst_ssp_cfg {
+ const u8 ssp_number;
+ const int *mux_shift;
+ const int (*domain_shift)[SST_MAX_SSP_MUX];
+ const struct sst_ssp_config (*ssp_config)[SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS];
+};
+
+struct sst_ids {
+ u16 location_id;
+ u16 module_id;
+ u8 task_id;
+ u8 format;
+ u8 reg;
+ const char *parent_wname;
+ struct snd_soc_dapm_widget *parent_w;
+ struct list_head algo_list;
+ struct list_head gain_list;
+ const struct sst_pcm_format *pcm_fmt;
+};
+
+
+#define SST_AIF_IN(wname, wevent) \
+{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 } \
+}
+
+#define SST_AIF_OUT(wname, wevent) \
+{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 } \
+}
+
+#define SST_INPUT(wname, wevent) \
+{ .id = snd_soc_dapm_input, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 } \
+}
+
+#define SST_OUTPUT(wname, wevent) \
+{ .id = snd_soc_dapm_output, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 } \
+}
+
+#define SST_DAPM_OUTPUT(wname, wloc_id, wtask_id, wformat, wevent) \
+{ .id = snd_soc_dapm_output, .name = wname, .sname = NULL, \
+ .reg = SND_SOC_NOPM, .shift = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
+ .priv = (void *)&(struct sst_ids) { .location_id = wloc_id, .task_id = wtask_id,\
+ .pcm_fmt = wformat, } \
+}
+
+#define SST_PATH(wname, wtask, wloc_id, wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .kcontrol_news = NULL, .num_kcontrols = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = wflags, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, } \
+}
+
+#define SST_LINKED_PATH(wname, wtask, wloc_id, linked_wname, wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .kcontrol_news = NULL, .num_kcontrols = 0, \
+ .on_val = 1, .off_val = 0, \
+ .event = wevent, .event_flags = wflags, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, \
+ .parent_wname = linked_wname} \
+}
+
+#define SST_PATH_MEDIA_LOOP(wname, wtask, wloc_id, wformat, wevent, wflags) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .kcontrol_news = NULL, .num_kcontrols = 0, \
+ .event = wevent, .event_flags = wflags, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, \
+ .format = wformat,} \
+}
+
+/* output is triggered before input */
+#define SST_PATH_INPUT(name, task_id, loc_id, event) \
+ SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_LINKED_INPUT(name, task_id, loc_id, linked_wname, event) \
+ SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event, \
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_OUTPUT(name, task_id, loc_id, event) \
+ SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_LINKED_OUTPUT(name, task_id, loc_id, linked_wname, event) \
+ SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event, \
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_MEDIA_LOOP_OUTPUT(name, task_id, loc_id, format, event) \
+ SST_PATH_MEDIA_LOOP(name, task_id, loc_id, format, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+
+#define SST_SWM_MIXER(wname, wreg, wtask, wloc_id, wcontrols, wevent) \
+{ .id = snd_soc_dapm_mixer, .name = wname, .reg = SND_SOC_NOPM, .shift = 0, \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols),\
+ .event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD | \
+ SND_SOC_DAPM_POST_REG, \
+ .priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, \
+ .reg = wreg } \
+}
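+
+/*
+ * Illustrative expansion: SST_SWM_MIXER("pcm0_out mix 0", SND_SOC_NOPM,
+ * SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_mix_pcm0_controls, sst_swm_mixer_event)
+ * builds a DAPM mixer whose sst_ids carry the SBA task id and the PCM0 output
+ * location, uses the sst_mix_pcm0_controls switches as its kcontrols, and
+ * runs sst_swm_mixer_event() on power up/down and on POST_REG.
+ */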
+
+enum sst_gain_kcontrol_type {
+ SST_GAIN_TLV,
+ SST_GAIN_MUTE,
+ SST_GAIN_RAMP_DURATION,
+};
+
+struct sst_gain_mixer_control {
+ bool stereo;
+ enum sst_gain_kcontrol_type type;
+ struct sst_gain_value *gain_val;
+ int max;
+ int min;
+ u16 instance_id;
+ u16 module_id;
+ u16 pipe_id;
+ u16 task_id;
+ char pname[44];
+ struct snd_soc_dapm_widget *w;
+};
+
+struct sst_gain_value {
+ u16 ramp_duration;
+ s16 l_gain;
+ s16 r_gain;
+ bool mute;
+};
+#define SST_GAIN_VOLUME_DEFAULT (-1440)
+#define SST_GAIN_RAMP_DURATION_DEFAULT	5 /* time constant */
+#define SST_GAIN_MUTE_DEFAULT true
+
+#define SST_GAIN_KCONTROL_TLV(xname, xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, tlv_array, xgain_val, \
+ xmin, xmax, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = sst_gain_ctl_info,\
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = true, .max = xmax, .min = xmin, .type = SST_GAIN_TLV, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_INT(xname, xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, xtype, xgain_val, \
+ xmin, xmax, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = sst_gain_ctl_info, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = false, .max = xmax, .min = xmin, .type = xtype, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_BOOL(xname, xhandler_get, xhandler_put,\
+ xmod, xpipe, xinstance, xtask, xgain_val, xpname) \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_bool_ext, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+ { .stereo = false, .type = SST_GAIN_MUTE, \
+ .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+ .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
#define SST_CONTROL_NAME(xpname, xmname, xinstance, xtype) \
xpname " " xmname " " #xinstance " " xtype
#define SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, xtype, xsubmodule) \
xpname " " xmname " " #xinstance " " xtype " " xsubmodule
+
+/*
+ * 3 Controls for each Gain module
+ * e.g. - pcm0_in Gain 0 Volume
+ * - pcm0_in Gain 0 Ramp Delay
+ * - pcm0_in Gain 0 Switch
+ */
+#define SST_GAIN_KCONTROLS(xpname, xmname, xmin_gain, xmax_gain, xmin_tc, xmax_tc, \
+ xhandler_get, xhandler_put, \
+ xmod, xpipe, xinstance, xtask, tlv_array, xgain_val) \
+ { SST_GAIN_KCONTROL_INT(SST_CONTROL_NAME(xpname, xmname, xinstance, "Ramp Delay"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, SST_GAIN_RAMP_DURATION, \
+ xgain_val, xmin_tc, xmax_tc, xpname) }, \
+ { SST_GAIN_KCONTROL_BOOL(SST_CONTROL_NAME(xpname, xmname, xinstance, "Switch"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, \
+ xgain_val, xpname) } ,\
+ { SST_GAIN_KCONTROL_TLV(SST_CONTROL_NAME(xpname, xmname, xinstance, "Volume"), \
+ xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, tlv_array, \
+ xgain_val, xmin_gain, xmax_gain, xpname) }
+
+#define SST_GAIN_TC_MIN 5
+#define SST_GAIN_TC_MAX 5000
+#define SST_GAIN_MIN_VALUE -1440 /* in 0.1 dB units */
+#define SST_GAIN_MAX_VALUE 360
+
enum sst_algo_kcontrol_type {
SST_ALGO_PARAMS,
SST_ALGO_BYPASS,
@@ -439,4 +842,29 @@ struct sst_enum {
struct snd_soc_dapm_widget *w;
};
+/* only 4 slots/channels supported atm */
+#define SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts) \
+ (struct sst_enum){ .reg = s_ch_no, .tx = is_tx, .max = 4+1, .texts = xtexts, }
+
+#define SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name) \
+ xpname " " xmname " " s_ch_name
+
+#define SST_SSP_SLOT_CTL(xpname, xmname, s_ch_name, s_ch_no, is_tx, xtexts, xget, xput) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name), \
+ .info = sst_slot_enum_info, \
+ .get = xget, .put = xput, \
+ .private_value = (unsigned long)&SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts), \
+}
+
+#define SST_MUX_CTL_NAME(xpname, xinstance) \
+ xpname " " #xinstance
+
+#define SST_SSP_MUX_ENUM(xreg, xshift, xtexts) \
+ (struct soc_enum) SOC_ENUM_DOUBLE(xreg, xshift, xshift, ARRAY_SIZE(xtexts), xtexts)
+
+#define SST_SSP_MUX_CTL(xpname, xinstance, xreg, xshift, xtexts) \
+ SOC_DAPM_ENUM(SST_MUX_CTL_NAME(xpname, xinstance), \
+ SST_SSP_MUX_ENUM(xreg, xshift, xtexts))
+
#endif
diff --git a/sound/soc/intel/sst-baytrail-dsp.c b/sound/soc/intel/sst-baytrail-dsp.c
index fc588764ffa3..5a9e56700f31 100644
--- a/sound/soc/intel/sst-baytrail-dsp.c
+++ b/sound/soc/intel/sst-baytrail-dsp.c
@@ -67,17 +67,12 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
{
struct dma_block_info *block;
struct sst_module *mod;
- struct sst_module_data block_data;
struct sst_module_template template;
int count;
memset(&template, 0, sizeof(template));
template.id = module->type;
template.entry = module->entry_point;
- template.p.type = SST_MEM_DRAM;
- template.p.data_type = SST_DATA_P;
- template.s.type = SST_MEM_DRAM;
- template.s.data_type = SST_DATA_S;
mod = sst_module_new(fw, &template, NULL);
if (mod == NULL)
@@ -94,19 +89,19 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
switch (block->type) {
case SST_BYT_IRAM:
- block_data.offset = block->ram_offset +
+ mod->offset = block->ram_offset +
dsp->addr.iram_offset;
- block_data.type = SST_MEM_IRAM;
+ mod->type = SST_MEM_IRAM;
break;
case SST_BYT_DRAM:
- block_data.offset = block->ram_offset +
+ mod->offset = block->ram_offset +
dsp->addr.dram_offset;
- block_data.type = SST_MEM_DRAM;
+ mod->type = SST_MEM_DRAM;
break;
case SST_BYT_CACHE:
- block_data.offset = block->ram_offset +
+ mod->offset = block->ram_offset +
(dsp->addr.fw_ext - dsp->addr.lpe);
- block_data.type = SST_MEM_CACHE;
+ mod->type = SST_MEM_CACHE;
break;
default:
dev_err(dsp->dev, "wrong ram type 0x%x in block0x%x\n",
@@ -114,11 +109,10 @@ static int sst_byt_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return -EINVAL;
}
- block_data.size = block->size;
- block_data.data_type = SST_DATA_M;
- block_data.data = (void *)block + sizeof(*block);
+ mod->size = block->size;
+ mod->data = (void *)block + sizeof(*block);
- sst_module_insert_fixed_block(mod, &block_data);
+ sst_module_alloc_blocks(mod);
block = (void *)block + sizeof(*block) + block->size;
}
diff --git a/sound/soc/intel/sst-baytrail-pcm.c b/sound/soc/intel/sst-baytrail-pcm.c
index eab1c7d85187..3bb6288d8b4d 100644
--- a/sound/soc/intel/sst-baytrail-pcm.c
+++ b/sound/soc/intel/sst-baytrail-pcm.c
@@ -497,7 +497,6 @@ static int sst_byt_pcm_dev_remove(struct platform_device *pdev)
static struct platform_driver sst_byt_pcm_driver = {
.driver = {
.name = "baytrail-pcm-audio",
- .owner = THIS_MODULE,
.pm = SST_BYT_PM_OPS,
},
diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/sst-dsp-priv.h
index ffb308bd81ce..b9da030e312d 100644
--- a/sound/soc/intel/sst-dsp-priv.h
+++ b/sound/soc/intel/sst-dsp-priv.h
@@ -26,6 +26,9 @@ struct sst_mem_block;
struct sst_module;
struct sst_fw;
+/* TODO: decide whether this offset is still needed */
+#define DSP_DRAM_ADDR_OFFSET 0x400000
+
/*
* DSP Operations exported by platform Audio DSP driver.
*/
@@ -33,6 +36,9 @@ struct sst_ops {
/* DSP core boot / reset */
void (*boot)(struct sst_dsp *);
void (*reset)(struct sst_dsp *);
+ int (*wake)(struct sst_dsp *);
+ void (*sleep)(struct sst_dsp *);
+ void (*stall)(struct sst_dsp *);
/* Shim IO */
void (*write)(void __iomem *addr, u32 offset, u32 value);
@@ -67,6 +73,8 @@ struct sst_addr {
u32 shim_offset;
u32 iram_offset;
u32 dram_offset;
+ u32 dsp_iram_offset;
+ u32 dsp_dram_offset;
void __iomem *lpe;
void __iomem *shim;
void __iomem *pci_cfg;
@@ -84,15 +92,6 @@ struct sst_mailbox {
};
/*
- * Audio DSP Firmware data types.
- */
-enum sst_data_type {
- SST_DATA_M = 0, /* module block data */
- SST_DATA_P = 1, /* peristant data (text, data) */
- SST_DATA_S = 2, /* scratch data (usually buffers) */
-};
-
-/*
* Audio DSP memory block types.
*/
enum sst_mem_type {
@@ -125,23 +124,6 @@ struct sst_fw {
};
/*
- * Audio DSP Generic Module data.
- *
- * This is used to dsecribe any sections of persistent (text and data) and
- * scratch (buffers) of module data in ADSP memory space.
- */
-struct sst_module_data {
-
- enum sst_mem_type type; /* destination memory type */
- enum sst_data_type data_type; /* type of module data */
-
- u32 size; /* size in bytes */
- int32_t offset; /* offset in FW file */
- u32 data_offset; /* offset in ADSP memory space */
- void *data; /* module data */
-};
-
-/*
* Audio DSP Generic Module Template.
*
* Used to define and register a new FW module. This data is extracted from
@@ -150,15 +132,52 @@ struct sst_module_data {
struct sst_module_template {
u32 id;
u32 entry; /* entry point */
- struct sst_module_data s; /* scratch data */
- struct sst_module_data p; /* peristant data */
+ u32 scratch_size;
+ u32 persistent_size;
+};
+
+/*
+ * Block Allocator - Used to allocate blocks of DSP memory.
+ */
+struct sst_block_allocator {
+ u32 id;
+ u32 offset;
+ int size;
+ enum sst_mem_type type;
+};
+
+/*
+ * Runtime Module Instance - A module object can be instantiated multiple
+ * times within the DSP FW.
+ */
+struct sst_module_runtime {
+ struct sst_dsp *dsp;
+ int id;
+	struct sst_module *module;	/* parent module we belong to */
+
+ u32 persistent_offset; /* private memory offset */
+ void *private;
+
+ struct list_head list;
+ struct list_head block_list; /* list of blocks used */
+};
+
+/*
+ * Runtime Module Context - The runtime context must be manually stored by the
+ * driver prior to enter S3 and restored after leaving S3. This should really be
+ * part of the memory context saved by the enter D3 message IPC ???
+ */
+struct sst_module_runtime_context {
+ dma_addr_t dma_buffer;
+ u32 *buffer;
};
/*
* Audio DSP Generic Module.
*
* Each Firmware file can consist of 1..N modules. A module can span multiple
- * ADSP memory blocks. The simplest FW will be a file with 1 module.
+ * ADSP memory blocks. The simplest FW will be a file with 1 module. A module
+ * can be instantiated multiple times in the DSP.
*/
struct sst_module {
struct sst_dsp *dsp;
@@ -167,10 +186,13 @@ struct sst_module {
/* module configuration */
u32 id;
u32 entry; /* module entry point */
- u32 offset; /* module offset in firmware file */
+ s32 offset; /* module offset in firmware file */
u32 size; /* module size */
- struct sst_module_data s; /* scratch data */
- struct sst_module_data p; /* peristant data */
+ u32 scratch_size; /* global scratch memory required */
+ u32 persistent_size; /* private memory required */
+ enum sst_mem_type type; /* destination memory type */
+ u32 data_offset; /* offset in ADSP memory space */
+ void *data; /* module data */
/* runtime */
u32 usage_count; /* can be unloaded if count == 0 */
@@ -180,6 +202,7 @@ struct sst_module {
struct list_head block_list; /* Module list of blocks in use */
struct list_head list; /* DSP list of modules */
struct list_head list_fw; /* FW list of modules */
+ struct list_head runtime_list; /* list of runtime module objects*/
};
/*
@@ -208,7 +231,6 @@ struct sst_mem_block {
struct sst_block_ops *ops; /* block operations, if any */
/* block status */
- enum sst_data_type data_type; /* data type held in this block */
u32 bytes_used; /* bytes in use by modules */
void *private; /* generic core does not touch this */
int users; /* number of modules using this block */
@@ -253,6 +275,11 @@ struct sst_dsp {
struct list_head module_list;
struct list_head fw_list;
+ /* scratch buffer */
+ struct list_head scratch_block_list;
+ u32 scratch_offset;
+ u32 scratch_size;
+
/* platform data */
struct sst_pdata *pdata;
@@ -290,18 +317,33 @@ void sst_fw_unload(struct sst_fw *sst_fw);
/* Create/Free firmware modules */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
struct sst_module_template *template, void *private);
-void sst_module_free(struct sst_module *sst_module);
-int sst_module_insert(struct sst_module *sst_module);
-int sst_module_remove(struct sst_module *sst_module);
-int sst_module_insert_fixed_block(struct sst_module *module,
- struct sst_module_data *data);
+void sst_module_free(struct sst_module *module);
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id);
-
-/* allocate/free pesistent/scratch memory regions managed by drv */
-struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp);
-void sst_mem_block_free_scratch(struct sst_dsp *dsp,
- struct sst_module *scratch);
-int sst_block_module_remove(struct sst_module *module);
+int sst_module_alloc_blocks(struct sst_module *module);
+int sst_module_free_blocks(struct sst_module *module);
+
+/* Create/Free firmware module runtime instances */
+struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+ int id, void *private);
+void sst_module_runtime_free(struct sst_module_runtime *runtime);
+struct sst_module_runtime *sst_module_runtime_get_from_id(
+ struct sst_module *module, u32 id);
+int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+ int offset);
+int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime);
+int sst_module_runtime_save(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context);
+int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context);
+
+/* generic block allocation */
+int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list);
+int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list);
+
+/* scratch allocation */
+int sst_block_alloc_scratch(struct sst_dsp *dsp);
+void sst_block_free_scratch(struct sst_dsp *dsp);
/* Register the DSPs memory blocks - would be nice to read from ACPI */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
@@ -309,4 +351,10 @@ struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
void *private);
void sst_mem_block_unregister_all(struct sst_dsp *dsp);
+/* Create/Free DMA resources */
+int sst_dma_new(struct sst_dsp *sst);
+void sst_dma_free(struct sst_dma *dma);
+
+u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+ enum sst_mem_type type);
#endif
diff --git a/sound/soc/intel/sst-dsp.c b/sound/soc/intel/sst-dsp.c
index cd23060a0d86..86e410845670 100644
--- a/sound/soc/intel/sst-dsp.c
+++ b/sound/soc/intel/sst-dsp.c
@@ -245,6 +245,29 @@ int sst_dsp_boot(struct sst_dsp *sst)
}
EXPORT_SYMBOL_GPL(sst_dsp_boot);
+int sst_dsp_wake(struct sst_dsp *sst)
+{
+ if (sst->ops->wake)
+ return sst->ops->wake(sst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_wake);
+
+void sst_dsp_sleep(struct sst_dsp *sst)
+{
+ if (sst->ops->sleep)
+ sst->ops->sleep(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_sleep);
+
+void sst_dsp_stall(struct sst_dsp *sst)
+{
+ if (sst->ops->stall)
+ sst->ops->stall(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_stall);
+
void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg)
{
sst_dsp_shim_write_unlocked(dsp, SST_IPCX, msg | SST_IPCX_BUSY);
@@ -352,6 +375,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
INIT_LIST_HEAD(&sst->free_block_list);
INIT_LIST_HEAD(&sst->module_list);
INIT_LIST_HEAD(&sst->fw_list);
+ INIT_LIST_HEAD(&sst->scratch_block_list);
/* Initialise SST Audio DSP */
if (sst->ops->init) {
@@ -366,6 +390,10 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
if (err)
goto irq_err;
+ err = sst_dma_new(sst);
+ if (err)
+ dev_warn(dev, "sst_dma_new failed %d\n", err);
+
return sst;
irq_err:
@@ -381,6 +409,9 @@ void sst_dsp_free(struct sst_dsp *sst)
free_irq(sst->irq, sst);
if (sst->ops->free)
sst->ops->free(sst);
+
+ if (sst->dma)
+ sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);
diff --git a/sound/soc/intel/sst-dsp.h b/sound/soc/intel/sst-dsp.h
index 3165dfa97408..f291e32f0077 100644
--- a/sound/soc/intel/sst-dsp.h
+++ b/sound/soc/intel/sst-dsp.h
@@ -30,6 +30,9 @@
#define SST_DMA_TYPE_DW 1
#define SST_DMA_TYPE_MID 2
+/* autosuspend delay 5s */
+#define SST_RUNTIME_SUSPEND_DELAY (5 * 1000)
+
/* SST Shim register map
* The register naming can differ between products. Some products also
* contain extra functionality.
@@ -156,12 +159,18 @@
#define SST_VDRTCTL3 0xaC
/* VDRTCTL0 */
-#define SST_VDRTCL0_APLLSE_MASK 1
-#define SST_VDRTCL0_DSRAMPGE_SHIFT 16
-#define SST_VDRTCL0_DSRAMPGE_MASK (0xffff << SST_VDRTCL0_DSRAMPGE_SHIFT)
-#define SST_VDRTCL0_ISRAMPGE_SHIFT 6
+#define SST_VDRTCL0_D3PGD (1 << 0)
+#define SST_VDRTCL0_D3SRAMPGD (1 << 1)
+#define SST_VDRTCL0_DSRAMPGE_SHIFT 12
+#define SST_VDRTCL0_DSRAMPGE_MASK (0xfffff << SST_VDRTCL0_DSRAMPGE_SHIFT)
+#define SST_VDRTCL0_ISRAMPGE_SHIFT 2
#define SST_VDRTCL0_ISRAMPGE_MASK (0x3ff << SST_VDRTCL0_ISRAMPGE_SHIFT)
+/* VDRTCTL2 */
+#define SST_VDRTCL2_DCLCGE (1 << 1)
+#define SST_VDRTCL2_DTCGE (1 << 10)
+#define SST_VDRTCL2_APLLSE_MASK (1 << 31)
+
/* PMCS */
#define SST_PMCS 0x84
#define SST_PMCS_PS_MASK 0x3
@@ -245,6 +254,17 @@ void sst_memcpy_fromio_32(struct sst_dsp *sst,
/* DSP reset & boot */
void sst_dsp_reset(struct sst_dsp *sst);
int sst_dsp_boot(struct sst_dsp *sst);
+int sst_dsp_wake(struct sst_dsp *sst);
+void sst_dsp_sleep(struct sst_dsp *sst);
+void sst_dsp_stall(struct sst_dsp *sst);
+
+/* DMA */
+int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id);
+void sst_dsp_dma_put_channel(struct sst_dsp *dsp);
+int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size);
+int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size);
/* Msg IO */
void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg);
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index 3bb43dac892d..4a5bde9c686b 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -23,6 +23,11 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+
+/* supported DMA engine drivers */
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -30,16 +35,301 @@
#include "sst-dsp.h"
#include "sst-dsp-priv.h"
-static void block_module_remove(struct sst_module *module);
+#define SST_DMA_RESOURCES 2
+#define SST_DSP_DMA_MAX_BURST 0x3
+#define SST_HSW_BLOCK_ANY 0xffffffff
+
+#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
+
+struct sst_dma {
+ struct sst_dsp *sst;
+
+ struct dw_dma_chip *chip;
+
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *ch;
+};
+
+static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
+{
+ /* __iowrite32_copy use 32bit size values so divide by 4 */
+ __iowrite32_copy((void *)dest, src, bytes/4);
+}
+
+static void sst_dma_transfer_complete(void *arg)
+{
+ struct sst_dsp *sst = (struct sst_dsp *)arg;
+
+ dev_dbg(sst->dev, "DMA: callback\n");
+}
+
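+/* perform a synchronous memcpy over the allocated DMA channel and wait for completion */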
+static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct sst_dma *dma = sst->dma;
+
+ if (dma->ch == NULL) {
+ dev_err(sst->dev, "error: no DMA channel\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
+ (unsigned long)src_addr, (unsigned long)dest_addr, size);
+
+ desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
+ src_addr, size, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(sst->dev, "error: dma prep memcpy failed\n");
+ return -EINVAL;
+ }
+
+ desc->callback = sst_dma_transfer_complete;
+ desc->callback_param = sst;
+
+ desc->tx_submit(desc);
+ dma_wait_for_async_tx(desc);
+
+ return 0;
+}
+
+/* copy to DSP */
+int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
+ src_addr, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
+
+/* copy from DSP */
+int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr,
+ src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
+
+/* remove a list of blocks from DSP memory - callers hold locks */
+static void block_list_remove(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block, *tmp;
+ int err;
+
+ /* disable each block */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->disable) {
+ err = block->ops->disable(block);
+ if (err < 0)
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ }
+ }
+
+ /* mark each block as free */
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
+ list_del(&block->module_list);
+ list_move(&block->list, &dsp->free_block_list);
+ dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
+}
+
+/* prepare the memory blocks to receive data from the host - callers hold locks */
+static int block_list_prepare(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block;
+ int ret = 0;
+
+ /* enable each block so that it's ready for data */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->enable && !block->users) {
+ ret = block->ops->enable(block);
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ goto err;
+ }
+ }
+ }
+ return ret;
+
+err:
+ list_for_each_entry(block, block_list, module_list) {
+ if (block->ops && block->ops->disable)
+ block->ops->disable(block);
+ }
+ return ret;
+}
+
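+/* platform data for the DesignWare DMA controller embedded in the ADSP */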
+static struct dw_dma_platform_data dw_pdata = {
+ .is_private = 1,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
+ int irq)
+{
+ struct dw_dma_chip *chip;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ chip->irq = irq;
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return ERR_CAST(chip->regs);
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
+ if (err)
+ return ERR_PTR(err);
+
+ chip->dev = dev;
+ err = dw_dma_probe(chip, &dw_pdata);
+ if (err)
+ return ERR_PTR(err);
+
+ return chip;
+}
+
+static void dw_remove(struct dw_dma_chip *chip)
+{
+ dw_dma_remove(chip);
+}
+
+static bool dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct sst_dsp *dsp = (struct sst_dsp *)param;
+
+ return chan->device->dev == dsp->dma_dev;
+}
-static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
+int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
- u32 i;
+ struct sst_dma *dma = dsp->dma;
+ struct dma_slave_config slave;
+ dma_cap_mask_t mask;
+ int ret;
+
+ /* The Intel MID DMA engine driver needs the slave config set but
+ * Synopsys DMA engine driver safely ignores the slave config */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
+ if (dma->ch == NULL) {
+ dev_err(dsp->dev, "error: DMA request channel failed\n");
+ return -EIO;
+ }
+
+ memset(&slave, 0, sizeof(slave));
+ slave.direction = DMA_MEM_TO_DEV;
+ slave.src_addr_width =
+ slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
+
+ ret = dmaengine_slave_config(dma->ch, &slave);
+ if (ret) {
+ dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
+ ret);
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+ }
- /* copy one 32 bit word at a time as 64 bit access is not supported */
- for (i = 0; i < bytes; i += 4)
- memcpy_toio(dest + i, src + i, 4);
+ return ret;
}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
+
+void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
+{
+ struct sst_dma *dma = dsp->dma;
+
+ if (!dma->ch)
+ return;
+
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
+
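+/* Create DMA resources for FW loading and register the embedded DW DMA controller. */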
+int sst_dma_new(struct sst_dsp *sst)
+{
+ struct sst_pdata *sst_pdata = sst->pdata;
+ struct sst_dma *dma;
+ struct resource mem;
+ const char *dma_dev_name;
+ int ret = 0;
+
+ /* configure the correct platform data for whatever DMA engine
+ * is attached to the ADSP IP. */
+ switch (sst->pdata->dma_engine) {
+ case SST_DMA_TYPE_DW:
+ dma_dev_name = "dw_dmac";
+ break;
+ case SST_DMA_TYPE_MID:
+ dma_dev_name = "Intel MID DMA";
+ break;
+ default:
+ dev_err(sst->dev, "error: invalid DMA engine %d\n",
+ sst->pdata->dma_engine);
+ return -EINVAL;
+ }
+
+ dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->sst = sst;
+
+ memset(&mem, 0, sizeof(mem));
+
+ mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
+ mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
+ mem.flags = IORESOURCE_MEM;
+
+ /* now register DMA engine device */
+ dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
+ if (IS_ERR(dma->chip)) {
+ dev_err(sst->dev, "error: DMA device register failed\n");
+ ret = PTR_ERR(dma->chip);
+ goto err_dma_dev;
+ }
+
+ sst->dma = dma;
+ sst->fw_use_dma = true;
+ return 0;
+
+err_dma_dev:
+ devm_kfree(sst->dev, dma);
+ return ret;
+}
+EXPORT_SYMBOL(sst_dma_new);
+
+void sst_dma_free(struct sst_dma *dma)
+{
+
+ if (dma == NULL)
+ return;
+
+ if (dma->ch)
+ dma_release_channel(dma->ch);
+
+ if (dma->chip)
+ dw_remove(dma->chip);
+
+}
+EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
@@ -71,6 +361,12 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
/* copy FW data to DMA-able memory */
memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
+ if (dsp->fw_use_dma) {
+ err = sst_dsp_dma_get_channel(dsp, 0);
+ if (err < 0)
+ goto chan_err;
+ }
+
/* call core specific FW parser to load FW data into DSP */
err = dsp->ops->parse_fw(sst_fw);
if (err < 0) {
@@ -78,6 +374,9 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
goto parse_err;
}
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+
mutex_lock(&dsp->mutex);
list_add(&sst_fw->list, &dsp->fw_list);
mutex_unlock(&dsp->mutex);
@@ -85,9 +384,13 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
return sst_fw;
parse_err:
- dma_free_coherent(dsp->dev, sst_fw->size,
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+chan_err:
+ dma_free_coherent(dsp->dma_dev, sst_fw->size,
sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
+ sst_fw->dma_buf = NULL;
kfree(sst_fw);
return NULL;
}
@@ -111,21 +414,37 @@ EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
- struct sst_dsp *dsp = sst_fw->dsp;
- struct sst_module *module, *tmp;
+ struct sst_dsp *dsp = sst_fw->dsp;
+ struct sst_module *module, *mtmp;
+ struct sst_module_runtime *runtime, *rtmp;
+
+ dev_dbg(dsp->dev, "unloading firmware\n");
+
+ mutex_lock(&dsp->mutex);
+
+ /* check module by module */
+ list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
+ if (module->sst_fw == sst_fw) {
+
+ /* remove runtime modules */
+ list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
- dev_dbg(dsp->dev, "unloading firmware\n");
+ block_list_remove(dsp, &runtime->block_list);
+ list_del(&runtime->list);
+ kfree(runtime);
+ }
+
+ /* now remove the module */
+ block_list_remove(dsp, &module->block_list);
+ list_del(&module->list);
+ kfree(module);
+ }
+ }
- mutex_lock(&dsp->mutex);
- list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
- if (module->sst_fw == sst_fw) {
- block_module_remove(module);
- list_del(&module->list);
- kfree(module);
- }
- }
+ /* remove all scratch blocks */
+ block_list_remove(dsp, &dsp->scratch_block_list);
- mutex_unlock(&dsp->mutex);
+ mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);
@@ -138,7 +457,8 @@ void sst_fw_free(struct sst_fw *sst_fw)
list_del(&sst_fw->list);
mutex_unlock(&dsp->mutex);
- dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
+ if (sst_fw->dma_buf)
+ dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
kfree(sst_fw);
}
@@ -175,11 +495,11 @@ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
sst_module->id = template->id;
sst_module->dsp = dsp;
sst_module->sst_fw = sst_fw;
-
- memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
- memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
+ sst_module->scratch_size = template->scratch_size;
+ sst_module->persistent_size = template->persistent_size;
INIT_LIST_HEAD(&sst_module->block_list);
+ INIT_LIST_HEAD(&sst_module->runtime_list);
mutex_lock(&dsp->mutex);
list_add(&sst_module->list, &dsp->module_list);
@@ -202,73 +522,122 @@ void sst_module_free(struct sst_module *sst_module)
}
EXPORT_SYMBOL_GPL(sst_module_free);
-static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
- u32 offset)
+struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+ int id, void *private)
+{
+ struct sst_dsp *dsp = module->dsp;
+ struct sst_module_runtime *runtime;
+
+ runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+ if (runtime == NULL)
+ return NULL;
+
+ runtime->id = id;
+ runtime->dsp = dsp;
+ runtime->module = module;
+ INIT_LIST_HEAD(&runtime->block_list);
+
+ mutex_lock(&dsp->mutex);
+ list_add(&runtime->list, &module->runtime_list);
+ mutex_unlock(&dsp->mutex);
+
+ return runtime;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_new);
+
+void sst_module_runtime_free(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ list_del(&runtime->list);
+ mutex_unlock(&dsp->mutex);
+
+ kfree(runtime);
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_free);
+
+static struct sst_mem_block *find_block(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba)
{
struct sst_mem_block *block;
list_for_each_entry(block, &dsp->free_block_list, list) {
- if (block->type == type && block->offset == offset)
+ if (block->type == ba->type && block->offset == ba->offset)
return block;
}
return NULL;
}
-static int block_alloc_contiguous(struct sst_module *module,
- struct sst_module_data *data, u32 offset, int size)
+/* Block allocator must be on block boundary */
+static int block_alloc_contiguous(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba, struct list_head *block_list)
{
struct list_head tmp = LIST_HEAD_INIT(tmp);
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block;
+ u32 block_start = SST_HSW_BLOCK_ANY;
+ int size = ba->size, offset = ba->offset;
+
+ while (ba->size > 0) {
- while (size > 0) {
- block = find_block(dsp, data->type, offset);
+ block = find_block(dsp, ba);
if (!block) {
list_splice(&tmp, &dsp->free_block_list);
+
+ ba->size = size;
+ ba->offset = offset;
return -ENOMEM;
}
list_move_tail(&block->list, &tmp);
- offset += block->size;
- size -= block->size;
+ ba->offset += block->size;
+ ba->size -= block->size;
}
+ ba->size = size;
+ ba->offset = offset;
+
+ list_for_each_entry(block, &tmp, list) {
+
+ if (block->offset < block_start)
+ block_start = block->offset;
- list_for_each_entry(block, &tmp, list)
- list_add(&block->module_list, &module->block_list);
+ list_add(&block->module_list, block_list);
+
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
list_splice(&tmp, &dsp->used_block_list);
return 0;
}
-/* allocate free DSP blocks for module data - callers hold locks */
-static int block_alloc(struct sst_module *module,
- struct sst_module_data *data)
+/* allocate first free DSP blocks for data - callers hold locks */
+static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
int ret = 0;
- if (data->size == 0)
+ if (ba->size == 0)
return 0;
/* find first free whole blocks that can hold module */
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
- if (block->type != data->type)
+ if (block->type != ba->type)
continue;
- if (data->size > block->size)
+ if (ba->size > block->size)
continue;
- data->offset = block->offset;
- block->data_type = data->data_type;
- block->bytes_used = data->size % block->size;
- list_add(&block->module_list, &module->block_list);
+ ba->offset = block->offset;
+ block->bytes_used = ba->size % block->size;
+ list_add(&block->module_list, block_list);
list_move(&block->list, &dsp->used_block_list);
- dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
- module->id, block->type, block->index);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
return 0;
}
@@ -276,15 +645,19 @@ static int block_alloc(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
- if (block->type != data->type)
+ if (block->type != ba->type)
continue;
/* do we span > 1 blocks */
- if (data->size > block->size) {
- ret = block_alloc_contiguous(module, data,
- block->offset, data->size);
+ if (ba->size > block->size) {
+
+ /* align ba to block boundary */
+ ba->offset = block->offset;
+
+ ret = block_alloc_contiguous(dsp, ba, block_list);
if (ret == 0)
return ret;
+
}
}
@@ -292,93 +665,74 @@ static int block_alloc(struct sst_module *module,
return -ENOMEM;
}
-/* remove module from memory - callers hold locks */
-static void block_module_remove(struct sst_module *module)
+int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_mem_block *block, *tmp;
- struct sst_dsp *dsp = module->dsp;
- int err;
+ int ret;
- /* disable each block */
- list_for_each_entry(block, &module->block_list, module_list) {
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba->size, ba->offset, ba->type);
- if (block->ops && block->ops->disable) {
- err = block->ops->disable(block);
- if (err < 0)
- dev_err(dsp->dev,
- "error: cant disable block %d:%d\n",
- block->type, block->index);
- }
- }
+ mutex_lock(&dsp->mutex);
- /* mark each block as free */
- list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
- list_del(&block->module_list);
- list_move(&block->list, &dsp->free_block_list);
+ ret = block_alloc(dsp, ba, block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
+ goto out;
}
-}
-
-/* prepare the memory block to receive data from host - callers hold locks */
-static int block_module_prepare(struct sst_module *module)
-{
- struct sst_mem_block *block;
- int ret = 0;
- /* enable each block so that's it'e ready for module P/S data */
- list_for_each_entry(block, &module->block_list, module_list) {
+ /* prepare DSP blocks for module usage */
+ ret = block_list_prepare(dsp, block_list);
+ if (ret < 0)
+ dev_err(dsp->dev, "error: prepare failed\n");
- if (block->ops && block->ops->enable) {
- ret = block->ops->enable(block);
- if (ret < 0) {
- dev_err(module->dsp->dev,
- "error: cant disable block %d:%d\n",
- block->type, block->index);
- goto err;
- }
- }
- }
+out:
+ mutex_unlock(&dsp->mutex);
return ret;
+}
+EXPORT_SYMBOL_GPL(sst_alloc_blocks);
-err:
- list_for_each_entry(block, &module->block_list, module_list) {
- if (block->ops && block->ops->disable)
- block->ops->disable(block);
- }
- return ret;
+int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
+{
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
}
+EXPORT_SYMBOL_GPL(sst_free_blocks);
/* allocate memory blocks for static module addresses - callers hold locks */
-static int block_alloc_fixed(struct sst_module *module,
- struct sst_module_data *data)
+static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
- u32 end = data->offset + data->size, block_end;
+ u32 end = ba->offset + ba->size, block_end;
int err;
/* only IRAM/DRAM blocks are managed */
- if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
+ if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
return 0;
/* are blocks already attached to this module */
- list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
- /* force compacting mem blocks of the same data_type */
- if (block->data_type != data->data_type)
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
continue;
block_end = block->offset + block->size;
/* find block that holds section */
- if (data->offset >= block->offset && end < block_end)
+ if (ba->offset >= block->offset && end <= block_end)
return 0;
/* does block span more than 1 section */
- if (data->offset >= block->offset && data->offset < block_end) {
+ if (ba->offset >= block->offset && ba->offset < block_end) {
- err = block_alloc_contiguous(module, data,
- block->offset + block->size,
- data->size - block->size);
+ /* align ba to block boundary */
+ ba->size -= block_end - ba->offset;
+ ba->offset = block_end;
+ err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
@@ -391,82 +745,270 @@ static int block_alloc_fixed(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block_end = block->offset + block->size;
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
/* find block that holds section */
- if (data->offset >= block->offset && end < block_end) {
+ if (ba->offset >= block->offset && end <= block_end) {
/* add block */
- block->data_type = data->data_type;
list_move(&block->list, &dsp->used_block_list);
- list_add(&block->module_list, &module->block_list);
+ list_add(&block->module_list, block_list);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
return 0;
}
/* does block span more than 1 section */
- if (data->offset >= block->offset && data->offset < block_end) {
+ if (ba->offset >= block->offset && ba->offset < block_end) {
- err = block_alloc_contiguous(module, data,
- block->offset, data->size);
+ /* align ba to block boundary */
+ ba->offset = block->offset;
+
+ err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
return 0;
}
-
}
return -ENOMEM;
}
/* Load fixed module data into DSP memory blocks */
-int sst_module_insert_fixed_block(struct sst_module *module,
- struct sst_module_data *data)
+int sst_module_alloc_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
+ struct sst_fw *sst_fw = module->sst_fw;
+ struct sst_block_allocator ba;
int ret;
+ ba.size = module->size;
+ ba.type = module->type;
+ ba.offset = module->offset;
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba.size, ba.offset, ba.type);
+
mutex_lock(&dsp->mutex);
/* alloc blocks that includes this section */
- ret = block_alloc_fixed(module, data);
+ ret = block_alloc_fixed(dsp, &ba, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev,
"error: no free blocks for section at offset 0x%x size 0x%x\n",
- data->offset, data->size);
+ module->offset, module->size);
mutex_unlock(&dsp->mutex);
return -ENOMEM;
}
/* prepare DSP blocks for module copy */
- ret = block_module_prepare(module);
+ ret = block_list_prepare(dsp, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev, "error: fw module prepare failed\n");
goto err;
}
/* copy partial module data to blocks */
- sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
+ if (dsp->fw_use_dma) {
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + module->offset,
+ sst_fw->dmable_fw_paddr + module->data_offset,
+ module->size);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
+ module->size);
mutex_unlock(&dsp->mutex);
return ret;
err:
- block_module_remove(module);
+ block_list_remove(dsp, &module->block_list);
mutex_unlock(&dsp->mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
+EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
-int sst_block_module_remove(struct sst_module *module)
+int sst_module_free_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
mutex_lock(&dsp->mutex);
- block_module_remove(module);
+ block_list_remove(dsp, &module->block_list);
mutex_unlock(&dsp->mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(sst_block_module_remove);
+EXPORT_SYMBOL_GPL(sst_module_free_blocks);
+
+int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+ int offset)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ struct sst_block_allocator ba;
+ int ret;
+
+ if (module->persistent_size == 0)
+ return 0;
+
+ ba.size = module->persistent_size;
+ ba.type = SST_MEM_DRAM;
+
+ mutex_lock(&dsp->mutex);
+
+ /* do we need to allocate at a fixed address? */
+ if (offset != 0) {
+
+ ba.offset = offset;
+
+ dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc(dsp, &ba, &runtime->block_list);
+ }
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: no free blocks for runtime module size 0x%x\n",
+ module->persistent_size);
+ mutex_unlock(&dsp->mutex);
+ return -ENOMEM;
+ }
+ runtime->persistent_offset = ba.offset;
+
+ /* prepare DSP blocks for module copy */
+ ret = block_list_prepare(dsp, &runtime->block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: runtime block prepare failed\n");
+ goto err;
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return ret;
+
+err:
+ block_list_remove(dsp, &runtime->block_list);
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
+
+int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, &runtime->block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
+
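+/* Copy the runtime's persistent memory into a newly allocated host DMA buffer. */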
+int sst_module_runtime_save(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ context->buffer = dma_alloc_coherent(dsp->dma_dev,
+ module->persistent_size,
+ &context->dma_buffer, GFP_DMA | GFP_KERNEL);
+ if (!context->buffer) {
+ dev_err(dsp->dev, "error: DMA context alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dsp->mutex);
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: context copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(context->buffer, dsp->addr.lpe +
+ runtime->persistent_offset,
+ module->persistent_size);
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_save);
+
+int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ mutex_lock(&dsp->mutex);
+
+ if (!context->buffer) {
+ dev_info(dsp->dev, "no context buffer need to restore!\n");
+ goto err;
+ }
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ context->dma_buffer, module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
+ context->buffer, module->persistent_size);
+
+ dma_free_coherent(dsp->dma_dev, module->persistent_size,
+ context->buffer, context->dma_buffer);
+ context->buffer = NULL;
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
@@ -519,80 +1061,84 @@ void sst_mem_block_unregister_all(struct sst_dsp *dsp)
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
-struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
+int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
- struct sst_module *sst_module, *scratch;
- struct sst_mem_block *block, *tmp;
- u32 block_size;
- int ret = 0;
-
- scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
- if (scratch == NULL)
- return NULL;
+ struct sst_module *module;
+ struct sst_block_allocator ba;
+ int ret;
mutex_lock(&dsp->mutex);
/* calculate required scratch size */
- list_for_each_entry(sst_module, &dsp->module_list, list) {
- if (scratch->s.size < sst_module->s.size)
- scratch->s.size = sst_module->s.size;
+ dsp->scratch_size = 0;
+ list_for_each_entry(module, &dsp->module_list, list) {
+ dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
+ module->id, module->scratch_size);
+ if (dsp->scratch_size < module->scratch_size)
+ dsp->scratch_size = module->scratch_size;
}
- dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
- scratch->s.size);
-
- /* init scratch module */
- scratch->dsp = dsp;
- scratch->s.type = SST_MEM_DRAM;
- scratch->s.data_type = SST_DATA_S;
- INIT_LIST_HEAD(&scratch->block_list);
+ dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
+ dsp->scratch_size);
- /* check free blocks before looking at used blocks for space */
- if (!list_empty(&dsp->free_block_list))
- block = list_first_entry(&dsp->free_block_list,
- struct sst_mem_block, list);
- else
- block = list_first_entry(&dsp->used_block_list,
- struct sst_mem_block, list);
- block_size = block->size;
+ if (dsp->scratch_size == 0) {
+ dev_info(dsp->dev, "no modules need scratch buffer\n");
+ mutex_unlock(&dsp->mutex);
+ return 0;
+ }
/* allocate blocks for module scratch buffers */
dev_dbg(dsp->dev, "allocating scratch blocks\n");
- ret = block_alloc(scratch, &scratch->s);
+
+ ba.size = dsp->scratch_size;
+ ba.type = SST_MEM_DRAM;
+
+ /* do we need to allocate at a fixed offset? */
+ if (dsp->scratch_offset != 0) {
+
+ ba.offset = dsp->scratch_offset;
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ ba.offset = 0;
+ ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
+ }
if (ret < 0) {
dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
- goto err;
+ mutex_unlock(&dsp->mutex);
+ return ret;
}
- /* assign the same offset of scratch to each module */
- list_for_each_entry(sst_module, &dsp->module_list, list)
- sst_module->s.offset = scratch->s.offset;
-
- mutex_unlock(&dsp->mutex);
- return scratch;
+ ret = block_list_prepare(dsp, &dsp->scratch_block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: scratch block prepare failed\n");
+ mutex_unlock(&dsp->mutex);
+ return ret;
+ }
-err:
- list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
- list_del(&block->module_list);
+ /* assign the same offset of scratch to each module */
+ dsp->scratch_offset = ba.offset;
mutex_unlock(&dsp->mutex);
- return NULL;
+ return dsp->scratch_size;
}
-EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
+EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
-void sst_mem_block_free_scratch(struct sst_dsp *dsp,
- struct sst_module *scratch)
+void sst_block_free_scratch(struct sst_dsp *dsp)
{
- struct sst_mem_block *block, *tmp;
-
mutex_lock(&dsp->mutex);
-
- list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
- list_del(&block->module_list);
-
+ block_list_remove(dsp, &dsp->scratch_block_list);
mutex_unlock(&dsp->mutex);
}
-EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
+EXPORT_SYMBOL_GPL(sst_block_free_scratch);
/* get a module from it's unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
@@ -612,3 +1158,40 @@ struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
+
+struct sst_module_runtime *sst_module_runtime_get_from_id(
+ struct sst_module *module, u32 id)
+{
+ struct sst_module_runtime *runtime;
+ struct sst_dsp *dsp = module->dsp;
+
+ mutex_lock(&dsp->mutex);
+
+ list_for_each_entry(runtime, &module->runtime_list, list) {
+ if (runtime->id == id) {
+ mutex_unlock(&dsp->mutex);
+ return runtime;
+ }
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
+
+/* returns block address in DSP address space */
+u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+ enum sst_mem_type type)
+{
+ switch (type) {
+ case SST_MEM_IRAM:
+ return offset - dsp->addr.iram_offset +
+ dsp->addr.dsp_iram_offset;
+ case SST_MEM_DRAM:
+ return offset - dsp->addr.dram_offset +
+ dsp->addr.dsp_dram_offset;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index 4b6c163c10ff..57039b00efc2 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -42,6 +42,10 @@
#define SST_LP_SHIM_OFFSET 0xE7000
#define SST_WPT_IRAM_OFFSET 0xA0000
#define SST_LP_IRAM_OFFSET 0x80000
+#define SST_WPT_DSP_DRAM_OFFSET 0x400000
+#define SST_WPT_DSP_IRAM_OFFSET 0x00000
+#define SST_LPT_DSP_DRAM_OFFSET 0x400000
+#define SST_LPT_DSP_IRAM_OFFSET 0x00000
#define SST_SHIM_PM_REG 0x84
@@ -86,9 +90,8 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
{
struct dma_block_info *block;
struct sst_module *mod;
- struct sst_module_data block_data;
struct sst_module_template template;
- int count;
+ int count, ret;
void __iomem *ram;
/* TODO: allowed module types need to be configurable */
@@ -109,13 +112,9 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
memset(&template, 0, sizeof(template));
template.id = module->type;
- template.entry = module->entry_point;
- template.p.size = module->info.persistent_size;
- template.p.type = SST_MEM_DRAM;
- template.p.data_type = SST_DATA_P;
- template.s.size = module->info.scratch_size;
- template.s.type = SST_MEM_DRAM;
- template.s.data_type = SST_DATA_S;
+ template.entry = module->entry_point - 4;
+ template.persistent_size = module->info.persistent_size;
+ template.scratch_size = module->info.scratch_size;
mod = sst_module_new(fw, &template, NULL);
if (mod == NULL)
@@ -135,14 +134,14 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
switch (block->type) {
case SST_HSW_IRAM:
ram = dsp->addr.lpe;
- block_data.offset =
+ mod->offset =
block->ram_offset + dsp->addr.iram_offset;
- block_data.type = SST_MEM_IRAM;
+ mod->type = SST_MEM_IRAM;
break;
case SST_HSW_DRAM:
ram = dsp->addr.lpe;
- block_data.offset = block->ram_offset;
- block_data.type = SST_MEM_DRAM;
+ mod->offset = block->ram_offset;
+ mod->type = SST_MEM_DRAM;
break;
default:
dev_err(dsp->dev, "error: bad type 0x%x for block 0x%x\n",
@@ -151,30 +150,34 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return -EINVAL;
}
- block_data.size = block->size;
- block_data.data_type = SST_DATA_M;
- block_data.data = (void *)block + sizeof(*block);
- block_data.data_offset = block_data.data - fw->dma_buf;
+ mod->size = block->size;
+ mod->data = (void *)block + sizeof(*block);
+ mod->data_offset = mod->data - fw->dma_buf;
- dev_dbg(dsp->dev, "copy firmware block %d type 0x%x "
+ dev_dbg(dsp->dev, "module block %d type 0x%x "
"size 0x%x ==> ram %p offset 0x%x\n",
- count, block->type, block->size, ram,
+ count, mod->type, block->size, ram,
block->ram_offset);
- sst_module_insert_fixed_block(mod, &block_data);
+ ret = sst_module_alloc_blocks(mod);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: could not allocate blocks for module %d\n",
+ count);
+ sst_module_free(mod);
+ return ret;
+ }
block = (void *)block + sizeof(*block) + block->size;
}
+
return 0;
}
static int hsw_parse_fw_image(struct sst_fw *sst_fw)
{
struct fw_header *header;
- struct sst_module *scratch;
struct fw_module_header *module;
struct sst_dsp *dsp = sst_fw->dsp;
- struct sst_hsw *hsw = sst_fw->private;
int ret, count;
/* Read the header information from the data pointer */
@@ -204,12 +207,8 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
module = (void *)module + sizeof(*module) + module->mod_size;
}
- /* allocate persistent/scratch mem regions */
- scratch = sst_mem_block_alloc_scratch(dsp);
- if (scratch == NULL)
- return -ENOMEM;
-
- sst_hsw_set_scratch_module(hsw, scratch);
+ /* allocate scratch mem regions */
+ sst_block_alloc_scratch(dsp);
return 0;
}
@@ -248,8 +247,94 @@ static irqreturn_t hsw_irq(int irq, void *context)
return ret;
}
-static void hsw_boot(struct sst_dsp *sst)
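+/* Power gate the SRAM blocks, switch off the PLL and MCLK, and put the DSP into D3. */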
+static void hsw_set_dsp_D3(struct sst_dsp *sst)
+{
+ u32 val;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ /* enable power gating and switch off DRAM & IRAM blocks */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ val |= SST_VDRTCL0_DSRAMPGE_MASK |
+ SST_VDRTCL0_ISRAMPGE_MASK;
+ val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);
+
+ /* switch off audio PLL */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val |= SST_VDRTCL2_APLLSE_MASK;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ /* disable MCLK (CLKCTL.SMOS = 0) */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
+ SST_CLKCTL_MASK, 0);
+
+ /* Set D3 state, delay 50 us */
+ val = readl(sst->addr.pci_cfg + SST_PMCS);
+ val |= SST_PMCS_PS_MASK;
+ writel(val, sst->addr.pci_cfg + SST_PMCS);
+ udelay(50);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ udelay(50);
+
+}
+
+static void hsw_reset(struct sst_dsp *sst)
{
+ /* put DSP into reset and stall */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+ SST_CSR_RST | SST_CSR_STALL,
+ SST_CSR_RST | SST_CSR_STALL);
+
+ /* keep in reset for 10ms */
+ mdelay(10);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+ SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
+}
+
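+/* Return the DSP to D0 and reinitialise clocks, power gating, interrupts and IPC. */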
+static int hsw_set_dsp_D0(struct sst_dsp *sst)
+{
+ int tries = 10;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg |= SST_VDRTCL0_D3PGD;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+
+ /* Set D0 state */
+ reg = readl(sst->addr.pci_cfg + SST_PMCS);
+ reg &= ~SST_PMCS_PS_MASK;
+ writel(reg, sst->addr.pci_cfg + SST_PMCS);
+
+ /* wait for the DSP to reach D0 so that the ADSP shim can be accessed */
+ while (tries--) {
+ reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(1);
+ }
+
+ return -ENODEV;
+
+finish:
/* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);
@@ -264,34 +349,96 @@ static void hsw_boot(struct sst_dsp *sst)
SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);
+ /* Stall and reset core, set CSR */
+ hsw_reset(sst);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ udelay(50);
+
+ /* switch on audio PLL */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg &= ~SST_VDRTCL2_APLLSE_MASK;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ /* Set default power gating control: enable power gating for all blocks,
+ * i.e. a block cannot be accessed until it has been enabled. */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+
+
/* disable DMA finish function for SSP0 & SSP1 */
sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
SST_CSR2_SDFD_SSP1);
- /* enable DMA engine 0,1 all channels to access host memory */
- sst_dsp_shim_update_bits_unlocked(sst, SST_HMDC,
- SST_HMDC_HDDA1(0xff) | SST_HMDC_HDDA0(0xff),
- SST_HMDC_HDDA1(0xff) | SST_HMDC_HDDA0(0xff));
+ /* set on-demand mode on engine 0,1 for all channels */
+ sst_dsp_shim_update_bits(sst, SST_HMDC,
+ SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
+ SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);
+
+ /* Enable Interrupt from both sides */
+ sst_dsp_shim_update_bits(sst, SST_IMRX, (SST_IMRX_BUSY | SST_IMRX_DONE),
+ 0x0);
+ sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
+ SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);
+
+ /* clear IPC registers */
+ sst_dsp_shim_write(sst, SST_IPCX, 0x0);
+ sst_dsp_shim_write(sst, SST_IPCD, 0x0);
+ sst_dsp_shim_write(sst, 0x80, 0x6);
+ sst_dsp_shim_write(sst, 0xe0, 0x300a);
+
+ return 0;
+}
- /* disable all clock gating */
- writel(0x0, sst->addr.pci_cfg + SST_VDRTCTL2);
+static void hsw_boot(struct sst_dsp *sst)
+{
+ /* set opportunistic mode on engine 0,1 for all channels */
+ sst_dsp_shim_update_bits(sst, SST_HMDC,
+ SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH, 0);
/* set DSP to RUN */
sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_STALL, 0x0);
}
-static void hsw_reset(struct sst_dsp *sst)
+static void hsw_stall(struct sst_dsp *sst)
+{
+ /* stall DSP */
+ sst_dsp_shim_update_bits(sst, SST_CSR,
+ SST_CSR_24MHZ_LPCS | SST_CSR_STALL,
+ SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
+}
+
+static void hsw_sleep(struct sst_dsp *sst)
{
+ dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");
+
/* put DSP into reset and stall */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
- SST_CSR_RST | SST_CSR_STALL, SST_CSR_RST | SST_CSR_STALL);
+ sst_dsp_shim_update_bits(sst, SST_CSR,
+ SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
+ SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
- /* keep in reset for 10ms */
- mdelay(10);
+ hsw_set_dsp_D3(sst);
+ dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
+}
- /* take DSP out of reset and keep stalled for FW loading */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
- SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
+static int hsw_wake(struct sst_dsp *sst)
+{
+ int ret;
+
+ dev_dbg(sst->dev, "HSW_PM dsp runtime resume\n");
+
+ ret = hsw_set_dsp_D0(sst);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(sst->dev, "HSW_PM dsp runtime resume exit\n");
+
+ return 0;
}
struct sst_adsp_memregion {
@@ -396,6 +543,11 @@ static int hsw_block_enable(struct sst_mem_block *block)
dev_dbg(block->dsp->dev, " enabled block %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val &= ~SST_VDRTCL2_DCLCGE;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+
val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
bit = hsw_block_get_bit(block);
writel(val & ~bit, sst->addr.pci_cfg + SST_VDRTCTL0);
@@ -403,6 +555,13 @@ static int hsw_block_enable(struct sst_mem_block *block)
/* wait 18 DSP clock ticks */
udelay(10);
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val |= SST_VDRTCL2_DCLCGE;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ udelay(50);
+
/* add a dummy read before the SRAM block is written, otherwise the write may miss bytes sometimes */
sst_mem_block_dummy_read(block);
return 0;
@@ -420,10 +579,26 @@ static int hsw_block_disable(struct sst_mem_block *block)
dev_dbg(block->dsp->dev, " disabled block %d:%d at offset 0x%x\n",
block->type, block->index, block->offset);
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val &= ~SST_VDRTCL2_DCLCGE;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+
val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
bit = hsw_block_get_bit(block);
writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);
+ /* wait 18 DSP clock ticks */
+ udelay(10);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val |= SST_VDRTCL2_DCLCGE;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+
+ udelay(50);
+
return 0;
}
@@ -432,27 +607,6 @@ static struct sst_block_ops sst_hsw_ops = {
.disable = hsw_block_disable,
};
-static int hsw_enable_shim(struct sst_dsp *sst)
-{
- int tries = 10;
- u32 reg;
-
- /* enable shim */
- reg = readl(sst->addr.pci_cfg + SST_SHIM_PM_REG);
- writel(reg & ~0x3, sst->addr.pci_cfg + SST_SHIM_PM_REG);
-
- /* check that ADSP shim is enabled */
- while (tries--) {
- reg = sst_dsp_shim_read_unlocked(sst, SST_CSR);
- if (reg != 0xffffffff)
- return 0;
-
- msleep(1);
- }
-
- return -ENODEV;
-}
-
static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
{
const struct sst_adsp_memregion *region;
@@ -467,12 +621,16 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
region = lp_region;
region_count = ARRAY_SIZE(lp_region);
sst->addr.iram_offset = SST_LP_IRAM_OFFSET;
+ sst->addr.dsp_iram_offset = SST_LPT_DSP_IRAM_OFFSET;
+ sst->addr.dsp_dram_offset = SST_LPT_DSP_DRAM_OFFSET;
sst->addr.shim_offset = SST_LP_SHIM_OFFSET;
break;
case SST_DEV_ID_WILDCAT_POINT:
region = wpt_region;
region_count = ARRAY_SIZE(wpt_region);
sst->addr.iram_offset = SST_WPT_IRAM_OFFSET;
+ sst->addr.dsp_iram_offset = SST_WPT_DSP_IRAM_OFFSET;
+ sst->addr.dsp_dram_offset = SST_WPT_DSP_DRAM_OFFSET;
sst->addr.shim_offset = SST_WPT_SHIM_OFFSET;
break;
default:
@@ -487,7 +645,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
}
/* enable the DSP SHIM */
- ret = hsw_enable_shim(sst);
+ ret = hsw_set_dsp_D0(sst);
if (ret < 0) {
dev_err(dev, "error: failed to set DSP D0 and reset SHIM\n");
return ret;
@@ -497,10 +655,6 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
if (ret)
return ret;
- /* Enable Interrupt from both sides */
- sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX, 0x3, 0x0);
- sst_dsp_shim_update_bits_unlocked(sst, SST_IMRD,
- (0x3 | 0x1 << 16 | 0x3 << 21), 0x0);
/* register DSP memory blocks - ideally we should get this from ACPI */
for (i = 0; i < region_count; i++) {
@@ -532,6 +686,9 @@ static void hsw_free(struct sst_dsp *sst)
struct sst_ops haswell_ops = {
.reset = hsw_reset,
.boot = hsw_boot,
+ .stall = hsw_stall,
+ .wake = hsw_wake,
+ .sleep = hsw_sleep,
.write = sst_shim32_write,
.read = sst_shim32_read,
.write64 = sst_shim32_write64,
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index b6291516dbbf..3f8c48231364 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -30,6 +30,7 @@
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include "sst-haswell-ipc.h"
#include "sst-dsp.h"
@@ -276,6 +277,7 @@ struct sst_hsw {
struct sst_hsw_ipc_fw_version version;
struct sst_module *scratch;
bool fw_done;
+ struct sst_fw *sst_fw;
/* stream */
struct list_head stream_list;
@@ -289,6 +291,8 @@ struct sst_hsw {
/* DX */
struct sst_hsw_ipc_dx_reply dx;
+ void *dx_context;
+ dma_addr_t dx_context_paddr;
/* boot */
wait_queue_head_t boot_wait;
@@ -1038,14 +1042,9 @@ int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
trace_ipc_request("set stream volume", stream->reply.stream_hw_id);
- if (channel > 1)
+ if (channel >= 2 && channel != SST_HSW_CHANNELS_ALL)
return -EINVAL;
- if (stream->mute[channel]) {
- stream->mute_volume[channel] = volume;
- return 0;
- }
-
header = IPC_GLB_TYPE(IPC_GLB_STREAM_MESSAGE) |
IPC_STR_TYPE(IPC_STR_STAGE_MESSAGE);
header |= (stream->reply.stream_hw_id << IPC_STR_ID_SHIFT);
@@ -1053,9 +1052,28 @@ int sst_hsw_stream_set_volume(struct sst_hsw *hsw,
header |= (stage_id << IPC_STG_ID_SHIFT);
req = &stream->vol_req;
- req->channel = channel;
req->target_volume = volume;
+ /* set both at same time ? */
+ if (channel == SST_HSW_CHANNELS_ALL) {
+ if (hsw->mute[0] && hsw->mute[1]) {
+ hsw->mute_volume[0] = hsw->mute_volume[1] = volume;
+ return 0;
+ } else if (hsw->mute[0])
+ req->channel = 1;
+ else if (hsw->mute[1])
+ req->channel = 0;
+ else
+ req->channel = SST_HSW_CHANNELS_ALL;
+ } else {
+ /* set only 1 channel */
+ if (hsw->mute[channel]) {
+ hsw->mute_volume[channel] = volume;
+ return 0;
+ }
+ req->channel = channel;
+ }
+
ret = ipc_tx_message_wait(hsw, header, req, sizeof(*req), NULL, 0);
if (ret < 0) {
dev_err(hsw->dev, "error: set stream volume failed\n");
@@ -1134,8 +1152,11 @@ int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
trace_ipc_request("set mixer volume", volume);
+ if (channel >= 2 && channel != SST_HSW_CHANNELS_ALL)
+ return -EINVAL;
+
/* set both at same time ? */
- if (channel == 2) {
+ if (channel == SST_HSW_CHANNELS_ALL) {
if (hsw->mute[0] && hsw->mute[1]) {
hsw->mute_volume[0] = hsw->mute_volume[1] = volume;
return 0;
@@ -1144,7 +1165,7 @@ int sst_hsw_mixer_set_volume(struct sst_hsw *hsw, u32 stage_id, u32 channel,
else if (hsw->mute[1])
req.channel = 0;
else
- req.channel = 0xffffffff;
+ req.channel = SST_HSW_CHANNELS_ALL;
} else {
/* set only 1 channel */
if (hsw->mute[channel]) {
@@ -1256,10 +1277,6 @@ int sst_hsw_stream_set_channels(struct sst_hsw *hsw,
return -EINVAL;
}
- /* stereo is only supported atm */
- if (channels != 2)
- return -EINVAL;
-
stream->request.format.ch_num = channels;
return 0;
}
@@ -1355,10 +1372,11 @@ int sst_hsw_stream_buffer(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
}
int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, enum sst_hsw_module_id module_id,
- u32 entry_point)
+ struct sst_hsw_stream *stream, struct sst_module_runtime *runtime)
{
struct sst_hsw_module_map *map = &stream->request.map;
+ struct sst_dsp *dsp = sst_hsw_get_dsp(hsw);
+ struct sst_module *module = runtime->module;
if (stream->commited) {
dev_err(hsw->dev, "error: stream committed for set module\n");
@@ -1367,36 +1385,25 @@ int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
/* only support initial module atm */
map->module_entries_count = 1;
- map->module_entries[0].module_id = module_id;
- map->module_entries[0].entry_point = entry_point;
-
- return 0;
-}
-
-int sst_hsw_stream_set_pmemory_info(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 offset, u32 size)
-{
- if (stream->commited) {
- dev_err(hsw->dev, "error: stream committed for set pmem\n");
- return -EINVAL;
- }
-
- stream->request.persistent_mem.offset = offset;
- stream->request.persistent_mem.size = size;
-
- return 0;
-}
-
-int sst_hsw_stream_set_smemory_info(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, u32 offset, u32 size)
-{
- if (stream->commited) {
- dev_err(hsw->dev, "error: stream committed for set smem\n");
- return -EINVAL;
- }
-
- stream->request.scratch_mem.offset = offset;
- stream->request.scratch_mem.size = size;
+ map->module_entries[0].module_id = module->id;
+ map->module_entries[0].entry_point = module->entry;
+
+ stream->request.persistent_mem.offset =
+ sst_dsp_get_offset(dsp, runtime->persistent_offset, SST_MEM_DRAM);
+ stream->request.persistent_mem.size = module->persistent_size;
+
+ stream->request.scratch_mem.offset =
+ sst_dsp_get_offset(dsp, dsp->scratch_offset, SST_MEM_DRAM);
+ stream->request.scratch_mem.size = dsp->scratch_size;
+
+ dev_dbg(hsw->dev, "module %d runtime %d using:\n", module->id,
+ runtime->id);
+ dev_dbg(hsw->dev, " persistent offset 0x%x bytes 0x%x\n",
+ stream->request.persistent_mem.offset,
+ stream->request.persistent_mem.size);
+ dev_dbg(hsw->dev, " scratch offset 0x%x bytes 0x%x\n",
+ stream->request.scratch_mem.offset,
+ stream->request.scratch_mem.size);
return 0;
}
@@ -1630,6 +1637,10 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw,
config.clock_frequency = mclk;
config.mode = mode;
config.clock_divider = clock_divider;
+ if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER)
+ config.channels = 4;
+ else
+ config.channels = 2;
trace_hsw_device_config_req(&config);
@@ -1673,34 +1684,283 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
dev_dbg(hsw->dev, "ipc: got %d entry numbers for state %d\n",
dx->entries_no, state);
- memcpy(&hsw->dx, dx, sizeof(*dx));
- return 0;
+ return ret;
}
-/* Used to save state into hsw->dx_reply */
-int sst_hsw_dx_get_state(struct sst_hsw *hsw, u32 item,
- u32 *offset, u32 *size, u32 *source)
+struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
+ int mod_id, int offset)
{
- struct sst_hsw_ipc_dx_memory_item *dx_mem;
- struct sst_hsw_ipc_dx_reply *dx_reply;
- int entry_no;
+ struct sst_dsp *dsp = hsw->dsp;
+ struct sst_module *module;
+ struct sst_module_runtime *runtime;
+ int err;
- dx_reply = &hsw->dx;
- entry_no = dx_reply->entries_no;
+ module = sst_module_get_from_id(dsp, mod_id);
+ if (module == NULL) {
+ dev_err(dsp->dev, "error: failed to get module %d for pcm\n",
+ mod_id);
+ return NULL;
+ }
+
+ runtime = sst_module_runtime_new(module, mod_id, NULL);
+ if (runtime == NULL) {
+ dev_err(dsp->dev, "error: failed to create module %d runtime\n",
+ mod_id);
+ return NULL;
+ }
+
+ err = sst_module_runtime_alloc_blocks(runtime, offset);
+ if (err < 0) {
+ dev_err(dsp->dev, "error: failed to alloc blocks for module %d runtime\n",
+ mod_id);
+ sst_module_runtime_free(runtime);
+ return NULL;
+ }
+
+ dev_dbg(dsp->dev, "runtime id %d created for module %d\n", runtime->id,
+ mod_id);
+ return runtime;
+}
+
+void sst_hsw_runtime_module_free(struct sst_module_runtime *runtime)
+{
+ sst_module_runtime_free_blocks(runtime);
+ sst_module_runtime_free(runtime);
+}
+
+#ifdef CONFIG_PM
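+/* Copy the FW context regions reported in the DX reply from DSP memory to the host context buffer. */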
+static int sst_hsw_dx_state_dump(struct sst_hsw *hsw)
+{
+ struct sst_dsp *sst = hsw->dsp;
+ u32 item, offset, size;
+ int ret = 0;
- trace_ipc_request("PM get Dx state", entry_no);
+ trace_ipc_request("PM state dump. Items #", SST_HSW_MAX_DX_REGIONS);
- if (item >= entry_no)
+ if (hsw->dx.entries_no > SST_HSW_MAX_DX_REGIONS) {
+ dev_err(hsw->dev,
+ "error: number of FW context regions greater than %d\n",
+ SST_HSW_MAX_DX_REGIONS);
+ memset(&hsw->dx, 0, sizeof(hsw->dx));
return -EINVAL;
+ }
+
+ ret = sst_dsp_dma_get_channel(sst, 0);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: cant allocate dma channel %d\n", ret);
+ return ret;
+ }
+
+ /* set on-demand mode on engine 0,1 for all channels */
+ sst_dsp_shim_update_bits(sst, SST_HMDC,
+ SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
+ SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);
+
+ for (item = 0; item < hsw->dx.entries_no; item++) {
+ if (hsw->dx.mem_info[item].source == SST_HSW_DX_TYPE_MEMORY_DUMP
+ && hsw->dx.mem_info[item].offset > DSP_DRAM_ADDR_OFFSET
+ && hsw->dx.mem_info[item].offset <
+ DSP_DRAM_ADDR_OFFSET + SST_HSW_DX_CONTEXT_SIZE) {
+
+ offset = hsw->dx.mem_info[item].offset
+ - DSP_DRAM_ADDR_OFFSET;
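+ /* round the copy size up to a 4-byte boundary for the DMA transfer */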
+ size = (hsw->dx.mem_info[item].size + 3) & (~3);
+
+ ret = sst_dsp_dma_copyfrom(sst, hsw->dx_context_paddr + offset,
+ sst->addr.lpe_base + offset, size);
+ if (ret < 0) {
+ dev_err(hsw->dev,
+ "error: FW context dump failed\n");
+ memset(&hsw->dx, 0, sizeof(hsw->dx));
+ goto out;
+ }
+ }
+ }
+
+out:
+ sst_dsp_dma_put_channel(sst);
+ return ret;
+}
+
+static int sst_hsw_dx_state_restore(struct sst_hsw *hsw)
+{
+ struct sst_dsp *sst = hsw->dsp;
+ u32 item, offset, size;
+ int ret;
+
+ for (item = 0; item < hsw->dx.entries_no; item++) {
+ if (hsw->dx.mem_info[item].source == SST_HSW_DX_TYPE_MEMORY_DUMP
+ && hsw->dx.mem_info[item].offset > DSP_DRAM_ADDR_OFFSET
+ && hsw->dx.mem_info[item].offset <
+ DSP_DRAM_ADDR_OFFSET + SST_HSW_DX_CONTEXT_SIZE) {
+
+ offset = hsw->dx.mem_info[item].offset
+ - DSP_DRAM_ADDR_OFFSET;
+ size = (hsw->dx.mem_info[item].size + 3) & (~3);
+
+ ret = sst_dsp_dma_copyto(sst, sst->addr.lpe_base + offset,
+ hsw->dx_context_paddr + offset, size);
+ if (ret < 0) {
+ dev_err(hsw->dev,
+ "error: FW context restore failed\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void sst_hsw_drop_all(struct sst_hsw *hsw)
+{
+ struct ipc_message *msg, *tmp;
+ unsigned long flags;
+ int tx_drop_cnt = 0, rx_drop_cnt = 0;
- dx_mem = &dx_reply->mem_info[item];
- *offset = dx_mem->offset;
- *size = dx_mem->size;
- *source = dx_mem->source;
+ /* drop all TX and RX messages before we stall + reset the DSP */
+ spin_lock_irqsave(&hsw->dsp->spinlock, flags);
+
+ list_for_each_entry_safe(msg, tmp, &hsw->tx_list, list) {
+ list_move(&msg->list, &hsw->empty_list);
+ tx_drop_cnt++;
+ }
+
+ list_for_each_entry_safe(msg, tmp, &hsw->rx_list, list) {
+ list_move(&msg->list, &hsw->empty_list);
+ rx_drop_cnt++;
+ }
+
+ spin_unlock_irqrestore(&hsw->dsp->spinlock, flags);
+
+ if (tx_drop_cnt || rx_drop_cnt)
+ dev_err(hsw->dev, "dropped IPC msg RX=%d, TX=%d\n",
+ tx_drop_cnt, rx_drop_cnt);
+}
+
+int sst_hsw_dsp_load(struct sst_hsw *hsw)
+{
+ struct sst_dsp *dsp = hsw->dsp;
+ int ret;
+
+ dev_dbg(hsw->dev, "loading audio DSP....");
+
+ ret = sst_dsp_wake(dsp);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: failed to wake audio DSP\n");
+ return -ENODEV;
+ }
+
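+ /* a DMA channel is needed to copy the base FW image back to the DSP */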
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: cant allocate dma channel %d\n", ret);
+ return ret;
+ }
+
+ ret = sst_fw_reload(hsw->sst_fw);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: SST FW reload failed\n");
+ sst_dsp_dma_put_channel(dsp);
+ return -ENOMEM;
+ }
+ sst_dsp_dma_put_channel(dsp);
return 0;
}
+static int sst_hsw_dsp_restore(struct sst_hsw *hsw)
+{
+ struct sst_dsp *dsp = hsw->dsp;
+ int ret;
+
+ dev_dbg(hsw->dev, "restoring audio DSP....");
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: cant allocate dma channel %d\n", ret);
+ return ret;
+ }
+
+ ret = sst_hsw_dx_state_restore(hsw);
+ if (ret < 0) {
+ dev_err(hsw->dev, "error: SST FW context restore failed\n");
+ sst_dsp_dma_put_channel(dsp);
+ return -ENOMEM;
+ }
+ sst_dsp_dma_put_channel(dsp);
+
+ /* wait for DSP boot completion */
+ sst_dsp_boot(dsp);
+
+ return ret;
+}
+
+int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
+{
+ int ret;
+
+ dev_dbg(hsw->dev, "audio dsp runtime suspend\n");
+
+ ret = sst_hsw_dx_set_state(hsw, SST_HSW_DX_STATE_D3, &hsw->dx);
+ if (ret < 0)
+ return ret;
+
+ sst_dsp_stall(hsw->dsp);
+
+ ret = sst_hsw_dx_state_dump(hsw);
+ if (ret < 0)
+ return ret;
+
+ sst_hsw_drop_all(hsw);
+
+ return 0;
+}
+
+int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
+{
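+ /* unload the FW image and free scratch blocks before the DSP sleeps */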
+ sst_fw_unload(hsw->sst_fw);
+ sst_block_free_scratch(hsw->dsp);
+
+ hsw->boot_complete = false;
+
+ sst_dsp_sleep(hsw->dsp);
+
+ return 0;
+}
+
+int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
+{
+ struct device *dev = hsw->dev;
+ int ret;
+
+ dev_dbg(dev, "audio dsp runtime resume\n");
+
+ if (hsw->boot_complete)
+ return 1; /* tell caller no action is required */
+
+ ret = sst_hsw_dsp_restore(hsw);
+ if (ret < 0)
+ dev_err(dev, "error: audio DSP boot failure\n");
+
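+ /* wait for the FW ready IPC to signal boot completion */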
+ ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
+ msecs_to_jiffies(IPC_BOOT_MSECS));
+ if (ret == 0) {
+ dev_err(hsw->dev, "error: audio DSP boot timeout IPCD 0x%x IPCX 0x%x\n",
+ sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCD),
+ sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX));
+ return -EIO;
+ }
+
+ /* Set ADSP SSP port settings */
+ ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0,
+ SST_HSW_DEVICE_MCLK_FREQ_24_MHZ,
+ SST_HSW_DEVICE_CLOCK_MASTER, 9);
+ if (ret < 0)
+ dev_err(dev, "error: SSP re-initialization failed\n");
+
+ return ret;
+}
+#endif
+
static int msg_empty_list_init(struct sst_hsw *hsw)
{
int i;
@@ -1718,12 +1978,6 @@ static int msg_empty_list_init(struct sst_hsw *hsw)
return 0;
}
-void sst_hsw_set_scratch_module(struct sst_hsw *hsw,
- struct sst_module *scratch)
-{
- hsw->scratch = scratch;
-}
-
struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw)
{
return hsw->dsp;
@@ -1738,7 +1992,6 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
{
struct sst_hsw_ipc_fw_version version;
struct sst_hsw *hsw;
- struct sst_fw *hsw_sst_fw;
int ret;
dev_dbg(dev, "initialising Audio DSP IPC\n");
@@ -1780,12 +2033,19 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
goto dsp_err;
}
+ /* allocate DMA buffer for context storage */
+ hsw->dx_context = dma_alloc_coherent(hsw->dsp->dma_dev,
+ SST_HSW_DX_CONTEXT_SIZE, &hsw->dx_context_paddr, GFP_KERNEL);
+ if (hsw->dx_context == NULL) {
+ ret = -ENOMEM;
+ goto dma_err;
+ }
+
/* keep the DSP in reset state for base FW loading */
sst_dsp_reset(hsw->dsp);
- hsw_sst_fw = sst_fw_new(hsw->dsp, pdata->fw, hsw);
-
- if (hsw_sst_fw == NULL) {
+ hsw->sst_fw = sst_fw_new(hsw->dsp, pdata->fw, hsw);
+ if (hsw->sst_fw == NULL) {
ret = -ENODEV;
dev_err(dev, "error: failed to load firmware\n");
goto fw_err;
@@ -1797,7 +2057,9 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
msecs_to_jiffies(IPC_BOOT_MSECS));
if (ret == 0) {
ret = -EIO;
- dev_err(hsw->dev, "error: ADSP boot timeout\n");
+ dev_err(hsw->dev, "error: audio DSP boot timeout IPCD 0x%x IPCX 0x%x\n",
+ sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCD),
+ sst_dsp_shim_read_unlocked(hsw->dsp, SST_IPCX));
goto boot_err;
}
@@ -1816,8 +2078,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
boot_err:
sst_dsp_reset(hsw->dsp);
- sst_fw_free(hsw_sst_fw);
+ sst_fw_free(hsw->sst_fw);
fw_err:
+ dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
+ hsw->dx_context, hsw->dx_context_paddr);
+dma_err:
sst_dsp_free(hsw->dsp);
dsp_err:
kthread_stop(hsw->tx_thread);
@@ -1834,6 +2099,8 @@ void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata)
sst_dsp_reset(hsw->dsp);
sst_fw_free_all(hsw->dsp);
+ dma_free_coherent(hsw->dsp->dma_dev, SST_HSW_DX_CONTEXT_SIZE,
+ hsw->dx_context, hsw->dx_context_paddr);
sst_dsp_free(hsw->dsp);
kfree(hsw->scratch);
kthread_stop(hsw->tx_thread);
diff --git a/sound/soc/intel/sst-haswell-ipc.h b/sound/soc/intel/sst-haswell-ipc.h
index 2ac194a6d04b..138e894ab413 100644
--- a/sound/soc/intel/sst-haswell-ipc.h
+++ b/sound/soc/intel/sst-haswell-ipc.h
@@ -21,8 +21,10 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#define SST_HSW_NO_CHANNELS 2
+#define SST_HSW_NO_CHANNELS 4
#define SST_HSW_MAX_DX_REGIONS 14
+#define SST_HSW_DX_CONTEXT_SIZE (640 * 1024)
+#define SST_HSW_CHANNELS_ALL 0xffffffff
#define SST_HSW_FW_LOG_CONFIG_DWORDS 12
#define SST_HSW_GLOBAL_LOG 15
@@ -40,6 +42,7 @@ struct sst_hsw_stream;
struct sst_hsw_log_stream;
struct sst_pdata;
struct sst_module;
+struct sst_module_runtime;
extern struct sst_ops haswell_ops;
/* Stream Allocate Path ID */
@@ -84,6 +87,7 @@ enum sst_hsw_device_mclk {
enum sst_hsw_device_mode {
SST_HSW_DEVICE_CLOCK_SLAVE = 0,
SST_HSW_DEVICE_CLOCK_MASTER = 1,
+ SST_HSW_DEVICE_TDM_CLOCK_MASTER = 2,
};
/* DX Power State */
@@ -295,7 +299,8 @@ struct sst_hsw_ipc_device_config_req {
u32 clock_frequency;
u32 mode;
u16 clock_divider;
- u16 reserved;
+ u8 channels;
+ u8 reserved;
} __attribute__((packed));
/* Audio Data formats */
@@ -430,8 +435,7 @@ int sst_hsw_stream_set_map_config(struct sst_hsw *hsw,
int sst_hsw_stream_set_style(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
enum sst_hsw_interleaving style);
int sst_hsw_stream_set_module_info(struct sst_hsw *hsw,
- struct sst_hsw_stream *stream, enum sst_hsw_module_id module_id,
- u32 entry_point);
+ struct sst_hsw_stream *stream, struct sst_module_runtime *runtime);
int sst_hsw_stream_set_pmemory_info(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 offset, u32 size);
int sst_hsw_stream_set_smemory_info(struct sst_hsw *hsw,
@@ -484,7 +488,16 @@ int sst_hsw_dx_get_state(struct sst_hsw *hsw, u32 item,
int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata);
void sst_hsw_dsp_free(struct device *dev, struct sst_pdata *pdata);
struct sst_dsp *sst_hsw_get_dsp(struct sst_hsw *hsw);
-void sst_hsw_set_scratch_module(struct sst_hsw *hsw,
- struct sst_module *scratch);
+
+/* runtime module management */
+struct sst_module_runtime *sst_hsw_runtime_module_create(struct sst_hsw *hsw,
+ int mod_id, int offset);
+void sst_hsw_runtime_module_free(struct sst_module_runtime *runtime);
+
+/* PM */
+int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw);
+int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw);
+int sst_hsw_dsp_load(struct sst_hsw *hsw);
+int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw);
#endif
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index 4df867cbb92a..b8a782c0d4cd 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -18,6 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <sound/core.h>
@@ -73,6 +74,13 @@ static const u32 volume_map[] = {
#define HSW_PCM_PERIODS_MAX 64
#define HSW_PCM_PERIODS_MIN 2
+#define HSW_PCM_DAI_ID_SYSTEM 0
+#define HSW_PCM_DAI_ID_OFFLOAD0 1
+#define HSW_PCM_DAI_ID_OFFLOAD1 2
+#define HSW_PCM_DAI_ID_LOOPBACK 3
+#define HSW_PCM_DAI_ID_CAPTURE 4
+
static const struct snd_pcm_hardware hsw_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
@@ -89,22 +97,39 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
.buffer_bytes_max = HSW_PCM_PERIODS_MAX * PAGE_SIZE,
};
+struct hsw_pcm_module_map {
+ int dai_id;
+ enum sst_hsw_module_id mod_id;
+};
+
/* private data for each PCM DSP stream */
struct hsw_pcm_data {
int dai_id;
struct sst_hsw_stream *stream;
+ struct sst_module_runtime *runtime;
+ struct sst_module_runtime_context context;
+ struct snd_pcm *hsw_pcm;
u32 volume[2];
struct snd_pcm_substream *substream;
struct snd_compr_stream *cstream;
unsigned int wpos;
struct mutex mutex;
bool allocated;
+ int persistent_offset;
+};
+
+enum hsw_pm_state {
+ HSW_PM_STATE_D3 = 0,
+ HSW_PM_STATE_D0 = 1,
};
/* private data for the driver */
struct hsw_priv_data {
/* runtime DSP */
struct sst_hsw *hsw;
+ struct device *dev;
+ enum hsw_pm_state pm_state;
+ struct snd_soc_card *soc_card;
/* page tables */
struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
@@ -138,21 +163,25 @@ static inline unsigned int hsw_ipc_to_mixer(u32 value)
static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ struct hsw_priv_data *pdata =
+ snd_soc_platform_get_drvdata(platform);
struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
mutex_lock(&pcm_data->mutex);
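+ /* keep the DSP powered up while the volume IPC is sent */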
+ pm_runtime_get_sync(pdata->dev);
if (!pcm_data->stream) {
pcm_data->volume[0] =
hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
pcm_data->volume[1] =
hsw_mixer_to_ipc(ucontrol->value.integer.value[1]);
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return 0;
}
@@ -160,7 +189,8 @@ static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] ==
ucontrol->value.integer.value[1]) {
volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
- sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, 2, volume);
+ /* apply volume value to all channels */
+ sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, SST_HSW_CHANNELS_ALL, volume);
} else {
volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, 0, volume);
@@ -168,6 +198,8 @@ static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, 1, volume);
}
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return 0;
}
@@ -175,21 +207,25 @@ static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ struct hsw_priv_data *pdata =
+ snd_soc_platform_get_drvdata(platform);
struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
mutex_lock(&pcm_data->mutex);
+ pm_runtime_get_sync(pdata->dev);
if (!pcm_data->stream) {
ucontrol->value.integer.value[0] =
hsw_ipc_to_mixer(pcm_data->volume[0]);
ucontrol->value.integer.value[1] =
hsw_ipc_to_mixer(pcm_data->volume[1]);
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return 0;
}
@@ -198,6 +234,9 @@ static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] = hsw_ipc_to_mixer(volume);
sst_hsw_stream_get_volume(hsw, pcm_data->stream, 0, 1, &volume);
ucontrol->value.integer.value[1] = hsw_ipc_to_mixer(volume);
+
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return 0;
@@ -206,16 +245,18 @@ static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
static int hsw_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
+ pm_runtime_get_sync(pdata->dev);
+
if (ucontrol->value.integer.value[0] ==
ucontrol->value.integer.value[1]) {
volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
- sst_hsw_mixer_set_volume(hsw, 0, 2, volume);
+ sst_hsw_mixer_set_volume(hsw, 0, SST_HSW_CHANNELS_ALL, volume);
} else {
volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
@@ -225,23 +266,28 @@ static int hsw_volume_put(struct snd_kcontrol *kcontrol,
sst_hsw_mixer_set_volume(hsw, 0, 1, volume);
}
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
return 0;
}
static int hsw_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
+ struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
struct sst_hsw *hsw = pdata->hsw;
unsigned int volume = 0;
+ pm_runtime_get_sync(pdata->dev);
sst_hsw_mixer_get_volume(hsw, 0, 0, &volume);
ucontrol->value.integer.value[0] = hsw_ipc_to_mixer(volume);
sst_hsw_mixer_get_volume(hsw, 0, 1, &volume);
ucontrol->value.integer.value[1] = hsw_ipc_to_mixer(volume);
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
return 0;
}
@@ -252,23 +298,19 @@ static const DECLARE_TLV_DB_SCALE(hsw_vol_tlv, -9000, 300, 1);
static const struct snd_kcontrol_new hsw_volume_controls[] = {
/* Global DSP volume */
SOC_DOUBLE_EXT_TLV("Master Playback Volume", 0, 0, 8,
- ARRAY_SIZE(volume_map) -1, 0,
+ ARRAY_SIZE(volume_map) - 1, 0,
hsw_volume_get, hsw_volume_put, hsw_vol_tlv),
/* Offload 0 volume */
SOC_DOUBLE_EXT_TLV("Media0 Playback Volume", 1, 0, 8,
- ARRAY_SIZE(volume_map), 0,
+ ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
/* Offload 1 volume */
SOC_DOUBLE_EXT_TLV("Media1 Playback Volume", 2, 0, 8,
- ARRAY_SIZE(volume_map), 0,
- hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
- /* Loopback volume */
- SOC_DOUBLE_EXT_TLV("Loopback Capture Volume", 3, 0, 8,
- ARRAY_SIZE(volume_map), 0,
+ ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
/* Mic Capture volume */
- SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 4, 0, 8,
- ARRAY_SIZE(volume_map), 0,
+ SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 0, 0, 8,
+ ARRAY_SIZE(volume_map) - 1, 0,
hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
};
@@ -354,8 +396,14 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
/* DSP stream type depends on DAI ID */
switch (rtd->cpu_dai->id) {
case 0:
- stream_type = SST_HSW_STREAM_TYPE_SYSTEM;
- module_id = SST_HSW_MODULE_PCM_SYSTEM;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_type = SST_HSW_STREAM_TYPE_SYSTEM;
+ module_id = SST_HSW_MODULE_PCM_SYSTEM;
+ } else {
+ stream_type = SST_HSW_STREAM_TYPE_CAPTURE;
+ module_id = SST_HSW_MODULE_PCM_CAPTURE;
+ }
break;
case 1:
case 2:
@@ -368,10 +416,6 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
module_id = SST_HSW_MODULE_PCM_REFERENCE;
break;
- case 4:
- stream_type = SST_HSW_STREAM_TYPE_CAPTURE;
- module_id = SST_HSW_MODULE_PCM_CAPTURE;
- break;
default:
dev_err(rtd->dev, "error: invalid DAI ID %d\n",
rtd->cpu_dai->id);
@@ -421,13 +465,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- /* we only support stereo atm */
channels = params_channels(params);
- if (channels != 2) {
- dev_err(rtd->dev, "error: invalid channels %d\n", channels);
- return -EINVAL;
- }
-
map = create_channel_map(SST_HSW_CHANNEL_CONFIG_STEREO);
sst_hsw_stream_set_map_config(hsw, pcm_data->stream,
map, SST_HSW_CHANNEL_CONFIG_STEREO);
@@ -478,35 +516,23 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* we use hardcoded memory offsets atm, will be updated for new FW */
- if (stream_type == SST_HSW_STREAM_TYPE_CAPTURE) {
- sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
- SST_HSW_MODULE_PCM_CAPTURE, module_data->entry);
- sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
- 0x449400, 0x4000);
- sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
- 0x400000, 0);
- } else { /* stream_type == SST_HSW_STREAM_TYPE_SYSTEM */
- sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
- SST_HSW_MODULE_PCM_SYSTEM, module_data->entry);
-
- sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
- module_data->offset, module_data->size);
- sst_hsw_stream_set_pmemory_info(hsw, pcm_data->stream,
- 0x44d400, 0x3800);
-
- sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
- module_data->offset, module_data->size);
- sst_hsw_stream_set_smemory_info(hsw, pcm_data->stream,
- 0x400000, 0);
- }
+ sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
+ pcm_data->runtime);
ret = sst_hsw_stream_commit(hsw, pcm_data->stream);
if (ret < 0) {
dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
return ret;
}
- pcm_data->allocated = true;
+
+ if (!pcm_data->allocated) {
+ /* Set previous saved volume */
+ sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
+ 0, pcm_data->volume[0]);
+ sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
+ 1, pcm_data->volume[1]);
+ pcm_data->allocated = true;
+ }
ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
if (ret < 0)
@@ -558,7 +584,7 @@ static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data)
pos = frames_to_bytes(runtime,
(runtime->control->appl_ptr % runtime->buffer_size));
- dev_dbg(rtd->dev, "PCM: App pointer %d bytes\n", pos);
+ dev_vdbg(rtd->dev, "PCM: App pointer %d bytes\n", pos);
/* let alsa know we have play a period */
snd_pcm_period_elapsed(substream);
@@ -580,7 +606,7 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
offset = bytes_to_frames(runtime, position);
ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
- dev_dbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
+ dev_vdbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
position, ppos);
return offset;
}
@@ -596,6 +622,7 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
pcm_data = &pdata->pcm[rtd->cpu_dai->id];
mutex_lock(&pcm_data->mutex);
+ pm_runtime_get_sync(pdata->dev);
snd_soc_pcm_set_drvdata(rtd, pcm_data);
pcm_data->substream = substream;
@@ -606,16 +633,12 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
hsw_notify_pointer, pcm_data);
if (pcm_data->stream == NULL) {
dev_err(rtd->dev, "error: failed to create stream\n");
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return -EINVAL;
}
- /* Set previous saved volume */
- sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
- 0, pcm_data->volume[0]);
- sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
- 1, pcm_data->volume[1]);
-
mutex_unlock(&pcm_data->mutex);
return 0;
}
@@ -645,6 +668,8 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
pcm_data->stream = NULL;
out:
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return ret;
}
@@ -660,6 +685,56 @@ static struct snd_pcm_ops hsw_pcm_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
+/* static mappings between PCMs and modules - may be dynamic in future */
+static struct hsw_pcm_module_map mod_map[] = {
+ {HSW_PCM_DAI_ID_SYSTEM, SST_HSW_MODULE_PCM_SYSTEM},
+ {HSW_PCM_DAI_ID_OFFLOAD0, SST_HSW_MODULE_PCM},
+ {HSW_PCM_DAI_ID_OFFLOAD1, SST_HSW_MODULE_PCM},
+ {HSW_PCM_DAI_ID_LOOPBACK, SST_HSW_MODULE_PCM_REFERENCE},
+ {HSW_PCM_DAI_ID_CAPTURE, SST_HSW_MODULE_PCM_CAPTURE},
+};
+
+static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
+{
+ struct sst_hsw *hsw = pdata->hsw;
+ struct hsw_pcm_data *pcm_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
+ pcm_data = &pdata->pcm[i];
+
+ /* create new runtime module, use same offset if recreated */
+ pcm_data->runtime = sst_hsw_runtime_module_create(hsw,
+ mod_map[i].mod_id, pcm_data->persistent_offset);
+ if (pcm_data->runtime == NULL)
+ goto err;
+ pcm_data->persistent_offset =
+ pcm_data->runtime->persistent_offset;
+ }
+
+ return 0;
+
+err:
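+ /* free any runtime modules that were already created */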
+ for (--i; i >= 0; i--) {
+ pcm_data = &pdata->pcm[i];
+ sst_hsw_runtime_module_free(pcm_data->runtime);
+ }
+
+ return -ENODEV;
+}
+
+static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
+{
+ struct hsw_pcm_data *pcm_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
+ pcm_data = &pdata->pcm[i];
+
+ sst_hsw_runtime_module_free(pcm_data->runtime);
+ }
+}
+
static void hsw_pcm_free(struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
@@ -670,6 +745,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct snd_pcm *pcm = rtd->pcm;
struct snd_soc_platform *platform = rtd->platform;
struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+ struct hsw_priv_data *priv_data = dev_get_drvdata(platform->dev);
struct device *dev = pdata->dma_dev;
int ret = 0;
@@ -686,6 +762,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
return ret;
}
}
+ priv_data->pcm[rtd->cpu_dai->id].hsw_pcm = pcm;
return ret;
}
@@ -696,6 +773,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_dai_driver hsw_dais[] = {
{
.name = "System Pin",
+ .id = HSW_PCM_DAI_ID_SYSTEM,
.playback = {
.stream_name = "System Playback",
.channels_min = 2,
@@ -703,10 +781,18 @@ static struct snd_soc_dai_driver hsw_dais[] = {
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
},
+ .capture = {
+ .stream_name = "Analog Capture",
+ .channels_min = 2,
+ .channels_max = 4,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
+ },
},
{
/* PCM */
.name = "Offload0 Pin",
+ .id = HSW_PCM_DAI_ID_OFFLOAD0,
.playback = {
.stream_name = "Offload0 Playback",
.channels_min = 2,
@@ -718,6 +804,7 @@ static struct snd_soc_dai_driver hsw_dais[] = {
{
/* PCM */
.name = "Offload1 Pin",
+ .id = HSW_PCM_DAI_ID_OFFLOAD1,
.playback = {
.stream_name = "Offload1 Playback",
.channels_min = 2,
@@ -728,6 +815,7 @@ static struct snd_soc_dai_driver hsw_dais[] = {
},
{
.name = "Loopback Pin",
+ .id = HSW_PCM_DAI_ID_LOOPBACK,
.capture = {
.stream_name = "Loopback Capture",
.channels_min = 2,
@@ -736,16 +824,6 @@ static struct snd_soc_dai_driver hsw_dais[] = {
.formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
},
},
- {
- .name = "Capture Pin",
- .capture = {
- .stream_name = "Analog Capture",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
- },
- },
};
static const struct snd_soc_dapm_widget widgets[] = {
@@ -776,9 +854,20 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
{
struct hsw_priv_data *priv_data = snd_soc_platform_get_drvdata(platform);
struct sst_pdata *pdata = dev_get_platdata(platform->dev);
- struct device *dma_dev = pdata->dma_dev;
+ struct device *dma_dev, *dev;
int i, ret = 0;
+ if (!pdata)
+ return -ENODEV;
+
+ dev = platform->dev;
+ dma_dev = pdata->dma_dev;
+
+ priv_data->hsw = pdata->dsp;
+ priv_data->dev = platform->dev;
+ priv_data->pm_state = HSW_PM_STATE_D0;
+ priv_data->soc_card = platform->component.card;
+
/* allocate DSP buffer page tables */
for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
@@ -801,6 +890,16 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
}
}
+ /* allocate runtime modules */
+ hsw_pcm_create_modules(priv_data);
+
+ /* enable runtime PM with auto suspend */
+ pm_runtime_set_autosuspend_delay(platform->dev,
+ SST_RUNTIME_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(platform->dev);
+ pm_runtime_enable(platform->dev);
+ pm_runtime_idle(platform->dev);
+
return 0;
err:
@@ -819,6 +918,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform)
snd_soc_platform_get_drvdata(platform);
int i;
+ pm_runtime_disable(platform->dev);
+ hsw_pcm_free_modules(priv_data);
+
for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
if (hsw_dais[i].playback.channels_min)
snd_dma_free_pages(&priv_data->dmab[i][0]);
@@ -896,10 +998,179 @@ static int hsw_pcm_dev_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
+
+static int hsw_pcm_runtime_idle(struct device *dev)
+{
+ return 0;
+}
+
+static int hsw_pcm_runtime_suspend(struct device *dev)
+{
+ struct hsw_priv_data *pdata = dev_get_drvdata(dev);
+ struct sst_hsw *hsw = pdata->hsw;
+
+ if (pdata->pm_state == HSW_PM_STATE_D3)
+ return 0;
+
+ sst_hsw_dsp_runtime_suspend(hsw);
+ sst_hsw_dsp_runtime_sleep(hsw);
+ pdata->pm_state = HSW_PM_STATE_D3;
+
+ return 0;
+}
+
+static int hsw_pcm_runtime_resume(struct device *dev)
+{
+ struct hsw_priv_data *pdata = dev_get_drvdata(dev);
+ struct sst_hsw *hsw = pdata->hsw;
+ int ret;
+
+ if (pdata->pm_state == HSW_PM_STATE_D0)
+ return 0;
+
+ ret = sst_hsw_dsp_load(hsw);
+ if (ret < 0) {
+ dev_err(dev, "failed to reload %d\n", ret);
+ return ret;
+ }
+
+ ret = hsw_pcm_create_modules(pdata);
+ if (ret < 0) {
+ dev_err(dev, "failed to create modules %d\n", ret);
+ return ret;
+ }
+
+ ret = sst_hsw_dsp_runtime_resume(hsw);
+ if (ret < 0)
+ return ret;
+ else if (ret == 1) /* no action required */
+ return 0;
+
+ pdata->pm_state = HSW_PM_STATE_D0;
+ return ret;
+}
+
+#else
+#define hsw_pcm_runtime_idle NULL
+#define hsw_pcm_runtime_suspend NULL
+#define hsw_pcm_runtime_resume NULL
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_RUNTIME)
+
+static void hsw_pcm_complete(struct device *dev)
+{
+ struct hsw_priv_data *pdata = dev_get_drvdata(dev);
+ struct sst_hsw *hsw = pdata->hsw;
+ struct hsw_pcm_data *pcm_data;
+ int i, err;
+
+ if (pdata->pm_state == HSW_PM_STATE_D0)
+ return;
+
+ err = sst_hsw_dsp_load(hsw);
+ if (err < 0) {
+ dev_err(dev, "failed to reload %d\n", err);
+ return;
+ }
+
+ err = hsw_pcm_create_modules(pdata);
+ if (err < 0) {
+ dev_err(dev, "failed to create modules %d\n", err);
+ return;
+ }
+
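+ /* restore persistent module context saved at prepare() for open streams */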
+ for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
+ pcm_data = &pdata->pcm[i];
+
+ if (!pcm_data->substream)
+ continue;
+
+ err = sst_module_runtime_restore(pcm_data->runtime,
+ &pcm_data->context);
+ if (err < 0)
+ dev_err(dev, "failed to restore context for PCM %d\n", i);
+ }
+
+ snd_soc_resume(pdata->soc_card->dev);
+
+ err = sst_hsw_dsp_runtime_resume(hsw);
+ if (err < 0)
+ return;
+ else if (err == 1) /* no action required */
+ return;
+
+ pdata->pm_state = HSW_PM_STATE_D0;
+ return;
+}
+
+static int hsw_pcm_prepare(struct device *dev)
+{
+ struct hsw_priv_data *pdata = dev_get_drvdata(dev);
+ struct sst_hsw *hsw = pdata->hsw;
+ struct hsw_pcm_data *pcm_data;
+ int i, err;
+
+ if (pdata->pm_state == HSW_PM_STATE_D3)
+ return 0;
+ /* suspend all active streams */
+ for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
+ pcm_data = &pdata->pcm[i];
+
+ if (!pcm_data->substream)
+ continue;
+ dev_dbg(dev, "suspending pcm %d\n", i);
+ snd_pcm_suspend_all(pcm_data->hsw_pcm);
+
+ /* We need to wait until the DSP FW stops the streams */
+ msleep(2);
+ }
+
+ snd_soc_suspend(pdata->soc_card->dev);
+ snd_soc_poweroff(pdata->soc_card->dev);
+
+ /* enter D3 state and stall */
+ sst_hsw_dsp_runtime_suspend(hsw);
+
+ /* preserve persistent memory */
+ for (i = 0; i < HSW_PCM_DAI_ID_CAPTURE + 1; i++) {
+ pcm_data = &pdata->pcm[i];
+
+ if (!pcm_data->substream)
+ continue;
+
+ dev_dbg(dev, "saving context pcm %d\n", i);
+ err = sst_module_runtime_save(pcm_data->runtime,
+ &pcm_data->context);
+ if (err < 0)
+ dev_err(dev, "failed to save context for PCM %d\n", i);
+ }
+
+ /* put the DSP to sleep */
+ sst_hsw_dsp_runtime_sleep(hsw);
+ pdata->pm_state = HSW_PM_STATE_D3;
+
+ return 0;
+}
+
+#else
+#define hsw_pcm_prepare NULL
+#define hsw_pcm_complete NULL
+#endif
+
+static const struct dev_pm_ops hsw_pcm_pm = {
+ .runtime_idle = hsw_pcm_runtime_idle,
+ .runtime_suspend = hsw_pcm_runtime_suspend,
+ .runtime_resume = hsw_pcm_runtime_resume,
+ .prepare = hsw_pcm_prepare,
+ .complete = hsw_pcm_complete,
+};
+
static struct platform_driver hsw_pcm_driver = {
.driver = {
.name = "haswell-pcm-audio",
- .owner = THIS_MODULE,
+ .pm = &hsw_pcm_pm,
},
.probe = hsw_pcm_dev_probe,
diff --git a/sound/soc/intel/sst-mfld-platform-compress.c b/sound/soc/intel/sst-mfld-platform-compress.c
index 59467775c9b8..395168986462 100644
--- a/sound/soc/intel/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/sst-mfld-platform-compress.c
@@ -67,8 +67,11 @@ static int sst_platform_compr_open(struct snd_compr_stream *cstream)
goto out_ops;
}
stream->compr_ops = sst->compr_ops;
-
stream->id = 0;
+
+ /* Turn on LPE */
+ sst->compr_ops->power(sst->dev, true);
+
sst_set_stream_status(stream, SST_PLATFORM_INIT);
runtime->private_data = stream;
return 0;
@@ -83,6 +86,9 @@ static int sst_platform_compr_free(struct snd_compr_stream *cstream)
int ret_val = 0, str_id;
stream = cstream->runtime->private_data;
+ /* Turn off LPE */
+ sst->compr_ops->power(sst->dev, false);
+
/*need to check*/
str_id = stream->id;
if (str_id)
diff --git a/sound/soc/intel/sst-mfld-platform-pcm.c b/sound/soc/intel/sst-mfld-platform-pcm.c
index aa9b600dfc9b..a1a8d9d91539 100644
--- a/sound/soc/intel/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/sst-mfld-platform-pcm.c
@@ -101,35 +101,11 @@ static struct sst_dev_stream_map dpcm_strm_map[] = {
{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, 0},
};
-/* MFLD - MSIC */
-static struct snd_soc_dai_driver sst_platform_dai[] = {
+static int sst_media_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
{
- .name = "Headset-cpu-dai",
- .id = 0,
- .playback = {
- .channels_min = SST_STEREO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
- .capture = {
- .channels_min = 1,
- .channels_max = 5,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S24_LE,
- },
-},
-{
- .name = "Compress-cpu-dai",
- .compress_dai = 1,
- .playback = {
- .channels_min = SST_STEREO,
- .channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
-},
-};
+
+ return sst_send_pipe_gains(dai, stream, mute);
+}
/* helper functions */
void sst_set_stream_status(struct sst_runtime_stream *stream,
@@ -451,12 +427,133 @@ static int sst_media_hw_free(struct snd_pcm_substream *substream,
return snd_pcm_lib_free_pages(substream);
}
+static int sst_enable_ssp(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+
+ if (!dai->active) {
+ ret = sst_handle_vb_timer(dai, true);
+ if (ret)
+ return ret;
+ ret = send_ssp_cmd(dai, dai->name, 1);
+ }
+ return ret;
+}
+
+static void sst_disable_ssp(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ if (!dai->active) {
+ send_ssp_cmd(dai, dai->name, 0);
+ sst_handle_vb_timer(dai, false);
+ }
+}
+
static struct snd_soc_dai_ops sst_media_dai_ops = {
.startup = sst_media_open,
.shutdown = sst_media_close,
.prepare = sst_media_prepare,
.hw_params = sst_media_hw_params,
.hw_free = sst_media_hw_free,
+ .mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_ops sst_compr_dai_ops = {
+ .mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_ops sst_be_dai_ops = {
+ .startup = sst_enable_ssp,
+ .shutdown = sst_disable_ssp,
+};
+
+static struct snd_soc_dai_driver sst_platform_dai[] = {
+{
+ .name = "media-cpu-dai",
+ .ops = &sst_media_dai_ops,
+ .playback = {
+ .stream_name = "Headset Playback",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Headset Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+{
+ .name = "compress-cpu-dai",
+ .compress_dai = 1,
+ .ops = &sst_compr_dai_ops,
+ .playback = {
+ .stream_name = "Compress Playback",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+/* BE CPU Dais */
+{
+ .name = "ssp0-port",
+ .ops = &sst_be_dai_ops,
+ .playback = {
+ .stream_name = "ssp0 Tx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "ssp0 Rx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+{
+ .name = "ssp1-port",
+ .ops = &sst_be_dai_ops,
+ .playback = {
+ .stream_name = "ssp1 Tx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "ssp1 Rx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
+{
+ .name = "ssp2-port",
+ .ops = &sst_be_dai_ops,
+ .playback = {
+ .stream_name = "ssp2 Tx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "ssp2 Rx",
+ .channels_min = SST_STEREO,
+ .channels_max = SST_STEREO,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+},
};
static int sst_platform_open(struct snd_pcm_substream *substream)
@@ -609,6 +706,7 @@ static int sst_platform_probe(struct platform_device *pdev)
pdata->pdev_strm_map = dpcm_strm_map;
pdata->strm_map_size = ARRAY_SIZE(dpcm_strm_map);
drv->pdata = pdata;
+ drv->pdev = pdev;
mutex_init(&drv->lock);
dev_set_drvdata(&pdev->dev, drv);
@@ -639,7 +737,6 @@ static int sst_platform_remove(struct platform_device *pdev)
static struct platform_driver sst_platform_driver = {
.driver = {
.name = "sst-mfld-platform",
- .owner = THIS_MODULE,
},
.probe = sst_platform_probe,
.remove = sst_platform_remove,
diff --git a/sound/soc/intel/sst-mfld-platform.h b/sound/soc/intel/sst-mfld-platform.h
index 19f83ec51613..79c8d1246a8f 100644
--- a/sound/soc/intel/sst-mfld-platform.h
+++ b/sound/soc/intel/sst-mfld-platform.h
@@ -117,6 +117,7 @@ struct compress_sst_ops {
int (*get_codec_caps)(struct snd_compr_codec_caps *codec);
int (*set_metadata)(struct device *dev, unsigned int str_id,
struct snd_compr_metadata *mdata);
+ int (*power)(struct device *dev, bool state);
};
struct sst_ops {
@@ -153,6 +154,10 @@ struct sst_device {
struct sst_data;
int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform);
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute);
+int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable);
+int sst_handle_vb_timer(struct snd_soc_dai *dai, bool enable);
+
void sst_set_stream_status(struct sst_runtime_stream *stream, int state);
int sst_fill_stream_params(void *substream, const struct sst_data *ctx,
struct snd_sst_params *str_params, bool is_compress);
diff --git a/sound/soc/intel/sst/Makefile b/sound/soc/intel/sst/Makefile
new file mode 100644
index 000000000000..fd21726361b5
--- /dev/null
+++ b/sound/soc/intel/sst/Makefile
@@ -0,0 +1,7 @@
+snd-intel-sst-core-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_loader.o sst_pvt.o
+snd-intel-sst-pci-objs += sst_pci.o
+snd-intel-sst-acpi-objs += sst_acpi.o
+
+obj-$(CONFIG_SND_SST_IPC) += snd-intel-sst-core.o
+obj-$(CONFIG_SND_SST_IPC_PCI) += snd-intel-sst-pci.o
+obj-$(CONFIG_SND_SST_IPC_ACPI) += snd-intel-sst-acpi.o
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c
new file mode 100644
index 000000000000..8a8d56a146e7
--- /dev/null
+++ b/sound/soc/intel/sst/sst.c
@@ -0,0 +1,437 @@
+/*
+ * sst.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/async.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
+MODULE_LICENSE("GPL v2");
+
+static inline bool sst_is_process_reply(u32 msg_id)
+{
+ return ((msg_id & PROCESS_MSG) ? true : false);
+}
+
+static inline bool sst_validate_mailbox_size(unsigned int size)
+{
+ return ((size <= SST_MAILBOX_SIZE) ? true : false);
+}
+
+static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
+{
+ union interrupt_reg_mrfld isr;
+ union ipc_header_mrfld header;
+ union sst_imr_reg_mrfld imr;
+ struct ipc_post *msg = NULL;
+ unsigned int size = 0;
+ struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+ irqreturn_t retval = IRQ_HANDLED;
+
+ /* Interrupt arrived, check src */
+ isr.full = sst_shim_read64(drv->shim, SST_ISRX);
+
+ if (isr.part.done_interrupt) {
+ /* Clear done bit */
+ spin_lock(&drv->ipc_spin_lock);
+ header.full = sst_shim_read64(drv->shim,
+ drv->ipc_reg.ipcx);
+ header.p.header_high.part.done = 0;
+ sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);
+
+ /* write 1 to clear status register */
+ isr.part.done_interrupt = 1;
+ sst_shim_write64(drv->shim, SST_ISRX, isr.full);
+ spin_unlock(&drv->ipc_spin_lock);
+
+ /* we can send more messages to DSP so trigger work */
+ queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq);
+ retval = IRQ_HANDLED;
+ }
+
+ if (isr.part.busy_interrupt) {
+ /* message from dsp so copy that */
+ spin_lock(&drv->ipc_spin_lock);
+ imr.full = sst_shim_read64(drv->shim, SST_IMRX);
+ imr.part.busy_interrupt = 1;
+ sst_shim_write64(drv->shim, SST_IMRX, imr.full);
+ spin_unlock(&drv->ipc_spin_lock);
+ header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);
+
+ if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
+ drv->ops->clear_interrupt(drv);
+ return IRQ_HANDLED;
+ }
+
+ if (header.p.header_high.part.large) {
+ size = header.p.header_low_payload;
+ if (sst_validate_mailbox_size(size)) {
+ memcpy_fromio(msg->mailbox_data,
+ drv->mailbox + drv->mailbox_recv_offset, size);
+ } else {
+ dev_err(drv->dev,
+ "Mailbox not copied, payload size is: %u\n", size);
+ header.p.header_low_payload = 0;
+ }
+ }
+
+ msg->mrfld_header = header;
+ msg->is_process_reply =
+ sst_is_process_reply(header.p.header_high.part.msg_id);
+ spin_lock(&drv->rx_msg_lock);
+ list_add_tail(&msg->node, &drv->rx_list);
+ spin_unlock(&drv->rx_msg_lock);
+ drv->ops->clear_interrupt(drv);
+ retval = IRQ_WAKE_THREAD;
+ }
+ return retval;
+}
+
+static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
+{
+ struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+ struct ipc_post *__msg, *msg = NULL;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+ if (list_empty(&drv->rx_list)) {
+ spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+ return IRQ_HANDLED;
+ }
+
+ list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
+ list_del(&msg->node);
+ spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+ if (msg->is_process_reply)
+ drv->ops->process_message(msg);
+ else
+ drv->ops->process_reply(drv, msg);
+
+ if (msg->is_large)
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+ }
+ spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+ return IRQ_HANDLED;
+}
+
+static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
+{
+ int ret = 0;
+
+ ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
+ IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
+ true, true, false, true);
+
+ if (ret < 0) {
+ dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static struct intel_sst_ops mrfld_ops = {
+ .interrupt = intel_sst_interrupt_mrfld,
+ .irq_thread = intel_sst_irq_thread_mrfld,
+ .clear_interrupt = intel_sst_clear_intr_mrfld,
+ .start = sst_start_mrfld,
+ .reset = intel_sst_reset_dsp_mrfld,
+ .post_message = sst_post_message_mrfld,
+ .process_reply = sst_process_reply_mrfld,
+ .save_dsp_context = sst_save_dsp_context_v2,
+ .alloc_stream = sst_alloc_stream_mrfld,
+ .post_download = sst_post_download_mrfld,
+};
+
+int sst_driver_ops(struct intel_sst_drv *sst)
+{
+
+ switch (sst->dev_id) {
+ case SST_MRFLD_PCI_ID:
+ case SST_BYT_ACPI_ID:
+ case SST_CHV_ACPI_ID:
+ sst->tstamp = SST_TIME_STAMP_MRFLD;
+ sst->ops = &mrfld_ops;
+ return 0;
+
+ default:
+ dev_err(sst->dev,
+ "SST Driver capablities missing for dev_id: %x", sst->dev_id);
+ return -EINVAL;
+ }
+}
+
+void sst_process_pending_msg(struct work_struct *work)
+{
+ struct intel_sst_drv *ctx = container_of(work,
+ struct intel_sst_drv, ipc_post_msg_wq);
+
+ ctx->ops->post_message(ctx, NULL, false);
+}
+
+static int sst_workqueue_init(struct intel_sst_drv *ctx)
+{
+ INIT_LIST_HEAD(&ctx->memcpy_list);
+ INIT_LIST_HEAD(&ctx->rx_list);
+ INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
+ INIT_LIST_HEAD(&ctx->block_list);
+ INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg);
+ init_waitqueue_head(&ctx->wait_queue);
+
+ ctx->post_msg_wq =
+ create_singlethread_workqueue("sst_post_msg_wq");
+ if (!ctx->post_msg_wq)
+ return -EBUSY;
+ return 0;
+}
+
+static void sst_init_locks(struct intel_sst_drv *ctx)
+{
+ mutex_init(&ctx->sst_lock);
+ spin_lock_init(&ctx->rx_msg_lock);
+ spin_lock_init(&ctx->ipc_spin_lock);
+ spin_lock_init(&ctx->block_lock);
+}
+
+int sst_alloc_drv_context(struct intel_sst_drv **ctx,
+ struct device *dev, unsigned int dev_id)
+{
+ *ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL);
+ if (!(*ctx))
+ return -ENOMEM;
+
+ (*ctx)->dev = dev;
+ (*ctx)->dev_id = dev_id;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
+
+int sst_context_init(struct intel_sst_drv *ctx)
+{
+ int ret = 0, i;
+
+ if (!ctx->pdata)
+ return -EINVAL;
+
+ if (!ctx->pdata->probe_data)
+ return -EINVAL;
+
+ memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));
+
+ ret = sst_driver_ops(ctx);
+ if (ret != 0)
+ return -EINVAL;
+
+ sst_init_locks(ctx);
+ sst_set_fw_state_locked(ctx, SST_RESET);
+
+ /* pvt_id 0 reserved for async messages */
+ ctx->pvt_id = 1;
+ ctx->stream_cnt = 0;
+ ctx->fw_in_mem = NULL;
+ /* we use memcpy, so set to 0 */
+ ctx->use_dma = 0;
+ ctx->use_lli = 0;
+
+ if (sst_workqueue_init(ctx))
+ return -EINVAL;
+
+ ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
+ ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
+ ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;
+
+ dev_info(ctx->dev, "Got drv data max stream %d\n",
+ ctx->info.max_streams);
+
+ for (i = 1; i <= ctx->info.max_streams; i++) {
+ struct stream_info *stream = &ctx->streams[i];
+
+ memset(stream, 0, sizeof(*stream));
+ stream->pipe_id = PIPE_RSVD;
+ mutex_init(&stream->lock);
+ }
+
+ /* Register the ISR */
+ ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
+ ctx->ops->irq_thread, 0, SST_DRV_NAME,
+ ctx);
+ if (ret)
+ goto do_free_mem;
+
+ dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num);
+
+ /* interrupts are unmasked by default, so mask them here */
+ sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);
+
+ ctx->qos = devm_kzalloc(ctx->dev,
+ sizeof(struct pm_qos_request), GFP_KERNEL);
+ if (!ctx->qos) {
+ ret = -ENOMEM;
+ goto do_free_mem;
+ }
+ pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
+ ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
+ ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
+ if (ret) {
+ dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
+ goto do_free_mem;
+ }
+ sst_register(ctx->dev);
+ return 0;
+
+do_free_mem:
+ destroy_workqueue(ctx->post_msg_wq);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_context_init);
+
+void sst_context_cleanup(struct intel_sst_drv *ctx)
+{
+ pm_runtime_get_noresume(ctx->dev);
+ pm_runtime_disable(ctx->dev);
+ sst_unregister(ctx->dev);
+ sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+ flush_scheduled_work();
+ destroy_workqueue(ctx->post_msg_wq);
+ pm_qos_remove_request(ctx->qos);
+ kfree(ctx->fw_sg_list.src);
+ kfree(ctx->fw_sg_list.dst);
+ ctx->fw_sg_list.list_len = 0;
+ kfree(ctx->fw_in_mem);
+ ctx->fw_in_mem = NULL;
+ sst_memcpy_free_resources(ctx);
+ ctx = NULL;
+}
+EXPORT_SYMBOL_GPL(sst_context_cleanup);
+
+static inline void sst_save_shim64(struct intel_sst_drv *ctx,
+ void __iomem *shim,
+ struct sst_shim_regs64 *shim_regs)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+
+ shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
+
+ spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
+ void __iomem *shim,
+ struct sst_shim_regs64 *shim_regs)
+{
+ unsigned long irq_flags;
+
+ /*
+ * we only need to restore IMRX for this case; the rest will be
+ * initialized by the FW or the driver when the firmware is loaded
+ */
+ spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+ sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
+ spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
+{
+ pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(ctx->dev);
+ /*
+ * For ACPI devices, the actual physical device state is
+ * initially active, so change the state to active before
+ * enabling runtime PM
+ */
+ pm_runtime_enable(ctx->dev);
+
+ if (acpi_disabled)
+ pm_runtime_set_active(ctx->dev);
+ else
+ pm_runtime_put_noidle(ctx->dev);
+
+ sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+}
+EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);
+
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state == SST_RESET) {
+ dev_dbg(dev, "LPE is already in RESET state, No action\n");
+ return 0;
+ }
+ /* save fw context */
+ if (ctx->ops->save_dsp_context(ctx))
+ return -EBUSY;
+
+ /* Move the SST state to Reset */
+ sst_set_fw_state_locked(ctx, SST_RESET);
+
+ synchronize_irq(ctx->irq_num);
+ flush_workqueue(ctx->post_msg_wq);
+
+ /* save the shim registers because PMC doesn't save state */
+ sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+
+ return ret;
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state == SST_RESET) {
+ ret = sst_load_fw(ctx);
+ if (ret) {
+ dev_err(dev, "FW download fail %d\n", ret);
+ sst_set_fw_state_locked(ctx, SST_RESET);
+ }
+ }
+ return ret;
+}
+
+const struct dev_pm_ops intel_sst_pm = {
+ .runtime_suspend = intel_sst_runtime_suspend,
+ .runtime_resume = intel_sst_runtime_resume,
+};
+EXPORT_SYMBOL_GPL(intel_sst_pm);
diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/sst/sst.h
new file mode 100644
index 000000000000..7f4bbfcbc6f5
--- /dev/null
+++ b/sound/soc/intel/sst/sst.h
@@ -0,0 +1,546 @@
+/*
+ * sst.h - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Common private declarations for SST
+ */
+#ifndef __SST_H__
+#define __SST_H__
+
+#include <linux/firmware.h>
+
+/* driver names */
+#define SST_DRV_NAME "intel_sst_driver"
+#define SST_MRFLD_PCI_ID 0x119A
+#define SST_BYT_ACPI_ID 0x80860F28
+#define SST_CHV_ACPI_ID 0x808622A8
+
+#define SST_SUSPEND_DELAY 2000
+#define FW_CONTEXT_MEM (64*1024)
+#define SST_ICCM_BOUNDARY 4
+#define SST_CONFIG_SSP_SIGN 0x7ffe8001
+
+#define MRFLD_FW_VIRTUAL_BASE 0xC0000000
+#define MRFLD_FW_DDR_BASE_OFFSET 0x0
+#define MRFLD_FW_FEATURE_BASE_OFFSET 0x4
+#define MRFLD_FW_BSS_RESET_BIT 0
+
+extern const struct dev_pm_ops intel_sst_pm;
+enum sst_states {
+ SST_FW_LOADING = 1,
+ SST_FW_RUNNING,
+ SST_RESET,
+ SST_SHUTDOWN,
+};
+
+enum sst_algo_ops {
+ SST_SET_ALGO = 0,
+ SST_GET_ALGO = 1,
+};
+
+#define SST_BLOCK_TIMEOUT 1000
+
+#define FW_SIGNATURE_SIZE 4
+
+/* stream states */
+enum sst_stream_states {
+ STREAM_UN_INIT = 0, /* Freed/Not used stream */
+ STREAM_RUNNING = 1, /* Running */
+ STREAM_PAUSED = 2, /* Paused stream */
+ STREAM_DECODE = 3, /* stream is in decoding only state */
+ STREAM_INIT = 4, /* stream init, waiting for data */
+ STREAM_RESET = 5, /* force reset on recovery */
+};
+
+enum sst_ram_type {
+ SST_IRAM = 1,
+ SST_DRAM = 2,
+ SST_DDR = 5,
+ SST_CUSTOM_INFO = 7, /* consists of FW binary information */
+};
+
+/* SST shim registers to structure mapping */
+union interrupt_reg {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+union sst_pisr_reg {
+ struct {
+ u32 pssp0:1;
+ u32 pssp1:1;
+ u32 rsvd0:3;
+ u32 dmac:1;
+ u32 rsvd1:26;
+ } part;
+ u32 full;
+};
+
+union sst_pimr_reg {
+ struct {
+ u32 ssp0:1;
+ u32 ssp1:1;
+ u32 rsvd0:3;
+ u32 dmac:1;
+ u32 rsvd1:10;
+ u32 ssp0_sc:1;
+ u32 ssp1_sc:1;
+ u32 rsvd2:3;
+ u32 dmac_sc:1;
+ u32 rsvd3:10;
+ } part;
+ u32 full;
+};
+
+union config_status_reg_mrfld {
+ struct {
+ u64 lpe_reset:1;
+ u64 lpe_reset_vector:1;
+ u64 runstall:1;
+ u64 pwaitmode:1;
+ u64 clk_sel:3;
+ u64 rsvd2:1;
+ u64 sst_clk:3;
+ u64 xt_snoop:1;
+ u64 rsvd3:4;
+ u64 clk_sel1:6;
+ u64 clk_enable:3;
+ u64 rsvd4:6;
+ u64 slim0baseclk:1;
+ u64 rsvd:32;
+ } part;
+ u64 full;
+};
+
+union interrupt_reg_mrfld {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+union sst_imr_reg_mrfld {
+ struct {
+ u64 done_interrupt:1;
+ u64 busy_interrupt:1;
+ u64 rsvd:62;
+ } part;
+ u64 full;
+};
+
+/**
+ * struct sst_block - This structure is used to block a user/fw data call to another
+ * fw/user call
+ *
+ * @condition: condition for blocking check
+ * @ret_code: ret code when block is released
+ * @data: data ptr
+ * @size: size of data
+ * @on: block condition
+ * @msg_id: msg_id = msgid in mfld/ctp, mrfld = NULL
+ * @drv_id: str_id in mfld/ctp, = drv_id in mrfld
+ * @node: list head node
+ */
+struct sst_block {
+ bool condition;
+ int ret_code;
+ void *data;
+ u32 size;
+ bool on;
+ u32 msg_id;
+ u32 drv_id;
+ struct list_head node;
+};
+
+/**
+ * struct stream_info - structure that holds the stream information
+ *
+ * @status : stream current state
+ * @prev : stream prev state
+ * @ops : stream operation pb/cp/drm...
+ * @bufs: stream buffer list
+ * @lock : stream mutex for protecting state
+ * @pcm_substream : PCM substream
+ * @period_elapsed : PCM period elapsed callback
+ * @sfreq : stream sampling freq
+ * @str_type : stream type
+ * @cumm_bytes : cumulative bytes decoded
+ * @src : stream source
+ */
+struct stream_info {
+ unsigned int status;
+ unsigned int prev;
+ unsigned int ops;
+ struct mutex lock;
+
+ void *pcm_substream;
+ void (*period_elapsed)(void *pcm_substream);
+
+ unsigned int sfreq;
+ u32 cumm_bytes;
+
+ void *compr_cb_param;
+ void (*compr_cb)(void *compr_cb_param);
+
+ void *drain_cb_param;
+ void (*drain_notify)(void *drain_cb_param);
+
+ unsigned int num_ch;
+ unsigned int pipe_id;
+ unsigned int str_id;
+ unsigned int task_id;
+};
+
+#define SST_FW_SIGN "$SST"
+#define SST_FW_LIB_SIGN "$LIB"
+
+/**
+ * struct sst_fw_header - FW file headers
+ *
+ * @signature : FW signature
+ * @file_size: size of fw image
+ * @modules : # of modules
+ * @file_format : version of header format
+ * @reserved : reserved fields
+ */
+struct sst_fw_header {
+ unsigned char signature[FW_SIGNATURE_SIZE];
+ u32 file_size;
+ u32 modules;
+ u32 file_format;
+ u32 reserved[4];
+};
+
+/**
+ * struct fw_module_header - module header in FW
+ *
+ * @signature: module signature
+ * @mod_size: size of module
+ * @blocks: block count
+ * @type: block type
+ * @entry_point: module entry point
+ */
+struct fw_module_header {
+ unsigned char signature[FW_SIGNATURE_SIZE];
+ u32 mod_size;
+ u32 blocks;
+ u32 type;
+ u32 entry_point;
+};
+
+/**
+ * struct fw_block_info - block header for FW
+ *
+ * @type: block ram type I/D
+ * @size: size of block
+ * @ram_offset: offset in ram
+ */
+struct fw_block_info {
+ enum sst_ram_type type;
+ u32 size;
+ u32 ram_offset;
+ u32 rsvd;
+};
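+
+/*
+ * Image layout sketch (as walked by sst_validate_fw_image() and
+ * sst_parse_module_memcpy() in sst_loader.c): a struct sst_fw_header is
+ * followed by header.modules module sections, each a struct fw_module_header
+ * followed by its blocks:
+ *
+ *	struct fw_module_header *mod = fw_in_mem + sizeof(struct sst_fw_header);
+ *	struct fw_block_info *blk = (void *)mod + sizeof(*mod);
+ *	next block:  (void *)blk + sizeof(*blk) + blk->size
+ *	next module: (void *)mod + sizeof(*mod) + mod->mod_size
+ */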
+
+struct sst_runtime_param {
+ struct snd_sst_runtime_params param;
+};
+
+struct sst_sg_list {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ int list_len;
+ unsigned int sg_idx;
+};
+
+struct sst_memcpy_list {
+ struct list_head memcpylist;
+ void *dstn;
+ const void *src;
+ u32 size;
+ bool is_io;
+};
+
+/* Firmware Module Information */
+enum sst_lib_dwnld_status {
+ SST_LIB_NOT_FOUND = 0,
+ SST_LIB_FOUND,
+ SST_LIB_DOWNLOADED,
+};
+
+struct sst_module_info {
+ const char *name; /* Library name */
+ u32 id; /* Module ID */
+ u32 entry_pt; /* Module entry point */
+ u8 status; /* Module status */
+ u8 rsvd1;
+ u16 rsvd2;
+};
+
+/*
+ * Structure for managing the Library Region(1.5MB)
+ * in DDR in Merrifield
+ */
+struct sst_mem_mgr {
+ phys_addr_t current_base;
+ int avail;
+ unsigned int count;
+};
+
+struct sst_ipc_reg {
+ int ipcx;
+ int ipcd;
+};
+
+struct sst_shim_regs64 {
+ u64 csr;
+ u64 pisr;
+ u64 pimr;
+ u64 isrx;
+ u64 isrd;
+ u64 imrx;
+ u64 imrd;
+ u64 ipcx;
+ u64 ipcd;
+ u64 isrsc;
+ u64 isrlpesc;
+ u64 imrsc;
+ u64 imrlpesc;
+ u64 ipcsc;
+ u64 ipclpesc;
+ u64 clkctl;
+ u64 csr2;
+};
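+
+/*
+ * Save/restore sketch (illustrative): on suspend the driver can snapshot the
+ * shim into this struct with the 64-bit accessors declared below and write
+ * the values back on resume, e.g.:
+ *
+ *	ctx->shim_regs64->csr  = sst_shim_read64(ctx->shim, SST_CSR);
+ *	ctx->shim_regs64->imrx = sst_shim_read64(ctx->shim, SST_IMRX);
+ */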
+
+/**
+ * struct intel_sst_drv - driver ops
+ *
+ * @sst_state : current sst device state
+ * @dev_id : device identifier, pci_id for pci devices and acpi_id for acpi
+ * devices
+ * @shim : SST shim pointer
+ * @mailbox : SST mailbox pointer
+ * @iram : SST IRAM pointer
+ * @dram : SST DRAM pointer
+ * @pdata : SST info passed as a part of pci platform data
+ * @shim_phy_add : SST shim phy addr
+ * @shim_regs64: Struct to save shim registers
+ * @ipc_dispatch_list : ipc messages dispatched
+ * @rx_list : to copy the process_reply/process_msg from DSP
+ * @ipc_post_msg_wq : wq to post IPC messages context
+ * @mad_ops : MAD driver operations registered
+ * @mad_wq : MAD driver wq
+ * @post_msg_wq : wq to post IPC messages
+ * @streams : sst stream contexts
+ * @list_lock : sst driver list lock (deprecated)
+ * @ipc_spin_lock : spin lock to handle audio shim access and ipc queue
+ * @block_lock : spin lock to add block to block_list and assign pvt_id
+ * @rx_msg_lock : spin lock to handle the rx messages from the DSP
+ * @scard_ops : sst card ops
+ * @pci : sst pci device structure
+ * @dev : pointer to current device struct
+ * @sst_lock : sst device lock
+ * @pvt_id : sst private id
+ * @stream_cnt : total sst active stream count
+ * @pb_streams : total active pb streams
+ * @cp_streams : total active cp streams
+ * @audio_start : audio status
+ * @qos : PM Qos struct
+ * @firmware_name : Firmware / Library name
+ */
+struct intel_sst_drv {
+ int sst_state;
+ int irq_num;
+ unsigned int dev_id;
+ void __iomem *ddr;
+ void __iomem *shim;
+ void __iomem *mailbox;
+ void __iomem *iram;
+ void __iomem *dram;
+ unsigned int mailbox_add;
+ unsigned int iram_base;
+ unsigned int dram_base;
+ unsigned int shim_phy_add;
+ unsigned int iram_end;
+ unsigned int dram_end;
+ unsigned int ddr_end;
+ unsigned int ddr_base;
+ unsigned int mailbox_recv_offset;
+ struct sst_shim_regs64 *shim_regs64;
+ struct list_head block_list;
+ struct list_head ipc_dispatch_list;
+ struct sst_platform_info *pdata;
+ struct list_head rx_list;
+ struct work_struct ipc_post_msg_wq;
+ wait_queue_head_t wait_queue;
+ struct workqueue_struct *post_msg_wq;
+ unsigned int tstamp;
+ /* str_id 0 is not used */
+ struct stream_info streams[MAX_NUM_STREAMS+1];
+ spinlock_t ipc_spin_lock;
+ spinlock_t block_lock;
+ spinlock_t rx_msg_lock;
+ struct pci_dev *pci;
+ struct device *dev;
+ volatile unsigned long pvt_id;
+ struct mutex sst_lock;
+ unsigned int stream_cnt;
+ unsigned int csr_value;
+ void *fw_in_mem;
+ struct sst_sg_list fw_sg_list, library_list;
+ struct intel_sst_ops *ops;
+ struct sst_info info;
+ struct pm_qos_request *qos;
+ unsigned int use_dma;
+ unsigned int use_lli;
+ atomic_t fw_clear_context;
+ bool lib_dwnld_reqd;
+ struct list_head memcpy_list;
+ struct sst_ipc_reg ipc_reg;
+ struct sst_mem_mgr lib_mem_mgr;
+ /*
+ * Holder for firmware name. Due to async call it needs to be
+ * persistent till worker thread gets called
+ */
+ char firmware_name[20];
+};
+
+/* misc definitions */
+#define FW_DWNL_ID 0x01
+
+struct intel_sst_ops {
+ irqreturn_t (*interrupt)(int, void *);
+ irqreturn_t (*irq_thread)(int, void *);
+ void (*clear_interrupt)(struct intel_sst_drv *ctx);
+ int (*start)(struct intel_sst_drv *ctx);
+ int (*reset)(struct intel_sst_drv *ctx);
+ void (*process_reply)(struct intel_sst_drv *ctx, struct ipc_post *msg);
+ int (*post_message)(struct intel_sst_drv *ctx,
+ struct ipc_post *msg, bool sync);
+ void (*process_message)(struct ipc_post *msg);
+ void (*set_bypass)(bool set);
+ int (*save_dsp_context)(struct intel_sst_drv *sst);
+ void (*restore_dsp_context)(void);
+ int (*alloc_stream)(struct intel_sst_drv *ctx, void *params);
+ void (*post_download)(struct intel_sst_drv *sst);
+};
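+
+/*
+ * Illustrative only: a Merrifield ops table wired from the mrfld helpers
+ * declared below would look roughly like this (the driver's actual table is
+ * defined elsewhere):
+ *
+ *	static struct intel_sst_ops mrfld_ops = {
+ *		.clear_interrupt = intel_sst_clear_intr_mrfld,
+ *		.start = sst_start_mrfld,
+ *		.reset = intel_sst_reset_dsp_mrfld,
+ *		.post_message = sst_post_message_mrfld,
+ *		.process_reply = sst_process_reply_mrfld,
+ *		.alloc_stream = sst_alloc_stream_mrfld,
+ *		.post_download = sst_post_download_mrfld,
+ *	};
+ */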
+
+int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int id);
+int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int id);
+int sst_drop_stream(struct intel_sst_drv *sst_drv_ctx, int id);
+int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int id);
+int sst_start_stream(struct intel_sst_drv *sst_drv_ctx, int str_id);
+int sst_send_byte_stream_mrfld(struct intel_sst_drv *ctx,
+ struct snd_sst_bytes_v2 *sbytes);
+int sst_set_stream_param(int str_id, struct snd_sst_params *str_param);
+int sst_set_metadata(int str_id, char *params);
+int sst_get_stream(struct intel_sst_drv *sst_drv_ctx,
+ struct snd_sst_params *str_param);
+int sst_get_stream_allocated(struct intel_sst_drv *ctx,
+ struct snd_sst_params *str_param,
+ struct snd_sst_lib_download **lib_dnld);
+int sst_drain_stream(struct intel_sst_drv *sst_drv_ctx,
+ int str_id, bool partial_drain);
+int sst_post_message_mrfld(struct intel_sst_drv *ctx,
+ struct ipc_post *msg, bool sync);
+void sst_process_reply_mrfld(struct intel_sst_drv *ctx, struct ipc_post *msg);
+int sst_start_mrfld(struct intel_sst_drv *ctx);
+int intel_sst_reset_dsp_mrfld(struct intel_sst_drv *ctx);
+void intel_sst_clear_intr_mrfld(struct intel_sst_drv *ctx);
+
+int sst_load_fw(struct intel_sst_drv *ctx);
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
+void sst_post_download_mrfld(struct intel_sst_drv *ctx);
+int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
+void sst_memcpy_free_resources(struct intel_sst_drv *ctx);
+
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block);
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block);
+int sst_create_ipc_msg(struct ipc_post **arg, bool large);
+int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id);
+void sst_clean_stream(struct stream_info *stream);
+int intel_sst_register_compress(struct intel_sst_drv *sst);
+int intel_sst_remove_compress(struct intel_sst_drv *sst);
+void sst_cdev_fragment_elapsed(struct intel_sst_drv *ctx, int str_id);
+int sst_send_sync_msg(int ipc, int str_id);
+int sst_get_num_channel(struct snd_sst_params *str_param);
+int sst_get_sfreq(struct snd_sst_params *str_param);
+int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params);
+void sst_restore_fw_context(void);
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+ u32 msg_id, u32 drv_id);
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+ struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+ u32 msg_id, u32 drv_id);
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed);
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+ u32 drv_id, u32 ipc, void *data, u32 size);
+int sst_request_firmware_async(struct intel_sst_drv *ctx);
+int sst_driver_ops(struct intel_sst_drv *sst);
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid);
+void sst_firmware_load_cb(const struct firmware *fw, void *context);
+int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
+ int task_id, int ipc_msg, int cmd_id, int pipe_id,
+ size_t mbox_data_len, const void *mbox_data, void **data,
+ bool large, bool fill_dsp, bool sync, bool response);
+
+void sst_process_pending_msg(struct work_struct *work);
+int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx);
+void sst_init_stream(struct stream_info *stream,
+ int codec, int sst_id, int ops, u8 slot);
+int sst_validate_strid(struct intel_sst_drv *sst_drv_ctx, int str_id);
+struct stream_info *get_stream_info(struct intel_sst_drv *sst_drv_ctx,
+ int str_id);
+int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ u32 pipe_id);
+u32 relocate_imr_addr_mrfld(u32 base_addr);
+void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
+ struct ipc_post *msg);
+int sst_pm_runtime_put(struct intel_sst_drv *sst_drv);
+int sst_shim_write(void __iomem *addr, int offset, int value);
+u32 sst_shim_read(void __iomem *addr, int offset);
+u64 sst_reg_read64(void __iomem *addr, int offset);
+int sst_shim_write64(void __iomem *addr, int offset, u64 value);
+u64 sst_shim_read64(void __iomem *addr, int offset);
+void sst_set_fw_state_locked(
+ struct intel_sst_drv *sst_drv_ctx, int sst_state);
+void sst_fill_header_mrfld(union ipc_header_mrfld *header,
+ int msg, int task_id, int large, int drv_id);
+void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
+ int pipe_id, int len);
+
+int sst_register(struct device *);
+int sst_unregister(struct device *);
+
+int sst_alloc_drv_context(struct intel_sst_drv **ctx,
+ struct device *dev, unsigned int dev_id);
+int sst_context_init(struct intel_sst_drv *ctx);
+void sst_context_cleanup(struct intel_sst_drv *ctx);
+void sst_configure_runtime_pm(struct intel_sst_drv *ctx);
+#endif
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
new file mode 100644
index 000000000000..31124aa4434e
--- /dev/null
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -0,0 +1,383 @@
+/*
+ * sst_acpi.c - SST (LPE) driver init file for ACPI enumeration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Authors: Ramesh Babu K V <Ramesh.Babu@intel.com>
+ * Authors: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/acpi.h>
+#include <asm/platform_sst_audio.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <acpi/acbuffer.h>
+#include <acpi/platform/acenv.h>
+#include <acpi/platform/aclinux.h>
+#include <acpi/actypes.h>
+#include <acpi/acpi_bus.h>
+#include "../sst-mfld-platform.h"
+#include "../sst-dsp.h"
+#include "sst.h"
+
+struct sst_machines {
+ char codec_id[32];
+ char board[32];
+ char machine[32];
+ void (*machine_quirk)(void);
+ char firmware[32];
+ struct sst_platform_info *pdata;
+
+};
+
+/* LPE viewpoint addresses */
+#define SST_BYT_IRAM_PHY_START 0xff2c0000
+#define SST_BYT_IRAM_PHY_END 0xff2d4000
+#define SST_BYT_DRAM_PHY_START 0xff300000
+#define SST_BYT_DRAM_PHY_END 0xff320000
+#define SST_BYT_IMR_VIRT_START 0xc0000000 /* virtual addr in LPE */
+#define SST_BYT_IMR_VIRT_END 0xc01fffff
+#define SST_BYT_SHIM_PHY_ADDR 0xff340000
+#define SST_BYT_MBOX_PHY_ADDR 0xff344000
+#define SST_BYT_DMA0_PHY_ADDR 0xff298000
+#define SST_BYT_DMA1_PHY_ADDR 0xff29c000
+#define SST_BYT_SSP0_PHY_ADDR 0xff2a0000
+#define SST_BYT_SSP2_PHY_ADDR 0xff2a2000
+
+#define BYT_FW_MOD_TABLE_OFFSET 0x80000
+#define BYT_FW_MOD_TABLE_SIZE 0x100
+#define BYT_FW_MOD_OFFSET (BYT_FW_MOD_TABLE_OFFSET + BYT_FW_MOD_TABLE_SIZE)
+
+static const struct sst_info byt_fwparse_info = {
+ .use_elf = false,
+ .max_streams = 25,
+ .iram_start = SST_BYT_IRAM_PHY_START,
+ .iram_end = SST_BYT_IRAM_PHY_END,
+ .iram_use = true,
+ .dram_start = SST_BYT_DRAM_PHY_START,
+ .dram_end = SST_BYT_DRAM_PHY_END,
+ .dram_use = true,
+ .imr_start = SST_BYT_IMR_VIRT_START,
+ .imr_end = SST_BYT_IMR_VIRT_END,
+ .imr_use = true,
+ .mailbox_start = SST_BYT_MBOX_PHY_ADDR,
+ .num_probes = 0,
+ .lpe_viewpt_rqd = true,
+};
+
+static const struct sst_ipc_info byt_ipc_info = {
+ .ipc_offset = 0,
+ .mbox_recv_off = 0x400,
+};
+
+static const struct sst_lib_dnld_info byt_lib_dnld_info = {
+ .mod_base = SST_BYT_IMR_VIRT_START,
+ .mod_end = SST_BYT_IMR_VIRT_END,
+ .mod_table_offset = BYT_FW_MOD_TABLE_OFFSET,
+ .mod_table_size = BYT_FW_MOD_TABLE_SIZE,
+ .mod_ddr_dnld = false,
+};
+
+static const struct sst_res_info byt_rvp_res_info = {
+ .shim_offset = 0x140000,
+ .shim_size = 0x000100,
+ .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR,
+ .ssp0_offset = 0xa0000,
+ .ssp0_size = 0x1000,
+ .dma0_offset = 0x98000,
+ .dma0_size = 0x4000,
+ .dma1_offset = 0x9c000,
+ .dma1_size = 0x4000,
+ .iram_offset = 0x0c0000,
+ .iram_size = 0x14000,
+ .dram_offset = 0x100000,
+ .dram_size = 0x28000,
+ .mbox_offset = 0x144000,
+ .mbox_size = 0x1000,
+ .acpi_lpe_res_index = 0,
+ .acpi_ddr_index = 2,
+ .acpi_ipc_irq_index = 5,
+};
+
+static struct sst_platform_info byt_rvp_platform_data = {
+ .probe_data = &byt_fwparse_info,
+ .ipc_info = &byt_ipc_info,
+ .lib_info = &byt_lib_dnld_info,
+ .res_info = &byt_rvp_res_info,
+ .platform = "sst-mfld-platform",
+};
+
+/*
+ * Cherryview (Cherrytrail and Braswell) uses the same mrfld dpcm firmware
+ * as Baytrail, so the pdata is the same as Baytrail's.
+ */
+static struct sst_platform_info chv_platform_data = {
+ .probe_data = &byt_fwparse_info,
+ .ipc_info = &byt_ipc_info,
+ .lib_info = &byt_lib_dnld_info,
+ .res_info = &byt_rvp_res_info,
+ .platform = "sst-mfld-platform",
+};
+
+static int sst_platform_get_resources(struct intel_sst_drv *ctx)
+{
+ struct resource *rsrc;
+ struct platform_device *pdev = to_platform_device(ctx->dev);
+
+ /* All ACPI resource request here */
+ /* Get Shim addr */
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
+ ctx->pdata->res_info->acpi_lpe_res_index);
+ if (!rsrc) {
+ dev_err(ctx->dev, "Invalid SHIM base from IFWI");
+ return -EIO;
+ }
+ dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start,
+ (unsigned int)resource_size(rsrc));
+
+ ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset;
+ ctx->iram_end = ctx->iram_base + ctx->pdata->res_info->iram_size - 1;
+ dev_info(ctx->dev, "IRAM base: %#x", ctx->iram_base);
+ ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+ ctx->pdata->res_info->iram_size);
+ if (!ctx->iram) {
+ dev_err(ctx->dev, "unable to map IRAM");
+ return -EIO;
+ }
+
+ ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset;
+ ctx->dram_end = ctx->dram_base + ctx->pdata->res_info->dram_size - 1;
+ dev_info(ctx->dev, "DRAM base: %#x", ctx->dram_base);
+ ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+ ctx->pdata->res_info->dram_size);
+ if (!ctx->dram) {
+ dev_err(ctx->dev, "unable to map DRAM");
+ return -EIO;
+ }
+
+ ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset;
+ dev_info(ctx->dev, "SHIM base: %#x", ctx->shim_phy_add);
+ ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+ ctx->pdata->res_info->shim_size);
+ if (!ctx->shim) {
+ dev_err(ctx->dev, "unable to map SHIM");
+ return -EIO;
+ }
+
+ /* reassign physical address to LPE viewpoint address */
+ ctx->shim_phy_add = ctx->pdata->res_info->shim_phy_addr;
+
+ /* Get mailbox addr */
+ ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset;
+ dev_info(ctx->dev, "Mailbox base: %#x", ctx->mailbox_add);
+ ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+ ctx->pdata->res_info->mbox_size);
+ if (!ctx->mailbox) {
+ dev_err(ctx->dev, "unable to map mailbox");
+ return -EIO;
+ }
+
+ /* reassign physical address to LPE viewpoint address */
+ ctx->mailbox_add = ctx->info.mailbox_start;
+
+ rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
+ ctx->pdata->res_info->acpi_ddr_index);
+ if (!rsrc) {
+ dev_err(ctx->dev, "Invalid DDR base from IFWI");
+ return -EIO;
+ }
+ ctx->ddr_base = rsrc->start;
+ ctx->ddr_end = rsrc->end;
+ dev_info(ctx->dev, "DDR base: %#x", ctx->ddr_base);
+ ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+ resource_size(rsrc));
+ if (!ctx->ddr) {
+ dev_err(ctx->dev, "unable to map DDR");
+ return -EIO;
+ }
+
+ /* Find the IRQ */
+ ctx->irq_num = platform_get_irq(pdev,
+ ctx->pdata->res_info->acpi_ipc_irq_index);
+ return 0;
+}
+
+static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
+ void *context, void **ret)
+{
+ *(bool *)context = true;
+ return AE_OK;
+}
+
+static struct sst_machines *sst_acpi_find_machine(
+ struct sst_machines *machines)
+{
+ struct sst_machines *mach;
+ bool found = false;
+
+ for (mach = machines; mach->codec_id; mach++)
+ if (ACPI_SUCCESS(acpi_get_devices(mach->codec_id,
+ sst_acpi_mach_match,
+ &found, NULL)) && found)
+ return mach;
+
+ return NULL;
+}
+
+int sst_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ struct intel_sst_drv *ctx;
+ const struct acpi_device_id *id;
+ struct sst_machines *mach;
+ struct platform_device *mdev;
+ struct platform_device *plat_dev;
+ unsigned int dev_id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+ dev_dbg(dev, "for %s", id->id);
+
+ mach = (struct sst_machines *)id->driver_data;
+ mach = sst_acpi_find_machine(mach);
+ if (mach == NULL) {
+ dev_err(dev, "No matching machine driver found\n");
+ return -ENODEV;
+ }
+
+ ret = kstrtouint(id->id, 16, &dev_id);
+ if (ret < 0) {
+ dev_err(dev, "Unique device id conversion error: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "ACPI device id: %x\n", dev_id);
+
+ plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0);
+ if (plat_dev == NULL) {
+ dev_err(dev, "Failed to create machine device: %s\n", mach->pdata->platform);
+ return -ENODEV;
+ }
+
+ /* Create platform device for sst machine driver */
+ mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0);
+ if (mdev == NULL) {
+ dev_err(dev, "Failed to create machine device: %s\n", mach->machine);
+ return -ENODEV;
+ }
+
+ ret = sst_alloc_drv_context(&ctx, dev, dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Fill sst platform data */
+ ctx->pdata = mach->pdata;
+ strcpy(ctx->firmware_name, mach->firmware);
+
+ ret = sst_platform_get_resources(ctx);
+ if (ret)
+ return ret;
+
+ ret = sst_context_init(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* need to save shim registers in BYT */
+ ctx->shim_regs64 = devm_kzalloc(ctx->dev, sizeof(*ctx->shim_regs64),
+ GFP_KERNEL);
+ if (!ctx->shim_regs64) {
+ ret = -ENOMEM;
+ goto do_sst_cleanup;
+ }
+
+ sst_configure_runtime_pm(ctx);
+ platform_set_drvdata(pdev, ctx);
+ return ret;
+
+do_sst_cleanup:
+ sst_context_cleanup(ctx);
+ platform_set_drvdata(pdev, NULL);
+ dev_err(ctx->dev, "failed with %d\n", ret);
+ return ret;
+}
+
+/**
+ * sst_acpi_remove - remove function
+ *
+ * @pdev: platform device structure
+ *
+ * This function is called by the OS when the device is unloaded.
+ * It cleans up the SST driver context and releases the resources
+ * acquired at probe.
+ */
+int sst_acpi_remove(struct platform_device *pdev)
+{
+ struct intel_sst_drv *ctx;
+
+ ctx = platform_get_drvdata(pdev);
+ sst_context_cleanup(ctx);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct sst_machines sst_acpi_bytcr[] = {
+ {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+ &byt_rvp_platform_data },
+ {},
+};
+
+/* Cherryview-based platforms: CherryTrail and Braswell */
+static struct sst_machines sst_acpi_chv[] = {
+ {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin",
+ &chv_platform_data },
+ {},
+};
+
+static const struct acpi_device_id sst_acpi_ids[] = {
+ { "80860F28", (unsigned long)&sst_acpi_bytcr},
+ { "808622A8", (unsigned long) &sst_acpi_chv},
+ { },
+};
+
+MODULE_DEVICE_TABLE(acpi, sst_acpi_ids);
+
+static struct platform_driver sst_acpi_driver = {
+ .driver = {
+ .name = "intel_sst_acpi",
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(sst_acpi_ids),
+ .pm = &intel_sst_pm,
+ },
+ .probe = sst_acpi_probe,
+ .remove = sst_acpi_remove,
+};
+
+module_platform_driver(sst_acpi_driver);
+
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver");
+MODULE_AUTHOR("Ramesh Babu K V");
+MODULE_AUTHOR("Omair Mohammed Abdullah");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("sst");
diff --git a/sound/soc/intel/sst/sst_drv_interface.c b/sound/soc/intel/sst/sst_drv_interface.c
new file mode 100644
index 000000000000..5f75ef3cdd22
--- /dev/null
+++ b/sound/soc/intel/sst/sst_drv_interface.c
@@ -0,0 +1,686 @@
+/*
+ * sst_drv_interface.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/math64.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+#define NUM_CODEC 2
+#define MIN_FRAGMENT 2
+#define MAX_FRAGMENT 4
+#define MIN_FRAGMENT_SIZE (50 * 1024)
+#define MAX_FRAGMENT_SIZE (1024 * 1024)
+#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
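+/*
+ * SST_GET_BYTES_PER_SAMPLE() above rounds the PCM word size up to whole
+ * 16-bit words and converts it to bytes, e.g. 16 -> 2 bytes, 24 or 32 -> 4
+ * bytes (sst_cdev_tstamp() below uses it with 24-bit samples).
+ */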
+
+int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
+{
+ struct stream_info *stream;
+ int ret = 0;
+
+ stream = get_stream_info(ctx, str_id);
+ if (stream) {
+ /* str_id is valid, so stream is allocated */
+ ret = sst_free_stream(ctx, str_id);
+ if (ret)
+ sst_clean_stream(&ctx->streams[str_id]);
+ return ret;
+ } else {
+ dev_err(ctx->dev, "we tried to free stream context %d which was freed!!!\n", str_id);
+ }
+ return ret;
+}
+
+int sst_get_stream_allocated(struct intel_sst_drv *ctx,
+ struct snd_sst_params *str_param,
+ struct snd_sst_lib_download **lib_dnld)
+{
+ int retval;
+
+ retval = ctx->ops->alloc_stream(ctx, str_param);
+ if (retval > 0)
+ dev_dbg(ctx->dev, "Stream allocated %d\n", retval);
+ return retval;
+
+}
+
+/*
+ * sst_get_sfreq - this function returns the frequency of the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_sfreq(struct snd_sst_params *str_param)
+{
+ switch (str_param->codec) {
+ case SST_CODEC_TYPE_PCM:
+ return str_param->sparams.uc.pcm_params.sfreq;
+ case SST_CODEC_TYPE_AAC:
+ return str_param->sparams.uc.aac_params.externalsr;
+ case SST_CODEC_TYPE_MP3:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * sst_get_num_channel - get number of channels for the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_num_channel(struct snd_sst_params *str_param)
+{
+ switch (str_param->codec) {
+ case SST_CODEC_TYPE_PCM:
+ return str_param->sparams.uc.pcm_params.num_chan;
+ case SST_CODEC_TYPE_MP3:
+ return str_param->sparams.uc.mp3_params.num_chan;
+ case SST_CODEC_TYPE_AAC:
+ return str_param->sparams.uc.aac_params.num_chan;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * sst_get_stream - this function prepares for stream allocation
+ *
+ * @str_param : stream param
+ */
+int sst_get_stream(struct intel_sst_drv *ctx,
+ struct snd_sst_params *str_param)
+{
+ int retval;
+ struct stream_info *str_info;
+
+ /* stream is not allocated, we are allocating */
+ retval = ctx->ops->alloc_stream(ctx, str_param);
+ if (retval <= 0) {
+ return -EIO;
+ }
+ /* store sampling freq */
+ str_info = &ctx->streams[retval];
+ str_info->sfreq = sst_get_sfreq(str_param);
+
+ return retval;
+}
+
+static int sst_power_control(struct device *dev, bool state)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ dev_dbg(ctx->dev, "state:%d", state);
+ if (state)
+ return pm_runtime_get_sync(dev);
+ else
+ return sst_pm_runtime_put(ctx);
+}
+
+/*
+ * sst_open_pcm_stream - Open PCM interface
+ *
+ * @str_param: parameters of pcm stream
+ *
+ * This function is called by MID sound card driver to open
+ * a new pcm interface
+ */
+static int sst_open_pcm_stream(struct device *dev,
+ struct snd_sst_params *str_param)
+{
+ int retval;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (!str_param)
+ return -EINVAL;
+
+ retval = sst_get_stream(ctx, str_param);
+ if (retval > 0)
+ ctx->stream_cnt++;
+ else
+ dev_err(ctx->dev, "sst_get_stream returned err %d\n", retval);
+
+ return retval;
+}
+
+static int sst_cdev_open(struct device *dev,
+ struct snd_sst_params *str_params, struct sst_compress_cb *cb)
+{
+ int str_id, retval;
+ struct stream_info *stream;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ retval = pm_runtime_get_sync(ctx->dev);
+ if (retval < 0)
+ return retval;
+
+ str_id = sst_get_stream(ctx, str_params);
+ if (str_id > 0) {
+ dev_dbg(dev, "stream allocated in sst_cdev_open %d\n", str_id);
+ stream = &ctx->streams[str_id];
+ stream->compr_cb = cb->compr_cb;
+ stream->compr_cb_param = cb->param;
+ stream->drain_notify = cb->drain_notify;
+ stream->drain_cb_param = cb->drain_cb_param;
+ } else {
+ dev_err(dev, "stream encountered error during alloc %d\n", str_id);
+ str_id = -EINVAL;
+ sst_pm_runtime_put(ctx);
+ }
+ return str_id;
+}
+
+static int sst_cdev_close(struct device *dev, unsigned int str_id)
+{
+ int retval;
+ struct stream_info *stream;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ stream = get_stream_info(ctx, str_id);
+ if (!stream) {
+ dev_err(dev, "stream info is NULL for str %d!!!\n", str_id);
+ return -EINVAL;
+ }
+
+ if (stream->status == STREAM_RESET) {
+ dev_dbg(dev, "stream in reset state...\n");
+ stream->status = STREAM_UN_INIT;
+
+ retval = 0;
+ goto put;
+ }
+
+ retval = sst_free_stream(ctx, str_id);
+put:
+ stream->compr_cb_param = NULL;
+ stream->compr_cb = NULL;
+
+ if (retval)
+ dev_err(dev, "free stream returned err %d\n", retval);
+
+ dev_dbg(dev, "End\n");
+ return retval;
+
+}
+
+static int sst_cdev_ack(struct device *dev, unsigned int str_id,
+ unsigned long bytes)
+{
+ struct stream_info *stream;
+ struct snd_sst_tstamp fw_tstamp = {0,};
+ int offset;
+ void __iomem *addr;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ stream = get_stream_info(ctx, str_id);
+ if (!stream)
+ return -EINVAL;
+
+ /* update bytes sent */
+ stream->cumm_bytes += bytes;
+ dev_dbg(dev, "bytes copied %d inc by %ld\n", stream->cumm_bytes, bytes);
+
+ memcpy_fromio(&fw_tstamp,
+ ((void *)(ctx->mailbox + ctx->tstamp)
+ +(str_id * sizeof(fw_tstamp))),
+ sizeof(fw_tstamp));
+
+ fw_tstamp.bytes_copied = stream->cumm_bytes;
+ dev_dbg(dev, "bytes sent to fw %llu inc by %ld\n",
+ fw_tstamp.bytes_copied, bytes);
+
+ addr = ((void *)(ctx->mailbox + ctx->tstamp)) +
+ (str_id * sizeof(fw_tstamp));
+ offset = offsetof(struct snd_sst_tstamp, bytes_copied);
+ sst_shim_write(addr, offset, fw_tstamp.bytes_copied);
+ return 0;
+}
+
+static int sst_cdev_set_metadata(struct device *dev,
+ unsigned int str_id, struct snd_compr_metadata *metadata)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set metadata for stream %d\n", str_id);
+
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+
+ dev_dbg(dev, "pipe id = %d\n", str_info->pipe_id);
+ retval = sst_prepare_and_post_msg(ctx, str_info->task_id, IPC_CMD,
+ IPC_IA_SET_STREAM_PARAMS_MRFLD, str_info->pipe_id,
+ sizeof(*metadata), metadata, NULL,
+ true, true, true, false);
+
+ return retval;
+}
+
+static int sst_cdev_stream_pause(struct device *dev, unsigned int str_id)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ return sst_pause_stream(ctx, str_id);
+}
+
+static int sst_cdev_stream_pause_release(struct device *dev,
+ unsigned int str_id)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ return sst_resume_stream(ctx, str_id);
+}
+
+static int sst_cdev_stream_start(struct device *dev, unsigned int str_id)
+{
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_RUNNING;
+ return sst_start_stream(ctx, str_id);
+}
+
+static int sst_cdev_stream_drop(struct device *dev, unsigned int str_id)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ return sst_drop_stream(ctx, str_id);
+}
+
+static int sst_cdev_stream_drain(struct device *dev, unsigned int str_id)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ return sst_drain_stream(ctx, str_id, false);
+}
+
+static int sst_cdev_stream_partial_drain(struct device *dev,
+ unsigned int str_id)
+{
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ return sst_drain_stream(ctx, str_id, true);
+}
+
+static int sst_cdev_tstamp(struct device *dev, unsigned int str_id,
+ struct snd_compr_tstamp *tstamp)
+{
+ struct snd_sst_tstamp fw_tstamp = {0,};
+ struct stream_info *stream;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ memcpy_fromio(&fw_tstamp,
+ ((void *)(ctx->mailbox + ctx->tstamp)
+ +(str_id * sizeof(fw_tstamp))),
+ sizeof(fw_tstamp));
+
+ stream = get_stream_info(ctx, str_id);
+ if (!stream)
+ return -EINVAL;
+ dev_dbg(dev, "rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter);
+
+ tstamp->copied_total = fw_tstamp.ring_buffer_counter;
+ tstamp->pcm_frames = fw_tstamp.frames_decoded;
+ tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
+ (u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
+ tstamp->sampling_rate = fw_tstamp.sampling_frequency;
+
+ dev_dbg(dev, "PCM = %u\n", tstamp->pcm_io_frames);
+ dev_dbg(dev, "Ptr Query on strid = %d copied_total %d, decodec %d\n",
+ str_id, tstamp->copied_total, tstamp->pcm_frames);
+ dev_dbg(dev, "rendered %d\n", tstamp->pcm_io_frames);
+
+ return 0;
+}
+
+static int sst_cdev_caps(struct snd_compr_caps *caps)
+{
+ caps->num_codecs = NUM_CODEC;
+ caps->min_fragment_size = MIN_FRAGMENT_SIZE; /* 50KB */
+ caps->max_fragment_size = MAX_FRAGMENT_SIZE; /* 1024KB */
+ caps->min_fragments = MIN_FRAGMENT;
+ caps->max_fragments = MAX_FRAGMENT;
+ caps->codecs[0] = SND_AUDIOCODEC_MP3;
+ caps->codecs[1] = SND_AUDIOCODEC_AAC;
+ return 0;
+}
+
+static struct snd_compr_codec_caps caps_mp3 = {
+ .num_descriptors = 1,
+ .descriptor[0].max_ch = 2,
+ .descriptor[0].sample_rates[0] = 48000,
+ .descriptor[0].sample_rates[1] = 44100,
+ .descriptor[0].sample_rates[2] = 32000,
+ .descriptor[0].sample_rates[3] = 16000,
+ .descriptor[0].sample_rates[4] = 8000,
+ .descriptor[0].num_sample_rates = 5,
+ .descriptor[0].bit_rate[0] = 320,
+ .descriptor[0].bit_rate[1] = 192,
+ .descriptor[0].num_bitrates = 2,
+ .descriptor[0].profiles = 0,
+ .descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO,
+ .descriptor[0].formats = 0,
+};
+
+static struct snd_compr_codec_caps caps_aac = {
+ .num_descriptors = 2,
+ .descriptor[1].max_ch = 2,
+ .descriptor[0].sample_rates[0] = 48000,
+ .descriptor[0].sample_rates[1] = 44100,
+ .descriptor[0].sample_rates[2] = 32000,
+ .descriptor[0].sample_rates[3] = 16000,
+ .descriptor[0].sample_rates[4] = 8000,
+ .descriptor[0].num_sample_rates = 5,
+ .descriptor[1].bit_rate[0] = 320,
+ .descriptor[1].bit_rate[1] = 192,
+ .descriptor[1].num_bitrates = 2,
+ .descriptor[1].profiles = 0,
+ .descriptor[1].modes = 0,
+ .descriptor[1].formats =
+ (SND_AUDIOSTREAMFORMAT_MP4ADTS |
+ SND_AUDIOSTREAMFORMAT_RAW),
+};
+
+static int sst_cdev_codec_caps(struct snd_compr_codec_caps *codec)
+{
+ if (codec->codec == SND_AUDIOCODEC_MP3)
+ *codec = caps_mp3;
+ else if (codec->codec == SND_AUDIOCODEC_AAC)
+ *codec = caps_aac;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+void sst_cdev_fragment_elapsed(struct intel_sst_drv *ctx, int str_id)
+{
+ struct stream_info *stream;
+
+ dev_dbg(ctx->dev, "fragment elapsed from firmware for str_id %d\n",
+ str_id);
+ stream = &ctx->streams[str_id];
+ if (stream->compr_cb)
+ stream->compr_cb(stream->compr_cb_param);
+}
+
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface
+ */
+static int sst_close_pcm_stream(struct device *dev, unsigned int str_id)
+{
+ struct stream_info *stream;
+ int retval = 0;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ stream = get_stream_info(ctx, str_id);
+ if (!stream) {
+ dev_err(ctx->dev, "stream info is NULL for str %d!!!\n", str_id);
+ return -EINVAL;
+ }
+
+ if (stream->status == STREAM_RESET) {
+ /* silently fail here as we have cleaned the stream earlier */
+ dev_dbg(ctx->dev, "stream in reset state...\n");
+
+ retval = 0;
+ goto put;
+ }
+
+ retval = free_stream_context(ctx, str_id);
+put:
+ stream->pcm_substream = NULL;
+ stream->status = STREAM_UN_INIT;
+ stream->period_elapsed = NULL;
+ ctx->stream_cnt--;
+
+ if (retval)
+ dev_err(ctx->dev, "free stream returned err %d\n", retval);
+
+ dev_dbg(ctx->dev, "Exit\n");
+ return 0;
+}
+
+static inline int sst_calc_tstamp(struct intel_sst_drv *ctx,
+ struct pcm_stream_info *info,
+ struct snd_pcm_substream *substream,
+ struct snd_sst_tstamp *fw_tstamp)
+{
+ size_t delay_bytes, delay_frames;
+ size_t buffer_sz;
+ u32 pointer_bytes, pointer_samples;
+
+ dev_dbg(ctx->dev, "mrfld ring_buffer_counter %llu in bytes\n",
+ fw_tstamp->ring_buffer_counter);
+ dev_dbg(ctx->dev, "mrfld hardware_counter %llu in bytes\n",
+ fw_tstamp->hardware_counter);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ delay_bytes = (size_t) (fw_tstamp->ring_buffer_counter -
+ fw_tstamp->hardware_counter);
+ else
+ delay_bytes = (size_t) (fw_tstamp->hardware_counter -
+ fw_tstamp->ring_buffer_counter);
+ delay_frames = bytes_to_frames(substream->runtime, delay_bytes);
+ buffer_sz = snd_pcm_lib_buffer_bytes(substream);
+ div_u64_rem(fw_tstamp->ring_buffer_counter, buffer_sz, &pointer_bytes);
+ pointer_samples = bytes_to_samples(substream->runtime, pointer_bytes);
+
+ dev_dbg(ctx->dev, "pcm delay %zu in bytes\n", delay_bytes);
+
+ info->buffer_ptr = pointer_samples / substream->runtime->channels;
+
+ info->pcm_delay = delay_frames / substream->runtime->channels;
+ dev_dbg(ctx->dev, "buffer ptr %llu pcm_delay rep: %llu\n",
+ info->buffer_ptr, info->pcm_delay);
+ return 0;
+}
+
+static int sst_read_timestamp(struct device *dev, struct pcm_stream_info *info)
+{
+ struct stream_info *stream;
+ struct snd_pcm_substream *substream;
+ struct snd_sst_tstamp fw_tstamp;
+ unsigned int str_id;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ str_id = info->str_id;
+ stream = get_stream_info(ctx, str_id);
+ if (!stream)
+ return -EINVAL;
+
+ if (!stream->pcm_substream)
+ return -EINVAL;
+ substream = stream->pcm_substream;
+
+ memcpy_fromio(&fw_tstamp,
+ ((void *)(ctx->mailbox + ctx->tstamp)
+ + (str_id * sizeof(fw_tstamp))),
+ sizeof(fw_tstamp));
+ return sst_calc_tstamp(ctx, info, substream, &fw_tstamp);
+}
+
+static int sst_stream_start(struct device *dev, int str_id)
+{
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state != SST_FW_RUNNING)
+ return 0;
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_RUNNING;
+ sst_start_stream(ctx, str_id);
+
+ return 0;
+}
+
+static int sst_stream_drop(struct device *dev, int str_id)
+{
+ struct stream_info *str_info;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (ctx->sst_state != SST_FW_RUNNING)
+ return 0;
+
+ str_info = get_stream_info(ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ str_info->prev = STREAM_UN_INIT;
+ str_info->status = STREAM_INIT;
+ return sst_drop_stream(ctx, str_id);
+}
+
+static int sst_stream_init(struct device *dev, struct pcm_stream_info *str_info)
+{
+ int str_id = 0;
+ struct stream_info *stream;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ str_id = str_info->str_id;
+
+ if (ctx->sst_state != SST_FW_RUNNING)
+ return 0;
+
+ stream = get_stream_info(ctx, str_id);
+ if (!stream)
+ return -EINVAL;
+
+ dev_dbg(ctx->dev, "setting the period ptrs\n");
+ stream->pcm_substream = str_info->arg;
+ stream->period_elapsed = str_info->period_elapsed;
+ stream->sfreq = str_info->sfreq;
+ stream->prev = stream->status;
+ stream->status = STREAM_INIT;
+ dev_dbg(ctx->dev,
+ "pcm_substream %p, period_elapsed %p, sfreq %d, status %d\n",
+ stream->pcm_substream, stream->period_elapsed,
+ stream->sfreq, stream->status);
+
+ return 0;
+}
+
+/*
+ * sst_set_byte_stream - Set generic params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to configure
+ * SST runtime params.
+ */
+static int sst_send_byte_stream(struct device *dev,
+ struct snd_sst_bytes_v2 *bytes)
+{
+ int ret_val = 0;
+ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+ if (!bytes)
+ return -EINVAL;
+ ret_val = pm_runtime_get_sync(ctx->dev);
+ if (ret_val < 0)
+ return ret_val;
+
+ ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
+ sst_pm_runtime_put(ctx);
+
+ return ret_val;
+}
+
+static struct sst_ops pcm_ops = {
+ .open = sst_open_pcm_stream,
+ .stream_init = sst_stream_init,
+ .stream_start = sst_stream_start,
+ .stream_drop = sst_stream_drop,
+ .stream_read_tstamp = sst_read_timestamp,
+ .send_byte_stream = sst_send_byte_stream,
+ .close = sst_close_pcm_stream,
+ .power = sst_power_control,
+};
+
+static struct compress_sst_ops compr_ops = {
+ .open = sst_cdev_open,
+ .close = sst_cdev_close,
+ .stream_pause = sst_cdev_stream_pause,
+ .stream_pause_release = sst_cdev_stream_pause_release,
+ .stream_start = sst_cdev_stream_start,
+ .stream_drop = sst_cdev_stream_drop,
+ .stream_drain = sst_cdev_stream_drain,
+ .stream_partial_drain = sst_cdev_stream_partial_drain,
+ .tstamp = sst_cdev_tstamp,
+ .ack = sst_cdev_ack,
+ .get_caps = sst_cdev_caps,
+ .get_codec_caps = sst_cdev_codec_caps,
+ .set_metadata = sst_cdev_set_metadata,
+ .power = sst_power_control,
+};
+
+static struct sst_device sst_dsp_device = {
+ .name = "Intel(R) SST LPE",
+ .dev = NULL,
+ .ops = &pcm_ops,
+ .compr_ops = &compr_ops,
+};
+
+/*
+ * sst_register - function to register DSP
+ *
+ * This functions registers DSP with the platform driver
+ */
+int sst_register(struct device *dev)
+{
+ int ret_val;
+
+ sst_dsp_device.dev = dev;
+ ret_val = sst_register_dsp(&sst_dsp_device);
+ if (ret_val)
+ dev_err(dev, "Unable to register DSP with platform driver\n");
+
+ return ret_val;
+}
+
+int sst_unregister(struct device *dev)
+{
+ return sst_unregister_dsp(&sst_dsp_device);
+}
diff --git a/sound/soc/intel/sst/sst_ipc.c b/sound/soc/intel/sst/sst_ipc.c
new file mode 100644
index 000000000000..484e60978477
--- /dev/null
+++ b/sound/soc/intel/sst/sst_ipc.c
@@ -0,0 +1,373 @@
+/*
+ * sst_ipc.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corporation
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+ u32 msg_id, u32 drv_id)
+{
+ struct sst_block *msg = NULL;
+
+ dev_dbg(ctx->dev, "Enter\n");
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return NULL;
+ msg->condition = false;
+ msg->on = true;
+ msg->msg_id = msg_id;
+ msg->drv_id = drv_id;
+ spin_lock_bh(&ctx->block_lock);
+ list_add_tail(&msg->node, &ctx->block_list);
+ spin_unlock_bh(&ctx->block_lock);
+
+ return msg;
+}
+
+/*
+ * While handling interrupts we need to check the message status and then
+ * whether anyone is blocked waiting for that message.
+ *
+ * Here we unblock the waiters, matching on the ids that were passed in and
+ * searching the block list for them.
+ * We will not find a block in two cases:
+ * a) when it is a small message and no block was created, so we silently
+ * ignore it
+ * b) when we are genuinely unable to find the block (possibly a bug)
+ *
+ * Since small messages are frequent, error prints here would spam the
+ * kernel log, so keep these as debug prints that can be enabled via
+ * dynamic debug while debugging IPC issues
+ */
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+ u32 drv_id, u32 ipc, void *data, u32 size)
+{
+ struct sst_block *block = NULL;
+
+ dev_dbg(ctx->dev, "Enter\n");
+
+ spin_lock_bh(&ctx->block_lock);
+ list_for_each_entry(block, &ctx->block_list, node) {
+ dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
+ block->drv_id);
+ if (block->msg_id == ipc && block->drv_id == drv_id) {
+ dev_dbg(ctx->dev, "free up the block\n");
+ block->ret_code = result;
+ block->data = data;
+ block->size = size;
+ block->condition = true;
+ spin_unlock_bh(&ctx->block_lock);
+ wake_up(&ctx->wait_queue);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&ctx->block_lock);
+ dev_dbg(ctx->dev,
+ "Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
+ ipc, drv_id);
+ return -EINVAL;
+}
+
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
+{
+ struct sst_block *block = NULL, *__block;
+
+ dev_dbg(ctx->dev, "Enter\n");
+ spin_lock_bh(&ctx->block_lock);
+ list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
+ if (block == freed) {
+ pr_debug("pvt_id freed --> %d\n", freed->drv_id);
+ /* toggle the index position of pvt_id */
+ list_del(&freed->node);
+ spin_unlock_bh(&ctx->block_lock);
+ kfree(freed->data);
+ freed->data = NULL;
+ kfree(freed);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&ctx->block_lock);
+ dev_err(ctx->dev, "block is already freed!!!\n");
+ return -EINVAL;
+}
+
+int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ struct ipc_post *ipc_msg, bool sync)
+{
+ struct ipc_post *msg = ipc_msg;
+ union ipc_header_mrfld header;
+ unsigned int loop_count = 0;
+ int retval = 0;
+ unsigned long irq_flags;
+
+ dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+ if (sync) {
+ while (header.p.header_high.part.busy) {
+ if (loop_count > 25) {
+ dev_err(sst_drv_ctx->dev,
+ "sst: Busy wait failed, cant send this msg\n");
+ retval = -EBUSY;
+ goto out;
+ }
+ cpu_relax();
+ loop_count++;
+ header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+ }
+ } else {
+ if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+ /* queue is empty, nothing to send */
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ dev_dbg(sst_drv_ctx->dev,
+ "Empty msg queue... NO Action\n");
+ return 0;
+ }
+
+ if (header.p.header_high.part.busy) {
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n");
+ return 0;
+ }
+
+ /* copy msg from list */
+ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+ struct ipc_post, node);
+ list_del(&msg->node);
+ }
+ dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
+ msg->mrfld_header.p.header_high.full);
+ dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
+ msg->mrfld_header.p.header_low_payload);
+
+ if (msg->mrfld_header.p.header_high.part.large)
+ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+ msg->mailbox_data,
+ msg->mrfld_header.p.header_low_payload);
+
+ sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+
+out:
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ kfree(msg->mailbox_data);
+ kfree(msg);
+ return retval;
+}
+
+void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
+{
+ union interrupt_reg_mrfld isr;
+ union interrupt_reg_mrfld imr;
+ union ipc_header_mrfld clear_ipc;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+ imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
+ isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);
+
+ /* write 1 to clear*/
+ isr.part.busy_interrupt = 1;
+ sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);
+
+ /* Set IA done bit */
+ clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);
+
+ clear_ipc.p.header_high.part.busy = 0;
+ clear_ipc.p.header_high.part.done = 1;
+ clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
+ sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
+ /* unmask busy interrupt */
+ imr.part.busy_interrupt = 0;
+ sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
+ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+/*
+ * process_fw_init - process the FW init msg
+ *
+ * @msg: IPC message mailbox data from FW
+ *
+ * This function processes the FW init msg from FW
+ * marks FW state and prints debug info of loaded FW
+ */
+static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
+ void *msg)
+{
+ struct ipc_header_fw_init *init =
+ (struct ipc_header_fw_init *)msg;
+ int retval = 0;
+
+ dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
+ if (init->result) {
+ sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
+ dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
+ init->result);
+ retval = init->result;
+ goto ret;
+ }
+
+ret:
+ sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
+}
+
+static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
+ struct ipc_post *msg)
+{
+ u32 msg_id;
+ int str_id;
+ u32 data_size, i;
+ void *data_offset;
+ struct stream_info *stream;
+ union ipc_header_high msg_high;
+ u32 msg_low, pipe_id;
+
+ msg_high = msg->mrfld_header.p.header_high;
+ msg_low = msg->mrfld_header.p.header_low_payload;
+ msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
+ data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
+ data_size = msg_low - (sizeof(struct ipc_dsp_hdr));
+
+ switch (msg_id) {
+ case IPC_SST_PERIOD_ELAPSED_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
+ if (str_id > 0) {
+ dev_dbg(sst_drv_ctx->dev,
+ "Period elapsed rcvd for pipe id 0x%x\n",
+ pipe_id);
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->period_elapsed)
+ stream->period_elapsed(stream->pcm_substream);
+ if (stream->compr_cb)
+ stream->compr_cb(stream->compr_cb_param);
+ }
+ break;
+
+ case IPC_IA_DRAIN_STREAM_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
+ if (str_id > 0) {
+ stream = &sst_drv_ctx->streams[str_id];
+ if (stream->drain_notify)
+ stream->drain_notify(stream->drain_cb_param);
+ }
+ break;
+
+ case IPC_IA_FW_ASYNC_ERR_MRFLD:
+ dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
+ for (i = 0; i < (data_size/4); i++)
+ print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+ 16, 4, data_offset, data_size, false);
+ break;
+
+ case IPC_IA_FW_INIT_CMPLT_MRFLD:
+ process_fw_init(sst_drv_ctx, data_offset);
+ break;
+
+ case IPC_IA_BUF_UNDER_RUN_MRFLD:
+ pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+ str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
+ if (str_id > 0)
+ dev_err(sst_drv_ctx->dev,
+ "Buffer under-run for pipe:%#x str_id:%d\n",
+ pipe_id, str_id);
+ break;
+
+ default:
+ dev_err(sst_drv_ctx->dev,
+ "Unrecognized async msg from FW msg_id %#x\n", msg_id);
+ }
+}
+
+void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ struct ipc_post *msg)
+{
+ unsigned int drv_id;
+ void *data;
+ union ipc_header_high msg_high;
+ u32 msg_low;
+ struct ipc_dsp_hdr *dsp_hdr;
+ unsigned int cmd_id;
+
+ msg_high = msg->mrfld_header.p.header_high;
+ msg_low = msg->mrfld_header.p.header_low_payload;
+
+ dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
+ msg->mrfld_header.p.header_high.full,
+ msg->mrfld_header.p.header_low_payload);
+
+ drv_id = msg_high.part.drv_id;
+
+ /* Check for async messages first */
+ if (drv_id == SST_ASYNC_DRV_ID) {
+ /*FW sent async large message*/
+ process_fw_async_msg(sst_drv_ctx, msg);
+ return;
+ }
+
+ /* FW sent short error response for an IPC */
+ if (msg_high.part.result && drv_id && !msg_high.part.large) {
+ /* 32-bit FW error code in msg_low */
+ dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low);
+ sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, NULL, 0);
+ return;
+ }
+
+ /*
+ * Process all valid responses
+ * If it is a large message, the payload contains the size to
+ * copy from the mailbox.
+ */
+ if (msg_high.part.large) {
+ data = kzalloc(msg_low, GFP_KERNEL);
+ if (!data)
+ return;
+ memcpy(data, (void *) msg->mailbox_data, msg_low);
+ /* Copy the command id so that we can use it to put the SST into reset */
+ dsp_hdr = (struct ipc_dsp_hdr *)data;
+ cmd_id = dsp_hdr->cmd_id;
+ dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
+ if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, data, msg_low))
+ kfree(data);
+ } else {
+ sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+ msg_high.part.drv_id,
+ msg_high.part.msg_id, NULL, 0);
+ }
+
+}
diff --git a/sound/soc/intel/sst/sst_loader.c b/sound/soc/intel/sst/sst_loader.c
new file mode 100644
index 000000000000..b580f96e25e5
--- /dev/null
+++ b/sound/soc/intel/sst/sst_loader.c
@@ -0,0 +1,456 @@
+/*
+ * sst_loader.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file contains all dsp controlling functions like firmware download,
+ * setting/resetting dsp cores, etc
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+static inline void memcpy32_toio(void __iomem *dst, const void *src, int count)
+{
+ /* __iowrite32_copy() takes the count in 32-bit words, so divide the
+ * byte count by 4
+ */
+ __iowrite32_copy(dst, src, count/4);
+}
+
+/**
+ * intel_sst_reset_dsp_mrfld - Reset the SST DSP
+ *
+ * This resets the DSP on MRFLD platforms
+ */
+int intel_sst_reset_dsp_mrfld(struct intel_sst_drv *sst_drv_ctx)
+{
+ union config_status_reg_mrfld csr;
+
+ dev_dbg(sst_drv_ctx->dev, "sst: Resetting the DSP in mrfld\n");
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+ dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full);
+
+ csr.full |= 0x7;
+ sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+ dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full);
+
+ csr.full &= ~(0x1);
+ sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+ dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full);
+ return 0;
+}
+
+/**
+ * sst_start_mrfld - Start the SST DSP processor
+ *
+ * This starts the DSP on MERRIFIELD platforms
+ */
+int sst_start_mrfld(struct intel_sst_drv *sst_drv_ctx)
+{
+ union config_status_reg_mrfld csr;
+
+ dev_dbg(sst_drv_ctx->dev, "sst: Starting the DSP in mrfld LALALALA\n");
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+ dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full);
+
+ csr.full |= 0x7;
+ sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+ dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full);
+
+ csr.part.xt_snoop = 1;
+ csr.full &= ~(0x5);
+ sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+ dev_dbg(sst_drv_ctx->dev, "sst: Starting the DSP_merrifield:%llx\n",
+ csr.full);
+ return 0;
+}
+
+static int sst_validate_fw_image(struct intel_sst_drv *ctx, unsigned long size,
+ struct fw_module_header **module, u32 *num_modules)
+{
+ struct sst_fw_header *header;
+ const void *sst_fw_in_mem = ctx->fw_in_mem;
+
+ dev_dbg(ctx->dev, "Enter\n");
+
+ /* Read the header information from the data pointer */
+ header = (struct sst_fw_header *)sst_fw_in_mem;
+ dev_dbg(ctx->dev,
+ "header sign=%s size=%x modules=%x fmt=%x size=%zx\n",
+ header->signature, header->file_size, header->modules,
+ header->file_format, sizeof(*header));
+
+ /* verify FW */
+ if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
+ (size != header->file_size + sizeof(*header))) {
+ /* Invalid FW signature */
+ dev_err(ctx->dev, "InvalidFW sign/filesize mismatch\n");
+ return -EINVAL;
+ }
+ *num_modules = header->modules;
+ *module = (void *)sst_fw_in_mem + sizeof(*header);
+
+ return 0;
+}
+
+/*
+ * sst_fill_memcpy_list - Fill the memcpy list
+ *
+ * @memcpy_list: List to be filled
+ * @destn: Destination addr to be filled in the list
+ * @src: Source addr to be filled in the list
+ * @size: Size to be filled in the list
+ * @is_io: true when the destination is device (I/O) memory
+ *
+ * Adds the node to the list after required fields
+ * are populated in the node
+ */
+static int sst_fill_memcpy_list(struct list_head *memcpy_list,
+ void *destn, const void *src, u32 size, bool is_io)
+{
+ struct sst_memcpy_list *listnode;
+
+ listnode = kzalloc(sizeof(*listnode), GFP_KERNEL);
+ if (listnode == NULL)
+ return -ENOMEM;
+ listnode->dstn = destn;
+ listnode->src = src;
+ listnode->size = size;
+ listnode->is_io = is_io;
+ list_add_tail(&listnode->memcpylist, memcpy_list);
+
+ return 0;
+}
+
+/**
+ * sst_parse_module_memcpy - Parse audio FW modules and populate the memcpy list
+ *
+ * @sst_drv_ctx : driver context
+ * @module : FW module header
+ * @memcpy_list : Pointer to the list to be populated
+ * Creates one memcpy list entry per block to be copied.
+ * Returns 0 if the module sizes are proper, an error code otherwise.
+ */
+static int sst_parse_module_memcpy(struct intel_sst_drv *sst_drv_ctx,
+ struct fw_module_header *module, struct list_head *memcpy_list)
+{
+ struct fw_block_info *block;
+ u32 count;
+ int ret_val = 0;
+ void __iomem *ram_iomem;
+
+ dev_dbg(sst_drv_ctx->dev, "module sign %s size %x blocks %x type %x\n",
+ module->signature, module->mod_size,
+ module->blocks, module->type);
+ dev_dbg(sst_drv_ctx->dev, "module entrypoint 0x%x\n", module->entry_point);
+
+ block = (void *)module + sizeof(*module);
+
+ for (count = 0; count < module->blocks; count++) {
+ if (block->size <= 0) {
+ dev_err(sst_drv_ctx->dev, "block size invalid\n");
+ return -EINVAL;
+ }
+ switch (block->type) {
+ case SST_IRAM:
+ ram_iomem = sst_drv_ctx->iram;
+ break;
+ case SST_DRAM:
+ ram_iomem = sst_drv_ctx->dram;
+ break;
+ case SST_DDR:
+ ram_iomem = sst_drv_ctx->ddr;
+ break;
+ case SST_CUSTOM_INFO:
+ block = (void *)block + sizeof(*block) + block->size;
+ continue;
+ default:
+ dev_err(sst_drv_ctx->dev, "wrong ram type0x%x in block0x%x\n",
+ block->type, count);
+ return -EINVAL;
+ }
+
+ ret_val = sst_fill_memcpy_list(memcpy_list,
+ ram_iomem + block->ram_offset,
+ (void *)block + sizeof(*block), block->size, 1);
+ if (ret_val)
+ return ret_val;
+
+ block = (void *)block + sizeof(*block) + block->size;
+ }
+ return 0;
+}
+
+/**
+ * sst_parse_fw_memcpy - parse the firmware image & populate the list for memcpy
+ *
+ * @ctx : pointer to drv context
+ * @size : size of the firmware
+ * @fw_list : pointer to list_head to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for memcpy
+ */
+static int sst_parse_fw_memcpy(struct intel_sst_drv *ctx, unsigned long size,
+ struct list_head *fw_list)
+{
+ struct fw_module_header *module;
+ u32 count, num_modules;
+ int ret_val;
+
+ ret_val = sst_validate_fw_image(ctx, size, &module, &num_modules);
+ if (ret_val)
+ return ret_val;
+
+ for (count = 0; count < num_modules; count++) {
+ ret_val = sst_parse_module_memcpy(ctx, module, fw_list);
+ if (ret_val)
+ return ret_val;
+ module = (void *)module + sizeof(*module) + module->mod_size;
+ }
+
+ return 0;
+}
+
+/**
+ * sst_do_memcpy - function initiates the memcpy
+ *
+ * @memcpy_list: Pointer to the memcpy list on which the memcpy needs to be initiated
+ *
+ * Triggers the memcpy
+ */
+static void sst_do_memcpy(struct list_head *memcpy_list)
+{
+ struct sst_memcpy_list *listnode;
+
+ list_for_each_entry(listnode, memcpy_list, memcpylist) {
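+ /* is_io entries target device memory (IRAM/DRAM/DDR) and need the 32-bit I/O copy helper */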
+ if (listnode->is_io == true)
+ memcpy32_toio((void __iomem *)listnode->dstn,
+ listnode->src, listnode->size);
+ else
+ memcpy(listnode->dstn, listnode->src, listnode->size);
+ }
+}
+
+void sst_memcpy_free_resources(struct intel_sst_drv *sst_drv_ctx)
+{
+ struct sst_memcpy_list *listnode, *tmplistnode;
+
+ /* Free the list */
+ if (!list_empty(&sst_drv_ctx->memcpy_list)) {
+ list_for_each_entry_safe(listnode, tmplistnode,
+ &sst_drv_ctx->memcpy_list, memcpylist) {
+ list_del(&listnode->memcpylist);
+ kfree(listnode);
+ }
+ }
+}
+
+static int sst_cache_and_parse_fw(struct intel_sst_drv *sst,
+ const struct firmware *fw)
+{
+ int retval = 0;
+
+ sst->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+ if (!sst->fw_in_mem) {
+ retval = -ENOMEM;
+ goto end_release;
+ }
+ dev_dbg(sst->dev, "copied fw to %p", sst->fw_in_mem);
+ dev_dbg(sst->dev, "phys: %lx", (unsigned long)virt_to_phys(sst->fw_in_mem));
+ memcpy(sst->fw_in_mem, fw->data, fw->size);
+ retval = sst_parse_fw_memcpy(sst, fw->size, &sst->memcpy_list);
+ if (retval) {
+ dev_err(sst->dev, "Failed to parse fw\n");
+ kfree(sst->fw_in_mem);
+ sst->fw_in_mem = NULL;
+ }
+
+end_release:
+ release_firmware(fw);
+ return retval;
+}
+
+void sst_firmware_load_cb(const struct firmware *fw, void *context)
+{
+ struct intel_sst_drv *ctx = context;
+
+ dev_dbg(ctx->dev, "Enter\n");
+
+ if (fw == NULL) {
+ dev_err(ctx->dev, "request fw failed\n");
+ return;
+ }
+
+ mutex_lock(&ctx->sst_lock);
+
+ if (ctx->sst_state != SST_RESET ||
+ ctx->fw_in_mem != NULL) {
+ if (fw != NULL)
+ release_firmware(fw);
+ mutex_unlock(&ctx->sst_lock);
+ return;
+ }
+
+ dev_dbg(ctx->dev, "Request Fw completed\n");
+ sst_cache_and_parse_fw(ctx, fw);
+ mutex_unlock(&ctx->sst_lock);
+}
+
+/*
+ * sst_request_fw - requests audio fw from kernel and saves a copy
+ *
+ * This function requests the SST FW from the kernel, parses it and
+ * saves a copy in the driver context
+ */
+static int sst_request_fw(struct intel_sst_drv *sst)
+{
+ int retval = 0;
+ const struct firmware *fw;
+
+ retval = request_firmware(&fw, sst->firmware_name, sst->dev);
+ if (retval) {
+ dev_err(sst->dev, "request fw failed %d\n", retval);
+ return retval;
+ }
+ if (fw == NULL) {
+ dev_err(sst->dev, "fw is returning as null\n");
+ return -EINVAL;
+ }
+ mutex_lock(&sst->sst_lock);
+ retval = sst_cache_and_parse_fw(sst, fw);
+ mutex_unlock(&sst->sst_lock);
+
+ return retval;
+}
+
+/*
+ * Writing the DDR physical base to DCCM offset
+ * so that FW can use it to setup TLB
+ */
+static void sst_dccm_config_write(void __iomem *dram_base,
+ unsigned int ddr_base)
+{
+ void __iomem *addr;
+ u32 bss_reset = 0;
+
+ addr = (void __iomem *)(dram_base + MRFLD_FW_DDR_BASE_OFFSET);
+ memcpy32_toio(addr, (void *)&ddr_base, sizeof(u32));
+ bss_reset |= (1 << MRFLD_FW_BSS_RESET_BIT);
+ addr = (void __iomem *)(dram_base + MRFLD_FW_FEATURE_BASE_OFFSET);
+ memcpy32_toio(addr, &bss_reset, sizeof(u32));
+}
+
+void sst_post_download_mrfld(struct intel_sst_drv *ctx)
+{
+ sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+ dev_dbg(ctx->dev, "config written to DCCM\n");
+}
+
+/**
+ * sst_load_fw - function to load FW into DSP
+ * Transfers the FW to DSP using dma/memcpy
+ */
+int sst_load_fw(struct intel_sst_drv *sst_drv_ctx)
+{
+ int ret_val = 0;
+ struct sst_block *block;
+
+ dev_dbg(sst_drv_ctx->dev, "sst_load_fw\n");
+
+ if (sst_drv_ctx->sst_state != SST_RESET ||
+ sst_drv_ctx->sst_state == SST_SHUTDOWN)
+ return -EAGAIN;
+
+ if (!sst_drv_ctx->fw_in_mem) {
+ dev_dbg(sst_drv_ctx->dev, "sst: FW not in memory retry to download\n");
+ ret_val = sst_request_fw(sst_drv_ctx);
+ if (ret_val)
+ return ret_val;
+ }
+
+ BUG_ON(!sst_drv_ctx->fw_in_mem);
+ block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
+ if (block == NULL)
+ return -ENOMEM;
+
+ /* Prevent C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, 0);
+
+ sst_drv_ctx->sst_state = SST_FW_LOADING;
+
+ ret_val = sst_drv_ctx->ops->reset(sst_drv_ctx);
+ if (ret_val)
+ goto restore;
+
+ sst_do_memcpy(&sst_drv_ctx->memcpy_list);
+
+ /* Write the DRAM/DCCM config before enabling FW */
+ if (sst_drv_ctx->ops->post_download)
+ sst_drv_ctx->ops->post_download(sst_drv_ctx);
+
+ /* bring sst out of reset */
+ ret_val = sst_drv_ctx->ops->start(sst_drv_ctx);
+ if (ret_val)
+ goto restore;
+
+ ret_val = sst_wait_timeout(sst_drv_ctx, block);
+ if (ret_val) {
+ dev_err(sst_drv_ctx->dev, "fw download failed %d\n" , ret_val);
+ /* FW download failed due to timeout */
+ ret_val = -EBUSY;
+ }
+
+restore:
+ /* Re-enable Deeper C-states beyond C6 */
+ pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+ sst_free_block(sst_drv_ctx, block);
+ dev_dbg(sst_drv_ctx->dev, "fw load successful!!!\n");
+
+ if (sst_drv_ctx->ops->restore_dsp_context)
+ sst_drv_ctx->ops->restore_dsp_context();
+ sst_drv_ctx->sst_state = SST_FW_RUNNING;
+ return ret_val;
+}
+
diff --git a/sound/soc/intel/sst/sst_pci.c b/sound/soc/intel/sst/sst_pci.c
new file mode 100644
index 000000000000..3a0b3bf0af97
--- /dev/null
+++ b/sound/soc/intel/sst/sst_pci.c
@@ -0,0 +1,209 @@
+/*
+ * sst_pci.c - SST (LPE) driver init file for pci enumeration.
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+
+static int sst_platform_get_resources(struct intel_sst_drv *ctx)
+{
+ int ddr_base, ret = 0;
+ struct pci_dev *pci = ctx->pci;
+
+ ret = pci_request_regions(pci, SST_DRV_NAME);
+ if (ret)
+ return ret;
+
+ /* map registers */
+ /* DDR base */
+ if (ctx->dev_id == SST_MRFLD_PCI_ID) {
+ ctx->ddr_base = pci_resource_start(pci, 0);
+ /* check that the relocated IMR base matches with FW Binary */
+ ddr_base = relocate_imr_addr_mrfld(ctx->ddr_base);
+ if (!ctx->pdata->lib_info) {
+ dev_err(ctx->dev, "lib_info pointer NULL\n");
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ if (ddr_base != ctx->pdata->lib_info->mod_base) {
+ dev_err(ctx->dev,
+ "FW LSP DDR BASE does not match with IFWI\n");
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ ctx->ddr_end = pci_resource_end(pci, 0);
+
+ ctx->ddr = pcim_iomap(pci, 0,
+ pci_resource_len(pci, 0));
+ if (!ctx->ddr) {
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ dev_dbg(ctx->dev, "sst: DDR Ptr %p\n", ctx->ddr);
+ } else {
+ ctx->ddr = NULL;
+ }
+ /* SHIM */
+ ctx->shim_phy_add = pci_resource_start(pci, 1);
+ ctx->shim = pcim_iomap(pci, 1, pci_resource_len(pci, 1));
+ if (!ctx->shim) {
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ dev_dbg(ctx->dev, "SST Shim Ptr %p\n", ctx->shim);
+
+ /* Shared SRAM */
+ ctx->mailbox_add = pci_resource_start(pci, 2);
+ ctx->mailbox = pcim_iomap(pci, 2, pci_resource_len(pci, 2));
+ if (!ctx->mailbox) {
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ dev_dbg(ctx->dev, "SRAM Ptr %p\n", ctx->mailbox);
+
+ /* IRAM */
+ ctx->iram_end = pci_resource_end(pci, 3);
+ ctx->iram_base = pci_resource_start(pci, 3);
+ ctx->iram = pcim_iomap(pci, 3, pci_resource_len(pci, 3));
+ if (!ctx->iram) {
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ dev_dbg(ctx->dev, "IRAM Ptr %p\n", ctx->iram);
+
+ /* DRAM */
+ ctx->dram_end = pci_resource_end(pci, 4);
+ ctx->dram_base = pci_resource_start(pci, 4);
+ ctx->dram = pcim_iomap(pci, 4, pci_resource_len(pci, 4));
+ if (!ctx->dram) {
+ ret = -EINVAL;
+ goto do_release_regions;
+ }
+ dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
+do_release_regions:
+ pci_release_regions(pci);
+ return 0;
+}
+
+/*
+ * intel_sst_probe - PCI probe function
+ *
+ * @pci: PCI device structure
+ * @pci_id: PCI device ID structure
+ *
+ */
+static int intel_sst_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ int ret = 0;
+ struct intel_sst_drv *sst_drv_ctx;
+ struct sst_platform_info *sst_pdata = pci->dev.platform_data;
+
+ dev_dbg(&pci->dev, "Probe for DID %x\n", pci->device);
+ ret = sst_alloc_drv_context(&sst_drv_ctx, &pci->dev, pci->device);
+ if (ret < 0)
+ return ret;
+
+ sst_drv_ctx->pdata = sst_pdata;
+ sst_drv_ctx->irq_num = pci->irq;
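+ /* firmware image name is derived from the PCI device id, e.g. fw_sst_<devid>.bin */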
+ snprintf(sst_drv_ctx->firmware_name, sizeof(sst_drv_ctx->firmware_name),
+ "%s%04x%s", "fw_sst_",
+ sst_drv_ctx->dev_id, ".bin");
+
+ ret = sst_context_init(sst_drv_ctx);
+ if (ret < 0)
+ return ret;
+
+ /* Init the device */
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(sst_drv_ctx->dev,
+ "device can't be enabled. Returned err: %d\n", ret);
+ goto do_free_drv_ctx;
+ }
+ sst_drv_ctx->pci = pci_dev_get(pci);
+ ret = sst_platform_get_resources(sst_drv_ctx);
+ if (ret < 0)
+ goto do_free_drv_ctx;
+
+ pci_set_drvdata(pci, sst_drv_ctx);
+ sst_configure_runtime_pm(sst_drv_ctx);
+
+ return ret;
+
+do_free_drv_ctx:
+ sst_context_cleanup(sst_drv_ctx);
+ dev_err(sst_drv_ctx->dev, "Probe failed with %d\n", ret);
+ return ret;
+}
+
+/**
+ * intel_sst_remove - PCI remove function
+ *
+ * @pci: PCI device structure
+ *
+ * This function is called by OS when a device is unloaded
+ * This frees the interrupt etc
+ */
+static void intel_sst_remove(struct pci_dev *pci)
+{
+ struct intel_sst_drv *sst_drv_ctx = pci_get_drvdata(pci);
+
+ sst_context_cleanup(sst_drv_ctx);
+ pci_dev_put(sst_drv_ctx->pci);
+ pci_release_regions(pci);
+ pci_set_drvdata(pci, NULL);
+}
+
+/* PCI Routines */
+static struct pci_device_id intel_sst_ids[] = {
+ { PCI_VDEVICE(INTEL, SST_MRFLD_PCI_ID), 0},
+ { 0, }
+};
+
+static struct pci_driver sst_driver = {
+ .name = SST_DRV_NAME,
+ .id_table = intel_sst_ids,
+ .probe = intel_sst_probe,
+ .remove = intel_sst_remove,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &intel_sst_pm,
+ },
+#endif
+};
+
+module_pci_driver(sst_driver);
+
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine PCI Driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
+MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("sst");
diff --git a/sound/soc/intel/sst/sst_pvt.c b/sound/soc/intel/sst/sst_pvt.c
new file mode 100644
index 000000000000..4b7720864492
--- /dev/null
+++ b/sound/soc/intel/sst/sst_pvt.c
@@ -0,0 +1,449 @@
+/*
+ * sst_pvt.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/asound.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+int sst_shim_write(void __iomem *addr, int offset, int value)
+{
+ writel(value, addr + offset);
+ return 0;
+}
+
+u32 sst_shim_read(void __iomem *addr, int offset)
+{
+ return readl(addr + offset);
+}
+
+u64 sst_reg_read64(void __iomem *addr, int offset)
+{
+ u64 val = 0;
+
+ memcpy_fromio(&val, addr + offset, sizeof(val));
+
+ return val;
+}
+
+int sst_shim_write64(void __iomem *addr, int offset, u64 value)
+{
+ memcpy_toio(addr + offset, &value, sizeof(value));
+ return 0;
+}
+
+u64 sst_shim_read64(void __iomem *addr, int offset)
+{
+ u64 val = 0;
+
+ memcpy_fromio(&val, addr + offset, sizeof(val));
+ return val;
+}
+
+void sst_set_fw_state_locked(
+ struct intel_sst_drv *sst_drv_ctx, int sst_state)
+{
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_drv_ctx->sst_state = sst_state;
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+}
+
+/*
+ * sst_wait_interruptible - wait on event
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits without a timeout (and is interruptible) for a
+ * given block event
+ */
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+ struct sst_block *block)
+{
+ int retval = 0;
+
+ if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
+ block->condition)) {
+ /* event wake */
+ if (block->ret_code < 0) {
+ dev_err(sst_drv_ctx->dev,
+ "stream failed %d\n", block->ret_code);
+ retval = -EBUSY;
+ } else {
+ dev_dbg(sst_drv_ctx->dev, "event up\n");
+ retval = 0;
+ }
+ } else {
+ dev_err(sst_drv_ctx->dev, "signal interrupted\n");
+ retval = -EINTR;
+ }
+ return retval;
+}
+
+unsigned long long read_shim_data(struct intel_sst_drv *sst, int addr)
+{
+ unsigned long long val = 0;
+
+ switch (sst->dev_id) {
+ case SST_MRFLD_PCI_ID:
+ case SST_BYT_ACPI_ID:
+ val = sst_shim_read64(sst->shim, addr);
+ break;
+ }
+ return val;
+}
+
+void write_shim_data(struct intel_sst_drv *sst, int addr,
+ unsigned long long data)
+{
+ switch (sst->dev_id) {
+ case SST_MRFLD_PCI_ID:
+ case SST_BYT_ACPI_ID:
+ sst_shim_write64(sst->shim, addr, (u64) data);
+ break;
+ }
+}
+
+/*
+ * sst_wait_timeout - wait on event for timeout
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits with a timeout value (and is not interruptible) on a
+ * given block event
+ */
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
+{
+ int retval = 0;
+
+ /*
+ * NOTE:
+ * Observed that FW processes the alloc msg and replies even
+ * before the alloc thread has finished execution
+ */
+ dev_dbg(sst_drv_ctx->dev,
+ "waiting for condition %x ipc %d drv_id %d\n",
+ block->condition, block->msg_id, block->drv_id);
+ if (wait_event_timeout(sst_drv_ctx->wait_queue,
+ block->condition,
+ msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
+ /* event wake */
+ dev_dbg(sst_drv_ctx->dev, "Event wake %x\n",
+ block->condition);
+ dev_dbg(sst_drv_ctx->dev, "message ret: %d\n",
+ block->ret_code);
+ retval = -block->ret_code;
+ } else {
+ block->on = false;
+ dev_err(sst_drv_ctx->dev,
+ "Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
+ block->condition, block->msg_id, sst_drv_ctx->sst_state);
+ sst_drv_ctx->sst_state = SST_RESET;
+
+ retval = -EBUSY;
+ }
+ return retval;
+}
+
+/*
+ * sst_create_ipc_msg - create a IPC message
+ *
+ * @arg: ipc message
+ * @large: large or short message
+ *
+ * this function allocates structures to send a large or short
+ * message to the firmware
+ */
+int sst_create_ipc_msg(struct ipc_post **arg, bool large)
+{
+ struct ipc_post *msg;
+
+ msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+ if (large) {
+ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
+ if (!msg->mailbox_data) {
+ kfree(msg);
+ return -ENOMEM;
+ }
+ } else {
+ msg->mailbox_data = NULL;
+ }
+ msg->is_large = large;
+ *arg = msg;
+ return 0;
+}
+
+/*
+ * sst_create_block_and_ipc_msg - Creates IPC message and sst block
+ * @arg: passed to sst_create_ipc_message API
+ * @large: large or short message
+ * @sst_drv_ctx: sst driver context
+ * @block: return block allocated
+ * @msg_id: IPC message id
+ * @drv_id: stream id or private id
+ */
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+ struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+ u32 msg_id, u32 drv_id)
+{
+ int retval = 0;
+
+ retval = sst_create_ipc_msg(arg, large);
+ if (retval)
+ return retval;
+ *block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
+ if (*block == NULL) {
+ kfree(*arg);
+ return -ENOMEM;
+ }
+ return retval;
+}
+
+/*
+ * sst_clean_stream - clean the stream context
+ *
+ * @stream: stream structure
+ *
+ * this function resets the stream contexts
+ * should be called in free
+ */
+void sst_clean_stream(struct stream_info *stream)
+{
+ stream->status = STREAM_UN_INIT;
+ stream->prev = STREAM_UN_INIT;
+ mutex_lock(&stream->lock);
+ stream->cumm_bytes = 0;
+ mutex_unlock(&stream->lock);
+}
+
+int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
+ int task_id, int ipc_msg, int cmd_id, int pipe_id,
+ size_t mbox_data_len, const void *mbox_data, void **data,
+ bool large, bool fill_dsp, bool sync, bool response)
+{
+ struct ipc_post *msg = NULL;
+ struct ipc_dsp_hdr dsp_hdr;
+ struct sst_block *block;
+ int ret = 0, pvt_id;
+
+ pvt_id = sst_assign_pvt_id(sst);
+ if (pvt_id < 0)
+ return pvt_id;
+
+ if (response)
+ ret = sst_create_block_and_ipc_msg(
+ &msg, large, sst, &block, ipc_msg, pvt_id);
+ else
+ ret = sst_create_ipc_msg(&msg, large);
+
+ if (ret < 0) {
+ test_and_clear_bit(pvt_id, &sst->pvt_id);
+ return -ENOMEM;
+ }
+
+ dev_dbg(sst->dev, "pvt_id = %d, pipe id = %d, task = %d ipc_msg: %d\n",
+ pvt_id, pipe_id, task_id, ipc_msg);
+ sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg,
+ task_id, large, pvt_id);
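+ /* the low payload is the dsp header size plus any mailbox data that follows it */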
+ msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
+ msg->mrfld_header.p.header_high.part.res_rqd = !sync;
+ dev_dbg(sst->dev, "header:%x\n",
+ msg->mrfld_header.p.header_high.full);
+ dev_dbg(sst->dev, "response rqd: %x",
+ msg->mrfld_header.p.header_high.part.res_rqd);
+ dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d",
+ msg->mrfld_header.p.header_low_payload);
+ if (fill_dsp) {
+ sst_fill_header_dsp(&dsp_hdr, cmd_id, pipe_id, mbox_data_len);
+ memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+ if (mbox_data_len) {
+ memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+ mbox_data, mbox_data_len);
+ }
+ }
+
+ if (sync)
+ sst->ops->post_message(sst, msg, true);
+ else
+ sst_add_to_dispatch_list_and_post(sst, msg);
+
+ if (response) {
+ ret = sst_wait_timeout(sst, block);
+ if (ret < 0) {
+ goto out;
+ } else if (block->data) {
+ if (!data)
+ goto out;
+ *data = kzalloc(block->size, GFP_KERNEL);
+ if (!(*data)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(*data, (void *)block->data, block->size);
+ }
+ }
+out:
+ if (response)
+ sst_free_block(sst, block);
+ test_and_clear_bit(pvt_id, &sst->pvt_id);
+ return ret;
+}
+
+int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
+{
+ int ret;
+
+ pm_runtime_mark_last_busy(sst_drv->dev);
+ ret = pm_runtime_put_autosuspend(sst_drv->dev);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+void sst_fill_header_mrfld(union ipc_header_mrfld *header,
+ int msg, int task_id, int large, int drv_id)
+{
+ header->full = 0;
+ header->p.header_high.part.msg_id = msg;
+ header->p.header_high.part.task_id = task_id;
+ header->p.header_high.part.large = large;
+ header->p.header_high.part.drv_id = drv_id;
+ header->p.header_high.part.done = 0;
+ header->p.header_high.part.busy = 1;
+ header->p.header_high.part.res_rqd = 1;
+}
+
+void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
+ int pipe_id, int len)
+{
+ dsp->cmd_id = msg;
+ dsp->mod_index_id = 0xff;
+ dsp->pipe_id = pipe_id;
+ dsp->length = len;
+ dsp->mod_id = 0;
+}
+
+#define SST_MAX_BLOCKS 15
+/*
+ * sst_assign_pvt_id - assign a pvt id for stream
+ *
+ * @sst_drv_ctx : driver context
+ *
+ * this function assigns a private id for calls that don't have a stream
+ * context yet; it should be called with the lock held.
+ * It uses a bitmap for the ids, finds the first free bit and assigns it.
+ */
+int sst_assign_pvt_id(struct intel_sst_drv *drv)
+{
+ int local;
+
+ spin_lock(&drv->block_lock);
+ /* find first zero index from lsb */
+ local = ffz(drv->pvt_id);
+ dev_dbg(drv->dev, "pvt_id assigned --> %d\n", local);
+ if (local >= SST_MAX_BLOCKS) {
+ spin_unlock(&drv->block_lock);
+ dev_err(drv->dev, "PVT _ID error: no free id blocks ");
+ return -EINVAL;
+ }
+ /* toggle the index */
+ change_bit(local, &drv->pvt_id);
+ spin_unlock(&drv->block_lock);
+ return local;
+}
+
+void sst_init_stream(struct stream_info *stream,
+ int codec, int sst_id, int ops, u8 slot)
+{
+ stream->status = STREAM_INIT;
+ stream->prev = STREAM_UN_INIT;
+ stream->ops = ops;
+}
+
+int sst_validate_strid(
+ struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
+ dev_err(sst_drv_ctx->dev,
+ "SST ERR: invalid stream id : %d, max %d\n",
+ str_id, sst_drv_ctx->info.max_streams);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct stream_info *get_stream_info(
+ struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ if (sst_validate_strid(sst_drv_ctx, str_id))
+ return NULL;
+ return &sst_drv_ctx->streams[str_id];
+}
+
+int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ u32 pipe_id)
+{
+ int i;
+
+ for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
+ if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
+ return i;
+
+ dev_dbg(sst_drv_ctx->dev, "no such pipe_id(%u)", pipe_id);
+ return -1;
+}
+
+u32 relocate_imr_addr_mrfld(u32 base_addr)
+{
+ /* Get the difference from 512MB aligned base addr */
+ /* relocate the base */
+ base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
+ return base_addr;
+}
+EXPORT_SYMBOL_GPL(relocate_imr_addr_mrfld);
+
+void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
+ struct ipc_post *msg)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+ list_add_tail(&msg->node, &sst->ipc_dispatch_list);
+ spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+ sst->ops->post_message(sst, NULL, false);
+}
diff --git a/sound/soc/intel/sst/sst_stream.c b/sound/soc/intel/sst/sst_stream.c
new file mode 100644
index 000000000000..dae2a41997aa
--- /dev/null
+++ b/sound/soc/intel/sst/sst_stream.c
@@ -0,0 +1,437 @@
+/*
+ * sst_stream.c - Intel SST Driver for audio engine
+ *
+ * Copyright (C) 2008-14 Intel Corp
+ * Authors: Vinod Koul <vinod.koul@intel.com>
+ * Harsha Priya <priya.harsha@intel.com>
+ * Dharageswari R <dharageswari.r@intel.com>
+ * KP Jeeja <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/compress_driver.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst-mfld-platform.h"
+#include "sst.h"
+#include "../sst-dsp.h"
+
+int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
+{
+ struct snd_sst_alloc_mrfld alloc_param;
+ struct snd_sst_params *str_params;
+ struct snd_sst_tstamp fw_tstamp;
+ struct stream_info *str_info;
+ struct snd_sst_alloc_response *response;
+ unsigned int str_id, pipe_id, task_id;
+ int i, num_ch, ret = 0;
+ void *data = NULL;
+
+ dev_dbg(sst_drv_ctx->dev, "Enter\n");
+ BUG_ON(!params);
+
+ str_params = (struct snd_sst_params *)params;
+ memset(&alloc_param, 0, sizeof(alloc_param));
+ alloc_param.operation = str_params->ops;
+ alloc_param.codec_type = str_params->codec;
+ alloc_param.sg_count = str_params->aparams.sg_count;
+ alloc_param.ring_buf_info[0].addr =
+ str_params->aparams.ring_buf_info[0].addr;
+ alloc_param.ring_buf_info[0].size =
+ str_params->aparams.ring_buf_info[0].size;
+ alloc_param.frag_size = str_params->aparams.frag_size;
+
+ memcpy(&alloc_param.codec_params, &str_params->sparams,
+ sizeof(struct snd_sst_stream_params));
+
+ /*
+ * fill channel map params for multichannel support.
+ * Ideally channel map should be received from upper layers
+ * for multichannel support.
+ * Currently hardcoding as per FW reqm.
+ */
+ num_ch = sst_get_num_channel(str_params);
+ for (i = 0; i < 8; i++) {
+ if (i < num_ch)
+ alloc_param.codec_params.uc.pcm_params.channel_map[i] = i;
+ else
+ alloc_param.codec_params.uc.pcm_params.channel_map[i] = 0xFF;
+ }
+
+ str_id = str_params->stream_id;
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (str_info == NULL) {
+ dev_err(sst_drv_ctx->dev, "get stream info returned null\n");
+ return -EINVAL;
+ }
+
+ pipe_id = str_params->device_type;
+ task_id = str_params->task;
+ sst_drv_ctx->streams[str_id].pipe_id = pipe_id;
+ sst_drv_ctx->streams[str_id].task_id = task_id;
+ sst_drv_ctx->streams[str_id].num_ch = num_ch;
+
+ if (sst_drv_ctx->info.lpe_viewpt_rqd)
+ alloc_param.ts = sst_drv_ctx->info.mailbox_start +
+ sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+ else
+ alloc_param.ts = sst_drv_ctx->mailbox_add +
+ sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+
+ dev_dbg(sst_drv_ctx->dev, "alloc tstamp location = 0x%x\n",
+ alloc_param.ts);
+ dev_dbg(sst_drv_ctx->dev, "assigned pipe id 0x%x to task %d\n",
+ pipe_id, task_id);
+
+ /* allocate device type context */
+ sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
+ str_id, alloc_param.operation, 0);
+
+ dev_info(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
+ str_id, pipe_id);
+ ret = sst_prepare_and_post_msg(sst_drv_ctx, task_id, IPC_CMD,
+ IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param),
+ &alloc_param, data, true, true, false, true);
+
+ if (ret < 0) {
+ dev_err(sst_drv_ctx->dev, "FW alloc failed ret %d\n", ret);
+ /* alloc failed, so reset the state to uninit */
+ str_info->status = STREAM_UN_INIT;
+ str_id = ret;
+ } else if (data) {
+ response = (struct snd_sst_alloc_response *)data;
+ ret = response->str_type.result;
+ if (!ret)
+ goto out;
+ dev_err(sst_drv_ctx->dev, "FW alloc failed ret %d\n", ret);
+ if (ret == SST_ERR_STREAM_IN_USE) {
+ dev_err(sst_drv_ctx->dev,
+ "FW not in clean state, send free for:%d\n", str_id);
+ sst_free_stream(sst_drv_ctx, str_id);
+ }
+ str_id = -ret;
+ }
+out:
+ kfree(data);
+ return str_id;
+}
+
+/**
+ * sst_start_stream - Send msg for starting a stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to start
+ * a stream.
+ */
+int sst_start_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+ u16 data = 0;
+
+ dev_dbg(sst_drv_ctx->dev, "sst_start_stream for %d\n", str_id);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ if (str_info->status != STREAM_RUNNING)
+ return -EBADRQC;
+
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
+ IPC_CMD, IPC_IA_START_STREAM_MRFLD, str_info->pipe_id,
+ sizeof(u16), &data, NULL, true, true, true, false);
+
+ return retval;
+}
+
+int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ struct snd_sst_bytes_v2 *bytes)
+{
+ struct ipc_post *msg = NULL;
+ u32 length;
+ int pvt_id, ret = 0;
+ struct sst_block *block = NULL;
+
+ dev_dbg(sst_drv_ctx->dev,
+ "type:%u ipc_msg:%u block:%u task_id:%u pipe: %#x length:%#x\n",
+ bytes->type, bytes->ipc_msg, bytes->block, bytes->task_id,
+ bytes->pipe_id, bytes->len);
+
+ if (sst_create_ipc_msg(&msg, true))
+ return -ENOMEM;
+
+ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+ sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg,
+ bytes->task_id, 1, pvt_id);
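+ /* request a FW response only when the caller asked for a blocking transfer */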
+ msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
+ length = bytes->len;
+ msg->mrfld_header.p.header_low_payload = length;
+ dev_dbg(sst_drv_ctx->dev, "length is %d\n", length);
+ memcpy(msg->mailbox_data, &bytes->bytes, bytes->len);
+ if (bytes->block) {
+ block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
+ if (block == NULL) {
+ kfree(msg);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ dev_dbg(sst_drv_ctx->dev, "msg->mrfld_header.p.header_low_payload:%d",
+ msg->mrfld_header.p.header_low_payload);
+
+ if (bytes->block) {
+ ret = sst_wait_timeout(sst_drv_ctx, block);
+ if (ret) {
+ dev_err(sst_drv_ctx->dev, "fw returned err %d\n", ret);
+ sst_free_block(sst_drv_ctx, block);
+ goto out;
+ }
+ }
+ if (bytes->type == SND_SST_BYTES_GET) {
+ /*
+ * copy the reply and send back
+ * we need to update only sz and payload
+ */
+ if (bytes->block) {
+ unsigned char *r = block->data;
+
+ dev_dbg(sst_drv_ctx->dev, "read back %d bytes",
+ bytes->len);
+ memcpy(bytes->bytes, r, bytes->len);
+ }
+ }
+ if (bytes->block)
+ sst_free_block(sst_drv_ctx, block);
+out:
+ test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
+ return ret;
+}
+
+/*
+ * sst_pause_stream - Send msg for a pausing stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to pause
+ * an already running stream.
+ */
+int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_pause_stream for %d\n", str_id);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ if (str_info->status == STREAM_PAUSED)
+ return 0;
+ if (str_info->status == STREAM_RUNNING ||
+ str_info->status == STREAM_INIT) {
+ if (str_info->prev == STREAM_UN_INIT)
+ return -EBADRQC;
+
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
+ IPC_IA_PAUSE_STREAM_MRFLD, str_info->pipe_id,
+ 0, NULL, NULL, true, true, false, true);
+
+ if (retval == 0) {
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_PAUSED;
+ } else if (retval == SST_ERR_INVALID_STREAM_ID) {
+ retval = -EINVAL;
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ }
+ } else {
+ retval = -EBADRQC;
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n ");
+ }
+
+ return retval;
+}
+
+/**
+ * sst_resume_stream - Send msg for resuming stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to resume
+ * an already paused stream.
+ */
+int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_resume_stream for %d\n", str_id);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ if (str_info->status == STREAM_RUNNING)
+ return 0;
+ if (str_info->status == STREAM_PAUSED) {
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
+ IPC_CMD, IPC_IA_RESUME_STREAM_MRFLD,
+ str_info->pipe_id, 0, NULL, NULL,
+ true, true, false, true);
+
+ if (!retval) {
+ if (str_info->prev == STREAM_RUNNING)
+ str_info->status = STREAM_RUNNING;
+ else
+ str_info->status = STREAM_INIT;
+ str_info->prev = STREAM_PAUSED;
+ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+ retval = -EINVAL;
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ }
+ } else {
+ retval = -EBADRQC;
+ dev_err(sst_drv_ctx->dev, "SST ERR: BADQRC for stream\n");
+ }
+
+ return retval;
+}
+
+
+/**
+ * sst_drop_stream - Send msg for stopping stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to stop
+ * a stream.
+ */
+int sst_drop_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_drop_stream for %d\n", str_id);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+
+ if (str_info->status != STREAM_UN_INIT) {
+ str_info->prev = STREAM_UN_INIT;
+ str_info->status = STREAM_INIT;
+ str_info->cumm_bytes = 0;
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
+ IPC_CMD, IPC_IA_DROP_STREAM_MRFLD,
+ str_info->pipe_id, 0, NULL, NULL,
+ true, true, true, false);
+ } else {
+ retval = -EBADRQC;
+ dev_dbg(sst_drv_ctx->dev, "BADQRC for stream, state %x\n",
+ str_info->status);
+ }
+ return retval;
+}
+
+/**
+ * sst_drain_stream - Send msg for draining stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to drain
+ * a stream.
+ */
+int sst_drain_stream(struct intel_sst_drv *sst_drv_ctx,
+ int str_id, bool partial_drain)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_drain_stream for %d\n", str_id);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ if (str_info->status != STREAM_RUNNING &&
+ str_info->status != STREAM_INIT &&
+ str_info->status != STREAM_PAUSED) {
+ dev_err(sst_drv_ctx->dev, "SST ERR: BADQRC for stream = %d\n",
+ str_info->status);
+ return -EBADRQC;
+ }
+
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
+ IPC_IA_DRAIN_STREAM_MRFLD, str_info->pipe_id,
+ sizeof(u8), &partial_drain, NULL, true, true, false, false);
+ /*
+ * with the new non-blocking drain implementation in the core we don't
+ * need to wait for a response, we only need to invoke the callback on
+ * drain complete
+ */
+
+ return retval;
+}
+
+/**
+ * sst_free_stream - Frees a stream
+ * @str_id: stream ID
+ *
+ * This function is called by any function which wants to free
+ * a stream.
+ */
+int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
+{
+ int retval = 0;
+ struct stream_info *str_info;
+ struct intel_sst_ops *ops;
+
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_free_stream for %d\n", str_id);
+
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ if (sst_drv_ctx->sst_state == SST_RESET) {
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ str_info = get_stream_info(sst_drv_ctx, str_id);
+ if (!str_info)
+ return -EINVAL;
+ ops = sst_drv_ctx->ops;
+
+ mutex_lock(&str_info->lock);
+ if (str_info->status != STREAM_UN_INIT) {
+ str_info->prev = str_info->status;
+ str_info->status = STREAM_UN_INIT;
+ mutex_unlock(&str_info->lock);
+
+ dev_info(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
+ str_id, str_info->pipe_id);
+ retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
+ IPC_IA_FREE_STREAM_MRFLD, str_info->pipe_id, 0,
+ NULL, NULL, true, true, false, true);
+
+ dev_dbg(sst_drv_ctx->dev, "sst: wait for free returned %d\n",
+ retval);
+ mutex_lock(&sst_drv_ctx->sst_lock);
+ sst_clean_stream(str_info);
+ mutex_unlock(&sst_drv_ctx->sst_lock);
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:Stream freed\n");
+ } else {
+ mutex_unlock(&str_info->lock);
+ retval = -EBADRQC;
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:BADQRC for stream\n");
+ }
+
+ return retval;
+}
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 3f9c3a9ae36f..d3d45c6f064f 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -455,7 +455,6 @@ static struct platform_driver jz4740_i2s_driver = {
.probe = jz4740_i2s_dev_probe,
.driver = {
.name = "jz4740-i2s",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c
index 5cb91f9e8626..53586999fcaa 100644
--- a/sound/soc/jz4740/qi_lb60.c
+++ b/sound/soc/jz4740/qi_lb60.c
@@ -77,25 +77,18 @@ static int qi_lb60_probe(struct platform_device *pdev)
{
struct qi_lb60 *qi_lb60;
struct snd_soc_card *card = &qi_lb60_card;
- int ret;
qi_lb60 = devm_kzalloc(&pdev->dev, sizeof(*qi_lb60), GFP_KERNEL);
if (!qi_lb60)
return -ENOMEM;
- qi_lb60->snd_gpio = devm_gpiod_get(&pdev->dev, "snd");
+ qi_lb60->snd_gpio = devm_gpiod_get(&pdev->dev, "snd", GPIOD_OUT_LOW);
if (IS_ERR(qi_lb60->snd_gpio))
return PTR_ERR(qi_lb60->snd_gpio);
- ret = gpiod_direction_output(qi_lb60->snd_gpio, 0);
- if (ret)
- return ret;
- qi_lb60->amp_gpio = devm_gpiod_get(&pdev->dev, "amp");
+ qi_lb60->amp_gpio = devm_gpiod_get(&pdev->dev, "amp", GPIOD_OUT_LOW);
if (IS_ERR(qi_lb60->amp_gpio))
return PTR_ERR(qi_lb60->amp_gpio);
- ret = gpiod_direction_output(qi_lb60->amp_gpio, 0);
- if (ret)
- return ret;
card->dev = &pdev->dev;
@@ -107,7 +100,6 @@ static int qi_lb60_probe(struct platform_device *pdev)
static struct platform_driver qi_lb60_driver = {
.driver = {
.name = "qi-lb60-audio",
- .owner = THIS_MODULE,
},
.probe = qi_lb60_probe,
};
diff --git a/sound/soc/kirkwood/armada-370-db.c b/sound/soc/kirkwood/armada-370-db.c
index c44333849259..de7563bdc5c2 100644
--- a/sound/soc/kirkwood/armada-370-db.c
+++ b/sound/soc/kirkwood/armada-370-db.c
@@ -134,7 +134,6 @@ static const struct of_device_id a370db_dt_ids[] = {
static struct platform_driver a370db_driver = {
.driver = {
.name = "a370db-audio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(a370db_dt_ids),
},
.probe = a370db_probe,
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 0704cd6d2314..def7d8260c4e 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -657,7 +657,6 @@ static struct platform_driver kirkwood_i2s_driver = {
.remove = kirkwood_i2s_dev_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mvebu_audio_of_match),
},
};
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 231d7e7b0711..d9865082160c 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -773,7 +773,7 @@ static int mxs_saif_probe(struct platform_device *pdev)
saif->dev = &pdev->dev;
ret = devm_request_irq(&pdev->dev, saif->irq, mxs_saif_irq, 0,
- "mxs-saif", saif);
+ dev_name(&pdev->dev), saif);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
@@ -815,7 +815,6 @@ static struct platform_driver mxs_saif_driver = {
.driver = {
.name = "mxs-saif",
- .owner = THIS_MODULE,
.of_match_table = mxs_saif_dt_ids,
},
};
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 61822cc53bd3..6f1916b71815 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -49,13 +49,6 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
break;
}
- /* Sgtl5000 sysclk should be >= 8MHz and <= 27M */
- if (mclk < 8000000 || mclk > 27000000) {
- dev_err(codec_dai->dev, "Invalid mclk frequency: %u.%03uMHz\n",
- mclk / 1000000, mclk / 1000 % 1000);
- return -EINVAL;
- }
-
/* Set SGTL5000's SYSCLK (provided by SAIF MCLK) */
ret = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk, 0);
if (ret) {
@@ -194,7 +187,6 @@ MODULE_DEVICE_TABLE(of, mxs_sgtl5000_dt_ids);
static struct platform_driver mxs_sgtl5000_audio_driver = {
.driver = {
.name = "mxs-sgtl5000",
- .owner = THIS_MODULE,
.of_match_table = mxs_sgtl5000_dt_ids,
},
.probe = mxs_sgtl5000_probe,
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index f2f67942b229..b6615affe571 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -298,7 +298,7 @@ static const struct snd_soc_dai_ops nuc900_ac97_dai_ops = {
static struct snd_soc_dai_driver nuc900_ac97_dai = {
.probe = nuc900_ac97_probe,
.remove = nuc900_ac97_remove,
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -385,7 +385,6 @@ static int nuc900_ac97_drvremove(struct platform_device *pdev)
static struct platform_driver nuc900_ac97_driver = {
.driver = {
.name = "nuc900-ac97",
- .owner = THIS_MODULE,
},
.probe = nuc900_ac97_drvprobe,
.remove = nuc900_ac97_drvremove,
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index f434ed79d1b6..b779a3d9b5dd 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -347,7 +347,6 @@ static int nuc900_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver nuc900_pcm_driver = {
.driver = {
.name = "nuc900-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = nuc900_soc_platform_probe,
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index d44463a7b0fa..a2cd3486ac55 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -12,8 +12,20 @@ config SND_OMAP_SOC_MCBSP
config SND_OMAP_SOC_MCPDM
tristate
-config SND_OMAP_SOC_HDMI
- tristate
+config SND_OMAP_SOC_HDMI_AUDIO
+ tristate "HDMI audio support for OMAP4+ based SoCs"
+ depends on SND_OMAP_SOC
+ help
+ For HDMI audio to work, OMAPDSS HDMI support must be
+ enabled.
+ The HDMI audio driver implements a cpu-dai component using the
+ callbacks provided by OMAPDSS and registers the component
+ under the DSS HDMI device. Omap-pcm is registered as the
+ platform component, also under the DSS HDMI device. A dummy
+ codec is used as the codec component. The HDMI audio driver
+ also implements the card and registers it under its own
+ platform device. The device for the driver is registered by
+ the OMAPDSS HDMI driver.
config SND_OMAP_SOC_N810
tristate "SoC Audio support for Nokia N810"
@@ -25,15 +37,15 @@ config SND_OMAP_SOC_N810
Say Y if you want to add support for SoC audio on Nokia N810.
config SND_OMAP_SOC_RX51
- tristate "SoC Audio support for Nokia RX-51"
- depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST) && I2C
+ tristate "SoC Audio support for Nokia N900 (RX-51)"
+ depends on SND_OMAP_SOC && ARM && I2C
select SND_OMAP_SOC_MCBSP
select SND_SOC_TLV320AIC3X
select SND_SOC_TPA6130A2
depends on GPIOLIB
help
- Say Y if you want to add support for SoC audio on Nokia RX-51
- hardware. This is also known as Nokia N900 product.
+ Say Y if you want to add support for SoC audio on Nokia N900
+ cellphone.
config SND_OMAP_SOC_AMS_DELTA
tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
@@ -100,16 +112,6 @@ config SND_OMAP_SOC_OMAP_ABE_TWL6040
- PandaBoard (4430)
- PandaBoardES (4460)
-config SND_OMAP_SOC_OMAP_HDMI
- tristate "SoC Audio support for Texas Instruments OMAP HDMI"
- depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
- select SND_OMAP_SOC_HDMI
- select SND_SOC_HDMI_CODEC
- select OMAP4_DSS_HDMI_AUDIO
- help
- Say Y if you want to add support for SoC HDMI audio on Texas Instruments
- OMAP4 chips
-
config SND_OMAP_SOC_OMAP3_PANDORA
tristate "SoC Audio support for OMAP3 Pandora"
depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3_PANDORA
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index a725905b2c68..db36fbd5d1a0 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -3,13 +3,13 @@ snd-soc-omap-objs := omap-pcm.o
snd-soc-omap-dmic-objs := omap-dmic.o
snd-soc-omap-mcbsp-objs := omap-mcbsp.o mcbsp.o
snd-soc-omap-mcpdm-objs := omap-mcpdm.o
-snd-soc-omap-hdmi-objs := omap-hdmi.o
+snd-soc-omap-hdmi-audio-objs := omap-hdmi-audio.o
obj-$(CONFIG_SND_OMAP_SOC) += snd-soc-omap.o
obj-$(CONFIG_SND_OMAP_SOC_DMIC) += snd-soc-omap-dmic.o
obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
obj-$(CONFIG_SND_OMAP_SOC_MCPDM) += snd-soc-omap-mcpdm.o
-obj-$(CONFIG_SND_OMAP_SOC_HDMI) += snd-soc-omap-hdmi.o
+obj-$(CONFIG_SND_OMAP_SOC_HDMI_AUDIO) += snd-soc-omap-hdmi-audio.o
# OMAP Machine Support
snd-soc-n810-objs := n810.o
@@ -20,7 +20,6 @@ snd-soc-am3517evm-objs := am3517evm.o
snd-soc-omap-abe-twl6040-objs := omap-abe-twl6040.o
snd-soc-omap-twl4030-objs := omap-twl4030.o
snd-soc-omap3pandora-objs := omap3pandora.o
-snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -30,4 +29,3 @@ obj-$(CONFIG_SND_OMAP_SOC_AM3517EVM) += snd-soc-am3517evm.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040) += snd-soc-omap-abe-twl6040.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP_TWL4030) += snd-soc-omap-twl4030.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 8c9cc64a9dfb..4c6afb75eea6 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -599,7 +599,6 @@ static int ams_delta_remove(struct platform_device *pdev)
static struct platform_driver ams_delta_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = ams_delta_probe,
.remove = ams_delta_remove,
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 86c75384c3c8..68a125205375 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -621,8 +621,7 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
mcbsp->reg_cache = NULL;
spin_unlock(&mcbsp->lock);
- if (reg_cache)
- kfree(reg_cache);
+ kfree(reg_cache);
}
/*
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index cec836ed0c01..b9c65f1ad5a8 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -354,7 +354,6 @@ MODULE_DEVICE_TABLE(of, omap_abe_of_match);
static struct platform_driver omap_abe_driver = {
.driver = {
.name = "omap-abe-twl6040",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = omap_abe_of_match,
},
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 0f34e28a3d55..09db2aec12a3 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -509,7 +509,6 @@ MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
static struct platform_driver asoc_dmic_driver = {
.driver = {
.name = "omap-dmic",
- .owner = THIS_MODULE,
.of_match_table = omap_dmic_of_match,
},
.probe = asoc_dmic_probe,
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
new file mode 100644
index 000000000000..3f9ac7dbdc80
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -0,0 +1,407 @@
+/*
+ * omap-hdmi-audio.c -- OMAP4+ DSS HDMI audio support library
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <sound/dmaengine_pcm.h>
+#include <uapi/sound/asound.h>
+#include <sound/asoundef.h>
+#include <sound/omap-pcm.h>
+#include <sound/omap-hdmi-audio.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap-hdmi-audio"
+
+struct hdmi_audio_data {
+ struct snd_soc_card *card;
+
+ const struct omap_hdmi_audio_ops *ops;
+ struct device *dssdev;
+ struct snd_dmaengine_dai_dma_data dma_data;
+ struct omap_dss_audio dss_audio;
+ struct snd_aes_iec958 iec;
+ struct snd_cea_861_aud_if cea;
+
+ struct mutex current_stream_lock;
+ struct snd_pcm_substream *current_stream;
+};
+
+static
+struct hdmi_audio_data *card_drvdata_substream(struct snd_pcm_substream *ss)
+{
+ struct snd_soc_pcm_runtime *rtd = ss->private_data;
+
+ return snd_soc_card_get_drvdata(rtd->card);
+}
+
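+/* callback registered via audio_startup(); DSS invokes it if the HDMI display is disabled during playback */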
+static void hdmi_dai_abort(struct device *dev)
+{
+ struct hdmi_audio_data *ad = dev_get_drvdata(dev);
+
+ mutex_lock(&ad->current_stream_lock);
+ if (ad->current_stream && ad->current_stream->runtime &&
+ snd_pcm_running(ad->current_stream)) {
+ dev_err(dev, "HDMI display disabled, aborting playback\n");
+ snd_pcm_stream_lock_irq(ad->current_stream);
+ snd_pcm_stop(ad->current_stream, SNDRV_PCM_STATE_DISCONNECTED);
+ snd_pcm_stream_unlock_irq(ad->current_stream);
+ }
+ mutex_unlock(&ad->current_stream_lock);
+}
+
+static int hdmi_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_audio_data *ad = card_drvdata_substream(substream);
+ int ret;
+ /*
+ * Make sure that the period bytes are multiple of the DMA packet size.
+ * Largest packet size we use is 32 32-bit words = 128 bytes
+ */
+ ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
+ if (ret < 0) {
+ dev_err(dai->dev, "could not apply constraint\n");
+ return ret;
+ }
+
+ snd_soc_dai_set_dma_data(dai, substream, &ad->dma_data);
+
+ mutex_lock(&ad->current_stream_lock);
+ ad->current_stream = substream;
+ mutex_unlock(&ad->current_stream_lock);
+
+ ret = ad->ops->audio_startup(ad->dssdev, hdmi_dai_abort);
+
+ if (ret) {
+ mutex_lock(&ad->current_stream_lock);
+ ad->current_stream = NULL;
+ mutex_unlock(&ad->current_stream_lock);
+ }
+
+ return ret;
+}
+
+static int hdmi_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_audio_data *ad = card_drvdata_substream(substream);
+ struct snd_aes_iec958 *iec = &ad->iec;
+ struct snd_cea_861_aud_if *cea = &ad->cea;
+
+ WARN_ON(ad->current_stream != substream);
+
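+ /* pick the DMA burst size to match the sample format */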
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ad->dma_data.maxburst = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ad->dma_data.maxburst = 32;
+ break;
+ default:
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
+ }
+
+ ad->dss_audio.iec = iec;
+ ad->dss_audio.cea = cea;
+ /*
+ * fill the IEC-60958 channel status word
+ */
+ /* initialize the word bytes */
+ memset(iec->status, 0, sizeof(iec->status));
+
+ /* specify IEC-60958-3 (commercial use) */
+ iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
+
+ /* specify that the audio is LPCM*/
+ iec->status[0] &= ~IEC958_AES0_NONAUDIO;
+
+ iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+ iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
+
+ iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
+
+ iec->status[1] = IEC958_AES1_CON_GENERAL;
+
+ iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
+
+ iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
+
+ switch (params_rate(params)) {
+ case 32000:
+ iec->status[3] |= IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ iec->status[3] |= IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ iec->status[3] |= IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ iec->status[3] |= IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ iec->status[3] |= IEC958_AES3_CON_FS_96000;
+ break;
+ case 176400:
+ iec->status[3] |= IEC958_AES3_CON_FS_176400;
+ break;
+ case 192000:
+ iec->status[3] |= IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ dev_err(dai->dev, "rate not supported!\n");
+ return -EINVAL;
+ }
+
+ /* specify the clock accuracy */
+ iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
+
+ /*
+ * specify the word length. The same word length value can mean
+ * two different lengths. Hence, we need to specify the maximum
+ * word length as well.
+ */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
+ iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
+ iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ default:
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Fill the CEA-861 audio infoframe (see spec for details)
+ */
+
+ cea->db1_ct_cc = (params_channels(params) - 1)
+ & CEA861_AUDIO_INFOFRAME_DB1CC;
+ cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
+
+ cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
+ cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
+
+ cea->db3 = 0; /* not used, all zeros */
+
+ /*
+ * The OMAP HDMI IP requires to use the 8-channel channel code when
+ * transmitting more than two channels.
+ */
+ if (params_channels(params) == 2)
+ cea->db4_ca = 0x0;
+ else
+ cea->db4_ca = 0x13;
+
+ cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
+ /* the expression is trivial but makes clear what we are doing */
+ cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
+
+ return ad->ops->audio_config(ad->dssdev, &ad->dss_audio);
+}
+
+static int hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_audio_data *ad = card_drvdata_substream(substream);
+ int err = 0;
+
+ WARN_ON(ad->current_stream != substream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ err = ad->ops->audio_start(ad->dssdev);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ad->ops->audio_stop(ad->dssdev);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void hdmi_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_audio_data *ad = card_drvdata_substream(substream);
+
+ WARN_ON(ad->current_stream != substream);
+
+ ad->ops->audio_shutdown(ad->dssdev);
+
+ mutex_lock(&ad->current_stream_lock);
+ ad->current_stream = NULL;
+ mutex_unlock(&ad->current_stream_lock);
+}
+
+static const struct snd_soc_dai_ops hdmi_dai_ops = {
+ .startup = hdmi_dai_startup,
+ .hw_params = hdmi_dai_hw_params,
+ .trigger = hdmi_dai_trigger,
+ .shutdown = hdmi_dai_shutdown,
+};
+
+static const struct snd_soc_component_driver omap_hdmi_component = {
+ .name = "omapdss_hdmi",
+};
+
+static struct snd_soc_dai_driver omap5_hdmi_dai = {
+ .name = "omap5-hdmi-dai",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &hdmi_dai_ops,
+};
+
+static struct snd_soc_dai_driver omap4_hdmi_dai = {
+ .name = "omap4-hdmi-dai",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &hdmi_dai_ops,
+};
+
+static int omap_hdmi_audio_probe(struct platform_device *pdev)
+{
+ struct omap_hdmi_audio_pdata *ha = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct hdmi_audio_data *ad;
+ struct snd_soc_dai_driver *dai_drv;
+ struct snd_soc_card *card;
+ int ret;
+
+ if (!ha) {
+ dev_err(dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ ad = devm_kzalloc(dev, sizeof(*ad), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+ ad->dssdev = ha->dev;
+ ad->ops = ha->ops;
+ ad->dma_data.addr = ha->audio_dma_addr;
+ ad->dma_data.filter_data = "audio_tx";
+ ad->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ mutex_init(&ad->current_stream_lock);
+
+ switch (ha->dss_version) {
+ case OMAPDSS_VER_OMAP4430_ES1:
+ case OMAPDSS_VER_OMAP4430_ES2:
+ case OMAPDSS_VER_OMAP4:
+ dai_drv = &omap4_hdmi_dai;
+ break;
+ case OMAPDSS_VER_OMAP5:
+ dai_drv = &omap5_hdmi_dai;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
+ dai_drv, 1);
+ if (ret)
+ return ret;
+
+ ret = omap_pcm_platform_register(ad->dssdev);
+ if (ret)
+ return ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ card->name = devm_kasprintf(dev, GFP_KERNEL,
+ "HDMI %s", dev_name(ad->dssdev));
+ card->owner = THIS_MODULE;
+ card->dai_link =
+ devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
+ card->dai_link->name = card->name;
+ card->dai_link->stream_name = card->name;
+ card->dai_link->cpu_dai_name = dev_name(ad->dssdev);
+ card->dai_link->platform_name = dev_name(ad->dssdev);
+ card->dai_link->codec_name = "snd-soc-dummy";
+ card->dai_link->codec_dai_name = "snd-soc-dummy-dai";
+ card->num_links = 1;
+ card->dev = dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(dev, "snd_soc_register_card failed (%d)\n", ret);
+ snd_soc_unregister_component(ad->dssdev);
+ return ret;
+ }
+
+ ad->card = card;
+ snd_soc_card_set_drvdata(card, ad);
+
+ dev_set_drvdata(dev, ad);
+
+ return 0;
+}
+
+static int omap_hdmi_audio_remove(struct platform_device *pdev)
+{
+ struct hdmi_audio_data *ad = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(ad->card);
+ snd_soc_unregister_component(ad->dssdev);
+ return 0;
+}
+
+static struct platform_driver hdmi_audio_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_hdmi_audio_probe,
+ .remove = omap_hdmi_audio_remove,
+};
+
+module_platform_driver(hdmi_audio_driver);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI Audio Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
deleted file mode 100644
index f649fe84b629..000000000000
--- a/sound/soc/omap/omap-hdmi-card.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * omap-hdmi-card.c
- *
- * OMAP ALSA SoC machine driver for TI OMAP HDMI
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-#include <video/omapdss.h>
-
-#define DRV_NAME "omap-hdmi-audio"
-
-static struct snd_soc_dai_link omap_hdmi_dai = {
- .name = "HDMI",
- .stream_name = "HDMI",
- .cpu_dai_name = "omap-hdmi-audio-dai",
- .platform_name = "omap-hdmi-audio-dai",
- .codec_name = "hdmi-audio-codec",
- .codec_dai_name = "hdmi-hifi",
-};
-
-static struct snd_soc_card snd_soc_omap_hdmi = {
- .name = "OMAPHDMI",
- .owner = THIS_MODULE,
- .dai_link = &omap_hdmi_dai,
- .num_links = 1,
-};
-
-static int omap_hdmi_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &snd_soc_omap_hdmi;
- int ret;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
- card->dev = NULL;
- return ret;
- }
- return 0;
-}
-
-static int omap_hdmi_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
- card->dev = NULL;
- return 0;
-}
-
-static struct platform_driver omap_hdmi_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = omap_hdmi_probe,
- .remove = omap_hdmi_remove,
-};
-
-module_platform_driver(omap_hdmi_driver);
-
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
deleted file mode 100644
index eb9c39299f81..000000000000
--- a/sound/soc/omap/omap-hdmi.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * omap-hdmi.c
- *
- * OMAP ALSA SoC DAI driver for HDMI audio on OMAP4 processors.
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Jorge Candelaria <jorge.candelaria@ti.com>
- * Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-#include <sound/asound.h>
-#include <sound/asoundef.h>
-#include <sound/dmaengine_pcm.h>
-#include <video/omapdss.h>
-#include <sound/omap-pcm.h>
-
-#include "omap-hdmi.h"
-
-#define DRV_NAME "omap-hdmi-audio-dai"
-
-struct hdmi_priv {
- struct snd_dmaengine_dai_dma_data dma_data;
- unsigned int dma_req;
- struct omap_dss_audio dss_audio;
- struct snd_aes_iec958 iec;
- struct snd_cea_861_aud_if cea;
- struct omap_dss_device *dssdev;
-};
-
-static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
- int err;
- /*
- * Make sure that the period bytes are multiple of the DMA packet size.
- * Largest packet size we use is 32 32-bit words = 128 bytes
- */
- err = snd_pcm_hw_constraint_step(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
- if (err < 0) {
- dev_err(dai->dev, "could not apply constraint\n");
- return err;
- }
-
- if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
- dev_err(dai->dev, "audio not supported\n");
- return -ENODEV;
- }
-
- snd_soc_dai_set_dma_data(dai, substream, &priv->dma_data);
-
- return 0;
-}
-
-static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
-
- return priv->dssdev->driver->audio_enable(priv->dssdev);
-}
-
-static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
- struct snd_aes_iec958 *iec = &priv->iec;
- struct snd_cea_861_aud_if *cea = &priv->cea;
- int err = 0;
-
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- priv->dma_data.maxburst = 16;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- priv->dma_data.maxburst = 32;
- break;
- default:
- dev_err(dai->dev, "format not supported!\n");
- return -EINVAL;
- }
-
- /*
- * fill the IEC-60958 channel status word
- */
- /* initialize the word bytes */
- memset(iec->status, 0, sizeof(iec->status));
-
- /* specify IEC-60958-3 (commercial use) */
- iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
-
- /* specify that the audio is LPCM*/
- iec->status[0] &= ~IEC958_AES0_NONAUDIO;
-
- iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
-
- iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
-
- iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
-
- iec->status[1] = IEC958_AES1_CON_GENERAL;
-
- iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
-
- iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
-
- switch (params_rate(params)) {
- case 32000:
- iec->status[3] |= IEC958_AES3_CON_FS_32000;
- break;
- case 44100:
- iec->status[3] |= IEC958_AES3_CON_FS_44100;
- break;
- case 48000:
- iec->status[3] |= IEC958_AES3_CON_FS_48000;
- break;
- case 88200:
- iec->status[3] |= IEC958_AES3_CON_FS_88200;
- break;
- case 96000:
- iec->status[3] |= IEC958_AES3_CON_FS_96000;
- break;
- case 176400:
- iec->status[3] |= IEC958_AES3_CON_FS_176400;
- break;
- case 192000:
- iec->status[3] |= IEC958_AES3_CON_FS_192000;
- break;
- default:
- dev_err(dai->dev, "rate not supported!\n");
- return -EINVAL;
- }
-
- /* specify the clock accuracy */
- iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
-
- /*
- * specify the word length. The same word length value can mean
- * two different lengths. Hence, we need to specify the maximum
- * word length as well.
- */
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
- iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
- iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
- break;
- default:
- dev_err(dai->dev, "format not supported!\n");
- return -EINVAL;
- }
-
- /*
- * Fill the CEA-861 audio infoframe (see spec for details)
- */
-
- cea->db1_ct_cc = (params_channels(params) - 1)
- & CEA861_AUDIO_INFOFRAME_DB1CC;
- cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
-
- cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
- cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
-
- cea->db3 = 0; /* not used, all zeros */
-
- /*
- * The OMAP HDMI IP requires to use the 8-channel channel code when
- * transmitting more than two channels.
- */
- if (params_channels(params) == 2)
- cea->db4_ca = 0x0;
- else
- cea->db4_ca = 0x13;
-
- cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
- /* the expression is trivial but makes clear what we are doing */
- cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
-
- priv->dss_audio.iec = iec;
- priv->dss_audio.cea = cea;
-
- err = priv->dssdev->driver->audio_config(priv->dssdev,
- &priv->dss_audio);
-
- return err;
-}
-
-static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
- int err = 0;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- err = priv->dssdev->driver->audio_start(priv->dssdev);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- priv->dssdev->driver->audio_stop(priv->dssdev);
- break;
- default:
- err = -EINVAL;
- }
- return err;
-}
-
-static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
-
- priv->dssdev->driver->audio_disable(priv->dssdev);
-}
-
-static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
- .startup = omap_hdmi_dai_startup,
- .hw_params = omap_hdmi_dai_hw_params,
- .prepare = omap_hdmi_dai_prepare,
- .trigger = omap_hdmi_dai_trigger,
- .shutdown = omap_hdmi_dai_shutdown,
-};
-
-static struct snd_soc_dai_driver omap_hdmi_dai = {
- .playback = {
- .channels_min = 2,
- .channels_max = 8,
- .rates = OMAP_HDMI_RATES,
- .formats = OMAP_HDMI_FORMATS,
- },
- .ops = &omap_hdmi_dai_ops,
-};
-
-static const struct snd_soc_component_driver omap_hdmi_component = {
- .name = DRV_NAME,
-};
-
-static int omap_hdmi_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *hdmi_rsrc;
- struct hdmi_priv *hdmi_data;
- bool hdmi_dev_found = false;
-
- hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
- if (hdmi_data == NULL) {
- dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
- return -ENOMEM;
- }
-
- hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!hdmi_rsrc) {
- dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
- return -ENODEV;
- }
-
- hdmi_data->dma_data.addr = hdmi_rsrc->start + OMAP_HDMI_AUDIO_DMA_PORT;
-
- hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!hdmi_rsrc) {
- dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
- return -ENODEV;
- }
-
- hdmi_data->dma_req = hdmi_rsrc->start;
- hdmi_data->dma_data.filter_data = &hdmi_data->dma_req;
- hdmi_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-
- /*
- * TODO: We assume that there is only one DSS HDMI device. Future
- * OMAP implementations may support more than one HDMI devices and
- * we should provided separate audio support for all of them.
- */
- /* Find an HDMI device. */
- for_each_dss_dev(hdmi_data->dssdev) {
- omap_dss_get_device(hdmi_data->dssdev);
-
- if (!hdmi_data->dssdev->driver) {
- omap_dss_put_device(hdmi_data->dssdev);
- continue;
- }
-
- if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- hdmi_dev_found = true;
- break;
- }
- }
-
- if (!hdmi_dev_found) {
- dev_err(&pdev->dev, "no driver for HDMI display found\n");
- return -ENODEV;
- }
-
- dev_set_drvdata(&pdev->dev, hdmi_data);
- ret = snd_soc_register_component(&pdev->dev, &omap_hdmi_component,
- &omap_hdmi_dai, 1);
-
- if (ret)
- return ret;
-
- return omap_pcm_platform_register(&pdev->dev);
-}
-
-static int omap_hdmi_remove(struct platform_device *pdev)
-{
- struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
-
- snd_soc_unregister_component(&pdev->dev);
-
- if (hdmi_data == NULL) {
- dev_err(&pdev->dev, "cannot obtain HDMi data\n");
- return -ENODEV;
- }
-
- omap_dss_put_device(hdmi_data->dssdev);
- return 0;
-}
-
-static struct platform_driver hdmi_dai_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .probe = omap_hdmi_probe,
- .remove = omap_hdmi_remove,
-};
-
-module_platform_driver(hdmi_dai_driver);
-
-MODULE_AUTHOR("Jorge Candelaria <jorge.candelaria@ti.com>");
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP HDMI SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
deleted file mode 100644
index 6ad2bf4f2697..000000000000
--- a/sound/soc/omap/omap-hdmi.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * omap-hdmi.h
- *
- * Definitions for OMAP ALSA SoC DAI driver for HDMI audio on OMAP4 processors.
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Jorge Candelaria <jorge.candelaria@ti.com>
- * Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __OMAP_HDMI_H__
-#define __OMAP_HDMI_H__
-
-#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
-
-#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
- SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
- SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
-
-#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S24_LE)
-
-#endif
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index bd3ef2a88be0..8b79cafab1e2 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -831,7 +831,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
static struct platform_driver asoc_mcbsp_driver = {
.driver = {
.name = "omap-mcbsp",
- .owner = THIS_MODULE,
.of_match_table = omap_mcbsp_of_match,
},
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index f0e2ebeab02b..b837265ac3e9 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -512,7 +512,6 @@ MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
static struct platform_driver asoc_mcpdm_driver = {
.driver = {
.name = "omap-mcpdm",
- .owner = THIS_MODULE,
.of_match_table = omap_mcpdm_of_match,
},
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index 4336d1831485..5e551c762b7a 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -375,7 +375,6 @@ MODULE_DEVICE_TABLE(of, omap_twl4030_of_match);
static struct platform_driver omap_twl4030_driver = {
.driver = {
.name = "omap-twl4030",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = omap_twl4030_of_match,
},
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index b10ae8074461..04896d6252a2 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -519,7 +519,6 @@ MODULE_DEVICE_TABLE(of, rx51_audio_of_match);
static struct platform_driver rx51_soc_driver = {
.driver = {
.name = "rx51-audio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rx51_audio_of_match),
},
.probe = rx51_soc_probe,
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index c8dd53f9c35d..79936e3e80e7 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -155,7 +155,6 @@ static int brownstone_remove(struct platform_device *pdev)
static struct platform_driver mmp_driver = {
.driver = {
.name = "brownstone-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = brownstone_probe,
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index 5a88136aa800..b7cd0a71fd70 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -327,7 +327,6 @@ static int corgi_remove(struct platform_device *pdev)
static struct platform_driver corgi_driver = {
.driver = {
.name = "corgi-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = corgi_probe,
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index c29fedab2f49..7c691aae8af2 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -177,7 +177,6 @@ static int e740_remove(struct platform_device *pdev)
static struct platform_driver e740_driver = {
.driver = {
.name = "e740-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = e740_probe,
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index ee36aba88063..30544b65b5a8 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -159,7 +159,6 @@ static int e750_remove(struct platform_device *pdev)
static struct platform_driver e750_driver = {
.driver = {
.name = "e750-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = e750_probe,
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 24c2078ce70b..45d4bd46fff6 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -140,7 +140,6 @@ static int e800_remove(struct platform_device *pdev)
static struct platform_driver e800_driver = {
.driver = {
.name = "e800-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = e800_probe,
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 05559a725bec..ce26551052a3 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -226,7 +226,6 @@ static int hx4700_audio_remove(struct platform_device *pdev)
static struct platform_driver hx4700_audio_driver = {
.driver = {
.name = "hx4700-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = hx4700_audio_probe,
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
index fd2f4eda1fd3..29fabbfd21f1 100644
--- a/sound/soc/pxa/imote2.c
+++ b/sound/soc/pxa/imote2.c
@@ -90,7 +90,6 @@ static int imote2_remove(struct platform_device *pdev)
static struct platform_driver imote2_driver = {
.driver = {
.name = "imote2-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = imote2_probe,
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 595eee341e90..396dbd51a64f 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -127,15 +127,12 @@ static const struct snd_soc_dapm_route audio_map[] = {
static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
- unsigned short reg;
/* Prepare GPIO8 for rear speaker amplifier */
- reg = codec->driver->read(codec, AC97_GPIO_CFG);
- codec->driver->write(codec, AC97_GPIO_CFG, reg | 0x0100);
+ snd_soc_update_bits(codec, AC97_GPIO_CFG, 0x100, 0x100);
/* Prepare MIC input */
- reg = codec->driver->read(codec, AC97_3D_CONTROL);
- codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000);
+ snd_soc_update_bits(codec, AC97_3D_CONTROL, 0xc000, 0xc000);
return 0;
}
@@ -205,7 +202,6 @@ static struct platform_driver mioa701_wm9713_driver = {
.remove = mioa701_wm9713_remove,
.driver = {
.name = "mioa701-wm9713",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 64e8b949a1a3..1eb45dcfb8e8 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -244,7 +244,6 @@ static int mmp_pcm_remove(struct platform_device *pdev)
static struct platform_driver mmp_pcm_driver = {
.driver = {
.name = "mmp-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = mmp_pcm_probe,
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 5bf5f1f7cac5..eca60c29791a 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -472,7 +472,6 @@ static int asoc_mmp_sspa_remove(struct platform_device *pdev)
static struct platform_driver asoc_mmp_sspa_driver = {
.driver = {
.name = "mmp-sspa-dai",
- .owner = THIS_MODULE,
},
.probe = asoc_mmp_sspa_probe,
.remove = asoc_mmp_sspa_remove,
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 17f9521ff6ea..1eebca2f0a97 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -172,7 +172,6 @@ static struct platform_driver palm27x_wm9712_driver = {
.remove = palm27x_asoc_remove,
.driver = {
.name = "palm27x-asoc",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
};
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 21f340065318..0fce8c420e96 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -296,7 +296,6 @@ static int poodle_remove(struct platform_device *pdev)
static struct platform_driver poodle_driver = {
.driver = {
.name = "poodle-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = poodle_probe,
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index a8e097433074..fbe2e93d6edc 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -97,7 +97,7 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
int ret = 0;
if (!cpu_dai->active) {
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
pxa_ssp_disable(ssp);
}
@@ -121,7 +121,7 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
if (!cpu_dai->active) {
pxa_ssp_disable(ssp);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
}
kfree(snd_soc_dai_get_dma_data(cpu_dai, substream));
@@ -136,7 +136,7 @@ static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai)
struct ssp_device *ssp = priv->ssp;
if (!cpu_dai->active)
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0);
priv->cr1 = __raw_readl(ssp->mmio_base + SSCR1);
@@ -144,7 +144,7 @@ static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai)
priv->psp = __raw_readl(ssp->mmio_base + SSPSP);
pxa_ssp_disable(ssp);
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
return 0;
}
@@ -154,7 +154,7 @@ static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai)
struct ssp_device *ssp = priv->ssp;
uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE;
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
__raw_writel(sssr, ssp->mmio_base + SSSR);
__raw_writel(priv->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0);
@@ -165,7 +165,7 @@ static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai)
if (cpu_dai->active)
pxa_ssp_enable(ssp);
else
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
return 0;
}
@@ -256,11 +256,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
/* The SSP clock must be disabled when changing SSP clock mode
* on PXA2xx. On PXA3xx it must be enabled when doing so. */
if (ssp->type != PXA3xx_SSP)
- clk_disable(ssp->clk);
+ clk_disable_unprepare(ssp->clk);
val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
pxa_ssp_write_reg(ssp, SSCR0, val);
if (ssp->type != PXA3xx_SSP)
- clk_enable(ssp->clk);
+ clk_prepare_enable(ssp->clk);
return 0;
}
@@ -826,7 +826,6 @@ static int asoc_ssp_remove(struct platform_device *pdev)
static struct platform_driver asoc_ssp_driver = {
.driver = {
.name = "pxa-ssp-dai",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index ae956e3f4b9d..1f6054650991 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -157,7 +157,7 @@ static const struct snd_soc_dai_ops pxa_ac97_mic_dai_ops = {
static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
{
.name = "pxa2xx-ac97",
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
@@ -174,7 +174,7 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
},
{
.name = "pxa2xx-ac97-aux",
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 Aux Playback",
.channels_min = 1,
@@ -191,7 +191,7 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
},
{
.name = "pxa2xx-ac97-mic",
- .ac97_control = 1,
+ .bus_control = true,
.capture = {
.stream_name = "AC97 Mic Capture",
.channels_min = 1,
@@ -261,7 +261,6 @@ static struct platform_driver pxa2xx_ac97_driver = {
.remove = pxa2xx_ac97_dev_remove,
.driver = {
.name = "pxa2xx-ac97",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &pxa2xx_ac97_pm_ops,
#endif
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index c0d648d3339f..e68290c15328 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -383,7 +383,6 @@ static struct platform_driver pxa2xx_i2s_driver = {
.driver = {
.name = "pxa2xx-i2s",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 42f2f0175981..a51c9da66614 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -143,7 +143,6 @@ static const struct of_device_id snd_soc_pxa_audio_match[] = {
static struct platform_driver pxa_pcm_driver = {
.driver = {
.name = "pxa-pcm-audio",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(snd_soc_pxa_audio_match),
},
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 1373b017a951..d7d5fb20ea6f 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -305,19 +305,15 @@ static struct snd_soc_card snd_soc_spitz = {
.num_dapm_routes = ARRAY_SIZE(spitz_audio_map),
};
-static struct platform_device *spitz_snd_device;
-
-static int __init spitz_init(void)
+static int spitz_probe(struct platform_device *pdev)
{
+ struct snd_soc_card *card = &snd_soc_spitz;
int ret;
- if (!(machine_is_spitz() || machine_is_borzoi() || machine_is_akita()))
- return -ENODEV;
-
- if (machine_is_borzoi() || machine_is_spitz())
- spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS;
- else
+ if (machine_is_akita())
spitz_mic_gpio = AKITA_GPIO_MIC_BIAS;
+ else
+ spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS;
ret = gpio_request(spitz_mic_gpio, "MIC GPIO");
if (ret)
@@ -327,37 +323,45 @@ static int __init spitz_init(void)
if (ret)
goto err2;
- spitz_snd_device = platform_device_alloc("soc-audio", -1);
- if (!spitz_snd_device) {
- ret = -ENOMEM;
+ card->dev = &pdev->dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+ ret);
goto err2;
}
- platform_set_drvdata(spitz_snd_device, &snd_soc_spitz);
-
- ret = platform_device_add(spitz_snd_device);
- if (ret)
- goto err3;
-
return 0;
-err3:
- platform_device_put(spitz_snd_device);
err2:
gpio_free(spitz_mic_gpio);
err1:
return ret;
}
-static void __exit spitz_exit(void)
+static int spitz_remove(struct platform_device *pdev)
{
- platform_device_unregister(spitz_snd_device);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
gpio_free(spitz_mic_gpio);
+ return 0;
}
-module_init(spitz_init);
-module_exit(spitz_exit);
+static struct platform_driver spitz_driver = {
+ .driver = {
+ .name = "spitz-audio",
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = spitz_probe,
+ .remove = spitz_remove,
+};
+
+module_platform_driver(spitz_driver);
MODULE_AUTHOR("Richard Purdie");
MODULE_DESCRIPTION("ALSA SoC Spitz");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spitz-audio");
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 4a956d1cb269..cb49284e853a 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -265,7 +265,6 @@ static int tosa_remove(struct platform_device *pdev)
static struct platform_driver tosa_driver = {
.driver = {
.name = "tosa-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = tosa_probe,
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index 9d7c5b7e9539..e3d7257ad09c 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -155,7 +155,6 @@ static int ttc_dkb_remove(struct platform_device *pdev)
static struct platform_driver ttc_dkb_driver = {
.driver = {
.name = "ttc-dkb-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = ttc_dkb_probe,
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index 78fc159559b0..e18182699d83 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -1,11 +1,16 @@
config SND_SOC_ROCKCHIP
tristate "ASoC support for Rockchip"
depends on COMPILE_TEST || ARCH_ROCKCHIP
- select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for codecs attached to
the Rockchip SoCs' Audio interfaces. You will also need to
select the audio interfaces to support below.
config SND_SOC_ROCKCHIP_I2S
- tristate
+ tristate "Rockchip I2S Device Driver"
+ depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+	  Say Y or M if you want to add support for the I2S driver for
+	  the Rockchip I2S device. The device supports up to a maximum
+	  of 8 channels each for playback and capture.
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index c74ba37f862c..26ec5117b35c 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -528,7 +528,6 @@ static struct platform_driver rockchip_i2s_driver = {
.remove = rockchip_i2s_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rockchip_i2s_match),
.pm = &rockchip_i2s_pm_ops,
},
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 55a38697443d..fc67f97f19f6 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
config SND_SOC_SAMSUNG
tristate "ASoC support for Samsung"
- depends on PLAT_SAMSUNG
+ depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
depends on S3C64XX_PL080 || !ARCH_S3C64XX
depends on S3C24XX_DMAC || !ARCH_S3C24XX
select SND_SOC_GENERIC_DMAENGINE_PCM
@@ -239,3 +239,9 @@ config SND_SOC_ODROIDX2
select SND_SAMSUNG_I2S
help
Say Y here to enable audio support for the Odroid-X2/U3.
+
+config SND_SOC_ARNDALE_RT5631_ALC5631
+ tristate "Audio support for RT5631(ALC5631) on Arndale Board"
+ depends on SND_SOC_SAMSUNG
+ select SND_SAMSUNG_I2S
+ select SND_SOC_RT5631
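+	help
+	  Say Y or M if you want to add SoC audio support for the
+	  RT5631 (ALC5631) codec on the Arndale board.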
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 91505ddaaf95..31e3dba7e3b5 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -45,6 +45,7 @@ snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
snd-soc-odroidx2-max98090-objs := odroidx2_max98090.o
+snd-soc-arndale-rt5631-objs := arndale_rt5631.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
@@ -71,3 +72,4 @@ obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
obj-$(CONFIG_SND_SOC_ODROIDX2) += snd-soc-odroidx2-max98090.o
+obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index e1615113fd84..e4145509d63c 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -288,7 +288,7 @@ static int s3c_ac97_mic_dai_probe(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver s3c_ac97_dai[] = {
[S3C_AC97_DAI_PCM] = {
.name = "samsung-ac97",
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.stream_name = "AC97 Playback",
.channels_min = 2,
@@ -306,7 +306,7 @@ static struct snd_soc_dai_driver s3c_ac97_dai[] = {
},
[S3C_AC97_DAI_MIC] = {
.name = "samsung-ac97-mic",
- .ac97_control = 1,
+ .bus_control = true,
.capture = {
.stream_name = "AC97 Mic Capture",
.channels_min = 1,
@@ -442,7 +442,6 @@ static struct platform_driver s3c_ac97_driver = {
.remove = s3c_ac97_remove,
.driver = {
.name = "samsung-ac97",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
new file mode 100644
index 000000000000..1e2b61ca8db2
--- /dev/null
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -0,0 +1,150 @@
+/*
+ * arndale_rt5631.c
+ *
+ * Copyright (c) 2014, Insignal Co., Ltd.
+ *
+ * Author: Claude <claude@insginal.co.kr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "i2s.h"
+
+static int arndale_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int rfs, ret;
+ unsigned long rclk;
+
+ rfs = 256;
+
+ rclk = params_rate(params) * rfs;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+ 0, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+ 0, SND_SOC_CLOCK_OUT);
+
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_ops arndale_ops = {
+ .hw_params = arndale_hw_params,
+};
+
+static struct snd_soc_dai_link arndale_rt5631_dai[] = {
+ {
+ .name = "RT5631 HiFi",
+ .stream_name = "Primary",
+ .codec_dai_name = "rt5631-hifi",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &arndale_ops,
+ },
+};
+
+static struct snd_soc_card arndale_rt5631 = {
+ .name = "Arndale RT5631",
+ .dai_link = arndale_rt5631_dai,
+ .num_links = ARRAY_SIZE(arndale_rt5631_dai),
+};
+
+static int arndale_audio_probe(struct platform_device *pdev)
+{
+ int n, ret;
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card = &arndale_rt5631;
+
+ card->dev = &pdev->dev;
+
+ for (n = 0; np && n < ARRAY_SIZE(arndale_rt5631_dai); n++) {
+ if (!arndale_rt5631_dai[n].cpu_dai_name) {
+ arndale_rt5631_dai[n].cpu_of_node = of_parse_phandle(np,
+ "samsung,audio-cpu", n);
+
+ if (!arndale_rt5631_dai[n].cpu_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-cpu' missing or invalid\n");
+ return -EINVAL;
+ }
+ }
+ if (!arndale_rt5631_dai[n].platform_name)
+ arndale_rt5631_dai[n].platform_of_node =
+ arndale_rt5631_dai[n].cpu_of_node;
+
+ arndale_rt5631_dai[n].codec_name = NULL;
+ arndale_rt5631_dai[n].codec_of_node = of_parse_phandle(np,
+ "samsung,audio-codec", n);
+ if (!arndale_rt5631_dai[0].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+
+ if (ret)
+		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+
+ return ret;
+}
+
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+
+ return 0;
+}
+
+static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
+ { .compatible = "samsung,arndale-rt5631", },
+ { .compatible = "samsung,arndale-alc5631", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_arndale_rt5631_of_match);
+
+static struct platform_driver arndale_audio_driver = {
+ .driver = {
+ .name = "arndale-audio",
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
+ },
+ .probe = arndale_audio_probe,
+ .remove = arndale_audio_remove,
+};
+
+module_platform_driver(arndale_audio_driver);
+
+MODULE_AUTHOR("Claude <claude@insignal.co.kr>");
+MODULE_DESCRIPTION("ALSA SoC Driver for Arndale Board");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 5b21207cf551..e5f05e62fa3c 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -445,7 +445,6 @@ static int bells_probe(struct platform_device *pdev)
static struct platform_driver bells_driver = {
.driver = {
.name = "bells",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bells_probe,
diff --git a/sound/soc/samsung/i2s-regs.h b/sound/soc/samsung/i2s-regs.h
index 821a50231002..9170c311d66e 100644
--- a/sound/soc/samsung/i2s-regs.h
+++ b/sound/soc/samsung/i2s-regs.h
@@ -33,8 +33,9 @@
#define I2SLVL3ADDR 0x3c
#define I2SSTR1 0x40
#define I2SVER 0x44
-#define I2SFIC2 0x48
+#define I2SFIC1 0x48
#define I2STDM 0x4c
+#define I2SFSTA 0x50
#define CON_RSTCLR (1 << 31)
#define CON_FRXOFSTATUS (1 << 26)
@@ -93,8 +94,6 @@
#define MOD_BLC_24BIT (2 << 13)
#define MOD_BLC_MASK (3 << 13)
-#define MOD_IMS_SYSMUX (1 << 10)
-#define MOD_SLAVE (1 << 11)
#define MOD_TXONLY (0 << 8)
#define MOD_RXONLY (1 << 8)
#define MOD_TXRX (2 << 8)
@@ -132,7 +131,10 @@
#define EXYNOS5420_MOD_BCLK_256FS 8
#define EXYNOS5420_MOD_BCLK_MASK 0xf
-#define MOD_CDCLKCON (1 << 12)
+#define EXYNOS7_MOD_RCLK_64FS 4
+#define EXYNOS7_MOD_RCLK_128FS 5
+#define EXYNOS7_MOD_RCLK_96FS 6
+#define EXYNOS7_MOD_RCLK_192FS 7
#define PSR_PSREN (1 << 15)
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index de8b13270bab..b1a7c5bce4a1 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -36,9 +36,24 @@ enum samsung_dai_type {
TYPE_SEC,
};
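+/*
+ * Per-variant map of I2SMOD (and FIC) bit positions. Newer Samsung I2S
+ * revisions moved several of these fields, so each SoC variant supplies
+ * its own offsets and masks instead of relying on the fixed MOD_* shifts.
+ */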
+struct samsung_i2s_variant_regs {
+ unsigned int bfs_off;
+ unsigned int rfs_off;
+ unsigned int sdf_off;
+ unsigned int txr_off;
+ unsigned int rclksrc_off;
+ unsigned int mss_off;
+ unsigned int cdclkcon_off;
+ unsigned int lrp_off;
+ unsigned int bfs_mask;
+ unsigned int rfs_mask;
+ unsigned int ftx0cnt_off;
+};
+
struct samsung_i2s_dai_data {
int dai_type;
u32 quirks;
+ const struct samsung_i2s_variant_regs *i2s_variant_regs;
};
struct i2s_dai {
@@ -81,6 +96,7 @@ struct i2s_dai {
u32 suspend_i2scon;
u32 suspend_i2spsr;
unsigned long gpios[7]; /* i2s gpio line numbers */
+ const struct samsung_i2s_variant_regs *variant_regs;
};
/* Lock for cross i/f checks */
@@ -95,7 +111,8 @@ static inline bool is_secondary(struct i2s_dai *i2s)
/* If operating in SoC-Slave mode */
static inline bool is_slave(struct i2s_dai *i2s)
{
- return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false;
+ u32 mod = readl(i2s->addr + I2SMOD);
+ return (mod & (1 << i2s->variant_regs->mss_off)) ? true : false;
}
/* If this interface of the controller is transmitting data */
@@ -200,14 +217,14 @@ static inline bool is_manager(struct i2s_dai *i2s)
static inline unsigned get_rfs(struct i2s_dai *i2s)
{
u32 rfs;
-
- if (i2s->quirks & QUIRK_SUPPORTS_TDM)
- rfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_RCLK_SHIFT;
- else
- rfs = (readl(i2s->addr + I2SMOD) >> MOD_RCLK_SHIFT);
- rfs &= MOD_RCLK_MASK;
+ rfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->rfs_off;
+ rfs &= i2s->variant_regs->rfs_mask;
switch (rfs) {
+ case 7: return 192;
+ case 6: return 96;
+ case 5: return 128;
+ case 4: return 64;
case 3: return 768;
case 2: return 384;
case 1: return 512;
@@ -219,15 +236,23 @@ static inline unsigned get_rfs(struct i2s_dai *i2s)
static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
{
u32 mod = readl(i2s->addr + I2SMOD);
- int rfs_shift;
+ int rfs_shift = i2s->variant_regs->rfs_off;
- if (i2s->quirks & QUIRK_SUPPORTS_TDM)
- rfs_shift = EXYNOS5420_MOD_RCLK_SHIFT;
- else
- rfs_shift = MOD_RCLK_SHIFT;
- mod &= ~(MOD_RCLK_MASK << rfs_shift);
+ mod &= ~(i2s->variant_regs->rfs_mask << rfs_shift);
switch (rfs) {
+ case 192:
+ mod |= (EXYNOS7_MOD_RCLK_192FS << rfs_shift);
+ break;
+ case 96:
+ mod |= (EXYNOS7_MOD_RCLK_96FS << rfs_shift);
+ break;
+ case 128:
+ mod |= (EXYNOS7_MOD_RCLK_128FS << rfs_shift);
+ break;
+ case 64:
+ mod |= (EXYNOS7_MOD_RCLK_64FS << rfs_shift);
+ break;
case 768:
mod |= (MOD_RCLK_768FS << rfs_shift);
break;
@@ -249,14 +274,8 @@ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
static inline unsigned get_bfs(struct i2s_dai *i2s)
{
u32 bfs;
-
- if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
- bfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_BCLK_SHIFT;
- bfs &= EXYNOS5420_MOD_BCLK_MASK;
- } else {
- bfs = readl(i2s->addr + I2SMOD) >> MOD_BCLK_SHIFT;
- bfs &= MOD_BCLK_MASK;
- }
+ bfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->bfs_off;
+ bfs &= i2s->variant_regs->bfs_mask;
switch (bfs) {
case 8: return 256;
@@ -275,16 +294,8 @@ static inline unsigned get_bfs(struct i2s_dai *i2s)
static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
{
u32 mod = readl(i2s->addr + I2SMOD);
- int bfs_shift;
int tdm = i2s->quirks & QUIRK_SUPPORTS_TDM;
-
- if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
- bfs_shift = EXYNOS5420_MOD_BCLK_SHIFT;
- mod &= ~(EXYNOS5420_MOD_BCLK_MASK << bfs_shift);
- } else {
- bfs_shift = MOD_BCLK_SHIFT;
- mod &= ~(MOD_BCLK_MASK << bfs_shift);
- }
+ int bfs_shift = i2s->variant_regs->bfs_off;
/* Non-TDM I2S controllers do not support BCLK > 48 * FS */
if (!tdm && bfs > 48) {
@@ -292,6 +303,8 @@ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
return;
}
+ mod &= ~(i2s->variant_regs->bfs_mask << bfs_shift);
+
switch (bfs) {
case 48:
mod |= (MOD_BCLK_48FS << bfs_shift);
@@ -346,8 +359,9 @@ static inline int get_blc(struct i2s_dai *i2s)
static void i2s_txctrl(struct i2s_dai *i2s, int on)
{
void __iomem *addr = i2s->addr;
+ int txr_off = i2s->variant_regs->txr_off;
u32 con = readl(addr + I2SCON);
- u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+ u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off);
if (on) {
con |= CON_ACTIVE;
@@ -362,9 +376,9 @@ static void i2s_txctrl(struct i2s_dai *i2s, int on)
}
if (any_rx_active(i2s))
- mod |= MOD_TXRX;
+ mod |= 2 << txr_off;
else
- mod |= MOD_TXONLY;
+ mod |= 0 << txr_off;
} else {
if (is_secondary(i2s)) {
con |= CON_TXSDMA_PAUSE;
@@ -382,7 +396,7 @@ static void i2s_txctrl(struct i2s_dai *i2s, int on)
con |= CON_TXCH_PAUSE;
if (any_rx_active(i2s))
- mod |= MOD_RXONLY;
+ mod |= 1 << txr_off;
else
con &= ~CON_ACTIVE;
}
@@ -395,23 +409,24 @@ static void i2s_txctrl(struct i2s_dai *i2s, int on)
static void i2s_rxctrl(struct i2s_dai *i2s, int on)
{
void __iomem *addr = i2s->addr;
+ int txr_off = i2s->variant_regs->txr_off;
u32 con = readl(addr + I2SCON);
- u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+ u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off);
if (on) {
con |= CON_RXDMA_ACTIVE | CON_ACTIVE;
con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE);
if (any_tx_active(i2s))
- mod |= MOD_TXRX;
+ mod |= 2 << txr_off;
else
- mod |= MOD_RXONLY;
+ mod |= 1 << txr_off;
} else {
con |= CON_RXDMA_PAUSE | CON_RXCH_PAUSE;
con &= ~CON_RXDMA_ACTIVE;
if (any_tx_active(i2s))
- mod |= MOD_TXONLY;
+ mod |= 0 << txr_off;
else
con &= ~CON_ACTIVE;
}
@@ -451,6 +466,9 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
u32 mod = readl(i2s->addr + I2SMOD);
+ const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
+ unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
+ unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
switch (clk_id) {
case SAMSUNG_I2S_OPCLK:
@@ -465,18 +483,18 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
if ((rfs && other && other->rfs && (other->rfs != rfs)) ||
(any_active(i2s) &&
(((dir == SND_SOC_CLOCK_IN)
- && !(mod & MOD_CDCLKCON)) ||
+ && !(mod & cdcon_mask)) ||
((dir == SND_SOC_CLOCK_OUT)
- && (mod & MOD_CDCLKCON))))) {
+ && (mod & cdcon_mask))))) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
}
if (dir == SND_SOC_CLOCK_IN)
- mod |= MOD_CDCLKCON;
+ mod |= 1 << i2s_regs->cdclkcon_off;
else
- mod &= ~MOD_CDCLKCON;
+ mod &= ~(1 << i2s_regs->cdclkcon_off);
i2s->rfs = rfs;
break;
@@ -491,8 +509,8 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
if (!any_active(i2s)) {
if (i2s->op_clk && !IS_ERR(i2s->op_clk)) {
- if ((clk_id && !(mod & MOD_IMS_SYSMUX)) ||
- (!clk_id && (mod & MOD_IMS_SYSMUX))) {
+ if ((clk_id && !(mod & rsrc_mask)) ||
+ (!clk_id && (mod & rsrc_mask))) {
clk_disable_unprepare(i2s->op_clk);
clk_put(i2s->op_clk);
} else {
@@ -520,8 +538,8 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
other->op_clk = i2s->op_clk;
other->rclk_srcrate = i2s->rclk_srcrate;
}
- } else if ((!clk_id && (mod & MOD_IMS_SYSMUX))
- || (clk_id && !(mod & MOD_IMS_SYSMUX))) {
+ } else if ((!clk_id && (mod & rsrc_mask))
+ || (clk_id && !(mod & rsrc_mask))) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
@@ -533,11 +551,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
}
if (clk_id == 0)
- mod &= ~MOD_IMS_SYSMUX;
+ mod &= ~(1 << i2s_regs->rclksrc_off);
else
- mod |= MOD_IMS_SYSMUX;
- break;
+ mod |= 1 << i2s_regs->rclksrc_off;
+ break;
default:
dev_err(&i2s->pdev->dev, "We don't serve that!\n");
return -EINVAL;
@@ -553,16 +571,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
{
struct i2s_dai *i2s = to_info(dai);
u32 mod = readl(i2s->addr + I2SMOD);
- int lrp_shift, sdf_shift, sdf_mask, lrp_rlow;
+ int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
u32 tmp = 0;
- if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
- lrp_shift = EXYNOS5420_MOD_LRP_SHIFT;
- sdf_shift = EXYNOS5420_MOD_SDF_SHIFT;
- } else {
- lrp_shift = MOD_LRP_SHIFT;
- sdf_shift = MOD_SDF_SHIFT;
- }
+ lrp_shift = i2s->variant_regs->lrp_off;
+ sdf_shift = i2s->variant_regs->sdf_off;
+ mod_slave = 1 << i2s->variant_regs->mss_off;
sdf_mask = MOD_SDF_MASK << sdf_shift;
lrp_rlow = MOD_LR_RLOW << lrp_shift;
@@ -605,7 +619,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
- tmp |= MOD_SLAVE;
+ tmp |= mod_slave;
break;
case SND_SOC_DAIFMT_CBS_CFS:
/* Set default source clock in Master mode */
@@ -623,13 +637,13 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
* channel.
*/
if (any_active(i2s) &&
- ((mod & (sdf_mask | lrp_rlow | MOD_SLAVE)) != tmp)) {
+ ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
}
- mod &= ~(sdf_mask | lrp_rlow | MOD_SLAVE);
+ mod &= ~(sdf_mask | lrp_rlow | mod_slave);
mod |= tmp;
writel(mod, i2s->addr + I2SMOD);
@@ -751,6 +765,7 @@ static void i2s_shutdown(struct snd_pcm_substream *substream,
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
unsigned long flags;
+ const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
spin_lock_irqsave(&lock, flags);
@@ -761,7 +776,7 @@ static void i2s_shutdown(struct snd_pcm_substream *substream,
other->mode |= DAI_MANAGER;
} else {
u32 mod = readl(i2s->addr + I2SMOD);
- i2s->cdclk_out = !(mod & MOD_CDCLKCON);
+ i2s->cdclk_out = !(mod & (1 << i2s_regs->cdclkcon_off));
if (other)
other->cdclk_out = i2s->cdclk_out;
}
@@ -914,13 +929,14 @@ i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
struct i2s_dai *i2s = to_info(dai);
u32 reg = readl(i2s->addr + I2SFIC);
snd_pcm_sframes_t delay;
+ const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
delay = FIC_RXCOUNT(reg);
else if (is_secondary(i2s))
delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS));
else
- delay = FIC_TXCOUNT(reg);
+ delay = (reg >> i2s_regs->ftx0cnt_off) & 0x7f;
return delay;
}
@@ -956,6 +972,7 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+ int ret;
if (other && other->clk) { /* If this is probe on secondary */
samsung_asoc_init_dma_data(dai, &other->sec_dai->dma_playback,
@@ -973,9 +990,14 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
if (IS_ERR(i2s->clk)) {
dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n");
iounmap(i2s->addr);
- return -ENOENT;
+ return PTR_ERR(i2s->clk);
+ }
+
+ ret = clk_prepare_enable(i2s->clk);
+ if (ret != 0) {
+ dev_err(&i2s->pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
}
- clk_prepare_enable(i2s->clk);
samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
@@ -987,7 +1009,7 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
if (i2s->quirks & QUIRK_NEED_RSTCLR)
writel(CON_RSTCLR, i2s->addr + I2SCON);
- if (i2s->quirks & QUIRK_SEC_DAI)
+ if (i2s->quirks & QUIRK_SUPPORTS_IDMA)
idma_reg_addr_init(i2s->addr,
i2s->sec_dai->idma_playback.dma_addr);
@@ -1199,10 +1221,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
quirks = i2s_dai_data->quirks;
if (of_property_read_u32(np, "samsung,idma-addr",
&idma_addr)) {
- if (quirks & QUIRK_SEC_DAI) {
- dev_err(&pdev->dev, "idma address is not"\
+ if (quirks & QUIRK_SUPPORTS_IDMA) {
+			dev_info(&pdev->dev, "idma address is not "\
"specified");
- return -EINVAL;
}
}
}
@@ -1228,6 +1249,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pri_dai->dma_capture.dma_size = 4;
pri_dai->base = regs_base;
pri_dai->quirks = quirks;
+ pri_dai->variant_regs = i2s_dai_data->i2s_variant_regs;
if (quirks & QUIRK_PRI_6CHAN)
pri_dai->i2s_dai_drv.playback.channels_max = 6;
@@ -1302,20 +1324,93 @@ static int samsung_i2s_remove(struct platform_device *pdev)
return 0;
}
+static const struct samsung_i2s_variant_regs i2sv3_regs = {
+ .bfs_off = 1,
+ .rfs_off = 3,
+ .sdf_off = 5,
+ .txr_off = 8,
+ .rclksrc_off = 10,
+ .mss_off = 11,
+ .cdclkcon_off = 12,
+ .lrp_off = 7,
+ .bfs_mask = 0x3,
+ .rfs_mask = 0x3,
+ .ftx0cnt_off = 8,
+};
+
+static const struct samsung_i2s_variant_regs i2sv6_regs = {
+ .bfs_off = 0,
+ .rfs_off = 4,
+ .sdf_off = 6,
+ .txr_off = 8,
+ .rclksrc_off = 10,
+ .mss_off = 11,
+ .cdclkcon_off = 12,
+ .lrp_off = 15,
+ .bfs_mask = 0xf,
+ .rfs_mask = 0x3,
+ .ftx0cnt_off = 8,
+};
+
+static const struct samsung_i2s_variant_regs i2sv7_regs = {
+ .bfs_off = 0,
+ .rfs_off = 4,
+ .sdf_off = 7,
+ .txr_off = 9,
+ .rclksrc_off = 11,
+ .mss_off = 12,
+ .cdclkcon_off = 22,
+ .lrp_off = 15,
+ .bfs_mask = 0xf,
+ .rfs_mask = 0x7,
+ .ftx0cnt_off = 0,
+};
+
+static const struct samsung_i2s_variant_regs i2sv5_i2s1_regs = {
+ .bfs_off = 0,
+ .rfs_off = 3,
+ .sdf_off = 6,
+ .txr_off = 8,
+ .rclksrc_off = 10,
+ .mss_off = 11,
+ .cdclkcon_off = 12,
+ .lrp_off = 15,
+ .bfs_mask = 0x7,
+ .rfs_mask = 0x7,
+ .ftx0cnt_off = 8,
+};
+
static const struct samsung_i2s_dai_data i2sv3_dai_type = {
.dai_type = TYPE_PRI,
.quirks = QUIRK_NO_MUXPSR,
+ .i2s_variant_regs = &i2sv3_regs,
};
static const struct samsung_i2s_dai_data i2sv5_dai_type = {
.dai_type = TYPE_PRI,
- .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+ .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR |
+ QUIRK_SUPPORTS_IDMA,
+ .i2s_variant_regs = &i2sv3_regs,
};
static const struct samsung_i2s_dai_data i2sv6_dai_type = {
.dai_type = TYPE_PRI,
.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR |
+ QUIRK_SUPPORTS_TDM | QUIRK_SUPPORTS_IDMA,
+ .i2s_variant_regs = &i2sv6_regs,
+};
+
+static const struct samsung_i2s_dai_data i2sv7_dai_type = {
+ .dai_type = TYPE_PRI,
+ .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR |
QUIRK_SUPPORTS_TDM,
+ .i2s_variant_regs = &i2sv7_regs,
+};
+
+static const struct samsung_i2s_dai_data i2sv5_dai_type_i2s1 = {
+ .dai_type = TYPE_PRI,
+ .quirks = QUIRK_PRI_6CHAN | QUIRK_NEED_RSTCLR,
+ .i2s_variant_regs = &i2sv5_i2s1_regs,
};
static const struct samsung_i2s_dai_data samsung_dai_type_pri = {
@@ -1329,10 +1424,13 @@ static const struct samsung_i2s_dai_data samsung_dai_type_sec = {
static struct platform_device_id samsung_i2s_driver_ids[] = {
{
.name = "samsung-i2s",
- .driver_data = (kernel_ulong_t)&samsung_dai_type_pri,
+ .driver_data = (kernel_ulong_t)&i2sv3_dai_type,
}, {
.name = "samsung-i2s-sec",
.driver_data = (kernel_ulong_t)&samsung_dai_type_sec,
+ }, {
+ .name = "samsung-i2sv4",
+ .driver_data = (kernel_ulong_t)&i2sv5_dai_type,
},
{},
};
@@ -1349,6 +1447,12 @@ static const struct of_device_id exynos_i2s_match[] = {
}, {
.compatible = "samsung,exynos5420-i2s",
.data = &i2sv6_dai_type,
+ }, {
+ .compatible = "samsung,exynos7-i2s",
+ .data = &i2sv7_dai_type,
+ }, {
+ .compatible = "samsung,exynos7-i2s1",
+ .data = &i2sv5_dai_type_i2s1,
},
{},
};
@@ -1366,7 +1470,6 @@ static struct platform_driver samsung_i2s_driver = {
.id_table = samsung_i2s_driver_ids,
.driver = {
.name = "samsung-i2s",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(exynos_i2s_match),
.pm = &samsung_i2s_pm,
},
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index 0e8dd985fcb3..4ed29ffc1c54 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -418,7 +418,6 @@ static int asoc_idma_platform_probe(struct platform_device *pdev)
static struct platform_driver asoc_idma_driver = {
.driver = {
.name = "samsung-idma",
- .owner = THIS_MODULE,
},
.probe = asoc_idma_platform_probe,
diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c
index 840787e63cb1..141519c21e21 100644
--- a/sound/soc/samsung/littlemill.c
+++ b/sound/soc/samsung/littlemill.c
@@ -315,7 +315,6 @@ static int littlemill_probe(struct platform_device *pdev)
static struct platform_driver littlemill_driver = {
.driver = {
.name = "littlemill",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = littlemill_probe,
diff --git a/sound/soc/samsung/lowland.c b/sound/soc/samsung/lowland.c
index bd5f0d643a86..243dea7ba38f 100644
--- a/sound/soc/samsung/lowland.c
+++ b/sound/soc/samsung/lowland.c
@@ -198,7 +198,6 @@ static int lowland_probe(struct platform_device *pdev)
static struct platform_driver lowland_driver = {
.driver = {
.name = "lowland",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = lowland_probe,
diff --git a/sound/soc/samsung/odroidx2_max98090.c b/sound/soc/samsung/odroidx2_max98090.c
index 3c8f60423e82..fa4f1d2f69bf 100644
--- a/sound/soc/samsung/odroidx2_max98090.c
+++ b/sound/soc/samsung/odroidx2_max98090.c
@@ -153,8 +153,8 @@ static int odroidx2_audio_remove(struct platform_device *pdev)
snd_soc_unregister_card(card);
- of_node_put((struct device_node *)odroidx2_dai[0].cpu_of_node);
- of_node_put((struct device_node *)odroidx2_dai[0].codec_of_node);
+ of_node_put(odroidx2_dai[0].cpu_of_node);
+ of_node_put(odroidx2_dai[0].codec_of_node);
return 0;
}
@@ -162,7 +162,6 @@ static int odroidx2_audio_remove(struct platform_device *pdev)
static struct platform_driver odroidx2_audio_driver = {
.driver = {
.name = "odroidx2-audio",
- .owner = THIS_MODULE,
.of_match_table = odroidx2_audio_of_match,
.pm = &snd_soc_pm_ops,
},
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index bac034b15a27..b320a9d3fbf8 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -626,7 +626,6 @@ static struct platform_driver s3c_pcm_driver = {
.remove = s3c_pcm_dev_remove,
.driver = {
.name = "samsung-pcm",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 27b339c6580e..2b766d212ce0 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -180,7 +180,6 @@ static struct platform_driver s3c2412_iis_driver = {
.probe = s3c2412_iis_dev_probe,
.driver = {
.name = "s3c2412-iis",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index e87d9a2053b8..326d3c3804e3 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -485,7 +485,6 @@ static struct platform_driver s3c24xx_iis_driver = {
.probe = s3c24xx_iis_dev_probe,
.driver = {
.name = "s3c24xx-iis",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
index 2d30b7b6818a..7ac924c595bf 100644
--- a/sound/soc/samsung/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -99,7 +99,6 @@ static int simtec_audio_hermes_probe(struct platform_device *pd)
static struct platform_driver simtec_audio_hermes_platdrv = {
.driver = {
- .owner = THIS_MODULE,
.name = "s3c24xx-simtec-hermes-snd",
.pm = simtec_audio_pm,
},
diff --git a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index 83f6c7d49cd6..b4ed2fc1a65c 100644
--- a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -87,7 +87,6 @@ static int simtec_audio_tlv320aic23_probe(struct platform_device *pd)
static struct platform_driver simtec_audio_tlv320aic23_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "s3c24xx-simtec-tlv320aic23",
.pm = simtec_audio_pm,
},
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 1b7b52b0af97..9c6f7db56f60 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -340,7 +340,6 @@ static struct platform_driver s3c24xx_uda134x_driver = {
.remove = s3c24xx_uda134x_remove,
.driver = {
.name = "s3c24xx_uda134x",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
index 63d079303561..05c609c62de9 100644
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ b/sound/soc/samsung/smdk_wm8580pcm.c
@@ -173,7 +173,6 @@ static int snd_smdk_probe(struct platform_device *pdev)
static struct platform_driver snd_smdk_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "samsung-smdk-pcm",
},
.probe = snd_smdk_probe,
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 3d6272a8cad2..d38595fbdab7 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -191,7 +191,6 @@ static int smdk_audio_probe(struct platform_device *pdev)
static struct platform_driver smdk_audio_driver = {
.driver = {
.name = "smdk-audio-wm8994",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(samsung_wm8994_of_match),
.pm = &snd_soc_pm_ops,
},
diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c
index b6c09979be1f..c470e8eed6e1 100644
--- a/sound/soc/samsung/smdk_wm8994pcm.c
+++ b/sound/soc/samsung/smdk_wm8994pcm.c
@@ -143,7 +143,6 @@ static int snd_smdk_probe(struct platform_device *pdev)
static struct platform_driver snd_smdk_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "samsung-smdk-pcm",
},
.probe = snd_smdk_probe,
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 72118a77dd5b..7651dc924161 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -115,7 +115,6 @@ MODULE_DEVICE_TABLE(of, snow_of_match);
static struct platform_driver snow_driver = {
.driver = {
.name = "snow-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = snow_of_match,
},
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index d7d2e208f486..36dbc0e96004 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -477,7 +477,6 @@ static struct platform_driver samsung_spdif_driver = {
.remove = spdif_remove,
.driver = {
.name = "samsung-spdif",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index a05482651aae..5ec7c52282f2 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -340,7 +340,6 @@ static int speyside_probe(struct platform_device *pdev)
static struct platform_driver speyside_driver = {
.driver = {
.name = "speyside",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = speyside_probe,
diff --git a/sound/soc/samsung/tobermory.c b/sound/soc/samsung/tobermory.c
index 6a2b9f14d624..9c80506527c4 100644
--- a/sound/soc/samsung/tobermory.c
+++ b/sound/soc/samsung/tobermory.c
@@ -234,7 +234,6 @@ static int tobermory_probe(struct platform_device *pdev)
static struct platform_driver tobermory_driver = {
.driver = {
.name = "tobermory",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = tobermory_probe,
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index c85f8eb66c97..a5b2c4ea90d9 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -345,7 +345,6 @@ static int sh7760_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver sh7760_pcm_driver = {
.driver = {
.name = "sh7760-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = sh7760_soc_platform_probe,
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 88e5df474ccf..8869971d7884 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -842,12 +842,9 @@ static int fsi_clk_disable(struct device *dev,
return -EINVAL;
if (1 == clock->count--) {
- if (clock->xck)
- clk_disable(clock->xck);
- if (clock->ick)
- clk_disable(clock->ick);
- if (clock->div)
- clk_disable(clock->div);
+ clk_disable(clock->xck);
+ clk_disable(clock->ick);
+ clk_disable(clock->div);
}
return 0;
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index 0af2e4dfd139..84c51037a7d0 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -272,7 +272,7 @@ static const struct snd_soc_dai_ops hac_dai_ops = {
static struct snd_soc_dai_driver sh4_hac_dai[] = {
{
.name = "hac-dai.0",
- .ac97_control = 1,
+ .bus_control = true,
.playback = {
.rates = AC97_RATES,
.formats = AC97_FMTS,
@@ -333,7 +333,6 @@ static int hac_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver hac_pcm_driver = {
.driver = {
.name = "hac-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = hac_soc_platform_probe,
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index fc41a0e8b09f..14d1a7193469 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -430,7 +430,7 @@ int rsnd_adg_probe(struct platform_device *pdev,
adg->clk[CLKI] = devm_clk_get(dev, "clk_i");
for_each_rsnd_clk(clk, adg, i)
- dev_dbg(dev, "clk %d : %p\n", i, clk);
+ dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk));
rsnd_adg_ssi_clk_init(priv, adg);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 70042197f9e2..75308bbc2ce8 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -349,7 +349,7 @@ int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
dma_name);
if (!dma->chan) {
dev_err(dev, "can't get dma channel\n");
- return -EIO;
+ goto rsnd_dma_channel_err;
}
ret = dmaengine_slave_config(dma->chan, &cfg);
@@ -363,8 +363,15 @@ int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
rsnd_dma_init_err:
rsnd_dma_quit(priv, dma);
+rsnd_dma_channel_err:
- return ret;
+ /*
+	 * DMA failed. Fall back to PIO mode
+ * see
+ * rsnd_ssi_fallback()
+ * rsnd_rdai_continuance_probe()
+ */
+ return -EAGAIN;
}
void rsnd_dma_quit(struct rsnd_priv *priv,
@@ -409,9 +416,16 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod)
({ \
struct rsnd_priv *priv = rsnd_mod_to_priv(mod); \
struct device *dev = rsnd_priv_to_dev(priv); \
- dev_dbg(dev, "%s [%d] %s\n", \
- rsnd_mod_name(mod), rsnd_mod_id(mod), #func); \
- (mod)->ops->func(mod, rdai); \
+ u32 mask = 1 << __rsnd_mod_shift_##func; \
+ u32 call = __rsnd_mod_call_##func << __rsnd_mod_shift_##func; \
+ int ret = 0; \
+ if ((mod->status & mask) == call) { \
+ dev_dbg(dev, "%s[%d] %s\n", \
+ rsnd_mod_name(mod), rsnd_mod_id(mod), #func); \
+ ret = (mod)->ops->func(mod, rdai); \
+ mod->status = (mod->status & ~mask) | (~call & mask); \
+ } \
+ ret; \
})
#define rsnd_mod_call(mod, func, rdai...) \
@@ -456,6 +470,13 @@ static int rsnd_dai_connect(struct rsnd_mod *mod,
return 0;
}
+static void rsnd_dai_disconnect(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io)
+{
+ mod->io = NULL;
+ io->mod[mod->type] = NULL;
+}
+
int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
{
int id = rdai - priv->rdai;
@@ -686,6 +707,20 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
ret; \
})
+#define rsnd_path_break(priv, io, type) \
+{ \
+ struct rsnd_mod *mod; \
+ int id = -1; \
+ \
+ if (rsnd_is_enable_path(io, type)) { \
+ id = rsnd_info_id(priv, io, type); \
+ if (id >= 0) { \
+ mod = rsnd_##type##_mod_get(priv, id); \
+ rsnd_dai_disconnect(mod, io); \
+ } \
+ } \
+}
+
static int rsnd_path_init(struct rsnd_priv *priv,
struct rsnd_dai *rdai,
struct rsnd_dai_stream *io)
@@ -934,6 +969,150 @@ static struct snd_pcm_ops rsnd_pcm_ops = {
};
/*
+ * snd_kcontrol
+ */
+#define kcontrol_to_cfg(kctrl) ((struct rsnd_kctrl_cfg *)kctrl->private_value)
+static int rsnd_kctrl_info(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct rsnd_kctrl_cfg *cfg = kcontrol_to_cfg(kctrl);
+
+ if (cfg->texts) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = cfg->size;
+ uinfo->value.enumerated.items = cfg->max;
+ if (uinfo->value.enumerated.item >= cfg->max)
+ uinfo->value.enumerated.item = cfg->max - 1;
+ strlcpy(uinfo->value.enumerated.name,
+ cfg->texts[uinfo->value.enumerated.item],
+ sizeof(uinfo->value.enumerated.name));
+ } else {
+ uinfo->count = cfg->size;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = cfg->max;
+ uinfo->type = (cfg->max == 1) ?
+ SNDRV_CTL_ELEM_TYPE_BOOLEAN :
+ SNDRV_CTL_ELEM_TYPE_INTEGER;
+ }
+
+ return 0;
+}
+
+static int rsnd_kctrl_get(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *uc)
+{
+ struct rsnd_kctrl_cfg *cfg = kcontrol_to_cfg(kctrl);
+ int i;
+
+ for (i = 0; i < cfg->size; i++)
+ if (cfg->texts)
+ uc->value.enumerated.item[i] = cfg->val[i];
+ else
+ uc->value.integer.value[i] = cfg->val[i];
+
+ return 0;
+}
+
+static int rsnd_kctrl_put(struct snd_kcontrol *kctrl,
+ struct snd_ctl_elem_value *uc)
+{
+ struct rsnd_mod *mod = snd_kcontrol_chip(kctrl);
+ struct rsnd_kctrl_cfg *cfg = kcontrol_to_cfg(kctrl);
+ int i, change = 0;
+
+ for (i = 0; i < cfg->size; i++) {
+ if (cfg->texts) {
+ change |= (uc->value.enumerated.item[i] != cfg->val[i]);
+ cfg->val[i] = uc->value.enumerated.item[i];
+ } else {
+ change |= (uc->value.integer.value[i] != cfg->val[i]);
+ cfg->val[i] = uc->value.integer.value[i];
+ }
+ }
+
+ if (change)
+ cfg->update(mod);
+
+ return change;
+}
+
+static int __rsnd_kctrl_new(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ struct rsnd_kctrl_cfg *cfg,
+ void (*update)(struct rsnd_mod *mod))
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_kcontrol *kctrl;
+ struct snd_kcontrol_new knew = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = name,
+ .info = rsnd_kctrl_info,
+ .get = rsnd_kctrl_get,
+ .put = rsnd_kctrl_put,
+ .private_value = (unsigned long)cfg,
+ };
+ int ret;
+
+ kctrl = snd_ctl_new1(&knew, mod);
+ if (!kctrl)
+ return -ENOMEM;
+
+ ret = snd_ctl_add(card, kctrl);
+ if (ret < 0)
+ return ret;
+
+ cfg->update = update;
+
+ return 0;
+}
+
+int rsnd_kctrl_new_m(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ void (*update)(struct rsnd_mod *mod),
+ struct rsnd_kctrl_cfg_m *_cfg,
+ u32 max)
+{
+ _cfg->cfg.max = max;
+ _cfg->cfg.size = RSND_DVC_CHANNELS;
+ _cfg->cfg.val = _cfg->val;
+ return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+}
+
+int rsnd_kctrl_new_s(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ void (*update)(struct rsnd_mod *mod),
+ struct rsnd_kctrl_cfg_s *_cfg,
+ u32 max)
+{
+ _cfg->cfg.max = max;
+ _cfg->cfg.size = 1;
+ _cfg->cfg.val = &_cfg->val;
+ return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+}
+
+int rsnd_kctrl_new_e(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ struct rsnd_kctrl_cfg_s *_cfg,
+ void (*update)(struct rsnd_mod *mod),
+ const char * const *texts,
+ u32 max)
+{
+ _cfg->cfg.max = max;
+ _cfg->cfg.size = 1;
+ _cfg->cfg.val = &_cfg->val;
+ _cfg->cfg.texts = texts;
+ return __rsnd_kctrl_new(mod, rdai, rtd, name, &_cfg->cfg, update);
+}
+
+/*
* snd_soc_platform
*/
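
The generic kcontrol helpers added above route single (_s), multi-channel (_m) and enumerated (_e) controls through one rsnd_kctrl_cfg, and the put handler only calls the hardware update hook, and only reports a change back to ALSA, when a value actually moved. A minimal user-space sketch of that contract; the toy struct and names below are invented for illustration, not the kernel API:

#include <stdio.h>

/* toy stand-in for rsnd_kctrl_cfg: a small value array plus an update hook */
struct kctrl_cfg {
	unsigned int size;
	unsigned int max;
	unsigned int val[2];
	void (*update)(struct kctrl_cfg *cfg);
};

/*
 * Same contract as rsnd_kctrl_put(): copy the new values in, call the
 * hardware update hook only if something changed, and return whether
 * anything changed (ALSA uses that to decide on control notifications).
 */
static int kctrl_put(struct kctrl_cfg *cfg, const unsigned int *new_val)
{
	unsigned int i;
	int change = 0;

	for (i = 0; i < cfg->size; i++) {
		change |= (new_val[i] != cfg->val[i]);
		cfg->val[i] = new_val[i];
	}

	if (change)
		cfg->update(cfg);

	return change;
}

static void update_hw(struct kctrl_cfg *cfg)
{
	printf("write %#x/%#x to the volume registers\n", cfg->val[0], cfg->val[1]);
}

int main(void)
{
	struct kctrl_cfg volume = { .size = 2, .max = 0x7fffff, .update = update_hw };
	unsigned int v[2] = { 0x400000, 0x400000 };

	printf("changed: %d\n", kctrl_put(&volume, v)); /* 1: values changed, hw updated */
	printf("changed: %d\n", kctrl_put(&volume, v)); /* 0: same values, no hw write */
	return 0;
}

The second call performs no register write and returns 0, which is why rsnd_kctrl_put() returns its accumulated change flag rather than a constant.
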
@@ -976,6 +1155,49 @@ static const struct snd_soc_component_driver rsnd_soc_component = {
.name = "rsnd",
};
+static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ int is_play)
+{
+ struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
+ int ret;
+
+ ret = rsnd_dai_call(probe, io, rdai);
+ if (ret == -EAGAIN) {
+ /*
+ * Fallback to PIO mode
+ */
+
+ /*
+ * call "remove" for SSI/SRC/DVC
+		 * SSI will be switched to PIO mode if it was DMA mode
+ * see
+ * rsnd_dma_init()
+ * rsnd_ssi_fallback()
+ */
+ rsnd_dai_call(remove, io, rdai);
+
+ /*
+ * remove SRC/DVC from DAI,
+ */
+ rsnd_path_break(priv, io, src);
+ rsnd_path_break(priv, io, dvc);
+
+ /*
+ * fallback
+ */
+ rsnd_dai_call(fallback, io, rdai);
+
+ /*
+		 * retry "probe".
+		 * The DAI now has only a PIO-mode SSI.
+ */
+ ret = rsnd_dai_call(probe, io, rdai);
+ }
+
+ return ret;
+}
+
/*
* rsnd probe
*/
@@ -1037,11 +1259,11 @@ static int rsnd_probe(struct platform_device *pdev)
}
for_each_rsnd_dai(rdai, priv, i) {
- ret = rsnd_dai_call(probe, &rdai->playback, rdai);
+ ret = rsnd_rdai_continuance_probe(priv, rdai, 1);
if (ret)
goto exit_snd_probe;
- ret = rsnd_dai_call(probe, &rdai->capture, rdai);
+ ret = rsnd_rdai_continuance_probe(priv, rdai, 0);
if (ret)
goto exit_snd_probe;
}
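
The reworked mod call macro above (used by rsnd_dai_call()) keeps one status bit per probe/remove, init/quit and start/stop pair, so during the -EAGAIN fallback "remove" is only issued to mods that really were probed and the retried "probe" starts from a clean slate. A small stand-alone model of that bookkeeping; the struct and names are invented for the sketch, only the bit handling mirrors the macro:

#include <stdio.h>

/* toy mod with the same one-bit-per-pair bookkeeping as rsnd_mod::status */
struct mod {
	unsigned int status;
	const char *name;
};

enum { SHIFT_PROBE = 0, SHIFT_INIT = 1, SHIFT_START = 2 };

/*
 * Run func only if the status bit currently matches "call"
 * (0 = forward call such as probe, 1 = reverse call such as remove),
 * then flip the bit, mirroring "(mod->status & mask) == call".
 */
static int mod_call(struct mod *m, int shift, int call,
		    int (*func)(struct mod *m), const char *what)
{
	unsigned int mask = 1u << shift;
	unsigned int want = (unsigned int)call << shift;
	int ret = 0;

	if ((m->status & mask) == want) {
		printf("%s: %s\n", m->name, what);
		ret = func(m);
		m->status = (m->status & ~mask) | (~want & mask);
	}

	return ret;
}

static int do_probe(struct mod *m)  { (void)m; return 0; }
static int do_remove(struct mod *m) { (void)m; return 0; }

int main(void)
{
	struct mod ssi = { .status = 0, .name = "ssi" };

	mod_call(&ssi, SHIFT_PROBE, 0, do_probe,  "probe");  /* runs */
	mod_call(&ssi, SHIFT_PROBE, 0, do_probe,  "probe");  /* skipped: already probed */
	mod_call(&ssi, SHIFT_PROBE, 1, do_remove, "remove"); /* runs */
	mod_call(&ssi, SHIFT_PROBE, 1, do_remove, "remove"); /* skipped: already removed */

	return 0;
}

The skipped calls are exactly what rsnd_rdai_continuance_probe() relies on when it issues remove/fallback and then probes a second time.
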
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 3f443930c2b1..5380a4827ba7 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -11,8 +11,6 @@
#include "rsnd.h"
#define RSND_DVC_NAME_SIZE 16
-#define RSND_DVC_VOLUME_MAX 100
-#define RSND_DVC_VOLUME_NUM 2
#define DVC_NAME "dvc"
@@ -20,8 +18,11 @@ struct rsnd_dvc {
struct rsnd_dvc_platform_info *info; /* rcar_snd.h */
struct rsnd_mod mod;
struct clk *clk;
- u8 volume[RSND_DVC_VOLUME_NUM];
- u8 mute[RSND_DVC_VOLUME_NUM];
+ struct rsnd_kctrl_cfg_m volume;
+ struct rsnd_kctrl_cfg_m mute;
+ struct rsnd_kctrl_cfg_s ren; /* Ramp Enable */
+ struct rsnd_kctrl_cfg_s rup; /* Ramp Rate Up */
+ struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */
};
#define rsnd_mod_to_dvc(_mod) \
@@ -33,23 +34,87 @@ struct rsnd_dvc {
((pos) = (struct rsnd_dvc *)(priv)->dvc + i); \
i++)
+static const char * const dvc_ramp_rate[] = {
+ "128 dB/1 step", /* 00000 */
+ "64 dB/1 step", /* 00001 */
+ "32 dB/1 step", /* 00010 */
+ "16 dB/1 step", /* 00011 */
+ "8 dB/1 step", /* 00100 */
+ "4 dB/1 step", /* 00101 */
+ "2 dB/1 step", /* 00110 */
+ "1 dB/1 step", /* 00111 */
+ "0.5 dB/1 step", /* 01000 */
+ "0.25 dB/1 step", /* 01001 */
+ "0.125 dB/1 step", /* 01010 */
+ "0.125 dB/2 steps", /* 01011 */
+ "0.125 dB/4 steps", /* 01100 */
+ "0.125 dB/8 steps", /* 01101 */
+ "0.125 dB/16 steps", /* 01110 */
+ "0.125 dB/32 steps", /* 01111 */
+ "0.125 dB/64 steps", /* 10000 */
+ "0.125 dB/128 steps", /* 10001 */
+ "0.125 dB/256 steps", /* 10010 */
+ "0.125 dB/512 steps", /* 10011 */
+ "0.125 dB/1024 steps", /* 10100 */
+ "0.125 dB/2048 steps", /* 10101 */
+ "0.125 dB/4096 steps", /* 10110 */
+ "0.125 dB/8192 steps", /* 10111 */
+};
+
static void rsnd_dvc_volume_update(struct rsnd_mod *mod)
{
struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
- u32 max = (0x00800000 - 1);
- u32 vol[RSND_DVC_VOLUME_NUM];
+ u32 val[RSND_DVC_CHANNELS];
+ u32 dvucr = 0;
u32 mute = 0;
int i;
- for (i = 0; i < RSND_DVC_VOLUME_NUM; i++) {
- vol[i] = max / RSND_DVC_VOLUME_MAX * dvc->volume[i];
- mute |= (!!dvc->mute[i]) << i;
+ for (i = 0; i < dvc->mute.cfg.size; i++)
+ mute |= (!!dvc->mute.cfg.val[i]) << i;
+
+ /* Disable DVC Register access */
+ rsnd_mod_write(mod, DVC_DVUER, 0);
+
+ /* Enable Ramp */
+ if (dvc->ren.val) {
+ dvucr |= 0x10;
+
+ /* Digital Volume Max */
+ for (i = 0; i < RSND_DVC_CHANNELS; i++)
+ val[i] = dvc->volume.cfg.max;
+
+ rsnd_mod_write(mod, DVC_VRCTR, 0xff);
+ rsnd_mod_write(mod, DVC_VRPDR, dvc->rup.val << 8 |
+ dvc->rdown.val);
+ /*
+ * FIXME !!
+		 * use the scaled-down Digital Volume
+		 * as the Volume Ramp boundary
+ * 7F FFFF -> 3FF
+ */
+ rsnd_mod_write(mod, DVC_VRDBR,
+ 0x3ff - (dvc->volume.val[0] >> 13));
+
+ } else {
+ for (i = 0; i < RSND_DVC_CHANNELS; i++)
+ val[i] = dvc->volume.val[i];
+ }
+
+ /* Enable Digital Volume */
+ dvucr |= 0x100;
+ rsnd_mod_write(mod, DVC_VOL0R, val[0]);
+ rsnd_mod_write(mod, DVC_VOL1R, val[1]);
+
+ /* Enable Mute */
+ if (mute) {
+ dvucr |= 0x1;
+ rsnd_mod_write(mod, DVC_ZCMCR, mute);
}
- rsnd_mod_write(mod, DVC_VOL0R, vol[0]);
- rsnd_mod_write(mod, DVC_VOL1R, vol[1]);
+ rsnd_mod_write(mod, DVC_DVUCR, dvucr);
- rsnd_mod_write(mod, DVC_ZCMCR, mute);
+ /* Enable DVC Register access */
+ rsnd_mod_write(mod, DVC_DVUER, 1);
}
static int rsnd_dvc_probe_gen2(struct rsnd_mod *mod,
@@ -58,7 +123,8 @@ static int rsnd_dvc_probe_gen2(struct rsnd_mod *mod,
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_dbg(dev, "%s (Gen2) is probed\n", rsnd_mod_name(mod));
+ dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return 0;
}
@@ -102,16 +168,11 @@ static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
rsnd_mod_write(dvc_mod, DVC_ADINR, rsnd_get_adinr(dvc_mod));
- /* enable Volume / Mute */
- rsnd_mod_write(dvc_mod, DVC_DVUCR, 0x101);
-
/* ch0/ch1 Volume */
rsnd_dvc_volume_update(dvc_mod);
rsnd_mod_write(dvc_mod, DVC_DVUIR, 0);
- rsnd_mod_write(dvc_mod, DVC_DVUER, 1);
-
rsnd_adg_set_cmd_timsel_gen2(rdai, dvc_mod, io);
return 0;
@@ -143,86 +204,6 @@ static int rsnd_dvc_stop(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_dvc_volume_info(struct snd_kcontrol *kctrl,
- struct snd_ctl_elem_info *uinfo)
-{
- struct rsnd_mod *mod = snd_kcontrol_chip(kctrl);
- struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
- u8 *val = (u8 *)kctrl->private_value;
-
- uinfo->count = RSND_DVC_VOLUME_NUM;
- uinfo->value.integer.min = 0;
-
- if (val == dvc->volume) {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->value.integer.max = RSND_DVC_VOLUME_MAX;
- } else {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->value.integer.max = 1;
- }
-
- return 0;
-}
-
-static int rsnd_dvc_volume_get(struct snd_kcontrol *kctrl,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 *val = (u8 *)kctrl->private_value;
- int i;
-
- for (i = 0; i < RSND_DVC_VOLUME_NUM; i++)
- ucontrol->value.integer.value[i] = val[i];
-
- return 0;
-}
-
-static int rsnd_dvc_volume_put(struct snd_kcontrol *kctrl,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct rsnd_mod *mod = snd_kcontrol_chip(kctrl);
- u8 *val = (u8 *)kctrl->private_value;
- int i, change = 0;
-
- for (i = 0; i < RSND_DVC_VOLUME_NUM; i++) {
- change |= (ucontrol->value.integer.value[i] != val[i]);
- val[i] = ucontrol->value.integer.value[i];
- }
-
- if (change)
- rsnd_dvc_volume_update(mod);
-
- return change;
-}
-
-static int __rsnd_dvc_pcm_new(struct rsnd_mod *mod,
- struct rsnd_dai *rdai,
- struct snd_soc_pcm_runtime *rtd,
- const unsigned char *name,
- u8 *private)
-{
- struct snd_card *card = rtd->card->snd_card;
- struct snd_kcontrol *kctrl;
- struct snd_kcontrol_new knew = {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = name,
- .info = rsnd_dvc_volume_info,
- .get = rsnd_dvc_volume_get,
- .put = rsnd_dvc_volume_put,
- .private_value = (unsigned long)private,
- };
- int ret;
-
- kctrl = snd_ctl_new1(&knew, mod);
- if (!kctrl)
- return -ENOMEM;
-
- ret = snd_ctl_add(card, kctrl);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd)
@@ -232,18 +213,48 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
int ret;
/* Volume */
- ret = __rsnd_dvc_pcm_new(mod, rdai, rtd,
+ ret = rsnd_kctrl_new_m(mod, rdai, rtd,
rsnd_dai_is_play(rdai, io) ?
"DVC Out Playback Volume" : "DVC In Capture Volume",
- dvc->volume);
+ rsnd_dvc_volume_update,
+ &dvc->volume, 0x00800000 - 1);
if (ret < 0)
return ret;
/* Mute */
- ret = __rsnd_dvc_pcm_new(mod, rdai, rtd,
+ ret = rsnd_kctrl_new_m(mod, rdai, rtd,
rsnd_dai_is_play(rdai, io) ?
"DVC Out Mute Switch" : "DVC In Mute Switch",
- dvc->mute);
+ rsnd_dvc_volume_update,
+ &dvc->mute, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Ramp */
+ ret = rsnd_kctrl_new_s(mod, rdai, rtd,
+ rsnd_dai_is_play(rdai, io) ?
+ "DVC Out Ramp Switch" : "DVC In Ramp Switch",
+ rsnd_dvc_volume_update,
+ &dvc->ren, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_kctrl_new_e(mod, rdai, rtd,
+ rsnd_dai_is_play(rdai, io) ?
+ "DVC Out Ramp Up Rate" : "DVC In Ramp Up Rate",
+ &dvc->rup,
+ rsnd_dvc_volume_update,
+ dvc_ramp_rate, ARRAY_SIZE(dvc_ramp_rate));
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_kctrl_new_e(mod, rdai, rtd,
+ rsnd_dai_is_play(rdai, io) ?
+ "DVC Out Ramp Down Rate" : "DVC In Ramp Down Rate",
+ &dvc->rdown,
+ rsnd_dvc_volume_update,
+ dvc_ramp_rate, ARRAY_SIZE(dvc_ramp_rate));
+
if (ret < 0)
return ret;
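
The FIXME in rsnd_dvc_volume_update() derives the 10-bit ramp boundary (DVC_VRDBR) from the 0x7FFFFF full-scale digital volume by shifting right 13 bits and subtracting from 0x3ff, so full volume maps to 0 and silence to 0x3ff. A tiny stand-alone check of that arithmetic; the helper name is made up for the sketch:

#include <stdio.h>

/* scale the 0x7FFFFF digital volume down to the 10-bit ramp boundary,
 * exactly as rsnd_dvc_volume_update() does: 0x7fffff >> 13 == 0x3ff */
static unsigned int dvc_vrdbr(unsigned int vol)
{
	return 0x3ff - (vol >> 13);
}

int main(void)
{
	printf("full scale 0x7fffff -> VRDBR 0x%03x\n", dvc_vrdbr(0x7fffff)); /* 0x000 */
	printf("half scale 0x400000 -> VRDBR 0x%03x\n", dvc_vrdbr(0x400000)); /* 0x1ff */
	printf("silence    0x000000 -> VRDBR 0x%03x\n", dvc_vrdbr(0x000000)); /* 0x3ff */
	return 0;
}
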
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index f95e7ab135e8..87a6f2d62775 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -8,6 +8,17 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+/*
+ * #define DEBUG
+ *
+ * you can also add the following to
+ * ${LINUX}/drivers/base/regmap/regmap.c
+ * for regmap debugging
+ *
+ * #define LOG_DEVICE "xxxx.rcar_sound"
+ */
+
#include "rsnd.h"
struct rsnd_gen {
@@ -67,9 +78,10 @@ u32 rsnd_read(struct rsnd_priv *priv,
if (!rsnd_is_accessible_reg(priv, gen, reg))
return 0;
	regmap_fields_read(gen->regs[reg], rsnd_mod_id(mod), &val);
-	dev_dbg(dev, "r %s - 0x%04d : %08x\n", rsnd_mod_name(mod), reg, val);
+	dev_dbg(dev, "r %s[%d] - %4d : %08x\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod), reg, val);
return val;
}
@@ -84,9 +96,10 @@ void rsnd_write(struct rsnd_priv *priv,
if (!rsnd_is_accessible_reg(priv, gen, reg))
return;
- regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
+ dev_dbg(dev, "w %s[%d] - %4d : %08x\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data);
- dev_dbg(dev, "w %s - 0x%04d : %08x\n", rsnd_mod_name(mod), reg, data);
+ regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
}
void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
@@ -98,11 +111,11 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
if (!rsnd_is_accessible_reg(priv, gen, reg))
return;
+ dev_dbg(dev, "b %s[%d] - %4d : %08x/%08x\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data, mask);
+
regmap_fields_update_bits(gen->regs[reg], rsnd_mod_id(mod),
mask, data);
-
- dev_dbg(dev, "b %s - 0x%04d : %08x/%08x\n",
- rsnd_mod_name(mod), reg, data, mask);
}
#define rsnd_gen_regmap_init(priv, id_size, reg_id, conf) \
@@ -311,6 +324,9 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
RSND_GEN_M_REG(DVC_ADINR, 0xe08, 0x100),
RSND_GEN_M_REG(DVC_DVUCR, 0xe10, 0x100),
RSND_GEN_M_REG(DVC_ZCMCR, 0xe14, 0x100),
+ RSND_GEN_M_REG(DVC_VRCTR, 0xe18, 0x100),
+ RSND_GEN_M_REG(DVC_VRPDR, 0xe1c, 0x100),
+ RSND_GEN_M_REG(DVC_VRDBR, 0xe20, 0x100),
RSND_GEN_M_REG(DVC_VOL0R, 0xe28, 0x100),
RSND_GEN_M_REG(DVC_VOL1R, 0xe2c, 0x100),
RSND_GEN_M_REG(DVC_DVUER, 0xe48, 0x100),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index d119adf97c9c..5826c8abf794 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -91,6 +91,9 @@ enum rsnd_reg {
RSND_REG_SHARE20,
RSND_REG_SHARE21,
RSND_REG_SHARE22,
+ RSND_REG_SHARE23,
+ RSND_REG_SHARE24,
+ RSND_REG_SHARE25,
RSND_REG_MAX,
};
@@ -129,6 +132,9 @@ enum rsnd_reg {
#define RSND_REG_CMD_CTRL RSND_REG_SHARE20
#define RSND_REG_CMDOUT_TIMSEL RSND_REG_SHARE21
#define RSND_REG_BUSIF_DALIGN RSND_REG_SHARE22
+#define RSND_REG_DVC_VRCTR RSND_REG_SHARE23
+#define RSND_REG_DVC_VRPDR RSND_REG_SHARE24
+#define RSND_REG_DVC_VRDBR RSND_REG_SHARE25
struct rsnd_of_data;
struct rsnd_priv;
@@ -200,6 +206,8 @@ struct rsnd_mod_ops {
int (*pcm_new)(struct rsnd_mod *mod,
struct rsnd_dai *rdai,
struct snd_soc_pcm_runtime *rtd);
+ int (*fallback)(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai);
};
struct rsnd_dai_stream;
@@ -210,7 +218,35 @@ struct rsnd_mod {
struct rsnd_mod_ops *ops;
struct rsnd_dma dma;
struct rsnd_dai_stream *io;
+ u32 status;
};
+/*
+ * status
+ *
+ * bit
+ * 0 0: probe 1: remove
+ * 1 0: init 1: quit
+ * 2 0: start 1: stop
+ * 3 0: pcm_new
+ * 4 0: fallback
+ */
+#define __rsnd_mod_shift_probe 0
+#define __rsnd_mod_shift_remove 0
+#define __rsnd_mod_shift_init 1
+#define __rsnd_mod_shift_quit 1
+#define __rsnd_mod_shift_start 2
+#define __rsnd_mod_shift_stop 2
+#define __rsnd_mod_shift_pcm_new 3
+#define __rsnd_mod_shift_fallback 4
+
+#define __rsnd_mod_call_probe 0
+#define __rsnd_mod_call_remove 1
+#define __rsnd_mod_call_init 0
+#define __rsnd_mod_call_quit 1
+#define __rsnd_mod_call_start 0
+#define __rsnd_mod_call_stop 1
+#define __rsnd_mod_call_pcm_new 0
+#define __rsnd_mod_call_fallback 0
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_to_dma(mod) (&(mod)->dma)
@@ -267,7 +303,8 @@ struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id);
int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io);
int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai);
#define rsnd_dai_get_platform_info(rdai) ((rdai)->info)
-#define rsnd_io_to_runtime(io) ((io)->substream->runtime)
+#define rsnd_io_to_runtime(io) ((io)->substream ? \
+ (io)->substream->runtime : NULL)
void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt);
int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
@@ -382,6 +419,51 @@ struct rsnd_priv {
})
/*
+ * rsnd_kctrl
+ */
+struct rsnd_kctrl_cfg {
+ unsigned int max;
+ unsigned int size;
+ u32 *val;
+ const char * const *texts;
+ void (*update)(struct rsnd_mod *mod);
+};
+
+#define RSND_DVC_CHANNELS 2
+struct rsnd_kctrl_cfg_m {
+ struct rsnd_kctrl_cfg cfg;
+ u32 val[RSND_DVC_CHANNELS];
+};
+
+struct rsnd_kctrl_cfg_s {
+ struct rsnd_kctrl_cfg cfg;
+ u32 val;
+};
+
+int rsnd_kctrl_new_m(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ void (*update)(struct rsnd_mod *mod),
+ struct rsnd_kctrl_cfg_m *_cfg,
+ u32 max);
+int rsnd_kctrl_new_s(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ void (*update)(struct rsnd_mod *mod),
+ struct rsnd_kctrl_cfg_s *_cfg,
+ u32 max);
+int rsnd_kctrl_new_e(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct snd_soc_pcm_runtime *rtd,
+ const unsigned char *name,
+ struct rsnd_kctrl_cfg_s *_cfg,
+ void (*update)(struct rsnd_mod *mod),
+ const char * const *texts,
+ u32 max);
+
+/*
* R-Car SRC
*/
int rsnd_src_probe(struct platform_device *pdev,
@@ -395,10 +477,11 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
struct rsnd_dai *rdai,
int use_busif);
int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai,
- int use_busif);
-int rsnd_src_enable_ssi_irq(struct rsnd_mod *ssi_mod,
+ struct rsnd_dai *rdai);
+int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod,
struct rsnd_dai *rdai);
+int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod,
+ struct rsnd_dai *rdai);
#define rsnd_src_nr(priv) ((priv)->src_nr)
@@ -410,6 +493,7 @@ int rsnd_ssi_probe(struct platform_device *pdev,
struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
/*
* R-Car DVC
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 9183e0145503..eede3ac6eed2 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -175,30 +175,47 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
}
int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
- struct rsnd_dai *rdai,
- int use_busif)
+ struct rsnd_dai *rdai)
{
/*
* DMA settings for SSIU
*/
- if (use_busif)
- rsnd_mod_write(ssi_mod, SSI_CTRL, 0);
+ rsnd_mod_write(ssi_mod, SSI_CTRL, 0);
return 0;
}
-int rsnd_src_enable_ssi_irq(struct rsnd_mod *ssi_mod,
+int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod,
struct rsnd_dai *rdai)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
- /* enable PIO interrupt if Gen2 */
- if (rsnd_is_gen2(priv))
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ /* enable SSI interrupt if Gen2 */
+ if (rsnd_ssi_is_dma_mode(ssi_mod))
+ rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0e000000);
+ else
rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);
return 0;
}
+int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod,
+ struct rsnd_dai *rdai)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
+
+ if (rsnd_is_gen1(priv))
+ return 0;
+
+ /* disable SSI interrupt if Gen2 */
+ rsnd_mod_write(ssi_mod, INT_ENABLE, 0x00000000);
+
+ return 0;
+}
+
unsigned int rsnd_src_get_ssi_rate(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
struct snd_pcm_runtime *runtime)
@@ -239,12 +256,6 @@ static int rsnd_src_set_convert_rate(struct rsnd_mod *mod,
rsnd_mod_write(mod, SRC_SWRSR, 0);
rsnd_mod_write(mod, SRC_SWRSR, 1);
- /*
- * Initialize the operation of the SRC internal circuits
- * see rsnd_src_start()
- */
- rsnd_mod_write(mod, SRC_SRCIR, 1);
-
/* Set channel number and output bit length */
rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod));
@@ -269,6 +280,12 @@ static int rsnd_src_init(struct rsnd_mod *mod,
clk_prepare_enable(src->clk);
+ /*
+ * Initialize the operation of the SRC internal circuits
+ * see rsnd_src_start()
+ */
+ rsnd_mod_write(mod, SRC_SRCIR, 1);
+
return 0;
}
@@ -282,32 +299,20 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_src_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_start(struct rsnd_mod *mod)
{
- struct rsnd_src *src = rsnd_mod_to_src(mod);
-
/*
* Cancel the initialization and operate the SRC function
- * see rsnd_src_set_convert_rate()
+ * see rsnd_src_init()
*/
rsnd_mod_write(mod, SRC_SRCIR, 0);
- if (rsnd_src_convert_rate(src))
- rsnd_mod_write(mod, SRC_ROUTE_MODE0, 1);
-
return 0;
}
-
-static int rsnd_src_stop(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_src_stop(struct rsnd_mod *mod)
{
- struct rsnd_src *src = rsnd_mod_to_src(mod);
-
- if (rsnd_src_convert_rate(src))
- rsnd_mod_write(mod, SRC_ROUTE_MODE0, 0);
-
+ /* nothing to do */
return 0;
}
@@ -414,6 +419,7 @@ static int rsnd_src_set_convert_timing_gen1(struct rsnd_mod *mod,
static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod,
struct rsnd_dai *rdai)
{
+ struct rsnd_src *src = rsnd_mod_to_src(mod);
int ret;
ret = rsnd_src_set_convert_rate(mod, rdai);
@@ -427,6 +433,10 @@ static int rsnd_src_set_convert_rate_gen1(struct rsnd_mod *mod,
rsnd_mod_write(mod, SRC_MNFSR,
rsnd_mod_read(mod, SRC_IFSVR) / 100 * 98);
+ /* Gen1/Gen2 are not compatible */
+ if (rsnd_src_convert_rate(src))
+ rsnd_mod_write(mod, SRC_ROUTE_MODE0, 1);
+
/* no SRC_BFSSR settings, since SRC_SRCCR::BUFMD is 0 */
return 0;
@@ -438,7 +448,8 @@ static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_dbg(dev, "%s (Gen1) is probed\n", rsnd_mod_name(mod));
+ dev_dbg(dev, "%s[%d] (Gen1) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return 0;
}
@@ -474,7 +485,7 @@ static int rsnd_src_start_gen1(struct rsnd_mod *mod,
rsnd_mod_bset(mod, SRC_ROUTE_CTRL, (1 << id), (1 << id));
- return rsnd_src_start(mod, rdai);
+ return rsnd_src_start(mod);
}
static int rsnd_src_stop_gen1(struct rsnd_mod *mod,
@@ -484,7 +495,7 @@ static int rsnd_src_stop_gen1(struct rsnd_mod *mod,
rsnd_mod_bset(mod, SRC_ROUTE_CTRL, (1 << id), 0);
- return rsnd_src_stop(mod, rdai);
+ return rsnd_src_stop(mod);
}
static struct rsnd_mod_ops rsnd_src_gen1_ops = {
@@ -507,16 +518,17 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct rsnd_src *src = rsnd_mod_to_src(mod);
+ u32 convert_rate = rsnd_src_convert_rate(src);
uint ratio;
int ret;
	/* ratios from 6x down to 1/6x are ample for SRC_BSDSR */
- if (!rsnd_src_convert_rate(src))
+ if (!convert_rate)
ratio = 0;
- else if (rsnd_src_convert_rate(src) > runtime->rate)
- ratio = 100 * rsnd_src_convert_rate(src) / runtime->rate;
+ else if (convert_rate > runtime->rate)
+ ratio = 100 * convert_rate / runtime->rate;
else
- ratio = 100 * runtime->rate / rsnd_src_convert_rate(src);
+ ratio = 100 * runtime->rate / convert_rate;
if (ratio > 600) {
dev_err(dev, "FSO/FSI ratio error\n");
@@ -529,6 +541,11 @@ static int rsnd_src_set_convert_rate_gen2(struct rsnd_mod *mod,
rsnd_mod_write(mod, SRC_SRCCR, 0x00011110);
+ if (convert_rate) {
+ /* Gen1/Gen2 are not compatible */
+ rsnd_mod_write(mod, SRC_ROUTE_MODE0, 1);
+ }
+
switch (rsnd_mod_id(mod)) {
case 5:
case 6:
@@ -578,9 +595,11 @@ static int rsnd_src_probe_gen2(struct rsnd_mod *mod,
rsnd_info_is_playback(priv, src),
src->info->dma_id);
if (ret < 0)
- dev_err(dev, "SRC DMA failed\n");
-
- dev_dbg(dev, "%s (Gen2) is probed\n", rsnd_mod_name(mod));
+ dev_err(dev, "%s[%d] (Gen2) failed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+ else
+ dev_dbg(dev, "%s[%d] (Gen2) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
@@ -624,7 +643,7 @@ static int rsnd_src_start_gen2(struct rsnd_mod *mod,
rsnd_mod_write(mod, SRC_CTRL, val);
- return rsnd_src_start(mod, rdai);
+ return rsnd_src_start(mod);
}
static int rsnd_src_stop_gen2(struct rsnd_mod *mod,
@@ -636,7 +655,7 @@ static int rsnd_src_stop_gen2(struct rsnd_mod *mod,
rsnd_dma_stop(rsnd_mod_to_dma(&src->mod));
- return rsnd_src_stop(mod, rdai);
+ return rsnd_src_stop(mod);
}
static struct rsnd_mod_ops rsnd_src_gen2_ops = {
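
For context on the FSO/FSI check kept above: the ratio between input and output rate is expressed as a percentage and anything over 600, i.e. more than a 6x conversion up or down, is rejected before SRC_BSDSR is chosen. A stand-alone sketch of the same test; the function name is illustrative only:

#include <stdio.h>

/* same ratio test as rsnd_src_set_convert_rate_gen2(): stay within 6x up/down */
static int src_ratio_ok(unsigned int convert_rate, unsigned int runtime_rate)
{
	unsigned int ratio;

	if (!convert_rate)
		ratio = 0;
	else if (convert_rate > runtime_rate)
		ratio = 100 * convert_rate / runtime_rate;
	else
		ratio = 100 * runtime_rate / convert_rate;

	return ratio <= 600;
}

int main(void)
{
	printf("48000 -> 8000  : %s\n", src_ratio_ok(8000, 48000)  ? "ok" : "rejected"); /* 600: ok */
	printf("192000 -> 8000 : %s\n", src_ratio_ok(8000, 192000) ? "ok" : "rejected"); /* 2400: rejected */
	return 0;
}
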
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 34e84009162b..3844fbef4664 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -68,7 +68,6 @@ struct rsnd_ssi {
struct rsnd_dai *rdai;
u32 cr_own;
u32 cr_clk;
- u32 cr_etc;
int err;
unsigned int usrcnt;
unsigned int rate;
@@ -83,7 +82,7 @@ struct rsnd_ssi {
#define rsnd_ssi_nr(priv) ((priv)->ssi_nr)
#define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
#define rsnd_dma_to_ssi(dma) rsnd_mod_to_ssi(rsnd_dma_to_mod(dma))
-#define rsnd_ssi_pio_available(ssi) ((ssi)->info->pio_irq > 0)
+#define rsnd_ssi_pio_available(ssi) ((ssi)->info->irq > 0)
#define rsnd_ssi_dma_available(ssi) \
rsnd_dma_available(rsnd_mod_to_dma(&(ssi)->mod))
#define rsnd_ssi_clk_from_parent(ssi) ((ssi)->parent)
@@ -96,6 +95,9 @@ static int rsnd_ssi_use_busif(struct rsnd_mod *mod)
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
int use_busif = 0;
+ if (!rsnd_ssi_is_dma_mode(mod))
+ return 0;
+
if (!(rsnd_ssi_mode_flags(ssi) & RSND_SSI_NO_BUSIF))
use_busif = 1;
if (rsnd_io_to_mod_src(io))
@@ -159,7 +161,8 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
ssi->cr_clk = FORCE | SWL_32 |
SCKD | SWSD | CKDV(j);
- dev_dbg(dev, "ssi%d outputs %u Hz\n",
+ dev_dbg(dev, "%s[%d] outputs %u Hz\n",
+ rsnd_mod_name(&ssi->mod),
rsnd_mod_id(&ssi->mod), rate);
return 0;
@@ -184,6 +187,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
{
struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
struct device *dev = rsnd_priv_to_dev(priv);
+ u32 cr_mode;
u32 cr;
if (0 == ssi->usrcnt) {
@@ -197,16 +201,29 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
}
}
+ cr_mode = rsnd_ssi_is_dma_mode(&ssi->mod) ?
+ DMEN : /* DMA : enable DMA */
+ DIEN; /* PIO : enable Data interrupt */
+
+
cr = ssi->cr_own |
ssi->cr_clk |
- ssi->cr_etc |
- EN;
+ cr_mode |
+ UIEN | OIEN | EN;
rsnd_mod_write(&ssi->mod, SSICR, cr);
+ /* enable WS continue */
+ if (rsnd_dai_is_clk_master(rdai))
+ rsnd_mod_write(&ssi->mod, SSIWSR, CONT);
+
+ /* clear error status */
+ rsnd_mod_write(&ssi->mod, SSISR, 0);
+
ssi->usrcnt++;
- dev_dbg(dev, "ssi%d hw started\n", rsnd_mod_id(&ssi->mod));
+ dev_dbg(dev, "%s[%d] hw started\n",
+ rsnd_mod_name(&ssi->mod), rsnd_mod_id(&ssi->mod));
}
static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
@@ -249,7 +266,8 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
clk_disable_unprepare(ssi->clk);
}
- dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
+ dev_dbg(dev, "%s[%d] hw stopped\n",
+ rsnd_mod_name(&ssi->mod), rsnd_mod_id(&ssi->mod));
}
/*
@@ -334,25 +352,54 @@ static void rsnd_ssi_record_error(struct rsnd_ssi *ssi, u32 status)
}
}
-/*
- * SSI PIO
- */
-static irqreturn_t rsnd_ssi_pio_interrupt(int irq, void *data)
+static int rsnd_ssi_start(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+
+ rsnd_src_ssiu_start(mod, rdai, rsnd_ssi_use_busif(mod));
+
+ rsnd_ssi_hw_start(ssi, rdai, io);
+
+ rsnd_src_ssi_irq_enable(mod, rdai);
+
+ return 0;
+}
+
+static int rsnd_ssi_stop(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ rsnd_src_ssi_irq_disable(mod, rdai);
+
+ rsnd_ssi_record_error(ssi, rsnd_mod_read(mod, SSISR));
+
+ rsnd_ssi_hw_stop(ssi, rdai);
+
+ rsnd_src_ssiu_stop(mod, rdai);
+
+ return 0;
+}
+
+static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
{
struct rsnd_ssi *ssi = data;
+ struct rsnd_dai *rdai = ssi->rdai;
struct rsnd_mod *mod = &ssi->mod;
struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
u32 status = rsnd_mod_read(mod, SSISR);
- irqreturn_t ret = IRQ_NONE;
- if (io && (status & DIRQ)) {
- struct rsnd_dai *rdai = ssi->rdai;
+ if (!io)
+ return IRQ_NONE;
+
+ /* PIO only */
+ if (status & DIRQ) {
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
rsnd_dai_pointer_offset(io, 0));
- rsnd_ssi_record_error(ssi, status);
-
/*
	 * 8/16/32 bit data can be accessed via the TDR/RDR register
* directly as 32bit data
@@ -364,73 +411,60 @@ static irqreturn_t rsnd_ssi_pio_interrupt(int irq, void *data)
*buf = rsnd_mod_read(mod, SSIRDR);
rsnd_dai_pointer_update(io, sizeof(*buf));
+ }
+
+ /* PIO / DMA */
+ if (status & (UIRQ | OIRQ)) {
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ /*
+ * restart SSI
+ */
+ rsnd_ssi_stop(mod, rdai);
+ rsnd_ssi_start(mod, rdai);
- ret = IRQ_HANDLED;
+ dev_dbg(dev, "%s[%d] restart\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
}
- return ret;
+ rsnd_ssi_record_error(ssi, status);
+
+ return IRQ_HANDLED;
}
+/*
+ * SSI PIO
+ */
static int rsnd_ssi_pio_probe(struct rsnd_mod *mod,
struct rsnd_dai *rdai)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int irq = ssi->info->pio_irq;
int ret;
- ret = devm_request_irq(dev, irq,
- rsnd_ssi_pio_interrupt,
+ ret = devm_request_irq(dev, ssi->info->irq,
+ rsnd_ssi_interrupt,
IRQF_SHARED,
dev_name(dev), ssi);
if (ret)
- dev_err(dev, "SSI request interrupt failed\n");
-
- dev_dbg(dev, "%s (PIO) is probed\n", rsnd_mod_name(mod));
+ dev_err(dev, "%s[%d] (PIO) request interrupt failed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+ else
+ dev_dbg(dev, "%s[%d] (PIO) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
-static int rsnd_ssi_pio_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
-
- /* enable PIO IRQ */
- ssi->cr_etc = UIEN | OIEN | DIEN;
-
- rsnd_src_ssiu_start(mod, rdai, 0);
-
- rsnd_src_enable_ssi_irq(mod, rdai);
-
- rsnd_ssi_hw_start(ssi, rdai, io);
-
- return 0;
-}
-
-static int rsnd_ssi_pio_stop(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-
- ssi->cr_etc = 0;
-
- rsnd_ssi_hw_stop(ssi, rdai);
-
- rsnd_src_ssiu_stop(mod, rdai, 0);
-
- return 0;
-}
-
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_pio_probe,
.init = rsnd_ssi_init,
.quit = rsnd_ssi_quit,
- .start = rsnd_ssi_pio_start,
- .stop = rsnd_ssi_pio_stop,
+ .start = rsnd_ssi_start,
+ .stop = rsnd_ssi_stop,
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -442,15 +476,28 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
int dma_id = ssi->info->dma_id;
int ret;
+ ret = devm_request_irq(dev, ssi->info->irq,
+ rsnd_ssi_interrupt,
+ IRQF_SHARED,
+ dev_name(dev), ssi);
+ if (ret)
+ goto rsnd_ssi_dma_probe_fail;
+
ret = rsnd_dma_init(
priv, rsnd_mod_to_dma(mod),
rsnd_info_is_playback(priv, ssi),
dma_id);
+ if (ret)
+ goto rsnd_ssi_dma_probe_fail;
- if (ret < 0)
- dev_err(dev, "SSI DMA failed\n");
+ dev_dbg(dev, "%s[%d] (DMA) is probed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return ret;
- dev_dbg(dev, "%s (DMA) is probed\n", rsnd_mod_name(mod));
+rsnd_ssi_dma_probe_fail:
+	dev_err(dev, "%s[%d] (DMA) probe failed\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
return ret;
}
@@ -458,30 +505,48 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
struct rsnd_dai *rdai)
{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int irq = ssi->info->irq;
+
rsnd_dma_quit(rsnd_mod_to_priv(mod), rsnd_mod_to_dma(mod));
+ /* PIO will request IRQ again */
+ devm_free_irq(dev, irq, ssi);
+
return 0;
}
-static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
- struct rsnd_dai *rdai)
+static int rsnd_ssi_fallback(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
- struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
- /* enable DMA transfer */
- ssi->cr_etc = DMEN;
+ /*
+ * fallback to PIO
+ *
+ * SSI .probe might be called again.
+ * see
+ * rsnd_rdai_continuance_probe()
+ */
+ mod->ops = &rsnd_ssi_pio_ops;
- rsnd_src_ssiu_start(mod, rdai, rsnd_ssi_use_busif(mod));
+ dev_info(dev, "%s[%d] fallback to PIO mode\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
- rsnd_dma_start(dma);
+ return 0;
+}
- rsnd_ssi_hw_start(ssi, ssi->rdai, io);
+static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai)
+{
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
- /* enable WS continue */
- if (rsnd_dai_is_clk_master(rdai))
- rsnd_mod_write(&ssi->mod, SSIWSR, CONT);
+ rsnd_ssi_start(mod, rdai);
+
+ rsnd_dma_start(dma);
return 0;
}
@@ -489,18 +554,11 @@ static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
static int rsnd_ssi_dma_stop(struct rsnd_mod *mod,
struct rsnd_dai *rdai)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
-
- ssi->cr_etc = 0;
-
- rsnd_ssi_record_error(ssi, rsnd_mod_read(mod, SSISR));
-
- rsnd_ssi_hw_stop(ssi, rdai);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
rsnd_dma_stop(dma);
- rsnd_src_ssiu_stop(mod, rdai, 1);
+ rsnd_ssi_stop(mod, rdai);
return 0;
}
@@ -519,8 +577,15 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.quit = rsnd_ssi_quit,
.start = rsnd_ssi_dma_start,
.stop = rsnd_ssi_dma_stop,
+ .fallback = rsnd_ssi_fallback,
};
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
+{
+ return mod->ops == &rsnd_ssi_dma_ops;
+}
+
+
/*
* Non SSI
*/
@@ -614,7 +679,7 @@ static void rsnd_of_parse_ssi(struct platform_device *pdev,
/*
* irq
*/
- ssi_info->pio_irq = irq_of_parse_and_map(np, 0);
+ ssi_info->irq = irq_of_parse_and_map(np, 0);
/*
* DMA
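
rsnd_ssi_fallback() above switches an SSI from DMA to PIO simply by repointing mod->ops at the PIO ops table, and rsnd_ssi_is_dma_mode() identifies the mode by comparing that pointer, so no separate "dma or pio" flag is needed. A stand-alone toy model of the pattern; the types and names are invented, and -11 merely stands in for -EAGAIN:

#include <stdio.h>

struct mod;

struct mod_ops {
	const char *name;
	int (*probe)(struct mod *m);
};

struct mod {
	const struct mod_ops *ops;
};

static int dma_probe(struct mod *m) { (void)m; return -11; /* no DMA channel */ }
static int pio_probe(struct mod *m) { (void)m; return 0; }

static const struct mod_ops dma_ops = { "dma", dma_probe };
static const struct mod_ops pio_ops = { "pio", pio_probe };

/* same idea as rsnd_ssi_is_dma_mode(): the ops table identifies the mode */
static int is_dma_mode(const struct mod *m)
{
	return m->ops == &dma_ops;
}

int main(void)
{
	struct mod ssi = { .ops = &dma_ops };

	if (ssi.ops->probe(&ssi) < 0) {
		/* fallback: repoint the ops, then probe again (cf. rsnd_ssi_fallback) */
		ssi.ops = &pio_ops;
		ssi.ops->probe(&ssi);
	}

	printf("running in %s mode (dma=%d)\n", ssi.ops->name, is_dma_mode(&ssi));
	return 0;
}

The design choice keeps the mode decision in one place: the ops pointer already is the flag.
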
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index d55babee14f8..abb0d956231c 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -845,7 +845,6 @@ static int siu_remove(struct platform_device *pdev)
static struct platform_driver siu_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "siu-pcm-audio",
},
.probe = siu_probe,
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index e889405ebd38..ab13146e4f82 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -398,7 +398,6 @@ static int sh4_soc_dai_remove(struct platform_device *pdev)
static struct platform_driver sh4_ssi_driver = {
.driver = {
.name = "sh4-ssi-dai",
- .owner = THIS_MODULE,
},
.probe = sh4_soc_dai_probe,
diff --git a/sound/soc/sirf/sirf-audio-port.c b/sound/soc/sirf/sirf-audio-port.c
index b4afa31b2bc1..3f2cce03275c 100644
--- a/sound/soc/sirf/sirf-audio-port.c
+++ b/sound/soc/sirf/sirf-audio-port.c
@@ -74,7 +74,6 @@ MODULE_DEVICE_TABLE(of, sirf_audio_port_of_match);
static struct platform_driver sirf_audio_port_driver = {
.driver = {
.name = "sirf-audio-port",
- .owner = THIS_MODULE,
.of_match_table = sirf_audio_port_of_match,
},
.probe = sirf_audio_port_probe,
diff --git a/sound/soc/sirf/sirf-audio.c b/sound/soc/sirf/sirf-audio.c
index ecef51021653..94ea152e0362 100644
--- a/sound/soc/sirf/sirf-audio.c
+++ b/sound/soc/sirf/sirf-audio.c
@@ -143,7 +143,6 @@ MODULE_DEVICE_TABLE(of, sirf_audio_of_match);
static struct platform_driver sirf_audio_driver = {
.driver = {
.name = "sirf-audio-card",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = sirf_audio_of_match,
},
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 186dc7f33a55..45fc06c0e0e5 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -422,7 +422,6 @@ static const struct dev_pm_ops sirf_usp_pcm_pm_ops = {
static struct platform_driver sirf_usp_pcm_driver = {
.driver = {
.name = "sirf-usp-pcm",
- .owner = THIS_MODULE,
.of_match_table = sirf_usp_pcm_of_match,
.pm = &sirf_usp_pcm_pm_ops,
},
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
new file mode 100644
index 000000000000..2e10e9a38376
--- /dev/null
+++ b/sound/soc/soc-ac97.c
@@ -0,0 +1,256 @@
+/*
+ * soc-ac97.c -- ALSA SoC Audio Layer AC97 support
+ *
+ * Copyright 2005 Wolfson Microelectronics PLC.
+ * Copyright 2005 Openedhand Ltd.
+ * Copyright (C) 2010 Slimlogic Ltd.
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ * with code, comments and ideas from :-
+ * Richard Purdie <richard@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+#include <sound/ac97_codec.h>
+#include <sound/soc.h>
+
+struct snd_ac97_reset_cfg {
+ struct pinctrl *pctl;
+ struct pinctrl_state *pstate_reset;
+ struct pinctrl_state *pstate_warm_reset;
+ struct pinctrl_state *pstate_run;
+ int gpio_sdata;
+ int gpio_sync;
+ int gpio_reset;
+};
+
+static struct snd_ac97_bus soc_ac97_bus = {
+ .ops = NULL, /* Gets initialized in snd_soc_set_ac97_ops() */
+};
+
+static void soc_ac97_device_release(struct device *dev)
+{
+ kfree(to_ac97_t(dev));
+}
+
+/**
+ * snd_soc_new_ac97_codec - initialise AC97 device
+ * @codec: audio codec
+ *
+ * Initialises AC97 codec resources for use by ad-hoc devices only.
+ */
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+{
+ struct snd_ac97 *ac97;
+ int ret;
+
+ ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
+ if (ac97 == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ac97->bus = &soc_ac97_bus;
+ ac97->num = 0;
+
+ ac97->dev.bus = &ac97_bus_type;
+ ac97->dev.parent = codec->component.card->dev;
+ ac97->dev.release = soc_ac97_device_release;
+
+ dev_set_name(&ac97->dev, "%d-%d:%s",
+ codec->component.card->snd_card->number, 0,
+ codec->component.name);
+
+ ret = device_register(&ac97->dev);
+ if (ret) {
+ put_device(&ac97->dev);
+ return ERR_PTR(ret);
+ }
+
+ return ac97;
+}
+EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
+
+/**
+ * snd_soc_free_ac97_codec - free AC97 codec device
+ * @ac97: AC97 device to free
+ *
+ * Frees AC97 codec device resources.
+ */
+void snd_soc_free_ac97_codec(struct snd_ac97 *ac97)
+{
+ device_del(&ac97->dev);
+ ac97->bus = NULL;
+ put_device(&ac97->dev);
+}
+EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
+
+static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
+
+static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
+{
+ struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
+
+ udelay(10);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+ msleep(2);
+}
+
+static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
+{
+ struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
+
+ udelay(10);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+ msleep(2);
+}
+
+static int snd_soc_ac97_parse_pinctl(struct device *dev,
+ struct snd_ac97_reset_cfg *cfg)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *state;
+ int gpio;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p)) {
+ dev_err(dev, "Failed to get pinctrl\n");
+ return PTR_ERR(p);
+ }
+ cfg->pctl = p;
+
+ state = pinctrl_lookup_state(p, "ac97-reset");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-reset\n");
+ return PTR_ERR(state);
+ }
+ cfg->pstate_reset = state;
+
+ state = pinctrl_lookup_state(p, "ac97-warm-reset");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
+ return PTR_ERR(state);
+ }
+ cfg->pstate_warm_reset = state;
+
+ state = pinctrl_lookup_state(p, "ac97-running");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-running\n");
+ return PTR_ERR(state);
+ }
+ cfg->pstate_run = state;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-sync gpio\n");
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link sync");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-sync gpio\n");
+ return ret;
+ }
+ cfg->gpio_sync = gpio;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-sdata gpio\n");
+ return ret;
+ }
+ cfg->gpio_sdata = gpio;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-reset gpio\n");
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link reset");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-reset gpio\n");
+ return ret;
+ }
+ cfg->gpio_reset = gpio;
+
+ return 0;
+}
+
+struct snd_ac97_bus_ops *soc_ac97_ops;
+EXPORT_SYMBOL_GPL(soc_ac97_ops);
+
+int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
+{
+ if (ops == soc_ac97_ops)
+ return 0;
+
+ if (soc_ac97_ops && ops)
+ return -EBUSY;
+
+ soc_ac97_ops = ops;
+ soc_ac97_bus.ops = ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
+
+/**
+ * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
+ *
+ * This function sets the reset and warm_reset properties of ops and parses
+ * the device node of pdev to get pinctrl states and gpio numbers to use.
+ */
+int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snd_ac97_reset_cfg cfg;
+ int ret;
+
+ ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_set_ac97_ops(ops);
+ if (ret)
+ return ret;
+
+ ops->warm_reset = snd_soc_ac97_warm_reset;
+ ops->reset = snd_soc_ac97_reset;
+
+ snd_ac97_rst_cfg = cfg;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
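
snd_soc_set_ac97_ops() allows only one live set of bus ops at a time: re-registering the same ops is a no-op, registering different ones fails with -EBUSY, and NULL clears the slot so another controller can take over. A stand-alone model of that guard; the struct and names are invented for the sketch:

#include <errno.h>
#include <stdio.h>

struct bus_ops { const char *name; };

static const struct bus_ops *current_ops;

/* same rule as snd_soc_set_ac97_ops(): one owner at a time, NULL releases it */
static int set_ac97_ops(const struct bus_ops *ops)
{
	if (ops == current_ops)
		return 0;

	if (current_ops && ops)
		return -EBUSY;

	current_ops = ops;
	return 0;
}

int main(void)
{
	static const struct bus_ops a = { "controller-a" }, b = { "controller-b" };

	printf("%d\n", set_ac97_ops(&a));   /*   0: registered */
	printf("%d\n", set_ac97_ops(&b));   /* -16: busy, a still owns the bus */
	printf("%d\n", set_ac97_ops(NULL)); /*   0: released */
	printf("%d\n", set_ac97_ops(&b));   /*   0: registered */
	return 0;
}
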
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index a9f82b5aba9d..07f43356f963 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -15,56 +15,6 @@
#include <linux/export.h>
#include <linux/slab.h>
-#include <trace/events/asoc.h>
-
-static bool snd_soc_set_cache_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size)
-{
- switch (word_size) {
- case 1: {
- u8 *cache = base;
- if (cache[idx] == val)
- return true;
- cache[idx] = val;
- break;
- }
- case 2: {
- u16 *cache = base;
- if (cache[idx] == val)
- return true;
- cache[idx] = val;
- break;
- }
- default:
- WARN(1, "Invalid word_size %d\n", word_size);
- break;
- }
- return false;
-}
-
-static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
- unsigned int word_size)
-{
- if (!base)
- return -1;
-
- switch (word_size) {
- case 1: {
- const u8 *cache = base;
- return cache[idx];
- }
- case 2: {
- const u16 *cache = base;
- return cache[idx];
- }
- default:
- WARN(1, "Invalid word_size %d\n", word_size);
- break;
- }
- /* unreachable */
- return -1;
-}
-
int snd_soc_cache_init(struct snd_soc_codec *codec)
{
const struct snd_soc_codec_driver *codec_drv = codec->driver;
@@ -75,8 +25,6 @@ int snd_soc_cache_init(struct snd_soc_codec *codec)
if (!reg_size)
return 0;
- mutex_init(&codec->cache_rw_mutex);
-
dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n",
codec->component.name);
@@ -103,100 +51,3 @@ int snd_soc_cache_exit(struct snd_soc_codec *codec)
codec->reg_cache = NULL;
return 0;
}
-
-/**
- * snd_soc_cache_read: Fetch the value of a given register from the cache.
- *
- * @codec: CODEC to configure.
- * @reg: The register index.
- * @value: The value to be returned.
- */
-int snd_soc_cache_read(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int *value)
-{
- if (!value)
- return -EINVAL;
-
- mutex_lock(&codec->cache_rw_mutex);
- if (!ZERO_OR_NULL_PTR(codec->reg_cache))
- *value = snd_soc_get_cache_val(codec->reg_cache, reg,
- codec->driver->reg_word_size);
- mutex_unlock(&codec->cache_rw_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_cache_read);
-
-/**
- * snd_soc_cache_write: Set the value of a given register in the cache.
- *
- * @codec: CODEC to configure.
- * @reg: The register index.
- * @value: The new register value.
- */
-int snd_soc_cache_write(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- mutex_lock(&codec->cache_rw_mutex);
- if (!ZERO_OR_NULL_PTR(codec->reg_cache))
- snd_soc_set_cache_val(codec->reg_cache, reg, value,
- codec->driver->reg_word_size);
- mutex_unlock(&codec->cache_rw_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_cache_write);
-
-static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
-{
- int i;
- int ret;
- const struct snd_soc_codec_driver *codec_drv;
- unsigned int val;
-
- codec_drv = codec->driver;
- for (i = 0; i < codec_drv->reg_cache_size; ++i) {
- ret = snd_soc_cache_read(codec, i, &val);
- if (ret)
- return ret;
- if (codec_drv->reg_cache_default)
- if (snd_soc_get_cache_val(codec_drv->reg_cache_default,
- i, codec_drv->reg_word_size) == val)
- continue;
-
- ret = snd_soc_write(codec, i, val);
- if (ret)
- return ret;
- dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
- i, val);
- }
- return 0;
-}
-
-/**
- * snd_soc_cache_sync: Sync the register cache with the hardware.
- *
- * @codec: CODEC to configure.
- *
- * Any registers that should not be synced should be marked as
- * volatile. In general drivers can choose not to use the provided
- * syncing functionality if they so require.
- */
-int snd_soc_cache_sync(struct snd_soc_codec *codec)
-{
- const char *name = "flat";
- int ret;
-
- if (!codec->cache_sync)
- return 0;
-
- dev_dbg(codec->dev, "ASoC: Syncing cache for %s codec\n",
- codec->component.name);
- trace_snd_soc_cache_sync(codec, name, "start");
- ret = snd_soc_flat_cache_sync(codec);
- if (!ret)
- codec->cache_sync = 0;
- trace_snd_soc_cache_sync(codec, name, "end");
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
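The flat register cache and its read/write/sync helpers are removed here; CODEC drivers are expected to lean on regmap's register cache instead (the suspend path in soc-core.c already calls regcache_mark_dirty() when a regmap is present). A minimal sketch, not taken from this patch, of how a codec driver might wire up an equivalent regmap cache; the register layout, defaults and names below are illustrative assumptions:

#include <linux/regmap.h>
#include <sound/soc.h>

/* Assumed register map; cache_type replaces the removed ASoC flat cache. */
static const struct reg_default example_reg_defaults[] = {
	{ 0x00, 0x0000 },	/* assumed power register default */
	{ 0x01, 0x00c0 },	/* assumed volume register default */
};

static const struct regmap_config example_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 16,
	.max_register		= 0x3f,
	.reg_defaults		= example_reg_defaults,
	.num_reg_defaults	= ARRAY_SIZE(example_reg_defaults),
	.cache_type		= REGCACHE_RBTREE,
};

/* Resume resyncs the regcache instead of calling snd_soc_cache_sync(). */
static int example_codec_resume(struct snd_soc_codec *codec)
{
	return regcache_sync(codec->component.regmap);
}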
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index cecfab3cc948..590a82f01d0b 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -258,10 +258,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
- else
- dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
@@ -456,11 +453,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
if (ret < 0)
goto out;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
- else
- dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
-
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
out:
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b60ff56ebc0f..985052b3fbed 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -34,9 +34,6 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <sound/ac97_codec.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
@@ -69,16 +66,6 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
-struct snd_ac97_reset_cfg {
- struct pinctrl *pctl;
- struct pinctrl_state *pstate_reset;
- struct pinctrl_state *pstate_warm_reset;
- struct pinctrl_state *pstate_run;
- int gpio_sdata;
- int gpio_sync;
- int gpio_reset;
-};
-
/* returns the minimum number of bytes needed to represent
* a particular given value */
static int min_bytes_needed(unsigned long val)
@@ -309,9 +296,6 @@ static void soc_init_codec_debugfs(struct snd_soc_component *component)
{
struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- debugfs_create_bool("cache_sync", 0444, codec->component.debugfs_root,
- &codec->cache_sync);
-
codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
codec->component.debugfs_root,
codec, &codec_reg_fops);
@@ -499,40 +483,6 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
-#ifdef CONFIG_SND_SOC_AC97_BUS
-/* unregister ac97 codec */
-static int soc_ac97_dev_unregister(struct snd_soc_codec *codec)
-{
- if (codec->ac97->dev.bus)
- device_unregister(&codec->ac97->dev);
- return 0;
-}
-
-/* stop no dev release warning */
-static void soc_ac97_device_release(struct device *dev){}
-
-/* register ac97 codec to bus */
-static int soc_ac97_dev_register(struct snd_soc_codec *codec)
-{
- int err;
-
- codec->ac97->dev.bus = &ac97_bus_type;
- codec->ac97->dev.parent = codec->component.card->dev;
- codec->ac97->dev.release = soc_ac97_device_release;
-
- dev_set_name(&codec->ac97->dev, "%d-%d:%s",
- codec->component.card->snd_card->number, 0,
- codec->component.name);
- err = device_register(&codec->ac97->dev);
- if (err < 0) {
- dev_err(codec->dev, "ASoC: Can't register ac97 bus\n");
- codec->ac97->dev.bus = NULL;
- return err;
- }
- return 0;
-}
-#endif
-
static void codec2codec_close_delayed_work(struct work_struct *work)
{
/* Currently nothing to do for c2c links
@@ -592,17 +542,12 @@ int snd_soc_suspend(struct device *dev)
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
- struct snd_soc_platform *platform = card->rtd[i].platform;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
- if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control)
+ if (cpu_dai->driver->suspend && !cpu_dai->driver->bus_control)
cpu_dai->driver->suspend(cpu_dai);
- if (platform->driver->suspend && !platform->suspended) {
- platform->driver->suspend(cpu_dai);
- platform->suspended = 1;
- }
}
/* close any waiting streams and save state */
@@ -629,8 +574,8 @@ int snd_soc_suspend(struct device *dev)
SND_SOC_DAPM_STREAM_SUSPEND);
}
- /* Recheck all analogue paths too */
- dapm_mark_io_dirty(&card->dapm);
+ /* Recheck all endpoints too, their state is affected by suspend */
+ dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
/* suspend all CODECs */
@@ -656,7 +601,6 @@ int snd_soc_suspend(struct device *dev)
if (codec->driver->suspend)
codec->driver->suspend(codec);
codec->suspended = 1;
- codec->cache_sync = 1;
if (codec->component.regmap)
regcache_mark_dirty(codec->component.regmap);
/* deactivate pins to sleep state */
@@ -676,7 +620,7 @@ int snd_soc_suspend(struct device *dev)
if (card->rtd[i].dai_link->ignore_suspend)
continue;
- if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control)
+ if (cpu_dai->driver->suspend && cpu_dai->driver->bus_control)
cpu_dai->driver->suspend(cpu_dai);
/* deactivate pins to sleep state */
@@ -712,14 +656,14 @@ static void soc_resume_deferred(struct work_struct *work)
if (card->resume_pre)
card->resume_pre(card);
- /* resume AC97 DAIs */
+ /* resume control bus DAIs */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
- if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control)
+ if (cpu_dai->driver->resume && cpu_dai->driver->bus_control)
cpu_dai->driver->resume(cpu_dai);
}
@@ -775,17 +719,12 @@ static void soc_resume_deferred(struct work_struct *work)
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
- struct snd_soc_platform *platform = card->rtd[i].platform;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
- if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control)
+ if (cpu_dai->driver->resume && !cpu_dai->driver->bus_control)
cpu_dai->driver->resume(cpu_dai);
- if (platform->driver->resume && platform->suspended) {
- platform->driver->resume(cpu_dai);
- platform->suspended = 0;
- }
}
if (card->resume_post)
@@ -796,8 +735,8 @@ static void soc_resume_deferred(struct work_struct *work)
/* userspace can access us now we are back as we were before */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
- /* Recheck all analogue paths too */
- dapm_mark_io_dirty(&card->dapm);
+ /* Recheck all endpoints too, their state is affected by suspend */
+ dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
}
@@ -805,7 +744,8 @@ static void soc_resume_deferred(struct work_struct *work)
int snd_soc_resume(struct device *dev)
{
struct snd_soc_card *card = dev_get_drvdata(dev);
- int i, ac97_control = 0;
+ bool bus_control = false;
+ int i;
/* If the card is not initialized yet there is nothing to do */
if (!card->instantiated)
@@ -828,17 +768,18 @@ int snd_soc_resume(struct device *dev)
}
}
- /* AC97 devices might have other drivers hanging off them so
- * need to resume immediately. Other drivers don't have that
- * problem and may take a substantial amount of time to resume
+ /*
+ * DAIs that also act as the control bus master might have other drivers
+ * hanging off them so need to resume immediately. Other drivers don't
+ * have that problem and may take a substantial amount of time to resume
* due to I/O costs and anti-pop so handle them out of line.
*/
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
- ac97_control |= cpu_dai->driver->ac97_control;
+ bus_control |= cpu_dai->driver->bus_control;
}
- if (ac97_control) {
- dev_dbg(dev, "ASoC: Resuming AC97 immediately\n");
+ if (bus_control) {
+ dev_dbg(dev, "ASoC: Resuming control bus master immediately\n");
soc_resume_deferred(&card->deferred_resume_work);
} else {
dev_dbg(dev, "ASoC: Scheduling resume work\n");
@@ -1251,25 +1192,22 @@ static int soc_probe_link_components(struct snd_soc_card *card, int num,
return 0;
}
-static int soc_probe_codec_dai(struct snd_soc_card *card,
- struct snd_soc_dai *codec_dai,
- int order)
+static int soc_probe_dai(struct snd_soc_dai *dai, int order)
{
int ret;
- if (!codec_dai->probed && codec_dai->driver->probe_order == order) {
- if (codec_dai->driver->probe) {
- ret = codec_dai->driver->probe(codec_dai);
+ if (!dai->probed && dai->driver->probe_order == order) {
+ if (dai->driver->probe) {
+ ret = dai->driver->probe(dai);
if (ret < 0) {
- dev_err(codec_dai->dev,
- "ASoC: failed to probe CODEC DAI %s: %d\n",
- codec_dai->name, ret);
+ dev_err(dai->dev,
+ "ASoC: failed to probe DAI %s: %d\n",
+ dai->name, ret);
return ret;
}
}
- /* mark codec_dai as probed and add to card dai list */
- codec_dai->probed = 1;
+ dai->probed = 1;
}
return 0;
@@ -1319,40 +1257,22 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
- struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int i, ret;
dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
card->name, num, order);
- /* config components */
- cpu_dai->platform = platform;
- cpu_dai->card = card;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->card = card;
-
/* set default power off timeout */
rtd->pmdown_time = pmdown_time;
- /* probe the cpu_dai */
- if (!cpu_dai->probed &&
- cpu_dai->driver->probe_order == order) {
- if (cpu_dai->driver->probe) {
- ret = cpu_dai->driver->probe(cpu_dai);
- if (ret < 0) {
- dev_err(cpu_dai->dev,
- "ASoC: failed to probe CPU DAI %s: %d\n",
- cpu_dai->name, ret);
- return ret;
- }
- }
- cpu_dai->probed = 1;
- }
+ ret = soc_probe_dai(cpu_dai, order);
+ if (ret)
+ return ret;
/* probe the CODEC DAI */
for (i = 0; i < rtd->num_codecs; i++) {
- ret = soc_probe_codec_dai(card, rtd->codec_dais[i], order);
+ ret = soc_probe_dai(rtd->codec_dais[i], order);
if (ret)
return ret;
}
@@ -1422,84 +1342,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
}
}
- /* add platform data for AC97 devices */
- for (i = 0; i < rtd->num_codecs; i++) {
- if (rtd->codec_dais[i]->driver->ac97_control)
- snd_ac97_dev_add_pdata(rtd->codec_dais[i]->codec->ac97,
- rtd->cpu_dai->ac97_pdata);
- }
-
return 0;
}
-#ifdef CONFIG_SND_SOC_AC97_BUS
-static int soc_register_ac97_codec(struct snd_soc_codec *codec,
- struct snd_soc_dai *codec_dai)
-{
- int ret;
-
- /* Only instantiate AC97 if not already done by the adaptor
- * for the generic AC97 subsystem.
- */
- if (codec_dai->driver->ac97_control && !codec->ac97_registered) {
- /*
- * It is possible that the AC97 device is already registered to
- * the device subsystem. This happens when the device is created
- * via snd_ac97_mixer(). Currently the only SoC codec that does so
- * is the generic AC97 glue but others might emerge.
- *
- * In those cases we don't try to register the device again.
- */
- if (!codec->ac97_created)
- return 0;
-
- ret = soc_ac97_dev_register(codec);
- if (ret < 0) {
- dev_err(codec->dev,
- "ASoC: AC97 device register failed: %d\n", ret);
- return ret;
- }
-
- codec->ac97_registered = 1;
- }
- return 0;
-}
-
-static void soc_unregister_ac97_codec(struct snd_soc_codec *codec)
-{
- if (codec->ac97_registered) {
- soc_ac97_dev_unregister(codec);
- codec->ac97_registered = 0;
- }
-}
-
-static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
-{
- int i, ret;
-
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
- ret = soc_register_ac97_codec(codec_dai->codec, codec_dai);
- if (ret) {
- while (--i >= 0)
- soc_unregister_ac97_codec(codec_dai->codec);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void soc_unregister_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
-{
- int i;
-
- for (i = 0; i < rtd->num_codecs; i++)
- soc_unregister_ac97_codec(rtd->codec_dais[i]->codec);
-}
-#endif
-
static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
@@ -1793,20 +1638,6 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
goto probe_aux_dev_err;
}
-#ifdef CONFIG_SND_SOC_AC97_BUS
- /* register any AC97 codecs */
- for (i = 0; i < card->num_rtd; i++) {
- ret = soc_register_ac97_dai_link(&card->rtd[i]);
- if (ret < 0) {
- dev_err(card->dev,
- "ASoC: failed to register AC97: %d\n", ret);
- while (--i >= 0)
- soc_unregister_ac97_dai_link(&card->rtd[i]);
- goto probe_aux_dev_err;
- }
- }
-#endif
-
card->instantiated = 1;
snd_soc_dapm_sync(&card->dapm);
mutex_unlock(&card->mutex);
@@ -1941,7 +1772,6 @@ EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
static struct platform_driver soc_driver = {
.driver = {
.name = "soc-audio",
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = soc_probe,
@@ -1949,216 +1779,6 @@ static struct platform_driver soc_driver = {
};
/**
- * snd_soc_new_ac97_codec - initialise AC97 device
- * @codec: audio codec
- * @ops: AC97 bus operations
- * @num: AC97 codec number
- *
- * Initialises AC97 codec resources for use by ad-hoc devices only.
- */
-int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
- struct snd_ac97_bus_ops *ops, int num)
-{
- codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
- if (codec->ac97 == NULL)
- return -ENOMEM;
-
- codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
- if (codec->ac97->bus == NULL) {
- kfree(codec->ac97);
- codec->ac97 = NULL;
- return -ENOMEM;
- }
-
- codec->ac97->bus->ops = ops;
- codec->ac97->num = num;
-
- /*
- * Mark the AC97 device to be created by us. This way we ensure that the
- * device will be registered with the device subsystem later on.
- */
- codec->ac97_created = 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
-
-static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
-
-static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
-{
- struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
-
- pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
-
- gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
-
- udelay(10);
-
- gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
-
- pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
- msleep(2);
-}
-
-static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
-{
- struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
-
- pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
-
- gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
- gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
- gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
-
- udelay(10);
-
- gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
-
- pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
- msleep(2);
-}
-
-static int snd_soc_ac97_parse_pinctl(struct device *dev,
- struct snd_ac97_reset_cfg *cfg)
-{
- struct pinctrl *p;
- struct pinctrl_state *state;
- int gpio;
- int ret;
-
- p = devm_pinctrl_get(dev);
- if (IS_ERR(p)) {
- dev_err(dev, "Failed to get pinctrl\n");
- return PTR_ERR(p);
- }
- cfg->pctl = p;
-
- state = pinctrl_lookup_state(p, "ac97-reset");
- if (IS_ERR(state)) {
- dev_err(dev, "Can't find pinctrl state ac97-reset\n");
- return PTR_ERR(state);
- }
- cfg->pstate_reset = state;
-
- state = pinctrl_lookup_state(p, "ac97-warm-reset");
- if (IS_ERR(state)) {
- dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
- return PTR_ERR(state);
- }
- cfg->pstate_warm_reset = state;
-
- state = pinctrl_lookup_state(p, "ac97-running");
- if (IS_ERR(state)) {
- dev_err(dev, "Can't find pinctrl state ac97-running\n");
- return PTR_ERR(state);
- }
- cfg->pstate_run = state;
-
- gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
- if (gpio < 0) {
- dev_err(dev, "Can't find ac97-sync gpio\n");
- return gpio;
- }
- ret = devm_gpio_request(dev, gpio, "AC97 link sync");
- if (ret) {
- dev_err(dev, "Failed requesting ac97-sync gpio\n");
- return ret;
- }
- cfg->gpio_sync = gpio;
-
- gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
- if (gpio < 0) {
- dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
- return gpio;
- }
- ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
- if (ret) {
- dev_err(dev, "Failed requesting ac97-sdata gpio\n");
- return ret;
- }
- cfg->gpio_sdata = gpio;
-
- gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
- if (gpio < 0) {
- dev_err(dev, "Can't find ac97-reset gpio\n");
- return gpio;
- }
- ret = devm_gpio_request(dev, gpio, "AC97 link reset");
- if (ret) {
- dev_err(dev, "Failed requesting ac97-reset gpio\n");
- return ret;
- }
- cfg->gpio_reset = gpio;
-
- return 0;
-}
-
-struct snd_ac97_bus_ops *soc_ac97_ops;
-EXPORT_SYMBOL_GPL(soc_ac97_ops);
-
-int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
-{
- if (ops == soc_ac97_ops)
- return 0;
-
- if (soc_ac97_ops && ops)
- return -EBUSY;
-
- soc_ac97_ops = ops;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
-
-/**
- * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
- *
- * This function sets the reset and warm_reset properties of ops and parses
- * the device node of pdev to get pinctrl states and gpio numbers to use.
- */
-int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct snd_ac97_reset_cfg cfg;
- int ret;
-
- ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
- if (ret)
- return ret;
-
- ret = snd_soc_set_ac97_ops(ops);
- if (ret)
- return ret;
-
- ops->warm_reset = snd_soc_ac97_warm_reset;
- ops->reset = snd_soc_ac97_reset;
-
- snd_ac97_rst_cfg = cfg;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
-
-/**
- * snd_soc_free_ac97_codec - free AC97 codec device
- * @codec: audio codec
- *
- * Frees AC97 codec device resources.
- */
-void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
-{
-#ifdef CONFIG_SND_SOC_AC97_BUS
- soc_unregister_ac97_codec(codec);
-#endif
- kfree(codec->ac97->bus);
- kfree(codec->ac97);
- codec->ac97 = NULL;
- codec->ac97_created = 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
-
-/**
* snd_soc_cnew - create new control
* @_template: control template
* @data: control private data
@@ -2326,7 +1946,7 @@ EXPORT_SYMBOL_GPL(snd_soc_add_card_controls);
int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
const struct snd_kcontrol_new *controls, int num_controls)
{
- struct snd_card *card = dai->card->snd_card;
+ struct snd_card *card = dai->component->card->snd_card;
return snd_soc_add_controls(card, dai->dev, controls, num_controls,
NULL, dai);
@@ -2334,1020 +1954,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
/**
- * snd_soc_info_enum_double - enumerated double mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a double enumerated
- * mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
- uinfo->value.enumerated.items = e->items;
-
- if (uinfo->value.enumerated.item >= e->items)
- uinfo->value.enumerated.item = e->items - 1;
- strlcpy(uinfo->value.enumerated.name,
- e->texts[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name));
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
-
-/**
- * snd_soc_get_enum_double - enumerated double mixer get callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value of a double enumerated mixer.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int val, item;
- unsigned int reg_val;
- int ret;
-
- ret = snd_soc_component_read(component, e->reg, &reg_val);
- if (ret)
- return ret;
- val = (reg_val >> e->shift_l) & e->mask;
- item = snd_soc_enum_val_to_item(e, val);
- ucontrol->value.enumerated.item[0] = item;
- if (e->shift_l != e->shift_r) {
- val = (reg_val >> e->shift_l) & e->mask;
- item = snd_soc_enum_val_to_item(e, val);
- ucontrol->value.enumerated.item[1] = item;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);
-
-/**
- * snd_soc_put_enum_double - enumerated double mixer put callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a double enumerated mixer.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int *item = ucontrol->value.enumerated.item;
- unsigned int val;
- unsigned int mask;
-
- if (item[0] >= e->items)
- return -EINVAL;
- val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
- mask = e->mask << e->shift_l;
- if (e->shift_l != e->shift_r) {
- if (item[1] >= e->items)
- return -EINVAL;
- val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
- mask |= e->mask << e->shift_r;
- }
-
- return snd_soc_component_update_bits(component, e->reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
-
-/**
- * snd_soc_read_signed - Read a codec register and interpret it as a signed value
- * @component: component
- * @reg: Register to read
- * @mask: Mask to use after shifting the register value
- * @shift: Right shift of register value
- * @sign_bit: Bit that describes if a number is negative or not.
- * @signed_val: Pointer to where the read value should be stored
- *
- * This function reads a codec register. The register value is shifted right
- * by 'shift' bits and masked with the given 'mask'. Afterwards it translates
- * the given register value into a signed integer if sign_bit is non-zero.
- *
- * Returns 0 on success, otherwise an error value
- */
-static int snd_soc_read_signed(struct snd_soc_component *component,
- unsigned int reg, unsigned int mask, unsigned int shift,
- unsigned int sign_bit, int *signed_val)
-{
- int ret;
- unsigned int val;
-
- ret = snd_soc_component_read(component, reg, &val);
- if (ret < 0)
- return ret;
-
- val = (val >> shift) & mask;
-
- if (!sign_bit) {
- *signed_val = val;
- return 0;
- }
-
- /* non-negative number */
- if (!(val & BIT(sign_bit))) {
- *signed_val = val;
- return 0;
- }
-
- ret = val;
-
- /*
- * The register most probably does not contain a full-sized int.
- * Instead we have an arbitrary number of bits in a signed
- * representation which has to be translated into a full-sized int.
- * This is done by filling up all bits above the sign-bit.
- */
- ret |= ~((int)(BIT(sign_bit) - 1));
-
- *signed_val = ret;
-
- return 0;
-}
-
-/**
- * snd_soc_info_volsw - single mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a single mixer control, or a double
- * mixer control that spans 2 registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- int platform_max;
-
- if (!mc->platform_max)
- mc->platform_max = mc->max;
- platform_max = mc->platform_max;
-
- if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- else
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-
- uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = platform_max - mc->min;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
-
-/**
- * snd_soc_get_volsw - single mixer get callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value of a single mixer control, or a double mixer
- * control that spans 2 registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int rshift = mc->rshift;
- int max = mc->max;
- int min = mc->min;
- int sign_bit = mc->sign_bit;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
- int val;
- int ret;
-
- if (sign_bit)
- mask = BIT(sign_bit + 1) - 1;
-
- ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[0] = val - min;
- if (invert)
- ucontrol->value.integer.value[0] =
- max - ucontrol->value.integer.value[0];
-
- if (snd_soc_volsw_is_stereo(mc)) {
- if (reg == reg2)
- ret = snd_soc_read_signed(component, reg, mask, rshift,
- sign_bit, &val);
- else
- ret = snd_soc_read_signed(component, reg2, mask, shift,
- sign_bit, &val);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[1] = val - min;
- if (invert)
- ucontrol->value.integer.value[1] =
- max - ucontrol->value.integer.value[1];
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_volsw);
-
-/**
- * snd_soc_put_volsw - single mixer put callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a single mixer control, or a double mixer
- * control that spans 2 registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int rshift = mc->rshift;
- int max = mc->max;
- int min = mc->min;
- unsigned int sign_bit = mc->sign_bit;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
- int err;
- bool type_2r = false;
- unsigned int val2 = 0;
- unsigned int val, val_mask;
-
- if (sign_bit)
- mask = BIT(sign_bit + 1) - 1;
-
- val = ((ucontrol->value.integer.value[0] + min) & mask);
- if (invert)
- val = max - val;
- val_mask = mask << shift;
- val = val << shift;
- if (snd_soc_volsw_is_stereo(mc)) {
- val2 = ((ucontrol->value.integer.value[1] + min) & mask);
- if (invert)
- val2 = max - val2;
- if (reg == reg2) {
- val_mask |= mask << rshift;
- val |= val2 << rshift;
- } else {
- val2 = val2 << shift;
- type_2r = true;
- }
- }
- err = snd_soc_component_update_bits(component, reg, val_mask, val);
- if (err < 0)
- return err;
-
- if (type_2r)
- err = snd_soc_component_update_bits(component, reg2, val_mask,
- val2);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
-
-/**
- * snd_soc_get_volsw_sx - single mixer get callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value of a single mixer control, or a double mixer
- * control that spans 2 registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int rshift = mc->rshift;
- int max = mc->max;
- int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
- unsigned int val;
- int ret;
-
- ret = snd_soc_component_read(component, reg, &val);
- if (ret < 0)
- return ret;
-
- ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask;
-
- if (snd_soc_volsw_is_stereo(mc)) {
- ret = snd_soc_component_read(component, reg2, &val);
- if (ret < 0)
- return ret;
-
- val = ((val >> rshift) - min) & mask;
- ucontrol->value.integer.value[1] = val;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
-
-/**
- * snd_soc_put_volsw_sx - double mixer set callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a double mixer control that spans 2 registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
-
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int rshift = mc->rshift;
- int max = mc->max;
- int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
- int err = 0;
- unsigned int val, val_mask, val2 = 0;
-
- val_mask = mask << shift;
- val = (ucontrol->value.integer.value[0] + min) & mask;
- val = val << shift;
-
- err = snd_soc_component_update_bits(component, reg, val_mask, val);
- if (err < 0)
- return err;
-
- if (snd_soc_volsw_is_stereo(mc)) {
- val_mask = mask << rshift;
- val2 = (ucontrol->value.integer.value[1] + min) & mask;
- val2 = val2 << rshift;
-
- err = snd_soc_component_update_bits(component, reg2, val_mask,
- val2);
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
-
-/**
- * snd_soc_info_volsw_s8 - signed mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a signed mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- int platform_max;
- int min = mc->min;
-
- if (!mc->platform_max)
- mc->platform_max = mc->max;
- platform_max = mc->platform_max;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 2;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = platform_max - min;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);
-
-/**
- * snd_soc_get_volsw_s8 - signed mixer get callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value of a signed mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- unsigned int reg = mc->reg;
- unsigned int val;
- int min = mc->min;
- int ret;
-
- ret = snd_soc_component_read(component, reg, &val);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[0] =
- ((signed char)(val & 0xff))-min;
- ucontrol->value.integer.value[1] =
- ((signed char)((val >> 8) & 0xff))-min;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8);
-
-/**
- * snd_soc_put_volsw_s8 - signed mixer put callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a signed mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- unsigned int reg = mc->reg;
- int min = mc->min;
- unsigned int val;
-
- val = (ucontrol->value.integer.value[0]+min) & 0xff;
- val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;
-
- return snd_soc_component_update_bits(component, reg, 0xffff, val);
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
-
-/**
- * snd_soc_info_volsw_range - single mixer info callback with range.
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information, within a range, about a single
- * mixer control.
- *
- * returns 0 for success.
- */
-int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- int platform_max;
- int min = mc->min;
-
- if (!mc->platform_max)
- mc->platform_max = mc->max;
- platform_max = mc->platform_max;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = platform_max - min;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);
-
-/**
- * snd_soc_put_volsw_range - single mixer put value callback with range.
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value, within a range, for a single mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- unsigned int reg = mc->reg;
- unsigned int rreg = mc->rreg;
- unsigned int shift = mc->shift;
- int min = mc->min;
- int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
- unsigned int val, val_mask;
- int ret;
-
- if (invert)
- val = (max - ucontrol->value.integer.value[0]) & mask;
- else
- val = ((ucontrol->value.integer.value[0] + min) & mask);
- val_mask = mask << shift;
- val = val << shift;
-
- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
- if (ret < 0)
- return ret;
-
- if (snd_soc_volsw_is_stereo(mc)) {
- if (invert)
- val = (max - ucontrol->value.integer.value[1]) & mask;
- else
- val = ((ucontrol->value.integer.value[1] + min) & mask);
- val_mask = mask << shift;
- val = val << shift;
-
- ret = snd_soc_component_update_bits(component, rreg, val_mask,
- val);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
-
-/**
- * snd_soc_get_volsw_range - single mixer get callback with range
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value, within a range, of a single mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int rreg = mc->rreg;
- unsigned int shift = mc->shift;
- int min = mc->min;
- int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
- unsigned int val;
- int ret;
-
- ret = snd_soc_component_read(component, reg, &val);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[0] = (val >> shift) & mask;
- if (invert)
- ucontrol->value.integer.value[0] =
- max - ucontrol->value.integer.value[0];
- else
- ucontrol->value.integer.value[0] =
- ucontrol->value.integer.value[0] - min;
-
- if (snd_soc_volsw_is_stereo(mc)) {
- ret = snd_soc_component_read(component, rreg, &val);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[1] = (val >> shift) & mask;
- if (invert)
- ucontrol->value.integer.value[1] =
- max - ucontrol->value.integer.value[1];
- else
- ucontrol->value.integer.value[1] =
- ucontrol->value.integer.value[1] - min;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
-
-/**
- * snd_soc_limit_volume - Set new limit to an existing volume control.
- *
- * @codec: where to look for the control
- * @name: Name of the control
- * @max: new maximum limit
- *
- * Return 0 for success, else error.
- */
-int snd_soc_limit_volume(struct snd_soc_codec *codec,
- const char *name, int max)
-{
- struct snd_card *card = codec->component.card->snd_card;
- struct snd_kcontrol *kctl;
- struct soc_mixer_control *mc;
- int found = 0;
- int ret = -EINVAL;
-
- /* Sanity check for name and max */
- if (unlikely(!name || max <= 0))
- return -EINVAL;
-
- list_for_each_entry(kctl, &card->controls, list) {
- if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
- found = 1;
- break;
- }
- }
- if (found) {
- mc = (struct soc_mixer_control *)kctl->private_value;
- if (max <= mc->max) {
- mc->platform_max = max;
- ret = 0;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
-
-int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_bytes *params = (void *)kcontrol->private_value;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- uinfo->count = params->num_regs * component->val_bytes;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_bytes_info);
-
-int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_bytes *params = (void *)kcontrol->private_value;
- int ret;
-
- if (component->regmap)
- ret = regmap_raw_read(component->regmap, params->base,
- ucontrol->value.bytes.data,
- params->num_regs * component->val_bytes);
- else
- ret = -EINVAL;
-
- /* Hide any masked bytes to ensure consistent data reporting */
- if (ret == 0 && params->mask) {
- switch (component->val_bytes) {
- case 1:
- ucontrol->value.bytes.data[0] &= ~params->mask;
- break;
- case 2:
- ((u16 *)(&ucontrol->value.bytes.data))[0]
- &= cpu_to_be16(~params->mask);
- break;
- case 4:
- ((u32 *)(&ucontrol->value.bytes.data))[0]
- &= cpu_to_be32(~params->mask);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_bytes_get);
-
-int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_bytes *params = (void *)kcontrol->private_value;
- int ret, len;
- unsigned int val, mask;
- void *data;
-
- if (!component->regmap || !params->num_regs)
- return -EINVAL;
-
- len = params->num_regs * component->val_bytes;
-
- data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
- if (!data)
- return -ENOMEM;
-
- /*
- * If we've got a mask then we need to preserve the register
- * bits. We shouldn't modify the incoming data so take a
- * copy.
- */
- if (params->mask) {
- ret = regmap_read(component->regmap, params->base, &val);
- if (ret != 0)
- goto out;
-
- val &= params->mask;
-
- switch (component->val_bytes) {
- case 1:
- ((u8 *)data)[0] &= ~params->mask;
- ((u8 *)data)[0] |= val;
- break;
- case 2:
- mask = ~params->mask;
- ret = regmap_parse_val(component->regmap,
- &mask, &mask);
- if (ret != 0)
- goto out;
-
- ((u16 *)data)[0] &= mask;
-
- ret = regmap_parse_val(component->regmap,
- &val, &val);
- if (ret != 0)
- goto out;
-
- ((u16 *)data)[0] |= val;
- break;
- case 4:
- mask = ~params->mask;
- ret = regmap_parse_val(component->regmap,
- &mask, &mask);
- if (ret != 0)
- goto out;
-
- ((u32 *)data)[0] &= mask;
-
- ret = regmap_parse_val(component->regmap,
- &val, &val);
- if (ret != 0)
- goto out;
-
- ((u32 *)data)[0] |= val;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
- ret = regmap_raw_write(component->regmap, params->base,
- data, len);
-
-out:
- kfree(data);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
-
-int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *ucontrol)
-{
- struct soc_bytes_ext *params = (void *)kcontrol->private_value;
-
- ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- ucontrol->count = params->max;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext);
-
-int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag,
- unsigned int size, unsigned int __user *tlv)
-{
- struct soc_bytes_ext *params = (void *)kcontrol->private_value;
- unsigned int count = size < params->max ? size : params->max;
- int ret = -ENXIO;
-
- switch (op_flag) {
- case SNDRV_CTL_TLV_OP_READ:
- if (params->get)
- ret = params->get(tlv, count);
- break;
- case SNDRV_CTL_TLV_OP_WRITE:
- if (params->put)
- ret = params->put(tlv, count);
- break;
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback);
-
-/**
- * snd_soc_info_xr_sx - signed multi register info callback
- * @kcontrol: mreg control
- * @uinfo: control element information
- *
- * Callback to provide information of a control that can
- * span multiple codec registers which together
- * forms a single signed value in a MSB/LSB manner.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_mreg_control *mc =
- (struct soc_mreg_control *)kcontrol->private_value;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = mc->min;
- uinfo->value.integer.max = mc->max;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx);
-
-/**
- * snd_soc_get_xr_sx - signed multi register get callback
- * @kcontrol: mreg control
- * @ucontrol: control element information
- *
- * Callback to get the value of a control that can span
- * multiple codec registers which together forms a single
- * signed value in a MSB/LSB manner. The control supports
- * specifying total no of bits used to allow for bitfields
- * across the multiple codec registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mreg_control *mc =
- (struct soc_mreg_control *)kcontrol->private_value;
- unsigned int regbase = mc->regbase;
- unsigned int regcount = mc->regcount;
- unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
- unsigned int invert = mc->invert;
- unsigned long mask = (1UL<<mc->nbits)-1;
- long min = mc->min;
- long max = mc->max;
- long val = 0;
- unsigned int regval;
- unsigned int i;
- int ret;
-
- for (i = 0; i < regcount; i++) {
- ret = snd_soc_component_read(component, regbase+i, &regval);
- if (ret)
- return ret;
- val |= (regval & regwmask) << (regwshift*(regcount-i-1));
- }
- val &= mask;
- if (min < 0 && val > max)
- val |= ~mask;
- if (invert)
- val = max - val;
- ucontrol->value.integer.value[0] = val;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);
-
-/**
- * snd_soc_put_xr_sx - signed multi register put callback
- * @kcontrol: mreg control
- * @ucontrol: control element information
- *
- * Callback to set the value of a control that can span
- * multiple codec registers which together forms a single
- * signed value in a MSB/LSB manner. The control supports
- * specifying total no of bits used to allow for bitfields
- * across the multiple codec registers.
- *
- * Returns 0 for success.
- */
-int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mreg_control *mc =
- (struct soc_mreg_control *)kcontrol->private_value;
- unsigned int regbase = mc->regbase;
- unsigned int regcount = mc->regcount;
- unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
- unsigned int invert = mc->invert;
- unsigned long mask = (1UL<<mc->nbits)-1;
- long max = mc->max;
- long val = ucontrol->value.integer.value[0];
- unsigned int i, regval, regmask;
- int err;
-
- if (invert)
- val = max - val;
- val &= mask;
- for (i = 0; i < regcount; i++) {
- regval = (val >> (regwshift*(regcount-i-1))) & regwmask;
- regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask;
- err = snd_soc_component_update_bits(component, regbase+i,
- regmask, regval);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
-
-/**
- * snd_soc_get_strobe - strobe get callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to get the value of a strobe mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- unsigned int mask = 1 << shift;
- unsigned int invert = mc->invert != 0;
- unsigned int val;
- int ret;
-
- ret = snd_soc_component_read(component, reg, &val);
- if (ret)
- return ret;
-
- val &= mask;
-
- if (shift != 0 && val != 0)
- val = val >> shift;
- ucontrol->value.enumerated.item[0] = val ^ invert;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_get_strobe);
-
-/**
- * snd_soc_put_strobe - strobe put callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to strobe a register bit high then low (or the inverse)
- * in one pass of a single mixer enum control.
- *
- * Returns 1 for success.
- */
-int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- unsigned int mask = 1 << shift;
- unsigned int invert = mc->invert != 0;
- unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
- unsigned int val1 = (strobe ^ invert) ? mask : 0;
- unsigned int val2 = (strobe ^ invert) ? 0 : mask;
- int err;
-
- err = snd_soc_component_update_bits(component, reg, mask, val1);
- if (err < 0)
- return err;
-
- return snd_soc_component_update_bits(component, reg, mask, val2);
-}
-EXPORT_SYMBOL_GPL(snd_soc_put_strobe);
-
-/**
* snd_soc_dai_set_sysclk - configure DAI system or master clock.
* @dai: DAI
* @clk_id: DAI specific clock ID
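The generic kcontrol info/get/put helpers removed from soc-core.c above (volsw, enum, bytes, xr_sx and strobe variants) are normally reached through the SOC_* control macros rather than called directly, so driver-side control tables keep the same shape as long as the helpers remain available elsewhere in the tree. A minimal sketch of such a table; register addresses, ranges and names are illustrative assumptions:

#include <sound/soc.h>
#include <sound/tlv.h>

static const DECLARE_TLV_DB_SCALE(example_out_tlv, -5700, 100, 0);

static const char * const example_deemph_texts[] = { "None", "44.1kHz" };
static SOC_ENUM_SINGLE_DECL(example_deemph_enum, 0x05, 1, example_deemph_texts);

static const struct snd_kcontrol_new example_controls[] = {
	/* expands to snd_soc_info_volsw()/snd_soc_get_volsw()/snd_soc_put_volsw() */
	SOC_DOUBLE_R_TLV("Playback Volume", 0x02, 0x03, 0, 63, 0,
			 example_out_tlv),
	/* expands to snd_soc_info_enum_double() and friends */
	SOC_ENUM("Deemphasis", example_deemph_enum),
};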
@@ -3996,22 +2602,62 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
return 0;
}
-static void snd_soc_component_init_regmap(struct snd_soc_component *component)
+static void snd_soc_component_setup_regmap(struct snd_soc_component *component)
{
- if (!component->regmap)
- component->regmap = dev_get_regmap(component->dev, NULL);
- if (component->regmap) {
- int val_bytes = regmap_get_val_bytes(component->regmap);
- /* Errors are legitimate for non-integer byte multiples */
- if (val_bytes > 0)
- component->val_bytes = val_bytes;
- }
+ int val_bytes = regmap_get_val_bytes(component->regmap);
+
+ /* Errors are legitimate for non-integer byte multiples */
+ if (val_bytes > 0)
+ component->val_bytes = val_bytes;
+}
+
+#ifdef CONFIG_REGMAP
+
+/**
+ * snd_soc_component_init_regmap() - Initialize regmap instance for the component
+ * @component: The component for which to initialize the regmap instance
+ * @regmap: The regmap instance that should be used by the component
+ *
+ * This function allows deferred assignment of the regmap instance that is
+ * associated with the component. Only use this if the regmap instance is not
+ * yet ready when the component is registered. The function must also be called
+ * before the first IO attempt of the component.
+ */
+void snd_soc_component_init_regmap(struct snd_soc_component *component,
+ struct regmap *regmap)
+{
+ component->regmap = regmap;
+ snd_soc_component_setup_regmap(component);
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap);
+
+/**
+ * snd_soc_component_exit_regmap() - De-initialize regmap instance for the component
+ * @component: The component for which to de-initialize the regmap instance
+ *
+ * Calls regmap_exit() on the regmap instance associated to the component and
+ * removes the regmap instance from the component.
+ *
+ * This function should only be used if snd_soc_component_init_regmap() was used
+ * to initialize the regmap instance.
+ */
+void snd_soc_component_exit_regmap(struct snd_soc_component *component)
+{
+ regmap_exit(component->regmap);
+ component->regmap = NULL;
}
+EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap);
+
+#endif
static void snd_soc_component_add_unlocked(struct snd_soc_component *component)
{
- if (!component->write && !component->read)
- snd_soc_component_init_regmap(component);
+ if (!component->write && !component->read) {
+ if (!component->regmap)
+ component->regmap = dev_get_regmap(component->dev, NULL);
+ if (component->regmap)
+ snd_soc_component_setup_regmap(component);
+ }
list_add(&component->list, &component_list);
}
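snd_soc_component_init_regmap() gives drivers a way to attach a regmap after component registration but before any I/O, instead of relying on the dev_get_regmap() lookup in snd_soc_component_add_unlocked(). A hedged sketch of how a codec whose regmap is created by its parent device might use it; the private struct and names are assumptions:

#include <linux/regmap.h>
#include <sound/soc.h>

struct example_priv {
	struct regmap *regmap;	/* created by the parent MFD driver */
};

static int example_codec_probe(struct snd_soc_codec *codec)
{
	struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

	/* must run before the first register access through the component */
	snd_soc_component_init_regmap(&codec->component, priv->regmap);
	return 0;
}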
@@ -4362,7 +3008,6 @@ int snd_soc_register_codec(struct device *dev,
codec->dev = dev;
codec->driver = codec_drv;
codec->component.val_bytes = codec_drv->reg_word_size;
- mutex_init(&codec->mutex);
#ifdef CONFIG_DEBUG_FS
codec->component.init_debugfs = soc_init_codec_debugfs;
@@ -4585,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname)
{
struct device_node *np = card->dev->of_node;
- int num_routes;
+ int num_routes, old_routes;
struct snd_soc_dapm_route *routes;
int i, ret;
@@ -4603,7 +3248,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
- routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
+ old_routes = card->num_dapm_routes;
+ routes = devm_kzalloc(card->dev,
+ (old_routes + num_routes) * sizeof(*routes),
GFP_KERNEL);
if (!routes) {
dev_err(card->dev,
@@ -4611,9 +3258,11 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
+ memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
+
for (i = 0; i < num_routes; i++) {
ret = of_property_read_string_index(np, propname,
- 2 * i, &routes[i].sink);
+ 2 * i, &routes[old_routes + i].sink);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -4621,7 +3270,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
ret = of_property_read_string_index(np, propname,
- (2 * i) + 1, &routes[i].source);
+ (2 * i) + 1, &routes[old_routes + i].source);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -4630,7 +3279,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
}
}
- card->num_dapm_routes = num_routes;
+ card->num_dapm_routes += num_routes;
card->dapm_routes = routes;
return 0;
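Because the parsed routes are now appended after any existing card->dapm_routes instead of replacing them, a machine driver can mix a static route table with DT-provided routing entries. A minimal sketch under that assumption; the card, routes and property name below are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static const struct snd_soc_dapm_route example_static_routes[] = {
	{ "Headphone Jack", NULL, "HPOUT" },	/* sink, control, source */
};

static struct snd_soc_card example_card = {
	.name	= "example-card",
	.owner	= THIS_MODULE,
};

static int example_card_probe(struct platform_device *pdev)
{
	int ret;

	example_card.dev = &pdev->dev;
	example_card.dapm_routes = example_static_routes;
	example_card.num_dapm_routes = ARRAY_SIZE(example_static_routes);

	/* DT routes are copied in after the static ones */
	ret = snd_soc_of_parse_audio_routing(&example_card, "audio-routing");
	if (ret)
		return ret;

	return devm_snd_soc_register_card(&pdev->dev, &example_card);
}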
@@ -4750,36 +3399,30 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
-int snd_soc_of_get_dai_name(struct device_node *of_node,
- const char **dai_name)
+static int snd_soc_get_dai_name(struct of_phandle_args *args,
+ const char **dai_name)
{
struct snd_soc_component *pos;
- struct of_phandle_args args;
- int ret;
-
- ret = of_parse_phandle_with_args(of_node, "sound-dai",
- "#sound-dai-cells", 0, &args);
- if (ret)
- return ret;
-
- ret = -EPROBE_DEFER;
+ int ret = -EPROBE_DEFER;
mutex_lock(&client_mutex);
list_for_each_entry(pos, &component_list, list) {
- if (pos->dev->of_node != args.np)
+ if (pos->dev->of_node != args->np)
continue;
if (pos->driver->of_xlate_dai_name) {
- ret = pos->driver->of_xlate_dai_name(pos, &args, dai_name);
+ ret = pos->driver->of_xlate_dai_name(pos,
+ args,
+ dai_name);
} else {
int id = -1;
- switch (args.args_count) {
+ switch (args->args_count) {
case 0:
id = 0; /* same as dai_drv[0] */
break;
case 1:
- id = args.args[0];
+ id = args->args[0];
break;
default:
/* not supported */
@@ -4801,6 +3444,21 @@ int snd_soc_of_get_dai_name(struct device_node *of_node,
break;
}
mutex_unlock(&client_mutex);
+ return ret;
+}
+
+int snd_soc_of_get_dai_name(struct device_node *of_node,
+ const char **dai_name)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ ret = of_parse_phandle_with_args(of_node, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_get_dai_name(&args, dai_name);
of_node_put(args.np);
@@ -4808,6 +3466,77 @@ int snd_soc_of_get_dai_name(struct device_node *of_node,
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
+/*
+ * snd_soc_of_get_dai_link_codecs - Parse a list of CODECs in the devicetree
+ * @dev: Card device
+ * @of_node: Device node
+ * @dai_link: DAI link
+ *
+ * Builds an array of CODEC DAI components from the DAI link property
+ * 'sound-dai'.
+ * The array is set in the DAI link and the number of DAIs is set accordingly.
+ * The device nodes in the array (of_node) must be dereferenced by the caller.
+ *
+ * Returns 0 for success
+ */
+int snd_soc_of_get_dai_link_codecs(struct device *dev,
+ struct device_node *of_node,
+ struct snd_soc_dai_link *dai_link)
+{
+ struct of_phandle_args args;
+ struct snd_soc_dai_link_component *component;
+ char *name;
+ int index, num_codecs, ret;
+
+ /* Count the number of CODECs */
+ name = "sound-dai";
+ num_codecs = of_count_phandle_with_args(of_node, name,
+ "#sound-dai-cells");
+ if (num_codecs <= 0) {
+ if (num_codecs == -ENOENT)
+ dev_err(dev, "No 'sound-dai' property\n");
+ else
+ dev_err(dev, "Bad phandle in 'sound-dai'\n");
+ return num_codecs;
+ }
+ component = devm_kzalloc(dev,
+ sizeof *component * num_codecs,
+ GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+ dai_link->codecs = component;
+ dai_link->num_codecs = num_codecs;
+
+ /* Parse the list */
+ for (index = 0, component = dai_link->codecs;
+ index < dai_link->num_codecs;
+ index++, component++) {
+ ret = of_parse_phandle_with_args(of_node, name,
+ "#sound-dai-cells",
+ index, &args);
+ if (ret)
+ goto err;
+ component->of_node = args.np;
+ ret = snd_soc_get_dai_name(&args, &component->dai_name);
+ if (ret < 0)
+ goto err;
+ }
+ return 0;
+err:
+ for (index = 0, component = dai_link->codecs;
+ index < dai_link->num_codecs;
+ index++, component++) {
+ if (!component->of_node)
+ break;
+ of_node_put(component->of_node);
+ component->of_node = NULL;
+ }
+ dai_link->codecs = NULL;
+ dai_link->num_codecs = 0;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);
+
static int __init snd_soc_init(void)
{
#ifdef CONFIG_DEBUG_FS
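The newly exported snd_soc_of_get_dai_link_codecs() lets machine drivers build multi-codec DAI links straight from a "sound-dai" phandle list. A hedged usage sketch; the node layout and helper name are assumptions, and the of_node references stored in the link must still be dropped by the caller on teardown:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static int example_parse_codecs(struct platform_device *pdev,
				struct snd_soc_dai_link *link)
{
	struct device_node *codec_node;
	int ret;

	codec_node = of_get_child_by_name(pdev->dev.of_node, "codec");
	if (!codec_node)
		return -EINVAL;

	/* fills link->codecs[] and link->num_codecs from "sound-dai" */
	ret = snd_soc_of_get_dai_link_codecs(&pdev->dev, codec_node, link);

	of_node_put(codec_node);
	return ret;
}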
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c61cb9cedbcd..c5136bb1f982 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -159,27 +159,135 @@ static void dapm_mark_dirty(struct snd_soc_dapm_widget *w, const char *reason)
}
}
-void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm)
+/*
+ * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
+ * paths
+ * @w: The widget for which to invalidate the cached number of input paths
+ *
+ * The function resets the cached number of inputs for the specified widget and
+ * all widgets that can be reached via outgoing paths from the widget.
+ *
+ * This function must be called if the number of input paths for a widget might
+ * have changed. E.g. if the source state of a widget changes or a path is added
+ * or activated with the widget as the sink.
+ */
+static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_widget *sink;
+ struct snd_soc_dapm_path *p;
+ LIST_HEAD(list);
+
+ dapm_assert_locked(w->dapm);
+
+ if (w->inputs == -1)
+ return;
+
+ w->inputs = -1;
+ list_add_tail(&w->work_list, &list);
+
+ list_for_each_entry(w, &list, work_list) {
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (p->is_supply || p->weak || !p->connect)
+ continue;
+ sink = p->sink;
+ if (sink->inputs != -1) {
+ sink->inputs = -1;
+ list_add_tail(&sink->work_list, &list);
+ }
+ }
+ }
+}
+
+/*
+ * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
+ * output paths
+ * @w: The widget for which to invalidate the cached number of output paths
+ *
+ * Resets the cached number of outputs for the specified widget and all widgets
+ * that can be reached via incoming paths from the widget.
+ *
+ * This function must be called if the number of output paths for a widget might
+ * have changed, e.g. if the sink state of a widget changes or a path with the
+ * widget as the source is added or activated.
+ */
+static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_widget *source;
+ struct snd_soc_dapm_path *p;
+ LIST_HEAD(list);
+
+ dapm_assert_locked(w->dapm);
+
+ if (w->outputs == -1)
+ return;
+
+ w->outputs = -1;
+ list_add_tail(&w->work_list, &list);
+
+ list_for_each_entry(w, &list, work_list) {
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (p->is_supply || p->weak || !p->connect)
+ continue;
+ source = p->source;
+ if (source->outputs != -1) {
+ source->outputs = -1;
+ list_add_tail(&source->work_list, &list);
+ }
+ }
+ }
+}
+
+/*
+ * dapm_path_invalidate() - Invalidates the cached number of inputs and outputs
+ * for the widgets connected to a path
+ * @p: The path to invalidate
+ *
+ * Resets the cached number of inputs for the sink of the path and the cached
+ * number of outputs for the source of the path.
+ *
+ * This function must be called when a path is added, removed or the connected
+ * state changes.
+ */
+static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
+{
+ /*
+ * Weak paths or supply paths do not influence the number of input or
+ * output paths of their neighbors.
+ */
+ if (p->weak || p->is_supply)
+ return;
+
+ /*
+ * The number of connected endpoints is the sum of the number of
+ * connected endpoints of all neighbors. If a node with 0 connected
+ * endpoints is either connected or disconnected that sum won't change,
+ * so there is no need to re-check the path.
+ */
+ if (p->source->inputs != 0)
+ dapm_widget_invalidate_input_paths(p->sink);
+ if (p->sink->outputs != 0)
+ dapm_widget_invalidate_output_paths(p->source);
+}
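A worked illustration of the invalidation rule above (made-up widget names, not part of the patch):

/*
 * Illustration:  MIC --p--> PGA --> SPK
 * Disconnecting p invalidates the cached ->inputs of PGA and SPK (they may
 * have lost their only input endpoint) and the cached ->outputs of MIC, while
 * widgets not reachable from p keep their cached values and are not re-walked.
 */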
+
+void dapm_mark_endpoints_dirty(struct snd_soc_card *card)
{
- struct snd_soc_card *card = dapm->card;
struct snd_soc_dapm_widget *w;
mutex_lock(&card->dapm_mutex);
list_for_each_entry(w, &card->widgets, list) {
- switch (w->id) {
- case snd_soc_dapm_input:
- case snd_soc_dapm_output:
- dapm_mark_dirty(w, "Rechecking inputs and outputs");
- break;
- default:
- break;
+ if (w->is_sink || w->is_source) {
+ dapm_mark_dirty(w, "Rechecking endpoints");
+ if (w->is_sink)
+ dapm_widget_invalidate_output_paths(w);
+ if (w->is_source)
+ dapm_widget_invalidate_input_paths(w);
}
}
mutex_unlock(&card->dapm_mutex);
}
-EXPORT_SYMBOL_GPL(dapm_mark_io_dirty);
+EXPORT_SYMBOL_GPL(dapm_mark_endpoints_dirty);
/* create a new dapm widget */
static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
@@ -386,8 +494,6 @@ static void dapm_reset(struct snd_soc_card *card)
list_for_each_entry(w, &card->widgets, list) {
w->new_power = w->power;
w->power_checked = false;
- w->inputs = -1;
- w->outputs = -1;
}
}
@@ -469,10 +575,9 @@ out:
/* connect mux widget to its interconnecting audio paths */
static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
- struct snd_soc_dapm_path *path, const char *control_name,
- const struct snd_kcontrol_new *kcontrol)
+ struct snd_soc_dapm_path *path, const char *control_name)
{
+ const struct snd_kcontrol_new *kcontrol = &path->sink->kcontrol_news[0];
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val, item;
int i;
@@ -493,10 +598,7 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
for (i = 0; i < e->items; i++) {
if (!(strcmp(control_name, e->texts[i]))) {
- list_add(&path->list, &dapm->card->paths);
- list_add(&path->list_sink, &dest->sources);
- list_add(&path->list_source, &src->sinks);
- path->name = (char*)e->texts[i];
+ path->name = e->texts[i];
if (i == item)
path->connect = 1;
else
@@ -509,11 +611,10 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
}
/* set up initial codec paths */
-static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w,
- struct snd_soc_dapm_path *p, int i)
+static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
{
struct soc_mixer_control *mc = (struct soc_mixer_control *)
- w->kcontrol_news[i].private_value;
+ p->sink->kcontrol_news[i].private_value;
unsigned int reg = mc->reg;
unsigned int shift = mc->shift;
unsigned int max = mc->max;
@@ -522,7 +623,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w,
unsigned int val;
if (reg != SND_SOC_NOPM) {
- soc_dapm_read(w->dapm, reg, &val);
+ soc_dapm_read(p->sink->dapm, reg, &val);
val = (val >> shift) & mask;
if (invert)
val = max - val;
@@ -534,19 +635,15 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w,
/* connect mixer widget to its interconnecting audio paths */
static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
- struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
struct snd_soc_dapm_path *path, const char *control_name)
{
int i;
/* search for mixer kcontrol */
- for (i = 0; i < dest->num_kcontrols; i++) {
- if (!strcmp(control_name, dest->kcontrol_news[i].name)) {
- list_add(&path->list, &dapm->card->paths);
- list_add(&path->list_sink, &dest->sources);
- list_add(&path->list_source, &src->sinks);
- path->name = dest->kcontrol_news[i].name;
- dapm_set_mixer_path_status(dest, path, i);
+ for (i = 0; i < path->sink->num_kcontrols; i++) {
+ if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
+ path->name = path->sink->kcontrol_news[i].name;
+ dapm_set_mixer_path_status(path, i);
return 0;
}
}
@@ -738,8 +835,10 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
if (ret < 0)
return ret;
- list_for_each_entry(path, &w->sources, list_sink)
- dapm_kcontrol_add_path(w->kcontrols[0], path);
+ list_for_each_entry(path, &w->sources, list_sink) {
+ if (path->name)
+ dapm_kcontrol_add_path(w->kcontrols[0], path);
+ }
return 0;
}
@@ -754,34 +853,6 @@ static int dapm_new_pga(struct snd_soc_dapm_widget *w)
return 0;
}
-/* reset 'walked' bit for each dapm path */
-static void dapm_clear_walk_output(struct snd_soc_dapm_context *dapm,
- struct list_head *sink)
-{
- struct snd_soc_dapm_path *p;
-
- list_for_each_entry(p, sink, list_source) {
- if (p->walked) {
- p->walked = 0;
- dapm_clear_walk_output(dapm, &p->sink->sinks);
- }
- }
-}
-
-static void dapm_clear_walk_input(struct snd_soc_dapm_context *dapm,
- struct list_head *source)
-{
- struct snd_soc_dapm_path *p;
-
- list_for_each_entry(p, source, list_sink) {
- if (p->walked) {
- p->walked = 0;
- dapm_clear_walk_input(dapm, &p->source->sources);
- }
- }
-}
-
-
/* We implement power down on suspend by checking the power state of
* the ALSA card - when we are suspending the ALSA state for the card
* is set to D3.
@@ -856,61 +927,23 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
DAPM_UPDATE_STAT(widget, path_checks);
- switch (widget->id) {
- case snd_soc_dapm_supply:
- case snd_soc_dapm_regulator_supply:
- case snd_soc_dapm_clock_supply:
- case snd_soc_dapm_kcontrol:
- return 0;
- default:
- break;
- }
-
- switch (widget->id) {
- case snd_soc_dapm_adc:
- case snd_soc_dapm_aif_out:
- case snd_soc_dapm_dai_out:
- if (widget->active) {
- widget->outputs = snd_soc_dapm_suspend_check(widget);
- return widget->outputs;
- }
- default:
- break;
- }
-
- if (widget->connected) {
- /* connected pin ? */
- if (widget->id == snd_soc_dapm_output && !widget->ext) {
- widget->outputs = snd_soc_dapm_suspend_check(widget);
- return widget->outputs;
- }
-
- /* connected jack or spk ? */
- if (widget->id == snd_soc_dapm_hp ||
- widget->id == snd_soc_dapm_spk ||
- (widget->id == snd_soc_dapm_line &&
- !list_empty(&widget->sources))) {
- widget->outputs = snd_soc_dapm_suspend_check(widget);
- return widget->outputs;
- }
+ if (widget->is_sink && widget->connected) {
+ widget->outputs = snd_soc_dapm_suspend_check(widget);
+ return widget->outputs;
}
list_for_each_entry(path, &widget->sinks, list_source) {
DAPM_UPDATE_STAT(widget, neighbour_checks);
- if (path->weak)
+ if (path->weak || path->is_supply)
continue;
if (path->walking)
return 1;
- if (path->walked)
- continue;
-
trace_snd_soc_dapm_output_path(widget, path);
- if (path->sink && path->connect) {
- path->walked = 1;
+ if (path->connect) {
path->walking = 1;
/* do we need to add this widget to the list ? */
@@ -952,73 +985,23 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
DAPM_UPDATE_STAT(widget, path_checks);
- switch (widget->id) {
- case snd_soc_dapm_supply:
- case snd_soc_dapm_regulator_supply:
- case snd_soc_dapm_clock_supply:
- case snd_soc_dapm_kcontrol:
- return 0;
- default:
- break;
- }
-
- /* active stream ? */
- switch (widget->id) {
- case snd_soc_dapm_dac:
- case snd_soc_dapm_aif_in:
- case snd_soc_dapm_dai_in:
- if (widget->active) {
- widget->inputs = snd_soc_dapm_suspend_check(widget);
- return widget->inputs;
- }
- default:
- break;
- }
-
- if (widget->connected) {
- /* connected pin ? */
- if (widget->id == snd_soc_dapm_input && !widget->ext) {
- widget->inputs = snd_soc_dapm_suspend_check(widget);
- return widget->inputs;
- }
-
- /* connected VMID/Bias for lower pops */
- if (widget->id == snd_soc_dapm_vmid) {
- widget->inputs = snd_soc_dapm_suspend_check(widget);
- return widget->inputs;
- }
-
- /* connected jack ? */
- if (widget->id == snd_soc_dapm_mic ||
- (widget->id == snd_soc_dapm_line &&
- !list_empty(&widget->sinks))) {
- widget->inputs = snd_soc_dapm_suspend_check(widget);
- return widget->inputs;
- }
-
- /* signal generator */
- if (widget->id == snd_soc_dapm_siggen) {
- widget->inputs = snd_soc_dapm_suspend_check(widget);
- return widget->inputs;
- }
+ if (widget->is_source && widget->connected) {
+ widget->inputs = snd_soc_dapm_suspend_check(widget);
+ return widget->inputs;
}
list_for_each_entry(path, &widget->sources, list_sink) {
DAPM_UPDATE_STAT(widget, neighbour_checks);
- if (path->weak)
+ if (path->weak || path->is_supply)
continue;
if (path->walking)
return 1;
- if (path->walked)
- continue;
-
trace_snd_soc_dapm_input_path(widget, path);
- if (path->source && path->connect) {
- path->walked = 1;
+ if (path->connect) {
path->walking = 1;
/* do we need to add this widget to the list ? */
@@ -1060,21 +1043,25 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list)
{
- struct snd_soc_card *card = dai->card;
+ struct snd_soc_card *card = dai->component->card;
+ struct snd_soc_dapm_widget *w;
int paths;
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- dapm_reset(card);
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /*
+ * For is_connected_{output,input}_ep() to fully discover the graph we need
+ * to reset the cached number of inputs and outputs.
+ */
+ list_for_each_entry(w, &card->widgets, list) {
+ w->inputs = -1;
+ w->outputs = -1;
+ }
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
paths = is_connected_output_ep(dai->playback_widget, list);
- dapm_clear_walk_output(&card->dapm,
- &dai->playback_widget->sinks);
- } else {
+ else
paths = is_connected_input_ep(dai->capture_widget, list);
- dapm_clear_walk_input(&card->dapm,
- &dai->capture_widget->sources);
- }
trace_snd_soc_dapm_connected(paths, stream);
mutex_unlock(&card->dapm_mutex);
@@ -1163,44 +1150,10 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
DAPM_UPDATE_STAT(w, power_checks);
in = is_connected_input_ep(w, NULL);
- dapm_clear_walk_input(w->dapm, &w->sources);
out = is_connected_output_ep(w, NULL);
- dapm_clear_walk_output(w->dapm, &w->sinks);
return out != 0 && in != 0;
}
-/* Check to see if an ADC has power */
-static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
-{
- int in;
-
- DAPM_UPDATE_STAT(w, power_checks);
-
- if (w->active) {
- in = is_connected_input_ep(w, NULL);
- dapm_clear_walk_input(w->dapm, &w->sources);
- return in != 0;
- } else {
- return dapm_generic_check_power(w);
- }
-}
-
-/* Check to see if a DAC has power */
-static int dapm_dac_check_power(struct snd_soc_dapm_widget *w)
-{
- int out;
-
- DAPM_UPDATE_STAT(w, power_checks);
-
- if (w->active) {
- out = is_connected_output_ep(w, NULL);
- dapm_clear_walk_output(w->dapm, &w->sinks);
- return out != 0;
- } else {
- return dapm_generic_check_power(w);
- }
-}
-
/* Check to see if a power supply is needed */
static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
{
@@ -1219,9 +1172,6 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
!path->connected(path->source, path->sink))
continue;
- if (!path->sink)
- continue;
-
if (dapm_widget_power_check(path->sink))
return 1;
}
@@ -1636,27 +1586,14 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
/* If we changed our power state perhaps our neighbours changed
* also.
*/
- list_for_each_entry(path, &w->sources, list_sink) {
- if (path->source) {
- dapm_widget_set_peer_power(path->source, power,
+ list_for_each_entry(path, &w->sources, list_sink)
+ dapm_widget_set_peer_power(path->source, power, path->connect);
+
+ /* Supplies can't affect their outputs, only their inputs */
+ if (!w->is_supply) {
+ list_for_each_entry(path, &w->sinks, list_source)
+ dapm_widget_set_peer_power(path->sink, power,
path->connect);
- }
- }
- switch (w->id) {
- case snd_soc_dapm_supply:
- case snd_soc_dapm_regulator_supply:
- case snd_soc_dapm_clock_supply:
- case snd_soc_dapm_kcontrol:
- /* Supplies can't affect their outputs, only their inputs */
- break;
- default:
- list_for_each_entry(path, &w->sinks, list_source) {
- if (path->sink) {
- dapm_widget_set_peer_power(path->sink, power,
- path->connect);
- }
- }
- break;
}
if (power)
@@ -1863,10 +1800,14 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!buf)
return -ENOMEM;
- in = is_connected_input_ep(w, NULL);
- dapm_clear_walk_input(w->dapm, &w->sources);
- out = is_connected_output_ep(w, NULL);
- dapm_clear_walk_output(w->dapm, &w->sinks);
+ /* Supply widgets are not handled by is_connected_{input,output}_ep() */
+ if (w->is_supply) {
+ in = 0;
+ out = 0;
+ } else {
+ in = is_connected_input_ep(w, NULL);
+ out = is_connected_output_ep(w, NULL);
+ }
ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
w->name, w->power ? "On" : "Off",
@@ -2011,32 +1952,45 @@ static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
#endif
+/*
+ * soc_dapm_connect_path() - Connects or disconnects a path
+ * @path: The path to update
+ * @connect: The new connect state of the path. True if the path is connected,
+ * false if it is disconnected.
+ * @reason: The reason why the path changed (for debugging only)
+ */
+static void soc_dapm_connect_path(struct snd_soc_dapm_path *path,
+ bool connect, const char *reason)
+{
+ if (path->connect == connect)
+ return;
+
+ path->connect = connect;
+ dapm_mark_dirty(path->source, reason);
+ dapm_mark_dirty(path->sink, reason);
+ dapm_path_invalidate(path);
+}
+
/* test and update the power status of a mux widget */
static int soc_dapm_mux_update_power(struct snd_soc_card *card,
struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
{
struct snd_soc_dapm_path *path;
int found = 0;
+ bool connect;
lockdep_assert_held(&card->dapm_mutex);
/* find dapm widget path assoc with kcontrol */
dapm_kcontrol_for_each_path(path, kcontrol) {
- if (!path->name || !e->texts[mux])
- continue;
-
found = 1;
/* we now need to match the string in the enum to the path */
- if (!(strcmp(path->name, e->texts[mux]))) {
- path->connect = 1; /* new connection */
- dapm_mark_dirty(path->source, "mux connection");
- } else {
- if (path->connect)
- dapm_mark_dirty(path->source,
- "mux disconnection");
- path->connect = 0; /* old connection must be powered down */
- }
- dapm_mark_dirty(path->sink, "mux change");
+ if (!(strcmp(path->name, e->texts[mux])))
+ connect = true;
+ else
+ connect = false;
+
+ soc_dapm_connect_path(path, connect, "mux update");
}
if (found)
@@ -2075,9 +2029,7 @@ static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
/* find dapm widget path assoc with kcontrol */
dapm_kcontrol_for_each_path(path, kcontrol) {
found = 1;
- path->connect = connect;
- dapm_mark_dirty(path->source, "mixer connection");
- dapm_mark_dirty(path->sink, "mixer update");
+ soc_dapm_connect_path(path, connect, "mixer update");
}
if (found)
@@ -2255,8 +2207,11 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
return -EINVAL;
}
- if (w->connected != status)
+ if (w->connected != status) {
dapm_mark_dirty(w, "pin configuration");
+ dapm_widget_invalidate_input_paths(w);
+ dapm_widget_invalidate_output_paths(w);
+ }
w->connected = status;
if (status == 0)
@@ -2309,6 +2264,53 @@ int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
+/*
+ * dapm_update_widget_flags() - Re-compute widget sink and source flags
+ * @w: The widget for which to update the flags
+ *
+ * Some widgets have a dynamic category which depends on which neighbors they
+ * are connected to. This function updates the category for these widgets.
+ *
+ * This function must be called whenever a path is added to or removed from a
+ * widget.
+ */
+static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p;
+
+ switch (w->id) {
+ case snd_soc_dapm_input:
+ w->is_source = 1;
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (p->source->id == snd_soc_dapm_micbias ||
+ p->source->id == snd_soc_dapm_mic ||
+ p->source->id == snd_soc_dapm_line ||
+ p->source->id == snd_soc_dapm_output) {
+ w->is_source = 0;
+ break;
+ }
+ }
+ break;
+ case snd_soc_dapm_output:
+ w->is_sink = 1;
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (p->sink->id == snd_soc_dapm_spk ||
+ p->sink->id == snd_soc_dapm_hp ||
+ p->sink->id == snd_soc_dapm_line ||
+ p->sink->id == snd_soc_dapm_input) {
+ w->is_sink = 0;
+ break;
+ }
+ }
+ break;
+ case snd_soc_dapm_line:
+ w->is_sink = !list_empty(&w->sources);
+ w->is_source = !list_empty(&w->sinks);
+ break;
+ default:
+ break;
+ }
+}
+
static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
const char *control,
@@ -2318,6 +2320,27 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_path *path;
int ret;
+ if (wsink->is_supply && !wsource->is_supply) {
+ dev_err(dapm->dev,
+ "Connecting non-supply widget to supply widget is not supported (%s -> %s)\n",
+ wsource->name, wsink->name);
+ return -EINVAL;
+ }
+
+ if (connected && !wsource->is_supply) {
+ dev_err(dapm->dev,
+ "connected() callback only supported for supply widgets (%s -> %s)\n",
+ wsource->name, wsink->name);
+ return -EINVAL;
+ }
+
+ if (wsource->is_supply && control) {
+ dev_err(dapm->dev,
+ "Conditional paths are not supported for supply widgets (%s -> [%s] -> %s)\n",
+ wsource->name, control, wsink->name);
+ return -EINVAL;
+ }
+
path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
if (!path)
return -ENOMEM;
@@ -2330,85 +2353,49 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
INIT_LIST_HEAD(&path->list_source);
INIT_LIST_HEAD(&path->list_sink);
- /* check for external widgets */
- if (wsink->id == snd_soc_dapm_input) {
- if (wsource->id == snd_soc_dapm_micbias ||
- wsource->id == snd_soc_dapm_mic ||
- wsource->id == snd_soc_dapm_line ||
- wsource->id == snd_soc_dapm_output)
- wsink->ext = 1;
- }
- if (wsource->id == snd_soc_dapm_output) {
- if (wsink->id == snd_soc_dapm_spk ||
- wsink->id == snd_soc_dapm_hp ||
- wsink->id == snd_soc_dapm_line ||
- wsink->id == snd_soc_dapm_input)
- wsource->ext = 1;
- }
-
- dapm_mark_dirty(wsource, "Route added");
- dapm_mark_dirty(wsink, "Route added");
+ if (wsource->is_supply || wsink->is_supply)
+ path->is_supply = 1;
/* connect static paths */
if (control == NULL) {
- list_add(&path->list, &dapm->card->paths);
- list_add(&path->list_sink, &wsink->sources);
- list_add(&path->list_source, &wsource->sinks);
path->connect = 1;
- return 0;
- }
-
- /* connect dynamic paths */
- switch (wsink->id) {
- case snd_soc_dapm_adc:
- case snd_soc_dapm_dac:
- case snd_soc_dapm_pga:
- case snd_soc_dapm_out_drv:
- case snd_soc_dapm_input:
- case snd_soc_dapm_output:
- case snd_soc_dapm_siggen:
- case snd_soc_dapm_micbias:
- case snd_soc_dapm_vmid:
- case snd_soc_dapm_pre:
- case snd_soc_dapm_post:
- case snd_soc_dapm_supply:
- case snd_soc_dapm_regulator_supply:
- case snd_soc_dapm_clock_supply:
- case snd_soc_dapm_aif_in:
- case snd_soc_dapm_aif_out:
- case snd_soc_dapm_dai_in:
- case snd_soc_dapm_dai_out:
- case snd_soc_dapm_dai_link:
- case snd_soc_dapm_kcontrol:
- list_add(&path->list, &dapm->card->paths);
- list_add(&path->list_sink, &wsink->sources);
- list_add(&path->list_source, &wsource->sinks);
- path->connect = 1;
- return 0;
- case snd_soc_dapm_mux:
- ret = dapm_connect_mux(dapm, wsource, wsink, path, control,
- &wsink->kcontrol_news[0]);
- if (ret != 0)
- goto err;
- break;
- case snd_soc_dapm_switch:
- case snd_soc_dapm_mixer:
- case snd_soc_dapm_mixer_named_ctl:
- ret = dapm_connect_mixer(dapm, wsource, wsink, path, control);
- if (ret != 0)
+ } else {
+ /* connect dynamic paths */
+ switch (wsink->id) {
+ case snd_soc_dapm_mux:
+ ret = dapm_connect_mux(dapm, path, control);
+ if (ret != 0)
+ goto err;
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_mixer_named_ctl:
+ ret = dapm_connect_mixer(dapm, path, control);
+ if (ret != 0)
+ goto err;
+ break;
+ default:
+ dev_err(dapm->dev,
+ "Control not supported for path %s -> [%s] -> %s\n",
+ wsource->name, control, wsink->name);
+ ret = -EINVAL;
goto err;
- break;
- case snd_soc_dapm_hp:
- case snd_soc_dapm_mic:
- case snd_soc_dapm_line:
- case snd_soc_dapm_spk:
- list_add(&path->list, &dapm->card->paths);
- list_add(&path->list_sink, &wsink->sources);
- list_add(&path->list_source, &wsource->sinks);
- path->connect = 0;
- return 0;
+ }
}
+ list_add(&path->list, &dapm->card->paths);
+ list_add(&path->list_sink, &wsink->sources);
+ list_add(&path->list_source, &wsource->sinks);
+
+ dapm_update_widget_flags(wsource);
+ dapm_update_widget_flags(wsink);
+
+ dapm_mark_dirty(wsource, "Route added");
+ dapm_mark_dirty(wsink, "Route added");
+
+ if (dapm->card->instantiated && path->connect)
+ dapm_path_invalidate(path);
+
return 0;
err:
kfree(path);
@@ -2489,6 +2476,7 @@ err:
static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route)
{
+ struct snd_soc_dapm_widget *wsource, *wsink;
struct snd_soc_dapm_path *path, *p;
const char *sink;
const char *source;
@@ -2526,10 +2514,19 @@ static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
}
if (path) {
- dapm_mark_dirty(path->source, "Route removed");
- dapm_mark_dirty(path->sink, "Route removed");
+ wsource = path->source;
+ wsink = path->sink;
+
+ dapm_mark_dirty(wsource, "Route removed");
+ dapm_mark_dirty(wsink, "Route removed");
+ if (path->connect)
+ dapm_path_invalidate(path);
dapm_free_path(path);
+
+ /* Update any path related flags */
+ dapm_update_widget_flags(wsource);
+ dapm_update_widget_flags(wsink);
} else {
dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n",
source, sink);
@@ -3087,40 +3084,44 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
}
switch (w->id) {
- case snd_soc_dapm_switch:
- case snd_soc_dapm_mixer:
- case snd_soc_dapm_mixer_named_ctl:
+ case snd_soc_dapm_mic:
+ case snd_soc_dapm_input:
+ w->is_source = 1;
w->power_check = dapm_generic_check_power;
break;
- case snd_soc_dapm_mux:
+ case snd_soc_dapm_spk:
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_output:
+ w->is_sink = 1;
w->power_check = dapm_generic_check_power;
break;
- case snd_soc_dapm_dai_out:
- w->power_check = dapm_adc_check_power;
- break;
- case snd_soc_dapm_dai_in:
- w->power_check = dapm_dac_check_power;
+ case snd_soc_dapm_vmid:
+ case snd_soc_dapm_siggen:
+ w->is_source = 1;
+ w->power_check = dapm_always_on_check_power;
break;
+ case snd_soc_dapm_mux:
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_mixer_named_ctl:
case snd_soc_dapm_adc:
case snd_soc_dapm_aif_out:
case snd_soc_dapm_dac:
case snd_soc_dapm_aif_in:
case snd_soc_dapm_pga:
case snd_soc_dapm_out_drv:
- case snd_soc_dapm_input:
- case snd_soc_dapm_output:
case snd_soc_dapm_micbias:
- case snd_soc_dapm_spk:
- case snd_soc_dapm_hp:
- case snd_soc_dapm_mic:
case snd_soc_dapm_line:
case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_dai_out:
+ case snd_soc_dapm_dai_in:
w->power_check = dapm_generic_check_power;
break;
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
case snd_soc_dapm_clock_supply:
case snd_soc_dapm_kcontrol:
+ w->is_supply = 1;
w->power_check = dapm_supply_check_power;
break;
default:
@@ -3137,6 +3138,9 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
INIT_LIST_HEAD(&w->dirty);
list_add(&w->list, &dapm->card->widgets);
+ w->inputs = -1;
+ w->outputs = -1;
+
/* machine layer set ups unconnected pins and insertions */
w->connected = 1;
return w;
@@ -3484,6 +3488,14 @@ static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
break;
}
+
+ if (w->id == snd_soc_dapm_dai_in) {
+ w->is_source = w->active;
+ dapm_widget_invalidate_input_paths(w);
+ } else {
+ w->is_sink = w->active;
+ dapm_widget_invalidate_output_paths(w);
+ }
}
}
@@ -3610,7 +3622,15 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
}
dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
- w->connected = 1;
+ if (!w->connected) {
+ /*
+ * w->force does not affect the number of input or output paths,
+ * so we only have to recheck if w->connected is changed
+ */
+ dapm_widget_invalidate_input_paths(w);
+ dapm_widget_invalidate_output_paths(w);
+ w->connected = 1;
+ }
w->force = 1;
dapm_mark_dirty(w, "force enable");
@@ -3788,35 +3808,54 @@ int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
+/**
+ * dapm_is_external_path() - Checks if a path is an external path
+ * @card: The card the path belongs to
+ * @path: The path to check
+ *
+ * Returns true if the path is either between two different DAPM contexts or
+ * between two external pins of the same DAPM context. Otherwise returns
+ * false.
+ */
+static bool dapm_is_external_path(struct snd_soc_card *card,
+ struct snd_soc_dapm_path *path)
+{
+ dev_dbg(card->dev,
+ "... Path %s(id:%d dapm:%p) - %s(id:%d dapm:%p)\n",
+ path->source->name, path->source->id, path->source->dapm,
+ path->sink->name, path->sink->id, path->sink->dapm);
+
+ /* Connection between two different DAPM contexts */
+ if (path->source->dapm != path->sink->dapm)
+ return true;
+
+ /* Loopback connection from external pin to external pin */
+ if (path->sink->id == snd_soc_dapm_input) {
+ switch (path->source->id) {
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_micbias:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
static bool snd_soc_dapm_widget_in_card_paths(struct snd_soc_card *card,
struct snd_soc_dapm_widget *w)
{
struct snd_soc_dapm_path *p;
- list_for_each_entry(p, &card->paths, list) {
- if ((p->source == w) || (p->sink == w)) {
- dev_dbg(card->dev,
- "... Path %s(id:%d dapm:%p) - %s(id:%d dapm:%p)\n",
- p->source->name, p->source->id, p->source->dapm,
- p->sink->name, p->sink->id, p->sink->dapm);
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (dapm_is_external_path(card, p))
+ return true;
+ }
- /* Connected to something other than the codec */
- if (p->source->dapm != p->sink->dapm)
- return true;
- /*
- * Loopback connection from codec external pin to
- * codec external pin
- */
- if (p->sink->id == snd_soc_dapm_input) {
- switch (p->source->id) {
- case snd_soc_dapm_output:
- case snd_soc_dapm_micbias:
- return true;
- default:
- break;
- }
- }
- }
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (dapm_is_external_path(card, p))
+ return true;
}
return false;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index ab47fea997a3..4380dcc064a5 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -116,7 +116,7 @@ EXPORT_SYMBOL_GPL(snd_soc_jack_report);
*
* @jack: ASoC jack
* @count: Number of zones
- * @zone: Array of zones
+ * @zones: Array of zones
*
* After this function has been called the zones specified in the
* array will be associated with the jack.
@@ -309,7 +309,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
/* GPIO descriptor */
gpios[i].desc = gpiod_get_index(gpios[i].gpiod_dev,
gpios[i].name,
- gpios[i].idx);
+ gpios[i].idx, GPIOD_IN);
if (IS_ERR(gpios[i].desc)) {
ret = PTR_ERR(gpios[i].desc);
dev_err(gpios[i].gpiod_dev,
@@ -327,17 +327,14 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
goto undo;
}
- ret = gpio_request(gpios[i].gpio, gpios[i].name);
+ ret = gpio_request_one(gpios[i].gpio, GPIOF_IN,
+ gpios[i].name);
if (ret)
goto undo;
gpios[i].desc = gpio_to_desc(gpios[i].gpio);
}
- ret = gpiod_direction_input(gpios[i].desc);
- if (ret)
- goto err;
-
INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
gpios[i].jack = jack;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
new file mode 100644
index 000000000000..100d92b5b77e
--- /dev/null
+++ b/sound/soc/soc-ops.c
@@ -0,0 +1,952 @@
+/*
+ * soc-ops.c -- Generic ASoC operations
+ *
+ * Copyright 2005 Wolfson Microelectronics PLC.
+ * Copyright 2005 Openedhand Ltd.
+ * Copyright (C) 2010 Slimlogic Ltd.
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ * with code, comments and ideas from :-
+ * Richard Purdie <richard@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dpcm.h>
+#include <sound/initval.h>
+
+/**
+ * snd_soc_info_enum_double - enumerated double mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a double enumerated
+ * mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+ return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2,
+ e->items, e->texts);
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
+
+/**
+ * snd_soc_get_enum_double - enumerated double mixer get callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a double enumerated mixer.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int val, item;
+ unsigned int reg_val;
+ int ret;
+
+ ret = snd_soc_component_read(component, e->reg, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val >> e->shift_l) & e->mask;
+ item = snd_soc_enum_val_to_item(e, val);
+ ucontrol->value.enumerated.item[0] = item;
+ if (e->shift_l != e->shift_r) {
+ val = (reg_val >> e->shift_r) & e->mask;
+ item = snd_soc_enum_val_to_item(e, val);
+ ucontrol->value.enumerated.item[1] = item;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);
+
+/**
+ * snd_soc_put_enum_double - enumerated double mixer put callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value of a double enumerated mixer.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int *item = ucontrol->value.enumerated.item;
+ unsigned int val;
+ unsigned int mask;
+
+ if (item[0] >= e->items)
+ return -EINVAL;
+ val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
+ mask = e->mask << e->shift_l;
+ if (e->shift_l != e->shift_r) {
+ if (item[1] >= e->items)
+ return -EINVAL;
+ val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
+ mask |= e->mask << e->shift_r;
+ }
+
+ return snd_soc_component_update_bits(component, e->reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
+
+/**
+ * snd_soc_read_signed - Read a codec register and interpret it as a signed value
+ * @component: component
+ * @reg: Register to read
+ * @mask: Mask to use after shifting the register value
+ * @shift: Right shift of register value
+ * @sign_bit: Bit that describes if a number is negative or not.
+ * @signed_val: Pointer to where the read value should be stored
+ *
+ * This function reads a codec register. The register value is shifted right
+ * by 'shift' bits and masked with the given 'mask'. Afterwards it translates
+ * the given register value into a signed integer if sign_bit is non-zero.
+ *
+ * Returns 0 on success, otherwise an error value
+ */
+static int snd_soc_read_signed(struct snd_soc_component *component,
+ unsigned int reg, unsigned int mask, unsigned int shift,
+ unsigned int sign_bit, int *signed_val)
+{
+ int ret;
+ unsigned int val;
+
+ ret = snd_soc_component_read(component, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val >> shift) & mask;
+
+ if (!sign_bit) {
+ *signed_val = val;
+ return 0;
+ }
+
+ /* non-negative number */
+ if (!(val & BIT(sign_bit))) {
+ *signed_val = val;
+ return 0;
+ }
+
+ ret = val;
+
+ /*
+ * The register most probably does not contain a full-sized int.
+ * Instead we have an arbitrary number of bits in a signed
+ * representation which has to be translated into a full-sized int.
+ * This is done by filling up all bits above the sign-bit.
+ */
+ ret |= ~((int)(BIT(sign_bit) - 1));
+
+ *signed_val = ret;
+
+ return 0;
+}
+
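A stand-alone illustration of the sign extension performed above (a sketch, not part of the patch; BIT() is the usual kernel macro):

/* Returns the signed value of a "sign_bit + 1"-bit two's-complement field. */
static int example_sign_extend(unsigned int raw, unsigned int sign_bit)
{
	if (!(raw & BIT(sign_bit)))
		return raw;				/* non-negative */
	return raw | ~((int)(BIT(sign_bit) - 1));	/* fill the upper bits */
}
/* example_sign_extend(0x1e, 4) == -2, example_sign_extend(0x0e, 4) == 14 */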
+/**
+ * snd_soc_info_volsw - single mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single mixer control, or a double
+ * mixer control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int platform_max;
+
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = platform_max - mc->min;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
+
+/**
+ * snd_soc_get_volsw - single mixer get callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a single mixer control, or a double mixer
+ * control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int rshift = mc->rshift;
+ int max = mc->max;
+ int min = mc->min;
+ int sign_bit = mc->sign_bit;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ int val;
+ int ret;
+
+ if (sign_bit)
+ mask = BIT(sign_bit + 1) - 1;
+
+ ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val);
+ if (ret)
+ return ret;
+
+ ucontrol->value.integer.value[0] = val - min;
+ if (invert)
+ ucontrol->value.integer.value[0] =
+ max - ucontrol->value.integer.value[0];
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ if (reg == reg2)
+ ret = snd_soc_read_signed(component, reg, mask, rshift,
+ sign_bit, &val);
+ else
+ ret = snd_soc_read_signed(component, reg2, mask, shift,
+ sign_bit, &val);
+ if (ret)
+ return ret;
+
+ ucontrol->value.integer.value[1] = val - min;
+ if (invert)
+ ucontrol->value.integer.value[1] =
+ max - ucontrol->value.integer.value[1];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_volsw);
+
+/**
+ * snd_soc_put_volsw - single mixer put callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value of a single mixer control, or a double mixer
+ * control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int rshift = mc->rshift;
+ int max = mc->max;
+ int min = mc->min;
+ unsigned int sign_bit = mc->sign_bit;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ int err;
+ bool type_2r = false;
+ unsigned int val2 = 0;
+ unsigned int val, val_mask;
+
+ if (sign_bit)
+ mask = BIT(sign_bit + 1) - 1;
+
+ val = ((ucontrol->value.integer.value[0] + min) & mask);
+ if (invert)
+ val = max - val;
+ val_mask = mask << shift;
+ val = val << shift;
+ if (snd_soc_volsw_is_stereo(mc)) {
+ val2 = ((ucontrol->value.integer.value[1] + min) & mask);
+ if (invert)
+ val2 = max - val2;
+ if (reg == reg2) {
+ val_mask |= mask << rshift;
+ val |= val2 << rshift;
+ } else {
+ val2 = val2 << shift;
+ type_2r = true;
+ }
+ }
+ err = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (err < 0)
+ return err;
+
+ if (type_2r)
+ err = snd_soc_component_update_bits(component, reg2, val_mask,
+ val2);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
+
+/**
+ * snd_soc_get_volsw_sx - single mixer get callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a single mixer control, or a double mixer
+ * control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int rshift = mc->rshift;
+ int max = mc->max;
+ int min = mc->min;
+ int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int val;
+ int ret;
+
+ ret = snd_soc_component_read(component, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ ret = snd_soc_component_read(component, reg2, &val);
+ if (ret < 0)
+ return ret;
+
+ val = ((val >> rshift) - min) & mask;
+ ucontrol->value.integer.value[1] = val;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
+
+/**
+ * snd_soc_put_volsw_sx - double mixer set callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value of a double mixer control that spans 2 registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int rshift = mc->rshift;
+ int max = mc->max;
+ int min = mc->min;
+ int mask = (1 << (fls(min + max) - 1)) - 1;
+ int err = 0;
+ unsigned int val, val_mask, val2 = 0;
+
+ val_mask = mask << shift;
+ val = (ucontrol->value.integer.value[0] + min) & mask;
+ val = val << shift;
+
+ err = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (err < 0)
+ return err;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ val_mask = mask << rshift;
+ val2 = (ucontrol->value.integer.value[1] + min) & mask;
+ val2 = val2 << rshift;
+
+ err = snd_soc_component_update_bits(component, reg2, val_mask,
+ val2);
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
+
+/**
+ * snd_soc_info_volsw_range - single mixer info callback with range.
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information, within a range, about a single
+ * mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int platform_max;
+ int min = mc->min;
+
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = platform_max - min;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);
+
+/**
+ * snd_soc_put_volsw_range - single mixer put value callback with range.
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value, within a range, for a single mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ unsigned int reg = mc->reg;
+ unsigned int rreg = mc->rreg;
+ unsigned int shift = mc->shift;
+ int min = mc->min;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ unsigned int val, val_mask;
+ int ret;
+
+ if (invert)
+ val = (max - ucontrol->value.integer.value[0]) & mask;
+ else
+ val = ((ucontrol->value.integer.value[0] + min) & mask);
+ val_mask = mask << shift;
+ val = val << shift;
+
+ ret = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (ret < 0)
+ return ret;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ if (invert)
+ val = (max - ucontrol->value.integer.value[1]) & mask;
+ else
+ val = ((ucontrol->value.integer.value[1] + min) & mask);
+ val_mask = mask << shift;
+ val = val << shift;
+
+ ret = snd_soc_component_update_bits(component, rreg, val_mask,
+ val);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
+
+/**
+ * snd_soc_get_volsw_range - single mixer get callback with range
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value, within a range, of a single mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int rreg = mc->rreg;
+ unsigned int shift = mc->shift;
+ int min = mc->min;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ unsigned int val;
+ int ret;
+
+ ret = snd_soc_component_read(component, reg, &val);
+ if (ret)
+ return ret;
+
+ ucontrol->value.integer.value[0] = (val >> shift) & mask;
+ if (invert)
+ ucontrol->value.integer.value[0] =
+ max - ucontrol->value.integer.value[0];
+ else
+ ucontrol->value.integer.value[0] =
+ ucontrol->value.integer.value[0] - min;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ ret = snd_soc_component_read(component, rreg, &val);
+ if (ret)
+ return ret;
+
+ ucontrol->value.integer.value[1] = (val >> shift) & mask;
+ if (invert)
+ ucontrol->value.integer.value[1] =
+ max - ucontrol->value.integer.value[1];
+ else
+ ucontrol->value.integer.value[1] =
+ ucontrol->value.integer.value[1] - min;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
+
+/**
+ * snd_soc_limit_volume - Set a new limit on an existing volume control.
+ *
+ * @codec: where to look for the control
+ * @name: Name of the control
+ * @max: new maximum limit
+ *
+ * Return 0 for success, else error.
+ */
+int snd_soc_limit_volume(struct snd_soc_codec *codec,
+ const char *name, int max)
+{
+ struct snd_card *card = codec->component.card->snd_card;
+ struct snd_kcontrol *kctl;
+ struct soc_mixer_control *mc;
+ int found = 0;
+ int ret = -EINVAL;
+
+ /* Sanity check for name and max */
+ if (unlikely(!name || max <= 0))
+ return -EINVAL;
+
+ list_for_each_entry(kctl, &card->controls, list) {
+ if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ mc = (struct soc_mixer_control *)kctl->private_value;
+ if (max <= mc->max) {
+ mc->platform_max = max;
+ ret = 0;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
+
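A hedged usage sketch for the helper above; the control name and limit are made-up values a machine driver might use, not anything mandated by the patch:

/* Hypothetical: cap a codec volume control at a platform-safe level. */
static int example_limit_headphone(struct snd_soc_codec *codec)
{
	return snd_soc_limit_volume(codec, "Headphone Volume", 100);
}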
+int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes *params = (void *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = params->num_regs * component->val_bytes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_bytes_info);
+
+int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes *params = (void *)kcontrol->private_value;
+ int ret;
+
+ if (component->regmap)
+ ret = regmap_raw_read(component->regmap, params->base,
+ ucontrol->value.bytes.data,
+ params->num_regs * component->val_bytes);
+ else
+ ret = -EINVAL;
+
+ /* Hide any masked bytes to ensure consistent data reporting */
+ if (ret == 0 && params->mask) {
+ switch (component->val_bytes) {
+ case 1:
+ ucontrol->value.bytes.data[0] &= ~params->mask;
+ break;
+ case 2:
+ ((u16 *)(&ucontrol->value.bytes.data))[0]
+ &= cpu_to_be16(~params->mask);
+ break;
+ case 4:
+ ((u32 *)(&ucontrol->value.bytes.data))[0]
+ &= cpu_to_be32(~params->mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_bytes_get);
+
+int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes *params = (void *)kcontrol->private_value;
+ int ret, len;
+ unsigned int val, mask;
+ void *data;
+
+ if (!component->regmap || !params->num_regs)
+ return -EINVAL;
+
+ len = params->num_regs * component->val_bytes;
+
+ data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ /*
+ * If we've got a mask then we need to preserve the register
+ * bits. We shouldn't modify the incoming data so take a
+ * copy.
+ */
+ if (params->mask) {
+ ret = regmap_read(component->regmap, params->base, &val);
+ if (ret != 0)
+ goto out;
+
+ val &= params->mask;
+
+ switch (component->val_bytes) {
+ case 1:
+ ((u8 *)data)[0] &= ~params->mask;
+ ((u8 *)data)[0] |= val;
+ break;
+ case 2:
+ mask = ~params->mask;
+ ret = regmap_parse_val(component->regmap,
+ &mask, &mask);
+ if (ret != 0)
+ goto out;
+
+ ((u16 *)data)[0] &= mask;
+
+ ret = regmap_parse_val(component->regmap,
+ &val, &val);
+ if (ret != 0)
+ goto out;
+
+ ((u16 *)data)[0] |= val;
+ break;
+ case 4:
+ mask = ~params->mask;
+ ret = regmap_parse_val(component->regmap,
+ &mask, &mask);
+ if (ret != 0)
+ goto out;
+
+ ((u32 *)data)[0] &= mask;
+
+ ret = regmap_parse_val(component->regmap,
+ &val, &val);
+ if (ret != 0)
+ goto out;
+
+ ((u32 *)data)[0] |= val;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = regmap_raw_write(component->regmap, params->base,
+ data, len);
+
+out:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
+
+int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *ucontrol)
+{
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+ ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ ucontrol->count = params->max;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext);
+
+int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag,
+ unsigned int size, unsigned int __user *tlv)
+{
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ unsigned int count = size < params->max ? size : params->max;
+ int ret = -ENXIO;
+
+ switch (op_flag) {
+ case SNDRV_CTL_TLV_OP_READ:
+ if (params->get)
+ ret = params->get(tlv, count);
+ break;
+ case SNDRV_CTL_TLV_OP_WRITE:
+ if (params->put)
+ ret = params->put(tlv, count);
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback);
+
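A sketch of how a driver might wire these helpers into a TLV bytes control; the names are illustrative and the soc_bytes_ext layout is assumed from the callback above, so treat this as a sketch rather than a reference:

static int example_tlv_get(unsigned int __user *tlv, unsigned int count)
{
	/* copy up to "count" bytes of driver data to user space */
	return 0;
}

static int example_tlv_put(unsigned int __user *tlv, unsigned int count)
{
	/* parse up to "count" bytes of user-supplied data */
	return 0;
}

static struct soc_bytes_ext example_bytes = {
	.max = 512,
	.get = example_tlv_get,
	.put = example_tlv_put,
};

static const struct snd_kcontrol_new example_bytes_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Example Bytes",
	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
	.info = snd_soc_bytes_info_ext,
	.tlv.c = snd_soc_bytes_tlv_callback,
	.private_value = (unsigned long)&example_bytes,
};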
+/**
+ * snd_soc_info_xr_sx - signed multi register info callback
+ * @kcontrol: mreg control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a control that can
+ * span multiple codec registers which together
+ * form a single signed value in a MSB/LSB manner.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = mc->min;
+ uinfo->value.integer.max = mc->max;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx);
+
+/**
+ * snd_soc_get_xr_sx - signed multi register get callback
+ * @kcontrol: mreg control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a control that can span
+ * multiple codec registers which together form a single
+ * signed value in a MSB/LSB manner. The control supports
+ * specifying the total number of bits used, to allow for bitfields
+ * that span the multiple codec registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ unsigned int regbase = mc->regbase;
+ unsigned int regcount = mc->regcount;
+ unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
+ unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int invert = mc->invert;
+ unsigned long mask = (1UL<<mc->nbits)-1;
+ long min = mc->min;
+ long max = mc->max;
+ long val = 0;
+ unsigned int regval;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < regcount; i++) {
+ ret = snd_soc_component_read(component, regbase+i, &regval);
+ if (ret)
+ return ret;
+ val |= (regval & regwmask) << (regwshift*(regcount-i-1));
+ }
+ val &= mask;
+ if (min < 0 && val > max)
+ val |= ~mask;
+ if (invert)
+ val = max - val;
+ ucontrol->value.integer.value[0] = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);
+
+/**
+ * snd_soc_put_xr_sx - signed multi register put callback
+ * @kcontrol: mreg control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value of a control that can span
+ * multiple codec registers which together form a single
+ * signed value in a MSB/LSB manner. The control supports
+ * specifying the total number of bits used, to allow for bitfields
+ * that span the multiple codec registers.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ unsigned int regbase = mc->regbase;
+ unsigned int regcount = mc->regcount;
+ unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
+ unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int invert = mc->invert;
+ unsigned long mask = (1UL<<mc->nbits)-1;
+ long max = mc->max;
+ long val = ucontrol->value.integer.value[0];
+ unsigned int i, regval, regmask;
+ int err;
+
+ if (invert)
+ val = max - val;
+ val &= mask;
+ for (i = 0; i < regcount; i++) {
+ regval = (val >> (regwshift*(regcount-i-1))) & regwmask;
+ regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask;
+ err = snd_soc_component_update_bits(component, regbase+i,
+ regmask, regval);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
+
+/**
+ * snd_soc_get_strobe - strobe get callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value of a strobe mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = 1 << shift;
+ unsigned int invert = mc->invert != 0;
+ unsigned int val;
+ int ret;
+
+ ret = snd_soc_component_read(component, reg, &val);
+ if (ret)
+ return ret;
+
+ val &= mask;
+
+ if (shift != 0 && val != 0)
+ val = val >> shift;
+ ucontrol->value.enumerated.item[0] = val ^ invert;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_strobe);
+
+/**
+ * snd_soc_put_strobe - strobe put callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to strobe a register bit high then low (or the inverse)
+ * in one pass of a single mixer enum control.
+ *
+ * Returns 1 for success.
+ */
+int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = 1 << shift;
+ unsigned int invert = mc->invert != 0;
+ unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
+ unsigned int val1 = (strobe ^ invert) ? mask : 0;
+ unsigned int val2 = (strobe ^ invert) ? 0 : mask;
+ int err;
+
+ err = snd_soc_component_update_bits(component, reg, mask, val1);
+ if (err < 0)
+ return err;
+
+ return snd_soc_component_update_bits(component, reg, mask, val2);
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_strobe);
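For reference, a minimal strobe control built on the generic SOC_SINGLE_EXT macro (the register 0x10 and bit 3 are made-up values); writing 1 pulses the bit high then low, and the invert flag reverses the polarity:

/* Illustrative only: register 0x10, bit 3, non-inverted. */
static const struct snd_kcontrol_new example_strobe_ctl =
	SOC_SINGLE_EXT("Example Strobe", 0x10, 3, 1, 0,
		       snd_soc_get_strobe, snd_soc_put_strobe);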
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 57277dd79e11..eb87d96e2cf0 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -654,6 +654,8 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
codec_dai->rate = 0;
}
+ snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
+
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
@@ -772,6 +774,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
for (i = 0; i < rtd->num_codecs; i++)
snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
substream->stream);
+ snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
out:
mutex_unlock(&rtd->pcm_mutex);
@@ -1664,6 +1667,10 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
continue;
+ /* do not free hw if this BE is used by another FE */
+ if (be->dpcm[stream].users > 1)
+ continue;
+
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
@@ -2288,7 +2295,13 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
fe->dai_link->name);
/* skip if FE doesn't have playback capability */
- if (!fe->cpu_dai->driver->playback.channels_min)
+ if (!fe->cpu_dai->driver->playback.channels_min
+ || !fe->codec_dai->driver->playback.channels_min)
+ goto capture;
+
+ /* skip if FE isn't currently playing */
+ if (!fe->cpu_dai->playback_active
+ || !fe->codec_dai->playback_active)
goto capture;
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
@@ -2318,7 +2331,13 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
dpcm_path_put(&list);
capture:
/* skip if FE doesn't have capture capability */
- if (!fe->cpu_dai->driver->capture.channels_min)
+ if (!fe->cpu_dai->driver->capture.channels_min
+ || !fe->codec_dai->driver->capture.channels_min)
+ continue;
+
+ /* skip if FE isn't currently capturing */
+ if (!fe->cpu_dai->capture_active
+ || !fe->codec_dai->capture_active)
continue;
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 7f22ca35a413..362c69ac1d6c 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -154,7 +154,6 @@ static int snd_soc_dummy_remove(struct platform_device *pdev)
static struct platform_driver soc_dummy_driver = {
.driver = {
.name = "snd-soc-dummy",
- .owner = THIS_MODULE,
},
.probe = snd_soc_dummy_probe,
.remove = snd_soc_dummy_remove,
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index 4ab442a63d7e..a4028601da01 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -274,7 +274,6 @@ static struct platform_driver spdif_in_driver = {
.probe = spdif_in_probe,
.driver = {
.name = "spdif-in",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/spear/spdif_out.c b/sound/soc/spear/spdif_out.c
index 19cca043e6e4..0a72d52d533e 100644
--- a/sound/soc/spear/spdif_out.c
+++ b/sound/soc/spear/spdif_out.c
@@ -354,7 +354,6 @@ static struct platform_driver spdif_out_driver = {
.probe = spdif_out_probe,
.driver = {
.name = "spdif-out",
- .owner = THIS_MODULE,
.pm = SPDIF_OUT_DEV_PM_OPS,
},
};
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index 3b0fa12dbff7..a68368edab9c 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -228,7 +228,7 @@ static int tegra20_ac97_probe(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver tegra20_ac97_dai = {
.name = "tegra-ac97-pcm",
- .ac97_control = 1,
+ .bus_control = true,
.probe = tegra20_ac97_probe,
.playback = {
.stream_name = "PCM Playback",
@@ -438,7 +438,6 @@ static const struct of_device_id tegra20_ac97_of_match[] = {
static struct platform_driver tegra20_ac97_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra20_ac97_of_match,
},
.probe = tegra20_ac97_platform_probe,
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index a634f13b3ffc..f52600b4f3fd 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -233,7 +233,6 @@ static struct platform_driver tegra20_das_driver = {
.remove = tegra20_das_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra20_das_of_match,
},
};
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 79a9932ffe6e..05f1c6ee99e3 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -464,7 +464,6 @@ static const struct dev_pm_ops tegra20_i2s_pm_ops = {
static struct platform_driver tegra20_i2s_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra20_i2s_of_match,
.pm = &tegra20_i2s_pm_ops,
},
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index a0ce92400faf..9141477a528d 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -387,7 +387,6 @@ static const struct dev_pm_ops tegra20_spdif_pm_ops = {
static struct platform_driver tegra20_spdif_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &tegra20_spdif_pm_ops,
},
.probe = tegra20_spdif_platform_probe,
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 0db68f49f4d9..bc94e5d8e79a 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -723,7 +723,6 @@ static struct platform_driver tegra30_ahub_driver = {
.remove = tegra30_ahub_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra30_ahub_of_match,
.pm = &tegra30_ahub_pm_ops,
},
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index f146c41dd3ec..fe36375ba89c 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -585,7 +585,6 @@ static const struct dev_pm_ops tegra30_i2s_pm_ops = {
static struct platform_driver tegra30_i2s_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = tegra30_i2s_of_match,
.pm = &tegra30_i2s_pm_ops,
},
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index a83aff09dce2..769aca2fc5f5 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -250,7 +250,6 @@ static const struct of_device_id tegra_alc5632_of_match[] = {
static struct platform_driver tegra_alc5632_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_alc5632_of_match,
},
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index 01921d7e73fa..af3fb997b752 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -308,7 +308,6 @@ static const struct of_device_id tegra_max98090_of_match[] = {
static struct platform_driver tegra_max98090_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_max98090_of_match,
},
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index a6898831fb9f..ed759a3076b8 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -44,6 +44,7 @@
struct tegra_rt5640 {
struct tegra_asoc_utils_data util_data;
int gpio_hp_det;
+ enum of_gpio_flags gpio_hp_det_flags;
};
static int tegra_rt5640_asoc_hw_params(struct snd_pcm_substream *substream,
@@ -119,6 +120,8 @@ static int tegra_rt5640_asoc_init(struct snd_soc_pcm_runtime *rtd)
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_rt5640_hp_jack_gpio.gpio = machine->gpio_hp_det;
+ tegra_rt5640_hp_jack_gpio.invert =
+ !!(machine->gpio_hp_det_flags & OF_GPIO_ACTIVE_LOW);
snd_soc_jack_add_gpios(&tegra_rt5640_hp_jack,
1,
&tegra_rt5640_hp_jack_gpio);
@@ -180,7 +183,8 @@ static int tegra_rt5640_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, card);
snd_soc_card_set_drvdata(card, machine);
- machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
+ machine->gpio_hp_det = of_get_named_gpio_flags(
+ np, "nvidia,hp-det-gpios", 0, &machine->gpio_hp_det_flags);
if (machine->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -251,7 +255,6 @@ static const struct of_device_id tegra_rt5640_of_match[] = {
static struct platform_driver tegra_rt5640_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_rt5640_of_match,
},
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index 769e28f6642e..f0cd01dbfc38 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -204,7 +204,6 @@ static const struct of_device_id tegra_wm8753_of_match[] = {
static struct platform_driver tegra_wm8753_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_wm8753_of_match,
},
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 86e05e938585..e52420dae2b4 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -386,7 +386,6 @@ static const struct of_device_id tegra_wm8903_of_match[] = {
static struct platform_driver tegra_wm8903_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_wm8903_of_match,
},
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
index de087ee3458a..2868b4839bc0 100644
--- a/sound/soc/tegra/tegra_wm9712.c
+++ b/sound/soc/tegra/tegra_wm9712.c
@@ -167,7 +167,6 @@ static const struct of_device_id tegra_wm9712_of_match[] = {
static struct platform_driver tegra_wm9712_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = tegra_wm9712_of_match,
},
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 589d2d9b553a..2cea203c4f5f 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -193,7 +193,6 @@ MODULE_DEVICE_TABLE(of, trimslice_of_match);
static struct platform_driver tegra_snd_trimslice_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = trimslice_of_match,
},
.probe = tegra_snd_trimslice_probe,
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index 9edd68db9f48..e2ad00e3cae1 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -152,7 +152,7 @@ static int txx9aclc_ac97_remove(struct snd_soc_dai *dai)
}
static struct snd_soc_dai_driver txx9aclc_ac97_dai = {
- .ac97_control = 1,
+ .bus_control = true,
.probe = txx9aclc_ac97_probe,
.remove = txx9aclc_ac97_remove,
.playback = {
@@ -224,7 +224,6 @@ static struct platform_driver txx9aclc_ac97_driver = {
.remove = txx9aclc_ac97_dev_remove,
.driver = {
.name = "txx9aclc-ac97",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/txx9/txx9aclc-generic.c b/sound/soc/txx9/txx9aclc-generic.c
index b056a1431ed4..d0b1e7759968 100644
--- a/sound/soc/txx9/txx9aclc-generic.c
+++ b/sound/soc/txx9/txx9aclc-generic.c
@@ -66,7 +66,6 @@ static struct platform_driver txx9aclc_generic_driver = {
.remove = __exit_p(txx9aclc_generic_remove),
.driver = {
.name = "txx9aclc-generic",
- .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index cd71fd889d8b..070e44e251ce 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -292,7 +292,7 @@ static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
- struct platform_device *pdev = to_platform_device(dai->platform->dev);
+ struct platform_device *pdev = to_platform_device(rtd->platform->dev);
struct txx9aclc_soc_device *dev;
struct resource *r;
int i;
@@ -429,7 +429,6 @@ static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
static struct platform_driver txx9aclc_pcm_driver = {
.driver = {
.name = "txx9aclc-pcm-audio",
- .owner = THIS_MODULE,
},
.probe = txx9aclc_soc_platform_probe,
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index b3b66aa98dce..4e0c0e502ade 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -63,12 +63,8 @@ static void mop500_of_node_put(void)
int i;
for (i = 0; i < 2; i++) {
- if (mop500_dai_links[i].cpu_of_node)
- of_node_put((struct device_node *)
- mop500_dai_links[i].cpu_of_node);
- if (mop500_dai_links[i].codec_of_node)
- of_node_put((struct device_node *)
- mop500_dai_links[i].codec_of_node);
+ of_node_put(mop500_dai_links[i].cpu_of_node);
+ of_node_put(mop500_dai_links[i].codec_of_node);
}
}
@@ -159,7 +155,6 @@ static const struct of_device_id snd_soc_mop500_match[] = {
static struct platform_driver snd_soc_mop500_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "snd-soc-mop500",
.of_match_table = snd_soc_mop500_match,
},
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 5f4807b2c007..978f2d7316b0 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -856,7 +856,6 @@ static const struct of_device_id ux500_msp_i2s_match[] = {
static struct platform_driver msp_i2s_driver = {
.driver = {
.name = "ux500-msp-i2s",
- .owner = THIS_MODULE,
.of_match_table = ux500_msp_i2s_match,
},
.probe = ux500_msp_drv_probe,
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 4a85e1433472..86280d63b76d 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -1067,7 +1067,6 @@ static const struct of_device_id amd7930_match[] = {
static struct platform_driver amd7930_sbus_driver = {
.driver = {
.name = "audio",
- .owner = THIS_MODULE,
.of_match_table = amd7930_match,
},
.probe = amd7930_sbus_probe,
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 4e91bcaa3664..30bdc971883b 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1285,19 +1285,11 @@ static int snd_cs4231_timer(struct snd_card *card)
static int snd_cs4231_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {
+ static const char * const texts[4] = {
"Line", "CD", "Mic", "Mix"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 2;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item > 3)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 2, 4, texts);
}
static int snd_cs4231_get_mux(struct snd_kcontrol *kcontrol,
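The hunk above is one instance of a pattern repeated throughout this series: hand-rolled ENUMERATED info callbacks are collapsed into snd_ctl_enum_info(), which fills in the element type, channel count and item count and copies the (clamped) item name. A minimal sketch of the resulting callback shape, with illustrative control texts:

/* Sketch only -- texts and function name are illustrative. */
static const char * const example_texts[] = { "Off", "On" };

static int example_enum_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	/* one channel, ARRAY_SIZE(example_texts) enum items */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(example_texts),
				 example_texts);
}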
@@ -2119,7 +2111,6 @@ MODULE_DEVICE_TABLE(of, cs4231_match);
static struct platform_driver cs4231_driver = {
.driver = {
.name = "audio",
- .owner = THIS_MODULE,
.of_match_table = cs4231_match,
},
.probe = cs4231_probe,
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index b2c3d0d5dca3..0190cb6332f2 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2686,7 +2686,6 @@ MODULE_DEVICE_TABLE(of, dbri_match);
static struct platform_driver dbri_sbus_driver = {
.driver = {
.name = "dbri",
- .owner = THIS_MODULE,
.of_match_table = dbri_match,
},
.probe = dbri_probe,
diff --git a/sound/usb/6fire/control.c b/sound/usb/6fire/control.c
index 184e3987ac24..54656eed6e2e 100644
--- a/sound/usb/6fire/control.c
+++ b/sound/usb/6fire/control.c
@@ -25,8 +25,8 @@
#include "comm.h"
#include "chip.h"
-static char *opt_coax_texts[2] = { "Optical", "Coax" };
-static char *line_phono_texts[2] = { "Line", "Phono" };
+static const char * const opt_coax_texts[2] = { "Optical", "Coax" };
+static const char * const line_phono_texts[2] = { "Line", "Phono" };
/*
* data that needs to be sent to device. sets up card internal stuff.
@@ -327,14 +327,7 @@ static int usb6fire_control_input_vol_get(struct snd_kcontrol *kcontrol,
static int usb6fire_control_line_phono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- line_phono_texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, line_phono_texts);
}
static int usb6fire_control_line_phono_put(struct snd_kcontrol *kcontrol,
@@ -361,14 +354,7 @@ static int usb6fire_control_line_phono_get(struct snd_kcontrol *kcontrol,
static int usb6fire_control_opt_coax_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- opt_coax_texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, 2, opt_coax_texts);
}
static int usb6fire_control_opt_coax_put(struct snd_kcontrol *kcontrol,
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index 3b02e54b8f6d..62c25e74f0e5 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -316,7 +316,7 @@ static int usb6fire_fw_fpga_upload(
while (c != end) {
for (i = 0; c != end && i < FPGA_BUFSIZE; i++, c++)
- buffer[i] = byte_rev_table[(u8) *c];
+ buffer[i] = bitrev8((u8)*c);
ret = usb6fire_fw_fpga_write(device, buffer, i);
if (ret < 0) {
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index ba40489b2de4..36f4115eb1cd 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -679,25 +679,16 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
void usb6fire_pcm_abort(struct sfire_chip *chip)
{
struct pcm_runtime *rt = chip->pcm;
- unsigned long flags;
int i;
if (rt) {
rt->panic = true;
- if (rt->playback.instance) {
- snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
- snd_pcm_stop(rt->playback.instance,
- SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
- }
+ if (rt->playback.instance)
+ snd_pcm_stop_xrun(rt->playback.instance);
- if (rt->capture.instance) {
- snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
- snd_pcm_stop(rt->capture.instance,
- SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
- }
+ if (rt->capture.instance)
+ snd_pcm_stop_xrun(rt->capture.instance);
for (i = 0; i < PCM_N_URBS; i++) {
usb_poison_urb(&rt->in_urbs[i].instance);
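Another recurring conversion: snd_pcm_stop_xrun() takes the stream lock and checks the running state itself, so the drivers above can drop the snd_pcm_stream_lock_irqsave()/snd_pcm_stop()/unlock boilerplate. A sketch of the simplified abort path (the argument layout only loosely mirrors the 6fire driver):

/* Sketch only -- snd_pcm_stop_xrun() does its own locking. */
static void example_pcm_abort(struct snd_pcm_substream *playback,
			      struct snd_pcm_substream *capture)
{
	if (playback)
		snd_pcm_stop_xrun(playback);
	if (capture)
		snd_pcm_stop_xrun(capture);
}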
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 2b92f0dcbc4c..bcee4060fd18 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -9,6 +9,7 @@ snd-usb-audio-objs := card.o \
helper.o \
mixer.o \
mixer_quirks.o \
+ mixer_scarlett.o \
pcm.o \
proc.o \
quirks.o \
diff --git a/sound/usb/card.c b/sound/usb/card.c
index f61ebb17cc64..1fab9778807a 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -112,15 +112,13 @@ static struct usb_driver usb_audio_driver;
/*
* disconnect streams
- * called from snd_usb_audio_disconnect()
+ * called from usb_audio_disconnect()
*/
-static void snd_usb_stream_disconnect(struct list_head *head)
+static void snd_usb_stream_disconnect(struct snd_usb_stream *as)
{
int idx;
- struct snd_usb_stream *as;
struct snd_usb_substream *subs;
- as = list_entry(head, struct snd_usb_stream, list);
for (idx = 0; idx < 2; idx++) {
subs = &as->substream[idx];
if (!subs->num_formats)
@@ -307,10 +305,10 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
static int snd_usb_audio_free(struct snd_usb_audio *chip)
{
- struct list_head *p, *n;
+ struct snd_usb_endpoint *ep, *n;
- list_for_each_safe(p, n, &chip->ep_list)
- snd_usb_endpoint_free(p);
+ list_for_each_entry_safe(ep, n, &chip->ep_list, list)
+ snd_usb_endpoint_free(ep);
mutex_destroy(&chip->mutex);
kfree(chip);
@@ -323,16 +321,6 @@ static int snd_usb_audio_dev_free(struct snd_device *device)
return snd_usb_audio_free(chip);
}
-static void remove_trailing_spaces(char *str)
-{
- char *p;
-
- if (!*str)
- return;
- for (p = str + strlen(str) - 1; p >= str && isspace(*p); p--)
- *p = 0;
-}
-
/*
* create a chip instance and set its names.
*/
@@ -416,7 +404,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
USB_ID_PRODUCT(chip->usb_id));
}
}
- remove_trailing_spaces(card->shortname);
+ strim(card->shortname);
/* retrieve the vendor and device strings as longname */
if (quirk && quirk->vendor_name && *quirk->vendor_name) {
@@ -430,7 +418,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
/* we don't really care if there isn't any vendor string */
}
if (len > 0) {
- remove_trailing_spaces(card->longname);
+ strim(card->longname);
if (*card->longname)
strlcat(card->longname, " ", sizeof(card->longname));
}
@@ -475,14 +463,14 @@ static int snd_usb_audio_create(struct usb_interface *intf,
* only at the first time. the successive calls of this function will
* append the pcm interface to the corresponding card.
*/
-static struct snd_usb_audio *
-snd_usb_audio_probe(struct usb_device *dev,
- struct usb_interface *intf,
- const struct usb_device_id *usb_id)
+static int usb_audio_probe(struct usb_interface *intf,
+ const struct usb_device_id *usb_id)
{
- const struct snd_usb_audio_quirk *quirk = (const struct snd_usb_audio_quirk *)usb_id->driver_info;
- int i, err;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ const struct snd_usb_audio_quirk *quirk =
+ (const struct snd_usb_audio_quirk *)usb_id->driver_info;
struct snd_usb_audio *chip;
+ int i, err;
struct usb_host_interface *alts;
int ifnum;
u32 id;
@@ -492,10 +480,11 @@ snd_usb_audio_probe(struct usb_device *dev,
id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum)
- goto __err_val;
+ return -ENXIO;
- if (snd_usb_apply_boot_quirk(dev, intf, quirk) < 0)
- goto __err_val;
+ err = snd_usb_apply_boot_quirk(dev, intf, quirk);
+ if (err < 0)
+ return err;
/*
* found a config. now register to ALSA
@@ -508,6 +497,7 @@ snd_usb_audio_probe(struct usb_device *dev,
if (usb_chip[i] && usb_chip[i]->dev == dev) {
if (usb_chip[i]->shutdown) {
dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
+ err = -EIO;
goto __error;
}
chip = usb_chip[i];
@@ -523,15 +513,16 @@ snd_usb_audio_probe(struct usb_device *dev,
if (enable[i] && ! usb_chip[i] &&
(vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
(pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
- if (snd_usb_audio_create(intf, dev, i, quirk,
- &chip) < 0) {
+ err = snd_usb_audio_create(intf, dev, i, quirk,
+ &chip);
+ if (err < 0)
goto __error;
- }
chip->pm_intf = intf;
break;
}
if (!chip) {
dev_err(&dev->dev, "no available usb audio device\n");
+ err = -ENODEV;
goto __error;
}
}
@@ -548,28 +539,32 @@ snd_usb_audio_probe(struct usb_device *dev,
err = 1; /* continue */
if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
/* need some special handlings */
- if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
+ err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk);
+ if (err < 0)
goto __error;
}
if (err > 0) {
/* create normal USB audio interfaces */
- if (snd_usb_create_streams(chip, ifnum) < 0 ||
- snd_usb_create_mixer(chip, ifnum, ignore_ctl_error) < 0) {
+ err = snd_usb_create_streams(chip, ifnum);
+ if (err < 0)
+ goto __error;
+ err = snd_usb_create_mixer(chip, ifnum, ignore_ctl_error);
+ if (err < 0)
goto __error;
- }
}
/* we are allowed to call snd_card_register() many times */
- if (snd_card_register(chip->card) < 0) {
+ err = snd_card_register(chip->card);
+ if (err < 0)
goto __error;
- }
usb_chip[chip->index] = chip;
chip->num_interfaces++;
chip->probing = 0;
+ usb_set_intfdata(intf, chip);
mutex_unlock(&register_mutex);
- return chip;
+ return 0;
__error:
if (chip) {
@@ -578,17 +573,16 @@ snd_usb_audio_probe(struct usb_device *dev,
chip->probing = 0;
}
mutex_unlock(&register_mutex);
- __err_val:
- return NULL;
+ return err;
}
/*
* we need to take care of counter, since disconnection can be called also
* many times as well as usb_audio_probe().
*/
-static void snd_usb_audio_disconnect(struct usb_device *dev,
- struct snd_usb_audio *chip)
+static void usb_audio_disconnect(struct usb_interface *intf)
{
+ struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_card *card;
struct list_head *p;
bool was_shutdown;
@@ -604,12 +598,14 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
mutex_lock(&register_mutex);
if (!was_shutdown) {
+ struct snd_usb_stream *as;
struct snd_usb_endpoint *ep;
+ struct usb_mixer_interface *mixer;
snd_card_disconnect(card);
/* release the pcm resources */
- list_for_each(p, &chip->pcm_list) {
- snd_usb_stream_disconnect(p);
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ snd_usb_stream_disconnect(as);
}
/* release the endpoint resources */
list_for_each_entry(ep, &chip->ep_list, list) {
@@ -620,8 +616,8 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
snd_usbmidi_disconnect(p);
}
/* release mixer resources */
- list_for_each(p, &chip->mixer_list) {
- snd_usb_mixer_disconnect(p);
+ list_for_each_entry(mixer, &chip->mixer_list, list) {
+ snd_usb_mixer_disconnect(mixer);
}
}
@@ -635,27 +631,6 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
}
}
-/*
- * new 2.5 USB kernel API
- */
-static int usb_audio_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct snd_usb_audio *chip;
- chip = snd_usb_audio_probe(interface_to_usbdev(intf), intf, id);
- if (chip) {
- usb_set_intfdata(intf, chip);
- return 0;
- } else
- return -EIO;
-}
-
-static void usb_audio_disconnect(struct usb_interface *intf)
-{
- snd_usb_audio_disconnect(interface_to_usbdev(intf),
- usb_get_intfdata(intf));
-}
-
#ifdef CONFIG_PM
int snd_usb_autoresume(struct snd_usb_audio *chip)
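The card.c rework above folds the old snd_usb_audio_probe()/snd_usb_audio_disconnect() wrappers into the usb_driver callbacks proper: probe now returns real error codes and stores the chip with usb_set_intfdata(), and disconnect retrieves it again with usb_get_intfdata(). In outline (sketch only; the example_* helpers are hypothetical stand-ins for chip creation and teardown):

static int example_usb_audio_probe(struct usb_interface *intf,
				   const struct usb_device_id *id)
{
	struct snd_usb_audio *chip;
	int err;

	/* hypothetical helper standing in for chip creation + card setup */
	err = example_create_chip(intf, id, &chip);
	if (err < 0)
		return err;

	usb_set_intfdata(intf, chip);	/* looked up again in disconnect */
	return 0;
}

static void example_usb_audio_disconnect(struct usb_interface *intf)
{
	struct snd_usb_audio *chip = usb_get_intfdata(intf);

	/* tear down card, streams, endpoints and mixers for this chip */
	example_release_chip(chip);	/* hypothetical */
}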
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 114e3e7ff511..03b074419964 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -348,6 +348,8 @@ static void snd_complete_urb(struct urb *urb)
{
struct snd_urb_ctx *ctx = urb->context;
struct snd_usb_endpoint *ep = ctx->ep;
+ struct snd_pcm_substream *substream;
+ unsigned long flags;
int err;
if (unlikely(urb->status == -ENOENT || /* unlinked */
@@ -364,8 +366,6 @@ static void snd_complete_urb(struct urb *urb)
goto exit_clear;
if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
- unsigned long flags;
-
spin_lock_irqsave(&ep->lock, flags);
list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
spin_unlock_irqrestore(&ep->lock, flags);
@@ -389,7 +389,10 @@ static void snd_complete_urb(struct urb *urb)
return;
usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
- //snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ if (ep->data_subs && ep->data_subs->pcm_substream) {
+ substream = ep->data_subs->pcm_substream;
+ snd_pcm_stop_xrun(substream);
+ }
exit_clear:
clear_bit(ctx->index, &ep->active_mask);
@@ -1002,15 +1005,12 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
/**
* snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
*
- * @ep: the list header of the endpoint to free
+ * @ep: the endpoint to free
*
 * This frees all resources of the given ep.
*/
-void snd_usb_endpoint_free(struct list_head *head)
+void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
{
- struct snd_usb_endpoint *ep;
-
- ep = list_entry(head, struct snd_usb_endpoint, list);
kfree(ep);
}
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index e61ee5c356a3..6428392d8f62 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -24,7 +24,7 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
-void snd_usb_endpoint_free(struct list_head *head);
+void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index d3d49525a16b..5bfb695547f8 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -365,6 +365,8 @@ static void snd_usbmidi_error_timer(unsigned long data)
if (in && in->error_resubmit) {
in->error_resubmit = 0;
for (j = 0; j < INPUT_URBS; ++j) {
+ if (atomic_read(&in->urbs[j]->use_count))
+ continue;
in->urbs[j]->dev = umidi->dev;
snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
}
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index a1bab149df4d..9581089c28c5 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -613,24 +613,14 @@ static int start_usb_playback(struct ua101 *ua)
static void abort_alsa_capture(struct ua101 *ua)
{
- unsigned long flags;
-
- if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
- snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
- snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
- }
+ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+ snd_pcm_stop_xrun(ua->capture.substream);
}
static void abort_alsa_playback(struct ua101 *ua)
{
- unsigned long flags;
-
- if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
- snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
- snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
- }
+ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+ snd_pcm_stop_xrun(ua->playback.substream);
}
static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6e354d326858..41650d5b93b7 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -136,6 +136,10 @@ check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
return strlcpy(buf, p->name, buflen);
}
+/* ignore the error value if ignore_ctl_error flag is set */
+#define filter_error(cval, err) \
+ ((cval)->head.mixer->ignore_ctl_error ? 0 : (err))
+
/* check whether the control should be ignored */
static inline int
check_ignored_ctl(const struct usbmix_name_map *p)
@@ -286,13 +290,13 @@ static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
- struct snd_usb_audio *chip = cval->mixer->chip;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[2];
int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
int timeout = 10;
int idx = 0, err;
- err = snd_usb_autoresume(cval->mixer->chip);
+ err = snd_usb_autoresume(chip);
if (err < 0)
return -EIO;
@@ -300,7 +304,7 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
while (timeout-- > 0) {
if (chip->shutdown)
break;
- idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, val_len) >= val_len) {
@@ -316,14 +320,14 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
out:
up_read(&chip->shutdown_rwsem);
- snd_usb_autosuspend(cval->mixer->chip);
+ snd_usb_autosuspend(chip);
return err;
}
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
- struct snd_usb_audio *chip = cval->mixer->chip;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[2 + 3 * sizeof(__u16)]; /* enough space for one range */
unsigned char *val;
int idx = 0, ret, size;
@@ -347,7 +351,7 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
if (chip->shutdown) {
ret = -ENODEV;
} else {
- idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, size);
@@ -392,7 +396,7 @@ static int get_ctl_value(struct usb_mixer_elem_info *cval, int request,
{
validx += cval->idx_off;
- return (cval->mixer->protocol == UAC_VERSION_1) ?
+ return (cval->head.mixer->protocol == UAC_VERSION_1) ?
get_ctl_value_v1(cval, request, validx, value_ret) :
get_ctl_value_v2(cval, request, validx, value_ret);
}
@@ -412,7 +416,7 @@ static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval,
value);
}
-static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
+int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval,
int channel, int index, int *value)
{
int err;
@@ -423,8 +427,8 @@ static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
}
err = get_cur_mix_raw(cval, channel, value);
if (err < 0) {
- if (!cval->mixer->ignore_ctl_error)
- usb_audio_dbg(cval->mixer->chip,
+ if (!cval->head.mixer->ignore_ctl_error)
+ usb_audio_dbg(cval->head.mixer->chip,
"cannot get current value for control %d ch %d: err = %d\n",
cval->control, channel, err);
return err;
@@ -441,13 +445,13 @@ static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set)
{
- struct snd_usb_audio *chip = cval->mixer->chip;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
unsigned char buf[2];
int idx = 0, val_len, err, timeout = 10;
validx += cval->idx_off;
- if (cval->mixer->protocol == UAC_VERSION_1) {
+ if (cval->head.mixer->protocol == UAC_VERSION_1) {
val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
} else { /* UAC_VERSION_2 */
/* audio class v2 controls are always 2 bytes in size */
@@ -472,7 +476,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
while (timeout-- > 0) {
if (chip->shutdown)
break;
- idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
if (snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
@@ -497,7 +501,7 @@ static int set_cur_ctl_value(struct usb_mixer_elem_info *cval,
return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value);
}
-static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
+int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
int index, int value)
{
int err;
@@ -506,7 +510,7 @@ static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
cval->ch_readonly & (1 << (channel - 1));
if (read_only) {
- usb_audio_dbg(cval->mixer->chip,
+ usb_audio_dbg(cval->head.mixer->chip,
"%s(): channel %d of control %d is read_only\n",
__func__, channel, cval->control);
return 0;
@@ -565,10 +569,10 @@ static int check_matrix_bitmap(unsigned char *bmap,
* if failed, give up and free the control instance.
*/
-int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
struct snd_kcontrol *kctl)
{
- struct usb_mixer_elem_info *cval = kctl->private_data;
+ struct usb_mixer_interface *mixer = list->mixer;
int err;
while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
@@ -578,9 +582,9 @@ int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
err);
return err;
}
- cval->elem_id = &kctl->id;
- cval->next_id_elem = mixer->id_elems[cval->id];
- mixer->id_elems[cval->id] = cval;
+ list->kctl = kctl;
+ list->next_id_elem = mixer->id_elems[list->id];
+ mixer->id_elems[list->id] = list;
return 0;
}
@@ -815,7 +819,7 @@ static struct usb_feature_control_info audio_feature_info[] = {
};
/* private_free callback */
-static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
+void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
kfree(kctl->private_data);
kctl->private_data = NULL;
@@ -829,7 +833,7 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
static void volume_control_quirks(struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
- struct snd_usb_audio *chip = cval->mixer->chip;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
switch (chip->usb_id) {
case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
@@ -954,10 +958,10 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
}
if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 ||
get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
- usb_audio_err(cval->mixer->chip,
+ usb_audio_err(cval->head.mixer->chip,
"%d:%d: cannot get min/max values for control %d (id %d)\n",
- cval->id, snd_usb_ctrl_intf(cval->mixer->chip),
- cval->control, cval->id);
+ cval->head.id, snd_usb_ctrl_intf(cval->head.mixer->chip),
+ cval->control, cval->head.id);
return -EINVAL;
}
if (get_ctl_value(cval, UAC_GET_RES,
@@ -998,7 +1002,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
else
test -= cval->res;
if (test < cval->min || test > cval->max ||
- set_cur_mix_value(cval, minchn, 0, test) ||
+ snd_usb_set_cur_mix_value(cval, minchn, 0, test) ||
get_cur_mix_raw(cval, minchn, &check)) {
cval->res = last_valid_res;
break;
@@ -1007,7 +1011,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
break;
cval->res *= 2;
}
- set_cur_mix_value(cval, minchn, 0, saved);
+ snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
}
cval->initialized = 1;
@@ -1061,7 +1065,7 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
kcontrol->vd[0].access &=
~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
- snd_ctl_notify(cval->mixer->chip->card,
+ snd_ctl_notify(cval->head.mixer->chip->card,
SNDRV_CTL_EVENT_MASK_INFO,
&kcontrol->id);
}
@@ -1086,9 +1090,9 @@ static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol,
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cval->cmask & (1 << c)))
continue;
- err = get_cur_mix_value(cval, c + 1, cnt, &val);
+ err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &val);
if (err < 0)
- return cval->mixer->ignore_ctl_error ? 0 : err;
+ return filter_error(cval, err);
val = get_relative_value(cval, val);
ucontrol->value.integer.value[cnt] = val;
cnt++;
@@ -1096,9 +1100,9 @@ static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol,
return 0;
} else {
/* master channel */
- err = get_cur_mix_value(cval, 0, 0, &val);
+ err = snd_usb_get_cur_mix_value(cval, 0, 0, &val);
if (err < 0)
- return cval->mixer->ignore_ctl_error ? 0 : err;
+ return filter_error(cval, err);
val = get_relative_value(cval, val);
ucontrol->value.integer.value[0] = val;
}
@@ -1118,26 +1122,26 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
for (c = 0; c < MAX_CHANNELS; c++) {
if (!(cval->cmask & (1 << c)))
continue;
- err = get_cur_mix_value(cval, c + 1, cnt, &oval);
+ err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &oval);
if (err < 0)
- return cval->mixer->ignore_ctl_error ? 0 : err;
+ return filter_error(cval, err);
val = ucontrol->value.integer.value[cnt];
val = get_abs_value(cval, val);
if (oval != val) {
- set_cur_mix_value(cval, c + 1, cnt, val);
+ snd_usb_set_cur_mix_value(cval, c + 1, cnt, val);
changed = 1;
}
cnt++;
}
} else {
/* master channel */
- err = get_cur_mix_value(cval, 0, 0, &oval);
+ err = snd_usb_get_cur_mix_value(cval, 0, 0, &oval);
if (err < 0)
- return cval->mixer->ignore_ctl_error ? 0 : err;
+ return filter_error(cval, err);
val = ucontrol->value.integer.value[0];
val = get_abs_value(cval, val);
if (val != oval) {
- set_cur_mix_value(cval, 0, 0, val);
+ snd_usb_set_cur_mix_value(cval, 0, 0, val);
changed = 1;
}
}
@@ -1231,8 +1235,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return;
- cval->mixer = state->mixer;
- cval->id = unitid;
+ snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = control;
cval->cmask = ctl_mask;
cval->val_type = audio_feature_info[control-1].type;
@@ -1250,7 +1253,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
/*
* If all channels in the mask are marked read-only, make the control
- * read-only. set_cur_mix_value() will check the mask again and won't
+ * read-only. snd_usb_set_cur_mix_value() will check the mask again and won't
* issue write commands to read-only channels.
*/
if (cval->channels == readonly_mask)
@@ -1263,7 +1266,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
kfree(cval);
return;
}
- kctl->private_free = usb_mixer_elem_free;
+ kctl->private_free = snd_usb_mixer_elem_free;
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
mapped_name = len != 0;
@@ -1290,9 +1293,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
kctl->id.name,
sizeof(kctl->id.name), 1);
if (!len)
- len = snprintf(kctl->id.name,
- sizeof(kctl->id.name),
- "Feature %d", unitid);
+ snprintf(kctl->id.name, sizeof(kctl->id.name),
+ "Feature %d", unitid);
}
if (!mapped_name)
@@ -1305,9 +1307,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
*/
if (!mapped_name && !(state->oterm.type >> 16)) {
if ((state->oterm.type & 0xff00) == 0x0100)
- len = append_ctl_name(kctl, " Capture");
+ append_ctl_name(kctl, " Capture");
else
- len = append_ctl_name(kctl, " Playback");
+ append_ctl_name(kctl, " Playback");
}
append_ctl_name(kctl, control == UAC_FU_MUTE ?
" Switch" : " Volume");
@@ -1344,14 +1346,14 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
range);
usb_audio_warn(state->chip,
"[%d] FU [%s] ch = %d, val = %d/%d/%d",
- cval->id, kctl->id.name, cval->channels,
+ cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max, cval->res);
}
usb_audio_dbg(state->chip, "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
- cval->id, kctl->id.name, cval->channels,
+ cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max, cval->res);
- snd_usb_mixer_add_control(state->mixer, kctl);
+ snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
@@ -1525,8 +1527,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
if (!cval)
return;
- cval->mixer = state->mixer;
- cval->id = unitid;
+ snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = in_ch + 1; /* based on 1 */
cval->val_type = USB_MIXER_S16;
for (i = 0; i < num_outs; i++) {
@@ -1547,7 +1548,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
kfree(cval);
return;
}
- kctl->private_free = usb_mixer_elem_free;
+ kctl->private_free = snd_usb_mixer_elem_free;
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (!len)
@@ -1558,8 +1559,8 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
append_ctl_name(kctl, " Volume");
usb_audio_dbg(state->chip, "[%d] MU [%s] ch = %d, val = %d/%d\n",
- cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
- snd_usb_mixer_add_control(state->mixer, kctl);
+ cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max);
+ snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
@@ -1629,12 +1630,10 @@ static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol,
int err, val;
err = get_cur_ctl_value(cval, cval->control << 8, &val);
- if (err < 0 && cval->mixer->ignore_ctl_error) {
+ if (err < 0) {
ucontrol->value.integer.value[0] = cval->min;
- return 0;
+ return filter_error(cval, err);
}
- if (err < 0)
- return err;
val = get_relative_value(cval, val);
ucontrol->value.integer.value[0] = val;
return 0;
@@ -1648,11 +1647,8 @@ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol,
int val, oval, err;
err = get_cur_ctl_value(cval, cval->control << 8, &oval);
- if (err < 0) {
- if (cval->mixer->ignore_ctl_error)
- return 0;
- return err;
- }
+ if (err < 0)
+ return filter_error(cval, err);
val = ucontrol->value.integer.value[0];
val = get_abs_value(cval, val);
if (val != oval) {
@@ -1814,8 +1810,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
- cval->mixer = state->mixer;
- cval->id = unitid;
+ snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = valinfo->control;
cval->val_type = valinfo->val_type;
cval->channels = 1;
@@ -1847,7 +1842,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
kfree(cval);
return -ENOMEM;
}
- kctl->private_free = usb_mixer_elem_free;
+ kctl->private_free = snd_usb_mixer_elem_free;
if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) {
/* nothing */ ;
@@ -1868,10 +1863,10 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
usb_audio_dbg(state->chip,
"[%d] PU [%s] ch = %d, val = %d/%d\n",
- cval->id, kctl->id.name, cval->channels,
+ cval->head.id, kctl->id.name, cval->channels,
cval->min, cval->max);
- err = snd_usb_mixer_add_control(state->mixer, kctl);
+ err = snd_usb_mixer_add_control(&cval->head, kctl);
if (err < 0)
return err;
}
@@ -1924,11 +1919,8 @@ static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol,
err = get_cur_ctl_value(cval, cval->control << 8, &val);
if (err < 0) {
- if (cval->mixer->ignore_ctl_error) {
- ucontrol->value.enumerated.item[0] = 0;
- return 0;
- }
- return err;
+ ucontrol->value.enumerated.item[0] = 0;
+ return filter_error(cval, err);
}
val = get_relative_value(cval, val);
ucontrol->value.enumerated.item[0] = val;
@@ -1943,11 +1935,8 @@ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol,
int val, oval, err;
err = get_cur_ctl_value(cval, cval->control << 8, &oval);
- if (err < 0) {
- if (cval->mixer->ignore_ctl_error)
- return 0;
- return err;
- }
+ if (err < 0)
+ return filter_error(cval, err);
val = ucontrol->value.enumerated.item[0];
val = get_abs_value(cval, val);
if (val != oval) {
@@ -2024,8 +2013,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
cval = kzalloc(sizeof(*cval), GFP_KERNEL);
if (!cval)
return -ENOMEM;
- cval->mixer = state->mixer;
- cval->id = unitid;
+ snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->val_type = USB_MIXER_U8;
cval->channels = 1;
cval->min = 1;
@@ -2096,11 +2084,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
}
usb_audio_dbg(state->chip, "[%d] SU [%s] items = %d\n",
- cval->id, kctl->id.name, desc->bNrInPins);
- if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
- return err;
-
- return 0;
+ cval->head.id, kctl->id.name, desc->bNrInPins);
+ return snd_usb_mixer_add_control(&cval->head, kctl);
}
/*
@@ -2245,25 +2230,21 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
- struct usb_mixer_elem_info *info;
+ struct usb_mixer_elem_list *list;
- for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem)
+ for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
- info->elem_id);
+ &list->kctl->id);
}
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
- int unitid,
- struct usb_mixer_elem_info *cval)
+ struct usb_mixer_elem_list *list)
{
+ struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list;
static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN",
"S8", "U8", "S16", "U16"};
- snd_iprintf(buffer, " Unit: %i\n", unitid);
- if (cval->elem_id)
- snd_iprintf(buffer, " Control: name=\"%s\", index=%i\n",
- cval->elem_id->name, cval->elem_id->index);
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
- "channels=%i, type=\"%s\"\n", cval->id,
+ "channels=%i, type=\"%s\"\n", cval->head.id,
cval->control, cval->cmask, cval->channels,
val_types[cval->val_type]);
snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n",
@@ -2275,7 +2256,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
{
struct snd_usb_audio *chip = entry->private_data;
struct usb_mixer_interface *mixer;
- struct usb_mixer_elem_info *cval;
+ struct usb_mixer_elem_list *list;
int unitid;
list_for_each_entry(mixer, &chip->mixer_list, list) {
@@ -2285,9 +2266,17 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
mixer->ignore_ctl_error);
snd_iprintf(buffer, "Card: %s\n", chip->card->longname);
for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) {
- for (cval = mixer->id_elems[unitid]; cval;
- cval = cval->next_id_elem)
- snd_usb_mixer_dump_cval(buffer, unitid, cval);
+ for (list = mixer->id_elems[unitid]; list;
+ list = list->next_id_elem) {
+ snd_iprintf(buffer, " Unit: %i\n", list->id);
+ if (list->kctl)
+ snd_iprintf(buffer,
+ " Control: name=\"%s\", index=%i\n",
+ list->kctl->id.name,
+ list->kctl->id.index);
+ if (list->dump)
+ list->dump(buffer, list);
+ }
}
}
}
@@ -2295,7 +2284,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
int attribute, int value, int index)
{
- struct usb_mixer_elem_info *info;
+ struct usb_mixer_elem_list *list;
__u8 unitid = (index >> 8) & 0xff;
__u8 control = (value >> 8) & 0xff;
__u8 channel = value & 0xff;
@@ -2307,7 +2296,13 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
return;
}
- for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) {
+ for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
+ struct usb_mixer_elem_info *info;
+
+ if (!list->kctl)
+ continue;
+
+ info = (struct usb_mixer_elem_info *)list;
if (info->control != control)
continue;
@@ -2320,7 +2315,7 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
- info->elem_id);
+ &info->head.kctl->id);
break;
case UAC2_CS_RANGE:
@@ -2485,11 +2480,8 @@ _error:
return err;
}
-void snd_usb_mixer_disconnect(struct list_head *p)
+void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
- struct usb_mixer_interface *mixer;
-
- mixer = list_entry(p, struct usb_mixer_interface, list);
usb_kill_urb(mixer->urb);
usb_kill_urb(mixer->rc_urb);
}
@@ -2521,8 +2513,9 @@ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer)
return 0;
}
-static int restore_mixer_value(struct usb_mixer_elem_info *cval)
+static int restore_mixer_value(struct usb_mixer_elem_list *list)
{
+ struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list;
int c, err, idx;
if (cval->cmask) {
@@ -2531,7 +2524,7 @@ static int restore_mixer_value(struct usb_mixer_elem_info *cval)
if (!(cval->cmask & (1 << c)))
continue;
if (cval->cached & (1 << c)) {
- err = set_cur_mix_value(cval, c + 1, idx,
+ err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
cval->cache_val[idx]);
if (err < 0)
return err;
@@ -2541,7 +2534,7 @@ static int restore_mixer_value(struct usb_mixer_elem_info *cval)
} else {
/* master */
if (cval->cached) {
- err = set_cur_mix_value(cval, 0, 0, *cval->cache_val);
+ err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
if (err < 0)
return err;
}
@@ -2552,19 +2545,19 @@ static int restore_mixer_value(struct usb_mixer_elem_info *cval)
int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
{
- struct usb_mixer_elem_info *cval;
+ struct usb_mixer_elem_list *list;
int id, err;
- /* FIXME: any mixer quirks? */
-
if (reset_resume) {
/* restore cached mixer values */
for (id = 0; id < MAX_ID_ELEMS; id++) {
- for (cval = mixer->id_elems[id]; cval;
- cval = cval->next_id_elem) {
- err = restore_mixer_value(cval);
- if (err < 0)
- return err;
+ for (list = mixer->id_elems[id]; list;
+ list = list->next_id_elem) {
+ if (list->resume) {
+ err = list->resume(list);
+ if (err < 0)
+ return err;
+ }
}
}
}
@@ -2572,3 +2565,15 @@ int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
return snd_usb_mixer_activate(mixer);
}
#endif
+
+void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
+ struct usb_mixer_interface *mixer,
+ int unitid)
+{
+ list->mixer = mixer;
+ list->id = unitid;
+ list->dump = snd_usb_mixer_dump_cval;
+#ifdef CONFIG_PM
+ list->resume = restore_mixer_value;
+#endif
+}
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 73b1f649447b..d3268f0ee2b3 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -1,6 +1,8 @@
#ifndef __USBMIXER_H
#define __USBMIXER_H
+#include <sound/info.h>
+
struct usb_mixer_interface {
struct snd_usb_audio *chip;
struct usb_host_interface *hostif;
@@ -8,7 +10,7 @@ struct usb_mixer_interface {
unsigned int ignore_ctl_error;
struct urb *urb;
/* array[MAX_ID_ELEMS], indexed by unit id */
- struct usb_mixer_elem_info **id_elems;
+ struct usb_mixer_elem_list **id_elems;
/* the usb audio specification version this interface complies to */
int protocol;
@@ -20,9 +22,6 @@ struct usb_mixer_interface {
struct urb *rc_urb;
struct usb_ctrlrequest *rc_setup_packet;
u8 rc_buffer[6];
-
- u8 audigy2nx_leds[3];
- u8 xonar_u1_status;
};
#define MAX_CHANNELS 16 /* max logical channels */
@@ -36,11 +35,21 @@ enum {
USB_MIXER_U16,
};
-struct usb_mixer_elem_info {
+typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
+ struct usb_mixer_elem_list *list);
+typedef int (*usb_mixer_elem_resume_func_t)(struct usb_mixer_elem_list *elem);
+
+struct usb_mixer_elem_list {
struct usb_mixer_interface *mixer;
- struct usb_mixer_elem_info *next_id_elem; /* list of controls with same id */
- struct snd_ctl_elem_id *elem_id;
+ struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
+ struct snd_kcontrol *kctl;
unsigned int id;
+ usb_mixer_elem_dump_func_t dump;
+ usb_mixer_elem_resume_func_t resume;
+};
+
+struct usb_mixer_elem_info {
+ struct usb_mixer_elem_list head;
unsigned int control; /* CS or ICN (high byte) */
unsigned int cmask; /* channel mask bitmap: 0 = master */
unsigned int idx_off; /* Control index offset */
@@ -53,20 +62,25 @@ struct usb_mixer_elem_info {
int cached;
int cache_val[MAX_CHANNELS];
u8 initialized;
+ void *private_data;
};
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
int ignore_error);
-void snd_usb_mixer_disconnect(struct list_head *p);
+void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer);
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set);
-int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
struct snd_kcontrol *kctl);
+void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
+ struct usb_mixer_interface *mixer,
+ int unitid);
+
int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *_tlv);
@@ -75,4 +89,12 @@ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
#endif
+int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
+ int index, int value);
+
+int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval,
+ int channel, int index, int *value);
+
+extern void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl);
+
#endif /* __USBMIXER_H */
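To summarise the mixer refactor above: every control element is now tracked through a generic struct usb_mixer_elem_list with per-element dump/resume hooks, and usb_mixer_elem_info merely embeds it as .head. A sketch of how a driver-private control plugs into this, using only helpers introduced in this series; the value type and unit id below are illustrative:

/* Sketch only -- unit id, value type and error paths are illustrative. */
static int example_add_mixer_ctl(struct usb_mixer_interface *mixer,
				 const struct snd_kcontrol_new *knew,
				 int unitid)
{
	struct usb_mixer_elem_info *cval;
	struct snd_kcontrol *kctl;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (!cval)
		return -ENOMEM;
	/* fills head.mixer/head.id and hooks the default dump/resume */
	snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid);
	cval->val_type = USB_MIXER_BOOLEAN;
	cval->channels = 1;

	kctl = snd_ctl_new1(knew, cval);
	if (!kctl) {
		kfree(cval);
		return -ENOMEM;
	}
	kctl->private_free = snd_usb_mixer_elem_free;
	/* links the element into mixer->id_elems[unitid] */
	return snd_usb_mixer_add_control(&cval->head, kctl);
}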
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index d1d72ff50347..1994d41348f8 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -179,6 +179,11 @@ static struct usbmix_name_map audigy2nx_map[] = {
{ 0 } /* terminator */
};
+static struct usbmix_name_map mbox1_map[] = {
+ { 1, "Clock" },
+ { 0 } /* terminator */
+};
+
static struct usbmix_selector_map c400_selectors[] = {
{
.id = 0x80,
@@ -416,6 +421,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = aureon_51_2_map,
},
{
+ .id = USB_ID(0x0dba, 0x1000),
+ .map = mbox1_map,
+ },
+ {
.id = USB_ID(0x13e5, 0x0001),
.map = scratch_live_map,
.ignore_ctl_error = 1,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 8c9bf4b7aaf0..dc9df007d3e3 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -41,6 +41,7 @@
#include "usbaudio.h"
#include "mixer.h"
#include "mixer_quirks.h"
+#include "mixer_scarlett.h"
#include "helper.h"
extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
@@ -52,13 +53,6 @@ struct std_mono_table {
snd_kcontrol_tlv_rw_t *tlv_callback;
};
-/* private_free callback */
-static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
-{
- kfree(kctl->private_data);
- kctl->private_data = NULL;
-}
-
/* This function allows for the creation of standard UAC controls.
* See the quirks for M-Audio FTUs or Ebox-44.
* If you don't want to set a TLV callback pass NULL.
@@ -75,7 +69,6 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
const char *name,
snd_kcontrol_tlv_rw_t *tlv_callback)
{
- int err;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
@@ -83,8 +76,7 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
if (!cval)
return -ENOMEM;
- cval->id = unitid;
- cval->mixer = mixer;
+ snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid);
cval->val_type = val_type;
cval->channels = 1;
cval->control = control;
@@ -108,7 +100,7 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
/* Set name */
snprintf(kctl->id.name, sizeof(kctl->id.name), name);
- kctl->private_free = usb_mixer_elem_free;
+ kctl->private_free = snd_usb_mixer_elem_free;
/* set TLV */
if (tlv_callback) {
@@ -118,11 +110,7 @@ static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
/* Add control to mixer */
- err = snd_usb_mixer_add_control(mixer, kctl);
- if (err < 0)
- return err;
-
- return 0;
+ return snd_usb_mixer_add_control(&cval->head, kctl);
}
static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
@@ -156,6 +144,32 @@ static int snd_create_std_mono_table(struct usb_mixer_interface *mixer,
return 0;
}
+static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
+ int id,
+ usb_mixer_elem_resume_func_t resume,
+ const struct snd_kcontrol_new *knew,
+ struct usb_mixer_elem_list **listp)
+{
+ struct usb_mixer_elem_list *list;
+ struct snd_kcontrol *kctl;
+
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ if (listp)
+ *listp = list;
+ list->mixer = mixer;
+ list->id = id;
+ list->resume = resume;
+ kctl = snd_ctl_new1(knew, list);
+ if (!kctl) {
+ kfree(list);
+ return -ENOMEM;
+ }
+ kctl->private_free = snd_usb_mixer_elem_free;
+ return snd_usb_mixer_add_control(list, kctl);
+}
+
/*
* Sound Blaster remote control configuration
*
@@ -283,84 +297,90 @@ static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer)
static int snd_audigy2nx_led_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- int index = kcontrol->private_value;
-
- ucontrol->value.integer.value[0] = mixer->audigy2nx_leds[index];
+ ucontrol->value.integer.value[0] = kcontrol->private_value >> 8;
return 0;
}
-static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer,
+ int value, int index)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- int index = kcontrol->private_value;
- int value = ucontrol->value.integer.value[0];
- int err, changed;
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
- if (value > 1)
- return -EINVAL;
- changed = value != mixer->audigy2nx_leds[index];
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown) {
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown) {
err = -ENODEV;
goto out;
}
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ if (chip->usb_id == USB_ID(0x041e, 0x3042))
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), 0x24,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
!value, 0, NULL, 0);
/* USB X-Fi S51 Pro */
- if (mixer->chip->usb_id == USB_ID(0x041e, 0x30df))
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ if (chip->usb_id == USB_ID(0x041e, 0x30df))
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), 0x24,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
!value, 0, NULL, 0);
else
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), 0x24,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
value, index + 2, NULL, 0);
out:
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
- mixer->audigy2nx_leds[index] = value;
- return changed;
+ up_read(&chip->shutdown_rwsem);
+ return err;
}
-static struct snd_kcontrol_new snd_audigy2nx_controls[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "CMSS LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 0,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Power LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 1,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Dolby Digital LED Switch",
- .info = snd_audigy2nx_led_info,
- .get = snd_audigy2nx_led_get,
- .put = snd_audigy2nx_led_put,
- .private_value = 2,
- },
+static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_interface *mixer = list->mixer;
+ int index = kcontrol->private_value & 0xff;
+ int value = ucontrol->value.integer.value[0];
+ int old_value = kcontrol->private_value >> 8;
+ int err;
+
+ if (value > 1)
+ return -EINVAL;
+ if (value == old_value)
+ return 0;
+ kcontrol->private_value = (value << 8) | index;
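+ /* cache the new LED state in bits 15:8; bits 7:0 hold the LED index */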
+ err = snd_audigy2nx_led_update(mixer, value, index);
+ return err < 0 ? err : 1;
+}
+
+static int snd_audigy2nx_led_resume(struct usb_mixer_elem_list *list)
+{
+ int priv_value = list->kctl->private_value;
+
+ return snd_audigy2nx_led_update(list->mixer, priv_value >> 8,
+ priv_value & 0xff);
+}
+
+/* name and private_value are set dynamically */
+static struct snd_kcontrol_new snd_audigy2nx_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .info = snd_audigy2nx_led_info,
+ .get = snd_audigy2nx_led_get,
+ .put = snd_audigy2nx_led_put,
+};
+
+static const char * const snd_audigy2nx_led_names[] = {
+ "CMSS LED Switch",
+ "Power LED Switch",
+ "Dolby Digital LED Switch",
};
static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
{
int i, err;
- for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
+ for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_led_names); ++i) {
+ struct snd_kcontrol_new knew;
+
/* USB X-Fi S51 doesn't have a CMSS LED */
if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
continue;
@@ -373,12 +393,16 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
mixer->chip->usb_id == USB_ID(0x041e, 0x30df) ||
mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
break;
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_audigy2nx_controls[i], mixer));
+
+ knew = snd_audigy2nx_control;
+ knew.name = snd_audigy2nx_led_names[i];
+ knew.private_value = (1 << 8) | i; /* LED on as default */
+ err = add_single_ctl_with_resume(mixer, 0,
+ snd_audigy2nx_led_resume,
+ &knew, NULL);
if (err < 0)
return err;
}
- mixer->audigy2nx_leds[1] = 1; /* Power LED is on by default */
return 0;
}
@@ -437,19 +461,9 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
static int snd_emu0204_ch_switch_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[2] = {"1/2",
- "3/4"
- };
+ static const char * const texts[2] = {"1/2", "3/4"};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
static int snd_emu0204_ch_switch_get(struct snd_kcontrol *kcontrol,
@@ -459,100 +473,122 @@ static int snd_emu0204_ch_switch_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int snd_emu0204_ch_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
+ int value)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- unsigned int value = ucontrol->value.enumerated.item[0];
- int err, changed;
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
unsigned char buf[2];
- if (value > 1)
- return -EINVAL;
-
- buf[0] = 0x01;
- buf[1] = value ? 0x02 : 0x01;
-
- changed = value != kcontrol->private_value;
- down_read(&mixer->chip->shutdown_rwsem);
+ down_read(&chip->shutdown_rwsem);
if (mixer->chip->shutdown) {
err = -ENODEV;
goto out;
}
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), UAC_SET_CUR,
+
+ buf[0] = 0x01;
+ buf[1] = value ? 0x02 : 0x01;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
0x0400, 0x0e00, buf, 2);
out:
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
+ up_read(&chip->shutdown_rwsem);
+ return err;
+}
+
+static int snd_emu0204_ch_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_interface *mixer = list->mixer;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+ int err;
+
+ if (value > 1)
+ return -EINVAL;
+
+ if (value == kcontrol->private_value)
+ return 0;
+
kcontrol->private_value = value;
- return changed;
+ err = snd_emu0204_ch_switch_update(mixer, value);
+ return err < 0 ? err : 1;
}
+static int snd_emu0204_ch_switch_resume(struct usb_mixer_elem_list *list)
+{
+ return snd_emu0204_ch_switch_update(list->mixer,
+ list->kctl->private_value);
+}
-static struct snd_kcontrol_new snd_emu0204_controls[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Front Jack Channels",
- .info = snd_emu0204_ch_switch_info,
- .get = snd_emu0204_ch_switch_get,
- .put = snd_emu0204_ch_switch_put,
- .private_value = 0,
- },
+static struct snd_kcontrol_new snd_emu0204_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Front Jack Channels",
+ .info = snd_emu0204_ch_switch_info,
+ .get = snd_emu0204_ch_switch_get,
+ .put = snd_emu0204_ch_switch_put,
+ .private_value = 0,
};
static int snd_emu0204_controls_create(struct usb_mixer_interface *mixer)
{
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(snd_emu0204_controls); ++i) {
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_emu0204_controls[i], mixer));
- if (err < 0)
- return err;
- }
-
- return 0;
+ return add_single_ctl_with_resume(mixer, 0,
+ snd_emu0204_ch_switch_resume,
+ &snd_emu0204_control, NULL);
}
+
/* ASUS Xonar U1 / U3 controls */
static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
-
- ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02);
+ ucontrol->value.integer.value[0] = !!(kcontrol->private_value & 0x02);
return 0;
}
+static int snd_xonar_u1_switch_update(struct usb_mixer_interface *mixer,
+ unsigned char status)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
+
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown)
+ err = -ENODEV;
+ else
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), 0x08,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 50, 0, &status, 1);
+ up_read(&chip->shutdown_rwsem);
+ return err;
+}
+
static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
u8 old_status, new_status;
- int err, changed;
+ int err;
- old_status = mixer->xonar_u1_status;
+ old_status = kcontrol->private_value;
if (ucontrol->value.integer.value[0])
new_status = old_status | 0x02;
else
new_status = old_status & ~0x02;
- changed = new_status != old_status;
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
- err = -ENODEV;
- else
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
- 50, 0, &new_status, 1);
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
- mixer->xonar_u1_status = new_status;
- return changed;
+ if (new_status == old_status)
+ return 0;
+
+ kcontrol->private_value = new_status;
+ err = snd_xonar_u1_switch_update(list->mixer, new_status);
+ return err < 0 ? err : 1;
+}
+
+static int snd_xonar_u1_switch_resume(struct usb_mixer_elem_list *list)
+{
+ return snd_xonar_u1_switch_update(list->mixer,
+ list->kctl->private_value);
}
static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
@@ -561,82 +597,213 @@ static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
.info = snd_ctl_boolean_mono_info,
.get = snd_xonar_u1_switch_get,
.put = snd_xonar_u1_switch_put,
+ .private_value = 0x05,
};
static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
{
+ return add_single_ctl_with_resume(mixer, 0,
+ snd_xonar_u1_switch_resume,
+ &snd_xonar_u1_output_switch, NULL);
+}
+
+/* Digidesign Mbox 1 clock source switch (internal/spdif) */
+
+static int snd_mbox1_switch_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = kctl->private_value;
+ return 0;
+}
+
+static int snd_mbox1_switch_update(struct usb_mixer_interface *mixer, int val)
+{
+ struct snd_usb_audio *chip = mixer->chip;
int err;
+ unsigned char buff[3];
+
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown) {
+ err = -ENODEV;
+ goto err;
+ }
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_xonar_u1_output_switch, mixer));
+ /* Prepare for magic command to toggle clock source */
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0), 0x81,
+ USB_DIR_IN |
+ USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE, 0x00, 0x500, buff, 1);
if (err < 0)
- return err;
- mixer->xonar_u1_status = 0x05;
- return 0;
+ goto err;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0), 0x81,
+ USB_DIR_IN |
+ USB_TYPE_CLASS |
+ USB_RECIP_ENDPOINT, 0x100, 0x81, buff, 3);
+ if (err < 0)
+ goto err;
+
+ /* Two possibilities: Internal -> send the sample rate
+ * S/PDIF sync -> send zeroes
+ * NB: the sample rate is locked to 48kHz on purpose to
+ * prevent the user from resetting it while S/PDIF sync
+ * is enabled, which would confuse this configuration.
+ */
+ if (val == 0) {
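+ /* Internal clock: 48000 Hz = 0xBB80, sent LSB first */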
+ buff[0] = 0x80;
+ buff[1] = 0xbb;
+ buff[2] = 0x00;
+ } else {
+ buff[0] = buff[1] = buff[2] = 0x00;
+ }
+
+ /* Send the magic command to toggle the clock source */
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), 0x1,
+ USB_TYPE_CLASS |
+ USB_RECIP_ENDPOINT, 0x100, 0x81, buff, 3);
+ if (err < 0)
+ goto err;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0), 0x81,
+ USB_DIR_IN |
+ USB_TYPE_CLASS |
+ USB_RECIP_ENDPOINT, 0x100, 0x81, buff, 3);
+ if (err < 0)
+ goto err;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0), 0x81,
+ USB_DIR_IN |
+ USB_TYPE_CLASS |
+ USB_RECIP_ENDPOINT, 0x100, 0x2, buff, 3);
+ if (err < 0)
+ goto err;
+
+err:
+ up_read(&chip->shutdown_rwsem);
+ return err;
+}
+
+static int snd_mbox1_switch_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ struct usb_mixer_interface *mixer = list->mixer;
+ int err;
+ bool cur_val, new_val;
+
+ cur_val = kctl->private_value;
+ new_val = ucontrol->value.enumerated.item[0];
+ if (cur_val == new_val)
+ return 0;
+
+ kctl->private_value = new_val;
+ err = snd_mbox1_switch_update(mixer, new_val);
+ return err < 0 ? err : 1;
+}
+
+static int snd_mbox1_switch_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const texts[2] = {
+ "Internal",
+ "S/PDIF"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static int snd_mbox1_switch_resume(struct usb_mixer_elem_list *list)
+{
+ return snd_mbox1_switch_update(list->mixer, list->kctl->private_value);
+}
+
+static struct snd_kcontrol_new snd_mbox1_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Clock Source",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = snd_mbox1_switch_info,
+ .get = snd_mbox1_switch_get,
+ .put = snd_mbox1_switch_put,
+ .private_value = 0
+};
+
+static int snd_mbox1_create_sync_switch(struct usb_mixer_interface *mixer)
+{
+ return add_single_ctl_with_resume(mixer, 0,
+ snd_mbox1_switch_resume,
+ &snd_mbox1_switch, NULL);
}
/* Native Instruments device quirks */
#define _MAKE_NI_CONTROL(bRequest,wIndex) ((bRequest) << 16 | (wIndex))
-static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int snd_ni_control_init_val(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
struct usb_device *dev = mixer->chip->dev;
- u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
- u16 wIndex = kcontrol->private_value & 0xffff;
- u8 tmp;
- int ret;
-
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
- ret = -ENODEV;
- else
- ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, wIndex,
- &tmp, sizeof(tmp));
- up_read(&mixer->chip->shutdown_rwsem);
+ unsigned int pval = kctl->private_value;
+ u8 value;
+ int err;
- if (ret < 0) {
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ (pval >> 16) & 0xff,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0, pval & 0xffff, &value, 1);
+ if (err < 0) {
dev_err(&dev->dev,
- "unable to issue vendor read request (ret = %d)", ret);
- return ret;
+ "unable to issue vendor read request (ret = %d)", err);
+ return err;
}
- ucontrol->value.integer.value[0] = tmp;
+ kctl->private_value |= (value << 24);
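+ /* private_value: bits 31:24 cached value, 23:16 bRequest, 15:0 wIndex */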
+ return 0;
+}
+static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = kcontrol->private_value >> 24;
return 0;
}
+static int snd_ni_update_cur_val(struct usb_mixer_elem_list *list)
+{
+ struct snd_usb_audio *chip = list->mixer->chip;
+ unsigned int pval = list->kctl->private_value;
+ int err;
+
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown)
+ err = -ENODEV;
+ else
+ err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
+ (pval >> 16) & 0xff,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ pval >> 24, pval & 0xffff, NULL, 0, 1000);
+ up_read(&chip->shutdown_rwsem);
+ return err;
+}
+
static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- struct usb_device *dev = mixer->chip->dev;
- u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
- u16 wIndex = kcontrol->private_value & 0xffff;
- u16 wValue = ucontrol->value.integer.value[0];
- int ret;
-
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
- ret = -ENODEV;
- else
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- wValue, wIndex,
- NULL, 0, 1000);
- up_read(&mixer->chip->shutdown_rwsem);
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ u8 oldval = (kcontrol->private_value >> 24) & 0xff;
+ u8 newval = ucontrol->value.integer.value[0];
+ int err;
- if (ret < 0) {
- dev_err(&dev->dev,
- "unable to issue vendor write request (ret = %d)", ret);
- return ret;
- }
+ if (oldval == newval)
+ return 0;
- return 0;
+ kcontrol->private_value &= ~(0xff << 24);
+ kcontrol->private_value |= newval << 24;
+ err = snd_ni_update_cur_val(list);
+ return err < 0 ? err : 1;
}
static struct snd_kcontrol_new snd_nativeinstruments_ta6_mixers[] = {
@@ -707,16 +874,17 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
};
for (i = 0; i < count; i++) {
- struct snd_kcontrol *c;
+ struct usb_mixer_elem_list *list;
template.name = kc[i].name;
template.private_value = kc[i].private_value;
- c = snd_ctl_new1(&template, mixer);
- err = snd_ctl_add(mixer->chip->card, c);
-
+ err = add_single_ctl_with_resume(mixer, 0,
+ snd_ni_update_cur_val,
+ &template, &list);
if (err < 0)
break;
+ snd_ni_control_init_val(mixer, list->kctl);
}
return err;
@@ -724,170 +892,88 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
/* M-Audio FastTrack Ultra quirks */
/* FTU Effect switch (also used by C400/C600) */
-struct snd_ftu_eff_switch_priv_val {
- struct usb_mixer_interface *mixer;
- int cached_value;
- int is_cached;
- int bUnitID;
- int validx;
-};
-
static int snd_ftu_eff_switch_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char *texts[8] = {"Room 1",
- "Room 2",
- "Room 3",
- "Hall 1",
- "Hall 2",
- "Plate",
- "Delay",
- "Echo"
+ static const char *const texts[8] = {
+ "Room 1", "Room 2", "Room 3", "Hall 1",
+ "Hall 2", "Plate", "Delay", "Echo"
};
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 8;
- if (uinfo->value.enumerated.item > 7)
- uinfo->value.enumerated.item = 7;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
-
- return 0;
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
-static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl,
- struct snd_ctl_elem_value *ucontrol)
+static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl)
{
- struct snd_usb_audio *chip;
- struct usb_mixer_interface *mixer;
- struct snd_ftu_eff_switch_priv_val *pval;
+ struct usb_device *dev = mixer->chip->dev;
+ unsigned int pval = kctl->private_value;
int err;
unsigned char value[2];
- int id, validx;
-
- const int val_len = 2;
value[0] = 0x00;
value[1] = 0x00;
- pval = (struct snd_ftu_eff_switch_priv_val *)
- kctl->private_value;
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ pval & 0xff00,
+ snd_usb_ctrl_intf(mixer->chip) | ((pval & 0xff) << 8),
+ value, 2);
+ if (err < 0)
+ return err;
- if (pval->is_cached) {
- ucontrol->value.enumerated.item[0] = pval->cached_value;
- return 0;
- }
+ kctl->private_value |= value[0] << 24;
+ return 0;
+}
- mixer = (struct usb_mixer_interface *) pval->mixer;
- if (snd_BUG_ON(!mixer))
- return -EINVAL;
+static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = kctl->private_value >> 24;
+ return 0;
+}
- chip = (struct snd_usb_audio *) mixer->chip;
- if (snd_BUG_ON(!chip))
- return -EINVAL;
+static int snd_ftu_eff_switch_update(struct usb_mixer_elem_list *list)
+{
+ struct snd_usb_audio *chip = list->mixer->chip;
+ unsigned int pval = list->kctl->private_value;
+ unsigned char value[2];
+ int err;
- id = pval->bUnitID;
- validx = pval->validx;
+ value[0] = pval >> 24;
+ value[1] = 0;
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown)
err = -ENODEV;
else
err = snd_usb_ctl_msg(chip->dev,
- usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR,
- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
- value, val_len);
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
-
- ucontrol->value.enumerated.item[0] = value[0];
- pval->cached_value = value[0];
- pval->is_cached = 1;
-
- return 0;
+ usb_sndctrlpipe(chip->dev, 0),
+ UAC_SET_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ pval & 0xff00,
+ snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
+ value, 2);
+ up_read(&chip->shutdown_rwsem);
+ return err;
}
static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_usb_audio *chip;
- struct snd_ftu_eff_switch_priv_val *pval;
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ unsigned int pval = list->kctl->private_value;
+ int cur_val, err, new_val;
- struct usb_mixer_interface *mixer;
- int changed, cur_val, err, new_val;
- unsigned char value[2];
- int id, validx;
-
- const int val_len = 2;
-
- changed = 0;
-
- pval = (struct snd_ftu_eff_switch_priv_val *)
- kctl->private_value;
- cur_val = pval->cached_value;
+ cur_val = pval >> 24;
new_val = ucontrol->value.enumerated.item[0];
+ if (cur_val == new_val)
+ return 0;
- mixer = (struct usb_mixer_interface *) pval->mixer;
- if (snd_BUG_ON(!mixer))
- return -EINVAL;
-
- chip = (struct snd_usb_audio *) mixer->chip;
- if (snd_BUG_ON(!chip))
- return -EINVAL;
-
- id = pval->bUnitID;
- validx = pval->validx;
-
- if (!pval->is_cached) {
- /* Read current value */
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
- err = -ENODEV;
- else
- err = snd_usb_ctl_msg(chip->dev,
- usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR,
- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
- value, val_len);
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
-
- cur_val = value[0];
- pval->cached_value = cur_val;
- pval->is_cached = 1;
- }
- /* update value if needed */
- if (cur_val != new_val) {
- value[0] = new_val;
- value[1] = 0;
- down_read(&mixer->chip->shutdown_rwsem);
- if (mixer->chip->shutdown)
- err = -ENODEV;
- else
- err = snd_usb_ctl_msg(chip->dev,
- usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
- validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
- value, val_len);
- up_read(&mixer->chip->shutdown_rwsem);
- if (err < 0)
- return err;
-
- pval->cached_value = new_val;
- pval->is_cached = 1;
- changed = 1;
- }
-
- return changed;
-}
-
-static void kctl_private_value_free(struct snd_kcontrol *kctl)
-{
- kfree((void *)kctl->private_value);
+ kctl->private_value &= ~(0xff << 24);
+ kctl->private_value |= new_val << 24;
+ err = snd_ftu_eff_switch_update(list);
+ return err < 0 ? err : 1;
}
static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
@@ -902,33 +988,16 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
.get = snd_ftu_eff_switch_get,
.put = snd_ftu_eff_switch_put
};
-
+ struct usb_mixer_elem_list *list;
int err;
- struct snd_kcontrol *kctl;
- struct snd_ftu_eff_switch_priv_val *pval;
-
- pval = kzalloc(sizeof(*pval), GFP_KERNEL);
- if (!pval)
- return -ENOMEM;
-
- pval->cached_value = 0;
- pval->is_cached = 0;
- pval->mixer = mixer;
- pval->bUnitID = bUnitID;
- pval->validx = validx;
-
- template.private_value = (unsigned long) pval;
- kctl = snd_ctl_new1(&template, mixer->chip);
- if (!kctl) {
- kfree(pval);
- return -ENOMEM;
- }
- kctl->private_free = kctl_private_value_free;
- err = snd_ctl_add(mixer->chip->card, kctl);
+ err = add_single_ctl_with_resume(mixer, bUnitID,
+ snd_ftu_eff_switch_update,
+ &template, &list);
if (err < 0)
return err;
-
+ list->kctl->private_value = (validx << 8) | bUnitID;
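+ /* private_value: bits 31:24 cached value, 15:8 validx, 7:0 unit id */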
+ snd_ftu_eff_switch_init(mixer, list->kctl);
return 0;
}
@@ -1110,7 +1179,7 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
int unitid = 12; /* SampleRate ExtensionUnit ID */
list_for_each_entry(mixer, &chip->mixer_list, list) {
- cval = mixer->id_elems[unitid];
+ cval = (struct usb_mixer_elem_info *)mixer->id_elems[unitid];
if (cval) {
snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
cval->control << 8,
@@ -1440,7 +1509,8 @@ static int snd_microii_spdif_info(struct snd_kcontrol *kcontrol,
static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
int err;
struct usb_interface *iface;
struct usb_host_interface *alts;
@@ -1448,17 +1518,23 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
unsigned char data[3];
int rate;
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown) {
+ err = -ENODEV;
+ goto end;
+ }
+
ucontrol->value.iec958.status[0] = kcontrol->private_value & 0xff;
ucontrol->value.iec958.status[1] = (kcontrol->private_value >> 8) & 0xff;
ucontrol->value.iec958.status[2] = 0x00;
/* use known values for that card: interface#1 altsetting#1 */
- iface = usb_ifnum_to_if(mixer->chip->dev, 1);
+ iface = usb_ifnum_to_if(chip->dev, 1);
alts = &iface->altsetting[1];
ep = get_endpoint(alts, 0)->bEndpointAddress;
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_rcvctrlpipe(mixer->chip->dev, 0),
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0),
UAC_GET_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
@@ -1473,22 +1549,27 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
IEC958_AES3_CON_FS_48000 : IEC958_AES3_CON_FS_44100;
err = 0;
-end:
+ end:
+ up_read(&chip->shutdown_rwsem);
return err;
}
-static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
- int err;
+ struct snd_usb_audio *chip = list->mixer->chip;
+ unsigned int pval = list->kctl->private_value;
u8 reg;
- unsigned long priv_backup = kcontrol->private_value;
+ int err;
- reg = ((ucontrol->value.iec958.status[1] & 0x0f) << 4) |
- (ucontrol->value.iec958.status[0] & 0x0f);
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0),
UAC_SET_CUR,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
reg,
@@ -1498,15 +1579,10 @@ static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
if (err < 0)
goto end;
- kcontrol->private_value &= 0xfffff0f0;
- kcontrol->private_value |= (ucontrol->value.iec958.status[1] & 0x0f) << 8;
- kcontrol->private_value |= (ucontrol->value.iec958.status[0] & 0x0f);
-
- reg = (ucontrol->value.iec958.status[0] & IEC958_AES0_NONAUDIO) ?
- 0xa0 : 0x20;
- reg |= (ucontrol->value.iec958.status[1] >> 4) & 0x0f;
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
+ reg = (pval & IEC958_AES0_NONAUDIO) ? 0xa0 : 0x20;
+ reg |= (pval >> 12) & 0x0f;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0),
UAC_SET_CUR,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
reg,
@@ -1516,16 +1592,36 @@ static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
if (err < 0)
goto end;
- kcontrol->private_value &= 0xffff0fff;
- kcontrol->private_value |= (ucontrol->value.iec958.status[1] & 0xf0) << 8;
+ end:
+ up_read(&chip->shutdown_rwsem);
+ return err;
+}
+
+static int snd_microii_spdif_default_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ unsigned int pval, pval_old;
+ int err;
+
+ pval = pval_old = kcontrol->private_value;
+ pval &= 0xfffff0f0;
+ pval |= (ucontrol->value.iec958.status[1] & 0x0f) << 8;
+ pval |= (ucontrol->value.iec958.status[0] & 0x0f);
+
+ pval &= 0xffff0fff;
+ pval |= (ucontrol->value.iec958.status[1] & 0xf0) << 8;
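+ /* private_value layout: bits 3:0 = status[0] low nibble,
+ * bits 11:8 = status[1] low nibble, bits 15:12 = status[1] high nibble */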
/* The frequency bits in AES3 cannot be set via register access. */
/* Silently ignore any bits from the request that cannot be set. */
- err = (priv_backup != kcontrol->private_value);
-end:
- return err;
+ if (pval == pval_old)
+ return 0;
+
+ kcontrol->private_value = pval;
+ err = snd_microii_spdif_default_update(list);
+ return err < 0 ? err : 1;
}
static int snd_microii_spdif_mask_get(struct snd_kcontrol *kcontrol,
@@ -1547,15 +1643,20 @@ static int snd_microii_spdif_switch_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int snd_microii_spdif_switch_update(struct usb_mixer_elem_list *list)
{
- struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
+ u8 reg = list->kctl->private_value;
int err;
- u8 reg = ucontrol->value.integer.value[0] ? 0x28 : 0x2a;
- err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0),
UAC_SET_CUR,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
reg,
@@ -1563,15 +1664,27 @@ static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol,
NULL,
0);
- if (!err) {
- err = (reg != (kcontrol->private_value & 0x0ff));
- if (err)
- kcontrol->private_value = reg;
- }
-
+ end:
+ up_read(&chip->shutdown_rwsem);
return err;
}
+static int snd_microii_spdif_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ u8 reg;
+ int err;
+
+ reg = ucontrol->value.integer.value[0] ? 0x28 : 0x2a;
+ if (reg == list->kctl->private_value)
+ return 0;
+
+ kcontrol->private_value = reg;
+ err = snd_microii_spdif_switch_update(list);
+ return err < 0 ? err : 1;
+}
+
static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1601,10 +1714,17 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
{
int err, i;
+ static usb_mixer_elem_resume_func_t resume_funcs[] = {
+ snd_microii_spdif_default_update,
+ NULL,
+ snd_microii_spdif_switch_update
+ };
for (i = 0; i < ARRAY_SIZE(snd_microii_mixer_spdif); ++i) {
- err = snd_ctl_add(mixer->chip->card,
- snd_ctl_new1(&snd_microii_mixer_spdif[i], mixer));
+ err = add_single_ctl_with_resume(mixer, 0,
+ resume_funcs[i],
+ &snd_microii_mixer_spdif[i],
+ NULL);
if (err < 0)
return err;
}
@@ -1661,6 +1781,10 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
err = snd_microii_controls_create(mixer);
break;
+ case USB_ID(0x0dba, 0x1000): /* Digidesign Mbox 1 */
+ err = snd_mbox1_create_sync_switch(mixer);
+ break;
+
case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */
err = snd_nativeinstruments_create_mixer(mixer,
snd_nativeinstruments_ta6_mixers,
@@ -1677,6 +1801,14 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
/* detection is disabled in mixer_maps.c */
err = snd_create_std_mono_table(mixer, ebox44_table);
break;
+
+ case USB_ID(0x1235, 0x8012): /* Focusrite Scarlett 6i6 */
+ case USB_ID(0x1235, 0x8002): /* Focusrite Scarlett 8i6 */
+ case USB_ID(0x1235, 0x8004): /* Focusrite Scarlett 18i6 */
+ case USB_ID(0x1235, 0x8014): /* Focusrite Scarlett 18i8 */
+ case USB_ID(0x1235, 0x800c): /* Focusrite Scarlett 18i20 */
+ err = snd_scarlett_controls_create(mixer);
+ break;
}
return err;
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
new file mode 100644
index 000000000000..9109652b88b9
--- /dev/null
+++ b/sound/usb/mixer_scarlett.c
@@ -0,0 +1,1004 @@
+/*
+ * Scarlett Driver for ALSA
+ *
+ * Copyright (c) 2013 by Tobias Hoffmann
+ * Copyright (c) 2013 by Robin Gareus <robin at gareus.org>
+ * Copyright (c) 2002 by Takashi Iwai <tiwai at suse.de>
+ * Copyright (c) 2014 by Chris J Arges <chris.j.arges at canonical.com>
+ *
+ * Much code borrowed from audio.c by
+ * Alan Cox (alan at lxorguk.ukuu.org.uk)
+ * Thomas Sailer (sailer at ife.ee.ethz.ch)
+ *
+ * Code cleanup:
+ * David Henningsson <david.henningsson at canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Rewritten and extended to support more models, e.g. Scarlett 18i8.
+ *
+ * Auto-detection via UAC2 is not feasible: the vast majority of features
+ * cannot be discovered properly that way, owing to both Linux/ALSA's UAC2
+ * support and Focusrite's implementation of it. Eventually quirks may be
+ * sufficient, but right now it is a major headache to work around these
+ * things.
+ *
+ * NB: Neither the OS X nor the Windows driver provided by Focusrite performs
+ * such discovery; they appear to operate the same way as this driver does.
+ */
+
+/* Mixer Interface for the Focusrite Scarlett 18i6 audio interface.
+ *
+ * The protocol was reverse-engineered by looking at the communication between
+ * Scarlett MixControl (v 1.2.128.0) and the Focusrite(R) Scarlett 18i6
+ * (firmware v305) using Wireshark and usbmon in January 2013.
+ * Extended in July 2013.
+ *
+ * This mixer gives complete access to all features of the device:
+ * - change Impedance of inputs (Line-in, Mic / Instrument, Hi-Z)
+ * - select clock source
+ * - dynamic input to mixer-matrix assignment
+ * - 18 x 6 mixer-matrix gain stages
+ * - bus routing & volume control
+ * - automatic re-initialization on connect if device was power-cycled
+ *
+ * USB URB commands overview (bRequest = 0x01 = UAC2_CS_CUR)
+ * wIndex
+ * 0x01 Analog Input line/instrument impedance switch, wValue=0x0901 +
+ * channel, data=Line/Inst (2bytes)
+ * pad (-10dB) switch, wValue=0x0b01 + channel, data=Off/On (2bytes)
+ * ?? wValue=0x0803/04, ?? (2bytes)
+ * 0x0a Master Volume, wValue=0x0200+bus[0:all + only 1..4?] data(2bytes)
+ * Bus Mute/Unmute wValue=0x0100+bus[0:all + only 1..4?], data(2bytes)
+ * 0x28 Clock source, wValue=0x0100, data={1:int,2:spdif,3:adat} (1byte)
+ * 0x29 Set Sample-rate, wValue=0x0100, data=sample-rate(4bytes)
+ * 0x32 Mixer mux, wValue=0x0600 + mixer-channel, data=input-to-connect(2bytes)
+ * 0x33 Output mux, wValue=bus, data=input-to-connect(2bytes)
+ * 0x34 Capture mux, wValue=0...18, data=input-to-connect(2bytes)
+ * 0x3c Matrix Mixer gains, wValue=mixer-node data=gain(2bytes)
+ * ?? [sometimes](4bytes, e.g 0x000003be 0x000003bf ...03ff)
+ *
+ * USB reads: (i.e. actually issued by original software)
+ * 0x01 wValue=0x0901+channel (1byte!!), wValue=0x0b01+channel (1byte!!)
+ * 0x29 wValue=0x0100 sample-rate(4bytes)
+ * wValue=0x0200 ?? 1byte (only once)
+ * 0x2a wValue=0x0100 ?? 4bytes, sample-rate2 ??
+ *
+ * USB reads with bRequest = 0x03 = UAC2_CS_MEM
+ * 0x3c wValue=0x0002 1byte: sync status (locked=1)
+ * wValue=0x0000 18*2byte: peak meter (inputs)
+ * wValue=0x0001 8(?)*2byte: peak meter (mix)
+ * wValue=0x0003 6*2byte: peak meter (pcm/daw)
+ *
+ * USB write with bRequest = 0x03
+ * 0x3c Save settings to hardware: wValue=0x005a, data=0xa5
+ *
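+ * Example (derived from the table above): selecting ADAT as the clock
+ * source is a single UAC2_CS_CUR request with wIndex=0x28, wValue=0x0100
+ * and a one-byte payload of 0x03.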
+ *
+ * <ditaa>
+ * /--------------\ 18chn 6chn /--------------\
+ * | Hardware in +--+-------\ /------+--+ ALSA PCM out |
+ * \--------------/ | | | | \--------------/
+ * | | | |
+ * | v v |
+ * | +---------------+ |
+ * | \ Matrix Mux / |
+ * | +-----+-----+ |
+ * | | |
+ * | | 18chn |
+ * | v |
+ * | +-----------+ |
+ * | | Mixer | |
+ * | | Matrix | |
+ * | | | |
+ * | | 18x6 Gain | |
+ * | | stages | |
+ * | +-----+-----+ |
+ * | | |
+ * | | |
+ * | 18chn | 6chn | 6chn
+ * v v v
+ * =========================
+ * +---------------+ +---------------+
+ * \ Output Mux / \ Capture Mux /
+ * +-----+-----+ +-----+-----+
+ * | |
+ * | 6chn |
+ * v |
+ * +-------------+ |
+ * | Master Gain | |
+ * +------+------+ |
+ * | |
+ * | 6chn | 18chn
+ * | (3 stereo pairs) |
+ * /--------------\ | | /--------------\
+ * | Hardware out |<--/ \-->| ALSA PCM in |
+ * \--------------/ \--------------/
+ * </ditaa>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio-v2.h>
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+
+#include "usbaudio.h"
+#include "mixer.h"
+#include "helper.h"
+#include "power.h"
+
+#include "mixer_scarlett.h"
+
+/* some gui mixers can't handle negative ctl values */
+#define SND_SCARLETT_LEVEL_BIAS 128
+#define SND_SCARLETT_MATRIX_IN_MAX 18
+#define SND_SCARLETT_CONTROLS_MAX 10
+#define SND_SCARLETT_OFFSETS_MAX 5
+
+enum {
+ SCARLETT_OUTPUTS,
+ SCARLETT_SWITCH_IMPEDANCE,
+ SCARLETT_SWITCH_PAD,
+};
+
+enum {
+ SCARLETT_OFFSET_PCM = 0,
+ SCARLETT_OFFSET_ANALOG = 1,
+ SCARLETT_OFFSET_SPDIF = 2,
+ SCARLETT_OFFSET_ADAT = 3,
+ SCARLETT_OFFSET_MIX = 4,
+};
+
+struct scarlett_mixer_elem_enum_info {
+ int start;
+ int len;
+ int offsets[SND_SCARLETT_OFFSETS_MAX];
+ char const * const *names;
+};
+
+struct scarlett_mixer_control {
+ unsigned char num;
+ unsigned char type;
+ const char *name;
+};
+
+struct scarlett_device_info {
+ int matrix_in;
+ int matrix_out;
+ int input_len;
+ int output_len;
+
+ struct scarlett_mixer_elem_enum_info opt_master;
+ struct scarlett_mixer_elem_enum_info opt_matrix;
+
+ /* initial values for matrix mux */
+ int matrix_mux_init[SND_SCARLETT_MATRIX_IN_MAX];
+
+ int num_controls; /* number of items in controls */
+ const struct scarlett_mixer_control controls[SND_SCARLETT_CONTROLS_MAX];
+};
+
+/********************** Enum Strings *************************/
+
+static const struct scarlett_mixer_elem_enum_info opt_pad = {
+ .start = 0,
+ .len = 2,
+ .offsets = {},
+ .names = (char const * const []){
+ "0dB", "-10dB"
+ }
+};
+
+static const struct scarlett_mixer_elem_enum_info opt_impedance = {
+ .start = 0,
+ .len = 2,
+ .offsets = {},
+ .names = (char const * const []){
+ "Line", "Hi-Z"
+ }
+};
+
+static const struct scarlett_mixer_elem_enum_info opt_clock = {
+ .start = 1,
+ .len = 3,
+ .offsets = {},
+ .names = (char const * const []){
+ "Internal", "SPDIF", "ADAT"
+ }
+};
+
+static const struct scarlett_mixer_elem_enum_info opt_sync = {
+ .start = 0,
+ .len = 2,
+ .offsets = {},
+ .names = (char const * const []){
+ "No Lock", "Locked"
+ }
+};
+
+static int scarlett_ctl_switch_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = elem->channels;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+}
+
+static int scarlett_ctl_switch_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ int i, err, val;
+
+ for (i = 0; i < elem->channels; i++) {
+ err = snd_usb_get_cur_mix_value(elem, i, i, &val);
+ if (err < 0)
+ return err;
+
+ val = !val; /* invert mute logic for mixer */
+ ucontrol->value.integer.value[i] = val;
+ }
+
+ return 0;
+}
+
+static int scarlett_ctl_switch_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ int i, changed = 0;
+ int err, oval, val;
+
+ for (i = 0; i < elem->channels; i++) {
+ err = snd_usb_get_cur_mix_value(elem, i, i, &oval);
+ if (err < 0)
+ return err;
+
+ val = ucontrol->value.integer.value[i];
+ val = !val;
+ if (oval != val) {
+ err = snd_usb_set_cur_mix_value(elem, i, i, val);
+ if (err < 0)
+ return err;
+
+ changed = 1;
+ }
+ }
+
+ return changed;
+}
+
+static int scarlett_ctl_resume(struct usb_mixer_elem_list *list)
+{
+ struct usb_mixer_elem_info *elem =
+ container_of(list, struct usb_mixer_elem_info, head);
+ int i;
+
+ for (i = 0; i < elem->channels; i++)
+ if (elem->cached & (1 << i))
+ snd_usb_set_cur_mix_value(elem, i, i,
+ elem->cache_val[i]);
+ return 0;
+}
+
+static int scarlett_ctl_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = elem->channels;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = (int)kctl->private_value +
+ SND_SCARLETT_LEVEL_BIAS;
+ uinfo->value.integer.step = 1;
+ return 0;
+}
+
+static int scarlett_ctl_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ int i, err, val;
+
+ for (i = 0; i < elem->channels; i++) {
+ err = snd_usb_get_cur_mix_value(elem, i, i, &val);
+ if (err < 0)
+ return err;
+
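+ /* raw values are in 1/256 dB units; convert to 1 dB steps and add the bias */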
+ val = clamp(val / 256, -128, (int)kctl->private_value) +
+ SND_SCARLETT_LEVEL_BIAS;
+ ucontrol->value.integer.value[i] = val;
+ }
+
+ return 0;
+}
+
+static int scarlett_ctl_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ int i, changed = 0;
+ int err, oval, val;
+
+ for (i = 0; i < elem->channels; i++) {
+ err = snd_usb_get_cur_mix_value(elem, i, i, &oval);
+ if (err < 0)
+ return err;
+
+ val = ucontrol->value.integer.value[i] -
+ SND_SCARLETT_LEVEL_BIAS;
+ val = val * 256;
+ if (oval != val) {
+ err = snd_usb_set_cur_mix_value(elem, i, i, val);
+ if (err < 0)
+ return err;
+
+ changed = 1;
+ }
+ }
+
+ return changed;
+}
+
+static void scarlett_generate_name(int i, char *dst, int offsets[])
+{
+ if (i > offsets[SCARLETT_OFFSET_MIX])
+ sprintf(dst, "Mix %c",
+ 'A'+(i - offsets[SCARLETT_OFFSET_MIX] - 1));
+ else if (i > offsets[SCARLETT_OFFSET_ADAT])
+ sprintf(dst, "ADAT %d", i - offsets[SCARLETT_OFFSET_ADAT]);
+ else if (i > offsets[SCARLETT_OFFSET_SPDIF])
+ sprintf(dst, "SPDIF %d", i - offsets[SCARLETT_OFFSET_SPDIF]);
+ else if (i > offsets[SCARLETT_OFFSET_ANALOG])
+ sprintf(dst, "Analog %d", i - offsets[SCARLETT_OFFSET_ANALOG]);
+ else if (i > offsets[SCARLETT_OFFSET_PCM])
+ sprintf(dst, "PCM %d", i - offsets[SCARLETT_OFFSET_PCM]);
+ else
+ sprintf(dst, "Off");
+}
+
+static int scarlett_ctl_enum_dynamic_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett_mixer_elem_enum_info *opt = elem->private_data;
+ unsigned int items = opt->len;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = elem->channels;
+ uinfo->value.enumerated.items = items;
+
+ if (uinfo->value.enumerated.item >= items)
+ uinfo->value.enumerated.item = items - 1;
+
+ /* generate name dynamically based on item number and offset info */
+ scarlett_generate_name(uinfo->value.enumerated.item,
+ uinfo->value.enumerated.name,
+ opt->offsets);
+
+ return 0;
+}
+
+static int scarlett_ctl_enum_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett_mixer_elem_enum_info *opt = elem->private_data;
+
+ return snd_ctl_enum_info(uinfo, elem->channels, opt->len,
+ (const char * const *)opt->names);
+}
+
+static int scarlett_ctl_enum_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett_mixer_elem_enum_info *opt = elem->private_data;
+ int err, val;
+
+ err = snd_usb_get_cur_mix_value(elem, 0, 0, &val);
+ if (err < 0)
+ return err;
+
+ val = clamp(val - opt->start, 0, opt->len-1);
+
+ ucontrol->value.enumerated.item[0] = val;
+
+ return 0;
+}
+
+static int scarlett_ctl_enum_put(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct scarlett_mixer_elem_enum_info *opt = elem->private_data;
+ int err, oval, val;
+
+ err = snd_usb_get_cur_mix_value(elem, 0, 0, &oval);
+ if (err < 0)
+ return err;
+
+ val = ucontrol->value.integer.value[0];
+ val = val + opt->start;
+ if (val != oval) {
+ snd_usb_set_cur_mix_value(elem, 0, 0, val);
+ return 1;
+ }
+ return 0;
+}
+
+static int scarlett_ctl_enum_resume(struct usb_mixer_elem_list *list)
+{
+ struct usb_mixer_elem_info *elem =
+ container_of(list, struct usb_mixer_elem_info, head);
+
+ if (elem->cached)
+ snd_usb_set_cur_mix_value(elem, 0, 0, *elem->cache_val);
+ return 0;
+}
+
+static int scarlett_ctl_meter_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *elem = kctl->private_data;
+ struct snd_usb_audio *chip = elem->head.mixer->chip;
+ unsigned char buf[2 * MAX_CHANNELS] = {0, };
+ int wValue = (elem->control << 8) | elem->idx_off;
+ int idx = snd_usb_ctrl_intf(chip) | (elem->head.id << 8);
+ int err;
+
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_rcvctrlpipe(chip->dev, 0),
+ UAC2_CS_MEM,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS |
+ USB_DIR_IN, wValue, idx, buf, elem->channels);
+ if (err < 0)
+ return err;
+
+ ucontrol->value.enumerated.item[0] = clamp((int)buf[0], 0, 1);
+ return 0;
+}
+
+static struct snd_kcontrol_new usb_scarlett_ctl_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett_ctl_switch_info,
+ .get = scarlett_ctl_switch_get,
+ .put = scarlett_ctl_switch_put,
+};
+
+static const DECLARE_TLV_DB_SCALE(db_scale_scarlett_gain, -12800, 100, 0);
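+/* ctl value 0 maps to -128.00 dB; one ctl step corresponds to 1.00 dB */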
+
+static struct snd_kcontrol_new usb_scarlett_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .name = "",
+ .info = scarlett_ctl_info,
+ .get = scarlett_ctl_get,
+ .put = scarlett_ctl_put,
+ .private_value = 6, /* max value */
+ .tlv = { .p = db_scale_scarlett_gain }
+};
+
+static struct snd_kcontrol_new usb_scarlett_ctl_master = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .name = "",
+ .info = scarlett_ctl_info,
+ .get = scarlett_ctl_get,
+ .put = scarlett_ctl_put,
+ .private_value = 6, /* max value */
+ .tlv = { .p = db_scale_scarlett_gain }
+};
+
+static struct snd_kcontrol_new usb_scarlett_ctl_enum = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett_ctl_enum_info,
+ .get = scarlett_ctl_enum_get,
+ .put = scarlett_ctl_enum_put,
+};
+
+static struct snd_kcontrol_new usb_scarlett_ctl_dynamic_enum = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "",
+ .info = scarlett_ctl_enum_dynamic_info,
+ .get = scarlett_ctl_enum_get,
+ .put = scarlett_ctl_enum_put,
+};
+
+static struct snd_kcontrol_new usb_scarlett_ctl_sync = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .name = "",
+ .info = scarlett_ctl_enum_info,
+ .get = scarlett_ctl_meter_get,
+};
+
+static int add_new_ctl(struct usb_mixer_interface *mixer,
+ const struct snd_kcontrol_new *ncontrol,
+ usb_mixer_elem_resume_func_t resume,
+ int index, int offset, int num,
+ int val_type, int channels, const char *name,
+ const struct scarlett_mixer_elem_enum_info *opt,
+ struct usb_mixer_elem_info **elem_ret
+)
+{
+ struct snd_kcontrol *kctl;
+ struct usb_mixer_elem_info *elem;
+ int err;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ elem->head.mixer = mixer;
+ elem->head.resume = resume;
+ elem->control = offset;
+ elem->idx_off = num;
+ elem->head.id = index;
+ elem->val_type = val_type;
+
+ elem->channels = channels;
+
+ /* add scarlett_mixer_elem_enum_info struct */
+ elem->private_data = (void *)opt;
+
+ kctl = snd_ctl_new1(ncontrol, elem);
+ if (!kctl) {
+ kfree(elem);
+ return -ENOMEM;
+ }
+ kctl->private_free = snd_usb_mixer_elem_free;
+
+ strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
+
+ err = snd_usb_mixer_add_control(&elem->head, kctl);
+ if (err < 0)
+ return err;
+
+ if (elem_ret)
+ *elem_ret = elem;
+
+ return 0;
+}
+
+static int add_output_ctls(struct usb_mixer_interface *mixer,
+ int index, const char *name,
+ const struct scarlett_device_info *info)
+{
+ int err;
+ char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct usb_mixer_elem_info *elem;
+
+ /* Add mute switch */
+ snprintf(mx, sizeof(mx), "Master %d (%s) Playback Switch",
+ index + 1, name);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_switch,
+ scarlett_ctl_resume, 0x0a, 0x01,
+ 2*index+1, USB_MIXER_S16, 2, mx, NULL, &elem);
+ if (err < 0)
+ return err;
+
+ /* Add volume control and initialize to 0 */
+ snprintf(mx, sizeof(mx), "Master %d (%s) Playback Volume",
+ index + 1, name);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_master,
+ scarlett_ctl_resume, 0x0a, 0x02,
+ 2*index+1, USB_MIXER_S16, 2, mx, NULL, &elem);
+ if (err < 0)
+ return err;
+
+ /* Add L channel source playback enumeration */
+ snprintf(mx, sizeof(mx), "Master %dL (%s) Source Playback Enum",
+ index + 1, name);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_dynamic_enum,
+ scarlett_ctl_enum_resume, 0x33, 0x00,
+ 2*index, USB_MIXER_S16, 1, mx, &info->opt_master,
+ &elem);
+ if (err < 0)
+ return err;
+
+ /* Add R channel source playback enumeration */
+ snprintf(mx, sizeof(mx), "Master %dR (%s) Source Playback Enum",
+ index + 1, name);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_dynamic_enum,
+ scarlett_ctl_enum_resume, 0x33, 0x00,
+ 2*index+1, USB_MIXER_S16, 1, mx, &info->opt_master,
+ &elem);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/********************** device-specific config *************************/
+
+/* untested... */
+static struct scarlett_device_info s6i6_info = {
+ .matrix_in = 18,
+ .matrix_out = 8,
+ .input_len = 6,
+ .output_len = 6,
+
+ .opt_master = {
+ .start = -1,
+ .len = 27,
+ .offsets = {0, 12, 16, 18, 18},
+ .names = NULL
+ },
+
+ .opt_matrix = {
+ .start = -1,
+ .len = 19,
+ .offsets = {0, 12, 16, 18, 18},
+ .names = NULL
+ },
+
+ .num_controls = 0,
+ .controls = {
+ { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
+ { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
+ { .num = 2, .type = SCARLETT_OUTPUTS, .name = "SPDIF" },
+ { .num = 1, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 1, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ },
+
+ .matrix_mux_init = {
+ 12, 13, 14, 15, /* Analog -> 1..4 */
+ 16, 17, /* SPDIF -> 5,6 */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PCM[1..12] -> 7..18 */
+ 8, 9, 10, 11
+ }
+};
+
+/* untested... */
+static struct scarlett_device_info s8i6_info = {
+ .matrix_in = 18,
+ .matrix_out = 6,
+ .input_len = 8,
+ .output_len = 6,
+
+ .opt_master = {
+ .start = -1,
+ .len = 25,
+ .offsets = {0, 12, 16, 18, 18},
+ .names = NULL
+ },
+
+ .opt_matrix = {
+ .start = -1,
+ .len = 19,
+ .offsets = {0, 12, 16, 18, 18},
+ .names = NULL
+ },
+
+ .num_controls = 7,
+ .controls = {
+ { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
+ { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
+ { .num = 2, .type = SCARLETT_OUTPUTS, .name = "SPDIF" },
+ { .num = 1, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ },
+
+ .matrix_mux_init = {
+ 12, 13, 14, 15, /* Analog -> 1..4 */
+ 16, 17, /* SPDIF -> 5,6 */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PCM[1..12] -> 7..18 */
+ 8, 9, 10, 11
+ }
+};
+
+static struct scarlett_device_info s18i6_info = {
+ .matrix_in = 18,
+ .matrix_out = 6,
+ .input_len = 18,
+ .output_len = 6,
+
+ .opt_master = {
+ .start = -1,
+ .len = 31,
+ .offsets = {0, 6, 14, 16, 24},
+ .names = NULL,
+ },
+
+ .opt_matrix = {
+ .start = -1,
+ .len = 25,
+ .offsets = {0, 6, 14, 16, 24},
+ .names = NULL,
+ },
+
+ .num_controls = 5,
+ .controls = {
+ { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
+ { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
+ { .num = 2, .type = SCARLETT_OUTPUTS, .name = "SPDIF" },
+ { .num = 1, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ },
+
+ .matrix_mux_init = {
+ 6, 7, 8, 9, 10, 11, 12, 13, /* Analog -> 1..8 */
+ 16, 17, 18, 19, 20, 21, /* ADAT[1..6] -> 9..14 */
+ 14, 15, /* SPDIF -> 15,16 */
+ 0, 1 /* PCM[1,2] -> 17,18 */
+ }
+};
+
+static struct scarlett_device_info s18i8_info = {
+ .matrix_in = 18,
+ .matrix_out = 8,
+ .input_len = 18,
+ .output_len = 8,
+
+ .opt_master = {
+ .start = -1,
+ .len = 35,
+ .offsets = {0, 8, 16, 18, 26},
+ .names = NULL
+ },
+
+ .opt_matrix = {
+ .start = -1,
+ .len = 27,
+ .offsets = {0, 8, 16, 18, 26},
+ .names = NULL
+ },
+
+ .num_controls = 10,
+ .controls = {
+ { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
+ { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone 1" },
+ { .num = 2, .type = SCARLETT_OUTPUTS, .name = "Headphone 2" },
+ { .num = 3, .type = SCARLETT_OUTPUTS, .name = "SPDIF" },
+ { .num = 1, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 1, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ },
+
+ .matrix_mux_init = {
+ 8, 9, 10, 11, 12, 13, 14, 15, /* Analog -> 1..8 */
+ 18, 19, 20, 21, 22, 23, /* ADAT[1..6] -> 9..14 */
+ 16, 17, /* SPDIF -> 15,16 */
+ 0, 1 /* PCM[1,2] -> 17,18 */
+ }
+};
+
+static struct scarlett_device_info s18i20_info = {
+ .matrix_in = 18,
+ .matrix_out = 8,
+ .input_len = 18,
+ .output_len = 20,
+
+ .opt_master = {
+ .start = -1,
+ .len = 47,
+ .offsets = {0, 20, 28, 30, 38},
+ .names = NULL
+ },
+
+ .opt_matrix = {
+ .start = -1,
+ .len = 39,
+ .offsets = {0, 20, 28, 30, 38},
+ .names = NULL
+ },
+
+ .num_controls = 10,
+ .controls = {
+ { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
+ { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Line 3/4" },
+ { .num = 2, .type = SCARLETT_OUTPUTS, .name = "Line 5/6" },
+ { .num = 3, .type = SCARLETT_OUTPUTS, .name = "Line 7/8" },
+ { .num = 4, .type = SCARLETT_OUTPUTS, .name = "Line 9/10" },
+ { .num = 5, .type = SCARLETT_OUTPUTS, .name = "SPDIF" },
+ { .num = 6, .type = SCARLETT_OUTPUTS, .name = "ADAT 1/2" },
+ { .num = 7, .type = SCARLETT_OUTPUTS, .name = "ADAT 3/4" },
+ { .num = 8, .type = SCARLETT_OUTPUTS, .name = "ADAT 5/6" },
+ { .num = 9, .type = SCARLETT_OUTPUTS, .name = "ADAT 7/8" },
+ /*{ .num = 1, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 1, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
+ { .num = 2, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},*/
+ },
+
+ .matrix_mux_init = {
+ 20, 21, 22, 23, 24, 25, 26, 27, /* Analog -> 1..8 */
+ 30, 31, 32, 33, 34, 35, /* ADAT[1..6] -> 9..14 */
+ 28, 29, /* SPDIF -> 15,16 */
+ 0, 1 /* PCM[1,2] -> 17,18 */
+ }
+};
+
+
+static int scarlett_controls_create_generic(struct usb_mixer_interface *mixer,
+ struct scarlett_device_info *info)
+{
+ int i, err;
+ char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ const struct scarlett_mixer_control *ctl;
+ struct usb_mixer_elem_info *elem;
+
+ /* create master switch and playback volume */
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_switch,
+ scarlett_ctl_resume, 0x0a, 0x01, 0,
+ USB_MIXER_S16, 1, "Master Playback Switch", NULL,
+ &elem);
+ if (err < 0)
+ return err;
+
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_master,
+ scarlett_ctl_resume, 0x0a, 0x02, 0,
+ USB_MIXER_S16, 1, "Master Playback Volume", NULL,
+ &elem);
+ if (err < 0)
+ return err;
+
+ /* iterate through controls in info struct and create each one */
+ for (i = 0; i < info->num_controls; i++) {
+ ctl = &info->controls[i];
+
+ switch (ctl->type) {
+ case SCARLETT_OUTPUTS:
+ err = add_output_ctls(mixer, ctl->num, ctl->name, info);
+ if (err < 0)
+ return err;
+ break;
+ case SCARLETT_SWITCH_IMPEDANCE:
+ sprintf(mx, "Input %d Impedance Switch", ctl->num);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_enum,
+ scarlett_ctl_enum_resume, 0x01,
+ 0x09, ctl->num, USB_MIXER_S16, 1, mx,
+ &opt_impedance, &elem);
+ if (err < 0)
+ return err;
+ break;
+ case SCARLETT_SWITCH_PAD:
+ sprintf(mx, "Input %d Pad Switch", ctl->num);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_enum,
+ scarlett_ctl_enum_resume, 0x01,
+ 0x0b, ctl->num, USB_MIXER_S16, 1, mx,
+ &opt_pad, &elem);
+ if (err < 0)
+ return err;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Create and initialize a mixer for the Focusrite(R) Scarlett
+ */
+int snd_scarlett_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err, i, o;
+ char mx[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct scarlett_device_info *info;
+ struct usb_mixer_elem_info *elem;
+ /* 48000 Hz as a 32-bit little-endian value */
+ static char sample_rate_buffer[4] = { '\x80', '\xbb', '\x00', '\x00' };
+
+ /* only use UAC_VERSION_2 */
+ if (!mixer->protocol)
+ return 0;
+
+ switch (mixer->chip->usb_id) {
+ case USB_ID(0x1235, 0x8012):
+ info = &s6i6_info;
+ break;
+ case USB_ID(0x1235, 0x8002):
+ info = &s8i6_info;
+ break;
+ case USB_ID(0x1235, 0x8004):
+ info = &s18i6_info;
+ break;
+ case USB_ID(0x1235, 0x8014):
+ info = &s18i8_info;
+ break;
+ case USB_ID(0x1235, 0x800c):
+ info = &s18i20_info;
+ break;
+ default: /* device not (yet) supported */
+ return -EINVAL;
+ }
+
+ /* generic function to create controls */
+ err = scarlett_controls_create_generic(mixer, info);
+ if (err < 0)
+ return err;
+
+ /* setup matrix controls */
+ for (i = 0; i < info->matrix_in; i++) {
+ snprintf(mx, sizeof(mx), "Matrix %02d Input Playback Route",
+ i+1);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_dynamic_enum,
+ scarlett_ctl_enum_resume, 0x32,
+ 0x06, i, USB_MIXER_S16, 1, mx,
+ &info->opt_matrix, &elem);
+ if (err < 0)
+ return err;
+
+ for (o = 0; o < info->matrix_out; o++) {
+ sprintf(mx, "Matrix %02d Mix %c Playback Volume", i+1,
+ o+'A');
+ err = add_new_ctl(mixer, &usb_scarlett_ctl,
+ scarlett_ctl_resume, 0x3c, 0x00,
+ (i << 3) + (o & 0x07), USB_MIXER_S16,
+ 1, mx, NULL, &elem);
+ if (err < 0)
+ return err;
+
+ }
+ }
+
+ for (i = 0; i < info->input_len; i++) {
+ snprintf(mx, sizeof(mx), "Input Source %02d Capture Route",
+ i+1);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_dynamic_enum,
+ scarlett_ctl_enum_resume, 0x34,
+ 0x00, i, USB_MIXER_S16, 1, mx,
+ &info->opt_master, &elem);
+ if (err < 0)
+ return err;
+ }
+
+ /* val_len == 1 needed here */
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_enum,
+ scarlett_ctl_enum_resume, 0x28, 0x01, 0,
+ USB_MIXER_U8, 1, "Sample Clock Source",
+ &opt_clock, &elem);
+ if (err < 0)
+ return err;
+
+ /* val_len == 1 and UAC2_CS_MEM */
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_sync, NULL, 0x3c, 0x00, 2,
+ USB_MIXER_U8, 1, "Sample Clock Sync Status",
+ &opt_sync, &elem);
+ if (err < 0)
+ return err;
+
+ /* initialize sampling rate to 48000 */
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), UAC2_CS_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS |
+ USB_DIR_OUT, 0x0100, snd_usb_ctrl_intf(mixer->chip) |
+ (0x29 << 8), sample_rate_buffer, 4);
+ if (err < 0)
+ return err;
+
+ return err;
+}
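
(Aside: the hard-coded sample_rate_buffer above is simply 48000 Hz encoded as a 32-bit little-endian value, and the matrix mix volume controls pack the input/output pair into one control index with (i << 3) + (o & 0x07). A minimal standalone C sketch of both encodings, shown for illustration only and not part of the driver:)

#include <stdio.h>
#include <stdint.h>

/* Encode a sample rate the way the driver's sample_rate_buffer does. */
static void rate_to_le_bytes(uint32_t rate_hz, unsigned char buf[4])
{
	buf[0] = rate_hz & 0xff;
	buf[1] = (rate_hz >> 8) & 0xff;
	buf[2] = (rate_hz >> 16) & 0xff;
	buf[3] = (rate_hz >> 24) & 0xff;
}

int main(void)
{
	unsigned char buf[4];
	int i, o;

	rate_to_le_bytes(48000, buf);
	/* prints "80 bb 00 00", the same bytes as sample_rate_buffer */
	printf("48000 Hz -> %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);

	/* matrix mix volume index: low 3 bits select output, the rest input */
	for (i = 0; i < 2; i++)
		for (o = 0; o < 3; o++)
			printf("Matrix %02d Mix %c -> index %d\n",
			       i + 1, 'A' + o, (i << 3) + (o & 0x07));
	return 0;
}
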
diff --git a/sound/usb/mixer_scarlett.h b/sound/usb/mixer_scarlett.h
new file mode 100644
index 000000000000..19c592ab0332
--- /dev/null
+++ b/sound/usb/mixer_scarlett.h
@@ -0,0 +1,6 @@
+#ifndef __USB_MIXER_SCARLETT_H
+#define __USB_MIXER_SCARLETT_H
+
+int snd_scarlett_controls_create(struct usb_mixer_interface *mixer);
+
+#endif /* __USB_MIXER_SCARLETT_H */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index c62a1659106d..0d8aba5fe1a8 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -482,6 +482,11 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
/* set interface */
if (subs->interface != fmt->iface ||
subs->altset_idx != fmt->altset_idx) {
+
+ err = snd_usb_select_mode_quirk(subs, fmt);
+ if (err < 0)
+ return -EIO;
+
err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
if (err < 0) {
dev_err(&dev->dev,
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c657752a420c..0a598af9b38b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2667,57 +2667,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.type = QUIRK_MIDI_NOVATION
}
},
-{
- /*
- * Focusrite Scarlett 18i6
- *
- * Avoid mixer creation, which otherwise fails because some of
- * the interface descriptor subtypes for interface 0 are
- * unknown. That should be fixed or worked-around but this at
- * least allows the device to be used successfully with a DAW
- * and an external mixer. See comments below about other
- * ignored interfaces.
- */
- USB_DEVICE(0x1235, 0x8004),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Focusrite",
- .product_name = "Scarlett 18i6",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_COMPOSITE,
- .data = & (const struct snd_usb_audio_quirk[]) {
- {
- /* InterfaceSubClass 1 (Control Device) */
- .ifnum = 0,
- .type = QUIRK_IGNORE_INTERFACE
- },
- {
- .ifnum = 1,
- .type = QUIRK_AUDIO_STANDARD_INTERFACE
- },
- {
- .ifnum = 2,
- .type = QUIRK_AUDIO_STANDARD_INTERFACE
- },
- {
- /* InterfaceSubClass 1 (Control Device) */
- .ifnum = 3,
- .type = QUIRK_IGNORE_INTERFACE
- },
- {
- .ifnum = 4,
- .type = QUIRK_MIDI_STANDARD_INTERFACE
- },
- {
- /* InterfaceSubClass 1 (Device Firmware Update) */
- .ifnum = 5,
- .type = QUIRK_IGNORE_INTERFACE
- },
- {
- .ifnum = -1
- }
- }
- }
-},
/* Access Music devices */
{
@@ -2804,133 +2753,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
-/* Hauppauge HVR-950Q and HVR-850 */
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-850",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
-{
- USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
- USB_DEVICE_ID_MATCH_INT_CLASS |
- USB_DEVICE_ID_MATCH_INT_SUBCLASS,
- .bInterfaceClass = USB_CLASS_AUDIO,
- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .vendor_name = "Hauppauge",
- .product_name = "HVR-950Q",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_AUDIO_ALIGN_TRANSFER,
- }
-},
+/*
+ * Auvitek au0828 devices with audio interface.
+ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
+ * Please note that some drivers are DVB only and don't need to be
+ * listed here. That's the case, for example, with DVICO_FUSIONHDTV7.
+ */
+
+#define AU0828_DEVICE(vid, pid, vname, pname) { \
+ USB_DEVICE_VENDOR_SPEC(vid, pid), \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
+ .bInterfaceClass = USB_CLASS_AUDIO, \
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
+ .vendor_name = vname, \
+ .product_name = pname, \
+ .ifnum = QUIRK_ANY_INTERFACE, \
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
+ } \
+}
+
+AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
+AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
+AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
+AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
+AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
+AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
+AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
+AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
/* Digidesign Mbox */
{
@@ -2944,7 +2805,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.data = (const struct snd_usb_audio_quirk[]){
{
.ifnum = 0,
- .type = QUIRK_IGNORE_INTERFACE,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
},
{
.ifnum = 1,
@@ -2955,16 +2816,40 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.iface = 1,
.altsetting = 1,
.altset_idx = 1,
- .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .attributes = 0x4,
.endpoint = 0x02,
- .ep_attr = 0x01,
- .rates = SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000,
- .rate_min = 44100,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_SYNC,
+ .maxpacksize = 0x130,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
.rate_max = 48000,
- .nr_rates = 2,
+ .nr_rates = 1,
.rate_table = (unsigned int[]) {
- 44100, 48000
+ 48000
+ }
+ }
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 1,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0x4,
+ .endpoint = 0x81,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .maxpacksize = 0x130,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
}
}
},
@@ -2972,7 +2857,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.ifnum = -1
}
}
-
}
},
@@ -3203,6 +3087,46 @@ YAMAHA_DEVICE(0x7010, "UB99"),
{
/*
+ * ZOOM R16/24 in audio interface mode.
+ * Mixer descriptors are garbage; further quirks will be needed
+ * to make any of it functional, so the mixer is disabled for now.
+ * The playback stream appears to start and run fine, but no sound
+ * is produced, so it is also disabled for now.
+ */
+ USB_DEVICE(0x1686, 0x00dd),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ /* Mixer */
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ {
+ /* Playback */
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ {
+ /* Capture */
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+ },
+ {
+ /* Midi */
+ .ifnum = 3,
+ .type = QUIRK_MIDI_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = -1
+ },
+ }
+ }
+},
+
+{
+ /*
* Some USB MIDI devices don't have an audio control interface,
* so we have to grab MIDI streaming interfaces here.
*/
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 60dfe0d28771..4dbfb3d18ee2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -43,12 +43,13 @@
static int create_composite_quirk(struct snd_usb_audio *chip,
struct usb_interface *iface,
struct usb_driver *driver,
- const struct snd_usb_audio_quirk *quirk)
+ const struct snd_usb_audio_quirk *quirk_comp)
{
int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber;
+ const struct snd_usb_audio_quirk *quirk;
int err;
- for (quirk = quirk->data; quirk->ifnum >= 0; ++quirk) {
+ for (quirk = quirk_comp->data; quirk->ifnum >= 0; ++quirk) {
iface = usb_ifnum_to_if(chip->dev, quirk->ifnum);
if (!iface)
continue;
@@ -58,9 +59,17 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
err = snd_usb_create_quirk(chip, iface, driver, quirk);
if (err < 0)
return err;
- if (quirk->ifnum != probed_ifnum)
+ }
+
+ for (quirk = quirk_comp->data; quirk->ifnum >= 0; ++quirk) {
+ iface = usb_ifnum_to_if(chip->dev, quirk->ifnum);
+ if (!iface)
+ continue;
+ if (quirk->ifnum != probed_ifnum &&
+ !usb_interface_claimed(iface))
usb_driver_claim_interface(driver, iface, (void *)-1L);
}
+
return 0;
}
@@ -1102,6 +1111,44 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
}
}
+
+/* Marantz/Denon USB DACs need a vendor cmd to switch
+ * between PCM and native DSD mode
+ */
+int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt)
+{
+ struct usb_device *dev = subs->dev;
+ int err;
+
+ switch (subs->stream->chip->usb_id) {
+ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+
+ /* First switch to alt set 0, otherwise the mode switch cmd
+ * will not be accepted by the DAC
+ */
+ err = usb_set_interface(dev, fmt->iface, 0);
+ if (err < 0)
+ return err;
+
+ mdelay(20); /* Delay needed after setting the interface */
+
+ switch (fmt->altsetting) {
+ case 2: /* DSD mode requested */
+ case 1: /* PCM mode requested */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ fmt->altsetting - 1, 1, NULL, 0);
+ if (err < 0)
+ return err;
+ break;
+ }
+ mdelay(20);
+ }
+ return 0;
+}
+
void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
{
/*
@@ -1160,6 +1207,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
break;
}
}
+
+ /* Zoom R16/24 needs a tiny delay here, otherwise requests like
+ * get/set frequency return as failed despite actually succeeding.
+ */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1686) &&
+ (le16_to_cpu(dev->descriptor.idProduct) == 0x00dd) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(1);
}
/*
@@ -1204,5 +1259,16 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
break;
}
+ /* Denon/Marantz devices with USB DAC functionality */
+ switch (chip->usb_id) {
+ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+ if (fp->altsetting == 2)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
index 665e972a1b40..1b862386577d 100644
--- a/sound/usb/quirks.h
+++ b/sound/usb/quirks.h
@@ -31,6 +31,9 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value,
__u16 index, void *data, __u16 size);
+int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt);
+
u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
struct audioformat *fp,
unsigned int sample_bytes);
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index a63330dd1407..61d5dc2a3421 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -272,13 +272,8 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
for (s = 0; s < 4; s++) {
struct snd_usX2Y_substream *subs = usX2Y->subs[s];
if (subs) {
- if (atomic_read(&subs->state) >= state_PRERUNNING) {
- unsigned long flags;
-
- snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
- snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
- }
+ if (atomic_read(&subs->state) >= state_PRERUNNING)
+ snd_pcm_stop_xrun(subs->pcm_substream);
for (u = 0; u < NRURBS; u++) {
struct urb *urb = subs->urb[u];
if (NULL != urb)
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 8f96b3ee0724..f437d739f37d 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -33,6 +33,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
+#include <getopt.h>
static int target_fd;
static char target_fname[W_MAX_PATH];
@@ -126,15 +127,43 @@ static int hv_copy_cancel(void)
}
-int main(void)
+void print_usage(char *argv[])
+{
+ fprintf(stderr, "Usage: %s [options]\n"
+ "Options are:\n"
+ " -n, --no-daemon stay in foreground, don't daemonize\n"
+ " -h, --help print this help\n", argv[0]);
+}
+
+int main(int argc, char *argv[])
{
int fd, fcopy_fd, len;
int error;
+ int daemonize = 1, long_index = 0, opt;
int version = FCOPY_CURRENT_VERSION;
char *buffer[4096 * 2];
struct hv_fcopy_hdr *in_msg;
- if (daemon(1, 0)) {
+ static struct option long_options[] = {
+ {"help", no_argument, 0, 'h' },
+ {"no-daemon", no_argument, 0, 'n' },
+ {0, 0, 0, 0 }
+ };
+
+ while ((opt = getopt_long(argc, argv, "hn", long_options,
+ &long_index)) != -1) {
+ switch (opt) {
+ case 'n':
+ daemonize = 0;
+ break;
+ case 'h':
+ default:
+ print_usage(argv);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (daemonize && daemon(1, 0)) {
syslog(LOG_ERR, "daemon() failed; error: %s", strerror(errno));
exit(EXIT_FAILURE);
}
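
(Aside: the same -n/--no-daemon and -h/--help handling is added to all three Hyper-V daemons in this patch: hv_fcopy, hv_kvp and hv_vss. A stripped-down, self-contained sketch of that shared pattern, shown here only for reference:)

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	int daemonize = 1, long_index = 0, opt;
	static struct option long_options[] = {
		{"help",      no_argument, 0, 'h' },
		{"no-daemon", no_argument, 0, 'n' },
		{0,           0,           0,  0  }
	};

	while ((opt = getopt_long(argc, argv, "hn", long_options,
				  &long_index)) != -1) {
		switch (opt) {
		case 'n':
			daemonize = 0;
			break;
		case 'h':
		default:
			fprintf(stderr, "Usage: %s [-n|--no-daemon] [-h|--help]\n",
				argv[0]);
			exit(EXIT_FAILURE);
		}
	}

	/* stay in the foreground when -n/--no-daemon was given */
	if (daemonize && daemon(1, 0)) {
		perror("daemon");
		exit(EXIT_FAILURE);
	}

	/* a real daemon would enter its device/netlink loop here */
	return 0;
}
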
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 4088b816a3ee..6a6432a20a1d 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -43,6 +43,7 @@
#include <fcntl.h>
#include <dirent.h>
#include <net/if.h>
+#include <getopt.h>
/*
* KVP protocol: The user mode component first registers with the
@@ -1417,7 +1418,15 @@ netlink_send(int fd, struct cn_msg *msg)
return sendmsg(fd, &message, 0);
}
-int main(void)
+void print_usage(char *argv[])
+{
+ fprintf(stderr, "Usage: %s [options]\n"
+ "Options are:\n"
+ " -n, --no-daemon stay in foreground, don't daemonize\n"
+ " -h, --help print this help\n", argv[0]);
+}
+
+int main(int argc, char *argv[])
{
int fd, len, nl_group;
int error;
@@ -1435,9 +1444,30 @@ int main(void)
struct hv_kvp_ipaddr_value *kvp_ip_val;
char *kvp_recv_buffer;
size_t kvp_recv_buffer_len;
+ int daemonize = 1, long_index = 0, opt;
+
+ static struct option long_options[] = {
+ {"help", no_argument, 0, 'h' },
+ {"no-daemon", no_argument, 0, 'n' },
+ {0, 0, 0, 0 }
+ };
+
+ while ((opt = getopt_long(argc, argv, "hn", long_options,
+ &long_index)) != -1) {
+ switch (opt) {
+ case 'n':
+ daemonize = 0;
+ break;
+ case 'h':
+ default:
+ print_usage(argv);
+ exit(EXIT_FAILURE);
+ }
+ }
- if (daemon(1, 0))
+ if (daemonize && daemon(1, 0))
return 1;
+
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
@@ -1529,8 +1559,15 @@ int main(void)
addr_p, &addr_l);
if (len < 0) {
+ int saved_errno = errno;
syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
addr.nl_pid, errno, strerror(errno));
+
+ if (saved_errno == ENOBUFS) {
+ syslog(LOG_ERR, "receive error: ignored");
+ continue;
+ }
+
close(fd);
return -1;
}
@@ -1733,8 +1770,15 @@ kvp_done:
len = netlink_send(fd, incoming_cn_msg);
if (len < 0) {
+ int saved_errno = errno;
syslog(LOG_ERR, "net_link send failed; error: %d %s", errno,
strerror(errno));
+
+ if (saved_errno == ENOMEM || saved_errno == ENOBUFS) {
+ syslog(LOG_ERR, "send error: ignored");
+ continue;
+ }
+
exit(EXIT_FAILURE);
}
}
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 6a213b8cd7b9..5e63f70bd956 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -36,6 +36,7 @@
#include <linux/hyperv.h>
#include <linux/netlink.h>
#include <syslog.h>
+#include <getopt.h>
static struct sockaddr_nl addr;
@@ -44,35 +45,51 @@ static struct sockaddr_nl addr;
#endif
-static int vss_do_freeze(char *dir, unsigned int cmd, char *fs_op)
+/* Don't use syslog() in this function since that can cause a write to disk */
+static int vss_do_freeze(char *dir, unsigned int cmd)
{
int ret, fd = open(dir, O_RDONLY);
if (fd < 0)
return 1;
+
ret = ioctl(fd, cmd, 0);
- syslog(LOG_INFO, "VSS: %s of %s: %s\n", fs_op, dir, strerror(errno));
+
+ /*
+ * If a partition is mounted more than once, only the first
+ * FREEZE/THAW can succeed and the later ones will get
+ * EBUSY/EINVAL respectively. There are two possible cases:
+ * 1) a user may mount the same partition to different directories,
+ * by mistake or on purpose;
+ * 2) a btrfs subvolume can make the same partition appear to be
+ * mounted more than once.
+ */
+ if (ret) {
+ if ((cmd == FIFREEZE && errno == EBUSY) ||
+ (cmd == FITHAW && errno == EINVAL)) {
+ close(fd);
+ return 0;
+ }
+ }
+
close(fd);
return !!ret;
}
static int vss_operate(int operation)
{
- char *fs_op;
char match[] = "/dev/";
FILE *mounts;
struct mntent *ent;
unsigned int cmd;
- int error = 0, root_seen = 0;
+ int error = 0, root_seen = 0, save_errno = 0;
switch (operation) {
case VSS_OP_FREEZE:
cmd = FIFREEZE;
- fs_op = "freeze";
break;
case VSS_OP_THAW:
cmd = FITHAW;
- fs_op = "thaw";
break;
default:
return -1;
@@ -85,7 +102,7 @@ static int vss_operate(int operation)
while ((ent = getmntent(mounts))) {
if (strncmp(ent->mnt_fsname, match, strlen(match)))
continue;
- if (strcmp(ent->mnt_type, "iso9660") == 0)
+ if (hasmntopt(ent, MNTOPT_RO) != NULL)
continue;
if (strcmp(ent->mnt_type, "vfat") == 0)
continue;
@@ -93,14 +110,30 @@ static int vss_operate(int operation)
root_seen = 1;
continue;
}
- error |= vss_do_freeze(ent->mnt_dir, cmd, fs_op);
+ error |= vss_do_freeze(ent->mnt_dir, cmd);
+ if (error && operation == VSS_OP_FREEZE)
+ goto err;
}
- endmntent(mounts);
if (root_seen) {
- error |= vss_do_freeze("/", cmd, fs_op);
+ error |= vss_do_freeze("/", cmd);
+ if (error && operation == VSS_OP_FREEZE)
+ goto err;
}
+ goto out;
+err:
+ save_errno = errno;
+ vss_operate(VSS_OP_THAW);
+ /* Call syslog after we thaw all filesystems */
+ if (ent)
+ syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s",
+ ent->mnt_dir, save_errno, strerror(save_errno));
+ else
+ syslog(LOG_ERR, "FREEZE of / failed; error:%d %s", save_errno,
+ strerror(save_errno));
+out:
+ endmntent(mounts);
return error;
}
@@ -131,7 +164,15 @@ static int netlink_send(int fd, struct cn_msg *msg)
return sendmsg(fd, &message, 0);
}
-int main(void)
+void print_usage(char *argv[])
+{
+ fprintf(stderr, "Usage: %s [options]\n"
+ "Options are:\n"
+ " -n, --no-daemon stay in foreground, don't daemonize\n"
+ " -h, --help print this help\n", argv[0]);
+}
+
+int main(int argc, char *argv[])
{
int fd, len, nl_group;
int error;
@@ -143,8 +184,28 @@ int main(void)
struct hv_vss_msg *vss_msg;
char *vss_recv_buffer;
size_t vss_recv_buffer_len;
+ int daemonize = 1, long_index = 0, opt;
+
+ static struct option long_options[] = {
+ {"help", no_argument, 0, 'h' },
+ {"no-daemon", no_argument, 0, 'n' },
+ {0, 0, 0, 0 }
+ };
+
+ while ((opt = getopt_long(argc, argv, "hn", long_options,
+ &long_index)) != -1) {
+ switch (opt) {
+ case 'n':
+ daemonize = 0;
+ break;
+ case 'h':
+ default:
+ print_usage(argv);
+ exit(EXIT_FAILURE);
+ }
+ }
- if (daemon(1, 0))
+ if (daemonize && daemon(1, 0))
return 1;
openlog("Hyper-V VSS", 0, LOG_USER);
@@ -249,8 +310,16 @@ int main(void)
case VSS_OP_FREEZE:
case VSS_OP_THAW:
error = vss_operate(op);
- if (error)
+ syslog(LOG_INFO, "VSS: op=%s: %s\n",
+ op == VSS_OP_FREEZE ? "FREEZE" : "THAW",
+ error ? "failed" : "succeeded");
+
+ if (error) {
error = HV_E_FAIL;
+ syslog(LOG_ERR, "op=%d failed!", op);
+ syslog(LOG_ERR, "report it with these files:");
+ syslog(LOG_ERR, "/etc/fstab and /proc/mounts");
+ }
break;
default:
syslog(LOG_ERR, "Illegal op:%d\n", op);
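
(Aside: vss_operate() above ultimately reduces to FIFREEZE/FITHAW ioctls on a directory fd of each mounted filesystem. A minimal standalone sketch of that primitive, using a hypothetical /mnt/data mount point and requiring root, for illustration only:)

#include <fcntl.h>
#include <linux/fs.h>		/* FIFREEZE, FITHAW */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int fs_ioctl(const char *dir, unsigned long cmd)
{
	int fd = open(dir, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, cmd, 0);
	close(fd);
	return ret;
}

int main(void)
{
	const char *dir = "/mnt/data";	/* placeholder mount point */

	if (fs_ioctl(dir, FIFREEZE))
		perror("FIFREEZE");
	if (fs_ioctl(dir, FITHAW))
		perror("FITHAW");
	return 0;
}
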
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
deleted file mode 100644
index d82b170bb216..000000000000
--- a/tools/perf/util/include/asm/hash.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-/* Stub */
-
-#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 45f145c6f843..4e511221a0c1 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,20 +1,23 @@
TARGETS = breakpoints
TARGETS += cpu-hotplug
TARGETS += efivarfs
+TARGETS += exec
+TARGETS += firmware
+TARGETS += ftrace
TARGETS += kcmp
TARGETS += memfd
TARGETS += memory-hotplug
-TARGETS += mqueue
TARGETS += mount
+TARGETS += mqueue
TARGETS += net
+TARGETS += powerpc
TARGETS += ptrace
+TARGETS += size
+TARGETS += sysctl
TARGETS += timers
-TARGETS += vm
-TARGETS += powerpc
TARGETS += user
-TARGETS += sysctl
-TARGETS += firmware
-TARGETS += ftrace
+TARGETS += vm
+# Please keep the TARGETS list alphabetically sorted
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index a0743f3b2b57..120895ab5505 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -17,6 +17,8 @@
#include <sys/types.h>
#include <sys/wait.h>
+#include "../kselftest.h"
+
/* Breakpoint access modes */
enum {
@@ -42,7 +44,7 @@ static void set_breakpoint_addr(void *addr, int n)
offsetof(struct user, u_debugreg[n]), addr);
if (ret) {
perror("Can't set breakpoint addr\n");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -105,7 +107,7 @@ static void toggle_breakpoint(int n, int type, int len,
offsetof(struct user, u_debugreg[7]), dr7);
if (ret) {
perror("Can't set dr7");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -275,7 +277,7 @@ static void check_success(const char *msg)
msg2 = "Ok";
if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) {
perror("Can't poke\n");
- exit(-1);
+ ksft_exit_fail();
}
}
@@ -390,5 +392,5 @@ int main(int argc, char **argv)
wait(NULL);
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/exec/.gitignore b/tools/testing/selftests/exec/.gitignore
new file mode 100644
index 000000000000..64073e050c6a
--- /dev/null
+++ b/tools/testing/selftests/exec/.gitignore
@@ -0,0 +1,9 @@
+subdir*
+script*
+execveat
+execveat.symlink
+execveat.moved
+execveat.path.ephemeral
+execveat.ephemeral
+execveat.denatured
+xxxxxxxx*
\ No newline at end of file
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
new file mode 100644
index 000000000000..66dfc2ce1788
--- /dev/null
+++ b/tools/testing/selftests/exec/Makefile
@@ -0,0 +1,25 @@
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall
+BINARIES = execveat
+DEPS = execveat.symlink execveat.denatured script subdir
+all: $(BINARIES) $(DEPS)
+
+subdir:
+ mkdir -p $@
+script:
+ echo '#!/bin/sh' > $@
+ echo 'exit $$*' >> $@
+ chmod +x $@
+execveat.symlink: execveat
+ ln -s -f $< $@
+execveat.denatured: execveat
+ cp $< $@
+ chmod -x $@
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $^
+
+run_tests: all
+ ./execveat
+
+clean:
+ rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx*
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
new file mode 100644
index 000000000000..33a5c06d95ca
--- /dev/null
+++ b/tools/testing/selftests/exec/execveat.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2014 Google, Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2
+ *
+ * Selftests for execveat(2).
+ */
+
+#define _GNU_SOURCE /* to get O_PATH, AT_EMPTY_PATH */
+#include <sys/sendfile.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static char longpath[2 * PATH_MAX] = "";
+static char *envp[] = { "IN_TEST=yes", NULL, NULL };
+static char *argv[] = { "execveat", "99", NULL };
+
+static int execveat_(int fd, const char *path, char **argv, char **envp,
+ int flags)
+{
+#ifdef __NR_execveat
+ return syscall(__NR_execveat, fd, path, argv, envp, flags);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+#define check_execveat_fail(fd, path, flags, errno) \
+ _check_execveat_fail(fd, path, flags, errno, #errno)
+static int _check_execveat_fail(int fd, const char *path, int flags,
+ int expected_errno, const char *errno_str)
+{
+ int rc;
+
+ errno = 0;
+ printf("Check failure of execveat(%d, '%s', %d) with %s... ",
+ fd, path?:"(null)", flags, errno_str);
+ rc = execveat_(fd, path, argv, envp, flags);
+
+ if (rc > 0) {
+ printf("[FAIL] (unexpected success from execveat(2))\n");
+ return 1;
+ }
+ if (errno != expected_errno) {
+ printf("[FAIL] (expected errno %d (%s) not %d (%s)\n",
+ expected_errno, strerror(expected_errno),
+ errno, strerror(errno));
+ return 1;
+ }
+ printf("[OK]\n");
+ return 0;
+}
+
+static int check_execveat_invoked_rc(int fd, const char *path, int flags,
+ int expected_rc)
+{
+ int status;
+ int rc;
+ pid_t child;
+ int pathlen = path ? strlen(path) : 0;
+
+ if (pathlen > 40)
+ printf("Check success of execveat(%d, '%.20s...%s', %d)... ",
+ fd, path, (path + pathlen - 20), flags);
+ else
+ printf("Check success of execveat(%d, '%s', %d)... ",
+ fd, path?:"(null)", flags);
+ child = fork();
+ if (child < 0) {
+ printf("[FAIL] (fork() failed)\n");
+ return 1;
+ }
+ if (child == 0) {
+ /* Child: do execveat(). */
+ rc = execveat_(fd, path, argv, envp, flags);
+ printf("[FAIL]: execveat() failed, rc=%d errno=%d (%s)\n",
+ rc, errno, strerror(errno));
+ exit(1); /* should not reach here */
+ }
+ /* Parent: wait for & check child's exit status. */
+ rc = waitpid(child, &status, 0);
+ if (rc != child) {
+ printf("[FAIL] (waitpid(%d,...) returned %d)\n", child, rc);
+ return 1;
+ }
+ if (!WIFEXITED(status)) {
+ printf("[FAIL] (child %d did not exit cleanly, status=%08x)\n",
+ child, status);
+ return 1;
+ }
+ if (WEXITSTATUS(status) != expected_rc) {
+ printf("[FAIL] (child %d exited with %d not %d)\n",
+ child, WEXITSTATUS(status), expected_rc);
+ return 1;
+ }
+ printf("[OK]\n");
+ return 0;
+}
+
+static int check_execveat(int fd, const char *path, int flags)
+{
+ return check_execveat_invoked_rc(fd, path, flags, 99);
+}
+
+static char *concat(const char *left, const char *right)
+{
+ char *result = malloc(strlen(left) + strlen(right) + 1);
+
+ strcpy(result, left);
+ strcat(result, right);
+ return result;
+}
+
+static int open_or_die(const char *filename, int flags)
+{
+ int fd = open(filename, flags);
+
+ if (fd < 0) {
+ printf("Failed to open '%s'; "
+ "check prerequisites are available\n", filename);
+ exit(1);
+ }
+ return fd;
+}
+
+static void exe_cp(const char *src, const char *dest)
+{
+ int in_fd = open_or_die(src, O_RDONLY);
+ int out_fd = open(dest, O_RDWR|O_CREAT|O_TRUNC, 0755);
+ struct stat info;
+
+ fstat(in_fd, &info);
+ sendfile(out_fd, in_fd, NULL, info.st_size);
+ close(in_fd);
+ close(out_fd);
+}
+
+#define XX_DIR_LEN 200
+static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
+{
+ int fail = 0;
+ int ii, count, len;
+ char longname[XX_DIR_LEN + 1];
+ int fd;
+
+ if (*longpath == '\0') {
+ /* Create a filename close to PATH_MAX in length */
+ memset(longname, 'x', XX_DIR_LEN - 1);
+ longname[XX_DIR_LEN - 1] = '/';
+ longname[XX_DIR_LEN] = '\0';
+ count = (PATH_MAX - 3) / XX_DIR_LEN;
+ for (ii = 0; ii < count; ii++) {
+ strcat(longpath, longname);
+ mkdir(longpath, 0755);
+ }
+ len = (PATH_MAX - 3) - (count * XX_DIR_LEN);
+ if (len <= 0)
+ len = 1;
+ memset(longname, 'y', len);
+ longname[len] = '\0';
+ strcat(longpath, longname);
+ }
+ exe_cp(src, longpath);
+
+ /*
+ * Execute as a pre-opened file descriptor, which works whether this is
+ * a script or not (because the interpreter sees a filename like
+ * "/dev/fd/20").
+ */
+ fd = open(longpath, O_RDONLY);
+ if (fd > 0) {
+ printf("Invoke copy of '%s' via filename of length %lu:\n",
+ src, strlen(longpath));
+ fail += check_execveat(fd, "", AT_EMPTY_PATH);
+ } else {
+ printf("Failed to open length %lu filename, errno=%d (%s)\n",
+ strlen(longpath), errno, strerror(errno));
+ fail++;
+ }
+
+ /*
+ * Execute as a long pathname relative to ".". If this is a script,
+ * the interpreter will launch but fail to open the script because its
+ * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+ */
+ if (is_script)
+ fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+ else
+ fail += check_execveat(dot_dfd, longpath, 0);
+
+ return fail;
+}
+
+static int run_tests(void)
+{
+ int fail = 0;
+ char *fullname = realpath("execveat", NULL);
+ char *fullname_script = realpath("script", NULL);
+ char *fullname_symlink = concat(fullname, ".symlink");
+ int subdir_dfd = open_or_die("subdir", O_DIRECTORY|O_RDONLY);
+ int subdir_dfd_ephemeral = open_or_die("subdir.ephemeral",
+ O_DIRECTORY|O_RDONLY);
+ int dot_dfd = open_or_die(".", O_DIRECTORY|O_RDONLY);
+ int dot_dfd_path = open_or_die(".", O_DIRECTORY|O_RDONLY|O_PATH);
+ int dot_dfd_cloexec = open_or_die(".", O_DIRECTORY|O_RDONLY|O_CLOEXEC);
+ int fd = open_or_die("execveat", O_RDONLY);
+ int fd_path = open_or_die("execveat", O_RDONLY|O_PATH);
+ int fd_symlink = open_or_die("execveat.symlink", O_RDONLY);
+ int fd_denatured = open_or_die("execveat.denatured", O_RDONLY);
+ int fd_denatured_path = open_or_die("execveat.denatured",
+ O_RDONLY|O_PATH);
+ int fd_script = open_or_die("script", O_RDONLY);
+ int fd_ephemeral = open_or_die("execveat.ephemeral", O_RDONLY);
+ int fd_ephemeral_path = open_or_die("execveat.path.ephemeral",
+ O_RDONLY|O_PATH);
+ int fd_script_ephemeral = open_or_die("script.ephemeral", O_RDONLY);
+ int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
+ int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
+
+ /* Change file position to confirm it doesn't affect anything */
+ lseek(fd, 10, SEEK_SET);
+
+ /* Normal executable file: */
+ /* dfd + path */
+ fail += check_execveat(subdir_dfd, "../execveat", 0);
+ fail += check_execveat(dot_dfd, "execveat", 0);
+ fail += check_execveat(dot_dfd_path, "execveat", 0);
+ /* absolute path */
+ fail += check_execveat(AT_FDCWD, fullname, 0);
+ /* absolute path with nonsense dfd */
+ fail += check_execveat(99, fullname, 0);
+ /* fd + no path */
+ fail += check_execveat(fd, "", AT_EMPTY_PATH);
+ /* O_CLOEXEC fd + no path */
+ fail += check_execveat(fd_cloexec, "", AT_EMPTY_PATH);
+ /* O_PATH fd */
+ fail += check_execveat(fd_path, "", AT_EMPTY_PATH);
+
+ /* Mess with executable file that's already open: */
+ /* fd + no path to a file that's been renamed */
+ rename("execveat.ephemeral", "execveat.moved");
+ fail += check_execveat(fd_ephemeral, "", AT_EMPTY_PATH);
+ /* fd + no path to a file that's been deleted */
+ unlink("execveat.moved"); /* remove the file now fd open */
+ fail += check_execveat(fd_ephemeral, "", AT_EMPTY_PATH);
+
+ /* Mess with executable file that's already open with O_PATH */
+ /* fd + no path to a file that's been deleted */
+ unlink("execveat.path.ephemeral");
+ fail += check_execveat(fd_ephemeral_path, "", AT_EMPTY_PATH);
+
+ /* Invalid argument failures */
+ fail += check_execveat_fail(fd, "", 0, ENOENT);
+ fail += check_execveat_fail(fd, NULL, AT_EMPTY_PATH, EFAULT);
+
+ /* Symlink to executable file: */
+ /* dfd + path */
+ fail += check_execveat(dot_dfd, "execveat.symlink", 0);
+ fail += check_execveat(dot_dfd_path, "execveat.symlink", 0);
+ /* absolute path */
+ fail += check_execveat(AT_FDCWD, fullname_symlink, 0);
+ /* fd + no path, even with AT_SYMLINK_NOFOLLOW (already followed) */
+ fail += check_execveat(fd_symlink, "", AT_EMPTY_PATH);
+ fail += check_execveat(fd_symlink, "",
+ AT_EMPTY_PATH|AT_SYMLINK_NOFOLLOW);
+
+ /* Symlink fails when AT_SYMLINK_NOFOLLOW set: */
+ /* dfd + path */
+ fail += check_execveat_fail(dot_dfd, "execveat.symlink",
+ AT_SYMLINK_NOFOLLOW, ELOOP);
+ fail += check_execveat_fail(dot_dfd_path, "execveat.symlink",
+ AT_SYMLINK_NOFOLLOW, ELOOP);
+ /* absolute path */
+ fail += check_execveat_fail(AT_FDCWD, fullname_symlink,
+ AT_SYMLINK_NOFOLLOW, ELOOP);
+
+ /* Shell script wrapping executable file: */
+ /* dfd + path */
+ fail += check_execveat(subdir_dfd, "../script", 0);
+ fail += check_execveat(dot_dfd, "script", 0);
+ fail += check_execveat(dot_dfd_path, "script", 0);
+ /* absolute path */
+ fail += check_execveat(AT_FDCWD, fullname_script, 0);
+ /* fd + no path */
+ fail += check_execveat(fd_script, "", AT_EMPTY_PATH);
+ fail += check_execveat(fd_script, "",
+ AT_EMPTY_PATH|AT_SYMLINK_NOFOLLOW);
+ /* O_CLOEXEC fd fails for a script (as script file inaccessible) */
+ fail += check_execveat_fail(fd_script_cloexec, "", AT_EMPTY_PATH,
+ ENOENT);
+ fail += check_execveat_fail(dot_dfd_cloexec, "script", 0, ENOENT);
+
+ /* Mess with script file that's already open: */
+ /* fd + no path to a file that's been renamed */
+ rename("script.ephemeral", "script.moved");
+ fail += check_execveat(fd_script_ephemeral, "", AT_EMPTY_PATH);
+ /* fd + no path to a file that's been deleted */
+ unlink("script.moved"); /* remove the file while fd open */
+ fail += check_execveat(fd_script_ephemeral, "", AT_EMPTY_PATH);
+
+ /* Rename a subdirectory in the path: */
+ rename("subdir.ephemeral", "subdir.moved");
+ fail += check_execveat(subdir_dfd_ephemeral, "../script", 0);
+ fail += check_execveat(subdir_dfd_ephemeral, "script", 0);
+ /* Remove the subdir and its contents */
+ unlink("subdir.moved/script");
+ unlink("subdir.moved");
+ /* Shell loads via deleted subdir OK because name starts with .. */
+ fail += check_execveat(subdir_dfd_ephemeral, "../script", 0);
+ fail += check_execveat_fail(subdir_dfd_ephemeral, "script", 0, ENOENT);
+
+ /* Flag values other than AT_SYMLINK_NOFOLLOW => EINVAL */
+ fail += check_execveat_fail(dot_dfd, "execveat", 0xFFFF, EINVAL);
+ /* Invalid path => ENOENT */
+ fail += check_execveat_fail(dot_dfd, "no-such-file", 0, ENOENT);
+ fail += check_execveat_fail(dot_dfd_path, "no-such-file", 0, ENOENT);
+ fail += check_execveat_fail(AT_FDCWD, "no-such-file", 0, ENOENT);
+ /* Attempt to execute directory => EACCES */
+ fail += check_execveat_fail(dot_dfd, "", AT_EMPTY_PATH, EACCES);
+ /* Attempt to execute non-executable => EACCES */
+ fail += check_execveat_fail(dot_dfd, "Makefile", 0, EACCES);
+ fail += check_execveat_fail(fd_denatured, "", AT_EMPTY_PATH, EACCES);
+ fail += check_execveat_fail(fd_denatured_path, "", AT_EMPTY_PATH,
+ EACCES);
+ /* Attempt to execute nonsense FD => EBADF */
+ fail += check_execveat_fail(99, "", AT_EMPTY_PATH, EBADF);
+ fail += check_execveat_fail(99, "execveat", 0, EBADF);
+ /* Attempt to execute relative to non-directory => ENOTDIR */
+ fail += check_execveat_fail(fd, "execveat", 0, ENOTDIR);
+
+ fail += check_execveat_pathmax(dot_dfd, "execveat", 0);
+ fail += check_execveat_pathmax(dot_dfd, "script", 1);
+ return fail;
+}
+
+static void prerequisites(void)
+{
+ int fd;
+ const char *script = "#!/bin/sh\nexit $*\n";
+
+ /* Create ephemeral copies of files */
+ exe_cp("execveat", "execveat.ephemeral");
+ exe_cp("execveat", "execveat.path.ephemeral");
+ exe_cp("script", "script.ephemeral");
+ mkdir("subdir.ephemeral", 0755);
+
+ fd = open("subdir.ephemeral/script", O_RDWR|O_CREAT|O_TRUNC, 0755);
+ write(fd, script, strlen(script));
+ close(fd);
+}
+
+int main(int argc, char **argv)
+{
+ int ii;
+ int rc;
+ const char *verbose = getenv("VERBOSE");
+
+ if (argc >= 2) {
+ /* If we are invoked with an argument, don't run tests. */
+ const char *in_test = getenv("IN_TEST");
+
+ if (verbose) {
+ printf(" invoked with:");
+ for (ii = 0; ii < argc; ii++)
+ printf(" [%d]='%s'", ii, argv[ii]);
+ printf("\n");
+ }
+
+ /* Check expected environment transferred. */
+ if (!in_test || strcmp(in_test, "yes") != 0) {
+ printf("[FAIL] (no IN_TEST=yes in env)\n");
+ return 1;
+ }
+
+ /* Use the final argument as an exit code. */
+ rc = atoi(argv[argc - 1]);
+ fflush(stdout);
+ } else {
+ prerequisites();
+ if (verbose)
+ envp[1] = "VERBOSE=1";
+ rc = run_tests();
+ if (rc > 0)
+ printf("%d tests failed\n", rc);
+ }
+ return rc;
+}
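
(Aside: outside the test harness, the fd-plus-AT_EMPTY_PATH case exercised above boils down to a single raw syscall. A minimal standalone sketch, using /bin/true as an arbitrary example target and the same __NR_execveat guard as the test:)

#define _GNU_SOURCE		/* for AT_EMPTY_PATH */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/true", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
#ifdef __NR_execveat
	/* execute the already-open fd; the empty path requires AT_EMPTY_PATH */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
#endif
	/* only reached if the kernel lacks execveat() or the call failed */
	perror("execveat");
	return 1;
}
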
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index 552f0810bffb..1b2ce334bb3f 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -5,6 +5,8 @@
#include <linux/msg.h>
#include <fcntl.h>
+#include "../kselftest.h"
+
#define MAX_MSG_SIZE 32
struct msg1 {
@@ -195,58 +197,58 @@ int main(int argc, char **argv)
if (getuid() != 0) {
printf("Please run the test as root - Exiting.\n");
- exit(1);
+ return ksft_exit_fail();
}
msgque.key = ftok(argv[0], 822155650);
if (msgque.key == -1) {
- printf("Can't make key\n");
- return -errno;
+ printf("Can't make key: %d\n", -errno);
+ return ksft_exit_fail();
}
msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
if (msgque.msq_id == -1) {
err = -errno;
- printf("Can't create queue\n");
+ printf("Can't create queue: %d\n", err);
goto err_out;
}
err = fill_msgque(&msgque);
if (err) {
- printf("Failed to fill queue\n");
+ printf("Failed to fill queue: %d\n", err);
goto err_destroy;
}
err = dump_queue(&msgque);
if (err) {
- printf("Failed to dump queue\n");
+ printf("Failed to dump queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to check and destroy queue\n");
+ printf("Failed to check and destroy queue: %d\n", err);
goto err_out;
}
err = restore_queue(&msgque);
if (err) {
- printf("Failed to restore queue\n");
+ printf("Failed to restore queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to test queue\n");
+ printf("Failed to test queue: %d\n", err);
goto err_out;
}
- return 0;
+ return ksft_exit_pass();
err_destroy:
if (msgctl(msgque.msq_id, IPC_RMID, 0)) {
printf("Failed to destroy queue: %d\n", -errno);
- return -errno;
+ return ksft_exit_fail();
}
err_out:
- return err;
+ return ksft_exit_fail();
}
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
index 8aabd82db9e4..ff0eefdc6ceb 100644
--- a/tools/testing/selftests/kcmp/Makefile
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -1,25 +1,7 @@
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
-ifeq ($(ARCH),i386)
- ARCH := x86
- CFLAGS := -DCONFIG_X86_32 -D__i386__
-endif
-ifeq ($(ARCH),x86_64)
- ARCH := x86
- CFLAGS := -DCONFIG_X86_64 -D__x86_64__
-endif
-
-CFLAGS += -I../../../../arch/x86/include/generated/
-CFLAGS += -I../../../../include/
+CC := $(CROSS_COMPILE)$(CC)
CFLAGS += -I../../../../usr/include/
-CFLAGS += -I../../../../arch/x86/include/
-all:
-ifeq ($(ARCH),x86)
- gcc $(CFLAGS) kcmp_test.c -o kcmp_test
-else
- echo "Not an x86 target, can't build kcmp selftest"
-endif
+all: kcmp_test
run_tests: all
@./kcmp_test || echo "kcmp_test: [FAIL]"
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index dbba4084869c..a5a4da856dfe 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -17,6 +17,8 @@
#include <sys/stat.h>
#include <sys/wait.h>
+#include "../kselftest.h"
+
static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
{
return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
@@ -34,13 +36,13 @@ int main(int argc, char **argv)
if (fd1 < 0) {
perror("Can't create file");
- exit(1);
+ ksft_exit_fail();
}
pid2 = fork();
if (pid2 < 0) {
perror("fork failed");
- exit(1);
+ ksft_exit_fail();
}
if (!pid2) {
@@ -50,7 +52,7 @@ int main(int argc, char **argv)
fd2 = open(kpath, O_RDWR, 0644);
if (fd2 < 0) {
perror("Can't open file");
- exit(1);
+ ksft_exit_fail();
}
/* An example of output and arguments */
@@ -74,23 +76,34 @@ int main(int argc, char **argv)
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
+ ksft_inc_fail_cnt();
ret = -1;
- } else
+ } else {
printf("PASS: 0 returned as expected\n");
+ ksft_inc_pass_cnt();
+ }
/* Compare with self */
ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
if (ret) {
printf("FAIL: 0 expected but %d returned (%s)\n",
ret, strerror(errno));
+ ksft_inc_fail_cnt();
ret = -1;
- } else
+ } else {
printf("PASS: 0 returned as expected\n");
+ ksft_inc_pass_cnt();
+ }
+
+ ksft_print_cnts();
- exit(ret);
+ if (ret)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
}
waitpid(pid2, &status, P_ALL);
- return 0;
+ return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
new file mode 100644
index 000000000000..572c8888167a
--- /dev/null
+++ b/tools/testing/selftests/kselftest.h
@@ -0,0 +1,62 @@
+/*
+ * kselftest.h: kselftest framework return codes to include from
+ * selftests.
+ *
+ * Copyright (c) 2014 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This file is released under the GPLv2.
+ */
+#ifndef __KSELFTEST_H
+#define __KSELFTEST_H
+
+#include <stdlib.h>
+#include <unistd.h>
+
+/* counters */
+struct ksft_count {
+ unsigned int ksft_pass;
+ unsigned int ksft_fail;
+ unsigned int ksft_xfail;
+ unsigned int ksft_xpass;
+ unsigned int ksft_xskip;
+};
+
+static struct ksft_count ksft_cnt;
+
+static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; }
+static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; }
+static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; }
+static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; }
+static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; }
+
+static inline void ksft_print_cnts(void)
+{
+ printf("Pass: %d Fail: %d Xfail: %d Xpass: %d, Xskip: %d\n",
+ ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
+ ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
+ ksft_cnt.ksft_xskip);
+}
+
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+static inline int ksft_exit_xfail(void)
+{
+ exit(2);
+}
+static inline int ksft_exit_xpass(void)
+{
+ exit(3);
+}
+static inline int ksft_exit_skip(void)
+{
+ exit(4);
+}
+
+#endif /* __KSELFTEST_H */
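
(Aside: a minimal sketch of how an individual selftest is expected to consume this header; the relative include path assumes the test sits one directory below tools/testing/selftests, as the tests converted in this patch do:)

#include <stdio.h>
#include "../kselftest.h"

int main(void)
{
	int ret = 0;	/* stand-in for a real check's result */

	if (ret == 0) {
		printf("PASS: check returned 0\n");
		ksft_inc_pass_cnt();
	} else {
		printf("FAIL: check returned %d\n", ret);
		ksft_inc_fail_cnt();
	}

	ksft_print_cnts();
	return ret ? ksft_exit_fail() : ksft_exit_pass();
}
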
diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
index 1b3ff2fda4d0..517785052f1c 100644
--- a/tools/testing/selftests/mount/unprivileged-remount-test.c
+++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
@@ -6,6 +6,8 @@
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/wait.h>
+#include <sys/vfs.h>
+#include <sys/statvfs.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
@@ -32,11 +34,14 @@
# define CLONE_NEWPID 0x20000000
#endif
+#ifndef MS_REC
+# define MS_REC 16384
+#endif
#ifndef MS_RELATIME
-#define MS_RELATIME (1 << 21)
+# define MS_RELATIME (1 << 21)
#endif
#ifndef MS_STRICTATIME
-#define MS_STRICTATIME (1 << 24)
+# define MS_STRICTATIME (1 << 24)
#endif
static void die(char *fmt, ...)
@@ -48,17 +53,14 @@ static void die(char *fmt, ...)
exit(EXIT_FAILURE);
}
-static void write_file(char *filename, char *fmt, ...)
+static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
{
char buf[4096];
int fd;
ssize_t written;
int buf_len;
- va_list ap;
- va_start(ap, fmt);
buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
if (buf_len < 0) {
die("vsnprintf failed: %s\n",
strerror(errno));
@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
fd = open(filename, O_WRONLY);
if (fd < 0) {
+ if ((errno == ENOENT) && enoent_ok)
+ return;
die("open of %s failed: %s\n",
filename, strerror(errno));
}
@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
}
}
+static void maybe_write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vmaybe_write_file(true, filename, fmt, ap);
+ va_end(ap);
+
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vmaybe_write_file(false, filename, fmt, ap);
+ va_end(ap);
+
+}
+
+static int read_mnt_flags(const char *path)
+{
+ int ret;
+ struct statvfs stat;
+ int mnt_flags;
+
+ ret = statvfs(path, &stat);
+ if (ret != 0) {
+ die("statvfs of %s failed: %s\n",
+ path, strerror(errno));
+ }
+ if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
+ ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
+ ST_SYNCHRONOUS | ST_MANDLOCK)) {
+ die("Unrecognized mount flags\n");
+ }
+ mnt_flags = 0;
+ if (stat.f_flag & ST_RDONLY)
+ mnt_flags |= MS_RDONLY;
+ if (stat.f_flag & ST_NOSUID)
+ mnt_flags |= MS_NOSUID;
+ if (stat.f_flag & ST_NODEV)
+ mnt_flags |= MS_NODEV;
+ if (stat.f_flag & ST_NOEXEC)
+ mnt_flags |= MS_NOEXEC;
+ if (stat.f_flag & ST_NOATIME)
+ mnt_flags |= MS_NOATIME;
+ if (stat.f_flag & ST_NODIRATIME)
+ mnt_flags |= MS_NODIRATIME;
+ if (stat.f_flag & ST_RELATIME)
+ mnt_flags |= MS_RELATIME;
+ if (stat.f_flag & ST_SYNCHRONOUS)
+ mnt_flags |= MS_SYNCHRONOUS;
+ if (stat.f_flag & ST_MANDLOCK)
+ mnt_flags |= ST_MANDLOCK;
+
+ return mnt_flags;
+}
+
static void create_and_enter_userns(void)
{
uid_t uid;
@@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
strerror(errno));
}
+ maybe_write_file("/proc/self/setgroups", "deny");
write_file("/proc/self/uid_map", "0 %d 1", uid);
write_file("/proc/self/gid_map", "0 %d 1", gid);
- if (setgroups(0, NULL) != 0) {
- die("setgroups failed: %s\n",
- strerror(errno));
- }
if (setgid(0) != 0) {
die ("setgid(0) failed %s\n",
strerror(errno));
@@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
}
static
-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+bool test_unpriv_remount(const char *fstype, const char *mount_options,
+ int mount_flags, int remount_flags, int invalid_flags)
{
pid_t child;
@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
strerror(errno));
}
- if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
- die("mount of /tmp failed: %s\n",
- strerror(errno));
+ if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
+ die("mount of %s with options '%s' on /tmp failed: %s\n",
+ fstype,
+ mount_options? mount_options : "",
+ strerror(errno));
}
create_and_enter_userns();
@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
static bool test_unpriv_remount_simple(int mount_flags)
{
- return test_unpriv_remount(mount_flags, mount_flags, 0);
+ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
}
static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
{
- return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
+ invalid_flags);
+}
+
+static bool test_priv_mount_unpriv_remount(void)
+{
+ pid_t child;
+ int ret;
+ const char *orig_path = "/dev";
+ const char *dest_path = "/tmp";
+ int orig_mnt_flags, remount_mnt_flags;
+
+ child = fork();
+ if (child == -1) {
+ die("fork failed: %s\n",
+ strerror(errno));
+ }
+ if (child != 0) { /* parent */
+ pid_t pid;
+ int status;
+ pid = waitpid(child, &status, 0);
+ if (pid == -1) {
+ die("waitpid failed: %s\n",
+ strerror(errno));
+ }
+ if (pid != child) {
+ die("waited for %d got %d\n",
+ child, pid);
+ }
+ if (!WIFEXITED(status)) {
+ die("child did not terminate cleanly\n");
+ }
+ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+ }
+
+ orig_mnt_flags = read_mnt_flags(orig_path);
+
+ create_and_enter_userns();
+ ret = unshare(CLONE_NEWNS);
+ if (ret != 0) {
+ die("unshare(CLONE_NEWNS) failed: %s\n",
+ strerror(errno));
+ }
+
+ ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
+ if (ret != 0) {
+ die("recursive bind mount of %s onto %s failed: %s\n",
+ orig_path, dest_path, strerror(errno));
+ }
+
+ ret = mount(dest_path, dest_path, "none",
+ MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
+ if (ret != 0) {
+ /* system("cat /proc/self/mounts"); */
+ die("remount of /tmp failed: %s\n",
+ strerror(errno));
+ }
+
+ remount_mnt_flags = read_mnt_flags(dest_path);
+ if (orig_mnt_flags != remount_mnt_flags) {
+ die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
+ dest_path, orig_path);
+ }
+ exit(EXIT_SUCCESS);
}
int main(int argc, char **argv)
{
- if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_RDONLY)) {
die("MS_RDONLY malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NODEV)) {
+ if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
die("MS_NODEV malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_NOSUID)) {
die("MS_NOSUID malfunctions\n");
}
- if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+ if (!test_unpriv_remount_simple(MS_NOEXEC)) {
die("MS_NOEXEC malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_RELATIME,
+ MS_NOATIME))
{
die("MS_RELATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_STRICTATIME,
+ MS_NOATIME))
{
die("MS_STRICTATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
- MS_STRICTATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_NOATIME,
+ MS_STRICTATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_NOATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
+ MS_NOATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
+ MS_NOATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
}
- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
- MS_STRICTATIME|MS_NODEV))
+ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
+ MS_STRICTATIME))
{
- die("MS_RELATIME malfunctions\n");
+ die("MS_NOATIME|MS_DIRATIME malfunctions\n");
}
- if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
- MS_NOATIME|MS_NODEV))
+ if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
{
die("Default atime malfunctions\n");
}
+ if (!test_priv_mount_unpriv_remount()) {
+ die("Mount flags unexpectedly changed after remount\n");
+ }
return EXIT_SUCCESS;
}
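The new test above relies on a read_mnt_flags() helper defined earlier in this file and not shown in this hunk. For reference only, one plausible way to read a mount point's flag bits back from the kernel is statvfs(); the sketch below illustrates that approach under stated assumptions, is not a quote of the real helper, and maps only a few of the MS_* flags.

/* Illustration only: reading a path's mount flags via statvfs().
 * The real read_mnt_flags() used by the test lives outside this hunk. */
#define _GNU_SOURCE		/* for ST_NODEV / ST_NOEXEC in glibc */
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <stdio.h>

static int mnt_flags(const char *path)
{
	struct statvfs stvfs;
	int flags = 0;

	if (statvfs(path, &stvfs) != 0)
		return -1;
	if (stvfs.f_flag & ST_RDONLY)
		flags |= MS_RDONLY;
	if (stvfs.f_flag & ST_NOSUID)
		flags |= MS_NOSUID;
	if (stvfs.f_flag & ST_NODEV)
		flags |= MS_NODEV;
	if (stvfs.f_flag & ST_NOEXEC)
		flags |= MS_NOEXEC;
	return flags;
}

int main(void)
{
	printf("/tmp flags: %#x\n", mnt_flags("/tmp"));
	return 0;
}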
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c7493b8f9b0e..62f22cc9941c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -14,12 +14,6 @@ all: $(NET_PROGS)
run_tests: all
@/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
@/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
- @if /sbin/modprobe test_bpf ; then \
- /sbin/rmmod test_bpf; \
- echo "test_bpf: ok"; \
- else \
- echo "test_bpf: [FAIL]"; \
- exit 1; \
- fi
+ ./test_bpf.sh
clean:
$(RM) $(NET_PROGS)
diff --git a/tools/testing/selftests/net/test_bpf.sh b/tools/testing/selftests/net/test_bpf.sh
new file mode 100755
index 000000000000..8b29796d46aa
--- /dev/null
+++ b/tools/testing/selftests/net/test_bpf.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Runs the BPF test suite using the test_bpf kernel module
+
+if /sbin/modprobe -q test_bpf ; then
+ /sbin/modprobe -q -r test_bpf;
+ echo "test_bpf: ok";
+else
+ echo "test_bpf: [FAIL]";
+ exit 1;
+fi
diff --git a/tools/testing/selftests/size/.gitignore b/tools/testing/selftests/size/.gitignore
new file mode 100644
index 000000000000..189b7818de34
--- /dev/null
+++ b/tools/testing/selftests/size/.gitignore
@@ -0,0 +1 @@
+get_size
diff --git a/tools/testing/selftests/size/Makefile b/tools/testing/selftests/size/Makefile
new file mode 100644
index 000000000000..04dc25e4fa92
--- /dev/null
+++ b/tools/testing/selftests/size/Makefile
@@ -0,0 +1,12 @@
+CC = $(CROSS_COMPILE)gcc
+
+all: get_size
+
+get_size: get_size.c
+ $(CC) -static -ffreestanding -nostartfiles -s $< -o $@
+
+run_tests: all
+ ./get_size
+
+clean:
+ $(RM) get_size
diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
new file mode 100644
index 000000000000..2d1af7cca463
--- /dev/null
+++ b/tools/testing/selftests/size/get_size.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014 Sony Mobile Communications Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2
+ *
+ * Selftest for runtime system size
+ *
+ * Prints the amount of RAM that the currently running system is using.
+ *
+ * This program tries to be as small as possible itself, to
+ * avoid perturbing the system memory utilization with its
+ * own execution. It also attempts to have as few dependencies
+ * on kernel features as possible.
+ *
+ * It should be statically linked, with startup libs avoided.
+ * It uses no library calls, and only the following 3 syscalls:
+ * sysinfo(), write(), and _exit()
+ *
+ * For output, it avoids printf (which in some C libraries
+ * has large external dependencies) by implementing its own
+ * number output and print routines, and using __builtin_strlen()
+ */
+
+#include <sys/sysinfo.h>
+#include <unistd.h>
+
+#define STDOUT_FILENO 1
+
+static int print(const char *s)
+{
+ return write(STDOUT_FILENO, s, __builtin_strlen(s));
+}
+
+static inline char *num_to_str(unsigned long num, char *buf, int len)
+{
+ unsigned int digit;
+
+ /* put digits in buffer from back to front */
+ buf += len - 1;
+ *buf = 0;
+ do {
+ digit = num % 10;
+ *(--buf) = digit + '0';
+ num /= 10;
+ } while (num > 0);
+
+ return buf;
+}
+
+static int print_num(unsigned long num)
+{
+ char num_buf[30];
+
+ return print(num_to_str(num, num_buf, sizeof(num_buf)));
+}
+
+static int print_k_value(const char *s, unsigned long num, unsigned long units)
+{
+ unsigned long long temp;
+ int ccode;
+
+ print(s);
+
+ temp = num;
+ temp = (temp * units)/1024;
+ num = temp;
+ ccode = print_num(num);
+ print("\n");
+ return ccode;
+}
+
+/* this program has no main(), as startup libraries are not used */
+void _start(void)
+{
+ int ccode;
+ struct sysinfo info;
+ unsigned long used;
+
+ print("Testing system size.\n");
+ print("1..1\n");
+
+ ccode = sysinfo(&info);
+ if (ccode < 0) {
+ print("not ok 1 get runtime memory use\n");
+ print("# could not get sysinfo\n");
+ _exit(ccode);
+ }
+ /* ignore cache complexities for now */
+ used = info.totalram - info.freeram - info.bufferram;
+ print_k_value("ok 1 get runtime memory use # size = ", used,
+ info.mem_unit);
+
+ print("# System runtime memory report (units in Kilobytes):\n");
+ print_k_value("# Total: ", info.totalram, info.mem_unit);
+ print_k_value("# Free: ", info.freeram, info.mem_unit);
+ print_k_value("# Buffer: ", info.bufferram, info.mem_unit);
+ print_k_value("# In use: ", used, info.mem_unit);
+
+ _exit(0);
+}
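The arithmetic in get_size.c is easy to check against an ordinary libc build: "in use" is totalram - freeram - bufferram, scaled by mem_unit and divided by 1024 to get kilobytes. The standalone sketch below (an illustration, not part of the patch) performs the same calculation with plain printf, which is fine when the measuring program's own footprint does not matter.

/* sysinfo_used.c: companion example, not part of this patch.
 * Computes the same "in use" figure as get_size.c with normal libc I/O. */
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo info;
	unsigned long long used_kb;

	if (sysinfo(&info) != 0) {
		perror("sysinfo");
		return 1;
	}
	/* mem_unit is the size in bytes of one unit in the ram counters */
	used_kb = (unsigned long long)(info.totalram - info.freeram - info.bufferram)
		  * info.mem_unit / 1024;
	printf("In use: %llu kB\n", used_kb);
	return 0;
}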
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 41bd85559d4b..f87d970a485c 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -15,6 +15,8 @@
#include <time.h>
#include <pthread.h>
+#include "../kselftest.h"
+
#define DELAY 2
#define USECS_PER_SEC 1000000
@@ -194,16 +196,16 @@ int main(int argc, char **argv)
printf("based timers if other threads run on the CPU...\n");
if (check_itimer(ITIMER_VIRTUAL) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_itimer(ITIMER_PROF) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_itimer(ITIMER_REAL) < 0)
- return -1;
+ return ksft_exit_fail();
if (check_timer_create(CLOCK_THREAD_CPUTIME_ID) < 0)
- return -1;
+ return ksft_exit_fail();
/*
* It's unfortunately hard to reliably test a timer expiration
@@ -215,7 +217,7 @@ int main(int argc, char **argv)
* find a better solution.
*/
if (check_timer_create(CLOCK_PROCESS_CPUTIME_ID) < 0)
- return -1;
+ return ksft_exit_fail();
- return 0;
+ return ksft_exit_pass();
}
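posix_timers.c now reports its result through the shared kselftest helpers instead of bare -1/0 return codes. kselftest.h itself is not part of this diff; the sketch below shows the shape the two calls above are assumed to take (terminate with a success or failure exit status), not the header's authoritative contents.

/* Assumed shape of the ksft exit helpers used above; the authoritative
 * definitions live in tools/testing/selftests/kselftest.h. */
#include <stdlib.h>

static inline int ksft_exit_pass(void)
{
	exit(EXIT_SUCCESS);	/* test ran and all checks passed */
}

static inline int ksft_exit_fail(void)
{
	exit(EXIT_FAILURE);	/* at least one check failed */
}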
diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile
index 396255bd720e..12c9d15bab07 100644
--- a/tools/testing/selftests/user/Makefile
+++ b/tools/testing/selftests/user/Makefile
@@ -4,10 +4,4 @@
all:
run_tests: all
- @if /sbin/modprobe test_user_copy ; then \
- rmmod test_user_copy; \
- echo "user_copy: ok"; \
- else \
- echo "user_copy: [FAIL]"; \
- exit 1; \
- fi
+ ./test_user_copy.sh
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
new file mode 100755
index 000000000000..350107f40c1d
--- /dev/null
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Exercises the copy_to/from_user infrastructure using the test_user_copy kernel module
+
+if /sbin/modprobe -q test_user_copy; then
+ /sbin/modprobe -q -r test_user_copy
+ echo "user_copy: ok"
+else
+ echo "user_copy: [FAIL]"
+ exit 1
+fi
diff --git a/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
index af4b0508be77..aaca1f44e788 100644
--- a/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
+++ b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
@@ -342,7 +342,7 @@ int main(int argc, char *argv[])
iobuf[i].requested = ret;
printf("submit: %d requests buf: %d\n", ret, i);
} else
- perror("unable to submit reqests");
+ perror("unable to submit requests");
}
/* if event is ready to read */
diff --git a/tools/usb/usbip/libsrc/list.h b/tools/usb/usbip/libsrc/list.h
index 8d0c936e184f..5eaaa78e2c6a 100644
--- a/tools/usb/usbip/libsrc/list.h
+++ b/tools/usb/usbip/libsrc/list.h
@@ -98,7 +98,7 @@ static inline void list_del(struct list_head *entry)
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index 2f87f2d348ba..2a7cd2b8d966 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -91,7 +91,6 @@ static void usbipd_help(void)
static int recv_request_import(int sockfd)
{
struct op_import_request req;
- struct op_common reply;
struct usbip_exported_device *edev;
struct usbip_usb_device pdu_udev;
struct list_head *i;
@@ -100,7 +99,6 @@ static int recv_request_import(int sockfd)
int rc;
memset(&req, 0, sizeof(req));
- memset(&reply, 0, sizeof(reply));
rc = usbip_net_recv(sockfd, &req, sizeof(req));
if (rc < 0) {
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 5a2d1f0f6bc7..8eb6421761cd 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -6,31 +6,11 @@
/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)
-
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-
-/* TODO: Not atomic as it should be:
- * we don't use this for anything important. */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p &= ~mask;
-}
-
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
/* end of stubs */
struct virtio_device {
void *dev;
- unsigned long features[1];
+ u64 features;
};
struct virtqueue {
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 5049967f99f7..83b27e8e9d72 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -2,5 +2,5 @@
#define VIRTIO_TRANSPORT_F_END 32
#define virtio_has_feature(dev, feature) \
- test_bit((feature), (dev)->features)
+ (__virtio_test_bit((dev), feature))
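tools/virtio now tracks features as a single u64 and goes through __virtio_test_bit()/__virtio_set_bit()/__virtio_clear_bit() style helpers, whose definitions live in the tools/virtio headers and are not part of this hunk. The standalone sketch below only illustrates the one-bit-per-feature pattern those helpers are assumed to follow; the type and function names are stand-ins invented for the example.

/* Illustration of a u64 feature word with one bit per feature flag. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vdev { uint64_t features; };	/* stand-in for struct virtio_device */

static inline bool test_feature(const struct vdev *d, unsigned int fbit)
{
	return d->features & (1ULL << fbit);
}

static inline void set_feature(struct vdev *d, unsigned int fbit)
{
	d->features |= 1ULL << fbit;
}

static inline void clear_feature(struct vdev *d, unsigned int fbit)
{
	d->features &= ~(1ULL << fbit);
}

int main(void)
{
	struct vdev d = { .features = 0 };

	set_feature(&d, 28);	/* e.g. VIRTIO_RING_F_INDIRECT_DESC */
	printf("bit 28: %d\n", test_feature(&d, 28));
	clear_feature(&d, 28);
	printf("bit 28: %d\n", test_feature(&d, 28));
	return 0;
}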
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 00ea679b3826..db3437c641a6 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -60,7 +60,7 @@ void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
struct vhost_vring_state state = { .index = info->idx };
struct vhost_vring_file file = { .index = info->idx };
- unsigned long long features = dev->vdev.features[0];
+ unsigned long long features = dev->vdev.features;
struct vhost_vring_addr addr = {
.index = info->idx,
.desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
@@ -113,8 +113,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
{
int r;
memset(dev, 0, sizeof *dev);
- dev->vdev.features[0] = features;
- dev->vdev.features[1] = features >> 32;
+ dev->vdev.features = features;
dev->buf_size = 1024;
dev->buf = malloc(dev->buf_size);
assert(dev->buf);
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 14a4f4cab5b9..9d4b1bca54be 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -304,7 +304,7 @@ static int parallel_test(unsigned long features,
close(to_guest[1]);
close(to_host[0]);
- gvdev.vdev.features[0] = features;
+ gvdev.vdev.features = features;
gvdev.to_host_fd = to_host[1];
gvdev.notifies = 0;
@@ -449,13 +449,13 @@ int main(int argc, char *argv[])
bool fast_vringh = false, parallel = false;
getrange = getrange_iov;
- vdev.features[0] = 0;
+ vdev.features = 0;
while (argv[1]) {
if (strcmp(argv[1], "--indirect") == 0)
- vdev.features[0] |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
+ __virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
else if (strcmp(argv[1], "--eventidx") == 0)
- vdev.features[0] |= (1 << VIRTIO_RING_F_EVENT_IDX);
+ __virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
else if (strcmp(argv[1], "--slow-range") == 0)
getrange = getrange_slow;
else if (strcmp(argv[1], "--fast-vringh") == 0)
@@ -468,7 +468,7 @@ int main(int argc, char *argv[])
}
if (parallel)
- return parallel_test(vdev.features[0], getrange, fast_vringh);
+ return parallel_test(vdev.features, getrange, fast_vringh);
if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
abort();
@@ -483,7 +483,7 @@ int main(int argc, char *argv[])
/* Set up host side. */
vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
- vringh_init_user(&vrh, vdev.features[0], RINGSIZE, true,
+ vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
/* No descriptor to get yet... */
@@ -652,13 +652,13 @@ int main(int argc, char *argv[])
}
/* Test weird (but legal!) indirect. */
- if (vdev.features[0] & (1 << VIRTIO_RING_F_INDIRECT_DESC)) {
+ if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
char *data = __user_addr_max - USER_MEM/4;
struct vring_desc *d = __user_addr_max - USER_MEM/2;
struct vring vring;
/* Force creation of direct, which we modify. */
- vdev.features[0] &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
+ __virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
__user_addr_min,
never_notify_host,
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index 3d907dacf2ac..ac884b65a072 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -1,6 +1,6 @@
# Makefile for vm tools
#
-TARGETS=page-types slabinfo
+TARGETS=page-types slabinfo page_owner_sort
LIB_DIR = ../lib/api
LIBS = $(LIB_DIR)/libapikfs.a
@@ -18,5 +18,5 @@ $(LIBS):
$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
clean:
- $(RM) page-types slabinfo
+ $(RM) page-types slabinfo page_owner_sort
make -C $(LIB_DIR) clean
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
new file mode 100644
index 000000000000..77147b42d598
--- /dev/null
+++ b/tools/vm/page_owner_sort.c
@@ -0,0 +1,144 @@
+/*
+ * User-space helper to sort the output of /sys/kernel/debug/page_owner
+ *
+ * Example use:
+ * cat /sys/kernel/debug/page_owner > page_owner_full.txt
+ * grep -v ^PFN page_owner_full.txt > page_owner.txt
+ * ./sort page_owner.txt sorted_page_owner.txt
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+struct block_list {
+ char *txt;
+ int len;
+ int num;
+};
+
+
+static struct block_list *list;
+static int list_size;
+static int max_size;
+
+struct block_list *block_head;
+
+int read_block(char *buf, int buf_size, FILE *fin)
+{
+ char *curr = buf, *const buf_end = buf + buf_size;
+
+ while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
+ if (*curr == '\n') /* empty line */
+ return curr - buf;
+ curr += strlen(curr);
+ }
+
+ return -1; /* EOF or no space left in buf. */
+}
+
+static int compare_txt(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return strcmp(l1->txt, l2->txt);
+}
+
+static int compare_num(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l2->num - l1->num;
+}
+
+static void add_list(char *buf, int len)
+{
+ if (list_size != 0 &&
+ len == list[list_size-1].len &&
+ memcmp(buf, list[list_size-1].txt, len) == 0) {
+ list[list_size-1].num++;
+ return;
+ }
+ if (list_size == max_size) {
+ printf("max_size too small??\n");
+ exit(1);
+ }
+ list[list_size].txt = malloc(len+1);
+ list[list_size].len = len;
+ list[list_size].num = 1;
+ memcpy(list[list_size].txt, buf, len);
+ list[list_size].txt[len] = 0;
+ list_size++;
+ if (list_size % 1000 == 0) {
+ printf("loaded %d\r", list_size);
+ fflush(stdout);
+ }
+}
+
+#define BUF_SIZE 1024
+
+int main(int argc, char **argv)
+{
+ FILE *fin, *fout;
+ char buf[BUF_SIZE];
+ int ret, i, count;
+ struct block_list *list2;
+ struct stat st;
+
+ if (argc < 3) {
+ printf("Usage: ./program <input> <output>\n");
+ perror("open: ");
+ exit(1);
+ }
+
+ fin = fopen(argv[1], "r");
+ fout = fopen(argv[2], "w");
+ if (!fin || !fout) {
+ printf("Usage: ./program <input> <output>\n");
+ perror("open: ");
+ exit(1);
+ }
+
+ fstat(fileno(fin), &st);
+ max_size = st.st_size / 100; /* hack ... */
+
+ list = malloc(max_size * sizeof(*list));
+
+ for ( ; ; ) {
+ ret = read_block(buf, BUF_SIZE, fin);
+ if (ret < 0)
+ break;
+
+ add_list(buf, ret);
+ }
+
+ printf("loaded %d\n", list_size);
+
+ printf("sorting ....\n");
+
+ qsort(list, list_size, sizeof(list[0]), compare_txt);
+
+ list2 = malloc(sizeof(*list) * list_size);
+
+ printf("culling\n");
+
+ for (i = count = 0; i < list_size; i++) {
+ if (count == 0 ||
+ strcmp(list2[count-1].txt, list[i].txt) != 0) {
+ list2[count++] = list[i];
+ } else {
+ list2[count-1].num += list[i].num;
+ }
+ }
+
+ qsort(list2, count, sizeof(list[0]), compare_num);
+
+ for (i = 0; i < count; i++)
+ fprintf(fout, "%d times:\n%s\n", list2[i].num, list2[i].txt);
+
+ return 0;
+}
diff --git a/usr/Kconfig b/usr/Kconfig
index 2d4c77eecf2e..572dcf7b6a44 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -46,17 +46,17 @@ config INITRAMFS_ROOT_GID
If you are not sure, leave it set to "0".
config RD_GZIP
- bool "Support initial ramdisks compressed using gzip" if EXPERT
- default y
+ bool "Support initial ramdisks compressed using gzip"
depends on BLK_DEV_INITRD
+ default y
select DECOMPRESS_GZIP
help
Support loading of a gzip encoded initial ramdisk or cpio buffer.
If unsure, say Y.
config RD_BZIP2
- bool "Support initial ramdisks compressed using bzip2" if EXPERT
- default !EXPERT
+ bool "Support initial ramdisks compressed using bzip2"
+ default y
depends on BLK_DEV_INITRD
select DECOMPRESS_BZIP2
help
@@ -64,8 +64,8 @@ config RD_BZIP2
If unsure, say N.
config RD_LZMA
- bool "Support initial ramdisks compressed using LZMA" if EXPERT
- default !EXPERT
+ bool "Support initial ramdisks compressed using LZMA"
+ default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZMA
help
@@ -73,17 +73,17 @@ config RD_LZMA
If unsure, say N.
config RD_XZ
- bool "Support initial ramdisks compressed using XZ" if EXPERT
- default !EXPERT
+ bool "Support initial ramdisks compressed using XZ"
depends on BLK_DEV_INITRD
+ default y
select DECOMPRESS_XZ
help
Support loading of a XZ encoded initial ramdisk or cpio buffer.
If unsure, say N.
config RD_LZO
- bool "Support initial ramdisks compressed using LZO" if EXPERT
- default !EXPERT
+ bool "Support initial ramdisks compressed using LZO"
+ default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZO
help
@@ -91,8 +91,8 @@ config RD_LZO
If unsure, say N.
config RD_LZ4
- bool "Support initial ramdisks compressed using LZ4" if EXPERT
- default !EXPERT
+ bool "Support initial ramdisks compressed using LZ4"
+ default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZ4
help
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6a..1c0772b340d8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
+ int ret;
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->irq->irq,
- timer->irq->level);
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer->irq->irq,
+ timer->irq->level);
+ WARN_ON(ret);
}
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
timer_disarm(timer);
}
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
{
- if (timecounter && wqueue) {
- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ if (kvm->arch.timer.enabled)
+ return;
+
+ /*
+ * There is a potential race here between VCPUs starting for the first
+ * time, which may each enable the timer. That doesn't hurt, because
+ * we're just setting a variable to the same value it already had. The
+ * important thing is that all VCPUs have the enabled variable set
+ * before entering the guest, if the arch timers are enabled.
+ */
+ if (timecounter && wqueue)
kvm->arch.timer.enabled = 1;
- }
+}
- return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+ kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index aacdb59f30de..03affc7bf453 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
#define ACCESS_WRITE_VALUE (3 << 1)
#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
+static int vgic_init(struct kvm *kvm);
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
}
}
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
unsigned int irq_num, bool level)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
vgic_dist_irq_clear_level(vcpu, irq_num);
if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
vgic_dist_irq_clear_pending(vcpu, irq_num);
- } else {
- vgic_dist_irq_clear_pending(vcpu, irq_num);
}
+
+ ret = false;
+ goto out;
}
enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
out:
spin_unlock(&dist->lock);
- return ret;
+ return ret ? cpuid : -EINVAL;
}
/**
@@ -1692,11 +1694,26 @@ out:
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level)
{
- if (likely(vgic_initialized(kvm)) &&
- vgic_update_irq_pending(kvm, cpuid, irq_num, level))
- vgic_kick_vcpus(kvm);
+ int ret = 0;
+ int vcpu_id;
- return 0;
+ if (unlikely(!vgic_initialized(kvm))) {
+ mutex_lock(&kvm->lock);
+ ret = vgic_init(kvm);
+ mutex_unlock(&kvm->lock);
+
+ if (ret)
+ goto out;
+ }
+
+ vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+ if (vcpu_id >= 0) {
+ /* kick the specified vcpu */
+ kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+ }
+
+out:
+ return ret;
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+ vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
- return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int i;
-
- for (i = 0; i < dist->nr_irqs; i++) {
- if (i < VGIC_NR_PPIS)
- vgic_bitmap_set_irq_val(&dist->irq_enabled,
- vcpu->vcpu_id, i, 1);
- if (i < VGIC_NR_PRIVATE_IRQS)
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
- vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
- }
+ memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
/*
* Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
*/
vgic_cpu->nr_lr = vgic->nr_lr;
- vgic_enable(vcpu);
+ return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
dist->irq_spi_cpu = NULL;
dist->irq_spi_target = NULL;
dist->irq_pending_on_cpu = NULL;
+ dist->nr_cpus = 0;
}
/*
* Allocate and initialize the various data structures. Must be called
* with kvm->lock held!
*/
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int nr_cpus, nr_irqs;
- int ret, i;
+ int ret, i, vcpu_id;
- if (dist->nr_cpus) /* Already allocated */
+ if (vgic_initialized(kvm))
return 0;
nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
if (ret)
goto out;
- kvm_for_each_vcpu(i, vcpu, kvm) {
+ for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+ vgic_set_target_reg(kvm, 0, i);
+
+ kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
if (ret) {
kvm_err("VGIC: Failed to allocate vcpu memory\n");
break;
}
- }
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
+ for (i = 0; i < dist->nr_irqs; i++) {
+ if (i < VGIC_NR_PPIS)
+ vgic_bitmap_set_irq_val(&dist->irq_enabled,
+ vcpu->vcpu_id, i, 1);
+ if (i < VGIC_NR_PRIVATE_IRQS)
+ vgic_bitmap_set_irq_val(&dist->irq_cfg,
+ vcpu->vcpu_id, i,
+ VGIC_CFG_EDGE);
+ }
+
+ vgic_enable(vcpu);
+ }
out:
if (ret)
@@ -1878,25 +1883,23 @@ out:
}
/**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
*
* Map the virtual CPU interface into the VM before running any VCPUs. We
* can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space. Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
*/
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu;
- int ret = 0, i;
+ int ret = 0;
if (!irqchip_in_kernel(kvm))
return 0;
mutex_lock(&kvm->lock);
- if (vgic_initialized(kvm))
+ if (vgic_ready(kvm))
goto out;
if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
goto out;
}
- ret = vgic_init_maps(kvm);
+ /*
+ * Initialize the vgic if this hasn't already been done on demand by
+ * accessing the vgic state from userspace.
+ */
+ ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to allocate maps\n");
goto out;
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
goto out;
}
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vgic_vcpu_init(vcpu);
-
kvm->arch.vgic.ready = true;
out:
if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
- ret = vgic_init_maps(dev->kvm);
+ ret = vgic_init(dev->kvm);
if (ret)
goto out;
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
mutex_lock(&dev->kvm->lock);
- if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+ if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
ret = -EBUSY;
else
dev->kvm->arch.vgic.nr_irqs = val;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0fb390943c6..148b2392c762 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,9 +36,6 @@
#include <linux/seqlock.h>
#include <trace/events/kvm.h>
-#ifdef __KVM_HAVE_IOAPIC
-#include "ioapic.h"
-#endif
#include "iodev.h"
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -492,9 +489,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -504,9 +499,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3cee7b167052..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
- if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
- /* The thread running this VCPU changed. */
- struct pid *oldpid = vcpu->pid;
- struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
- rcu_assign_pointer(vcpu->pid, newpid);
- if (oldpid)
- synchronize_rcu();
- put_pid(oldpid);
- }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
@@ -468,9 +459,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (r)
goto out_err_no_disable;
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
- INIT_HLIST_HEAD(&kvm->mask_notifier_list);
-#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
@@ -668,48 +656,46 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return 0;
}
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
- struct kvm_memory_slot *s1, *s2;
-
- s1 = (struct kvm_memory_slot *)slot1;
- s2 = (struct kvm_memory_slot *)slot2;
-
- if (s1->npages < s2->npages)
- return 1;
- if (s1->npages > s2->npages)
- return -1;
-
- return 0;
-}
-
/*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert the memslot and re-sort the memslots array based on GFN,
+ * so that a binary search can be used to look up a GFN.
+ * The sorting step takes advantage of the array already being sorted
+ * and the changed memslot's position being known.
*/
-static void sort_memslots(struct kvm_memslots *slots)
-{
- int i;
-
- sort(slots->memslots, KVM_MEM_SLOTS_NUM,
- sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
-
- for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[slots->memslots[i].id] = i;
-}
-
static void update_memslots(struct kvm_memslots *slots,
struct kvm_memory_slot *new)
{
- if (new) {
- int id = new->id;
- struct kvm_memory_slot *old = id_to_memslot(slots, id);
- unsigned long npages = old->npages;
+ int id = new->id;
+ int i = slots->id_to_index[id];
+ struct kvm_memory_slot *mslots = slots->memslots;
- *old = *new;
- if (new->npages != npages)
- sort_memslots(slots);
+ WARN_ON(mslots[i].id != id);
+ if (!new->npages) {
+ new->base_gfn = 0;
+ if (mslots[i].npages)
+ slots->used_slots--;
+ } else {
+ if (!mslots[i].npages)
+ slots->used_slots++;
}
+
+ while (i < KVM_MEM_SLOTS_NUM - 1 &&
+ new->base_gfn <= mslots[i + 1].base_gfn) {
+ if (!mslots[i + 1].npages)
+ break;
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i++;
+ }
+ while (i > 0 &&
+ new->base_gfn > mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+
+ mslots[i] = *new;
+ slots->id_to_index[mslots[i].id] = i;
}
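The comment above describes a plain insertion step over an array kept sorted by key (base_gfn, largest first, with empty slots pushed toward the tail) so that later lookups can binary-search it. Detached from the kvm structures, the shifting idea looks like the sketch below; the struct and function names are invented for the example, and the id_to_index bookkeeping and empty-slot handling of the real update_memslots() are deliberately omitted.

/* Illustration only: update one element of an array kept sorted in
 * descending key order by shifting its neighbours, mirroring the loop
 * structure of update_memslots() above. */
#include <stdio.h>
#include <stddef.h>

struct entry { int id; unsigned long key; };

static void insert_sorted(struct entry *a, size_t n, size_t i, struct entry new)
{
	/* slide the updated entry towards the tail past larger-or-equal keys */
	while (i + 1 < n && new.key <= a[i + 1].key) {
		a[i] = a[i + 1];
		i++;
	}
	/* slide it towards the head past smaller keys */
	while (i > 0 && new.key > a[i - 1].key) {
		a[i] = a[i - 1];
		i--;
	}
	a[i] = new;
}

int main(void)
{
	struct entry a[] = { {0, 90}, {1, 50}, {2, 10} };
	struct entry upd = { 1, 95 };	/* entry 1 moves to the front */

	insert_sorted(a, 3, 1, upd);
	for (size_t i = 0; i < 3; i++)
		printf("id=%d key=%lu\n", a[i].id, a[i].key);
	return 0;
}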
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
@@ -727,7 +713,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
}
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
- struct kvm_memslots *slots, struct kvm_memory_slot *new)
+ struct kvm_memslots *slots)
{
struct kvm_memslots *old_memslots = kvm->memslots;
@@ -738,7 +724,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
WARN_ON(old_memslots->generation & 1);
slots->generation = old_memslots->generation + 1;
- update_memslots(slots, new);
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
@@ -760,7 +745,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
- * Must be called holding mmap_sem for write.
+ * Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
@@ -866,15 +851,16 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
- old_memslots = install_new_memslots(kvm, slots, NULL);
+ old_memslots = install_new_memslots(kvm, slots);
/* slot was deleted or moved, clear iommu mapping */
kvm_iommu_unmap_pages(kvm, &old);
@@ -886,6 +872,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
+
+ /*
+ * We can re-use the old_memslots from above, the only difference
+ * from the currently installed memslots is the invalid flag. This
+ * will get overwritten by update_memslots anyway.
+ */
slots = old_memslots;
}
@@ -893,26 +885,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (r)
goto out_slots;
- r = -ENOMEM;
- /*
- * We can re-use the old_memslots from above, the only difference
- * from the currently installed memslots is the invalid flag. This
- * will get overwritten by update_memslots anyway.
- */
- if (!slots) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
- }
-
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
- old_memslots = install_new_memslots(kvm, slots, &new);
+ update_memslots(slots, &new);
+ old_memslots = install_new_memslots(kvm, slots);
kvm_arch_commit_memory_region(kvm, mem, &old, change);
@@ -1799,10 +1779,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
rcu_read_unlock();
if (!task)
return ret;
- if (task->flags & PF_VCPU) {
- put_task_struct(task);
- return ret;
- }
ret = yield_to(task, 1);
put_task_struct(task);
@@ -2065,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
+ if (oldpid)
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
@@ -2599,8 +2584,6 @@ static long kvm_vm_ioctl(struct file *filp,
break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
- if (r == -ENOTTY)
- r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;